diff --git a/.gitignore b/.gitignore index 20df56d13..6877e8f64 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ /.gitee/ /.vscode/ /.idea/ - +/cmake-build-debug/ *.a *.o *.so @@ -21,4 +21,4 @@ objfiles.txt /config.status /ereport.txt -/build/script/version.cfg +/build/script/version.cfg \ No newline at end of file diff --git a/GNUmakefile.in b/GNUmakefile.in index 88004fe3f..8bc0009bd 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -66,6 +66,7 @@ install: +@echo "openGauss installation complete." else ifeq ($(enable_privategauss), yes) +ifneq ($(enable_lite_mode), yes) install: $(MAKE) install_mysql_fdw $(MAKE) install_oracle_fdw @@ -76,6 +77,16 @@ install: $(MAKE) -C $(root_builddir)/contrib/gsredistribute $@ +@echo "openGauss installation complete." else +install: + $(MAKE) install_mysql_fdw + $(MAKE) install_oracle_fdw + $(MAKE) install_pldebugger + $(MAKE) -C contrib/postgres_fdw $@ + $(MAKE) -C contrib/hstore $@ + $(MAKE) -C $(root_builddir)/privategauss/kernel/extension/packages $@ + +@echo "openGauss installation complete." +endif +else install: $(MAKE) install_mysql_fdw $(MAKE) install_oracle_fdw @@ -131,6 +142,8 @@ qunitcheck: all fastcheck_single: all +upgradecheck_single: all + fastcheck_single_comm_proxy: all redocheck: all diff --git a/README.md b/README.md index c6fc555dd..e1608e1b0 100644 --- a/README.md +++ b/README.md @@ -1,611 +1,611 @@ -![openGauss Logo](doc/openGauss-logo.png "openGauss logo") - -[English](./README.en.md) | 简体中文 - - - -- [什么是openGauss](#什么是openGauss) -- [安装](#安装) - - [创建配置文件](#创建配置文件) - - [初始化安装环境](#初始化安装环境) - - [执行安装](#执行安装) -- [卸载openGauss](#卸载openGauss) -- [编译](#编译) - - [概述](#概述) - - [操作系统和软件依赖要求](#操作系统和软件依赖要求) - - [下载openGauss](#下载openGauss) - - [编译第三方软件](#编译第三方软件) - - [使用build.sh编译](#使用build编译) - - [使用命令编译](#使用命令编译) - - [编译安装包](#编译安装包) -- [快速入门](#快速入门) -- [文档](#文档) -- [社区](#社区) - - [治理](#治理) - - [交流](#交流) -- [贡献](#贡献) -- [发行说明](#发行说明) -- [许可证](#许可证) - -## 什么是openGauss - -openGauss是一款开源的关系型数据库管理系统,它具有多核高性能、全链路安全性、智能运维等企业级特性。 -openGauss内核早期源自开源数据库PostgreSQL,融合了华为在数据库领域多年的内核经验,在架构、事务、存储引擎、优化器及ARM架构上进行了适配与优化。作为一个开源数据库,期望与广泛的开发者共同构建一个多元化技术的开源数据库社区。 - -openGauss架构 - -**高性能** - -openGauss突破了多核CPU的瓶颈,实现两路鲲鹏128核150万tpmC,内存优化表(MOT)引擎达350万tpmC。 - -**数据分区** - -内部线程共享的关键数据结构进行数据分区,减少加锁访问冲突。比如CLOG就采用分区优化,解决ClogControlLock锁瓶颈。 - -**NUMA化内核数据结构** - -关键数据结构NUMA化分配,减少跨CPU访问。比如全局PGPROC数组按照NUMA Node的数目分为多份,分别在对应NUMA Node上申请内存。解决ProcArrayLock锁瓶颈。 - -**绑核优化** - -把网络中断绑核和后台业务线程绑核区分开,避免运行线程在核间迁移造成的性能不稳定。 - -**ARM指令优化** - -结合ARM平台的原子操作lse进行优化,实现关键互斥变量原子高效操作。 - -**SQL BY PASS** - -通过SQL BY PASS优化SQL执行流程,简化CPU执行开销。 - -**高可靠** - -正常业务负载情况下,RTO小于10秒,降低节点故障导致的业务不可用时间。 - -**并行恢复** - -主机日志传输到备机时,备机日志落盘的同时,发送给重做恢复分发线程,分发线程根据日志类型和日志操作的数据页发给多个并行恢复线程进行日志重做,保证备机的重做速度跟上主机日志的产生速度。这样备机实时处于ready状态,从而实现瞬间故障切换。 - - -**MOT引擎(Beta发布)** - -内存优化表(MOT)存储引擎是一个专为多核大内存优化的存储引擎,具有极高的联机事务处理(OLTP)性能和资源利用率。MOT的数据和索引完全存储在内存中,通过NUMA感知执行,算法消除闩锁争用以及查询JIT本地编译,提供低时延数据访问及高效事务执行。更多请参考[MOT引擎文档](https://opengauss.org/zh/docs/2.1.0/docs/Developerguide/%E5%86%85%E5%AD%98%E8%A1%A8%E7%89%B9%E6%80%A7.html)。 - -**安全** - -openGauss支持账号管理,账号认证,口令复杂度检查,账号锁定,权限管理和校验,传输加密,操作 -审计等全方位的数据库安全能力,保护业务满足安全要求。 - -**易运维** - -openGauss将AI算法集成到数据库中,减少数据库维护的负担。 - -- **SQL预测** - -openGauss根据收集的历史性能数据进行编码和基于深度学习的训练及预测,支持SQL执行时间预测。 - -- **SQL诊断器** - -openGauss支持SQL执行语句的诊断器,提前发现慢查询。 - -- **参数自动调整** - -openGauss通过机器学习方法自动调整数据库参数,提高调参效率,降低正确调参成本。 - - -## 安装 - -### 创建配置文件 - -在安装openGauss之前,需要创建clusterconfig.xml配置文件。XML文件包含部署openGauss的服务器信息、安装路径、IP地址以及端口号等。用于告知openGauss如何部署。用户需根据不同场配置对应的XML文件。 - -下面以一主一备的部署方案为例,说明如何创建XML配置文件。 
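一个极简的一主一备配置示意如下(仅为示意:字段名以openGauss安装文档为准,集群名、节点名、IP与路径均为假设取值,请按实际环境替换):

```
<?xml version="1.0" encoding="UTF-8"?>
<ROOT>
  <!-- 集群整体信息(各value均为假设示例) -->
  <CLUSTER>
    <PARAM name="clusterName" value="dbCluster" />
    <PARAM name="nodeNames" value="node1,node2" />
    <PARAM name="backIp1s" value="192.168.0.1,192.168.0.2" />
    <PARAM name="gaussdbAppPath" value="/opt/install/app" />
    <PARAM name="gaussdbLogPath" value="/var/log/omm" />
    <PARAM name="tmpMppdbPath" value="/opt/install/tmp" />
    <PARAM name="gaussdbToolPath" value="/opt/install/om" />
    <PARAM name="corePath" value="/opt/install/corefile" />
  </CLUSTER>
  <!-- 各主机的部署信息:node1为主机,node2为备机 -->
  <DEVICELIST>
    <DEVICE sn="node1">
      <PARAM name="name" value="node1" />
      <PARAM name="azName" value="AZ1" />
      <PARAM name="azPriority" value="1" />
      <PARAM name="backIp1" value="192.168.0.1" />
      <PARAM name="dataNum" value="1" />
      <PARAM name="dataPortBase" value="26000" />
      <!-- dataNode1取值格式:主机数据目录,备机名,备机数据目录 -->
      <PARAM name="dataNode1" value="/opt/install/data/dn,node2,/opt/install/data/dn" />
    </DEVICE>
    <DEVICE sn="node2">
      <PARAM name="name" value="node2" />
      <PARAM name="azName" value="AZ1" />
      <PARAM name="azPriority" value="1" />
      <PARAM name="backIp1" value="192.168.0.2" />
    </DEVICE>
  </DEVICELIST>
</ROOT>
```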
-以下value取值信息仅为示例,可自行替换。每行信息均有注释进行说明。 - -``` - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -### 初始化安装环境 - -创建完openGauss配置文件后,在执行安装前,为了后续能以最小权限进行安装及openGauss管理操作,保证系统安全性,需要运行安装前置脚本gs_preinstall准备好安装用户及环境。 - -安装前置脚本gs_preinstall可以协助用户自动完成如下的安装环境准备工作: - -- 自动设置Linux内核参数以达到提高服务器负载能力的目的。这些参数直接影响数据库系统的运行状态,请仅在确认必要时调整。 -- 自动将openGauss配置文件、安装包拷贝到openGauss主机的相同目录下。 -- openGauss安装用户、用户组不存在时,自动创建安装用户以及用户组。 -- 读取openGauss配置文件中的目录信息并创建,将目录权限授予安装用户。 - -**注意事项** - -- 用户需要检查上层目录权限,保证安装用户对安装包和配置文件目录读写执行的权限。 -- xml文件中各主机的名称与IP映射配置正确。 -- 只能使用root用户执行gs_preinstall命令。 - -**操作步骤** - -1.以root用户登录待安装openGauss的任意主机,并按规划创建存放安装包的目录。 - - ``` -mkdir -p /opt/software/openGauss -chmod 755 -R /opt/software - ``` - - > **说明** - > - > - 不建议把安装包的存放目录规划到openGauss用户的家目录或其子目录下,可能导致权限问题。 - > - openGauss用户须具有/opt/software/openGauss目录的读写权限。 - -2.将安装包“openGauss-x.x.x-openEULER-64bit.tar.gz”和配置文件“clusterconfig.xml”都上传至上一步所创建的目录中。 - -3.在安装包所在的目录下,解压安装包openGauss-x.x.x-openEULER-64bit.tar.gz。安装包解压后,在/opt/software/openGauss目录下自动生成script目录。在script目录下生成gs_preinstall等OM工具脚本。 - -``` -cd /opt/software/openGauss -tar -zxvf openGauss-x.x.x-openEULER-64bit.tar.gz -``` - -4.进入工具脚本目录。 - - ``` -cd /opt/software/openGauss/script - ``` - -5.如果是openEuler的操作系统,执行如下命令打开performance.sh文件,用#注释sysctl -w vm.min_free_kbytes=112640 &> /dev/null,键入“ESC”键进入指令模式,执行**:wq**保存并退出修改。 - -``` -vi /etc/profile.d/performance.sh -``` - -6.为确保openssl版本正确,执行预安装前请加载安装包中lib库。执行命令如下,其中*{packagePath}*为用户安装包放置的路径,本示例中为/opt/software/openGauss。 - - ``` -export LD_LIBRARY_PATH={packagePath}/script/gspylib/clib:$LD_LIBRARY_PATH - ``` - - -7.为确保成功安装,检查 hostname 与 /etc/hostname 是否一致。预安装过程中,会对hostname进行检查。 - -8.使用gs_preinstall准备好安装环境。若为共用环境需加入--sep-env-file=ENVFILE参数分离环境变量,避免与其他用户相互影响,ENVFILE为用户自行指定的环境变量分离文件的路径。 - 执行如下命令,即采用交互模式执行前置,并在执行过程中自动创建root用户互信和openGauss用户互信: - - ``` -./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml - ``` - - omm为数据库管理员用户(即运行openGauss的操作系统用户),dbgrp为运行openGauss的操作系统用户的组名,/opt/software/ openGauss/clusterconfig.xml为openGauss的配置文件路径。执行过程中需要根据提示选择建立互信,并输入root或openGauss用户的密码。 - -### 执行安装 - -执行前置脚本准备好openGauss安装环境之后,按照启动安装过程部署openGauss。 - -**前提条件** - -- 已成功执行前置脚本gs_preinstall。 -- 所有服务器操作系统和网络均正常运行。 -- 用户需确保各个主机上的locale保持一致。 - -**操作步骤** - -1.(可选)检查安装包和openGauss配置文件在规划路径下是否已存在,如果没有,重新执行预安装,确保预安装成功,再执行以下步骤。 - -2.登录到openGauss的主机,并切换到omm用户。 - - ``` -su - omm - ``` - - > **说明** - > - > - omm为gs_preinstall脚本中-U参数指定的用户。 - > - 以上述omm用户执行gs_install脚本。否则会报执行错误。 - -3.使用gs_install安装openGauss。若为环境变量分离的模式安装的集群需要source环境变量分离文件ENVFILE。 - - ``` -gs_install -X /opt/software/openGauss/clusterconfig.xml - ``` - - /opt/software/openGauss/script/clusterconfig.xml为openGauss配置文件的路径。在执行过程中,用户需根据提示输入数据库的密码,密码具有一定的复杂度,为保证用户正常使用该数据库,请记住输入的数据库密码。 - - 密码复杂度要求: - - - 长度至少8个字符。 - - 不能和用户名、当前密码(ALTER)、当前密码的倒序相同。 - - 以下至少包含三类:大写字母(A - Z)、小写字母(a - z)、数字(0 - 9)、其他字符(仅限~!@#$%^&*()-_=+\|[{}];:,<.>/?)。 - -4.安装执行成功之后,需要手动删除主机root用户的互信,即删除openGauss数据库各节点上的互信文件。 - - ``` -rm -rf ~/.ssh - ``` - -### 卸载openGauss - -卸载openGauss的过程包括卸载openGauss和清理openGauss服务器环境。 - -#### **执行卸载** - -openGauss提供了卸载脚本,帮助用户卸载openGauss。 - -**操作步骤** - -1.以操作系统用户omm登录数据库主节点。 - -2.使用gs_uninstall卸载openGauss。 - - ``` -gs_uninstall --delete-data - ``` - - 或者在openGauss中每个节点执行本地卸载。 - - ``` -gs_uninstall --delete-data -L - ``` - -#### **一键式环境清理** - -在openGauss卸载完成后,如果不需要在环境上重新部署openGauss,可以运行脚本gs_postuninstall对openGauss服务器上环境信息做清理。openGauss环境清理是对环境准备脚本gs_preinstall所做设置的清理。 -**前提条件** - -- openGauss卸载执行成功。 -- root用户互信可用。 -- 
只能使用root用户执行gs_postuninstall命令。 - -**操作步骤** - -1.以root用户登录openGauss服务器。 - -2.查看互信是否建成功,可以互相执行**ssh 主机名**。输入exit退出。 - - ``` - plat1:~ # ssh plat2 - Last login: Tue Jan 5 10:28:18 2016 from plat1 - plat2:~ # exit - logout - Connection to plat2 closed. - plat1:~ # - ``` - -3.进入script路径下。 - - ``` - cd /opt/software/openGauss/script - ``` - -4.使用gs_postuninstall进行清理。若为环境变量分离的模式安装的集群需要source环境变量分离文件ENVFILE。 - - ``` - ./gs_postuninstall -U omm -X /opt/software/openGauss/clusterconfig.xml --delete-user --delete-group - ``` - - 或者在openGauss中每个节点执行本地后置清理。 - - ``` - ./gs_postuninstall -U omm -X /opt/software/openGauss/clusterconfig.xml --delete-user --delete-group -L - ``` - - omm为运行openGauss的操作系统用户名,/opt/software/openGauss/clusterconfig.xml为openGauss配置文件路径。 - -若为环境变量分离的模式安装的集群需删除之前source的环境变量分离的env参数unset MPPDB_ENV_SEPARATE_PATH - -5.删除各openGauss数据库节点root用户互信。 - - -## 编译 - -### 概述 - -编译openGauss需要openGauss-server和binarylibs两个组件。 - -- openGauss-server:openGauss的主要代码。可以从开源社区获取。 - -- binarylibs:openGauss依赖的第三方开源软件,你可以直接编译openGauss-third_party代码获取,也可以从开源社区下载已经编译好的并上传的一个副本。 - -对于数据库、三方库、GCC的编译以及常见问题,参照博客[openGauss数据库编译指导](https://opengauss.org/zh/blogs/blogs.html?post/xingchen/opengauss_compile/) - -在编译openGauss之前,请检查操作系统和软件依赖要求。 - -openGauss可以通过一键式shell工具build.sh进行编译,也可以通过命令进行编译。安装包由build.sh生成。 - -### 操作系统和软件依赖要求 - -openGauss支持以下操作系统: - -- CentOS 7.6(x86架构) - -- openEuler-20.03-LTS(aarch64架构) - -适配其他系统,参照博客[openGauss数据库编译指导](https://opengauss.org/zh/blogs/blogs.html?post/xingchen/opengauss_compile/) - -以下表格列举了编译openGauss的软件要求。 - -建议使用从列出的操作系统安装盘或安装源中获取的以下依赖软件的默认安装包进行安装。如果不存在以下软件,请参考推荐的软件版本。 - -软件依赖要求如下: - -| 软件 | 推荐版本 | -| ------------- | --------------- | -| libaio-devel | 0.3.109-13 | -| flex | 2.5.31及以上版本 | -| bison | 2.7-4 | -| ncurses-devel | 5.9-13.20130511 | -| glibc-devel | 2.17-111 | -| patch | 2.7.1-10 | -| lsb_release | 4.1 | -| readline-devel| 7.0-13 | - -### 下载openGauss - -可以从开源社区下载openGauss-server和openGauss-third_party。 - -https://opengauss.org/zh/ - -可以通过以下网站获取编译好的binarylibs。下载后请解压缩并重命名为**binarylibs**。 - -https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.1.0/openGauss-third_party_binarylibs.tar.gz - - -现在我们已经拥有完整的openGauss代码,把它存储在以下目录中(以sda为例)。 - -- /sda/openGauss-server -- /sda/binarylibs -- /sda/openGauss-third_party - -### 编译第三方软件 - -在编译openGauss之前,需要先编译openGauss依赖的开源及第三方软件。这些开源及第三方软件存储在openGauss-third_party代码仓库中,通常只需要构建一次。如果开源软件有更新,需要重新构建软件。 - -用户也可以直接从**binarylibs**库中获取开源软件编译和构建的输出文件。 - -如果你想自己编译第三方软件,请到openGauss-third_party仓库查看详情。 - -执行完上述脚本后,最终编译和构建的结果保存在与**openGauss-third_party**同级的**binarylibs**目录下。在编译**openGauss-server**时会用到这些文件。 - -### 代码编译 - -##### 使用build.sh编译代码 - -openGauss-server中的build.sh是编译过程中的重要脚本工具。该工具集成了软件安装编译和产品安装包编译功能,可快速进行代码编译和打包。。 - -参数说明请见以下表格。 - -| 选项 | 缺省值 | 参数 | 说明 | -| :---- | :--------------------------- | :------------------------------------- | :------------------------------------------------ | -| -h | 请勿使用此选项。 | - | 帮助菜单。 | -| -m | release | [debug | release | memcheck] | 选择目标版本。 | -| -3rd | ${Code directory}/binarylibs | [binarylibs path] | 指定binarylibs路径。该路径必须是绝对路径。 | -| -pkg | 请勿使用此选项。 | - | 将代码编译结果压缩至安装包。 | -| -nopt | 请勿使用此选项。 | - | 如果使用此功能,则对鲲鹏平台的相关CPU不进行优化。 | - -> **注意** -> -> - **-m [debug | release | memcheck]**表示有三个目标版本可以选择: -> - **release**:生成release版本的二进制程序。此版本编译时,通过配置GCC高级优化选项,去除内核调试代码。此选项通常在生成环境或性能测试环境中使用。 -> - **debug**:表示生成debug版本的二进制程序。此版本编译时,增加了内核代码调试功能,一般用于开发自测环境。 -> - **memcheck**:表示生成memcheck版本的二进制程序。此版本编译时,在debug版本的基础上增加了ASAN功能,用于定位内存问题。 -> - **-3rd [binarylibs 
path]**为**binarylibs**的路径。默认设置为当前代码文件夹下存在**binarylibs**,因此如果**binarylibs**被移至**openGauss-server**中,或者在**openGauss-server**中创建了到**binarylibs**的软链接,则不需要指定此参数。但请注意,这样做的话,该文件很容易被**git clean**命令删除。 -> - 该脚本中的每个选项都有一个默认值。选项数量少,依赖简单。因此,该脚本易于使用。如果实际需要的参数值与默认值不同,请根据实际情况配置。 - -现在你已经知晓build.sh的用法,只需使用如下命令即可编译openGauss-server。 - -``` -[user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] -``` - -举例: - -``` -[user@linux openGauss-server]$ sh build.sh # 编译安装release版本的openGauss。需代码目录下有binarylibs或者其软链接,否则将会失败。 -[user@linux openGauss-server]$ sh build.sh -m debug -3rd /sda/binarylibs # 编译安装debug版本的openGauss -``` - -编译后的软件安装路径为:**/sda/openGauss-server/dest** - -编译后的二进制文件路径为:**/sda/openGauss-server/dest/bin** - -编译日志: **make_compile.log** - - - -##### 使用命令编译代码 - -1.执行以下脚本获取系统版本号: - - ``` - [user@linux openGauss-server]$ sh src/get_PlatForm_str.sh - ``` - - > **注意** - > - > - 命令回显信息即为openGauss支持的操作系统。目前openGauss支持的操作系统为centos7.6_x86_64和openeuler_aarch64。 - > - 如果显示**Failed**或其他版本,表示openGauss不支持当前操作系统。 - -2.配置环境变量,根据代码下载位置添加**____**,并将***替换为上一步的结果。 - - ``` - export CODE_BASE=________ # Path of the openGauss-server file - export BINARYLIBS=________ # Path of the binarylibs file - export GAUSSHOME=$CODE_BASE/dest/ - export GCC_PATH=$BINARYLIBS/buildtools/***/gcc7.3/ - export CC=$GCC_PATH/gcc/bin/gcc - export CXX=$GCC_PATH/gcc/bin/g++ - export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH - export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH - - ``` - - 例如,在CENTOS X86-64平台上,binarylibs目录被作为openGauss-server目录的兄弟目录。 - 在openGauss-server目录下执行以下命令。 - - ``` - export CODE_BASE=`pwd` - export BINARYLIBS=`pwd`/../binarylibs - export GAUSSHOME=$CODE_BASE/dest/ - export GCC_PATH=$BINARYLIBS/buildtools/centos7.6_x86_64/gcc7.3/ - export CC=$GCC_PATH/gcc/bin/gcc - export CXX=$GCC_PATH/gcc/bin/g++ - export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH - export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH - - ``` -3.选择一个版本进行配置。 - - **debug**版本: - - ``` - ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib - ``` - - **release**版本: - - ``` - ./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib - ``` - - **memcheck**版本: - - ``` - ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib --enable-memory-check - ``` - - > **注意** - > - > - *[debug | release | memcheck]*表示有三个目标版本可用。 - > - 在ARM平台上,需要把**-D__USE_NUMA**添加至**CFLAGS**中。 - > - 在**ARMv8.1**及以上平台(如鲲鹏920),需要把**-D__ARM_LSE**添加至**CFLAGS**中。 - > - 如果**binarylibs**被移至**openGauss-server**中,或者在**openGauss-server**中创建了到**binarylibs**的软链接,则不需要指定**--3rd**参数。但请注意,这样做的话,该文件很容易被`git clean`命令删除。 - -4.执行以下命令编译openGauss: - - ``` - [user@linux openGauss-server]$ make -sj - [user@linux openGauss-server]$ make install -sj - ``` - -5.显示如下信息,表示编译和安装成功。 - - ``` - openGauss installation complete. 
- ``` - -- 编译后的软件安装路径为**$GAUSSHOME**。 - -- 编译后的二进制文件存放路径为:**$GAUSSHOME/bin**。 - - - - -### 编译安装包 - -请先阅读[使用build.sh编译](#使用build.sh编译)章节,了解build.sh的用法,以及如何使用该脚本编译openGauss。 - -现在,只需添加一个-pkg选项,就可以编译安装包。 - -``` -[user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] -pkg -``` - -举例: - -``` -sh build.sh -pkg # 生成release版本的openGauss安装包。需代码目录下有binarylibs或者其软链接,否则将会失败。 -sh build.sh -m debug -3rd /sdc/binarylibs -pkg # 生成debug版本的openGauss安装包 -``` - -- 生成的安装包存放目录:**./package**。 - -- 编译日志: **make_compile.log** - -- 安装包打包日志: **./package/make_package.log** - - -## 快速入门 - -参考[快速入门](https://opengauss.org/zh/docs/2.1.0/docs/Quickstart/Quickstart.html)。 - -## 文档 - -更多安装指南、教程和API请参考[用户文档](https://gitee.com/opengauss/docs)。 - -## 社区 - -### 治理 - -查看openGauss是如何实现开放[治理](https://gitee.com/opengauss/community/blob/master/governance.md)。 - -### 交流 - -- WeLink:开发者的交流平台。 -- IRC频道:`#opengauss-meeting`(仅用于会议纪要)。 -- 邮件列表:https://opengauss.org/zh/community/onlineCommunication.html - -## 贡献 - -欢迎大家来参与贡献。详情请参阅我们的[社区贡献](https://opengauss.org/zh/contribution.html)。 - -## 发行说明 - -请参见[发行说明](https://opengauss.org/zh/docs/2.1.0/docs/Releasenotes/Releasenotes.html)。 - -## 许可证 - -[MulanPSL-2.0](http://license.coscl.org.cn/MulanPSL2/) +![openGauss Logo](doc/openGauss-logo.png "openGauss logo") + +[English](./README.en.md) | 简体中文 + + + +- [什么是openGauss](#什么是openGauss) +- [安装](#安装) + - [创建配置文件](#创建配置文件) + - [初始化安装环境](#初始化安装环境) + - [执行安装](#执行安装) +- [卸载openGauss](#卸载openGauss) +- [编译](#编译) + - [概述](#概述) + - [操作系统和软件依赖要求](#操作系统和软件依赖要求) + - [下载openGauss](#下载openGauss) + - [编译第三方软件](#编译第三方软件) + - [使用build.sh编译](#使用build编译) + - [使用命令编译](#使用命令编译) + - [编译安装包](#编译安装包) +- [快速入门](#快速入门) +- [文档](#文档) +- [社区](#社区) + - [治理](#治理) + - [交流](#交流) +- [贡献](#贡献) +- [发行说明](#发行说明) +- [许可证](#许可证) + +## 什么是openGauss + +openGauss是一款开源的关系型数据库管理系统,它具有多核高性能、全链路安全性、智能运维等企业级特性。 +openGauss内核早期源自开源数据库PostgreSQL,融合了华为在数据库领域多年的内核经验,在架构、事务、存储引擎、优化器及ARM架构上进行了适配与优化。作为一个开源数据库,期望与广泛的开发者共同构建一个多元化技术的开源数据库社区。 + +openGauss架构 + +**高性能** + +openGauss突破了多核CPU的瓶颈,实现两路鲲鹏128核150万tpmC,内存优化表(MOT)引擎达350万tpmC。 + +**数据分区** + +内部线程共享的关键数据结构进行数据分区,减少加锁访问冲突。比如CLOG就采用分区优化,解决ClogControlLock锁瓶颈。 + +**NUMA化内核数据结构** + +关键数据结构NUMA化分配,减少跨CPU访问。比如全局PGPROC数组按照NUMA Node的数目分为多份,分别在对应NUMA Node上申请内存。解决ProcArrayLock锁瓶颈。 + +**绑核优化** + +把网络中断绑核和后台业务线程绑核区分开,避免运行线程在核间迁移造成的性能不稳定。 + +**ARM指令优化** + +结合ARM平台的原子操作lse进行优化,实现关键互斥变量原子高效操作。 + +**SQL BY PASS** + +通过SQL BY PASS优化SQL执行流程,简化CPU执行开销。 + +**高可靠** + +正常业务负载情况下,RTO小于10秒,降低节点故障导致的业务不可用时间。 + +**并行恢复** + +主机日志传输到备机时,备机日志落盘的同时,发送给重做恢复分发线程,分发线程根据日志类型和日志操作的数据页发给多个并行恢复线程进行日志重做,保证备机的重做速度跟上主机日志的产生速度。这样备机实时处于ready状态,从而实现瞬间故障切换。 + + +**MOT引擎(Beta发布)** + +内存优化表(MOT)存储引擎是一个专为多核大内存优化的存储引擎,具有极高的联机事务处理(OLTP)性能和资源利用率。MOT的数据和索引完全存储在内存中,通过NUMA感知执行,算法消除闩锁争用以及查询JIT本地编译,提供低时延数据访问及高效事务执行。更多请参考[MOT引擎文档](https://opengauss.org/zh/docs/2.0.0/docs/Developerguide/%E5%86%85%E5%AD%98%E8%A1%A8%E7%89%B9%E6%80%A7.html)。 + +**安全** + +openGauss支持账号管理,账号认证,口令复杂度检查,账号锁定,权限管理和校验,传输加密,操作 +审计等全方位的数据库安全能力,保护业务满足安全要求。 + +**易运维** + +openGauss将AI算法集成到数据库中,减少数据库维护的负担。 + +- **SQL预测** + +openGauss根据收集的历史性能数据进行编码和基于深度学习的训练及预测,支持SQL执行时间预测。 + +- **SQL诊断器** + +openGauss支持SQL执行语句的诊断器,提前发现慢查询。 + +- **参数自动调整** + +openGauss通过机器学习方法自动调整数据库参数,提高调参效率,降低正确调参成本。 + + +## 安装 + +### 创建配置文件 + +在安装openGauss之前,需要创建clusterconfig.xml配置文件。XML文件包含部署openGauss的服务器信息、安装路径、IP地址以及端口号等。用于告知openGauss如何部署。用户需根据不同场配置对应的XML文件。 + +下面以一主一备的部署方案为例,说明如何创建XML配置文件。 +以下value取值信息仅为示例,可自行替换。每行信息均有注释进行说明。 + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + +``` + +### 初始化安装环境 + +创建完openGauss配置文件后,在执行安装前,为了后续能以最小权限进行安装及openGauss管理操作,保证系统安全性,需要运行安装前置脚本gs_preinstall准备好安装用户及环境。 + +安装前置脚本gs_preinstall可以协助用户自动完成如下的安装环境准备工作: + +- 自动设置Linux内核参数以达到提高服务器负载能力的目的。这些参数直接影响数据库系统的运行状态,请仅在确认必要时调整。 +- 自动将openGauss配置文件、安装包拷贝到openGauss主机的相同目录下。 +- openGauss安装用户、用户组不存在时,自动创建安装用户以及用户组。 +- 读取openGauss配置文件中的目录信息并创建,将目录权限授予安装用户。 + +**注意事项** + +- 用户需要检查上层目录权限,保证安装用户对安装包和配置文件目录读写执行的权限。 +- xml文件中各主机的名称与IP映射配置正确。 +- 只能使用root用户执行gs_preinstall命令。 + +**操作步骤** + +1.以root用户登录待安装openGauss的任意主机,并按规划创建存放安装包的目录。 + + ``` +mkdir -p /opt/software/openGauss +chmod 755 -R /opt/software + ``` + + > **说明** + > + > - 不建议把安装包的存放目录规划到openGauss用户的家目录或其子目录下,可能导致权限问题。 + > - openGauss用户须具有/opt/software/openGauss目录的读写权限。 + +2.将安装包“openGauss-x.x.x-openEULER-64bit.tar.gz”和配置文件“clusterconfig.xml”都上传至上一步所创建的目录中。 + +3.在安装包所在的目录下,解压安装包openGauss-x.x.x-openEULER-64bit.tar.gz。安装包解压后,在/opt/software/openGauss目录下自动生成script目录。在script目录下生成gs_preinstall等OM工具脚本。 + +``` +cd /opt/software/openGauss +tar -zxvf openGauss-x.x.x-openEULER-64bit.tar.gz +``` + +4.进入工具脚本目录。 + + ``` +cd /opt/software/openGauss/script + ``` + +5.如果是openEuler的操作系统,执行如下命令打开performance.sh文件,用#注释sysctl -w vm.min_free_kbytes=112640 &> /dev/null,键入“ESC”键进入指令模式,执行**:wq**保存并退出修改。 + +``` +vi /etc/profile.d/performance.sh +``` + +6.为确保openssl版本正确,执行预安装前请加载安装包中lib库。执行命令如下,其中*{packagePath}*为用户安装包放置的路径,本示例中为/opt/software/openGauss。 + + ``` +export LD_LIBRARY_PATH={packagePath}/script/gspylib/clib:$LD_LIBRARY_PATH + ``` + + +7.为确保成功安装,检查 hostname 与 /etc/hostname 是否一致。预安装过程中,会对hostname进行检查。 + +8.使用gs_preinstall准备好安装环境。若为共用环境需加入--sep-env-file=ENVFILE参数分离环境变量,避免与其他用户相互影响,ENVFILE为用户自行指定的环境变量分离文件的路径。 + 执行如下命令,即采用交互模式执行前置,并在执行过程中自动创建root用户互信和openGauss用户互信: + + ``` +./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml + ``` + + omm为数据库管理员用户(即运行openGauss的操作系统用户),dbgrp为运行openGauss的操作系统用户的组名,/opt/software/ openGauss/clusterconfig.xml为openGauss的配置文件路径。执行过程中需要根据提示选择建立互信,并输入root或openGauss用户的密码。 + +### 执行安装 + +执行前置脚本准备好openGauss安装环境之后,按照启动安装过程部署openGauss。 + +**前提条件** + +- 已成功执行前置脚本gs_preinstall。 +- 所有服务器操作系统和网络均正常运行。 +- 用户需确保各个主机上的locale保持一致。 + +**操作步骤** + +1.(可选)检查安装包和openGauss配置文件在规划路径下是否已存在,如果没有,重新执行预安装,确保预安装成功,再执行以下步骤。 + +2.登录到openGauss的主机,并切换到omm用户。 + + ``` +su - omm + ``` + + > **说明** + > + > - omm为gs_preinstall脚本中-U参数指定的用户。 + > - 以上述omm用户执行gs_install脚本。否则会报执行错误。 + +3.使用gs_install安装openGauss。若为环境变量分离的模式安装的集群需要source环境变量分离文件ENVFILE。 + + ``` +gs_install -X /opt/software/openGauss/clusterconfig.xml + ``` + + /opt/software/openGauss/script/clusterconfig.xml为openGauss配置文件的路径。在执行过程中,用户需根据提示输入数据库的密码,密码具有一定的复杂度,为保证用户正常使用该数据库,请记住输入的数据库密码。 + + 密码复杂度要求: + + - 长度至少8个字符。 + - 不能和用户名、当前密码(ALTER)、当前密码的倒序相同。 + - 以下至少包含三类:大写字母(A - Z)、小写字母(a - z)、数字(0 - 9)、其他字符(仅限~!@#$%^&*()-_=+\|[{}];:,<.>/?)。 + +4.安装执行成功之后,需要手动删除主机root用户的互信,即删除openGauss数据库各节点上的互信文件。 + + ``` +rm -rf ~/.ssh + ``` + +### 卸载openGauss + +卸载openGauss的过程包括卸载openGauss和清理openGauss服务器环境。 + +#### **执行卸载** + +openGauss提供了卸载脚本,帮助用户卸载openGauss。 + +**操作步骤** + +1.以操作系统用户omm登录数据库主节点。 + +2.使用gs_uninstall卸载openGauss。 + + ``` +gs_uninstall --delete-data + ``` + + 或者在openGauss中每个节点执行本地卸载。 + + ``` +gs_uninstall --delete-data -L + ``` + +#### **一键式环境清理** + +在openGauss卸载完成后,如果不需要在环境上重新部署openGauss,可以运行脚本gs_postuninstall对openGauss服务器上环境信息做清理。openGauss环境清理是对环境准备脚本gs_preinstall所做设置的清理。 +**前提条件** + +- openGauss卸载执行成功。 +- root用户互信可用。 +- 只能使用root用户执行gs_postuninstall命令。 + +**操作步骤** + +1.以root用户登录openGauss服务器。 + +2.查看互信是否建成功,可以互相执行**ssh 主机名**。输入exit退出。 + + ``` + plat1:~ # ssh plat2 + Last 
login: Tue Jan 5 10:28:18 2016 from plat1 + plat2:~ # exit + logout + Connection to plat2 closed. + plat1:~ # + ``` + +3.进入script路径下。 + + ``` + cd /opt/software/openGauss/script + ``` + +4.使用gs_postuninstall进行清理。若为环境变量分离的模式安装的集群需要source环境变量分离文件ENVFILE。 + + ``` + ./gs_postuninstall -U omm -X /opt/software/openGauss/clusterconfig.xml --delete-user --delete-group + ``` + + 或者在openGauss中每个节点执行本地后置清理。 + + ``` + ./gs_postuninstall -U omm -X /opt/software/openGauss/clusterconfig.xml --delete-user --delete-group -L + ``` + + omm为运行openGauss的操作系统用户名,/opt/software/openGauss/clusterconfig.xml为openGauss配置文件路径。 + +若为环境变量分离的模式安装的集群需删除之前source的环境变量分离的env参数unset MPPDB_ENV_SEPARATE_PATH + +5.删除各openGauss数据库节点root用户互信。 + + +## 编译 + +### 概述 + +编译openGauss需要openGauss-server和binarylibs两个组件。 + +- openGauss-server:openGauss的主要代码。可以从开源社区获取。 + +- binarylibs:openGauss依赖的第三方开源软件,你可以直接编译openGauss-third_party代码获取,也可以从开源社区下载已经编译好的并上传的一个副本。 + +对于数据库、三方库、GCC的编译以及常见问题,参照博客[openGauss数据库编译指导](https://opengauss.org/zh/blogs/blogs.html?post/xingchen/opengauss_compile/) + +在编译openGauss之前,请检查操作系统和软件依赖要求。 + +openGauss可以通过一键式shell工具build.sh进行编译,也可以通过命令进行编译。安装包由build.sh生成。 + +### 操作系统和软件依赖要求 + +openGauss支持以下操作系统: + +- CentOS 7.6(x86架构) + +- openEuler-20.03-LTS(aarch64架构) + +适配其他系统,参照博客[openGauss数据库编译指导](https://opengauss.org/zh/blogs/blogs.html?post/xingchen/opengauss_compile/) + +以下表格列举了编译openGauss的软件要求。 + +建议使用从列出的操作系统安装盘或安装源中获取的以下依赖软件的默认安装包进行安装。如果不存在以下软件,请参考推荐的软件版本。 + +软件依赖要求如下: + +| 软件 | 推荐版本 | +| ------------- | --------------- | +| libaio-devel | 0.3.109-13 | +| flex | 2.5.31及以上版本 | +| bison | 2.7-4 | +| ncurses-devel | 5.9-13.20130511 | +| glibc-devel | 2.17-111 | +| patch | 2.7.1-10 | +| lsb_release | 4.1 | +| readline-devel| 7.0-13 | + +### 下载openGauss + +可以从开源社区下载openGauss-server和openGauss-third_party。 + +https://opengauss.org/zh/ + +可以通过以下网站获取编译好的binarylibs。下载后请解压缩并重命名为**binarylibs**。 + +https://opengauss.obs.cn-south-1.myhuaweicloud.com/2.0.0/openGauss-third_party_binarylibs.tar.gz + + +现在我们已经拥有完整的openGauss代码,把它存储在以下目录中(以sda为例)。 + +- /sda/openGauss-server +- /sda/binarylibs +- /sda/openGauss-third_party + +### 编译第三方软件 + +在编译openGauss之前,需要先编译openGauss依赖的开源及第三方软件。这些开源及第三方软件存储在openGauss-third_party代码仓库中,通常只需要构建一次。如果开源软件有更新,需要重新构建软件。 + +用户也可以直接从**binarylibs**库中获取开源软件编译和构建的输出文件。 + +如果你想自己编译第三方软件,请到openGauss-third_party仓库查看详情。 + +执行完上述脚本后,最终编译和构建的结果保存在与**openGauss-third_party**同级的**binarylibs**目录下。在编译**openGauss-server**时会用到这些文件。 + +### 代码编译 + +##### 使用build.sh编译代码 + +openGauss-server中的build.sh是编译过程中的重要脚本工具。该工具集成了软件安装编译和产品安装包编译功能,可快速进行代码编译和打包。。 + +参数说明请见以下表格。 + +| 选项 | 缺省值 | 参数 | 说明 | +| :---- | :--------------------------- | :------------------------------------- | :------------------------------------------------ | +| -h | 请勿使用此选项。 | - | 帮助菜单。 | +| -m | release | [debug | release | memcheck] | 选择目标版本。 | +| -3rd | ${Code directory}/binarylibs | [binarylibs path] | 指定binarylibs路径。该路径必须是绝对路径。 | +| -pkg | 请勿使用此选项。 | - | 将代码编译结果压缩至安装包。 | +| -nopt | 请勿使用此选项。 | - | 如果使用此功能,则对鲲鹏平台的相关CPU不进行优化。 | + +> **注意** +> +> - **-m [debug | release | memcheck]**表示有三个目标版本可以选择: +> - **release**:生成release版本的二进制程序。此版本编译时,通过配置GCC高级优化选项,去除内核调试代码。此选项通常在生成环境或性能测试环境中使用。 +> - **debug**:表示生成debug版本的二进制程序。此版本编译时,增加了内核代码调试功能,一般用于开发自测环境。 +> - **memcheck**:表示生成memcheck版本的二进制程序。此版本编译时,在debug版本的基础上增加了ASAN功能,用于定位内存问题。 +> - **-3rd [binarylibs path]**为**binarylibs**的路径。默认设置为当前代码文件夹下存在**binarylibs**,因此如果**binarylibs**被移至**openGauss-server**中,或者在**openGauss-server**中创建了到**binarylibs**的软链接,则不需要指定此参数。但请注意,这样做的话,该文件很容易被**git clean**命令删除。 +> - 
该脚本中的每个选项都有一个默认值。选项数量少,依赖简单。因此,该脚本易于使用。如果实际需要的参数值与默认值不同,请根据实际情况配置。 + +现在你已经知晓build.sh的用法,只需使用如下命令即可编译openGauss-server。 + +``` +[user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] +``` + +举例: + +``` +[user@linux openGauss-server]$ sh build.sh # 编译安装release版本的openGauss。需代码目录下有binarylibs或者其软链接,否则将会失败。 +[user@linux openGauss-server]$ sh build.sh -m debug -3rd /sda/binarylibs # 编译安装debug版本的openGauss +``` + +编译后的软件安装路径为:**/sda/openGauss-server/dest** + +编译后的二进制文件路径为:**/sda/openGauss-server/dest/bin** + +编译日志: **make_compile.log** + + + +##### 使用命令编译代码 + +1.执行以下脚本获取系统版本号: + + ``` + [user@linux openGauss-server]$ sh src/get_PlatForm_str.sh + ``` + + > **注意** + > + > - 命令回显信息即为openGauss支持的操作系统。目前openGauss支持的操作系统为centos7.6_x86_64和openeuler_aarch64。 + > - 如果显示**Failed**或其他版本,表示openGauss不支持当前操作系统。 + +2.配置环境变量,根据代码下载位置添加**____**,并将***替换为上一步的结果。 + + ``` + export CODE_BASE=________ # Path of the openGauss-server file + export BINARYLIBS=________ # Path of the binarylibs file + export GAUSSHOME=$CODE_BASE/dest/ + export GCC_PATH=$BINARYLIBS/buildtools/***/gcc7.3/ + export CC=$GCC_PATH/gcc/bin/gcc + export CXX=$GCC_PATH/gcc/bin/g++ + export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH + export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH + + ``` + + 例如,在CENTOS X86-64平台上,binarylibs目录被作为openGauss-server目录的兄弟目录。 + 在openGauss-server目录下执行以下命令。 + + ``` + export CODE_BASE=`pwd` + export BINARYLIBS=`pwd`/../binarylibs + export GAUSSHOME=$CODE_BASE/dest/ + export GCC_PATH=$BINARYLIBS/buildtools/centos7.6_x86_64/gcc7.3/ + export CC=$GCC_PATH/gcc/bin/gcc + export CXX=$GCC_PATH/gcc/bin/g++ + export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH + export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH + + ``` +3.选择一个版本进行配置。 + + **debug**版本: + + ``` + ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib + ``` + + **release**版本: + + ``` + ./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --with-readline --without-zlib + ``` + + **memcheck**版本: + + ``` + ./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --with-readline --without-zlib --enable-memory-check + ``` + + > **注意** + > + > - *[debug | release | memcheck]*表示有三个目标版本可用。 + > - 在ARM平台上,需要把**-D__USE_NUMA**添加至**CFLAGS**中。 + > - 在**ARMv8.1**及以上平台(如鲲鹏920),需要把**-D__ARM_LSE**添加至**CFLAGS**中。 + > - 如果**binarylibs**被移至**openGauss-server**中,或者在**openGauss-server**中创建了到**binarylibs**的软链接,则不需要指定**--3rd**参数。但请注意,这样做的话,该文件很容易被`git clean`命令删除。 + +4.执行以下命令编译openGauss: + + ``` + [user@linux openGauss-server]$ make -sj + [user@linux openGauss-server]$ make install -sj + ``` + +5.显示如下信息,表示编译和安装成功。 + + ``` + openGauss installation complete. 
+ ``` + +- 编译后的软件安装路径为**$GAUSSHOME**。 + +- 编译后的二进制文件存放路径为:**$GAUSSHOME/bin**。 + + + + +### 编译安装包 + +请先阅读[使用build.sh编译](#使用build.sh编译)章节,了解build.sh的用法,以及如何使用该脚本编译openGauss。 + +现在,只需添加一个-pkg选项,就可以编译安装包。 + +``` +[user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] -pkg +``` + +举例: + +``` +sh build.sh -pkg # 生成release版本的openGauss安装包。需代码目录下有binarylibs或者其软链接,否则将会失败。 +sh build.sh -m debug -3rd /sdc/binarylibs -pkg # 生成debug版本的openGauss安装包 +``` + +- 生成的安装包存放目录:**./package**。 + +- 编译日志: **make_compile.log** + +- 安装包打包日志: **./package/make_package.log** + + +## 快速入门 + +参考[快速入门](https://opengauss.org/zh/docs/2.0.0/docs/Quickstart/Quickstart.html)。 + +## 文档 + +更多安装指南、教程和API请参考[用户文档](https://gitee.com/opengauss/docs)。 + +## 社区 + +### 治理 + +查看openGauss是如何实现开放[治理](https://gitee.com/opengauss/community/blob/master/governance.md)。 + +### 交流 + +- WeLink:开发者的交流平台。 +- IRC频道:`#opengauss-meeting`(仅用于会议纪要)。 +- 邮件列表:https://opengauss.org/zh/community/onlineCommunication.html + +## 贡献 + +欢迎大家来参与贡献。详情请参阅我们的[社区贡献](https://opengauss.org/zh/contribution.html)。 + +## 发行说明 + +请参见[发行说明](https://opengauss.org/zh/docs/2.0.0/docs/Releasenotes/Releasenotes.html)。 + +## 许可证 + +[MulanPSL-2.0](http://license.coscl.org.cn/MulanPSL2/) diff --git a/Tools/open_assistants/opengauss_fileset b/Tools/open_assistants/opengauss_fileset deleted file mode 100644 index 2e8c5d033..000000000 --- a/Tools/open_assistants/opengauss_fileset +++ /dev/null @@ -1,62 +0,0 @@ -[overwrite] -CMakeLists.txt -License -GNUmakefile.in -Makefile -README.en.md -README.md -Third_Party_Open_Source_Software_Notice -aclocal.m4 -build -build.sh -cmake -config -configure -contrib -doc -docker -escan.txt -package -simpleInstall -src/DEVELOPERS -src/Makefile -src/Makefile.global.in -src/Makefile.shlib -src/bcc32.mak -src/bin -src/common -src/gausskernel -src/get_PlatForm_str.sh -src/include -src/lib -src/makefiles -src/manager -src/mtlocal.pl -src/nls-global.mk -src/test -src/tools -src/win32.mak -Tools/memory_check -[delete] -third_party -contrib/secbox -contrib/carbondata -contrib/gtmtester -src/bin/gds -src/bin/pg_redis -src/include/ssl/openssl_etcd.cnf -src/test/regress/jar -src/test/regress/krbclient -src/test/regress/obstools -src/tools/casedb -build/script/mpp_release_list_centos -build/script/mpp_release_list_centos_aarch64 -build/script/mpp_release_list_centos_single -build/script/mpp_release_list_euleros -build/script/mpp_release_list_euleros_aarch64 -build/script/mpp_release_list_euleros_aarch64_single -build/script/mpp_release_list_euleros_single -build/script/mpp_release_list_linux_x86_64 -build/script/mpp_release_list_openeuler_aarch64 -build/script/mpp_release_list_openeuler_aarch64_single -build/script/mpp_release_list_kylin_aarch64 diff --git a/Tools/open_assistants/port_openGauss.pl b/Tools/open_assistants/port_openGauss.pl deleted file mode 100644 index dfa10208b..000000000 --- a/Tools/open_assistants/port_openGauss.pl +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/perl - -use strict; -use warnings; -use File::Basename; -use File::Path qw(make_path remove_tree); -use Cwd; - -my $gausskernel_dir = $ARGV[0]; -my $opengauss_dir = $ARGV[1]; - -sub usage -{ - print " usage:\n"; - print " perl port_openGauss.pl GaussDBKernel-server-directory openGauss-server-directory\n"; - print " \n"; -} - -sub valid_line -{ - my ($l) = @_; - $l =~ s/^\s+//g; - $l =~ s/\s+$//g; - return 1 if $l; - return 0; -} - -sub prepare_parentdir -{ - my $dir = $_[0]; - $dir =~ s/\/*$//g; - die "there is no such a directory $dir" unless 
$dir; - - my $parentdir = dirname($dir); - make_path $parentdir unless -d $parentdir; -} - -if ( !$opengauss_dir || !$gausskernel_dir || $gausskernel_dir eq "-h" || $gausskernel_dir eq "--help" ) { - usage(); - exit(-1); -} -if (! -d $opengauss_dir || ! -d $gausskernel_dir ) { - print "ERROR: $opengauss_dir or $gausskernel_dir does not exist!"; -} - -$opengauss_dir =~ s{/+$}{}g; -$gausskernel_dir =~ s{/+$}{}g; - -my $open_assist_dir = dirname(__FILE__); -if ($open_assist_dir !~ m/^\//) { - $open_assist_dir = cwd() . '/' . $open_assist_dir; -} -$open_assist_dir =~ s/\/\.$//; - -my $opengauss_fileset = "$open_assist_dir/opengauss_fileset"; -my @overwrite_fileset; -my @delete_fileset; - -open my $fset, "<", $opengauss_fileset or die "cannot open $opengauss_fileset: $!\n"; -my $file_type = "none"; -while(my $line=<$fset>) { - chomp $line; - if ($line =~ /\[overwrite\]/) { - $file_type = "overwrite"; - next; - } - elsif ($line =~ /\[delete\]/) { - $file_type = "delete"; - next; - } - - if ($file_type eq "overwrite") { - push @overwrite_fileset, $line; - } - elsif ($file_type eq "delete") { - push @delete_fileset, $line; - } -} - -print "[" . localtime() . "] synchronizing directories and files.\n"; -foreach my $d(qw/src contrib/) { - if ( -d "$opengauss_dir/$d" ) { - remove_tree("$opengauss_dir/$d"); - print "removed $opengauss_dir/$d\n"; - } - make_path("$opengauss_dir/$d"); - print "created $opengauss_dir/$d\n"; -} - -foreach my $f(@overwrite_fileset) { - next unless valid_line($f); - if ( -d "$gausskernel_dir/$f") { - prepare_parentdir("$opengauss_dir/$f"); - remove_tree("$opengauss_dir/$f") if -d "$opengauss_dir/$f"; - system("cp -fr $gausskernel_dir/$f $opengauss_dir/$f") == 0 or print "ERROR: copy $gausskernel_dir/$f failed\n"; - print "copied $opengauss_dir/$f\n"; - } - elsif ( -f "$gausskernel_dir/$f") { - system("cp -f $gausskernel_dir/$f $opengauss_dir/$f") == 0 or print "ERROR: copy $gausskernel_dir/$f failed\n"; - print "copied $opengauss_dir/$f\n"; - } -} - -foreach my $f(@delete_fileset) { - next unless valid_line($f); - if ( -d "$opengauss_dir/$f") { - remove_tree("$opengauss_dir/$f"); - print "deleted $opengauss_dir/$f\n"; - } - elsif ( -f "$opengauss_dir/$f") { - unlink "$opengauss_dir/$f"; - print "deleted $opengauss_dir/$f\n"; - } -} - -print "[" . localtime() . "] synchronized directories and files.\n"; diff --git a/build.sh b/build.sh index 69c52b025..d0bea6617 100755 --- a/build.sh +++ b/build.sh @@ -1,5 +1,12 @@ #!/bin/bash - +####################################################################### +# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd. +# descript: Compile and pack openGauss +# Return 0 means OK. +# Return 1 means failed. 
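+# usage(示意,路径取值均为示例,详见README“使用build.sh编译”一节):
+#   sh build.sh -m release -3rd /sda/binarylibs          # 编译release版本
+#   sh build.sh -m debug -3rd /sda/binarylibs -pkg       # 编译debug版本并生成安装包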
+# version: 2.0 +# date: 2020-08-08 +####################################################################### declare build_version_mode='release' declare build_binarylib_dir='None' declare wrap_binaries='NO' @@ -74,10 +81,10 @@ ROOT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) echo "ROOT_DIR : $ROOT_DIR" cd build/script chmod a+x build_opengauss.sh -sh build_opengauss.sh -m ${build_version_mode} -3rd ${build_binarylib_dir} ${not_optimized} -pkg server -mc off +./build_opengauss.sh -m ${build_version_mode} -3rd ${build_binarylib_dir} ${not_optimized} -pkg server if [ "${wrap_binaries}"X = "YES"X ] then - chmod a+x build_opengauss.sh - sh package_opengauss.sh -3rd ${build_binarylib_dir} -m ${build_version_mode} -f ${config_file} + chmod a+x package_opengauss.sh + ./package_opengauss.sh -3rd ${build_binarylib_dir} -m ${build_version_mode} -f ${config_file} fi exit 0 diff --git a/build/script/opengauss_release_list_centos_single b/build/script/aarch64_lite_list similarity index 66% rename from build/script/opengauss_release_list_centos_single rename to build/script/aarch64_lite_list index 53d3efad1..b5bbfe064 100644 --- a/build/script/opengauss_release_list_centos_single +++ b/build/script/aarch64_lite_list @@ -2,47 +2,20 @@ ./bin/gsql ./bin/gaussdb ./bin/gstrace -./bin/gs_basebackup -./bin/gs_probackup -./bin/gs_tar -./bin/gs_encrypt ./bin/gs_dump ./bin/gs_dumpall ./bin/gs_ctl ./bin/gs_initdb ./bin/gs_guc -./bin/encrypt -./bin/openssl ./bin/gs_restore -./bin/gs_cgroup -./bin/openssl ./bin/pg_config ./bin/pg_controldata -./bin/pg_format_cu ./bin/pg_resetxlog -./bin/pg_recvlogical ./bin/alarmItem.conf ./bin/retry_errcodes.conf ./bin/cluster_guc.conf -./bin/bind_net_irq.sh -./bin/setArmOptimization.sh -./bin/krb5kdc -./bin/klist -./bin/kinit -./bin/kdestroy -./bin/kdb5_util -./bin/kadmin.local ./bin/lz4 -./bin/kadmind -./bin/dbmind -./bin/server.key.cipher -./bin/server.key.rand ./bin/gs_plan_simulator.sh -./etc/kerberos/kadm5.acl -./etc/kerberos/kdc.conf -./etc/kerberos/krb5.conf -./etc/kerberos/mppdb-site.xml -./share/postgresql/tmp/udstools.py ./share/postgresql/db4ai ./share/postgresql/snowball_create.sql ./share/postgresql/pg_hba.conf.sample @@ -54,7 +27,6 @@ ./share/postgresql/pg_ident.conf.sample ./share/postgresql/postgres.description ./share/postgresql/postgresql.conf.sample -./share/postgresql/mot.conf.sample ./share/postgresql/extension/plpgsql--1.0.sql ./share/postgresql/extension/hstore.control ./share/postgresql/extension/security_plugin.control @@ -72,8 +44,6 @@ ./share/postgresql/extension/hdfs_fdw.control ./share/postgresql/extension/log_fdw--1.0.sql ./share/postgresql/extension/log_fdw.control -./share/postgresql/extension/mot_fdw--1.0.sql -./share/postgresql/extension/mot_fdw.control ./share/postgresql/extension/postgres_fdw--1.0.sql ./share/postgresql/extension/postgres_fdw.control ./share/postgresql/timezone/GB-Eire @@ -282,7 +252,6 @@ ./share/postgresql/timezone/Canada/Newfoundland ./share/postgresql/timezone/Canada/Saskatchewan ./share/postgresql/timezone/Canada/Pacific -./share/postgresql/timezone/Canada/East-Saskatchewan ./share/postgresql/timezone/Canada/Mountain ./share/postgresql/timezone/Canada/Central ./share/postgresql/timezone/CST6CDT @@ -664,7 +633,6 @@ ./share/postgresql/timezone/Navajo ./share/postgresql/timezone/GMT ./share/postgresql/system_views.sql -./share/postgresql/private_system_views.sql ./share/postgresql/performance_views.sql ./share/postgresql/sql_features.txt ./share/postgresql/pg_cast_oid.txt @@ -703,11 +671,45 @@ 
./share/postgresql/timezonesets/Default ./share/postgresql/timezonesets/Etc.txt ./share/postgresql/postgres.bki -./share/llvmir/GaussDB_expr.ir ./share/sslcert/gsql/openssl.cnf ./share/sslcert/grpc/openssl.cnf -./share/sslcert/om/openssl.cnf -./lib/libsimsearch/ +./lib/libnuma.so +./lib/libnuma.so.1 +./lib/libnuma.so.1.0.0 +./lib/libpq.so +./lib/libpq.so.5 +./lib/libpq.so.5.5 +./lib/libssl.so +./lib/libssl.so.1.1 +./lib/libcrypto.so +./lib/libcrypto.so.1.1 +./lib/libcgroup.so +./lib/libcgroup.so.1 +./lib/libz.so +./lib/libz.so.1 +./lib/libz.so.1.2.11 +./lib/liblz4.so +./lib/liblz4.so.1 +./lib/liblz4.so.1.9.2 +./lib/libcjson.so +./lib/libcjson.so.1 +./lib/libcjson.so.1.7.13 +./lib/libcjson_utils.so +./lib/libcjson_utils.so.1 +./lib/libcjson_utils.so.1.7.13 +./lib/libstdc++.so.6 +./lib/libgcc_s.so.1 +./lib/libgomp.so +./lib/libgomp.so.1 +./lib/libgomp.so.1.0.0 +./lib/libdcf.so +./lib/libzstd.so +./lib/libzstd.so.1 +./lib/libzstd.so.1.5.0 +./lib/libcurl.so +./lib/libcurl.so.4 +./lib/libcurl.so.4.6.0 +./lib/libxgboost.so ./lib/postgresql/latin2_and_win1250.so ./lib/postgresql/euc2004_sjis2004.so ./lib/postgresql/euc_kr_and_mic.so @@ -716,12 +718,6 @@ ./lib/postgresql/cyrillic_and_mic.so ./lib/postgresql/utf8_and_johab.so ./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/src/get_PlatForm_str.sh ./lib/postgresql/pgxs/config/install-sh ./lib/postgresql/euc_cn_and_mic.so ./lib/postgresql/latin_and_mic.so @@ -747,142 +743,9 @@ ./lib/postgresql/pg_plugin ./lib/postgresql/proc_srclib ./lib/postgresql/security_plugin.so -./lib/postgresql/pg_upgrade_support.so -./lib/postgresql/java/pljava.jar -./lib/postgresql/postgres_fdw.so -./lib/postgresql/pgoutput.so -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libcgroup.so -./lib/libcgroup.so.1 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -./lib/libatomic.so -./lib/libatomic.so.1 -./lib/libatomic.so.1.2.0 -./lib/libmasstree.so -./lib/libupb.so -./lib/libupb.so.9 -./lib/libupb.so.9.0.0 -./lib/libabsl_str_format_internal.so -./lib/libabsl_strings.so -./lib/libabsl_throw_delegate.so -./lib/libabsl_strings_internal.so -./lib/libabsl_base.so -./lib/libabsl_dynamic_annotations.so -./lib/libabsl_spinlock_wait.so -./lib/libabsl_int128.so -./lib/libabsl_bad_optional_access.so -./lib/libabsl_raw_logging_internal.so -./lib/libabsl_log_severity.so -./lib/libaddress_sorting.so -./lib/libaddress_sorting.so.9 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkadm5clnt.so -./lib/libkadm5clnt_mit.so -./lib/libkadm5clnt_mit.so.11 -./lib/libkadm5clnt_mit.so.11.0 -./lib/libkadm5clnt_mit.so.12 -./lib/libkadm5clnt_mit.so.12.0 -./lib/libkadm5srv.so -./lib/libkadm5srv_mit.so -./lib/libkadm5srv_mit.so.11 -./lib/libkadm5srv_mit.so.11.0 -./lib/libkadm5srv_mit.so.12 -./lib/libkadm5srv_mit.so.12.0 -./lib/libkdb5.so -./lib/libkdb5.so.9 -./lib/libkdb5.so.9.0 -./lib/libkdb5.so.10 -./lib/libkdb5.so.10.0 -./lib/libkrad.so -./lib/libkrad.so.0 -./lib/libkrad.so.0.0 
-./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/krb5/plugins/kdb/db2.so -./lib/libverto.so -./lib/libverto.so.0 -./lib/libverto.so.0.0 -./lib/libcurl.so -./lib/libcurl.so.4 -./lib/libcurl.so.4.6.0 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libgcc_s.so.1 -./lib/libstdc++.so.6 -./lib/libz.so -./lib/libz.so.1 -./lib/libz.so.1.2.11 -./lib/liblz4.so -./lib/liblz4.so.1 -./lib/liblz4.so.1.9.2 -./lib/libcjson.so -./lib/libcjson.so.1 -./lib/libcjson.so.1.7.13 -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./share/llvmir/GaussDB_expr.ir -./lib/libeSDKLogAPI.so -./lib/libeSDKOBS.so -./lib/liblog4cpp.so -./lib/liblog4cpp.so.5 -./lib/liblog4cpp.so.5.0.6 -./lib/libcharset.so -./lib/libcharset.so.1 -./lib/libcharset.so.1.0.0 -./lib/libiconv.so -./lib/libiconv.so.2 -./lib/libiconv.so.2.6.1 -./lib/libnghttp2.so -./lib/libnghttp2.so.14 -./lib/libnghttp2.so.14.20.0 -./lib/libpcre.so -./lib/libpcre.so.1 -./lib/libpcre.so.1.2.12 -./lib/libsecurec.so -./lib/libxml2.so -./lib/libxml2.so.2 -./lib/libxml2.so.2.9.9 -./lib/libparquet.so -./lib/libparquet.so.14 -./lib/libparquet.so.14.1.0 -./lib/libarrow.so -./lib/libarrow.so.14 -./lib/libarrow.so.14.1.0 -./lib/OBS.ini ./lib/postgresql/latin2_and_win1250.so ./lib/postgresql/euc2004_sjis2004.so -./lib/libdcf.so -./lib/libzstd.so -./lib/libzstd.so.1 -./lib/libzstd.so.1.4.4 - - +./lib/libxgboost.so ./include/postgresql/server/postgres_ext.h ./include/postgresql/server/pg_config_os.h ./include/postgresql/server/pgtime.h @@ -1012,6 +875,7 @@ ./include/postgresql/server/storage/backendid.h ./include/postgresql/server/storage/lock/lock.h ./include/postgresql/server/storage/lock/lwlock.h +./include/postgresql/server/storage/lwlocknames.h ./include/postgresql/server/storage/barrier.h ./include/postgresql/server/storage/shmem.h ./include/postgresql/server/pg_config.h @@ -1035,418 +899,14 @@ ./include/postgresql/server/lib/ilist.h ./include/postgresql/server/pgxc/locator.h ./include/postgresql/server/gstrace/gstrace_infra.h -./include/postgresql/server/extension_dependency.h -./include/postgresql/server/libpq/libpq-fe.h -./include/postgresql/server/access/clog.h -./include/postgresql/server/storage/proc.h -./include/postgresql/server/access/xlog.h -./include/postgresql/server/storage/lwlocknames.h -./include/postgresql/server/access/xloginsert.h -./include/postgresql/server/catalog/pg_control.h -./include/postgresql/server/access/parallel_recovery/redo_item.h -./include/postgresql/server/access/parallel_recovery/posix_semaphore.h -./include/postgresql/server/replication/replicainternal.h -./include/postgresql/server/knl/knl_instance.h -./include/postgresql/server/knl/knl_guc.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_guc_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_network.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_network.h 
-./include/postgresql/server/knl/knl_guc/knl_session_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_common.h -./include/postgresql/server/lib/circularqueue.h -./include/postgresql/server/access/double_write_basic.h -./include/postgresql/server/knl/knl_thread.h -./include/postgresql/server/access/sdir.h -./include/postgresql/server/gssignal/gs_signal.h -./include/postgresql/server/knl/knl_session.h -./include/postgresql/server/libpq/pqcomm.h -./include/postgresql/server/cipher.h -./include/postgresql/server/portability/instr_time.h -./include/postgresql/server/utils/memgroup.h -./include/postgresql/server/storage/latch.h -./include/postgresql/server/workload/qnode.h -./include/postgresql/server/streaming/init.h -./include/postgresql/server/streaming/launcher.h -./include/postgresql/server/pgxc/barrier.h -./include/postgresql/server/libcomm/libcomm.h -./include/postgresql/server/hotpatch/hotpatch.h -./include/postgresql/server/hotpatch/hotpatch_backend.h -./include/postgresql/server/postmaster/bgwriter.h -./include/postgresql/server/postmaster/pagewriter.h -./include/postgresql/server/replication/heartbeat.h -./include/postgresql/server/access/multi_redo_settings.h -./include/postgresql/server/access/redo_statistic_msg.h -./include/postgresql/server/replication/rto_statistic.h -./include/postgresql/server/replication/walprotocol.h -./include/postgresql/server/storage/mot/jit_def.h -./include/postgresql/server/threadpool/threadpool.h -./include/postgresql/server/threadpool/threadpool_controler.h -./include/postgresql/server/threadpool/threadpool_group.h -./include/postgresql/server/knl/knl_variable.h -./include/postgresql/server/threadpool/threadpool_listener.h -./include/postgresql/server/threadpool/threadpool_sessctl.h -./include/postgresql/server/storage/procsignal.h -./include/postgresql/server/threadpool/threadpool_worker.h -./include/postgresql/server/threadpool/threadpool_scheduler.h -./include/postgresql/server/threadpool/threadpool_stream.h -./include/postgresql/server/replication/dataqueuedefs.h -./include/postgresql/server/gtm/gtm_c.h -./include/postgresql/server/cm/etcdapi.h -./include/postgresql/server/alarm/alarm.h -./include/postgresql/server/access/xact.h -./include/postgresql/server/access/cstore_am.h -./include/postgresql/server/access/cstore_roughcheck_func.h -./include/postgresql/server/access/cstoreskey.h -./include/postgresql/server/storage/cu.h -./include/postgresql/server/vecexecutor/vectorbatch.h -./include/postgresql/server/cstore.h -./include/postgresql/server/storage/cstore/cstore_mem_alloc.h -./include/postgresql/server/access/cstore_minmax_func.h -./include/postgresql/server/storage/custorage.h -./include/postgresql/server/storage/fd.h -./include/postgresql/server/postmaster/aiocompleter.h -./include/postgresql/server/storage/buf/bufmgr.h -./include/postgresql/server/storage/buf/buf_internals.h -./include/postgresql/server/storage/smgr.h -./include/postgresql/server/catalog/pg_am.h -./include/postgresql/server/catalog/pg_class.h -./include/postgresql/server/catalog/pg_index.h -./include/postgresql/server/rewrite/prs2lock.h -./include/postgresql/server/tcop/stmt_retry.h -./include/postgresql/server/catalog/pg_hashbucket_fn.h -./include/postgresql/server/utils/rel_gs.h 
-./include/postgresql/server/catalog/pg_partition.h -./include/postgresql/server/catalog/pg_hashbucket.h -./include/postgresql/server/catalog/catalog.h -./include/postgresql/server/catalog/catversion.h -./include/postgresql/server/catalog/pg_namespace.h -./include/postgresql/server/utils/partitionmap_gs.h -./include/postgresql/server/access/heapam.h -./include/postgresql/server/storage/pagecompress.h -./include/postgresql/server/replication/bcm.h -./include/postgresql/server/storage/cstore/cstorealloc.h -./include/postgresql/server/storage/cucache_mgr.h -./include/postgresql/server/storage/cache_mgr.h -./include/postgresql/server/nodes/plannodes.h -./include/postgresql/server/foreign/foreign.h -./include/postgresql/server/access/obs/obs_am.h -./include/postgresql/server/storage/buf/buffile.h -./include/postgresql/server/replication/slot.h -./include/postgresql/server/access/obs/eSDKOBS.h -./include/postgresql/server/commands/defrem.h -./include/postgresql/server/optimizer/pruning.h -./include/postgresql/server/nodes/relation.h -./include/postgresql/server/optimizer/bucketinfo.h -./include/postgresql/server/pgxc/nodemgr.h -./include/postgresql/server/bulkload/dist_fdw.h -./include/postgresql/server/bulkload/importerror.h -./include/postgresql/server/commands/gds_stream.h -./include/postgresql/server/bulkload/utils.h -./include/postgresql/server/cjson/cJSON.h -./include/postgresql/server/ssl/gs_openssl_client.h -./include/postgresql/server/funcapi.h -./include/postgresql/server/executor/executor.h -./include/postgresql/server/executor/execdesc.h -./include/postgresql/server/nodes/execnodes.h -./include/postgresql/server/access/genam.h -./include/postgresql/server/nodes/tidbitmap.h -./include/postgresql/server/access/relscan.h -./include/postgresql/server/access/itup.h -./include/postgresql/server/executor/instrument.h -./include/postgresql/server/miscadmin.h -./include/postgresql/server/libpq/libpq-be.h -./include/postgresql/server/libpq/hba.h -./include/postgresql/server/libpq/sha2.h -./include/postgresql/server/utils/anls_opt.h -./include/postgresql/server/pgxc/pgxc.h -./include/postgresql/server/catalog/namespace.h -./include/postgresql/server/commands/trigger.h -./include/postgresql/server/executor/spi.h -./include/postgresql/server/access/ustore/undo/knl_uundotype.h -./include/postgresql/server/access/ustore/knl_uheap.h -./include/postgresql/server/access/ustore/knl_utuple.h -./include/postgresql/server/access/ustore/knl_utype.h -./include/postgresql/server/access/ustore/knl_upage.h -./include/postgresql/server/access/ustore/knl_uredo.h -./include/postgresql/server/access/ustore/knl_uundovec.h -./include/postgresql/server/access/ustore/knl_uundorecord.h -./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h -./include/postgresql/server/access/ustore/undo/knl_uundotxn.h -./include/postgresql/server/access/ustore/undo/knl_uundozone.h -./include/postgresql/server/access/ustore/undo/knl_uundospace.h -./include/postgresql/server/communication/commproxy_basic.h -./include/postgresql/server/access/parallel_recovery/page_redo.h -./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h -./include/postgresql/server/executor/exec/execdesc.h -./include/postgresql/server/db4ai/matrix.h -./include/postgresql/server/db4ai/scores.h -./jre/ASSEMBLY_EXCEPTION -./jre/bin/java -./jre/bin/jjs -./jre/bin/keytool -./jre/bin/orbd -./jre/bin/pack200 -./jre/bin/policytool -./jre/bin/rmid -./jre/bin/rmiregistry -./jre/bin/servertool -./jre/bin/tnameserv -./jre/bin/unpack200 
-./jre/lib/amd64/jli/libjli.so -./jre/lib/amd64/jvm.cfg -./jre/lib/amd64/libattach.so -./jre/lib/amd64/libavplugin-ffmpeg-58.so -./jre/lib/amd64/libawt_headless.so -./jre/lib/amd64/libawt.so -./jre/lib/amd64/libawt_xawt.so -./jre/lib/amd64/libdecora_sse.so -./jre/lib/amd64/libdt_socket.so -./jre/lib/amd64/libfontmanager.so -./jre/lib/amd64/libfxplugins.so -./jre/lib/amd64/libglassgtk2.so -./jre/lib/amd64/libglassgtk3.so -./jre/lib/amd64/libglass.so -./jre/lib/amd64/libgstreamer-lite.so -./jre/lib/amd64/libhprof.so -./jre/lib/amd64/libinstrument.so -./jre/lib/amd64/libj2gss.so -./jre/lib/amd64/libj2pcsc.so -./jre/lib/amd64/libj2pkcs11.so -./jre/lib/amd64/libjaas_unix.so -./jre/lib/amd64/libjava_crw_demo.so -./jre/lib/amd64/libjavafx_font_freetype.so -./jre/lib/amd64/libjavafx_font_pango.so -./jre/lib/amd64/libjavafx_font.so -./jre/lib/amd64/libjavafx_iio.so -./jre/lib/amd64/libjava.so -./jre/lib/amd64/libjawt.so -./jre/lib/amd64/libjdwp.so -./jre/lib/amd64/libjfxmedia.so -./jre/lib/amd64/libjfxwebkit.so -./jre/lib/amd64/libjpeg.so -./jre/lib/amd64/libjsdt.so -./jre/lib/amd64/libjsig.so -./jre/lib/amd64/libjsoundalsa.so -./jre/lib/amd64/libjsound.so -./jre/lib/amd64/liblcms.so -./jre/lib/amd64/libmanagement.so -./jre/lib/amd64/libmlib_image.so -./jre/lib/amd64/libnet.so -./jre/lib/amd64/libnio.so -./jre/lib/amd64/libnpt.so -./jre/lib/amd64/libprism_common.so -./jre/lib/amd64/libprism_es2.so -./jre/lib/amd64/libprism_sw.so -./jre/lib/amd64/libsaproc.so -./jre/lib/amd64/libsctp.so -./jre/lib/amd64/libsplashscreen.so -./jre/lib/amd64/libsunec.so -./jre/lib/amd64/libunpack.so -./jre/lib/amd64/libverify.so -./jre/lib/amd64/libzip.so -./jre/lib/amd64/server/libjvm.so -./jre/lib/amd64/server/Xusage.txt -./jre/lib/calendars.properties -./jre/lib/charsets.jar -./jre/lib/classlist -./jre/lib/cmm/CIEXYZ.pf -./jre/lib/cmm/GRAY.pf -./jre/lib/cmm/LINEAR_RGB.pf -./jre/lib/cmm/PYCC.pf -./jre/lib/cmm/sRGB.pf -./jre/lib/content-types.properties -./jre/lib/currency.data -./jre/lib/ext/cldrdata.jar -./jre/lib/ext/dnsns.jar -./jre/lib/ext/jaccess.jar -./jre/lib/ext/jfxrt.jar -./jre/lib/ext/localedata.jar -./jre/lib/ext/meta-index -./jre/lib/ext/nashorn.jar -./jre/lib/ext/sunec.jar -./jre/lib/ext/sunjce_provider.jar -./jre/lib/ext/sunpkcs11.jar -./jre/lib/ext/zipfs.jar -./jre/lib/flavormap.properties -./jre/lib/fontconfig.Euler.properties -./jre/lib/fontconfig.properties -./jre/lib/fontconfig.Ubuntu.properties -./jre/lib/fonts/Roboto-Regular.ttf -./jre/lib/hijrah-config-umalqura.properties -./jre/lib/images/cursors/cursors.properties -./jre/lib/images/cursors/invalid32x32.gif -./jre/lib/images/cursors/motif_CopyDrop32x32.gif -./jre/lib/images/cursors/motif_CopyNoDrop32x32.gif -./jre/lib/images/cursors/motif_LinkDrop32x32.gif -./jre/lib/images/cursors/motif_LinkNoDrop32x32.gif -./jre/lib/images/cursors/motif_MoveDrop32x32.gif -./jre/lib/images/cursors/motif_MoveNoDrop32x32.gif -./jre/lib/javafx-mx.jar -./jre/lib/javafx.properties -./jre/lib/jce.jar -./jre/lib/jexec -./jre/lib/jfr/default.jfc -./jre/lib/jfr.jar -./jre/lib/jfr/profile.jfc -./jre/lib/jfxswt.jar -./jre/lib/jsse.jar -./jre/lib/jvm.hprof.txt -./jre/lib/logging.properties -./jre/lib/management-agent.jar -./jre/lib/management/jmxremote.access -./jre/lib/management/jmxremote.password.template -./jre/lib/management/management.properties -./jre/lib/management/snmp.acl.template -./jre/lib/meta-index -./jre/lib/net.properties -./jre/lib/psfontj2d.properties -./jre/lib/psfont.properties.ja -./jre/lib/resources.jar -./jre/lib/rt.jar 
-./jre/lib/security/blacklisted.certs -./jre/lib/security/cacerts -./jre/lib/security/java.policy -./jre/lib/security/java.security -./jre/lib/security/policy/limited/local_policy.jar -./jre/lib/security/policy/limited/US_export_policy.jar -./jre/lib/security/policy/unlimited/local_policy.jar -./jre/lib/security/policy/unlimited/US_export_policy.jar -./jre/lib/sound.properties -./jre/lib/tzdb.dat -./jre/LICENSE -./jre/THIRD_PARTY_README -[client] -./bin/gsql -./bin/gs_dump -./bin/gs_dumpall -./bin/gs_restore -./bin/gs_basebackup -./bin/gs_probackup -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/postgresql/euc_kr_and_mic.so -./lib/postgresql/utf8_and_uhc.so -./lib/postgresql/euc_tw_and_big5.so -./lib/postgresql/cyrillic_and_mic.so -./lib/postgresql/utf8_and_johab.so -./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/config/install-sh -./lib/postgresql/euc_cn_and_mic.so -./lib/postgresql/latin_and_mic.so -./lib/postgresql/utf8_and_sjis2004.so -./lib/postgresql/utf8_and_euc_jp.so -./lib/postgresql/utf8_and_sjis.so -./lib/postgresql/utf8_and_cyrillic.so -./lib/postgresql/utf8_and_euc_kr.so -./lib/postgresql/ascii_and_mic.so -./lib/postgresql/utf8_and_iso8859_1.so -./lib/postgresql/euc_jp_and_sjis.so -./lib/postgresql/dict_snowball.so -./lib/postgresql/utf8_and_ascii.so -./lib/postgresql/utf8_and_euc_tw.so -./lib/postgresql/utf8_and_iso8859.so -./lib/postgresql/utf8_and_win.so -./lib/postgresql/utf8_and_euc_cn.so -./lib/postgresql/utf8_and_gbk.so -./lib/postgresql/utf8_and_euc2004.so -./lib/postgresql/utf8_and_big5.so -./lib/postgresql/java/pljava.jar -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 [libpq] -./lib/libpq.a ./lib/libpq.so ./lib/libpq.so.5 ./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libconfig.so -./lib/libconfig.so.4 ./lib/libcrypto.so ./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 ./lib/libssl.so ./lib/libssl.so.1.1 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 
-./lib/libcom_err_gauss.so
-./lib/libcom_err_gauss.so.3
-./lib/libcom_err_gauss.so.3.0
-./include/gs_thread.h
-./include/gs_threadlocal.h
-./include/postgres_ext.h
-./include/libpq-fe.h
-./include/libpq-events.h
-./include/libpq/libpq-fs.h
-[version]
-V500R002C00
[header]
./include/libpq-fe.h
./include/postgres_ext.h
@@ -1455,14 +915,8 @@ V500R002C00
./include/pg_config.h
./include/pg_config_manual.h
./include/pg_config_os.h
-./include/cm_config.h
./include/c.h
./include/port.h
-./include/cm_msg.h
-./include/cm_c.h
-./include/cm_misc.h
./include/libpq-int.h
./include/pqcomm.h
./include/pqexpbuffer.h
-./include/xlogdefs.h
-./include/cm-libpq-fe.h
diff --git a/build/script/opengauss_release_list_euleros_aarch64_single b/build/script/aarch64_opengauss_list
similarity index 98%
rename from build/script/opengauss_release_list_euleros_aarch64_single
rename to build/script/aarch64_opengauss_list
index eeb58034a..7e508459d 100644
--- a/build/script/opengauss_release_list_euleros_aarch64_single
+++ b/build/script/aarch64_opengauss_list
@@ -35,6 +35,7 @@
 ./bin/lz4
 ./bin/kadmind
 ./bin/dbmind
+./bin/gs_dbmind
 ./bin/server.key.cipher
 ./bin/server.key.rand
 ./bin/gs_plan_simulator.sh
@@ -824,9 +825,7 @@
 ./lib/libverto.so
 ./lib/libverto.so.0
 ./lib/libverto.so.0.0
-./lib/libcurl.so
-./lib/libcurl.so.4
-./lib/libcurl.so.4.6.0
+./lib/libcurl.so*
 ./lib/libcrypto.so
 ./lib/libcrypto.so.1.1
 ./lib/libssl.so
@@ -839,9 +838,7 @@
 ./lib/liblz4.so
 ./lib/liblz4.so.1
 ./lib/liblz4.so.1.9.2
-./lib/libcjson.so
-./lib/libcjson.so.1
-./lib/libcjson.so.1.7.13
+./lib/libcjson.so*
 ./lib/libconfig.so
 ./lib/libconfig.so.4
 ./lib/libpgport_tool.so
@@ -849,25 +846,13 @@
 ./share/llvmir/GaussDB_expr.ir
 ./lib/libeSDKLogAPI.so
 ./lib/libeSDKOBS.so
-./lib/liblog4cpp.so
-./lib/liblog4cpp.so.5
-./lib/liblog4cpp.so.5.0.6
-./lib/libcharset.so
-./lib/libcharset.so.1
-./lib/libcharset.so.1.0.0
-./lib/libiconv.so
-./lib/libiconv.so.2
-./lib/libiconv.so.2.6.1
-./lib/libnghttp2.so
-./lib/libnghttp2.so.14
-./lib/libnghttp2.so.14.20.0
-./lib/libpcre.so
-./lib/libpcre.so.1
-./lib/libpcre.so.1.2.12
+./lib/liblog4cpp.so*
+./lib/libcharset.so*
+./lib/libiconv.so*
+./lib/libnghttp2.so*
+./lib/libpcre.so*
 ./lib/libsecurec.so
-./lib/libxml2.so
-./lib/libxml2.so.2
-./lib/libxml2.so.2.9.9
+./lib/libxml2.so*
 ./lib/libparquet.so
 ./lib/libparquet.so.14
 ./lib/libparquet.so.14.1.0
@@ -880,7 +865,8 @@
 ./lib/libdcf.so
 ./lib/libzstd.so
 ./lib/libzstd.so.1
-./lib/libzstd.so.1.4.4
+./lib/libzstd.so.1.5.0
+./lib/libxgboost.so
 ./include/postgresql/server/postgres_ext.h
 ./include/postgresql/server/pg_config_os.h
@@ -1011,6 +997,7 @@
 ./include/postgresql/server/storage/backendid.h
 ./include/postgresql/server/storage/lock/lock.h
 ./include/postgresql/server/storage/lock/lwlock.h
+./include/postgresql/server/storage/lwlocknames.h
 ./include/postgresql/server/storage/barrier.h
 ./include/postgresql/server/storage/shmem.h
 ./include/postgresql/server/pg_config.h
@@ -1444,8 +1431,6 @@
 ./include/libpq-fe.h
 ./include/libpq-events.h
 ./include/libpq/libpq-fs.h
-[version]
-V500R002C00
[header]
 ./include/libpq-fe.h
 ./include/postgres_ext.h
@@ -1457,9 +1442,6 @@ V500R002C00
 ./include/cm_config.h
 ./include/c.h
 ./include/port.h
-./include/cm_msg.h
-./include/cm_c.h
-./include/cm_misc.h
 ./include/libpq-int.h
 ./include/pqcomm.h
 ./include/pqexpbuffer.h
diff --git a/build/script/build_opengauss.sh b/build/script/build_opengauss.sh
index de6ad15bd..338a8dd02 100755
--- a/build/script/build_opengauss.sh
+++ b/build/script/build_opengauss.sh
@@ -12,33 +12,158 @@
 # Example: ./build_opengauss.sh -3rd /path/to/your/third_party_binarylibs/
 # change it to "N" if you want to build with the original build system based solely on Makefiles
-CMAKE_PKG="N"
+declare CMAKE_PKG="Y"
+declare SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
+declare ROOT_DIR=$(dirname "${SCRIPT_DIR}")
+declare ROOT_DIR=$(dirname "${ROOT_DIR}")
+declare package_type='server'
+declare product_mode='opengauss'
+declare version_mode='release'
+declare binarylib_dir='None'
+declare make_check='off'
+declare separate_symbol='on'

-#(0) pre-check
-if [ ! -f opengauss.spec ] || [ ! -f package_internal.sh ]; then
-    echo "ERROR: there is no opengauss.spec/mpp_package.sh"
+function print_help()
+{
+    echo "Usage: $0 [OPTION]
+    -h|--help                show help information.
+    -V|--version             show version information.
+    -3rd|--binarylib_dir     the directory of third party binarylibs.
+    -pkg|--package           provide the type of installation package; the value is server.
+    -m|--version_mode        the value of this parameter is debug, release or memcheck; the default value is release.
+    -pm                      product mode; the value is opengauss.
+    -mc|--make_check         the value of this parameter is on or off; the default value is on.
+    -s|--symbol_mode         whether to separate symbols in debug mode; the default value is on.
+    --cmake_opt              more cmake options
+    --config_opt             more configure options
+"
+}
+
+function print_version()
+{
+    echo $(cat ${SCRIPT_DIR}/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
+}
+
+if [ $# = 0 ] ; then
+    echo "missing option"
+    print_help
     exit 1
 fi

-#(1) prepare
-cp opengauss.spec gauss.spec
+#########################################################################
+##read command line parameters
+#######################################################################
+while [ $# -gt 0 ]; do
+    case "$1" in
+        -h|--help)
+            print_help
+            exit 1
+            ;;
+        -V|--version)
+            print_version
+            exit 1
+            ;;
+        -3rd|--binarylib_dir)
+            if [ "$2"X = X ]; then
+                echo "no binarylib directory given"
+                exit 1
+            fi
+            binarylib_dir=$2
+            shift 2
+            ;;
+        -pkg)
+            if [ "$2"X = X ]; then
+                echo "no package type given"
+                exit 1
+            fi
+            package_type=$2
+            shift 2
+            ;;
+        -m|--version_mode)
+            if [ "$2"X = X ]; then
+                echo "no version mode given"
+                exit 1
+            fi
+            version_mode=$2
+            shift 2
+            ;;
+        -pm)
+            if [ "$2"X = X ]; then
+                echo "no product mode given"
+                exit 1
+            fi
+            product_mode=$2
+            shift 2
+            ;;
+        -mc|--make_check)
+            if [ "$2"X = X ]; then
+                echo "no make check value given"
+                exit 1
+            fi
+            make_check=$2
+            shift 2
+            ;;
+        -s|--symbol_mode)
+            if [ "$2"X = X ]; then
+                echo "no symbol mode given"
+                exit 1
+            fi
+            separate_symbol=$2
+            shift 2
+            ;;
+        --cmake_opt)
+            if [ "$2"X = X ]; then
+                echo "no extra cmake options provided"
+                exit 1
+            fi
+            extra_cmake_opt=$2
+            shift 2
+            ;;
+        --config_opt)
+            if [ "$2"X = X ]; then
+                echo "no extra configure options provided"
+                exit 1
+            fi
+            extra_config_opt=$2
+            shift 2
+            ;;
+        *)
+            echo "Internal Error: option processing error: $1" 1>&2
+            echo "please input the right parameter; the following command may help you"
+            echo "${0} --help or ${0} -h"
+            exit 1
+    esac
+done

-#(2) invoke package_internal.sh
-if [ "$CMAKE_PKG" == "N" ]; then
-    chmod a+x package_internal.sh
-    echo "package_internal.sh $@ -nopkg -pm opengauss"
-    ./package_internal.sh $@ -nopkg -pm opengauss
-    if [ $? != "0" ]; then
-        echo "failed in build opengauss"
-    fi
+if [ -e "$SCRIPT_DIR/utils/common.sh" ]; then
+    source $SCRIPT_DIR/utils/common.sh
 else
-    chmod a+x cmake_package_internal.sh
-    echo "cmake_package_internal.sh $@ -nopkg -pm opengauss"
-    ./cmake_package_internal.sh $@ -nopkg -pm opengauss
-    if [ $? != "0" ]; then
-        echo "failed in build opengauss"
-    fi
+    exit 1
 fi
-#(3) remove files which are not necessary
-BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
+
+#(1) invoke the compile helpers (Makefile based or CMake based)
+if [ "$CMAKE_PKG" == "N" ]; then
+    declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
+    source $SCRIPT_DIR/utils/make_compile.sh || exit 1
+else
+    echo "begin config cmake options:" >> "$LOG_FILE" 2>&1
+    declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
+    declare CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build
+    declare CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON ${extra_cmake_opt}"
+    echo "[cmake options] cmake options is:${CMAKE_OPT}" >> "$LOG_FILE" 2>&1
+    source $SCRIPT_DIR/utils/cmake_compile.sh || exit 1
+fi
+
+function main()
+{
+    echo "[makegaussdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
+    echo "[makegaussdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}"
+    read_gaussdb_version
+    read_gaussdb_number
+    gaussdb_pkg_pre_clean
+    gaussdb_build
+}
+main
+
+echo "now, all build steps have finished!"
+exit 0
diff --git a/build/script/cmake_package_internal.sh b/build/script/cmake_package_internal.sh
deleted file mode 100755
index bf9c47a88..000000000
--- a/build/script/cmake_package_internal.sh
+++ /dev/null
@@ -1,1454 +0,0 @@
-#!/bin/bash
-#######################################################################
-# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
-# descript: Compile and pack MPPDB
-# Return 0 means OK.
-# Return 1 means failed.
-# version: 2.0
-# date: 2020-08-08
-#######################################################################
-
-##default package type is all
-declare package_type='all'
-declare install_package_format='tar'
-declare optimized='true'
-
-declare product_mode='opengauss'
-
-##default version mode is relase
-declare version_mode='release'
-declare binarylib_dir='None'
-declare separate_symbol='on'
-#detect platform information.
-PLATFORM=32
-bit=$(getconf LONG_BIT)
-if [ "$bit" -eq 64 ]; then
-    PLATFORM=64
-fi
-
-#get OS distributed version.
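For reference, the rewritten build_opengauss.sh above parses its own command line and sources the shared compile helpers from build/script/utils/. A minimal invocation sketch follows; only -3rd carries no default, the binarylibs path is illustrative, and the other flags simply restate their defaults:

```
# hedged sketch: invoke the new build entry point; the -3rd path is illustrative
./build_opengauss.sh -3rd /data/openGauss-third_party_binarylibs -m release -pkg server -mc off
```
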
-kernel="" -version="" -ext_version="" -if [ -f "/etc/euleros-release" ]; then - kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z) - version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) - ext_version=$version -elif [ -f "/etc/openEuler-release" ]; then - kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z) - version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) -elif [ -f "/etc/centos-release" ]; then - kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z) - version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) -elif [ -f "/etc/kylin-release" ]; then - kernel=$(cat /etc/kylin-release | awk -F ' ' '{print $1}' | tr A-Z a-z) - version=$(cat /etc/kylin-release | awk '{print $6}' | tr A-Z a-z) -else - kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z) - version=$(lsb_release -r | awk -F ' ' '{print $2}') -fi - -if [ X"$kernel" == X"euleros" ]; then - dist_version="EULER" -elif [ X"$kernel" == X"centos" ]; then - dist_version="CENTOS" -elif [ X"$kernel" == X"openeuler" ]; then - dist_version="OPENEULER" -elif [ X"$kernel" == X"kylin" ]; then - dist_version="KYLIN" -else - echo "Only support EulerOS, OPENEULER(aarch64), CentOS and Kylin platform." - echo "Kernel is $kernel" - exit 1 -fi - -show_package=false - -gcc_version="7.3.0" -##add platform architecture information -cpus_num=$(grep -w processor /proc/cpuinfo|wc -l) -PLATFORM_ARCH=$(uname -p) -if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then - if [ "$dist_version" == "EULER" ]; then - ARCHITECTURE_EXTRA_FLAG=_euleros2.0_${ext_version}_$PLATFORM_ARCH - GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA" - elif [ "$dist_version" == "CENTOS" ]; then - ARCHITECTURE_EXTRA_FLAG=_centos_7.5_$PLATFORM_ARCH - GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA" - elif [ "$dist_version" == "OPENEULER" ]; then - ARCHITECTURE_EXTRA_FLAG=_openeuler_$PLATFORM_ARCH - # it may be risk to enable 'ARM_LSE' for all ARM CPU, but we bid our CPUs are not elder than ARMv8.1 - GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE" - elif [ "$dist_version" == "KYLIN" ]; then - ARCHITECTURE_EXTRA_FLAG=_kylinv10_sp1_$PLATFORM_ARCH - GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA" - else - ARCHITECTURE_EXTRA_FLAG=_$PLATFORM_ARCH - fi - release_file_list="mpp_release_list_${kernel}_${PLATFORM_ARCH}" -else - if [ "$dist_version" == "KYLIN" ]; then - ARCHITECTURE_EXTRA_FLAG=_kylinv10_sp1_${PLATFORM_ARCH}_intel - else - ARCHITECTURE_EXTRA_FLAG=_euleros2.0_sp5_${PLATFORM_ARCH} - fi - release_file_list="mpp_release_list_${kernel}" -fi - -##default install version storage path -declare mppdb_version='GaussDB Kernel' -declare mppdb_name_for_package="$(echo ${mppdb_version} | sed 's/ /-/g')" -declare package_path='./' -declare version_number='' -declare make_check='off' -declare zip_package='on' -declare extra_config_opt='' - -####################################################################### -##putout the version of mppdb -####################################################################### -function print_version() -{ - echo "$version_number" -} -####################################################################### -## print help information -####################################################################### -function print_help() -{ - echo "Usage: $0 [OPTION] - -h|--help show help information. - -V|--version show version information. - -f|--file provide the file list released. 
- -3rd|--binarylib_dir the directory of third party binarylibs. - -pkg|--package provode type of installation packages, values parameter is all, server, jdbc, odbc, agent. - -pm product mode, values parameter is single, multiple or opengauss, default value is multiple. - -p|--path generation package storage path. - -t packaging format, values parameter is tar or rpm, the default value is tar. - -m|--version_mode this values of paramenter is debug, release, memcheck, the default value is release. - -mc|--make_check this values of paramenter is on or off, the default value is on. - -s|--symbol_mode whether separate symbol in debug mode, the default value is on. - -cv|--gcc_version gcc-version option: 7.3.0. - -nopt|--not_optimized on kunpeng platform , like 1616 version, without LSE optimized. - -nopkg|--no_package don't zip binaries into packages - -co|--config_opt more config options - -S|--show_pkg show server package name and Bin name base on current configuration. -" -} - -if [ $# = 0 ] ; then - echo "missing option" - print_help - exit 1 -fi - -SCRIPT_PATH=${0} -FIRST_CHAR=$(expr substr "$SCRIPT_PATH" 1 1) -if [ "$FIRST_CHAR" = "/" ]; then - SCRIPT_PATH=${0} -else - SCRIPT_PATH="$(pwd)/${SCRIPT_PATH}" -fi -SCRIPT_NAME=$(basename $SCRIPT_PATH) -SCRIPT_DIR=$(dirname "${SCRIPT_PATH}") -SCRIPT_DIR=$(dirname "$SCRIPT_DIR") - -if [ ! -f "$SCRIPT_DIR/$SCRIPT_NAME" ] ; then - SCRIPT_DIR=$SCRIPT_DIR/script -fi - -package_path=$SCRIPT_DIR -####################################################################### -##read version from $release_file_list -####################################################################### -function read_mpp_version() -{ - cd $SCRIPT_DIR - local head=$(cat $release_file_list | grep "\[version\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - echo "error: no find version in the $release_file_list file " - exit 1 - fi - local tail=$(cat $release_file_list | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - local all=$(cat $release_file_list | wc -l) - let tail=$all+1-$head - fi - version_number=$(cat $release_file_list | awk "NR==$head+1,NR==$tail+$head-1") - echo "${mppdb_name_for_package}-${version_number}">version.cfg - #auto read the number from kernal globals.cpp, no need to change it here -} -####################################################################### -##first read mppdb version -####################################################################### - -######################################################################### -##read command line paramenters -####################################################################### -while [ $# -gt 0 ]; do - case "$1" in - -h|--help) - print_help - exit 1 - ;; - -V|--version) - print_version - exit 1 - ;; - -f|--file) - if [ "$2"X = X ]; then - echo "no given file name" - exit 1 - fi - release_file_list=$2 - shift 2 - ;; - -3rd|--binarylib_dir) - if [ "$2"X = X ]; then - echo "no given binarylib directory values" - exit 1 - fi - binarylib_dir=$2 - shift 2 - ;; - -p|--path) - if [ "$2"X = X ]; then - echo "no given generration package path" - exit 1 - fi - package_path=$2 - if [ ! 
-d "$package_path" ]; then - mkdir -p $package_path - fi - shift 2 - ;; - -pkg) - if [ "$2"X = X ]; then - echo "no given package type name" - exit 1 - fi - package_type=$2 - shift 2 - ;; - -pm) - if [ "$2"X = X ]; then - echo "no given product mode" - exit 1 - fi - product_mode=$2 - shift 2 - ;; - -s|--symbol_mode) - if [ "$2"X = X ]; then - echo "no given symbol parameter" - exit 1 - fi - separate_symbol=$2 - shift 2 - ;; - -t) - if [ "$2"X = X ]; then - echo "no given installation package format values" - exit 1 - fi - if [ "$2" = rpm ]; then - echo "error: do not suport rpm package now!" - exit 1 - fi - install_package_format=$2 - shift 1 - ;; - -m|--version_mode) - if [ "$2"X = X ]; then - echo "no given version number values" - exit 1 - fi - version_mode=$2 - shift 2 - ;; - -mc|--make_check) - if [ "$2"X = X ]; then - echo "no given make check values" - exit 1 - fi - make_check=$2 - shift 2 - ;; - -cv|--gcc_version) - if [ "$2"X = X ]; then - echo "no given gcc version" - exit 1 - fi - gcc_version=$2 - shift 2 - ;; - -nopt|--not_optimized) - optimized='false' - shift 1 - ;; - -nopkg|--no_package) - zip_package='off' - shift 1 - ;; - -co|--config_opt) - if [ "$2"X = X ]; then - echo "no extra configure options provided" - exit 1 - fi - extra_config_opt=$2 - shift 2 - ;; - -S|--show_pkg) - show_package=true - shift - ;; - *) - echo "Internal Error: option processing error: $1" 1>&2 - echo "please input right paramtenter, the following command may help you" - echo "./cmake_package_internal.sh --help or ./cmake_package_internal.sh -h" - exit 1 - esac -done - -if [ "$product_mode"x == "single"x ]; then - release_file_list="${release_file_list}_${product_mode}" -fi - -if [ "$product_mode"x == "opengauss"x ]; then - release_file_list=$(echo ${release_file_list}_single | sed -e 's/mpp_release/opengauss_release/') -fi - -read_mpp_version - -if [ "$gcc_version" = "7.3.0" ]; then - gcc_version=${gcc_version:0:3} -else - echo "Unknown gcc version $gcc_version" - exit 1 -fi - -####################################################################### -## declare all package name -####################################################################### -declare version_string="${mppdb_name_for_package}-${version_number}" -declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit" -declare server_package_name="${package_pre_name}.${install_package_format}.gz" -declare agent_package_name="${package_pre_name}-AGENT.${install_package_format}.gz" -declare gsql_package_name="${mppdb_name_for_package}-${version_number}-${dist_version}-${PLATFORM}bit-gsql.${install_package_format}.gz" -declare client_package_name="${package_pre_name}-ClientTools.${install_package_format}.gz" -declare libpq_package_name="${package_pre_name}-Libpq.${install_package_format}.gz" -declare gds_package_name="${package_pre_name}-Gds.${install_package_format}.gz" -declare symbol_package_name="${package_pre_name}-symbol.${install_package_format}.gz" -declare inspection_package_name="${version_string}-Inspection.tar.gz" - -echo "[makemppdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}" -ROOT_DIR=$(dirname "$SCRIPT_DIR") -ROOT_DIR=$(dirname "$ROOT_DIR") -PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh") -if [ "${PLAT_FORM_STR}"x == "Failed"x ] -then - echo "Only support EulerOS openEuler and Centros platform." 
- exit 1; -fi -PG_REG_TEST_ROOT="${ROOT_DIR}/" - -CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build -MPPDB_DECODING_DIR="${ROOT_DIR}/contrib/mppdb_decoding" -PMK_SCHEMA="${ROOT_DIR}/script/pmk_schema.sql" -declare LOG_FILE="${ROOT_DIR}/build/script/makemppdb_pkg.log" -declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install" -declare ERR_MKGS_FAILED=1 -declare MKGS_OK=0 -if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then - BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}" - PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}" - BINARYLIBS_PATH="${binarylib_dir}/dependency" -else - BUILD_TOOLS_PATH="${ROOT_DIR}/buildtools/${PLAT_FORM_STR}" - PLATFORM_PATH="${ROOT_DIR}/platform/${PLAT_FORM_STR}" - BINARYLIBS_PATH="${ROOT_DIR}/binarylibs" -fi - -UNIX_ODBC="${BINARYLIBS_PATH}/${PLAT_FORM_STR}/unixodbc" - -if [ "$product_mode"x == "single"x ] || [ "$product_mode"x == "opengauss"x ]; then - declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql" -fi - -if [ "$product_mode"x == "single"x ]; then - declare UPGRADE_PRIV_SQL_DIR="${ROOT_DIR}/privategauss/include/catalog/upgrade_sql" -fi - -gaussdb_200_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB200" -gaussdb_300_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB300" -gaussdb_200_standard_file="${binarylib_dir}/buildtools/license_control/gaussdb.license.GaussDB200_Standard" -gaussdb_version_file="${ROOT_DIR}/src/gausskernel/process/postmaster/gaussdb_version.cpp" - -export CC="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc" -export CXX="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++" -export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH -export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH -jdkpath=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk -if [ ! -d ${jdkpath} ]; then - jdkpath=${binarylib_dir}/platform/openjdk8/${PLATFORM_ARCH}/jdk -fi -export JAVA_HOME=${jdkpath} - -declare p7zpath="${BUILD_TOOLS_PATH}/p7z/bin" -################################### -# build parameter about enable-llt -################################## -COMPLIE_TYPE="comm" -echo "[makemppdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}" -################################### -# get version number from globals.cpp -################################## -function read_mpp_number() -{ - global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp" - version_name="GRAND_VERSION_NUM" - version_num="" - line=$(cat $global_kernal | grep ^const* | grep $version_name) - version_num1=${line#*=} - #remove the symbol; - version_num=$(echo $version_num1 | tr -d ";") - #remove the blank - version_num=$(echo $version_num) - - if echo $version_num | grep -qE '^92[0-9]+$' - then - # get the last three number - latter=${version_num:2} - echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg - else - echo "Cannot get the version number from globals.cpp." - exit 1 - fi -} -read_mpp_number - -####################################################################### -# Print log. -####################################################################### -log() -{ - echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" - echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1 -} - -####################################################################### -# print log and exit. 
-####################################################################### -die() -{ - log "$@" - echo "$@" - exit $ERR_MKGS_FAILED -} - -####################################################################### -## Check the installation package production environment -####################################################################### -function mpp_pkg_pre_check() -{ - if [ -d "$BUILD_DIR" ]; then - rm -rf $BUILD_DIR - fi - if [ -d "$LOG_FILE" ]; then - rm -rf $LOG_FILE - fi - - if [ X"$package_type" == X"server" -o X"$package_type" == X"all" ] && [ X"$zip_package" = X"on" ] && [ ! -d "${ROOT_DIR}"/script/script/gspylib/ ]; then - printf "\033[31mCan not found OM script directory. solution steps:\n\033[0m" - echo " 1) git clone git@lfg-y.codehub.huawei.com:2222/Gauss/GaussDBKernel/GaussDBKernel-om.git -b $(git branch | grep '*' | sed -e 's/*//g' -e 's/^ //g')" - echo " 2) if you do not have the permission to git it, please call CMO " - echo " 3) rm -rf ${ROOT_DIR}/script && ln -s /GaussDB_Kernel_OM ${ROOT_DIR}/script" - echo "and then try again!" - exit 1 - fi -} - -####################################################################### -# Install all SQL files from distribute/include/catalog/upgrade_sql -# to INSTALL_DIR/bin/script/upgrade_sql. -# Package all SQL files and then verify them with SHA256. -####################################################################### -function package_upgrade_sql() -{ - echo "Begin to install upgrade_sql files..." - UPGRADE_SQL_TAR="upgrade_sql.tar.gz" - UPGRADE_SQL_SHA256="upgrade_sql.sha256" - MULTIP_IGNORE_VERSION=(289 294 296) - cp -r "${UPGRADE_SQL_DIR}" . - if [ "$product_mode"x == "single"x ]; then - cp -r "${UPGRADE_PRIV_SQL_DIR}" . - fi - [ $? -ne 0 ] && die "Failed to cp upgrade_sql files" - if [ "$product_mode"x == "multiple"x ]; then - for version_num in ${MULTIP_IGNORE_VERSION[*]} - do - find ./upgrade_sql -name *${version_num}* | xargs rm -rf - done - fi - tar -czf ${UPGRADE_SQL_TAR} upgrade_sql - [ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}" - rm -rf ./upgrade_sql > /dev/null 2>&1 - - sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}" - [ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}" - - chmod 600 ${UPGRADE_SQL_TAR} - chmod 600 ${UPGRADE_SQL_SHA256} - - echo "Successfully packaged upgrade_sql files." -} -####################################################################### -# get cluster version from src/include/pg_config.h by 'DEF_GS_VERSION ' -# then replace OM tools version -####################################################################### -function replace_omtools_version() -{ - local gs_version=$(grep DEF_GS_VERSION ${CMAKE_BUILD_DIR}/pg_config.h | awk -F '"' '{print $2}') - echo $gs_version | grep -e "${mppdb_version}.*build.*compiled.*" > /dev/null 2>&1 - if [ $? -ne 0 ]; then - die "Failed to get gs_version from pg_config.h." - fi - - if [ -f "$1"/script/gspylib/common/VersionInfo.py ] ; then - sed -i -e "s/COMMON_VERSION = \"Gauss200 OM VERSION\"/COMMON_VERSION = \"$(echo ${gs_version})\"/g" -e "s/__GAUSS_PRODUCT_STRING__/$mppdb_version/g" $1/script/gspylib/common/VersionInfo.py - if [ $? -ne 0 ]; then - die "Failed to replace OM tools version number." - fi - else - sed -i "s/COMMON_VERSION = \"Gauss200 OM VERSION\"/COMMON_VERSION = \"$(echo ${gs_version})\"/g" $1/script/gspylib/os/gsOSlib.py - if [ $? -ne 0 ]; then - die "Failed to replace OM tools version number." 
- fi - fi - - grep 'CATALOG_VERSION_NO' ${PG_REG_TEST_ROOT}/src/include/catalog/catversion.h >/dev/null 2>&1 - if [ $? -ne 0 ]; then - die "Failed to get catalog_version from catversion.h." - fi - - catalog_version=$(grep 'CATALOG_VERSION_NO' ${PG_REG_TEST_ROOT}/src/include/catalog/catversion.h | uniq | awk -F ' ' '{print $NF}') - if [ x"$catalog_version" == x"" ]; then - die "Failed to get catalog_version from catversion.h." - fi - - sed -i "s/TABLESPACE_VERSION_DIRECTORY = .*/TABLESPACE_VERSION_DIRECTORY = \"PG_9.2_$(echo ${catalog_version})\"/g" $1/script/gspylib/common/Common.py - if [ $? -ne 0 ]; then - die "Failed to replacecatalog_version number." - fi - -} -####################################################################### -# get cluster version from src/include/pg_config.h by 'DEF_GS_VERSION ' -# then replace ODBC version -####################################################################### -function replace_odbc_version() -{ - local gs_version=$(grep DEF_GS_VERSION ${CMAKE_BUILD_DIR}/pg_config.h | awk -F '"' '{print $2}') - echo $gs_version | grep -e "${mppdb_version:x}.*build.*compiled.*" > /dev/null 2>&1 - - if [ $? -ne 0 ]; then - die "Failed to get gs_version from pg_config.h." - fi - - if [ -f "$1"/config.h ] ; then - sed -i "/^\\s*#define\\s*DEF_GS_VERSION.*$/d" $1/config.h - echo "#define DEF_GS_VERSION \"$(echo ${gs_version})\"">>$1/config.h - if [ $? -ne 0 ]; then - die "Failed to replace odbc tools version number." - fi - else - echo "Failed to replace odbc tools: can not find file $1/config.h." - fi -} -####################################################################### -##install gaussdb database and others -##select to install something according to variables package_type need -####################################################################### -function mpp_pkg_bld() -{ - case "$package_type" in - all) - echo "Install all" - install_gaussdb - install_inspection - echo "Install all success" - ;; - server) - install_gaussdb - ;; - gsql) - install_gaussdb - ;; - libpq) - install_gaussdb - ;; - inspection) - install_inspection - ;; - *) - echo "Internal Error: option processing error: $package_type" - echo "please input right paramenter values all, server, libpq, gds or gsql " - exit 1 - esac -} -####################################################################### -##install inspection tool scripts -####################################################################### -function install_inspection() -{ - echo "packaging inspection..." 
- rm -rf ${package_path}/inspection && - mkdir -p ${package_path}/inspection && - - cp -f ${script_dir}/script/gs_check ${package_path}/inspection/ && - cp -rf ${script_dir}/script/gspylib/ ${package_path}/inspection/ && - - mkdir -p ${package_path}/inspection/gspylib/inspection/output/log/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/output/nodes/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/asn1crypto/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/bcrypt/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/cryptography/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/cffi/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/enum/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/idna/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/nacl/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/pyasn1/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/kafka-python/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/pycparser/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/OpenSSL/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/psutil/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/netifaces/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/paramiko/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/paste/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/bottle/ && - - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/asn1crypto/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bcrypt/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cffi/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cryptography/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/enum/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/idna/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/nacl/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pyasn1/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/kafka-python/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pycparser/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/OpenSSL/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/six.py ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.py ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS2 ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS4 ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf 
${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend*.so ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/psutil/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/netifaces/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paramiko/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paste/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bottle/ ${package_path}/inspection/gspylib/inspection/lib/ - - mv -f ${package_path}/inspection/gspylib/inspection/readme.txt ${package_path}/inspection/ - - if [ $? -ne 0 ]; then - die "cp -rf ${script_dir}/script/inspection/* ${package_path}/inspection/inspection/ failed" - fi - - find ${package_path}/inspection/ -name .svn -type d -print0 | xargs -0 rm -rf - find ${package_path}/inspection/ -name d2utmp* -print0 | xargs -0 rm -rf - chmod -R +x ${package_path}/inspection/ - - cd ${package_path}/inspection - select_package_command - $package_command "${inspection_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${inspection_package_name} failed" - fi - mv ${inspection_package_name} ${package_path} - rm -rf ${package_path}/inspection/ - echo "install $pkgname tools is ${inspection_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" -} -####################################################################### -##install gaussdb database contained server,client and libpq -####################################################################### -function install_gaussdb() -{ - # Generate the license control file, and set md5sum string to the code. - echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1 - make_license_control - echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1 - cd "$ROOT_DIR/" - if [ $? -ne 0 ]; then - die "change dir to $SRC_DIR failed." - fi - - if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then - echo "WARNING: do not separate symbol in debug mode!" - fi - - if [ "$product_mode" != "opengauss" ]; then - die "the product mode can only be opengauss!" 
- fi - - binarylibs_path=${ROOT_DIR}/binarylibs - if [ "${binarylib_dir}"x != "None"x ]; then - binarylibs_path=${binarylib_dir} - fi - - if [ "$product_mode"x == "opengauss"x ]; then - enable_readline="--with-readline" - else - enable_readline="--without-readline" - fi - - export BUILD_TUPLE=${PLATFORM_ARCH} - export THIRD_BIN_PATH="${binarylibs_path}" - export PREFIX_HOME="${BUILD_DIR}" - - if [ "$product_mode"x == "opengauss"x ]; then - if [ "$version_mode"x == "release"x ]; then - CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON" - export DEBUG_TYPE=release - elif [ "$version_mode"x == "memcheck"x ]; then - CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON" - export DEBUG_TYPE=memcheck - else - CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_MOT=ON" - export DEBUG_TYPE=debug - fi - fi - - echo "Begin run cmake for gaussdb server" >> "$LOG_FILE" 2>&1 - echo "CMake options: ${CMAKE_OPT}" >> "$LOG_FILE" 2>&1 - echo "CMake release: ${DEBUG_TYPE}" >> "$LOG_FILE" 2>&1 - - export GAUSSHOME=${BUILD_DIR} - export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH} - - cd ${ROOT_DIR} - [ -d "${CMAKE_BUILD_DIR}" ] && rm -rf ${CMAKE_BUILD_DIR} - [ -d "${BUILD_DIR}" ] && rm -rf ${BUILD_DIR} - mkdir -p ${CMAKE_BUILD_DIR} - cd ${CMAKE_BUILD_DIR} - cmake .. ${CMAKE_OPT} - echo "Begin make and install gaussdb server" >> "$LOG_FILE" 2>&1 - make VERBOSE=1 -sj ${cpus_num} - if [ $? -ne 0 ]; then - die "make failed." - fi - make install -sj ${cpus_num} - if [ $? -ne 0 ]; then - die "make install failed." - fi - - ## check build specification - spec="gaussdbkernel" - if ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'GaussDB Kernel' >/dev/null 2>&1 ); then - spec="gaussdbkernel" - elif ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'openGauss' >/dev/null 2>&1 ); then - spec="opengauss" - fi - - chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf - dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1 - - #back to separate_debug_symbol.sh dir - cd $SCRIPT_DIR - if [ "$version_mode" = "release" -a "$separate_symbol" = "on" -a "$zip_package" = "on" ]; then - chmod +x ./separate_debug_information.sh - ./separate_debug_information.sh - cd $SCRIPT_DIR - mv symbols.tar.gz $symbol_package_name - fi - - #back to root dir - cd $ROOT_DIR -} - -####################################################################### - -####################################################################### -# make package for gsql -####################################################################### -function make_package_gsql() -{ - # mkdir temp directory - mkdir -p gsql - mkdir -p gsql/bin - mkdir -p gsql/lib - mkdir -p gsql/gs_ktool_file - - # copy gsql and depend *.so - cp ${BUILD_DIR}/bin/gsql gsql/bin - if [ $? -ne 0 ]; then - die "copy gsql failed." - fi - - cp ${BUILD_DIR}/bin/gs_ktool gsql/bin - if [ $? -ne 0 ]; then - die "copy gsql failed." - fi - - cp -r ${BUILD_DIR}/etc/gs_ktool_file/gs_ktool_conf.ini gsql/gs_ktool_file - if [ $? -ne 0 ]; then - die "copy gs_ktool_con.ini failed." - fi - - cd gsql - tar -xvf ${package_path}/${libpq_package_name} - if [ $? -ne 0 ]; then - die "unpack libpq failed." - fi - rm -f *.docx - chmod 700 ./lib/*.so* - cd .. - - cp $SCRIPT_DIR/gsql_env.sh gsql/gsql_env.sh - if [ $? -ne 0 ]; then - die "copy gsql_env.sh failed." - fi - chmod +x gsql/gsql_env.sh - - # make package - cd gsql - echo "packaging gsql..." 
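For orientation, make_package_gsql above stages gsql, gs_ktool and the unpacked libpq libraries into one temporary tree, and the tar step just below turns that tree into a standalone client package. A hedged sketch of consuming the result on a client host; the file name pattern follows gsql_package_name, and gsql_env.sh is assumed to export PATH and LD_LIBRARY_PATH for the bundled libraries:

```
# hedged sketch: unpack the standalone gsql client tarball and load its environment
mkdir -p ~/gsql_client
tar -zxf GaussDB-Kernel-*-gsql.tar.gz -C ~/gsql_client
source ~/gsql_client/gsql_env.sh
gsql --version
```
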
- tar -zcf "${gsql_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "tar ${gsql_package_name} failed" - fi - mv ${gsql_package_name} ${package_path} - - # clean tmp directory - cd .. && rm -rf gsql - - echo "install $pkgname tools is ${gsql_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" -} - -####################################################################### -##select package type according to variable package_type -####################################################################### -function mpp_pkg_make() -{ - case "$package_type" in - server) - echo "file list: $release_file_list" - make_package $release_file_list 'server' - make_package $release_file_list 'libpq' - make_package_gsql - make_package $release_file_list 'gds' - ;; - jdbc) - make_package $release_file_list 'jdbc' - ;; - odbc) - make_package $release_file_list 'odbc' - ;; - libpq) - make_package $release_file_list 'libpq' - ;; - gsql) - make_package $release_file_list 'libpq' - make_package_gsql - ;; - esac -} -declare package_command -####################################################################### -##select package command accroding to install_package_format -####################################################################### -function select_package_command() -{ - - case "$install_package_format" in - tar) - tar='tar' - option=' -zcvf' - package_command="$tar$option" - ;; - rpm) - rpm='rpm' - option=' -i' - package_command="$rpm$option" - ;; - esac -} - -############################################################### -## client tools package -## Roach yes -## sslcert no -## Data Studio no -## Database Manager no -## Migration Toolkit no -## Cluster Configuration Assistant (CCA) no -## CAT no -############################################################### -function target_file_copy_for_non_server() -{ - for file in $(echo $1) - do - tar -cpf - $file | ( cd $2; tar -xpf - ) - done -} - -declare bin_name="${package_pre_name}.bin" -declare sha256_name='' -declare script_dir="${ROOT_DIR}/script" -declare root_script='' -declare bin_script='' -####################################################################### -##copy target file into temporary directory temp -####################################################################### -function target_file_copy() -{ - ################################################### - # make bin package - ################################################### - for file in $(echo $1) - do - tar -cpf - $file | ( cd $2; tar -xpf - ) - done - - cd $SCRIPT_DIR - sed 's/^\./\.\/bin/' script_file >binfile - root_script=$(cat script_file) - sed -i '/gs_backup/d' binfile - sed -i '/gs_check/d' binfile - sed -i '/gs_checkos/d' binfile - sed -i '/gs_checkperf/d' binfile - sed -i '/gs_collector/d' binfile - sed -i '/gs_expand/d' binfile - sed -i '/gs_install/d' binfile - sed -i '/gs_om/d' binfile - sed -i '/gs_postuninstall/d' binfile - sed -i '/gs_preinstall/d' binfile - sed -i '/gs_replace/d' binfile - sed -i '/gs_shrink/d' binfile - sed -i '/gs_ssh/d' binfile - sed -i '/gs_sshexkey/d' binfile - sed -i '/gs_uninstall/d' binfile - sed -i '/gs_upgradectl/d' binfile - sed -i '/gs_lcctl/d' binfile - sed -i '/gs_wsr/d' binfile - sed -i '/gs_gucZenith/d' binfile - - - bin_script=$(cat binfile) - rm binfile script_file - cd $BUILD_DIR - for file in $(echo $bin_script) - do - tar -cpf - $file | ( cd $2; tar -xpf - ) - done - - # create script/gspylib/clib, put file encrypt, libcrypto.so.1.1,libssl.so.1.1 - rm -rf 
$BUILD_DIR/script/gspylib/clib - mkdir -p $BUILD_DIR/script/gspylib/clib - cp $BUILD_DIR/lib/libstdc++.so.6 $BUILD_DIR/script/gspylib/clib - cp $BUILD_DIR/lib/libssl.so.1.1 $BUILD_DIR/script/gspylib/clib - cp $BUILD_DIR/lib/libcrypto.so.1.1 $BUILD_DIR/script/gspylib/clib - cp $BUILD_DIR/bin/encrypt $BUILD_DIR/script/gspylib/clib - - # copy script dir to temp path - cp -rf $BUILD_DIR/script/gspylib/ $2/bin/script/ - cp -rf $BUILD_DIR/script/impl/ $2/bin/script/ - cp -rf $BUILD_DIR/script/local/ $2/bin/script/ - - # clean the files which under bin/script/ is not be used - for x in $(ls $2/bin/script/) - do - filename="$2/bin/script/$x" - if [[ "$filename" = *"__init__.py" ]];then - continue - elif [ -d "$filename" ];then - continue - elif [ -f "$filename" ];then - rm -f "$filename" - fi - done - - chmod -R +x $2/bin/script/ - - if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then - # do nothing in current version - echo "" - else - sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf - fi - - #generate bin file - echo "Begin generate ${bin_name} bin file..." >> "$LOG_FILE" 2>&1 - ${p7zpath}/7z a -t7z -sfx "${bin_name}" "$2/*" >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - echo "Please check and makesure '7z' exist. " - die "generate ${bin_name} failed." - fi - echo "End generate ${bin_name} bin file" >> "$LOG_FILE" 2>&1 - - #generate sha256 file - sha256_name="${package_pre_name}.sha256" - echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1 - sha256sum "${bin_name}" | awk -F" " '{print $1}' > "$sha256_name" - if [ $? -ne 0 ]; then - die "generate sha256 file failed." - fi - echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1 - - - ################################################### - # make server package - ################################################### - if [ -d "${2}" ]; then - rm -rf ${2} - fi - mkdir -p ${2} - mv ${bin_name} ${sha256_name} $2 - for file in $(echo $root_script) - do - tar -cpf - $file | ( cd $2; tar -xpf - ) - done - - # copy script dir to temp path - cp -rf $BUILD_DIR/script/gspylib/ $2/script/ - cp -rf $BUILD_DIR/script/impl/ $2/script/ - cp -rf $BUILD_DIR/script/local/ $2/script/ - cp -rf $BUILD_DIR/script/stage_step/ $2/script/ - - # copy agent tool to temp path - res=$(cp -rf ${script_dir}/agent/ $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent to $2 failed. $res" - fi - res=$(cp -f ${script_dir}/agent/common/cmd_sender.py $2/script/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/common/cmd_sender.py to $2/script/ failed. $res" - fi - res=$(cp -f ${script_dir}/agent/common/uploader.py $2/script/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/common/uploader.py to $2/script/ failed. $res" - fi - res=$(cp -f ${script_dir}/agent/common/py_pstree.py $2/script/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/common/py_pstree.py to $2/script/ failed. $res" - fi - # copy the default xml to temp path - res=$(cp -f ${script_dir}/build/cluster_default_agent.xml $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/build/cluster_default_agent.xml to $2 failed. $res" - fi - # copy CBG shell tools to temp path - res=$(cp -rf ${script_dir}/build/bin/ $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/build/bin to $2 failed. $res" - fi - # copy the CBG config template to temp path - res=$(cp -rf ${script_dir}/build/configtemplate/ $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/build/configtemplate/ to $2 failed. 
$res" - fi - res=$(cp -f ${script_dir}/agent/om_agent.conf $2/configtemplate/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/om_agent.conf to $2/configtemplate/ failed. $res" - fi - - find $2/bin/ -type f -print0 | xargs -0 -n 10 -r dos2unix > /dev/null 2>&1 && - chmod -R +x $2/bin/ && - chmod -R +x $2/script/ -} - -####################################################################### -# read script file list from mpp_release_list -####################################################################### -function read_script_file() -{ - cd $SCRIPT_DIR - local head=$(cat $releasefile | grep "\[script\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find $pkgname in the $releasefile file " - fi - - local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - local all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - touch script_file - cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1" >script_file -} - -####################################################################### -##function make_package have three actions -##1.parse release_file_list variable represent file -##2.copy target file into a newly created temporary directory temp -##3.package all file in the temp directory and renome to destination package_path -####################################################################### -function make_package() -{ - cd $SCRIPT_DIR - releasefile=$1 - pkgname=$2 - - local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find $pkgname in the $releasefile file " - fi - - local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - local all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - if [ "$pkgname"x = "libpq"x -a "$version_mode" = "debug" ]; then - # copy include file - head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find header in the $releasefile file " - fi - - tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - # copy cm depend library file - head=$(cat $releasefile | grep "\[cmlibrary\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find cmlibrary in the $releasefile file " - fi - - tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest2=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - dest=$(echo "$dest";echo "$dest1";echo "$dest2") - - elif [ "$pkgname"x = "libpq"x -a "$version_mode" = "release" ]; then - # copy include file - head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find header in the $releasefile file " - fi - - tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! 
-n "$tail" ]; then - all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - dest=$(echo "$dest";echo "$dest1") - fi - - if [ "$pkgname"x = "server"x ]; then - read_script_file - fi - - mkdir -p ${BUILD_DIR} - cd ${BUILD_DIR} - rm -rf temp - mkdir temp - if [ "$pkgname"x = "server"x ]; then - copy_script_file "$script_dir/" ${BUILD_DIR} - fi - - case "$pkgname" in - server) - mkdir -p ${BUILD_DIR}/temp/etc - target_file_copy "$dest" ${BUILD_DIR}/temp - ;; - *) - target_file_copy_for_non_server "$dest" ${BUILD_DIR}/temp $pkgname - ;; - esac - - cd ${BUILD_DIR}/temp - select_package_command - - case "$pkgname" in - client) - echo "packaging client..." - $package_command "${client_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${client_package_name} failed" - fi - - mv ${client_package_name} ${package_path} - echo "install $pkgname tools is ${client_package_name} of the current directory " >> "$LOG_FILE" 2>&1 - echo "success!" - ;; - server) - echo "packaging server..." - cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp - if [ $? -ne 0 ]; then - die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed" - fi - - replace_omtools_version ${BUILD_DIR}/temp/ - - #copy inspection lib - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/output/log/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/output/nodes/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/asn1crypto/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/bcrypt/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/cffi/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/cryptography/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/enum/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/idna/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/nacl/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/pyasn1/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/kafka-python/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/pycparser/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/OpenSSL/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/psutil/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/netifaces/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/paramiko/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/bottle/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/paste/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/sqlparse/ && - - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/asn1crypto/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bcrypt/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cffi/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cryptography/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/enum/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/idna/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/nacl/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf 
${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pyasn1/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/kafka-python/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pycparser/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/OpenSSL/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/six.py ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.py ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS2 ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS4 ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend*.so ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/psutil/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/netifaces/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paramiko/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bottle/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paste/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/sqlparse/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - - if [ $? -ne 0 ]; then - die "remove svn info failed." - fi - - cp -r ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG} ./install_tools - find ./install_tools -name .svn -type d -print0 | xargs -0 rm -rf - if [ $? -ne 0 ]; then - die "remove svn info failed." - fi - - mkdir -p ./lib - - mv ./install_tools/asn1crypto ./lib - mv ./install_tools/bcrypt ./lib - mv ./install_tools/cffi ./lib - mv ./install_tools/cryptography ./lib - mv ./install_tools/enum ./lib - mv ./install_tools/idna ./lib - mv ./install_tools/nacl ./lib - mv ./install_tools/pyasn1 ./lib - mv ./install_tools/kafka-python ./lib - mv ./install_tools/pycparser ./lib - mv ./install_tools/OpenSSL ./lib - mv ./install_tools/six.py ./lib - mv ./install_tools/_cffi_backend.py ./lib - mv ./install_tools/_cffi_backend.so_UCS2 ./lib - mv ./install_tools/_cffi_backend.so_UCS4 ./lib - mv ./install_tools/_cffi_backend*.so ./lib - mv ./install_tools/paramiko ./lib - mv ./install_tools/psutil ./lib - mv ./install_tools/netifaces ./lib - mv ./install_tools/paste ./lib - mv ./install_tools/bottle ./lib - mv ./install_tools/sqlparse ./lib - - cp -rf ${UNIX_ODBC} . - #Not package unixodbc/bin/odbc_config, so delete it - rm -f ./unixodbc/bin/odbc_config - - rm -r ./install_tools - - #compress the agent package - echo "Agent package is starting." - cp ./lib/_cffi_backend.so_UCS4 ./lib/_cffi_backend.so - cp -r ./script/gspylib/pssh/bin ./agent/ - cp -r ./script/gspylib/clib ./agent/ - if [ "$product_mode"x == "single"x ] - then - if [ ! 
-e ./agent/centralized_cluster ] - then - touch ./agent/centralized_cluster - echo "This file is used only to distinguish cluster types (generated by the packaging script)." >> ./agent/centralized_cluster - else - echo "This file is used only to distinguish cluster types (generated by the packaging script)." > ./agent/centralized_cluster - fi - fi - $package_command "${agent_package_name}" ./agent ./lib ./cluster_default_agent.xml ./version.cfg >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${agent_package_name} failed" - fi - mv ${agent_package_name} ${package_path} - echo "Agent package has been finished." - - #remove the agent path which only needed by agent before compress server package - echo "Server package is starting." - rm -rf ./agent - - # install upgrade_sql.* files. - package_upgrade_sql - - $package_command "${server_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${server_package_name} failed" - fi - mv ${server_package_name} ${package_path} - echo "install $pkgname tools is ${server_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" - ;; - libpq) - echo "packaging libpq..." - $package_command "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${libpq_package_name} failed" - fi - mv ${libpq_package_name} ${package_path} - echo "install $pkgname tools is ${libpq_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" - ;; - esac -} - -####################################################################### -##copy all file of script directory to target directory -####################################################################### -function copy_script_file() -{ - target_file=$1 - local target_dir=$2 - - cp -rf $target_file/script/ $target_dir/ && - find $target_dir/script/ -type f -print0 | xargs -0 -n 10 -r dos2unix > /dev/null 2>&1 && - find $target_dir/script/gspylib/inspection/ -name d2utmp* -print0 | xargs -0 rm -rf && - cp -rf $target_file/script/gspylib/inspection/lib/checknetspeed/speed_test* $target_dir/script/gspylib/inspection/lib/checknetspeed/ && - cp -rf $target_file/script/gspylib/inspection/lib/*.png $target_dir/script/gspylib/inspection/lib/ && - - if [ $? -ne 0 ]; then - die "cp -r $target_file $target_dir failed " - fi -} -####################################################################### -## generate the version file. -####################################################################### -function make_license_control() -{ - local target_dir=$1 - python_exec=$(which python 2>/dev/null) - - if [ -x "$python_exec" ]; then - $python_exec ${binarylib_dir}/buildtools/license_control/encrypted_version_file.py >> "$LOG_FILE" 2>&1 - fi - - if [ $? -ne 0 ]; then - die "create ${binarylib_dir}/buildtools/license_control license file failed." - fi - - if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ]; then - # Get the md5sum. - gaussdb_200_sha256sum=$(sha256sum $gaussdb_200_file | awk '{print $1}') - gaussdb_300_sha256sum=$(sha256sum $gaussdb_300_file | awk '{print $1}') - # Modify the source code. - sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*NULL[ \t]*,[ \t]*NULL[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {\"$gaussdb_200_sha256sum\", \"$gaussdb_300_sha256sum\"};/g" $gaussdb_version_file - fi - - if [ $? -ne 0 ]; then - die "modify '$gaussdb_version_file' failed." 
- fi -} -####################################################################### -## copy the version file to target directory. -####################################################################### -function copy_license_file() -{ - local target_dir=$1 - - # Copy the version file to bin path. - if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_200_standard_file" ]; then - cp -f $gaussdb_200_file $target_dir && - cp -f $gaussdb_300_file $target_dir && - cp -f $gaussdb_200_standard_file $target_dir - fi - - if [ $? -ne 0 ]; then - die "cp -r ${binarylib_dir}/buildtools/license_control $target_dir failed." - fi -} -####################################################################### -## restore the gaussdb_version.cpp content. -####################################################################### -function restore_license_control() -{ - # Generate license control file. - make_license_control - - # Restore the gaussdb_version.cpp content. - if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ] && [ -f "$gaussdb_200_standard_file" ]; then - sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*[a-zA-Z0-9\"]\+[ \t]*,[ \t]*[a-zA-Z0-9\"]\+[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {NULL, NULL};/g" $gaussdb_version_file && - - # Remove the gaussdb.version file. - rm -f $gaussdb_200_file && - rm -f $gaussdb_300_file && - rm -f $gaussdb_200_standard_file - fi - - if [ $? -ne 0 ]; then - die "restore '$gaussdb_version_file' failed, remove ${binarylib_dir}/buildtools/license_control file failed." - fi -} - -############################################################# -# show package for hotpatch sdv. -############################################################# -if [ "$show_package" = true ]; then - echo "package: "$server_package_name - echo "bin: "$bin_name - exit 0 -fi - -############################################################# -# main function -############################################################# -# 1. clean install path and log file -mpp_pkg_pre_check - -# 2. chose action -mpp_pkg_bld -if [ "$zip_package" = "off" ]; then - echo "The option 'nopkg' is on, no package will be zipped." - exit 0 -fi - -# 3. make package -mpp_pkg_make - -#clean mpp_install directory -echo "clean enviroment" -echo "[makemppdb] $(date +%y-%m-%d' '%T): remove ${BUILD_DIR}" >>"$LOG_FILE" 2>&1 - -mkdir ${ROOT_DIR}/output -mv ${ROOT_DIR}/build/script/*.tar.gz ${ROOT_DIR}/output/ -echo "now, all packages has finished!" - -exit 0 diff --git a/build/script/cmake_package_mini.sh b/build/script/cmake_package_mini.sh new file mode 100644 index 000000000..f4b50a451 --- /dev/null +++ b/build/script/cmake_package_mini.sh @@ -0,0 +1,811 @@ +#!/bin/bash +####################################################################### +# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd. +# descript: Compile and pack MPPDB +# Return 0 means OK. +# Return 1 means failed. +# version: 2.0 +# date: 2021-12-12 +####################################################################### + +##default package type is server +declare package_type='server' +declare install_package_format='tar' +##default version mode is relase +declare version_mode='release' +declare binarylib_dir='None' +declare separate_symbol='on' + +#detect platform information. +PLATFORM=32 +bit=$(getconf LONG_BIT) +if [ "$bit" -eq 64 ]; then + PLATFORM=64 +fi + +#get OS distributed version. 
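+# As a worked example of the release-file parsing below (a sketch; the exact
+# release string varies by distribution): for an /etc/euleros-release
+# containing "EulerOS release 2.0 (SP8)", the two pipelines would yield
+# kernel="euleros" and version="sp8":
+#   echo "EulerOS release 2.0 (SP8)" | awk -F ' ' '{print $1}' | tr A-Z a-z                            # -> euleros
+#   echo "EulerOS release 2.0 (SP8)" | awk -F '(' '{print $2}' | awk -F ')' '{print $1}' | tr A-Z a-z  # -> sp8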
+kernel=""
+version=""
+ext_version=""
+if [ -f "/etc/euleros-release" ]; then
+    kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
+    version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
+    ext_version=$version
+elif [ -f "/etc/openEuler-release" ]; then
+    kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
+    version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
+elif [ -f "/etc/centos-release" ]; then
+    kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
+    version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
+else
+    kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z)
+    version=$(lsb_release -r | awk -F ' ' '{print $2}')
+fi
+
+if [ X"$kernel" == X"euleros" ]; then
+    dist_version="EULER"
+elif [ X"$kernel" == X"centos" ]; then
+    dist_version="CENTOS"
+elif [ X"$kernel" == X"openeuler" ]; then
+    dist_version="OPENEULER"
+else
+    echo "Only EulerOS, openEuler and CentOS platforms are supported."
+    echo "Kernel is $kernel"
+    exit 1
+fi
+
+show_package=false
+gcc_version="7.3.0"
+
+##add platform architecture information
+cpus_num=$(grep -w processor /proc/cpuinfo|wc -l)
+PLATFORM_ARCH=$(uname -p)
+if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
+    ARCHITECTURE_EXTRA_FLAG=_euleros2.0_${ext_version}_$PLATFORM_ARCH
+    release_file_list="opengauss_release_list_${kernel}_${PLATFORM_ARCH}_mini_single"
+else
+    ARCHITECTURE_EXTRA_FLAG=_euleros2.0_sp5_${PLATFORM_ARCH}
+    release_file_list="opengauss_release_list_${kernel}_mini_single"
+fi
+
+##default install version storage path
+declare mppdb_version='GaussDB Kernel'
+declare mppdb_name_for_package="$(echo ${mppdb_version} | sed 's/ /-/g')"
+declare package_path='./'
+declare version_number=''
+declare make_check='off'
+declare zip_package='on'
+declare extra_config_opt=''
+
+#######################################################################
+##print out the version of mppdb
+#######################################################################
+function print_version()
+{
+    echo "$version_number"
+}
+
+#######################################################################
+## print help information
+#######################################################################
+function print_help()
+{
+    echo "Usage: $0 [OPTION]
+    -h|--help              show help information.
+    -V|--version           show version information.
+    -f|--file              provide the released file list.
+    -3rd|--binarylib_dir   the directory of third-party binarylibs.
+    -pkg|--package         provide the type of installation package, server or libpq; the default value is server.
+    -pm                    product mode, single, multiple or opengauss; the default value is multiple.
+    -p|--path              the storage path of the generated package.
+    -t                     packaging format, tar or rpm; the default value is tar (rpm is not supported yet).
+    -m|--version_mode      debug, release or memcheck; the default value is release.
+    -mc|--make_check       on or off; the default value is on.
+    -s|--symbol_mode       whether to separate symbols in debug mode; the default value is on.
+    -cv|--gcc_version      gcc-version option: 7.3.0.
+    -nopkg|--no_package    don't zip binaries into packages.
+    -co|--config_opt       more configure options.
+    -S|--show_pkg          show the server package name and bin name based on the current configuration.
+"
+}
+
+if [ $# = 0 ] ; then
+    echo "missing option"
+    print_help
+    exit 1
+fi
+
+SCRIPT_PATH=${0}
+FIRST_CHAR=$(expr substr "$SCRIPT_PATH" 1 1)
+if [ "$FIRST_CHAR" = "/" ]; then
+    SCRIPT_PATH=${0}
+else
+    SCRIPT_PATH="$(pwd)/${SCRIPT_PATH}"
+fi
+SCRIPT_NAME=$(basename $SCRIPT_PATH)
+SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
+SCRIPT_DIR=$(dirname "$SCRIPT_DIR")
+
+if [ ! -f "$SCRIPT_DIR/$SCRIPT_NAME" ] ; then
+    SCRIPT_DIR=$SCRIPT_DIR/script
+fi
+
+package_path=$SCRIPT_DIR
+
+#######################################################################
+##read version from $release_file_list
+#######################################################################
+function read_mpp_version()
+{
+    cd $SCRIPT_DIR
+    local head=$(cat $release_file_list | grep "\[version\]" -n | awk -F: '{print $1}')
+    if [ ! -n "$head" ]; then
+        echo "error: cannot find the [version] section in the $release_file_list file"
+        exit 1
+    fi
+    local tail=$(cat $release_file_list | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
+    if [ ! -n "$tail" ]; then
+        local all=$(cat $release_file_list | wc -l)
+        let tail=$all+1-$head
+    fi
+    version_number=$(cat $release_file_list | awk "NR==$head+1,NR==$tail+$head-1")
+    echo "${mppdb_name_for_package}-${version_number}">version.cfg
+    #the build number is read automatically from the kernel's globals.cpp, no need to change it here
+}
+
+#######################################################################
+##read command line parameters
+#######################################################################
+while [ $# -gt 0 ]; do
+    case "$1" in
+        -h|--help)
+            print_help
+            exit 1
+            ;;
+        -V|--version)
+            print_version
+            exit 1
+            ;;
+        -f|--file)
+            if [ "$2"X = X ]; then
+                echo "no file name given"
+                exit 1
+            fi
+            release_file_list=$2
+            shift 2
+            ;;
+        -3rd|--binarylib_dir)
+            if [ "$2"X = X ]; then
+                echo "no binarylib directory given"
+                exit 1
+            fi
+            binarylib_dir=$2
+            shift 2
+            ;;
+        -p|--path)
+            if [ "$2"X = X ]; then
+                echo "no package generation path given"
+                exit 1
+            fi
+            package_path=$2
+            if [ ! -d "$package_path" ]; then
+                mkdir -p $package_path
+            fi
+            shift 2
+            ;;
+        -pkg)
+            if [ "$2"X = X ]; then
+                echo "no package type name given"
+                exit 1
+            fi
+            package_type=$2
+            shift 2
+            ;;
+        -s|--symbol_mode)
+            if [ "$2"X = X ]; then
+                echo "no symbol parameter given"
+                exit 1
+            fi
+            separate_symbol=$2
+            shift 2
+            ;;
+        -t)
+            if [ "$2"X = X ]; then
+                echo "no installation package format given"
+                exit 1
+            fi
+            if [ "$2" = rpm ]; then
+                echo "error: rpm packages are not supported yet!"
+                exit 1
+            fi
+            install_package_format=$2
+            shift 2
+            ;;
+        -m|--version_mode)
+            if [ "$2"X = X ]; then
+                echo "no version mode given"
+                exit 1
+            fi
+            version_mode=$2
+            shift 2
+            ;;
+        -mc|--make_check)
+            if [ "$2"X = X ]; then
+                echo "no make check value given"
+                exit 1
+            fi
+            make_check=$2
+            shift 2
+            ;;
+        -cv|--gcc_version)
+            if [ "$2"X = X ]; then
+                echo "no gcc version given"
+                exit 1
+            fi
+            gcc_version=$2
+            shift 2
+            ;;
+        -nopkg|--no_package)
+            zip_package='off'
+            shift 1
+            ;;
+        -co|--config_opt)
+            if [ "$2"X = X ]; then
+                echo "no extra configure options provided"
+                exit 1
+            fi
+            extra_config_opt=$2
+            shift 2
+            ;;
+        -S|--show_pkg)
+            show_package=true
+            shift
+            ;;
+        *)
+            echo "Internal Error: option processing error: $1" 1>&2
+            echo "please input the right parameter; the following command may help you"
+            echo "./cmake_package_mini.sh --help or ./cmake_package_mini.sh -h"
+            exit 1
+    esac
+done
+
+read_mpp_version
+
+if [ "$gcc_version" = "7.3.0" ]; then
+    gcc_version=${gcc_version:0:3}
+else
+    echo "Unknown gcc version $gcc_version"
+    exit 1
+fi
+
+#######################################################################
+## declare all package names
+#######################################################################
+declare version_string="${mppdb_name_for_package}-${version_number}"
+declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
+declare server_package_name="${package_pre_name}.${install_package_format}.gz"
+
+declare libpq_package_name="${package_pre_name}-Libpq.${install_package_format}.gz"
+declare symbol_package_name="${package_pre_name}-symbol.${install_package_format}.gz"
+
+echo "[makemppdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
+ROOT_DIR=$(dirname "$SCRIPT_DIR")
+ROOT_DIR=$(dirname "$ROOT_DIR")
+PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
+if [ "${PLAT_FORM_STR}"x == "Failed"x ]
+then
+    echo "Only EulerOS and openEuler platforms are supported."
+    exit 1
+fi
+
+CMAKE_BUILD_DIR=${ROOT_DIR}/tmp_build
+declare LOG_FILE="${ROOT_DIR}/build/script/makemppdb_pkg.log"
+declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install"
+declare ERR_MKGS_FAILED=1
+declare MKGS_OK=0
+if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then
+    BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
+    PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}"
+    BINARYLIBS_PATH="${binarylib_dir}/dependency"
+else
+    BUILD_TOOLS_PATH="${ROOT_DIR}/buildtools/${PLAT_FORM_STR}"
+    PLATFORM_PATH="${ROOT_DIR}/platform/${PLAT_FORM_STR}"
+    BINARYLIBS_PATH="${ROOT_DIR}/binarylibs"
+fi
+
+declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
+
+export CC="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc"
+export CXX="$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++"
+export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
+export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
+jdkpath=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk
+if [ ! -d "${jdkpath}" ]; then
+    jdkpath=${binarylib_dir}/platform/openjdk8/${PLATFORM_ARCH}/jdk
+fi
+export JAVA_HOME=${jdkpath}
+
+declare p7zpath="${BUILD_TOOLS_PATH}/p7z/bin"
+
+###################################
+# build parameter about enable-llt
+###################################
+echo "[makemppdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}"
+
+###################################
+# get version number from globals.cpp
+###################################
+function read_mpp_number()
+{
+    global_kernel="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
+    version_name="GRAND_VERSION_NUM"
+    version_num=""
+    line=$(cat $global_kernel | grep ^const* | grep $version_name)
+    version_num1=${line#*=}
+    #remove the trailing semicolon
+    version_num=$(echo $version_num1 | tr -d ";")
+    #remove the surrounding blanks
+    version_num=$(echo $version_num)
+
+    if echo $version_num | grep -qE '^92[0-9]+$'
+    then
+        # keep the digits after the leading "92"
+        latter=${version_num:2}
+        echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
+    else
+        echo "Cannot get the version number from globals.cpp."
+        exit 1
+    fi
+}
+read_mpp_number
+
+#######################################################################
+# Print log.
+#######################################################################
+log()
+{
+    echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@"
+    echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1
+}
+
+#######################################################################
+# print log and exit.
+#######################################################################
+die()
+{
+    log "$@"
+    echo "$@"
+    exit $ERR_MKGS_FAILED
+}
+
+#######################################################################
+## Check the installation package production environment
+#######################################################################
+function mpp_pkg_pre_check()
+{
+    if [ -d "$BUILD_DIR" ]; then
+        rm -rf $BUILD_DIR
+    fi
+    if [ -f "$LOG_FILE" ]; then
+        rm -rf $LOG_FILE
+    fi
+}
+
+#######################################################################
+# Install all SQL files from distribute/include/catalog/upgrade_sql
+# to INSTALL_DIR/bin/script/upgrade_sql.
+# Package all SQL files and then verify them with SHA256.
+#######################################################################
+function package_upgrade_sql()
+{
+    echo "Begin to install upgrade_sql files..."
+    UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
+    UPGRADE_SQL_SHA256="upgrade_sql.sha256"
+    MULTIP_IGNORE_VERSION=(289 294 296)
+    cp -r "${UPGRADE_SQL_DIR}" .
+    [ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
+    tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
+    [ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
+    rm -rf ./upgrade_sql > /dev/null 2>&1
+
+    sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
+    [ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
+
+    chmod 600 ${UPGRADE_SQL_TAR}
+    chmod 600 ${UPGRADE_SQL_SHA256}
+    echo "Successfully packaged upgrade_sql files."
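+    # For illustration only (this script does not verify the checksum itself):
+    # the pair written above can later be checked with, e.g.
+    #   echo "$(cat upgrade_sql.sha256)  upgrade_sql.tar.gz" | sha256sum --check -
+    # which prints "upgrade_sql.tar.gz: OK" when the tarball is intact.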
+}
+
+#######################################################################
+##install the gaussdb database and friends;
+##what gets installed is selected by the package_type variable
+#######################################################################
+function mpp_pkg_bld()
+{
+    install_gaussdb
+}
+
+#######################################################################
+##install the gaussdb database, including server, client and libpq
+#######################################################################
+function install_gaussdb()
+{
+    # Generate the license control file, and set the md5sum string in the code.
+    echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
+    echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
+    cd "$ROOT_DIR/"
+    if [ $? -ne 0 ]; then
+        die "change dir to $ROOT_DIR failed."
+    fi
+
+    if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
+        echo "WARNING: symbols are not separated in debug mode!"
+    fi
+
+    binarylibs_path=${ROOT_DIR}/binarylibs
+    if [ "${binarylib_dir}"x != "None"x ]; then
+        binarylibs_path=${binarylib_dir}
+    fi
+
+    export BUILD_TUPLE=${PLATFORM_ARCH}
+    export THIRD_BIN_PATH="${binarylibs_path}"
+    export PREFIX_HOME="${BUILD_DIR}"
+
+    if [ "$version_mode"x == "release"x ]; then
+        CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
+        export DEBUG_TYPE=release
+    elif [ "$version_mode"x == "memcheck"x ]; then
+        CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
+        export DEBUG_TYPE=memcheck
+    else
+        CMAKE_OPT="-DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON"
+        export DEBUG_TYPE=debug
+    fi
+
+    echo "Begin run cmake for gaussdb server" >> "$LOG_FILE" 2>&1
+    echo "CMake options: ${CMAKE_OPT}" >> "$LOG_FILE" 2>&1
+    echo "CMake build type: ${DEBUG_TYPE}" >> "$LOG_FILE" 2>&1
+
+    export GAUSSHOME=${BUILD_DIR}
+    export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
+
+    cd ${ROOT_DIR}
+    [ -d "${CMAKE_BUILD_DIR}" ] && rm -rf ${CMAKE_BUILD_DIR}
+    [ -d "${BUILD_DIR}" ] && rm -rf ${BUILD_DIR}
+    mkdir -p ${CMAKE_BUILD_DIR}
+    cd ${CMAKE_BUILD_DIR}
+    cmake .. ${CMAKE_OPT}
+    echo "Begin make and install gaussdb server" >> "$LOG_FILE" 2>&1
+    make VERBOSE=1 -sj ${cpus_num}
+    if [ $? -ne 0 ]; then
+        die "make failed."
+    fi
+    make install -sj ${cpus_num}
+    if [ $? -ne 0 ]; then
+        die "make install failed."
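+        # If this branch is hit, the failing build can be reproduced by hand
+        # for debugging (a sketch; it assumes the same environment exported
+        # above: CC/CXX, BUILD_TUPLE, THIRD_BIN_PATH, PREFIX_HOME, DEBUG_TYPE
+        # and GAUSSHOME):
+        #   cd ${CMAKE_BUILD_DIR}
+        #   cmake .. -DENABLE_MULTIPLE_NODES=OFF -DENABLE_PRIVATEGAUSS=OFF -DENABLE_THREAD_SAFETY=ON -DENABLE_LITE_MODE=ON
+        #   make VERBOSE=1 -j1    # -j1 serializes the output so the first error is easier to spot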
+    fi
+
+    ## check build specification
+    spec="gaussdbkernel"
+    if ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'GaussDB Kernel' >/dev/null 2>&1 ); then
+        spec="gaussdbkernel"
+    elif ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'openGauss' >/dev/null 2>&1 ); then
+        spec="opengauss"
+    fi
+
+    chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
+    dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
+
+    #back to the directory of separate_debug_information.sh
+    cd $SCRIPT_DIR
+    if [ "$version_mode" = "release" -a "$separate_symbol" = "on" -a "$zip_package" = "on" ]; then
+        chmod +x ./separate_debug_information.sh
+        ./separate_debug_information.sh
+        cd $SCRIPT_DIR
+        mv symbols.tar.gz $symbol_package_name
+    fi
+
+    #back to root dir
+    cd $ROOT_DIR
+
+    #insert the commitid into version.cfg as the upgrade app path specification
+    export PATH=${BUILD_DIR}:$PATH
+    export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
+
+    commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | cut -d ")" -f 1 | awk '{print $NF}')
+    echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
+    echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
+
+    cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/bin/iperf3 ${BUILD_DIR}/bin
+    if [ $? -ne 0 ]; then
+        die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/bin/iperf3 ${BUILD_DIR}/bin failed"
+    fi
+
+    cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/lib/libiperf.so.0 ${BUILD_DIR}/lib
+    if [ $? -ne 0 ]; then
+        die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/iperf/comm/lib/libiperf.so.0 ${BUILD_DIR}/lib failed"
+    fi
+
+    cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/fio/comm/bin/fio ${BUILD_DIR}/bin
+    if [ $? -ne 0 ]; then
+        die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/fio/comm/bin/fio ${BUILD_DIR}/bin failed"
+    fi
+}
+
+#######################################################################
+##select the package type according to the package_type variable
+#######################################################################
+function mpp_pkg_make()
+{
+    case "$package_type" in
+        server)
+            echo "file list: $release_file_list"
+            make_package $release_file_list 'server'
+            make_package $release_file_list 'libpq'
+            ;;
+        libpq)
+            make_package $release_file_list 'libpq'
+            ;;
+    esac
+}
+
+declare package_command
+#######################################################################
+##select the package command according to install_package_format
+#######################################################################
+function select_package_command()
+{
+    case "$install_package_format" in
+        tar)
+            tar='tar'
+            option=' -zcvf'
+            package_command="$tar$option"
+            ;;
+        rpm)
+            rpm='rpm'
+            option=' -i'
+            package_command="$rpm$option"
+            ;;
+    esac
+}
+
+###############################################################
+##  client tools package
+##  Roach                                    no
+##  sslcert                                  no
+##  Data Studio                              no
+##  Database Manager                         no
+##  Migration Toolkit                        no
+##  Cluster Configuration Assistant (CCA)    no
+##  CAT                                      no
+###############################################################
+function target_file_copy_for_non_server()
+{
+    for file in $(echo $1)
+    do
+        tar -cpf - $file | ( cd $2; tar -xpf - )
+    done
+}
+
+declare bin_name="${package_pre_name}.bin"
+declare sha256_name=''
+declare script_dir="${ROOT_DIR}/script"
+
+#######################################################################
+##copy target files into the temporary directory temp
+#######################################################################
+function target_file_copy()
+{
+    ###################################################
+    # make bin package
+    ###################################################
+    for file in $(echo $1)
+    do
+        tar -cpf - $file | ( cd $2; tar -xpf - )
+    done
+    cd $BUILD_DIR
+    if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
+        # do nothing in the current version
+        echo ""
+    else
+        sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
+    fi
+    if [ "$(ls -A /lib64/libaio.so*)" != "" ]
+    then
+        cp /lib64/libaio.so* $2/lib/
+    elif [ "$(ls -A /lib/libaio.so*)" != "" ]
+    then
+        cp /lib/libaio.so* $2/lib/
+    fi
+
+    if [ "$(ls -A /lib64/libnuma.so*)" != "" ]
+    then
+        cp /lib64/libnuma.so* $2/lib/
+    elif [ "$(ls -A /lib/libnuma.so*)" != "" ]
+    then
+        cp /lib/libnuma.so* $2/lib/
+    fi
+
+    #generate bin file
+    echo "Begin generate ${bin_name} bin file..." >> "$LOG_FILE" 2>&1
+    ${p7zpath}/7z a -t7z -sfx "${bin_name}" "$2/*" >> "$LOG_FILE" 2>&1
+    if [ $? -ne 0 ]; then
+        echo "Please check and make sure '7z' exists."
+        die "generate ${bin_name} failed."
+    fi
+    echo "End generate ${bin_name} bin file" >> "$LOG_FILE" 2>&1
+
+    #generate sha256 file
+    sha256_name="${package_pre_name}.sha256"
+    echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
+    sha256sum "${bin_name}" | awk -F" " '{print $1}' > "$sha256_name"
+    if [ $? -ne 0 ]; then
+        die "generate sha256 file failed."
+    fi
+    echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
+
+    cp $2/lib/libstdc++.so.6 ./
+
+    ###################################################
+    # make server package
+    ###################################################
+    if [ -d "${2}" ]; then
+        rm -rf ${2}
+    fi
+    mkdir -p ${2}
+    mkdir -p $2/dependency
+    cp libstdc++.so.6 $2/dependency
+    mv ${bin_name} ${sha256_name} $2
+}
+
+
+#######################################################################
+##function make_package performs three actions:
+##1.parse the file named by the release_file_list variable
+##2.copy the target files into a newly created temporary directory temp
+##3.package all files in the temp directory and move the result to the destination package_path
+#######################################################################
+function make_package()
+{
+    cd $SCRIPT_DIR
+    releasefile=$1
+    pkgname=$2
+
+    local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
+    if [ ! -n "$head" ]; then
+        die "error: cannot find the [$pkgname] section in the $releasefile file"
+    fi
+
+    local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
+    if [ ! -n "$tail" ]; then
+        local all=$(cat $releasefile | wc -l)
+        let tail=$all+1-$head
+    fi
+
+    dest=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
+    if [ "$pkgname"x = "libpq"x -a \( "$version_mode" = "debug" -o "$version_mode" = "release" \) ]; then
+        # copy include files
+        head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}')
+        if [ ! -n "$head" ]; then
+            die "error: cannot find the [header] section in the $releasefile file"
+        fi
+
+        tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
+        if [ ! -n "$tail" ]; then
+            all=$(cat $releasefile | wc -l)
+            let tail=$all+1-$head
+        fi
+
+        dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
+        dest=$(echo "$dest";echo "$dest1")
+    fi
+
+    mkdir -p ${BUILD_DIR}
+    cd ${BUILD_DIR}
+    rm -rf temp
+    mkdir temp
+
+    case "$pkgname" in
+        server)
+            mkdir -p ${BUILD_DIR}/temp/etc
+            target_file_copy "$dest" ${BUILD_DIR}/temp
+            ;;
+        *)
+            target_file_copy_for_non_server "$dest" ${BUILD_DIR}/temp $pkgname
+            ;;
+    esac
+
+    cd ${BUILD_DIR}/temp
+    select_package_command
+
+    case "$pkgname" in
+        server)
+            echo "packaging server..."
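+            # For orientation, the top level of the server tarball assembled in
+            # this branch ends up containing (a sketch, derived from the copies
+            # below and from target_file_copy above):
+            #   version.cfg  install.sh  uninstall.sh  opengauss_lite.conf
+            #   upgrade_GAUSSV5.sh  upgrade_common.sh  upgrade_config.sh  upgrade_errorcode.sh
+            #   upgrade_sql.tar.gz  upgrade_sql.sha256
+            #   ${package_pre_name}.bin  ${package_pre_name}.sha256  dependency/libstdc++.so.6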
+            cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
+            if [ $? -ne 0 ]; then
+                die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
+            fi
+            cp ${ROOT_DIR}/${open_gauss}/liteom/install.sh ./
+            if [ $? -ne 0 ]
+            then
+                die "copy ${ROOT_DIR}/${open_gauss}/liteom/install.sh to ${BUILD_DIR}/temp failed"
+            fi
+
+            cp ${ROOT_DIR}/${open_gauss}/liteom/uninstall.sh ./
+            if [ $? -ne 0 ]
+            then
+                die "copy ${ROOT_DIR}/${open_gauss}/liteom/uninstall.sh to ${BUILD_DIR}/temp failed"
+            fi
+
+            cp ${ROOT_DIR}/${open_gauss}/liteom/opengauss_lite.conf ./
+            if [ $? -ne 0 ]
+            then
+                die "copy ${ROOT_DIR}/${open_gauss}/liteom/opengauss_lite.conf to ${BUILD_DIR}/temp failed"
+            fi
+
+            # pkg upgrade scripts: upgrade_GAUSSV5.sh, upgrade_common.sh, upgrade_config.sh, upgrade_errorcode.sh
+            for filename in upgrade_GAUSSV5.sh upgrade_common.sh upgrade_config.sh upgrade_errorcode.sh
+            do
+                if ! cp ${ROOT_DIR}/${open_gauss}/liteom/${filename} ./ ; then
+                    die "copy ${ROOT_DIR}/${open_gauss}/liteom/${filename} to ${BUILD_DIR}/temp failed"
+                fi
+            done
+            # install upgrade_sql.* files.
+            package_upgrade_sql
+
+            $package_command "${server_package_name}" ./* >>"$LOG_FILE" 2>&1
+            if [ $? -ne 0 ]; then
+                die "$package_command ${server_package_name} failed"
+            fi
+            mv ${server_package_name} ${package_path}
+            echo "the $pkgname package ${server_package_name} is in the ${package_path} directory" >> "$LOG_FILE" 2>&1
+            echo "success!"
+            ;;
+        libpq)
+            echo "packaging libpq..."
+            $package_command "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
+            if [ $? -ne 0 ]; then
+                die "$package_command ${libpq_package_name} failed"
+            fi
+            mv ${libpq_package_name} ${package_path}
+            echo "the $pkgname package ${libpq_package_name} is in the ${package_path} directory" >> "$LOG_FILE" 2>&1
+            echo "success!"
+            ;;
+    esac
+}
+
+
+#############################################################
+# show package for hotpatch sdv.
+#############################################################
+if [ "$show_package" = true ]; then
+    echo "package: "$server_package_name
+    echo "bin: "$bin_name
+    exit 0
+fi
+
+#############################################################
+# main function
+#############################################################
+# 1. clean the install path and log file
+mpp_pkg_pre_check
+
+# 2. choose the action
+mpp_pkg_bld
+if [ "$zip_package" = "off" ]; then
+    echo "The option 'nopkg' is on, no package will be zipped."
+    exit 0
+fi
+
+# 3. make the package
+mpp_pkg_make
+
+#clean the mpp_install directory
+echo "clean environment"
+echo "[makemppdb] $(date +%y-%m-%d' '%T): remove ${BUILD_DIR}" >>"$LOG_FILE" 2>&1
+
+mkdir ${ROOT_DIR}/output
+mv ${ROOT_DIR}/build/script/*.tar.gz ${ROOT_DIR}/output/
+echo "now, all packages have been built!"
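+# Typical invocation (illustrative only; the binarylibs path is site-specific):
+#   sh build/script/cmake_package_mini.sh -3rd /data/binarylibs -m release -pkg server
+# On success the server and libpq tarballs end up under ${ROOT_DIR}/output/.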
+exit 0 \ No newline at end of file diff --git a/build/script/gauss.spec b/build/script/gauss.spec deleted file mode 100644 index 2a2a86280..000000000 --- a/build/script/gauss.spec +++ /dev/null @@ -1,2 +0,0 @@ -PRODUCT=GaussDB Kernel -VERSION=V500R002C00 diff --git a/build/script/opengauss.spec b/build/script/gaussdb.ver similarity index 56% rename from build/script/opengauss.spec rename to build/script/gaussdb.ver index a3bda6515..70ec24a8a 100644 --- a/build/script/opengauss.spec +++ b/build/script/gaussdb.ver @@ -1,2 +1,2 @@ PRODUCT=openGauss -VERSION=2.1.0 +VERSION=2.1.0 \ No newline at end of file diff --git a/build/script/gsql_env.sh b/build/script/gsql_env.sh deleted file mode 100644 index bd3670089..000000000 --- a/build/script/gsql_env.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -#----------------------------------------------------- -#Copyright (c): 2020, Huawei Tech. Co., Ltd. -#FileName : gsql_env.sh -#Version : V500R001C10 -#Date : 2020-08-06 -#Description : This file is to configure environment variables of gsql -#----------------------------------------------------- - -#find the absolute path of this script -LOCAL_PATH=${0} -if [ x${LOCAL_PATH:0:1} = "x-" ] || [ "x${LOCAL_PATH}" = "x/bin/bash" ] || [ "x${LOCAL_PATH}" = "x/bin/sh" ]; then - LOCAL_PATH="$(pwd)" -elif [ x${LOCAL_PATH:0:1} != "x/" ]; then - LOCAL_PATH="$(pwd)/$(dirname ${LOCAL_PATH})"; -fi - -function logerr() -{ - printf "ERROR: $* \n" >&2 -} - -function loghint() -{ - printf "HINT: $* \n" >&2 -} - -function logwarning() -{ - printf "WARNING: $* \n" >&2 -} - -function doing() -{ - length_of_line=60 - printf "$1 "; - for ((i=${#1};i<$length_of_line;i++)); do - printf '.'; - done; - printf " " -} - -#------------------------------ -# gsql things -#------------------------------ -function cofig_gsql_and_gs_ktool() -{ - doing 'Configuring LD_LIBRARY_PATH, PATH and GS_KTOOL_FILE_PATH for gsql and gs_ktool...' - LIB_PATH="${LOCAL_PATH}/lib" - BIN_PATH="${LOCAL_PATH}/bin" - GS_KT_FILE_PATH="${LOCAL_PATH}/gs_ktool_file" - if [ ! -f "${LOCAL_PATH}/bin/gsql" ]; then - logerr "failed to locate ./bin/gsql, please source this file at the path where it is. " - return 1; - fi; - if [ ! -f "${LOCAL_PATH}/bin/gs_ktool" ]; then - logerr "failed to locate ./bin/gs_ktool, please source this file at the path where it is. " - return 1; - fi; - if [ ! -f "${LOCAL_PATH}/gs_ktool_file/gs_ktool_conf.ini" ]; then - logerr "failed to locate ./gs_ktool_file/gs_ktool_con.ini, please source this file at the path where it is. " - return 1; - fi; - export LD_LIBRARY_PATH=${LIB_PATH}:${LD_LIBRARY_PATH} - export PATH=${BIN_PATH}:${PATH} - export GS_KTOOL_FILE_PATH=${GS_KT_FILE_PATH} - echo 'done' - return 0 -} - -if [ ! -z "$1" ]; then - echo "Usage:" - echo " source $0" -else - cofig_gsql_and_gs_ktool - if [ 0 -eq $? ]; then - echo 'All things done.' 
- fi -fi diff --git a/build/script/mppdb.ver b/build/script/mppdb.ver deleted file mode 100644 index 6d953c8cf..000000000 --- a/build/script/mppdb.ver +++ /dev/null @@ -1,5 +0,0 @@ -V=100 -R=003 -C=00 -OfficialVersion= -InternalVersion=B100 diff --git a/build/script/opengauss_release_list_kylin_aarch64_single b/build/script/opengauss_release_list_kylin_aarch64_single deleted file mode 100644 index 39bb3865d..000000000 --- a/build/script/opengauss_release_list_kylin_aarch64_single +++ /dev/null @@ -1,1509 +0,0 @@ -[server] -./bin/gs_log -./bin/gsql -./bin/gaussdb -./bin/gaussdb.version.GaussDB200 -./bin/gaussdb.version.GaussDB300 -./bin/gaussdb.license.GaussDB200_Standard -./bin/gstrace -./bin/gs_encrypt -./bin/gs_dump -./bin/gs_dumpall -./bin/gs_ctl -./bin/gs_initdb -./bin/gs_guc -./bin/gs_basebackup -./bin/gs_probackup -./bin/encrypt -./bin/openssl -./bin/gs_restore -./bin/gs_cgroup -./bin/openssl -./bin/pg_config -./bin/pg_controldata -./bin/pg_format_cu -./bin/pg_resetxlog -./bin/alarmItem.conf -./bin/retry_errcodes.conf -./bin/cluster_guc.conf -./bin/runsessionstat.sh -./bin/drop_caches.sh -./bin/run_drop_cache.sh -./bin/gs_plan_simulator.sh -./bin/dependences/clean_temp.sql -./bin/dependences/initdb.py -./bin/dependences/restore_temp.sql -./bin/dependences/store_pg_class_stats.sql -./bin/dependences/store_pg_statistic_ext_stats.sql -./bin/dependences/store_pg_statistic_stats.sql -./bin/dependences/store_pgxc_class.sql -./bin/krb5kdc -./bin/klist -./bin/kinit -./bin/kdestroy -./bin/kdb5_util -./bin/kadmin.local -./bin/lz4 -./etc/kerberos/kadm5.acl -./etc/kerberos/kdc.conf -./etc/kerberos/krb5.conf -./etc/kerberos/mppdb-site.xml -./share/postgis/PostGIS_install.sh -./share/postgresql/db4ai -./share/postgresql/tmp/udstools.py -./share/postgresql/snowball_create.sql -./share/postgresql/pmk_schema.sql -./share/postgresql/pmk_schema_single_inst.sql -./share/postgresql/pg_hba.conf.sample -./share/postgresql/gs_gazelle.conf.sample -./share/postgresql/pg_service.conf.sample -./share/postgresql/psqlrc.sample -./share/postgresql/conversion_create.sql -./share/postgresql/postgres.shdescription -./share/postgresql/pg_ident.conf.sample -./share/postgresql/postgres.description -./share/postgresql/postgresql.conf.sample -./share/postgresql/extension/plpgsql--1.0.sql -./share/postgresql/extension/hstore.control -./share/postgresql/extension/security_plugin.control -./share/postgresql/extension/security_plugin--1.0.sql -./share/postgresql/extension/tsdb.control -./share/postgresql/extension/tsdb--1.0.sql -./share/postgresql/extension/streaming--1.0.sql -./share/postgresql/extension/streaming.control -./share/postgresql/extension/file_fdw--1.0.sql -./share/postgresql/extension/plpgsql.control -./share/postgresql/extension/dist_fdw.control -./share/postgresql/extension/dist_fdw--1.0.sql -./share/postgresql/extension/hstore--1.1.sql -./share/postgresql/extension/plpgsql--unpackaged--1.0.sql -./share/postgresql/extension/file_fdw.control -./share/postgresql/extension/hstore--unpackaged--1.0.sql -./share/postgresql/extension/hstore--1.0--1.1.sql -./share/postgresql/extension/hdfs_fdw--1.0.sql -./share/postgresql/extension/hdfs_fdw.control -./share/postgresql/extension/gc_fdw--1.0.sql -./share/postgresql/extension/gc_fdw.control -./share/postgresql/extension/log_fdw--1.0.sql -./share/postgresql/extension/log_fdw.control -./share/postgresql/extension/mot_fdw--1.0.sql -./share/postgresql/extension/mot_fdw.control -./share/postgresql/extension/dimsearch--1.0.sql -./share/postgresql/extension/dimsearch.control 
-./share/postgresql/extension/packages--1.0.sql -./share/postgresql/extension/packages--1.0--1.1.sql -./share/postgresql/extension/packages.control -./share/postgresql/extension/postgres_fdw--1.0.sql -./share/postgresql/extension/postgres_fdw.control -./share/postgresql/timezone/GB-Eire -./share/postgresql/timezone/Turkey -./share/postgresql/timezone/Kwajalein -./share/postgresql/timezone/UCT -./share/postgresql/timezone/Mexico/BajaSur -./share/postgresql/timezone/Mexico/BajaNorte -./share/postgresql/timezone/Mexico/General -./share/postgresql/timezone/Japan -./share/postgresql/timezone/Israel -./share/postgresql/timezone/US/Eastern -./share/postgresql/timezone/US/Samoa -./share/postgresql/timezone/US/Michigan -./share/postgresql/timezone/US/Aleutian -./share/postgresql/timezone/US/Pacific -./share/postgresql/timezone/US/Pacific-New -./share/postgresql/timezone/US/Indiana-Starke -./share/postgresql/timezone/US/Mountain -./share/postgresql/timezone/US/East-Indiana -./share/postgresql/timezone/US/Hawaii -./share/postgresql/timezone/US/Arizona -./share/postgresql/timezone/US/Alaska -./share/postgresql/timezone/US/Central -./share/postgresql/timezone/Greenwich -./share/postgresql/timezone/Poland -./share/postgresql/timezone/CET -./share/postgresql/timezone/GMT-0 -./share/postgresql/timezone/Indian/Mauritius -./share/postgresql/timezone/Indian/Cocos -./share/postgresql/timezone/Indian/Reunion -./share/postgresql/timezone/Indian/Maldives -./share/postgresql/timezone/Indian/Comoro -./share/postgresql/timezone/Indian/Antananarivo -./share/postgresql/timezone/Indian/Christmas -./share/postgresql/timezone/Indian/Kerguelen -./share/postgresql/timezone/Indian/Chagos -./share/postgresql/timezone/Indian/Mayotte -./share/postgresql/timezone/Indian/Mahe -./share/postgresql/timezone/GMT0 -./share/postgresql/timezone/Antarctica/Palmer -./share/postgresql/timezone/Antarctica/Syowa -./share/postgresql/timezone/Antarctica/South_Pole -./share/postgresql/timezone/Antarctica/McMurdo -./share/postgresql/timezone/Antarctica/Rothera -./share/postgresql/timezone/Antarctica/Mawson -./share/postgresql/timezone/Antarctica/Casey -./share/postgresql/timezone/Antarctica/Davis -./share/postgresql/timezone/Antarctica/DumontDUrville -./share/postgresql/timezone/Antarctica/Vostok -./share/postgresql/timezone/Antarctica/Macquarie -./share/postgresql/timezone/ROK -./share/postgresql/timezone/Chile/EasterIsland -./share/postgresql/timezone/Chile/Continental -./share/postgresql/timezone/posixrules -./share/postgresql/timezone/Atlantic/Azores -./share/postgresql/timezone/Atlantic/St_Helena -./share/postgresql/timezone/Atlantic/Madeira -./share/postgresql/timezone/Atlantic/Jan_Mayen -./share/postgresql/timezone/Atlantic/Faroe -./share/postgresql/timezone/Atlantic/Stanley -./share/postgresql/timezone/Atlantic/Cape_Verde -./share/postgresql/timezone/Atlantic/Reykjavik -./share/postgresql/timezone/Atlantic/Canary -./share/postgresql/timezone/Atlantic/South_Georgia -./share/postgresql/timezone/Atlantic/Bermuda -./share/postgresql/timezone/Atlantic/Faeroe -./share/postgresql/timezone/Hongkong -./share/postgresql/timezone/Libya -./share/postgresql/timezone/Iceland -./share/postgresql/timezone/UTC -./share/postgresql/timezone/Australia/Darwin -./share/postgresql/timezone/Australia/North -./share/postgresql/timezone/Australia/NSW -./share/postgresql/timezone/Australia/Sydney -./share/postgresql/timezone/Australia/Hobart -./share/postgresql/timezone/Australia/LHI -./share/postgresql/timezone/Australia/Victoria 
-./share/postgresql/timezone/Australia/South -./share/postgresql/timezone/Australia/Melbourne -./share/postgresql/timezone/Australia/Lord_Howe -./share/postgresql/timezone/Australia/West -./share/postgresql/timezone/Australia/Brisbane -./share/postgresql/timezone/Australia/Perth -./share/postgresql/timezone/Australia/Eucla -./share/postgresql/timezone/Australia/Canberra -./share/postgresql/timezone/Australia/Queensland -./share/postgresql/timezone/Australia/Broken_Hill -./share/postgresql/timezone/Australia/Lindeman -./share/postgresql/timezone/Australia/ACT -./share/postgresql/timezone/Australia/Currie -./share/postgresql/timezone/Australia/Adelaide -./share/postgresql/timezone/Australia/Yancowinna -./share/postgresql/timezone/Australia/Tasmania -./share/postgresql/timezone/Jamaica -./share/postgresql/timezone/EST5EDT -./share/postgresql/timezone/MET -./share/postgresql/timezone/W-SU -./share/postgresql/timezone/Mideast/Riyadh87 -./share/postgresql/timezone/Mideast/Riyadh88 -./share/postgresql/timezone/Mideast/Riyadh89 -./share/postgresql/timezone/WET -./share/postgresql/timezone/ROC -./share/postgresql/timezone/Factory -./share/postgresql/timezone/EET -./share/postgresql/timezone/PST8PDT -./share/postgresql/timezone/Portugal -./share/postgresql/timezone/NZ -./share/postgresql/timezone/Brazil/West -./share/postgresql/timezone/Brazil/DeNoronha -./share/postgresql/timezone/Brazil/Acre -./share/postgresql/timezone/Brazil/East -./share/postgresql/timezone/EST -./share/postgresql/timezone/Egypt -./share/postgresql/timezone/Universal -./share/postgresql/timezone/Pacific/Enderbury -./share/postgresql/timezone/Pacific/Noumea -./share/postgresql/timezone/Pacific/Kwajalein -./share/postgresql/timezone/Pacific/Norfolk -./share/postgresql/timezone/Pacific/Nauru -./share/postgresql/timezone/Pacific/Efate -./share/postgresql/timezone/Pacific/Kosrae -./share/postgresql/timezone/Pacific/Galapagos -./share/postgresql/timezone/Pacific/Truk -./share/postgresql/timezone/Pacific/Fiji -./share/postgresql/timezone/Pacific/Auckland -./share/postgresql/timezone/Pacific/Samoa -./share/postgresql/timezone/Pacific/Port_Moresby -./share/postgresql/timezone/Pacific/Johnston -./share/postgresql/timezone/Pacific/Apia -./share/postgresql/timezone/Pacific/Tarawa -./share/postgresql/timezone/Pacific/Pitcairn -./share/postgresql/timezone/Pacific/Marquesas -./share/postgresql/timezone/Pacific/Chatham -./share/postgresql/timezone/Pacific/Tahiti -./share/postgresql/timezone/Pacific/Tongatapu -./share/postgresql/timezone/Pacific/Saipan -./share/postgresql/timezone/Pacific/Fakaofo -./share/postgresql/timezone/Pacific/Guam -./share/postgresql/timezone/Pacific/Niue -./share/postgresql/timezone/Pacific/Chuuk -./share/postgresql/timezone/Pacific/Easter -./share/postgresql/timezone/Pacific/Wallis -./share/postgresql/timezone/Pacific/Gambier -./share/postgresql/timezone/Pacific/Majuro -./share/postgresql/timezone/Pacific/Kiritimati -./share/postgresql/timezone/Pacific/Guadalcanal -./share/postgresql/timezone/Pacific/Funafuti -./share/postgresql/timezone/Pacific/Rarotonga -./share/postgresql/timezone/Pacific/Pago_Pago -./share/postgresql/timezone/Pacific/Midway -./share/postgresql/timezone/Pacific/Palau -./share/postgresql/timezone/Pacific/Honolulu -./share/postgresql/timezone/Pacific/Yap -./share/postgresql/timezone/Pacific/Pohnpei -./share/postgresql/timezone/Pacific/Wake -./share/postgresql/timezone/Pacific/Ponape -./share/postgresql/timezone/Iran -./share/postgresql/timezone/Etc/GMT-4 -./share/postgresql/timezone/Etc/GMT-9 
-./share/postgresql/timezone/Etc/UCT -./share/postgresql/timezone/Etc/GMT-7 -./share/postgresql/timezone/Etc/Greenwich -./share/postgresql/timezone/Etc/GMT-0 -./share/postgresql/timezone/Etc/GMT-5 -./share/postgresql/timezone/Etc/GMT0 -./share/postgresql/timezone/Etc/GMT+7 -./share/postgresql/timezone/Etc/GMT-2 -./share/postgresql/timezone/Etc/GMT-10 -./share/postgresql/timezone/Etc/GMT+6 -./share/postgresql/timezone/Etc/GMT+1 -./share/postgresql/timezone/Etc/UTC -./share/postgresql/timezone/Etc/GMT-3 -./share/postgresql/timezone/Etc/GMT-1 -./share/postgresql/timezone/Etc/GMT-8 -./share/postgresql/timezone/Etc/GMT-11 -./share/postgresql/timezone/Etc/GMT+12 -./share/postgresql/timezone/Etc/GMT+10 -./share/postgresql/timezone/Etc/GMT-12 -./share/postgresql/timezone/Etc/GMT+5 -./share/postgresql/timezone/Etc/Universal -./share/postgresql/timezone/Etc/GMT-13 -./share/postgresql/timezone/Etc/GMT-6 -./share/postgresql/timezone/Etc/GMT+9 -./share/postgresql/timezone/Etc/GMT+3 -./share/postgresql/timezone/Etc/GMT-14 -./share/postgresql/timezone/Etc/GMT+4 -./share/postgresql/timezone/Etc/Zulu -./share/postgresql/timezone/Etc/GMT+2 -./share/postgresql/timezone/Etc/GMT+0 -./share/postgresql/timezone/Etc/GMT+8 -./share/postgresql/timezone/Etc/GMT+11 -./share/postgresql/timezone/Etc/GMT -./share/postgresql/timezone/Zulu -./share/postgresql/timezone/GMT+0 -./share/postgresql/timezone/Singapore -./share/postgresql/timezone/NZ-CHAT -./share/postgresql/timezone/Cuba -./share/postgresql/timezone/GB -./share/postgresql/timezone/Arctic/Longyearbyen -./share/postgresql/timezone/MST7MDT -./share/postgresql/timezone/PRC -./share/postgresql/timezone/Canada/Eastern -./share/postgresql/timezone/Canada/Yukon -./share/postgresql/timezone/Canada/Atlantic -./share/postgresql/timezone/Canada/Newfoundland -./share/postgresql/timezone/Canada/Saskatchewan -./share/postgresql/timezone/Canada/Pacific -./share/postgresql/timezone/Canada/East-Saskatchewan -./share/postgresql/timezone/Canada/Mountain -./share/postgresql/timezone/Canada/Central -./share/postgresql/timezone/CST6CDT -./share/postgresql/timezone/HST -./share/postgresql/timezone/America/Boa_Vista -./share/postgresql/timezone/America/New_York -./share/postgresql/timezone/America/Santarem -./share/postgresql/timezone/America/Boise -./share/postgresql/timezone/America/St_Lucia -./share/postgresql/timezone/America/Mendoza -./share/postgresql/timezone/America/Mexico_City -./share/postgresql/timezone/America/Chihuahua -./share/postgresql/timezone/America/Indianapolis -./share/postgresql/timezone/America/Virgin -./share/postgresql/timezone/America/Atka -./share/postgresql/timezone/America/Winnipeg -./share/postgresql/timezone/America/Hermosillo -./share/postgresql/timezone/America/Indiana/Indianapolis -./share/postgresql/timezone/America/Indiana/Tell_City -./share/postgresql/timezone/America/Indiana/Winamac -./share/postgresql/timezone/America/Indiana/Knox -./share/postgresql/timezone/America/Indiana/Vincennes -./share/postgresql/timezone/America/Indiana/Vevay -./share/postgresql/timezone/America/Indiana/Petersburg -./share/postgresql/timezone/America/Indiana/Marengo -./share/postgresql/timezone/America/Moncton -./share/postgresql/timezone/America/Campo_Grande -./share/postgresql/timezone/America/Guyana -./share/postgresql/timezone/America/Caracas -./share/postgresql/timezone/America/Maceio -./share/postgresql/timezone/America/Godthab -./share/postgresql/timezone/America/Thunder_Bay -./share/postgresql/timezone/America/Havana -./share/postgresql/timezone/America/Santiago 
-./share/postgresql/timezone/America/Los_Angeles -./share/postgresql/timezone/America/Buenos_Aires -./share/postgresql/timezone/America/Manaus -./share/postgresql/timezone/America/Bahia -./share/postgresql/timezone/America/North_Dakota/New_Salem -./share/postgresql/timezone/America/North_Dakota/Beulah -./share/postgresql/timezone/America/North_Dakota/Center -./share/postgresql/timezone/America/Bahia_Banderas -./share/postgresql/timezone/America/Edmonton -./share/postgresql/timezone/America/Tegucigalpa -./share/postgresql/timezone/America/Rankin_Inlet -./share/postgresql/timezone/America/Monterrey -./share/postgresql/timezone/America/Cambridge_Bay -./share/postgresql/timezone/America/Porto_Velho -./share/postgresql/timezone/America/Antigua -./share/postgresql/timezone/America/Atikokan -./share/postgresql/timezone/America/Vancouver -./share/postgresql/timezone/America/Anchorage -./share/postgresql/timezone/America/Port-au-Prince -./share/postgresql/timezone/America/Lima -./share/postgresql/timezone/America/Grenada -./share/postgresql/timezone/America/Creston -./share/postgresql/timezone/America/La_Paz -./share/postgresql/timezone/America/Panama -./share/postgresql/timezone/America/Blanc-Sablon -./share/postgresql/timezone/America/Cayenne -./share/postgresql/timezone/America/Santo_Domingo -./share/postgresql/timezone/America/Grand_Turk -./share/postgresql/timezone/America/Toronto -./share/postgresql/timezone/America/Rainy_River -./share/postgresql/timezone/America/Merida -./share/postgresql/timezone/America/Port_of_Spain -./share/postgresql/timezone/America/Nipigon -./share/postgresql/timezone/America/Jamaica -./share/postgresql/timezone/America/Rosario -./share/postgresql/timezone/America/Dawson_Creek -./share/postgresql/timezone/America/Belize -./share/postgresql/timezone/America/Costa_Rica -./share/postgresql/timezone/America/Barbados -./share/postgresql/timezone/America/Danmarkshavn -./share/postgresql/timezone/America/Argentina/La_Rioja -./share/postgresql/timezone/America/Argentina/Mendoza -./share/postgresql/timezone/America/Argentina/Buenos_Aires -./share/postgresql/timezone/America/Argentina/Tucuman -./share/postgresql/timezone/America/Argentina/Ushuaia -./share/postgresql/timezone/America/Argentina/Catamarca -./share/postgresql/timezone/America/Argentina/ComodRivadavia -./share/postgresql/timezone/America/Argentina/Jujuy -./share/postgresql/timezone/America/Argentina/Cordoba -./share/postgresql/timezone/America/Argentina/San_Luis -./share/postgresql/timezone/America/Argentina/Rio_Gallegos -./share/postgresql/timezone/America/Argentina/Salta -./share/postgresql/timezone/America/Argentina/San_Juan -./share/postgresql/timezone/America/Pangnirtung -./share/postgresql/timezone/America/Anguilla -./share/postgresql/timezone/America/Curacao -./share/postgresql/timezone/America/Cancun -./share/postgresql/timezone/America/Montreal -./share/postgresql/timezone/America/Shiprock -./share/postgresql/timezone/America/Thule -./share/postgresql/timezone/America/Scoresbysund -./share/postgresql/timezone/America/Catamarca -./share/postgresql/timezone/America/Sao_Paulo -./share/postgresql/timezone/America/Sitka -./share/postgresql/timezone/America/Asuncion -./share/postgresql/timezone/America/Regina -./share/postgresql/timezone/America/St_Johns -./share/postgresql/timezone/America/Montevideo -./share/postgresql/timezone/America/Eirunepe -./share/postgresql/timezone/America/Denver -./share/postgresql/timezone/America/Metlakatla -./share/postgresql/timezone/America/Araguaina 
-./share/postgresql/timezone/America/Juneau -./share/postgresql/timezone/America/Marigot -./share/postgresql/timezone/America/Menominee -./share/postgresql/timezone/America/Glace_Bay -./share/postgresql/timezone/America/Tijuana -./share/postgresql/timezone/America/Detroit -./share/postgresql/timezone/America/Belem -./share/postgresql/timezone/America/Jujuy -./share/postgresql/timezone/America/St_Thomas -./share/postgresql/timezone/America/Resolute -./share/postgresql/timezone/America/Cuiaba -./share/postgresql/timezone/America/Halifax -./share/postgresql/timezone/America/St_Barthelemy -./share/postgresql/timezone/America/Guatemala -./share/postgresql/timezone/America/Nassau -./share/postgresql/timezone/America/St_Kitts -./share/postgresql/timezone/America/Cordoba -./share/postgresql/timezone/America/Miquelon -./share/postgresql/timezone/America/Bogota -./share/postgresql/timezone/America/Rio_Branco -./share/postgresql/timezone/America/Ensenada -./share/postgresql/timezone/America/Yakutat -./share/postgresql/timezone/America/Noronha -./share/postgresql/timezone/America/Kentucky/Monticello -./share/postgresql/timezone/America/Kentucky/Louisville -./share/postgresql/timezone/America/Porto_Acre -./share/postgresql/timezone/America/Santa_Isabel -./share/postgresql/timezone/America/El_Salvador -./share/postgresql/timezone/America/Yellowknife -./share/postgresql/timezone/America/Cayman -./share/postgresql/timezone/America/Whitehorse -./share/postgresql/timezone/America/Ojinaga -./share/postgresql/timezone/America/Aruba -./share/postgresql/timezone/America/Nome -./share/postgresql/timezone/America/Fortaleza -./share/postgresql/timezone/America/Martinique -./share/postgresql/timezone/America/Recife -./share/postgresql/timezone/America/Knox_IN -./share/postgresql/timezone/America/Guayaquil -./share/postgresql/timezone/America/Goose_Bay -./share/postgresql/timezone/America/Iqaluit -./share/postgresql/timezone/America/Matamoros -./share/postgresql/timezone/America/Lower_Princes -./share/postgresql/timezone/America/Louisville -./share/postgresql/timezone/America/Coral_Harbour -./share/postgresql/timezone/America/Phoenix -./share/postgresql/timezone/America/Guadeloupe -./share/postgresql/timezone/America/Mazatlan -./share/postgresql/timezone/America/Swift_Current -./share/postgresql/timezone/America/Paramaribo -./share/postgresql/timezone/America/Dominica -./share/postgresql/timezone/America/Kralendijk -./share/postgresql/timezone/America/Montserrat -./share/postgresql/timezone/America/St_Vincent -./share/postgresql/timezone/America/Fort_Wayne -./share/postgresql/timezone/America/Dawson -./share/postgresql/timezone/America/Inuvik -./share/postgresql/timezone/America/Adak -./share/postgresql/timezone/America/Managua -./share/postgresql/timezone/America/Puerto_Rico -./share/postgresql/timezone/America/Tortola -./share/postgresql/timezone/America/Chicago -./share/postgresql/timezone/Africa/Lome -./share/postgresql/timezone/Africa/Brazzaville -./share/postgresql/timezone/Africa/Khartoum -./share/postgresql/timezone/Africa/Ceuta -./share/postgresql/timezone/Africa/Djibouti -./share/postgresql/timezone/Africa/Lagos -./share/postgresql/timezone/Africa/Accra -./share/postgresql/timezone/Africa/El_Aaiun -./share/postgresql/timezone/Africa/Malabo -./share/postgresql/timezone/Africa/Windhoek -./share/postgresql/timezone/Africa/Tripoli -./share/postgresql/timezone/Africa/Bissau -./share/postgresql/timezone/Africa/Blantyre -./share/postgresql/timezone/Africa/Kinshasa -./share/postgresql/timezone/Africa/Porto-Novo 
-./share/postgresql/timezone/Africa/Nairobi -./share/postgresql/timezone/Africa/Ouagadougou -./share/postgresql/timezone/Africa/Asmera -./share/postgresql/timezone/Africa/Cairo -./share/postgresql/timezone/Africa/Lubumbashi -./share/postgresql/timezone/Africa/Tunis -./share/postgresql/timezone/Africa/Dar_es_Salaam -./share/postgresql/timezone/Africa/Casablanca -./share/postgresql/timezone/Africa/Algiers -./share/postgresql/timezone/Africa/Mbabane -./share/postgresql/timezone/Africa/Monrovia -./share/postgresql/timezone/Africa/Nouakchott -./share/postgresql/timezone/Africa/Banjul -./share/postgresql/timezone/Africa/Kampala -./share/postgresql/timezone/Africa/Conakry -./share/postgresql/timezone/Africa/Mogadishu -./share/postgresql/timezone/Africa/Ndjamena -./share/postgresql/timezone/Africa/Niamey -./share/postgresql/timezone/Africa/Lusaka -./share/postgresql/timezone/Africa/Addis_Ababa -./share/postgresql/timezone/Africa/Sao_Tome -./share/postgresql/timezone/Africa/Abidjan -./share/postgresql/timezone/Africa/Harare -./share/postgresql/timezone/Africa/Asmara -./share/postgresql/timezone/Africa/Douala -./share/postgresql/timezone/Africa/Freetown -./share/postgresql/timezone/Africa/Libreville -./share/postgresql/timezone/Africa/Luanda -./share/postgresql/timezone/Africa/Maseru -./share/postgresql/timezone/Africa/Gaborone -./share/postgresql/timezone/Africa/Maputo -./share/postgresql/timezone/Africa/Timbuktu -./share/postgresql/timezone/Africa/Bangui -./share/postgresql/timezone/Africa/Bamako -./share/postgresql/timezone/Africa/Dakar -./share/postgresql/timezone/Africa/Juba -./share/postgresql/timezone/Africa/Bujumbura -./share/postgresql/timezone/Africa/Johannesburg -./share/postgresql/timezone/Africa/Kigali -./share/postgresql/timezone/Eire -./share/postgresql/timezone/Europe/Vaduz -./share/postgresql/timezone/Europe/Podgorica -./share/postgresql/timezone/Europe/Rome -./share/postgresql/timezone/Europe/Vienna -./share/postgresql/timezone/Europe/Dublin -./share/postgresql/timezone/Europe/Zurich -./share/postgresql/timezone/Europe/London -./share/postgresql/timezone/Europe/Monaco -./share/postgresql/timezone/Europe/Sofia -./share/postgresql/timezone/Europe/Uzhgorod -./share/postgresql/timezone/Europe/Minsk -./share/postgresql/timezone/Europe/Malta -./share/postgresql/timezone/Europe/Busingen -./share/postgresql/timezone/Europe/Gibraltar -./share/postgresql/timezone/Europe/Volgograd -./share/postgresql/timezone/Europe/Budapest -./share/postgresql/timezone/Europe/Vatican -./share/postgresql/timezone/Europe/Luxembourg -./share/postgresql/timezone/Europe/Chisinau -./share/postgresql/timezone/Europe/Nicosia -./share/postgresql/timezone/Europe/Warsaw -./share/postgresql/timezone/Europe/San_Marino -./share/postgresql/timezone/Europe/Copenhagen -./share/postgresql/timezone/Europe/Ljubljana -./share/postgresql/timezone/Europe/Athens -./share/postgresql/timezone/Europe/Skopje -./share/postgresql/timezone/Europe/Andorra -./share/postgresql/timezone/Europe/Kaliningrad -./share/postgresql/timezone/Europe/Amsterdam -./share/postgresql/timezone/Europe/Guernsey -./share/postgresql/timezone/Europe/Isle_of_Man -./share/postgresql/timezone/Europe/Tirane -./share/postgresql/timezone/Europe/Jersey -./share/postgresql/timezone/Europe/Madrid -./share/postgresql/timezone/Europe/Helsinki -./share/postgresql/timezone/Europe/Riga -./share/postgresql/timezone/Europe/Zagreb -./share/postgresql/timezone/Europe/Bratislava -./share/postgresql/timezone/Europe/Prague -./share/postgresql/timezone/Europe/Tallinn 
-./share/postgresql/timezone/Europe/Stockholm -./share/postgresql/timezone/Europe/Tiraspol -./share/postgresql/timezone/Europe/Belgrade -./share/postgresql/timezone/Europe/Bucharest -./share/postgresql/timezone/Europe/Vilnius -./share/postgresql/timezone/Europe/Sarajevo -./share/postgresql/timezone/Europe/Belfast -./share/postgresql/timezone/Europe/Zaporozhye -./share/postgresql/timezone/Europe/Oslo -./share/postgresql/timezone/Europe/Mariehamn -./share/postgresql/timezone/Europe/Moscow -./share/postgresql/timezone/Europe/Brussels -./share/postgresql/timezone/Europe/Paris -./share/postgresql/timezone/Europe/Istanbul -./share/postgresql/timezone/Europe/Simferopol -./share/postgresql/timezone/Europe/Lisbon -./share/postgresql/timezone/Europe/Berlin -./share/postgresql/timezone/Europe/Kiev -./share/postgresql/timezone/Europe/Samara -./share/postgresql/timezone/MST -./share/postgresql/timezone/Asia/Khandyga -./share/postgresql/timezone/Asia/Manila -./share/postgresql/timezone/Asia/Novokuznetsk -./share/postgresql/timezone/Asia/Baghdad -./share/postgresql/timezone/Asia/Macau -./share/postgresql/timezone/Asia/Urumqi -./share/postgresql/timezone/Asia/Ujung_Pandang -./share/postgresql/timezone/Asia/Ulan_Bator -./share/postgresql/timezone/Asia/Bishkek -./share/postgresql/timezone/Asia/Qatar -./share/postgresql/timezone/Asia/Qyzylorda -./share/postgresql/timezone/Asia/Calcutta -./share/postgresql/timezone/Asia/Riyadh87 -./share/postgresql/timezone/Asia/Dushanbe -./share/postgresql/timezone/Asia/Yekaterinburg -./share/postgresql/timezone/Asia/Dhaka -./share/postgresql/timezone/Asia/Jakarta -./share/postgresql/timezone/Asia/Shanghai -./share/postgresql/timezone/Asia/Ulaanbaatar -./share/postgresql/timezone/Asia/Jerusalem -./share/postgresql/timezone/Asia/Ashkhabad -./share/postgresql/timezone/Asia/Tokyo -./share/postgresql/timezone/Asia/Macao -./share/postgresql/timezone/Asia/Krasnoyarsk -./share/postgresql/timezone/Asia/Saigon -./share/postgresql/timezone/Asia/Omsk -./share/postgresql/timezone/Asia/Damascus -./share/postgresql/timezone/Asia/Phnom_Penh -./share/postgresql/timezone/Asia/Bangkok -./share/postgresql/timezone/Asia/Kamchatka -./share/postgresql/timezone/Asia/Choibalsan -./share/postgresql/timezone/Asia/Ust-Nera -./share/postgresql/timezone/Asia/Aden -./share/postgresql/timezone/Asia/Vientiane -./share/postgresql/timezone/Asia/Sakhalin -./share/postgresql/timezone/Asia/Ashgabat -./share/postgresql/timezone/Asia/Katmandu -./share/postgresql/timezone/Asia/Almaty -./share/postgresql/timezone/Asia/Baku -./share/postgresql/timezone/Asia/Nicosia -./share/postgresql/timezone/Asia/Riyadh88 -./share/postgresql/timezone/Asia/Kashgar -./share/postgresql/timezone/Asia/Riyadh89 -./share/postgresql/timezone/Asia/Taipei -./share/postgresql/timezone/Asia/Tehran -./share/postgresql/timezone/Asia/Kabul -./share/postgresql/timezone/Asia/Samarkand -./share/postgresql/timezone/Asia/Kuala_Lumpur -./share/postgresql/timezone/Asia/Tashkent -./share/postgresql/timezone/Asia/Thimbu -./share/postgresql/timezone/Asia/Thimphu -./share/postgresql/timezone/Asia/Yerevan -./share/postgresql/timezone/Asia/Chungking -./share/postgresql/timezone/Asia/Hebron -./share/postgresql/timezone/Asia/Karachi -./share/postgresql/timezone/Asia/Kolkata -./share/postgresql/timezone/Asia/Aqtobe -./share/postgresql/timezone/Asia/Muscat -./share/postgresql/timezone/Asia/Hong_Kong -./share/postgresql/timezone/Asia/Chongqing -./share/postgresql/timezone/Asia/Oral -./share/postgresql/timezone/Asia/Pontianak 
-./share/postgresql/timezone/Asia/Colombo -./share/postgresql/timezone/Asia/Pyongyang -./share/postgresql/timezone/Asia/Hovd -./share/postgresql/timezone/Asia/Kuwait -./share/postgresql/timezone/Asia/Anadyr -./share/postgresql/timezone/Asia/Kathmandu -./share/postgresql/timezone/Asia/Irkutsk -./share/postgresql/timezone/Asia/Bahrain -./share/postgresql/timezone/Asia/Dubai -./share/postgresql/timezone/Asia/Jayapura -./share/postgresql/timezone/Asia/Riyadh -./share/postgresql/timezone/Asia/Ho_Chi_Minh -./share/postgresql/timezone/Asia/Singapore -./share/postgresql/timezone/Asia/Tel_Aviv -./share/postgresql/timezone/Asia/Dili -./share/postgresql/timezone/Asia/Rangoon -./share/postgresql/timezone/Asia/Harbin -./share/postgresql/timezone/Asia/Yakutsk -./share/postgresql/timezone/Asia/Magadan -./share/postgresql/timezone/Asia/Amman -./share/postgresql/timezone/Asia/Kuching -./share/postgresql/timezone/Asia/Novosibirsk -./share/postgresql/timezone/Asia/Seoul -./share/postgresql/timezone/Asia/Dacca -./share/postgresql/timezone/Asia/Vladivostok -./share/postgresql/timezone/Asia/Istanbul -./share/postgresql/timezone/Asia/Beirut -./share/postgresql/timezone/Asia/Aqtau -./share/postgresql/timezone/Asia/Brunei -./share/postgresql/timezone/Asia/Gaza -./share/postgresql/timezone/Asia/Tbilisi -./share/postgresql/timezone/Asia/Makassar -./share/postgresql/timezone/Asia/Beijing -./share/postgresql/timezone/Navajo -./share/postgresql/timezone/GMT -./share/postgresql/system_views.sql -./share/postgresql/private_system_views.sql -./share/postgresql/performance_views.sql -./share/postgresql/sql_features.txt -./share/postgresql/pg_cast_oid.txt -./share/postgresql/recovery.conf.sample -./share/postgresql/tsearch_data/english.stop -./share/postgresql/tsearch_data/dutch.stop -./share/postgresql/tsearch_data/hungarian.stop -./share/postgresql/tsearch_data/french.stop -./share/postgresql/tsearch_data/synonym_sample.syn -./share/postgresql/tsearch_data/turkish.stop -./share/postgresql/tsearch_data/portuguese.stop -./share/postgresql/tsearch_data/spanish.stop -./share/postgresql/tsearch_data/hunspell_sample.affix -./share/postgresql/tsearch_data/ispell_sample.affix -./share/postgresql/tsearch_data/danish.stop -./share/postgresql/tsearch_data/german.stop -./share/postgresql/tsearch_data/thesaurus_sample.ths -./share/postgresql/tsearch_data/norwegian.stop -./share/postgresql/tsearch_data/finnish.stop -./share/postgresql/tsearch_data/russian.stop -./share/postgresql/tsearch_data/swedish.stop -./share/postgresql/tsearch_data/ispell_sample.dict -./share/postgresql/tsearch_data/italian.stop -./share/postgresql/information_schema.sql -./share/postgresql/timezonesets/Antarctica.txt -./share/postgresql/timezonesets/Australia.txt -./share/postgresql/timezonesets/Europe.txt -./share/postgresql/timezonesets/America.txt -./share/postgresql/timezonesets/Australia -./share/postgresql/timezonesets/Indian.txt -./share/postgresql/timezonesets/India -./share/postgresql/timezonesets/Pacific.txt -./share/postgresql/timezonesets/Atlantic.txt -./share/postgresql/timezonesets/Africa.txt -./share/postgresql/timezonesets/Asia.txt -./share/postgresql/timezonesets/Default -./share/postgresql/timezonesets/Etc.txt -./share/postgresql/postgres.bki -./share/llvmir/GaussDB_expr.ir -./share/sslcert/gsql/openssl.cnf -./share/sslcert/grpc/openssl.cnf -./lib/libsimsearch/ -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/postgresql/euc_kr_and_mic.so -./lib/postgresql/utf8_and_uhc.so -./lib/postgresql/euc_tw_and_big5.so 
-./lib/postgresql/cyrillic_and_mic.so -./lib/postgresql/utf8_and_johab.so -./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/src/get_PlatForm_str.sh -./lib/postgresql/pgxs/config/install-sh -./lib/postgresql/euc_cn_and_mic.so -./lib/postgresql/latin_and_mic.so -./lib/postgresql/utf8_and_sjis2004.so -./lib/postgresql/utf8_and_euc_jp.so -./lib/postgresql/utf8_and_sjis.so -./lib/postgresql/utf8_and_cyrillic.so -./lib/postgresql/hstore.so -./lib/postgresql/tsdb.so -./lib/postgresql/packages.so -./lib/postgresql/utf8_and_euc_kr.so -./lib/postgresql/ascii_and_mic.so -./lib/postgresql/utf8_and_iso8859_1.so -./lib/postgresql/euc_jp_and_sjis.so -./lib/postgresql/dict_snowball.so -./lib/postgresql/utf8_and_ascii.so -./lib/postgresql/utf8_and_euc_tw.so -./lib/postgresql/utf8_and_iso8859.so -./lib/postgresql/utf8_and_win.so -./lib/postgresql/utf8_and_euc_cn.so -./lib/postgresql/utf8_and_gbk.so -./lib/postgresql/utf8_and_euc2004.so -./lib/postgresql/utf8_and_big5.so -./lib/postgresql/mppdb_decoding.so -./lib/postgresql/dimsearch.so -./lib/postgresql/pg_plugin -./lib/postgresql/proc_srclib -./lib/postgresql/security_plugin.so -./lib/postgresql/pg_upgrade_support.so -./lib/postgresql/java/pljava.jar -./lib/postgresql/postgres_fdw.so -./lib/postgresql/pgoutput.so -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libcgroup.so -./lib/libcgroup.so.1 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -./lib/libatomic.so -./lib/libatomic.so.1 -./lib/libatomic.so.1.2.0 -./lib/libmasstree.so -./lib/libupb.so -./lib/libupb.so.9 -./lib/libupb.so.9.0.0 -./lib/libabsl_str_format_internal.so -./lib/libabsl_strings.so -./lib/libabsl_throw_delegate.so -./lib/libabsl_strings_internal.so -./lib/libabsl_base.so -./lib/libabsl_dynamic_annotations.so -./lib/libabsl_spinlock_wait.so -./lib/libabsl_int128.so -./lib/libabsl_bad_optional_access.so -./lib/libabsl_raw_logging_internal.so -./lib/libabsl_log_severity.so -./lib/libaddress_sorting.so -./lib/libaddress_sorting.so.9 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkadm5clnt.so -./lib/libkadm5clnt_mit.so -./lib/libkadm5clnt_mit.so.11 -./lib/libkadm5clnt_mit.so.11.0 -./lib/libkadm5clnt_mit.so.12 -./lib/libkadm5clnt_mit.so.12.0 -./lib/libkadm5srv.so -./lib/libkadm5srv_mit.so -./lib/libkadm5srv_mit.so.11 -./lib/libkadm5srv_mit.so.11.0 -./lib/libkadm5srv_mit.so.12 -./lib/libkadm5srv_mit.so.12.0 -./lib/libkdb5.so -./lib/libkdb5.so.9 -./lib/libkdb5.so.9.0 -./lib/libkdb5.so.10 -./lib/libkdb5.so.10.0 -./lib/libkrad.so -./lib/libkrad.so.0 -./lib/libkrad.so.0.0 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/krb5/plugins/kdb/db2.so -./lib/libverto.so -./lib/libverto.so.0 -./lib/libverto.so.0.0 -./lib/libcurl.so -./lib/libcurl.so.4 -./lib/libcurl.so.4.6.0 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libssl.so 
-./lib/libssl.so.1.1 -./lib/libgcc_s.so.1 -./lib/libstdc++.so.6 -./lib/libz.so -./lib/libz.so.1 -./lib/libz.so.1.2.11 -./lib/liblz4.so -./lib/liblz4.so.1 -./lib/liblz4.so.1.9.2 -./lib/libcjson.so -./lib/libcjson.so.1 -./lib/libcjson.so.1.7.13 -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./share/llvmir/GaussDB_expr.ir -./lib/libeSDKLogAPI.so -./lib/libeSDKOBS.so -./lib/liblog4cpp.so -./lib/liblog4cpp.so.5 -./lib/liblog4cpp.so.5.0.6 -./lib/libcharset.so -./lib/libcharset.so.1 -./lib/libcharset.so.1.0.0 -./lib/libiconv.so -./lib/libiconv.so.2 -./lib/libiconv.so.2.6.1 -./lib/libnghttp2.so -./lib/libnghttp2.so.14 -./lib/libnghttp2.so.14.20.0 -./lib/libpcre.so -./lib/libpcre.so.1 -./lib/libpcre.so.1.2.12 -./lib/libsecurec.so -./lib/libxml2.so -./lib/libxml2.so.2 -./lib/libxml2.so.2.9.9 -./lib/libparquet.so -./lib/libparquet.so.14 -./lib/libparquet.so.14.1.0 -./lib/libarrow.so -./lib/libarrow.so.14 -./lib/libarrow.so.14.1.0 -./lib/OBS.ini -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/libdcf.so -./lib/libzstd.so -./lib/libzstd.so.1 -./lib/libzstd.so.1.4.4 - -./include/postgresql/server/postgres_ext.h -./include/postgresql/server/pg_config_os.h -./include/postgresql/server/pgtime.h -./include/postgresql/server/datatypes.h -./include/postgresql/server/client_logic/client_logic_enums.h -./include/postgresql/server/nodes/primnodes.h -./include/postgresql/server/nodes/parsenodes.h -./include/postgresql/server/nodes/parsenodes_common.h -./include/postgresql/server/nodes/bitmapset.h -./include/postgresql/server/nodes/pg_list.h -./include/postgresql/server/nodes/value.h -./include/postgresql/server/nodes/nodes.h -./include/postgresql/server/utils/sortsupport.h -./include/postgresql/server/utils/varbit.h -./include/postgresql/server/utils/spccache.h -./include/postgresql/server/utils/rangetypes.h -./include/postgresql/server/utils/plpgsql.h -./include/postgresql/server/utils/memtrack.h -./include/postgresql/server/utils/pg_locale.h -./include/postgresql/server/utils/tzparser.h -./include/postgresql/server/utils/syscall_lock.h -./include/postgresql/server/utils/partitionmap.h -./include/postgresql/server/utils/array.h -./include/postgresql/server/utils/relmapper.h -./include/postgresql/server/utils/hsearch.h -./include/postgresql/server/utils/xml.h -./include/postgresql/server/utils/bytea.h -./include/postgresql/server/utils/relcache.h -./include/postgresql/server/utils/pg_rusage.h -./include/postgresql/server/utils/numeric.h -./include/postgresql/server/utils/mmpool.h -./include/postgresql/server/utils/nabstime.h -./include/postgresql/server/utils/fmgrtab.h -./include/postgresql/server/utils/snapmgr.h -./include/postgresql/server/utils/syscache.h -./include/postgresql/server/utils/logtape.h -./include/postgresql/server/utils/datum.h -./include/postgresql/server/utils/guc_tables.h -./include/postgresql/server/utils/snapshot.h -./include/postgresql/server/utils/geo_decls.h -./include/postgresql/server/utils/errcodes.h -./include/postgresql/server/utils/inval.h -./include/postgresql/server/utils/help_config.h -./include/postgresql/server/utils/distribute_test.h -./include/postgresql/server/utils/aiomem.h -./include/postgresql/server/utils/tuplestore.h -./include/postgresql/server/utils/rbtree.h -./include/postgresql/server/utils/gs_bitmap.h -./include/postgresql/server/utils/tuplesort.h -./include/postgresql/server/utils/tqual.h -./include/postgresql/server/utils/ps_status.h -./include/postgresql/server/utils/palloc.h 
-./include/postgresql/server/utils/reltrigger.h -./include/postgresql/server/utils/acl.h -./include/postgresql/server/utils/ascii.h -./include/postgresql/server/utils/selfuncs.h -./include/postgresql/server/utils/json.h -./include/postgresql/server/utils/portal.h -./include/postgresql/server/utils/atomic.h -./include/postgresql/server/utils/elog.h -./include/postgresql/server/utils/date.h -./include/postgresql/server/utils/plancache.h -./include/postgresql/server/utils/int8.h -./include/postgresql/server/utils/timestamp.h -./include/postgresql/server/utils/bloom_filter.h -./include/postgresql/server/utils/fmgroids.h -./include/postgresql/server/utils/pg_crc_tables.h -./include/postgresql/server/utils/probes.h -./include/postgresql/server/utils/datetime.h -./include/postgresql/server/utils/inet.h -./include/postgresql/server/utils/pg_lzcompress.h -./include/postgresql/server/utils/pg_crc.h -./include/postgresql/server/utils/attoptcache.h -./include/postgresql/server/utils/dynahash.h -./include/postgresql/server/utils/rel.h -./include/postgresql/server/utils/partcache.h -./include/postgresql/server/utils/lsyscache.h -./include/postgresql/server/utils/memutils.h -./include/postgresql/server/utils/memprot.h -./include/postgresql/server/utils/uuid.h -./include/postgresql/server/utils/combocid.h -./include/postgresql/server/utils/builtins.h -./include/postgresql/server/utils/guc.h -./include/postgresql/server/utils/dfs_vector.h -./include/postgresql/server/utils/dynamic_loader.h -./include/postgresql/server/utils/resowner.h -./include/postgresql/server/utils/aes.h -./include/postgresql/server/utils/cash.h -./include/postgresql/server/utils/typcache.h -./include/postgresql/server/utils/formatting.h -./include/postgresql/server/utils/partitionkey.h -./include/postgresql/server/utils/aset.h -./include/postgresql/server/utils/catcache.h -./include/postgresql/server/utils/atomic_arm.h -./include/postgresql/server/utils/oidrbtree.h -./include/postgresql/server/datatype/timestamp.h -./include/postgresql/server/access/rmgr.h -./include/postgresql/server/access/xlogreader.h -./include/postgresql/server/access/xlog_basic.h -./include/postgresql/server/access/tupdesc.h -./include/postgresql/server/access/rmgrlist.h -./include/postgresql/server/access/htup.h -./include/postgresql/server/access/xlogdefs.h -./include/postgresql/server/access/attnum.h -./include/postgresql/server/access/tupmacs.h -./include/postgresql/server/access/xlogrecord.h -./include/postgresql/server/tde_key_management/data_common.h -./include/postgresql/server/tcop/dest.h -./include/postgresql/server/catalog/pg_type.h -./include/postgresql/server/catalog/pg_attribute.h -./include/postgresql/server/catalog/genbki.h -./include/postgresql/server/gs_thread.h -./include/postgresql/server/port/pg_bswap.h -./include/postgresql/server/port/pg_crc32c.h -./include/postgresql/server/securec.h -./include/postgresql/server/securectype.h -./include/postgresql/server/storage/off.h -./include/postgresql/server/storage/block.h -./include/postgresql/server/storage/item.h -./include/postgresql/server/storage/smgr/relfilenode.h -./include/postgresql/server/storage/bufpage.h -./include/postgresql/server/storage/spin.h -./include/postgresql/server/storage/buf.h -./include/postgresql/server/storage/itemid.h -./include/postgresql/server/storage/pg_sema.h -./include/postgresql/server/storage/itemptr.h -./include/postgresql/server/storage/s_lock.h -./include/postgresql/server/storage/backendid.h -./include/postgresql/server/storage/lock.h 
-./include/postgresql/server/storage/lwlock.h -./include/postgresql/server/storage/barrier.h -./include/postgresql/server/storage/shmem.h -./include/postgresql/server/pg_config.h -./include/postgresql/server/lib/stringinfo.h -./include/postgresql/server/fmgr.h -./include/postgresql/server/fmgr/fmgr_comp.h -./include/postgresql/server/fmgr/fmgr_core.h -./include/postgresql/server/gs_threadlocal.h -./include/postgresql/server/postgres.h -./include/postgresql/server/executor/tuptable.h -./include/postgresql/server/pg_config_manual.h -./include/postgresql/server/mb/pg_wchar.h -./include/postgresql/server/c.h -./include/postgresql/server/port.h -./include/postgresql/server/utils/be_module.h -./include/postgresql/server/nodes/params.h -./include/postgresql/server/securec_check.h -./include/postgresql/server/nodes/memnodes.h -./include/postgresql/server/access/skey.h -./include/postgresql/server/lib/dllist.h -./include/postgresql/server/lib/ilist.h -./include/postgresql/server/pgxc/locator.h -./include/postgresql/server/gstrace/gstrace_infra.h -./include/postgresql/server/extension_dependency.h -./include/postgresql/server/libpq/libpq-fe.h -./include/postgresql/server/access/clog.h -./include/postgresql/server/storage/proc.h -./include/postgresql/server/access/xlog.h -./include/postgresql/server/storage/lwlocknames.h -./include/postgresql/server/access/xloginsert.h -./include/postgresql/server/catalog/pg_control.h -./include/postgresql/server/access/parallel_recovery/redo_item.h -./include/postgresql/server/access/parallel_recovery/posix_semaphore.h -./include/postgresql/server/replication/replicainternal.h -./include/postgresql/server/knl/knl_instance.h -./include/postgresql/server/knl/knl_guc.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_guc_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_network.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_network.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_common.h -./include/postgresql/server/lib/circularqueue.h -./include/postgresql/server/access/double_write_basic.h -./include/postgresql/server/knl/knl_thread.h -./include/postgresql/server/access/sdir.h -./include/postgresql/server/gssignal/gs_signal.h -./include/postgresql/server/knl/knl_session.h -./include/postgresql/server/libpq/pqcomm.h -./include/postgresql/server/cipher.h -./include/postgresql/server/portability/instr_time.h -./include/postgresql/server/utils/memgroup.h -./include/postgresql/server/storage/latch.h -./include/postgresql/server/workload/qnode.h -./include/postgresql/server/streaming/init.h -./include/postgresql/server/streaming/launcher.h -./include/postgresql/server/pgxc/barrier.h -./include/postgresql/server/libcomm/libcomm.h 
-./include/postgresql/server/hotpatch/hotpatch.h -./include/postgresql/server/hotpatch/hotpatch_backend.h -./include/postgresql/server/postmaster/bgwriter.h -./include/postgresql/server/postmaster/pagewriter.h -./include/postgresql/server/replication/heartbeat.h -./include/postgresql/server/access/multi_redo_settings.h -./include/postgresql/server/access/redo_statistic_msg.h -./include/postgresql/server/replication/rto_statistic.h -./include/postgresql/server/replication/walprotocol.h -./include/postgresql/server/storage/mot/jit_def.h -./include/postgresql/server/threadpool/threadpool.h -./include/postgresql/server/threadpool/threadpool_controler.h -./include/postgresql/server/threadpool/threadpool_group.h -./include/postgresql/server/knl/knl_variable.h -./include/postgresql/server/threadpool/threadpool_listener.h -./include/postgresql/server/threadpool/threadpool_sessctl.h -./include/postgresql/server/storage/procsignal.h -./include/postgresql/server/threadpool/threadpool_worker.h -./include/postgresql/server/threadpool/threadpool_scheduler.h -./include/postgresql/server/threadpool/threadpool_stream.h -./include/postgresql/server/replication/dataqueuedefs.h -./include/postgresql/server/gtm/gtm_c.h -./include/postgresql/server/cm/etcdapi.h -./include/postgresql/server/alarm/alarm.h -./include/postgresql/server/access/xact.h -./include/postgresql/server/access/cstore_am.h -./include/postgresql/server/access/cstore_roughcheck_func.h -./include/postgresql/server/access/cstoreskey.h -./include/postgresql/server/storage/cu.h -./include/postgresql/server/vecexecutor/vectorbatch.h -./include/postgresql/server/cstore.h -./include/postgresql/server/storage/cstore/cstore_mem_alloc.h -./include/postgresql/server/access/cstore_minmax_func.h -./include/postgresql/server/storage/custorage.h -./include/postgresql/server/storage/fd.h -./include/postgresql/server/postmaster/aiocompleter.h -./include/postgresql/server/storage/buf/bufmgr.h -./include/postgresql/server/storage/buf/buf_internals.h -./include/postgresql/server/storage/smgr.h -./include/postgresql/server/catalog/pg_am.h -./include/postgresql/server/catalog/pg_class.h -./include/postgresql/server/catalog/pg_index.h -./include/postgresql/server/rewrite/prs2lock.h -./include/postgresql/server/tcop/stmt_retry.h -./include/postgresql/server/catalog/pg_hashbucket_fn.h -./include/postgresql/server/utils/rel_gs.h -./include/postgresql/server/catalog/pg_partition.h -./include/postgresql/server/catalog/pg_hashbucket.h -./include/postgresql/server/catalog/catalog.h -./include/postgresql/server/catalog/catversion.h -./include/postgresql/server/catalog/pg_namespace.h -./include/postgresql/server/utils/partitionmap_gs.h -./include/postgresql/server/access/heapam.h -./include/postgresql/server/storage/pagecompress.h -./include/postgresql/server/replication/bcm.h -./include/postgresql/server/storage/cstore/cstorealloc.h -./include/postgresql/server/storage/cucache_mgr.h -./include/postgresql/server/storage/cache_mgr.h -./include/postgresql/server/nodes/plannodes.h -./include/postgresql/server/foreign/foreign.h -./include/postgresql/server/access/obs/obs_am.h -./include/postgresql/server/storage/buf/buffile.h -./include/postgresql/server/replication/slot.h -./include/postgresql/server/access/obs/eSDKOBS.h -./include/postgresql/server/commands/defrem.h -./include/postgresql/server/optimizer/pruning.h -./include/postgresql/server/nodes/relation.h -./include/postgresql/server/optimizer/bucketinfo.h -./include/postgresql/server/pgxc/nodemgr.h 
-./include/postgresql/server/bulkload/dist_fdw.h -./include/postgresql/server/bulkload/importerror.h -./include/postgresql/server/commands/gds_stream.h -./include/postgresql/server/bulkload/utils.h -./include/postgresql/server/cjson/cJSON.h -./include/postgresql/server/ssl/gs_openssl_client.h -./include/postgresql/server/funcapi.h -./include/postgresql/server/executor/executor.h -./include/postgresql/server/executor/execdesc.h -./include/postgresql/server/nodes/execnodes.h -./include/postgresql/server/access/genam.h -./include/postgresql/server/nodes/tidbitmap.h -./include/postgresql/server/access/relscan.h -./include/postgresql/server/access/itup.h -./include/postgresql/server/executor/instrument.h -./include/postgresql/server/miscadmin.h -./include/postgresql/server/libpq/libpq-be.h -./include/postgresql/server/libpq/hba.h -./include/postgresql/server/libpq/sha2.h -./include/postgresql/server/utils/anls_opt.h -./include/postgresql/server/pgxc/pgxc.h -./include/postgresql/server/catalog/namespace.h -./include/postgresql/server/commands/trigger.h -./include/postgresql/server/executor/spi.h -./include/postgresql/server/access/ustore/undo/knl_uundotype.h -./include/postgresql/server/access/ustore/knl_uheap.h -./include/postgresql/server/access/ustore/knl_utuple.h -./include/postgresql/server/access/ustore/knl_utype.h -./include/postgresql/server/access/ustore/knl_upage.h -./include/postgresql/server/access/ustore/knl_uredo.h -./include/postgresql/server/access/ustore/knl_uundovec.h -./include/postgresql/server/access/ustore/knl_uundorecord.h -./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h -./include/postgresql/server/access/ustore/undo/knl_uundotxn.h -./include/postgresql/server/access/ustore/undo/knl_uundozone.h -./include/postgresql/server/access/ustore/undo/knl_uundospace.h -./include/postgresql/server/communication/commproxy_basic.h -./include/postgresql/server/access/parallel_recovery/page_redo.h -./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h -./include/postgresql/server/executor/exec/execdesc.h -./include/postgresql/server/db4ai/matrix.h -./include/postgresql/server/db4ai/scores.h -./jre/ASSEMBLY_EXCEPTION -./jre/bin/java -./jre/bin/jjs -./jre/bin/keytool -./jre/bin/orbd -./jre/bin/pack200 -./jre/bin/policytool -./jre/bin/rmid -./jre/bin/rmiregistry -./jre/bin/servertool -./jre/bin/tnameserv -./jre/bin/unpack200 -./jre/lib/aarch64/jli/libjli.so -./jre/lib/aarch64/jvm.cfg -./jre/lib/aarch64/libattach.so -./jre/lib/aarch64/libavplugin-ffmpeg-58.so -./jre/lib/aarch64/libawt_headless.so -./jre/lib/aarch64/libawt.so -./jre/lib/aarch64/libawt_xawt.so -./jre/lib/aarch64/libdecora_sse.so -./jre/lib/aarch64/libdt_socket.so -./jre/lib/aarch64/libfontmanager.so -./jre/lib/aarch64/libfxplugins.so -./jre/lib/aarch64/libglassgtk2.so -./jre/lib/aarch64/libglassgtk3.so -./jre/lib/aarch64/libglass.so -./jre/lib/aarch64/libgstreamer-lite.so -./jre/lib/aarch64/libhprof.so -./jre/lib/aarch64/libinstrument.so -./jre/lib/aarch64/libj2gss.so -./jre/lib/aarch64/libj2pcsc.so -./jre/lib/aarch64/libj2pkcs11.so -./jre/lib/aarch64/libjaas_unix.so -./jre/lib/aarch64/libjava_crw_demo.so -./jre/lib/aarch64/libjavafx_font_freetype.so -./jre/lib/aarch64/libjavafx_font_pango.so -./jre/lib/aarch64/libjavafx_font.so -./jre/lib/aarch64/libjavafx_iio.so -./jre/lib/aarch64/libjava.so -./jre/lib/aarch64/libjawt.so -./jre/lib/aarch64/libjdwp.so -./jre/lib/aarch64/libjfxmedia.so -./jre/lib/aarch64/libjfxwebkit.so -./jre/lib/aarch64/libjpeg.so -./jre/lib/aarch64/libjsdt.so 
-./jre/lib/aarch64/libjsig.so -./jre/lib/aarch64/libjsoundalsa.so -./jre/lib/aarch64/libjsound.so -./jre/lib/aarch64/liblcms.so -./jre/lib/aarch64/libmanagement.so -./jre/lib/aarch64/libmlib_image.so -./jre/lib/aarch64/libnet.so -./jre/lib/aarch64/libnio.so -./jre/lib/aarch64/libnpt.so -./jre/lib/aarch64/libprism_common.so -./jre/lib/aarch64/libprism_es2.so -./jre/lib/aarch64/libprism_sw.so -./jre/lib/aarch64/libsaproc.so -./jre/lib/aarch64/libsctp.so -./jre/lib/aarch64/libsplashscreen.so -./jre/lib/aarch64/libsunec.so -./jre/lib/aarch64/libunpack.so -./jre/lib/aarch64/libverify.so -./jre/lib/aarch64/libzip.so -./jre/lib/aarch64/server/libjvm.so -./jre/lib/aarch64/server/Xusage.txt -./jre/lib/calendars.properties -./jre/lib/charsets.jar -./jre/lib/classlist -./jre/lib/cmm/CIEXYZ.pf -./jre/lib/cmm/GRAY.pf -./jre/lib/cmm/LINEAR_RGB.pf -./jre/lib/cmm/PYCC.pf -./jre/lib/cmm/sRGB.pf -./jre/lib/content-types.properties -./jre/lib/currency.data -./jre/lib/ext/cldrdata.jar -./jre/lib/ext/dnsns.jar -./jre/lib/ext/jaccess.jar -./jre/lib/ext/jfxrt.jar -./jre/lib/ext/localedata.jar -./jre/lib/ext/meta-index -./jre/lib/ext/nashorn.jar -./jre/lib/ext/sunec.jar -./jre/lib/ext/sunjce_provider.jar -./jre/lib/ext/sunpkcs11.jar -./jre/lib/ext/zipfs.jar -./jre/lib/flavormap.properties -./jre/lib/fontconfig.Euler.properties -./jre/lib/fontconfig.properties -./jre/lib/fontconfig.Ubuntu.properties -./jre/lib/fonts/Roboto-Regular.ttf -./jre/lib/hijrah-config-umalqura.properties -./jre/lib/images/cursors/cursors.properties -./jre/lib/images/cursors/invalid32x32.gif -./jre/lib/images/cursors/motif_CopyDrop32x32.gif -./jre/lib/images/cursors/motif_CopyNoDrop32x32.gif -./jre/lib/images/cursors/motif_LinkDrop32x32.gif -./jre/lib/images/cursors/motif_LinkNoDrop32x32.gif -./jre/lib/images/cursors/motif_MoveDrop32x32.gif -./jre/lib/images/cursors/motif_MoveNoDrop32x32.gif -./jre/lib/javafx-mx.jar -./jre/lib/javafx.properties -./jre/lib/jce.jar -./jre/lib/jexec -./jre/lib/jfr/default.jfc -./jre/lib/jfr.jar -./jre/lib/jfr/profile.jfc -./jre/lib/jfxswt.jar -./jre/lib/jsse.jar -./jre/lib/jvm.hprof.txt -./jre/lib/logging.properties -./jre/lib/management-agent.jar -./jre/lib/management/jmxremote.access -./jre/lib/management/jmxremote.password.template -./jre/lib/management/management.properties -./jre/lib/management/snmp.acl.template -./jre/lib/meta-index -./jre/lib/net.properties -./jre/lib/psfontj2d.properties -./jre/lib/psfont.properties.ja -./jre/lib/resources.jar -./jre/lib/rt.jar -./jre/lib/security/blacklisted.certs -./jre/lib/security/cacerts -./jre/lib/security/java.policy -./jre/lib/security/java.security -./jre/lib/security/policy/limited/local_policy.jar -./jre/lib/security/policy/limited/US_export_policy.jar -./jre/lib/security/policy/unlimited/local_policy.jar -./jre/lib/security/policy/unlimited/US_export_policy.jar -./jre/lib/sound.properties -./jre/lib/tzdb.dat -./jre/LICENSE -./jre/THIRD_PARTY_README -[client] -./bin/gsql -./bin/gs_dump -./bin/gs_dumpall -./bin/gs_restore -./jdbc/gsjdbc4.jar -./jdbc/gsjdbc200.jar -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/postgresql/euc_kr_and_mic.so -./lib/postgresql/utf8_and_uhc.so -./lib/postgresql/euc_tw_and_big5.so -./lib/postgresql/cyrillic_and_mic.so -./lib/postgresql/utf8_and_johab.so -./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk 
-./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/config/install-sh -./lib/postgresql/euc_cn_and_mic.so -./lib/postgresql/latin_and_mic.so -./lib/postgresql/utf8_and_sjis2004.so -./lib/postgresql/utf8_and_euc_jp.so -./lib/postgresql/utf8_and_sjis.so -./lib/postgresql/utf8_and_cyrillic.so -./lib/postgresql/utf8_and_euc_kr.so -./lib/postgresql/ascii_and_mic.so -./lib/postgresql/utf8_and_iso8859_1.so -./lib/postgresql/euc_jp_and_sjis.so -./lib/postgresql/dict_snowball.so -./lib/postgresql/utf8_and_ascii.so -./lib/postgresql/utf8_and_euc_tw.so -./lib/postgresql/utf8_and_iso8859.so -./lib/postgresql/utf8_and_win.so -./lib/postgresql/utf8_and_euc_cn.so -./lib/postgresql/utf8_and_gbk.so -./lib/postgresql/utf8_and_euc2004.so -./lib/postgresql/utf8_and_big5.so -./lib/postgresql/java/pljava.jar -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -./odbc/lib/psqlodbcw.la -./odbc/lib/psqlodbcw.so -[libpq] -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -[cmlibrary] -./lib/libpgport.a -[header] -./include/libpq-fe.h -./include/postgres_ext.h -./include/gs_thread.h -./include/gs_threadlocal.h -./include/pg_config.h -./include/pg_config_manual.h -./include/pg_config_os.h -./include/cm_config.h -./include/c.h -./include/port.h -./include/cm_msg.h -./include/cm_c.h -./include/cm_misc.h -./include/libpq-int.h -./include/pqcomm.h -./include/pqexpbuffer.h -./include/xlogdefs.h -./include/cm-libpq-fe.h -[version] -V500R002C00 -[script] -./script/__init__.py -./script/gs_check -./script/gs_checkos -./script/gs_checkperf -./script/gs_collector -./script/gs_backup -./script/gs_expand -./script/gs_install -./script/gs_om -./script/gs_hotpatch -./script/gs_postuninstall -./script/gs_preinstall -./script/gs_replace -./script/gs_shrink -./script/gs_ssh -./script/gs_sshexkey -./script/gs_uninstall -./script/gs_upgradectl -./script/gs_lcctl -./script/gs_resize -./script/uninstall_force.py -./script/checkRunStatus.py 
-./script/JsonToDbClustorInfo.py -./script/killall -./script/nodegroup_migrate.sh
diff --git a/build/script/opengauss_release_list_openeuler_aarch64_single b/build/script/opengauss_release_list_openeuler_aarch64_single deleted file mode 100644 index eeb58034a..000000000 --- a/build/script/opengauss_release_list_openeuler_aarch64_single +++ /dev/null @@ -1,1467 +0,0 @@
-[server] -./bin/gsql -./bin/gaussdb -./bin/gstrace -./bin/gs_basebackup -./bin/gs_probackup -./bin/gs_tar -./bin/gs_encrypt -./bin/gs_dump -./bin/gs_dumpall -./bin/gs_ctl -./bin/gs_initdb -./bin/gs_guc -./bin/encrypt -./bin/openssl -./bin/gs_restore -./bin/gs_cgroup -./bin/openssl -./bin/pg_config -./bin/pg_controldata -./bin/pg_format_cu -./bin/pg_resetxlog -./bin/pg_recvlogical -./bin/alarmItem.conf -./bin/retry_errcodes.conf -./bin/cluster_guc.conf -./bin/bind_net_irq.sh -./bin/setArmOptimization.sh -./bin/krb5kdc -./bin/klist -./bin/kinit -./bin/kdestroy -./bin/kdb5_util -./bin/kadmin.local -./bin/lz4 -./bin/kadmind -./bin/dbmind -./bin/server.key.cipher -./bin/server.key.rand -./bin/gs_plan_simulator.sh -./etc/kerberos/kadm5.acl -./etc/kerberos/kdc.conf -./etc/kerberos/krb5.conf -./etc/kerberos/mppdb-site.xml -./share/postgresql/tmp/udstools.py -./share/postgresql/db4ai -./share/postgresql/snowball_create.sql -./share/postgresql/pg_hba.conf.sample -./share/postgresql/gs_gazelle.conf.sample -./share/postgresql/pg_service.conf.sample -./share/postgresql/psqlrc.sample -./share/postgresql/conversion_create.sql -./share/postgresql/postgres.shdescription -./share/postgresql/pg_ident.conf.sample -./share/postgresql/postgres.description -./share/postgresql/postgresql.conf.sample -./share/postgresql/mot.conf.sample -./share/postgresql/extension/plpgsql--1.0.sql -./share/postgresql/extension/hstore.control -./share/postgresql/extension/security_plugin.control -./share/postgresql/extension/security_plugin--1.0.sql -./share/postgresql/extension/file_fdw--1.0.sql -./share/postgresql/extension/plpgsql.control -./share/postgresql/extension/dist_fdw.control -./share/postgresql/extension/dist_fdw--1.0.sql -./share/postgresql/extension/hstore--1.1.sql -./share/postgresql/extension/plpgsql--unpackaged--1.0.sql -./share/postgresql/extension/file_fdw.control -./share/postgresql/extension/hstore--unpackaged--1.0.sql -./share/postgresql/extension/hstore--1.0--1.1.sql -./share/postgresql/extension/hdfs_fdw--1.0.sql -./share/postgresql/extension/hdfs_fdw.control -./share/postgresql/extension/log_fdw--1.0.sql -./share/postgresql/extension/log_fdw.control -./share/postgresql/extension/mot_fdw--1.0.sql -./share/postgresql/extension/mot_fdw.control -./share/postgresql/extension/postgres_fdw--1.0.sql -./share/postgresql/extension/postgres_fdw.control -./share/postgresql/timezone/GB-Eire -./share/postgresql/timezone/Turkey -./share/postgresql/timezone/Kwajalein -./share/postgresql/timezone/UCT -./share/postgresql/timezone/Mexico/BajaSur -./share/postgresql/timezone/Mexico/BajaNorte -./share/postgresql/timezone/Mexico/General -./share/postgresql/timezone/Japan -./share/postgresql/timezone/Israel -./share/postgresql/timezone/US/Eastern -./share/postgresql/timezone/US/Samoa -./share/postgresql/timezone/US/Michigan -./share/postgresql/timezone/US/Aleutian -./share/postgresql/timezone/US/Pacific -./share/postgresql/timezone/US/Pacific-New -./share/postgresql/timezone/US/Indiana-Starke -./share/postgresql/timezone/US/Mountain -./share/postgresql/timezone/US/East-Indiana -./share/postgresql/timezone/US/Hawaii -./share/postgresql/timezone/US/Arizona
-./share/postgresql/timezone/US/Alaska -./share/postgresql/timezone/US/Central -./share/postgresql/timezone/Greenwich -./share/postgresql/timezone/Poland -./share/postgresql/timezone/CET -./share/postgresql/timezone/GMT-0 -./share/postgresql/timezone/Indian/Mauritius -./share/postgresql/timezone/Indian/Cocos -./share/postgresql/timezone/Indian/Reunion -./share/postgresql/timezone/Indian/Maldives -./share/postgresql/timezone/Indian/Comoro -./share/postgresql/timezone/Indian/Antananarivo -./share/postgresql/timezone/Indian/Christmas -./share/postgresql/timezone/Indian/Kerguelen -./share/postgresql/timezone/Indian/Chagos -./share/postgresql/timezone/Indian/Mayotte -./share/postgresql/timezone/Indian/Mahe -./share/postgresql/timezone/GMT0 -./share/postgresql/timezone/Antarctica/Palmer -./share/postgresql/timezone/Antarctica/Syowa -./share/postgresql/timezone/Antarctica/South_Pole -./share/postgresql/timezone/Antarctica/McMurdo -./share/postgresql/timezone/Antarctica/Rothera -./share/postgresql/timezone/Antarctica/Mawson -./share/postgresql/timezone/Antarctica/Casey -./share/postgresql/timezone/Antarctica/Davis -./share/postgresql/timezone/Antarctica/DumontDUrville -./share/postgresql/timezone/Antarctica/Vostok -./share/postgresql/timezone/Antarctica/Macquarie -./share/postgresql/timezone/ROK -./share/postgresql/timezone/Chile/EasterIsland -./share/postgresql/timezone/Chile/Continental -./share/postgresql/timezone/posixrules -./share/postgresql/timezone/Atlantic/Azores -./share/postgresql/timezone/Atlantic/St_Helena -./share/postgresql/timezone/Atlantic/Madeira -./share/postgresql/timezone/Atlantic/Jan_Mayen -./share/postgresql/timezone/Atlantic/Faroe -./share/postgresql/timezone/Atlantic/Stanley -./share/postgresql/timezone/Atlantic/Cape_Verde -./share/postgresql/timezone/Atlantic/Reykjavik -./share/postgresql/timezone/Atlantic/Canary -./share/postgresql/timezone/Atlantic/South_Georgia -./share/postgresql/timezone/Atlantic/Bermuda -./share/postgresql/timezone/Atlantic/Faeroe -./share/postgresql/timezone/Hongkong -./share/postgresql/timezone/Libya -./share/postgresql/timezone/Iceland -./share/postgresql/timezone/UTC -./share/postgresql/timezone/Australia/Darwin -./share/postgresql/timezone/Australia/North -./share/postgresql/timezone/Australia/NSW -./share/postgresql/timezone/Australia/Sydney -./share/postgresql/timezone/Australia/Hobart -./share/postgresql/timezone/Australia/LHI -./share/postgresql/timezone/Australia/Victoria -./share/postgresql/timezone/Australia/South -./share/postgresql/timezone/Australia/Melbourne -./share/postgresql/timezone/Australia/Lord_Howe -./share/postgresql/timezone/Australia/West -./share/postgresql/timezone/Australia/Brisbane -./share/postgresql/timezone/Australia/Perth -./share/postgresql/timezone/Australia/Eucla -./share/postgresql/timezone/Australia/Canberra -./share/postgresql/timezone/Australia/Queensland -./share/postgresql/timezone/Australia/Broken_Hill -./share/postgresql/timezone/Australia/Lindeman -./share/postgresql/timezone/Australia/ACT -./share/postgresql/timezone/Australia/Currie -./share/postgresql/timezone/Australia/Adelaide -./share/postgresql/timezone/Australia/Yancowinna -./share/postgresql/timezone/Australia/Tasmania -./share/postgresql/timezone/Jamaica -./share/postgresql/timezone/EST5EDT -./share/postgresql/timezone/MET -./share/postgresql/timezone/W-SU -./share/postgresql/timezone/Mideast/Riyadh87 -./share/postgresql/timezone/Mideast/Riyadh88 -./share/postgresql/timezone/Mideast/Riyadh89 -./share/postgresql/timezone/WET 
-./share/postgresql/timezone/ROC -./share/postgresql/timezone/Factory -./share/postgresql/timezone/EET -./share/postgresql/timezone/PST8PDT -./share/postgresql/timezone/Portugal -./share/postgresql/timezone/NZ -./share/postgresql/timezone/Brazil/West -./share/postgresql/timezone/Brazil/DeNoronha -./share/postgresql/timezone/Brazil/Acre -./share/postgresql/timezone/Brazil/East -./share/postgresql/timezone/EST -./share/postgresql/timezone/Egypt -./share/postgresql/timezone/Universal -./share/postgresql/timezone/Pacific/Enderbury -./share/postgresql/timezone/Pacific/Noumea -./share/postgresql/timezone/Pacific/Kwajalein -./share/postgresql/timezone/Pacific/Norfolk -./share/postgresql/timezone/Pacific/Nauru -./share/postgresql/timezone/Pacific/Efate -./share/postgresql/timezone/Pacific/Kosrae -./share/postgresql/timezone/Pacific/Galapagos -./share/postgresql/timezone/Pacific/Truk -./share/postgresql/timezone/Pacific/Fiji -./share/postgresql/timezone/Pacific/Auckland -./share/postgresql/timezone/Pacific/Samoa -./share/postgresql/timezone/Pacific/Port_Moresby -./share/postgresql/timezone/Pacific/Johnston -./share/postgresql/timezone/Pacific/Apia -./share/postgresql/timezone/Pacific/Tarawa -./share/postgresql/timezone/Pacific/Pitcairn -./share/postgresql/timezone/Pacific/Marquesas -./share/postgresql/timezone/Pacific/Chatham -./share/postgresql/timezone/Pacific/Tahiti -./share/postgresql/timezone/Pacific/Tongatapu -./share/postgresql/timezone/Pacific/Saipan -./share/postgresql/timezone/Pacific/Fakaofo -./share/postgresql/timezone/Pacific/Guam -./share/postgresql/timezone/Pacific/Niue -./share/postgresql/timezone/Pacific/Chuuk -./share/postgresql/timezone/Pacific/Easter -./share/postgresql/timezone/Pacific/Wallis -./share/postgresql/timezone/Pacific/Gambier -./share/postgresql/timezone/Pacific/Majuro -./share/postgresql/timezone/Pacific/Kiritimati -./share/postgresql/timezone/Pacific/Guadalcanal -./share/postgresql/timezone/Pacific/Funafuti -./share/postgresql/timezone/Pacific/Rarotonga -./share/postgresql/timezone/Pacific/Pago_Pago -./share/postgresql/timezone/Pacific/Midway -./share/postgresql/timezone/Pacific/Palau -./share/postgresql/timezone/Pacific/Honolulu -./share/postgresql/timezone/Pacific/Yap -./share/postgresql/timezone/Pacific/Pohnpei -./share/postgresql/timezone/Pacific/Wake -./share/postgresql/timezone/Pacific/Ponape -./share/postgresql/timezone/Iran -./share/postgresql/timezone/Etc/GMT-4 -./share/postgresql/timezone/Etc/GMT-9 -./share/postgresql/timezone/Etc/UCT -./share/postgresql/timezone/Etc/GMT-7 -./share/postgresql/timezone/Etc/Greenwich -./share/postgresql/timezone/Etc/GMT-0 -./share/postgresql/timezone/Etc/GMT-5 -./share/postgresql/timezone/Etc/GMT0 -./share/postgresql/timezone/Etc/GMT+7 -./share/postgresql/timezone/Etc/GMT-2 -./share/postgresql/timezone/Etc/GMT-10 -./share/postgresql/timezone/Etc/GMT+6 -./share/postgresql/timezone/Etc/GMT+1 -./share/postgresql/timezone/Etc/UTC -./share/postgresql/timezone/Etc/GMT-3 -./share/postgresql/timezone/Etc/GMT-1 -./share/postgresql/timezone/Etc/GMT-8 -./share/postgresql/timezone/Etc/GMT-11 -./share/postgresql/timezone/Etc/GMT+12 -./share/postgresql/timezone/Etc/GMT+10 -./share/postgresql/timezone/Etc/GMT-12 -./share/postgresql/timezone/Etc/GMT+5 -./share/postgresql/timezone/Etc/Universal -./share/postgresql/timezone/Etc/GMT-13 -./share/postgresql/timezone/Etc/GMT-6 -./share/postgresql/timezone/Etc/GMT+9 -./share/postgresql/timezone/Etc/GMT+3 -./share/postgresql/timezone/Etc/GMT-14 -./share/postgresql/timezone/Etc/GMT+4 
-./share/postgresql/timezone/Etc/Zulu -./share/postgresql/timezone/Etc/GMT+2 -./share/postgresql/timezone/Etc/GMT+0 -./share/postgresql/timezone/Etc/GMT+8 -./share/postgresql/timezone/Etc/GMT+11 -./share/postgresql/timezone/Etc/GMT -./share/postgresql/timezone/Zulu -./share/postgresql/timezone/GMT+0 -./share/postgresql/timezone/Singapore -./share/postgresql/timezone/NZ-CHAT -./share/postgresql/timezone/Cuba -./share/postgresql/timezone/GB -./share/postgresql/timezone/Arctic/Longyearbyen -./share/postgresql/timezone/MST7MDT -./share/postgresql/timezone/PRC -./share/postgresql/timezone/Canada/Eastern -./share/postgresql/timezone/Canada/Yukon -./share/postgresql/timezone/Canada/Atlantic -./share/postgresql/timezone/Canada/Newfoundland -./share/postgresql/timezone/Canada/Saskatchewan -./share/postgresql/timezone/Canada/Pacific -./share/postgresql/timezone/Canada/East-Saskatchewan -./share/postgresql/timezone/Canada/Mountain -./share/postgresql/timezone/Canada/Central -./share/postgresql/timezone/CST6CDT -./share/postgresql/timezone/HST -./share/postgresql/timezone/America/Boa_Vista -./share/postgresql/timezone/America/New_York -./share/postgresql/timezone/America/Santarem -./share/postgresql/timezone/America/Boise -./share/postgresql/timezone/America/St_Lucia -./share/postgresql/timezone/America/Mendoza -./share/postgresql/timezone/America/Mexico_City -./share/postgresql/timezone/America/Chihuahua -./share/postgresql/timezone/America/Indianapolis -./share/postgresql/timezone/America/Virgin -./share/postgresql/timezone/America/Atka -./share/postgresql/timezone/America/Winnipeg -./share/postgresql/timezone/America/Hermosillo -./share/postgresql/timezone/America/Indiana/Indianapolis -./share/postgresql/timezone/America/Indiana/Tell_City -./share/postgresql/timezone/America/Indiana/Winamac -./share/postgresql/timezone/America/Indiana/Knox -./share/postgresql/timezone/America/Indiana/Vincennes -./share/postgresql/timezone/America/Indiana/Vevay -./share/postgresql/timezone/America/Indiana/Petersburg -./share/postgresql/timezone/America/Indiana/Marengo -./share/postgresql/timezone/America/Moncton -./share/postgresql/timezone/America/Campo_Grande -./share/postgresql/timezone/America/Guyana -./share/postgresql/timezone/America/Caracas -./share/postgresql/timezone/America/Maceio -./share/postgresql/timezone/America/Godthab -./share/postgresql/timezone/America/Thunder_Bay -./share/postgresql/timezone/America/Havana -./share/postgresql/timezone/America/Santiago -./share/postgresql/timezone/America/Los_Angeles -./share/postgresql/timezone/America/Buenos_Aires -./share/postgresql/timezone/America/Manaus -./share/postgresql/timezone/America/Bahia -./share/postgresql/timezone/America/North_Dakota/New_Salem -./share/postgresql/timezone/America/North_Dakota/Beulah -./share/postgresql/timezone/America/North_Dakota/Center -./share/postgresql/timezone/America/Bahia_Banderas -./share/postgresql/timezone/America/Edmonton -./share/postgresql/timezone/America/Tegucigalpa -./share/postgresql/timezone/America/Rankin_Inlet -./share/postgresql/timezone/America/Monterrey -./share/postgresql/timezone/America/Cambridge_Bay -./share/postgresql/timezone/America/Porto_Velho -./share/postgresql/timezone/America/Antigua -./share/postgresql/timezone/America/Atikokan -./share/postgresql/timezone/America/Vancouver -./share/postgresql/timezone/America/Anchorage -./share/postgresql/timezone/America/Port-au-Prince -./share/postgresql/timezone/America/Lima -./share/postgresql/timezone/America/Grenada 
-./share/postgresql/timezone/America/Creston -./share/postgresql/timezone/America/La_Paz -./share/postgresql/timezone/America/Panama -./share/postgresql/timezone/America/Blanc-Sablon -./share/postgresql/timezone/America/Cayenne -./share/postgresql/timezone/America/Santo_Domingo -./share/postgresql/timezone/America/Grand_Turk -./share/postgresql/timezone/America/Toronto -./share/postgresql/timezone/America/Rainy_River -./share/postgresql/timezone/America/Merida -./share/postgresql/timezone/America/Port_of_Spain -./share/postgresql/timezone/America/Nipigon -./share/postgresql/timezone/America/Jamaica -./share/postgresql/timezone/America/Rosario -./share/postgresql/timezone/America/Dawson_Creek -./share/postgresql/timezone/America/Belize -./share/postgresql/timezone/America/Costa_Rica -./share/postgresql/timezone/America/Barbados -./share/postgresql/timezone/America/Danmarkshavn -./share/postgresql/timezone/America/Argentina/La_Rioja -./share/postgresql/timezone/America/Argentina/Mendoza -./share/postgresql/timezone/America/Argentina/Buenos_Aires -./share/postgresql/timezone/America/Argentina/Tucuman -./share/postgresql/timezone/America/Argentina/Ushuaia -./share/postgresql/timezone/America/Argentina/Catamarca -./share/postgresql/timezone/America/Argentina/ComodRivadavia -./share/postgresql/timezone/America/Argentina/Jujuy -./share/postgresql/timezone/America/Argentina/Cordoba -./share/postgresql/timezone/America/Argentina/San_Luis -./share/postgresql/timezone/America/Argentina/Rio_Gallegos -./share/postgresql/timezone/America/Argentina/Salta -./share/postgresql/timezone/America/Argentina/San_Juan -./share/postgresql/timezone/America/Pangnirtung -./share/postgresql/timezone/America/Anguilla -./share/postgresql/timezone/America/Curacao -./share/postgresql/timezone/America/Cancun -./share/postgresql/timezone/America/Montreal -./share/postgresql/timezone/America/Shiprock -./share/postgresql/timezone/America/Thule -./share/postgresql/timezone/America/Scoresbysund -./share/postgresql/timezone/America/Catamarca -./share/postgresql/timezone/America/Sao_Paulo -./share/postgresql/timezone/America/Sitka -./share/postgresql/timezone/America/Asuncion -./share/postgresql/timezone/America/Regina -./share/postgresql/timezone/America/St_Johns -./share/postgresql/timezone/America/Montevideo -./share/postgresql/timezone/America/Eirunepe -./share/postgresql/timezone/America/Denver -./share/postgresql/timezone/America/Metlakatla -./share/postgresql/timezone/America/Araguaina -./share/postgresql/timezone/America/Juneau -./share/postgresql/timezone/America/Marigot -./share/postgresql/timezone/America/Menominee -./share/postgresql/timezone/America/Glace_Bay -./share/postgresql/timezone/America/Tijuana -./share/postgresql/timezone/America/Detroit -./share/postgresql/timezone/America/Belem -./share/postgresql/timezone/America/Jujuy -./share/postgresql/timezone/America/St_Thomas -./share/postgresql/timezone/America/Resolute -./share/postgresql/timezone/America/Cuiaba -./share/postgresql/timezone/America/Halifax -./share/postgresql/timezone/America/St_Barthelemy -./share/postgresql/timezone/America/Guatemala -./share/postgresql/timezone/America/Nassau -./share/postgresql/timezone/America/St_Kitts -./share/postgresql/timezone/America/Cordoba -./share/postgresql/timezone/America/Miquelon -./share/postgresql/timezone/America/Bogota -./share/postgresql/timezone/America/Rio_Branco -./share/postgresql/timezone/America/Ensenada -./share/postgresql/timezone/America/Yakutat -./share/postgresql/timezone/America/Noronha 
-./share/postgresql/timezone/America/Kentucky/Monticello -./share/postgresql/timezone/America/Kentucky/Louisville -./share/postgresql/timezone/America/Porto_Acre -./share/postgresql/timezone/America/Santa_Isabel -./share/postgresql/timezone/America/El_Salvador -./share/postgresql/timezone/America/Yellowknife -./share/postgresql/timezone/America/Cayman -./share/postgresql/timezone/America/Whitehorse -./share/postgresql/timezone/America/Ojinaga -./share/postgresql/timezone/America/Aruba -./share/postgresql/timezone/America/Nome -./share/postgresql/timezone/America/Fortaleza -./share/postgresql/timezone/America/Martinique -./share/postgresql/timezone/America/Recife -./share/postgresql/timezone/America/Knox_IN -./share/postgresql/timezone/America/Guayaquil -./share/postgresql/timezone/America/Goose_Bay -./share/postgresql/timezone/America/Iqaluit -./share/postgresql/timezone/America/Matamoros -./share/postgresql/timezone/America/Lower_Princes -./share/postgresql/timezone/America/Louisville -./share/postgresql/timezone/America/Coral_Harbour -./share/postgresql/timezone/America/Phoenix -./share/postgresql/timezone/America/Guadeloupe -./share/postgresql/timezone/America/Mazatlan -./share/postgresql/timezone/America/Swift_Current -./share/postgresql/timezone/America/Paramaribo -./share/postgresql/timezone/America/Dominica -./share/postgresql/timezone/America/Kralendijk -./share/postgresql/timezone/America/Montserrat -./share/postgresql/timezone/America/St_Vincent -./share/postgresql/timezone/America/Fort_Wayne -./share/postgresql/timezone/America/Dawson -./share/postgresql/timezone/America/Inuvik -./share/postgresql/timezone/America/Adak -./share/postgresql/timezone/America/Managua -./share/postgresql/timezone/America/Puerto_Rico -./share/postgresql/timezone/America/Tortola -./share/postgresql/timezone/America/Chicago -./share/postgresql/timezone/Africa/Lome -./share/postgresql/timezone/Africa/Brazzaville -./share/postgresql/timezone/Africa/Khartoum -./share/postgresql/timezone/Africa/Ceuta -./share/postgresql/timezone/Africa/Djibouti -./share/postgresql/timezone/Africa/Lagos -./share/postgresql/timezone/Africa/Accra -./share/postgresql/timezone/Africa/El_Aaiun -./share/postgresql/timezone/Africa/Malabo -./share/postgresql/timezone/Africa/Windhoek -./share/postgresql/timezone/Africa/Tripoli -./share/postgresql/timezone/Africa/Bissau -./share/postgresql/timezone/Africa/Blantyre -./share/postgresql/timezone/Africa/Kinshasa -./share/postgresql/timezone/Africa/Porto-Novo -./share/postgresql/timezone/Africa/Nairobi -./share/postgresql/timezone/Africa/Ouagadougou -./share/postgresql/timezone/Africa/Asmera -./share/postgresql/timezone/Africa/Cairo -./share/postgresql/timezone/Africa/Lubumbashi -./share/postgresql/timezone/Africa/Tunis -./share/postgresql/timezone/Africa/Dar_es_Salaam -./share/postgresql/timezone/Africa/Casablanca -./share/postgresql/timezone/Africa/Algiers -./share/postgresql/timezone/Africa/Mbabane -./share/postgresql/timezone/Africa/Monrovia -./share/postgresql/timezone/Africa/Nouakchott -./share/postgresql/timezone/Africa/Banjul -./share/postgresql/timezone/Africa/Kampala -./share/postgresql/timezone/Africa/Conakry -./share/postgresql/timezone/Africa/Mogadishu -./share/postgresql/timezone/Africa/Ndjamena -./share/postgresql/timezone/Africa/Niamey -./share/postgresql/timezone/Africa/Lusaka -./share/postgresql/timezone/Africa/Addis_Ababa -./share/postgresql/timezone/Africa/Sao_Tome -./share/postgresql/timezone/Africa/Abidjan -./share/postgresql/timezone/Africa/Harare 
-./share/postgresql/timezone/Africa/Asmara -./share/postgresql/timezone/Africa/Douala -./share/postgresql/timezone/Africa/Freetown -./share/postgresql/timezone/Africa/Libreville -./share/postgresql/timezone/Africa/Luanda -./share/postgresql/timezone/Africa/Maseru -./share/postgresql/timezone/Africa/Gaborone -./share/postgresql/timezone/Africa/Maputo -./share/postgresql/timezone/Africa/Timbuktu -./share/postgresql/timezone/Africa/Bangui -./share/postgresql/timezone/Africa/Bamako -./share/postgresql/timezone/Africa/Dakar -./share/postgresql/timezone/Africa/Juba -./share/postgresql/timezone/Africa/Bujumbura -./share/postgresql/timezone/Africa/Johannesburg -./share/postgresql/timezone/Africa/Kigali -./share/postgresql/timezone/Eire -./share/postgresql/timezone/Europe/Vaduz -./share/postgresql/timezone/Europe/Podgorica -./share/postgresql/timezone/Europe/Rome -./share/postgresql/timezone/Europe/Vienna -./share/postgresql/timezone/Europe/Dublin -./share/postgresql/timezone/Europe/Zurich -./share/postgresql/timezone/Europe/London -./share/postgresql/timezone/Europe/Monaco -./share/postgresql/timezone/Europe/Sofia -./share/postgresql/timezone/Europe/Uzhgorod -./share/postgresql/timezone/Europe/Minsk -./share/postgresql/timezone/Europe/Malta -./share/postgresql/timezone/Europe/Busingen -./share/postgresql/timezone/Europe/Gibraltar -./share/postgresql/timezone/Europe/Volgograd -./share/postgresql/timezone/Europe/Budapest -./share/postgresql/timezone/Europe/Vatican -./share/postgresql/timezone/Europe/Luxembourg -./share/postgresql/timezone/Europe/Chisinau -./share/postgresql/timezone/Europe/Nicosia -./share/postgresql/timezone/Europe/Warsaw -./share/postgresql/timezone/Europe/San_Marino -./share/postgresql/timezone/Europe/Copenhagen -./share/postgresql/timezone/Europe/Ljubljana -./share/postgresql/timezone/Europe/Athens -./share/postgresql/timezone/Europe/Skopje -./share/postgresql/timezone/Europe/Andorra -./share/postgresql/timezone/Europe/Kaliningrad -./share/postgresql/timezone/Europe/Amsterdam -./share/postgresql/timezone/Europe/Guernsey -./share/postgresql/timezone/Europe/Isle_of_Man -./share/postgresql/timezone/Europe/Tirane -./share/postgresql/timezone/Europe/Jersey -./share/postgresql/timezone/Europe/Madrid -./share/postgresql/timezone/Europe/Helsinki -./share/postgresql/timezone/Europe/Riga -./share/postgresql/timezone/Europe/Zagreb -./share/postgresql/timezone/Europe/Bratislava -./share/postgresql/timezone/Europe/Prague -./share/postgresql/timezone/Europe/Tallinn -./share/postgresql/timezone/Europe/Stockholm -./share/postgresql/timezone/Europe/Tiraspol -./share/postgresql/timezone/Europe/Belgrade -./share/postgresql/timezone/Europe/Bucharest -./share/postgresql/timezone/Europe/Vilnius -./share/postgresql/timezone/Europe/Sarajevo -./share/postgresql/timezone/Europe/Belfast -./share/postgresql/timezone/Europe/Zaporozhye -./share/postgresql/timezone/Europe/Oslo -./share/postgresql/timezone/Europe/Mariehamn -./share/postgresql/timezone/Europe/Moscow -./share/postgresql/timezone/Europe/Brussels -./share/postgresql/timezone/Europe/Paris -./share/postgresql/timezone/Europe/Istanbul -./share/postgresql/timezone/Europe/Simferopol -./share/postgresql/timezone/Europe/Lisbon -./share/postgresql/timezone/Europe/Berlin -./share/postgresql/timezone/Europe/Kiev -./share/postgresql/timezone/Europe/Samara -./share/postgresql/timezone/MST -./share/postgresql/timezone/Asia/Khandyga -./share/postgresql/timezone/Asia/Manila -./share/postgresql/timezone/Asia/Novokuznetsk -./share/postgresql/timezone/Asia/Baghdad 
-./share/postgresql/timezone/Asia/Macau -./share/postgresql/timezone/Asia/Urumqi -./share/postgresql/timezone/Asia/Ujung_Pandang -./share/postgresql/timezone/Asia/Ulan_Bator -./share/postgresql/timezone/Asia/Bishkek -./share/postgresql/timezone/Asia/Qatar -./share/postgresql/timezone/Asia/Qyzylorda -./share/postgresql/timezone/Asia/Calcutta -./share/postgresql/timezone/Asia/Riyadh87 -./share/postgresql/timezone/Asia/Dushanbe -./share/postgresql/timezone/Asia/Yekaterinburg -./share/postgresql/timezone/Asia/Dhaka -./share/postgresql/timezone/Asia/Jakarta -./share/postgresql/timezone/Asia/Shanghai -./share/postgresql/timezone/Asia/Ulaanbaatar -./share/postgresql/timezone/Asia/Jerusalem -./share/postgresql/timezone/Asia/Ashkhabad -./share/postgresql/timezone/Asia/Tokyo -./share/postgresql/timezone/Asia/Macao -./share/postgresql/timezone/Asia/Krasnoyarsk -./share/postgresql/timezone/Asia/Saigon -./share/postgresql/timezone/Asia/Omsk -./share/postgresql/timezone/Asia/Damascus -./share/postgresql/timezone/Asia/Phnom_Penh -./share/postgresql/timezone/Asia/Bangkok -./share/postgresql/timezone/Asia/Kamchatka -./share/postgresql/timezone/Asia/Choibalsan -./share/postgresql/timezone/Asia/Ust-Nera -./share/postgresql/timezone/Asia/Aden -./share/postgresql/timezone/Asia/Vientiane -./share/postgresql/timezone/Asia/Sakhalin -./share/postgresql/timezone/Asia/Ashgabat -./share/postgresql/timezone/Asia/Katmandu -./share/postgresql/timezone/Asia/Almaty -./share/postgresql/timezone/Asia/Baku -./share/postgresql/timezone/Asia/Nicosia -./share/postgresql/timezone/Asia/Riyadh88 -./share/postgresql/timezone/Asia/Kashgar -./share/postgresql/timezone/Asia/Riyadh89 -./share/postgresql/timezone/Asia/Taipei -./share/postgresql/timezone/Asia/Tehran -./share/postgresql/timezone/Asia/Kabul -./share/postgresql/timezone/Asia/Samarkand -./share/postgresql/timezone/Asia/Kuala_Lumpur -./share/postgresql/timezone/Asia/Tashkent -./share/postgresql/timezone/Asia/Thimbu -./share/postgresql/timezone/Asia/Thimphu -./share/postgresql/timezone/Asia/Yerevan -./share/postgresql/timezone/Asia/Chungking -./share/postgresql/timezone/Asia/Hebron -./share/postgresql/timezone/Asia/Karachi -./share/postgresql/timezone/Asia/Kolkata -./share/postgresql/timezone/Asia/Aqtobe -./share/postgresql/timezone/Asia/Muscat -./share/postgresql/timezone/Asia/Hong_Kong -./share/postgresql/timezone/Asia/Chongqing -./share/postgresql/timezone/Asia/Oral -./share/postgresql/timezone/Asia/Pontianak -./share/postgresql/timezone/Asia/Colombo -./share/postgresql/timezone/Asia/Pyongyang -./share/postgresql/timezone/Asia/Hovd -./share/postgresql/timezone/Asia/Kuwait -./share/postgresql/timezone/Asia/Anadyr -./share/postgresql/timezone/Asia/Kathmandu -./share/postgresql/timezone/Asia/Irkutsk -./share/postgresql/timezone/Asia/Bahrain -./share/postgresql/timezone/Asia/Dubai -./share/postgresql/timezone/Asia/Jayapura -./share/postgresql/timezone/Asia/Riyadh -./share/postgresql/timezone/Asia/Ho_Chi_Minh -./share/postgresql/timezone/Asia/Singapore -./share/postgresql/timezone/Asia/Tel_Aviv -./share/postgresql/timezone/Asia/Dili -./share/postgresql/timezone/Asia/Rangoon -./share/postgresql/timezone/Asia/Harbin -./share/postgresql/timezone/Asia/Yakutsk -./share/postgresql/timezone/Asia/Magadan -./share/postgresql/timezone/Asia/Amman -./share/postgresql/timezone/Asia/Kuching -./share/postgresql/timezone/Asia/Novosibirsk -./share/postgresql/timezone/Asia/Seoul -./share/postgresql/timezone/Asia/Dacca -./share/postgresql/timezone/Asia/Vladivostok 
-./share/postgresql/timezone/Asia/Istanbul -./share/postgresql/timezone/Asia/Beirut -./share/postgresql/timezone/Asia/Aqtau -./share/postgresql/timezone/Asia/Brunei -./share/postgresql/timezone/Asia/Gaza -./share/postgresql/timezone/Asia/Tbilisi -./share/postgresql/timezone/Asia/Makassar -./share/postgresql/timezone/Asia/Beijing -./share/postgresql/timezone/Navajo -./share/postgresql/timezone/GMT -./share/postgresql/system_views.sql -./share/postgresql/private_system_views.sql -./share/postgresql/performance_views.sql -./share/postgresql/sql_features.txt -./share/postgresql/pg_cast_oid.txt -./share/postgresql/recovery.conf.sample -./share/postgresql/tsearch_data/english.stop -./share/postgresql/tsearch_data/dutch.stop -./share/postgresql/tsearch_data/hungarian.stop -./share/postgresql/tsearch_data/french.stop -./share/postgresql/tsearch_data/synonym_sample.syn -./share/postgresql/tsearch_data/turkish.stop -./share/postgresql/tsearch_data/portuguese.stop -./share/postgresql/tsearch_data/spanish.stop -./share/postgresql/tsearch_data/hunspell_sample.affix -./share/postgresql/tsearch_data/ispell_sample.affix -./share/postgresql/tsearch_data/danish.stop -./share/postgresql/tsearch_data/german.stop -./share/postgresql/tsearch_data/thesaurus_sample.ths -./share/postgresql/tsearch_data/norwegian.stop -./share/postgresql/tsearch_data/finnish.stop -./share/postgresql/tsearch_data/russian.stop -./share/postgresql/tsearch_data/swedish.stop -./share/postgresql/tsearch_data/ispell_sample.dict -./share/postgresql/tsearch_data/italian.stop -./share/postgresql/information_schema.sql -./share/postgresql/timezonesets/Antarctica.txt -./share/postgresql/timezonesets/Australia.txt -./share/postgresql/timezonesets/Europe.txt -./share/postgresql/timezonesets/America.txt -./share/postgresql/timezonesets/Australia -./share/postgresql/timezonesets/Indian.txt -./share/postgresql/timezonesets/India -./share/postgresql/timezonesets/Pacific.txt -./share/postgresql/timezonesets/Atlantic.txt -./share/postgresql/timezonesets/Africa.txt -./share/postgresql/timezonesets/Asia.txt -./share/postgresql/timezonesets/Default -./share/postgresql/timezonesets/Etc.txt -./share/postgresql/postgres.bki -./share/llvmir/GaussDB_expr.ir -./share/sslcert/gsql/openssl.cnf -./share/sslcert/grpc/openssl.cnf -./share/sslcert/om/openssl.cnf -./lib/libsimsearch/ -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/postgresql/euc_kr_and_mic.so -./lib/postgresql/utf8_and_uhc.so -./lib/postgresql/euc_tw_and_big5.so -./lib/postgresql/cyrillic_and_mic.so -./lib/postgresql/utf8_and_johab.so -./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/src/get_PlatForm_str.sh -./lib/postgresql/pgxs/config/install-sh -./lib/postgresql/euc_cn_and_mic.so -./lib/postgresql/latin_and_mic.so -./lib/postgresql/utf8_and_sjis2004.so -./lib/postgresql/utf8_and_euc_jp.so -./lib/postgresql/utf8_and_sjis.so -./lib/postgresql/utf8_and_cyrillic.so -./lib/postgresql/hstore.so -./lib/postgresql/utf8_and_euc_kr.so -./lib/postgresql/ascii_and_mic.so -./lib/postgresql/utf8_and_iso8859_1.so -./lib/postgresql/euc_jp_and_sjis.so -./lib/postgresql/dict_snowball.so -./lib/postgresql/utf8_and_ascii.so -./lib/postgresql/utf8_and_euc_tw.so -./lib/postgresql/utf8_and_iso8859.so -./lib/postgresql/utf8_and_win.so 
-./lib/postgresql/utf8_and_euc_cn.so -./lib/postgresql/utf8_and_gbk.so -./lib/postgresql/utf8_and_euc2004.so -./lib/postgresql/utf8_and_big5.so -./lib/postgresql/mppdb_decoding.so -./lib/postgresql/pg_plugin -./lib/postgresql/proc_srclib -./lib/postgresql/security_plugin.so -./lib/postgresql/pg_upgrade_support.so -./lib/postgresql/java/pljava.jar -./lib/postgresql/postgres_fdw.so -./lib/postgresql/pgoutput.so -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libcgroup.so -./lib/libcgroup.so.1 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -./lib/libatomic.so -./lib/libatomic.so.1 -./lib/libatomic.so.1.2.0 -./lib/libmasstree.so -./lib/libupb.so -./lib/libupb.so.9 -./lib/libupb.so.9.0.0 -./lib/libabsl_str_format_internal.so -./lib/libabsl_strings.so -./lib/libabsl_throw_delegate.so -./lib/libabsl_strings_internal.so -./lib/libabsl_base.so -./lib/libabsl_dynamic_annotations.so -./lib/libabsl_spinlock_wait.so -./lib/libabsl_int128.so -./lib/libabsl_bad_optional_access.so -./lib/libabsl_raw_logging_internal.so -./lib/libabsl_log_severity.so -./lib/libaddress_sorting.so -./lib/libaddress_sorting.so.9 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkadm5clnt.so -./lib/libkadm5clnt_mit.so -./lib/libkadm5clnt_mit.so.11 -./lib/libkadm5clnt_mit.so.11.0 -./lib/libkadm5clnt_mit.so.12 -./lib/libkadm5clnt_mit.so.12.0 -./lib/libkadm5srv.so -./lib/libkadm5srv_mit.so -./lib/libkadm5srv_mit.so.11 -./lib/libkadm5srv_mit.so.11.0 -./lib/libkadm5srv_mit.so.12 -./lib/libkadm5srv_mit.so.12.0 -./lib/libkdb5.so -./lib/libkdb5.so.9 -./lib/libkdb5.so.9.0 -./lib/libkdb5.so.10 -./lib/libkdb5.so.10.0 -./lib/libkrad.so -./lib/libkrad.so.0 -./lib/libkrad.so.0.0 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/krb5/plugins/kdb/db2.so -./lib/libverto.so -./lib/libverto.so.0 -./lib/libverto.so.0.0 -./lib/libcurl.so -./lib/libcurl.so.4 -./lib/libcurl.so.4.6.0 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libgcc_s.so.1 -./lib/libstdc++.so.6 -./lib/libz.so -./lib/libz.so.1 -./lib/libz.so.1.2.11 -./lib/liblz4.so -./lib/liblz4.so.1 -./lib/liblz4.so.1.9.2 -./lib/libcjson.so -./lib/libcjson.so.1 -./lib/libcjson.so.1.7.13 -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./share/llvmir/GaussDB_expr.ir -./lib/libeSDKLogAPI.so -./lib/libeSDKOBS.so -./lib/liblog4cpp.so -./lib/liblog4cpp.so.5 -./lib/liblog4cpp.so.5.0.6 -./lib/libcharset.so -./lib/libcharset.so.1 -./lib/libcharset.so.1.0.0 -./lib/libiconv.so -./lib/libiconv.so.2 -./lib/libiconv.so.2.6.1 -./lib/libnghttp2.so -./lib/libnghttp2.so.14 -./lib/libnghttp2.so.14.20.0 -./lib/libpcre.so -./lib/libpcre.so.1 -./lib/libpcre.so.1.2.12 -./lib/libsecurec.so -./lib/libxml2.so -./lib/libxml2.so.2 -./lib/libxml2.so.2.9.9 -./lib/libparquet.so -./lib/libparquet.so.14 -./lib/libparquet.so.14.1.0 -./lib/libarrow.so -./lib/libarrow.so.14 -./lib/libarrow.so.14.1.0 -./lib/OBS.ini -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/libdcf.so 
-./lib/libzstd.so -./lib/libzstd.so.1 -./lib/libzstd.so.1.4.4 - -./include/postgresql/server/postgres_ext.h -./include/postgresql/server/pg_config_os.h -./include/postgresql/server/pgtime.h -./include/postgresql/server/datatypes.h -./include/postgresql/server/client_logic/client_logic_enums.h -./include/postgresql/server/nodes/primnodes.h -./include/postgresql/server/nodes/parsenodes.h -./include/postgresql/server/nodes/parsenodes_common.h -./include/postgresql/server/nodes/bitmapset.h -./include/postgresql/server/nodes/pg_list.h -./include/postgresql/server/nodes/value.h -./include/postgresql/server/nodes/nodes.h -./include/postgresql/server/utils/sortsupport.h -./include/postgresql/server/utils/varbit.h -./include/postgresql/server/utils/spccache.h -./include/postgresql/server/utils/rangetypes.h -./include/postgresql/server/utils/plpgsql.h -./include/postgresql/server/utils/memtrack.h -./include/postgresql/server/utils/pg_locale.h -./include/postgresql/server/utils/tzparser.h -./include/postgresql/server/utils/syscall_lock.h -./include/postgresql/server/utils/partitionmap.h -./include/postgresql/server/utils/array.h -./include/postgresql/server/utils/relmapper.h -./include/postgresql/server/utils/hsearch.h -./include/postgresql/server/utils/xml.h -./include/postgresql/server/utils/bytea.h -./include/postgresql/server/utils/relcache.h -./include/postgresql/server/utils/pg_rusage.h -./include/postgresql/server/utils/numeric.h -./include/postgresql/server/utils/mmpool.h -./include/postgresql/server/utils/nabstime.h -./include/postgresql/server/utils/fmgrtab.h -./include/postgresql/server/utils/snapmgr.h -./include/postgresql/server/utils/syscache.h -./include/postgresql/server/utils/logtape.h -./include/postgresql/server/utils/datum.h -./include/postgresql/server/utils/guc_tables.h -./include/postgresql/server/utils/snapshot.h -./include/postgresql/server/utils/geo_decls.h -./include/postgresql/server/utils/errcodes.h -./include/postgresql/server/utils/inval.h -./include/postgresql/server/utils/help_config.h -./include/postgresql/server/utils/distribute_test.h -./include/postgresql/server/utils/aiomem.h -./include/postgresql/server/utils/tuplestore.h -./include/postgresql/server/utils/rbtree.h -./include/postgresql/server/utils/gs_bitmap.h -./include/postgresql/server/utils/tuplesort.h -./include/postgresql/server/utils/ps_status.h -./include/postgresql/server/utils/palloc.h -./include/postgresql/server/utils/reltrigger.h -./include/postgresql/server/utils/acl.h -./include/postgresql/server/utils/ascii.h -./include/postgresql/server/utils/selfuncs.h -./include/postgresql/server/utils/json.h -./include/postgresql/server/utils/portal.h -./include/postgresql/server/utils/atomic.h -./include/postgresql/server/utils/elog.h -./include/postgresql/server/utils/date.h -./include/postgresql/server/utils/plancache.h -./include/postgresql/server/utils/int8.h -./include/postgresql/server/utils/timestamp.h -./include/postgresql/server/utils/bloom_filter.h -./include/postgresql/server/utils/fmgroids.h -./include/postgresql/server/utils/pg_crc_tables.h -./include/postgresql/server/utils/probes.h -./include/postgresql/server/utils/datetime.h -./include/postgresql/server/utils/inet.h -./include/postgresql/server/utils/pg_lzcompress.h -./include/postgresql/server/utils/pg_crc.h -./include/postgresql/server/utils/attoptcache.h -./include/postgresql/server/utils/dynahash.h -./include/postgresql/server/utils/rel.h -./include/postgresql/server/utils/partcache.h -./include/postgresql/server/utils/lsyscache.h 
-./include/postgresql/server/utils/memutils.h -./include/postgresql/server/utils/memprot.h -./include/postgresql/server/utils/uuid.h -./include/postgresql/server/utils/combocid.h -./include/postgresql/server/utils/builtins.h -./include/postgresql/server/utils/guc.h -./include/postgresql/server/utils/dfs_vector.h -./include/postgresql/server/utils/dynamic_loader.h -./include/postgresql/server/utils/resowner.h -./include/postgresql/server/utils/aes.h -./include/postgresql/server/utils/cash.h -./include/postgresql/server/utils/typcache.h -./include/postgresql/server/utils/formatting.h -./include/postgresql/server/utils/partitionkey.h -./include/postgresql/server/utils/aset.h -./include/postgresql/server/utils/catcache.h -./include/postgresql/server/utils/atomic_arm.h -./include/postgresql/server/utils/oidrbtree.h -./include/postgresql/server/datatype/timestamp.h -./include/postgresql/server/access/rmgr.h -./include/postgresql/server/access/xlogreader.h -./include/postgresql/server/access/xlog_basic.h -./include/postgresql/server/access/tupdesc.h -./include/postgresql/server/access/rmgrlist.h -./include/postgresql/server/access/htup.h -./include/postgresql/server/access/xlogdefs.h -./include/postgresql/server/access/attnum.h -./include/postgresql/server/access/tupmacs.h -./include/postgresql/server/access/xlogrecord.h -./include/postgresql/server/tde_key_management/data_common.h -./include/postgresql/server/tcop/dest.h -./include/postgresql/server/catalog/pg_type.h -./include/postgresql/server/catalog/pg_attribute.h -./include/postgresql/server/catalog/genbki.h -./include/postgresql/server/gs_thread.h -./include/postgresql/server/port/pg_bswap.h -./include/postgresql/server/port/pg_crc32c.h -./include/postgresql/server/securec.h -./include/postgresql/server/securectype.h -./include/postgresql/server/storage/off.h -./include/postgresql/server/storage/buf/block.h -./include/postgresql/server/storage/item/item.h -./include/postgresql/server/storage/smgr/relfilenode.h -./include/postgresql/server/storage/buf/bufpage.h -./include/postgresql/server/storage/spin.h -./include/postgresql/server/storage/buf/buf.h -./include/postgresql/server/storage/item/itemid.h -./include/postgresql/server/storage/lock/pg_sema.h -./include/postgresql/server/storage/item/itemptr.h -./include/postgresql/server/storage/lock/s_lock.h -./include/postgresql/server/storage/backendid.h -./include/postgresql/server/storage/lock/lock.h -./include/postgresql/server/storage/lock/lwlock.h -./include/postgresql/server/storage/barrier.h -./include/postgresql/server/storage/shmem.h -./include/postgresql/server/pg_config.h -./include/postgresql/server/lib/stringinfo.h -./include/postgresql/server/fmgr.h -./include/postgresql/server/fmgr/fmgr_comp.h -./include/postgresql/server/fmgr/fmgr_core.h -./include/postgresql/server/gs_threadlocal.h -./include/postgresql/server/postgres.h -./include/postgresql/server/executor/tuptable.h -./include/postgresql/server/pg_config_manual.h -./include/postgresql/server/mb/pg_wchar.h -./include/postgresql/server/c.h -./include/postgresql/server/port.h -./include/postgresql/server/utils/be_module.h -./include/postgresql/server/nodes/params.h -./include/postgresql/server/securec_check.h -./include/postgresql/server/nodes/memnodes.h -./include/postgresql/server/access/skey.h -./include/postgresql/server/lib/dllist.h -./include/postgresql/server/lib/ilist.h -./include/postgresql/server/pgxc/locator.h -./include/postgresql/server/gstrace/gstrace_infra.h -./include/postgresql/server/extension_dependency.h 
-./include/postgresql/server/libpq/libpq-fe.h -./include/postgresql/server/access/clog.h -./include/postgresql/server/storage/proc.h -./include/postgresql/server/access/xlog.h -./include/postgresql/server/storage/lwlocknames.h -./include/postgresql/server/access/xloginsert.h -./include/postgresql/server/catalog/pg_control.h -./include/postgresql/server/access/parallel_recovery/redo_item.h -./include/postgresql/server/access/parallel_recovery/posix_semaphore.h -./include/postgresql/server/replication/replicainternal.h -./include/postgresql/server/knl/knl_instance.h -./include/postgresql/server/knl/knl_guc.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_guc_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_network.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_network.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_common.h -./include/postgresql/server/lib/circularqueue.h -./include/postgresql/server/access/double_write_basic.h -./include/postgresql/server/knl/knl_thread.h -./include/postgresql/server/access/sdir.h -./include/postgresql/server/gssignal/gs_signal.h -./include/postgresql/server/knl/knl_session.h -./include/postgresql/server/libpq/pqcomm.h -./include/postgresql/server/cipher.h -./include/postgresql/server/portability/instr_time.h -./include/postgresql/server/utils/memgroup.h -./include/postgresql/server/storage/latch.h -./include/postgresql/server/workload/qnode.h -./include/postgresql/server/streaming/init.h -./include/postgresql/server/streaming/launcher.h -./include/postgresql/server/pgxc/barrier.h -./include/postgresql/server/libcomm/libcomm.h -./include/postgresql/server/hotpatch/hotpatch.h -./include/postgresql/server/hotpatch/hotpatch_backend.h -./include/postgresql/server/postmaster/bgwriter.h -./include/postgresql/server/postmaster/pagewriter.h -./include/postgresql/server/replication/heartbeat.h -./include/postgresql/server/access/multi_redo_settings.h -./include/postgresql/server/access/redo_statistic_msg.h -./include/postgresql/server/replication/rto_statistic.h -./include/postgresql/server/replication/walprotocol.h -./include/postgresql/server/storage/mot/jit_def.h -./include/postgresql/server/threadpool/threadpool.h -./include/postgresql/server/threadpool/threadpool_controler.h -./include/postgresql/server/threadpool/threadpool_group.h -./include/postgresql/server/knl/knl_variable.h -./include/postgresql/server/threadpool/threadpool_listener.h -./include/postgresql/server/threadpool/threadpool_sessctl.h -./include/postgresql/server/storage/procsignal.h -./include/postgresql/server/threadpool/threadpool_worker.h -./include/postgresql/server/threadpool/threadpool_scheduler.h 
-./include/postgresql/server/threadpool/threadpool_stream.h -./include/postgresql/server/replication/dataqueuedefs.h -./include/postgresql/server/gtm/gtm_c.h -./include/postgresql/server/cm/etcdapi.h -./include/postgresql/server/alarm/alarm.h -./include/postgresql/server/access/xact.h -./include/postgresql/server/access/cstore_am.h -./include/postgresql/server/access/cstore_roughcheck_func.h -./include/postgresql/server/access/cstoreskey.h -./include/postgresql/server/storage/cu.h -./include/postgresql/server/vecexecutor/vectorbatch.h -./include/postgresql/server/cstore.h -./include/postgresql/server/storage/cstore/cstore_mem_alloc.h -./include/postgresql/server/access/cstore_minmax_func.h -./include/postgresql/server/storage/custorage.h -./include/postgresql/server/storage/fd.h -./include/postgresql/server/postmaster/aiocompleter.h -./include/postgresql/server/storage/buf/bufmgr.h -./include/postgresql/server/storage/buf/buf_internals.h -./include/postgresql/server/storage/smgr.h -./include/postgresql/server/catalog/pg_am.h -./include/postgresql/server/catalog/pg_class.h -./include/postgresql/server/catalog/pg_index.h -./include/postgresql/server/rewrite/prs2lock.h -./include/postgresql/server/tcop/stmt_retry.h -./include/postgresql/server/catalog/pg_hashbucket_fn.h -./include/postgresql/server/utils/rel_gs.h -./include/postgresql/server/catalog/pg_partition.h -./include/postgresql/server/catalog/pg_hashbucket.h -./include/postgresql/server/catalog/catalog.h -./include/postgresql/server/catalog/catversion.h -./include/postgresql/server/catalog/pg_namespace.h -./include/postgresql/server/utils/partitionmap_gs.h -./include/postgresql/server/access/heapam.h -./include/postgresql/server/storage/pagecompress.h -./include/postgresql/server/replication/bcm.h -./include/postgresql/server/storage/cstore/cstorealloc.h -./include/postgresql/server/storage/cucache_mgr.h -./include/postgresql/server/storage/cache_mgr.h -./include/postgresql/server/nodes/plannodes.h -./include/postgresql/server/foreign/foreign.h -./include/postgresql/server/access/obs/obs_am.h -./include/postgresql/server/storage/buf/buffile.h -./include/postgresql/server/replication/slot.h -./include/postgresql/server/access/obs/eSDKOBS.h -./include/postgresql/server/commands/defrem.h -./include/postgresql/server/optimizer/pruning.h -./include/postgresql/server/nodes/relation.h -./include/postgresql/server/optimizer/bucketinfo.h -./include/postgresql/server/pgxc/nodemgr.h -./include/postgresql/server/bulkload/dist_fdw.h -./include/postgresql/server/bulkload/importerror.h -./include/postgresql/server/commands/gds_stream.h -./include/postgresql/server/bulkload/utils.h -./include/postgresql/server/cjson/cJSON.h -./include/postgresql/server/ssl/gs_openssl_client.h -./include/postgresql/server/funcapi.h -./include/postgresql/server/executor/executor.h -./include/postgresql/server/executor/execdesc.h -./include/postgresql/server/nodes/execnodes.h -./include/postgresql/server/access/genam.h -./include/postgresql/server/nodes/tidbitmap.h -./include/postgresql/server/access/relscan.h -./include/postgresql/server/access/itup.h -./include/postgresql/server/executor/instrument.h -./include/postgresql/server/miscadmin.h -./include/postgresql/server/libpq/libpq-be.h -./include/postgresql/server/libpq/hba.h -./include/postgresql/server/libpq/sha2.h -./include/postgresql/server/utils/anls_opt.h -./include/postgresql/server/pgxc/pgxc.h -./include/postgresql/server/catalog/namespace.h -./include/postgresql/server/commands/trigger.h 
-./include/postgresql/server/executor/spi.h -./include/postgresql/server/access/ustore/undo/knl_uundotype.h -./include/postgresql/server/access/ustore/knl_uheap.h -./include/postgresql/server/access/ustore/knl_utuple.h -./include/postgresql/server/access/ustore/knl_utype.h -./include/postgresql/server/access/ustore/knl_upage.h -./include/postgresql/server/access/ustore/knl_uredo.h -./include/postgresql/server/access/ustore/knl_uundovec.h -./include/postgresql/server/access/ustore/knl_uundorecord.h -./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h -./include/postgresql/server/access/ustore/undo/knl_uundotxn.h -./include/postgresql/server/access/ustore/undo/knl_uundozone.h -./include/postgresql/server/access/ustore/undo/knl_uundospace.h -./include/postgresql/server/communication/commproxy_basic.h -./include/postgresql/server/access/parallel_recovery/page_redo.h -./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h -./include/postgresql/server/executor/exec/execdesc.h -./include/postgresql/server/db4ai/matrix.h -./include/postgresql/server/db4ai/scores.h -./jre/ASSEMBLY_EXCEPTION -./jre/bin/java -./jre/bin/jjs -./jre/bin/keytool -./jre/bin/orbd -./jre/bin/pack200 -./jre/bin/policytool -./jre/bin/rmid -./jre/bin/rmiregistry -./jre/bin/servertool -./jre/bin/tnameserv -./jre/bin/unpack200 -./jre/lib/aarch64/jli/libjli.so -./jre/lib/aarch64/jvm.cfg -./jre/lib/aarch64/libattach.so -./jre/lib/aarch64/libavplugin-ffmpeg-58.so -./jre/lib/aarch64/libawt_headless.so -./jre/lib/aarch64/libawt.so -./jre/lib/aarch64/libawt_xawt.so -./jre/lib/aarch64/libdecora_sse.so -./jre/lib/aarch64/libdt_socket.so -./jre/lib/aarch64/libfontmanager.so -./jre/lib/aarch64/libfxplugins.so -./jre/lib/aarch64/libglassgtk2.so -./jre/lib/aarch64/libglassgtk3.so -./jre/lib/aarch64/libglass.so -./jre/lib/aarch64/libgstreamer-lite.so -./jre/lib/aarch64/libhprof.so -./jre/lib/aarch64/libinstrument.so -./jre/lib/aarch64/libj2gss.so -./jre/lib/aarch64/libj2pcsc.so -./jre/lib/aarch64/libj2pkcs11.so -./jre/lib/aarch64/libjaas_unix.so -./jre/lib/aarch64/libjava_crw_demo.so -./jre/lib/aarch64/libjavafx_font_freetype.so -./jre/lib/aarch64/libjavafx_font_pango.so -./jre/lib/aarch64/libjavafx_font.so -./jre/lib/aarch64/libjavafx_iio.so -./jre/lib/aarch64/libjava.so -./jre/lib/aarch64/libjawt.so -./jre/lib/aarch64/libjdwp.so -./jre/lib/aarch64/libjfxmedia.so -./jre/lib/aarch64/libjfxwebkit.so -./jre/lib/aarch64/libjpeg.so -./jre/lib/aarch64/libjsdt.so -./jre/lib/aarch64/libjsig.so -./jre/lib/aarch64/libjsoundalsa.so -./jre/lib/aarch64/libjsound.so -./jre/lib/aarch64/liblcms.so -./jre/lib/aarch64/libmanagement.so -./jre/lib/aarch64/libmlib_image.so -./jre/lib/aarch64/libnet.so -./jre/lib/aarch64/libnio.so -./jre/lib/aarch64/libnpt.so -./jre/lib/aarch64/libprism_common.so -./jre/lib/aarch64/libprism_es2.so -./jre/lib/aarch64/libprism_sw.so -./jre/lib/aarch64/libsaproc.so -./jre/lib/aarch64/libsctp.so -./jre/lib/aarch64/libsplashscreen.so -./jre/lib/aarch64/libsunec.so -./jre/lib/aarch64/libunpack.so -./jre/lib/aarch64/libverify.so -./jre/lib/aarch64/libzip.so -./jre/lib/aarch64/server/libjvm.so -./jre/lib/aarch64/server/Xusage.txt -./jre/lib/calendars.properties -./jre/lib/charsets.jar -./jre/lib/classlist -./jre/lib/cmm/CIEXYZ.pf -./jre/lib/cmm/GRAY.pf -./jre/lib/cmm/LINEAR_RGB.pf -./jre/lib/cmm/PYCC.pf -./jre/lib/cmm/sRGB.pf -./jre/lib/content-types.properties -./jre/lib/currency.data -./jre/lib/ext/cldrdata.jar -./jre/lib/ext/dnsns.jar -./jre/lib/ext/jaccess.jar -./jre/lib/ext/jfxrt.jar 
-./jre/lib/ext/localedata.jar -./jre/lib/ext/meta-index -./jre/lib/ext/nashorn.jar -./jre/lib/ext/sunec.jar -./jre/lib/ext/sunjce_provider.jar -./jre/lib/ext/sunpkcs11.jar -./jre/lib/ext/zipfs.jar -./jre/lib/flavormap.properties -./jre/lib/fontconfig.Euler.properties -./jre/lib/fontconfig.properties -./jre/lib/fontconfig.Ubuntu.properties -./jre/lib/fonts/Roboto-Regular.ttf -./jre/lib/hijrah-config-umalqura.properties -./jre/lib/images/cursors/cursors.properties -./jre/lib/images/cursors/invalid32x32.gif -./jre/lib/images/cursors/motif_CopyDrop32x32.gif -./jre/lib/images/cursors/motif_CopyNoDrop32x32.gif -./jre/lib/images/cursors/motif_LinkDrop32x32.gif -./jre/lib/images/cursors/motif_LinkNoDrop32x32.gif -./jre/lib/images/cursors/motif_MoveDrop32x32.gif -./jre/lib/images/cursors/motif_MoveNoDrop32x32.gif -./jre/lib/javafx-mx.jar -./jre/lib/javafx.properties -./jre/lib/jce.jar -./jre/lib/jexec -./jre/lib/jfr/default.jfc -./jre/lib/jfr.jar -./jre/lib/jfr/profile.jfc -./jre/lib/jfxswt.jar -./jre/lib/jsse.jar -./jre/lib/jvm.hprof.txt -./jre/lib/logging.properties -./jre/lib/management-agent.jar -./jre/lib/management/jmxremote.access -./jre/lib/management/jmxremote.password.template -./jre/lib/management/management.properties -./jre/lib/management/snmp.acl.template -./jre/lib/meta-index -./jre/lib/net.properties -./jre/lib/psfontj2d.properties -./jre/lib/psfont.properties.ja -./jre/lib/resources.jar -./jre/lib/rt.jar -./jre/lib/security/blacklisted.certs -./jre/lib/security/cacerts -./jre/lib/security/java.policy -./jre/lib/security/java.security -./jre/lib/security/policy/limited/local_policy.jar -./jre/lib/security/policy/limited/US_export_policy.jar -./jre/lib/security/policy/unlimited/local_policy.jar -./jre/lib/security/policy/unlimited/US_export_policy.jar -./jre/lib/sound.properties -./jre/lib/tzdb.dat -./jre/LICENSE -./jre/THIRD_PARTY_README -[client] -./bin/gsql -./bin/gs_dump -./bin/gs_dumpall -./bin/gs_restore -./bin/gs_basebackup -./bin/gs_probackup -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/postgresql/euc_kr_and_mic.so -./lib/postgresql/utf8_and_uhc.so -./lib/postgresql/euc_tw_and_big5.so -./lib/postgresql/cyrillic_and_mic.so -./lib/postgresql/utf8_and_johab.so -./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/config/install-sh -./lib/postgresql/euc_cn_and_mic.so -./lib/postgresql/latin_and_mic.so -./lib/postgresql/utf8_and_sjis2004.so -./lib/postgresql/utf8_and_euc_jp.so -./lib/postgresql/utf8_and_sjis.so -./lib/postgresql/utf8_and_cyrillic.so -./lib/postgresql/utf8_and_euc_kr.so -./lib/postgresql/ascii_and_mic.so -./lib/postgresql/utf8_and_iso8859_1.so -./lib/postgresql/euc_jp_and_sjis.so -./lib/postgresql/dict_snowball.so -./lib/postgresql/utf8_and_ascii.so -./lib/postgresql/utf8_and_euc_tw.so -./lib/postgresql/utf8_and_iso8859.so -./lib/postgresql/utf8_and_win.so -./lib/postgresql/utf8_and_euc_cn.so -./lib/postgresql/utf8_and_gbk.so -./lib/postgresql/utf8_and_euc2004.so -./lib/postgresql/utf8_and_big5.so -./lib/postgresql/java/pljava.jar -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 
-./lib/libssl.so
-./lib/libssl.so.1.1
-./lib/libpgport_tool.so
-./lib/libpgport_tool.so.1
-./lib/libgssapi_krb5_gauss.so
-./lib/libgssapi_krb5_gauss.so.2
-./lib/libgssapi_krb5_gauss.so.2.2
-./lib/libgssrpc_gauss.so
-./lib/libgssrpc_gauss.so.4
-./lib/libgssrpc_gauss.so.4.2
-./lib/libk5crypto_gauss.so
-./lib/libk5crypto_gauss.so.3
-./lib/libk5crypto_gauss.so.3.1
-./lib/libkrb5support_gauss.so
-./lib/libkrb5support_gauss.so.0
-./lib/libkrb5support_gauss.so.0.1
-./lib/libkrb5_gauss.so
-./lib/libkrb5_gauss.so.3
-./lib/libkrb5_gauss.so.3.3
-./lib/libcom_err_gauss.so
-./lib/libcom_err_gauss.so.3
-./lib/libcom_err_gauss.so.3.0
-[libpq]
-./lib/libpq.a
-./lib/libpq.so
-./lib/libpq.so.5
-./lib/libpq.so.5.5
-./lib/libpq_ce.so
-./lib/libpq_ce.so.5
-./lib/libpq_ce.so.5.5
-./lib/libgauss_cl_jni.so
-./lib/libconfig.so
-./lib/libconfig.so.4
-./lib/libcrypto.so
-./lib/libcrypto.so.1.1
-./lib/libstdc++.so.6
-./lib/libssl.so
-./lib/libssl.so.1.1
-./lib/libpgport_tool.so
-./lib/libpgport_tool.so.1
-./lib/libgssapi_krb5_gauss.so
-./lib/libgssapi_krb5_gauss.so.2
-./lib/libgssapi_krb5_gauss.so.2.2
-./lib/libgssrpc_gauss.so
-./lib/libgssrpc_gauss.so.4
-./lib/libgssrpc_gauss.so.4.2
-./lib/libk5crypto_gauss.so
-./lib/libk5crypto_gauss.so.3
-./lib/libk5crypto_gauss.so.3.1
-./lib/libkrb5support_gauss.so
-./lib/libkrb5support_gauss.so.0
-./lib/libkrb5support_gauss.so.0.1
-./lib/libkrb5_gauss.so
-./lib/libkrb5_gauss.so.3
-./lib/libkrb5_gauss.so.3.3
-./lib/libcom_err_gauss.so
-./lib/libcom_err_gauss.so.3
-./lib/libcom_err_gauss.so.3.0
-./include/gs_thread.h
-./include/gs_threadlocal.h
-./include/postgres_ext.h
-./include/libpq-fe.h
-./include/libpq-events.h
-./include/libpq/libpq-fs.h
-[version]
-V500R002C00
-[header]
-./include/libpq-fe.h
-./include/postgres_ext.h
-./include/gs_thread.h
-./include/gs_threadlocal.h
-./include/pg_config.h
-./include/pg_config_manual.h
-./include/pg_config_os.h
-./include/cm_config.h
-./include/c.h
-./include/port.h
-./include/cm_msg.h
-./include/cm_c.h
-./include/cm_misc.h
-./include/libpq-int.h
-./include/pqcomm.h
-./include/pqexpbuffer.h
-./include/xlogdefs.h
-./include/cm-libpq-fe.h
diff --git a/build/script/package_internal.sh b/build/script/package_internal.sh
deleted file mode 100755
index 9a6b3bae9..000000000
--- a/build/script/package_internal.sh
+++ /dev/null
@@ -1,1552 +0,0 @@
-#!/bin/bash
-#######################################################################
-# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
-# description: Compile and pack MPPDB
-# Return 0 means OK.
-# Return 1 means failed.
-# version: 2.0
-# date: 2020-08-08
-#######################################################################
-
-##default package type is all
-declare package_type='all'
-declare install_package_format='tar'
-declare optimized='true'
-
-declare product_mode='opengauss'
-
-##default version mode is release
-declare version_mode='release'
-declare binarylib_dir='None'
-declare separate_symbol='on'
-#detect platform information.
-PLATFORM=32
-bit=$(getconf LONG_BIT)
-if [ "$bit" -eq 64 ]; then
-    PLATFORM=64
-fi
-
-#get OS distributed version.
-kernel=""
-version=""
-if [ -f "/etc/euleros-release" ]; then
-    kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
-elif [ -f "/etc/openEuler-release" ]; then
-    kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
-elif [ -f "/etc/centos-release" ]; then
-    kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
-elif [ -f "/etc/kylin-release" ]; then
-    kernel=$(cat /etc/kylin-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/kylin-release | awk '{print $6}' | tr A-Z a-z)
-else
-    kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z)
-    version=$(lsb_release -r | awk -F ' ' '{print $2}')
-fi
-
-if [ X"$kernel" == X"euleros" ]; then
-    dist_version="EULER"
-elif [ X"$kernel" == X"centos" ]; then
-    dist_version="CENTOS"
-elif [ X"$kernel" == X"openeuler" ]; then
-    dist_version="OPENEULER"
-elif [ X"$kernel" == X"kylin" ]; then
-    dist_version="KYLIN"
-else
-    echo "Only support EulerOS, OPENEULER(aarch64), CentOS and Kylin platforms."
-    echo "Kernel is $kernel"
-    exit 1
-fi
-
-export MAKE_JOBS=$(($(cat /proc/cpuinfo | grep processor | wc -l) * 2))
-echo "[makemppdb] $(date +%y-%m-%d' '%T): Make jobs number : ${MAKE_JOBS}"
-
-show_package=false
-
-gcc_version="7.3.0"
-##add platform architecture information
-PLATFORM_ARCH=$(uname -p)
-if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
-    if [ "$dist_version" == "EULER" ]; then
-        ARCHITECTURE_EXTRA_FLAG=_euleros2.0_sp8_$PLATFORM_ARCH
-        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
-    elif [ "$dist_version" == "CENTOS" ]; then
-        ARCHITECTURE_EXTRA_FLAG=_centos_7.5_$PLATFORM_ARCH
-        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
-    elif [ "$dist_version" == "OPENEULER" ]; then
-        ARCHITECTURE_EXTRA_FLAG=_openeuler_$PLATFORM_ARCH
-        # it may be risky to enable 'ARM_LSE' for all ARM CPUs, but we assume our CPUs are no older than ARMv8.1
-        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
-    elif [ "$dist_version" == "KYLIN" ]; then
-        ARCHITECTURE_EXTRA_FLAG=_kylinv10_sp1_$PLATFORM_ARCH
-        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
-    else
-        ARCHITECTURE_EXTRA_FLAG=_$PLATFORM_ARCH
-    fi
-    release_file_list="mpp_release_list_${kernel}_${PLATFORM_ARCH}"
-else
-    if [ "$dist_version" == "KYLIN" ]; then
-        ARCHITECTURE_EXTRA_FLAG=_kylinv10_sp1_${PLATFORM_ARCH}_intel
-    else
-        ARCHITECTURE_EXTRA_FLAG=_euleros2.0_sp5_${PLATFORM_ARCH}
-    fi
-    release_file_list="mpp_release_list_${kernel}"
-fi
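Note: the derived `release_file_list` name is what selects the sectioned manifest shown earlier in this diff. A minimal sketch of the naming rules above, with illustrative platform values (the opengauss rewrite is taken from the option handling further down):

```
# Illustration only: how the release list file name is derived.
PLATFORM_ARCH=aarch64
kernel=openeuler
release_file_list="mpp_release_list_${kernel}_${PLATFORM_ARCH}"
# -> mpp_release_list_openeuler_aarch64
# For product_mode=opengauss the name is later rewritten (see below) to:
echo "${release_file_list}_single" | sed -e 's/mpp_release/opengauss_release/'
# -> opengauss_release_list_openeuler_aarch64_single
```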
-
-##default install version storage path
-declare mppdb_version='GaussDB Kernel'
-declare mppdb_name_for_package="$(echo ${mppdb_version} | sed 's/ /-/g')"
-declare package_path='./'
-declare version_number=''
-declare make_check='off'
-declare zip_package='on'
-declare extra_config_opt=''
-
-#######################################################################
-##print the version of mppdb
-#######################################################################
-function print_version()
-{
-    echo "$version_number"
-}
-#######################################################################
-## print help information
-#######################################################################
-function print_help()
-{
-    echo "Usage: $0 [OPTION]
-    -h|--help                         show help information.
-    -V|--version                      show version information.
-    -f|--file                         provide the released file list.
-    -3rd|--binarylib_dir              the directory of third-party binarylibs.
-    -pkg|--package                    provide the type of installation packages; valid values are all, server, jdbc, odbc, agent.
-    -pm                               product mode; valid values are single, multiple or opengauss, the default value is opengauss.
-    -p|--path                         generated package storage path.
-    -t                                packaging format; valid values are tar or rpm, the default value is tar.
-    -m|--version_mode                 valid values are debug, release or memcheck, the default value is release.
-    -mc|--make_check                  valid values are on or off, the default value is off.
-    -s|--symbol_mode                  whether to separate symbols in debug mode, the default value is on.
-    -cv|--gcc_version                 gcc-version option: 7.3.0.
-    -nopt|--not_optimized             on Kunpeng platforms without LSE (e.g. the 1616), build without LSE optimization.
-    -nopkg|--no_package               do not zip binaries into packages.
-    -co|--config_opt                  pass extra configure options.
-    -S|--show_pkg                     show the server package name and Bin name based on the current configuration.
-"
-}
-
-if [ $# = 0 ] ; then
-    echo "missing option"
-    print_help
-    exit 1
-fi
-
-SCRIPT_PATH=${0}
-FIRST_CHAR=$(expr substr "$SCRIPT_PATH" 1 1)
-if [ "$FIRST_CHAR" = "/" ]; then
-    SCRIPT_PATH=${0}
-else
-    SCRIPT_PATH="$(pwd)/${SCRIPT_PATH}"
-fi
-SCRIPT_NAME=$(basename $SCRIPT_PATH)
-SCRIPT_DIR=$(dirname "${SCRIPT_PATH}")
-SCRIPT_DIR=$(dirname "$SCRIPT_DIR")
-
-if [ ! -f "$SCRIPT_DIR/$SCRIPT_NAME" ] ; then
-    SCRIPT_DIR=$SCRIPT_DIR/script
-fi
-
-package_path=$SCRIPT_DIR
-#######################################################################
-##read version from $release_file_list
-#######################################################################
-function read_mpp_version()
-{
-    cd $SCRIPT_DIR
-    local head=$(cat $release_file_list | grep "\[version\]" -n | awk -F: '{print $1}')
-    if [ ! -n "$head" ]; then
-        echo "error: cannot find [version] in the $release_file_list file"
-        exit 1
-    fi
-    local tail=$(cat $release_file_list | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
-    if [ ! -n "$tail" ]; then
-        local all=$(cat $release_file_list | wc -l)
-        let tail=$all+1-$head
-    fi
-    version_number=$(cat $release_file_list | awk "NR==$head+1,NR==$tail+$head-1")
-    echo "${mppdb_name_for_package}-${version_number}">version.cfg
-    #the number is read automatically from kernel globals.cpp, no need to change it here
-}
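The `[version]` lookup above leans on `grep -n`/`sed`/`awk` line arithmetic. For reference, a self-contained sketch of the same section extraction (not part of the deleted script), assuming only the `[section]` manifest format shown earlier in this diff ([client], [libpq], [version], [header]):

```
# Sketch: print the body of one "[name]" section from a sectioned manifest.
read_section() {
    local file=$1 section=$2
    awk -v sec="[$section]" '
        $0 == sec       { in_sec = 1; next }   # section header found
        /^\[/ && in_sec { exit }               # next section starts, stop
        in_sec          { print }              # body line of the section
    ' "$file"
}
# Example: read_section mpp_release_list_openeuler_aarch64 version
# would print V500R002C00 for the manifest shown above.
```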
-#######################################################################
-##first read mppdb version
-#######################################################################
-
-#######################################################################
-##read command line parameters
-#######################################################################
-while [ $# -gt 0 ]; do
-    case "$1" in
-        -h|--help)
-            print_help
-            exit 1
-            ;;
-        -V|--version)
-            print_version
-            exit 1
-            ;;
-        -f|--file)
-            if [ "$2"X = X ]; then
-                echo "no given file name"
-                exit 1
-            fi
-            release_file_list=$2
-            shift 2
-            ;;
-        -3rd|--binarylib_dir)
-            if [ "$2"X = X ]; then
-                echo "no given binarylib directory values"
-                exit 1
-            fi
-            binarylib_dir=$2
-            shift 2
-            ;;
-        -p|--path)
-            if [ "$2"X = X ]; then
-                echo "no given generation package path"
-                exit 1
-            fi
-            package_path=$2
-            if [ ! -d "$package_path" ]; then
-                mkdir -p $package_path
-            fi
-            shift 2
-            ;;
-        -pkg)
-            if [ "$2"X = X ]; then
-                echo "no given package type name"
-                exit 1
-            fi
-            package_type=$2
-            shift 2
-            ;;
-        -pm)
-            if [ "$2"X = X ]; then
-                echo "no given product mode"
-                exit 1
-            fi
-            product_mode=$2
-            shift 2
-            ;;
-        -s|--symbol_mode)
-            if [ "$2"X = X ]; then
-                echo "no given symbol parameter"
-                exit 1
-            fi
-            separate_symbol=$2
-            shift 2
-            ;;
-        -t)
-            if [ "$2"X = X ]; then
-                echo "no given installation package format values"
-                exit 1
-            fi
-            if [ "$2" = rpm ]; then
-                echo "error: rpm packages are not supported yet!"
-                exit 1
-            fi
-            install_package_format=$2
-            shift 2
-            ;;
-        -m|--version_mode)
-            if [ "$2"X = X ]; then
-                echo "no given version number values"
-                exit 1
-            fi
-            version_mode=$2
-            shift 2
-            ;;
-        -mc|--make_check)
-            if [ "$2"X = X ]; then
-                echo "no given make check values"
-                exit 1
-            fi
-            make_check=$2
-            shift 2
-            ;;
-        -cv|--gcc_version)
-            if [ "$2"X = X ]; then
-                echo "no given gcc version"
-                exit 1
-            fi
-            gcc_version=$2
-            shift 2
-            ;;
-        -nopt|--not_optimized)
-            optimized='false'
-            shift 1
-            ;;
-        -nopkg|--no_package)
-            zip_package='off'
-            shift 1
-            ;;
-        -co|--config_opt)
-            if [ "$2"X = X ]; then
-                echo "no extra configure options provided"
-                exit 1
-            fi
-            extra_config_opt=$2
-            shift 2
-            ;;
-        -S|--show_pkg)
-            show_package=true
-            shift
-            ;;
-        *)
-            echo "Internal Error: option processing error: $1" 1>&2
-            echo "please input the right parameter; the following command may help you"
-            echo "./package_internal.sh --help or ./package_internal.sh -h"
-            exit 1
-    esac
-done
-
-if [ "$product_mode"x == "single"x ]; then
-    release_file_list="${release_file_list}_${product_mode}"
-fi
-
-if [ "$product_mode"x == "opengauss"x ]; then
-    release_file_list=$(echo ${release_file_list}_single | sed -e 's/mpp_release/opengauss_release/')
-fi
-
-read_mpp_version
-
-if [ "$gcc_version" = "7.3.0" ]; then
-    gcc_version=${gcc_version:0:3}
-else
-    echo "Unknown gcc version $gcc_version"
-    exit 1
-fi
-
-#######################################################################
-## declare all package names
-#######################################################################
-declare version_string="${mppdb_name_for_package}-${version_number}"
-declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
-declare server_package_name="${package_pre_name}.${install_package_format}.gz"
-declare agent_package_name="${package_pre_name}-AGENT.${install_package_format}.gz"
-declare gsql_package_name="${mppdb_name_for_package}-${version_number}-${dist_version}-${PLATFORM}bit-gsql.${install_package_format}.gz"
-declare client_package_name="${package_pre_name}-ClientTools.${install_package_format}.gz"
-declare libpq_package_name="${package_pre_name}-Libpq.${install_package_format}.gz"
-declare gds_package_name="${package_pre_name}-Gds.${install_package_format}.gz"
-declare symbol_package_name="${package_pre_name}-symbol.${install_package_format}.gz"
-declare inspection_package_name="${version_string}-Inspection.tar.gz"
-
-echo "[makemppdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}"
-ROOT_DIR=$(dirname "$SCRIPT_DIR")
-ROOT_DIR=$(dirname "$ROOT_DIR")
-PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh")
-if [ "${PLAT_FORM_STR}"x == "Failed"x ]
-then
-    echo "Only support EulerOS, openEuler and CentOS platforms."
- exit 1; -fi -PG_REG_TEST_ROOT="${ROOT_DIR}" -ROACH_DIR="${ROOT_DIR}/distribute/bin/roach" -MPPDB_DECODING_DIR="${ROOT_DIR}/contrib/mppdb_decoding" -PMK_SCHEMA="${ROOT_DIR}/script/pmk_schema.sql" -declare LOG_FILE="${ROOT_DIR}/build/script/makemppdb_pkg.log" -declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install" -declare ERR_MKGS_FAILED=1 -declare MKGS_OK=0 -if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then - BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}" - PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}" - BINARYLIBS_PATH="${binarylib_dir}/dependency" -else - BUILD_TOOLS_PATH="${ROOT_DIR}/buildtools/${PLAT_FORM_STR}" - PLATFORM_PATH="${ROOT_DIR}/platform/${PLAT_FORM_STR}" - BINARYLIBS_PATH="${ROOT_DIR}/binarylibs" -fi - -if [ "$product_mode"x == "single"x ] || [ "$product_mode"x == "opengauss"x ]; then - declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql" -else - declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/distribute/include/catalog/upgrade_sql" -fi - -if [ "$product_mode"x == "single"x ]; then - declare UPGRADE_PRIV_SQL_DIR="${ROOT_DIR}/privategauss/src/include/catalog/upgrade_sql" -fi - -gaussdb_200_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB200" -gaussdb_300_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB300" -gaussdb_200_standard_file="${binarylib_dir}/buildtools/license_control/gaussdb.license.GaussDB200_Standard" -gaussdb_version_file="${ROOT_DIR}/src/gausskernel/process/postmaster/gaussdb_version.cpp" - -ccache -V >/dev/null 2>&1 && USE_CCACHE="ccache " ENABLE_CCACHE="--enable-ccache" -export CC="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc" -export CXX="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++" -export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH -export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH -jdkpath=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk -if [ ! -d ${jdkpath} ]; then - jdkpath=${binarylib_dir}/platform/openjdk8/${PLATFORM_ARCH}/jdk -fi -export JAVA_HOME=${jdkpath} - -declare p7zpath="${BUILD_TOOLS_PATH}/p7z/bin" -################################### -# build parameter about enable-llt -################################## -COMPLIE_TYPE="comm" -echo "[makemppdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}" -################################### -# get version number from globals.cpp -################################## -function read_mpp_number() -{ - global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp" - version_name="GRAND_VERSION_NUM" - version_num="" - line=$(cat $global_kernal | grep ^const* | grep $version_name) - version_num1=${line#*=} - #remove the symbol; - version_num=$(echo $version_num1 | tr -d ";") - #remove the blank - version_num=$(echo $version_num) - - if echo $version_num | grep -qE '^92[0-9]+$' - then - # get the last three number - latter=${version_num:2} - echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg - else - echo "Cannot get the version number from globals.cpp." - exit 1 - fi -} -read_mpp_number - -####################################################################### -# Print log. 
-####################################################################### -log() -{ - echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" - echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1 -} - -####################################################################### -# print log and exit. -####################################################################### -die() -{ - log "$@" - echo "$@" - exit $ERR_MKGS_FAILED -} - -####################################################################### -## Check the installation package production environment -####################################################################### -function mpp_pkg_pre_check() -{ - if [ -d "$BUILD_DIR" ]; then - rm -rf $BUILD_DIR - fi - if [ -d "$LOG_FILE" ]; then - rm -rf $LOG_FILE - fi - - if [ X"$package_type" == X"server" -o X"$package_type" == X"all" ] && [ X"$zip_package" = X"on" ] && [ ! -d "${ROOT_DIR}"/script/script/gspylib/ ]; then - printf "\033[31mCan not found OM script directory. solution steps:\n\033[0m" - echo " 1) git clone git@isource-dg.huawei.com:2222/GaussDB_Kernel/GaussDB_Kernel_OM.git -b $(git branch | grep '*' | sed -e 's/*//g' -e 's/^ //g')" - echo " 2) if you do not have the permission to git it, please call CMO " - echo " 3) rm -rf ${ROOT_DIR}/script && ln -s /GaussDB_Kernel_OM ${ROOT_DIR}/script" - echo "and then try again!" - exit 1 - fi -} - -####################################################################### -# Install all SQL files from distribute/include/catalog/upgrade_sql -# to INSTALL_DIR/bin/script/upgrade_sql. -# Package all SQL files and then verify them with SHA256. -####################################################################### -function package_upgrade_sql() -{ - echo "Begin to install upgrade_sql files..." - UPGRADE_SQL_TAR="upgrade_sql.tar.gz" - UPGRADE_SQL_SHA256="upgrade_sql.sha256" - MULTIP_IGNORE_VERSION=(289 294 296) - cp -r "${UPGRADE_SQL_DIR}" . - if [ "$product_mode"x == "single"x ]; then - cp -r "${UPGRADE_PRIV_SQL_DIR}" . - fi - - [ $? -ne 0 ] && die "Failed to cp upgrade_sql files" - if [ "$product_mode"x == "multiple"x ]; then - for version_num in ${MULTIP_IGNORE_VERSION[*]} - do - find ./upgrade_sql -name *${version_num}* | xargs rm -rf - done - fi - tar -czf ${UPGRADE_SQL_TAR} upgrade_sql - [ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}" - rm -rf ./upgrade_sql > /dev/null 2>&1 - - sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}" - [ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}" - - chmod 600 ${UPGRADE_SQL_TAR} - chmod 600 ${UPGRADE_SQL_SHA256} - - echo "Successfully packaged upgrade_sql files." -} -####################################################################### -# get cluster version from src/include/pg_config.h by 'DEF_GS_VERSION ' -# then replace OM tools version -####################################################################### -function replace_omtools_version() -{ - local gs_version=$(grep DEF_GS_VERSION ${PG_REG_TEST_ROOT}/src/include/pg_config.h | awk -F '"' '{print $2}') - echo $gs_version | grep -e "${mppdb_version}.*build.*compiled.*" > /dev/null 2>&1 - if [ $? -ne 0 ]; then - die "Failed to get gs_version from pg_config.h." - fi - - if [ -f "$1"/script/gspylib/common/VersionInfo.py ] ; then - sed -i -e "s/COMMON_VERSION = \"Gauss200 OM VERSION\"/COMMON_VERSION = \"$(echo ${gs_version})\"/g" -e "s/__GAUSS_PRODUCT_STRING__/$mppdb_version/g" $1/script/gspylib/common/VersionInfo.py - if [ $? 
-#######################################################################
-# get cluster version from src/include/pg_config.h by 'DEF_GS_VERSION '
-# then replace OM tools version
-#######################################################################
-function replace_omtools_version()
-{
-    local gs_version=$(grep DEF_GS_VERSION ${PG_REG_TEST_ROOT}/src/include/pg_config.h | awk -F '"' '{print $2}')
-    echo $gs_version | grep -e "${mppdb_version}.*build.*compiled.*" > /dev/null 2>&1
-    if [ $? -ne 0 ]; then
-        die "Failed to get gs_version from pg_config.h."
-    fi
-
-    if [ -f "$1"/script/gspylib/common/VersionInfo.py ] ; then
-        sed -i -e "s/COMMON_VERSION = \"Gauss200 OM VERSION\"/COMMON_VERSION = \"$(echo ${gs_version})\"/g" -e "s/__GAUSS_PRODUCT_STRING__/$mppdb_version/g" $1/script/gspylib/common/VersionInfo.py
-        if [ $? -ne 0 ]; then
-            die "Failed to replace OM tools version number."
-        fi
-    else
-        sed -i "s/COMMON_VERSION = \"Gauss200 OM VERSION\"/COMMON_VERSION = \"$(echo ${gs_version})\"/g" $1/script/gspylib/os/gsOSlib.py
-        if [ $? -ne 0 ]; then
-            die "Failed to replace OM tools version number."
-        fi
-    fi
-
-    grep 'CATALOG_VERSION_NO' ${PG_REG_TEST_ROOT}/src/include/catalog/catversion.h >/dev/null 2>&1
-    if [ $? -ne 0 ]; then
-        die "Failed to get catalog_version from catversion.h."
-    fi
-
-    catalog_version=$(grep 'CATALOG_VERSION_NO' ${PG_REG_TEST_ROOT}/src/include/catalog/catversion.h | uniq | awk -F ' ' '{print $NF}')
-    if [ x"$catalog_version" == x"" ]; then
-        die "Failed to get catalog_version from catversion.h."
-    fi
-
-    sed -i "s/TABLESPACE_VERSION_DIRECTORY = .*/TABLESPACE_VERSION_DIRECTORY = \"PG_9.2_$(echo ${catalog_version})\"/g" $1/script/gspylib/common/Common.py
-    if [ $? -ne 0 ]; then
-        die "Failed to replace catalog_version number."
-    fi
-
-}
-#######################################################################
-# get cluster version from src/include/pg_config.h by 'DEF_GS_VERSION '
-# then replace ODBC version
-#######################################################################
-function replace_odbc_version()
-{
-    local gs_version=$(grep DEF_GS_VERSION ${PG_REG_TEST_ROOT}/src/include/pg_config.h | awk -F '"' '{print $2}')
-    echo $gs_version | grep -e "${mppdb_version}.*build.*compiled.*" > /dev/null 2>&1
-
-    if [ $? -ne 0 ]; then
-        die "Failed to get gs_version from pg_config.h."
-    fi
-
-    if [ -f "$1"/config.h ] ; then
-        sed -i "/^\\s*#define\\s*DEF_GS_VERSION.*$/d" $1/config.h
-        echo "#define DEF_GS_VERSION \"$(echo ${gs_version})\"">>$1/config.h
-        if [ $? -ne 0 ]; then
-            die "Failed to replace odbc tools version number."
-        fi
-    else
-        echo "Failed to replace odbc tools: can not find file $1/config.h."
-    fi
-}
-#######################################################################
-##install the gaussdb database and other components
-##select what to install according to the package_type variable
-#######################################################################
-function mpp_pkg_bld()
-{
-    case "$package_type" in
-        all)
-            echo "Install all"
-            install_gaussdb
-            install_inspection
-            echo "Install all success"
-            ;;
-        server)
-            install_gaussdb
-            ;;
-        gsql)
-            install_gaussdb
-            ;;
-        libpq)
-            install_gaussdb
-            ;;
-        gds)
-            install_gaussdb
-            ;;
-        inspection)
-            install_inspection
-            ;;
-        *)
-            echo "Internal Error: option processing error: $package_type"
-            echo "please input a right parameter value: all, server, libpq, gds, gsql or inspection"
-            exit 1
-    esac
-}
-#######################################################################
-##install inspection tool scripts
-#######################################################################
-function install_inspection()
-{
-    echo "packaging inspection..."
- rm -rf ${package_path}/inspection && - mkdir -p ${package_path}/inspection && - - cp -f ${script_dir}/script/gs_check ${package_path}/inspection/ && - cp -rf ${script_dir}/script/gspylib/ ${package_path}/inspection/ && - - mkdir -p ${package_path}/inspection/gspylib/inspection/output/log/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/output/nodes/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/asn1crypto/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/bcrypt/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/cryptography/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/cffi/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/enum/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/idna/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/nacl/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/pyasn1/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/kafka-python/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/pycparser/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/OpenSSL/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/psutil/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/netifaces/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/paramiko/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/paste/ && - mkdir -p ${package_path}/inspection/gspylib/inspection/lib/bottle/ && - - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/asn1crypto/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bcrypt/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cffi/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cryptography/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/enum/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/idna/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/nacl/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pyasn1/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/kafka-python/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pycparser/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/OpenSSL/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/ipaddress.py ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/six.py ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.py ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS2 ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf 
${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS4 ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/psutil/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/netifaces/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paramiko/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paste/ ${package_path}/inspection/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bottle/ ${package_path}/inspection/gspylib/inspection/lib/ - - mv -f ${package_path}/inspection/gspylib/inspection/readme.txt ${package_path}/inspection/ - - if [ $? -ne 0 ]; then - die "cp -rf ${script_dir}/script/inspection/* ${package_path}/inspection/inspection/ failed" - fi - - find ${package_path}/inspection/ -name .svn -type d -print0 | xargs -0 rm -rf - find ${package_path}/inspection/ -name d2utmp* -print0 | xargs -0 rm -rf - chmod -R +x ${package_path}/inspection/ - - cd ${package_path}/inspection - select_package_command - $package_command "${inspection_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${inspection_package_name} failed" - fi - mv ${inspection_package_name} ${package_path} - rm -rf ${package_path}/inspection/ - echo "install $pkgname tools is ${inspection_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" -} -####################################################################### -##install gaussdb database contained server,client and libpq -####################################################################### -function install_gaussdb() -{ - # Generate the license control file, and set md5sum string to the code. - echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1 - make_license_control - echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1 - #putinto to Code dir - cd "$ROOT_DIR" - #echo "$ROOT_DIR/Code" - if [ $? -ne 0 ]; then - die "change dir to $SRC_DIR failed." - fi - - if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then - echo "WARNING: do not separate symbol in debug mode!" - fi - - if [ "$product_mode" != "single" ] && [ "$product_mode" != "multiple" ] && [ "$product_mode" != "opengauss" ]; then - die "the product mode can only be multiple, single, or opengauss!" - fi - - binarylibs_path=${ROOT_DIR}/binarylibs - if [ "${binarylib_dir}"x != "None"x ]; then - binarylibs_path=${binarylib_dir} - fi - - #configure - make distclean -sj >> "$LOG_FILE" 2>&1 - - echo "Begin configure." 
>> "$LOG_FILE" 2>&1 - chmod 755 configure - - if [ "$product_mode"x == "opengauss"x ]; then - enable_readline="--with-readline" - else - enable_readline="--without-readline" - fi - shared_opt="--gcc-version=${gcc_version}.0 --prefix="${BUILD_DIR}" --3rd=${binarylibs_path} --enable-thread-safety ${enable_readline} --without-zlib" - if [ "$product_mode"x == "opengauss"x ]; then - if [ "$version_mode"x == "release"x ]; then - # configure -D__USE_NUMA -D__ARM_LSE with arm opengauss mode - if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then - echo "configure -D__USE_NUMA -D__ARM_LSE with arm opengauss mode" - GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE" - fi - - ./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1 - elif [ "$version_mode"x == "memcheck"x ]; then - ./configure $shared_opt CFLAGS="-O0" --enable-mot --enable-debug --enable-cassert --enable-memory-check CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1 - elif [ "$version_mode"x == "fiurelease"x ]; then - ./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --disable-jemalloc CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1 - elif [ "$version_mode"x == "fiudebug"x ]; then - ./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert --disable-jemalloc CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1 - elif [ "$version_mode"x == "mini"x ]; then - ./configure $shared_opt CFLAGS="-O2 ${GAUSSDB_EXTRA_FLAGS}" --disable-llvm CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1 - else - ./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1 - fi - fi - - if [ $? -ne 0 ]; then - die "configure failed." - fi - echo "End configure" >> "$LOG_FILE" 2>&1 - - echo "Begin make install MPPDB server" >> "$LOG_FILE" 2>&1 - make clean >> "$LOG_FILE" 2>&1 - - export GAUSSHOME=${BUILD_DIR} - export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH} - make -s -j${MAKE_JOBS} >> "$LOG_FILE" 2>&1 - make install -s -j${MAKE_JOBS} >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - make install -s -j${MAKE_JOBS} >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - make install -s -j${MAKE_JOBS} >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "make install failed." - fi - fi - fi - - cd "$ROOT_DIR/contrib/pg_upgrade_support" - make clean >> "$LOG_FILE" 2>&1 - make -sj >> "$LOG_FILE" 2>&1 - make install -sj >> "$LOG_FILE" 2>&1 - echo "End make install MPPDB" >> "$LOG_FILE" 2>&1 - - - cd "$ROOT_DIR" - if [ "${make_check}" = 'on' ]; then - echo "Begin make check MPPDB..." >> "$LOG_FILE" 2>&1 - cd ${PG_REG_TEST_ROOT} - make check -sj >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "make check MPPDB failed." - fi - echo "End make check MPPDB success." >> "$LOG_FILE" 2>&1 - fi - - ## check build specification - spec="gaussdbkernel" - if ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'GaussDB Kernel' >/dev/null 2>&1 ); then - spec="gaussdbkernel" - elif ( cat $SCRIPT_DIR/gauss.spec | grep 'PRODUCT' | grep 'openGauss' >/dev/null 2>&1 ); then - spec="opengauss" - fi - - echo "Begin make install mpp_decoding..." >> "$LOG_FILE" 2>&1 - #copy mppdb_decoding form clienttools to bin - if [ "$version_mode"x == "release"x ]; then - cd "$MPPDB_DECODING_DIR" - make >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "make install mppdb_decoding failed." - fi - echo "End make install mppdb_decoding success." 
>> "$LOG_FILE" 2>&1 - echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1 - cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so - elif [ "$version_mode"x == "memcheck"x ]; then - cd "$MPPDB_DECODING_DIR" - make >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "make install mppdb_decoding failed." - fi - echo "End make install mppdb_decoding success." >> "$LOG_FILE" 2>&1 - echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1 - cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so - else - cd "$MPPDB_DECODING_DIR" - make >> "$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "make install mppdb_decoding failed." - fi - echo "End make install mppdb_decoding success." >> "$LOG_FILE" 2>&1 - echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1 - cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so - fi - if [ $? -ne 0 ]; then - if [ "$version_mode"x == "release"x ]; then - die "cp ${MPPDB_DECODING_DIR}/mppdb_decoding ${MPPDB_DECODING_DIR}/bin/mppdb_decoding failed" - else - die "cp ${MPPDB_DECODING_DIR}/mppdb_decoding ${MPPDB_DECODING_DIR}/bin/mppdb_decoding failed" - fi - fi - - chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf - dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1 - - #back to separate_debug_symbol.sh dir - cd $SCRIPT_DIR - if [ "$version_mode" = "release" -a "$separate_symbol" = "on" -a "$zip_package" = "on" ]; then - chmod +x ./separate_debug_information.sh - ./separate_debug_information.sh - cd $SCRIPT_DIR - mv symbols.tar.gz $symbol_package_name - fi - - #back to root dir - cd $ROOT_DIR -} - -####################################################################### - -####################################################################### -# make package for gsql -####################################################################### -function make_package_gsql() -{ - # mkdir temp directory - mkdir -p gsql - mkdir -p gsql/bin - mkdir -p gsql/lib - mkdir -p gsql/gs_ktool_file - - # copy gsql and depend *.so - cp ${BUILD_DIR}/bin/gsql gsql/bin - if [ $? -ne 0 ]; then - die "copy gsql failed." - fi - - cd gsql - tar -xvf ${package_path}/${libpq_package_name} - if [ $? -ne 0 ]; then - die "unpack libpq failed." - fi - rm -f *.docx - chmod 700 ./lib/*.so* - cd .. - - cp $SCRIPT_DIR/gsql_env.sh gsql/gsql_env.sh - if [ $? -ne 0 ]; then - die "copy gsql_env.sh failed." - fi - chmod +x gsql/gsql_env.sh - - # make package - cd gsql - echo "packaging gsql..." - tar -zcf "${gsql_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "tar ${gsql_package_name} failed" - fi - mv ${gsql_package_name} ${package_path} - - # clean tmp directory - cd .. && rm -rf gsql - - echo "install $pkgname tools is ${gsql_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" 
-}
-
-#######################################################################
-##select package type according to variable package_type
-#######################################################################
-function mpp_pkg_make()
-{
-    case "$package_type" in
-        server)
-            echo "file list: $release_file_list"
-            make_package $release_file_list 'server'
-            make_package $release_file_list 'libpq'
-            make_package_gsql
-            make_package $release_file_list 'gds'
-            ;;
-        jdbc)
-            make_package $release_file_list 'jdbc'
-            ;;
-        odbc)
-            make_package $release_file_list 'odbc'
-            ;;
-        libpq)
-            make_package $release_file_list 'libpq'
-            ;;
-        gsql)
-            make_package $release_file_list 'libpq'
-            make_package_gsql
-            ;;
-    esac
-}
-declare package_command
-#######################################################################
-##select package command according to install_package_format
-#######################################################################
-function select_package_command()
-{
-
-    case "$install_package_format" in
-        tar)
-            tar='tar'
-            option=' -zcvf'
-            package_command="$tar$option"
-            ;;
-        rpm)
-            rpm='rpm'
-            option=' -i'
-            package_command="$rpm$option"
-            ;;
-    esac
-}
-
-###############################################################
-## client tools package
-## Roach                                       yes
-## sslcert                                     no
-## Data Studio                                 no
-## Database Manager                            no
-## Migration Toolkit                           no
-## Cluster Configuration Assistant (CCA)       no
-## CAT                                         no
-###############################################################
-function target_file_copy_for_non_server()
-{
-    for file in $(echo $1)
-    do
-        tar -cpf - $file | ( cd $2; tar -xpf - )
-    done
-
-    if [ "$3"x = "client"x ]; then
-        # copy Roach tools
-        mkdir -p $2/gs_roach
-        mkdir -p $2/gs_roach/script/util
-        if [ "$version_mode"x == "release"x ]; then
-            cp -r ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/release/bin ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/release/lib ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/release/share ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/release/symbols $2/gs_roach/
-            cp -f ${script_dir}/other/roach/util/GSroach* $2/gs_roach/script/util
-        else
-            cp -r ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/debug/bin ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/debug/lib ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/debug/share $2/gs_roach/
-            cp -f ${script_dir}/other/roach/util/GSroach* $2/gs_roach/script/util
-        fi
-        if [ $? -ne 0 ]; then
-            if [ "$version_mode"x == "release"x ]; then
-                die "cp -r ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/release/* $2/gs_roach/ failed"
-            else
-                die "cp -r ${ROACH_DIR}/bin/package/${PLAT_FORM_STR}/debug/* $2/gs_roach/ failed"
-            fi
-        fi
-    fi
-}
-
-declare bin_name="${package_pre_name}.bin"
-declare sha256_name=''
-declare script_dir="${ROOT_DIR}/script"
-declare root_script=''
-declare bin_script=''
-#######################################################################
-##copy target files into the temporary directory temp
-#######################################################################
-function target_file_copy()
-{
-    ###################################################
-    # make bin package
-    ###################################################
-    for file in $(echo $1)
-    do
-        tar -cpf - $file | ( cd $2; tar -xpf - )
-    done
-
-    cd $SCRIPT_DIR
-    sed 's/^\./\.\/bin/' script_file >binfile
-    root_script=$(cat script_file)
-    sed -i '/gs_backup/d' binfile
-    sed -i '/gs_check/d' binfile
-    sed -i '/gs_checkos/d' binfile
-    sed -i '/gs_checkperf/d' binfile
-    sed -i '/gs_collector/d' binfile
-    sed -i '/gs_expand/d' binfile
-    sed -i '/gs_install/d' binfile
-    sed -i '/gs_om/d' binfile
-    sed -i '/gs_postuninstall/d' binfile
-    sed -i '/gs_preinstall/d' binfile
-    sed -i '/gs_replace/d' binfile
-    sed -i '/gs_shrink/d' binfile
-    sed -i '/gs_ssh/d' binfile
-    sed -i '/gs_sshexkey/d' binfile
-    sed -i '/gs_uninstall/d' binfile
-    sed -i '/gs_upgradectl/d' binfile
-    sed -i '/gs_lcctl/d' binfile
-    sed -i '/gs_wsr/d' binfile
-    sed -i '/gs_gucZenith/d' binfile
-
-
-    bin_script=$(cat binfile)
-    rm binfile script_file
-    cd $BUILD_DIR
-    for file in $(echo $bin_script)
-    do
-        tar -cpf - $file | ( cd $2; tar -xpf - )
-    done
-
-    # create script/gspylib/clib and put in the encrypt binary, libcrypto.so.1.1 and libssl.so.1.1
-    rm -rf $BUILD_DIR/script/gspylib/clib
-    mkdir -p $BUILD_DIR/script/gspylib/clib
-    cp $BUILD_DIR/lib/libstdc++.so.6 $BUILD_DIR/script/gspylib/clib
-    cp $BUILD_DIR/lib/libssl.so.1.1 $BUILD_DIR/script/gspylib/clib
-    cp $BUILD_DIR/lib/libcrypto.so.1.1 $BUILD_DIR/script/gspylib/clib
-    cp $BUILD_DIR/bin/encrypt $BUILD_DIR/script/gspylib/clib
-
-    # copy script dir to temp path
-    cp -rf $BUILD_DIR/script/gspylib/ $2/bin/script/
-    cp -rf $BUILD_DIR/script/impl/ $2/bin/script/
-    cp -rf $BUILD_DIR/script/local/ $2/bin/script/
-
-    # clean up the files under bin/script/ that are not used
-    for x in $(ls $2/bin/script/)
-    do
-        filename="$2/bin/script/$x"
-        if [[ "$filename" = *"__init__.py" ]];then
-            continue
-        elif [ -d "$filename" ];then
-            continue
-        elif [ -f "$filename" ];then
-            rm -f "$filename"
-        fi
-    done
-
-    chmod -R +x $2/bin/script/
-
-    if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
-        # do nothing in current version
-        echo ""
-    else
-        sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
-    fi
-
-    #generate bin file
-    echo "Begin generate ${bin_name} bin file..." >> "$LOG_FILE" 2>&1
-    ${p7zpath}/7z a -t7z -sfx "${bin_name}" "$2/*" >> "$LOG_FILE" 2>&1
-    if [ $? -ne 0 ]; then
-        echo "Please check and make sure '7z' exists."
-        die "generate ${bin_name} failed."
-    fi
-    echo "End generate ${bin_name} bin file" >> "$LOG_FILE" 2>&1
-
-    #generate sha256 file
-    sha256_name="${package_pre_name}.sha256"
-    echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
-    sha256sum "${bin_name}" | awk -F" " '{print $1}' > "$sha256_name"
-    if [ $? -ne 0 ]; then
-        die "generate sha256 file failed."
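# The .sha256 file written above holds only the bare digest, so it cannot be passed to
# "sha256sum -c" as-is; a consumer would verify the bin file along these lines
# (file names are illustrative):
#
#   echo "$(cat ${package_pre_name}.sha256)  ${package_pre_name}.bin" | sha256sum --check -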
- fi - echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1 - - - ################################################### - # make server package - ################################################### - if [ -d "${2}" ]; then - rm -rf ${2} - fi - mkdir -p ${2} - mv ${bin_name} ${sha256_name} $2 - for file in $(echo $root_script) - do - tar -cpf - $file | ( cd $2; tar -xpf - ) - done - - # copy script dir to temp path - cp -rf $BUILD_DIR/script/gspylib/ $2/script/ - cp -rf $BUILD_DIR/script/impl/ $2/script/ - cp -rf $BUILD_DIR/script/local/ $2/script/ - - # copy agent tool to temp path - res=$(cp -rf ${script_dir}/agent/ $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent to $2 failed. $res" - fi - res=$(cp -f ${script_dir}/agent/common/cmd_sender.py $2/script/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/common/cmd_sender.py to $2/script/ failed. $res" - fi - res=$(cp -f ${script_dir}/agent/common/uploader.py $2/script/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/common/uploader.py to $2/script/ failed. $res" - fi - res=$(cp -f ${script_dir}/agent/common/py_pstree.py $2/script/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/common/py_pstree.py to $2/script/ failed. $res" - fi - # copy the default xml to temp path - res=$(cp -f ${script_dir}/build/cluster_default_agent.xml $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/build/cluster_default_agent.xml to $2 failed. $res" - fi - # copy CBG shell tools to temp path - res=$(cp -rf ${script_dir}/build/bin/ $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/build/bin to $2 failed. $res" - fi - # copy the CBG config template to temp path - res=$(cp -rf ${script_dir}/build/configtemplate/ $2/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/build/configtemplate/ to $2 failed. $res" - fi - res=$(cp -f ${script_dir}/agent/om_agent.conf $2/configtemplate/ 2>&1) - if [ $? -ne 0 ]; then - die "copy ${script_dir}/agent/om_agent.conf to $2/configtemplate/ failed. $res" - fi - - find $2/bin/ -type f -print0 | xargs -0 -n 10 -r dos2unix > /dev/null 2>&1 && - chmod -R +x $2/bin/ && - chmod -R +x $2/script/ -} - -####################################################################### -# read script file list from mpp_release_list -####################################################################### -function read_script_file() -{ - cd $SCRIPT_DIR - local head=$(cat $releasefile | grep "\[script\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find $pkgname in the $releasefile file " - fi - - local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - local all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - touch script_file - cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1" >script_file -} - -####################################################################### -##function make_package have three actions -##1.parse release_file_list variable represent file -##2.copy target file into a newly created temporary directory temp -##3.package all file in the temp directory and renome to destination package_path -####################################################################### -function make_package() -{ - cd $SCRIPT_DIR - releasefile=$1 - pkgname=$2 - - local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}') - if [ ! 
-n "$head" ]; then - die "error: ono find $pkgname in the $releasefile file " - fi - - local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - local all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - if [ "$pkgname"x = "libpq"x -a "$version_mode" = "debug" ]; then - # copy include file - head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find header in the $releasefile file " - fi - - tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - # copy cm depend library file - head=$(cat $releasefile | grep "\[cmlibrary\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find cmlibrary in the $releasefile file " - fi - - tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest2=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - dest=$(echo "$dest";echo "$dest1";echo "$dest2") - - elif [ "$pkgname"x = "libpq"x -a "$version_mode" = "release" ]; then - # copy include file - head=$(cat $releasefile | grep "\[header\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find header in the $releasefile file " - fi - - tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest1=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") - dest=$(echo "$dest";echo "$dest1") - fi - - if [ "$pkgname"x = "server"x ]; then - read_script_file - fi - - mkdir -p ${BUILD_DIR} - cd ${BUILD_DIR} - rm -rf temp - mkdir temp - if [ "$pkgname"x = "server"x ]; then - copy_script_file "$script_dir/" ${BUILD_DIR} - fi - - case "$pkgname" in - server) - mkdir -p ${BUILD_DIR}/temp/etc - target_file_copy "$dest" ${BUILD_DIR}/temp - ;; - *) - target_file_copy_for_non_server "$dest" ${BUILD_DIR}/temp $pkgname - ;; - esac - - cd ${BUILD_DIR}/temp - select_package_command - - case "$pkgname" in - client) - echo "packaging client..." - $package_command "${client_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${client_package_name} failed" - fi - - mv ${client_package_name} ${package_path} - echo "install $pkgname tools is ${client_package_name} of the current directory " >> "$LOG_FILE" 2>&1 - echo "success!" - ;; - server) - echo "packaging server..." - cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp - if [ $? 
-ne 0 ]; then - die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed" - fi - - #copy inspection lib - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/output/log/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/output/nodes/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/asn1crypto/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/bcrypt/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/cffi/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/cryptography/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/enum/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/idna/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/nacl/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/pyasn1/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/kafka-python/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/pycparser/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/OpenSSL/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/psutil/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/netifaces/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/paramiko/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/bottle/ && - mkdir -p ${BUILD_DIR}/temp/script/gspylib/inspection/lib/paste/ && - - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/asn1crypto/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bcrypt/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cffi/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/cryptography/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/enum/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/idna/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/nacl/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pyasn1/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/kafka-python/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/pycparser/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/OpenSSL/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/ipaddress.py ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/six.py ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.py ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS2 ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/_cffi_backend.so_UCS4 ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/psutil/ 
${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/netifaces/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paramiko/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/bottle/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - cp -rf ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG}/paste/ ${BUILD_DIR}/temp/script/gspylib/inspection/lib/ - - if [ $? -ne 0 ]; then - die "remove svn info failed." - fi - - cp -r ${BINARYLIBS_PATH}/install_tools${ARCHITECTURE_EXTRA_FLAG} ./install_tools - find ./install_tools -name .svn -type d -print0 | xargs -0 rm -rf - if [ $? -ne 0 ]; then - die "remove svn info failed." - fi - - mkdir -p ./lib - - mv ./install_tools/asn1crypto ./lib - mv ./install_tools/bcrypt ./lib - mv ./install_tools/cffi ./lib - mv ./install_tools/cryptography ./lib - mv ./install_tools/enum ./lib - mv ./install_tools/idna ./lib - mv ./install_tools/nacl ./lib - mv ./install_tools/pyasn1 ./lib - mv ./install_tools/kafka-python ./lib - mv ./install_tools/pycparser ./lib - mv ./install_tools/OpenSSL ./lib - mv ./install_tools/ipaddress.py ./lib - mv ./install_tools/six.py ./lib - mv ./install_tools/_cffi_backend.py ./lib - mv ./install_tools/_cffi_backend.so_UCS2 ./lib - mv ./install_tools/_cffi_backend.so_UCS4 ./lib - mv ./install_tools/paramiko ./lib - mv ./install_tools/psutil ./lib - mv ./install_tools/netifaces ./lib - mv ./install_tools/unixodbc . - mv ./install_tools/paste ./lib - mv ./install_tools/bottle ./lib - - #Not package unixodbc/bin/odbc_config, so delete it - rm -f ./unixodbc/bin/odbc_config - - rm -r ./install_tools - - if [ "$product_mode"x == "multiple"x ] - then - mkdir -p ./libcgroup/bin - if [ $? -ne 0 ]; then - die "mkdir -p ./libcgroup/bin failed" - fi - - cp ${BUILD_DIR}/bin/gs_cgroup ./libcgroup/bin - if [ $? -ne 0 ]; then - die "cp ${BUILD_DIR}/bin/gs_cgroup ./libcgroup/bin failed" - fi - mkdir -p ./libcgroup/lib - if [ $? -ne 0 ]; then - die "mkdir -p ./libcgroup/lib failed" - fi - cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/libcgroup/${COMPLIE_TYPE}/lib/libcgroup.so ./libcgroup/lib - if [ $? -ne 0 ]; then - die "cp ${BINARYLIBS_PATH}/${PLAT_FORM_STR}/libcgroup/${COMPLIE_TYPE}/lib/libcgroup.so ./libcgroup/lib failed" - fi - fi - - - #compress the agent package - echo "Agent package is starting." - cp ./lib/_cffi_backend.so_UCS4 ./lib/_cffi_backend.so - cp -r ./script/gspylib/pssh/bin ./agent/ - cp -r ./script/gspylib/clib ./agent/ - if [ "$product_mode"x == "single"x ] - then - if [ ! -e ./agent/centralized_cluster ] - then - touch ./agent/centralized_cluster - echo "This file is used only to distinguish cluster types (generated by the packaging script)." >> ./agent/centralized_cluster - else - echo "This file is used only to distinguish cluster types (generated by the packaging script)." > ./agent/centralized_cluster - fi - fi - $package_command "${agent_package_name}" ./agent ./lib ./cluster_default_agent.xml ./version.cfg >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${agent_package_name} failed" - fi - mv ${agent_package_name} ${package_path} - echo "Agent package has been finished." - - #remove the agent path which only needed by agent before compress server package - echo "Server package is starting." - rm -rf ./agent - - # install upgrade_sql.* files. 
-        package_upgrade_sql
-
-        $package_command "${server_package_name}" ./* >>"$LOG_FILE" 2>&1
-        if [ $? -ne 0 ]; then
-            die "$package_command ${server_package_name} failed"
-        fi
-        mv ${server_package_name} ${package_path}
-        echo "the $pkgname package ${server_package_name} has been generated in the ${package_path} directory" >> "$LOG_FILE" 2>&1
-        echo "success!"
-        ;;
-    libpq)
-        echo "packaging libpq..."
-        $package_command "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
-        if [ $? -ne 0 ]; then
-            die "$package_command ${libpq_package_name} failed"
-        fi
-        mv ${libpq_package_name} ${package_path}
-        echo "the $pkgname package ${libpq_package_name} has been generated in the ${package_path} directory" >> "$LOG_FILE" 2>&1
-        echo "success!"
-        ;;
-    esac
-}
-
-#######################################################################
-##copy all files of the script directory to the target directory
-#######################################################################
-function copy_script_file()
-{
-    target_file=$1
-    local target_dir=$2
-
-    cp -rf $target_file/script/ $target_dir/ &&
-    find $target_dir/script/ -type f -print0 | xargs -0 -n 10 -r dos2unix > /dev/null 2>&1 &&
-    find $target_dir/script/gspylib/inspection/ -name d2utmp* -print0 | xargs -0 rm -rf &&
-    cp -rf $target_file/script/gspylib/inspection/lib/checknetspeed/speed_test* $target_dir/script/gspylib/inspection/lib/checknetspeed/ &&
-    cp -rf $target_file/script/gspylib/inspection/lib/*.png $target_dir/script/gspylib/inspection/lib/ &&
-
-    if [ $? -ne 0 ]; then
-        die "cp -r $target_file $target_dir failed"
-    fi
-}
-#######################################################################
-## generate the license control version files.
-#######################################################################
-function make_license_control()
-{
-    local target_dir=$1
-    python_exec=$(which python 2>/dev/null)
-
-    if [ -x "$python_exec" ]; then
-        $python_exec ${binarylib_dir}/buildtools/license_control/encrypted_version_file.py >> "$LOG_FILE" 2>&1
-    fi
-
-    if [ $? -ne 0 ]; then
-        die "create ${binarylib_dir}/buildtools/license_control license file failed."
-    fi
-
-    if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ]; then
-        # Compute the sha256 checksums of the version files.
-        gaussdb_200_sha256sum=$(sha256sum $gaussdb_200_file | awk '{print $1}')
-        gaussdb_300_sha256sum=$(sha256sum $gaussdb_300_file | awk '{print $1}')
-        # Embed the checksums into the source code.
-        sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*NULL[ \t]*,[ \t]*NULL[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {\"$gaussdb_200_sha256sum\", \"$gaussdb_300_sha256sum\"};/g" $gaussdb_version_file
-    fi
-
-    if [ $? -ne 0 ]; then
-        die "modify '$gaussdb_version_file' failed."
-    fi
-}
-#######################################################################
-## copy the version files to the target directory.
-#######################################################################
-function copy_license_file()
-{
-    local target_dir=$1
-
-    # Copy the version files to the bin path.
-    if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ] && [ -f "$gaussdb_200_standard_file" ]; then
-        cp -f $gaussdb_200_file $target_dir &&
-        cp -f $gaussdb_300_file $target_dir &&
-        cp -f $gaussdb_200_standard_file $target_dir
-    fi
-
-    if [ $? -ne 0 ]; then
-        die "cp -r ${binarylib_dir}/buildtools/license_control $target_dir failed."
-    fi
-}
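# Taken together, make_license_control and restore_license_control (below) form a round
# trip over gaussdb_version.cpp: before the build, the placeholder line
#
#     const char *sha256_digests[SHA256_DIGESTS_COUNT] = {NULL, NULL};
#
# is rewritten to carry the real digests, e.g. (values illustrative)
#
#     const char *sha256_digests[SHA256_DIGESTS_COUNT] = {"3a7bd3e2...", "9f86d081..."};
#
# and after packaging it is reset to the NULL placeholders so the working tree stays clean.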
-#######################################################################
-## restore the gaussdb_version.cpp content.
-#######################################################################
-function restore_license_control()
-{
-    # Generate license control file.
-    make_license_control
-
-    # Restore the gaussdb_version.cpp content.
-    if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ] && [ -f "$gaussdb_200_standard_file" ]; then
-        sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*[a-zA-Z0-9\"]\+[ \t]*,[ \t]*[a-zA-Z0-9\"]\+[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {NULL, NULL};/g" $gaussdb_version_file &&
-
-        # Remove the gaussdb.version files.
-        rm -f $gaussdb_200_file &&
-        rm -f $gaussdb_300_file &&
-        rm -f $gaussdb_200_standard_file
-    fi
-
-    if [ $? -ne 0 ]; then
-        die "restore '$gaussdb_version_file' failed, remove ${binarylib_dir}/buildtools/license_control file failed."
-    fi
-}
-
-#############################################################
-# show package for hotpatch sdv.
-#############################################################
-if [ "$show_package" = true ]; then
-    echo "package: "$server_package_name
-    echo "bin: "$bin_name
-    exit 0
-fi
-
-#############################################################
-# main function
-#############################################################
-# 1. clean install path and log file
-mpp_pkg_pre_check
-
-# 2. choose action
-mpp_pkg_bld
-if [ "$zip_package" = "off" ]; then
-    echo "The option 'nopkg' is on, no package will be zipped."
-    exit 0
-fi
-
-# 3. make package
-mpp_pkg_make
-
-#clean mpp_install directory
-echo "clean environment"
-echo "[makemppdb] $(date +%y-%m-%d' '%T): remove ${BUILD_DIR}" >>"$LOG_FILE" 2>&1
-
-mkdir ${ROOT_DIR}/output
-mv ${ROOT_DIR}/build/script/*.tar.gz ${ROOT_DIR}/output/
-echo "now, all packages have been generated!"
-
-exit 0
diff --git a/build/script/package_opengauss.sh b/build/script/package_opengauss.sh
index f6447b28b..0c2d293d0 100755
--- a/build/script/package_opengauss.sh
+++ b/build/script/package_opengauss.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-#############################################################################
+#######################################################################
 # Copyright (c) 2020 Huawei Technologies Co.,Ltd.
 #
 # openGauss is licensed under Mulan PSL v2.
@@ -14,65 +14,47 @@
 # MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 # See the Mulan PSL v2 for more details.
 # ----------------------------------------------------------------------------
-# Description : gs_backup is a utility to back up or restore binary files and parameter files.
-#############################################################################
+# description: Compile and pack GaussDB
+# Return 0 means OK.
+# Return 1 means failed.
+# version: 2.0
+# date: 2021-02-28
+#######################################################################
+declare SCRIPT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
+declare ROOT_DIR=$(dirname "${SCRIPT_DIR}")
+declare ROOT_DIR=$(dirname "${ROOT_DIR}")
+
+
+declare package_type='server'
+declare product_mode='opengauss'
 declare version_mode='release'
 declare binarylib_dir='None'
-declare config_file=''
+declare om_dir='None'
+declare cm_dir='None'
+declare show_package='false'
+declare install_package_format='tar'
 
-#detect platform information.
-PLATFORM=32
-bit=$(getconf LONG_BIT)
-if [ "$bit" -eq 64 ]; then
-    PLATFORM=64
-fi
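# For example, on a 64-bit openEuler host the probes above and below would be expected
# to yield PLATFORM=64, kernel=openeuler and dist_version=openEuler (version holds the
# string found between the parentheses of /etc/openEuler-release); these values are later
# combined into package names such as openGauss-x.x.x-openEuler-64bit.tar.gz.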
-#get OS distributed version.
-kernel=""
-version=""
-if [ -f "/etc/openEuler-release" ]
-then
-    kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/openEuler-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
-elif [ -f "/etc/centos-release" ]
-then
-    kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
-elif [ -f "/etc/euleros-release" ]
-then
-    kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z)
-    version=$(cat /etc/centos-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z)
-else
-    kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z)
-    version=$(lsb_release -r | awk -F ' ' '{print $2}')
-fi
-## to solve kernel="name=openeuler"
-if echo $kernel | grep -q 'openeuler'
-then
-    kernel="openeuler"
-fi
+function print_help()
+{
+    echo "Usage: $0 [OPTION]
+    -h|--help              show help information.
+    -3rd|--binarylib_dir   the directory of third party binarylibs.
+    -pkg|--package         provide the type of installation package; the value can be server.
+    -m|--version_mode      the value of this parameter can be debug, release or memcheck; the default value is release.
+    -pm|--product_mode     the value of this parameter can be opengauss or lite; the default value is opengauss.
+"
+}
 
-if [ X"$kernel" == X"centos" ]; then
-    dist_version="CentOS"
-elif [ X"$kernel" == X"openeuler" ]; then
-    dist_version="openEuler"
-elif [ X"$kernel" == X"euleros" ]; then
-    dist_version="EulerOS"
-elif [ X"$kernel" == X"kylin" ]; then
-    dist_version="Kylin"
-elif [ X"$kernel" = X"ubuntu" ]; then
-    dist_version="Ubuntu"
-else
-    echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) and Ubuntu(x86) platform."
-    echo "Kernel is $kernel"
+
+if [ $# = 0 ] ; then
+    echo "missing option"
+    print_help
     exit 1
 fi
 
-declare release_file_list="opengauss_release_list_${kernel}_single"
-declare dest_list=""
-
 #########################################################################
 ##read command line parameters
 #######################################################################
@@ -82,9 +64,29 @@ while [ $# -gt 0 ]; do
         print_help
         exit 1
         ;;
-    -v|--version)
-        print_version
-        exit 1
+    -3rd|--binarylib_dir)
+        if [ "$2"X = X ]; then
+            echo "no given binarylib directory values"
+            exit 1
+        fi
+        binarylib_dir=$2
+        shift 2
+        ;;
+    -pkg)
+        if [ "$2"X = X ]; then
+            echo "no given package type name"
+            exit 1
+        fi
+        package_type=$2
+        shift 2
+        ;;
+    -pm)
+        if [ "$2"X = X ]; then
+            echo "no given product mode"
+            exit 1
+        fi
+        product_mode=$2
+        shift 2
         ;;
     -m|--version_mode)
         if [ "$2"X = X ]; then
@@ -94,22 +96,9 @@ while [ $# -gt 0 ]; do
         version_mode=$2
         shift 2
         ;;
-    -3rd|--binarylibs_dir)
-        if [ "$2"X = X ]; then
-            echo "no given binarylib directory values"
-            exit 1
-        fi
-        binarylib_dir=$2
-        shift 2
-        ;;
-    -f|--config_file)
-        if [ "$2"X = X ]; then
-            echo "no given config file"
-            shift 1
-        else
-            config_file=$2
-            shift 2
-        fi
+    -S|--show_pkg)
+        show_package=true
+        shift
         ;;
     *)
         echo "Internal Error: option processing error: $1" 1>&2
@@ -119,436 +108,37 @@ while [ $# -gt 0 ]; do
     esac
 done
 
-##add platform architecture information
-PLATFORM_ARCH=$(uname -p)
-if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
-    if [ "$dist_version" != "openEuler" ] && [ "$dist_version" != "EulerOS" ] && [ "$dist_version" != "Kylin" ] ; then
-        echo "We only support NUMA on openEuler(aarch64), EulerOS(aarch64), Kylin(aarch64) platform."
- exit 1 - fi - - release_file_list="opengauss_release_list_${kernel}_aarch64_single" -fi - -if [ "$version_mode" = "mini" ]; then - release_file_list="opengauss_release_list_mini" -fi - -##default install version storage path -declare server_version='openGauss' -declare server_name_for_package="$(echo ${server_version} | sed 's/ /-/g')" # replace blank with '-' for package name. -declare version_number='' - -####################################################################### -##putout the version of server -####################################################################### -function print_version() -{ - echo "$version_number" -} - -####################################################################### -## print help information -####################################################################### -function print_help() -{ - echo "Usage: $0 [OPTION] - -h|--help show help information - -V|--version show version information - -m|--version_mode this values of paramenter is debug, release or memcheck, the default value is release - -3rd|--binarylibs_dir the parent directory of binarylibs -" -} - -####################################################################### -##version 2.0.0 -####################################################################### -function read_srv_version() -{ - cd $SCRIPT_DIR - version_number=$(grep 'VERSION' opengauss.spec | awk -F "=" '{print $2}') - echo "${server_name_for_package}-${version_number}">version.cfg -} - -################################### -# get version number from globals.cpp -################################## -function read_srv_number() -{ - global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp" - version_name="GRAND_VERSION_NUM" - version_num="" - line=$(cat $global_kernal | grep ^const* | grep $version_name) - version_num1=${line#*=} - #remove the symbol; - version_num=$(echo $version_num1 | tr -d ";") - #remove the blank - version_num=$(echo $version_num) - - if echo $version_num | grep -qE '^92[0-9]+$' - then - # get the last three number - latter=${version_num:2} - echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg - else - echo "Cannot get the version number from globals.cpp." - exit 1 - fi -} - -SCRIPT_DIR=$(cd $(dirname $0) && pwd) - -test -d ${SCRIPT_DIR}/../../output || mkdir -p ${SCRIPT_DIR}/../../output && rm -fr ${SCRIPT_DIR}/../../output/* -output_path=$(cd ${SCRIPT_DIR}/../../output && pwd) - -read_srv_version - -####################################################################### -## declare all package name -####################################################################### -declare version_string="${server_name_for_package}-${version_number}" -declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit" -declare libpq_package_name="${package_pre_name}-Libpq.tar.gz" -declare tools_package_name="${package_pre_name}-tools.tar.gz" -declare kernel_package_name="${package_pre_name}.tar.bz2" -declare kernel_symbol_package_name="${package_pre_name}-symbol.tar.gz" -declare sha256_name="${package_pre_name}.sha256" - -echo "[make single db] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}" -ROOT_DIR=$(dirname "$SCRIPT_DIR") -ROOT_DIR=$(dirname "$ROOT_DIR") -PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh") -if [ "${PLAT_FORM_STR}"x == "Failed"x ] -then - echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) platform." 
- exit 1; -fi - -PG_REG_TEST_ROOT="${ROOT_DIR}/" -PMK_SCHEMA="${ROOT_DIR}/script/pmk_schema.sql" -declare LOG_FILE="${SCRIPT_DIR}/make_package.log" -declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install" -BUILD_TOOLS_PATH="${ROOT_DIR}/binarylibs/buildtools/${PLAT_FORM_STR}" -BINARYLIBS_PATH="${ROOT_DIR}/binarylibs/dependency/${PLAT_FORM_STR}" -declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql" -if [ "${binarylib_dir}"x != "None"x ] -then - echo "binarylib dir : ${binarylib_dir}" - BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}" - BINARYLIBS_PATH="${binarylib_dir}/dependency/${PLAT_FORM_STR}" -fi - -gcc_version="7.3" - -export CC=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc -export CXX=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++ -export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH -export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH - -read_srv_number - -####################################################################### -# move pkgs to output directory -####################################################################### -function deploy_pkgs() -{ - for pkg in $@; do - if [ -f $pkg ]; then - mv $pkg $output_path/ - fi - done -} - -####################################################################### -# Print log. -####################################################################### -log() -{ - echo "[make single db] $(date +%y-%m-%d' '%T): $@" - echo "[make single db] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1 -} - -####################################################################### -# print log and exit. 
-####################################################################### -die() -{ - log "$@" - echo "$@" +if [ -e "$SCRIPT_DIR/utils/common.sh" ];then + source $SCRIPT_DIR/utils/common.sh +else exit 1 -} - -####################################################################### -##install gaussdb database contained server -####################################################################### -function install_gaussdb() -{ - cd $SCRIPT_DIR - if [ "$version_mode" = "release" ] || [ "$version_mode" = "mini" ]; then - chmod +x ./separate_debug_information.sh - ./separate_debug_information.sh - cd $SCRIPT_DIR - mv symbols.tar.gz $kernel_symbol_package_name - deploy_pkgs $kernel_symbol_package_name - fi - - #insert the commitid to version.cfg as the upgrade app path specification - export PATH=${BUILD_DIR}:$PATH - export LD_LIBRARY_PATH=${BUILD_DIR}/lib:$LD_LIBRARY_PATH - - commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | awk '{print $5}' | cut -d ")" -f 1) - if [ -z "$commitid" ] - then - commitid=$(date "+%Y%m%d%H%M%S") - commitid=${commitid:4:8} - fi - echo "${commitid}" >>${SCRIPT_DIR}/version.cfg - echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1 -} - -####################################################################### -# copy directory's files list to $2 -####################################################################### -function copy_files_list() -{ - for file in $(echo $1) - do - test -e $file && tar -cpf - $file | ( cd $2; tar -xpf - ) - done -} - -####################################################################### -# set postgresql.conf.sample from config_file when packing -####################################################################### -function set_config_sample() -{ - if [[ -f $config_file ]] - then - config_sample_file=${BUILD_DIR}/share/postgresql/postgresql.conf.sample - if [[ ! -f "$config_sample_file" ]] - then - echo "postgresql.conf.sample does not exist" - exit 1 - else - echo "#------------------------------------------------------------------------------" >> $config_sample_file - echo "# USER SET CONFIG ON COMPILING TIME" >> $config_sample_file - echo "#------------------------------------------------------------------------------" >> $config_sample_file - while IFS= read -r line; do - SUBSTRING=$(echo $line | cut -d'=' -f 1)"= " - if grep -q "$SUBSTRING" $config_sample_file ; then - sed -i "/$SUBSTRING/c$line" $config_sample_file - else - echo $line >> $config_sample_file - fi - done < $config_file - fi - fi -} - -####################################################################### -##copy target file into temporary directory temp -####################################################################### -function target_file_copy() -{ - cd ${BUILD_DIR} - set_config_sample - copy_files_list "$1" $2 - - cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp - cp -rf ${SCRIPT_DIR}/../../simpleInstall ${BUILD_DIR}/temp - if [ $? -ne 0 ]; then - die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed" - fi - - sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf - - #generate tar file - echo "Begin generate ${kernel_package_name} tar file..." >> "$LOG_FILE" 2>&1 - cd $2 - tar -jcvpf "${kernel_package_name}" ./* >> "$LOG_FILE" 2>&1 - cd '-' - mv $2/"${kernel_package_name}" ./ - if [ $? -ne 0 ]; then - die "generate ${kernel_package_name} failed." 
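# The kernel package generated above is a plain bzip2 tarball, so an installer (or a
# user) can unpack it directly, e.g. (target path illustrative):
#
#   mkdir -p $GAUSSHOME && tar -xjpf openGauss-x.x.x-xxx-64bit.tar.bz2 -C $GAUSSHOME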
- fi - echo "End generate ${kernel_package_name} tar file" >> "$LOG_FILE" 2>&1 - - #generate sha256 file - sha256_name="${package_pre_name}.sha256" - echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1 - sha256sum "${kernel_package_name}" | awk -F" " '{print $1}' > "$sha256_name" - if [ $? -ne 0 ]; then - die "generate sha256 file failed." - fi - echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1 - - ################################################### - # make server package - ################################################### - if [ -d "${2}" ]; then - rm -rf ${2} - fi -} - -function target_file_copy_for_non_server() -{ - cd ${BUILD_DIR} - copy_files_list "$1" $2 -} - -####################################################################### -##function make_package_prep have two actions -##1.parse release_file_list variable represent file -##2.copy target file into a newly created temporary directory temp -####################################################################### -function prep_dest_list() -{ - cd $SCRIPT_DIR - releasefile=$1 - pkgname=$2 - - local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}') - if [ ! -n "$head" ]; then - die "error: ono find $pkgname in the $releasefile file " - fi - - local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}') - if [ ! -n "$tail" ]; then - local all=$(cat $releasefile | wc -l) - let tail=$all+1-$head - fi - - dest_list=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1") -} - -function make_package_srv() -{ - echo "Begin package server" - cd $SCRIPT_DIR - prep_dest_list $release_file_list 'server' - - rm -rf ${BUILD_DIR}/temp - mkdir -p ${BUILD_DIR}/temp/etc - target_file_copy "$dest_list" ${BUILD_DIR}/temp - - deploy_pkgs ${sha256_name} ${kernel_package_name} - echo "make server(all) package success!" -} - -####################################################################### -# Install all SQL files from src/distribute/include/catalog/upgrade_sql -# to INSTALL_DIR/bin/script/upgrade_sql. -# Package all SQL files and then verify them with SHA256. -####################################################################### -function make_package_upgrade_sql() -{ - echo "Begin to install upgrade_sql files..." - UPGRADE_SQL_TAR="upgrade_sql.tar.gz" - UPGRADE_SQL_SHA256="upgrade_sql.sha256" - - cd $SCRIPT_DIR - mkdir -p ${BUILD_DIR} - cd ${BUILD_DIR} - rm -rf temp - mkdir temp - cd ${BUILD_DIR}/temp - cp -r "${UPGRADE_SQL_DIR}" ./upgrade_sql - [ $? -ne 0 ] && die "Failed to cp upgrade_sql files" - tar -czf ${UPGRADE_SQL_TAR} upgrade_sql - [ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}" - rm -rf ./upgrade_sql > /dev/null 2>&1 - - sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}" - [ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}" - - chmod 600 ${UPGRADE_SQL_TAR} - chmod 600 ${UPGRADE_SQL_SHA256} - - deploy_pkgs ${UPGRADE_SQL_TAR} ${UPGRADE_SQL_SHA256} - - echo "Successfully packaged upgrade_sql files." -} - -function make_package_libpq() -{ - cd $SCRIPT_DIR - prep_dest_list $release_file_list 'libpq' - - rm -rf ${BUILD_DIR}/temp - mkdir -p ${BUILD_DIR}/temp - - target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp - - cd ${BUILD_DIR}/temp - echo "packaging libpq..." - tar -zvcf "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? 
-ne 0 ]; then - die "$package_command ${libpq_package_name} failed" - fi - - deploy_pkgs ${libpq_package_name} - echo "install $pkgname tools is ${libpq_package_name} of ${output_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" -} - -function make_package_tools() -{ - cd $SCRIPT_DIR - prep_dest_list $release_file_list 'client' - - rm -rf ${BUILD_DIR}/temp - mkdir -p ${BUILD_DIR}/temp - - cd ${BUILD_DIR}/ - - target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp - - cd ${BUILD_DIR}/temp - echo "packaging tools..." - tar -zvcf "${tools_package_name}" ./* >>"$LOG_FILE" 2>&1 - if [ $? -ne 0 ]; then - die "$package_command ${tools_package_name} failed" - fi - - deploy_pkgs ${tools_package_name} - echo "install $pkgname tools is ${tools_package_name} of ${output_path} directory " >> "$LOG_FILE" 2>&1 - echo "success!" -} - -function spec_prep() -{ - cp opengauss.spec gauss.spec -} - -####################################################################### -## Check the installation package production environment -####################################################################### -function srv_pkg_bld() -{ - install_gaussdb -} - -function srv_pkg_make() -{ - echo "Start package opengauss." - make_package_srv - make_package_libpq - make_package_tools - make_package_upgrade_sql - echo "End package opengauss." -} +fi ############################################################# -# main function +# show package for hotpatch sdv. ############################################################# -# 0. prepare spec file -spec_prep +if [ "$show_package" = true ]; then + echo "package: "$server_package_name + echo "bin: "$bin_name + exit 0 +fi -# 1. build server -srv_pkg_bld +declare BUILD_DIR="${ROOT_DIR}/mppdb_temp_install" +declare PKG_TMP_DIR="${BUILD_DIR}/temp" -# 2. make package -srv_pkg_make +if [ -e "$SCRIPT_DIR/utils/internal_packages.sh" ];then + source $SCRIPT_DIR/utils/internal_packages.sh +else + exit 1 +fi + +function main() +{ + echo "[makegaussdb] $(date +%y-%m-%d' '%T): script dir : ${SCRIPT_DIR}" + echo "[makegaussdb] $(date +%y-%m-%d' '%T): Work root dir : ${ROOT_DIR}" + gaussdb_pkg +} +main echo "now, all packages has finished!" exit 0 diff --git a/build/script/reconstruct.sh b/build/script/reconstruct.sh deleted file mode 100644 index df21e8806..000000000 --- a/build/script/reconstruct.sh +++ /dev/null @@ -1,265 +0,0 @@ -#!/bin/bash -####################################################################### -# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd. -# descript: recompress package -# version: 2.0 -# date: 2021-05-19 -####################################################################### - -declare server_package_path="" -declare agent_package_path="" -declare product_mode="multiple" -declare unpack_server="unpack_server" -declare unpack_agent="unpack_agent" -declare unpack_psycopg2="unpack_psycopg2" -declare compress_command="tar -zcf" -declare decompress_command="tar -zxf" - -function print_help() -{ - echo "Usage: $0 [OPTION] - -h|--help show help information. - -pm product mode, values parameter is single, multiple or opengauss, default value is multiple. - --server-pacakge the server pacakge path. - --agent-package the agent package path, only -pm is single or multiple need. 
-" -} - -function log() { - echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" -} - -function error() { - echo -e "\033[31m[makegaussdb] $(date +%y-%m-%d' '%T) Error: $@\033[0m" -} - -while [ $# -gt 0 ]; do - case "$1" in - -h|--help) - print_help - exit 1 - ;; - -pm) - if [ X$2 == X"" ]; then - error "no given pm product mode." - exit 1 - fi - product_mode=$2 - shift 2 - ;; - --server-package) - if [ X$2 == X"" ]; then - error "no given server compress path" - exit 1 - fi - server_package_path=$2 - shift 2 - ;; - --agent-package) - if [ X$2 == X"" ]; then - error "no given agent compress path" - exit 1 - fi - agent_package_path=$2 - shift 2 - ;; - *) - echo "Internal Error: option processing error: $1" 1>&2 - echo "please input right paramtenter, the following command may help you" - echo "sh reconstruct.sh --help or sh reconstruct.sh -h" - exit 1 - esac -done - -function standard_path() { - local package_path=$1 - local first_char=$(expr substr "${package_path}" 1 1) - if [ "${first_char}" != "/" ]; then - package_path="$(pwd)/${package_path}" - fi - echo "${package_path}" -} - -function check_path() { - local package_type=$1 - local package_path=$2 - if [ X${package_path} = X ]; then - error "the paramtenter --${package_type} can not be empty." - exit 1 - fi - if [ ! -f "${package_path}" ]; then - error "the file ${package_path} not exist, please check." - exit 1 - fi -} - -function check_parameter() { - check_path "server-package" ${server_package_path} - server_package_path=$(standard_path ${server_package_path}) - if [ X${product_mode} != X"opengauss" ]; then - check_path "agent-pacakge" ${agent_package_path} - agent_package_path=$(standard_path ${agent_package_path}) - fi -} - -function backup_compress() { - local compress_name=$1 - local bak_package_name="${compress_name%%.*}_old.${compress_name#*.}" - if [ -d "${bak_package_name}" ]; then - rm -rf ${bak_package_name} - fi - cp ${compress_name} ${bak_package_name} -} - -function delete_backup_package() { - local compress_name=$1 - local bak_package_name="${compress_name%%.*}_old.${compress_name#*.}" - if [ -d "${bak_package_name}" ]; then - rm -rf ${bak_package_name} - fi -} - -function final_compress() { - local compress_file=$1 - if [ X"${compress_file##*.}" == X"zip" ]; then - zip -q -r ${compress_file} ./* - else - ${compress_command} ${compress_file} ./* - fi -} - -function begin_decompress() { - local decompress_file=$1 - local decompress_dir=$2 - if [ X"${decompress_file##*.}" == X"zip" ]; then - unzip -q ${decompress_file} -d ${decompress_dir} - else - ${decompress_command} ${decompress_file} -C ${decompress_dir} - fi -} - -function distribute_compress() { - server_dir=$(dirname "${server_package_path}") - server_name=$(basename "${server_package_path}") - - agent_dir=$(dirname "${agent_package_path}") - agent_name=$(basename "${agent_package_path}") - - log "server_name: ${server_name}, agent_name: ${agent_name}" - # decompress server package and copy psycopg2 to lib - cd ${server_dir} - backup_compress ${server_name} - if [ -e "${unpack_server}" ]; then - rm -rf ${unpack_server} - fi - mkdir ${unpack_server} - begin_decompress ${server_name} ${unpack_server} - cd ${unpack_server} && mkdir ${unpack_server} ${unpack_psycopg2} - - euler_name=$(basename "$(find . -name "GaussDB-Kernel-V500R00*-64bit.tar.gz")") - psycopg2_name=$(basename "$(find . 
-name "GaussDB-Kernel-V500R00*-64bit-Python.tar.gz")") - log "euler_name: ${euler_name}, psycopg2_name: ${psycopg2_name}" - - ${decompress_command} ${euler_name} -C ${unpack_server} - ${decompress_command} ${psycopg2_name} -C ${unpack_psycopg2} - chmod -R 700 ${unpack_psycopg2}/psycopg2 - cp -r ${unpack_psycopg2}/psycopg2 ${unpack_server}/lib - cp -r ${unpack_psycopg2}/psycopg2 ${unpack_server}/script/gspylib/inspection/lib - log "complete copy psycopg2 to server package." - - # decompress agent package and copy psycopg2 to lib, then compress - cd ${agent_dir} - backup_compress ${agent_name} - - if [ -e "${unpack_agent}" ]; then - rm -rf ${unpack_agent} - fi - mkdir ${unpack_agent} - begin_decompress ${agent_name} ${unpack_agent} - cd ${unpack_agent} && mkdir ${unpack_agent} - agent_tar_name=$(basename "$(find . -name "GaussDB-Kernel-V500R00*-64bit-AGENT.tar.gz")") - ${decompress_command} ${agent_tar_name} -C ${unpack_agent} - - cd ${unpack_agent} - cp -r ${server_dir}/${unpack_server}/${unpack_psycopg2}/psycopg2 lib/ - ${compress_command} ${agent_tar_name} ./* - rm -rf ../${agent_tar_name} && mv ${agent_tar_name} ../ && cd ../ && rm -rf ${unpack_agent} - final_compress ${agent_name} - rm -rf ../${agent_name} && mv ${agent_name} ../ && cd ../ && rm -rf ${unpack_agent} - - cd ${agent_dir} - delete_backup_package ${agent_name} - log "complete copy psycopg2 to agent package and compress agent package." - - # compress server package - log "begin to compress server package ......" - cd ${server_dir}/${unpack_server}/${unpack_server} - ${compress_command} ${euler_name} ./* - rm -rf ../${euler_name} && mv ${euler_name} ../ && cd ../ && rm -rf ${unpack_server} - if [ -d "${unpack_psycopg2}" ]; then - rm -rf ${unpack_psycopg2} - fi - final_compress ${server_name} - rm -rf ../${server_name} && mv ${server_name} ../ && cd ../ && rm -rf ${unpack_server} - - cd ${server_dir} - delete_backup_package ${server_name} - log "complete compress server package." -} - -function opengauss_compress() { - server_dir=$(dirname "${server_package_path}") - server_name=$(basename "${server_package_path}") - - cd ${server_dir} - backup_compress ${server_name} - if [ -e "${unpack_server}" ]; then - rm -rf ${unpack_server} - fi - mkdir ${unpack_server} - ${decompress_command} ${server_name} -C ${unpack_server} - cd ${unpack_server} && mkdir ${unpack_agent} ${unpack_psycopg2} - - psycopg2_name=$(basename "$(find . -name "openGauss-*-Python.tar.gz")") - agent_name=$(basename "$(find . -name "openGauss-*-om.tar.gz")") - log "agent_name: ${agent_name}, psycopg2_name: ${psycopg2_name}" - - ${decompress_command} ${agent_name} -C ${unpack_agent} - ${decompress_command} ${psycopg2_name} -C ${unpack_psycopg2} - chmod -R 700 ${unpack_psycopg2}/psycopg2 - cp -r ${unpack_psycopg2}/psycopg2 ${unpack_agent}/lib - cp -r ${unpack_psycopg2}/psycopg2 ${unpack_agent}/script/gspylib/inspection/lib - log "complete copy psycopg2 to agent package." - - # compress agent package - cd ${unpack_agent} - ${compress_command} ${agent_name} ./* - rm -rf ../${agent_name} && mv ${agent_name} ../ && cd ../ && rm -rf ${unpack_agent} - log "complete compress agent package." - - # recover om sha256 - sha256_name="$(echo ${agent_name} | sed 's/\.tar\.gz//').sha256" - if [ -d "${sha256_name}" ]; then - rm -rf ${sha256_name} - fi - sha256sum "${agent_name}" | awk -F" " '{print $1}' > "${sha256_name}" - if [ $? -ne 0 ]; then - die "generate sha256 file failed." 
-    fi
-
-    if [ -d "${unpack_psycopg2}" ]; then
-        rm -rf ${unpack_psycopg2}
-    fi
-    ${compress_command} ${server_name} ./*
-    rm -rf ../${server_name} && mv ${server_name} ../ && cd ../ && rm -rf ${unpack_server}
-
-    delete_backup_package ${server_name}
-    log "complete compress server package."
-}
-
-check_parameter
-if [ X${product_mode} == X"opengauss" ]; then
-    opengauss_compress
-else
-    distribute_compress
-fi
diff --git a/build/script/separate_debug_information.sh b/build/script/separate_debug_information.sh
index 609c8a2e2..72e4278eb 100644
--- a/build/script/separate_debug_information.sh
+++ b/build/script/separate_debug_information.sh
@@ -76,11 +76,14 @@ separate_symbol()
            echo "$x is a script, do not separate symbol"
        elif [[ "$x" = *".dat" ]];then
            echo "$x is a license file, do not separate symbol"
-        elif [[ "$x" = *".sh" ]];then
+        # The second condition below checks whether a file is a shell script that lacks the .sh suffix.
+        # An executable shell script usually starts with a shebang line that names its interpreter,
+        # e.g., "#!/usr/bin/env bash", so the first two bytes are tested for "#!".
+        elif [[ "$x" = *".sh" ]] || [[ -f "$x" && -x "$x" && "$(head -c2 $x)" == '#!' ]]; then
            echo "$x is a shell file, do not separate symbol"
        elif [[ "$x" = *".la" ]];then
            echo "$x is a la file, do not separate symbol"
-        elif [[ "$x" = *".crt" ]];then
+        elif [[ "$x" = *".crt" ]];then
            echo "$x is a crt file, do not separate symbol"
        elif [[ "$x" = *".ini" ]];then
            echo "$x is a ini file, do not separate symbol"
diff --git a/build/script/utils/cmake_compile.sh b/build/script/utils/cmake_compile.sh
new file mode 100644
index 000000000..6828d6c82
--- /dev/null
+++ b/build/script/utils/cmake_compile.sh
@@ -0,0 +1,187 @@
+#!/bin/bash
+#######################################################################
+# Copyright (c): 2020-2021, Huawei Tech. Co., Ltd.
+# description: Compile and pack openGauss
+# Return 0 means OK.
+# Return 1 means failed.
+# version: 2.0
+# date: 2020-08-08
+#######################################################################
+#######################################################################
+## Clean the package build environment before a new run
+#######################################################################
+function gaussdb_pkg_pre_clean()
+{
+    if [ -d "$BUILD_DIR" ]; then
+        rm -rf $BUILD_DIR
+    fi
+    if [ -d "$LOG_FILE" ]; then
+        rm -rf $LOG_FILE
+    fi
+}
+###################################
+
+#######################################################################
+## read the version from gaussdb.ver
+#######################################################################
+function read_gaussdb_version()
+{
+    cd ${SCRIPT_DIR}
+    echo "${gaussdb_name_for_package}-${version_number}" > version.cfg
+    # the version number is read automatically from kernel globals.cpp, no need to change it here
+}
+
+###################################
+# get version number from globals.cpp
+##################################
+function read_gaussdb_number()
+{
+    global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
+    version_name="GRAND_VERSION_NUM"
+    version_num=""
+    line=$(cat $global_kernal | grep ^const* | grep $version_name)
+    version_num1=${line#*=}
+    # remove the trailing semicolon
+    version_num=$(echo $version_num1 | tr -d ";")
+    # trim the surrounding whitespace
+    version_num=$(echo $version_num)
+
+    if echo $version_num | grep -qE '^92[0-9]+$'
+    then
+        # keep the digits after the leading "92"
+        latter=${version_num:2}
+        echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
+    else
+        echo "Cannot get the version number from globals.cpp."
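+        # Editorial note -- a sketch with hypothetical values, not part of the build flow:
+        # given a line in globals.cpp such as
+        #     const uint32 GRAND_VERSION_NUM = 92298;
+        # the branch above strips the leading "92" and records "92.298" in version.cfg.
+        # Reproducible in any shell:
+        #     version_num=92298; latter=${version_num:2}; echo "92.${latter}"    # -> 92.298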
+        exit 1
+    fi
+}
+
+#######################################################################
+## append the commit id to version.cfg; the upgrade tooling uses it to locate the app path
+#######################################################################
+function get_kernel_commitid()
+{
+    export PATH=${BUILD_DIR}:$PATH
+    export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
+    commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | awk '{print $5}' | cut -d ")" -f 1)
+    echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
+    echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
+}
+
+#######################################################################
+## generate the version file.
+#######################################################################
+function make_license_control()
+{
+    python_exec=$(which python 2>/dev/null)
+
+    if [ -x "$python_exec" ]; then
+        $python_exec ${binarylib_dir}/buildtools/license_control/encrypted_version_file.py >> "$LOG_FILE" 2>&1
+    fi
+
+    if [ $? -ne 0 ]; then
+        die "create ${binarylib_dir}/buildtools/license_control license file failed."
+    fi
+
+    if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ]; then
+        # Get the sha256 checksums.
+        gaussdb_200_sha256sum=$(sha256sum $gaussdb_200_file | awk '{print $1}')
+        gaussdb_300_sha256sum=$(sha256sum $gaussdb_300_file | awk '{print $1}')
+        # Modify the source code.
+        sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*NULL[ \t]*,[ \t]*NULL[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {\"$gaussdb_200_sha256sum\", \"$gaussdb_300_sha256sum\"};/g" $gaussdb_version_file
+    fi
+
+    if [ $? -ne 0 ]; then
+        die "modify '$gaussdb_version_file' failed."
+    fi
+}
+function make_gaussdb_kernel()
+{
+    export BUILD_TUPLE=${PLATFORM_ARCH}
+    export THIRD_BIN_PATH="${binarylib_dir}"
+    export PREFIX_HOME="${BUILD_DIR}"
+    export DEBUG_TYPE=${version_mode}
+
+    echo "Begin make install gaussdb server" >> "$LOG_FILE" 2>&1
+
+    export GAUSSHOME=${BUILD_DIR}
+    export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
+
+    [ -d "${CMAKE_BUILD_DIR}" ] && rm -rf ${CMAKE_BUILD_DIR}
+    [ -d "${BUILD_DIR}" ] && rm -rf ${BUILD_DIR}
+    mkdir -p ${CMAKE_BUILD_DIR}
+    cd ${CMAKE_BUILD_DIR}
+    cmake .. ${CMAKE_OPT}
+    if [ $? -ne 0 ]; then
+        die "cmake failed."
+    fi
+    cpus_num=$(grep -w processor /proc/cpuinfo|wc -l)
+    make -sj ${cpus_num}
+    if [ $? -ne 0 ]; then
+        die "make failed."
+    fi
+    make install -sj ${cpus_num}
+    if [ $? -ne 0 ]; then
+        die "make install failed."
+    fi
+
+    echo "End make install gaussdb server" >> "$LOG_FILE" 2>&1
+}
+
+
+#######################################################################
+## install the gaussdb database, containing server, client and libpq
+#######################################################################
+function install_gaussdb()
+{
+    # Generate the license control file, and set the sha256 strings in the code.
+    echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
+    make_license_control
+    echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
+    cd "$ROOT_DIR/"
+    if [ $? -ne 0 ]; then
+        die "change dir to $ROOT_DIR failed."
+    fi
+
+    if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
+        echo "WARNING: do not separate symbol in debug mode!"
+    fi
+
+    if [ "$product_mode" != "opengauss" ]; then
+        die "the product mode can only be opengauss!"
+    fi
+
+    echo "build gaussdb kernel." >> "$LOG_FILE" 2>&1
+    make_gaussdb_kernel
+    echo "build gaussdb kernel success." >> "$LOG_FILE" 2>&1
>> "$LOG_FILE" 2>&1 + + chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf + dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1 + + #insert the commitid to version.cfg as the upgrade app path specification + get_kernel_commitid +} + + + + +####################################################################### +##install gaussdb database and others +##select to install something according to variables package_type need +####################################################################### +function gaussdb_build() +{ + case "$package_type" in + server) + install_gaussdb + ;; + libpq) + install_gaussdb + ;; + *) + echo "Internal Error: option processing error: $package_type" + echo "please input right paramenter values server or libpq " + exit 1 + esac +} diff --git a/build/script/utils/common.sh b/build/script/utils/common.sh new file mode 100644 index 000000000..b86890210 --- /dev/null +++ b/build/script/utils/common.sh @@ -0,0 +1,164 @@ +#!/bin/bash +####################################################################### +# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd. +# descript: Compile and pack openGauss +# Return 0 means OK. +# Return 1 means failed. +# version: 2.0 +# date: 2020-08-08 +####################################################################### + +declare LOG_FILE="${SCRIPT_DIR}/makemppdb_pkg.log" +declare gaussdb_version='openGauss' +declare PLATFORM_ARCH=$(uname -p) +declare package_path=${ROOT_DIR}/output +declare install_package_format="tar" +declare PLATFORM=32 +bit=$(getconf LONG_BIT) +if [ "$bit" -eq 64 ]; then + declare PLATFORM=64 +fi + +# 公共方法 +####################################################################### +##putout the version of gaussdb +####################################################################### +function print_version() +{ + echo "$version_number" +} + +####################################################################### +# Print log. +####################################################################### +function log() +{ + echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" + echo "[makegaussdb] $(date +%y-%m-%d' '%T): $@" >> "$LOG_FILE" 2>&1 +} + +####################################################################### +# print log and exit. +####################################################################### +function die() +{ + log "$@" + echo "$@" + exit $ERR_MKGS_FAILED +} + + + +####################################################################### +##select package command accroding to install_package_format +####################################################################### +function select_package_command() +{ + + case "$install_package_format" in + tar) + tar='tar' + option=' -zcvf' + package_command="$tar$option" + ;; + esac +} +select_package_command + + +####################################################################### +##get os dist version +####################################################################### +export PLAT_FORM_STR=$(sh "${ROOT_DIR}/src/get_PlatForm_str.sh") +if [ "${PLAT_FORM_STR}"x == "Failed"x -o "${PLAT_FORM_STR}"x == ""x ] +then + echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) platform." 
+    exit 1;
+fi
+
+if [[ "$PLAT_FORM_STR" =~ "euleros" ]]; then
+    dist_version="EulerOS"
+    if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
+        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
+    fi
+elif [[ "$PLAT_FORM_STR" =~ "centos" ]]; then
+    dist_version="CentOS"
+    if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
+        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
+    fi
+elif [[ "$PLAT_FORM_STR" =~ "openeuler" ]]; then
+    dist_version="openEuler"
+    if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
+        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
+    fi
+elif [[ "$PLAT_FORM_STR" =~ "kylin" ]]; then
+    dist_version="Kylin"
+    if [ "$PLATFORM_ARCH"X == "aarch64"X ];then
+        GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA"
+    fi
+else
+    echo "We only support openEuler(aarch64), EulerOS(aarch64), CentOS, Kylin(aarch64) platforms."
+    echo "PLAT_FORM_STR is ${PLAT_FORM_STR}"
+    exit 1
+fi
+
+## add platform architecture information
+if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
+    if [ "$dist_version" != "openEuler" ] && [ "$dist_version" != "EulerOS" ] && [ "$dist_version" != "Kylin" ] ; then
+        echo "We only support NUMA on openEuler(aarch64), EulerOS(aarch64), Kylin(aarch64) platforms."
+        exit 1
+    fi
+fi
+
+if [ "${binarylib_dir}" != 'None' ] && [ -d "${binarylib_dir}" ]; then
+    BUILD_TOOLS_PATH="${binarylib_dir}/buildtools/${PLAT_FORM_STR}"
+    PLATFORM_PATH="${binarylib_dir}/platform/${PLAT_FORM_STR}"
+    BINARYLIBS_PATH="${binarylib_dir}/dependency"
+else
+    die "${binarylib_dir} does not exist"
+fi
+
+declare INSTALL_TOOLS_DIR=${BINARYLIBS_PATH}/install_tools_${PLAT_FORM_STR}
+declare UNIX_ODBC="${BINARYLIBS_PATH}/${PLAT_FORM_STR}/unixodbc"
+
+# compiler-related settings
+gcc_version="7.3"
+ccache -V >/dev/null 2>&1 && USE_CCACHE="ccache " ENABLE_CCACHE="--enable-ccache"
+export CC="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/gcc"
+export CXX="${USE_CCACHE}$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin/g++"
+export LD_LIBRARY_PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/lib64:$BUILD_TOOLS_PATH/gcc$gcc_version/isl/lib:$BUILD_TOOLS_PATH/gcc$gcc_version/mpc/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/mpfr/lib/:$BUILD_TOOLS_PATH/gcc$gcc_version/gmp/lib/:$LD_LIBRARY_PATH
+export PATH=$BUILD_TOOLS_PATH/gcc$gcc_version/gcc/bin:$PATH
+export JAVA_HOME=${binarylib_dir}/platform/huaweijdk8/${PLATFORM_ARCH}/jdk
+
+declare ERR_MKGS_FAILED=1
+declare MKGS_OK=0
+
+
+gaussdb_200_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB200"
+gaussdb_300_file="${binarylib_dir}/buildtools/license_control/gaussdb.version.GaussDB300"
+gaussdb_200_standard_file="${binarylib_dir}/buildtools/license_control/gaussdb.license.GaussDB200_Standard"
+gaussdb_version_file="${ROOT_DIR}/src/gausskernel/process/postmaster/gaussdb_version.cpp"
+
+
+if [ -f "$SCRIPT_DIR/gaussdb.ver" ];then
+    declare version_number=$(cat ${SCRIPT_DIR}/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
+else
+    echo "gaussdb.ver not found!"
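+    # Editorial note (hypothetical file contents, for illustration only):
+    # gaussdb.ver is expected to hold KEY=VALUE lines, of which only the value
+    # after VERSION= is kept, e.g. with
+    #     PRODUCT=openGauss
+    #     VERSION=2.1.0
+    # the pipeline above yields version_number=2.1.0.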
+    exit 1
+fi
+
+declare release_file_list="${PLATFORM_ARCH}_${product_mode}_list"
+
+#######################################################################
+## declare all package names
+#######################################################################
+declare gaussdb_name_for_package="$(echo ${gaussdb_version} | sed 's/ /-/g')"
+declare version_string="${gaussdb_name_for_package}-${version_number}"
+declare package_pre_name="${version_string}-${dist_version}-${PLATFORM}bit"
+declare libpq_package_name="${package_pre_name}-Libpq.tar.gz"
+declare tools_package_name="${package_pre_name}-tools.tar.gz"
+declare kernel_package_name="${package_pre_name}.tar.bz2"
+declare symbol_package_name="${package_pre_name}-symbol.tar.gz"
+declare sha256_name="${package_pre_name}.sha256"
+
+
diff --git a/build/script/utils/internal_packages.sh b/build/script/utils/internal_packages.sh
new file mode 100644
index 000000000..d8e21c46f
--- /dev/null
+++ b/build/script/utils/internal_packages.sh
@@ -0,0 +1,239 @@
+#!/bin/bash
+#############################################################################
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms
+# and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+# ----------------------------------------------------------------------------
+# Description : internal packaging helpers used to assemble the openGauss
+#               server, libpq, tools and upgrade-sql packages.
+#############################################################################
+
+declare UPGRADE_SQL_DIR="${ROOT_DIR}/src/include/catalog/upgrade_sql"
+
+#######################################################################
+# move pkgs to the output directory
+#######################################################################
+function deploy_pkgs()
+{
+    mkdir -p $package_path
+    for pkg in $@; do
+        if [ -f "$pkg" ]; then
+            mv $pkg $package_path/
+        fi
+    done
+}
+
+#######################################################################
+# copy the files named in list $1 into directory $2
+#######################################################################
+function copy_files_list()
+{
+    for file in $(echo $1)
+    do
+        test -e $file && tar -cpf - $file | ( cd $2; tar -xpf - )
+    done
+}
+
+#######################################################################
+## copy the target files into the temporary directory temp
+#######################################################################
+function target_file_copy()
+{
+    cd ${BUILD_DIR}
+    copy_files_list "$1" $2
+
+    cp ${SCRIPT_DIR}/version.cfg ${BUILD_DIR}/temp
+    if [ $? -ne 0 ]; then
+        die "copy ${SCRIPT_DIR}/version.cfg to ${BUILD_DIR}/temp failed"
+    fi
+
+    sed -i '/^process_cpu_affinity|/d' $2/bin/cluster_guc.conf
+
+    # generate the tar file
+    echo "Begin generate ${kernel_package_name} tar file..." >> "$LOG_FILE" 2>&1
+    cd $2
+    tar -jcvpf "${kernel_package_name}" ./* >> "$LOG_FILE" 2>&1
+    cd '-'
+    mv $2/"${kernel_package_name}" ./
+    if [ $? -ne 0 ]; then
+        die "generate ${kernel_package_name} failed."
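+        # Editorial note (a sketch of the copy_files_list idiom used above;
+        # /tmp/stage is a hypothetical target): piping tar into tar recreates each
+        # listed path, with its directory structure and permissions, under the
+        # destination -- which a bare cp of the file name alone would not do:
+        #     tar -cpf - ./bin/gsql | ( cd /tmp/stage; tar -xpf - )
+        #     # -> /tmp/stage/bin/gsql, modes preserved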
+    fi
+    echo "End generate ${kernel_package_name} tar file" >> "$LOG_FILE" 2>&1
+
+    # generate the sha256 file
+    sha256_name="${package_pre_name}.sha256"
+    echo "Begin generate ${sha256_name} sha256 file..." >> "$LOG_FILE" 2>&1
+    sha256sum "${kernel_package_name}" | awk -F" " '{print $1}' > "$sha256_name"
+    if [ $? -ne 0 ]; then
+        die "generate sha256 file failed."
+    fi
+    echo "End generate ${sha256_name} sha256 file" >> "$LOG_FILE" 2>&1
+
+    ###################################################
+    # clean up the temporary staging directory
+    ###################################################
+    if [ -d "${2}" ]; then
+        rm -rf ${2}
+    fi
+}
+
+function target_file_copy_for_non_server()
+{
+    cd ${BUILD_DIR}
+    copy_files_list "$1" $2
+}
+
+#######################################################################
+## prep_dest_list does two things:
+## 1. parse the file named by the release_file_list variable
+## 2. extract into dest_list the paths that belong to the [pkgname] section
+#######################################################################
+function prep_dest_list()
+{
+    cd $SCRIPT_DIR
+    releasefile=$1
+    pkgname=$2
+
+    local head=$(cat $releasefile | grep "\[$pkgname\]" -n | awk -F: '{print $1}')
+    if [ ! -n "$head" ]; then
+        die "error: cannot find $pkgname in the $releasefile file"
+    fi
+
+    local tail=$(cat $releasefile | sed "1,$head d" | grep "^\[" -n | sed -n "1p" | awk -F: '{print $1}')
+    if [ ! -n "$tail" ]; then
+        local all=$(cat $releasefile | wc -l)
+        let tail=$all+1-$head
+    fi
+
+    dest_list=$(cat $releasefile | awk "NR==$head+1,NR==$tail+$head-1")
+}
+
+#######################################################################
+## run separate_debug_information.sh from the script dir and deploy the symbol package
+#######################################################################
+function separate_symbol()
+{
+    cd $SCRIPT_DIR
+    if [ "$version_mode" = "release" ]; then
+        chmod +x ./separate_debug_information.sh
+        ./separate_debug_information.sh
+        cd $SCRIPT_DIR
+        mv symbols.tar.gz $symbol_package_name
+        deploy_pkgs $symbol_package_name
+    fi
+}
+
+function make_package_srv()
+{
+    echo "Begin package server"
+    cd $SCRIPT_DIR
+    prep_dest_list $release_file_list 'server'
+
+    rm -rf ${BUILD_DIR}/temp
+    mkdir -p ${BUILD_DIR}/temp/etc
+    target_file_copy "$dest_list" ${BUILD_DIR}/temp
+
+    deploy_pkgs ${sha256_name} ${kernel_package_name}
+    echo "make server(all) package success!"
+}
+
+#######################################################################
+# Install all SQL files from src/include/catalog/upgrade_sql
+# to INSTALL_DIR/bin/script/upgrade_sql.
+# Package all SQL files and then verify them with SHA256.
+#######################################################################
+function make_package_upgrade_sql()
+{
+    echo "Begin to install upgrade_sql files..."
+    UPGRADE_SQL_TAR="upgrade_sql.tar.gz"
+    UPGRADE_SQL_SHA256="upgrade_sql.sha256"
+
+    cd $SCRIPT_DIR
+    mkdir -p ${BUILD_DIR}
+    cd ${BUILD_DIR}
+    rm -rf temp
+    mkdir temp
+    cd ${BUILD_DIR}/temp
+    cp -r "${UPGRADE_SQL_DIR}" ./upgrade_sql
+    [ $? -ne 0 ] && die "Failed to cp upgrade_sql files"
+    tar -czf ${UPGRADE_SQL_TAR} upgrade_sql
+    [ $? -ne 0 ] && die "Failed to package ${UPGRADE_SQL_TAR}"
+    rm -rf ./upgrade_sql > /dev/null 2>&1
+
+    sha256sum ${UPGRADE_SQL_TAR} | awk -F" " '{print $1}' > "${UPGRADE_SQL_SHA256}"
+    [ $? -ne 0 ] && die "Failed to generate sha256 sum file for ${UPGRADE_SQL_TAR}"
+
+    chmod 600 ${UPGRADE_SQL_TAR}
+    chmod 600 ${UPGRADE_SQL_SHA256}
+
+    deploy_pkgs ${UPGRADE_SQL_TAR} ${UPGRADE_SQL_SHA256}
+
+    echo "Successfully packaged upgrade_sql files."
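+    # Editorial note (hypothetical excerpt): the release file list parsed by
+    # prep_dest_list above is INI-style; for 'server' it selects every path
+    # between the "[server]" header and the next "[...]" header, e.g.:
+    #     [server]
+    #     ./bin/gaussdb
+    #     ./bin/gsql
+    #     [libpq]
+    #     ./lib/libpq.so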
+}
+
+function make_package_libpq()
+{
+    cd $SCRIPT_DIR
+    prep_dest_list $release_file_list 'libpq'
+
+    rm -rf ${BUILD_DIR}/temp
+    mkdir -p ${BUILD_DIR}/temp
+
+    target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
+
+    cd ${BUILD_DIR}/temp
+    echo "packaging libpq..."
+    tar -zvcf "${libpq_package_name}" ./* >>"$LOG_FILE" 2>&1
+    if [ $? -ne 0 ]; then
+        die "$package_command ${libpq_package_name} failed"
+    fi
+
+    deploy_pkgs ${libpq_package_name}
+    echo "the $pkgname package ${libpq_package_name} has been generated in the ${package_path} directory" >> "$LOG_FILE" 2>&1
+    echo "success!"
+}
+
+function make_package_tools()
+{
+    cd $SCRIPT_DIR
+    prep_dest_list $release_file_list 'client'
+
+    rm -rf ${BUILD_DIR}/temp
+    mkdir -p ${BUILD_DIR}/temp
+
+    cd ${BUILD_DIR}/
+
+    target_file_copy_for_non_server "$dest_list" ${BUILD_DIR}/temp
+
+    cd ${BUILD_DIR}/temp
+    echo "packaging tools..."
+    tar -zvcf "${tools_package_name}" ./* >>"$LOG_FILE" 2>&1
+    if [ $? -ne 0 ]; then
+        die "$package_command ${tools_package_name} failed"
+    fi
+
+    deploy_pkgs ${tools_package_name}
+    echo "the $pkgname package ${tools_package_name} has been generated in the ${package_path} directory" >> "$LOG_FILE" 2>&1
+    echo "success!"
+}
+
+
+function gaussdb_pkg()
+{
+    echo "Start package opengauss."
+    separate_symbol
+    make_package_srv
+    make_package_libpq
+    make_package_tools
+    make_package_upgrade_sql
+    echo "End package opengauss."
+}
diff --git a/build/script/utils/make_compile.sh b/build/script/utils/make_compile.sh
new file mode 100644
index 000000000..493a651a7
--- /dev/null
+++ b/build/script/utils/make_compile.sh
@@ -0,0 +1,286 @@
+#!/bin/bash
+#######################################################################
+# Copyright (c): 2020-2025, Huawei Tech. Co., Ltd.
+# description: Compile and pack openGauss
+# Return 0 means OK.
+# Return 1 means failed.
+# version: 2.0
+# date: 2020-08-08
+#######################################################################
+#######################################################################
+## Clean the package build environment before a new run
+#######################################################################
+function gaussdb_pkg_pre_clean()
+{
+    if [ -d "$BUILD_DIR" ]; then
+        rm -rf $BUILD_DIR
+    fi
+    if [ -d "$LOG_FILE" ]; then
+        rm -rf $LOG_FILE
+    fi
+}
+###################################
+
+#######################################################################
+## read the version from gaussdb.ver
+#######################################################################
+function read_gaussdb_version()
+{
+    cd ${SCRIPT_DIR}
+    echo "${gaussdb_name_for_package}-${version_number}" > version.cfg
+    # the version number is read automatically from kernel globals.cpp, no need to change it here
+}
+
+
+PG_REG_TEST_ROOT="${ROOT_DIR}"
+ROACH_DIR="${ROOT_DIR}/distribute/bin/roach"
+MPPDB_DECODING_DIR="${ROOT_DIR}/contrib/mppdb_decoding"
+
+
+###################################
+# get version number from globals.cpp
+##################################
+function read_gaussdb_number()
+{
+    global_kernal="${ROOT_DIR}/src/common/backend/utils/init/globals.cpp"
+    version_name="GRAND_VERSION_NUM"
+    version_num=""
+    line=$(cat $global_kernal | grep ^const* | grep $version_name)
+    version_num1=${line#*=}
+    # remove the trailing semicolon
+    version_num=$(echo $version_num1 | tr -d ";")
+    # trim the surrounding whitespace
+    version_num=$(echo $version_num)
+
+    if echo $version_num | grep -qE '^92[0-9]+$'
+    then
+        # keep the digits after the leading "92"
+        latter=${version_num:2}
+        echo "92.${latter}" >>${SCRIPT_DIR}/version.cfg
+    else
+        echo "Cannot get the version number from globals.cpp."
+        exit 1
+    fi
+}
+
+
+#######################################################################
+## append the commit id to version.cfg; the upgrade tooling uses it to locate the app path
+#######################################################################
+function get_kernel_commitid()
+{
+    export PATH=${BUILD_DIR}:$PATH
+    export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
+    commitid=$(LD_PRELOAD='' ${BUILD_DIR}/bin/gaussdb -V | awk '{print $5}' | cut -d ")" -f 1)
+    echo "${commitid}" >>${SCRIPT_DIR}/version.cfg
+    echo "End insert commitid into version.cfg" >> "$LOG_FILE" 2>&1
+}
+
+
+#######################################################################
+## generate the version file.
+#######################################################################
+function make_license_control()
+{
+    python_exec=$(which python 2>/dev/null)
+
+    if [ -x "$python_exec" ]; then
+        $python_exec ${binarylib_dir}/buildtools/license_control/encrypted_version_file.py >> "$LOG_FILE" 2>&1
+    fi
+
+    if [ $? -ne 0 ]; then
+        die "create ${binarylib_dir}/buildtools/license_control license file failed."
+    fi
+
+    if [ -f "$gaussdb_200_file" ] && [ -f "$gaussdb_300_file" ]; then
+        # Get the sha256 checksums.
+        gaussdb_200_sha256sum=$(sha256sum $gaussdb_200_file | awk '{print $1}')
+        gaussdb_300_sha256sum=$(sha256sum $gaussdb_300_file | awk '{print $1}')
+        # Modify the source code.
+        sed -i "s/^[ \t]*const[ \t]\+char[ \t]*\*[ \t]*sha256_digests[ \t]*\[[ \t]*SHA256_DIGESTS_COUNT[ \t]*\][ \t]*=[ \t]*{[ \t]*NULL[ \t]*,[ \t]*NULL[ \t]*}[ \t]*;[ \t]*$/const char \*sha256_digests\[SHA256_DIGESTS_COUNT\] = {\"$gaussdb_200_sha256sum\", \"$gaussdb_300_sha256sum\"};/g" $gaussdb_version_file
+    fi
+
+    if [ $? -ne 0 ]; then
+        die "modify '$gaussdb_version_file' failed."
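+        # Editorial note (digests shortened, purely illustrative): the sed above
+        # turns the placeholder initializer in gaussdb_version.cpp
+        #     const char *sha256_digests[SHA256_DIGESTS_COUNT] = {NULL, NULL};
+        # into one that embeds the two checksums, e.g.
+        #     const char *sha256_digests[SHA256_DIGESTS_COUNT] = {"ab12...", "cd34..."};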
+    fi
+}
+
+
+#######################################################################
+## run separate_debug_information.sh from the script dir and stash the symbol package
+#######################################################################
+function separate_symbol()
+{
+    cd $SCRIPT_DIR
+    if [ "$version_mode" = "release" -a "$separate_symbol" = "on" ]; then
+        chmod +x ./separate_debug_information.sh
+        ./separate_debug_information.sh
+        cd $SCRIPT_DIR
+        mkdir -p $package_path
+        mv symbols.tar.gz $package_path/$symbol_package_name
+
+    fi
+}
+
+#######################################################################
+## install the gaussdb database, containing server, client and libpq
+#######################################################################
+function install_gaussdb()
+{
+    # Generate the license control file, and set the sha256 strings in the code.
+    echo "Modify gaussdb_version.cpp file." >> "$LOG_FILE" 2>&1
+    make_license_control
+    echo "Modify gaussdb_version.cpp file success." >> "$LOG_FILE" 2>&1
+    # change into the source root directory
+    cd "$ROOT_DIR"
+    if [ $? -ne 0 ]; then
+        die "change dir to $ROOT_DIR failed."
+    fi
+
+    if [ "$version_mode" = "debug" -a "$separate_symbol" = "on" ]; then
+        echo "WARNING: do not separate symbol in debug mode!"
+    fi
+
+    if [ "$product_mode" != "opengauss" ]; then
+        die "the product mode can only be opengauss!"
+    fi
+
+    # configure
+    make distclean -sj >> "$LOG_FILE" 2>&1
+
+    echo "Begin configure." >> "$LOG_FILE" 2>&1
+    chmod 755 configure
+
+    if [ "$product_mode"x == "opengauss"x ]; then
+        enable_readline="--with-readline"
+    else
+        enable_readline="--without-readline"
+    fi
+    shared_opt="--gcc-version=${gcc_version}.0 --prefix="${BUILD_DIR}" --3rd=${binarylib_dir} --enable-thread-safety ${enable_readline} --without-zlib"
+    if [ "$product_mode"x == "opengauss"x ]; then
+        if [ "$version_mode"x == "release"x ]; then
+            # configure with -D__USE_NUMA -D__ARM_LSE in arm opengauss mode
+            if [ "$PLATFORM_ARCH"X == "aarch64"X ] ; then
+                echo "configure -D__USE_NUMA -D__ARM_LSE with arm opengauss mode"
+                GAUSSDB_EXTRA_FLAGS=" -D__USE_NUMA -D__ARM_LSE"
+            fi
+            ./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
+        elif [ "$version_mode"x == "memcheck"x ]; then
+            ./configure $shared_opt CFLAGS="-O0" --enable-mot --enable-debug --enable-cassert --enable-memory-check CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
+        elif [ "$version_mode"x == "fiurelease"x ]; then
+            ./configure $shared_opt CFLAGS="-O2 -g3 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --disable-jemalloc CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
+        elif [ "$version_mode"x == "fiudebug"x ]; then
+            ./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert --disable-jemalloc CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
+        else
+            ./configure $shared_opt CFLAGS="-O0 ${GAUSSDB_EXTRA_FLAGS}" --enable-mot --enable-debug --enable-cassert CC=g++ $extra_config_opt >> "$LOG_FILE" 2>&1
+        fi
+    fi
+
+    if [ $? -ne 0 ]; then
+        die "configure failed."
+    fi
+    echo "End configure" >> "$LOG_FILE" 2>&1
+
+    echo "Begin make install MPPDB server" >> "$LOG_FILE" 2>&1
+    make clean >> "$LOG_FILE" 2>&1
+
+    export GAUSSHOME=${BUILD_DIR}
+    export LD_LIBRARY_PATH=${BUILD_DIR}/lib:${BUILD_DIR}/lib/postgresql:${LD_LIBRARY_PATH}
+    make -sj 8 >> "$LOG_FILE" 2>&1
+    make install -sj 8 >> "$LOG_FILE" 2>&1
+    if [ $? -ne 0 ]; then
+        make install -sj 8 >> "$LOG_FILE" 2>&1
+        if [ $? -ne 0 ]; then
+            make install -sj 8 >> "$LOG_FILE" 2>&1
+            if [ $? -ne 0 ]; then
+                die "make install failed."
+            fi
+        fi
+    fi
+
+    cd "$ROOT_DIR/contrib/pg_upgrade_support"
+    make clean >> "$LOG_FILE" 2>&1
+    make -sj >> "$LOG_FILE" 2>&1
+    make install -sj >> "$LOG_FILE" 2>&1
+    echo "End make install MPPDB" >> "$LOG_FILE" 2>&1
+
+
+    cd "$ROOT_DIR"
+    if [ "${make_check}" = 'on' ]; then
+        echo "Begin make check MPPDB..." >> "$LOG_FILE" 2>&1
+        cd ${PG_REG_TEST_ROOT}
+        make check -sj >> "$LOG_FILE" 2>&1
+        if [ $? -ne 0 ]; then
+            die "make check MPPDB failed."
+        fi
+        echo "End make check MPPDB success." >> "$LOG_FILE" 2>&1
+    fi
+
+    echo "Begin make install mppdb_decoding..." >> "$LOG_FILE" 2>&1
+    # build mppdb_decoding and copy the decoding plugin from the contrib tree into lib/postgresql
+    # (the build steps are identical for the release, memcheck and debug modes)
+    cd "$MPPDB_DECODING_DIR"
+    make >> "$LOG_FILE" 2>&1
+    if [ $? -ne 0 ]; then
+        die "make install mppdb_decoding failed."
+    fi
+    echo "End make install mppdb_decoding success." >> "$LOG_FILE" 2>&1
+    echo "Begin pack mppdb_decoding..." >> "$LOG_FILE" 2>&1
+    cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so ${BUILD_DIR}/lib/postgresql/mppdb_decoding.so
+    if [ $? -ne 0 ]; then
+        die "cp ${MPPDB_DECODING_DIR}/mppdb_decoding.so to ${BUILD_DIR}/lib/postgresql failed"
+    fi
+
+    chmod 444 ${BUILD_DIR}/bin/cluster_guc.conf
+    dos2unix ${BUILD_DIR}/bin/cluster_guc.conf > /dev/null 2>&1
+
+    separate_symbol
+
+    get_kernel_commitid
+}
+
+
+#######################################################################
+## build and install the gaussdb database and related pieces;
+## what gets installed is selected by the package_type variable
+#######################################################################
+function gaussdb_build()
+{
+    case "$package_type" in
+        server)
+            install_gaussdb
+            ;;
+        libpq)
+            install_gaussdb
+            ;;
+        *)
+            echo "Internal Error: option processing error: $package_type"
+            echo "please input a right parameter value: server or libpq"
+            exit 1
+    esac
+}
\ No newline at end of file
diff --git a/build/script/opengauss_release_list_openeuler_single b/build/script/x86_64_lite_list
similarity index 66%
rename from build/script/opengauss_release_list_openeuler_single
rename to build/script/x86_64_lite_list
index 53d3efad1..42be3fc48 100644
--- a/build/script/opengauss_release_list_openeuler_single
+++ b/build/script/x86_64_lite_list
@@ -2,47 +2,19 @@
 ./bin/gsql
 ./bin/gaussdb
 ./bin/gstrace
-./bin/gs_basebackup
-./bin/gs_probackup
-./bin/gs_tar
-./bin/gs_encrypt
 ./bin/gs_dump
 ./bin/gs_dumpall
-./bin/gs_ctl
 ./bin/gs_initdb
+./bin/gs_ctl
 ./bin/gs_guc
-./bin/encrypt
-./bin/openssl
 ./bin/gs_restore
-./bin/gs_cgroup
-./bin/openssl
 ./bin/pg_config
 ./bin/pg_controldata
-./bin/pg_format_cu
 ./bin/pg_resetxlog
-./bin/pg_recvlogical
 ./bin/alarmItem.conf
 ./bin/retry_errcodes.conf
 ./bin/cluster_guc.conf
-./bin/bind_net_irq.sh
-./bin/setArmOptimization.sh
-./bin/krb5kdc
-./bin/klist
-./bin/kinit
-./bin/kdestroy
-./bin/kdb5_util
-./bin/kadmin.local
-./bin/lz4
-./bin/kadmind
-./bin/dbmind
-./bin/server.key.cipher
-./bin/server.key.rand
 ./bin/gs_plan_simulator.sh
-./etc/kerberos/kadm5.acl
-./etc/kerberos/kdc.conf
-./etc/kerberos/krb5.conf
-./etc/kerberos/mppdb-site.xml
-./share/postgresql/tmp/udstools.py
 ./share/postgresql/db4ai
 ./share/postgresql/snowball_create.sql
 ./share/postgresql/pg_hba.conf.sample
@@ -54,7 +26,6 @@
 ./share/postgresql/pg_ident.conf.sample
 ./share/postgresql/postgres.description
 ./share/postgresql/postgresql.conf.sample
-./share/postgresql/mot.conf.sample
 ./share/postgresql/extension/plpgsql--1.0.sql
 ./share/postgresql/extension/hstore.control
 ./share/postgresql/extension/security_plugin.control
@@ -72,8 +43,6 @@
 ./share/postgresql/extension/hdfs_fdw.control
 ./share/postgresql/extension/log_fdw--1.0.sql
 ./share/postgresql/extension/log_fdw.control
-./share/postgresql/extension/mot_fdw--1.0.sql
-./share/postgresql/extension/mot_fdw.control
 ./share/postgresql/extension/postgres_fdw--1.0.sql
 ./share/postgresql/extension/postgres_fdw.control
 ./share/postgresql/timezone/GB-Eire
@@ -282,7 +251,6 @@
 ./share/postgresql/timezone/Canada/Newfoundland
 ./share/postgresql/timezone/Canada/Saskatchewan
 ./share/postgresql/timezone/Canada/Pacific
-./share/postgresql/timezone/Canada/East-Saskatchewan
 ./share/postgresql/timezone/Canada/Mountain
 ./share/postgresql/timezone/Canada/Central
 ./share/postgresql/timezone/CST6CDT
@@ -664,7 +632,6 @@
 ./share/postgresql/timezone/Navajo
 ./share/postgresql/timezone/GMT
 ./share/postgresql/system_views.sql
-./share/postgresql/private_system_views.sql
./share/postgresql/performance_views.sql ./share/postgresql/sql_features.txt ./share/postgresql/pg_cast_oid.txt @@ -703,11 +670,41 @@ ./share/postgresql/timezonesets/Default ./share/postgresql/timezonesets/Etc.txt ./share/postgresql/postgres.bki -./share/llvmir/GaussDB_expr.ir ./share/sslcert/gsql/openssl.cnf -./share/sslcert/grpc/openssl.cnf -./share/sslcert/om/openssl.cnf -./lib/libsimsearch/ +./lib/libpq.so +./lib/libpq.so.5 +./lib/libpq.so.5.5 +./lib/libssl.so +./lib/libssl.so.1.1 +./lib/libcrypto.so +./lib/libcrypto.so.1.1 +./lib/libcgroup.so +./lib/libcgroup.so.1 +./lib/libz.so +./lib/libz.so.1 +./lib/libz.so.1.2.11 +./lib/liblz4.so +./lib/liblz4.so.1 +./lib/liblz4.so.1.9.2 +./lib/libcjson.so +./lib/libcjson.so.1 +./lib/libcjson.so.1.7.13 +./lib/libcjson_utils.so +./lib/libcjson_utils.so.1 +./lib/libcjson_utils.so.1.7.13 +./lib/libstdc++.so.6 +./lib/libgcc_s.so.1 +./lib/libgomp.so +./lib/libgomp.so.1 +./lib/libgomp.so.1.0.0 +./lib/libdcf.so +./lib/libzstd.so +./lib/libzstd.so.1 +./lib/libzstd.so.1.5.0 +./lib/libcurl.so +./lib/libcurl.so.4 +./lib/libcurl.so.4.6.0 +./lib/libxgboost.so ./lib/postgresql/latin2_and_win1250.so ./lib/postgresql/euc2004_sjis2004.so ./lib/postgresql/euc_kr_and_mic.so @@ -716,12 +713,6 @@ ./lib/postgresql/cyrillic_and_mic.so ./lib/postgresql/utf8_and_johab.so ./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/src/get_PlatForm_str.sh ./lib/postgresql/pgxs/config/install-sh ./lib/postgresql/euc_cn_and_mic.so ./lib/postgresql/latin_and_mic.so @@ -747,142 +738,8 @@ ./lib/postgresql/pg_plugin ./lib/postgresql/proc_srclib ./lib/postgresql/security_plugin.so -./lib/postgresql/pg_upgrade_support.so -./lib/postgresql/java/pljava.jar -./lib/postgresql/postgres_fdw.so -./lib/postgresql/pgoutput.so -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libcgroup.so -./lib/libcgroup.so.1 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -./lib/libatomic.so -./lib/libatomic.so.1 -./lib/libatomic.so.1.2.0 -./lib/libmasstree.so -./lib/libupb.so -./lib/libupb.so.9 -./lib/libupb.so.9.0.0 -./lib/libabsl_str_format_internal.so -./lib/libabsl_strings.so -./lib/libabsl_throw_delegate.so -./lib/libabsl_strings_internal.so -./lib/libabsl_base.so -./lib/libabsl_dynamic_annotations.so -./lib/libabsl_spinlock_wait.so -./lib/libabsl_int128.so -./lib/libabsl_bad_optional_access.so -./lib/libabsl_raw_logging_internal.so -./lib/libabsl_log_severity.so -./lib/libaddress_sorting.so -./lib/libaddress_sorting.so.9 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkadm5clnt.so -./lib/libkadm5clnt_mit.so -./lib/libkadm5clnt_mit.so.11 -./lib/libkadm5clnt_mit.so.11.0 -./lib/libkadm5clnt_mit.so.12 -./lib/libkadm5clnt_mit.so.12.0 -./lib/libkadm5srv.so -./lib/libkadm5srv_mit.so -./lib/libkadm5srv_mit.so.11 -./lib/libkadm5srv_mit.so.11.0 -./lib/libkadm5srv_mit.so.12 -./lib/libkadm5srv_mit.so.12.0 -./lib/libkdb5.so -./lib/libkdb5.so.9 -./lib/libkdb5.so.9.0 -./lib/libkdb5.so.10 
-./lib/libkdb5.so.10.0 -./lib/libkrad.so -./lib/libkrad.so.0 -./lib/libkrad.so.0.0 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/krb5/plugins/kdb/db2.so -./lib/libverto.so -./lib/libverto.so.0 -./lib/libverto.so.0.0 -./lib/libcurl.so -./lib/libcurl.so.4 -./lib/libcurl.so.4.6.0 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libgcc_s.so.1 -./lib/libstdc++.so.6 -./lib/libz.so -./lib/libz.so.1 -./lib/libz.so.1.2.11 -./lib/liblz4.so -./lib/liblz4.so.1 -./lib/liblz4.so.1.9.2 -./lib/libcjson.so -./lib/libcjson.so.1 -./lib/libcjson.so.1.7.13 -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./share/llvmir/GaussDB_expr.ir -./lib/libeSDKLogAPI.so -./lib/libeSDKOBS.so -./lib/liblog4cpp.so -./lib/liblog4cpp.so.5 -./lib/liblog4cpp.so.5.0.6 -./lib/libcharset.so -./lib/libcharset.so.1 -./lib/libcharset.so.1.0.0 -./lib/libiconv.so -./lib/libiconv.so.2 -./lib/libiconv.so.2.6.1 -./lib/libnghttp2.so -./lib/libnghttp2.so.14 -./lib/libnghttp2.so.14.20.0 -./lib/libpcre.so -./lib/libpcre.so.1 -./lib/libpcre.so.1.2.12 -./lib/libsecurec.so -./lib/libxml2.so -./lib/libxml2.so.2 -./lib/libxml2.so.2.9.9 -./lib/libparquet.so -./lib/libparquet.so.14 -./lib/libparquet.so.14.1.0 -./lib/libarrow.so -./lib/libarrow.so.14 -./lib/libarrow.so.14.1.0 -./lib/OBS.ini ./lib/postgresql/latin2_and_win1250.so ./lib/postgresql/euc2004_sjis2004.so -./lib/libdcf.so -./lib/libzstd.so -./lib/libzstd.so.1 -./lib/libzstd.so.1.4.4 - - ./include/postgresql/server/postgres_ext.h ./include/postgresql/server/pg_config_os.h ./include/postgresql/server/pgtime.h @@ -1012,6 +869,7 @@ ./include/postgresql/server/storage/backendid.h ./include/postgresql/server/storage/lock/lock.h ./include/postgresql/server/storage/lock/lwlock.h +./include/postgresql/server/storage/lwlocknames.h ./include/postgresql/server/storage/barrier.h ./include/postgresql/server/storage/shmem.h ./include/postgresql/server/pg_config.h @@ -1035,418 +893,14 @@ ./include/postgresql/server/lib/ilist.h ./include/postgresql/server/pgxc/locator.h ./include/postgresql/server/gstrace/gstrace_infra.h -./include/postgresql/server/extension_dependency.h -./include/postgresql/server/libpq/libpq-fe.h -./include/postgresql/server/access/clog.h -./include/postgresql/server/storage/proc.h -./include/postgresql/server/access/xlog.h -./include/postgresql/server/storage/lwlocknames.h -./include/postgresql/server/access/xloginsert.h -./include/postgresql/server/catalog/pg_control.h -./include/postgresql/server/access/parallel_recovery/redo_item.h -./include/postgresql/server/access/parallel_recovery/posix_semaphore.h -./include/postgresql/server/replication/replicainternal.h -./include/postgresql/server/knl/knl_instance.h -./include/postgresql/server/knl/knl_guc.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_guc_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_sql.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_storage.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_security.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_network.h 
-./include/postgresql/server/knl/knl_guc/knl_instance_attr_network.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_memory.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_resource.h -./include/postgresql/server/knl/knl_guc/knl_session_attr_common.h -./include/postgresql/server/knl/knl_guc/knl_instance_attr_common.h -./include/postgresql/server/lib/circularqueue.h -./include/postgresql/server/access/double_write_basic.h -./include/postgresql/server/knl/knl_thread.h -./include/postgresql/server/access/sdir.h -./include/postgresql/server/gssignal/gs_signal.h -./include/postgresql/server/knl/knl_session.h -./include/postgresql/server/libpq/pqcomm.h -./include/postgresql/server/cipher.h -./include/postgresql/server/portability/instr_time.h -./include/postgresql/server/utils/memgroup.h -./include/postgresql/server/storage/latch.h -./include/postgresql/server/workload/qnode.h -./include/postgresql/server/streaming/init.h -./include/postgresql/server/streaming/launcher.h -./include/postgresql/server/pgxc/barrier.h -./include/postgresql/server/libcomm/libcomm.h -./include/postgresql/server/hotpatch/hotpatch.h -./include/postgresql/server/hotpatch/hotpatch_backend.h -./include/postgresql/server/postmaster/bgwriter.h -./include/postgresql/server/postmaster/pagewriter.h -./include/postgresql/server/replication/heartbeat.h -./include/postgresql/server/access/multi_redo_settings.h -./include/postgresql/server/access/redo_statistic_msg.h -./include/postgresql/server/replication/rto_statistic.h -./include/postgresql/server/replication/walprotocol.h -./include/postgresql/server/storage/mot/jit_def.h -./include/postgresql/server/threadpool/threadpool.h -./include/postgresql/server/threadpool/threadpool_controler.h -./include/postgresql/server/threadpool/threadpool_group.h -./include/postgresql/server/knl/knl_variable.h -./include/postgresql/server/threadpool/threadpool_listener.h -./include/postgresql/server/threadpool/threadpool_sessctl.h -./include/postgresql/server/storage/procsignal.h -./include/postgresql/server/threadpool/threadpool_worker.h -./include/postgresql/server/threadpool/threadpool_scheduler.h -./include/postgresql/server/threadpool/threadpool_stream.h -./include/postgresql/server/replication/dataqueuedefs.h -./include/postgresql/server/gtm/gtm_c.h -./include/postgresql/server/cm/etcdapi.h -./include/postgresql/server/alarm/alarm.h -./include/postgresql/server/access/xact.h -./include/postgresql/server/access/cstore_am.h -./include/postgresql/server/access/cstore_roughcheck_func.h -./include/postgresql/server/access/cstoreskey.h -./include/postgresql/server/storage/cu.h -./include/postgresql/server/vecexecutor/vectorbatch.h -./include/postgresql/server/cstore.h -./include/postgresql/server/storage/cstore/cstore_mem_alloc.h -./include/postgresql/server/access/cstore_minmax_func.h -./include/postgresql/server/storage/custorage.h -./include/postgresql/server/storage/fd.h -./include/postgresql/server/postmaster/aiocompleter.h -./include/postgresql/server/storage/buf/bufmgr.h -./include/postgresql/server/storage/buf/buf_internals.h -./include/postgresql/server/storage/smgr.h -./include/postgresql/server/catalog/pg_am.h -./include/postgresql/server/catalog/pg_class.h -./include/postgresql/server/catalog/pg_index.h -./include/postgresql/server/rewrite/prs2lock.h -./include/postgresql/server/tcop/stmt_retry.h 
-./include/postgresql/server/catalog/pg_hashbucket_fn.h -./include/postgresql/server/utils/rel_gs.h -./include/postgresql/server/catalog/pg_partition.h -./include/postgresql/server/catalog/pg_hashbucket.h -./include/postgresql/server/catalog/catalog.h -./include/postgresql/server/catalog/catversion.h -./include/postgresql/server/catalog/pg_namespace.h -./include/postgresql/server/utils/partitionmap_gs.h -./include/postgresql/server/access/heapam.h -./include/postgresql/server/storage/pagecompress.h -./include/postgresql/server/replication/bcm.h -./include/postgresql/server/storage/cstore/cstorealloc.h -./include/postgresql/server/storage/cucache_mgr.h -./include/postgresql/server/storage/cache_mgr.h -./include/postgresql/server/nodes/plannodes.h -./include/postgresql/server/foreign/foreign.h -./include/postgresql/server/access/obs/obs_am.h -./include/postgresql/server/storage/buf/buffile.h -./include/postgresql/server/replication/slot.h -./include/postgresql/server/access/obs/eSDKOBS.h -./include/postgresql/server/commands/defrem.h -./include/postgresql/server/optimizer/pruning.h -./include/postgresql/server/nodes/relation.h -./include/postgresql/server/optimizer/bucketinfo.h -./include/postgresql/server/pgxc/nodemgr.h -./include/postgresql/server/bulkload/dist_fdw.h -./include/postgresql/server/bulkload/importerror.h -./include/postgresql/server/commands/gds_stream.h -./include/postgresql/server/bulkload/utils.h -./include/postgresql/server/cjson/cJSON.h -./include/postgresql/server/ssl/gs_openssl_client.h -./include/postgresql/server/funcapi.h -./include/postgresql/server/executor/executor.h -./include/postgresql/server/executor/execdesc.h -./include/postgresql/server/nodes/execnodes.h -./include/postgresql/server/access/genam.h -./include/postgresql/server/nodes/tidbitmap.h -./include/postgresql/server/access/relscan.h -./include/postgresql/server/access/itup.h -./include/postgresql/server/executor/instrument.h -./include/postgresql/server/miscadmin.h -./include/postgresql/server/libpq/libpq-be.h -./include/postgresql/server/libpq/hba.h -./include/postgresql/server/libpq/sha2.h -./include/postgresql/server/utils/anls_opt.h -./include/postgresql/server/pgxc/pgxc.h -./include/postgresql/server/catalog/namespace.h -./include/postgresql/server/commands/trigger.h -./include/postgresql/server/executor/spi.h -./include/postgresql/server/access/ustore/undo/knl_uundotype.h -./include/postgresql/server/access/ustore/knl_uheap.h -./include/postgresql/server/access/ustore/knl_utuple.h -./include/postgresql/server/access/ustore/knl_utype.h -./include/postgresql/server/access/ustore/knl_upage.h -./include/postgresql/server/access/ustore/knl_uredo.h -./include/postgresql/server/access/ustore/knl_uundovec.h -./include/postgresql/server/access/ustore/knl_uundorecord.h -./include/postgresql/server/access/ustore/undo/knl_uundoxlog.h -./include/postgresql/server/access/ustore/undo/knl_uundotxn.h -./include/postgresql/server/access/ustore/undo/knl_uundozone.h -./include/postgresql/server/access/ustore/undo/knl_uundospace.h -./include/postgresql/server/communication/commproxy_basic.h -./include/postgresql/server/access/parallel_recovery/page_redo.h -./include/postgresql/server/access/parallel_recovery/spsc_blocking_queue.h -./include/postgresql/server/executor/exec/execdesc.h -./include/postgresql/server/db4ai/matrix.h -./include/postgresql/server/db4ai/scores.h -./jre/ASSEMBLY_EXCEPTION -./jre/bin/java -./jre/bin/jjs -./jre/bin/keytool -./jre/bin/orbd -./jre/bin/pack200 -./jre/bin/policytool -./jre/bin/rmid 
-./jre/bin/rmiregistry -./jre/bin/servertool -./jre/bin/tnameserv -./jre/bin/unpack200 -./jre/lib/amd64/jli/libjli.so -./jre/lib/amd64/jvm.cfg -./jre/lib/amd64/libattach.so -./jre/lib/amd64/libavplugin-ffmpeg-58.so -./jre/lib/amd64/libawt_headless.so -./jre/lib/amd64/libawt.so -./jre/lib/amd64/libawt_xawt.so -./jre/lib/amd64/libdecora_sse.so -./jre/lib/amd64/libdt_socket.so -./jre/lib/amd64/libfontmanager.so -./jre/lib/amd64/libfxplugins.so -./jre/lib/amd64/libglassgtk2.so -./jre/lib/amd64/libglassgtk3.so -./jre/lib/amd64/libglass.so -./jre/lib/amd64/libgstreamer-lite.so -./jre/lib/amd64/libhprof.so -./jre/lib/amd64/libinstrument.so -./jre/lib/amd64/libj2gss.so -./jre/lib/amd64/libj2pcsc.so -./jre/lib/amd64/libj2pkcs11.so -./jre/lib/amd64/libjaas_unix.so -./jre/lib/amd64/libjava_crw_demo.so -./jre/lib/amd64/libjavafx_font_freetype.so -./jre/lib/amd64/libjavafx_font_pango.so -./jre/lib/amd64/libjavafx_font.so -./jre/lib/amd64/libjavafx_iio.so -./jre/lib/amd64/libjava.so -./jre/lib/amd64/libjawt.so -./jre/lib/amd64/libjdwp.so -./jre/lib/amd64/libjfxmedia.so -./jre/lib/amd64/libjfxwebkit.so -./jre/lib/amd64/libjpeg.so -./jre/lib/amd64/libjsdt.so -./jre/lib/amd64/libjsig.so -./jre/lib/amd64/libjsoundalsa.so -./jre/lib/amd64/libjsound.so -./jre/lib/amd64/liblcms.so -./jre/lib/amd64/libmanagement.so -./jre/lib/amd64/libmlib_image.so -./jre/lib/amd64/libnet.so -./jre/lib/amd64/libnio.so -./jre/lib/amd64/libnpt.so -./jre/lib/amd64/libprism_common.so -./jre/lib/amd64/libprism_es2.so -./jre/lib/amd64/libprism_sw.so -./jre/lib/amd64/libsaproc.so -./jre/lib/amd64/libsctp.so -./jre/lib/amd64/libsplashscreen.so -./jre/lib/amd64/libsunec.so -./jre/lib/amd64/libunpack.so -./jre/lib/amd64/libverify.so -./jre/lib/amd64/libzip.so -./jre/lib/amd64/server/libjvm.so -./jre/lib/amd64/server/Xusage.txt -./jre/lib/calendars.properties -./jre/lib/charsets.jar -./jre/lib/classlist -./jre/lib/cmm/CIEXYZ.pf -./jre/lib/cmm/GRAY.pf -./jre/lib/cmm/LINEAR_RGB.pf -./jre/lib/cmm/PYCC.pf -./jre/lib/cmm/sRGB.pf -./jre/lib/content-types.properties -./jre/lib/currency.data -./jre/lib/ext/cldrdata.jar -./jre/lib/ext/dnsns.jar -./jre/lib/ext/jaccess.jar -./jre/lib/ext/jfxrt.jar -./jre/lib/ext/localedata.jar -./jre/lib/ext/meta-index -./jre/lib/ext/nashorn.jar -./jre/lib/ext/sunec.jar -./jre/lib/ext/sunjce_provider.jar -./jre/lib/ext/sunpkcs11.jar -./jre/lib/ext/zipfs.jar -./jre/lib/flavormap.properties -./jre/lib/fontconfig.Euler.properties -./jre/lib/fontconfig.properties -./jre/lib/fontconfig.Ubuntu.properties -./jre/lib/fonts/Roboto-Regular.ttf -./jre/lib/hijrah-config-umalqura.properties -./jre/lib/images/cursors/cursors.properties -./jre/lib/images/cursors/invalid32x32.gif -./jre/lib/images/cursors/motif_CopyDrop32x32.gif -./jre/lib/images/cursors/motif_CopyNoDrop32x32.gif -./jre/lib/images/cursors/motif_LinkDrop32x32.gif -./jre/lib/images/cursors/motif_LinkNoDrop32x32.gif -./jre/lib/images/cursors/motif_MoveDrop32x32.gif -./jre/lib/images/cursors/motif_MoveNoDrop32x32.gif -./jre/lib/javafx-mx.jar -./jre/lib/javafx.properties -./jre/lib/jce.jar -./jre/lib/jexec -./jre/lib/jfr/default.jfc -./jre/lib/jfr.jar -./jre/lib/jfr/profile.jfc -./jre/lib/jfxswt.jar -./jre/lib/jsse.jar -./jre/lib/jvm.hprof.txt -./jre/lib/logging.properties -./jre/lib/management-agent.jar -./jre/lib/management/jmxremote.access -./jre/lib/management/jmxremote.password.template -./jre/lib/management/management.properties -./jre/lib/management/snmp.acl.template -./jre/lib/meta-index -./jre/lib/net.properties -./jre/lib/psfontj2d.properties 
-./jre/lib/psfont.properties.ja -./jre/lib/resources.jar -./jre/lib/rt.jar -./jre/lib/security/blacklisted.certs -./jre/lib/security/cacerts -./jre/lib/security/java.policy -./jre/lib/security/java.security -./jre/lib/security/policy/limited/local_policy.jar -./jre/lib/security/policy/limited/US_export_policy.jar -./jre/lib/security/policy/unlimited/local_policy.jar -./jre/lib/security/policy/unlimited/US_export_policy.jar -./jre/lib/sound.properties -./jre/lib/tzdb.dat -./jre/LICENSE -./jre/THIRD_PARTY_README -[client] -./bin/gsql -./bin/gs_dump -./bin/gs_dumpall -./bin/gs_restore -./bin/gs_basebackup -./bin/gs_probackup -./lib/postgresql/latin2_and_win1250.so -./lib/postgresql/euc2004_sjis2004.so -./lib/postgresql/euc_kr_and_mic.so -./lib/postgresql/utf8_and_uhc.so -./lib/postgresql/euc_tw_and_big5.so -./lib/postgresql/cyrillic_and_mic.so -./lib/postgresql/utf8_and_johab.so -./lib/postgresql/utf8_and_gb18030.so -./lib/postgresql/pgxs/src/makefiles/pgxs.mk -./lib/postgresql/pgxs/src/Makefile.shlib -./lib/postgresql/pgxs/src/Makefile.port -./lib/postgresql/pgxs/src/nls-global.mk -./lib/postgresql/pgxs/src/Makefile.global -./lib/postgresql/pgxs/config/install-sh -./lib/postgresql/euc_cn_and_mic.so -./lib/postgresql/latin_and_mic.so -./lib/postgresql/utf8_and_sjis2004.so -./lib/postgresql/utf8_and_euc_jp.so -./lib/postgresql/utf8_and_sjis.so -./lib/postgresql/utf8_and_cyrillic.so -./lib/postgresql/utf8_and_euc_kr.so -./lib/postgresql/ascii_and_mic.so -./lib/postgresql/utf8_and_iso8859_1.so -./lib/postgresql/euc_jp_and_sjis.so -./lib/postgresql/dict_snowball.so -./lib/postgresql/utf8_and_ascii.so -./lib/postgresql/utf8_and_euc_tw.so -./lib/postgresql/utf8_and_iso8859.so -./lib/postgresql/utf8_and_win.so -./lib/postgresql/utf8_and_euc_cn.so -./lib/postgresql/utf8_and_gbk.so -./lib/postgresql/utf8_and_euc2004.so -./lib/postgresql/utf8_and_big5.so -./lib/postgresql/java/pljava.jar -./lib/libpljava.so -./lib/libpq.a -./lib/libpq.so -./lib/libpq.so.5 -./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 -./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 [libpq] -./lib/libpq.a +./lib/libcrypto.so +./lib/libcrypto.so.1.1 +./lib/libssl.so +./lib/libssl.so.1.1 ./lib/libpq.so ./lib/libpq.so.5 ./lib/libpq.so.5.5 -./lib/libpq_ce.so -./lib/libpq_ce.so.5 -./lib/libpq_ce.so.5.5 -./lib/libgauss_cl_jni.so -./lib/libconfig.so -./lib/libconfig.so.4 -./lib/libcrypto.so -./lib/libcrypto.so.1.1 -./lib/libstdc++.so.6 -./lib/libssl.so -./lib/libssl.so.1.1 -./lib/libpgport_tool.so -./lib/libpgport_tool.so.1 -./lib/libgssapi_krb5_gauss.so -./lib/libgssapi_krb5_gauss.so.2 -./lib/libgssapi_krb5_gauss.so.2.2 -./lib/libgssrpc_gauss.so -./lib/libgssrpc_gauss.so.4 -./lib/libgssrpc_gauss.so.4.2 -./lib/libk5crypto_gauss.so -./lib/libk5crypto_gauss.so.3 -./lib/libk5crypto_gauss.so.3.1 
-./lib/libkrb5support_gauss.so -./lib/libkrb5support_gauss.so.0 -./lib/libkrb5support_gauss.so.0.1 -./lib/libkrb5_gauss.so -./lib/libkrb5_gauss.so.3 -./lib/libkrb5_gauss.so.3.3 -./lib/libcom_err_gauss.so -./lib/libcom_err_gauss.so.3 -./lib/libcom_err_gauss.so.3.0 -./include/gs_thread.h -./include/gs_threadlocal.h -./include/postgres_ext.h -./include/libpq-fe.h -./include/libpq-events.h -./include/libpq/libpq-fs.h -[version] -V500R002C00 [header] ./include/libpq-fe.h ./include/postgres_ext.h @@ -1455,14 +909,8 @@ V500R002C00 ./include/pg_config.h ./include/pg_config_manual.h ./include/pg_config_os.h -./include/cm_config.h ./include/c.h ./include/port.h -./include/cm_msg.h -./include/cm_c.h -./include/cm_misc.h ./include/libpq-int.h ./include/pqcomm.h ./include/pqexpbuffer.h -./include/xlogdefs.h -./include/cm-libpq-fe.h diff --git a/build/script/opengauss_release_list_euleros_single b/build/script/x86_64_opengauss_list similarity index 98% rename from build/script/opengauss_release_list_euleros_single rename to build/script/x86_64_opengauss_list index 057faf9b6..39a7f0850 100644 --- a/build/script/opengauss_release_list_euleros_single +++ b/build/script/x86_64_opengauss_list @@ -35,6 +35,7 @@ ./bin/lz4 ./bin/kadmind ./bin/dbmind +./bin/gs_dbmind ./bin/server.key.cipher ./bin/server.key.rand ./bin/gs_plan_simulator.sh @@ -824,9 +825,7 @@ ./lib/libverto.so ./lib/libverto.so.0 ./lib/libverto.so.0.0 -./lib/libcurl.so -./lib/libcurl.so.4 -./lib/libcurl.so.4.6.0 +./lib/libcurl.so* ./lib/libcrypto.so ./lib/libcrypto.so.1.1 ./lib/libssl.so @@ -839,9 +838,7 @@ ./lib/liblz4.so ./lib/liblz4.so.1 ./lib/liblz4.so.1.9.2 -./lib/libcjson.so -./lib/libcjson.so.1 -./lib/libcjson.so.1.7.13 +./lib/libcjson.so* ./lib/libconfig.so ./lib/libconfig.so.4 ./lib/libpgport_tool.so @@ -849,25 +846,13 @@ ./share/llvmir/GaussDB_expr.ir ./lib/libeSDKLogAPI.so ./lib/libeSDKOBS.so -./lib/liblog4cpp.so -./lib/liblog4cpp.so.5 -./lib/liblog4cpp.so.5.0.6 -./lib/libcharset.so -./lib/libcharset.so.1 -./lib/libcharset.so.1.0.0 -./lib/libiconv.so -./lib/libiconv.so.2 -./lib/libiconv.so.2.6.1 -./lib/libnghttp2.so -./lib/libnghttp2.so.14 -./lib/libnghttp2.so.14.20.0 -./lib/libpcre.so -./lib/libpcre.so.1 -./lib/libpcre.so.1.2.12 +./lib/liblog4cpp.so* +./lib/libcharset.so* +./lib/libiconv.so* +./lib/libnghttp2.so* +./lib/libpcre.so* ./lib/libsecurec.so -./lib/libxml2.so -./lib/libxml2.so.2 -./lib/libxml2.so.2.9.9 +./lib/libxml2.so* ./lib/libparquet.so ./lib/libparquet.so.14 ./lib/libparquet.so.14.1.0 @@ -880,7 +865,8 @@ ./lib/libdcf.so ./lib/libzstd.so ./lib/libzstd.so.1 -./lib/libzstd.so.1.4.4 +./lib/libzstd.so.1.5.0 +./lib/libxgboost.so ./include/postgresql/server/postgres_ext.h ./include/postgresql/server/pg_config_os.h @@ -1009,8 +995,9 @@ ./include/postgresql/server/storage/item/itemptr.h ./include/postgresql/server/storage/lock/s_lock.h ./include/postgresql/server/storage/backendid.h -./include/postgresql/server/storage/lock/lock.h -./include/postgresql/server/storage/lock/lwlock.h +./include/postgresql/server/storage/lock.h +./include/postgresql/server/storage/lwlock.h +./include/postgresql/server/storage/lwlocknames.h ./include/postgresql/server/storage/barrier.h ./include/postgresql/server/storage/shmem.h ./include/postgresql/server/pg_config.h @@ -1444,8 +1431,6 @@ ./include/libpq-fe.h ./include/libpq-events.h ./include/libpq/libpq-fs.h -[version] -V500R002C00 [header] ./include/libpq-fe.h ./include/postgres_ext.h @@ -1457,9 +1442,6 @@ V500R002C00 ./include/cm_config.h ./include/c.h ./include/port.h -./include/cm_msg.h 
-./include/cm_c.h -./include/cm_misc.h ./include/libpq-int.h ./include/pqcomm.h ./include/pqexpbuffer.h diff --git a/cmake/src/build_function.cmake b/cmake/src/build_function.cmake index 2298a0bc5..1ffeb341b 100755 --- a/cmake/src/build_function.cmake +++ b/cmake/src/build_function.cmake @@ -199,17 +199,14 @@ ENDMACRO(CHECK_CC_ENABLE) function(GET_VERSIONSTR_FROMGIT ret) set(PG_VERSION "9.2.4") set(OPENGAUSS_VERSION "2.1.0") - execute_process( - COMMAND ${CMAKE_SOURCE_DIR}/${openGauss}/cmake/src/buildfunction.sh --d ${PROJECT_TRUNK_DIR} OUTPUT_VARIABLE KERNEL_VERSION_STR) execute_process( COMMAND ${CMAKE_SOURCE_DIR}/${openGauss}/cmake/src/buildfunction.sh --s ${PROJECT_TRUNK_DIR} OUTPUT_VARIABLE GS_VERSION_STR) set(PG_VERSION "${PG_VERSION}" PARENT_SCOPE) + set(${ret} "${GS_VERSION_STR}" PARENT_SCOPE) set(OPENGAUSS_VERSION_NUM_STR, "${OPENGAUSS_VERSION}" PARENT_SCOPE) if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF) - set(${ret} "${KERNEL_VERSION_STR}" PARENT_SCOPE) - set(PG_VERSION_STR "openGauss ${OPENGAUSS_VERSION} ${KERNEL_VERSION_STR}") + set(PG_VERSION_STR "openGauss ${OPENGAUSS_VERSION} ${GS_VERSION_STR}") else() - set(${ret} "${GS_VERSION_STR}" PARENT_SCOPE) set(PG_VERSION_STR "${GS_VERSION_STR}") endif() set(PG_VERSION_STR "${PG_VERSION_STR}" PARENT_SCOPE) diff --git a/cmake/src/build_options.cmake b/cmake/src/build_options.cmake index c4084321a..218c1772a 100755 --- a/cmake/src/build_options.cmake +++ b/cmake/src/build_options.cmake @@ -49,6 +49,7 @@ option(ENABLE_LCOV "enable lcov, the old is --enable-lcov" OFF) # new add option(ENABLE_MULTIPLE_NODES "enable distribute,the old is --enable-multiple-nodes" OFF) option(ENABLE_PRIVATEGAUSS "enable privategauss,the old is --enable-pribategauss" OFF) +option(ENABLE_LITE_MODE "enable lite in single_node mode,the old is --enable-lite-mode" OFF) option(ENABLE_DEBUG "enable privategauss,the old is --enable-pribategauss" OFF) option(ENABLE_MOT "enable mot in single_node mode,the old is --enable-mot" OFF) option(ENABLE_MYSQL_FDW "enable export or import data with mysql,the old is --enable-mysql-fdw" OFF) @@ -125,6 +126,12 @@ if(${BUILD_TUPLE} STREQUAL "aarch64") endif() endif() +if(${ENABLE_LITE_MODE} STREQUAL "ON") + set(ENABLE_LLVM_COMPILE OFF) + set(ENABLE_GSS OFF) + set(KRB5 OFF) +endif() + set(PROTECT_OPTIONS -fwrapv -std=c++14 -fnon-call-exceptions ${OPTIMIZE_LEVEL}) set(WARNING_OPTIONS -Wall -Wendif-labels -Werror -Wformat-security) set(OPTIMIZE_OPTIONS -pipe -pthread -fno-aggressive-loop-optimizations -fno-expensive-optimizations -fno-omit-frame-pointer -fno-strict-aliasing -freg-struct-return) @@ -237,8 +244,8 @@ add_definitions(-Wno-builtin-macro-redefined) SET_GCC_FLAGS(DB_COMMON_FLAGS "") #hotpatch -set(HOTPATCH_PLATFORM_LIST suse11_sp1_x86_64 euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp2_x86_64 euleros2.0_sp5_x86_64 kylinv10_sp1_aarch64 kylinv10_sp1_x86_64_intel) -set(HOTPATCH_ARM_LIST euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 kylinv10_sp1_aarch64) +set(HOTPATCH_PLATFORM_LIST suse11_sp1_x86_64 suse12_sp5_x86_64 euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 euleros2.0_sp2_x86_64 euleros2.0_sp5_x86_64 euleros2.0_sp10_x86_64 kylinv10_sp1_aarch64 kylinv10_sp1_x86_64_intel) +set(HOTPATCH_ARM_LIST euleros2.0_sp8_aarch64 euleros2.0_sp9_aarch64 euleros2.0_sp10_aarch64 kylinv10_sp1_aarch64) list(FIND HOTPATCH_PLATFORM_LIST "${PLAT_FORM_NAME}" RET_HOTPATCH) list(FIND HOTPATCH_ARM_LIST "${PLAT_FORM_NAME}" RET_ARM_HOTPATCH) if(NOT 
${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF) @@ -246,11 +253,7 @@ if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF) if("${GCC_VERSION}" STREQUAL "7.3.0") set(SUPPORT_HOTPATCH "yes") if(NOT ${RET_ARM_HOTPATCH} EQUAL -1) - if("$ENV{DEBUG_TYPE}" STREQUAL "debug") - set(HOTPATCH_ATOMIC_LDS -Wl,-T${LIBHOTPATCH_TOOL_PATH}/atomic_debug.lds) - else() - set(HOTPATCH_ATOMIC_LDS -Wl,-T${LIBHOTPATCH_TOOL_PATH}/atomic.lds) - endif() + set(HOTPATCH_ATOMIC_LDS -Wl,-T${LIBHOTPATCH_TOOL_PATH}/atomic.lds) endif() else() set(SUPPORT_HOTPATCH "no") @@ -262,11 +265,17 @@ else() set(SUPPORT_HOTPATCH "no") endif() -# LLVM version -execute_process(COMMAND ${LLVM_CONFIG} --version OUTPUT_VARIABLE LLVM_VERSION_STR OUTPUT_STRIP_TRAILING_WHITESPACE) -string(REPLACE "." ";" LLVM_VERSION_LIST ${LLVM_VERSION_STR}) -list(GET LLVM_VERSION_LIST 0 LLVM_MAJOR_VERSION) -list(GET LLVM_VERSION_LIST 1 LLVM_MINOR_VERSION) +if(${ENABLE_LITE_MODE} STREQUAL "ON") + set(SUPPORT_HOTPATCH "no") +endif() + +if(${ENABLE_LLVM_COMPILE} STREQUAL "ON") + # LLVM version + execute_process(COMMAND ${LLVM_CONFIG} --version OUTPUT_VARIABLE LLVM_VERSION_STR OUTPUT_STRIP_TRAILING_WHITESPACE) + string(REPLACE "." ";" LLVM_VERSION_LIST ${LLVM_VERSION_STR}) + list(GET LLVM_VERSION_LIST 0 LLVM_MAJOR_VERSION) + list(GET LLVM_VERSION_LIST 1 LLVM_MINOR_VERSION) +endif() if(${NO_CHECK_CONFIG}) string(SUBSTRING "${BUILD_TUPLE}" 0 6 BUILD_HOST_PLATFORM) @@ -305,6 +314,9 @@ SET(EC_CONFIG_IN_FILE ecpg_config.h.in) build_mppdb_config_paths_h(PG_CONFIG_PATH_H) configure_file(${openGauss}/cmake/src/config-in/${CONFIG_IN_FILE} ${CMAKE_BINARY_DIR}/pg_config.h @ONLY) configure_file(${openGauss}/cmake/src/config-in/${EC_CONFIG_IN_FILE} ${CMAKE_BINARY_DIR}/ecpg_config.h @ONLY) +#set host_cpu for pgxs.mk +set(HOST_CPU ${BUILD_TUPLE}) +configure_file(${openGauss}/src/makefiles/pgxs.mk ${CMAKE_BINARY_DIR}/${openGauss}/src/makefiles/pgxs.mk @ONLY) SET(PROJECT_INCLUDE_DIR ${PROJECT_INCLUDE_DIR} ${CMAKE_BINARY_DIR}) # 排斥项 @@ -312,3 +324,6 @@ if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" AND "${ENABLE_MOT}" STREQUAL "ON") message(FATAL_ERROR "error: --enable-mot option is not supported with --enable-multiple-nodes option") endif() +if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" AND "${ENABLE_LITE_MODE}" STREQUAL "ON") + message(FATAL_ERROR "error: --enable-lite-mode option is not supported with --enable-multiple-nodes option") +endif() diff --git a/cmake/src/buildfunction.sh b/cmake/src/buildfunction.sh index aee6e0779..3e25dc59f 100755 --- a/cmake/src/buildfunction.sh +++ b/cmake/src/buildfunction.sh @@ -114,34 +114,17 @@ function get_gs_version() commits=$(git log | grep "See merge request" | wc -l) mrid=$(git log | grep "See merge request" | head -1 | awk -F! 
'{print $2}' | grep -o '[0-9]\+') debug_str="$DEBUG_TYPE" + product=$(cat build/script/gaussdb.ver | grep 'PRODUCT' | awk -F "=" '{print $2}') + version=$(cat build/script/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}') if test "$enable_ccache" = yes; then - default_gs_version="(openGauss 2.0.0 build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug" + default_gs_version="(${product} ${version} build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug" else date_time=$(date -d today +"%Y-%m-%d %H:%M:%S") - default_gs_version="(openGauss 2.1.0 build $csv_version) compiled at $date_time commit $commits last mr $mrid $debug_str" + default_gs_version="(${product} ${version} build ${csv_version}) compiled at $date_time commit $commits last mr $mrid $debug_str" fi printf "${default_gs_version}" } -function get_kernel_version() -{ - cd $1 - csv_version=$(git log | grep commit | head -1 | awk '{print $2}' | cut -b 1-8) - commits=$(git log | grep "See merge request" | wc -l) - mrid=$(git log | grep "See merge request" | head -1 | awk -F! '{print $2}' | grep -o '[0-9]\+') - debug_str="$DEBUG_TYPE" - product=$(cat build/script/gauss.spec | grep 'PRODUCT' | awk -F "=" '{print $2}') - version=$(cat build/script/gauss.spec | grep 'VERSION' | awk -F "=" '{print $2}') - - if test "$enable_ccache" = yes; then - default_kernel_version="(GaussDB Kernel V500R002C00 build 1f1f1f1f1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug" - else - date_time=$(date -d today +"%Y-%m-%d %H:%M:%S") - default_kernel_version="($product $version build $csv_version) compiled at $date_time commit $commits last mr $mrid $debug_str" - fi - printf "${default_kernel_version}" -} - function get_time_for_roach() { tmp=$(date +'%d %b %Y %H:%M:%S') @@ -166,8 +149,6 @@ case "${DO_CMD}" in create_conversionfile ;; --create_snowballfile|snowball) create_snowballfile ;; - --get_kernel_versionstr|--d) - get_kernel_version "$2";; --get_gs_versionstr|--s) get_gs_version "$2";; --get_time_for_roach) diff --git a/cmake/src/config-in/pg_config.h.in b/cmake/src/config-in/pg_config.h.in index e36ccb0d9..d81cdbf1a 100755 --- a/cmake/src/config-in/pg_config.h.in +++ b/cmake/src/config-in/pg_config.h.in @@ -913,6 +913,10 @@ * * (--enable-privategauss) */ #cmakedefine ENABLE_PRIVATEGAUSS +/* Define to 1 if you want to generate gauss product as lite mode. + * * (--enable-lite-mode) */ +#cmakedefine ENABLE_LITE_MODE + /* Define to 1 if you want to use mot * --enable-mot */ #cmakedefine ENABLE_MOT @@ -931,3 +935,7 @@ /* Define to on if you want to collect USTORE statistics */ #cmakedefine DEBUG_UHEAP + +/* Define to 1 if you want to build opengauss rpm package on openeuler os. 
+ * (--with-openeuler-os) */ +#cmakedefine WITH_OPENEULER_OS \ No newline at end of file diff --git a/cmake/src/set_thirdparty_path.cmake b/cmake/src/set_thirdparty_path.cmake index a7542e5ea..2d42167bc 100755 --- a/cmake/src/set_thirdparty_path.cmake +++ b/cmake/src/set_thirdparty_path.cmake @@ -3,6 +3,7 @@ set(3RD_PATH $ENV{THIRD_BIN_PATH}) set(VERSION_TYPE $ENV{DEBUG_TYPE}) option(ENABLE_LLT "enable llt, current value is --enable-llt" OFF) option(ENABLE_UT "enable ut, current value is --enable-ut" OFF) +option(WITH_OPENEULER_OS "Build openGauss rpm package on openEuler os" OFF) execute_process(COMMAND sh ${PROJECT_SRC_DIR}/get_PlatForm_str.sh OUTPUT_VARIABLE PLAT_FORM_STR OUTPUT_STRIP_TRAILING_WHITESPACE) @@ -76,6 +77,7 @@ set(PROTOBUF_HOME ${DEPENDENCY_PATH}/protobuf/${SUPPORT_LLT}) set(THRIFT_HOME ${DEPENDENCY_PATH}/thrift) set(SNAPPY_HOME ${DEPENDENCY_PATH}/snappy/${LIB_UNIFIED_SUPPORT}) set(ZLIB_HOME ${DEPENDENCY_PATH}/zlib1.2.11/${SUPPORT_LLT}) +set(XGBOOST_HOME ${DEPENDENCY_PATH}/xgboost/${SUPPORT_LLT}) set(ZSTD_HOME ${DEPENDENCY_PATH}/zstd) set(LICENSE_HOME ${PLATFORM_PATH}/AdaptiveLM_C_V100R005C01SPC002/${SUPPORT_LLT}) set(HOTPATCH_HOME ${PLATFORM_PATH}/hotpatch) @@ -159,6 +161,12 @@ else() endif() endif() +if(${WITH_OPENEULER_OS} STREQUAL "ON") + set(SECURE_C_CHECK boundscheck) +else() + set(SECURE_C_CHECK securec) +endif() + ############################################################################# # kerberos component ############################################################################# @@ -275,6 +283,12 @@ set(SNAPPY_LIB_PATH ${SNAPPY_HOME}/lib) set(ZLIB_INCLUDE_PATH ${ZLIB_HOME}/include) set(ZLIB_LIB_PATH ${ZLIB_HOME}/lib) +############################################################################# +# xgboost component +############################################################################# +set(XGBOOST_INCLUDE_PATH ${XGBOOST_HOME}/include) +set(XGBOOST_LIB_PATH ${XGBOOST_HOME}/lib64) + ############################################################################# # zstd component ############################################################################# @@ -346,3 +360,15 @@ set(MOCKCPP_3RDPARTY_PATH ${MOCKCPP_HOME}/3rdparty) set(MASSTREE_INCLUDE_PATH ${MASSTREE_HOME}/include) set(MASSTREE_LIB_PATH ${MASSTREE_HOME}/lib) +############################################################################ +# gtest component +############################################################################ +set(GTEST_INCLUDE_PATH ${GTEST_HOME}/include) +set(GTEST_LIB_PATH ${GTEST_HOME}/lib) + +############################################################################ +# mockcpp component +############################################################################ +set(MOCKCPP_INCLUDE_PATH ${MOCKCPP_HOME}/include) +set(MOCKCPP_LIB_PATH ${MOCKCPP_HOME}/lib) +set(MOCKCPP_3RDPARTY_PATH ${MOCKCPP_HOME}/3rdparty) diff --git a/configure b/configure index 204874311..a1d1a7aa5 100755 --- a/configure +++ b/configure @@ -708,6 +708,7 @@ with_ossp_uuid with_selinux krb_srvtab with_python +with_openeuler_os enable_thread_safety INCLUDES TAS @@ -741,12 +742,15 @@ enable_llt enable_llvm llvm_major_version llvm_minor_version +flex_major_version +flex_minor_version enable_ut enable_qunit enable_jemalloc enable_jemalloc_debug enable_privategauss enable_multiple_nodes +enable_lite_mode enable_mot enable_memory_check enable_mysql_fdw @@ -754,7 +758,6 @@ enable_oracle_fdw enable_thread_check enable_shared default_gs_version -default_kernel_version default_port WANTED_LANGUAGES 
enable_nls
@@ -824,12 +827,14 @@ enable_integer_datetimes
 enable_nls
 with_pgport
 with_gs_version
+with_openeuler_os
 enable_shared
 enable_rpath
 enable_jemalloc
 enable_jemalloc_debug
 enable_privategauss
 enable_multiple_nodes
+enable_lite_mode
 enable_mot
 enable_memory_check
 enable_mysql_fdw
@@ -1311,6 +1316,11 @@ Try \`$0 --help' for more information." >&2
   esac
 done
 
+# If compiling with --with-openeuler-os, use the gcc shipped with the OS.
+if test "${with_openeuler_os+set}" = set; then
+    gcc_version=$(gcc --version | sed q | awk -F')' '{print $2}' | awk '{print $1}')
+fi
+
 if test -n "$ac_prev"; then
   ac_option=--`echo $ac_prev | sed 's/_/-/g'`
   { $as_echo "$as_me: error: missing argument to $ac_option" >&2
@@ -1549,6 +1559,7 @@ Optional Features:
   --disable-float4-byval  disable float4 passed by value
   --disable-float8-byval  disable float8 passed by value
   --enable-ccache         build with ccache reducing compile time
+  --enable-lite-mode      build the gauss product in lite mode
 
 Optional Packages:
   --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
@@ -2655,8 +2666,8 @@ $as_echo "$as_me: error: argument required for --with-gs-version option" >&2;}
   esac
 else
-  product=$(cat build/script/gauss.spec | grep 'PRODUCT' | awk -F "=" '{print $2}')
-  version=$(cat build/script/gauss.spec | grep 'VERSION' | awk -F "=" '{print $2}')
+  product=$(cat build/script/gaussdb.ver | grep 'PRODUCT' | awk -F "=" '{print $2}')
+  version=$(cat build/script/gaussdb.ver | grep 'VERSION' | awk -F "=" '{print $2}')
   gitversion=$(git log 2>/dev/null | grep commit | head -1 | awk '{print $2}' | cut -b 1-8)
   commits=$(git log 2>/dev/null | grep "See in merge request" | wc -l)
   debug_str=""
@@ -2666,11 +2677,9 @@ else
   fi
 
   if test "$enable_ccache" = yes; then
-    default_gs_version="(openGauss 2.1.0 build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
-    default_kernel_version="(GaussDB Kernel V500R002C00 build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
+    default_gs_version="($product $version build 1f1f1f1f) compiled at 2100-00-00 00:00:00 commit 9999 last mr 9999 debug"
   else
     default_gs_version="($product $version build $gitversion) compiled at `date -d today +\"%Y-%m-%d %H:%M:%S\"` commit $commits last mr $mrid $debug_str"
-    default_kernel_version="($product $version build $gitversion) compiled at `date -d today +\"%Y-%m-%d %H:%M:%S\"` commit $commits last mr $mrid $debug_str"
   fi
 fi
 
@@ -2679,7 +2688,7 @@ fi
 $as_echo "$default_gs_version" >&6; }
 
 cat >>confdefs.h <<_ACEOF
-#define DEF_GS_VERSION "${default_kernel_version}"
+#define DEF_GS_VERSION "${default_gs_version}"
 _ACEOF
 
@@ -2876,14 +2885,6 @@ if test "${enable_mot+set}" = set; then
 $as_echo "$as_me: error: --enable-mot option is not supported with --enable-multiple-nodes option" >&2;}
     { (exit 1); exit 1; }; }
-    fi
-
-    if test "$enable_llvm" = no; then
-
-      { { $as_echo "$as_me:$LINENO: error: --enable-mot option is not supported with --disable-llvm option" >&5
-      $as_echo "$as_me: error: --enable-mot option is not supported with --disable-llvm option" >&2;}
-      { (exit 1); exit 1; }; }
-    fi
     ;;
 
   no)
@@ -3185,6 +3186,91 @@ else
 fi
 
+#
+# --enable-lite-mode option
+#
+
+# Check whether --enable-lite-mode was given.
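+# (When the flag is absent, the else branch below defaults enable_lite_mode
+# to "no"; selecting --enable-multiple-nodes likewise forces it back to "no".)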
+if test "${enable_lite_mode+set}" = set; then + enableval=$enable_lite_mode; + case $enableval in + yes) + if test "$enable_multiple_nodes" = yes; then + { { $as_echo "$as_me:$LINENO: error: --enable-lite-mode option is not supported with --enable-multiple-nodes option" >&5 + $as_echo "$as_me: error: --enable-lite-mode option is not supported with --enable-multiple-nodes option" >&2;} + { (exit 1); exit 1; }; } + fi + ;; + no) + : + ;; + *) + { { $as_echo "$as_me:$LINENO: error: no argument expected for --enable-lite-mode option" >&5 +$as_echo "$as_me: error: no argument expected for --enable-lite-mode option" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + +else + enable_lite_mode=no + +fi + +if test "$enable_multiple_nodes" = yes; then + + enable_lite_mode=no +fi + +if test "$enable_lite_mode" = yes; then + +cat >>confdefs.h <<\_ACEOF +#define ENABLE_LITE_MODE 1 +_ACEOF + +fi + + +# +# --with-openeuler-os enable +# +# Check whether --with-openeuler-os was given. +if test "${with_openeuler_os+set}" = set; then + enableval=$with_openeuler_os; + case $enableval in + yes) + if test "$enable_multiple_nodes" = yes; then + { { $as_echo "$as_me:$LINENO: error: --with-openeuler-os option is not supported with --enable-multiple-nodes option" >&5 + $as_echo "$as_me: error: --with-openeuler-os option is not supported with --enable-multiple-nodes option" >&2;} + { (exit 1); exit 1; }; } + fi + ;; + no) + : + ;; + *) + { { $as_echo "$as_me:$LINENO: error: no argument expected for --with-openeuler-os option" >&5 +$as_echo "$as_me: error: no argument expected for --with-openeuler-os option" >&2;} + { (exit 1); exit 1; }; } + ;; + esac + +else + with_openeuler_os=no + +fi + +if test "$enable_multiple_nodes" = yes; then + + with_openeuler_os=no +fi + +if test "$with_openeuler_os" = yes; then + +cat >>confdefs.h <<\_ACEOF +#define WITH_OPENEULER_OS 1 +_ACEOF + +fi # @@ -5771,10 +5857,14 @@ if test "${enable_llvm+set}" = set; then case $enableval in yes) +if test "$enable_lite_mode" = yes; then +{ $as_echo "$as_me:$LINENO: enable_lite_mode is open, llvm will close" >&5 +$as_echo "$as_me: enable_lite_mode is open, llvm will close" >&2;} +else cat >>confdefs.h <<\_ACEOF #define ENABLE_LLVM_COMPILE 1 _ACEOF - +fi ;; no) : @@ -5786,16 +5876,20 @@ $as_echo "$as_me: error: no argument expected for --enable-llvm option" >&2;} ;; esac +else +if test "$enable_lite_mode" = yes; then +{ $as_echo "$as_me:$LINENO: enable_lite_mode is open, llvm will close" >&5 +$as_echo "$as_me: enable_lite_mode is open, llvm will close" >&2;} else cat >>confdefs.h <<\_ACEOF #define ENABLE_LLVM_COMPILE 1 _ACEOF -enable_llvm=yes +fi fi llvm_version_str='10.0.0' -if [ ! -z "${with_3rdpartydir}" ]; then +if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$enable_lite_mode" != yes ]]; then platstr=$(sh src/get_PlatForm_str.sh) llvm_version_str=`${with_3rdpartydir}/dependency/${platstr}/llvm/comm/bin/llvm-config --version` fi @@ -6527,7 +6621,7 @@ fi # JDK # with_jdk='' -if [ ! -z "${with_3rdpartydir}" ]; then +if [[ ! -z "${with_3rdpartydir}" ]] && [[ "$with_openeuler_os" != yes ]]; then platstr=$(sh src/get_PlatForm_str.sh) cpuarch=$(uname -m) for d in "openjdk8" "huaweijdk8"; do @@ -7822,6 +7916,14 @@ else pgac_flex_version=`$FLEX --version 2>/dev/null` { $as_echo "$as_me:$LINENO: using $pgac_flex_version" >&5 $as_echo "$as_me: using $pgac_flex_version" >&6;} + +flex_major_version=$(echo $pgac_flex_version | awk '{print $2}' | awk -F "." '{print $1}') +flex_minor_version=$(echo $pgac_flex_version | awk '{print $2}' | awk -F "." 
'{print $2}') + +cat >>confdefs.h <<_ACEOF +#define FLEX_MAJOR_VERSION $flex_major_version +#define FLEX_MINOR_VERSION $flex_minor_version +_ACEOF fi @@ -9258,7 +9360,7 @@ $as_echo "$as_me: WARNING: *** Not using spinlocks will cause poor performance." >&2;} fi -if test "$with_gssapi" = no ; then +if test "$with_gssapi_" = no ; then if test "$PORTNAME" != "win32"; then { $as_echo "$as_me:$LINENO: checking for library containing gss_init_sec_context" >&5 $as_echo_n "checking for library containing gss_init_sec_context... " >&6; } @@ -12552,7 +12654,7 @@ fi fi -if test "$with_gssapi" = no ; then +if test "$with_gssapi_" = no ; then for ac_header in gssapi/gssapi.h do @@ -29687,7 +29789,7 @@ fi if test "$enable_multiple_nodes" = yes; then cat >>confdefs.h <<_ACEOF -#define PG_VERSION_STR "openGauss $OPENGAUSS_VERSION ${default_kernel_version} on $host, compiled by $cc_string, `expr $ac_cv_sizeof_void_p \* 8`-bit" +#define PG_VERSION_STR "openGauss $OPENGAUSS_VERSION ${default_gs_version} on $host, compiled by $cc_string, `expr $ac_cv_sizeof_void_p \* 8`-bit" _ACEOF else cat >>confdefs.h <<_ACEOF @@ -31156,8 +31258,10 @@ find src/gausskernel/ -name "*.y" | sort >> ./ereport.txt find src/common/backend -name "*.cpp" | sort >> ./ereport.txt find src/gausskernel/ -name "*.cpp" | sort >> ./ereport.txt -if [[ "$enable_multiple_nodes" != no ]] || [[ "$enable_privategauss" != no ]]; then - find ../distribute/cm -name "*.l" | sort > ./cm_ereport.txt - find ../distribute/cm -name "*.y" | sort >> ./cm_ereport.txt - find ../distribute/cm -name "*.cpp" | sort >> ./cm_ereport.txt +if [[ "$enable_lite_mode" != yes ]]; then + if [[ "$enable_multiple_nodes" != no ]] || [[ "$enable_privategauss" != no ]]; then + find ../distribute/cm -name "*.l" | sort > ./cm_ereport.txt + find ../distribute/cm -name "*.y" | sort >> ./cm_ereport.txt + find ../distribute/cm -name "*.cpp" | sort >> ./cm_ereport.txt + fi fi diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 23c8efce1..c82ec86f2 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -12,6 +12,7 @@ set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/hstore ${CMAKE_CURRENT_SOURCE_DIR}/test_decoding ${CMAKE_CURRENT_SOURCE_DIR}/mppdb_decoding + ${CMAKE_CURRENT_SOURCE_DIR}/sql_decoding ${CMAKE_CURRENT_SOURCE_DIR}/spi ${CMAKE_CURRENT_SOURCE_DIR}/pg_upgrade_support ${CMAKE_CURRENT_SOURCE_DIR}/postgres_fdw @@ -28,6 +29,7 @@ set(CMAKE_MODULE_PATH add_subdirectory(hstore) add_subdirectory(test_decoding) add_subdirectory(mppdb_decoding) +add_subdirectory(sql_decoding) add_subdirectory(spi) if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" OR "${ENABLE_PRIVATEGAUSS}" STREQUAL "ON") add_subdirectory(pg_upgrade_support) diff --git a/contrib/file_fdw/file_fdw--1.0.sql b/contrib/file_fdw/file_fdw--1.0.sql index 836e18430..b34fdf8b8 100644 --- a/contrib/file_fdw/file_fdw--1.0.sql +++ b/contrib/file_fdw/file_fdw--1.0.sql @@ -3,7 +3,7 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION file_fdw" to load this file. 
\quit
 
-CREATE FUNCTION file_fdw_handler()
+CREATE FUNCTION pg_catalog.file_fdw_handler()
 RETURNS fdw_handler
 AS 'MODULE_PATHNAME'
 LANGUAGE C STRICT NOT FENCED;
diff --git a/contrib/file_fdw/file_fdw.cpp b/contrib/file_fdw/file_fdw.cpp
index 2afb4cd30..48f38a490 100644
--- a/contrib/file_fdw/file_fdw.cpp
+++ b/contrib/file_fdw/file_fdw.cpp
@@ -158,6 +158,14 @@ Datum file_fdw_handler(PG_FUNCTION_ARGS)
     PG_RETURN_POINTER(fdwroutine);
 }
 
+void check_file_fdw_permission()
+{
+    if ((!initialuser()) && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) {
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("file_fdw is only available for the super user and Operatoradmin")));
+    }
+}
+
 /*
  * Validate the generic options given to a FOREIGN DATA WRAPPER, SERVER,
  * USER MAPPING or FOREIGN TABLE that uses file_fdw.
@@ -173,6 +181,7 @@ Datum file_fdw_validator(PG_FUNCTION_ARGS)
     List* other_options = NIL;
     ListCell* cell = NULL;
 
+    check_file_fdw_permission();
     if (catalog == UserMappingRelationId) {
         ereport(
             ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("file_fdw doesn't support in USER MAPPING.")));
diff --git a/contrib/gauss_connector/deparse.cpp b/contrib/gauss_connector/deparse.cpp
new file mode 100644
index 000000000..3bce65c13
--- /dev/null
+++ b/contrib/gauss_connector/deparse.cpp
@@ -0,0 +1,2936 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * IDENTIFICATION
+ *        contrib/gauss_connector/deparse.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#include "postgres.h"
+#include "knl/knl_variable.h"
+
+#include "gc_fdw.h"
+
+#include "access/heapam.h"
+#include "access/htup.h"
+#include "access/sysattr.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_collation.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "commands/defrem.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/plannodes.h"
+#include "optimizer/clauses.h"
+#include "optimizer/prep.h"
+#include "optimizer/tlist.h"
+#include "optimizer/var.h"
+#include "parser/parsetree.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/syscache.h"
+#include "utils/typcache.h"
+
+/*
+ * Global context for gcforeign_expr_walker's search of an expression tree.
+ */
+typedef struct foreign_glob_cxt {
+    PlannerInfo* root;      /* global planner state */
+    RelOptInfo* foreignrel; /* the foreign relation we are planning for */
+    Relids relids;          /* relids of base relations in the underlying
+                             * scan */
+} foreign_glob_cxt;
+
+/*
+ * Local (per-tree-level) context for gcforeign_expr_walker's search.
+ * This is concerned with identifying collations used in the expression.
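+ * Each tree level gets its own foreign_loc_cxt; as the walk unwinds, every
+ * child merges its collation verdict into its parent's context.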
+ */ +typedef enum { + FDW_COLLATE_NONE, /* expression is of a noncollatable type, or + * it has default collation that is not + * traceable to a foreign Var */ + FDW_COLLATE_SAFE, /* collation derives from a foreign Var */ + FDW_COLLATE_UNSAFE /* collation is non-default and derives from + * something other than a foreign Var */ +} FDWCollateState; + +typedef struct foreign_loc_cxt { + Oid collation; /* OID of current collation, if any */ + FDWCollateState state; /* state of current collation choice */ +} foreign_loc_cxt; + +/* + * Context for gcDeparseExpr + */ +typedef struct deparse_expr_cxt { + PlannerInfo* root; /* global planner state */ + RelOptInfo* foreignrel; /* the foreign relation we are planning for */ + RelOptInfo* scanrel; /* the underlying scan relation. Same as + * foreignrel, when that represents a join or + * a base relation. */ + StringInfo buf; /* output buffer to append to */ + List** params_list; /* exprs that will become remote Params */ + bool coorquery; /* just for coor query */ + Plan* agg; /* just for coor query */ + List* str_targetlist; /* just for coor query */ + char* agg_arg1; + char* agg_arg2; + List** colmap; + int map; + bool local_schema; +} deparse_expr_cxt; + +#define REL_ALIAS_PREFIX "r" +/* Handy macro to add relation name qualification */ +#define ADD_REL_QUALIFIER(buf, varno) appendStringInfo((buf), "%s%d.", REL_ALIAS_PREFIX, (varno)) +#define ADD_UNSIGNED_REL_QUALIFIER(buf, varno) appendStringInfo((buf), "%s%u.", REL_ALIAS_PREFIX, (varno)) +#define SUBQUERY_REL_ALIAS_PREFIX "s" +#define SUBQUERY_COL_ALIAS_PREFIX "c" + +/* + * Functions to determine whether an expression can be evaluated safely on + * remote server. + */ +static bool gcforeign_expr_walker(Node* node, foreign_glob_cxt* glob_cxt, foreign_loc_cxt* outer_cxt); +static char* deparse_type_name(Oid type_oid, int32 typemod); + +/* + * Functions to construct string representation of a node tree. 
+ */ +static void gcdeparseTargetList(StringInfo buf, PlannerInfo* root, Index rtindex, Relation rel, bool is_returning, + Bitmapset* attrs_used, bool qualify_col, List** retrieved_attrs); +static void gcDeparseExplicitTargetList(List* tlist, List** retrieved_attrs, deparse_expr_cxt* context); +static void gcDeparseSubqueryTargetList(deparse_expr_cxt* context); +static void gcDeparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo* root, bool qualify_col); +static void gcDeparseRelation(StringInfo buf, Relation rel, bool schema = true); +static void gcDeparseExpr(Expr* expr, deparse_expr_cxt* context); +static void gcDeparseVar(Var* node, deparse_expr_cxt* context); +static void simpleDeparseVar(Var* node, deparse_expr_cxt* context); +static void gcDeparseConst(Const* node, deparse_expr_cxt* context, int showtype); +static void gcDeparseParam(Param* node, deparse_expr_cxt* context); +static void gcDeparseArrayRef(ArrayRef* node, deparse_expr_cxt* context); +static void gcDeparseFuncExpr(FuncExpr* node, deparse_expr_cxt* context); +static void gcDeparseOpExpr(OpExpr* node, deparse_expr_cxt* context); +static void gcDeparseOperatorName(StringInfo buf, Form_pg_operator opform); +static void gcDeparseDistinctExpr(DistinctExpr* node, deparse_expr_cxt* context); +static void gcDeparseScalarArrayOpExpr(ScalarArrayOpExpr* node, deparse_expr_cxt* context); +static void gcDeparseRelabelType(RelabelType* node, deparse_expr_cxt* context); +static void gcDeparseBoolExpr(BoolExpr* node, deparse_expr_cxt* context); +static void gcDeparseNullTest(NullTest* node, deparse_expr_cxt* context); +static void gcDeparseArrayExpr(ArrayExpr* node, deparse_expr_cxt* context); +static void printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod, deparse_expr_cxt* context); +static void printRemotePlaceholder(Oid paramtype, int32 paramtypmod, deparse_expr_cxt* context); +static void gcDeparseSelectSql(List* tlist, bool is_subquery, List** retrieved_attrs, deparse_expr_cxt* context); +static void gcDeparseLockingClause(deparse_expr_cxt* context); +static void gcAppendOrderByClause(List* pathkeys, deparse_expr_cxt* context); +static void appendConditions(List* exprs, deparse_expr_cxt* context); +static void gcDeparseFromExprForRel(StringInfo buf, PlannerInfo* root, RelOptInfo* joinrel, bool use_alias, + List** params_list, bool local_schema = false); +static void gcDeparseFromExpr(List* quals, deparse_expr_cxt* context); +static void simpleDeparseAggref(Aggref* node, deparse_expr_cxt* context); +static void appendAggOrderBy(List* orderList, List* targetList, deparse_expr_cxt* context); +static void appendFunctionName(Oid funcid, deparse_expr_cxt* context); +static Node* deparseSortGroupClause(Index ref, List* tlist, bool force_colno, deparse_expr_cxt* context); +extern bool pgxc_is_expr_shippable(Expr* node, bool* has_aggs); + +/* + * Helper functions + */ +static bool is_subquery_var(Var* node, RelOptInfo* foreignrel, int* relno, int* colno); +static void get_relation_column_alias_ids(Var* node, RelOptInfo* foreignrel, int* relno, int* colno); + +/* + * Examine each qual clause in input_conds, and classify them into two groups, + * which are returned as two lists: + * - remote_conds contains expressions that can be evaluated remotely + * - local_conds contains expressions that can't be evaluated remotely + */ +void classifyConditions( + PlannerInfo* root, RelOptInfo* baserel, List* input_conds, List** remote_conds, List** local_conds) +{ + ListCell* lc = NULL; + + bool can_remote_filter = true; 
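+    /*
+     * First pass: probe every qual. If even one clause cannot be shipped,
+     * remote filtering is disabled entirely, so quals are pushed down on an
+     * all-or-nothing basis for this relation.
+     */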
+ foreach (lc, input_conds) { + RestrictInfo* ri = lfirst_node(RestrictInfo, lc); + + if (!is_foreign_expr(root, baserel, ri->clause)) { + can_remote_filter = false; + break; + } + } + + *remote_conds = NIL; + *local_conds = NIL; + + foreach (lc, input_conds) { + RestrictInfo* ri = lfirst_node(RestrictInfo, lc); + + if (can_remote_filter && is_foreign_expr(root, baserel, ri->clause)) + *remote_conds = lappend(*remote_conds, ri); + else + *local_conds = lappend(*local_conds, ri); + } +} + +/* + * Returns true if given expr is safe to evaluate on the foreign server. + */ +bool is_foreign_expr(PlannerInfo* root, RelOptInfo* baserel, Expr* expr) +{ + foreign_glob_cxt glob_cxt; + foreign_loc_cxt loc_cxt; + + /* + * Check that the expression consists of nodes that are safe to execute + * remotely. + */ + glob_cxt.root = root; + glob_cxt.foreignrel = baserel; + + glob_cxt.relids = baserel->relids; + loc_cxt.collation = InvalidOid; + loc_cxt.state = FDW_COLLATE_NONE; + if (!gcforeign_expr_walker((Node*)expr, &glob_cxt, &loc_cxt)) + return false; + + /* + * If the expression has a valid collation that does not arise from a + * foreign var, the expression can not be sent over. + */ + if (loc_cxt.state == FDW_COLLATE_UNSAFE) + return false; + + /* + * An expression which includes any mutable functions can't be sent over + * because its result is not stable. For example, sending now() remote + * side could cause confusion from clock offsets. Future versions might + * be able to make this choice with more granularity. (We check this last + * because it requires a lot of expensive catalog lookups.) + */ + if (contain_mutable_functions((Node*)expr)) + return false; + + /* OK to evaluate on the remote server */ + return true; +} + +/* + * Check if expression is safe to execute remotely, and return true if so. + * + * In addition, *outer_cxt is updated with collation information. + * + * We must check that the expression contains only node types we can deparse, + * that all types/functions/operators are safe to send (they are "shippable"), + * and that all collations used in the expression derive from Vars of the + * foreign table. Because of the latter, the logic is pretty close to + * assign_collations_walker() in parse_collate.c, though we can assume here + * that the given expression is valid. Note function mutability is not + * currently considered here. + */ +static bool gcforeign_expr_walker(Node* node, foreign_glob_cxt* glob_cxt, foreign_loc_cxt* outer_cxt) +{ + bool check_type = true; + GcFdwRelationInfo* fpinfo = NULL; + foreign_loc_cxt inner_cxt; + Oid collation; + FDWCollateState state; + + /* Need do nothing for empty subexpressions */ + if (node == NULL) + return true; + + /* May need server info from baserel's fdw_private struct */ + fpinfo = (GcFdwRelationInfo*)(glob_cxt->foreignrel->fdw_private); + + /* Set up inner_cxt for possible recursion to child nodes */ + inner_cxt.collation = InvalidOid; + inner_cxt.state = FDW_COLLATE_NONE; + + switch (nodeTag(node)) { + case T_Var: { + Var* var = (Var*)node; + + /* + * If the Var is from the foreign table, we consider its + * collation (if any) safe to use. If it is from another + * table, we treat its collation the same way as we would a + * Param's collation, ie it's not safe for it to have a + * non-default collation. 
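+ * A Var coming from a local relation joined against the foreign table,
+ * for instance, may carry a collation the remote server cannot be
+ * assumed to know about.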
+ */ + if (bms_is_member(var->varno, glob_cxt->relids) && var->varlevelsup == 0) { + /* Var belongs to foreign table */ + + /* + * System columns other than ctid and oid should not be + * sent to the remote, since we don't make any effort to + * ensure that local and remote values match (tableoid, in + * particular, almost certainly doesn't match). + */ + if (var->varattno < 0 && var->varattno != SelfItemPointerAttributeNumber && + var->varattno != ObjectIdAttributeNumber) + return false; + + /* Else check the collation */ + collation = var->varcollid; + state = OidIsValid(collation) ? FDW_COLLATE_SAFE : FDW_COLLATE_NONE; + } else { + /* Var belongs to some other table */ + collation = var->varcollid; + if (collation == InvalidOid || collation == DEFAULT_COLLATION_OID) { + /* + * It's noncollatable, or it's safe to combine with a + * collatable foreign Var, so set state to NONE. + */ + state = FDW_COLLATE_NONE; + } else { + /* + * Do not fail right away, since the Var might appear + * in a collation-insensitive context. + */ + state = FDW_COLLATE_UNSAFE; + } + } + } break; + case T_Const: { + Const* c = (Const*)node; + + /* + * If the constant has nondefault collation, either it's of a + * non-builtin type, or it reflects folding of a CollateExpr. + * It's unsafe to send to the remote unless it's used in a + * non-collation-sensitive context. + */ + collation = c->constcollid; + if (collation == InvalidOid || collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_Param: { + Param* p = (Param*)node; + + if (PARAM_EXTERN != p->paramkind) + return false; + + /* + * Collation rule is same as for Consts and non-foreign Vars. + */ + collation = p->paramcollid; + if (collation == InvalidOid || collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_ArrayRef: { + ArrayRef* ar = (ArrayRef*)node; + + /* Assignment should not be in restrictions. */ + if (ar->refassgnexpr != NULL) + return false; + + /* + * Recurse to remaining subexpressions. Since the array + * subscripts must yield (noncollatable) integers, they won't + * affect the inner_cxt state. + */ + if (!gcforeign_expr_walker((Node*)ar->refupperindexpr, glob_cxt, &inner_cxt)) + return false; + if (!gcforeign_expr_walker((Node*)ar->reflowerindexpr, glob_cxt, &inner_cxt)) + return false; + if (!gcforeign_expr_walker((Node*)ar->refexpr, glob_cxt, &inner_cxt)) + return false; + + /* + * Array subscripting should yield same collation as input, + * but for safety use same logic as for function nodes. + */ + collation = ar->refcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_FuncExpr: { + FuncExpr* fe = (FuncExpr*)node; + + /* + * If function used by the expression is not shippable, it + * can't be sent to remote because it might have incompatible + * semantics on remote side. + */ + if (!is_shippable(fe->funcid, ProcedureRelationId, fpinfo)) + return false; + + /* + * Recurse to input subexpressions. + */ + if (!gcforeign_expr_walker((Node*)fe->args, glob_cxt, &inner_cxt)) + return false; + + /* + * If function's input collation is not derived from a foreign + * Var, it can't be sent to remote. 
+ */ + if (fe->inputcollid == InvalidOid) + /* OK, inputs are all noncollatable */; + else if (inner_cxt.state != FDW_COLLATE_SAFE || fe->inputcollid != inner_cxt.collation) + return false; + + /* + * Detect whether node is introducing a collation not derived + * from a foreign Var. (If so, we just mark it unsafe for now + * rather than immediately returning false, since the parent + * node might not care.) + */ + collation = fe->funccollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_OpExpr: + case T_DistinctExpr: /* struct-equivalent to OpExpr */ + { + OpExpr* oe = (OpExpr*)node; + + /* + * Similarly, only shippable operators can be sent to remote. + * (If the operator is shippable, we assume its underlying + * function is too.) + */ + if (!is_shippable(oe->opno, OperatorRelationId, fpinfo)) + return false; + + /* + * Recurse to input subexpressions. + */ + if (!gcforeign_expr_walker((Node*)oe->args, glob_cxt, &inner_cxt)) + return false; + + /* + * If operator's input collation is not derived from a foreign + * Var, it can't be sent to remote. + */ + if (oe->inputcollid == InvalidOid) + /* OK, inputs are all noncollatable */; + else if (inner_cxt.state != FDW_COLLATE_SAFE || oe->inputcollid != inner_cxt.collation) + return false; + + /* Result-collation handling is same as for functions */ + collation = oe->opcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_ScalarArrayOpExpr: { + ScalarArrayOpExpr* oe = (ScalarArrayOpExpr*)node; + + /* + * Again, only shippable operators can be sent to remote. + */ + if (!is_shippable(oe->opno, OperatorRelationId, fpinfo)) + return false; + + /* + * Recurse to input subexpressions. + */ + if (!gcforeign_expr_walker((Node*)oe->args, glob_cxt, &inner_cxt)) + return false; + + /* + * If operator's input collation is not derived from a foreign + * Var, it can't be sent to remote. + */ + if (oe->inputcollid == InvalidOid) + /* OK, inputs are all noncollatable */; + else if (inner_cxt.state != FDW_COLLATE_SAFE || oe->inputcollid != inner_cxt.collation) + return false; + + /* Output is always boolean and so noncollatable. */ + collation = InvalidOid; + state = FDW_COLLATE_NONE; + } break; + case T_RelabelType: { + RelabelType* r = (RelabelType*)node; + + /* + * Recurse to input subexpression. + */ + if (!gcforeign_expr_walker((Node*)r->arg, glob_cxt, &inner_cxt)) + return false; + + /* + * RelabelType must not introduce a collation not derived from + * an input foreign Var (same logic as for a real function). + */ + collation = r->resultcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_BoolExpr: { + BoolExpr* b = (BoolExpr*)node; + + /* + * Recurse to input subexpressions. 
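+ * (AND/OR/NOT take and return booleans, so a BoolExpr can never
+ * introduce a collation of its own.)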
+ */ + if (!gcforeign_expr_walker((Node*)b->args, glob_cxt, &inner_cxt)) + return false; + + /* Output is always boolean and so noncollatable. */ + collation = InvalidOid; + state = FDW_COLLATE_NONE; + } break; + case T_NullTest: { + NullTest* nt = (NullTest*)node; + + /* + * Recurse to input subexpressions. + */ + if (!gcforeign_expr_walker((Node*)nt->arg, glob_cxt, &inner_cxt)) + return false; + + /* Output is always boolean and so noncollatable. */ + collation = InvalidOid; + state = FDW_COLLATE_NONE; + } break; + case T_ArrayExpr: { + ArrayExpr* a = (ArrayExpr*)node; + + /* + * Recurse to input subexpressions. + */ + if (!gcforeign_expr_walker((Node*)a->elements, glob_cxt, &inner_cxt)) + return false; + + /* + * ArrayExpr must not introduce a collation not derived from + * an input foreign Var (same logic as for a function). + */ + collation = a->array_collid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + case T_List: { + List* l = (List*)node; + ListCell* lc = NULL; + + /* + * Recurse to component subexpressions. + */ + foreach (lc, l) { + if (!gcforeign_expr_walker((Node*)lfirst(lc), glob_cxt, &inner_cxt)) + return false; + } + + /* + * When processing a list, collation state just bubbles up + * from the list elements. + */ + collation = inner_cxt.collation; + state = inner_cxt.state; + + /* Don't apply exprType() to the list. */ + check_type = false; + } break; + case T_Aggref: { + Aggref* agg = (Aggref*)node; + ListCell* lc = NULL; + + /* As usual, it must be shippable. */ + if (!is_shippable(agg->aggfnoid, ProcedureRelationId, fpinfo)) + return false; + + /* + * Recurse to input args. aggdirectargs, aggorder and + * aggdistinct are all present in args, so no need to check + * their shippability explicitly. + */ + foreach (lc, agg->args) { + Node* n = (Node*)lfirst(lc); + + /* If TargetEntry, extract the expression from it */ + if (IsA(n, TargetEntry)) { + TargetEntry* tle = (TargetEntry*)n; + + n = (Node*)tle->expr; + } + + if (!gcforeign_expr_walker(n, glob_cxt, &inner_cxt)) + return false; + } + + /* + * For aggorder elements, check whether the sort operator, if + * specified, is shippable or not. + */ + if (agg->aggorder) { + ListCell* lc_order = NULL; + + foreach (lc_order, agg->aggorder) { + SortGroupClause* srt = (SortGroupClause*)lfirst(lc_order); + Oid sortcoltype; + TypeCacheEntry* typentry = NULL; + TargetEntry* tle = NULL; + + tle = get_sortgroupref_tle(srt->tleSortGroupRef, agg->args); + sortcoltype = exprType((Node*)tle->expr); + typentry = lookup_type_cache(sortcoltype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + /* Check shippability of non-default sort operator. */ + if (srt->sortop != typentry->lt_opr && srt->sortop != typentry->gt_opr && + !is_shippable(srt->sortop, OperatorRelationId, fpinfo)) + return false; + } + } + + /* + * If aggregate's input collation is not derived from a + * foreign Var, it can't be sent to remote. + */ + if (agg->inputcollid == InvalidOid) + /* OK, inputs are all noncollatable */; + else if (inner_cxt.state != FDW_COLLATE_SAFE || agg->inputcollid != inner_cxt.collation) + return false; + + /* + * Detect whether node is introducing a collation not derived + * from a foreign Var. 
(If so, we just mark it unsafe for now + * rather than immediately returning false, since the parent + * node might not care.) + */ + collation = agg->aggcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; + default: + + /* + * If it's anything else, assume it's unsafe. This list can be + * expanded later, but don't forget to add deparse support below. + */ + return false; + } + + /* + * If result type of given expression is not shippable, it can't be sent + * to remote because it might have incompatible semantics on remote side. + */ + if (check_type && !is_shippable(exprType(node), TypeRelationId, fpinfo)) + return false; + + /* + * Now, merge my collation information into my parent's state. + */ + if (state > outer_cxt->state) { + /* Override previous parent state */ + outer_cxt->collation = collation; + outer_cxt->state = state; + } else if (state == outer_cxt->state) { + /* Merge, or detect error if there's a collation conflict */ + switch (state) { + case FDW_COLLATE_NONE: + /* Nothing + nothing is still nothing */ + break; + case FDW_COLLATE_SAFE: + if (collation != outer_cxt->collation) { + /* + * Non-default collation always beats default. + */ + if (outer_cxt->collation == DEFAULT_COLLATION_OID) { + /* Override previous parent state */ + outer_cxt->collation = collation; + } else if (collation != DEFAULT_COLLATION_OID) { + /* + * Conflict; show state as indeterminate. We don't + * want to "return false" right away, since parent + * node might not care about collation. + */ + outer_cxt->state = FDW_COLLATE_UNSAFE; + } + } + break; + case FDW_COLLATE_UNSAFE: + /* We're still conflicted ... */ + break; + } + } + + /* It looks OK */ + return true; +} + +/* + * Convert type OID + typmod info into a type name we can ship to the remote + * server. Someplace else had better have verified that this type name is + * expected to be known on the remote end. + * + * This is almost just format_type_with_typemod(), except that if left to its + * own devices, that function will make schema-qualification decisions based + * on the local search_path, which is wrong. We must schema-qualify all + * type names that are not in pg_catalog. We assume here that built-in types + * are all in pg_catalog and need not be qualified; otherwise, qualify. + */ +static char* deparse_type_name(Oid type_oid, int32 typemod) +{ + if (is_builtin(type_oid)) + return format_type_with_typemod(type_oid, typemod); + else { + elog(ERROR, "unsupported data type %u", type_oid); + return NULL; /* keep compiler silence */ + } +} + +/* + * Build the targetlist for given relation to be deparsed as SELECT clause. + * + * The output targetlist contains the columns that need to be fetched from the + * foreign server for the given relation. If foreignrel is an upper relation, + * then the output targetlist can also contain expressions to be evaluated on + * foreign server. + */ +List* build_tlist_to_deparse(RelOptInfo* foreignrel) +{ + List* tlist = NIL; + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)foreignrel->fdw_private; + ListCell* lc = NULL; + + /* + * We require columns specified in foreignrel->reltarget->exprs and those + * required for evaluating the local conditions. 
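+ * (In this code base those target expressions live in
+ * foreignrel->reltargetlist, which is what the assignment below reads.)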
+ */ + tlist = foreignrel->reltargetlist; + + foreach (lc, fpinfo->local_conds) { + RestrictInfo* rinfo = lfirst_node(RestrictInfo, lc); + + tlist = add_to_flat_tlist( + tlist, pull_var_clause((Node*)rinfo->clause, PVC_REJECT_AGGREGATES, PVC_RECURSE_PLACEHOLDERS)); + } + + return tlist; +} + +/* + * Deparse SELECT statement for given relation into buf. + * + * tlist contains the list of desired columns to be fetched from foreign server. + * For a base relation fpinfo->attrs_used is used to construct SELECT clause, + * hence the tlist is ignored for a base relation. + * + * remote_conds is the list of conditions to be deparsed into the WHERE clause + * (or, in the case of upper relations, into the HAVING clause). + * + * If params_list is not NULL, it receives a list of Params and other-relation + * Vars used in the clauses; these values must be transmitted to the remote + * server as parameter values. + * + * If params_list is NULL, we're generating the query for EXPLAIN purposes, + * so Params and other-relation Vars should be replaced by dummy values. + * + * pathkeys is the list of pathkeys to order the result by. + * + * is_subquery is the flag to indicate whether to deparse the specified + * relation as a subquery. + * + * List of columns selected is returned in retrieved_attrs. + */ +extern void gcDeparseSelectStmtForRel(StringInfo buf, PlannerInfo* root, RelOptInfo* rel, List* tlist, + List* remote_conds, List* pathkeys, bool is_subquery, List** retrieved_attrs, List** params_list) +{ + deparse_expr_cxt context; + List* quals = NIL; + + /* + * We handle relations for foreign tables, joins between those and upper + * relations. + */ + Assert(IS_JOIN_REL(rel) || IS_SIMPLE_REL(rel)); + + /* Fill portions of context common to upper, join and base relation */ + context.buf = buf; + context.root = root; + context.foreignrel = rel; + context.scanrel = rel; + context.params_list = params_list; + context.coorquery = false; + context.local_schema = false; + + /* Construct SELECT clause */ + gcDeparseSelectSql(tlist, is_subquery, retrieved_attrs, &context); + + quals = remote_conds; + + /* Construct FROM and WHERE clauses */ + gcDeparseFromExpr(quals, &context); + + /* Add ORDER BY clause if we found any useful pathkeys */ + if (pathkeys != NULL) + gcAppendOrderByClause(pathkeys, &context); + + /* Add any necessary FOR UPDATE/SHARE. */ + gcDeparseLockingClause(&context); +} + +/* + * Construct a simple SELECT statement that retrieves desired columns + * of the specified foreign table, and append it to "buf". The output + * contains just "SELECT ... ". + * + * We also create an integer List of the columns being retrieved, which is + * returned to *retrieved_attrs, unless we deparse the specified relation + * as a subquery. + * + * tlist is the list of desired columns. is_subquery is the flag to + * indicate whether to deparse the specified relation as a subquery. + * Read prologue of gcDeparseSelectStmtForRel() for details. + */ +static void gcDeparseSelectSql(List* tlist, bool is_subquery, List** retrieved_attrs, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + RelOptInfo* foreignrel = context->foreignrel; + PlannerInfo* root = context->root; + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)foreignrel->fdw_private; + + /* + * Construct SELECT list + */ + appendStringInfoString(buf, "SELECT "); + + if (is_subquery) { + /* + * For a relation that is deparsed as a subquery, emit expressions + * specified in the relation's reltarget. 
Note that since this is for + * the subquery, no need to care about *retrieved_attrs. + */ + gcDeparseSubqueryTargetList(context); + } else if (IS_JOIN_REL(foreignrel)) { + /* + * For a join or upper relation the input tlist gives the list of + * columns required to be fetched from the foreign server. + */ + gcDeparseExplicitTargetList(tlist, retrieved_attrs, context); + } else { + /* + * For a base relation fpinfo->attrs_used gives the list of columns + * required to be fetched from the foreign server. + */ + RangeTblEntry* rte = planner_rt_fetch(foreignrel->relid, root); + + /* + * Core code already has some lock on each rel being planned, so we + * can use NoLock here. + */ + Relation rel = heap_open(rte->relid, NoLock); + + gcdeparseTargetList(buf, root, foreignrel->relid, rel, false, fpinfo->attrs_used, false, retrieved_attrs); + heap_close(rel, NoLock); + } +} + +/* + * Construct a FROM clause and, if needed, a WHERE clause, and append those to + * "buf". + * + * quals is the list of clauses to be included in the WHERE clause. + * (These may or may not include RestrictInfo decoration.) + */ +static void gcDeparseFromExpr(List* quals, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + RelOptInfo* scanrel = context->scanrel; + + /* For upper relations, scanrel must be either a joinrel or a baserel */ + Assert(IS_JOIN_REL(scanrel) || IS_SIMPLE_REL(scanrel)); + + /* Construct FROM clause */ + appendStringInfoString(buf, " FROM "); + gcDeparseFromExprForRel(buf, + context->root, + scanrel, + (bms_num_members(scanrel->relids) > 1), + context->params_list, + context->local_schema); + + /* Construct WHERE clause */ + if (quals != NIL) { + appendStringInfo(buf, " WHERE "); + appendConditions(quals, context); + } +} + +/* + * Emit a target list that retrieves the columns specified in attrs_used. + * This is used for both SELECT and RETURNING targetlists; the is_returning + * parameter is true only for a RETURNING targetlist. + * + * The tlist text is appended to buf, and we also create an integer List + * of the columns being retrieved, which is returned to *retrieved_attrs. + * + * If qualify_col is true, add relation alias before the column name. + */ +static void gcdeparseTargetList(StringInfo buf, PlannerInfo* root, Index rtindex, Relation rel, bool is_returning, + Bitmapset* attrs_used, bool qualify_col, List** retrieved_attrs) +{ + TupleDesc tupdesc = RelationGetDescr(rel); + bool have_wholerow = false; + bool first = true; + int i; + + *retrieved_attrs = NIL; + + /* If there's a whole-row reference, we'll need all the columns. */ + have_wholerow = bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used); + + for (i = 1; i <= tupdesc->natts; i++) { + Form_pg_attribute attr = tupdesc->attrs[i - 1]; + + /* Ignore dropped attributes. */ + if (attr->attisdropped) + continue; + + if (have_wholerow || bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_used)) { + if (!first) + appendStringInfoString(buf, ", "); + else if (is_returning) + appendStringInfoString(buf, " RETURNING "); + first = false; + + gcDeparseColumnRef(buf, rtindex, i, root, qualify_col); + + *retrieved_attrs = lappend_int(*retrieved_attrs, i); + } + } + + /* + * Add ctid and oid if needed. We currently don't support retrieving any + * other system columns. 
+ */ + if (bms_is_member(SelfItemPointerAttributeNumber - FirstLowInvalidHeapAttributeNumber, attrs_used)) { + if (!first) + appendStringInfoString(buf, ", "); + else if (is_returning) + appendStringInfoString(buf, " RETURNING "); + first = false; + + if (qualify_col) + ADD_UNSIGNED_REL_QUALIFIER(buf, rtindex); + appendStringInfoString(buf, "ctid"); + + *retrieved_attrs = lappend_int(*retrieved_attrs, SelfItemPointerAttributeNumber); + } + if (bms_is_member(ObjectIdAttributeNumber - FirstLowInvalidHeapAttributeNumber, attrs_used)) { + if (!first) + appendStringInfoString(buf, ", "); + else if (is_returning) + appendStringInfoString(buf, " RETURNING "); + first = false; + + if (qualify_col) + ADD_UNSIGNED_REL_QUALIFIER(buf, rtindex); + appendStringInfoString(buf, "oid"); + + *retrieved_attrs = lappend_int(*retrieved_attrs, ObjectIdAttributeNumber); + } + + /* Don't generate bad syntax if no undropped columns */ + if (first && !is_returning) + appendStringInfoString(buf, "NULL"); +} + +/* + * Deparse the appropriate locking clause (FOR UPDATE or FOR SHARE) for a + * given relation (context->scanrel). + */ +static void gcDeparseLockingClause(deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + PlannerInfo* root = context->root; + RelOptInfo* rel = context->scanrel; + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)rel->fdw_private; + int relid = -1; + + while ((relid = bms_next_member(rel->relids, relid)) >= 0) { + /* + * Ignore relation if it appears in a lower subquery. Locking clause + * for such a relation is included in the subquery if necessary. + */ + if (bms_is_member(relid, fpinfo->lower_subquery_rels)) + continue; + + /* + * Add FOR UPDATE/SHARE if appropriate. We apply locking during the + * initial row fetch, rather than later on as is done for local + * tables. The extra roundtrips involved in trying to duplicate the + * local semantics exactly don't seem worthwhile (see also comments + * for RowMarkType). + * + * Note: because we actually run the query as a cursor, this assumes + * that DECLARE CURSOR ... FOR UPDATE is supported, which it isn't + * before 8.3. + */ + if (relid == root->parse->resultRelation && + (root->parse->commandType == CMD_UPDATE || root->parse->commandType == CMD_DELETE)) { + /* Relation is UPDATE/DELETE target, so use FOR UPDATE */ + appendStringInfoString(buf, " FOR UPDATE"); + + /* Add the relation alias if we are here for a join relation */ + if (IS_JOIN_REL(rel)) + appendStringInfo(buf, " OF %s%d", REL_ALIAS_PREFIX, relid); + } else { + PlanRowMark* rc = get_plan_rowmark(root->rowMarks, relid); + + if (rc != NULL) { + /* Add the relation alias if we are here for a join relation */ + if (bms_num_members(rel->relids) > 1 /*&& + rc->strength != LCS_NONE*/) + appendStringInfo(buf, " OF %s%d", REL_ALIAS_PREFIX, relid); + } + } + } +} + +/* + * Deparse conditions from the provided list and append them to buf. + * + * The conditions in the list are assumed to be ANDed. This function is used to + * deparse WHERE clauses, JOIN .. ON clauses and HAVING clauses. + * + * Depending on the caller, the list elements might be either RestrictInfos + * or bare clauses. 
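+ * Each condition is wrapped in parentheses so that operator precedence on
+ * the remote side cannot change its meaning.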
+ */ +static void appendConditions(List* exprs, deparse_expr_cxt* context) +{ + int nestlevel; + ListCell* lc = NULL; + bool is_first = true; + StringInfo buf = context->buf; + + /* Make sure any constants in the exprs are printed portably */ + nestlevel = set_transmission_modes(); + + foreach (lc, exprs) { + Expr* expr = (Expr*)lfirst(lc); + + /* Extract clause from RestrictInfo, if required */ + if (IsA(expr, RestrictInfo)) + expr = ((RestrictInfo*)expr)->clause; + + /* Connect expressions with "AND" and parenthesize each condition. */ + if (!is_first) + appendStringInfoString(buf, " AND "); + + appendStringInfoChar(buf, '('); + gcDeparseExpr(expr, context); + appendStringInfoChar(buf, ')'); + + is_first = false; + } + + reset_transmission_modes(nestlevel); +} + +/* + * Deparse given targetlist and append it to context->buf. + * + * tlist is list of TargetEntry's which in turn contain Var nodes. + * + * retrieved_attrs is the list of continuously increasing integers starting + * from 1. It has same number of entries as tlist. + */ +static void gcDeparseExplicitTargetList(List* tlist, List** retrieved_attrs, deparse_expr_cxt* context) +{ + ListCell* lc = NULL; + StringInfo buf = context->buf; + int i = 0; + + *retrieved_attrs = NIL; + + foreach (lc, tlist) { + TargetEntry* tle = lfirst_node(TargetEntry, lc); + + if (i > 0) + appendStringInfoString(buf, ", "); + gcDeparseExpr((Expr*)tle->expr, context); + + *retrieved_attrs = lappend_int(*retrieved_attrs, i + 1); + i++; + } + + if (i == 0) + appendStringInfoString(buf, "NULL"); +} + +/* + * Emit expressions specified in the given relation's reltarget. + * + * This is used for deparsing the given relation as a subquery. + */ +static void gcDeparseSubqueryTargetList(deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + RelOptInfo* foreignrel = context->foreignrel; + bool first = true; + ListCell* lc = NULL; + + /* Should only be called in these cases. */ + Assert(IS_SIMPLE_REL(foreignrel) || IS_JOIN_REL(foreignrel)); + + foreach (lc, foreignrel->reltargetlist) { + Node* node = (Node*)lfirst(lc); + + if (!first) + appendStringInfoString(buf, ", "); + first = false; + + gcDeparseExpr((Expr*)node, context); + } + + /* Don't generate bad syntax if no expressions */ + if (first) + appendStringInfoString(buf, "NULL"); +} + +/* + * Construct FROM clause for given relation + * + * The function constructs ... JOIN ... ON ... for join relation. For a base + * relation it just returns schema-qualified tablename, with the appropriate + * alias if so requested. + */ +static void gcDeparseFromExprForRel( + StringInfo buf, PlannerInfo* root, RelOptInfo* foreignrel, bool use_alias, List** params_list, bool local_schema) +{ + RangeTblEntry* rte = planner_rt_fetch(foreignrel->relid, root); + + /* + * Core code already has some lock on each rel being planned, so we + * can use NoLock here. + */ + Relation rel = heap_open(rte->relid, NoLock); + + gcDeparseRelation(buf, rel, local_schema); + + /* + * Add a unique alias to avoid any conflict in relation names due to + * pulled up subqueries in the query being built for a pushed down + * join. + */ + if (use_alias) + appendStringInfo(buf, " %s%u", REL_ALIAS_PREFIX, foreignrel->relid); + + heap_close(rel, NoLock); +} + +/* + * Construct name to use for given column, and emit it into buf. + * If it has a column_name FDW option, use that instead of attribute name. + * + * If qualify_col is true, qualify column name with the alias of relation. 
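+ *
+ * A whole-row reference under a join is deparsed so that it goes to NULL
+ * whenever the rest of the row does, e.g. (assuming REL_ALIAS_PREFIX "r"):
+ *   CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(c1, c2) END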
+ */ +static void gcDeparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo* root, bool qualify_col) +{ + RangeTblEntry* rte = NULL; + + /* We support fetching the remote side's CTID and OID. */ + if (varattno == SelfItemPointerAttributeNumber) { + if (qualify_col) + ADD_REL_QUALIFIER(buf, varno); + appendStringInfoString(buf, "ctid"); + } else if (varattno == ObjectIdAttributeNumber) { + if (qualify_col) + ADD_REL_QUALIFIER(buf, varno); + appendStringInfoString(buf, "oid"); + } else if (varattno < 0) { + /* + * All other system attributes are fetched as 0, except for table OID, + * which is fetched as the local table OID. However, we must be + * careful; the table could be beneath an outer join, in which case it + * must go to NULL whenever the rest of the row does. + */ + Oid fetchval = 0; + + if (varattno == TableOidAttributeNumber) { + rte = planner_rt_fetch(varno, root); + fetchval = rte->relid; + } + + if (qualify_col) { + appendStringInfoString(buf, "CASE WHEN ("); + ADD_REL_QUALIFIER(buf, varno); + appendStringInfo(buf, "*)::text IS NOT NULL THEN %u END", fetchval); + } else + appendStringInfo(buf, "%u", fetchval); + } else if (varattno == 0) { + /* Whole row reference */ + Relation rel = NULL; + Bitmapset* attrs_used = NULL; + + /* Required only to be passed down to gcdeparseTargetList(). */ + List* retrieved_attrs = NIL; + + /* Get RangeTblEntry from array in PlannerInfo. */ + rte = planner_rt_fetch(varno, root); + + /* + * The lock on the relation will be held by upper callers, so it's + * fine to open it with no lock here. + */ + rel = heap_open(rte->relid, NoLock); + + /* + * The local name of the foreign table can not be recognized by the + * foreign server and the table it references on foreign server might + * have different column ordering or different columns than those + * declared locally. Hence we have to deparse whole-row reference as + * ROW(columns referenced locally). Construct this by deparsing a + * "whole row" attribute. + */ + attrs_used = bms_add_member(NULL, 0 - FirstLowInvalidHeapAttributeNumber); + + /* + * In case the whole-row reference is under an outer join then it has + * to go NULL whenever the rest of the row goes NULL. Deparsing a join + * query would always involve multiple relations, thus qualify_col + * would be true. + */ + if (qualify_col) { + appendStringInfoString(buf, "CASE WHEN ("); + ADD_REL_QUALIFIER(buf, varno); + appendStringInfo(buf, "*)::text IS NOT NULL THEN "); + } + + appendStringInfoString(buf, "ROW("); + gcdeparseTargetList(buf, root, varno, rel, false, attrs_used, qualify_col, &retrieved_attrs); + appendStringInfoString(buf, ")"); + + /* Complete the CASE WHEN statement started above. */ + if (qualify_col) + appendStringInfo(buf, " END"); + + heap_close(rel, NoLock); + bms_free(attrs_used); + } else { + char* colname = NULL; + List* options = NIL; + ListCell* lc = NULL; + + /* varno must not be any of OUTER_VAR, INNER_VAR and INDEX_VAR. */ + Assert(!IS_SPECIAL_VARNO(varno)); + + /* Get RangeTblEntry from array in PlannerInfo. */ + rte = planner_rt_fetch(varno, root); + + /* + * If it's a column of a foreign table, and it has the column_name FDW + * option, use that value. + */ + options = GetForeignColumnOptions(rte->relid, varattno); + foreach (lc, options) { + DefElem* def = (DefElem*)lfirst(lc); + + if (strcmp(def->defname, "column_name") == 0) { + colname = defGetString(def); + break; + } + } + + /* + * If it's a column of a regular table or it doesn't have column_name + * FDW option, use attribute name. 
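+         * For example, a column whose per-column FDW options include
+         * column_name 'remote_c' is emitted as "remote_c" rather than its
+         * local attribute name.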
+ */ + if (colname == NULL) + colname = get_relid_attribute_name(rte->relid, varattno); + + if (qualify_col) + ADD_REL_QUALIFIER(buf, varno); + + appendStringInfoString(buf, quote_identifier(colname)); + } +} + +/* + * Append remote name of specified foreign table to buf. + * Use value of table_name FDW option (if any) instead of relation's name. + * Similarly, schema_name FDW option overrides schema name. + */ +static void gcDeparseRelation(StringInfo buf, Relation rel, bool local_schema) +{ + ForeignTable* table = NULL; + const char* nspname = NULL; + const char* relname = NULL; + ListCell* lc = NULL; + + /* obtain additional catalog information. */ + table = GetForeignTable(RelationGetRelid(rel)); + + /* + * Use value of FDW options if any, instead of the name of object itself. + */ + foreach (lc, table->options) { + DefElem* def = (DefElem*)lfirst(lc); + + if (strcmp(def->defname, "schema_name") == 0) + nspname = defGetString(def); + else if (strcmp(def->defname, "table_name") == 0) + relname = defGetString(def); + } + + /* + * Note: we could skip printing the schema name if it's pg_catalog, but + * that doesn't seem worth the trouble. + */ + if (nspname == NULL) + nspname = get_namespace_name(RelationGetNamespace(rel)); + if (relname == NULL) + relname = RelationGetRelationName(rel); + + if (true == local_schema) { + relname = RelationGetRelationName(rel); + nspname = get_namespace_name(RelationGetNamespace(rel)); + } + + appendStringInfo(buf, "%s.%s", quote_identifier(nspname), quote_identifier(relname)); +} + +/* + * Append a SQL string literal representing "val" to buf. + */ +void gcDeparseStringLiteral(StringInfo buf, const char* val) +{ + const char* valptr = NULL; + + /* + * Rather than making assumptions about the remote server's value of + * u_sess->parser_cxt.standard_conforming_strings, always use E'foo' syntax if there are any + * backslashes. This will fail on remote servers before 8.1, but those + * are long out of support. + */ + if (strchr(val, '\\') != NULL) + appendStringInfoChar(buf, ESCAPE_STRING_SYNTAX); + appendStringInfoChar(buf, '\''); + for (valptr = val; *valptr; valptr++) { + char ch = *valptr; + + if (SQL_STR_DOUBLE(ch, true)) + appendStringInfoChar(buf, ch); + appendStringInfoChar(buf, ch); + } + appendStringInfoChar(buf, '\''); +} + +/* + * Deparse given expression into context->buf. + * + * This function must support all the same node types that gcforeign_expr_walker + * accepts. + * + * Note: unlike ruleutils.c, we just use a simple hard-wired parenthesization + * scheme: anything more complex than a Var, Const, function call or cast + * should be self-parenthesized. 
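+ *
+ * For example, an OpExpr nested inside another OpExpr deparses as
+ * "((a + b) > c)"; we never rely on the remote side's operator precedence.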
+ */ +static void gcDeparseExpr(Expr* node, deparse_expr_cxt* context) +{ + if (node == NULL) + return; + + switch (nodeTag(node)) { + case T_Var: + if (context->coorquery) + simpleDeparseVar((Var*)node, context); + else + gcDeparseVar((Var*)node, context); + break; + case T_Const: + gcDeparseConst((Const*)node, context, 0); + break; + case T_Param: + gcDeparseParam((Param*)node, context); + break; + case T_ArrayRef: + gcDeparseArrayRef((ArrayRef*)node, context); + break; + case T_FuncExpr: + gcDeparseFuncExpr((FuncExpr*)node, context); + break; + case T_OpExpr: + gcDeparseOpExpr((OpExpr*)node, context); + break; + case T_DistinctExpr: + gcDeparseDistinctExpr((DistinctExpr*)node, context); + break; + case T_ScalarArrayOpExpr: + gcDeparseScalarArrayOpExpr((ScalarArrayOpExpr*)node, context); + break; + case T_RelabelType: + gcDeparseRelabelType((RelabelType*)node, context); + break; + case T_BoolExpr: + gcDeparseBoolExpr((BoolExpr*)node, context); + break; + case T_NullTest: + gcDeparseNullTest((NullTest*)node, context); + break; + case T_ArrayExpr: + gcDeparseArrayExpr((ArrayExpr*)node, context); + break; + case T_Aggref: + if (context->coorquery) + simpleDeparseAggref((Aggref*)node, context); + break; + default: + elog(ERROR, "unsupported expression type for deparse: %d", (int)nodeTag(node)); + break; + } +} + +/* + * Deparse given Var node into context->buf. + * + * If the Var belongs to the foreign relation, just print its remote name. + * Otherwise, it's effectively a Param (and will in fact be a Param at + * run time). Handle it the same way we handle plain Params --- see + * gcDeparseParam for comments. + */ +static void gcDeparseVar(Var* node, deparse_expr_cxt* context) +{ + Relids relids = context->scanrel->relids; + int relno; + int colno; + + /* Qualify columns when multiple relations are involved. */ + bool qualify_col = (bms_num_members(relids) > 1); + + /* + * If the Var belongs to the foreign relation that is deparsed as a + * subquery, use the relation and column alias to the Var provided by the + * subquery, instead of the remote name. + */ + if (is_subquery_var(node, context->scanrel, &relno, &colno)) { + appendStringInfo(context->buf, "%s%d.%s%d", SUBQUERY_REL_ALIAS_PREFIX, relno, SUBQUERY_COL_ALIAS_PREFIX, colno); + return; + } + + if (bms_is_member(node->varno, relids) && node->varlevelsup == 0) + gcDeparseColumnRef(context->buf, node->varno, node->varattno, context->root, qualify_col); + else { + /* Treat like a Param */ + if (context->params_list != NULL) { + int pindex = 0; + ListCell* lc = NULL; + + /* find its index in params_list */ + foreach (lc, *context->params_list) { + pindex++; + if (equal(node, (Node*)lfirst(lc))) + break; + } + if (lc == NULL) { + /* not in list, so add it */ + pindex++; + *context->params_list = lappend(*context->params_list, node); + } + + printRemoteParam(pindex, node->vartype, node->vartypmod, context); + } else { + printRemotePlaceholder(node->vartype, node->vartypmod, context); + } + } +} + +/* + * Deparse given constant value into context->buf. + * + * This function has to be kept in sync with ruleutils.c's get_const_expr. + * As for that function, showtype can be -1 to never show "::typename" decoration, + * or +1 to always show it, or 0 to show it only if the constant wouldn't be assumed + * to be the right type by default. 
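+ *
+ * For example, with showtype 0 the int4 constant 42 is emitted as "42" and
+ * a text constant as "'foo'::text", while a float-looking numeric constant
+ * such as 4.2 needs no label and is emitted as "4.2".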
+ */ +static void gcDeparseConst(Const* node, deparse_expr_cxt* context, int showtype) +{ + StringInfo buf = context->buf; + Oid typoutput; + bool typIsVarlena = false; + char* extval = NULL; + bool isfloat = false; + bool needlabel = false; + + if (node->constisnull) { + appendStringInfoString(buf, "NULL"); + if (showtype >= 0) + appendStringInfo(buf, "::%s", deparse_type_name(node->consttype, node->consttypmod)); + return; + } + + getTypeOutputInfo(node->consttype, &typoutput, &typIsVarlena); + extval = OidOutputFunctionCall(typoutput, node->constvalue); + + switch (node->consttype) { + case INT2OID: + case INT4OID: + case INT8OID: + case OIDOID: + case FLOAT4OID: + case FLOAT8OID: + case NUMERICOID: { + /* + * No need to quote unless it's a special value such as 'NaN'. + * See comments in get_const_expr(). + */ + if (strspn(extval, "0123456789+-eE.") == strlen(extval)) { + if (extval[0] == '+' || extval[0] == '-') + appendStringInfo(buf, "(%s)", extval); + else + appendStringInfoString(buf, extval); + if (strcspn(extval, "eE.") != strlen(extval)) + isfloat = true; /* it looks like a float */ + } else + appendStringInfo(buf, "'%s'", extval); + } break; + case BITOID: + case VARBITOID: + appendStringInfo(buf, "B'%s'", extval); + break; + case BOOLOID: + if (strcmp(extval, "t") == 0) + appendStringInfoString(buf, "true"); + else + appendStringInfoString(buf, "false"); + break; + default: + gcDeparseStringLiteral(buf, extval); + break; + } + + pfree(extval); + + if (showtype < 0) + return; + + /* + * For showtype == 0, append ::typename unless the constant will be + * implicitly typed as the right type when it is read in. + * + * XXX this code has to be kept in sync with the behavior of the parser, + * especially make_const. + */ + switch (node->consttype) { + case BOOLOID: + case INT4OID: + case UNKNOWNOID: + needlabel = false; + break; + case NUMERICOID: + needlabel = !isfloat || (node->consttypmod >= 0); + break; + default: + needlabel = true; + break; + } + if (needlabel || showtype > 0) + appendStringInfo(buf, "::%s", deparse_type_name(node->consttype, node->consttypmod)); +} + +/* + * Deparse given Param node. + * + * If we're generating the query "for real", add the Param to + * context->params_list if it's not already present, and then use its index + * in that list as the remote parameter number. During EXPLAIN, there's + * no need to identify a parameter number. + */ +static void gcDeparseParam(Param* node, deparse_expr_cxt* context) +{ + if (context->params_list != NULL) { + int pindex = 0; + ListCell* lc = NULL; + + /* find its index in params_list */ + foreach (lc, *context->params_list) { + pindex++; + if (equal(node, (Node*)lfirst(lc))) + break; + } + if (lc == NULL) { + /* not in list, so add it */ + pindex++; + *context->params_list = lappend(*context->params_list, node); + } + + printRemoteParam(pindex, node->paramtype, node->paramtypmod, context); + } else { + printRemotePlaceholder(node->paramtype, node->paramtypmod, context); + } +} + +/* + * Deparse an array subscript expression. + */ +static void gcDeparseArrayRef(ArrayRef* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + ListCell* lowlist_item = NULL; + ListCell* uplist_item = NULL; + + /* Always parenthesize the expression. */ + appendStringInfoChar(buf, '('); + + /* + * Deparse referenced array expression first. If that expression includes + * a cast, we have to parenthesize to prevent the array subscript from + * being taken as typename decoration. 
We can avoid that in the typical + * case of subscripting a Var, but otherwise do it. + */ + if (IsA(node->refexpr, Var)) + gcDeparseExpr(node->refexpr, context); + else { + appendStringInfoChar(buf, '('); + gcDeparseExpr(node->refexpr, context); + appendStringInfoChar(buf, ')'); + } + + /* Deparse subscript expressions. */ + lowlist_item = list_head(node->reflowerindexpr); /* could be NULL */ + foreach (uplist_item, node->refupperindexpr) { + appendStringInfoChar(buf, '['); + if (lowlist_item != NULL) { + gcDeparseExpr((Expr*)lfirst(lowlist_item), context); + appendStringInfoChar(buf, ':'); + lowlist_item = lnext(lowlist_item); + } + gcDeparseExpr((Expr*)lfirst(uplist_item), context); + appendStringInfoChar(buf, ']'); + } + + appendStringInfoChar(buf, ')'); +} + +/* + * Deparse a function call. + */ +static void gcDeparseFuncExpr(FuncExpr* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + bool use_variadic = false; + bool first = true; + ListCell* arg = NULL; + + /* + * If the function call came from an implicit coercion, then just show the + * first argument. + */ + if (node->funcformat == COERCE_IMPLICIT_CAST && context->coorquery == false) { + gcDeparseExpr((Expr*)linitial(node->args), context); + return; + } + + /* + * If the function call came from a cast, then show the first argument + * plus an explicit cast operation. + */ + if (node->funcformat == COERCE_EXPLICIT_CAST || + (node->funcformat == COERCE_IMPLICIT_CAST && context->coorquery == true)) { + Oid rettype = node->funcresulttype; + int32 coercedTypmod = -1; + + /* Get the typmod if this is a length-coercion function */ + (void)exprIsLengthCoercion((Node*)node, &coercedTypmod); + + gcDeparseExpr((Expr*)linitial(node->args), context); + appendStringInfo(buf, "::%s", deparse_type_name(rettype, coercedTypmod)); + return; + } + + /* Check if need to print VARIADIC (cf. ruleutils.c) */ + use_variadic = node->funcvariadic; + + /* + * Normal function: display as proname(args). + */ + appendFunctionName(node->funcid, context); + appendStringInfoChar(buf, '('); + + /* ... and all the arguments */ + foreach (arg, node->args) { + if (!first) + appendStringInfoString(buf, ", "); + if (use_variadic && lnext(arg) == NULL) + appendStringInfoString(buf, "VARIADIC "); + gcDeparseExpr((Expr*)lfirst(arg), context); + first = false; + } + appendStringInfoChar(buf, ')'); +} + +/* + * Deparse given operator expression. To avoid problems around + * priority of operations, we always parenthesize the arguments. + */ +static void gcDeparseOpExpr(OpExpr* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + HeapTuple tuple = NULL; + Form_pg_operator form = NULL; + char oprkind; + ListCell* arg = NULL; + + /* Retrieve information about the operator from system catalog. */ + tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for operator %u", node->opno); + form = (Form_pg_operator)GETSTRUCT(tuple); + oprkind = form->oprkind; + + /* Sanity check. */ + Assert((oprkind == 'r' && list_length(node->args) == 1) || (oprkind == 'l' && list_length(node->args) == 1) || + (oprkind == 'b' && list_length(node->args) == 2)); + + /* Always parenthesize the expression. */ + appendStringInfoChar(buf, '('); + + /* Deparse left operand. */ + if (oprkind == 'r' || oprkind == 'b') { + arg = list_head(node->args); + gcDeparseExpr((Expr*)lfirst(arg), context); + appendStringInfoChar(buf, ' '); + } + + /* Deparse operator name. 
*/ + gcDeparseOperatorName(buf, form); + + /* Deparse right operand. */ + if (oprkind == 'l' || oprkind == 'b') { + arg = list_tail(node->args); + appendStringInfoChar(buf, ' '); + gcDeparseExpr((Expr*)lfirst(arg), context); + } + + appendStringInfoChar(buf, ')'); + + ReleaseSysCache(tuple); +} + +/* + * Print the name of an operator. + */ +static void gcDeparseOperatorName(StringInfo buf, Form_pg_operator opform) +{ + char* opname = NULL; + + /* opname is not a SQL identifier, so we should not quote it. */ + opname = NameStr(opform->oprname); + + /* Print schema name only if it's not pg_catalog */ + if (opform->oprnamespace != PG_CATALOG_NAMESPACE) { + const char* opnspname = NULL; + + opnspname = get_namespace_name(opform->oprnamespace); + /* Print fully qualified operator name. */ + if (opnspname != NULL) { + appendStringInfo(buf, "OPERATOR(%s.%s)", quote_identifier(opnspname), opname); + } else { + appendStringInfo(buf, "OPERATOR(\"Unknown\".%s)", opname); + } + } else { + /* Just print operator name. */ + appendStringInfoString(buf, opname); + } +} + +/* + * Deparse IS DISTINCT FROM. + */ +static void gcDeparseDistinctExpr(DistinctExpr* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + + Assert(list_length(node->args) == 2); + + appendStringInfoChar(buf, '('); + gcDeparseExpr((Expr*)linitial(node->args), context); + appendStringInfoString(buf, " IS DISTINCT FROM "); + gcDeparseExpr((Expr*)lsecond(node->args), context); + appendStringInfoChar(buf, ')'); +} + +/* + * Deparse given ScalarArrayOpExpr expression. To avoid problems + * around priority of operations, we always parenthesize the arguments. + */ +static void gcDeparseScalarArrayOpExpr(ScalarArrayOpExpr* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + HeapTuple tuple = NULL; + Form_pg_operator form = NULL; + Expr* arg1 = NULL; + Expr* arg2 = NULL; + + /* Retrieve information about the operator from system catalog. */ + tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno)); + if (!HeapTupleIsValid(tuple)) { + elog(ERROR, "cache lookup failed for operator %u", node->opno); + } + form = (Form_pg_operator)GETSTRUCT(tuple); + + /* Sanity check. */ + Assert(list_length(node->args) == 2); + + /* Always parenthesize the expression. */ + appendStringInfoChar(buf, '('); + + /* Deparse left operand. */ + arg1 = (Expr*)linitial(node->args); + gcDeparseExpr(arg1, context); + appendStringInfoChar(buf, ' '); + + /* Deparse operator name plus decoration. */ + gcDeparseOperatorName(buf, form); + appendStringInfo(buf, " %s (", node->useOr ? "ANY" : "ALL"); + + /* Deparse right operand. */ + arg2 = (Expr*)lsecond(node->args); + gcDeparseExpr(arg2, context); + + appendStringInfoChar(buf, ')'); + + /* Always parenthesize the expression. */ + appendStringInfoChar(buf, ')'); + + ReleaseSysCache(tuple); +} + +/* + * Deparse a RelabelType (binary-compatible cast) node. + */ +static void gcDeparseRelabelType(RelabelType* node, deparse_expr_cxt* context) +{ + gcDeparseExpr(node->arg, context); + if (node->relabelformat != COERCE_IMPLICIT_CAST) + appendStringInfo(context->buf, "::%s", deparse_type_name(node->resulttype, node->resulttypmod)); +} + +/* + * Deparse a BoolExpr node. 
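+ *
+ * NOT is emitted as "(NOT arg)"; AND/OR chains are emitted with the whole
+ * expression parenthesized, e.g. "(a AND b AND c)".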
+ */ +static void gcDeparseBoolExpr(BoolExpr* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + const char* op = NULL; /* keep compiler quiet */ + bool first = true; + ListCell* lc = NULL; + + switch (node->boolop) { + case AND_EXPR: + op = "AND"; + break; + case OR_EXPR: + op = "OR"; + break; + case NOT_EXPR: + appendStringInfoString(buf, "(NOT "); + gcDeparseExpr((Expr*)linitial(node->args), context); + appendStringInfoChar(buf, ')'); + return; + } + + appendStringInfoChar(buf, '('); + foreach (lc, node->args) { + if (!first) + appendStringInfo(buf, " %s ", op); + gcDeparseExpr((Expr*)lfirst(lc), context); + first = false; + } + appendStringInfoChar(buf, ')'); +} + +/* + * Deparse IS [NOT] NULL expression. + */ +static void gcDeparseNullTest(NullTest* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + + appendStringInfoChar(buf, '('); + gcDeparseExpr(node->arg, context); + + /* + * For scalar inputs, we prefer to print as IS [NOT] NULL, which is + * shorter and traditional. If it's a rowtype input but we're applying a + * scalar test, must print IS [NOT] DISTINCT FROM NULL to be semantically + * correct. + */ + if (node->argisrow || !type_is_rowtype(exprType((Node*)node->arg))) { + if (node->nulltesttype == IS_NULL) + appendStringInfoString(buf, " IS NULL)"); + else + appendStringInfoString(buf, " IS NOT NULL)"); + } else { + if (node->nulltesttype == IS_NULL) + appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL)"); + else + appendStringInfoString(buf, " IS DISTINCT FROM NULL)"); + } +} + +/* + * Deparse ARRAY[...] construct. + */ +static void gcDeparseArrayExpr(ArrayExpr* node, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + bool first = true; + ListCell* lc = NULL; + + appendStringInfoString(buf, "ARRAY["); + foreach (lc, node->elements) { + if (!first) + appendStringInfoString(buf, ", "); + gcDeparseExpr((Expr*)lfirst(lc), context); + first = false; + } + appendStringInfoChar(buf, ']'); + + /* If the array is empty, we need an explicit cast to the array type. */ + if (node->elements == NIL) + appendStringInfo(buf, "::%s", deparse_type_name(node->array_typeid, -1)); +} + +/* + * Append ORDER BY within aggregate function. + */ +static void appendAggOrderBy(List* orderList, List* targetList, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + ListCell* lc = NULL; + bool first = true; + + foreach (lc, orderList) { + SortGroupClause* srt = (SortGroupClause*)lfirst(lc); + Node* sortexpr = NULL; + Oid sortcoltype; + TypeCacheEntry* typentry = NULL; + + if (!first) + appendStringInfoString(buf, ", "); + first = false; + + sortexpr = deparseSortGroupClause(srt->tleSortGroupRef, targetList, false, context); + sortcoltype = exprType(sortexpr); + /* See whether operator is default < or > for datatype */ + typentry = lookup_type_cache(sortcoltype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + if (srt->sortop == typentry->lt_opr) + appendStringInfoString(buf, " ASC"); + else if (srt->sortop == typentry->gt_opr) + appendStringInfoString(buf, " DESC"); + else { + HeapTuple opertup = NULL; + Form_pg_operator operform = NULL; + + appendStringInfoString(buf, " USING "); + + /* Append operator name. 
*/ + opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(srt->sortop)); + if (!HeapTupleIsValid(opertup)) { + elog(ERROR, "cache lookup failed for operator %u", srt->sortop); + } + operform = (Form_pg_operator)GETSTRUCT(opertup); + gcDeparseOperatorName(buf, operform); + ReleaseSysCache(opertup); + } + + if (srt->nulls_first) + appendStringInfoString(buf, " NULLS FIRST"); + else + appendStringInfoString(buf, " NULLS LAST"); + } +} + +/* + * Print the representation of a parameter to be sent to the remote side. + * + * Note: we always label the Param's type explicitly rather than relying on + * transmitting a numeric type OID in PQexecParams(). This allows us to + * avoid assuming that types have the same OIDs on the remote side as they + * do locally --- they need only have the same names. + */ +static void printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + char* ptypename = deparse_type_name(paramtype, paramtypmod); + + appendStringInfo(buf, "$%d::%s", paramindex, ptypename); +} + +/* + * Print the representation of a placeholder for a parameter that will be + * sent to the remote side at execution time. + * + * This is used when we're just trying to EXPLAIN the remote query. + * We don't have the actual value of the runtime parameter yet, and we don't + * want the remote planner to generate a plan that depends on such a value + * anyway. Thus, we can't do something simple like "$1::paramtype". + * Instead, we emit "((SELECT null::paramtype)::paramtype)". + * In all extant versions of openGauss, the planner will see that as an unknown + * constant value, which is what we want. This might need adjustment if we + * ever make the planner flatten scalar subqueries. Note: the reason for the + * apparently useless outer cast is to ensure that the representation as a + * whole will be parsed as an a_expr and not a select_with_parens; the latter + * would do the wrong thing in the context "x = ANY(...)". + */ +static void printRemotePlaceholder(Oid paramtype, int32 paramtypmod, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + char* ptypename = deparse_type_name(paramtype, paramtypmod); + + appendStringInfo(buf, "((SELECT null::%s)::%s)", ptypename, ptypename); +} + +/* + * Deparse ORDER BY clause according to the given pathkeys for given base + * relation. From given pathkeys expressions belonging entirely to the given + * base relation are obtained and deparsed. + */ +static void gcAppendOrderByClause(List* pathkeys, deparse_expr_cxt* context) +{ + ListCell* lcell = NULL; + int nestlevel; + char* delim = " "; + RelOptInfo* baserel = context->scanrel; + StringInfo buf = context->buf; + + /* Make sure any constants in the exprs are printed portably */ + nestlevel = set_transmission_modes(); + + appendStringInfo(buf, " ORDER BY"); + foreach (lcell, pathkeys) { + PathKey* pathkey = (PathKey*)lfirst(lcell); + Expr* em_expr = NULL; + + em_expr = find_em_expr_for_rel(pathkey->pk_eclass, baserel); + Assert(em_expr != NULL); + + appendStringInfoString(buf, delim); + gcDeparseExpr(em_expr, context); + if (pathkey->pk_strategy == BTLessStrategyNumber) + appendStringInfoString(buf, " ASC"); + else + appendStringInfoString(buf, " DESC"); + + if (pathkey->pk_nulls_first) + appendStringInfoString(buf, " NULLS FIRST"); + else + appendStringInfoString(buf, " NULLS LAST"); + + delim = ", "; + } + reset_transmission_modes(nestlevel); +} + +/* + * appendFunctionName + * Deparses function name from given function oid. 
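+ *     Functions outside pg_catalog are schema-qualified, e.g. a function
+ *     f in schema s is emitted as "s.f"; pg_catalog functions are emitted
+ *     by bare (quoted-if-needed) name.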
+ */ +static void appendFunctionName(Oid funcid, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + HeapTuple proctup = NULL; + Form_pg_proc procform = NULL; + const char* proname = NULL; + + proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + if (!HeapTupleIsValid(proctup)) + elog(ERROR, "cache lookup failed for function %u", funcid); + procform = (Form_pg_proc)GETSTRUCT(proctup); + + /* Print schema name only if it's not pg_catalog */ + if (procform->pronamespace != PG_CATALOG_NAMESPACE) { + const char* schemaname = NULL; + + schemaname = get_namespace_name(procform->pronamespace); + appendStringInfo(buf, "%s.", quote_identifier(schemaname)); + } + + /* Always print the function name */ + proname = NameStr(procform->proname); + appendStringInfo(buf, "%s", quote_identifier(proname)); + + ReleaseSysCache(proctup); +} + +/* + * Appends a sort or group clause. + * + * Like get_rule_sortgroupclause(), returns the expression tree, so caller + * need not find it again. + */ +static Node* deparseSortGroupClause(Index ref, List* tlist, bool force_colno, deparse_expr_cxt* context) +{ + StringInfo buf = context->buf; + TargetEntry* tle = NULL; + Expr* expr = NULL; + + tle = get_sortgroupref_tle(ref, tlist); + expr = tle->expr; + + if (force_colno) { + /* Use column-number form when requested by caller. */ + Assert(!tle->resjunk); + appendStringInfo(buf, "%d", tle->resno); + } else if (expr && IsA(expr, Const)) { + /* + * Force a typecast here so that we don't emit something like "GROUP + * BY 2", which will be misconstrued as a column position rather than + * a constant. + */ + gcDeparseConst((Const*)expr, context, 1); + } else if ((expr == NULL) || IsA(expr, Var)) + gcDeparseExpr(expr, context); + else { + /* Always parenthesize the expression. */ + appendStringInfoString(buf, "("); + gcDeparseExpr(expr, context); + appendStringInfoString(buf, ")"); + } + + return (Node*)expr; +} + +/* + * Returns true if given Var is deparsed as a subquery output column, in + * which case, *relno and *colno are set to the IDs for the relation and + * column alias to the Var provided by the subquery. + */ +static bool is_subquery_var(Var* node, RelOptInfo* foreignrel, int* relno, int* colno) +{ + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)foreignrel->fdw_private; + RelOptInfo* outerrel = fpinfo->outerrel; + RelOptInfo* innerrel = fpinfo->innerrel; + + /* Should only be called in these cases. */ + Assert(IS_SIMPLE_REL(foreignrel) || IS_JOIN_REL(foreignrel)); + + /* + * If the given relation isn't a join relation, it doesn't have any lower + * subqueries, so the Var isn't a subquery output column. + */ + if (!IS_JOIN_REL(foreignrel)) + return false; + + /* + * If the Var doesn't belong to any lower subqueries, it isn't a subquery + * output column. + */ + if (!bms_is_member(node->varno, fpinfo->lower_subquery_rels)) + return false; + + if (bms_is_member(node->varno, outerrel->relids)) { + /* + * If outer relation is deparsed as a subquery, the Var is an output + * column of the subquery; get the IDs for the relation/column alias. + */ + if (fpinfo->make_outerrel_subquery) { + get_relation_column_alias_ids(node, outerrel, relno, colno); + return true; + } + + /* Otherwise, recurse into the outer relation. */ + return is_subquery_var(node, outerrel, relno, colno); + } else { + Assert(bms_is_member(node->varno, innerrel->relids)); + + /* + * If inner relation is deparsed as a subquery, the Var is an output + * column of the subquery; get the IDs for the relation/column alias. 
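+         * (Such Vars are later printed by gcDeparseVar in the alias form
+         * "s<relno>.c<colno>", assuming SUBQUERY_REL_ALIAS_PREFIX and
+         * SUBQUERY_COL_ALIAS_PREFIX are "s" and "c" as in postgres_fdw.)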
+         */
+        if (fpinfo->make_innerrel_subquery) {
+            get_relation_column_alias_ids(node, innerrel, relno, colno);
+            return true;
+        }
+
+        /* Otherwise, recurse into the inner relation. */
+        return is_subquery_var(node, innerrel, relno, colno);
+    }
+}
+
+/*
+ * Get the IDs for the relation and column alias to given Var belonging to
+ * given relation, which are returned into *relno and *colno.
+ */
+static void get_relation_column_alias_ids(Var* node, RelOptInfo* foreignrel, int* relno, int* colno)
+{
+    GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)foreignrel->fdw_private;
+    int i;
+    ListCell* lc = NULL;
+
+    /* Get the relation alias ID */
+    *relno = fpinfo->relation_index;
+
+    /* Get the column alias ID */
+    i = 1;
+    foreach (lc, foreignrel->reltargetlist) {
+        if (equal(lfirst(lc), (Node*)node)) {
+            *colno = i;
+            return;
+        }
+        i++;
+    }
+
+    /* Shouldn't get here */
+    elog(ERROR, "unexpected expression in subquery output");
+}
+
+/*
+ * The string list of the ForeignScan targetlist is used by the explain
+ * command when an agg is deparsed to remote sql.
+ */
+List* get_str_targetlist(List* fdw_private)
+{
+    List* str_targetlist = (List*)list_nth(fdw_private, FdwScanPrivateStrTargetlist);
+
+    List* rs = NIL;
+    ListCell* lc = NULL;
+    foreach (lc, str_targetlist) {
+        Value* val = (Value*)lfirst(lc);
+        rs = lappend(rs, val->val.str);
+    }
+
+    return rs;
+}
+
+/*
+ * The transition function is looked up in pg_aggregate.
+ */
+static char* getAggTransFn(Oid aggfnid)
+{
+    HeapTuple tuple = NULL;
+    Form_pg_aggregate aggform = NULL;
+    Form_pg_proc procform = NULL;
+
+    Oid transfnid = InvalidOid;
+    char* transfn = NULL;
+
+    /* find transfn oid from pg_aggregate with aggfn oid */
+    tuple = SearchSysCache(AGGFNOID, ObjectIdGetDatum(aggfnid), 0, 0, 0);
+    if (!HeapTupleIsValid(tuple))
+        elog(ERROR, "cache lookup failed for aggregate %u", aggfnid);
+
+    aggform = (Form_pg_aggregate)GETSTRUCT(tuple);
+
+    if (!OidIsValid(aggform->aggtransfn)) {
+        ReleaseSysCache(tuple);
+        return NULL;
+    }
+    transfnid = aggform->aggtransfn;
+
+    ReleaseSysCache(tuple);
+
+    /* find transfn name with transfn oid */
+    tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(transfnid));
+    if (!HeapTupleIsValid(tuple))
+        elog(ERROR, "cache lookup failed for function %u", transfnid);
+
+    procform = (Form_pg_proc)GETSTRUCT(tuple);
+
+    transfn = (char*)pstrdup(NameStr(procform->proname));
+
+    ReleaseSysCache(tuple);
+
+    return transfn;
+}
+
+/*
+ * Get the agg function name from pg_proc.
+ */
+static void deparseAggFunctionName(Oid funcid, deparse_expr_cxt* context)
+{
+    StringInfo buf = context->buf;
+    HeapTuple proctup = NULL;
+    Form_pg_proc procform = NULL;
+    const char* proname = NULL;
+
+    proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+    if (!HeapTupleIsValid(proctup)) {
+        elog(ERROR, "cache lookup failed for function %u", funcid);
+    }
+    procform = (Form_pg_proc)GETSTRUCT(proctup);
+
+    /* Print schema name only if it's not pg_catalog */
+    if (procform->pronamespace != PG_CATALOG_NAMESPACE) {
+        elog(ERROR, "cannot support user-defined agg function: %u", funcid);
+    }
+
+    /* Always print the function name */
+    proname = NameStr(procform->proname);
+    appendStringInfo(buf, "%s", quote_identifier(proname));
+
+    ReleaseSysCache(proctup);
+}
+
+/*
+ * Deparse a Var node from the targetlist of an Agg node.
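+ *
+ * A Var with varno == OUTER_VAR refers back to the underlying ForeignScan's
+ * targetlist and is resolved by deparsing the referenced target entry;
+ * otherwise varnoold/varoattno identify the base relation column directly.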
+ */ +static void simpleDeparseVar(Var* node, deparse_expr_cxt* context) +{ + StringInfo buf = NULL; + int varno; + int varattno; + + PlannerInfo* root = context->root; + + if (OUTER_VAR == node->varno && 0 == node->varnoold) { + List* fscan_targetlist = context->agg->lefttree->targetlist; + if (node->varattno > list_length(fscan_targetlist)) { + elog(ERROR, "varattno is out of range in ForeignScan node."); + } + + TargetEntry* te = (TargetEntry*)list_nth(fscan_targetlist, node->varattno - 1); + gcDeparseExpr(te->expr, context); + return; + } + + buf = context->buf; + varno = node->varnoold; + varattno = node->varoattno; + + if (varno <= 0 || varno >= root->simple_rel_array_size) { + elog(ERROR, "invalid varno found."); + } + + char* colname = NULL; + + /* Get RangeTblEntry from array in PlannerInfo. */ + RangeTblEntry* rte = planner_rt_fetch(varno, root); + if (RTE_RELATION != rte->rtekind) + elog(ERROR, "invalid relation type found."); + + colname = get_relid_attribute_name(rte->relid, varattno); + + appendStringInfoString(buf, quote_identifier(colname)); +} + +static deparse_expr_cxt* copyDeparseContext(deparse_expr_cxt* context) +{ + deparse_expr_cxt* new_context = (deparse_expr_cxt*)palloc0(sizeof(deparse_expr_cxt)); + + new_context->root = context->root; + new_context->foreignrel = context->foreignrel; + new_context->scanrel = context->scanrel; + new_context->buf = context->buf; + new_context->params_list = context->params_list; + new_context->coorquery = context->coorquery; + new_context->agg = context->agg; + new_context->str_targetlist = context->str_targetlist; + new_context->agg_arg1 = context->agg_arg1; + new_context->agg_arg2 = context->agg_arg2; + new_context->local_schema = context->local_schema; + + return new_context; +} + +static void deparseGroupByCol(Expr* expr, deparse_expr_cxt* context, bool addparenth) +{ + StringInfo buf = context->buf; + + if ((expr == NULL) || IsA(expr, Var)) + gcDeparseExpr(expr, context); + else { + /* Always parenthesize the expression. */ + if (addparenth) + appendStringInfoString(buf, "("); + gcDeparseExpr(expr, context); + if (addparenth) + appendStringInfoString(buf, ")"); + } +} + +static void addGroupByColinAggTargetlist(deparse_expr_cxt* context) +{ + List* aggtlist = context->agg->targetlist; + List* fstlist = context->agg->lefttree->targetlist; + + Agg* agg = (Agg*)context->agg; + + StringInfo buf = context->buf; + + for (int i = 0; i < list_length(aggtlist); i++) { + /* check targetentry that is not in grpColIdx */ + TargetEntry* tle = (TargetEntry*)list_nth(aggtlist, i); + Expr* expr = tle->expr; + bool has_aggs = false; + pgxc_is_expr_shippable((Expr*)tle, &has_aggs); + + if (has_aggs) + continue; + + bool found = false; + StringInfo expr1_str, expr2_str; + for (int j = 0; j < agg->numCols; j++) { + AttrNumber attr_idx = agg->grpColIdx[j]; + TargetEntry* fs_tle = (TargetEntry*)list_nth(fstlist, attr_idx - 1); + Expr* fs_expr = fs_tle->expr; + + expr1_str = makeStringInfo(); + expr2_str = makeStringInfo(); + + deparse_expr_cxt* cxt1 = NULL; + deparse_expr_cxt* cxt2 = NULL; + cxt1 = copyDeparseContext(context); + cxt2 = copyDeparseContext(context); + + cxt1->buf = expr1_str; + cxt2->buf = expr2_str; + + deparseGroupByCol(expr, cxt1, false); + deparseGroupByCol(fs_expr, cxt2, false); + + if (!pg_strcasecmp(expr1_str->data, expr2_str->data)) { + found = true; + break; + } + } + if (found) + continue; + + /* add the targetentry to group by clause. 
 */
+        appendStringInfoString(buf, ", ");
+
+        deparseGroupByCol(expr, context, true);
+    }
+}
+
+/*
+ * Append the GROUP BY clause to the SELECT statement.
+ */
+static void deparseGroupByClause(List* tlist, deparse_expr_cxt* context)
+{
+    StringInfo buf = context->buf;
+    bool first = true;
+
+    Agg* agg = (Agg*)context->agg;
+    if (agg->numCols <= 0)
+        return;
+
+    appendStringInfo(buf, " GROUP BY ");
+
+    List* fstargetlist = context->agg->lefttree->targetlist;
+    for (int i = 0; i < agg->numCols; i++) {
+        AttrNumber attr_idx = agg->grpColIdx[i];
+
+        if (!first)
+            appendStringInfoString(buf, ", ");
+        first = false;
+
+        if (attr_idx > list_length(fstargetlist))
+            elog(ERROR, "invalid attr number in agg->grpColIdx");
+
+        TargetEntry* tle = (TargetEntry*)list_nth(fstargetlist, attr_idx - 1);
+        Expr* expr = tle->expr;
+
+        deparseGroupByCol(expr, context, true);
+    }
+
+    addGroupByColinAggTargetlist(context);
+}
+
+/*
+ * Deparse an agg expression (DISTINCT, arguments, ORDER BY, ...).
+ */
+static void simpleDeparseAggExpr(Aggref* node, deparse_expr_cxt* context)
+{
+    StringInfo buf = context->buf;
+
+    appendStringInfoChar(buf, '(');
+
+    /* Add DISTINCT */
+    appendStringInfo(buf, "%s", (node->aggdistinct != NIL) ? "DISTINCT " : "");
+
+    /* aggstar can be set only in zero-argument aggregates */
+    if (node->aggstar) {
+        appendStringInfoChar(buf, '*');
+    } else {
+        ListCell* arg = NULL;
+        bool first = true;
+        int start;
+        int i = 1;
+
+        /* Add all the arguments */
+        foreach (arg, node->args) {
+            TargetEntry* tle = (TargetEntry*)lfirst(arg);
+            Node* n = (Node*)tle->expr;
+
+            if (tle->resjunk)
+                continue;
+
+            if (!first)
+                appendStringInfoString(buf, ", ");
+            first = false;
+
+            start = buf->len;
+
+            gcDeparseExpr((Expr*)n, context);
+
+            if (1 == i)
+                context->agg_arg1 = (char*)pstrdup(buf->data + start);
+            if (2 == i)
+                context->agg_arg2 = (char*)pstrdup(buf->data + start);
+            ++i;
+        }
+    }
+
+    /* Add ORDER BY */
+    if (node->aggorder != NIL) {
+        appendStringInfoString(buf, " ORDER BY ");
+
+        appendAggOrderBy(node->aggorder, node->args, context);
+    }
+
+    appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse the expression for special agg functions such as regr_sxx, regr_syy, regr_sxy ...
+ */
+static char* deparseAggFor6ArrayResult(Oid aggfn, char* expr1, char* expr2)
+{
+    Assert(expr1);
+    Assert(expr2);
+
+    char* transfn = getAggTransFn(aggfn);
+    if (NULL == transfn)
+        elog(ERROR, "No function name found for agg func: %u", aggfn);
+
+    StringInfo result = makeStringInfo();
+
+    if (!pg_strcasecmp(transfn, "float8_regr_accum")) {
+        appendStringInfo(result,
+            "count((%s)+(%s)), sum((%s)::numeric), sum(((%s)::numeric)*((%s)::numeric)), ",
+            expr1,
+            expr2,
+            expr1,
+            expr1,
+            expr1);
+        appendStringInfo(result,
+            "sum((%s)::numeric), sum(((%s)::numeric)*((%s)::numeric)), sum(((%s)::numeric)*((%s)::numeric))",
+            expr2,
+            expr2,
+            expr2,
+            expr1,
+            expr2);
+    } else {
+        elog(ERROR, "unsupported transition function to deparse avg expr. funcname: %s", transfn);
+    }
+
+    pfree(transfn);
+
+    return result->data;
+}
+
+/*
+ * Deparse the expression for special agg functions such as var_pop, variance, stddev...
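+ *
+ * These are expanded with the template below, e.g. for the integer
+ * accumulators:
+ *   count(%s), sum((%s)::numeric), sum(((%s)::numeric)*((%s)::numeric))
+ * so the remote side returns the transition values (count, sum, sum of
+ * squares) instead of the finished aggregate.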
+ */
+static char* deparseAggForTripleResult(Oid aggfn, char* expr)
+{
+    char* transfn = getAggTransFn(aggfn);
+    if (transfn == NULL) {
+        elog(ERROR, "No function name found for agg func: %u", aggfn);
+    }
+
+    StringInfo result = makeStringInfo();
+
+    if (!pg_strcasecmp(transfn, "int8_accum") || !pg_strcasecmp(transfn, "int4_accum") ||
+        !pg_strcasecmp(transfn, "int2_accum") || !pg_strcasecmp(transfn, "numeric_accum")) {
+        appendStringInfo(
+            result, "count(%s), sum((%s)::numeric), sum(((%s)::numeric)*((%s)::numeric))", expr, expr, expr, expr);
+    } else if (!pg_strcasecmp(transfn, "float4_accum") || !pg_strcasecmp(transfn, "float8_accum")) {
+        appendStringInfo(result, "count(%s), sum(%s), sum((%s)*(%s))", expr, expr, expr, expr);
+    } else
+        elog(ERROR, "unsupported transition function to deparse avg expr. funcname: %s", transfn);
+
+    pfree(transfn);
+
+    return result->data;
+}
+
+/*
+ * Deparse avg into the elements of its transition array (count, sum[, sum of squares]).
+ */
+static char* deparseAvg(Oid aggfn, char* expr, deparse_expr_cxt* context)
+{
+    char* transfn = getAggTransFn(aggfn);
+    if (NULL == transfn)
+        elog(ERROR, "No function name found for agg func: %u", aggfn);
+
+    StringInfo result = makeStringInfo();
+
+    if (!pg_strcasecmp(transfn, "int8_avg_accum") || !pg_strcasecmp(transfn, "int4_avg_accum") ||
+        !pg_strcasecmp(transfn, "int2_avg_accum") || !pg_strcasecmp(transfn, "int1_avg_accum") ||
+        !pg_strcasecmp(transfn, "numeric_avg_accum")) {
+        appendStringInfo(result, "count(%s), sum((%s)::numeric)", expr, expr);
+        context->map = 2;
+    } else if (!pg_strcasecmp(transfn, "interval_accum")) {
+        appendStringInfo(result, "count(%s), sum(%s)", expr, expr);
+        context->map = 2;
+    } else if (!pg_strcasecmp(transfn, "float4_accum") || !pg_strcasecmp(transfn, "float8_accum")) {
+        appendStringInfo(result, "count(%s), sum(%s), sum((%s)*(%s))", expr, expr, expr, expr);
+        context->map = 3;
+    } else {
+        elog(ERROR, "unsupported transition function to deparse avg expr. funcname: %s", transfn);
+    }
+
+    pfree(transfn);
+
+    return result->data;
+}
+
+/*
+ * Entry point for deparsing an Aggref node.
+ */
+static void simpleDeparseAggref(Aggref* node, deparse_expr_cxt* context)
+{
+    StringInfo buf = context->buf;
+
+    context->agg_arg1 = NULL;
+    context->agg_arg2 = NULL;
+
+    int start = buf->len;
+
+    /* get the string of expr in agg func */
+    simpleDeparseAggExpr(node, context);
+
+    char* expr = (char*)pstrdup(buf->data + start);
+
+    buf->data[start] = '\0';
+    buf->len = strlen(buf->data);
+
+    /* Find aggregate name from aggfnoid which is a pg_proc entry */
+    deparseAggFunctionName(node->aggfnoid, context);
+
+    char* fname = (char*)pstrdup(buf->data + start);
+
+    buf->data[start] = '\0';
+    buf->len = strlen(buf->data);
+
+    /* func name + (expr) */
+    char* func_expr = NULL;
+    if (!pg_strcasecmp("avg", fname)) {
+        func_expr = deparseAvg(node->aggfnoid, expr, context);
+
+        appendStringInfo(buf, "%s", func_expr);
+    } else if (!pg_strcasecmp("var_pop", fname) || !pg_strcasecmp("var_samp", fname) ||
+               !pg_strcasecmp("variance", fname) || !pg_strcasecmp("stddev_pop", fname) ||
+               !pg_strcasecmp("stddev", fname) || !pg_strcasecmp("stddev_samp", fname)) {
+        func_expr = deparseAggForTripleResult(node->aggfnoid, expr);
+        context->map = 3;
+
+        appendStringInfo(buf, "%s", func_expr);
+    } else if (!pg_strcasecmp("regr_sxx", fname) || !pg_strcasecmp("regr_syy", fname) ||
+               !pg_strcasecmp("regr_sxy", fname) || !pg_strcasecmp("regr_r2", fname) ||
+               !pg_strcasecmp("regr_slope", fname) || !pg_strcasecmp("corr", fname) ||
+               !pg_strcasecmp("covar_pop", fname) || !pg_strcasecmp("covar_samp", fname)) {
+        func_expr = deparseAggFor6ArrayResult(node->aggfnoid, context->agg_arg1, context->agg_arg2);
+        context->map = 6;
+
+        appendStringInfo(buf, "%s", func_expr);
+    } else if (!pg_strcasecmp("regr_avgx", fname) || !pg_strcasecmp("regr_avgy", fname) ||
+               !pg_strcasecmp("regr_intercept", fname)) {
+        func_expr = deparseAggFor6ArrayResult(node->aggfnoid, context->agg_arg2, context->agg_arg1);
+        context->map = 6;
+
+        appendStringInfo(buf, "%s", func_expr);
+    } else {
+        appendStringInfo(buf, "%s%s", fname, expr);
+    }
+}
+
+/*
+ * Deparse the targetlist of the Agg node.
+ */
+static void deparseAggTargetList(List* tlist, deparse_expr_cxt* context)
+{
+    ListCell* lc = NULL;
+    StringInfo buf = context->buf;
+    int start;
+    int i = 0;
+
+    foreach (lc, tlist) {
+        TargetEntry* tle = lfirst_node(TargetEntry, lc);
+
+        context->map = 1;
+
+        if (i > 0)
+            appendStringInfoString(buf, ", ");
+        i++;
+
+        start = buf->len;
+
+        gcDeparseExpr((Expr*)tle->expr, context);
+
+        char* str_target = (char*)pstrdup(buf->data + start);
+
+        Value* val = makeString(str_target);
+        context->str_targetlist = lappend(context->str_targetlist, val);
+
+        val = makeInteger(context->map);
+        *context->colmap = lappend(*context->colmap, val);
+    }
+}
+
+/*
+ * The entry point for deparsing an Agg node into remote sql for the ForeignScan node.
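+ *
+ * For example, an Agg computing avg(c1) grouped by c2 yields remote sql of
+ * roughly this shape (names illustrative, modulo extra parentheses):
+ *   SELECT count(c1), sum((c1)::numeric), c2 FROM public.t WHERE ... GROUP BY c2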
+ */ +static void deparseSelectStmt(StringInfo buf, PlannerInfo* root, RelOptInfo* rel, List* remote_conds, List* paramlist, + Plan* agg, List** str_targetlist, List** colmap, bool local_schema) +{ + List* quals = NIL; + + deparse_expr_cxt* context = (deparse_expr_cxt*)palloc0(sizeof(deparse_expr_cxt)); + + /* Fill portions of context common to upper, join and base relation */ + context->buf = buf; + context->root = root; + context->foreignrel = rel; + context->scanrel = rel; + context->coorquery = true; + context->agg = agg; + context->agg_arg1 = NULL; + context->agg_arg2 = NULL; + context->params_list = ¶mlist; + context->colmap = colmap; + context->local_schema = local_schema; + + /* Construct SELECT clause */ + appendStringInfoString(context->buf, "SELECT "); + + deparseAggTargetList(agg->targetlist, context); + + *str_targetlist = context->str_targetlist; + + /* + * For upper relations, the WHERE clause is built from the remote + * conditions of the underlying scan relation; otherwise, we can use the + * supplied list of remote conditions directly. + */ + quals = remote_conds; + + /* Construct FROM and WHERE clauses */ + gcDeparseFromExpr(quals, context); + + /* Append GROUP BY clause */ + deparseGroupByClause(agg->targetlist, context); +} + +static bool test_remote_sql(const char* sql) +{ + if (NULL == sql) + return false; + + List* query_string_locationlist = NIL; + + List* parsetree_list = pg_parse_query(sql, &query_string_locationlist); + + if (list_length(parsetree_list) != 0) { + Node* parsetree = (Node*)list_nth(parsetree_list, 0); + + (void)pg_analyze_and_rewrite(parsetree, sql, NULL, 0); + } + + return true; +} + +/* + * @Description: just for cooperation analysis on client cluster, + * try to deparse agg node to remote sql in ForeignScan node. + * + * @param[IN] agg : current plan node + * @param[IN] root : PlannerInfo* + * @return: Plan*: remote sql includes agg functions, or leave unchanged + */ +Plan* deparse_agg_node(Plan* agg, PlannerInfo* root) +{ + List* str_targetlist = NIL; + List* colmap = NIL; + + ForeignScan* fscan = (ForeignScan*)agg->lefttree; + + if (fscan->scan.scanrelid <= 0 || (int)fscan->scan.scanrelid >= root->simple_rel_array_size) + return agg; + + RelOptInfo* scanrel = root->simple_rel_array[fscan->scan.scanrelid]; + if (NULL == scanrel->fdwroutine || NULL == scanrel->fdw_private) + return agg; + + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)scanrel->fdw_private; + if (fpinfo->reloid != fscan->scan_relid) + return agg; + + List* remote_quals = (List*)list_nth(fscan->fdw_private, FdwScanPrivateRemoteQuals); + List* param_list = (List*)list_nth(fscan->fdw_private, FdwScanPrivateParamList); + + /* deparse agg node to remote sql that includes agg functions. */ + StringInfo sql = makeStringInfo(); + deparseSelectStmt(sql, root, scanrel, remote_quals, param_list, agg, &str_targetlist, &colmap, false); + + Assert(list_length(agg->targetlist) == list_length(colmap)); + + List* test_str_targetlist = NIL; + List* test_colmap = NIL; + StringInfo test_sql = makeStringInfo(); + deparseSelectStmt(test_sql, root, scanrel, remote_quals, param_list, agg, &test_str_targetlist, &test_colmap, true); + + if (NULL == param_list) { + if (false == test_remote_sql(test_sql->data)) + return agg; + } + + ereport(DEBUG1, (errmodule(MOD_COOP_ANALYZE), errmsg("remote agg sql: %s", sql->data))); + + /* get the real target list of ForeignScan node to match the output of the remote sql. 
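+     * e.g. an avg() target of array type expands into one scan target per
+     * transition value (count, sum[, sum of squares]), as recorded in colmap.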
 */
+    int i, j;
+    List* aggResultTargetList = NIL;
+    List* aggScanTargetList = NIL;
+    ListCell* lc = NULL;
+
+    for (i = 0, j = 0; i < list_length(agg->targetlist); i++) {
+        TargetEntry* tle = (TargetEntry*)list_nth(agg->targetlist, i);
+
+        /* only the type of the Var matters for the output of the remote sql, so varno is set to 0 */
+        Var* var = makeVarFromTargetEntry(0, tle);
+
+        TargetEntry* newtle = makeTargetEntry((Expr*)var, i + 1, tle->resname, false);
+
+        aggResultTargetList = lappend(aggResultTargetList, newtle);
+
+        if (INT8ARRAYOID == var->vartype) {
+            Var* itemvar = (Var*)copyObject(var);
+            itemvar->vartype = INT8OID;
+            itemvar->vartypmod = -1;
+
+            TargetEntry* itemtle1 = makeTargetEntry((Expr*)itemvar, j++, tle->resname, false);
+            TargetEntry* itemtle2 = makeTargetEntry((Expr*)itemvar, j++, tle->resname, false);
+
+            aggScanTargetList = lappend(aggScanTargetList, itemtle1);
+            aggScanTargetList = lappend(aggScanTargetList, itemtle2);
+        } else if (FLOAT4ARRAYOID == var->vartype || FLOAT8ARRAYOID == var->vartype || NUMERICARRAY == var->vartype) {
+            Var* itemvar = (Var*)copyObject(var);
+            if (NUMERICARRAY == var->vartype)
+                itemvar->vartype = NUMERICOID;
+            else
+                itemvar->vartype = FLOAT8OID;
+            itemvar->vartypmod = -1;
+
+            Value* val = (Value*)list_nth(colmap, i);
+            long map = val->val.ival;
+            for (long item = 0; item < map; item++) {
+                TargetEntry* itemtle = makeTargetEntry((Expr*)itemvar, j++, tle->resname, false);
+                aggScanTargetList = lappend(aggScanTargetList, itemtle);
+            }
+        } else if (ARRAYINTERVALOID == var->vartype)
+            elog(ERROR, "unsupported data type in agg pushdown.");
+        else {
+            j++;
+            aggScanTargetList = lappend(aggScanTargetList, newtle);
+        }
+    }
+
+    /* reconstruct the fdw_private of ForeignScan node. */
+    i = 0;
+    List* newfdw_private = NIL;
+
+    foreach (lc, fscan->fdw_private) {
+        /* replace the remote sql with the new one. */
+        if (FdwScanPrivateSelectSql == i) {
+            Value* val = makeString(sql->data);
+            newfdw_private = lappend(newfdw_private, val);
+
+            i++;
+            continue;
+        }
+
+        /* add the string form of the foreignscan targetlist for the output of the explain cmd. */
+        if (FdwScanPrivateStrTargetlist == i) {
+            newfdw_private = lappend(newfdw_private, str_targetlist);
+            i++;
+            continue;
+        }
+
+        /* save the agg result targetlist in fdw_private */
+        if (FdwScanPrivateAggResultTargetlist == i) {
+            newfdw_private = lappend(newfdw_private, aggResultTargetList);
+            i++;
+            continue;
+        }
+
+        /* save the agg scan targetlist in fdw_private */
+        if (FdwScanPrivateAggScanTargetlist == i) {
+            newfdw_private = lappend(newfdw_private, aggScanTargetList);
+            i++;
+            continue;
+        }
+
+        /* save the agg column map in fdw_private */
+        if (FdwScanPrivateAggColmap == i) {
+            newfdw_private = lappend(newfdw_private, colmap);
+            i++;
+            continue;
+        }
+
+        newfdw_private = lappend(newfdw_private, lfirst(lc));
+        i++;
+    }
+
+    fscan->fdw_private = newfdw_private;
+
+    /* on success, mark the Agg node as dummy */
+    Agg* aggplan = (Agg*)agg;
+    aggplan->is_dummy = true;
+
+    return agg;
+}
+
+// end of file
diff --git a/contrib/gauss_connector/gc_fdw.cpp b/contrib/gauss_connector/gc_fdw.cpp
new file mode 100644
index 000000000..ede65e48c
--- /dev/null
+++ b/contrib/gauss_connector/gc_fdw.cpp
@@ -0,0 +1,2599 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * contrib/gauss_connector/gc_fdw.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "gc_fdw.h" + +#include "access/htup.h" +#include "access/sysattr.h" +#include "access/transam.h" +#include "catalog/pg_class.h" +#include "commands/defrem.h" +#include "commands/explain.h" +#include "commands/vacuum.h" +#include "foreign/fdwapi.h" +#include "funcapi.h" +#include "libpq/pqformat.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/cost.h" +#include "optimizer/clauses.h" +#include "optimizer/pathnode.h" +#include "optimizer/paths.h" +#include "optimizer/planmain.h" +#include "optimizer/restrictinfo.h" +#include "optimizer/var.h" +#include "optimizer/tlist.h" +#include "parser/parsetree.h" +#include "pgxc/pgxcnode.h" +#include "pgxc/pgFdwRemote.h" +#include "storage/buf/block.h" +#include "utils/builtins.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/selfuncs.h" + +/* Default CPU cost to start up a foreign query. */ +#define DEFAULT_FDW_STARTUP_COST 100.0 + +/* Default CPU cost to process 1 row (above and beyond u_sess->attr.attr_sql.cpu_tuple_cost). */ +#define DEFAULT_FDW_TUPLE_COST 0.01 + +/* If no remote estimates, assume a sort costs 20% extra */ +#define DEFAULT_FDW_SORT_MULTIPLIER 1.2 + +/* + * Execution state of a foreign scan using gc_fdw. + */ +typedef struct GcFdwScanState { + Relation rel; /* relcache entry for the foreign table. NULL + * for a foreign join scan. */ + TupleDesc tupdesc; /* tuple descriptor of scan */ + AttInMetadata* attinmeta; /* attribute datatype conversion metadata */ + + /* extracted fdw_private data */ + char* query; /* text of SELECT command */ + List* retrieved_attrs; /* list of retrieved attribute numbers */ + + /* for remote query execution */ + PGconn* conn; /* connection for the scan */ + PGXCNodeAllHandles* pgxc_handle; + unsigned int cursor_number; /* quasi-unique ID for my cursor */ + bool cursor_exists; /* have we created the cursor? 
*/ + int numParams; /* number of parameters passed to query */ + FmgrInfo* param_flinfo; /* output conversion functions for them */ + List* param_exprs; /* executable expressions for param values */ + const char** param_values; /* textual values of query parameters */ + + /* for storing result tuples */ + HeapTuple* tuples; /* array of currently-retrieved tuples */ + int num_tuples; /* # of tuples in array */ + int next_tuple; /* index of next one to return */ + + /* batch-level state, for optimizing rewinds and avoiding useless fetch */ + int fetch_ct_2; /* Min(# of fetches done, 2) */ + bool eof_reached; /* true if last fetch reached EOF */ + + /* working memory contexts */ + MemoryContext batch_cxt; /* context holding current batch of tuples */ + MemoryContext temp_cxt; /* context for per-tuple temporary data */ + + int fetch_size; /* number of tuples per fetch */ + + int num_datanode; /* number of execute datanode */ + + int current_idx; /* current index of datanode */ + int cycle_idx; /* cycle index of datanode */ + int max_idx; /* max index of datanode */ + bool have_remote_encoding; /* remote encoding */ + int remote_encoding; + + PgFdwRemoteInfo* remoteinfo; /* remote table info */ + + IterateForeignScan_function IterateForeignScan; + VecIterateForeignScan_function VecIterateForeignScan; + ReScanForeignScan_function ReScanForeignScan; + Oid serverid; + + bool hasagg; /* agg in sql? */ + bool has_array; + + RemoteQueryState* remotestate; + + TupleTableSlot* resultSlot; + TupleTableSlot* scanSlot; +} GcFdwScanState; + +/* + * Identify the attribute where data conversion fails. + */ +typedef struct ConversionLocation { + Relation rel; /* foreign table's relcache entry. */ + AttrNumber cur_attno; /* attribute number being processed, or 0 */ + + /* + * In case of foreign join push down, fdw_scan_tlist is used to identify + * the Var node corresponding to the error location and + * fsstate->ss.ps.state gives access to the RTEs of corresponding relation + * to get the relation name and attribute name. 
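+     *
+     * conversion_error_callback() consults these fields to report the
+     * relation and attribute name in the error context when a fetched
+     * value fails input conversion.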
+ */ + ForeignScanState* fsstate; +} ConversionLocation; + +/* Callback argument for ec_member_matches_foreign */ +typedef struct { + Expr* current; /* current expr, or NULL if not yet found */ + List* already_used; /* expressions already dealt with */ +} ec_member_foreign_arg; + +/* + * SQL functions + */ +PG_FUNCTION_INFO_V1(gc_fdw_handler); + +/* + * FDW callback routines + */ +static void gcGetForeignRelSize(PlannerInfo* root, RelOptInfo* baserel, Oid foreigntableid); +static void gcGetForeignPaths(PlannerInfo* root, RelOptInfo* baserel, Oid foreigntableid); +static ForeignScan *gcGetForeignPlan(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid, + ForeignPath *best_path, + List *tlist, + List *scan_clauses/*, + Plan *outer_plan*/); +static void gcBeginForeignScan(ForeignScanState* node, int eflags); +static TupleTableSlot* gcIterateForeignScan(ForeignScanState* node); +static VectorBatch* gcIterateVecForeignScan(VecForeignScanState* node); +static void gcReScanForeignScan(ForeignScanState* node); +static TupleTableSlot* gcIterateNormalForeignScan(ForeignScanState* node); +static VectorBatch* gcIterateNormalVecForeignScan(VecForeignScanState* node); +static TupleTableSlot* gcIteratePBEForeignScan(ForeignScanState* node); +static VectorBatch* gcIteratePBEVecForeignScan(VecForeignScanState* node); +static void gcReScanNormalForeignScan(ForeignScanState* node); +static void gcReScanPBEForeignScan(ForeignScanState* node); +static void gcEndForeignScan(ForeignScanState* node); +static void gcExplainForeignScan(ForeignScanState* node, ExplainState* es); +static bool gcAnalyzeForeignTable(Relation relation, AcquireSampleRowsFunc* func, BlockNumber* totalpages, + void* additionalData, bool estimate_table_rownum); + +/* + * Helper functions + */ +static void gc_estimate_path_cost_size(PlannerInfo* root, RelOptInfo* baserel, List* join_conds, List* pathkeys, + double* p_rows, int* p_width, Cost* p_startup_cost, Cost* p_total_cost); +static void create_cursor(ForeignScanState* node); +static void fetch_more_data(ForeignScanState* node); +static int gcfdw_send_and_fetch_version(PGXCNodeAllHandles* pgxc_handles); +static void gcfdw_fetch_remote_table_info( + PGXCNodeAllHandles* pgxc_handles, ForeignTable* table, void* remote_info, PgFdwMessageTag tag); +static bool gcfdw_get_table_encode(ForeignTable* table, int* remote_encoding); +static void gcfdw_send_remote_encode(PGXCNodeAllHandles* pgxc_handles, int remote_encoding); +static void gcfdw_send_remote_query_param(GcFdwScanState* fsstate); +static void gcfdw_get_datanode_idx(ForeignScanState* node); +static void close_cursor(PGconn* conn, unsigned int cursor_number); +static void prepare_query_params(PlanState* node, List* fdw_exprs, int numParams, FmgrInfo** param_flinfo, + List** param_exprs, const char*** param_values); +static void process_query_params( + ExprContext* econtext, FmgrInfo* param_flinfo, List* param_exprs, const char** param_values); +static HeapTuple make_tuple_from_result_row(PGresult* res, int row, Relation rel, AttInMetadata* attinmeta, + List* retrieved_attrs, ForeignScanState* fsstate, MemoryContext temp_context); +static HeapTuple make_tuple_from_agg_result(PGresult* res, int row, Relation rel, AttInMetadata* attinmeta, + List* retrieved_attrs, ForeignScanState* fsstate, MemoryContext temp_context); + +static void conversion_error_callback(void* arg); +static void gcValidateTableDef(Node* Obj); +static bool CheckForeignExtOption(List* extOptList, const char* str); +static bool GcFdwCanSkip(GcFdwScanState* 
fsstate); +static void GcFdwCopyRemoteInfo(PgFdwRemoteInfo* new_remote_info, PgFdwRemoteInfo* ori_remote_info); +extern bool hasSpecialArrayType(TupleDesc desc); +extern Snapshot CopySnapshotByCurrentMcxt(Snapshot snapshot); + +/* + * Foreign-data wrapper handler function: return a struct with pointers + * to my callback routines. + */ +Datum gc_fdw_handler(PG_FUNCTION_ARGS) +{ + FdwRoutine* routine = makeNode(FdwRoutine); + + /* Functions for scanning foreign tables */ + routine->GetForeignRelSize = gcGetForeignRelSize; + routine->GetForeignPaths = gcGetForeignPaths; + routine->GetForeignPlan = gcGetForeignPlan; + + routine->BeginForeignScan = gcBeginForeignScan; + routine->IterateForeignScan = gcIterateForeignScan; + routine->VecIterateForeignScan = gcIterateVecForeignScan; + routine->ReScanForeignScan = gcReScanForeignScan; + routine->EndForeignScan = gcEndForeignScan; + + /* Support functions for EXPLAIN */ + routine->ExplainForeignScan = gcExplainForeignScan; + + /* Support functions for ANALYZE */ + routine->AnalyzeForeignTable = gcAnalyzeForeignTable; + + /* Check create/alter foreign table */ + routine->ValidateTableDef = gcValidateTableDef; + + PG_RETURN_POINTER(routine); +} + +/* + * gcGetForeignRelSize + * Estimate # of rows and width of the result of the scan + * + * We should consider the effect of all baserestrictinfo clauses here, but + * not any join clauses. + */ +static void gcGetForeignRelSize(PlannerInfo* root, RelOptInfo* baserel, Oid foreigntableid) +{ + if (isRestoreMode) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("cooperation analysis: can't execute the query in restore mode."))); + } + + if (u_sess->attr.attr_common.upgrade_mode != 0) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("cooperation analysis: can't execute the query in upgrade mode."))); + } + + if (IS_PGXC_DATANODE) { + Relation relation = RelationIdGetRelation(foreigntableid); + if (RelationIsValid(relation)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("Query on datanode is not " + "supported currently for the foreign table: %s.", + RelationGetRelationName(relation)))); + } else { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmodule(MOD_COOP_ANALYZE), + errmsg("could not open relation with OID %u", foreigntableid))); + } + } + + GcFdwRelationInfo* fpinfo = NULL; + ListCell* lc = NULL; + RangeTblEntry* rte = planner_rt_fetch(baserel->relid, root); + const char* relnamespace = NULL; + const char* relname = NULL; + const char* refname = NULL; + + /* + * We use GcFdwRelationInfo to pass various information to subsequent + * functions. + */ + fpinfo = (GcFdwRelationInfo*)palloc0(sizeof(GcFdwRelationInfo)); + baserel->fdw_private = (void*)fpinfo; + + /* Base foreign tables need to be pushed down always. */ + fpinfo->pushdown_safe = true; + + /* Look up foreign-table catalog info. */ + fpinfo->table = GetForeignTable(foreigntableid); + fpinfo->server = GetForeignServer(fpinfo->table->serverid); + + /* + * Extract user-settable option values. 
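+     *
+     * No remote estimate is consulted here; the values below are the
+     * built-in defaults: fdw_startup_cost = DEFAULT_FDW_STARTUP_COST,
+     * fdw_tuple_cost = DEFAULT_FDW_TUPLE_COST per retrieved row, and a
+     * fetch_size of 1000 rows per remote FETCH.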
+ */ + fpinfo->fdw_startup_cost = DEFAULT_FDW_STARTUP_COST; + fpinfo->fdw_tuple_cost = DEFAULT_FDW_TUPLE_COST; + fpinfo->shippable_extensions = NIL; + fpinfo->fetch_size = 1000; + + PGXCNodeAllHandles* pgxc_handle = NULL; + + int remote_encoding; + bool have_remote_encoding = gcfdw_get_table_encode(fpinfo->table, &remote_encoding); + + (void)GetConnection(fpinfo->table->serverid, &pgxc_handle, true, have_remote_encoding); + PgFdwRemoteInfo* remote_info = &fpinfo->remote_info; + + u_sess->pgxc_cxt.gc_fdw_run_version = gcfdw_send_and_fetch_version(pgxc_handle); + if (u_sess->pgxc_cxt.gc_fdw_run_version >= GCFDW_VERSION_V1R8C10_1 && have_remote_encoding == true) { + gcfdw_send_remote_encode(pgxc_handle, remote_encoding); + } + + gcfdw_fetch_remote_table_info(pgxc_handle, fpinfo->table, remote_info, PGFDW_GET_TABLE_INFO); + Assert(remote_info->snapsize > 0); + if (remote_info->snapsize <= 0) { + ereport(ERROR, + (errcode(ERRCODE_NO_DATA_FOUND), + errmsg("cooperation analysis: not receive the snapshot from remote cluster"))); + } + + /* + * Identify which baserestrictinfo clauses can be sent to the remote + * server and which can't. + */ + classifyConditions(root, baserel, baserel->baserestrictinfo, &fpinfo->remote_conds, &fpinfo->local_conds); + + /* + * Identify which attributes will need to be retrieved from the remote + * server. These include all attrs needed for joins or final output, plus + * all attrs used in the local_conds. (Note: if we end up using a + * parameterized scan, it's possible that some of the join clauses will be + * sent to the remote and thus we wouldn't really need to retrieve the + * columns used in them. Doesn't seem worth detecting that case though.) + */ + fpinfo->attrs_used = NULL; + pull_varattnos((Node*)baserel->reltargetlist, baserel->relid, &fpinfo->attrs_used); + foreach (lc, fpinfo->local_conds) { + RestrictInfo* rinfo = lfirst_node(RestrictInfo, lc); + + pull_varattnos((Node*)rinfo->clause, baserel->relid, &fpinfo->attrs_used); + } + + /* + * Compute the selectivity and cost of the local_conds, so we don't have + * to do it over again for each path. The best we can do for these + * conditions is to estimate selectivity on the basis of local statistics. + */ + fpinfo->local_conds_sel = clauselist_selectivity(root, fpinfo->local_conds, baserel->relid, JOIN_INNER, NULL); + + cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root); + + /* + * Set cached relation costs to some negative value, so that we can detect + * when they are set to some sensible costs during one (usually the first) + * of the calls to gc_estimate_path_cost_size(). + */ + fpinfo->rel_startup_cost = -1; + fpinfo->rel_total_cost = -1; + + /* + * estimate using whatever statistics we + * have locally, in a way similar to ordinary tables. + */ + { + /* + * If the foreign table has never been ANALYZEd, it will have relpages + * and reltuples equal to zero, which most likely has nothing to do + * with reality. We can't do a whole lot about that if we're not + * allowed to consult the remote server, but we can use a hack similar + * to plancat.c's treatment of empty relations: use a minimum size + * estimate of 10 pages, and divide by the column-datatype-based width + * estimate to get the corresponding number of tuples. 
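+         *
+         * A worked example of the formula below (assuming 8 kB blocks
+         * and 8-byte MAXALIGN, so the header term MAXALIGN(23) is 24):
+         * with an estimated width of 40 bytes,
+         * tuples = (10 * 8192) / (40 + 24) = 1280.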
+ */ + if (baserel->pages == 0 && baserel->tuples == 0) { + baserel->pages = 10; + const int BLOCK_NUM = 10; + baserel->tuples = + (double)(BLOCK_NUM * BLCKSZ) / (baserel->width + MAXALIGN(offsetof(HeapTupleHeaderData, t_bits))); + } + + /* Estimate baserel size as best we can with local statistics. */ + set_baserel_size_estimates(root, baserel); + + /* Fill in basically-bogus cost estimates for use later. */ + gc_estimate_path_cost_size( + root, baserel, NIL, NIL, &fpinfo->rows, &fpinfo->width, &fpinfo->startup_cost, &fpinfo->total_cost); + } + + /* + * Set the name of relation in fpinfo, while we are constructing it here. + * It will be used to build the string describing the join relation in + * EXPLAIN output. We can't know whether VERBOSE option is specified or + * not, so always schema-qualify the foreign table name. + */ + fpinfo->relation_name = makeStringInfo(); + relnamespace = get_namespace_name(get_rel_namespace(foreigntableid)); + relname = get_rel_name(foreigntableid); + refname = rte->eref->aliasname; + const char* quote_relnamespace = (relnamespace != NULL) ? quote_identifier(relnamespace) : "\"Unknown\""; + const char* quote_relname = (relname != NULL) ? quote_identifier(relname) : "\"Unknown\""; + appendStringInfo(fpinfo->relation_name, "%s.%s", quote_relnamespace, quote_relname); + if (*refname && strcmp(refname, relname) != 0) + appendStringInfo(fpinfo->relation_name, " %s", quote_identifier(rte->eref->aliasname)); + + /* No outer and inner relations. */ + fpinfo->make_outerrel_subquery = false; + fpinfo->make_innerrel_subquery = false; + fpinfo->lower_subquery_rels = NULL; + /* Set the relation index. */ + fpinfo->relation_index = baserel->relid; + + fpinfo->reloid = foreigntableid; +} + +/* + * gcGetForeignPaths + * Create possible scan paths for a scan on the foreign table + */ +static void gcGetForeignPaths(PlannerInfo* root, RelOptInfo* baserel, Oid foreigntableid) +{ + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)baserel->fdw_private; + ForeignPath* path = NULL; + + /* + * Create simplest ForeignScan path node and add it to baserel. This path + * corresponds to SeqScan path of regular tables (though depending on what + * baserestrict conditions we were able to send to remote, there might + * actually be an indexscan happening there). We already did all the work + * to estimate cost and size of this path. + */ + path = create_foreignscan_path(root, + baserel, + fpinfo->startup_cost, + fpinfo->total_cost, + NIL, /* no pathkeys */ + NULL, /* no outer rel either */ + NIL, /* no fdw_private list */ + u_sess->opt_cxt.query_dop); + add_path(root, baserel, (Path*)path); +} + +/* + * gcGetForeignPlan + * Create ForeignScan plan node which implements selected best path + */ +static ForeignScan* gcGetForeignPlan(PlannerInfo* root, RelOptInfo* foreignrel, Oid foreigntableid, + ForeignPath* best_path, List* tlist, List* scan_clauses) +{ + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)foreignrel->fdw_private; + Index scan_relid; + List* fdw_private = NIL; + List* remote_exprs = NIL; + List* local_exprs = NIL; + List* params_list = NIL; + List* fdw_scan_tlist = NIL; + List* fdw_recheck_quals = NIL; + List* retrieved_attrs = NIL; + StringInfoData sql; + ListCell* lc = NULL; + + if (IS_SIMPLE_REL(foreignrel)) { + /* + * For base relations, set scan_relid as the relid of the relation. + */ + scan_relid = foreignrel->relid; + + /* + * In a base-relation scan, we must apply the given scan_clauses. 
+ * + * Separate the scan_clauses into those that can be executed remotely + * and those that can't. baserestrictinfo clauses that were + * previously determined to be safe or unsafe by classifyConditions + * are found in fpinfo->remote_conds and fpinfo->local_conds. Anything + * else in the scan_clauses list will be a join clause, which we have + * to check for remote-safety. + * + * Note: the join clauses we see here should be the exact same ones + * previously examined by gcGetForeignPaths. Possibly it'd be + * worth passing forward the classification work done then, rather + * than repeating it here. + * + * This code must match "extract_actual_clauses(scan_clauses, false)" + * except for the additional decision about remote versus local + * execution. + */ + foreach (lc, scan_clauses) { + RestrictInfo* rinfo = lfirst_node(RestrictInfo, lc); + + /* Ignore any pseudoconstants, they're dealt with elsewhere */ + if (rinfo->pseudoconstant) + continue; + + if (list_member_ptr(fpinfo->remote_conds, rinfo)) { + remote_exprs = lappend(remote_exprs, rinfo->clause); + } else if (list_member_ptr(fpinfo->local_conds, rinfo)) { + local_exprs = lappend(local_exprs, rinfo->clause); + } else if (is_foreign_expr(root, foreignrel, rinfo->clause)) { + remote_exprs = lappend(remote_exprs, rinfo->clause); + } else { + local_exprs = lappend(local_exprs, rinfo->clause); + } + } + + /* + * For a base-relation scan, we have to support EPQ recheck, which + * should recheck all the remote quals. + */ + fdw_recheck_quals = remote_exprs; + } else { + /* + * Join relation or upper relation - set scan_relid to 0. + */ + scan_relid = 0; + + /* + * For a join rel, baserestrictinfo is NIL and we are not considering + * parameterization right now, so there should be no scan_clauses for + * a joinrel or an upper rel either. + */ + Assert(!scan_clauses); + + /* + * Instead we get the conditions to apply from the fdw_private + * structure. + */ + remote_exprs = extract_actual_clauses(fpinfo->remote_conds, false); + local_exprs = extract_actual_clauses(fpinfo->local_conds, false); + + /* + * We leave fdw_recheck_quals empty in this case, since we never need + * to apply EPQ recheck clauses. In the case of a joinrel, EPQ + * recheck is handled elsewhere --- see postgresGetForeignJoinPaths(). + * If we're planning an upperrel (ie, remote grouping or aggregation) + * then there's no EPQ to do because SELECT FOR UPDATE wouldn't be + * allowed, and indeed we *can't* put the remote clauses into + * fdw_recheck_quals because the unaggregated Vars won't be available + * locally. + */ + + /* Build the list of columns to be fetched from the foreign server. */ + fdw_scan_tlist = build_tlist_to_deparse(foreignrel); + } + + /* + * Build the query string to be sent for execution, and identify + * expressions to be sent as parameters. + */ + initStringInfo(&sql); + gcDeparseSelectStmtForRel(&sql, + root, + foreignrel, + fdw_scan_tlist, + remote_exprs, + best_path->path.pathkeys, + false, + &retrieved_attrs, + ¶ms_list); + + /* Remember remote_exprs for possible use by postgresPlanDirectModify */ + fpinfo->final_remote_exprs = remote_exprs; + + /* + * Build the fdw_private list that will be available to the executor. + * Items in the list must match order in enum FdwScanPrivateIndex. 
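+     *
+     * The executor side retrieves the items positionally; a minimal
+     * sketch, mirroring gcBeginForeignScan():
+     *
+     *     char* sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
+     *     int fetch_size = intVal(list_nth(fdw_private, FdwScanPrivateFetchSize));
+     *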
+ * index: + * FdwScanPrivateSelectSql, FdwScanPrivateRetrievedAttrs, FdwScanPrivateFetchSize, + */ + fdw_private = list_make3(makeString(sql.data), retrieved_attrs, makeInteger(fpinfo->fetch_size)); + + /* FdwScanPrivateRemoteInfo */ + PgFdwRemoteInfo* remote_info = makeNode(PgFdwRemoteInfo); + remote_info->snapsize = fpinfo->remote_info.snapsize; + remote_info->snapshot = (Snapshot)palloc0(remote_info->snapsize); + GcFdwCopyRemoteInfo(remote_info, &fpinfo->remote_info); + fdw_private = lappend(fdw_private, remote_info); + + /* FdwScanPrivateStrTargetlist */ + fdw_private = lappend(fdw_private, NIL); + + /* FdwScanPrivateAggResultTargetlist */ + fdw_private = lappend(fdw_private, NIL); + + /* FdwScanPrivateAggScanTargetlist */ + fdw_private = lappend(fdw_private, NIL); + + /* FdwScanPrivateAggColmap */ + fdw_private = lappend(fdw_private, NIL); + + /* FdwScanPrivateRemoteQuals */ + fdw_private = lappend(fdw_private, remote_exprs); + + /* FdwScanPrivateParamList */ + fdw_private = lappend(fdw_private, params_list); + + if (IS_JOIN_REL(foreignrel)) + fdw_private = lappend(fdw_private, makeString(fpinfo->relation_name->data)); + + /* + * Create the ForeignScan node for the given relation. + * + * Note that the remote parameter expressions are stored in the fdw_exprs + * field of the finished plan node; we can't keep them in private state + * because then they wouldn't be subject to later planner processing. + */ + return make_foreignscan(tlist, local_exprs, scan_relid, params_list, fdw_private, EXEC_ON_DATANODES); +} + +/* + * gcBeginForeignScan + * Initiate an executor scan of a foreign openGauss table. + */ +static void gcBeginForeignScan(ForeignScanState* node, int eflags) +{ + ForeignScan* fsplan = (ForeignScan*)node->ss.ps.plan; + EState* estate = node->ss.ps.state; + GcFdwScanState* fsstate = NULL; + RangeTblEntry* rte = NULL; + ForeignTable* table = NULL; + int rtindex; + int numParams; + + /* + * Do nothing in EXPLAIN (no ANALYZE) case. node->fdw_state stays NULL. + */ + if (eflags & EXEC_FLAG_EXPLAIN_ONLY) + return; + + /* + * We'll save private state in node->fdw_state. + */ + fsstate = (GcFdwScanState*)palloc0(sizeof(GcFdwScanState)); + node->fdw_state = (void*)fsstate; + + /* + * Identify which user to do the remote access as. This should match what + * ExecCheckRTEPerms() does. In case of a join or aggregate, use the + * lowest-numbered member RTE as a representative; we would get the same + * result from any. + */ + Assert(fsplan->scan.scanrelid > 0); + + rtindex = fsplan->scan.scanrelid; + rte = rt_fetch(rtindex, estate->es_range_table); + + /* Get info about foreign table. */ + table = GetForeignTable(rte->relid); + + if (IS_PGXC_COORDINATOR) { + return; + } + + /* Get private info created by planner functions. */ + fsstate->query = strVal(list_nth(fsplan->fdw_private, FdwScanPrivateSelectSql)); + fsstate->retrieved_attrs = (List*)list_nth(fsplan->fdw_private, FdwScanPrivateRetrievedAttrs); + fsstate->fetch_size = intVal(list_nth(fsplan->fdw_private, FdwScanPrivateFetchSize)); + + fsstate->remoteinfo = (PgFdwRemoteInfo*)list_nth(fsplan->fdw_private, FdwScanPrivateRemoteInfo); + + /* Create contexts for batches of tuples and per-tuple temp workspace. 
*/ + fsstate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt, + "gc_fdw tuple data", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + fsstate->temp_cxt = AllocSetContextCreate(t_thrd.mem_cxt.msg_mem_cxt, + "gc_fdw temporary data", + ALLOCSET_SMALL_MINSIZE, + ALLOCSET_SMALL_INITSIZE, + ALLOCSET_SMALL_MAXSIZE); + + /* + * Get info we'll need for converting data fetched from the foreign server + * into local representation and error reporting during that process. + */ + List* agg_result_tlist = (List*)list_nth(fsplan->fdw_private, FdwScanPrivateAggResultTargetlist); + + if (NIL == agg_result_tlist) /* remote sql without agg func */ + { + fsstate->rel = node->ss.ss_currentRelation; + fsstate->tupdesc = RelationGetDescr(fsstate->rel); + + TupleDesc scan_desc = CreateTemplateTupleDesc(list_length(fsstate->retrieved_attrs), false); + + TupleTableSlot* slot = node->ss.ss_ScanTupleSlot; + + for (int i = 0; i < list_length(fsstate->retrieved_attrs); i++) { + int attidx = list_nth_int(fsstate->retrieved_attrs, i); + Form_pg_attribute attr = slot->tts_tupleDescriptor->attrs[attidx - 1]; + const char* attname = (const char*)(attr->attname.data); + TupleDescInitEntry(scan_desc, i + 1, attname, attr->atttypid, attr->atttypmod, 0); + } + + fsstate->scanSlot = MakeSingleTupleTableSlot(scan_desc); + fsstate->resultSlot = node->ss.ss_ScanTupleSlot; + } else /* remote sql with agg func */ + { + fsstate->hasagg = true; + + TupleDesc resultDesc = ExecTypeFromTL(agg_result_tlist, false); + + if (hasSpecialArrayType(resultDesc)) + fsstate->has_array = true; + + ExecAssignResultType(&node->ss.ps, resultDesc); + + node->ss.ps.qual = NIL; + node->ss.ps.ps_ProjInfo = NULL; + + if (true == fsstate->has_array) { + List* agg_scan_tlist = (List*)list_nth(fsplan->fdw_private, FdwScanPrivateAggScanTargetlist); + TupleDesc scanDesc = ExecTypeFromTL(agg_scan_tlist, false); + + ExecSetSlotDescriptor(node->ss.ss_ScanTupleSlot, resultDesc); + fsstate->tupdesc = scanDesc; + + fsstate->scanSlot = MakeSingleTupleTableSlot(scanDesc); + fsstate->resultSlot = node->ss.ss_ScanTupleSlot; + } else { + ExecSetSlotDescriptor(node->ss.ss_ScanTupleSlot, resultDesc); + fsstate->tupdesc = resultDesc; + + fsstate->scanSlot = node->ss.ss_ScanTupleSlot; + fsstate->resultSlot = node->ss.ss_ScanTupleSlot; + } + + fsstate->rel = NULL; + } + + for (int i = 0; i < fsstate->resultSlot->tts_tupleDescriptor->natts; i++) { + fsstate->resultSlot->tts_isnull[i] = true; + } + + fsstate->resultSlot->tts_isempty = false; + fsstate->scanSlot->tts_isempty = false; + + fsstate->attinmeta = TupleDescGetAttInMetadata(fsstate->tupdesc); + + /* + * Prepare for processing of parameters used in remote query, if any. + */ + numParams = list_length(fsplan->fdw_exprs); + fsstate->numParams = numParams; + if (numParams > 0) { + prepare_query_params((PlanState *)node, + fsplan->fdw_exprs, + numParams, + &fsstate->param_flinfo, + &fsstate->param_exprs, + &fsstate->param_values); + } + + /* get encoding */ + fsstate->have_remote_encoding = gcfdw_get_table_encode(table, &fsstate->remote_encoding); + + /* + * Get connection to the foreign server. Connection manager will + * establish new connection if necessary. + */ + fsstate->conn = GetConnection( + table->serverid, &fsstate->pgxc_handle, numParams > 0 ? 
true : false, fsstate->have_remote_encoding);
+    u_sess->pgxc_cxt.gc_fdw_run_version = gcfdw_send_and_fetch_version(fsstate->pgxc_handle);
+
+    fsstate->serverid = table->serverid;
+
+    /* Assign a unique ID for my cursor */
+    fsstate->cursor_number = GetCursorNumber(fsstate->conn);
+    fsstate->cursor_exists = false;
+
+    if (numParams > 0) {
+        fsstate->IterateForeignScan = gcIteratePBEForeignScan;
+        fsstate->VecIterateForeignScan = gcIteratePBEVecForeignScan;
+        fsstate->ReScanForeignScan = gcReScanPBEForeignScan;
+    } else {
+        fsstate->IterateForeignScan = gcIterateNormalForeignScan;
+        fsstate->VecIterateForeignScan = gcIterateNormalVecForeignScan;
+        fsstate->ReScanForeignScan = gcReScanNormalForeignScan;
+    }
+
+    /* get datanode information */
+    fsstate->num_datanode = fsstate->remoteinfo->datanodenum;
+
+    /* set datanode index */
+    gcfdw_get_datanode_idx(node);
+
+    /* send index and snapshot to remote */
+    gcfdw_send_remote_query_param(fsstate);
+}
+
+/*
+ * gcIterateForeignScan
+ *     Retrieve next row from the result set, or clear tuple slot to indicate
+ *     EOF.
+ */
+static TupleTableSlot* gcIterateForeignScan(ForeignScanState* node)
+{
+    if (IS_PGXC_COORDINATOR) {
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("EXECUTE DIRECT cannot execute SELECT query with foreign table on coordinator")));
+    }
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+
+    return fsstate->IterateForeignScan(node);
+}
+
+/*
+ * gcIterateVecForeignScan
+ *     Direct vectorized iteration is not supported; always reports an error.
+ */
+static VectorBatch* gcIterateVecForeignScan(VecForeignScanState* node)
+{
+    ereport(ERROR,
+        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("Retrieving the next vector batch is not supported for foreign tables.")));
+    return NULL;
+}
+
+/*
+ * gcReScanForeignScan
+ *     Restart the scan.
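+ *
+ * This is only a dispatcher: it invokes the Normal or PBE variant that
+ * gcBeginForeignScan() selected according to whether the remote query
+ * carries parameters.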
+ */ +static void gcReScanForeignScan(ForeignScanState* node) +{ + if (IS_PGXC_COORDINATOR) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("EXECUTE DIRECT cannot execute SELECT query with foreign table on coordinator"))); + } + GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state; + return fsstate->ReScanForeignScan(node); +} + +static void postgresConstructResultSlotWithArray(ForeignScanState* node) +{ + GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state; + ForeignScan* fsplan = (ForeignScan*)node->ss.ps.plan; + + List* colmap = (List*)list_nth(fsplan->fdw_private, FdwScanPrivateAggColmap); + + TupleTableSlot* resultSlot = fsstate->resultSlot; + TupleTableSlot* scanSlot = fsstate->scanSlot; + + TupleDesc resultDesc = resultSlot->tts_tupleDescriptor; + + long scanAttr, resultAttr, map; + + for (scanAttr = 0, resultAttr = 0; resultAttr < resultDesc->natts; resultAttr++, scanAttr += map) { + Assert(list_length(colmap) == resultDesc->natts); + + Oid typoid = resultDesc->attrs[resultAttr]->atttypid; + Value* val = (Value*)list_nth(colmap, resultAttr); + map = val->val.ival; + + if (1 == map) /* other type */ + { + resultSlot->tts_isnull[resultAttr] = scanSlot->tts_isnull[scanAttr]; + resultSlot->tts_values[resultAttr] = scanSlot->tts_values[scanAttr]; + } else /* array type*/ + { + const int MAX_TRANSDATUMS = 6; + Datum transdatums[MAX_TRANSDATUMS]; + ArrayType* result = NULL; + long attrnum, i; + + if (map > MAX_TRANSDATUMS) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("colmap value is not correct."))); + } + + if (INT8ARRAYOID == typoid) { + Assert(2 == map); + + attrnum = scanAttr; + for (attrnum = scanAttr, i = 0; i < map; i++, attrnum++) + transdatums[i] = scanSlot->tts_isnull[attrnum] ? Int64GetDatum(0) : scanSlot->tts_values[attrnum]; + + result = construct_array(transdatums, map, INT8OID, 8, true, 's'); + } else if (FLOAT4ARRAYOID == typoid) { + for (attrnum = scanAttr, i = 0; i < map; i++, attrnum++) + transdatums[i] = + scanSlot->tts_isnull[attrnum] ? Float4GetDatumFast(0) : scanSlot->tts_values[attrnum]; + + result = construct_array(transdatums, map, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); + } else if (FLOAT8ARRAYOID == typoid) { + for (attrnum = scanAttr, i = 0; i < map; i++, attrnum++) + transdatums[i] = + scanSlot->tts_isnull[attrnum] ? Float8GetDatumFast(0) : scanSlot->tts_values[attrnum]; + + result = construct_array(transdatums, map, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); + } else if (NUMERICARRAY == typoid) { + Datum d = Int64GetDatum(0); + for (attrnum = scanAttr, i = 0; i < map; i++, attrnum++) + transdatums[i] = scanSlot->tts_isnull[attrnum] ? 
DirectFunctionCall1(int8_numeric, d)
+                                       : scanSlot->tts_values[attrnum];
+
+                result = construct_array(transdatums, map, NUMERICOID, -1, false, 'i');
+            } else
+                ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("unsupported data type in agg pushdown.")));
+
+            resultSlot->tts_isnull[resultAttr] = false;
+            resultSlot->tts_values[resultAttr] = PointerGetDatum(result);
+        }
+    }
+
+    resultSlot->tts_nvalid = resultDesc->natts;
+    resultSlot->tts_isempty = false;
+}
+
+static void postgresMapResultFromScanSlot(ForeignScanState* node)
+{
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+
+    if (fsstate->hasagg == false) {
+        int j = 0;
+        ListCell* lc = NULL;
+        foreach (lc, fsstate->retrieved_attrs) {
+            int i = lfirst_int(lc);
+
+            fsstate->resultSlot->tts_isnull[i - 1] = fsstate->scanSlot->tts_isnull[j];
+            fsstate->resultSlot->tts_values[i - 1] = fsstate->scanSlot->tts_values[j];
+            j++;
+        }
+
+        fsstate->resultSlot->tts_nvalid = fsstate->resultSlot->tts_tupleDescriptor->natts;
+    } else {
+        if (false == fsstate->has_array) {
+            // do nothing
+        } else {
+            postgresConstructResultSlotWithArray(node);
+        }
+    }
+}
+
+/*
+ * gcIterateNormalForeignScan
+ *     Retrieve next row from the result set, or clear tuple slot to indicate
+ *     EOF.
+ */
+static TupleTableSlot* gcIterateNormalForeignScan(ForeignScanState* node)
+{
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+
+    /* reset the result slot at the beginning */
+    (void)ExecClearTuple(fsstate->resultSlot);
+    fsstate->resultSlot->tts_isempty = false;
+
+    TupleTableSlot* slot = node->ss.ss_ScanTupleSlot;
+
+    /*
+     * If this is the first call after Begin or ReScan, we need to create the
+     * cursor on the remote side.
+     */
+    if (!fsstate->cursor_exists) {
+        AutoContextSwitch memGuard(fsstate->batch_cxt);
+
+        fsstate->cursor_exists = true;
+
+        if (GcFdwCanSkip(fsstate)) {
+            return ExecClearTuple(slot);
+        }
+
+        fsstate->num_tuples = 0;
+
+        pgfdw_send_query(fsstate->pgxc_handle, fsstate->query, &fsstate->remotestate);
+    }
+
+    MemoryContextReset(fsstate->temp_cxt);
+    MemoryContext oldcontext = MemoryContextSwitchTo(fsstate->temp_cxt);
+
+    if (!PgfdwGetTuples(fsstate->pgxc_handle->dn_conn_count,
+            fsstate->pgxc_handle->datanode_handles,
+            fsstate->remotestate,
+            fsstate->scanSlot)) {
+        fsstate->eof_reached = true;
+        pgfdw_node_report_error(fsstate->remotestate);
+        MemoryContextSwitchTo(oldcontext);
+        return NULL;
+    }
+
+    fsstate->num_tuples++;
+    postgresMapResultFromScanSlot(node);
+
+    pgfdw_node_report_error(fsstate->remotestate);
+    MemoryContextSwitchTo(oldcontext);
+
+    return fsstate->resultSlot;
+}
+
+/*
+ * gcIteratePBEForeignScan
+ *     Retrieve next row from the result set, or clear tuple slot to indicate
+ *     EOF.
+ */
+static TupleTableSlot* gcIteratePBEForeignScan(ForeignScanState* node)
+{
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+    TupleTableSlot* slot = node->ss.ss_ScanTupleSlot;
+
+    /*
+     * If this is the first call after Begin or ReScan, we need to create the
+     * cursor on the remote side.
+     */
+    if (!fsstate->cursor_exists) {
+        if (GcFdwCanSkip(fsstate)) {
+            return ExecClearTuple(slot);
+        }
+        create_cursor(node);
+    }
+
+    /*
+     * Get some more tuples, if we've run out.
+     */
+    if (fsstate->next_tuple >= fsstate->num_tuples) {
+        /* No point in another fetch if we already detected EOF, though. */
+        if (!fsstate->eof_reached) {
+            fetch_more_data(node);
+        }
+
+        /* If we didn't get any tuples, must be end of data.
*/ + if (fsstate->next_tuple >= fsstate->num_tuples) { + return ExecClearTuple(slot); + } + } + + HeapTuple tuple = fsstate->tuples[fsstate->next_tuple++]; + + if (fsstate->hasagg && fsstate->has_array) { + MemoryContext old = CurrentMemoryContext; + MemoryContextSwitchTo(fsstate->batch_cxt); + + TupleDesc scanDesc = fsstate->scanSlot->tts_tupleDescriptor; + TupleDesc resultDesc = fsstate->resultSlot->tts_tupleDescriptor; + + heap_deform_tuple(tuple, scanDesc, fsstate->scanSlot->tts_values, fsstate->scanSlot->tts_isnull); + postgresMapResultFromScanSlot(node); + tuple = heap_form_tuple(resultDesc, fsstate->resultSlot->tts_values, fsstate->resultSlot->tts_isnull); + + MemoryContextSwitchTo(old); + } + + /* + * Return the next tuple. + */ + (void)ExecStoreTuple(tuple, slot, InvalidBuffer, false); + + return slot; +} + +/* + * gcIterateNormalVecForeignScan + * Retrieve next VectorBatch from the result set. + */ +static VectorBatch* gcIterateNormalVecForeignScan(VecForeignScanState* node) +{ + GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state; + TupleTableSlot* slot = node->ss.ss_ScanTupleSlot; + VectorBatch* batch = node->m_pScanBatch; + Datum* values = node->m_values; + bool* nulls = node->m_nulls; + MemoryContext scanMcxt = node->scanMcxt; + + MemoryContextReset(fsstate->temp_cxt); + MemoryContext oldcontext = MemoryContextSwitchTo(fsstate->temp_cxt); + + /* + * If this is the first call after Begin or ReScan, we need to create the + * cursor on the remote side. + */ + if (!fsstate->cursor_exists) { + AutoContextSwitch memGuard(fsstate->batch_cxt); + + fsstate->cursor_exists = true; + if (GcFdwCanSkip(fsstate)) { + return NULL; + } + + fsstate->num_tuples = 0; + + pgfdw_send_query(fsstate->pgxc_handle, fsstate->query, &fsstate->remotestate); + } + + MemoryContextSwitchTo(oldcontext); + + MemoryContextReset(scanMcxt); + oldcontext = MemoryContextSwitchTo(scanMcxt); + + /* init vectorbatch */ + batch->Reset(true); + + /* get vectorbatch from tuple */ + for (batch->m_rows = 0; batch->m_rows < BatchMaxSize; batch->m_rows++) { + for (int i = 0; i < slot->tts_tupleDescriptor->natts; i++) { + slot->tts_isnull[i] = true; + } + + if (!PgfdwGetTuples(fsstate->pgxc_handle->dn_conn_count, + fsstate->pgxc_handle->datanode_handles, + fsstate->remotestate, + fsstate->scanSlot)) { + fsstate->eof_reached = true; + pgfdw_node_report_error(fsstate->remotestate); + break; + } + + pgfdw_node_report_error(fsstate->remotestate); + + postgresMapResultFromScanSlot(node); + + values = fsstate->resultSlot->tts_values; + nulls = fsstate->resultSlot->tts_isnull; + + int rows = batch->m_rows; + for (int i = 0; i < batch->m_cols; i++) { + ScalarVector* vec = &(batch->m_arr[i]); + if (nulls[i]) { + vec->m_rows++; + vec->SetNull(rows); + continue; + } + + if (vec->m_desc.encoded) + vec->AddVar(values[i], rows); + else + vec->m_vals[rows] = values[i]; + + vec->m_rows++; + } + + fsstate->next_tuple++; + if (fsstate->next_tuple >= fsstate->num_tuples) { + batch->m_rows++; + break; + } + } + + MemoryContextSwitchTo(oldcontext); + + return batch; +} + +/* + * gcIteratePBEVecForeignScan + * Retrieve next VectorBatch from the result set. 
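+ *
+ * Rows are drawn from the batch of tuples fetched through the remote
+ * cursor (see fetch_more_data()) and deformed column by column into
+ * the VectorBatch, at most BatchMaxSize rows per call.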
+ */
+static VectorBatch* gcIteratePBEVecForeignScan(VecForeignScanState* node)
+{
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+    VectorBatch* batch = node->m_pScanBatch;
+    Datum* values = node->m_values;
+    bool* nulls = node->m_nulls;
+    MemoryContext scanMcxt = node->scanMcxt;
+    TupleDesc tupdesc = fsstate->tupdesc;
+    MemoryContext oldMemoryContext = NULL;
+
+    /*
+     * If this is the first call after Begin or ReScan, we need to create the
+     * cursor on the remote side.
+     */
+    if (!fsstate->cursor_exists) {
+        if (GcFdwCanSkip(fsstate)) {
+            return NULL;
+        }
+        create_cursor(node);
+    }
+
+    /*
+     * Get some more tuples, if we've run out.
+     */
+    if (fsstate->next_tuple >= fsstate->num_tuples) {
+        /* No point in another fetch if we already detected EOF, though. */
+        if (!fsstate->eof_reached)
+            fetch_more_data(node);
+        /* If we didn't get any tuples, must be end of data. */
+        if (fsstate->next_tuple >= fsstate->num_tuples) {
+            return NULL;
+        }
+    }
+
+    MemoryContextReset(scanMcxt);
+    oldMemoryContext = MemoryContextSwitchTo(scanMcxt);
+
+    /* init vectorbatch */
+    batch->Reset(true);
+
+    /* get vectorbatch from tuple */
+    for (batch->m_rows = 0; batch->m_rows < BatchMaxSize; batch->m_rows++) {
+        HeapTuple tuple = fsstate->tuples[fsstate->next_tuple];
+
+        if (fsstate->hasagg && fsstate->has_array) {
+            TupleDesc scanDesc = fsstate->scanSlot->tts_tupleDescriptor;
+            TupleDesc resultDesc = fsstate->resultSlot->tts_tupleDescriptor;
+
+            heap_deform_tuple(tuple, scanDesc, fsstate->scanSlot->tts_values, fsstate->scanSlot->tts_isnull);
+            postgresMapResultFromScanSlot(node);
+            tuple = heap_form_tuple(resultDesc, fsstate->resultSlot->tts_values, fsstate->resultSlot->tts_isnull);
+        }
+
+        heap_deform_tuple(tuple, tupdesc, values, nulls);
+
+        int rows = batch->m_rows;
+        for (int i = 0; i < batch->m_cols; i++) {
+            ScalarVector* vec = &(batch->m_arr[i]);
+            if (nulls[i]) {
+                vec->m_rows++;
+                vec->SetNull(rows);
+                continue;
+            }
+
+            if (vec->m_desc.encoded)
+                vec->AddVar(values[i], rows);
+            else
+                vec->m_vals[rows] = values[i];
+
+            vec->m_rows++;
+        }
+
+        fsstate->next_tuple++;
+        if (fsstate->next_tuple >= fsstate->num_tuples) {
+            batch->m_rows++;
+            break;
+        }
+    }
+
+    MemoryContextSwitchTo(oldMemoryContext);
+
+    return batch;
+}
+
+/*
+ * gcReScanNormalForeignScan
+ *     Restart the scan.
+ */
+static void gcReScanNormalForeignScan(ForeignScanState* node)
+{
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+
+    /* If we haven't created the cursor yet, nothing to do. */
+    if (!fsstate->cursor_exists)
+        return;
+
+    fsstate->cursor_exists = false;
+
+    /* reconnect: release the old connection and acquire a fresh one */
+    DirectReleaseConnection(fsstate->conn, fsstate->pgxc_handle, false);
+
+    fsstate->conn = GetConnection(fsstate->serverid, &fsstate->pgxc_handle, false, fsstate->have_remote_encoding);
+    gcfdw_send_remote_query_param(fsstate);
+
+    /* Now force a fresh FETCH. */
+    fsstate->tuples = NULL;
+    fsstate->num_tuples = 0;
+    fsstate->next_tuple = 0;
+    fsstate->eof_reached = false;
+}
+
+/*
+ * gcReScanPBEForeignScan
+ *     Restart the scan.
+ */
+static void gcReScanPBEForeignScan(ForeignScanState* node)
+{
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+    char sql[64] = {0};
+    PGresult* res = NULL;
+
+    /* If we haven't created the cursor yet, nothing to do. */
+    if (!fsstate->cursor_exists)
+        return;
+
+    fsstate->cursor_exists = false;
+    errno_t rc = snprintf_s(sql, sizeof(sql), sizeof(sql) - 1, "CLOSE c%u", fsstate->cursor_number);
+    securec_check_ss(rc, "", "");
+
+    /* reconnect: release the old connection and acquire a fresh one */
+    DirectReleaseConnection(fsstate->conn, fsstate->pgxc_handle, true);
+
+    fsstate->conn = GetConnection(fsstate->serverid, &fsstate->pgxc_handle, true, fsstate->have_remote_encoding);
+    gcfdw_send_remote_query_param(fsstate);
+
+    /*
+     * We don't use a PG_TRY block here, so be careful not to throw error
+     * without releasing the PGresult.
+     */
+    res = pgfdw_exec_query(fsstate->conn, sql);
+    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
+        pgfdw_report_error(ERROR, res, fsstate->conn, true, sql);
+    }
+    PQclear(res);
+
+    /* Now force a fresh FETCH. */
+    fsstate->tuples = NULL;
+    fsstate->num_tuples = 0;
+    fsstate->next_tuple = 0;
+    fsstate->eof_reached = false;
+}
+
+/*
+ * gcEndForeignScan
+ *     Finish scanning foreign table and dispose objects used for this scan
+ */
+static void gcEndForeignScan(ForeignScanState* node)
+{
+    if (IS_PGXC_COORDINATOR) {
+        return;
+    }
+
+    GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state;
+
+    /* if fsstate is NULL, we are in EXPLAIN; nothing to do */
+    if (fsstate == NULL)
+        return;
+
+    /* Close the cursor if open, to prevent accumulation of cursors */
+    if (fsstate->cursor_exists && fsstate->numParams > 0) {
+        close_cursor(fsstate->conn, fsstate->cursor_number);
+        DirectReleaseConnection(fsstate->conn, fsstate->pgxc_handle, true);
+    } else {
+        DirectReleaseConnection(fsstate->conn, fsstate->pgxc_handle, false);
+    }
+
+    /* MemoryContexts will be deleted automatically. */
+}
+
+/*
+ * gcExplainForeignScan
+ *     Produce extra output for EXPLAIN of a ForeignScan on a foreign table
+ */
+static void gcExplainForeignScan(ForeignScanState* node, ExplainState* es)
+{
+    List* fdw_private = NIL;
+    char* sql = NULL;
+
+    fdw_private = ((ForeignScan*)node->ss.ps.plan)->fdw_private;
+
+    /*
+     * Add remote query, when VERBOSE option is specified.
+     */
+    if (es->verbose) {
+        sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
+
+        if (t_thrd.explain_cxt.explain_perf_mode != EXPLAIN_NORMAL && es->planinfo->m_verboseInfo) {
+            appendStringInfoSpaces(es->planinfo->m_verboseInfo->info_str, 8);
+            appendStringInfo(es->planinfo->m_verboseInfo->info_str, "Remote SQL: %s\n", sql);
+        } else {
+            ExplainPropertyText("Remote SQL", sql, es);
+        }
+    }
+}
+
+static void GetJoinRelCostInfo(
+    const GcFdwRelationInfo *fpinfo, double retrieved_rows, PlannerInfo *root, Cost *startup_cost, Cost *run_cost)
+{
+    GcFdwRelationInfo *fpinfo_i = NULL;
+    GcFdwRelationInfo *fpinfo_o = NULL;
+    QualCost join_cost;
+    QualCost remote_conds_cost;
+    double nrows;
+
+    /* For join we expect inner and outer relations set */
+    Assert(fpinfo->innerrel && fpinfo->outerrel);
+    fpinfo_i = (GcFdwRelationInfo *)fpinfo->innerrel->fdw_private;
+    fpinfo_o = (GcFdwRelationInfo *)fpinfo->outerrel->fdw_private;
+
+    /* Estimate of number of rows in cross product */
+    nrows = fpinfo_i->rows * fpinfo_o->rows;
+    /* Clamp retrieved rows estimate to at most size of cross product */
+    retrieved_rows = Min(retrieved_rows, nrows);
+
+    /*
+     * The cost of foreign join is estimated as cost of generating
+     * rows for the joining relations + cost for applying quals on the
+     * rows.
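+     *
+     * Schematically (a sketch of the arithmetic below):
+     *
+     *     startup = startup(outer) + startup(inner)
+     *             + startup(join quals) + startup(remote quals)
+     *             + startup(local quals)
+     *     run     = run(outer) + run(inner)
+     *             + rows(outer) * rows(inner) * per_tuple(join quals)
+     *             + clamped join rows * per_tuple(remote quals)
+     *             + retrieved rows * per_tuple(local quals)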
+ */ + + /* Calculate the cost of clauses pushed down to the foreign server */ + cost_qual_eval(&remote_conds_cost, fpinfo->remote_conds, root); + /* Calculate the cost of applying join clauses */ + cost_qual_eval(&join_cost, fpinfo->joinclauses, root); + + /* + * Startup cost includes startup cost of joining relations and the + * startup cost for join and other clauses. We do not include the + * startup cost specific to join strategy (e.g. setting up hash + * tables) since we do not know what strategy the foreign server + * is going to use. + */ + *startup_cost = fpinfo_i->rel_startup_cost + fpinfo_o->rel_startup_cost; + *startup_cost += join_cost.startup; + *startup_cost += remote_conds_cost.startup; + *startup_cost += fpinfo->local_conds_cost.startup; + + /* + * Run time cost includes: + * + * 1. Run time cost (total_cost - startup_cost) of relations being + * joined + * + * 2. Run time cost of applying join clauses on the cross product + * of the joining relations. + * + * 3. Run time cost of applying pushed down other clauses on the + * result of join + * + * 4. Run time cost of applying nonpushable other clauses locally + * on the result fetched from the foreign server. + */ + *run_cost = fpinfo_i->rel_total_cost - fpinfo_i->rel_startup_cost; + *run_cost += fpinfo_o->rel_total_cost - fpinfo_o->rel_startup_cost; + *run_cost += nrows * join_cost.per_tuple; + nrows = clamp_row_est(nrows * fpinfo->joinclause_sel); + *run_cost += nrows * remote_conds_cost.per_tuple; + *run_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows; +} + +/* + * gc_estimate_path_cost_size + * Get cost and size estimates for a foreign scan on given foreign relation + * either a base relation or a join between foreign relations or an upper + * relation containing foreign relations. + * + * param_join_conds are the parameterization clauses with outer relations. + * pathkeys specify the expected sort order if any for given path being costed. + * + * The function returns the cost and size estimates in p_row, p_width, + * p_startup_cost and p_total_cost variables. + */ +static void gc_estimate_path_cost_size(PlannerInfo* root, RelOptInfo* foreignrel, List* param_join_conds, + List* pathkeys, double* p_rows, int* p_width, Cost* p_startup_cost, Cost* p_total_cost) +{ + GcFdwRelationInfo* fpinfo = (GcFdwRelationInfo*)foreignrel->fdw_private; + double rows; + double retrieved_rows; + int width; + Cost startup_cost; + Cost total_cost; + Cost cpu_per_tuple; + + /* + * estimate rows using whatever statistics we have locally, in a way + * similar to ordinary tables. + */ + { + Cost run_cost = 0; + /* + * We don't support join conditions in this mode (hence, no + * parameterized paths can be made). + */ + Assert(param_join_conds == NIL); + + /* + * Use rows/width estimates made by set_baserel_size_estimates() for + * base foreign relations and set_joinrel_size_estimates() for join + * between foreign relations. + */ + rows = foreignrel->rows; + width = foreignrel->width; + + /* Back into an estimate of the number of retrieved rows. */ + retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel); + + /* + * We will come here again and again with different set of pathkeys + * that caller wants to cost. We don't need to calculate the cost of + * bare scan each time. Instead, use the costs if we have cached them + * already. 
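+         * (A negative cached value, as initialized in
+         * gcGetForeignRelSize(), means the bare-scan cost has not been
+         * computed yet.)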
+ */ + if (fpinfo->rel_startup_cost > 0 && fpinfo->rel_total_cost > 0) { + startup_cost = fpinfo->rel_startup_cost; + run_cost = fpinfo->rel_total_cost - fpinfo->rel_startup_cost; + } else if (IS_JOIN_REL(foreignrel)) { + GetJoinRelCostInfo(fpinfo, retrieved_rows, root, &startup_cost, &run_cost); + } else { + /* Clamp retrieved rows estimates to at most foreignrel->tuples. */ + retrieved_rows = Min(retrieved_rows, foreignrel->tuples); + + /* + * Cost as though this were a seqscan, which is pessimistic. We + * effectively imagine the local_conds are being evaluated + * remotely, too. + */ + startup_cost = 0; + run_cost = 0; + run_cost += u_sess->attr.attr_sql.seq_page_cost * foreignrel->pages; + + startup_cost += foreignrel->baserestrictcost.startup; + cpu_per_tuple = u_sess->attr.attr_sql.cpu_tuple_cost + foreignrel->baserestrictcost.per_tuple; + run_cost += cpu_per_tuple * foreignrel->tuples; + } + + /* + * Without remote estimates, we have no real way to estimate the cost + * of generating sorted output. It could be free if the query plan + * the remote side would have chosen generates properly-sorted output + * anyway, but in most cases it will cost something. Estimate a value + * high enough that we won't pick the sorted path when the ordering + * isn't locally useful, but low enough that we'll err on the side of + * pushing down the ORDER BY clause when it's useful to do so. + */ + if (pathkeys != NIL) { + startup_cost *= DEFAULT_FDW_SORT_MULTIPLIER; + run_cost *= DEFAULT_FDW_SORT_MULTIPLIER; + } + + total_cost = startup_cost + run_cost; + } + /* + * Cache the costs for scans without any pathkeys or parameterization + * before adding the costs for transferring data from the foreign server. + * These costs are useful for costing the join between this relation and + * another foreign relation or to calculate the costs of paths with + * pathkeys for this relation, when the costs can not be obtained from the + * foreign server. This function will be called at least once for every + * foreign relation without pathkeys and parameterization. + */ + if (pathkeys == NIL && param_join_conds == NIL) { + fpinfo->rel_startup_cost = startup_cost; + fpinfo->rel_total_cost = total_cost; + } + + /* + * Add some additional cost factors to account for connection overhead + * (fdw_startup_cost), transferring data across the network + * (fdw_tuple_cost per retrieved row), and local manipulation of the data + * (cpu_tuple_cost per retrieved row). + */ + startup_cost += fpinfo->fdw_startup_cost; + total_cost += fpinfo->fdw_startup_cost; + total_cost += fpinfo->fdw_tuple_cost * retrieved_rows; + total_cost += u_sess->attr.attr_sql.cpu_tuple_cost * retrieved_rows; + + /* Return results. */ + *p_rows = rows; + *p_width = width; + *p_startup_cost = startup_cost; + *p_total_cost = total_cost; +} + +/* + * Create cursor for node's query with current parameter values. + */ +static void create_cursor(ForeignScanState* node) +{ + GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state; + ExprContext* econtext = node->ss.ps.ps_ExprContext; + int numParams = fsstate->numParams; + const char** values = fsstate->param_values; + PGconn* conn = fsstate->conn; + StringInfoData buf; + PGresult* res = NULL; + + /* + * Construct array of query parameter values in text format. We do the + * conversions in the short-lived per-tuple context, so as not to cause a + * memory leak over repeated scans. 
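+     *
+     * For reference, the remote cursor then follows the usual lifecycle:
+     *
+     *     DECLARE c<N> CURSOR FOR <query>    -- below
+     *     FETCH <fetch_size> FROM c<N>       -- fetch_more_data()
+     *     CLOSE c<N>                         -- close_cursor()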
+ */ + if (numParams > 0) { + MemoryContext oldcontext = NULL; + + oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + + process_query_params(econtext, fsstate->param_flinfo, fsstate->param_exprs, values); + + MemoryContextSwitchTo(oldcontext); + } + + /* Construct the DECLARE CURSOR command */ + initStringInfo(&buf); + appendStringInfo(&buf, "DECLARE c%u CURSOR FOR\n%s", fsstate->cursor_number, fsstate->query); + + /* + * Notice that we pass NULL for paramTypes, thus forcing the remote server + * to infer types for all parameters. Since we explicitly cast every + * parameter (see deparse.c), the "inference" is trivial and will produce + * the desired result. This allows us to avoid assuming that the remote + * server has the same OIDs we do for the parameters' types. + */ + res = PQexecParams(conn, buf.data, numParams, NULL, values, NULL, NULL, 0); + if (res == NULL) { + pgfdw_report_error(ERROR, NULL, conn, false, buf.data); + } + + /* + * Get the result, and check for success. + * + * We don't use a PG_TRY block here, so be careful not to throw error + * without releasing the PGresult. + */ + if (PQresultStatus(res) != PGRES_COMMAND_OK) { + pgfdw_report_error(ERROR, res, conn, true, fsstate->query); + } + PQclear(res); + + /* Mark the cursor as created, and show no tuples have been retrieved */ + fsstate->cursor_exists = true; + fsstate->tuples = NULL; + fsstate->num_tuples = 0; + fsstate->next_tuple = 0; + fsstate->eof_reached = false; + + /* Clean up */ + pfree(buf.data); +} + +/* + * Fetch some more rows from the node's cursor. + */ +static void fetch_more_data(ForeignScanState* node) +{ + GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state; + PGresult* volatile res = NULL; + MemoryContext oldcontext = NULL; + + /* + * We'll store the tuples in the batch_cxt. First, flush the previous + * batch. + */ + fsstate->tuples = NULL; + MemoryContextReset(fsstate->batch_cxt); + oldcontext = MemoryContextSwitchTo(fsstate->batch_cxt); + + /* PGresult must be released before leaving this function. */ + PG_TRY(); + { + PGconn* conn = fsstate->conn; + char sql[64] = {0}; + int numrows; + int i; + + errno_t rc = snprintf_s( + sql, sizeof(sql), sizeof(sql) - 1, "FETCH %d FROM c%u", fsstate->fetch_size, fsstate->cursor_number); + securec_check_ss(rc, "", ""); + + res = pgfdw_exec_query(conn, sql); + /* On error, report the original query, not the FETCH. */ + if (PQresultStatus(res) != PGRES_TUPLES_OK) { + pgfdw_report_error(ERROR, res, conn, false, fsstate->query); + } + + /* Convert the data into HeapTuples */ + numrows = PQntuples(res); + if (numrows == 0) { + fsstate->tuples = NULL; + } else { + fsstate->tuples = (HeapTuple*)palloc0(numrows * sizeof(HeapTuple)); + } + fsstate->num_tuples = numrows; + fsstate->next_tuple = 0; + + for (i = 0; i < numrows; i++) { + Assert(IsA(node->ss.ps.plan, ForeignScan) || IsA(node->ss.ps.plan, VecForeignScan)); + + if (!fsstate->hasagg) { + fsstate->tuples[i] = make_tuple_from_result_row( + res, i, fsstate->rel, fsstate->attinmeta, fsstate->retrieved_attrs, node, fsstate->temp_cxt); + } else { + fsstate->tuples[i] = make_tuple_from_agg_result( + res, i, fsstate->rel, fsstate->attinmeta, fsstate->retrieved_attrs, node, fsstate->temp_cxt); + } + } + + /* Must be EOF if we didn't get as many tuples as we asked for. 
*/
+        fsstate->eof_reached = (numrows < fsstate->fetch_size);
+
+        PQclear(res);
+        res = NULL;
+    }
+    PG_CATCH();
+    {
+        if (res != NULL) {
+            PQclear(res);
+        }
+        PG_RE_THROW();
+    }
+    PG_END_TRY();
+
+    MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * Verify that the gc_fdw extension exists on the remote cluster.
+ */
+void pgfdw_fetch_remote_version(PGconn* conn)
+{
+    PGresult* volatile res = NULL;
+
+    /* PGresult must be released before leaving this function. */
+    PG_TRY();
+    {
+        char sql[64] = {0};
+        int numrows;
+        int result_count = 0;
+
+        errno_t rc =
+            snprintf_s(sql, sizeof(sql), sizeof(sql) - 1, "SELECT count(1) FROM pg_extension WHERE extname='gc_fdw';");
+        securec_check_ss(rc, "", "");
+
+        res = pgfdw_exec_query(conn, sql);
+        /* On error, report the failure. */
+        if (PQresultStatus(res) != PGRES_TUPLES_OK)
+            pgfdw_report_error(ERROR, res, conn, false, NULL);
+
+        /* Count the rows that matched. */
+        numrows = PQntuples(res);
+
+        result_count = atoi(PQgetvalue(res, 0, 0));
+        if (result_count <= 0) {
+            ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), errmsg("gc_fdw: the remote version does not match.")));
+        }
+
+        PQclear(res);
+        res = NULL;
+    }
+    PG_CATCH();
+    {
+        if (res != NULL)
+            PQclear(res);
+        PG_RE_THROW();
+    }
+    PG_END_TRY();
+}
+
+/*
+ * Send the local gc_fdw version and fetch the remote one.
+ */
+static int gcfdw_send_and_fetch_version(PGXCNodeAllHandles* pgxc_handles)
+{
+    StringInfoData retbuf;
+    initStringInfo(&retbuf);
+
+    pq_sendint(&retbuf, GCFDW_VERSION, 4);
+
+    PgFdwRemoteSender(pgxc_handles, retbuf.data, retbuf.len, PGFDW_GET_VERSION);
+
+    int version = 0;
+    PgFdwRemoteReceiver(pgxc_handles, &version, sizeof(PgFdwRemoteInfo));
+    Assert(version >= GCFDW_VERSION_V1R8C10);
+
+    pfree(retbuf.data);
+    retbuf.data = NULL;
+
+    return version;
+}
+
+/*
+ * Fetch remote table information.
+ */
+static void gcfdw_fetch_remote_table_info(
+    PGXCNodeAllHandles* pgxc_handles, ForeignTable* table, void* remote_info, PgFdwMessageTag tag)
+{
+    const char* nspname = NULL;
+    const char* relname = NULL;
+    ListCell* lc = NULL;
+
+    /*
+     * Use value of FDW options if any, instead of the name of object itself.
+     */
+    foreach (lc, table->options) {
+        DefElem* def = (DefElem*)lfirst(lc);
+
+        if (strcmp(def->defname, "schema_name") == 0)
+            nspname = defGetString(def);
+        else if (strcmp(def->defname, "table_name") == 0)
+            relname = defGetString(def);
+    }
+
+    Relation rel = heap_open(table->relid, AccessShareLock);
+
+    if (nspname == NULL)
+        nspname = get_namespace_name(RelationGetNamespace(rel));
+    if (relname == NULL)
+        relname = RelationGetRelationName(rel);
+
+    Assert(nspname != NULL && relname != NULL);
+
+    int nsplen = strlen(nspname);
+    int rellen = strlen(relname);
+
+    /*
+     * write schema name and table name.
+     */
+    StringInfoData retbuf;
+    initStringInfo(&retbuf);
+
+    pq_sendint(&retbuf, nsplen, 1);
+    pq_sendbytes(&retbuf, nspname, nsplen);
+    pq_sendint(&retbuf, rellen, 1);
+    pq_sendbytes(&retbuf, relname, rellen);
+
+    /*
+     * write column name and column type name.
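+     *
+     * Per-column wire format, matching the pq_send calls below:
+     * int4 attribute-name length, attribute-name bytes, int4 type-name
+     * length, type-name bytes, int4 atttypmod.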
+ */ + TupleDesc tupdesc = RelationGetDescr(rel); + int att_name_len; + int type_name_len; + char* type_name = NULL; + + pq_sendint(&retbuf, tupdesc->natts, 4); + + for (int i = 0; i < tupdesc->natts; i++) { + att_name_len = strlen(tupdesc->attrs[i]->attname.data); + pq_sendint(&retbuf, att_name_len, 4); + pq_sendbytes(&retbuf, tupdesc->attrs[i]->attname.data, att_name_len); + + Assert(InvalidOid != tupdesc->attrs[i]->atttypid); + + type_name = get_typename(tupdesc->attrs[i]->atttypid); + type_name_len = strlen(type_name); + pq_sendint(&retbuf, type_name_len, 4); + pq_sendbytes(&retbuf, type_name, type_name_len); + pq_sendint(&retbuf, tupdesc->attrs[i]->atttypmod, 4); + pfree(type_name); + } + + relation_close(rel, AccessShareLock); + + PgFdwRemoteSender(pgxc_handles, retbuf.data, retbuf.len, tag); + + if (tag == PGFDW_GET_TABLE_INFO) { + PgFdwRemoteInfo* info = (PgFdwRemoteInfo*)remote_info; + PgFdwRemoteReceiver(pgxc_handles, info, sizeof(PgFdwRemoteInfo)); + } else /* For analyze */ + { + PGFDWTableAnalyze* info = (PGFDWTableAnalyze*)remote_info; + PgFdwRemoteReceiver(pgxc_handles, info, sizeof(PGFDWTableAnalyze)); + } + + pfree(retbuf.data); + retbuf.data = NULL; +} + +/* + * get foreign table encode from option + */ +static bool gcfdw_get_table_encode(ForeignTable* table, int* remote_encoding) +{ + bool have_remote_encoding = false; + ListCell* lc = NULL; + DefElem* def = NULL; + foreach (lc, table->options) { + def = (DefElem*)lfirst(lc); + + if (strcmp(def->defname, "encoding") == 0) { + *remote_encoding = pg_char_to_encoding(defGetString(def)); + + if (*remote_encoding == -1) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("%s is not a valid encoding name", defGetString(def)))); + } + + have_remote_encoding = true; + } + } + + return have_remote_encoding; +} + +/* + * Send remote encode information. + */ +static void gcfdw_send_remote_encode(PGXCNodeAllHandles* pgxc_handles, int remote_encoding) +{ + StringInfoData retbuf; + initStringInfo(&retbuf); + + pq_sendint(&retbuf, GetDatabaseEncoding(), 4); + pq_sendint(&retbuf, remote_encoding, 4); + + PgFdwRemoteSender(pgxc_handles, retbuf.data, retbuf.len, PGFDW_GET_ENCODE); + PgFdwRemoteReceiver(pgxc_handles, NULL, 0); + + pfree(retbuf.data); + retbuf.data = NULL; +} + +/* + * Send remote query param. + */ +static void gcfdw_send_remote_query_param(GcFdwScanState* fsstate) +{ + if (u_sess->pgxc_cxt.gc_fdw_run_version >= GCFDW_VERSION_V1R8C10_1 && fsstate->have_remote_encoding == true) { + gcfdw_send_remote_encode(fsstate->pgxc_handle, fsstate->remote_encoding); + } + + StringInfoData retbuf; + initStringInfo(&retbuf); + + pq_sendint(&retbuf, fsstate->current_idx, 4); + pq_sendint(&retbuf, fsstate->cycle_idx, 4); + + PgFdwSendSnapshot(&retbuf, fsstate->remoteinfo->snapshot, fsstate->remoteinfo->snapsize); + + PgFdwRemoteSender(fsstate->pgxc_handle, retbuf.data, retbuf.len, PGFDW_QUERY_PARAM); + PgFdwRemoteReceiver(fsstate->pgxc_handle, NULL, 0); + + pfree(retbuf.data); + retbuf.data = NULL; +} + +/* + * Get datanode idx. 
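+ *
+ * On a datanode, cycle_idx is the number of executing nodes times the
+ * SMP degree, and current_idx is this node's slot within that cycle
+ * (-1 when the node does not participate). Elsewhere a single slot,
+ * 0 of 1, is used.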
+ */ +static void gcfdw_get_datanode_idx(ForeignScanState* node) +{ + Plan* fsplan = node->ss.ps.plan; + GcFdwScanState* fsstate = (GcFdwScanState*)node->fdw_state; + + if (IS_PGXC_DATANODE) { + fsstate->max_idx = fsstate->num_datanode; + fsstate->cycle_idx = list_length(fsplan->exec_nodes->nodeList) * SET_DOP(fsplan->dop); + fsstate->current_idx = 0; + + bool found = false; + ListCell* lc = NULL; + foreach (lc, fsplan->exec_nodes->nodeList) { + if (lfirst_int(lc) == u_sess->pgxc_cxt.PGXCNodeId) { + found = true; + break; + } + fsstate->current_idx++; + } + + if (found) { + fsstate->current_idx = + u_sess->stream_cxt.smp_id * list_length(fsplan->exec_nodes->nodeList) + fsstate->current_idx; + } else { + fsstate->current_idx = -1; + } + } else { + fsstate->max_idx = 1; + fsstate->cycle_idx = 1; + fsstate->current_idx = 0; + } + + Assert(fsstate->current_idx < fsstate->cycle_idx); +} + +/* + * Force assorted GUC parameters to settings that ensure that we'll output + * data values in a form that is unambiguous to the remote server. + * + * This is rather expensive and annoying to do once per row, but there's + * little choice if we want to be sure values are transmitted accurately; + * we can't leave the settings in place between rows for fear of affecting + * user-visible computations. + * + * We use the equivalent of a function SET option to allow the settings to + * persist only until the caller calls reset_transmission_modes(). If an + * error is thrown in between, guc.c will take care of undoing the settings. + * + * The return value is the nestlevel that must be passed to + * reset_transmission_modes() to undo things. + */ +int set_transmission_modes(void) +{ + int nestlevel = NewGUCNestLevel(); + + /* + * The values set here should match what pg_dump does. See also + * configure_remote_session in connection.c. + */ + if (u_sess->time_cxt.DateStyle != USE_ISO_DATES) + (void)set_config_option("datestyle", "ISO", PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, false); + if (u_sess->attr.attr_common.IntervalStyle != INTSTYLE_POSTGRES) + (void)set_config_option( + "intervalstyle", "postgres", PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, false); + if (u_sess->attr.attr_common.extra_float_digits < 3) + (void)set_config_option("extra_float_digits", "3", PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, false); + + return nestlevel; +} + +/* + * Undo the effects of set_transmission_modes(). + */ +void reset_transmission_modes(int nestlevel) +{ + AtEOXact_GUC(true, nestlevel); +} + +/* + * Utility routine to close a cursor. + */ +static void close_cursor(PGconn* conn, unsigned int cursor_number) +{ + char sql[64] = {0}; + PGresult* res = NULL; + + errno_t rc = snprintf_s(sql, sizeof(sql), sizeof(sql) - 1, "CLOSE c%u", cursor_number); + securec_check_ss(rc, "", ""); + + /* + * We don't use a PG_TRY block here, so be careful not to throw error + * without releasing the PGresult. + */ + res = pgfdw_exec_query(conn, sql); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + pgfdw_report_error(ERROR, res, conn, true, sql); + PQclear(res); +} + +/* + * Prepare for processing of parameters used in remote query. + */ +static void prepare_query_params(PlanState* node, List* fdw_exprs, int numParams, FmgrInfo** param_flinfo, + List** param_exprs, const char*** param_values) +{ + int i = 0; + ListCell* lc = NULL; + + Assert(numParams > 0); + + /* Prepare for output conversion of parameters used in remote query. 
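+ * One FmgrInfo is allocated per parameter and filled from each parameter + * expression's result type via getTypeOutputInfo()/fmgr_info().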
*/ + *param_flinfo = (FmgrInfo *)palloc0(sizeof(FmgrInfo) * numParams); + + foreach (lc, fdw_exprs) { + Node *param_expr = (Node *)lfirst(lc); + Oid typefnoid = InvalidOid; + bool isvarlena = false; + + getTypeOutputInfo(exprType(param_expr), &typefnoid, &isvarlena); + fmgr_info(typefnoid, &(*param_flinfo)[i]); + i++; + } + + /* + * Prepare remote-parameter expressions for evaluation. (Note: in + * practice, we expect that all these expressions will be just Params, so + * we could possibly do something more efficient than using the full + * expression-eval machinery for this. But probably there would be little + * benefit, and it'd require gc_fdw to know more than is desirable + * about Param evaluation.) + */ + *param_exprs = (List *)ExecInitExpr((Expr *)fdw_exprs, node); + + /* Allocate buffer for text form of query parameters. */ + *param_values = (const char **)palloc0(numParams * sizeof(char *)); +} + +/* + * Construct array of query parameter values in text format. + */ +static void process_query_params( + ExprContext* econtext, FmgrInfo* param_flinfo, List* param_exprs, const char** param_values) +{ + int i = 0; + ListCell* lc = NULL; + + int nestlevel = set_transmission_modes(); + foreach (lc, param_exprs) { + ExprState *expr_state = (ExprState *)lfirst(lc); + Datum expr_value; + bool isNull = false; + + /* Evaluate the parameter expression */ + expr_value = ExecEvalExpr(expr_state, econtext, &isNull, NULL); + + /* + * Get string representation of each parameter value by invoking + * type-specific output function, unless the value is null. + */ + if (isNull) { + param_values[i] = NULL; + } else { + param_values[i] = OutputFunctionCall(&param_flinfo[i], expr_value); + } + i++; + } + + reset_transmission_modes(nestlevel); +} + +/* + * gcAnalyzeForeignTable + * Test whether analyzing this foreign table is supported + */ +static bool gcAnalyzeForeignTable(Relation relation, AcquireSampleRowsFunc* func, BlockNumber* totalpages, + void* additionalData, bool estimate_table_rownum) +{ + PGFDWTableAnalyze* info = (PGFDWTableAnalyze*)additionalData; + Oid relid = info->relid; + ForeignTable* table = GetForeignTable(relid); + + int remote_encoding; + bool have_remote_encoding = gcfdw_get_table_encode(table, &remote_encoding); + + (void)GetConnection(table->serverid, &info->pgxc_handles, false, have_remote_encoding); + + u_sess->pgxc_cxt.gc_fdw_run_version = gcfdw_send_and_fetch_version(info->pgxc_handles); + if (u_sess->pgxc_cxt.gc_fdw_run_version >= GCFDW_VERSION_V1R8C10_1 && have_remote_encoding == true) { + gcfdw_send_remote_encode(info->pgxc_handles, remote_encoding); + } + + gcfdw_fetch_remote_table_info(info->pgxc_handles, table, info, PGFDW_ANALYZE_TABLE); + + return true; +} + +/* + * Create a tuple from the specified row of the PGresult. + * + * rel is the local representation of the foreign table, attinmeta is + * conversion data for the rel's tupdesc, and retrieved_attrs is an + * integer list of the table column numbers present in the PGresult. + * temp_context is a working context that can be reset after each tuple.
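+ * Ordinary columns are converted with their input functions; of the system + * columns only ctid and oid (negative attnums) are recognized, and anything + * else in the result is ignored.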
+ */ +static HeapTuple make_tuple_from_result_row(PGresult* res, int row, Relation rel, AttInMetadata* attinmeta, + List* retrieved_attrs, ForeignScanState* fsstate, MemoryContext temp_context) +{ + HeapTuple tuple = NULL; + TupleDesc tupdesc = NULL; + Datum* values = NULL; + bool* nulls = NULL; + ItemPointer ctid = NULL; + Oid oid = InvalidOid; + ConversionLocation errpos; + ErrorContextCallback errcallback; + MemoryContext oldcontext = NULL; + ListCell* lc = NULL; + int j = 0; + + Assert(row < PQntuples(res)); + + /* + * Do the following work in a temp context that we reset after each tuple. + * This cleans up not only the data we have direct access to, but any + * cruft the I/O functions might leak. + */ + oldcontext = MemoryContextSwitchTo(temp_context); + + if (rel) { + tupdesc = RelationGetDescr(rel); + } else { + GcFdwScanState *fdw_sstate = NULL; + + Assert(fsstate != NULL); + fdw_sstate = (GcFdwScanState *)fsstate->fdw_state; + tupdesc = fdw_sstate->tupdesc; + } + + values = (Datum*)palloc0(tupdesc->natts * sizeof(Datum)); + nulls = (bool*)palloc(tupdesc->natts * sizeof(bool)); + /* Initialize to nulls for any columns not present in result */ + errno_t rc = memset_s(nulls, tupdesc->natts * sizeof(bool), true, tupdesc->natts * sizeof(bool)); + securec_check(rc, "\0", "\0"); + + /* + * Set up and install callback to report where conversion error occurs. + */ + errpos.rel = rel; + errpos.cur_attno = 0; + errpos.fsstate = fsstate; + errcallback.callback = conversion_error_callback; + errcallback.arg = (void*)&errpos; + errcallback.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &errcallback; + + /* + * i indexes columns in the relation, j indexes columns in the PGresult. + */ + foreach (lc, retrieved_attrs) { + int i = lfirst_int(lc); + char* valstr = NULL; + + /* fetch next column's textual value */ + if (PQgetisnull(res, row, j)) { + valstr = NULL; + } else { + valstr = PQgetvalue(res, row, j); + } + + /* + * convert value to internal representation + * + * Note: we ignore system columns other than ctid and oid in result + */ + errpos.cur_attno = i; + if (i > 0) { + /* ordinary column */ + Assert(i <= tupdesc->natts); + if (valstr == NULL) { + nulls[i - 1] = true; + } else { + nulls[i - 1] = false; + } + /* Apply the input function even to nulls, to support domains */ + values[i - 1] = InputFunctionCall( + &attinmeta->attinfuncs[i - 1], valstr, attinmeta->attioparams[i - 1], attinmeta->atttypmods[i - 1]); + } else if (i == SelfItemPointerAttributeNumber) { + /* ctid */ + if (valstr != NULL) { + Datum datum = DirectFunctionCall1(tidin, CStringGetDatum(valstr)); + ctid = (ItemPointer)DatumGetPointer(datum); + } + } else if (i == ObjectIdAttributeNumber) { + /* oid */ + if (valstr != NULL) { + Datum datum = DirectFunctionCall1(oidin, CStringGetDatum(valstr)); + oid = DatumGetObjectId(datum); + } + } + errpos.cur_attno = 0; + + j++; + } + + /* Uninstall error context callback. */ + t_thrd.log_cxt.error_context_stack = errcallback.previous; + + /* + * Check we got the expected number of columns. Note: j == 0 and + * PQnfields == 1 is expected, since deparse emits a NULL if no columns. + */ + if (j > 0 && j != PQnfields(res)) + ereport(ERROR, + (errcode(ERRCODE_OPERATE_RESULT_NOT_EXPECTED), + errmsg("remote query result does not match the foreign table"))); + + /* + * Build the result tuple in caller's memory context. 
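+ * (The values/nulls arrays themselves still live in temp_context and are + * reclaimed by the MemoryContextReset() below.)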
+ */ + MemoryContextSwitchTo(oldcontext); + + tuple = heap_form_tuple(tupdesc, values, nulls); + + /* + * If we have a CTID to return, install it in both t_self and t_ctid. + * t_self is the normal place, but if the tuple is converted to a + * composite Datum, t_self will be lost; setting t_ctid allows CTID to be + * preserved during EvalPlanQual re-evaluations (see ROW_MARK_COPY code). + */ + if (ctid) + tuple->t_self = tuple->t_data->t_ctid = *ctid; + + /* + * Stomp on the xmin, xmax, and cmin fields from the tuple created by + * heap_form_tuple. heap_form_tuple actually creates the tuple with + * DatumTupleFields, not HeapTupleFields, but the executor expects + * HeapTupleFields and will happily extract system columns on that + * assumption. If we don't do this then, for example, the tuple length + * ends up in the xmin field, which isn't what we want. + */ + + /* + * If we have an OID to return, install it. + */ + if (OidIsValid(oid)) + HeapTupleSetOid(tuple, oid); + + /* Clean up */ + MemoryContextReset(temp_context); + + return tuple; +} + +static HeapTuple make_tuple_from_agg_result(PGresult* res, int row, Relation rel, AttInMetadata* attinmeta, + List* retrieved_attrs, ForeignScanState* fsstate, MemoryContext temp_context) +{ + HeapTuple tuple = NULL; + TupleDesc tupdesc = NULL; + Datum* values = NULL; + bool* nulls = NULL; + MemoryContext oldcontext = NULL; + int j = 0; + + Assert(row < PQntuples(res)); + + /* + * Do the following work in a temp context that we reset after each tuple. + * This cleans up not only the data we have direct access to, but any + * cruft the I/O functions might leak. + */ + oldcontext = MemoryContextSwitchTo(temp_context); + + GcFdwScanState *fdw_sstate = NULL; + + Assert(fsstate != NULL); + fdw_sstate = (GcFdwScanState *)fsstate->fdw_state; + tupdesc = fdw_sstate->tupdesc; + + values = (Datum *)palloc0(tupdesc->natts * sizeof(Datum)); + nulls = (bool *)palloc(tupdesc->natts * sizeof(bool)); + + /* Initialize to nulls for any columns not present in result */ + errno_t rc = memset_s(nulls, tupdesc->natts * sizeof(bool), true, tupdesc->natts * sizeof(bool)); + securec_check(rc, "\0", "\0"); + + /* + * i indexes columns in the relation, j indexes columns in the PGresult. + */ + for (int i = 1; i <= tupdesc->natts; i++) { + char *valstr = NULL; + + /* fetch next column's textual value */ + if (PQgetisnull(res, row, j)) { + valstr = NULL; + } else { + valstr = PQgetvalue(res, row, j); + } + + nulls[i - 1] = (valstr == NULL); + + /* Apply the input function even to nulls, to support domains */ + values[i - 1] = InputFunctionCall( + &attinmeta->attinfuncs[i - 1], valstr, attinmeta->attioparams[i - 1], attinmeta->atttypmods[i - 1]); + j++; + } + + /* + * Check we got the expected number of columns. Note: j == 0 and + * PQnfields == 1 is expected, since deparse emits a NULL if no columns. + */ + if (j > 0 && j != PQnfields(res)) + ereport(ERROR, + (errcode(ERRCODE_OPERATE_RESULT_NOT_EXPECTED), + errmsg("remote query result does not match the foreign table"))); + + /* + * Build the result tuple in caller's memory context. + */ + MemoryContextSwitchTo(oldcontext); + + tuple = heap_form_tuple(tupdesc, values, nulls); + + /* + * Stomp on the xmin, xmax, and cmin fields from the tuple created by + * heap_form_tuple. heap_form_tuple actually creates the tuple with + * DatumTupleFields, not HeapTupleFields, but the executor expects + * HeapTupleFields and will happily extract system columns on that + * assumption. 
If we don't do this then, for example, the tuple length + * ends up in the xmin field, which isn't what we want. + */ + + /* Clean up */ + MemoryContextReset(temp_context); + + return tuple; +} + +/* + * Callback function invoked when an error occurs during column value + * conversion. Prints the names of the column and relation. + */ +static void conversion_error_callback(void *arg) +{ + const char *attname = NULL; + const char *relname = NULL; + bool is_wholerow = false; + ConversionLocation *errpos = (ConversionLocation *)arg; + + if (errpos->rel) { + /* error occurred in a scan against a foreign table */ + TupleDesc tupdesc = RelationGetDescr(errpos->rel); + + if (errpos->cur_attno > 0 && errpos->cur_attno <= tupdesc->natts) { + attname = NameStr(tupdesc->attrs[errpos->cur_attno - 1]->attname); + } else if (errpos->cur_attno == SelfItemPointerAttributeNumber) { + attname = "ctid"; + } else if (errpos->cur_attno == ObjectIdAttributeNumber) { + attname = "oid"; + } + + relname = RelationGetRelationName(errpos->rel); + } else { + /* error occurred in a scan against a foreign join */ + errcontext("processing expression at position %d in select list", errpos->cur_attno); + } + + if (relname != NULL) { + if (is_wholerow) { + errcontext("whole-row reference to foreign table \"%s\"", relname); + } else if (attname != NULL) { + errcontext("column \"%s\" of foreign table \"%s\"", attname, relname); + } + } +} + +/* + * Find an equivalence class member expression, all of whose Vars come from + * the indicated relation. + */ +extern Expr* find_em_expr_for_rel(EquivalenceClass* ec, RelOptInfo* rel) +{ + ListCell* lc_em = NULL; + + foreach (lc_em, ec->ec_members) { + EquivalenceMember* em = (EquivalenceMember*)lfirst(lc_em); + + if (bms_is_subset(em->em_relids, rel->relids)) { + /* + * If there is more than one equivalence member whose Vars are + * taken entirely from this relation, we'll be content to choose + * any one of those. + */ + return em->em_expr; + } + } + + /* We didn't find any suitable equivalence class expression */ + return NULL; +} + +/* + * gcValidateTableDef + * Check create/alter foreign table + */ +static void gcValidateTableDef(Node* Obj) +{ + if (NULL == Obj) + return; + + switch (nodeTag(Obj)) { + case T_AlterTableStmt: { + List* cmds = ((AlterTableStmt*)Obj)->cmds; + ListCell* lcmd = NULL; + + foreach (lcmd, cmds) { + AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); + + if (cmd->subtype != AT_AlterColumnType && cmd->subtype != AT_GenericOptions && + cmd->subtype != AT_AlterColumnGenericOptions && cmd->subtype != AT_ChangeOwner && + cmd->subtype != AT_AddNodeList && cmd->subtype != AT_DeleteNodeList && + cmd->subtype != AT_UpdateSliceLike) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("Unsupported feature"), + errdetail("target table is a foreign table"))); + } + + if (cmd->subtype == AT_GenericOptions && nodeTag(cmd->def) == T_List) { + List* defs = (List*)cmd->def; + ListCell* lc = NULL; + + foreach (lc, defs) { + DefElem* def = (DefElem*)lfirst(lc); + if (def->defaction == DEFELEM_ADD || def->defaction == DEFELEM_DROP) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("Unsupported feature"), + errdetail("target table is a foreign table"))); + } + } + } + } + break; + } + case T_CreateForeignTableStmt: { + DistributeBy* DisByOp = ((CreateStmt*)Obj)->distributeby; + + /* A node started in isRestoreMode (when adding a node) does not register distributeby + * info in pgxc_class.
So the distributeby clause is not needed here. + */ + if (IS_PGXC_COORDINATOR && !isRestoreMode) { + if (NULL == DisByOp) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("Need DISTRIBUTE BY clause for the foreign table."))); + } + + if (DISTTYPE_ROUNDROBIN != DisByOp->disttype) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("Unsupported distribute type."), + errdetail("Supported option values are \"roundrobin\"."))); + } + + if (((CreateForeignTableStmt*)Obj)->part_state) { + /* gc_fdw foreign tables do not support partitioned tables. */ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("The gc_fdw partitioned foreign table is not supported."))); + } + } + + /* If an error_relation is specified for a gc_fdw foreign table, report that it is not supported. */ + if (((CreateForeignTableStmt*)Obj)->error_relation != NULL && + IsA(((CreateForeignTableStmt*)Obj)->error_relation, RangeVar)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("The error_relation of the foreign table is not supported."))); + } + + /* check write only */ + if (((CreateForeignTableStmt*)Obj)->write_only) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("Write only is not supported."))); + } + + /* check optLogRemote and optRejectLimit */ + if (CheckForeignExtOption(((CreateForeignTableStmt*)Obj)->extOptions, optLogRemote)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("The REMOTE LOG of gc_fdw foreign table is not supported."))); + } + + if (CheckForeignExtOption(((CreateForeignTableStmt*)Obj)->extOptions, optRejectLimit)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COOP_ANALYZE), + errmsg("The PER NODE REJECT LIMIT of gc_fdw foreign table is not supported."))); + } + + break; + } + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmodule(MOD_COOP_ANALYZE), + errmsg("unrecognized node type: %d", (int)nodeTag(Obj)))); + } +} + +/* + * @Description: Check whether log_remote or reject_limit appears in the ext options + * @IN extOptList: The foreign table option list.
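+ * @IN str: The option name to search for (optLogRemote or optRejectLimit).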
+ * @Return: true if the option list contains log_remote or reject_limit, false otherwise + * @See also: + */ +static bool CheckForeignExtOption(List* extOptList, const char* str) +{ + bool CheckExtOption = false; + ListCell* cell = NULL; + foreach (cell, extOptList) { + DefElem* Opt = (DefElem*)lfirst(cell); + + if (0 == pg_strcasecmp(Opt->defname, str)) { + CheckExtOption = true; + break; + } + } + + return CheckExtOption; +} + +static bool GcFdwCanSkip(GcFdwScanState* fsstate) +{ + if (fsstate->current_idx == -1 || fsstate->current_idx >= fsstate->max_idx) { + return true; + } + + if (fsstate->remoteinfo->reltype == LOCATOR_TYPE_REPLICATED && fsstate->current_idx != 0) { + return true; + } + + return false; +} + +static void GcFdwCopyRemoteInfo(PgFdwRemoteInfo* new_remote_info, PgFdwRemoteInfo* ori_remote_info) +{ + new_remote_info->reltype = ori_remote_info->reltype; + new_remote_info->datanodenum = ori_remote_info->datanodenum; + + errno_t rc = memcpy_s((char*)new_remote_info->snapshot, + new_remote_info->snapsize, + (char*)ori_remote_info->snapshot, + ori_remote_info->snapsize); + + if (rc != 0) { + Assert(0); + } + + securec_check_c(rc, "\0", "\0"); +} + +bool hasSpecialArrayType(TupleDesc desc) +{ + for (int i = 0; i < desc->natts; i++) { + Oid typoid = desc->attrs[i]->atttypid; + + if (INT8ARRAYOID == typoid || FLOAT8ARRAYOID == typoid || FLOAT4ARRAYOID == typoid || NUMERICARRAY == typoid) { + return true; + } + } + + return false; +} + +// end of file diff --git a/contrib/gc_fdw/gc_fdw--1.0.sql b/contrib/gc_fdw/gc_fdw--1.0.sql index b0fa44999..81f5f6898 100644 --- a/contrib/gc_fdw/gc_fdw--1.0.sql +++ b/contrib/gc_fdw/gc_fdw--1.0.sql @@ -3,12 +3,12 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION gc_fdw" to load this file. \quit -CREATE FUNCTION gc_fdw_handler() +CREATE FUNCTION pg_catalog.gc_fdw_handler() RETURNS fdw_handler AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; -CREATE FUNCTION gc_fdw_validator(text[], oid) +CREATE FUNCTION pg_catalog.gc_fdw_validator(text[], oid) RETURNS void AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; diff --git a/contrib/hdfs_fdw/hdfs_fdw--1.0.sql b/contrib/hdfs_fdw/hdfs_fdw--1.0.sql index cd58893ad..f098627e0 100644 --- a/contrib/hdfs_fdw/hdfs_fdw--1.0.sql +++ b/contrib/hdfs_fdw/hdfs_fdw--1.0.sql @@ -3,12 +3,12 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION hdfs_fdw" to load this file.
\quit -CREATE FUNCTION hdfs_fdw_handler() +CREATE FUNCTION pg_catalog.hdfs_fdw_handler() RETURNS fdw_handler AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; -CREATE FUNCTION hdfs_fdw_validator(text[], oid) +CREATE FUNCTION pg_catalog.hdfs_fdw_validator(text[], oid) RETURNS void AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; diff --git a/contrib/hdfs_fdw/hdfs_fdw.cpp b/contrib/hdfs_fdw/hdfs_fdw.cpp index 59fcb7dc1..be0899370 100644 --- a/contrib/hdfs_fdw/hdfs_fdw.cpp +++ b/contrib/hdfs_fdw/hdfs_fdw.cpp @@ -363,8 +363,12 @@ void serverOptionValidator(List* ServerOptionList) */ switch (serverType) { case T_OBS_SERVER: { +#ifndef ENABLE_LITE_MODE checkOptionNameValidity(ServerOptionList, OBS_SERVER_OPTION); break; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } case T_HDFS_SERVER: { FEATURE_NOT_PUBLIC_ERROR("HDFS is not yet supported."); @@ -405,6 +409,7 @@ void serverOptionValidator(List* ServerOptionList) ServerOptionCheckSet(ServerOptionList, serverType, addressFound, cfgPathFound, akFound, sakFound, encrypt, userNameFound, passWordFound, regionFound, hostName, ak, sk, regionStr); +#ifndef ENABLE_LITE_MODE if (T_OBS_SERVER == serverType) { if (addressFound && regionFound) { ereport(ERROR, @@ -442,6 +447,8 @@ void serverOptionValidator(List* ServerOptionList) checkOBSServerValidity(URL, ak, sk, encrypt); } } +#endif + if (T_HDFS_SERVER == serverType && !cfgPathFound) { ereport(ERROR, (errcode(ERRCODE_FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), @@ -1434,6 +1441,7 @@ static void HdfsEndForeignScan(ForeignScanState* scanState) } } +#ifdef ENABLE_LLVM_COMPILE /* * LLVM optimization information should be shown. We check the query * uses LLVM optimization or not. @@ -1502,6 +1510,7 @@ static void HdfsEndForeignScan(ForeignScanState* scanState) } } } +#endif /* clears all file related memory */ if (NULL != executionState->fileReader) { diff --git a/contrib/hdfs_fdw/scheduler.cpp b/contrib/hdfs_fdw/scheduler.cpp index a3208f291..647ce825f 100644 --- a/contrib/hdfs_fdw/scheduler.cpp +++ b/contrib/hdfs_fdw/scheduler.cpp @@ -716,7 +716,11 @@ List* CNSchedulingForAnalyze(unsigned int* totalFilesNum, unsigned int* numOfDns if (isglbstats) { if (IS_OBS_CSV_TXT_FOREIGN_TABLE(foreignTableId)) { /* for dist obs foreign table.*/ +#ifndef ENABLE_LITE_MODE allTask = CNSchedulingForDistOBSFt(foreignTableId); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { if (rel_loc_info == NULL) { ereport(ERROR, diff --git a/contrib/hstore/hstore--1.0.sql b/contrib/hstore/hstore--1.0.sql index aa20c8f9a..549710a3f 100644 --- a/contrib/hstore/hstore--1.0.sql +++ b/contrib/hstore/hstore--1.0.sql @@ -5,22 +5,22 @@ CREATE TYPE hstore; -CREATE FUNCTION hstore_in(cstring) +CREATE FUNCTION pg_catalog.hstore_in(cstring) RETURNS hstore AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION hstore_out(hstore) +CREATE FUNCTION pg_catalog.hstore_out(hstore) RETURNS cstring AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION hstore_recv(internal) +CREATE FUNCTION pg_catalog.hstore_recv(internal) RETURNS hstore AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION hstore_send(hstore) +CREATE FUNCTION pg_catalog.hstore_send(hstore) RETURNS bytea AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -34,12 +34,12 @@ CREATE TYPE hstore ( STORAGE = extended ); -CREATE FUNCTION hstore_version_diag(hstore) +CREATE FUNCTION pg_catalog.hstore_version_diag(hstore) RETURNS integer AS 'MODULE_PATHNAME','hstore_version_diag' LANGUAGE C STRICT 
IMMUTABLE NOT FENCED; -CREATE FUNCTION fetchval(hstore,text) +CREATE FUNCTION pg_catalog.fetchval(hstore,text) RETURNS text AS 'MODULE_PATHNAME','hstore_fetchval' LANGUAGE C STRICT IMMUTABLE; @@ -50,7 +50,7 @@ CREATE OPERATOR -> ( PROCEDURE = fetchval ); -CREATE FUNCTION slice_array(hstore,text[]) +CREATE FUNCTION pg_catalog.slice_array(hstore,text[]) RETURNS text[] AS 'MODULE_PATHNAME','hstore_slice_to_array' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -61,17 +61,17 @@ CREATE OPERATOR -> ( PROCEDURE = slice_array ); -CREATE FUNCTION slice(hstore,text[]) +CREATE FUNCTION pg_catalog.slice(hstore,text[]) RETURNS hstore AS 'MODULE_PATHNAME','hstore_slice_to_hstore' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION isexists(hstore,text) +CREATE FUNCTION pg_catalog.isexists(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION exist(hstore,text) +CREATE FUNCTION pg_catalog.exist(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -84,7 +84,7 @@ CREATE OPERATOR ? ( JOIN = contjoinsel ); -CREATE FUNCTION exists_any(hstore,text[]) +CREATE FUNCTION pg_catalog.exists_any(hstore,text[]) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists_any' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -97,7 +97,7 @@ CREATE OPERATOR ?| ( JOIN = contjoinsel ); -CREATE FUNCTION exists_all(hstore,text[]) +CREATE FUNCTION pg_catalog.exists_all(hstore,text[]) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists_all' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -110,27 +110,27 @@ CREATE OPERATOR ?& ( JOIN = contjoinsel ); -CREATE FUNCTION isdefined(hstore,text) +CREATE FUNCTION pg_catalog.isdefined(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_defined' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION defined(hstore,text) +CREATE FUNCTION pg_catalog.defined(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_defined' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION delete(hstore,text) +CREATE FUNCTION pg_catalog.delete(hstore,text) RETURNS hstore AS 'MODULE_PATHNAME','hstore_delete' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION delete(hstore,text[]) +CREATE FUNCTION pg_catalog.delete(hstore,text[]) RETURNS hstore AS 'MODULE_PATHNAME','hstore_delete_array' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION delete(hstore,hstore) +CREATE FUNCTION pg_catalog.delete(hstore,hstore) RETURNS hstore AS 'MODULE_PATHNAME','hstore_delete_hstore' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -153,7 +153,7 @@ CREATE OPERATOR - ( PROCEDURE = delete ); -CREATE FUNCTION hs_concat(hstore,hstore) +CREATE FUNCTION pg_catalog.hs_concat(hstore,hstore) RETURNS hstore AS 'MODULE_PATHNAME','hstore_concat' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -164,12 +164,12 @@ CREATE OPERATOR || ( PROCEDURE = hs_concat ); -CREATE FUNCTION hs_contains(hstore,hstore) +CREATE FUNCTION pg_catalog.hs_contains(hstore,hstore) RETURNS bool AS 'MODULE_PATHNAME','hstore_contains' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION hs_contained(hstore,hstore) +CREATE FUNCTION pg_catalog.hs_contained(hstore,hstore) RETURNS bool AS 'MODULE_PATHNAME','hstore_contained' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -211,12 +211,12 @@ CREATE OPERATOR ~ ( JOIN = contjoinsel ); -CREATE FUNCTION tconvert(text,text) +CREATE FUNCTION pg_catalog.tconvert(text,text) RETURNS hstore AS 'MODULE_PATHNAME','hstore_from_text' LANGUAGE C IMMUTABLE NOT FENCED; -- not STRICT; needs to allow (key,NULL) -CREATE FUNCTION 
hstore(text,text) +CREATE FUNCTION pg_catalog.hstore(text,text) RETURNS hstore AS 'MODULE_PATHNAME','hstore_from_text' LANGUAGE C IMMUTABLE; -- not STRICT; needs to allow (key,NULL) @@ -227,25 +227,25 @@ CREATE OPERATOR => ( PROCEDURE = hstore ); -CREATE FUNCTION hstore(text[],text[]) +CREATE FUNCTION pg_catalog.hstore(text[],text[]) RETURNS hstore AS 'MODULE_PATHNAME', 'hstore_from_arrays' LANGUAGE C IMMUTABLE NOT FENCED; -- not STRICT; allows (keys,null) -CREATE FUNCTION hstore(text[]) +CREATE FUNCTION pg_catalog.hstore(text[]) RETURNS hstore AS 'MODULE_PATHNAME', 'hstore_from_array' LANGUAGE C IMMUTABLE STRICT NOT FENCED; CREATE CAST (text[] AS hstore) - WITH FUNCTION hstore(text[]); + WITH FUNCTION pg_catalog.hstore(text[]); -CREATE FUNCTION hstore(record) +CREATE FUNCTION pg_catalog.hstore(record) RETURNS hstore AS 'MODULE_PATHNAME', 'hstore_from_record' LANGUAGE C IMMUTABLE NOT FENCED; -- not STRICT; allows (null::recordtype) -CREATE FUNCTION hstore_to_array(hstore) +CREATE FUNCTION pg_catalog.hstore_to_array(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_to_array' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -255,7 +255,7 @@ CREATE OPERATOR %% ( PROCEDURE = hstore_to_array ); -CREATE FUNCTION hstore_to_matrix(hstore) +CREATE FUNCTION pg_catalog.hstore_to_matrix(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_to_matrix' LANGUAGE C STRICT IMMUTABLE NOT FENCED; @@ -265,27 +265,27 @@ CREATE OPERATOR %# ( PROCEDURE = hstore_to_matrix ); -CREATE FUNCTION akeys(hstore) +CREATE FUNCTION pg_catalog.akeys(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_akeys' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION avals(hstore) +CREATE FUNCTION pg_catalog.avals(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_avals' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION skeys(hstore) +CREATE FUNCTION pg_catalog.skeys(hstore) RETURNS setof text AS 'MODULE_PATHNAME','hstore_skeys' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION svals(hstore) +CREATE FUNCTION pg_catalog.svals(hstore) RETURNS setof text AS 'MODULE_PATHNAME','hstore_svals' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION each(IN hs hstore, +CREATE FUNCTION pg_catalog.each(IN hs hstore, OUT key text, OUT value text) RETURNS SETOF record @@ -521,7 +521,7 @@ AS OPERATOR 9 ?(hstore,text), OPERATOR 10 ?|(hstore,text[]), OPERATOR 11 ?&(hstore,text[]), - FUNCTION 1 bttextcmp(text,text), + FUNCTION 1 pg_catalog.bttextcmp(text,text), FUNCTION 2 gin_extract_hstore(internal, internal), FUNCTION 3 gin_extract_hstore_query(internal, internal, int2, internal, internal), FUNCTION 4 gin_consistent_hstore(internal, int2, internal, int4, internal, internal), diff --git a/contrib/hstore/hstore--1.1.sql b/contrib/hstore/hstore--1.1.sql index a0ab7c445..6d9313c55 100644 --- a/contrib/hstore/hstore--1.1.sql +++ b/contrib/hstore/hstore--1.1.sql @@ -5,22 +5,22 @@ CREATE TYPE hstore; -CREATE FUNCTION hstore_in(cstring) +CREATE FUNCTION pg_catalog.hstore_in(cstring) RETURNS hstore AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION hstore_out(hstore) +CREATE FUNCTION pg_catalog.hstore_out(hstore) RETURNS cstring AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE FUNCTION hstore_recv(internal) +CREATE FUNCTION pg_catalog.hstore_recv(internal) RETURNS hstore AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION hstore_send(hstore) +CREATE FUNCTION pg_catalog.hstore_send(hstore) RETURNS bytea AS 'MODULE_PATHNAME' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; @@ 
-34,145 +34,145 @@ CREATE TYPE hstore ( STORAGE = extended ); -CREATE FUNCTION hstore_version_diag(hstore) +CREATE FUNCTION pg_catalog.hstore_version_diag(hstore) RETURNS integer AS 'MODULE_PATHNAME','hstore_version_diag' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION fetchval(hstore,text) +CREATE FUNCTION pg_catalog.fetchval(hstore,text) RETURNS text AS 'MODULE_PATHNAME','hstore_fetchval' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION slice_array(hstore,text[]) +CREATE FUNCTION pg_catalog.slice_array(hstore,text[]) RETURNS text[] AS 'MODULE_PATHNAME','hstore_slice_to_array' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION slice(hstore,text[]) +CREATE FUNCTION pg_catalog.slice(hstore,text[]) RETURNS hstore AS 'MODULE_PATHNAME','hstore_slice_to_hstore' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION isexists(hstore,text) +CREATE FUNCTION pg_catalog.isexists(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION exist(hstore,text) +CREATE FUNCTION pg_catalog.exist(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION exists_any(hstore,text[]) +CREATE FUNCTION pg_catalog.exists_any(hstore,text[]) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists_any' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION exists_all(hstore,text[]) +CREATE FUNCTION pg_catalog.exists_all(hstore,text[]) RETURNS bool AS 'MODULE_PATHNAME','hstore_exists_all' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION isdefined(hstore,text) +CREATE FUNCTION pg_catalog.isdefined(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_defined' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION defined(hstore,text) +CREATE FUNCTION pg_catalog.defined(hstore,text) RETURNS bool AS 'MODULE_PATHNAME','hstore_defined' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION delete(hstore,text) +CREATE FUNCTION pg_catalog.delete(hstore,text) RETURNS hstore AS 'MODULE_PATHNAME','hstore_delete' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION delete(hstore,text[]) +CREATE FUNCTION pg_catalog.delete(hstore,text[]) RETURNS hstore AS 'MODULE_PATHNAME','hstore_delete_array' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION delete(hstore,hstore) +CREATE FUNCTION pg_catalog.delete(hstore,hstore) RETURNS hstore AS 'MODULE_PATHNAME','hstore_delete_hstore' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION hs_concat(hstore,hstore) +CREATE FUNCTION pg_catalog.hs_concat(hstore,hstore) RETURNS hstore AS 'MODULE_PATHNAME','hstore_concat' LANGUAGE C IMMUTABLE NOT FENCED;; -CREATE FUNCTION hs_contains(hstore,hstore) +CREATE FUNCTION pg_catalog.hs_contains(hstore,hstore) RETURNS bool AS 'MODULE_PATHNAME','hstore_contains' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION hs_contained(hstore,hstore) +CREATE FUNCTION pg_catalog.hs_contained(hstore,hstore) RETURNS bool AS 'MODULE_PATHNAME','hstore_contained' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION tconvert(text,text) +CREATE FUNCTION pg_catalog.tconvert(text,text) RETURNS hstore AS 'MODULE_PATHNAME','hstore_from_text' LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; needs to allow (key,NULL) -CREATE FUNCTION hstore(text,text) +CREATE FUNCTION pg_catalog.hstore(text,text) RETURNS hstore AS 'MODULE_PATHNAME','hstore_from_text' LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; needs to allow (key,NULL) -CREATE FUNCTION hstore(text[],text[]) +CREATE FUNCTION 
pg_catalog.hstore(text[],text[]) RETURNS hstore AS 'MODULE_PATHNAME', 'hstore_from_arrays' LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; allows (keys,null) -CREATE FUNCTION hstore(text[]) +CREATE FUNCTION pg_catalog.hstore(text[]) RETURNS hstore AS 'MODULE_PATHNAME', 'hstore_from_array' LANGUAGE C IMMUTABLE STRICT NOT FENCED;; CREATE CAST (text[] AS hstore) - WITH FUNCTION hstore(text[]); + WITH FUNCTION pg_catalog.hstore(text[]); -CREATE FUNCTION hstore(record) +CREATE FUNCTION pg_catalog.hstore(record) RETURNS hstore AS 'MODULE_PATHNAME', 'hstore_from_record' LANGUAGE C IMMUTABLE NOT FENCED;; -- not STRICT; allows (null::recordtype) -CREATE FUNCTION hstore_to_array(hstore) +CREATE FUNCTION pg_catalog.hstore_to_array(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_to_array' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION hstore_to_matrix(hstore) +CREATE FUNCTION pg_catalog.hstore_to_matrix(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_to_matrix' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION akeys(hstore) +CREATE FUNCTION pg_catalog.akeys(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_akeys' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION avals(hstore) +CREATE FUNCTION pg_catalog.avals(hstore) RETURNS text[] AS 'MODULE_PATHNAME','hstore_avals' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION skeys(hstore) +CREATE FUNCTION pg_catalog.skeys(hstore) RETURNS setof text AS 'MODULE_PATHNAME','hstore_skeys' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION svals(hstore) +CREATE FUNCTION pg_catalog.svals(hstore) RETURNS setof text AS 'MODULE_PATHNAME','hstore_svals' LANGUAGE C STRICT IMMUTABLE NOT FENCED;; -CREATE FUNCTION each(IN hs hstore, +CREATE FUNCTION pg_catalog.each(IN hs hstore, OUT key text, OUT value text) RETURNS SETOF record diff --git a/contrib/hstore/hstore--unpackaged--1.0.sql b/contrib/hstore/hstore--unpackaged--1.0.sql index b7e73f412..5fee12fad 100644 --- a/contrib/hstore/hstore--unpackaged--1.0.sql +++ b/contrib/hstore/hstore--unpackaged--1.0.sql @@ -4,55 +4,55 @@ \echo Use "CREATE EXTENSION hstore" to load this file. 
\quit ALTER EXTENSION hstore ADD type hstore; -ALTER EXTENSION hstore ADD function hstore_in(cstring); -ALTER EXTENSION hstore ADD function hstore_out(hstore); -ALTER EXTENSION hstore ADD function hstore_recv(internal); -ALTER EXTENSION hstore ADD function hstore_send(hstore); -ALTER EXTENSION hstore ADD function hstore_version_diag(hstore); -ALTER EXTENSION hstore ADD function fetchval(hstore,text); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_in(cstring); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_out(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_recv(internal); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_send(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_version_diag(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.fetchval(hstore,text); ALTER EXTENSION hstore ADD operator ->(hstore,text); -ALTER EXTENSION hstore ADD function slice_array(hstore,text[]); +ALTER EXTENSION hstore ADD function pg_catalog.slice_array(hstore,text[]); ALTER EXTENSION hstore ADD operator ->(hstore,text[]); -ALTER EXTENSION hstore ADD function slice(hstore,text[]); -ALTER EXTENSION hstore ADD function isexists(hstore,text); -ALTER EXTENSION hstore ADD function exist(hstore,text); +ALTER EXTENSION hstore ADD function pg_catalog.slice(hstore,text[]); +ALTER EXTENSION hstore ADD function pg_catalog.isexists(hstore,text); +ALTER EXTENSION hstore ADD function pg_catalog.exist(hstore,text); ALTER EXTENSION hstore ADD operator ?(hstore,text); -ALTER EXTENSION hstore ADD function exists_any(hstore,text[]); +ALTER EXTENSION hstore ADD function pg_catalog.exists_any(hstore,text[]); ALTER EXTENSION hstore ADD operator ?|(hstore,text[]); -ALTER EXTENSION hstore ADD function exists_all(hstore,text[]); +ALTER EXTENSION hstore ADD function pg_catalog.exists_all(hstore,text[]); ALTER EXTENSION hstore ADD operator ?&(hstore,text[]); -ALTER EXTENSION hstore ADD function isdefined(hstore,text); -ALTER EXTENSION hstore ADD function defined(hstore,text); -ALTER EXTENSION hstore ADD function delete(hstore,text); -ALTER EXTENSION hstore ADD function delete(hstore,text[]); -ALTER EXTENSION hstore ADD function delete(hstore,hstore); +ALTER EXTENSION hstore ADD function pg_catalog.isdefined(hstore,text); +ALTER EXTENSION hstore ADD function pg_catalog.defined(hstore,text); +ALTER EXTENSION hstore ADD function pg_catalog.delete(hstore,text); +ALTER EXTENSION hstore ADD function pg_catalog.delete(hstore,text[]); +ALTER EXTENSION hstore ADD function pg_catalog.delete(hstore,hstore); ALTER EXTENSION hstore ADD operator -(hstore,text); ALTER EXTENSION hstore ADD operator -(hstore,text[]); ALTER EXTENSION hstore ADD operator -(hstore,hstore); -ALTER EXTENSION hstore ADD function hs_concat(hstore,hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hs_concat(hstore,hstore); ALTER EXTENSION hstore ADD operator ||(hstore,hstore); -ALTER EXTENSION hstore ADD function hs_contains(hstore,hstore); -ALTER EXTENSION hstore ADD function hs_contained(hstore,hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hs_contains(hstore,hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hs_contained(hstore,hstore); ALTER EXTENSION hstore ADD operator <@(hstore,hstore); ALTER EXTENSION hstore ADD operator @>(hstore,hstore); ALTER EXTENSION hstore ADD operator ~(hstore,hstore); ALTER EXTENSION hstore ADD operator @(hstore,hstore); -ALTER EXTENSION hstore ADD function tconvert(text,text); -ALTER EXTENSION hstore ADD function hstore(text,text); +ALTER EXTENSION hstore ADD 
function pg_catalog.tconvert(text,text); +ALTER EXTENSION hstore ADD function pg_catalog.hstore(text,text); ALTER EXTENSION hstore ADD operator =>(text,text); -ALTER EXTENSION hstore ADD function hstore(text[],text[]); -ALTER EXTENSION hstore ADD function hstore(text[]); +ALTER EXTENSION hstore ADD function pg_catalog.hstore(text[],text[]); +ALTER EXTENSION hstore ADD function pg_catalog.hstore(text[]); ALTER EXTENSION hstore ADD cast (text[] as hstore); -ALTER EXTENSION hstore ADD function hstore(record); -ALTER EXTENSION hstore ADD function hstore_to_array(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hstore(record); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_to_array(hstore); ALTER EXTENSION hstore ADD operator %%(NONE,hstore); -ALTER EXTENSION hstore ADD function hstore_to_matrix(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.hstore_to_matrix(hstore); ALTER EXTENSION hstore ADD operator %#(NONE,hstore); -ALTER EXTENSION hstore ADD function akeys(hstore); -ALTER EXTENSION hstore ADD function avals(hstore); -ALTER EXTENSION hstore ADD function skeys(hstore); -ALTER EXTENSION hstore ADD function svals(hstore); -ALTER EXTENSION hstore ADD function each(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.akeys(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.avals(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.skeys(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.svals(hstore); +ALTER EXTENSION hstore ADD function pg_catalog.each(hstore); ALTER EXTENSION hstore ADD function populate_record(anyelement,hstore); ALTER EXTENSION hstore ADD operator #=(anyelement,hstore); ALTER EXTENSION hstore ADD function hstore_eq(hstore,hstore); diff --git a/contrib/hstore/sql/hstore.sql b/contrib/hstore/sql/hstore.sql index 85c2d696b..416af2921 100644 --- a/contrib/hstore/sql/hstore.sql +++ b/contrib/hstore/sql/hstore.sql @@ -50,99 +50,99 @@ select ' '::hstore; -- -> operator -select fetchval('aa=>b, c=>d , b=>16'::hstore, 'c'); -select fetchval('aa=>b, c=>d , b=>16'::hstore, 'b'); -select fetchval('aa=>b, c=>d , b=>16'::hstore, 'aa'); -select (fetchval('aa=>b, c=>d , b=>16'::hstore, 'gg')) is null; -select (fetchval('aa=>NULL, c=>d , b=>16'::hstore, 'aa')) is null; -select (fetchval('aa=>"NULL", c=>d , b=>16'::hstore, 'aa')) is null; +select pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'c'); +select pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'b'); +select pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'aa'); +select (pg_catalog.fetchval('aa=>b, c=>d , b=>16'::hstore, 'gg')) is null; +select (pg_catalog.fetchval('aa=>NULL, c=>d , b=>16'::hstore, 'aa')) is null; +select (pg_catalog.fetchval('aa=>"NULL", c=>d , b=>16'::hstore, 'aa')) is null; -- -> array operator -select slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['aa','c']); -select slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['c','aa']); -select slice_array('aa=>NULL, c=>d , b=>16'::hstore, ARRAY['aa','c',null]); -select slice_array('aa=>1, c=>3, b=>2, d=>4'::hstore, ARRAY[['b','d'],['aa','c']]); +select pg_catalog.slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['aa','c']); +select pg_catalog.slice_array('aa=>"NULL", c=>d , b=>16'::hstore, ARRAY['c','aa']); +select pg_catalog.slice_array('aa=>NULL, c=>d , b=>16'::hstore, ARRAY['aa','c',null]); +select pg_catalog.slice_array('aa=>1, c=>3, b=>2, d=>4'::hstore, ARRAY[['b','d'],['aa','c']]); -- exists/defined -select exist('a=>NULL, b=>qq', 'a'); -select exist('a=>NULL, b=>qq', 'b'); -select 
exist('a=>NULL, b=>qq', 'c'); -select exist('a=>"NULL", b=>qq', 'a'); -select defined('a=>NULL, b=>qq', 'a'); -select defined('a=>NULL, b=>qq', 'b'); -select defined('a=>NULL, b=>qq', 'c'); -select defined('a=>"NULL", b=>qq', 'a'); +select pg_catalog.exist('a=>NULL, b=>qq', 'a'); +select pg_catalog.exist('a=>NULL, b=>qq', 'b'); +select pg_catalog.exist('a=>NULL, b=>qq', 'c'); +select pg_catalog.exist('a=>"NULL", b=>qq', 'a'); +select pg_catalog.defined('a=>NULL, b=>qq', 'a'); +select pg_catalog.defined('a=>NULL, b=>qq', 'b'); +select pg_catalog.defined('a=>NULL, b=>qq', 'c'); +select pg_catalog.defined('a=>"NULL", b=>qq', 'a'); -- delete -select delete('a=>1 , b=>2, c=>3'::hstore, 'a'); -select delete('a=>null , b=>2, c=>3'::hstore, 'a'); -select delete('a=>1 , b=>2, c=>3'::hstore, 'b'); -select delete('a=>1 , b=>2, c=>3'::hstore, 'c'); -select delete('a=>1 , b=>2, c=>3'::hstore, 'd'); -select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, 'b'::text)) - = pg_column_size('a=>1, b=>2'::hstore); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'a'); +select pg_catalog.delete('a=>null , b=>2, c=>3'::hstore, 'a'); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'b'); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'c'); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'd'); +select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'b'::text)) + = pg_catalog.pg_column_size('a=>1, b=>2'::hstore); -- delete (array) -select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','e']); -select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','b']); -select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c']); -select delete('a=>1 , b=>2, c=>3'::hstore, ARRAY[['b'],['c'],['a']]); -select delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[]); -select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c'])) - = pg_column_size('b=>2'::hstore); -select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[])) - = pg_column_size('a=>1, b=>2, c=>3'::hstore); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','e']); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['d','b']); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c']); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY[['b'],['c'],['a']]); +select pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[]); +select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ARRAY['a','c'])) + = pg_catalog.pg_column_size('b=>2'::hstore); +select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, '{}'::text[])) + = pg_catalog.pg_column_size('a=>1, b=>2, c=>3'::hstore); -- delete (hstore) -select delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>4, b=>2'::hstore); -select delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>NULL, c=>3'::hstore); -select delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>1, b=>2, c=>3'::hstore); -select delete('aa=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore); -select delete('aa=>1 , b=>2, c=>3'::hstore, ''::hstore); -select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore)) - = pg_column_size('a=>1, c=>3'::hstore); -select pg_column_size(delete('a=>1 , b=>2, c=>3'::hstore, ''::hstore)) - = pg_column_size('a=>1, b=>2, c=>3'::hstore); +select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>4, b=>2'::hstore); +select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>NULL, c=>3'::hstore); +select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'aa=>1, b=>2, c=>3'::hstore); +select 
pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore); +select pg_catalog.delete('aa=>1 , b=>2, c=>3'::hstore, ''::hstore); +select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, 'b=>2'::hstore)) + = pg_catalog.pg_column_size('a=>1, c=>3'::hstore); +select pg_catalog.pg_column_size(pg_catalog.delete('a=>1 , b=>2, c=>3'::hstore, ''::hstore)) + = pg_catalog.pg_column_size('a=>1, b=>2, c=>3'::hstore); -- hs_concat -select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'); -select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aq=>l'); -select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aa=>l'); -select hs_concat('aa=>1 , b=>2, cq=>3'::hstore, ''); -select hs_concat(''::hstore, 'cq=>l, b=>g, fg=>f'); -select pg_column_size(hs_concat(''::hstore, ''::hstore)) = pg_column_size(''::hstore); -select pg_column_size(hs_concat('aa=>1'::hstore, 'b=>2'::hstore)) - = pg_column_size('aa=>1, b=>2'::hstore); -select pg_column_size(hs_concat('aa=>1, b=>2'::hstore, ''::hstore)) - = pg_column_size('aa=>1, b=>2'::hstore); -select pg_column_size(hs_concat(''::hstore, 'aa=>1, b=>2'::hstore)) - = pg_column_size('aa=>1, b=>2'::hstore); +select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f'); +select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aq=>l'); +select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'aa=>l'); +select pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, ''); +select pg_catalog.hs_concat(''::hstore, 'cq=>l, b=>g, fg=>f'); +select pg_catalog.pg_column_size(pg_catalog.hs_concat(''::hstore, ''::hstore)) = pg_catalog.pg_column_size(''::hstore); +select pg_catalog.pg_column_size(pg_catalog.hs_concat('aa=>1'::hstore, 'b=>2'::hstore)) + = pg_catalog.pg_column_size('aa=>1, b=>2'::hstore); +select pg_catalog.pg_column_size(pg_catalog.hs_concat('aa=>1, b=>2'::hstore, ''::hstore)) + = pg_catalog.pg_column_size('aa=>1, b=>2'::hstore); +select pg_catalog.pg_column_size(pg_catalog.hs_concat(''::hstore, 'aa=>1, b=>2'::hstore)) + = pg_catalog.pg_column_size('aa=>1, b=>2'::hstore); -- hstore(text,text) -select hs_concat('a=>g, b=>c'::hstore, hstore('asd', 'gf')); -select hs_concat('a=>g, b=>c'::hstore, hstore('b', 'gf')); -select hs_concat('a=>g, b=>c'::hstore, hstore('b', 'NULL')); -select hs_concat('a=>g, b=>c'::hstore, hstore('b', NULL)); -select (hs_concat('a=>g, b=>c'::hstore, hstore(NULL, 'b'))) is null; -select pg_column_size(hstore('b', 'gf')) - = pg_column_size('b=>gf'::hstore); -select pg_column_size(hs_concat('a=>g, b=>c'::hstore, hstore('b', 'gf'))) - = pg_column_size('a=>g, b=>gf'::hstore); +select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('asd', 'gf')); +select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', 'gf')); +select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', 'NULL')); +select pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', NULL)); +select (pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore(NULL, 'b'))) is null; +select pg_catalog.pg_column_size(pg_catalog.hstore('b', 'gf')) + = pg_catalog.pg_column_size('b=>gf'::hstore); +select pg_catalog.pg_column_size(pg_catalog.hs_concat('a=>g, b=>c'::hstore, pg_catalog.hstore('b', 'gf'))) + = pg_catalog.pg_column_size('a=>g, b=>gf'::hstore); -- slice() -select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['g','h','i']); -select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b']); -select slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['aa','b']); -select slice(hstore 'aa=>1, b=>2, c=>3', 
ARRAY['c','b','aa']); -select pg_column_size(slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b'])) - = pg_column_size('b=>2, c=>3'::hstore); -select pg_column_size(slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa'])) - = pg_column_size('aa=>1, b=>2, c=>3'::hstore); +select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['g','h','i']); +select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b']); +select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['aa','b']); +select pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa']); +select pg_catalog.pg_column_size(pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b'])) + = pg_catalog.pg_column_size('b=>2, c=>3'::hstore); +select pg_catalog.pg_column_size(pg_catalog.slice(hstore 'aa=>1, b=>2, c=>3', ARRAY['c','b','aa'])) + = pg_catalog.pg_column_size('aa=>1, b=>2, c=>3'::hstore); -- array input select '{}'::text[]::hstore; @@ -151,66 +151,66 @@ select ARRAY['a','g','b','h','asd','i']::hstore; select ARRAY[['a','g'],['b','h'],['asd','i']]::hstore; select ARRAY[['a','g','b'],['h','asd','i']]::hstore; select ARRAY[[['a','g'],['b','h'],['asd','i']]]::hstore; -select hstore('{}'::text[]); -select hstore(ARRAY['a','g','b','h','asd']); -select hstore(ARRAY['a','g','b','h','asd','i']); -select hstore(ARRAY[['a','g'],['b','h'],['asd','i']]); -select hstore(ARRAY[['a','g','b'],['h','asd','i']]); -select hstore(ARRAY[[['a','g'],['b','h'],['asd','i']]]); -select hstore('[0:5]={a,g,b,h,asd,i}'::text[]); -select hstore('[0:2][1:2]={{a,g},{b,h},{asd,i}}'::text[]); +select pg_catalog.hstore('{}'::text[]); +select pg_catalog.hstore(ARRAY['a','g','b','h','asd']); +select pg_catalog.hstore(ARRAY['a','g','b','h','asd','i']); +select pg_catalog.hstore(ARRAY[['a','g'],['b','h'],['asd','i']]); +select pg_catalog.hstore(ARRAY[['a','g','b'],['h','asd','i']]); +select pg_catalog.hstore(ARRAY[[['a','g'],['b','h'],['asd','i']]]); +select pg_catalog.hstore('[0:5]={a,g,b,h,asd,i}'::text[]); +select pg_catalog.hstore('[0:2][1:2]={{a,g},{b,h},{asd,i}}'::text[]); -- pairs of arrays -select hstore(ARRAY['a','b','asd'], ARRAY['g','h','i']); -select hstore(ARRAY['a','b','asd'], ARRAY['g','h',NULL]); -select hstore(ARRAY['z','y','x'], ARRAY['1','2','3']); -select hstore(ARRAY['aaa','bb','c','d'], ARRAY[null::text,null,null,null]); -select hstore(ARRAY['aaa','bb','c','d'], null); -select quote_literal(hstore('{}'::text[], '{}'::text[])); -select quote_literal(hstore('{}'::text[], null)); -select hstore(ARRAY['a'], '{}'::text[]); -- error -select hstore('{}'::text[], ARRAY['a']); -- error -select pg_column_size(hstore(ARRAY['a','b','asd'], ARRAY['g','h','i'])) - = pg_column_size('a=>g, b=>h, asd=>i'::hstore); +select pg_catalog.hstore(ARRAY['a','b','asd'], ARRAY['g','h','i']); +select pg_catalog.hstore(ARRAY['a','b','asd'], ARRAY['g','h',NULL]); +select pg_catalog.hstore(ARRAY['z','y','x'], ARRAY['1','2','3']); +select pg_catalog.hstore(ARRAY['aaa','bb','c','d'], ARRAY[null::text,null,null,null]); +select pg_catalog.hstore(ARRAY['aaa','bb','c','d'], null); +select pg_catalog.quote_literal(pg_catalog.hstore('{}'::text[], '{}'::text[])); +select pg_catalog.quote_literal(pg_catalog.hstore('{}'::text[], null)); +select pg_catalog.hstore(ARRAY['a'], '{}'::text[]); -- error +select pg_catalog.hstore('{}'::text[], ARRAY['a']); -- error +select pg_catalog.pg_column_size(pg_catalog.hstore(ARRAY['a','b','asd'], ARRAY['g','h','i'])) + = pg_catalog.pg_column_size('a=>g, b=>h, asd=>i'::hstore); -- keys/values -select akeys(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 
'cq=>l, b=>g, fg=>f')); -select akeys('""=>1'); -select akeys(''); -select avals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); -select avals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL')); -select avals('""=>1'); -select avals(''); +select pg_catalog.akeys(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); +select pg_catalog.akeys('""=>1'); +select pg_catalog.akeys(''); +select pg_catalog.avals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); +select pg_catalog.avals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL')); +select pg_catalog.avals('""=>1'); +select pg_catalog.avals(''); -select hstore_to_array('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore); +select pg_catalog.hstore_to_array('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore); -select hstore_to_matrix('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore); +select pg_catalog.hstore_to_matrix('aa=>1, cq=>l, b=>g, fg=>NULL'::hstore); -select * from skeys(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); -select * from skeys('""=>1'); -select * from skeys(''); -select * from svals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); -select *, svals is null from svals(hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL')); -select * from svals('""=>1'); -select * from svals(''); +select * from pg_catalog.skeys(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); +select * from pg_catalog.skeys('""=>1'); +select * from pg_catalog.skeys(''); +select * from pg_catalog.svals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>f')); +select *, svals is null from pg_catalog.svals(pg_catalog.hs_concat('aa=>1 , b=>2, cq=>3'::hstore, 'cq=>l, b=>g, fg=>NULL')); +select * from pg_catalog.svals('""=>1'); +select * from pg_catalog.svals(''); -select * from each('aaa=>bq, b=>NULL, ""=>1 '); +select * from pg_catalog.each('aaa=>bq, b=>NULL, ""=>1 '); -- hs_contains -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore); -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>NULL'::hstore); -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, g=>NULL'::hstore); -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'g=>NULL'::hstore); -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>c'::hstore); -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore); -select hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>q'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>NULL'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, g=>NULL'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'g=>NULL'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>c'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b'::hstore); +select pg_catalog.hs_contains('a=>b, b=>1, c=>NULL'::hstore, 'a=>b, c=>q'::hstore); CREATE TABLE testhstore (h hstore); \copy testhstore from 'data/hstore.data' -select count(*) from testhstore where hs_contains(h, 'wait=>NULL'::hstore); -select count(*) from testhstore where hs_contains(h, 'wait=>CC'::hstore); -select count(*) from testhstore where hs_contains(h, 'wait=>CC, public=>t'::hstore); -select count(*) from testhstore where exist(h, 'public'); -select count(*) from testhstore where exists_any(h, ARRAY['public','disabled']); -select 
count(*) from testhstore where exists_all(h, ARRAY['public','disabled']); +select pg_catalog.count(*) from testhstore where pg_catalog.hs_contains(h, 'wait=>NULL'::hstore); +select pg_catalog.count(*) from testhstore where pg_catalog.hs_contains(h, 'wait=>CC'::hstore); +select pg_catalog.count(*) from testhstore where pg_catalog.hs_contains(h, 'wait=>CC, public=>t'::hstore); +select pg_catalog.count(*) from testhstore where pg_catalog.exist(h, 'public'); +select pg_catalog.count(*) from testhstore where pg_catalog.exists_any(h, ARRAY['public','disabled']); +select pg_catalog.count(*) from testhstore where pg_catalog.exists_all(h, ARRAY['public','disabled']); diff --git a/contrib/log_fdw/log_fdw--1.0.sql b/contrib/log_fdw/log_fdw--1.0.sql index cb7528188..a9549cfb5 100644 --- a/contrib/log_fdw/log_fdw--1.0.sql +++ b/contrib/log_fdw/log_fdw--1.0.sql @@ -10,12 +10,12 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION log_fdw" to load this file. \quit -CREATE FUNCTION log_fdw_handler() +CREATE FUNCTION pg_catalog.log_fdw_handler() RETURNS fdw_handler AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; -CREATE FUNCTION log_fdw_validator(text[], oid) +CREATE FUNCTION pg_catalog.log_fdw_validator(text[], oid) RETURNS void AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; @@ -26,7 +26,7 @@ CREATE FOREIGN DATA WRAPPER log_fdw CREATE SERVER log_srv FOREIGN DATA WRAPPER log_fdw; -create or replace function gs_create_log_tables() +create or replace function pg_catalog.gs_create_log_tables() RETURNS void AS $$ declare diff --git a/contrib/mppdb_decoding/mppdb_decoding.cpp b/contrib/mppdb_decoding/mppdb_decoding.cpp index 77f1cf948..f36b2e649 100644 --- a/contrib/mppdb_decoding/mppdb_decoding.cpp +++ b/contrib/mppdb_decoding/mppdb_decoding.cpp @@ -54,21 +54,15 @@ PG_MODULE_MAGIC; extern "C" void _PG_init(void); extern "C" void _PG_output_plugin_init(OutputPluginCallbacks* cb); -typedef struct { - MemoryContext context; - bool include_xids; - bool include_timestamp; - bool skip_empty_xacts; - bool xact_wrote_changes; - bool only_local; -} TestDecodingData; - static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init); static void pg_decode_shutdown(LogicalDecodingContext* ctx); static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); static void pg_output_begin( - LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write); + LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn, bool last_write); static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn); +static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); +static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); + static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); @@ -87,6 +81,8 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; cb->commit_cb = pg_decode_commit_txn; + cb->abort_cb = pg_decode_abort_txn; + cb->prepare_cb = pg_decode_prepare_txn; cb->filter_by_origin_cb = pg_decode_filter; cb->shutdown_cb = pg_decode_shutdown; } @@ -95,84 +91,33 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) static void 
pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init) { ListCell* option = NULL; - TestDecodingData* data = NULL; + PluginTestDecodingData* data = NULL; - data = (TestDecodingData*)palloc0(sizeof(TestDecodingData)); + data = (PluginTestDecodingData*)palloc0(sizeof(PluginTestDecodingData)); data->context = AllocSetContextCreate(ctx->context, "text conversion context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); data->include_xids = true; - data->include_timestamp = false; + data->include_timestamp = true; data->skip_empty_xacts = false; data->only_local = true; + data->tableWhiteList = NIL; ctx->output_plugin_private = data; opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT; foreach (option, ctx->output_plugin_options) { - DefElem* elem = (DefElem*)lfirst(option); - - Assert(elem->arg == NULL || IsA(elem->arg, String)); - - if (strcmp(elem->defname, "include-xids") == 0) { - /* if option does not provide a value, it means its value is true */ - if (elem->arg == NULL) - data->include_xids = true; - else if (!parse_bool(strVal(elem->arg), &data->include_xids)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else if (strcmp(elem->defname, "include-timestamp") == 0) { - if (elem->arg == NULL) - data->include_timestamp = true; - else if (!parse_bool(strVal(elem->arg), &data->include_timestamp)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else if (strcmp(elem->defname, "force-binary") == 0) { - bool force_binary = false; - - if (elem->arg == NULL) - continue; - else if (!parse_bool(strVal(elem->arg), &force_binary)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - - if (force_binary) - opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; - } else if (strcmp(elem->defname, "skip-empty-xacts") == 0) { - - if (elem->arg == NULL) - data->skip_empty_xacts = true; - else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else if (strcmp(elem->defname, "only-local") == 0) { - - if (elem->arg == NULL) - data->only_local = true; - else if (!parse_bool(strVal(elem->arg), &data->only_local)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg( - "option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? 
strVal(elem->arg) : "(null)"))); - } + ParseDecodingOptionPlugin(option, data, opt); } } /* cleanup this plugin's resources */ static void pg_decode_shutdown(LogicalDecodingContext* ctx) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; /* cleanup our own resources via memory context reset */ MemoryContextDelete(data->context); @@ -181,7 +126,7 @@ static void pg_decode_shutdown(LogicalDecodingContext* ctx) /* BEGIN callback */ static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; data->xact_wrote_changes = false; if (data->skip_empty_xacts) @@ -190,7 +135,8 @@ static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* t pg_output_begin(ctx, data, txn, true); } -static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write) +static void pg_output_begin(LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn, + bool last_write) { OutputPluginPrepareWrite(ctx, last_write); if (data->include_xids) @@ -203,7 +149,7 @@ static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, /* COMMIT callback */ static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; if (data->skip_empty_xacts && !data->xact_wrote_changes) return; @@ -221,65 +167,57 @@ static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* OutputPluginWrite(ctx, true); } +/* ABORT callback */ +static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + + OutputPluginPrepareWrite(ctx, true); + if (data->include_xids) + appendStringInfo(ctx->out, "ABORT %lu", txn->xid); + else + appendStringInfoString(ctx->out, "ABORT"); + + if (data->include_timestamp) + appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time)); + + OutputPluginWrite(ctx, true); +} + +/* PREPARE callback */ +static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + + OutputPluginPrepareWrite(ctx, true); + if (data->include_xids) + appendStringInfo(ctx->out, "PREPARE %lu", txn->xid); + else + appendStringInfoString(ctx->out, "PREPARE"); + + if (data->include_timestamp) + appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time)); + + OutputPluginWrite(ctx, true); +} + + + static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; if (data->only_local && origin_id != InvalidRepOriginId) return true; return false; } -/* - * Print literal `outputstr' already represented as string of type `typid' - * into stringbuf `s'. 
- * - * Some builtin types aren't quoted, the rest is quoted. Escaping is done as - * if u_sess->parser_cxt.standard_conforming_strings were enabled. - */ -static void print_literal(StringInfo s, Oid typid, char* outputstr) -{ - const char* valptr = NULL; - - switch (typid) { - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case OIDOID: - case FLOAT4OID: - case FLOAT8OID: - case NUMERICOID: - /* NB: We don't care about Inf, NaN et al. */ - appendStringInfoString(s, outputstr); - break; - - case BITOID: - case VARBITOID: - appendStringInfo(s, "B'%s'", outputstr); - break; - - case BOOLOID: - if (strcmp(outputstr, "t") == 0) - appendStringInfoString(s, "true"); - else - appendStringInfoString(s, "false"); - break; - - default: - appendStringInfoChar(s, '\''); - for (valptr = outputstr; *valptr; valptr++) { - char ch = *valptr; - - if (SQL_STR_DOUBLE(ch, false)) - appendStringInfoChar(s, ch); - appendStringInfoChar(s, ch); - } - appendStringInfoChar(s, '\''); - break; - } -} - /* print the tuple 'tuple' into the StringInfo s */ static void TupleToJsoninfo( cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls) @@ -348,11 +286,11 @@ static void TupleToJsoninfo( if (isnull) appendStringInfoString(val_str, "null"); else if (!typisvarlena) - print_literal(val_str, typid, OidOutputFunctionCall(typoutput, origval)); + PrintLiteral(val_str, typid, OidOutputFunctionCall(typoutput, origval)); else { Datum val; /* definitely detoasted Datum */ val = PointerGetDatum(PG_DETOAST_DATUM(origval)); - print_literal(val_str, typid, OidOutputFunctionCall(typoutput, val)); + PrintLiteral(val_str, typid, OidOutputFunctionCall(typoutput, val)); } cJSON* col_val = cJSON_CreateString(val_str->data); cJSON_AddItemToArray(cols_val, col_val); @@ -365,13 +303,12 @@ static void TupleToJsoninfo( static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change) { - TestDecodingData* data = NULL; + PluginTestDecodingData* data = NULL; Form_pg_class class_form; TupleDesc tupdesc; MemoryContext old; char* res = NULL; - data = (TestDecodingData*)ctx->output_plugin_private; - u_sess->attr.attr_common.extra_float_digits = 0; + data = (PluginTestDecodingData*)ctx->output_plugin_private; /* output BEGIN if we haven't yet */ if (data->skip_empty_xacts && !data->xact_wrote_changes) { @@ -385,11 +322,18 @@ static void pg_decode_change( /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->context); + char *schema = get_namespace_name(class_form->relnamespace); + char *table = NameStr(class_form->relname); + if (data->tableWhiteList != NIL && !CheckWhiteList(data->tableWhiteList, schema, table)) { + (void)MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + return; + } + OutputPluginPrepareWrite(ctx, true); cJSON* root = cJSON_CreateObject(); - cJSON* table_name = cJSON_CreateString(quote_qualified_identifier( - get_namespace_name(get_rel_namespace(RelationGetRelid(relation))), NameStr(class_form->relname))); + cJSON* table_name = cJSON_CreateString(quote_qualified_identifier(schema, table)); cJSON_AddItemToObject(root, "table_name", table_name); cJSON* op_type = NULL; diff --git a/contrib/pagehack/CMakeLists.txt b/contrib/pagehack/CMakeLists.txt index cc6a658e2..e6f87f07b 100644 --- a/contrib/pagehack/CMakeLists.txt +++ b/contrib/pagehack/CMakeLists.txt @@ -2,7 +2,7 @@ # pagehack AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_pagehack_SRC) 
set(TGT_pagehack_INC - ${TGT_pq_INC} ${ZSTD_INCLUDE_PATH} ${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SRC_DIR}/lib/gstrace + ${TGT_pq_INC} ${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SRC_DIR}/lib/gstrace ) set(pagehack_DEF_OPTIONS ${MACRO_OPTIONS}) @@ -11,13 +11,12 @@ if(${ENABLE_DEBUG} STREQUAL "ON") endif() set(pagehack_COMPILE_OPTIONS ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${CHECK_OPTIONS} ${BIN_SECURE_OPTIONS} ${OPTIMIZE_OPTIONS}) set(pagehack_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(pagehack_LINK_LIBS -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lzstd) +set(pagehack_LINK_LIBS -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(pagehack TGT_pagehack_SRC TGT_pagehack_INC "${pagehack_DEF_OPTIONS}" "${pagehack_COMPILE_OPTIONS}" "${pagehack_LINK_OPTIONS}" "${pagehack_LINK_LIBS}") add_dependencies(pagehack pgport_static) target_link_directories(pagehack PUBLIC ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${SECURE_LIB_PATH} ${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${CMAKE_BINARY_DIR}/lib - ${ZSTD_LIB_PATH} ) install(TARGETS pagehack RUNTIME DESTINATION bin) diff --git a/contrib/pagehack/Makefile b/contrib/pagehack/Makefile index 63bde4093..4c1571e4e 100644 --- a/contrib/pagehack/Makefile +++ b/contrib/pagehack/Makefile @@ -1,6 +1,6 @@ # contrib/pagehack/Makefile MODULE_big = pagehack -OBJS = openGaussCompression.o pagehack.o +OBJS = pagehack.o # executable program, even there is no database server/client PROGRAM = pagehack @@ -14,7 +14,7 @@ subdir = contrib/pagehack top_builddir = ../.. include $(top_builddir)/src/Makefile.global enable_shared = false -override CFLAGS += -lzstd + ifeq ($(enable_debug), yes) PG_CPPFLAGS += -DDEBUG endif diff --git a/contrib/pagehack/compression_algorithm.ini b/contrib/pagehack/compression_algorithm.ini deleted file mode 100644 index fa618a3df..000000000 --- a/contrib/pagehack/compression_algorithm.ini +++ /dev/null @@ -1,1460 +0,0 @@ -size_t GetSizeOfHeadData(bool heapPageData) -{ - if (heapPageData) { - return SizeOfHeapPageHeaderData; - } else { - return SizeOfPageHeaderData; - } -} - -// maybe some itemid is not valid -uint16 HeapPageCalcRealRowCnt (char *buf) { - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 cnt = 0; - uint16 i; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - - for (i = 0; i < row_cnt; i++) { - if (ItemIdIsNormal(GET_ITEMID_BY_IDX(buf, i))) { - cnt++; - } - } - return cnt; -} - -void DecompressDeconvertRows(char *buf, char *aux_buf, int16 *real_order, uint16 max_row_len, uint16 real_row_cnt) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = real_row_cnt; - uint32 total_size = page->pd_special - page->pd_upper; - char *copy_begin = buf + page->pd_upper; - char *row; - uint16 i, j, k, cur, up, row_size; - - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - k = 0; - for (i = 0; i < max_row_len; i++) { - for (j = 0; j < row_cnt; j++) { - up = (j == (row_cnt - 1)) ? page->pd_special : GET_ITEMID_BY_IDX(buf, (real_order[j + 1]))->lp_off; - cur = GET_ITEMID_BY_IDX(buf, (real_order[j]))->lp_off; - row_size = up - cur; - row = aux_buf + cur; - if (i < row_size) { - row[i] = copy_begin[k++]; // this part is reshaped - } - } - } - - if (k != total_size) { - printf("ERROR!!! 
pg_deconvert_rows error...!!!\n"); - ASSERT(0); - return; - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf + page->pd_upper, total_size); - securec_check(ret, "", ""); - return ; -} - -// 1: in tuple_offset order, that is, ascending order. -// 2: store all itemid's idx. -// 3: maybe some itemids are not in order. -void CompressConvertItemRealOrder(char *buf, int16 *real_order, uint16 real_row_cnt) { - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - ItemIdData *begin = (ItemIdData *)(buf + GetPageHeaderSize(page)); - int16 *link_order = real_order + real_row_cnt; - - int16 i, head, curr, prev; - int16 end = -1; // invalid index - - head = end; - // itemids are very likely stored in descending order; ignore invalid itemids - for (i = 0; i < row_cnt; i++) { - if (!ItemIdIsNormal(begin + i)) { - continue; - } - - if (head == end) { // set the head idx, insert the first - link_order[i] = end; - head = i; - continue; - } - - if ((begin + i)->lp_off < (begin + head)->lp_off) { - link_order[i] = head; // update the head idx - head = i; - continue; - } - - prev = head; - curr = link_order[head]; - while ((curr != end) && ((begin + i)->lp_off > (begin + curr)->lp_off)) { - prev = curr; - curr = link_order[curr]; - } - - link_order[prev] = i; - link_order[i] = curr; - } - - // arrange the link to array - curr = head; - for (i = 0; i < real_row_cnt; i++) { - real_order[i] = curr; - curr = link_order[curr]; - } - - if (curr != end) { - printf("ERROR!!! pre_convert_real_order error...!!!\n"); - ASSERT(0); - return; - } - -} - -int DecompressPage(const char* src, char* dst, uint8 algorithm) -{ - if (PageIs8BXidHeapVersion(src)) { - return TemplateDecompressPage<true>(src, dst, algorithm); - } else { - return TemplateDecompressPage<false>(src, dst, algorithm); - } -} - -void cprs_diff_deconvert_rows(char *buf, uint32 offset, uint16 min_row_len, uint16 real_row_cnt) { - uint16 row_cnt = real_row_cnt; - uint32 common_size = min_row_len; - uint8 *copy_begin = (uint8 *)(buf + offset); - uint16 i, j; - - for (i = 0; i < common_size; i++) { - for (j = 1; j < row_cnt; j++) { - copy_begin[i * row_cnt + j] += copy_begin[i * row_cnt + (j - 1)]; - } - } - return ; -} - -// check that all row sizes differ by at most MIN_DIFF_SIZE bytes. -bool CompressConvertCheck(char *buf, int16 **real_order, uint16 *max_row_len, uint16 *min_row_len, uint16 *real_row_cnt) { - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - int16 i, row_size; - ItemIdData *ptr = NULL; - uint16 up = page->pd_special; - uint16 min_size = GS_INVALID_ID16; - uint16 max_size = 0; - errno_t ret; - if (page->pd_lower < GetPageHeaderSize(page) || (page->pd_lower > page->pd_upper)) { - return false; - } - - uint16 normal_row_cnt = HeapPageCalcRealRowCnt(buf); - if (normal_row_cnt < MIN_CONVERT_CNT) { // no need convert - return false; - } - - // to store the real tuple order. - /* - --------------------------|-------------------------- - xxxxxxxxxxxxxxxxxxxxxxxxxx|xxxxxxxxxxxxxxxxxxxxxxxxxx - --------------------------|-------------------------- - */ - // the first part is real array order, and the second part is link. 
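A quick aside before the allocation below: the byte-convert and diff-convert transforms that these routines undo are easier to see on a toy buffer. This is a minimal standalone C sketch, not the openGauss page layout; the fixed-width rows and the main() driver are illustrative assumptions, but the two loop shapes mirror the reshaping that DecompressDeconvertRows undoes and the delta coding that cprs_diff_deconvert_rows reverses.

```c
#include <stdio.h>

#define ROWS 3
#define WIDTH 4

int main(void)
{
    /* toy example: three equal-width "tuples"; real pages only require
     * near-equal row sizes (checked by CompressConvertCheck) */
    unsigned char rows[ROWS][WIDTH] = {
        {'a', 'b', 'c', '1'},
        {'a', 'b', 'd', '2'},
        {'a', 'b', 'e', '3'}
    };
    unsigned char out[ROWS * WIDTH];
    int i, j, k = 0;

    /* byte-convert: regroup column-major so the i-th bytes of all rows
     * become adjacent, which tends to compress better */
    for (i = 0; i < WIDTH; i++)
        for (j = 0; j < ROWS; j++)
            out[k++] = rows[j][i];

    /* diff-convert: delta-encode each column group, last byte first */
    for (i = 0; i < WIDTH; i++)
        for (j = ROWS - 1; j > 0; j--)
            out[i * ROWS + j] -= out[i * ROWS + (j - 1)];

    /* the inverse pass (what cprs_diff_deconvert_rows does) restores it */
    for (i = 0; i < WIDTH; i++)
        for (j = 1; j < ROWS; j++)
            out[i * ROWS + j] += out[i * ROWS + (j - 1)];

    fwrite(out, 1, sizeof(out), stdout); /* prints "aaabbbcde123" */
    printf("\n");
    return 0;
}
```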
- *real_order = (int16 *)malloc(sizeof(uint16) * row_cnt * 2); - if (*real_order == NULL) { - printf("zfunc compress file"); - return false; - } - ret = memset_sp(*real_order, sizeof(uint16) * row_cnt * 2, 0, sizeof(uint16) * row_cnt * 2); - securec_check(ret, "", ""); - - // order the ItemIds by tuple_offset order. - CompressConvertItemRealOrder(buf, *real_order, normal_row_cnt); - - // do the check, to check all size of tuples. - for (i = normal_row_cnt - 1; i >= 0; i--) { - ptr = GET_ITEMID_BY_IDX(buf, ((*real_order)[i])); - - row_size = up - ptr->lp_off; - if (row_size < MIN_CONVERT_CNT * 2) { - return false; - } - - min_size = (row_size < min_size) ? row_size : min_size; - max_size = (row_size > max_size) ? row_size : max_size; - - if ((max_size - min_size) > MIN_DIFF_SIZE) { // no need convert - return false; - } - up = ptr->lp_off; - } - - // get the min row common size. - *max_row_len = max_size; - *min_row_len = min_size; - *real_row_cnt = normal_row_cnt; - return true; -} - -void DecompressDeconvertItemIds(char *buf, char *aux_buf) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - uint32 total_size = row_cnt * sizeof(ItemIdData); - char *copy_begin = buf + GetPageHeaderSize(page); - uint16 i, j, k; - - // clear aux_buf - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - k = 0; - for (i = 0; i < sizeof(ItemIdData); i++) { - for (j = 0; j < row_cnt; j++) { - aux_buf[j * sizeof(ItemIdData) + i] = copy_begin[k++]; - } - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf, total_size); - securec_check(ret, "", ""); - return ; -} - - -void DecompressDeconvertOnePage(char *buf, char *aux_buf, bool diff_convert) { - uint16 max_row_len = 0; - uint16 min_row_len = 0; - int16 *real_order = NULL; // itemids are not in order sometimes. we must find the real - uint16 real_row_cnt = 0; - - if (diff_convert) { - cprs_diff_deconvert_rows(buf, GetPageHeaderSize(buf), sizeof(ItemIdData), - (((HeapPageHeaderData *)buf)->pd_lower - GetPageHeaderSize(buf)) / sizeof(ItemIdData)); - } - - // =======firstly, arrange the itemids. - DecompressDeconvertItemIds(buf, aux_buf); - - if (!CompressConvertCheck(buf, &real_order, &max_row_len, &min_row_len, &real_row_cnt)) { - if (real_order != NULL) { - free(real_order); - } - ASSERT(0); - return ; - } - - // =======and last, the tuples - if (diff_convert) { - cprs_diff_deconvert_rows(buf, ((HeapPageHeaderData *)buf)->pd_upper, min_row_len, real_row_cnt); - } - DecompressDeconvertRows(buf, aux_buf, real_order, max_row_len, real_row_cnt); - - if (real_order != NULL) { - free(real_order); - } - return ; -} - - -void DecompressPageDeconvert(char *src, bool diff_convert) -{ - char *aux_buf = NULL; - errno_t rc; - - aux_buf = (char *)malloc(BLCKSZ); - if (aux_buf == NULL) { - // add log - return; - } - rc = memset_s(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "", ""); - - // do convert - DecompressDeconvertOnePage(src, aux_buf, diff_convert); - - if (aux_buf != NULL) { - free(aux_buf); - } -} - - -/** - * DecompressPage() -- Decompress one compressed page. - * return size of decompressed page which should be BLCKSZ or - * -1 for decompress error - * -2 for unrecognized compression algorithm - * - * note:The size of dst must be greater than or equal to BLCKSZ. 
- */ -template <bool heapPageData> -int TemplateDecompressPage(const char* src, char* dst, uint8 algorithm) -{ - int decompressed_size; - char* data; - uint32 size; - bool byte_convert, diff_convert; - size_t sizeOfPageHeaderData = GetSizeOfHeadData(heapPageData); - int rc = memcpy_s(dst, sizeOfPageHeaderData, src, sizeOfPageHeaderData); - securec_check(rc, "", ""); - - if (heapPageData) { - data = ((HeapPageCompressData*) src)->data; - size = ((HeapPageCompressData*) src)->size; - byte_convert = ((HeapPageCompressData*) src)->byte_convert; - diff_convert = ((HeapPageCompressData*) src)->diff_convert; - } else { - data = ((PageCompressData*) src)->data; - size = ((PageCompressData*) src)->size; - byte_convert = ((PageCompressData*) src)->byte_convert; - diff_convert = ((PageCompressData*) src)->diff_convert; - } - - switch (algorithm) { - case COMPRESS_ALGORITHM_PGLZ: - decompressed_size = lz_decompress( - data, size, dst + sizeOfPageHeaderData, BLCKSZ - sizeOfPageHeaderData, false); - break; - case COMPRESS_ALGORITHM_ZSTD: - decompressed_size = - ZSTD_decompress(dst + sizeOfPageHeaderData, BLCKSZ - sizeOfPageHeaderData, data, size); - - if (ZSTD_isError(decompressed_size)) { - return -1; - } - - break; - - default: - return COMPRESS_UNSUPPORTED_ERROR; - break; - } - - if (byte_convert) { - // deconvert dst - DecompressPageDeconvert(dst, diff_convert); - } - - return sizeOfPageHeaderData + decompressed_size; -} - -// pg_lz -/* ---------- - * pg_lzcompress.c - - * - * This is an implementation of LZ compression for PostgreSQL. - * It uses a simple history table and generates 2-3 byte tags - * capable of backward copy information for 3-273 bytes with - * a max offset of 4095. - * - * Entry routines: - * - * bool - * pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, - * const PGLZ_Strategy *strategy); - * - * source is the input data to be compressed. - * - * slen is the length of the input data. - * - * dest is the output area for the compressed result. - * It must be at least as big as PGLZ_MAX_OUTPUT(slen). - * - * strategy is a pointer to some information controlling - * the compression algorithm. If NULL, the compiled - * in default strategy is used. - * - * The return value is TRUE if compression succeeded, - * FALSE if not; in the latter case the contents of dest - * are undefined. - * - * void - * pglz_decompress(const PGLZ_Header *source, char *dest) - * - * source is the compressed input. - * - * dest is the area where the uncompressed data will be - * written to. It is the caller's responsibility to - * provide enough space. The required amount can be - * obtained with the macro PGLZ_RAW_SIZE(source). - * - * The data is written to dest exactly as it was handed - * to pglz_compress(). No terminating zero byte is added. - * - * The decompression algorithm and internal data format: - * - * PGLZ_Header is defined as - * - * typedef struct PGLZ_Header { - * int32 vl_len_; - * int32 rawsize; - * } - * - * The header is followed by the compressed data itself. - * - * The data representation is easiest explained by describing - * the process of decompression. - * - * If VARSIZE(x) == rawsize + sizeof(PGLZ_Header), then the data - * is stored uncompressed as plain bytes. Thus, the decompressor - * simply copies rawsize bytes from the location after the - * header to the destination. - * - * Otherwise the first byte after the header tells what to do - * the next 8 times. We call this the control byte. 
- * - * An unset bit in the control byte means that one uncompressed - * byte follows, which is copied from input to output. - * - * A set bit in the control byte means that a tag of 2-3 bytes - * follows. A tag contains information to copy some bytes that - * are already in the output buffer, to the current location in - * the output. Let's call the three tag bytes T1, T2 and T3. The - * position of the data to copy is coded as an offset from the - * actual output position. - * - * The offset is in the upper nibble of T1 and in T2. - * The length is in the lower nibble of T1. - * - * So the 16 bits of a 2 byte tag are coded as - * - * 7---T1--0 7---T2--0 - * OOOO LLLL OOOO OOOO - * - * This limits the offset to 1-4095 (12 bits) and the length - * to 3-18 (4 bits) because 3 is always added to it. To emit - * a tag of 2 bytes with a length of 2 only saves one control - * bit. But we lose one byte in the possible length of a tag. - * - * In the actual implementation, the 2 byte tag's length is - * limited to 3-17, because the value 0xF in the length nibble - * has special meaning. It means that the next following - * byte (T3) has to be added to the length value of 18. That - * makes total limits of 1-4095 for offset and 3-273 for length. - * - * Now that we have successfully decoded a tag, we simply copy - * the output that occurred <offset> bytes back to the current - * output location for the specified <length>. Thus, a - * sequence of 200 spaces (think about bpchar fields) could be - * coded in 4 bytes. One literal space and a three byte tag to - * copy 199 bytes with a -1 offset. Wow - that's a compression - * rate of 98%! Well, the implementation needs to save the - * original data size too, so we need another 4 bytes for it - * and end up with a total compression rate of 96%, which is still - * worth a wow. - * - * The compression algorithm - * - * The following uses numbers used in the default strategy. - * - * The compressor works best for attributes of a size between - * 1K and 1M. For smaller items there's not that much chance of - * redundancy in the character sequence (except for large areas - * of identical bytes like trailing spaces) and for bigger ones - * our 4K maximum look-back distance is too small. - * - * The compressor creates a table for 8192 lists of positions. - * For each input position (except the last 3), a hash key is - * built from the 4 next input bytes and the position remembered - * in the appropriate list. Thus, the table points to linked - * lists of strings that likely match in at least the first 4 - * characters. This is done on the fly while the input - * is compressed into the output area. Table entries are only - * kept for the last 4096 input positions, since we cannot use - * back-pointers larger than that anyway. - * - * For each byte in the input, its hash key (built from this - * byte and the next 3) is used to find the appropriate list - * in the table. The lists remember the positions of all bytes - * that had the same hash key in the past in increasing backward - * offset order. Now for all entries in the used lists, the - * match length is computed by comparing the characters from the - * entry's position with the characters from the actual input - * position. - * - * The compressor starts with a so-called "good_match" of 128. - * It is a "prefer speed against compression ratio" optimizer. - * So if the first entry looked at already has 128 or more - * matching characters, the lookup stops and that position is - * used for the next tag in the output. 
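As a sanity check of the tag layout just described, here is a minimal standalone decoder sketch; decode_tag and the sample tag bytes are hypothetical, but the nibble arithmetic matches what lz_decompress performs further below.

```c
#include <stdio.h>

/* Decode one pglz back-reference tag: T1 = OOOOLLLL carries the high
 * nibble of the offset and the length nibble, T2 is the low offset
 * byte, and a length nibble of 0xF means byte T3 extends length 18. */
static void decode_tag(const unsigned char *t, int *len, int *off)
{
    *len = (t[0] & 0x0f) + 3;
    *off = ((t[0] & 0xf0) << 4) | t[1];
    if (*len == 18)                 /* length nibble was 0xF */
        *len += t[2];
}

int main(void)
{
    unsigned char tag2[] = {0x35, 0x01};        /* 2-byte tag */
    unsigned char tag3[] = {0x3f, 0x01, 0xc9};  /* 3-byte tag */
    int len, off;

    decode_tag(tag2, &len, &off);
    printf("len=%d off=%d\n", len, off);        /* len=8 off=769 */
    decode_tag(tag3, &len, &off);
    printf("len=%d off=%d\n", len, off);        /* len=219 off=769 */
    return 0;
}
```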
- * - * For each subsequent entry in the history list, the "good_match" - * is lowered by 10%. So the compressor will be happier with - * short matches the farther it has to go back in the history. - * Another "speed against ratio" preference characteristic of - * the algorithm. - * - * Thus there are 3 stop conditions for the lookup of matches: - * - * - a match >= good_match is found - * - there are no more history entries to look at - * - the next history entry is already too far back - * to be coded into a tag. - * - * Finally the match algorithm checks that at least a match - * of 3 or more bytes has been found, because that's the smallest - * amount of copy information to code into a tag. If so, a tag - * is emitted and all the input bytes covered by that are just - * scanned for the history add's, otherwise a literal character - * is emitted and only its history entry added. - * - * Acknowledgements: - * - * Many thanks to Adisak Pochanayon, whose article about SLZ - * inspired me to write the PostgreSQL compression this way. - * - * Jan Wieck - * - * Copyright (c) 1999-2012, PostgreSQL Global Development Group - * - * src/backend/utils/adt/pg_lzcompress.c - * ---------- - */ -#include "postgres.h" -#include "knl/knl_variable.h" - -#include <limits.h> - -#include "utils/pg_lzcompress.h" - -/* ---------- - * The provided standard strategies - * ---------- - */ -static const PGLZ_Strategy strategy_default_data = { - 32, /* Data chunks less than 32 bytes are not - * compressed */ - INT_MAX, /* No upper limit on what we'll try to - * compress */ - 25, /* Require 25% compression rate, or not worth - * it */ - 1024, /* Give up if no compression in the first 1KB */ - 128, /* Stop history lookup if a match of 128 bytes - * is found */ - 10 /* Lower good match size by 10% at every loop - * iteration */ -}; -const PGLZ_Strategy* const PGLZ_strategy_default = &strategy_default_data; - -static const PGLZ_Strategy strategy_always_data = { - 0, /* Chunks of any size are compressed */ - INT_MAX, - 0, /* It's enough to save one single byte */ - INT_MAX, /* Never give up early */ - 128, /* Stop history lookup if a match of 128 bytes - * is found */ - 6 /* Look harder for a good match */ -}; -const PGLZ_Strategy* const PGLZ_strategy_always = &strategy_always_data; - -/* ---------- - * pglz_hist_idx - - * - * Computes the history table slot for the lookup by the next 4 - * characters in the input. - * - * NB: because we use the next 4 characters, we are not guaranteed to - * find 3-character matches; they very possibly will be in the wrong - * hash list. This seems an acceptable tradeoff for spreading out the - * hash keys more. - * ---------- - */ -#define pglz_hist_idx(_s, _e) \ - (((((_e) - (_s)) < 4) ? (int)(_s)[0] \ - : (((unsigned char)((_s)[0]) << 9) ^ ((unsigned char)((_s)[1]) << 6) ^ \ - ((unsigned char)((_s)[2]) << 3) ^ (unsigned char)((_s)[3]))) & \ - (PGLZ_HISTORY_MASK)) - -/* ---------- - * pglz_hist_add - - * - * Adds a new entry to the history table. - * - * If _recycle is true, then we are recycling a previously used entry, - * and must first delink it from its old hashcode's linked list. - * - * NOTE: beware of multiple evaluations of macro's arguments, and note that - * _hn and _recycle are modified in the macro. 
- * ---------- - */ -#define pglz_hist_add(_hs, _he, _hn, _recycle, _s, _e) \ - do { \ - int __hindex = pglz_hist_idx((_s), (_e)); \ - PGLZ_HistEntry** __myhsp = &(_hs)[__hindex]; \ - PGLZ_HistEntry* __myhe = &(_he)[_hn]; \ - if (_recycle) { \ - if (__myhe->prev == NULL) \ - (_hs)[__myhe->hindex] = __myhe->next; \ - else \ - __myhe->prev->next = __myhe->next; \ - if (__myhe->next != NULL) \ - __myhe->next->prev = __myhe->prev; \ - } \ - __myhe->next = *__myhsp; \ - __myhe->prev = NULL; \ - __myhe->hindex = __hindex; \ - __myhe->pos = (_s); \ - if (*__myhsp != NULL) \ - (*__myhsp)->prev = __myhe; \ - *__myhsp = __myhe; \ - if (++(_hn) >= PGLZ_HISTORY_SIZE) { \ - (_hn) = 0; \ - (_recycle) = true; \ - } \ - } while (0) - -/* ---------- - * pglz_out_ctrl - - * - * Outputs the last and allocates a new control byte if needed. - * ---------- - */ -#define pglz_out_ctrl(__ctrlp, __ctrlb, __ctrl, __buf) \ - do { \ - if ((((unsigned char)(__ctrl)) & 0xff) == 0) { \ - *(__ctrlp) = __ctrlb; \ - __ctrlp = (__buf)++; \ - __ctrlb = 0; \ - __ctrl = 1; \ - } \ - } while (0) - -/* ---------- - * pglz_out_literal - - * - * Outputs a literal byte to the destination buffer including the - * appropriate control bit. - * ---------- - */ -#define pglz_out_literal(_ctrlp, _ctrlb, _ctrl, _buf, _byte) \ - do { \ - pglz_out_ctrl(_ctrlp, _ctrlb, _ctrl, _buf); \ - *(_buf)++ = (unsigned char)(_byte); \ - (_ctrl) <<= 1; \ - } while (0) - -/* ---------- - * pglz_out_tag - - * - * Outputs a backward reference tag of 2-4 bytes (depending on - * offset and length) to the destination buffer including the - * appropriate control bit. - * ---------- - */ -#define pglz_out_tag(_ctrlp, _ctrlb, _ctrl, _buf, _len, _off) \ - do { \ - pglz_out_ctrl(_ctrlp, _ctrlb, _ctrl, _buf); \ - (_ctrlb) |= (_ctrl); \ - (_ctrl) <<= 1; \ - if ((_len) > 17) { \ - (_buf)[0] = (unsigned char)((((uint32)(_off)&0xf00) >> 4) | 0x0f); \ - (_buf)[1] = (unsigned char)(((uint32)(_off)&0xff)); \ - (_buf)[2] = (unsigned char)((_len)-18); \ - (_buf) += 3; \ - } else { \ - (_buf)[0] = (unsigned char)((((uint32)(_off)&0xf00) >> 4) | ((uint32)(_len)-3)); \ - (_buf)[1] = (unsigned char)((uint32)(_off)&0xff); \ - (_buf) += 2; \ - } \ - } while (0) - -#define HIST_START_LEN (sizeof(PGLZ_HistEntry*) * PGLZ_HISTORY_LISTS) -#define HIST_ENTRIES_LEN (sizeof(PGLZ_HistEntry) * PGLZ_HISTORY_SIZE) - -#define PGLZ_MAX_HISTORY_LISTS 8192 /* must be power of 2 */ -static PGLZ_HistEntry* hist_start[PGLZ_MAX_HISTORY_LISTS]; -static PGLZ_HistEntry hist_entries[PGLZ_HISTORY_SIZE + 1]; - -/* ---------- - * pglz_find_match - - * - * Lookup the history table if the actual input stream matches - * another sequence of characters, starting somewhere earlier - * in the input buffer. - * ---------- - */ -static inline int pglz_find_match( - PGLZ_HistEntry** hstart, const char* input, const char* end, int* lenp, int* offp, int good_match, int good_drop) -{ - PGLZ_HistEntry* hent = NULL; - int32 len = 0; - int32 off = 0; - - /* - * Traverse the linked history list until a good enough match is found. - */ - hent = hstart[pglz_hist_idx(input, end)]; - while (hent != NULL) { - const char* ip = input; - const char* hp = hent->pos; - int32 thisoff; - int32 thislen; - - /* - * Stop if the offset does not fit into our tag anymore. - */ - thisoff = ip - hp; - if (thisoff >= 0x0fff) - break; - - /* - * Determine length of match. A better match must be larger than the - * best so far. 
And if we already have a match of 16 or more bytes, - * it's worth the call overhead to use memcmp() to check if this match - * is equal for the same size. After that we must fallback to - * character by character comparison to know the exact position where - * the diff occurred. - */ - thislen = 0; - if (len >= 16) { - if (memcmp(ip, hp, len) == 0) { - thislen = len; - ip += len; - hp += len; - while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH) { - thislen++; - ip++; - hp++; - } - } - } else { - while (ip < end && *ip == *hp && thislen < PGLZ_MAX_MATCH) { - thislen++; - ip++; - hp++; - } - } - - /* - * Remember this match as the best (if it is) - */ - if (thislen > len) { - len = thislen; - off = thisoff; - } - - /* - * Advance to the next history entry - */ - hent = hent->next; - - /* - * Be happy with lesser good matches the more entries we visited. But - * no point in doing calculation if we're at end of list. - */ - if (hent != NULL) { - if (len >= good_match) - break; - good_match -= (good_match * good_drop) / 100; - } - } - - /* - * Return match information only if it results at least in one byte - * reduction. - */ - if (len > 2) { - *lenp = len; - *offp = off; - return 1; - } - - return 0; -} - -/* ---------- - * pglz_compress - - * - * Compresses source into dest using strategy. - * ---------- - */ -bool pglz_compress(const char* source, int32 slen, PGLZ_Header* dest, const PGLZ_Strategy* strategy) -{ - unsigned char* bp = ((unsigned char*)dest) + sizeof(PGLZ_Header); - unsigned char* bstart = bp; - int hist_next = 0; - bool hist_recycle = false; - const char* dp = source; - const char* dend = source + slen; - unsigned char ctrl_dummy = 0; - unsigned char* ctrlp = &ctrl_dummy; - unsigned char ctrlb = 0; - unsigned char ctrl = 0; - bool found_match = false; - int32 match_len; - int32 match_off; - int32 good_match; - int32 good_drop; - int32 result_size; - int32 result_max; - int32 need_rate; - - /* - * Our fallback strategy is the default. - */ - if (strategy == NULL) - strategy = PGLZ_strategy_default; - - /* - * If the strategy forbids compression (at all or if source chunk size out - * of range), fail. - */ - if (strategy->match_size_good <= 0 || slen < strategy->min_input_size || slen > strategy->max_input_size) - return false; - - /* - * Save the original source size in the header. - */ - dest->rawsize = slen; - - /* - * Limit the match parameters to the supported range. - */ - good_match = strategy->match_size_good; - if (good_match > PGLZ_MAX_MATCH) - good_match = PGLZ_MAX_MATCH; - else if (good_match < 17) - good_match = 17; - - good_drop = strategy->match_size_drop; - if (good_drop < 0) - good_drop = 0; - else if (good_drop > 100) - good_drop = 100; - - need_rate = strategy->min_comp_rate; - if (need_rate < 0) - need_rate = 0; - else if (need_rate > 99) - need_rate = 99; - - /* - * Compute the maximum result size allowed by the strategy, namely the - * input size minus the minimum wanted compression rate. This had better - * be <= slen, else we might overrun the provided output buffer. - */ - if (slen > (INT_MAX / 100)) { - /* Approximate to avoid overflow */ - result_max = (slen / 100) * (100 - need_rate); - } else - result_max = (slen * (100 - need_rate)) / 100; - - /* - * Initialize the history lists to empty. We do not need to zero the - * hist_entries[] array; its entries are initialized as they are used. 
- */ - errno_t rc = memset_s(hist_start, HIST_START_LEN, 0, HIST_START_LEN); - securec_check(rc, "\0", "\0"); - - /* - * Compress the source directly into the output buffer. - */ - while (dp < dend) { - /* - * If we already exceeded the maximum result size, fail. - * - * We check once per loop; since the loop body could emit as many as 4 - * bytes (a control byte and 3-byte tag), PGLZ_MAX_OUTPUT() had better - * allow 4 slop bytes. - */ - if (bp - bstart >= result_max) - return false; - - /* - * If we've emitted more than first_success_by bytes without finding - * anything compressible at all, fail. This lets us fall out - * reasonably quickly when looking at incompressible input (such as - * pre-compressed data). - */ - if (!found_match && bp - bstart >= strategy->first_success_by) - return false; - - /* - * Try to find a match in the history - */ - if (pglz_find_match(hist_start, dp, dend, &match_len, &match_off, good_match, good_drop)) { - /* - * Create the tag and add history entries for all matched - * characters. - */ - pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off); - while (match_len--) { - pglz_hist_add( - hist_start, hist_entries, hist_next, hist_recycle, dp, dend); - dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ - } - found_match = true; - } else { - /* - * No match found. Copy one literal byte. - */ - pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp); - pglz_hist_add( - hist_start, hist_entries, hist_next, hist_recycle, dp, dend); - dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ - } - } - - /* - * Write out the last control byte and check that we haven't overrun the - * output size allowed by the strategy. - */ - *ctrlp = ctrlb; - result_size = bp - bstart; - if (result_size >= result_max) - return false; - - /* - * Success - need only fill in the actual length of the compressed datum. - */ - SET_VARSIZE_COMPRESSED(dest, result_size + sizeof(PGLZ_Header)); - - return true; -} - -/* ---------- - * lz_compress - - * - * Compresses source into dest using strategy. Returns the number of - * bytes written in buffer dest, or -1 if compression fails. - * ---------- - */ -int32 lz_compress(const char* source, int32 slen, char* dest) -{ - unsigned char* bp = (unsigned char*) dest; - unsigned char* bstart = bp; - int hist_next = 0; - bool hist_recycle = false; - const char* dp = source; - const char* dend = source + slen; - unsigned char ctrl_dummy = 0; - unsigned char* ctrlp = &ctrl_dummy; - unsigned char ctrlb = 0; - unsigned char ctrl = 0; - bool found_match = false; - int32 match_len; - int32 match_off; - int32 good_match; - int32 good_drop; - int32 result_size; - int32 result_max; - int32 need_rate; - errno_t rc; - - const PGLZ_Strategy* strategy = PGLZ_strategy_always; - /* - * Our fallback strategy is the default. - */ - if (strategy == NULL) { - strategy = PGLZ_strategy_default; - } - - /* - * If the strategy forbids compression (at all or if source chunk size out - * of range), fail. - */ - if (strategy->match_size_good <= 0 || slen < strategy->min_input_size || slen > strategy->max_input_size) { - return -1; - } - - /* - * Limit the match parameters to the supported range. 
- */ - good_match = strategy->match_size_good; - if (good_match > PGLZ_MAX_MATCH) { - good_match = PGLZ_MAX_MATCH; - } else if (good_match < 17) { - good_match = 17; - } - - good_drop = strategy->match_size_drop; - if (good_drop < 0) { - good_drop = 0; - } else if (good_drop > 100) { - good_drop = 100; - } - - need_rate = strategy->min_comp_rate; - if (need_rate < 0) { - need_rate = 0; - } else if (need_rate > 99) { - need_rate = 99; - } - - /* - * Compute the maximum result size allowed by the strategy, namely the - * input size minus the minimum wanted compression rate. This had better - * be <= slen, else we might overrun the provided output buffer. - */ - if (slen > (INT_MAX / 100)) { - /* Approximate to avoid overflow */ - result_max = (slen / 100) * (100 - need_rate); - } else { - result_max = (slen * (100 - need_rate)) / 100; - } - - /* - * Initialize the history lists to empty. We do not need to zero the - * hist_entries[] array; its entries are initialized as they are used. - */ - rc = memset_s(hist_start, HIST_START_LEN, 0, HIST_START_LEN); - securec_check(rc, "\0", "\0"); - - /* - * Compress the source directly into the output buffer. - */ - while (dp < dend) { - /* - * If we already exceeded the maximum result size, fail. - * - * We check once per loop; since the loop body could emit as many as 4 - * bytes (a control byte and 3-byte tag), PGLZ_MAX_OUTPUT() had better - * allow 4 slop bytes. - */ - if (bp - bstart >= result_max) { - return -1; - } - - /* - * If we've emitted more than first_success_by bytes without finding - * anything compressible at all, fail. This lets us fall out - * reasonably quickly when looking at incompressible input (such as - * pre-compressed data). - */ - if (!found_match && bp - bstart >= strategy->first_success_by) { - return -1; - } - - /* - * Try to find a match in the history - */ - if (pglz_find_match(hist_start, dp, dend, &match_len, &match_off, good_match, good_drop)) { - /* - * Create the tag and add history entries for all matched - * characters. - */ - pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off); - while (match_len--) { - pglz_hist_add( - hist_start, hist_entries, hist_next, hist_recycle, dp, - dend); - dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ - } - found_match = true; - } else { - /* - * No match found. Copy one literal byte. - */ - pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp); - pglz_hist_add( - hist_start, hist_entries, hist_next, hist_recycle, dp, dend); - dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ - } - } - - /* - * Write out the last control byte and check that we haven't overrun the - * output size allowed by the strategy. - */ - *ctrlp = ctrlb; - result_size = bp - bstart; - if (result_size >= result_max) { - return -1; - } - - /* success */ - return result_size; -} - -/* ---------- - * pglz_decompress - - * - * Decompresses source into dest. Returns the number of bytes - * decompressed in the destination buffer, and *optionally* - * checks that both the source and dest buffers have been - * fully read and written to, respectively. 
- * ---------- - */ -int32 lz_decompress(const char* source, int32 slen, char* dest, int32 rawsize, bool check_complete) -{ - const unsigned char* sp; - const unsigned char* srcend; - unsigned char* dp; - unsigned char* destend; - errno_t rc = 0; - - sp = (const unsigned char*) source; - srcend = ((const unsigned char*) source) + slen; - dp = (unsigned char*) dest; - destend = dp + rawsize; - - while (sp < srcend && dp < destend) { - /* - * Read one control byte and process the next 8 items (or as many as - * remain in the compressed input). - */ - unsigned char ctrl = *sp++; - int ctrlc; - - for (ctrlc = 0; ctrlc < 8 && sp < srcend && dp < destend; ctrlc++) { - - if (ctrl & 1) { - /* - * Set control bit means we must read a match tag. The match - * is coded with two bytes. First byte uses lower nibble to - * code length - 3. Higher nibble contains upper 4 bits of the - * offset. The next following byte contains the lower 8 bits - * of the offset. If the length is coded as 18, another - * extension tag byte tells how much longer the match really - * was (0-255). - */ - int32 len; - int32 off; - - len = (sp[0] & 0x0f) + 3; - off = ((sp[0] & 0xf0) << 4) | sp[1]; - sp += 2; - if (len == 18) { - len += *sp++; - } - - /* - * Now we copy the bytes specified by the tag from OUTPUT to - * OUTPUT (copy len bytes from dp - off to dp). The copied - * areas could overlap; to prevent possible uncertainty, we - * copy only non-overlapping regions. - */ - len = Min(len, destend - dp); - while (off < len) { - /*--------- - * When offset is smaller than length - source and - * destination regions overlap. memmove() resolves - * this overlap in a way incompatible with pglz. Thus we - * resort to memcpy()-ing non-overlapping regions. - * - * Consider input: 112341234123412341234 - * At byte 5 here ^ we have match with length 16 and - * offset 4. 11234M(len=16, off=4) - * We decode the first period of the match and rewrite the match - * 112341234M(len=12, off=8) - * - * The same match is now at position 9, it points to the - * same start byte of output, but from another position: - * the offset is doubled. - * - * We iterate through this offset growth until we can - * proceed to usual memcpy(). If we tried to decode - * the match at byte 5 (len=16, off=4) by memmove() we - * would issue memmove(5, 1, 16) which would produce - * 112341234XXXXXXXXXXXX, where the series of X is 12 - * undefined bytes that were at bytes [5:17]. - * --------- - */ - errno_t rc = memcpy_s(dp, off + 1, dp - off, off); - securec_check(rc, "", ""); - len -= off; - dp += off; - off += off; - } - rc = memcpy_s(dp, len + 1, dp - off, len); - securec_check(rc, "", ""); - dp += len; - } else { - /* - * An unset control bit means LITERAL BYTE. So we just copy - * one from INPUT to OUTPUT. - */ - *dp++ = *sp++; - } - - /* - * Advance the control bit - */ - ctrl >>= 1; - } - } - - /* - * Check we decompressed the right amount. If we are slicing, then we - * won't necessarily be at the end of the source or dest buffers when we - * hit a stop, so we don't test them. - */ - if (check_complete && (dp != destend || sp != srcend)) { - return -1; - } - - /* - * That's it. 
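The doubling-offset trick in the comment above can be exercised in isolation. In this minimal sketch, overlap_copy and the toy buffer are hypothetical, but the loop has the same shape as the one just shown:

```c
#include <stdio.h>
#include <string.h>

/* Overlap-safe back-reference copy: while off < len the source and
 * destination regions overlap, so copy disjoint chunks whose size
 * doubles each pass instead of issuing one overlapping memmove(). */
static void overlap_copy(char *dp, int off, int len)
{
    while (off < len) {
        memcpy(dp, dp - off, off);  /* regions are disjoint here */
        len -= off;
        dp += off;
        off += off;                 /* the repeat period doubles */
    }
    memcpy(dp, dp - off, len);
}

int main(void)
{
    char buf[32] = "1234";          /* bytes already decompressed */
    overlap_copy(buf + 4, 4, 16);   /* tag: len=16, off=4 */
    buf[20] = '\0';
    printf("%s\n", buf);            /* 12341234123412341234 */
    return 0;
}
```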
- */ - return (char*) dp - dest; -} - - -int CompressPage(const char* src, char* dst, int dst_size, RelFileCompressOption option) -{ - if (PageIs8BXidHeapVersion(src)) { - return TemplateCompressPage<true>(src, dst, dst_size, option); - } else { - return TemplateCompressPage<false>(src, dst, dst_size, option); - } -} - -void CompressConvertRows(char *buf, char *aux_buf, int16 *real_order, uint16 max_row_len, uint16 real_row_cnt) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = real_row_cnt; - uint32 total_size = page->pd_special - page->pd_upper; - char *copy_begin = buf + page->pd_upper; - char *row; - uint16 i, j, k, cur, up, row_size; - - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - k = 0; - for (i = 0; i < max_row_len; i++) { - for (j = 0; j < row_cnt; j++) { - up = (j == (row_cnt - 1)) ? page->pd_special : GET_ITEMID_BY_IDX(buf, (real_order[j + 1]))->lp_off; - cur = GET_ITEMID_BY_IDX(buf, (real_order[j]))->lp_off; - row_size = up - cur; - row = buf + cur; - if (i < row_size) { - aux_buf[k++] = row[i]; // this part is reshaped - } - } - } - - if (k != total_size) { - printf("ERROR!!! convert_rows_2 error...!!!\n"); - ASSERT(0); - return; - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf, total_size); - securec_check(ret, "", ""); - return ; -} - -void CompressConvertItemIds(char *buf, char *aux_buf) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - uint32 total_size = row_cnt * sizeof(ItemIdData); - char *copy_begin = buf + GetPageHeaderSize(page); - uint16 i, j, k; - - // clear aux_buf - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - k = 0; - for (i = 0; i < row_cnt; i++) { - for (j = 0; j < sizeof(ItemIdData); j++) { - aux_buf[j * row_cnt + i] = copy_begin[k++]; - } - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf, total_size); - securec_check(ret, "", ""); - return ; -} - -void cprs_diff_convert_rows(char *buf, uint32 offset, uint16 min_row_len, uint16 real_row_cnt) { - uint16 row_cnt = real_row_cnt; - uint32 common_size = min_row_len; - uint8 *copy_begin = (uint8 *)(buf + offset); - uint16 i, j; - - for (i = 0; i < common_size; i++) { - for (j = row_cnt - 1; j > 0; j--) { - copy_begin[i * row_cnt + j] -= copy_begin[i * row_cnt + (j - 1)]; - } - } - return ; -} - -bool CompressConvertOnePage(char *buf, char *aux_buf, bool diff_convert) { - uint16 max_row_len = 0; - uint16 min_row_len = 0; - int16 *real_order = NULL; // itemids are not in order sometimes. 
we must find the real order. - uint16 real_row_cnt = 0; - if (!CompressConvertCheck(buf, &real_order, &max_row_len, &min_row_len, &real_row_cnt)) { - if (real_order != NULL) { - free(real_order); - } - return false; - } - - CompressConvertRows(buf, aux_buf, real_order, max_row_len, real_row_cnt); - CompressConvertItemIds(buf, aux_buf); - - if (diff_convert) { - cprs_diff_convert_rows(buf, ((HeapPageHeaderData *)buf)->pd_upper, min_row_len, real_row_cnt); - cprs_diff_convert_rows(buf, GetPageHeaderSize(buf), sizeof(ItemIdData), - (((HeapPageHeaderData *)buf)->pd_lower - GetPageHeaderSize(buf)) / sizeof(ItemIdData)); - } - - if (real_order != NULL) { - free(real_order); - } - return true; -} - -void CompressPagePrepareConvert(char *src, bool diff_convert, bool *real_ByteConvert) -{ - char *aux_buf = NULL; - errno_t rc; - - aux_buf = (char *)malloc(BLCKSZ); - if (aux_buf == NULL) { - // add log - return; - } - rc = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "", ""); - - // do convert - *real_ByteConvert = false; - if (CompressConvertOnePage(src, aux_buf, diff_convert)) { - *real_ByteConvert = true; - } - - if (aux_buf != NULL) { - free(aux_buf); - } -} - - -/** - * CompressPage() -- Compress one page. - * - * Only the parts other than the page header will be compressed. The - * compressed data is rounded up to chunk_size; the insufficient part is - * filled with zeros. Compression needs to be able to save at least one - * chunk of space, otherwise it fails. - * This function returns the size of the compressed data, or - * -1 for compression failure - * COMPRESS_UNSUPPORTED_ERROR for unrecognized compression algorithm - */ -template <bool heapPageData> -int TemplateCompressPage(const char* src, char* dst, int dst_size, RelFileCompressOption option) -{ - int compressed_size; - int8 level = option.compressLevelSymbol ? 
option.compressLevel : -option.compressLevel; - size_t sizeOfHeaderData = GetSizeOfHeadData(heapPageData); - char *src_copy = NULL; - bool real_ByteConvert = false; - errno_t rc; - char* data; - - if (option.byteConvert) { - // copy and maybe change it - src_copy = (char *)malloc(BLCKSZ); - if (src_copy == NULL) { - // add log - return -1; - } - rc = memcpy_s(src_copy, BLCKSZ, src, BLCKSZ); - securec_check(rc, "", ""); - CompressPagePrepareConvert(src_copy, option.diffConvert, &real_ByteConvert); /* preprocess convert src */ - } - - if (heapPageData) { - data = ((HeapPageCompressData*)dst)->data; - } else { - data = ((PageCompressData*)dst)->data; - } - - switch (option.compressAlgorithm) { - case COMPRESS_ALGORITHM_PGLZ: - if (real_ByteConvert) { - compressed_size = lz_compress(src_copy + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, data); - } else { - compressed_size = lz_compress(src + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, data); - } - break; - case COMPRESS_ALGORITHM_ZSTD: { - if (level == 0 || level < MIN_ZSTD_COMPRESSION_LEVEL || level > MAX_ZSTD_COMPRESSION_LEVEL) { - level = DEFAULT_ZSTD_COMPRESSION_LEVEL; - } - - if (real_ByteConvert) { - compressed_size = ZSTD_compress(data, dst_size, src_copy + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, level); - } else { - compressed_size = ZSTD_compress(data, dst_size, src + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, level); - } - - if (ZSTD_isError(compressed_size)) { - if (src_copy != NULL) { - free(src_copy); - } - return -1; - } - break; - } - default: - if (src_copy != NULL) { - free(src_copy); - } - return COMPRESS_UNSUPPORTED_ERROR; - } - - if (compressed_size < 0) { - if (src_copy != NULL) { - free(src_copy); - } - return -1; - } - - if (heapPageData) { - HeapPageCompressData* pcdptr = ((HeapPageCompressData*)dst); - rc = memcpy_s(pcdptr->page_header, sizeOfHeaderData, src, sizeOfHeaderData); - securec_check(rc, "", ""); - pcdptr->size = compressed_size; - pcdptr->byte_convert = real_ByteConvert; - pcdptr->diff_convert = option.diffConvert; - } else { - PageCompressData* pcdptr = ((PageCompressData*)dst); - rc = memcpy_s(pcdptr->page_header, sizeOfHeaderData, src, sizeOfHeaderData); - securec_check(rc, "", ""); - pcdptr->size = compressed_size; - pcdptr->byte_convert = real_ByteConvert; - pcdptr->diff_convert = option.diffConvert; - } - - if (src_copy != NULL) { - free(src_copy); - } - return SIZE_OF_PAGE_COMPRESS_DATA_HEADER_DATA(heapPageData) + compressed_size; -} - -/** - * CompressPageBufferBound() - * -- Get the destination buffer boundary to compress one page. - * Return needed destination buffer size for compress one page or - * -1 for unrecognized compression algorithm - */ -int CompressPageBufferBound(const char* page, uint8 algorithm) -{ - switch (algorithm) { - case COMPRESS_ALGORITHM_PGLZ: - return BLCKSZ + 4; - case COMPRESS_ALGORITHM_ZSTD: - return ZSTD_compressBound(BLCKSZ - GetPageHeaderSize(page)); - default: - return -1; - } -} - - diff --git a/contrib/pagehack/openGaussCompression.cpp b/contrib/pagehack/openGaussCompression.cpp deleted file mode 100644 index b94954ca8..000000000 --- a/contrib/pagehack/openGaussCompression.cpp +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2012-2018. All rights reserved. 
- */ - -#include "openGaussCompression.h" -#include "storage/checksum_impl.h" -#include "storage/page_compression_impl.h" - -void OpenGaussCompression::SetFilePath(const char *filePath, int segNo) -{ - int rc = snprintf_s(pcaFilePath, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, filePath); - securec_check_ss_c(rc, "\0", "\0"); - rc = snprintf_s(pcdFilePath, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, filePath); - securec_check_ss_c(rc, "\0", "\0"); - - this->segmentNo = segNo; -} - -OpenGaussCompression::~OpenGaussCompression() -{ - if (pcaFd != nullptr) { - fclose(pcaFd); - } - if (pcdFd != nullptr) { - fclose(pcdFd); - } - if (header != nullptr) { - pc_munmap(header); - } -} - -bool OpenGaussCompression::TryOpen() -{ - if ((pcaFd = fopen(this->pcaFilePath, "rb+")) == nullptr) { - return false; - } - if ((pcdFd = fopen(this->pcdFilePath, "rb+")) == nullptr) { - return false; - } - if (fseeko(pcaFd, (off_t)offsetof(PageCompressHeader, chunk_size), SEEK_SET) != 0) { - return false; - } - if (fread(&chunkSize, sizeof(chunkSize), 1, this->pcaFd) <= 0) { - return false; - } - header = pc_mmap(fileno(pcaFd), chunkSize, false); - return true; -} -constexpr int MAX_RETRY_LIMIT = 60; -constexpr long RETRY_SLEEP_TIME = 1000000L; -bool OpenGaussCompression::ReadChunkOfBlock(char *dst, size_t *dstLen, BlockNumber blockNumber) -{ - auto currentAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber); - size_t tryCount = 0; - do { - auto chunkNum = currentAddr->nchunks; - for (uint8 i = 0; i < chunkNum; i++) { - off_t seekPos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunkSize, currentAddr->chunknos[i]); - uint8 start = i; - while (i < chunkNum - 1 && currentAddr->chunknos[i + 1] == currentAddr->chunknos[i] + 1) { - i++; - } - if (fseeko(this->pcdFd, seekPos, SEEK_SET) != 0) { - return false; - } - size_t readAmount = chunkSize * (i - start + 1); - if (fread(dst + start * chunkSize, 1, readAmount, this->pcdFd) != readAmount && ferror(this->pcdFd)) { - return false; - } - *dstLen += readAmount; - } - if (chunkNum == 0) { - break; - } - if (DecompressPage(dst, decompressedBuffer, header->algorithm) == BLCKSZ) { - break; - } - - if (tryCount < MAX_RETRY_LIMIT) { - ++tryCount; - pg_usleep(RETRY_SLEEP_TIME); - } else { - return false; - } - } while (true); - if (PageIs8BXidHeapVersion(dst)) { - byteConvert = ((HeapPageCompressData *)dst)->byte_convert; - diffConvert = ((HeapPageCompressData *)dst)->diff_convert; - } else { - byteConvert = ((PageCompressData *)dst)->byte_convert; - diffConvert = ((PageCompressData *)dst)->diff_convert; - } - this->blockNumber = blockNumber; - return true; -} - -bool OpenGaussCompression::WriteBackCompressedData(char *source, size_t sourceLen, BlockNumber blockNumber) -{ - auto currentAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber); - for (size_t i = 0; i < currentAddr->nchunks; ++i) { - off_t seekPos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunkSize, currentAddr->chunknos[i]); - if (fseeko(this->pcdFd, seekPos, SEEK_SET) != 0) { - return false; - } - Assert(sourceLen >= i * chunkSize); - auto writeCount = fwrite(source + i * chunkSize, 1, chunkSize, this->pcdFd); - bool success = chunkSize == writeCount; - if (!success) { - return false; - } - } - fflush(this->pcdFd); - return true; -} - -void OpenGaussCompression::MarkCompressedDirty(char *source, size_t sourceLen) -{ - int rc = memset_s(source + SizeOfHeapPageHeaderData, sourceLen - SizeOfHeapPageHeaderData, 0xFF, - sourceLen - SizeOfHeapPageHeaderData); - securec_check(rc, "\0", "\0"); -} - -void 
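
ReadChunkOfBlock above coalesces physically adjacent chunks so each run costs one seek plus one read instead of per-chunk I/O. A self-contained sketch of that run-detection loop, using a plain array of chunk numbers in place of the pca address table (the function name here is illustrative, not the real API):

```
#include <cstdio>
#include <cstdint>

// Coalesce consecutive chunk numbers into runs; each run can then be
// serviced by a single fseeko + fread on the pcd file.
static void PlanChunkReads(const uint32_t *chunknos, int nchunks)
{
    for (int i = 0; i < nchunks; i++) {
        int start = i;
        while (i < nchunks - 1 && chunknos[i + 1] == chunknos[i] + 1)
            i++;
        printf("read %d chunk(s) starting at chunk %u\n",
               i - start + 1, chunknos[start]);
    }
}

int main()
{
    uint32_t chunks[] = {7, 8, 9, 42, 100, 101};
    PlanChunkReads(chunks, 6);   // runs: {7..9}, {42}, {100..101}
    return 0;
}
```
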
OpenGaussCompression::MarkUncompressedDirty() -{ - constexpr int writeLen = BLCKSZ / 2; - unsigned char fill_byte[writeLen] = {0xFF}; - for (int i = 0; i < writeLen; i++) - fill_byte[i] = 0xFF; - auto rc = memcpy_s(decompressedBuffer + writeLen, BLCKSZ - writeLen, fill_byte, writeLen); - securec_check(rc, "", ""); -} - -BlockNumber OpenGaussCompression::GetMaxBlockNumber() -{ - return (BlockNumber)pg_atomic_read_u32(&header->nblocks); -} - -char *OpenGaussCompression::GetPcdFilePath() -{ - return this->pcdFilePath; -} - -char *OpenGaussCompression::GetDecompressedPage() -{ - return this->decompressedBuffer; -} - -bool OpenGaussCompression::WriteBackUncompressedData() -{ - auto algorithm = header->algorithm; - auto workBufferSize = CompressPageBufferBound(decompressedBuffer, algorithm); - if (workBufferSize < 0) { - return false; - } - char *work_buffer = (char *)malloc(workBufferSize); - RelFileCompressOption relFileCompressOption; - relFileCompressOption.compressPreallocChunks = 0; - relFileCompressOption.compressLevelSymbol = true; - relFileCompressOption.compressLevel = 1; - relFileCompressOption.compressAlgorithm = algorithm; - relFileCompressOption.byteConvert = byteConvert; - relFileCompressOption.diffConvert = diffConvert; - - auto compress_buffer_size = CompressPage(decompressedBuffer, work_buffer, workBufferSize, relFileCompressOption); - if (compress_buffer_size < 0) { - return false; - } - uint8 nchunks = (compress_buffer_size - 1) / chunkSize + 1; - auto bufferSize = chunkSize * nchunks; - if (bufferSize >= BLCKSZ) { - /* store original page if can not save space? */ - free(work_buffer); - work_buffer = (char *)decompressedBuffer; - nchunks = BLCKSZ / chunkSize; - } else { - /* fill zero in the last chunk */ - if (compress_buffer_size < bufferSize) { - auto leftSize = bufferSize - compress_buffer_size; - errno_t rc = memset_s(work_buffer + compress_buffer_size, leftSize, 0, leftSize); - securec_check(rc, "", ""); - } - } - uint8 need_chunks = nchunks; - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber); - if (pcAddr->allocated_chunks < need_chunks) { - auto chunkno = pg_atomic_fetch_add_u32(&header->allocated_chunks, need_chunks - pcAddr->allocated_chunks); - for (uint8 i = pcAddr->allocated_chunks; i < need_chunks; ++i) { - pcAddr->chunknos[i] = ++chunkno; - } - pcAddr->allocated_chunks = need_chunks; - pcAddr->nchunks = need_chunks; - } - return this->WriteBackCompressedData(work_buffer, compress_buffer_size, blockNumber); -} - -#include "compression_algorithm.ini" \ No newline at end of file diff --git a/contrib/pagehack/openGaussCompression.h b/contrib/pagehack/openGaussCompression.h deleted file mode 100644 index 11adcdc65..000000000 --- a/contrib/pagehack/openGaussCompression.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef OPENGAUSS_SERVER_OPENGAUSSCOMPRESSION_H -#define OPENGAUSS_SERVER_OPENGAUSSCOMPRESSION_H -#define FRONTEND 1 - - -#include -#include "c.h" -#include "storage/buf/block.h" -#include "storage/page_compression.h" - -class OpenGaussCompression { -private: - FILE* pcaFd = nullptr; - FILE* pcdFd = nullptr; - char pcaFilePath[MAXPGPATH]; - char pcdFilePath[MAXPGPATH]; - PageCompressHeader* header = nullptr; - -private: - int segmentNo; - BlockNumber blockNumber; - decltype(PageCompressHeader::chunk_size) chunkSize; - char decompressedBuffer[BLCKSZ]; - bool byteConvert; - bool diffConvert; - -public: - void SetFilePath(const char* filePath, int segNo); - virtual ~OpenGaussCompression(); - bool TryOpen(); - bool ReadChunkOfBlock(char* 
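
WriteBackUncompressedData above grows the pcd file by atomically reserving chunk numbers from a shared counter before recording them in the block's address entry. A sketch of that allocation step, with std::atomic standing in for pg_atomic_fetch_add_u32 (names are illustrative):

```
#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-in for the shared allocated_chunks counter in the pca header.
std::atomic<uint32_t> allocatedChunks{0};

// Reserve (need - have) new chunk numbers; chunk numbers are 1-based, so
// the freshly reserved range is (old, old + need - have].
static void AllocateChunks(uint32_t *chunknos, uint32_t have, uint32_t need)
{
    uint32_t chunkno = allocatedChunks.fetch_add(need - have);
    for (uint32_t i = have; i < need; ++i)
        chunknos[i] = ++chunkno;
}

int main()
{
    uint32_t chunknos[8] = {0};
    AllocateChunks(chunknos, 0, 3);      // first block grabs chunks 1..3
    AllocateChunks(chunknos + 3, 0, 2);  // next grab continues at 4..5
    for (int i = 0; i < 5; i++)
        printf("%u ", chunknos[i]);
    printf("\n");
    return 0;
}
```
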
dst, size_t* dstLen, BlockNumber blockNumber); - bool WriteBackCompressedData(char* source, size_t sourceLen, BlockNumber blockNumber); - bool WriteBackUncompressedData(); - void MarkCompressedDirty(char* source, size_t sourceLen); - void MarkUncompressedDirty(); - BlockNumber GetMaxBlockNumber(); - char* GetPcdFilePath(); - char* GetDecompressedPage(); -}; - -#endif // OPENGAUSS_SERVER_OPENGAUSSCOMPRESSION_H diff --git a/contrib/pagehack/pagehack.cpp b/contrib/pagehack/pagehack.cpp index ded308507..ada0eeef2 100644 --- a/contrib/pagehack/pagehack.cpp +++ b/contrib/pagehack/pagehack.cpp @@ -86,17 +86,16 @@ #include "utils/timestamp.h" #include "cstore.h" #include "common/build_query/build_query.h" +#include #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/utils/constant_def.h" #endif -#include "openGaussCompression.h" - - /* Max number of pg_class oid, currently about 4000 */ #define MAX_PG_CLASS_ID 10000 /* Number of pg_class types */ #define CLASS_TYPE_NUM 512 +#define TEN 10 typedef unsigned char* binary; static const char* indents[] = { // 10 tab is enough to used. @@ -113,6 +112,14 @@ static const char* indents[] = { // 10 tab is enough to used. "\t\t\t\t\t\t\t\t\t\t"}; static const int nIndents = sizeof(indents) / sizeof(indents[0]); static int indentLevel = 0; +static uint64 g_tdCount = 0; +static uint64 g_rpCount = 0; +static uint64 g_tdMax = 0; +static uint64 g_rpMax = 0; +static uint64 g_pageCount = 0; +static uint64 g_freeSpace = 0; +static uint64 g_freeMax = 0; + static HeapTupleData dummyTuple; // add those special tables to parse, so we can read the tuple data @@ -132,7 +139,6 @@ static const char* PgHeapRelName[] = {"pg_class", "pg_am", "pg_statistic", "pg_toast"}; -typedef enum SegmentType { SEG_HEAP, SEG_FSM, SEG_UHEAP, SEG_INDEX_BTREE, SEG_UNDO, SEG_UNKNOWN } SegmentType; static void ParsePgClassTupleData(binary tupdata, int len, binary nullBitmap, int natrrs); static void ParsePgIndexTupleData(binary tupdata, int len, binary nullBitmap, int nattrs); @@ -150,8 +156,6 @@ static void ParseToastTupleData(binary tupdata, int len, binary nullBitmap, int static void ParseTDSlot(const char *page); static void ParseToastIndexTupleData(binary tupdata, int len, binary nullBitmap, int nattrs); -static int parse_uncompressed_page_file(const char *filename, SegmentType type, const uint32 start_point, - const uint32 number_read); static ParseHeapTupleData PgHeapRelTupleParser[] = { ParsePgClassTupleData, // pg_class @@ -806,11 +810,6 @@ typedef struct TwoPhaseRecordOnDisk { uint16 info; /* flag bits for use by rmgr */ } TwoPhaseRecordOnDisk; -typedef struct TwoPhaseLockRecord { - LOCKTAG locktag; - LOCKMODE lockmode; -} TwoPhaseLockRecord; - typedef struct TwoPhaseFileHeader { uint32 magic; /* format identifier */ uint32 total_len; /* actual file length */ @@ -900,6 +899,8 @@ static const char* HACKINGTYPE[] = {"heap", "segment" }; +typedef enum SegmentType { SEG_HEAP, SEG_FSM, SEG_UHEAP, SEG_INDEX_BTREE, SEG_UNDO, SEG_UNKNOWN } SegmentType; + const char* PageTypeNames[] = {"DATA", "FSM", "VM"}; #define GETHEAPSTRUCT(TUP) ((unsigned char*)(TUP) + (TUP)->t_hoff) @@ -1284,7 +1285,7 @@ static void ParseTsCudescXXTupleData(binary tupdata, int len, binary nullBitmap, unsigned char ch = 0; unsigned char bitmask = 0; bool isnulls[nattrs] = {0}; - + if (NULL != nullBitmap) { datlen = (nattrs + 7) / 8; j = 0; @@ -1317,7 +1318,7 @@ static void ParseTsCudescXXTupleData(binary tupdata, int len, binary nullBitmap, fprintf(stdout, "\n%s" "CUId: %u", indents[indentLevel], *(uint32*)nextAttr); nextAttr += 
sizeof(uint32); } - + if (!isnulls[3]) { // rough check MIN/MAX nextAttr = (char*)att_align_pointer((long int)nextAttr, 'i', -1, nextAttr); @@ -2394,7 +2395,7 @@ static void ParsePgAttributeTupleData(binary tupdata, int len, binary nullBitmap indents[indentLevel], (pgAttributeTupData->attcollation)); fprintf(stdout, - "\n%s" "attkvtype: %d", + "\n%s" "attkvtype: %d", indents[indentLevel], (pgAttributeTupData->attkvtype)); @@ -2427,6 +2428,40 @@ static void parse_uheap_item(const Item item, unsigned len, int blkno, int linen fprintf(stdout, "\t\t\tNumber of columns: %d\n", UHeapTupleHeaderGetNatts(utuple)); fprintf(stdout, "\t\t\tFlag: %d\n", utuple->flag); + if (utuple->flag & UHEAP_HAS_NULL) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_HASNULL "); + } + if (utuple->flag & UHEAP_DELETED) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_DELETED "); + } + if (utuple->flag & UHEAP_INPLACE_UPDATED) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_INPLACE_UPDATED "); + } + if (utuple->flag & UHEAP_UPDATED) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_UPDATED "); + } + if (utuple->flag & UHEAP_XID_KEYSHR_LOCK) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_XID_KEYSHR_LOCK "); + } + if (utuple->flag & UHEAP_XID_NOKEY_EXCL_LOCK) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_XID_NOKEY_EXCL_LOCK "); + } + if (utuple->flag & UHEAP_XID_EXCL_LOCK) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_XID_EXCL_LOCK "); + } + if (utuple->flag & UHEAP_MULTI_LOCKERS) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_MULTI_LOCKERS "); + } + if (utuple->flag & UHEAP_INVALID_XACT_SLOT) { + fprintf(stdout, "\t\t\tFlag: %s\n", "UHEAP_INVALID_XACT_SLOT "); + } + if (utuple->flag & SINGLE_LOCKER_XID_IS_LOCK) { + fprintf(stdout, "\t\t\tFlag: %s\n", "SINGLE_LOCKER_XID_IS_LOCK "); + } + if (utuple->flag & SINGLE_LOCKER_XID_IS_SUBXACT) { + fprintf(stdout, "\t\t\tFlag: %s\n", "SINGLE_LOCKER_XID_IS_SUBXACT "); + } + fprintf(stdout, "%sdata:", indents[indentLevel]); content = ((unsigned char *)(utuple) + utuple->t_hoff); len -= utuple->t_hoff; @@ -2509,11 +2544,10 @@ static void parse_heap_item(const Item item, unsigned len, int blkno, int lineno tup->t_infomask &= ~HEAP_HASEXTERNAL; tup->t_infomask &= ~HEAP_XMAX_COMMITTED; tup->t_infomask &= ~HEAP_COMBOCID; - tup->t_infomask &= ~HEAP_MOVED_IN; tup->t_infomask |= HEAP_XMIN_INVALID; fprintf(stdout, "force writer tup->t_infomask to HEAP_XMIN_INVALID and clean HEAP_XMIN_COMMITTED | HEAP_COMPRESSED | " - "HEAP_HASEXTERNAL | HEAP_XMAX_COMMITTED | HEAP_COMBOCID | HEAP_MOVED_IN"); + "HEAP_HASEXTERNAL | HEAP_XMAX_COMMITTED | HEAP_COMBOCID"); } if (tup->t_infomask & HEAP_XMAX_COMMITTED) fprintf(stdout, "HEAP_XMAX_COMMITTED "); @@ -2523,10 +2557,11 @@ static void parse_heap_item(const Item item, unsigned len, int blkno, int lineno fprintf(stdout, "HEAP_XMAX_IS_MULTI "); if (tup->t_infomask & HEAP_UPDATED) fprintf(stdout, "HEAP_UPDATED "); - if (tup->t_infomask & HEAP_MOVED_OFF) - fprintf(stdout, "HEAP_MOVED_OFF "); - if (tup->t_infomask & HEAP_MOVED_IN) - fprintf(stdout, "HEAP_MOVED_IN "); + if ((tup->t_infomask & HEAP_HAS_8BYTE_UID)) { + fprintf(stdout, "HEAP_HAS_8BYTE_UID "); + } else { + fprintf(stdout, "HEAP_HAS_NO_UID "); + } fprintf(stdout, "\n"); fprintf(stdout, "%st_infomask2: ", indents[indentLevel]); @@ -2644,6 +2679,7 @@ static void parse_one_item(const Item item, unsigned len, int blkno, int lineno, static void ParseHeapPageHeader(const PageHeader page, int blkno, int blknum) { bool checksum_matched = false; + uint64 freeSpace = 0; if (CheckPageZeroCases(page)) { uint16 
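
The new uheap flag dump above tests each bit with its own if. An equivalent table-driven variant is shown below; the bit values are invented for illustration and need not match the real UHEAP_* masks from the ustore headers:

```
#include <cstdio>
#include <cstdint>

// Illustrative bit values only -- the real masks live in the ustore headers.
struct FlagName {
    uint16_t mask;
    const char *name;
};

static const FlagName kUHeapFlags[] = {
    {0x0001, "UHEAP_HAS_NULL"},
    {0x0002, "UHEAP_DELETED"},
    {0x0004, "UHEAP_INPLACE_UPDATED"},
    {0x0008, "UHEAP_UPDATED"},
};

// Print the symbolic name of every bit set in `flag`.
static void DumpFlags(uint16_t flag)
{
    for (const FlagName &f : kUHeapFlags)
        if (flag & f.mask)
            printf("\t\t\tFlag: %s\n", f.name);
}

int main()
{
    DumpFlags(0x0005);  // prints UHEAP_HAS_NULL and UHEAP_INPLACE_UPDATED
    return 0;
}
```
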
checksum = pg_checksum_page((char*)page, (BlockNumber)blkno + (SegNo * ((BlockNumber)RELSEG_SIZE))); checksum_matched = (checksum == page->pd_checksum); @@ -2673,25 +2709,32 @@ static void ParseHeapPageHeader(const PageHeader page, int blkno, int blknum) "\tPage size & version: %u, %u\n", (uint16)PageGetPageSize(page), (uint16)PageGetPageLayoutVersion(page)); - if (true) { - fprintf(stdout, - "\tpd_xid_base: %lu, pd_multi_base: %lu\n", - ((HeapPageHeader)(page))->pd_xid_base, - ((HeapPageHeader)(page))->pd_multi_base); - fprintf(stdout, - "\tpd_prune_xid: %lu\n", - ((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base); - } else - fprintf(stdout, "\tpd_prune_xid: %u\n", page->pd_prune_xid); + fprintf(stdout, + "\tpd_xid_base: %lu, pd_multi_base: %lu\n", + ((HeapPageHeader)(page))->pd_xid_base, + ((HeapPageHeader)(page))->pd_multi_base); + fprintf(stdout, + "\tpd_prune_xid: %lu\n", + ((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base); + if (page->pd_upper < page->pd_lower) { + fprintf(stdout, "WARNING: INVALID PAGE!"); + } else { + freeSpace = page->pd_upper - page->pd_lower; + g_freeMax = freeSpace > g_freeMax ? freeSpace : g_freeMax; + g_freeSpace += freeSpace; + } + g_pageCount++; return; } - static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum) { bool checksum_matched = false; + uint64 tdCount = 0; + uint64 freeSpace = 0; + UHeapPageHeader upage = (UHeapPageHeader)page; if (CheckPageZeroCases(page)) { - uint16 checksum = pg_checksum_page((char*)page, (BlockNumber)blkno); + uint16 checksum = pg_checksum_page((char*)page, (BlockNumber)blkno + (SegNo * ((BlockNumber)RELSEG_SIZE))); checksum_matched = (checksum == page->pd_checksum); } fprintf(stdout, "page information of block %d/%d\n", blkno, blknum); @@ -2699,9 +2742,9 @@ static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum) fprintf(stdout, "\tpd_checksum: 0x%X, verify %s\n", page->pd_checksum, checksum_matched ? "success" : "fail"); fprintf(stdout, "\tpd_flags: "); - if (PageHasFreeLinePointers(page)) + if (UPageHasFreeLinePointers(page)) fprintf(stdout, "PD_HAS_FREE_LINES "); - if (PageIsFull(page)) + if (UPageIsFull(page)) fprintf(stdout, "PD_PAGE_FULL "); if (PageIsAllVisible(page)) fprintf(stdout, "PD_ALL_VISIBLE "); @@ -2712,22 +2755,27 @@ static void ParseUHeapPageHeader(const PageHeader page, int blkno, int blknum) if (PageIsEncrypt(page)) fprintf(stdout, "PD_ENCRYPT_PAGE "); fprintf(stdout, "\n"); - fprintf(stdout, "\tpd_lower: %u, %s\n", page->pd_lower, PageIsEmpty(page) ? "empty" : "non-empty"); + fprintf(stdout, "\tpd_lower: %u, %s\n", page->pd_lower, UPageIsEmpty(upage) ? "empty" : "non-empty"); fprintf(stdout, "\tpd_upper: %u, %s\n", page->pd_upper, PageIsNew(page) ? 
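
The statistics added in this hunk derive a page's free space from the gap between pd_lower and pd_upper, guarding against corrupt headers first. A compact sketch of the accumulation, with hypothetical globals mirroring the new g_freeSpace/g_freeMax/g_pageCount counters:

```
#include <cstdint>
#include <cstdio>

// Stand-ins for the new pagehack globals.
static uint64_t gFreeSpace = 0, gFreeMax = 0, gPageCount = 0;

// pd_lower grows upward (line pointers), pd_upper grows downward (tuple
// data); a page where they cross is corrupt and contributes no free space.
static void AccountFreeSpace(uint16_t pdLower, uint16_t pdUpper)
{
    if (pdUpper < pdLower) {
        printf("WARNING: INVALID PAGE!\n");
    } else {
        uint64_t freeSpace = pdUpper - pdLower;
        if (freeSpace > gFreeMax)
            gFreeMax = freeSpace;
        gFreeSpace += freeSpace;
    }
    gPageCount++;
}

int main()
{
    AccountFreeSpace(64, 8000);
    AccountFreeSpace(500, 300);   // invalid page: upper < lower
    printf("pages %lu, freeTotal %lu, freeMax %lu\n",
           (unsigned long)gPageCount, (unsigned long)gFreeSpace,
           (unsigned long)gFreeMax);
    return 0;
}
```
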
"new" : "old"); fprintf(stdout, "\tpd_special: %u, size %u\n", page->pd_special, PageGetSpecialSize(page)); - fprintf(stdout, - "\tPage size & version: %u, %u\n", - (uint16)PageGetPageSize(page), - (uint16)PageGetPageLayoutVersion(page)); - - fprintf(stdout, "\tpotential_freespace: %u\n", ((UHeapPageHeaderData *)(page))->potential_freespace); - fprintf(stdout, "\ttd_count: %u\n", ((UHeapPageHeaderData *)(page))->td_count); - fprintf(stdout, "\tpd_prune_xid: %lu\n", ((UHeapPageHeaderData *)(page))->pd_prune_xid); - fprintf(stdout, - "\tpd_xid_base: %lu, pd_multi_base: %lu\n", - ((UHeapPageHeaderData *)(page))->pd_xid_base, - ((UHeapPageHeaderData *)(page))->pd_multi_base); - + fprintf(stdout, "\tPage size & version: %u, %u\n", + (uint16)PageGetPageSize(page), (uint16)PageGetPageLayoutVersion(page)); + fprintf(stdout, "\tpotential_freespace: %u\n", upage->potential_freespace); + fprintf(stdout, "\ttd_count: %u\n", upage->td_count); + fprintf(stdout, "\tpd_prune_xid: %lu\n", upage->pd_prune_xid); + fprintf(stdout, "\tpd_xid_base: %lu, pd_multi_base: %lu\n", + upage->pd_xid_base, upage->pd_multi_base); + if (upage->pd_upper < upage->pd_lower) { + fprintf(stdout, "WARNING: INVALID PAGE!"); + } else { + freeSpace = upage->pd_upper - upage->pd_lower; + g_freeMax = freeSpace > g_freeMax ? freeSpace : g_freeMax; + g_freeSpace += freeSpace; + } + g_pageCount++; + tdCount = upage->td_count; + g_tdCount += tdCount; + g_tdMax = tdCount > g_tdMax ? tdCount : g_tdMax; return; } @@ -2777,14 +2825,10 @@ static void parse_special_data(const char* buffer, SegmentType type) if (!P_ISDELETED(opaque)) fprintf(stdout, "\tbtree tree level: %u\n", opaque->btpo.level); else { - if (PageIs4BXidVersion(page)) - fprintf(stdout, "\tnext txid_old (deleted): %u\n", opaque->btpo.xact_old); - else { - if (uopaque) - fprintf(stdout, "\tnext txid (deleted): %lu\n", ((UBTPageOpaque)uopaque)->xact); - else - fprintf(stdout, "\tnext txid (deleted): %lu\n", ((BTPageOpaque)opaque)->xact); - } + if (uopaque) + fprintf(stdout, "\tnext txid (deleted): %lu\n", ((UBTPageOpaque)uopaque)->xact); + else + fprintf(stdout, "\tnext txid (deleted): %lu\n", ((BTPageOpaque)opaque)->xact); } fprintf(stdout, "\tbtree flag: "); if (P_ISLEAF(opaque)) @@ -2897,12 +2941,14 @@ static void parse_heap_or_index_page(const char* buffer, int blkno, SegmentType nunused = nnormal = ndead = 0; if (type == SEG_UHEAP) { UHeapPageHeaderData *upghdr = (UHeapPageHeaderData *)buffer; - if (upghdr->pd_lower <= SizeOfUHeapPageHeaderData) + if (upghdr->pd_lower <= SizeOfUHeapPageHeaderData) { nline = 0; - else - nline = - (upghdr->pd_lower - (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(upghdr))) / sizeof(RowPtr); - + } else { + nline = (upghdr->pd_lower - (SizeOfUHeapPageHeaderData + + SizeOfUHeapTDData(upghdr))) / sizeof(RowPtr); + g_rpCount += (uint64)nline; + g_rpMax = (uint64)nline > g_rpMax ? (uint64)nline : g_rpMax; + } fprintf(stdout, "\n\tUHeap tuple information on this page\n"); for (i = FirstOffsetNumber; i <= nline; i++) { rowptr = UPageGetRowPtr(buffer, i); @@ -2936,6 +2982,8 @@ static void parse_heap_or_index_page(const char* buffer, int blkno, SegmentType parse_special_data(buffer, type); } else if (type == SEG_HEAP || type == SEG_INDEX_BTREE) { nline = PageGetMaxOffsetNumber((Page)page); + g_rpCount += (uint64)nline; + g_rpMax = (uint64)nline > g_rpMax ? 
(uint64)nline : g_rpMax; fprintf(stdout, "\n\tHeap tuple information on this page\n"); for (i = FirstOffsetNumber; i <= nline; i++) { lp = PageGetItemId(page, i); @@ -3097,78 +3145,7 @@ static int parse_a_page(const char* buffer, int blkno, int blknum, SegmentType t return true; } -static BlockNumber CalculateMaxBlockNumber(BlockNumber blknum, BlockNumber start, BlockNumber number) -{ - /* parse */ - if (start >= blknum) { - fprintf(stderr, "start point exceeds the total block number of relation.\n"); - return InvalidBlockNumber; - } else if ((start + number) > blknum) { - fprintf(stderr, "don't have %d blocks from block %d in the relation, only %d blocks\n", number, start, - (blknum - start)); - number = blknum; - } else if (number == 0) { - number = blknum; - } else { - number += start; - } - return number; -} - static int parse_page_file(const char* filename, SegmentType type, const uint32 start_point, const uint32 number_read) -{ - if (type != SEG_HEAP && type != SEG_INDEX_BTREE) { - return parse_uncompressed_page_file(filename, type, start_point, number_read); - } - - auto openGaussCompression = new OpenGaussCompression(); - openGaussCompression->SetFilePath(filename, SegNo); - bool success = openGaussCompression->TryOpen(); - if (!success) { - delete openGaussCompression; - return parse_uncompressed_page_file(filename, type, start_point, number_read); - } - - BlockNumber start = start_point; - BlockNumber blknum = openGaussCompression->GetMaxBlockNumber(); - BlockNumber number = CalculateMaxBlockNumber(blknum, start, number_read); - if (number == InvalidBlockNumber) { - delete openGaussCompression; - return false; - } - char compressed[BLCKSZ]; - size_t compressedLen; - while (start < number) { - if (!openGaussCompression->ReadChunkOfBlock(compressed, &compressedLen, start)) { - fprintf(stderr, "read block %d failed, filename: %s: %s\n", start, openGaussCompression->GetPcdFilePath(), - strerror(errno)); - delete openGaussCompression; - return false; - } - if (!parse_a_page(openGaussCompression->GetDecompressedPage(), start, blknum, type)) { - fprintf(stderr, "Error during parsing block %d/%d\n", start, blknum); - delete openGaussCompression; - return false; - } - if ((write_back && num_item) || dirty_page) { - if (dirty_page) { - openGaussCompression->MarkUncompressedDirty(); - } - if (!openGaussCompression->WriteBackUncompressedData()) { - fprintf(stderr, "write back failed, filename: %s: %s\n", openGaussCompression->GetPcdFilePath(), - strerror(errno)); - delete openGaussCompression; - return false; - } - } - start++; - } - delete openGaussCompression; - return true; -} - -static int parse_uncompressed_page_file(const char *filename, SegmentType type, const uint32 start_point, - const uint32 number_read) { char buffer[BLCKSZ]; FILE* fd = NULL; @@ -3196,9 +3173,21 @@ static int parse_uncompressed_page_file(const char *filename, SegmentType type, blknum = size / BLCKSZ; /* parse */ - number = CalculateMaxBlockNumber(blknum, start, number); - if (number == InvalidBlockNumber) { + if (start >= blknum) { + fprintf(stderr, "start point exceeds the total block number of relation.\n"); + fclose(fd); return false; + } else if ((start + number) > blknum) { + fprintf(stderr, + "don't have %u blocks from block %u in the relation, only %u blocks\n", + number, + start, + (blknum - start)); + number = blknum; + } else if (number == 0) { + number = blknum; + } else { + number += start; } Assert((start * BLCKSZ) < size); @@ -3214,7 +3203,7 @@ static int parse_uncompressed_page_file(const char 
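
The range check re-inlined above turns a (start, count) pair into an end-exclusive block bound, clamping to the relation size; a count of zero means "through the end of the file". A standalone sketch of the same arithmetic:

```
#include <cstdio>
#include <cstdint>

const uint32_t kInvalidBlockNumber = 0xFFFFFFFF;

// Convert (start, count) into an end-exclusive block bound within blknum
// total blocks; count == 0 or an oversized count reads to the last block.
static uint32_t ClampBlockRange(uint32_t blknum, uint32_t start, uint32_t count)
{
    if (start >= blknum) {
        fprintf(stderr, "start point exceeds the total block number of relation.\n");
        return kInvalidBlockNumber;
    }
    if (count == 0 || start + count > blknum)
        return blknum;          // read to the end
    return start + count;
}

int main()
{
    printf("%u\n", ClampBlockRange(100, 10, 5));    // 15
    printf("%u\n", ClampBlockRange(100, 10, 0));    // 100
    printf("%u\n", ClampBlockRange(100, 10, 500));  // 100 (clamped)
    return 0;
}
```
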
*filename, SegmentType type, } if (!parse_a_page(buffer, start, blknum, type)) { - fprintf(stderr, "Error during parsing block %d/%d\n", start, blknum); + fprintf(stderr, "Error during parsing block %u/%u\n", start, blknum); fclose(fd); return false; } @@ -3235,6 +3224,16 @@ static int parse_uncompressed_page_file(const char *filename, SegmentType type, start++; } + float8 rpAvg = g_pageCount == 0 ? 0 : (float8)g_rpCount / g_pageCount; + float8 tdAvg = g_pageCount == 0 ? 0 : (float8)g_tdCount / g_pageCount; + float8 freeAvg = g_pageCount == 0 ? 0 : (float8)g_freeSpace / g_pageCount; + fprintf(stdout, "Relation information : pageCount %lu.\n", g_pageCount); + fprintf(stdout, "RP information : rpCount %lu, rpMax %lu, rpAvg %f.\n", + g_rpCount, g_rpMax, rpAvg); + fprintf(stdout, "TD information : tdCount %lu, tdMax %lu, tdAvg %f.\n", + g_tdCount, g_tdMax, tdAvg); + fprintf(stdout, "Freespace information : freeTotal %lu, freeMax %lu, freeAvg %f.\n", + g_freeSpace, g_freeMax, freeAvg); fclose(fd); return true; @@ -3343,7 +3342,7 @@ static void parse_relation_options_struct(char* vardata, int relkind) --indentLevel; } /* else can index relation options but won't solve it now*/ - + } static void parse_oid_array(Oid* ids, int nids) @@ -3751,6 +3750,7 @@ static int parse_cu_file(char* filename, uint64 offset) /* parse a replslot file */ static int parse_slot_file(char* filename) { + const uint32 upperLen = 32; FILE* fd = NULL; ReplicationSlotOnDisk cp; size_t readBytes = 0; @@ -3822,8 +3822,11 @@ static int parse_slot_file(char* filename) fprintf(stdout, "xmin: %lu\n", cp.slotdata.xmin); fprintf( - stdout, "restart_lsn: %X/%X\n", (uint32)(cp.slotdata.restart_lsn >> 32), (uint32)(cp.slotdata.restart_lsn)); - + stdout, "restart_lsn: %X/%X\n", (uint32)(cp.slotdata.restart_lsn >> upperLen), + (uint32)(cp.slotdata.restart_lsn)); + fprintf( + stdout, "confirmed_flush: %X/%X\n", (uint32)(cp.slotdata.confirmed_flush >> upperLen), + (uint32)(cp.slotdata.confirmed_flush)); fclose(fd); return true; @@ -4126,7 +4129,7 @@ static int parse_csnlog_file(char* filename) return true; } -static bool parse_dw_file_head(char* file_head, dw_file_head_t* saved_file_head) +static bool parse_dw_file_head(char* file_head, dw_file_head_t* saved_file_head, int size = 0) { uint32 i; uint16 id; @@ -4256,7 +4259,8 @@ static uint16 parse_batch_data_pages(dw_batch_t* curr_head, uint16 page_num) return page_num; } -static uint16 calc_reading_pages(dw_batch_t** curr_head, char* start_buf, uint16 read_pages, uint16 file_page_id) +static uint16 calc_reading_pages(dw_batch_t** curr_head, char* start_buf, uint16 read_pages, uint16 file_page_id, + uint16 dw_batch_page_num) { uint16 buf_page_id; errno_t rc; @@ -4278,11 +4282,11 @@ static uint16 calc_reading_pages(dw_batch_t** curr_head, char* start_buf, uint16 } Assert((char*)(*curr_head) + (read_pages + readingPages) * BLCKSZ <= start_buf + DW_BUF_MAX * BLCKSZ); - Assert(file_page_id + read_pages + readingPages <= DW_FILE_PAGE); + Assert(file_page_id + read_pages + readingPages <= dw_batch_page_num); return (uint16)readingPages; } -static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint16 page_num) +static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint16 page_num, uint16 dw_batch_page_num) { uint16 file_page_id, read_pages; uint16 reading_pages; @@ -4296,7 +4300,7 @@ static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint1 start_buf = buf; curr_head = (dw_batch_t*)start_buf; read_pages = 0; - reading_pages = 
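
restart_lsn and the newly printed confirmed_flush are 64-bit LSNs rendered in the usual high/low hex form. A minimal sketch of that split, with upperLen = 32 exactly as in the patch:

```
#include <cstdio>
#include <cstdint>

// Render a 64-bit LSN as "XLOGID/XRECOFF", the format used throughout
// PostgreSQL-derived tooling.
static void PrintLsn(const char *label, uint64_t lsn)
{
    const uint32_t upperLen = 32;
    printf("%s: %X/%X\n", label, (uint32_t)(lsn >> upperLen), (uint32_t)lsn);
}

int main()
{
    PrintLsn("restart_lsn", 0x123456789ABCULL);         // 1234/56789ABC
    PrintLsn("confirmed_flush", 0x0000000100000000ULL); // 1/0
    return 0;
}
```
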
Min(DW_BATCH_MAX_FOR_NOHBK, (DW_FILE_PAGE - file_page_id)); + reading_pages = Min(DW_BATCH_MAX_FOR_NOHBK, (dw_batch_page_num - file_page_id)); flush_pages = 0; for (;;) { @@ -4328,43 +4332,92 @@ static void parse_dw_batch(char* buf, FILE* fd, dw_file_head_t* file_head, uint1 break; } - reading_pages = calc_reading_pages(&curr_head, start_buf, read_pages, file_page_id); + reading_pages = calc_reading_pages(&curr_head, start_buf, read_pages, file_page_id, dw_batch_page_num); } } static bool parse_dw_file(const char* file_name, uint32 start_page, uint32 page_num) { - char* buf; + errno_t rc; FILE* fd; size_t result; + uint32 dw_batch_page_num; dw_file_head_t file_head; + char meta_path[PATH_MAX]; + char cur_dir[PATH_MAX]; + dw_batch_meta_file* batch_meta_file; + char* meta_buf = NULL; + char* dw_buf = NULL; + + rc = strcpy_s(cur_dir, PATH_MAX, file_name); + securec_check(rc, "", ""); + (void)dirname(cur_dir); + rc = strcpy_s(meta_path, PATH_MAX, cur_dir); + securec_check(rc, "", ""); + rc = strcat_s(meta_path, PATH_MAX, "\\"); + securec_check(rc, "", ""); + rc = strcat_s(meta_path, PATH_MAX, DW_META_FILE); + securec_check(rc, "", ""); + + fd = fopen(meta_path, "rb+"); + + if (fd == NULL) { + fprintf(stderr, "%s: %s\n", meta_path, strerror(errno)); + return false; + } + + meta_buf = (char*)malloc(DW_META_FILE_BLOCK_NUM * BLCKSZ); + if (meta_buf == NULL) { + fclose(fd); + fprintf(stderr, "out of memory\n"); + return false; + } + + result = fread(meta_buf, sizeof(dw_batch_meta_file), 1, fd); + if (result != 1) { + free(meta_buf); + fclose(fd); + fprintf(stderr, "read %s: %s\n", meta_path, strerror(errno)); + return false; + } + + batch_meta_file = (dw_batch_meta_file *) meta_buf; + dw_batch_page_num = (uint32) (DW_FILE_SIZE_UNIT * batch_meta_file->dw_file_size / BLCKSZ); + + free(meta_buf); + fclose(fd); fd = fopen(file_name, "rb+"); if (fd == NULL) { fprintf(stderr, "%s: %s\n", file_name, strerror(errno)); return false; } - buf = (char*)malloc(BLCKSZ * DW_BUF_MAX_FOR_NOHBK); - if (buf == NULL) { + + dw_buf = (char*)malloc(BLCKSZ * DW_BUF_MAX_FOR_NOHBK); + if (dw_buf == NULL) { fclose(fd); fprintf(stderr, "out of memory\n"); return false; } - result = fread(buf, BLCKSZ, 1, fd); + result = fread(dw_buf, BLCKSZ, 1, fd); if (result != 1) { - free(buf); + free(dw_buf); fclose(fd); fprintf(stderr, "read %s: %s\n", file_name, strerror(errno)); return false; } - if (!parse_dw_file_head(buf, &file_head)) { - free(buf); + if (!parse_dw_file_head(dw_buf, &file_head, BLCKSZ)) { + free(dw_buf); fclose(fd); return false; } if (start_page != 0) { - Assert(start_page < DW_FILE_PAGE); + if (start_page >= dw_batch_page_num) { + fprintf(stdout, "start_page %u exceeds the double write file upper limit offset %u\n", + start_page, dw_batch_page_num - 1); + return false; + } file_head.start = (uint16)start_page; if (page_num != 0) { @@ -4374,11 +4427,12 @@ static bool parse_dw_file(const char* file_name, uint32 start_page, uint32 page_ } } if (page_num == 0) { - page_num = DW_FILE_PAGE - start_page; + page_num = dw_batch_page_num - start_page; } - parse_dw_batch(buf, fd, &file_head, (uint16)page_num); - free(buf); + parse_dw_batch(dw_buf, fd, &file_head, (uint16)page_num, (uint16)dw_batch_page_num); + + free(dw_buf); fclose(fd); return true; } @@ -4566,7 +4620,7 @@ static bool parse_dw_single_flush_file(const char* file_name) fseek(fd, (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ, SEEK_SET); result = fread(second_file_head, 1, BLCKSZ, fd); - + fseek(fd, (1 + DW_FIRST_DATA_PAGE_NUM + 1) * BLCKSZ, SEEK_SET); result = 
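
The reworked parse_dw_file no longer hard-codes DW_FILE_PAGE; it derives the batch file's page count from the meta file. A sketch of that derivation under stated assumptions -- DW_FILE_SIZE_UNIT is taken here to be 1 MB, which may differ from the real definition, and the struct is reduced to the one field used:

```
#include <cstdio>
#include <cstdint>

// Assumed values for illustration only.
constexpr uint32_t BLCKSZ = 8192;
constexpr uint32_t DW_FILE_SIZE_UNIT = 1024 * 1024;  // assumption: 1 MB units

// Reduced mirror of the size field read from the dw batch meta file.
struct DwBatchMetaFile {
    uint32_t dw_file_size;   // in DW_FILE_SIZE_UNIT units
};

static uint32_t DwBatchPageNum(const DwBatchMetaFile &meta)
{
    return (uint32_t)((uint64_t)DW_FILE_SIZE_UNIT * meta.dw_file_size / BLCKSZ);
}

int main()
{
    DwBatchMetaFile meta = {256};                // a 256 MB double-write file
    printf("%u pages\n", DwBatchPageNum(meta));  // 32768 pages
    return 0;
}
```
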
fread(item_buf, 1, blk_num * BLCKSZ, fd); if (blk_num * BLCKSZ != result) { @@ -4684,7 +4738,7 @@ void CheckCRC(pg_crc32 comCrcVal, pg_crc32 pageCrcVal, const uint64 readSize, ch FIN_CRC32C(comCrcVal); if (!EQ_CRC32C(pageCrcVal, comCrcVal)) { - fprintf(stderr, + fprintf(stderr, "Undo meta CRC calculated(%u) is different from CRC recorded(%u) in page.\n", comCrcVal, pageCrcVal); return; } @@ -4734,8 +4788,8 @@ static int ParseUndoZoneMeta(const char *filename, int zid) /* Get page CRC from uspMetaBuffer. */ pageCrcVal = *(pg_crc32 *) (uspMetaBuffer + readSize); - /* - * Calculate the CRC value based on all undospace meta information stored on the page. + /* + * Calculate the CRC value based on all undospace meta information stored on the page. * Then compare with pageCrcVal. */ CheckCRC(comCrcVal, pageCrcVal, readSize, uspMetaBuffer); @@ -4778,7 +4832,7 @@ static int ParseUndoSpaceMeta(const char *filename, int zid, UndoSpaceType type) fprintf(stderr, "Open file(%s), return code desc(%s).\n", UNDO_META_FILE, strerror(errno)); return false; } - + if (type == UNDO_LOG_SPACE) { UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); lseek(fd, totalPageCnt * UNDO_META_PAGE_SIZE, SEEK_SET); @@ -4812,8 +4866,8 @@ static int ParseUndoSpaceMeta(const char *filename, int zid, UndoSpaceType type) /* Get page CRC from uspMetaBuffer. */ pageCrcVal = *(pg_crc32 *) (uspMetaBuffer + readSize); - /* - * Calculate the CRC value based on all undospace meta information stored on the page. + /* + * Calculate the CRC value based on all undospace meta information stored on the page. * Then compare with pageCrcVal. */ CheckCRC(comCrcVal, pageCrcVal, readSize, uspMetaBuffer); @@ -4822,8 +4876,8 @@ static int ParseUndoSpaceMeta(const char *filename, int zid, UndoSpaceType type) zoneId = (loop - 1) * UNDOSPACE_COUNT_PER_PAGE + offset; uspSpaceInfo = (UndoSpaceMetaInfo *) (uspMetaBuffer + offset * sizeof(UndoSpaceMetaInfo)); if ((zid == INVALID_ZONE_ID) || (zid != INVALID_ZONE_ID && zid == zoneId)) { - fprintf(stdout, "zid=%d, head=%lu, tail=%lu, lsn=%lu.\n", zoneId, - UNDO_PTR_GET_OFFSET(uspSpaceInfo->head), UNDO_PTR_GET_OFFSET(uspSpaceInfo->tail), + fprintf(stdout, "zid=%d, head=%lu, tail=%lu, lsn=%lu.\n", zoneId, + UNDO_PTR_GET_OFFSET(uspSpaceInfo->head), UNDO_PTR_GET_OFFSET(uspSpaceInfo->tail), uspSpaceInfo->lsn); if (zid != INVALID_ZONE_ID) { @@ -4883,8 +4937,10 @@ typedef struct UndoHeader { UndoRecordHeader whdr_; UndoRecordBlock wblk_; UndoRecordTransaction wtxn_; + UndoRecordPayload wpay_; UndoRecordOldTd wtd_; - UndoRecordPayload wpay_; + UndoRecordPartition wpart_; + UndoRecordTablespace wtspc_; } UndoHeader; char g_dir[100] = {0}; @@ -4962,19 +5018,19 @@ bool ReadUndoRecord(UndoHeader *urec, char *buffer, int startingByte, int *alrea } } if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_PARTOID) != 0) { - if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_PARTITION, + if (!ReadUndoBytes((char *)&urec->wpart_, SIZE_OF_UNDO_RECORD_PARTITION, &readptr, endptr, &myBytesRead, alreadyRead)) { return false; } } if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0) { - if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_TABLESPACE, + if (!ReadUndoBytes((char *)&urec->wtspc_, SIZE_OF_UNDO_RECORD_TABLESPACE, &readptr, endptr, &myBytesRead, alreadyRead)) { return false; } } if ((urec->whdr_.uinfo & UNDO_UREC_INFO_PAYLOAD) != 0) { - if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_PAYLOAD, + if (!ReadUndoBytes((char *)&urec->wpay_, 
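
The ReadUndoRecord fix below copies each optional section into its own member (wpart_, wtspc_, wpay_) instead of repeatedly overwriting wtd_. A simplified sketch of flag-directed parsing over a byte stream; the struct layout and flag values are invented for the example:

```
#include <cstdio>
#include <cstring>
#include <cstdint>

// Invented layout and flags; real undo records define these elsewhere
// with different sizes and values.
constexpr uint8_t INFO_HAS_PARTOID = 0x01;
constexpr uint8_t INFO_HAS_PAYLOAD = 0x02;

struct UndoHeaderSketch {
    uint32_t partoid;
    uint16_t payloadlen;
};

static bool ParseOptionalSections(UndoHeaderSketch *rec, const uint8_t *p,
                                  size_t len, uint8_t uinfo)
{
    const uint8_t *end = p + len;
    // Each flagged section must land in its own field -- copying every
    // section into the same member (the old bug) silently loses data.
    if (uinfo & INFO_HAS_PARTOID) {
        if (p + sizeof(rec->partoid) > end) return false;
        memcpy(&rec->partoid, p, sizeof(rec->partoid));
        p += sizeof(rec->partoid);
    }
    if (uinfo & INFO_HAS_PAYLOAD) {
        if (p + sizeof(rec->payloadlen) > end) return false;
        memcpy(&rec->payloadlen, p, sizeof(rec->payloadlen));
        p += sizeof(rec->payloadlen);
    }
    return true;
}

int main()
{
    uint8_t buf[6] = {0x2A, 0, 0, 0, 0x10, 0};  // partoid=42, payloadlen=16 (LE)
    UndoHeaderSketch rec = {};
    if (ParseOptionalSections(&rec, buf, sizeof(buf),
                              INFO_HAS_PARTOID | INFO_HAS_PAYLOAD))
        printf("partoid=%u payloadlen=%u\n", rec.partoid, rec.payloadlen);
    return 0;
}
```
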
SIZE_OF_UNDO_RECORD_PAYLOAD, &readptr, endptr, &myBytesRead, alreadyRead)) { return false; } @@ -4988,7 +5044,7 @@ static bool ParseUndoRecord(UndoRecPtr urp) char buffer[BLCKSZ] = {'\0'}; BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp); int zoneId = UNDO_PTR_GET_ZONE_ID(urp); - int startingByte = UNDO_PTR_GET_BLOCK_NUM(urp); + int startingByte = UNDO_PTR_GET_PAGE_OFFSET(urp); int fd = -1; int alreadyRead = 0; off_t seekpos; @@ -5003,6 +5059,7 @@ static bool ParseUndoRecord(UndoRecPtr urp) do { fd = OpenUndoBlock(zoneId, blockno); if (fd < 0) { + free(urec); return false; } seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE)); @@ -5013,6 +5070,7 @@ static bool ParseUndoRecord(UndoRecPtr urp) ret = read(fd, (char *)buffer, BLCKSZ); if (ret != BLCKSZ) { close(fd); + free(urec); fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret); return false; } @@ -5266,6 +5324,28 @@ static void fill_filenode_map(char** class_map) return; } +static long int strtolSafe(const char* nptr, long int default_value) +{ + char* tmp = NULL; + long int res = strtol(nptr, &tmp, TEN); + if (errno == ERANGE || tmp == nptr || (errno != 0 && res == 0)) { + fprintf(stdout, "WARNING: failed to convert parameter %s to int!\n", nptr); + res = default_value; + } + return res; +} + +static long long int strtollSafe(const char* nptr, long long int default_value) +{ + char* tmp = NULL; + long long int res = strtoll(nptr, &tmp, TEN); + if (errno == ERANGE || tmp == nptr || (errno != 0 && res == 0)) { + fprintf(stdout, "WARNING: failed to convert parameter %s to int!\n", nptr); + res = default_value; + } + return res; +} + int main(int argc, char** argv) { int c; @@ -5318,7 +5398,7 @@ int main(int argc, char** argv) } case 'o': - cu_offset = (uint64)atoll(optarg); + cu_offset = (uint64)strtollSafe(optarg, 0); break; case 'r': // relation name given @@ -5371,19 +5451,19 @@ int main(int argc, char** argv) break; case 's': - start_point = (unsigned)atoi(optarg); + start_point = (unsigned int)strtolSafe(optarg, 0); break; case 'n': - num_block = (unsigned)atoi(optarg); + num_block = (unsigned int)strtolSafe(optarg, 0); break; case 'I': - start_item = (unsigned)atoi(optarg); + start_item = (unsigned int)strtolSafe(optarg, 1); break; case 'N': - num_item = (unsigned)atoi(optarg); + num_item = (unsigned int)strtolSafe(optarg, 0); break; case 'w': @@ -5395,11 +5475,11 @@ int main(int argc, char** argv) break; case 'z': - zid = (int)atoi(optarg); + zid = (int)strtolSafe(optarg, INVALID_ZONE_ID); break; case 'S': - SegNo = (unsigned)atoi(optarg); + SegNo = (unsigned int)strtolSafe(optarg, 0); break; default: diff --git a/contrib/pageinspect/btreefuncs.cpp b/contrib/pageinspect/btreefuncs.cpp index 3fef7de58..86a77ae09 100644 --- a/contrib/pageinspect/btreefuncs.cpp +++ b/contrib/pageinspect/btreefuncs.cpp @@ -106,11 +106,7 @@ static void GetBTPageStatistics(BlockNumber blkno, Buffer buffer, BTPageStat* st /* page type (flags) */ if (P_ISDELETED(opaque)) { stat->type = 'd'; - - if (PageIs4BXidVersion(page)) - stat->btpo.xact = opaque->btpo.xact_old; - else - stat->btpo.xact = ((BTPageOpaque)opaque)->xact; + stat->btpo.xact = ((BTPageOpaque)opaque)->xact; return; } else if (P_IGNORE(opaque)) stat->type = 'e'; diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.cpp b/contrib/pg_archivecleanup/pg_archivecleanup.cpp index 8bc43fa31..435eb014e 100644 --- a/contrib/pg_archivecleanup/pg_archivecleanup.cpp +++ b/contrib/pg_archivecleanup/pg_archivecleanup.cpp @@ -205,7 +205,7 @@ static void 
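
strtolSafe below replaces bare atoi so that garbage input falls back to a default instead of silently becoming 0. One detail worth noting: strtol only sets errno on failure and never clears it on success, so portable callers reset errno first. A variant with that reset made explicit:

```
#include <cerrno>
#include <cstdio>
#include <cstdlib>

// Parse a base-10 long, returning default_value on any conversion error.
// errno is cleared first because strtol never resets it on success.
static long StrtolSafe(const char *nptr, long default_value)
{
    char *end = nullptr;
    errno = 0;
    long res = strtol(nptr, &end, 10);
    if (errno == ERANGE || end == nptr) {
        fprintf(stdout, "WARNING: failed to convert parameter %s to int!\n", nptr);
        return default_value;
    }
    return res;
}

int main()
{
    printf("%ld\n", StrtolSafe("128", 0));                    // 128
    printf("%ld\n", StrtolSafe("bogus", 7));                  // 7 (default)
    printf("%ld\n", StrtolSafe("99999999999999999999", 7));   // 7 (ERANGE)
    return 0;
}
```
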
SetWALFileNameForCleanup(void) * Use just the prefix of the filename, ignore everything after * first period */ - XLogFileName(exclusiveCleanupFileName, tli, ((uint64)log) << segLen | seg); + XLogFileName(exclusiveCleanupFileName, MAXFNAMELEN, tli, ((uint64)log) << segLen | seg); } } diff --git a/contrib/pg_standby/pg_standby.cpp b/contrib/pg_standby/pg_standby.cpp index d83abcdb3..5ae323673 100644 --- a/contrib/pg_standby/pg_standby.cpp +++ b/contrib/pg_standby/pg_standby.cpp @@ -125,9 +125,9 @@ struct stat stat_buf; #define XLOG_DATA_FNAME_LEN 24 /* Reworked from access/xlog_internal.h */ -#define XLogFileName(fname, tli, logSegNo) \ +#define XLogFileName(fname, len, tli, logSegNo) \ snprintf(fname, \ - XLOG_DATA_FNAME_LEN + 1, \ + len, \ "%08X%08X%08X", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -345,7 +345,7 @@ static bool SetWALFileNameForCleanup(void) } } - XLogFileName(exclusiveCleanupFileName, tli, (((uint32)log) << 32) | seg); + XLogFileName(exclusiveCleanupFileName, MAXFNAMELEN, tli, (((uint32)log) << 32) | seg); return cleanup; } diff --git a/contrib/pg_xlogdump/CMakeLists.txt b/contrib/pg_xlogdump/CMakeLists.txt index c470e4ec6..cf43dabc8 100644 --- a/contrib/pg_xlogdump/CMakeLists.txt +++ b/contrib/pg_xlogdump/CMakeLists.txt @@ -35,7 +35,7 @@ SET(TGT_xlogdump_INC SET(xlogdump_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND) SET(xlogdump_COMPILE_OPTIONS ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${CHECK_OPTIONS} ${BIN_SECURE_OPTIONS} ${OPTIMIZE_OPTIONS}) SET(xlogdump_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -SET(xlogdump_LINK_LIBS libpgcommon.a -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +SET(xlogdump_LINK_LIBS libpgcommon.a -lpgport -lcrypt -ldl -lm -ledit -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(pg_xlogdump TGT_xlogdump_SRC TGT_xlogdump_INC "${xlogdump_DEF_OPTIONS}" "${xlogdump_COMPILE_OPTIONS}" "${xlogdump_LINK_OPTIONS}" "${xlogdump_LINK_LIBS}") add_dependencies(pg_xlogdump pgport_static pgcommon_static) target_link_directories(pg_xlogdump PUBLIC diff --git a/contrib/pg_xlogdump/pg_xlogdump.cpp b/contrib/pg_xlogdump/pg_xlogdump.cpp index ba44fc414..afc5e9086 100644 --- a/contrib/pg_xlogdump/pg_xlogdump.cpp +++ b/contrib/pg_xlogdump/pg_xlogdump.cpp @@ -78,7 +78,7 @@ typedef struct XLogDumpStats { static void XLogDumpTablePage(XLogReaderState* record, int block_id, RelFileNode rnode, BlockNumber blk); static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLogRecPtr startptr, char* buf, Size count); static int XLogDumpReadPage(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetPtr, - char* readBuff, TimeLineID* curFileTLI); + char* readBuff, TimeLineID* curFileTLI, char* xlog_path = NULL); static void XLogDumpCountRecord(XLogDumpConfig* config, XLogDumpStats* stats, XLogReaderState* record); static void XLogDumpDisplayRecord(XLogDumpConfig* config, XLogReaderState* record); static void XLogDumpStatsRow(const char* name, uint64 n, uint64 total_count, uint64 rec_len, uint64 total_rec_len, @@ -356,7 +356,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog XLByteToSeg(recptr, sendSegNo); - XLogFileName(fname, timeline_id, sendSegNo); + XLogFileName(fname, MAXFNAMELEN, timeline_id, sendSegNo); sendFile = fuzzy_open_file(directory, fname); @@ -371,7 +371,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog int err = errno; char fname[MAXPGPATH]; - XLogFileName(fname, timeline_id, sendSegNo); + 
XLogFileName(fname, MAXFNAMELEN, timeline_id, sendSegNo); fatal_error("could not seek in log segment %s to offset %u: %s", fname, startoff, strerror(err)); } @@ -389,7 +389,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog int err = errno; char fname[MAXPGPATH]; - XLogFileName(fname, timeline_id, sendSegNo); + XLogFileName(fname, MAXFNAMELEN, timeline_id, sendSegNo); fatal_error("could not read from log segment %s, offset %d, length %d: %s", fname, @@ -411,7 +411,7 @@ static void XLogDumpXLogRead(const char* directory, TimeLineID timeline_id, XLog * XLogReader read_page callback */ static int XLogDumpReadPage(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetPtr, - char* readBuff, TimeLineID* curFileTLI) + char* readBuff, TimeLineID* curFileTLI, char* xlog_path) { XLogDumpPrivate* dumpprivate = (XLogDumpPrivate*)state->private_data; int count = XLOG_BLCKSZ; @@ -1088,7 +1088,7 @@ begin_read: for (;;) { /* try to read the next record */ - record = XLogReadRecord(xlogreader_state, first_record, &errormsg, false); + record = XLogReadRecord(xlogreader_state, first_record, &errormsg); if (!record) { if (!config.follow || dumpprivate.endptr_reached) break; diff --git a/contrib/pg_xlogdump/rmgrdesc.cpp b/contrib/pg_xlogdump/rmgrdesc.cpp index eeb19b508..48bd73a24 100644 --- a/contrib/pg_xlogdump/rmgrdesc.cpp +++ b/contrib/pg_xlogdump/rmgrdesc.cpp @@ -13,7 +13,6 @@ #include "access/gin.h" #include "access/gist_private.h" #include "access/hash.h" -#include "access/hash_xlog.h" #include "access/heapam.h" #include "access/multixact.h" #include "access/nbtree.h" @@ -43,7 +42,8 @@ #include "access/ustore/knl_uredo.h" -#define PG_RMGR(symname, name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc) {name, desc}, +#define PG_RMGR(symname, name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc, type_name) \ + {name, desc}, const RmgrDescData RmgrDescTable[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" diff --git a/contrib/pgstattuple/pgstattuple.cpp b/contrib/pgstattuple/pgstattuple.cpp index e6c40a5c6..a15989faf 100644 --- a/contrib/pgstattuple/pgstattuple.cpp +++ b/contrib/pgstattuple/pgstattuple.cpp @@ -366,6 +366,7 @@ static void pgstat_hash_page(pgstattuple_type* stat, Relation rel, BlockNumber b Page page; OffsetNumber maxoff; + _hash_getlock(rel, blkno, HASH_SHARE); buf = _hash_getbuf_with_strategy(rel, blkno, HASH_READ, 0, bstrategy); page = BufferGetPage(buf); @@ -392,6 +393,7 @@ static void pgstat_hash_page(pgstattuple_type* stat, Relation rel, BlockNumber b } _hash_relbuf(rel, buf); + _hash_droplock(rel, blkno, HASH_SHARE); } /* diff --git a/contrib/postgres_fdw/connection.cpp b/contrib/postgres_fdw/connection.cpp index f46a70ff7..6cce384c4 100644 --- a/contrib/postgres_fdw/connection.cpp +++ b/contrib/postgres_fdw/connection.cpp @@ -158,8 +158,8 @@ PGconn *GetConnection(ForeignServer *server, UserMapping *user, bool will_prep_s RegisterXactCallback(pgfdw_xact_callback, NULL); RegisterSubXactCallback(pgfdw_subxact_callback, NULL); - CacheRegisterSyscacheCallback(FOREIGNSERVEROID, pgfdw_inval_callback, (Datum)0); - CacheRegisterSyscacheCallback(USERMAPPINGOID, pgfdw_inval_callback, (Datum)0); + CacheRegisterSessionSyscacheCallback(FOREIGNSERVEROID, pgfdw_inval_callback, (Datum)0); + CacheRegisterSessionSyscacheCallback(USERMAPPINGOID, pgfdw_inval_callback, (Datum)0); if (IS_THREAD_POOL_SESSION) { u_sess->ext_fdw_ctx[POSTGRES_TYPE_FDW].fdwExitFunc = pg_fdw_exit; diff --git 
a/contrib/postgres_fdw/deparse.cpp b/contrib/postgres_fdw/deparse.cpp index 33dada5fd..d4cc79a29 100644 --- a/contrib/postgres_fdw/deparse.cpp +++ b/contrib/postgres_fdw/deparse.cpp @@ -46,6 +46,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/defrem.h" +#include "libpq/pqexpbuffer.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/var.h" @@ -1150,6 +1151,7 @@ static void deparseRelation(StringInfo buf, Relation rel) const char *nspname = NULL; const char *relname = NULL; ListCell *lc; + char parttype = PARTTYPE_NON_PARTITIONED_RELATION; /* obtain additional catalog information. */ ForeignTable* table = GetForeignTable(RelationGetRelid(rel)); @@ -1178,6 +1180,33 @@ static void deparseRelation(StringInfo buf, Relation rel) relname = RelationGetRelationName(rel); } + /* foreign table could not be built from a partitioned table */ + UserMapping* user = GetUserMapping(rel->rd_rel->relowner, table->serverid); + ForeignServer *server = GetForeignServer(table->serverid); + PGconn* conn = GetConnection(server, user, false); + + PQExpBuffer query = createPQExpBuffer(); + appendPQExpBuffer(query, + "SELECT c.parttype FROM pg_class c, pg_namespace n " + "WHERE c.relname = '%s' and c.relnamespace = n.oid and n.nspname = '%s'", + quote_identifier(relname), quote_identifier(nspname)); + + PGresult* res = pgfdw_exec_query(conn, query->data); + if (PQresultStatus(res) != PGRES_TUPLES_OK) { + pgfdw_report_error(ERROR, res, conn, true, query->data); + } + /* res may be empty as the relname/nspname validation is not checked */ + if (PQntuples(res) > 0) { + parttype = *PQgetvalue(res, 0, 0); + } + PQclear(res); + destroyPQExpBuffer(query); + + if (!ENABLE_SQL_BETA_FEATURE(PARTITION_FDW_ON) && + (parttype == PARTTYPE_PARTITIONED_RELATION || parttype == PARTTYPE_SUBPARTITIONED_RELATION)) { + ereport(ERROR, (errmsg("could not operate foreign table on partitioned table"))); + } + appendStringInfo(buf, "%s.%s", quote_identifier(nspname), quote_identifier(relname)); } diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index 6c7d9fb50..8350aeb45 100755 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -37,17 +37,17 @@ CREATE TABLE "S 1"."T 2" ( INSERT INTO "S 1"."T 1" SELECT id, id % 10, - to_char(id, 'FM00000'), + pg_catalog.to_char(id, 'FM00000'), '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval, '1970-01-01'::timestamp + ((id % 100) || ' days')::interval, id % 10, id % 10, 'foo'::user_enum - FROM generate_series(1, 1000) id; + FROM pg_catalog.generate_series(1, 1000) id; INSERT INTO "S 1"."T 2" SELECT id, - 'AAA' || to_char(id, 'FM000') - FROM generate_series(1, 100) id; + 'AAA' || pg_catalog.to_char(id, 'FM000') + FROM pg_catalog.generate_series(1, 100) id; ANALYZE "S 1"."T 1"; ANALYZE "S 1"."T 2"; @@ -188,7 +188,7 @@ SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1; -- user-defined operator/function CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$ BEGIN -RETURN abs($1); +RETURN pg_catalog.abs($1); END $$ LANGUAGE plpgsql IMMUTABLE; CREATE OPERATOR === ( @@ -200,7 +200,7 @@ CREATE OPERATOR === ( ); EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2); EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2; -EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = abs(t1.c2); +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = pg_catalog.abs(t1.c2); EXPLAIN 
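
The new deparseRelation guard above probes the remote pg_class for parttype before deparsing, and errors out for (sub)partitioned targets unless the PARTITION_FDW_ON beta feature is enabled. A hedged libpq-only sketch of the same probe; connection handling and identifier quoting are simplified (the real code uses quote_identifier and pgfdw_exec_query), and the DSN is illustrative:

```
#include <libpq-fe.h>
#include <cstdio>

// Return the remote relation's parttype ('n' = non-partitioned, 'p'/'s' =
// (sub)partitioned); defaults to 'n' when the relation is not found.
static char RemoteParttype(PGconn *conn, const char *nspname, const char *relname)
{
    char query[512];
    // NOTE: quoting/escaping omitted for brevity; do not use verbatim.
    snprintf(query, sizeof(query),
             "SELECT c.parttype FROM pg_class c, pg_namespace n "
             "WHERE c.relname = '%s' AND c.relnamespace = n.oid "
             "AND n.nspname = '%s'", relname, nspname);
    PGresult *res = PQexec(conn, query);
    char parttype = 'n';
    if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) > 0)
        parttype = *PQgetvalue(res, 0, 0);
    PQclear(res);
    return parttype;
}

int main()
{
    PGconn *conn = PQconnectdb("dbname=postgres");  // illustrative DSN
    if (PQstatus(conn) == CONNECTION_OK)
        printf("parttype = %c\n", RemoteParttype(conn, "public", "t1"));
    PQfinish(conn);
    return 0;
}
```
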
(VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2; -- =================================================================== @@ -210,7 +210,7 @@ EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest -EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE pg_catalog.round(pg_catalog.abs(c1), 0) = 1; -- FuncExpr EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l) EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r) EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr @@ -225,9 +225,9 @@ SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2; -- check both safe and unsafe join conditions EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft2 a, ft2 b - WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7); + WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = pg_catalog.upper(a.c7); SELECT * FROM ft2 a, ft2 b -WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7); +WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = pg_catalog.upper(a.c7); -- bug before 9.3.5 due to sloppy handling of remote-estimate parameters SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5)); SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5)); @@ -267,12 +267,12 @@ EXPLAIN (VERBOSE, COSTS false) EXECUTE st1(1, 2); EXECUTE st1(1, 1); EXECUTE st1(101, 101); -- subquery using stable function (can't be sent to remote) -PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c4) = '1970-01-17'::date) ORDER BY c1; +PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND pg_catalog.date(c4) = '1970-01-17'::date) ORDER BY c1; EXPLAIN (VERBOSE, COSTS false) EXECUTE st2(10, 20); EXECUTE st2(10, 20); EXECUTE st2(101, 121); -- subquery using immutable function (can be sent to remote) -PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c5) = '1970-01-17'::date) ORDER BY c1; +PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND pg_catalog.date(c5) = '1970-01-17'::date) ORDER BY c1; EXPLAIN (VERBOSE, COSTS false) EXECUTE st3(10, 20); EXECUTE st3(10, 20); EXECUTE st3(20, 30); @@ -448,34 +448,34 @@ INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive -- Test savepoint/rollback behavior -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; -select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; begin; update ft2 set c2 = 42 where c2 = 0; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; savepoint s1; update ft2 set c2 = 44 where c2 = 4; -select c2, count(*) 
from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; release savepoint s1; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; savepoint s2; update ft2 set c2 = 46 where c2 = 6; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; rollback to savepoint s2; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; release savepoint s2; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; savepoint s3; update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side rollback to savepoint s3; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; release savepoint s3; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; -- none of the above is committed yet remotely -select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; commit; -select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; -select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from ft2 where c2 < 500 group by 1 order by 1; +select c2, pg_catalog.count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; -- =================================================================== -- test serial columns (ie, sequence-based defaults) @@ -530,14 +530,14 @@ begin tg_name, argstr, TG_when, TG_level, TG_OP, relid; oldnew := '{}'::text[]; if TG_OP != 'INSERT' then - oldnew := array_append(oldnew, format('OLD: %s', OLD)); + oldnew := pg_catalog.array_append(oldnew, pg_catalog.format('OLD: %s', OLD)); end if; if TG_OP != 'DELETE' then - oldnew := array_append(oldnew, format('NEW: %s', NEW)); + oldnew := pg_catalog.array_append(oldnew, pg_catalog.format('NEW: %s', NEW)); end if; - RAISE NOTICE '%', array_to_string(oldnew, ','); + RAISE NOTICE '%', pg_catalog.array_to_string(oldnew, ','); if TG_OP = 'DELETE' then return OLD; diff --git a/contrib/security_plugin/access_audit.cpp b/contrib/security_plugin/access_audit.cpp index be6cdce7e..c35911a01 100644 --- a/contrib/security_plugin/access_audit.cpp +++ b/contrib/security_plugin/access_audit.cpp @@ -288,8 +288,8 @@ void audit_open_relation(List *list, Var *col_att, PolicyLabelItem *full_column, } } -static void audit_open_view(RuleLock *rules, Var *col_att, PolicyLabelItem* full_column, - PolicyLabelItem *view_full_column) +static void audit_cursor_view(RuleLock *rules, Var *col_att, PolicyLabelItem *full_column, + PolicyLabelItem *view_full_column) { if (col_att == NULL) return; @@ -364,7 +364,7 @@ void get_fqdn_by_relid(RangeTblEntry *rte, PolicyLabelItem *full_column, Var *co /* schema */ full_column->m_schema = tbl_rel->rd_rel->relnamespace; if (tbl_rel->rd_rules) { /* view */ - audit_open_view(tbl_rel->rd_rules, col_att, full_column, view_full_column); + audit_cursor_view(tbl_rel->rd_rules, col_att, full_column, view_full_column); if (view_full_column) { view_full_column->m_schema = 
tbl_rel->rd_rel->relnamespace; view_full_column->set_object(rte->relid, O_VIEW); @@ -499,7 +499,7 @@ void access_audit_policy_run(const List* rtable, CmdType cmd_type) /* table object */ RangeTblEntry *rte = (RangeTblEntry *)lfirst(lc); policy_result pol_result; - if (rte == NULL || rte->relname == NULL || rte->rtekind == RTE_REMOTE_DUMMY) { + if (rte == NULL || rte->rtekind == RTE_REMOTE_DUMMY) { continue; } @@ -507,7 +507,8 @@ void access_audit_policy_run(const List* rtable, CmdType cmd_type) int recursion_deep = 0; handle_subquery(rte, rte->subquery->commandType, &pol_result, &checked_tables, &policy_ids, &security_policy_ids, &recursion_deep); - } else if (checked_tables.insert(rte->relname).second) { /* verify if table object already checked */ + } else if (rte->relname != NULL && + checked_tables.insert(rte->relname).second) { /* verify if table object already checked */ /* use query plan commandtype here but not get it from rte directly */ if (!handle_table_entry(rte, cmd_type, &policy_ids, &security_policy_ids, &pol_result)) { continue; diff --git a/contrib/security_plugin/gs_mask_policy.cpp b/contrib/security_plugin/gs_mask_policy.cpp index 9ca0d90ee..94658ecb4 100644 --- a/contrib/security_plugin/gs_mask_policy.cpp +++ b/contrib/security_plugin/gs_mask_policy.cpp @@ -196,7 +196,7 @@ static bool is_valid_for_masking(const char* func_name, Oid funcnsp, int& funcid bool is_valid = true; /* try to find function on pg_proc */ for (int i = 0; i < catlist->n_members && is_valid; ++i) { - HeapTuple proctup = &catlist->members[i]->tuple; + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); /* verify namespace */ if (procform->pronamespace != funcnsp) { diff --git a/contrib/security_plugin/gs_policy_labels.cpp b/contrib/security_plugin/gs_policy_labels.cpp index 22f2410f1..22bf0e6f2 100644 --- a/contrib/security_plugin/gs_policy_labels.cpp +++ b/contrib/security_plugin/gs_policy_labels.cpp @@ -36,6 +36,8 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/lsyscache.h" +#include "gs_mask_policy.h" +#include "gs_policy_plugin.h" static THR_LOCAL loaded_labels *all_labels = NULL; @@ -192,13 +194,13 @@ bool check_label_has_object(const PolicyLabelItem *object, return false; } Assert(CheckLabelBoundPolicy != NULL); - loaded_labels *all_labels = get_policy_labels(); - if (all_labels == NULL) { + loaded_labels *cur_all_labels = get_policy_labels(); + if (cur_all_labels == NULL) { return false; } - loaded_labels::const_iterator it = all_labels->begin(); - loaded_labels::const_iterator eit = all_labels->end(); + loaded_labels::const_iterator it = cur_all_labels->begin(); + loaded_labels::const_iterator eit = cur_all_labels->end(); for (; it != eit; ++it) { /* for each item of loaded existing labels, and match labels */ if (labels != NULL && labels->find(*(it->first)) == labels->end()) { @@ -238,4 +240,41 @@ void clear_thread_local_label() delete all_labels; all_labels = NULL; } +} + +void verify_drop_column(AlterTableStmt *stmt) +{ + ListCell *lcmd = NULL; + foreach (lcmd, stmt->cmds) { + AlterTableCmd *cmd = (AlterTableCmd *)lfirst(lcmd); + switch (cmd->subtype) { + case AT_DropColumn: { + /* check by column */ + PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN); + if (check_label_has_object(&find_obj, is_masking_has_object)) { + char buff[512] = {0}; + int rc = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1, + "Column: %s is part of some resource 
label, cannot be dropped.", find_obj.m_column); + securec_check_ss(rc, "\0", "\0"); + gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED); + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff))); + } + break; + } + case AT_AlterColumnType: { + PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN); + if (check_label_has_object(&find_obj, is_masking_has_object, true)) { + char buff[512] = {0}; + int ret = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1, + "Column: %s is part of some masking policy, cannot be changed.", find_obj.m_column); + securec_check_ss(ret, "\0", "\0"); + gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED); + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff))); + } + break; + } + default: + break; + } + } } \ No newline at end of file diff --git a/contrib/security_plugin/gs_policy_labels.h b/contrib/security_plugin/gs_policy_labels.h index d25c8094c..473197a28 100644 --- a/contrib/security_plugin/gs_policy_labels.h +++ b/contrib/security_plugin/gs_policy_labels.h @@ -56,4 +56,5 @@ bool update_label_value(const gs_stl::gs_string object_name, void reset_policy_labels(); void clear_thread_local_label(); +void verify_drop_column(AlterTableStmt *stmt); #endif /* GS_POLICY_GS_POLICY_LABELS_H_ */ diff --git a/contrib/security_plugin/gs_policy_object_types.cpp b/contrib/security_plugin/gs_policy_object_types.cpp index a4ff66a58..8f74074d8 100644 --- a/contrib/security_plugin/gs_policy_object_types.cpp +++ b/contrib/security_plugin/gs_policy_object_types.cpp @@ -544,6 +544,19 @@ typedef struct ObjectTypeInfo const char* object_name; } ObjectTypeInfo; +typedef struct CmdCursorInfo { + CmdType cmd_type; + const char *object_name; +} CmdCursorInfo; + +static CmdCursorInfo cmd_cursorinfo[] = { + {CMD_SELECT, "FOR SELECT FROM"}, + {CMD_INSERT, "FOR INSERT TO"}, + {CMD_UPDATE, "FOR UPDATE FROM"}, + {CMD_DELETE, "FOR DELETE FROM"}, + {CMD_UNKNOWN, NULL} +}; + static OperInfo oper_infos[] = { {"create", T_CREATE}, {"alter", T_ALTER}, @@ -560,7 +573,7 @@ static OperInfo oper_infos[] = { {"login_success", T_LOGIN_SUCCESS}, {"login_failure", T_LOGIN_FAILURE}, {"copy", T_COPY}, - {"open", T_OPEN}, + {"cursor", T_CURSOR}, {"fetch", T_FETCH}, {"close", T_CLOSE}, {"all", T_ALL}, @@ -621,6 +634,20 @@ static ObjectTypeInfo object_type_infos[] = {O_UNKNOWN, NULL} }; +/* + * get_cursorinfo + * return cursor operation object + */ +const char *get_cursorinfo(CmdType type) +{ + for (int i = 0; cmd_cursorinfo[i].object_name != NULL; ++i) { + if (cmd_cursorinfo[i].cmd_type == type) { + return cmd_cursorinfo[i].object_name; + } + } + return "UNKNOWN"; +} + /* * get_privilege_type * return privilege type in enum PrivType by its name @@ -934,4 +961,17 @@ int get_objtype(int object_type) break; } return objtype; +} + +CmdType get_rte_commandtype(RangeTblEntry *rte) +{ + if (rte->selectedCols) { + return CMD_SELECT; + } else if (rte->insertedCols) { + return CMD_INSERT; + } else if (rte->updatedCols) { + return CMD_UPDATE; + } else { + return CMD_UNKNOWN; + } } \ No newline at end of file diff --git a/contrib/security_plugin/gs_policy_object_types.h b/contrib/security_plugin/gs_policy_object_types.h index f7e00fedd..9286dd9b2 100644 --- a/contrib/security_plugin/gs_policy_object_types.h +++ b/contrib/security_plugin/gs_policy_object_types.h @@ -66,7 +66,7 @@ enum PrivType { T_LOGIN_SUCCESS, T_LOGIN_FAILURE, T_COPY, - T_OPEN, + T_CURSOR, T_FETCH, T_CLOSE,
T_ALL @@ -275,5 +275,7 @@ bool name_list_to_label(PolicyLabelItem *item, List *names, char *name = NULL, s void gen_policy_labelitem(PolicyLabelItem &item, const ListCell *rel, int objtype); void gen_policy_label_for_commentstmt(PolicyLabelItem &item, const CommentStmt *commentstmt); int get_objtype(int object_type); +CmdType get_rte_commandtype(RangeTblEntry *rte); +const char *get_cursorinfo(CmdType type); #endif /* GS_POLICY_OBJECT_TYPES_H_ */ diff --git a/contrib/security_plugin/gs_policy_plugin.cpp b/contrib/security_plugin/gs_policy_plugin.cpp index 138f37bb3..5f739f5b0 100644 --- a/contrib/security_plugin/gs_policy_plugin.cpp +++ b/contrib/security_plugin/gs_policy_plugin.cpp @@ -137,10 +137,7 @@ static THR_LOCAL char original_query[256]; static THR_LOCAL MngEventsVector *mng_events = NULL; using StrMap = gs_stl::gs_map; -static THR_LOCAL StrMap* masked_prepared_stmts = NULL; -static THR_LOCAL StrMap* masked_cursor_stmts = NULL; -static void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist); static void gsaudit_next_PostParseAnalyze_hook(ParseState *pstate, Query *query); static void destroy_local_parameter(); static void destory_thread_variables() @@ -285,10 +282,7 @@ static void destroy_local_parameter() mng_events = NULL; } - if (masked_cursor_stmts != NULL) { - delete masked_cursor_stmts; - masked_cursor_stmts = NULL; - } + free_masked_cursor_stmts(); } /* @@ -514,77 +508,6 @@ bool verify_copy_command_is_reparsed(List* parsetree_list, const char* query_str return false; } -static void free_masked_prepared_stmts() -{ - if (masked_prepared_stmts) { - delete masked_prepared_stmts; - masked_prepared_stmts = NULL; - } -} - -template< class T> -static inline void flush_stmt_masking_result(const char* name, T* stmts) -{ - if (stmts) { - StrMap::const_iterator it = stmts->find(name); - if (it != stmts->end()) { - flush_masking_result(it->second); - } - } -} - -static void flush_cursor_stmt_masking_result(const char* name) -{ - flush_stmt_masking_result(name, masked_cursor_stmts); -} - -static void flush_prepare_stmt_masking_result(const char* name) -{ - flush_stmt_masking_result(name, masked_prepared_stmts); -} - -static void close_cursor_stmt_as_masked(const char* name) -{ - if (masked_cursor_stmts == NULL) { - return; - } - - masked_cursor_stmts->erase(name); - if (masked_cursor_stmts->empty() || (strcasecmp(name, "all") == 0)) { - delete masked_cursor_stmts; - masked_cursor_stmts = NULL; - } -} - -static void unprepare_stmt_as_masked(const char* name) -{ - unprepare_stmt(name); - if (!masked_prepared_stmts) { - return; - } - masked_prepared_stmts->erase(name); - if (masked_prepared_stmts->empty() || !strcasecmp(name, "all")) { - delete masked_prepared_stmts; - masked_prepared_stmts = NULL; - } -} - -static inline void set_prepare_stmt_as_masked(const char* name, const masking_result *result) -{ - if (!masked_prepared_stmts) { - masked_prepared_stmts = new StrMap; - } - (*masked_prepared_stmts)[name] = (*result); -} - -static inline void set_cursor_stmt_as_masked(const char* name, const masking_result *result) -{ - if (!masked_cursor_stmts) { - masked_cursor_stmts = new StrMap; - } - (*masked_cursor_stmts)[name] = (*result); -} - void set_result_set_function(const PolicyLabelItem &func) { if (result_set_functions == NULL) { @@ -595,129 +518,6 @@ void set_result_set_function(const PolicyLabelItem &func) } } -/* - * Do masking for given target list - * this function will parse each RTE of the list - * and then will check wether each node 
need to do mask. - */ -static bool handle_masking(List* targetList, ParseState *pstate, - const policy_set *policy_ids, List* rtable, Node* utilityNode) -{ - if (targetList == NIL || policy_ids->empty()) { - return false; - } - ListCell* temp = NULL; - masking_result masking_result; - foreach(temp, targetList) { - TargetEntry *old_tle = (TargetEntry *) lfirst(temp); - /* Shuffle masking columns can only select directly with out other operations */ - parser_target_entry(pstate, old_tle, policy_ids, &masking_result, rtable, true); - } - if (masking_result.size() > 0) { - if (strlen(t_thrd.security_policy_cxt.prepare_stmt_name) > 0) { - /* prepare statement was masked */ - set_prepare_stmt_as_masked(t_thrd.security_policy_cxt.prepare_stmt_name, - &masking_result); /* save masking event for executing case */ - } else if (utilityNode != NULL) { - switch (nodeTag(utilityNode)) { - case T_DeclareCursorStmt: - { - DeclareCursorStmt* stmt = (DeclareCursorStmt *)utilityNode; - /* save masking event for fetching case */ - set_cursor_stmt_as_masked(stmt->portalname, &masking_result); - } - break; - default: - flush_masking_result(&masking_result); /* invoke masking event */ - } - } else { - flush_masking_result(&masking_result); /* invoke masking event */ - } - return true; - } - return false; -} - -static void select_PostParseAnalyze(ParseState *pstate, Query *&query, const policy_set *policy_ids, bool audit_exist) -{ - Assert(query != NULL); - List *targetList = NIL; - targetList = (query->targetList != NIL) ? query->targetList : pstate->p_target_list; - handle_masking(targetList, pstate, policy_ids, query->rtable, query->utilityStmt); - - /* deal with function type label */ - load_function_label(query, audit_exist); -} - -static bool process_union_masking(Node *union_node, - ParseState *pstate, const Query *query, const policy_set *policy_ids, bool audit_exist) -{ - if (union_node == NULL) { - return false; - } - switch (nodeTag(union_node)) { - /* For each union, we get its query recursively for masking until it doesn't have any union query */ - case T_SetOperationStmt: - { - SetOperationStmt *stmt = (SetOperationStmt *)union_node; - if (stmt->op != SETOP_UNION) { - return false; - } - process_union_masking((Node *)(stmt->larg), pstate, query, policy_ids, audit_exist); - process_union_masking((Node *)(stmt->rarg), pstate, query, policy_ids, audit_exist); - } - break; - case T_RangeTblRef: - { - RangeTblRef *ref = (RangeTblRef *)union_node; - if (ref->rtindex <= 0 || ref->rtindex > list_length(query->rtable)) { - return false; - } - Query* mostQuery = rt_fetch(ref->rtindex, query->rtable)->subquery; - process_masking(pstate, mostQuery, policy_ids, audit_exist); - } - break; - default: - break; - } - return true; -} - -/* - * Main entrance for masking - * Identify components in query tree that need to do masking. - * This function will find all parts which need masking of select query, - * mainly includes CTE / setOperation / normal select columns. 
- */ -static void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist) -{ - if (query == NULL) { - return; - } - - /* set-operation tree UNION query */ - if (!process_union_masking(query->setOperations, pstate, query, policy_ids, audit_exist)) { - ListCell *lc = NULL; - /* For each Cte, we get its query recursively for masking, and then handle this query in normal way */ - if (query->cteList != NIL) { - foreach(lc, query->cteList) { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc); - Query *cte_query = (Query *)cte->ctequery; - process_masking(pstate, cte_query, policy_ids, audit_exist); - } - } - /* find subquery and process each subquery node */ - if (query->rtable != NULL) { - foreach(lc, query->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - Query *subquery = (Query *)rte->subquery; - process_masking(pstate, subquery, policy_ids, audit_exist); - } - } - select_PostParseAnalyze(pstate, query, policy_ids, audit_exist); - } -} - /* * check exchange partition list contains masked table. * For given AlterTableCmd list, check whether ordinary @@ -960,44 +760,6 @@ static void verify_drop_user(const char *rolename) } } -static void verify_drop_column(AlterTableStmt *stmt) -{ - ListCell *lcmd = NULL; - foreach (lcmd, stmt->cmds) { - AlterTableCmd *cmd = (AlterTableCmd *)lfirst(lcmd); - switch (cmd->subtype) { - case AT_DropColumn: { - /* check by column */ - PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN); - if (check_label_has_object(&find_obj, is_masking_has_object)) { - char buff[512] = {0}; - int rc = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1, - "Column: %s is part of some resource label, can not be renamed.", find_obj.m_column); - securec_check_ss(rc, "\0", "\0"); - gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED); - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff))); - } - break; - } - case AT_AlterColumnType: { - PolicyLabelItem find_obj(stmt->relation->schemaname, stmt->relation->relname, cmd->name, O_COLUMN); - if (check_label_has_object(&find_obj, is_masking_has_object, true)) - { - char buff[512] = {0}; - int ret = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1, - "Column: %s is part of some masking policy, can not be changed.", find_obj.m_column); - securec_check_ss(ret, "\0", "\0"); - gs_audit_issue_syslog_message("PGAUDIT", buff, AUDIT_POLICY_EVENT, AUDIT_FAILED); - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\"", buff))); - } - break; - } - default: - break; - } - } -} - /* * Hook ProcessUtility to do session auditing for DDL and utility commands. 
*/ @@ -1077,13 +839,13 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString if (parsetree != NULL) { switch (nodeTag(parsetree)) { case T_PlannedStmt: { - if (!check_audited_privilige(T_OPEN) && !SECURITY_CHECK_ACL_PRIV(T_OPEN)) { + if (!check_audited_privilige(T_CURSOR) && !SECURITY_CHECK_ACL_PRIV(T_CURSOR)) { break; } char buff[POLICY_STR_BUFF_LEN] = {0}; PlannedStmt *stmt = (PlannedStmt *)parsetree; get_open_cursor_info(stmt, buff, sizeof(buff)); - internal_audit_str(&security_policy_ids, &audit_policy_ids, buff, T_OPEN, "OPEN", O_CURSOR); + internal_audit_str(&security_policy_ids, &audit_policy_ids, buff, T_CURSOR, "OPEN", O_CURSOR); break; } case T_FetchStmt: { @@ -1097,12 +859,12 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString gs_stl::gs_vector cursor_objects; if (portal && portal->queryDesc && portal->queryDesc->plannedstmt && portal->queryDesc->plannedstmt->rtable) { - get_cursor_tables(portal->queryDesc->plannedstmt->rtable, buff, sizeof(buff), - printed_size, &cursor_objects); + get_cursor_tables(portal->queryDesc->plannedstmt->rtable, buff, sizeof(buff), printed_size, + &cursor_objects); } for (const PolicyLabelItem item : cursor_objects) { - internal_audit_object_str(&security_policy_ids, &audit_policy_ids, &item, T_FETCH, - "FETCH", stmt->portalname); + internal_audit_object_str(&security_policy_ids, &audit_policy_ids, &item, T_FETCH, "FETCH", + stmt->portalname); } flush_cursor_stmt_masking_result(stmt->portalname); /* invoke masking event in this case */ } @@ -1186,14 +948,8 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString case T_DeallocateStmt: { DeallocateStmt *stmt = (DeallocateStmt *)parsetree; char tmp[POLICY_TMP_BUFF_LEN] = {0}; - int rc; - if (stmt->name == NULL) { - rc = snprintf_s(tmp, sizeof(tmp), sizeof(tmp) - 1, "ALL"); - securec_check_ss(rc, "\0", "\0"); - } else { - rc = snprintf_s(tmp, sizeof(tmp), sizeof(tmp) - 1, "%s", stmt->name); - securec_check_ss(rc, "\0", "\0"); - } + int rc = snprintf_s(tmp, sizeof(tmp), sizeof(tmp) - 1, "%s", stmt->name == NULL ? "ALL" : stmt->name); + securec_check_ss(rc, "\0", "\0"); check_access_table(&audit_policy_ids, tmp, CMD_DEALLOCATE, O_UNKNOWN, tmp); unprepare_stmt_as_masked(tmp); break; @@ -1325,7 +1081,7 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString names_pair(granted_name, rte1->rolname ? rte1->rolname : "ALL" /* grantee_name */), stmt->is_grant ? T_GRANT : T_REVOKE, stmt->is_grant ? "GRANT" : "REVOKE", - stmt->objtype); + stmt->objtype, stmt->targtype); } } } @@ -1348,7 +1104,29 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString internal_audit_object_str(&security_policy_ids, &audit_policy_ids, NULL, names_pair(rte2->rolname /* granted_name */, rte1->rolname /* grantee_name */), grantrolestmt->is_grant ? T_GRANT : T_REVOKE, grantrolestmt->is_grant ? 
"GRANT" : "REVOKE", - O_ROLE, true, true); + O_ROLE, ACL_TARGET_OBJECT, true, true); + } + } + break; + } + case T_GrantDbStmt: { + if (!check_audited_privilige(T_GRANT) && !check_audited_privilige(T_REVOKE) && + !SECURITY_CHECK_ACL_PRIV(T_GRANT) && !SECURITY_CHECK_ACL_PRIV(T_REVOKE)) { + break; + } + GrantDbStmt *grantdbstmt = (GrantDbStmt *)(parsetree); + ListCell *lc1 = NULL; + ListCell *lc2 = NULL; + if (grantdbstmt && grantdbstmt->grantees && grantdbstmt->privileges) { + forboth(lc1, grantdbstmt->grantees, lc2, grantdbstmt->privileges) + { + PrivGrantee *rte1 = (PrivGrantee *)lfirst(lc1); + DbPriv *rte2 = (DbPriv*)lfirst(lc2); + + internal_audit_object_str(&security_policy_ids, &audit_policy_ids, NULL, + names_pair(rte2->db_priv_name, rte1->rolname /* grantee_name */), + grantdbstmt->is_grant ? T_GRANT : T_REVOKE, grantdbstmt->is_grant ? "GRANT" : "REVOKE", + O_UNKNOWN, ACL_TARGET_OBJECT, true, false); } } break; @@ -1718,11 +1496,9 @@ static void gsaudit_ProcessUtility_hook(Node *parsetree, const char *queryString break; } CreateTableAsStmt *createtablestmt = (CreateTableAsStmt *)(parsetree); - if (createtablestmt != NULL) { - IntoClause *intoclause = createtablestmt->into; - if (intoclause != NULL) - audit_table(&security_policy_ids, &audit_policy_ids, intoclause->rel, T_CREATE, - "CREATE", O_TABLE); + if (createtablestmt != NULL && createtablestmt->into != NULL) { + audit_table(&security_policy_ids, &audit_policy_ids, createtablestmt->into->rel, T_CREATE, "CREATE", + O_TABLE); } break; } @@ -1913,19 +1689,6 @@ static const char *ACL_get_object_name(int targetype, int objtype, ListCell *obj return NULL; } -CmdType get_rte_commandtype(RangeTblEntry *rte) -{ - if (rte->selectedCols) { - return CMD_SELECT; - } else if (rte->insertedCols) { - return CMD_INSERT; - } else if (rte->updatedCols) { - return CMD_UPDATE; - } else { - return CMD_UNKNOWN; - } -} - static void gs_audit_executor_start_hook(QueryDesc *queryDesc, int eflags) { /* verify parameter and audit policy */ @@ -2090,17 +1853,9 @@ void _PG_init(void) /* * Uninstall hooks and release local memory context * NOTE: Now the uninstall hooks process is disabled referring funciton internal_unload_library - * we just put the release function pointers here to adapt the uninstall process in the feature. + * we just put the release function here to adapt the uninstall process in the feature. 
*/ void _PG_fini(void) { - user_login_hook = NULL; - ExecutorStart_hook = next_ExecutorStart_hook; - ProcessUtility_hook = next_ProcessUtility_hook; - post_parse_analyze_hook = next_post_parse_analyze_hook; - copy_need_to_be_reparse = NULL; - light_unified_audit_executor_hook = NULL; - opfusion_unified_audit_executor_hook = NULL; - opfusion_unified_audit_flush_logs_hook = NULL; ereport(LOG, (errmsg("Gsaudit extension finished"))); } diff --git a/contrib/security_plugin/gs_policy_plugin.h b/contrib/security_plugin/gs_policy_plugin.h index b11a21083..de227dc82 100644 --- a/contrib/security_plugin/gs_policy_plugin.h +++ b/contrib/security_plugin/gs_policy_plugin.h @@ -48,7 +48,6 @@ const char* GetUserName(char* user_name, size_t user_name_size); bool get_ipaddress(gs_stl::gs_string& ipaddress); extern void set_result_set_function(const PolicyLabelItem &func); void get_name_range_var(const RangeVar *rangevar, gs_stl::gs_string *buffer, bool enforce = true); -CmdType get_rte_commandtype(RangeTblEntry *rte); extern void load_database_policy_info(); bool is_audit_policy_exist_load_policy_info(); diff --git a/contrib/security_plugin/masking.cpp b/contrib/security_plugin/masking.cpp index bb0ad23b0..d91813fc3 100644 --- a/contrib/security_plugin/masking.cpp +++ b/contrib/security_plugin/masking.cpp @@ -1,1215 +1,1418 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- * --------------------------------------------------------------------------------------- - * - * masking.cpp - * - * IDENTIFICATION - * src/contrib/security_plugin/masking.cpp - * - * --------------------------------------------------------------------------------------- - */ - -#include -#include "postgres.h" -#include "nodes/nodes.h" -#include "nodes/nodeFuncs.h" -#include "nodes/params.h" -#include "nodes/primnodes.h" -#include "parser/parse_clause.h" -#include "parser/parse_target.h" -#include "parser/parse_expr.h" -#include "parser/parse_oper.h" -#include "parser/parse_func.h" -#include "parser/parse_utilcmd.h" -#include "parser/parse_relation.h" -#include "utils/numeric.h" -#include "catalog/pg_proc.h" -#include "utils/acl.h" -#include "utils/syscache.h" -#include "utils/lsyscache.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "parser/parse_type.h" -#include "nodes/makefuncs.h" -#include "parser/parse_coerce.h" -#include "commands/tablespace.h" -#include "masking.h" -#include "access_audit.h" -#include "gs_threadlocal.h" -#include "gs_policy_plugin.h" -#include "gs_policy/gs_vector.h" -#include "gs_policy/gs_policy_masking.h" - -#define BUFFSIZE 256 - -#ifdef ENABLE_UT -#define static -#endif - -void reset_node_location() -{ - t_thrd.security_policy_cxt.node_location = 0; -} - -const int DATA_TYPE_LENGTH = 2; /* data type length */ -static Node* create_udf_function(ParseState *pstate, Var* var, Oid funcid, masking_result *result, long long polid, - const char* full_column, const func_params *f_params); -static void parse_func(Node *expr); -static void get_var_value(const List *rtable, Var *var, PolicyLabelItem *full_column, PolicyLabelItem *view_name); - -static void get_fqdn_by_joinalias(const List *rtable, const RangeTblEntry *rte, const Var *var, - PolicyLabelItem *full_column, PolicyLabelItem *view_name) -{ - ListCell *l = NULL; - int joinpos = 1; - /* Scan all joinaliasvars of JOIN RTE. - * varattno marks its orders in joinaliasvars, - * and we can use it to get the real var of join statement - */ - foreach (l, rte->joinaliasvars) { - if (joinpos != (int)var->varattno) { - ++joinpos; - continue; - } - /* Get real join var information */ - Var *joinvar = (Var *)lfirst(l); - get_var_value(rtable, joinvar, full_column, view_name); - break; - } -} - -static void parse_value(const List *rtable, RangeTblEntry *rte, Var *var, - PolicyLabelItem *full_column, PolicyLabelItem *view_name) -{ - switch (rte->rtekind) { - case RTE_RELATION: /* relation ID of current table */ - if (OidIsValid(rte->relid)) { - get_fqdn_by_relid(rte, full_column, var, view_name); - } - break; - case RTE_JOIN: /* RTE is JOIN kind */ - get_fqdn_by_joinalias(rtable, rte, var, full_column, view_name); - break; - case RTE_FUNCTION: /* relation is function */ - parse_func(rte->funcexpr); - break; - default: - /* Here is no need to parse subquery or CTE, because subquery or CTE will mask itself. */ - break; - } -} - -static void get_var_value(const List *rtable, Var *var, PolicyLabelItem *full_column, PolicyLabelItem *view_name) -{ - ListCell *l = NULL; - int pos = 1; - /* In TargetList, each TargetEntry's varno points to rtable. - * So, the RangeTableEntry corresponding to each varno needs to be got first. - * Additionally, parse full column name for every (var-rte) pair. 
- */ - foreach (l, rtable) { - if (pos != (int)var->varno) { - ++pos; - continue; - } - - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); - parse_value(rtable, rte, var, full_column, view_name); - break; - } -} - -static bool mask_expr_node(ParseState *pstate, Expr*& expr, - const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask = true); -bool handle_masking_node(ParseState *pstate, Expr*& src_expr, - const policy_set *policy_ids, masking_result *result, List *rtable, bool can_mask = true); - -static bool mask_func(ParseState *pstate, Expr*& expr, - const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask = true) -{ - if (expr == NULL) { - return false; - } - bool is_masking = false; - if (nodeTag(expr) == T_FuncExpr) { - FuncExpr *fe = (FuncExpr *)(expr); - PolicyLabelItem func_value; - if (get_function_name(fe->funcid, &func_value)) { - set_result_set_function(func_value); - } - if (fe->args != NULL) { - ListCell* temp = NULL; - foreach(temp, fe->args) { - Node *&item = (Node*&)lfirst(temp); - bool expr_masked = mask_expr_node(pstate, (Expr*&)item, policy_ids, result, rtable, can_mask); - is_masking = is_masking || expr_masked; - } - } - } - return is_masking; -} - -static void parse_func(Node* expr) -{ - switch (nodeTag(expr)) { - case T_FuncExpr: - { - FuncExpr *fe = (FuncExpr *)expr; - { - PolicyLabelItem func_value; - if (get_function_name(fe->funcid, &func_value)) { - set_result_set_function(func_value); - } - } - if (fe->args != NIL) { - ListCell* temp = NULL; - foreach(temp, fe->args) { - Node *item = (Node*)lfirst(temp); - if (IsA(item, FuncExpr)) { - parse_func(item); - } - } - } - } - break; - default: - break; - } -} - -static bool get_function_id(int vartype, const char* funcname, Oid *funcid, Oid *rettype, - Oid schemaid = SchemaNameGetSchemaOid("pg_catalog", true)) -{ - CatCList *catlist = NULL; -#ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum < 92470) { - catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); - } else { - catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(funcname)); - } -#else - catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); -#endif - if (catlist != NULL) { - for (int i = 0; i < catlist->n_members; ++i) { - HeapTuple proctup = &catlist->members[i]->tuple; - Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); - if (procform && (int)procform->prorettype == vartype && procform->pronamespace == schemaid) { - (*funcid) = HeapTupleGetOid(proctup); - (*rettype) = procform->prorettype; - break; - } - } - ReleaseSysCacheList(catlist); - return true; - } - return false; -} - -static Node* create_predefined_function(const char* funcname, int funcid, int rettype, Node* arg, int funccollid, - CoercionForm funcformat = COERCE_EXPLICIT_CALL) -{ - FuncExpr *funcexpr = makeNode(FuncExpr); - funcexpr->funcid = funcid; - funcexpr->funcresulttype = rettype; - funcexpr->funcretset = false; - funcexpr->funcvariadic = false; - funcexpr->funcformat = funcformat; - - if (IsA(arg, Var)) { - Var* var = (Var*)arg; - var->location += t_thrd.security_policy_cxt.node_location; - funcexpr->location = var->location; - var->location += strlen(funcname) + 1; - t_thrd.security_policy_cxt.node_location += (strlen(funcname) + 2); - } else if(IsA(arg, Const)) { - Const* const_arg = (Const*)arg; - const_arg->location += t_thrd.security_policy_cxt.node_location; - funcexpr->location = const_arg->location; - const_arg->location += strlen(funcname) + 1; - 
t_thrd.security_policy_cxt.node_location += (strlen(funcname) + 2); - } else if(IsA(arg, FuncExpr)) { - FuncExpr* fe = (FuncExpr*)arg; - fe->location += t_thrd.security_policy_cxt.node_location; - funcexpr->location = fe->location; - fe->location += strlen(funcname) + 1; - t_thrd.security_policy_cxt.node_location += (strlen(funcname) + 2); - } - - funcexpr->args = lappend(funcexpr->args, arg); - funcexpr->funccollid = funccollid; - funcexpr->inputcollid = 100; /* OID of collation that function should use */ - - return (Node*)funcexpr; -} - -static Node* create_relabel_type(Node* func, int resulttype, int location, - CoercionForm relabelformat = COERCE_EXPLICIT_CAST) -{ - if (func == NULL) { - return NULL; - } - RelabelType *typeexpr = makeNode(RelabelType); - if (!typeexpr)return NULL; - typeexpr->arg = (Expr*)func; - typeexpr->resulttype = resulttype; - typeexpr->resulttypmod = -1; - typeexpr->resultcollid = 100; /* OID of collation */ - typeexpr->relabelformat = relabelformat; - typeexpr->location = location; - return (Node*)typeexpr; -} - -static inline Node* create_string_node(ParseState *pstate, const char* letter, int location, int col_type = TEXTOID) -{ - Node* const_node = (Node*)make_const(pstate, makeString((char*)letter), location); - const_node = coerce_type(pstate, const_node, ((Const*)const_node)->consttype, col_type, -1, - COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); - return const_node; -} - - -Node* create_integer_node(ParseState *pstate, int value, int location, int col_type, bool make_cast) -{ - Node* const_node = (Node*)make_const(pstate, makeInteger(value), location); - if (make_cast) { - const_node = coerce_type(pstate, const_node, ((Const*)const_node)->consttype, col_type, -1, - COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); - } - return const_node; -} - -static inline Node* create_float_node(ParseState *pstate, const char* value, int location, int col_type = FLOAT4OID) -{ - Node* const_node = (Node*)make_const(pstate, makeFloat((char*)value), location); - const_node = coerce_type(pstate, const_node, ((Const*)const_node)->consttype, col_type, -1, - COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); - return const_node; -} - -static inline Node* create_empty_node(ParseState *pstate, int location, int col_type = INT4OID, bool make_cast = true) -{ - Const* const_node = (Const*)create_integer_node(pstate, 0, location, col_type, make_cast); - if (const_node) { - const_node->constisnull = false; - const_node->constbyval = true; - } - return (Node*)const_node; -} - -static Node *create_repeat_function(ParseState *pstate, const char *letter, Node *arg) -{ - Var *var = (Var*)arg; - Oid funcid = 0; - Oid rettype = 0; - if (get_function_id(TEXTOID, "repeat", &funcid, &rettype)) { - Node *const_node = create_string_node(pstate, letter, var->location); - Node *repeat_func = create_predefined_function("repeat", funcid, rettype, const_node, 100); - FuncExpr *funcexpr = (FuncExpr*)repeat_func; - if (get_function_id(INT4OID, "length", &funcid, &rettype)) { - Node *length_func = create_predefined_function("length", funcid, rettype, arg, 0); - funcexpr->args = lappend(funcexpr->args, (Node*)length_func); - } - return repeat_func; - } - return arg; -} - -static Node *regexp_function(ParseState *pstate, Var *var, masking_result *result, long long polid, - const char *full_column, const func_params *f_params) -{ - bool cast = false; - Node *regexp_func_node = NULL; - switch (var->vartype) { - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - cast = true; - case TEXTOID: - { - Oid 
funcid = 0; - Oid rettype = TEXTOID; - get_function_id(TEXTOID, "regexpmasking", &funcid, &rettype); - if (funcid > 0) { - regexp_func_node = create_udf_function(pstate, var, funcid, result, polid, full_column, f_params); - if (cast) { - Expr *cast_fun = (Expr*)create_relabel_type(regexp_func_node, var->vartype, var->location); - if (cast_fun != NULL) { /* success */ - regexp_func_node = (Node*)cast_fun; - } - } - (*result)[polid][M_REGEXP].insert(full_column); - } - break; - } - default: - break; - } - return regexp_func_node; -} - -static Node *shuffle_function(ParseState *pstate, Var *var, masking_result *result, long long polid, - const char *full_column) -{ - bool make_cast = false; - Node *shuffle_func_node = NULL; - switch (var->vartype) { - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - make_cast = true; - case TEXTOID: - { - Oid funcid = 0; - Oid rettype = TEXTOID; - get_function_id(TEXTOID, "shufflemasking", &funcid, &rettype); - if (funcid > 0) { - shuffle_func_node = create_predefined_function("shufflemasking", funcid, TEXTOID, (Node*)var, 100); - if (make_cast) { - Expr *cast_fun = (Expr*)create_relabel_type(shuffle_func_node, var->vartype, var->location); - if (cast_fun != NULL) { /* success */ - shuffle_func_node = (Node*)cast_fun; - } - } - (*result)[polid][M_SHUFFLE].insert(full_column); - } - break; - } - default: - break; - } - return shuffle_func_node; -} - -static Node *random_function(ParseState *pstate, Var *var, masking_result *result, long long polid, - const char *full_column) -{ - bool cast = false; - Node *random_func_node = NULL; - switch (var->vartype) { - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - cast = true; - case TEXTOID: - { - Oid funcid = 0; - Oid rettype = TEXTOID; - get_function_id(TEXTOID, "randommasking", &funcid, &rettype); - if (funcid > 0) { - random_func_node = create_predefined_function("randommasking", funcid, TEXTOID, (Node*)var, 100); - if (cast) { - Expr *cast_fun = (Expr*)create_relabel_type(random_func_node, var->vartype, var->location); - if (cast_fun != NULL) { /* success */ - random_func_node = (Node*)cast_fun; - } - } - (*result)[polid][M_RANDOM].insert(full_column); - } - break; - } - default: - break; - } - return random_func_node; -} - -static Node *all_digits_function(ParseState *pstate, Var *var, - masking_result *result, long long polid, const char *full_column, - const gs_stl::gs_vector *func_params) -{ - bool make_cast_f = false; - Node *digits_func_node = NULL; - switch (var->vartype) { - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - make_cast_f = true; - case TEXTOID: - { - Oid funcid = 0; - Oid rettype = TEXTOID; - get_function_id(TEXTOID, "alldigitsmasking", &funcid, &rettype); - if (funcid > 0) { - digits_func_node = create_predefined_function("alldigitsmasking", funcid, TEXTOID, (Node*)var, 100); - if (func_params->size()) { - Node *constnode = create_string_node(pstate, func_params->begin()->c_str() + DATA_TYPE_LENGTH, - var->location); - FuncExpr *funcexpr = (FuncExpr*)digits_func_node; - funcexpr->args = lappend(funcexpr->args, constnode); - } - if (make_cast_f) { - Expr *castfunc = (Expr*)create_relabel_type(digits_func_node, var->vartype, var->location); - if (castfunc != NULL) { /* success */ - digits_func_node = (Node*)castfunc; - } - } - (*result)[polid][M_ALLDIGITS].insert(full_column); - } - break; - } - default: - break; - } - return digits_func_node; -} - -static Node *email_function(ParseState *pstate, int masking_behavious, - Var *var, masking_result *result, long long 
polid, const char *full_column, - const gs_stl::gs_vector *func_params) -{ - bool makecast = false; - Node *email_func_node = NULL; - switch (var->vartype) { - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - makecast = true; - case TEXTOID: - { - Oid funcid = 0; - Oid rettype = TEXTOID; - const char *funcname = (masking_behavious == M_BASICEMAIL) ? "basicemailmasking" : "fullemailmasking"; - get_function_id(TEXTOID, funcname, &funcid, &rettype); - if (funcid > 0) { - email_func_node = create_predefined_function(funcname, funcid, TEXTOID, (Node*)var, 100); - if (func_params->size()) { - Node *const_node = create_string_node(pstate, func_params->begin()->c_str() + DATA_TYPE_LENGTH, - var->location); - FuncExpr *funcexpr = (FuncExpr*)email_func_node; - funcexpr->args = lappend(funcexpr->args, const_node); - } - if (makecast) { - Expr *cast_fun = (Expr*)create_relabel_type(email_func_node, var->vartype, var->location); - if (cast_fun != NULL) { /* success */ - email_func_node = (Node*)cast_fun; - } - } - (*result)[polid][masking_behavious].insert(full_column); - } - break; - } - default: - break; - } - return email_func_node; -} - -static Node *maskall_function(ParseState *pstate, - int masking_behavious, Var *var, masking_result *result, long long polid, const char *full_column, - const gs_stl::gs_vector *func_params) -{ - bool make_cast = false; - char time_str[BUFFSIZE] = {0}; - int printed_size = 0; - Node *maskall_func_node = NULL; - switch (var->vartype) { - case BOOLOID: - { - Node *const_int_node = create_integer_node(pstate, 0, var->location, var->vartype); - if (const_int_node != NULL) { /* success */ - maskall_func_node = const_int_node; - } - } - break; - case RELTIMEOID: - printed_size = snprintf_s(time_str, sizeof(time_str), sizeof(time_str) - 1, "1970"); - securec_check_ss(printed_size, "\0", "\0"); - case TIMEOID: - case TIMETZOID: - case INTERVALOID: - if (!printed_size) { - printed_size = snprintf_s(time_str, sizeof(time_str), sizeof(time_str) - 1, "00:00:00.0000+00"); - securec_check_ss(printed_size, "\0", "\0"); - } - case TIMESTAMPOID: - case TIMESTAMPTZOID: - case SMALLDATETIMEOID: - case ABSTIMEOID: - { - if (!printed_size) { - printed_size = snprintf_s(time_str, sizeof(time_str), sizeof(time_str) - 1, "1970-01-01 00:00:00.0000"); - securec_check_ss(printed_size, "\0", "\0"); - } - Node *const_node = create_string_node(pstate, time_str, var->location, var->vartype); - if (const_node != NULL) { /* success */ - maskall_func_node = const_node; - } - } - break; - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - case NAMEOID: - make_cast = true; - case TEXTOID: - { - const char *replace_str = "x"; - if (func_params && func_params->size() > 0) { - replace_str = func_params->begin()->c_str(); - } - maskall_func_node = create_repeat_function(pstate, replace_str, (Node*)var); - if (maskall_func_node != NULL && make_cast) { - Expr *cast_func = (Expr*)create_relabel_type(maskall_func_node, var->vartype, var->location); - if (cast_func != NULL) { /* success */ - maskall_func_node = (Node*)cast_func; - } - } - } - break; - case INT8OID: - case INT4OID: - case INT2OID: - case INT1OID: - case NUMERICOID: - case FLOAT4OID: /* real */ - case FLOAT8OID: - { - Node *const_int_node = create_integer_node(pstate, 0, var->location, - var->vartype, (var->vartype != CASHOID)); - if (const_int_node != NULL) { /* success */ - maskall_func_node = const_int_node; - } - } - break; - default: /* wrong column type */ - { - ereport(WARNING, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - 
errmsg("Unsupported type of column %s will not be masked by policy %lld.", full_column, polid))); - } - break; - } - (*result)[polid][M_MASKALL].insert(full_column); - return maskall_func_node; -} - -/* get funcname from pg_proc */ -static const char* get_udf_function_name(long long funcid, Oid& rettype, func_types* types) -{ - if (!funcid) { - return ""; - } - HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); - if (!HeapTupleIsValid(tuple)) { - return ""; - } - Form_pg_proc func_rel = (Form_pg_proc) GETSTRUCT(tuple); - const char* procname = func_rel->proname.data; - rettype = func_rel->prorettype; - get_function_parameters(tuple, types); - ReleaseSysCache(tuple); - return procname; -} - -static Node* convert_text_to_numeric(Oid resulttype, Node* data) -{ - int funccollid = 100; /* OID of collation of result */ - switch (resulttype) { - case INT1OID: - return create_predefined_function("text_int1", F_TEXT_INT1, resulttype, data, funccollid); - case INT2OID: - return create_predefined_function("text_int2", F_TEXT_INT2, resulttype, data, funccollid); - case INT4OID: - return create_predefined_function("text_int4", F_TEXT_INT4, resulttype, data, funccollid); - case INT8OID: - return create_predefined_function("text_int8", F_TEXT_INT8, resulttype, data, funccollid); - case NUMERICOID: - return create_predefined_function("text_numeric", F_TEXT_INT8, resulttype, data, funccollid); - case FLOAT4OID: - return create_predefined_function("text_float4", F_TEXT_FLOAT4, resulttype, data, funccollid); - case FLOAT8OID: - return create_predefined_function("text_float8", F_TEXT_FLOAT8, resulttype, data, funccollid); - case RELTIMEOID: - return create_predefined_function("text_date", F_TEXT_DATE, resulttype, data, funccollid); - case TIMEOID: - case TIMETZOID: - case INTERVALOID: - case TIMESTAMPOID: - case SMALLDATETIMEOID: - case ABSTIMEOID: - return create_predefined_function("text_timestamp", F_TEXT_TIMESTAMP, resulttype, data, funccollid); - default: - break; - } - return NULL; -} - -static Node* convert_numeric_to_text(Oid resulttype, Node* data) -{ - int funccollid = 100; /* OID of collation of result */ - switch (resulttype) { - case INT1OID: - return create_predefined_function("int1_text", F_INT1_TEXT, TEXTOID, data, funccollid); - case INT2OID: - return create_predefined_function("int2_text", F_INT2_TEXT, TEXTOID, data, funccollid); - case INT4OID: - return create_predefined_function("int4_text", F_INT4_TEXT, TEXTOID, data, funccollid); - case INT8OID: - return create_predefined_function("int8_text", F_INT8_TEXT, TEXTOID, data, funccollid); - case NUMERICOID: - return create_predefined_function("numeric_text", F_NUMERIC_TEXT, TEXTOID, data, funccollid); - case FLOAT4OID: - return create_predefined_function("float4_text", F_FLOAT4_TEXT, TEXTOID, data, funccollid); - case FLOAT8OID: - return create_predefined_function("float8_text", F_FLOAT8_TEXT, TEXTOID, data, funccollid); - case RELTIMEOID: - return create_predefined_function("date_text", F_DATE_TEXT, TEXTOID, data, funccollid); - case TIMEOID: - case TIMETZOID: - case INTERVALOID: - case TIMESTAMPOID: - case SMALLDATETIMEOID: - case ABSTIMEOID: - return create_predefined_function("timestamp_text", F_TIMESTAMP_TEXT, TEXTOID, data, funccollid); - case TIMESTAMPTZOID: - return create_predefined_function("timestampsone_text", F_TIMESTAMPZONE_TEXT, TEXTOID, data, funccollid); - default: - break; - } - return NULL; -} - -static Node* check_and_fix_col_node(Oid prog_col_type, Var* var) -{ - switch (prog_col_type) { /* function 
input type */ - case TEXTOID: - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - { - switch (var->vartype) { /* col type */ - case TEXTOID: - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - return (Node*)var; - break; - default: - break; - } - } - break; - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case NUMERICOID: - case FLOAT4OID: // real - case FLOAT8OID: - { - if (var->vartype == prog_col_type) { - return (Node*)var; - } - } - break; - default: - break; - } - return NULL; /* function input type not support or mismatching, use maskall */ -} - -static Node* verify_function_return_type(Oid func_rettype, Var* var, FuncExpr* funcexpr) -{ - switch (func_rettype) { /* function return type */ - case TEXTOID: - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - { - switch (var->vartype) { /* col type */ - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case NUMERICOID: - case FLOAT4OID: - case FLOAT8OID: - case TIMEOID: - case TIMETZOID: - case INTERVALOID: - case TIMESTAMPOID: - case SMALLDATETIMEOID: - case ABSTIMEOID: - case RELTIMEOID: - return convert_text_to_numeric(var->vartype, (Node*)funcexpr); - break; - case TEXTOID: - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - { - Expr *cast_fun = (Expr*)create_relabel_type((Node*)funcexpr, var->vartype, var->location); - return (Node*)cast_fun; - } - break; - default: - break; - } - } - break; - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case NUMERICOID: - case FLOAT4OID: - case FLOAT8OID: - case TIMEOID: - case TIMETZOID: - case INTERVALOID: - case TIMESTAMPOID: - case SMALLDATETIMEOID: - case ABSTIMEOID: - case RELTIMEOID: - { - switch (var->vartype) { - case TEXTOID: - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - return convert_numeric_to_text(var->vartype, (Node*)funcexpr); - break; - default: - break; - } - } - break; - default: - return NULL; /* function return type not support, use maskall */ - } - return (Node*)funcexpr; -} - -static Node* create_udf_function(ParseState *pstate, Var* var, Oid funcid, masking_result *result, long long polid, - const char* full_column, const func_params *f_params) -{ - Node* ret_node = NULL; - AclResult aclresult; - Oid rescollid = 100; /* OID of collation, or InvalidOid if none */ - PG_TRY(); - { - if (funcid <= 0) { - return NULL; - } - Oid rettype = var->vartype; - aclresult = pg_proc_aclcheck(funcid, GetUserId(), ACL_EXECUTE); - func_types proctypes; - const char* funcname = get_udf_function_name(funcid, rettype, &proctypes); - if (aclresult == ACLCHECK_OK && strlen(funcname) != 0) { - /* verify function input parameters */ - if (verify_proc_params(f_params, &proctypes)) { - Oid prog_col_type = proctypes[0]; - /* check if mismatching between function input and col type */ - Node* newc = check_and_fix_col_node(prog_col_type, var); - if (newc) { - FuncExpr* funcexpr = (FuncExpr*)create_predefined_function(funcname, funcid, rettype, - newc, rescollid); - func_params::const_iterator it = f_params->begin(), eit = f_params->end(); - for (; it != eit; ++it) { - Node * const_node = NULL; - if (!strncasecmp(it->c_str(), "s:", DATA_TYPE_LENGTH)) { - const_node = create_string_node(pstate, it->c_str() + DATA_TYPE_LENGTH, var->location); - } else if (!strncasecmp(it->c_str(), "i:", DATA_TYPE_LENGTH)) - const_node = create_integer_node(pstate, atoi(it->c_str() + DATA_TYPE_LENGTH), - var->location); - else if (!strncasecmp(it->c_str(), "f:", DATA_TYPE_LENGTH)) - const_node = create_float_node(pstate, 
it->c_str() + DATA_TYPE_LENGTH, var->location); - if (!const_node) - break; - funcexpr->args = lappend(funcexpr->args, const_node); - } - (*result)[polid][M_MASKALL].insert(full_column); - /* verify function output parameters */ - ret_node = verify_function_return_type(rettype, var, funcexpr); - } - } - } - } - PG_CATCH(); - { - PG_RE_THROW(); - } - PG_END_TRY(); - (*result)[polid][M_MASKALL].insert(full_column); - return ret_node; -} - -static Node *credit_card_function(ParseState *pstate, - int masking_behavious, Var *var, masking_result *result, long long polid, const char *full_column, - const gs_stl::gs_vector *func_params) -{ - bool make_cast = false; - Node *credit_func_node = NULL; - switch (var->vartype) { - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - make_cast = true; - case TEXTOID: - { - Oid funcid = 0; - Oid rettype = 25; - get_function_id(TEXTOID, "creditcardmasking", &funcid, &rettype); - if (funcid > 0) { - credit_func_node = create_predefined_function("creditcardmasking", funcid, TEXTOID, (Node*)var, 100); - if (func_params->size()) { - Node *const_node = create_string_node(pstate, func_params->begin()->c_str() + DATA_TYPE_LENGTH, - var->location); - FuncExpr *fexpr = (FuncExpr*)credit_func_node; - fexpr->args = lappend(fexpr->args, const_node); - } - if (make_cast) { - Expr* castfun = (Expr*)create_relabel_type(credit_func_node, var->vartype, var->location); - if (castfun != NULL) { /* success */ - credit_func_node = (Node*)castfun; - } - } - (*result)[polid][M_CREDIT_CARD].insert(full_column); - } - break; - } - default: - break; - } - return credit_func_node; -} - -static Node *mask_node_by_behavious(bool *is_masking, int masking_behavious, ParseState *pstate, Var* var, - masking_result *result, long long polid, const char* full_column, - const gs_stl::gs_vector *func_params) -{ - Node *masked_node = NULL; - switch (masking_behavious) { - case M_CREDIT_CARD: - { - (*is_masking) = true; - if ((masked_node = credit_card_function(pstate, masking_behavious, var, result, - polid, full_column, func_params)) == NULL) { - masked_node = maskall_function(pstate, masking_behavious, var, - result, polid, full_column, func_params); - } - } - break; - case M_BASICEMAIL: - case M_FULLEMAIL: - { - (*is_masking) = true; - if ((masked_node = email_function(pstate, masking_behavious, var, result, - polid, full_column, func_params)) == NULL) { - masked_node = maskall_function(pstate, masking_behavious, var, result, - polid, full_column, func_params); - } - } - break; - case M_ALLDIGITS: - { - (*is_masking) = true; - if ((masked_node = all_digits_function(pstate, var, result, polid, - full_column, func_params)) == NULL) { - masked_node = maskall_function(pstate, masking_behavious, var, result, - polid, full_column, func_params); - } - } - break; - case M_SHUFFLE: - { - (*is_masking) = true; - if ((masked_node = shuffle_function(pstate, var, result, polid, full_column)) == NULL) { - masked_node = maskall_function(pstate, masking_behavious, var, - result, polid, full_column, func_params); - } - } - break; - case M_RANDOM: - { - (*is_masking) = true; - if ((masked_node = random_function(pstate, var, result, polid, full_column)) == NULL) { - masked_node = maskall_function(pstate, masking_behavious, var, result, - polid, full_column, func_params); - } - } - break; - case M_REGEXP: - { - (*is_masking) = true; - if ((masked_node = regexp_function(pstate, var, result, polid, full_column, func_params)) == NULL) { - masked_node = maskall_function(pstate, masking_behavious, var, result, 
- polid, full_column, NULL); - } - } - break; - case M_UNKNOWN: - case M_MASKALL: - { - (*is_masking) = true; - masked_node = maskall_function(pstate, masking_behavious, var, result, - polid, full_column, func_params); - } - break; - default: - { - (*is_masking) = true; - if ((masked_node = create_udf_function(pstate, var, masking_behavious, result, - polid, full_column, func_params)) == NULL) { - masked_node = maskall_function(pstate, M_UNKNOWN, var, result, polid, full_column, NULL); - } - } - break; - } - if (masked_node == NULL) { - (*is_masking) = false; - } - return masked_node; -} - -bool handle_masking_node(ParseState *pstate, Expr*& src_expr, - const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask) -{ - if (src_expr == NULL || policy_ids->empty()) { - return false; - } - Var* var = (Var*)(src_expr); - - PolicyLabelItem full_column(0, 0, O_COLUMN), view_name; - get_var_value(rtable, var, &full_column, &view_name); /* fqdn column name */ - /* Varattno 'zero' references the whole tuple. */ - if (full_column.m_obj_type == O_COLUMN && var->varattno == 0 && - OidIsValid(full_column.m_object) && is_masked_relation_enabled(full_column.m_object)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Un-support operation for whole tuple contains masked column in table \"%s\".", - get_rel_name(full_column.m_object)))); - } - if (full_column.empty() && rtable != NIL) { /* sub query */ - bool is_found = false; - audit_open_relation(rtable, var, &full_column, &is_found); - } - bool is_masking = false; - int masking_behavious = 0; - long long polid = 0; - - gs_stl::gs_vector func_params; - if (check_masking_policy_action(policy_ids, &full_column, &view_name, &masking_behavious, &polid, &func_params)) { - gs_stl::gs_string log_column; - full_column.get_fqdn_value(&log_column); - - /* When mask a column which not allowed to be masked, we will report an error */ - if (!can_mask) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Un-support operation for masking column."))); - } - - Node* masked_node = NULL; - masked_node = mask_node_by_behavious(&is_masking, masking_behavious, pstate, var, - result, polid, log_column.c_str(), &func_params); - - if (is_masking) { - ereport(DEBUG2, (errmodule(MOD_PARSER), - errmsg("Column %s will be masked by masking behavious %d", log_column.c_str(), masking_behavious))); - } - - if (masked_node != NULL) { - src_expr = (Expr*)masked_node; - } - } - return is_masking; -} - -static bool mask_sublink(ParseState *pstate, Expr*& expr, - const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask) -{ - if (expr == NULL) { - return false; - } - SubLink *sublink = (SubLink *)expr; - Query *query = (Query *) sublink->subselect; - ListCell* temp = NULL; - bool is_masking = false; - foreach (temp, query->targetList) { - TargetEntry *old_tle = (TargetEntry *)lfirst(temp); - is_masking = parser_target_entry(pstate, old_tle, policy_ids, result, query->rtable, can_mask) || is_masking; - } - return is_masking; -} - -static bool mask_list_parameters(List **params, ParseState *pstate, bool *is_masking, const policy_set *policy_ids, - masking_result *result, List* rtable, bool can_mask = true) -{ - List* masked_list = NIL; - ListCell *lc = NULL; - foreach(lc, (*params)) { - Node *item = (Node *) lfirst(lc); - switch (nodeTag(item)) { - case T_SubLink: - { - bool expr_masked = mask_sublink(pstate, (Expr*&)item, policy_ids, result, rtable, can_mask); - *is_masking = expr_masked || *is_masking; - } - 
break; - case T_Aggref: - { - Aggref* agg = (Aggref *) item; - if (agg && agg->args != NIL && list_length(agg->args) > 0) { - mask_list_parameters(&(agg->args), pstate, is_masking, policy_ids, result, rtable, can_mask); - } - } - break; - case T_OpExpr: - { - OpExpr* opexpr = (OpExpr*)item; - if (opexpr && opexpr->args != NIL && list_length(opexpr->args) > 0) { - mask_list_parameters(&(opexpr->args), pstate, is_masking, policy_ids, result, rtable, can_mask); - } - } - break; - case T_Var: - case T_RelabelType: - case T_FuncExpr: - case T_CaseExpr: - case T_CaseWhen: - { - bool expr_masked = mask_expr_node(pstate, (Expr*&)item, policy_ids, result, rtable, can_mask); - *is_masking = expr_masked || *is_masking; - } - break; - case T_TargetEntry: - { - TargetEntry*& target_entry = (TargetEntry*&)item; - bool expr_masked = parser_target_entry(pstate, target_entry, policy_ids, result, rtable, can_mask); - *is_masking = expr_masked || *is_masking; - } - break; - default: - break; - } - masked_list = lappend(masked_list, item); - } - if (*is_masking) - (*params) = masked_list; - return *is_masking; -} - -static bool mask_expr_node(ParseState *pstate, Expr*& expr, - const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask) -{ - bool is_masking = false; - if (expr == NULL) { - return false; - } - switch (nodeTag(expr)) { - case T_SubLink: - is_masking = mask_sublink(pstate, expr, policy_ids, result, rtable, can_mask); - break; - case T_FuncExpr: - is_masking = mask_func(pstate, expr, policy_ids, result, rtable, can_mask); - break; - case T_Var: - is_masking = handle_masking_node(pstate, expr, policy_ids, result, rtable, can_mask); - break; - case T_RelabelType: { - RelabelType *relabel = (RelabelType *) expr; - is_masking = mask_expr_node(pstate, (Expr *&)relabel->arg, policy_ids, result, rtable, can_mask); - break; - } - case T_CoerceViaIO: { - CoerceViaIO *coerce = (CoerceViaIO *) expr; - is_masking = mask_expr_node(pstate, (Expr *&)coerce->arg, policy_ids, result, rtable, false); - break; - } - case T_Aggref: { - Aggref *agg = (Aggref *) expr; - if (agg->args != NIL && list_length(agg->args) > 0) { - mask_list_parameters(&(agg->args), pstate, &is_masking, policy_ids, result, rtable, can_mask); - } - break; - } - case T_OpExpr: { - OpExpr *opexpr = (OpExpr *) expr; - if (opexpr->args != NIL && list_length(opexpr->args) > 0) { - mask_list_parameters(&(opexpr->args), pstate, &is_masking, policy_ids, result, rtable, can_mask); - } - break; - } - case T_CaseExpr: - { - CaseExpr *caseexpr = (CaseExpr *) expr; - if (caseexpr->args != NIL && list_length(caseexpr->args) > 0) { - mask_list_parameters(&(caseexpr->args), pstate, &is_masking, policy_ids, result, rtable, can_mask); - } - bool res = mask_expr_node(pstate, (Expr *&)caseexpr->defresult, policy_ids, result, rtable, can_mask); - is_masking = is_masking || res; - } - break; - case T_CaseWhen: - { - CaseWhen *whenexpr = (CaseWhen *) expr; - if (whenexpr->expr != NULL) { - is_masking = mask_expr_node(pstate, (Expr *&)whenexpr->expr, policy_ids, result, rtable, can_mask); - } - bool res = mask_expr_node(pstate, (Expr *&)whenexpr->result, policy_ids, result, rtable, can_mask); - is_masking = is_masking || res; - } - break; - default: - break; - } - return is_masking; -} - -bool parser_target_entry(ParseState *pstate, TargetEntry *&old_tle, - const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask) -{ - Node* src_expr = (Node*)old_tle->expr; - bool is_masking = false; - switch (nodeTag(src_expr)) { - 
case T_SubLink: - { - SubLink *sublink = (SubLink *) src_expr; - Query *query = (Query *) sublink->subselect; - ListCell* temp = NULL; - foreach (temp, query->targetList) { - TargetEntry *old_tle = (TargetEntry *) lfirst(temp); - parser_target_entry(pstate, old_tle, policy_ids, result, query->rtable, can_mask); - } - } - break; - case T_Var: - { - is_masking = handle_masking_node(pstate, (Expr *&)old_tle->expr, policy_ids, - result, rtable, can_mask); - if (is_masking) { - old_tle->resorigtbl = 0; - old_tle->resorigcol = 0; - } - } - break; - case T_Aggref: - case T_OpExpr: - case T_RelabelType: - case T_FuncExpr: - case T_CoerceViaIO: - case T_CaseExpr: - { - if (mask_expr_node(pstate, (Expr *&)old_tle->expr, policy_ids, result, rtable, can_mask)) { - old_tle->resorigtbl = 0; - old_tle->resorigcol = 0; - is_masking = true; - } - } - break; - default: - break; - } - return is_masking; -} +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * masking.cpp + * + * IDENTIFICATION + * src/contrib/security_plugin/masking.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include +#include "postgres.h" +#include "nodes/nodes.h" +#include "nodes/nodeFuncs.h" +#include "nodes/params.h" +#include "nodes/primnodes.h" +#include "parser/parse_clause.h" +#include "parser/parse_target.h" +#include "parser/parse_expr.h" +#include "parser/parse_oper.h" +#include "parser/parse_func.h" +#include "parser/parse_utilcmd.h" +#include "parser/parse_relation.h" +#include "utils/numeric.h" +#include "catalog/pg_proc.h" +#include "utils/acl.h" +#include "utils/syscache.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "parser/parse_type.h" +#include "nodes/makefuncs.h" +#include "parser/parse_coerce.h" +#include "commands/tablespace.h" +#include "masking.h" +#include "access_audit.h" +#include "gs_threadlocal.h" +#include "gs_policy_plugin.h" +#include "gs_policy/gs_vector.h" +#include "gs_policy/gs_policy_masking.h" + +#define BUFFSIZE 256 + +#ifdef ENABLE_UT +#define static +#endif + +using StrMap = gs_stl::gs_map; +static THR_LOCAL StrMap* masked_prepared_stmts = NULL; +static THR_LOCAL StrMap* masked_cursor_stmts = NULL; + +void reset_node_location() +{ + t_thrd.security_policy_cxt.node_location = 0; +} + +const int DATA_TYPE_LENGTH = 2; /* data type length */ +static Node* create_udf_function(ParseState *pstate, Var* var, Oid funcid, masking_result *result, long long polid, + const char* full_column, const func_params *f_params); +static void parse_func(Node *expr); +static void get_var_value(const List *rtable, Var *var, PolicyLabelItem *full_column, PolicyLabelItem *view_name); + +static void get_fqdn_by_joinalias(const List *rtable, const RangeTblEntry *rte, const Var *var, + PolicyLabelItem *full_column, PolicyLabelItem *view_name) +{ + ListCell *l = NULL; + int joinpos = 1; + /* Scan all joinaliasvars of JOIN RTE. 
+
+void reset_node_location()
+{
+    t_thrd.security_policy_cxt.node_location = 0;
+}
+
+const int DATA_TYPE_LENGTH = 2; /* data type length */
+static Node* create_udf_function(ParseState *pstate, Var* var, Oid funcid, masking_result *result, long long polid,
+    const char* full_column, const func_params *f_params);
+static void parse_func(Node *expr);
+static void get_var_value(const List *rtable, Var *var, PolicyLabelItem *full_column, PolicyLabelItem *view_name);
+
+static void get_fqdn_by_joinalias(const List *rtable, const RangeTblEntry *rte, const Var *var,
+    PolicyLabelItem *full_column, PolicyLabelItem *view_name)
+{
+    ListCell *l = NULL;
+    int joinpos = 1;
+    /* Scan all joinaliasvars of a JOIN RTE.
+     * varattno records the Var's position in joinaliasvars,
+     * so we can use it to find the real Var behind the join alias.
+     */
+    foreach (l, rte->joinaliasvars) {
+        if (joinpos != (int)var->varattno) {
+            ++joinpos;
+            continue;
+        }
+        /* Get real join var information */
+        Var *joinvar = (Var *)lfirst(l);
+        get_var_value(rtable, joinvar, full_column, view_name);
+        break;
+    }
+}
+
+static void parse_value(const List *rtable, RangeTblEntry *rte, Var *var,
+    PolicyLabelItem *full_column, PolicyLabelItem *view_name)
+{
+    switch (rte->rtekind) {
+        case RTE_RELATION: /* relation ID of current table */
+            if (OidIsValid(rte->relid)) {
+                get_fqdn_by_relid(rte, full_column, var, view_name);
+            }
+            break;
+        case RTE_JOIN: /* RTE is JOIN kind */
+            get_fqdn_by_joinalias(rtable, rte, var, full_column, view_name);
+            break;
+        case RTE_FUNCTION: /* relation is a function */
+            parse_func(rte->funcexpr);
+            break;
+        default:
+            /* There is no need to parse a subquery or CTE here, because a subquery or CTE masks itself. */
+            break;
+    }
+}
+
+static void get_var_value(const List *rtable, Var *var, PolicyLabelItem *full_column, PolicyLabelItem *view_name)
+{
+    ListCell *l = NULL;
+    int pos = 1;
+    /* In the target list, each TargetEntry's varno points into rtable,
+     * so the RangeTblEntry corresponding to each varno is fetched first;
+     * the full column name is then parsed for every (var, rte) pair.
+     */
+    foreach (l, rtable) {
+        if (pos != (int)var->varno) {
+            ++pos;
+            continue;
+        }
+
+        RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+        parse_value(rtable, rte, var, full_column, view_name);
+        break;
+    }
+}
+
+static bool mask_expr_node(ParseState *pstate, Expr*& expr,
+    const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask = true);
+bool handle_masking_node(ParseState *pstate, Expr*& src_expr,
+    const policy_set *policy_ids, masking_result *result, List *rtable, bool can_mask = true);
+
+static bool mask_func(ParseState *pstate, Expr*& expr,
+    const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask = true)
+{
+    if (expr == NULL) {
+        return false;
+    }
+    bool is_masking = false;
+    if (nodeTag(expr) == T_FuncExpr) {
+        FuncExpr *fe = (FuncExpr *)(expr);
+        PolicyLabelItem func_value;
+        if (get_function_name(fe->funcid, &func_value)) {
+            set_result_set_function(func_value);
+        }
+        if (fe->args != NULL) {
+            ListCell* temp = NULL;
+            foreach(temp, fe->args) {
+                Node *&item = (Node*&)lfirst(temp);
+                bool expr_masked = mask_expr_node(pstate, (Expr*&)item, policy_ids, result, rtable, can_mask);
+                is_masking = is_masking || expr_masked;
+            }
+        }
+    }
+    return is_masking;
+}
+
+static void parse_func(Node* expr)
+{
+    switch (nodeTag(expr)) {
+        case T_FuncExpr:
+        {
+            FuncExpr *fe = (FuncExpr *)expr;
+            {
+                PolicyLabelItem func_value;
+                if (get_function_name(fe->funcid, &func_value)) {
+                    set_result_set_function(func_value);
+                }
+            }
+            if (fe->args != NIL) {
+                ListCell* temp = NULL;
+                foreach(temp, fe->args) {
+                    Node *item = (Node*)lfirst(temp);
+                    if (IsA(item, FuncExpr)) {
+                        parse_func(item);
+                    }
+                }
+            }
+        }
+        break;
+        default:
+            break;
+    }
+}
+
+static bool get_function_id(int vartype, const char* funcname, Oid *funcid, Oid *rettype,
+    Oid schemaid = SchemaNameGetSchemaOid("pg_catalog", true))
+{
+    CatCList *catlist = NULL;
+#ifndef ENABLE_MULTIPLE_NODES
+    if (t_thrd.proc->workingVersionNum < 92470) {
+        catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname));
+    } else {
+        catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(funcname));
+    }
+#else
+    catlist = 
SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); +#endif + if (catlist != NULL) { + for (int i = 0; i < catlist->n_members; ++i) { + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); + Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); + if (procform && (int)procform->prorettype == vartype && procform->pronamespace == schemaid) { + (*funcid) = HeapTupleGetOid(proctup); + (*rettype) = procform->prorettype; + ReleaseSysCacheList(catlist); + return true; + } + } + ReleaseSysCacheList(catlist); + return false; + } + return false; +} + +static Node* create_predefined_function(const char* funcname, int funcid, int rettype, Node* arg, int funccollid, + CoercionForm funcformat = COERCE_EXPLICIT_CALL) +{ + FuncExpr *funcexpr = makeNode(FuncExpr); + funcexpr->funcid = funcid; + funcexpr->funcresulttype = rettype; + funcexpr->funcretset = false; + funcexpr->funcvariadic = false; + funcexpr->funcformat = funcformat; + + if (IsA(arg, Var)) { + Var* var = (Var*)arg; + var->location += t_thrd.security_policy_cxt.node_location; + funcexpr->location = var->location; + var->location += strlen(funcname) + 1; + t_thrd.security_policy_cxt.node_location += (strlen(funcname) + 2); + } else if(IsA(arg, Const)) { + Const* const_arg = (Const*)arg; + const_arg->location += t_thrd.security_policy_cxt.node_location; + funcexpr->location = const_arg->location; + const_arg->location += strlen(funcname) + 1; + t_thrd.security_policy_cxt.node_location += (strlen(funcname) + 2); + } else if(IsA(arg, FuncExpr)) { + FuncExpr* fe = (FuncExpr*)arg; + fe->location += t_thrd.security_policy_cxt.node_location; + funcexpr->location = fe->location; + fe->location += strlen(funcname) + 1; + t_thrd.security_policy_cxt.node_location += (strlen(funcname) + 2); + } + + funcexpr->args = lappend(funcexpr->args, arg); + funcexpr->funccollid = funccollid; + funcexpr->inputcollid = 100; /* OID of collation that function should use */ + + return (Node*)funcexpr; +} + +static Node* create_relabel_type(Node* func, int resulttype, int location, + CoercionForm relabelformat = COERCE_EXPLICIT_CAST) +{ + if (func == NULL) { + return NULL; + } + RelabelType *typeexpr = makeNode(RelabelType); + if (!typeexpr)return NULL; + typeexpr->arg = (Expr*)func; + typeexpr->resulttype = resulttype; + typeexpr->resulttypmod = -1; + typeexpr->resultcollid = 100; /* OID of collation */ + typeexpr->relabelformat = relabelformat; + typeexpr->location = location; + return (Node*)typeexpr; +} + +static inline Node* create_string_node(ParseState *pstate, const char* letter, int location, int col_type = TEXTOID) +{ + Node* const_node = (Node*)make_const(pstate, makeString((char*)letter), location); + const_node = coerce_type(pstate, const_node, ((Const*)const_node)->consttype, col_type, -1, + COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); + return const_node; +} + + +Node* create_integer_node(ParseState *pstate, int value, int location, int col_type, bool make_cast) +{ + Node* const_node = (Node*)make_const(pstate, makeInteger(value), location); + if (make_cast) { + const_node = coerce_type(pstate, const_node, ((Const*)const_node)->consttype, col_type, -1, + COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); + } + return const_node; +} + +static inline Node* create_float_node(ParseState *pstate, const char* value, int location, int col_type = FLOAT4OID) +{ + Node* const_node = (Node*)make_const(pstate, makeFloat((char*)value), location); + const_node = coerce_type(pstate, const_node, ((Const*)const_node)->consttype, col_type, -1, + 
COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); + return const_node; +} + +static inline Node* create_empty_node(ParseState *pstate, int location, int col_type = INT4OID, bool make_cast = true) +{ + Const* const_node = (Const*)create_integer_node(pstate, 0, location, col_type, make_cast); + if (const_node) { + const_node->constisnull = false; + const_node->constbyval = true; + } + return (Node*)const_node; +} + +static Node *create_repeat_function(ParseState *pstate, const char *letter, Node *arg) +{ + Var *var = (Var*)arg; + Oid funcid = 0; + Oid rettype = 0; + if (get_function_id(TEXTOID, "repeat", &funcid, &rettype)) { + Node *const_node = create_string_node(pstate, letter, var->location); + Node *repeat_func = create_predefined_function("repeat", funcid, rettype, const_node, 100); + FuncExpr *funcexpr = (FuncExpr*)repeat_func; + if (get_function_id(INT4OID, "length", &funcid, &rettype)) { + Node *length_func = create_predefined_function("length", funcid, rettype, arg, 0); + funcexpr->args = lappend(funcexpr->args, (Node*)length_func); + } + return repeat_func; + } + return arg; +} + +static Node *regexp_function(ParseState *pstate, Var *var, masking_result *result, long long polid, + const char *full_column, const func_params *f_params) +{ + bool cast = false; + Node *regexp_func_node = NULL; + switch (var->vartype) { + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + cast = true; + case TEXTOID: + { + Oid funcid = 0; + Oid rettype = TEXTOID; + get_function_id(TEXTOID, "regexpmasking", &funcid, &rettype); + if (funcid > 0) { + regexp_func_node = create_udf_function(pstate, var, funcid, result, polid, full_column, f_params); + if (cast) { + Expr *cast_fun = (Expr*)create_relabel_type(regexp_func_node, var->vartype, var->location); + if (cast_fun != NULL) { /* success */ + regexp_func_node = (Node*)cast_fun; + } + } + (*result)[polid][M_REGEXP].insert(full_column); + } + break; + } + default: + break; + } + return regexp_func_node; +} + +static Node *shuffle_function(ParseState *pstate, Var *var, masking_result *result, long long polid, + const char *full_column) +{ + bool make_cast = false; + Node *shuffle_func_node = NULL; + switch (var->vartype) { + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + make_cast = true; + case TEXTOID: + { + Oid funcid = 0; + Oid rettype = TEXTOID; + get_function_id(TEXTOID, "shufflemasking", &funcid, &rettype); + if (funcid > 0) { + shuffle_func_node = create_predefined_function("shufflemasking", funcid, TEXTOID, (Node*)var, 100); + if (make_cast) { + Expr *cast_fun = (Expr*)create_relabel_type(shuffle_func_node, var->vartype, var->location); + if (cast_fun != NULL) { /* success */ + shuffle_func_node = (Node*)cast_fun; + } + } + (*result)[polid][M_SHUFFLE].insert(full_column); + } + break; + } + default: + break; + } + return shuffle_func_node; +} + +static Node *random_function(ParseState *pstate, Var *var, masking_result *result, long long polid, + const char *full_column) +{ + bool cast = false; + Node *random_func_node = NULL; + switch (var->vartype) { + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + cast = true; + case TEXTOID: + { + Oid funcid = 0; + Oid rettype = TEXTOID; + get_function_id(TEXTOID, "randommasking", &funcid, &rettype); + if (funcid > 0) { + random_func_node = create_predefined_function("randommasking", funcid, TEXTOID, (Node*)var, 100); + if (cast) { + Expr *cast_fun = (Expr*)create_relabel_type(random_func_node, var->vartype, var->location); + if (cast_fun != NULL) { /* success */ + random_func_node = 
(Node*)cast_fun; + } + } + (*result)[polid][M_RANDOM].insert(full_column); + } + break; + } + default: + break; + } + return random_func_node; +} + +static Node *all_digits_function(ParseState *pstate, Var *var, + masking_result *result, long long polid, const char *full_column, + const gs_stl::gs_vector *func_params) +{ + bool make_cast_f = false; + Node *digits_func_node = NULL; + switch (var->vartype) { + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + make_cast_f = true; + case TEXTOID: + { + Oid funcid = 0; + Oid rettype = TEXTOID; + get_function_id(TEXTOID, "alldigitsmasking", &funcid, &rettype); + if (funcid > 0) { + digits_func_node = create_predefined_function("alldigitsmasking", funcid, TEXTOID, (Node*)var, 100); + if (func_params->size()) { + Node *constnode = create_string_node(pstate, func_params->begin()->c_str() + DATA_TYPE_LENGTH, + var->location); + FuncExpr *funcexpr = (FuncExpr*)digits_func_node; + funcexpr->args = lappend(funcexpr->args, constnode); + } + if (make_cast_f) { + Expr *castfunc = (Expr*)create_relabel_type(digits_func_node, var->vartype, var->location); + if (castfunc != NULL) { /* success */ + digits_func_node = (Node*)castfunc; + } + } + (*result)[polid][M_ALLDIGITS].insert(full_column); + } + break; + } + default: + break; + } + return digits_func_node; +} + +static Node *email_function(ParseState *pstate, int masking_behavious, + Var *var, masking_result *result, long long polid, const char *full_column, + const gs_stl::gs_vector *func_params) +{ + bool makecast = false; + Node *email_func_node = NULL; + switch (var->vartype) { + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + makecast = true; + case TEXTOID: + { + Oid funcid = 0; + Oid rettype = TEXTOID; + const char *funcname = (masking_behavious == M_BASICEMAIL) ? 
"basicemailmasking" : "fullemailmasking"; + get_function_id(TEXTOID, funcname, &funcid, &rettype); + if (funcid > 0) { + email_func_node = create_predefined_function(funcname, funcid, TEXTOID, (Node*)var, 100); + if (func_params->size()) { + Node *const_node = create_string_node(pstate, func_params->begin()->c_str() + DATA_TYPE_LENGTH, + var->location); + FuncExpr *funcexpr = (FuncExpr*)email_func_node; + funcexpr->args = lappend(funcexpr->args, const_node); + } + if (makecast) { + Expr *cast_fun = (Expr*)create_relabel_type(email_func_node, var->vartype, var->location); + if (cast_fun != NULL) { /* success */ + email_func_node = (Node*)cast_fun; + } + } + (*result)[polid][masking_behavious].insert(full_column); + } + break; + } + default: + break; + } + return email_func_node; +} + +static Node *maskall_function(ParseState *pstate, + int masking_behavious, Var *var, masking_result *result, long long polid, const char *full_column, + const gs_stl::gs_vector *func_params) +{ + bool make_cast = false; + char time_str[BUFFSIZE] = {0}; + int printed_size = 0; + Node *maskall_func_node = NULL; + switch (var->vartype) { + case BOOLOID: + { + Node *const_int_node = create_integer_node(pstate, 0, var->location, var->vartype); + if (const_int_node != NULL) { /* success */ + maskall_func_node = const_int_node; + } + } + break; + case RELTIMEOID: + printed_size = snprintf_s(time_str, sizeof(time_str), sizeof(time_str) - 1, "1970"); + securec_check_ss(printed_size, "\0", "\0"); + case TIMEOID: + case TIMETZOID: + case INTERVALOID: + if (!printed_size) { + printed_size = snprintf_s(time_str, sizeof(time_str), sizeof(time_str) - 1, "00:00:00.0000+00"); + securec_check_ss(printed_size, "\0", "\0"); + } + case TIMESTAMPOID: + case TIMESTAMPTZOID: + case SMALLDATETIMEOID: + case ABSTIMEOID: + { + if (!printed_size) { + printed_size = snprintf_s(time_str, sizeof(time_str), sizeof(time_str) - 1, "1970-01-01 00:00:00.0000"); + securec_check_ss(printed_size, "\0", "\0"); + } + Node *const_node = create_string_node(pstate, time_str, var->location, var->vartype); + if (const_node != NULL) { /* success */ + maskall_func_node = const_node; + } + } + break; + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + case NAMEOID: + make_cast = true; + case TEXTOID: + { + const char *replace_str = "x"; + if (func_params && func_params->size() > 0) { + replace_str = func_params->begin()->c_str(); + } + maskall_func_node = create_repeat_function(pstate, replace_str, (Node*)var); + if (maskall_func_node != NULL && make_cast) { + Expr *cast_func = (Expr*)create_relabel_type(maskall_func_node, var->vartype, var->location); + if (cast_func != NULL) { /* success */ + maskall_func_node = (Node*)cast_func; + } + } + } + break; + case INT8OID: + case INT4OID: + case INT2OID: + case INT1OID: + case NUMERICOID: + case FLOAT4OID: /* real */ + case FLOAT8OID: + { + Node *const_int_node = create_integer_node(pstate, 0, var->location, + var->vartype, (var->vartype != CASHOID)); + if (const_int_node != NULL) { /* success */ + maskall_func_node = const_int_node; + } + } + break; + default: /* wrong column type */ + { + ereport(WARNING, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("Unsupported type of column %s will not be masked by policy %lld.", full_column, polid))); + } + break; + } + (*result)[polid][M_MASKALL].insert(full_column); + return maskall_func_node; +} + +/* get funcname from pg_proc */ +static const char* get_udf_function_name(long long funcid, Oid& rettype, func_types* types) +{ + if (!funcid) { + return ""; + } + 
HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + if (!HeapTupleIsValid(tuple)) { + return ""; + } + Form_pg_proc func_rel = (Form_pg_proc) GETSTRUCT(tuple); + const char* procname = func_rel->proname.data; + rettype = func_rel->prorettype; + get_function_parameters(tuple, types); + ReleaseSysCache(tuple); + return procname; +} + +static Node* convert_text_to_numeric(Oid resulttype, Node* data) +{ + int funccollid = 100; /* OID of collation of result */ + switch (resulttype) { + case INT1OID: + return create_predefined_function("text_int1", F_TEXT_INT1, resulttype, data, funccollid); + case INT2OID: + return create_predefined_function("text_int2", F_TEXT_INT2, resulttype, data, funccollid); + case INT4OID: + return create_predefined_function("text_int4", F_TEXT_INT4, resulttype, data, funccollid); + case INT8OID: + return create_predefined_function("text_int8", F_TEXT_INT8, resulttype, data, funccollid); + case NUMERICOID: + return create_predefined_function("text_numeric", F_TEXT_INT8, resulttype, data, funccollid); + case FLOAT4OID: + return create_predefined_function("text_float4", F_TEXT_FLOAT4, resulttype, data, funccollid); + case FLOAT8OID: + return create_predefined_function("text_float8", F_TEXT_FLOAT8, resulttype, data, funccollid); + case RELTIMEOID: + return create_predefined_function("text_date", F_TEXT_DATE, resulttype, data, funccollid); + case TIMEOID: + case TIMETZOID: + case INTERVALOID: + case TIMESTAMPOID: + case SMALLDATETIMEOID: + case ABSTIMEOID: + return create_predefined_function("text_timestamp", F_TEXT_TIMESTAMP, resulttype, data, funccollid); + default: + break; + } + return NULL; +} + +static Node* convert_numeric_to_text(Oid resulttype, Node* data) +{ + int funccollid = 100; /* OID of collation of result */ + switch (resulttype) { + case INT1OID: + return create_predefined_function("int1_text", F_INT1_TEXT, TEXTOID, data, funccollid); + case INT2OID: + return create_predefined_function("int2_text", F_INT2_TEXT, TEXTOID, data, funccollid); + case INT4OID: + return create_predefined_function("int4_text", F_INT4_TEXT, TEXTOID, data, funccollid); + case INT8OID: + return create_predefined_function("int8_text", F_INT8_TEXT, TEXTOID, data, funccollid); + case NUMERICOID: + return create_predefined_function("numeric_text", F_NUMERIC_TEXT, TEXTOID, data, funccollid); + case FLOAT4OID: + return create_predefined_function("float4_text", F_FLOAT4_TEXT, TEXTOID, data, funccollid); + case FLOAT8OID: + return create_predefined_function("float8_text", F_FLOAT8_TEXT, TEXTOID, data, funccollid); + case RELTIMEOID: + return create_predefined_function("date_text", F_DATE_TEXT, TEXTOID, data, funccollid); + case TIMEOID: + case TIMETZOID: + case INTERVALOID: + case TIMESTAMPOID: + case SMALLDATETIMEOID: + case ABSTIMEOID: + return create_predefined_function("timestamp_text", F_TIMESTAMP_TEXT, TEXTOID, data, funccollid); + case TIMESTAMPTZOID: + return create_predefined_function("timestampsone_text", F_TIMESTAMPZONE_TEXT, TEXTOID, data, funccollid); + default: + break; + } + return NULL; +} + +static Node* check_and_fix_col_node(Oid prog_col_type, Var* var) +{ + switch (prog_col_type) { /* function input type */ + case TEXTOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + { + switch (var->vartype) { /* col type */ + case TEXTOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + return (Node*)var; + break; + default: + break; + } + } + break; + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case NUMERICOID: + case 
FLOAT4OID: // real + case FLOAT8OID: + { + if (var->vartype == prog_col_type) { + return (Node*)var; + } + } + break; + default: + break; + } + return NULL; /* function input type not support or mismatching, use maskall */ +} + +static Node* verify_function_return_type(Oid func_rettype, Var* var, FuncExpr* funcexpr) +{ + switch (func_rettype) { /* function return type */ + case TEXTOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + { + switch (var->vartype) { /* col type */ + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case NUMERICOID: + case FLOAT4OID: + case FLOAT8OID: + case TIMEOID: + case TIMETZOID: + case INTERVALOID: + case TIMESTAMPOID: + case SMALLDATETIMEOID: + case ABSTIMEOID: + case RELTIMEOID: + return convert_text_to_numeric(var->vartype, (Node*)funcexpr); + break; + case TEXTOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + { + Expr *cast_fun = (Expr*)create_relabel_type((Node*)funcexpr, var->vartype, var->location); + return (Node*)cast_fun; + } + break; + default: + break; + } + } + break; + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case NUMERICOID: + case FLOAT4OID: + case FLOAT8OID: + case TIMEOID: + case TIMETZOID: + case INTERVALOID: + case TIMESTAMPOID: + case SMALLDATETIMEOID: + case ABSTIMEOID: + case RELTIMEOID: + { + switch (var->vartype) { + case TEXTOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + return convert_numeric_to_text(var->vartype, (Node*)funcexpr); + break; + default: + break; + } + } + break; + default: + return NULL; /* function return type not support, use maskall */ + } + return (Node*)funcexpr; +} + +static Node* create_udf_function(ParseState *pstate, Var* var, Oid funcid, masking_result *result, long long polid, + const char* full_column, const func_params *f_params) +{ + Node* ret_node = NULL; + AclResult aclresult; + Oid rescollid = 100; /* OID of collation, or InvalidOid if none */ + PG_TRY(); + { + if (funcid <= 0) { + return NULL; + } + Oid rettype = var->vartype; + aclresult = pg_proc_aclcheck(funcid, GetUserId(), ACL_EXECUTE); + func_types proctypes; + const char* funcname = get_udf_function_name(funcid, rettype, &proctypes); + if (aclresult == ACLCHECK_OK && strlen(funcname) != 0) { + /* verify function input parameters */ + if (verify_proc_params(f_params, &proctypes)) { + Oid prog_col_type = proctypes[0]; + /* check if mismatching between function input and col type */ + Node* newc = check_and_fix_col_node(prog_col_type, var); + if (newc) { + FuncExpr* funcexpr = (FuncExpr*)create_predefined_function(funcname, funcid, rettype, + newc, rescollid); + func_params::const_iterator it = f_params->begin(), eit = f_params->end(); + for (; it != eit; ++it) { + Node * const_node = NULL; + if (!strncasecmp(it->c_str(), "s:", DATA_TYPE_LENGTH)) { + const_node = create_string_node(pstate, it->c_str() + DATA_TYPE_LENGTH, var->location); + } else if (!strncasecmp(it->c_str(), "i:", DATA_TYPE_LENGTH)) + const_node = create_integer_node(pstate, atoi(it->c_str() + DATA_TYPE_LENGTH), + var->location); + else if (!strncasecmp(it->c_str(), "f:", DATA_TYPE_LENGTH)) + const_node = create_float_node(pstate, it->c_str() + DATA_TYPE_LENGTH, var->location); + if (!const_node) + break; + funcexpr->args = lappend(funcexpr->args, const_node); + } + (*result)[polid][M_MASKALL].insert(full_column); + /* verify function output parameters */ + ret_node = verify_function_return_type(rettype, var, funcexpr); + } + } + } + } + PG_CATCH(); + { + PG_RE_THROW(); + } + PG_END_TRY(); + 
(*result)[polid][M_MASKALL].insert(full_column); + return ret_node; +} + +static Node *credit_card_function(ParseState *pstate, + int masking_behavious, Var *var, masking_result *result, long long polid, const char *full_column, + const gs_stl::gs_vector *func_params) +{ + bool make_cast = false; + Node *credit_func_node = NULL; + switch (var->vartype) { + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + make_cast = true; + case TEXTOID: + { + Oid funcid = 0; + Oid rettype = 25; + get_function_id(TEXTOID, "creditcardmasking", &funcid, &rettype); + if (funcid > 0) { + credit_func_node = create_predefined_function("creditcardmasking", funcid, TEXTOID, (Node*)var, 100); + if (func_params->size()) { + Node *const_node = create_string_node(pstate, func_params->begin()->c_str() + DATA_TYPE_LENGTH, + var->location); + FuncExpr *fexpr = (FuncExpr*)credit_func_node; + fexpr->args = lappend(fexpr->args, const_node); + } + if (make_cast) { + Expr* castfun = (Expr*)create_relabel_type(credit_func_node, var->vartype, var->location); + if (castfun != NULL) { /* success */ + credit_func_node = (Node*)castfun; + } + } + (*result)[polid][M_CREDIT_CARD].insert(full_column); + } + break; + } + default: + break; + } + return credit_func_node; +} + +static Node *mask_node_by_behavious(bool *is_masking, int masking_behavious, ParseState *pstate, Var* var, + masking_result *result, long long polid, const char* full_column, + const gs_stl::gs_vector *func_params) +{ + Node *masked_node = NULL; + switch (masking_behavious) { + case M_CREDIT_CARD: + { + (*is_masking) = true; + if ((masked_node = credit_card_function(pstate, masking_behavious, var, result, + polid, full_column, func_params)) == NULL) { + masked_node = maskall_function(pstate, masking_behavious, var, + result, polid, full_column, func_params); + } + } + break; + case M_BASICEMAIL: + case M_FULLEMAIL: + { + (*is_masking) = true; + if ((masked_node = email_function(pstate, masking_behavious, var, result, + polid, full_column, func_params)) == NULL) { + masked_node = maskall_function(pstate, masking_behavious, var, result, + polid, full_column, func_params); + } + } + break; + case M_ALLDIGITS: + { + (*is_masking) = true; + if ((masked_node = all_digits_function(pstate, var, result, polid, + full_column, func_params)) == NULL) { + masked_node = maskall_function(pstate, masking_behavious, var, result, + polid, full_column, func_params); + } + } + break; + case M_SHUFFLE: + { + (*is_masking) = true; + if ((masked_node = shuffle_function(pstate, var, result, polid, full_column)) == NULL) { + masked_node = maskall_function(pstate, masking_behavious, var, + result, polid, full_column, func_params); + } + } + break; + case M_RANDOM: + { + (*is_masking) = true; + if ((masked_node = random_function(pstate, var, result, polid, full_column)) == NULL) { + masked_node = maskall_function(pstate, masking_behavious, var, result, + polid, full_column, func_params); + } + } + break; + case M_REGEXP: + { + (*is_masking) = true; + if ((masked_node = regexp_function(pstate, var, result, polid, full_column, func_params)) == NULL) { + masked_node = maskall_function(pstate, masking_behavious, var, result, + polid, full_column, NULL); + } + } + break; + case M_UNKNOWN: + case M_MASKALL: + { + (*is_masking) = true; + masked_node = maskall_function(pstate, masking_behavious, var, result, + polid, full_column, func_params); + } + break; + default: + { + (*is_masking) = true; + if ((masked_node = create_udf_function(pstate, var, masking_behavious, result, + polid, 
full_column, func_params)) == NULL) {
+                masked_node = maskall_function(pstate, M_UNKNOWN, var, result, polid, full_column, NULL);
+            }
+        }
+        break;
+    }
+    if (masked_node == NULL) {
+        (*is_masking) = false;
+    }
+    return masked_node;
+}
+
+bool handle_masking_node(ParseState *pstate, Expr*& src_expr,
+    const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask)
+{
+    if (src_expr == NULL || policy_ids->empty()) {
+        return false;
+    }
+    Var* var = (Var*)(src_expr);
+
+    PolicyLabelItem full_column(0, 0, O_COLUMN), view_name;
+    get_var_value(rtable, var, &full_column, &view_name); /* fqdn column name */
+    /* Varattno 'zero' references the whole tuple. */
+    if (full_column.m_obj_type == O_COLUMN && var->varattno == 0 &&
+        OidIsValid(full_column.m_object) && is_masked_relation_enabled(full_column.m_object)) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("Unsupported operation: the whole-tuple reference includes a masked column in table \"%s\".",
+                get_rel_name(full_column.m_object))));
+    }
+    if (full_column.empty() && rtable != NIL) { /* sub query */
+        bool is_found = false;
+        audit_open_relation(rtable, var, &full_column, &is_found);
+    }
+    bool is_masking = false;
+    int masking_behavious = 0;
+    long long polid = 0;
+
+    gs_stl::gs_vector<gs_stl::gs_string> func_params;
+    if (check_masking_policy_action(policy_ids, &full_column, &view_name, &masking_behavious, &polid, &func_params)) {
+        gs_stl::gs_string log_column;
+        full_column.get_fqdn_value(&log_column);
+
+        /* Report an error when asked to mask a column in a context where masking is not allowed */
+        if (!can_mask) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("Unsupported operation on a masked column.")));
+        }
+
+        Node* masked_node = NULL;
+        masked_node = mask_node_by_behavious(&is_masking, masking_behavious, pstate, var,
+            result, polid, log_column.c_str(), &func_params);
+
+        if (is_masking) {
+            ereport(DEBUG2, (errmodule(MOD_PARSER),
+                errmsg("Column %s will be masked by masking behavior %d", log_column.c_str(), masking_behavious)));
+        }
+
+        if (masked_node != NULL) {
+            src_expr = (Expr*)masked_node;
+        }
+    }
+    return is_masking;
+}
+
+static bool mask_sublink(ParseState *pstate, Expr*& expr,
+    const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask)
+{
+    if (expr == NULL) {
+        return false;
+    }
+    SubLink *sublink = (SubLink *)expr;
+    Query *query = (Query *) sublink->subselect;
+    ListCell* temp = NULL;
+    bool is_masking = false;
+    foreach (temp, query->targetList) {
+        TargetEntry *old_tle = (TargetEntry *)lfirst(temp);
+        is_masking = parser_target_entry(pstate, old_tle, policy_ids, result, query->rtable, can_mask) || is_masking;
+    }
+    return is_masking;
+}
+
+static bool mask_list_parameters(List **params, ParseState *pstate, bool *is_masking, const policy_set *policy_ids,
+    masking_result *result, List* rtable, bool can_mask = true)
+{
+    List* masked_list = NIL;
+    ListCell *lc = NULL;
+    foreach(lc, (*params)) {
+        Node *item = (Node *) lfirst(lc);
+        switch (nodeTag(item)) {
+            case T_SubLink:
+            {
+                bool expr_masked = mask_sublink(pstate, (Expr*&)item, policy_ids, result, rtable, can_mask);
+                *is_masking = expr_masked || *is_masking;
+            }
+            break;
+            case T_Aggref:
+            {
+                Aggref* agg = (Aggref *) item;
+                if (agg && agg->args != NIL && list_length(agg->args) > 0) {
+                    mask_list_parameters(&(agg->args), pstate, is_masking, policy_ids, result, rtable, can_mask);
+                }
+            }
+            break;
+            case T_OpExpr:
+            {
+                OpExpr* opexpr = (OpExpr*)item;
+                if (opexpr && opexpr->args != NIL && list_length(opexpr->args) > 
0) { + mask_list_parameters(&(opexpr->args), pstate, is_masking, policy_ids, result, rtable, can_mask); + } + } + break; + case T_Var: + case T_RelabelType: + case T_FuncExpr: + case T_CaseExpr: + case T_CaseWhen: + { + bool expr_masked = mask_expr_node(pstate, (Expr*&)item, policy_ids, result, rtable, can_mask); + *is_masking = expr_masked || *is_masking; + } + break; + case T_TargetEntry: + { + TargetEntry*& target_entry = (TargetEntry*&)item; + bool expr_masked = parser_target_entry(pstate, target_entry, policy_ids, result, rtable, can_mask); + *is_masking = expr_masked || *is_masking; + } + break; + default: + break; + } + masked_list = lappend(masked_list, item); + } + if (*is_masking) + (*params) = masked_list; + return *is_masking; +} + +static bool mask_expr_node(ParseState *pstate, Expr*& expr, + const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask) +{ + bool is_masking = false; + if (expr == NULL) { + return false; + } + switch (nodeTag(expr)) { + case T_SubLink: + is_masking = mask_sublink(pstate, expr, policy_ids, result, rtable, can_mask); + break; + case T_FuncExpr: + is_masking = mask_func(pstate, expr, policy_ids, result, rtable, can_mask); + break; + case T_Var: + is_masking = handle_masking_node(pstate, expr, policy_ids, result, rtable, can_mask); + break; + case T_RelabelType: { + RelabelType *relabel = (RelabelType *) expr; + is_masking = mask_expr_node(pstate, (Expr *&)relabel->arg, policy_ids, result, rtable, can_mask); + break; + } + case T_CoerceViaIO: { + CoerceViaIO *coerce = (CoerceViaIO *) expr; + is_masking = mask_expr_node(pstate, (Expr *&)coerce->arg, policy_ids, result, rtable, false); + break; + } + case T_Aggref: { + Aggref *agg = (Aggref *) expr; + if (agg->args != NIL && list_length(agg->args) > 0) { + mask_list_parameters(&(agg->args), pstate, &is_masking, policy_ids, result, rtable, can_mask); + } + break; + } + case T_OpExpr: { + OpExpr *opexpr = (OpExpr *) expr; + if (opexpr->args != NIL && list_length(opexpr->args) > 0) { + mask_list_parameters(&(opexpr->args), pstate, &is_masking, policy_ids, result, rtable, can_mask); + } + break; + } + case T_CaseExpr: + { + CaseExpr *caseexpr = (CaseExpr *) expr; + if (caseexpr->args != NIL && list_length(caseexpr->args) > 0) { + mask_list_parameters(&(caseexpr->args), pstate, &is_masking, policy_ids, result, rtable, can_mask); + } + bool res = mask_expr_node(pstate, (Expr *&)caseexpr->defresult, policy_ids, result, rtable, can_mask); + is_masking = is_masking || res; + } + break; + case T_CaseWhen: + { + CaseWhen *whenexpr = (CaseWhen *) expr; + if (whenexpr->expr != NULL) { + is_masking = mask_expr_node(pstate, (Expr *&)whenexpr->expr, policy_ids, result, rtable, can_mask); + } + bool res = mask_expr_node(pstate, (Expr *&)whenexpr->result, policy_ids, result, rtable, can_mask); + is_masking = is_masking || res; + } + break; + default: + break; + } + return is_masking; +} + +bool parser_target_entry(ParseState *pstate, TargetEntry *&old_tle, + const policy_set *policy_ids, masking_result *result, List* rtable, bool can_mask) +{ + Node* src_expr = (Node*)old_tle->expr; + bool is_masking = false; + switch (nodeTag(src_expr)) { + case T_SubLink: + { + SubLink *sublink = (SubLink *) src_expr; + Query *query = (Query *) sublink->subselect; + ListCell* temp = NULL; + foreach (temp, query->targetList) { + TargetEntry *old_tle = (TargetEntry *) lfirst(temp); + parser_target_entry(pstate, old_tle, policy_ids, result, query->rtable, can_mask); + } + } + break; + case T_Var: + { + is_masking = 
handle_masking_node(pstate, (Expr *&)old_tle->expr, policy_ids, + result, rtable, can_mask); + if (is_masking) { + old_tle->resorigtbl = 0; + old_tle->resorigcol = 0; + } + } + break; + case T_Aggref: + case T_OpExpr: + case T_RelabelType: + case T_FuncExpr: + case T_CoerceViaIO: + case T_CaseExpr: + { + if (mask_expr_node(pstate, (Expr *&)old_tle->expr, policy_ids, result, rtable, can_mask)) { + old_tle->resorigtbl = 0; + old_tle->resorigcol = 0; + is_masking = true; + } + } + break; + default: + break; + } + return is_masking; +} + +void free_masked_cursor_stmts() +{ + if (masked_cursor_stmts != NULL) { + delete masked_cursor_stmts; + masked_cursor_stmts = NULL; + } +} + +void free_masked_prepared_stmts() +{ + if (masked_prepared_stmts) { + delete masked_prepared_stmts; + masked_prepared_stmts = NULL; + } +} + +void close_cursor_stmt_as_masked(const char* name) +{ + if (masked_cursor_stmts == NULL) { + return; + } + + masked_cursor_stmts->erase(name); + if (masked_cursor_stmts->empty() || (strcasecmp(name, "all") == 0)) { + delete masked_cursor_stmts; + masked_cursor_stmts = NULL; + } +} + +void unprepare_stmt_as_masked(const char* name) +{ + unprepare_stmt(name); + if (!masked_prepared_stmts) { + return; + } + masked_prepared_stmts->erase(name); + if (masked_prepared_stmts->empty() || !strcasecmp(name, "all")) { + delete masked_prepared_stmts; + masked_prepared_stmts = NULL; + } +} + +void set_prepare_stmt_as_masked(const char* name, const masking_result *result) +{ + if (!masked_prepared_stmts) { + masked_prepared_stmts = new StrMap; + } + (*masked_prepared_stmts)[name] = (*result); +} + +void set_cursor_stmt_as_masked(const char* name, const masking_result *result) +{ + if (!masked_cursor_stmts) { + masked_cursor_stmts = new StrMap; + } + (*masked_cursor_stmts)[name] = (*result); +} + +template< class T> +static inline void flush_stmt_masking_result(const char* name, T* stmts) +{ + if (stmts) { + StrMap::const_iterator it = stmts->find(name); + if (it != stmts->end()) { + flush_masking_result(it->second); + } + } +} + +void flush_cursor_stmt_masking_result(const char* name) +{ + flush_stmt_masking_result(name, masked_cursor_stmts); +} + +void flush_prepare_stmt_masking_result(const char* name) +{ + flush_stmt_masking_result(name, masked_prepared_stmts); +} + +bool process_union_masking(Node *union_node, ParseState *pstate, const Query *query, const policy_set *policy_ids, + bool audit_exist) +{ + if (union_node == NULL) { + return false; + } + switch (nodeTag(union_node)) { + /* For each union, we get its query recursively for masking until it doesn't have any union query */ + case T_SetOperationStmt: { + SetOperationStmt *stmt = (SetOperationStmt *)union_node; + if (stmt->op != SETOP_UNION) { + return false; + } + process_union_masking((Node *)(stmt->larg), pstate, query, policy_ids, audit_exist); + process_union_masking((Node *)(stmt->rarg), pstate, query, policy_ids, audit_exist); + } + break; + case T_RangeTblRef: { + RangeTblRef *ref = (RangeTblRef *)union_node; + if (ref->rtindex <= 0 || ref->rtindex > list_length(query->rtable)) { + return false; + } + Query *mostQuery = rt_fetch(ref->rtindex, query->rtable)->subquery; + process_masking(pstate, mostQuery, policy_ids, audit_exist); + } + break; + default: + break; + } + return true; +} + +/* + * Main entrance for masking + * Identify components in query tree that need to do masking. + * This function will find all parts which need masking of select query, + * mainly includes CTE / setOperation / normal select columns. 
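+ *
+ * Illustrative effect (object names here are examples, not part of this code):
+ * with a credit-card masking policy on customers.card_no, a query such as
+ *     SELECT card_no FROM customers;
+ * leaves post-parse analysis roughly as
+ *     SELECT creditcardmasking(card_no) FROM customers;
+ * so the planner and executor only ever see the masked expression.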
+ */
+void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist)
+{
+    if (query == NULL) {
+        return;
+    }
+
+    /* set-operation tree of a UNION query */
+    if (!process_union_masking(query->setOperations, pstate, query, policy_ids, audit_exist)) {
+        ListCell *lc = NULL;
+        /* For each CTE, we get its query recursively for masking, and then handle this query in the normal way */
+        if (query->cteList != NIL) {
+            foreach(lc, query->cteList) {
+                CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+                Query *cte_query = (Query *)cte->ctequery;
+                process_masking(pstate, cte_query, policy_ids, audit_exist);
+            }
+        }
+        /* find subqueries and process each subquery node */
+        if (query->rtable != NULL) {
+            foreach(lc, query->rtable) {
+                RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+                Query *subquery = (Query *)rte->subquery;
+                process_masking(pstate, subquery, policy_ids, audit_exist);
+            }
+        }
+        select_PostParseAnalyze(pstate, query, policy_ids, audit_exist);
+    }
+}
+
+void select_PostParseAnalyze(ParseState *pstate, Query *&query, const policy_set *policy_ids, bool audit_exist)
+{
+    Assert(query != NULL);
+    List *targetList = NIL;
+    targetList = (query->targetList != NIL) ? query->targetList : pstate->p_target_list;
+    handle_masking(targetList, pstate, policy_ids, query->rtable, query->utilityStmt);
+
+    /* deal with function type label */
+    load_function_label(query, audit_exist);
+}
+
+/*
+ * Do masking for the given target list.
+ * This function walks each target entry of the list
+ * and checks whether each node needs to be masked.
+ */
+bool handle_masking(List *targetList, ParseState *pstate, const policy_set *policy_ids, List *rtable, Node *utilityNode)
+{
+    if (targetList == NIL || policy_ids->empty()) {
+        return false;
+    }
+    ListCell *temp = NULL;
+    masking_result masking_result;
+    foreach (temp, targetList) {
+        TargetEntry *old_tle = (TargetEntry *)lfirst(temp);
+        /* Shuffle-masked columns can only be selected directly, without other operations */
+        parser_target_entry(pstate, old_tle, policy_ids, &masking_result, rtable, true);
+    }
+    if (masking_result.size() <= 0) {
+        return false;
+    }
+    if (strlen(t_thrd.security_policy_cxt.prepare_stmt_name) > 0) {
+        /* the prepared statement was masked */
+        set_prepare_stmt_as_masked(t_thrd.security_policy_cxt.prepare_stmt_name,
+            &masking_result); /* save the masking event for the execute case */
+    } else if (utilityNode != NULL) {
+        switch (nodeTag(utilityNode)) {
+            case T_DeclareCursorStmt: {
+                DeclareCursorStmt *stmt = (DeclareCursorStmt *)utilityNode;
+                /* save the masking event for the fetch case */
+                set_cursor_stmt_as_masked(stmt->portalname, &masking_result);
+            }
+            break;
+            default:
+                flush_masking_result(&masking_result); /* invoke the masking event */
+        }
+    } else {
+        flush_masking_result(&masking_result); /* invoke the masking event */
+    }
+    return true;
+}
\ No newline at end of file
diff --git a/contrib/security_plugin/masking.h b/contrib/security_plugin/masking.h
index fcbdca5e3..901677057 100644
--- a/contrib/security_plugin/masking.h
+++ b/contrib/security_plugin/masking.h
@@ -1,39 +1,53 @@
-/*
- * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2. 
- * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * masking.h - * - * IDENTIFICATION - * contrib/security_plugin/masking.h - * - * ------------------------------------------------------------------------- - */ - -#ifndef MASKING_H_ -#define MASKING_H_ -#include -#include "parser/parse_node.h" -#include "nodes/primnodes.h" -#include "gs_mask_policy.h" - -bool parser_target_entry(ParseState *pstate, TargetEntry*& old_tle, const policy_set *policy_ids, - masking_result *result, List* rtable, bool can_mask = true); -void reset_node_location(); - -/* col_type for integer should be int8, int4, int2, int1 */ -Node* create_integer_node(ParseState *pstate, int value, int location, int col_type = INT4OID, bool make_cast = true); - - -#endif /* MASKING_H_ */ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * masking.h + * + * IDENTIFICATION + * contrib/security_plugin/masking.h + * + * ------------------------------------------------------------------------- + */ + +#ifndef MASKING_H_ +#define MASKING_H_ +#include +#include "parser/parse_node.h" +#include "nodes/primnodes.h" +#include "gs_mask_policy.h" + +bool parser_target_entry(ParseState *pstate, TargetEntry*& old_tle, const policy_set *policy_ids, + masking_result *result, List* rtable, bool can_mask = true); +void reset_node_location(); + +/* col_type for integer should be int8, int4, int2, int1 */ +Node* create_integer_node(ParseState *pstate, int value, int location, int col_type = INT4OID, bool make_cast = true); + +void free_masked_cursor_stmts(); +void free_masked_prepared_stmts(); +void close_cursor_stmt_as_masked(const char* name); +void unprepare_stmt_as_masked(const char* name); +void set_prepare_stmt_as_masked(const char* name, const masking_result *result); +void set_cursor_stmt_as_masked(const char* name, const masking_result *result); +void flush_cursor_stmt_masking_result(const char* name); +void flush_prepare_stmt_masking_result(const char* name); +bool process_union_masking(Node *union_node, + ParseState *pstate, const Query *query, const policy_set *policy_ids, bool audit_exist); +void process_masking(ParseState *pstate, Query *query, const policy_set *policy_ids, bool audit_exist); +void select_PostParseAnalyze(ParseState *pstate, Query *&query, const policy_set *policy_ids, bool audit_exist); +bool handle_masking(List* targetList, ParseState *pstate, + const policy_set *policy_ids, List* rtable, Node* utilityNode); + +#endif /* MASKING_H_ */ diff --git a/contrib/security_plugin/privileges_audit.cpp b/contrib/security_plugin/privileges_audit.cpp index 04bc52d61..087c5d6ba 
100644 --- a/contrib/security_plugin/privileges_audit.cpp +++ b/contrib/security_plugin/privileges_audit.cpp @@ -49,6 +49,25 @@ #define ACCESS_CONTROL_CHECK_ACL_PRIVILIGE(type) \ ((check_acl_privilige_hook == NULL) ? true : check_acl_privilige_hook(type)) +typedef struct AclObjectType { + GrantObjectType grant_type; + PrivObject privi_type; +} AclObjectType; + +static AclObjectType aclobject_infos[] = { + {ACL_OBJECT_COLUMN, O_COLUMN}, + {ACL_OBJECT_RELATION, O_TABLE}, + {ACL_OBJECT_SEQUENCE, O_SEQUENCE}, + {ACL_OBJECT_DATABASE, O_DATABASE}, + {ACL_OBJECT_DOMAIN, O_DOMAIN}, + {ACL_OBJECT_FOREIGN_SERVER, O_SERVER}, + {ACL_OBJECT_FUNCTION, O_FUNCTION}, + {ACL_OBJECT_LANGUAGE, O_LANGUAGE}, + {ACL_OBJECT_NAMESPACE, O_SCHEMA}, + {ACL_OBJECT_TABLESPACE, O_TABLESPACE}, + {ACL_OBJECT_DATA_SOURCE, O_DATA_SOURCE}, +}; + void add_current_path(int objtype, List *fqdn, gs_stl::gs_string *buffer); /* @@ -61,14 +80,17 @@ void add_current_path(int objtype, List *fqdn, gs_stl::gs_string *buffer); * ignore_db: whether ignore database */ void internal_audit_object_str(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel, - const names_pair names, int priv_type, const char *priv_name, int objtype, bool is_rolegrant, bool ignore_db) + const names_pair names, int priv_type, const char *priv_name, int objtype, + int target_type, bool is_rolegrant, bool ignore_db) { /* * Note PolicyLabelItem just support table/function/view/column * so that only "all" label will work for other object type */ PolicyLabelItem item; - gen_policy_labelitem(item, rel, objtype); + if (target_type == ACL_TARGET_OBJECT) { + gen_policy_labelitem(item, rel, objtype); + } /* PolicyLabelItem construction will append schema oid by relid */ policy_simple_set policy_result; @@ -213,96 +235,15 @@ bool internal_audit_object_str(const policy_set* security_policy_ids, const poli return is_found; } -/* append audit logs for comment */ -void audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const char *relname, - int priv_type, const char *priv_name, int objtype) -{ - switch (objtype) { - case OBJECT_ROLE: - internal_audit_str(security_policy_ids, policy_ids, relname, priv_type, priv_name, O_ROLE); - break; - case OBJECT_USER: - internal_audit_str(security_policy_ids, policy_ids, relname, priv_type, priv_name, O_USER); - break; - case OBJECT_SCHEMA: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_SCHEMA); - break; - case OBJECT_SEQUENCE: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_SEQUENCE); - break; - case OBJECT_DATABASE: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_DATABASE); - break; - case OBJECT_FOREIGN_SERVER: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_SERVER); - break; - case OBJECT_FOREIGN_TABLE: - case OBJECT_STREAM: - case OBJECT_TABLE: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, - (objtype == OBJECT_TABLE) ? 
O_TABLE : O_FOREIGNTABLE); - break; - case OBJECT_COLUMN: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_COLUMN); - break; - case OBJECT_FUNCTION: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_FUNCTION); - break; - case OBJECT_CONTQUERY: - case OBJECT_VIEW: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_VIEW); - break; - case OBJECT_INDEX: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_INDEX); - break; - case OBJECT_TABLESPACE: - internal_audit_str(policy_ids, policy_ids, relname, priv_type, priv_name, O_TABLESPACE); - break; - default: - break; - } -} - void acl_audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel, - const names_pair names, int priv_type, const char *priv_name, int objtype) + const names_pair names, int priv_type, const char *priv_name, int objtype, int target_type) { - switch (objtype) { - case ACL_OBJECT_COLUMN: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_COLUMN); - break; - case ACL_OBJECT_RELATION: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_TABLE); - break; - case ACL_OBJECT_SEQUENCE: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_SEQUENCE); - break; - case ACL_OBJECT_DATABASE: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_DATABASE, - false, true); - break; - case ACL_OBJECT_DOMAIN: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_DOMAIN); - break; - case ACL_OBJECT_FOREIGN_SERVER: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_SERVER); - break; - case ACL_OBJECT_FUNCTION: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_FUNCTION); - break; - case ACL_OBJECT_LANGUAGE: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_LANGUAGE); - break; - case ACL_OBJECT_NAMESPACE: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_SCHEMA); - break; - case ACL_OBJECT_TABLESPACE: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_TABLESPACE); - break; - case ACL_OBJECT_DATA_SOURCE: - internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, O_DATA_SOURCE); - break; - default: - break; + PrivObject type = get_privtype_from_aclobject((GrantObjectType)objtype); + if (type == O_DATABASE) { + internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, type, target_type, + false, true); + } else { + internal_audit_object_str(security_policy_ids, policy_ids, rel, names, priv_type, priv_name, type, target_type); } } @@ -828,37 +769,21 @@ void get_open_cursor_info(PlannedStmt *stmt, char *buff, size_t buff_size) printed_size = snprintf_s(buff, buff_size, buff_size - 1, "%s ", cstmt->portalname); securec_check_ss(printed_size, "\0", "\0"); } - switch (stmt->commandType) { - case CMD_SELECT: { - rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1, - "FOR SELECT FROM"); - securec_check_ss(rc, "\0", "\0"); - printed_size += rc; - break; - } - case CMD_INSERT: { - rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - 
printed_size - 1,
-                "FOR INSERT TO");
-            securec_check_ss(rc, "\0", "\0");
-            printed_size += rc;
-            break;
-        }
-        case CMD_UPDATE: {
-            rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
-                "FOR UPDATE FROM");
-            securec_check_ss(rc, "\0", "\0");
-            printed_size += rc;
-            break;
-        }
-        case CMD_DELETE: {
-            rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
-                "FOR DELETE FROM");
-            securec_check_ss(rc, "\0", "\0");
-            printed_size += rc;
-            break;
-        }
-        default:
-            break;
-    }
+
+    rc = snprintf_s(buff + printed_size, buff_size - printed_size, buff_size - printed_size - 1,
+        get_cursorinfo(stmt->commandType));
+    securec_check_ss(rc, "\0", "\0");
+    printed_size += rc;
+    get_cursor_tables(stmt->rtable, buff, buff_size, printed_size);
 }
+
+PrivObject get_privtype_from_aclobject(GrantObjectType acl_type)
+{
+    for (unsigned int i = 0; i < (sizeof(aclobject_infos) / sizeof(aclobject_infos[0])); ++i) {
+        if (aclobject_infos[i].grant_type == acl_type) {
+            return aclobject_infos[i].privi_type;
+        }
+    }
+    return O_UNKNOWN;
+}
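+
+/*
+ * Usage sketch (illustrative): the table-driven lookup above replaces the old
+ * per-type switch, e.g.
+ *     PrivObject t = get_privtype_from_aclobject(ACL_OBJECT_RELATION);  // yields O_TABLE
+ * and any unmapped grant type falls back to O_UNKNOWN.
+ */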
diff --git a/contrib/security_plugin/privileges_audit.h b/contrib/security_plugin/privileges_audit.h
index 52e32e410..81bc926f2 100644
--- a/contrib/security_plugin/privileges_audit.h
+++ b/contrib/security_plugin/privileges_audit.h
@@ -26,13 +26,14 @@
 #include "nodes/primnodes.h"
 #include "nodes/parsenodes.h"
 #include "gs_policy/gs_vector.h"
+#include "gs_policy_object_types.h"
 
 #define SET_DB_SCHEMA_TABLE buffer->append(schemaname); \
     buffer->push_back('.');
 
 typedef std::pair names_pair;
 
 void acl_audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel,
-    const names_pair names, int priv_type, const char *priv_name, int objtype);
+    const names_pair names, int priv_type, const char *priv_name, int objtype, int target_type);
 bool internal_audit_object_str(const policy_set* security_policy_ids, const policy_set* policy_ids,
     const PolicyLabelItem* item, int priv_type, const char* priv_name, const char* objname = "", bool ignore_db = false);
@@ -41,9 +42,7 @@ void internal_audit_str(const policy_set *security_policy_ids, const policy_set
 void login_object(const policy_set *security_policy_ids, const policy_set *policy_ids, const char *login_str,
     int priv_type, const char *priv_name);
 void internal_audit_object_str(const policy_set *security_policy_ids, const policy_set *policy_ids, const ListCell *rel,
-    const names_pair names, int priv_type, const char *priv_name, int objtype, bool is_rolegrant = false, bool ignore_db = false);
-void audit_object(const policy_set *security_policy_ids, const policy_set *policy_ids,
-    const char *relname, int priv_type, const char *priv_name, int objtype);
+    const names_pair names, int priv_type, const char *priv_name, int objtype, int target_type = ACL_TARGET_OBJECT, bool is_rolegrant = false, bool ignore_db = false);
 void audit_table(const policy_set *security_policy_ids, const policy_set *policy_ids,
     RangeVar *rel, int priv_type, const char *priv_name, int objtype);
 void alter_table(const policy_set *security_policy_ids, const policy_set *policy_ids,
@@ -60,5 +59,6 @@ void destroy_logs();
 void get_cursor_tables(List *rtable, char *buff, size_t buff_size, int _printed_size,
     gs_stl::gs_vector *cursor_objects = nullptr);
 void get_open_cursor_info(PlannedStmt *stmt, char *buff, size_t buff_size);
+PrivObject get_privtype_from_aclobject(GrantObjectType acl_type);
 
 #endif /* PRIVILEGES_AUDIT_H_ */
\ No newline at end of file
diff --git a/contrib/security_plugin/security_plugin--1.0.sql b/contrib/security_plugin/security_plugin--1.0.sql
index 2b0149523..81454b3f8 100644
--- a/contrib/security_plugin/security_plugin--1.0.sql
+++ b/contrib/security_plugin/security_plugin--1.0.sql
@@ -3,8 +3,8 @@ create or replace function pg_catalog.creditcardmasking(col text,letter char def
 declare
     size INTEGER := 4;
 begin
-    return CASE WHEN length(col) >= size THEN
-        REGEXP_REPLACE(left(col, size*(-1)), '[\d+]', letter, 'g') || right(col, size)
+    return CASE WHEN pg_catalog.length(col) >= size THEN
+        pg_catalog.REGEXP_REPLACE(pg_catalog.left(col, size*(-1)), '[\d+]', letter, 'g') || pg_catalog.right(col, size)
     ELSE
         col
     end;
@@ -16,7 +16,7 @@ declare
     pos INTEGER := position('@' in col);
 begin
     return CASE WHEN pos > 1 THEN
-        repeat(letter, pos - 1) || substring(col, pos, length(col) - pos +1)
+        pg_catalog.repeat(letter, pos - 1) || pg_catalog.substring(col, pos, pg_catalog.length(col) - pos +1)
     ELSE
         col
     end;
@@ -26,10 +26,10 @@
 create or replace function pg_catalog.fullemailmasking(col text, letter char default 'x') RETURNS text AS $$
 declare
     pos INTEGER := position('@' in col);
-    dot_pos INTEGER := length(col) - position('.' in reverse(col)) + 1;
+    dot_pos INTEGER := pg_catalog.length(col) - position('.' in pg_catalog.reverse(col)) + 1;
 begin
     return CASE WHEN pos > 2 and dot_pos > pos THEN
-        repeat(letter, pos - 1) || '@' || repeat(letter, dot_pos - pos - 1) || substring(col, dot_pos, length(col) - dot_pos +1)
+        pg_catalog.repeat(letter, pos - 1) || '@' || pg_catalog.repeat(letter, dot_pos - pos - 1) || pg_catalog.substring(col, dot_pos, pg_catalog.length(col) - dot_pos +1)
     ELSE
         col
     end;
@@ -38,7 +38,7 @@ $$ LANGUAGE plpgsql;
 create or replace function pg_catalog.alldigitsmasking(col text, letter char default '0') RETURNS text AS $$
 begin
-    return REGEXP_REPLACE(col, '[\d+]', letter, 'g');
+    return pg_catalog.REGEXP_REPLACE(col, '[\d+]', letter, 'g');
 end;
 $$ LANGUAGE plpgsql;
@@ -46,14 +46,14 @@ create or replace function pg_catalog.shufflemasking(col text) RETURNS text AS $
 declare
     index INTEGER := 0;
     rd INTEGER;
-    size INTEGER := length(col);
+    size INTEGER := pg_catalog.length(col);
     tmp text := col;
     res text;
 begin
     while size > 0 loop
-        rd := floor(random() * length(tmp) + 1);
-        res := res || right(left(tmp, rd), 1);
-        tmp := left(tmp, rd - 1) || right(tmp, length(tmp) - rd);
+        rd := pg_catalog.floor(pg_catalog.random() * pg_catalog.length(tmp) + 1);
+        res := res || pg_catalog.right(pg_catalog.left(tmp, rd), 1);
+        tmp := pg_catalog.left(tmp, rd - 1) || pg_catalog.right(tmp, pg_catalog.length(tmp) - rd);
         size := size - 1;
     END loop;
     return res;
@@ -62,13 +62,13 @@ $$ LANGUAGE plpgsql;
 create or replace function pg_catalog.randommasking(col text) RETURNS text AS $$
 begin
-    return left(MD5(random()::text), length(col));
+    return pg_catalog.left(pg_catalog.MD5(pg_catalog.random()::text), pg_catalog.length(col));
 end;
 $$ LANGUAGE plpgsql;
 
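-- Worked examples for the rewritten functions above (with their default letters):
--   creditcardmasking('1234-5678-9012-3456')  => 'xxxx-xxxx-xxxx-3456'
--   basicemailmasking('john@example.com')     => 'xxxx@example.com'
--   fullemailmasking('john@example.com')      => 'xxxx@xxxxxxx.com'
--   alldigitsmasking('tel: 123-456')          => 'tel: 000-000'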
lstr := pg_catalog.left(col, startpos);
+    rstr := pg_catalog.right(col, size - endpos - 1);
+    ltarget := pg_catalog.substring(col, startpos+1, endpos - startpos + 1);
     ltarget := pg_catalog.REGEXP_REPLACE(ltarget, reg, replace_text, 'g');
     return lstr || ltarget || rstr;
 end;
diff --git a/contrib/sepgsql/relation.cpp b/contrib/sepgsql/relation.cpp
index 4375f3190..6270235fb 100644
--- a/contrib/sepgsql/relation.cpp
+++ b/contrib/sepgsql/relation.cpp
@@ -346,7 +346,7 @@ void sepgsql_relation_drop(Oid relOid)
         attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
         for (i = 0; i < attrList->n_members; i++) {
-            atttup = &attrList->members[i]->tuple;
+            atttup = t_thrd.lsc_cxt.FetchTupleFromCatCList(attrList, i);
             attForm = (Form_pg_attribute)GETSTRUCT(atttup);

             if (attForm->attisdropped)
@@ -360,7 +360,7 @@ void sepgsql_relation_drop(Oid relOid)
             sepgsql_avc_check_perms(&object, SEPG_CLASS_DB_COLUMN, SEPG_DB_COLUMN__DROP, audit_name, true);
             pfree(audit_name);
         }
-        ReleaseCatCacheList(attrList);
+        ReleaseSysCacheList(attrList);
     }
 }
diff --git a/contrib/sql_decoding/CMakeLists.txt b/contrib/sql_decoding/CMakeLists.txt
new file mode 100644
index 000000000..6bb2ce49f
--- /dev/null
+++ b/contrib/sql_decoding/CMakeLists.txt
@@ -0,0 +1,11 @@
+# CMake rules for building the sql_decoding component.
+AUX_SOURCE_DIRECTORY(${PROJECT_OPENGS_DIR}/contrib/sql_decoding TGT_sql_decoding_SRC)
+set(sql_decoding_DEF_OPTIONS -D_GLIBCXX_USE_CXX11_ABI=0 -DSTREAMPLAN -DPGXC -DENABLE_GSTRACE -D_GNU_SOURCE)
+set(sql_decoding_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-all)
+list(REMOVE_ITEM sql_decoding_COMPILE_OPTIONS -fstack-protector)
+set(sql_decoding_LINK_OPTIONS ${LIB_LINK_OPTIONS})
+
+add_shared_libtarget(sql_decoding TGT_sql_decoding_SRC "" "${sql_decoding_DEF_OPTIONS}" "${sql_decoding_COMPILE_OPTIONS}" "${sql_decoding_LINK_OPTIONS}")
+set_target_properties(sql_decoding PROPERTIES PREFIX "")
+install(TARGETS sql_decoding LIBRARY DESTINATION lib/postgresql)
+
diff --git a/contrib/sql_decoding/logical.conf b/contrib/sql_decoding/logical.conf
new file mode 100644
index 000000000..bc09e43a0
--- /dev/null
+++ b/contrib/sql_decoding/logical.conf
@@ -0,0 +1,2 @@
+wal_level = logical
+max_replication_slots = 8
diff --git a/contrib/sql_decoding/logical.control b/contrib/sql_decoding/logical.control
new file mode 100644
index 000000000..2f667aa96
--- /dev/null
+++ b/contrib/sql_decoding/logical.control
@@ -0,0 +1,5 @@
+# sql_decoding extension
+comment = 'sql_decoding wrapper'
+default_version = '1.0'
+module_pathname = '$libdir/sql_decoding'
+relocatable = true
diff --git a/contrib/sql_decoding/sql_decoding.cpp b/contrib/sql_decoding/sql_decoding.cpp
new file mode 100644
index 000000000..0b61f64c3
--- /dev/null
+++ b/contrib/sql_decoding/sql_decoding.cpp
@@ -0,0 +1,516 @@
+/*
+ * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
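+ *
+ * Example usage (a sketch; the slot name 'sql_slot' is illustrative, and
+ * wal_level = logical plus a spare replication slot are required, as set in
+ * logical.conf above):
+ *
+ *     SELECT * FROM pg_create_logical_replication_slot('sql_slot', 'sql_decoding');
+ *     SELECT data FROM pg_logical_slot_peek_changes('sql_slot', NULL, NULL);
+ *
+ * The options parsed below (include-xids, include-timestamp, skip-empty-xacts,
+ * only-local) can be appended as name/value pairs, e.g.:
+ *
+ *     SELECT data FROM pg_logical_slot_peek_changes('sql_slot', NULL, NULL, 'include-timestamp', 'on');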
+ * --------------------------------------------------------------------------------------- + * + * sql_decoding.cpp + * logical decoding output plugin (sql) + * + * + * + * IDENTIFICATION + * contrib/sql_decoding/sql_decoding.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/sysattr.h" +#include "access/ustore/knl_utuple.h" + +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" + +#include "nodes/parsenodes.h" +#include "replication/logical.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/relcache.h" +#include "utils/syscache.h" +#include "utils/typcache.h" +#include "replication/output_plugin.h" +#include "replication/logical.h" + +PG_MODULE_MAGIC; + +/* These must be available to pg_dlsym() */ +extern "C" void _PG_init(void); +extern "C" void _PG_output_plugin_init(OutputPluginCallbacks* cb); + +static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init); +static void pg_decode_shutdown(LogicalDecodingContext* ctx); +static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); +static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn); +static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); +static void pg_decode_change( + LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); +static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); + +typedef struct { + MemoryContext context; + bool include_xids; + bool include_timestamp; + bool skip_empty_xacts; + bool xact_wrote_changes; + bool only_local; +} TestDecodingData; + +/* specify output plugin callbacks */ +void _PG_output_plugin_init(OutputPluginCallbacks* cb) +{ + AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit); + + cb->startup_cb = pg_decode_startup; + cb->begin_cb = pg_decode_begin_txn; + cb->change_cb = pg_decode_change; + cb->commit_cb = pg_decode_commit_txn; + cb->abort_cb = pg_decode_abort_txn; + cb->filter_by_origin_cb = pg_decode_filter; + cb->shutdown_cb = pg_decode_shutdown; +} + +void _PG_init(void) +{ + /* other plugins can perform things here */ +} + +/* initialize this plugin */ +static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init = true) +{ + ListCell* option = NULL; + + TestDecodingData *data = (TestDecodingData*)palloc0(sizeof(TestDecodingData)); + data->context = AllocSetContextCreate(ctx->context, + "text conversion context", ALLOCSET_DEFAULT_SIZES); + data->include_xids = true; + data->include_timestamp = false; + data->skip_empty_xacts = false; + data->only_local = true; + + ctx->output_plugin_private = data; + + opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT; + + foreach (option, ctx->output_plugin_options) { + DefElem* elem = (DefElem*)lfirst(option); + + Assert(elem->arg == NULL || IsA(elem->arg, String)); + + if (strcmp(elem->defname, "include-xids") == 0) { + /* if option does not provide a value, it means its value is true */ + if (elem->arg == NULL) { + data->include_xids = true; + } else if (!parse_bool(strVal(elem->arg), &data->include_xids)) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), 
elem->defname), + errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\""))); + } + } else if (strcmp(elem->defname, "include-timestamp") == 0) { + if (elem->arg == NULL) { + data->include_timestamp = true; + } else if (!parse_bool(strVal(elem->arg), &data->include_timestamp)) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname), + errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\""))); + } + } else if (strcmp(elem->defname, "skip-empty-xacts") == 0) { + if (elem->arg == NULL) { + data->skip_empty_xacts = true; + } else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts)) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname), + errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\""))); + } + } else if (strcmp(elem->defname, "only-local") == 0) { + if (elem->arg == NULL) { + data->only_local = true; + } else if (!parse_bool(strVal(elem->arg), &data->only_local)) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname), + errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\""))); + } + } else { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? strVal(elem->arg) : "(null)"), + errdetail("N/A"), errcause("Wrong input option"), + erraction("Check the product documentation for legal options"))); + } + } +} + +/* cleanup this plugin's resources */ +static void pg_decode_shutdown(LogicalDecodingContext* ctx) +{ + TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + + /* cleanup our own resources via memory context reset */ + MemoryContextDelete(data->context); +} + +/* + * Prepare output plugin. 
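+ * Writes "BEGIN <csn>" for the transaction, identifying it by its commit
+ * sequence number (txn->csn) rather than by xid.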
+ */ +void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write) +{ + OutputPluginPrepareWrite(ctx, last_write); + appendStringInfo(ctx->out, "BEGIN %lu", txn->csn); + OutputPluginWrite(ctx, last_write); +} + +/* BEGIN callback */ +static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + + data->xact_wrote_changes = false; + if (data->skip_empty_xacts) { + return; + } + pg_output_begin(ctx, data, txn, true); +} + +/* COMMIT callback */ +static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn) +{ + TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) { + return; + } + + OutputPluginPrepareWrite(ctx, true); + appendStringInfoString(ctx->out, "COMMIT"); + appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time)); + appendStringInfo(ctx->out, " %lu", txn->csn); + OutputPluginWrite(ctx, true); +} + +/* ABORT callback */ +static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) { + return; + } + OutputPluginPrepareWrite(ctx, true); + if (data->include_xids) { + appendStringInfo(ctx->out, "ABORT %lu", txn->xid); + } else { + appendStringInfoString(ctx->out, "ABORT"); + } + + if (data->include_timestamp) { + appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time)); + } + OutputPluginWrite(ctx, true); +} + +static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id) +{ + TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + + if (data->only_local && origin_id != InvalidRepOriginId) { + return true; + } + return false; +} + +/* + * Print literal `outputstr' already represented as string of type `typid' + * into stringbuf `s'. + * + * Some builtin types aren't quoted, the rest is quoted. Escaping is done as + * if u_sess->parser_cxt.standard_conforming_strings were enabled. + */ +static void print_literal(StringInfo s, Oid typid, char* outputstr) +{ + const char* valptr = NULL; + + switch (typid) { + case FLOAT4OID: + case FLOAT8OID: + case NUMERICOID: + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case OIDOID: + + /* NB: We don't care about Inf, NaN et al. */ + appendStringInfoString(s, outputstr); + break; + + case BITOID: + case VARBITOID: + appendStringInfo(s, "B'%s'", outputstr); + break; + + case BOOLOID: + if (strcmp(outputstr, "t") == 0) { + appendStringInfoString(s, "true"); + } else { + appendStringInfoString(s, "false"); + } + break; + + default: + appendStringInfoChar(s, '\''); + for (valptr = outputstr; *valptr; valptr++) { + char ch = *valptr; + + if (SQL_STR_DOUBLE(ch, false)) { + appendStringInfoChar(s, ch); + } + appendStringInfoChar(s, ch); + } + appendStringInfoChar(s, '\''); + break; + } +} + +/* + * Decode tuple into stringinfo. 
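+ * Renders the tuple as a parenthesized, comma-separated value list: dropped
+ * and system columns are skipped, varlena values are detoasted before being
+ * printed, and NULL columns print as "null" unless skip_nulls is set.
+ * Compressed heap tuples, and tuples whose attribute count exceeds the
+ * descriptor's, are skipped entirely.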
+ */
+static void TupleToStringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+{
+    Assert(tuple != NULL);
+    if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
+        (int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
+        return;
+    }
+
+    appendStringInfoChar(s, '(');
+    /* print all columns individually */
+    for (int natt = 0; natt < tupdesc->natts; natt++) {
+        bool isnull = false; /* column is null? */
+        bool typisvarlena = false;
+        Oid typoutput = 0; /* output function */
+        Datum origval = 0; /* possibly toasted Datum */
+
+        Form_pg_attribute attr = tupdesc->attrs[natt]; /* the attribute itself */
+
+        if (attr->attisdropped || attr->attnum < 0) {
+            continue;
+        }
+
+        /* get Datum from tuple */
+        if (tuple->tupTableType == HEAP_TUPLE) {
+            origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);
+        } else {
+            origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
+        }
+
+        if (skip_nulls && isnull) {
+            continue;
+        }
+
+        /* query output function */
+        Oid typid = attr->atttypid; /* type of current attribute */
+        getTypeOutputInfo(typid, &typoutput, &typisvarlena);
+
+        /* print data */
+        if (isnull) {
+            appendStringInfoString(s, "null");
+        } else if (!typisvarlena) {
+            print_literal(s, typid, OidOutputFunctionCall(typoutput, origval));
+        } else {
+            Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval));
+            print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
+        }
+        if (natt < tupdesc->natts - 1) {
+            appendStringInfoString(s, ", ");
+        }
+    }
+    appendStringInfoChar(s, ')');
+}
+
+/*
+ * Decode tuple into stringinfo.
+ * This function is used for UPDATE or DELETE statements.
+ */
+static void TupleToStringinfoUpd(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+{
+    if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) ||
+        (int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) {
+        return;
+    }
+
+    bool isFirstAtt = true;
+    /* print all columns individually */
+    for (int natt = 0; natt < tupdesc->natts; natt++) {
+        Oid typoutput = 0; /* output function */
+        Datum origval = 0; /* possibly toasted Datum */
+        bool isnull = false; /* column is null? */
+        bool typisvarlena = false;
+
+        Form_pg_attribute attr = tupdesc->attrs[natt]; /* the attribute itself */
+
+        if (attr->attisdropped || attr->attnum < 0) {
+            continue;
+        }
+
+        /* get Datum from tuple */
+        if (tuple->tupTableType == HEAP_TUPLE) {
+            origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull);
+        } else {
+            origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull);
+        }
+
+        if (isnull && skip_nulls) {
+            continue;
+        }
+
+        if (!isFirstAtt) {
+            appendStringInfoString(s, " and ");
+        } else {
+            isFirstAtt = false;
+        }
+        /* print attribute name */
+        appendStringInfoString(s, quote_identifier(NameStr(attr->attname)));
+        appendStringInfoString(s, " = ");
+        /* query output function */
+        Oid typid = attr->atttypid;
+        getTypeOutputInfo(typid, &typoutput, &typisvarlena);
+
+        /* print data */
+        if (isnull) {
+            appendStringInfoString(s, "null");
+        } else if (!typisvarlena) {
+            print_literal(s, typid, OidOutputFunctionCall(typoutput, origval));
+        } else {
+            Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval));
+            print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
+        }
+    }
+}
+
+/*
+ * Callback for handling a decoded tuple.
+ * If a tuple image is missing, a " (no-tuple-data)" placeholder is emitted instead.
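+ * isHeap selects between the heap (change->data.tp) and ustore
+ * (change->data.utp) tuple images; isNewTuple selects the new row (rendered
+ * as a value list) versus the old row (rendered as an " and "-joined
+ * equality predicate for a WHERE clause).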
+ */ +static void TupleHandler(StringInfo s, TupleDesc tupdesc, ReorderBufferChange* change, bool isHeap, bool isNewTuple) +{ + if (isHeap && isNewTuple) { + if (change->data.tp.newtuple == NULL) { + appendStringInfoString(s, " (no-tuple-data)"); + } else { + TupleToStringinfo(s, tupdesc, &change->data.tp.newtuple->tuple, false); + } + } else if (isHeap && !isNewTuple) { + if (change->data.tp.oldtuple == NULL) { + appendStringInfoString(s, " (no-tuple-data)"); + } else { + TupleToStringinfoUpd(s, tupdesc, &change->data.tp.oldtuple->tuple, true); + } + } else if (!isHeap && isNewTuple) { + if (change->data.utp.newtuple == NULL) { + appendStringInfoString(s, " (no-tuple-data)"); + } else { + TupleToStringinfo(s, tupdesc, (HeapTuple)(&change->data.utp.newtuple->tuple), false); + } + } else { + if (change->data.utp.oldtuple == NULL) { + appendStringInfoString(s, " (no-tuple-data)"); + } else { + TupleToStringinfoUpd(s, tupdesc, (HeapTuple)(&change->data.utp.oldtuple->tuple), true); + } + } +} + + +/* + * Callback for individual changed tuples. + */ +static void pg_decode_change( + LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change) +{ + Form_pg_class class_form = NULL; + TupleDesc tupdesc = NULL; + TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + u_sess->attr.attr_common.extra_float_digits = 0; + bool isHeap = true; + /* output BEGIN if we haven't yet */ + if (txn != NULL && data->skip_empty_xacts && !data->xact_wrote_changes) { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + + class_form = RelationGetForm(relation); + tupdesc = RelationGetDescr(relation); + /* Avoid leaking memory by using and resetting our own context */ + MemoryContext old = MemoryContextSwitchTo(data->context); + + char *schema = NULL; + char *table = NULL; + schema = get_namespace_name(class_form->relnamespace); + table = NameStr(class_form->relname); + + OutputPluginPrepareWrite(ctx, true); + + switch (change->action) { + case REORDER_BUFFER_CHANGE_INSERT: + case REORDER_BUFFER_CHANGE_UINSERT: + appendStringInfoString(ctx->out, "insert into "); + + appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table)); + if (change->action == REORDER_BUFFER_CHANGE_UINSERT) { + isHeap = false; + } + appendStringInfoString(ctx->out, " values "); + TupleHandler(ctx->out, tupdesc, change, isHeap, true); + break; + case REORDER_BUFFER_CHANGE_UPDATE: + case REORDER_BUFFER_CHANGE_UUPDATE: + + appendStringInfoString(ctx->out, "delete from "); + appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table)); + if (change->action == REORDER_BUFFER_CHANGE_UUPDATE) { + isHeap = false; + } + appendStringInfoString(ctx->out, " where "); + TupleHandler(ctx->out, tupdesc, change, isHeap, false); + appendStringInfoChar(ctx->out, ';'); + appendStringInfoString(ctx->out, "insert into "); + appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table)); + appendStringInfoString(ctx->out, " values "); + TupleHandler(ctx->out, tupdesc, change, isHeap, true); + + break; + + case REORDER_BUFFER_CHANGE_DELETE: + case REORDER_BUFFER_CHANGE_UDELETE: + appendStringInfoString(ctx->out, "delete from "); + + appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table)); + if (change->action == REORDER_BUFFER_CHANGE_UDELETE) { + isHeap = false; + } + appendStringInfoString(ctx->out, " where "); + + TupleHandler(ctx->out, tupdesc, change, isHeap, false); + break; + default: + Assert(false); + } + + 
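+    /*
+     * Illustrative output for an UPDATE on a hypothetical table
+     * public.t(a int, b text) that changes (1, 'x') to (1, 'y'):
+     *
+     *     delete from public.t where a = 1 and b = 'x';insert into public.t values (1, 'y');
+     *
+     * The terminating ';' of each change is appended below.
+     */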
appendStringInfoChar(ctx->out, ';'); + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} diff --git a/contrib/test_decoding/test_decoding.cpp b/contrib/test_decoding/test_decoding.cpp index 48f2b2dfd..e3f1cdf90 100644 --- a/contrib/test_decoding/test_decoding.cpp +++ b/contrib/test_decoding/test_decoding.cpp @@ -36,21 +36,15 @@ PG_MODULE_MAGIC; extern "C" void _PG_init(void); extern "C" void _PG_output_plugin_init(OutputPluginCallbacks* cb); -typedef struct { - MemoryContext context; - bool include_xids; - bool include_timestamp; - bool skip_empty_xacts; - bool xact_wrote_changes; - bool only_local; -} TestDecodingData; - static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init); static void pg_decode_shutdown(LogicalDecodingContext* ctx); static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); static void pg_output_begin( - LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write); + LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn, bool last_write); static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn); +static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); +static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); + static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation rel, ReorderBufferChange* change); static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id); @@ -69,6 +63,8 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; cb->commit_cb = pg_decode_commit_txn; + cb->abort_cb = pg_decode_abort_txn; + cb->prepare_cb = pg_decode_prepare_txn; cb->filter_by_origin_cb = pg_decode_filter; cb->shutdown_cb = pg_decode_shutdown; } @@ -77,84 +73,33 @@ void _PG_output_plugin_init(OutputPluginCallbacks* cb) static void pg_decode_startup(LogicalDecodingContext* ctx, OutputPluginOptions* opt, bool is_init) { ListCell* option = NULL; - TestDecodingData* data = NULL; + PluginTestDecodingData* data = NULL; - data = (TestDecodingData*)palloc0(sizeof(TestDecodingData)); + data = (PluginTestDecodingData*)palloc0(sizeof(PluginTestDecodingData)); data->context = AllocSetContextCreate(ctx->context, "text conversion context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); data->include_xids = true; - data->include_timestamp = false; + data->include_timestamp = true; data->skip_empty_xacts = false; data->only_local = true; + data->tableWhiteList = NIL; ctx->output_plugin_private = data; opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT; foreach (option, ctx->output_plugin_options) { - DefElem* elem = (DefElem*)lfirst(option); - - Assert(elem->arg == NULL || IsA(elem->arg, String)); - - if (strcmp(elem->defname, "include-xids") == 0) { - /* if option does not provide a value, it means its value is true */ - if (elem->arg == NULL) - data->include_xids = true; - else if (!parse_bool(strVal(elem->arg), &data->include_xids)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else if (strcmp(elem->defname, "include-timestamp") == 0) { - if (elem->arg == NULL) - data->include_timestamp = true; - else if (!parse_bool(strVal(elem->arg), 
&data->include_timestamp)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else if (strcmp(elem->defname, "force-binary") == 0) { - bool force_binary = false; - - if (elem->arg == NULL) - continue; - else if (!parse_bool(strVal(elem->arg), &force_binary)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - - if (force_binary) - opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; - } else if (strcmp(elem->defname, "skip-empty-xacts") == 0) { - - if (elem->arg == NULL) - data->skip_empty_xacts = true; - else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else if (strcmp(elem->defname, "only-local") == 0) { - - if (elem->arg == NULL) - data->only_local = true; - else if (!parse_bool(strVal(elem->arg), &data->only_local)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); - } else { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg( - "option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? strVal(elem->arg) : "(null)"))); - } + ParseDecodingOptionPlugin(option, data, opt); } } /* cleanup this plugin's resources */ static void pg_decode_shutdown(LogicalDecodingContext* ctx) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; /* cleanup our own resources via memory context reset */ MemoryContextDelete(data->context); @@ -163,16 +108,18 @@ static void pg_decode_shutdown(LogicalDecodingContext* ctx) /* BEGIN callback */ static void pg_decode_begin_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; data->xact_wrote_changes = false; - if (data->skip_empty_xacts) + if (data->skip_empty_xacts) { return; + } pg_output_begin(ctx, data, txn, true); } -static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, ReorderBufferTXN* txn, bool last_write) +static void pg_output_begin(LogicalDecodingContext* ctx, PluginTestDecodingData* data, ReorderBufferTXN* txn, + bool last_write) { OutputPluginPrepareWrite(ctx, last_write); if (data->include_xids) @@ -185,7 +132,7 @@ static void pg_output_begin(LogicalDecodingContext* ctx, TestDecodingData* data, /* COMMIT callback */ static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; if (data->skip_empty_xacts && !data->xact_wrote_changes) return; @@ -203,65 +150,55 @@ static void pg_decode_commit_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* OutputPluginWrite(ctx, true); } +/* ABORT callback */ +static void pg_decode_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) + 
return; + + OutputPluginPrepareWrite(ctx, true); + if (data->include_xids) + appendStringInfo(ctx->out, "ABORT %lu", txn->xid); + else + appendStringInfoString(ctx->out, "ABORT"); + + if (data->include_timestamp) + appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time)); + + OutputPluginWrite(ctx, true); +} + +/* PREPARE callback */ +static void pg_decode_prepare_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; + + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + + OutputPluginPrepareWrite(ctx, true); + if (data->include_xids) + appendStringInfo(ctx->out, "PREPARE %lu", txn->xid); + else + appendStringInfoString(ctx->out, "PREPARE"); + + if (data->include_timestamp) + appendStringInfo(ctx->out, " (at %s)", timestamptz_to_str(txn->commit_time)); + + OutputPluginWrite(ctx, true); +} + + static bool pg_decode_filter(LogicalDecodingContext* ctx, RepOriginId origin_id) { - TestDecodingData* data = (TestDecodingData*)ctx->output_plugin_private; + PluginTestDecodingData* data = (PluginTestDecodingData*)ctx->output_plugin_private; if (data->only_local && origin_id != InvalidRepOriginId) return true; return false; } - -/* - * Print literal `outputstr' already represented as string of type `typid' - * into stringbuf `s'. - * - * Some builtin types aren't quoted, the rest is quoted. Escaping is done as - * if u_sess->parser_cxt.standard_conforming_strings were enabled. - */ -static void print_literal(StringInfo s, Oid typid, char* outputstr) -{ - const char* valptr = NULL; - - switch (typid) { - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case OIDOID: - case FLOAT4OID: - case FLOAT8OID: - case NUMERICOID: - /* NB: We don't care about Inf, NaN et al. 
*/ - appendStringInfoString(s, outputstr); - break; - - case BITOID: - case VARBITOID: - appendStringInfo(s, "B'%s'", outputstr); - break; - - case BOOLOID: - if (strcmp(outputstr, "t") == 0) - appendStringInfoString(s, "true"); - else - appendStringInfoString(s, "false"); - break; - - default: - appendStringInfoChar(s, '\''); - for (valptr = outputstr; *valptr; valptr++) { - char ch = *valptr; - - if (SQL_STR_DOUBLE(ch, false)) - appendStringInfoChar(s, ch); - appendStringInfoChar(s, ch); - } - appendStringInfoChar(s, '\''); - break; - } -} - static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls) { if (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) @@ -337,11 +274,11 @@ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK_B(origval)) appendStringInfoString(s, "unchanged-toast-datum"); else if (!typisvarlena) - print_literal(s, typid, OidOutputFunctionCall(typoutput, origval)); + PrintLiteral(s, typid, OidOutputFunctionCall(typoutput, origval)); else { Datum val; /* definitely detoasted Datum */ val = PointerGetDatum(PG_DETOAST_DATUM(origval)); - print_literal(s, typid, OidOutputFunctionCall(typoutput, val)); + PrintLiteral(s, typid, OidOutputFunctionCall(typoutput, val)); } } } @@ -351,13 +288,12 @@ static void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple static void pg_decode_change( LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change) { - TestDecodingData* data = NULL; + PluginTestDecodingData* data = NULL; Form_pg_class class_form; TupleDesc tupdesc; MemoryContext old; - data = (TestDecodingData*)ctx->output_plugin_private; - u_sess->attr.attr_common.extra_float_digits = 0; + data = (PluginTestDecodingData*)ctx->output_plugin_private; /* output BEGIN if we haven't yet */ if (data->skip_empty_xacts && !data->xact_wrote_changes) { @@ -371,12 +307,18 @@ static void pg_decode_change( /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->context); + char *schema = get_namespace_name(class_form->relnamespace); + char *table = NameStr(class_form->relname); + if (data->tableWhiteList != NIL && !CheckWhiteList(data->tableWhiteList, schema, table)) { + (void)MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + return; + } + OutputPluginPrepareWrite(ctx, true); appendStringInfoString(ctx->out, "table "); - appendStringInfoString(ctx->out, - quote_qualified_identifier( - get_namespace_name(get_rel_namespace(RelationGetRelid(relation))), NameStr(class_form->relname))); + appendStringInfoString(ctx->out, quote_qualified_identifier(schema, table)); appendStringInfoString(ctx->out, ":"); switch (change->action) { diff --git a/dependency.xml b/dependency.xml index ddd2b007e..a1ac1995e 100644 --- a/dependency.xml +++ b/dependency.xml @@ -5,10 +5,10 @@ Generic ${offering} - ${BVersion} - Y + ${version} + ${snapshot} - Y + N @@ -17,66 +17,61 @@ - + BVersion Generic Huawei Secure C Huawei Secure C V100R001C01SPC010B002 - Y - - + + BVersion Generic DOPRA SSP - DOPRA SSP V300R021C00SPC020B100 + DOPRA SSP V300R021C10SPC010B100 - Y - + dopra_ssp - - + + BVersion Generic - Cloud Compiler JDK - Cloud Compiler JDK V100R003C30SPC300B001 + BiSheng JDK Enterprise + BiSheng JDK Enterprise 2.1.0.320.B001 - Y - + huaweijdk - - + + BVersion Generic KMC - KMC 21.0.0.B003 + KMC 21.1.0.B006 - Y - Y - + - + \ No newline at end of file diff --git a/distribute_errmsg.txt 
b/distribute_errmsg.txt new file mode 100644 index 000000000..1d9d8572a --- /dev/null +++ b/distribute_errmsg.txt @@ -0,0 +1,4964 @@ +/* Autogenerated file, please don't edit */ + + +GAUSS-00011: "hash table corrupted" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00017: "unrecognized node type: %d" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00020: "cache lookup failed for function %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00042: "cache lookup failed for relation %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00108: "relation '%s' does not exist" +SQLSTATE: 42P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00119: "cache lookup failed for attribute %d of relation %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00238: "cache lookup failed for aggregate %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00265: "Un-support feature" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00298: "PGXC Node %s: object not defined" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00338: "cache lookup failed for operator %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00478: "out of memory" +SQLSTATE: 53200 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00498: "%s is not a valid encoding name" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00517: "database '%s' is being accessed by other users" +SQLSTATE: 55006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00601: "Permission denied." +SQLSTATE: 42501 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00649: "Permission denied." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00674: "No Datanode defined in cluster" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00702: "relation '%s' already exists" +SQLSTATE: 42P07 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00713: "Un-support feature" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00810: "cache lookup failed for relation %u" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00816: "cache lookup failed for partition %u" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00935: "cache lookup failed for relation %u" +SQLSTATE: 02000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-00998: "%s" +SQLSTATE: 42000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01087: "cache lookup failed for namespace %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01139: "prepared statement '%s' does not exist" +SQLSTATE: 26000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01259: "%s" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01406: "unrecognized heap_lock_tuple status: %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01484: "parameter '%s' must be a list of extension names" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01580: "Postgres-XC does not support this distribution type yet" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01611: "no local indexes found for partition %s" +SQLSTATE: 42809 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01635: "Unexpected node type: %d" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01645: "column store doesn't support backward scan" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01646: "Distributed key column can't be updated in current version" +SQLSTATE: 42P10 +CAUSE: "invalid" +ACTION: "invalid" 
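+
+/* Reading these entries: each record mirrors an ereport() call site like the
+ * ones in the decoding plugins above -- errmsg() supplies the message
+ * template, errcode() the SQLSTATE, and errcause()/erraction() the CAUSE and
+ * ACTION fields, which presumably remain "invalid" where the call site does
+ * not set them. A hypothetical call site for a SQLSTATE 22023 entry:
+ *
+ *   ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ *       errmsg("could not parse value \"%s\" for parameter \"%s\"", ...),
+ *       errcause("Wrong input value"), erraction("Input \"on\" or \"off\"")));
+ */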
+ + +GAUSS-01692: "cannot insert into view '%s'" +SQLSTATE: 55000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01693: "cannot update view '%s'" +SQLSTATE: 55000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01694: "cannot delete from view '%s'" +SQLSTATE: 55000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01713: "PGXC Group %s: group not defined" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01733: "cache lookup failed for partition %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01738: "inserted partition key does not map to any table partition" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-01900: "operator with OID %u does not exist" +SQLSTATE: 42883 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02115: "out of memory" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02116: "Incorrect redistribution operation" +SQLSTATE: 42809 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02117: "a list of nodes should have at least one node" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02119: "Error: no such supported locator type: %c\n" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02120: "cache lookup failed for pgxc_group %s" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02125: "Node(%d) has no buckets on it." +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02128: "PGXC Group %s: group already defined" +SQLSTATE: 42710 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02129: "PGXC node %s: only Datanodes can be group members" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02135: "PGXC Group %s: group not defined" +SQLSTATE: 42710 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02136: "PGXC Group %s: group not defined" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02137: "No options specified" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02138: "port value is out of range" +SQLSTATE: 22003 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02141: "port1 value is out of range" +SQLSTATE: 22003 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02143: "control_port1 value is out of range" +SQLSTATE: 22003 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02144: "type value is incorrect, specify 'coordinator or 'datanode'" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02145: "incorrect option: %s" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02146: "PGXC node %s: cannot be a primary node, it has to be a Datanode" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02147: "PGXC node %s: cannot be a preferred node, it has to be a Datanode" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02148: "PGXC node %s: Node type not specified" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02149: "Please choose different node name." +SQLSTATE: 42P17 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02152: "Node name '%s' is too long" +SQLSTATE: 42P17 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02153: "Failed to create coordinator, the maximum number of coordinators %d specified by 'max_coordinators' has been reached." +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02154: "Failed to create datanode, the maximum number of datanodes %d specified by 'max_datanodes' has been reached." 
+SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02155: "PGXC node %s: two nodes cannot be primary" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02157: "cache lookup failed for object %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02158: "PGXC node %s: cannot alter Coordinator to Datanode" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02159: "PGXC node %s: cannot alter Datanode to Coordinator" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02161: "PGXC Node %s: cannot drop local node" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02177: "pool manager only supports UNIX socket" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02178: "unexpected EOF within message length word:%m" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02179: "invalid message length" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02180: "incomplete message from client:%m" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02181: "pooler failed to send res: %m" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02182: "could not receive data from client: %m" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02183: "incomplete message from client" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02184: "unexpected message code" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02186: "Unexpected response from the Datanodes for 'c' message, current request type %d" +SQLSTATE: XX001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02188: "Unexpected response from the Datanodes for 'T' message, current request type %d" +SQLSTATE: XX001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02190: "Unexpected response from the Datanodes for 'G' message, current request type %d" +SQLSTATE: XX001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02191: "Unexpected response from the Datanodes for 'H' message, current request type %d" +SQLSTATE: XX001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02192: "Unexpected response from the Datanodes for 'd' message, current request type %d" +SQLSTATE: XX001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02193: "Unexpected response from the Datanodes for 'D' message, current request type %d" +SQLSTATE: XX001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02195: "Unexpected response from Datanode" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02196: "Failed to read response from Datanodes Detail: %s\n" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02202: "Failed to PREPARE the transaction on one or more nodes" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02209: "Error while running COPY" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02210: "could not obtain connection from pool" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02211: "cannot run transaction to remote nodes during recovery" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02212: "Failed to get next transaction ID" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02215: "Unexpected response from Datanode %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02217: "Failed to read response from Datanodes when ending query" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02218: "Failed to close Datanode cursor" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: 
"invalid" + + +GAUSS-02219: "Number of user-supplied parameters do not match the number of remote parameters" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02220: "cannot run EXECUTE DIRECT with utility inside a transaction block" +SQLSTATE: 25001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02221: "Could not begin transaction on Datanodes" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02223: "Could not begin transaction on coordinators" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02229: "NULL junk attribute" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02232: "pgxc_pool_reload cannot run inside a transaction block" +SQLSTATE: 25001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02234: "must define Database name or user name" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02236: "The CREATE BARRIER PREPARE message is expected to arrive at a Coordinator from another Coordinator" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02237: "The CREATE BARRIER END message is expected to arrive at a Coordinator from another Coordinator" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02238: "The CREATE BARRIER EXECUTE message is expected to arrive from a Coordinator" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02239: "Failed to send CREATE BARRIER PREPARE request to the node" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02242: "Failed to send CREATE BARRIER EXECUTE request to the node" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02506: "%s" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02610: "conversion between %s and %s is not supported" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02611: "out of memory" +SQLSTATE: 54000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02614: "invalid encoding name '%s'" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02630: "inserted partition key does not map to any table partition" +SQLSTATE: 22003 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-02789: "could not create file '%s': %m" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-03575: "could not write file '%s': %m" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-03736: "could not open relation with OID %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04001: "invalid option '%s'" +SQLSTATE: HV00D +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04018: "could not translate host name '%s' to address: %s" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04048: "out of memory" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04050: "malloc failed" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04265: "delete or update failed because lock conflict" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04313: "can't get old group buckets." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04318: "new node group must contain different number of nodes with before!" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04320: "Failed to receive GTM commit transaction response." +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04321: "Failed to receive GTM commit transaction response after %s." 
+SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04333: "pooler: invalid cn/dn node number, input cn: %d, dn: %d; current cn: %d, dn: %d" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04336: "pooler: Failed to duplicate fd, error: %s" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04339: "pooler: Failed to reset agent!" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04528: "pg_pool_validate cannot run inside a transaction block" +SQLSTATE: 25001 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04529: "Failed to send query ID to %s while sending query ID with sync" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04530: "Failed to read response from Datanodes while sending query ID with sync. Detail: %s\n" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04531: "Unexpected response from %s while sending query ID with sync" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04532: "Could not begin transaction on %s" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04535: "Failed to read response from DN %u when ending query" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04536: "Unexpected response from DN %u" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04538: "The relation %s is invalid because of cluster resize, please quit current session and it will be automaticly dropped." +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04622: "Password can't contain more than %d characters." +SQLSTATE: 28P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04774: "Snapshot too old." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04779: "The relation '%s' has no distribute type." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04921: "Can not find job id %d in system table pg_job." +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04936: "All 32768 jobids have alloc, and there is no free jobid" +SQLSTATE: 55000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-04942: "Parameter can not be null." +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05215: "message len is invalid." +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05216: "Modulo value: %d out of range %d.\n" +SQLSTATE: 22003 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05218: "Can't get group member" +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05219: "you can only take append_mode = on, off, refresh, read_only, end_catchup" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05220: "Alter-Table set append mode should have 'rel_cn_oid' set together" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05221: "Alter-Table set rel_cn_oid should have 'append_mode' set together" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05222: "Proc redistribution only can be set during data redistribution time" +SQLSTATE: D0011 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05223: "Proc redistribution only can be reset during data redistribution time" +SQLSTATE: D0011 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05224: "Get buckets failed.reason:the buckets number(%d) is not correct(%d)." 
+SQLSTATE: D0011 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05225: "group_members is null for tuple %u" +SQLSTATE: 22004 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05226: "PGXC Group %u: group not defined" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05228: "Can not find redistributed source group with in_redistribution 'y'." +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05229: "cache lookup failed for node group with oid %u" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05230: "must be system admin or createdb role to create cluster node groups" +SQLSTATE: 42501 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05231: "NodeGroup name %s can not be preserved group name" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05232: "Don't support logic cluster in multi_standby mode." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05233: "PGXC node %s is already specified: duplicate Datanodes is not allowed in node-list" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05234: "Can not create logic cluster group, create installation group first." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05235: "installation node group or source node group not found" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05236: "Do not support logic cluster in coexistence with common node group!" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05237: "CREATE NODE GROUP ... DISTRIBUTE FROM can only be executed in maintenance mode." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05238: "group_name can not be NULL " +SQLSTATE: XX005 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05239: "The node group name can not be same as redistribution node group name!" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05240: "The node group %s has not any nodes." +SQLSTATE: D0011 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05241: "Some nodes have been allocated to logic cluster!" +SQLSTATE: 53000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05242: "PGXC Group %s: some tables is distributed in installation group,can not create logic cluster." +SQLSTATE: 2BP01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05243: "Do not allow to convert installation group to logic cluster when other node group exists." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05244: "PGXC Group %s is not installation group." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05245: "The installation group has no members." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05246: "Do not allow to convert installation group to logic clusterwhen other node group exists." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05247: "PGXC Group %s' buckets can not be null" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05248: "must have sysadmin or createdb or alter privilege to resize cluster node groups" +SQLSTATE: 42501 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05249: "No destgroup in resize process" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05250: "NodeGroup name %s is invalid." +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05251: "PGXC GROUP %s: do not support add nodes to installation node group." 
+SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05252: "PGXC node %s: already exist in node group %s or duplicate in nodelist." +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05253: "PGXC GROUP %s: do not support delete nodes from installation node group." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05254: "PGXC node %s: does not exist in node group %s or duplicate in nodelist." +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05255: "must have sysadmin or createdb or alter privilege to alter cluster node groups" +SQLSTATE: 42501 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05256: "Alter node group can only be executed in maintenance mode." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05257: "must have sysadmin or createdb or drop privilege to remove cluster node groups" +SQLSTATE: 42501 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05258: "cannot drop '%s' because other objects depend on it" +SQLSTATE: 2BP01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05259: "Do not support distribute from clause for non-logic cluster group!" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05260: "DROP NODE GROUP ... DISTRIBUTE FROM can only be executed in maintenance mode." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05261: "PGXC Group %s: group is not redistributed distination group, can not be dropped" +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05262: "PGXC Group %s: group is installation group, can not be dropped" +SQLSTATE: 2BP01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05263: "cannot drop '%s' because at least one role %u depend on it" +SQLSTATE: 2BP01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05265: "default_storage_nodegroup %s not defined." +SQLSTATE: 42710 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05266: "Bucketmap is not found with given groupoid %u" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05267: "cache lookup failed on node group %u" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05268: "Connection to database failed: %s" +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05269: "execute statement: %s failed: %s" +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05270: "PQtuples num is invalid : %d" +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05271: "fail to get tables in database %s for query remain table" +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05272: "can not find node group by this node list." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05273: "Invalid null pointer attribute for gs_get_nodegroup_tablecount()" +SQLSTATE: 42P24 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05275: "count is invalid, count:%d, tmpCount:%d" +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05277: "PGXC node %s: cannot be a central node, it has to be a Coordinator" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05278: "the replication type should be multi-standby, now is %d" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05280: "PGXC Node %s(host = %s, port = %d, sctp_port = %d, control_port = %d): object already defined" +SQLSTATE: 42710 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05281: "Datanode %s: can not be central node." +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-05282: "PGXC node %s is central node already." 
+
+
+GAUSS-05283: "parameter requires a Boolean value"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05284: "PGXC Node %s: node is in logic cluster"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05286: "Write to replicated table returned different results from the Datanodes on current DN:%s and previous DN:%s."
+SQLSTATE: XX001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05287: "Unexpected TUPDESC response from Datanode"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05288: "Failed to send queryid to %s"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05289: "Failed to send queryid to %s before PREPARE command"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05293: "Can not find location info for relation oid: %u"
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05294: "foreignPrivateList is NULL"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05295: "Could not send user pl to CN of the compute pool: %s."
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05298: "FATAL state of connection to datanode %u"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05299: "No coordinator nodes defined in cluster"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05302: "Could not begin transaction on Coordinator nodes"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05305: "TUPDESC message has not been received before DATAROW message from %s"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05307: "Unsupport DML two phase commit under gtm free mode."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05309: "Could not begin transaction on Datanode."
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05310: "%s.%s not found when analyze fetching global statistics."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05311: "Fetch statistics from myself is unexpected. Maybe switchover happened."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05312: "cooperation analysis: please update to the same version"
+SQLSTATE: 08P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05313: "unrecognize LOCKMODE type."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05314: "invalid address for the compute pool: %s"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05315: "Failed to send queryid to %s before COMMIT command(1PC)"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05316: "failed to notify node %u to commit"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05317: "failed to receice response from node %u after notify commit"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05318: "failed to send commit csn to node %u"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05319: "Failed to get valid node id from node definitions"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05320: "invalid node_list, coor_num:%d, datanode_num:%d"
+SQLSTATE: 22026
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05323: "invalid input/output buffer in node handle"
+SQLSTATE: 53200
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05331: "replication type is invalid in PGXCNodeGetNodeId (nodeoid = %u, node_type = %c"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05332: "Failed to connect to the compute pool. See log file for more details."
+SQLSTATE: 08006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05333: "pooler: Failed to create agent, number of agent reaches MaxAgentCount: %d"
+SQLSTATE: 53300
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05334: "agent cannot alloc memory."
+SQLSTATE: 53200
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05336: "pooler: Failed to init agent, number of agent reaches MaxAgentCount: %d"
+SQLSTATE: 53300
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05338: "pooler: Failed to get_nodeInfo_from_matric: needCreateNodeArray is null."
+SQLSTATE: 01000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05339: "pooler: Failed to get_slave_datanode_oid: can't find slave nodes."
+SQLSTATE: 01000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05344: "pooler: failed to create connections in parallel mode, due to failover, pending"
+SQLSTATE: 08006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05345: "pooler: failed to create connections in parallel mode, Error Message: %s"
+SQLSTATE: 08006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05346: "pooler: agent_release_connections: agent is null"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05347: "must be system admin or operator admin in operation mode to manage pooler"
+SQLSTATE: 42501
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05348: "pg_pool_ping cannot run inside a transaction block"
+SQLSTATE: 25001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-05559: "null value not allowed"
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06639: "Unsupported node type %s to check need stream setup for recursive union"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06649: "Could not find globle planner info when make simple remote query."
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06650: "Can not add stream operator on to parameterize plan."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06651: "mark_stream_unsupport."
+SQLSTATE: 0A100
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06682: "rule '%u' does not exist"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06911: "current transaction is aborted, commands ignored until end of transaction block, firstChar[%c]"
+SQLSTATE: 25P02
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-06915: "inserted distribution key does not map to any datanode"
+SQLSTATE: 42P29
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07073: "Unsupport '%s' command during online expansion on '%s'"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07136: "INSERT ON DUPLICATE KEY UPDATE must have an transformed InsertStmt query."
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07171: "can't initialize index scans using unusable local index '%s'"
+SQLSTATE: XX002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07306: "unrecognize data type %u."
+SQLSTATE: 42P38
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07376: "Invalid interval string for 'partition_interval' option"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07377: "Invalid interval range for 'partition_interval' option"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07851: "Unsupport distribute type."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-07894: "There is an IO error when remote read CU in cu_id %u of relation %s file %s offset %lu. slotId %d, column '%s' "
+SQLSTATE: 58030
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08220: "Archived thread shut down."
+SQLSTATE: OP003
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08221: "Wait archived timeout."
+SQLSTATE: OP003
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08223: "must be system admin to clean pool connections"
+SQLSTATE: 42501
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08327: "Fail to generate plan"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08329: "consumer_num_datanodes should not be zero"
+SQLSTATE: 22012
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08331: "attnum overflow the length of subplan targetlist"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08332: "target list is too short"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08473: "Failed to reload searchet because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08474: "Failed to call Searchlet_configVerify, ret: %d, searchletId: %d, service_name: %s, searchlet_config_name: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08475: "Failed to call Searchlet_reload, ret: %d, searchletId: %d, service_name: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08476: "Failed to call Searchlet_reloadAlgorithm, ret: %d, searchletId: %d, service_name: %s, searchlet_config_name: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08477: "Failed to add vector because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08478: "Failed to commit vector because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08479: "Failed soft commit searchletId: %d, service_name: %s, ret: %d."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08480: "Failed to clean vector because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08481: "Failed to search long feature because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08482: "Parameter should not be null."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08483: "Failed to search short feature because libarary file of searchlet KNNSearcher.so haven't been loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08484: "Parameter serviceTableName or target_id or target_vector can not be null."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08485: "Failed to search short feature because vector data haven't been loaded to searchlet, tablename: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08486: "There is no attribute of long_vector: %s or idxkey: %s in service table: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08487: "Call Searchlet_query return fail on node %s, ret: %d."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08488: "Failed to get status for gpu loading vector because libarary file of searchlet KNNSearcher.so haven't been loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08489: "Failed to get algorithm info because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08490: "Failed to get resource info because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08491: "Failed to get detail info because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08492: "Failed to get attribute info because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08493: "The relation %s have doesn't exist."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08494: "The operation is not recommended, which will trigger the streaming operator."
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08495: "Failed to get expand_search_ratio by algorithm name: %s."
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08496: "Length of searchserver config path: %s exceed the max length %d."
+SQLSTATE: 53500
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08497: "Search server config path have not been initialized."
+SQLSTATE: D0014
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08498: "Failed to get algorithm from searchlet, ret: %d."
+SQLSTATE: 42P17
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08499: "Failed to get resource info from searchlet, ret: %d."
+SQLSTATE: 42P17
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08500: "Failed to get searchlet detail info, ret: %d."
+SQLSTATE: 42P17
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08501: "Failed to get length of short vector according to algoname from searchlet, algoname: %s."
+SQLSTATE: 42P17
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08502: "Failed to init SearchServer, ret: %d, searchserver_config_name: %s, searchlet_config_name: %s."
+SQLSTATE: D0014
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08503: "The service table: %s doesn't exist in current schema: %s."
+SQLSTATE: 42P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08504: "Failed to create searchlet, searchletId: %d, service_name: %s, searchlet_config_name: %s, algorithmName: %s."
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08505: "Failed add vector to searchlet, service_name: %s, searchletId: %d, ret: %d."
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08506: "Failed add model info to searchlet, ret: %d, searchletId: %d, algoname: %s."
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08507: "Failed to calculate distance according to long vector, ret: %d."
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08508: "Failed to calculate distance, serviceTableName: %s, ret: %d."
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08509: "Failed to do cstore btree index scan because the relation: %s has no index."
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08510: "Failed to find index relation according to specified index colums."
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08511: "Fail to get rightop. righttop should never be null, please check carefully!"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08512: "Permission for current user: %s to operate service table: %s."
+SQLSTATE: 42501
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08513: "Dynamic library file %s does not exist."
+SQLSTATE: 58P03
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08514: "Call dlopen to load library file %s failed. error: %s"
+SQLSTATE: 58P03
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08515: "Fail to find vectorized function"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08516: "Failed to search vector long feature because libarary file of searchlet KNNSearcher.so haven't loaded on this node: %s."
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08517: "The first parameter service table name can not be null."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08518: "No remote server address found!"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08519: "could not connect to server '%s'"
+SQLSTATE: 08001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08520: "unsupported data type %u"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08521: "unsupported expression type for deparse: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08522: "unexpected expression in subquery output"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08523: "can not support user-defined agg function: %u"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08524: "varattno is out of range in ForeignScan node."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08525: "invalid varno found."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08526: "invalid relation type found."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08527: "invalid attr number in agg->grpColIdx"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08528: "No function name found for agg func: %u"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08529: "unsupported transition function to deparse avg expr. funcname: %s"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08530: "unsupport data type in agg pushdown."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08531: "cooperation analysis: can't execute the query in restore mode."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08532: "cooperation analysis: can't execute the query in upgrade mode."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08533: "Query on datanode is not supported currently for the foreign table: %s."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08534: "cooperation analysis: not receive the snapshot from remote cluster"
+SQLSTATE: P0002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08535: "EXECUTE DIRECT cannot execute SELECT query with foreign table on coordinator"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08536: "Retrieve next vector batch is not supported for Foreign table."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08537: "colmap value is not correct."
+SQLSTATE: XX001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08538: "gc_fdw: the remote vesion is not match."
+SQLSTATE: P0002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08539: "remote query result does not match the foreign table"
+SQLSTATE: OP002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08540: "Need DISTRIBUTE BY clause for the foreign table."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08541: "The gc_fdw partition foreign table is not supported."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08542: "The error_relation of the foreign table is not support."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08543: "Unsupport write only."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08544: "The REMOTE LOG of gc_fdw foreign table is not support."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08545: "The PER NODE REJECT LIMIT of gc_fdw foreign table is not support."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08546: "Cooperation Analysis is not supported."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08547: "%s requires a non-negative numeric value"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08548: "%s requires a non-negative integer value"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08549: "%s requires a not-null string value"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08550: "Password must contain at least %d characters."
+SQLSTATE: 28P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08551: "the length of option '%s' is too long."
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08552: "the following options are necessary: address, dbname, username, password"
+SQLSTATE: HV00D
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08553: "Shippable cache entry not found."
+SQLSTATE: P0002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08554: "could not seek dbe_sql temp file for write"
+SQLSTATE: P002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08555: "could not write to dbe_sql temp file"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08556: "could not seek dbe_sql temp file for read"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08557: "could not read pkg_service_sql temp file (expect: %u/actual: %lu)"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08558: "given context:%d is not found"
+SQLSTATE: P0002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08559: "invalid parameter"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08560: "directory '%s' has a null path"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08561: "program limit exceeded"
+SQLSTATE: 54000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08562: "no data found"
+SQLSTATE: P0002
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08563: "The file name '%s' contains invalid characters '/'."
+SQLSTATE: 42602
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08564: "invalid read raw size %lu , max read raw size is %d "
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08565: "could not read from file %d"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08566: "argument %d is null, invalid, or out of range"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08567: "write out of blob maximum bound"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08568: "specified trim length is greater than current LOB value's length"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08569: "The offset is greater than the length of the data, no data found"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08570: "invalid blob value %d"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08571: "len_srcsource offset is beyond the end of the source"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08572: "the first parameter should be blob or clob."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08573: "the second parameter should be blob or clob."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08574: "the first and second parameter type should be same."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08575: "invalid lob fetch mode"
+SQLSTATE: XX004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08576: "the obj parameter should be blob or clob."
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08577: "The length of incoming msg exceeds the max line size %d"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08578: "buffer overflow, limit of %u"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08579: "invalid endianess! please input the right endianess:big_endian 1, little_endian 2"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08580: "invalid parameter value %u"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08581: "GROUP BY column '%s' must be in select list"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08582: "couldn't find aggfnoid: %u from PROC catalog"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08583: "no polymorphic arguments found"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08584: "expected polymorphic argument at position %d but only found %d arguments"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08585: "no array type found for type %u"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08586: "get type name failed"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08587: "can't get query state"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08588: "function name are not found in cont view's target list checking"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08589: "%s are not allowed in a cont view's target list"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08590: "subqueries are not allowed in a cont view's target list"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08591: "expressions are not allowed in a cont view's target list"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08592: "CASE expressions are not allowed in a cont view's target list"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08593: "sublinks are not allowed in a contview's where clause"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08594: "NullTest are not allowed in a contview's where clause"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08595: "WITH clauses are not supported in contview"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08596: "HAVING clauses are not supported in contview"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08597: "UNION/INTERSECT/EXCEPT clauses are not supported in contview"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08598: "ORDER BY clauses are not supported in contview"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08599: "LIMIT clauses are not supported in contview"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08600: "DISTINCT clauses are not supported in contview"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08601: "%s cannot contain aggregates"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08602: "%s cannot contain window functions"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08603: "%s cannot contain GROUP BY clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08604: "%s cannot contain HAVING clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08605: "%s cannot contain ORDER BY clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08606: "%s cannot contain DISTINCT clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08607: "%s cannot contain LIMIT clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08608: "%s cannot contain UNION/INTERSECT/EXCEPT clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08609: "%s cannot contain FOR UPDATE clauses"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08610: "%s cannot contain CTEs"
+SQLSTATE: 42601
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08611: "Not support to set different alias on same column"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08612: "Not support to set same alias name with column name in %s function"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08613: "Not support to use alias in GROUP BY clause"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08614: "cont views can only be defined using SELECT queries"
+SQLSTATE: 42P17
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08615: "cont queries must have ONLY ONE stream in the FROM clause"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08616: "stream objects do not support schema"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08617: "cont queries must have group columns when do aggregate"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08618: "column '%s' must appear in the GROUP BY clause or be used in an aggregate function"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08619: "can't select '%s' in cont queries"
+SQLSTATE: 42702
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08620: "group by columns must be in select columns, %s is illegal"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08621: "cache lookup failed for agg %u"
+SQLSTATE: 29P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08622: "gather argument must be a single aggregate column"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08623: "relation is not a continuous view"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08624: "gather aggregates are only supported on continuous views"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08625: "Get contview id %u failed"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08626: "Aggref not found in worker targetlist"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08627: "gather argument is not an aggregate column"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08628: "update streaming monitor anum only support gather_interval and gather_completion_time"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08629: "unexpected group retrieval plan: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08630: "unexpected join type found in group retrieval plan: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08631: "unexpected outer plan found in group retrieval plan: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08632: "Lookup Tuple Hash Entry failed"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08633: "open cstore table failed"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08634: "sync data error, for wrong storage type"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08635: "group_hash_index NULL"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08636: "stream '%s' only support ADD COLUMN action"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08637: "contview do not support alter table action"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08638: "contview's material table do not support alter table action"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08639: "stream and contview can not be renamed"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08640: "stream and contview do not support alter schema action"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08641: "contview do not support triggers"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08642: "constraints on stream is not supported"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08643: "parse tree can't be null"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08644: "streaming objects do not support comment"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08645: "Not supported for streaming engine not enabled"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08646: "contview not defined"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08647: "maximum number of continuous queries exceeded"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08648: "Invalid interval range for 'gather_interval' option"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08649: "attri NULL"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08650: "%s option must be specified as '%s' or '%s'"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08651: "Invalid streaming engine options parameter"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08652: "TIME_COLUMN is not supported on key word '%s'"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08653: "TIME_COLUMN is only supported on column with timestamp type"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08654: "Forbid to set option '%s' for %s"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08655: "%s option must be specified as '%s'/'%s'"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08656: "%s option cannot be specified in conjunction with %s option"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08657: "%s option must be specified as an interval"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08658: "%s option must be specified as a minimum of %d second"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08659: "%s option must be specified in conjunction with %s option"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08660: "%s option must be specified in conjunction with %s or %s option"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08661: "%s option must be specified as an existing column"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08662: "%s option must be specified as a timestamp or timestampz"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08663: "%s option must be specified in conjunction with %s option and %s option"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08664: "partition_interval can't be zero, valid interval range from '30 minute' to '1 year'"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08665: "%s option must be an integral multiple of the %s option"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08666: "schema change is not allowed for historical version, please create new contview"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08667: "contview '%s' failed to get current version"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08668: "contview '%s' schema change is busy, please try again later"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08669: "contview current version '%s' failed to get schema"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08670: "cache lookup failed for reloptions %u"
+SQLSTATE: 29P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08671: "ContView with string optimize on doesn't support schema change"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08672: "'%s' options doesn't exist in contview '%s'"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08673: "'%s' options doesn't support on schema change"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08674: "contview '%s' failed to do schema change on existing columns"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08675: "contview '%s' failed to do schema change on different stream"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08676: "invalid _RETURN rule action specification"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08677: "failed to find _RETURN rule for view"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08678: "boundary error on partition %u"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08679: "'%s' is a stream"
+SQLSTATE: 42809
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08680: "stream %u RTE missing"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08681: "Cannot insert into stream within transaction."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08682: "unexpected outer plan type: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08683: "tuple updated again in the same transaction"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08684: "type error: unknown argument type %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08685: "type error: unsupported argument by this function"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08686: "get contview for id failed"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08687: "streaming_batch_mem is too small to fit a tuple"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08688: "Timeout waiting for task."
+SQLSTATE: 57014
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08689: "unrecognized node type when mutating parse tree: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08690: "could not create group search path"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08691: "couldn't find TuplestoreScan node in collector's plan: %d"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08692: "Failed to get group by clause."
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08693: "couldn't find the attr in matrel matching aggref"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08694: "no gather aggregate found"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08695: "period must be greater than 0"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08696: "interval defined in terms of month, year, century etc. not supported"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08697: "interval must not have sub-second precision"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08698: "can only select from one contview which has dictionary mapping feature"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08699: "Not supported for contview only available on stream"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08700: "Not supported for streaming engine only available on default database"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08701: "couldn't get database name"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08702: "could not initialize cudesc oid manager hash table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08703: "get_cudesc_oids::could not get part cudesc oids"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08704: "add_cudesc_oid::could not add part item"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08705: "part trace: refresh cache failed, partition oid=%u, new oid=%u"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08706: "PartCacheMgr:build partition failed"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08707: "clear_cache: remove entry failed"
+SQLSTATE: 22000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08708: "PartCacheMgr:remove partition entry failed, oid=%u"
+SQLSTATE: 22000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08709: "could not initialize part id manager hash table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08710: "PartIdMgr clear cache: remove entry failed"
+SQLSTATE: 22000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08711: "build global part id space cache hash table failed"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08712: "cannot build global part id space cache hash table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08713: "No free part id available in relation with id %u, please slower insert to wait compaction finish"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08714: "cannot find part currently in relation with id %u, \ please slower insert to wait compaction finish"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08715: "cannot find partition(%u) in cache!"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08716: "could not initialize TableStatus hash table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08717: "Hash table corrupted"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08718: "enter key in table failed!"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08719: "could not initialize TsTagsCache desc hash table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08720: "could not initialize max tagid cache desc hash table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08721: "Invalid column type:%d"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08722: "Tags hash table corrupted"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08723: "build global tags space cache hash table failed"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08724: "No tag id left for new tags in relation %u"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08725: "TagsCacheMgr clear cache: remove entry failed"
+SQLSTATE: 22000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08726: "data row is not empty, not support to set size"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08727: "dataRowVector is full, not support expand vector"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08728: "Invalid attkvtype:%d"
+SQLSTATE: TS001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08729: "delete tuple of delta table %s failed, HTSU_Result code: %d"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08730: "create tsdb delta table for timeseries table %s failed"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08731: "Alloc job_id failed. Maybe due to no free job_id."
+SQLSTATE: 55000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08732: "Update deltamerge job failed. job_id not found. Timeseries relation: %s"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08733: "Can not find datanase with id %u"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08734: "Can not find job id %ld in system table pg_job."
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08735: "search sys cache field when create tscudesc table"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08736: "search sys cache field when create tscudesc index"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08737: "create tag table index failed %s"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08738: "create table failed %s"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08739: "Table name length cannot be longger than %zu."
+SQLSTATE: 42P16
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08740: "[TSDB] invalid index name %s"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08741: "timeseries table only support to create single column index"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08742: "Failed to get a valid name from the specified index column"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08743: "Invalid index column %s for timeseries table, column type should be TsTag"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08744: "The tag relation for '%s' does not exist"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08745: "Cannot execute DDL on relation %s since enable_tsdb is off"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08746: "stop search table science dropdb or get shut down signal happened"
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08747: "compaction Auxiliary abort"
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08748: "StopCompactionWorkers wait too long!!!"
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08749: "check_alive_compaction_worker: thread %lu is still alive"
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08750: "search sys cache failed."
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08751: "Compaction work failed beacuse of database been droped."
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08752: "Delete from table is in progress! ."
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08753: "compaction task producer get exit signal"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08754: "invalid time_fill argument"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08755: "the number of timestamps must be less than %d"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08756: "invalid time_fill argument: the distance between start and finish must bigger than interval"
+SQLSTATE: 01000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08757: "Fill Hash Table out of memory"
+SQLSTATE: 01000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08758: "type %u is unsupported in fill_avg function"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08759: "hash table content fill error"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08760: "group error."
+SQLSTATE: 01000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08761: " invalid input argument for top_key function. "
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08762: "fill plan cannot get funciton name!"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08763: "fill plan obtains the null funcExpr!"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08764: "period in time_fill should not be zero"
+SQLSTATE: 22012
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08765: "can not find time column"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08766: "unsupport time_fill operation, please check the parameters"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08767: "multiple time_fill function calls are not supported"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08768: " extra operations in time_fill function are not supported"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08769: "fill_last, fill_first or fill_avg function must appeared with correct fill function"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08770: "the query must use only one of the three functions fill_first, fill_last and fill_avg instead of using combination of them."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08771: "time_fill function must appear in the GROUP BY clause "
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08772: "time_fill function must contain an aggregate function"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08773: "fill_last function must contain an aggregate function"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08774: "timeseries table currently can not operate with column table. "
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08775: "Unsupport data type %d in function series"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08776: "Join on time series not supported yet!"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08777: "Only partition table can define period or ttl."
+SQLSTATE: 42P16
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08778: "Period need to be defined."
+SQLSTATE: 42P16
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08779: "batch load tscudesc error, field_num:%d, loaded_num:%d, field tagid:%u, time tagid:%u"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08780: "cannot match delta table's attribute %s with timeseries table %s"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08781: "Unknown attkvtype: %d"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08783: "deltamerge failed on timeseries relation %s, enable_tsdb_delta is off."
+SQLSTATE: 42P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08784: "data row should be not NULL!"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08785: "This query is not supported by optimizer in TSStore."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08786: "cudesc reader idx (%d) exceeds size."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08787: "no field column"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08788: "compaction consumer requeset to quit"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08789: "Interrupt merge. Delete from table is in progress!"
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08790: "Interrupt merge cu. Delete from table is in progress!"
+SQLSTATE: DB010
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08791: "PartSearch init failed!, cudesc_oid=%u"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08792: "unsupported column in query."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08793: "error[%s] happened on part %u when run scan :%s."
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08794: "unsupported data type length %d"
+SQLSTATE: 42804
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08795: "Timeseries store don't support"
+SQLSTATE: 42804
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08796: "Load CU failed !.table(%s), column(%s), part_cudesc(%u)."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08797: "invalid CU in cu_id %u of relation %s file %s offset %lu, ret_code %u"
+SQLSTATE: XX001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08798: "init_partid use timeseries relation failed!, cudesc_oid=%u, errormsg=%s"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08799: "cannot match TagRow's attribute %s with timeseries table."
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08800: "cannot match delta table's attribute %s with timeseries table."
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08801: "Query failed. Timeseries relation not found."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08802: "ProjectionInfo not found during timeseries delta scan."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08803: "ProjectionInfo not found during timeseries tag scan."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08804: "Invalid timeseries relname."
+SQLSTATE: 42602
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08805: "Timeseries relation should only have one column as partition key. Relation: %s"
+SQLSTATE: TS000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08806: "partition key does not map to any table partition"
+SQLSTATE: 22023
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08807: "Timeseries store: unrecognized heap_update status: %u"
+SQLSTATE: D0011
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08808: "Delete row conflict"
+SQLSTATE: 21000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08809: "cannot find part(%u) description."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08810: "Unsupported ts data type: %d"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08811: "Invalid column length:%d"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08812: "Alter rule only can apply to one action under expansion."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08813: "Invalid table name prefix redis_, reserved for redistribution."
+SQLSTATE: 42P16
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08814: "Integer num_mem_used overflow."
+SQLSTATE: 22000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08815: "numStream should not be zero"
+SQLSTATE: 22012
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08816: "activeSql should not be zero"
+SQLSTATE: 22012
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08817: "action's targetlist cannot be found in source targetlist"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08818: "Expected TargetEntry node, but got node with type %d"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08819: "Fail to do UPDATE/DELETE"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08820: "Duplicate node_ids not expected in source target list"
+SQLSTATE: 42710
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08821: "Duplicate ctids not expected in source target list"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08822: "Duplicate tableOid not expected in source target list"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08823: "Duplicate bucketid not expected in source target list"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08824: "cache lookup failed for attribute %s of relation %u"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08825: "Source data plan's target list does not contain ctid colum"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08826: "Unsupported UPDATE/DELETE FOR TIMESERIES."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08827: "Fail to do %s"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08828: "Source data plan's target list does not contain primary key's column (%s.%s)"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08829: "TargetEntry should not return NULL."
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08830: "qry_tle should not be null"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08831: "expected a VAR node but got node of type %d"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08832: "rel_loc_info can not be NULL"
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08833: "Unexpected command type: %d"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08834: "No distribution information found for remote query path"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08835: "a join rel requires both the left path and right path when create remotequery path"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08836: "can not create remotequery path for ranges of type %d"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08837: "No node list provided for remotequery path when create join path"
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08838: "can not generate shippable query for base relations of type other than plain tables"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08839: "can not handle multiple relations in a single baserel"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08840: "a join relation path should have both left and right paths"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08841: "unexpected unshippable quals in JOIN tree"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08842: "join with unshippable join clauses can not be shipped"
+SQLSTATE: 55000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08843: "can not find var with varno = %u and varattno = %d"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08844: "Creating remote query plan for relations of type %d is not supported"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08845: "OBS foreign table doesn't support not-stream plan."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08846: "It is not supported that there are both foreign tables and non-foreign tables in one query."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08847: "remote_query_stmt should not be null"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08848: "cont view table '%s' cannot be changed"
+SQLSTATE: 55000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08849: "Delete statement does not surpport(can't ship)"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08850: "Fail to locate the %d result rel when make distribute dml!"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08851: "simple_rte_array is NULL unexpectly"
+SQLSTATE: 22004
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08852: "unrecognized CmdType when check dml: %d"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08853: "cache lookup failed for relation %u."
+SQLSTATE: 42P01
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08854: "insert redistribute key do not match update redistribute key"
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08855: "The distribution of merged and exec node are not the same\n"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08856: "could not convert type %s to %s"
+SQLSTATE: 42846
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08857: "LZ4 decompressing serialize plan failed, decompressing result %d"
+SQLSTATE: XX001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08858: "LZ4 decompressing serialize plan failed, returnLen not equal with oLen."
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08859: "LZ4 decompressing serialize plan failed, length of serializedPlan not euqal with oLen."
+SQLSTATE: XX008
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08860: "This SQL can not be executed because it can not be pushed down to datanode."
+SQLSTATE: 55000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08861: "rtiSize should not be zero"
+SQLSTATE: 22000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08862: "The CREATE BARRIER QUERY ARCHIVE message is expected to arrive from a Coordinator"
+SQLSTATE: OP003
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08863: "DN is not start archived."
+SQLSTATE: OP003
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08864: "Failed to send CREATE BARRIER PREPARE requestget all cn_conn: %d"
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08865: "CREATE BARRIER command %s failed with error %s"
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08866: "All CN is not start archived"
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08867: "Failed to send Create BARRIER QUERY ARCHIVE requset to the node"
+SQLSTATE: OP001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08868: "CREATE BARRIER command must be sent to a Coordinator"
+SQLSTATE: OP003
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08869: "Must be system admin or operator admin in operation mode to create barrier."
+SQLSTATE: 42501
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08870: "relation %u not find col %d"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08871: "Don't find Datanode oid: %u, nmember:%d, u_sess->pgxc_cxt.NumDataNodes: %d"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08872: "more than one item satisfied type is %c, relOid is %u"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08873: "Fail to build slicemap for list/range distributed table '%s'"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08874: "Can't get nodeoids for relation %s"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08875: "Failed to find DataNode"
+SQLSTATE: 58000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08876: "list/range redistribute datanode doesn't exist in consumer node list"
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08877: "Relation %s is not in redistribution mode"
+SQLSTATE: D0011
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08878: "Redistribution rel %s has no start_citd"
+SQLSTATE: D0011
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08879: "Redistribution rel %s has no end_citd"
+SQLSTATE: D0011
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08880: "%s : %d : The destination buffer or format is a NULL pointer or the invalid parameter handle is invoked."
+SQLSTATE: XX000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08881: "value of global config '%s' could not be null."
+SQLSTATE: 58000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08882: "invalid bucketmap length: %d"
+SQLSTATE: 58000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08883: "member_count can not be zero."
+SQLSTATE: D0011
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08884: "PGXC Group %u group not defined"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08885: "Current DN num %d and standby num %d mismatch DN matrix size(%d,%d)"
+SQLSTATE: 08006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08886: "dn info of hash table need be refreshed"
+SQLSTATE: XX006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08887: "datanode hash create failed"
+SQLSTATE: 53200
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08888: "failed to insert data node hash table"
+SQLSTATE: 53200
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08889: "DataNode Oid is invalid, invalid Oid is %u."
+SQLSTATE: XX006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08890: "delete failed from data node hash table, Oid is %u."
+SQLSTATE: XX006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08891: "[CsnMinSync] connections to data nodes or coordinators init failed"
+SQLSTATE: 0B000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08892: "failed to send csn to node %u"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08893: "failed to get pooled connections"
+SQLSTATE: 08006
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08894: "Cannot reload pool when in a transaction block"
+SQLSTATE: 25001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08895: "Cannot reload pool when Xact Nodes is not null"
+SQLSTATE: 25001
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08896: "failed to palloc handles, dn_num:%d, cn_num:%d"
+SQLSTATE: 53200
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08897: "Can't stop query on some data nodes"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08898: "Failed to read response from node, remote:%s, detail:%s."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08899: "u_sess->parser_cxt.param_message should not be NULL"
+SQLSTATE: XX005
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08900: "invalid Datanode number: %d, u_sess->pgxc_cxt.NumDataNodes: %d"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08901: "invalid coordinator number: %d,u_sess->pgxc_cxt.NumCoords: %d"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08902: "invalid coordinator number: %d, u_sess->pgxc_cxt.NumCoords: %d"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08903: "pooler: connect failed, invalid argument. Detail:%s%s%s"
+SQLSTATE: 08000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08904: "pooler: could not find node of node[%d], oid:%s[%u], needCreateArrayLen[%d], loopIndex[%d], j[%d], isNull[%d]"
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08905: "invalid %s node number: %d, handle_cnt is: %d, agent_cnt is %d."
+SQLSTATE:
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-08906: "pooler: The node(oid:%s[%u]) has no available slot, the number of slot in use reaches upper limit!"
+SQLSTATE: 53300 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08907: "pooler: Node(oid:%s[%u]) has been removed or altered" +SQLSTATE: 01000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08908: "pooler: Communication failure, failed to send to node %s[%u]session commands or invalid incoming data, %s.localhost: %s, localport: %s,remotehost: %s, remoteaddr: %s, remoteport:%s\n" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08909: "pooler: Communication failure, failed to send session commands or invalid incoming data, error count: %d\n" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08910: "pooler: Communication failure, failed to send session commands or invalid incoming data, error count: %d.\n" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08911: "pooler: shmemNums does not match with usessNums, pgxc_node reload failed.usessNumCoords[%d], usessNumDataNodes[%d], shmemNumCoords[%d], shmemNumDataNodes[%d].Input cnListNum[%d], dnListNum[%d], agentCnNums[%d], agentDnNums[%d]" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08912: "Failed to connect %s, detail:%s" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08913: "Failed to connect Nodes, detail:%s" +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08914: "slots_list_by_userid_hashTable corrupted" +SQLSTATE: 22000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08915: "pooler: The node(oid:%s[%u]) host is null." +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08916: "check_connection_status: get node_def failed, node_oid:%s[%u]." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08917: "Invalid Input for fill_conn_entry." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08918: "failed to epoll add errno:%d[%m]." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08919: "check_connection_status: failed to create epfd errno:%d[%m]." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08920: "The number of connections exceeds the upper limit and cannot be displayed. %lu" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08921: "register_pooler_session_param() failed: invalid name or queryString" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08922: "register_pooler_session_param() Get agent failed" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08923: "register_pooler_session_param() Invalid Length!" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08924: "delete_pooler_session_params() Get agent failed" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08925: "delete_pooler_session_params() Invalid Length!" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08926: "drop database failed, clean connections not completed" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08927: "drop user failed, clean connections not completed" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08928: "invalid server type: %d" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08929: "Could not begin transaction on %s[%u]." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08930: "Failed to send command to %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08931: "unexpected EOF on connection of %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08932: "Could not begin transaction on primary Datanode %s[%u]." 
+SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08933: "Failed to send command to Datanodes %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08934: "Unexpected response from Datanode %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08935: "the data size to be processed by resource package must not be zero" +SQLSTATE: 22012 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08936: "Can NOT get enough resource for this request. needed free rp: %lu" +SQLSTATE: XX005 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08937: "invalid server type(%d) in %s()" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08938: "conn_count can not be zero[%d]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08939: "Failed to fetch from %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08940: "Failed to fetch from Datanode %u" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08941: "Failed to send snapshot to %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08942: "Failed to send snapshot to Coordinator %s" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08943: "Failed to send command to Coordinator %s" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08944: "Unexpected response from %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08945: "Analyze is canceled because %s" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08946: "Failed to send queryid to %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08947: "Failed to send command to Datanode %u" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08948: "Fail to get conns to DNs." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08949: "Fail to send query to DNs." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08950: "respone datarow should be plan with text type." +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08951: "Unexpected response from DN %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08952: "Failed to send schema name to coordinator %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08953: "Failed to send snapshot to coordinator %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08954: "Failed to send ddl params to coordinator %u" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08955: "Failed to send queryid to coordinator %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08956: "Failed to send command to coordinator %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08957: "group_name is NULL,can't find installation-group." 
+SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08958: "Failed to send schema name to %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08959: "Failed to send bucket map to Datanode %u" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08960: "Failed to send ddl params to Datanode %u" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08961: "Failed to send unique sql id to coordinator %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08962: "Failed to send command to Datanode %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08963: "this bucketMap is null only happend during redis" +SQLSTATE: CG000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08964: "Failed to send cid to %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08965: "Failed to read response from CN %u." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08966: "Unexpected response from CN %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08967: "Failed to read response from %s[%u] when ending query" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08968: "Failed to read response from CN %s[%u] when ending query" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08969: "Unexpected response from CN %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08970: "invalid message format, please update to the same version" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08971: "cooperation analysis: relation does not exist." +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08972: "cooperation analysis: relation kind is not supported." +SQLSTATE: 42809 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08973: "cooperation analysis: colomn information does not match." +SQLSTATE: 42809 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08974: "cooperation analysis: unrecognized check result type: %d" +SQLSTATE: 38000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08975: "from remote cluster: %s" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08976: "cooperation analysis: invalid size of snapshot" +SQLSTATE: 22000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08977: "cooperation analysis: Failed to send command to remote cluster" +SQLSTATE: HV00B +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08978: "cooperation analysis: invalid for connecting datanode." +SQLSTATE: 0A000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08979: "invalid client encode %d" +SQLSTATE: 22023 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08980: "cooperation analysis: invalid length of schema name." +SQLSTATE: 22000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08981: "cooperation analysis: invalid length of table name." 
+SQLSTATE: 22000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08982: "get a illegals binary integer from a message buffer" +SQLSTATE: 08P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08983: "cooperation analysis: unrecognized message type: %d" +SQLSTATE: 38000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08984: "get message from remote cluster failed %s" +SQLSTATE: HV00B +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08985: "combiner's row_store cannot be NULL" +SQLSTATE: XX005 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08986: "Fail to operate invalid temp tables because datanode restarted,please try to create temp table again" +SQLSTATE: 22000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08987: "Failed to send unique sql id to %s before PREPARE command" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08988: "failed to send PREPARE TRANSACTION command to the node %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08989: "failed to send %s command to node %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08990: "write_conn_count can not be zero[%d]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08991: "Could not begin transaction %lu on Datanodes." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08992: "Failed to send %s to Datanode %s" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08993: "Could not begin transaction %lu on coordinators." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08994: "Failed to send %s to coordinator %s" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08995: "Failed to receive response when processing remote savepoint statement." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08996: "Unexpected response from Datanode %s" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08997: "Unexpected response from coordinator %s" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08998: "Unexpected response from the %s[%u], result = %d, request type %d" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-08999: "Unexpected remote state on the node %s." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09000: "get_random_dn_conn invalid argument, u_sess.NumDataNodes[%d]" +SQLSTATE: XX000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09001: "Incorrect remote connection node type : %d." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09002: "Failed to COMMIT the transaction on primary dn node: %s." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09003: "Could not begin transaction on primary datanode during get primary dn." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09004: "Failed to send queryid to datanode %s during get primary dn." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09005: "Failed to send query to datanode %s during get primary dn." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09006: "Failed to recieve response from datanode %s during get primary dn." +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09007: "target node %s is not in cluster nodes." +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09008: "Failed to send COMMIT/ROLLBACK on nodes: %s.Failed to COMMIT/ROLLBACKthe transaction on nodes: %s." 
+SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09009: "Connection error with Datanode, so failed to COMMIT the transaction on one or more nodes" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09010: "[CsnMinSync] failed to receive reponses from nodes" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09011: "Failed to receive message from %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09012: "[LIGHT PROXY] Failed to fetch from %s[%u]" +SQLSTATE: 08000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09013: "Can not connect to gtm when getting csn, there is a connection error." +SQLSTATE: 08006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09014: "Transaction %lu get invalid commit csn %lu from gtm" +SQLSTATE: 29P02 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09015: "Transaction's xid %lu different from gtm %lu handle %d" +SQLSTATE: 29P02 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09016: "Transaction %lu get invalid commit csn %lu from gtm handle %d" +SQLSTATE: 29P02 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09017: "No such database with ID %u" +SQLSTATE: +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09018: "Broadcast size exceeds the threshold: BroadcastSize=%ld, ThresholdSize=%ld, PlanId=%d" +SQLSTATE: 54000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09019: "Invalid stream type for data skew." +SQLSTATE: XX006 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09020: "SPI connect failure - returned %d, create sweep series hash failure" +SQLSTATE: DB010 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09021: "Cannot get relid of timeseries relation %s." +SQLSTATE: 29P01 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09022: "deltamerge failed on timeseries relation %s, "\ "lock is temporarily unavailable. Try later." +SQLSTATE: 55P03 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09023: "ttl not found for relation %u" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09024: "Can not assign group parent for group with default bucket length %d." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09025: "Can not find default group parent, please create installation node group first." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09026: "group parent should have default bucket length %d." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09027: "Can not assign group parent with different group member." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09028: "Can not assign group parent for group with bucketcnt %d which already exists." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09029: "NodeGroup bucketCnt %d is not valid" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09030: "NodeGroup bucketCnt %d is not power of 2" +SQLSTATE: 42601 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09031: "Can not create logic cluster group with non-default bucketcnt value." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09032: "nodes array is NULL, please assign proper nodes array." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09033: "Can not create non-default bucketcnt group, create installation group first." +SQLSTATE: 58000 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09034: "installation group with group parent is invalid" +SQLSTATE: 42704 +CAUSE: "invalid" +ACTION: "invalid" + + +GAUSS-09035: "can't get child node group with bucketlen %d." 
+SQLSTATE: 58000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-09036: "CREATE NODE GROUP ... DISTRIBUTE FROM can not be used with group parent."
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-09037: "installation group %s with group parent is invalid"
+SQLSTATE: 42704
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-09038: "Do not support drop node group when its child node group exists!"
+SQLSTATE: 0A000
+CAUSE: "invalid"
+ACTION: "invalid"
+
+
+GAUSS-09039: "pooler: Failed to send SIGTERM to openGauss thread:%lu in PoolManagerAbortTransactions(), failed: %m"
+SQLSTATE: 58000
+CAUSE: "invalid"
+ACTION: "invalid"
+
diff --git a/src/gausskernel/dbmind/tools/ai_manager/__init__.py b/distribute_escan.txt
similarity index 100%
rename from src/gausskernel/dbmind/tools/ai_manager/__init__.py
rename to distribute_escan.txt
diff --git a/doc/src/sgml/ref/alter_global_configuration.sgmlin b/doc/src/sgml/ref/alter_global_configuration.sgmlin
new file mode 100644
index 000000000..d93115797
--- /dev/null
+++ b/doc/src/sgml/ref/alter_global_configuration.sgmlin
@@ -0,0 +1,16 @@
+
+
+ALTER GLOBAL CONFIGURATION
+7
+SQL - Language Statements
+
+
+ALTER GLOBAL CONFIGURATION
+add or fix records of gs_global_config
+
+
+
+ALTER GLOBAL CONFIGURATION with(paraname=value, paraname=value...);
+
+
+
diff --git a/doc/src/sgml/ref/alter_package.sgmlin b/doc/src/sgml/ref/alter_package.sgmlin
new file mode 100644
index 000000000..49bc6406e
--- /dev/null
+++ b/doc/src/sgml/ref/alter_package.sgmlin
@@ -0,0 +1,16 @@
+
+
+ALTER PACKAGE
+7
+SQL - Language Statements
+
+
+ALTER PACKAGE
+change the definition of a package
+
+
+
+ALTER PACKAGE package_name OWNER TO new_owner;
+
+
+
diff --git a/doc/src/sgml/ref/alter_sequence.sgmlin b/doc/src/sgml/ref/alter_sequence.sgmlin
index 182347067..4af4bcea6 100755
--- a/doc/src/sgml/ref/alter_sequence.sgmlin
+++ b/doc/src/sgml/ref/alter_sequence.sgmlin
@@ -22,6 +22,8 @@ ALTER [ LARGE ] SEQUENCE [ IF EXISTS ] namemaxvalue | NO MAXVALUE | NOMAXVALUE ]
 [ OWNED BY { table_name.column_name | NONE } ];
 ALTER [ LARGE ] SEQUENCE [ IF EXISTS ] name OWNER TO new_owner;
+
+NOTICE: '[ LARGE ]' is only available in CENTRALIZED mode!
diff --git a/doc/src/sgml/ref/alter_table_partition.sgmlin b/doc/src/sgml/ref/alter_table_partition.sgmlin
index ba6ce41b5..88892afe6 100644
--- a/doc/src/sgml/ref/alter_table_partition.sgmlin
+++ b/doc/src/sgml/ref/alter_table_partition.sgmlin
@@ -1,61 +1,69 @@
-
-
-ALTER TABLE PARTITION
-7
-SQL - Language Statements
-
-
-ALTER TABLE PARTITION
-change the definition of a partition
-
-
-
-ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- action [, ... ];
-ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- RENAME PARTITION { partion_name | FOR ( partition_value [, ...] ) } TO partition_new_name;
-
-where action can be:
-move_clause |
- exchange_clause |
- row_clause |
- merge_clause |
- modify_clause |
- split_clause |
- add_clause |
- drop_clause
-where move_clause can be:
-MOVE PARTITION { partion_name | FOR ( partition_value [, ...] ) } TABLESPACE tablespacename
-where exchange_clause can be:
-EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
- WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )}
- [ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ]
-where row_clause can be:
-{ ENABLE | DISABLE } ROW MOVEMENT
-where merge_clause can be:
-MERGE PARTITIONS { partition_name } [, ...]
-INTO PARTITION partition_name
- [ TABLESPACE tablespacename ]
-where modify_clause can be:
-MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES }
-where split_clause can be:
-SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause }
-where split_point_clause can be:
-AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] )
-where no_split_point_clause can be:
-INTO {(partition_less_than_item [, ...] ) | (partition_start_end_item [, ...] )}
-where add_clause can be:
-ADD {partition_less_than_item | partition_start_end_item}
-where partition_less_than_item can be:
-PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } [, ...] ) [ TABLESPACE tablespacename ]
-where partition_start_end_item can be:
-PARTITION partition_name {
- {START(partition_value) END (partition_value) EVERY (interval_value)} |
- {START(partition_value) END ({partition_value | MAXVALUE})} |
- {START(partition_value)} |
- {END({partition_value | MAXVALUE})}
-} [TABLESPACE tablespace_name]
-where drop_clause can be:
-DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) }
-
-
+
+
+ALTER TABLE PARTITION
+7
+SQL - Language Statements
+
+
+ALTER TABLE PARTITION
+change the definition of a partition
+
+
+
+ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
+ action [, ... ];
+ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
+ RENAME PARTITION { partition_name | FOR ( partition_value [, ...] ) } TO partition_new_name;
+
+where action can be:
+ move_clause |
+ exchange_clause |
+ row_clause |
+ merge_clause |
+ modify_clause |
+ split_clause |
+ add_clause |
+ drop_clause |
+ truncate_clause
+where move_clause can be:
+MOVE PARTITION { partition_name | FOR ( partition_value [, ...] ) } TABLESPACE tablespacename
+where exchange_clause can be:
+EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
+ WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )}
+ [ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ]
+where row_clause can be:
+{ ENABLE | DISABLE } ROW MOVEMENT
+where merge_clause can be:
+MERGE PARTITIONS { partition_name } [, ...] INTO PARTITION partition_name
+ [ TABLESPACE tablespacename ]
+where modify_clause can be:
+MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES }
+where split_clause can be:
+SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause }
+where split_point_clause can be:
+AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] )
+where no_split_point_clause can be:
+INTO {(partition_less_than_item [, ...] ) | (partition_start_end_item [, ...] )}
+where add_clause can be:
+ADD PARTITION ( partition_col1_name = partition_col1_value [, partition_col2_name = partition_col2_value ] [, ...] )
+ [ LOCATION 'location1' ]
+ [ PARTITION (partition_colA_name = partition_colA_value [, partition_colB_name = partition_colB_value ] [, ...] ) ]
+ [ LOCATION 'location2' ]
+ADD {partition_less_than_item | partition_start_end_item}
+where partition_less_than_item can be:
+PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } [, ...] ) [ TABLESPACE tablespacename ]
+where partition_start_end_item can be:
+PARTITION partition_name {
+ {START(partition_value) END (partition_value) EVERY (interval_value)} |
+ {START(partition_value) END ({partition_value | MAXVALUE})} |
+ {START(partition_value)} |
+ {END({partition_value | MAXVALUE})}
+} [TABLESPACE tablespace_name]
+where drop_clause can be:
+DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) }
+where truncate_clause can be:
+TRUNCATE PARTITION { partition_name | FOR ( partition_value [, ...] ) } [ UPDATE GLOBAL INDEX ]
+NOTICE: 'truncate_clause' is only available in CENTRALIZED mode!
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/alter_table_subpartition.sgmlin b/doc/src/sgml/ref/alter_table_subpartition.sgmlin
new file mode 100644
index 000000000..780d2be2a
--- /dev/null
+++ b/doc/src/sgml/ref/alter_table_subpartition.sgmlin
@@ -0,0 +1,43 @@
+
+
+ALTER TABLE SUBPARTITION
+7
+SQL - Language Statements
+
+
+ALTER TABLE SUBPARTITION
+change the definition of a subpartition
+
+
+
+ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
+ action [, ... ];
+
+where action can be:
+ add_clause |
+ drop_clause |
+ split_clause |
+ truncate_clause
+where add_clause can be:
+ADD { partition_less_than_item | partition_list_item } [ ( subpartition_definition_list ) ]
+MODIFY PARTITION partition_name ADD subpartition_definition
+where partition_less_than_item can be:
+PARTITION partition_name VALUES LESS THAN ( partition_value | MAXVALUE ) [ TABLESPACE tablespacename ]
+where partition_list_item can be:
+PARTITION partition_name VALUES ( partition_value [, ...] | DEFAULT ) [ TABLESPACE tablespacename ]
+where subpartition_definition_list can be:
+SUBPARTITION subpartition_name [ VALUES LESS THAN ( partition_value | MAXVALUE ) | VALUES ( partition_value [, ...] | DEFAULT )] [ TABLESPACE tablespace ]
+where drop_clause can be:
+DROP PARTITION { partition_name | FOR ( partition_value ) } [ UPDATE GLOBAL INDEX ]
+DROP SUBPARTITION { subpartition_name | FOR ( partition_value, subpartition_value ) } [ UPDATE GLOBAL INDEX ]
+where split_clause can be:
+SPLIT SUBPARTITION { subpartition_name } { split_point_clause } [ UPDATE GLOBAL INDEX ]
+where split_point_clause can be:
+AT ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] ) |
+VALUES ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] )
+where truncate_clause can be:
+TRUNCATE SUBPARTITION { subpartition_name } [ UPDATE GLOBAL INDEX ]
+NOTICE: 'ALTER TABLE SUBPARTITION' is only available in CENTRALIZED mode!
+
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/create_index.sgmlin b/doc/src/sgml/ref/create_index.sgmlin
index 6c4a50963..35b827fed 100644
--- a/doc/src/sgml/ref/create_index.sgmlin
+++ b/doc/src/sgml/ref/create_index.sgmlin
@@ -1,25 +1,29 @@
-
-
-CREATE INDEX
-7
-SQL - Language Statements
-
-
-CREATE INDEX
-define a new index
-
-
-
-CREATE [ UNIQUE ] INDEX [ [schema_name.] index_name ] ON table_name [ USING method ]
- ({ { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] }[, ...] )
- [ WITH ( {storage_parameter = value} [, ... ] ) ]
- [ TABLESPACE tablespace_name ]
- [ WHERE predicate ];
-CREATE [ UNIQUE ] INDEX [ [schema_name.] index_name ] ON table_name [ USING method ]
- ( { { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS LAST ] } [, ...] )
- [ LOCAL [ ( { PARTITION index_partition_name [ TABLESPACE index_partition_tablespace ] } [, ...] ) ] | GLOBAL ]
- [ WITH ( { storage_parameter = value } [, ...] ) ]
- [ TABLESPACE tablespace_name ];
-
-
+
+
+CREATE INDEX
+7
+SQL - Language Statements
+
+
+CREATE INDEX
+define a new index
+
+
+
+CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [schema_name.] index_name ] ON table_name [ USING method ]
+ ({ { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] }[, ...] )
+ [ INCLUDE ( { column_name | ( expression ) }[, ...] ) ]
+ [ WITH ( {storage_parameter = value} [, ... ] ) ]
+ [ TABLESPACE tablespace_name ]
+ [ WHERE predicate ];
+CREATE [ UNIQUE ] INDEX [ [schema_name.] index_name ] ON table_name [ USING method ]
+ ( { { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS LAST ] } [, ...] )
+ [ LOCAL [ ( { PARTITION index_partition_name | SUBPARTITION index_subpartition_name [ TABLESPACE index_partition_tablespace ] } [, ...] ) ] | GLOBAL ]
+ [ INCLUDE ( { column_name | ( expression ) }[, ...] ) ]
+ [ WITH ( { storage_parameter = value } [, ...] ) ]
+ [ TABLESPACE tablespace_name ];
+
+NOTICE: 'SUBPARTITION index_subpartition_name' is only available in CENTRALIZED mode!
+
+
+
diff --git a/doc/src/sgml/ref/create_model.sgmlin b/doc/src/sgml/ref/create_model.sgmlin
index a6f40f63d..3cb22c661 100644
--- a/doc/src/sgml/ref/create_model.sgmlin
+++ b/doc/src/sgml/ref/create_model.sgmlin
@@ -23,6 +23,12 @@ where:
 * linear_regression: Compute a linear regression using Gradient Descent
 * svm_classification: Compute a support vector machine classifier using Gradient Descent
 * kmeans: Compute an unsupervised clustering
+ * pca: Compute a principal component analysis result using Gradient Descent
+ * multiclass: Compute a classification result for multiple types of data
+ * xgboost_regression_logistic: Compute logistic regression using XGBoost
+ * xgboost_binary_logistic: Compute logistic regression for binary classification using XGBoost
+ * xgboost_regression_squarederror: Compute regression with squared loss
+ * xgboost_regression_gamma: Compute gamma regression with log-link using XGBoost
 * select_query is a standard SELECT query
 For supervised machine learning algorithms, FEATURES and TARGET clauses are mandatory.
 For unsupervised machine learning algorithms, FEATURES
@@ -54,6 +60,12 @@ For example:
 * max_seconds: Maximum number of seconds doing the optimization
 * optimizer: Select optimzier between: gd (gradient descent) or ngd (normalized gradient descent)
 * tolerance: System stops when the percentage of changes between two iterations is below this percentage
+ * seed: Seed value for random
+ * kernel: Name of kernel for svm classification, valid values are 'linear'(default), 'gaussian' and 'polynomial'
+ * components: Number of output dimensions for kernels different than linear, default is MAX(2*features, 128)
+ * gamma: Gamma parameter for gaussian kernel, default value is 0.5
+ * degree: Degree parameter for polynomial kernel in the range 2 to 9, default is 2
+ * coef0: Coef0 parameter for polynomial kernel and value is greater than or equal to zero, default is 1.0
 * verbose: 0 (no output), 1 (more output)

# Hyperparameter list for 'kmeans':
@@ -67,7 +79,31 @@ For example:
 * seeding_function: Algorithm used for initial seeds: 'Random++' or 'Kmeans||'
 * verbose: 0 (no output), 1 (less output), or 2 (full output)

+# Hyperparameter list for 'xgboost_regression_logistic', 'xgboost_binary_logistic', 'xgboost_regression_gamma' and 'xgboost_regression_squarederror':
+ * batch_size: Number of tuples in each processing batch
+ * booster: Which booster to use, e.g., gbtree, gblinear or dart (default: gbtree)
+ * tree_method: The tree construction algorithm used in XGBoost. Choices: auto, exact, approx, hist, gpu_hist (gpu_hist only supported with GPU)
+ * eval_metric: Evaluation metric for validation data, default is 'rmse'
+ * seed: Seed value for random
+ * nthread: Number of parallel threads used to run XGBoost
+ * max_depth: Maximum depth of a tree (default 6) (valid only for tree boosters)
+ * gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree
+ * eta: Step size shrinkage used in update to prevent overfitting (default 0.3)
+ * min_child_weight: Minimum sum of instance weight (hessian) needed in a child (default 1)
+ * verbosity: Verbosity of printing messages: 0 (silent), 1 (warning), 2 (info), 3 (debug)
+# Hyperparameter list for 'pca':
+ * max_iterations: Maximum iterations until convergence
+ * batch_size: Number of tuples in each processing batch
+ * max_seconds: Maximum number of seconds doing the optimization
+ * number_components: Number of components to keep and value is greater than or equal to 1, default 1
+ * tolerance: System stops when the percentage of changes between two iterations is below this percentage, default is 0.0005
+ * seed: Seed value for random
+ * verbose: 0 (no output), 1 (more output)
+
+# Hyperparameter list for 'multiclass':
+ * classifier: name of gradient descent binary classifier, currently supports 'svm_classification' and 'logistic_regression'
+ * and all hyperparameters of the selected binary classifier
diff --git a/doc/src/sgml/ref/create_sequence.sgmlin b/doc/src/sgml/ref/create_sequence.sgmlin
index 06e4b0188..8ef9fd308 100644
--- a/doc/src/sgml/ref/create_sequence.sgmlin
+++ b/doc/src/sgml/ref/create_sequence.sgmlin
@@ -20,6 +20,8 @@ CREATE [ LARGE ] SEQUENCE name [ IN
 [ MINVALUE minvalue | NO MINVALUE | NOMINVALUE]
 [ MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE]
 [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE | NOCYCLE]
 [ OWNED BY { table_name.column_name | NONE } ];
+
+NOTICE: '[ LARGE ]' is only available in CENTRALIZED mode!
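A worked statement may help readers check the DB4AI hunks above against the documented syntax. The sketch below is illustrative only: the table patient_data, its columns, and the model name risk_clf are hypothetical; the algorithm type and hyperparameter names are taken verbatim from the lists added in create_model.sgmlin.

    -- Hedged sketch: train a binary classifier with the newly documented
    -- xgboost_binary_logistic algorithm type; all object names are illustrative.
    CREATE MODEL risk_clf USING xgboost_binary_logistic
        FEATURES age, weight, blood_pressure
        TARGET has_condition
        FROM patient_data
        WITH booster='gbtree', eta=0.3, max_depth=6, nthread=4;

    -- Applying the model afterwards, assuming the usual DB4AI PREDICT BY form:
    SELECT PREDICT BY risk_clf (FEATURES age, weight, blood_pressure) FROM patient_data;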
diff --git a/doc/src/sgml/ref/create_table.sgmlin b/doc/src/sgml/ref/create_table.sgmlin index 1179928d4..22e104526 100644 --- a/doc/src/sgml/ref/create_table.sgmlin +++ b/doc/src/sgml/ref/create_table.sgmlin @@ -1,73 +1,55 @@ - - -CREATE TABLE -7 -SQL - Language Statements - - -CREATE TABLE -define a new table - - - -CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name - ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] - | table_constraint - | LIKE source_table [ like_option [...] ] } - [, ... ]) - [ WITH ( {storage_parameter = value} [, ... ] ) ] - [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] - [ COMPRESS | NOCOMPRESS ] - [ TABLESPACE tablespace_name ] - [ DISTRIBUTE BY { REPLICATION | { HASH ( column_name [,...] ) - | RANGE ( column_name [,...] ) range_distribution_rules - | LIST ( column_name [,...] ) list_distribution_rules } - } ] - [ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ]; - -where column_constraint can be: -[ CONSTRAINT constraint_name ] -{ NOT NULL | - NULL | - CHECK ( expression ) | - DEFAULT default_expr | - GENERATED ALWAYS AS ( generation_expr ) STORED | - UNIQUE index_parameters | - PRIMARY KEY index_parameters | - ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) | - REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] - [ ON DELETE action ] [ ON UPDATE action ] } -[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] -where table_constraint can be: -[ CONSTRAINT constraint_name ] -{ CHECK ( expression ) | - UNIQUE ( column_name [, ... ] ) index_parameters | - PRIMARY KEY ( column_name [, ... ] ) index_parameters | - PARTIAL CLUSTER KEY ( column_name [, ... ] ) | - FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] - [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] } -[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] -where compress_mode can be: -{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS } -where like_option can be: -{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | DISTRIBUTION | ALL } -where index_parameters can be: -[ WITH ( {storage_parameter = value} [, ... ] ) ] -[ USING INDEX TABLESPACE tablespace_name ] -where range_distribution_rules can be: -[ ( SLICE name VALUES LESS THAN (expression | MAXVALUE [, ... ]) [DATANODE datanode_name] - [, ... ] ) | - ( SLICE name START (expression) END (expression) EVERY (expression) [DATANODE datanode_name] - [, ... ] ) | - SLICE REFERENCES table_name -] -where list_distribution_rules can be: -[ ( SLICE name VALUES (expression [, ... ]) [DATANODE datanode_name] - [, ... ] ) | - ( SLICE name VALUES (DEFAULT) [DATANODE datanode_name] ) | - SLICE REFERENCES table_name -] - - - - + + +CREATE TABLE +7 +SQL - Language Statements + + +CREATE TABLE +define a new table + + + +CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name + ( { column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint + | LIKE source_table [ like_option [...] ] } + [, ... ]) + [ WITH ( {storage_parameter = value} [, ... 
] ) ] + [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] + [ COMPRESS | NOCOMPRESS ] + [ TABLESPACE tablespace_name ]; + +where column_constraint can be: +[ CONSTRAINT constraint_name ] +{ NOT NULL | + NULL | + CHECK ( expression ) | + DEFAULT default_expr | + GENERATED ALWAYS AS ( generation_expr ) STORED | + UNIQUE index_parameters | + PRIMARY KEY index_parameters | + ENCRYPTED WITH ( COLUMN_ENCRYPTION_KEY = column_encryption_key, ENCRYPTION_TYPE = encryption_type_value ) | + REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] + [ ON DELETE action ] [ ON UPDATE action ] } +[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] +where table_constraint can be: +[ CONSTRAINT constraint_name ] +{ CHECK ( expression ) | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... ] ) index_parameters | + PARTIAL CLUSTER KEY ( column_name [, ... ] ) | + FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] + [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] } +[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] +where compress_mode can be: +{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS } +where like_option can be: +{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | DISTRIBUTION | ALL } +where index_parameters can be: +[ WITH ( {storage_parameter = value} [, ... ] ) ] +[ USING INDEX TABLESPACE tablespace_name ] + + + + diff --git a/doc/src/sgml/ref/create_table_subpartition.sgmlin b/doc/src/sgml/ref/create_table_subpartition.sgmlin new file mode 100644 index 000000000..9a8ac876e --- /dev/null +++ b/doc/src/sgml/ref/create_table_subpartition.sgmlin @@ -0,0 +1,59 @@ + + +CREATE TABLE SUBPARTITION +7 +SQL - Language Statements + + +CREATE TABLE SUBPARTITION +define a new table subpartition + + + +CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name +( { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint + | LIKE source_table [ like_option [...] ] } + [, ... ] +) + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ COMPRESS | NOCOMPRESS ] + [ TABLESPACE tablespace_name ] + PARTITION BY {RANGE | LIST | HASH} (partition_key) SUBPARTITION BY {RANGE | LIST | HASH} (subpartition_key) + ( + PARTITION partition_name1 [ VALUES LESS THAN (val1) | VALUES (val1[, ...]) ] [ TABLESPACE tablespace ] + ( + { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE tablespace ] } [, ...] + ) + [, ...] + ) [ { ENABLE | DISABLE } ROW MOVEMENT ]; + +where column_constraint can be: +[ CONSTRAINT constraint_name ] +{ NOT NULL | + NULL | + CHECK ( expression ) | + DEFAULT default_expr | + GENERATED ALWAYS AS ( generation_expr ) STORED | + UNIQUE index_parameters | + PRIMARY KEY index_parameters | + REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] + [ ON DELETE action ] [ ON UPDATE action ] } +[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ] +where table_constraint can be: +[ CONSTRAINT constraint_name ] +{ CHECK ( expression ) | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... ] ) index_parameters | + FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... 
] ) ]
+ [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where like_option can be:
+{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | ALL }
+where index_parameters can be:
+[ WITH ( {storage_parameter = value} [, ... ] ) ]
+[ USING INDEX TABLESPACE tablespace_name ]
+NOTICE: 'CREATE TABLE SUBPARTITION' is only available in CENTRALIZED mode!
+
+
+
diff --git a/doc/src/sgml/ref/create_type.sgmlin b/doc/src/sgml/ref/create_type.sgmlin
index 6898b5dc9..3d445d22f 100755
--- a/doc/src/sgml/ref/create_type.sgmlin
+++ b/doc/src/sgml/ref/create_type.sgmlin
@@ -47,6 +47,8 @@ CREATE TYPE name
 CREATE TYPE name AS ENUM
 ( [ 'lable' [, ... ] ] )
+
+CREATE TYPE name AS TABLE OF data_type
diff --git a/doc/src/sgml/ref/delete.sgmlin b/doc/src/sgml/ref/delete.sgmlin
index 46b6741f5..7d683ae90 100644
--- a/doc/src/sgml/ref/delete.sgmlin
+++ b/doc/src/sgml/ref/delete.sgmlin
@@ -1,20 +1,28 @@
-
-
-DELETE
-7
-SQL - Language Statements
-
-
-DELETE
-delete rows of a table
-
-
-
-[ WITH [ RECURSIVE ] with_query [, ...] ]
-DELETE [/*+ plan_hint */] FROM [ ONLY ] table_name [ * ] [ [ AS ] alias ]
- [ USING using_list ]
- [ WHERE condition | WHERE CURRENT OF cursor_name ] [ LIMIT row_count ]
- [ RETURNING { * | { output_expr [ [ AS ] output_name ] } [, ...] } ];
-
-
+
+
+DELETE
+7
+SQL - Language Statements
+
+
+DELETE
+delete rows of a table
+
+
+
+[ WITH [ RECURSIVE ] with_query [, ...] ]
+DELETE [/*+ plan_hint */] FROM [ ONLY ] table_name [partition_clause] [ * ] [ [ AS ] alias ]
+ [ USING using_list ]
+ [ WHERE condition | WHERE CURRENT OF cursor_name ] [ LIMIT row_count ]
+ [ RETURNING { * | { output_expr [ [ AS ] output_name ] } [, ...] } ];
+
+where with_query can be:
+with_query_name [ ( column_name [, ...] ) ] AS [ [ NOT ] MATERIALIZED ]
+( {select | values | insert | update | delete} )
+where partition_clause can be:
+PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } |
+SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] ) }
+NOTICE: 'partition_clause' is only available in CENTRALIZED mode!
+
+
+
diff --git a/doc/src/sgml/ref/drop_global_configuration.sgmlin b/doc/src/sgml/ref/drop_global_configuration.sgmlin
new file mode 100644
index 000000000..0777912a9
--- /dev/null
+++ b/doc/src/sgml/ref/drop_global_configuration.sgmlin
@@ -0,0 +1,16 @@
+
+
+DROP GLOBAL CONFIGURATION
+7
+SQL - Language Statements
+
+
+DROP GLOBAL CONFIGURATION
+drop records from gs_global_config
+
+
+
+DROP GLOBAL CONFIGURATION paraname, paraname...;
+
+
+
diff --git a/doc/src/sgml/ref/drop_sequence.sgmlin b/doc/src/sgml/ref/drop_sequence.sgmlin
index 63630a9ab..3ed37265b 100644
--- a/doc/src/sgml/ref/drop_sequence.sgmlin
+++ b/doc/src/sgml/ref/drop_sequence.sgmlin
@@ -17,6 +17,8 @@
 DROP [ LARGE ] SEQUENCE [ IF EXISTS ] { [schema.] sequence_name } [, ...] [ CASCADE | RESTRICT ];
+
+NOTICE: '[ LARGE ]' is only available in CENTRALIZED mode!
diff --git a/doc/src/sgml/ref/drop_table.sgmlin b/doc/src/sgml/ref/drop_table.sgmlin
index b5ffd6b20..611338bdf 100644
--- a/doc/src/sgml/ref/drop_table.sgmlin
+++ b/doc/src/sgml/ref/drop_table.sgmlin
@@ -1,17 +1,17 @@
-
-
-DROP TABLE
-7
-SQL - Language Statements
-
-
-DROP TABLE
-remove a table
-
-
-
-DROP TABLE [ IF EXISTS ]
-{[schema.]table_name} [, ...]
-[ CASCADE | RESTRICT ];
-
-
+
+
+DROP TABLE
+7
+SQL - Language Statements
+
+
+DROP TABLE
+remove a table
+
+
+
+DROP TABLE [ IF EXISTS ]
+{[schema.]table_name} [, ...] [ CASCADE | RESTRICT ] [ PURGE ];
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/grant.sgmlin b/doc/src/sgml/ref/grant.sgmlin
index e54882550..1bcda56ef 100755
--- a/doc/src/sgml/ref/grant.sgmlin
+++ b/doc/src/sgml/ref/grant.sgmlin
@@ -1,103 +1,110 @@
-
-
-GRANT
-7
-SQL - Language Statements
-
-
-GRANT
-define access privileges
-
-
-
-GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES |
- ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] }
- ON { [ TABLE ] table_name [, ...]
- | ALL TABLES IN SCHEMA schema_name [, ...] }
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { {{ SELECT | INSERT | UPDATE | REFERENCES | COMMENT } ( column_name [, ...] )}
- [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) }
- ON [ TABLE ] table_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { SELECT | UPDATE | USAGE | ALTER | DROP | COMMENT } [, ...]
- | ALL [ PRIVILEGES ] }
- ON { [ [ LARGE ] SEQUENCE ] sequence_name [, ...]
- | ALL SEQUENCES IN SCHEMA schema_name [, ...] }
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { CREATE | CONNECT | TEMPORARY | TEMP | ALTER | DROP | COMMENT }
- [, ...] | ALL [ PRIVILEGES ] }
- ON DATABASE database_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { USAGE | ALL [ PRIVILEGES ] }
- ON DOMAIN domain_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
- ON CLIENT_MASTER_KEY client_master_key
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] }
- ON COLUMN_ENCRYPTION_KEY column_encryption_key
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { USAGE | ALL [ PRIVILEGES ] }
- ON FOREIGN DATA WRAPPER fdw_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
- ON FOREIGN SERVER server_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
- ON { FUNCTION {function_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} [, ...]
- | ALL FUNCTIONS IN SCHEMA schema_name [, ...] }
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { USAGE | ALL [ PRIVILEGES ] }
- ON LANGUAGE lang_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { CREATE | USAGE | COMPUTE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] }
- ON NODE GROUP group_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] }
- ON LARGE OBJECT loid [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { CREATE | USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
- ON SCHEMA schema_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ];
-GRANT { { CREATE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
- ON TABLESPACE tablespace_name [, ...]
- TO { [ GROUP ] role_name | PUBLIC } [, ...]
- [ WITH GRANT OPTION ]; -GRANT { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON TYPE type_name [, ...] - TO { [ GROUP ] role_name | PUBLIC } [, ...] - [ WITH GRANT OPTION ]; -GRANT { USAGE | ALL [PRIVILEGES] } - ON DATA SOURCE src_name [, ...] - TO { [GROUP] role_name | PUBLIC } [, ...] - [WITH GRANT OPTION]; -GRANT { { READ | WRITE | ALTER | DROP } [, ...] | ALL [PRIVILEGES] } - ON DIRECTORY directory_name [, ...] - TO { [GROUP] role_name | PUBLIC } [, ...] - [WITH GRANT OPTION]; -GRANT { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON PACKAGE package_name [, ...] - TO { [ GROUP ] role_name | PUBLIC } [, ...] - [ WITH GRANT OPTION ]; -GRANT role_name [, ...] - TO role_name [, ...] - [ WITH ADMIN OPTION ]; -GRANT ALL { PRIVILEGES | PRIVILEGE } - TO role_name; - - - + + +GRANT +7 +SQL - Language Statements + + +GRANT +define access privileges + + + +GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | + ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] } + ON { [ TABLE ] table_name [, ...] + | ALL TABLES IN SCHEMA schema_name [, ...] } + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { {{ SELECT | INSERT | UPDATE | REFERENCES | COMMENT } ( column_name [, ...] )} + [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) } + ON [ TABLE ] table_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { SELECT | UPDATE | USAGE | ALTER | DROP | COMMENT } [, ...] + | ALL [ PRIVILEGES ] } + ON { [ [ LARGE ] SEQUENCE ] sequence_name [, ...] + | ALL SEQUENCES IN SCHEMA schema_name [, ...] } + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { CREATE | CONNECT | TEMPORARY | TEMP | ALTER | DROP | COMMENT } + [, ...] | ALL [ PRIVILEGES ] } + ON DATABASE database_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { USAGE | ALL [ PRIVILEGES ] } + ON DOMAIN domain_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] } + ON CLIENT_MASTER_KEY client_master_key + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { USAGE | DROP } [, ...] | ALL [ PRIVILEGES ] } + ON COLUMN_ENCRYPTION_KEY column_encryption_key + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { USAGE | ALL [ PRIVILEGES ] } + ON FOREIGN DATA WRAPPER fdw_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON FOREIGN SERVER server_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON { FUNCTION {function_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} [, ...] + | ALL FUNCTIONS IN SCHEMA schema_name [, ...] } + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { USAGE | ALL [ PRIVILEGES ] } + ON LANGUAGE lang_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { CREATE | USAGE | COMPUTE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] } + ON NODE GROUP group_name [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] + [ WITH GRANT OPTION ]; +GRANT { { SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } + ON LARGE OBJECT loid [, ...] + TO { [ GROUP ] role_name | PUBLIC } [, ...] 
+ [ WITH GRANT OPTION ];
+GRANT { { CREATE | USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
+ ON SCHEMA schema_name [, ...]
+ TO { [ GROUP ] role_name | PUBLIC } [, ...]
+ [ WITH GRANT OPTION ];
+GRANT { { CREATE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
+ ON TABLESPACE tablespace_name [, ...]
+ TO { [ GROUP ] role_name | PUBLIC } [, ...]
+ [ WITH GRANT OPTION ];
+GRANT { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
+ ON TYPE type_name [, ...]
+ TO { [ GROUP ] role_name | PUBLIC } [, ...]
+ [ WITH GRANT OPTION ];
+GRANT { USAGE | ALL [PRIVILEGES] }
+ ON DATA SOURCE src_name [, ...]
+ TO { [GROUP] role_name | PUBLIC } [, ...]
+ [WITH GRANT OPTION];
+GRANT { { READ | WRITE | ALTER | DROP } [, ...] | ALL [PRIVILEGES] }
+ ON DIRECTORY directory_name [, ...]
+ TO { [GROUP] role_name | PUBLIC } [, ...]
+ [WITH GRANT OPTION];
+GRANT { { EXECUTE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] }
+ ON { PACKAGE package_name [, ...]
+ | ALL PACKAGES IN SCHEMA schema_name [, ...] }
+ TO { [ GROUP ] role_name | PUBLIC } [, ...]
+ [ WITH GRANT OPTION ];
+GRANT role_name [, ...]
+ TO role_name [, ...]
+ [ WITH ADMIN OPTION ];
+GRANT ALL { PRIVILEGES | PRIVILEGE }
+ TO role_name;
+GRANT { CREATE ANY TABLE | ALTER ANY TABLE | DROP ANY TABLE | SELECT ANY TABLE | INSERT ANY TABLE |
+ UPDATE ANY TABLE | DELETE ANY TABLE | CREATE ANY SEQUENCE | CREATE ANY INDEX |
+ CREATE ANY FUNCTION | EXECUTE ANY FUNCTION | CREATE ANY PACKAGE |
+ EXECUTE ANY PACKAGE | CREATE ANY TYPE } [, ...]
+ TO [ GROUP ] role_name [, ...]
+ [ WITH ADMIN OPTION ];
+
+
+
diff --git a/doc/src/sgml/ref/insert.sgmlin b/doc/src/sgml/ref/insert.sgmlin
index dba8b67d8..b003ed018 100644
--- a/doc/src/sgml/ref/insert.sgmlin
+++ b/doc/src/sgml/ref/insert.sgmlin
@@ -1,20 +1,28 @@
-
-
-INSERT
-7
-SQL - Language Statements
-
-
-INSERT
-create new rows in a table
-
-
-
-[ WITH [ RECURSIVE ] with_query [, ...] ]
-INSERT [/*+ plan_hint */] INTO table_name [ ( column_name [, ...] ) ]
- { DEFAULT VALUES | VALUES {( { expression | DEFAULT } [, ...] ) }[, ...] | query }
- [ ON DUPLICATE KEY UPDATE { NOTHING | { column_name = { expression | DEFAULT } } [, ...] } ]
- [ RETURNING {* | {output_expression [ [ AS ] output_name ] }[, ...]} ];
-
-
+
+
+INSERT
+7
+SQL - Language Statements
+
+
+INSERT
+create new rows in a table
+
+
+
+[ WITH [ RECURSIVE ] with_query [, ...] ]
+INSERT [/*+ plan_hint */] INTO table_name [partition_clause] [ AS alias ] [ ( column_name [, ...] ) ]
+ { DEFAULT VALUES | VALUES {( { expression | DEFAULT } [, ...] ) }[, ...] | query }
+ [ ON DUPLICATE KEY UPDATE { NOTHING | { column_name = { expression | DEFAULT } } [, ...] [ WHERE condition ] } ]
+ [ RETURNING {* | {output_expression [ [ AS ] output_name ] }[, ...]} ];
+
+where with_query can be:
+with_query_name [ ( column_name [, ...] ) ] AS [ [ NOT ] MATERIALIZED ]
+( {select | values | insert | update | delete} )
+where partition_clause can be:
+PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } |
+SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] ) }
+NOTICE: 'partition_clause' is only available in CENTRALIZED mode!
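A short example may help here as well. This is a sketch of how the extended INSERT synopsis above composes in centralized mode; the table sales, the partition p2022, and the column names are hypothetical:

    -- Hedged sketch: route the row into a named partition and resolve a key
    -- conflict with the conditional ON DUPLICATE KEY UPDATE form documented above.
    -- All object names are illustrative, not part of the patch.
    INSERT INTO sales PARTITION (p2022) (order_id, amount)
        VALUES (1001, 99.50)
        ON DUPLICATE KEY UPDATE amount = 99.50 WHERE order_id = 1001;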
+
+
+
diff --git a/doc/src/sgml/ref/merge.sgmlin b/doc/src/sgml/ref/merge.sgmlin
index 3167fadfd..cd2043443 100644
--- a/doc/src/sgml/ref/merge.sgmlin
+++ b/doc/src/sgml/ref/merge.sgmlin
@@ -12,20 +12,26 @@
-MERGE [/*+ plan_hint */] INTO table_name [ [ AS ] alias ]
+MERGE [/*+ plan_hint */] INTO table_name [ partition_clause ] [ [ AS ] alias ]
 USING { { table_name | view_name } | subquery } [ [ AS ] alias ]
 ON ( condition )
 [
  WHEN MATCHED THEN
- UPDATE SET { column_name = { expression | DEFAULT } |
- ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]
+ UPDATE SET { column_name = { expression | subquery | DEFAULT } |
+ ( column_name [, ...] ) = ( { expression | subquery | DEFAULT } [, ...] ) } [, ...]
 [ WHERE condition ]
 ]
 [
  WHEN NOT MATCHED THEN
  INSERT { DEFAULT VALUES |
- [ ( column_name [, ...] ) ] VALUES ( { expression | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] }
+ [ ( column_name [, ...] ) ] VALUES ( { expression | subquery | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] }
 ];
+
+where partition_clause can be:
+PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } |
+SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] ) }
+NOTICE: 'partition_clause' is only available in CENTRALIZED mode!
+NOTICE: 'subquery' in the UPDATE and INSERT clauses is only available in CENTRALIZED mode!
diff --git a/doc/src/sgml/ref/purge.sgmlin b/doc/src/sgml/ref/purge.sgmlin
new file mode 100644
index 000000000..72330ab00
--- /dev/null
+++ b/doc/src/sgml/ref/purge.sgmlin
@@ -0,0 +1,19 @@
+
+
+PURGE
+7
+SQL - Language Statements
+
+
+PURGE
+purge object from gs_recyclebin
+
+
+
+PURGE { TABLE [schema_name.]table_name
+ | INDEX index_name
+ | RECYCLEBIN
+ }
+
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/revoke.sgmlin b/doc/src/sgml/ref/revoke.sgmlin
index 7f8f03d5e..af2539ce5 100644
--- a/doc/src/sgml/ref/revoke.sgmlin
+++ b/doc/src/sgml/ref/revoke.sgmlin
@@ -1,119 +1,125 @@
-
-
-REVOKE
-7
-SQL - Language Statements
-
-
-REVOKE
-remove access privileges
-
-
-
-REVOKE [ GRANT OPTION FOR ]
- { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES |
- ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] }
- ON { [ TABLE ] table_name [, ...]
- | ALL TABLES IN SCHEMA schema_name [, ...] }
- FROM { [ GROUP ] role_name | PUBLIC } [, ...]
- [ CASCADE | RESTRICT ];
-REVOKE [ GRANT OPTION FOR ]
- { {{ SELECT | INSERT | UPDATE | REFERENCES | COMMENT } ( column_name [, ...] )}
- [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) }
- ON [ TABLE ] table_name [, ...]
- FROM { [ GROUP ] role_name | PUBLIC } [, ...]
- [ CASCADE | RESTRICT ];
-REVOKE [ GRANT OPTION FOR ]
- { { SELECT | UPDATE | ALTER | DROP | COMMENT } [, ...]
- | ALL [ PRIVILEGES ] }
- ON { [ SEQUENCE ] sequence_name [, ...]
- | ALL SEQUENCES IN SCHEMA schema_name [, ...] }
- FROM { [ GROUP ] role_name | PUBLIC } [, ...]
- [ CASCADE | RESTRICT ];
-REVOKE [ GRANT OPTION FOR ]
- { { CREATE | CONNECT | TEMPORARY | TEMP | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
- ON DATABASE database_name [, ...]
- FROM { [ GROUP ] role_name | PUBLIC } [, ...]
- [ CASCADE | RESTRICT ];
-REVOKE [ GRANT OPTION FOR ]
- { USAGE | ALL [ PRIVILEGES ] }
- ON DOMAIN domain_name [, ...]
- FROM { [ GROUP ] role_name | PUBLIC } [, ...]
- [ CASCADE | RESTRICT ];
-REVOKE [ GRANT OPTION FOR ]
- { { USAGE | DROP } [, ...] | ALL [PRIVILEGES] }
- ON CLIENT_MASTER_KEYS client_master_keys_name [, ...]
- FROM { [ GROUP ] role_name | PUBLIC } [, ...]
- [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { USAGE | DROP } [, ...] | ALL [PRIVILEGES]} - ON COLUMN_ENCRYPTION_KEYS column_encryption_keys_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { READ | WRITE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] } - ON DIRECTORY directory_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { USAGE | ALL [ PRIVILEGES ] } - ON FOREIGN DATA WRAPPER fdw_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON FOREIGN SERVER server_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON { FUNCTION {function_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} [, ...] - | ALL FUNCTIONS IN SCHEMA schema_name [, ...] } - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { USAGE | ALL [ PRIVILEGES ] } - ON LANGUAGE lang_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { {CREATE | USAGE | COMPUTE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] } - ON NODE GROUP group_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } - ON LARGE OBJECT loid [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { CREATE | USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON SCHEMA schema_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { CREATE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON TABLESPACE tablespace_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON TYPE type_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ GRANT OPTION FOR ] - { USAGE | ALL [ PRIVILEGES ] } - ON DATA SOURCE src_name [, ...] - FROM { [GROUP] role_name | PUBLIC } [, ...]; -REVOKE [ GRANT OPTION FOR ] - { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } - ON PACKAGE package_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] - [ CASCADE | RESTRICT ]; -REVOKE [ ADMIN OPTION FOR ] - role_name [, ...] FROM role_name [, ...] - [ CASCADE | RESTRICT ]; -REVOKE ALL { PRIVILEGES | PRIVILEGE } FROM role_name; - - - + + +REVOKE +7 +SQL - Language Statements + + +REVOKE +remove access privileges + + + +REVOKE [ GRANT OPTION FOR ] + { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | + ALTER | DROP | COMMENT | INDEX | VACUUM } [, ...] | ALL [ PRIVILEGES ] } + ON { [ TABLE ] table_name [, ...] + | ALL TABLES IN SCHEMA schema_name [, ...] } + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { {{ SELECT | INSERT | UPDATE | REFERENCES | COMMENT } ( column_name [, ...] )} + [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) } + ON [ TABLE ] table_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] 
+ [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { SELECT | UPDATE | ALTER | DROP | COMMENT } [, ...] + | ALL [ PRIVILEGES ] } + ON { [ SEQUENCE ] sequence_name [, ...] + | ALL SEQUENCES IN SCHEMA schema_name [, ...] } + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { CREATE | CONNECT | TEMPORARY | TEMP | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON DATABASE database_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { USAGE | ALL [ PRIVILEGES ] } + ON DOMAIN domain_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { USAGE | DROP } [, ...] | ALL [PRIVILEGES] } + ON CLIENT_MASTER_KEYS client_master_keys_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { USAGE | DROP } [, ...] | ALL [PRIVILEGES]} + ON COLUMN_ENCRYPTION_KEYS column_encryption_keys_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { READ | WRITE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] } + ON DIRECTORY directory_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { USAGE | ALL [ PRIVILEGES ] } + ON FOREIGN DATA WRAPPER fdw_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON FOREIGN SERVER server_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { EXECUTE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON { FUNCTION {function_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} [, ...] + | ALL FUNCTIONS IN SCHEMA schema_name [, ...] } + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { USAGE | ALL [ PRIVILEGES ] } + ON LANGUAGE lang_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { {CREATE | USAGE | COMPUTE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] } + ON NODE GROUP group_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } + ON LARGE OBJECT loid [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { CREATE | USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON SCHEMA schema_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { CREATE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON TABLESPACE tablespace_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { { USAGE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] } + ON TYPE type_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ GRANT OPTION FOR ] + { USAGE | ALL [ PRIVILEGES ] } + ON DATA SOURCE src_name [, ...] + FROM { [GROUP] role_name | PUBLIC } [, ...]; +REVOKE [ GRANT OPTION FOR ] + { { EXECUTE | ALTER | DROP } [, ...] | ALL [ PRIVILEGES ] } + ON PACKAGE package_name [, ...] 
+ FROM { [ GROUP ] role_name | PUBLIC } [, ...] + [ CASCADE | RESTRICT ]; +REVOKE [ ADMIN OPTION FOR ] + role_name [, ...] FROM role_name [, ...] + [ CASCADE | RESTRICT ]; +REVOKE ALL { PRIVILEGES | PRIVILEGE } FROM role_name; +REVOKE [ ADMIN OPTION FOR ] + { CREATE ANY TABLE | ALTER ANY TABLE | DROP ANY TABLE | SELECT ANY TABLE | INSERT ANY TABLE | + UPDATE ANY TABLE | DELETE ANY TABLE | CREATE ANY SEQUENCE | CREATE ANY INDEX | + CREATE ANY FUNCTION | EXECUTE ANY FUNCTION | CREATE ANY PACKAGE | + EXECUTE ANY PACKAGE | CREATE ANY TYPE } [, ...] + FROM [ GROUP ] role_name [, ...]; + + + diff --git a/doc/src/sgml/ref/select.sgmlin b/doc/src/sgml/ref/select.sgmlin index 25af6032c..0fb2ec3ee 100644 --- a/doc/src/sgml/ref/select.sgmlin +++ b/doc/src/sgml/ref/select.sgmlin @@ -1,53 +1,57 @@ - - -SELECT -7 -SQL - Language Statements - - -SELECT -retrieve rows from a table or view - - - -[ WITH [ RECURSIVE ] with_query [, ...] ] -SELECT [/*+ plan_hint */] [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ] - { * | {expression [ [ AS ] output_name ]} [, ...] } - [ FROM from_item [, ...] ] - [ WHERE condition ] - [ [ START WITH condition ] CONNECT BY condition [ ORDER SIBLINGS BY expression ] ] - [ GROUP BY grouping_element [, ...] ] - [ HAVING condition [, ...] ] - [ WINDOW {window_name AS ( window_definition )} [, ...] ] - [ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ] - [ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ] - [ LIMIT { [offset,] count | ALL } ] - [ OFFSET start [ ROW | ROWS ] ] - [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ] - [ {FOR { UPDATE | NO KEY UPDATE | SHARE | KEY SHARE } [ OF table_name [, ...] ] [ NOWAIT ]} [...] ] -TABLE { ONLY { (table_name) | table_name } | table_name [ * ]}; - -where from_item can be: -[ ONLY ] table_name [ * ] [ partition_clause ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ] -[ TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] ] -|( select ) [ AS ] alias [ ( column_alias [, ...] ) ] -|with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ] -|function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ] -|function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] ) -|from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ] -where grouping_element can be: -() -|expression -|( expression [, ...] ) -|ROLLUP ( { expression | ( expression [, ...] ) } [, ...] ) -|CUBE ( { expression | ( expression [, ...] ) } [, ...] ) -|GROUPING SETS ( grouping_element [, ...] ) -where with_query can be: -with_query_name [ ( column_name [, ...] ) ] AS [ [ NOT ] MATERIALIZED ] ( {select | values | insert | update | delete} ) -where partition_clause can be: -PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } -where nlssort_expression_clause can be: -NLSSORT ( column_name, ' NLS_SORT = { SCHINESE_PINYIN_M | generic_m_ci } ' ) - - - + + +SELECT +7 +SQL - Language Statements + + +SELECT +retrieve rows from a table or view + + + +[ WITH [ RECURSIVE ] with_query [, ...] ] +SELECT [/*+ plan_hint */] [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ] + { * | {expression [ [ AS ] output_name ]} [, ...] } + [ FROM from_item [, ...] ] + [ WHERE condition ] + [ [ START WITH condition ] CONNECT BY condition [ ORDER SIBLINGS BY expression ] ] + [ GROUP BY grouping_element [, ...] ] + [ HAVING condition [, ...] 
]
+    [ WINDOW {window_name AS ( window_definition )} [, ...] ]
+    [ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ]
+    [ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ]
+    [ LIMIT { [offset,] count | ALL } ]
+    [ OFFSET start [ ROW | ROWS ] ]
+    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
+    [ {FOR { UPDATE | NO KEY UPDATE | SHARE | KEY SHARE } [ OF table_name [, ...] ] [ NOWAIT | WAIT N ]} [...] ]
+TABLE { ONLY { (table_name) | table_name } | table_name [ * ]};
+
+where from_item can be:
+[ ONLY ] table_name [ * ] [ partition_clause ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
+[ TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] ]
+[ TIMECAPSULE {TIMESTAMP | CSN} expression ]
+|( select ) [ AS ] alias [ ( column_alias [, ...] ) ]
+|with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
+|function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]
+|function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )
+|from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ]
+where grouping_element can be:
+()
+|expression
+|( expression [, ...] )
+|ROLLUP ( { expression | ( expression [, ...] ) } [, ...] )
+|CUBE ( { expression | ( expression [, ...] ) } [, ...] )
+|GROUPING SETS ( grouping_element [, ...] )
+where with_query can be:
+with_query_name [ ( column_name [, ...] ) ] AS [ [ NOT ] MATERIALIZED ] ( {select | values | insert | update | delete} )
+where partition_clause can be:
+PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) }
+SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] )}
+where nlssort_expression_clause can be:
+NLSSORT ( column_name, ' NLS_SORT = { SCHINESE_PINYIN_M | generic_m_ci } ' )
+NOTICE: '[ [ START WITH condition ] CONNECT BY condition [ ORDER SIBLINGS BY expression ] ]' is only available in CENTRALIZED mode!
+NOTICE: 'SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] )}' is only available in CENTRALIZED mode!
+
+
+
diff --git a/doc/src/sgml/ref/select_into.sgmlin b/doc/src/sgml/ref/select_into.sgmlin
index ea45a7168..3627d4560 100644
--- a/doc/src/sgml/ref/select_into.sgmlin
+++ b/doc/src/sgml/ref/select_into.sgmlin
@@ -1,30 +1,30 @@
-
-
-SELECT INTO
-7
-SQL - Language Statements
-
-
-SELECT INTO
-define a new table from the results of a query
-
-
-
-[ WITH [ RECURSIVE ] with_query [, ...] ]
-SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
-    { * | {expression [ [ AS ] output_name ]} [, ...] }
-    INTO [ UNLOGGED ] [ TABLE ] new_table
-    [ FROM from_item [, ...] ]
-    [ WHERE condition ]
-    [ GROUP BY expression [, ...] ]
-    [ HAVING condition [, ...] ]
-    [ WINDOW {window_name AS ( window_definition )} [, ...] ]
-    [ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ]
-    [ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ]
-    [ LIMIT { count | ALL } ]
-    [ OFFSET start [ ROW | ROWS ] ]
-    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
-    [ {FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ]} [...] ];
-
-
+
+
+SELECT INTO
+7
+SQL - Language Statements
+
+
+SELECT INTO
+define a new table from the results of a query
+
+
+
+[ WITH [ RECURSIVE ] with_query [, ...] ]
+SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
+    { * | {expression [ [ AS ] output_name ]} [, ...] }
+    INTO [ UNLOGGED ] [ TABLE ] new_table
+    [ FROM from_item [, ...] ]
+    [ WHERE condition ]
+    [ GROUP BY expression [, ...] ]
+    [ HAVING condition [, ...] ]
+    [ WINDOW {window_name AS ( window_definition )} [, ...] ]
+    [ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ]
+    [ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ]
+    [ LIMIT { count | ALL } ]
+    [ OFFSET start [ ROW | ROWS ] ]
+    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
+    [ {FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT | WAIT N ]} [...] ];
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/timecapsule_table.sgmlin b/doc/src/sgml/ref/timecapsule_table.sgmlin
new file mode 100644
index 000000000..b7cf1e0f6
--- /dev/null
+++ b/doc/src/sgml/ref/timecapsule_table.sgmlin
@@ -0,0 +1,17 @@
+
+
+TIMECAPSULE TABLE
+7
+SQL - Language Statements
+
+
+TIMECAPSULE TABLE
+restore a table to an earlier point in time, or to its state before TRUNCATE or DROP
+
+
+
+TIMECAPSULE TABLE
+    [schema.]table_name TO { CSN expr | TIMESTAMP expr | BEFORE { DROP [RENAME TO table_name] | TRUNCATE } }
+
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/truncate.sgmlin b/doc/src/sgml/ref/truncate.sgmlin
index bc882740f..bf20fa060 100644
--- a/doc/src/sgml/ref/truncate.sgmlin
+++ b/doc/src/sgml/ref/truncate.sgmlin
@@ -1,19 +1,19 @@
-
-
-TRUNCATE
-7
-SQL - Language Statements
-
-
-TRUNCATE
-empty a table or set of tables
-
-
-
-TRUNCATE [ TABLE ] [ ONLY ] {table_name [ * ]} [, ... ]
-    [ CONTINUE IDENTITY ] [ CASCADE | RESTRICT ];
-ALTER TABLE [ IF EXISTS ] { [ ONLY ] table_name | table_name * | ONLY ( table_name ) }
-    TRUNCATE PARTITION { partition_name | FOR ( partition_value [, ...] ) } ;
-
-
+
+
+TRUNCATE
+7
+SQL - Language Statements
+
+
+TRUNCATE
+empty a table or set of tables
+
+
+
+TRUNCATE [ TABLE ] [ ONLY ] {table_name [ * ]} [, ... ]
+    [ CONTINUE IDENTITY ] [ CASCADE | RESTRICT [ PURGE ]];
+ALTER TABLE [ IF EXISTS ] { [ ONLY ] table_name | table_name * | ONLY ( table_name ) }
+    TRUNCATE PARTITION { partition_name | FOR ( partition_value [, ...] ) } ;
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/update.sgmlin b/doc/src/sgml/ref/update.sgmlin
index 730e429c1..eb5974764 100644
--- a/doc/src/sgml/ref/update.sgmlin
+++ b/doc/src/sgml/ref/update.sgmlin
@@ -1,21 +1,33 @@
-
-
-UPDATE
-7
-SQL - Language Statements
-
-
-UPDATE
-update rows of a table
-
-
-
-UPDATE [/*+ plan_hint */] [ ONLY ] table_name [ * ] [ [ AS ] alias ]
-    SET {column_name = { expression | DEFAULT } |
-        ( column_name [, ...] ) = {( { expression | DEFAULT } [, ...] ) |sub_query }
-    }[, ...]
-    [ FROM from_list] [ WHERE condition ]
-    [ RETURNING {* | {output_expression [ [ AS ] output_name ]} [, ...] }];
-
-
+
+
+UPDATE
+7
+SQL - Language Statements
+
+
+UPDATE
+update rows of a table
+
+
+
+UPDATE [/*+ plan_hint */] [ ONLY ] table_name [ partition_clause ] [ * ] [ [ AS ] alias ]
+    SET {column_name = { expression | DEFAULT } |
+        ( column_name [, ...] ) = {( { expression | DEFAULT } [, ...] ) |sub_query }
+    }[, ...]
+    [ FROM from_list] [ WHERE condition ]
+    [ RETURNING {* | {output_expression [ [ AS ] output_name ]} [, ...] }];
+
+where sub_query can be:
+SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
+{ * | {expression [ [ AS ] output_name ]} [, ...] }
+[ FROM from_item [, ...] ]
+[ WHERE condition ]
+[ GROUP BY grouping_element [, ...] ]
+[ HAVING condition [, ...] ]
+where partition_clause can be:
+PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } |
+SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] ) }
+NOTICE: 'partition_clause' is only available in CENTRALIZED mode!
+
+
\ No newline at end of file
diff --git a/doc/src/sgml/ref/vacuum.sgmlin b/doc/src/sgml/ref/vacuum.sgmlin
index 69766b7db..43569d48f 100644
--- a/doc/src/sgml/ref/vacuum.sgmlin
+++ b/doc/src/sgml/ref/vacuum.sgmlin
@@ -1,22 +1,24 @@
-
-
-VACUUM
-7
-SQL - Language Statements
-
-
-VACUUM
-garbage-collect and optionally analyze a database
-
-
-
-VACUUM [ ( { FULL | FREEZE | VERBOSE | {ANALYZE | ANALYSE }} [,...] ) ]
-    [ table_name [ (column_name [, ...] ) ] ] [ PARTITION ( partition_name ) ];
-VACUUM [ FULL [ COMPACT ] ] [ FREEZE ] [ VERBOSE ] [ table_name ] [ PARTITION ( partition_name ) ];
-VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] { ANALYZE | ANALYSE } [ VERBOSE ]
-    [ table_name [ (column_name [, ...] ) ] ] [ PARTITION ( partition_name ) ];
-VACUUM DELTAMERGE [ table_name ];
-VACUUM HDFSDIRECTORY [ table_name ];
-
-
+
+
+VACUUM
+7
+SQL - Language Statements
+
+
+VACUUM
+garbage-collect and optionally analyze a database
+
+
+
+VACUUM [ ( { FULL | FREEZE | VERBOSE | {ANALYZE | ANALYSE }} [,...] ) ]
+    [ table_name [ (column_name [, ...] ) ] ] [ PARTITION ( partition_name ) | SUBPARTITION ( subpartition_name ) ];
+VACUUM [ FULL [COMPACT] ] [ FREEZE ] [ VERBOSE ] [ table_name ]
+    [ PARTITION ( partition_name ) | SUBPARTITION ( subpartition_name ) ];
+VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] { ANALYZE | ANALYSE } [ VERBOSE ]
+    [ table_name [ (column_name [, ...] ) ] ] [ PARTITION ( partition_name ) | SUBPARTITION ( subpartition_name ) ];
+VACUUM DELTAMERGE [ table_name ];
+VACUUM HDFSDIRECTORY [ table_name ];
+NOTICE: 'SUBPARTITION ( subpartition_name )' is only available in CENTRALIZED mode!
+
+
\ No newline at end of file
diff --git a/escan.txt b/escan.txt
index 5d2a48dff..5ed1c9c04 100644
--- a/escan.txt
+++ b/escan.txt
@@ -747,7 +747,6 @@ src/gausskernel/runtime/executor/nodeCtescan.cpp
 src/gausskernel/runtime/executor/nodeExtensible.cpp
 src/gausskernel/runtime/executor/nodeForeignscan.cpp
 src/gausskernel/runtime/executor/nodeFunctionscan.cpp
-src/gausskernel/runtime/executor/nodeGD.cpp
 src/gausskernel/runtime/executor/nodeGroup.cpp
 src/gausskernel/runtime/executor/nodeHash.cpp
 src/gausskernel/runtime/executor/nodeHashjoin.cpp
diff --git a/liteom/install.sh b/liteom/install.sh
new file mode 100644
index 000000000..fe11b0070
--- /dev/null
+++ b/liteom/install.sh
@@ -0,0 +1,1242 @@
+#!/bin/bash
+# -*- coding:utf-8 -*-
+#############################################################################
+# Copyright (c): 2021, Huawei Tech. Co., Ltd.
+# FileName     : install.sh
+# Version      : V1.0.0
+# Date         : 2021-04-17
+# Description  : the script used to install the single cluster on one machine
+#########################################
+function check_passwd()
+{
+    passwd=$1
+    local -i num=0
+    if [ -n "$(echo $passwd | grep -E --color '^(.*[a-z]+).*$')" ]
+    then
+        num=$[ ${num} + 1 ]
+    fi
+    if [ -n "$(echo $passwd | grep -E --color '^(.*[A-Z]).*$')" ]
+    then
+        num=$[ ${num} + 1 ]
+    fi
+    password_special_char=('~' '!' '@' '#' '$' '%' '^' '&' '(' ')' '_' '-' '*'
+    '+' '=' '{' '[' ']' '}' '|' '\' ':' ';' '"' ',' "'" '.' '<' '>' '?'
+    '/')
+    for element in ${password_special_char[@]}
+    do
+        compare_str="${element}"
+        if [[ "${passwd}" =~ "${compare_str}" ]]
+        then
+            num=$[ ${num} + 1 ]
+            break
+        fi
+    done
+
+    if [ -n "$(echo $passwd | grep -E --color '^(.*[0-9]).*$')" ]
+    then
+        num=$[ ${num} + 1 ]
+    fi
+    if [ ${#passwd} -lt 8 ] || [ $num -lt 3 ] || [ ${#passwd} -gt 32 ]
+    then
+        return 1
+    fi
+    return 0
+}
+
+declare password=""
+read -t 1 password
+
+if [ "${password}" != "" ]
+then
+    check_passwd "${password}"
+    if [ $? -ne 0 ]
+    then
+        echo -e "\033[31mthe password can contain only 8 to 32 characters
+        and at least three types of the following characters: uppercase letters, lowercase letters, digits, and special characters.\033[0m"
+        exit 1
+    fi
+fi
+
+declare user=$(whoami)
+if [ X"$user" = X"root" ]; then
+    echo "error: cannot install gauss as root"
+    exit 1
+fi
+
+declare root_path=$(cd $(dirname $0);pwd)
+if [ -e "${root_path}/dependency" ]
+then
+    export LD_LIBRARY_PATH="${root_path}"/dependency:$LD_LIBRARY_PATH
+fi
+
+declare data_path=""
+declare app_path=""
+declare env_file=~/.bashrc
+declare mode_type="single"
+declare -a localips
+declare -a localhosts
+declare -a remoteips
+declare -i port=5432
+declare ssl="off"
+declare init_parameters=""
+declare init_show_parameters=""
+declare config_parameters=""
+declare password_check=""
+declare nodename=""
+declare action="start"
+declare mode="-Z single_node"
+declare -i replconninfo_flag=1
+declare -i install_path_full_flag=1
+declare -i data_full_flag=1
+declare -i start=1
+declare -i ulimit_flag=1
+declare -i interactive_passwd_flag=1
+declare log_path=""
+declare guc_file=""
+declare log_file="${root_path}/install.log"
+declare cert_path=""
+declare client_ip=""
+
+
+if [ -e "${log_file}" ]
+then
+    cat /dev/null > "${log_file}"
+else
+    touch "${log_file}"
+fi
+
+if [ $? -ne 0 ]
+then
+    echo "error: failed to create the log file."
+    exit 1
+fi
+
+# obtain all IP addresses of the local host.
+declare -i ipindex=0
+for localip in $(/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v ::1|awk '{print $2}'| awk -F "addr:" '{print $NF}')
+do
+    localips[ipindex]=${localip}
+    ipindex=$[ ${ipindex} + 1 ]
+done
+if [ ${#localips[*]} -eq 0 ]
+then
+    die "the ip address of the machine is not detected."
+fi
+
+function usage()
+{
+    echo "
+Usage: $0 [OPTION]
+Arguments:
+    -D|--data-path           data path
+    -R|--app-path            app directory
+    -P|--gsinit-parameter    initializing database parameters
+    -C|--dn-guc              database configuration parameters
+    -m|--mode                database installation mode, only three modes are supported: single, primary and standby, default single
+    -l|--log-path            global log configuration path
+    -f|--guc-file            global GUC configuration file
+    -n|--nodename            node name (the default value is the same as mode)
+    -h|--help                show this help, then exit
+    --env-sep-file           detach environment variable files
+    --start                  indicates whether to start the cluster.
+    --ulimit                 indicates whether to set the maximum number of open files
+    --cert-path              SSL certificate path, if this parameter is used, SSL will be on.
+    --ssl-client-ip          client ip address, the parameter modification takes effect only when the SSL switch is turned on.
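+
+Example (illustrative paths):
+    sh install.sh -D /data/dn1 -R /opt/gaussdb --start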
+ " +} + +function info() +{ + echo "$1" >> "${log_file}" + echo -e "\033[32minfo:\033[0m$1" +} + +function log() +{ + echo "$1" >> "${log_file}" + echo "$1" +} + +function die() +{ + echo "$1" >> "${log_file}" + echo -e "\033[31m$1\033[0m" + exit 1 +} + +function warn() +{ + echo "$1" >> "${log_file}" + echo -e "\033[33m$1\033[0m" +} + +function security_check() +{ + if [[ "$1" == *[\(\)\{\}\[\]\<\>\`\\\*\!\|\;\&\$\~\?]* ]];then + die "$1 contain illegal characters." + fi +} + +function path_security_check() +{ + if [[ "$1" == *[\(\)\{\}\[\]\<\>\`\\\ \*\!\|\;\&\$\~\?]* ]];then + die "$1 contain illegal characters." + fi +} + +function hide_password() +{ + tmppassword="" + stty -echo + if [ $1 -eq 0 ] + then + read -p "please input database password:" password + else + read -p "please retry input database password:" password_check + fi + stty echo +} + +# check if it has read/write/execute permission. +function check_red_permission() +{ + if [ ! -r "$1" ] + then + die "the user does not have the read permission on the directory/file $1." + fi +} +function check_write_permission() +{ + if [ ! -w "$1" ] + then + die "the user does not have the write permission on the directory/file $1." + fi +} +function check_execute_permission() +{ + if [ ! -x "$1" ] + then + die "the user does not have the execute permission on the directory/file $1." + fi +} + +# check whether the path exists. +function check_path() +{ + if echo "$1"|grep -Eq "^/{1,}$"; then + die "path cannot be / " + fi + path_security_check "$1" + if [ -e "$1" ] + then + if [ -f "$1" ] + then + die "the path $1 already exists and it is a file." + fi + if [[ -n $(ls -A "$1") && "$2" = "install" ]] + then + install_path_full_flag=0 + elif [[ -n $(ls -A "$1") && "$2" = "data" ]] + then + data_full_flag=0 + fi + else + mkdir -p "$1" + chmod 700 "$1" + fi + check_red_permission "$1" + check_write_permission "$1" + check_execute_permission "$1" +} + +# check whether the IPV4 address is valid. +function is_valid_ipv4() +{ + local ip=$1 + local ret=1 + if [[ "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] + then + # use dots (.) to convert the data into an array to facilitate the following judgment. + ip=(${ip//\./ }) + [[ ${ip[0]} -le 255 && ${ip[1]} -le 255 && ${ip[2]} -le 255 && ${ip[3]} -le 255 ]] + ret=$? + fi + return $ret +} + +# check whether the IPV6 address is valid. +function is_valid_ipv6() +{ + parameter_ip=$1 + if [[ "$1" =~ fe80.* ]] + then + if [ -z $(echo "$1" | grep %) ] + then + die "the local ipv6 address needs to be added with % and the network adapter name(such as fe80*%eth0)." + fi + parameter_ip=${parameter_ip%\%*} + fi + local ip=${parameter_ip} + local ret=1 + if [[ "$ip" =~ ([a-f0-9]{1,4}(:[a-f0-9]{1,4}){7}|[a-f0-9]{1,4}(:[a-f0-9]{1,4}){0,7}::[a-f0-9]{0,4}(:[a-f0-9]{1,4}){0,7}) ]] + then + ret=0 + fi + return $ret +} + +# check whether the local host port is occupied +# check whether the entered ip address is the same as the ip address of the host +# identify the ip address of the remote host and add it to the configuration file later. +function check_conf_paramters() +{ + replconninfo=$(echo $1 | grep "replconninfo") + port_flag=$(echo $1 | grep "port") + if [ "${replconninfo}" != "" ] + then + replconninfo_flag=0 + remote_port_flag=$(echo "$1" | grep "remoteport") + if [ -z "${remote_port_flag}" ] + then + die "replconninfo need parameter of 'remoteport'." + fi + # prevent repeated port identification and remove remoteport. 
+ parameter=$(echo "$1" | sed 's/remoteport//g' | sed "s/remoteport//g" | sed "s#'##g" | sed 's#"##g') + localhost_flag=$(echo "${parameter}" | grep "localhost") + if [ "${localhost_flag}" = "" ] + then + die "replconninfo need parameter of 'localhost.'" + fi + remotehost_flag=$(echo "${parameter}" | grep "remotehost") + if [ "${remotehost_flag}" = "" ] + then + die "replconninfo need parameter of 'remotehost'." + fi + localport_flag=$(echo "${parameter}" | grep "localport") + if [ "${localport_flag}" = "" ] + then + die "replconninfo need parameter of 'localport'." + fi + + # init parameters + parameter=$(echo "$1" | sed "s/remoteport//g" | sed "s#'##g" | sed 's#"##g') + parameter_array=(${parameter//=/ }) + len=${#parameter_array[*]} + if [[ "${localhost_flag}" != "" || "${localport_flag}" != "" || "${remote_port_flag}" != "" || "${remotehost_flag}" != "" ]] + then + index=0 + while [ ${index} -lt $[ ${len} + 1 ] ] + do + key=${parameter_array[${index}]} + if [ "${key}" = "localhost" ] + then + index=$[ ${index} + 1 ] + value=${parameter_array[${index}]} + check_value=${value} + is_valid_ipv4 "$value" + if [ $? -ne 0 ] + then + is_valid_ipv6 "$value" + if [ $? -ne 0 ] + then + is_valid_ipv6 "$value" + die "the local ip address ${value} is not standard." + fi + if [[ "$value" =~ fe80.* ]] + then + check_value=${check_value%\%*} + fi + fi + res=$(echo "${localips[@]}" | grep -wq "${check_value}" && echo "yes" || echo "no") + if [ "${res}" = "no" ] + then + die "the ip address of the network adapter is inconsistent with that of the host ${value}" + else + res=$(echo "${localhosts[@]}" | grep -wq "${check_value}" && echo "yes" || echo "no") + if [ "${res}" = "no" ] + then + localhostscount=${#localhosts[*]} + localhosts[localhostscount]=${value} + fi + fi + elif [ "${key}" = "remotehost" ] + then + index=$[ ${index} + 1 ] + value=${parameter_array[${index}]} + is_valid_ipv4 "$value" + if [ $? -ne 0 ] + then + is_valid_ipv6 "$value" + if [ $? -ne 0 ] + then + die "the remote ip address ${value} is not standard." + fi + fi + res=$(echo "${remoteips[@]}" | grep -wq "${value}" && echo "yes" || echo "no") + if [ "${res}" = "no" ] + then + remoteipcount=${#remoteips[*]} + remoteips[remoteipcount]=${value} + fi + elif [ "${key}" = "localport" ] + then + index=$[ ${index} + 1 ] + value=${parameter_array[${index}]} + if [[ ${value} -lt 1024 || ${value} -gt 65535 ]] + then + die "the port number must be between 1024 and 65535." + fi + netstat | grep "${value}" + if [ $? -eq 0 ] + then + die "the port ${value} is already in use" + fi + elif [ "${key}" = "remoteport" ] + then + index=$[ ${index} + 1 ] + value=${parameter_array[${index}]} + if [[ ${value} -lt 1024 || ${value} -gt 65535 ]] + then + die "the port number must be between 1024 and 65535." + fi + netstat | grep "${value}" + if [ $? -eq 0 ] + then + die "the port ${value} is already in use" + fi + fi + index=$[ ${index} + 1 ] + done + fi + elif [ "${port_flag}" != "" ] + then + parameter=$1 + parameter_array=(${parameter//=/ }) + + if [ ${#parameter_array[*]} -eq 2 ] + then + value=${parameter_array[1]} + if [[ ${value} -lt 1024 || ${value} -gt 65535 ]] + then + die "the port number must be between 1024 and 65535." + fi + netstat | grep "${value}" + if [ $? -eq 0 ] + then + die "the port ${value} is already in use" + fi + port=${value} + fi + fi + config_parameters="${config_parameters} -c \"$1\"" +} + +# check whether the database password is normal and empty. 
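+# check_init_paramters() scans a -P/--gsinit-parameter value for an inline password
+# (given as '-w <pwd>' or '--pwpasswd=<pwd>'), validates it with check_passwd, and
+# keeps a masked '-w ******' copy in init_show_parameters for logging; a -W flag
+# leaves password prompting to gs_initdb itself. Illustrative call (hypothetical values):
+#   check_init_paramters "--encoding=UTF-8 -w Gauss_234"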
+function check_init_paramters() +{ + parameter=$(echo "$1" | sed "s#'##g" | sed 's#"##g') + interactive_passwd=$(echo "${parameter}" | grep "\-W") + if [ "${interactive_passwd}" != "" ] + then + interactive_passwd_flag=0 + fi + passwd_flag=$(echo "${parameter}" | grep -E "\-w\s+|\-\-pwpasswd=") + if [ "${passwd_flag}" != "" ] + then + pwpasswd_flag=$(echo "${parameter}" | grep -E "\-\-pwpasswd=") + if [ "${pwpasswd_flag}" != "" ] + then + parameter=$(echo "${parameter}" | sed "s/--pwpasswd=/--pwpasswd /g") + fi + parameter_array=(${parameter// / }) + len=${#parameter_array[*]} + index=0 + while [ ${index} -lt $[ ${len} + 1 ] ] + do + key=${parameter_array[${index}]} + if [[ "${key}" = "-w" || "${key}" = "--pwpasswd" ]] + then + index=$[ ${index} + 1 ] + password=${parameter_array[${index}]} + check_passwd $password + if [ $? -ne 0 ] + then + die "the password can contain only 8 to 32 characters + and at least three types of the following characters: uppercase letters, lowercase letters, digits, and special characters." + fi + fi + index=$[ ${index} + 1 ] + done + init_show_parameters="${init_show_parameters} ""-w ******" + else + init_show_parameters="${init_show_parameters} "$1 + fi + init_parameters="${init_parameters} "$1 +} + +function check_os() +{ + if [ ${ulimit_flag} -eq 0 ] + then + ulimit -SHn 1000000 + if [ $? -ne 0 ] + then + echo "error: failed to set the maximum number of open files, an exception occurs when the ulimit -SHn 1000000 command is executed.." + exit 1 + fi + sed -i "/.*ulimit\\s*-SHn/d" ${env_file} + echo "ulimit -SHn 1000000" >> ${env_file} + fi + # check shm + local shared_buffers=1073741824 # 1G + local shmmax=$(cat /proc/sys/kernel/shmmax) + env test ${shared_buffers} -gt ${shmmax} && echo "shared_buffers(1073741824) must be less than shmmax($shmmax), Please check it (vim /proc/sys/kernel/shmmax)." && exit 1 + local shmall=$(cat /proc/sys/kernel/shmall) + local pagesize=$(getconf PAGESIZE) + + if [ $((${shmall}/1024/1024/1024*${pagesize})) -ne 0 ]; then + if [ $((${shared_buffers}/1024/1024/1024-${shmall}/1024/1024/1024*${pagesize})) -gt 0 ]; then + die "the usage of the device [Shared_buffers] space(1073741824) cannot be greater than shmall*PAGESIZE($shmall*$pagesize), Please check it (vim /proc/sys/kernel/shmmall)." + fi + fi + # check sem + local -a sem + local -i index=0 + local max_connection=5000 + local conn_floor + for line in $(cat /proc/sys/kernel/sem) + do + sem[index]=${line} + index=$[ ${index} + 1 ] + done + if [ ${sem[0]} -lt 17 ] + then + die "the maximum number of SEMMSL is not correct, please change it (/proc/sys/kernel/sem)), ensure that the value of first sem is greater than 17." + fi + let conn_floor=(${max_connection}+150)/16 + if [ ${sem[3]} -lt ${conn_floor} ] + then + die "the maximum number of SEMMNI is not correct, please change it (/proc/sys/kernel/sem)), ensure that the value of fourth sem is greater than 320." + fi + + let conn_floor=${conn_floor}*17 + if [ ${sem[1]} -lt ${conn_floor} ] + then + die "the maximum number of SEMMNS is not correct, please change it (/proc/sys/kernel/sem)), ensure that the value of second sem is greater than 5500." + fi + info "[check install env and os setting success.]" +} + +function check() +{ + if [[ "${mode_type}" != "single" && ""${replconninfo_flag} -ne 0 ]] + then + die "the parameter -C|--dn-guc["replconninfo='value'"] can not be empty." + fi +} + +function decompress() +{ + cd $root_path + # get OS distributed version. 
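+    # The release file yields an upper-cased distribution tag that is later matched
+    # against the platform part of the package name, e.g. (assumed file name):
+    #   GaussDB-Kernel-xxx-OPENEULER-64bit.bin  ->  kernel=OPENEULER, platform=64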
+ kernel="" + if [ -f "/etc/euleros-release" ] + then + kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr a-z A-Z) + if [ "${kernel}" = "EULEROS" ] + then + kernel="EULER" + fi + elif [ -f "/etc/openEuler-release" ] + then + kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr a-z A-Z) + elif [ -f "/etc/centos-release" ] + then + kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr a-z A-Z) + else + kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr a-z A-Z) + fi + log "kernel: ${kernel}" + + # detect platform information. + platform=32 + bit=$(getconf LONG_BIT) + if [ "$bit" -eq 64 ] + then + platform=64 + fi + bin_name="GaussDB-Kernel-.*-${kernel}-${platform}bit" + bin_res=$(ls -a | grep -E "${bin_name}.bin") + if [ "${bin_res}" = "" ] + then + die "can not find suitable bin file, expected bin file is ${bin_name}.bin" + fi + log "bin file: ${bin_res}" + + bin_name=$(echo "$bin_res" | sed 's/.bin//g') + sha_res=$(ls -a | grep -E "${bin_name}.sha256") + if [ "${sha_res}" = "" ] + then + die "can not find suitable verification file, expected bin file is ${bin_name}.sha256" + fi + log "verification file: ${sha_res}" + sha256sum "${bin_name}.bin" | awk -F" " '{print $1}' > "${bin_name}-check.sha256" + if [ $? -ne 0 ] + then + die "check integrality of bin file failed" + fi + sha_res=$(sha256sum -t ${bin_name}.sha256 | awk -F" " '{print $1}') + check_res=$(sha256sum -t ${bin_name}-check.sha256 | awk -F" " '{print $1}') + if [ "${sha_res}" != "${check_res}" ] + then + die "computed checksum did not match" + fi + rm -rf "${bin_name}-check.sha256" + cp ${bin_res} ${app_path} && cp version.cfg ${app_path} + if [ $? -ne 0 ] + then + die "copy binary files *.bin and version.cfg to install path error" + fi + cd ${app_path} + ./*.bin + if [ $? -ne 0 ] + then + die "decompress binary files (*.bin) error" + fi + rm -rf ./*.bin +} + +function set_environment() +{ + if [ "$2" = "app" ] + then + # set GAUSSHOME + sed -i "/.*export\\s*GAUSSHOME=/d" ${env_file} + echo "export GAUSSHOME=${app_path}" >> ${env_file} + # set PATH and LD_LIBRARY_PATH + sed -i "/.*export\\s*PATH=/d" ${env_file} + echo "export PATH=${app_path}/bin:"'$PATH' >> ${env_file} + log "export PATH=${app_path}/bin:$PATH >> ${env_file}" + sed -i "/.*export\\s*LD_LIBRARY_PATH=/d" ${env_file} + echo "export LD_LIBRARY_PATH=${app_path}/lib:"'$LD_LIBRARY_PATH' >> ${env_file} + log "export LD_LIBRARY_PATH=${app_path}/lib:$LD_LIBRARY_PATH >> ${env_file}" + info "[set GAUSSHOME environment variables success.]" + elif [ "$2" = "data" ] + then + # set GAUSSDATA + sed -i "/.*export\\s*GAUSSDATA=/d" ${env_file} + echo "export GAUSSDATA=${data_path}" >> ${env_file} + info "[set GAUSSDATA environment variables success.]" + elif [ "$2" = "log" ] + then + # set GAUSSLOG + sed -i "/.*export\\s*GAUSSLOG=/d" ${env_file} + echo "export GAUSSLOG=${log_path}" >> ${env_file} + info "[set GAUSSLOG environment variables success.]" + fi +} + +function init_db() +{ + cd ${app_path}/bin + if [ ! -e gs_initdb ] + then + die "no gauss installation file is found in app-path. check the installation directory." + fi + + info "cmd : gs_initdb -D ${data_path} --nodename=${nodename} ${init_show_parameters}" + ./gs_initdb -D ${data_path} --nodename=${nodename} ${init_parameters} | tee -a ${log_file} + if [ ${PIPESTATUS[0]} -ne 0 ] + then + die "[init primary datanode failed.]" + else + log "[init primary datanode success.]" + fi +} + +function config_db() +{ + cd ${app_path}/bin + if [ ! 
-e gs_guc ] + then + die "no gauss installation file is found in app-path. check the installation directory." + fi + cmd="./gs_guc set -D ${data_path} ${config_parameters} -c \"application_name='${nodename}'\" " + info "cmd : ${cmd}" + eval ${cmd} | tee -a ${log_file} + if [ ${PIPESTATUS[0]} -ne 0 ] + then + die "config datanode failed, pleae check whether the database is instantiated properly." + fi + listen_host_flag=$(echo ${config_parameters} | grep "listen_addresses") + if [ "${listen_host_flag}" = "" ] + then + localhostscount=${#localhosts[*]} + index=0 + if [ ${localhostscount} -eq 0 ] + then + sed -i "/^#listen_addresses/c\listen_addresses = 'localhost,${localips[0]}'" ${data_path}/postgresql.conf + log "sed -i /^#listen_addresses/c\listen_addresses = 'localhost,${localips[0]}' ${data_path}/postgresql.conf" + else + listen_addresses="" + while [ ${index} -lt ${localhostscount} ] + do + listen_addresses="${listen_addresses}"",""${localhosts[${index}]}" + index=$[ $index + 1 ] + done + sed -i "/^#listen_addresses/c\listen_addresses = 'localhost ${listen_addresses}'" ${data_path}/postgresql.conf + log "sed -i /^#listen_addresses/c\listen_addresses = 'localhost ${listen_addresses}' ${data_path}/postgresql.conf" + fi + + fi + + remote_read_mode_flag=$(echo ${config_parameters} | grep "remote_read_mode") + if [ "${remote_read_mode_flag}" = "" ] + then + sed -i "/.*remote_read_mode = non_authentication/d" ${data_path}/postgresql.conf + echo "remote_read_mode = non_authentication" | tee -a ${data_path}/postgresql.conf + log "echo remote_read_mode = non_authentication | tee -a ${data_path}/postgresql.conf" + fi + + mask_length=32 + localhostscount=${#localhosts[*]} + index=0 + if [ ${localhostscount} -eq 0 ] + then + is_valid_ipv6 localips[0] + if [ $? -eq 0 ] + then + mask_length=128 + fi + sed -i "/.*host\\s*all\\s*all\\s*${localips[0]}\/${mask_length}\\s*trust/d" ${data_path}/pg_hba.conf + echo "host all all ${localips[0]}/${mask_length} trust" | tee -a ${data_path}/pg_hba.conf + log "echo host all all ${localips[0]}/${mask_length} trust | tee -a ${data_path}/pg_hba.conf" + else + while [ ${index} -lt ${localhostscount} ] + do + is_valid_ipv6 ${localhosts[${index}]} + if [ $? -eq 0 ] + then + mask_length=128 + else + mask_length=32 + fi + sed -i "/.*host\\s*all\\s*${user}\\s*${localhosts[${index}]}\/${mask_length}\\s*trust/d" ${data_path}/pg_hba.conf + echo "host all ${user} ${localhosts[${index}]}/${mask_length} trust" | tee -a ${data_path}/pg_hba.conf + log "echo host all ${user} ${localhosts[${index}]}/${mask_length} trust | tee -a ${data_path}/pg_hba.conf" + index=$[ $index + 1 ] + done + fi + + if [ "${mode_type}" != "single" ] + then + remoteipcount=${#remoteips[*]} + index=0 + while [ ${index} -lt ${remoteipcount} ] + do + is_valid_ipv6 ${remoteips[${index}]} + if [ $? -eq 0 ] + then + mask_length=128 + else + mask_length=32 + fi + sed -i "/.*host\\s*all\\s*${user}\\s*${remoteips[${index}]}\/${mask_length}\\s*trust/d" ${data_path}/pg_hba.conf + echo "host all ${user} ${remoteips[${index}]}/${mask_length} trust" | tee -a ${data_path}/pg_hba.conf + log "echo host all ${user} ${remoteips[${index}]}/${mask_length} trust | tee -a ${data_path}/pg_hba.conf" + index=$[ $index + 1 ] + done + fi + log "[config datanode success.]" +} + +function guc_db() +{ + cd ${app_path}/bin + if [ ! -e gs_guc ] + then + die "no gauss installation file is found in app-path. check the installation directory." 
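+    # guc_db applies each non-empty entry of ${guc_file} as one batched
+    # 'gs_guc set -D <data_path> -c "key=value"' call; when no -f|--guc-file is given,
+    # start() falls back to ${root_path}/opengauss_lite.conf shipped alongside this script.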
+ fi + guc_config_parameters="" + for line in $(cat ${guc_file} | sed '/^$/d') + do + guc_config_parameters="${guc_config_parameters}"" -c \"${line}\"" + done + cmd="./gs_guc set -D ${data_path} ${guc_config_parameters}" + info "cmd : ${cmd}" + eval ${cmd} | tee -a ${log_file} + if [ ${PIPESTATUS[0]} -ne 0 ] + then + warn "guc config failed, you need to manually configure the GUC." + else + log "guc config success." + fi +} + +function cert_db() +{ + cd ${app_path}/bin + if [ ! -e gs_guc ] + then + die "no gauss installation file (gs_guc) is found in app-path, + check whether OpenGauss has been properly installed in the installation directory." + fi + for filename in server.crt server.key cacert.pem server.key.cipher server.key.rand + do + if ! cp ${cert_path}/${filename} ${data_path} ; then + die "copy ${cert_path}/${filename} to ${data_path} failed" + fi + done + guc_config_parameters=" -c \"ssl=on\" -c \"ssl_ciphers='ALL'\" + -c \"ssl_cert_file='server.crt'\" -c \"ssl_key_file='server.key'\" + -c \"ssl_ca_file='cacert.pem'\" " + cmd="./gs_guc set -D ${data_path} ${guc_config_parameters}" + info "the command of guc config is : ${cmd}" + eval ${cmd} | tee -a ${log_file} + if [ ${PIPESTATUS[0]} -ne 0 ] + then + die "guc config failed, execute cmd ${cmd} failed." + fi + + if [ "${client_ip}" != "" ] + then + is_valid_ipv4 "${client_ip}" + if [ $? -ne 0 ] + then + is_valid_ipv6 "${client_ip}" + if [ $? -ne 0 ] + then + die "the client ip address ${client_ip} is invalid." + fi + fi + cmd="./gs_guc set -D ${data_path} \"-h hostssl all all ${client_ip}/32 cert\" " + info "the command of setting client ip is : ${cmd}" + eval ${cmd} | tee -a ${log_file} + if [ ${PIPESTATUS[0]} -ne 0 ] + then + warn "failed to configure the client IP address." + fi + fi + log "the ssl certificate of the database is configured successfully." +} + +function start_db() +{ + cd ${app_path}/bin + if [ ! -e gs_ctl ] + then + die "no gauss installation file is found in app-path, check the installation directory." + fi + info "cmd: gs_ctl ${action} -D ${data_path} ${mode} | tee -a ${log_file}" + ./gs_ctl ${action} -D ${data_path} ${mode} | tee -a ${log_file} + if [ ${PIPESTATUS[0]} -ne 0 ] + then + die "start datanode failed, maybe the database not be properly installed. please refer to install.log for more detailed information." + else + log "start datanode success." + sleep 5s + eval "gs_ctl query -D ${data_path}" + fi +} + +while [ $# -gt 0 ] +do +case "$1" in + -h|--help) + usage + exit 1 + ;; + -m|--mode) + if [ "$2" = "" ] + then + die "the parameter '-m|--mode' cannot be empty." + fi + mode_type=$2 + if [[ "${mode_type}" != "single" && "${mode_type}" != "primary" && "${mode_type}" != "standby" ]] + then + die "only three modes are supported: single、primary、and standby, default single" + fi + shift 2 + ;; + -n|--nodename) + if [ "$2" = "" ] + then + die "the parameter '-n|--name' cannot be empty." + fi + security_check "$2" + nodename=$2 + shift 2 + ;; + -D|--data-path) + if [ "$2" = "" ] + then + die "the parameter '-D|--data-path' cannot be empty." + fi + data_path=$2 + if [ ${data_path:0:1} != "/" ] + then + data_path="$(pwd)/${data_path}" + fi + shift 2 + ;; + -R|--app-path) + if [ "$2" = "" ] + then + die "the parameter '-R|--app-path' cannot be empty." + fi + app_path=$2 + if [ ${app_path:0:1} != "/" ] + then + app_path="$(pwd)/${app_path}" + fi + shift 2 + ;; + -P|--gsinit-parameter) + if [ "$2" = "" ] + then + die "the parameter '-P|--gsinit-parameter' cannot be empty." 
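+        # -P values accumulate into ${init_parameters} and are handed to gs_initdb
+        # verbatim by init_db; a typical invocation (hypothetical paths and values):
+        #   sh install.sh -D /data/dn1 -R /opt/gaussdb -P "--locale=en_US.UTF-8" -P "-w Gauss_234"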
+ fi + tmp_parameter=$(echo "$2" | sed "s#'##g" | sed 's#"##g') + tmp_passwd_flag=$(echo "${tmp_parameter}" | grep -E "\-w\s+|\-\-pwpasswd=") + if [ "${tmp_passwd_flag}" = "" ] + then + security_check "$2" + fi + check_init_paramters "$2" + shift 2 + ;; + -l|--log-path) + if [ "$2" = "" ] + then + die "the parameter '-l|--log-path' cannot be empty." + fi + log_path="$2" + if [ ${log_path:0:1} != "/" ] + then + log_path="$(pwd)/${log_path}" + fi + shift 2 + ;; + -f|--guc-file) + if [ "$2" = "" ] + then + die "the parameter '-f|--guc-file' cannot be empty." + fi + guc_file="$2" + if [ ${guc_file:0:1} != "/" ] + then + guc_file="$(pwd)/${guc_file}" + fi + check_red_permission "${guc_file}" + shift 2 + ;; + -C|--dn-guc) + if [ "$2" = "" ] + then + die "the parameter '-C|--dn-guc' cannot be empty." + fi + security_check "$2" + check_conf_paramters "$2" + shift 2 + ;; + --env-sep-file) + if [ "$2" = "" ] + then + die "the parameter '--env-sep-file' cannot be empty." + fi + env_file=$2 + if [ ${env_file:0:1} != "/" ] + then + env_file="$(pwd)/${env_file}" + fi + shift 2 + ;; + --start) + start=0 + shift 1 + ;; + --ulimit) + ulimit_flag=0 + shift 1 + ;; + --cert-path) + if [ "$2" = "" ] + then + die "the parameter '--cert-path' cannot be empty." + fi + cert_path=$2 + if [ ${cert_path:0:1} != "/" ] + then + cert_path="$(pwd)/${cert_path}" + fi + shift 2 + ;; + --ssl-client-ip) + if [ "$2" = "" ] + then + die "the parameter '--ssl-client-ip' cannot be empty." + fi + client_ip=$2 + shift 2 + ;; + *) + log "internal error: option processing error" 1>&2 + log "please input right paramtenter, the following command may help you" + log "sh install.sh --help or sh install.sh -h" + exit 1 +esac +done + +function env_file_set() +{ + if [ "${env_file}"X != ~/.bashrc"X" ] + then + if [ ! -e "${env_file}" ] + then + env_file_path=$(dirname ${env_file}) + mkdir -p ${env_file_path} + if [ $? -ne 0 ] + then + die "touch ${env_file} failed." + fi + touch "${env_file}" + chmod 700 "${env_file}" + if [ $? -ne 0 ] + then + die "an error occurred when assigning read/write/execute permission to the directory $1." + fi + fi + check_red_permission "${env_file}" + check_write_permission "${env_file}" + check_execute_permission "${env_file}" + sed -i "/.*export\\s*GAUSSENV=/d" "${env_file}" + echo "export GAUSSENV=${env_file}" >> "${env_file}" + fi +} +function app() +{ + if [ "${app_path}" = "" ] + then + app_path=$(cat ${env_file} | grep -e ".*export\\s*GAUSSHOME="| awk -F "=" '{print $2}') + fi + if [ "${app_path}" != "" ] + then + check_path "${app_path}" "install" + set_environment "${app_path}" "app" + if [ ${install_path_full_flag} -eq 1 ] + then + decompress + info "[begin decompressing binary files success.]" + else + info "[binary files already decompressed, pass.]" + fi + fi +} + +function data() +{ + if [ "${data_path}" = "" ] + then + data_path=$(cat ${env_file} | grep -e ".*export\\s*GAUSSDATA="| awk -F "=" '{print $2}') + fi + if [ "${data_path}" != "" ] + then + check_path "${data_path}" "data" + set_environment "${data_path}" "data" + fi +} + +function cert() +{ + if [ "${cert_path}" != "" ] + then + if [ ! -e "${cert_path}" ] + then + die "the certificate path does not exist." + fi + check_path "${cert_path}" "cert" + if [ -z $(ls -A "${cert_path}" | grep server.crt) ] + then + die "no matching SSL certificate file(server.crt) is found. as a result, + SSL may fail to be opened. Check ssl config carefully after the installation." 
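+    # cert_db (invoked from start()) copies server.crt, server.key, cacert.pem and the
+    # server.key.cipher/server.key.rand key files from --cert-path into the data
+    # directory and switches ssl on via gs_guc, e.g. (hypothetical):
+    #   sh install.sh ... --cert-path /opt/ssl --ssl-client-ip 192.168.0.20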
+ else + check_red_permission "${cert_path}/server.crt" + fi + if [ -z $(ls -A "${cert_path}" | grep server.key) ] + then + die "no matching SSL certificate file(server.key) is found. as a result, + SSL may fail to be opened. Check ssl config carefully after the installation." + fi + if [ -z $(ls -A "${cert_path}" | grep cacert.pem) ] + then + die "no matching SSL certificate file(cacert.pem) is found. as a result, + SSL may fail to be opened. Check ssl config carefully after the installation." + fi + fi +} + +function log_file_set() +{ + if [ "${log_path}" != "" ] + then + check_path "${log_path}" "log" + data_contain_log_flag=$(echo ${data_path} | grep "${log_path}") + if [ "${data_contain_log_flag}" != "" ] + then + die "log-path cannot be a subdirectory of data-path." + fi + set_environment ${log_path} log + fi +} + +function start() +{ + if [ "${mode_type}" = "primary" ] + then + mode="-M primary" + if [ "${nodename}" = "" ] + then + nodename="master" + fi + elif [ "${mode_type}" = "standby" ] + then + mode="-b full" + action="build" + if [ "${nodename}" = "" ] + then + nodename="slave" + fi + else + mode="-Z single_node" + if [ "${nodename}" = "" ] + then + nodename="single" + fi + fi + log "mode_type is ${mode_type} and mode is ${mode}, node name is ${nodename}" + # check whether the app-path and data-path paths are contained. + cd ${app_path} + if [ $? -ne 0 ] + then + die "cd app-path failed, no such file or directory." + fi + app_path=$(pwd) + cd ${data_path} + if [ $? -ne 0 ] + then + die "cd data-path failed, no such file or directory." + fi + data_path=$(pwd) + app_contain_data_flag=$(echo ${app_path} | grep "${data_path}") + data_contain_app_flag=$(echo ${data_path} | grep "${app_path}") + if [ "${app_contain_data_flag}" != "" ] + then + die "data-path cannot be a subdirectory of app-path." + fi + if [ "${data_contain_app_flag}" != "" ] + then + die "app-path cannot be a subdirectory of data-path." + fi + if [ ${data_full_flag} -ne 0 ] + then + if [ ${interactive_passwd_flag} -eq 1 ] + then + if [ "${password}" = "" ] + then + index=0 + while [ "${password}" = "" ] + do + if [ ${index} -gt 2 ] + then + die "maximum number of retries exceeded" + fi + hide_password 0 + echo -e "\n" + hide_password 1 + echo -e "\n" + if [ "${password}" != "${password_check}" ] + then + info "the two passwords entered are inconsistent, please retry." + index=$[ ${index} + 1 ] + password="" + continue + fi + check_passwd "${password}" + if [ $? -ne 0 ] + then + die "install gaussdb failed, the password can contain only 8 to 32 characters + and at least three types of the following characters: uppercase letters, lowercase letters, digits, and special characters." + fi + index=$[ ${index} + 1 ] + done + init_parameters="${init_parameters} -w ${password}" + init_show_parameters="${init_show_parameters} ""-w ******" + else + check_passwd "${password}" + if [ $? -ne 0 ] + then + die "install gaussdb failed, the password can contain only 8 to 32 characters + and at least three types of the following characters: uppercase letters, lowercase letters, digits, and special characters." 
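+            # password precedence: a value piped on stdin (read -t 1 at startup) is
+            # used as-is; otherwise, unless -w/--pwpasswd or -W was passed via -P,
+            # the user is prompted with terminal echo disabled, up to three attempts.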
+ fi + init_parameters="${init_parameters} -w ${password}" + init_show_parameters="${init_show_parameters} ""-w ******" + fi + fi + init_db + data_full_flag=0 + info "[install datanode success.]" + fi + + if [ "${config_parameters}" != "" ] + then + config_db + info "[config datanode success.]" + fi + + if [ "$guc_file" = "" ] + then + if [ -e "${root_path}/opengauss_lite.conf" ] + then + guc_file="${root_path}/opengauss_lite.conf" + fi + fi + + if [ "${guc_file}" != "" ] + then + guc_db + fi + + if [ "${cert_path}" != "" ] + then + cert_db + fi + + if [ ${start} -eq 0 ] + then + start_db + fi +} + +function main() +{ + check_os + data + if [ "${app_path}" != "" -a "${data_path}" != "" ] + then + if [ ${data_full_flag} -ne 0 ] + then + check + fi + fi + env_file_set + cert + app + log_file_set + source ${env_file} + if [ "${app_path}" != "" -a "${data_path}" != "" ] + then + start + fi + info "run cmd 'source ${env_file}' to make the environment variables take effect." +} +main +exit 0 \ No newline at end of file diff --git a/liteom/opengauss_lite.conf b/liteom/opengauss_lite.conf new file mode 100644 index 000000000..899e79815 --- /dev/null +++ b/liteom/opengauss_lite.conf @@ -0,0 +1,26 @@ +thread_pool_attr='16,1,(nobind)' +enable_asp=off +enable_ustore=off +asp_sample_num=10 +enable_incremental_checkpoint=off +enable_double_write=off +shared_buffers=64MB +cstore_buffers=16MB +num_internal_lock_partitions='CLOG_PART=1,CSNLOG_PART=1,LOG2_LOCKTABLE_PART=4,TWOPHASE_PART=1,FASTPATH_PART=20' +max_locks_per_transaction=64 +max_prepared_transactions=0 +audit_file_remain_threshold=1024 +wal_buffers=16MB +segment_buffers=16 +max_connections=200 +track_activity_query_size=200 +data_replicate_buffer_size=4096 +max_inner_tool_connections=10 +enable_stmt_track=off +use_workload_manager=off +enable_instr_rt_percentile=off +enable_wdr_snapshot=off +enable_global_syscache=on +enable_thread_pool=on +global_syscache_threshold=64MB +local_syscache_threshold=16MB \ No newline at end of file diff --git a/liteom/uninstall.sh b/liteom/uninstall.sh new file mode 100644 index 000000000..5039acdb6 --- /dev/null +++ b/liteom/uninstall.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# -*- coding:utf-8 -*- +############################################################################# +# Copyright (c): 2021, Huawei Tech. Co., Ltd. 
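+# Usage (illustrative):
+#   sh uninstall.sh                 stop gaussdb processes only
+#   sh uninstall.sh --delete-data   also remove the GAUSSDATA/GAUSSHOME/GAUSSLOG
+#                                   directories and scrub the exported variables
+#                                   from the environment file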
+# FileName : uninstall.sh +# Version : V1.0.0 +# Date : 2021-04-17 +# Description : the script used to uninstall the single cluster on one machine +######################################### + +declare user=$(whoami) +if [ X"$user" = X"root" ]; then + echo "error: can not uninstall gauss with root" + exit 1 +fi +declare delete_data="false" +declare root_path=$(cd $(dirname $0);pwd) + +declare log_file="${root_path}/uninstall.log" +if [ -e "${log_file}" ] +then + cat /dev/null > ${log_file} +else + touch ${log_file} +fi + +function usage() +{ + echo " +Usage: $0 [OPTION] +Arguments: + --delete-data delete data path and program path + -h|--help show this help, then exit + " +} + +function info() +{ + echo "$1" >> ${log_file} + echo -e "\033[32minfo:\033[0m$1" +} + +function log() +{ + echo "$1" >> ${log_file} + echo "$1" +} + +function die() +{ + echo "$1" >> ${log_file} + echo -e "\033[31m$1\033[0m" + exit 1 +} + +while [ $# -gt 0 ] +do +case "$1" in + -h|--help) + usage + exit 1 + ;; + --delete-data) + delete_data="true" + shift 1 + ;; + *) + echo "internal error: option processing error" 1>&2 + echo "please input right paramtenter, the following command may help you" + echo "sh uninstall.sh --help or sh uninstall.sh -h" + exit 1 +esac +done + +log "delete-data is ${delete_data}" + +function uninstall() { + log "cleaning up related processes" + pids=$(ps -u $user | grep gaussdb | awk '{print $1}') + if [ "${pids}" != "" ] + then + kill -9 $pids + log "clean up related processes $pids" + fi + log "clean up related processes success" +} + +function set_environment() { + env_file=$(echo $GAUSSENV) + if [ "${env_file}" = "" ] + then + env_file=~/.bashrc + fi + source ${env_file} + data_path=$(echo $GAUSSDATA) + if [[ -n "${data_path}" && -e "${data_path}" ]] + then + rm -rf ${data_path} + fi + + app_path=$(echo $GAUSSHOME) + if [[ -n "${app_path}" && -e "${app_path}" ]] + then + rm -rf ${app_path} + fi + + log_path=$(echo $GAUSSLOG) + if [[ -n "${log_path}" && -e "${log_path}" ]] + then + rm -rf ${log_path} + fi + + # set GAUSSHOME and GAUSSDATA + sed -i "/.*export\\s*GAUSSHOME=/d" ${env_file} + sed -i "/.*export\\s*GAUSSDATA=/d" ${env_file} + sed -i "/.*export\\s*GAUSSLOG=/d" ${env_file} + sed -i "/.*export\\s*GAUSSENV=/d" ${env_file} + sed -i "/.*ulimit\\s*-SHn\\s*1000000/d" ${env_file} + # set PATH and LD_LIBRARY_PATH GS_CLUSTER_NAME + sed -i "/.*export\\s*PATH=/d" ${env_file} + sed -i "/.*export\\s*LD_LIBRARY_PATH=/d" ${env_file} +} + +uninstall + +if [ "${delete_data}" = "true" ] +then + set_environment +fi +exit 0 \ No newline at end of file diff --git a/liteom/upgrade_GAUSSV5.sh b/liteom/upgrade_GAUSSV5.sh new file mode 100644 index 000000000..54cce4d9d --- /dev/null +++ b/liteom/upgrade_GAUSSV5.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# Copyright (c) Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. +# date: 2021-12-22 +# version: 1.0 + +if ! 
source ~/.bashrc; then + echo "Error: check ~/.bashrc first" + exit 1 +fi +CURRENT_DIR=$( + cd $(dirname $0) + pwd +) + +source ${CURRENT_DIR}/upgrade_config.sh +source ${CURRENT_DIR}/upgrade_common.sh + +function main() { + check_and_init + parse_cmd_line $@ + case "${action}" in + upgrade_pre) + upgrade_pre + exit 0 + ;; + upgrade_bin) + upgrade_bin + exit 0 + ;; + upgrade_post) + upgrade_post + exit 0 + ;; + rollback_pre) + rollback_pre + exit 0 + ;; + rollback_bin) + rollback_bin + exit 0 + ;; + rollback_post) + rollback_post + exit 0 + ;; + upgrade_commit) + upgrade_commit + exit 0 + ;; + query_start_mode) + query_start_mode + exit 0 + ;; + switch_over) + switch_over + exit 0 + ;; + *) + log "please input right parameter, the following command may help you" + log "sh upgrade_GAUSSV5.sh --help or sh upgrade_GAUSSV5.sh -h" + die "Must input parameter -t action" ${err_parameter} + ;; + esac +} +main $@ diff --git a/liteom/upgrade_common.sh b/liteom/upgrade_common.sh new file mode 100644 index 000000000..81a4f35d6 --- /dev/null +++ b/liteom/upgrade_common.sh @@ -0,0 +1,1210 @@ +#!/bin/bash +# Copyright (c) Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. +# date: 2021-12-22 +# version: 1.0 + +dn_role="" +min_disk="" +old_version="" +new_version="" +old_cfg="" +big_cfg="" +binfile="" + +upgrade_path=$( + cd $(dirname $0) + pwd +) +source ${upgrade_path}/upgrade_errorcode.sh + +function check_and_init() { + check_config + check_user + check_cmd_conflict +} + +function check_config() { + #check and init GAUSS_LOG_FILE + if [[ X"$GAUSS_LOG_PATH" = X ]]; then + GAUSS_LOG_FILE="$upgrade_path"/upgrade.log + else + check_config_path "$GAUSS_LOG_PATH" + GAUSS_LOG_FILE="$GAUSS_LOG_PATH"/upgrade.log + fi + log_dir=$(dirname "$GAUSS_LOG_FILE") + if [[ ! -d "$log_dir" ]]; then + if ! mkdir -p -m 700 "$log_dir"; then + echo "mkdir -p -m 700 ${log_dir} failed" + exit ${err_check_init} + fi + fi + if touch "$GAUSS_LOG_FILE" && chmod 600 "$GAUSS_LOG_FILE"; then + echo "check log file is ok" >>"$GAUSS_LOG_FILE" + else + echo "touch $GAUSS_LOG_FILE && chmod 600 $GAUSS_LOG_FILE failed" + exit ${err_check_init} + fi + check_env + #check GAUSS_LISTEN_PORT + if [[ X"$GAUSS_LISTEN_PORT" = X ]]; then + GAUSS_LISTEN_PORT=$(grep -E "^[ ]{0,}port" "$PGDATA"/postgresql.conf | sed 's/\s*//g' | awk -F# '{print $1}' | awk -F= '{print $2}') + fi + if [[ X"$GAUSS_LISTEN_PORT" = X ]]; then + GAUSS_LISTEN_PORT=5432 + debug "The value of port cannot be obtained from configuration, use default value 5432" + fi + if ! echo "$GAUSS_LISTEN_PORT" | grep -Eqw "[0-9]{4,5}"; then + die "GAUSS_LISTEN_PORT may be not right" ${err_check_init} + fi + + #check GAUSS_ADMIN_USER + if [[ X"$GAUSS_ADMIN_USER" = X ]]; then + GAUSS_ADMIN_USER=$(whoami) + else + check_config_user "$GAUSS_ADMIN_USER" + fi + + #check GAUSS_UPGRADE_BASE_PATH and cd in + if [[ X"$GAUSS_UPGRADE_BASE_PATH" = X ]]; then + GAUSS_UPGRADE_BASE_PATH=${upgrade_path} + fi + check_config_path "$GAUSS_UPGRADE_BASE_PATH" + if [[ ! -d "$GAUSS_UPGRADE_BASE_PATH" ]]; then + die "$GAUSS_UPGRADE_BASE_PATH must exist" ${err_check_init} + fi + if ! 
cd "$GAUSS_UPGRADE_BASE_PATH"; then + die "Cannot access $GAUSS_UPGRADE_BASE_PATH" ${err_check_init} + fi + + #check GAUSS_SQL_TAR_PATH + if [[ X"$GAUSS_SQL_TAR_PATH" = X ]]; then + GAUSS_SQL_TAR_PATH=${GAUSS_UPGRADE_BASE_PATH} + fi + check_config_path "$GAUSS_SQL_TAR_PATH" + + #check GAUSS_BACKUP_BASE_PATH + if [[ X"$GAUSS_BACKUP_BASE_PATH" = X ]]; then + GAUSS_BACKUP_BASE_PATH=${GAUSS_UPGRADE_BASE_PATH}/backup + fi + check_config_path "$GAUSS_BACKUP_BASE_PATH" + if [[ ! -d "$GAUSS_BACKUP_BASE_PATH" ]]; then + if ! mkdir -p -m 700 "$GAUSS_BACKUP_BASE_PATH"; then + die "mkdir -p -m 700 $GAUSS_BACKUP_BASE_PATH failed" ${err_check_init} + fi + fi + + #check GAUSS_TMP_PATH + if [[ X"$GAUSS_TMP_PATH" = X ]]; then + GAUSS_TMP_PATH=${GAUSS_UPGRADE_BASE_PATH}/tmp + fi + check_config_path "$GAUSS_TMP_PATH" + if [[ ! -d "$GAUSS_TMP_PATH" ]]; then + if ! mkdir -p -m 700 "$GAUSS_TMP_PATH"; then + die "mkdir -p -m 700 $GAUSS_TMP_PATH failed" ${err_check_init} + fi + fi + + #check GAUSS_UPGRADE_BIN_PATH + if [[ X"$GAUSS_UPGRADE_BIN_PATH" != X ]]; then + check_config_path "$GAUSS_UPGRADE_BIN_PATH" + fi + + #check GAUSS_UPGRADE_SYNC_CONFIG_LIST + check_config_sync_path +} + +function usage() { + echo " +Usage: $0 [OPTION] +Arguments: + -h|--help show this help, then exit + -t upgrade_pre,upgrade_bin,upgrade_post,rollback_pre,rollback_bin,rollback_post,upgrade_commit + query_start_mode,switch_over + --min_disk reserved upgrade disk space in MB, default 2048 + -m|--mode normal、primary、standby and cascade_standby + " +} + +function debug() { + local current_time=$(date +"%Y-%m-%d %H:%M:%S") + echo "[$current_time]" "$1" >>"${GAUSS_LOG_FILE}" +} + +function log() { + local current_time=$(date +"%Y-%m-%d %H:%M:%S") + echo "[$current_time]" "$1" >>"${GAUSS_LOG_FILE}" + echo "$1" +} + +function die() { + local current_time=$(date +"%Y-%m-%d %H:%M:%S") + if [[ -f "${GAUSS_LOG_FILE}" ]];then + echo "[$current_time]" "$1" >>"${GAUSS_LOG_FILE}" + fi + echo -e "\033[31mError: $1\033[0m" + exit $2 +} + +function parse_cmd_line() { + if [[ $# -gt 6 ]];then + log "please input right parameter, the following command may help you" + log "sh upgrade_GAUSSV5.sh --help or sh upgrade_GAUSSV5.sh -h" + exit ${err_parameter} + fi + while [[ $# -gt 0 ]]; do + case "$1" in + -h | --help) + usage + exit 0 + ;; + -m | --mode) + if [[ "$2" = "" ]]; then + die "the parameter -m|--mode cannot be empty." ${err_parameter} + fi + dn_role=$2 + dn_role_list="normal primary standby cascade_standby" + if ! echo "$dn_role_list"|grep -wq "$dn_role"; then + die "only these modes are supported: normal,primary,standby and cascade_standby" ${err_parameter} + fi + shift 2 + ;; + -t) + if [[ "$2" = "" ]]; then + die "the parameter -t cannot be empty." ${err_parameter} + fi + action=$2 + action_list="upgrade_pre upgrade_bin upgrade_post rollback_pre rollback_bin rollback_post upgrade_commit query_start_mode switch_over" + if ! echo "$action_list"|grep -wq "$action"; then + die "only these actions are supported: upgrade_pre, upgrade_bin, upgrade_post, rollback_pre, \ +rollback_bin, rollback_post, upgrade_commit and query_start_mode switch_over" ${err_parameter} + fi + shift 2 + ;; + + --min_disk) + if [[ "$2" = "" ]]; then + die "the parameter --min_disk cannot be empty." 
${err_parameter} + fi + min_disk=$2 + if echo ${min_disk} | grep -q "[^0-9]";then + die "min_disk value must be int " ${err_parameter} + fi + if echo ${min_disk} | grep -q "^0";then + die "min_disk value cannot start with 0 " ${err_parameter} + fi + if [[ ${min_disk} -lt 2048 || ${min_disk} -gt 204800 ]]; then + die "min_disk value must be >= 2048 and <= 204800" ${err_parameter} + fi + shift 2 + ;; + *) + log "please input right parameter, the following command may help you" + log "sh upgrade_GAUSSV5.sh --help or sh upgrade_GAUSSV5.sh -h" + exit ${err_parameter} + ;; + esac + done + log "Parse cmd line successfully." + +} + +function check_user() { + user=$(whoami) + if [[ X"$user" == X"root" ]]; then + die "Can not exec the script with root!" ${err_check_init} + fi +} + +function check_env() { + if [[ "$GAUSSHOME" == "" ]]; then + die "GAUSSHOME cannot be null!" ${err_check_init} + fi + if [[ "$GAUSSDATA" == "" ]] && [[ "$PGDATA" == "" ]]; then + die "GAUSSDATA or PGDATA cannot be all null!" ${err_check_init} + fi + if [[ "$PGDATA" == "" ]]; then + PGDATA=${GAUSSDATA} + fi + if [[ "$GAUSSDATA" == "" ]]; then + GAUSSDATA=${PGDATA} + fi + check_config_path "$GAUSSHOME" + check_config_path "$GAUSSDATA" + check_config_path "$PGDATA" + log "Current env value: GAUSSHOME is $GAUSSHOME, PGDATA is $PGDATA." +} + +function check_config_path() { + local temp_value="$1" + if [[ "$temp_value" == *[\(\)\{\}\[\]\<\>\"\'\`\\\ \*\!\|\;\&\$\~\?]* ]];then + die "$temp_value may contain illegal characters" ${err_check_init} + fi + if echo "$temp_value"|grep -Eq "^/{1,}$"; then + die "path cannot be / " ${err_check_init} + fi +} + +function check_config_sync_path() { + if [[ "abc$GAUSS_UPGRADE_SYNC_CONFIG_LIST" == "abc" ]]; then + debug "GAUSS_UPGRADE_SYNC_CONFIG_LIST is null" + fi + if [[ "$GAUSS_UPGRADE_SYNC_CONFIG_LIST" == *[\(\)\{\}\[\]\<\>\"\'\`\\\ \*\!\|\;\&\$\~\?]* ]];then + die "$GAUSS_UPGRADE_SYNC_CONFIG_LIST may contain illegal characters" ${err_check_init} + fi + local array=(${GAUSS_UPGRADE_SYNC_CONFIG_LIST//,/ }) + for var in ${array[@]} + do + if echo "$var"|grep -Eq "^/"; then + die "path in ${GAUSS_UPGRADE_SYNC_CONFIG_LIST} must not be start with /" ${err_check_init} + fi + done + +} + +function check_config_user() { + local temp_value="$1" + if [[ "$temp_value" == *[\(\)\{\}\[\]\<\>\"\'\`\\\ \*\!\|\;\&\$\~\?/]* ]];then + die "$temp_value may contain illegal characters" ${err_check_init} + fi +} + +function check_version() { + if [[ ! -f "${GAUSSHOME}/version.cfg" ]]; then + die "Cannot find current version.cfg!" ${err_upgrade_pre} + else + old_version=$(tail -n 1 "$GAUSSHOME"/version.cfg) + old_cfg=$(sed -n 2p "$GAUSSHOME"/version.cfg | sed 's/\.//g') + fi + if [[ -f "$GAUSS_UPGRADE_BIN_PATH"/version.cfg ]]; then + new_version_cfg_path="${GAUSS_UPGRADE_BIN_PATH}/version.cfg" + elif [[ -f "$GAUSS_UPGRADE_BASE_PATH"/version.cfg ]]; then + new_version_cfg_path="${GAUSS_UPGRADE_BASE_PATH}/version.cfg" + else + die "Cannot find new version.cfg!" ${err_upgrade_pre} + fi + + new_version=$(tail -n 1 "$new_version_cfg_path") + new_cfg=$(sed -n 2p "$new_version_cfg_path" | sed 's/\.//g') + + if [[ X"$old_version" == X || X"$old_cfg" == X || X"$new_version" == X || X"$new_cfg" == X ]]; then + die "Maybe version.cfg is not normal" ${err_upgrade_pre} + fi + if ! echo "$old_cfg"|grep -Ewq "[0-9]{3,6}";then + die "Maybe version.cfg is not normal" ${err_upgrade_pre} + fi + if ! 
echo "$new_cfg"|grep -Ewq "[0-9]{3,6}";then + die "Maybe version.cfg is not normal" ${err_upgrade_pre} + fi + + if [[ "$old_version" == "$new_version" ]]; then + die "New version is same as old, the commitId is $old_version!" ${err_version_same} + fi + if [[ ${new_cfg} -lt ${old_cfg} ]]; then + die "Current version is newer!" ${err_upgrade_pre} + fi + big_cfg="False" + if [[ ${new_cfg} -gt ${old_cfg} ]]; then + log "Big upgrade is needed!" + big_cfg="True" + fi + + local flag_file="$GAUSS_TMP_PATH"/version_flag + if echo "old_version=$old_version" > "$flag_file" && chmod 600 "$flag_file"; then + debug "Begin to generate $flag_file" + else + die "Write $flag_file failed" ${err_upgrade_pre} + fi + if ! echo "new_version=$new_version" >> "$flag_file"; then + die "Write $flag_file failed" ${err_upgrade_pre} + fi + if ! echo "big_cfg=$big_cfg" >> "$flag_file"; then + die "Write $flag_file failed" ${err_upgrade_pre} + fi + if ! echo "old_cfg=$old_cfg" >> "$flag_file"; then + die "Write $flag_file failed" ${err_upgrade_pre} + fi + log "Old version commitId is $old_version, version info is $old_cfg" + log "New version commitId is $new_version, version info is $new_cfg" + + ##need version.cfg to check big upgrade,note user exec sql on primary dn +} + +function check_disk() { + avail_disk=$(df -BM "$GAUSS_UPGRADE_BASE_PATH" | tail -n 1 | awk '{print $4}') + avail_disk=${avail_disk:0:-1} + if [[ X"$min_disk" == "X" ]]; then + min_disk=2048 + fi + if [[ ${avail_disk} -lt ${min_disk} ]]; then + die "avail disk must be >= ${min_disk}MB, check with cmd: df -BM $GAUSSHOME!" ${err_check_init} + fi + log "Check available disk space successfully." +} + +function check_db_process() { + ps wwx | grep "$GAUSSHOME/bin/gaussdb" | grep -v grep > /dev/null +} + +function check_cmd_conflict() { + LOCKFILE="${GAUSS_TMP_PATH}/.lock_GAUSSDB_UPGRADE_V5" + if [[ -f "$LOCKFILE" ]] + then + pid=$(cat ${LOCKFILE}) + if [[ -n "$pid" ]];then + if ps -p ${pid} | grep -w ${pid} >/dev/null;then + die "Maybe upgrade_GAUSSV5.sh is running!" ${err_check_init} + fi + fi + fi + if ! echo $$ > "$LOCKFILE"; then + die "Write $LOCKFILE failed" ${err_check_init} + fi +} + +function check_pkg() { + if [[ -d "${GAUSS_UPGRADE_BIN_PATH}" ]]; then + local file_list=(bin etc include lib share version.cfg) + for temp_file in ${file_list[@]} + do + if [[ ! -e "${GAUSS_UPGRADE_BIN_PATH}/${temp_file}" ]];then + die "$GAUSS_UPGRADE_BIN_PATH may be not right, ${temp_file} not exits" ${err_upgrade_pre} + fi + done + if [[ $(ls "${GAUSS_UPGRADE_BIN_PATH}" |wc -l) -eq 6 ]]; then + log "The upgrade will use existing files in $GAUSS_UPGRADE_BIN_PATH" + return 0 + else + die "$GAUSS_UPGRADE_BIN_PATH may be not right,exits other files" ${err_upgrade_pre} + fi + + fi + #get OS distributed version. + kernel="" + if [[ -f "/etc/euleros-release" ]]; then + kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr a-z A-Z) + if [[ "${kernel}" = "EULEROS" ]]; then + kernel="EULER" + fi + elif [[ -f "/etc/openEuler-release" ]]; then + kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr a-z A-Z) + elif [[ -f "/etc/centos-release" ]]; then + kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr a-z A-Z) + else + kernel=$(lsb_release -d | awk -F ' ' '{print $2}' | tr a-z A-Z) + fi + log "kernel: ${kernel}" + + #detect platform information. 
+    platform=32
+    bit=$(getconf LONG_BIT)
+    if [[ "$bit" -eq 64 ]]; then
+        platform=64
+    fi
+    binname="GaussDB-Kernel-.*-${platform}bit"
+    binfile=$(ls -a | grep -E "${binname}.bin")
+    shafile=${binfile%.*}.sha256
+    if [[ ! -f "${binfile}" ]] || [[ ! -f "${shafile}" ]]; then
+        die "bin or sha256 file does not exist for the platform ${kernel}-${platform}bit!" ${err_upgrade_pre}
+    fi
+    sha_expect=$(cat ${shafile})
+    sha_current=$(sha256sum ${binfile} | awk '{print $1}')
+    if [[ "$sha_expect" != "$sha_current" ]]; then
+        die "The sha256 value of $binfile does not match $shafile!" ${err_upgrade_pre}
+    fi
+    if ! echo "binfile=$binfile" >>"$GAUSS_TMP_PATH"/version_flag; then
+        die "Write $GAUSS_TMP_PATH/version_flag failed" ${err_upgrade_pre}
+    fi
+    if [[ ! -f "$GAUSS_UPGRADE_BASE_PATH"/version.cfg ]]; then
+        die "version.cfg must exist in $GAUSS_UPGRADE_BASE_PATH" ${err_upgrade_pre}
+    fi
+}
+
+function bak_gauss() {
+    ## back up app & postgresql.conf & pg_hba.conf
+    if [[ -d "$GAUSS_BACKUP_BASE_PATH"/bak_bin_"$old_version" ]];then
+        rm -rf "$GAUSS_BACKUP_BASE_PATH"/bak_bin_"$old_version"
+    fi
+    if cp -rf "${GAUSSHOME}" "$GAUSS_BACKUP_BASE_PATH"/bak_bin_"$old_version"; then
+        log "Back up gausshome successfully."
+    else
+        die "Back up gausshome failed!" ${err_upgrade_pre}
+    fi
+
+    if ! cp -rf "$PGDATA"/postgresql.conf "$GAUSS_BACKUP_BASE_PATH"/postgresql.conf_"$old_version"; then
+        die "Back up postgresql.conf failed!" ${err_upgrade_pre}
+    else
+        log "Back up postgresql.conf successfully."
+    fi
+
+    if ! cp -rf "$PGDATA"/pg_hba.conf "$GAUSS_BACKUP_BASE_PATH"/pg_hba.conf_"$old_version"; then
+        die "Back up pg_hba.conf failed!" ${err_upgrade_pre}
+    else
+        log "Back up pg_hba.conf successfully."
+    fi
+
+}
+
+function decompress_pkg() {
+    if [[ -d "${GAUSS_UPGRADE_BIN_PATH}" ]]; then
+        return 0
+    fi
+    if [[ -d "$GAUSS_TMP_PATH"/install_bin_"$new_version" ]];then
+        rm -rf "$GAUSS_TMP_PATH"/install_bin_"$new_version"
+    fi
+    if mkdir -p -m 700 "$GAUSS_TMP_PATH"/install_bin_"$new_version"; then
+        log "begin to decompress pkg in $GAUSS_TMP_PATH/install_bin_$new_version"
+    fi
+    if [[ X"$binfile" == X ]]; then
+        binfile=$(grep binfile "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+        if [[ X"$binfile" == X ]]; then
+            die "binfile name cannot be null" ${err_upgrade_pre}
+        fi
+    fi
+
+    if cp "$binfile" "$GAUSS_TMP_PATH"/install_bin_"$new_version" && cd "$GAUSS_TMP_PATH"/install_bin_"$new_version" && chmod u+x "$binfile" \
+        && ./"$binfile" > /dev/null && rm -f "$binfile"; then
+
+        log "Decompress $binfile successfully."
+    else
+        die "Decompress $binfile failed" ${err_upgrade_pre}
+    fi
+
+    if cd "$GAUSS_UPGRADE_BASE_PATH" && cp "$GAUSS_UPGRADE_BASE_PATH"/version.cfg "$GAUSS_TMP_PATH"/install_bin_"$new_version"; then
+        log "cp version.cfg successfully"
+    else
+        die "cp version.cfg failed" ${err_upgrade_pre}
+    fi
+}
+
+function cp_pkg() {
+    if [[ X"$new_version" == X ]]; then
+        new_version=$(grep new_version "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+    fi
+    if [[ -d "$GAUSS_UPGRADE_BIN_PATH" ]]; then
+        new_bin_path="${GAUSS_UPGRADE_BIN_PATH}"
+    else
+        new_bin_path="$GAUSS_TMP_PATH"/install_bin_"$new_version"
+    fi
+    #check that the pkg's version.cfg matches version_flag
+    temppkg_version=$(tail -n 1 "$new_bin_path"/version.cfg)
+    if [[ "$new_version" != "$temppkg_version" ]]; then
+        die "pkg's version.cfg is not correct!" ${err_upgrade_bin}
+    fi
+    if [[ -d "$GAUSSHOME" ]];then
+        rm -rf "$GAUSSHOME"/*
+    fi
+    if cp -rf "$new_bin_path"/* "$GAUSSHOME"; then
+        log "Binfile upgrade to new version successfully."
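+        # note: $GAUSSHOME was emptied above, so any config listed in
+        # GAUSS_UPGRADE_SYNC_CONFIG_LIST must have been saved beforehand via
+        # cp_gauss_home_config_to_temp (see upgrade_bin_step4)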
+ else + die "Binfile upgrade to new version failed" ${err_upgrade_bin} + fi + +} + +function prepare_sql() { + #$1: upgrade,upgrade-post,rollback,rollback-post + #$2: maindb,otherdb + temp_old=${old_cfg} + temp_new=${new_cfg} + local action="$1" + local dbname="$2" + local tempfile="$GAUSS_TMP_PATH"/temp_sql/"temp_"${action}_${dbname}.sql + temp_file_num=0 + if echo "START TRANSACTION;set IsInplaceUpgrade = on;" > "$tempfile" && chmod 600 "$tempfile"; then + debug "Begin to generate $tempfile" + else + die "Write $tempfile failed" ${err_upgrade_pre} + fi + if ! echo "SET search_path = 'pg_catalog';SET local client_min_messages = NOTICE;SET local log_min_messages = NOTICE;" >> "$tempfile"; then + die "Write $tempfile failed" ${err_upgrade_pre} + fi + if ! echo "SET statement_timeout = 3600000;" >> "$tempfile"; then + die "Write $tempfile failed" ${err_upgrade_pre} + fi + if [[ "$action" == "upgrade" || "$action" == "upgrade-post" ]]; then + while [[ ${temp_old} -lt ${temp_new} ]]; do + ((temp_old=$temp_old+1)) + local upgrade_sql_file="upgrade_sql/upgrade_catalog_${dbname}/${action}_catalog_${dbname}_${temp_old:0:2}_${temp_old:2}.sql" + if [[ -f "$upgrade_sql_file" ]]; then + if ! cat "$upgrade_sql_file" >> "$tempfile"; then + die "Write $tempfile failed" ${err_upgrade_pre} + fi + debug "$upgrade_sql_file >> $tempfile" + ((temp_file_num=temp_file_num+1)) + fi + done + fi + if [[ "$1" == "rollback" || "$1" == "rollback-post" ]]; then + while [[ ${temp_new} -gt ${temp_old} ]]; do + local upgrade_sql_file="upgrade_sql/rollback_catalog_${dbname}/${action}_catalog_${dbname}_${temp_new:0:2}_${temp_new:2}.sql" + if [[ -f "$upgrade_sql_file" ]]; then + if ! cat "$upgrade_sql_file" >> "$tempfile"; then + die "Write $tempfile failed" ${err_upgrade_pre} + fi + debug "$upgrade_sql_file >>$tempfile" + ((temp_file_num=temp_file_num+1)) + fi + ((temp_new=$temp_new-1)) + done + fi + if ! echo "COMMIT;" >> "$tempfile";then + die "Write $tempfile failed" ${err_upgrade_pre} + fi + #file not meet requirements + if [[ ${temp_file_num} -eq 0 ]]; then + debug "No sql file for ${action} ${dbname}!" + rm -f "$tempfile" + else + debug "get ${temp_file_num} files for ${action} ${dbname}!" + fi +} + +function prepare_sql_all() { + local dir_temp_sql="$GAUSS_TMP_PATH"/temp_sql + local sql_tar_file="$GAUSS_SQL_TAR_PATH"/upgrade_sql.tar.gz + local sql_tar_sha="$GAUSS_SQL_TAR_PATH"/upgrade_sql.sha256 + + if [[ ! -f "${sql_tar_file}" ]] || [[ ! -f "${sql_tar_sha}" ]]; then + die "${sql_tar_file} or ${sql_tar_sha} not exit!" ${err_upgrade_pre} + else + local sha_expect=$(cat ${sql_tar_sha}) + local sha_current=$(sha256sum ${sql_tar_file} | awk '{print $1}') + if [[ "$sha_expect" != "$sha_current" ]]; then + die "The sha256 value of $sql_tar_file does not match $sql_tar_sha!" ${err_upgrade_pre} + fi + if [[ -d "$dir_temp_sql" ]];then + rm -rf "$dir_temp_sql" + fi + if mkdir -p -m 700 "$dir_temp_sql" && tar -zxf "$sql_tar_file" -C "$dir_temp_sql"; then + log "decompress upgrade_sql.tar.gz successfully." + else + die "decompress upgrade_sql.tar.gz failed" ${err_upgrade_pre} + fi + fi + #total 8 + cd "$dir_temp_sql" + for action in upgrade upgrade-post rollback rollback-post; do + for db_base in maindb otherdb; do + prepare_sql ${action} ${db_base} + done + done + cd ${GAUSS_UPGRADE_BASE_PATH} + +} + +function exec_sql() { + #$1: sqlfilename + #$2: maindb,otherdb + query_dn_role + if [[ X"$dn_role" == X"standby" || X"$dn_role" == X"cascade_standby" ]]; then + return 0 + fi + if [[ ! 
-f "$1" ]]; then + return 0 + fi + + if [[ X"$db_password" == X ]]; then + hide_password + fi + if ! check_upgrade_mode_by_sql;then + return 1 + fi + + tempresult="$GAUSS_TMP_PATH"/"temp_sql_tempresult_$(date +%Y%m%d_%H%M%S)" + if echo "" > "$tempresult" && chmod 600 "$tempresult"; then + debug "begin exec sql ,file name is $1" + else + log "Generate $tempresult failed." + fi + sqlbegin="gsql -p $GAUSS_LISTEN_PORT -U $GAUSS_ADMIN_USER --pipeline -h localhost -X -t -A " + if [[ "$2" == "maindb" ]]; then + if echo "$db_password" | ${sqlbegin} -d postgres --echo-queries --set ON_ERROR_STOP=on -f $1 >> "$tempresult" 2>&1;then + debug "Exec $1 on database: postgres successfully" + else + log "Exec sql on postgres failed." + debug "$(cat ${tempresult})" + rm -f ${tempresult} + return 1 + fi + else + if databases=$(echo ${db_password} | ${sqlbegin} -d postgres -c "SELECT datname FROM pg_catalog.pg_database where datname != 'postgres';");then + temp_num=$(echo ${databases}|awk '{print NF}') + debug "Num of other databases: $temp_num" + else + log "Exec sql to get databases failed." + return 1 + fi + for database in ${databases}; do + debug "Begin exec $1 on database: $database " + echo "$db_password" | ${sqlbegin} -d ${database} --echo-queries --set ON_ERROR_STOP=on -f $1 >> "$tempresult" 2>&1 + done + fi + if grep -wE "ERROR:|FATAL:|could not connect to server" ${tempresult}; then + log "Exec sql failed." + debug "$(cat ${tempresult})" + rm -f ${tempresult} + return 1 + else + debug "Exec all sql successfully." + rm -f ${tempresult} + return 0 + fi + +} + +function guc_delete() { + bak_ifs=$IFS + IFS=$'\n' + if [[ ! -f "$GAUSS_TMP_PATH"/temp_sql/upgrade_sql/set_guc/delete_guc ]]; then + log "No need to delete guc" + fi + for para in $(cat "$GAUSS_TMP_PATH"/temp_sql/upgrade_sql/set_guc/delete_guc); do + if echo ${para}|grep -w datanode > /dev/null;then + para=$(echo ${para}|awk '{print $1}') + if sed -i "/^${para}[ =]/d" ${PGDATA}/postgresql.conf; then + debug "$para was deleted successfully." + else + die "$para was deleted failed" ${err_upgrade_bin} + fi + fi + done + IFS=${bak_ifs} + log "Delete guc successfully" +} + +function stop_dbnode() { + if ! 
check_db_process; then + return 0 + fi + gs_ctl stop -D ${PGDATA} >>"${GAUSS_LOG_FILE}" 2>&1 +} + +function start_dbnode() { + start_cmd="gs_ctl start -D ${PGDATA} " + if [[ X"$dn_role" = X ]]; then + return 1 + fi + if [[ X"$dn_role" != X"normal" ]]; then + start_cmd="$start_cmd""-M $dn_role " + fi + if [[ X"$1" != X ]]; then + start_option="-o '-u $1' --single_node" + else + start_option="-o --single_node" + fi + + log "start gaussdb by cmd: $start_cmd $start_option" + ${start_cmd} "$start_option" >>"${GAUSS_LOG_FILE}" 2>&1 + +} + +function query_dn_role() { + gs_ctl query -D ${PGDATA} >"${GAUSS_TMP_PATH}/temp_dn_role" + dn_role_temp=$(grep local_role "${GAUSS_TMP_PATH}/temp_dn_role" | head -1 | awk '{print $3}') + rm -f "${GAUSS_TMP_PATH}/temp_dn_role" + + if [[ "$dn_role_temp" = "Normal" ]]; then + dn_role_temp="normal" + elif [[ "$dn_role_temp" = "Primary" ]]; then + dn_role_temp="primary" + elif [[ "$dn_role_temp" = "Standby" ]]; then + dn_role_temp="standby" + elif [[ "$dn_role_temp" = "Cascade" ]]; then + dn_role_temp="cascade_standby" + else + dn_role_temp="" + fi + + if [[ X"$dn_role" = X ]] && [[ X"$dn_role_temp" = X ]]; then + die "dn_role cannot be null" ${err_dn_role_null} + fi + if [[ X"$dn_role" = X ]]; then + dn_role="$dn_role_temp" + fi + if [[ X"$dn_role_temp" = X ]]; then + dn_role_temp="$dn_role" + fi + if [[ "$dn_role" != "$dn_role_temp" ]]; then + die "dn_role maybe not right" ${err_dn_role_null} + fi +} + +function hide_password() { + echo "input sql password:" + read -s db_password +} + +function reload_upgrade_config() { + if check_upgrade_config "$1" "$2"; then + return 0 + fi + local current_time=$(date +"%Y-%m-%d %H:%M:%S") + echo -n \[${current_time}\] " " >>"${GAUSS_LOG_FILE}" + for i in $(seq 1 3);do + if gs_guc reload -D ${PGDATA} -c "$1=$2" >>"${GAUSS_LOG_FILE}" 2>&1; then + return 0 + fi + sleep 2 + done + return 1 +} + +function set_upgrade_config() { + if check_upgrade_config "$1" "$2"; then + return 0 + fi + local current_time=$(date +"%Y-%m-%d %H:%M:%S") + echo -n \[${current_time}\] " " >>"${GAUSS_LOG_FILE}" + for i in $(seq 1 3);do + if gs_guc set -D ${PGDATA} -c "$1=$2" >>"${GAUSS_LOG_FILE}" 2>&1; then + debug "guc set $1=$2 successfully" + return 0 + fi + sleep 2 + done + return 1 +} + +function check_upgrade_config() { + local tempfile="$GAUSS_TMP_PATH"/".temp_check_guc_value" + if gs_guc check -D ${PGDATA} -c "$1" > "$tempfile" 2>&1 ;then + tempvalue=$(cat "$tempfile"|tail -2|head -1|sed 's/[[:space:]]//g'|awk -F= '{print $2}') + if ! rm -f ${tempfile}; then + log "rm -f $tempfile failed" + return 1 + fi + if [[ "$tempvalue" == "$2" ]]; then + debug "guc check $1=$2 successfully" + return 0 + else + return 1 + fi + else + if ! rm -f ${tempfile}; then + log "rm -f $tempfile failed" + return 1 + fi + return 1 + fi +} + +function check_upgrade_mode_by_sql(){ + # check upgrade_mode = 2 by sql + check_upgrade_mode_result="$GAUSS_TMP_PATH"/".temp_upgrade_mode" + if echo "" > ${check_upgrade_mode_result} && chmod 600 ${check_upgrade_mode_result}; then + debug "Begin to generate check_upgrade_mode_result." + else + log "generate $check_upgrade_mode_result failed." 
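+        # the async gsql result below is collected through this temp file, so
+        # fail fast if it cannot be created instead of polling upgrade_mode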
+        return 1
+    fi
+    echo ${db_password} | gsql -p ${GAUSS_LISTEN_PORT} -U ${GAUSS_ADMIN_USER} -h localhost -d postgres --pipeline -X -t -A \
+        -c "show upgrade_mode;" > ${check_upgrade_mode_result} 2>&1 &
+    sleep 0.1
+
+    for i in $(seq 1 60);
+    do
+        check_mode_sql=$(cat ${check_upgrade_mode_result})
+        if [[ "$check_mode_sql" == "2" ]];then
+            rm -f ${check_upgrade_mode_result}
+            return 0
+        elif [[ "$check_mode_sql" == "0" ]];then
+            rm -f ${check_upgrade_mode_result}
+            echo ${db_password} | gsql -p ${GAUSS_LISTEN_PORT} -U ${GAUSS_ADMIN_USER} -h localhost -d postgres --pipeline -X -t -A \
+                -c "show upgrade_mode;" > ${check_upgrade_mode_result} 2>&1 &
+        elif [[ "$check_mode_sql" == "" ]];then
+            debug "Wait for check_upgrade_mode_result..."
+        else
+            log "$(cat ${check_upgrade_mode_result})"
+            return 1
+        fi
+        sleep 0.5
+    done
+    if [[ -f "${check_upgrade_mode_result}" ]]; then
+        debug "check_upgrade_mode_result is $(cat ${check_upgrade_mode_result})"
+        rm -f ${check_upgrade_mode_result}
+    else
+        debug "get upgrade_mode by gsql failed"
+    fi
+    return 1
+}
+
+function rollback_pre() {
+    parses_step
+    if [[ "$current_step" -lt 1 ]]; then
+        log "no need to do the rollback_pre step"
+    elif [[ "$current_step" -gt 2 ]]; then
+        die "You should execute rollback_bin first" ${err_rollback_pre}
+    else
+        if [[ X"$big_cfg" == X ]]; then
+            big_cfg=$(grep big_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+        fi
+        if [[ "$big_cfg" == "True" ]]; then
+            if ! check_db_process; then
+                die "Gaussdb is not running" ${err_rollback_pre}
+            fi
+            if ! reload_upgrade_config upgrade_mode 2; then
+                die "set upgrade_mode to 2 failed" ${err_rollback_pre}
+            fi
+            record_step 1
+            if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback_otherdb.sql otherdb; then
+                debug "rollback pre sql successfully"
+            else
+                die "rollback pre sql failed" ${err_rollback_pre}
+            fi
+            if ! reload_upgrade_config upgrade_mode 0; then
+                die "set upgrade_mode to 0 failed" ${err_upgrade_pre}
+            fi
+        fi
+        record_step 0
+        log "The rollback_pre step is executed successfully. "
+    fi
+}
+
+function rollback_bin() {
+    parses_step
+    if [[ "$current_step" -lt 3 ]]; then
+        log "no need to do the rollback_bin step"
+    elif [[ "$current_step" -gt 4 ]]; then
+        die "You should execute rollback_post first" ${err_rollback_bin}
+    else
+        if [[ X"$old_version" == X ]]; then
+            old_version=$(grep old_version "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+        fi
+        if ! cd "$GAUSS_BACKUP_BASE_PATH"; then
+            die "Cannot access $GAUSS_BACKUP_BASE_PATH" ${err_rollback_bin}
+        fi
+        # Once the rollback starts, you must change the status code first.
+        # Otherwise, the upgrade may continue after the rollback is interrupted.
+        record_step 3
+        query_dn_role
+        if ! stop_dbnode; then
+            die "Stop gaussdb failed" ${err_rollback_bin}
+        fi
+        cp_gauss_home_config_to_temp ${err_rollback_bin}
+        if [[ -d "$GAUSSHOME" ]];then
+            rm -rf "$GAUSSHOME"/*
+        fi
+        if cp -r bak_bin_"$old_version"/* "$GAUSSHOME";then
+            log "Restore gausshome successfully!"
+        else
+            die "Restore gausshome failed!" ${err_rollback_bin}
+        fi
+        cp_temp_config_to_gauss_home ${err_rollback_bin}
+
+        if ! cp -rf ./postgresql.conf_"$old_version" "$PGDATA"/postgresql.conf; then
+            die "Restore postgresql.conf failed!" ${err_rollback_bin}
+        fi
+        if ! cp -rf ./pg_hba.conf_"$old_version" "$PGDATA"/pg_hba.conf; then
+            die "Restore pg_hba.conf failed!" ${err_rollback_bin}
+        fi
+        log "Restore GAUSSHOME, postgresql.conf, pg_hba.conf successfully."
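+        # the backed-up postgresql.conf was taken before upgrade_pre ran, so it
+        # still carries upgrade_mode=0; for a big (catalog) upgrade the value
+        # must be raised back to 2 below, otherwise rollback_post cannot run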
+        # when rolling back postgresql.conf, upgrade_mode should be set back to 2
+        if [[ X"$big_cfg" == X ]]; then
+            big_cfg=$(grep big_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+        fi
+        if [[ "$big_cfg" == "True" ]]; then
+            if ! set_upgrade_config upgrade_mode 2; then
+                die "set upgrade_mode to 2 failed" ${err_rollback_bin}
+            fi
+        fi
+        if ! cd "$GAUSS_UPGRADE_BASE_PATH"; then
+            die "Cannot access $GAUSS_UPGRADE_BASE_PATH" ${err_rollback_bin}
+        fi
+
+        if ! start_dbnode; then
+            die "Start gaussdb failed" ${err_rollback_bin}
+        fi
+        record_step 2
+        log "The rollback_bin step is executed successfully. "
+    fi
+
+}
+
+function rollback_post() {
+    parses_step
+    if [[ "$current_step" -lt 5 ]]; then
+        log "no need to do the rollback_post step"
+    else
+        if [[ X"$big_cfg" == X ]]; then
+            big_cfg=$(grep big_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+        fi
+        if [[ "$big_cfg" == "True" ]]; then
+            if ! check_db_process; then
+                die "Gaussdb is not running" ${err_rollback_post}
+            fi
+            if ! reload_upgrade_config upgrade_mode 2; then
+                die "set upgrade_mode to 2 failed" ${err_upgrade_post}
+            fi
+            record_step 5
+            if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback-post_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_rollback-post_otherdb.sql otherdb; then
+                debug "rollback post sql successfully"
+            else
+                die "rollback post sql failed" ${err_rollback_post}
+            fi
+        fi
+        record_step 4
+        log "The rollback_post step is executed successfully. "
+    fi
+
+}
+
+function upgrade_pre() {
+    parses_step
+    if [[ "$current_step" -lt 0 ]]; then
+        die "Step file may have been changed and is invalid" ${err_upgrade_pre}
+    elif [[ "$current_step" -lt 1 ]]; then
+        upgrade_pre_step1
+        upgrade_pre_step2
+    elif [[ "$current_step" -eq 1 ]]; then
+        rollback_pre
+        upgrade_pre_step2
+    else
+        log "no need to do the upgrade_pre step"
+    fi
+}
+function upgrade_pre_step1() {
+    check_disk
+    check_version
+    if [[ "$big_cfg" == "True" ]]; then
+        prepare_sql_all
+    fi
+    check_pkg
+    bak_gauss
+    decompress_pkg
+    record_step 1
+}
+
+function upgrade_pre_step2() {
+    if [[ X"$big_cfg" == X ]]; then
+        big_cfg=$(grep big_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+    fi
+    if [[ "$big_cfg" == "True" ]]; then
+        if ! check_db_process; then
+            die "Gaussdb is not running" ${err_upgrade_pre}
+        fi
+        if ! reload_upgrade_config upgrade_mode 2; then
+            die "set upgrade_mode to 2 failed" ${err_upgrade_pre}
+        fi
+
+        if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade_otherdb.sql otherdb; then
+            debug "exec pre sql successfully"
+        else
+            die "exec pre sql failed" ${err_upgrade_pre}
+        fi
+
+    fi
+    record_step 2
+    log "The upgrade_pre step is executed successfully. "
+}
+
+function upgrade_bin() {
+    parses_step
+    if [[ "$current_step" -lt 0 ]]; then
+        die "Step file may have been changed and is invalid" ${err_upgrade_bin}
+    elif [[ "$current_step" -lt 2 ]]; then
+        die "Execute upgrade_pre first" ${err_upgrade_bin}
+    elif [[ "$current_step" -gt 3 ]]; then
+        log "no need to do the upgrade_bin step"
+    else
+        upgrade_bin_step4
+    fi
+}
+
+function upgrade_bin_step4() {
+    record_step 3
+    query_dn_role
+    if ! stop_dbnode; then
+        die "Stop gaussdb failed" ${err_upgrade_bin}
+    fi
+    cp_gauss_home_config_to_temp ${err_upgrade_bin}
+    cp_pkg
+    cp_temp_config_to_gauss_home ${err_upgrade_bin}
+    guc_delete
+    if [[ X"$big_cfg" == X ]]; then
+        big_cfg=$(grep big_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+    fi
+    if [[ X"$old_cfg" == X ]]; then
+        old_cfg=$(grep old_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+    fi
+    if [[ "$big_cfg" == "True" ]]; then
+        if ! echo " -u $old_cfg" > "$GAUSSHOME"/bin/start_flag;then
+            die "Create $GAUSSHOME/bin/start_flag file failed" ${err_upgrade_bin}
+        fi
+        if ! start_dbnode "$old_cfg"; then
+            die "Start gaussdb failed" ${err_upgrade_bin}
+        fi
+    else
+        if ! start_dbnode; then
+            die "Start gaussdb failed" ${err_upgrade_bin}
+        fi
+    fi
+    record_step 4
+    log "The upgrade_bin step is executed successfully. "
+}
+
+function cp_gauss_home_config_to_temp() {
+    local temp_err_code="$1"
+    if ! cd "$GAUSSHOME";then
+        die "cd ${GAUSSHOME} failed" ${temp_err_code}
+    fi
+    local array=(${GAUSS_UPGRADE_SYNC_CONFIG_LIST//,/ })
+    if [[ -e "temp_gauss_upgrade_conf.tar" ]];then
+        rm -f temp_gauss_upgrade_conf.tar
+    fi
+    for var in ${array[@]}
+    do
+        if [[ -e "$var" ]];then
+            debug "begin to tar ${var}"
+            if ! tar -rf temp_gauss_upgrade_conf.tar ${var};then
+                die "tar -rf temp_gauss_upgrade_conf.tar $var failed" ${temp_err_code}
+            fi
+        fi
+    done
+    if [[ -f temp_gauss_upgrade_conf.tar ]];then
+        if chmod 600 temp_gauss_upgrade_conf.tar && mv temp_gauss_upgrade_conf.tar "${GAUSS_TMP_PATH}";then
+            log "Copy the configs from GAUSSHOME to temp successfully."
+        else
+            die "mv temp_gauss_upgrade_conf.tar "${GAUSS_TMP_PATH}" failed" ${temp_err_code}
+        fi
+
+    fi
+    if ! cd - > /dev/null; then
+        die "cd - failed" ${temp_err_code}
+    fi
+}
+
+function cp_temp_config_to_gauss_home() {
+    local temp_err_code="$1"
+    if [[ -f "${GAUSS_TMP_PATH}/temp_gauss_upgrade_conf.tar" ]];then
+        if ! tar -xf "${GAUSS_TMP_PATH}/temp_gauss_upgrade_conf.tar" -C "${GAUSSHOME}";then
+            die "tar -xf temp_gauss_upgrade_conf.tar -C ${GAUSSHOME} failed" ${temp_err_code}
+        fi
+        if ! rm -f "${GAUSS_TMP_PATH}/temp_gauss_upgrade_conf.tar";then
+            die "rm -f temp_gauss_upgrade_conf.tar failed" ${temp_err_code}
+        fi
+        log "Move the configs from temp to GAUSSHOME successfully."
+    fi
+}
+
+function upgrade_post() {
+    parses_step
+    if [[ "$current_step" -lt 0 ]]; then
+        die "Step file may have been changed and is invalid" ${err_upgrade_post}
+    elif [[ "$current_step" -lt 4 ]]; then
+        die "You should execute upgrade_bin first" ${err_upgrade_post}
+    elif [[ "$current_step" -eq 4 ]]; then
+        upgrade_post_step56
+    elif [[ "$current_step" -eq 5 ]]; then
+        rollback_post
+        upgrade_post_step56
+    else
+        log "no need to do the upgrade_post step"
+    fi
+}
+function upgrade_post_step56() {
+    if [[ X"$big_cfg" == X ]]; then
+        big_cfg=$(grep big_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}')
+    fi
+    if [[ "$big_cfg" == "True" ]]; then
+        if ! check_db_process; then
+            die "Gaussdb is not running" ${err_upgrade_post}
+        fi
+        record_step 5
+        if exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade-post_maindb.sql maindb && exec_sql "$GAUSS_TMP_PATH"/temp_sql/temp_upgrade-post_otherdb.sql otherdb; then
+            debug "upgrade post sql successfully"
+        else
+            die "upgrade post sql failed" ${err_upgrade_post}
+        fi
+
+    fi
+    record_step 6
+    log "The upgrade_post step is executed successfully. 
" +} +function upgrade_commit() { + parses_step + if [[ "$current_step" -eq 0 ]]; then + die "No need commit,upgrade directly" ${err_no_need_commit} + fi + if [[ "$current_step" -ne 6 ]]; then + die "Now you can't commit because the steps are wrong" ${err_upgrade_commit} + fi + if ! rm -f "$GAUSSHOME"/bin/start_flag;then + die "rm $GAUSSHOME/bin/start_flag file failed" ${err_upgrade_commit} + fi + if ! reload_upgrade_config upgrade_mode 0; then + die "set upgrade_mode to 0 failed" ${err_upgrade_commit} + fi + # after commit, need to reset step to 0 + record_step 0 + log "The upgrade_commit step is executed successfully. " +} +function record_step() { + local record_file="$GAUSS_TMP_PATH"/record_step.txt + if echo "$1" > "$record_file" && chmod 600 "$record_file"; then + debug "record step $1 successfully." + else + die "Write step file failed" ${err_inner_sys} + fi +} + + +function parses_step() { + if [[ ! -f "$GAUSS_TMP_PATH"/record_step.txt ]]; then + current_step=0 + else + if grep -q [^0-6] "$GAUSS_TMP_PATH"/record_step.txt; then + die "$GAUSS_TMP_PATH/record_step.txt may be changed manually" ${err_inner_sys} + else + current_step=$(cat "$GAUSS_TMP_PATH"/record_step.txt) + fi + fi + debug "current_step is $current_step" +} + +function query_start_mode() { + parses_step + if [[ "$current_step" -lt 3 ]]; then + log "Start directly" + return 0 + fi + if [[ ! -f "$GAUSS_TMP_PATH"/version_flag ]]; then + log "Start directly" + return 0 + fi + + if grep -Ewq "new_version=\w{8}" "$GAUSS_TMP_PATH"/version_flag ; then + new_version=$(grep new_version "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}') + if gaussdb -V|grep -wq "$new_version";then + if grep -q "^big_cfg=True" "$GAUSS_TMP_PATH"/version_flag;then + old_cfg=$(grep old_cfg "$GAUSS_TMP_PATH"/version_flag | awk -F= '{print $2}') + log "You must start with -u $old_cfg" + return 0 + fi + fi + fi + log "Start directly" + return 0 +} + +function switch_over() { + local temp_file="${GAUSS_TMP_PATH}/temp_switch_over" + if gs_ctl query -D "$PGDATA" > "$temp_file" && chmod 400 "$temp_file"; then + local_role_temp=$(grep local_role "$temp_file" | head -1 | awk '{print $3}') + db_state_temp=$(grep db_state "$temp_file" | head -1 | awk '{print $3}') + peer_role_temp=$(grep peer_role "$temp_file" | head -1 | awk '{print $3}') + rm -f "$temp_file" + else + die "gs_ctl query -D $PGDATA failed" 1 + fi + + if [[ "$local_role_temp" = "Standby" ]] && [[ "$db_state_temp" = "Normal" ]] && [[ "$peer_role_temp" = "Primary" ]]; then + log "Current node can do switchover" + if gs_ctl switchover -D "$GAUSSDATA" -f ;then + log "Switchover success" + else + die "Switchover failed" + fi + else + log "no need do switchover" + fi +} diff --git a/liteom/upgrade_config.sh b/liteom/upgrade_config.sh new file mode 100644 index 000000000..25719d241 --- /dev/null +++ b/liteom/upgrade_config.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright (c) Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. 
+# date: 2021-12-22 +# version: 1.0 + +# 数据库监听端口 +GAUSS_LISTEN_PORT="" + +# 数据库管理员用户名 +GAUSS_ADMIN_USER="" + +#数据库升级回退日志路径 +GAUSS_LOG_PATH="" + +#数据库升级根位置 +GAUSS_UPGRADE_BASE_PATH="" + +#数据库SQL包位置 +GAUSS_SQL_TAR_PATH="" + +#数据库低版本备份位置 +GAUSS_BACKUP_BASE_PATH="" + +#数据库临时目录 +GAUSS_TMP_PATH="" + +#是否使用存在的bin解压包 +GAUSS_UPGRADE_BIN_PATH="" + +#需要同步的cluster config 列表 +GAUSS_UPGRADE_SYNC_CONFIG_LIST="lib/postgresql/pg_plugin" \ No newline at end of file diff --git a/liteom/upgrade_errorcode.sh b/liteom/upgrade_errorcode.sh new file mode 100644 index 000000000..9c42f1a01 --- /dev/null +++ b/liteom/upgrade_errorcode.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright (c) Huawei Technologies Co., Ltd. 2010-2022. All rights reserved. +# date: 2021-12-22 +# version: 1.0 + +err_upgrade_pre=201 +err_upgrade_bin=202 +err_upgrade_post=203 +err_rollback_pre=204 +err_rollback_bin=205 +err_rollback_post=206 +err_check_init=207 +err_parameter=208 +err_upgrade_commit=209 +err_version_same=210 +err_no_need_commit=211 +err_inner_sys=212 +err_dn_role_null=213 \ No newline at end of file diff --git a/simpleInstall/README.md b/simpleInstall/README.md index b9437ebc4..4e4634ba7 100644 --- a/simpleInstall/README.md +++ b/simpleInstall/README.md @@ -1,113 +1,113 @@ -## 1 概述 - -本章节主要介绍采用openGauss极简安装脚本(以下简称安装脚本),一键式安装openGauss数据库所必须的系统环境及安装步骤。 - -## 2 安装环境要求 - -### 2.1 openGauss环境要求 - -安装openGauss的具体环境要求,请参考《openGauss安装指南》中的“软硬件环境要求”章节。 - -### 2.2 安装脚本环境要求 - -#### 硬件环境要求 - -安装脚本对安装环境的操作系统及对应处理器架构进行了限制,目前支持的环境如表1所示。 - -**表1** 硬件环境要求 - -| 操作系统 | 处理器架构 | -| --------- | ---------- | -| openEuler | aarch64 | -| openEuler | x86_64 | -| CentOS | x86_64 | - - -## 3 安装openGauss - - -### 执行安装 - -使用如下命令执行单节点安装脚本。 - -```shell -sh install.sh -w password -``` - -使用如下命令执行一主一备安装脚本。 - -```shell -sh install.sh -w password --multinode -``` -#### 参数说明 - -- [-w password] gs_initdb password, this argument is necessary for installation -- [-p port] port of openGauss single node, or master node; default value is 5432 -- [--multinode] install one master and one slave openGauss node - -以上参数的详细信息,请参考《openGauss安装指南》. 
- - -## 4 导入展示数据库 - -### 4.1 学校数据模型 - -假设A市B学校为了加强对学校的管理,引入了openGauss数据库。在B学校里,主要涉及的对象有学生、教师、班级、院系和课程。本实验假设在B学校数据库中,教师会教授课程,学生会选修课程,院系会聘请教师,班级会组成院系,学生会组成班级。因此,根据此关系,本文给出了相应的关系模式如下。在运行安装脚本时,会根据用户选择安装该展示模型。 - -#### 关系模式 - -对于B校中的5个对象,分别建立属于每个对象的属性集合,具体属性描述如下: - -- 学生(学号,姓名,性别,出生日期,入学日期,家庭住址) -- 教师(教师编号,教师姓名,职称,性别,年龄,入职日期) -- 班级(班级编号,班级名称,班主任) -- 院系(系编号,系名称,系主任) -- 课程(课程编号,课程名称,课程类型,学分) - -上述属性对应的编号为: - -- student(std_id,std_name,std_sex,std_birth,std_in,std_address) -- teacher(tec_id,tec_name,tec_job,tec_sex,tec_age,tec_in) -- class(cla_id,cla_name,cla_teacher) -- school_department(depart_id,depart_name,depart_teacher) -- course(cor_id,cor_name,cor_type,credit) - -对象之间的关系: - -- 一位学生可以选择多门课程,一门课程可被多名学生选择 -- 一位老师可以选择多门课程,一门课程可被多名老师教授 -- 一个院系可由多个班级组成 -- 一个院系可聘请多名老师 -- 一个班级可由多名学生组成 - -### 4.2 金融数据模型 - -假设A市C银行为了方便对银行数据的管理和操作,引入了openGauss数据库。针对C银行的业务,本实验主要将对象分为客户、银行卡、理财产品、保险、基金和资产。因此,针对这些数据库对象,本实验假设C银行的金融数据库存在着以下关系:客户可以办理银行卡,同时客户可以购买不用的银行产品,如资产,理财产品,基金和保险。那么,根据C银行的对象关系,本文给出了相应的关系模式如下。在运行安装脚本时,会根据用户选择安装该展示模型。 - -#### 关系模式 - -对于C银行中的6个对象,分别建立属于每个对象的属性集合,具体属性描述如下: - -- 客户(客户编号、客户名称、客户邮箱,客户身份证,客户手机号,客户登录密码) -- 银行卡(银行卡号,银行卡类型,所属客户编号) -- 理财产品(产品名称,产品编号,产品描述,购买金额,理财年限) -- 保险(保险名称,保险编号,保险金额,适用人群,保险年限,保障项目) -- 基金(基金名称,基金编号,基金类型,基金金额,风险等级,基金管理者) -- 资产(客户编号,商品编号,商品状态,商品数量,商品收益,购买时间) - -上述属性对应的编号为: - -- client(c_id,c_name,c_mail,c_id_card,c_phone,c_password) -- bank_card(b_number,b_type,b_c_id) -- finances_product(p_name,p_id,p_description,p_amount,p_year) -- insurance(i_name,i_id,i_amount,i_person,i_year,i_project) -- fund(f_name,f_id,f_type,f_amount,risk_level,f_manager) -- property(pro_c_id,pro_id,pro_status,pro_quantity,pro_income,pro_purchase_time) - -对象之间的关系: - -- 一个客户可以办理多张银行卡 -- 一个客户可有多笔资产 -- 一个客户可以购买多个理财产品,同一类理财产品可由多个客户购买 -- 一个客户可以购买多个基金,同一类基金可由多个客户购买 -- 一个客户可以购买多个保险,同一类保险可由多个客户购买 \ No newline at end of file +## 1 概述 + +本章节主要介绍采用openGauss极简安装脚本(以下简称安装脚本),一键式安装openGauss数据库所必须的系统环境及安装步骤。 + +## 2 安装环境要求 + +### 2.1 openGauss环境要求 + +安装openGauss的具体环境要求,请参考《openGauss安装指南》中的“软硬件环境要求”章节。 + +### 2.2 安装脚本环境要求 + +#### 硬件环境要求 + +安装脚本对安装环境的操作系统及对应处理器架构进行了限制,目前支持的环境如表1所示。 + +**表1** 硬件环境要求 + +| 操作系统 | 处理器架构 | +| --------- | ---------- | +| openEuler | aarch64 | +| openEuler | x86_64 | +| CentOS | x86_64 | + + +## 3 安装openGauss + + +### 执行安装 + +使用如下命令执行单节点安装脚本。 + +```shell +sh install.sh -w password +``` + +使用如下命令执行一主一备安装脚本。 + +```shell +sh install.sh -w password --multinode +``` +#### 参数说明 + +- [-w password] gs_initdb password, this argument is necessary for installation +- [-p port] port of openGauss single node, or master node; default value is 5432 +- [--multinode] install one master and one slave openGauss node + +以上参数的详细信息,请参考《openGauss安装指南》. 
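+
+For example, to run a single-node installation on a custom port (the password and port below are placeholders only; replace them with real values):
+
+```shell
+sh install.sh -w Test@123 -p 5433
+```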
+ + +## 4 导入展示数据库 + +### 4.1 学校数据模型 + +假设A市B学校为了加强对学校的管理,引入了openGauss数据库。在B学校里,主要涉及的对象有学生、教师、班级、院系和课程。本实验假设在B学校数据库中,教师会教授课程,学生会选修课程,院系会聘请教师,班级会组成院系,学生会组成班级。因此,根据此关系,本文给出了相应的关系模式如下。在运行安装脚本时,会根据用户选择安装该展示模型。 + +#### 关系模式 + +对于B校中的5个对象,分别建立属于每个对象的属性集合,具体属性描述如下: + +- 学生(学号,姓名,性别,出生日期,入学日期,家庭住址) +- 教师(教师编号,教师姓名,职称,性别,年龄,入职日期) +- 班级(班级编号,班级名称,班主任) +- 院系(系编号,系名称,系主任) +- 课程(课程编号,课程名称,课程类型,学分) + +上述属性对应的编号为: + +- student(std_id,std_name,std_sex,std_birth,std_in,std_address) +- teacher(tec_id,tec_name,tec_job,tec_sex,tec_age,tec_in) +- class(cla_id,cla_name,cla_teacher) +- school_department(depart_id,depart_name,depart_teacher) +- course(cor_id,cor_name,cor_type,credit) + +对象之间的关系: + +- 一位学生可以选择多门课程,一门课程可被多名学生选择 +- 一位老师可以选择多门课程,一门课程可被多名老师教授 +- 一个院系可由多个班级组成 +- 一个院系可聘请多名老师 +- 一个班级可由多名学生组成 + +### 4.2 金融数据模型 + +假设A市C银行为了方便对银行数据的管理和操作,引入了openGauss数据库。针对C银行的业务,本实验主要将对象分为客户、银行卡、理财产品、保险、基金和资产。因此,针对这些数据库对象,本实验假设C银行的金融数据库存在着以下关系:客户可以办理银行卡,同时客户可以购买不用的银行产品,如资产,理财产品,基金和保险。那么,根据C银行的对象关系,本文给出了相应的关系模式如下。在运行安装脚本时,会根据用户选择安装该展示模型。 + +#### 关系模式 + +对于C银行中的6个对象,分别建立属于每个对象的属性集合,具体属性描述如下: + +- 客户(客户编号、客户名称、客户邮箱,客户身份证,客户手机号,客户登录密码) +- 银行卡(银行卡号,银行卡类型,所属客户编号) +- 理财产品(产品名称,产品编号,产品描述,购买金额,理财年限) +- 保险(保险名称,保险编号,保险金额,适用人群,保险年限,保障项目) +- 基金(基金名称,基金编号,基金类型,基金金额,风险等级,基金管理者) +- 资产(客户编号,商品编号,商品状态,商品数量,商品收益,购买时间) + +上述属性对应的编号为: + +- client(c_id,c_name,c_mail,c_id_card,c_phone,c_password) +- bank_card(b_number,b_type,b_c_id) +- finances_product(p_name,p_id,p_description,p_amount,p_year) +- insurance(i_name,i_id,i_amount,i_person,i_year,i_project) +- fund(f_name,f_id,f_type,f_amount,risk_level,f_manager) +- property(pro_c_id,pro_id,pro_status,pro_quantity,pro_income,pro_purchase_time) + +对象之间的关系: + +- 一个客户可以办理多张银行卡 +- 一个客户可有多笔资产 +- 一个客户可以购买多个理财产品,同一类理财产品可由多个客户购买 +- 一个客户可以购买多个基金,同一类基金可由多个客户购买 +- 一个客户可以购买多个保险,同一类保险可由多个客户购买 diff --git a/src/.gitignore b/src/.gitignore index c541bca22..ca8441e2e 100644 --- a/src/.gitignore +++ b/src/.gitignore @@ -188,10 +188,6 @@ /include/replication/syncrep_gram.hpp /include/stamp-h /test/regress/pg_regress -/test/regress/sql/*.sql -/test/regress/sql/*/*.sql -/test/regress/expected/*.out -/test/regress/expected/*/*.out /test/regress/results/ /test/regress/tmp_check/ /test/isolation/specparse.cpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index da29ef9bd..017763975 100755 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -28,7 +28,7 @@ execute_process( COMMAND ln -fs ${PROJECT_SRC_DIR}/include/port/linux.h ${PROJECT_SRC_DIR}/include/pg_config_os.h ) -INCLUDE_DIRECTORIES(${LIBTHRIFT_INCLUDE_PATH} ${SNAPPY_INCLUDE_PATH} ${CJSON_INCLUDE_PATH} ${BOOST_INCLUDE_PATH} ${ZSTD_INCLUDE_PATH}) +INCLUDE_DIRECTORIES(${LIBTHRIFT_INCLUDE_PATH} ${SNAPPY_INCLUDE_PATH} ${CJSON_INCLUDE_PATH} ${BOOST_INCLUDE_PATH}) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/bin @@ -44,12 +44,15 @@ add_subdirectory(lib) add_subdirectory(gausskernel) add_subdirectory(test) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_gsql.cnf DESTINATION share/sslcert/gds RENAME openssl.cnf) + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_om.cnf DESTINATION share/sslcert/om RENAME openssl.cnf) +endif() install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_etcd.cnf DESTINATION share/sslcert/etcd RENAME openssl.cnf) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_gsql.cnf DESTINATION share/sslcert/gds RENAME openssl.cnf) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_grpc.cnf DESTINATION share/sslcert/grpc RENAME openssl.cnf) 
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_gsql.cnf DESTINATION share/sslcert/gsql RENAME openssl.cnf) -install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/ssl/openssl_om.cnf DESTINATION share/sslcert/om RENAME openssl.cnf) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/get_PlatForm_str.sh DESTINATION lib/postgresql/pgxs/src) +install(FILES ${CMAKE_BINARY_DIR}/${openGauss}/src/makefiles/pgxs.mk DESTINATION lib/postgresql/pgxs/src/makefiles) +install(FILES ${PROJECT_SRC_DIR}/Makefile.shlib DESTINATION lib/postgresql/pgxs/src/) install(FILES ${PROJECT_OPENGS_DIR}/config/install-sh DESTINATION lib/postgresql/pgxs/config) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/cm/ DESTINATION include PATTERN "*.h" PATTERN "cm_server" EXCLUDE @@ -145,6 +148,7 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/storage/mot DESTINATION in endif() install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/commands DESTINATION include/postgresql/server) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/port DESTINATION include/postgresql/server) +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/lite DESTINATION include/postgresql/server) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/alarm/alarm.h DESTINATION include/postgresql/server/alarm) install_file(${CMAKE_CURRENT_SOURCE_DIR}/include/storage include/postgresql/server/storage) install_file(${CMAKE_CURRENT_SOURCE_DIR}/include include/postgresql/server) @@ -166,25 +170,36 @@ install(CODE "execute_process( ) # open source install part -install(DIRECTORY ${JAVA_HOME}/jre/ DESTINATION jre FILE_PERMISSIONS OWNER_EXECUTE GROUP_EXECUTE OWNER_READ GROUP_READ) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install(DIRECTORY ${JAVA_HOME}/jre/ DESTINATION jre FILE_PERMISSIONS OWNER_EXECUTE GROUP_EXECUTE OWNER_READ GROUP_READ) +endif() + if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") install(DIRECTORY ${DCF_LIB_PATH} DESTINATION .) endif() install(DIRECTORY ${ZSTD_LIB_PATH} DESTINATION . PATTERN "*.a" EXCLUDE) -install(DIRECTORY ${LIBOBS_LIB_PATH} DESTINATION .) -install(DIRECTORY ${LIBOBS_INCLUDE_PATH} DESTINATION include/postgresql/server/access/obs) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install(DIRECTORY ${LIBOBS_LIB_PATH} DESTINATION .) + install(DIRECTORY ${LIBOBS_INCLUDE_PATH} DESTINATION include/postgresql/server/access/obs) +endif() install(DIRECTORY ${CJSON_LIB_PATH} DESTINATION .) install(DIRECTORY ${CJSON_INCLUDE_PATH}/cjson DESTINATION include/postgresql/server) -install(DIRECTORY ${ETCD_BIN_PATH} DESTINATION .) -install(DIRECTORY ${IPERF_LIB_PATH} DESTINATION .) -install(DIRECTORY ${KERBEROS_SBIN_PATH}/ DESTINATION bin) -install(DIRECTORY ${KERBEROS_BIN_PATH} DESTINATION .) -install(DIRECTORY ${KERBEROS_LIB_PATH} DESTINATION .) +if(NOT ${ENABLE_LITE_MODE} STREQUAL ON) + install(DIRECTORY ${ETCD_BIN_PATH} DESTINATION .) + install(DIRECTORY ${IPERF_LIB_PATH} DESTINATION .) +endif() +if(NOT ${ENABLE_LITE_MODE} STREQUAL ON) if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF) install(DIRECTORY ${KMC_LIB_PATH} DESTINATION .) endif() +endif() install(DIRECTORY ${LIBCURL_LIB_PATH} DESTINATION .) -install(DIRECTORY ${LIBPARQUET_LIB_PATH} DESTINATION .) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install(DIRECTORY ${KERBEROS_SBIN_PATH}/ DESTINATION bin) + install(DIRECTORY ${KERBEROS_BIN_PATH} DESTINATION .) + install(DIRECTORY ${KERBEROS_LIB_PATH} DESTINATION .) + install(DIRECTORY ${LIBPARQUET_LIB_PATH} DESTINATION .) +endif() install(DIRECTORY ${LZ4_LIB_PATH} DESTINATION .) 
install(DIRECTORY ${LZ4_BIN_PATH} DESTINATION .) install(DIRECTORY ${LIBOPENSSL_BIN_PATH} DESTINATION .) @@ -209,12 +224,19 @@ install(CODE "message(\"-- Created symlink: libatomic.so.1 -> libatomic.so.1.2.0 endif() install(FILES ${BUILDTOOLS_PATH}/gcc7.3/gcc/lib64/libgcc_s.so.1 DESTINATION lib) -install(FILES ${SECURE_HOME}/../Dynamic_Lib/libsecurec.so DESTINATION lib) -install(FILES ${PLJAVA_HOME}/lib/libpljava.so DESTINATION lib) -install(FILES ${PLJAVA_HOME}/java/pljava.jar DESTINATION lib/postgresql/java) -install(FILES ${PLJAVA_HOME}/udstools.py DESTINATION share/postgresql/tmp) +install(FILES ${BUILDTOOLS_PATH}/gcc7.3/gcc/lib64/libgomp.so DESTINATION lib) +install(FILES ${BUILDTOOLS_PATH}/gcc7.3/gcc/lib64/libgomp.so.1 DESTINATION lib) +install(FILES ${BUILDTOOLS_PATH}/gcc7.3/gcc/lib64/libgomp.so.1.0.0 DESTINATION lib) +install(FILES ${XGBOOST_LIB_PATH}/libxgboost.so DESTINATION lib) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install(FILES ${PLJAVA_HOME}/lib/libpljava.so DESTINATION lib) + install(FILES ${PLJAVA_HOME}/java/pljava.jar DESTINATION lib/postgresql/java) + install(FILES ${PLJAVA_HOME}/udstools.py DESTINATION share/postgresql/tmp) +endif() if(NOT ${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS} STREQUAL OFF_OFF) - install(FILES ${LIBHOTPATCH_LIB_PATH}/libdoprapatch.a DESTINATION lib) + if("${SUPPORT_HOTPATCH}" STREQUAL "yes") + install(FILES ${LIBHOTPATCH_LIB_PATH}/libdoprapatch.a DESTINATION lib) + endif() endif() install(CODE "execute_process( COMMAND cp ${BUILDTOOLS_PATH}/gcc7.3/gcc/lib64/libstdc++.so.6.0.24 ${prefix_home}/lib/libstdc++.so.6 @@ -228,15 +250,17 @@ WORKING_DIRECTORY ${prefix_home}/lib)" ) install(CODE "message(\"-- Created symlink: libcgroup.so.1 -> libcgroup.so\")") -# special -install(CODE "execute_process( - COMMAND cp ${LIBPARQUET_LIB_PATH}/libparquet.so ${prefix_home}/lib/libparquet.so - COMMAND cp ${LIBPARQUET_LIB_PATH}/libparquet.so.14 ${prefix_home}/lib/libparquet.so.14 - COMMAND cp ${LIBPARQUET_LIB_PATH}/libparquet.so.14.1.0 ${prefix_home}/lib/libparquet.so.14.1.0 - COMMAND cp ${LIBPARQUET_LIB_PATH}/libarrow.so ${prefix_home}/lib/libarrow.so - COMMAND cp ${LIBPARQUET_LIB_PATH}/libarrow.so.14 ${prefix_home}/lib/libarrow.so.14 - COMMAND cp ${LIBPARQUET_LIB_PATH}/libarrow.so.14.1.0 ${prefix_home}/lib/libarrow.so.14.1.0)" -) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + # special + install(CODE "execute_process( + COMMAND cp ${LIBPARQUET_LIB_PATH}/libparquet.so ${prefix_home}/lib/libparquet.so + COMMAND cp ${LIBPARQUET_LIB_PATH}/libparquet.so.14 ${prefix_home}/lib/libparquet.so.14 + COMMAND cp ${LIBPARQUET_LIB_PATH}/libparquet.so.14.1.0 ${prefix_home}/lib/libparquet.so.14.1.0 + COMMAND cp ${LIBPARQUET_LIB_PATH}/libarrow.so ${prefix_home}/lib/libarrow.so + COMMAND cp ${LIBPARQUET_LIB_PATH}/libarrow.so.14 ${prefix_home}/lib/libarrow.so.14 + COMMAND cp ${LIBPARQUET_LIB_PATH}/libarrow.so.14.1.0 ${prefix_home}/lib/libarrow.so.14.1.0)" + ) +endif() # fastcheck part install(FILES ${PROJECT_SRC_DIR}/test/regress/stub/roach_api_stub/roach_api_stub.control diff --git a/src/Makefile b/src/Makefile index b2db98596..77bb7a572 100755 --- a/src/Makefile +++ b/src/Makefile @@ -12,7 +12,7 @@ subdir = src top_builddir = .. 
include Makefile.global -ifeq ($(enable_multiple_nodes)_$(enable_privategauss), no_yes) +ifeq ($(enable_multiple_nodes)_$(enable_privategauss)_$(enable_lite_mode), no_yes_no) dirs_in_central_mode = $(top_builddir)/../distribute/cm $(top_builddir)/../distribute/bin endif diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 49fdbd72b..120080089 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -172,8 +172,8 @@ enable_jemalloc = @enable_jemalloc@ enable_jemalloc_debug = @enable_jemalloc_debug@ enable_privategauss = @enable_privategauss@ enable_multiple_nodes = @enable_multiple_nodes@ +enable_lite_mode = @enable_lite_mode@ enable_mot = @enable_mot@ -enable_llvm = @enable_llvm@ enable_mysql_fdw = @enable_mysql_fdw@ enable_oracle_fdw = @enable_oracle_fdw@ enable_memory_check = @enable_memory_check@ @@ -210,6 +210,7 @@ TCL_SHLIB_LD_LIBS = @TCL_SHLIB_LD_LIBS@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ +with_openeuler_os = @with_openeuler_os@ ############################################################################# @@ -253,6 +254,11 @@ ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) else SUPPORT_HOTPATCH = no endif + +ifeq ($(enable_lite_mode), yes) + SUPPORT_HOTPATCH = no +endif + ############################################################################# ############################################################################# # I leave all this home path here because somewhere will it, e. llvm-config # @@ -288,6 +294,7 @@ ifeq ($(with_3rd), NONE) LIBHOTPATCH_HOME = $(top_builddir)/$(PLATFORMPATH)/hotpatch LIBCGROUP_HOME = $(top_builddir)/$(BINARYPATH)/libcgroup/$(LIB_SUPPORT_LLT) LIBLLVM_HOME = $(top_builddir)/$(BINARYPATH)/llvm/comm + XGBOOST_HOME = $(top_builddir)/$(BINARYPATH)/xgboost/comm EVENT_HOME = $(top_builddir)/$(BINARYPATH)/event/$(LIB_SUPPORT_LLT) ZLIB_HOME = $(top_builddir)/$(BINARYPATH)/zlib1.2.11/$(LIB_SUPPORT_LLT) LZ4_HOME = $(top_builddir)/$(BINARYPATH)/lz4/$(LIB_SUPPORT_LLT) @@ -329,6 +336,7 @@ else LIBCGROUP_HOME = $(with_3rd)/$(BINARYPATH)/libcgroup/$(LIB_SUPPORT_LLT) LIBHOTPATCH_HOME = $(with_3rd)/$(PLATFORMPATH)/hotpatch LIBLLVM_HOME = $(with_3rd)/$(BINARYPATH)/llvm/comm + XGBOOST_HOME = $(with_3rd)/$(BINARYPATH)/xgboost/$(LIB_SUPPORT_LLT) EVENT_HOME = $(with_3rd)/$(BINARYPATH)/event/$(LIB_SUPPORT_LLT) ZLIB_HOME = $(with_3rd)/$(BINARYPATH)/zlib1.2.11/$(LIB_SUPPORT_LLT) LZ4_HOME = $(with_3rd)/$(BINARYPATH)/lz4/$(LIB_SUPPORT_LLT) @@ -339,8 +347,10 @@ else LIBORC_HOME = $(with_3rd)/$(BINARYPATH)/liborc/$(LIB_SUPPORT_LLT) SNAPPY_HOME = $(with_3rd)/$(BINARYPATH)/snappy/$(LIB_SUPPORT_LLT) LIBOPENSSL_HOME = $(with_3rd)/$(BINARYPATH)/openssl/$(LIB_NOT_SUPPORT_LLT) +ifneq ($(enable_lite_mode), yes) ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) LIBKMC_HOME = $(with_3rd)/$(PLATFORMPATH)/kmc/comm +endif endif SECURE_HOME = $(with_3rd)/$(PLATFORMPATH)/Huawei_Secure_C/$(LIB_NOT_SUPPORT_LLT) SECUREDYNAMICLIB_HOME = $(with_3rd)/$(PLATFORMPATH)/Huawei_Secure_C/Dynamic_Lib @@ -355,13 +365,26 @@ endif ZSTD_HOME = $(with_3rd)/$(BINARYPATH)/zstd LIBNANOMSG_HOME = $(with_3rd)/$(BINARYPATH)/nanomsg/comm PLJAVA_HOME = $(with_3rd)/$(BINARYPATH)/pljava/$(LIB_SUPPORT_LLT) - JAVA_HOME = $(with_3rd)/$(PLATFORMPATH)/openjdk8/jdk1.8.0_222 + using_openjdk = $(shell if [ -d "$(with_3rd)/$(PLATFORMPATH)/openjdk8" ]; then echo "yes"; else echo "no"; fi;) + ifeq ($(using_openjdk), yes) + JAVA_HOME = $(with_3rd)/$(PLATFORMPATH)/openjdk8/jdk1.8.0_222 + else + JAVA_HOME = $(with_3rd)/platform/huaweijdk8/x86_64/jdk + 
endif MASSTREE_HOME = $(with_3rd)/$(BINARYPATH)/masstree/comm MYFDW_HOME = $(with_3rd)/dependency/mysql_fdw ORCFDW_HOME = $(with_3rd)/dependency/oracle_fdw DCF_HOME = $(with_3rd)/component/$(PLAT_FORM_STR)/dcf endif +ifeq ($(with_openeuler_os), yes) + JAVA_HOME = /usr/lib/jvm/java/ + LIBORC_HOME = $(with_3rd)/liborc/ + ZLIB_HOME = $(with_3rd)/zlib/ + CJSON_HOME = $(with_3rd)/cjson/ + PROTOBUF_HOME = $(with_3rd)/protobuf/ +endif + LIBCARBONDATA_HOME = $(top_builddir)/../contrib/carbondata ############################################################################## @@ -417,6 +440,15 @@ endif JEMALLOC_INCLUDE_PATH = $(JEMALLOC_HOME)/include JEMALLOC_LIB_PATH = $(JEMALLOC_HOME)/lib +############################################################################## +# secure_c bound check component +############################################################################## +ifeq ($(with_openeuler_os), yes) + SECURE_C_CHECK = boundscheck +else + SECURE_C_CHECK = securec +endif + ############################################################################## # memory and thread checking component ############################################################################## @@ -487,10 +519,16 @@ LIBLLVM_BIN = $(LIBLLVM_HOME)/bin LIBLLVM_INCLUDE_PATH = $(LIBLLVM_HOME)/include LIBLLVM_LIB_PATH = $(LIBLLVM_HOME)/lib LLVM_CONFIG = $(LIBLLVM_BIN)/llvm-config -ifeq ($(enable_llvm), yes) +ifneq ($(enable_lite_mode), yes) LLVM_LIBS = $(shell $(LLVM_CONFIG) --libs) endif +############################################################################# +# xgboost component +############################################################################# +XGBOOST_INCLUDE_PATH = $(XGBOOST_HOME)/include +XGBOOST_LIB_PATH = $(XGBOOST_HOME)/lib64 + ############################################################################# # event component ############################################################################# @@ -591,6 +629,7 @@ LIBOPENSSL_LIB_PATH = $(LIBOPENSSL_HOME)/lib LIBOPENSSL_SSL_PATH = $(LIBOPENSSL_HOME)/ssl LIBOPENSSL_INCLUDE_PATH = $(LIBOPENSSL_HOME)/include +ifneq ($(enable_lite_mode), yes) ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) ############################################################################# # kmc component @@ -598,6 +637,7 @@ ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) LIBKMC_LIB_PATH = $(LIBKMC_HOME)/lib LIBKMC_INCLUDE_PATH = $(LIBKMC_HOME)/include endif +endif ############################################################################# # security component @@ -721,7 +761,7 @@ else # not PGXS endif endif -override CPPFLAGS := $(CPPFLAGS) -I$(LIBODBC_INCLUDE_PATH) -I$(LIBOBS_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) -I$(LIBOPENSSL_INCLUDE_PATH) -I${LIBORC_INCLUDE_PATH} -I${LIBPARQUET_INCLUDE_PATH} -I${PROTOBUF_INCLUDE_PATH} -I${BOOST_INCLUDE_PATH} -I$(LIBLLVM_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) -I$(CJSON_INCLUDE_PATH) -I$(NUMA_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) -I$(LZ4_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -I$(DCF_INCLUDE_PATH) -I$(ZSTD_INCLUDE_PATH) +override CPPFLAGS := $(CPPFLAGS) -I$(LIBODBC_INCLUDE_PATH) -I$(LIBOBS_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) -I$(LIBOPENSSL_INCLUDE_PATH) -I${LIBORC_INCLUDE_PATH} -I${LIBPARQUET_INCLUDE_PATH} -I${PROTOBUF_INCLUDE_PATH} -I${BOOST_INCLUDE_PATH} -I$(LIBLLVM_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) -I$(CJSON_INCLUDE_PATH) -I$(NUMA_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) -I$(LZ4_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -I$(DCF_INCLUDE_PATH) # GDS links to libevent ifeq 
($(enable_multiple_nodes), yes) @@ -740,10 +780,9 @@ ifeq ($(enable_mot), yes) override CPPFLAGS := $(CPPFLAGS) -I$(MASSTREE_INCLUDE_PATH) endif -ifeq ($(enable_llvm), yes) - override CPPFLAGS := $(CPPFLAGS) -DENABLE_LLVM_COMPILE +ifeq ($(with_openeuler_os), yes) + override CPPFLAGS := $(CPPFLAGS) -DWITH_OPENEULER_OS endif - CC = @CC@ GCC = @GCC@ C = gcc @@ -813,7 +852,7 @@ DLLWRAP = @DLLWRAP@ LIBS = @LIBS@ # append linking lib for OPENSSL LIBS += -lssl -lcrypto # append linking lib from Huawei_Secure_C -LIBS += -lsecurec +LIBS += -l$(SECURE_C_CHECK) # append linking lib for threading ifeq ($(enable_debug), no) LIBS += -pthread -D_REENTRANT -lrt -pie @@ -856,9 +895,6 @@ endif # append zlib for compression: zlib LDFLAGS += -L$(ZLIB_LIB_PATH) -I$(ZLIB_INCLUDE_PATH) -#append zstd for compression: zstd -LDFLAGS += -L$(ZSTD_LIB_PATH) -I$(ZSTD_INCLUDE_PATH) - LDFLAGS += -L$(SECURE_LIB_PATH) LDFLAGS += -L$(LIBOPENSSL_LIB_PATH) LDFLAGS += -L$(LIBSTD_LIB_PATH) @@ -1070,7 +1106,9 @@ endif libpq = -L$(libpq_builddir) -lpq ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) -libpq_ce = -L$(libpq_builddir) -lpq_ce -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -lsecurec -lkmc + ifneq ($(enable_lite_mode), yes) + libpq_ce = -L$(libpq_builddir) -lpq_ce -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -l$(SECURE_C_CHECK) -lkmc + endif else libpq_ce = -L$(libpq_builddir) -lpq_ce endif @@ -1226,7 +1264,7 @@ ifeq ($(enable_libnet), yes) LIBNET_INCLUDE_PATH = $(LIBNET_HOME)/include LIBNET_LIB_PATH = $(LIBNET_HOME)/lib64 LIBNET_BIN_PATH = $(LIBNET_HOME)/bin - LIBNET_LIBS = -L$(SECURE_LIB_PATH) -lsecurec -L$(LIBNET_LIB_PATH) -llstack -lrte_eal -lrte_kvargs -ldl + LIBNET_LIBS = -L$(SECURE_LIB_PATH) -l$(SECURE_C_CHECK) -L$(LIBNET_LIB_PATH) -llstack -lrte_eal -lrte_kvargs -ldl override CPPFLAGS := $(CPPFLAGS) -DUSE_LIBNET override CXXFLAGS := $(CXXFLAGS) -DUSE_LIBNET override CFLAGS := $(CFLAGS) -DUSE_LIBNET diff --git a/src/Makefile.global.in_for_llt b/src/Makefile.global.in_for_llt index d5c196aeb..152d0d9ab 100755 --- a/src/Makefile.global.in_for_llt +++ b/src/Makefile.global.in_for_llt @@ -390,7 +390,13 @@ LIBEDIT_LIB_PATH = $(LIBEDIT_HOME)/lib ZLIB_HOME = $(top_builddir)/../$(BINARYPATH)/zlib1.2.11/$(LIB_SUPPORT_LLT) ZLIB_INCLUDE_PATH = $(ZLIB_HOME)/include ZLIB_LIB_PATH = $(ZLIB_HOME)/lib + ############################################################################# +# xgboost component +############################################################################# +XGBOOST_HOME = $(top_builddir)/../$(BINARYPATH)/xgboost/$(LIB_SUPPORT_LLT) +XGBOOST_INCLUDE_PATH = $(XGBOOST_HOME)/include +XGBOOST_LIB_PATH = $(XGBOOST_HOME)/lib64 ############################################################################# # lz4 component @@ -512,6 +518,7 @@ ifdef PGXS override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) -I$(LIBODBC_INCLUDE_PATH) -I$(LIBOBS_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) -I$(LICENSE_INCLUDE_PATH) -I${LIBORC_INCLUDE_PATH} -I${PROTOBUF_INCLUDE_PATH} -I${BOOST_INCLUDE_PATH} -I$(LIBLLVM_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) -I$(LIBEDIT_INCLUDE_PATH) -I$(EVENT_INCLUDE_PATH) $(CPPFLAGS) + else # not PGXS override CPPFLAGS := -I$(top_srcdir)/src/include -I$(LIBODBC_INCLUDE_PATH) -I$(LIBOBS_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) -I$(LICENSE_INCLUDE_PATH) -I${LIBORC_INCLUDE_PATH} -I${PROTOBUF_INCLUDE_PATH} -I${BOOST_INCLUDE_PATH} -I$(LIBLLVM_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) -I$(LIBEDIT_INCLUDE_PATH) -I$(EVENT_INCLUDE_PATH) $(CPPFLAGS) @@ -696,6 
+703,7 @@ LIBOPENSSL_LIB_PATH = $(LIBOPENSSL_HOME)/lib LIBOPENSSL_SSL_PATH = $(LIBOPENSSL_HOME)/ssl LIBOPENSSL_INCLUDE_PATH = $(LIBOPENSSL_HOME)/include +ifneq ($(enable_lite_mode), yes) ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) ############################################################################# # kmc component @@ -704,6 +712,7 @@ LIBKMC_HOME = $(top_builddir)/../$(PLATFORMPATH)/kmc/comm LIBKMC_LIB_PATH = $(LIBKMC_HOME)/lib LIBKMC_INCLUDE_PATH = $(LIBKMC_HOME)/include endif +endif ############################################################################# diff --git a/src/bin/gs_cgroup/CMakeLists.txt b/src/bin/gs_cgroup/CMakeLists.txt index c8e1c6860..3203cb546 100755 --- a/src/bin/gs_cgroup/CMakeLists.txt +++ b/src/bin/gs_cgroup/CMakeLists.txt @@ -18,7 +18,7 @@ set(TGT_cgroup_INC set(cgroup_DEF_OPTIONS ${MACRO_OPTIONS}) set(cgroup_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(cgroup_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(cgroup_LINK_LIBS libelog.a -lpgport -lcrypt -ldl -lm -lsecurec -pthread -lrt -lminiunz -lcgroup) +set(cgroup_LINK_LIBS libelog.a -lpgport -lcrypt -ldl -lm -l${SECURE_C_CHECK} -pthread -lrt -lminiunz -lcgroup) add_bintarget(gs_cgroup TGT_cgroup_SRC TGT_cgroup_INC "${cgroup_DEF_OPTIONS}" "${cgroup_COMPILE_OPTIONS}" "${cgroup_LINK_OPTIONS}" "${cgroup_LINK_LIBS}") add_dependencies(gs_cgroup elog_static pgport_static) target_link_directories(gs_cgroup PUBLIC diff --git a/src/bin/gs_cgroup/cgexec.cpp b/src/bin/gs_cgroup/cgexec.cpp index f0d308035..5554ac83f 100644 --- a/src/bin/gs_cgroup/cgexec.cpp +++ b/src/bin/gs_cgroup/cgexec.cpp @@ -625,7 +625,6 @@ int cgexec_update_remain_value(char* relpath, u_int64_t cpushares, u_int64_t iow { struct cgroup* cg = NULL; struct cgroup_controller* cgc_cpu = NULL; - struct cgroup_controller* cgc_blkio = NULL; int ret; /* allocate new cgroup structure */ @@ -659,24 +658,6 @@ int cgexec_update_remain_value(char* relpath, u_int64_t cpushares, u_int64_t iow return -1; } - /* sles sp2 version, to update blkio value */ - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - cgc_blkio = cgroup_get_controller(cg, MOUNT_BLKIO_NAME); - if (NULL == cgc_blkio) { - fprintf(stderr, "ERROR: failed to add %s controller in %s!\n", MOUNT_BLKIO_NAME, relpath); - cgroup_free_controllers(cg); - cgroup_free(&cg); - return -1; - } - - if (ioweight && (0 != (ret = cgroup_set_value_uint64(cgc_blkio, BLKIO_WEIGHT, ioweight)))) { - fprintf(stderr, "ERROR: failed to set %s as %lu for %s\n", BLKIO_WEIGHT, ioweight, cgroup_strerror(ret)); - cgroup_free_controllers(cg); - cgroup_free(&cg); - return -1; - } - } - /* update controller into kernel */ if (0 != (ret = cgroup_modify_cgroup(cg))) { fprintf(stderr, @@ -777,48 +758,6 @@ static int cgexec_update_remain_cgroup(gscgroup_grp_t* grp, int cls) return 0; } -/* - * function name: cgexec_get_blkio_throttle_value - * description : retrive the blkio throttle value based on relative path - * and the throttle field name - * arguments : - * relpath: the relative path of one control group - * name: the throttle field name in blkio subsystem - * return value : - * NULL: abnormal - * other: normal - * - */ -static char* cgexec_get_blkio_throttle_value(const char* relpath, const char* name) -{ - struct cgroup* cg = NULL; - struct cgroup_controller* cgc = NULL; - char* val = NULL; - int ret = 0; - - /* allocate new cgroup structure */ - if ((cg = cgexec_get_cgroup(relpath)) == NULL) - return NULL; - - /* 
get controller */ - cgc = cgroup_get_controller(cg, MOUNT_BLKIO_NAME); - if (cgc == NULL) { - cgroup_free(&cg); - return NULL; - } - - ret = cgroup_get_value_string(cgc, name, &val); - if (ret) { - cgroup_free_controllers(cg); - cgroup_free(&cg); - return NULL; - } - - cgroup_free_controllers(cg); - cgroup_free(&cg); - return val; -} - /* * function name: cgexec_update_cgroup_value * description : update the Cgroup information @@ -837,11 +776,7 @@ static int cgexec_update_cgroup_value(gscgroup_grp_t* grp) struct cgroup* cg = NULL; long cpushares = grp->ainfo.shares; struct cgroup_controller* cgc_cpu = NULL; - long ioweight = grp->ainfo.weight; - struct cgroup_controller* cgc_blkio = NULL; - int ret, pass = 0; - char* ioval = NULL; - errno_t rc; + int ret; /* get the relative path */ if (NULL == (relpath = gscgroup_get_relative_path(grp->gid, cgutil_vaddr, current_nodegroup))) @@ -900,67 +835,6 @@ static int cgexec_update_cgroup_value(gscgroup_grp_t* grp) } } - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - /* get the BLKIO controller */ - cgc_blkio = cgroup_get_controller(cg, MOUNT_BLKIO_NAME); - if (NULL == cgc_blkio) { - fprintf(stderr, "ERROR: failed to add %s controller in %s!\n", MOUNT_BLKIO_NAME, grp->grpname); - goto error; - } - - if (0 == cgutil_opt.fixed || cgutil_opt.recover) { - /* when it is dynamic value, it updates the blkio.weight value */ - if (ioweight && (0 != (ret = cgroup_set_value_uint64(cgc_blkio, BLKIO_WEIGHT, ioweight)))) { - fprintf( - stderr, "ERROR: failed to set %s as %ld for %s\n", BLKIO_WEIGHT, ioweight, cgroup_strerror(ret)); - goto error; - } - } else if (cgutil_opt.fixed || cgutil_opt.recover) { - /* when it is fixed value, it updates the following value */ - if (cgutil_opt.bpsread[0] && - 0 != (ret = cgroup_set_value_string(cgc_blkio, BLKIO_BPSREAD, cgutil_opt.bpsread))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_BPSREAD, - cgutil_opt.bpsread, - cgroup_strerror(ret)); - goto error; - } - - if (cgutil_opt.iopsread[0] && - 0 != (ret = cgroup_set_value_string(cgc_blkio, BLKIO_IOPSREAD, cgutil_opt.iopsread))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_IOPSREAD, - cgutil_opt.iopsread, - cgroup_strerror(ret)); - goto error; - } - - if (cgutil_opt.bpswrite[0] && - 0 != (ret = cgroup_set_value_string(cgc_blkio, BLKIO_BPSWRITE, cgutil_opt.bpswrite))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_BPSWRITE, - cgutil_opt.bpswrite, - cgroup_strerror(ret)); - goto error; - } - - if (cgutil_opt.iopswrite[0] && - 0 != (ret = cgroup_set_value_string(cgc_blkio, BLKIO_IOPSWRITE, cgutil_opt.iopswrite))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_IOPSWRITE, - cgutil_opt.iopswrite, - cgroup_strerror(ret)); - goto error; - } - - pass++; - } - } - /* modify the value into kernel */ if (0 != (ret = cgroup_modify_cgroup(cg))) { fprintf(stderr, @@ -974,63 +848,6 @@ static int cgexec_update_cgroup_value(gscgroup_grp_t* grp) cgroup_free_controllers(cg); cgroup_free(&cg); - /* save the data into configure file */ - if (pass && cgutil_opt.bpsread[0]) { - ioval = cgexec_get_blkio_throttle_value(relpath, BLKIO_BPSREAD); - if (NULL == ioval || IODATA_LEN < (strlen(ioval) + 1)) - fprintf(stderr, - "ERROR: the blkio value %s for %s can't be saved " - "for no spaces in config file.\n", - cgutil_opt.bpsread, - BLKIO_BPSREAD); - else { - rc = snprintf_s(grp->ainfo.bpsread, IODATA_LEN, IODATA_LEN - 1, "%s", ioval); - securec_check_intval(rc, free(relpath), -1); - } - 
} - - if (pass && cgutil_opt.iopsread[0]) { - ioval = cgexec_get_blkio_throttle_value(relpath, BLKIO_IOPSREAD); - if (NULL == ioval || IODATA_LEN < (strlen(ioval) + 1)) - fprintf(stderr, - "ERROR: the blkio value %s for %s can't be saved " - "for no spaces in config file.\n", - cgutil_opt.iopsread, - BLKIO_IOPSREAD); - else { - rc = snprintf_s(grp->ainfo.iopsread, IODATA_LEN, IODATA_LEN - 1, "%s", ioval); - securec_check_intval(rc, free(relpath), -1); - } - } - - if (pass && cgutil_opt.bpswrite[0]) { - ioval = cgexec_get_blkio_throttle_value(relpath, BLKIO_BPSWRITE); - if (NULL == ioval || IODATA_LEN < (strlen(ioval) + 1)) - fprintf(stderr, - "ERROR: the blkio value %s for %s can't be saved " - "for no spaces in config file.\n", - cgutil_opt.bpswrite, - BLKIO_BPSWRITE); - else { - rc = snprintf_s(grp->ainfo.bpswrite, IODATA_LEN, IODATA_LEN - 1, "%s", ioval); - securec_check_intval(rc, free(relpath), -1); - } - } - - if (pass && cgutil_opt.iopswrite[0]) { - ioval = cgexec_get_blkio_throttle_value(relpath, BLKIO_IOPSWRITE); - if (NULL == ioval || IODATA_LEN < (strlen(ioval) + 1)) - fprintf(stderr, - "ERROR: the blkio value %s for %s can't be saved " - "for no spaces in config file.\n", - cgutil_opt.iopswrite, - BLKIO_IOPSWRITE); - else { - rc = snprintf_s(grp->ainfo.iopswrite, IODATA_LEN, IODATA_LEN - 1, "%s", ioval); - securec_check_intval(rc, free(relpath), -1); - } - } - free(relpath); relpath = NULL; return 0; @@ -1096,7 +913,6 @@ static int cgexec_create_default_cgroup(char* relpath, int cpushares, int ioweig int ret; struct cgroup* cg = NULL; struct cgroup_controller* cgc_cpu = NULL; - struct cgroup_controller* cgc_blkio = NULL; struct cgroup_controller* cgc_cpuset = NULL; struct cgroup_controller* cgc_cpuacct = NULL; @@ -1134,20 +950,6 @@ static int cgexec_create_default_cgroup(char* relpath, int cpushares, int ioweig goto error; } - /* set the blkio.weight value */ - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - cgc_blkio = cgroup_add_controller(cg, MOUNT_BLKIO_NAME); - if (NULL == cgc_blkio) { - fprintf(stderr, "ERROR: failed to add %s controller for %s!\n", MOUNT_BLKIO_NAME, relpath); - goto error; - } - - if (ioweight && (0 != (ret = cgroup_set_value_uint64(cgc_blkio, BLKIO_WEIGHT, ioweight)))) { - fprintf(stderr, "ERROR: failed to set %s as %d for %s\n", BLKIO_WEIGHT, ioweight, cgroup_strerror(ret)); - goto error; - } - } - /* set the cpuset.cpus value */ cgc_cpuset = cgroup_add_controller(cg, MOUNT_CPUSET_NAME); if (NULL == cgc_cpuset) { @@ -1354,7 +1156,6 @@ int cgexec_create_new_cgroup(gscgroup_grp_t* grp) int ret; struct cgroup* cg = NULL; struct cgroup_controller* cg_controllers[MOUNT_SUBSYS_KINDS] = {0}; - errno_t sret; if (NULL == (relpath = gscgroup_get_relative_path(grp->gid, cgutil_vaddr, current_nodegroup))) return -1; @@ -1399,25 +1200,6 @@ int cgexec_create_new_cgroup(gscgroup_grp_t* grp) goto error; } - /* set the blkio.weight value */ - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - cg_controllers[MOUNT_BLKIO_ID] = cgroup_add_controller(cg, MOUNT_BLKIO_NAME); - if (NULL == cg_controllers[MOUNT_BLKIO_ID]) { - fprintf(stderr, "ERROR: failed to add %s controller for %s!\n", MOUNT_BLKIO_NAME, grp->grpname); - goto error; - } - - if (grp->ainfo.weight && - (0 != (ret = cgroup_set_value_uint64(cg_controllers[MOUNT_BLKIO_ID], BLKIO_WEIGHT, grp->ainfo.weight)))) { - fprintf(stderr, - "ERROR: failed to set %s as %d for %s\n", - BLKIO_WEIGHT, - grp->ainfo.weight, - cgroup_strerror(ret)); - goto error; - } - } - 
cg_controllers[MOUNT_CPUSET_ID] = cgroup_add_controller(cg, MOUNT_CPUSET_NAME); if (NULL == cg_controllers[MOUNT_CPUSET_ID]) { fprintf(stderr, "ERROR: failed to add %s controller for %s!\n", MOUNT_CPUSET_NAME, grp->grpname); @@ -1458,66 +1240,6 @@ int cgexec_create_new_cgroup(gscgroup_grp_t* grp) cgroup_free_controllers(cg); cgroup_free(&cg); - /* update the blkio throttle value */ - if (grp->ainfo.bpsread[0] && - (0 != (ret = cgexec_set_blkio_throttle_value(relpath, BLKIO_BPSREAD, grp->ainfo.bpsread)))) { - fprintf( - stderr, "ERROR: failed to set %s as %s for %s\n", BLKIO_BPSREAD, grp->ainfo.bpsread, cgroup_strerror(ret)); - free(relpath); - relpath = NULL; - return -1; - } - /* ok, the blkio throttle values are reverted, then revert configure files. */ - if (cgutil_opt.revert && grp->ainfo.bpsread[0]) { - sret = memset_s(grp->ainfo.bpsread, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, free(relpath), -1); - } - - if (grp->ainfo.iopsread[0] && - (0 != (ret = cgexec_set_blkio_throttle_value(relpath, BLKIO_IOPSREAD, grp->ainfo.iopsread)))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_IOPSREAD, - grp->ainfo.iopsread, - cgroup_strerror(ret)); - free(relpath); - relpath = NULL; - return -1; - } - if (cgutil_opt.revert && grp->ainfo.iopsread[0]) { - sret = memset_s(grp->ainfo.iopsread, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, free(relpath), -1); - } - if (grp->ainfo.bpswrite[0] && - (0 != (ret = cgexec_set_blkio_throttle_value(relpath, BLKIO_BPSWRITE, grp->ainfo.bpswrite)))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_BPSWRITE, - grp->ainfo.bpswrite, - cgroup_strerror(ret)); - free(relpath); - relpath = NULL; - return -1; - } - if (cgutil_opt.revert && grp->ainfo.bpswrite[0]) { - sret = memset_s(grp->ainfo.bpswrite, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, free(relpath), -1); - } - if (grp->ainfo.iopswrite[0] && - (0 != (ret = cgexec_set_blkio_throttle_value(relpath, BLKIO_IOPSWRITE, grp->ainfo.iopswrite)))) { - fprintf(stderr, - "ERROR: failed to set %s as %s for %s\n", - BLKIO_IOPSWRITE, - grp->ainfo.iopswrite, - cgroup_strerror(ret)); - free(relpath); - relpath = NULL; - return -1; - } - if (cgutil_opt.revert && grp->ainfo.iopswrite[0]) { - sret = memset_s(grp->ainfo.iopswrite, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, free(relpath), -1); - } free(relpath); relpath = NULL; return 0; @@ -2209,7 +1931,7 @@ int cgexec_copy_next_level_cgroup(const char* relpath, gscgroup_grp_t* grp) /* add the controller */ for (int i = 0; i < MOUNT_SUBSYS_KINDS; ++i) { - if ((i == MOUNT_BLKIO_ID && !(cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version())) || i == MOUNT_MEMORY_ID) + if (i == MOUNT_BLKIO_ID || i == MOUNT_MEMORY_ID) continue; cgc[i] = cgroup_add_controller(newcg, cgutil_subsys_table[i]); @@ -3408,7 +3130,6 @@ static int cgexec_update_fixed_class_cgroup(void) int i, cls = 0, wd = 0; char cpusets[CPUSET_LEN]; int need_reset = 0; - errno_t sret; /* check if the class exists */ for (i = CLASSCG_START_ID; i <= CLASSCG_END_ID; i++) { @@ -3426,30 +3147,6 @@ static int cgexec_update_fixed_class_cgroup(void) wd = cgexec_search_workload_group(cls); if (wd) { - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - if ((cgutil_opt.bpsread[0] || cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || - cgutil_opt.iopswrite[0]) && - -1 == cgexec_update_cgroup_value(cgutil_vaddr[wd])) - return -1; - - if (cgutil_opt.bpsread[0]) { - sret = memset_s(cgutil_opt.bpsread, IODATA_LEN, 0, IODATA_LEN); - 
securec_check_errno(sret, , -1); - } - if (cgutil_opt.iopsread[0]) { - sret = memset_s(cgutil_opt.iopsread, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, , -1); - } - if (cgutil_opt.bpswrite[0]) { - sret = memset_s(cgutil_opt.bpswrite, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, , -1); - } - if (cgutil_opt.iopswrite[0]) { - sret = memset_s(cgutil_opt.iopswrite, IODATA_LEN, 0, IODATA_LEN); - securec_check_errno(sret, , -1); - } - } - if (cgutil_opt.setspct) { /* * step 1 check whether the newly set percentage makes the whole percentage higher than 100%. @@ -3485,12 +3182,6 @@ static int cgexec_update_fixed_class_cgroup(void) return -1; } } - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - if ((cgutil_opt.bpsread[0] || cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || - cgutil_opt.iopswrite[0]) && - (-1 == cgexec_update_cgroup_value(cgutil_vaddr[cls]))) - return -1; - } if (cgutil_opt.setspct) { /* * step 1: check whether the newly set percentage of class is legal or not. @@ -3566,12 +3257,6 @@ static int cgexec_update_fixed_backend_cgroup(void) } if (bkd) { - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - if ((cgutil_opt.bpsread[0] || cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || - cgutil_opt.iopswrite[0]) && - (-1 == cgexec_update_cgroup_value(cgutil_vaddr[bkd]))) - return -1; - } /* set cpuset by percentage*/ if (cgutil_opt.setspct) { /* the same steps with updating workload groups.*/ @@ -3633,12 +3318,6 @@ static int cgexec_update_fixed_top_cgroup(void) } if (top) { - if (cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) { - if ((cgutil_opt.bpsread[0] || cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || - cgutil_opt.iopswrite[0]) && - (-1 == cgexec_update_cgroup_value(cgutil_vaddr[top]))) - return -1; - } if (cgutil_opt.setspct) { if ((need_reset = cgexec_check_cpuset_percent(TOPCG_GAUSSDB, top, cpusets)) == -1) return -1; @@ -3730,8 +3409,8 @@ int cgexec_check_mount_for_upgrade(void) * if no system in default point, we need not umount them. */ for (i = 0; i < MOUNT_SUBSYS_KINDS; ++i) { - /* blkio is invalid, ignore it. */ - if ((i == MOUNT_BLKIO_ID && !(cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version())) || i == MOUNT_MEMORY_ID) + /* ignore blkio and memory. */ + if (i == MOUNT_BLKIO_ID || i == MOUNT_MEMORY_ID) continue; if (*cgutil_opt.mpoints[i]) { @@ -3870,7 +3549,7 @@ int cgexec_detect_cgroup_mount(void) } for (i = 0; i < MOUNT_SUBSYS_KINDS; ++i) { - if (i == MOUNT_BLKIO_ID && !(cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version())) + if (i == MOUNT_BLKIO_ID) continue; /* a subsys has not mounted, we must make sure its mount point is valid. */ @@ -4061,7 +3740,7 @@ int cgexec_mount_root_cgroup(void) for (i = 0; i < MOUNT_SUBSYS_KINDS; ++i) { /* 'blkio' is invalid, ignore it */ - if (i == MOUNT_BLKIO_ID && !(cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version())) + if (i == MOUNT_BLKIO_ID) continue; /* If the subsys has not mounted, we will use new point to mount. 
*/ @@ -4148,7 +3827,7 @@ int cgexec_umount_root_cgroup(void) } for (i = 0; i < MOUNT_SUBSYS_KINDS; ++i) { /* 'blkio' is invalid, ignore it */ - if (i == MOUNT_BLKIO_ID && !(cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version())) + if (i == MOUNT_BLKIO_ID) continue; if (*cgutil_opt.mpoints[i] == '\0') { diff --git a/src/bin/gs_cgroup/cgptree.cpp b/src/bin/gs_cgroup/cgptree.cpp index 5568d0bcf..81ad6fdb8 100644 --- a/src/bin/gs_cgroup/cgptree.cpp +++ b/src/bin/gs_cgroup/cgptree.cpp @@ -337,23 +337,6 @@ void cgptree_get_group_info(struct group_info* curr_ginfo, const struct cgroup_m error = cgroup_get_value_uint64(cgc, CPU_SHARES, &(curr_ginfo->cpu_shares)); ERROR_REPORT(error, CPU_SHARES, curr_ginfo->grpname); } - /* get values of blkio.weight */ - if ((cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()) && (0 == strcmp(mount_info.name, MOUNT_BLKIO_NAME))) { - error = cgroup_get_value_uint64(cgc, BLKIO_WEIGHT, &(curr_ginfo->blkio_weight)); - ERROR_REPORT(error, BLKIO_WEIGHT, curr_ginfo->grpname); - - error = cgroup_get_value_string(cgc, BLKIO_BPSREAD, &(curr_ginfo->blkio_bpsread)); - ERROR_REPORT(error, BLKIO_BPSREAD, curr_ginfo->grpname); - - error = cgroup_get_value_string(cgc, BLKIO_IOPSREAD, &(curr_ginfo->blkio_iopsread)); - ERROR_REPORT(error, BLKIO_IOPSREAD, curr_ginfo->grpname); - - error = cgroup_get_value_string(cgc, BLKIO_BPSWRITE, &(curr_ginfo->blkio_bpswrite)); - ERROR_REPORT(error, BLKIO_BPSWRITE, curr_ginfo->grpname); - - error = cgroup_get_value_string(cgc, BLKIO_IOPSWRITE, &(curr_ginfo->blkio_iopswrite)); - ERROR_REPORT(error, BLKIO_IOPSWRITE, curr_ginfo->grpname); - } /* get values of cpuset.cpus */ if (0 == strcmp(mount_info.name, MOUNT_CPUSET_NAME)) { error = cgroup_get_value_string(cgc, CPUSET_CPUS, &(curr_ginfo->cpuset_cpus)); @@ -583,8 +566,8 @@ static struct group_info* cgptree_get_cgroup_new() while (error != ECGEOF) { - if (*info.name && strcmp(info.name, MOUNT_CPU_NAME) != 0 && strcmp(info.name, MOUNT_BLKIO_NAME) != 0 && - strcmp(info.name, MOUNT_CPUSET_NAME) != 0 && strcmp(info.name, MOUNT_CPUACCT_NAME) != 0) { + if (*info.name && strcmp(info.name, MOUNT_CPU_NAME) != 0 && strcmp(info.name, MOUNT_CPUSET_NAME) != 0 && + strcmp(info.name, MOUNT_CPUACCT_NAME) != 0) { error = cgroup_get_controller_next(&ctrl_handle, &info); if (error && error != ECGEOF) { fprintf(stderr, "get next controller failed: %s\n", cgroup_strerror(error)); @@ -926,7 +909,7 @@ int cgptree_display_cgroups(void) /* print current all mount points */ for (int i = 0; i < MOUNT_SUBSYS_KINDS; ++i) { - if (i == MOUNT_BLKIO_ID && !(cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version())) + if (i == MOUNT_BLKIO_ID) continue; if (i == 0) diff --git a/src/bin/gs_cgroup/cgutil.h b/src/bin/gs_cgroup/cgutil.h index af8fb2f22..dfd4731e9 100644 --- a/src/bin/gs_cgroup/cgutil.h +++ b/src/bin/gs_cgroup/cgutil.h @@ -69,10 +69,6 @@ typedef struct { char hpath[MAXPGPATH]; /* GAUSSHOME path */ char mpoint[MAXPGPATH]; /* mount point path */ char mpoints[MOUNT_SUBSYS_KINDS][MAXPGPATH]; /* subsys mount points */ - char bpsread[IODATA_LEN]; /* io bps read data */ - char iopsread[IODATA_LEN]; /* io iops read data */ - char bpswrite[IODATA_LEN]; /* io bps write data */ - char iopswrite[IODATA_LEN]; /* io iops write data */ char topname[GPNAME_LEN]; /* top group name */ char bkdname[GPNAME_LEN]; /* backend group name */ char clsname[GPNAME_LEN]; /* class name */ diff --git a/src/bin/gs_cgroup/main.cpp b/src/bin/gs_cgroup/main.cpp index f714b3ca2..a8c0b933f 100644 --- a/src/bin/gs_cgroup/main.cpp +++ 
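With the SLES 11 SP2 special case removed, every subsystem loop in gs_cgroup collapses to the same unconditional skip of blkio (and, where applicable, memory). A sketch of the resulting loop shape, with local stand-ins for the `MOUNT_*_ID` constants (the real table has its own ordering and entries):

```
#include <cstdio>

// Local stand-ins mirroring gs_cgroup's MOUNT_*_ID naming; the actual
// constants and table live in the tool's headers.
enum { MOUNT_CPU_ID, MOUNT_BLKIO_ID, MOUNT_CPUSET_ID, MOUNT_CPUACCT_ID, MOUNT_MEMORY_ID, MOUNT_SUBSYS_KINDS };

static const char* subsys_table[MOUNT_SUBSYS_KINDS] = {"cpu", "blkio", "cpuset", "cpuacct", "memory"};

int main()
{
    for (int i = 0; i < MOUNT_SUBSYS_KINDS; ++i) {
        /* blkio is no longer managed at all, and memory was already skipped */
        if (i == MOUNT_BLKIO_ID || i == MOUNT_MEMORY_ID) {
            continue;
        }
        printf("would mount/attach controller: %s\n", subsys_table[i]);
    }
    return 0;
}
```

Dropping the runtime `cgutil_is_sles11_sp2 || cgexec_check_SLESSP2_version()` probe also removes a version check from every hot path through these loops.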
b/src/bin/gs_cgroup/main.cpp @@ -105,13 +105,11 @@ static void usage(void) " with '-N' to display the control group configuration of the specified logical cluster.\n" " -P : display all cgroups tree information of whole cluster.\n" " --penalty : the penalty exception flag, should be used with '-E data'.\n" - " -r data : blkio bps read\n" " --recover : recover the group configure to last change by normal user.\n" " with '-N' to recover control group of the specified logical cluster.\n" " --refresh : refresh the cgroup group based on the configuration file.\n" " with '-N' to refresh control group of the specified logical cluster.\n" " --revert : revert the group to default.\n" - " -R data : blkio iops read\n" " -s pct : class group percentage\n" " -S name : specify the Class name together with '-c', '-d', '-u' or '-E' option\n" " if the class name is \"default\", it will be treated as \"DefaultClass\". \n" @@ -124,8 +122,6 @@ static void usage(void) " with '-N' to update control group of the specified logical cluster.\n" " -U name : the user name of database\n" " -V [--version] : show the version.\n" - " -w data : blkio bps write\n" - " -W data : blkio iops write\n" "\n" "Examples:\n" "Root user can execute:\n" @@ -147,14 +143,6 @@ static void usage(void) " update the CPU cores of Gaussdb:user cgroup as 2~8.\n" "gs_cgroup -u --fixed -S class1 -s 40: \n" " update the CPU cores percentage of class1 group as 40%% of the Top group: Class\n" - "gs_cgroup -u -S class -r \"major:minor value\": \n" - " update the BLKIO Readbps throttle value of class cgroup.\n" - " The major:minor value can be retrieved by following steps: \n" - " a. run \"df\" command to find the disk which will be controlled;\n" - " b. run \"ls -l device\" to find the major and minor value of the disk;\n" - " device can't include the device number, such as sdd is correct rather than sdd1;\n" - " it may display: brw-rw---- 1 root disk 8, 48 Mar 10 05:44 device\n" - " so the major:minor value is \"8:48\";\n" "gs_cgroup -S class -G wg -E \"blocktime=5,elapsedtime=5\" -a\n" "gs_cgroup -S class -G wg -E \"spillsize=256,broadcastsize=100\" -a\n" "gs_cgroup -c -N ngname: create control groups for the logical cluster ngname.\n" @@ -370,20 +358,6 @@ static void check_input_for_security(char* input) } } -/* - * @Description: check iops and bps values. - * @IN void - * @Return: 1: valid 0: invalid - * @See also: - */ -static int check_bps_or_iops_value(void) -{ - if (cgutil_opt.bpsread[0] || cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || cgutil_opt.iopswrite[0]) - return 1; - - return 0; -} - /* * @Description: Check whether the name of class, * Class group and Workload group is valid. @@ -515,37 +489,6 @@ static int check_input_valid(void) return 0; } -/* - * @Description: check io values. 
- * @IN void - * @Return: -1: abnormal 0: normal - * @See also: - */ -static int check_io_value(void) -{ - /* check exception data */ - if (!(cgutil_opt.cflag || cgutil_opt.dflag || cgutil_opt.uflag || *cgutil_opt.edata)) { - fprintf(stderr, - "ERROR: please specify one option from '-c', '-d', '-u', '-E' " - "when using class name!\n"); - return -1; - } - - /* check group percentage with '-u' flag */ - if (cgutil_opt.uflag == 0 || cgutil_opt.clspct || cgutil_opt.grppct) - return 0; - - /* check io values with '--fixed' flag */ - if (cgutil_opt.fixed && cgutil_opt.setspct == 0 && cgutil_opt.setfixed == 0 && !check_bps_or_iops_value()) { - fprintf(stderr, - "ERROR: please specified the fixed percent or IO values " - "when updating fixed values!\n"); - return -1; - } - - return 0; -} - /* * @Description: check user info with flags. * @IN void @@ -646,13 +589,6 @@ static int check_flag_process(void) */ static int check_group_name_process(int top, int bkd) { - /* class group name special process */ - if (cgutil_opt.clsname[0] != '\0') { - if (-1 == check_io_value()) { - return -1; - } - } - /* check class name and percentage */ if (cgutil_opt.clspct && '\0' == cgutil_opt.clsname[0]) { fprintf(stderr, @@ -702,8 +638,7 @@ static int check_group_name_process(int top, int bkd) fprintf(stderr, "ERROR: please specify the top dynamic percent when using top name!\n"); return -1; } else if (cgutil_opt.fixed && - !(cgutil_opt.setspct || cgutil_opt.toppct || top || cgutil_opt.bpsread[0] || - cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || cgutil_opt.iopswrite[0])) { + !(cgutil_opt.setspct || cgutil_opt.toppct || top)) { fprintf(stderr, "ERROR: please specify the cpu core percent or IO values " "when updating fixed values!\n"); @@ -721,8 +656,7 @@ static int check_group_name_process(int top, int bkd) } /* check backend percent with '--fixed' flag */ - if (cgutil_opt.fixed && !(cgutil_opt.setspct || cgutil_opt.bkdpct || bkd || cgutil_opt.bpsread[0] || - cgutil_opt.iopsread[0] || cgutil_opt.bpswrite[0] || cgutil_opt.iopswrite[0])) { + if (cgutil_opt.fixed && !(cgutil_opt.setspct || cgutil_opt.bkdpct || bkd)) { fprintf(stderr, "ERROR: please specified the cpu core percent or IO values " "when updating fixed values!\n"); @@ -1119,24 +1053,6 @@ static int parse_options(int argc, char** argv) break; case 'P': /* display Cgroup tree information */ cgutil_opt.ptree = 1; - break; - case 'r': /* IO bps read */ - sret = strncpy_s(cgutil_opt.bpsread, IODATA_LEN, optarg, IODATA_LEN - 1); - securec_check_errno(sret, , -1); - check_input_for_security(cgutil_opt.bpsread); - - if (*cgutil_opt.bpsread == '\0') - fprintf(stdout, "NOTICE: invalid IO value for '-r' flag!\n"); - - break; - case 'R': /* IO iops read */ - sret = strncpy_s(cgutil_opt.iopsread, IODATA_LEN, optarg, IODATA_LEN - 1); - securec_check_errno(sret, , -1); - check_input_for_security(cgutil_opt.iopsread); - - if (*cgutil_opt.iopsread == '\0') - fprintf(stdout, "NOTICE: invalid IO value for '-R' flag!\n"); - break; case 's': /* Class group percentage */ if (check_and_get_group_percent(optarg, "class") == -1) @@ -1175,16 +1091,6 @@ static int parse_options(int argc, char** argv) case 'V': /* version */ cgutil_version = DEF_GS_VERSION; return 0; - case 'w': /* IO bps write */ - sret = strncpy_s(cgutil_opt.bpswrite, IODATA_LEN, optarg, IODATA_LEN - 1); - securec_check_errno(sret, , -1); - check_input_for_security(cgutil_opt.bpswrite); - break; - case 'W': /* IO iops write */ - sret = strncpy_s(cgutil_opt.iopswrite, IODATA_LEN, optarg, IODATA_LEN - 1); - 
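Retiring `-r`, `-R`, `-w` and `-W` takes both the optstring letters and their `case` arms out of `parse_options`, after which getopt itself rejects the old flags as unknown options. A reduced sketch of the pattern with only two surviving options (the real tool parses many more):

```
#include <cstdio>
#include <unistd.h>

// Reduced sketch: once 'r', 'R', 'w', 'W' are absent from optstring and
// the switch, getopt() reports the retired blkio flags as '?'.
int main(int argc, char** argv)
{
    int c;
    while ((c = getopt(argc, argv, "s:S:")) != -1) { /* retired letters removed */
        switch (c) {
            case 's':
                printf("class percentage: %s\n", optarg);
                break;
            case 'S':
                printf("class name: %s\n", optarg);
                break;
            default: /* '?' covers -r/-R/-w/-W and anything else unknown */
                fprintf(stderr, "unknown or retired option\n");
                return 1;
        }
    }
    return 0;
}
```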
securec_check_errno(sret, , -1); - check_input_for_security(cgutil_opt.iopswrite); - break; case 1: if (IS_EXCEPT_FLAG(cgutil_opt.eflag, EXCEPT_NONE)) cgutil_opt.eflag = EXCEPT_FLAG(EXCEPT_PENALTY); diff --git a/src/bin/gs_guc/CMakeLists.txt b/src/bin/gs_guc/CMakeLists.txt index 4af9cd3df..f6d3b8903 100755 --- a/src/bin/gs_guc/CMakeLists.txt +++ b/src/bin/gs_guc/CMakeLists.txt @@ -16,7 +16,7 @@ set(TGT_guc_INC set(guc_DEF_OPTIONS ${MACRO_OPTIONS}) set(guc_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(guc_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(guc_LINK_LIBS libelog.a libconfig.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(guc_LINK_LIBS libelog.a libconfig.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) set(guc_LINK_DEPEND elog_static config_static pgport_static) set(guc_LINK_DIRS ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${KERBEROS_LIB_PATH} ${CMAKE_BINARY_DIR}/lib ${LIBCURL_LIB_PATH} ${ZLIB_LIB_PATH} ${LIBORC_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${LIBEDIT_LIB_PATH} ${SECURE_LIB_PATH}) @@ -54,7 +54,7 @@ set(TGT_encrypt_INC set(encrypt_DEF_OPTIONS ${MACRO_OPTIONS}) set(encrypt_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(encrypt_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(encrypt_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(encrypt_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(encrypt TGT_encrypt_SRC TGT_encrypt_INC "${encrypt_DEF_OPTIONS}" "${encrypt_COMPILE_OPTIONS}" "${encrypt_LINK_OPTIONS}" "${encrypt_LINK_LIBS}") add_dependencies(encrypt elog_static pgport_static) target_link_directories(encrypt PUBLIC diff --git a/src/bin/gs_guc/cluster_guc.conf b/src/bin/gs_guc/cluster_guc.conf index a3fd6c641..a19a76d86 100755 --- a/src/bin/gs_guc/cluster_guc.conf +++ b/src/bin/gs_guc/cluster_guc.conf @@ -36,6 +36,7 @@ allow_system_table_mods|bool|0,0|NULL|NULL| application_name|string|0,0|NULL|NULL| archive_command|string|0,0|NULL|NULL| archive_dest|string|0,0|NULL|NULL| +archive_interval|int|1,1000|NULL|NULL| archive_mode|bool|0,0|NULL|When wal_level set to minimal, parameters archive_mode can not be used.| archive_timeout|int|0,1073741823|s|Forced to switch WAL segment exceeds the parameter setting time. Since forced to switch off prematurely archive remains intact archive the same length. 
Therefore, archive_timeout to occupy a very small value will result in a huge archive storage space, it is recommended archive_timeout set to 60 seconds.| array_nulls|bool|0,0|NULL|NULL| @@ -45,6 +46,7 @@ audit_database_process|int|0,1|NULL|NULL| audit_directory|string|0,0|NULL|NULL| audit_dml_state|int|0,1|NULL|NULL| audit_dml_state_select|int|0,1|NULL|NULL| +audit_xid_info|int|0,1|NULL|NULL| audit_enabled|bool|0,0|NULL|NULL| audit_file_remain_threshold|int|1,1048576|NULL|NULL| audit_file_remain_time|int|0,730|NULL|NULL| @@ -55,7 +57,7 @@ audit_resource_policy|bool|0,0|NULL|NULL| audit_rotation_interval|int|1,35791394|min|NULL| audit_rotation_size|int|1024,1048576|kB|NULL| audit_space_limit|int|1024,1073741824|kB|NULL| -audit_system_object|int|0,2097151|NULL|NULL| +audit_system_object|int|0,67108863|NULL|NULL| audit_user_locked|int|0,1|NULL|NULL| audit_user_violation|int|0,1|NULL|NULL| audit_set_parameter|int|0,1|NULL|NULL| @@ -161,12 +163,14 @@ enable_codegen|bool|0,0|NULL|NULL| enable_codegen_print|bool|0,0|NULL|Enable dump for llvm function| enable_delta_store|bool|0,0|NULL|NULL| enable_default_cfunc_libpath|bool|0,0|NULL|NULL| +enable_defer_calculate_snapshot|bool|0,0|NULL|NULL| codegen_cost_threshold|int|0,2147483647|NULL|Decided to use LLVM optimization or not| codegen_strategy|enum|partial,pure|NULL|NULL| enable_compress_spill|bool|0,0|NULL|NULL| enable_constraint_optimization|bool|0,0|NULL|Information Constrained Optimization is only limited to the HDFS foreign table. When you execute a query which does not contain HDFS foreign table, the parameter is set to off.| enable_csqual_pushdown|bool|0,0|NULL|NULL| enable_data_replicate|bool|0,0|NULL|When this parameter is set on, replication_type must be 0.| +enable_wal_shipping_compression|bool|0,0|NULL|NULL| enable_mix_replication|bool|0,0|NULL|NULL| enable_instance_metric_persistent|bool|0,0|NULL|NULL| enable_logical_io_statistics|bool|0,0|NULL|NULL| @@ -175,6 +179,7 @@ enable_compress_hll|bool|0,0|NULL|NULL| enable_fast_numeric|bool|0,0|NULL|Enable numeric optimize.| enable_force_vector_engine|bool|0,0|NULL|NULL| enable_global_plancache|bool|0,0|NULL|NULL| +enable_global_syscache|bool|0,0|NULL|NULL| gpc_clean_timeout|int|300,86400|NULL|NULL| enable_hashagg|bool|0,0|NULL|NULL| enable_hashjoin|bool|0,0|NULL|NULL| @@ -186,6 +191,7 @@ enable_kill_query|bool|0,0|NULL|NULL| enable_material|bool|0,0|NULL|NULL| enable_memory_limit|bool|0,0|NULL|NULL| enable_memory_context_control|bool|0,0|NULL|NULL| +enable_memory_context_check_debug|bool|0,0|NULL|NULL| enable_mergejoin|bool|0,0|NULL|NULL| enable_nestloop|bool|0,0|NULL|NULL| enable_index_nestloop|bool|0,0|NULL|NULL| @@ -209,10 +215,12 @@ enable_sort|bool|0,0|NULL|NULL| enable_incremental_catchup|bool|0,0|NULL|NULL| wait_dummy_time|int|1,2147483647|NULL|NULL| max_active_global_temporary_table|int|0,1000000|NULL|NULL| +max_inner_tool_connections|int|1,0x3FFFF|NULL|NULL| max_recursive_times|int|0,2147483647|NULL|NULL| enable_tidscan|bool|0,0|NULL|NULL| enable_thread_pool|bool|0,0|NULL|NULL| thread_pool_attr|string|0,0|NULL|NULL| +thread_pool_stream_attr|string|0,0|NULL|NULL| track_stmt_retention_time|string|0,0|NULL|NULL| enable_vector_engine|bool|0,0|NULL|NULL| enableseparationofduty|bool|0,0|NULL|NULL| @@ -305,7 +313,7 @@ enable_asp|bool|0,0|NULL|NULL| enable_startwith_debug|bool|0,0|NULL|NULL| enable_stmt_track|bool|0,0|NULL|NULL| track_stmt_parameter|bool|0,0|NULL|NULL| -asp_sample_num|int|10000,100000|NULL|NULL| +asp_sample_num|int|10,100000|NULL|NULL| asp_sample_interval|int|1,10|s|NULL| 
asp_flush_rate|int|1,10|NULL|NULL| asp_retention_days|int|1,7|NULL|NULL| @@ -335,10 +343,11 @@ max_pred_locks_per_transaction|int|10,2147483647|NULL|NULL| max_prepared_transactions|int|0,262143|NULL|NULL| max_process_memory|int|2097152,2147483647|kB|NULL| local_syscache_threshold|int|1024,524288|kB|NULL| +global_syscache_threshold|int|16384,1073741824|kB|NULL| session_statistics_memory|int|5120,1073741823|kB|NULL| session_history_memory|int|10240,1073741823|kB|NULL| max_query_retry_times|int|0,20|NULL|NULL| -max_replication_slots|int|0,262143|NULL|NULL| +max_replication_slots|int|0,1024|NULL|NULL| enable_slot_log|bool|0,0|NULL|NULL| max_changes_in_memory|int|1,2147483647|NULL|NULL| max_cached_tuplebufs|int|1,2147483647|NULL|NULL| @@ -347,7 +356,7 @@ max_standby_archive_delay|int|-1,2147483647|ms|'-1' means to permit backup machi max_standby_streaming_delay|int|-1,2147483647|ms|NULL| recovery_min_apply_delay|int|0,2147483647|ms|NULL| max_user_defined_exception|int|1000,1000|NULL|NULL| -max_wal_senders|int|0,262143|NULL|Check whether the new value of max_wal_senders is less than max_connections and wal_level is archive or hot_standby, otherwise the gaussdb will start failed.| +max_wal_senders|int|0,1024|NULL|Check whether the new value of max_wal_senders is less than max_connections and wal_level is archive, hot_standby or logical, otherwise the gaussdb will start failed.| max_redo_log_size|int|163840,2147483647|kB|NULL| memorypool_enable|bool|0,0|NULL|NULL| memory_fault_percent|int|0,2147483647|NULL|NULL| @@ -488,6 +497,7 @@ transparent_encrypt_kms_url|string|0,0|NULL|NULL| transparent_encrypt_kms_region|string|0,0|NULL|NULL| enable_tde|bool|0,0|NULL|NULL| tde_cmk_id|string|0,0|NULL|NULL| +try_vector_engine_strategy|enum|off,force,optimal|NULL|NULL plog_merge_age|int|0,2147483647|ms|how long to aggregate profile logs.0 disable logging. suggest setting value is 1000 times.| fault_mon_timeout|int|0,1440|min|how many miniutes to monitor lwlock. 
0 will disable that.| transform_null_equals|bool|0,0|NULL|This only affects 'expr=NULL', not other comparison operators or other expressions involving some of the equality operator computing (such as IN).| @@ -497,19 +507,22 @@ unix_socket_directory|string|0,0|NULL|NULL| unix_socket_group|string|0,0|NULL|NULL| unix_socket_permissions|int|0,511|NULL|NULL| update_lockwait_timeout|int|0,2147483647|ms|NULL| +uppercase_attribute_name|bool|0,0|NULL|NULL| use_workload_manager|bool|0,0|NULL|NULL| user_metric_retention_time|int|0,3650|day|NULL| ustore_attr|string|0,0|NULL|NULL| +enable_ustore|bool|0,0|NULL|Enable to create ustore table| enable_default_ustore_table|bool|0,0|NULL|Enable to create ustore table by default| reserve_space_for_nullable_atts|bool|0,0|NULL|Enable reserve space for nullable attributes, only applicable to ustore| -undo_space_limit_size|int|102400,2147483647|kB|Maximum physical space of the undo command| -undo_limit_size_per_transaction|int|256,2147483647|kB|Maximum space for allocating undo resources in a transaction| +undo_space_limit_size|int|819200,17179869184|kB|Maximum physical space of the undo command| +undo_limit_size_per_transaction|int|2048,17179869184|kB|Maximum space for allocating undo resources in a transaction| vacuum_cost_delay|int|0,100|ms|NULL| vacuum_cost_limit|int|1,10000|NULL|NULL| vacuum_cost_page_dirty|int|0,10000|NULL|NULL| vacuum_cost_page_hit|int|0,10000|NULL|NULL| vacuum_cost_page_miss|int|0,10000|NULL|NULL| vacuum_gtt_defer_check_age|int|0,1000000|NULL|NULL| +undo_retention_time|int|0,2147483647|s|Sets the maximum retention time of undo| version_retention_age|int64|0,576460752303423487|NULL|NULL| enable_recyclebin|bool|0,0|NULL|Enable recyclebin for user-defined objects restore| recyclebin_retention_time|int|1,2147483647|s|Sets the maximum retention time of objects in recyclebin| @@ -529,6 +542,8 @@ wal_receiver_connect_retries|int|1,2147483647|NULL|NULL| wal_sender_timeout|int|0,2147483647|ms|If the host larger data rebuild operation requires increasing the value of this parameter,the host data at 500G, refer to this parameter is 600. This value can not be greater than the wal_receiver_timeout or database rebuilding timeout parameter.| wal_sync_method|enum|fsync,fsync_writethrough,fdatasync,open_sync,open_datasync|NULL|If fsync set to off, this parameter setting does not make sense, because all data updates are not forced to be written to disk.| wal_writer_delay|int|1,10000|ms|If the time is too long will cause WAL buffers memory shortage, time is too short will cause WAL continue to write, increase disk I/O burden.| +wal_flush_timeout|int|0,90000000|NULL|set timeout when iterator table entry.| +wal_flush_delay|int|0,90000000|NULL|set delay time when iterator table entry.| walsender_max_send_size|int|8,2147483647|kB|NULL| basebackup_timeout|int|0,2147483647|s|NULL| work_mem|int|64,2147483647|kB|For complex queries, it may run several concurrent sort or hash operation, each of which can use the amount of memory that this parameter is declared using the temporary file is insufficient. Also, several running sessions could be sorted the same time. 
Therefore, the total memory usage may be work_mem several times.| @@ -588,6 +603,8 @@ dcf_data_path|string|0,0|NULL|NULL| dcf_log_path|string|0,0|NULL|NULL| dcf_log_level|string|0,0|NULL|NULL| dcf_election_timeout|int|1,600|s|NULL| +dcf_enable_auto_election_priority|int|0,1|NULL|NULL| +dcf_election_switch_threshold|int|0,2147483647|NULL|NULL| dcf_run_mode|enum|0,2|NULL|NULL| dcf_max_log_file_size|int|1,1000|MB|NULL| dcf_log_backup_file_count|int|1,100|NULL|NULL| @@ -618,6 +635,9 @@ recovery_redo_workers|int|1,8|NULL|NULL| recovery_time_target|int|0,3600|NULL|NULL| pagewriter_sleep|int|0,3600000|ms|NULL| pagewriter_thread_num|int|1,16|NULL|NULL| +audit_thread_num|int|1,48|NULL|NULL| +dw_file_num|int|1,16|NULL|NULL| +dw_file_size|int|32,256|NULL|NULL| incremental_checkpoint_timeout|int|1,3600|s|NULL| enable_incremental_checkpoint|bool|0,0|NULL|NULL| enable_double_write|bool|0,0|NULL|NULL| @@ -636,7 +656,8 @@ enable_auto_explain|bool|0,0|NULL|NULL| auto_explain_level|enum|off,log,notice|NULL|NULL| cost_weight_index|real|1e-10,1e+10|NULL|NULL| default_limit_rows|real|-100,1.79769e+308|NULL|NULL| -sql_beta_feature|enum|resowner_debug,spi_debug,partition_opfusion,index_cost_with_leaf_pages_only,canonical_pathkey,join_sel_with_cast_func,no_unique_index_first,sel_semi_poisson,sel_expr_instr,param_path_gen,rand_cost_opt,param_path_opt,page_est_opt,a_style_coerce,none|NULL|NULL| +sql_beta_feature|enum|partition_fdw_on,partition_opfusion,index_cost_with_leaf_pages_only,canonical_pathkey,join_sel_with_cast_func,no_unique_index_first,sel_semi_poisson,sel_expr_instr,param_path_gen,rand_cost_opt,param_path_opt,page_est_opt,a_style_coerce,predpush_same_level,none|NULL|NULL| +max_logical_replication_workers|int|0,262143|NULL|Maximum number of logical replication worker processes.| walwriter_sleep_threshold|int64|1,50000|NULL|NULL| walwriter_cpu_bind|int|-1,2147483647|NULL|NULL| wal_file_init_num|int|0,1000000|NULL|NULL| @@ -654,8 +675,8 @@ plsql_show_all_error|bool|0,0|NULL|NULL| enable_auto_clean_unique_sql|bool|0,0|NULL|NULL| pldebugger_timeout|int|1,86400|s|NULL| xlog_lock_file_path|string|0,0|NULL|NULL| -max_logical_replication_workers|int|0,262143|NULL|Maximum number of logical replication worker processes.| keep_sync_window|int|0,2147483647|s|NULL| +redo_bind_cpu_attr|string|0,0|NULL|NULL| [cmserver] log_dir|string|0,0|NULL|NULL| log_file_size|int|0,2047|MB|NULL| @@ -690,7 +711,18 @@ az_connect_check_interval|int|1,2147483647|NULL|NULL| az_connect_check_delay_time|int|1,2147483647|NULL|NULL| cmserver_demote_delay_on_etcd_fault|int|1,2147483647|NULL|NULL| instance_phony_dead_restart_interval|int|1800,2147483647|NULL|NULL| -install_type|int|0,1|NULL|NULL| +install_type|int|0,2|NULL|NULL| +enable_dcf|bool|0,0|NULL|NULL| +enable_ssl|bool|0,0|NULL|NULL| +ssl_cert_expire_alert_threshold|int|7,180|NULL|NULL| +ssl_cert_expire_check_interval|int|0,2147483647|NULL|NULL| +delay_arbitrate_timeout|int|0,2147483647|NULL|NULL| +ddb_type|int|0,1|NULL|NULL| +ddb_log_level|string|0,0|NULL|NULL| +ddb_log_backup_file_count|int|1,100|NULL|NULL| +ddb_max_log_file_size|string|0,0|NULL|NULL| +ddb_log_suppress_enable|int|0,1|NULL|NULL| +ddb_election_timeout|int|1,600|NULL|NULL| [cmagent] log_dir|string|0,0|NULL|NULL| log_file_size|int|0,2047|MB|NULL| @@ -718,6 +750,7 @@ unix_socket_directory|string|0,0|NULL|NULL| enable_xc_maintenance_mode|bool|0,0|NULL|NULL| process_cpu_affinity|int|0,2|NULL|NULL| agent_phony_dead_check_interval|int|0,2147483647|NULL|NULL| +enable_dcf|bool|0,0|NULL|NULL| 
disaster_recovery_type|int|0,2|NULL|NULL| [lcname] allow_concurrent_tuple_update|bool|0,0|NULL|NULL| @@ -780,14 +813,11 @@ memorypool_size|int|131072,1073741823|kB|NULL| cstore_buffers|int|16384,1073741823|kB|NULL| udfworkermemhardlimit|int|0,2147483647|kB|Sets the hard memory limit to be used for fenced UDF.| wal_buffers|int|-1,262144|kB|Every time a transaction is committed, the contents of WAL buffers are written to disk, it is set to a large value will not bring significant performance gains. If you set it to hundreds of megabytes, you may have written to the disk to improve performance on the server a lot of real-time transaction commits. According to experience, the default value is sufficient for most situations.| -max_wal_senders|int|0,262143|NULL|Check whether the new value of max_wal_senders is less than max_connections and wal_level is archive or hot_standby, otherwise the gaussdb will start failed.| +max_wal_senders|int|0,262143|NULL|Check whether the new value of max_wal_senders is less than max_connections and wal_level is archive, hot_standby or logical, otherwise the gaussdb will start failed.| max_replication_slots|int|0,262143|NULL|NULL| autovacuum_freeze_max_age|int64|100000,576460752303423487|NULL|NULL| autovacuum_max_workers|int|0,262143|NULL|NULL| track_activity_query_size|int|100,102400|NULL|NULL| -pset_lob_length|int|0,2147483647|NULL|NULL| -pset_num_width|int|0,128|NULL|NULL| -pset_num_format|string|0,0|NULL|NULL| event_source|string|0,0|NULL|NULL| memorypool_enable|bool|0,0|NULL|NULL| enable_memory_limit|bool|0,0|NULL|NULL| diff --git a/src/bin/gs_guc/cluster_guc.cpp b/src/bin/gs_guc/cluster_guc.cpp index 87a54757d..52a82818a 100644 --- a/src/bin/gs_guc/cluster_guc.cpp +++ b/src/bin/gs_guc/cluster_guc.cpp @@ -493,6 +493,10 @@ char* modify_parameter_value(const char* value, bool localMode) buffer[j] = '\\'; j++; } + if (j >= MAX_VALUE_LEN) { + write_stderr(_("%s: out of memory\n"), progname); + exit(1); + } buffer[j] = value[i]; } else { buffer[j] = value[i]; @@ -3523,6 +3527,15 @@ bool check_cn_dn_parameter_is_valid() "not work on this mode.\n", config_param[para_num]); } + /* enable_memory_context_check_debug only work on debug mode */ + char* memCtxCheckParam = "enable_memory_context_check_debug"; + len = (strlen(tmp) > strlen(memCtxCheckParam)) ? strlen(tmp) : strlen(memCtxCheckParam); + if (strncmp(tmp, memCtxCheckParam, len) == 0) { + all_valid = false; + (void)write_stderr("ERROR: The name of parameter \"%s\" is incorrect." + "not work on this mode.\n", + config_param[para_num]); + } #endif } } diff --git a/src/bin/gs_guc/encrypt.cpp b/src/bin/gs_guc/encrypt.cpp index 54e82023d..bc03bb4b6 100644 --- a/src/bin/gs_guc/encrypt.cpp +++ b/src/bin/gs_guc/encrypt.cpp @@ -110,13 +110,17 @@ int main(int argc, char* argv[]) int key_num = 0; int key_child_num = 0; int i = 0; + int j = 0; char* key_child[MAX_CHILD_NUM]; char* path_child[MAX_CHILD_NUM]; char* keyword = NULL; errno_t rc = EOK; FILE* cmd_fp = NULL; - char read_buf[MAX_CHILD_PATH] = {0}; char* mv_cmd = NULL; + char* keypath[2]; /* we can specify 2 path,the first is the cipher path,the second is the rand path. 
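The `j >= MAX_VALUE_LEN` test added to `modify_parameter_value` closes a real overflow: doubling escape characters can make the output longer than the input, so the write index must be revalidated after every extra byte. A self-contained sketch of the guarded loop, with the buffer shrunk so the guard visibly fires (error handling simplified from the tool's exit-on-error style):

```
#include <cstdio>
#include <cstring>

#define MAX_VALUE_LEN 16 /* deliberately small so the guard triggers */

// Sketch of the guarded escape loop: '\' is doubled, and the write index
// j is rechecked after each extra character so buffer[j] cannot overflow.
static int escape_backslashes(const char* value, char* buffer)
{
    size_t j = 0;
    for (size_t i = 0; i < strlen(value); i++, j++) {
        if (value[i] == '\\') {
            buffer[j] = '\\';
            j++;
        }
        if (j >= MAX_VALUE_LEN - 1) { /* leave room for the terminator */
            fprintf(stderr, "value too long after escaping\n");
            return -1;
        }
        buffer[j] = value[i];
    }
    buffer[j] = '\0';
    return 0;
}

int main()
{
    char buffer[MAX_VALUE_LEN];
    if (escape_backslashes("a\\b", buffer) == 0) {      /* fits: prints a\\b */
        printf("escaped: %s\n", buffer);
    }
    escape_backslashes("C:\\x\\y\\z\\w\\v\\u", buffer); /* too long: guard fires */
    return 0;
}
```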
*/ + + keypath[0] = NULL; + keypath[1] = NULL; for (i = 0; i < MAX_CHILD_NUM; i++) { key_child[i] = NULL; @@ -128,36 +132,40 @@ int main(int argc, char* argv[]) return result; } - if (argv[1] != NULL) { - key_num = check_key_num(argv[1]); - if (key_num == 0) { - (void)fprintf(stderr, _("ERROR: invalid passwd length\n")); - return result; - } - - key_child_num = key_num / KEY_SPLIT_LEN + 1; - keyword = (char*)crypt_malloc_zero(KEY_SPLIT_LEN * key_child_num); - if (NULL == keyword) { - (void)fprintf(stderr, _("out of memory\n")); - return result; - } - rc = memcpy_s(keyword, KEY_SPLIT_LEN * key_child_num, argv[1], key_num + 1); - securec_check_c(rc, "\0", "\0"); - rc = memset_s(argv[1], key_num, 0, key_num); - securec_check_c(rc, "\0", "\0"); + key_num = check_key_num(argv[1]); + if (key_num == 0) { + (void)fprintf(stderr, _("ERROR: invalid passwd length\n")); + return result; } - for (i = 2; i < argc; i++) { + key_child_num = key_num / KEY_SPLIT_LEN + 1; + keyword = (char*)crypt_malloc_zero(KEY_SPLIT_LEN * key_child_num); + if (NULL == keyword) { + (void)fprintf(stderr, _("out of memory\n")); + return result; + } + rc = memcpy_s(keyword, KEY_SPLIT_LEN * key_child_num, argv[1], key_num + 1); + securec_check_c(rc, "\0", "\0"); + rc = memset_s(argv[1], key_num, 0, key_num); + securec_check_c(rc, "\0", "\0"); + + for (i = 2, j = 0; i < argc; i++, j++) { if (strlen(argv[i]) > MAX_CHILD_PATH) { (void)fprintf(stderr, _("ERROR: path %s length is more then %d\n"), argv[i], MAX_CHILD_PATH); goto END; } - canonicalize_path(argv[i]); - if (-1 == access(argv[i], R_OK | W_OK)) { - (void)fprintf(stderr, _("ERROR: Could not access the path %s\n"), argv[i]); + + keypath[j] = strdup(argv[i]); + if (NULL == keypath[j]) { + (void)write_stderr(_("%s: out of memory\n"), "gs_guc"); + exit(1); + } + canonicalize_path(keypath[j]); + if (-1 == access(keypath[j], R_OK | W_OK)) { + (void)fprintf(stderr, _("ERROR: Could not access the path %s\n"), keypath[j]); goto END; } - check_path(argv[i]); + check_path(keypath[j]); } init_log((char*)PROG_NAME); @@ -168,7 +176,7 @@ int main(int argc, char* argv[]) securec_check_c(rc, "\0", "\0"); path_child[i] = (char*)crypt_malloc_zero(MAX_CHILD_PATH + 1); - rc = snprintf_s(path_child[i], MAX_CHILD_PATH, MAX_CHILD_PATH - 1, "%s/key_%d", argv[2], i); + rc = snprintf_s(path_child[i], MAX_CHILD_PATH, MAX_CHILD_PATH - 1, "%s/key_%d", keypath[0], i); securec_check_ss_c(rc, "\0", "\0"); create_child_dir(path_child[i]); @@ -177,14 +185,14 @@ int main(int argc, char* argv[]) if (argc == 4) { CRYPT_FREE(path_child[i]); path_child[i] = (char*)crypt_malloc_zero(MAX_CHILD_PATH + 1); - rc = snprintf_s(path_child[i], MAX_CHILD_PATH, MAX_CHILD_PATH - 1, "%s/key_%d", argv[3], i); + rc = snprintf_s(path_child[i], MAX_CHILD_PATH, MAX_CHILD_PATH - 1, "%s/key_%d", keypath[1], i); securec_check_ss_c(rc, "\0", "\0"); create_child_dir(path_child[i]); mv_cmd = (char*)crypt_malloc_zero(MAX_COMMAND_LEN + 1); rc = snprintf_s(mv_cmd, MAX_COMMAND_LEN, MAX_COMMAND_LEN - 1, "mv %s/key_%d/*.rand %s/key_%d/", - argv[2], i, argv[3], i); + keypath[0], i, keypath[1], i); securec_check_ss_c(rc, "\0", "\0"); cmd_fp = popen(mv_cmd, "r"); @@ -195,12 +203,6 @@ int main(int argc, char* argv[]) CRYPT_FREE(path_child[i]); goto END; } - - while (fgets(read_buf, sizeof(read_buf) - 1, cmd_fp) != 0) { - printf("%s\n", read_buf); - rc = memset_s(read_buf, sizeof(read_buf), 0, sizeof(read_buf)); - securec_check_c(rc, "\0", "\0"); - } pclose(cmd_fp); CRYPT_FREE(mv_cmd); } @@ -217,5 +219,7 @@ END: securec_check_c(rc, "\0", "\0"); 
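The `keypath[]` change above canonicalizes a duplicate of each path argument rather than mutating `argv`, checks access rights on the sanitized copy, and frees the duplicates on every exit path. A portable sketch of the same discipline using `realpath(3)` in place of the tree's `canonicalize_path` (the helper name is illustrative):

```
#include <cstdio>
#include <cstdlib>
#include <unistd.h>

// Sketch: canonicalize a *copy* of the caller's path, verify access on
// the copy, and free it on every exit path, mirroring the keypath[] fix.
static char* checked_canonical_path(const char* arg)
{
    char* resolved = realpath(arg, NULL); /* malloc'd canonical copy */
    if (resolved == NULL) {
        perror("realpath");
        return NULL;
    }
    if (access(resolved, R_OK | W_OK) == -1) {
        fprintf(stderr, "ERROR: Could not access the path %s\n", resolved);
        free(resolved);
        return NULL;
    }
    return resolved;
}

int main(int argc, char** argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <path>\n", argv[0]);
        return 1;
    }
    char* keypath = checked_canonical_path(argv[1]); /* argv[1] stays intact */
    if (keypath == NULL) {
        return 1;
    }
    printf("using canonical path: %s\n", keypath);
    free(keypath);
    return 0;
}
```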
CRYPT_FREE(keyword); } + CRYPT_FREE(keypath[0]); + CRYPT_FREE(keypath[1]); return result; } diff --git a/src/bin/gs_guc/pg_guc.cpp b/src/bin/gs_guc/pg_guc.cpp index 49e04e309..a075fb8ed 100644 --- a/src/bin/gs_guc/pg_guc.cpp +++ b/src/bin/gs_guc/pg_guc.cpp @@ -836,6 +836,7 @@ ErrCode get_file_lock(const char* path, FileLock* filelock) return CODE_UNKOWN_ERROR; } ret = strcpy_s(newpath, sizeof(newpath), path); + securec_check_c(ret, "\0", "\0"); canonicalize_path(newpath); if (checkPath(newpath) != 0) { write_stderr(_("realpath(%s) failed : %s!\n"), newpath, strerror(errno)); @@ -1540,6 +1541,14 @@ static void do_help_check_guc(void) progname); (void)printf(_("\nOptions for check with -c parameter: \n")); (void)printf(_(" -Z NODE-TYPE only can be \"coordinator\" or \"datanode\"\n")); +#else +#ifdef ENABLE_LITE_MODE + (void)printf(_("\nChecking GUC parameters:\n")); + + (void)printf(_(" %s check [-Z NODE-TYPE] -D DATADIR {-c \"parameter\", -c " + "\"parameter\", ...}\n"), progname); + (void)printf(_(" %s check [-Z NODE-TYPE] -D DATADIR {-c parameter, -c " + "parameter, ...}\n"), progname); #else (void)printf(_("\nChecking GUC parameters:\n")); @@ -1550,6 +1559,7 @@ static void do_help_check_guc(void) "parameter, ...}\n"), progname); +#endif #endif } @@ -1606,6 +1616,27 @@ static void do_help_config_guc(void) (void)printf( _(" e.g. %s set -Z cmagent -N all -I all -c \"program = \'\\\"Hello\\\", World\\!\'\".\n"), progname); (void)printf(_(" e.g. %s set -Z cmagent -c \"program = \'\\\"Hello\\\", World\\!\'\".\n"), progname); +#else +#ifdef ENABLE_LITE_MODE + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--lcname=LCNAME] [--ignore-node=NODES] " + "{-c \"parameter = value\" -c \"parameter = value\" ...}\n"), progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--lcname=LCNAME] [--ignore-node=NODES] " + "{-c \" parameter = value \" -c \" parameter = value \" ...}\n"), progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--lcname=LCNAME] [--ignore-node=NODES] " + "{-c \"parameter = \'value\'\" -c \"parameter = \'value\'\" ...}\n"), progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--lcname=LCNAME] [--ignore-node=NODES] " + "{-c \" parameter = \'value\' \" -c \" parameter = \'value\' \" ...}\n"), progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--lcname=LCNAME] [--ignore-node=NODES] {-c \"parameter\" -c \"parameter\" ...}\n"), progname); + (void)printf( + _(" e.g. %s set -Z datanode -D /datanode/data -c \"program = \'\\\"Hello\\\", World\\!\'\".\n"), progname); + (void)printf( + _(" e.g. %s reload -Z datanode -D /datanode/data -c \"program = \'\\\"Hello\\\", World\\!\'\".\n"), progname); + #else (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] [-N NODE-NAME] {-I INSTANCE-NAME | -D DATADIR} " "[--lcname=LCNAME] [--ignore-node=NODES] " @@ -1626,6 +1657,7 @@ static void do_help_config_guc(void) (void)printf( _(" e.g. 
%s reload -Z datanode -N all -I all -c \"program = \'\\\"Hello\\\", World\\!\'\".\n"), progname); +#endif #endif @@ -1668,6 +1700,32 @@ static void do_help_config_hba(void) (void)printf(_(" %s {set | reload} -Z NODE-TYPE [-N NODE-NAME] {-I INSTANCE-NAME | -D DATADIR} " "[--ignore-node=NODES] -h \"HOSTTYPE DATABASE USERNAME HOSTNAME\" \n"), progname); +#else +#ifdef ENABLE_LITE_MODE + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--ignore-node=NODES] " + "-h \"HOSTTYPE DATABASE USERNAME IPADDR IPMASK AUTHMEHOD authentication-options\" \n"), + progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--ignore-node=NODES] " + "-h \"HOSTTYPE DATABASE USERNAME IPADDR-WITH-IPMASK AUTHMEHOD authentication-options\" \n"), + progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--ignore-node=NODES] " + "-h \"HOSTTYPE DATABASE USERNAME HOSTNAME AUTHMEHOD authentication-options\" \n"), + progname); + + (void)printf(_(" If authentication policy need to set/reload DEFAULT OR COMMENT then provide without " + "authentication menthod, use the form: \n")); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--ignore-node=NODES] -h \"HOSTTYPE DATABASE USERNAME IPADDR IPMASK\" \n"), + progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--ignore-node=NODES] -h \"HOSTTYPE DATABASE USERNAME IPADDR-WITH-IPMASK \" \n"), + progname); + (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] -D DATADIR " + "[--ignore-node=NODES] -h \"HOSTTYPE DATABASE USERNAME HOSTNAME\" \n"), + progname); #else (void)printf(_(" %s {set | reload} [-Z NODE-TYPE] [-N NODE-NAME] {-I INSTANCE-NAME | -D DATADIR} " "[--ignore-node=NODES] " @@ -1694,6 +1752,7 @@ static void do_help_config_hba(void) "[--ignore-node=NODES] -h \"HOSTTYPE DATABASE USERNAME HOSTNAME\" \n"), progname); #endif +#endif } @@ -1716,8 +1775,10 @@ static void do_help_common_options(void) { (void)printf(_("\nCommon options:\n")); +#ifndef ENABLE_LITE_MODE (void)printf(_(" -N nodename in which this command need to be executed\n")); (void)printf(_(" -I instance name\n")); +#endif (void)printf(_(" -D, --pgdata=DATADIR location of the database storage area\n")); (void)printf(_(" -c parameter=value the parameter to set\n")); (void)printf(_(" -c parameter the parameter value to DEFAULT (i.e comments in configuration file)\n")); @@ -2477,7 +2538,9 @@ int main(int argc, char** argv) progname = PROG_NAME; set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("gs_guc")); arraysize = argc - 1; +#ifndef ENABLE_LITE_MODE bool is_cluster_init = false; +#endif if (0 != arraysize) { config_param = ((char**)pg_malloc_zero(arraysize * sizeof(char*))); @@ -2575,10 +2638,12 @@ int main(int argc, char** argv) is_hba_conf = true; temp_config_parameter = xstrdup(optarg); // init cluster static config +#ifndef ENABLE_LITE_MODE if (!is_cluster_init) { init_gauss_cluster_config(); is_cluster_init = true; } +#endif do_hba_analysis(temp_config_parameter); GS_FREE(temp_config_parameter); break; diff --git a/src/bin/gs_loader/CMakeLists.txt b/src/bin/gs_loader/CMakeLists.txt index 94f64d8aa..a9cc47dd7 100644 --- a/src/bin/gs_loader/CMakeLists.txt +++ b/src/bin/gs_loader/CMakeLists.txt @@ -1,8 +1,15 @@ -add_executable(gs_loader ${CMAKE_CURRENT_SOURCE_DIR}/gs_loader.cpp) +# gs_loader +set(TGT_gs_loader_SRC ${CMAKE_CURRENT_SOURCE_DIR}/gs_loader.cpp) + +set(gs_loader_DEF_OPTIONS ${MACRO_OPTIONS}) +set(gs_loader_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} 
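gs_guc's help output now has three tiers — multiple-node builds, lite builds, and the single-node default — selected by nested preprocessor guards so a lite binary never advertises the `-N`/`-I` cluster options it cannot honour. The shape in miniature, with the usage strings abbreviated:

```
#include <cstdio>

// Miniature of the nested guards in do_help_*: ENABLE_MULTIPLE_NODES
// wins, then ENABLE_LITE_MODE, then the plain single-node default.
static void print_usage(const char* progname)
{
#ifdef ENABLE_MULTIPLE_NODES
    printf("%s set -Z NODE-TYPE -N NODE-NAME -I INSTANCE-NAME -c \"parameter = value\"\n", progname);
#else
#ifdef ENABLE_LITE_MODE
    /* lite mode: no -N/-I, only a local data directory */
    printf("%s set [-Z NODE-TYPE] -D DATADIR -c \"parameter = value\"\n", progname);
#else
    printf("%s set [-Z NODE-TYPE] [-N NODE-NAME] {-I INSTANCE-NAME | -D DATADIR} -c \"parameter = value\"\n",
        progname);
#endif
#endif
}

int main()
{
    print_usage("gs_guc");
    return 0;
}
```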
${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) +set(gs_loader_LINK_OPTIONS ${BIN_LINK_OPTIONS}) +set(gs_loader_LINK_LIBS -lpthread -ldl -lm -lrt) +add_bintarget(gs_loader TGT_gs_loader_SRC "" "${gs_loader_DEF_OPTIONS}" "${gs_loader_COMPILE_OPTIONS}" "${gs_loader_LINK_OPTIONS}" "${gs_loader_LINK_LIBS}") install( FILES ${CMAKE_CURRENT_SOURCE_DIR}/gs_loader.sh PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ DESTINATION bin ) -install(TARGETS gs_loader RUNTIME DESTINATION bin) \ No newline at end of file +install(TARGETS gs_loader RUNTIME DESTINATION bin) diff --git a/src/bin/gs_loader/gs_loader.cpp b/src/bin/gs_loader/gs_loader.cpp index d000ce38f..fd3e8b07c 100644 --- a/src/bin/gs_loader/gs_loader.cpp +++ b/src/bin/gs_loader/gs_loader.cpp @@ -81,16 +81,37 @@ void SetX(char *arr1, int n1) bool InputPasswd(char *keyword, int max_len) { char *password = getpass("Input a password:"); - if (password == NULL || strlen(password) > max_len) { + if (password == NULL) { std::cout << "ERROR: read password error"; return false; } - for (int i = 0; i < strlen(password); i++) { + if (strlen(password) > (unsigned)max_len) { + std::cout << "ERROR: read password error"; + SetX(password, strlen(password)); + return false; + } + for (unsigned i = 0; i < strlen(password); i++) { keyword[i] = password[i]; password[i] = 'x'; } return true; } + +void check_danger_character(const char *inputEnvValue) +{ + if (inputEnvValue == NULL) + return; + + const char* dangerCharacterList[] = {";", "`", "\\", "'", "\"", ">", "<", "&", "|", "!", NULL}; + int i = 0; + + for (i = 0; dangerCharacterList[i] != NULL; i++) { + if (strstr(inputEnvValue, dangerCharacterList[i]) != NULL) { + fprintf(stderr, "ERROR: Failed to check input value: invalid token \"%s\".\n", dangerCharacterList[i]); + exit(1); + } + } +} } using namespace gs_loader; @@ -154,14 +175,20 @@ int main(int argc, char **argv) return 0; } } + const char *p = const_cast(params.c_str()); + check_danger_character(p); + FILE *fp = popen(params.c_str(), "w"); if (fp == NULL) { + for (unsigned i = 0; i < strlen(keyword); i++) { + keyword[i] = 'x'; + } std::cout << "ERROR: run gs_loader error" << std::endl; return 0; } fputs(keyword, fp); fputc('\n', fp); - for (int i = 0; i < strlen(keyword); i++) { + for (unsigned i = 0; i < strlen(keyword); i++) { keyword[i] = 'x'; } pclose(fp); diff --git a/src/bin/gs_loader/gs_loader.sh b/src/bin/gs_loader/gs_loader.sh index ee32dba8a..dff46c092 100644 --- a/src/bin/gs_loader/gs_loader.sh +++ b/src/bin/gs_loader/gs_loader.sh @@ -229,7 +229,7 @@ function exec_sql() if res=$(gsql $host $port $user $passwd $db -t -c "$sql" 2>&1) then - echo -e "$res" | sed -r 's/Connect primary node [0-9]+.[0-9]+.[0-9]+.[0-9]+//g' + echo -e "$res" else echo "ERROR: function exec_sql" fi @@ -267,7 +267,7 @@ function exec_sql_file() # delete last line: total time: 10ms if res=$(gsql $host $port $user $passwd $db -t -f "$sql_file" 2>&1) then - echo -e "$res" | sed -r 's/Connect primary node [0-9]+.[0-9]+.[0-9]+.[0-9]+//g' | sed '$ d' + echo -e "$res" | sed '$ d' else echo "ERROR: function exec_sql_file" fi diff --git a/src/bin/gs_log/CMakeLists.txt b/src/bin/gs_log/CMakeLists.txt index 352ad73bd..a4c840d79 100755 --- a/src/bin/gs_log/CMakeLists.txt +++ b/src/bin/gs_log/CMakeLists.txt @@ -21,7 +21,7 @@ set(TGT_log_INC set(log_DEF_OPTIONS ${MACRO_OPTIONS}) set(log_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(log_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(log_LINK_LIBS libelog.a 
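gs_loader now screens the assembled command line for shell metacharacters before handing it to `popen`, and scrubs the password from memory whether or not `popen` succeeds. A condensed sketch of both habits (`cat` stands in for the real gsql invocation):

```
#include <cstdio>
#include <cstring>
#include <string>

// Condensed sketch: reject shell metacharacters before popen(), feed the
// secret through the child's stdin, then scrub the local copy.
static bool has_danger_character(const char* input)
{
    const char* danger_list[] = {";", "`", "\\", "'", "\"", ">", "<", "&", "|", "!", NULL};
    for (int i = 0; danger_list[i] != NULL; i++) {
        if (strstr(input, danger_list[i]) != NULL) {
            fprintf(stderr, "ERROR: invalid token \"%s\" in command\n", danger_list[i]);
            return true;
        }
    }
    return false;
}

int main()
{
    std::string params = "cat";            /* stand-in for the gsql command line */
    char keyword[] = "s3cret";             /* stand-in for the real password */

    if (has_danger_character(params.c_str())) {
        return 1;
    }
    FILE* fp = popen(params.c_str(), "w"); /* "w": we write to the child's stdin */
    if (fp != NULL) {
        fputs(keyword, fp);
        fputc('\n', fp);
        pclose(fp);
    }
    /* scrub the secret on every path, as the patched main() now does */
    for (size_t i = 0; i < strlen(keyword); i++) {
        keyword[i] = 'x';
    }
    return 0;
}
```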
libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(log_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(gs_log TGT_log_SRC TGT_log_INC "${log_DEF_OPTIONS}" "${log_COMPILE_OPTIONS}" "${log_LINK_OPTIONS}" "${log_LINK_LIBS}") add_dependencies(gs_log elog_static pgport_static) target_link_directories(gs_log PUBLIC diff --git a/src/bin/gsqlerr/CMakeLists.txt b/src/bin/gsqlerr/CMakeLists.txt index 6716edf9d..6206344e7 100755 --- a/src/bin/gsqlerr/CMakeLists.txt +++ b/src/bin/gsqlerr/CMakeLists.txt @@ -13,7 +13,7 @@ set(TGT_scanEreport_INC set(scanEreport_DEF_OPTIONS ${MACRO_OPTIONS}) set(scanEreport_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(scanEreport_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(scanEreport_LINK_LIBS -lsecurec) +set(scanEreport_LINK_LIBS -l${SECURE_C_CHECK}) if("${ENABLE_MEMORY_CHECK}" STREQUAL "ON") set(scanEreport_LINK_LIBS ${scanEreport_LINK_LIBS} -pthread -ldl -lm -lrt) set(scanEreport_DEF_OPTIONS ${scanEreport_DEF_OPTIONS} -D_REENTRANT) @@ -62,7 +62,7 @@ set(TGT_gsqlerr_INC set(gsqlerr_DEF_OPTIONS ${MACRO_OPTIONS}) set(gsqlerr_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(gsqlerr_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(gsqlerr_LINK_LIBS -lsecurec) +set(gsqlerr_LINK_LIBS -l${SECURE_C_CHECK}) if("${ENABLE_MEMORY_CHECK}" STREQUAL "ON") set(gsqlerr_LINK_LIBS ${gsqlerr_LINK_LIBS} -pthread -ldl -lm -lrt) set(gsqlerr_DEF_OPTIONS ${gsqlerr_DEF_OPTIONS} -D_REENTRANT) diff --git a/src/bin/gsqlerr/Makefile b/src/bin/gsqlerr/Makefile index bc16c0a0a..d22ff72fc 100644 --- a/src/bin/gsqlerr/Makefile +++ b/src/bin/gsqlerr/Makefile @@ -29,13 +29,13 @@ endif OBJS1= scanEreport.o $(WIN32RES) ifeq ($(enable_memory_check), yes) -LIBS = -lsecurec -l$(MEMCHECK_LIB_NAME_ASAN) -pthread -D_REENTRANT -ldl -lm -lrt +LIBS = -l$(SECURE_C_CHECK) -l$(MEMCHECK_LIB_NAME_ASAN) -pthread -D_REENTRANT -ldl -lm -lrt else -LIBS = -lsecurec +LIBS = -l$(SECURE_C_CHECK) endif ifeq ($(enable_thread_check), yes) -LIBS = -lsecurec -l$(MEMCHECK_LIB_NAME_TSAN) -pthread -D_REENTRANT -ldl -lm -lrt +LIBS = -l$(SECURE_C_CHECK) -l$(MEMCHECK_LIB_NAME_TSAN) -pthread -D_REENTRANT -ldl -lm -lrt endif all: @@ -50,7 +50,7 @@ ifeq ($(enable_multiple_nodes), yes) endif $(top_builddir)/$(subdir)/scanEreport $(top_srcdir) ereport.txt $(top_srcdir)/src/common/backend/utils/errcodes.txt escan.txt utscanEreport: $(OBJS1) | submake-libpgport - $(CC) -fPIC -shared $(CXXFLAGS) $(OBJS1) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o libutscanEreport.so -lsecurec + $(CC) -fPIC -shared $(CXXFLAGS) $(OBJS1) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o libutscanEreport.so -l$(SECURE_C_CHECK) mv libutscanEreport.so $(top_builddir)/../distribute/test/ut/lib OBJS= gsqlerr.o $(WIN32RES) @@ -59,7 +59,7 @@ gsqlerr: $(OBJS) | submake-libpgport $(CC) $(CXXFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) utgsqlerr: $(OBJS) | submake-libpgport - $(CC) -fPIC -shared $(CXXFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o libutgsqlerr.so -lsecurec + $(CC) -fPIC -shared $(CXXFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o libutgsqlerr.so -l$(SECURE_C_CHECK) mv libutgsqlerr.so $(top_builddir)/../distribute/test/ut/lib ifeq ($(enable_multiple_nodes), yes) diff --git a/src/bin/gsqlerr/scanEreport.cpp b/src/bin/gsqlerr/scanEreport.cpp index d7ce3e01e..4f953c13a 100644 --- a/src/bin/gsqlerr/scanEreport.cpp 
+++ b/src/bin/gsqlerr/scanEreport.cpp @@ -202,13 +202,14 @@ static char app_name[FILE_NAME_MAX_LEN]; void parseOutputParam(int argc, char* argv[]); int checkErrMsgItem(const char* errmodule, ERPARA_INFO_T* pstOutErPara); int checkDebugMsgItem(const char* errmodule); +static void check_env_value(const char* input_env_value); /******************************* realize function ************************************/ int main(int argc, char* argv[]) { char* filerealpath_ptr = NULL; - char fileresolved_name[FILE_NAME_MAX_LEN] = {0}; + char fileresolved_name[PATH_MAX] = {0}; char* dirrealpath_ptr = NULL; - char dirresolved_name[FILE_NAME_MAX_LEN] = {0}; + char dirresolved_name[PATH_MAX] = {0}; char cwd[MAXPATH] = {0}; int lRet = 0; char logfilename[MAXPATH] = {0}; @@ -244,6 +245,7 @@ int main(int argc, char* argv[]) } g_sProgDir = dirrealpath_ptr; + check_env_value(g_sProgDir); /* get current work directory */ g_sCurDir = getcwd(cwd, MAXPATH); @@ -453,6 +455,41 @@ void releaseMem(void) return; } +static void check_env_value(const char* input_env_value) +{ + const char* danger_character_list[] = {"|", + ";", + "&", + "$", + "<", + ">", + "`", + "\\", + "'", + "\"", + "{", + "}", + "(", + ")", + "[", + "]", + "~", + "*", + "?", + "!", + "\n", + NULL}; + int i = 0; + + for (i = 0; danger_character_list[i] != NULL; i++) { + if (strstr(input_env_value, danger_character_list[i]) != NULL) { + fprintf( + stderr, "invalid token \"%s\" in input_env_value: (%s)\n", danger_character_list[i], input_env_value); + exit(1); + } + } +} + int outputLog(FILE* logfd, bool isCloseFd, const char* format, ...) { #define TMELEN 32 @@ -1394,7 +1431,8 @@ int saveErrMsg(char* errmsg, char* dir, char* scanfile, int lineno) if (ERROR_LOCATION_NUM >= pstErrMsgItem->mppdb_err_msg_locnum + 1) { int locNum = pstErrMsgItem->mppdb_err_msg_locnum; - pstErrMsgItem->astErrLocate[locNum] = (mppdb_err_msg_location_t*)malloc(sizeof(mppdb_err_msg_location_t)); + pstErrMsgItem->astErrLocate[locNum] = + (mppdb_err_msg_location_t*)malloc(sizeof(mppdb_err_msg_location_t)); if (pstErrMsgItem->astErrLocate[locNum] == NULL) { return outputLog(logfile, false, "Memory alloc failed for err locate\n"); } @@ -1768,7 +1806,8 @@ int compareErrmsg() * 2. else need to malloc memory for old msg item before copy. 
*/ for (int i = pstErrMsgItemOld->mppdb_err_msg_locnum; i < pstErrMsgItemNew->mppdb_err_msg_locnum; i++) { - pstErrMsgItemOld->astErrLocate[i] = (mppdb_err_msg_location_t*)malloc(sizeof(mppdb_err_msg_location_t)); + pstErrMsgItemOld->astErrLocate[i] = + (mppdb_err_msg_location_t*)malloc(sizeof(mppdb_err_msg_location_t)); if (pstErrMsgItemOld->astErrLocate[i] == NULL) { return outputLog(logfile, false, "Memory alloc failed for err locate\n"); } diff --git a/src/bin/initdb/CMakeLists.txt b/src/bin/initdb/CMakeLists.txt index dfeaa0387..16adadcb2 100755 --- a/src/bin/initdb/CMakeLists.txt +++ b/src/bin/initdb/CMakeLists.txt @@ -28,7 +28,7 @@ set(TGT_initdb_INC set(initdb_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND) set(initdb_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(initdb_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(initdb_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(initdb_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(gs_initdb TGT_initdb_SRC TGT_initdb_INC "${initdb_DEF_OPTIONS}" "${initdb_COMPILE_OPTIONS}" "${initdb_LINK_OPTIONS}" "${initdb_LINK_LIBS}") add_dependencies(gs_initdb elog_static pgport_static) target_link_directories(gs_initdb PUBLIC diff --git a/src/bin/initdb/initdb.cpp b/src/bin/initdb/initdb.cpp index 561491698..a617b0f47 100644 --- a/src/bin/initdb/initdb.cpp +++ b/src/bin/initdb/initdb.cpp @@ -333,6 +333,7 @@ static int CreateRestrictedProcess(char* cmd, PROCESS_INFORMATION* processInfo); #endif static void InitUndoSubsystemMeta(); +static void check_input_spec_char(char* input_env_value, bool skip_dollar = false); /* * macros for running pipes to openGauss @@ -398,6 +399,26 @@ int g_bucket_len = DEFAULT_BUCKETSLEN; #define INSERT_BUCKET_SQL_LEN 512 +static void check_input_spec_char(char* input_env_value, bool skip_dollar) +{ + if (input_env_value == NULL) { + return; + } + + const char* danger_character_list[] = {"|", ";", "&", "$", "<", ">", "`", "\\", "!", NULL}; + + int i = 0; + + for (i = 0; danger_character_list[i] != NULL; i++) { + if (strstr((const char*)input_env_value, danger_character_list[i]) != NULL && + !(skip_dollar && danger_character_list[i][0] == '$')) { + write_stderr(_("Error: variable \"%s\" contains invalid symbol \"%s\".\n"), + input_env_value, danger_character_list[i]); + exit(1); + } + } +} + void check_env_value(const char* input_env_value) { const char* danger_character_list[] = {"|", @@ -3642,6 +3663,7 @@ int main(int argc, char* argv[]) "undo", "pg_logical"}; + check_input_spec_char(argv[0]); progname = get_progname(argv[0]); set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("gs_initdb")); @@ -3675,6 +3697,7 @@ int main(int argc, char* argv[]) case 'A': FREE_NOT_STATIC_ZERO_STRING(authmethodlocal_tmp); FREE_NOT_STATIC_ZERO_STRING(authmethodhost_tmp); + check_input_spec_char(optarg); authmethodlocal_tmp = authmethodhost_tmp = xstrdup(optarg); /* @@ -3690,11 +3713,13 @@ case 10: if (authmethodlocal_tmp != authmethodhost_tmp) FREE_NOT_STATIC_ZERO_STRING(authmethodlocal_tmp); + check_input_spec_char(optarg); authmethodlocal_tmp = xstrdup(optarg); break; case 11: if (authmethodlocal_tmp != authmethodhost_tmp) FREE_NOT_STATIC_ZERO_STRING(authmethodhost_tmp); + check_input_spec_char(optarg); authmethodhost_tmp = xstrdup(optarg); break; #ifndef ENABLE_MULTIPLE_NODES @@ -3704,21 +3729,24 @@ int main(int argc, char*
argv[]) #endif case 'D': FREE_NOT_STATIC_ZERO_STRING(pg_data); + check_input_spec_char(optarg); pg_data = xstrdup(optarg); break; case 'E': FREE_NOT_STATIC_ZERO_STRING(encoding); + check_input_spec_char(optarg); encoding = xstrdup(optarg); break; case 'W': pwprompt = true; break; case 'C': + check_input_spec_char(optarg); if (realpath(optarg, encrypt_pwd_real_path) == NULL) { write_stderr(_("%s: The parameter of -C is invalid.\n"), progname); break; } - + ret = snprintf_s(cipher_key_file, MAXPGPATH, MAXPGPATH - 1, "%s/server.key.cipher", encrypt_pwd_real_path); securec_check_ss_c(ret, "\0", "\0"); @@ -3748,6 +3776,7 @@ int main(int argc, char* argv[]) break; case 'U': FREE_NOT_STATIC_ZERO_STRING(username); + check_input_spec_char(optarg, true); username = xstrdup(optarg); break; case 'd': @@ -3756,6 +3785,7 @@ int main(int argc, char* argv[]) break; case 'g': FREE_NOT_STATIC_ZERO_STRING(new_xlog_file_path); + check_input_spec_char(optarg); new_xlog_file_path = xstrdup(optarg); break; case 'n': @@ -3764,34 +3794,42 @@ int main(int argc, char* argv[]) break; case 'L': FREE_NOT_STATIC_ZERO_STRING(share_path); + check_input_spec_char(optarg); share_path = xstrdup(optarg); break; case 1: FREE_NOT_STATIC_ZERO_STRING(locale); + check_input_spec_char(optarg); locale = xstrdup(optarg); break; case 2: FREE_NOT_STATIC_ZERO_STRING(lc_collate); + check_input_spec_char(optarg); lc_collate = xstrdup(optarg); break; case 3: FREE_NOT_STATIC_ZERO_STRING(lc_ctype); + check_input_spec_char(optarg); lc_ctype = xstrdup(optarg); break; case 4: FREE_NOT_STATIC_ZERO_STRING(lc_monetary); + check_input_spec_char(optarg); lc_monetary = xstrdup(optarg); break; case 5: FREE_NOT_STATIC_ZERO_STRING(lc_numeric); + check_input_spec_char(optarg); lc_numeric = xstrdup(optarg); break; case 6: FREE_NOT_STATIC_ZERO_STRING(lc_time); + check_input_spec_char(optarg); lc_time = xstrdup(optarg); break; case 7: FREE_NOT_STATIC_ZERO_STRING(lc_messages); + check_input_spec_char(optarg); lc_messages = xstrdup(optarg); break; case 8: @@ -3800,6 +3838,7 @@ int main(int argc, char* argv[]) break; case 9: FREE_NOT_STATIC_ZERO_STRING(pwfilename); + check_input_spec_char(optarg); pwfilename = xstrdup(optarg); break; case 's': @@ -3807,10 +3846,12 @@ int main(int argc, char* argv[]) break; case 'T': FREE_NOT_STATIC_ZERO_STRING(default_text_search_config_tmp); + check_input_spec_char(optarg); default_text_search_config_tmp = xstrdup(optarg); break; case 'X': FREE_NOT_STATIC_ZERO_STRING(xlog_dir); + check_input_spec_char(optarg); xlog_dir = xstrdup(optarg); break; case 'S': @@ -3819,16 +3860,19 @@ int main(int argc, char* argv[]) break; case 'H': FREE_NOT_STATIC_ZERO_STRING(host_ip); + check_input_spec_char(optarg); host_ip = xstrdup(optarg); break; #ifdef PGXC case 12: FREE_NOT_STATIC_ZERO_STRING(nodename); + check_input_spec_char(optarg); nodename = xstrdup(optarg); break; #endif case 13: FREE_NOT_STATIC_ZERO_STRING(dbcompatibility); + check_input_spec_char(optarg); dbcompatibility = xstrdup(optarg); break; case 14: @@ -3860,6 +3904,7 @@ int main(int argc, char* argv[]) * already specified with -D / --pgdata */ if (optind < argc && strlen(pg_data) == 0) { + check_input_spec_char(argv[optind]); pg_data = xstrdup(argv[optind]); optind++; } diff --git a/src/bin/pg_basebackup/CMakeLists.txt b/src/bin/pg_basebackup/CMakeLists.txt index 8dedc1dd0..ec08ab163 100755 --- a/src/bin/pg_basebackup/CMakeLists.txt +++ b/src/bin/pg_basebackup/CMakeLists.txt @@ -29,7 +29,10 @@ set(TGT_basebackup_INC set(basebackup_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ 
-DFRONTEND) set(basebackup_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(basebackup_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(basebackup_LINK_LIBS lib_string_obj libelog.a libbuildquery.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4) +set(basebackup_LINK_LIBS lib_string_obj libelog.a libbuildquery.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND basebackup_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(gs_basebackup TGT_basebackup_SRC TGT_basebackup_INC "${basebackup_DEF_OPTIONS}" "${basebackup_COMPILE_OPTIONS}" "${basebackup_LINK_OPTIONS}" "${basebackup_LINK_LIBS}") add_dependencies(gs_basebackup lib_string_obj elog_static buildquery_static pgcommon_static hotpatchclient_static pgport_static pq) target_link_directories(gs_basebackup PUBLIC @@ -66,7 +69,10 @@ set(TGT_receivexlog_INC set(receivexlog_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ -DFRONTEND) set(receivexlog_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(receivexlog_LINK_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS} -pie) -set(receivexlog_LINK_LIBS lib_string_obj libelog.a libbuildquery.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4) +set(receivexlog_LINK_LIBS lib_string_obj libelog.a libbuildquery.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND receivexlog_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(pg_receivexlog TGT_receivexlog_SRC TGT_receivexlog_INC "${receivexlog_DEF_OPTIONS}" "${receivexlog_COMPILE_OPTIONS}" "${receivexlog_LINK_OPTIONS}" "${receivexlog_LINK_LIBS}") add_dependencies(pg_receivexlog lib_string_obj elog_static buildquery_static pgcommon_static hotpatchclient_static pgport_static pq) target_link_directories(pg_receivexlog PUBLIC @@ -102,7 +108,10 @@ set(TGT_recvlogical_INC set(recvlogical_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ -DFRONTEND) set(recvlogical_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(recvlogical_LINK_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS} -pie) -set(recvlogical_LINK_LIBS lib_string_obj libelog.a libbuildquery.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4) +set(recvlogical_LINK_LIBS lib_string_obj libelog.a libbuildquery.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt 
-lz -lminiunz -llz4) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND recvlogical_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(pg_recvlogical TGT_recvlogical_SRC TGT_recvlogical_INC "${recvlogical_DEF_OPTIONS}" "${recvlogical_COMPILE_OPTIONS}" "${recvlogical_LINK_OPTIONS}" "${recvlogical_LINK_LIBS}") add_dependencies(pg_recvlogical lib_string_obj elog_static buildquery_static pgcommon_static hotpatchclient_static pgport_static pq) target_link_directories(pg_recvlogical PUBLIC diff --git a/src/bin/pg_basebackup/Makefile b/src/bin/pg_basebackup/Makefile index e216e53eb..f4fd079bf 100644 --- a/src/bin/pg_basebackup/Makefile +++ b/src/bin/pg_basebackup/Makefile @@ -21,7 +21,10 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(libpq_srcdir) -I$(ZLIB_INCLUDE_PATH) $(CPPFLAGS) -DHAVE_LIBZ -fPIC -fPIE -DFRONTEND -I$(top_builddir)/src/bin/pg_rewind -I$(top_builddir)/src/bin/pg_ctl -I$(LZ4_INCLUDE_PATH) LDFLAGS += -Wl,-z,relro,-z,now -L$(LZ4_LIB_PATH) -LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4 +LIBS += -llz4 +ifeq ($(enable_lite_mode), no) + LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +endif ifneq "$(MAKECMDGOALS)" "clean" ifneq "$(MAKECMDGOALS)" "distclean" diff --git a/src/bin/pg_basebackup/pg_basebackup.cpp b/src/bin/pg_basebackup/pg_basebackup.cpp index 4a27ebdb1..3a403df1a 100644 --- a/src/bin/pg_basebackup/pg_basebackup.cpp +++ b/src/bin/pg_basebackup/pg_basebackup.cpp @@ -1511,7 +1511,7 @@ static void backup_dw_file(const char *target_dir) char *unaligned_buf = NULL; /* Delete the dw file, if it exists. */ - remove_dw_file(DW_FILE_NAME, target_dir, real_file_path); + remove_dw_file(OLD_DW_FILE_NAME, target_dir, real_file_path); rc = memset_s(real_file_path, (PATH_MAX + 1), 0, (PATH_MAX + 1)); securec_check_c(rc, "\0", "\0"); @@ -2048,7 +2048,7 @@ static int GsBaseBackup(int argc, char** argv) break; } } - + /* If port is not specified by using -p, obtain the port through environment variables */ if (dbport == NULL) { char *value = NULL; @@ -2061,7 +2061,7 @@ static int GsBaseBackup(int argc, char** argv) "or import environment variables PGPORT.\n"), progname); exit(1); } - } + } /* * Any non-option arguments? diff --git a/src/bin/pg_basebackup/pg_recvlogical.cpp b/src/bin/pg_basebackup/pg_recvlogical.cpp index 324f636d0..dee410794 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.cpp +++ b/src/bin/pg_basebackup/pg_recvlogical.cpp @@ -35,6 +35,8 @@ /* Time to sleep between reconnection attempts */ #define RECONNECT_SLEEP_TIME 5 +static const uint64 upperPart = 32; + /* Global Options */ static char* outfile = NULL; static int verbose = 0; @@ -44,6 +46,10 @@ static XLogRecPtr startpos = InvalidXLogRecPtr; static bool do_create_slot = false; static bool do_start_slot = false; static bool do_drop_slot = false; +static bool g_parallel_decode = false; +static char g_decode_style = 'b'; +static bool g_batch_sending = false; +static bool g_raw = false; /* filled pairwise with option, value. 
value may be NULL */ static char** options; @@ -92,6 +98,7 @@ static void usage(void) " time between status packets sent to server (in seconds, defaults to 10)\n")); printf(_(" -S, --slot=SLOT use existing replication slot SLOT instead of starting a new one\n")); printf(_(" -I, --startpos=PTR Where in an existing slot should the streaming start\n")); + printf(_(" -r, --raw output raw results of parallel decoding without converting to text format\n")); printf(_("\nAction to be performed:\n")); printf(_(" --create create a new replication slot (for the slotname see --slot)\n")); printf(_(" --start start streaming in a replication slot (for the slotname see --slot)\n")); @@ -154,13 +161,14 @@ static bool sendFeedback(PGconn* conn, int64 now, bool force, bool replyRequeste if (verbose) fprintf(stderr, - _("%s: confirming write up to %X/%X, flush to %X/%X (slot %s)\n"), + _("%s: confirming write up to %X/%X, flush to %X/%X (slot %s) %ld\n"), progname, (uint32)(output_written_lsn >> 32), (uint32)output_written_lsn, (uint32)(output_fsync_lsn >> 32), (uint32)output_fsync_lsn, - replication_slot); + replication_slot, + now); replybuf[len] = 'r'; len += 1; @@ -276,12 +284,225 @@ static int sendStartReplicationCmd() return ret; } +/* + * append comma as separator + */ +static inline void AppendSeparator(PQExpBuffer res, uint16 attr, uint16 maxAttr) +{ + if (attr < maxAttr - 1) { + appendPQExpBufferStr(res, ", "); + } +} + +/* + * decode binary style tuple to text + */ +static void ResolveTuple(const char* stream, uint32* curpos, PQExpBuffer res, bool newTup) +{ + uint16 attrnum = ntohs(*(uint16 *)(stream + *curpos)); + *curpos += sizeof(attrnum); + if (newTup) { + appendPQExpBufferStr(res, "new_tuple: {"); + } else { + appendPQExpBufferStr(res, "old_value: {"); + } + for (uint16 i = 0; i < attrnum; i++) { + uint16 colLen = ntohs(*(uint16 *)(stream + *curpos)); + *curpos += sizeof(colLen); + assert(colLen != 0); + appendBinaryPQExpBuffer(res, stream + *curpos, colLen); + *curpos += colLen; + Oid typid = ntohl(*(Oid *)(stream + *curpos)); + *curpos += sizeof(Oid); + appendPQExpBuffer(res, "[typid = %u]: ", typid); + + uint32 dataLen = ntohl(*(uint32 *)(stream + *curpos)); + *curpos += sizeof(dataLen); + const uint32 nullTag = 0xFFFFFFFF; + if (dataLen == nullTag) { + appendPQExpBufferStr(res, "\"NULL\""); + } else if (dataLen == 0) { + appendPQExpBufferStr(res, "\"\""); + } else { + appendPQExpBufferChar(res, '\"'); + appendBinaryPQExpBuffer(res, stream + *curpos, dataLen); + appendPQExpBufferChar(res, '\"'); + *curpos += dataLen; + } + AppendSeparator(res, i, attrnum); + } + appendPQExpBufferChar(res, '}'); +} + +/* + * decode binary style DML to text + */ +static void DMLToText(const char* stream, uint32 *curPos, PQExpBuffer res) +{ + char dtype = stream[*curPos]; + if (dtype == 'I') { + appendPQExpBufferStr(res, "INSERT INTO "); + } else if (dtype == 'U') { + appendPQExpBufferStr(res, "UPDATE "); + } else { + appendPQExpBufferStr(res, "DELETE FROM "); + } + *curPos += 1; + uint16 schemaLen = ntohs(*(uint16 *)(stream + *curPos)); + *curPos += sizeof(schemaLen); + appendBinaryPQExpBuffer(res, stream + *curPos, schemaLen); + *curPos += schemaLen; + appendPQExpBufferChar(res, '.'); + uint16 tableLen = ntohs(*(uint16 *)(stream + *curPos)); + *curPos += sizeof(tableLen); + appendBinaryPQExpBuffer(res, stream + *curPos, tableLen); + *curPos += tableLen; + + if (stream[*curPos] == 'N') { + *curPos += 1; + appendPQExpBufferChar(res, ' '); + ResolveTuple(stream, curPos, res, true); + } + if (stream[*curPos] ==
'O') { + *curPos += 1; + appendPQExpBufferChar(res, ' '); + ResolveTuple(stream, curPos, res, false); + } +} + +/* + * decode binary style begin message to text + */ +static void BeginToText(const char* stream, uint32 *curPos, PQExpBuffer res) +{ + appendPQExpBufferStr(res, "BEGIN "); + *curPos += 1; + uint32 CSNupper = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(CSNupper); + uint32 CSNlower = ntohl(*(uint32 *)(&stream[*curPos])); + uint64 CSN = ((uint64)(CSNupper) << upperPart) + CSNlower; + *curPos += sizeof(CSNlower); + appendPQExpBuffer(res, "CSN: %lu ", CSN); + + uint32 LSNupper = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(LSNupper); + uint32 LSNlower = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(LSNlower); + appendPQExpBuffer(res, "commit_lsn: %X/%X", LSNupper, LSNlower); + + if (stream[*curPos] == 'T') { + *curPos += 1; + uint32 timeLen = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(uint32); + appendPQExpBufferStr(res, " commit_time: "); + appendBinaryPQExpBuffer(res, &stream[*curPos], timeLen); + *curPos += timeLen; + } +} + +/* + * decode binary style commit message to text + */ +static void CommitToText(const char* stream, uint32 *curPos, PQExpBuffer res) +{ + appendPQExpBufferStr(res, "COMMIT "); + *curPos += 1; + if (stream[*curPos] == 'X') { + *curPos += 1; + uint32 xidupper = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(xidupper); + uint32 xidlower = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(xidlower); + uint64 xid = ((uint64)(xidupper) << upperPart) + xidlower; + appendPQExpBuffer(res, "xid: %lu", xid); + } + if (stream[*curPos] == 'T') { + *curPos += 1; + uint32 timeLen = ntohl(*(uint32 *)(&stream[*curPos])); + *curPos += sizeof(uint32); + appendPQExpBufferStr(res, " commit_time: "); + appendBinaryPQExpBuffer(res, &stream[*curPos], timeLen); + *curPos += timeLen; + } +} + +/* + * decode binary style log stream to text + */ +static void StreamToText(const char* stream, PQExpBuffer res) +{ + uint32 pos = 0; + uint32 dmlLen = ntohl(*(uint32 *)(&stream[pos])); + /* if this is the end of stream, return */ + if (dmlLen == 0) { + return; + } + + pos += sizeof(dmlLen); + uint32 LSNupper = ntohl(*(uint32 *)(&stream[pos])); + pos += sizeof(LSNupper); + uint32 LSNlower = ntohl(*(uint32 *)(&stream[pos])); + pos += sizeof(LSNlower); + appendPQExpBuffer(res, "current_lsn: %X/%X ", LSNupper, LSNlower); + if (stream[pos] == 'B') { + BeginToText(stream, &pos, res); + } else if (stream[pos] == 'C') { + CommitToText(stream, &pos, res); + } else if (stream[pos] != 'P' && stream[pos] != 'F') { + DMLToText(stream, &pos, res); + } + if (stream[pos] == 'P') { + pos++; + appendPQExpBufferChar(res, '\n'); + StreamToText(stream + pos, res); + } else if (stream[pos] == 'F') { + appendPQExpBufferChar(res, '\n'); + } +} + +/* + * decode batch sending result stream to text. + */ +static void BatchStreamToText(const char* stream, PQExpBuffer res) +{ + uint32 pos = 0; + uint32 changeLen = ntohl(*(uint32 *)(&stream[pos])); + /* if this is the end of stream, return */ + if (changeLen == 0) { + return; + } + pos += sizeof(changeLen); + pos += sizeof(XLogRecPtr); + appendBinaryPQExpBuffer(res, stream + pos, changeLen - sizeof(XLogRecPtr)); + appendPQExpBufferChar(res, '\n'); + + BatchStreamToText(stream + sizeof(changeLen) + changeLen, res); +} + +/* + * Check stream connection. 
+ */ +static bool StreamCheckConn() +{ + PGresult* res = NULL; + res = PQgetResult(conn); + if (PQresultStatus(res) != PGRES_COMMAND_OK) { + fprintf(stderr, _("%s: unexpected termination of replication stream: %s"), progname, PQresultErrorMessage(res)); + if (res != NULL) { + PQclear(res); + } + return false; + } + PQclear(res); + res = NULL; + return true; +} + /* * Start the log streaming */ static void StreamLogicalLog(void) { - PGresult* res = NULL; char* copybuf = NULL; int64 last_status = -1; @@ -433,6 +654,9 @@ static void StreamLogicalLog(void) walEnd = keepalive_message.walEnd; output_written_lsn = Max(walEnd, output_written_lsn); replyRequested = keepalive_message.replyRequested; + if (!g_parallel_decode) { + fprintf(stderr, _("%s: written_lsn = %lu, current time = %ld \n"), progname, output_written_lsn, now); + } /* If the server requested an immediate reply, send one. */ if (replyRequested) { @@ -501,17 +725,23 @@ static void StreamLogicalLog(void) /* signal that a fsync is needed */ output_unsynced = true; + PQExpBuffer res = createPQExpBuffer(); + char *resultStream = copybuf + hdr_len; + if (g_parallel_decode && !g_raw && g_decode_style == 'b') { + StreamToText(copybuf + hdr_len, res); + bytes_left = res->len; + resultStream = res->data; + } else if (g_parallel_decode && g_batch_sending && !g_raw) { + BatchStreamToText(copybuf + hdr_len, res); + bytes_left = res->len; + resultStream = res->data; + } while (bytes_left) { - int ret; - - ret = write(outfd, copybuf + hdr_len + bytes_written, bytes_left); + int ret = write(outfd, resultStream + bytes_written, bytes_left); if (ret < 0) { fprintf(stderr, _("%s: could not write %d bytes to log file \"%s\": %s\n"), - progname, - bytes_left, - outfile, - strerror(errno)); + progname, bytes_left, outfile, strerror(errno)); goto error; } @@ -520,26 +750,20 @@ static void StreamLogicalLog(void) bytes_left -= ret; } + if (g_parallel_decode && (g_decode_style == 'b' || g_batch_sending)) { + continue; + } if (write(outfd, "\n", 1) != 1) { fprintf(stderr, _("%s: could not write %d bytes to log file \"%s\": %s\n"), - progname, - 1, - outfile, - strerror(errno)); + progname, 1, outfile, strerror(errno)); goto error; } } - res = PQgetResult(conn); - if (PQresultStatus(res) != PGRES_COMMAND_OK) { - fprintf(stderr, _("%s: unexpected termination of replication stream: %s"), progname, PQresultErrorMessage(res)); - if (res != NULL) - PQclear(res); + if (!StreamCheckConn()) { goto error; } - PQclear(res); - res = NULL; if (outfd != -1 && strcmp(outfile, "-") != 0) { int64 t = feGetCurrentTimestamp(); @@ -602,6 +826,24 @@ static bool checkIsDigit(const char* arg) return 1; } +static void CheckParallelDecoding(const char *data, const char *val) +{ + if (strncmp(data, "parallel-decode-num", sizeof("parallel-decode-num")) == 0) { + int parallelDecodeNum = atoi(val); + g_parallel_decode = parallelDecodeNum == 1 ? false : true; + } else if (strncmp(data, "decode-style", sizeof("decode-style")) == 0) { + g_decode_style = *val; + } +} + +static void CheckBatchSending(const char *data, const char *val) +{ + if (strncmp(data, "sending-batch", sizeof("sending-batch")) == 0) { + int batchSending = atoi(val); + g_batch_sending = batchSending == 0 ? false : true; + } +} + /** * Get options. 
*/ @@ -613,7 +855,7 @@ static int getOptions(const int argc, char* const* argv) {"verbose", no_argument, NULL, 'v'}, {"version", no_argument, NULL, 'V'}, {"help", no_argument, NULL, '?'}, - /* connnection options */ + /* connection options */ {"dbname", required_argument, NULL, 'd'}, {"host", required_argument, NULL, 'h'}, {"port", required_argument, NULL, 'p'}, @@ -627,6 +869,7 @@ static int getOptions(const int argc, char* const* argv) {"fsync-interval", required_argument, NULL, 'F'}, {"slot", required_argument, NULL, 'S'}, {"startpos", required_argument, NULL, 'I'}, + {"raw", no_argument, NULL, 'r'}, /* action */ {"create", no_argument, NULL, 1}, {"start", no_argument, NULL, 2}, @@ -636,9 +879,12 @@ static int getOptions(const int argc, char* const* argv) int c; int option_index; uint32 hi, lo; - while ((c = getopt_long(argc, argv, "f:F:nvd:h:o:p:U:wWP:s:S:I:", long_options, &option_index)) != -1) { + while ((c = getopt_long(argc, argv, "f:F:nvd:h:o:p:U:wWP:rs:S:I:", long_options, &option_index)) != -1) { switch (c) { /* general options */ + case 'r': + g_raw = true; + break; case 'f': check_env_value_c(optarg); if (outfile) { @@ -705,6 +951,8 @@ static int getOptions(const int argc, char* const* argv) options[(noptions - 1) * 2] = data; options[(noptions - 1) * 2 + 1] = val; + CheckParallelDecoding(data, val); + CheckBatchSending(data, val); } break; diff --git a/src/bin/pg_config/CMakeLists.txt b/src/bin/pg_config/CMakeLists.txt index 5506407e8..ea9bb7fa3 100755 --- a/src/bin/pg_config/CMakeLists.txt +++ b/src/bin/pg_config/CMakeLists.txt @@ -17,7 +17,7 @@ SET(TGT_pgconfig_INC SET(pgconfig_DEF_OPTIONS ${MACRO_OPTIONS}) SET(pgconfig_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-all) SET(pgconfig_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -SET(pgconfig_LINK_LIBS libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +SET(pgconfig_LINK_LIBS libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(pg_config TGT_pgconfig_SRC TGT_pgconfig_INC "${pgconfig_DEF_OPTIONS}" "${pgconfig_COMPILE_OPTIONS}" "${pgconfig_LINK_OPTIONS}" "${pgconfig_LINK_LIBS}") add_dependencies(pg_config pgport_static) target_link_directories(pg_config PUBLIC diff --git a/src/bin/pg_controldata/CMakeLists.txt b/src/bin/pg_controldata/CMakeLists.txt index 2be82f094..dd62714be 100755 --- a/src/bin/pg_controldata/CMakeLists.txt +++ b/src/bin/pg_controldata/CMakeLists.txt @@ -15,7 +15,7 @@ set(TGT_controldata_INC set(controldata_DEF_OPTIONS ${MACRO_OPTIONS}) set(controldata_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(controldata_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(controldata_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(controldata_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(pg_controldata TGT_controldata_SRC TGT_controldata_INC "${controldata_DEF_OPTIONS}" "${controldata_COMPILE_OPTIONS}" "${controldata_LINK_OPTIONS}" "${controldata_LINK_LIBS}") add_dependencies(pg_controldata elog_static pgport_static) target_link_directories(pg_controldata PUBLIC diff --git a/src/bin/pg_controldata/pg_controldata.cpp b/src/bin/pg_controldata/pg_controldata.cpp index 4c8ad6fb4..d9486f660 100644 --- a/src/bin/pg_controldata/pg_controldata.cpp +++ 
b/src/bin/pg_controldata/pg_controldata.cpp @@ -30,27 +30,6 @@ #define FirstNormalTransactionId ((TransactionId)3) #define TransactionIdIsNormal(xid) ((xid) >= FirstNormalTransactionId) -/* transform old version LSN into new version. */ -#define XLogRecPtrSwap(x) (((uint64)((x).xlogid)) << 32 | (x).xrecoff) - -/* - * TransactionIdLogicallyPrecedes --- is id1 logically < id2? - */ -bool TransactionIdLogicallyPrecedes(TransactionId id1, TransactionId id2) -{ - /* - * If either ID is a permanent XID then we can just do unsigned - * comparison. If both are normal, do a modulo-2^31 comparison. - */ - int32 diff; - - if (!TransactionIdIsNormal(id1) || !TransactionIdIsNormal(id2)) - return (id1 < id2); - - diff = (int32)(id1 - id2); - return (diff < 0); -} - static void usage(const char* progname) { printf(_("%s displays control information of a openGauss database cluster.\n\n"), progname); diff --git a/src/bin/pg_ctl/CMakeLists.txt b/src/bin/pg_ctl/CMakeLists.txt index de95cf4ab..ea75b668f 100755 --- a/src/bin/pg_ctl/CMakeLists.txt +++ b/src/bin/pg_ctl/CMakeLists.txt @@ -28,18 +28,20 @@ set(TGT_gsctl_INC ${PROJECT_TRUNK_DIR}/distribute/include ${LIBHOTPATCH_INCLUDE_PATH} ${ZLIB_INCLUDE_PATH} - ${ZSTD_INCLUDE_PATH} ) set(gsctl_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ -DFRONTEND) set(gsctl_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(gsctl_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(gsctl_LINK_LIBS libelog.a libbuildquery.a pg_rewind.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +set(gsctl_LINK_LIBS libelog.a libbuildquery.a pg_rewind.a libpgcommon.a libhotpatchclient.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND gsctl_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(gs_ctl TGT_gsctl_SRC TGT_gsctl_INC "${gsctl_DEF_OPTIONS}" "${gsctl_COMPILE_OPTIONS}" "${gsctl_LINK_OPTIONS}" "${gsctl_LINK_LIBS}") add_dependencies(gs_ctl elog_static buildquery_static pg_rewind_static pgcommon_static hotpatchclient_static pgport_static pq) target_link_directories(gs_ctl PUBLIC ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} - ${ZLIB_LIB_PATH} ${ZSTD_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${SECURE_LIB_PATH} + ${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${SECURE_LIB_PATH} ${LIBHOTPATCH_LIB_PATH} ${KERBEROS_LIB_PATH} ${CMAKE_BINARY_DIR}/lib ) install(TARGETS gs_ctl RUNTIME DESTINATION bin) diff --git a/src/bin/pg_ctl/Makefile b/src/bin/pg_ctl/Makefile index 52f24e0f6..8e2c47cbe 100644 --- a/src/bin/pg_ctl/Makefile +++ b/src/bin/pg_ctl/Makefile @@ -18,7 +18,9 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(libpq_srcdir) -I$(ZLIB_INCLUDE_PATH) $(CPPFLAGS) -DHAVE_LIBZ -DFRONTEND -I$(top_builddir)/src/bin/pg_rewind -LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +ifeq ($(enable_lite_mode), no) + LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +endif ifneq "$(MAKECMDGOALS)" "clean" ifneq "$(MAKECMDGOALS)" "distclean" diff 
--git a/src/bin/pg_ctl/backup.cpp b/src/bin/pg_ctl/backup.cpp index f2a1b3ebf..c486d77fd 100755 --- a/src/bin/pg_ctl/backup.cpp +++ b/src/bin/pg_ctl/backup.cpp @@ -306,6 +306,14 @@ static int tblspaceIndex = 0; exit(1); \ } +#define FREE_AND_RESET(ptr) \ + do { \ + if (NULL != (ptr)) { \ + free(ptr); \ + (ptr) = NULL; \ + } \ + } while (0) + static void DisconnectConnection() { if (streamConn != NULL) { @@ -803,6 +811,7 @@ static bool ReceiveAndUnpackTarFile(PGconn* conn, PGresult* res, int rownum) pg_log(PG_WARNING, _("could not read COPY data: %s\n"), PQerrorMessage(conn)); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } @@ -816,6 +825,7 @@ static bool ReceiveAndUnpackTarFile(PGconn* conn, PGresult* res, int rownum) pg_log(PG_WARNING, _("invalid tar block header size: %d\n"), r); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } totaldone += BUILD_PATH_LEN; @@ -823,6 +833,7 @@ if (sscanf_s(copybuf + 1048, "%20lo", &current_len_left) != 1) { pg_log(PG_WARNING, _("could not parse file size\n")); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } @@ -830,6 +841,7 @@ if (sscanf_s(&copybuf[1024], "%07o ", &filemode) != 1) { pg_log(PG_WARNING, _("could not parse file mode\n")); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } @@ -839,6 +851,7 @@ if (current_len_left < 0 || current_len_left > INT_MAX - 511) { pg_log(PG_WARNING, _("current_len_left is invalid\n")); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } current_padding = ((current_len_left + 511) & ~511) - current_len_left; @@ -877,6 +890,7 @@ strerror(errno)); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } } @@ -898,6 +912,7 @@ pg_log(PG_WARNING, _("could not create symbolic link from \"%s\" to \"%s\": %s\n"), filename, &copybuf[1081], strerror(errno)); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } } @@ -920,12 +935,14 @@ pg_log(PG_WARNING, _("could not create symbolic link from \"%s\" to \"%s\": %s\n"), filename, &copybuf[1081], strerror(errno)); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } } } else { pg_log(PG_WARNING, _("unrecognized link indicator \"%c\"\n"), copybuf[1080]); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } continue; /* directory or link handled */ @@ -943,6 +960,7 @@ if (NULL == file) { pg_log(PG_WARNING, _("could not create file \"%s\": %s\n"), filename, strerror(errno)); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } @@ -979,6 +997,7 @@ if (fwrite(copybuf, r, 1, file) != 1) { pg_log(PG_WARNING, _("could not write to file \"%s\": %s\n"), filename, strerror(errno)); DisconnectConnection(); + FREE_AND_RESET(copybuf); return false; } } @@ -1010,6 +1029,7 @@ file = NULL; pg_log(PG_WARNING, _("COPY stream ended before last file was finished\n")); DisconnectConnection();
+ FREE_AND_RESET(copybuf); return false; } @@ -2286,7 +2306,7 @@ static bool backup_dw_file(const char* target_dir) char* unaligned_buf = NULL; /* Delete the dw file, if it exists. */ - rc = snprintf_s(dw_file_path, PATH_MAX, PATH_MAX - 1, "%s/%s", target_dir, DW_FILE_NAME); + rc = snprintf_s(dw_file_path, PATH_MAX, PATH_MAX - 1, "%s/%s", target_dir, OLD_DW_FILE_NAME); securec_check_ss_c(rc, "\0", "\0"); if (realpath(dw_file_path, real_file_path) == NULL) { if (real_file_path[0] == '\0') { diff --git a/src/bin/pg_ctl/pg_ctl.cpp b/src/bin/pg_ctl/pg_ctl.cpp index 0e17bc88d..a8f923efe 100755 --- a/src/bin/pg_ctl/pg_ctl.cpp +++ b/src/bin/pg_ctl/pg_ctl.cpp @@ -126,7 +126,8 @@ typedef enum { typedef enum { UNKNOWN_OPERATION = 0, ADD_OPERATION, - REMOVE_OPERATION + REMOVE_OPERATION, + CHANGE_OPERATION } MemberOperation; #define MAX_PERCENT 100 @@ -188,6 +189,7 @@ static char setrunmode_status_file[MAXPGPATH]; static char g_changeroleStatusFile[MAXPGPATH]; static char primary_file[MAXPGPATH]; static char standby_file[MAXPGPATH]; +static char cascade_standby_file[MAXPGPATH]; static char pg_ctl_lockfile[MAXPGPATH]; static char pg_conf_file[MAXPGPATH]; static FILE* lockfile = NULL; @@ -211,6 +213,8 @@ static MemberOperation member_operation = UNKNOWN_OPERATION; static unsigned int new_node_port = 0; static char new_node_ip[IP_LEN] = {0}; static unsigned int new_node_id = 0; +static int group = -1; +static int priority = -1; static bool g_dcfEnabled = false; bool no_need_fsync = false; bool need_copy_upgrade_file = false; @@ -878,18 +882,32 @@ static pgpid_t start_postmaster(void) securec_check_ss_c(ret, "\0", "\0"); } #else - ret = snprintf_s(cmd, - MAXPGPATH, - MAXPGPATH - 1, - "exec \"%s\" %s %s %s< \"%s\" >> \"%s\" 2>&1", - exec_path, - pgdata_opt, - post_opts, - pgha_opt ? pgha_opt : "", - DEVNULL, - log_file); - securec_check_ss_c(ret, "\0", "\0"); - + if (undocumented_version) { + ret = snprintf_s(cmd, + MAXPGPATH, + MAXPGPATH - 1, + "exec \"%s\" -u %u %s %s %s< \"%s\" >> \"%s\" 2>&1", + exec_path, + (uint32)undocumented_version, + pgdata_opt, + post_opts, + pgha_opt ? pgha_opt : "", + DEVNULL, + log_file); + securec_check_ss_c(ret, "\0", "\0"); + } else { + ret = snprintf_s(cmd, + MAXPGPATH, + MAXPGPATH - 1, + "exec \"%s\" %s %s %s< \"%s\" >> \"%s\" 2>&1", + exec_path, + pgdata_opt, + post_opts, + pgha_opt ? pgha_opt : "", + DEVNULL, + log_file); + securec_check_ss_c(ret, "\0", "\0"); + } #endif } else { #ifdef ENABLE_MULTIPLE_NODES @@ -922,16 +940,30 @@ static pgpid_t start_postmaster(void) securec_check_ss_c(ret, "\0", "\0"); } #else - ret = snprintf_s(cmd, - MAXPGPATH, - MAXPGPATH - 1, - "exec \"%s\" %s %s %s < \"%s\" 2>&1", - exec_path, - pgdata_opt, - post_opts, - pgha_opt ? pgha_opt : "", - DEVNULL); - securec_check_ss_c(ret, "\0", "\0"); + if (undocumented_version) { + ret = snprintf_s(cmd, + MAXPGPATH, + MAXPGPATH - 1, + "exec \"%s\" -u %u %s %s %s < \"%s\" 2>&1", + exec_path, + (uint32)undocumented_version, + pgdata_opt, + post_opts, + pgha_opt ? pgha_opt : "", + DEVNULL); + securec_check_ss_c(ret, "\0", "\0"); + } else { + ret = snprintf_s(cmd, + MAXPGPATH, + MAXPGPATH - 1, + "exec \"%s\" %s %s %s < \"%s\" 2>&1", + exec_path, + pgdata_opt, + post_opts, + pgha_opt ? 
pgha_opt : "", + DEVNULL); + securec_check_ss_c(ret, "\0", "\0"); + } #endif } @@ -2144,6 +2176,10 @@ static void do_notify(uint32 term) pgha_str[strlen("primary")] == '\0') { notify_file = xstrdup(primary_file); notify_mode = PRIMARY_MODE; + } else if ((pgha_str != NULL) && 0 == strncmp(pgha_str, "cascade_standby", strlen("cascade_standby")) && + pgha_str[strlen("cascade_standby")] == '\0') { + notify_file = xstrdup(cascade_standby_file); + notify_mode = CASCADE_STANDBY_MODE; } else { pg_log(PG_WARNING, _(" the parameter of notify is not recognized\n")); exit(1); @@ -3474,6 +3510,7 @@ static void do_advice(void) } #ifndef ENABLE_MULTIPLE_NODES +#ifndef ENABLE_LITE_MODE static void doDCFAddCmdHelp(void) { printf(_(" %s member [-O OPERATION] [-u DCF-NODE-ID] [-i DCF-NODE-IP] " @@ -3486,13 +3523,15 @@ static void doDCFAddCmdHelp(void) static void doDCFOptionHelp(void) { printf(_("\nOptions for DCF:\n")); - printf(_(" -O, --operation=OPERATION Operation of adding or removing a DN configuration.\n")); + printf(_(" -O, --operation=OPERATION Operation of adding or removing or change a DN configuration.\n")); printf(_(" -u, --nodeid=DCF-NODE-ID It is required for member command.\n")); printf(_(" -i, --ip=DCF-NODE-IP It is required when OPERATION of member command is \"add\".\n")); printf(_(" -e, --port=DCF-NODE-PORT It is required when OPERATION of member command is \"add\".\n")); printf(_(" -R, --role=DCF-NODE-ROLE The option is \"follower\" or \"passive\".\n")); printf(_(" -v, --votenum=VOTE-NUM It is required when XMODE is \"minority\".\n")); printf(_(" -x, --xmode=XMODE The option can be \"minority\" or \"normal\" mode in DCF.\n")); + printf(_(" -G, The option is a int type to set group number in DCF.\n")); + printf(_(" --priority, The option is a int type to set priority number in DCF.\n")); } static void doDCFOptionDesHelp(void) @@ -3500,6 +3539,7 @@ static void doDCFOptionDesHelp(void) printf(_("\nOPERATION are:\n")); printf(_(" add add a member to DCF configuration.\n")); printf(_(" remove remove a member from DCF configuration.\n")); + printf(_(" change change DCF node configuration.\n")); printf(_("\nXMODE are:\n")); printf(_(" minority the leader in DCF can reach consensus when getting less than half nodes' response.\n")); printf(_(" normal the leader in DCF can reach consensus when getting more than half nodes' response.\n")); @@ -3509,6 +3549,7 @@ static void doDCFOptionDesHelp(void) } #endif +#endif static void do_help(void) { @@ -3552,15 +3593,18 @@ static void do_help(void) #endif (void)printf(_(" %s querybuild [-D DATADIR]\n"), progname); printf(_(" %s copy [-D DATADIR] [-Q COPYMODE]\n"), progname); -#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_PRIVATEGAUSS) +#if defined(ENABLE_MULTIPLE_NODES) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE))) (void)printf(_(" %s hotpatch [-D DATADIR] [-a ACTION] [-n NAME]\n"), progname); #endif #ifndef ENABLE_MULTIPLE_NODES +#ifndef ENABLE_LITE_MODE doDCFAddCmdHelp(); +#endif #endif printf(_("\nCommon options:\n")); - printf(_(" -b, --mode=MODE the mode of building the datanode.MODE can be \"full\", \"incremental\", " + printf(_(" -b, --mode=MODE the mode of building the datanode or coordinator." 
+ "MODE can be \"full\", \"incremental\", " "\"auto\", \"standby_full\", \"copy_secure_files\", \"copy_upgrade_file\", \"cross_cluster_full\", " "\"cross_cluster_incremental\", \"cross_cluster_standby_full\"\n")); printf(_(" -D, --pgdata=DATADIR location of the database storage area\n")); @@ -3603,21 +3647,25 @@ static void do_help(void) printf(_(" -p PATH-TO-POSTGRES normally not necessary\n")); printf(_("\nOptions for stop or restart:\n")); printf(_(" -m, --mode=MODE MODE can be \"fast\" or \"immediate\"\n")); +#ifdef ENABLE_MULTIPLE_NODES printf(_("\nOptions for restore:\n")); printf(_(" --remove-backup Remove the pg_rewind_bak dir after restore with \"restore\" command\n")); +#endif printf(_("\nOptions for xlog copy:\n")); printf(_( " -Q, --mode=MODE MODE can be \"copy_from_local\", \"force_copy_from_local\", \"copy_from_share\"\n")); -#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_PRIVATEGAUSS) +#if defined(ENABLE_MULTIPLE_NODES) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE))) printf(_("\nOptions for hotpatch:\n")); printf( _(" -a ACTION patch command, ACTION can be \"load\" \"unload\" \"active\" \"deactive\" \"info\" \"list\"\n")); printf(_(" -n NAME patch name, NAME should be patch name with path\n")); #endif #ifndef ENABLE_MULTIPLE_NODES +#ifndef ENABLE_LITE_MODE doDCFOptionHelp(); doDCFOptionDesHelp(); +#endif #endif printf(_("\nShutdown modes are:\n")); @@ -3626,7 +3674,6 @@ static void do_help(void) (void)printf(_("\nSwitchover modes are:\n")); (void)printf(_(" -f quit directly, with proper shutdown and do not perform checkpoint\n")); - (void)printf(_(" smart demote primary after all clients have disconnected(not recommended in cluster)\n")); (void)printf(_(" fast demote primary directly, with proper shutdown\n")); printf(_("\nSERVERMODE are:\n")); @@ -3653,9 +3700,7 @@ static void do_help(void) #endif printf(_("\nBuild connection option:\n")); printf(_(" -r, --recvtimeout=INTERVAL time that receiver waits for communication from server (in seconds)\n")); -#ifdef ENABLE_MULTIPLE_NODES - printf(_(" -C, connector CN/DN connect to CN for build\n")); -#endif + printf(_(" -C, connector CN/DN connect to specified CN/DN for build\n")); #if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS))) printf("\nReport bugs to GaussDB support.\n"); @@ -3688,6 +3733,8 @@ static void set_member_operation(const char* operationopt) member_operation = ADD_OPERATION; } else if (strcmp(operationopt, "remove") == 0) { member_operation = REMOVE_OPERATION; + } else if (strcmp(operationopt, "change") == 0) { + member_operation = CHANGE_OPERATION; } else { pg_log(PG_WARNING, _("unrecognized member operation \"%s\"\n"), operationopt); exit(1); @@ -3876,6 +3923,8 @@ static char* get_localrole_string(ServerMode mode) return "Primary"; case STANDBY_MODE: return "Standby"; + case CASCADE_STANDBY_MODE: + return "Cascade Standby"; case PENDING_MODE: return "Pending"; default: @@ -4043,30 +4092,36 @@ static bool DoAutoBuild(uint32 term) static bool DoStandbyBuild(uint32 term) { bool buildSuccess = false; - pg_log(PG_WARNING, "%s: change build mode to CROSS_CLUSTER_INC_BUILD.\n", progname); - build_mode = CROSS_CLUSTER_INC_BUILD; - for (int i = 0; i < INC_BUILD_RETRY_TIMES; ++i) { - buildSuccess = do_incremental_build(term); - if (buildSuccess) { - break; + if (conn_str == NULL) { + pg_log(PG_WARNING, "%s: change build mode to CROSS_CLUSTER_INC_BUILD.\n", progname); + build_mode = CROSS_CLUSTER_INC_BUILD; + for (int i = 0; i < INC_BUILD_RETRY_TIMES; ++i) { + buildSuccess = 
do_incremental_build(term); + if (buildSuccess) { + break; + } + ResetBuildInfo(); } - ResetBuildInfo(); } if (!buildSuccess) { - pg_log(PG_WARNING, "%s:cross inc build failed, change build mode to standby full build.\n", progname); - build_mode = STANDBY_FULL_BUILD; - buildSuccess = do_actual_build(term); - if (!buildSuccess) { - pg_log(PG_WARNING, "%s:standby full build failed, change build mode to cross full build.\n", progname); - build_mode = CROSS_CLUSTER_FULL_BUILD; + if (conn_str != NULL) { buildSuccess = do_actual_build(term); - } - if (!buildSuccess) { - pg_log(PG_WARNING, - "%s:standby full build failed, change build mode to cross standby full build.\n", progname); - build_mode = CROSS_CLUSTER_STANDBY_FULL_BUILD; + } else { + pg_log(PG_WARNING, "%s:cross inc build failed, change build mode to standby full build.\n", progname); + build_mode = STANDBY_FULL_BUILD; buildSuccess = do_actual_build(term); + if (!buildSuccess) { + pg_log(PG_WARNING, "%s:standby full build failed, change build mode to cross full build.\n", progname); + build_mode = CROSS_CLUSTER_FULL_BUILD; + buildSuccess = do_actual_build(term); + } + if (!buildSuccess) { + pg_log(PG_WARNING, + "%s:standby full build failed, change build mode to cross standby full build.\n", progname); + build_mode = CROSS_CLUSTER_STANDBY_FULL_BUILD; + buildSuccess = do_actual_build(term); + } } } return buildSuccess; @@ -4097,7 +4152,7 @@ static void do_build(uint32 term) /* if the connect info is illegal, exit */ if ((conn_str != NULL) && (CheckLegalityOfConnInfo() == false)) { - pg_log(PG_WARNING, "%s: Invalid coordinator connector: %s.\n", progname, conn_str); + pg_log(PG_WARNING, "%s: Invalid datanode/coordinator connector: %s.\n", progname, conn_str); exit(1); } if (pid > 0 && build_mode != COPY_SECURE_FILES_BUILD) { @@ -4210,59 +4265,6 @@ bool get_conn_exe_sql(const char* sqlCommond) return result; } -static void find_nested_pgconf(const char** optlines, char* opt_name) -{ - const char* p = NULL; - int i = 0; - size_t paramlen = 0; - paramlen = (size_t)strnlen(opt_name, MAX_PARAM_LEN); - for (i = 0; optlines[i] != NULL; i++) { - p = optlines[i]; - while (isspace((unsigned char)*p)) { - p++; - } - if (pg_strncasecmp(p, opt_name, paramlen) != 0) { - continue; - } - while (isspace((unsigned char)*p)) { - p++; - } - pg_log(PG_WARNING, - _("There is nested config file in postgresql.conf: %sWhich is not supported by build. 
" - "Please move out the nested config files from %s and comment the 'include' config in postgresql.conf.\n" - "You can add option '-q' to disable autostart during build and restore the change manually " - "before starting gaussdb.\n"), - p, - pg_data); - exit(1); - } -} - -static void check_nested_pgconf(void) -{ - char config_file[MAXPGPATH] = {0}; - char** optlines = NULL; - int ret = EOK; - static char* optname[] = {"include ", "include_if_exists "}; - - ret = snprintf_s(config_file, MAXPGPATH, MAXPGPATH - 1, "%s/postgresql.conf", pg_data); - securec_check_ss_c(ret, "\0", "\0"); - config_file[MAXPGPATH - 1] = '\0'; - optlines = readfile(config_file); - - if (optlines == NULL) { - pg_log(PG_WARNING, _("%s cannot be opened.\n"), config_file); - exit(1); - } - - for (int i = 0; i < (int)lengthof(optname); i++) { - find_nested_pgconf((const char**)optlines, optname[i]); - } - - freefile(optlines); - optlines = NULL; -} - static void do_full_backup(uint32 term) { int ret = 0; @@ -4526,6 +4528,60 @@ static void CheckBuildParameter() } } +static void find_nested_pgconf(const char** optlines, char* opt_name) +{ + const char* p = NULL; + int i = 0; + size_t paramlen = 0; + paramlen = (size_t)strnlen(opt_name, MAX_PARAM_LEN); + for (i = 0; optlines[i] != NULL; i++) { + p = optlines[i]; + while (isspace((unsigned char)*p)) { + p++; + } + if (pg_strncasecmp(p, opt_name, paramlen) != 0) { + continue; + } + while (isspace((unsigned char)*p)) { + p++; + } + + pg_log(PG_WARNING, + _("There is nested config file in postgresql.conf: %sWhich is not supported by build. " + "Please move out the nested config files from %s and comment the 'include' config in postgresql.conf.\n" + "You can add option '-q' to disable autostart during build and restore the change manually " + "before starting gaussdb.\n"), + p, + pg_data); + exit(1); + } +} + +static void check_nested_pgconf(void) +{ + char config_file[MAXPGPATH] = {0}; + char** optlines = NULL; + int ret = EOK; + static char* optname[] = {"include ", "include_if_exists "}; + + ret = snprintf_s(config_file, MAXPGPATH, MAXPGPATH - 1, "%s/postgresql.conf", pg_data); + securec_check_ss_c(ret, "\0", "\0"); + config_file[MAXPGPATH - 1] = '\0'; + optlines = readfile(config_file); + + if (optlines == NULL) { + pg_log(PG_WARNING, _("%s cannot be opened.\n"), config_file); + exit(1); + } + + for (int i = 0; i < (int)lengthof(optname); i++) { + find_nested_pgconf((const char**)optlines, optname[i]); + } + + freefile(optlines); + optlines = NULL; +} + /* * build_mode: * AUTO_BUILD: do gs_rewind first, after failed 3 times, do full @@ -5475,10 +5531,13 @@ void do_change_role(void) exit(1); } int timeoutRet = 2; + int ret = 0; + char context[MAXPGPATH] = {0}; bool isTimeout = false; bool isSuccess = false; + int roleStatus = -1; ServerMode run_mode = get_runmode(); - if (run_mode == PRIMARY_MODE) { + if (run_mode == PRIMARY_MODE && strcmp(new_role, "passive") != 0) { pg_log(PG_WARNING, _("Can't change primary role.\n")); exit(1); } @@ -5493,8 +5552,15 @@ void do_change_role(void) } pg_log(PG_WARNING, _("Start changing local DCF node role.\n")); + if (strcmp(new_role, "fo") == 0) { + roleStatus = 0; + } else if (strcmp(new_role, "pa") == 0) { + roleStatus = 1; + } + ret = snprintf_s(context, MAXPGPATH, MAXPGPATH - 1, "%d_%d_%d", roleStatus, group, priority); + securec_check_ss_c(ret, "\0", "\0"); /* Write role into change_role_file */ - if (!WriteFileInfo(change_role_file, new_role, strlen(new_role))) { + if (!WriteFileInfo(change_role_file, context, strlen(context))) { 
RemoveFileIfExist(change_role_file); exit(1); } @@ -5656,6 +5722,8 @@ void SetConfigFilePath() securec_check_ss_c(ret, "\0", "\0"); ret = snprintf_s(standby_file, MAXPGPATH, MAXPGPATH - 1, "%s/standby", pg_data); securec_check_ss_c(ret, "\0", "\0"); + ret = snprintf_s(cascade_standby_file, MAXPGPATH, MAXPGPATH - 1, "%s/cascade_standby", pg_data); + securec_check_ss_c(ret, "\0", "\0"); ret = snprintf_s(pg_ctl_lockfile, MAXPGPATH, MAXPGPATH - 1, "%s/pg_ctl.lock", pg_data); securec_check_ss_c(ret, "\0", "\0"); ret = snprintf_s(pg_conf_file, MAXPGPATH, MAXPGPATH - 1, "%s/postgresql.conf", pg_data); @@ -5726,6 +5794,7 @@ int main(int argc, char** argv) {"force", no_argument, NULL, 'f'}, {"obsmode", no_argument, NULL, 2}, {"no-fsync", no_argument, NULL, 3}, + {"priority", required_argument, NULL, 4}, {"keycn", required_argument, NULL, 'k'}, {"slotname", required_argument, NULL, 'K'}, {"taskid", required_argument, NULL, 'I'}, @@ -5799,10 +5868,15 @@ FREE_AND_RESET(pgxcCommand); pgxcCommand = xstrdup("--single_node"); #ifdef ENABLE_PRIVATEGAUSS +#ifndef ENABLE_LITE_MODE while ((c = getopt_long(argc, argv, "a:b:cD:e:fi:G:l:m:M:N:n:o:O:p:P:r:R:v:x:sS:t:u:U:wWZ:C:dqL:T:Q:", long_options, &option_index)) != -1) #else - while ((c = getopt_long(argc, argv, "b:cD:e:fi:l:m:M:N:o:O:p:P:r:R:v:x:sS:t:u:U:wWZ:dqL:T:Q:", long_options, + while ((c = getopt_long(argc, argv, "b:cD:e:fi:G:l:m:M:N:o:O:p:P:r:R:v:x:sS:t:u:U:wWZ:C:dqL:T:Q:", long_options, + &option_index)) != -1) +#endif +#else + while ((c = getopt_long(argc, argv, "b:cD:e:fi:G:l:m:M:N:o:O:p:P:r:R:v:x:sS:t:u:U:wWZ:dqL:T:Q:", long_options, &option_index)) != -1) #endif #endif @@ -6143,7 +6217,16 @@ FREE_AND_RESET(taskid); taskid = xstrdup(optarg); break; - + case 'G': { + check_input_for_security(optarg); + check_num_input(optarg); + group = atoi(optarg); + if (group < 0 || group > INT_MAX) { + pg_log(PG_WARNING, _("unexpected group number specified\n")); + goto Error; + } + break; + } case 1: clear_backup_dir = true; break; @@ -6153,6 +6236,16 @@ case 3: no_need_fsync = true; break; + case 4: { + check_input_for_security(optarg); + check_num_input(optarg); + priority = atoi(optarg); + if (priority < 0 || priority > INT_MAX) { + pg_log(PG_WARNING, _("unexpected priority number specified\n")); + goto Error; + } + break; + } default: /* getopt_long already issued a suitable error message */ do_advice(); @@ -6194,6 +6287,8 @@ ctl_command = ADD_MEMBER_COMMAND; else if (strcmp(argv[optind], "member") == 0 && member_operation == REMOVE_OPERATION) ctl_command = REMOVE_MEMBER_COMMAND; + else if (strcmp(argv[optind], "member") == 0 && member_operation == CHANGE_OPERATION) + ctl_command = CHANGE_ROLE_COMMAND; else if (strcmp(argv[optind], "changerole") == 0) ctl_command = CHANGE_ROLE_COMMAND; else if (strcmp(argv[optind], "setrunmode") == 0) diff --git a/src/bin/pg_dump/CMakeLists.txt b/src/bin/pg_dump/CMakeLists.txt index 01a0329b5..be0933057 100755 --- a/src/bin/pg_dump/CMakeLists.txt +++ b/src/bin/pg_dump/CMakeLists.txt @@ -60,23 +60,41 @@ set(TGT_dump_INC ${LIBCURL_INCLUDE_PATH} ${BOOST_INCLUDE_PATH} ${ZLIB_INCLUDE_PATH} - ${ZSTD_INCLUDE_PATH} ) -set(dump_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ -DHAVE_CE) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(dump_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ -DHAVE_CE) +else() +
set(dump_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_LIBZ) +endif() set(dump_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(dump_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(dump_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -lpq_ce -lcurl -lcjson) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(dump_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -lpq_ce -lcurl -lcjson) +else() + set(dump_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -lpq -lcurl -lcjson) +endif() +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND dump_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - set(dump_LINK_LIBS ${dump_LINK_LIBS} -lgs_ktool -lkmc) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(dump_LINK_LIBS ${dump_LINK_LIBS} -lgs_ktool -lkmc) + endif() endif() set(dump_LINK_LIBS utils_aes_aes ${dump_LINK_LIBS}) set(dump_LINK_DIRS ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} - ${ZLIB_LIB_PATH} ${ZSTD_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} + ${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${SECURE_LIB_PATH} ${KMC_LIB_PATH} ${CJSON_LIB_PATH} ${LIBCURL_LIB_PATH} ${KERBEROS_LIB_PATH} ${CMAKE_BINARY_DIR}/lib) -set(dump_LINK_DEPEND utils_aes_aes elog_static pgport_static pq_ce) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(dump_LINK_DEPEND utils_aes_aes elog_static pgport_static pq_ce) +else() + set(dump_LINK_DEPEND utils_aes_aes elog_static pgport_static pq) +endif() if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - list(APPEND dump_LINK_DEPEND gs_ktool) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND dump_LINK_DEPEND gs_ktool) + endif() endif() if("${ENABLE_UT}" STREQUAL "ON") @@ -96,9 +114,15 @@ set(dumpall_LINK_LIBS utils_aes_aes ${dump_LINK_LIBS}) set(dumpall_LINK_DIRS ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${SECURE_LIB_PATH} ${KMC_LIB_PATH} ${CJSON_LIB_PATH} ${LIBCURL_LIB_PATH} ${KERBEROS_LIB_PATH} ${CMAKE_BINARY_DIR}/lib) -set(dumpall_LINK_DEPEND utils_aes_aes elog_static pgport_static pq_ce) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(dumpall_LINK_DEPEND utils_aes_aes elog_static pgport_static pq_ce) +else() + set(dumpall_LINK_DEPEND utils_aes_aes elog_static pgport_static pq) +endif() if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - list(APPEND dumpall_LINK_DEPEND gs_ktool) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND dumpall_LINK_DEPEND gs_ktool) + endif() endif() if("${ENABLE_UT}" STREQUAL "ON") @@ -115,15 +139,29 @@ else() endif() set(restore_LINK_LIBS utils_aes_aes ${dump_LINK_LIBS}) -add_bintarget(gs_restore TGT_restore_SRC TGT_dump_INC "${dump_DEF_OPTIONS}" "${dump_COMPILE_OPTIONS}" "${dump_LINK_OPTIONS}" "${restore_LINK_LIBS}") -add_dependencies(gs_restore utils_aes_aes elog_static pgport_static pq_ce) -if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - 
add_dependencies(gs_restore gs_ktool) -endif() -target_link_directories(gs_restore PUBLIC - ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} +set(restore_LINK_DIRS ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} - ${SECURE_LIB_PATH} ${KMC_LIB_PATH} ${CJSON_LIB_PATH} ${LIBCURL_LIB_PATH} ${KERBEROS_LIB_PATH} ${CMAKE_BINARY_DIR}/lib -) -install(TARGETS gs_restore RUNTIME DESTINATION bin) + ${SECURE_LIB_PATH} ${KMC_LIB_PATH} ${CJSON_LIB_PATH} ${LIBCURL_LIB_PATH} ${KERBEROS_LIB_PATH} ${CMAKE_BINARY_DIR}/lib) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(restore_LINK_DEPEND utils_aes_aes elog_static pgport_static pq_ce) +else() + set(restore_LINK_DEPEND utils_aes_aes elog_static pgport_static pq) +endif() +if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND restore_LINK_DEPEND gs_ktool) + endif() +endif() +if("${ENABLE_UT}" STREQUAL "ON") + add_shared_libtarget(utgsrestore TGT_restore_SRC TGT_dump_INC "${dump_DEF_OPTIONS}" "${dump_COMPILE_OPTIONS}" "${dump_LINK_OPTIONS}") + add_dependencies(utgsrestore ${restore_LINK_DEPEND}) + target_link_libraries(utgsrestore PRIVATE ${restore_LINK_LIBS}) + target_link_directories(utgsrestore PUBLIC ${restore_LINK_DIRS}) + install(TARGETS utgsrestore LIBRARY DESTINATION lib) +else() + add_bintarget(gs_restore TGT_restore_SRC TGT_dump_INC "${dump_DEF_OPTIONS}" "${dump_COMPILE_OPTIONS}" "${dump_LINK_OPTIONS}" "${restore_LINK_LIBS}") + add_dependencies(gs_restore ${restore_LINK_DEPEND}) + target_link_directories(gs_restore PUBLIC ${restore_LINK_DIRS}) + install(TARGETS gs_restore RUNTIME DESTINATION bin) +endif() diff --git a/src/bin/pg_dump/Makefile b/src/bin/pg_dump/Makefile index 440a56d77..db6f1f4cc 100644 --- a/src/bin/pg_dump/Makefile +++ b/src/bin/pg_dump/Makefile @@ -47,7 +47,10 @@ OBJS= pg_backup_archiver.o pg_backup_db.o pg_backup_custom.o \ pg_backup_null.o pg_backup_tar.o \ pg_backup_directory.o dumpmem.o dumputils.o compress_io.o $(WIN32RES) -LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -lcjson -lcurl -lz +LIBS += -lcjson -lcurl -lz +ifeq ($(enable_lite_mode), no) + LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +endif COMMON_OBJS = $(top_builddir)/src/lib/elog/elog.a EXTRA_OBJS = $(top_builddir)/src/gausskernel/cbb/utils/aes/aes.o @@ -59,7 +62,9 @@ kwlookup.cpp: % : $(top_srcdir)/src/common/backend/parser/% all: submake-aes gs_dump gs_restore gs_dumpall libpq_pgport:=$(subst -lpq,-lpq_ce,$(libpq_pgport)) ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) -libpq_pgport += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc + ifneq ($(enable_lite_mode), yes) + libpq_pgport += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -l$(SECURE_C_CHECK) -L$(LIBKMC_LIB_PATH) -lkmc + endif endif gs_dump: pg_dump.o common.o pg_dump_sort.o $(OBJS) $(KEYWRDOBJS) $(COMMON_OBJS) | submake-libpq_ce submake-libpgport diff --git a/src/bin/pg_dump/pg_backup_archiver.cpp b/src/bin/pg_dump/pg_backup_archiver.cpp index 617cb4765..cbe083f5c 100644 --- a/src/bin/pg_dump/pg_backup_archiver.cpp +++ b/src/bin/pg_dump/pg_backup_archiver.cpp @@ -209,13 +209,6 @@ Archive* CreateArchive(const char* FileSpec, const ArchiveFormat fmt, const int return 
(Archive*)AH; } -#ifdef ENABLE_UT -Archive* uttest_CreateArchive(const char* FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode) -{ - return CreateArchive(FileSpec, fmt, compression, mode); -} -#endif - /* Open an existing archive */ /* Public */ Archive* OpenArchive(const char* FileSpec, const ArchiveFormat fmt) diff --git a/src/bin/pg_dump/pg_backup_custom.cpp b/src/bin/pg_dump/pg_backup_custom.cpp index 6f6729a04..60e793a98 100644 --- a/src/bin/pg_dump/pg_backup_custom.cpp +++ b/src/bin/pg_dump/pg_backup_custom.cpp @@ -197,6 +197,13 @@ static void _ArchiveEntry(ArchiveHandle* AH, TocEntry* te) te->formatData = (void*)ctx; } +#ifdef ENABLE_UT +void uttest_custom_ArchiveEntry(ArchiveHandle* AH, TocEntry* te) +{ + _ArchiveEntry(AH, te); +} +#endif + /* * Called by the Archiver to save any extra format-related TOC entry * data. diff --git a/src/bin/pg_dump/pg_dump.cpp b/src/bin/pg_dump/pg_dump.cpp index 7aa8f003e..1a42a86b1 100644 --- a/src/bin/pg_dump/pg_dump.cpp +++ b/src/bin/pg_dump/pg_dump.cpp @@ -152,6 +152,9 @@ const int MAX_CMK_STORE_SIZE = 64; #include "utils/aes.h" #include "pgtime.h" +#ifdef ENABLE_UT +#define static +#endif #define FREE_PTR(ptr) \ if (ptr != NULL) { \ @@ -229,7 +232,7 @@ static char connected_node_type = 'N'; /* 'N' -- NONE char* all_data_nodename_list = NULL; const uint32 USTORE_UPGRADE_VERSION = 92368; const uint32 PACKAGE_ENHANCEMENT = 92444; -const uint32 SUBSCRIPTION_VERSION = 92504; +const uint32 SUBSCRIPTION_VERSION = 92580; #ifdef DUMPSYSLOG char* syslogpath = NULL; @@ -432,8 +435,13 @@ static void getBlobs(Archive* fout); static void dumpBlob(Archive* fout, BlobInfo* binfo); static int dumpBlobs(Archive* fout, void* arg); static void dumpDatabase(Archive* AH); +static void getGrantAnyPrivilegeQuery(Archive* fout, PQExpBuffer grantAnyPrivilegeSql, + Oid roleid, const char* adminOption); +static void dumpAnyPrivilege(Archive* fout); +#ifdef HAVE_CE static void dumpClientGlobalKeys(Archive* fout, const Oid nspoid, const char *nspname); static void dumpColumnEncryptionKeys(Archive* fout, const Oid nspoid, const char *nspname); +#endif static void dumpEncoding(Archive* AH); static void dumpStdStrings(Archive* AH); static void binary_upgrade_set_type_oids_by_type_oid( @@ -567,9 +575,13 @@ int main(int argc, char** argv) {"serializable-deferrable", no_argument, &serializable_deferrable, 1}, {"use-set-session-authorization", no_argument, &use_setsessauth, 1}, {"no-security-labels", no_argument, &no_security_labels, 1}, +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) {"no-publications", no_argument, &no_publications, 1}, +#endif {"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1}, +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) {"no-subscriptions", no_argument, &no_subscriptions, 1}, +#endif {"include-alter-table", no_argument, &include_alter_table, 1}, {"exclude-self", no_argument, &exclude_self, 1}, {"include-depend-objs", no_argument, &include_depend_objs, 1}, @@ -988,7 +1000,11 @@ int main(int argc, char** argv) dumpDatabase(fout); dumpDirectory(fout); } - + ArchiveHandle* archiveHandle = (ArchiveHandle*)fout; + const char* sqlCmd = "select * from pg_class where relname = 'gs_db_privilege';"; + if (isExistsSQLResult(archiveHandle->connection, sqlCmd)) { + dumpAnyPrivilege(fout); + } /* gets the total number of dump objects */ if (!dataOnly) { for (i = 0; i < numObjs; i++) { @@ -1733,9 +1749,13 @@ void help(const char* pchProgname) printf(_(" --exclude-table-data=TABLE do NOT dump 
data for the named table(s)\n")); printf(_(" --exclude-with do NOT dump WITH() of table(s)\n")); printf(_(" --inserts dump data as INSERT commands, rather than COPY\n")); +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) printf(_(" --no-publications do not dump publications\n")); +#endif printf(_(" --no-security-labels do not dump security label assignments\n")); +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) printf(_(" --no-subscriptions do not dump subscriptions\n")); +#endif printf(_(" --no-tablespaces do not dump tablespace assignments\n")); printf(_(" --no-unlogged-table-data do not dump unlogged table data\n")); printf(_(" --include-alter-table dump the table delete column\n")); @@ -2078,7 +2098,7 @@ static void ExcludeMatRelTables(Archive* fout, SimpleOidList* oidlists) "SELECT c.oid" " FROM pg_catalog.pg_class c" " WHERE c.relkind = 'r'" - " AND (c.relname like 'matviewmap_%%' OR c.relname like 'mlog_%%'\n)"); + " AND (c.relname like 'matviewmap\\_%%' OR c.relname like 'mlog\\_%%'\n)"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -3476,6 +3496,91 @@ static void dumpDatabase(Archive* fout) destroyPQExpBuffer(creaQry); } +static void getGrantAnyPrivilegeQuery(Archive* fout, PQExpBuffer grantAnyPrivilegeSql, + Oid roleid, const char* adminOption) +{ + PGresult* anyRes = NULL; + PQExpBuffer gsDbPrivilegeQry = createPQExpBuffer(); + if (gsDbPrivilegeQry == NULL) { + return; + } + appendPQExpBuffer(gsDbPrivilegeQry, + "SELECT pg_roles.rolname, gs_db_privilege.privilege_type from gs_db_privilege " + "left join pg_roles on gs_db_privilege.roleid = pg_roles.oid " + "where gs_db_privilege.roleid = %u and gs_db_privilege.admin_option = '%s';", roleid, adminOption); + anyRes = ExecuteSqlQuery(fout, gsDbPrivilegeQry->data, PGRES_TUPLES_OK); + int tupleNum = PQntuples(anyRes); + if (tupleNum <= 0) { + PQclear(anyRes); + destroyPQExpBuffer(gsDbPrivilegeQry); + return; + } + appendPQExpBuffer(grantAnyPrivilegeSql, "GRANT "); + char* roleName = PQgetvalue(anyRes, 0, 0); + for (int j = 0; j < tupleNum; j++) { + char* privilegeType = PQgetvalue(anyRes, j, 1); + if (j == tupleNum - 1) { + appendPQExpBuffer(grantAnyPrivilegeSql, "%s ", privilegeType); + } else { + appendPQExpBuffer(grantAnyPrivilegeSql, "%s, ", privilegeType); + } + } + appendPQExpBuffer(grantAnyPrivilegeSql, "TO %s", roleName); + if (*adminOption == 't') { + appendPQExpBuffer(grantAnyPrivilegeSql, " WITH ADMIN OPTION"); + } + appendPQExpBuffer(grantAnyPrivilegeSql, ";\n"); + PQclear(anyRes); + destroyPQExpBuffer(gsDbPrivilegeQry); +}
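
getGrantAnyPrivilegeQuery folds every privilege row for one role and one admin_option value into a single GRANT statement, so each role yields at most two statements: one plain grant and one WITH ADMIN OPTION. A minimal sketch of that assembly logic; buildGrantStatement and the sample privilege names are illustrative stand-ins, not code from this patch:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-in for the string assembly above: the privileges
// collected for one role are joined into a single GRANT, and the
// admin_option pass decides whether WITH ADMIN OPTION is appended.
static std::string buildGrantStatement(const std::vector<std::string>& privs,
                                       const std::string& role, bool adminOption)
{
    std::string sql = "GRANT ";
    for (size_t i = 0; i < privs.size(); i++) {
        sql += privs[i];
        sql += (i + 1 < privs.size()) ? ", " : " ";
    }
    sql += "TO " + role;
    if (adminOption) {
        sql += " WITH ADMIN OPTION";
    }
    sql += ";\n";
    return sql;
}

int main()
{
    // Two passes per role, mirroring the admin_option = 'f' then 't' queries.
    std::cout << buildGrantStatement({"CREATE ANY TABLE", "SELECT ANY TABLE"}, "role1", false)
              << buildGrantStatement({"ALTER ANY TABLE"}, "role1", true);
    return 0;
}
```
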
+/* + * Dump database-level privileges + * recorded in gs_db_privilege. + */ +static void dumpAnyPrivilege(Archive *fout) +{ + PGresult* res = NULL; + Oid roleid = 0; + PQExpBuffer selectGsDbPrivilegeQry = createPQExpBuffer(); + if (selectGsDbPrivilegeQry == NULL) { + return; + } + PQExpBuffer grantAnyPrivilegeSql = createPQExpBuffer(); + if (grantAnyPrivilegeSql == NULL) { + destroyPQExpBuffer(selectGsDbPrivilegeQry); + return; + } + appendPQExpBuffer(selectGsDbPrivilegeQry, "SELECT distinct roleid from gs_db_privilege;"); + res = ExecuteSqlQuery(fout, selectGsDbPrivilegeQry->data, PGRES_TUPLES_OK); + int roleidNum = PQfnumber(res, "roleid"); + for (int i = 0; i < PQntuples(res); i++) { + roleid = atooid(PQgetvalue(res, i, roleidNum)); + getGrantAnyPrivilegeQuery(fout, grantAnyPrivilegeSql, roleid, "f"); + getGrantAnyPrivilegeQuery(fout, grantAnyPrivilegeSql, roleid, "t"); + } + ArchiveEntry(fout, + nilCatalogId, /* catalog ID */ + createDumpId(), /* dump ID */ + "AnyPrivilege", /* Name */ + NULL, /* Namespace */ + NULL, /* Tablespace */ + "", /* Owner */ + false, /* with oids */ + "AnyPrivilege", /* Desc */ + SECTION_PRE_DATA, /* Section */ + grantAnyPrivilegeSql->data, /* Create */ + "", /* Del */ + NULL, /* Copy */ + NULL, /* Deps */ + 0, /* # Deps */ + NULL, /* Dumper */ + NULL); /* Dumper Arg */ + PQclear(res); + destroyPQExpBuffer(selectGsDbPrivilegeQry); + destroyPQExpBuffer(grantAnyPrivilegeSql); +} + +#ifdef HAVE_CE static void dumpClientGlobalKeys(Archive *fout, const Oid nspoid, const char *nspname) { int j = 0; @@ -3761,7 +3866,7 @@ static void dumpColumnEncryptionKeys(Archive *fout, const Oid nspoid, const char destroyPQExpBuffer(selectClientGlobalKeysArgsQry); destroyPQExpBuffer(createColumnEncryptionKeysQry); } - +#endif /* * dumpEncoding: put the correct encoding into the archive */ @@ -4109,6 +4214,11 @@ void getPublications(Archive *fout) res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); + if (ntups == 0) { + PQclear(res); + destroyPQExpBuffer(query); + return; + } i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); @@ -4205,6 +4315,7 @@ static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo) destroyPQExpBuffer(delq); destroyPQExpBuffer(query); + destroyPQExpBuffer(labelq); } /* @@ -4316,23 +4427,6 @@ static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrin destroyPQExpBuffer(query); } -/* - * Is the currently connected user a superuser? - */ -static bool is_superuser(Archive *fout) -{ - ArchiveHandle *AH = (ArchiveHandle *)fout; - const char *val; - - val = PQparameterStatus(AH->connection, "is_sysadmin"); - - if (val && strcmp(val, "on") == 0) { - return true; - } - - return false; -} - /* * getSubscriptions * get information about subscriptions @@ -4356,15 +4450,13 @@ void getSubscriptions(Archive *fout) return; } - if (!is_superuser(fout)) { - int n; - + if (!isExecUserSuperRole(fout)) { res = ExecuteSqlQuery(fout, "SELECT count(*) FROM pg_subscription " "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database" " WHERE datname = current_database())", PGRES_TUPLES_OK); - n = atoi(PQgetvalue(res, 0, 0)); + uint64 n = (res != NULL) ?
strtoul(PQgetvalue(res, 0, 0), NULL, 10) : 0; if (n > 0) { write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n"); } @@ -4388,6 +4480,11 @@ void getSubscriptions(Archive *fout) res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); + if (ntups == 0) { + PQclear(res); + destroyPQExpBuffer(query); + return; + } i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); @@ -4500,6 +4597,7 @@ static void dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo) destroyPQExpBuffer(delq); destroyPQExpBuffer(query); + destroyPQExpBuffer(labelq); } static void binary_upgrade_set_type_oids_by_type_oid( @@ -10647,10 +10745,10 @@ static void dumpNamespace(Archive* fout, NamespaceInfo* nspinfo) NULL, nspinfo->rolname, nspinfo->nspacl); - +#ifdef HAVE_CE dumpClientGlobalKeys(fout, nspinfo->dobj.catId.oid, nspinfo->dobj.name); dumpColumnEncryptionKeys(fout, nspinfo->dobj.catId.oid, nspinfo->dobj.name); - +#endif free(qnspname); qnspname = NULL; @@ -10959,6 +11057,8 @@ static void dumpTableofType(Archive* fout, TypeInfo* tyinfo) res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); num = PQntuples(res); + + qtypname = gs_strdup(fmtId(tyinfo->dobj.name)); /* * DROP must be fully qualified in case same name appears in pg_catalog. * CASCADE shouldn't be required here as for normal types since the I/O @@ -10967,12 +11067,15 @@ static void dumpTableofType(Archive* fout, TypeInfo* tyinfo) appendPQExpBuffer(delq, "DROP TYPE %s%s.", if_exists, fmtId(tyinfo->dobj.nmspace->dobj.name)); appendPQExpBuffer(delq, "%s%s;\n", qtypname, if_cascade); - qtypname = gs_strdup(fmtId(tyinfo->dobj.name)); appendPQExpBuffer(q, "CREATE TYPE %s AS TABLE OF ", qtypname); - if (num > 1) { + if (num != 1) { PQclear(res); + destroyPQExpBuffer(q); + destroyPQExpBuffer(delq); + destroyPQExpBuffer(labelq); destroyPQExpBuffer(query); + free(qtypname); return; } field_num = PQfnumber(res, "typname"); @@ -17505,7 +17608,7 @@ static void GenerateSubPartitionDetail(PQExpBuffer result, Archive *fout, TableI if (!PQgetisnull(res, i, i_boundaries)) { boundaryValue = gs_strdup(PQgetvalue(res, i, i_boundaries)); } - if (boundaryValue == NULL || (boundaryValue != NULL && strlen(boundaryValue) == 0)) { + if (boundaryValue == NULL || strlen(boundaryValue) == 0) { appendPQExpBuffer(result, "DEFAULT"); } else if (isTypeString(tbinfo, subpartkeycols[0])) { char *boundStr = gs_strdup(PQgetvalue(res, i, i_boundStr)); @@ -17763,7 +17866,7 @@ static PQExpBuffer createTablePartition(Archive* fout, TableInfo* tbinfo) if (!PQgetisnull(res, i, iBoundaries)) { boundaryValue = gs_strdup(PQgetvalue(res, i, iBoundaries)); } - if (boundaryValue == NULL || (boundaryValue != NULL && strlen(boundaryValue) == 0)) { + if (boundaryValue == NULL || strlen(boundaryValue) == 0) { appendPQExpBuffer(result, "DEFAULT"); } else if (isTypeString(tbinfo, partkeycols[0])) { char *boundStr = gs_strdup(PQgetvalue(res, i, iBoundStr)); @@ -19872,6 +19975,8 @@ static const char* getAttrName(int attrnum, TableInfo* tblInfo) return "xc_node_id"; case BucketIdAttributeNumber: return "tablebucketid"; + case UidAttributeNumber: + return "gs_tuple_uid"; #endif default: break; @@ -20521,6 +20626,10 @@ static void dumpSequence(Archive* fout, TableInfo* tbinfo, bool large) PQntuples(res)), fmtId(tbinfo->dobj.name), PQntuples(res)); + PQclear(res); + destroyPQExpBuffer(query); + destroyPQExpBuffer(delqry); + destroyPQExpBuffer(labelq); exit_nicely(1); } @@ -20531,6 +20640,10 @@ static void 
dumpSequence(Archive* fout, TableInfo* tbinfo, bool large) "query to get data of sequence \"%s\" returned name \"%s\"\n", tbinfo->dobj.name, PQgetvalue(res, 0, 0)); + PQclear(res); + destroyPQExpBuffer(query); + destroyPQExpBuffer(delqry); + destroyPQExpBuffer(labelq); exit_nicely(1); } #endif @@ -20684,13 +20797,9 @@ static void dumpSequenceData(Archive* fout, TableDataInfo* tdinfo, bool large) PGresult* res = NULL; char* last = NULL; char* start = NULL; - int64 startValue = 0; char* increment = NULL; - int64 incrementValue = 0; char* max = NULL; - int64 maxValue = 0; char* min = NULL; - int64 minValue = 0; bool called = false; PQExpBuffer query = createPQExpBuffer(); @@ -20710,21 +20819,19 @@ static void dumpSequenceData(Archive* fout, TableDataInfo* tdinfo, bool large) PQntuples(res)), fmtId(tbinfo->dobj.name), PQntuples(res)); + PQclear(res); + destroyPQExpBuffer(query); exit_nicely(1); } /*local last_value and isCalled are not reliable. but the following values are reliable */ start = PQgetvalue(res, 0, 0); - startValue = atol(start); increment = PQgetvalue(res, 0, 1); - incrementValue = atol(increment); max = PQgetvalue(res, 0, 2); - maxValue = atol(max); min = PQgetvalue(res, 0, 3); - minValue = atol(min); called = (strcmp(PQgetvalue(res, 0, 4), "t") == 0); @@ -20780,6 +20887,8 @@ static void dumpSequenceData(Archive* fout, TableDataInfo* tdinfo, bool large) PQntuples(res)), fmtId(tbinfo->dobj.name), PQntuples(res)); + PQclear(res); + destroyPQExpBuffer(query); exit_nicely(1); } diff --git a/src/bin/pg_dump/pg_dumpall.cpp b/src/bin/pg_dump/pg_dumpall.cpp index 704c9e876..0a321c839 100644 --- a/src/bin/pg_dump/pg_dumpall.cpp +++ b/src/bin/pg_dump/pg_dumpall.cpp @@ -54,6 +54,10 @@ #define PGDUMP_VERSIONSTR "gs_dump " DEF_GS_VERSION "\n" #define atoxid(x) ((TransactionId)strtoul((x), NULL, 10)) +#ifdef ENABLE_UT +#define static +#endif + static void dropRoles(PGconn* conn); static void dumpRoles(PGconn* conn); static void dumpRoleMembership(PGconn* conn); @@ -230,9 +234,13 @@ int main(int argc, char* argv[]) {"with-key", required_argument, NULL, 7}, {"dont-overwrite-file", no_argument, NULL, 8}, {"use-set-session-authorization", no_argument, &use_setsessauth, 1}, +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) {"no-publications", no_argument, &no_publications, 1}, +#endif {"no-security-labels", no_argument, &no_security_labels, 1}, +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) {"no-subscriptions", no_argument, &no_subscriptions, 1}, +#endif {"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1}, {"include-alter-table", no_argument, &include_alter_table, 1}, #ifdef ENABLE_MULTIPLE_NODES @@ -1218,10 +1226,14 @@ void help(void) printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n")); printf(_(" --disable-triggers disable triggers during data-only restore\n")); printf(_(" --inserts dump data as INSERT commands, rather than COPY\n")); +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) printf(_(" --no-publications do not dump publications\n")); +#endif printf(_(" --no-security-labels do not dump security label assignments\n")); printf(_(" --no-tablespaces do not dump tablespace assignments\n")); +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) printf(_(" --no-subscriptions do not dump subscriptions\n")); +#endif printf(_(" --no-unlogged-table-data do not dump unlogged table data\n")); printf(_(" --include-alter-table dump the table delete column\n")); printf(_(" 
--quote-all-identifiers quote all identifiers, even if not key words\n")); diff --git a/src/bin/pg_dump/pg_restore.cpp b/src/bin/pg_dump/pg_restore.cpp index 19304022d..1e23b4f8b 100644 --- a/src/bin/pg_dump/pg_restore.cpp +++ b/src/bin/pg_dump/pg_restore.cpp @@ -69,6 +69,10 @@ extern int optind; #include "gauss_sft.h" #endif +#ifdef ENABLE_UT +#define static +#endif + void usage(const char* progname); static bool checkDecryptArchive(char** pFileSpec, const ArchiveFormat fm, const char* key); static void restore_getopts(int argc, char** argv, struct option* options, RestoreOptions* opts, char** inputFileSpec); @@ -149,9 +153,13 @@ int main(int argc, char** argv) {"role", required_argument, NULL, 2}, {"section", required_argument, NULL, 3}, {"use-set-session-authorization", no_argument, &use_setsessauth, 1}, +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) {"no-publications", no_argument, &no_publications, 1}, +#endif {"no-security-labels", no_argument, &no_security_labels, 1}, +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) {"no-subscriptions", no_argument, &no_subscriptions, 1}, +#endif {"rolepassword", required_argument, NULL, 5}, {"with-key", required_argument, NULL, 6}, {"pipeline", no_argument, NULL, 7}, @@ -769,9 +777,13 @@ void usage(const char* pchProgname) printf(_(" --disable-triggers disable triggers during data-only restore\n")); printf(_(" --no-data-for-failed-tables do not restore data of tables that could not be\n" " created\n")); +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) printf(_(" --no-publications do not restore publications\n")); +#endif printf(_(" --no-security-labels do not restore security labels\n")); +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) printf(_(" --no-subscriptions do not restore subscriptions\n")); +#endif printf(_(" --no-tablespaces do not restore tablespace assignments\n")); printf(_(" --section=SECTION restore named section (pre-data, data, or post-data)\n")); printf(_(" --use-set-session-authorization use SET SESSION AUTHORIZATION commands instead of\n" diff --git a/src/bin/pg_probackup/CMakeLists.txt b/src/bin/pg_probackup/CMakeLists.txt index 68aca5089..d23567fe6 100755 --- a/src/bin/pg_probackup/CMakeLists.txt +++ b/src/bin/pg_probackup/CMakeLists.txt @@ -18,7 +18,10 @@ set(TGT_probackup_INC ${ZLIB_INCLUDE_PATH}) set(probackup_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND -DHAVE_LIBZ) set(probackup_COMPILE_OPTIONS ${PROTECT_OPTIONS} ${BIN_SECURE_OPTIONS} ${OS_OPTIONS} ${WARNING_OPTIONS} ${OPTIMIZE_OPTIONS} ${CHECK_OPTIONS}) set(probackup_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(probackup_LINK_LIBS libpgcommon.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4 -lpq) +set(probackup_LINK_LIBS libpgcommon.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -llz4 -lpq) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND probackup_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(gs_probackup TGT_probackup_SRC TGT_probackup_INC "${probackup_DEF_OPTIONS}" "${probackup_COMPILE_OPTIONS}" "${probackup_LINK_OPTIONS}" "${probackup_LINK_LIBS}") add_dependencies(gs_probackup pq pgport_static) target_link_directories(gs_probackup PUBLIC diff --git a/src/bin/pg_probackup/Makefile b/src/bin/pg_probackup/Makefile index b0a0ae06d..f8723fb63 
100644 --- a/src/bin/pg_probackup/Makefile +++ b/src/bin/pg_probackup/Makefile @@ -33,7 +33,10 @@ include $(top_builddir)/src/Makefile.global EXTRA_CLEAN += logging.h LDFLAGS += -L$(LZ4_LIB_PATH) -LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4 +LIBS += -llz4 +ifeq ($(enable_lite_mode), no) + LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +endif PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_builddir)/$(subdir) -I$(LZ4_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH) override CPPFLAGS := -DFRONTEND $(CPPFLAGS) $(PG_CPPFLAGS) -DHAVE_LIBZ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} diff --git a/src/bin/pg_probackup/backup.cpp b/src/bin/pg_probackup/backup.cpp index 04425fa58..d37c57c13 100644 --- a/src/bin/pg_probackup/backup.cpp +++ b/src/bin/pg_probackup/backup.cpp @@ -27,6 +27,10 @@ #include "file.h" #include "common/fe_memutils.h" +/* list of dirs that are not backed up with pgdata; + they are backed up as external dirs */ +parray *pgdata_nobackup_dir = NULL; + static int standby_message_timeout_local = 10 ; /* 10 sec = default */ static XLogRecPtr stop_backup_lsn = InvalidXLogRecPtr; static XLogRecPtr stop_stream_lsn = InvalidXLogRecPtr; @@ -740,6 +744,11 @@ do_backup_instance(PGconn *backup_conn, PGNodeInfo *nodeInfo, bool no_sync, bool if (external_dirs) free_dir_list(external_dirs); + if (pgdata_nobackup_dir) { + free_dir_list(pgdata_nobackup_dir); + } + pgdata_nobackup_dir = NULL; + /* Cleanup */ if (backup_list) { @@ -1277,6 +1286,7 @@ wait_wal_lsn(XLogRecPtr target_lsn, bool is_start_lsn, TimeLineID tli, *wal_segment_dir, wal_segment[MAXFNAMELEN]; bool file_exists = false; + bool read_partial_file = false; uint32 try_count = 0, timeout; int rc = 0; @@ -1290,8 +1300,7 @@ GetXLogSegNo(target_lsn, targetSegNo, instance_config.xlog_seg_size); if (in_prev_segment) targetSegNo--; - GetXLogFileName(wal_segment, tli, targetSegNo, - instance_config.xlog_seg_size); + GetXLogFileName(wal_segment, MAXFNAMELEN, tli, targetSegNo, instance_config.xlog_seg_size); /* * In pg_start_backup we wait for 'target_lsn' in 'pg_wal' directory if it is @@ -1337,7 +1346,10 @@ if (!file_exists) { file_exists = fileExists(wal_segment_path, FIO_BACKUP_HOST); - if(!file_exists) { + read_partial_file = (!file_exists) + && XRecOffIsNull(target_lsn) + && try_count > timeout / archive_timeout_deno; + if(read_partial_file) { file_exists = fileExists(partial_file, FIO_BACKUP_HOST); } #ifdef HAVE_LIBZ @@ -2414,6 +2426,7 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn) int i = 0; int j = 0; char *tablespace_path = NULL; + bool in_pgdata = false; const char *query = (const char *)"SELECT pg_catalog.pg_tablespace_location(oid) " "FROM pg_catalog.pg_tablespace;"; @@ -2423,13 +2436,23 @@ if (!res) elog(ERROR, "Failed to get list of tablespaces"); + pgdata_nobackup_dir = parray_new(); + for (i = 0; i < res->ntups; i++) { + char full_path[MAXPGPATH] = {0}; + char rel_path[MAXPGPATH] = {0}; tablespace_path = PQgetvalue(res, i, 0); if (strlen(tablespace_path) == 0) { continue; } + if (!is_absolute_path(tablespace_path)) { + join_path_components(rel_path, PG_RELATIVE_TBLSPC_DIR, tablespace_path); + join_path_components(full_path,
instance_config.pgdata, rel_path); + tablespace_path = full_path; + } + canonicalize_path(tablespace_path); for (j = 0; j < (int)parray_num(external_list); j++) @@ -2444,6 +2467,13 @@ check_external_for_tablespaces(parray *external_list, PGconn *backup_conn) elog(WARNING, "External directory path (-E option) \"%s\" " "is in tablespace directory \"%s\"", tablespace_path, external_path); + + in_pgdata = path_is_prefix_of_path(instance_config.pgdata, external_path); + if (in_pgdata && + strcmp(external_path, tablespace_path) == 0) { + char *no_backup_dir = pg_strdup(rel_path); + parray_append(pgdata_nobackup_dir, no_backup_dir); + } } } PQclear(res); @@ -2530,9 +2560,9 @@ static bool PathContainPath(const char* path1, const char* path2) size_t path2Len = strlen(path2); if (path1Len == path2Len) { return false; - } else if ((path2Len == (path1Len + 1)) && !IS_DIR_SEP(path2[path1Len - 1])) { return false; - } else if ((path2Len == (path1Len + 1)) && IS_DIR_SEP(path2[path1Len - 1])) { return true; + } else if ((path2Len >= (path1Len + 1)) && !IS_DIR_SEP(path2[path1Len])) { + return false; + } else if ((path2Len >= (path1Len + 1)) && IS_DIR_SEP(path2[path1Len])) { + return true; } else { return false; } diff --git a/src/bin/pg_probackup/catalog.cpp b/src/bin/pg_probackup/catalog.cpp index 8a7eddc23..857758765 100644 --- a/src/bin/pg_probackup/catalog.cpp +++ b/src/bin/pg_probackup/catalog.cpp @@ -1432,8 +1432,9 @@ void anchor_lsn_keep_segments_timelines(InstanceConfig *instance, parray *timeli * covered by other larger interval. */ - GetXLogFileName(begin_segno_str, tlinfo->tli, interval->begin_segno, instance->xlog_seg_size); - GetXLogFileName(end_segno_str, tlinfo->tli, interval->end_segno, instance->xlog_seg_size); + GetXLogFileName(begin_segno_str, MAXFNAMELEN, tlinfo->tli, interval->begin_segno, + instance->xlog_seg_size); + GetXLogFileName(end_segno_str, MAXFNAMELEN, tlinfo->tli, interval->end_segno, instance->xlog_seg_size); elog(LOG, "Timeline %i to stay reachable from timeline %i " "protect from purge WAL interval between " @@ -1487,8 +1488,8 @@ void anchor_lsn_keep_segments_timelines(InstanceConfig *instance, parray *timeli else interval->end_segno = segno; - GetXLogFileName(begin_segno_str, tlinfo->tli, interval->begin_segno, instance->xlog_seg_size); - GetXLogFileName(end_segno_str, tlinfo->tli, interval->end_segno, instance->xlog_seg_size); + GetXLogFileName(begin_segno_str, MAXFNAMELEN, tlinfo->tli, interval->begin_segno, instance->xlog_seg_size); + GetXLogFileName(end_segno_str, MAXFNAMELEN, tlinfo->tli, interval->end_segno, instance->xlog_seg_size); elog(LOG, "Archive backup %s to stay consistent " "protect from purge WAL interval " diff --git a/src/bin/pg_probackup/delete.cpp b/src/bin/pg_probackup/delete.cpp index 0686e7820..d45c3addf 100644 --- a/src/bin/pg_probackup/delete.cpp +++ b/src/bin/pg_probackup/delete.cpp @@ -826,8 +826,8 @@ void do_sanity(XLogSegNo FirstToDeleteSegNo, XLogSegNo OldestToKeepSegNo, if (FirstToDeleteSegNo > 0 && OldestToKeepSegNo > 0) { - GetXLogFileName(first_to_del_str, tlinfo->tli, FirstToDeleteSegNo, xlog_seg_size); - GetXLogFileName(oldest_to_keep_str, tlinfo->tli, OldestToKeepSegNo, xlog_seg_size); + GetXLogFileName(first_to_del_str, MAXFNAMELEN, tlinfo->tli, FirstToDeleteSegNo, xlog_seg_size); + GetXLogFileName(oldest_to_keep_str, MAXFNAMELEN, tlinfo->tli, OldestToKeepSegNo, xlog_seg_size); elog(LOG, "On timeline %i first segment %s is greater than oldest segment to keep %s", tlinfo->tli, first_to_del_str, oldest_to_keep_str);
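
The PathContainPath correction above is worth dwelling on: the old code only matched a path exactly one byte longer than the prefix, and it tested the separator at path1Len - 1 (the last byte of the prefix) rather than at path1Len (the first byte past it). A self-contained sketch of the intended containment test, assuming canonical absolute paths and that the unshown earlier part of the function has already compared the prefix bytes:

```cpp
#include <cassert>
#include <cstring>

#define IS_DIR_SEP(ch) ((ch) == '/')

// path2 lies strictly inside directory path1 iff path2 extends path1 and
// the first byte past the shared prefix is a directory separator.
static bool PathContains(const char* path1, const char* path2)
{
    size_t len1 = strlen(path1);
    if (strlen(path2) <= len1 || strncmp(path1, path2, len1) != 0) {
        return false;
    }
    return IS_DIR_SEP(path2[len1]);
}

int main()
{
    assert(PathContains("/data/base", "/data/base/16384"));
    assert(!PathContains("/data/base", "/data/base2/16384")); // separator test catches this
    assert(!PathContains("/data/base", "/data/base"));        // equal paths do not contain
    return 0;
}
```
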
@@ -934,8 +934,8 @@ delete_walfiles_in_tli(XLogRecPtr keep_lsn, timelineInfo *tlinfo, if (OldestToKeepSegNo > 0 && OldestToKeepSegNo > FirstToDeleteSegNo) { /* translate segno number into human readable format */ - GetXLogFileName(first_to_del_str, tlinfo->tli, FirstToDeleteSegNo, xlog_seg_size); - GetXLogFileName(oldest_to_keep_str, tlinfo->tli, OldestToKeepSegNo, xlog_seg_size); + GetXLogFileName(first_to_del_str, MAXFNAMELEN, tlinfo->tli, FirstToDeleteSegNo, xlog_seg_size); + GetXLogFileName(oldest_to_keep_str, MAXFNAMELEN, tlinfo->tli, OldestToKeepSegNo, xlog_seg_size); elog(INFO, "On timeline %i WAL segments between %s and %s %s be removed", tlinfo->tli, first_to_del_str, @@ -1108,6 +1108,11 @@ do_delete_status(InstanceConfig *instance_config, const char *status) parray_qsort(delete_list, pgBackupCompareIdDesc); + /* Lock the backups marked for deletion */ + if (!dry_run) { + catalog_lock_backup_list(delete_list, parray_num(delete_list) - 1, 0, false); + } + /* delete and calculate free size from delete_list */ for (i = 0; i < parray_num(delete_list); i++) { @@ -1120,8 +1125,9 @@ do_delete_status(InstanceConfig *instance_config, const char *status) if (backup->stream) size_to_delete += backup->wal_bytes; - if (!dry_run && lock_backup(backup, false)) + if (!dry_run) { delete_backup_files(backup); + } n_deleted++; } diff --git a/src/bin/pg_probackup/dir.cpp b/src/bin/pg_probackup/dir.cpp index 7f9fb9ca7..bb1b0857f 100644 --- a/src/bin/pg_probackup/dir.cpp +++ b/src/bin/pg_probackup/dir.cpp @@ -131,6 +131,7 @@ static char dir_check_file(pgFile *file, bool backup_logs); static char check_in_tablespace(pgFile *file, bool in_tablespace); static char check_db_dir(pgFile *file); static char check_digit_file(pgFile *file); +static char check_nobackup_dir(pgFile *file); static void dir_list_file_internal(parray *files, pgFile *parent, const char *parent_dir, bool exclude, bool follow_symlink, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location); @@ -479,8 +480,8 @@ pgFileCompareLinked(const void *f1, const void *f2) int pgFileCompareSize(const void *f1, const void *f2) { - pgFile *f1p = (pgFile *)const_cast(f1); - pgFile *f2p = (pgFile *)const_cast(f2); + pgFile *f1p = *(pgFile **)f1; + pgFile *f2p = *(pgFile **)f2; if (f1p->size > f2p->size) return 1; @@ -493,7 +494,7 @@ pgFileCompareSize(const void *f1, const void *f2) static int pgCompareString(const void *str1, const void *str2) { - return strcmp((char *)const_cast( str1), (char *)const_cast(str2)); + return strcmp(*(char **) str1, *(char **) str2); } /* Compare two Oids */ @@ -644,6 +645,12 @@ dir_check_file(pgFile *file, bool backup_logs) return CHECK_EXCLUDE_FALSE; } } + + ret = check_nobackup_dir(file); + if (ret != -1) { /* -1 means need backup */ + return ret; + } + } /* @@ -719,6 +726,23 @@ if (in_tablespace) return -1; } +static char check_nobackup_dir(pgFile *file) +{ + char ret = -1; /* -1 means need backup */ + int i = 0; + if (pgdata_nobackup_dir) { + for (i = 0; i < (int)parray_num(pgdata_nobackup_dir); i++) { + char *file_path = (char *)parray_get(pgdata_nobackup_dir, i); + /* exclude relative tablespace paths */ + if (strcmp(file->rel_path, file_path) == 0) { + elog(VERBOSE, "Excluding external directory content: %s", file->rel_path); + return CHECK_EXCLUDE_FALSE; + } + } + } + return ret; +} + static char check_db_dir(pgFile *file) { char ret = -1; diff --git a/src/bin/pg_probackup/help.cpp b/src/bin/pg_probackup/help.cpp index 47fc7acee..9d00020a3 100644 --- a/src/bin/pg_probackup/help.cpp +++
b/src/bin/pg_probackup/help.cpp @@ -85,6 +85,7 @@ void help_pg_probackup(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--help]\n")); printf(_("\n %s del-instance -B backup-path --instance=instance_name\n"), PROGRAM_NAME); @@ -109,6 +110,7 @@ void help_pg_probackup(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--help]\n")); printf(_("\n %s set-backup -B backup-path --instance=instance_name -i backup-id\n"), PROGRAM_NAME); @@ -150,6 +152,7 @@ void help_pg_probackup(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--ttl=interval] [--expire-time=time]\n")); printf(_(" [--help]\n")); @@ -164,6 +167,7 @@ void help_pg_probackup(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); printf(_(" [--log-filename=log-filename]\n")); @@ -231,6 +235,7 @@ static void help_add_instance(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); @@ -247,6 +252,7 @@ static void help_add_instance(void) printf(_(" --remote-path=path path to directory with gs_probackup binary on remote host\n")); printf(_(" (default: current binary path)\n")); printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); + printf(_(" --remote-libpath=libpath library path on remote host\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); } @@ -280,6 +286,7 @@ static void help_set_config(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); printf(_(" --instance=instance_name name of the instance\n")); @@ -338,6 +345,7 @@ static void help_set_config(void) printf(_(" --remote-path=path path to directory with gs_probackup binary on remote host\n")); printf(_(" (default: current binary path)\n")); printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); + printf(_(" --remote-libpath=libpath library path on remote host\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: 
none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); } @@ -410,6 +418,7 @@ static void help_backup(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--ttl=interval] [--expire-time=time]\n\n")); printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); @@ -491,6 +500,7 @@ static void help_backup(void) printf(_(" --remote-path=path path to directory with gs_probackup binary on remote host\n")); printf(_(" (default: current binary path)\n")); printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); + printf(_(" --remote-libpath=libpath library path on remote host\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); @@ -515,6 +525,7 @@ static void help_restore(void) printf(_(" [--remote-proto=protocol] [--remote-host=destination]\n")); printf(_(" [--remote-path=path] [--remote-user=username]\n")); printf(_(" [--remote-port=port] [--ssh-options=ssh_options]\n")); + printf(_(" [--remote-libpath=libpath]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); printf(_(" [--log-filename=log-filename]\n")); @@ -559,6 +570,7 @@ static void help_restore(void) printf(_(" --remote-path=path path to directory with gs_probackup binary on remote host\n")); printf(_(" (default: current binary path)\n")); printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); + printf(_(" --remote-libpath=libpath library path on remote host\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); diff --git a/src/bin/pg_probackup/merge.cpp b/src/bin/pg_probackup/merge.cpp index 5e17e38db..d06bfebe9 100644 --- a/src/bin/pg_probackup/merge.cpp +++ b/src/bin/pg_probackup/merge.cpp @@ -45,7 +45,7 @@ static void *merge_files(void *arg); static void do_in_place_merge(merge_files_arg *arguments, pgFile *dest_file, pgFile *tmp_file, - bool iscontinue); + bool *iscontinue); static void reorder_external_dirs(pgBackup *to_backup, parray *to_external, parray *from_external); @@ -1071,7 +1071,6 @@ static void * merge_files(void *arg) { size_t i = 0; - bool iscontinue = false; merge_files_arg *arguments = (merge_files_arg *) arg; size_t n_files = parray_num(arguments->dest_backup->files); @@ -1079,6 +1078,7 @@ merge_files(void *arg) { pgFile *dest_file = (pgFile *) parray_get(arguments->dest_backup->files, i); pgFile *tmp_file; + bool iscontinue = false; /* check for interrupt */ if (interrupted || thread_interrupted) @@ -1115,7 +1115,7 @@ merge_files(void *arg) goto done; } - do_in_place_merge(arguments, dest_file, tmp_file, iscontinue); + do_in_place_merge(arguments, dest_file, tmp_file, &iscontinue); if (iscontinue) { continue; } @@ -1149,7 +1149,7 @@ merge_files(void *arg) static void do_in_place_merge(merge_files_arg *arguments, pgFile *dest_file, pgFile *tmp_file, - bool iscontinue) + bool *iscontinue) { bool in_place = false; /* keep file as it is */ @@ -1276,11 +1276,11 @@ static void do_in_place_merge(merge_files_arg *arguments, //TODO: report in_place merge bytes. 
parray_append(arguments->merge_filelist, tmp_file); - iscontinue = true; + *iscontinue = true; } } - iscontinue = false; + *iscontinue = false; } /* Recursively delete a directory and its contents */ @@ -1393,23 +1393,23 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup, setvbuf(out, buffer, _IOFBF, STDIO_BUFSIZE); /* restore file into temp file */ - tmp_file->size = restore_data_file(parent_chain, dest_file, out, to_fullpath_tmp1, - use_bitmap, NULL, InvalidXLogRecPtr, NULL, - /* when retrying merge header map cannot be trusted */ - is_retry ? false : true); + restore_data_file(parent_chain, dest_file, out, to_fullpath_tmp1, + use_bitmap, NULL, InvalidXLogRecPtr, NULL, + /* when retrying merge header map cannot be trusted */ + is_retry ? false : true); if (fclose(out) != 0) elog(ERROR, "Cannot close file \"%s\": %s", to_fullpath_tmp1, strerror(errno)); pg_free(buffer); - /* tmp_file->size is greedy, even if there is single 8KB block in file, - * that was overwritten twice during restore_data_file, we would assume that its size is - * 16KB. - * TODO: maybe we should just trust dest_file->n_blocks? - * No, we can`t, because current binary can be used to merge - * 2 backups of old versions, where n_blocks is missing. + /* In the case of incremental backup, + * the write size returned by restore_data_file() + * is different from the total file size. + * Since n_blocks was supported from the original version in gs_probackup, + * we should just trust dest_file->n_blocks */ + tmp_file->size = dest_file->n_blocks * BLCKSZ; backup_data_file(NULL, tmp_file, to_fullpath_tmp1, to_fullpath_tmp2, InvalidXLogRecPtr, BACKUP_MODE_FULL, diff --git a/src/bin/pg_probackup/parray.cpp b/src/bin/pg_probackup/parray.cpp index 1459e5038..0b679f380 100644 --- a/src/bin/pg_probackup/parray.cpp +++ b/src/bin/pg_probackup/parray.cpp @@ -13,6 +13,8 @@ #include "parray.h" #include "pgut.h" +static size_t qsort_size = 100000; /* 100000 = default size */ + /* members of struct parray are hidden from client. 
*/ struct parray { @@ -191,10 +193,60 @@ parray_num(const parray *array) return array->used; } +static void HeapAdjust(void **array, size_t size, size_t index, + int(*compare)(const void *, const void *)) +{ + size_t parent = index; + size_t child = 2 * parent + 1; /* 2 * n + 1 :left child */ + while (child < size) { + if (child + 1 < size && compare(&array[child + 1], &array[child]) > 0) { + child = child + 1; + } + + if (compare(&array[child], &array[parent]) > 0) { + void *tmp = array[child]; + array[child] = array[parent]; + array[parent] = tmp; + } else { + break; + } + + parent = child; + child = 2 * parent + 1; /* 2 * n + 1 :left child */ + } +} + +static void HeapPop(void **array, size_t size, + int(*compare)(const void *, const void *)) +{ + void *tmp = array[0]; + array[0] = array[size - 1]; + array[size - 1] = tmp; + + HeapAdjust(array, size - 1, 0, compare); +} + +static void HeapSort(void **array, size_t size, + int(*compare)(const void *, const void *)) +{ + for (int64 i = (size - 2) / 2; i >= 0; i--) { /* parent node:(size -2) / 2 */ + HeapAdjust(array, size, i, compare); + } + + for (size_t i = 0; i < size; i++) { + HeapPop(array, size - i, compare); + } +} + void parray_qsort(parray *array, int(*compare)(const void *, const void *)) { - qsort(array->data, array->used, sizeof(void *), compare); + Assert(array->used < (PG_UINT64_MAX / 1024)); + if (array->used <= qsort_size) { + qsort(array->data, array->used, sizeof(void *), compare); + } else { + HeapSort(array->data, array->used, compare); + } } void
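
parray_qsort now routes arrays larger than qsort_size through the added HeapSort, keeping a guaranteed O(n log n) bound on the very large file lists a backup can produce. Either way, the comparators see the same calling convention: array->data holds void* elements, so qsort-style comparators receive pointers to those elements, i.e. void** — exactly the contract that the pgFileCompareSize and pgCompareString corrections in dir.cpp above now honor with *(pgFile **)f1 and *(char **)str1. A minimal, self-contained illustration of that contract, with made-up element values:

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

// qsort() on an array of char* hands the comparator a pointer INTO the
// array, i.e. a char**; casting the argument itself to char* reads garbage,
// which is the bug the dir.cpp comparators used to have.
static int compareStrings(const void* a, const void* b)
{
    return strcmp(*(const char* const*)a, *(const char* const*)b);
}

int main()
{
    const char* names[] = {"mlog_123", "base", "matviewmap_7"};
    qsort(names, 3, sizeof(names[0]), compareStrings);
    for (const char* n : names) {
        printf("%s\n", n); // base, matviewmap_7, mlog_123
    }
    return 0;
}
```
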
diff --git a/src/bin/pg_probackup/parsexlog.cpp b/src/bin/pg_probackup/parsexlog.cpp index 977b0fa11..0ed49d98b 100644 --- a/src/bin/pg_probackup/parsexlog.cpp +++ b/src/bin/pg_probackup/parsexlog.cpp @@ -32,10 +32,10 @@ * a bit nicer. */ #if PG_VERSION_NUM >= 100000 -#define PG_RMGR(symname, name, redo, desc, identify, startup, cleanup, mask, undo, undo_desc) \ name, +#define PG_RMGR(symname, name, redo, desc, identify, startup, cleanup, mask, undo, undo_desc, type_name) \ name, #else -#define PG_RMGR(symname, name, redo, desc, identify, startup, cleanup, undo, undo_desc) \ name, +#define PG_RMGR(symname, name, redo, desc, identify, startup, cleanup, undo, undo_desc, type_name) \ name, #endif @@ -153,7 +153,7 @@ typedef struct static int SimpleXLogPageRead_local(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *readBuf, - TimeLineID *pageTLI); + TimeLineID *pageTLI, char* xlog_path = NULL); static XLogReaderState *InitXLogPageRead(XLogReaderData *reader_data, const char *archivedir, TimeLineID tli, uint32 segment_size, @@ -662,7 +662,7 @@ get_first_record_lsn(const char *archivedir, XLogSegNo segno, if (segno <= 1) elog(ERROR, "Invalid WAL segment number " UINT64_FORMAT, segno); - GetXLogFileName(wal_segment, tli, segno, instance_config.xlog_seg_size); + GetXLogFileName(wal_segment, MAXFNAMELEN, tli, segno, instance_config.xlog_seg_size); xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, false, false, true); @@ -716,7 +716,7 @@ get_next_record_lsn(const char *archivedir, XLogSegNo segno, if (segno <= 1) elog(ERROR, "Invalid WAL segment number " UINT64_FORMAT, segno); - GetXLogFileName(wal_segment, tli, segno, instance_config.xlog_seg_size); + GetXLogFileName(wal_segment, MAXFNAMELEN, tli, segno, instance_config.xlog_seg_size); xlogreader = InitXLogPageRead(&reader_data, archivedir, tli, wal_seg_size, false, false, true); @@ -923,7 +923,7 @@ get_gz_error(gzFile gzf) static int SimpleXLogPageRead_local(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *readBuf, - TimeLineID *pageTLI) + TimeLineID *pageTLI, char* xlog_path) { XLogReaderData *reader_data; uint32 targetPageOff; @@ -1027,7 +1027,7 @@ static int switch_next_wal_segment(XLogReaderData *reader_data, bool *isreturn) char xlogfname[MAXFNAMELEN]; char partial_file[MAXPGPATH]; - GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno, wal_seg_size); + GetXLogFileName(xlogfname, MAXFNAMELEN, reader_data->tli, reader_data->xlogsegno, wal_seg_size); nRet = snprintf_s(reader_data->xlogpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", wal_archivedir, xlogfname); securec_check_ss_c(nRet, "\0", "\0"); @@ -1682,8 +1682,7 @@ XLogWaitForConsistency(XLogReaderState *xlogreader) { char xlogfname[MAXFNAMELEN]; - GetXLogFileName(xlogfname, reader_data->tli, reader_data->xlogsegno, - wal_seg_size); + GetXLogFileName(xlogfname, MAXFNAMELEN, reader_data->tli, reader_data->xlogsegno, wal_seg_size); elog(VERBOSE, "Thread [%d]: Possible WAL corruption in %s. Wait for other threads to decide is this a failure", reader_data->thread_num, xlogfname);
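
Every GetXLogFileName call in this patch gains an explicit destination length, which the pg_probackupb.h macros below forward to XLogFileName so the formatter can bound its writes. A hedged sketch of the underlying pattern; the 24-hex-digit name layout and the segsPerXLogId split follow the usual PostgreSQL WAL naming scheme, and FormatXLogFileName itself is an illustration, not code from this patch:

```cpp
#include <cstdint>
#include <cstdio>

typedef uint32_t TimeLineID;
typedef uint64_t XLogSegNo;

// Bounded formatting of a WAL segment file name: timeline, "log id" and
// "seg id" as three 8-digit hex fields, never writing past len bytes.
static void FormatXLogFileName(char* fname, size_t len, TimeLineID tli,
                               XLogSegNo segno, uint64_t segsPerXLogId)
{
    snprintf(fname, len, "%08X%08X%08X", (unsigned)tli,
             (unsigned)(segno / segsPerXLogId),
             (unsigned)(segno % segsPerXLogId));
}

int main()
{
    char name[25]; // 24 hex digits plus the terminating NUL
    // 16 MB segments: 0x100000000 / 16 MB = 256 segments per "log id".
    FormatXLogFileName(name, sizeof(name), 1, 5, 0x100000000ULL / (16 * 1024 * 1024));
    printf("%s\n", name); // 000000010000000000000005
    return 0;
}
```
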
diff --git a/src/bin/pg_probackup/pg_probackup.cpp b/src/bin/pg_probackup/pg_probackup.cpp index 37d28c2e7..0eebf801b 100644 --- a/src/bin/pg_probackup/pg_probackup.cpp +++ b/src/bin/pg_probackup/pg_probackup.cpp @@ -681,8 +681,11 @@ static void check_backid_option(char *command_name) elog(ERROR, "Invalid backup-id \"%s\"", backup_id_string); } - if (!instance_config.conn_opt.pghost && instance_config.remote.host) - instance_config.conn_opt.pghost = instance_config.remote.host; + if (!instance_config.conn_opt.pghost && + instance_config.remote.host && + IsSshProtocol()) { + instance_config.conn_opt.pghost = instance_config.remote.host; + } /* Setup stream options. They are used in streamutil.c. */ if (instance_config.conn_opt.pghost != NULL) diff --git a/src/bin/pg_probackup/pg_probackupa.h b/src/bin/pg_probackup/pg_probackupa.h index aae53264e..7fe53ffca 100644 --- a/src/bin/pg_probackup/pg_probackupa.h +++ b/src/bin/pg_probackup/pg_probackupa.h @@ -68,6 +68,7 @@ extern const char *PROGRAM_FULL_PATH; #define DATABASE_MAP "database_map" #define HEADER_MAP "page_header_map" #define HEADER_MAP_TMP "page_header_map_tmp" +#define PG_RELATIVE_TBLSPC_DIR "pg_location" /* Timeout defaults */ #define ARCHIVE_TIMEOUT_DEFAULT 300 diff --git a/src/bin/pg_probackup/pg_probackupb.h b/src/bin/pg_probackup/pg_probackupb.h index 599f90896..215b87053 100644 --- a/src/bin/pg_probackup/pg_probackupb.h +++ b/src/bin/pg_probackup/pg_probackupb.h @@ -458,8 +458,8 @@ typedef struct BackupPageHeader2 XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes) #define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \ XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest) -#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \ - XLogFileName(fname, tli, logSegNo, wal_segsz_bytes) +#define GetXLogFileName(fname, len, tli, logSegNo, wal_segsz_bytes) \ + XLogFileName(fname, len, tli, logSegNo, wal_segsz_bytes) #define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \ XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes) @@ -473,8 +473,8 @@ typedef struct BackupPageHeader2 XLByteToSeg(xlrp, logSegNo) #define GetXLogRecPtr(segno, offset, wal_segsz_bytes, dest) \ XLogSegNoOffsetToRecPtr(segno, offset, dest) -#define GetXLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \ - XLogFileName(fname, tli, logSegNo) +#define GetXLogFileName(fname, len, tli, logSegNo, wal_segsz_bytes) \ + XLogFileName(fname, len, tli, logSegNo) #define IsInXLogSeg(xlrp, logSegNo, wal_segsz_bytes) \ XLByteInSeg(xlrp, logSegNo) diff --git a/src/bin/pg_probackup/pg_probackupc.h b/src/bin/pg_probackup/pg_probackupc.h index e38c45038..70ffaa239 100644 --- a/src/bin/pg_probackup/pg_probackupc.h +++ b/src/bin/pg_probackup/pg_probackupc.h @@ -50,6 +50,10 @@ extern int rw_timeout; /* backup options */ extern bool smooth_checkpoint; +/* list of dirs that are not backed up with pgdata; + they are backed up as external dirs */ +extern parray *pgdata_nobackup_dir; + /* remote probackup options */ extern char* remote_agent; diff --git a/src/bin/pg_probackup/pgut.cpp b/src/bin/pg_probackup/pgut.cpp index be0b77dea..c3612163b 100644 --- a/src/bin/pg_probackup/pgut.cpp +++ b/src/bin/pg_probackup/pgut.cpp @@ -198,11 +198,12 @@ static const internalPQconninfoOption PQconninfoOptions[] = { * * support.
* */ +#ifdef KRB5 /* Kerberos and GSSAPI authentication support specifying the service name */ {(const char *)"krbsrvname", (const char *)"PGKRBSRVNAME", (const char *)PG_KRB_SRVNAM, NULL, (const char *)"Kerberos-service-name", (const char *)"", 20, offsetof(struct pg_conn, krbsrvname)}, - +#endif {(const char *)"replication", NULL, NULL, NULL, (const char *)"Replication", (const char *)"D", 5, diff --git a/src/bin/pg_probackup/show.cpp b/src/bin/pg_probackup/show.cpp index a2bf196f7..0f87f734e 100644 --- a/src/bin/pg_probackup/show.cpp +++ b/src/bin/pg_probackup/show.cpp @@ -921,7 +921,7 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, cur++; /* Min Segno */ - GetXLogFileName(segno_tmp, tlinfo->tli, tlinfo->begin_segno, xlog_seg_size); + GetXLogFileName(segno_tmp, MAXFNAMELEN, tlinfo->tli, tlinfo->begin_segno, xlog_seg_size); rc = snprintf_s(row->min_segno, lengthof(row->min_segno), lengthof(row->min_segno) - 1, "%s", segno_tmp); securec_check_ss_c(rc, "\0", "\0"); @@ -930,7 +930,7 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, cur++; /* Max Segno */ - GetXLogFileName(segno_tmp, tlinfo->tli, tlinfo->end_segno, xlog_seg_size); + GetXLogFileName(segno_tmp, MAXFNAMELEN, tlinfo->tli, tlinfo->end_segno, xlog_seg_size); rc = snprintf_s(row->max_segno, lengthof(row->max_segno), lengthof(row->max_segno) - 1, "%s", segno_tmp); securec_check_ss_c(rc, "\0", "\0"); @@ -1109,12 +1109,12 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, securec_check_ss_c(rc, "\0", "\0"); json_add_value(buf, "switchpoint", tmp_buf, json_level, true); - GetXLogFileName(segno_tmp, tlinfo->tli, tlinfo->begin_segno, xlog_seg_size); + GetXLogFileName(segno_tmp, MAXFNAMELEN, tlinfo->tli, tlinfo->begin_segno, xlog_seg_size); rc = snprintf_s(tmp_buf, lengthof(tmp_buf), lengthof(tmp_buf) - 1, "%s", segno_tmp); securec_check_ss_c(rc, "\0", "\0"); json_add_value(buf, "min-segno", tmp_buf, json_level, true); - GetXLogFileName(segno_tmp, tlinfo->tli, tlinfo->end_segno, xlog_seg_size); + GetXLogFileName(segno_tmp, MAXFNAMELEN, tlinfo->tli, tlinfo->end_segno, xlog_seg_size); rc = snprintf_s(tmp_buf, lengthof(tmp_buf), lengthof(tmp_buf) - 1, "%s", segno_tmp); securec_check_ss_c(rc, "\0", "\0"); json_add_value(buf, "max-segno", tmp_buf, json_level, true); @@ -1162,12 +1162,12 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, json_add(buf, JT_BEGIN_OBJECT, &json_level); - GetXLogFileName(segno_tmp, tlinfo->tli, lost_segments->begin_segno, xlog_seg_size); + GetXLogFileName(segno_tmp, MAXFNAMELEN, tlinfo->tli, lost_segments->begin_segno, xlog_seg_size); rc = snprintf_s(tmp_buf, lengthof(tmp_buf), lengthof(tmp_buf) - 1, "%s", segno_tmp); securec_check_ss_c(rc, "\0", "\0"); json_add_value(buf, "begin-segno", tmp_buf, json_level, true); - GetXLogFileName(segno_tmp, tlinfo->tli, lost_segments->end_segno, xlog_seg_size); + GetXLogFileName(segno_tmp, MAXFNAMELEN, tlinfo->tli, lost_segments->end_segno, xlog_seg_size); rc = snprintf_s(tmp_buf, lengthof(tmp_buf), lengthof(tmp_buf) - 1, "%s", segno_tmp); securec_check_ss_c(rc, "\0", "\0"); json_add_value(buf, "end-segno", tmp_buf, json_level, true); diff --git a/src/bin/pg_resetxlog/CMakeLists.txt b/src/bin/pg_resetxlog/CMakeLists.txt index 2b7c0da08..af2fecdab 100755 --- a/src/bin/pg_resetxlog/CMakeLists.txt +++ b/src/bin/pg_resetxlog/CMakeLists.txt @@ -14,7 +14,7 @@ set(TGT_resetxlog_INC set(resetxlog_DEF_OPTIONS ${MACRO_OPTIONS}) set(resetxlog_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} 
${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(resetxlog_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(resetxlog_LINK_LIBS libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(resetxlog_LINK_LIBS libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(pg_resetxlog TGT_resetxlog_SRC TGT_resetxlog_INC "${resetxlog_DEF_OPTIONS}" "${resetxlog_COMPILE_OPTIONS}" "${resetxlog_LINK_OPTIONS}" "${resetxlog_LINK_LIBS}") add_dependencies(pg_resetxlog pgport_static) target_link_directories(pg_resetxlog PUBLIC diff --git a/src/bin/pg_rewind/CMakeLists.txt b/src/bin/pg_rewind/CMakeLists.txt index 5e591ca8d..0283a3e5e 100755 --- a/src/bin/pg_rewind/CMakeLists.txt +++ b/src/bin/pg_rewind/CMakeLists.txt @@ -1,7 +1,6 @@ #This is the main CMAKE for build all components. # pg_rewind.bin set(TGT_rewind_SRC ${CMAKE_CURRENT_SOURCE_DIR}/datapagemap.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/compressed_rewind.cpp ${CMAKE_CURRENT_SOURCE_DIR}/fetch.cpp ${CMAKE_CURRENT_SOURCE_DIR}/filemap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/file_ops.cpp @@ -17,7 +16,6 @@ set(TGT_rewind_INC ${PROJECT_SRC_DIR}/common/interfaces/libpq ${PROJECT_SRC_DIR}/include/libpq ${LIBOPENSSL_INCLUDE_PATH} - ${ZSTD_INCLUDE_PATH} ) set(rewind_DEF_OPTIONS ${MACRO_OPTIONS}) diff --git a/src/bin/pg_rewind/Makefile b/src/bin/pg_rewind/Makefile index 3d0bcdd99..29f927f57 100644 --- a/src/bin/pg_rewind/Makefile +++ b/src/bin/pg_rewind/Makefile @@ -26,7 +26,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif endif -OBJS = file_ops.o datapagemap.o fetch.o filemap.o logging.o parsexlog.o pg_rewind.o compressed_rewind.o +OBJS = file_ops.o datapagemap.o fetch.o filemap.o logging.o parsexlog.o pg_rewind.o #all:gs_rewind.a diff --git a/src/bin/pg_rewind/compressed_common.h b/src/bin/pg_rewind/compressed_common.h deleted file mode 100644 index cbb7c421e..000000000 --- a/src/bin/pg_rewind/compressed_common.h +++ /dev/null @@ -1,46 +0,0 @@ -/* ------------------------------------------------------------------------- - * - * compressed_common.h - * - * Copyright (c) 2021 Huawei Technologies Co.,Ltd. 
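A recurring change in the hunks above and below (pg_probackup's show.cpp, and pg_rewind.cpp further on) is that GetXLogFileName()/XLogFileName() now receive the destination buffer length (MAXFNAMELEN) explicitly. A minimal stand-alone sketch of that pattern follows; the names and constant values are illustrative stand-ins, not the real openGauss declarations.

```
/*
 * Illustrative stand-in, not the openGauss API: the formatter receives the
 * destination length and bounds its own write, so an undersized buffer
 * truncates instead of overflowing.
 */
#include <cstdio>
#include <cstddef>

static const std::size_t MAXFNAMELEN = 64;                /* assumed value */
static const unsigned long XLogSegmentsPerXLogId = 0x100; /* assumed value */

static void BuildWalFileName(char* dst, std::size_t dstlen, unsigned tli, unsigned long segno)
{
    /* the familiar 24-hex-digit WAL segment naming scheme */
    std::snprintf(dst, dstlen, "%08X%08X%08X", tli,
                  (unsigned)(segno / XLogSegmentsPerXLogId),
                  (unsigned)(segno % XLogSegmentsPerXLogId));
}

int main()
{
    char name[MAXFNAMELEN];
    BuildWalFileName(name, sizeof(name), 1, 5);
    std::puts(name); /* prints 000000010000000000000005 */
    return 0;
}
```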
- * - * ------------------------------------------------------------------------- - */ -#ifndef OPENGAUSS_SERVER_COMPRESS_COMPRESSED_COMMON_H -#define OPENGAUSS_SERVER_COMPRESS_COMPRESSED_COMMON_H - -#include "utils/atomic.h" - - - -struct RewindCompressInfo { - bool compressed = false; /* compressed table or not */ - uint32 oldBlockNumber = 0; - uint32 newBlockNumber = 0; - uint8 algorithm = 0; /* compressed algorithm */ - uint16 chunkSize = 0; /* compressed chunk size */ -}; - -struct CompressedPcaInfo { - char *pcaMap = NULL; - int pcaFd = -1; - char path[MAXPGPATH]; - int32 chunkSize = 0; - int32 algorithm = 0; -}; - -#define COPY_REWIND_COMPRESS_INFO(entry, infoPointer, oldBlock, newBlock) \ - (entry)->rewindCompressInfo.oldBlockNumber = 0; \ - (entry)->rewindCompressInfo.newBlockNumber = 0; \ - (entry)->rewindCompressInfo.compressed = false; \ - (entry)->rewindCompressInfo.algorithm = 0; \ - (entry)->rewindCompressInfo.chunkSize = 0; \ - if ((infoPointer) != NULL && (infoPointer)->compressed) { \ - (entry)->rewindCompressInfo.oldBlockNumber = (oldBlock); \ - (entry)->rewindCompressInfo.newBlockNumber = (newBlock); \ - (entry)->rewindCompressInfo.compressed = true; \ - (entry)->rewindCompressInfo.algorithm = (infoPointer)->algorithm; \ - (entry)->rewindCompressInfo.chunkSize = (infoPointer)->chunkSize; \ - } - -#endif // OPENGAUSS_SERVER_COMPRESS_COMPRESSED_COMMON_H diff --git a/src/bin/pg_rewind/compressed_rewind.cpp b/src/bin/pg_rewind/compressed_rewind.cpp deleted file mode 100644 index a936af733..000000000 --- a/src/bin/pg_rewind/compressed_rewind.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2012-2018. All rights reserved. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * compressed_rewind.cpp - * Functions for fetching compressed table. 
- * - * - * IDENTIFICATION - * ./src/bin/pg_rewind/compressed_rewind.cpp - * - * ------------------------------------------------------------------------- - */ -#include "compressed_rewind.h" -#include "libpq/libpq-fe.h" -#include "lib/string.h" -#include "logging.h" -#include "filemap.h" -#include "utils/elog.h" -#include "file_ops.h" - -void FormatPathToPca(const char* path, char* dst, size_t len, bool withPrefix) -{ - errno_t rc; - if (withPrefix) { - rc = snprintf_s(dst, len, len - 1, "%s/" PCA_SUFFIX, pg_data, path); - } else { - rc = snprintf_s(dst, len, len - 1, PCA_SUFFIX, path); - } - securec_check_ss_c(rc, "\0", "\0"); -} - -void FormatPathToPcd(const char* path, char* dst, size_t len, bool withPrefix) -{ - errno_t rc; - if (withPrefix) { - rc = snprintf_s(dst, len, len - 1, "%s/" PCD_SUFFIX, pg_data, path); - } else { - rc = snprintf_s(dst, len, len - 1, PCD_SUFFIX, path); - } - securec_check_ss_c(rc, "\0", "\0"); -} - -template -bool ReadCompressedInfo(T& t, off_t offset, FILE* file, char* pcaFilePath, size_t len) -{ - if (fseeko(file, offset, SEEK_SET) != 0) { - pg_fatal("could not seek in file \"%s\": \"%lu\": %s\n", pcaFilePath, len, strerror(errno)); - return false; - } - if (fread(&t, sizeof(t), 1, file) <= 0) { - pg_fatal("could not open file \"%s\": \"%lu\": %s\n", pcaFilePath, len, strerror(errno)); - return false; - } - return true; -} - -/** - * write RewindCompressInfo - * @param file file fp - * @param pcaFilePath file path,for ereport - * @param rewindCompressInfo pointer of return - * @return sucesss or not - */ -static bool ReadRewindCompressedInfo(FILE* file, char* pcaFilePath, size_t len, RewindCompressInfo* rewindCompressInfo) -{ - off_t offset = (off_t)offsetof(PageCompressHeader, chunk_size); - if (!ReadCompressedInfo(rewindCompressInfo->chunkSize, offset, file, pcaFilePath, len)) { - return false; - } - offset = (off_t)offsetof(PageCompressHeader, algorithm); - if (!ReadCompressedInfo(rewindCompressInfo->algorithm, offset, file, pcaFilePath, len)) { - return false; - } - offset = (off_t)offsetof(PageCompressHeader, nblocks); - if (!ReadCompressedInfo(rewindCompressInfo->oldBlockNumber, offset, file, pcaFilePath, len)) { - return false; - } - rewindCompressInfo->compressed = true; - return true; -} - -bool FetchSourcePca(const char* strValue, RewindCompressInfo* rewindCompressInfo) -{ - size_t length = 0; - PageCompressHeader* ptr = (PageCompressHeader*)PQunescapeBytea((const unsigned char*)strValue, &length); - rewindCompressInfo->compressed = false; - if (length == sizeof(PageCompressHeader)) { - rewindCompressInfo->compressed = true; - rewindCompressInfo->algorithm = ptr->algorithm; - rewindCompressInfo->newBlockNumber = ptr->nblocks; - rewindCompressInfo->oldBlockNumber = 0; - rewindCompressInfo->chunkSize = ptr->chunk_size; - } - PQfreemem(ptr); - return rewindCompressInfo->compressed; -} - -bool ProcessLocalPca(const char* tablePath, RewindCompressInfo* rewindCompressInfo) -{ - rewindCompressInfo->compressed = false; - if (!isRelDataFile(tablePath)) { - return false; - } - char pcaFilePath[MAXPGPATH]; - FormatPathToPca(tablePath, pcaFilePath, MAXPGPATH, true); - FILE* file = fopen(pcaFilePath, "rb"); - if (file == NULL) { - if (errno == ENOENT) { - return false; - } - pg_fatal("could not open file \"%s\": %s\n", pcaFilePath, strerror(errno)); - return false; - } - bool success = ReadRewindCompressedInfo(file, pcaFilePath, MAXPGPATH, rewindCompressInfo); - fclose(file); - return success; -} diff --git a/src/bin/pg_rewind/compressed_rewind.h 
b/src/bin/pg_rewind/compressed_rewind.h deleted file mode 100644 index 967c0b76f..000000000 --- a/src/bin/pg_rewind/compressed_rewind.h +++ /dev/null @@ -1,21 +0,0 @@ -/* ------------------------------------------------------------------------- - * - * compressed_rewind.h - * - * Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * - * ------------------------------------------------------------------------- - */ -#ifndef OPENGAUSS_SERVER_COMPRESS_COMPRESSED_REWIND_H -#define OPENGAUSS_SERVER_COMPRESS_COMPRESSED_REWIND_H - -#include "compressed_common.h" -#include "storage/page_compression.h" -#include "storage/smgr/relfilenode.h" - -extern bool FetchSourcePca(const char* strValue, RewindCompressInfo* rewindCompressInfo); -extern bool ProcessLocalPca(const char* tablePath, RewindCompressInfo* rewindCompressInfo); -extern void FormatPathToPca(const char* path, char* dst, size_t len, bool withPrefix = false); -extern void FormatPathToPcd(const char* path, char* dst, size_t len, bool withPrefix = false); - -#endif // OPENGAUSS_SERVER_COMPRESS_COMPRESSED_REWIND_H diff --git a/src/bin/pg_rewind/fetch.cpp b/src/bin/pg_rewind/fetch.cpp index 97e34edf4..632218e8e 100755 --- a/src/bin/pg_rewind/fetch.cpp +++ b/src/bin/pg_rewind/fetch.cpp @@ -23,7 +23,6 @@ #include "libpq/libpq-fe.h" #include "libpq/libpq-int.h" #include "common/fe_memutils.h" -#include "compressed_rewind.h" #include "catalog/catalog.h" #include "catalog/pg_type.h" @@ -48,11 +47,11 @@ const uint64 MAX_FILE_SIZE = 0xFFFFFFFF; #define MAX_PARAM_LEN 1024 static BuildErrorCode receiveFileChunks(const char* sql, FILE* file); -static BuildErrorCode execute_pagemap(file_entry_t* entry, FILE* file); +static BuildErrorCode execute_pagemap(datapagemap_t* pagemap, const char* path, FILE* file); static char* run_simple_query(const char* sql); static BuildErrorCode recurse_dir(const char* datadir, const char* path, process_file_callback_t callback); static void get_slot_name_by_app_name(void); -static BuildErrorCode CheckResultSet(PGresult* pgResult); + BuildErrorCode libpqConnect(const char* connstr) { PGresult* res = NULL; @@ -255,22 +254,10 @@ BuildErrorCode fetchSourceFileList() * general, so if the admin has put any custom symbolic links in the data * directory, they won't be copied correctly. 
*/ - /* skip pca/pcd files and concat pca with table file */ - sql = "WITH tmp_table AS (\n" - "SELECT path, size, isdir, pg_tablespace_location(pg_tablespace.oid) AS link_target \n" + sql = "SELECT path, size, isdir, pg_tablespace_location(pg_tablespace.oid) AS link_target \n" "FROM (SELECT * FROM pg_stat_file_recursive('.')) AS files \n" - "LEFT OUTER JOIN pg_tablespace ON files.path ~ '^pg_tblspc/' AND oid :: text = files.filename\n" - "),compressed_address AS (SELECT path pca_path, substr(path, 0, length(path) - 4) AS table_path\n" - "FROM pg_stat_file_recursive('.') WHERE path ~ '_pca$' AND length(path) > 4)\n" - "SELECT path, size, isdir, link_target,\n" - "CASE WHEN pca_path IS NOT NULL THEN pg_read_binary_file(pca_path, 0, %d, true)\n" - "ELSE NULL END AS pchdr\n" - "FROM tmp_table LEFT JOIN compressed_address\n" - "ON tmp_table.path = compressed_address.table_path\nWHERE path !~ '_pca$' AND path !~ '_pcd$'\n"; - char sqlbuf[1024]; - int rc = snprintf_s(sqlbuf, sizeof(sqlbuf), sizeof(sqlbuf) - 1, sql, SIZE_OF_PAGE_COMPRESS_HEADER_DATA); - securec_check_ss_c(rc, "\0", "\0"); - res = PQexec(conn, (const char*)sqlbuf); + "LEFT OUTER JOIN pg_tablespace ON files.path like 'pg_tblspc/%' AND oid::text = files.filename\n"; + res = PQexec(conn, sql); if (PQresultStatus(res) != PGRES_TUPLES_OK) { pg_log(PG_ERROR, "could not fetch file list: %s", PQresultErrorMessage(res)); @@ -278,7 +265,7 @@ BuildErrorCode fetchSourceFileList() } /* sanity check the result set */ - if (PQnfields(res) != 5) { + if (PQnfields(res) != 4) { pg_fatal("unexpected result set while fetching file list\n"); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); } @@ -321,13 +308,7 @@ BuildErrorCode fetchSourceFileList() } } } - RewindCompressInfo rewindCompressInfo; - RewindCompressInfo *pointer = NULL; - if (!PQgetisnull(res, i, 4) && FetchSourcePca(PQgetvalue(res, i, 4), &rewindCompressInfo)) { - filesize = rewindCompressInfo.newBlockNumber * BLCKSZ; - pointer = &rewindCompressInfo; - } - process_source_file(path, type, filesize, link_target, pointer); + process_source_file(path, type, filesize, link_target); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); } PQclear(res); @@ -383,7 +364,7 @@ static BuildErrorCode receiveFileChunks(const char* sql, FILE* file) } /* sanity check the result set */ - if (PQnfields(res) != 7 || PQntuples(res) != 1) { + if (PQnfields(res) != 4 || PQntuples(res) != 1) { pg_fatal("unexpected result set size while fetching remote files\n"); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); } @@ -412,8 +393,6 @@ static BuildErrorCode receiveFileChunks(const char* sql, FILE* file) pg_fatal("unexpected result length while fetching remote files\n"); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); } - /* check compressed result set */ - CheckResultSet(res); /* Read result set to local variables */ errorno = memcpy_s(&chunkoff, sizeof(int32), PQgetvalue(res, 0, 1), sizeof(int32)); @@ -450,38 +429,14 @@ static BuildErrorCode receiveFileChunks(const char* sql, FILE* file) continue; } - int32 algorithm; - errorno = memcpy_s(&algorithm, sizeof(int32), PQgetvalue(res, 0, 4), sizeof(int32)); - securec_check_c(errorno, "\0", "\0"); - algorithm = ntohl(algorithm); - if (algorithm == 0) { - pg_log(PG_DEBUG, "received chunk for file \"%s\", offset %d, size %d\n", filename, chunkoff, chunksize); - fprintf(file, "received chunk for file \"%s\", offset %d, size %d\n", filename, chunkoff, chunksize); - open_target_file(filename, false); - pg_free(filename); - filename = NULL; - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - 
write_target_range(chunk, chunkoff, chunksize, chunkspace); - } else { - int32 chunkSize; - int errorno = memcpy_s(&chunkSize, sizeof(int32), PQgetvalue(res, 0, 5), sizeof(int32)); - securec_check_c(errorno, "\0", "\0"); - chunkSize = ntohl(chunkSize); - bool rebuild = *PQgetvalue(res, 0, 6) != 0; - char dst[MAXPGPATH]; - /* open pca */ - FormatPathToPca(filename, dst, MAXPGPATH, false); - OpenCompressedPcaFile(dst, chunkSize, algorithm, rebuild); + pg_log(PG_DEBUG, "received chunk for file \"%s\", offset %d, size %d\n", filename, chunkoff, chunksize); + fprintf(file, "received chunk for file \"%s\", offset %d, size %d\n", filename, chunkoff, chunksize); - /* open pcd */ - FormatPathToPcd(filename, dst, MAXPGPATH, false); - open_target_file(dst, false); - BlockNumber blockNumber = chunkoff; - size_t blockSize = chunkspace; - - /* fetch result */ - FetchCompressedFile(chunk, blockNumber, blockSize); - } + open_target_file(filename, false); + pg_free(filename); + filename = NULL; + PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); + write_target_range(chunk, chunkoff, chunksize, chunkspace); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); PQclear(res); res = NULL; @@ -489,32 +444,6 @@ static BuildErrorCode receiveFileChunks(const char* sql, FILE* file) return BUILD_SUCCESS; } -/** - * check result set of compressed tables - * @param pgResult result - * @return success or not - */ -static BuildErrorCode CheckResultSet(PGresult* res) -{ -#define PQ_TYPE(index, type) (PQftype(res, (index)) != (type)) - if (PQ_TYPE(4, INT4OID) || PQ_TYPE(5, INT4OID) || PQ_TYPE(6, BOOLOID)) { - pg_fatal( - "FetchCompressedFile:unexpected data types: %u %u %u\n", PQftype(res, 4), PQftype(res, 5), PQftype(res, 6)); - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - } -#define PQ_FORMAT(index) (PQfformat(res, 0) != 1) - if (PQ_FORMAT(4) && PQ_FORMAT(5) && PQ_FORMAT(6)) { - pg_fatal("unexpected result format while fetching remote files\n"); - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - } -#define PQ_ISNULL(index) (PQgetisnull(res, 0, (index))) - if (PQ_ISNULL(4) || PQ_ISNULL(5) || PQ_ISNULL(6)) { - pg_fatal("unexpected null values in result while fetching remote files\n"); - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - } - return BUILD_SUCCESS; -} - /* * Receive a single file as a malloc'd buffer. 
*/ @@ -568,43 +497,6 @@ error: return result; } -static void CompressedFileCopy(const file_entry_t* entry, bool rebuild) -{ - Assert(!rebuild || entry->rewindCompressInfo.oldBlockNumber == 0); - if (dry_run) { - return; - } - - char linebuf[MAXPGPATH + 47]; - int ret = snprintf_s(linebuf, - sizeof(linebuf), - sizeof(linebuf) - 1, - "%s\t%u\t%u\t%u\t%u\t%u\n", - entry->path, - entry->rewindCompressInfo.oldBlockNumber, - entry->rewindCompressInfo.newBlockNumber - entry->rewindCompressInfo.oldBlockNumber, - entry->rewindCompressInfo.algorithm, - entry->rewindCompressInfo.chunkSize, - rebuild); - securec_check_ss_c(ret, "\0", "\0"); - if (PQputCopyData(conn, linebuf, strlen(linebuf)) != 1) { - pg_fatal("could not send COPY data: %s", PQerrorMessage(conn)); - } - pg_log(PG_PROGRESS, "CompressedFileCopy:%s", linebuf); -} - -static void CompressedFileRemove(const file_entry_t* entry) -{ - remove_target((file_entry_t*) entry); - char* path = entry->path; - char dst[MAXPGPATH]; - FormatPathToPca(path, dst, MAXPGPATH); - remove_target_file(dst, false); - FormatPathToPcd(path, dst, MAXPGPATH); - remove_target_file(dst, false); - pg_log(PG_PROGRESS, "CompressedFileRemove: %s\n", path); -} - /* * Write a file range to a temporary table in the server. * @@ -614,7 +506,7 @@ static void CompressedFileRemove(const file_entry_t* entry) */ static void fetch_file_range(const char* path, unsigned int begin, unsigned int end) { - char linebuf[MAXPGPATH + 47]; + char linebuf[MAXPGPATH + 23]; int ss_c = 0; /* Split the range into CHUNKSIZE chunks */ @@ -626,12 +518,12 @@ static void fetch_file_range(const char* path, unsigned int begin, unsigned int } else { len = end - begin; } - ss_c = snprintf_s( - linebuf, sizeof(linebuf), sizeof(linebuf) - 1, "%s\t%u\t%u\t%u\t%u\t%u\n", path, begin, len, 0, 0, 0); + ss_c = snprintf_s(linebuf, sizeof(linebuf), sizeof(linebuf) - 1, "%s\t%u\t%u\n", path, begin, len); securec_check_ss_c(ss_c, "\0", "\0"); if (PQputCopyData(conn, linebuf, strlen(linebuf)) != 1) pg_fatal("could not send COPY data: %s", PQerrorMessage(conn)); + begin += len; } } @@ -650,8 +542,7 @@ BuildErrorCode executeFileMap(filemap_t* map, FILE* file) * First create a temporary table, and load it with the blocks that we * need to fetch. 
*/ - sql = "CREATE TEMPORARY TABLE fetchchunks(path text, begin int4, len int4, " - "algorithm int4, chunksize int4, rebuild bool);"; + sql = "CREATE TEMPORARY TABLE fetchchunks(path text, begin int4, len int4);"; res = PQexec(conn, sql); if (PQresultStatus(res) != PGRES_COMMAND_OK) { pg_fatal("could not create temporary table: %s", PQresultErrorMessage(res)); @@ -680,17 +571,11 @@ BuildErrorCode executeFileMap(filemap_t* map, FILE* file) } /* report all the path to check whether it's correct */ - if (entry->rewindCompressInfo.compressed) { - pg_log(PG_PROGRESS, "path: %s, type: %d, action: %d\n", entry->path, entry->type, entry->action); - - } - pg_log(PG_DEBUG, "path: %s, type: %d, action: %d\n", entry->path, entry->type, entry->action); fprintf(file, "path: %s, type: %d, action: %d\n", entry->path, entry->type, entry->action); /* If this is a relation file, copy the modified blocks */ - bool compressed = entry->rewindCompressInfo.compressed; - execute_pagemap(entry, file); + execute_pagemap(&entry->pagemap, entry->path, file); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); switch (entry->action) { @@ -699,47 +584,29 @@ BuildErrorCode executeFileMap(filemap_t* map, FILE* file) break; case FILE_ACTION_COPY: - if (compressed) { - CompressedFileCopy(entry, true); - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - } else { - /* Truncate the old file out of the way, if any */ - open_target_file(entry->path, true); - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - fetch_file_range(entry->path, 0, entry->newsize); - PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); - } + /* Truncate the old file out of the way, if any */ + open_target_file(entry->path, true); + PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); + fetch_file_range(entry->path, 0, entry->newsize); + PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); break; case FILE_ACTION_TRUNCATE: - if (compressed) { - CompressedFileTruncate(entry->path, &entry->rewindCompressInfo); - } else { - truncate_target_file(entry->path, entry->newsize); - } + truncate_target_file(entry->path, entry->newsize); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); break; case FILE_ACTION_COPY_TAIL: - if (compressed) { - CompressedFileCopy(entry, false); - } else { - fetch_file_range(entry->path, entry->oldsize, entry->newsize); - } + fetch_file_range(entry->path, entry->oldsize, entry->newsize); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); break; case FILE_ACTION_REMOVE: - if (compressed) { - CompressedFileRemove(entry); - } else { - remove_target(entry); - } + remove_target(entry); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); break; case FILE_ACTION_CREATE: - Assert(!compressed); create_target(entry); PG_CHECKBUILD_AND_FREE_PGRESULT_RETURN(res); break; @@ -770,16 +637,10 @@ BuildErrorCode executeFileMap(filemap_t* map, FILE* file) * We've now copied the list of file ranges that we need to fetch to the * temporary table. Now, actually fetch all of those ranges. 
*/ - sql = "SELECT path, begin, \n" - " pg_read_binary_file(path, begin, len, true) AS chunk, len, algorithm, chunksize,rebuild \n" - "FROM fetchchunks where algorithm =0 \n" - "union all \n" - "select (json->>'path')::text as path, (json->>'blocknum')::int4 as begin, (json->>'data')::bytea as chunk,\n" - "(json->>'len')::int4 as len, algorithm, chunksize,rebuild \n" - "from (select row_to_json(pg_read_binary_file_blocks(path,begin,len)) json, algorithm, chunksize,rebuild \n" - "from fetchchunks where algorithm !=0) \n" - "order by path, begin;"; + " pg_read_binary_file(path, begin, len, true) AS chunk,\n" + " len \n" + "FROM fetchchunks\n"; fprintf(file, "fetch and write file based on temporary table fetchchunks.\n"); return receiveFileChunks(sql, file); @@ -839,7 +700,7 @@ BuildErrorCode backupFileMap(filemap_t* map) /* to be supported later */ break; - case FILE_ACTION_COPY: { + case FILE_ACTION_COPY: /* create fake file for restore when file not exist, otherwise, backup file */ file_entry_t statbuf; if (targetFilemapSearch(entry->path, &statbuf) < 0) { @@ -848,7 +709,6 @@ BuildErrorCode backupFileMap(filemap_t* map) backup_target_file(entry->path, divergeXlogFileName); } break; - } case FILE_ACTION_COPY_TAIL: case FILE_ACTION_TRUNCATE: @@ -872,60 +732,17 @@ BuildErrorCode backupFileMap(filemap_t* map) return BUILD_SUCCESS; } -/** - * combine continue blocks numbers and copy file - * @param entry file entry - * @param file file - */ -static void CompressedFileCopy(file_entry_t* entry, FILE* file) -{ - datapagemap_t* pagemap = &entry->pagemap; - datapagemap_iterator_t* iter = datapagemap_iterate(pagemap); - - BlockNumber blkno; - file_entry_t fileEntry; - fileEntry.path = entry->path; - fileEntry.rewindCompressInfo = entry->rewindCompressInfo; - int invalidNumber = -1; - long int before = invalidNumber; - while (datapagemap_next(iter, &blkno)) { - fprintf(file, " block %u\n", blkno); - if (before == -1) { - fileEntry.rewindCompressInfo.oldBlockNumber = blkno; - before = blkno; - } else { - if (before == blkno - 1) { - before = blkno; - } else { - fileEntry.rewindCompressInfo.newBlockNumber = before + 1; - CompressedFileCopy(&fileEntry, false); - fileEntry.rewindCompressInfo.oldBlockNumber = blkno; - before = blkno; - } - } - } - if (before != invalidNumber) { - fileEntry.rewindCompressInfo.newBlockNumber = before + 1; - CompressedFileCopy(&fileEntry, false); - } -} -static BuildErrorCode execute_pagemap(file_entry_t* entry, FILE* file) +static BuildErrorCode execute_pagemap(datapagemap_t* pagemap, const char* path, FILE* file) { datapagemap_iterator_t* iter = NULL; BlockNumber blkno; off_t offset; - datapagemap_t* pagemap = &entry->pagemap; - char* path = entry->path; iter = datapagemap_iterate(pagemap); - if (entry->rewindCompressInfo.compressed) { - CompressedFileCopy(entry, file); - } else { - while (datapagemap_next(iter, &blkno)) { - fprintf(file, " block %u\n", blkno); - offset = blkno * BLCKSZ; - fetch_file_range(path, offset, offset + BLCKSZ); - } + while (datapagemap_next(iter, &blkno)) { + fprintf(file, " block %u\n", blkno); + offset = blkno * BLCKSZ; + fetch_file_range(path, offset, offset + BLCKSZ); } pg_free(iter); iter = NULL; @@ -972,19 +789,9 @@ static BuildErrorCode recurse_dir(const char* datadir, const char* parentpath, p struct stat fst; char fullpath[MAXPGPATH]; char path[MAXPGPATH]; - const size_t MINPCANAMESIZE = 4; if (strcmp(xlde->d_name, ".") == 0 || strcmp(xlde->d_name, "..") == 0) continue; - /* Skip compressed page files */ - size_t dirNamePath = 
strlen(xlde->d_name); - if (dirNamePath >= MINPCANAMESIZE) { - const char* suffix = xlde->d_name + dirNamePath - MINPCANAMESIZE; - if (strncmp(suffix, "_pca", MINPCANAMESIZE) == 0 || strncmp(suffix, "_pcd", MINPCANAMESIZE) == 0) { - continue; - } - } - ss_c = snprintf_s(fullpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", fullparentpath, xlde->d_name); securec_check_ss_c(ss_c, "\0", "\0"); @@ -1015,15 +822,8 @@ static BuildErrorCode recurse_dir(const char* datadir, const char* parentpath, p continue; if (S_ISREG(fst.st_mode)) { - uint64 fileSize = (uint64)fst.st_size; - RewindCompressInfo rewindCompressInfo; - RewindCompressInfo *pointer = NULL; - if (ProcessLocalPca(path, &rewindCompressInfo)) { - fileSize = rewindCompressInfo.oldBlockNumber * BLCKSZ; - pointer = &rewindCompressInfo; - } - if (fileSize <= MAX_FILE_SIZE) { - callback(path, FILE_TYPE_REGULAR, fileSize, NULL, pointer); + if ((uint64)fst.st_size <= MAX_FILE_SIZE) { + callback(path, FILE_TYPE_REGULAR, fst.st_size, NULL); if (increment_return_code != BUILD_SUCCESS) { (void)closedir(xldir); } @@ -1032,7 +832,7 @@ static BuildErrorCode recurse_dir(const char* datadir, const char* parentpath, p pg_log(PG_WARNING, "file size of \"%s\" is over %ld\n", fullpath, MAX_FILE_SIZE); } } else if (S_ISDIR(fst.st_mode)) { - callback(path, FILE_TYPE_DIRECTORY, 0, NULL, NULL); + callback(path, FILE_TYPE_DIRECTORY, 0, NULL); if (increment_return_code != BUILD_SUCCESS) { (void)closedir(xldir); } @@ -1057,7 +857,7 @@ static BuildErrorCode recurse_dir(const char* datadir, const char* parentpath, p } link_target[len] = '\0'; - callback(path, FILE_TYPE_SYMLINK, 0, link_target, NULL); + callback(path, FILE_TYPE_SYMLINK, 0, link_target); /* * If it's a symlink within pg_tblspc, we need to recurse into it, diff --git a/src/bin/pg_rewind/fetch.h b/src/bin/pg_rewind/fetch.h index c383e2087..713d78c16 100755 --- a/src/bin/pg_rewind/fetch.h +++ b/src/bin/pg_rewind/fetch.h @@ -42,9 +42,7 @@ extern XLogRecPtr libpqGetCurrentXlogInsertLocation(void); extern void libpqRequestCheckpoint(void); -typedef void (*process_file_callback_t)(const char* path, file_type_t type, size_t oldsize, const char* link_target, - const RewindCompressInfo* rewindCompressInfo); - +typedef void (*process_file_callback_t)(const char* path, file_type_t type, size_t size, const char* link_target); extern BuildErrorCode traverse_datadir(const char* datadir, process_file_callback_t callback); extern void get_source_slotname(void); diff --git a/src/bin/pg_rewind/file_ops.cpp b/src/bin/pg_rewind/file_ops.cpp index d514a5b99..836866cf8 100644 --- a/src/bin/pg_rewind/file_ops.cpp +++ b/src/bin/pg_rewind/file_ops.cpp @@ -25,8 +25,6 @@ #include "common/fe_memutils.h" #include "common/build_query/build_query.h" -#include "compressed_rewind.h" -#include "storage/page_compression_impl.h" #include "replication/replicainternal.h" #define BLOCKSIZE (8 * 1024) @@ -38,8 +36,6 @@ static int dstfd = -1; static char dstpath[MAXPGPATH] = ""; static bool g_isRelDataFile = false; -static CompressedPcaInfo g_compressedPcaInfo; - static void create_target_dir(const char* path); static void remove_target_dir(const char* path); static void create_target_symlink(const char* path, const char* slink); @@ -104,7 +100,7 @@ void close_target_file(void) dstfd = -1; } -void write_target_range(char* buf, off_t begin, size_t size, int space, bool compressed) +void write_target_range(char* buf, off_t begin, size_t size, int space) { int writeleft; char* p = NULL; @@ -115,7 +111,7 @@ void write_target_range(char* buf, off_t begin, 
size_t size, int space, bool com if (dry_run) return; - if (!compressed && begin % BLOCKSIZE != 0) { + if (begin % BLOCKSIZE != 0) { (void)close(dstfd); dstfd = -1; pg_fatal("seek position %ld in target file \"%s\" is not in BLOCKSIZEs\n", size, dstpath); @@ -1229,142 +1225,3 @@ bool tablespaceDataIsValid(const char* path) return true; } - -void CompressedFileTruncate(const char *path, const RewindCompressInfo *rewindCompressInfo) -{ - if (dry_run) { - return; - } - - uint16 chunkSize = rewindCompressInfo->chunkSize; - - BlockNumber oldBlockNumber = rewindCompressInfo->oldBlockNumber; - BlockNumber newBlockNumber = rewindCompressInfo->newBlockNumber; - - Assert(oldBlockNumber > newBlockNumber); - char pcaPath[MAXPGPATH]; - FormatPathToPca(path, pcaPath, MAXPGPATH, true); - - int pcaFd = open(pcaPath, O_RDWR | PG_BINARY, 0600); - if (pcaFd < 0) { - pg_fatal("CompressedFileTruncate: could not open file \"%s\": %s\n", pcaPath, strerror(errno)); - return; - } - - PageCompressHeader* map = pc_mmap(pcaFd, chunkSize, false); - if (map == MAP_FAILED) { - pg_fatal("CompressedFileTruncate: Failed to mmap file \"%s\": %s\n", pcaPath, strerror(errno)); - return; - } - /* write zero to truncated addr */ - for (BlockNumber blockNumber = newBlockNumber; blockNumber < oldBlockNumber; ++blockNumber) { - PageCompressAddr* addr = GET_PAGE_COMPRESS_ADDR(map, chunkSize, blockNumber); - for (size_t i = 0; i < addr->allocated_chunks; ++i) { - addr->chunknos[i] = 0; - } - addr->nchunks = 0; - addr->allocated_chunks = 0; - addr->checksum = 0; - } - map->last_synced_nblocks = map->nblocks = newBlockNumber; - - /* find the max used chunk number */ - pc_chunk_number_t beforeUsedChunks = map->allocated_chunks; - pc_chunk_number_t max_used_chunkno = 0; - for (BlockNumber blockNumber = 0; blockNumber < newBlockNumber; ++blockNumber) { - PageCompressAddr* addr = GET_PAGE_COMPRESS_ADDR(map, chunkSize, blockNumber); - for (uint8 i = 0; i < addr->allocated_chunks; i++) { - if (addr->chunknos[i] > max_used_chunkno) { - max_used_chunkno = addr->chunknos[i]; - } - } - } - map->allocated_chunks = map->last_synced_allocated_chunks = max_used_chunkno; - - /* truncate pcd qfile */ - if (beforeUsedChunks > max_used_chunkno) { - char pcdPath[MAXPGPATH]; - FormatPathToPcd(path, pcdPath, MAXPGPATH, false); - truncate_target_file(pcdPath, max_used_chunkno * chunkSize); - } - pc_munmap(map); - pg_log(PG_PROGRESS, "CompressedFileTruncate: %s\n", path); -} - -void OpenCompressedPcaFile(const char* fileName, int32 chunkSize, int32 algorithm, bool rebuild) -{ - if (dry_run) { - return; - } - if (g_compressedPcaInfo.pcaFd != -1 && strcmp(fileName, &g_compressedPcaInfo.path[strlen(pg_data) + 1]) == 0) { - /* already open */ - return; - } - CloseCompressedPcaFile(); - int rc = snprintf_s(g_compressedPcaInfo.path, sizeof(g_compressedPcaInfo.path), - sizeof(g_compressedPcaInfo.path) - 1, - "%s/%s", pg_data, fileName); - securec_check_ss_c(rc, "\0", "\0"); - - int mode = O_RDWR | PG_BINARY; - mode = rebuild ? 
(mode | O_TRUNC | O_CREAT) : mode; - - g_compressedPcaInfo.pcaFd = open(g_compressedPcaInfo.path, mode, S_IRUSR | S_IWUSR); - if (g_compressedPcaInfo.pcaFd < 0) { - pg_fatal("could not open compressed pca file \"%s\": %s\n", g_compressedPcaInfo.path, strerror(errno)); - return; - } - g_compressedPcaInfo.algorithm = algorithm; - g_compressedPcaInfo.chunkSize = chunkSize; - g_compressedPcaInfo.pcaMap = (char*) pc_mmap(g_compressedPcaInfo.pcaFd, chunkSize, false); - if ((void*)g_compressedPcaInfo.pcaMap == MAP_FAILED) { - pg_fatal("OpenCompressedPcaFile: Failed to mmap file \"%s\": %s\n", g_compressedPcaInfo.path, strerror(errno)); - return; - } -} - -void CloseCompressedPcaFile() -{ - if (g_compressedPcaInfo.pcaFd == -1) { - return; - } - pc_munmap((PageCompressHeader*)g_compressedPcaInfo.pcaMap); - if (close(g_compressedPcaInfo.pcaFd) != 0) { - pg_fatal("could not close target file \"%s\": %s\n", g_compressedPcaInfo.path, gs_strerror(errno)); - } - g_compressedPcaInfo.pcaFd = -1; - g_compressedPcaInfo.pcaMap = NULL; - g_compressedPcaInfo.chunkSize = 0; - g_compressedPcaInfo.algorithm = 0; -} - -void FetchCompressedFile(char* buf, BlockNumber blockNumber, int32 size) -{ - int32 chunkSize = g_compressedPcaInfo.chunkSize; - int needChunks = size / chunkSize; - - PageCompressHeader* pcMap = (PageCompressHeader*) g_compressedPcaInfo.pcaMap; - PageCompressAddr* pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunkSize, blockNumber); - - // 2. allocate chunks - if (pcAddr->allocated_chunks < needChunks) { - auto chunkno = pg_atomic_fetch_add_u32(&pcMap->allocated_chunks, needChunks - pcAddr->allocated_chunks); - for (int i = pcAddr->allocated_chunks; i < needChunks; i++) { - pcAddr->chunknos[i] = ++chunkno; - } - pcAddr->allocated_chunks = needChunks; - } - for (int32 i = 0; i < needChunks; ++i) { - auto buffer_pos = buf + chunkSize * i; - off_t seekpos = (off_t) OFFSET_OF_PAGE_COMPRESS_CHUNK(chunkSize, pcAddr->chunknos[i]); - int32 start = i; - while (i < needChunks - 1 && pcAddr->chunknos[i + 1] == pcAddr->chunknos[i] + 1) { - i++; - } - int write_amount = chunkSize * (i - start + 1); - // open file dstfd - write_target_range(buffer_pos, seekpos, write_amount, 0, true); - } - pcAddr->nchunks = pcAddr->allocated_chunks; - pcAddr->checksum = AddrChecksum32(blockNumber, pcAddr, chunkSize); -} diff --git a/src/bin/pg_rewind/file_ops.h b/src/bin/pg_rewind/file_ops.h index 4b6b2b132..89c971842 100644 --- a/src/bin/pg_rewind/file_ops.h +++ b/src/bin/pg_rewind/file_ops.h @@ -11,11 +11,10 @@ #define FILE_OPS_H #include "filemap.h" -#include "compressed_common.h" extern char* pg_data; extern void open_target_file(const char* path, bool trunc); -extern void write_target_range(char* buf, off_t begin, size_t size, int space, bool compressed = false); +extern void write_target_range(char* buf, off_t begin, size_t size, int space); extern void close_target_file(void); extern void truncate_target_file(const char* path, off_t newsize); extern void create_target(file_entry_t* t); @@ -42,9 +41,6 @@ extern void delete_target_file(const char* file); extern bool isPathInFilemap(const char* path); extern bool tablespaceDataIsValid(const char* path); extern void copy_file(const char* fromfile, char* tofile); -extern void CompressedFileTruncate(const char* path, const RewindCompressInfo* rewindCompressInfo); -void FetchCompressedFile(char* buf, BlockNumber begin, int32 size); -void OpenCompressedPcaFile(const char* fileName, int32 chunkSize, int32 algorithm, bool rebuild); -void CloseCompressedPcaFile(); + #endif /* FILE_OPS_H 
*/ diff --git a/src/bin/pg_rewind/filemap.cpp b/src/bin/pg_rewind/filemap.cpp index 62c7c6129..dc21efe78 100755 --- a/src/bin/pg_rewind/filemap.cpp +++ b/src/bin/pg_rewind/filemap.cpp @@ -19,7 +19,6 @@ #include "catalog/catalog.h" #include "catalog/pg_tablespace.h" #include "common/fe_memutils.h" -#include "compressed_rewind.h" #include "storage/cu.h" #include "storage/smgr/fd.h" @@ -85,6 +84,23 @@ const char *excludeFiles[] = { "pg_dw", "pg_dw_single", "pg_dw.build", + "pg_dw_meta", + "pg_dw_0", + "pg_dw_1", + "pg_dw_2", + "pg_dw_3", + "pg_dw_4", + "pg_dw_5", + "pg_dw_6", + "pg_dw_7", + "pg_dw_8", + "pg_dw_9", + "pg_dw_10", + "pg_dw_11", + "pg_dw_12", + "pg_dw_13", + "pg_dw_14", + "pg_dw_15", "cacert.pem", "server.crt", "server.key", @@ -131,8 +147,7 @@ void filemapInit(void) filemaptarget = filemap_create(); } -void processTargetFileMap(const char* path, file_type_t type, size_t oldsize, const char* link_target, - const RewindCompressInfo* info) +void processTargetFileMap(const char* path, file_type_t type, size_t oldsize, const char* link_target) { file_entry_t* entry = NULL; filemap_t* map = filemaptarget; @@ -148,8 +163,6 @@ void processTargetFileMap(const char* path, file_type_t type, size_t oldsize, co entry->pagemap.bitmap = NULL; entry->pagemap.bitmapsize = 0; - COPY_REWIND_COMPRESS_INFO(entry, info, info == NULL ? 0 : info->oldBlockNumber, 0) - if (map->last != NULL) { map->last->next = entry; map->last = entry; @@ -218,7 +231,7 @@ BuildErrorCode targetFilemapProcess(void) filemap_t* map = filemaptarget; for (i = 0; i < map->narray; i++) { entry = map->array[i]; - process_target_file(entry->path, entry->type, entry->oldsize, entry->link_target, &entry->rewindCompressInfo); + process_target_file(entry->path, entry->type, entry->oldsize, entry->link_target); } return BUILD_SUCCESS; } @@ -301,7 +314,7 @@ static inline bool is_skip_tblspc(const char* path, file_type_t type) if (strstr(path, TABLESPACE_VERSION_DIRECTORY) != NULL && strstr(path, pgxcnodename) == NULL) { return true; } - + /* * Skip invalid tblspc oid */ @@ -329,8 +342,7 @@ static bool process_source_file_sanity_check(const char* path, file_type_t type) * action needs to be taken for the file, depending on whether the file * exists in the target and whether the size matches. */ -void process_source_file(const char* path, file_type_t type, size_t newsize, const char* link_target, - RewindCompressInfo* info) +void process_source_file(const char* path, file_type_t type, size_t newsize, const char* link_target) { bool exists = false; char localpath[MAXPGPATH]; @@ -338,7 +350,6 @@ void process_source_file(const char* path, file_type_t type, size_t newsize, con filemap_t* map = filemap; file_action_t action = FILE_ACTION_NONE; size_t oldsize = 0; - BlockNumber oldBlockNumber = 0; file_entry_t* entry = NULL; int ss_c = 0; bool isreldatafile = false; @@ -489,21 +500,7 @@ void process_source_file(const char* path, file_type_t type, size_t newsize, con * replayed. 
*/ /* mod blocksize 8k to avoid half page write */ - RewindCompressInfo oldRewindCompressInfo; - bool sourceCompressed = info != NULL; - bool targetCompressed = ProcessLocalPca(path, &oldRewindCompressInfo); - if (sourceCompressed && !targetCompressed) { - info->compressed = false; - action = FILE_ACTION_REMOVE; - break; - } else if (!sourceCompressed && targetCompressed) { - info = &oldRewindCompressInfo; - action = FILE_ACTION_REMOVE; - break; - } else if (sourceCompressed && targetCompressed) { - oldBlockNumber = oldRewindCompressInfo.oldBlockNumber; - oldsize = oldBlockNumber * BLCKSZ; - } + oldsize = statbuf.oldsize; if (oldsize % BLOCKSIZE != 0) { oldsize = oldsize - (oldsize % BLOCKSIZE); pg_log(PG_PROGRESS, "target file size mod BLOCKSIZE not equal 0 %s %ld \n", path, statbuf.oldsize); @@ -534,8 +531,6 @@ void process_source_file(const char* path, file_type_t type, size_t newsize, con entry->pagemap.bitmapsize = 0; entry->isrelfile = isreldatafile; - COPY_REWIND_COMPRESS_INFO(entry, info, oldBlockNumber, info == NULL ? 0 : info->newBlockNumber) - if (map->last != NULL) { map->last->next = entry; map->last = entry; @@ -551,8 +546,7 @@ void process_source_file(const char* path, file_type_t type, size_t newsize, con * marks target data directory's files that didn't exist in the source for * deletion. */ -void process_target_file(const char* path, file_type_t type, size_t oldsize, const char* link_target, - const RewindCompressInfo* info) +void process_target_file(const char* path, file_type_t type, size_t oldsize, const char* link_target) { bool exists = false; file_entry_t key; @@ -581,7 +575,7 @@ void process_target_file(const char* path, file_type_t type, size_t oldsize, con */ for (int excludeIdx = 0; excludeFiles[excludeIdx] != NULL; excludeIdx++) { if (strstr(path, excludeFiles[excludeIdx]) != NULL) { - pg_log(PG_DEBUG, "entry \"%s\" excluded from target file list\n", path); + pg_log(PG_DEBUG, "entry \"%s\" excluded from target file list", path); return; } } @@ -633,8 +627,6 @@ void process_target_file(const char* path, file_type_t type, size_t oldsize, con entry->pagemap.bitmapsize = 0; entry->isrelfile = isRelDataFile(path); - COPY_REWIND_COMPRESS_INFO(entry, info, info == NULL ? 
0 : info->oldBlockNumber, 0) - if (map->last == NULL) map->first = entry; else @@ -1225,14 +1217,14 @@ static char* relpathbackend_t(RelFileNode rnode, BackendId backend, ForkNumber f path, pathlen, pathlen - 1, "base/%u/%u_%s", rnode.dbNode, rnode.relNode, forkNames_t[forknum]); } else { ss_c = snprintf_s( - path, pathlen, pathlen - 1, "base/%u/%u_b%d_%s", rnode.dbNode, rnode.relNode, + path, pathlen, pathlen - 1, "base/%u/%u_b%d_%s", rnode.dbNode, rnode.relNode, rnode.bucketNode, forkNames_t[forknum]); } } else { if (!IsBucketFileNode(rnode)) { ss_c = snprintf_s(path, pathlen, pathlen - 1, "base/%u/%u", rnode.dbNode, rnode.relNode); } else { - ss_c = snprintf_s(path, pathlen, pathlen - 1, "base/%u/%u_b%d", + ss_c = snprintf_s(path, pathlen, pathlen - 1, "base/%u/%u_b%d", rnode.dbNode, rnode.relNode, rnode.bucketNode); } } @@ -1411,7 +1403,7 @@ bool check_base_path(const char *fname, unsigned int *segNo, RelFileNode *rnode) rnode->dbNode = InvalidOid; rnode->relNode = InvalidOid; rnode->bucketNode = InvalidBktId; - + /* Column Store Table File Format Checking */ nmatch = sscanf_s(fname, "base/%u/%u_C%d.%u", &rnode->dbNode, &rnode->relNode, &columnid, segNo); @@ -1487,7 +1479,7 @@ bool check_abs_tblspac_path(const char *fname, unsigned int *segNo, RelFileNode rnode->dbNode = InvalidOid; rnode->relNode = InvalidOid; rnode->bucketNode = InvalidBktId; - + nmatch = sscanf_s(fname, "PG_9.2_201611171_%[^/]/%u/%u_C%d.%u", buf, sizeof(buf), &rnode->dbNode, &rnode->relNode, &columnid, segNo); if (nmatch == MATCH_FIVE) { diff --git a/src/bin/pg_rewind/filemap.h b/src/bin/pg_rewind/filemap.h index f4e0c9ac5..ad3868ae0 100644 --- a/src/bin/pg_rewind/filemap.h +++ b/src/bin/pg_rewind/filemap.h @@ -8,7 +8,6 @@ #ifndef FILEMAP_H #define FILEMAP_H -#include "compressed_common.h" #include "storage/smgr/relfilenode.h" #include "storage/buf/block.h" @@ -43,9 +42,6 @@ typedef struct file_entry_t { file_action_t action; - /* for compressed table */ - RewindCompressInfo rewindCompressInfo; - /* for a regular file */ size_t oldsize; size_t newsize; @@ -100,13 +96,9 @@ extern void print_filemap(void); extern void print_filemap_to_file(FILE* file); /* Functions for populating the filemap */ -extern void process_source_file(const char* path, file_type_t type, size_t newsize, const char* link_target, - RewindCompressInfo* rewindCompressInfo = nullptr); -extern void process_target_file(const char* path, file_type_t type, size_t newsize, const char* link_target, - const RewindCompressInfo* rewindCompressInfo = nullptr); +extern void process_source_file(const char* path, file_type_t type, size_t newsize, const char* link_target); +extern void process_target_file(const char* path, file_type_t type, size_t newsize, const char* link_target); extern void process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno); -extern void process_waldata_change( - ForkNumber forknum, RelFileNode rnode, StorageEngine store, off_t file_offset, size_t data_size); extern void filemap_finalize(void); extern int targetFilemapSearch(const char* path, file_entry_t* entry); extern bool isRelDataFile(const char* path); diff --git a/src/bin/pg_rewind/nls.mk b/src/bin/pg_rewind/nls.mk index 4f0200b15..d6eb536b5 100644 --- a/src/bin/pg_rewind/nls.mk +++ b/src/bin/pg_rewind/nls.mk @@ -1,7 +1,7 @@ # src/bin/pg_rewind/nls.mk CATALOG_NAME = pg_rewind AVAIL_LANGUAGES =de -GETTEXT_FILES = copy_fetch.c datapagemap.c fetch.c file_ops.c filemap.c libpq_fetch.c logging.c parsexlog.c pg_rewind.c compressed_rewind.c timeline.c 
../../common/fe_memutils.c ../../common/restricted_token.c ../../../src/backend/access/transam/xlogreader.c +GETTEXT_FILES = copy_fetch.c datapagemap.c fetch.c file_ops.c filemap.c libpq_fetch.c logging.c parsexlog.c pg_rewind.c timeline.c ../../common/fe_memutils.c ../../common/restricted_token.c ../../../src/backend/access/transam/xlogreader.c GETTEXT_TRIGGERS = pg_log:2 pg_fatal report_invalid_record:2 GETTEXT_FLAGS = pg_log:2:c-format \ diff --git a/src/bin/pg_rewind/pg_rewind.cpp b/src/bin/pg_rewind/pg_rewind.cpp index 0281f8b45..63639ed79 100755 --- a/src/bin/pg_rewind/pg_rewind.cpp +++ b/src/bin/pg_rewind/pg_rewind.cpp @@ -252,7 +252,7 @@ BuildErrorCode gs_increment_build(const char* pgdata, const char* connstr, char* chkpttli); XLByteToSeg(chkptredo, checkSeg); - XLogFileName(divergeXlogFileName, chkpttli, checkSeg); + XLogFileName(divergeXlogFileName, MAXFNAMELEN, chkpttli, checkSeg); pg_log(PG_PROGRESS, "diverge xlogfile is %s, older ones will not be copied or removed.\n", divergeXlogFileName); if (libpqRotateCbmFile(conn, chkptredo) != true) { @@ -696,7 +696,7 @@ static void rewind_dw_file() char* unaligned_buf = NULL; /* Delete the dw file, if it exists. */ - rc = snprintf_s(dw_file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", datadir_target, DW_FILE_NAME); + rc = snprintf_s(dw_file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", datadir_target, OLD_DW_FILE_NAME); securec_check_ss_c(rc, "\0", "\0"); if (realpath(dw_file_path, real_file_path) == NULL) { if (real_file_path[0] == '\0') { diff --git a/src/bin/pg_upgrade/CMakeLists.txt b/src/bin/pg_upgrade/CMakeLists.txt index d04a44fb2..a838ed5db 100755 --- a/src/bin/pg_upgrade/CMakeLists.txt +++ b/src/bin/pg_upgrade/CMakeLists.txt @@ -16,7 +16,7 @@ set(TGT_format_INC set(format_DEF_OPTIONS ${MACRO_OPTIONS}) set(format_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(format_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(format_LINK_LIBS libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz) +set(format_LINK_LIBS libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz) add_bintarget(pg_format_cu TGT_format_SRC TGT_format_INC "${format_DEF_OPTIONS}" "${format_COMPILE_OPTIONS}" "${format_LINK_OPTIONS}" "${format_LINK_LIBS}") add_dependencies(pg_format_cu pgport_static) target_link_directories(pg_format_cu PUBLIC diff --git a/src/bin/pgxc_clean/CMakeLists.txt b/src/bin/pgxc_clean/CMakeLists.txt index 8471534e2..c8f40077a 100755 --- a/src/bin/pgxc_clean/CMakeLists.txt +++ b/src/bin/pgxc_clean/CMakeLists.txt @@ -16,7 +16,10 @@ set(TGT_clean_INC set(clean_DEF_OPTIONS ${MACRO_OPTIONS} -D_FORTIFY_SOURCE=2) set(clean_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS} -O2) set(clean_LINK_OPTIONS ${BIN_LINK_OPTIONS} -s) -set(clean_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -lpq) +set(clean_LINK_LIBS libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -lrt -lz -lminiunz -lpq) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND clean_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(gs_clean TGT_clean_SRC TGT_clean_INC "${clean_DEF_OPTIONS}" "${clean_COMPILE_OPTIONS}" "${clean_LINK_OPTIONS}" 
"${clean_LINK_LIBS}") add_dependencies(gs_clean elog_static pgport_static pq) target_link_directories(gs_clean PUBLIC diff --git a/src/bin/pgxc_clean/Makefile b/src/bin/pgxc_clean/Makefile index 37e0940d0..65105f66f 100644 --- a/src/bin/pgxc_clean/Makefile +++ b/src/bin/pgxc_clean/Makefile @@ -17,7 +17,9 @@ subdir=src/bin/pgxc_clean override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) -fpic -D_FORTIFY_SOURCE=2 -O2 CFLAGS += -Wl,-z,relro,-z,now -s -ftrapv -fPIE -LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +ifeq ($(enable_lite_mode), no) + LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +endif LDFLAGS += -pie PROGRAM= pgxc_clean diff --git a/src/bin/pgxc_clean/txninfo.cpp b/src/bin/pgxc_clean/txninfo.cpp index c7f40400f..969940067 100644 --- a/src/bin/pgxc_clean/txninfo.cpp +++ b/src/bin/pgxc_clean/txninfo.cpp @@ -92,20 +92,6 @@ int set_node_info(const char* node_name, int port, const char* host, NODE_TYPE t return 0; } -#ifndef ENABLE_LLT -node_info* find_node_info(const char* node_name) -{ - int i; - for (i = 0; i < pgxc_clean_node_count; i++) { - if (pgxc_clean_node_info[i].node_name == NULL) - continue; - if (strcmp(pgxc_clean_node_info[i].node_name, node_name) == 0) - return &pgxc_clean_node_info[i]; - } - return (NULL); -} -#endif - int find_node_index(const char* node_name) { int i; diff --git a/src/bin/pgxc_clean/txninfo.h b/src/bin/pgxc_clean/txninfo.h index 1555bc123..251bc2468 100644 --- a/src/bin/pgxc_clean/txninfo.h +++ b/src/bin/pgxc_clean/txninfo.h @@ -81,9 +81,6 @@ extern void add_txn_info( extern txn_info* find_txn_info(TransactionId gxid); extern database_info* find_database_info(const char* database_name); extern database_info* add_database_info(const char* database_name); -#ifndef ENABLE_LLT -extern node_info* find_node_info(const char* node_name); -#endif extern int find_node_index(const char* node_name); extern int set_node_info(const char* node_name, int port, const char* host, NODE_TYPE type, int index); extern TXN_STATUS check_txn_global_status(txn_info* txn, bool commit_all_prepared, bool rollback_all_prepared); diff --git a/src/bin/psql/CMakeLists.txt b/src/bin/psql/CMakeLists.txt index 99245ac0e..7bc49f377 100755 --- a/src/bin/psql/CMakeLists.txt +++ b/src/bin/psql/CMakeLists.txt @@ -63,16 +63,33 @@ set(TGT_gsql_INC ${LIBCURL_INCLUDE_PATH} ${LIBEDIT_INCLUDE_PATH} ) -set(gsql_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_CE) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(gsql_DEF_OPTIONS ${MACRO_OPTIONS} -DHAVE_CE) +else() + set(gsql_DEF_OPTIONS ${MACRO_OPTIONS}) +endif() set(gsql_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(gsql_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(gsql_LINK_LIBS utils_aes_aes libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt -lz -lminiunz -ledit -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -lcjson -lcurl -lpq_ce -lncurses) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(gsql_LINK_LIBS utils_aes_aes libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz -ledit -lcjson -lcurl -lpq_ce -lncurses) +else() + set(gsql_LINK_LIBS utils_aes_aes libelog.a libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz -ledit -lcjson -lcurl -lpq -lncurses) +endif() +if(NOT "${ENABLE_LITE_MODE}" 
STREQUAL "ON") + list(APPEND gsql_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() set(gsql_LINK_DIRS ${LIBOPENSSL_LIB_PATH} ${PROTOBUF_LIB_PATH} ${LIBPARQUET_LIB_PATH} ${LIBCURL_LIB_PATH} ${KERBEROS_LIB_PATH} ${ZLIB_LIB_PATH} ${LIBOBS_LIB_PATH} ${LIBEDIT_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${SECURE_LIB_PATH} ${KMC_LIB_PATH} ${CJSON_LIB_PATH} ${LIBCURL_LIB_PATH} ${CMAKE_BINARY_DIR}/lib) if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - set(gsql_LINK_LIBS ${gsql_LINK_LIBS} -lgs_ktool -lkmc) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(gsql_LINK_LIBS ${gsql_LINK_LIBS} -lgs_ktool -lkmc) + endif() endif() +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") set(gsql_LINK_DEPEND utils_aes_aes elog_static pq_ce pgport_static makesgml) +else() +set(gsql_LINK_DEPEND utils_aes_aes elog_static pq pgport_static makesgml) +endif() if("${ENABLE_UT}" STREQUAL "ON") add_shared_libtarget(sqla TGT_gsql_SRC TGT_gsql_INC "${gsql_DEF_OPTIONS}" "${gsql_COMPILE_OPTIONS}" "${gsql_LINK_OPTIONS}") add_dependencies(sqla ${gsql_LINK_DEPEND}) diff --git a/src/bin/psql/Makefile b/src/bin/psql/Makefile index 3a5b579ff..0915dfa17 100644 --- a/src/bin/psql/Makefile +++ b/src/bin/psql/Makefile @@ -1,144 +1,151 @@ -#------------------------------------------------------------------------- -# -# Makefile for src/bin/psql -# -# Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group -# Portions Copyright (c) 1994, Regents of the University of California -# -# src/bin/psql/Makefile -# -#------------------------------------------------------------------------- - -PGFILEDESC = "gsql - the PostgreSQL interactive terminal" -PGAPPICON=win32 - -subdir = src/bin/psql -top_builddir = ../../.. -include $(top_builddir)/src/Makefile.global - -REFDOCDIR= $(top_srcdir)/doc/src/sgml/ref - -MAKESGMLDIR = $(top_builddir)/src/common/pgxc/tools/makesgml -SGMLDIR= $(top_builddir)/doc/src/sgml - -################################################################### -# libedit component -################################################################### -LIBEDIT_HOME = $(with_3rd)/$(BINARYPATH)/libedit/$(LIB_SUPPORT_LLT) -LIBEDIT_INCLUDE_PATH = $(LIBEDIT_HOME)/include -LIBEDIT_LIB_PATH = $(LIBEDIT_HOME)/lib - -override CPPFLAGS := -I. 
-I$(srcdir) -I$(libpq_srcdir) -I$(top_srcdir)/src/bin/pg_dump -DHAVE_CE -I$(LIBEDIT_INCLUDE_PATH) $(CPPFLAGS) - -ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) -CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -L$(LIBKMC_LIB_PATH) -lkmc -endif - -$(top_builddir)/src/common/interfaces/libpq/client_logic_processor/stmt_processor.o: - $(MAKE) -C $(top_builddir)/src/common/interfaces/libpq/client_logic_processor/ stmt_processor.o ENABLE_CE=1 - -describe.o: $(top_builddir)/src/common/interfaces/libpq/client_logic_processor/stmt_processor.o - -CFLAGS += -Wl,-z,relro,-z,now -I$(CJSON_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -LDFLAGS += -L$(CJSON_LIB_PATH) -L$(LIBCURL_LIB_PATH) -L$(LIBEDIT_LIB_PATH) -LIBS += -ledit -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -lcjson -lcurl - -############################################################################## -# memory checking component -############################################################################### -ifeq ($(enable_memory_check), yes) - LIBS += -l$(MEMCHECK_LIB_NAME_ASAN) - LDFLAGS += -L$(MEMCHECK_LIB_PATH) -fsanitize=address -fsanitize=leak -fno-omit-frame-pointer - CXXFLAGS += -fsanitize=address -fsanitize=leak -fno-omit-frame-pointer -else - ifeq ($(enable_thread_check), yes) - LIBS += -l$(MEMCHECK_LIB_NAME_TSAN) - LDFLAGS += -L$(MEMCHECK_LIB_PATH) -fsanitize=thread -fno-omit-frame-pointer - CXXFLAGS += -fsanitize=thread -fno-omit-frame-pointer - endif -endif - -ifneq "$(MAKECMDGOALS)" "clean" - ifneq "$(MAKECMDGOALS)" "distclean" - ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1" - -include $(DEPEND) - endif - endif -endif -OBJS= command.o common.o help.o input.o stringutils.o mainloop.o copy.o \ - startup.o prompt.o variables.o large_obj.o print.o describe.o \ - mbprint.o dumputils.o keywords.o kwlookup.o tab-complete.o\ - sql_help.o \ - $(top_builddir)/src/lib/elog/elog.a \ - $(WIN32RES) -EXTRA_OBJS = $(top_builddir)/src/gausskernel/cbb/utils/aes/aes.o - -FLEXFLAGS = -Cfe -b -p -p - -all: submake-aes gsql - -libpq_pgport:=$(subst -lpq,-lpq_ce,$(libpq_pgport)) - -ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) -libpq_pgport += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -L$(LIBKMC_LIB_PATH) -lkmc -endif - -$(top_builddir)/src/lib/elog/elog.a: - $(MAKE) -C $(top_builddir)/src/lib/elog elog.a - -gsql: submake-libpq_ce submake-libpgport $(OBJS) - $(CC) $(CFLAGS) $(OBJS) $(EXTRA_OBJS) $(LIBS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) -lncurses -o $@$(X) - -help.o: sql_help.h - -sqla: $(OBJS) - $(CC) -fPIC -shared $(CFLAGS) $(OBJS) $(EXTRA_OBJS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -lncurses -o lib$@.so - mv lib$@.so $(top_builddir)/../distribute/test/ut/lib - -dumputils.cpp keywords.cpp: % : $(top_srcdir)/src/bin/pg_dump/% - rm -f $@ && $(LN_S) $< . - -kwlookup.cpp: % : $(top_srcdir)/src/common/backend/parser/% - rm -f $@ && $(LN_S) $< . 
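The gsql build rules above define HAVE_CE (client encryption) and link -lpq_ce only when lite mode is off, falling back to plain -lpq otherwise. A hedged sketch of how source code typically keys off such a define is below; run_statement() is a hypothetical stand-in, not actual gsql code.

```
/*
 * Sketch only: code compiled under these rules can gate client-encryption
 * ("CE") paths on HAVE_CE, which lite-mode builds leave undefined.
 * run_statement() is a hypothetical stand-in, not actual gsql code.
 */
#include <cstdio>

static void run_statement(const char* stmt)
{
#ifdef HAVE_CE
    /* full build: libpq_ce is linked and client-logic rewriting is available */
    std::printf("processing with client logic: %s\n", stmt);
#else
    /* lite-mode build: plain libpq, client logic compiled out */
    std::printf("sending as-is: %s\n", stmt);
#endif
}

int main()
{
    run_statement("SELECT 1;");
    return 0;
}
```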
- -sql_help.cpp: sql_help.h ; -sql_help.h: create_help.pl $(wildcard $(REFDOCDIR)/*.sgml) - $(MAKE) -C $(MAKESGMLDIR) - $(MAKE) -C $(SGMLDIR) sgml-files - $(PERL) $< $(REFDOCDIR) $* - -# psqlscan is compiled as part of mainloop -mainloop.o: psqlscan.inc - -psqlscan.inc: psqlscan.l -ifdef FLEX - $(FLEX) $(FLEXFLAGS) -o'$@' $< - @if [ `wc -l topt.feedback = ParseVariableBool(value); } else { - psql_error("\\pset: allowed sqlerror_handle are continue, exit\n"); - return false; + popt->topt.feedback = !popt->topt.feedback; } if (!quiet) { - if (sqlErrHandle == SQLERROR_HANDLE_CONTINUE) { - printf(_("Whenever Error behavior is continue.\n")); + if (popt->topt.feedback) { + puts(_("Showing rows count feedback.")); } else { - printf(_("Whenever Error behavior is exit.\n")); + puts(_("Rows count feedback is off.")); } } return true; } -static bool setBorder(const char* value, printQueryOpt* popt, bool quiet) -{ - if (NULL != value) - popt->topt.border = atoi(value); - - if (!quiet) - printf(_("Border style is %d.\n"), popt->topt.border); - - return true; -} - bool do_pset(const char* param, const char* value, printQueryOpt* popt, bool quiet) { size_t vallen = 0; @@ -2289,9 +2277,9 @@ bool do_pset(const char* param, const char* value, printQueryOpt* popt, bool qui } } - // set Sql Error Handle - else if (strcmp(param, "sqlerror_handle") == 0) { - if (!setSqlErrorHandle(value, vallen, quiet)) { + /* set feedback rows */ + else if (strcmp(param, "feedback") == 0) { + if(!setFeedBack(value, popt, quiet)) { return false; } } @@ -2317,9 +2305,11 @@ bool do_pset(const char* param, const char* value, printQueryOpt* popt, bool qui /* set border style/width */ else if (strcmp(param, "border") == 0) { - if(!setBorder(value, popt, quiet)) { - return false; - } + if (NULL != value) + popt->topt.border = atoi(value); + + if (!quiet) + printf(_("Border style is %d.\n"), popt->topt.border); } /* set expanded/vertical mode */ @@ -2729,6 +2719,7 @@ static void minimal_error_message(PGresult* res) destroyPQExpBuffer(msg); } +#ifndef ENABLE_LITE_MODE /* Show notice message when the password expired time will come. */ static void show_password_notify(PGconn* conn) { @@ -2793,8 +2784,11 @@ static void show_password_notify(PGconn* conn) PQclear(res1); PQclear(res2); + free((void *)date1); + free((void *)date2); return; } +#endif extern void client_server_version_check(PGconn* conn) { diff --git a/src/bin/psql/command.h b/src/bin/psql/command.h index 479c1603a..8c4e168a7 100644 --- a/src/bin/psql/command.h +++ b/src/bin/psql/command.h @@ -21,13 +21,6 @@ typedef enum _backslashResult { * resulted in an error */ } backslashResult; -typedef enum SqlErrorHandle { - SQLERROR_HANDLE_CONTINUE = 0, - SQLERROR_HANDLE_EXIT -} SqlErrorHandle; - -extern SqlErrorHandle sqlErrHandle; - extern backslashResult HandleSlashCmds(PsqlScanState scan_state, PQExpBuffer query_buf); extern int process_file(char* filename, bool single_txn, bool use_relative_path); diff --git a/src/bin/psql/common.cpp b/src/bin/psql/common.cpp index 798093380..88a01d751 100644 --- a/src/bin/psql/common.cpp +++ b/src/bin/psql/common.cpp @@ -2268,10 +2268,8 @@ static bool do_one_parallel(char* query, int fd) if (decode_pwd != NULL) { rc = memset_s(decode_pwd, strlen(decode_pwd), 0, strlen(decode_pwd)); securec_check_c(rc, "\0", "\0"); - if (decode_pwd != NULL) { - OPENSSL_free(decode_pwd); - decode_pwd = NULL; - } + OPENSSL_free(decode_pwd); + decode_pwd = NULL; // Revert the old value for next retry connection. 
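The common.cpp hunk just above drops a redundant inner NULL check around the password cleanup. A stand-alone sketch of the underlying wipe-then-free pattern follows, with plain memset()/free() standing in for the memset_s()/securec_check_c() and OPENSSL_free() calls the real code uses.

```
/*
 * Stand-alone sketch of the wipe-then-free cleanup: guard once, scrub the
 * secret, release it, and clear the pointer so retry paths cannot reuse it.
 * Plain memset()/free() stand in for memset_s()/securec_check_c() and
 * OPENSSL_free(); note that an optimizer may elide a plain memset before
 * free(), which is one reason the real code uses memset_s().
 */
#include <cstring>
#include <cstdlib>

static void wipe_and_free(char*& secret)
{
    if (secret == NULL) {
        return; /* single guard, no redundant inner re-check */
    }
    std::memset(secret, 0, std::strlen(secret));
    std::free(secret);
    secret = NULL;
}

int main()
{
    char* pwd = (char*)std::malloc(16);
    if (pwd != NULL) {
        std::strcpy(pwd, "s3cret");
        wipe_and_free(pwd);
    }
    return 0;
}
```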
pset.connInfo.values[3] = old_conninfo_values; diff --git a/src/bin/psql/copy.cpp b/src/bin/psql/copy.cpp index d21398331..881660e6d 100644 --- a/src/bin/psql/copy.cpp +++ b/src/bin/psql/copy.cpp @@ -226,7 +226,7 @@ static bool ParseParallelOption(struct copy_options* result, char** errToken) xstrcat(&result->after_tofrom, token); token = strtokx(nullptr, whitespace, ",()", NULL, 0, false, false, pset.encoding); - if (pg_strcasecmp(token, "true") == 0) { + if (pg_strcasecmp(token, "true") == 0 || pg_strcasecmp(token, "on") == 0) { result->hasHeader = true; xstrcat(&result->after_tofrom, " false"); } else { diff --git a/src/bin/psql/describe.cpp b/src/bin/psql/describe.cpp index 6a0045412..929735c0d 100644 --- a/src/bin/psql/describe.cpp +++ b/src/bin/psql/describe.cpp @@ -804,8 +804,8 @@ bool permissionsList(const char* pattern) "c.relname", NULL, "n.nspname !~ '^pg_'" - " AND c.relname not like 'matviewmap_%%'" - " AND c.relname not like 'mlog_%%'" + " AND c.relname not like 'matviewmap\\_%%'" + " AND c.relname not like 'mlog\\_%%'" " AND pg_catalog.pg_table_is_visible(c.oid)"); appendPQExpBuffer(&buf, "ORDER BY 1, 2;"); @@ -1323,6 +1323,36 @@ static bool describeOneTableDetails(const char* schemaname, const char* relation initPQExpBuffer(&tmp_part_buf); initPQExpBuffer(&fullEncryptBuffer); +#ifndef ENABLE_MULTIPLE_NODES + /* + * In describeOneTableDetails(), PQfnumber() is matched according to the lowercase column name. + * However, when uppercase_attribute_name is on, the column names in the result set will be converted to uppercase. + * So we need to turn off it temporarily, and turn on it at the end. + */ + bool uppercaseIsOn = false; + printfPQExpBuffer(&buf, "show uppercase_attribute_name;"); + res = PSQLexec(buf.data, false); + if (NULL == res) { + goto error_return; + } + + uppercaseIsOn = strcmp(PQgetvalue(res, 0, 0), "on") == 0; + + PQclear(res); + res = NULL; + + if (unlikely(uppercaseIsOn)) { + printfPQExpBuffer(&buf, "set uppercase_attribute_name=off;"); + res = PSQLexec(buf.data, false); + if (NULL == res) { + goto error_return; + } + + PQclear(res); + res = NULL; + } +#endif + /* Get general table info */ if (pset.sversion >= 90100) { printfPQExpBuffer(&buf, @@ -2915,6 +2945,16 @@ static bool describeOneTableDetails(const char* schemaname, const char* relation error_return: +#ifndef ENABLE_MULTIPLE_NODES + /* + * If uppercase_attribute_name was originally on, restore it. 
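 * (Editor's note: PQfnumber() downcases an unquoted column name before
 * comparing it against the result set, so with uppercase_attribute_name=on
 * the server reports "RELNAME" while this code asks for "relname" and gets
 * -1 back -- hence the temporary off/restore dance around this function.)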
+ */ + if (unlikely(uppercaseIsOn)) { + printfPQExpBuffer(&buf, "set uppercase_attribute_name=on;"); + res = PSQLexec(buf.data, false); + } +#endif + /* clean up */ if (printTableInitialized) { printTableCleanup(&cont); @@ -3424,8 +3464,8 @@ bool listTables(const char* tabtypes, const char* pattern, bool verbose, bool sh */ appendPQExpBuffer(&buf, " AND n.nspname !~ '^pg_toast'\n"); - appendPQExpBuffer(&buf, " AND c.relname not like 'matviewmap_%%'\n"); - appendPQExpBuffer(&buf, " AND c.relname not like 'mlog_%%'\n"); + appendPQExpBuffer(&buf, " AND c.relname not like 'matviewmap\\_%%'\n"); + appendPQExpBuffer(&buf, " AND c.relname not like 'mlog\\_%%'\n"); (void)processSQLNamePattern( pset.db, &buf, pattern, true, false, "n.nspname", "c.relname", NULL, "pg_catalog.pg_table_is_visible(c.oid)"); diff --git a/src/bin/psql/help.cpp b/src/bin/psql/help.cpp index 619a67c65..375d4cde0 100644 --- a/src/bin/psql/help.cpp +++ b/src/bin/psql/help.cpp @@ -277,6 +277,7 @@ void slashUsage(unsigned short int pager) fprintf(output, _(" \\di[S+] [PATTERN] list indexes\n")); fprintf(output, _(" \\dl list large objects, same as \\lo_list\n")); fprintf(output, _(" \\dL[S+] [PATTERN] list procedural languages\n")); + fprintf(output, _(" \\dm[S+] [PATTERN] list materialized views\n")); fprintf(output, _(" \\dn[S+] [PATTERN] list schemas\n")); fprintf(output, _(" \\do[S] [PATTERN] list operators\n")); fprintf(output, _(" \\dO[S+] [PATTERN] list collations\n")); @@ -304,8 +305,8 @@ void slashUsage(unsigned short int pager) fprintf(output, _(" \\pset NAME [VALUE] set table output option\n" " (NAME := {format|border|expanded|fieldsep|fieldsep_zero|footer|null|\n" - " numericlocale|recordsep|recordsep_zero|tuples_only|title|tableattr|pager" - "|sqlerror_handle})\n")); + " numericlocale|recordsep|recordsep_zero|tuples_only|title|tableattr|pager|\n" + " feedback})\n")); fprintf(output, _(" \\t [on|off] show only rows (currently %s)\n"), ON(pset.popt.topt.tuples_only)); fprintf(output, _(" \\T [STRING] set HTML tag attributes, or unset if none\n")); fprintf(output, diff --git a/src/bin/psql/input.cpp b/src/bin/psql/input.cpp index 5e4f5eded..8ed2f1d56 100644 --- a/src/bin/psql/input.cpp +++ b/src/bin/psql/input.cpp @@ -301,14 +301,9 @@ void setHistSize(const char* targetName, const char* targetValue, bool setToDefa void initializeInput(int flags) { #ifdef USE_READLINE - -#ifndef ENABLE_MULTIPLE_NODES flags &= useReadline; -#endif if (flags & 1) { - useReadline = true; - /* these two things must be done in this order: */ initialize_readline(); rl_variable_bind ("enable-meta-key", "off"); diff --git a/src/bin/psql/mainloop.cpp b/src/bin/psql/mainloop.cpp index 527ae9772..172167085 100644 --- a/src/bin/psql/mainloop.cpp +++ b/src/bin/psql/mainloop.cpp @@ -154,7 +154,9 @@ int MainLoop(FILE* source, char* querystring) if (pset.cur_cmd_interactive) { const char* val = GetVariable(pset.vars, "HISTSIZE"); - setHistSize("HISTSIZE", val, val == NULL); + if (val != NULL) { + setHistSize("HISTSIZE", val, false); + } } /* Create working state */ @@ -364,15 +366,10 @@ int MainLoop(FILE* source, char* querystring) } #endif success = SendQuery(query_buf->data); - bool is_handle_error = !success && (sqlErrHandle == SQLERROR_HANDLE_EXIT); - if (is_handle_error) { - exit(EXIT_FAILURE); - } // Query fail, if need retry, invoke QueryRetryController(). 
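// Editor's note, not part of the patch: the lines removed above were the
// last use of \pset sqlerror_handle (its enum and setter are deleted from
// command.h/command.cpp earlier in this diff), so a failed query no longer
// exits the client here; only the retry_on path below survives.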
// - bool is_retry_on = !success && pset.retry_on; - if (is_retry_on) { + if (!success && pset.retry_on) { success = QueryRetryController(query_buf->data); } } else { diff --git a/src/bin/psql/print.cpp b/src/bin/psql/print.cpp index 44a5f3a6d..d17471e86 100644 --- a/src/bin/psql/print.cpp +++ b/src/bin/psql/print.cpp @@ -335,7 +335,7 @@ static void print_unaligned_text(const printTableContent* cont, FILE* fout) if (cont->opt->stop_table) { printTableFooter* footers = footers_with_default(cont); - if (!opt_tuples_only && footers != NULL && !cancel_pressed) { + if (!opt_tuples_only && footers != NULL && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; for (f = footers; f != NULL; f = f->next) { @@ -408,7 +408,7 @@ static void print_unaligned_vertical(const printTableContent* cont, FILE* fout) if (cont->opt->stop_table) { /* print footers */ - if (!opt_tuples_only && cont->footers != NULL && !cancel_pressed) { + if (!opt_tuples_only && cont->footers != NULL && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; print_separator(cont->opt->recordSep, fout); @@ -968,7 +968,7 @@ static void print_aligned_text(const printTableContent* cont, FILE* fout) _print_horizontal_line(col_count, width_wrap, opt_border, PRINT_RULE_BOTTOM, format, fout); /* print footers */ - if ((footers != NULL) && !opt_tuples_only && !cancel_pressed) { + if ((footers != NULL) && !opt_tuples_only && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; for (f = footers; f != NULL; f = f->next) @@ -1229,7 +1229,7 @@ static void print_aligned_vertical(const printTableContent* cont, FILE* fout) print_aligned_vertical_line(cont, 0, hwidth, dwidth, PRINT_RULE_BOTTOM, fout); /* print footers */ - if (!opt_tuples_only && cont->footers != NULL && !cancel_pressed) { + if (!opt_tuples_only && cont->footers != NULL && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; if (opt_border < 2) @@ -1362,7 +1362,7 @@ static void print_html_text(const printTableContent* cont, FILE* fout) fputs("
\n", fout); /* print footers */ - if (!opt_tuples_only && footers != NULL && !cancel_pressed) { + if (!opt_tuples_only && footers != NULL && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; fputs("

", fout); @@ -1434,7 +1434,7 @@ static void print_html_vertical(const printTableContent* cont, FILE* fout) fputs("\n", fout); /* print footers */ - if (!opt_tuples_only && cont->footers != NULL && !cancel_pressed) { + if (!opt_tuples_only && cont->footers != NULL && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; fputs("

", fout); @@ -1565,7 +1565,7 @@ static void print_latex_text(const printTableContent* cont, FILE* fout) fputs("\\end{tabular}\n\n\\noindent ", fout); /* print footers */ - if ((footers != NULL) && !opt_tuples_only && !cancel_pressed) { + if ((footers != NULL) && !opt_tuples_only && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; for (f = footers; f != NULL; f = f->next) { @@ -1643,7 +1643,7 @@ static void print_latex_vertical(const printTableContent* cont, FILE* fout) fputs("\\end{tabular}\n\n\\noindent ", fout); /* print footers */ - if ((cont->footers != NULL) && !opt_tuples_only && !cancel_pressed) { + if ((cont->footers != NULL) && !opt_tuples_only && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; for (f = cont->footers; f != NULL; f = f->next) { @@ -1742,7 +1742,7 @@ static void print_troff_ms_text(const printTableContent* cont, FILE* fout) fputs(".TE\n.DS L\n", fout); /* print footers */ - if ((footers != NULL) && !opt_tuples_only && !cancel_pressed) { + if ((footers != NULL) && !opt_tuples_only && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; for (f = footers; f != NULL; f = f->next) { @@ -1837,7 +1837,7 @@ static void print_troff_ms_vertical(const printTableContent* cont, FILE* fout) fputs(".TE\n.DS L\n", fout); /* print footers */ - if ((cont->footers != NULL) && !opt_tuples_only && !cancel_pressed) { + if ((cont->footers != NULL) && !opt_tuples_only && !cancel_pressed && cont->opt->feedback) { printTableFooter* f = NULL; for (f = cont->footers; f != NULL; f = f->next) { diff --git a/src/bin/psql/print.h b/src/bin/psql/print.h index 89f301ded..74fc9c070 100644 --- a/src/bin/psql/print.h +++ b/src/bin/psql/print.h @@ -88,6 +88,7 @@ typedef struct printTableOpt { int encoding; /* character encoding */ int env_columns; /* $COLUMNS on psql start, 0 is unset */ int columns; /* target width for wrapped format */ + bool feedback; /* don't output row counts, etc. */ } printTableOpt; /* diff --git a/src/bin/psql/psqlscan.l b/src/bin/psql/psqlscan.l index 0fd34d31d..82af110e9 100644 --- a/src/bin/psql/psqlscan.l +++ b/src/bin/psql/psqlscan.l @@ -194,6 +194,7 @@ static void init_gram_state(PsqlScanState state); static void analyze_state(const char* text,PsqlScanState state); static int upperstrcmp(const char *str1,const char *str2); static GramIdentify keywordRead(const char* text); +static bool IsTranscationTokens(char* yytext, char* token); #define YY_DECL int yylex(PsqlScanState lex_param) @@ -637,13 +638,13 @@ other . 
if (lex_param->begin_state == BEGIN_UNDEFINED) { /* For a transaction statement, all possible tokens after BEGIN are here */ - if (strncasecmp(yytext, "transaction", strlen("transaction")) == 0 || - strncasecmp(yytext, "work", strlen("work")) == 0 || - strncasecmp(yytext, "isolation", strlen("isolation")) == 0 || - strncasecmp(yytext, "read", strlen("read")) == 0 || - strncasecmp(yytext, "deferrable", strlen("deferrable")) == 0 || - strncasecmp(yytext, "not", strlen("not")) == 0 || - strncasecmp(yytext, ";", strlen(";")) == 0) + if (IsTranscationTokens(yytext, "transaction") || + IsTranscationTokens(yytext, "work") || + IsTranscationTokens(yytext, "isolation") || + IsTranscationTokens(yytext, "read") || + IsTranscationTokens(yytext, "deferrable") || + IsTranscationTokens(yytext, "not") || + strncasecmp(yytext, ";", strlen(";")) == 0) lex_param->begin_state = BEGIN_TRANSACTION; else lex_param->begin_state = BEGIN_ANOYBLOCK; @@ -2041,6 +2042,23 @@ extract_substring(const char *txt, int len) return result; } +static bool IsTranscationTokens(char* yytext, char* token) +{ + int token_len = strlen(token); + if (strncasecmp(yytext, token, token_len) == 0) { + if (strlen(yytext) > token_len) { + if ((yytext[token_len] >= '0' && yytext[token_len] <= '9') || + (yytext[token_len] >= 'a' && yytext[token_len] <= 'z') || + (yytext[token_len] >= 'A' && yytext[token_len] <= 'Z') || + yytext[token_len] == '_') { + return false; + } + } + return true; + } + return false; +} + /* * escape_variable --- process :'VARIABLE' or :"VARIABLE" * @@ -2235,7 +2253,7 @@ analyze_state(const char* text,PsqlScanStateData* state) state->gram_state[IDEN_AS] = true; state->gram_state[IDEN_IS] = true; state->gram_state[IDEN_DETERMINISTIC] = true; - state->count_to_read = 7; + state->count_to_read = -1; break; case IDEN_DETERMINISTIC: /* when meet a DETERMINISTIC, we can read max identify number is 6 */ diff --git a/src/bin/psql/startup.cpp b/src/bin/psql/startup.cpp index 8482dd638..a444a3d70 100644 --- a/src/bin/psql/startup.cpp +++ b/src/bin/psql/startup.cpp @@ -37,6 +37,7 @@ #ifndef WIN32 #include "libpq/libpq-int.h" #endif +#include "nodes/pg_list.h" /* * Global psql options @@ -125,6 +126,150 @@ static void set_aes_key(const char* dencrypt_key); #define PARAMS_ARRAY_SIZE 11 #endif +#ifdef ENABLE_LITE_MODE +void pg_free(void* ptr) +{ + if (ptr != NULL) { + free(ptr); + ptr = NULL; + } +} + +static void list_free_private(List* list, bool deep) +{ + ListCell* cell = NULL; + + cell = list_head(list); + while (cell != NULL) { + ListCell* tmp = cell; + + cell = lnext(cell); + if (deep) { + pg_free(lfirst(tmp)); + } + pg_free(tmp); + } + + if (list != NULL) { + pg_free(list); + } +} + + +void list_free(List* list) +{ + list_free_private(list, false); +} + +static List* new_list(NodeTag type) +{ + List* new_list_val = NULL; + ListCell* new_head = NULL; + + new_head = (ListCell*)pg_malloc(sizeof(*new_head)); + new_head->next = NULL; + /* new_head->data is left undefined! 
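 * (editor's note: the callers below -- lappend() and lcons() -- assign the
 * head cell through lfirst() as soon as new_list() returns, so the field is
 * never read while undefined)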
*/ + + new_list_val = (List*)pg_malloc(sizeof(*new_list_val)); + new_list_val->type = type; + new_list_val->length = 1; + new_list_val->head = new_head; + new_list_val->tail = new_head; + + return new_list_val; +} + +static void new_tail_cell(List* list) +{ + ListCell* new_tail = NULL; + + new_tail = (ListCell*)pg_malloc(sizeof(*new_tail)); + new_tail->next = NULL; + + list->tail->next = new_tail; + list->tail = new_tail; + list->length++; +} + +List* lappend(List* list, void* datum) +{ + if (list == NIL) + list = new_list(T_List); + else + new_tail_cell(list); + + lfirst(list->tail) = datum; + return list; +} + +static void new_head_cell(List* list) +{ + ListCell* new_head = NULL; + + new_head = (ListCell*)pg_malloc(sizeof(*new_head)); + new_head->next = list->head; + + list->head = new_head; + list->length++; +} + +List* lcons(void* datum, List* list) +{ + if (list == NIL) { + list = new_list(T_List); + } else { + new_head_cell(list); + } + + lfirst(list->head) = datum; + return list; +} + +List* list_delete_cell(List* list, ListCell* cell, ListCell* prev) +{ + Assert(prev != NULL ? lnext(prev) == cell : list_head(list) == cell); + + /* + * If we're about to delete the last node from the list, free the whole + * list instead and return NIL, which is the only valid representation of + * a zero-length list. + */ + if (list->length == 1) { + list_free(list); + return NIL; + } + + /* + * Otherwise, adjust the necessary list links, deallocate the particular + * node we have just removed, and return the list we were given. + */ + list->length--; + + if (prev != NULL) { + prev->next = cell->next; + } else { + list->head = cell->next; + } + + if (list->tail == cell) { + list->tail = prev; + } + + cell->next = NULL; + pg_free(cell); + return list; +} + +List* list_delete_first(List* list) +{ + if (list == NIL) { + return NIL; /* would an error be better? 
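 * (editor's note: upstream PostgreSQL's list_delete_first() asks the same
 * question and likewise settles on NIL-in/NIL-out, so this lite-mode copy
 * keeps that contract)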
*/ + } + + return list_delete_cell(list, list_head(list), NULL); +} +#endif + #ifndef ENABLE_MULTIPLE_NODES static bool IsPrimaryOfCentralizedCluster(PGconn *conn) { @@ -308,6 +453,7 @@ int main(int argc, char* argv[]) #endif /* We rely on unmentioned fields of pset.popt to start out 0/false/NULL */ pset.popt.topt.format = PRINT_ALIGNED; + pset.popt.topt.feedback = true; pset.popt.topt.border = 1; pset.popt.topt.pager = 1; pset.popt.topt.start_table = true; @@ -756,32 +902,32 @@ static void get_password_pipeline(struct adhoc_opts* options) } #ifndef ENABLE_MULTIPLE_NODES -static void TrimHost(char *src) +static char* TrimHost(char* src) { - char *begin = src; - char *end = src + strlen(src); - if (begin == end) { - return; + char* s = 0; + char* e = 0; + char* c = 0; + + for (c = src; (c != NULL) && (*c != '\0'); ++c) { + if (isspace(*c)) { + if (e == NULL) { + e = c; + } + } else { + if (s == NULL) { + s = c; + } + e = 0; + } } - while (isspace(*begin)) { - ++begin; + if (s == NULL) { + s = src; } - while (isspace(*end)) { - --end; + if (e != NULL) { + *e = 0; } - if (begin > end) { - *src = 0; - return; - } - while (begin != end) { - *src = *begin; - src++; - begin++; - } - *src = *end; - src++; - *src = '\0'; - return; + + return s; } static void ParseHostArg(const char *arg, struct adhoc_opts *options) @@ -796,8 +942,7 @@ static void ParseHostArg(const char *arg, struct adhoc_opts *options) char *inputStr = pg_strdup(arg); char *host = NULL; for (char *subStr = strtok(inputStr, sep); subStr != NULL; subStr = strtok(NULL, sep)) { - host = pg_strdup(subStr); - TrimHost(host); + host = pg_strdup(TrimHost(subStr)); if (strlen(host) == 0) { free(host); continue; @@ -869,6 +1014,7 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* bool is_action_file = false; /* Database Security: Data importing/dumping support AES128. */ char* dencrypt_key = NULL; + char* needmask = NULL; errno_t rc = EOK; #ifdef USE_READLINE useReadline = false; @@ -908,7 +1054,9 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* } break; case 'd': - options->dbname = optarg; + options->dbname = pg_strdup(optarg); + dbname_alloced = true; + needmask = optarg; break; case 'e': if (!SetVariable(pset.vars, "ECHO", "queries")) { @@ -957,6 +1105,7 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* rc = memset_s(optarg, strlen(optarg), 0, strlen(optarg)); check_memset_s(rc); set_aes_key(dencrypt_key); + free(dencrypt_key); break; } case 'L': @@ -1115,27 +1264,9 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* */ while (argc - optind >= 1) { if (options->dbname == NULL) { - char *temp = NULL; - options->dbname = argv[optind]; - if ((temp = strstr(argv[optind], "password")) != NULL) { - options->dbname = pg_strdup(argv[optind]); - rc = memset_s(temp + PASSWORD_STR_LEN, strlen(argv[optind]), - 0, strlen(temp + PASSWORD_STR_LEN)); - check_memset_s(rc); - dbname_alloced = true; - } - /* mask informations in URI string. 
*/ - if (strncmp(options->dbname, "postgresql://", strlen("postgresql://")) == 0) { - options->dbname = pg_strdup(argv[optind]); - char *off_argv = argv[optind] + strlen("postgresql://"); - rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); - check_memset_s(rc); - } else if (strncmp(options->dbname, "postgres://", strlen("postgres://")) == 0) { - options->dbname = pg_strdup(argv[optind]); - char *off_argv = argv[optind] + strlen("postgres://"); - rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); - check_memset_s(rc); - } + options->dbname = pg_strdup(argv[optind]); + dbname_alloced = true; + needmask = argv[optind]; } else if (options->username == NULL) { options->username = argv[optind]; } else if (!pset.quiet) { @@ -1144,6 +1275,26 @@ static void parse_psql_options(int argc, char* const argv[], struct adhoc_opts* } optind++; } + + if (needmask != NULL) { + /* mask informations in URI string. */ + if (strncmp(options->dbname, "postgresql://", strlen("postgresql://")) == 0) { + char *off_argv = needmask + strlen("postgresql://"); + rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); + check_memset_s(rc); + } else if (strncmp(options->dbname, "postgres://", strlen("postgres://")) == 0) { + char *off_argv = needmask + strlen("postgres://"); + rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); + check_memset_s(rc); + } + /* mask password */ + char *temp = NULL; + if ((temp = strstr(needmask, "password")) != NULL) { + char *off_argv = temp + PASSWORD_STR_LEN; + rc = memset_s(off_argv, strlen(off_argv), '*', strlen(off_argv)); + check_memset_s(rc); + } + } } /* @@ -1556,4 +1707,4 @@ static void set_aes_key(const char* dencrypt_key) MAX_KEY_LEN); exit(EXIT_FAILURE); } -} \ No newline at end of file +} diff --git a/src/bin/scripts/CMakeLists.txt b/src/bin/scripts/CMakeLists.txt index 824da3068..ed96a3f0b 100755 --- a/src/bin/scripts/CMakeLists.txt +++ b/src/bin/scripts/CMakeLists.txt @@ -95,8 +95,10 @@ set(TGT_scripts_INC set(scripts_DEF_OPTIONS ${MACRO_OPTIONS}) set(scripts_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(scripts_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(scripts_LINK_LIBS libelog.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt -lz -lminiunz -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) - +set(scripts_LINK_LIBS libelog.a libpgport.a -lpq -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND scripts_LINK_LIBS -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +endif() add_bintarget(createdb TGT_createdb_SRC TGT_scripts_INC "${scripts_DEF_OPTIONS}" "${scripts_COMPILE_OPTIONS}" "${scripts_LINK_OPTIONS}" "${scripts_LINK_LIBS}") add_dependencies(createdb elog_static pgport_static pq) target_link_directories(createdb PUBLIC diff --git a/src/bin/scripts/Makefile b/src/bin/scripts/Makefile index 220a526d4..34b37033d 100644 --- a/src/bin/scripts/Makefile +++ b/src/bin/scripts/Makefile @@ -21,7 +21,9 @@ PROGRAMS = createdb createlang createuser dropdb droplang dropuser clusterdb vac override CPPFLAGS := -I$(top_srcdir)/src/bin/pg_dump -I$(top_srcdir)/src/bin/psql -I$(libpq_srcdir) $(CPPFLAGS) CFLAGS += -Wl,-z,relro,-z,now -LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +ifeq 
($(enable_lite_mode), no) + LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +endif ifneq "$(MAKECMDGOALS)" "clean" ifneq "$(MAKECMDGOALS)" "distclean" diff --git a/src/common/backend/Makefile b/src/common/backend/Makefile index 8409b1305..8089d8fda 100644 --- a/src/common/backend/Makefile +++ b/src/common/backend/Makefile @@ -8,10 +8,6 @@ subdir = src/common/backend top_builddir = ../../.. include $(top_builddir)/src/Makefile.global -ifeq ($(enable_multiple_nodes), yes) -SUBDIRS = catalog client_logic lib libpq nodes parser port regex snowball tsearch utils -else SUBDIRS = catalog client_logic lib libpq nodes parser port regex snowball tsearch utils pgxc_single -endif include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/common/backend/catalog/CMakeLists.txt b/src/common/backend/catalog/CMakeLists.txt index 61e6df6e0..3a6ae4992 100755 --- a/src/common/backend/catalog/CMakeLists.txt +++ b/src/common/backend/catalog/CMakeLists.txt @@ -14,8 +14,9 @@ set(POSTGRES_BKI_SRCS_S @gs_client_global_keys_args.h @pg_job.h @gs_asp.h @pg_job_proc.h @pg_extension_data_source.h @pg_statistic_ext.h @pg_object.h @pg_synonym.h @toasting.h @indexing.h @gs_obsscaninfo.h @pg_directory.h @pg_hashbucket.h @gs_global_chain.h @gs_global_config.h @pg_streaming_stream.h @pg_streaming_cont_query.h @pg_streaming_reaper_status.h @gs_matview.h @gs_matview_dependency.h @pgxc_slice.h -@gs_opt_model.h @pg_recyclebin.h @pg_snapshot.h @gs_model.h @gs_package.h @gs_job_argument.h @gs_job_attribute.h +@gs_opt_model.h @pg_recyclebin.h @pg_snapshot.h @gs_model.h @gs_package.h @gs_job_argument.h @gs_job_attribute.h @pg_uid.h @gs_db_privilege.h @pg_replication_origin.h @pg_publication.h @pg_publication_rel.h @pg_subscription.h" + ) string(REPLACE "@" "${PROJECT_SRC_DIR}/include/catalog/" POSTGRES_BKI_SRCS ${POSTGRES_BKI_SRCS_S}) diff --git a/src/common/backend/catalog/Makefile b/src/common/backend/catalog/Makefile index c1147e0ae..71eb2548d 100644 --- a/src/common/backend/catalog/Makefile +++ b/src/common/backend/catalog/Makefile @@ -23,8 +23,8 @@ OBJS = catalog.o dependency.o heap.o index.o indexing.o namespace.o aclchk.o \ pg_operator.o gs_package.o pg_proc.o pg_range.o pg_db_role_setting.o pg_shdepend.o pg_synonym.o\ pg_type.o pgxc_class.o storage.o storage_gtt.o toasting.o pg_job.o pg_partition.o\ pg_hashbucket.o cstore_ctlg.o dfsstore_ctlg.o pg_builtin_proc.o streaming_stream.o\ - gs_matview.o pgxc_slice.o pg_job_proc.o gs_job_argument.o gs_job_attribute.o\ - pg_publication.o pg_subscription.o + gs_matview.o pgxc_slice.o pg_job_proc.o gs_job_argument.o gs_job_attribute.o pg_uid.o gs_global_config.o\ + gs_db_privilege.o pg_publication.o pg_subscription.o BKIFILES = postgres.bki postgres.description postgres.shdescription @@ -59,9 +59,9 @@ POSTGRES_BKI_SRCS = $(addprefix $(top_srcdir)/src/include/catalog/,\ toasting.h indexing.h gs_obsscaninfo.h pg_directory.h pg_hashbucket.h gs_global_chain.h gs_global_config.h\ pg_streaming_stream.h pg_streaming_cont_query.h pg_streaming_reaper_status.h gs_matview.h\ gs_matview_dependency.h pgxc_slice.h gs_opt_model.h gs_model.h\ - pg_recyclebin.h pg_snapshot.h gs_job_argument.h gs_job_attribute.h\ + pg_recyclebin.h pg_snapshot.h gs_job_argument.h gs_job_attribute.h pg_uid.h gs_db_privilege.h\ pg_replication_origin.h pg_publication.h pg_publication_rel.h pg_subscription.h\ - ) + ) # location of Catalog.pm catalogdir = $(top_srcdir)/src/common/backend/catalog diff --git a/src/common/backend/catalog/aclchk.cpp 
b/src/common/backend/catalog/aclchk.cpp index 27bcbca6a..07d448e12 100644 --- a/src/common/backend/catalog/aclchk.cpp +++ b/src/common/backend/catalog/aclchk.cpp @@ -20,6 +20,7 @@ #include "access/genam.h" #include "access/heapam.h" +#include "access/reloptions.h" #include "access/sysattr.h" #include "access/transam.h" #include "access/xact.h" @@ -58,6 +59,7 @@ #include "catalog/pg_extension_data_source.h" #include "catalog/gs_global_chain.h" #include "catalog/gs_global_config.h" +#include "catalog/gs_db_privilege.h" #include "commands/dbcommands.h" #include "commands/proclang.h" #include "commands/sec_rls_cmds.h" @@ -184,6 +186,7 @@ const struct AclObjKind { {ACL_KIND_SEQUENCE, ACL_ALL_RIGHTS_SEQUENCE, ACL_ALL_DDL_RIGHTS_SEQUENCE}, {ACL_KIND_DATABASE, ACL_ALL_RIGHTS_DATABASE, ACL_ALL_DDL_RIGHTS_DATABASE}, {ACL_KIND_PROC, ACL_ALL_RIGHTS_FUNCTION, ACL_ALL_DDL_RIGHTS_FUNCTION}, + {ACL_KIND_PACKAGE, ACL_ALL_RIGHTS_PACKAGE, ACL_ALL_DDL_RIGHTS_PACKAGE}, {ACL_KIND_LANGUAGE, ACL_ALL_RIGHTS_LANGUAGE, ACL_ALL_DDL_RIGHTS_LANGUAGE}, {ACL_KIND_LARGEOBJECT, ACL_ALL_RIGHTS_LARGEOBJECT, ACL_ALL_DDL_RIGHTS_LARGEOBJECT}, {ACL_KIND_NAMESPACE, ACL_ALL_RIGHTS_NAMESPACE, ACL_ALL_DDL_RIGHTS_NAMESPACE}, @@ -247,7 +250,7 @@ static List* getRelationsInNamespace(Oid namespaceId, char relkind); static void expand_col_privileges( List* colnames, Oid table_oid, AclMode this_privileges, AclMode* col_privileges, int num_col_privileges); static void expand_all_col_privileges( - Oid table_oid, Form_pg_class classForm, AclMode this_privileges, AclMode* col_privileges, int num_col_privileges, bool relhasbucket); + Oid table_oid, Form_pg_class classForm, AclMode this_privileges, AclMode* col_privileges, int num_col_privileges, bool relhasbucket, bool relhasuids); static AclMode string_to_privilege(const char* privname); static const char* privilege_to_string(AclMode privilege); static void restrict_and_check_grant(AclMode* this_privileges, bool is_grant, @@ -1218,6 +1221,10 @@ void ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt* stmt) all_privileges = ACL_ALL_RIGHTS_PACKAGE; all_ddl_privileges = ACL_ALL_DDL_RIGHTS_PACKAGE; errormsg = gettext_noop("invalid privilege type %s for package"); + ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("ALTER DEFAULT PRIVILEGES does not support packages yet."), errdetail("N/A"), + errcause("ALTER DEFAULT PRIVILEGES does not support packages yet."), + erraction("N/A"))); break; case ACL_OBJECT_TYPE: all_privileges = ACL_ALL_RIGHTS_TYPE; @@ -1796,7 +1803,7 @@ static void expand_col_privileges( * FirstLowInvalidHeapAttributeNumber, up to relation's last attribute. 
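 * (editor's note: the new relhasuids flag makes the loop below skip the
 * hidden uid system column when the relation's reloptions do not enable
 * uids -- see the UidAttributeNumber check and the StdRdOptionsHasUids()
 * lookup added in this hunk)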
*/ static void expand_all_col_privileges( - Oid table_oid, Form_pg_class classForm, AclMode this_privileges, AclMode* col_privileges, int num_col_privileges, bool relhasbucket) + Oid table_oid, Form_pg_class classForm, AclMode this_privileges, AclMode* col_privileges, int num_col_privileges, bool relhasbucket, bool relhasuids) { AttrNumber curr_att; @@ -1807,7 +1814,9 @@ static void expand_all_col_privileges( if (curr_att == InvalidAttrNumber) continue; - + if (curr_att == UidAttributeNumber && !relhasuids) { + continue; + } /* Skip OID column if it doesn't exist */ if (curr_att == ObjectIdAttributeNumber && !classForm->relhasoids) continue; @@ -2157,6 +2166,7 @@ static void ExecGrant_Relation(InternalGrant* istmt) if (!istmt->is_grant && ((privileges & ACL_ALL_RIGHTS_COLUMN) != 0 || (REMOVE_DDL_FLAG(ddl_privileges) & ACL_ALL_DDL_RIGHTS_COLUMN) != 0)) { bool hasbucket = false; + bool relhasuids = false; bool isNull = false; Datum datum; @@ -2165,10 +2175,16 @@ static void ExecGrant_Relation(InternalGrant* istmt) Assert(OidIsValid(DatumGetObjectId(datum))); hasbucket = true; } + if (pg_class_tuple->relkind == RELKIND_RELATION) { + bytea* options = extractRelOptions(tuple, GetDefaultPgClassDesc(), InvalidOid); + relhasuids = StdRdOptionsHasUids(options, pg_class_tuple->relkind); + pfree_ext(options); + } + // get if relhasuids expand_all_col_privileges(relOid, pg_class_tuple, privileges & ACL_ALL_RIGHTS_COLUMN, - col_privileges, num_col_privileges, hasbucket); + col_privileges, num_col_privileges, hasbucket, relhasuids); expand_all_col_privileges(relOid, pg_class_tuple, ddl_privileges & ACL_ALL_DDL_RIGHTS_COLUMN, - col_ddl_privileges, num_col_privileges, hasbucket); + col_ddl_privileges, num_col_privileges, hasbucket, relhasuids); have_col_privileges = true; } @@ -3163,7 +3179,7 @@ static void ExecGrant_Package(InternalGrant* istmt) istmt->ddl_privileges, pkgId, grantorId, - ACL_KIND_PROC, + ACL_KIND_PACKAGE, NameStr(gs_package_tuple->pkgname), 0, NULL); @@ -3417,10 +3433,12 @@ static void ExecGrant_Namespace(InternalGrant* istmt) noldmembers = aclmembers(old_acl, &oldmembers); } - /* Determine ID to do the grant as, and available grant options */ - /* Need special treatment for special schema dbe_perf and schema snapshot*/ + /* + * Determine ID to do the grant as, and available grant options. + * Need special treatment for schema dbe_perf, snapshot and pg_catalog. 
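+ * (editor's note: passing IsSystemNamespace(nspid) below gives pg_catalog
+ * the same special-cased grantor selection that dbe_perf and snapshot
+ * already get, instead of the plain sysadmin shortcut)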
+ */ select_best_grantor(GetUserId(), istmt->privileges, istmt->ddl_privileges, old_acl, ownerId, - &grantorId, &avail_goptions, &avail_ddl_goptions, IsMonitorSpace(nspid)); + &grantorId, &avail_goptions, &avail_ddl_goptions, IsMonitorSpace(nspid), IsSystemNamespace(nspid)); /* * Restrict the privileges to what we can actually grant, and emit the @@ -4845,6 +4863,8 @@ static AclMode pg_aclmask( return pg_database_aclmask(table_oid, roleid, mask, how); case ACL_KIND_PROC: return pg_proc_aclmask(table_oid, roleid, mask, how); + case ACL_KIND_PACKAGE: + return pg_package_aclmask(table_oid, roleid, mask, how); case ACL_KIND_LANGUAGE: return pg_language_aclmask(table_oid, roleid, mask, how); case ACL_KIND_LARGEOBJECT: @@ -4961,7 +4981,7 @@ AclMode pg_attribute_aclmask(Oid table_oid, AttrNumber attnum, Oid roleid, AclMo acl = DatumGetAclP(aclDatum); if (IsMonitorSpace(namespaceId)) { - result = aclmask_dbe_perf(acl, roleid, ownerId, mask, how); + result = aclmask_without_sysadmin(acl, roleid, ownerId, mask, how); } else { result = aclmask(acl, roleid, ownerId, mask, how); } @@ -4974,6 +4994,72 @@ AclMode pg_attribute_aclmask(Oid table_oid, AttrNumber attnum, Oid roleid, AclMo return result; } +static AclMode check_dml_privilege(Form_pg_class classForm, AclMode mask, Oid roleid, AclMode result) +{ + if (is_role_independent(classForm->relowner)) { + return result; + } + switch (classForm->relkind) { + case RELKIND_INDEX: + case RELKIND_GLOBAL_INDEX: + case RELKIND_SEQUENCE: + case RELKIND_LARGE_SEQUENCE: + case RELKIND_COMPOSITE_TYPE: + break; + /* table */ + default: + if ((mask & ACL_SELECT) && !(result & ACL_SELECT)) { + if (HasSpecAnyPriv(roleid, SELECT_ANY_TABLE, false)) { + result |= ACL_SELECT; + } + } + + if ((mask & ACL_INSERT) && !(result & ACL_INSERT)) { + if (HasSpecAnyPriv(roleid, INSERT_ANY_TABLE, false)) { + result |= ACL_INSERT; + } + } + if ((mask & ACL_UPDATE) && !(result & ACL_UPDATE)) { + if (HasSpecAnyPriv(roleid, UPDATE_ANY_TABLE, false)) { + result |= ACL_UPDATE; + } + } + if ((mask & ACL_DELETE) && !(result & ACL_DELETE)) { + if (HasSpecAnyPriv(roleid, DELETE_ANY_TABLE, false)) { + result |= ACL_DELETE; + } + } + break; + } + return result; +} + +static AclMode check_ddl_privilege(char relkind, AclMode mask, Oid roleid, AclMode result) +{ + mask = REMOVE_DDL_FLAG(mask); + switch (relkind) { + case RELKIND_COMPOSITE_TYPE: + case RELKIND_SEQUENCE: + case RELKIND_LARGE_SEQUENCE: + case RELKIND_INDEX: + case RELKIND_GLOBAL_INDEX: + break; + /* table */ + default: + if ((mask & ACL_DROP) && !(result & ACL_DROP)) { + if (HasSpecAnyPriv(roleid, DROP_ANY_TABLE, false)) { + result |= ACL_DROP; + } + } + if ((mask & ACL_ALTER) && !(result & ACL_ALTER)) { + if (HasSpecAnyPriv(roleid, ALTER_ANY_TABLE, false)) { + result |= ACL_ALTER; + } + } + break; + } + return result; +} /* * Exported routine for examining a user's privileges for a table */ @@ -5132,7 +5218,7 @@ AclMode pg_class_aclmask(Oid table_oid, Oid roleid, AclMode mask, AclMaskHow how mask = ADD_DDL_FLAG(mask); } if (IsMonitorSpace(namespaceId)) { - result = aclmask_dbe_perf(acl, roleid, ownerId, mask, how); + result = aclmask_without_sysadmin(acl, roleid, ownerId, mask, how); } else { result = aclmask(acl, roleid, ownerId, mask, how); } @@ -5140,8 +5226,16 @@ AclMode pg_class_aclmask(Oid table_oid, Oid roleid, AclMode mask, AclMaskHow how /* if we have a detoasted copy, free it */ FREE_DETOASTED_ACL(acl, aclDatum); + if ((how == ACLMASK_ANY && result != 0) || IsSysSchema(namespaceId)) { + ReleaseSysCache(tuple); + return 
result; + } + if (is_ddl_privileges) { + result = check_ddl_privilege(classForm->relkind, mask, roleid, result); + } else { + result = check_dml_privilege(classForm, mask, roleid, result); + } ReleaseSysCache(tuple); - return result; } @@ -5321,11 +5415,14 @@ AclMode pg_proc_aclmask(Oid proc_oid, Oid roleid, AclMode mask, AclMaskHow how, } if (IsMonitorSpace(namespaceId)) { - result = aclmask_dbe_perf(acl, roleid, ownerId, mask, how); + result = aclmask_without_sysadmin(acl, roleid, ownerId, mask, how); } else { result = aclmask(acl, roleid, ownerId, mask, how); } - + if ((mask & ACL_EXECUTE) && !(result & ACL_EXECUTE) && !IsSysSchema(namespaceId) && + HasSpecAnyPriv(roleid, EXECUTE_ANY_FUNCTION, false)) { + result |= ACL_EXECUTE; + } /* if we have a detoasted copy, free it */ FREE_DETOASTED_ACL(acl, aclDatum); if (HeapTupleIsValid(pkgTuple)) { @@ -5377,6 +5474,11 @@ AclMode pg_package_aclmask(Oid packageOid, Oid roleid, AclMode mask, AclMaskHow acl = DatumGetAclP(aclDatum); } result = aclmask(acl, roleid, ownerId, mask, how); + Oid namespaceId = ((Form_gs_package) GETSTRUCT(tuple))->pkgnamespace; + if ((mask & ACL_EXECUTE) && !(result & ACL_EXECUTE) && !IsSysSchema(namespaceId) && + HasSpecAnyPriv(roleid, EXECUTE_ANY_PACKAGE, false)) { + result |= ACL_EXECUTE; + } /* if we have a detoasted copy, free it */ FREE_DETOASTED_ACL(acl, aclDatum); if (HeapTupleIsValid(pkgTuple)) { @@ -5614,7 +5716,6 @@ AclMode pg_largeobject_aclmask_snapshot(Oid lobj_oid, Oid roleid, AclMode mask, /* * Exported routine for examining a user's privileges for blockchain namespace */ - static AclMode gs_blockchain_aclmask(Oid roleid, AclMode mask) { /* Only super user or audit admin have access right to blockchain nsp */ @@ -5625,6 +5726,35 @@ static AclMode gs_blockchain_aclmask(Oid roleid, AclMode mask) } } +static AclMode check_usage_privilege(Oid nsp_oid, AclMode mask, Oid roleid, AclMode result) +{ + if (!(mask & ACL_USAGE) || (result & ACL_USAGE)) { + return result; + } + if (!IsSysSchema(nsp_oid) && HasOneOfAnyPriv(roleid)) { + result |= ACL_USAGE; + } + return result; +} + +/* + * The initial user and operator admin in operation mode + * can bypass permission check for schema pg_catalog. +*/ +static bool is_pg_catalog_bypass_user(Oid roleid) +{ + return roleid == INITIAL_USER_ID || (isOperatoradmin(roleid) && u_sess->attr.attr_security.operation_mode); +} + +/* + * Sysadmin and operator admin in operation mode + * can bypass permission check for all schemas except for schema dbe_perf, snapshot and pg_catalog. +*/ +static bool is_namespace_bypass_user(Oid roleid) +{ + return superuser_arg(roleid) || (isOperatoradmin(roleid) && u_sess->attr.attr_security.operation_mode); +} + /* * Exported routine for examining a user's privileges for a namespace */ @@ -5644,17 +5774,19 @@ AclMode pg_namespace_aclmask(Oid nsp_oid, Oid roleid, AclMode mask, AclMaskHow h /* * The initial user bypass all permission checking. - * Sysadmin bypass all permission checking except for schema dbe_perf and schema snapshot. + * Sysadmin bypass all permission checking except for schema dbe_perf, snapshot and pg_catalog. * Monitoradmin can always bypass permission checking for schema dbe_perf and schema snapshot. 
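 * (editor's note: after this hunk the tiers map to concrete helpers --
 * is_namespace_bypass_user() for ordinary schemas,
 * is_pg_catalog_bypass_user() restricting pg_catalog to the initial user
 * and to operator admin in operation mode, while dbe_perf/snapshot stay
 * with monitoradmin and the initial user)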
- */ + */ if (IsMonitorSpace(nsp_oid)) { if (isMonitoradmin(roleid) || roleid == INITIAL_USER_ID) { return REMOVE_DDL_FLAG(mask); } - } else { - if (superuser_arg(roleid) || (isOperatoradmin(roleid) && u_sess->attr.attr_security.operation_mode)) { + } else if (IsSystemNamespace(nsp_oid)) { + if (is_pg_catalog_bypass_user(roleid)) { return REMOVE_DDL_FLAG(mask); } + } else if (is_namespace_bypass_user(roleid)) { + return REMOVE_DDL_FLAG(mask); } /* @@ -5709,12 +5841,17 @@ AclMode pg_namespace_aclmask(Oid nsp_oid, Oid roleid, AclMode mask, AclMaskHow h check_nodegroup_privilege(roleid, ownerId, mask); } - if (IsMonitorSpace(nsp_oid)) { - result = aclmask_dbe_perf(acl, roleid, ownerId, mask, how); + if (IsMonitorSpace(nsp_oid) || IsSystemNamespace(nsp_oid)) { + result = aclmask_without_sysadmin(acl, roleid, ownerId, mask, how); } else { result = aclmask(acl, roleid, ownerId, mask, how); } - + /* + * Check if ACL_USAGE is being checked and, if so, and not set already as + * part of the result, then check if the user has at least one of ANY privlege, + * which allow usage access to all schemas except system schema. + */ + result = check_usage_privilege(nsp_oid, mask, roleid, result); /* if we have a detoasted copy, free it */ FREE_DETOASTED_ACL(acl, aclDatum); @@ -7048,12 +7185,14 @@ bool pg_publication_ownercheck(Oid pub_oid, Oid roleid) Oid ownerId; /* Superusers bypass all permission checking. */ - if (superuser_arg(roleid)) + if (superuser_arg(roleid)) { return true; + } tuple = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pub_oid)); - if (!HeapTupleIsValid(tuple)) + if (!HeapTupleIsValid(tuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("publication with OID %u does not exist", pub_oid))); + } ownerId = ((Form_pg_publication)GETSTRUCT(tuple))->pubowner; diff --git a/src/common/backend/catalog/builtin_funcs.ini b/src/common/backend/catalog/builtin_funcs.ini index 53cd8e846..eb12a93e5 100755 --- a/src/common/backend/catalog/builtin_funcs.ini +++ b/src/common/backend/catalog/builtin_funcs.ini @@ -241,7 +241,7 @@ ), AddFuncGroup( "array_exists", 1, - AddBuiltinFunc(_0(6009), _1("array_exists"), _2(2), _3(true), _4(false), _5(array_exists), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_exists"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(6009), _1("array_exists"), _2(2), _3(false), _4(false), _5(array_exists), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_exists"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "array_extend", 1, @@ -268,6 +268,10 @@ "array_in", 1, AddBuiltinFunc(_0(750), _1("array_in"), _2(3), _3(true), _4(false), _5(array_in), _6(2277), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 2275, 26, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), 
_25("array_in"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "array_indexby_delete", 1, + AddBuiltinFunc(_0(7896), _1("array_indexby_delete"), _2(1), _3(true), _4(false), _5(array_indexby_delete), _6(2277), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2277), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_indexby_delete"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "array_indexby_length", 1, AddBuiltinFunc(_0(7895), _1("array_indexby_length"), _2(2), _3(false), _4(false), _5(array_indexby_length), _6(23), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_indexby_length"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("array index by length"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -278,7 +282,7 @@ ), AddFuncGroup( "array_integer_exists", 1, - AddBuiltinFunc(_0(7888), _1("array_integer_exists"), _2(2), _3(true), _4(false), _5(array_integer_exists), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_integer_exists"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(7888), _1("array_integer_exists"), _2(2), _3(false), _4(false), _5(array_integer_exists), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_integer_exists"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "array_integer_first", 1, @@ -354,11 +358,11 @@ ), AddFuncGroup( "array_remove", 1, - AddBuiltinFunc(_0(6555), _1("array_remove"), _2(2), _3(false), _4(false), _5(array_remove), _6(2277), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_remove"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) + AddBuiltinFunc(_0(6555), _1("array_remove"), _2(2), _3(false), _4(false), _5(array_remove), _6(2277), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_remove"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), 
_36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "array_replace", 1, - AddBuiltinFunc(_0(6556), _1("array_replace"), _2(3), _3(false), _4(false), _5(array_replace), _6(2277), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 2277, 2283,2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_replace"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) + AddBuiltinFunc(_0(6556), _1("array_replace"), _2(3), _3(false), _4(false), _5(array_replace), _6(2277), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 2277, 2283,2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_replace"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "array_send", 1, @@ -404,7 +408,7 @@ ), AddFuncGroup( "array_varchar_exists", 1, - AddBuiltinFunc(_0(7882), _1("array_varchar_exists"), _2(2), _3(true), _4(false), _5(array_varchar_exists), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 1043), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_varchar_exists"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(7882), _1("array_varchar_exists"), _2(2), _3(false), _4(false), _5(array_varchar_exists), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2277, 1043), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("array_varchar_exists"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "array_varchar_first", 1, @@ -1885,7 +1889,7 @@ ), AddFuncGroup( "comm_check_connection_status", 1, - AddBuiltinFunc(_0(1982), _1("comm_check_connection_status"), _2(0), _3(true), _4(true), _5(comm_check_connection_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(5, 25, 25, 25, 23, 16), _22(5, 'o', 'o', 'o', 'o', 'o'), _23(5, "node_name", "remote_name", "remote_host", "remote_port", "is_connected"), _24(NULL), _25("comm_check_connection_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(1982), _1("comm_check_connection_status"), _2(0), _3(true), _4(true), _5(comm_check_connection_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(6, 25, 25, 25, 23, 16, 16), _22(6, 'o', 'o', 'o', 'o', 'o', 'o'), _23(6, "node_name", "remote_name", "remote_host", "remote_port", "is_connected", "no_error_occur"), 
_24(NULL), _25("comm_check_connection_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "complex_array_in", 1, @@ -2272,6 +2276,10 @@ "db4ai_predict_by_text", 1, AddBuiltinFunc(_0(DB4AI_PREDICT_BY_TEXT_OID), _1("db4ai_predict_by_text"), _2(2), _3(false), _4(false), _5(db4ai_predict_by_text), _6(TEXTOID), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(ANYOID), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, TEXTOID, ANYOID), _21(2, TEXTOID, ANYOID), _22(2, 'i', 'v'), _23(NULL), _24(NULL), _25("db4ai_predict_by_text"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "db4ai_predict_by_float8_array", 1, + AddBuiltinFunc(_0(DB4AI_PREDICT_BY_FLOAT8ARRAY_OID), _1("db4ai_predict_by_float8_array"), _2(2), _3(false), _4(false), _5(db4ai_predict_by_float8_array), _6(FLOAT8ARRAYOID), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(ANYOID), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, TEXTOID, ANYOID), _21(2, TEXTOID, ANYOID), _22(2, 'i', 'v'), _23(NULL), _24(NULL), _25("db4ai_predict_by_float8_array"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false)) + ), AddFuncGroup( "dcbrt", 1, AddBuiltinFunc(_0(231), _1("dcbrt"), _2(1), _3(true), _4(false), _5(dcbrt), _6(701), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 701), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("dcbrt"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -2558,17 +2566,17 @@ "fenced_udf_process", 1, AddBuiltinFunc(_0(4207), _1("fenced_udf_process"), _2(1), _3(true), _4(false), _5(fenced_udf_process), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("fenced_udf_process"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "finish", 1, + AddBuiltinFunc(_0(1518), _1("finish"), _2(0), _3(true), _4(true), _5(debug_client_finish), _6(2249), _7(PG_PLDEBUG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(4, 26, 25, 23, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "funcoid", "funcname", "lineno", "query"), _24(NULL), _25("debug_client_finish"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "first", 1, - AddBuiltinFunc(_0(6560), _1("first"), _2(1), _3(false), _4(false), _5(aggregate_dummy), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(true), _15(false), 
_16(false), _17(false), _18('i'), _19(0), _20(1, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("aggregate_dummy"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) + AddBuiltinFunc(_0(6560), _1("first"), _2(1), _3(false), _4(false), _5(aggregate_dummy), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(true), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("aggregate_dummy"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "first_transition", 1, - AddBuiltinFunc(_0(6558), _1("first_transition"), _2(2), _3(true), _4(false), _5(first_transition), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2283, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("first_transition"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) - ), - AddFuncGroup( - "finish", 1, - AddBuiltinFunc(_0(1518), _1("finish"), _2(0), _3(true), _4(true), _5(debug_client_finish), _6(2249), _7(PG_PLDEBUG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(4, 26, 25, 23, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "funcoid", "funcname", "lineno", "query"), _24(NULL), _25("debug_client_finish"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(6558), _1("first_transition"), _2(2), _3(true), _4(false), _5(first_transition), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2283, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("first_transition"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "first_value", 1, @@ -3437,6 +3445,22 @@ "global_stat_get_hotkeys_info", 1, AddBuiltinFunc(_0(3903), _1("global_stat_get_hotkeys_info"), _2(0), _3(true), _4(true), _5(global_stat_get_hotkeys_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(6, 25, 25, 25, 25, 20, 20), _22(6, 'o', 'o', 'o', 'o', 'o', 'o'), _23(6, "database_name", "schema_name", "table_name", "key_value", "hash_value", "count"), _24(NULL), _25("global_stat_get_hotkeys_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "gs_gsc_clean", 1, + AddBuiltinFunc(_0(9120), _1("gs_gsc_clean"), _2(1), _3(false), _4(false), _5(gs_gsc_clean), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(1), _20(1, 20), _21(NULL), _22(NULL), _23(1, "database_id"), 
_24("({CONST :consttype 20 :consttypmod -1 :constcollid 0 :constlen 8 :constbyval true :constisnull true :ismaxvalue false :location 74 :constvalue <> :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_gsc_clean"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(1, 20), _41(NULL)) + ), + AddFuncGroup( + "gs_gsc_dbstat_info", 1, + AddBuiltinFunc(_0(9121), _1("gs_gsc_dbstat_info"), _2(1), _3(false), _4(true), _5(gs_gsc_dbstat_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(1), _20(1, 20), _21(24, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(24, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(24, "database_id", "database_id", "database_name", "tup_searches", "tup_hits", "tup_miss", "tup_count", "tup_dead", "tup_memory", "rel_searches", "rel_hits", "rel_miss", "rel_count", "rel_dead", "rel_memory", "part_searches", "part_hits", "part_miss", "part_count", "part_dead", "part_memory", "total_memory", "swapout_count", "refcount"), _24("({CONST :consttype 20 :consttypmod -1 :constcollid 0 :constlen 8 :constbyval true :constisnull true :ismaxvalue false :location 74 :constvalue <> :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_gsc_dbstat_info"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(24, 20, 20, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _41(NULL)) + ), + AddFuncGroup( + "gs_gsc_catalog_detail", 1, + AddBuiltinFunc(_0(9122), _1("gs_gsc_catalog_detail"), _2(2), _3(false), _4(true), _5(gs_gsc_catalog_detail), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(2), _20(2, 20, 20), _21(13, 20, 20, 20, 25, 20, 25, 20, 25, 25, 20, 20, 20, 20), _22(13, 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(13, "database_id", "rel_id", "database_id", "database_name", "rel_id", "rel_name", "cache_id", "self", "ctid", "infomask", "infomask2", "hash_value", "refcount"), _24("({CONST :consttype 20 :consttypmod -1 :constcollid 0 :constlen 8 :constbyval true :constisnull true :ismaxvalue false :location 74 :constvalue <> :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false} {CONST :consttype 20 :consttypmod -1 :constcollid 0 :constlen 8 :constbyval true :constisnull true :ismaxvalue false :location 102 :constvalue <> :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_gsc_catalog_detail"), _26(NULL), _27(NULL), _28(NULL), _29(2, 0, 1), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(13, 20, 20, 20, 25, 20, 25, 20, 25, 25, 20, 20, 20, 20), _41(NULL)) + ), + AddFuncGroup( + "gs_gsc_table_detail", 1, + AddBuiltinFunc(_0(9123), _1("gs_gsc_table_detail"), _2(2), 
+    AddFuncGroup(
+        "gs_gsc_table_detail", 1,
+        AddBuiltinFunc(_0(9123), _1("gs_gsc_table_detail"), _2(2), _3(false), _4(true), _5(gs_gsc_table_detail), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(2), _20(2, 20, 20), _21(23, 20, 20, 26, 25, 26, 25, 26, 26, 26, 26, 26, 26, 26, 16, 16, 18, 21, 16, 16, 18, 16, 25, 25), _22(23, 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(23, "database_oid", "rel_oid", "database_oid", "database_name", "reloid", "relname", "relnamespace", "reltype", "reloftype", "relowner", "relam", "relfilenode", "reltablespace", "relhasindex", "relisshared", "relkind", "relnatts", "relhasoids", "relhaspkey", "parttype", "tdhasuids", "attnames", "extinfo"), _24("({CONST :consttype 20 :consttypmod -1 :constcollid 0 :constlen 8 :constbyval true :constisnull true :ismaxvalue false :location 74 :constvalue <> :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false} {CONST :consttype 20 :consttypmod -1 :constcollid 0 :constlen 8 :constbyval true :constisnull true :ismaxvalue false :location 102 :constvalue <> :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_gsc_table_detail"), _26(NULL), _27(NULL), _28(NULL), _29(2, 0, 1), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(23, 20, 20, 26, 25, 26, 25, 26, 26, 26, 26, 26, 26, 26, 16, 16, 18, 21, 16, 16, 18, 16, 25, 25), _41(NULL))
+    ),
    AddFuncGroup(
        "gs_all_control_group_info", 1,
        AddBuiltinFunc(_0(4502), _1("gs_all_control_group_info"), _2(0), _3(true), _4(true), _5(gs_all_control_group_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(10, 25, 25, 20, 20, 25, 25, 20, 20, 20, 25), _22(10, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "name", "type", "gid", "classgid", "class", "workload", "shares", "limits", "wdlevel", "cpucores"), _24(NULL), _25("gs_all_control_group_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3489,6 +3513,10 @@
        "gs_fault_inject", 1,
        AddBuiltinFunc(_0(4000), _1("gs_fault_inject"), _2(6), _3(true), _4(false), _5(gs_fault_inject), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(6, 20, 25, 25, 25, 25, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_fault_inject"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "gs_get_active_archiving_standby", 1,
+        AddBuiltinFunc(_0(4579), _1("gs_get_active_archiving_standby"), _2(0), _3(true), _4(true), _5(gs_get_active_archiving_standby), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(3, 25, 25, 23), _22(3, 'o', 'o', 'o'), _23(3, "standby_name", "archive_location", "archived_file_num"), _24(NULL), _25("gs_get_active_archiving_standby"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gs_get_global_barrier_status", 1,
        AddBuiltinFunc(_0(9032), _1("gs_get_global_barrier_status"), _2(0), _3(true), _4(false), _5(gs_get_global_barrier_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(2, 25, 25), _22(2, 'o', 'o'), _23(2, "global_barrier_id", "global_achive_barrier_id"), _24(NULL), _25("gs_get_global_barrier_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3517,6 +3545,14 @@
        "gs_get_obs_file_context", 1,
        AddBuiltinFunc(_0(5128), _1("gs_get_obs_file_context"), _2(2), _3(true), _4(false), _5(gs_get_obs_file_context), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, 2275, 2275), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_get_obs_file_context"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL))
    ),
+    AddFuncGroup(
+        "gs_get_parallel_decode_status", 1,
+        AddBuiltinFunc(_0(9377), _1("gs_get_parallel_decode_status"), _2(0), _3(false), _4(true), _5(gs_get_parallel_decode_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(4, 25, 23, 25, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "slot_name", "parallel_decode_num", "read_change_queue_length", "decode_change_queue_length"), _24(NULL), _25("gs_get_parallel_decode_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_get_standby_cluster_barrier_status", 1,
+        AddBuiltinFunc(_0(9039), _1("gs_get_standby_cluster_barrier_status"), _2(0), _3(true), _4(false), _5(gs_get_standby_cluster_barrier_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(4, 25, 25, 25, 25), _22(4, 'o', 'o', 'o', 'o'), _23(4, "barrier_id", "barrier_lsn", "recovery_id", "target_id"), _24(NULL), _25("gs_get_standby_cluster_barrier_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gs_hadr_do_switchover", 1,
        AddBuiltinFunc(_0(9136), _1("gs_hadr_do_switchover"), _2(0), _3(true), _4(false), _5(gs_hadr_do_switchover), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(1, 16), _22(1, 'o'), _23(1, "service_truncation_result"), _24(NULL), _25("gs_hadr_do_switchover"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3531,15 +3567,23 @@
    ),
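// NOTE: argument and result types throughout this table are raw pg_type OIDs. The
// common ones used here are the standard PostgreSQL assignments, listed for reading
// convenience: 16 bool, 17 bytea, 19 name, 20 int8, 21 int2, 23 int4, 25 text,
// 26 oid, 28 xid, 869 inet, 1009 text[], 1184 timestamptz, 2249 record, 2275 cstring,
// 2281 internal, 2283 anyelement. For instance, gs_get_standby_cluster_barrier_status
// above returns a record (_6(2249)) with four text output columns (_21(4, 25, 25, 25, 25)).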
_1("gs_hadr_local_rto_and_rpo_stat"), _2(0), _3(false), _4(true), _5(gs_hadr_local_rto_and_rpo_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(11,25,25,25,23,25,23,20,20,20,20,20), _22(11,'o','o','o','o','o','o','o','o','o','o','o'), _23(11,"hadr_sender_node_name", "hadr_receiver_node_name", "source_ip", "source_port", "dest_ip", "dest_port", "current_rto", "target_rto", "current_rpo", "target_rpo", "current_sleep_time"), _24(NULL), _25("gs_hadr_local_rto_and_rpo_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(5077), _1("gs_hadr_local_rto_and_rpo_stat"), _2(0), _3(false), _4(true), _5(gs_hadr_local_rto_and_rpo_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(12,25,25,25,23,25,23,20,20,20,20,20,20), _22(12,'o','o','o','o','o','o','o','o','o','o','o','o'), _23(12,"hadr_sender_node_name", "hadr_receiver_node_name", "source_ip", "source_port", "dest_ip", "dest_port", "current_rto", "target_rto", "current_rpo", "target_rpo", "rto_sleep_time", "rpo_sleep_time"), _24(NULL), _25("gs_hadr_local_rto_and_rpo_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "gs_hadr_remote_rto_and_rpo_stat", 1, - AddBuiltinFunc(_0(5078), _1("gs_hadr_remote_rto_and_rpo_stat"), _2(0), _3(false), _4(true), _5(gs_hadr_remote_rto_and_rpo_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(11,25,25,25,23,25,23,20,20,20,20,20), _22(11,'o','o','o','o','o','o','o','o','o','o','o'), _23(11,"hadr_sender_node_name", "hadr_receiver_node_name", "source_ip", "source_port", "dest_ip", "dest_port", "current_rto", "target_rto", "current_rpo", "target_rpo", "current_sleep_time"), _24(NULL), _25("gs_hadr_remote_rto_and_rpo_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(5078), _1("gs_hadr_remote_rto_and_rpo_stat"), _2(0), _3(false), _4(true), _5(gs_hadr_remote_rto_and_rpo_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(12,25,25,25,23,25,23,20,20,20,20,20,20), _22(12,'o','o','o','o','o','o','o','o','o','o','o','o'), _23(12,"hadr_sender_node_name", "hadr_receiver_node_name", "source_ip", "source_port", "dest_ip", "dest_port", "current_rto", "target_rto", "current_rpo", "target_rpo", "rto_sleep_time", "rpo_sleep_time"), _24(NULL), _25("gs_hadr_remote_rto_and_rpo_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "gs_index_advise", 1, - AddBuiltinFunc(_0(4888), _1("gs_index_advise"), _2(1), _3(false), _4(true), _5(gs_index_advise), _6(2249), _7(PG_CATALOG_NAMESPACE), 
_8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 2275), _21(3, 25, 25, 25), _22(3, 'o', 'o', 'o'), _23(3, "schema", "table", "column"), _24(NULL), _25("gs_index_advise"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(4888), _1("gs_index_advise"), _2(1), _3(false), _4(true), _5(gs_index_advise), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 2275), _21(5, 2275, 25, 25, 25, 25), _22(5, 'i', 'o', 'o', 'o', 'o'), _23(5, "sql_string", "schema", "table", "column", "indextype"), _24(NULL), _25("gs_index_advise"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_index_verify", 1, + AddBuiltinFunc(_0(9150), _1("gs_index_verify"), _2(2), _3(false), _4(true), _5(gs_index_verify), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 26, 26), _21(5, 26, 26, 25, 26, 25), _22(5, 'i', 'i', 'o' , 'o', 'o'), _23(5, "oid", "blkno", "ptype", "blkno", "status"), _24(NULL), _25("gs_index_verify"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("Verify ubtree index"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_index_recycle_queue", 1, + AddBuiltinFunc(_0(9151), _1("gs_index_recycle_queue"), _2(3), _3(false), _4(true), _5(gs_index_recycle_queue), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 26, 26, 26), _21(9, 26, 26, 26, 26, 26, 25, 26, 26, 26), _22(9, 'i', 'i','i', 'o', 'o', 'o', 'o', 'o', 'o'), _23(9, "oid", "type", "blkno", "rblkno", "item_offset", "xid", "dblkno", "prev", "next"), _24(NULL), _25("gs_index_recycle_queue"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("Query recyle queue of ubtree"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "gs_io_wait_status", 1, @@ -3549,6 +3593,10 @@ "gs_is_recycle_object", 1, AddBuiltinFunc(_0(4895), _1("gs_is_recycle_object"), _2(3), _3(false), _4(false), _5(gs_is_recycle_object), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 23, 23, 19), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_is_recycle_object"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "gs_parse_page_bypath", 1, + AddBuiltinFunc(_0(2620), _1("gs_parse_page_bypath"), _2(4), _3(true), _4(false), _5(gs_parse_page_bypath), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(4, 25, 20, 25, 16), _21(5, 25, 20, 25, 16, 25), _22(5, 
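// NOTE: for functions with OUT columns, _20 carries only the IN argument types,
// while _21/_22/_23 describe all arguments and must agree in length.
// gs_parse_page_bypath above illustrates the shape:
//   _20(4, 25, 20, 25, 16)             // 4 IN args: text, int8, text, bool
//   _21(5, 25, 20, 25, 16, 25)         // 4 IN + 1 OUT
//   _22(5, 'i', 'i', 'i', 'i', 'o')    // modes; 'o' marks the OUT column
//   _23(5, "path", "blocknum", "relation_type", "read_memory", "output_filepath")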
    AddFuncGroup(
        "gs_password_deadline", 1,
        AddBuiltinFunc(_0(3469), _1("gs_password_deadline"), _2(0), _3(true), _4(false), _5(gs_password_deadline), _6(1186), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_password_deadline"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3557,15 +3605,30 @@
        "gs_password_notifytime", 1,
        AddBuiltinFunc(_0(3470), _1("gs_password_notifytime"), _2(0), _3(true), _4(false), _5(gs_password_notifytime), _6(23), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_password_notifytime"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
-    AddFuncGroup(
-        "gs_read_block_from_remote", 2,
-        AddBuiltinFunc(_0(4767), _1("gs_read_block_from_remote"), _2(9), _3(true), _4(false), _5(gs_read_block_from_remote), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(9, 23, 23, 23, 21, 23, 28, 23, 28, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_read_block_from_remote"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false)),
-        AddBuiltinFunc(_0(4768), _1("gs_read_block_from_remote"), _2(10), _3(true), _4(false), _5(gs_read_block_from_remote_compress), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(10, 23, 23, 23, 21, 21, 23, 28, 23, 28, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_read_block_from_remote_compress"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false))
-    ),
    AddFuncGroup(
        "gs_paxos_stat_replication", 1,
        AddBuiltinFunc(_0(4650), _1("gs_paxos_stat_replication"), _2(0), _3(false), _4(true), _5(gs_paxos_stat_replication), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(10), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(16, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 23, 25), _22(16, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(16, "local_role", "peer_role", "local_dcf_role", "peer_dcf_role", "peer_state", "sender_write_location", "sender_commit_location", "sender_flush_location", "sender_replay_location", "receiver_write_location", "receiver_commit_location", "receiver_flush_location", "receiver_replay_location", "sync_percent", "dcf_run_mode", "channel"), _24(NULL), _25("gs_paxos_stat_replication"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "gs_pitr_archive_slot_force_advance", 1,
+        AddBuiltinFunc(_0(4580), _1("gs_pitr_archive_slot_force_advance"), _2(0), _3(false), _4(false), _5(gs_pitr_archive_slot_force_advance), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 2275), _21(2, 2275, 25), _22(2, 'i', 'o'), _23(2, "stop_barrier_timestamp", "archive_restart_lsn"), _24(NULL), _25("gs_pitr_archive_slot_force_advance"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_pitr_clean_history_global_barriers", 1,
+        AddBuiltinFunc(_0(4581), _1("gs_pitr_clean_history_global_barriers"), _2(0), _3(false), _4(false), _5(gs_pitr_clean_history_global_barriers), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 2275), _21(2, 2275, 25), _22(2, 'i', 'o'), _23(2, "stop_barrier_timestamp", "oldest_barrier_record"), _24(NULL), _25("gs_pitr_clean_history_global_barriers"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_pitr_get_warning_for_xlog_force_recycle", 1,
+        AddBuiltinFunc(_0(4582), _1("gs_pitr_get_warning_for_xlog_force_recycle"), _2(0), _3(false), _4(false), _5(gs_pitr_get_warning_for_xlog_force_recycle), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(1, 16), _22(1, 'o'), _23(1, "xlog_force_recycled"), _24(NULL), _25("gs_pitr_get_warning_for_xlog_force_recycle"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_query_standby_cluster_barrier_id_exist", 1,
+        AddBuiltinFunc(_0(9038), _1("gs_query_standby_cluster_barrier_id_exist"), _2(1), _3(true), _4(false), _5(gs_query_standby_cluster_barrier_id_exist), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_query_standby_cluster_barrier_id_exist"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_read_block_from_remote", 1,
+        AddBuiltinFunc(_0(4767), _1("gs_read_block_from_remote"), _2(10), _3(true), _4(false), _5(gs_read_block_from_remote), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(10, 26, 26, 26, 21, 23, 28, 23, 28, 16, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_read_block_from_remote"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gs_respool_exception_info", 1,
        AddBuiltinFunc(_0(4501), _1("gs_respool_exception_info"), _2(1), _3(true), _4(true), _5(gs_respool_exception_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 2275), _21(6, 25, 25, 25, 25, 25, 20), _22(6, 'o', 'o', 'o', 'o', 'o', 'o'), _23(6, "name", "class", "workload", "rule", "type", "value"), _24(NULL), _25("gs_respool_exception_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3598,6 +3661,10 @@
        "gs_set_obs_file_context",1,
        AddBuiltinFunc(_0(5129), _1("gs_set_obs_file_context"), _2(3), _3(true), _4(false), _5(gs_set_obs_file_context), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 2275, 2275, 2275), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_set_obs_file_context"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL))
    ),
+    AddFuncGroup(
+        "gs_set_standby_cluster_target_barrier_id", 1,
+        AddBuiltinFunc(_0(9037), _1("gs_set_standby_cluster_target_barrier_id"), _2(1), _3(true), _4(false), _5(gs_set_standby_cluster_target_barrier_id), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_set_standby_cluster_target_barrier_id"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gs_space_shrink", 1,
        AddBuiltinFunc(_0(7002), _1("gs_space_shrink"), _2(4), _3(true), _4(false), _5(gs_space_shrink), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(10000), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(4, INT4OID, INT4OID, INT4OID, INT4OID), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_space_shrink"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3610,6 +3677,14 @@
        "global_space_shrink", 1,
        AddBuiltinFunc(_0(7008), _1("global_space_shrink"), _2(4), _3(true), _4(false), _5(global_space_shrink), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(10000), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, TEXTOID, TEXTOID), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("global_space_shrink"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "gs_get_session_memctx_detail", 1,
+        AddBuiltinFunc(_0(5254), _1("gs_get_session_memctx_detail"), _2(1), _3(false), _4(true), _5(gs_get_session_memctx_detail), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 25), _21(4, 25, 25, 20, 20), _22(4, 'i', 'o', 'o', 'o'), _23(4, "context_name", "file", "line", "size"), _24(NULL), _25("gs_get_session_memctx_detail"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_get_shared_memctx_detail", 1,
+        AddBuiltinFunc(_0(5255), _1("gs_get_shared_memctx_detail"), _2(1), _3(false), _4(true), _5(gs_get_shared_memctx_detail), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 25), _21(4, 25, 25, 20, 20), _22(4, 'i', 'o', 'o', 'o'), _23(4, "context_name", "file", "line", "size"), _24(NULL), _25("gs_get_shared_memctx_detail"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gs_stat_activity_timeout", 1,
        AddBuiltinFunc(_0(4520), _1("gs_stat_activity_timeout"), _2(1), _3(false), _4(true), _5(gs_stat_activity_timeout), _6(2249), _7(PG_DBEPERF_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 23), _21(10, 23, 19, 20, 20, 26, 25, 25, 1184, 1184, 20), _22(10, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "timeout_threshold", "database", "pid", "sessionid", "usesysid", "application_name", "query", "xact_start", "query_start", "query_id"), _24(NULL), _25("gs_stat_activity_timeout"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3646,6 +3721,10 @@
        "gs_switch_relfilenode", 1,
        AddBuiltinFunc(_0(4049), _1("gs_switch_relfilenode"), _2(2), _3(true), _4(false), _5(pg_switch_relfilenode_name), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 2205, 2205, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pg_switch_relfilenode_name"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "gs_get_thread_memctx_detail", 1,
+        AddBuiltinFunc(_0(5256), _1("gs_get_thread_memctx_detail"), _2(2), _3(false), _4(true), _5(gs_get_thread_memctx_detail), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 20, 25), _21(5, 20, 25, 25, 20, 20), _22(5, 'i', 'i', 'o', 'o', 'o'), _23(5, "threadid", "context_name", "file", "line", "size"), _24(NULL), _25("gs_get_thread_memctx_detail"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gs_total_nodegroup_memory_detail", 1,
        AddBuiltinFunc(_0(2847), _1("gs_total_nodegroup_memory_detail"), _2(0), _3(true), _4(true), _5(gs_total_nodegroup_memory_detail), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(3, 25, 25, 23), _22(3, 'o', 'o', 'o'), _23(3, "ngname", "memorytype", "memorymbytes"), _24(NULL), _25("gs_total_nodegroup_memory_detail"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3656,11 +3735,19 @@
    ),
    AddFuncGroup(
        "gs_undo_meta", 1,
-        AddBuiltinFunc(_0(4430), _1("gs_undo_meta"), _2(3), _3(false), _4(true), _5(gs_undo_meta), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 23, 23, 23), _21(7, 26, 26, 25, 25, 25, 25, 25), _22(7, 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(7, "zoneId", "persistType", "insert", "discard", "end", "used", "lsn"), _24(NULL), _25("gs_undo_meta"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+        AddBuiltinFunc(_0(4430), _1("gs_undo_meta"), _2(3), _3(false), _4(true), _5(gs_undo_meta), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 23, 23, 23), _21(11, 23, 23, 23, 26, 26, 25, 25, 25, 25, 25, 26), _22(11, 'i', 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(11, "type", "zoneId", "location", "zoneId", "persistType", "insert", "discard", "end", "used", "lsn", "pid"), _24(NULL), _25("gs_undo_meta"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_stat_undo", 1,
+        AddBuiltinFunc(_0(4434), _1("gs_stat_undo"), _2(0), _3(false), _4(true), _5(gs_stat_undo), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(10, 23, 25, 23, 23, 26, 26, 26, 26, 23, 23), _22(10, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "curr_used_zone_count", "top_used_zones", "curr_used_undo_size", "undo_threshold", "oldest_xid_in_undo", "oldest_xmin", "total_undo_chain_len", "max_undo_chain_len", "create_undo_file_count", "discard_undo_file_count"), _24(NULL), _25("gs_stat_undo"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
    AddFuncGroup(
        "gs_undo_translot", 1,
-        AddBuiltinFunc(_0(4431), _1("gs_undo_translot"), _2(2), _3(false), _4(true), _5(gs_undo_translot), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 23, 23), _21(5, 26, 25, 25, 25, 25), _22(5, 'o', 'o', 'o', 'o', 'o'), _23(5, "groupId", "xactId", "startUndoPtr", "endUndoPtr","lsn"), _24(NULL), _25("gs_undo_translot"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+        AddBuiltinFunc(_0(4431), _1("gs_undo_translot"), _2(2), _3(false), _4(true), _5(gs_undo_translot), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 23, 23), _21(8, 23, 23, 26, 25, 25, 25, 25, 26), _22(8, 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o'), _23(8, "location", "zoneId", "groupId", "xactId", "startUndoPtr", "endUndoPtr","lsn", "slot_states"), _24(NULL), _25("gs_undo_translot"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_undo_record", 1,
+        AddBuiltinFunc(_0(4439), _1("gs_undo_record"), _2(1), _3(false), _4(true), _5(gs_undo_record), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 23), _21(12, 23, 26, 26, 25, 25, 25, 25, 25, 25, 25, 25, 25), _22(12, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(12, "undoptr", "undoptr", "xid", "cid", "reloid", "relfilenode", "utype", "blkprev", "blockno", "uoffset", "prevurp", "payloadlen"), _24(NULL), _25("gs_undo_record"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false))
    ),
    AddFuncGroup(
        "gs_upload_obs_file", 1,
@@ -3726,6 +3813,34 @@
        "gs_write_term_log", 1,
        AddBuiltinFunc(_0(9376), _1("gs_write_term_log"), _2(1), _3(true), _4(false), _5(gs_write_term_log), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(1, 16), _22(1, 'o'), _23(1, "setTermDone"), _24(NULL), _25("gs_write_term_log"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL))
    ),
+    AddFuncGroup(
+        "gs_stat_wal_entrytable", 1,
+        AddBuiltinFunc(_0(2861), _1("gs_stat_wal_entrytable"), _2(1), _3(false), _4(true), _5(gs_stat_wal_entrytable), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 20), _21(5, 20, 28, 28, 23, 31), _22(5, 'i', 'o', 'o', 'o', 'o'), _23(5, "idx", "idx", "endlsn", "lrc", "status"), _24(NULL), _25("gs_stat_wal_entrytable"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_walwriter_flush_position", 1,
+        AddBuiltinFunc(_0(2862), _1("gs_walwriter_flush_position"), _2(1), _3(false), _4(true), _5(gs_walwriter_flush_position), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(12, 23, 23, 23, 28, 31, 28, 28, 28, 28, 28, 28, 1184), _22(12, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(12, "last_flush_status_entry", "last_scanned_lrc", "curr_lrc", "curr_byte_pos", "prev_byte_size", "flush_result", "send_result", "shm_rqst_write_pos", "shm_rqst_flush_pos", "shm_result_write_pos", "shm_result_flush_pos", "curr_time"), _24(NULL), _25("gs_walwriter_flush_position"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_walwriter_flush_stat", 1,
+        AddBuiltinFunc(_0(2863), _1("gs_walwriter_flush_stat"), _2(1), _3(false), _4(true), _5(gs_walwriter_flush_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 23), _21(17, 23, 28, 28, 28, 28, 31, 31, 31, 31, 28, 28, 31, 31, 28, 28, 1184, 1184), _22(17, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(17, "operation", "write_times", "sync_times", "total_xlog_sync_bytes", "total_actual_xlog_sync_bytes", "avg_write_bytes", "avg_actual_write_bytes", "avg_sync_bytes", "avg_actual_sync_bytes", "total_write_time", "total_sync_time", "avg_write_time", "avg_sync_time", "curr_init_xlog_segno", "curr_open_xlog_segno", "last_reset_time", "curr_time"), _24(NULL), _25("gs_walwriter_flush_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
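// NOTE: the three WAL-writer statistics functions above are set-returning
// (_4(true), returning record _6(2249)): gs_stat_wal_entrytable takes an int8 "idx"
// and appears to report per-entry status of the WAL insert entry table, while
// gs_walwriter_flush_position and gs_walwriter_flush_stat expose flush positions and
// timing counters. A hedged, illustrative call shape only -- the accepted argument
// values are not defined in this hunk:
//   SELECT * FROM gs_walwriter_flush_stat(1);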
+    AddFuncGroup(
+        "gs_xlogdump_lsn", 1,
+        AddBuiltinFunc(_0(2619), _1("gs_xlogdump_lsn"), _2(2), _3(true), _4(false), _5(gs_xlogdump_lsn), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 25, 25), _21(3, 25, 25, 25), _22(3, 'i', 'i', 'o'), _23(3, "start_lsn", "end_lsn", "output_filepath"), _24(NULL), _25("gs_xlogdump_lsn"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("dump xlog records to output file based on the given start_lsn and end_lsn"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_xlogdump_xid", 1,
+        AddBuiltinFunc(_0(2617), _1("gs_xlogdump_xid"), _2(1), _3(true), _4(false), _5(gs_xlogdump_xid), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 28), _21(2, 28, 25), _22(2, 'i', 'o'), _23(2, "c_xid", "output_filepath"), _24(NULL), _25("gs_xlogdump_xid"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("dump xlog records to output file based on the given xid"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_xlogdump_tablepath", 1,
+        AddBuiltinFunc(_0(2616), _1("gs_xlogdump_tablepath"), _2(3), _3(true), _4(false), _5(gs_xlogdump_tablepath), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 25, 20, 25), _21(4, 25, 20, 25, 25), _22(4, 'i', 'i', 'i', 'o'), _23(4, "path", "blocknum", "relation_type", "output_filepath"), _24(NULL), _25("gs_xlogdump_tablepath"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("dump xlog records to output file based on given filepath"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "gs_xlogdump_parsepage_tablepath", 1,
+        AddBuiltinFunc(_0(2618), _1("gs_xlogdump_parsepage_tablepath"), _2(4), _3(true), _4(false), _5(gs_xlogdump_parsepage_tablepath), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(4, 25, 20, 25, 16), _21(5, 25, 20, 25, 16, 25), _22(5, 'i', 'i', 'i', 'i', 'o'), _23(5, "path", "blocknum", "relation_type", "read_memory", "output_filepath"), _24(NULL), _25("gs_xlogdump_parsepage_tablepath"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("parse data page to output file based on given filepath"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "gtsquery_compress", 1,
        AddBuiltinFunc(_0(3695), _1("gtsquery_compress"), _2(1), _3(true), _4(false), _5(gtsquery_compress), _6(2281), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2281), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gtsquery_compress"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("GiST tsquery support"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -3799,6 +3914,10 @@
        AddBuiltinFunc(_0(3028), _1("has_any_column_privilege"), _2(2), _3(true), _4(false), _5(has_any_column_privilege_name), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(10), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 25, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("has_any_column_privilege_name"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("current user privilege on any column by rel name"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)),
        AddBuiltinFunc(_0(3029), _1("has_any_column_privilege"), _2(2), _3(true), _4(false), _5(has_any_column_privilege_id), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(10), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 26, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("has_any_column_privilege_id"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("current user privilege on any column by rel oid"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "has_any_privilege", 1,
+        AddBuiltinFunc(_0(5571), _1("has_any_privilege"), _2(2), _3(true), _4(false), _5(has_any_privilege), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(2, 19, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("has_any_privilege"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33("current user privilege on database level"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "has_cek_privilege", 6,
        AddBuiltinFunc(_0(9130), _1("has_cek_privilege"), _2(3), _3(true), _4(false), _5(has_cek_privilege_name_name), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 19, 25, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("has_cek_privilege_name_name"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)),
@@ -6041,9 +6160,17 @@
        "language_handler_out", 1,
        AddBuiltinFunc(_0(2303), _1("language_handler_out"), _2(1), _3(true), _4(false), _5(language_handler_out), _6(2275), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2280), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("language_handler_out"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "large_seq_rollback_ntree", 1,
+        AddBuiltinFunc(_0(6016), _1("large_seq_rollback_ntree"), _2(1), _3(true), _4(false), _5(large_sequence_rollback_node_tree), _6(194), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 194), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("large_sequence_rollback_node_tree"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
+    AddFuncGroup(
+        "large_seq_upgrade_ntree", 1,
+        AddBuiltinFunc(_0(6017), _1("large_seq_upgrade_ntree"), _2(1), _3(true), _4(false), _5(large_sequence_upgrade_node_tree), _6(194), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 194), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("large_sequence_upgrade_node_tree"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "last", 1,
-        AddBuiltinFunc(_0(6561), _1("last"), _2(1), _3(false), _4(false), _5(aggregate_dummy), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(true), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("aggregate_dummy"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'))
+        AddBuiltinFunc(_0(6561), _1("last"), _2(1), _3(false), _4(false), _5(aggregate_dummy), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(true), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("aggregate_dummy"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
    AddFuncGroup(
        "last_day", 1,
@@ -6051,7 +6178,7 @@
    ),
    AddFuncGroup(
        "last_transition", 1,
-        AddBuiltinFunc(_0(6559), _1("last_transition"), _2(2), _3(true), _4(false), _5(last_transition), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2283, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("last_transition"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'))
+        AddBuiltinFunc(_0(6559), _1("last_transition"), _2(2), _3(true), _4(false), _5(last_transition), _6(2283), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 2283, 2283), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("last_transition"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
    AddFuncGroup(
        "last_value", 1,
@@ -6283,7 +6410,7 @@
    ),
    AddFuncGroup(
        "local_double_write_stat", 1,
-        AddBuiltinFunc(_0(4384), _1("local_double_write_stat"), _2(0), _3(false), _4(true), _5(local_double_write_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(11, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(11, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(11, "node_name", "curr_dwn", "curr_start_page", "file_trunc_num", "file_reset_num", "total_writes", "low_threshold_writes", "high_threshold_writes", "total_pages", "low_threshold_pages", "high_threshold_pages"), _24(NULL), _25("local_double_write_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+        AddBuiltinFunc(_0(4384), _1("local_double_write_stat"), _2(0), _3(false), _4(true), _5(local_double_write_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(12, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(12, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(12, "node_name", "curr_dwn", "curr_start_page", "file_trunc_num", "file_reset_num", "total_writes", "low_threshold_writes", "high_threshold_writes", "total_pages", "low_threshold_pages", "high_threshold_pages", "file_id"), _24(NULL), _25("local_double_write_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
    AddFuncGroup(
        "local_pagewriter_stat", 1,
@@ -6297,6 +6424,10 @@
        "local_redo_stat", 1,
        AddBuiltinFunc(_0(4388), _1("local_redo_stat"), _2(0), _3(false), _4(true), _5(local_redo_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(23, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 25), _22(23, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(23, "node_name", "redo_start_ptr", "redo_start_time", "redo_done_time", "curr_time", "min_recovery_point", "read_ptr", "last_replayed_read_ptr", "recovery_done_ptr", "read_xlog_io_counter", "read_xlog_io_total_dur", "read_data_io_counter", "read_data_io_total_dur", "write_data_io_counter", "write_data_io_total_dur", "process_pending_counter", "process_pending_total_dur", "apply_counter", "apply_total_dur", "speed", "local_max_ptr", "primary_flush_ptr", "worker_info"), _24(NULL), _25("local_redo_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "local_redo_time_count", 1,
+        AddBuiltinFunc(_0(4391), _1("local_redo_time_count"), _2(0), _3(false), _4(true), _5(local_redo_time_count), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(19, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(19, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(19, "thread_name", "step1_total", "step1_count", "step2_total", "step2_count", "step3_total", "step3_count", "step4_total", "step4_count", "step5_total", "step5_count", "step6_total", "step6_count", "step7_total", "step7_count", "step8_total", "step8_count", "step9_total", "step9_count"), _24(NULL), _25("local_redo_time_count"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "local_rto_stat", 1,
        AddBuiltinFunc(_0(3299), _1("local_rto_stat"), _2(0), _3(false), _4(true), _5(local_rto_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(2,25,25), _22(2, 'o', 'o'), _23(2, "node_name", "rto_info"), _24(NULL), _25("local_rto_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -6305,6 +6436,10 @@
        "local_single_flush_dw_stat", 1,
        AddBuiltinFunc(_0(4375), _1("local_single_flush_dw_stat"), _2(0), _3(false), _4(true), _5(local_single_flush_dw_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(6, 25, 25, 25, 25, 25, 25), _22(6, 'o', 'o', 'o', 'o', 'o', 'o'), _23(6, "node_name", "curr_dwn", "curr_start_page", "total_writes", "file_trunc_num", "file_reset_num"), _24(NULL), _25("local_single_flush_dw_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
+    AddFuncGroup(
+        "local_xlog_redo_statics", 1,
+        AddBuiltinFunc(_0(4390), _1("local_xlog_redo_statics"), _2(0), _3(false), _4(true), _5(local_xlog_redo_statics), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(5, 25, 23, 23, 20, 20), _22(5, 'o', 'o', 'o', 'o', 'o'), _23(5, "xlog_type", "rmid", "info", "num", "extra"), _24(NULL), _25("local_xlog_redo_statics"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+    ),
    AddFuncGroup(
        "locktag_decode", 1,
        AddBuiltinFunc(_0(5730), _1("locktag_decode"), _2(1), _3(true), _4(false), _5(locktag_decode), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("locktag_decode"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
@@ -6799,7 +6934,7 @@
    ),
    AddFuncGroup(
        "nlssort", 1,
-        AddBuiltinFunc(_0(1849), _1("nlssort"), _2(2), _3(false), _4(false), _5(nlssort), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 25, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("nlssort"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+        AddBuiltinFunc(_0(1849), _1("nlssort"), _2(2), _3(false), _4(false), _5(nlssort), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(2, 25, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("nlssort"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(true), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
    AddFuncGroup(
        "node_oid_name", 1,
@@ -7569,7 +7704,7 @@
    ),
    AddFuncGroup(
        "pg_create_physical_replication_slot_extern", 1,
-        AddBuiltinFunc(_0(3790), _1("pg_create_physical_replication_slot_extern"), _2(3), _3(false), _4(false), _5(pg_create_physical_replication_slot_extern), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 19, 16, 25), _21(5, 19, 16, 25, 25, 25), _22(5, 'i', 'i', 'i', 'o', 'o'), _23(5, "slotname", "dummy_standby", "extra_content", "slotname", "xlog_position"), _24(NULL), _25("pg_create_physical_replication_slot_extern"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
+        AddBuiltinFunc(_0(3790), _1("pg_create_physical_replication_slot_extern"), _2(4), _3(false), _4(false), _5(pg_create_physical_replication_slot_extern), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(4, 19, 16, 25, 16), _21(6, 19, 16, 25, 16, 25, 25), _22(6, 'i', 'i', 'i', 'i', 'o', 'o'), _23(6, "slotname", "dummy_standby", "extra_content", "need_recycle_xlog", "slotname", "xlog_position"), _24(NULL), _25("pg_create_physical_replication_slot_extern"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
    AddFuncGroup(
        "pg_create_restore_point", 1,
@@ -7839,6 +7974,10 @@
        "pg_logical_slot_get_changes", 1,
        AddBuiltinFunc(_0(4216), _1("pg_logical_slot_get_changes"), _2(4), _3(false), _4(true), _5(pg_logical_slot_get_changes), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1000), _11(1000), _12(25), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(1), _20(4, 19, 25, 23, 1009), _21(7, 19, 25, 23, 1009, 25, 28, 25), _22(7, 'i', 'i', 'i', 'v', 'o', 'o', 'o'), _23(7, "slotname", "upto_lsn", "upto_nchanges", "options", "location", "xid", "data"), _24("({CONST :consttype 1009 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull false :ismaxvalue false :location 74230 :constvalue 16 [ 64 0 0 0 0 0 0 0 0 0 0 0 25 0 0 0 ] :cursor_data :row_count 0 :cur_dno 0 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("pg_logical_slot_get_changes"), _26(NULL), _27(NULL), _28(NULL), _29(1, 3), _30(false), _31(false), _32(false), _33("get changes from replication slot"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0))
    ),
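// NOTE: where an argument carries a default, _19 gives the default count, _29 the
// zero-based argument positions, and _24 one serialized Const node per default. In
// pg_logical_slot_get_changes above, _24 encodes the default for the variadic 'v'
// "options" argument (consttype 1009, i.e. text[]); the gs_gsc_* functions earlier in
// this table encode NULL int8 defaults the same way (consttype 20, constisnull true).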
"pg_logical_get_area_changes", 1, + AddBuiltinFunc(_0(4978), _1("pg_logical_get_area_changes"), _2(6), _3(false), _4(true), _5(pg_logical_get_area_changes), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1000), _11(1000), _12(25), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(1), _20(6, 25, 25, 23, 19, 25, 1009), _21(9, 25, 25, 23, 19, 25, 1009, 25, 28, 25), _22(9, 'i', 'i', 'i', 'i', 'i', 'v', 'o', 'o', 'o'), _23(9, "start_lsn", "upto_lsn", "upto_nchanges", "plugin", "xlog_path", "options", "location", "xid", "data"), _24("({CONST :consttype 1009 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull false :ismaxvalue false :location 74230 :constvalue 16 [ 64 0 0 0 0 0 0 0 0 0 0 0 25 0 0 0 ] :cursor_data :row_count 0 :cur_dno 0 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("pg_logical_get_area_changes"), _26(NULL), _27(NULL), _28(NULL), _29(1, 3), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL)) + ), AddFuncGroup( "pg_logical_slot_peek_binary_changes", 1, AddBuiltinFunc(_0(4219), _1("pg_logical_slot_peek_binary_changes"), _2(4), _3(false), _4(true), _5(pg_logical_slot_peek_binary_changes), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1000), _11(1000), _12(25), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(1), _20(4, 19, 25, 23, 1009), _21(7, 19, 25, 23, 1009, 25, 28, 17), _22(7, 'i', 'i', 'i', 'v', 'o', 'o', 'o'), _23(7, "slotname", "upto_lsn", "upto_nchanges", "options", "location", "xid", "data"), _24("({CONST :consttype 1009 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull false :ismaxvalue false :location 75218 :constvalue 16 [ 64 0 0 0 0 0 0 0 0 0 0 0 25 0 0 0 ] :cursor_data :row_count 0 :cur_dno 0 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("pg_logical_slot_peek_binary_changes"), _26(NULL), _27(NULL), _28(NULL), _29(1, 3), _30(false), _31(false), _32(false), _33("peek at binary changes from replication slot"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -7943,10 +8082,6 @@ AddBuiltinFunc(_0(3827), _1("pg_read_binary_file"), _2(4), _3(true), _4(false), _5(pg_read_binary_file), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(4, 25, 20, 20, 16), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pg_read_binary_file"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("read bytea from a file"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), AddBuiltinFunc(_0(3828), _1("pg_read_binary_file"), _2(1), _3(true), _4(false), _5(pg_read_binary_file_all), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pg_read_binary_file_all"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("read bytea from a file"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), - AddFuncGroup( - "pg_read_binary_file_blocks", 1, - AddBuiltinFunc(_0(8413), _1("pg_read_binary_file_blocks"), _2(3), _3(true), _4(true), _5(pg_read_binary_file_blocks), _6(2249), 
_7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(100), _11(20), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 25, 20, 20), _21(7, 25, 20, 20, 25, 23, 23, 17), _22(7, 'i', 'i', 'i', 'o', 'o', 'o', 'o'), _23(7, "input", "blocknum", "blockcount", "path", "blocknum", "len", "data"), _24(NULL), _25("pg_read_binary_file_blocks"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f')) - ), AddFuncGroup( "pg_read_file", 2, AddBuiltinFunc(_0(2624), _1("pg_read_file"), _2(3), _3(true), _4(false), _5(pg_read_file), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(3, 25, 20, 20), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pg_read_file"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("read text from a file - old version for adminpack 1.0"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)), @@ -8093,7 +8228,7 @@ ), AddFuncGroup( "pg_stat_get_activity", 1, - AddBuiltinFunc(_0(2022), _1("pg_stat_get_activity"), _2(1), _3(false), _4(true), _5(pg_stat_get_activity), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 20), _21(20, 20, 26, 20, 20, 26, 25, 25, 25, 16, 1184, 1184, 1184, 1184, 869, 25, 23, 25, 20, 19, 25), _22(20, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(20, "pid", "datid", "pid", "sessionid", "usesysid", "application_name", "state", "query", "waiting", "xact_start", "query_start", "backend_start", "state_change", "client_addr", "client_hostname", "client_port", "enqueue", "query_id", "srespool", "global_sessionid"), _24(NULL), _25("pg_stat_get_activity"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: information about currently active backends"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(2022), _1("pg_stat_get_activity"), _2(1), _3(false), _4(true), _5(pg_stat_get_activity), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 20), _21(22, 20, 26, 20, 20, 26, 25, 25, 25, 16, 1184, 1184, 1184, 1184, 869, 25, 23, 25, 20, 19, 25, 20, 25), _22(22, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(22, "pid", "datid", "pid", "sessionid", "usesysid", "application_name", "state", "query", "waiting", "xact_start", "query_start", "backend_start", "state_change", "client_addr", "client_hostname", "client_port", "enqueue", "query_id", "srespool", "global_sessionid", "unique_sql_id", "trace_id"), _24(NULL), _25("pg_stat_get_activity"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("statistics: information about currently active backends"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "pg_stat_get_activity_for_temptable", 1, @@ -8105,7 +8240,7 @@ ), AddFuncGroup( "pg_stat_get_activity_with_conninfo", 1, - AddBuiltinFunc(_0(4212), _1("pg_stat_get_activity_with_conninfo"), _2(1), _3(false), _4(true), 
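
/* Editor's note, a hedged reading inferred from the entries in this file rather than
   from the AddBuiltinFunc macro definition itself: slot _0 appears to be the builtin
   OID, _1 the SQL-level name, _2 the input-argument count, _5 the C entry point,
   _6 the return type OID, _18 the volatility class ('i'/'s'/'v'), _20 a counted list
   of input type OIDs, _21 a counted list of all argument types including OUT columns,
   _22 the argument modes ('i'/'o'/'v'), _23 the argument names, _25 the prosrc symbol,
   and _33 the description. Each hunk in this file that adds a column, whether an input
   such as need_recycle_xlog or outputs such as unique_sql_id, trace_id and file_id,
   widens _20/_21/_22/_23 and their leading counts in lockstep. */
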
_5(pg_stat_get_activity_with_conninfo), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 20), _21(21, 20, 26, 20, 20, 26, 25, 25, 25, 16, 1184, 1184, 1184, 1184, 869, 25, 23, 25, 20, 25, 19, 25), _22(21, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(21, "pid", "datid", "pid", "sessionid", "usesysid", "application_name", "state", "query", "waiting", "xact_start", "query_start", "backend_start", "state_change", "client_addr", "client_hostname", "client_port", "enqueue", "query_id", "connection_info", "srespool", "global_sessionid"), _24(NULL), _25("pg_stat_get_activity_with_conninfo"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(4212), _1("pg_stat_get_activity_with_conninfo"), _2(1), _3(false), _4(true), _5(pg_stat_get_activity_with_conninfo), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 20), _21(23, 20, 26, 20, 20, 26, 25, 25, 25, 16, 1184, 1184, 1184, 1184, 869, 25, 23, 25, 20, 25, 19, 25, 20, 25), _22(23, 'i', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(23, "pid", "datid", "pid", "sessionid", "usesysid", "application_name", "state", "query", "waiting", "xact_start", "query_start", "backend_start", "state_change", "client_addr", "client_hostname", "client_port", "enqueue", "query_id", "connection_info", "srespool", "global_sessionid", "unique_sql_id", "trace_id"), _24(NULL), _25("pg_stat_get_activity_with_conninfo"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "pg_stat_get_analyze_count", 1, @@ -8640,10 +8775,6 @@ "pg_stat_segment_extent_usage", 1, AddBuiltinFunc(_0(7001), _1("pg_stat_segment_extent_usage"), _2(4), _3(true), _4(true), _5(pg_stat_segment_extent_usage), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1000), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(4, INT4OID, INT4OID, INT4OID, INT4OID), _21(5, OIDOID, OIDOID, TEXTOID, OIDOID, OIDOID), _22(5, 'o', 'o', 'o', 'o', 'o'), _23(5, "start_block", "extent_size", "usage_type", "ower_location", "special_data"), _24(NULL), _25("pg_stat_segment_extent_usage"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), - AddFuncGroup( - "pg_stat_segment_space_info", 1, - AddBuiltinFunc(_0(7000), _1("pg_stat_segment_space_info"), _2(2), _3(true), _4(true), _5(pg_stat_segment_space_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(100), _11(4), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, INT4OID, INT4OID), _21(8, TEXTOID, OIDOID, INT4OID, OIDOID, OIDOID, OIDOID, FLOAT4OID, OIDOID), _22(8, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(8, "node_name", "extent_size", "forknum", "total_blocks", "meta_data_blocks", "used_data_blocks", "utilization", "high_water_mark"), 
_24(NULL), _25("pg_stat_segment_space_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) - ), AddFuncGroup( "local_segment_space_info", 1, AddBuiltinFunc(_0(7005), _1("local_segment_space_info"), _2(2), _3(true), _4(true), _5(local_segment_space_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(100), _11(4), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(2, TEXTOID, TEXTOID), _21(8, TEXTOID, OIDOID, INT4OID, OIDOID, OIDOID, OIDOID, FLOAT4OID, OIDOID), _22(8, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(8, "node_name", "extent_size", "forknum", "total_blocks", "meta_data_blocks", "used_data_blocks", "utilization", "high_water_mark"), _24(NULL), _25("local_segment_space_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -8803,6 +8934,22 @@ "pg_xlogfile_name_offset", 1, AddBuiltinFunc(_0(2850), _1("pg_xlogfile_name_offset"), _2(1), _3(true), _4(false), _5(pg_xlogfile_name_offset), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(3, 25, 25, 23), _22(3, 'i', 'o', 'o'), _23(3, "wal_location", "file_name", "file_offset"), _24(NULL), _25("pg_xlogfile_name_offset"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "pgxc_disaster_read_set", 1, + AddBuiltinFunc(_0(3268), _1("pgxc_disaster_read_set"), _2(1), _3(true), _4(false), _5(pgxc_disaster_read_set), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(1, 25), _21(2,25,16), _22(2, 'i', 'o'), _23(2, "","set_ok"), _24(NULL), _25("pgxc_disaster_read_set"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "pgxc_disaster_read_init", 1, + AddBuiltinFunc(_0(3269), _1("pgxc_disaster_read_init"), _2(0), _3(true), _4(false), _5(pgxc_disaster_read_init), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(1,16), _22(1, 'o'), _23(1, "init_ok"), _24(NULL), _25("pgxc_disaster_read_init"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "pgxc_disaster_read_clear", 1, + AddBuiltinFunc(_0(3271), _1("pgxc_disaster_read_clear"), _2(0), _3(true), _4(false), _5(pgxc_disaster_read_clear), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(1,16), _22(1, 'o'), _23(1, "clear_ok"), _24(NULL), _25("pgxc_disaster_read_clear"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + 
"pgxc_disaster_read_status", 1, + AddBuiltinFunc(_0(3273), _1("pgxc_disaster_read_status"), _2(0), _3(true), _4(true), _5(pgxc_disaster_read_status), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(100), _11(100), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(0), _21(8, 26, 25, 25, 23, 25, 23, 20, 20), _22(8, 'o', 'o', 'o', 'o', 'o', 'o', 'o','o'), _23(8, "node_oid", "node_type", "host", "port", "host1", "port1", "xlogmaxcsn", "consistency_point_csn"), _24(NULL), _25("pgxc_disaster_read_status"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), AddFuncGroup( "pgxc_get_csn", 1, AddBuiltinFunc(_0(3960), _1("pgxc_get_csn"), _2(1), _3(true), _4(true), _5(pgxc_get_csn), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(1, 28), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pgxc_get_csn"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -9740,9 +9887,9 @@ "remote_ckpt_stat", 1, AddBuiltinFunc(_0(4372), _1("remote_ckpt_stat"), _2(0), _3(false), _4(true), _5(remote_ckpt_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(7, 25, 25, 20, 20, 20, 20, 20), _22(7, 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(7, "node_name", "ckpt_redo_point", "ckpt_clog_flush_num", "ckpt_csnlog_flush_num", "ckpt_multixact_flush_num", "ckpt_predicate_flush_num", "ckpt_twophase_flush_num"), _24(NULL), _25("remote_ckpt_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), - AddFuncGroup( + AddFuncGroup( "remote_double_write_stat", 1, - AddBuiltinFunc(_0(4385), _1("remote_double_write_stat"), _2(0), _3(false), _4(true), _5(remote_double_write_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(11, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(11, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(11, "node_name", "curr_dwn", "curr_start_page", "file_trunc_num", "file_reset_num", "total_writes", "low_threshold_writes", "high_threshold_writes", "total_pages", "low_threshold_pages", "high_threshold_pages"), _24(NULL), _25("remote_double_write_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + AddBuiltinFunc(_0(4385), _1("remote_double_write_stat"), _2(0), _3(false), _4(true), _5(remote_double_write_stat), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(12, 25, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), _22(12, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(12, "node_name", "curr_dwn", "curr_start_page", "file_trunc_num", "file_reset_num", "total_writes", 
"low_threshold_writes", "high_threshold_writes", "total_pages", "low_threshold_pages", "high_threshold_pages", "file_id"), _24(NULL), _25("remote_double_write_stat"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), AddFuncGroup( "remote_pagewriter_stat", 1, @@ -11844,6 +11991,12 @@ "version", 1, AddBuiltinFunc(_0(89), _1("version"), _2(0), _3(true), _4(false), _5(pgsql_version), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("pgsql_version"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + + AddFuncGroup( + "gs_explain_model", 1, + AddBuiltinFunc(_0(DB4AI_EXPLAIN_MODEL_OID), _1("gs_explain_model"), _2(1), _3(true), _4(false), _5(db4ai_explain_model), _6(25), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 25), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("db4ai_explain_model"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("explain machine learning model"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( "void_in", 1, AddBuiltinFunc(_0(2298), _1("void_in"), _2(1), _3(true), _4(false), _5(void_in), _6(2278), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(1, 2275), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("void_in"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33("I/O"), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) @@ -12011,3 +12164,39 @@ "zhprs_start", 1, AddBuiltinFunc(_0(3792), _1("zhprs_start"), _2(3), _3(true), _4(false), _5(zhprs_start), _6(2281), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('i'), _19(0), _20(3, 2281, 23, 26), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("zhprs_start"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) ), + AddFuncGroup( + "local_bad_block_info", 1, + AddBuiltinFunc(_0(4567), _1("local_bad_block_info"), _2(0), _3(false), _4(true), _5(local_bad_block_info), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(10, 25, 26, 26, 26, 23, 23, 23, 25, 1184, 1184), _22(10, 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "node_name", "spc_node", "db_node", "rel_node", "bucket_node", "fork_num", "block_num", "file_path", "check_time", "repair_time"), _24(NULL), _25("local_bad_block_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "local_clear_bad_block_info", 1, + AddBuiltinFunc(_0(4568), _1("local_clear_bad_block_info"), _2(0), 
_3(false), _4(true), _5(local_clear_bad_block_info), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(0), _21(1, 16), _22(1, 'o'), _23(1, "result"), _24(NULL), _25("local_clear_bad_block_info"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_verify_and_tryrepair_page", 1, + AddBuiltinFunc(_0(4569), _1("gs_verify_and_tryrepair_page"), _2(4), _3(true), _4(true), _5(gs_verify_and_tryrepair_page), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(4, 25, 23, 16, 16), _21(10, 25, 26, 16, 16, 25, 25, 26, 25, 25, 16), _22(10, 'i', 'i', 'i', 'i', 'o', 'o', 'o', 'o', 'o', 'o'), _23(10, "path", "blocknum", "verify_mem", "is_segment", "node_name", "path", "blocknum", "disk_page_res", "mem_page_res", "is_repair"), _24(NULL), _25("gs_verify_and_tryrepair_page"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_repair_page", 1, + AddBuiltinFunc(_0(4570), _1("gs_repair_page"), _2(4), _3(true), _4(true), _5(gs_repair_page), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(4, 25, 26, 16, 23), _21(5, 25, 26, 16, 23, 16), _22(5, 'i', 'i', 'i', 'i', 'o'), _23(5, "path", "blocknum", "is_segment", "timeout", "result"), _24(NULL), _25("gs_repair_page"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_verify_data_file", 1, + AddBuiltinFunc(_0(4571), _1("gs_verify_data_file"), _2(1), _3(true), _4(true), _5(gs_verify_data_file), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(1), _20(1, 16), _21(5, 16, 25, 26, 25, 25), _22(5, 'i', 'o', 'o', 'o', 'o'), _23(5, "verify_segment", "node_name", "rel_oid", "rel_name", "miss_file_path"), _24("({CONST :consttype 16 :consttypmod -1 :constcollid 0 :constlen 1 :constbyval true :constisnull false :ismaxvalue false :location 79 :constvalue 1 [ 0 0 0 0 0 0 0 0 ] :cursor_data :row_count 0 :cur_dno -1 :is_open false :found false :not_found false :null_open false :null_fetch false})"), _25("gs_verify_data_file"), _26(NULL), _27(NULL), _28(NULL), _29(1, 0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_read_file_from_remote", 1, + AddBuiltinFunc(_0(4768), _1("gs_read_file_from_remote"), _2(8), _3(true), _4(false), _5(gs_read_file_from_remote), _6(2249), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(8, 26, 26, 26, 23, 23, 23, 28, 23), _21(10, 26, 26, 26, 23, 23, 23, 28, 23, 17, 28), _22(10,'i','i','i','i','i','i','i','i','o','o'), _23(NULL), _24(NULL), _25("gs_read_file_from_remote"), _26(NULL), 
_27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_read_file_size_from_remote", 1, + AddBuiltinFunc(_0(4769), _1("gs_read_file_size_from_remote"), _2(7), _3(true), _4(false), _5(gs_read_file_size_from_remote), _6(20), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(7, 26, 26, 26, 23, 23, 28, 23), _21(8, 26, 26, 26, 23, 23, 28, 23, 20), _22(8,'i','i','i','i','i','i','i','o'), _23(NULL), _24(NULL), _25("gs_read_file_size_from_remote"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_read_segment_block_from_remote", 1, + AddBuiltinFunc(_0(4770), _1("gs_read_segment_block_from_remote"), _2(11), _3(true), _4(false), _5(gs_read_segment_block_from_remote), _6(17), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(0), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('v'), _19(0), _20(11, 26, 26, 26, 21, 23, 28, 23, 28, 26, 26, 23), _21(NULL), _22(NULL), _23(NULL), _24(NULL), _25("gs_read_segment_block_from_remote"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(NULL), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), + AddFuncGroup( + "gs_repair_file", 1, + AddBuiltinFunc(_0(4771), _1("gs_repair_file"), _2(3), _3(true), _4(true), _5(gs_repair_file), _6(16), _7(PG_CATALOG_NAMESPACE), _8(BOOTSTRAP_SUPERUSERID), _9(INTERNALlanguageId), _10(1), _11(1000), _12(0), _13(0), _14(false), _15(false), _16(false), _17(false), _18('s'), _19(0), _20(3, 26, 25, 23), _21(3, 26, 25, 23), _22(3, 'i', 'i', 'i'), _23(3, "tableoid", "path", "timeout"), _24(NULL), _25("gs_repair_file"), _26(NULL), _27(NULL), _28(NULL), _29(0), _30(false), _31(false), _32(false), _33(NULL), _34('f'), _35(NULL), _36(0), _37(false), _38(NULL), _39(NULL), _40(0)) + ), diff --git a/src/common/backend/catalog/catalog.cpp b/src/common/backend/catalog/catalog.cpp index 28524fa0f..d5c9099c0 100644 --- a/src/common/backend/catalog/catalog.cpp +++ b/src/common/backend/catalog/catalog.cpp @@ -534,7 +534,10 @@ RelFileNodeForkNum relpath_to_filenode(char* path) /* check tablespace version directory */ token = strtok_r(NULL, "/", &tmptoken); - Assert(token != NULL); + if (NULL == token) { + pfree(parsepath); + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid relation file path %s.", path))); + } char tblspcversiondir[MAXPGPATH]; int errorno = snprintf_s(tblspcversiondir, @@ -644,6 +647,14 @@ bool IsSystemClass(Form_pg_class reltuple) return IsSystemNamespace(relnamespace) || IsToastNamespace(relnamespace) || IsPackageSchemaOid(relnamespace); } +bool IsSysSchema(Oid namespaceId) +{ + if (namespaceId == PG_PUBLIC_NAMESPACE) { + return false; + } + return namespaceId < FirstNormalObjectId; +} + /* * IsCatalogRelation * True iff the relation is a system catalog, or the toast table for @@ -1018,7 +1029,7 @@ Oid GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence) /* This logic should match RelationInitPhysicalAddr */ rnode.node.spcNode = ConvertToRelfilenodeTblspcOid(reltablespace); - rnode.node.dbNode = (rnode.node.spcNode == GLOBALTABLESPACE_OID) ? 
InvalidOid : u_sess->proc_cxt.MyDatabaseId; + rnode.node.dbNode = (rnode.node.spcNode == GLOBALTABLESPACE_OID) ? InvalidOid : GetMyDatabaseId(); rnode.node.bucketNode = InvalidBktId; /* diff --git a/src/common/backend/catalog/cstore_ctlg.cpp b/src/common/backend/catalog/cstore_ctlg.cpp index ff4d1c548..4d9d97c2f 100644 --- a/src/common/backend/catalog/cstore_ctlg.cpp +++ b/src/common/backend/catalog/cstore_ctlg.cpp @@ -1,8 +1,8 @@ /* - * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors * * openGauss is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. diff --git a/src/common/backend/catalog/dependency.cpp b/src/common/backend/catalog/dependency.cpp index 79446cac1..4a73e2de5 100644 --- a/src/common/backend/catalog/dependency.cpp +++ b/src/common/backend/catalog/dependency.cpp @@ -7,6 +7,7 @@ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group + * Portions Copyright (c) 2021, openGauss Contributors * * IDENTIFICATION * src/common/backend/catalog/dependency.cpp @@ -20,6 +21,7 @@ #include "access/sysattr.h" #include "access/xact.h" #include "catalog/dependency.h" +#include "catalog/gs_db_privilege.h" #include "catalog/gs_encrypted_proc.h" #include "catalog/gs_matview.h" #include "catalog/gs_model.h" @@ -148,6 +150,7 @@ static const Oid object_classes[MAX_OCLASS] = { UserMappingRelationId, /* OCLASS_USER_MAPPING */ PgSynonymRelationId, /* OCLASS_SYNONYM */ DefaultAclRelationId, /* OCLASS_DEFACL */ + DbPrivilegeId, /* OCLASS_DB_PRIVILEGE */ ExtensionRelationId, /* OCLASS_EXTENSION */ PgDirectoryRelationId, /* OCLASS_DIRECTORY */ PgJobRelationId, /* OCLASS_PG_JOB */ @@ -1399,6 +1402,11 @@ static void doDeletion(const ObjectAddress* object, int flags) case OCLASS_DEFACL: RemoveDefaultACLById(object->objectId); break; + + case OCLASS_DB_PRIVILEGE: + DropDbPrivByOid(object->objectId); + break; + #ifdef PGXC case OCLASS_PGXC_CLASS: RemovePgxcClass(object->objectId, IS_PGXC_DATANODE); @@ -2436,6 +2444,9 @@ ObjectClass getObjectClass(const ObjectAddress* object) case DefaultAclRelationId: return OCLASS_DEFACL; + case DbPrivilegeId: + return OCLASS_DB_PRIVILEGE; + case ExtensionRelationId: return OCLASS_EXTENSION; case PgxcGroupRelationId: @@ -3043,6 +3054,29 @@ char* getObjectDescription(const ObjectAddress* object) break; } + case OCLASS_DB_PRIVILEGE: { + Relation dbPrivRel = heap_open(DbPrivilegeId, AccessShareLock); + if (!RelationIsValid(dbPrivRel)) { + ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_SYSTEM_ERROR), + errmsg("Could not open the relation gs_db_privilege."), + errcause("System error."), erraction("Contact engineer to support."))); + } + + HeapTuple dbPrivTuple = SearchSysCache1(DBPRIVOID, ObjectIdGetDatum(object->objectId)); + if (!HeapTupleIsValid(dbPrivTuple)) { + heap_close(dbPrivRel, AccessShareLock); + break; + } + + bool isNull = false; + Datum datum = heap_getattr( + dbPrivTuple, Anum_gs_db_privilege_privilege_type, RelationGetDescr(dbPrivRel), &isNull); + appendStringInfo(&buffer, _("\"%s\""), text_to_cstring(DatumGetTextP(datum))); + + ReleaseSysCache(dbPrivTuple); + heap_close(dbPrivRel, 
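
/* Editor's note: the OCLASS_DB_PRIVILEGE handling in this file spans four coordinated
   hook points for the new gs_db_privilege catalog: the object_classes[] array entry,
   the getObjectClass() case, the doDeletion() case (which calls DropDbPrivByOid), and
   the getObjectDescription() case here, so that dependency-driven drops of
   gs_db_privilege rows behave like any other object class. */
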
AccessShareLock); + break; + } case OCLASS_EXTENSION: { char* extname = NULL; @@ -3132,10 +3166,16 @@ char* getObjectDescription(const ObjectAddress* object) HeapTuple tup; char *pubname; Form_pg_publication_rel prform; + ScanKeyData scanKey[1]; + ScanKeyInit(&scanKey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(object->objectId)); + Relation rel = heap_open(PublicationRelRelationId, AccessShareLock); + SysScanDesc scanDesc = systable_beginscan(rel, PublicationRelObjectIndexId, true, NULL, 1, scanKey); - tup = SearchSysCache1(PUBLICATIONREL, ObjectIdGetDatum(object->objectId)); + tup = systable_getnext(scanDesc); if (!HeapTupleIsValid(tup)) { - elog(ERROR, "cache lookup failed for publication table %u", object->objectId); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("could not find tuple for publication %u", object->objectId))); } prform = (Form_pg_publication_rel)GETSTRUCT(tup); @@ -3143,7 +3183,8 @@ char* getObjectDescription(const ObjectAddress* object) appendStringInfo(&buffer, _("publication table %s in publication %s"), get_rel_name(prform->prrelid), pubname); - ReleaseSysCache(tup); + systable_endscan(scanDesc); + heap_close(rel, AccessShareLock); break; } diff --git a/src/common/backend/catalog/gs_db_privilege.cpp b/src/common/backend/catalog/gs_db_privilege.cpp new file mode 100644 index 000000000..cd5f5ec8f --- /dev/null +++ b/src/common/backend/catalog/gs_db_privilege.cpp @@ -0,0 +1,398 @@ +/* + * Copyright (c) 2021 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * gs_db_privilege.cpp + * routines to support manipulation of the gs_db_privilege relation + * + * IDENTIFICATION + * src/common/backend/catalog/gs_db_privilege.cpp + * + * ------------------------------------------------------------------------- + */ + +#include "access/heapam.h" +#include "access/tableam.h" +#include "catalog/gs_db_privilege.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_auth_members.h" +#include "catalog/indexing.h" +#include "miscadmin.h" +#include "postgres.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/rel.h" +#include "utils/lsyscache.h" + +/* + * In internal format grantees have been turned into Oids. + */ +typedef struct { + bool is_grant; /* true = GRANT, false = REVOKE */ + bool admin_opt; /* with admin option */ + List* privileges; /* list of DbPriv nodes */ + List* grantees; /* list of grantees Oids */ +} InternalGrantDb; + +static void ExecuteGrantDbPriv(InternalGrantDb* stmt, Oid granteeId, DbPriv* priv, Relation dbPrivRel); +static void ExecuteRevokeDbPriv(InternalGrantDb* stmt, Oid granteeId, DbPriv* priv, Relation dbPrivRel); + +/* + * Called to execute the utility commands GRANT and REVOKE ANY privileges + */ +void ExecuteGrantDbStmt(GrantDbStmt* stmt) +{ + InternalGrantDb istmt; + ListCell* granteeCell = NULL; + ListCell* privCell = NULL; + + /* Turn the GrantDbStmt into the InternalGrantDb form. 
*/
+    istmt.is_grant = stmt->is_grant;
+    istmt.privileges = stmt->privileges;
+    istmt.grantees = NIL; /* filled below */
+    istmt.admin_opt = stmt->admin_opt;
+
+    /*
+     * Granting ANY privileges to PUBLIC and revoking ANY privileges
+     * from PUBLIC are both forbidden.
+     */
+    foreach (granteeCell, stmt->grantees) {
+        PrivGrantee* grantee = (PrivGrantee*)lfirst(granteeCell);
+        if (grantee->rolname == NULL) {
+            ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_INVALID_GRANT_OPERATION),
+                errmsg("Invalid grant or revoke operation."),
+                errdetail("ANY privileges cannot be granted to PUBLIC or revoked from PUBLIC."),
+                errcause("ANY privileges cannot be granted to PUBLIC or revoked from PUBLIC."),
+                erraction("Do not grant ANY privileges to PUBLIC or revoke ANY privileges from PUBLIC.")));
+        } else {
+            istmt.grantees = lappend_oid(istmt.grantees, get_role_oid(grantee->rolname, false));
+        }
+    }
+
+    Relation dbPrivRel = heap_open(DbPrivilegeId, RowExclusiveLock);
+    if (!RelationIsValid(dbPrivRel)) {
+        ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_SYSTEM_ERROR),
+            errmsg("Could not open the relation gs_db_privilege."),
+            errcause("System error."), erraction("Contact engineer to support.")));
+    }
+
+    foreach (granteeCell, istmt.grantees) {
+        Oid granteeId = lfirst_oid(granteeCell);
+
+        foreach (privCell, stmt->privileges) {
+            DbPriv* priv = (DbPriv*)lfirst(privCell);
+
+            if (stmt->is_grant) {
+                /* stmt->is_grant = true means GRANT */
+                ExecuteGrantDbPriv(&istmt, granteeId, priv, dbPrivRel);
+            } else {
+                /* stmt->is_grant = false means REVOKE */
+                ExecuteRevokeDbPriv(&istmt, granteeId, priv, dbPrivRel);
+            }
+        }
+    }
+
+    /* Close gs_db_privilege. */
+    heap_close(dbPrivRel, RowExclusiveLock);
+}
+
+/*
+ * Internal entry point for granting ANY privileges.
+ */
+void ExecuteGrantDbPriv(InternalGrantDb* stmt, Oid granteeId, DbPriv* priv, Relation dbPrivRel)
+{
+    /* Permission check. */
+    if (!HasSpecAnyPriv(GetUserId(), priv->db_priv_name, true)) {
+        ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("Permission denied."), errdetail("Permission denied to grant %s.", priv->db_priv_name),
+            errcause("Insufficient privileges."), erraction("Change to the user with sufficient privileges.")));
+    }
+
+    HeapTuple dbPrivTuple = SearchSysCache2(
+        DBPRIVROLEPRIV, ObjectIdGetDatum(granteeId), CStringGetTextDatum(priv->db_priv_name));
+    TupleDesc dbPrivDsc = RelationGetDescr(dbPrivRel);
+
+    /* Build a tuple to insert or update */
+    Datum newRecord[Natts_gs_db_privilege] = {0};
+    bool newRecordNulls[Natts_gs_db_privilege] = {false};
+    bool newRecordRepl[Natts_gs_db_privilege] = {false};
+
+    newRecord[Anum_gs_db_privilege_roleid - 1] = ObjectIdGetDatum(granteeId);
+    newRecord[Anum_gs_db_privilege_privilege_type - 1] = CStringGetTextDatum(priv->db_priv_name);
+    newRecord[Anum_gs_db_privilege_admin_option - 1] = BoolGetDatum(stmt->admin_opt);
+
+    bool isNull = false;
+    HeapTuple tuple = NULL;
+    if (HeapTupleIsValid(dbPrivTuple)) {
+        /* If an entry for this user's privilege already exists, just skip unless we are adding admin option. */
+        Datum datum = heap_getattr(dbPrivTuple, Anum_gs_db_privilege_admin_option, dbPrivDsc, &isNull);
+        if (!stmt->admin_opt || DatumGetBool(datum)) {
+            ReleaseSysCache(dbPrivTuple);
+            return;
+        }
+
+        /* Update the record in the gs_db_privilege table */
+        newRecordRepl[Anum_gs_db_privilege_admin_option - 1] = true;
+        tuple = (HeapTuple)tableam_tops_modify_tuple(dbPrivTuple, dbPrivDsc, newRecord, newRecordNulls, newRecordRepl);
+        simple_heap_update(dbPrivRel, &tuple->t_self, tuple);
+        CatalogUpdateIndexes(dbPrivRel, tuple);
+        ReleaseSysCache(dbPrivTuple);
+    } else {
+        /* Insert a new record into the gs_db_privilege relation */
+        tuple = heap_form_tuple(dbPrivDsc, newRecord, newRecordNulls);
+        Oid newRowId = simple_heap_insert(dbPrivRel, tuple);
+        CatalogUpdateIndexes(dbPrivRel, tuple);
+
+        /* Add shared dependency on users in pg_shdepend */
+        ObjectAddress object;
+        object.classId = DbPrivilegeId;
+        object.objectId = newRowId;
+        object.objectSubId = 0;
+
+        ObjectAddress referenced;
+        referenced.classId = AuthIdRelationId;
+        referenced.objectId = granteeId;
+        referenced.objectSubId = 0;
+
+        recordSharedDependencyOn(&object, &referenced, SHARED_DEPENDENCY_DBPRIV);
+    }
+}
+
+/*
+ * Internal entry point for revoking ANY privileges.
+ */
+void ExecuteRevokeDbPriv(InternalGrantDb* stmt, Oid granteeId, DbPriv* priv, Relation dbPrivRel)
+{
+    /* Permission check. */
+    if (!HasSpecAnyPriv(GetUserId(), priv->db_priv_name, true)) {
+        ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("Permission denied."), errdetail("Permission denied to revoke %s.", priv->db_priv_name),
+            errcause("Insufficient privileges."), erraction("Change to the user with sufficient privileges.")));
+    }
+
+    HeapTuple dbPrivTuple = SearchSysCache2(
+        DBPRIVROLEPRIV, ObjectIdGetDatum(granteeId), CStringGetTextDatum(priv->db_priv_name));
+    TupleDesc dbPrivDsc = RelationGetDescr(dbPrivRel);
+
+    /* If no entry for this user's privilege exists, just skip. */
+    if (!HeapTupleIsValid(dbPrivTuple)) {
+        return;
+    }
+
+    HeapTuple tuple = NULL;
+    if (!stmt->admin_opt) {
+        /* Remove the shared dependency */
+        deleteSharedDependencyRecordsFor(DbPrivilegeId, HeapTupleGetOid(dbPrivTuple), 0);
+        /* Remove the entry altogether */
+        simple_heap_delete(dbPrivRel, &dbPrivTuple->t_self);
+    } else {
+        /* Just turn off the admin option */
+        Datum newRecord[Natts_gs_db_privilege] = {0};
+        bool newRecordNulls[Natts_gs_db_privilege] = {false};
+        bool newRecordRepl[Natts_gs_db_privilege] = {false};
+
+        newRecord[Anum_gs_db_privilege_admin_option - 1] = BoolGetDatum(false);
+        newRecordRepl[Anum_gs_db_privilege_admin_option - 1] = true;
+
+        tuple = (HeapTuple)tableam_tops_modify_tuple(dbPrivTuple, dbPrivDsc, newRecord, newRecordNulls, newRecordRepl);
+        simple_heap_update(dbPrivRel, &tuple->t_self, tuple);
+        CatalogUpdateIndexes(dbPrivRel, tuple);
+    }
+    ReleaseSysCache(dbPrivTuple);
+}
+
+/*
+ * The role may hold the specified privilege explicitly,
+ * or may be a member of roles that hold the privilege.
+ *
+ * roleId: the role to be checked
+ * priv: the ANY privilege to be checked
+ * isAdminOption: if true, return whether the role may GRANT/REVOKE the ANY privilege;
+ *                if false, just return whether the role has the ANY privilege
+ */
+bool HasSpecAnyPriv(Oid roleId, const char* priv, bool isAdminOption)
+{
+    if (superuser_arg(roleId)) {
+        return true;
+    }
+
+    HeapTuple dbPrivTuple = NULL;
+    List* roles_list = NIL;
+    ListCell* cell = NULL;
+
+    /*
+     * We have to do a careful search to see
+     * if roleId has the privileges of any suitable role.
+     */
+    roles_list = roles_has_privs_of(roleId);
+    foreach (cell, roles_list) {
+        Oid otherid = lfirst_oid(cell);
+        dbPrivTuple = SearchSysCache2(DBPRIVROLEPRIV, ObjectIdGetDatum(otherid), CStringGetTextDatum(priv));
+        if (!HeapTupleIsValid(dbPrivTuple)) {
+            continue;
+        }
+
+        if (!isAdminOption) {
+            ReleaseSysCache(dbPrivTuple);
+            return true;
+        }
+
+        bool isNull = false;
+        Datum datum = SysCacheGetAttr(DBPRIVROLEPRIV, dbPrivTuple, Anum_gs_db_privilege_admin_option, &isNull);
+        if (DatumGetBool(datum)) {
+            ReleaseSysCache(dbPrivTuple);
+            return true;
+        }
+        ReleaseSysCache(dbPrivTuple);
+    }
+    return false;
+}
+
+/*
+ * The role may hold at least one ANY privilege explicitly,
+ * or may be a member of roles that hold at least one ANY privilege.
+ *
+ * roleId: the role to be checked
+ */
+bool HasOneOfAnyPriv(Oid roleId)
+{
+    bool result = false;
+    HeapTuple dbPrivTuple = NULL;
+    ListCell* cell = NULL;
+
+    if (superuser_arg(roleId)) {
+        return true;
+    }
+
+    /*
+     * We have to do a careful search to see
+     * if roleId has the privileges of any suitable role.
+     */
+    List* roles_list = roles_has_privs_of(roleId);
+    foreach (cell, roles_list) {
+        Oid otherid = lfirst_oid(cell);
+        dbPrivTuple = SearchSysCache1(DBPRIVROLE, ObjectIdGetDatum(otherid));
+        if (HeapTupleIsValid(dbPrivTuple)) {
+            result = true;
+            ReleaseSysCache(dbPrivTuple);
+            break;
+        }
+    }
+    return result;
+}
+
+typedef struct {
+    const char* name;
+    const char* priv;
+    bool adminOption;
+} DbPrivMap;
+
+/* Maps the user-visible privilege spelling (optionally suffixed with
+ * WITH ADMIN OPTION) to the stored privilege string plus the admin flag. */
+static const DbPrivMap dbPrivMap[] = {{"CREATE ANY TABLE", "create any table", false},
+    {"CREATE ANY TABLE WITH ADMIN OPTION", "create any table", true},
+    {"ALTER ANY TABLE", "alter any table", false}, {"ALTER ANY TABLE WITH ADMIN OPTION", "alter any table", true},
+    {"DROP ANY TABLE", "drop any table", false}, {"DROP ANY TABLE WITH ADMIN OPTION", "drop any table", true},
+    {"SELECT ANY TABLE", "select any table", false}, {"SELECT ANY TABLE WITH ADMIN OPTION", "select any table", true},
+    {"INSERT ANY TABLE", "insert any table", false}, {"INSERT ANY TABLE WITH ADMIN OPTION", "insert any table", true},
+    {"UPDATE ANY TABLE", "update any table", false}, {"UPDATE ANY TABLE WITH ADMIN OPTION", "update any table", true},
+    {"DELETE ANY TABLE", "delete any table", false}, {"DELETE ANY TABLE WITH ADMIN OPTION", "delete any table", true},
+    {"CREATE ANY SEQUENCE", "create any sequence", false},
+    {"CREATE ANY SEQUENCE WITH ADMIN OPTION", "create any sequence", true},
+    {"CREATE ANY INDEX", "create any index", false},
+    {"CREATE ANY INDEX WITH ADMIN OPTION", "create any index", true},
+    {"CREATE ANY FUNCTION", "create any function", false},
+    {"CREATE ANY FUNCTION WITH ADMIN OPTION", "create any function", true},
+    {"EXECUTE ANY FUNCTION", "execute any function", false},
+    {"EXECUTE ANY FUNCTION WITH ADMIN OPTION", "execute any function", true},
+    {"CREATE ANY PACKAGE", "create any package", false},
+    {"CREATE ANY PACKAGE WITH ADMIN OPTION", "create any package", true},
+    {"EXECUTE ANY PACKAGE", "execute any package", false},
+    {"EXECUTE ANY PACKAGE WITH ADMIN OPTION", "execute any package", true},
+    {"CREATE ANY TYPE", "create any type", false}, {"CREATE ANY TYPE WITH ADMIN OPTION", "create any type", true},
+    {NULL, NULL, false}};
+
+/*
+ * has_any_privilege
+ *
+ * Check a role's ANY privileges.
+ *
+ * userName: the user to be checked
+ * privList: a comma-separated list of ANY privileges to be checked; an entry
+ *           suffixed with WITH ADMIN OPTION checks grantability rather than
+ *           mere possession
+ */
+Datum has_any_privilege(PG_FUNCTION_ARGS)
+{
+    Name userName = PG_GETARG_NAME(0);
+    text* privText = PG_GETARG_TEXT_P(1);
+
+    Oid roleId = get_role_oid(NameStr(*userName), false);
+
+    char* privList = text_to_cstring(privText);
+    bool result = false;
+    char* chunk = NULL;
+    char* nextChunk = NULL;
+    for (chunk = privList; chunk; chunk = nextChunk) {
+        /* Split string at commas */
+        nextChunk = strchr(chunk, ',');
+        if (nextChunk != NULL)
+            *nextChunk++ = '\0';
+
+        /* Drop leading/trailing whitespace in this chunk */
+        while (*chunk != 0 && isspace((unsigned char)*chunk)) {
+            chunk++;
+        }
+        int chunkLen = strlen(chunk);
+        while (chunkLen > 0 && isspace((unsigned char)chunk[chunkLen - 1])) {
+            chunkLen--;
+        }
+        chunk[chunkLen] = '\0';
+
+        /* Match to the privileges list. */
+        const DbPrivMap* this_priv = NULL;
+        for (this_priv = dbPrivMap; this_priv->name; this_priv++) {
+            if (pg_strcasecmp(this_priv->name, chunk) == 0) {
+                result = result | HasSpecAnyPriv(roleId, this_priv->priv, this_priv->adminOption);
+                break;
+            }
+        }
+        if (this_priv->name == NULL) {
+            ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_SYNTAX_ERROR),
+                errmsg("Unrecognized privilege type."), errdetail("Unrecognized privilege type: \"%s\".", chunk),
+                errcause("The privilege type is not supported."),
+                erraction("Check GRANT/REVOKE syntax to obtain the supported privilege types.")));
+        }
+    }
+    PG_RETURN_BOOL(result);
+}
+
+/*
+ * Delete the record in gs_db_privilege by oid
+ */
+void DropDbPrivByOid(Oid rowOid)
+{
+    Relation dbPrivRel = heap_open(DbPrivilegeId, RowExclusiveLock);
+    if (!RelationIsValid(dbPrivRel)) {
+        ereport(ERROR, (errmodule(MOD_SEC), errcode(ERRCODE_SYSTEM_ERROR),
+            errmsg("Could not open the relation gs_db_privilege."),
+            errcause("System error."), erraction("Contact engineer to support.")));
+    }
+
+    HeapTuple dbPrivTuple = SearchSysCache1(DBPRIVOID, ObjectIdGetDatum(rowOid));
+    if (HeapTupleIsValid(dbPrivTuple)) {
+        simple_heap_delete(dbPrivRel, &dbPrivTuple->t_self);
+        ReleaseSysCache(dbPrivTuple);
+    }
+
+    heap_close(dbPrivRel, RowExclusiveLock);
+}
diff --git a/src/common/backend/catalog/gs_global_config.cpp b/src/common/backend/catalog/gs_global_config.cpp
new file mode 100644
index 000000000..e15ddb80e
--- /dev/null
+++ b/src/common/backend/catalog/gs_global_config.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * gs_global_config.cpp
+ *
+ * IDENTIFICATION
+ *        src/common/backend/catalog/gs_global_config.cpp
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+#include "nodes/parsenodes.h"
+#include "nodes/pg_list.h"
+#include "utils/relcache.h"
+#include "access/heapam.h"
+#include "nodes/parsenodes_common.h"
+#include "access/tableam.h"
+#include "access/htup.h"
+#include "utils/elog.h"
+#include "fmgr.h"
+#include "utils/builtins.h"
+#include "catalog/gs_global_config.h"
+
+void CreateGlobalConfig(DefElem* defel)
+{
+    HeapTuple htup = NULL;
+    Relation relation;
+    bool nulls[Natts_gs_global_config];
+    Datum values[Natts_gs_global_config];
+
+    relation = heap_open(GsGlobalConfigRelationId, RowExclusiveLock);
+
+    values[Anum_gs_global_config_name - 1] = DirectFunctionCall1(namein, CStringGetDatum(defel->defname));
+    nulls[Anum_gs_global_config_name - 1] = false;
+    char *config_value = defGetString(defel);
+    values[Anum_gs_global_config_value - 1] = DirectFunctionCall1(textin, CStringGetDatum(config_value));
+    nulls[Anum_gs_global_config_value - 1] = false;
+
+    htup = (HeapTuple)heap_form_tuple(relation->rd_att, values, nulls);
+    (void)simple_heap_insert(relation, htup);
+    heap_close(relation, NoLock);
+}
+
+void AlterGlobalConfig(AlterGlobalConfigStmt *stmt)
+{
+    ListCell *option = NULL;
+    HeapTuple tup = NULL;
+    HeapTuple htup = NULL;
+    TableScanDesc scan;
+    bool isNull = false;
+    bool find;
+    if (!initialuser()) {
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("must be initial user to alter global configuration")));
+    }
+    foreach (option, stmt->options) {
+        DefElem *defel = (DefElem *)lfirst(option);
+        if (strcmp(defel->defname, "weak_password") == 0) {
+            ereport(ERROR,
+                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("Don't alter config named 'weak_password'")));
+        }
+        bool nulls[Natts_gs_global_config];
+        Datum values[Natts_gs_global_config];
+        bool repl[Natts_gs_global_config];
+        find = false;
+
+        Relation relation = heap_open(GsGlobalConfigRelationId, RowExclusiveLock);
+        scan = tableam_scan_begin(relation, SnapshotNow, 0, NULL);
+        while ((tup = (HeapTuple)tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) {
+            Datum datum = heap_getattr(tup, Anum_gs_global_config_name, RelationGetDescr(relation), &isNull);
+            if (!isNull && strcmp(defel->defname, DatumGetCString(datum)) == 0) {
+                find = true;
+                char *config_value = defGetString(defel);
+                values[Anum_gs_global_config_value - 1] = DirectFunctionCall1(textin, CStringGetDatum(config_value));
+                nulls[Anum_gs_global_config_value - 1] = false;
+                repl[Anum_gs_global_config_value - 1] = true;
+
+                repl[Anum_gs_global_config_name - 1] = false;
+                htup = heap_modify_tuple(tup, RelationGetDescr(relation), values, nulls, repl);
+                simple_heap_update(relation, &tup->t_self, htup);
+                break;
+            }
+        }
+        tableam_scan_end(scan);
+        heap_close(relation, NoLock);
+        if (!find) {
+            CreateGlobalConfig(defel);
+        }
+    }
+}
+
+void DropGlobalConfig(DropGlobalConfigStmt *stmt)
+{
+    ListCell *item = NULL;
+    HeapTuple tup = NULL;
+    TableScanDesc scan;
+    bool isNull = false;
+    bool find;
+    if (!initialuser()) {
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("must be initial user to drop global configuration")));
+    }
+    foreach (item, stmt->options) {
+        const char *global_name = strVal(lfirst(item));
+        if (strcmp(global_name, "weak_password") == 0) {
+            ereport(ERROR,
+                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("Don't drop config named 'weak_password'")));
+        }
+        find = false;
+
+        Relation relation = heap_open(GsGlobalConfigRelationId, RowExclusiveLock);
+        scan = tableam_scan_begin(relation, SnapshotNow, 0, NULL);
+        while ((tup = (HeapTuple)tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) {
+            Datum datum = heap_getattr(tup, Anum_gs_global_config_name, RelationGetDescr(relation), &isNull);
+            if (!isNull && strcmp(global_name, DatumGetCString(datum)) == 0) {
+                find = true;
+                simple_heap_delete(relation, &tup->t_self);
+                break;
+            }
+        }
+        tableam_scan_end(scan);
+        heap_close(relation, NoLock);
+        if (!find) {
+            ereport(ERROR,
+                (errcode(ERRCODE_INVALID_NAME), errmsg("Parameter %s does not exist, please check it.", global_name)));
+        }
+    }
+}
+
diff --git a/src/common/backend/catalog/gs_job_attribute.cpp b/src/common/backend/catalog/gs_job_attribute.cpp
index 2056d98e1..a7309a5e8 100644
--- a/src/common/backend/catalog/gs_job_attribute.cpp
+++ b/src/common/backend/catalog/gs_job_attribute.cpp
@@ -37,6 +37,7 @@
 #include "commands/dbcommands.h"
 #include "commands/extension.h"
 #include "commands/schemacmds.h"
+#include "commands/user.h"
 #include "executor/spi.h"
 #include "funcapi.h"
 #include "mb/pg_wchar.h"
@@ -545,6 +546,27 @@ void grant_user_authorization_internal(PG_FUNCTION_ARGS)
     pfree(DatumGetPointer(attribute_value[0]));
 }
 
+static char* get_current_username()
+{
+#ifndef WIN32
+    struct passwd* pw = NULL;
+    char* pRet = NULL;
+
+    (void)syscalllockAcquire(&getpwuid_lock);
+    pw = getpwuid(geteuid());
+    if (pw == NULL) {
+        (void)syscalllockRelease(&getpwuid_lock);
+        return NULL;
+    }
+    /* Allocate new memory because later getpwuid() calls can overwrite it. */
+    pRet = pstrdup(pw->pw_name);
+    (void)syscalllockRelease(&getpwuid_lock);
+    return pRet;
+#else
+    return NULL;
+#endif
+}
+
 /*
  * @brief check_credential_name_valid
  *        Check if a user input string is a valid username.
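
The `get_current_username()` helper just above copies `pw_name` out of `getpwuid()`'s static buffer while the lock is still held, precisely because a later `getpwuid()` call may overwrite that buffer. Below is a minimal standalone sketch of the same pattern; `std::mutex` and `strdup` stand in for openGauss's `syscalllock` wrappers and `pstrdup`, and those substitutions are assumptions made purely so the example is self-contained:

```
#include <pwd.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <mutex>

static std::mutex getpwuid_mutex; /* stand-in for syscalllockAcquire/Release */

/* Return a heap copy of the effective user's name, or nullptr on failure.
 * The copy is taken while the lock is held, since getpwuid() returns a
 * pointer into static storage that later calls may overwrite. */
static char* current_username()
{
    std::lock_guard<std::mutex> guard(getpwuid_mutex);
    struct passwd* pw = getpwuid(geteuid());
    return (pw != nullptr) ? strdup(pw->pw_name) : nullptr;
}

int main()
{
    char* name = current_username();
    std::printf("effective user: %s\n", (name != nullptr) ? name : "(unknown)");
    std::free(name);
    return 0;
}
```

The ordering is the design point: releasing the lock before the copy would let another thread's `getpwuid()` clobber `pw` first.
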
@@ -563,6 +585,23 @@
             erraction("Please enter a valid str")));
         }
     }
+    char *initial_user = get_current_username();
+    Assert(initial_user != NULL);
+    if (initial_user == NULL) {
+        ereport(ERROR, (errmodule(MOD_JOB), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("initial user name is invalid."),
+            errdetail("initial user name is invalid."),
+            errcause("initial user is invalid"),
+            erraction("EXTERNAL_SCRIPT is not supported")));
+    }
+    if (strcmp(name_str, initial_user) == 0) {
+        ereport(ERROR, (errmodule(MOD_JOB), errcode(ERRCODE_INVALID_NAME),
+            errmsg("Credential username is invalid."),
+            errdetail("Credential username is the initial user, which cannot be the credential user"),
+            errcause("str is invalid"),
+            erraction("Please enter a valid str")));
+    }
+    pfree(initial_user);
     pfree(name_str);
 }
 
@@ -908,6 +947,10 @@ static char *create_inline_program(Datum job_name, Datum job_type, Datum job_act
     static const short nrgs_program = 6;
     FunctionCallInfoData fcinfo_program;
     InitFunctionCallInfoData(fcinfo_program, NULL, nrgs_program, InvalidOid, NULL, NULL);
+    /* zero the arg arrays so unset slots read as NULL datums */
+    rc = memset_s(fcinfo_program.arg, nrgs_program * sizeof(Datum), 0, nrgs_program * sizeof(Datum));
+    securec_check(rc, "\0", "\0");
+    rc = memset_s(fcinfo_program.argnull, nrgs_program * sizeof(bool), 0, nrgs_program * sizeof(bool));
+    securec_check(rc, "\0", "\0");
     fcinfo_program.arg[0] = CStringGetTextDatum(program_name); /* program_name */
     fcinfo_program.arg[1] = job_type;                          /* program_type */
     fcinfo_program.arg[2] = job_action;                        /* program_action */
@@ -1094,6 +1137,10 @@ void create_job_1_internal(PG_FUNCTION_ARGS)
     static const short nrgs_job = 16;
     FunctionCallInfoData fcinfo_job;
     InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL);
+    errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
+    securec_check(rc, "\0", "\0");
+    rc = memset_s(fcinfo_job.argnull, nrgs_job * sizeof(bool), 0, nrgs_job * sizeof(bool));
+    securec_check(rc, "\0", "\0");
 
     fcinfo_job.arg[0] = job_name;     /* job_name */
     fcinfo_job.arg[1] = program_name; /* program_name */
@@ -1199,12 +1246,16 @@ void create_job_2_internal(PG_FUNCTION_ARGS)
     static const short nrgs_job = 16;
     FunctionCallInfoData fcinfo_job;
     InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL);
+    errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum));
+    securec_check(rc, "\0", "\0");
+    rc = memset_s(fcinfo_job.argnull, nrgs_job * sizeof(bool), 0, nrgs_job * sizeof(bool));
+    securec_check(rc, "\0", "\0");
 
     fcinfo_job.arg[0] = PG_GETARG_DATUM(0); /* job_name */
     fcinfo_job.arg[1] = program_name;       /* program_name */
     fcinfo_job.arg[2] = schedule_name;      /* schedule_name */
     fcinfo_job.arg[3] = PG_GETARG_DATUM(3); /* job_class */
-    fcinfo_job.arg[4] = enabled;            /* enabled */
+    fcinfo_job.arg[4] = PG_GETARG_DATUM(4); /* enabled */
     fcinfo_job.arg[5] = PG_GETARG_DATUM(5); /* auto_drop */
     fcinfo_job.arg[6] = PG_ARGISNULL(6) ?
Datum(0) : PG_GETARG_DATUM(6); /* comments */ fcinfo_job.arg[7] = PG_GETARG_DATUM(7); /* job_style */ @@ -1246,6 +1297,10 @@ void create_job_3_internal(PG_FUNCTION_ARGS) static const short nrgs_job = 16; FunctionCallInfoData fcinfo_job; InitFunctionCallInfoData(fcinfo_job, NULL, nrgs_job, InvalidOid, NULL, NULL); + errno_t rc = memset_s(fcinfo_job.arg, nrgs_job * sizeof(Datum), 0, nrgs_job * sizeof(Datum)); + securec_check(rc, "\0", "\0"); + rc = memset_s(fcinfo_job.argnull, nrgs_job * sizeof(bool), 0, nrgs_job * sizeof(bool)); + securec_check(rc, "\0", "\0"); fcinfo_job.arg[0] = job_name; /* job_name */ fcinfo_job.arg[1] = program_name; /* program_name */ @@ -1996,4 +2051,4 @@ void disable_single_internal(PG_FUNCTION_ARGS) bool force = PG_GETARG_BOOL(1); Datum enable_value = BoolToText(false); enable_single_force(object_name, enable_value, force); -} +} \ No newline at end of file diff --git a/src/common/backend/catalog/gs_matview.cpp b/src/common/backend/catalog/gs_matview.cpp index 187c06b34..5f479e608 100644 --- a/src/common/backend/catalog/gs_matview.cpp +++ b/src/common/backend/catalog/gs_matview.cpp @@ -39,8 +39,6 @@ #include "utils/syscache.h" #include "utils/inval.h" -static Oid FindRoleid(Oid relid); - void create_matview_tuple(Oid matviewOid, Oid matmapid, bool isIncremental) { errno_t rc; @@ -179,6 +177,10 @@ void delete_matview_tuple(Oid matviewOid) matmapobject.objectSubId = 0; performDeletion(&matmapobject, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); + } else { + ereport(DEBUG2, + (errmodule(MOD_OPT), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Matviewmap %d relation is invalid when delete it.", (int)matmapid))); } } @@ -590,7 +592,7 @@ bool CheckPermissionForBasetable(const RangeTblEntry *rte) /* * Get owner by relid */ -static Oid FindRoleid(Oid relid) +Oid FindRoleid(Oid relid) { Oid roleid; HeapTuple tuple = NULL; diff --git a/src/common/backend/catalog/gs_package.cpp b/src/common/backend/catalog/gs_package.cpp index 79eb66334..f336b8ea3 100644 --- a/src/common/backend/catalog/gs_package.cpp +++ b/src/common/backend/catalog/gs_package.cpp @@ -38,9 +38,12 @@ #include "catalog/gs_package_fn.h" #include "catalog/pg_object.h" #include "catalog/pg_proc.h" +#include "catalog/pg_proc_fn.h" #include "catalog/pg_synonym.h" #include "commands/defrem.h" #include "commands/sqladvisor.h" +#include "gs_thread.h" +#include "parser/parse_type.h" #include "pgxc/pgxc.h" #include "utils/acl.h" #include "utils/builtins.h" @@ -54,36 +57,32 @@ #include "utils/pl_global_package_runtime_cache.h" #include "utils/pl_package.h" -static PLpgSQL_datum* copypPackageVarDatum(PLpgSQL_datum* datum); -static PackageRuntimeState* buildPkgRunStatesbyPackage(PLpgSQL_package* pkg); -static PackageRuntimeState* buildPkgRunStatebyPkgRunState(PackageRuntimeState* parentPkgState); -static void copyCurrentSessionPkgs(SessionPackageRuntime* sessionPkgs, DList* pkgList); -static bool pkgExistInSession(PackageRuntimeState* pkgState); -static void copyParentSessionPkgs(SessionPackageRuntime* sessionPkgs, List* pkgList); -static void restorePkgValuesByPkgState(PLpgSQL_package* targetPkg, PackageRuntimeState* pkgState, bool isInit = false); -static void restoreAutonmSessionPkgs(SessionPackageRuntime* sessionPkgs); +#include "tcop/pquery.h" +#include "executor/executor.h" +#include "executor/tstoreReceiver.h" + +static PLpgSQL_datum* CopyPackageVarDatum(PLpgSQL_datum* datum); +static PackageRuntimeState* BuildPkgRunStatesbyPackage(PLpgSQL_package* pkg); +static PackageRuntimeState* 
BuildPkgRunStatebyPkgRunState(PackageRuntimeState* parentPkgState); +static void CopyCurrentSessionPkgs(SessionPackageRuntime* sessionPkgs, DList* pkgList); +static bool PkgExistInSession(PackageRuntimeState* pkgState); +static void CopyParentSessionPkgs(SessionPackageRuntime* sessionPkgs, List* pkgList); +static void RestorePkgValuesByPkgState(PLpgSQL_package* targetPkg, PackageRuntimeState* pkgState, bool isInit = false); +static void RestoreAutonmSessionPkgs(SessionPackageRuntime* sessionPkgs); +static void ReleaseUnusedPortalContext(List* portalContexts, bool releaseAll = false); +#define MAXSTRLEN ((1 << 11) - 1) static Acl* PackageAclDefault(Oid ownerId) { - AclMode world_default; AclMode owner_default; int nacl = 0; Acl* acl = NULL; AclItem* aip = NULL; - world_default = ACL_NO_RIGHTS; owner_default = ACL_ALL_RIGHTS_PACKAGE; - if (world_default != ACL_NO_RIGHTS) - nacl++; if (owner_default != ACL_NO_RIGHTS) nacl++; acl = allocacl(nacl); aip = ACL_DAT(acl); - if (world_default != ACL_NO_RIGHTS) { - aip->ai_grantee = ACL_ID_PUBLIC; - aip->ai_grantor = ownerId; - ACLITEM_SET_PRIVS_GOPTIONS(*aip, world_default, ACL_NO_RIGHTS); - aip++; - } if (owner_default != ACL_NO_RIGHTS) { aip->ai_grantee = ownerId; @@ -94,7 +93,6 @@ static Acl* PackageAclDefault(Oid ownerId) return acl; } - /* ---------------- * PackageSpecCreate * @@ -332,7 +330,7 @@ Oid PackageSpecCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, pkgacl = get_user_default_acl(ACL_OBJECT_PACKAGE, ownerId, pkgNamespace); if (pkgacl != NULL) values[Anum_gs_package_pkgacl - 1] = PointerGetDatum(pkgacl); - else if (PLSQL_SECURITY_DEFINER) { + else if (PLSQL_SECURITY_DEFINER && u_sess->attr.attr_common.upgrade_mode == 0) { values[Anum_gs_package_pkgacl - 1] = PointerGetDatum(PackageAclDefault(ownerId)); } else { nulls[Anum_gs_package_pkgacl - 1] = true; @@ -347,7 +345,7 @@ Oid PackageSpecCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, if (OidIsValid(oldPkgOid)) { if (replace != true) { ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), + (errcode(ERRCODE_DUPLICATE_PACKAGE), errmsg("package \"%s\" already exists.", pkgName))); } else { oldpkgtup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(oldPkgOid)); @@ -359,6 +357,10 @@ Oid PackageSpecCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, errcause("System error"), erraction("Drop and rebuild package."))); } + if (!pg_package_ownercheck(HeapTupleGetOid(oldpkgtup), ownerId)) { + ReleaseSysCache(oldpkgtup); + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PACKAGE, pkgName); + } tup = heap_modify_tuple(oldpkgtup, tupDesc, values, nulls, replaces); simple_heap_update(pkgDesc, &tup->t_self, tup); ReleaseSysCache(oldpkgtup); @@ -384,7 +386,7 @@ Oid PackageSpecCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, DeleteTypesDenpendOnPackage(PackageRelationId, pkgOid); /* the 'shared dependencies' also change when updating. 
*/ deleteSharedDependencyRecordsFor(PackageRelationId, pkgOid, 0); - dropFunctionByPackageOid(pkgOid); + DeleteFunctionByPackageOid(pkgOid); } heap_freetuple_ext(tup); @@ -459,7 +461,6 @@ Oid PackageSpecCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, const char* pkgBodySrc, const char* pkgInitSrc, bool replace) { Relation pkgDesc; - Oid pkgOid = InvalidOid; bool nulls[Natts_gs_package]; Datum values[Natts_gs_package]; bool replaces[Natts_gs_package]; @@ -477,7 +478,6 @@ Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, Assert(PointerIsValid(pkgBodySrc)); HeapTuple tup = NULL; HeapTuple oldpkgtup = NULL; - Oid packageOid = InvalidOid; Oid oldPkgOid = InvalidOid; /* sanity checks */ if (pkgName == NULL) { @@ -493,8 +493,8 @@ Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, erraction("Please rename package name"))); } - packageOid = PackageNameGetOid(pkgName, pkgNamespace); - if (packageOid == InvalidOid) { + oldPkgOid = PackageNameGetOid(pkgName, pkgNamespace); + if (!OidIsValid(oldPkgOid)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("package spec not found"))); } /* initialize nulls and values */ @@ -516,7 +516,6 @@ Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, pkgDesc = heap_open(PackageRelationId, RowExclusiveLock); tupDesc = RelationGetDescr(pkgDesc); - oldPkgOid = PackageNameGetOid(pkgName, pkgNamespace); if (OidIsValid(oldPkgOid)) { oldpkgtup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(oldPkgOid)); if (!HeapTupleIsValid(oldpkgtup)) { @@ -531,13 +530,14 @@ Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, SysCacheGetAttr(PACKAGEOID, oldpkgtup, Anum_gs_package_pkgbodydeclsrc, &isNull); if (!isNull && !replace) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_PACKAGE), errmsg("package body already exists"))); - } + } else if (!isNull) { + DeleteFunctionByPackageOid(oldPkgOid); + DeleteTypesDenpendOnPackage(PackageRelationId, oldPkgOid, false); + } tup = heap_modify_tuple(oldpkgtup, tupDesc, values, nulls, replaces); simple_heap_update(pkgDesc, &tup->t_self, tup); ReleaseSysCache(oldpkgtup); isReplaced = true; - } else { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("package spec not found"))); } if (u_sess->attr.attr_common.IsInplaceUpgrade && u_sess->upg_cxt.Inplace_upgrade_next_gs_package_oid != InvalidOid) { @@ -545,39 +545,34 @@ Oid PackageBodyCreate(Oid pkgNamespace, const char* pkgName, const Oid ownerId, if (HeapTupleIsValid(tup)) { - pkgOid = HeapTupleGetOid(tup); + oldPkgOid = HeapTupleGetOid(tup); } - Assert(OidIsValid(pkgOid)); CatalogUpdateIndexes(pkgDesc, tup); - if (isReplaced) { - DeleteTypesDenpendOnPackage(PackageRelationId, pkgOid, false); - } - heap_freetuple_ext(tup); - heap_close(pkgDesc, NoLock); + heap_close(pkgDesc, RowExclusiveLock); /* Post creation hook for new package */ - InvokeObjectAccessHook(OAT_POST_CREATE, PackageRelationId, pkgOid, 0, NULL); + InvokeObjectAccessHook(OAT_POST_CREATE, PackageRelationId, oldPkgOid, 0, NULL); /* Advance command counter so new tuple can be seen by validator */ CommandCounterIncrement(); /* Record the procedure create time. 
*/ - if (OidIsValid(pkgOid)) { - if (!isReplaced) { - PgObjectOption objectOpt = {true, true, false, false}; - CreatePgObject(pkgOid, OBJECT_TYPE_PKGSPEC, ownerId, objectOpt); - } else { - UpdatePgObjectMtime(pkgOid, OBJECT_TYPE_PROC); - } + if (OidIsValid(oldPkgOid)) { + if (!isReplaced) { + PgObjectOption objectOpt = {true, true, false, false}; + CreatePgObject(oldPkgOid, OBJECT_TYPE_PKGSPEC, ownerId, objectOpt); + } else { + UpdatePgObjectMtime(oldPkgOid, OBJECT_TYPE_PROC); + } } - plpgsql_package_validator(pkgOid, false, true); + plpgsql_package_validator(oldPkgOid, false, true); - return pkgOid; + return oldPkgOid; } bool IsFunctionInPackage(List* wholename) @@ -620,15 +615,495 @@ bool IsFunctionInPackage(List* wholename) return false; } +/* free the reference value of var */ static void free_var_value(PLpgSQL_var* var) { if (var->freeval) { + /* the value is stored by reference and needs to be freed */ pfree(DatumGetPointer(var->value)); var->freeval = false; } } -void BuildSessionPackageRuntime(uint64 sessionId, uint64 parentSessionId) +/* find the portals stored by the autonomous session and build a new portal based on them */ +Portal BuildHoldPortalFromAutoSession(PLpgSQL_execstate* estate, int curVarDno, int outParamIndex) +{ + AutoSessionPortalData* holdPortal = NULL; + ListCell* cell = NULL; + PLpgSQL_var *curVar = NULL; + foreach(cell, u_sess->plsql_cxt.storedPortals) { + AutoSessionPortalData* portalData = (AutoSessionPortalData*)lfirst(cell); + if (portalData != NULL && portalData->outParamIndex == outParamIndex) { + holdPortal = portalData; + } + } + + if (holdPortal == NULL) { + /* reset the cursor attributes */ + curVar = (PLpgSQL_var*)(estate->datums[curVarDno]); + free_var_value(curVar); + curVar->value = (Datum)0; + curVar->isnull = true; + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_ISOPEN]); + curVar->value = BoolGetDatum(false); + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_FOUND]); + curVar->value = BoolGetDatum(false); + curVar->isnull = true; + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_NOTFOUND]); + curVar->value = BoolGetDatum(false); + curVar->isnull = true; + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_ROWCOUNT]); + curVar->value = Int32GetDatum(0); + curVar->isnull = true; + return NULL; + } + /* Reset SPI result (note we deliberately don't touch lastoid) */ + SPI_processed = 0; + SPI_tuptable = NULL; + u_sess->SPI_cxt._current->processed = 0; + u_sess->SPI_cxt._current->tuptable = NULL; + + SPI_STACK_LOG("begin", NULL, NULL); + if (_SPI_begin_call(true) < 0) { + ereport(ERROR, (errcode(ERRCODE_SPI_CONNECTION_FAILURE), + errmsg("SPI stack is corrupted when performing a cursor operation, current level: %d, connected level: %d", + u_sess->SPI_cxt._curid, u_sess->SPI_cxt._connected))); + } + + Portal portal = CreateNewPortal(true); + portal->holdContext = holdPortal->holdContext; + portal->holdStore = holdPortal->holdStore; + portal->tupDesc = holdPortal->tupDesc; + portal->strategy = holdPortal->strategy; + portal->cursorOptions = holdPortal->cursorOptions; + portal->commandTag = holdPortal->commandTag; + portal->atEnd = holdPortal->atEnd; + portal->atStart = holdPortal->atStart; + portal->portalPos = holdPortal->portalPos; + + portal->autoHeld = true; + portal->resowner = NULL; + portal->createSubid = InvalidSubTransactionId; + portal->activeSubid = InvalidSubTransactionId; + portal->status = PORTAL_READY; + + portal->portalPinned = true; + + /* Pop the SPI stack */ + SPI_STACK_LOG("end", NULL, NULL); + _SPI_end_call(true); + + /* restore 
cursor var values */ + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_ISOPEN]); + curVar->value = BoolGetDatum(holdPortal->is_open); + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_FOUND]); + curVar->value = BoolGetDatum(holdPortal->found); + curVar->isnull = holdPortal->null_fetch; + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_NOTFOUND]); + curVar->value = BoolGetDatum(holdPortal->not_found); + curVar->isnull = holdPortal->null_fetch; + curVar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_ROWCOUNT]); + curVar->value = Int32GetDatum(holdPortal->row_count); + curVar->isnull = holdPortal->null_open; + + return portal; +} + +static void ReleaseUnusedPortalContext(List* portalContexts, bool releaseAll) +{ + ListCell* cell = NULL; + AutoSessionPortalContextData* portalContext = NULL; + foreach(cell, portalContexts) { + portalContext = (AutoSessionPortalContextData*)lfirst(cell); + if (releaseAll || portalContext->status == CONTEXT_NEW) { + /* + * if the context is not new, its session id and parent come from the auto session. + * we should set them back to the parent session before deleting it. + */ + if (portalContext->status != CONTEXT_NEW) { + portalContext->portalHoldContext->session_id = u_sess->session_id; + portalContext->portalHoldContext->thread_id = gs_thread_self(); + MemoryContextSetParent(portalContext->portalHoldContext, u_sess->top_portal_cxt); + } + MemoryContextDelete(portalContext->portalHoldContext); + } + } +} + +/* restore cursors from the autonomous transaction procedure's out params */ +void restoreAutonmSessionCursors(PLpgSQL_execstate* estate, PLpgSQL_row* row) +{ + if (!u_sess->plsql_cxt.call_after_auto) { + return; + } + PLpgSQL_var* curvar = NULL; + for (int i = 0; i < row->nfields; i++) { + if (estate->datums[row->varnos[i]]->dtype != PLPGSQL_DTYPE_VAR) { + continue; + } + curvar = (PLpgSQL_var*)(estate->datums[row->varnos[i]]); + if (curvar->datatype->typoid == REFCURSOROID) { + Portal portal = BuildHoldPortalFromAutoSession(estate, row->varnos[i], i); + if (portal == NULL) { + continue; + } + if (curvar->pkg != NULL) { + MemoryContext temp = MemoryContextSwitchTo(curvar->pkg->pkg_cxt); + assign_text_var(curvar, portal->name); + temp = MemoryContextSwitchTo(temp); + } else { + assign_text_var(curvar, portal->name); + } + curvar->cursor_closed = false; + } + } + + list_free_deep(u_sess->plsql_cxt.storedPortals); + u_sess->plsql_cxt.storedPortals = NIL; + ReleaseUnusedPortalContext(u_sess->plsql_cxt.portalContext); + list_free_deep(u_sess->plsql_cxt.portalContext); + u_sess->plsql_cxt.portalContext = NIL; + u_sess->plsql_cxt.call_after_auto = false; +} + +static bool PortalContextInList(Portal portal, List* PortalContextList) +{ + ListCell* cell = NULL; + AutoSessionPortalContextData* portalContext = NULL; + foreach(cell, PortalContextList) { + portalContext = (AutoSessionPortalContextData*)lfirst(cell); + if (portalContext != NULL && portalContext->portalHoldContext == portal->holdContext) { + return true; + } + } + return false; +} + +void ResetAutoPortalConext(Portal portal) +{ + if (portal->holdStore == NULL) { + return; + } + /* + * we do not call tuplestore_end, because the context belongs to another session; + * this may cause a memory leak, but it is not serious, because the autonomous + * session will be destroyed soon. 
+ */ + portal->holdStore = NULL; + List* PortalContextList = u_sess->plsql_cxt.auto_parent_session_pkgs->portalContext; + ListCell* cell = NULL; + AutoSessionPortalContextData* portalContext = NULL; + /* mark the context as new so it can be re-used */ + foreach(cell, PortalContextList) { + portalContext = (AutoSessionPortalContextData*)lfirst(cell); + if (portalContext != NULL && portalContext->portalHoldContext == portal->holdContext) { + portalContext->status = CONTEXT_NEW; + if (u_sess->plsql_cxt.parent_context != NULL) { + portalContext->portalHoldContext->session_id = u_sess->plsql_cxt.parent_session_id; + portalContext->portalHoldContext->thread_id = u_sess->plsql_cxt.parent_thread_id; + MemoryContextSetParent(portalContext->portalHoldContext, u_sess->plsql_cxt.parent_context); + } + } + } +} + +MemoryContext GetAvailableHoldContext(List* PortalContextList) +{ + ListCell* cell = NULL; + AutoSessionPortalContextData* portalContext = NULL; + MemoryContext result = NULL; + foreach(cell, PortalContextList) { + portalContext = (AutoSessionPortalContextData*)lfirst(cell); + if (portalContext != NULL && portalContext->status == CONTEXT_NEW) { + result = portalContext->portalHoldContext; + portalContext->status = CONTEXT_USED; + break; + } + } + + if (result == NULL) { + ereport(ERROR, + (errmodule(MOD_GPRC), errcode(ERRCODE_INVALID_STATUS), + errmsg("no available portal hold context"), + errdetail("no available portal hold context when holding an autonomous transaction procedure cursor"), + errcause("System error"), + erraction("Modify the autonomous transaction procedure"))); + } + + + u_sess->plsql_cxt.parent_session_id = result->session_id; + u_sess->plsql_cxt.parent_thread_id = result->thread_id; + u_sess->plsql_cxt.parent_context = result->parent; + result->session_id = u_sess->session_id; + result->thread_id = gs_thread_self(); + MemoryContextSetParent(result, u_sess->top_portal_cxt); + + return result; +} + +static List* BuildPortalContextListForAutoSession(PLpgSQL_function* func) +{ + List* result = NIL; + if (func == NULL) { + return result; + } + + if (func->out_param_varno == -1) { + return result; + } + + AutoSessionPortalContextData* portalContext = NULL; + PLpgSQL_datum* outDatum = func->datums[func->out_param_varno]; + if (outDatum->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* outVar = (PLpgSQL_var*)outDatum; + if (outVar == NULL || outVar->datatype == NULL || outVar->datatype->typoid != REFCURSOROID) { + return result; + } + portalContext = (AutoSessionPortalContextData*)palloc0(sizeof(AutoSessionPortalContextData)); + portalContext->status = CONTEXT_NEW; + portalContext->portalHoldContext = AllocSetContextCreate(u_sess->top_portal_cxt, + "PortalHoldContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + result = lappend(result, portalContext); + return result; + } + + PLpgSQL_row* outRow = (PLpgSQL_row*)outDatum; + if (outRow->refname != NULL) { + /* means out param is just one normal row variable */ + return result; + } + for (int i = 0; i < outRow->nfields; i++) { + if (func->datums[outRow->varnos[i]]->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = (PLpgSQL_var*)(func->datums[outRow->varnos[i]]); + if (var != NULL && var->datatype != NULL && var->datatype->typoid == REFCURSOROID) { + portalContext = (AutoSessionPortalContextData*)palloc0(sizeof(AutoSessionPortalContextData)); + portalContext->status = CONTEXT_NEW; + portalContext->portalHoldContext = AllocSetContextCreate(u_sess->top_portal_cxt, + "PortalHoldContext", + 
ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + result = lappend(result, portalContext); + } + } + } + return result; +} + +static void HoldOutParamPortal(Portal portal) +{ + if (portal->portalPinned && !portal->autoHeld) { + /* + * Doing transaction control, especially abort, inside a cursor + * loop that is not read-only, for example using UPDATE + * ... RETURNING, has weird semantics issues. Also, this + * implementation wouldn't work, because such portals cannot be + * held. (The core grammar enforces that only SELECT statements + * can drive a cursor, but for example PL/pgSQL does not restrict + * it.) + */ + if (portal->strategy != PORTAL_ONE_SELECT) { + ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + errmsg("cannot perform transaction commands inside a cursor loop that is not read-only"))); + } + + /* skip portals that got an error */ + if (portal->status == PORTAL_FAILED) + return; + + /* Verify it's in a suitable state to be held */ + if (portal->status != PORTAL_READY) + ereport(ERROR, (errmsg("pinned portal(%s) is not ready to be auto-held, with status[%d]", + portal->name, portal->status))); + + HoldPortal(portal); + portal->autoHeld = true; + } +} + +static AutoSessionPortalData* BuildPortalDataForParentSession(PLpgSQL_execstate* estate, + int curVarDno, int outParamIndex) +{ + PLpgSQL_var* curvar = (PLpgSQL_var*)(estate->datums[curVarDno]); + if (curvar->isnull) { + return NULL; + } + char* curname = TextDatumGetCString(curvar->value); + Portal portal = SPI_cursor_find(curname); + pfree_ext(curname); + if (portal == NULL) { + return NULL; + } + + /* the portal is not held yet, hold it */ + if (portal->portalPinned && !portal->autoHeld) { + /* Push the SPI stack */ + SPI_STACK_LOG("begin", NULL, NULL); + if (_SPI_begin_call(true) < 0) { + ereport(ERROR, (errcode(ERRCODE_SPI_CONNECTION_FAILURE), + errmsg("SPI stack is corrupted when performing a cursor operation, current level: %d, connected level: %d", + u_sess->SPI_cxt._curid, u_sess->SPI_cxt._connected))); + } + HoldOutParamPortal(portal); + /* Pop the SPI stack */ + SPI_STACK_LOG("end", NULL, NULL); + _SPI_end_call(true); + } + + if (!portal->autoHeld) { + return NULL; + } + + if (!PortalContextInList(portal, u_sess->plsql_cxt.auto_parent_session_pkgs->portalContext)) { + ereport(ERROR, + (errmodule(MOD_GPRC), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("nested call of ref cursor out param for autonomous transaction procedure is not supported yet."), + errdetail("N/A"), + errcause("feature not supported"), + erraction("Modify the autonomous transaction procedure"))); + } + + MemoryContext oldCtx = MemoryContextSwitchTo(portal->holdContext); + AutoSessionPortalData* portalData = (AutoSessionPortalData*)palloc0(sizeof(AutoSessionPortalData)); + portalData->outParamIndex = outParamIndex; + portalData->strategy = portal->strategy; + portalData->cursorOptions = portal->cursorOptions; + portalData->commandTag = pstrdup(portal->commandTag); + portalData->atEnd = portal->atEnd; + portalData->atStart = portal->atStart; + portalData->portalPos = portal->portalPos; + portalData->holdStore = portal->holdStore; + portalData->holdContext = portal->holdContext; + portalData->tupDesc = CreateTupleDescCopy(portal->tupDesc); + /* cursor attribute data */ + curvar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_ISOPEN]); + portalData->is_open = DatumGetBool(curvar->value); + portalData->null_open = curvar->isnull; + curvar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_FOUND]); + portalData->found = 
DatumGetBool(curvar->value); + portalData->null_fetch = curvar->isnull; + curvar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_NOTFOUND]); + portalData->not_found = DatumGetBool(curvar->value); + curvar = (PLpgSQL_var*)(estate->datums[curVarDno + CURSOR_ROWCOUNT]); + portalData->row_count = DatumGetInt32(curvar->value); + MemoryContextSwitchTo(oldCtx); + if (u_sess->plsql_cxt.parent_context != NULL) { + portalData->holdContext->session_id = u_sess->plsql_cxt.parent_session_id; + portalData->holdContext->thread_id = u_sess->plsql_cxt.parent_thread_id; + MemoryContextSetParent(portalData->holdContext, u_sess->plsql_cxt.parent_context); + } + + return portalData; +} + +static List* BuildPortalDataListForParentSession(PLpgSQL_execstate* estate) +{ + List* result = NIL; + if (estate == NULL) { + return result; + } + SessionPackageRuntime* sessionPkgs = NULL; + if (u_sess->plsql_cxt.auto_parent_session_pkgs == NULL) { + return result; + } else { + sessionPkgs = u_sess->plsql_cxt.auto_parent_session_pkgs; + } + + /* means no out param */ + if (estate->func->out_param_varno == -1) { + return result; + } + + PLpgSQL_datum* outDatum = estate->datums[estate->func->out_param_varno]; + AutoSessionPortalData* portalData = NULL; + if (outDatum->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* outVar = (PLpgSQL_var*)outDatum; + if (outVar == NULL || outVar->datatype == NULL || outVar->datatype->typoid != REFCURSOROID) { + return result; + } + portalData = BuildPortalDataForParentSession(estate, estate->func->out_param_varno, 0); + if (portalData != NULL) { + result = lappend(result, portalData); + } + return result; + } + + PLpgSQL_row* outRow = (PLpgSQL_row*)outDatum; + if (outRow->refname != NULL) { + /* means out param is just one normal row variable */ + return result; + } + for (int i = 0; i < outRow->nfields; i++) { + if (estate->datums[outRow->varnos[i]]->dtype != PLPGSQL_DTYPE_VAR) { + continue; + } + PLpgSQL_var* var = (PLpgSQL_var*)(estate->datums[outRow->varnos[i]]); + if (var != NULL && var->datatype != NULL && var->datatype->typoid == REFCURSOROID) { + portalData = BuildPortalDataForParentSession(estate, outRow->varnos[i], i); + if (portalData != NULL) { + result = lappend(result, portalData); + } + } + } + + u_sess->plsql_cxt.parent_session_id = 0; + u_sess->plsql_cxt.parent_thread_id = 0; + u_sess->plsql_cxt.parent_context = NULL; + return result; +} + +static List* BuildFuncInfoList(PLpgSQL_execstate* estate) +{ + List* result = NIL; + if (estate == NULL) { + return result; + } + + AutoSessionFuncValInfo *autoSessionFuncInfo = (AutoSessionFuncValInfo *)palloc0(sizeof(AutoSessionFuncValInfo)); + if (COMPAT_CURSOR) { + PLpgSQL_var* var = (PLpgSQL_var*)(estate->datums[estate->found_varno]); + autoSessionFuncInfo->found = var->value; + var = (PLpgSQL_var*)(estate->datums[estate->sql_cursor_found_varno]); + if (var->isnull) { + autoSessionFuncInfo->sql_cursor_found = PLPGSQL_NULL; + } else { + autoSessionFuncInfo->sql_cursor_found = var->value; + } + var = (PLpgSQL_var*)(estate->datums[estate->sql_notfound_varno]); + if (var->isnull) { + autoSessionFuncInfo->sql_notfound = PLPGSQL_NULL; + } else { + autoSessionFuncInfo->sql_notfound = var->value; + } + + var = (PLpgSQL_var*)(estate->datums[estate->sql_isopen_varno]); + autoSessionFuncInfo->sql_isopen = var->value; + var = (PLpgSQL_var*)(estate->datums[estate->sql_rowcount_varno]); + autoSessionFuncInfo->sql_rowcount = var->value; + } + PLpgSQL_var* var = (PLpgSQL_var*)(estate->datums[estate->sqlcode_varno]); + if (var->isnull) { + 
autoSessionFuncInfo->sqlcode_isnull = true; + autoSessionFuncInfo->sqlcode = 0; + } else { + autoSessionFuncInfo->sqlcode_isnull = false; + char* sqlcode = TextDatumGetCString(var->value); + autoSessionFuncInfo->sqlcode = MAKE_SQLSTATE((unsigned char)sqlcode[0], (unsigned char)sqlcode[1], + (unsigned char)sqlcode[2], (unsigned char)sqlcode[3], + (unsigned char)sqlcode[4]); + } + result = lappend(result, autoSessionFuncInfo); + + return result; +} + + +void BuildSessionPackageRuntimeForAutoSession(uint64 sessionId, uint64 parentSessionId, + PLpgSQL_execstate* estate, PLpgSQL_function* func) { SessionPackageRuntime* parentSessionPkgs = NULL; @@ -637,7 +1112,7 @@ void BuildSessionPackageRuntime(uint64 sessionId, uint64 parentSessionId) if (!u_sess->plsql_cxt.not_found_parent_session_pkgs) { if (u_sess->plsql_cxt.auto_parent_session_pkgs == NULL) { MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); - parentSessionPkgs = g_instance.global_session_pkg->fetch(parentSessionId); + parentSessionPkgs = g_instance.global_session_pkg->Fetch(parentSessionId); MemoryContextSwitchTo(oldcontext); u_sess->plsql_cxt.auto_parent_session_pkgs = parentSessionPkgs; } else { @@ -654,34 +1129,74 @@ void BuildSessionPackageRuntime(uint64 sessionId, uint64 parentSessionId) SessionPackageRuntime* resultSessionPkgs = (SessionPackageRuntime*)palloc0(sizeof(SessionPackageRuntime)); resultSessionPkgs->context = pkgRuntimeCtx; if (u_sess->plsql_cxt.plpgsqlpkg_dlist_objects != NULL) { - copyCurrentSessionPkgs(resultSessionPkgs, u_sess->plsql_cxt.plpgsqlpkg_dlist_objects); + CopyCurrentSessionPkgs(resultSessionPkgs, u_sess->plsql_cxt.plpgsqlpkg_dlist_objects); } if (parentSessionPkgs) { List* parentPkgList = parentSessionPkgs->runtimes; - copyParentSessionPkgs(resultSessionPkgs, parentPkgList); + CopyParentSessionPkgs(resultSessionPkgs, parentPkgList); } else if (parentSessionId != 0) { u_sess->plsql_cxt.not_found_parent_session_pkgs = true; } - g_instance.global_session_pkg->add(sessionId, resultSessionPkgs); + /* build portal context for auto session */ + resultSessionPkgs->portalContext = BuildPortalContextListForAutoSession(func); + + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + resultSessionPkgs->funcValInfo = BuildFuncInfoList(estate); + } + + g_instance.global_session_pkg->Add(sessionId, resultSessionPkgs); MemoryContextSwitchTo(oldCtx); MemoryContextDelete(resultSessionPkgs->context); } -static void copyCurrentSessionPkgs(SessionPackageRuntime* sessionPkgs, DList* pkgList) +void BuildSessionPackageRuntimeForParentSession(uint64 sessionId, PLpgSQL_execstate* estate) +{ + MemoryContext pkgRuntimeCtx = AllocSetContextCreate(CurrentMemoryContext, + "SessionPackageRuntime", + ALLOCSET_SMALL_MINSIZE, + ALLOCSET_SMALL_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext oldCtx = MemoryContextSwitchTo(pkgRuntimeCtx); + SessionPackageRuntime* resultSessionPkgs = (SessionPackageRuntime*)palloc0(sizeof(SessionPackageRuntime)); + resultSessionPkgs->context = pkgRuntimeCtx; + if (u_sess->plsql_cxt.plpgsqlpkg_dlist_objects != NULL) { + CopyCurrentSessionPkgs(resultSessionPkgs, u_sess->plsql_cxt.plpgsqlpkg_dlist_objects); + } + + /* build portal data for out param ref cursor of automous transaction */ + resultSessionPkgs->portalData = BuildPortalDataListForParentSession(estate); + + /* copy the portal context return to parent session */ + if (u_sess->plsql_cxt.auto_parent_session_pkgs != NULL) { + resultSessionPkgs->portalContext = + 
CopyPortalContexts(u_sess->plsql_cxt.auto_parent_session_pkgs->portalContext); + } + + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + resultSessionPkgs->funcValInfo = BuildFuncInfoList(estate); + } + + g_instance.global_session_pkg->Add(sessionId, resultSessionPkgs); + + MemoryContextSwitchTo(oldCtx); + MemoryContextDelete(resultSessionPkgs->context); +} + +static void CopyCurrentSessionPkgs(SessionPackageRuntime* sessionPkgs, DList* pkgList) { PLpgSQL_package* pkg = NULL; DListCell* cell = NULL; dlist_foreach_cell(cell, pkgList) { pkg = ((plpgsql_pkg_HashEnt*)lfirst(cell))->package; - PackageRuntimeState* pkgState = buildPkgRunStatesbyPackage(pkg); + PackageRuntimeState* pkgState = BuildPkgRunStatesbyPackage(pkg); sessionPkgs->runtimes = lappend(sessionPkgs->runtimes, pkgState); } } -static bool pkgExistInSession(PackageRuntimeState* pkgState) +static bool PkgExistInSession(PackageRuntimeState* pkgState) { if (pkgState == NULL) { return false; @@ -689,74 +1204,68 @@ static bool pkgExistInSession(PackageRuntimeState* pkgState) PLpgSQL_pkg_hashkey hashkey; hashkey.pkgOid = pkgState->packageId; PLpgSQL_package* getpkg = plpgsql_pkg_HashTableLookup(&hashkey); - if (getpkg) { - return true; - } else { - return false; - } + return getpkg ? true : false; } -static void copyParentSessionPkgs(SessionPackageRuntime* sessionPkgs, List* pkgList) +static void CopyParentSessionPkgs(SessionPackageRuntime* sessionPkgs, List* pkgList) { ListCell* cell = NULL; foreach(cell, pkgList) { /* if package exist in current session, we already copy it */ - if (pkgExistInSession((PackageRuntimeState*)lfirst(cell))) { + if (PkgExistInSession((PackageRuntimeState*)lfirst(cell))) { continue; } PackageRuntimeState* parentPkgState = (PackageRuntimeState*)lfirst(cell); - PackageRuntimeState* pkgState = buildPkgRunStatebyPkgRunState(parentPkgState); + PackageRuntimeState* pkgState = BuildPkgRunStatebyPkgRunState(parentPkgState); sessionPkgs->runtimes = lappend(sessionPkgs->runtimes, pkgState); } } -static PackageRuntimeState* buildPkgRunStatesbyPackage(PLpgSQL_package* pkg) +static PackageRuntimeState* BuildPkgRunStatesbyPackage(PLpgSQL_package* pkg) { PackageRuntimeState* pkgState = (PackageRuntimeState*)palloc0(sizeof(PackageRuntimeState)); pkgState->packageId = pkg->pkg_oid; pkgState->size = pkg->ndatums; pkgState->datums = (PLpgSQL_datum**)palloc0(sizeof(PLpgSQL_datum*) * pkg->ndatums); for (int i = 0; i < pkg->ndatums; i++) { - pkgState->datums[i] = copypPackageVarDatum(pkg->datums[i]); + pkgState->datums[i] = CopyPackageVarDatum(pkg->datums[i]); } return pkgState; } -static PackageRuntimeState* buildPkgRunStatebyPkgRunState(PackageRuntimeState* parentPkgState) +static PackageRuntimeState* BuildPkgRunStatebyPkgRunState(PackageRuntimeState* parentPkgState) { PackageRuntimeState* pkgState = (PackageRuntimeState*)palloc0(sizeof(PackageRuntimeState)); pkgState->packageId = parentPkgState->packageId; pkgState->size = parentPkgState->size; pkgState->datums = (PLpgSQL_datum**)palloc0(sizeof(PLpgSQL_datum*) * parentPkgState->size); for (int i = 0; i < parentPkgState->size; i++) { - pkgState->datums[i] = copypPackageVarDatum(parentPkgState->datums[i]); + pkgState->datums[i] = CopyPackageVarDatum(parentPkgState->datums[i]); } return pkgState; } -static PLpgSQL_datum* copypPackageVarDatum(PLpgSQL_datum* datum) +static PLpgSQL_datum* CopyPackageVarDatum(PLpgSQL_datum* datum) { PLpgSQL_datum* result = NULL; if (datum == NULL) return NULL; /* only VAR store value */ - switch (datum->dtype) { - case PLPGSQL_DTYPE_VAR: 
{ - PLpgSQL_var* newm = copyPlpgsqlVar((PLpgSQL_var*)datum); - result = (PLpgSQL_datum*)newm; - break; - } - default: - break; + if (datum->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = (PLpgSQL_var*)datum; + if (unlikely(var->nest_table != NULL)) + return NULL; + PLpgSQL_var* newm = copyPlpgsqlVar(var); + result = (PLpgSQL_datum*)newm; } return result; } /* restore package values by Autonm SessionPkgs */ -static void restoreAutonmSessionPkgs(SessionPackageRuntime* sessionPkgs) +static void RestoreAutonmSessionPkgs(SessionPackageRuntime* sessionPkgs) { if (sessionPkgs == NULL || sessionPkgs->runtimes == NULL) { return; @@ -776,12 +1285,12 @@ static void restoreAutonmSessionPkgs(SessionPackageRuntime* sessionPkgs) if (pkg == NULL) { pkg = PackageInstantiation(pkgOid); } - restorePkgValuesByPkgState(pkg, pkgState); + RestorePkgValuesByPkgState(pkg, pkgState); } } /* restore package values by pkgState */ -static void restorePkgValuesByPkgState(PLpgSQL_package* targetPkg, PackageRuntimeState* pkgState, bool isInit) +static void RestorePkgValuesByPkgState(PLpgSQL_package* targetPkg, PackageRuntimeState* pkgState, bool isInit) { if (targetPkg == NULL || pkgState == NULL) { return; @@ -796,13 +1305,19 @@ static void restorePkgValuesByPkgStat targetPkg = PackageInstantiation(targetPkg->pkg_oid); } - for (int i = 0; i < targetPkg->ndatums && i < pkgState->size; i++) { - /* null mean datum not a var */ - if (pkgState->datums[i] == NULL) { + int startNum = 0; + int endNum = targetPkg->ndatums < pkgState->size ? targetPkg->ndatums : pkgState->size; + /* when compiling the body, there is no need to restore public vars */ + if (isInit && targetPkg->is_bodycompiled) { + startNum = targetPkg->public_ndatums; + } + + for (int i = startNum; i < endNum; i++) { + fromVar = (PLpgSQL_var*)pkgState->datums[i]; + /* null means the datum is not a var, no need to restore */ + if (fromVar == NULL) { continue; } - - fromVar = (PLpgSQL_var*)pkgState->datums[i]; /* const value cannot be changed, cursor not supported by autonomous func yet */ if (fromVar->isconst || fromVar->is_cursor_var || fromVar->datatype->typoid == REFCURSOROID) { continue; @@ -835,22 +1350,146 @@ static void restorePkgValuesByPkgState(PLpgSQL_package* targetPkg, PackageRuntim } } -/* init Autonomous session package values by parent session */ +static SessionPackageRuntime* GetSessPkgRuntime(uint64 sessionId) +{ + SessionPackageRuntime* sessionPkgs = NULL; + if (u_sess->plsql_cxt.auto_parent_session_pkgs == NULL) { + MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + sessionPkgs = g_instance.global_session_pkg->Fetch(sessionId); + MemoryContextSwitchTo(oldcontext); + u_sess->plsql_cxt.auto_parent_session_pkgs = sessionPkgs; + } else { + sessionPkgs = u_sess->plsql_cxt.auto_parent_session_pkgs; + } + return sessionPkgs; +} + + +/* update packages already in auto session by parent session */ +void initAutoSessionPkgsValue(uint64 sessionId) +{ + if (u_sess->is_autonomous_session != true || u_sess->SPI_cxt._connected != 0) { + return; + } + + ListCell* cell = NULL; + PackageRuntimeState* pkgState = NULL; + Oid pkgOid = InvalidOid; + PLpgSQL_pkg_hashkey hashkey; + PLpgSQL_package* pkg = NULL; + + SessionPackageRuntime* sessionPkgs = GetSessPkgRuntime(sessionId); + + if (sessionPkgs == NULL) { + u_sess->plsql_cxt.not_found_parent_session_pkgs = true; + return; + } + if (sessionPkgs->runtimes == NULL) { + return; + } + + foreach(cell, sessionPkgs->runtimes) { + pkgState = 
(PackageRuntimeState*)lfirst(cell); + pkgOid = pkgState->packageId; + hashkey.pkgOid = pkgOid; + pkg = plpgsql_pkg_HashTableLookup(&hashkey); + if (pkg == NULL) { + continue; + } + RestorePkgValuesByPkgState(pkg, pkgState); + } +} + +void setCursorAtrValue(PLpgSQL_execstate* estate, AutoSessionFuncValInfo* FuncValInfo) +{ + PLpgSQL_var* var = NULL; + + var = (PLpgSQL_var*)(estate->datums[estate->found_varno]); + var->value = BoolGetDatum(FuncValInfo->found); + var->isnull = false; + + /* Restore the magic implicit cursor attribute variable SQL%FOUND */ + var = (PLpgSQL_var*)(estate->datums[estate->sql_cursor_found_varno]); + /* if the state is -1, SQL%FOUND is set to NULL */ + if (FuncValInfo->sql_cursor_found == PLPGSQL_NULL) { + var->value = (Datum)0; + var->isnull = true; + } else { + var->value = (FuncValInfo->sql_cursor_found == 1) ? (Datum)1 : (Datum)0; + var->isnull = false; + } + + /* Restore the magic implicit cursor attribute variable SQL%NOTFOUND */ + var = (PLpgSQL_var*)(estate->datums[estate->sql_notfound_varno]); + /* if the state is -1, SQL%NOTFOUND is set to NULL */ + if (FuncValInfo->sql_notfound == PLPGSQL_NULL) { + var->value = (Datum)0; + var->isnull = true; + } else { + var->value = (FuncValInfo->sql_notfound == 1) ? (Datum)1 : (Datum)0; + var->isnull = false; + } + + /* Restore the magic implicit cursor attribute variable SQL%ISOPEN */ + var = (PLpgSQL_var*)(estate->datums[estate->sql_isopen_varno]); + var->value = BoolGetDatum(FuncValInfo->sql_isopen); + var->isnull = false; + + /* Restore the magic implicit cursor attribute variable SQL%ROWCOUNT */ + var = (PLpgSQL_var*)(estate->datums[estate->sql_rowcount_varno]); + /* a rowcount of -1 means SQL%ROWCOUNT is set to NULL */ + if (FuncValInfo->sql_rowcount == -1) { + var->value = (Datum)0; + var->isnull = true; + } else { + var->value = FuncValInfo->sql_rowcount; + var->isnull = false; + } +} + +void SetFuncInfoValue(List* SessionFuncInfo, PLpgSQL_execstate* estate) +{ + ListCell* cell = NULL; + AutoSessionFuncValInfo* FuncValInfo = NULL; + + foreach(cell, SessionFuncInfo) { + FuncValInfo = (AutoSessionFuncValInfo*)lfirst(cell); + if (COMPAT_CURSOR) { + setCursorAtrValue(estate, FuncValInfo); + } + if (!FuncValInfo->sqlcode_isnull) { + PLpgSQL_var* var = (PLpgSQL_var*)(estate->datums[estate->sqlcode_varno]); + assign_text_var(var, plpgsql_get_sqlstate(FuncValInfo->sqlcode)); + var = (PLpgSQL_var*)(estate->datums[estate->sqlstate_varno]); + assign_text_var(var, plpgsql_get_sqlstate(FuncValInfo->sqlcode)); + } + } +} + +/* update function info already in auto session by parent session */ +void initAutoSessionFuncInfoValue(uint64 sessionId, PLpgSQL_execstate* estate) +{ + SessionPackageRuntime* sessionPkgs = GetSessPkgRuntime(sessionId); + + if (sessionPkgs == NULL) { + u_sess->plsql_cxt.not_found_parent_session_pkgs = true; + return; + } + if (sessionPkgs->funcValInfo == NULL) { + return; + } + SetFuncInfoValue(sessionPkgs->funcValInfo, estate); +} + + +/* update packages when initializing package in auto session by parent session */ void initAutonomousPkgValue(PLpgSQL_package* targetPkg, uint64 sessionId) { if (u_sess->plsql_cxt.not_found_parent_session_pkgs) { return; } - SessionPackageRuntime* sessionPkgs = NULL; - if (u_sess->plsql_cxt.auto_parent_session_pkgs == NULL) { - MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); - sessionPkgs = g_instance.global_session_pkg->fetch(sessionId); - MemoryContextSwitchTo(oldcontext); - u_sess->plsql_cxt.auto_parent_session_pkgs = sessionPkgs; - } 
else { - sessionPkgs = u_sess->plsql_cxt.auto_parent_session_pkgs; - } + SessionPackageRuntime* sessionPkgs = GetSessPkgRuntime(sessionId); if (sessionPkgs == NULL) { u_sess->plsql_cxt.not_found_parent_session_pkgs = true; @@ -863,15 +1502,63 @@ void initAutonomousPkgValue(PLpgSQL_package* targetPkg, uint64 sessionId) foreach(cell, sessionPkgs->runtimes) { pkgState = (PackageRuntimeState*)lfirst(cell); if (targetPkg->pkg_oid == pkgState->packageId) { - restorePkgValuesByPkgState(targetPkg, pkgState, true); + RestorePkgValuesByPkgState(targetPkg, pkgState, true); break; - } else { - continue; } } } -void processAutonmSessionPkgs(PLpgSQL_function* func) +List *processAutonmSessionPkgs(PLpgSQL_function* func, PLpgSQL_execstate* estate, bool isAutonm) +{ + List *autonmsList = NULL; + /* ignore inline_code_block function */ + if (!OidIsValid(func->fn_oid)) { + return NULL; + } + + uint64 currentSessionId = IS_THREAD_POOL_WORKER ? u_sess->session_id : t_thrd.proc_cxt.MyProcPid; + + if (IsAutonomousTransaction(func->action->isAutonomous)) { + /* + * call after plpgsql_exec_autonm_function(), need restore + * autonm session pkgs to current session, and remove + * sessionpkgs from g_instance.global_session_pkg + */ + uint64 automnSessionId = u_sess->SPI_cxt.autonomous_session->current_attach_sessionid; + SessionPackageRuntime* sessionpkgs = g_instance.global_session_pkg->Fetch(automnSessionId); + RestoreAutonmSessionPkgs(sessionpkgs); + if (sessionpkgs != NULL) { + MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + u_sess->plsql_cxt.storedPortals = CopyPortalDatas(sessionpkgs); + u_sess->plsql_cxt.portalContext = CopyPortalContexts(sessionpkgs->portalContext); + autonmsList = CopyFuncInfoDatas(sessionpkgs); + u_sess->plsql_cxt.call_after_auto = true; + MemoryContextSwitchTo(oldcontext); + MemoryContextDelete(sessionpkgs->context); + g_instance.global_session_pkg->Remove(automnSessionId); + } + g_instance.global_session_pkg->Remove(currentSessionId); + } else { + /* + * call after plpgsql_exec_function(), If it is first level + * autonomous func, need add its all session package values + * to global, the parent session will fetch the sessionPkgs, + * and restore package values by it. + */ + if (u_sess->is_autonomous_session == true && u_sess->SPI_cxt._connected == 0) { + BuildSessionPackageRuntimeForParentSession(currentSessionId, estate); + /* autonomous session will be reused by next autonomous procedure, need clean it */ + if (u_sess->plsql_cxt.auto_parent_session_pkgs != NULL) { + MemoryContextDelete(u_sess->plsql_cxt.auto_parent_session_pkgs->context); + u_sess->plsql_cxt.auto_parent_session_pkgs = NULL; + } + u_sess->plsql_cxt.not_found_parent_session_pkgs = false; + } + } + return autonmsList; +} + +void processAutonmSessionPkgsInException(PLpgSQL_function* func) { /* ignore inline_code_block function */ if (!OidIsValid(func->fn_oid)) { @@ -884,16 +1571,23 @@ void processAutonmSessionPkgs(PLpgSQL_function* func) /* * call after plpgsql_exec_autonm_function(), need restore * autonm session pkgs to current session, and remove - * sessionpkgs from g_instance.global_session_pkg. 
+ * sessionpkgs from g_instance.global_session_pkg */ - uint64 automnSessionId = u_sess->SPI_cxt.autonomous_session->current_attach_sessionid; - SessionPackageRuntime* sessionpkgs = g_instance.global_session_pkg->fetch(automnSessionId); - restoreAutonmSessionPkgs(sessionpkgs); - if (sessionpkgs != NULL) { - MemoryContextDelete(sessionpkgs->context); - g_instance.global_session_pkg->remove(automnSessionId); + if (u_sess->SPI_cxt.autonomous_session == NULL) { + /* exception before create autonomous_session */ + return; } - g_instance.global_session_pkg->remove(currentSessionId); + uint64 automnSessionId = u_sess->SPI_cxt.autonomous_session->current_attach_sessionid; + SessionPackageRuntime* sessionpkgs = g_instance.global_session_pkg->Fetch(automnSessionId); + RestoreAutonmSessionPkgs(sessionpkgs); + if (sessionpkgs != NULL) { + MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + ReleaseUnusedPortalContext(sessionpkgs->portalContext, true); + MemoryContextSwitchTo(oldcontext); + MemoryContextDelete(sessionpkgs->context); + g_instance.global_session_pkg->Remove(automnSessionId); + } + g_instance.global_session_pkg->Remove(currentSessionId); } else { /* * call after plpgsql_exec_function(), If it is first level @@ -902,7 +1596,256 @@ void processAutonmSessionPkgs(PLpgSQL_function* func) * and restore package values by it. */ if (u_sess->is_autonomous_session == true && u_sess->SPI_cxt._connected == 0) { - BuildSessionPackageRuntime(currentSessionId, 0); + BuildSessionPackageRuntimeForParentSession(currentSessionId, NULL); + /* autonomous session will be reused by next autonomous procedure, need clean it */ + if (u_sess->plsql_cxt.auto_parent_session_pkgs != NULL) { + MemoryContextDelete(u_sess->plsql_cxt.auto_parent_session_pkgs->context); + u_sess->plsql_cxt.auto_parent_session_pkgs = NULL; + } + u_sess->plsql_cxt.not_found_parent_session_pkgs = false; } } } + +#ifndef ENABLE_MULTIPLE_NODES +Oid GetOldTupleOid(const char* procedureName, oidvector* parameterTypes, Oid procNamespace, + Oid propackageid, Datum* values, Datum parameterModes) +{ + bool enableOutparamOverride = enable_out_param_override(); + if (t_thrd.proc->workingVersionNum < 92470) { + HeapTuple oldtup = SearchSysCache3(PROCNAMEARGSNSP, + PointerGetDatum(procedureName), + values[Anum_pg_proc_proargtypes - 1], + ObjectIdGetDatum(procNamespace)); + if (!HeapTupleIsValid(oldtup)) { + return InvalidOid; + } + Oid oldTupleOid = HeapTupleGetOid(oldtup); + ReleaseSysCache(oldtup); + return oldTupleOid; + } + if (enableOutparamOverride) { + HeapTuple oldtup = NULL; + oldtup = SearchSysCacheForProcAllArgs(PointerGetDatum(procedureName), + values[Anum_pg_proc_allargtypes - 1], + ObjectIdGetDatum(procNamespace), + ObjectIdGetDatum(propackageid), + parameterModes); + if (!HeapTupleIsValid(oldtup)) { + return InvalidOid; + } + Oid oldTupleOid = HeapTupleGetOid(oldtup); + ReleaseSysCache(oldtup); + return oldTupleOid; + } else { + CatCList* catlist = NULL; + catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(procedureName)); + for (int i = 0; i < catlist->n_members; i++) { + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); + Oid packageid = InvalidOid; + if (HeapTupleIsValid(proctup)) { + Form_pg_proc pform = (Form_pg_proc)GETSTRUCT(proctup); + Oid oldTupleOid = HeapTupleGetOid(proctup); + /* compare function's namespace */ + if (pform->pronamespace != procNamespace) { + continue; + } + bool isNull = false; + Datum packageIdDatum = SysCacheGetAttr(PROCOID, 
proctup, Anum_pg_proc_packageid, &isNull); + if (!isNull) { + packageid = DatumGetObjectId(packageIdDatum); + } + if (packageid != propackageid) { + continue; + } + oidvector* procParaType = ProcedureGetArgTypes(proctup); + bool result = DatumGetBool( + DirectFunctionCall2(oidvectoreq, PointerGetDatum(procParaType), + PointerGetDatum(parameterTypes))); + if (result) { + ReleaseSysCacheList(catlist); + return oldTupleOid; + } + } + } + if (catlist != NULL) { + ReleaseSysCacheList(catlist); + } + } + return InvalidOid; +} + +/* + * judge whether the two argument lists are the same + */ +bool isSameArgList(CreateFunctionStmt* stmt1, CreateFunctionStmt* stmt2) +{ + List* argList1 = stmt1->parameters; + List* argList2 = stmt2->parameters; + ListCell* cell = NULL; + int length1 = list_length(argList1); + int length2 = list_length(argList2); + bool enable_outparam_override = enable_out_param_override(); + bool isSameName = true; + FunctionParameter** arr1 = (FunctionParameter**)palloc0(length1 * sizeof(FunctionParameter*)); + FunctionParameter** arr2 = (FunctionParameter**)palloc0(length2 * sizeof(FunctionParameter*)); + FunctionParameter* fp1 = NULL; + FunctionParameter* fp2 = NULL; + int inArgNum1 = 0; + int inArgNum2 = 0; + int inLoc1 = 0; + int inLoc2 = 0; + int length = 0; + foreach(cell, argList1) { + arr1[length] = (FunctionParameter*)lfirst(cell); + if (arr1[length]->mode != FUNC_PARAM_OUT) { + inArgNum1++; + } + length = length + 1; + } + length = 0; + foreach(cell, argList2) { + arr2[length] = (FunctionParameter*)lfirst(cell); + if (arr2[length]->mode != FUNC_PARAM_OUT) { + inArgNum2++; + } + length = length + 1; + } + if (!enable_outparam_override) { + if (inArgNum1 != inArgNum2) { + return false; + } else if (inArgNum1 == inArgNum2 && length1 != length2) { + char message[MAXSTRLEN]; + errno_t rc = sprintf_s(message, MAXSTRLEN, "cannot override out param: %s", NameListToString(stmt1->funcname)); + securec_check_ss_c(rc, "", ""); + InsertErrorMessage(message, stmt1->startLineNumber); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_PLSQL), + errmsg("cannot override out param: %s", + NameListToString(stmt1->funcname)))); + } + } else { + if (length1 != length2) { + return false; + } + } + for (int i = 0, j = 0; i < length1 || j < length2; i++, j++) { + if (!enable_outparam_override) { + fp1 = arr1[inLoc1]; + fp2 = arr2[inLoc2]; + } else { + fp1 = arr1[i]; + fp2 = arr2[j]; + } + TypeName* t1 = fp1->argType; + TypeName* t2 = fp2->argType; + if (!enable_outparam_override) { + if (fp1->mode == FUNC_PARAM_OUT && fp2->mode == FUNC_PARAM_OUT) { + continue; + } else if (fp1->mode != FUNC_PARAM_OUT && fp2->mode == FUNC_PARAM_OUT) { + inLoc1++; + continue; + } else if (fp1->mode == FUNC_PARAM_OUT && fp2->mode != FUNC_PARAM_OUT) { + inLoc2++; + continue; + } else { + inLoc1++; + inLoc2++; + } + } + Oid toid1; + Oid toid2; + Type typtup1; + Type typtup2; + errno_t rc; + typtup1 = LookupTypeName(NULL, t1, NULL); + typtup2 = LookupTypeName(NULL, t2, NULL); + bool isTableOf1 = false; + bool isTableOf2 = false; + Oid baseOid1 = InvalidOid; + Oid baseOid2 = InvalidOid; + if (HeapTupleIsValid(typtup1)) { + toid1 = typeTypeId(typtup1); + if (((Form_pg_type)GETSTRUCT(typtup1))->typtype == TYPTYPE_TABLEOF) { + baseOid1 = ((Form_pg_type)GETSTRUCT(typtup1))->typelem; + isTableOf1 = true; + } + ReleaseSysCache(typtup1); + } else { + toid1 = findPackageParameter(strVal(linitial(t1->names))); + if (!OidIsValid(toid1)) { + char message[MAXSTRLEN]; + rc = sprintf_s(message, MAXSTRLEN, "type does not exist: %s.", fp1->name); 
+ securec_check_ss_c(rc, "", ""); + InsertErrorMessage(message, stmt1->startLineNumber); + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("type is not exists %s.", fp1->name), + errdetail("CommandType: %s", fp1->name), errcause("System error."), + erraction("Contact Huawei Engineer."))); + } + } + if (HeapTupleIsValid(typtup2)) { + toid2 = typeTypeId(typtup2); + if (((Form_pg_type)GETSTRUCT(typtup2))->typtype == TYPTYPE_TABLEOF) { + baseOid2 = ((Form_pg_type)GETSTRUCT(typtup2))->typelem; + isTableOf2 = true; + } + ReleaseSysCache(typtup2); + } else { + toid2 = findPackageParameter(strVal(linitial(t2->names))); + if (!OidIsValid(toid2)) { + char message[MAXSTRLEN]; + rc = sprintf_s(message, MAXSTRLEN, "type is not exists %s.", fp2->name); + securec_check_ss_c(rc, "", ""); + InsertErrorMessage(message, stmt1->startLineNumber); + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("type is not exists %s.", fp2->name), + errdetail("CommandType: %s", fp2->name), errcause("System error."), + erraction("Contact Huawei Engineer."))); + } + } + /* If table of type shold check its base type */ + if (isTableOf1 == isTableOf2 && isTableOf1 == true) { + if (baseOid1 != baseOid2 || fp1->mode != fp2->mode) { + pfree(arr1); + pfree(arr2); + return false; + } + } else if (toid1 != toid2 || fp1->mode != fp2->mode) { + pfree(arr1); + pfree(arr2); + return false; + } + if (fp1->name == NULL || fp2->name == NULL) { + char message[MAXSTRLEN]; + rc = sprintf_s(message, MAXSTRLEN, "type is not exists."); + securec_check_ss_c(rc, "", ""); + InsertErrorMessage(message, stmt1->startLineNumber); + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("type is not exists."), + errdetail("CommandType."), errcause("System error."), + erraction("Contact Huawei Engineer."))); + } + if (strcmp(fp1->name, fp2->name) != 0) { + isSameName = false; + } + } + pfree(arr1); + pfree(arr2); + /* function delcare in package specification and define in package body must be same */ + if (!isSameName && (stmt1->isFunctionDeclare^stmt2->isFunctionDeclare)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_PLSQL), + errmsg("function declared in package specification and " + "package body must be the same, function: %s", + NameListToString(stmt1->funcname)))); + } + return true; +} + +#endif diff --git a/src/common/backend/catalog/heap.cpp b/src/common/backend/catalog/heap.cpp index 241077c08..44127e6a8 100644 --- a/src/common/backend/catalog/heap.cpp +++ b/src/common/backend/catalog/heap.cpp @@ -65,9 +65,11 @@ #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" #include "catalog/pg_type_fn.h" +#include "catalog/pg_uid_fn.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "catalog/storage_gtt.h" +#include "commands/matview.h" #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "commands/typecmds.h" @@ -85,7 +87,6 @@ #include "pgxc/groupmgr.h" #include "storage/buf/buf.h" #include "storage/predicate.h" -#include "storage/page_compression.h" #include "storage/buf/bufmgr.h" #include "storage/lmgr.h" #include "storage/smgr/smgr.h" @@ -179,6 +180,16 @@ extern void make_tmptable_cache_key(Oid relNode); #define RELKIND_IN_RTM (relkind == RELKIND_RELATION || relkind == RELKIND_TOASTVALUE || relkind == RELKIND_MATVIEW) +static RangePartitionDefState *MakeRangeDefaultSubpartition(PartitionState *partitionState, char *partitionName, + char 
*tablespacename); +static ListPartitionDefState *MakeListDefaultSubpartition(PartitionState *partitionState, char *partitionName, + char *tablespacename); +static HashPartitionDefState *MakeHashDefaultSubpartition(PartitionState *partitionState, char *partitionName, + char *tablespacename); +static void MakeDefaultSubpartitionName(PartitionState *partitionState, char **subPartitionName, + const char *partitionName); +static void getSubPartitionInfo(char partitionStrategy, Node *partitionDefState, List **subPartitionDefState, + char **partitionName, char **tablespacename); /* ---------------------------------------------------------------- * XXX UGLY HARD CODED BADNESS FOLLOWS XXX * @@ -375,7 +386,26 @@ static FormData_pg_attribute a9 = {0, false, true, 0}; -static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9}; + +static FormData_pg_attribute a10 = {0, + {"gs_tuple_uid"}, + INT8OID, + 0, + sizeof(int64), + UidAttributeNumber, + 0, + -1, + -1, + true, + 'p', + 'd', + true, + false, + false, + true, + 0}; + +static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10}; #else static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7}; #endif @@ -385,7 +415,7 @@ static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7}; * Note that we elog if the presented attno is invalid, which would only * happen if there's a problem upstream. */ -Form_pg_attribute SystemAttributeDefinition(AttrNumber attno, bool relhasoids, bool relhasbucket) +Form_pg_attribute SystemAttributeDefinition(AttrNumber attno, bool relhasoids, bool relhasbucket, bool relhasuids) { if (attno >= 0 || attno < -(int)lengthof(SysAtt)) ereport( @@ -396,6 +426,10 @@ Form_pg_attribute SystemAttributeDefinition(AttrNumber attno, bool relhasoids, b if (attno == BucketIdAttributeNumber && !relhasbucket) ereport( ERROR, (errcode(ERRCODE_INVALID_COLUMN_DEFINITION), errmsg("invalid system attribute number %d", attno))); + if (attno == UidAttributeNumber && !relhasuids) { + ereport( + ERROR, (errcode(ERRCODE_INVALID_COLUMN_DEFINITION), errmsg("invalid system attribute number %d", attno))); + } return SysAtt[-attno - 1]; } @@ -480,9 +514,8 @@ static void InitSubPartitionDef(Partition newPartition, Oid partOid, char strate */ Relation heap_create(const char* relname, Oid relnamespace, Oid reltablespace, Oid relid, Oid relfilenode, Oid bucketOid, TupleDesc tupDesc, char relkind, char relpersistence, bool partitioned_relation, bool rowMovement, - bool shared_relation, bool mapped_relation, bool allow_system_table_mods, int8 row_compress, Datum reloptions, - Oid ownerid, bool skip_create_storage, TableAmType tam_type, int8 relindexsplit, StorageType storage_type, - bool newcbi, Oid accessMethodObjectId) + bool shared_relation, bool mapped_relation, bool allow_system_table_mods, int8 row_compress, Oid ownerid, + bool skip_create_storage, TableAmType tam_type, int8 relindexsplit, StorageType storage_type, bool newcbi) { bool create_storage = false; Relation rel; @@ -593,11 +626,9 @@ Relation heap_create(const char* relname, Oid relnamespace, Oid reltablespace, O relpersistence, relkind, row_compress, - reloptions, tam_type, relindexsplit, - storage_type, - accessMethodObjectId + storage_type ); if (partitioned_relation) { @@ -947,7 +978,7 @@ void InsertPgAttributeTuple(Relation pg_attribute_rel, Form_pg_attribute new_att * tuples to pg_attribute. 
* -------------------------------- */ -static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relkind, bool oidislocal, int oidinhcount, bool hasbucket) +static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relkind, bool oidislocal, int oidinhcount, bool hasbucket, bool hasuids) { Form_pg_attribute attr; int i; @@ -1016,6 +1047,8 @@ static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc, char relki continue; if (!hasbucket && SysAtt[i]->attnum == BucketIdAttributeNumber) continue; + if (!hasuids && SysAtt[i]->attnum == UidAttributeNumber) + continue; rc = memcpy_s(&attStruct, sizeof(FormData_pg_attribute), (char*)SysAtt[i], sizeof(FormData_pg_attribute)); securec_check(rc, "\0", "\0"); @@ -2492,6 +2525,7 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable Oid relbucketOid = InvalidOid; int2vector* bucketcol = NULL; bool relhasbucket = false; + bool relhasuids = false; pg_class_desc = heap_open(RelationRelationId, RowExclusiveLock); @@ -2654,6 +2688,9 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable TableAmType tam = get_tableam_from_reloptions(hreloptions, relkind, InvalidOid); int8 indexsplit = get_indexsplit_from_reloptions(hreloptions, InvalidOid); + /* Get uids info from reloptions */ + relhasuids = StdRdOptionsHasUids(hreloptions, relkind); + /* * Create the relcache entry (mostly dummy at this point) and the physical * disk file. (If we fail further down, it's the smgr's responsibility to @@ -2675,7 +2712,6 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable mapped_relation, allow_system_table_mods, row_compress, - reloptions, ownerid, false, tam, @@ -2837,7 +2873,7 @@ Oid heap_create_with_catalog(const char *relname, Oid relnamespace, Oid reltable * now add tuples to pg_attribute for the attributes in our new relation. */ AddNewAttributeTuples( - relid, new_rel_desc->rd_att, relkind, oidislocal, oidinhcount, relhasbucket); + relid, new_rel_desc->rd_att, relkind, oidislocal, oidinhcount, relhasbucket, relhasuids); if (ceLst != NULL) { AddNewGsSecEncryptedColumnsTuples(relid, ceLst); } @@ -3524,6 +3560,13 @@ void heap_drop_with_catalog(Oid relid) * until transaction commit. This ensures no one else will try to do * something with the doomed relation. */ + if (ISMLOG(RelationGetForm(rel)->relname.data)) { + char *base_relid_str = RelationGetForm(rel)->relname.data + MLOGLEN; + Oid base_relid = atoi(base_relid_str); + if (OidIsValid(base_relid)) { + CacheInvalidateRelcacheByRelid(base_relid); + } + } relation_close(rel, NoLock); /* @@ -5205,7 +5248,7 @@ void dropDeltaTableOnPartition(Oid partId) * */ Partition heapCreatePartition(const char* part_name, bool for_partitioned_table, Oid part_tablespace, Oid part_id, - Oid partFileNode, Oid bucketOid, Oid ownerid, StorageType storage_type, bool newcbi, Datum reloptions) + Oid partFileNode, Oid bucketOid, Oid ownerid, StorageType storage_type, bool newcbi) { Partition new_part_desc = NULL; bool createStorage = false; @@ -5258,8 +5301,7 @@ Partition heapCreatePartition(const char* part_name, bool for_partitioned_table, part_id, /* partition oid */ partFileNode, /* partition's file node, same as partition oid*/ part_tablespace, - for_partitioned_table ? HEAP_DISK : storage_type, - reloptions); + for_partitioned_table ? 
HEAP_DISK : storage_type); /* * Save newcbi as a context indicator to @@ -5722,7 +5764,7 @@ Oid heapAddRangePartition(Relation pgPartRel, Oid partTableOid, Oid partTablespa /* transform boundary value */ boundaryValue = transformPartitionBoundary(newPartDef->boundary, isTimestamptz); - /*get partition tablespace*/ + /* get partition tablespace oid */ if (newPartDef->tablespacename) { newPartitionTableSpaceOid = get_tablespace_oid(newPartDef->tablespacename, false); } @@ -5763,9 +5805,7 @@ Oid heapAddRangePartition(Relation pgPartRel, Oid partTableOid, Oid partTablespa newPartrelfileOid, bucketOid, ownerid, - storage_type, - false, - reloptions); + storage_type); Assert(newPartitionOid == PartitionGetPartid(newPartition)); if (isSubpartition) { @@ -5876,7 +5916,7 @@ char* GenIntervalPartitionName(Relation rel) rc = snprintf_s(partName, NAMEDATALEN, NAMEDATALEN - 1, INTERVAL_PARTITION_NAME_PREFIX_FMT, suffix); securec_check_ss(rc, "\0", "\0"); existingPartOid = partitionNameGetPartitionOid( - rel->rd_id, partName, PART_OBJ_TYPE_TABLE_PARTITION, AccessExclusiveLock, true, false, NULL, NULL, NoLock); + rel->rd_id, partName, PART_OBJ_TYPE_TABLE_PARTITION, AccessShareLock, true, false, NULL, NULL, NoLock); if (!OidIsValid(existingPartOid)) { return partName; } @@ -5972,9 +6012,7 @@ Oid HeapAddIntervalPartition(Relation pgPartRel, Relation rel, Oid partTableOid, partrelfileOid, bucketOid, ownerid, - storage_type, - false, - reloptions); + storage_type); pfree(partName); Assert(newPartitionOid == PartitionGetPartid(newPartition)); @@ -6008,7 +6046,6 @@ Oid HeapAddListPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac Oid partrelfileOid = InvalidOid; Relation relation; Partition newListPartition; - int maxLength = 64; /*missing partition definition structure*/ if (!PointerIsValid(newListPartDef)) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("missing definition for new partition"))); @@ -6017,10 +6054,10 @@ Oid HeapAddListPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac if (!PointerIsValid(newListPartDef->boundary)) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("boundary not defined for new partition"))); } - if (newListPartDef->boundary->length > maxLength) { + if (newListPartDef->boundary->length > PARTKEY_VALUE_MAXNUM) { ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), - errmsg("too many partition keys, allowed is %d", maxLength))); + errmsg("too many partition keys, allowed is %d", PARTKEY_VALUE_MAXNUM))); } /*new partition name check*/ @@ -6031,7 +6068,7 @@ Oid HeapAddListPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac /* transform boundary value */ boundaryValue = transformListBoundary(newListPartDef->boundary, isTimestamptz); - /*get partition tablespace*/ + /* get partition tablespace oid */ if (newListPartDef->tablespacename) { newPartitionTableSpaceOid = get_tablespace_oid(newListPartDef->tablespacename, false); } @@ -6294,10 +6331,10 @@ Oid HeapAddHashPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac if (!PointerIsValid(newHashPartDef->boundary)) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("boundary not defined for new partition"))); } - if (newHashPartDef->boundary->length > MAX_PARTITIONKEY_NUM) { + if (newHashPartDef->boundary->length != 1) { ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), - errmsg("too many partition keys, allowed is %d", MAX_PARTITIONKEY_NUM))); + errmsg("too many partition keys, allowed is 1"))); } /*new partition name check*/ @@ 
-6309,7 +6346,7 @@ Oid HeapAddHashPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac bool isTime = false; boundaryValue = transformPartitionBoundary(newHashPartDef->boundary, &isTime); - /*get partition tablespace*/ + /* get partition tablespace oid */ if (newHashPartDef->tablespacename) { newPartitionTableSpaceOid = get_tablespace_oid(newHashPartDef->tablespacename, false); } @@ -6349,9 +6386,7 @@ Oid HeapAddHashPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespac partrelfileOid, bucketOid, ownerid, - storage_type, - false, - reloptions); + storage_type); Assert(newHashPartitionOid == PartitionGetPartid(newHashPartition)); if (isSubpartition) { @@ -6526,9 +6561,7 @@ static void addNewPartitionTupleForTable(Relation pg_partition_rel, const char* new_partition_rfoid, InvalidOid, ownerid, - HEAP_DISK, - false, - reloptions); + HEAP_DISK); Assert(new_partition_oid == PartitionGetPartid(new_partition)); new_partition->pd_part->parttype = PART_OBJ_TYPE_PARTED_TABLE; @@ -6586,11 +6619,18 @@ bool IsExistDefaultSubpartitionName(List *partitionNameList, char *defaultPartit return false; } -void MakeDefaultSubpartitionName(PartitionState *partitionState, char** subPartitionName, const char* partitionName) +static void MakeDefaultSubpartitionName(PartitionState *partitionState, char **subPartitionName, + const char *partitionName) { int numLen = 0; int subPartitionNameLen = 0; - List *partitionNameList = GetPartitionNameList(partitionState->partitionList); + List *partitionNameList = NIL; + if (PointerIsValid(partitionState->partitionList)) { + partitionNameList = GetPartitionNameList(partitionState->partitionList); + } else if (PointerIsValid(partitionState->partitionNameList)) { + partitionNameList = partitionState->partitionNameList; + } + for (int i = 1; i < INT32_MAX_VALUE; i++) { numLen = (int)log10(i) + 1; @@ -6615,10 +6655,13 @@ void MakeDefaultSubpartitionName(PartitionState *partitionState, char** subParti break; } } - list_free_ext(partitionNameList); + + if (PointerIsValid(partitionState->partitionList)) { + list_free_ext(partitionNameList); + } } -ListPartitionDefState *MakeListDefaultSubpartition(PartitionState *partitionState, char *partitionName, +static ListPartitionDefState *MakeListDefaultSubpartition(PartitionState *partitionState, char *partitionName, char *tablespacename) { ListPartitionDefState *subPartitionDefState = makeNode(ListPartitionDefState); @@ -6632,7 +6675,7 @@ ListPartitionDefState *MakeListDefaultSubpartition(PartitionState *partitionStat return subPartitionDefState; } -HashPartitionDefState *MakeHashDefaultSubpartition(PartitionState *partitionState, char *partitionName, +static HashPartitionDefState *MakeHashDefaultSubpartition(PartitionState *partitionState, char *partitionName, char *tablespacename) { HashPartitionDefState *subPartitionDefState = makeNode(HashPartitionDefState); @@ -6644,7 +6687,7 @@ HashPartitionDefState *MakeHashDefaultSubpartition(PartitionState *partitionStat return subPartitionDefState; } -RangePartitionDefState *MakeRangeDefaultSubpartition(PartitionState *partitionState, char *partitionName, +static RangePartitionDefState *MakeRangeDefaultSubpartition(PartitionState *partitionState, char *partitionName, char *tablespacename) { RangePartitionDefState *subPartitionDefState = makeNode(RangePartitionDefState); @@ -6657,7 +6700,7 @@ RangePartitionDefState *MakeRangeDefaultSubpartition(PartitionState *partitionSt return subPartitionDefState; } -void getSubPartitionInfo(char partitionStrategy, Node 
*partitionDefState, +static void getSubPartitionInfo(char partitionStrategy, Node *partitionDefState, List **subPartitionDefState, char **partitionName, char **tablespacename) { if (partitionStrategy == PART_STRATEGY_LIST) { @@ -6702,42 +6745,93 @@ Node *MakeDefaultSubpartition(PartitionState *partitionState, Node *partitionDef return NULL; } -static void addNewSubPartitionTuplesForPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespace, +List *addNewSubPartitionTuplesForPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespace, Oid bucketOid, Oid ownerid, Datum reloptions, const bool *isTimestamptz, StorageType storage_type, PartitionState *partitionState, Node *partitionDefState, LOCKMODE partLockMode) { - if (partitionState->subPartitionState != NULL) { - PartitionState *subPartitionState = partitionState->subPartitionState; - List *subPartitionDefStateList = NIL; - char *partitionName = NULL; - char *tablespacename = NULL; - ListCell *lc = NULL; - char partitionStrategy = partitionState->partitionStrategy; - char subPartitionStrategy = subPartitionState->partitionStrategy; - getSubPartitionInfo(partitionStrategy, partitionDefState, &subPartitionDefStateList, &partitionName, - &tablespacename); + List *subpartOidList = NIL; + if (partitionState->subPartitionState == NULL) { + return NIL; + } + + PartitionState *subPartitionState = partitionState->subPartitionState; + List *subPartitionDefStateList = NIL; + char *partitionName = NULL; + char *tablespacename = NULL; + ListCell *lc = NULL; + Oid subpartOid = InvalidOid; + char partitionStrategy = partitionState->partitionStrategy; + char subPartitionStrategy = subPartitionState->partitionStrategy; + getSubPartitionInfo(partitionStrategy, partitionDefState, &subPartitionDefStateList, &partitionName, + &tablespacename); + foreach (lc, subPartitionDefStateList) { if (subPartitionStrategy == PART_STRATEGY_LIST) { - foreach (lc, subPartitionDefStateList) { - ListPartitionDefState *subPartitionDefState = (ListPartitionDefState *)lfirst(lc); - (void)HeapAddListPartition(pgPartRel, partTableOid, partTablespace, bucketOid, subPartitionDefState, - ownerid, reloptions, isTimestamptz, storage_type, NULL, true); - } + ListPartitionDefState *subPartitionDefState = (ListPartitionDefState *)lfirst(lc); + subpartOid = HeapAddListPartition(pgPartRel, partTableOid, partTablespace, bucketOid, + subPartitionDefState, ownerid, reloptions, isTimestamptz, storage_type, NULL, true); } else if (subPartitionStrategy == PART_STRATEGY_HASH) { - foreach (lc, subPartitionDefStateList) { - HashPartitionDefState *subPartitionDefState = (HashPartitionDefState *)lfirst(lc); - (void)HeapAddHashPartition(pgPartRel, partTableOid, partTablespace, bucketOid, subPartitionDefState, - ownerid, reloptions, isTimestamptz, storage_type, NULL, true); - } + HashPartitionDefState *subPartitionDefState = (HashPartitionDefState *)lfirst(lc); + subpartOid = HeapAddHashPartition(pgPartRel, partTableOid, partTablespace, bucketOid, + subPartitionDefState, ownerid, reloptions, isTimestamptz, storage_type, NULL, true); } else { - foreach (lc, subPartitionDefStateList) { - RangePartitionDefState *subPartitionDefState = (RangePartitionDefState *)lfirst(lc); - (void)heapAddRangePartition(pgPartRel, partTableOid, partTablespace, bucketOid, subPartitionDefState, - ownerid, reloptions, isTimestamptz, storage_type, partLockMode, NULL, true); - } + RangePartitionDefState *subPartitionDefState = (RangePartitionDefState *)lfirst(lc); + subpartOid = heapAddRangePartition(pgPartRel, 
partTableOid, partTablespace, bucketOid, + subPartitionDefState, ownerid, reloptions, isTimestamptz, storage_type, partLockMode, NULL, true); + } + if (OidIsValid(subpartOid)) { + subpartOidList = lappend_oid(subpartOidList, subpartOid); + } + } + + return subpartOidList; +} + +/* + * check whether the partition key has timestampwithzone type. + */ +static void IsPartitionKeyContainTimestampwithzoneType(const PartitionState *partTableState, const TupleDesc tupledesc, + bool *isTimestamptz, int partKeyNum) +{ + ListCell *partKeyCell = NULL; + ColumnRef *col = NULL; + char *columName = NULL; + int partKeyIdx = 0; + int attnum = tupledesc->natts; + Form_pg_attribute *attrs = tupledesc->attrs; + + foreach (partKeyCell, partTableState->partitionKey) { + col = (ColumnRef *)lfirst(partKeyCell); + columName = ((Value *)linitial(col->fields))->val.str; + + isTimestamptz[partKeyIdx] = false; + for (int i = 0; i < attnum; i++) { + if (TIMESTAMPTZOID == attrs[i]->atttypid && 0 == strcmp(columName, attrs[i]->attname.data)) { + isTimestamptz[partKeyIdx] = true; + break; + } + } + partKeyIdx++; + } + + Assert(partKeyIdx == partKeyNum); +} + +/* if a partition's tablespace is not provided, it is inherited from the parent partitioned table. + * So for a subpartition, its own tablespace is used if specified; otherwise it inherits from the partition, + * and if that is not specified either, from the table. */ +Oid GetPartTablespaceOidForSubpartition(Oid reltablespace, const char* partTablespacename) +{ + Oid partTablespaceOid = InvalidOid; + /* get partition tablespace oid */ + if (PointerIsValid(partTablespacename)) { + partTablespaceOid = get_tablespace_oid(partTablespacename, false); + } + if (!OidIsValid(partTablespaceOid)) { + partTablespaceOid = reltablespace; + } + return partTablespaceOid; } /* @@ -6758,38 +6852,19 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel Oid reltablespace, Oid bucketOid, PartitionState* partTableState, Oid ownerid, Datum reloptions, const TupleDesc tupledesc, char strategy, StorageType storage_type, LOCKMODE partLockMode) { - /* - *check whether the partion key has timestampwithzone type.
- */ - ListCell* partKeyCell = NULL; - ColumnRef* col = NULL; - char* columName = NULL; int partKeyNum = list_length(partTableState->partitionKey); + bool isTimestamptzForPartKey[partKeyNum]; + memset_s(isTimestamptzForPartKey, sizeof(isTimestamptzForPartKey), 0, sizeof(isTimestamptzForPartKey)); + IsPartitionKeyContainTimestampwithzoneType(partTableState, tupledesc, isTimestamptzForPartKey, partKeyNum); - int attnum = tupledesc->natts; - Form_pg_attribute* attrs = tupledesc->attrs; - - int partKeyIdx = 0; - bool isTimestamptz[partKeyNum]; - - memset_s(isTimestamptz, sizeof(isTimestamptz), 0, sizeof(isTimestamptz)); - - foreach (partKeyCell, partTableState->partitionKey) { - col = (ColumnRef*)lfirst(partKeyCell); - columName = ((Value*)linitial(col->fields))->val.str; - - isTimestamptz[partKeyIdx] = false; - for (int i = 0; i < attnum; i++) { - if (TIMESTAMPTZOID == attrs[i]->atttypid && 0 == strcmp(columName, attrs[i]->attname.data)) { - isTimestamptz[partKeyIdx] = true; - break; - } - } - partKeyIdx++; + bool *isTimestamptzForSubPartKey = NULL; + if (partTableState->subPartitionState != NULL) { + int subPartKeyNum = list_length(partTableState->subPartitionState->partitionKey); + isTimestamptzForSubPartKey = (bool*)palloc0(sizeof(bool) * subPartKeyNum); + IsPartitionKeyContainTimestampwithzoneType(partTableState->subPartitionState, tupledesc, + isTimestamptzForSubPartKey, subPartKeyNum); } - Assert(partKeyIdx == partKeyNum); - ListCell* cell = NULL; Assert(pg_partition_rel); @@ -6817,12 +6892,18 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel partitionDefState, ownerid, reloptions, - isTimestamptz, + isTimestamptzForPartKey, storage_type, subpartition_key_attr_no); - addNewSubPartitionTuplesForPartition( - pg_partition_rel, partitionOid, reltablespace, bucketOid, ownerid, reloptions, isTimestamptz, - storage_type, partTableState, (Node*)partitionDefState, partLockMode); + + Oid partTablespaceOid = + GetPartTablespaceOidForSubpartition(reltablespace, partitionDefState->tablespacename); + List *subpartitionOidList = addNewSubPartitionTuplesForPartition(pg_partition_rel, partitionOid, + partTablespaceOid, bucketOid, ownerid, reloptions, isTimestamptzForSubPartKey, storage_type, + partTableState, (Node *)partitionDefState, partLockMode); + if (subpartitionOidList != NIL) { + list_free_ext(subpartitionOidList); + } } else if (strategy == PART_STRATEGY_HASH) { HashPartitionDefState* partitionDefState = (HashPartitionDefState*)lfirst(cell); if (partTableState->subPartitionState != NULL && partitionDefState->subPartitionDefState == NULL) { @@ -6837,12 +6918,18 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel partitionDefState, ownerid, reloptions, - isTimestamptz, + isTimestamptzForPartKey, storage_type, subpartition_key_attr_no); - addNewSubPartitionTuplesForPartition( - pg_partition_rel, partitionOid, reltablespace, bucketOid, ownerid, reloptions, isTimestamptz, - storage_type, partTableState, (Node*)partitionDefState, partLockMode); + + Oid partTablespaceOid = + GetPartTablespaceOidForSubpartition(reltablespace, partitionDefState->tablespacename); + List *subpartitionOidList = addNewSubPartitionTuplesForPartition(pg_partition_rel, partitionOid, + partTablespaceOid, bucketOid, ownerid, reloptions, isTimestamptzForSubPartKey, storage_type, + partTableState, (Node *)partitionDefState, partLockMode); + if (subpartitionOidList != NIL) { + list_free_ext(subpartitionOidList); + } } else { RangePartitionDefState* partitionDefState 
= (RangePartitionDefState*)lfirst(cell); if (partTableState->subPartitionState != NULL && partitionDefState->subPartitionDefState == NULL) { @@ -6857,15 +6944,24 @@ static void addNewPartitionTuplesForPartition(Relation pg_partition_rel, Oid rel (RangePartitionDefState*)lfirst(cell), ownerid, reloptions, - isTimestamptz, + isTimestamptzForPartKey, storage_type, partLockMode, subpartition_key_attr_no); - addNewSubPartitionTuplesForPartition( - pg_partition_rel, partitionOid, reltablespace, bucketOid, ownerid, reloptions, isTimestamptz, - storage_type, partTableState, (Node*)partitionDefState, partLockMode); + + Oid partTablespaceOid = + GetPartTablespaceOidForSubpartition(reltablespace, partitionDefState->tablespacename); + List *subpartitionOidList = addNewSubPartitionTuplesForPartition(pg_partition_rel, partitionOid, + partTablespaceOid, bucketOid, ownerid, reloptions, isTimestamptzForSubPartKey, storage_type, + partTableState, (Node *)partitionDefState, partLockMode); + if (subpartitionOidList != NIL) { + list_free_ext(subpartitionOidList); + } } } + if (isTimestamptzForSubPartKey != NULL) { + pfree_ext(isTimestamptzForSubPartKey); + } } void heap_truncate_one_part(Relation rel, Oid partOid) @@ -7428,7 +7524,7 @@ bool* CheckPartkeyHasTimestampwithzone(Relation partTableRel, bool isForSubParti pgPartRel = relation_open(PartitionRelationId, AccessShareLock); - if (isForSubPartition) { + if (isForSubPartition || RelationIsPartitionOfSubPartitionTable(partTableRel)) { partitionTableTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partTableRel->rd_id)); } else { partitionTableTuple = @@ -7493,7 +7589,7 @@ bool* CheckPartkeyHasTimestampwithzone(Relation partTableRel, bool isForSubParti } } - if (isForSubPartition) + if (isForSubPartition || RelationIsPartitionOfSubPartitionTable(partTableRel)) ReleaseSysCache(partitionTableTuple); else heap_freetuple_ext(partitionTableTuple); @@ -7607,3 +7703,37 @@ int GetIndexKeyAttsByTuple(Relation relation, HeapTuple indexTuple) return DatumGetInt16(indkeyDatum); } + +void AddOrDropUidsAttr(Oid relOid, bool oldRelHasUids, bool newRelHasUids) +{ + /* the uids reloption did not change; nothing to do */ + if (oldRelHasUids == newRelHasUids) { + return; + } + /* insert the uid attr if the new reloption has uids */ + if (newRelHasUids) { + Relation rel = heap_open(AttributeRelationId, RowExclusiveLock); + CatalogIndexState indstate = CatalogOpenIndexes(rel); + FormData_pg_attribute attStruct; + errno_t rc; + int uidAttrNum = -UidAttributeNumber; + rc = memcpy_s(&attStruct, sizeof(FormData_pg_attribute), + (char*)SysAtt[uidAttrNum - 1], sizeof(FormData_pg_attribute)); + securec_check(rc, "\0", "\0"); + /* Fill in the correct relation OID in the copied tuple */ + attStruct.attrelid = relOid; + InsertPgAttributeTuple(rel, &attStruct, indstate); + CatalogCloseIndexes(indstate); + heap_close(rel, RowExclusiveLock); + + /* Also insert an entry into the uid backup table */ + InsertUidEntry(relOid); + } else { /* delete the attr if the new reloption does not have uids */ + ObjectAddress object; + object.classId = RelationRelationId; + object.objectId = relOid; + object.objectSubId = UidAttributeNumber; + performDeletion(&object, DROP_RESTRICT, 0); + } +} + diff --git a/src/common/backend/catalog/index.cpp b/src/common/backend/catalog/index.cpp index f2312132b..e86cc5afc 100644 --- a/src/common/backend/catalog/index.cpp +++ b/src/common/backend/catalog/index.cpp @@ -130,16 +130,14 @@ static bool binary_upgrade_is_next_psort_array_pg_type_oid_valid(); static Oid binary_upgrade_get_next_part_index_pg_class_rfoid();
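/*
 * [Editor's note] The AddOrDropUidsAttr routine added above toggles the hidden
 * gs_tuple_uid system column when a table's uids reloption flips. Below is a
 * minimal, self-contained C++ sketch of that decision flow, not openGauss code:
 * the catalog operations (InsertPgAttributeTuple, InsertUidEntry, performDeletion)
 * are replaced by illustrative stand-ins, and the stand-in names do not exist in
 * the source tree. UidAttributeNumber is taken to be -10, matching gs_tuple_uid's
 * position as the tenth entry of the SysAtt[] array extended earlier in this patch.
 */
#include <cstdio>

typedef unsigned int Oid;
static const int UidAttributeNumber = -10;

/* illustrative stand-ins for the catalog calls made by AddOrDropUidsAttr */
static void InsertHiddenUidAttribute(Oid relOid)
{
    /* the real code copies SysAtt[-UidAttributeNumber - 1], patches attrelid,
     * and inserts the row into pg_attribute */
    std::printf("rel %u: insert pg_attribute row for gs_tuple_uid (attnum %d)\n",
                relOid, UidAttributeNumber);
}
static void InsertUidBackupEntry(Oid relOid)
{
    std::printf("rel %u: insert entry into uid backup table\n", relOid);
}
static void DropHiddenUidAttribute(Oid relOid)
{
    /* the real code calls performDeletion on
     * (RelationRelationId, relOid, UidAttributeNumber) */
    std::printf("rel %u: drop attribute with subId %d\n", relOid, UidAttributeNumber);
}

/* same shape as AddOrDropUidsAttr: act only when the uids reloption changes */
static void AddOrDropUidsAttrSketch(Oid relOid, bool oldRelHasUids, bool newRelHasUids)
{
    if (oldRelHasUids == newRelHasUids) {
        return; /* reloption unchanged: nothing to do */
    }
    if (newRelHasUids) {
        InsertHiddenUidAttribute(relOid);
        InsertUidBackupEntry(relOid);
    } else {
        DropHiddenUidAttribute(relOid);
    }
}

int main()
{
    AddOrDropUidsAttrSketch(16385, false, true);  /* enable uids: attr + backup entry added */
    AddOrDropUidsAttrSketch(16385, true, true);   /* no flip: no-op */
    AddOrDropUidsAttrSketch(16385, true, false);  /* disable uids: attr dropped */
    return 0;
}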
static Oid bupgrade_get_next_psort_pg_class_rfoid(); -static const int max_hashbucket_index_worker = 4; +static const int max_hashbucket_index_worker = 32; static inline int get_parallel_workers(Relation heap) { - int parallel_workers = RelationGetParallelWorkers(heap, -1); + int parallel_workers = RelationGetParallelWorkers(heap, 0); - if (parallel_workers != -1) { + if (parallel_workers != 0) { parallel_workers = Min(max_hashbucket_index_worker, parallel_workers); - } else { - parallel_workers = max_hashbucket_index_worker; } return parallel_workers; @@ -329,8 +327,8 @@ static TupleDesc ConstructTupleDescriptor(Relation heapRelation, IndexInfo* inde /* * here we are indexing on a system attribute (-1...-n) */ - from = SystemAttributeDefinition( - atnum, heapRelation->rd_rel->relhasoids, RELATION_HAS_BUCKET(heapRelation)); + from = SystemAttributeDefinition(atnum, heapRelation->rd_rel->relhasoids, + RELATION_HAS_BUCKET(heapRelation), RELATION_HAS_UIDS(heapRelation)); } else { /* * here we are indexing on a normal attribute (1...n) @@ -914,9 +912,9 @@ Oid index_create(Relation heapRelation, const char *indexRelationName, Oid index indexRelation = heap_create(indexRelationName, namespaceId, tableSpaceId, indexRelationId, relFileNode, RELATION_CREATE_BUCKET(heapRelation) ? heapRelation->rd_bucketoid : InvalidOid, indexTupDesc, relKind, relpersistence, isLocalPart, false, shared_relation, mapped_relation, allow_system_table_mods, - REL_CMPRS_NOT_SUPPORT, (Datum)reloptions, heapRelation->rd_rel->relowner, skip_create_storage, + REL_CMPRS_NOT_SUPPORT, heapRelation->rd_rel->relowner, skip_create_storage, isUstore ? TAM_USTORE : TAM_HEAP, /* XXX: Index tables are by default HEAP Table Type */ - relindexsplit, storage_type, extra->crossBucket, accessMethodObjectId); + relindexsplit, storage_type, extra->crossBucket); Assert(indexRelationId == RelationGetRelid(indexRelation)); @@ -934,6 +932,7 @@ Oid index_create(Relation heapRelation, const char *indexRelationName, Oid index * XXX should have a cleaner way to create cataloged indexes */ indexRelation->rd_rel->relowner = heapRelation->rd_rel->relowner; + indexRelation->rd_rel->relam = accessMethodObjectId; indexRelation->rd_rel->relhasoids = false; if (accessMethodObjectId == PSORT_AM_OID) { @@ -1245,8 +1244,7 @@ Oid partition_index_create(const char* partIndexName, /* the name of partition i parentIndex->rd_bucketoid, parentIndex->rd_rel->relowner, RelationGetStorageType(parentIndex), - extra->crossbucket, - indexRelOptions); + extra->crossbucket); partitionIndex->pd_part->parttype = PART_OBJ_TYPE_INDEX_PARTITION; partitionIndex->pd_part->rangenum = 0; partitionIndex->pd_part->parentid = parentIndexId; @@ -1284,13 +1282,9 @@ Oid partition_index_create(const char* partIndexName, /* the name of partition i partitionIndex->pd_part->relfrozenxid = (ShortTransactionId)InvalidTransactionId; /* insert into pg_partition */ -#ifndef ENABLE_MULTIPLE_NODES - insertPartitionEntry(pg_partition_rel, partitionIndex, partitionIndex->pd_id, NULL, NULL, 0, 0, 0, indexRelOptions, - PART_OBJ_TYPE_INDEX_PARTITION); -#else insertPartitionEntry( pg_partition_rel, partitionIndex, partitionIndex->pd_id, NULL, NULL, 0, 0, 0, 0, PART_OBJ_TYPE_INDEX_PARTITION); -#endif + /* Make the above change visible */ CommandCounterIncrement(); @@ -2739,8 +2733,7 @@ void index_build(Relation heapRelation, Partition heapPartition, Relation indexR /* sub-partition */ indexInfo->ii_ParallelWorkers = 0; } else if (!RELATION_OWN_BUCKET(heapRelation)) { - /* plain table or part of table, no 
extra partition or bucket under it */ - indexInfo->ii_ParallelWorkers = plan_create_index_workers(RelationGetRelid(heapRelation)); + indexInfo->ii_ParallelWorkers = parallel_workers; } /* disable parallel building index for system table */ @@ -2963,6 +2956,18 @@ static inline bool IsRedisExtraIndex(const IndexInfo* indexInfo, const TupleTabl (indexInfo->ii_KeyAttrNumbers[0] == (slot->tts_tupleDescriptor->natts - 1))); } +static TransactionId GetCatalogOldestXmin(Relation heapRelation) +{ + TransactionId oldestXmin = GetOldestXmin(heapRelation); + if (IsCatalogRelation(heapRelation) || RelationIsAccessibleInLogicalDecoding(heapRelation)) { + TransactionId catalogXmin = GetReplicationSlotCatalogXmin(); + if (TransactionIdIsNormal(catalogXmin) && TransactionIdPrecedes(catalogXmin, oldestXmin)) { + oldestXmin = catalogXmin; + } + } + return oldestXmin; +} + /* * IndexBuildHeapScan - scan the heap relation to find tuples to be indexed * @@ -3048,7 +3053,7 @@ double IndexBuildHeapScan(Relation heapRelation, Relation indexRelation, IndexIn } else { snapshot = SnapshotAny; /* okay to ignore lazy VACUUMs here */ - OldestXmin = GetOldestXmin(heapRelation); + OldestXmin = GetCatalogOldestXmin(heapRelation); } scan = heap_beginscan_strat(heapRelation, /* relation */ @@ -3456,9 +3461,7 @@ double IndexBuildUHeapScan(Relation heapRelation, Relation indexRelation, IndexI /* * Scan all tuples in the base relation. */ - while ((UHeapGetNextSlotGuts(scan, ForwardScanDirection, slot)) != NULL) { - bool tupleIsAlive = false; - UHeapTuple targuheaptuple = NULL; + while ((UHeapIndexBuildGetNextTuple(sscan, slot)) != NULL) { uheapTuple = ExecGetUHeapTupleFromSlot(slot); CHECK_FOR_INTERRUPTS(); @@ -3467,10 +3470,6 @@ double IndexBuildUHeapScan(Relation heapRelation, Relation indexRelation, IndexI if (ENABLE_WORKLOAD_CONTROL) IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW); - /* UHeapGetNextSlot did the time qual check */ - tupleIsAlive = true; - targuheaptuple = uheapTuple; - reltuples += 1; MemoryContextReset(econtext->ecxt_per_tuple_memory); @@ -3507,7 +3506,7 @@ double IndexBuildUHeapScan(Relation heapRelation, Relation indexRelation, IndexI heapTuple.t_self = uheapTuple->ctid; /* Call the AM's callback routine to process the tuple */ - callback(indexRelation, &heapTuple, values, isnull, tupleIsAlive, callbackState); + callback(indexRelation, &heapTuple, values, isnull, true, callbackState); } UHeapEndScan(scan); @@ -3968,6 +3967,7 @@ void validate_index(Oid heapId, Oid indexId, Snapshot snapshot) ivinfo.message_level = DEBUG2; ivinfo.num_heap_tuples = heapRelation->rd_rel->reltuples; ivinfo.strategy = NULL; + ivinfo.invisibleParts = NULL; state.tuplesort = tuplesort_begin_datum( TIDOID, TIDLessOperator, InvalidOid, false, u_sess->attr.attr_memory.maintenance_work_mem, false); @@ -4255,7 +4255,6 @@ void index_set_state_flags(Oid indexId, IndexStateFlagsAction action) case INDEX_CREATE_SET_VALID: /* Set indisvalid during a CREATE INDEX CONCURRENTLY sequence */ Assert(indexForm->indisready); - Assert(!indexForm->indisvalid); indexForm->indisvalid = true; break; case INDEX_DROP_CLEAR_VALID: @@ -4329,7 +4328,8 @@ Oid IndexGetRelation(Oid indexId, bool missing_ok) * Description : * Notes : */ -void reindex_indexpart_internal(Relation heapRelation, Relation iRel, IndexInfo* indexInfo, Oid indexPartId) +void reindex_indexpart_internal(Relation heapRelation, Relation iRel, IndexInfo* indexInfo, Oid indexPartId, + void* baseDesc) { Oid heapPartId = InvalidOid; Partition heapPart = NULL; @@ -4352,7 +4352,13 @@ void 
reindex_indexpart_internal(Relation heapRelation, Relation iRel, IndexInfo* heapPart = partitionOpen(heapRelation, heapPartId, ShareLock); indexpart = partitionOpen(iRel, indexPartId, AccessExclusiveLock); - PartitionSetNewRelfilenode(iRel, indexpart, InvalidTransactionId, InvalidMultiXactId); + if (baseDesc) { + /* We'll put the old relfilenode into the recycle bin. */ + TrPartitionSetNewRelfilenode(iRel, indexpart, InvalidTransactionId, baseDesc); + } else { + /* We'll build a new physical relation for the index */ + PartitionSetNewRelfilenode(iRel, indexpart, InvalidTransactionId, InvalidMultiXactId); + } index_build(heapRelation, heapPart, iRel, indexpart, indexInfo, false, true, INDEX_CREATE_LOCAL_PARTITION, true); @@ -4368,7 +4374,7 @@ void reindex_indexpart_internal(Relation heapRelation, Relation iRel, IndexInfo* /* * ReindexGlobalIndexInternal - This routine is used to recreate a single global index */ -void ReindexGlobalIndexInternal(Relation heapRelation, Relation iRel, IndexInfo* indexInfo) +void ReindexGlobalIndexInternal(Relation heapRelation, Relation iRel, IndexInfo* indexInfo, void* baseDesc) { List* partitionList = NULL; /* We'll open any partition of relation by partition OID and lock it */ @@ -4378,8 +4384,13 @@ void ReindexGlobalIndexInternal(Relation heapRelation, Relation iRel, IndexInfo* partitionList = relationGetPartitionList(heapRelation, ShareLock); } - /* We'll build a new physical relation for the index */ - RelationSetNewRelfilenode(iRel, InvalidTransactionId, InvalidMultiXactId); + if (baseDesc) { + /* We'll put the old relfilenode into the recycle bin. */ + TrRelationSetNewRelfilenode(iRel, InvalidTransactionId, baseDesc); + } else { + /* We'll build a new physical relation for the index */ + RelationSetNewRelfilenode(iRel, InvalidTransactionId, InvalidMultiXactId); + } /* Initialize the index and rebuild */ /* Note: we do not need to re-establish pkey setting */ @@ -4552,9 +4563,9 @@ void reindex_index(Oid indexId, Oid indexPartId, bool skip_constraint_checks, } else /* for partitioned table */ { if (OidIsValid(indexPartId)) { - reindex_indexpart_internal(heapRelation, iRel, indexInfo, indexPartId); + reindex_indexpart_internal(heapRelation, iRel, indexInfo, indexPartId, baseDesc); } else if (RelationIsGlobalIndex(iRel)) { - ReindexGlobalIndexInternal(heapRelation, iRel, indexInfo); + ReindexGlobalIndexInternal(heapRelation, iRel, indexInfo, baseDesc); } else { List* indexPartOidList = NULL; ListCell* partCell = NULL; @@ -4562,7 +4573,7 @@ void reindex_index(Oid indexId, Oid indexPartId, bool skip_constraint_checks, indexPartOidList = indexGetPartitionOidList(iRel); foreach (partCell, indexPartOidList) { Oid indexPartOid = lfirst_oid(partCell); - reindex_indexpart_internal(heapRelation, iRel, indexInfo, indexPartOid); + reindex_indexpart_internal(heapRelation, iRel, indexInfo, indexPartOid, baseDesc); } releasePartitionOidList(&indexPartOidList); @@ -4830,16 +4841,12 @@ bool ReindexRelation(Oid relid, int flags, int reindexType, void *baseDesc, Adap RelationSetIndexList(rel, indexIds, ClassOidIndexId); } - /* - * Close rel, but continue to hold the lock. - */ - heap_close(rel, NoLock); - // reset all local indexes on partition usable if needed if (RELATION_IS_PARTITIONED(rel)) { /* for partitioned table */ Oid partOid; ListCell* cell = NULL; - List* partOidList = relationGetPartitionOidList(rel); + List *partOidList = + RelationIsSubPartitioned(rel) ?
RelationGetSubPartitionOidList(rel) : relationGetPartitionOidList(rel); foreach (cell, partOidList) { partOid = lfirst_oid(cell); @@ -4849,6 +4856,13 @@ bool ReindexRelation(Oid relid, int flags, int reindexType, void *baseDesc, Adap result = (indexIds != NIL); + bool isRelSubPartitioned = RelationIsSubPartitioned(rel); + + /* + * Close rel, but continue to hold the lock. + */ + heap_close(rel, NoLock); + if (!isPartitioned) { /* for non partitioned table */ /* * If the relation has a secondary toast rel, reindex that too while we @@ -4862,15 +4876,30 @@ bool ReindexRelation(Oid relid, int flags, int reindexType, void *baseDesc, Adap if (((uint32)flags) & REINDEX_REL_PROCESS_TOAST) { partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relid); - foreach (partCell, partTupleList) { - Oid toastOid = ((Form_pg_partition)GETSTRUCT((HeapTuple)lfirst(partCell)))->reltoastrelid; + if (isRelSubPartitioned) { + /* for subpartitioned table, we reindex the toastoid of subpartition */ + HeapTuple partTuple = (HeapTuple)lfirst(partCell); + List *subpartTupleList = + searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, HeapTupleGetOid(partTuple)); + ListCell* subpartCell = NULL; + foreach (subpartCell, subpartTupleList) { + Oid toastOid = ((Form_pg_partition)GETSTRUCT((HeapTuple)lfirst(subpartCell)))->reltoastrelid; - if (OidIsValid(toastOid)) { - result = ReindexRelation(toastOid, flags, REINDEX_BTREE_INDEX, baseDesc) || result; + if (OidIsValid(toastOid)) { + result = ReindexRelation(toastOid, flags, REINDEX_BTREE_INDEX, baseDesc) || result; + } + } + freePartList(subpartTupleList); + } else { + /* for partitioned table, we reindex the toastoid of partition */ + Oid toastOid = ((Form_pg_partition)GETSTRUCT((HeapTuple)lfirst(partCell)))->reltoastrelid; + + if (OidIsValid(toastOid)) { + result = ReindexRelation(toastOid, flags, REINDEX_BTREE_INDEX, baseDesc) || result; + } } } - freePartList(partTupleList); } } @@ -5827,7 +5856,7 @@ void AddGPIForSubPartition(Oid partTableOid, Oid partOid, Oid subPartOid) /* * @@GaussDB@@ - * Target : This routine is used to scan partition tuples and elete them from all global partition indexes. + * Target : This routine is used to scan partition tuples and delete them from all global partition indexes. */ void ScanPartitionDeleteGPITuples(Relation partTableRel, Relation partRel, const List* indexRelList, const List* indexInfoList) @@ -5898,7 +5927,7 @@ void ScanPartitionDeleteGPITuples(Relation partTableRel, Relation partRel, const * @@GaussDB@@ * Target : This routine is used to delete the partition tuples from all global partition indexes. 
*/ -void DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid) +bool DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid) { Relation partTableRel = NULL; Relation partRel = NULL; @@ -5907,6 +5936,7 @@ void DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid) List* indexRelList = NIL; List* indexInfoList = NIL; ListCell* cell = NULL; + bool all_ubtree = true; partTableRel = heap_open(partTableOid, AccessShareLock); part = partitionOpen(partTableRel, partOid, AccessExclusiveLock); @@ -5918,6 +5948,12 @@ void DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid) Relation indexRel = relation_open(indexOid, RowExclusiveLock); IndexInfo* indexInfo = BuildIndexInfo(indexRel); + if (!RelationIsUstoreIndex(indexRel)) { + all_ubtree = false; + relation_close(indexRel, RowExclusiveLock); + continue; + } + indexRelList = lappend(indexRelList, indexRel); indexInfoList = lappend(indexInfoList, indexInfo); } @@ -5937,6 +5973,8 @@ void DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid) releaseDummyRelation(&partRel); partitionClose(partTableRel, part, NoLock); heap_close(partTableRel, NoLock); + + return all_ubtree; } /* @@ -6091,10 +6129,7 @@ bool RecheckIndexTuple(const IndexScanDesc scan, TupleTableSlot *slot) bool *isnull = (bool*)palloc(sizeof(bool) * nattrs); /* form index datum with correct tuple descriptor */ - TupleDesc tmpDesc = slot->tts_tupleDescriptor; - slot->tts_tupleDescriptor = RelationGetDescr(scan->heapRelation); FormIndexDatum(so->indexInfo, slot, so->fakeEstate, values, isnull); - slot->tts_tupleDescriptor = tmpDesc; IndexTuple trueItup = index_form_tuple(RelationGetDescr(scan->indexRelation), values, isnull); trueItup->t_tid = itup->t_tid; diff --git a/src/common/backend/catalog/information_schema.sql b/src/common/backend/catalog/information_schema.sql index 7d1a8373b..5e1b865c3 100644 --- a/src/common/backend/catalog/information_schema.sql +++ b/src/common/backend/catalog/information_schema.sql @@ -179,7 +179,7 @@ CREATE FUNCTION _pg_interval_type(typid oid, mod int4) RETURNS text AS $$SELECT CASE WHEN $1 IN (1186) /* interval */ - THEN upper(substring(format_type($1, $2) from 'interval[()0-9]* #"%#"' for '#')) + THEN pg_catalog.upper(substring(pg_catalog.format_type($1, $2) from 'interval[()0-9]* #"%#"' for '#')) ELSE null::text END$$; @@ -218,7 +218,7 @@ CREATE DOMAIN sql_identifier AS character varying; */ CREATE VIEW information_schema_catalog_name AS - SELECT CAST(current_database() AS sql_identifier) AS catalog_name; + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS catalog_name; GRANT SELECT ON information_schema_catalog_name TO PUBLIC; @@ -255,7 +255,7 @@ CREATE VIEW applicable_roles AS FROM pg_auth_members m JOIN pg_authid a ON (m.member = a.oid) JOIN pg_authid b ON (m.roleid = b.oid) - WHERE pg_has_role(a.oid, 'USAGE'); + WHERE pg_catalog.pg_has_role(a.oid, 'USAGE'); GRANT SELECT ON applicable_roles TO PUBLIC; @@ -287,19 +287,19 @@ GRANT SELECT ON administrable_role_authorizations TO PUBLIC; */ CREATE VIEW attributes AS - SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(nc.nspname AS sql_identifier) AS udt_schema, CAST(c.relname AS sql_identifier) AS udt_name, CAST(a.attname AS sql_identifier) AS attribute_name, CAST(a.attnum AS cardinal_number) AS ordinal_position, - CAST(pg_get_expr(ad.adbin, ad.adrelid) AS character_data) AS attribute_default, + CAST(pg_catalog.pg_get_expr(ad.adbin, ad.adrelid) AS character_data) AS 
attribute_default, CAST(CASE WHEN a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) THEN 'NO' ELSE 'YES' END AS yes_or_no) AS is_nullable, -- This column was apparently removed between SQL:2003 and SQL:2008. CAST( CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' - WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, null) + WHEN nt.nspname = 'pg_catalog' THEN pg_catalog.format_type(a.atttypid, null) ELSE 'USER-DEFINED' END AS character_data) AS data_type, @@ -318,7 +318,7 @@ CREATE VIEW attributes AS CAST(null AS sql_identifier) AS character_set_schema, CAST(null AS sql_identifier) AS character_set_name, - CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN pg_catalog.current_database() END AS sql_identifier) AS collation_catalog, CAST(nco.nspname AS sql_identifier) AS collation_schema, CAST(co.collname AS sql_identifier) AS collation_name, @@ -348,7 +348,7 @@ CREATE VIEW attributes AS AS interval_type, CAST(null AS cardinal_number) AS interval_precision, - CAST(current_database() AS sql_identifier) AS attribute_udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS attribute_udt_catalog, CAST(nt.nspname AS sql_identifier) AS attribute_udt_schema, CAST(t.typname AS sql_identifier) AS attribute_udt_name, @@ -368,8 +368,8 @@ CREATE VIEW attributes AS WHERE a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('c') - AND (pg_has_role(c.relowner, 'USAGE') - OR has_type_privilege(c.reltype, 'USAGE')); + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_type_privilege(c.reltype, 'USAGE')); GRANT SELECT ON attributes TO PUBLIC; @@ -382,17 +382,17 @@ GRANT SELECT ON attributes TO PUBLIC; CREATE VIEW character_sets AS SELECT CAST(null AS sql_identifier) AS character_set_catalog, CAST(null AS sql_identifier) AS character_set_schema, - CAST(getdatabaseencoding() AS sql_identifier) AS character_set_name, - CAST(CASE WHEN getdatabaseencoding() = 'UTF8' THEN 'UCS' ELSE getdatabaseencoding() END AS sql_identifier) AS character_repertoire, - CAST(getdatabaseencoding() AS sql_identifier) AS form_of_use, - CAST(current_database() AS sql_identifier) AS default_collate_catalog, + CAST(pg_catalog.getdatabaseencoding() AS sql_identifier) AS character_set_name, + CAST(CASE WHEN pg_catalog.getdatabaseencoding() = 'UTF8' THEN 'UCS' ELSE pg_catalog.getdatabaseencoding() END AS sql_identifier) AS character_repertoire, + CAST(pg_catalog.getdatabaseencoding() AS sql_identifier) AS form_of_use, + CAST(pg_catalog.current_database() AS sql_identifier) AS default_collate_catalog, CAST(nc.nspname AS sql_identifier) AS default_collate_schema, CAST(c.collname AS sql_identifier) AS default_collate_name FROM pg_database d LEFT JOIN (pg_collation c JOIN pg_namespace nc ON (c.collnamespace = nc.oid)) ON (datcollate = collcollate AND datctype = collctype) - WHERE d.datname = current_database() - ORDER BY char_length(c.collname) DESC, c.collname ASC -- prefer full/canonical name + WHERE d.datname = pg_catalog.current_database() + ORDER BY pg_catalog.char_length(c.collname) DESC, c.collname ASC -- prefer full/canonical name LIMIT 1; GRANT SELECT ON character_sets TO PUBLIC; @@ -404,10 +404,10 @@ GRANT SELECT ON character_sets TO PUBLIC; */ CREATE VIEW check_constraint_routine_usage AS - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(nc.nspname AS sql_identifier) AS 
constraint_schema, CAST(c.conname AS sql_identifier) AS constraint_name, - CAST(current_database() AS sql_identifier) AS specific_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS specific_catalog, CAST(np.nspname AS sql_identifier) AS specific_schema, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier) AS specific_name FROM pg_namespace nc, pg_constraint c, pg_depend d, pg_proc p, pg_namespace np @@ -418,7 +418,7 @@ CREATE VIEW check_constraint_routine_usage AS AND d.refobjid = p.oid AND d.refclassid = 'pg_catalog.pg_proc'::regclass AND p.pronamespace = np.oid - AND pg_has_role(p.proowner, 'USAGE'); + AND pg_catalog.pg_has_role(p.proowner, 'USAGE'); GRANT SELECT ON check_constraint_routine_usage TO PUBLIC; @@ -429,22 +429,22 @@ GRANT SELECT ON check_constraint_routine_usage TO PUBLIC; */ CREATE VIEW check_constraints AS - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(rs.nspname AS sql_identifier) AS constraint_schema, CAST(con.conname AS sql_identifier) AS constraint_name, - CAST(substring(pg_get_constraintdef(con.oid) from 7) AS character_data) + CAST(substring(pg_catalog.pg_get_constraintdef(con.oid) from 7) AS character_data) AS check_clause FROM pg_constraint con LEFT OUTER JOIN pg_namespace rs ON (rs.oid = con.connamespace) LEFT OUTER JOIN pg_class c ON (c.oid = con.conrelid) LEFT OUTER JOIN pg_type t ON (t.oid = con.contypid) - WHERE pg_has_role(coalesce(c.relowner, t.typowner), 'USAGE') + WHERE pg_catalog.pg_has_role(coalesce(c.relowner, t.typowner), 'USAGE') AND con.contype = 'c' UNION -- not-null constraints - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(n.nspname AS sql_identifier) AS constraint_schema, CAST(CAST(n.oid AS text) || '_' || CAST(r.oid AS text) || '_' || CAST(a.attnum AS text) || '_not_null' AS sql_identifier) AS constraint_name, -- XXX CAST(a.attname || ' IS NOT NULL' AS character_data) @@ -456,7 +456,7 @@ CREATE VIEW check_constraints AS AND NOT a.attisdropped AND a.attnotnull AND r.relkind = 'r' - AND pg_has_role(r.relowner, 'USAGE'); + AND pg_catalog.pg_has_role(r.relowner, 'USAGE'); GRANT SELECT ON check_constraints TO PUBLIC; @@ -467,13 +467,13 @@ GRANT SELECT ON check_constraints TO PUBLIC; */ CREATE VIEW collations AS - SELECT CAST(current_database() AS sql_identifier) AS collation_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS collation_catalog, CAST(nc.nspname AS sql_identifier) AS collation_schema, CAST(c.collname AS sql_identifier) AS collation_name, CAST('NO PAD' AS character_data) AS pad_attribute FROM pg_collation c, pg_namespace nc WHERE c.collnamespace = nc.oid - AND collencoding IN (-1, (SELECT encoding FROM pg_database WHERE datname = current_database())); + AND collencoding IN (-1, (SELECT encoding FROM pg_database WHERE datname = pg_catalog.current_database())); GRANT SELECT ON collations TO PUBLIC; @@ -484,15 +484,15 @@ GRANT SELECT ON collations TO PUBLIC; */ CREATE VIEW collation_character_set_applicability AS - SELECT CAST(current_database() AS sql_identifier) AS collation_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS collation_catalog, CAST(nc.nspname AS sql_identifier) AS collation_schema, CAST(c.collname AS sql_identifier) AS collation_name, CAST(null AS sql_identifier) AS character_set_catalog, CAST(null AS 
sql_identifier) AS character_set_schema, - CAST(getdatabaseencoding() AS sql_identifier) AS character_set_name + CAST(pg_catalog.getdatabaseencoding() AS sql_identifier) AS character_set_name FROM pg_collation c, pg_namespace nc WHERE c.collnamespace = nc.oid - AND collencoding IN (-1, (SELECT encoding FROM pg_database WHERE datname = current_database())); + AND collencoding IN (-1, (SELECT encoding FROM pg_database WHERE datname = pg_catalog.current_database())); GRANT SELECT ON collation_character_set_applicability TO PUBLIC; @@ -511,10 +511,10 @@ GRANT SELECT ON collation_character_set_applicability TO PUBLIC; */ CREATE VIEW column_domain_usage AS - SELECT CAST(current_database() AS sql_identifier) AS domain_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS domain_catalog, CAST(nt.nspname AS sql_identifier) AS domain_schema, CAST(t.typname AS sql_identifier) AS domain_name, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(c.relname AS sql_identifier) AS table_name, CAST(a.attname AS sql_identifier) AS column_name @@ -528,10 +528,10 @@ CREATE VIEW column_domain_usage AS AND a.atttypid = t.oid AND t.typtype = 'd' AND c.relkind IN ('r', 'm', 'v', 'f') - AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') AND a.attnum > 0 AND NOT a.attisdropped - AND pg_has_role(t.typowner, 'USAGE'); + AND pg_catalog.pg_has_role(t.typowner, 'USAGE'); GRANT SELECT ON column_domain_usage TO PUBLIC; @@ -544,7 +544,7 @@ GRANT SELECT ON column_domain_usage TO PUBLIC; CREATE VIEW column_privileges AS SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(x.relname AS sql_identifier) AS table_name, CAST(x.attname AS sql_identifier) AS column_name, @@ -552,7 +552,7 @@ CREATE VIEW column_privileges AS CAST( CASE WHEN -- object owner always has grant options - pg_has_role(x.grantee, x.relowner, 'USAGE') + pg_catalog.pg_has_role(x.grantee, x.relowner, 'USAGE') OR x.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable @@ -565,7 +565,7 @@ CREATE VIEW column_privileges AS pr_c.prtype, pr_c.grantable, pr_c.relowner - FROM (SELECT oid, relname, relnamespace, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* + FROM (SELECT oid, relname, relnamespace, relowner, (pg_catalog.aclexplode(coalesce(relacl, pg_catalog.acldefault('r', relowner)))).* FROM pg_class WHERE relkind IN ('r', 'm', 'v', 'f') ) pr_c (oid, relname, relnamespace, relowner, grantor, grantee, prtype, grantable), @@ -582,7 +582,7 @@ CREATE VIEW column_privileges AS pr_a.prtype, pr_a.grantable, c.relowner - FROM (SELECT attrelid, attname, (aclexplode(coalesce(attacl, acldefault('c', relowner)))).* + FROM (SELECT attrelid, attname, (pg_catalog.aclexplode(coalesce(attacl, pg_catalog.acldefault('c', relowner)))).* FROM pg_attribute a JOIN pg_class cc ON (a.attrelid = cc.oid) WHERE attnum > 0 AND NOT attisdropped @@ -603,9 +603,9 @@ CREATE VIEW column_privileges AS AND x.grantee = grantee.oid AND x.grantor = u_grantor.oid AND x.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'REFERENCES', 'COMMENT') - AND (x.relname not like 'mlog_%' 
AND x.relname not like 'matviewmap_%') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (x.relname not like 'mlog\_%' AND x.relname not like 'matviewmap\_%') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC'); GRANT SELECT ON column_privileges TO PUBLIC; @@ -617,10 +617,10 @@ GRANT SELECT ON column_privileges TO PUBLIC; */ CREATE VIEW column_udt_usage AS - SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(c.relname AS sql_identifier) AS table_name, CAST(a.attname AS sql_identifier) AS column_name @@ -634,8 +634,8 @@ CREATE VIEW column_udt_usage AS AND a.atttypid = t.oid AND nc.oid = c.relnamespace AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') - AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') - AND pg_has_role(coalesce(bt.typowner, t.typowner), 'USAGE'); + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND pg_catalog.pg_has_role(coalesce(bt.typowner, t.typowner), 'USAGE'); GRANT SELECT ON column_udt_usage TO PUBLIC; @@ -646,12 +646,12 @@ GRANT SELECT ON column_udt_usage TO PUBLIC; */ CREATE VIEW columns AS - SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(c.relname AS sql_identifier) AS table_name, CAST(a.attname AS sql_identifier) AS column_name, CAST(a.attnum AS cardinal_number) AS ordinal_position, - CAST(CASE WHEN ad.adgencol <> 's' THEN pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS column_default, + CAST(CASE WHEN ad.adgencol <> 's' THEN pg_catalog.pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS column_default, CAST(CASE WHEN a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) THEN 'NO' ELSE 'YES' END AS yes_or_no) AS is_nullable, @@ -659,11 +659,11 @@ CREATE VIEW columns AS CAST( CASE WHEN t.typtype = 'd' THEN CASE WHEN bt.typelem <> 0 AND bt.typlen = -1 THEN 'ARRAY' - WHEN nbt.nspname = 'pg_catalog' THEN format_type(t.typbasetype, null) + WHEN nbt.nspname = 'pg_catalog' THEN pg_catalog.format_type(t.typbasetype, null) ELSE 'USER-DEFINED' END ELSE CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' - WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, null) + WHEN nt.nspname = 'pg_catalog' THEN pg_catalog.format_type(a.atttypid, null) ELSE 'USER-DEFINED' END END AS character_data) @@ -709,18 +709,18 @@ CREATE VIEW columns AS CAST(null AS sql_identifier) AS character_set_schema, CAST(null AS sql_identifier) AS character_set_name, - CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN pg_catalog.current_database() END AS sql_identifier) AS collation_catalog, CAST(nco.nspname AS sql_identifier) AS collation_schema, CAST(co.collname AS sql_identifier) AS collation_name, - CAST(CASE WHEN t.typtype = 'd' THEN current_database() ELSE null END + CAST(CASE WHEN t.typtype = 'd' 
THEN pg_catalog.current_database() ELSE null END AS sql_identifier) AS domain_catalog, CAST(CASE WHEN t.typtype = 'd' THEN nt.nspname ELSE null END AS sql_identifier) AS domain_schema, CAST(CASE WHEN t.typtype = 'd' THEN t.typname ELSE null END AS sql_identifier) AS domain_name, - CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, @@ -741,7 +741,7 @@ CREATE VIEW columns AS CAST(null AS yes_or_no) AS identity_cycle, CAST(CASE WHEN ad.adgencol = 's' THEN 'ALWAYS' ELSE 'NEVER' END AS character_data) AS is_generated, - CAST(CASE WHEN ad.adgencol = 's' THEN pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS generation_expression, + CAST(CASE WHEN ad.adgencol = 's' THEN pg_catalog.pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS generation_expression, CAST(CASE WHEN c.relkind = 'r' OR (c.relkind = 'v' @@ -757,14 +757,14 @@ CREATE VIEW columns AS LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) ON a.attcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') - WHERE (NOT pg_is_other_temp_schema(nc.oid)) + WHERE (NOT pg_catalog.pg_is_other_temp_schema(nc.oid)) AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') - AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') - AND (pg_has_role(c.relowner, 'USAGE') - OR has_column_privilege(c.oid, a.attnum, + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_column_privilege(c.oid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES')); GRANT SELECT ON columns TO PUBLIC; @@ -776,11 +776,11 @@ GRANT SELECT ON columns TO PUBLIC; */ CREATE VIEW constraint_column_usage AS - SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(tblschema AS sql_identifier) AS table_schema, CAST(tblname AS sql_identifier) AS table_name, CAST(colname AS sql_identifier) AS column_name, - CAST(current_database() AS sql_identifier) AS constraint_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(cstrschema AS sql_identifier) AS constraint_schema, CAST(cstrname AS sql_identifier) AS constraint_name @@ -817,7 +817,7 @@ CREATE VIEW constraint_column_usage AS ) AS x (tblschema, tblname, tblowner, colname, cstrschema, cstrname) - WHERE pg_has_role(x.tblowner, 'USAGE'); + WHERE pg_catalog.pg_has_role(x.tblowner, 'USAGE'); GRANT SELECT ON constraint_column_usage TO PUBLIC; @@ -828,10 +828,10 @@ GRANT SELECT ON constraint_column_usage TO PUBLIC; */ CREATE VIEW constraint_table_usage AS - SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nr.nspname AS sql_identifier) AS table_schema, CAST(r.relname AS sql_identifier) AS table_name, - CAST(current_database() AS sql_identifier) AS constraint_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(nc.nspname AS sql_identifier) AS constraint_schema, CAST(c.conname AS sql_identifier) AS constraint_name @@ -842,7 +842,7 @@ CREATE VIEW constraint_table_usage AS AND ( (c.contype = 'f' AND c.confrelid = r.oid) OR (c.contype IN ('p', 'u') AND c.conrelid 
= r.oid) ) AND r.relkind = 'r' - AND pg_has_role(r.relowner, 'USAGE'); + AND pg_catalog.pg_has_role(r.relowner, 'USAGE'); GRANT SELECT ON constraint_table_usage TO PUBLIC; @@ -872,10 +872,10 @@ GRANT SELECT ON constraint_table_usage TO PUBLIC; */ CREATE VIEW domain_constraints AS - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(rs.nspname AS sql_identifier) AS constraint_schema, CAST(con.conname AS sql_identifier) AS constraint_name, - CAST(current_database() AS sql_identifier) AS domain_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS domain_catalog, CAST(n.nspname AS sql_identifier) AS domain_schema, CAST(t.typname AS sql_identifier) AS domain_name, CAST(CASE WHEN condeferrable THEN 'YES' ELSE 'NO' END @@ -886,8 +886,8 @@ CREATE VIEW domain_constraints AS WHERE rs.oid = con.connamespace AND n.oid = t.typnamespace AND t.oid = con.contypid - AND (pg_has_role(t.typowner, 'USAGE') - OR has_type_privilege(t.oid, 'USAGE')); + AND (pg_catalog.pg_has_role(t.typowner, 'USAGE') + OR pg_catalog.has_type_privilege(t.oid, 'USAGE')); GRANT SELECT ON domain_constraints TO PUBLIC; @@ -898,10 +898,10 @@ GRANT SELECT ON domain_constraints TO PUBLIC; */ CREATE VIEW domain_udt_usage AS - SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(nbt.nspname AS sql_identifier) AS udt_schema, CAST(bt.typname AS sql_identifier) AS udt_name, - CAST(current_database() AS sql_identifier) AS domain_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS domain_catalog, CAST(nt.nspname AS sql_identifier) AS domain_schema, CAST(t.typname AS sql_identifier) AS domain_name @@ -912,7 +912,7 @@ CREATE VIEW domain_udt_usage AS AND t.typbasetype = bt.oid AND bt.typnamespace = nbt.oid AND t.typtype = 'd' - AND pg_has_role(bt.typowner, 'USAGE'); + AND pg_catalog.pg_has_role(bt.typowner, 'USAGE'); GRANT SELECT ON domain_udt_usage TO PUBLIC; @@ -923,13 +923,13 @@ GRANT SELECT ON domain_udt_usage TO PUBLIC; */ CREATE VIEW domains AS - SELECT CAST(current_database() AS sql_identifier) AS domain_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS domain_catalog, CAST(nt.nspname AS sql_identifier) AS domain_schema, CAST(t.typname AS sql_identifier) AS domain_name, CAST( CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' - WHEN nbt.nspname = 'pg_catalog' THEN format_type(t.typbasetype, null) + WHEN nbt.nspname = 'pg_catalog' THEN pg_catalog.format_type(t.typbasetype, null) ELSE 'USER-DEFINED' END AS character_data) AS data_type, @@ -948,7 +948,7 @@ CREATE VIEW domains AS CAST(null AS sql_identifier) AS character_set_schema, CAST(null AS sql_identifier) AS character_set_name, - CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN pg_catalog.current_database() END AS sql_identifier) AS collation_catalog, CAST(nco.nspname AS sql_identifier) AS collation_schema, CAST(co.collname AS sql_identifier) AS collation_name, @@ -980,7 +980,7 @@ CREATE VIEW domains AS CAST(t.typdefault AS character_data) AS domain_default, - CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(nbt.nspname AS sql_identifier) AS udt_schema, CAST(bt.typname AS sql_identifier) AS udt_name, @@ -997,8 +997,8 @@ CREATE VIEW 
domains AS LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) ON t.typcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') - WHERE (pg_has_role(t.typowner, 'USAGE') - OR has_type_privilege(t.oid, 'USAGE')); + WHERE (pg_catalog.pg_has_role(t.typowner, 'USAGE') + OR pg_catalog.has_type_privilege(t.oid, 'USAGE')); GRANT SELECT ON domains TO PUBLIC; @@ -1014,7 +1014,7 @@ GRANT SELECT ON domains TO PUBLIC; CREATE VIEW enabled_roles AS SELECT CAST(a.rolname AS sql_identifier) AS role_name FROM pg_authid a - WHERE pg_has_role(a.oid, 'USAGE'); + WHERE pg_catalog.pg_has_role(a.oid, 'USAGE'); GRANT SELECT ON enabled_roles TO PUBLIC; @@ -1033,10 +1033,10 @@ GRANT SELECT ON enabled_roles TO PUBLIC; */ CREATE VIEW key_column_usage AS - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(nc_nspname AS sql_identifier) AS constraint_schema, CAST(conname AS sql_identifier) AS constraint_name, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nr_nspname AS sql_identifier) AS table_schema, CAST(relname AS sql_identifier) AS table_name, CAST(a.attname AS sql_identifier) AS column_name, @@ -1059,12 +1059,12 @@ CREATE VIEW key_column_usage AS AND nc.oid = c.connamespace AND c.contype IN ('p', 'u', 'f') AND r.relkind = 'r' - AND (NOT pg_is_other_temp_schema(nr.oid)) ) AS ss + AND (NOT pg_catalog.pg_is_other_temp_schema(nr.oid)) ) AS ss WHERE ss.roid = a.attrelid AND a.attnum = (ss.x).x AND NOT a.attisdropped - AND (pg_has_role(relowner, 'USAGE') - OR has_column_privilege(roid, a.attnum, + AND (pg_catalog.pg_has_role(relowner, 'USAGE') + OR pg_catalog.has_column_privilege(roid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES')); GRANT SELECT ON key_column_usage TO PUBLIC; @@ -1092,7 +1092,7 @@ GRANT SELECT ON key_column_usage TO PUBLIC; */ CREATE VIEW parameters AS - SELECT CAST(current_database() AS sql_identifier) AS specific_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS specific_catalog, CAST(n_nspname AS sql_identifier) AS specific_schema, CAST(proname || '_' || CAST(p_oid AS text) AS sql_identifier) AS specific_name, CAST((ss.x).n AS cardinal_number) AS ordinal_position, @@ -1109,7 +1109,7 @@ CREATE VIEW parameters AS CAST(NULLIF(proargnames[(ss.x).n], '') AS sql_identifier) AS parameter_name, CAST( CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' - WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, null) + WHEN nt.nspname = 'pg_catalog' THEN pg_catalog.format_type(t.oid, null) ELSE 'USER-DEFINED' END AS character_data) AS data_type, CAST(null AS cardinal_number) AS character_maximum_length, @@ -1126,7 +1126,7 @@ CREATE VIEW parameters AS CAST(null AS cardinal_number) AS datetime_precision, CAST(null AS character_data) AS interval_type, CAST(null AS cardinal_number) AS interval_precision, - CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(nt.nspname AS sql_identifier) AS udt_schema, CAST(t.typname AS sql_identifier) AS udt_name, CAST(null AS sql_identifier) AS scope_catalog, @@ -1141,8 +1141,8 @@ CREATE VIEW parameters AS _pg_expandarray(coalesce(p.proallargtypes, p.proargtypes::oid[])) AS x FROM pg_namespace n, pg_proc p WHERE n.oid = p.pronamespace - AND (pg_has_role(p.proowner, 'USAGE') OR - has_function_privilege(p.oid, 
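-- Note: the parameters view above numbers arguments with information_schema's
-- private helper _pg_expandarray, which unnests an array together with each
-- element's ordinal position: (ss.x).x is the element (an argument type OID)
-- and (ss.x).n its 1-based index. Roughly (illustrative sketch only):
--   SELECT (x).x AS elem, (x).n AS ord
--   FROM (SELECT information_schema._pg_expandarray(ARRAY[101, 102, 103]) AS x) s;
-- would yield (101,1), (102,2), (103,3).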
'EXECUTE'))) AS ss + AND (pg_catalog.pg_has_role(p.proowner, 'USAGE') OR + pg_catalog.has_function_privilege(p.oid, 'EXECUTE'))) AS ss WHERE t.oid = (ss.x).x AND t.typnamespace = nt.oid; GRANT SELECT ON parameters TO PUBLIC; @@ -1162,12 +1162,12 @@ GRANT SELECT ON parameters TO PUBLIC; */ CREATE VIEW referential_constraints AS - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(ncon.nspname AS sql_identifier) AS constraint_schema, CAST(con.conname AS sql_identifier) AS constraint_name, CAST( CASE WHEN npkc.nspname IS NULL THEN NULL - ELSE current_database() END + ELSE pg_catalog.current_database() END AS sql_identifier) AS unique_constraint_catalog, CAST(npkc.nspname AS sql_identifier) AS unique_constraint_schema, CAST(pkc.conname AS sql_identifier) AS unique_constraint_name, @@ -1210,10 +1210,10 @@ CREATE VIEW referential_constraints AS AND pkc.conrelid = con.confrelid LEFT JOIN pg_namespace npkc ON pkc.connamespace = npkc.oid - WHERE pg_has_role(c.relowner, 'USAGE') + WHERE pg_catalog.pg_has_role(c.relowner, 'USAGE') -- SELECT privilege omitted, per SQL standard - OR has_table_privilege(c.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(c.oid, 'INSERT, UPDATE, REFERENCES') ; + OR pg_catalog.has_table_privilege(c.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(c.oid, 'INSERT, UPDATE, REFERENCES') ; GRANT SELECT ON referential_constraints TO PUBLIC; @@ -1276,22 +1276,22 @@ GRANT SELECT ON role_column_grants TO PUBLIC; CREATE VIEW routine_privileges AS SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS specific_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS specific_catalog, CAST(n.nspname AS sql_identifier) AS specific_schema, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier) AS specific_name, - CAST(current_database() AS sql_identifier) AS routine_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS routine_catalog, CAST(n.nspname AS sql_identifier) AS routine_schema, CAST(p.proname AS sql_identifier) AS routine_name, CAST(p.prtype AS character_data) AS privilege_type, CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, p.proowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, p.proowner, 'USAGE') OR p.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable FROM ( - SELECT oid, proname, proowner, pronamespace, (aclexplode(coalesce(proacl, acldefault('f', proowner)))).* FROM pg_proc + SELECT oid, proname, proowner, pronamespace, (pg_catalog.aclexplode(coalesce(proacl, pg_catalog.acldefault('f', proowner)))).* FROM pg_proc ) p (oid, proname, proowner, pronamespace, grantor, grantee, prtype, grantable), pg_namespace n, pg_authid u_grantor, @@ -1305,8 +1305,8 @@ CREATE VIEW routine_privileges AS AND grantee.oid = p.grantee AND u_grantor.oid = p.grantor AND p.prtype IN ('EXECUTE', 'ALTER', 'DROP', 'COMMENT') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC'); GRANT SELECT ON routine_privileges TO PUBLIC; @@ -1365,10 +1365,10 @@ GRANT SELECT ON role_routine_grants TO PUBLIC; */ CREATE VIEW routines AS - SELECT 
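-- Note: the privilege views here expand ACL columns with the pattern
--   (pg_catalog.aclexplode(coalesce(acl, pg_catalog.acldefault(kind, owner)))).*
-- aclexplode() turns an aclitem[] into one (grantor, grantee, privilege_type,
-- is_grantable) row per granted privilege, and acldefault() supplies the
-- implicit default ACL when the catalog column is NULL (kind 'r' = relation,
-- 'f' = function, 'T' = type, 'F' = foreign-data wrapper, 'S' = foreign
-- server). Qualifying both with pg_catalog pins them to the built-in
-- definitions, consistent with the rest of this patch.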
CAST(current_database() AS sql_identifier) AS specific_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS specific_catalog, CAST(n.nspname AS sql_identifier) AS specific_schema, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier) AS specific_name, - CAST(current_database() AS sql_identifier) AS routine_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS routine_catalog, CAST(n.nspname AS sql_identifier) AS routine_schema, CAST(p.proname AS sql_identifier) AS routine_name, CAST('FUNCTION' AS character_data) AS routine_type, @@ -1381,7 +1381,7 @@ CREATE VIEW routines AS CAST( CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' - WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, null) + WHEN nt.nspname = 'pg_catalog' THEN pg_catalog.format_type(t.oid, null) ELSE 'USER-DEFINED' END AS character_data) AS data_type, CAST(null AS cardinal_number) AS character_maximum_length, @@ -1398,7 +1398,7 @@ CREATE VIEW routines AS CAST(null AS cardinal_number) AS datetime_precision, CAST(null AS character_data) AS interval_type, CAST(null AS cardinal_number) AS interval_precision, - CAST(current_database() AS sql_identifier) AS type_udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS type_udt_catalog, CAST(nt.nspname AS sql_identifier) AS type_udt_schema, CAST(t.typname AS sql_identifier) AS type_udt_name, CAST(null AS sql_identifier) AS scope_catalog, @@ -1410,12 +1410,12 @@ CREATE VIEW routines AS CAST(CASE WHEN l.lanname = 'sql' THEN 'SQL' ELSE 'EXTERNAL' END AS character_data) AS routine_body, CAST( - CASE WHEN pg_has_role(p.proowner, 'USAGE') THEN p.prosrc ELSE null END + CASE WHEN pg_catalog.pg_has_role(p.proowner, 'USAGE') THEN p.prosrc ELSE null END AS character_data) AS routine_definition, CAST( CASE WHEN l.lanname = 'c' THEN p.prosrc ELSE null END AS character_data) AS external_name, - CAST(upper(l.lanname) AS character_data) AS external_language, + CAST(pg_catalog.upper(l.lanname) AS character_data) AS external_language, CAST('GENERAL' AS character_data) AS parameter_style, CAST(CASE WHEN p.provolatile = 'i' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_deterministic, @@ -1466,8 +1466,8 @@ CREATE VIEW routines AS WHERE n.oid = p.pronamespace AND p.prolang = l.oid AND p.prorettype = t.oid AND t.typnamespace = nt.oid - AND (pg_has_role(p.proowner, 'USAGE') - OR has_function_privilege(p.oid, 'EXECUTE')); + AND (pg_catalog.pg_has_role(p.proowner, 'USAGE') + OR pg_catalog.has_function_privilege(p.oid, 'EXECUTE')); GRANT SELECT ON routines TO PUBLIC; @@ -1478,7 +1478,7 @@ GRANT SELECT ON routines TO PUBLIC; */ CREATE VIEW schemata AS - SELECT CAST(current_database() AS sql_identifier) AS catalog_name, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS catalog_name, CAST(n.nspname AS sql_identifier) AS schema_name, CAST(u.rolname AS sql_identifier) AS schema_owner, CAST(null AS sql_identifier) AS default_character_set_catalog, @@ -1486,7 +1486,7 @@ CREATE VIEW schemata AS CAST(null AS sql_identifier) AS default_character_set_name, CAST(null AS character_data) AS sql_path FROM pg_namespace n, pg_authid u - WHERE n.nspowner = u.oid AND pg_has_role(n.nspowner, 'USAGE'); + WHERE n.nspowner = u.oid AND pg_catalog.pg_has_role(n.nspowner, 'USAGE'); GRANT SELECT ON schemata TO PUBLIC; @@ -1497,7 +1497,7 @@ GRANT SELECT ON schemata TO PUBLIC; */ CREATE VIEW sequences AS - SELECT CAST(current_database() AS sql_identifier) AS sequence_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS 
sequence_catalog, CAST(nc.nspname AS sql_identifier) AS sequence_schema, CAST(c.relname AS sql_identifier) AS sequence_name, CAST('int16' AS character_data) AS data_type, @@ -1505,17 +1505,17 @@ CREATE VIEW sequences AS CAST(2 AS cardinal_number) AS numeric_precision_radix, CAST(0 AS cardinal_number) AS numeric_scale, -- XXX: The following could be improved if we had LATERAL. - CAST((pg_sequence_parameters(c.oid)).start_value AS character_data) AS start_value, - CAST((pg_sequence_parameters(c.oid)).minimum_value AS character_data) AS minimum_value, - CAST((pg_sequence_parameters(c.oid)).maximum_value AS character_data) AS maximum_value, - CAST((pg_sequence_parameters(c.oid)).increment AS character_data) AS increment, - CAST(CASE WHEN (pg_sequence_parameters(c.oid)).cycle_option THEN 'YES' ELSE 'NO' END AS yes_or_no) AS cycle_option + CAST((pg_catalog.pg_sequence_parameters(c.oid)).start_value AS character_data) AS start_value, + CAST((pg_catalog.pg_sequence_parameters(c.oid)).minimum_value AS character_data) AS minimum_value, + CAST((pg_catalog.pg_sequence_parameters(c.oid)).maximum_value AS character_data) AS maximum_value, + CAST((pg_catalog.pg_sequence_parameters(c.oid)).increment AS character_data) AS increment, + CAST(CASE WHEN (pg_catalog.pg_sequence_parameters(c.oid)).cycle_option THEN 'YES' ELSE 'NO' END AS yes_or_no) AS cycle_option FROM pg_namespace nc, pg_class c WHERE c.relnamespace = nc.oid AND (c.relkind = 'L' or c.relkind = 'S') - AND (NOT pg_is_other_temp_schema(nc.oid)) - AND (pg_has_role(c.relowner, 'USAGE') - OR has_sequence_privilege(c.oid, 'SELECT, UPDATE, USAGE') ); + AND (NOT pg_catalog.pg_is_other_temp_schema(nc.oid)) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_sequence_privilege(c.oid, 'SELECT, UPDATE, USAGE') ); GRANT SELECT ON sequences TO PUBLIC; @@ -1560,7 +1560,7 @@ INSERT INTO sql_implementation_info VALUES ('10003', 'CATALOG NAME', NULL, 'Y', INSERT INTO sql_implementation_info VALUES ('10004', 'COLLATING SEQUENCE', NULL, (SELECT default_collate_name FROM character_sets), NULL); INSERT INTO sql_implementation_info VALUES ('23', 'CURSOR COMMIT BEHAVIOR', 1, NULL, 'close cursors and retain prepared statements'); INSERT INTO sql_implementation_info VALUES ('2', 'DATA SOURCE NAME', NULL, '', NULL); -INSERT INTO sql_implementation_info VALUES ('17', 'DBMS NAME', NULL, (select trim(trailing ' ' from substring(version() from '^[^0-9]*'))), NULL); +INSERT INTO sql_implementation_info VALUES ('17', 'DBMS NAME', NULL, (select trim(trailing ' ' from substring(pg_catalog.version() from '^[^0-9]*'))), NULL); INSERT INTO sql_implementation_info VALUES ('18', 'DBMS VERSION', NULL, '???', NULL); -- filled by initdb INSERT INTO sql_implementation_info VALUES ('26', 'DEFAULT TRANSACTION ISOLATION', 2, NULL, 'READ COMMITTED; user-settable'); INSERT INTO sql_implementation_info VALUES ('28', 'IDENTIFIER CASE', 3, NULL, 'stored in mixed case - case sensitive'); @@ -1718,10 +1718,10 @@ GRANT SELECT ON sql_sizing_profiles TO PUBLIC; */ CREATE VIEW table_constraints AS - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(nc.nspname AS sql_identifier) AS constraint_schema, CAST(c.conname AS sql_identifier) AS constraint_name, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nr.nspname AS sql_identifier) AS table_schema, CAST(r.relname AS sql_identifier) 
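-- Note: the sequences view above invokes pg_sequence_parameters() once per
-- selected column, as its own "XXX" comment concedes. With LATERAL support it
-- could be evaluated once per sequence; a sketch (not part of the patch,
-- column list abridged):
--   SELECT ..., CAST(p.start_value AS character_data) AS start_value, ...
--   FROM pg_namespace nc, pg_class c,
--        LATERAL pg_catalog.pg_sequence_parameters(c.oid) p
--   WHERE c.relnamespace = nc.oid AND ...;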
AS table_name, CAST( @@ -1744,20 +1744,20 @@ CREATE VIEW table_constraints AS AND c.conrelid = r.oid AND c.contype NOT IN ('t', 'x') -- ignore nonstandard constraints AND r.relkind = 'r' - AND (NOT pg_is_other_temp_schema(nr.oid)) - AND (pg_has_role(r.relowner, 'USAGE') + AND (NOT pg_catalog.pg_is_other_temp_schema(nr.oid)) + AND (pg_catalog.pg_has_role(r.relowner, 'USAGE') -- SELECT privilege omitted, per SQL standard - OR has_table_privilege(r.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(r.oid, 'INSERT, UPDATE, REFERENCES') ) + OR pg_catalog.has_table_privilege(r.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(r.oid, 'INSERT, UPDATE, REFERENCES') ) UNION ALL -- not-null constraints - SELECT CAST(current_database() AS sql_identifier) AS constraint_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS constraint_catalog, CAST(nr.nspname AS sql_identifier) AS constraint_schema, CAST(CAST(nr.oid AS text) || '_' || CAST(r.oid AS text) || '_' || CAST(a.attnum AS text) || '_not_null' AS sql_identifier) AS constraint_name, -- XXX - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nr.nspname AS sql_identifier) AS table_schema, CAST(r.relname AS sql_identifier) AS table_name, CAST('CHECK' AS character_data) AS constraint_type, @@ -1774,11 +1774,11 @@ CREATE VIEW table_constraints AS AND a.attnum > 0 AND NOT a.attisdropped AND r.relkind = 'r' - AND (NOT pg_is_other_temp_schema(nr.oid)) - AND (pg_has_role(r.relowner, 'USAGE') + AND (NOT pg_catalog.pg_is_other_temp_schema(nr.oid)) + AND (pg_catalog.pg_has_role(r.relowner, 'USAGE') -- SELECT privilege omitted, per SQL standard - OR has_table_privilege(r.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(r.oid, 'INSERT, UPDATE, REFERENCES') ); + OR pg_catalog.has_table_privilege(r.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(r.oid, 'INSERT, UPDATE, REFERENCES') ); GRANT SELECT ON table_constraints TO PUBLIC; @@ -1799,20 +1799,20 @@ GRANT SELECT ON table_constraints TO PUBLIC; CREATE VIEW table_privileges AS SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(c.relname AS sql_identifier) AS table_name, CAST(c.prtype AS character_data) AS privilege_type, CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, c.relowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, c.relowner, 'USAGE') OR c.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy FROM ( - SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + SELECT oid, relname, relnamespace, relkind, relowner, (pg_catalog.aclexplode(coalesce(relacl, pg_catalog.acldefault('r', relowner)))).* FROM pg_class ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), pg_namespace nc, pg_authid u_grantor, @@ -1824,14 +1824,14 @@ CREATE VIEW table_privileges AS WHERE c.relnamespace = nc.oid AND c.relkind IN ('r', 
'm', 'v') - AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') AND c.grantee = grantee.oid AND c.grantor = u_grantor.oid AND (c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') OR c.prtype IN ('ALTER', 'DROP', 'COMMENT', 'INDEX', 'VACUUM') ) - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC'); GRANT SELECT ON table_privileges TO PUBLIC; @@ -1864,12 +1864,12 @@ GRANT SELECT ON role_table_grants TO PUBLIC; */ CREATE VIEW tables AS - SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(c.relname AS sql_identifier) AS table_name, CAST( - CASE WHEN nc.oid = pg_my_temp_schema() THEN 'LOCAL TEMPORARY' + CASE WHEN nc.oid = pg_catalog.pg_my_temp_schema() THEN 'LOCAL TEMPORARY' WHEN c.relkind = 'r' THEN 'BASE TABLE' WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW' WHEN c.relkind = 'v' THEN 'VIEW' @@ -1880,7 +1880,7 @@ CREATE VIEW tables AS CAST(null AS sql_identifier) AS self_referencing_column_name, CAST(null AS character_data) AS reference_generation, - CAST(CASE WHEN t.typname IS NOT NULL THEN current_database() ELSE null END AS sql_identifier) AS user_defined_type_catalog, + CAST(CASE WHEN t.typname IS NOT NULL THEN pg_catalog.current_database() ELSE null END AS sql_identifier) AS user_defined_type_catalog, CAST(nt.nspname AS sql_identifier) AS user_defined_type_schema, CAST(t.typname AS sql_identifier) AS user_defined_type_name, @@ -1896,11 +1896,11 @@ CREATE VIEW tables AS LEFT JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON (c.reloftype = t.oid) WHERE c.relkind IN ('r', 'm', 'v', 'f') - AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') - AND (NOT pg_is_other_temp_schema(nc.oid)) - AND (pg_has_role(c.relowner, 'USAGE') - OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND (NOT pg_catalog.pg_is_other_temp_schema(nc.oid)) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); GRANT SELECT ON tables TO PUBLIC; @@ -1927,10 +1927,10 @@ GRANT SELECT ON tables TO PUBLIC; */ CREATE VIEW triggered_update_columns AS - SELECT CAST(current_database() AS sql_identifier) AS trigger_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS trigger_catalog, CAST(n.nspname AS sql_identifier) AS trigger_schema, CAST(t.tgname AS sql_identifier) AS trigger_name, - CAST(current_database() AS sql_identifier) AS event_object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS event_object_catalog, CAST(n.nspname AS sql_identifier) AS event_object_schema, CAST(c.relname AS sql_identifier) AS event_object_table, CAST(a.attname AS sql_identifier) AS event_object_column @@ -1945,10 +1945,10 @@ CREATE VIEW triggered_update_columns AS AND t.oid = ta.tgoid AND (a.attrelid, a.attnum) = (t.tgrelid, ta.tgattnum) AND NOT 
t.tgisinternal - AND (NOT pg_is_other_temp_schema(n.oid)) - AND (pg_has_role(c.relowner, 'USAGE') + AND (NOT pg_catalog.pg_is_other_temp_schema(n.oid)) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') -- SELECT privilege omitted, per SQL standard - OR has_column_privilege(c.oid, a.attnum, 'INSERT, UPDATE, REFERENCES') ); + OR pg_catalog.has_column_privilege(c.oid, a.attnum, 'INSERT, UPDATE, REFERENCES') ); GRANT SELECT ON triggered_update_columns TO PUBLIC; @@ -1991,23 +1991,23 @@ GRANT SELECT ON triggered_update_columns TO PUBLIC; */ CREATE VIEW triggers AS - SELECT CAST(current_database() AS sql_identifier) AS trigger_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS trigger_catalog, CAST(n.nspname AS sql_identifier) AS trigger_schema, CAST(t.tgname AS sql_identifier) AS trigger_name, CAST(em.text AS character_data) AS event_manipulation, - CAST(current_database() AS sql_identifier) AS event_object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS event_object_catalog, CAST(n.nspname AS sql_identifier) AS event_object_schema, CAST(c.relname AS sql_identifier) AS event_object_table, CAST(null AS cardinal_number) AS action_order, -- XXX strange hacks follow CAST( - CASE WHEN pg_has_role(c.relowner, 'USAGE') - THEN (SELECT m[1] FROM regexp_matches(pg_get_triggerdef(t.oid), E'.{35,} WHEN \\((.+)\\) EXECUTE PROCEDURE') AS rm(m) LIMIT 1) + CASE WHEN pg_catalog.pg_has_role(c.relowner, 'USAGE') + THEN (SELECT m[1] FROM pg_catalog.regexp_matches(pg_catalog.pg_get_triggerdef(t.oid), E'.{35,} WHEN \\((.+)\\) EXECUTE PROCEDURE') AS rm(m) LIMIT 1) ELSE null END AS character_data) AS action_condition, CAST( - substring(pg_get_triggerdef(t.oid) from - position('EXECUTE PROCEDURE' in substring(pg_get_triggerdef(t.oid) from 48)) + 47) + substring(pg_catalog.pg_get_triggerdef(t.oid) from + position('EXECUTE PROCEDURE' in substring(pg_catalog.pg_get_triggerdef(t.oid) from 48)) + 47) AS character_data) AS action_statement, CAST( -- hard-wired reference to TRIGGER_TYPE_ROW @@ -2034,11 +2034,11 @@ CREATE VIEW triggers AS AND c.oid = t.tgrelid AND t.tgtype & em.num <> 0 AND NOT t.tgisinternal - AND (NOT pg_is_other_temp_schema(n.oid)) - AND (pg_has_role(c.relowner, 'USAGE') + AND (NOT pg_catalog.pg_is_other_temp_schema(n.oid)) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') -- SELECT privilege omitted, per SQL standard - OR has_table_privilege(c.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(c.oid, 'INSERT, UPDATE, REFERENCES') ); + OR pg_catalog.has_table_privilege(c.oid, 'INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(c.oid, 'INSERT, UPDATE, REFERENCES') ); GRANT SELECT ON triggers TO PUBLIC; @@ -2051,19 +2051,19 @@ GRANT SELECT ON triggers TO PUBLIC; CREATE VIEW udt_privileges AS SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(n.nspname AS sql_identifier) AS udt_schema, CAST(t.typname AS sql_identifier) AS udt_name, CAST(t.prtype AS character_data) AS privilege_type, -- sic CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, t.typowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, t.typowner, 'USAGE') OR t.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable FROM ( - SELECT oid, typname, typnamespace, typtype, typowner, 
(aclexplode(coalesce(typacl, acldefault('T', typowner)))).* FROM pg_type + SELECT oid, typname, typnamespace, typtype, typowner, (pg_catalog.aclexplode(coalesce(typacl, pg_catalog.acldefault('T', typowner)))).* FROM pg_type ) AS t (oid, typname, typnamespace, typtype, typowner, grantor, grantee, prtype, grantable), pg_namespace n, pg_authid u_grantor, @@ -2078,8 +2078,8 @@ CREATE VIEW udt_privileges AS AND t.grantee = grantee.oid AND t.grantor = u_grantor.oid AND t.prtype IN ('USAGE', 'ALTER', 'DROP', 'COMMENT') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC'); GRANT SELECT ON udt_privileges TO PUBLIC; @@ -2116,7 +2116,7 @@ CREATE VIEW usage_privileges AS -- Collations have no real privileges, so we represent all collations with implicit usage privilege here. SELECT CAST(u.rolname AS sql_identifier) AS grantor, CAST('PUBLIC' AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST(n.nspname AS sql_identifier) AS object_schema, CAST(c.collname AS sql_identifier) AS object_name, CAST('COLLATION' AS character_data) AS object_type, @@ -2129,14 +2129,14 @@ CREATE VIEW usage_privileges AS WHERE u.oid = c.collowner AND c.collnamespace = n.oid - AND collencoding IN (-1, (SELECT encoding FROM pg_database WHERE datname = current_database())) + AND collencoding IN (-1, (SELECT encoding FROM pg_database WHERE datname = pg_catalog.current_database())) UNION ALL /* domains */ SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST(n.nspname AS sql_identifier) AS object_schema, CAST(t.typname AS sql_identifier) AS object_name, CAST('DOMAIN' AS character_data) AS object_type, @@ -2144,12 +2144,12 @@ CREATE VIEW usage_privileges AS CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, t.typowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, t.typowner, 'USAGE') OR t.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable FROM ( - SELECT oid, typname, typnamespace, typtype, typowner, (aclexplode(coalesce(typacl, acldefault('T', typowner)))).* FROM pg_type + SELECT oid, typname, typnamespace, typtype, typowner, (pg_catalog.aclexplode(coalesce(typacl, pg_catalog.acldefault('T', typowner)))).* FROM pg_type ) AS t (oid, typname, typnamespace, typtype, typowner, grantor, grantee, prtype, grantable), pg_namespace n, pg_authid u_grantor, @@ -2164,8 +2164,8 @@ CREATE VIEW usage_privileges AS AND t.grantee = grantee.oid AND t.grantor = u_grantor.oid AND t.prtype IN ('USAGE') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC') UNION ALL @@ -2173,7 +2173,7 @@ CREATE VIEW usage_privileges AS /* foreign-data wrappers */ SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST('' AS sql_identifier) AS object_schema, CAST(fdw.fdwname AS 
sql_identifier) AS object_name, CAST('FOREIGN DATA WRAPPER' AS character_data) AS object_type, @@ -2181,12 +2181,12 @@ CREATE VIEW usage_privileges AS CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, fdw.fdwowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, fdw.fdwowner, 'USAGE') OR fdw.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable FROM ( - SELECT fdwname, fdwowner, (aclexplode(coalesce(fdwacl, acldefault('F', fdwowner)))).* FROM pg_foreign_data_wrapper + SELECT fdwname, fdwowner, (pg_catalog.aclexplode(coalesce(fdwacl, pg_catalog.acldefault('F', fdwowner)))).* FROM pg_foreign_data_wrapper ) AS fdw (fdwname, fdwowner, grantor, grantee, prtype, grantable), pg_authid u_grantor, ( @@ -2198,8 +2198,8 @@ CREATE VIEW usage_privileges AS WHERE u_grantor.oid = fdw.grantor AND grantee.oid = fdw.grantee AND fdw.prtype IN ('USAGE') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC') UNION ALL @@ -2207,7 +2207,7 @@ CREATE VIEW usage_privileges AS /* foreign servers */ SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST('' AS sql_identifier) AS object_schema, CAST(srv.srvname AS sql_identifier) AS object_name, CAST('FOREIGN SERVER' AS character_data) AS object_type, @@ -2215,12 +2215,12 @@ CREATE VIEW usage_privileges AS CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, srv.srvowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, srv.srvowner, 'USAGE') OR srv.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable FROM ( - SELECT srvname, srvowner, (aclexplode(coalesce(srvacl, acldefault('S', srvowner)))).* FROM pg_foreign_server + SELECT srvname, srvowner, (pg_catalog.aclexplode(coalesce(srvacl, pg_catalog.acldefault('S', srvowner)))).* FROM pg_foreign_server ) AS srv (srvname, srvowner, grantor, grantee, prtype, grantable), pg_authid u_grantor, ( @@ -2232,8 +2232,8 @@ CREATE VIEW usage_privileges AS WHERE u_grantor.oid = srv.grantor AND grantee.oid = srv.grantee AND srv.prtype IN ('USAGE') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC') UNION ALL @@ -2241,7 +2241,7 @@ CREATE VIEW usage_privileges AS /* sequences */ SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, CAST(grantee.rolname AS sql_identifier) AS grantee, - CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST(n.nspname AS sql_identifier) AS object_schema, CAST(c.relname AS sql_identifier) AS object_name, CAST('SEQUENCE' AS character_data) AS object_type, @@ -2249,12 +2249,12 @@ CREATE VIEW usage_privileges AS CAST( CASE WHEN -- object owner always has grant options - pg_has_role(grantee.oid, c.relowner, 'USAGE') + pg_catalog.pg_has_role(grantee.oid, c.relowner, 'USAGE') OR c.grantable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable FROM ( - SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + SELECT oid, relname, relnamespace, 
relkind, relowner, (pg_catalog.aclexplode(coalesce(relacl, pg_catalog.acldefault('r', relowner)))).* FROM pg_class ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), pg_namespace n, pg_authid u_grantor, @@ -2269,8 +2269,8 @@ CREATE VIEW usage_privileges AS AND c.grantee = grantee.oid AND c.grantor = u_grantor.oid AND c.prtype IN ('USAGE') - AND (pg_has_role(u_grantor.oid, 'USAGE') - OR pg_has_role(grantee.oid, 'USAGE') + AND (pg_catalog.pg_has_role(u_grantor.oid, 'USAGE') + OR pg_catalog.pg_has_role(grantee.oid, 'USAGE') OR grantee.rolname = 'PUBLIC'); GRANT SELECT ON usage_privileges TO PUBLIC; @@ -2303,7 +2303,7 @@ GRANT SELECT ON role_usage_grants TO PUBLIC; */ CREATE VIEW user_defined_types AS - SELECT CAST(current_database() AS sql_identifier) AS user_defined_type_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS user_defined_type_catalog, CAST(n.nspname AS sql_identifier) AS user_defined_type_schema, CAST(c.relname AS sql_identifier) AS user_defined_type_name, CAST('STRUCTURED' AS character_data) AS user_defined_type_category, @@ -2338,8 +2338,8 @@ CREATE VIEW user_defined_types AS WHERE n.oid = c.relnamespace AND t.typrelid = c.oid AND c.relkind = 'c' - AND (pg_has_role(t.typowner, 'USAGE') - OR has_type_privilege(t.oid, 'USAGE')); + AND (pg_catalog.pg_has_role(t.typowner, 'USAGE') + OR pg_catalog.has_type_privilege(t.oid, 'USAGE')); GRANT SELECT ON user_defined_types TO PUBLIC; @@ -2351,10 +2351,10 @@ GRANT SELECT ON user_defined_types TO PUBLIC; CREATE VIEW view_column_usage AS SELECT DISTINCT - CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS view_catalog, CAST(nv.nspname AS sql_identifier) AS view_schema, CAST(v.relname AS sql_identifier) AS view_name, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nt.nspname AS sql_identifier) AS table_schema, CAST(t.relname AS sql_identifier) AS table_name, CAST(a.attname AS sql_identifier) AS column_name @@ -2376,10 +2376,10 @@ CREATE VIEW view_column_usage AS AND dt.refobjid = t.oid AND t.relnamespace = nt.oid AND t.relkind IN ('r', 'm', 'v', 'f') - AND (t.relname not like 'mlog_%' AND t.relname not like 'matviewmap_%') + AND (t.relname not like 'mlog\_%' AND t.relname not like 'matviewmap\_%') AND t.oid = a.attrelid AND dt.refobjsubid = a.attnum - AND pg_has_role(t.relowner, 'USAGE'); + AND pg_catalog.pg_has_role(t.relowner, 'USAGE'); GRANT SELECT ON view_column_usage TO PUBLIC; @@ -2391,10 +2391,10 @@ GRANT SELECT ON view_column_usage TO PUBLIC; CREATE VIEW view_routine_usage AS SELECT DISTINCT - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nv.nspname AS sql_identifier) AS table_schema, CAST(v.relname AS sql_identifier) AS table_name, - CAST(current_database() AS sql_identifier) AS specific_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS specific_catalog, CAST(np.nspname AS sql_identifier) AS specific_schema, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier) AS specific_name @@ -2412,7 +2412,7 @@ CREATE VIEW view_routine_usage AS AND dp.refclassid = 'pg_catalog.pg_proc'::regclass AND dp.refobjid = p.oid AND p.pronamespace = np.oid - AND pg_has_role(p.proowner, 'USAGE'); + AND pg_catalog.pg_has_role(p.proowner, 'USAGE'); GRANT SELECT ON view_routine_usage TO PUBLIC; @@ -2424,10 +2424,10 
@@ GRANT SELECT ON view_routine_usage TO PUBLIC; CREATE VIEW view_table_usage AS SELECT DISTINCT - CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS view_catalog, CAST(nv.nspname AS sql_identifier) AS view_schema, CAST(v.relname AS sql_identifier) AS view_name, - CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nt.nspname AS sql_identifier) AS table_schema, CAST(t.relname AS sql_identifier) AS table_name @@ -2447,8 +2447,8 @@ CREATE VIEW view_table_usage AS AND dt.refobjid = t.oid AND t.relnamespace = nt.oid AND t.relkind IN ('r', 'm', 'v', 'f') - AND (t.relname not like 'mlog_%' AND t.relname not like 'matviewmap_%') - AND pg_has_role(t.relowner, 'USAGE'); + AND (t.relname not like 'mlog\_%' AND t.relname not like 'matviewmap\_%') + AND pg_catalog.pg_has_role(t.relowner, 'USAGE'); GRANT SELECT ON view_table_usage TO PUBLIC; @@ -2459,13 +2459,13 @@ GRANT SELECT ON view_table_usage TO PUBLIC; */ CREATE VIEW views AS - SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, CAST(nc.nspname AS sql_identifier) AS table_schema, CAST(c.relname AS sql_identifier) AS table_name, CAST( - CASE WHEN pg_has_role(c.relowner, 'USAGE') - THEN pg_get_viewdef(c.oid) + CASE WHEN pg_catalog.pg_has_role(c.relowner, 'USAGE') + THEN pg_catalog.pg_get_viewdef(c.oid) ELSE null END AS character_data) AS view_definition, @@ -2504,10 +2504,10 @@ CREATE VIEW views AS WHERE c.relnamespace = nc.oid AND c.relkind = 'v' - AND (NOT pg_is_other_temp_schema(nc.oid)) - AND (pg_has_role(c.relowner, 'USAGE') - OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); + AND (NOT pg_catalog.pg_is_other_temp_schema(nc.oid)) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); GRANT SELECT ON views TO PUBLIC; @@ -2520,7 +2520,7 @@ GRANT SELECT ON views TO PUBLIC; */ CREATE VIEW data_type_privileges AS - SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST(x.objschema AS sql_identifier) AS object_schema, CAST(x.objname AS sql_identifier) AS object_name, CAST(x.objtype AS character_data) AS object_type, @@ -2548,13 +2548,13 @@ GRANT SELECT ON data_type_privileges TO PUBLIC; */ CREATE VIEW element_types AS - SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS object_catalog, CAST(n.nspname AS sql_identifier) AS object_schema, CAST(x.objname AS sql_identifier) AS object_name, CAST(x.objtype AS character_data) AS object_type, CAST(x.objdtdid AS sql_identifier) AS collection_type_identifier, CAST( - CASE WHEN nbt.nspname = 'pg_catalog' THEN format_type(bt.oid, null) + CASE WHEN nbt.nspname = 'pg_catalog' THEN pg_catalog.format_type(bt.oid, null) ELSE 'USER-DEFINED' END AS character_data) AS data_type, CAST(null AS cardinal_number) AS character_maximum_length, @@ -2562,7 +2562,7 @@ CREATE VIEW element_types AS CAST(null AS sql_identifier) AS character_set_catalog, CAST(null AS sql_identifier) AS 
character_set_schema, CAST(null AS sql_identifier) AS character_set_name, - CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN pg_catalog.current_database() END AS sql_identifier) AS collation_catalog, CAST(nco.nspname AS sql_identifier) AS collation_schema, CAST(co.collname AS sql_identifier) AS collation_name, CAST(null AS cardinal_number) AS numeric_precision, @@ -2574,7 +2574,7 @@ CREATE VIEW element_types AS CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard - CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS udt_catalog, CAST(nbt.nspname AS sql_identifier) AS udt_schema, CAST(bt.typname AS sql_identifier) AS udt_name, @@ -2594,7 +2594,7 @@ CREATE VIEW element_types AS FROM pg_class c, pg_attribute a WHERE c.oid = a.attrelid AND c.relkind IN ('r', 'm', 'v', 'f', 'c') - AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') AND attnum > 0 AND NOT attisdropped UNION ALL @@ -2649,8 +2649,8 @@ CREATE VIEW _pg_foreign_table_columns AS FROM pg_foreign_table t, pg_authid u, pg_namespace n, pg_class c, pg_attribute a WHERE u.oid = c.relowner - AND (pg_has_role(c.relowner, 'USAGE') - OR has_column_privilege(c.oid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES')) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_column_privilege(c.oid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES')) AND n.oid = c.relnamespace AND c.oid = t.ftrelid AND c.relkind = 'f' @@ -2662,12 +2662,12 @@ CREATE VIEW _pg_foreign_table_columns AS * COLUMN_OPTIONS view */ CREATE VIEW column_options AS - SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + SELECT CAST(pg_catalog.current_database() AS sql_identifier) AS table_catalog, c.nspname AS table_schema, c.relname AS table_name, c.attname AS column_name, - CAST((pg_options_to_table(c.attfdwoptions)).option_name AS sql_identifier) AS option_name, - CAST((pg_options_to_table(c.attfdwoptions)).option_value AS character_data) AS option_value + CAST((pg_catalog.pg_options_to_table(c.attfdwoptions)).option_name AS sql_identifier) AS option_name, + CAST((pg_catalog.pg_options_to_table(c.attfdwoptions)).option_value AS character_data) AS option_value FROM _pg_foreign_table_columns c; GRANT SELECT ON column_options TO PUBLIC; @@ -2678,14 +2678,14 @@ CREATE VIEW _pg_foreign_data_wrappers AS SELECT w.oid, w.fdwowner, w.fdwoptions, - CAST(current_database() AS sql_identifier) AS foreign_data_wrapper_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS foreign_data_wrapper_catalog, CAST(fdwname AS sql_identifier) AS foreign_data_wrapper_name, CAST(u.rolname AS sql_identifier) AS authorization_identifier, CAST('c' AS character_data) AS foreign_data_wrapper_language FROM pg_foreign_data_wrapper w, pg_authid u WHERE u.oid = w.fdwowner - AND (pg_has_role(fdwowner, 'USAGE') - OR has_foreign_data_wrapper_privilege(w.oid, 'USAGE')); + AND (pg_catalog.pg_has_role(fdwowner, 'USAGE') + OR pg_catalog.has_foreign_data_wrapper_privilege(w.oid, 'USAGE')); /* @@ -2695,8 +2695,8 @@ CREATE VIEW _pg_foreign_data_wrappers AS CREATE VIEW foreign_data_wrapper_options AS SELECT foreign_data_wrapper_catalog, foreign_data_wrapper_name, - CAST((pg_options_to_table(w.fdwoptions)).option_name AS sql_identifier) AS option_name, - 
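-- Note: column_options above, and the *_options views below, unpack
-- "keyword=value" option arrays with pg_options_to_table(), which returns one
-- (option_name, option_value) row per entry. For example (illustrative only):
--   SELECT * FROM pg_catalog.pg_options_to_table(ARRAY['host=10.0.0.1', 'port=5432']);
-- returns ('host','10.0.0.1') and ('port','5432').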
CAST((pg_options_to_table(w.fdwoptions)).option_value AS character_data) AS option_value + CAST((pg_catalog.pg_options_to_table(w.fdwoptions)).option_name AS sql_identifier) AS option_name, + CAST((pg_catalog.pg_options_to_table(w.fdwoptions)).option_value AS character_data) AS option_value FROM _pg_foreign_data_wrappers w; GRANT SELECT ON foreign_data_wrapper_options TO PUBLIC; @@ -2721,9 +2721,9 @@ GRANT SELECT ON foreign_data_wrappers TO PUBLIC; CREATE VIEW _pg_foreign_servers AS SELECT s.oid, s.srvoptions, - CAST(current_database() AS sql_identifier) AS foreign_server_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS foreign_server_catalog, CAST(srvname AS sql_identifier) AS foreign_server_name, - CAST(current_database() AS sql_identifier) AS foreign_data_wrapper_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS foreign_data_wrapper_catalog, CAST(w.fdwname AS sql_identifier) AS foreign_data_wrapper_name, CAST(srvtype AS character_data) AS foreign_server_type, CAST(srvversion AS character_data) AS foreign_server_version, @@ -2731,8 +2731,8 @@ CREATE VIEW _pg_foreign_servers AS FROM pg_foreign_server s, pg_foreign_data_wrapper w, pg_authid u WHERE w.oid = s.srvfdw AND u.oid = s.srvowner - AND (pg_has_role(s.srvowner, 'USAGE') - OR has_server_privilege(s.oid, 'USAGE')); + AND (pg_catalog.pg_has_role(s.srvowner, 'USAGE') + OR pg_catalog.has_server_privilege(s.oid, 'USAGE')); /* @@ -2742,8 +2742,8 @@ CREATE VIEW _pg_foreign_servers AS CREATE VIEW foreign_server_options AS SELECT foreign_server_catalog, foreign_server_name, - CAST((pg_options_to_table(s.srvoptions)).option_name AS sql_identifier) AS option_name, - CAST((pg_options_to_table(s.srvoptions)).option_value AS character_data) AS option_value + CAST((pg_catalog.pg_options_to_table(s.srvoptions)).option_name AS sql_identifier) AS option_name, + CAST((pg_catalog.pg_options_to_table(s.srvoptions)).option_value AS character_data) AS option_value FROM _pg_foreign_servers s; GRANT SELECT ON TABLE foreign_server_options TO PUBLIC; @@ -2769,20 +2769,20 @@ GRANT SELECT ON foreign_servers TO PUBLIC; /* Base view for foreign tables */ CREATE VIEW _pg_foreign_tables AS SELECT - CAST(current_database() AS sql_identifier) AS foreign_table_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS foreign_table_catalog, n.nspname AS foreign_table_schema, c.relname AS foreign_table_name, t.ftoptions AS ftoptions, - CAST(current_database() AS sql_identifier) AS foreign_server_catalog, + CAST(pg_catalog.current_database() AS sql_identifier) AS foreign_server_catalog, CAST(srvname AS sql_identifier) AS foreign_server_name, CAST(u.rolname AS sql_identifier) AS authorization_identifier FROM pg_foreign_table t, pg_foreign_server s, pg_foreign_data_wrapper w, pg_authid u, pg_namespace n, pg_class c WHERE w.oid = s.srvfdw AND u.oid = c.relowner - AND (pg_has_role(c.relowner, 'USAGE') - OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') - OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')) + AND (pg_catalog.pg_has_role(c.relowner, 'USAGE') + OR pg_catalog.has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR pg_catalog.has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')) AND n.oid = c.relnamespace AND c.oid = t.ftrelid AND c.relkind = 'f' @@ -2797,8 +2797,8 @@ CREATE VIEW foreign_table_options AS SELECT foreign_table_catalog, foreign_table_schema, foreign_table_name, - 
CAST((pg_options_to_table(t.ftoptions)).option_name AS sql_identifier) AS option_name, - CAST((pg_options_to_table(t.ftoptions)).option_value AS character_data) AS option_value + CAST((pg_catalog.pg_options_to_table(t.ftoptions)).option_name AS sql_identifier) AS option_name, + CAST((pg_catalog.pg_options_to_table(t.ftoptions)).option_value AS character_data) AS option_value FROM _pg_foreign_tables t; GRANT SELECT ON TABLE foreign_table_options TO PUBLIC; @@ -2842,10 +2842,10 @@ CREATE VIEW user_mapping_options AS SELECT authorization_identifier, foreign_server_catalog, foreign_server_name, - CAST((pg_options_to_table(um.umoptions)).option_name AS sql_identifier) AS option_name, + CAST((pg_catalog.pg_options_to_table(um.umoptions)).option_name AS sql_identifier) AS option_name, CAST(CASE WHEN (umuser <> 0 AND authorization_identifier = current_user) - OR (umuser = 0 AND pg_has_role(srvowner, 'USAGE')) - OR (SELECT rolsuper FROM pg_authid WHERE rolname = current_user) THEN (pg_options_to_table(um.umoptions)).option_value + OR (umuser = 0 AND pg_catalog.pg_has_role(srvowner, 'USAGE')) + OR (SELECT rolsuper FROM pg_authid WHERE rolname = current_user) THEN (pg_catalog.pg_options_to_table(um.umoptions)).option_value ELSE NULL END AS character_data) AS option_value FROM _pg_user_mappings um; diff --git a/src/common/backend/catalog/namespace.cpp b/src/common/backend/catalog/namespace.cpp index 18104d7a7..36a80a56b 100644 --- a/src/common/backend/catalog/namespace.cpp +++ b/src/common/backend/catalog/namespace.cpp @@ -36,6 +36,7 @@ #include "catalog/pg_collation.h" #include "catalog/pg_conversion.h" #include "catalog/pg_conversion_fn.h" +#include "catalog/pg_language.h" #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" @@ -52,7 +53,9 @@ #include "catalog/pgxc_class.h" #include "catalog/pgxc_group.h" #include "catalog/indexing.h" +#include "catalog/gs_db_privilege.h" #include "commands/dbcommands.h" +#include "commands/proclang.h" #include "commands/tablecmds.h" #include "funcapi.h" #include "mb/pg_wchar.h" @@ -151,8 +154,6 @@ * Note: all data pointed to by these List variables is in t_thrd.top_mem_cxt. */ -static volatile uint32 gt_tempID_seed = 0; - /* Local functions */ static void InitTempTableNamespace(void); static void RemoveTempRelations(Oid tempNamespaceId); @@ -214,7 +215,6 @@ Oid RangeVarGetRelidExtended(const RangeVar* relation, LOCKMODE lockmode, bool m bool target_is_partition, bool isSupportSynonym, RangeVarGetRelidCallback callback, void* callback_arg, StringInfo detailInfo, Oid* refSynOid) { - uint64 inval_count; Oid relId; Oid oldRelId = InvalidOid; bool retry = false; @@ -251,13 +251,18 @@ Oid RangeVarGetRelidExtended(const RangeVar* relation, LOCKMODE lockmode, bool m * by calling AcceptInvalidationMessages() before beginning this loop, but * that would add a significant amount overhead, so for now we don't. */ + uint64 sess_inval_count; + uint64 thrd_inval_count = 0; for (;;) { /* * Remember this value, so that, after looking up the relation name * and locking its OID, we can check whether any invalidation messages * have been processed that might require a do-over. */ - inval_count = u_sess->inval_cxt.SharedInvalidMessageCounter; + sess_inval_count = u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) { + thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter; + } /* * Some non-default relpersistence value may have been specified. 
The
@@ -376,8 +381,16 @@ Oid RangeVarGetRelidExtended(const RangeVar* relation, LOCKMODE lockmode, bool m
         /*
          * If no invalidation message were processed, we're done!
          */
-        if (inval_count == u_sess->inval_cxt.SharedInvalidMessageCounter)
-            break;
+        if (EnableLocalSysCache()) {
+            if (sess_inval_count == u_sess->inval_cxt.SIMCounter &&
+                thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) {
+                break;
+            }
+        } else {
+            if (sess_inval_count == u_sess->inval_cxt.SIMCounter) {
+                break;
+            }
+        }

         /*
          * Something may have changed. Let's repeat the name lookup, to make
@@ -468,6 +481,72 @@ Oid RangeVarGetCreationNamespace(const RangeVar* newRelation)
     return namespaceId;
 }

+bool CheckRelationCreateAnyPrivilege(Oid userId, char relkind)
+{
+    AclResult aclResult = ACLCHECK_NO_PRIV;
+    switch (relkind) {
+        case RELKIND_COMPOSITE_TYPE:
+            if (HasSpecAnyPriv(userId, CREATE_ANY_TYPE, false)) {
+                aclResult = ACLCHECK_OK;
+            }
+            break;
+        /* sequence object */
+        case RELKIND_SEQUENCE:
+        case RELKIND_LARGE_SEQUENCE:
+            if (HasSpecAnyPriv(userId, CREATE_ANY_SEQUENCE, false)) {
+                aclResult = ACLCHECK_OK;
+            }
+            break;
+        case RELKIND_INDEX:
+        case RELKIND_GLOBAL_INDEX:
+            if (HasSpecAnyPriv(userId, CREATE_ANY_INDEX, false)) {
+                aclResult = ACLCHECK_OK;
+            }
+            break;
+        /* table */
+        default:
+            if (HasSpecAnyPriv(userId, CREATE_ANY_TABLE, false)) {
+                aclResult = ACLCHECK_OK;
+            }
+            break;
+    }
+    return aclResult == ACLCHECK_OK;
+}
+
+/*
+ * Check creation rights in the target namespace: first the namespace ACL,
+ * then, outside system schemas, the given "create any" system privilege.
+ * Returns true only when access comes from the "create any" privilege.
+ */
+bool CheckCreatePrivilegeInNamespace(Oid namespaceId, Oid roleId, const char* anyPrivilege)
+{
+    /* Check we have creation rights in target namespace */
+    AclResult aclResult = pg_namespace_aclcheck(namespaceId, roleId, ACL_CREATE);
+    /* anyResult is true when the role holds the "create any" privilege */
+    bool anyResult = false;
+    if (aclResult != ACLCHECK_OK && !IsSysSchema(namespaceId)) {
+        anyResult = HasSpecAnyPriv(roleId, anyPrivilege, false);
+    }
+    if (aclResult != ACLCHECK_OK && !anyResult) {
+        aclcheck_error(aclResult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId));
+    }
+    return anyResult;
+}
+
+static void CheckCreateRelPrivilegeInNamespace(char relkind, Oid namespaceId)
+{
+    bool anyResult = false;
+    AclResult aclResult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE);
+    if (aclResult != ACLCHECK_OK && !IsSysSchema(namespaceId)) {
+        if (relkind != '\0') {
+            anyResult = CheckRelationCreateAnyPrivilege(GetUserId(), relkind);
+        }
+    }
+    if (aclResult != ACLCHECK_OK && !anyResult) {
+        aclcheck_error(aclResult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId));
+    }
+}
+
 /*
  * RangeVarGetAndCheckCreationNamespace
  *
@@ -494,9 +573,9 @@ Oid RangeVarGetCreationNamespace(const RangeVar* newRelation)
  * As a further side-effect, if the selected namespace is a temporary namespace,
  * we mark the RangeVar as RELPERSISTENCE_TEMP.
  */
-Oid RangeVarGetAndCheckCreationNamespace(RangeVar* relation, LOCKMODE lockmode, Oid* existing_relation_id)
+Oid RangeVarGetAndCheckCreationNamespace(RangeVar* relation, LOCKMODE lockmode,
+    Oid* existing_relation_id, char relkind)
 {
-    uint64 inval_count;
     Oid relid;
     Oid oldrelid = InvalidOid;
     Oid nspid;
@@ -522,11 +601,13 @@ Oid RangeVarGetAndCheckCreationNamespace(RangeVar* relation, LOCKMODE lockmode,
      * while we're doing the name lookups and acquiring locks. See comments
      * in that function for a more detailed explanation of this logic.
*/ + uint64 sess_inval_count; + uint64 thrd_inval_count = 0; for (;;) { - AclResult aclresult; - - inval_count = u_sess->inval_cxt.SharedInvalidMessageCounter; - + sess_inval_count = u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) { + thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter; + } /* Look up creation namespace and check for existing relation. */ nspid = RangeVarGetCreationNamespace(relation); Assert(OidIsValid(nspid)); @@ -542,12 +623,8 @@ Oid RangeVarGetAndCheckCreationNamespace(RangeVar* relation, LOCKMODE lockmode, */ if (IsBootstrapProcessingMode()) break; - /* Check namespace permissions. */ - aclresult = pg_namespace_aclcheck(nspid, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(nspid)); - + CheckCreateRelPrivilegeInNamespace(relkind, nspid); if (retry) { /* If nothing changed, we're done. */ if (relid == oldrelid && nspid == oldnspid) @@ -574,8 +651,16 @@ Oid RangeVarGetAndCheckCreationNamespace(RangeVar* relation, LOCKMODE lockmode, } /* If no invalidation message were processed, we're done! */ - if (inval_count == u_sess->inval_cxt.SharedInvalidMessageCounter) - break; + if (EnableLocalSysCache()) { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter && + thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) { + break; + } + } else { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter) { + break; + } + } /* Something may have changed, so recheck our work. */ retry = true; @@ -917,6 +1002,85 @@ bool TypeIsVisible(Oid typid) return visible; } +bool isTableofIndexbyType(Oid typeOid) +{ + bool result = false; + HeapTuple typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); + if (!HeapTupleIsValid(typeTup)) { + return result; + } + + if (((Form_pg_type)GETSTRUCT(typeTup))->typtype == TYPTYPE_TABLEOF && + (((Form_pg_type)GETSTRUCT(typeTup))->typcategory == TYPCATEGORY_TABLEOF_VARCHAR || + ((Form_pg_type)GETSTRUCT(typeTup))->typcategory == TYPCATEGORY_TABLEOF_INTEGER)) { + result = true; + } + + ReleaseSysCache(typeTup); + + return result; +} + +bool isTableofType(Oid typeOid, Oid* base_oid, Oid* indexbyType) +{ + bool result = false; + HeapTuple typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid)); + if (!HeapTupleIsValid(typeTup)) { + return result; + } + + if (((Form_pg_type)GETSTRUCT(typeTup))->typtype == TYPTYPE_TABLEOF) { + *base_oid = ((Form_pg_type)GETSTRUCT(typeTup))->typelem; + result = true; + } + + if (indexbyType != NULL) { + if (((Form_pg_type)GETSTRUCT(typeTup))->typcategory == TYPCATEGORY_TABLEOF_VARCHAR) { + *indexbyType = VARCHAROID; + } else if (((Form_pg_type)GETSTRUCT(typeTup))->typcategory == TYPCATEGORY_TABLEOF_INTEGER) { + *indexbyType = INT4OID; + } else { + *indexbyType = InvalidOid; + } + } + + ReleaseSysCache(typeTup); + + return result; +} + +bool IsPlpgsqlLanguageOid(Oid langoid) +{ + HeapTuple tp; + bool isNull = true; + char* langName = NULL; + + Relation relation = heap_open(LanguageRelationId, NoLock); + tp = SearchSysCache1(LANGOID, ObjectIdGetDatum(langoid)); + if (!HeapTupleIsValid(tp)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + (errmsg("cache lookup failed for language %u", langoid), errdetail("N/A."), + errcause("System error."), erraction("Contact engineer to support.")))); + } + Datum datum = heap_getattr(tp, Anum_pg_language_lanname, RelationGetDescr(relation), &isNull); + if (isNull) { + heap_close(relation, NoLock); + ReleaseSysCache(tp); + ereport(ERROR, + 
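/*
 * Note on the invalidation-counter change running through this file (sketch,
 * not part of the patch): the single u_sess->inval_cxt.SharedInvalidMessageCounter
 * snapshot is replaced by two counters. Presumably, with EnableLocalSysCache()
 * the per-thread local syscache processes invalidations separately from the
 * session, so a lookup is only safe to accept when neither counter moved:
 *
 *   for (;;) {
 *       sess_inval_count = u_sess->inval_cxt.SIMCounter;
 *       if (EnableLocalSysCache())
 *           thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter;
 *       ... look up the name and lock the OID ...
 *       if (neither counter advanced)
 *           break;    // no concurrent invalidation raced the lookup
 *   }
 */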
(errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for language name %u", langoid))); + } + langName = NameStr(*DatumGetName(datum)); + int result = strcasecmp(langName, "plpgsql"); + heap_close(relation, NoLock); + ReleaseSysCache(tp); + + if (result == 0) { + return true; + } else { + return false; + } +} + static FuncCandidateList FuncnameAddCandidates(FuncCandidateList resultList, HeapTuple procTup, List* argNames, Oid namespaceId, Oid objNsp, int nargs, CatCList* catList, bool expandVariadic, bool expandDefaults, bool includeOut, Oid refSynOid, bool enable_outparam_override) @@ -946,11 +1110,15 @@ static FuncCandidateList FuncnameAddCandidates(FuncCandidateList resultList, Hea // For compatiable with the non-A special cases, for example function with out // param can't be called by SQL in A, but some of these are stilled called by // such as gsql in SQL. - Datum pprokind = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_prokind, &isNull); - if ((!isNull && PROC_IS_FUNC(pprokind)) || isNull) { - proNargs = procForm->pronargs; - allArgTypes = NULL; - includeOut = false; + Datum prolangoid = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_prolang, &isNull); + if (strcasecmp(get_language_name((Oid)prolangoid), "plpgsql") != 0 || + u_sess->attr.attr_common.IsInplaceUpgrade || IsInitdb) { + Datum pprokind = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_prokind, &isNull); + if ((!isNull && PROC_IS_FUNC(pprokind)) || isNull) { + proNargs = procForm->pronargs; + allArgTypes = NULL; + includeOut = false; + } } } else { proNargs = procForm->pronargs; @@ -1089,6 +1257,15 @@ static FuncCandidateList FuncnameAddCandidates(FuncCandidateList resultList, Hea } } + int allArgNum = 0; +#ifndef ENABLE_MULTIPLE_NODES + Datum allArgs = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_proallargtypes, &isNull); + if (!isNull) { + ArrayType* arr1 = DatumGetArrayTypeP(allArgs); /* ensure not toasted */ + allArgNum = ARR_DIMS(arr1)[0]; + } +#endif + /* * We must compute the effective argument list so that we can easily * compare it to earlier results. We waste a palloc cycle if it gets @@ -1104,6 +1281,7 @@ static FuncCandidateList FuncnameAddCandidates(FuncCandidateList resultList, Hea newResult->packageOid = DatumGetObjectId(packageOidDatum); /* record the referenced synonym oid for building view dependency. */ newResult->refSynOid = refSynOid; + newResult->allArgNum = allArgNum; Oid* proargtypes = NULL; #ifndef ENABLE_MULTIPLE_NODES @@ -1138,6 +1316,20 @@ static FuncCandidateList FuncnameAddCandidates(FuncCandidateList resultList, Hea securec_check(rc, "\0", "\0"); } + /* + * some procedure args have tableof variable, + * when match the proc parameters' type, + * we should change to its base type. 
+ */ + if (numProcAllArgs > 0 && newResult->args != NULL) { + for (int i = 0; i < numProcAllArgs; i++) { + Oid base_oid = InvalidOid; + if (isTableofType(newResult->args[i], &base_oid, NULL)) { + newResult->args[i] = base_oid; + } + } + } + if (variadic) { int i; @@ -1354,11 +1546,11 @@ FuncCandidateList FuncnameGetCandidates(List* names, int nargs, List* argnames, } #endif - if (u_sess->plsql_cxt.curr_compile_context != NULL && + if (OidIsValid(u_sess->plsql_cxt.running_pkg_oid)) { + caller_pkg_oid = u_sess->plsql_cxt.running_pkg_oid; + } else if (u_sess->plsql_cxt.curr_compile_context != NULL && u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL) { caller_pkg_oid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; - } else { - caller_pkg_oid = u_sess->plsql_cxt.running_pkg_oid; } /* check for caller error */ Assert(nargs >= 0 || !(expand_variadic || expand_defaults)); @@ -1382,11 +1574,19 @@ FuncCandidateList FuncnameGetCandidates(List* names, int nargs, List* argnames, /* Step1. search syscache by name only and add candidates from pg_proc */ CatCList* catlist = NULL; +#ifndef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum < 92470) { + catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); + } else { + catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(funcname)); + } +#else catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); - for (i = 0; i < catlist->n_members; i++) { +#endif + for (i = 0; i < catlist->n_members; i++) { namespaceId = initNamesapceId; - HeapTuple proctup = &catlist->members[i]->tuple; + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); if (!HeapTupleIsValid(proctup) || !OidIsValid(HeapTupleGetOid(proctup))) { continue; } @@ -1510,7 +1710,7 @@ FuncCandidateList FuncnameGetCandidates(List* names, int nargs, List* argnames, catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(NameStr(synForm->synobjname))); #endif for (i = 0; i < catlist->n_members; i++) { - HeapTuple procTuple = &catlist->members[i]->tuple; + HeapTuple procTuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); if (!OidIsValid(HeapTupleGetOid(procTuple))) { continue; } @@ -1568,7 +1768,7 @@ KeyCandidateList CeknameGetCandidates(const List *names, bool key_create) catlist = SearchSysCacheList1(COLUMNSETTINGNAME, CStringGetDatum(keyname)); for (i = 0; i < catlist->n_members; i++) { - HeapTuple keytup = &catlist->members[i]->tuple; + HeapTuple keytup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_gs_column_keys keyform = (Form_gs_column_keys)GETSTRUCT(keytup); KeyCandidateList newResult; @@ -1636,7 +1836,7 @@ KeyCandidateList GlobalSettingGetCandidates(const List *names, bool key_create) catlist = SearchSysCacheList1(GLOBALSETTINGNAME, CStringGetDatum(keyname)); for (i = 0; i < catlist->n_members; i++) { - HeapTuple keytup = &catlist->members[i]->tuple; + HeapTuple keytup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_gs_client_global_keys keyform = (Form_gs_client_global_keys)GETSTRUCT(keytup); KeyCandidateList newResult; @@ -1888,8 +2088,27 @@ bool FunctionIsVisible(Oid funcid) visible = false; oidvector* proargs = ProcedureGetArgTypes(proctup); - clist = FuncnameGetCandidates(list_make1(makeString(proname)), nargs, NIL, false, false, false); +#ifndef ENABLE_MULTIPLE_NODES + bool enable_outparam_override = false; + enable_outparam_override = enable_out_param_override(); + if (enable_outparam_override) { + bool isNull = false; + Datum 
argTypes = ProcedureGetAllArgTypes(proctup, &isNull); + if (!isNull) { + oidvector* allArgTypes = (oidvector *)PG_DETOAST_DATUM(argTypes); + int proNargs = allArgTypes->dim1; + clist = FuncnameGetCandidates(list_make1(makeString(proname)), + proNargs, NIL, false, false, false, true); + } else { + clist = FuncnameGetCandidates(list_make1(makeString(proname)), nargs, NIL, false, false, false); + } + } else { + clist = FuncnameGetCandidates(list_make1(makeString(proname)), nargs, NIL, false, false, false); + } +#else + clist = FuncnameGetCandidates(list_make1(makeString(proname)), nargs, NIL, false, false, false); +#endif for (; clist; clist = clist->next) { if (memcmp(clist->args, proargs->values, nargs * sizeof(Oid)) == 0) { /* Found the expected entry; is it the right proc? */ @@ -1973,7 +2192,7 @@ Oid OpernameGetOprid(List* names, Oid oprleft, Oid oprright) continue; /* do not look in temp namespace */ for (i = 0; i < catlist->n_members; i++) { - HeapTuple opertup = &catlist->members[i]->tuple; + HeapTuple opertup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_operator operform = (Form_pg_operator)GETSTRUCT(opertup); if (operform->oprnamespace == namespaceId) { @@ -2050,7 +2269,7 @@ FuncCandidateList OpernameGetCandidates(List* names, char oprkind) resultSpace = (char*)palloc(catlist->n_members * SPACE_PER_OP); for (i = 0; i < catlist->n_members; i++) { - HeapTuple opertup = &catlist->members[i]->tuple; + HeapTuple opertup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_operator operform = (Form_pg_operator)GETSTRUCT(opertup); int pathpos = 0; FuncCandidateList newResult; @@ -2127,6 +2346,7 @@ FuncCandidateList OpernameGetCandidates(List* names, char oprkind) newResult->pathpos = pathpos; newResult->oid = HeapTupleGetOid(opertup); newResult->nargs = 2; + newResult->packageOid = InvalidOid; newResult->nvargs = 0; newResult->ndargs = 0; newResult->argnumbers = NULL; @@ -2950,7 +3170,7 @@ bool IsPackageFunction(List* funcname) #endif bool isFirstPackageFunction = false; for (int i = 0; i < catlist->n_members; i++) { - HeapTuple proctup = &catlist->members[i]->tuple; + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup); Datum packageid_datum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_packageid, &isNull); Oid packageid = InvalidOid; @@ -2981,7 +3201,7 @@ bool IsPackageFunction(List* funcname) } /* package functions and non-package functions cannot overload each other */ - proctup = &catlist->members[i]->tuple; + proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Datum ispackage = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_package, &isNull); result = DatumGetBool(ispackage); if (IsSystemObjOid(HeapTupleGetOid(proctup))) { @@ -2996,16 +3216,16 @@ bool IsPackageFunction(List* funcname) if (result && isFirstPackageFunction) { continue; } else if (!result) { - ReleaseCatCacheList(catlist); + ReleaseSysCacheList(catlist); return false; } } else if (OidIsValid(packageid)) { - ReleaseCatCacheList(catlist); + ReleaseSysCacheList(catlist); return true; } } - ReleaseCatCacheList(catlist); + ReleaseSysCacheList(catlist); if (isFirstPackageFunction) { return true; } else { @@ -3190,7 +3410,6 @@ Oid QualifiedNameGetCreationNamespace(const List* names, char** objname_p) /* deconstruct the name list */ DeconstructQualifiedName(names, &schemaname, objname_p); - uint64 inval_count; Oid nspid = InvalidOid; Oid oldnspid = InvalidOid; bool retry = false; @@ -3200,9 +3419,13 @@ Oid 
QualifiedNameGetCreationNamespace(const List* names, char** objname_p) * tracking whether any invalidation messages are processed * while we're doing the name lookups and acquiring locks. */ + uint64 sess_inval_count; + uint64 thrd_inval_count = 0; for (;;) { - inval_count = u_sess->inval_cxt.SharedInvalidMessageCounter; - + sess_inval_count = u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) { + thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter; + } /* Look up creation namespace. */ nspid = SchemaNameGetSchemaOid(schemaname); Assert(OidIsValid(nspid)); @@ -3225,8 +3448,16 @@ Oid QualifiedNameGetCreationNamespace(const List* names, char** objname_p) LockDatabaseObject(NamespaceRelationId, nspid, 0, AccessShareLock); /* If no invalidation messages were processed, we're done! */ - if (inval_count == u_sess->inval_cxt.SharedInvalidMessageCounter) - break; + if (EnableLocalSysCache()) { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter && + thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) { + break; + } + } else { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter) { + break; + } + } /* Something may have changed, so recheck our work. */ retry = true; @@ -3276,7 +3507,10 @@ Oid get_namespace_oid(const char* nspname, bool missing_ok) if (!OidIsValid(oid) && !missing_ok) { char message[MAXSTRLEN]; + /* reject an overlong name before formatting the message into the fixed-size buffer */ + if (strlen(nspname) > MAXSTRLEN) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("The schema name exceeds the maximum length."))); + } - errno_t rc = sprintf_s(message, MAXSTRLEN, "schema \"%s\" does not exist", nspname); - securec_check_ss_c(rc, "", ""); + errno_t rc = sprintf_s(message, MAXSTRLEN, "schema \"%s\" does not exist", nspname); + securec_check_ss(rc, "", ""); InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc, true); ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("schema \"%s\" does not exist", nspname))); } @@ -4558,7 +4792,7 @@ void InitializeSearchPath(void) * In normal mode, arrange for a callback on any syscache invalidation * of pg_namespace rows. */ - CacheRegisterSyscacheCallback(NAMESPACEOID, NamespaceCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(NAMESPACEOID, NamespaceCallback, (Datum)0); /* Force search path to be recomputed on next use */ u_sess->catalog_cxt.baseSearchPathValid = false; } @@ -4841,11 +5075,12 @@ void FetchDefaultArgumentPos(int** defpos, int2vector* adefpos, const char* argm /* * @Description: get the namespace's owner that has the same name as the namespace. * @in nspid : namespace oid. + * @in anyPriv : when true, indicates that the user holds the CREATE ANY privilege * @in is_securityadmin : whether a security administrator is doing this. * @return : return InvalidOid if there is no appropriate role. * return the owner's oid if the namespace has the same name as its owner. 
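 + * Note: the permission error below is not raised for security administrators, superusers, + * users holding the owner's privileges, users with CREATE ANY (anyPriv), or operator + * admins running in operation mode. 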
*/ -Oid GetUserIdFromNspId(Oid nspid, bool is_securityadmin) +Oid GetUserIdFromNspId(Oid nspid, bool is_securityadmin, bool anyPriv) { char* rolname = NULL; Oid nspowner = InvalidOid; @@ -4868,7 +5103,8 @@ Oid GetUserIdFromNspId(Oid nspid, bool is_securityadmin) * return the owner's oid */ if (!strcmp(NameStr(nsptup->nspname), rolname)) { - if (!is_securityadmin && (!superuser_arg(GetUserId()) && !has_privs_of_role(GetUserId(), nspowner) && + if (!is_securityadmin && (!superuser_arg(GetUserId()) && + !has_privs_of_role(GetUserId(), nspowner) && (!anyPriv) && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode))) { ReleaseSysCache(tuple); ereport(ERROR, diff --git a/src/common/backend/catalog/objectaddress.cpp b/src/common/backend/catalog/objectaddress.cpp index 2b998e04f..60f75ed2e 100644 --- a/src/common/backend/catalog/objectaddress.cpp +++ b/src/common/backend/catalog/objectaddress.cpp @@ -161,9 +161,6 @@ static ObjectAddress get_object_address_attribute( static ObjectAddress get_object_address_type(ObjectType objtype, List* objname, bool missing_ok); static ObjectAddress get_object_address_opcf(ObjectType objtype, List* objname, List* objargs, bool missing_ok); static const ObjectPropertyType* get_object_property_data(Oid class_id); -static ObjectAddress get_object_address_publication_rel(List *objname, List *objargs, - Relation *relation, bool missing_ok); - /* * Translate an object name and arguments (as passed by the parser) to an @@ -191,19 +188,23 @@ ObjectAddress get_object_address( ObjectAddress address; ObjectAddress old_address = {InvalidOid, InvalidOid, 0}; Relation relation = NULL; - uint64 inval_count; + Relation old_relation = NULL; List* objargs_agg = NULL; /* Some kind of lock must be taken. */ Assert(lockmode != NoLock); - + uint64 sess_inval_count; + uint64 thrd_inval_count = 0; for (;;) { /* * Remember this value, so that, after looking up the object name and * locking it, we can check whether any invalidation messages have * been processed that might require a do-over. */ - inval_count = u_sess->inval_cxt.SharedInvalidMessageCounter; + sess_inval_count = u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) { + thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter; + } /* Look up object address. 
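Each case below resolves the qualified name to an ObjectAddress; for relation-scoped objects the helper also opens and returns the relation so the caller can keep it locked. 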
*/ switch (objtype) { @@ -336,8 +337,6 @@ ObjectAddress get_object_address( case OBJECT_DIRECTORY: address = get_object_address_unqualified(objtype, objname, missing_ok); break; - case OBJECT_PUBLICATION_REL: - address = get_object_address_publication_rel(objname, objargs, &relation, missing_ok); default: ereport( ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized objtype: %d", (int)objtype))); @@ -362,9 +361,21 @@ ObjectAddress get_object_address( */ if (OidIsValid(old_address.classId)) { if (old_address.classId == address.classId && old_address.objectId == address.objectId && - old_address.objectSubId == address.objectSubId) - break; + old_address.objectSubId == address.objectSubId) { + if (old_relation != NULL) { + Assert(old_relation == relation); + /* refcount should be 2 here: one open from this iteration, one from the previous */ + Assert(old_relation->rd_refcnt > 1); + heap_close(old_relation, NoLock); + old_relation = NULL; + } + break; + } if (old_address.classId != RelationRelationId) { + if (old_relation != NULL) { + heap_close(old_relation, NoLock); + old_relation = NULL; + } if (IsSharedRelation(old_address.classId)) UnlockSharedObject(old_address.classId, old_address.objectId, 0, lockmode); else @@ -403,9 +414,18 @@ ObjectAddress get_object_address( * up no longer refers to the object we locked, so we retry the lookup * and see whether we get the same answer. */ - if (inval_count == u_sess->inval_cxt.SharedInvalidMessageCounter || relation != NULL) - break; + if (EnableLocalSysCache()) { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter && + thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) { + break; + } + } else { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter) { + break; + } + } old_address = address; + old_relation = relation; } /* Return the object address and the relation. */ @@ -768,6 +788,8 @@ static ObjectAddress get_object_address_attribute( (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" of relation \"%s\" does not exist", attname, NameListToString(relname)))); + /* close the relation but keep the lock */ + heap_close(relation, NoLock); address.classId = RelationRelationId; address.objectId = InvalidOid; address.objectSubId = InvalidAttrNumber; @@ -829,47 +851,6 @@ static ObjectAddress get_object_address_type(ObjectType objtype, List* objname, return address; } -/* - * Find the ObjectAddress for a publication relation. The objname parameter - * is the relation name; objargs contains the publication name. - */ -static ObjectAddress get_object_address_publication_rel(List *objname, List *objargs, Relation *relation, - bool missing_ok) -{ - ObjectAddress address; - char *pubname; - Publication *pub; - - address.classId = PublicationRelRelationId; - address.objectId = InvalidOid; - address.objectSubId = InvalidOid; - - *relation = relation_openrv_extended(makeRangeVarFromNameList(objname), AccessShareLock, missing_ok); - if (!relation) - return address; - - /* fetch publication name from input list */ - pubname = strVal(linitial(objargs)); - - /* Now look up the pg_publication tuple */ - pub = GetPublicationByName(pubname, missing_ok); - if (!pub) - return address; - - /* Find the publication relation mapping in syscache. 
*/ - address.objectId = - GetSysCacheOid2(PUBLICATIONRELMAP, ObjectIdGetDatum(RelationGetRelid(*relation)), ObjectIdGetDatum(pub->oid)); - if (!OidIsValid(address.objectId)) { - if (!missing_ok) - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("publication relation \"%s\" in publication \"%s\" does not exist", - RelationGetRelationName(*relation), pubname))); - return address; - } - - return address; -} - /* * Find the ObjectAddress for an opclass or opfamily. */ diff --git a/src/common/backend/catalog/performance_views.sql b/src/common/backend/catalog/performance_views.sql index e84e81324..513280b98 100644 --- a/src/common/backend/catalog/performance_views.sql +++ b/src/common/backend/catalog/performance_views.sql @@ -115,13 +115,13 @@ CREATE VIEW dbe_perf.global_instance_time AS CREATE VIEW dbe_perf.workload_sql_count AS SELECT pg_user.respool as workload, - sum(S.select_count)::bigint AS select_count, - sum(S.update_count)::bigint AS update_count, - sum(S.insert_count)::bigint AS insert_count, - sum(S.delete_count)::bigint AS delete_count, - sum(S.ddl_count)::bigint AS ddl_count, - sum(S.dml_count)::bigint AS dml_count, - sum(S.dcl_count)::bigint AS dcl_count + pg_catalog.sum(S.select_count)::bigint AS select_count, + pg_catalog.sum(S.update_count)::bigint AS update_count, + pg_catalog.sum(S.insert_count)::bigint AS insert_count, + pg_catalog.sum(S.delete_count)::bigint AS delete_count, + pg_catalog.sum(S.ddl_count)::bigint AS ddl_count, + pg_catalog.sum(S.dml_count)::bigint AS dml_count, + pg_catalog.sum(S.dcl_count)::bigint AS dcl_count FROM pg_user left join pg_stat_get_sql_count() AS S on pg_user.usename = S.user_name GROUP by pg_user.respool; @@ -129,22 +129,22 @@ CREATE VIEW dbe_perf.workload_sql_count AS CREATE VIEW dbe_perf.workload_sql_elapse_time AS SELECT pg_user.respool as workload, - sum(S.total_select_elapse)::bigint AS total_select_elapse, - MAX(S.max_select_elapse) AS max_select_elapse, - MIN(S.min_select_elapse) AS min_select_elapse, - ((sum(S.total_select_elapse) / greatest(sum(S.select_count), 1))::bigint) AS avg_select_elapse, - sum(S.total_update_elapse)::bigint AS total_update_elapse, - MAX(S.max_update_elapse) AS max_update_elapse, - MIN(S.min_update_elapse) AS min_update_elapse, - ((sum(S.total_update_elapse) / greatest(sum(S.update_count), 1))::bigint) AS avg_update_elapse, - sum(S.total_insert_elapse)::bigint AS total_insert_elapse, - MAX(S.max_insert_elapse) AS max_insert_elapse, - MIN(S.min_insert_elapse) AS min_insert_elapse, - ((sum(S.total_insert_elapse) / greatest(sum(S.insert_count), 1))::bigint) AS avg_insert_elapse, - sum(S.total_delete_elapse)::bigint AS total_delete_elapse, - MAX(S.max_delete_elapse) AS max_delete_elapse, - MIN(S.min_delete_elapse) AS min_delete_elapse, - ((sum(S.total_delete_elapse) / greatest(sum(S.delete_count), 1))::bigint) AS avg_delete_elapse + pg_catalog.sum(S.total_select_elapse)::bigint AS total_select_elapse, + pg_catalog.MAX(S.max_select_elapse) AS max_select_elapse, + pg_catalog.MIN(S.min_select_elapse) AS min_select_elapse, + ((pg_catalog.sum(S.total_select_elapse) / greatest(pg_catalog.sum(S.select_count), 1))::bigint) AS avg_select_elapse, + pg_catalog.sum(S.total_update_elapse)::bigint AS total_update_elapse, + pg_catalog.MAX(S.max_update_elapse) AS max_update_elapse, + pg_catalog.MIN(S.min_update_elapse) AS min_update_elapse, + ((pg_catalog.sum(S.total_update_elapse) / greatest(pg_catalog.sum(S.update_count), 1))::bigint) AS avg_update_elapse, + pg_catalog.sum(S.total_insert_elapse)::bigint AS 
total_insert_elapse, + pg_catalog.MAX(S.max_insert_elapse) AS max_insert_elapse, + pg_catalog.MIN(S.min_insert_elapse) AS min_insert_elapse, + ((pg_catalog.sum(S.total_insert_elapse) / greatest(pg_catalog.sum(S.insert_count), 1))::bigint) AS avg_insert_elapse, + pg_catalog.sum(S.total_delete_elapse)::bigint AS total_delete_elapse, + pg_catalog.MAX(S.max_delete_elapse) AS max_delete_elapse, + pg_catalog.MIN(S.min_delete_elapse) AS min_delete_elapse, + ((pg_catalog.sum(S.total_delete_elapse) / greatest(pg_catalog.sum(S.delete_count), 1))::bigint) AS avg_delete_elapse FROM pg_user left join pg_stat_get_sql_count() AS S on pg_user.usename = S.user_name GROUP by pg_user.respool; @@ -249,7 +249,7 @@ SELECT giwi.bg_resp_avg as bg_resp_avg, giwi.bg_resp_total as bg_resp_total FROM - pg_user left join get_instr_workload_info(0) AS giwi on pg_user.usesysid = giwi.user_oid; + pg_user left join pg_catalog.get_instr_workload_info(0) AS giwi on pg_user.usesysid = giwi.user_oid; CREATE OR REPLACE FUNCTION dbe_perf.get_global_user_transaction (OUT node_name name, OUT usename name, OUT commit_counter bigint, @@ -298,18 +298,18 @@ CREATE VIEW dbe_perf.global_user_transaction AS CREATE VIEW dbe_perf.workload_transaction AS select pg_user.respool as workload, - sum(W.commit_counter)::bigint as commit_counter, - sum(W.rollback_counter)::bigint as rollback_counter, - min(W.resp_min)::bigint as resp_min, - max(W.resp_max)::bigint as resp_max, - ((sum(W.resp_total) / greatest(sum(W.commit_counter), 1))::bigint) AS resp_avg, - sum(W.resp_total)::bigint as resp_total, - sum(W.bg_commit_counter)::bigint as bg_commit_counter, - sum(W.bg_rollback_counter)::bigint as bg_rollback_counter, - min(W.bg_resp_min)::bigint as bg_resp_min, - max(W.bg_resp_max)::bigint as bg_resp_max, - ((sum(W.bg_resp_total) / greatest(sum(W.bg_commit_counter), 1))::bigint) AS bg_resp_avg, - sum(W.bg_resp_total)::bigint as bg_resp_total + pg_catalog.sum(W.commit_counter)::bigint as commit_counter, + pg_catalog.sum(W.rollback_counter)::bigint as rollback_counter, + pg_catalog.min(W.resp_min)::bigint as resp_min, + pg_catalog.max(W.resp_max)::bigint as resp_max, + ((pg_catalog.sum(W.resp_total) / greatest(pg_catalog.sum(W.commit_counter), 1))::bigint) AS resp_avg, + pg_catalog.sum(W.resp_total)::bigint as resp_total, + pg_catalog.sum(W.bg_commit_counter)::bigint as bg_commit_counter, + pg_catalog.sum(W.bg_rollback_counter)::bigint as bg_rollback_counter, + pg_catalog.min(W.bg_resp_min)::bigint as bg_resp_min, + pg_catalog.max(W.bg_resp_max)::bigint as bg_resp_max, + ((pg_catalog.sum(W.bg_resp_total) / greatest(pg_catalog.sum(W.bg_commit_counter), 1))::bigint) AS bg_resp_avg, + pg_catalog.sum(W.bg_resp_total)::bigint as bg_resp_total from pg_user left join dbe_perf.user_transaction AS W on pg_user.usename = W.usename group by @@ -361,18 +361,18 @@ CREATE VIEW dbe_perf.global_workload_transaction AS CREATE VIEW dbe_perf.summary_workload_transaction AS SELECT W.workload AS workload, - sum(W.commit_counter) AS commit_counter, - sum(W.rollback_counter) AS rollback_counter, - coalesce(min(NULLIF(W.resp_min, 0)), 0) AS resp_min, - max(W.resp_max) AS resp_max, - ((sum(W.resp_total) / greatest(sum(W.commit_counter), 1))::bigint) AS resp_avg, - sum(W.resp_total) AS resp_total, - sum(W.bg_commit_counter) AS bg_commit_counter, - sum(W.bg_rollback_counter) AS bg_rollback_counter, - coalesce(min(NULLIF(W.bg_resp_min, 0)), 0) AS bg_resp_min, - max(W.bg_resp_max) AS bg_resp_max, - ((sum(W.bg_resp_total) / greatest(sum(W.bg_commit_counter), 1))::bigint) AS 
bg_resp_avg, - sum(W.bg_resp_total) AS bg_resp_total + pg_catalog.sum(W.commit_counter) AS commit_counter, + pg_catalog.sum(W.rollback_counter) AS rollback_counter, + coalesce(pg_catalog.min(NULLIF(W.resp_min, 0)), 0) AS resp_min, + pg_catalog.max(W.resp_max) AS resp_max, + ((pg_catalog.sum(W.resp_total) / greatest(pg_catalog.sum(W.commit_counter), 1))::bigint) AS resp_avg, + pg_catalog.sum(W.resp_total) AS resp_total, + pg_catalog.sum(W.bg_commit_counter) AS bg_commit_counter, + pg_catalog.sum(W.bg_rollback_counter) AS bg_rollback_counter, + coalesce(pg_catalog.min(NULLIF(W.bg_resp_min, 0)), 0) AS bg_resp_min, + pg_catalog.max(W.bg_resp_max) AS bg_resp_max, + ((pg_catalog.sum(W.bg_resp_total) / greatest(pg_catalog.sum(W.bg_commit_counter), 1))::bigint) AS bg_resp_avg, + pg_catalog.sum(W.bg_resp_total) AS bg_resp_total FROM dbe_perf.get_global_workload_transaction() AS W GROUP by W.workload; @@ -527,7 +527,7 @@ CREATE VIEW dbe_perf.session_cpu_runtime AS S.query, S.node_group, T.top_cpu_dn - FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T + FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T WHERE S.pid = T.threadid; CREATE VIEW dbe_perf.session_memory_runtime AS @@ -542,7 +542,7 @@ CREATE VIEW dbe_perf.session_memory_runtime AS S.query, S.node_group, T.top_mem_dn - FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T + FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T WHERE S.pid = T.threadid; CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS @@ -568,8 +568,10 @@ CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS ELSE S.srespool END AS resource_pool, S.query_id, - S.query - FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + S.query, + S.unique_sql_id, + S.trace_id + FROM pg_database D, pg_catalog.pg_stat_get_activity(NULL) AS S, pg_authid U WHERE S.datid = D.oid AND S.usesysid = U.oid; @@ -579,7 +581,7 @@ CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity out client_hostname text, out client_port integer, out backend_start timestamptz, out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, out waiting boolean, out enqueue text, out state text, out resource_pool name, - out query_id bigint, out query text) + out query_id bigint, out query text, out unique_sql_id bigint, out trace_id text) RETURNS setof record AS $$ DECLARE @@ -614,6 +616,8 @@ DECLARE resource_pool :=row_data.resource_pool; query_id :=row_data.query_id; query := row_data.query; + unique_sql_id := row_data.unique_sql_id; + trace_id := row_data.trace_id; return next; END LOOP; END LOOP; @@ -625,7 +629,7 @@ CREATE VIEW dbe_perf.global_session_stat_activity AS SELECT * FROM dbe_perf.get_global_session_stat_activity(); CREATE VIEW dbe_perf.thread_wait_status AS - SELECT * FROM pg_stat_get_status(NULL); + SELECT * FROM pg_catalog.pg_stat_get_status(NULL); CREATE OR REPLACE FUNCTION dbe_perf.get_global_thread_wait_status() RETURNS setof dbe_perf.thread_wait_status @@ -665,7 +669,7 @@ CREATE VIEW DBE_PERF.wlm_user_resource_runtime AS T.total_temp_space, T.used_spill_space, T.total_spill_space - FROM (select usename, (gs_wlm_user_resource_info(usename::cstring)).* from pg_user) T; + FROM (select usename, (pg_catalog.gs_wlm_user_resource_info(usename::cstring)).* from pg_user) T; CREATE VIEW dbe_perf.wlm_user_resource_config AS SELECT @@ -679,7 +683,7 @@ CREATE VIEW dbe_perf.wlm_user_resource_config AS T.spacelimit, 
T.childcount, T.childlist - FROM pg_authid AS S, gs_wlm_get_user_info(NULL) AS T, pg_resource_pool AS R + FROM pg_authid AS S, pg_catalog.gs_wlm_get_user_info(NULL) AS T, pg_resource_pool AS R WHERE S.oid = T.userid AND T.rpoid = R.oid; CREATE VIEW dbe_perf.operator_history_table AS @@ -712,7 +716,7 @@ CREATE VIEW dbe_perf.global_operator_history_table AS --history operator-level view for DM in single CN CREATE VIEW dbe_perf.operator_history AS - SELECT * FROM pg_stat_get_wlm_operator_info(0); + SELECT * FROM pg_catalog.pg_stat_get_wlm_operator_info(0); CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_history() RETURNS setof dbe_perf.operator_history @@ -741,7 +745,7 @@ CREATE VIEW dbe_perf.global_operator_history AS --real time operator-level view in single CN CREATE VIEW dbe_perf.operator_runtime AS SELECT t.* - FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + FROM dbe_perf.session_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_operator_info(NULL) as t WHERE s.query_id = t.queryid; CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() @@ -838,7 +842,7 @@ SELECT S.mem_top5_value, S.top_mem_dn, S.top_cpu_dn -FROM pg_stat_get_wlm_session_info(0) S; +FROM pg_catalog.pg_stat_get_wlm_session_info(0) S; CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_history() RETURNS setof dbe_perf.statement_complex_history @@ -941,7 +945,7 @@ CREATE VIEW dbe_perf.statement_complex_runtime AS S.node_group, T.top_cpu_dn, T.top_mem_dn - FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T + FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T WHERE S.pid = T.threadid; CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() @@ -982,7 +986,7 @@ CREATE VIEW dbe_perf.statement_iostat_complex_runtime AS S.query, S.node_group, T.curr_io_limits as curr_io_limits - FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_session_iostat_info(0) AS T + FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T WHERE S.pid = T.threadid; CREATE OR REPLACE VIEW dbe_perf.statement_wlmstat_complex_runtime AS @@ -1013,7 +1017,7 @@ CREATE OR REPLACE VIEW dbe_perf.statement_wlmstat_complex_runtime AS S.query, S.is_plana, S.node_group - FROM pg_database D, pg_stat_get_session_wlmstat(NULL) AS S, pg_authid AS U, gs_wlm_session_respool(0) AS T + FROM pg_database D, pg_catalog.pg_stat_get_session_wlmstat(NULL) AS S, pg_authid AS U, pg_catalog.gs_wlm_session_respool(0) AS T WHERE S.datid = D.oid AND S.usesysid = U.oid AND T.threadid = S.threadid; @@ -1090,9 +1094,9 @@ CREATE VIEW dbe_perf.statio_all_indexes AS N.nspname AS schemaname, C.relname AS relname, I.relname AS indexrelname, - pg_stat_get_blocks_fetched(I.oid) - - pg_stat_get_blocks_hit(I.oid) AS idx_blks_read, - pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit + pg_catalog.pg_stat_get_blocks_fetched(I.oid) - + pg_catalog.pg_stat_get_blocks_hit(I.oid) AS idx_blks_read, + pg_catalog.pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit FROM pg_class C JOIN pg_index X ON C.oid = X.indrelid JOIN pg_class I ON I.oid = X.indexrelid @@ -1203,7 +1207,7 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_statio_all_indexes AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, COALESCE(Ti.indexrelname, Tn.toastindexname) AS indexrelname, - SUM(Ti.idx_blks_read) idx_blks_read, SUM(Ti.idx_blks_hit) idx_blks_hit + pg_catalog.SUM(Ti.idx_blks_read) idx_blks_read, 
pg_catalog.SUM(Ti.idx_blks_hit) idx_blks_hit FROM dbe_perf.get_summary_statio_all_indexes() as Ti LEFT JOIN dbe_perf.get_local_toastname_and_toastindexname() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) @@ -1214,9 +1218,9 @@ CREATE VIEW dbe_perf.statio_all_sequences AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_blocks_fetched(C.oid) - - pg_stat_get_blocks_hit(C.oid) AS blks_read, - pg_stat_get_blocks_hit(C.oid) AS blks_hit + pg_catalog.pg_stat_get_blocks_fetched(C.oid) - + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS blks_read, + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS blks_hit FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'S' or C.relkind = 'L'; @@ -1255,7 +1259,7 @@ CREATE VIEW dbe_perf.global_statio_all_sequences AS CREATE VIEW dbe_perf.summary_statio_all_sequences AS SELECT schemaname, relname, - SUM(blks_read) blks_read, SUM(blks_hit) blks_hit + pg_catalog.SUM(blks_read) blks_read, pg_catalog.SUM(blks_hit) blks_hit FROM dbe_perf.get_global_statio_all_sequences() GROUP BY (schemaname, relname); @@ -1264,18 +1268,18 @@ CREATE VIEW dbe_perf.statio_all_tables AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_blocks_fetched(C.oid) - - pg_stat_get_blocks_hit(C.oid) AS heap_blks_read, - pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit, - sum(pg_stat_get_blocks_fetched(I.indexrelid) - - pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read, - sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit, - pg_stat_get_blocks_fetched(T.oid) - - pg_stat_get_blocks_hit(T.oid) AS toast_blks_read, - pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit, - pg_stat_get_blocks_fetched(X.oid) - - pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read, - pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit + pg_catalog.pg_stat_get_blocks_fetched(C.oid) - + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS heap_blks_read, + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit, + pg_catalog.sum(pg_catalog.pg_stat_get_blocks_fetched(I.indexrelid) - + pg_catalog.pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read, + pg_catalog.sum(pg_catalog.pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit, + pg_catalog.pg_stat_get_blocks_fetched(T.oid) - + pg_catalog.pg_stat_get_blocks_hit(T.oid) AS toast_blks_read, + pg_catalog.pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit, + pg_catalog.pg_stat_get_blocks_fetched(X.oid) - + pg_catalog.pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read, + pg_catalog.pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit FROM pg_class C LEFT JOIN pg_index I ON C.oid = I.indrelid LEFT JOIN pg_class T ON C.reltoastrelid = T.oid LEFT JOIN @@ -1399,10 +1403,10 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_statio_all_tables AS SELECT Ti.schemaname as schemaname, COALESCE(Ti.relname, Tn.toastname) as relname, - SUM(Ti.heap_blks_read) heap_blks_read, SUM(Ti.heap_blks_hit) heap_blks_hit, - SUM(Ti.idx_blks_read) idx_blks_read, SUM(Ti.idx_blks_hit) idx_blks_hit, - SUM(Ti.toast_blks_read) toast_blks_read, SUM(Ti.toast_blks_hit) toast_blks_hit, - SUM(Ti.tidx_blks_read) tidx_blks_read, SUM(Ti.tidx_blks_hit) tidx_blks_hit + pg_catalog.SUM(Ti.heap_blks_read) heap_blks_read, pg_catalog.SUM(Ti.heap_blks_hit) heap_blks_hit, + pg_catalog.SUM(Ti.idx_blks_read) idx_blks_read, pg_catalog.SUM(Ti.idx_blks_hit) idx_blks_hit, + pg_catalog.SUM(Ti.toast_blks_read) toast_blks_read, pg_catalog.SUM(Ti.toast_blks_hit) toast_blks_hit, + 
pg_catalog.SUM(Ti.tidx_blks_read) tidx_blks_read, pg_catalog.SUM(Ti.tidx_blks_hit) tidx_blks_hit FROM dbe_perf.get_summary_statio_all_tables() Ti left join dbe_perf.get_local_toast_relation() Tn on Tn.shemaname = Ti.toastrelschemaname and Tn.relname = Ti.toastrelname GROUP BY (1, 2); @@ -1494,7 +1498,7 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_statio_sys_indexes AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, COALESCE(Ti.indexrelname, Tn.toastindexname) AS indexrelname, - SUM(Ti.idx_blks_read) idx_blks_read, SUM(Ti.idx_blks_hit) idx_blks_hit + pg_catalog.SUM(Ti.idx_blks_read) idx_blks_read, pg_catalog.SUM(Ti.idx_blks_hit) idx_blks_hit FROM dbe_perf.get_summary_statio_sys_indexes() AS Ti LEFT JOIN dbe_perf.get_local_toastname_and_toastindexname() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) @@ -1539,7 +1543,7 @@ CREATE VIEW dbe_perf.global_statio_sys_sequences AS CREATE VIEW dbe_perf.summary_statio_sys_sequences AS SELECT schemaname, relname, - SUM(blks_read) blks_read, SUM(blks_hit) blks_hit + pg_catalog.SUM(blks_read) blks_read, pg_catalog.SUM(blks_hit) blks_hit FROM dbe_perf.get_global_statio_sys_sequences() GROUP BY (schemaname, relname); @@ -1649,10 +1653,10 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_statio_sys_tables AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname, NULL) as relname, - SUM(Ti.heap_blks_read) heap_blks_read, SUM(Ti.heap_blks_hit) heap_blks_hit, - SUM(Ti.idx_blks_read) idx_blks_read, SUM(Ti.idx_blks_hit) idx_blks_hit, - SUM(Ti.toast_blks_read) toast_blks_read, SUM(Ti.toast_blks_hit) toast_blks_hit, - SUM(Ti.tidx_blks_read) tidx_blks_read, SUM(Ti.tidx_blks_hit) tidx_blks_hit + pg_catalog.SUM(Ti.heap_blks_read) heap_blks_read, pg_catalog.SUM(Ti.heap_blks_hit) heap_blks_hit, + pg_catalog.SUM(Ti.idx_blks_read) idx_blks_read, pg_catalog.SUM(Ti.idx_blks_hit) idx_blks_hit, + pg_catalog.SUM(Ti.toast_blks_read) toast_blks_read, pg_catalog.SUM(Ti.toast_blks_hit) toast_blks_hit, + pg_catalog.SUM(Ti.tidx_blks_read) tidx_blks_read, pg_catalog.SUM(Ti.tidx_blks_hit) tidx_blks_hit FROM dbe_perf.get_summary_statio_sys_tables() as Ti LEFT JOIN dbe_perf.get_local_toast_relation() Tn ON Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname @@ -1747,7 +1751,7 @@ CREATE VIEW dbe_perf.summary_statio_user_indexes AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, COALESCE(Ti.indexrelname, Tn.toastindexname) AS indexrelname, - SUM(Ti.idx_blks_read) idx_blks_read, SUM(Ti.idx_blks_hit) idx_blks_hit + pg_catalog.SUM(Ti.idx_blks_read) idx_blks_read, pg_catalog.SUM(Ti.idx_blks_hit) idx_blks_hit FROM dbe_perf.get_summary_statio_user_indexes() AS Ti LEFT JOIN dbe_perf.get_local_toastname_and_toastindexname() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) @@ -1792,7 +1796,7 @@ CREATE VIEW dbe_perf.global_statio_user_sequences AS CREATE VIEW dbe_perf.summary_statio_user_sequences AS SELECT schemaname, relname, - SUM(blks_read) blks_read, SUM(blks_hit) blks_hit + pg_catalog.SUM(blks_read) blks_read, pg_catalog.SUM(blks_hit) blks_hit FROM dbe_perf.get_global_statio_user_sequences() GROUP BY (schemaname, relname); @@ -1901,10 +1905,10 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_statio_user_tables AS SELECT Ti.schemaname as schemaname, COALESCE(Ti.relname, Tn.toastname) as relname, - SUM(Ti.heap_blks_read) heap_blks_read, SUM(Ti.heap_blks_hit) heap_blks_hit, - SUM(Ti.idx_blks_read) idx_blks_read, 
SUM(Ti.idx_blks_hit) idx_blks_hit, - SUM(Ti.toast_blks_read) toast_blks_read, SUM(Ti.toast_blks_hit) toast_blks_hit, - SUM(Ti.tidx_blks_read) tidx_blks_read, SUM(Ti.tidx_blks_hit) tidx_blks_hit + pg_catalog.SUM(Ti.heap_blks_read) heap_blks_read, pg_catalog.SUM(Ti.heap_blks_hit) heap_blks_hit, + pg_catalog.SUM(Ti.idx_blks_read) idx_blks_read, pg_catalog.SUM(Ti.idx_blks_hit) idx_blks_hit, + pg_catalog.SUM(Ti.toast_blks_read) toast_blks_read, pg_catalog.SUM(Ti.toast_blks_hit) toast_blks_hit, + pg_catalog.SUM(Ti.tidx_blks_read) tidx_blks_read, pg_catalog.SUM(Ti.tidx_blks_hit) tidx_blks_hit FROM dbe_perf.get_summary_statio_user_tables() AS Ti LEFT JOIN dbe_perf.get_local_toast_relation() Tn ON Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname GROUP BY (1, 2); @@ -1924,9 +1928,9 @@ DECLARE query_str_nodes := 'select * from dbe_perf.node_name'; FOR row_name IN EXECUTE(query_str_nodes) LOOP query_str := 'SELECT D.datname AS datname, - pg_stat_get_db_cu_mem_hit(D.oid) AS mem_hit, - pg_stat_get_db_cu_hdd_sync(D.oid) AS hdd_sync_read, - pg_stat_get_db_cu_hdd_asyn(D.oid) AS hdd_asyn_read + pg_catalog.pg_stat_get_db_cu_mem_hit(D.oid) AS mem_hit, + pg_catalog.pg_stat_get_db_cu_hdd_sync(D.oid) AS hdd_sync_read, + pg_catalog.pg_stat_get_db_cu_hdd_asyn(D.oid) AS hdd_asyn_read FROM pg_database D;'; FOR each_node_out IN EXECUTE(query_str) LOOP node_name1 := row_name.node_name; @@ -1953,25 +1957,25 @@ CREATE VIEW dbe_perf.stat_all_tables AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_numscans(C.oid) AS seq_scan, - pg_stat_get_tuples_returned(C.oid) AS seq_tup_read, - sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan, - sum(pg_stat_get_tuples_fetched(I.indexrelid))::bigint + - pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch, - pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins, - pg_stat_get_tuples_updated(C.oid) AS n_tup_upd, - pg_stat_get_tuples_deleted(C.oid) AS n_tup_del, - pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd, - pg_stat_get_live_tuples(C.oid) AS n_live_tup, - pg_stat_get_dead_tuples(C.oid) AS n_dead_tup, - pg_stat_get_last_vacuum_time(C.oid) as last_vacuum, - pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum, - pg_stat_get_last_analyze_time(C.oid) as last_analyze, - pg_stat_get_last_autoanalyze_time(C.oid) as last_autoanalyze, - pg_stat_get_vacuum_count(C.oid) AS vacuum_count, - pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count, - pg_stat_get_analyze_count(C.oid) AS analyze_count, - pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count + pg_catalog.pg_stat_get_numscans(C.oid) AS seq_scan, + pg_catalog.pg_stat_get_tuples_returned(C.oid) AS seq_tup_read, + pg_catalog.sum(pg_catalog.pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan, + pg_catalog.sum(pg_catalog.pg_stat_get_tuples_fetched(I.indexrelid))::bigint + + pg_catalog.pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch, + pg_catalog.pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins, + pg_catalog.pg_stat_get_tuples_updated(C.oid) AS n_tup_upd, + pg_catalog.pg_stat_get_tuples_deleted(C.oid) AS n_tup_del, + pg_catalog.pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd, + pg_catalog.pg_stat_get_live_tuples(C.oid) AS n_live_tup, + pg_catalog.pg_stat_get_dead_tuples(C.oid) AS n_dead_tup, + pg_catalog.pg_stat_get_last_vacuum_time(C.oid) as last_vacuum, + pg_catalog.pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum, + pg_catalog.pg_stat_get_last_analyze_time(C.oid) as last_analyze, + pg_catalog.pg_stat_get_last_autoanalyze_time(C.oid) as 
last_autoanalyze, + pg_catalog.pg_stat_get_vacuum_count(C.oid) AS vacuum_count, + pg_catalog.pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count, + pg_catalog.pg_stat_get_analyze_count(C.oid) AS analyze_count, + pg_catalog.pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count FROM pg_class C LEFT JOIN pg_index I ON C.oid = I.indrelid LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) @@ -2112,15 +2116,15 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_all_tables AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname, NULL) as relname, - SUM(Ti.seq_scan) seq_scan, SUM(Ti.seq_tup_read) seq_tup_read, - SUM(Ti.idx_scan) idx_scan, SUM(Ti.idx_tup_fetch) idx_tup_fetch, - SUM(Ti.n_tup_ins) n_tup_ins, SUM(Ti.n_tup_upd) n_tup_upd, - SUM(Ti.n_tup_del) n_tup_del, SUM(Ti.n_tup_hot_upd) n_tup_hot_upd, - SUM(Ti.n_live_tup) n_live_tup, SUM(Ti.n_dead_tup) n_dead_tup, - MAX(Ti.last_vacuum) last_vacuum, MAX(Ti.last_autovacuum) last_autovacuum, - MAX(Ti.last_analyze) last_analyze, MAX(Ti.last_autoanalyze) last_autoanalyze, - SUM(Ti.vacuum_count) vacuum_count, SUM(Ti.autovacuum_count) autovacuum_count, - SUM(Ti.analyze_count) analyze_count, SUM(Ti.autoanalyze_count) autoanalyze_count + pg_catalog.SUM(Ti.seq_scan) seq_scan, pg_catalog.SUM(Ti.seq_tup_read) seq_tup_read, + pg_catalog.SUM(Ti.idx_scan) idx_scan, pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch, + pg_catalog.SUM(Ti.n_tup_ins) n_tup_ins, pg_catalog.SUM(Ti.n_tup_upd) n_tup_upd, + pg_catalog.SUM(Ti.n_tup_del) n_tup_del, pg_catalog.SUM(Ti.n_tup_hot_upd) n_tup_hot_upd, + pg_catalog.SUM(Ti.n_live_tup) n_live_tup, pg_catalog.SUM(Ti.n_dead_tup) n_dead_tup, + pg_catalog.MAX(Ti.last_vacuum) last_vacuum, pg_catalog.MAX(Ti.last_autovacuum) last_autovacuum, + pg_catalog.MAX(Ti.last_analyze) last_analyze, pg_catalog.MAX(Ti.last_autoanalyze) last_autoanalyze, + pg_catalog.SUM(Ti.vacuum_count) vacuum_count, pg_catalog.SUM(Ti.autovacuum_count) autovacuum_count, + pg_catalog.SUM(Ti.analyze_count) analyze_count, pg_catalog.SUM(Ti.autoanalyze_count) autoanalyze_count FROM (SELECT * FROM dbe_perf.get_summary_stat_all_tables()) AS Ti LEFT JOIN dbe_perf.get_local_toast_relation() Tn ON Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname @@ -2133,9 +2137,9 @@ CREATE VIEW dbe_perf.stat_all_indexes AS N.nspname AS schemaname, C.relname AS relname, I.relname AS indexrelname, - pg_stat_get_numscans(I.oid) AS idx_scan, - pg_stat_get_tuples_returned(I.oid) AS idx_tup_read, - pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch + pg_catalog.pg_stat_get_numscans(I.oid) AS idx_scan, + pg_catalog.pg_stat_get_tuples_returned(I.oid) AS idx_tup_read, + pg_catalog.pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch FROM pg_class C JOIN pg_index X ON C.oid = X.indrelid JOIN pg_class I ON I.oid = X.indexrelid @@ -2229,7 +2233,7 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_all_indexes AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, COALESCE(Ti.indexrelname, Tn.toastindexname) AS indexrelname, - SUM(Ti.idx_scan) idx_scan, SUM(Ti.idx_tup_read) idx_tup_read, SUM(Ti.idx_tup_fetch) idx_tup_fetch + pg_catalog.SUM(Ti.idx_scan) idx_scan, pg_catalog.SUM(Ti.idx_tup_read) idx_tup_read, pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch FROM dbe_perf.get_summary_stat_all_indexes() AS Ti LEFT JOIN dbe_perf.get_local_toastname_and_toastindexname() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) @@ -2375,15 +2379,15 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW 
dbe_perf.summary_stat_sys_tables AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname, NULL) AS relname, - SUM(Ti.seq_scan) seq_scan, SUM(Ti.seq_tup_read) seq_tup_read, - SUM(Ti.idx_scan) idx_scan, SUM(Ti.idx_tup_fetch) idx_tup_fetch, - SUM(Ti.n_tup_ins) n_tup_ins, SUM(Ti.n_tup_upd) n_tup_upd, - SUM(Ti.n_tup_del) n_tup_del, SUM(Ti.n_tup_hot_upd) n_tup_hot_upd, - SUM(Ti.n_live_tup) n_live_tup, SUM(Ti.n_dead_tup) n_dead_tup, - MAX(Ti.last_vacuum) last_vacuum, MAX(Ti.last_autovacuum) last_autovacuum, - MAX(Ti.last_analyze) last_analyze, MAX(Ti.last_autoanalyze) last_autoanalyze, - SUM(Ti.vacuum_count) vacuum_count, SUM(Ti.autovacuum_count) autovacuum_count, - SUM(Ti.analyze_count) analyze_count, SUM(Ti.autoanalyze_count) autoanalyze_count + pg_catalog.SUM(Ti.seq_scan) seq_scan, pg_catalog.SUM(Ti.seq_tup_read) seq_tup_read, + pg_catalog.SUM(Ti.idx_scan) idx_scan, pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch, + pg_catalog.SUM(Ti.n_tup_ins) n_tup_ins, pg_catalog.SUM(Ti.n_tup_upd) n_tup_upd, + pg_catalog.SUM(Ti.n_tup_del) n_tup_del, pg_catalog.SUM(Ti.n_tup_hot_upd) n_tup_hot_upd, + pg_catalog.SUM(Ti.n_live_tup) n_live_tup, pg_catalog.SUM(Ti.n_dead_tup) n_dead_tup, + pg_catalog.MAX(Ti.last_vacuum) last_vacuum, pg_catalog.MAX(Ti.last_autovacuum) last_autovacuum, + pg_catalog.MAX(Ti.last_analyze) last_analyze, pg_catalog.MAX(Ti.last_autoanalyze) last_autoanalyze, + pg_catalog.SUM(Ti.vacuum_count) vacuum_count, pg_catalog.SUM(Ti.autovacuum_count) autovacuum_count, + pg_catalog.SUM(Ti.analyze_count) analyze_count, pg_catalog.SUM(Ti.autoanalyze_count) autoanalyze_count FROM dbe_perf.get_summary_stat_sys_tables() as Ti LEFT JOIN dbe_perf.get_local_toast_relation() as Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) GROUP BY (1, 2); @@ -2480,7 +2484,7 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_sys_indexes AS SELECT Ti.schemaname AS schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, COALESCE(Ti.indexrelname, Tn.toastindexname) AS indexrelname, - SUM(Ti.idx_scan) idx_scan, SUM(Ti.idx_tup_read) idx_tup_read, SUM(Ti.idx_tup_fetch) idx_tup_fetch + pg_catalog.SUM(Ti.idx_scan) idx_scan, pg_catalog.SUM(Ti.idx_tup_read) idx_tup_read, pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch FROM dbe_perf.get_summary_stat_sys_indexes() AS Ti LEFT JOIN dbe_perf.get_local_toastname_and_toastindexname() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) @@ -2626,15 +2630,15 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_user_tables AS SELECT Ti.schemaname AS schemaname, COALESCE(Ti.relname, Tn.toastname, NULL) AS relname, - SUM(Ti.seq_scan) seq_scan, SUM(Ti.seq_tup_read) seq_tup_read, - SUM(Ti.idx_scan) idx_scan, SUM(Ti.idx_tup_fetch) idx_tup_fetch, - SUM(Ti.n_tup_ins) n_tup_ins, SUM(Ti.n_tup_upd) n_tup_upd, - SUM(Ti.n_tup_del) n_tup_del, SUM(Ti.n_tup_hot_upd) n_tup_hot_upd, - SUM(Ti.n_live_tup) n_live_tup, SUM(Ti.n_dead_tup) n_dead_tup, - MAX(Ti.last_vacuum) last_vacuum, MAX(Ti.last_autovacuum) last_autovacuum, - MAX(Ti.last_analyze) last_analyze, MAX(Ti.last_autoanalyze) last_autoanalyze, - SUM(Ti.vacuum_count) vacuum_count, SUM(Ti.autovacuum_count) autovacuum_count, - SUM(Ti.analyze_count) analyze_count, SUM(Ti.autoanalyze_count) autoanalyze_count + pg_catalog.SUM(Ti.seq_scan) seq_scan, pg_catalog.SUM(Ti.seq_tup_read) seq_tup_read, + pg_catalog.SUM(Ti.idx_scan) idx_scan, pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch, + pg_catalog.SUM(Ti.n_tup_ins) n_tup_ins, pg_catalog.SUM(Ti.n_tup_upd) n_tup_upd, + 
pg_catalog.SUM(Ti.n_tup_del) n_tup_del, pg_catalog.SUM(Ti.n_tup_hot_upd) n_tup_hot_upd, + pg_catalog.SUM(Ti.n_live_tup) n_live_tup, pg_catalog.SUM(Ti.n_dead_tup) n_dead_tup, + pg_catalog.MAX(Ti.last_vacuum) last_vacuum, pg_catalog.MAX(Ti.last_autovacuum) last_autovacuum, + pg_catalog.MAX(Ti.last_analyze) last_analyze, pg_catalog.MAX(Ti.last_autoanalyze) last_autoanalyze, + pg_catalog.SUM(Ti.vacuum_count) vacuum_count, pg_catalog.SUM(Ti.autovacuum_count) autovacuum_count, + pg_catalog.SUM(Ti.analyze_count) analyze_count, pg_catalog.SUM(Ti.autoanalyze_count) autoanalyze_count FROM dbe_perf.get_summary_stat_user_tables() AS Ti LEFT JOIN dbe_perf.get_local_toast_relation() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) GROUP BY (1, 2); @@ -2731,7 +2735,7 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_user_indexes AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, COALESCE(Ti.indexrelname, Tn.toastindexname) AS indexrelname, - SUM(Ti.idx_scan) idx_scan, SUM(Ti.idx_tup_read) idx_tup_read, SUM(Ti.idx_tup_fetch) idx_tup_fetch + pg_catalog.SUM(Ti.idx_scan) idx_scan, pg_catalog.SUM(Ti.idx_tup_read) idx_tup_read, pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch FROM dbe_perf.get_summary_stat_user_indexes() as Ti LEFT JOIN dbe_perf.get_local_toastname_and_toastindexname() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) @@ -2741,24 +2745,24 @@ CREATE VIEW dbe_perf.stat_database AS SELECT D.oid AS datid, D.datname AS datname, - pg_stat_get_db_numbackends(D.oid) AS numbackends, - pg_stat_get_db_xact_commit(D.oid) AS xact_commit, - pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback, - pg_stat_get_db_blocks_fetched(D.oid) - - pg_stat_get_db_blocks_hit(D.oid) AS blks_read, - pg_stat_get_db_blocks_hit(D.oid) AS blks_hit, - pg_stat_get_db_tuples_returned(D.oid) AS tup_returned, - pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched, - pg_stat_get_db_tuples_inserted(D.oid) AS tup_inserted, - pg_stat_get_db_tuples_updated(D.oid) AS tup_updated, - pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted, - pg_stat_get_db_conflict_all(D.oid) AS conflicts, - pg_stat_get_db_temp_files(D.oid) AS temp_files, - pg_stat_get_db_temp_bytes(D.oid) AS temp_bytes, - pg_stat_get_db_deadlocks(D.oid) AS deadlocks, - pg_stat_get_db_blk_read_time(D.oid) AS blk_read_time, - pg_stat_get_db_blk_write_time(D.oid) AS blk_write_time, - pg_stat_get_db_stat_reset_time(D.oid) AS stats_reset + pg_catalog.pg_stat_get_db_numbackends(D.oid) AS numbackends, + pg_catalog.pg_stat_get_db_xact_commit(D.oid) AS xact_commit, + pg_catalog.pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback, + pg_catalog.pg_stat_get_db_blocks_fetched(D.oid) - + pg_catalog.pg_stat_get_db_blocks_hit(D.oid) AS blks_read, + pg_catalog.pg_stat_get_db_blocks_hit(D.oid) AS blks_hit, + pg_catalog.pg_stat_get_db_tuples_returned(D.oid) AS tup_returned, + pg_catalog.pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched, + pg_catalog.pg_stat_get_db_tuples_inserted(D.oid) AS tup_inserted, + pg_catalog.pg_stat_get_db_tuples_updated(D.oid) AS tup_updated, + pg_catalog.pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted, + pg_catalog.pg_stat_get_db_conflict_all(D.oid) AS conflicts, + pg_catalog.pg_stat_get_db_temp_files(D.oid) AS temp_files, + pg_catalog.pg_stat_get_db_temp_bytes(D.oid) AS temp_bytes, + pg_catalog.pg_stat_get_db_deadlocks(D.oid) AS deadlocks, + pg_catalog.pg_stat_get_db_blk_read_time(D.oid) AS blk_read_time, + pg_catalog.pg_stat_get_db_blk_write_time(D.oid) AS 
blk_write_time, + pg_catalog.pg_stat_get_db_stat_reset_time(D.oid) AS stats_reset FROM pg_database D; CREATE OR REPLACE FUNCTION dbe_perf.get_global_stat_database @@ -2820,11 +2824,11 @@ CREATE VIEW dbe_perf.summary_stat_database AS FROM dbe_perf.stat_database AS SUMMARY_ITEM, (SELECT datname, - SUM(numbackends) numbackends, SUM(xact_commit) xact_commit, SUM(xact_rollback) xact_rollback, - SUM(blks_read) blks_read, SUM(blks_hit) blks_hit, SUM(tup_returned) tup_returned, - SUM(tup_fetched) tup_fetched, SUM(temp_files) temp_files, - SUM(temp_bytes) temp_bytes, SUM(blk_read_time) blk_read_time, - SUM(blk_write_time) blk_write_time, MAX(stats_reset) stats_reset + pg_catalog.SUM(numbackends) numbackends, pg_catalog.SUM(xact_commit) xact_commit, pg_catalog.SUM(xact_rollback) xact_rollback, + pg_catalog.SUM(blks_read) blks_read, pg_catalog.SUM(blks_hit) blks_hit, pg_catalog.SUM(tup_returned) tup_returned, + pg_catalog.SUM(tup_fetched) tup_fetched, pg_catalog.SUM(temp_files) temp_files, + pg_catalog.SUM(temp_bytes) temp_bytes, pg_catalog.SUM(blk_read_time) blk_read_time, + pg_catalog.SUM(blk_write_time) blk_write_time, pg_catalog.MAX(stats_reset) stats_reset FROM dbe_perf.get_global_stat_database() GROUP BY (datname)) AS ALL_NODES WHERE ALL_NODES.datname = SUMMARY_ITEM.datname; @@ -2832,11 +2836,11 @@ CREATE VIEW dbe_perf.stat_database_conflicts AS SELECT D.oid AS datid, D.datname AS datname, - pg_stat_get_db_conflict_tablespace(D.oid) AS confl_tablespace, - pg_stat_get_db_conflict_lock(D.oid) AS confl_lock, - pg_stat_get_db_conflict_snapshot(D.oid) AS confl_snapshot, - pg_stat_get_db_conflict_bufferpin(D.oid) AS confl_bufferpin, - pg_stat_get_db_conflict_startup_deadlock(D.oid) AS confl_deadlock + pg_catalog.pg_stat_get_db_conflict_tablespace(D.oid) AS confl_tablespace, + pg_catalog.pg_stat_get_db_conflict_lock(D.oid) AS confl_lock, + pg_catalog.pg_stat_get_db_conflict_snapshot(D.oid) AS confl_snapshot, + pg_catalog.pg_stat_get_db_conflict_bufferpin(D.oid) AS confl_bufferpin, + pg_catalog.pg_stat_get_db_conflict_startup_deadlock(D.oid) AS confl_deadlock FROM pg_database D; CREATE OR REPLACE FUNCTION dbe_perf.get_global_stat_database_conflicts @@ -2876,11 +2880,11 @@ CREATE VIEW dbe_perf.global_stat_database_conflicts AS CREATE VIEW dbe_perf.summary_stat_database_conflicts AS SELECT D.datname AS datname, - pg_stat_get_db_conflict_tablespace(D.oid) AS confl_tablespace, - pg_stat_get_db_conflict_lock(D.oid) AS confl_lock, - pg_stat_get_db_conflict_snapshot(D.oid) AS confl_snapshot, - pg_stat_get_db_conflict_bufferpin(D.oid) AS confl_bufferpin, - pg_stat_get_db_conflict_startup_deadlock(D.oid) AS confl_deadlock + pg_catalog.pg_stat_get_db_conflict_tablespace(D.oid) AS confl_tablespace, + pg_catalog.pg_stat_get_db_conflict_lock(D.oid) AS confl_lock, + pg_catalog.pg_stat_get_db_conflict_snapshot(D.oid) AS confl_snapshot, + pg_catalog.pg_stat_get_db_conflict_bufferpin(D.oid) AS confl_bufferpin, + pg_catalog.pg_stat_get_db_conflict_startup_deadlock(D.oid) AS confl_deadlock FROM pg_database D; CREATE VIEW dbe_perf.stat_xact_all_tables AS @@ -2888,15 +2892,15 @@ CREATE VIEW dbe_perf.stat_xact_all_tables AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_xact_numscans(C.oid) AS seq_scan, - pg_stat_get_xact_tuples_returned(C.oid) AS seq_tup_read, - sum(pg_stat_get_xact_numscans(I.indexrelid))::bigint AS idx_scan, - sum(pg_stat_get_xact_tuples_fetched(I.indexrelid))::bigint + - pg_stat_get_xact_tuples_fetched(C.oid) AS idx_tup_fetch, - pg_stat_get_xact_tuples_inserted(C.oid) AS 
n_tup_ins, - pg_stat_get_xact_tuples_updated(C.oid) AS n_tup_upd, - pg_stat_get_xact_tuples_deleted(C.oid) AS n_tup_del, - pg_stat_get_xact_tuples_hot_updated(C.oid) AS n_tup_hot_upd + pg_catalog.pg_stat_get_xact_numscans(C.oid) AS seq_scan, + pg_catalog.pg_stat_get_xact_tuples_returned(C.oid) AS seq_tup_read, + pg_catalog.sum(pg_catalog.pg_stat_get_xact_numscans(I.indexrelid))::bigint AS idx_scan, + pg_catalog.sum(pg_catalog.pg_stat_get_xact_tuples_fetched(I.indexrelid))::bigint + + pg_catalog.pg_stat_get_xact_tuples_fetched(C.oid) AS idx_tup_fetch, + pg_catalog.pg_stat_get_xact_tuples_inserted(C.oid) AS n_tup_ins, + pg_catalog.pg_stat_get_xact_tuples_updated(C.oid) AS n_tup_upd, + pg_catalog.pg_stat_get_xact_tuples_deleted(C.oid) AS n_tup_del, + pg_catalog.pg_stat_get_xact_tuples_hot_updated(C.oid) AS n_tup_hot_upd FROM pg_class C LEFT JOIN pg_index I ON C.oid = I.indrelid LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) @@ -3021,9 +3025,9 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_xact_all_tables AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname, NULL) AS relname, - SUM(Ti.seq_scan) seq_scan, SUM(Ti.seq_tup_read) seq_tup_read, SUM(Ti.idx_scan) idx_scan, - SUM(Ti.idx_tup_fetch) idx_tup_fetch, SUM(Ti.n_tup_ins) n_tup_ins, SUM(Ti.n_tup_upd) n_tup_upd, - SUM(Ti.n_tup_del) n_tup_del, SUM(Ti.n_tup_hot_upd) n_tup_hot_upd + pg_catalog.SUM(Ti.seq_scan) seq_scan, pg_catalog.SUM(Ti.seq_tup_read) seq_tup_read, pg_catalog.SUM(Ti.idx_scan) idx_scan, + pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch, pg_catalog.SUM(Ti.n_tup_ins) n_tup_ins, pg_catalog.SUM(Ti.n_tup_upd) n_tup_upd, + pg_catalog.SUM(Ti.n_tup_del) n_tup_del, pg_catalog.SUM(Ti.n_tup_hot_upd) n_tup_hot_upd FROM dbe_perf.get_summary_stat_xact_all_tables() as Ti LEFT JOIN dbe_perf.get_local_toast_relation() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) GROUP BY (1, 2); @@ -3132,9 +3136,9 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_xact_sys_tables AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, - SUM(Ti.seq_scan) seq_scan, SUM(Ti.seq_tup_read) seq_tup_read, SUM(Ti.idx_scan) idx_scan, - SUM(Ti.idx_tup_fetch) idx_tup_fetch, SUM(Ti.n_tup_ins) n_tup_ins, SUM(Ti.n_tup_upd) n_tup_upd, - SUM(Ti.n_tup_del) n_tup_del, SUM(Ti.n_tup_hot_upd) n_tup_hot_upd + pg_catalog.SUM(Ti.seq_scan) seq_scan, pg_catalog.SUM(Ti.seq_tup_read) seq_tup_read, pg_catalog.SUM(Ti.idx_scan) idx_scan, + pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch, pg_catalog.SUM(Ti.n_tup_ins) n_tup_ins, pg_catalog.SUM(Ti.n_tup_upd) n_tup_upd, + pg_catalog.SUM(Ti.n_tup_del) n_tup_del, pg_catalog.SUM(Ti.n_tup_hot_upd) n_tup_hot_upd FROM dbe_perf.get_summary_stat_xact_sys_tables() as Ti LEFT JOIN dbe_perf.get_local_toast_relation() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) GROUP BY (1, 2); @@ -3243,9 +3247,9 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW dbe_perf.summary_stat_xact_user_tables AS SELECT Ti.schemaname, COALESCE(Ti.relname, Tn.toastname) AS relname, - SUM(Ti.seq_scan) seq_scan, SUM(Ti.seq_tup_read) seq_tup_read, SUM(Ti.idx_scan) idx_scan, - SUM(Ti.idx_tup_fetch) idx_tup_fetch, SUM(Ti.n_tup_ins) n_tup_ins, SUM(Ti.n_tup_upd) n_tup_upd, - SUM(Ti.n_tup_del) n_tup_del, SUM(Ti.n_tup_hot_upd) n_tup_hot_upd + pg_catalog.SUM(Ti.seq_scan) seq_scan, pg_catalog.SUM(Ti.seq_tup_read) seq_tup_read, pg_catalog.SUM(Ti.idx_scan) idx_scan, + pg_catalog.SUM(Ti.idx_tup_fetch) idx_tup_fetch, pg_catalog.SUM(Ti.n_tup_ins) n_tup_ins, 
pg_catalog.SUM(Ti.n_tup_upd) n_tup_upd, + pg_catalog.SUM(Ti.n_tup_del) n_tup_del, pg_catalog.SUM(Ti.n_tup_hot_upd) n_tup_hot_upd FROM dbe_perf.get_summary_stat_xact_user_tables() AS Ti LEFT JOIN dbe_perf.get_local_toast_relation() AS Tn ON (Tn.shemaname = Ti.toastrelschemaname AND Tn.relname = Ti.toastrelname) GROUP BY (1, 2); @@ -3255,9 +3259,9 @@ CREATE VIEW dbe_perf.stat_user_functions AS P.oid AS funcid, N.nspname AS schemaname, P.proname AS funcname, - pg_stat_get_function_calls(P.oid) AS calls, - pg_stat_get_function_total_time(P.oid) AS total_time, - pg_stat_get_function_self_time(P.oid) AS self_time + pg_catalog.pg_stat_get_function_calls(P.oid) AS calls, + pg_catalog.pg_stat_get_function_total_time(P.oid) AS total_time, + pg_catalog.pg_stat_get_function_self_time(P.oid) AS self_time FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace) WHERE P.prolang != 12 -- fast check to eliminate built-in functions AND pg_stat_get_function_calls(P.oid) IS NOT NULL; @@ -3297,7 +3301,7 @@ CREATE VIEW dbe_perf.global_stat_user_functions AS CREATE VIEW dbe_perf.summary_stat_user_functions AS SELECT schemaname, funcname, - SUM(calls) calls, SUM(total_time) total_time, SUM(self_time) self_time + pg_catalog.SUM(calls) calls, pg_catalog.SUM(total_time) total_time, pg_catalog.SUM(self_time) self_time FROM dbe_perf.get_global_stat_user_functions() GROUP BY (schemaname, funcname); @@ -3306,12 +3310,12 @@ CREATE VIEW dbe_perf.stat_xact_user_functions AS P.oid AS funcid, N.nspname AS schemaname, P.proname AS funcname, - pg_stat_get_xact_function_calls(P.oid) AS calls, - pg_stat_get_xact_function_total_time(P.oid) AS total_time, - pg_stat_get_xact_function_self_time(P.oid) AS self_time + pg_catalog.pg_stat_get_xact_function_calls(P.oid) AS calls, + pg_catalog.pg_stat_get_xact_function_total_time(P.oid) AS total_time, + pg_catalog.pg_stat_get_xact_function_self_time(P.oid) AS self_time FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace) WHERE P.prolang != 12 -- fast check to eliminate built-in functions - AND pg_stat_get_xact_function_calls(P.oid) IS NOT NULL; + AND pg_catalog.pg_stat_get_xact_function_calls(P.oid) IS NOT NULL; CREATE OR REPLACE FUNCTION dbe_perf.get_global_stat_xact_user_functions (OUT node_name name, OUT funcid oid, OUT schemaname name, OUT funcname name, OUT calls bigint, @@ -3348,7 +3352,7 @@ CREATE VIEW dbe_perf.global_stat_xact_user_functions AS CREATE VIEW dbe_perf.summary_stat_xact_user_functions AS SELECT schemaname, funcname, - SUM(calls) calls, SUM(total_time) total_time, SUM(self_time) self_time + pg_catalog.SUM(calls) calls, pg_catalog.SUM(total_time) total_time, pg_catalog.SUM(self_time) self_time FROM dbe_perf.get_global_stat_xact_user_functions() GROUP BY (schemaname, funcname); @@ -3390,8 +3394,8 @@ CREATE VIEW dbe_perf.global_stat_bad_block AS CREATE VIEW dbe_perf.summary_stat_bad_block AS SELECT databaseid, tablespaceid, relfilenode, - SUM(forknum) forknum, SUM(error_count) error_count, - MIN(first_time) first_time, MAX(last_time) last_time + pg_catalog.SUM(forknum) forknum, pg_catalog.SUM(error_count) error_count, + pg_catalog.MIN(first_time) first_time, pg_catalog.MAX(last_time) last_time FROM dbe_perf.get_global_stat_bad_block() GROUP BY (databaseid, tablespaceid, relfilenode); @@ -3435,13 +3439,13 @@ CREATE VIEW dbe_perf.global_file_redo_iostat AS CREATE VIEW dbe_perf.summary_file_redo_iostat AS SELECT - sum(phywrts) AS phywrts, - sum(phyblkwrt) AS phyblkwrt, - sum(writetim) AS writetim, - ((sum(writetim) / greatest(sum(phywrts), 
1))::bigint) AS avgiotim, - max(lstiotim) AS lstiotim, - min(miniotim) AS miniotim, - max(maxiowtm) AS maxiowtm + pg_catalog.sum(phywrts) AS phywrts, + pg_catalog.sum(phyblkwrt) AS phyblkwrt, + pg_catalog.sum(writetim) AS writetim, + ((pg_catalog.sum(writetim) / greatest(pg_catalog.sum(phywrts), 1))::bigint) AS avgiotim, + pg_catalog.max(lstiotim) AS lstiotim, + pg_catalog.min(miniotim) AS miniotim, + pg_catalog.max(maxiowtm) AS maxiowtm FROM dbe_perf.get_global_file_redo_iostat(); CREATE VIEW dbe_perf.local_rel_iostat AS @@ -3480,8 +3484,8 @@ CREATE VIEW dbe_perf.global_rel_iostat AS CREATE VIEW dbe_perf.summary_rel_iostat AS SELECT - sum(phyrds) AS phyrds, sum(phywrts) AS phywrts, sum(phyblkrd) AS phyblkrd, - sum(phyblkwrt) AS phyblkwrt + pg_catalog.sum(phyrds) AS phyrds, pg_catalog.sum(phywrts) AS phywrts, pg_catalog.sum(phyblkrd) AS phyblkrd, + pg_catalog.sum(phyblkwrt) AS phyblkwrt FROM dbe_perf.get_global_rel_iostat(); @@ -3532,10 +3536,10 @@ CREATE VIEW dbe_perf.global_file_iostat AS CREATE VIEW dbe_perf.summary_file_iostat AS SELECT filenum, dbid, spcid, - sum(phyrds) AS phyrds, sum(phywrts) AS phywrts, sum(phyblkrd) AS phyblkrd, - sum(phyblkwrt) AS phyblkwrt, sum(readtim) AS readtim, sum(writetim) AS writetim, - ((sum(readtim + writetim) / greatest(sum(phyrds + phywrts), 1))::bigint) AS avgiotim, - max(lstiotim) AS lstiotim, min(miniotim) AS miniotim, max(maxiowtm) AS maxiowtm + pg_catalog.sum(phyrds) AS phyrds, pg_catalog.sum(phywrts) AS phywrts, pg_catalog.sum(phyblkrd) AS phyblkrd, + pg_catalog.sum(phyblkwrt) AS phyblkwrt, pg_catalog.sum(readtim) AS readtim, pg_catalog.sum(writetim) AS writetim, + ((pg_catalog.sum(readtim + writetim) / greatest(pg_catalog.sum(phyrds + phywrts), 1))::bigint) AS avgiotim, + pg_catalog.max(lstiotim) AS lstiotim, pg_catalog.min(miniotim) AS miniotim, pg_catalog.max(maxiowtm) AS maxiowtm FROM dbe_perf.get_global_file_iostat() GROUP by (filenum, dbid, spcid); @@ -3740,7 +3744,7 @@ CREATE VIEW dbe_perf.replication_stat AS W.receiver_replay_location, W.sync_priority, W.sync_state - FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + FROM pg_catalog.pg_stat_get_activity(NULL) AS S, pg_authid U, pg_stat_get_wal_senders() AS W WHERE S.usesysid = U.oid AND S.pid = W.pid; @@ -3988,7 +3992,8 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp OUT lwlock_time bigint, OUT lwlock_wait_time bigint, OUT details bytea, - OUT is_slow_sql bool) + OUT is_slow_sql bool, + OUT trace_id text) RETURNS setof record AS $$ DECLARE @@ -4055,6 +4060,7 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp lwlock_wait_time := row_data.lwlock_wait_time; details := row_data.details; is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; return next; END LOOP; END LOOP; @@ -4116,7 +4122,8 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp OUT lwlock_time bigint, OUT lwlock_wait_time bigint, OUT details bytea, - OUT is_slow_sql bool) + OUT is_slow_sql bool, + OUT trace_id text) RETURNS setof record AS $$ DECLARE @@ -4183,6 +4190,7 @@ CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp lwlock_wait_time := row_data.lwlock_wait_time; details := row_data.details; is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; return next; END LOOP; END LOOP; @@ -4250,22 +4258,22 @@ CREATE VIEW dbe_perf.global_statement_count AS CREATE VIEW dbe_perf.summary_statement_count AS SELECT user_name, - SUM(select_count) AS select_count, SUM(update_count) AS update_count, - SUM(insert_count) AS 
insert_count, SUM(delete_count) AS delete_count, - SUM(mergeinto_count) AS mergeinto_count, SUM(ddl_count) AS ddl_count, - SUM(dml_count) AS dml_count, SUM(dcl_count) AS dcl_count, - SUM(total_select_elapse) AS total_select_elapse, - ((SUM(total_select_elapse) / greatest(SUM(select_count), 1))::bigint) AS avg_select_elapse, - MAX(max_select_elapse) AS max_select_elapse, MIN(min_select_elapse) AS min_select_elapse, - SUM(total_update_elapse) AS total_update_elapse, - ((SUM(total_update_elapse) / greatest(SUM(update_count), 1))::bigint) AS avg_update_elapse, - MAX(max_update_elapse) AS max_update_elapse, MIN(min_update_elapse) AS min_update_elapse, - SUM(total_insert_elapse) AS total_insert_elapse, - ((SUM(total_insert_elapse) / greatest(SUM(insert_count), 1))::bigint) AS avg_insert_elapse, - MAX(max_insert_elapse) AS max_insert_elapse, MIN(min_insert_elapse) AS min_insert_elapse, - SUM(total_delete_elapse) AS total_delete_elapse, - ((SUM(total_delete_elapse) / greatest(SUM(delete_count), 1))::bigint) AS avg_delete_elapse, - MAX(max_delete_elapse) AS max_delete_elapse, MIN(min_delete_elapse) AS min_delete_elapse + pg_catalog.SUM(select_count) AS select_count, pg_catalog.SUM(update_count) AS update_count, + pg_catalog.SUM(insert_count) AS insert_count, pg_catalog.SUM(delete_count) AS delete_count, + pg_catalog.SUM(mergeinto_count) AS mergeinto_count, pg_catalog.SUM(ddl_count) AS ddl_count, + pg_catalog.SUM(dml_count) AS dml_count, pg_catalog.SUM(dcl_count) AS dcl_count, + pg_catalog.SUM(total_select_elapse) AS total_select_elapse, + ((pg_catalog.SUM(total_select_elapse) / greatest(pg_catalog.SUM(select_count), 1))::bigint) AS avg_select_elapse, + pg_catalog.MAX(max_select_elapse) AS max_select_elapse, pg_catalog.MIN(min_select_elapse) AS min_select_elapse, + pg_catalog.SUM(total_update_elapse) AS total_update_elapse, + ((pg_catalog.SUM(total_update_elapse) / greatest(pg_catalog.SUM(update_count), 1))::bigint) AS avg_update_elapse, + pg_catalog.MAX(max_update_elapse) AS max_update_elapse, pg_catalog.MIN(min_update_elapse) AS min_update_elapse, + pg_catalog.SUM(total_insert_elapse) AS total_insert_elapse, + ((pg_catalog.SUM(total_insert_elapse) / greatest(pg_catalog.SUM(insert_count), 1))::bigint) AS avg_insert_elapse, + pg_catalog.MAX(max_insert_elapse) AS max_insert_elapse, pg_catalog.MIN(min_insert_elapse) AS min_insert_elapse, + pg_catalog.SUM(total_delete_elapse) AS total_delete_elapse, + ((pg_catalog.SUM(total_delete_elapse) / greatest(pg_catalog.SUM(delete_count), 1))::bigint) AS avg_delete_elapse, + pg_catalog.MAX(max_delete_elapse) AS max_delete_elapse, pg_catalog.MIN(min_delete_elapse) AS min_delete_elapse FROM dbe_perf.get_global_statement_count() GROUP by (user_name); /* configuration */ @@ -4331,7 +4339,7 @@ CREATE VIEW dbe_perf.global_config_settings AS /* waits*/ CREATE VIEW dbe_perf.wait_events AS - SELECT * FROM get_instr_wait_event(NULL); + SELECT * FROM pg_catalog.get_instr_wait_event(NULL); CREATE OR REPLACE FUNCTION dbe_perf.get_global_wait_events() RETURNS setof dbe_perf.wait_events @@ -4368,7 +4376,7 @@ DECLARE BEGIN QUERY_STR_NODES := 'select * from dbe_perf.node_name'; FOR ROW_NAME IN EXECUTE(QUERY_STR_NODES) LOOP - QUERY_STR := 'SELECT * FROM get_instr_rt_percentile(0)'; + QUERY_STR := 'SELECT * FROM pg_catalog.get_instr_rt_percentile(0)'; FOR ROW_DATA IN EXECUTE(QUERY_STR) LOOP p80 = ROW_DATA."P80"; p95 = ROW_DATA."P95"; @@ -4451,7 +4459,7 @@ CREATE VIEW dbe_perf.global_ckpt_status AS CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS SELECT node_name, 
curr_dwn, curr_start_page, file_trunc_num, file_reset_num, total_writes, low_threshold_writes, high_threshold_writes, - total_pages, low_threshold_pages, high_threshold_pages + total_pages, low_threshold_pages, high_threshold_pages, file_id FROM pg_catalog.local_double_write_stat(); CREATE OR REPLACE VIEW DBE_PERF.global_single_flush_dw_status AS @@ -4479,7 +4487,7 @@ SELECT node_name, rto_info FROM pg_catalog.local_rto_stat(); CREATE OR REPLACE VIEW dbe_perf.global_streaming_hadr_rto_and_rpo_stat AS -SELECT hadr_sender_node_name, hadr_receiver_node_name, current_rto, target_rto, current_rpo, target_rpo, current_sleep_time +SELECT hadr_sender_node_name, hadr_receiver_node_name, current_rto, target_rto, current_rpo, target_rpo, rto_sleep_time, rpo_sleep_time FROM pg_catalog.gs_hadr_local_rto_and_rpo_stat(); CREATE OR REPLACE VIEW dbe_perf.global_recovery_status AS @@ -4574,7 +4582,7 @@ SELECT S.pl_compilation_time, S.net_send_time, S.data_io_time -FROM pg_stat_get_wlm_session_info(0) S where S.is_slow_query = 1; +FROM pg_catalog.pg_stat_get_wlm_session_info(0) S where S.is_slow_query = 1; CREATE OR REPLACE FUNCTION dbe_perf.global_slow_query_history RETURNS setof dbe_perf.gs_slow_query_history @@ -4667,17 +4675,17 @@ CREATE OR REPLACE VIEW DBE_PERF.local_active_session AS tt(sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid, tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id, user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, wait_status, global_sessionid, final_block_sessionid, level, head) - AS(SELECT las.*, las.block_sessionid AS final_block_sessionid, 1 AS level, array_append('{}', las.sessionid) AS head FROM las + AS(SELECT las.*, las.block_sessionid AS final_block_sessionid, 1 AS level, pg_catalog.array_append('{}', las.sessionid) AS head FROM las UNION ALL SELECT tt.sampleid, tt.sample_time, tt.need_flush_sample, tt.databaseid, tt.thread_id, tt.sessionid, tt.start_time, tt.event, tt.lwtid, tt.psessionid, tt.tlevel, tt.smpid, tt.userid, tt.application_name, tt.client_addr, tt.client_hostname, tt.client_port, tt.query_id, tt.unique_query_id, - tt.user_id, tt.cn_id, tt.unique_query, tt.locktag, tt.lockmode, tt.block_sessionid, tt.wait_status, tt.global_sessionid, las.block_sessionid AS final_block_sessionid, tt.level + 1 AS level, array_append(tt.head, las.sessionid) AS head + tt.user_id, tt.cn_id, tt.unique_query, tt.locktag, tt.lockmode, tt.block_sessionid, tt.wait_status, tt.global_sessionid, las.block_sessionid AS final_block_sessionid, tt.level + 1 AS level, pg_catalog.array_append(tt.head, las.sessionid) AS head FROM tt INNER JOIN las ON tt.final_block_sessionid = las.sessionid WHERE las.sampleid = tt.sampleid AND (las.block_sessionid IS NOT NULL OR las.block_sessionid != 0) AND las.sessionid != all(head) AND las.sessionid != las.block_sessionid) SELECT sampleid, sample_time, need_flush_sample, databaseid, thread_id, sessionid, start_time, event, lwtid, psessionid, tlevel, smpid, userid, application_name, client_addr, client_hostname, client_port, query_id, unique_query_id, user_id, cn_id, unique_query, locktag, lockmode, block_sessionid, final_block_sessionid, wait_status, global_sessionid FROM tt - WHERE level = (SELECT MAX(level) FROM tt t1 WHERE t1.sampleid = tt.sampleid AND t1.sessionid = tt.sessionid); + WHERE level = (SELECT pg_catalog.MAX(level) FROM tt t1 WHERE t1.sampleid = tt.sampleid AND t1.sessionid = tt.sessionid); grant select on 
all tables in schema dbe_perf to public; diff --git a/src/common/backend/catalog/pg_builtin_proc.cpp b/src/common/backend/catalog/pg_builtin_proc.cpp index 8bbce7618..c7b525d28 100755 --- a/src/common/backend/catalog/pg_builtin_proc.cpp +++ b/src/common/backend/catalog/pg_builtin_proc.cpp @@ -28,6 +28,7 @@ #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "db4ai/predict_by.h" +#include "db4ai/explain_model.h" #include "access/transam.h" #include "storage/smgr/segment.h" #include "utils/fmgroids.h" diff --git a/src/common/backend/catalog/pg_conversion.cpp b/src/common/backend/catalog/pg_conversion.cpp index 568cadaf1..dba951ed2 100644 --- a/src/common/backend/catalog/pg_conversion.cpp +++ b/src/common/backend/catalog/pg_conversion.cpp @@ -182,7 +182,7 @@ Oid FindDefaultConversion(Oid name_space, int32 for_encoding, int32 to_encoding) CONDEFAULT, ObjectIdGetDatum(name_space), Int32GetDatum(for_encoding), Int32GetDatum(to_encoding)); for (i = 0; i < catlist->n_members; i++) { - tuple = &catlist->members[i]->tuple; + tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); body = (Form_pg_conversion)GETSTRUCT(tuple); if (body->condefault) { proc = body->conproc; diff --git a/src/common/backend/catalog/pg_enum.cpp b/src/common/backend/catalog/pg_enum.cpp index 525bdb449..73188a7cd 100644 --- a/src/common/backend/catalog/pg_enum.cpp +++ b/src/common/backend/catalog/pg_enum.cpp @@ -220,7 +220,7 @@ restart: existing = (HeapTuple*)palloc(nelems * sizeof(HeapTuple)); for (i = 0; i < nelems; i++) { /* Sort the existing enum lable */ - existing[i] = &(list->members[i]->tuple); + existing[i] = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i); } qsort(existing, nelems, sizeof(HeapTuple), sort_order_cmp); } @@ -294,7 +294,7 @@ restart: RenumberEnumType(pg_enum, existing, nelems); /* Clean up and start over */ pfree_ext(existing); - ReleaseCatCacheList(list); + ReleaseSysCacheList(list); goto restart; } @@ -396,7 +396,7 @@ restart: /* Done with info about existing members */ pfree_ext(existing); - ReleaseCatCacheList(list); + ReleaseSysCacheList(list); /* Create the new pg_enum entry */ errno_t rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); @@ -454,7 +454,7 @@ void RenameEnumLabel(Oid enumTypeOid, const char* oldVal, const char* newVal) * prefer a friendlier error message.) 
*/ for (i = 0; i < nelems; i++) { - enum_tup = &(list->members[i]->tuple); + enum_tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(list, i); en = (Form_pg_enum)GETSTRUCT(enum_tup); if (strcmp(NameStr(en->enumlabel), oldVal) == 0) @@ -464,7 +464,7 @@ void RenameEnumLabel(Oid enumTypeOid, const char* oldVal, const char* newVal) } if (!old_tup) { - ReleaseCatCacheList(list); + ReleaseSysCacheList(list); heap_close(pg_enum, RowExclusiveLock); ereport( @@ -472,7 +472,7 @@ void RenameEnumLabel(Oid enumTypeOid, const char* oldVal, const char* newVal) } if (found_new) { - ReleaseCatCacheList(list); + ReleaseSysCacheList(list); heap_close(pg_enum, RowExclusiveLock); ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("enum label \"%s\" already exists", newVal))); @@ -482,7 +482,7 @@ void RenameEnumLabel(Oid enumTypeOid, const char* oldVal, const char* newVal) enum_tup = heap_copytuple(old_tup); en = (Form_pg_enum)GETSTRUCT(enum_tup); - ReleaseCatCacheList(list); + ReleaseSysCacheList(list); /* Update the pg_enum entry */ (void)namestrcpy(&en->enumlabel, newVal); diff --git a/src/common/backend/catalog/pg_hashbucket.cpp b/src/common/backend/catalog/pg_hashbucket.cpp index cdb2b26f2..29b260432 100755 --- a/src/common/backend/catalog/pg_hashbucket.cpp +++ b/src/common/backend/catalog/pg_hashbucket.cpp @@ -497,7 +497,9 @@ Partition bucketGetPartition(Partition part, int2 bucketid) Partition bucket = NULL; MemoryContext oldcxt; errno_t rc; - + if (!IsBootstrapProcessingMode()) { + ResourceOwnerEnlargeFakepartRefs(t_thrd.utils_cxt.CurrentResourceOwner); + } oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); bucket = (Partition)palloc0(sizeof(PartitionData)); diff --git a/src/common/backend/catalog/pg_job.cpp b/src/common/backend/catalog/pg_job.cpp index b6b6207d6..dddee626c 100644 --- a/src/common/backend/catalog/pg_job.cpp +++ b/src/common/backend/catalog/pg_job.cpp @@ -439,6 +439,46 @@ void update_pg_job_dbname(Oid jobid, const char* dbname) heap_close(job_relation, RowExclusiveLock); } +void update_pg_job_username(Oid jobid, const char* username) +{ + Relation job_relation = NULL; + HeapTuple tup = NULL; + HeapTuple newtuple = NULL; + Datum values[Natts_pg_job]; + bool nulls[Natts_pg_job]; + bool replaces[Natts_pg_job]; + errno_t rc = 0; + + rc = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check(rc, "\0", "\0"); + + rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); + securec_check_c(rc, "\0", "\0"); + + rc = memset_s(replaces, sizeof(replaces), false, sizeof(replaces)); + securec_check_c(rc, "\0", "\0"); + + replaces[Anum_pg_job_log_user - 1] = true; + values[Anum_pg_job_log_user - 1] = CStringGetDatum(username); + + replaces[Anum_pg_job_nspname - 1] = true; + values[Anum_pg_job_nspname - 1] = CStringGetDatum(username); + + job_relation = heap_open(PgJobRelationId, RowExclusiveLock); + + tup = get_job_tup(jobid); + + newtuple = heap_modify_tuple(tup, RelationGetDescr(job_relation), values, nulls, replaces); + + simple_heap_update(job_relation, &newtuple->t_self, newtuple); + + CatalogUpdateIndexes(job_relation, newtuple); + ReleaseSysCache(tup); + heap_freetuple_ext(newtuple); + + heap_close(job_relation, RowExclusiveLock); +} + /* * Description: Update job info to pg_job according to job execute status. 
* diff --git a/src/common/backend/catalog/pg_job_proc.cpp b/src/common/backend/catalog/pg_job_proc.cpp index a3a6e04a8..d4a2546f0 100644 --- a/src/common/backend/catalog/pg_job_proc.cpp +++ b/src/common/backend/catalog/pg_job_proc.cpp @@ -57,6 +57,7 @@ #include "catalog/pg_authid.h" #include "catalog/pg_database.h" #include "catalog/gs_job_argument.h" +#include "catalog/gs_job_attribute.h" #include "fmgr.h" #include "utils/syscache.h" #include "utils/timestamp.h" @@ -135,6 +136,21 @@ Datum dbe_insert_pg_job_proc(Datum job_id, Datum job_action, const Datum job_nam return job_id; } +int4 get_job_id_from_pg_job(Datum job_name) +{ + Relation pg_job_rel = heap_open(PgJobRelationId, AccessShareLock); + HeapTuple tuple = search_from_pg_job(pg_job_rel, job_name); + int4 job_id = 0; + if (tuple != NULL) { + bool isnull = false; + job_id = heap_getattr(tuple, Anum_pg_job_job_id, pg_job_rel->rd_att, &isnull); + heap_freetuple_ext(tuple); + } + + heap_close(pg_job_rel, AccessShareLock); + return job_id; +} + /* * @brief search_from_pg_job_proc_no_exception * @@ -142,14 +158,23 @@ Datum dbe_insert_pg_job_proc(Datum job_id, Datum job_action, const Datum job_nam * @param job_name * @return HeapTuple */ -static HeapTuple search_from_pg_job_proc_no_exception(Relation rel, Datum job_name) +HeapTuple search_from_pg_job_proc_no_exception(Relation rel, Datum job_name) { - ScanKeyInfo scan_key_info; - scan_key_info.attribute_value = job_name; - scan_key_info.attribute_number = Anum_pg_job_proc_job_name; - scan_key_info.procedure = F_TEXTEQ; - List *tuples = search_by_sysscan_1(rel, &scan_key_info); + int4 job_id = get_job_id_from_pg_job(job_name); + ScanKeyInfo scan_key_info1; + scan_key_info1.attribute_value = job_name; + scan_key_info1.attribute_number = Anum_pg_job_proc_job_name; + scan_key_info1.procedure = F_TEXTEQ; + ScanKeyInfo scan_key_info2; + scan_key_info2.attribute_value = job_id; + scan_key_info2.attribute_number = Anum_pg_job_proc_job_id; + scan_key_info2.procedure = F_INT4EQ; + List *tuples = search_by_sysscan_2(rel, &scan_key_info1, &scan_key_info2); + if (tuples == NIL) { + return NULL; + } if (list_length(tuples) != 1) { + list_free_deep(tuples); return NULL; } HeapTuple tuple = (HeapTuple)linitial(tuples); @@ -187,6 +212,7 @@ void lookup_pg_job_proc(Datum name, Datum *job_id, Datum *job_action) bool isnull = false; Relation rel = heap_open(PgJobProcRelationId, AccessShareLock); HeapTuple tuple = search_from_pg_job_proc(rel, name); + /* integer job id */ if (job_id != NULL) { *job_id = heap_getattr(tuple, Anum_pg_job_proc_job_id, rel->rd_att, &isnull); @@ -204,6 +230,7 @@ void lookup_pg_job_proc(Datum name, Datum *job_id, Datum *job_action) *job_action = PointerGetDatum(PG_DETOAST_DATUM_COPY(job_action_src)); } } + heap_freetuple_ext(tuple); heap_close(rel, AccessShareLock); } diff --git a/src/common/backend/catalog/pg_object.cpp b/src/common/backend/catalog/pg_object.cpp index b933653eb..7f90f0bfd 100644 --- a/src/common/backend/catalog/pg_object.cpp +++ b/src/common/backend/catalog/pg_object.cpp @@ -36,6 +36,7 @@ #include "utils/rel.h" #include "miscadmin.h" #include "catalog/index.h" +#include "utils/knl_relcache.h" /* * @Description: Insert a new record to pg_object. 
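
Note on the catalog-cache changes in the pg_conversion.cpp and pg_enum.cpp hunks above: they all apply the same mechanical substitution — list members are no longer dereferenced via `&catlist->members[i]->tuple` but fetched through `t_thrd.lsc_cxt.FetchTupleFromCatCList()`, and lists are released with `ReleaseSysCacheList()` instead of `ReleaseCatCacheList()`, so the same code works whether the cache is shared or thread-local. A minimal sketch of the resulting access pattern, condensed from the `FindDefaultConversion` hunk (the wrapper function name here is illustrative only, and the headers normally included by pg_conversion.cpp, such as utils/syscache.h and catalog/pg_conversion.h, are assumed):

```
/*
 * Sketch only: the catalog-cache list access pattern this patch converges on.
 * The function name is hypothetical; the body condenses the hunk above.
 */
static Oid LookupDefaultConversionProc(Oid nameSpace, int32 forEncoding, int32 toEncoding)
{
    CatCList* catlist = SearchSysCacheList3(CONDEFAULT,
        ObjectIdGetDatum(nameSpace), Int32GetDatum(forEncoding), Int32GetDatum(toEncoding));
    Oid proc = InvalidOid;

    for (int i = 0; i < catlist->n_members; i++) {
        /* accessor resolves the tuple for both shared and thread-local caches */
        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
        Form_pg_conversion body = (Form_pg_conversion)GETSTRUCT(tuple);
        if (body->condefault) {
            proc = body->conproc;
            break;
        }
    }

    /* pairs with SearchSysCacheList*; routes to the right cache implementation */
    ReleaseSysCacheList(catlist);
    return proc;
}
```
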
@@ -94,13 +95,13 @@ void CreatePgObject(Oid objectOid, PgObjectType objectType, Oid creator, const P nulls[Anum_pg_object_mtime - 1] = true; } - if (objectOpt.hasCreatecsn && (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM)) { + if (objectOpt.hasCreatecsn && (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM)) { values[Anum_pg_object_createcsn - 1] = UInt64GetDatum(csn); } else { nulls[Anum_pg_object_createcsn - 1] = true; } - if (objectOpt.hasChangecsn && (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM)) { + if (objectOpt.hasChangecsn && (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM)) { values[Anum_pg_object_changecsn - 1] = UInt64GetDatum(csn); } else { nulls[Anum_pg_object_changecsn - 1] = true; @@ -159,7 +160,7 @@ void DeletePgObject(Oid objectOid, PgObjectType objectType) * @in relationChangecsn: the changecsn of table recorded in pg_object. * Returns: CommitSeqNo */ -static CommitSeqNo GetMaxChangecsn(List* indexIds, CommitSeqNo relationChangecsn) +static CommitSeqNo GetMaxChangecsn(Relation rel, List* indexIds, CommitSeqNo relationChangecsn) { HeapTuple tup = NULL; TupleDesc tupdesc = NULL; @@ -213,7 +214,7 @@ void GetObjectCSN(Oid objectOid, Relation userRel, PgObjectType objectType, Obje The pg_object system table does not hold objects generated during the database init process,so the object's createcsn and changecsn is zero during the process. */ - if (IsInitdb || !(t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM)) { + if (IsInitdb || !(t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM)) { return; } /* @@ -248,7 +249,7 @@ void GetObjectCSN(Oid objectOid, Relation userRel, PgObjectType objectType, Obje bool hasindex = userRel->rd_rel->relhasindex; if (hasindex) { List* indexIds = RelationGetIndexList(userRel); - csnInfo->changecsn = GetMaxChangecsn(indexIds, csnInfo->changecsn); + csnInfo->changecsn = GetMaxChangecsn(userRel, indexIds, csnInfo->changecsn); } } ReleaseSysCache(tup); @@ -281,6 +282,9 @@ bool CheckObjectExist(Oid objectOid, PgObjectType objectType) case OBJECT_TYPE_PROC: tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(objectOid)); break; + case OBJECT_TYPE_PKGSPEC: + tup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(objectOid)); + break; default: return false; } @@ -331,7 +335,7 @@ void UpdatePgObjectMtime(Oid objectOid, PgObjectType objectType) securec_check(rc, "\0", "\0"); replaces[Anum_pg_object_mtime - 1] = true; values[Anum_pg_object_mtime - 1] = nowtime; - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { if (!u_sess->exec_cxt.isExecTrunc) { replaces[Anum_pg_object_changecsn - 1] = true; values[Anum_pg_object_changecsn - 1] = UInt64GetDatum(csn); @@ -342,7 +346,7 @@ void UpdatePgObjectMtime(Oid objectOid, PgObjectType objectType) CatalogUpdateIndexes(relation, newtuple); ReleaseSysCache(tup); heap_freetuple_ext(newtuple); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { if (!u_sess->exec_cxt.isExecTrunc && (objectType == OBJECT_TYPE_RELATION || objectType == OBJECT_TYPE_INDEX)) { Relation userRel = RelationIdGetRelation(objectOid); @@ -367,7 +371,7 @@ void UpdatePgObjectChangecsn(Oid objectOid, PgObjectType objectType) Relation relation = NULL; HeapTuple tup = NULL; CommitSeqNo csn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; - if (!(t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) 
|| IsInitdb) { + if (!(t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) || IsInitdb) { return; } if (!CheckObjectExist(objectOid, objectType)) { diff --git a/src/common/backend/catalog/pg_partition.cpp b/src/common/backend/catalog/pg_partition.cpp index ef5ddb688..dc9747fa2 100644 --- a/src/common/backend/catalog/pg_partition.cpp +++ b/src/common/backend/catalog/pg_partition.cpp @@ -203,8 +203,6 @@ void ExceptionHandlerForPartition(GetPartitionOidArgs *args, Oid partitionOid, b */ static Oid partitionGetPartitionOid(GetPartitionOidArgs* args, LOCKMODE lockMode, bool missingOk, bool noWait) { - /* remember SharedInvalidMessageCounter */ - uint64 invalCount = 0; Oid partitionOid = InvalidOid; Oid partitionOldOid = InvalidOid; bool retry = false; @@ -225,13 +223,18 @@ static Oid partitionGetPartitionOid(GetPartitionOidArgs* args, LOCKMODE lockMode * by calling AcceptInvalidationMessages() before beginning this loop, but * that would add a significant amount overhead, so for now we don't. */ + uint64 sess_inval_count; + uint64 thrd_inval_count = 0; for (;;) { /* * Remember this value, so that, after looking up the partition name * and locking its OID, we can check whether any invalidation messages * have been processed that might require a do-over. */ - invalCount = u_sess->inval_cxt.SharedInvalidMessageCounter; + sess_inval_count = u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) { + thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter; + } /* get partition oid */ if (args->givenPartitionName) { @@ -318,8 +321,15 @@ static Oid partitionGetPartitionOid(GetPartitionOidArgs* args, LOCKMODE lockMode /* * If no invalidation message were processed, we're done! */ - if (u_sess->inval_cxt.SharedInvalidMessageCounter == invalCount) { - break; + if (EnableLocalSysCache()) { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter && + thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) { + break; + } + } else { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter) { + break; + } } /* @@ -427,8 +437,8 @@ Oid partitionNameGetPartitionOid(Oid partitionedRelationOid, const char* partiti * @Return: partition oid * @See also: partitionNameGetPartitionOid() */ -Oid partitionValuesGetPartitionOid( - Relation rel, List* partKeyValueList, LOCKMODE lockMode, bool topClosed, bool missingOk, bool noWait) +Oid partitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMODE lockMode, bool topClosed, + bool missingOk, bool noWait) { GetPartitionOidArgs args; /* get partition oid from given values */ @@ -448,6 +458,41 @@ Oid partitionValuesGetPartitionOid( return partitionGetPartitionOid(&args, lockMode, missingOk, noWait); } +Oid subpartitionValuesGetSubpartitionOid(Relation rel, List *partKeyValueList, List *subpartKeyValueList, + LOCKMODE lockMode, bool topClosed, bool missingOk, bool noWait, Oid *partOidForSubPart) +{ + GetPartitionOidArgs args; + /* get partition oid from given values */ + args.givenPartitionName = false; + args.partitionedRel = rel; + args.partKeyValueList = partKeyValueList; + args.partitionedRelOid = rel->rd_id; + args.topClosed = topClosed; + /* the following arguments is not used. 
*/ + args.partitionName = NULL; + args.objectType = PART_OBJ_TYPE_TABLE_PARTITION; + args.callback = NULL; + args.callbackArgs = NULL; + args.callbackObjLockMode = NoLock; + + *partOidForSubPart = partitionGetPartitionOid(&args, lockMode, missingOk, noWait); + if (!OidIsValid(*partOidForSubPart)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition which owns the subpartition is missing"), + errdetail("N/A"), + errcause("Maybe the subpartition table is dropped"), + erraction("Check system table 'pg_partition' for more information"))); + } + + Partition part = partitionOpen(rel, *partOidForSubPart, lockMode); + Relation partrel = partitionGetRelation(rel, part); + Oid subpartOid = partitionValuesGetPartitionOid(partrel, subpartKeyValueList, lockMode, true, true, false); + releaseDummyRelation(&partrel); + partitionClose(rel, part, NoLock); + + return subpartOid; +} + /* * @@GaussDB@@ * Target : data partition @@ -647,7 +692,7 @@ static Oid getPartitionIndexFormData(Oid indexid, Oid partitionid, Form_pg_parti errhint("In redistribution, local parititon index maybe not exists."))); return InvalidOid; } - tuple = &catlist->members[0]->tuple; + tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, 0); partitionForm = (Form_pg_partition)GETSTRUCT(tuple); result = HeapTupleGetOid(tuple); if (!onlyOid) { @@ -698,6 +743,34 @@ Oid getPartitionIndexTblspcOid(Oid indexid, Oid partitionid) return tblspcOid; } +/* + * @Description: Given OIDs of index relation and heap partition, get relname of index partition. + * @IN indexid: given index relation's OID + * @IN partitionid: given partition's OID + * @Return: relname Oid of index partition. + * @See also: + */ +char* getPartitionIndexName(Oid indexid, Oid partitionid) +{ + FormData_pg_partition indexPartFormData; + Oid indexPartOid = InvalidOid; + char* name = NULL; + + errno_t rc = memset_s(&indexPartFormData, PARTITION_TUPLE_SIZE, 0, PARTITION_TUPLE_SIZE); + securec_check(rc, "\0", "\0"); + + indexPartOid = getPartitionIndexFormData(indexid, partitionid, &indexPartFormData); + if (OidIsValid(indexPartOid)) { + name = pstrdup(NameStr(indexPartFormData.relname)); + } else { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("Invalid Oid of local index %u on the partition %u.", indexid, partitionid))); + } + + return name; +} + /* * from index partition oid, get heap partition oid. 
*/ @@ -949,6 +1022,125 @@ List* RelationGetSubPartitionList(Relation relation, LOCKMODE lockmode) return subPartList; } +/* give one partitioned relation, return a list, consisting of relname of all its partition and subpartition */ +List* RelationGetPartitionNameList(Relation relation) +{ + List* partnameList = NIL; + List *partTupleList = NIL; + if (!RelationIsPartitioned(relation)) { + return partnameList; + } + + if (RelationIsPartitionOfSubPartitionTable(relation)) { /* a partition of subpartition */ + partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, relation->rd_id); + Assert(PointerIsValid(partTupleList)); + + ListCell *partCell = NULL; + Form_pg_partition partitionTuple = NULL; + foreach (partCell, partTupleList) { + partitionTuple = (Form_pg_partition)GETSTRUCT((HeapTuple)lfirst(partCell)); + partnameList = lappend(partnameList, pstrdup(partitionTuple->relname.data)); + } + list_free_deep(partTupleList); + } else { /* a relation */ + partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relation->rd_id); + Assert(PointerIsValid(partTupleList)); + + ListCell *partCell = NULL; + Form_pg_partition partitionTuple = NULL; + foreach (partCell, partTupleList) { + partitionTuple = (Form_pg_partition)GETSTRUCT((HeapTuple)lfirst(partCell)); + partnameList = lappend(partnameList, pstrdup(partitionTuple->relname.data)); + if (RelationIsSubPartitioned(relation)) { + List *subpartTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, + HeapTupleGetOid((HeapTuple)lfirst(partCell))); + + ListCell *subpartCell = NULL; + Form_pg_partition subpartitionTuple = NULL; + foreach (subpartCell, subpartTupleList) { + subpartitionTuple = (Form_pg_partition)GETSTRUCT((HeapTuple)lfirst(subpartCell)); + partnameList = lappend(partnameList, pstrdup(subpartitionTuple->relname.data)); + } + list_free_deep(subpartTupleList); + } + } + list_free_deep(partTupleList); + } + + return partnameList; +} + +void RelationGetSubpartitionInfo(Relation relation, char *subparttype, List **subpartKeyPosList, + int2vector **subpartitionKey) +{ + if (!RelationIsSubPartitioned(relation)) { + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("Un-support feature"), + errdetail("Can not get subpartition information for NON-SUBPARTITIONED table"), + errcause("Try get the subpartition information on a NON-SUBPARTITIONED object"), + erraction("Check system table 'pg_partition' for more information"))); + } + + Oid partid = InvalidOid; + ArrayType* pos_array = NULL; + Datum pos_raw = (Datum)0; + bool isNull = false; + + Relation pg_partition = heap_open(PartitionRelationId, AccessShareLock); + ScanKeyData key[2]; + SysScanDesc scan = NULL; + HeapTuple tuple = NULL; + ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, + CharGetDatum(PART_OBJ_TYPE_TABLE_PARTITION)); + ScanKeyInit(&key[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relation->rd_id)); + scan = systable_beginscan(pg_partition, PartitionParentOidIndexId, true, NULL, 2, key); + if (HeapTupleIsValid((tuple = systable_getnext(scan)))) { + partid = HeapTupleGetOid(tuple); + pos_raw = heap_getattr(tuple, Anum_pg_partition_partkey, RelationGetDescr(pg_partition), &isNull); + } + systable_endscan(scan); + + if (isNull) { + heap_close(pg_partition, AccessShareLock); + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("Null partition key value for relation \"%s\"", RelationGetRelationName(relation)), + errdetail("N/A"), + 
errcause("Unexpected partition tuple in pg_partition for this relation"), + erraction("Check system table 'pg_partition' for more information"))); + } + pos_array = DatumGetArrayTypeP(pos_raw); + int ncolumn = ARR_DIMS(pos_array)[0]; + int16* attnums = (int16*)ARR_DATA_PTR(pos_array); + Assert(ncolumn > 0); + if (PointerIsValid(subpartitionKey)) { + *subpartitionKey = buildint2vector(attnums, ncolumn); + } + if (PointerIsValid(subpartKeyPosList)) { + for (int i = 0; i < ncolumn; i++) { + *subpartKeyPosList = lappend_int(*subpartKeyPosList, (int)(attnums[i]) - 1); + } + } + + if (PointerIsValid(subparttype)) { + ScanKeyData subkey[2]; + SysScanDesc subscan = NULL; + HeapTuple subtuple = NULL; + ScanKeyInit(&subkey[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, + CharGetDatum(PART_OBJ_TYPE_TABLE_SUB_PARTITION)); + ScanKeyInit(&subkey[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partid)); + subscan = systable_beginscan(pg_partition, PartitionParentOidIndexId, true, NULL, 2, subkey); + if (HeapTupleIsValid((subtuple = systable_getnext(subscan)))) { + *subparttype = ((Form_pg_partition)GETSTRUCT(subtuple))->partstrategy; + } else { + *subparttype = PART_STRATEGY_INVALID; + } + systable_endscan(subscan); + } + + heap_close(pg_partition, AccessShareLock); +} + // give one partitioned index relation, // return a list, consisting of oid of all its index partition List* indexGetPartitionOidList(Relation indexRelation) @@ -1006,6 +1198,18 @@ Relation SubPartitionGetRelation(Relation heap, Partition subPart, LOCKMODE lock return subPartRel; } +Partition SubPartitionOidGetPartition(Relation rel, Oid subPartOid, LOCKMODE lockmode) +{ + Oid parentOid = partid_get_parentid(subPartOid); + Partition part = partitionOpen(rel, parentOid, lockmode); + Relation partRel = partitionGetRelation(rel, part); + Partition subPart = partitionOpen(partRel, subPartOid, lockmode); + releaseDummyRelation(&partRel); + partitionClose(rel, part, NoLock); + + return subPart; +} + Relation SubPartitionOidGetParentRelation(Relation rel, Oid subPartOid, LOCKMODE lockmode) { Oid parentOid = partid_get_parentid(subPartOid); @@ -1200,3 +1404,24 @@ HeapTuple searchPgPartitionByParentIdCopy(char parttype, Oid parentId) return tuple; } + +/* + * @@GaussDB@@ + * Target : GetBaseRelOid + * Brief : + * Description : For partition, return parentId, For indexpartition, return parentId, + * For subpartition, return grandparentId, For indexsubpartition, return parentId, + * + * Notes : + */ +Oid GetBaseRelOidOfParition(Relation relation) +{ + Assert(RelationIsPartition(relation)); + + if (RelationIsSubPartitionOfSubPartitionTable(relation)) { + return relation->grandparentId; + } + + return relation->parentId; +} + diff --git a/src/common/backend/catalog/pg_proc.cpp b/src/common/backend/catalog/pg_proc.cpp index 76735029d..49efdcc5e 100644 --- a/src/common/backend/catalog/pg_proc.cpp +++ b/src/common/backend/catalog/pg_proc.cpp @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------- * * pg_proc.cpp - * routines to support manipulation of the pg_proc relation + * routines to support manipulation of the pg_proc relation * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * src/common/backend/catalog/pg_proc.cpp + * src/common/backend/catalog/pg_proc.cpp * * 
------------------------------------------------------------------------- */ @@ -19,6 +19,7 @@ #include "access/transam.h" #include "access/xact.h" #include "catalog/dependency.h" +#include "catalog/gs_encrypted_proc.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" @@ -30,6 +31,7 @@ #include "catalog/gs_encrypted_proc.h" #include "catalog/pg_proc_fn.h" #include "catalog/pg_type.h" +#include "client_logic/client_logic_proc.h" #include "commands/defrem.h" #include "commands/user.h" #include "commands/trigger.h" @@ -40,6 +42,7 @@ #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_type.h" +#include "parser/parse_coerce.h" #include "tcop/pquery.h" #include "tcop/tcopprot.h" #include "utils/acl.h" @@ -117,29 +120,23 @@ static void checkFunctionConflicts(HeapTuple oldtup, const char* procedureName, bool isAgg, bool isWindowFunc); static bool user_define_func_check(Oid languageId, const char* probin, char** absolutePath, CFunType* function_type); static const char* get_file_name(const char* filePath, CFunType function_type); -static int get_decimal_from_hex(char hex); + +#ifndef ENABLE_MULTIPLE_NODES +static void CheckInParameterConflicts(CatCList* catlist, const char* procedureName, oidvector* inpara_type, + oidvector* proc_para_type, Oid languageId, bool isOraStyle, bool replace); +#endif static Acl* ProcAclDefault(Oid ownerId) { - AclMode world_default; AclMode owner_default; int nacl = 0; Acl* acl = NULL; AclItem* aip = NULL; - world_default = ACL_NO_RIGHTS; owner_default = ACL_ALL_RIGHTS_FUNCTION; - if (world_default != ACL_NO_RIGHTS) - nacl++; if (owner_default != ACL_NO_RIGHTS) nacl++; acl = allocacl(nacl); aip = ACL_DAT(acl); - if (world_default != ACL_NO_RIGHTS) { - aip->ai_grantee = ACL_ID_PUBLIC; - aip->ai_grantor = ownerId; - ACLITEM_SET_PRIVS_GOPTIONS(*aip, world_default, ACL_NO_RIGHTS); - aip++; - } if (owner_default != ACL_NO_RIGHTS) { aip->ai_grantee = ownerId; @@ -244,14 +241,12 @@ static char* get_temp_library(bool absolute_path) "%s/pg_plugin/%ld%lu", t_thrd.proc_cxt.pkglib_path, GetCurrentTransactionStartTimestamp(), - (GTM_MODE) ? (GetCurrentTransactionId()) : - (isExecCN ? GetCurrentTransactionId() : t_thrd.xact_cxt.cn_xid)); + (isExecCN ? GetCurrentTransactionId() : t_thrd.xact_cxt.cn_xid)); } else { appendStringInfo(&temp_file_strinfo, "$libdir/pg_plugin/%ld%lu", GetCurrentTransactionStartTimestamp(), - (GTM_MODE) ? (GetCurrentTransactionId()) : - (isExecCN ? GetCurrentTransactionId() : t_thrd.xact_cxt.cn_xid)); + (isExecCN ? GetCurrentTransactionId() : t_thrd.xact_cxt.cn_xid)); } return temp_file_strinfo.data; @@ -561,10 +556,12 @@ static char* getCFunProbin(const char* probin, Oid procNamespace, Oid proowner, * @in procNamespace - function's namespace oid * @in package - is a package function or not * @in packageid - is package oid + * @in isOraStyle: Is A db style. 
* @return - new function conflicts old functions or not */ -static bool checkPackageFunctionConflicts( - const char* procedureName, Datum allParameterTypes, oidvector* parameterTypes, Oid procNamespace, bool package, Oid propackageid) +static bool checkPackageFunctionConflicts(const char* procedureName, + Datum allParameterTypes, oidvector* parameterTypes, Datum parameterModes, + Oid procNamespace, bool package, Oid propackageid, Oid languageId, bool isOraStyle, bool replace) { int inpara_count; int allpara_count = 0; @@ -574,6 +571,9 @@ static bool checkPackageFunctionConflicts( oidvector* inpara_type = NULL; Oid* p_argtypes = NULL; HeapTuple proctup = NULL; +#ifndef ENABLE_MULTIPLE_NODES + bool enable_outparam_override = enable_out_param_override(); +#endif errno_t rc = EOK; if (allParameterTypes != PointerGetDatum(NULL)) { arr = DatumGetArrayTypeP(allParameterTypes); @@ -594,7 +594,7 @@ static bool checkPackageFunctionConflicts( /* search the function */ /* Search syscache by name only */ - CatCList *catlist = NULL; + CatCList *catlist = NULL; #ifndef ENABLE_MULTIPLE_NODES if (t_thrd.proc->workingVersionNum < 92470) { catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(procedureName)); @@ -604,8 +604,9 @@ static bool checkPackageFunctionConflicts( #else catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(procedureName)); #endif + for (int i = 0; i < catlist->n_members; i++) { - proctup = &catlist->members[i]->tuple; + proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Oid* argtypes = NULL; Datum proallargtypes; bool isNull = false; @@ -613,8 +614,9 @@ static bool checkPackageFunctionConflicts( Form_pg_proc pform = NULL; oidvector* proc_allpara_type = NULL; oidvector* proc_para_type = NULL; + Datum pro_arg_modes = 0; bool result1 = false; - bool result3 = false; + bool result2 = false; if (HeapTupleIsValid(proctup)) { pform = (Form_pg_proc)GETSTRUCT(proctup); /* compare function's namespace */ @@ -630,7 +632,7 @@ static bool checkPackageFunctionConflicts( ispackage = DatumGetBool(propackage); /* only check package function */ if (ispackage != package) { - ReleaseCatCacheList(catlist); + ReleaseSysCacheList(catlist); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Do not allow package function overload not package function."))); @@ -644,7 +646,7 @@ static bool checkPackageFunctionConflicts( arr = DatumGetArrayTypeP(proallargtypes); /* ensure not toasted */ allnumargs = ARR_DIMS(arr)[0]; if (ARR_NDIM(arr) != 1 || allnumargs < 0 || ARR_HASNULL(arr) || ARR_ELEMTYPE(arr) != OIDOID) { - ReleaseCatCacheList(catlist); + ReleaseSysCacheList(catlist); ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("proallargtypes is not a 1-D Oid array"))); } @@ -657,6 +659,12 @@ static bool checkPackageFunctionConflicts( pfree_ext(argtypes); } + proc_para_type = ProcedureGetArgTypes(proctup); +#ifndef ENABLE_MULTIPLE_NODES + CheckInParameterConflicts(catlist, procedureName, inpara_type, proc_para_type, languageId, isOraStyle, + replace); +#endif + /* No need to compare param type if param count is not same */ if (pform->pronargs != allpara_count && pform->pronargs != inpara_count && allnumargs != allpara_count && allnumargs != inpara_count) { @@ -666,23 +674,33 @@ static bool checkPackageFunctionConflicts( continue; } - proc_para_type = ProcedureGetArgTypes(proctup); - - /* old function in param type compare new function all param type */ - if (allpara_type != NULL) { + pro_arg_modes = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proargmodes, 
&isNull); +#ifndef ENABLE_MULTIPLE_NODES + if (!enable_outparam_override) { result1 = DatumGetBool( - DirectFunctionCall2(oidvectoreq, PointerGetDatum(proc_para_type), PointerGetDatum(allpara_type))); + DirectFunctionCall2(oidvectoreq, PointerGetDatum(proc_para_type), PointerGetDatum(inpara_type))); + } +#endif + + if (proc_allpara_type != NULL && allpara_type != NULL) { + /* old function all param type compare new function all param type */ + result2 = DatumGetBool(DirectFunctionCall2( + oidvectoreq, PointerGetDatum(allpara_type), PointerGetDatum(proc_allpara_type))); } - if (proc_allpara_type != NULL) { - if (allpara_type != NULL) { - /* old function all param type compare new function all param type */ - result3 = DatumGetBool(DirectFunctionCall2( - oidvectoreq, PointerGetDatum(allpara_type), PointerGetDatum(proc_allpara_type))); + result = result1 || result2; +#ifndef ENABLE_MULTIPLE_NODES + if (result && IsPlpgsqlLanguageOid(languageId) && !OidIsValid(propackageid) && !isOraStyle) { + if (DatumGetPointer(pro_arg_modes) == NULL) { + result &= (DatumGetPointer(parameterModes) == NULL); + } else if (DatumGetPointer(parameterModes) == NULL) { + result = false; + } else { + result &= IsProArgModesEqual(parameterModes, pro_arg_modes); } } +#endif - result = result1 || result3; if (proc_allpara_type != NULL) { pfree_ext(proc_allpara_type); } @@ -698,7 +716,7 @@ static bool checkPackageFunctionConflicts( pfree_ext(allpara_type); } - ReleaseCatCacheList(catlist); + ReleaseSysCacheList(catlist); return result; } @@ -723,6 +741,7 @@ static void checkFunctionConflicts(HeapTuple oldtup, const char* procedureName, { Datum proargnames; bool isnull = false; + Oid origin_return_type; if (!replace) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), @@ -749,9 +768,13 @@ static void checkFunctionConflicts(HeapTuple oldtup, const char* procedureName, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("function \"%s\" is a masking function,it can not be changed", procedureName))); } - Oid origin_return_type = oldproc->prorettype; + origin_return_type = oldproc->prorettype; /* A db donot check function return type when replace */ if (!isOraStyle) { + /* + * For client logic type use original return type from gs_cl_proc + * and remove all data from gs_cl_proc + */ if(IsClientLogicType(oldproc->prorettype)) { Oid functionId = HeapTupleGetOid(oldtup); HeapTuple gs_oldtup = SearchSysCache1(GSCLPROCID, functionId); @@ -759,14 +782,15 @@ static void checkFunctionConflicts(HeapTuple oldtup, const char* procedureName, if (HeapTupleIsValid(gs_oldtup)) { Datum gs_ret_orig = SysCacheGetAttr(GSCLPROCID, gs_oldtup, Anum_gs_encrypted_proc_prorettype_orig, &isNull); - if(!isNull) { - origin_return_type = DatumGetObjectId(gs_ret_orig); + /* never should happen, since if the function return type was + client logic we must insert its original type on creation, but + since some old code might create by error functions that its + original return type is not saved, and for avoid undefined behaviour, + it is checked again. 
*/ + if (!isNull) { + origin_return_type = ObjectIdGetDatum(gs_ret_orig); } - Relation gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock); - deleteDependencyRecordsFor(ClientLogicProcId, HeapTupleGetOid(gs_oldtup), true); - simple_heap_delete(gs_rel, &gs_oldtup->t_self); - heap_close(gs_rel, RowExclusiveLock); - ReleaseSysCache(gs_oldtup); + delete_proc_client_info(gs_oldtup); } } /* @@ -1000,7 +1024,7 @@ static bool user_define_func_check(Oid languageId, const char* probin, char** ab } /* ---------------------------------------------------------------- - * ProcedureCreate + * ProcedureCreate * * Note: allParameterTypes, parameterModes, parameterNames, and proconfig * are either arrays of the proper types or NULL. We declare them Datum, @@ -1118,8 +1142,12 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage } } + bool existOutParam = false; if (allParameterTypes != PointerGetDatum(NULL)) { for (i = 0; i < allParamCount; i++) { + if (paramModes[i] == PROARGMODE_OUT || paramModes[i] == PROARGMODE_INOUT) { + existOutParam = true; + } if (paramModes == NULL || paramModes[i] == PROARGMODE_IN || paramModes[i] == PROARGMODE_VARIADIC) continue; /* ignore input-only params */ @@ -1145,7 +1173,7 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage /* * Do not allow polymorphic return type unless at least one input argument - * is polymorphic. ANYRANGE return type is even stricter: must have an + * is polymorphic. ANYRANGE return type is even stricter: must have an * ANYRANGE input (since we can't deduce the specific range type from * ANYELEMENT). Also, do not allow return type INTERNAL unless at least * one input argument is INTERNAL. @@ -1307,7 +1335,7 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage * generic arrays, but they support only one-dimensional arrays with no * nulls (and no null bitmap). 
*/ - oidvector* dummy = MakeMd5HashArgTypes((oidvector*)allParameterTypes); + oidvector* dummy = MakeMd5HashOids((oidvector*)allParameterTypes); values[Anum_pg_proc_allargtypes - 1] = PointerGetDatum(dummy); values[Anum_pg_proc_allargtypesext - 1] = PointerGetDatum(allParameterTypes); @@ -1418,18 +1446,9 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage } else { /* Check for pre-existing definition */ #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum < 92470) { - oldtup = SearchSysCache3(PROCNAMEARGSNSP, - PointerGetDatum(procedureName), - values[Anum_pg_proc_proargtypes - 1], - ObjectIdGetDatum(procNamespace)); - } else { - oldtup = SearchSysCache4(PROCALLARGS, - PointerGetDatum(procedureName), - values[Anum_pg_proc_allargtypes - 1], - ObjectIdGetDatum(procNamespace), - ObjectIdGetDatum(propackageid)); - } + Oid oldTupleOid = GetOldTupleOid(procedureName, parameterTypes, procNamespace, + propackageid, values, parameterModes); + oldtup = SearchSysCache1(PROCOID, ObjectIdGetDatum(oldTupleOid)); #else oldtup = SearchSysCache3(PROCNAMEARGSNSP, PointerGetDatum(procedureName), @@ -1437,7 +1456,49 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage ObjectIdGetDatum(procNamespace)); #endif } +#ifndef ENABLE_MULTIPLE_NODES + if (enable_out_param_override() && !u_sess->attr.attr_common.IsInplaceUpgrade && !IsInitdb && !proIsProcedure && + IsPlpgsqlLanguageOid(languageObjectId)) { + bool findOutParamFunc = false; + CatCList *catlist = NULL; + if (t_thrd.proc->workingVersionNum < 92470) { + catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(procedureName)); + } else { + catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(procedureName)); + } + for (int i = 0; i < catlist->n_members; ++i) { + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); + Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup); + bool isNull = false; + Datum packageOidDatum = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_packageid, &isNull); + Oid packageOid = InvalidOid; + if (!isNull) { + packageOid = DatumGetObjectId(packageOidDatum); + } + if (packageOid == propackageid && procform->pronamespace == procNamespace) { + isNull = false; + (void)SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_proallargtypes, &isNull); + if (!isNull) { + findOutParamFunc = true; + break; + } + } + } + ReleaseSysCacheList(catlist); + if (existOutParam) { + if (!HeapTupleIsValid(oldtup) && findOutParamFunc) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + (errmsg("\"%s\" functions with plpgsql language and out params are not supported Overloaded.", + procedureName), + errdetail("N/A."), + errcause("functions with plpgsql language and out params are not supported Overloaded."), + erraction("Drop function before create function.")))); + } + } + } +#endif if (HeapTupleIsValid(oldtup)) { /* There is one; okay to replace it? 
*/ bool isNull = false; @@ -1491,7 +1552,8 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage } else { /* checking for package function */ bool conflicts = - checkPackageFunctionConflicts(procedureName, allParameterTypes, parameterTypes, procNamespace, package , propackageid); + checkPackageFunctionConflicts(procedureName, allParameterTypes, parameterTypes, parameterModes, + procNamespace, package, propackageid, languageObjectId, isOraStyle, replace); if (conflicts) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1502,7 +1564,7 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage proacl = get_user_default_acl(ACL_OBJECT_FUNCTION, proowner, procNamespace); if (proacl != NULL) values[Anum_pg_proc_proacl - 1] = PointerGetDatum(proacl); - else if (PLSQL_SECURITY_DEFINER){ + else if (PLSQL_SECURITY_DEFINER && u_sess->attr.attr_common.upgrade_mode == 0){ values[Anum_pg_proc_proacl - 1] = PointerGetDatum(ProcAclDefault(proowner)); } else { nulls[Anum_pg_proc_proacl - 1] = true; @@ -1539,6 +1601,7 @@ Oid ProcedureCreate(const char* procedureName, Oid procNamespace, Oid propackage /* the 'shared dependencies' also change when update. */ deleteSharedDependencyRecordsFor(ProcedureRelationId, retval, 0); + (void) deleteDependencyRecordsFor(ClientLogicProcId, retval, true); /* send invalid message for for relation holding replaced function as trigger */ InvalidRelcacheForTriggerFunction(retval, ((Form_pg_proc)GETSTRUCT(tup))->prorettype); @@ -1774,9 +1837,9 @@ Datum fmgr_c_validator(PG_FUNCTION_ARGS) * @param[IN] pstate - parce state * @param[IN] left node * @param[IN] right node - * param[IN] ltypeid: left node type id - * param[IN] rtypeid : right node type id - * @return: void + * @param[IN] ltypeid: left node type id + * @param[IN] rtypeid : right node type id + * @return: NULL (to match signature of hook) */ Node *sql_create_proc_operator_ref(ParseState *pstate, Node *left, Node *right, Oid *ltypeid, Oid *rtypeid) { @@ -1806,7 +1869,8 @@ Node *sql_create_proc_operator_ref(ParseState *pstate, Node *left, Node *right, * we need to support type casting because the type may be downgraded (for example from double to int and * it will be truncated) */ - if (var->vartypmod == (int)*type) { + if (var->vartypmod == (int)*type || + can_coerce_type(1, (Oid*)&(var->vartypmod), (Oid*)type, COERCION_ASSIGNMENT)) { /* update the parameter data type to the column data type */ *type = var->vartype; /* update the data types in the parser info structure */ @@ -1850,8 +1914,8 @@ Datum fmgr_sql_validator(PG_FUNCTION_ARGS) bool replace = false; /* - * 3 means the number of arguments of function fmgr_sql_validator, while 'is_place' is the third one, - * and 2 is the postion of 'is_place' in PG_FUNCTION_ARGS + * 3 means the number of arguments of function fmgr_sql_validator, while 'is_replace' is the third one, + * and 2 is the position of 'is_replace' in PG_FUNCTION_ARGS */ if (PG_NARGS() >= 3) { replace = PG_GETARG_BOOL(2); @@ -1997,7 +2061,7 @@ static void sql_function_parse_error_callback(void* arg) /* * Adjust a syntax error occurring inside the function body of a CREATE - * FUNCTION or DO command. This can be used by any function validator or + * FUNCTION or DO command. This can be used by any function validator or * anonymous-block handler, not only for SQL-language functions. * It is assumed that the syntax error position is initially relative to the * function body string (as passed in). 
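
For reference, the invalidation re-check loop that the pg_partition.cpp hunk earlier in this patch adds to `partitionGetPartitionOid` can be read as the standalone loop below. Only the paired `SIMCounter` snapshots and the `EnableLocalSysCache()` branching come from the patch; the lookup/lock/unlock helpers are hypothetical placeholders standing in for the partition-specific steps:

```
/*
 * Sketch only: snapshot both invalidation counters, perform the lookup and
 * lock, and trust the result only if neither counter advanced meanwhile.
 * DoLookup/DoLockObject/DoUnlockObject are placeholders, not real APIs.
 */
static Oid LookupOidWithInvalRetry(void)
{
    for (;;) {
        uint64 sessCount = u_sess->inval_cxt.SIMCounter;
        uint64 thrdCount = EnableLocalSysCache() ?
            t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter : 0;

        Oid oid = DoLookup();   /* placeholder: e.g. partition name/values -> OID */
        DoLockObject(oid);      /* placeholder: may process invalidation messages */

        /* done only if no invalidation messages were processed in between */
        bool sessStable = (sessCount == u_sess->inval_cxt.SIMCounter);
        bool thrdStable = !EnableLocalSysCache() ||
                          (thrdCount == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter);
        if (sessStable && thrdStable) {
            return oid;
        }

        DoUnlockObject(oid);    /* placeholder: drop the lock and retry */
    }
}
```
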
If possible, we adjust the position @@ -2115,7 +2179,7 @@ static bool match_prosrc_to_literal(const char* prosrc, const char* literal, int /* * This implementation handles backslashes and doubled quotes in the - * string literal. It does not handle the SQL syntax for literals + * string literal. It does not handle the SQL syntax for literals * continued across line boundaries. * * We do the comparison a character at a time, not a byte at a time, so @@ -2361,90 +2425,6 @@ bool isSameParameterList(List* parameterList1, List* parameterList2) } return true; } -/* - * judage the two arglis is same or not - */ -bool isSameArgList(CreateFunctionStmt* stmt1, CreateFunctionStmt* stmt2) -{ - List* argList1 = stmt1->parameters; - List* argList2 = stmt2->parameters; - ListCell* cell = NULL; - int length1 = list_length(argList1); - int length2 = list_length(argList2); - - if (length1 != length2) { - return false; - } - FunctionParameter** arr1 = (FunctionParameter**)palloc0(length1 * sizeof(FunctionParameter*)); - FunctionParameter** arr2 = (FunctionParameter**)palloc0(length2 * sizeof(FunctionParameter*)); - int length = 0; - foreach(cell, argList1) { - arr1[length] = (FunctionParameter*)lfirst(cell); - length = length + 1; - } - length = 0; - foreach(cell, argList2) { - arr2[length] = (FunctionParameter*)lfirst(cell); - length = length + 1; - } - for (int i = 0; i < length1; i++) { - FunctionParameter* fp1 = arr1[i]; - FunctionParameter* fp2 = arr2[i]; - TypeName* t1 = fp1->argType; - TypeName* t2 = fp2->argType; - Oid toid1; - Oid toid2; - Type typtup1; - Type typtup2; - errno_t rc; - typtup1 = LookupTypeName(NULL, t1, NULL); - typtup2 = LookupTypeName(NULL, t2, NULL); - if (HeapTupleIsValid(typtup1)) { - toid1 = typeTypeId(typtup1); - ReleaseSysCache(typtup1); - } else { - toid1 = findPackageParameter(strVal(linitial(t1->names))); - if (!OidIsValid(toid1)) { - char message[MAXSTRLEN]; - rc = sprintf_s(message, MAXSTRLEN, "type is not exists %s.", fp1->name); - securec_check_ss_c(rc, "", ""); - InsertErrorMessage(message, stmt1->startLineNumber); - ereport(ERROR, - (errmodule(MOD_PLSQL), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("type is not exists %s.", fp1->name), - errdetail("CommandType: %s", fp1->name), - errcause("System error."), - erraction("Contact Huawei Engineer."))); - } - } - if (HeapTupleIsValid(typtup2)) { - toid2 = typeTypeId(typtup2); - ReleaseSysCache(typtup2); - } else { - toid2 = findPackageParameter(strVal(linitial(t2->names))); - if (!OidIsValid(toid2)) { - char message[MAXSTRLEN]; - rc = sprintf_s(message, MAXSTRLEN, "type is not exists %s.", fp2->name); - securec_check_ss_c(rc, "", ""); - InsertErrorMessage(message, stmt1->startLineNumber); - ereport(ERROR, - (errmodule(MOD_PLSQL), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("type is not exists %s.", fp2->name), - errdetail("CommandType: %s", fp2->name), - errcause("System error."), - erraction("Contact Huawei Engineer."))); - } - } - if (toid1 != toid2 || fp1->mode != fp2->mode) { - pfree(arr1); - pfree(arr2); - return false; - } - } - pfree(arr1); - pfree(arr2); - return true; -} char* getFuncName(List* funcNameList) { char* schemaname = NULL; @@ -2454,6 +2434,30 @@ char* getFuncName(List* funcNameList) { return funcname; } +bool isDefinerACL() +{ + /* + * If in upgrade mode, we can't set package functions as definer rights.
+ */ + if (PLSQL_SECURITY_DEFINER && (u_sess->attr.attr_common.upgrade_mode == 0 || + (!OidIsValid(u_sess->upg_cxt.Inplace_upgrade_next_pg_proc_oid) && + u_sess->attr.attr_common.upgrade_mode != 0))) { + return true; + } + return false; +} + +/* make str md5 hash */ +static void make_md5_hash(char* in_str, char* res_hash) +{ + text* in_text = cstring_to_text(in_str); + size_t len = VARSIZE_ANY_EXHDR(in_text); + if (!pg_md5_hash(VARDATA_ANY(in_text), len, res_hash)) { + ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); + } + pfree_ext(in_text); +} + /* Return decimal value for a hexadecimal digit */ static int get_decimal_from_hex(char hex) { @@ -2465,9 +2469,9 @@ static int get_decimal_from_hex(char hex) } } -oidvector* MakeMd5HashArgTypes(oidvector* paramterTypes) +oidvector* MakeMd5HashOids(oidvector* paramterTypes) { - char hex[MD5_HASH_LEN + 1]; + char* hexarr = (char*)palloc0(sizeof(char) * (MD5_HASH_LEN + 1)); Oid* oidvec = paramterTypes->values; int parameterCount = paramterTypes->dim1; @@ -2480,19 +2484,10 @@ oidvector* MakeMd5HashArgTypes(oidvector* paramterTypes) appendStringInfoSpaces(&oidvec2str, 1); } appendStringInfo(&oidvec2str, "%d", oidvec[parameterCount - 1]); + /* convert oidvector to text and make md5 hash */ + make_md5_hash(oidvec2str.data, hexarr); - /* - * convert oidvector to text and make md5 hash - */ - text* in_text = cstring_to_text(oidvec2str.data); - size_t len = VARSIZE_ANY_EXHDR(in_text); - if (!pg_md5_hash(VARDATA_ANY(in_text), len, hex)) { - ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); - } - pfree_ext(oidvec2str.data); - pfree_ext(in_text); - in_text = NULL; /* * hex: an MD5 sum is 16 bytes long. @@ -2500,9 +2495,11 @@ oidvector* MakeMd5HashArgTypes(oidvector* paramterTypes) */ Oid hex2oid[MD5_HASH_LEN]; for (i = 0; i < MD5_HASH_LEN; i++) { - hex2oid[i] = get_decimal_from_hex(hex[i]); + hex2oid[i] = get_decimal_from_hex(hexarr[i]); } + pfree_ext(hexarr); + /* Build a oidvector using the hash value and use it as allargtypes field value. 
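MakeMd5HashOids above, together with ConvertArgModesToString and ConvertArgModesToMd5Vector later in this patch, builds the fixed-width identity key for a function: the parameter-type OIDs are serialized to a space-separated string and MD5-hashed, each of the 32 hex digits is widened to a small integer so the digest fits an oidvector, and argument modes are first canonicalized so that equivalent signatures compare equal. A standalone sketch of both transformations; the digest function is a stand-in for pg_md5_hash:

    #include <string>
    #include <vector>

    /* Stand-in digest: any 32-hex-digit string shows the shape of the
     * transformation; the real code hashes the string with pg_md5_hash(). */
    static std::string StubDigest32(const std::string&) { return std::string(32, 'a'); }

    /* Mirrors MakeMd5HashOids: {23, 25} -> "23 25" -> md5 -> one small
     * integer per hex digit, as in buildoidvector(hex2oid, MD5_HASH_LEN). */
    std::vector<unsigned> HashArgTypes(const std::vector<unsigned>& typeOids)
    {
        std::string serialized;
        for (size_t i = 0; i < typeOids.size(); ++i) {
            if (i > 0) serialized += ' ';
            serialized += std::to_string(typeOids[i]);
        }
        std::vector<unsigned> out;
        for (char c : StubDigest32(serialized)) {
            out.push_back(c <= '9' ? c - '0' : c - 'a' + 10); /* get_decimal_from_hex */
        }
        return out; /* 32 entries */
    }

    /* Mirrors ConvertArgModesToString: 'i' modes packed at the front, 'b' at
     * the back, 'o' filling the middle, so ordering variations compare equal. */
    std::string CanonicalizeArgModes(const std::string& modes)
    {
        std::string str(modes.size(), 'o');
        size_t left = 0;
        size_t right = modes.size(); /* one past the last slot */
        for (char ch : modes) {
            if (ch == 'i') str[left++] = 'i';
            else if (ch == 'b') str[--right] = 'b';
        }
        return str; /* e.g. "iobio" -> "iioob" */
    }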
*/ return buildoidvector(hex2oid, MD5_HASH_LEN); } @@ -2541,3 +2538,129 @@ Datum ProcedureGetAllArgTypes(HeapTuple tuple, bool* isNull) } return allargtypes; } + +#ifndef ENABLE_MULTIPLE_NODES +char* ConvertArgModesToString(Datum proArgModes) +{ + Assert(DatumGetPointer(proArgModes) != NULL); + + ArrayType* arr = DatumGetArrayTypeP(proArgModes); + Datum* arrdatum = NULL; + int ndatums; + deconstruct_array(arr, CHAROID, 1, true, 'c', &arrdatum, NULL, &ndatums); + char* str = (char*) palloc0(sizeof(char) * (ndatums + 1)); + int i; + int left = 0; + int right = ndatums - 1; + char ch; + for (i = 0; i < ndatums; i++) { + ch = DatumGetChar(arrdatum[i]); + if (ch == 'i') { + str[left] = 'i'; + left++; + } else if (ch == 'b') { + str[right] = 'b'; + right--; + } + } + for (i = left; i <= right; i++) { + str[i] = 'o'; + } + str[ndatums] = '\0'; + pfree_ext(arrdatum); + return str; +} + +bool IsProArgModesEqual(Datum argModes1, Datum argModes2) +{ + bool isEqual = false; + if (DatumGetPointer(argModes1) == NULL && DatumGetPointer(argModes2) == NULL) { + isEqual = true; + } else if (DatumGetPointer(argModes1) != NULL && DatumGetPointer(argModes2) != NULL) { + char* str1 = ConvertArgModesToString(argModes1); + char* str2 = ConvertArgModesToString(argModes2); + if (strcmp(str1, str2) == 0) { + isEqual = true; + } + pfree_ext(str1); + pfree_ext(str2); + } + return isEqual; +} + +bool IsProArgModesEqualByTuple(HeapTuple tup, TupleDesc desc, oidvector* argModes) +{ + bool isNull = false; + Datum argmodes = heap_getattr(tup, Anum_pg_proc_proargmodes, desc, &isNull); + oidvector* oriArgModesVec = ConvertArgModesToMd5Vector(argmodes); + + bool isEqual = DatumGetBool( + DirectFunctionCall2(oidvectoreq, PointerGetDatum(oriArgModesVec), PointerGetDatum(argModes))); + + pfree_ext(oriArgModesVec); + return isEqual; +} + +oidvector* ConvertArgModesToMd5Vector(Datum proArgModes) +{ + char* modesStr = NULL; + char* hexarr = (char*)palloc0(sizeof(char) * (MD5_HASH_LEN + 1)); + int i; + if (proArgModes != PointerGetDatum(NULL)) { + modesStr = ConvertArgModesToString(proArgModes); + } else { + modesStr = (char*)palloc0(sizeof(char)); + modesStr[0] = '\0'; + } + make_md5_hash(modesStr, hexarr); + + pfree_ext(modesStr); + + Oid hex2oid[MD5_HASH_LEN]; + for (i = 0; i < MD5_HASH_LEN; i++) { + hex2oid[i] = get_decimal_from_hex(hexarr[i]); + } + + pfree_ext(hexarr); + + return buildoidvector(hex2oid, MD5_HASH_LEN); +} + +oidvector* MergeOidVector(oidvector* allArgTypes, oidvector* argModes) +{ + Assert(allArgTypes != NULL); + Assert(argModes != NULL); + + oidvector* res = NULL; + + int len1 = allArgTypes->dim1; + int len2 = argModes->dim1; + + errno_t rc = EOK; + Oid* oids = (Oid*)palloc0(sizeof(Oid) * (len1 + len2)); + rc = memcpy_s(oids, (len1 + len2) * sizeof(Oid), allArgTypes->values, len1 * sizeof(Oid)); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(&oids[len1], len2 * sizeof(Oid), argModes->values, len2 * sizeof(Oid)); + securec_check(rc, "\0", "\0"); + + res = buildoidvector(oids, len1 + len2); + + pfree_ext(oids); + + return res; +} + +static void CheckInParameterConflicts(CatCList* catlist, const char* procedureName, oidvector* inpara_type, + oidvector* proc_para_type, Oid languageId, bool isOraStyle, bool replace) +{ + if (IsPlpgsqlLanguageOid(languageId) && !isOraStyle) { + bool same = DatumGetBool( + DirectFunctionCall2(oidvectoreq, PointerGetDatum(inpara_type), PointerGetDatum(proc_para_type))); + if (same && !replace) { + ReleaseSysCacheList(catlist); + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), + 
errmsg("function \"%s\" already exists with same argument types", procedureName))); + } + } +} +#endif diff --git a/src/common/backend/catalog/pg_publication.cpp b/src/common/backend/catalog/pg_publication.cpp index d3a85932d..9900f25bd 100644 --- a/src/common/backend/catalog/pg_publication.cpp +++ b/src/common/backend/catalog/pg_publication.cpp @@ -107,6 +107,16 @@ static Publication *GetPublication(Oid pubid) */ static bool is_publishable_class(Oid relid, Form_pg_class reltuple) { + /* internal namespace, doesn't need to publish */ + if (reltuple->relnamespace == CSTORE_NAMESPACE || reltuple->relnamespace == PG_PKG_SERVICE_NAMESPACE || +#ifndef ENABLE_MULTIPLE_NODES + reltuple->relnamespace == DBE_PLDEVELOPER_NAMESPACE || +#endif + reltuple->relnamespace == PG_SNAPSHOT_NAMESPACE || reltuple->relnamespace == PG_SQLADVISOR_NAMESPACE || + reltuple->relnamespace == PG_BLOCKCHAIN_NAMESPACE || reltuple->relnamespace == PG_DB4AI_NAMESPACE || + reltuple->relnamespace == PG_PLDEBUG_NAMESPACE) { + return false; + } return reltuple->relkind == RELKIND_RELATION && !IsCatalogClass(relid, reltuple) && reltuple->relpersistence == RELPERSISTENCE_PERMANENT && /* @@ -210,7 +220,7 @@ List *GetRelationPublications(Oid relid) /* Find all publications associated with the relation. */ pubrellist = SearchSysCacheList1(PUBLICATIONRELMAP, ObjectIdGetDatum(relid)); for (i = 0; i < pubrellist->n_members; i++) { - HeapTuple tup = &pubrellist->members[i]->tuple; + HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(pubrellist, i); Oid pubid = ((Form_pg_publication_rel)GETSTRUCT(tup))->prpubid; result = lappend_oid(result, pubid); diff --git a/src/common/backend/catalog/pg_shdepend.cpp b/src/common/backend/catalog/pg_shdepend.cpp index c39d4264a..f56445846 100644 --- a/src/common/backend/catalog/pg_shdepend.cpp +++ b/src/common/backend/catalog/pg_shdepend.cpp @@ -22,6 +22,7 @@ #include "catalog/catalog.h" #include "catalog/dependency.h" #include "catalog/indexing.h" +#include "catalog/gs_db_privilege.h" #include "catalog/gs_package.h" #include "catalog/pg_authid.h" #include "catalog/pg_collation.h" @@ -1092,6 +1093,8 @@ static void storeObjectDescription( appendStringInfo(descs, _("privileges for %s"), objdesc); else if (deptype == SHARED_DEPENDENCY_RLSPOLICY) appendStringInfo(descs, _("target of %s"), objdesc); + else if (deptype == SHARED_DEPENDENCY_DBPRIV) + appendStringInfo(descs, _("privileges for %s"), objdesc); else ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), @@ -1243,6 +1246,14 @@ void shdepDropOwned(List* roleids, DropBehavior behavior) add_exact_object_address(&obj, deleteobjs); } break; + case SHARED_DEPENDENCY_DBPRIV: + if (sdepForm->dbid == u_sess->proc_cxt.MyDatabaseId) { + obj.classId = sdepForm->classid; + obj.objectId = sdepForm->objid; + obj.objectSubId = sdepForm->objsubid; + add_exact_object_address(&obj, deleteobjs); + } + break; default: break; } diff --git a/src/common/backend/catalog/pg_subscription.cpp b/src/common/backend/catalog/pg_subscription.cpp index 03e5f92cb..3d1d6833e 100644 --- a/src/common/backend/catalog/pg_subscription.cpp +++ b/src/common/backend/catalog/pg_subscription.cpp @@ -28,7 +28,7 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/syscache.h" - +#include "replication/worker_internal.h" static List *textarray_to_stringlist(ArrayType *textarray); @@ -62,7 +62,9 @@ Subscription *GetSubscription(Oid subid, bool missing_ok) /* Get conninfo */ datum = SysCacheGetAttr(SUBSCRIPTIONOID, tup, Anum_pg_subscription_subconninfo, &isnull); - 
Assert(!isnull); + if (unlikely(isnull)) { + ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("null conninfo for subscription %u", subid))); + } sub->conninfo = TextDatumGetCString(datum); /* Get slotname */ @@ -75,12 +77,18 @@ Subscription *GetSubscription(Oid subid, bool missing_ok) /* Get synccommit */ datum = SysCacheGetAttr(SUBSCRIPTIONOID, tup, Anum_pg_subscription_subsynccommit, &isnull); - Assert(!isnull); + if (unlikely(isnull)) { + ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("null synccommit for subscription %u", subid))); + } sub->synccommit = TextDatumGetCString(datum); /* Get publications */ datum = SysCacheGetAttr(SUBSCRIPTIONOID, tup, Anum_pg_subscription_subpublications, &isnull); - Assert(!isnull); + if (unlikely(isnull)) { + ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("null publications for subscription %u", subid))); + } sub->publications = textarray_to_stringlist(DatumGetArrayTypeP(datum)); ReleaseSysCache(tup); @@ -121,6 +129,7 @@ int CountDBSubscriptions(Oid dbid) */ void FreeSubscription(Subscription *sub) { + pfree(sub->synccommit); pfree(sub->name); pfree(sub->conninfo); if (sub->slotname) { @@ -173,6 +182,46 @@ char *get_subscription_name(Oid subid, bool missing_ok) return subname; } +/* Clear the list content, only deal with DefElem and string content */ +static void ClearListContent(List *list) +{ + ListCell *cell = NULL; + foreach(cell, list) { + DefElem* def = (DefElem*)lfirst(cell); + if (def->arg == NULL || !IsA(def->arg, String)) { + continue; + } + + char *str = strVal(def->arg); + if (str == NULL || str[0] == '\0') { + continue; + } + + size_t len = strlen(str); + errno_t errCode = memset_s(str, len, 0, len); + securec_check(errCode, "\0", "\0"); + } +} + +/* + * Decrypt conninfo for subscription. + * IMPORTANT: caller should clear and free the memory after using it immediately + */ +char *DecryptConninfo(char *encryptConninfo) +{ + const char* sensitiveOptionsArray[] = {"password"}; + const int sensitiveArrayLength = lengthof(sensitiveOptionsArray); + List *defList = ConninfoToDefList(encryptConninfo); + DecryptOptions(defList, sensitiveOptionsArray, sensitiveArrayLength, SUBSCRIPTION_MODE); + char *decryptConninfo = DefListToString(defList); + + /* defList has plain content, clear it before free */ + ClearListContent(defList); + list_free_ext(defList); + /* IMPORTANT: caller should clear and free the memory after using it immediately */ + return decryptConninfo; +} + /* * Convert text array to list of strings. * diff --git a/src/common/backend/catalog/pg_synonym.cpp b/src/common/backend/catalog/pg_synonym.cpp index 73e22ecdc..8bcac70c3 100644 --- a/src/common/backend/catalog/pg_synonym.cpp +++ b/src/common/backend/catalog/pg_synonym.cpp @@ -326,6 +326,44 @@ void AlterSynonymOwner(List* name, Oid newOwnerId) heap_close(rel, RowExclusiveLock); } +/* + * AlterSynonymOwnerByOid - ALTER Synonym OWNER TO newowner by Oid + * This is currently only used to propagate ALTER PACKAGE OWNER to a + * package. Package will build Synonym for ref cursor type. + * It assumes the caller has done all needed checks. 
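A note on DecryptConninfo above: the returned buffer holds plaintext credentials, so the stated contract is to use it immediately, wipe it, and then free it. A minimal caller-side sketch of that wipe-before-free discipline, using a volatile write loop as a stand-in for the securec memset_s the patch itself uses:

    #include <cstdlib>
    #include <cstring>

    /* Wipe the plaintext before releasing the buffer; the volatile pointer
     * keeps the compiler from eliding the apparently dead stores. */
    static void WipeAndFree(char* secret)
    {
        if (secret == NULL) {
            return;
        }
        volatile char* p = secret;
        for (size_t i = 0, n = std::strlen(secret); i < n; ++i) {
            p[i] = '\0';
        }
        free(secret); /* pfree() in the server code */
    }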
+ */ +void AlterSynonymOwnerByOid(Oid synonymOid, Oid newOwnerId) +{ + HeapTuple tuple = NULL; + Relation rel = NULL; + + rel = heap_open(PgSynonymRelationId, RowExclusiveLock); + tuple = SearchSysCache1(SYNOID, ObjectIdGetDatum(synonymOid)); + if (!HeapTupleIsValid(tuple)) { + ereport( + ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for synonym %u", synonymOid))); + } + Form_pg_synonym synForm = (Form_pg_synonym)GETSTRUCT(tuple); + + /* + * If the new owner is the same as the existing owner, consider the command to have succeeded. + * P.S. This is for dump restoration purposes. + */ + if (synForm->synowner != newOwnerId) { + /* Change its owner */ + synForm->synowner = newOwnerId; + + simple_heap_update(rel, &tuple->t_self, tuple); + CatalogUpdateIndexes(rel, tuple); + + /* Update owner dependency reference. */ + changeDependencyOnOwner(PgSynonymRelationId, HeapTupleGetOid(tuple), newOwnerId); + } + + ReleaseSysCache(tuple); + heap_close(rel, NoLock); +} + /* * RemoveSynonymById * Given synonym oid, remove the synonym tuple. diff --git a/src/common/backend/catalog/pg_uid.cpp b/src/common/backend/catalog/pg_uid.cpp new file mode 100644 index 000000000..1b157d105 --- /dev/null +++ b/src/common/backend/catalog/pg_uid.cpp @@ -0,0 +1,272 @@ +/* + * Copyright (c) Huawei Technologies Co.,Ltd. 2021-2023. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * pg_uid.cpp + * + * IDENTIFICATION + * src/common/backend/catalog/pg_uid.cpp + * + *------------------------------------------------------------------------- + */ + +#include "catalog/namespace.h" +#include "catalog/pg_uid.h" +#include "catalog/pg_uid_fn.h" +#include "catalog/storage.h" +#include "utils/rel.h" +#include "utils/inval.h" +#include "utils/builtins.h" +#include "catalog/indexing.h" +#include "catalog/pgxc_class.h" +#include "pgxc/redistrib.h" +#include "storage/lock/lock.h" +#include "tcop/utility.h" +#include "utils/syscache.h" +#include "utils/fmgroids.h" +#include "access/heapam.h" +#include "utils/snapmgr.h" +#include "access/xact.h" +#include "access/hash.h" +#include "pgxc/groupmgr.h" +#include "pgxc/pgxc.h" +#include "access/reloptions.h" +#include "access/hbucket_am.h" +#include "executor/node/nodeModifyTable.h" +#include "nodes/makefuncs.h" + +static UidHashValue* FindUidHashCache(Oid dbOid, Oid relOid); + +void DeleteDatabaseUidEntry(Oid dbOid) +{ + HASH_SEQ_STATUS status; + UidHashValue* entry; + LWLockAcquire(HashUidLock, LW_EXCLUSIVE); + hash_seq_init(&status, t_thrd.storage_cxt.uidHashCache); + while ((entry = (UidHashValue *) hash_seq_search(&status)) != NULL) { + if (entry->key.dbOid != dbOid) { + continue; + } + if (hash_search(t_thrd.storage_cxt.uidHashCache, &entry->key, HASH_REMOVE, NULL) == NULL) { + ereport(ERROR, (errmsg("uid hash cache corrupted"))); + } + } + LWLockRelease(HashUidLock); +} + +/* Delete the uid entry in gs_uid; only DROP TABLE calls this function.
*/ +void DeleteUidEntry(Oid relid) +{ + HeapTuple tuple = SearchSysCache1(UIDRELID, relid); + if (!HeapTupleIsValid(tuple)) { + return; + } + + /* remove hash table first */ + UidHashKey key = {.dbOid = u_sess->proc_cxt.MyDatabaseId, .relOid = relid}; + LWLockAcquire(HashUidLock, LW_EXCLUSIVE); + (void)hash_search(t_thrd.storage_cxt.uidHashCache, (void*)&key, HASH_REMOVE, NULL); + LWLockRelease(HashUidLock); + + Relation gs_uid = heap_open(UidRelationId, RowExclusiveLock); + simple_heap_delete(gs_uid, &tuple->t_self); + ReleaseSysCache(tuple); + heap_close(gs_uid, RowExclusiveLock); + return; +} + +void InsertUidEntry(Oid relid) +{ + Datum values[Natts_gs_uid]; + bool nulls[Natts_gs_uid]; + errno_t errorno = EOK; + + HeapTuple tuple = SearchSysCache1(UIDRELID, relid); + /* already inserted one */ + if (HeapTupleIsValid(tuple)) { + ReleaseSysCache(tuple); + return; + } + + /* This is a tad tedious, but way cleaner than what we used to do... */ + errorno = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check_c(errorno, "\0", "\0"); + + errorno = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check_c(errorno, "\0", "\0"); + + Relation gs_uid = heap_open(UidRelationId, RowExclusiveLock); + values[Anum_gs_uid_relid - 1] = ObjectIdGetDatum(relid); + values[Anum_gs_uid_backup - 1] = UInt64GetDatum(0); + HeapTuple newTuple = heap_form_tuple(RelationGetDescr(gs_uid), values, nulls); + (void)simple_heap_insert(gs_uid, newTuple); + CatalogUpdateIndexes(gs_uid, newTuple); + + heap_freetuple_ext(newTuple); + heap_close(gs_uid, RowExclusiveLock); + return; +} +static void UpdateUidEntryInternal(Oid relOid, uint64 &backupUid, bool init) +{ + Relation gs_uid = heap_open(UidRelationId, RowExclusiveLock); + HeapTuple tuple = SearchSysCacheCopy1(UIDRELID, relOid); + Assert(HeapTupleIsValid(tuple)); + Form_gs_uid relUid = (Form_gs_uid)GETSTRUCT(tuple); + if (init) { + backupUid = relUid->uid_backup + UID_RESTORE_DURATION; + } + if ((uint64)relUid->uid_backup < backupUid) { /* check again */ + relUid->uid_backup = backupUid; + heap_inplace_update(gs_uid, tuple, true); + } + heap_freetuple_ext(tuple); + heap_close(gs_uid, RowExclusiveLock); +} + +static bool UidEntryExist(Oid relOid) +{ + HeapTuple tuple = SearchSysCache1(UIDRELID, relOid); + if (!HeapTupleIsValid(tuple)) { + return false; + } else { + ReleaseSysCache(tuple); + return true; + } +} + +static bool UpdateUidEntry(Relation relation, uint64 backupUid, UidHashValue* value) +{ + uint32 expected = BACKUP_NO_START; + bool backup = pg_atomic_compare_exchange_u32(&value->backUpState, + &expected, BACKUP_IN_PROGRESS); + if (!backup) { + return false; /* someone else is doing the same thing */ + } + Assert(value); + /* recheck again, quick bypass */ + if (value->backupUidRange >= backupUid) { /* safe to compare while holding the uidBackup flag */ + pg_atomic_write_u32(&value->backUpState, BACKUP_NO_START); + return false; + } + PG_TRY(); + { + UpdateUidEntryInternal(RelationGetRelid(relation), backupUid, false); + } + PG_CATCH(); + { + ereport(LOG, (errmsg("Update uid hash cache failed."))); + pg_atomic_write_u32(&value->backUpState, BACKUP_NO_START); + PG_RE_THROW(); + } + PG_END_TRY(); + value->backupUidRange = backupUid; /* safe to write while holding the uidBackup flag */ + pg_atomic_write_u32(&value->backUpState, BACKUP_NO_START); + return true; +} + +void InitUidCache(void) +{ + int rc = 0; + HASHCTL ctl; + rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); + securec_check(rc, "\0", "\0"); + + ctl.keysize = sizeof(UidHashKey); + ctl.entrysize =
sizeof(UidHashValue); + ctl.hash = tag_hash; + t_thrd.storage_cxt.uidHashCache = HeapMemInitHash( + "Shared Uid hash by request", 64, 256, &ctl, HASH_ELEM | HASH_FUNCTION); + if (!t_thrd.storage_cxt.uidHashCache) + ereport(FATAL, (errmsg("could not initialize shared uid hash table"))); +} + +static UidHashValue* FindUidHashCache(Oid dbOid, Oid relOid) +{ + UidHashKey key = {.dbOid = dbOid, .relOid = relOid}; + UidHashValue* value = NULL; + LWLockAcquire(HashUidLock, LW_SHARED); + value = (UidHashValue*)hash_search(t_thrd.storage_cxt.uidHashCache, + (void*)&key, HASH_FIND, NULL); + LWLockRelease(HashUidLock); + return value; +} + +void BuildUidHashCache(Oid dbOid, Oid relOid) +{ + if (FindUidHashCache(dbOid, relOid) || !UidEntryExist(relOid) || RecoveryInProgress()) { + return; + } + UidHashKey key = {.dbOid = dbOid, .relOid = relOid}; + UidHashValue* value = NULL; + bool found = false; + LWLockAcquire(HashUidLock, LW_EXCLUSIVE); + value = (UidHashValue*)hash_search(t_thrd.storage_cxt.uidHashCache, + (void*)&key, HASH_ENTER, &found); + if (!found) { + PG_TRY(); + { + uint64 backupUid = 0; + UpdateUidEntryInternal(relOid, backupUid, true); + Assert(backupUid - UID_RESTORE_DURATION >= 0); + value->currentUid = (backupUid - UID_RESTORE_DURATION) + 1; + value->backupUidRange = backupUid; + value->backUpState = BACKUP_NO_START; + } + PG_CATCH(); + { + ereport(LOG, (errmsg("Build uid hash cache failed."))); + hash_search(t_thrd.storage_cxt.uidHashCache, (void*)&key, HASH_REMOVE, NULL); + LWLockRelease(HashUidLock); + PG_RE_THROW(); + } + PG_END_TRY(); + } + LWLockRelease(HashUidLock); +} + +#define FetchCurState(curBackupUid, curUid, value) \ + do { \ + curBackupUid = pg_atomic_read_u64(&value->backupUidRange); \ + curUid = pg_atomic_read_u64(&value->currentUid); \ + } while (0) + +#define TRIGGER_FACTOR (4) +uint64 GetNewUidForTuple(Relation relation) +{ + Assert(RELATION_HAS_UIDS(relation)); + Oid dbOid = relation->rd_node.dbNode; + Oid relOid = RelationGetRelid(relation); + UidHashValue* value = (UidHashValue*)FindUidHashCache(dbOid, relOid); + if (value == NULL) { + BuildUidHashCache(dbOid, relOid); + value = (UidHashValue*)FindUidHashCache(dbOid, relOid); + } + Assert(value); + uint64 res; + uint64 curBackupUid; + uint64 curUid; + FetchCurState(curBackupUid, curUid, value); + if (curUid + (uint64)UID_RESTORE_DURATION / TRIGGER_FACTOR > curBackupUid) { + (void)UpdateUidEntry(relation, curBackupUid + UID_RESTORE_DURATION, value); + } + res = pg_atomic_fetch_add_u64(&value->currentUid, 1); + FetchCurState(curBackupUid, curUid, value); + while (res > curBackupUid) { + (void)UpdateUidEntry(relation, curBackupUid + UID_RESTORE_DURATION, value); + pg_usleep(10000L); /* 10ms delay */ + FetchCurState(curBackupUid, curUid, value); + } + return res; +} + diff --git a/src/common/backend/catalog/pgxc_slice.cpp b/src/common/backend/catalog/pgxc_slice.cpp index d7f886f1c..7eeeef904 100644 --- a/src/common/backend/catalog/pgxc_slice.cpp +++ b/src/common/backend/catalog/pgxc_slice.cpp @@ -77,7 +77,7 @@ static void AddReferencedSlices(Oid relid, DistributeBy *distributeby) relation = heap_open(PgxcSliceRelationId, RowExclusiveLock); for (i = 0; i < slicelist->n_members; i++) { - tup = &slicelist->members[i]->tuple; + tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(slicelist, i); bool isnull = false; Datum val = fastgetattr(tup, Anum_pgxc_slice_type, RelationGetDescr(relation), &isnull); if (DatumGetChar(val) == PGXC_SLICE_TYPE_TABLE) { @@ -299,7 +299,7 @@ void RemovePgxcSlice(Oid relid) CatCList *slicelist = 
SearchSysCacheList1(PGXCSLICERELID, ObjectIdGetDatum(relid)); for (int i = 0; i < slicelist->n_members; i++) { - tup = &slicelist->members[i]->tuple; + tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(slicelist, i); simple_heap_delete(relation, &tup->t_self); } ReleaseSysCacheList(slicelist); diff --git a/src/common/backend/catalog/storage.cpp b/src/common/backend/catalog/storage.cpp index 02c4a420d..d5f6f52be 100644 --- a/src/common/backend/catalog/storage.cpp +++ b/src/common/backend/catalog/storage.cpp @@ -40,6 +40,7 @@ #include "catalog/pg_hashbucket_fn.h" #include "catalog/pg_tablespace.h" #include "commands/tablespace.h" +#include "commands/verify.h" #include "pgxc/pgxc.h" #include "storage/freespace.h" #include "storage/lmgr.h" @@ -318,30 +319,17 @@ void log_smgrcreate(RelFileNode* rnode, ForkNumber forkNum) if (IsSegmentFileNode(*rnode)) { return; } - - xl_smgr_create_compress xlrec; - uint size; - uint8 info = XLOG_SMGR_CREATE | XLR_SPECIAL_REL_UPDATE; - /* - * compressOptions Copy - */ - if (rnode->opt != 0) { - xlrec.pageCompressOpts = rnode->opt; - size = sizeof(xl_smgr_create_compress); - info |= XLR_REL_COMPRESS; - } else { - size = sizeof(xl_smgr_create); - } - + /* * Make an XLOG entry reporting the file creation. */ - xlrec.xlrec.forkNum = forkNum; - RelFileNodeRelCopy(xlrec.xlrec.rnode, *rnode); + xl_smgr_create xlrec; + xlrec.forkNum = forkNum; + RelFileNodeRelCopy(xlrec.rnode, *rnode); XLogBeginInsert(); - XLogRegisterData((char*)&xlrec, size); - XLogInsert(RM_SMGR_ID, info, false, rnode->bucketNode); + XLogRegisterData((char*)&xlrec, sizeof(xlrec)); + XLogInsert(RM_SMGR_ID, XLOG_SMGR_CREATE | XLR_SPECIAL_REL_UPDATE, rnode->bucketNode); } static void CStoreRelDropStorage(Relation rel, RelFileNode* rnode, Oid ownerid) @@ -703,26 +691,15 @@ void RelationTruncate(Relation rel, BlockNumber nblocks) * Make an XLOG entry reporting the file truncation. 
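Stepping back to the new pg_uid.cpp a few hunks up: GetNewUidForTuple hands out unique row ids with a lock-free fetch-add, while a persisted watermark (uid_backup) is kept UID_RESTORE_DURATION ahead of the counter so a crash restart can resume from the watermark without reusing a value. A much-simplified standalone sketch of that scheme; the real code additionally serializes watermark extension through a CAS on backUpState and pre-extends once a quarter of the range remains:

    #include <atomic>
    #include <cstdint>

    struct UidState {
        std::atomic<uint64_t> currentUid;
        std::atomic<uint64_t> backupUidRange; /* highest uid covered on disk */
    };

    constexpr uint64_t kRestoreDuration = 1024; /* stand-in for UID_RESTORE_DURATION */

    uint64_t GetNewUid(UidState& s, void (*persistWatermark)(uint64_t))
    {
        uint64_t uid = s.currentUid.fetch_add(1);
        /* If the counter ran past the persisted range, push the watermark
         * forward before handing the uid out (simplified: no CAS guard). */
        while (uid > s.backupUidRange.load()) {
            uint64_t next = s.backupUidRange.load() + kRestoreDuration;
            persistWatermark(next); /* heap_inplace_update on gs_uid in the server */
            s.backupUidRange.store(next);
        }
        return uid;
    }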
*/ XLogRecPtr lsn; - xl_smgr_truncate_compress xlrec; - uint size; - uint8 info = XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE; + xl_smgr_truncate xlrec; - xlrec.xlrec.blkno = nblocks; - - if (rel->rd_node.opt != 0) { - xlrec.pageCompressOpts = rel->rd_node.opt; - size = sizeof(xl_smgr_truncate_compress); - info |= XLR_REL_COMPRESS; - } else { - size = sizeof(xl_smgr_truncate); - } - - RelFileNodeRelCopy(xlrec.xlrec.rnode, rel->rd_node); + xlrec.blkno = nblocks; + RelFileNodeRelCopy(xlrec.rnode, rel->rd_node); XLogBeginInsert(); - XLogRegisterData((char*)&xlrec, size); + XLogRegisterData((char*)&xlrec, sizeof(xlrec)); - lsn = XLogInsert(RM_SMGR_ID, info, false, rel->rd_node.bucketNode); + lsn = XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE, rel->rd_node.bucketNode); /* * Flush, because otherwise the truncation of the main relation might @@ -742,6 +719,7 @@ void RelationTruncate(Relation rel, BlockNumber nblocks) /* Do the real work */ smgrtruncate(rel->rd_smgr, MAIN_FORKNUM, nblocks); + BatchClearBadBlock(rel->rd_node, MAIN_FORKNUM, nblocks); } void PartitionTruncate(Relation parent, Partition part, BlockNumber nblocks) @@ -799,7 +777,7 @@ void PartitionTruncate(Relation parent, Partition part, BlockNumber nblocks) XLogBeginInsert(); XLogRegisterData((char*)&xlrec, sizeof(xlrec)); - lsn = XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE, false, part->pd_node.bucketNode); + lsn = XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE, part->pd_node.bucketNode); /* * Flush, because otherwise the truncation of the main relation might @@ -817,6 +795,7 @@ void PartitionTruncate(Relation parent, Partition part, BlockNumber nblocks) /* Do the real work */ smgrtruncate(rel->rd_smgr, MAIN_FORKNUM, nblocks); + BatchClearBadBlock(rel->rd_node, MAIN_FORKNUM, nblocks); /* release fake relation */ releaseDummyRelation(&rel); @@ -967,6 +946,7 @@ void push_del_rel_to_hashtbl(bool isCommit) entry->rnode.bucketNode = pending->relnode.bucketNode; entry->maxSegNo = -1; } + BatchClearBadBlock(pending->relnode, pending->forknum, 0); } } } @@ -1233,7 +1213,7 @@ void smgr_redo(XLogReaderState* record) { XLogRecPtr lsn = record->EndRecPtr; uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - bool compress = XLogRecGetInfo(record) & XLR_REL_COMPRESS; + /* Backup blocks are not used in smgr records */ Assert(!XLogRecHasAnyBlockRefs(record)); @@ -1242,14 +1222,14 @@ void smgr_redo(XLogReaderState* record) RelFileNode rnode; RelFileNodeCopy(rnode, xlrec->rnode, XLogRecGetBucketId(record)); - rnode.opt = compress ? ((xl_smgr_create_compress*)XLogRecGetData(record))->pageCompressOpts : 0; - smgr_redo_create(rnode, xlrec->forkNum, (char *)xlrec); - /* Redo column file, attid is hidden in forkNum */ + smgr_redo_create(rnode, xlrec->forkNum, (char *)xlrec); + /* Redo column file, attid is hidden in forkNum */ + } else if (info == XLOG_SMGR_TRUNCATE) { xl_smgr_truncate* xlrec = (xl_smgr_truncate*)XLogRecGetData(record); RelFileNode rnode; RelFileNodeCopy(rnode, xlrec->rnode, XLogRecGetBucketId(record)); - rnode.opt = compress ? ((xl_smgr_truncate_compress*)XLogRecGetData(record))->pageCompressOpts : 0; + /* * Forcibly create relation if it doesn't exist (which suggests that * it was dropped somewhere later in the WAL sequence). 
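The comment above states the redo invariant worth spelling out: replay must be idempotent, so redoing a truncate first recreates the relation file if a later WAL record already dropped it, then cuts it to the recorded length. A toy standalone illustration of that create-if-missing-then-truncate rule (the container is a stand-in for real storage, not server code):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    /* Toy storage: relid -> block vector. Redo of "truncate to nblocks"
     * must succeed even if the file is already gone. */
    struct FakeStorage {
        std::unordered_map<uint64_t, std::vector<char>> files;

        void RedoTruncate(uint64_t relid, size_t nblocks)
        {
            auto it = files.find(relid);
            if (it == files.end()) {
                it = files.emplace(relid, std::vector<char>()).first; /* recreate */
            }
            if (it->second.size() > nblocks) {
                it->second.resize(nblocks); /* truncate to the recorded length */
            }
        }
    };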
As in @@ -1290,7 +1270,7 @@ void smgrApplyXLogTruncateRelation(XLogReaderState* record) smgrclosenode(rbnode); - XLogTruncateRelation(record, rbnode.node, MAIN_FORKNUM, xlrec->blkno); + XLogTruncateRelation(rbnode.node, MAIN_FORKNUM, xlrec->blkno); } /* @@ -2192,13 +2172,6 @@ void ColMainFileNodesAppend(RelFileNode* bcmFileNode, BackendId backend) u_sess->catalog_cxt.ColMainFileNodes[u_sess->catalog_cxt.ColMainFileNodesCurNum].node = *bcmFileNode; u_sess->catalog_cxt.ColMainFileNodes[u_sess->catalog_cxt.ColMainFileNodesCurNum].backend = backend; ++u_sess->catalog_cxt.ColMainFileNodesCurNum; - - ereport(DEBUG1, - (errmsg("Row[MAIN] relation dropped: %u/%u/%u backend(%d)", - bcmFileNode->spcNode, - bcmFileNode->dbNode, - bcmFileNode->relNode, - backend))); } /* search some one in Column Heap Main file list. diff --git a/src/common/backend/catalog/system_views.sql b/src/common/backend/catalog/system_views.sql index 883bdb3ff..10bee9baf 100644 --- a/src/common/backend/catalog/system_views.sql +++ b/src/common/backend/catalog/system_views.sql @@ -111,7 +111,7 @@ CREATE VIEW pg_rules AS N.nspname AS schemaname, C.relname AS tablename, R.rulename AS rulename, - pg_get_ruledef(R.oid) AS definition + pg_catalog.pg_get_ruledef(R.oid) AS definition FROM (pg_rewrite R JOIN pg_class C ON (C.oid = R.ev_class)) LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE R.rulename != '_RETURN'; @@ -140,7 +140,7 @@ SELECT labelname WHEN 'column' THEN relcolumn ELSE '' END AS columnname -FROM gs_policy_label WHERE length(fqdntype)>0 ORDER BY labelname, labeltype ,fqdntype; +FROM gs_policy_label WHERE pg_catalog.length(fqdntype)>0 ORDER BY labelname, labeltype ,fqdntype; REVOKE ALL on pg_catalog.gs_labels FROM public; --for audit @@ -168,7 +168,7 @@ create view pg_catalog.gs_auditing_access as from gs_auditing_policy p left join gs_auditing_policy_access a ON (a.policyoid=p.Oid) left join gs_labels l ON (a.labelname=l.labelname) - where length(a.accesstype) > 0 order by 1,3; + where pg_catalog.length(a.accesstype) > 0 order by 1,3; REVOKE ALL on pg_catalog.gs_auditing_access FROM public; @@ -196,7 +196,7 @@ create view pg_catalog.gs_auditing_privilege as from gs_auditing_policy p left join gs_auditing_policy_privileges priv ON (priv.policyoid=p.Oid) left join gs_labels l ON (priv.labelname=l.labelname) - where length(priv.privilegetype) > 0 order by 1,3; + where pg_catalog.length(priv.privilegetype) > 0 order by 1,3; REVOKE ALL on pg_catalog.gs_auditing_privilege FROM public; @@ -272,8 +272,8 @@ CREATE VIEW pg_views AS SELECT N.nspname AS schemaname, C.relname AS viewname, - pg_get_userbyid(C.relowner) AS viewowner, - pg_get_viewdef(C.oid) AS definition + pg_catalog.pg_get_userbyid(C.relowner) AS viewowner, + pg_catalog.pg_get_viewdef(C.oid) AS definition FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'v'; @@ -281,13 +281,13 @@ CREATE VIEW pg_tables AS SELECT N.nspname AS schemaname, C.relname AS tablename, - pg_get_userbyid(C.relowner) AS tableowner, + pg_catalog.pg_get_userbyid(C.relowner) AS tableowner, T.spcname AS tablespace, C.relhasindex AS hasindexes, C.relhasrules AS hasrules, C.relhastriggers AS hastriggers, case - when pg_check_authid(po.creator) then pg_get_userbyid(po.creator) + when pg_catalog.pg_check_authid(po.creator) then pg_catalog.pg_get_userbyid(po.creator) else CAST(NULL AS name) end as tablecreator, po.ctime AS created, @@ -301,10 +301,10 @@ CREATE VIEW pg_catalog.gs_matviews AS SELECT N.nspname AS schemaname, C.relname AS matviewname, - 
pg_get_userbyid(C.relowner) AS matviewowner, + pg_catalog.pg_get_userbyid(C.relowner) AS matviewowner, T.spcname AS tablespace, C.relhasindex AS hasindexes, - pg_get_viewdef(C.oid) AS definition + pg_catalog.pg_get_viewdef(C.oid) AS definition FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) LEFT JOIN pg_tablespace T ON (T.oid = C.reltablespace) WHERE C.relkind = 'm'; @@ -315,7 +315,7 @@ CREATE VIEW pg_indexes AS C.relname AS tablename, I.relname AS indexname, T.spcname AS tablespace, - pg_get_indexdef(I.oid) AS indexdef + pg_catalog.pg_get_indexdef(I.oid) AS indexdef FROM pg_index X JOIN pg_class C ON (C.oid = X.indrelid) JOIN pg_class I ON (I.oid = X.indexrelid) LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) @@ -326,12 +326,12 @@ CREATE VIEW pg_indexes AS CREATE VIEW pg_catalog.pg_gtt_relstats WITH (security_barrier) AS SELECT n.nspname AS schemaname, c.relname AS tablename, - (select relfilenode from pg_get_gtt_relstats(c.oid)), - (select relpages from pg_get_gtt_relstats(c.oid)), - (select reltuples from pg_get_gtt_relstats(c.oid)), - (select relallvisible from pg_get_gtt_relstats(c.oid)), - (select relfrozenxid from pg_get_gtt_relstats(c.oid)), - (select relminmxid from pg_get_gtt_relstats(c.oid)) + (select relfilenode from pg_catalog.pg_get_gtt_relstats(c.oid)), + (select relpages from pg_catalog.pg_get_gtt_relstats(c.oid)), + (select reltuples from pg_catalog.pg_get_gtt_relstats(c.oid)), + (select relallvisible from pg_catalog.pg_get_gtt_relstats(c.oid)), + (select relfrozenxid from pg_catalog.pg_get_gtt_relstats(c.oid)), + (select relminmxid from pg_catalog.pg_get_gtt_relstats(c.oid)) FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace @@ -341,7 +341,7 @@ CREATE VIEW pg_catalog.pg_gtt_attached_pids WITH (security_barrier) AS SELECT n.nspname AS schemaname, c.relname AS tablename, c.oid AS relid, - array(select pid from pg_gtt_attached_pid(c.oid)) AS pids + array(select pid from pg_catalog.pg_gtt_attached_pid(c.oid)) AS pids FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace @@ -408,30 +408,30 @@ SELECT s.nspname AS schemaname, (SELECT n.nspname, c.relname, a.attname, - (select stainherit from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stainherit, - (select stanullfrac from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanullfrac, - (select stawidth from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stawidth, - (select stadistinct from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stadistinct, - (select stakind1 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind1, - (select stakind2 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind2, - (select stakind3 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind3, - (select stakind4 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind4, - (select stakind5 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind5, - (select stanumbers1 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers1, - (select stanumbers2 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers2, - (select stanumbers3 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers3, - (select stanumbers4 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers4, - (select stanumbers5 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers5, - (select stavalues1 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues1, - (select stavalues2 from 
pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues2, - (select stavalues3 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues3, - (select stavalues4 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues4, - (select stavalues5 from pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues5 + (select stainherit from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stainherit, + (select stanullfrac from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanullfrac, + (select stawidth from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stawidth, + (select stadistinct from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stadistinct, + (select stakind1 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind1, + (select stakind2 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind2, + (select stakind3 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind3, + (select stakind4 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind4, + (select stakind5 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stakind5, + (select stanumbers1 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers1, + (select stanumbers2 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers2, + (select stanumbers3 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers3, + (select stanumbers4 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers4, + (select stanumbers5 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stanumbers5, + (select stavalues1 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues1, + (select stavalues2 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues2, + (select stavalues3 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues3, + (select stavalues4 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues4, + (select stavalues5 from pg_catalog.pg_get_gtt_statistics(c.oid, a.attnum, ''::text)) as stavalues5 FROM pg_class c JOIN pg_attribute a ON c.oid = a.attrelid LEFT JOIN pg_namespace n ON n.oid = c.relnamespace - WHERE c.relpersistence='g' AND c.relkind in('r','p','i','t') and a.attnum > 0 and NOT a.attisdropped AND has_column_privilege(c.oid, a.attnum, 'select'::text)) s; + WHERE c.relpersistence='g' AND c.relkind in('r','p','i','t') and a.attnum > 0 and NOT a.attisdropped AND pg_catalog.has_column_privilege(c.oid, a.attnum, 'select'::text)) s; CREATE VIEW pg_stats AS SELECT @@ -495,7 +495,7 @@ CREATE VIEW pg_stats AS FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid AND s.starelkind='c') JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum) LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) - WHERE NOT attisdropped AND has_column_privilege(c.oid, a.attnum, 'select'); + WHERE NOT attisdropped AND pg_catalog.has_column_privilege(c.oid, a.attnum, 'select'); CREATE VIEW pg_catalog.pg_ext_stats AS SELECT @@ -587,9 +587,9 @@ SELECT WHEN rel.relkind = 'L' THEN 'large sequence'::text WHEN rel.relkind = 'f' THEN 'foreign table'::text END AS objtype, rel.relnamespace AS objnamespace, - CASE WHEN pg_table_is_visible(rel.oid) - THEN quote_ident(rel.relname) - ELSE quote_ident(nsp.nspname) || '.' 
|| quote_ident(rel.relname) + CASE WHEN pg_catalog.pg_table_is_visible(rel.oid) + THEN pg_catalog.quote_ident(rel.relname) + ELSE pg_catalog.quote_ident(nsp.nspname) || '.' || pg_catalog.quote_ident(rel.relname) END AS objname, l.provider, l.label FROM @@ -603,9 +603,9 @@ SELECT l.objoid, l.classoid, l.objsubid, 'column'::text AS objtype, rel.relnamespace AS objnamespace, - CASE WHEN pg_table_is_visible(rel.oid) - THEN quote_ident(rel.relname) - ELSE quote_ident(nsp.nspname) || '.' || quote_ident(rel.relname) + CASE WHEN pg_catalog.pg_table_is_visible(rel.oid) + THEN pg_catalog.quote_ident(rel.relname) + ELSE pg_catalog.quote_ident(nsp.nspname) || '.' || pg_catalog.quote_ident(rel.relname) END || '.' || att.attname AS objname, l.provider, l.label FROM @@ -623,9 +623,9 @@ SELECT WHEN pro.proisagg = false THEN 'function'::text END AS objtype, pro.pronamespace AS objnamespace, - CASE WHEN pg_function_is_visible(pro.oid) - THEN quote_ident(pro.proname) - ELSE quote_ident(nsp.nspname) || '.' || quote_ident(pro.proname) + CASE WHEN pg_catalog.pg_function_is_visible(pro.oid) + THEN pg_catalog.quote_ident(pro.proname) + ELSE pg_catalog.quote_ident(nsp.nspname) || '.' || pg_catalog.quote_ident(pro.proname) END || '(' || pg_catalog.pg_get_function_arguments(pro.oid) || ')' AS objname, l.provider, l.label FROM @@ -640,9 +640,9 @@ SELECT CASE WHEN typ.typtype = 'd' THEN 'domain'::text ELSE 'type'::text END AS objtype, typ.typnamespace AS objnamespace, - CASE WHEN pg_type_is_visible(typ.oid) - THEN quote_ident(typ.typname) - ELSE quote_ident(nsp.nspname) || '.' || quote_ident(typ.typname) + CASE WHEN pg_catalog.pg_type_is_visible(typ.oid) + THEN pg_catalog.quote_ident(typ.typname) + ELSE pg_catalog.quote_ident(nsp.nspname) || '.' || pg_catalog.quote_ident(typ.typname) END AS objname, l.provider, l.label FROM @@ -668,7 +668,7 @@ SELECT l.objoid, l.classoid, l.objsubid, 'language'::text AS objtype, NULL::oid AS objnamespace, - quote_ident(lan.lanname) AS objname, + pg_catalog.quote_ident(lan.lanname) AS objname, l.provider, l.label FROM pg_seclabel l @@ -680,7 +680,7 @@ SELECT l.objoid, l.classoid, l.objsubid, 'schema'::text AS objtype, nsp.oid AS objnamespace, - quote_ident(nsp.nspname) AS objname, + pg_catalog.quote_ident(nsp.nspname) AS objname, l.provider, l.label FROM pg_seclabel l @@ -692,7 +692,7 @@ SELECT l.objoid, l.classoid, 0::int4 AS objsubid, 'database'::text AS objtype, NULL::oid AS objnamespace, - quote_ident(dat.datname) AS objname, + pg_catalog.quote_ident(dat.datname) AS objname, l.provider, l.label FROM pg_shseclabel l @@ -702,7 +702,7 @@ SELECT l.objoid, l.classoid, 0::int4 AS objsubid, 'tablespace'::text AS objtype, NULL::oid AS objnamespace, - quote_ident(spc.spcname) AS objname, + pg_catalog.quote_ident(spc.spcname) AS objname, l.provider, l.label FROM pg_shseclabel l @@ -712,7 +712,7 @@ SELECT l.objoid, l.classoid, 0::int4 AS objsubid, 'role'::text AS objtype, NULL::oid AS objnamespace, - quote_ident(rol.rolname) AS objname, + pg_catalog.quote_ident(rol.rolname) AS objname, l.provider, l.label FROM pg_shseclabel l @@ -724,7 +724,7 @@ CREATE VIEW pg_settings AS CREATE RULE pg_settings_u AS ON UPDATE TO pg_settings WHERE new.name = old.name DO - SELECT set_config(old.name, new.setting, 'f'); + SELECT pg_catalog.set_config(old.name, new.setting, 'f'); CREATE RULE pg_settings_n AS ON UPDATE TO pg_settings @@ -748,26 +748,26 @@ CREATE VIEW pg_stat_all_tables AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_numscans(C.oid) AS seq_scan, - 
pg_stat_get_tuples_returned(C.oid) AS seq_tup_read, - sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan, - sum(pg_stat_get_tuples_fetched(I.indexrelid))::bigint + - pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch, - pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins, - pg_stat_get_tuples_updated(C.oid) AS n_tup_upd, - pg_stat_get_tuples_deleted(C.oid) AS n_tup_del, - pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd, - pg_stat_get_live_tuples(C.oid) AS n_live_tup, - pg_stat_get_dead_tuples(C.oid) AS n_dead_tup, - pg_stat_get_last_vacuum_time(C.oid) as last_vacuum, - pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum, - pg_stat_get_last_analyze_time(C.oid) as last_analyze, - pg_stat_get_last_autoanalyze_time(C.oid) as last_autoanalyze, - pg_stat_get_vacuum_count(C.oid) AS vacuum_count, - pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count, - pg_stat_get_analyze_count(C.oid) AS analyze_count, - pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count, - pg_stat_get_last_data_changed_time(C.oid) AS last_data_changed + pg_catalog.pg_stat_get_numscans(C.oid) AS seq_scan, + pg_catalog.pg_stat_get_tuples_returned(C.oid) AS seq_tup_read, + pg_catalog.sum(pg_catalog.pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan, + pg_catalog.sum(pg_catalog.pg_stat_get_tuples_fetched(I.indexrelid))::bigint + + pg_catalog.pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch, + pg_catalog.pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins, + pg_catalog.pg_stat_get_tuples_updated(C.oid) AS n_tup_upd, + pg_catalog.pg_stat_get_tuples_deleted(C.oid) AS n_tup_del, + pg_catalog.pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd, + pg_catalog.pg_stat_get_live_tuples(C.oid) AS n_live_tup, + pg_catalog.pg_stat_get_dead_tuples(C.oid) AS n_dead_tup, + pg_catalog.pg_stat_get_last_vacuum_time(C.oid) as last_vacuum, + pg_catalog.pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum, + pg_catalog.pg_stat_get_last_analyze_time(C.oid) as last_analyze, + pg_catalog.pg_stat_get_last_autoanalyze_time(C.oid) as last_autoanalyze, + pg_catalog.pg_stat_get_vacuum_count(C.oid) AS vacuum_count, + pg_catalog.pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count, + pg_catalog.pg_stat_get_analyze_count(C.oid) AS analyze_count, + pg_catalog.pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count, + pg_catalog.pg_stat_get_last_data_changed_time(C.oid) AS last_data_changed FROM pg_class C LEFT JOIN pg_index I ON C.oid = I.indrelid LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) @@ -779,15 +779,15 @@ CREATE VIEW pg_stat_xact_all_tables AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_xact_numscans(C.oid) AS seq_scan, - pg_stat_get_xact_tuples_returned(C.oid) AS seq_tup_read, - sum(pg_stat_get_xact_numscans(I.indexrelid))::bigint AS idx_scan, - sum(pg_stat_get_xact_tuples_fetched(I.indexrelid))::bigint + - pg_stat_get_xact_tuples_fetched(C.oid) AS idx_tup_fetch, - pg_stat_get_xact_tuples_inserted(C.oid) AS n_tup_ins, - pg_stat_get_xact_tuples_updated(C.oid) AS n_tup_upd, - pg_stat_get_xact_tuples_deleted(C.oid) AS n_tup_del, - pg_stat_get_xact_tuples_hot_updated(C.oid) AS n_tup_hot_upd + pg_catalog.pg_stat_get_xact_numscans(C.oid) AS seq_scan, + pg_catalog.pg_stat_get_xact_tuples_returned(C.oid) AS seq_tup_read, + pg_catalog.sum(pg_catalog.pg_stat_get_xact_numscans(I.indexrelid))::bigint AS idx_scan, + pg_catalog.sum(pg_catalog.pg_stat_get_xact_tuples_fetched(I.indexrelid))::bigint + + pg_catalog.pg_stat_get_xact_tuples_fetched(C.oid) AS idx_tup_fetch, + 
pg_catalog.pg_stat_get_xact_tuples_inserted(C.oid) AS n_tup_ins, + pg_catalog.pg_stat_get_xact_tuples_updated(C.oid) AS n_tup_upd, + pg_catalog.pg_stat_get_xact_tuples_deleted(C.oid) AS n_tup_del, + pg_catalog.pg_stat_get_xact_tuples_hot_updated(C.oid) AS n_tup_hot_upd FROM pg_class C LEFT JOIN pg_index I ON C.oid = I.indrelid LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) @@ -819,18 +819,18 @@ CREATE VIEW pg_statio_all_tables AS C.oid AS relid, N.nspname AS schemaname, C.relname AS relname, - pg_stat_get_blocks_fetched(C.oid) - - pg_stat_get_blocks_hit(C.oid) AS heap_blks_read, - pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit, - sum(pg_stat_get_blocks_fetched(I.indexrelid) - - pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read, - sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit, - pg_stat_get_blocks_fetched(T.oid) - - pg_stat_get_blocks_hit(T.oid) AS toast_blks_read, - pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit, - pg_stat_get_blocks_fetched(X.oid) - - pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read, - pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit + pg_catalog.pg_stat_get_blocks_fetched(C.oid) - + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS heap_blks_read, + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit, + pg_catalog.sum(pg_catalog.pg_stat_get_blocks_fetched(I.indexrelid) - + pg_catalog.pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read, + pg_catalog.sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit, + pg_catalog.pg_stat_get_blocks_fetched(T.oid) - + pg_catalog.pg_stat_get_blocks_hit(T.oid) AS toast_blks_read, + pg_catalog.pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit, + pg_catalog.pg_stat_get_blocks_fetched(X.oid) - + pg_catalog.pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read, + pg_catalog.pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit FROM pg_class C LEFT JOIN pg_index I ON C.oid = I.indrelid LEFT JOIN pg_class T ON C.reltoastrelid = T.oid LEFT JOIN @@ -856,9 +856,9 @@ CREATE VIEW pg_stat_all_indexes AS N.nspname AS schemaname, C.relname AS relname, I.relname AS indexrelname, - pg_stat_get_numscans(I.oid) AS idx_scan, - pg_stat_get_tuples_returned(I.oid) AS idx_tup_read, - pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch + pg_catalog.pg_stat_get_numscans(I.oid) AS idx_scan, + pg_catalog.pg_stat_get_tuples_returned(I.oid) AS idx_tup_read, + pg_catalog.pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch FROM pg_class C JOIN pg_index X ON C.oid = X.indrelid JOIN pg_class I ON I.oid = X.indexrelid @@ -883,8 +883,8 @@ CREATE VIEW pg_statio_all_indexes AS C.relname AS relname, I.relname AS indexrelname, pg_stat_get_blocks_fetched(I.oid) - - pg_stat_get_blocks_hit(I.oid) AS idx_blks_read, - pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit + pg_catalog.pg_stat_get_blocks_hit(I.oid) AS idx_blks_read, + pg_catalog.pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit FROM pg_class C JOIN pg_index X ON C.oid = X.indrelid JOIN pg_class I ON I.oid = X.indexrelid @@ -907,8 +907,8 @@ CREATE VIEW pg_statio_all_sequences AS N.nspname AS schemaname, C.relname AS relname, pg_stat_get_blocks_fetched(C.oid) - - pg_stat_get_blocks_hit(C.oid) AS blks_read, - pg_stat_get_blocks_hit(C.oid) AS blks_hit + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS blks_read, + pg_catalog.pg_stat_get_blocks_hit(C.oid) AS blks_hit FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) WHERE C.relkind = 'S' or C.relkind = 'L'; @@ -948,8 +948,10 @@ CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS END AS resource_pool, S.query_id, S.query, - 
S.connection_info - FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + S.connection_info, + S.unique_sql_id, + S.trace_id + FROM pg_database D, pg_catalog.pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U WHERE S.datid = D.oid AND S.usesysid = U.oid; @@ -979,7 +981,7 @@ CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS S.query_id, S.query, N.node_group - FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + FROM pg_database D, pg_catalog.pg_stat_get_activity(NULL) AS S, pg_catalog.pg_stat_get_activity_ng(NULL) AS N, pg_authid U WHERE S.datid = D.oid AND S.usesysid = U.oid AND S.sessionid = N.sessionid; @@ -1017,7 +1019,7 @@ CREATE OR REPLACE VIEW pg_catalog.pg_session_wlmstat AS S.query, S.is_plana, S.node_group - FROM pg_database D, pg_stat_get_session_wlmstat(NULL) AS S, pg_authid AS U, gs_wlm_session_respool(0) AS T + FROM pg_database D, pg_catalog.pg_stat_get_session_wlmstat(NULL) AS S, pg_authid AS U, pg_catalog.gs_wlm_session_respool(0) AS T WHERE S.datid = D.oid AND S.usesysid = U.oid AND T.sessionid = S.sessionid; @@ -1033,7 +1035,7 @@ CREATE VIEW pg_wlm_statistics AS control_group, status, action - FROM pg_stat_get_wlm_statistics(NULL); + FROM pg_catalog.pg_stat_get_wlm_statistics(NULL); CREATE VIEW gs_session_memory_statistics AS SELECT @@ -1047,7 +1049,7 @@ SELECT S.query, S.node_group, T.top_mem_dn -FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T WHERE S.pid = T.threadid; CREATE VIEW pg_session_iostat AS @@ -1065,10 +1067,10 @@ CREATE VIEW pg_session_iostat AS S.query, S.node_group, T.curr_io_limits as curr_io_limits -FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_session_iostat_info(0) AS T +FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T WHERE S.pid = T.threadid; -CREATE VIEW gs_cluster_resource_info AS SELECT * FROM pg_stat_get_wlm_node_resource_info(0); +CREATE VIEW gs_cluster_resource_info AS SELECT * FROM pg_catalog.pg_stat_get_wlm_node_resource_info(0); CREATE VIEW gs_session_cpu_statistics AS SELECT @@ -1082,7 +1084,7 @@ SELECT S.query, S.node_group, T.top_cpu_dn -FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T WHERE S.sessionid = T.threadid; CREATE VIEW gs_wlm_session_statistics AS @@ -1136,7 +1138,7 @@ SELECT S.node_group, T.top_cpu_dn, T.top_mem_dn -FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +FROM pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T WHERE S.pid = T.threadid; CREATE OR REPLACE FUNCTION gs_wlm_get_all_user_resource_info() @@ -1150,7 +1152,7 @@ DECLARE BEGIN query_str := 'SELECT rolname FROM pg_authid'; FOR row_name IN EXECUTE(query_str) LOOP - query_str2 := 'SELECT * FROM gs_wlm_user_resource_info(''' || row_name.rolname || ''')'; + query_str2 := 'SELECT * FROM pg_catalog.gs_wlm_user_resource_info(''' || row_name.rolname || ''')'; FOR row_data IN EXECUTE(query_str2) LOOP return next row_data; END LOOP; @@ -1276,7 +1278,7 @@ DECLARE record_cnt int; BEGIN record_cnt := 0; - query_str := 'SELECT * FROM pg_stat_get_wlm_instance_info_with_cleanup()'; + query_str := 'SELECT * FROM pg_catalog.pg_stat_get_wlm_instance_info_with_cleanup()'; EXECUTE 'INSERT INTO gs_wlm_instance_history ' || query_str; return record_cnt; 
END; $$ @@ -1374,7 +1376,7 @@ create table gs_wlm_session_query_info_all ); CREATE VIEW gs_wlm_session_info_all AS -SELECT * FROM pg_stat_get_wlm_session_info(0); +SELECT * FROM pg_catalog.pg_stat_get_wlm_session_info(0); CREATE VIEW gs_wlm_session_info AS SELECT @@ -1522,7 +1524,7 @@ FROM gs_wlm_session_info_all S; -CREATE OR REPLACE FUNCTION create_wlm_session_info(IN flag int) +CREATE OR REPLACE FUNCTION pg_catalog.create_wlm_session_info(IN flag int) RETURNS int AS $$ DECLARE @@ -1531,7 +1533,7 @@ DECLARE BEGIN record_cnt := 0; - query_str := 'SELECT * FROM pg_stat_get_wlm_session_info(1)'; + query_str := 'SELECT * FROM pg_catalog.pg_stat_get_wlm_session_info(1)'; IF flag > 0 THEN EXECUTE 'INSERT INTO gs_wlm_session_query_info_all ' || query_str; @@ -1554,7 +1556,7 @@ CREATE VIEW gs_wlm_cgroup_info AS relpath, valid, node_group - FROM pg_stat_get_cgroup_info(NULL); + FROM pg_catalog.pg_stat_get_cgroup_info(NULL); CREATE VIEW gs_wlm_user_info AS SELECT @@ -1568,7 +1570,7 @@ SELECT T.spacelimit, T.childcount, T.childlist -FROM pg_roles AS S, gs_wlm_get_user_info(NULL) AS T, pg_resource_pool AS R +FROM pg_roles AS S, pg_catalog.gs_wlm_get_user_info(NULL) AS T, pg_resource_pool AS R WHERE S.oid = T.userid AND T.rpoid = R.oid; CREATE VIEW gs_wlm_resource_pool AS @@ -1583,11 +1585,11 @@ SELECT T.waiting_count, T.iops_limits as io_limits, T.io_priority -FROM gs_wlm_get_resource_pool_info(0) AS T, pg_resource_pool AS R +FROM pg_catalog.gs_wlm_get_resource_pool_info(0) AS T, pg_resource_pool AS R WHERE T.respool_oid = R.oid; CREATE VIEW gs_wlm_rebuild_user_resource_pool AS - SELECT * FROM gs_wlm_rebuild_user_resource_pool(0); + SELECT * FROM pg_catalog.gs_wlm_rebuild_user_resource_pool(0); CREATE VIEW gs_wlm_workload_records AS SELECT @@ -1606,7 +1608,7 @@ CREATE VIEW gs_wlm_workload_records AS P.queue_type AS enqueue, S.query, P.node_group - FROM pg_stat_get_session_wlmstat(NULL) AS S, pg_authid U, gs_wlm_get_workload_records(0) P + FROM pg_catalog.pg_stat_get_session_wlmstat(NULL) AS S, pg_authid U, pg_catalog.gs_wlm_get_workload_records(0) P WHERE P.query_pid = S.threadpid AND S.usesysid = U.oid; @@ -1699,7 +1701,7 @@ BEGIN T.totalsize AS totalsize, T.freesize AS freesize, T.usedsize AS usedsize - FROM pv_thread_memory_detail() T;'; + FROM pg_catalog.pv_thread_memory_detail() T;'; FOR row_data IN EXECUTE(query_str) LOOP sessid = row_data.sessid; sesstype = row_data.sesstype; @@ -1735,8 +1737,8 @@ CREATE VIEW pg_stat_replication AS W.receiver_replay_location, W.sync_priority, W.sync_state - FROM pg_stat_get_activity(NULL) AS S, pg_authid U, - pg_stat_get_wal_senders() AS W + FROM pg_catalog.pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_catalog.pg_stat_get_wal_senders() AS W WHERE S.usesysid = U.oid AND S.pid = W.pid; @@ -1760,35 +1762,35 @@ CREATE VIEW pg_stat_database AS SELECT D.oid AS datid, D.datname AS datname, - pg_stat_get_db_numbackends(D.oid) AS numbackends, - pg_stat_get_db_xact_commit(D.oid) AS xact_commit, - pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback, - pg_stat_get_db_blocks_fetched(D.oid) - - pg_stat_get_db_blocks_hit(D.oid) AS blks_read, - pg_stat_get_db_blocks_hit(D.oid) AS blks_hit, - pg_stat_get_db_tuples_returned(D.oid) AS tup_returned, - pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched, - pg_stat_get_db_tuples_inserted(D.oid) AS tup_inserted, - pg_stat_get_db_tuples_updated(D.oid) AS tup_updated, - pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted, - pg_stat_get_db_conflict_all(D.oid) AS conflicts, - pg_stat_get_db_temp_files(D.oid) AS temp_files, - 
pg_stat_get_db_temp_bytes(D.oid) AS temp_bytes, - pg_stat_get_db_deadlocks(D.oid) AS deadlocks, - pg_stat_get_db_blk_read_time(D.oid) AS blk_read_time, - pg_stat_get_db_blk_write_time(D.oid) AS blk_write_time, - pg_stat_get_db_stat_reset_time(D.oid) AS stats_reset + pg_catalog.pg_stat_get_db_numbackends(D.oid) AS numbackends, + pg_catalog.pg_stat_get_db_xact_commit(D.oid) AS xact_commit, + pg_catalog.pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback, + pg_catalog.pg_stat_get_db_blocks_fetched(D.oid) - + pg_catalog.pg_stat_get_db_blocks_hit(D.oid) AS blks_read, + pg_catalog.pg_stat_get_db_blocks_hit(D.oid) AS blks_hit, + pg_catalog.pg_stat_get_db_tuples_returned(D.oid) AS tup_returned, + pg_catalog.pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched, + pg_catalog.pg_stat_get_db_tuples_inserted(D.oid) AS tup_inserted, + pg_catalog.pg_stat_get_db_tuples_updated(D.oid) AS tup_updated, + pg_catalog.pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted, + pg_catalog.pg_stat_get_db_conflict_all(D.oid) AS conflicts, + pg_catalog.pg_stat_get_db_temp_files(D.oid) AS temp_files, + pg_catalog.pg_stat_get_db_temp_bytes(D.oid) AS temp_bytes, + pg_catalog.pg_stat_get_db_deadlocks(D.oid) AS deadlocks, + pg_catalog.pg_stat_get_db_blk_read_time(D.oid) AS blk_read_time, + pg_catalog.pg_stat_get_db_blk_write_time(D.oid) AS blk_write_time, + pg_catalog.pg_stat_get_db_stat_reset_time(D.oid) AS stats_reset FROM pg_database D; CREATE VIEW pg_stat_database_conflicts AS SELECT D.oid AS datid, D.datname AS datname, - pg_stat_get_db_conflict_tablespace(D.oid) AS confl_tablespace, - pg_stat_get_db_conflict_lock(D.oid) AS confl_lock, - pg_stat_get_db_conflict_snapshot(D.oid) AS confl_snapshot, - pg_stat_get_db_conflict_bufferpin(D.oid) AS confl_bufferpin, - pg_stat_get_db_conflict_startup_deadlock(D.oid) AS confl_deadlock + pg_catalog.pg_stat_get_db_conflict_tablespace(D.oid) AS confl_tablespace, + pg_catalog.pg_stat_get_db_conflict_lock(D.oid) AS confl_lock, + pg_catalog.pg_stat_get_db_conflict_snapshot(D.oid) AS confl_snapshot, + pg_catalog.pg_stat_get_db_conflict_bufferpin(D.oid) AS confl_bufferpin, + pg_catalog.pg_stat_get_db_conflict_startup_deadlock(D.oid) AS confl_deadlock FROM pg_database D; CREATE VIEW pg_stat_user_functions AS @@ -1796,24 +1798,24 @@ CREATE VIEW pg_stat_user_functions AS P.oid AS funcid, N.nspname AS schemaname, P.proname AS funcname, - pg_stat_get_function_calls(P.oid) AS calls, - pg_stat_get_function_total_time(P.oid) AS total_time, - pg_stat_get_function_self_time(P.oid) AS self_time + pg_catalog.pg_stat_get_function_calls(P.oid) AS calls, + pg_catalog.pg_stat_get_function_total_time(P.oid) AS total_time, + pg_catalog.pg_stat_get_function_self_time(P.oid) AS self_time FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace) WHERE P.prolang != 12 -- fast check to eliminate built-in functions - AND pg_stat_get_function_calls(P.oid) IS NOT NULL; + AND pg_catalog.pg_stat_get_function_calls(P.oid) IS NOT NULL; CREATE VIEW pg_stat_xact_user_functions AS SELECT P.oid AS funcid, N.nspname AS schemaname, P.proname AS funcname, - pg_stat_get_xact_function_calls(P.oid) AS calls, - pg_stat_get_xact_function_total_time(P.oid) AS total_time, - pg_stat_get_xact_function_self_time(P.oid) AS self_time + pg_catalog.pg_stat_get_xact_function_calls(P.oid) AS calls, + pg_catalog.pg_stat_get_xact_function_total_time(P.oid) AS total_time, + pg_catalog.pg_stat_get_xact_function_self_time(P.oid) AS self_time FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace) WHERE P.prolang != 12 -- 
fast check to eliminate built-in functions - AND pg_stat_get_xact_function_calls(P.oid) IS NOT NULL; + AND pg_catalog.pg_stat_get_xact_function_calls(P.oid) IS NOT NULL; CREATE VIEW pg_stat_bgwriter AS SELECT @@ -1840,7 +1842,7 @@ CREATE VIEW pg_user_mappings AS ELSE A.rolname END AS usename, - CASE WHEN pg_has_role(S.srvowner, 'USAGE') OR has_server_privilege(S.oid, 'USAGE') THEN + CASE WHEN pg_catalog.pg_has_role(S.srvowner, 'USAGE') OR pg_catalog.has_server_privilege(S.oid, 'USAGE') THEN U.umoptions ELSE NULL @@ -1852,47 +1854,47 @@ CREATE VIEW pg_user_mappings AS REVOKE ALL on pg_user_mapping FROM public; -- these functions are added for supporting default format transformation -CREATE OR REPLACE FUNCTION to_char(NUMERIC) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(NUMERIC) RETURNS VARCHAR2 AS $$ SELECT CAST(numeric_out($1) AS VARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_char(INT2) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(INT2) RETURNS VARCHAR2 -AS $$ SELECT CAST(int2out($1) AS VARCHAR2) $$ +AS $$ SELECT CAST(pg_catalog.int2out($1) AS VARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_char(INT4) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(INT4) RETURNS VARCHAR2 -AS $$ SELECT CAST(int4out($1) AS VARCHAR2) $$ +AS $$ SELECT CAST(pg_catalog.int4out($1) AS VARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_char(INT8) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(INT8) RETURNS VARCHAR2 -AS $$ SELECT CAST(int8out($1) AS VARCHAR2) $$ +AS $$ SELECT CAST(pg_catalog.int8out($1) AS VARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_char(FLOAT4) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(FLOAT4) RETURNS VARCHAR2 -AS $$ SELECT CAST(float4out($1) AS VARCHAR2) $$ +AS $$ SELECT CAST(pg_catalog.float4out($1) AS VARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_char(FLOAT8) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(FLOAT8) RETURNS VARCHAR2 -AS $$ SELECT CAST(float8out($1) AS VARCHAR2) $$ +AS $$ SELECT CAST(pg_catalog.float8out($1) AS VARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_char(TEXT) +CREATE OR REPLACE FUNCTION pg_catalog.to_char(TEXT) RETURNS TEXT AS $$ SELECT $1 $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION to_number(TEXT) +CREATE OR REPLACE FUNCTION pg_catalog.to_number(TEXT) RETURNS NUMERIC -AS $$ SELECT numeric_in(textout($1), 0::Oid, -1) $$ +AS $$ SELECT pg_catalog.numeric_in(textout($1), 0::Oid, -1) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE CAST (VARCHAR2 AS RAW) WITH FUNCTION hextoraw(text) AS IMPLICIT; +CREATE CAST (VARCHAR2 AS RAW) WITH FUNCTION pg_catalog.hextoraw(text) AS IMPLICIT; -- -- We have a few function definitions in here, too. @@ -1903,7 +1905,7 @@ CREATE CAST (VARCHAR2 AS RAW) WITH FUNCTION hextoraw(text) AS IMPLICIT; -- Tsearch debug function. 
Defined here because it'd be pretty unwieldy -- to put it into pg_proc.h -CREATE FUNCTION ts_debug(IN config regconfig, IN document text, +CREATE FUNCTION pg_catalog.ts_debug(IN config regconfig, IN document text, OUT alias text, OUT description text, OUT token text, @@ -1943,10 +1945,10 @@ WHERE tt.tokid = parse.tokid $$ LANGUAGE SQL STRICT STABLE NOT FENCED; -COMMENT ON FUNCTION ts_debug(regconfig,text) IS +COMMENT ON FUNCTION pg_catalog.ts_debug(regconfig,text) IS 'debug function for text search configuration'; -CREATE FUNCTION ts_debug(IN document text, +CREATE FUNCTION pg_catalog.ts_debug(IN document text, OUT alias text, OUT description text, OUT token text, @@ -1959,7 +1961,7 @@ $$ $$ LANGUAGE SQL STRICT STABLE NOT FENCED; -COMMENT ON FUNCTION ts_debug(text) IS +COMMENT ON FUNCTION pg_catalog.ts_debug(text) IS 'debug function for current text search configuration'; -- @@ -1971,133 +1973,133 @@ COMMENT ON FUNCTION ts_debug(text) IS -- to get filled in.) -- -CREATE OR REPLACE FUNCTION TO_TEXT(INT2) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(INT2) RETURNS TEXT -AS $$ select CAST(int2out($1) AS VARCHAR) $$ +AS $$ select CAST(pg_catalog.int2out($1) AS VARCHAR) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(INT4) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(INT4) RETURNS TEXT -AS $$ select CAST(int4out($1) AS VARCHAR) $$ +AS $$ select CAST(pg_catalog.int4out($1) AS VARCHAR) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(INT8) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(INT8) RETURNS TEXT -AS $$ select CAST(int8out($1) AS VARCHAR) $$ +AS $$ select CAST(pg_catalog.int8out($1) AS VARCHAR) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(FLOAT4) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(FLOAT4) RETURNS TEXT -AS $$ select CAST(float4out($1) AS VARCHAR) $$ +AS $$ select CAST(pg_catalog.float4out($1) AS VARCHAR) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(FLOAT8) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(FLOAT8) RETURNS TEXT -AS $$ select CAST(float8out($1) AS VARCHAR) $$ +AS $$ select CAST(pg_catalog.float8out($1) AS VARCHAR) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(NUMERIC) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(NUMERIC) RETURNS TEXT -AS $$ SELECT CAST(numeric_out($1) AS VARCHAR) $$ +AS $$ SELECT CAST(pg_catalog.numeric_out($1) AS VARCHAR) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(INTERVAL) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(INTERVAL) RETURNS TEXT -AS $$ select CAST(interval_out($1) AS TEXT) $$ +AS $$ select CAST(pg_catalog.interval_out($1) AS TEXT) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; --logical decoding CREATE CAST (INTERVAL AS TEXT) WITH FUNCTION -TO_TEXT(INTERVAL) AS IMPLICIT; +pg_catalog.TO_TEXT(INTERVAL) AS IMPLICIT; -create or replace function to_number(text) +create or replace function pg_catalog.to_number(text) returns numeric -AS $$ select numeric_in(textout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.numeric_in(pg_catalog.textout($1), 0::Oid, -1) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION time_text(time) +CREATE OR REPLACE FUNCTION pg_catalog.time_text(time) RETURNS text -AS $$ SELECT CAST(time_out($1) AS text) $$ +AS $$ SELECT CAST(pg_catalog.time_out($1) AS text) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE CAST (time AS text) WITH FUNCTION time_text(time) AS IMPLICIT; +CREATE CAST 
(time AS text) WITH FUNCTION pg_catalog.time_text(time) AS IMPLICIT; -CREATE OR REPLACE FUNCTION timetz_text(timetz) +CREATE OR REPLACE FUNCTION pg_catalog.timetz_text(timetz) RETURNS text -AS $$ SELECT CAST(timetz_out($1) AS text) $$ +AS $$ SELECT CAST(pg_catalog.timetz_out($1) AS text) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE CAST (timetz AS text) WITH FUNCTION timetz_text(timetz) AS IMPLICIT; +CREATE CAST (timetz AS text) WITH FUNCTION pg_catalog.timetz_text(timetz) AS IMPLICIT; -CREATE OR REPLACE FUNCTION reltime_text(reltime) +CREATE OR REPLACE FUNCTION pg_catalog.reltime_text(reltime) RETURNS text -AS $$ SELECT CAST(reltimeout($1) AS text) $$ +AS $$ SELECT CAST(pg_catalog.reltimeout($1) AS text) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE CAST (reltime AS text) WITH FUNCTION reltime_text(reltime) AS IMPLICIT; +CREATE CAST (reltime AS text) WITH FUNCTION pg_catalog.reltime_text(reltime) AS IMPLICIT; -CREATE OR REPLACE FUNCTION abstime_text(abstime) +CREATE OR REPLACE FUNCTION pg_catalog.abstime_text(abstime) RETURNS text -AS $$ SELECT CAST(abstimeout($1) AS text) $$ +AS $$ SELECT CAST(pg_catalog.abstimeout($1) AS text) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE CAST (abstime AS text) WITH FUNCTION abstime_text(abstime) AS IMPLICIT; +CREATE CAST (abstime AS text) WITH FUNCTION pg_catalog.abstime_text(abstime) AS IMPLICIT; /*text to num*/ -create or replace function int1(text) +create or replace function pg_catalog.int1(text) returns int1 -as $$ select cast(to_number($1) as int1)$$ +as $$ select cast(pg_catalog.to_number($1) as int1)$$ language sql IMMUTABLE strict NOT FENCED; -create or replace function int2(text) +create or replace function pg_catalog.int2(text) returns int2 -as $$ select cast(to_number($1) as int2)$$ +as $$ select cast(pg_catalog.to_number($1) as int2)$$ language sql IMMUTABLE strict NOT FENCED; -create or replace function int4(text) +create or replace function pg_catalog.int4(text) returns int4 -as $$ select cast(to_number($1) as int4) $$ +as $$ select cast(pg_catalog.to_number($1) as int4) $$ language sql IMMUTABLE strict NOT FENCED; -create or replace function int8(text) +create or replace function pg_catalog.int8(text) returns int8 -as $$ select cast(to_number($1) as int8) $$ +as $$ select cast(pg_catalog.to_number($1) as int8) $$ language sql IMMUTABLE strict NOT FENCED; -create or replace function float4(text) +create or replace function pg_catalog.float4(text) returns float4 -as $$ select cast(to_number($1) as float4) $$ +as $$ select cast(pg_catalog.to_number($1) as float4) $$ language sql IMMUTABLE strict NOT FENCED; -create or replace function float8(text) +create or replace function pg_catalog.float8(text) returns float8 -as $$ select cast(to_number($1) as float8) $$ +as $$ select cast(pg_catalog.to_number($1) as float8) $$ language sql IMMUTABLE strict NOT FENCED; /*character to numeric*/ -CREATE OR REPLACE FUNCTION TO_NUMERIC(CHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_NUMERIC(CHAR) RETURNS NUMERIC -AS $$ SELECT TO_NUMBER($1::TEXT)$$ +AS $$ SELECT pg_catalog.TO_NUMBER($1::TEXT)$$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION TO_NUMERIC(VARCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_NUMERIC(VARCHAR) RETURNS NUMERIC -AS $$ SELECT TO_NUMBER($1::TEXT)$$ +AS $$ SELECT pg_catalog.TO_NUMBER($1::TEXT)$$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; /*character to int*/ -CREATE OR REPLACE FUNCTION TO_INTEGER(VARCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_INTEGER(VARCHAR) RETURNS INTEGER -AS $$ SELECT 
int4in(varcharout($1)) $$ +AS $$ SELECT pg_catalog.int4in(pg_catalog.varcharout($1)) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION TO_INTEGER(CHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_INTEGER(CHAR) RETURNS INTEGER -AS $$ SELECT int4in(bpcharout($1)) $$ +AS $$ SELECT pg_catalog.int4in(pg_catalog.bpcharout($1)) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (TEXT AS RAW) WITH FUNCTION hextoraw(TEXT); -CREATE CAST (RAW AS TEXT) WITH FUNCTION rawtohex(raw) AS IMPLICIT; +CREATE CAST (TEXT AS RAW) WITH FUNCTION pg_catalog.hextoraw(TEXT); +CREATE CAST (RAW AS TEXT) WITH FUNCTION pg_catalog.rawtohex(raw) AS IMPLICIT; CREATE CAST (BLOB AS RAW) WITHOUT FUNCTION AS IMPLICIT; CREATE CAST (RAW AS BLOB) WITHOUT FUNCTION AS IMPLICIT; @@ -2106,293 +2108,293 @@ CREATE CAST (TEXT AS CLOB) WITHOUT FUNCTION AS IMPLICIT; CREATE CAST (CLOB AS TEXT) WITHOUT FUNCTION AS IMPLICIT; /* text to clob */ -CREATE OR REPLACE FUNCTION to_clob(TEXT) +CREATE OR REPLACE FUNCTION pg_catalog.to_clob(TEXT) RETURNS CLOB AS $$ select $1 $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; /* char to clob */ -CREATE OR REPLACE FUNCTION to_clob(CHAR) +CREATE OR REPLACE FUNCTION pg_catalog.to_clob(CHAR) RETURNS CLOB AS $$ select CAST($1 AS TEXT) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION to_clob(VARCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.to_clob(VARCHAR) RETURNS CLOB AS $$ select CAST($1 AS TEXT) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION to_clob(NVARCHAR2) +CREATE OR REPLACE FUNCTION pg_catalog.to_clob(NVARCHAR2) RETURNS CLOB AS $$ select CAST($1 AS TEXT) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; /*character to int8*/ -CREATE OR REPLACE FUNCTION TO_BIGINT(VARCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_BIGINT(VARCHAR) RETURNS BIGINT -AS $$ SELECT int8in(varcharout($1))$$ +AS $$ SELECT pg_catalog.int8in(pg_catalog.varcharout($1))$$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; /*float8 to numeric*/ -CREATE OR REPLACE FUNCTION TO_NUMERIC(double precision) +CREATE OR REPLACE FUNCTION pg_catalog.TO_NUMERIC(double precision) RETURNS NUMERIC -AS $$ SELECT TO_NUMBER($1::TEXT)$$ +AS $$ SELECT pg_catalog.TO_NUMBER($1::TEXT)$$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; /*date to char(n)*/ -CREATE OR REPLACE FUNCTION TO_TEXT(TIMESTAMP WITHOUT TIME ZONE) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(TIMESTAMP WITHOUT TIME ZONE) RETURNS TEXT -AS $$ select CAST(timestamp_out($1) AS VARCHAR2) $$ +AS $$ select CAST(pg_catalog.timestamp_out($1) AS VARCHAR2) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TEXT(TIMESTAMP WITH TIME ZONE) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(TIMESTAMP WITH TIME ZONE) RETURNS TEXT -AS $$ select CAST(timestamptz_out($1) AS VARCHAR2) $$ +AS $$ select CAST(pg_catalog.timestamptz_out($1) AS VARCHAR2) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION TRUNC(TIMESTAMP WITH TIME ZONE) +CREATE OR REPLACE FUNCTION pg_catalog.TRUNC(TIMESTAMP WITH TIME ZONE) RETURNS TIMESTAMP WITHOUT TIME ZONE AS $$ SELECT CAST(DATE_TRUNC('day',$1) AS TIMESTAMP WITHOUT TIME ZONE); $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION SUBSTR(TEXT, INT8, INT8) RETURNS TEXT AS $$ - select SUBSTR($1, $2::INT4, $3::INT4); +CREATE OR REPLACE FUNCTION pg_catalog.SUBSTR(TEXT, INT8, INT8) RETURNS TEXT AS $$ + select pg_catalog.SUBSTR($1, $2::INT4, $3::INT4); $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION SUBSTR(TEXT, INT8) RETURNS TEXT AS $$ - select 
SUBSTR($1, $2::INT4); +CREATE OR REPLACE FUNCTION pg_catalog.SUBSTR(TEXT, INT8) RETURNS TEXT AS $$ + select pg_catalog.SUBSTR($1, $2::INT4); $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; /* timestamp to varchar2 */ -CREATE OR REPLACE FUNCTION TO_VARCHAR2(TIMESTAMP WITHOUT TIME ZONE) +CREATE OR REPLACE FUNCTION pg_catalog.TO_VARCHAR2(TIMESTAMP WITHOUT TIME ZONE) RETURNS VARCHAR2 -AS $$ select CAST(timestamp_out($1) AS VARCHAR2) $$ +AS $$ select CAST(pg_catalog.timestamp_out($1) AS VARCHAR2) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; /* interval to varchar2 */ -CREATE OR REPLACE FUNCTION TO_VARCHAR2(INTERVAL) +CREATE OR REPLACE FUNCTION pg_catalog.TO_VARCHAR2(INTERVAL) RETURNS VARCHAR2 -AS $$ select CAST(interval_out($1) AS VARCHAR2) $$ +AS $$ select CAST(pg_catalog.interval_out($1) AS VARCHAR2) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (INTERVAL AS VARCHAR2) WITH FUNCTION TO_VARCHAR2(INTERVAL) AS IMPLICIT; +CREATE CAST (INTERVAL AS VARCHAR2) WITH FUNCTION pg_catalog.TO_VARCHAR2(INTERVAL) AS IMPLICIT; /* char,varchar2 to interval */ -CREATE OR REPLACE FUNCTION TO_INTERVAL(BPCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_INTERVAL(BPCHAR) RETURNS INTERVAL -AS $$ select interval_in(bpcharout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.interval_in(pg_catalog.bpcharout($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION TO_INTERVAL(VARCHAR2) +CREATE OR REPLACE FUNCTION pg_catalog.TO_INTERVAL(VARCHAR2) RETURNS INTERVAL -AS $$ select interval_in(varcharout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.interval_in(pg_catalog.varcharout($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (BPCHAR AS INTERVAL) WITH FUNCTION TO_INTERVAL(BPCHAR) AS IMPLICIT; -CREATE CAST (VARCHAR2 AS INTERVAL) WITH FUNCTION TO_INTERVAL(VARCHAR2) AS IMPLICIT; +CREATE CAST (BPCHAR AS INTERVAL) WITH FUNCTION pg_catalog.TO_INTERVAL(BPCHAR) AS IMPLICIT; +CREATE CAST (VARCHAR2 AS INTERVAL) WITH FUNCTION pg_catalog.TO_INTERVAL(VARCHAR2) AS IMPLICIT; /* raw to varchar2 */ -CREATE CAST (RAW AS VARCHAR2) WITH FUNCTION rawtohex(RAW) AS IMPLICIT; +CREATE CAST (RAW AS VARCHAR2) WITH FUNCTION pg_catalog.rawtohex(RAW) AS IMPLICIT; /* varchar2,char to timestamp */ -CREATE OR REPLACE FUNCTION TO_TS(VARCHAR2) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TS(VARCHAR2) RETURNS TIMESTAMP WITHOUT TIME ZONE -AS $$ select timestamp_in(varcharout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.timestamp_in(pg_catalog.varcharout($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION TO_TS(BPCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TS(BPCHAR) RETURNS TIMESTAMP WITHOUT TIME ZONE -AS $$ select timestamp_in(bpcharout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.timestamp_in(pg_catalog.bpcharout($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION timestamp_to_smalldatetime(TIMESTAMP WITHOUT TIME ZONE) +CREATE OR REPLACE FUNCTION pg_catalog.timestamp_to_smalldatetime(TIMESTAMP WITHOUT TIME ZONE) RETURNS SMALLDATETIME -AS $$ select smalldatetime_in(timestamp_out($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.smalldatetime_in(pg_catalog.timestamp_out($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (TIMESTAMP WITHOUT TIME ZONE AS SMALLDATETIME) WITH FUNCTION timestamp_to_smalldatetime(TIMESTAMP WITHOUT TIME ZONE) AS IMPLICIT; +CREATE CAST (TIMESTAMP WITHOUT TIME ZONE AS SMALLDATETIME) WITH FUNCTION pg_catalog.timestamp_to_smalldatetime(TIMESTAMP WITHOUT TIME ZONE) AS IMPLICIT; 
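-- The cast above follows the out/in template used for most casts in this file:
-- serialize with the source type's output function, reparse with the target type's
-- input function, both pinned to pg_catalog. A usage sketch (the literal is
-- illustrative; SMALLDATETIME keeps minute precision, so seconds are expected to
-- round away):
--
--   SELECT CAST(TIMESTAMP '2022-03-04 10:15:45' AS SMALLDATETIME);
--
-- and, because the cast is declared IMPLICIT, plain assignment to a hypothetical
-- SMALLDATETIME column performs the same conversion silently.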
-CREATE OR REPLACE FUNCTION smalldatetime_to_timestamp(smalldatetime) +CREATE OR REPLACE FUNCTION pg_catalog.smalldatetime_to_timestamp(smalldatetime) RETURNS TIMESTAMP WITHOUT TIME ZONE -AS $$ select timestamp_in(smalldatetime_out($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.timestamp_in(pg_catalog.smalldatetime_out($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (smalldatetime AS TIMESTAMP WITHOUT TIME ZONE) WITH FUNCTION smalldatetime_to_timestamp(smalldatetime) AS IMPLICIT; +CREATE CAST (smalldatetime AS TIMESTAMP WITHOUT TIME ZONE) WITH FUNCTION pg_catalog.smalldatetime_to_timestamp(smalldatetime) AS IMPLICIT; /* smalldatetime to text */ -CREATE OR REPLACE FUNCTION TO_TEXT(smalldatetime) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TEXT(smalldatetime) RETURNS TEXT -AS $$ select CAST(smalldatetime_out($1) AS VARCHAR2) $$ +AS $$ select CAST(pg_catalog.smalldatetime_out($1) AS VARCHAR2) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (smalldatetime AS TEXT) WITH FUNCTION TO_TEXT(smalldatetime) AS IMPLICIT; +CREATE CAST (smalldatetime AS TEXT) WITH FUNCTION pg_catalog.TO_TEXT(smalldatetime) AS IMPLICIT; /* smalldatetime to varchar2 */ -CREATE OR REPLACE FUNCTION SMALLDATETIME_TO_VARCHAR2(smalldatetime) +CREATE OR REPLACE FUNCTION pg_catalog.SMALLDATETIME_TO_VARCHAR2(smalldatetime) RETURNS VARCHAR2 -AS $$ select CAST(smalldatetime_out($1) AS VARCHAR2) $$ +AS $$ select CAST(pg_catalog.smalldatetime_out($1) AS VARCHAR2) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (smalldatetime AS VARCHAR2) WITH FUNCTION SMALLDATETIME_TO_VARCHAR2(smalldatetime) AS IMPLICIT; +CREATE CAST (smalldatetime AS VARCHAR2) WITH FUNCTION pg_catalog.SMALLDATETIME_TO_VARCHAR2(smalldatetime) AS IMPLICIT; /* varchar2, bpchar to smalldatetime */ -CREATE OR REPLACE FUNCTION VARCHAR2_TO_SMLLDATETIME(VARCHAR2) +CREATE OR REPLACE FUNCTION pg_catalog.VARCHAR2_TO_SMLLDATETIME(VARCHAR2) RETURNS SMALLDATETIME -AS $$ select smalldatetime_in(varcharout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.smalldatetime_in(pg_catalog.varcharout($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE OR REPLACE FUNCTION BPCHAR_TO_SMALLDATETIME(BPCHAR) +CREATE OR REPLACE FUNCTION pg_catalog.BPCHAR_TO_SMALLDATETIME(BPCHAR) RETURNS SMALLDATETIME -AS $$ select smalldatetime_in(bpcharout($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.smalldatetime_in(pg_catalog.bpcharout($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (VARCHAR2 AS SMALLDATETIME) WITH FUNCTION VARCHAR2_TO_SMLLDATETIME(VARCHAR2) AS IMPLICIT; +CREATE CAST (VARCHAR2 AS SMALLDATETIME) WITH FUNCTION pg_catalog.VARCHAR2_TO_SMLLDATETIME(VARCHAR2) AS IMPLICIT; -CREATE CAST (BPCHAR AS SMALLDATETIME) WITH FUNCTION BPCHAR_TO_SMALLDATETIME(BPCHAR) AS IMPLICIT; +CREATE CAST (BPCHAR AS SMALLDATETIME) WITH FUNCTION pg_catalog.BPCHAR_TO_SMALLDATETIME(BPCHAR) AS IMPLICIT; /*abstime TO smalldatetime*/ -CREATE OR REPLACE FUNCTION abstime_to_smalldatetime(ABSTIME) +CREATE OR REPLACE FUNCTION pg_catalog.abstime_to_smalldatetime(ABSTIME) RETURNS SMALLDATETIME -AS $$ select smalldatetime_in(timestamp_out($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.smalldatetime_in(pg_catalog.timestamp_out($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (ABSTIME AS SMALLDATETIME) WITH FUNCTION abstime_to_smalldatetime(ABSTIME) AS IMPLICIT; +CREATE CAST (ABSTIME AS SMALLDATETIME) WITH FUNCTION pg_catalog.abstime_to_smalldatetime(ABSTIME) AS IMPLICIT; /*smalldatetime_to_abstime*/ -CREATE OR REPLACE FUNCTION 
smalldatetime_to_abstime(smalldatetime) +CREATE OR REPLACE FUNCTION pg_catalog.smalldatetime_to_abstime(smalldatetime) RETURNS abstime -AS $$ select abstimein(smalldatetime_out($1)) $$ +AS $$ select pg_catalog.abstimein(pg_catalog.smalldatetime_out($1)) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (smalldatetime AS abstime) WITH FUNCTION smalldatetime_to_abstime(smalldatetime) AS IMPLICIT; +CREATE CAST (smalldatetime AS abstime) WITH FUNCTION pg_catalog.smalldatetime_to_abstime(smalldatetime) AS IMPLICIT; /*smalldatetime to time*/ -CREATE OR REPLACE FUNCTION smalldatetime_to_time(smalldatetime) +CREATE OR REPLACE FUNCTION pg_catalog.smalldatetime_to_time(smalldatetime) RETURNS time -AS $$ select time_in(smalldatetime_out($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.time_in(pg_catalog.smalldatetime_out($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (smalldatetime AS time) WITH FUNCTION smalldatetime_to_time(smalldatetime) AS IMPLICIT; +CREATE CAST (smalldatetime AS time) WITH FUNCTION pg_catalog.smalldatetime_to_time(smalldatetime) AS IMPLICIT; /*smalldatetime_to_timestamptz*/ -CREATE OR REPLACE FUNCTION smalldatetime_to_timestamptz(smalldatetime) +CREATE OR REPLACE FUNCTION pg_catalog.smalldatetime_to_timestamptz(smalldatetime) RETURNS TIMESTAMP WITH TIME ZONE -AS $$ select timestamptz_in(smalldatetime_out($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.timestamptz_in(pg_catalog.smalldatetime_out($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (smalldatetime AS TIMESTAMP WITH TIME ZONE) WITH FUNCTION smalldatetime_to_timestamptz(smalldatetime) AS IMPLICIT; +CREATE CAST (smalldatetime AS TIMESTAMP WITH TIME ZONE) WITH FUNCTION pg_catalog.smalldatetime_to_timestamptz(smalldatetime) AS IMPLICIT; /*timestamptz_to_smalldatetime*/ -CREATE OR REPLACE FUNCTION timestamptz_to_smalldatetime(TIMESTAMP WITH TIME ZONE) +CREATE OR REPLACE FUNCTION pg_catalog.timestamptz_to_smalldatetime(TIMESTAMP WITH TIME ZONE) RETURNS smalldatetime -AS $$ select smalldatetime_in(TIMESTAMPTZ_OUT($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.smalldatetime_in(pg_catalog.TIMESTAMPTZ_OUT($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (TIMESTAMP WITH TIME ZONE AS smalldatetime) WITH FUNCTION timestamptz_to_smalldatetime(TIMESTAMP WITH TIME ZONE) AS IMPLICIT; +CREATE CAST (TIMESTAMP WITH TIME ZONE AS smalldatetime) WITH FUNCTION pg_catalog.timestamptz_to_smalldatetime(TIMESTAMP WITH TIME ZONE) AS IMPLICIT; create type exception as (code integer, message varchar2); -create or replace function regexp_substr(text,text) +create or replace function pg_catalog.regexp_substr(text,text) returns text AS '$libdir/plpgsql','regexp_substr' LANGUAGE C STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION report_application_error( +CREATE OR REPLACE FUNCTION pg_catalog.report_application_error( IN log text, IN code integer default null )RETURNS void AS '$libdir/plpgsql','report_application_error' LANGUAGE C VOLATILE NOT FENCED; -create or replace function bitand(bigint,bigint) +create or replace function pg_catalog.bitand(bigint,bigint) returns bigint as $$ select $1 & $2 $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -create or replace function regexp_like(text,text) +create or replace function pg_catalog.regexp_like(text,text) returns boolean as $$ select $1 ~ $2 $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -create or replace function regexp_like(text,text,text) +create or replace function pg_catalog.regexp_like(text,text,text) returns boolean 
as $$ select case $3 when 'i' then $1 ~* $2 else $1 ~ $2 end;$$
LANGUAGE SQL STRICT IMMUTABLE NOT FENCED;

-CREATE OR REPLACE FUNCTION INTERVALTONUM(INTERVAL)
+CREATE OR REPLACE FUNCTION pg_catalog.INTERVALTONUM(INTERVAL)
 RETURNS NUMERIC
 AS '$libdir/plpgsql','intervaltonum'
 LANGUAGE C STRICT IMMUTABLE NOT FENCED;

-CREATE CAST (INTERVAL AS NUMERIC) WITH FUNCTION INTERVALTONUM(INTERVAL) AS IMPLICIT;
+CREATE CAST (INTERVAL AS NUMERIC) WITH FUNCTION pg_catalog.INTERVALTONUM(INTERVAL) AS IMPLICIT;

 /* add for nvarchar2 data type */
-CREATE OR REPLACE FUNCTION TO_NUMERIC(NVARCHAR2)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NUMERIC(NVARCHAR2)
 RETURNS NUMERIC
-AS $$ SELECT TO_NUMBER($1::TEXT)$$
+AS $$ SELECT pg_catalog.TO_NUMBER($1::TEXT)$$
 LANGUAGE SQL IMMUTABLE STRICT NOT FENCED;

-CREATE CAST (NVARCHAR2 AS NUMERIC) WITH FUNCTION TO_NUMERIC(NVARCHAR2) AS IMPLICIT;
+CREATE CAST (NVARCHAR2 AS NUMERIC) WITH FUNCTION pg_catalog.TO_NUMERIC(NVARCHAR2) AS IMPLICIT;

-CREATE OR REPLACE FUNCTION TO_INTEGER(NVARCHAR2)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_INTEGER(NVARCHAR2)
 RETURNS INTEGER
-AS $$ SELECT int4in(nvarchar2out($1))$$
+AS $$ SELECT pg_catalog.int4in(pg_catalog.nvarchar2out($1))$$
 LANGUAGE SQL IMMUTABLE STRICT NOT FENCED;

-CREATE CAST (NVARCHAR2 AS INTEGER) WITH FUNCTION TO_INTEGER(NVARCHAR2) AS IMPLICIT;
+CREATE CAST (NVARCHAR2 AS INTEGER) WITH FUNCTION pg_catalog.TO_INTEGER(NVARCHAR2) AS IMPLICIT;

-CREATE OR REPLACE FUNCTION TO_NVARCHAR2(TIMESTAMP WITHOUT TIME ZONE)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(TIMESTAMP WITHOUT TIME ZONE)
 RETURNS NVARCHAR2
-AS $$ select CAST(timestamp_out($1) AS NVARCHAR2) $$
+AS $$ select CAST(pg_catalog.timestamp_out($1) AS NVARCHAR2) $$
 LANGUAGE SQL IMMUTABLE STRICT NOT FENCED;

-CREATE CAST (TIMESTAMP WITHOUT TIME ZONE AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(TIMESTAMP WITHOUT TIME ZONE) AS IMPLICIT;
+CREATE CAST (TIMESTAMP WITHOUT TIME ZONE AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(TIMESTAMP WITHOUT TIME ZONE) AS IMPLICIT;

-CREATE OR REPLACE FUNCTION TO_NVARCHAR2(INTERVAL)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(INTERVAL)
 RETURNS NVARCHAR2
-AS $$ select CAST(interval_out($1) AS NVARCHAR2) $$
+AS $$ select CAST(pg_catalog.interval_out($1) AS NVARCHAR2) $$
 LANGUAGE SQL IMMUTABLE STRICT NOT FENCED;

-CREATE CAST (INTERVAL AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(INTERVAL) AS IMPLICIT;
+CREATE CAST (INTERVAL AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(INTERVAL) AS IMPLICIT;

-CREATE OR REPLACE FUNCTION TO_NVARCHAR2(NUMERIC)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(NUMERIC)
 RETURNS NVARCHAR2
-AS $$ SELECT CAST(numeric_out($1) AS NVARCHAR2) $$
+AS $$ SELECT CAST(pg_catalog.numeric_out($1) AS NVARCHAR2) $$
 LANGUAGE SQL STRICT IMMUTABLE NOT FENCED;

-CREATE OR REPLACE FUNCTION TO_NVARCHAR2(INT2)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(INT2)
 RETURNS NVARCHAR2
-AS $$ select CAST(int2out($1) AS NVARCHAR2) $$
+AS $$ select CAST(pg_catalog.int2out($1) AS NVARCHAR2) $$
 LANGUAGE SQL STRICT IMMUTABLE NOT FENCED;

-CREATE OR REPLACE FUNCTION TO_NVARCHAR2(INT4)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(INT4)
 RETURNS NVARCHAR2
-AS $$ select CAST(int4out($1) AS NVARCHAR2) $$
+AS $$ select CAST(pg_catalog.int4out($1) AS NVARCHAR2) $$
 LANGUAGE SQL STRICT IMMUTABLE NOT FENCED;

-CREATE OR REPLACE FUNCTION TO_NVARCHAR2(INT8)
+CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(INT8)
 RETURNS NVARCHAR2
-AS $$ select CAST(int8out($1) AS NVARCHAR2) $$
+AS $$ select CAST(pg_catalog.int8out($1) AS NVARCHAR2) $$
 LANGUAGE
SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_NVARCHAR2(FLOAT4) +CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(FLOAT4) RETURNS NVARCHAR2 -AS $$ select CAST(float4out($1) AS NVARCHAR2) $$ +AS $$ select CAST(pg_catalog.float4out($1) AS NVARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE OR REPLACE FUNCTION TO_NVARCHAR2(FLOAT8) +CREATE OR REPLACE FUNCTION pg_catalog.TO_NVARCHAR2(FLOAT8) RETURNS NVARCHAR2 -AS $$ select CAST(float8out($1) AS NVARCHAR2) $$ +AS $$ select CAST(pg_catalog.float8out($1) AS NVARCHAR2) $$ LANGUAGE SQL STRICT IMMUTABLE NOT FENCED; -CREATE CAST (INT2 AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(INT2) AS IMPLICIT; -CREATE CAST (INT4 AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(INT4) AS IMPLICIT; -CREATE CAST (INT8 AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(INT8) AS IMPLICIT; -CREATE CAST (NUMERIC AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(NUMERIC) AS IMPLICIT; -CREATE CAST (FLOAT4 AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(FLOAT4) AS IMPLICIT; -CREATE CAST (FLOAT8 AS NVARCHAR2) WITH FUNCTION TO_NVARCHAR2(FLOAT8) AS IMPLICIT; +CREATE CAST (INT2 AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(INT2) AS IMPLICIT; +CREATE CAST (INT4 AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(INT4) AS IMPLICIT; +CREATE CAST (INT8 AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(INT8) AS IMPLICIT; +CREATE CAST (NUMERIC AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(NUMERIC) AS IMPLICIT; +CREATE CAST (FLOAT4 AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(FLOAT4) AS IMPLICIT; +CREATE CAST (FLOAT8 AS NVARCHAR2) WITH FUNCTION pg_catalog.TO_NVARCHAR2(FLOAT8) AS IMPLICIT; -CREATE OR REPLACE FUNCTION TO_TS(NVARCHAR2) +CREATE OR REPLACE FUNCTION pg_catalog.TO_TS(NVARCHAR2) RETURNS TIMESTAMP WITHOUT TIME ZONE -AS $$ select timestamp_in(nvarchar2out($1), 0::Oid, -1) $$ +AS $$ select pg_catalog.timestamp_in(pg_catalog.nvarchar2out($1), 0::Oid, -1) $$ LANGUAGE SQL IMMUTABLE STRICT NOT FENCED; -CREATE CAST (NVARCHAR2 AS TIMESTAMP WITHOUT TIME ZONE) WITH FUNCTION TO_TS(NVARCHAR2) AS IMPLICIT; +CREATE CAST (NVARCHAR2 AS TIMESTAMP WITHOUT TIME ZONE) WITH FUNCTION pg_catalog.TO_TS(NVARCHAR2) AS IMPLICIT; -create or replace function regex_like_m(text,text) returns boolean +create or replace function pg_catalog.regex_like_m(text,text) returns boolean as $$ declare source_line integer := 1; @@ -2406,46 +2408,46 @@ declare source_array array_text := array_text(); regex_array array_text := array_text(); begin - if left($2,1) <> '^' and right($2,1) <> '$' then + if pg_catalog.left($2,1) <> '^' and pg_catalog.right($2,1) <> '$' then return $1 ~ $2; end if; --source string to source_array - for i in 1..length($1) loop + for i in 1..pg_catalog.length($1) loop if substr($1,i,1) ~ '\n' then if position = i then source_array(source_line) := '\n'; else - source_array(source_line) := substr($1,position,i - position); + source_array(source_line) := pg_catalog.substr($1,position,i - position); end if; position := i + 1; source_line := source_line + 1; end if; end loop; - if position <= length($1) or position = 1 then - source_array(source_line) := substr($1,position); + if position <= pg_catalog.length($1) or position = 1 then + source_array(source_line) := pg_catalog.substr($1,position); else - if position > length($1) then + if position > pg_catalog.length($1) then source_line := source_line - 1; end if; end if; --regexp string to regex_array position := 1; - for i in 1..length($2) loop - if substr($2,i,1) ~ '\n' then + for i in 1..pg_catalog.length($2) loop + if 
pg_catalog.substr($2,i,1) ~ '\n' then if position = i then regex_array(regex_line) := '\n'; else - regex_array(regex_line) := substr($2,position,i - position); + regex_array(regex_line) := pg_catalog.substr($2,position,i - position); end if; position := i + 1; regex_line := regex_line + 1; end if; end loop; - if position <= length($2) or position = 1 then - regex_array(regex_line) := substr($2,position); + if position <= pg_catalog.length($2) or position = 1 then + regex_array(regex_line) := pg_catalog.substr($2,position); else - if position > length($2) then + if position > pg_catalog.length($2) then regex_line := regex_line - 1; end if; end if; @@ -2468,12 +2470,12 @@ begin end if; end loop; if left($2,1) = '^' then - regex_temp := substr($2,2); + regex_temp := pg_catalog.substr($2,2); else regex_temp := $2; end if; if right($2,1) = '$' then - regex_temp := substr(regex_temp,1,length(regex_temp)-1); + regex_temp := pg_catalog.substr(regex_temp,1,pg_catalog.length(regex_temp)-1); end if; if flag then flag := $1 ~ regex_temp; @@ -2482,14 +2484,14 @@ begin end; $$ LANGUAGE plpgsql shippable NOT FENCED; -create or replace function regexp_like(text,text,text) +create or replace function pg_catalog.regexp_like(text,text,text) returns boolean as $$ declare regex_char varchar(1); begin - for i in 1..length($3) loop - regex_char := substr($3,i,1); + for i in 1..pg_catalog.length($3) loop + regex_char := pg_catalog.substr($3,i,1); if regex_char <> 'i' and regex_char <> 'm' and regex_char <> 'c' then raise info 'illegal argument for function'; return false; @@ -2498,20 +2500,20 @@ begin case right($3, 1) when 'i' then return $1 ~* $2; when 'c' then return $1 ~ $2; - when 'm' then return regex_like_m($1,$2); + when 'm' then return pg_catalog.regex_like_m($1,$2); end case; end; $$ LANGUAGE plpgsql shippable NOT FENCED; -create or replace function rawtohex(text) +create or replace function pg_catalog.rawtohex(text) returns text AS '$libdir/plpgsql','rawtohex' LANGUAGE C STRICT IMMUTABLE NOT FENCED; /* * login_audit_messages */ -CREATE OR REPLACE FUNCTION login_audit_messages(in flag boolean) returns table (username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text) AUTHID DEFINER +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages(in flag boolean) returns table (username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text) AUTHID DEFINER AS $$ DECLARE user_id text; @@ -2521,13 +2523,13 @@ SQL_STMT VARCHAR2(500); fail_cursor REFCURSOR; success_cursor REFCURSOR; BEGIN - SELECT text(oid) FROM pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT text(oid) FROM pg_catalog.pg_authid WHERE rolname=SESSION_USER INTO user_id; SELECT SESSION_USER INTO user_name; - SELECT CURRENT_DATABASE() INTO db_name; + SELECT pg_catalog.CURRENT_DATABASE() INTO db_name; IF flag = true THEN - SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE type IN (''login_success'') AND username =' || quote_literal(user_name) || - ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; OPEN success_cursor FOR EXECUTE SQL_STMT; --search bottom up for all the success login 
info
 FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo;
@@ -2537,9 +2539,9 @@ BEGIN
 END IF;
 CLOSE success_cursor;
 ELSE
-        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
+        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
            type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) ||
-            ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
+            ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
 OPEN fail_cursor FOR EXECUTE SQL_STMT;
 --search bottom up
 FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo;
@@ -2565,7 +2567,7 @@ LANGUAGE plpgsql NOT FENCED;
 * This is a special API for DataStudio, not the common behavior.
 * We strongly suggest using login_audit_messages instead of this.
 */
-CREATE OR REPLACE FUNCTION login_audit_messages_pid(flag boolean)
+CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages_pid(flag boolean)
 RETURNS TABLE(username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text, backendid bigint) AUTHID DEFINER
 AS $$
 DECLARE
@@ -2578,15 +2580,15 @@ success_cursor REFCURSOR;
 mybackendid bigint;
 curSessionFound boolean;
 BEGIN
-    SELECT text(oid) FROM pg_authid WHERE rolname=SESSION_USER INTO user_id;
+    SELECT text(oid) FROM pg_catalog.pg_authid WHERE rolname=SESSION_USER INTO user_id;
     SELECT SESSION_USER INTO user_name;
-    SELECT CURRENT_DATABASE() INTO db_name;
-    SELECT pg_backend_pid() INTO mybackendid;
+    SELECT pg_catalog.CURRENT_DATABASE() INTO db_name;
+    SELECT pg_catalog.pg_backend_pid() INTO mybackendid;
     curSessionFound = false;
     IF flag = true THEN
-        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
+        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, pg_catalog.split_part(thread_id,''@'',1) backendid FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
            type IN (''login_success'') AND username =' || quote_literal(user_name) ||
-            ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
+            ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
 OPEN success_cursor FOR EXECUTE SQL_STMT;
 --search bottom up for all the success login info
 FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid;
@@ -2606,9 +2608,9 @@ BEGIN
 END IF;
 END IF;
 ELSE
-        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
-            type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) ||
-            ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
+        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, pg_catalog.split_part(thread_id,''@'',1) backendid FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
+            type IN (''login_success'', ''login_failed'') AND username =' || pg_catalog.quote_literal(user_name) ||
+            ' AND database =' || pg_catalog.quote_literal(db_name) || ' AND userid =' || pg_catalog.quote_literal(user_id) || ';';
 OPEN fail_cursor FOR EXECUTE SQL_STMT;
 --search bottom up
 FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid;
@@ -2644,7 +2646,7 @@ LANGUAGE plpgsql NOT FENCED;
 * local way to fetch all thread wait status in local node.
 */
 CREATE VIEW pg_thread_wait_status AS
-    SELECT * FROM pg_stat_get_status(NULL);
+    SELECT * FROM pg_catalog.pg_stat_get_status(NULL);

 /*
 * pgxc_thread_wait_status
 *
 * parallel way to fetch global thread wait status.
 */
 CREATE VIEW pgxc_thread_wait_status AS
-    SELECT * FROM pgxc_get_thread_wait_status();
+    SELECT * FROM pg_catalog.pgxc_get_thread_wait_status();

 /*
 *gs_sql_count
@@ -2685,7 +2687,7 @@ CREATE VIEW gs_sql_count AS
            avg_delete_elapse,
            max_delete_elapse,
            min_delete_elapse
-    FROM pg_stat_get_sql_count();
+    FROM pg_catalog.pg_stat_get_sql_count();

 CREATE VIEW pg_os_threads AS
     SELECT
@@ -2694,7 +2696,7 @@ CREATE VIEW pg_os_threads AS
        S.lwpid,
        S.thread_name,
        S.creation_time
-    FROM pg_stat_get_thread() AS S;
+    FROM pg_catalog.pg_stat_get_thread() AS S;

 CREATE VIEW pg_node_env AS
     SELECT
@@ -2705,13 +2707,13 @@ CREATE VIEW pg_node_env AS
        S.installpath,
        S.datapath,
        S.log_directory
-    FROM pg_stat_get_env() AS S;
+    FROM pg_catalog.pg_stat_get_env() AS S;

 /*
 * PGXC system view to look for libcomm stat
 */
 CREATE VIEW pg_comm_status AS
-    SELECT * FROM pg_comm_status();
+    SELECT * FROM pg_catalog.pg_comm_status();
 /*
 * PGXC system view to look for libcomm recv stream status
 */
@@ -2734,7 +2736,7 @@ CREATE VIEW pg_comm_recv_stream AS
        S.speed,
        S.quota,
        S.buff_usize
-    FROM pg_comm_recv_stream() AS S;
+    FROM pg_catalog.pg_comm_recv_stream() AS S;

 /*
 * PGXC system view to look for libcomm send stream status
@@ -2758,22 +2760,22 @@ CREATE VIEW pg_comm_send_stream AS
        S.speed,
        S.quota,
        S.wait_quota
-    FROM pg_comm_send_stream() AS S;
+    FROM pg_catalog.pg_comm_send_stream() AS S;

 /*
 * PGXC system view to show running transactions on node
 */
 CREATE VIEW pg_running_xacts AS
 SELECT *
-FROM pg_get_running_xacts();
+FROM pg_catalog.pg_get_running_xacts();

 /*
 * PGXC system view to show variable cache on node
 */
 CREATE VIEW pg_variable_info AS
-SELECT * FROM pg_get_variable_info();
+SELECT * FROM pg_catalog.pg_get_variable_info();

 --Test distribution situation
-create or replace function table_skewness(table_name text, column_name text,
+create or replace function pg_catalog.table_skewness(table_name text, column_name text,
                        OUT seqNum text, OUT Num text, OUT Ratio text, row_num text default '0')
 RETURNS setof record
 AS $$
 DECLARE
@@ -2797,14 +2799,14 @@ DECLARE
     if tolal_num = 0 then
        seqNum = 0;
        Num = 0;
-        Ratio = ROUND(0, 3) || '%';
+        Ratio = pg_catalog.ROUND(0, 3) || '%';
        return;
     end if;
     for row_data in EXECUTE execute_query loop
        seqNum = row_data.seqNum;
        Num = row_data.num;
-        Ratio = ROUND(row_data.num / tolal_num * 100, 3) || '%';
+        Ratio = pg_catalog.ROUND(row_data.num / tolal_num * 100, 3) || '%';
        RETURN next;
     end loop;
 END;
@@ -2819,7 +2821,7 @@ CREATE VIEW pg_get_invalid_backends AS
        S.datname AS dbname,
        S.backend_start,
        S.query
-    FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S
+    FROM pg_catalog.pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S
        ON (C.pid = S.sessionid);

 /*
@@ -2835,7 +2837,7 @@ CREATE VIEW pg_get_senders_catchup_time AS
        'Wal' AS type,
        W.catchup_start,
        W.catchup_end
-    FROM pg_stat_get_wal_senders() AS W
+    FROM pg_catalog.pg_stat_get_wal_senders() AS W
     UNION ALL
     SELECT
        D.pid,
@@ -2846,9 +2848,9
@@ CREATE VIEW pg_get_senders_catchup_time AS 'Data' AS type, D.catchup_start, D.catchup_end - FROM pg_stat_get_data_senders() AS D; + FROM pg_catalog.pg_stat_get_data_senders() AS D; -CREATE OR REPLACE FUNCTION pg_stat_session_cu(OUT mem_hit int, OUT hdd_sync_read int, OUT hdd_asyn_read int) +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_session_cu(OUT mem_hit int, OUT hdd_sync_read int, OUT hdd_asyn_read int) RETURNS setof record AS $$ DECLARE @@ -2856,7 +2858,7 @@ DECLARE query_str text; statname text; BEGIN - query_str := 'select statname, sum(value) as value from gs_session_stat group by statname;'; + query_str := 'select statname, pg_catalog.sum(value) as value from gs_session_stat group by statname;'; FOR stat_result IN EXECUTE(query_str) LOOP statname := stat_result.statname; IF statname = 'n_cu_mem_hit' THEN @@ -2872,27 +2874,27 @@ DECLARE LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW gs_stat_session_cu AS - SELECT DISTINCT * from pg_stat_session_cu(); + SELECT DISTINCT * from pg_catalog.pg_stat_session_cu(); /* * PGXC system view to look for libcomm delay information */ CREATE VIEW pg_comm_delay AS - SELECT DISTINCT * from pg_comm_delay(); + SELECT DISTINCT * from pg_catalog.pg_comm_delay(); CREATE VIEW gs_comm_proxy_thread_status AS - SELECT DISTINCT * from gs_comm_proxy_thread_status(); + SELECT DISTINCT * from pg_catalog.gs_comm_proxy_thread_status(); ALTER TEXT SEARCH CONFIGURATION ngram ADD MAPPING FOR zh_words, en_word, numeric, alnum, grapsymbol, multisymbol WITH simple; CREATE VIEW gs_all_control_group_info AS - SELECT DISTINCT * from gs_all_control_group_info(); + SELECT DISTINCT * from pg_catalog.gs_all_control_group_info(); CREATE VIEW mpp_tables AS SELECT n.nspname AS schemaname, c.relname AS tablename, - pg_get_userbyid(c.relowner) AS tableowner, t.spcname AS tablespace, x.pgroup,x.nodeoids + pg_catalog.pg_get_userbyid(c.relowner) AS tableowner, t.spcname AS tablespace, x.pgroup,x.nodeoids FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace @@ -2929,15 +2931,15 @@ create table gs_wlm_operator_info --real time operator-level view in single CN CREATE VIEW gs_wlm_operator_statistics AS SELECT t.* -FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +FROM pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_operator_info(NULL) as t where s.query_id = t.queryid; --history operator-level view for DM in single CN CREATE VIEW gs_wlm_operator_history AS -SELECT * FROM pg_stat_get_wlm_operator_info(0); +SELECT * FROM pg_catalog.pg_stat_get_wlm_operator_info(0); --function used to get history table from hash table -CREATE OR REPLACE FUNCTION create_wlm_operator_info(IN flag int) +CREATE OR REPLACE FUNCTION pg_catalog.create_wlm_operator_info(IN flag int) RETURNS int AS $$ DECLARE @@ -3006,10 +3008,10 @@ create table gs_wlm_plan_operator_info ); CREATE VIEW gs_wlm_plan_operator_history AS -SELECT * FROM gs_stat_get_wlm_plan_operator_info(0); +SELECT * FROM pg_catalog.gs_stat_get_wlm_plan_operator_info(0); --perf hist encoder -CREATE OR REPLACE FUNCTION encode_feature_perf_hist +CREATE OR REPLACE FUNCTION pg_catalog.encode_feature_perf_hist ( IN datname text, OUT queryid bigint, @@ -3078,7 +3080,7 @@ CREATE TABLE gs_wlm_plan_encoding_table encode text ); -CREATE OR REPLACE FUNCTION gather_encoding_info(IN datname text) +CREATE OR REPLACE FUNCTION pg_catalog.gather_encoding_info(IN datname text) RETURNS int AS $$ DECLARE @@ -3086,12 +3088,12 @@ DECLARE EXECUTE 'INSERT INTO 
gs_wlm_plan_encoding_table (queryid, plan_node_id, parent_node_id, encode, startup_time, total_time, rows, peak_memory)
                SELECT queryid, plan_node_id, parent_node_id, encode, startup_time, total_time, rows, peak_memory
-               FROM encode_feature_perf_hist('''|| datname ||''') order by queryid, plan_node_id;';
+               FROM pg_catalog.encode_feature_perf_hist('''|| datname ||''') order by queryid, plan_node_id;';
     RETURN 0;
 END;$$
 LANGUAGE plpgsql NOT FENCED;

-CREATE OR REPLACE FUNCTION copy_error_log_create()
+CREATE OR REPLACE FUNCTION pg_catalog.copy_error_log_create()
 RETURNS bool
 AS $$
 DECLARE
@@ -3143,9 +3145,9 @@ DECLARE
     query_str_nodes := 'SELECT group_name,group_kind FROM pgxc_group WHERE group_kind = ''v'' OR group_kind = ''i'' ';
     FOR row_name IN EXECUTE(query_str_nodes) LOOP
        IF row_name.group_kind = 'i' THEN
-            query_str := 'SELECT *,CAST(''' || row_name.group_name || ''' AS TEXT) AS nodegroup,CAST(''' || row_name.group_kind || ''' AS TEXT) AS group_kind FROM gs_all_nodegroup_control_group_info(''installation'')';
+            query_str := 'SELECT *,CAST(''' || row_name.group_name || ''' AS TEXT) AS nodegroup,CAST(''' || row_name.group_kind || ''' AS TEXT) AS group_kind FROM pg_catalog.gs_all_nodegroup_control_group_info(''installation'')';
        ELSE
-            query_str := 'SELECT *,CAST(''' || row_name.group_name || ''' AS TEXT) AS nodegroup,CAST(''' || row_name.group_kind || ''' AS TEXT) AS group_kind FROM gs_all_nodegroup_control_group_info(''' ||row_name.group_name||''')';
+            query_str := 'SELECT *,CAST(''' || row_name.group_name || ''' AS TEXT) AS nodegroup,CAST(''' || row_name.group_kind || ''' AS TEXT) AS group_kind FROM pg_catalog.gs_all_nodegroup_control_group_info(''' ||row_name.group_name||''')';
        END IF;
        FOR row_data IN EXECUTE(query_str) LOOP
            return next row_data;
@@ -3156,11 +3158,11 @@ DECLARE
 LANGUAGE 'plpgsql' NOT FENCED;

 -- the view for function gs_total_nodegroup_memory_detail.
-CREATE VIEW pg_catalog.gs_total_nodegroup_memory_detail AS SELECT * FROM gs_total_nodegroup_memory_detail();
+CREATE VIEW pg_catalog.gs_total_nodegroup_memory_detail AS SELECT * FROM pg_catalog.gs_total_nodegroup_memory_detail();
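-- A usage sketch for the wrapper just defined (hedged: the column names follow the
-- underlying table function and the filter value is illustrative):
--
--   SELECT ngname, memorytype, memorymbytes
--     FROM pg_catalog.gs_total_nodegroup_memory_detail
--    WHERE memorytype = 'ng_total_memory';
--
-- The one-line view-over-function pattern recurs below so that monitoring tools can
-- stay in plain SQL instead of calling table functions directly.

 -- the view for function gs_get_control_group_info.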
 CREATE VIEW pg_catalog.gs_get_control_group_info AS
-    SELECT * from gs_get_control_group_info() AS
+    SELECT * from pg_catalog.gs_get_control_group_info() AS
     (
        name text,
        type text,
@@ -3189,7 +3191,7 @@ SELECT
        t.ec_query,
        t.ec_libodbc_type,
        t.ec_fetch_count
-FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t
+FROM pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t
 where s.query_id = t.queryid and t.ec_operator > 0;

 --ec history operator-level view for DM in single CN
@@ -3209,7 +3211,7 @@ SELECT
        ec_username,
        ec_query,
        ec_libodbc_type
-FROM pg_stat_get_wlm_ec_operator_info(0) where ec_operator > 0;
+FROM pg_catalog.pg_stat_get_wlm_ec_operator_info(0) where ec_operator > 0;

 --table definition for ec history info
 create table gs_wlm_ec_operator_info
@@ -3232,10 +3234,10 @@ create table gs_wlm_ec_operator_info

 -- create view pg_tde_info
 CREATE VIEW pg_catalog.pg_tde_info AS
-SELECT * from pg_tde_info();
+SELECT * from pg_catalog.pg_tde_info();

 --get delta information in single DN
-CREATE OR REPLACE FUNCTION pg_get_delta_info(IN rel TEXT, IN schema_name TEXT, OUT part_name TEXT, OUT live_tuple INT8, OUT data_size INT8, OUT blockNum INT8)
+CREATE OR REPLACE FUNCTION pg_catalog.pg_get_delta_info(IN rel TEXT, IN schema_name TEXT, OUT part_name TEXT, OUT live_tuple INT8, OUT data_size INT8, OUT blockNum INT8)
 RETURNS setof record
 AS $$
 DECLARE
@@ -3255,7 +3257,7 @@ DECLARE
        EXECUTE(query_str) INTO row_data;
        query_select_str := 'select count(*) from cstore.' || row_data.relname || '';
        EXECUTE (query_select_str) INTO live_tuple;
-        query_size_str := 'select * from pg_relation_size(' || row_data.oid || ')';
+        query_size_str := 'select * from pg_catalog.pg_relation_size(' || row_data.oid || ')';
        EXECUTE (query_size_str) INTO data_size;
        blockNum := data_size/8192;
        part_name := 'non partition table';
@@ -3268,7 +3270,7 @@ DECLARE
     FOR row_data IN EXECUTE(query_str) LOOP
        query_select_str := 'select count(*) from cstore.'
|| row_data.relname || ''; EXECUTE (query_select_str) INTO live_tuple; - query_size_str := 'select * from pg_relation_size(' || row_data.oid || ')'; + query_size_str := 'select * from pg_catalog.pg_relation_size(' || row_data.oid || ')'; EXECUTE (query_size_str) INTO data_size; END LOOP; blockNum := data_size/8192; @@ -3280,9 +3282,9 @@ DECLARE LANGUAGE 'plpgsql' NOT FENCED; CREATE VIEW pg_catalog.pg_stat_bad_block AS - SELECT DISTINCT * from pg_stat_bad_block(); + SELECT DISTINCT * from pg_catalog.pg_stat_bad_block(); -CREATE OR REPLACE FUNCTION lock_cluster_ddl() +CREATE OR REPLACE FUNCTION pg_catalog.lock_cluster_ddl() RETURNS boolean AS $$ DECLARE @@ -3294,7 +3296,7 @@ DECLARE BEGIN query_database_oid := 'SELECT datname FROM pg_database WHERE datallowconn = true order by datname'; for databse_name in EXECUTE(query_database_oid) LOOP - lock_str = format('SELECT * FROM pgxc_lock_for_sp_database(''%s'')', databse_name.datname); + lock_str = pg_catalog.format('SELECT * FROM pg_catalog.pgxc_lock_for_sp_database(''%s'')', databse_name.datname); begin EXECUTE(lock_str) into lock_result; if lock_result = 'f' then @@ -3307,7 +3309,7 @@ DECLARE END; $$ LANGUAGE 'plpgsql' NOT FENCED; -CREATE OR REPLACE FUNCTION unlock_cluster_ddl() +CREATE OR REPLACE FUNCTION pg_catalog.unlock_cluster_ddl() RETURNS bool AS $$ DECLARE @@ -3319,7 +3321,7 @@ DECLARE BEGIN query_database_oid := 'SELECT datname FROM pg_database WHERE datallowconn = true order by datname'; for databse_name in EXECUTE(query_database_oid) LOOP - unlock_str = format('SELECT * FROM pgxc_unlock_for_sp_database(''%s'')', databse_name.datname); + unlock_str = format('SELECT * FROM pg_catalog.pgxc_unlock_for_sp_database(''%s'')', databse_name.datname); begin EXECUTE(unlock_str) into unlock_result; if unlock_result = 'f' then @@ -3343,17 +3345,19 @@ CREATE TABLE PLAN_TABLE_DATA( object_name name, object_type varchar2(30), object_owner name, - projection varchar2(4000) + projection varchar2(4000), + cost float8, + cardinality float8 ); CREATE VIEW PLAN_TABLE AS -SELECT statement_id,plan_id,id,operation,options,object_name,object_type,object_owner,projection +SELECT statement_id,plan_id,id,operation,options,object_name,object_type,object_owner,projection,cost,cardinality FROM PLAN_TABLE_DATA -WHERE session_id=pg_current_sessionid() -AND user_id=pg_current_userid(); +WHERE session_id=pg_catalog.pg_current_sessionid() +AND user_id=pg_catalog.pg_current_userid(); -- get pgxc dirty tables stat -CREATE OR REPLACE FUNCTION pgxc_get_stat_dirty_tables(in dirty_percent int4, in n_tuples int4, out relid oid, out relname name, out schemaname name, out n_tup_ins int8, out n_tup_upd int8, out n_tup_del int8, out n_live_tup int8, out n_dead_tup int8, out dirty_page_rate numeric(5,2)) +CREATE OR REPLACE FUNCTION pg_catalog.pgxc_get_stat_dirty_tables(in dirty_percent int4, in n_tuples int4, out relid oid, out relname name, out schemaname name, out n_tup_ins int8, out n_tup_upd int8, out n_tup_del int8, out n_live_tup int8, out n_dead_tup int8, out dirty_page_rate numeric(5,2)) RETURNS setof record AS $$ DECLARE @@ -3362,8 +3366,8 @@ DECLARE BEGIN query_str := 'SELECT oid relid, s.relname,s.schemaname,s.n_tup_ins,s.n_tup_upd,s.n_tup_del,s.n_live_tup,s.n_dead_tup,s.dirty_page_rate FROM pg_class p, - (SELECT relname, schemaname, SUM(n_tup_ins) n_tup_ins, SUM(n_tup_upd) n_tup_upd, SUM(n_tup_del) n_tup_del, SUM(n_live_tup) n_live_tup, SUM(n_dead_tup) n_dead_tup, CAST((SUM(n_dead_tup) / SUM(n_dead_tup + n_live_tup + 0.00001) * 100) - AS NUMERIC(5,2)) dirty_page_rate 
FROM pgxc_stat_dirty_tables('||dirty_percent||','||n_tuples||') GROUP BY (relname,schemaname)) s + (SELECT relname, schemaname, pg_catalog.SUM(n_tup_ins) n_tup_ins, pg_catalog.SUM(n_tup_upd) n_tup_upd, pg_catalog.SUM(n_tup_del) n_tup_del, pg_catalog.SUM(n_live_tup) n_live_tup, pg_catalog.SUM(n_dead_tup) n_dead_tup, CAST((pg_catalog.SUM(n_dead_tup) / pg_catalog.SUM(n_dead_tup + n_live_tup + 0.00001) * 100) + AS pg_catalog.NUMERIC(5,2)) dirty_page_rate FROM pg_catalog.pgxc_stat_dirty_tables('||dirty_percent||','||n_tuples||') GROUP BY (relname,schemaname)) s WHERE p.relname = s.relname AND p.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = s.schemaname) ORDER BY dirty_page_rate DESC'; FOR row_data IN EXECUTE(query_str) LOOP relid = row_data.relid; @@ -3380,7 +3384,7 @@ DECLARE END; $$ LANGUAGE 'plpgsql' NOT FENCED; -CREATE OR REPLACE FUNCTION pgxc_get_stat_dirty_tables(in dirty_percent int4, in n_tuples int4,in schema text, out relid oid, out relname name, out schemaname name, out n_tup_ins int8, out n_tup_upd int8, out n_tup_del int8, out n_live_tup int8, out n_dead_tup int8, out dirty_page_rate numeric(5,2)) +CREATE OR REPLACE FUNCTION pg_catalog.pgxc_get_stat_dirty_tables(in dirty_percent int4, in n_tuples int4,in schema text, out relid oid, out relname name, out schemaname name, out n_tup_ins int8, out n_tup_upd int8, out n_tup_del int8, out n_live_tup int8, out n_dead_tup int8, out dirty_page_rate numeric(5,2)) RETURNS setof record AS $$ DECLARE @@ -3389,8 +3393,8 @@ DECLARE BEGIN query_str := 'SELECT oid relid, s.relname,s.schemaname,s.n_tup_ins,s.n_tup_upd,s.n_tup_del,s.n_live_tup,s.n_dead_tup,s.dirty_page_rate FROM pg_class p, - (SELECT relname, schemaname, SUM(n_tup_ins) n_tup_ins, SUM(n_tup_upd) n_tup_upd, SUM(n_tup_del) n_tup_del, SUM(n_live_tup) n_live_tup, SUM(n_dead_tup) n_dead_tup, CAST((SUM(n_dead_tup) / SUM(n_dead_tup + n_live_tup + 0.00001) * 100) - AS NUMERIC(5,2)) dirty_page_rate FROM pgxc_stat_dirty_tables('||dirty_percent||','||n_tuples||','''||schema||''') GROUP BY (relname,schemaname)) s + (SELECT relname, schemaname, pg_catalog.SUM(n_tup_ins) n_tup_ins, pg_catalog.SUM(n_tup_upd) n_tup_upd, pg_catalog.SUM(n_tup_del) n_tup_del, pg_catalog.SUM(n_live_tup) n_live_tup, pg_catalog.SUM(n_dead_tup) n_dead_tup, CAST((pg_catalog.SUM(n_dead_tup) / pg_catalog.SUM(n_dead_tup + n_live_tup + 0.00001) * 100) + AS pg_catalog.NUMERIC(5,2)) dirty_page_rate FROM pg_catalog.pgxc_stat_dirty_tables('||dirty_percent||','||n_tuples||','''||schema||''') GROUP BY (relname,schemaname)) s WHERE p.relname = s.relname AND p.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = s.schemaname) ORDER BY dirty_page_rate DESC'; FOR row_data IN EXECUTE(query_str) LOOP relid = row_data.relid; @@ -3409,11 +3413,11 @@ LANGUAGE 'plpgsql' NOT FENCED; CREATE OR REPLACE VIEW pg_catalog.get_global_prepared_xacts AS SELECT p.transaction, p.gid, p.prepared, u.rolname AS owner, d.datname AS database, p.node_name - FROM get_local_prepared_xact() p + FROM pg_catalog.get_local_prepared_xact() p LEFT JOIN pg_authid u ON p.ownerid = u.oid LEFT JOIN pg_database d ON p.dbid = d.oid UNION ALL - SELECT * FROM get_remote_prepared_xacts(); + SELECT * FROM pg_catalog.get_remote_prepared_xacts(); CREATE unlogged table statement_history( db_name name, @@ -3466,7 +3470,8 @@ CREATE unlogged table statement_history( lwlock_time bigint, lwlock_wait_time bigint, details bytea, - is_slow_sql bool + is_slow_sql bool, + trace_id text ); REVOKE ALL on table pg_catalog.statement_history FROM public; create index 
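statement_history grows a trace_id column next to is_slow_sql above, so a driver-side trace can be joined to server-side slow-SQL records. A query sketch using only columns visible in this hunk (start_time is taken from the index definition that follows; the trace id literal is a placeholder):

```
SELECT db_name, start_time, is_slow_sql, trace_id
FROM pg_catalog.statement_history
WHERE is_slow_sql AND trace_id = 'placeholder-trace-id'
ORDER BY start_time DESC;
```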
statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); @@ -3504,6 +3509,35 @@ GRANT SELECT ON TABLE SYS_DUMMY TO PUBLIC; CREATE TYPE pg_catalog.bulk_exception as (error_index integer, error_code integer, error_message text); +CREATE VIEW pg_catalog.gs_db_privileges AS + SELECT + pg_catalog.pg_get_userbyid(roleid) AS rolename, + privilege_type AS privilege_type, + CASE + WHEN admin_option THEN + 'yes' + ELSE + 'no' + END AS admin_option + FROM pg_catalog.gs_db_privilege; + +CREATE OR REPLACE VIEW pg_catalog.gs_gsc_memory_detail AS + SELECT db_id, sum(totalsize) AS totalsize, sum(freesize) AS freesize, sum(usedsize) AS usedsize + FROM ( + SELECT + CASE WHEN contextname like '%GlobalSysDBCacheEntryMemCxt%' THEN substring(contextname, 29) + ELSE substring(parent, 29) END AS db_id, + totalsize, + freesize, + usedsize + FROM pg_catalog.pg_shared_memory_detail() + WHERE contextname LIKE '%GlobalSysDBCacheEntryMemCxt%' OR parent LIKE '%GlobalSysDBCacheEntryMemCxt%' + )a + GROUP BY db_id; + +CREATE OR REPLACE VIEW pg_catalog.gs_lsc_memory_detail AS +SELECT * FROM pg_catalog.pv_thread_memory_detail() WHERE contextname LIKE '%LocalSysCache%' OR parent LIKE '%LocalSysCache%'; + CREATE VIEW pg_publication_tables AS SELECT P.pubname AS pubname, @@ -3511,7 +3545,7 @@ CREATE VIEW pg_publication_tables AS C.relname AS tablename FROM pg_publication P, pg_class C JOIN pg_namespace N ON (N.oid = C.relnamespace) - WHERE C.oid IN (SELECT relid FROM pg_get_publication_tables(P.pubname)); + WHERE C.oid IN (SELECT relid FROM pg_catalog.pg_get_publication_tables(P.pubname)); CREATE VIEW pg_stat_subscription AS SELECT @@ -3524,11 +3558,13 @@ CREATE VIEW pg_stat_subscription AS st.latest_end_lsn, st.latest_end_time FROM pg_subscription su - LEFT JOIN pg_stat_get_subscription(NULL) st + LEFT JOIN pg_catalog.pg_stat_get_subscription(NULL) st ON (st.subid = su.oid); CREATE VIEW pg_replication_origin_status AS SELECT * - FROM pg_show_replication_origin_status(); + FROM pg_catalog.pg_show_replication_origin_status(); REVOKE ALL ON pg_replication_origin_status FROM public; + +REVOKE ALL ON pg_subscription FROM public; diff --git a/src/common/backend/catalog/toasting.cpp b/src/common/backend/catalog/toasting.cpp index 7571f1b55..63c14f8fc 100644 --- a/src/common/backend/catalog/toasting.cpp +++ b/src/common/backend/catalog/toasting.cpp @@ -40,6 +40,10 @@ #include "utils/syscache.h" #include "utils/lsyscache.h" #include "utils/partitionmap.h" +#include "utils/acl.h" +#include "commands/dbcommands.h" +#include "catalog/pg_authid.h" +#include "tcop/utility.h" /* Potentially set by contrib/pg_upgrade_support functions */ static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, @@ -642,6 +646,261 @@ static bool binary_upgrade_is_next_part_toast_pg_type_oid_valid() return true; } +static void InitTempToastNamespace(void) +{ + char toastNamespaceName[NAMEDATALEN]; + char PGXCNodeNameSimplified[NAMEDATALEN]; + Oid toastspaceId; + uint32 timeLineId = 0; + CreateSchemaStmt* create_stmt = NULL; + char str[NAMEDATALEN * 2 + 64] = {0}; + uint32 tempID = 0; + const uint32 NAME_SIMPLIFIED_LEN = 7; + uint32 nameLen = strlen(g_instance.attr.attr_common.PGXCNodeName); + int ret; + errno_t rc; + +#ifndef ENABLE_MULTIPLE_NODES + Assert(g_instance.exec_cxt.global_application_name != NULL); + nameLen = strlen(g_instance.exec_cxt.global_application_name); +#endif + + if (pg_database_aclcheck(u_sess->proc_cxt.MyDatabaseId, GetUserId(), ACL_CREATE_TEMP) != ACLCHECK_OK) { + 
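The gs_db_privileges, gs_gsc_memory_detail and gs_lsc_memory_detail views added above are plain queries over the catalog and memory-context functions, so they can be inspected directly:

```
-- Database-level privileges per role, with the admin option flag:
SELECT rolename, privilege_type, admin_option FROM pg_catalog.gs_db_privileges;

-- Global system cache memory aggregated per database id, and local
-- system cache memory for the current thread:
SELECT * FROM pg_catalog.gs_gsc_memory_detail ORDER BY totalsize DESC;
SELECT * FROM pg_catalog.gs_lsc_memory_detail;
```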
ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to create temporary tables in database \"%s\"", + get_and_check_db_name(u_sess->proc_cxt.MyDatabaseId)))); + } + + check_nodegroup_privilege(GetUserId(), GetUserId(), ACL_CREATE); + + if (RecoveryInProgress()) + ereport(ERROR, + (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), errmsg("cannot create temporary tables during recovery"))); + + timeLineId = get_controlfile_timeline(); + tempID = __sync_add_and_fetch(&gt_tempID_seed, 1); + + ret = strncpy_s(PGXCNodeNameSimplified, + sizeof(PGXCNodeNameSimplified), +#ifndef ENABLE_MULTIPLE_NODES + g_instance.exec_cxt.global_application_name, +#else + g_instance.attr.attr_common.PGXCNodeName, +#endif + nameLen >= NAME_SIMPLIFIED_LEN ? NAME_SIMPLIFIED_LEN : nameLen); + securec_check(ret, "\0", "\0"); + + HeapTuple tup = NULL; + char* bootstrap_username = NULL; + tup = SearchSysCache1(AUTHOID, BOOTSTRAP_SUPERUSERID); + if (HeapTupleIsValid(tup)) { + bootstrap_username = pstrdup(NameStr(((Form_pg_authid)GETSTRUCT(tup))->rolname)); + ReleaseSysCache(tup); + } else { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for role %u", BOOTSTRAP_SUPERUSERID))); + } + if (!IsInitdb) { + ret = snprintf_s(toastNamespaceName, + sizeof(toastNamespaceName), + sizeof(toastNamespaceName) - 1, + "pg_toast_temp_%s_%u_%u_%lu", + PGXCNodeNameSimplified, + timeLineId, + tempID, + IS_THREAD_POOL_WORKER ? u_sess->session_id : (uint64)t_thrd.proc_cxt.MyProcPid); + } else { + ret = snprintf_s(toastNamespaceName, + sizeof(toastNamespaceName), + sizeof(toastNamespaceName) - 1, + "pg_toast_temp_%s", + PGXCNodeNameSimplified); + } + + securec_check_ss(ret, "\0", "\0"); + + toastspaceId = get_namespace_oid(toastNamespaceName, true); + if (OidIsValid(toastspaceId)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("toast namespace named %s already exists, please drop it and try again", toastNamespaceName))); + } + + create_stmt = makeNode(CreateSchemaStmt); + create_stmt->authid = bootstrap_username; + create_stmt->schemaElts = NULL; + create_stmt->schemaname = toastNamespaceName; + create_stmt->temptype = Temp_Toast; + rc = memset_s(str, sizeof(str), 0, sizeof(str)); + securec_check(rc, "", ""); + ret = snprintf_s(str, + sizeof(str), + sizeof(str) - 1, + "CREATE SCHEMA %s AUTHORIZATION \"%s\"", + toastNamespaceName, + bootstrap_username); + securec_check_ss(ret, "\0", "\0"); + ProcessUtility((Node*)create_stmt, str, NULL, false, None_Receiver, false, NULL); + + /* Advance command counter to make namespace visible */ + CommandCounterIncrement(); + + Assert(OidIsValid(u_sess->catalog_cxt.myTempNamespace) && OidIsValid(u_sess->catalog_cxt.myTempToastNamespace)); + + u_sess->catalog_cxt.baseSearchPathValid = false; +} + +bool create_toast_by_sid(Oid *toastOid) +{ + char toast_relname[NAMEDATALEN]; + char toast_idxname[NAMEDATALEN]; + TupleDesc tupdesc; + IndexInfo* indexInfo = NULL; + Relation toast_rel; + Oid namespaceid = 0; + Oid collationObjectId[2]; + Oid classObjectId[2]; + int16 coloptions[2]; + errno_t rc = EOK; + uint64 session_id = 0; + if (OidIsValid(u_sess->plsql_cxt.ActiveLobToastOid)) { + *toastOid = u_sess->plsql_cxt.ActiveLobToastOid; + return false; + } + + session_id = IS_THREAD_POOL_WORKER ?
u_sess->session_id : t_thrd.proc_cxt.MyProcPid; + + rc = snprintf_s(toast_relname, sizeof(toast_relname), sizeof(toast_relname) - 1, "pg_temp_toast_%lu", session_id); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s( + toast_idxname, sizeof(toast_idxname), sizeof(toast_idxname) - 1, "pg_temp_toast_%lu_index", session_id); + securec_check_ss(rc, "\0", "\0"); + + /* this is pretty painful... need a tuple descriptor */ + tupdesc = CreateTemplateTupleDesc(3, false); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "chunk_id", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "chunk_seq", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "chunk_data", BYTEAOID, -1, 0); + + /* + * Ensure that the toast table doesn't itself get toasted, or we'll be + * toast :-(. This is essential for chunk_data because type bytea is + * toastable; hit the other two just to be sure. + */ + tupdesc->attrs[0]->attstorage = 'p'; + tupdesc->attrs[1]->attstorage = 'p'; + tupdesc->attrs[2]->attstorage = 'p'; + if (OidIsValid(u_sess->catalog_cxt.myTempToastNamespace)) { + namespaceid = GetTempToastNamespace(); + } else { + InitTempToastNamespace(); + namespaceid = GetTempToastNamespace(); + } + + StorageType storage_type = HEAP_DISK; + Datum reloptions = (Datum)0; + Oid toast_relid = heap_create_with_catalog(toast_relname, + namespaceid, + u_sess->proc_cxt.MyDatabaseTableSpace, + InvalidOid, + InvalidOid, + InvalidOid, + GetUserId(), + tupdesc, + NIL, + RELKIND_TOASTVALUE, + RELPERSISTENCE_PERMANENT, + false, + false, + true, + 0, + ONCOMMIT_NOOP, + reloptions, + false, + true, + NULL, + REL_CMPRS_NOT_SUPPORT, + NULL, + true, + NULL, + storage_type); + Assert(toast_relid != InvalidOid); + + toast_rel = heap_open(toast_relid, ShareLock); + + Datum indexReloptions = (Datum)0; + List* indexOptions = NULL; + if (RelationIsUstoreFormat(toast_rel)) { + DefElem* def = makeDefElem("storage_type", (Node*)makeString(TABLE_ACCESS_METHOD_USTORE)); + indexOptions = list_make1(def); + indexReloptions = transformRelOptions((Datum)0, indexOptions, NULL, NULL, false, false); + } + + indexInfo = makeNode(IndexInfo); + indexInfo->ii_NumIndexAttrs = 2; + indexInfo->ii_NumIndexKeyAttrs = indexInfo->ii_NumIndexAttrs; + indexInfo->ii_KeyAttrNumbers[0] = 1; + indexInfo->ii_KeyAttrNumbers[1] = 2; + indexInfo->ii_Expressions = NIL; + indexInfo->ii_ExpressionsState = NIL; + indexInfo->ii_Predicate = NIL; + indexInfo->ii_PredicateState = NIL; + indexInfo->ii_ExclusionOps = NULL; + indexInfo->ii_ExclusionProcs = NULL; + indexInfo->ii_ExclusionStrats = NULL; + indexInfo->ii_Unique = true; + indexInfo->ii_ReadyForInserts = true; + indexInfo->ii_Concurrent = false; + indexInfo->ii_BrokenHotChain = false; + indexInfo->ii_PgClassAttrId = 0; + indexInfo->ii_ParallelWorkers = 0; + + collationObjectId[0] = InvalidOid; + collationObjectId[1] = InvalidOid; + + classObjectId[0] = OID_BTREE_OPS_OID; + classObjectId[1] = INT4_BTREE_OPS_OID; + + coloptions[0] = 0; + coloptions[1] = 0; + + IndexCreateExtraArgs extra; + SetIndexCreateExtraArgs(&extra, InvalidOid, false, false); + + index_create(toast_rel, + toast_idxname, + InvalidOid, + InvalidOid, + indexInfo, + list_make2((void*)"chunk_id", (void*)"chunk_seq"), + BTREE_AM_OID, + u_sess->proc_cxt.MyDatabaseTableSpace, + collationObjectId, + classObjectId, + coloptions, + indexReloptions, + true, + false, + false, + false, + true, + !u_sess->upg_cxt.new_catalog_need_storage, + false, + &extra, + false); + + heap_close(toast_rel, NoLock); + u_sess->plsql_cxt.ActiveLobToastOid = toast_relid;
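InitTempToastNamespace and create_toast_by_sid derive object names from the simplified node name, the control-file timeline, a temp-ID counter, and the session or thread id, per the snprintf_s formats above. A sketch for spotting the resulting catalog entries; the LIKE patterns simply follow those formats:

```
-- Per-session toast namespaces created by InitTempToastNamespace:
SELECT nspname FROM pg_namespace WHERE nspname LIKE 'pg_toast_temp_%';

-- Session-scoped LOB toast relations created by create_toast_by_sid:
SELECT c.relname, n.nspname
FROM pg_class c
    JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname LIKE 'pg_temp_toast_%';
```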
*toastOid = toast_relid; + /* + * Make changes visible + */ + CommandCounterIncrement(); + + return true; +} + static Oid binary_upgrade_get_next_part_toast_pg_type_oid() { Oid old_part_toast_pg_type_oid; diff --git a/src/common/backend/client_logic/client_logic.cpp b/src/common/backend/client_logic/client_logic.cpp index e20daba17..7800f3cdc 100644 --- a/src/common/backend/client_logic/client_logic.cpp +++ b/src/common/backend/client_logic/client_logic.cpp @@ -252,7 +252,7 @@ void get_catalog_name(const RangeVar * const rel) Oid existing_relid; RangeVar *relation = const_cast(rel); relation->catalogname = get_database_name(u_sess->proc_cxt.MyDatabaseId); - namespace_id = RangeVarGetAndCheckCreationNamespace(relation, NoLock, &existing_relid); + namespace_id = RangeVarGetAndCheckCreationNamespace(relation, NoLock, &existing_relid, '\0'); if (relation->schemaname == NULL) { relation->schemaname = get_namespace_name(namespace_id); } diff --git a/src/common/backend/client_logic/client_logic_proc.cpp b/src/common/backend/client_logic/client_logic_proc.cpp index 596e366fb..65eace9ea 100755 --- a/src/common/backend/client_logic/client_logic_proc.cpp +++ b/src/common/backend/client_logic/client_logic_proc.cpp @@ -16,7 +16,7 @@ * client_logic_proc.cpp * * IDENTIFICATION - * src\common\backend\client_logic\client_logic_proc.cpp + * src\common\backend\client_logic\client_logic_proc.cpp * * ------------------------------------------------------------------------- */ @@ -33,6 +33,12 @@ #include "access/xact.h" +/* + * @Description: save record in pg_depend for the dependency between the client logic proc table and pg_proc + * @param[IN] func_id Oid of function in pg_proc + * @param[IN] gs_encrypted_proc_id Oid of function in gs_encrypted_proc + * @return: void + */ void record_proc_depend(const Oid func_id, const Oid gs_encrypted_proc_id) { ObjectAddress pg_proc_addr; @@ -46,6 +52,49 @@ void record_proc_depend(const Oid func_id, const Oid gs_encrypted_proc_id) recordDependencyOn(&gs_encrypted_proc_addr, &pg_proc_addr, DEPENDENCY_INTERNAL); } +void verify_out_param(HeapTuple oldtup, int *out_param_id) +{ + bool isNull = false; + Datum proargmodes; +#ifndef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum < 92470) { + proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, Anum_pg_proc_proargmodes, &isNull); + } else { + proargmodes = SysCacheGetAttr(PROCALLARGS, oldtup, Anum_pg_proc_proargmodes, &isNull); + } +#else + proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, Anum_pg_proc_proargmodes, &isNull); +#endif + if (isNull) { + return; + } + ArrayType *arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */ + if (arr == NULL) { + return; + } + int n_modes = ARR_DIMS(arr)[0]; + char *argmodes = (char*)ARR_DATA_PTR(arr); + for (int i = 0; i < n_modes; i++) { + if (argmodes[i] == PROARGMODE_OUT || argmodes[i] == PROARGMODE_INOUT || + argmodes[i] == PROARGMODE_TABLE) { + if (*out_param_id == -1) { + *out_param_id = i; + } else { + /* there is more than one out param - ignore */ + *out_param_id = -1; + break; + } + } + } +} + +/* + * @Description: insert the original return type into the gs_encrypted_proc table - calculation by out params might happen for PLPGSQL + * @param[IN] func_id Oid of function in pg_proc + * @param[IN] ret_type original return type + * @param[IN] res_type client logic data type + * @return: void + */ void add_rettype_orig(const Oid func_id, const Oid ret_type, const Oid res_type) { bool nulls[Natts_pg_proc] = {0}; @@ -64,40 +113,17 @@ void add_rettype_orig(const Oid func_id, const
Oid ret_type, const Oid res_type) errmsg("cache lookup failed for function %u when initialize function cache.", func_id))); } /* for dynamic plpgsql there is not enough to replace ret value - * also need to replace out parameter through wicj this value returned + * also need to replace out parameter through which this value returned */ int out_param_id = -1; Oid out_param_type = InvalidOid; int allnumargs = 0; - Datum proargmodes; -#ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum < 92470) { - proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, Anum_pg_proc_proargmodes, &isNull); - } else { - proargmodes = SysCacheGetAttr(PROCALLARGS, oldtup, Anum_pg_proc_proargmodes, &isNull); - } -#else - proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, Anum_pg_proc_proargmodes, &isNull); -#endif - if (!isNull) { - ArrayType* arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */ - if (arr) { - int n_modes = ARR_DIMS(arr)[0]; - char* argmodes = (char*)ARR_DATA_PTR(arr); - for (int i = 0; i < n_modes; i++) { - if (argmodes[i] == PROARGMODE_OUT || argmodes[i] == PROARGMODE_INOUT || - argmodes[i] == PROARGMODE_TABLE) { - if (out_param_id == -1) { - out_param_id = i; - } else { - /* there is more than one out param - ignore */ - out_param_id = -1; - break; - } - } - } - } - } + + /* since the return type must match OUT params: if there is only 1 OUT param, its data type is the return type; + * if there is more than 1 out param, the return type is going to be record, which is being processed in another + * function + */ + verify_out_param(oldtup, &out_param_id); if (out_param_id > -1) { /* there is one out param - replace allargs as well */ Datum proallargtypes = SysCacheGetAttr(PROCOID, oldtup, Anum_pg_proc_proallargtypes, &isNull); @@ -122,12 +148,14 @@ void add_rettype_orig(const Oid func_id, const Oid ret_type, const Oid res_type) CatalogUpdateIndexes(rel, tup); ReleaseSysCache(oldtup); heap_close(rel, RowExclusiveLock); + CommandCounterIncrement(); HeapTuple gs_tup; HeapTuple gs_oldtup = SearchSysCache1(GSCLPROCID, ObjectIdGetDatum(func_id)); Relation gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock); TupleDesc gs_tupDesc = RelationGetDescr(gs_rel); gs_values[Anum_gs_encrypted_proc_last_change - 1] = - DirectFunctionCall1(timestamptz_timestamp, GetCurrentTimestamp()); + DirectFunctionCall1(timestamptz_timestamp, GetCurrentTimestamp()); + gs_values[Anum_gs_encrypted_proc_prorettype_orig - 1] = ObjectIdGetDatum(ret_type); if (!HeapTupleIsValid(gs_oldtup)) { gs_values[Anum_gs_encrypted_proc_func_id - 1] = ObjectIdGetDatum(func_id); gs_values[Anum_gs_encrypted_proc_prorettype_orig - 1] = ObjectIdGetDatum(ret_type); @@ -175,7 +203,40 @@ void add_rettype_orig(const Oid func_id, const Oid ret_type, const Oid res_type) ce_cache_refresh_type |= 0x20; /* update PROC cache */ } -void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_types, const int tup_natts) + +/* + * @Description: get the pg_type Oid related to a specific pg_class record + * @param[IN] relid Oid of the relation whose related type we want to get + * @return: type related to relid + */ +static Oid get_reltype_from_relid(const Oid relid) +{ + Oid reltypid = InvalidOid; + if (relid != InvalidOid) { + HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (HeapTupleIsValid(tuple)) { + bool is_null; + Datum typid_datum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reltype, &is_null); + if (!is_null) { + reltypid = DatumGetObjectId(typid_datum); + }
ReleaseSysCache(tuple); + } + } + return reltypid; +} + +/* + * @Description: add the original parameter data types to gs_encrypted_proc + * @param[IN] func_id pg_proc Oid of the function + * @param[IN] all_types_orig Datum array of original data types in case of client logic, else -1 + * @param[IN] all_types real types of the pg_proc record for this function + * @param[IN] tup_natts number of attributes returned in the function result type + * @param[IN] relid relation id of the table that the columns are returned from + * @return: void + */ +void add_allargtypes_orig(const Oid func_id, + Datum* all_types_orig, Datum* all_types, const int tup_natts, const Oid relid) { Datum proargmodes = 0; Datum gs_all_types_orig = 0; @@ -192,7 +253,7 @@ void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_t bool nulls[Natts_pg_proc] = {0}; Datum values[Natts_pg_proc] = {0}; bool replaces[Natts_pg_proc] = {0}; - + const Oid reltypid = get_reltype_from_relid(relid); HeapTuple oldtup = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_id)); if (!HeapTupleIsValid(oldtup)) { ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmodule(MOD_EXECUTOR), @@ -201,6 +262,17 @@ void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_t bool isNull = false; Datum proAllArgTypes = SysCacheGetAttr(PROCOID, oldtup, Anum_pg_proc_proallargtypes, &isNull); if (isNull) { + if (reltypid != InvalidOid) { + values[Anum_pg_proc_prorettype - 1] = ObjectIdGetDatum(reltypid); + replaces[Anum_pg_proc_prorettype - 1] = true; + Relation rel = heap_open(ProcedureRelationId, RowExclusiveLock); + TupleDesc tupDesc = RelationGetDescr(rel); + HeapTuple tup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); + simple_heap_update(rel, &tup->t_self, tup); + CatalogUpdateIndexes(rel, tup); + heap_close(rel, RowExclusiveLock); + CommandCounterIncrement(); + } ReleaseSysCache(oldtup); return; /* nothing to update */ } @@ -216,6 +288,7 @@ void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_t ReleaseSysCache(oldtup); return; } + /* a plain array pointer is easier to work with when replacing the content of arr_all_arg_types */ vec_all_arg_types = (Oid*)ARR_DATA_PTR(arr_all_arg_types); #ifndef ENABLE_MULTIPLE_NODES @@ -257,35 +330,37 @@ void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_t } /* Replace orig data types with replaced data types */ - int col = 0; - for (int i = 0; i < tup_natts; i++) { - if (vec_proargmodes) { - bool is_input_params = vec_proargmodes[col] == PROARGMODE_IN || vec_proargmodes[col] == PROARGMODE_VARIADIC; - while (is_input_params) { - col++; - if (col > allnumargs) { - ereport(ERROR, (errcode(ERRCODE_ARRAY_ELEMENT_ERROR), - errmsg("mismatch in number of output parameters for proallargtypes replace"))); + if (vec_proargmodes) { + int col = 0; + for (int i = 0; i < tup_natts; i++) { + for (; col < allnumargs; col++) { + if (vec_proargmodes[col] == PROARGMODE_OUT || vec_proargmodes[col] == PROARGMODE_INOUT || + vec_proargmodes[col] == PROARGMODE_TABLE) { + vec_all_arg_types[col] = DatumGetObjectId(all_types[i]); + vec_gs_all_types_orig[col] = DatumGetObjectId(all_types_orig[i]); + col++; /* force move to the next arg */ + break; } - is_input_params = vec_proargmodes[col] == PROARGMODE_IN || vec_proargmodes[col] == PROARGMODE_INOUT || - vec_proargmodes[col] == PROARGMODE_VARIADIC; } - vec_all_arg_types[col] = DatumGetObjectId(all_types[i]); - vec_gs_all_types_orig[col] = DatumGetObjectId(all_types_orig[i]); - col++; } - } }
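verify_out_param, factored out above, leaves out_param_id set only when exactly one OUT-style parameter exists; add_rettype_orig then rewrites prorettype, while multiple OUT parameters keep the record return type. A PL/pgSQL shape that hits the single-OUT case (names are illustrative; the client-logic column setup that triggers the rewrite is outside this hunk):

```
-- With one OUT parameter, its (possibly client-logic-replaced) type is
-- the function's return type; with two or more, prorettype stays record.
CREATE OR REPLACE FUNCTION demo_single_out(IN a int, OUT b int)
AS $$ BEGIN b := a + 1; END; $$ LANGUAGE plpgsql;

SELECT proname, prorettype::regtype
FROM pg_proc WHERE proname = 'demo_single_out';
```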
values[Anum_pg_proc_proallargtypes - 1] = PointerGetDatum(arr_all_arg_types); replaces[Anum_pg_proc_proallargtypes - 1] = true; + if (reltypid != InvalidOid) { + values[Anum_pg_proc_prorettype - 1] = ObjectIdGetDatum(reltypid); + replaces[Anum_pg_proc_prorettype-1] = true; + } + Relation rel = heap_open(ProcedureRelationId, RowExclusiveLock); TupleDesc tupDesc = RelationGetDescr(rel); HeapTuple tup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); simple_heap_update(rel, &tup->t_self, tup); CatalogUpdateIndexes(rel, tup); heap_close(rel, RowExclusiveLock); + CommandCounterIncrement(); Relation gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock); TupleDesc gs_tupDesc = RelationGetDescr(gs_rel); - gs_values[Anum_gs_encrypted_proc_last_change - 1] = + gs_values[Anum_gs_encrypted_proc_last_change - 1] = DirectFunctionCall1(timestamptz_timestamp, GetCurrentTimestamp()); if (!HeapTupleIsValid(gs_oldtup)) { gs_values[Anum_gs_encrypted_proc_func_id - 1] = ObjectIdGetDatum(func_id); @@ -312,45 +387,10 @@ void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_t CatalogUpdateIndexes(gs_rel, gs_tup); heap_close(gs_rel, RowExclusiveLock); ce_cache_refresh_type |= 0x20; /* update PROC cache */ - CommandCounterIncrement(); ReleaseSysCache(oldtup); + CommandCounterIncrement(); } -void verify_out_param(HeapTuple oldtup, int *out_param_id) -{ - bool isNull = false; - Datum proargmodes; -#ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum < 92470) { - proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, Anum_pg_proc_proargmodes, &isNull); - } else { - proargmodes = SysCacheGetAttr(PROCALLARGS, oldtup, Anum_pg_proc_proargmodes, &isNull); - } -#else - proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, Anum_pg_proc_proargmodes, &isNull); -#endif - if (isNull) { - return; - } - ArrayType *arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */ - if (arr == NULL) { - return; - } - int n_modes = ARR_DIMS(arr)[0]; - char *argmodes = (char*)ARR_DATA_PTR(arr); - for (int i = 0; i < n_modes; i++) { - if (argmodes[i] == PROARGMODE_OUT || argmodes[i] == PROARGMODE_INOUT || - argmodes[i] == PROARGMODE_TABLE) { - if (*out_param_id == -1) { - *out_param_id = i; - } else { - /* there is more than one out param - ignore */ - *out_param_id = -1; - break; - } - } - } -} void verify_rettype_for_out_param(const Oid func_id) { @@ -362,6 +402,10 @@ void verify_rettype_for_out_param(const Oid func_id) bool gs_replaces[Natts_gs_encrypted_proc] = {0}; replaces[Anum_pg_proc_prorettype - 1] = true; bool isNull = false; + int out_param_id = -1; + Oid out_param_type = InvalidOid; + int allnumargs = 0; + HeapTuple oldtup = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_id)); if (!HeapTupleIsValid(oldtup)) { ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmodule(MOD_EXECUTOR), @@ -376,9 +420,6 @@ void verify_rettype_for_out_param(const Oid func_id) ReleaseSysCache(oldtup); return; } - int out_param_id = -1; - Oid out_param_type = InvalidOid; - int allnumargs = 0; verify_out_param(oldtup, &out_param_id); if (out_param_id == -1) { ReleaseSysCache(oldtup); @@ -401,6 +442,7 @@ void verify_rettype_for_out_param(const Oid func_id) return; } values[Anum_pg_proc_prorettype - 1] = out_param_type; + Relation rel = heap_open(ProcedureRelationId, RowExclusiveLock); TupleDesc tupDesc = RelationGetDescr(rel); HeapTuple tup = heap_modify_tuple(oldtup, tupDesc, values, nulls, replaces); @@ -408,6 +450,7 @@ void verify_rettype_for_out_param(const Oid func_id) 
CatalogUpdateIndexes(rel, tup); ReleaseSysCache(oldtup); heap_close(rel, RowExclusiveLock); + CommandCounterIncrement(); /* keep original rettype in gs_encrypted_proc */ HeapTuple gs_tup; HeapTuple gs_oldtup = SearchSysCache1(GSCLPROCID, ObjectIdGetDatum(func_id)); @@ -424,7 +467,7 @@ void verify_rettype_for_out_param(const Oid func_id) gs_all_args_orig[i] = ObjectIdGetDatum(-1); } gs_all_args_orig[out_param_id] = out_param_type; - ArrayType *arr_gs_all_types_orig = + ArrayType *arr_gs_all_types_orig = construct_array(gs_all_args_orig, allnumargs, INT4OID, sizeof(int4), true, 'i'); gs_values[Anum_gs_encrypted_proc_proallargtypes_orig - 1] = PointerGetDatum(arr_gs_all_types_orig); pfree_ext(gs_all_args_orig); @@ -450,14 +493,31 @@ void verify_rettype_for_out_param(const Oid func_id) ce_cache_refresh_type |= 0x20; /* update PROC cache */ } +/* + * @Description: delete all procedure client logic info and remove dependencies + * @param[IN] old_gs_tup tuple of gs_encrypted_proc to be removed + * @return: void + */ +void delete_proc_client_info(HeapTuple old_gs_tup) +{ + deleteDependencyRecordsFor(ClientLogicProcId, HeapTupleGetOid(old_gs_tup), true); + Relation gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock); + simple_heap_delete(gs_rel, &old_gs_tup->t_self); + heap_close(gs_rel, RowExclusiveLock); + ReleaseSysCache(old_gs_tup); + CommandCounterIncrement(); +} + +/* + * @Description: delete all procedure client logic info and remove dependencies + * @param[IN] func_id Oid of function in pg_proc + * @return: void + */ void delete_proc_client_info(Oid func_id) { HeapTuple old_gs_tup = SearchSysCache1(GSCLPROCID, ObjectIdGetDatum(func_id)); if (HeapTupleIsValid(old_gs_tup)) { - Relation gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock); - deleteDependencyRecordsFor(ClientLogicProcId, HeapTupleGetOid(old_gs_tup), true); - simple_heap_delete(gs_rel, &old_gs_tup->t_self); - heap_close(gs_rel, RowExclusiveLock); - ReleaseSysCache(old_gs_tup); + ereport(DEBUG5, (errmsg("removing client logic procedure info for %u", func_id))); + delete_proc_client_info(old_gs_tup); } } diff --git a/src/common/backend/lib/dllist.cpp b/src/common/backend/lib/dllist.cpp index c781ec648..79cc63a23 100644 --- a/src/common/backend/lib/dllist.cpp +++ b/src/common/backend/lib/dllist.cpp @@ -235,15 +235,18 @@ DllistWithLock::~DllistWithLock() SpinLockFree(&m_lock); } -void DllistWithLock::Remove(Dlelem* e) +bool DllistWithLock::RemoveConfirm(Dlelem* e) { + bool found = false; START_CRIT_SECTION(); SpinLockAcquire(&(m_lock)); if (e->dle_list == &m_list) { + found = true; DLRemove(e); } SpinLockRelease(&(m_lock)); END_CRIT_SECTION(); + return found; } void DllistWithLock::AddHead(Dlelem* e) @@ -279,6 +282,17 @@ Dlelem* DllistWithLock::RemoveHead() return head; } +Dlelem* DllistWithLock::RemoveTail() +{ + Dlelem* tail = NULL; + START_CRIT_SECTION(); + SpinLockAcquire(&(m_lock)); + tail = DLRemTail(&m_list); + SpinLockRelease(&(m_lock)); + END_CRIT_SECTION(); + return tail; +} + bool DllistWithLock::IsEmpty() { START_CRIT_SECTION(); @@ -302,6 +316,12 @@ void DllistWithLock::GetLock() SpinLockAcquire(&(m_lock)); } +Dlelem* DllistWithLock::RemoveHeadNoLock() +{ + Dlelem* head = DLRemHead(&m_list); + return head; +} + void DllistWithLock::ReleaseLock() { SpinLockRelease(&(m_lock)); diff --git a/src/common/backend/libpq/auth.cpp b/src/common/backend/libpq/auth.cpp index 977379a73..8064d2224 100644 --- a/src/common/backend/libpq/auth.cpp +++ b/src/common/backend/libpq/auth.cpp @@ -64,12 +64,17 @@ extern GlobalNodeDefinition*
global_node_definition; * Global authentication functions * ---------------------------------------------------------------- */ +#ifndef ENABLE_UT static void sendAuthRequest(Port* port, AuthRequest areq); static void auth_failed(Port* port, int status); static char* recv_password_packet(Port* port); static int recv_and_check_password_packet(Port* port); -static void clear_gss_info(pg_gssinfo* gss); -static void clear_gssconn_info(GssConn *gss); +#else +void sendAuthRequest(Port* port, AuthRequest areq); +void auth_failed(Port* port, int status); +char* recv_password_packet(Port* port); +int recv_and_check_password_packet(Port* port); +#endif /* ---------------------------------------------------------------- * Ident authentication @@ -175,8 +180,15 @@ static int pg_krb5_recvauth(Port* port); #else #include #endif - +#ifndef ENABLE_UT static int pg_GSS_recvauth(Port* port); +static void clear_gss_info(pg_gssinfo* gss); +static void clear_gssconn_info(GssConn *gss); +#else +int pg_GSS_recvauth(Port* port); +void clear_gss_info(pg_gssinfo* gss); +void clear_gssconn_info(GssConn *gss); +#endif #endif /* ENABLE_GSS */ /* ---------------------------------------------------------------- @@ -227,7 +239,10 @@ THR_LOCAL ClientAuthentication_hook_type ClientAuthentication_hook = NULL; * Note that many sorts of failure report additional information in the * postmaster log, which we hope is only readable by good guys. */ -static void auth_failed(Port* port, int status) +#ifndef ENABLE_UT +static +#endif + void auth_failed(Port* port, int status) { const char* errstr = NULL; int errcode_return = ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION; @@ -649,6 +664,12 @@ void ClientAuthentication(Port* port) case uaCert: #ifdef USE_SSL + /* Forbid remote connection with initial user. */ + if (isRemoteInitialUser(port)) { + ereport(FATAL, + (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), + errmsg("Forbid remote connection with initial user."))); + } status = CheckCertAuth(port); #else Assert(false); @@ -791,7 +812,10 @@ void GenerateFakeSaltBytes(const char* user_name, char* fake_salt_bytes, int sal /* * Send an authentication request packet to the frontend. */ -static bool GsGenerateFakeEncryptString(char* encrypt_string, const Port* port, int len) +#ifndef ENABLE_UT +static +#endif + bool GsGenerateFakeEncryptString(char* encrypt_string, const Port* port, int len) { int retval = 0; errno_t rc = EOK; @@ -829,7 +853,10 @@ static bool GsGenerateFakeEncryptString(char* encrypt_string, const Port* port, securec_check_ss(rc, "\0", "\0"); return true; } -static void sendAuthRequest(Port* port, AuthRequest areq) +#ifndef ENABLE_UT +static +#endif + void sendAuthRequest(Port* port, AuthRequest areq) { /* Database Security: Support SHA256.*/ int32 stored_method = 0; @@ -858,7 +885,7 @@ static void sendAuthRequest(Port* port, AuthRequest areq) t_thrd.int_cxt.ImmediateInterruptOK = false; #ifdef ENABLE_MULTIPLE_NODES - if (IsDSorHaWalSender()) { + if (IsDSorHaWalSender() && !AM_WAL_HADR_SENDER && !AM_WAL_HADR_CN_SENDER) { #else if (IsDSorHaWalSender() && is_node_internal_connection(port) && !AM_WAL_HADR_SENDER) { #endif @@ -1168,7 +1195,10 @@ static void sendAuthRequest(Port* port, AuthRequest areq) * * Returns NULL if couldn't get password, else palloc'd string. 
*/ -static char* recv_password_packet(Port* port) +#ifndef ENABLE_UT +static +#endif + char* recv_password_packet(Port* port) { StringInfoData buf; @@ -1245,7 +1275,10 @@ static char* recv_password_packet(Port* port) * Called when we have sent an authorization request for a password. * Get the response and check it. */ -static int recv_and_check_password_packet(Port* port) +#ifndef ENABLE_UT +static +#endif + int recv_and_check_password_packet(Port* port) { int result; int rcs = 0; @@ -1480,7 +1513,10 @@ static GSS_DLLIMP gss_OID GSS_C_NT_USER_NAME = &GSS_C_NT_USER_NAME_desc; * Generate an error for GSSAPI authentication. The caller should apply * _() to errmsg to make it translatable. */ -static void pg_GSS_error(int severity, char* errmsg, OM_uint32 maj_stat, OM_uint32 min_stat) +#ifndef ENABLE_UT +static +#endif + void pg_GSS_error(int severity, char* errmsg, OM_uint32 maj_stat, OM_uint32 min_stat) { gss_buffer_desc gmsg; OM_uint32 lmin_s, msg_ctx; @@ -1521,7 +1557,10 @@ static void pg_GSS_error(int severity, char* errmsg, OM_uint32 maj_stat, OM_uint ereport(severity, (errmsg_internal("%s", errmsg), errdetail_internal("%s: %s", msg_major, msg_minor))); } -static int pg_GSS_recvauth(Port* port) +#ifndef ENABLE_UT +static +#endif + int pg_GSS_recvauth(Port* port) { OM_uint32 maj_stat, min_stat, gflags; int mtype; @@ -1813,7 +1852,10 @@ static int pg_GSS_recvauth(Port* port) * @in size : the size of data which need to be sent. * @return : the number of bytes sent or -1 for error. */ -static int GssInternalSend(int fd, const void* data, int size) +#ifndef ENABLE_UT +static +#endif + int GssInternalSend(int fd, const void* data, int size) { ssize_t nbytes; ssize_t nSend = 0; @@ -1850,7 +1892,10 @@ static int GssInternalSend(int fd, const void* data, int size) * @in size : the size of data which need recv. * @return : the number of bytes received or -1 for error. */ -static int GssInternalRecv(int fd, void* data, int size) +#ifndef ENABLE_UT +static +#endif + int GssInternalRecv(int fd, void* data, int size) { #define MSG_NOTIFICATION 0x8000 @@ -1939,7 +1984,10 @@ static int GssSendWithType(GssConn* gss_conn, char type) * @in type : the data type need to recv. * @return : 0 for success and -1 for failed. */ -static int GssRecvWithType(GssConn* gss_conn, char type) +#ifndef ENABLE_UT +static +#endif + int GssRecvWithType(GssConn* gss_conn, char type) { #define MSG_HEAD_LEN 5 /* sizeof(char)+sizeof(int) */ @@ -2094,7 +2142,10 @@ static char* GssGetKerberosHostName() * @in server_host : the server ip. * @return : 0 for success and -1 for error. */ -static int GssImportName(GssConn* gss_conn, char* server_host) +#ifndef ENABLE_UT +static +#endif + int GssImportName(GssConn* gss_conn, char* server_host) { #define MAXENVLEN 1024 @@ -2139,6 +2190,7 @@ static int GssImportName(GssConn* gss_conn, char* server_host) * @in gss_conn : stored the messages used in authentication. * @return : 0 for success and -1 for error. */ + static int GssClientInit(GssConn* gss_conn) { #define MAX_KERBEROS_CAPACITY 3000 /* The max capacity of kerberos is 3000 per second */ @@ -2409,7 +2461,10 @@ static int GssServerAccept(GssConn* gss_conn) * @in gss_conn : to stored the messages used in gss authentication. * @return : 0 for success and -1 for error. 
*/ -static int GssServerContinue(GssConn* gss_conn) +#ifndef ENABLE_UT +static +#endif + int GssServerContinue(GssConn* gss_conn) { int re = -1; @@ -2483,6 +2538,42 @@ int GssServerAuth(int socket, const char* krb_keyfile) return GssServerContinue(&gss_conn); } +/* + * release kerberos gss connection info + * if the handle to be released is specified GSS_C_NO_CREDENTIAL or GSS_C_NO_CONTEXT(which is initial status), + * the function will complete successfully but do nothing, so that it's safe to invoke the function without pre-judge + */ +#ifndef ENABLE_UT +static +#endif + void clear_gss_info(pg_gssinfo* gss) +{ + /* status codes coming from gss interface */ + OM_uint32 lmin_s = 0; + /* Release service principal credentials */ + (void)gss_release_cred(&lmin_s, &gss->cred); + /* Release gss security context and name after server authentication finished */ + (void)gss_delete_sec_context(&lmin_s, &gss->ctx, GSS_C_NO_BUFFER); + /* Release gss_name and gss_buf */ + (void)gss_release_name(&lmin_s, &gss->name); +} + +/* + * release kerberos gss stream connection info + * the function will complete successfully but do nothing, so that it's safe to invoke the function without pre-judge + */ +#ifndef ENABLE_UT +static +#endif + void clear_gssconn_info(GssConn *gss) +{ + /* status codes coming from gss interface */ + OM_uint32 lmin_s = 0; + /* Release gss security context and name after server authentication finished */ + (void)gss_delete_sec_context(&lmin_s, &gss->gctx, GSS_C_NO_BUFFER); + /* Release gss_name and gss_buf */ + (void)gss_release_name(&lmin_s, &gss->gtarg_nam); +} #endif /* ENABLE_GSS */ /* ---------------------------------------------------------------- @@ -3016,7 +3107,10 @@ ident_inet_done: */ #ifdef HAVE_UNIX_SOCKETS -static int auth_peer(hbaPort* port) +#ifndef ENABLE_UT +static +#endif + int auth_peer(hbaPort* port) { char ident_user[IDENT_USERNAME_MAX + 1]; uid_t uid; @@ -3544,7 +3638,10 @@ static int CheckLDAPAuth(Port* port) * ---------------------------------------------------------------- */ #ifdef USE_SSL -static int CheckCertAuth(Port* port) +#ifndef ENABLE_UT +static +#endif + int CheckCertAuth(Port* port) { Assert(port->ssl); @@ -3645,34 +3742,3 @@ static int CheckIAMAuth(Port* port) return STATUS_OK; } #endif - -/* - * release kerberos gss connection info - * if the handle to be released is specified GSS_C_NO_CREDENTIAL or GSS_C_NO_CONTEXT(which is initial status), - * the function will complete successfully but do nothing, so that it's safe to invoke the function without pre-judge - */ -static void clear_gss_info(pg_gssinfo* gss) -{ - /* status codes coming from gss interface */ - OM_uint32 lmin_s = 0; - /* Release service principal credentials */ - (void)gss_release_cred(&lmin_s, &gss->cred); - /* Release gss security context and name after server authentication finished */ - (void)gss_delete_sec_context(&lmin_s, &gss->ctx, GSS_C_NO_BUFFER); - /* Release gss_name and gss_buf */ - (void)gss_release_name(&lmin_s, &gss->name); -} - -/* - * release kerberos gss stream connection info - * the function will complete successfully but do nothing, so that it's safe to invoke the function without pre-judge - */ -static void clear_gssconn_info(GssConn *gss) -{ - /* status codes coming from gss interface */ - OM_uint32 lmin_s = 0; - /* Release gss security context and name after server authentication finished */ - (void)gss_delete_sec_context(&lmin_s, &gss->gctx, GSS_C_NO_BUFFER); - /* Release gss_name and gss_buf */ - (void)gss_release_name(&lmin_s, &gss->gtarg_nam); -} diff 
--git a/src/common/backend/libpq/be-secure.cpp b/src/common/backend/libpq/be-secure.cpp index bbd89cdca..c9f02b34e 100644 --- a/src/common/backend/libpq/be-secure.cpp +++ b/src/common/backend/libpq/be-secure.cpp @@ -96,7 +96,6 @@ typedef enum DHKeyLength { DHKey8192 } DHKeyLength; -static int verify_cb(int ok, X509_STORE_CTX* ctx); static void info_cb(const SSL* ssl, int type, int args); static const char* SSLerrmessage(void); static void set_user_config_ssl_ciphers(const char* sslciphers); @@ -122,16 +121,19 @@ extern THR_LOCAL unsigned char disable_pqlocking; /* security ciphers suites in SSL connection */ static const char* ssl_ciphers_map[] = { - TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 */ - TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 */ - TLS1_TXT_DHE_RSA_WITH_AES_128_CCM, /* TLS_DHE_RSA_WITH_AES_128_CCM */ - TLS1_TXT_DHE_RSA_WITH_AES_256_CCM, /* TLS_DHE_RSA_WITH_AES_256_CCM */ - TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 */ - TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ + TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 */ + TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ + /* The following are compatible with earlier versions of the client. */ + TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, */ + TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 */ NULL}; +#ifndef ENABLE_UT +static +#endif +bool g_server_crl_err = false; #endif const char* ssl_cipher_file = "server.key.cipher"; @@ -442,60 +444,60 @@ ssize_t secure_write(Port* port, void* ptr, size_t len) * criteria (e.g., accepting self-signed or expired certs), but * for now we accept the default checks. */ +int be_verify_cb(int ok, X509_STORE_CTX* ctx) +{ + if (ok) { + return ok; + } - static int verify_cb(int ok, X509_STORE_CTX* ctx) - { - int cert_error = X509_STORE_CTX_get_error(ctx); + /* + * When the CRL is abnormal, it won't be used to check whether the certificate is revoked, + * and the services shouldn't be affected due to the CRL exception. 
+ */ + const int crl_err_scenarios[] = { + X509_V_ERR_CRL_HAS_EXPIRED, + X509_V_ERR_UNABLE_TO_GET_CRL, + X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE, + X509_V_ERR_CRL_SIGNATURE_FAILURE, + X509_V_ERR_CRL_NOT_YET_VALID, + X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD, + X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD, + X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER, + X509_V_ERR_KEYUSAGE_NO_CRL_SIGN, + X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION, + X509_V_ERR_DIFFERENT_CRL_SCOPE, + X509_V_ERR_CRL_PATH_VALIDATION_ERROR + }; + bool ignore_crl_err = false; - if (!ok) - { - ereport(LOG, (errmsg("verify error:num=%d:%s \n", cert_error, - X509_verify_cert_error_string(cert_error)))); - switch (cert_error) - { - case X509_V_ERR_CRL_HAS_EXPIRED: - ok = 1; - break; - case X509_V_ERR_UNABLE_TO_GET_CRL: - ok = 1; - break; - case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE: - ok = 1; - break; - case X509_V_ERR_CRL_SIGNATURE_FAILURE: - ok = 1; - break; - case X509_V_ERR_CRL_NOT_YET_VALID: - ok = 1; - break; - case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD: - ok = 1; - break; - case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD: - ok = 1; - break; - case X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER: - ok = 1; - break; - case X509_V_ERR_KEYUSAGE_NO_CRL_SIGN: - ok = 1; - break; - case X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION: - ok = 1; - break; - case X509_V_ERR_DIFFERENT_CRL_SCOPE: - ok = 1; - break; - case X509_V_ERR_CRL_PATH_VALIDATION_ERROR: - ok = 1; - break; - default: - break; - } - } + int err_code = X509_STORE_CTX_get_error(ctx); + const char *err_msg = X509_verify_cert_error_string(err_code); + if (!g_server_crl_err) { + for (size_t i = 0; i < sizeof(crl_err_scenarios) / sizeof(crl_err_scenarios[0]); i++) { + if (err_code == crl_err_scenarios[i]) { + ereport(LOG, + (errmsg("During SSL authentication, there are some errors in the CRL, so we just ignore the CRL. " + "{ssl err code: %d, ssl err message: %s}\n", err_code, err_msg))); + + g_server_crl_err = true; + ignore_crl_err = true; + break; + } + } + } else { + if (err_code == X509_V_ERR_CERT_REVOKED) { + g_server_crl_err = false; /* reset */ + ignore_crl_err = true; + } + } - return ok; - } + if (ignore_crl_err) { + X509_STORE_CTX_set_error(ctx, X509_V_OK); + ok = 1; + } + + return ok; +} /* * This callback is used to copy SSL information messages @@ -855,7 +857,8 @@ static void initialize_SSL(void) * presented. We might fail such connections later, depending on * what we find in pg_hba.conf. */ - SSL_CTX_set_verify(u_sess->libpq_cxt.SSL_server_context, (SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE), verify_cb); + SSL_CTX_set_verify(u_sess->libpq_cxt.SSL_server_context, (SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE), + be_verify_cb); /* Increase the depth to support multi-level certificate. 
*/ SSL_CTX_set_verify_depth(u_sess->libpq_cxt.SSL_server_context, (MAX_CERTIFICATE_DEPTH_SUPPORTED - 2)); @@ -889,6 +892,8 @@ static int open_server_SSL(Port* port) Assert(port->ssl == NULL); Assert(port->peer == NULL); + g_server_crl_err = false; + port->ssl = SSL_new(u_sess->libpq_cxt.SSL_server_context); if (port->ssl == NULL) { ereport(COMMERROR, diff --git a/src/common/backend/libpq/hba.cpp b/src/common/backend/libpq/hba.cpp index 46289eb1b..89dd61d48 100644 --- a/src/common/backend/libpq/hba.cpp +++ b/src/common/backend/libpq/hba.cpp @@ -1396,7 +1396,7 @@ static void check_hba_replication(hbaPort* port) HbaToken* tok = NULL; tok = (HbaToken*)lfirst(cell); if (token_is_keyword(tok, "replication") && hba->conntype != ctLocal) { - if (hba->auth_method != uaGSS && !AM_WAL_HADR_SENDER) { + if (hba->auth_method != uaGSS && !AM_WAL_HADR_SENDER && !AM_WAL_HADR_CN_SENDER) { hba->auth_method = uaTrust; } ereport(LOG, @@ -1590,7 +1590,7 @@ static void check_hba(hbaPort* port) if (!isUsernameSame && hba->auth_method == uaTrust) { hba->auth_method = get_default_auth_method(port->user_name); } - } else if (hba->auth_method == uaTrust) { + } else if (hba->auth_method == uaTrust || hba->auth_method == uaPeer) { /* For non-initdb user, password is always needed */ hba->auth_method = get_default_auth_method(port->user_name); } diff --git a/src/common/backend/libpq/pqcomm.cpp b/src/common/backend/libpq/pqcomm.cpp index 40cde3203..8e54da4df 100644 --- a/src/common/backend/libpq/pqcomm.cpp +++ b/src/common/backend/libpq/pqcomm.cpp @@ -810,7 +810,7 @@ int StreamServerPort(int family, char* hostName, unsigned short portNumber, cons sinp = (struct sockaddr*)(addr->ai_addr); if (addr->ai_family == AF_INET6) { result = inet_net_ntop(AF_INET6, - &((struct sockaddr_in*)sinp)->sin_addr, + &((struct sockaddr_in6*)sinp)->sin6_addr, 128, t_thrd.postmaster_cxt.LocalAddrList[t_thrd.postmaster_cxt.LocalIpNum], IP_LEN); @@ -1587,6 +1587,39 @@ int pq_flush(void) return res; } +/* If the query has been canceled, print information */ +static void QueryCancelPrint() +{ + if (t_thrd.storage_cxt.cancel_from_timeout) { + /* For STATEMENT, AUTOVACUUM, WLM and DEADLOCK timeouts */ + ereport(LOG, + (errcode(ERRCODE_QUERY_CANCELED), + errmsg("canceling statement due to statement timeout"), + ignore_interrupt(true))); + } else { + /* If the node is a coordinator, the client is "user"; if the node is a datanode, the client is "coordinator" */ + ereport(LOG, + (errcode(ERRCODE_QUERY_CANCELED), + errmsg("canceling statement due to %s request", IS_PGXC_DATANODE ?
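QueryCancelPrint above separates timeout-driven cancels from user or coordinator cancel requests. The timeout branch is easy to provoke from SQL; the one-second value is arbitrary:

```
-- Exercises the statement-timeout cancel path; the server log records
-- "canceling statement due to statement timeout".
SET statement_timeout = '1s';
SELECT pg_sleep(5);
RESET statement_timeout;
```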
"coordinator" : "user"), + ignore_interrupt(true))); + } + return; +} + +/* If send message to client failed, set flag to true */ +static void SetConnectionLostFlag() +{ + if ((StreamThreadAmI() == false) && (!t_thrd.proc_cxt.proc_exit_inprogress)) { + /* For client connection, set ClientConnectionLost to true */ + t_thrd.int_cxt.ClientConnectionLost = 1; + InterruptPending = 1; + } else if (StreamThreadAmI()) { + /* For stream connection, set StreamConnectionLost to true */ + t_thrd.int_cxt.StreamConnectionLost = 1; + } + return; +} + /* -------------------------------- * internal_flush - flush pending output * @@ -1602,8 +1635,10 @@ int internal_flush(void) static THR_LOCAL int last_reported_send_errno = 0; + errno_t ret; char* bufptr = t_thrd.libpq_cxt.PqSendBuffer + t_thrd.libpq_cxt.PqSendStart; char* bufend = t_thrd.libpq_cxt.PqSendBuffer + t_thrd.libpq_cxt.PqSendPointer; + char connTimeInfoStr[INITIAL_EXPBUFFER_SIZE] = {'\0'}; WaitState oldStatus = pgstat_report_waitstatus(STATE_WAIT_UNDEFINED, true); if (StreamThreadAmI() == false) { @@ -1617,6 +1652,20 @@ int internal_flush(void) global_node_definition ? global_node_definition->num_nodes : -1); } + /* + * In session initialize process, client is connecting. + * If reply messages failed, we record entire connecting time and print it in error message. + */ + if (u_sess->clientConnTime_cxt.checkOnlyInConnProcess) { + instr_time currentTime; + INSTR_TIME_SET_CURRENT(u_sess->clientConnTime_cxt.connEndTime); + currentTime = u_sess->clientConnTime_cxt.connEndTime; + INSTR_TIME_SUBTRACT(currentTime, u_sess->clientConnTime_cxt.connStartTime); + ret = sprintf_s(connTimeInfoStr, INITIAL_EXPBUFFER_SIZE, + " Client connection consumes %lfms.", INSTR_TIME_GET_MILLISEC(currentTime)); + securec_check_ss(ret, "", ""); + } + while (bufptr < bufend) { int r; @@ -1624,18 +1673,7 @@ int internal_flush(void) if (unlikely(r == 0 && (StreamThreadAmI() == true || u_sess->proc_cxt.MyProcPort->is_logic_conn))) { /* Stop query when cancel happend */ if (t_thrd.int_cxt.QueryCancelPending) { - if (t_thrd.storage_cxt.cancel_from_timeout) { - ereport(LOG, - (errcode(ERRCODE_QUERY_CANCELED), - errmsg("canceling statement due to statement timeout"), - ignore_interrupt(true))); - } else { - ereport(LOG, - (errcode(ERRCODE_QUERY_CANCELED), - errmsg("canceling statement due to %s request", IS_PGXC_DATANODE ? "coordinator" : "user"), - ignore_interrupt(true))); - } - + QueryCancelPrint(); (void)pgstat_report_waitstatus(oldStatus); return EOF; } else { @@ -1671,16 +1709,16 @@ int internal_flush(void) last_reported_send_errno = errno; ereport(COMMERROR, (errcode_for_socket_access(), - errmsg("could not send data to client [ Remote IP: %s PORT: %s FD: %d BLOCK: %d]. Detail: %m", + errmsg("could not send data to client [ Remote IP: %s PORT: %s FD: %d BLOCK: %d].%s Detail: %m", u_sess->proc_cxt.MyProcPort->remote_host, (u_sess->proc_cxt.MyProcPort->remote_port != NULL && u_sess->proc_cxt.MyProcPort->remote_port[0] != '\0') ? u_sess->proc_cxt.MyProcPort->remote_port : "", u_sess->proc_cxt.MyProcPort->sock, - u_sess->proc_cxt.MyProcPort->noblock))); + u_sess->proc_cxt.MyProcPort->noblock, + connTimeInfoStr))); } - /* * We drop the buffered data anyway so that processing can * continue, even though we'll probably quit soon. We also set a @@ -1688,12 +1726,7 @@ int internal_flush(void) * the connection. 
*/ t_thrd.libpq_cxt.PqSendStart = t_thrd.libpq_cxt.PqSendPointer = 0; - if ((StreamThreadAmI() == false) && (!t_thrd.proc_cxt.proc_exit_inprogress)) { - t_thrd.int_cxt.ClientConnectionLost = 1; - InterruptPending = 1; - } else if (StreamThreadAmI()) { - t_thrd.int_cxt.StreamConnectionLost = 1; - } + SetConnectionLostFlag(); (void)pgstat_report_waitstatus(oldStatus); return EOF; } diff --git a/src/common/backend/libpq/sha2.cpp b/src/common/backend/libpq/sha2.cpp index fef86f448..3b996ffe2 100644 --- a/src/common/backend/libpq/sha2.cpp +++ b/src/common/backend/libpq/sha2.cpp @@ -697,6 +697,14 @@ bool pg_sha256_encrypt( return true; } +#ifdef ENABLE_LITE_MODE +bool pg_sha256_encrypt_v1( + const char* password, const char* salt_s, size_t salt_len, char* buf, char* client_key_buf) +{ + return pg_sha256_encrypt(password, salt_s, salt_len, buf, client_key_buf, ITERATION_COUNT_V1); +} +#endif + /* Caller must ensure that the length of the password1 and password2 are the same, and equal to the length */ int XOR_between_password(const char* password1, const char* password2, char* r, int length) { diff --git a/src/common/backend/nodes/copyfuncs.cpp b/src/common/backend/nodes/copyfuncs.cpp index 6f88706f4..1271749d9 100644 --- a/src/common/backend/nodes/copyfuncs.cpp +++ b/src/common/backend/nodes/copyfuncs.cpp @@ -312,6 +312,7 @@ static ModifyTable* _copyModifyTable(const ModifyTable* from) COPY_NODE_FIELD(updateTlist); COPY_NODE_FIELD(exclRelTlist); COPY_SCALAR_FIELD(exclRelRTIndex); + COPY_NODE_FIELD(upsertWhere); return newnode; } @@ -569,6 +570,8 @@ static void CopyScanFields(const Scan* from, Scan* newnode) newnode->pruningInfo = copyPruningResult(from->pruningInfo); COPY_SCALAR_FIELD(scan_qual_optimized); COPY_SCALAR_FIELD(predicate_pushdown_optimized); + COPY_SCALAR_FIELD(scanBatchMode); + COPY_SCALAR_FIELD(tableRows); /* partition infos */ COPY_NODE_FIELD(bucketInfo); @@ -1273,6 +1276,7 @@ static HashJoin* _copyHashJoin(const HashJoin* from) COPY_SCALAR_FIELD(rebuildHashTable); COPY_SCALAR_FIELD(isSonicHash); CopyMemInfoFields(&from->mem_info, &newnode->mem_info); + COPY_SCALAR_FIELD(joinRows); return newnode; } @@ -1547,6 +1551,9 @@ static PlanRowMark* _copyPlanRowMark(const PlanRowMark* from) COPY_SCALAR_FIELD(rowmarkId); COPY_SCALAR_FIELD(markType); COPY_SCALAR_FIELD(noWait); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COPY_SCALAR_FIELD(waitSec); + } COPY_SCALAR_FIELD(isParent); COPY_SCALAR_FIELD(numAttrs); COPY_BITMAPSET_FIELD(bms_nodeids); @@ -2367,6 +2374,7 @@ static Var* _copyVar(const Var* from) { Var* newnode = makeNode(Var); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(varno); COPY_SCALAR_FIELD(varattno); COPY_SCALAR_FIELD(vartype); @@ -2387,6 +2395,7 @@ static Const* _copyConst(const Const* from) { Const* newnode = makeNode(Const); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(consttype); COPY_SCALAR_FIELD(consttypmod); COPY_SCALAR_FIELD(constcollid); @@ -2421,6 +2430,7 @@ static Param* _copyParam(const Param* from) { Param* newnode = makeNode(Param); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(paramkind); COPY_SCALAR_FIELD(paramid); COPY_SCALAR_FIELD(paramtype); @@ -2440,6 +2450,7 @@ static Rownum* _copyRownum(const Rownum* from) { Rownum* newnode = (Rownum*)makeNode(Rownum); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(rownumcollid); COPY_LOCATION_FIELD(location); @@ -2453,6 +2464,7 @@ static Aggref* _copyAggref(const Aggref* from) { Aggref* newnode = makeNode(Aggref); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(aggfnoid); 
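_copyPlanRowMark now copies waitSec, gated on WAIT_N_TUPLE_LOCK_VERSION_NUM so that older-version peers never see the field. At the SQL level this corresponds to a row-lock wait bounded in seconds; a hedged sketch, assuming the SELECT ... FOR UPDATE WAIT n grammar the field name points to (table and predicate are placeholders):

```
-- Wait at most 5 seconds for a conflicting row lock instead of blocking
-- indefinitely; NOWAIT remains the zero-wait special case.
SELECT * FROM t1 WHERE id = 42 FOR UPDATE WAIT 5;
```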
COPY_SCALAR_FIELD(aggtype); #ifdef PGXC @@ -2482,6 +2494,7 @@ static GroupingFunc* _copyGroupingFunc(const GroupingFunc* from) { GroupingFunc* newnode = makeNode(GroupingFunc); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(args); COPY_NODE_FIELD(refs); COPY_NODE_FIELD(cols); @@ -2509,6 +2522,7 @@ static WindowFunc* _copyWindowFunc(const WindowFunc* from) { WindowFunc* newnode = makeNode(WindowFunc); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(winfnoid); COPY_SCALAR_FIELD(wintype); COPY_SCALAR_FIELD(wincollid); @@ -2529,6 +2543,7 @@ static ArrayRef* _copyArrayRef(const ArrayRef* from) { ArrayRef* newnode = makeNode(ArrayRef); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(refarraytype); COPY_SCALAR_FIELD(refelemtype); COPY_SCALAR_FIELD(reftypmod); @@ -2548,6 +2563,7 @@ static FuncExpr* _copyFuncExpr(const FuncExpr* from) { FuncExpr* newnode = makeNode(FuncExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(funcid); COPY_SCALAR_FIELD(funcresulttype); COPY_SCALAR_FIELD(funcresulttype_orig); @@ -2570,6 +2586,7 @@ static NamedArgExpr* _copyNamedArgExpr(const NamedArgExpr* from) { NamedArgExpr* newnode = makeNode(NamedArgExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_STRING_FIELD(name); COPY_SCALAR_FIELD(argnumber); @@ -2581,6 +2598,7 @@ static NamedArgExpr* _copyNamedArgExpr(const NamedArgExpr* from) template static void _copyCommonOpExprPart(const T* from, T* newnode) { + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(opno); COPY_SCALAR_FIELD(opfuncid); COPY_SCALAR_FIELD(opresulttype); @@ -2627,6 +2645,7 @@ static ScalarArrayOpExpr* _copyScalarArrayOpExpr(const ScalarArrayOpExpr* from) { ScalarArrayOpExpr* newnode = makeNode(ScalarArrayOpExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(opno); COPY_SCALAR_FIELD(opfuncid); COPY_SCALAR_FIELD(useOr); @@ -2644,6 +2663,7 @@ static BoolExpr* _copyBoolExpr(const BoolExpr* from) { BoolExpr* newnode = makeNode(BoolExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(boolop); COPY_NODE_FIELD(args); COPY_LOCATION_FIELD(location); @@ -2658,6 +2678,7 @@ static SubLink* _copySubLink(const SubLink* from) { SubLink* newnode = makeNode(SubLink); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(subLinkType); COPY_NODE_FIELD(testexpr); COPY_NODE_FIELD(operName); @@ -2674,6 +2695,7 @@ static SubPlan* _copySubPlan(const SubPlan* from) { SubPlan* newnode = makeNode(SubPlan); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(subLinkType); COPY_NODE_FIELD(testexpr); COPY_NODE_FIELD(paramIds); @@ -2700,6 +2722,7 @@ static AlternativeSubPlan* _copyAlternativeSubPlan(const AlternativeSubPlan* fro { AlternativeSubPlan* newnode = makeNode(AlternativeSubPlan); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(subplans); return newnode; @@ -2712,6 +2735,7 @@ static FieldSelect* _copyFieldSelect(const FieldSelect* from) { FieldSelect* newnode = makeNode(FieldSelect); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(fieldnum); COPY_SCALAR_FIELD(resulttype); @@ -2728,6 +2752,7 @@ static FieldStore* _copyFieldStore(const FieldStore* from) { FieldStore* newnode = makeNode(FieldStore); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_NODE_FIELD(newvals); COPY_NODE_FIELD(fieldnums); @@ -2743,6 +2768,7 @@ static RelabelType* _copyRelabelType(const RelabelType* from) { RelabelType* newnode = makeNode(RelabelType); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(resulttype); COPY_SCALAR_FIELD(resulttypmod); @@ -2760,6 +2786,7 @@ static CoerceViaIO* _copyCoerceViaIO(const 
CoerceViaIO* from) { CoerceViaIO* newnode = makeNode(CoerceViaIO); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(resulttype); COPY_SCALAR_FIELD(resultcollid); @@ -2776,6 +2803,7 @@ static ArrayCoerceExpr* _copyArrayCoerceExpr(const ArrayCoerceExpr* from) { ArrayCoerceExpr* newnode = makeNode(ArrayCoerceExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(elemfuncid); COPY_SCALAR_FIELD(resulttype); @@ -2795,6 +2823,7 @@ static ConvertRowtypeExpr* _copyConvertRowtypeExpr(const ConvertRowtypeExpr* fro { ConvertRowtypeExpr* newnode = makeNode(ConvertRowtypeExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(resulttype); COPY_SCALAR_FIELD(convertformat); @@ -2810,6 +2839,7 @@ static CollateExpr* _copyCollateExpr(const CollateExpr* from) { CollateExpr* newnode = makeNode(CollateExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(collOid); COPY_LOCATION_FIELD(location); @@ -2824,6 +2854,7 @@ static CaseExpr* _copyCaseExpr(const CaseExpr* from) { CaseExpr* newnode = makeNode(CaseExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(casetype); COPY_SCALAR_FIELD(casecollid); COPY_NODE_FIELD(arg); @@ -2841,6 +2872,7 @@ static CaseWhen* _copyCaseWhen(const CaseWhen* from) { CaseWhen* newnode = makeNode(CaseWhen); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(expr); COPY_NODE_FIELD(result); COPY_LOCATION_FIELD(location); @@ -2855,6 +2887,7 @@ static CaseTestExpr* _copyCaseTestExpr(const CaseTestExpr* from) { CaseTestExpr* newnode = makeNode(CaseTestExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(typeId); COPY_SCALAR_FIELD(typeMod); COPY_SCALAR_FIELD(collation); @@ -2869,6 +2902,7 @@ static ArrayExpr* _copyArrayExpr(const ArrayExpr* from) { ArrayExpr* newnode = makeNode(ArrayExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(array_typeid); COPY_SCALAR_FIELD(array_collid); COPY_SCALAR_FIELD(element_typeid); @@ -2886,6 +2920,7 @@ static RowExpr* _copyRowExpr(const RowExpr* from) { RowExpr* newnode = makeNode(RowExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(args); COPY_SCALAR_FIELD(row_typeid); COPY_SCALAR_FIELD(row_format); @@ -2902,6 +2937,7 @@ static RowCompareExpr* _copyRowCompareExpr(const RowCompareExpr* from) { RowCompareExpr* newnode = makeNode(RowCompareExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(rctype); COPY_NODE_FIELD(opnos); COPY_NODE_FIELD(opfamilies); @@ -2919,6 +2955,7 @@ static CoalesceExpr* _copyCoalesceExpr(const CoalesceExpr* from) { CoalesceExpr* newnode = makeNode(CoalesceExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(coalescetype); COPY_SCALAR_FIELD(coalescecollid); COPY_NODE_FIELD(args); @@ -2936,6 +2973,7 @@ static MinMaxExpr* _copyMinMaxExpr(const MinMaxExpr* from) { MinMaxExpr* newnode = makeNode(MinMaxExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(minmaxtype); COPY_SCALAR_FIELD(minmaxcollid); COPY_SCALAR_FIELD(inputcollid); @@ -2953,6 +2991,7 @@ static XmlExpr* _copyXmlExpr(const XmlExpr* from) { XmlExpr* newnode = makeNode(XmlExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(op); COPY_STRING_FIELD(name); COPY_NODE_FIELD(named_args); @@ -2973,6 +3012,7 @@ static NullTest* _copyNullTest(const NullTest* from) { NullTest* newnode = makeNode(NullTest); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(nulltesttype); COPY_SCALAR_FIELD(argisrow); @@ -2987,6 +3027,7 @@ static HashFilter* _copyHashFilter(const HashFilter* from) { HashFilter* newnode = makeNode(HashFilter); + 
COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_NODE_FIELD(typeOids); COPY_NODE_FIELD(nodeList); @@ -3001,6 +3042,7 @@ static BooleanTest* _copyBooleanTest(const BooleanTest* from) { BooleanTest* newnode = makeNode(BooleanTest); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(booltesttype); @@ -3014,6 +3056,7 @@ static CoerceToDomain* _copyCoerceToDomain(const CoerceToDomain* from) { CoerceToDomain* newnode = makeNode(CoerceToDomain); + COPY_SCALAR_FIELD(xpr.selec); COPY_NODE_FIELD(arg); COPY_SCALAR_FIELD(resulttype); COPY_SCALAR_FIELD(resulttypmod); @@ -3031,6 +3074,7 @@ static CoerceToDomainValue* _copyCoerceToDomainValue(const CoerceToDomainValue* { CoerceToDomainValue* newnode = makeNode(CoerceToDomainValue); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(typeId); COPY_SCALAR_FIELD(typeMod); COPY_SCALAR_FIELD(collation); @@ -3046,6 +3090,7 @@ static SetToDefault* _copySetToDefault(const SetToDefault* from) { SetToDefault* newnode = makeNode(SetToDefault); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(typeId); COPY_SCALAR_FIELD(typeMod); COPY_SCALAR_FIELD(collation); @@ -3061,6 +3106,7 @@ static CurrentOfExpr* _copyCurrentOfExpr(const CurrentOfExpr* from) { CurrentOfExpr* newnode = makeNode(CurrentOfExpr); + COPY_SCALAR_FIELD(xpr.selec); COPY_SCALAR_FIELD(cvarno); COPY_STRING_FIELD(cursor_name); COPY_SCALAR_FIELD(cursor_param); @@ -3154,6 +3200,7 @@ static PartitionState* _copyPartitionState(const PartitionState* from) COPY_NODE_FIELD(partitionList); COPY_SCALAR_FIELD(rowMovement); COPY_NODE_FIELD(subPartitionState); + COPY_NODE_FIELD(partitionNameList); return newnode; } @@ -3229,6 +3276,17 @@ static AddPartitionState* _copyAddPartitionState(const AddPartitionState* from) return newnode; } +static AddSubPartitionState* _copyAddSubPartitionState(const AddSubPartitionState* from) +{ + AddSubPartitionState* newnode = makeNode(AddSubPartitionState); + + COPY_STRING_FIELD(partitionName); + COPY_NODE_FIELD(subPartitionList); + + return newnode; +} + + static RangePartitionStartEndDefState* _copyRangePartitionStartEndDefState(const RangePartitionStartEndDefState* from) { RangePartitionStartEndDefState* newnode = makeNode(RangePartitionStartEndDefState); @@ -3262,6 +3320,7 @@ static RangePartitionindexDefState* _copyRangePartitionindexDefState(const Range COPY_STRING_FIELD(name); COPY_STRING_FIELD(tablespace); + COPY_NODE_FIELD(sublist); return newnode; } @@ -3345,7 +3404,6 @@ static RestrictInfo* _copyRestrictInfo(const RestrictInfo* from) COPY_SCALAR_FIELD(hashjoinoperator); COPY_SCALAR_FIELD(left_bucketsize); COPY_SCALAR_FIELD(right_bucketsize); - COPY_SCALAR_FIELD(converted); return newnode; } @@ -3508,6 +3566,7 @@ static RangeTblEntry* _copyRangeTblEntry(const RangeTblEntry* from) COPY_SCALAR_FIELD(isexcluded); COPY_SCALAR_FIELD(sublink_pull_up); COPY_SCALAR_FIELD(is_ustore); + COPY_SCALAR_FIELD(pulled_from_subquery); return newnode; } @@ -3587,6 +3646,9 @@ static RowMarkClause* _copyRowMarkClause(const RowMarkClause* from) COPY_SCALAR_FIELD(rti); COPY_SCALAR_FIELD(forUpdate); COPY_SCALAR_FIELD(noWait); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COPY_SCALAR_FIELD(waitSec); + } COPY_SCALAR_FIELD(pushedDown); if (t_thrd.proc->workingVersionNum >= ENHANCED_TUPLE_LOCK_VERSION_NUM) { COPY_SCALAR_FIELD(strength); @@ -3614,6 +3676,7 @@ static UpsertClause* _copyUpsertClause(const UpsertClause* from) COPY_NODE_FIELD(targetList); COPY_LOCATION_FIELD(location); + COPY_NODE_FIELD(whereClause); return newnode; } @@ -3626,6 
+3689,7 @@ static UpsertExpr* _copyUpsertExpr(const UpsertExpr* from) COPY_NODE_FIELD(updateTlist); COPY_NODE_FIELD(exclRelTlist); COPY_SCALAR_FIELD(exclRelIndex); + COPY_NODE_FIELD(upsertWhere); return newnode; } @@ -4110,6 +4174,9 @@ static LockingClause* _copyLockingClause(const LockingClause* from) if (t_thrd.proc->workingVersionNum >= ENHANCED_TUPLE_LOCK_VERSION_NUM) { COPY_SCALAR_FIELD(strength); } + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COPY_SCALAR_FIELD(waitSec); + } return newnode; } @@ -4191,6 +4258,24 @@ static PredpushHint* _copyPredpushHint(const PredpushHint* from) return newnode; } +/* + * @Description: Copy hint fields. + * @in from: Source hint. + * @out newnode: Target hint. + */ +static PredpushSameLevelHint* _copyPredpushSameLevelHint(const PredpushSameLevelHint* from) +{ + PredpushSameLevelHint* newnode = makeNode(PredpushSameLevelHint); + + CopyBaseHintFilelds((const Hint*)from, (Hint*)newnode); + COPY_SCALAR_FIELD(negative); + COPY_STRING_FIELD(dest_name); + COPY_SCALAR_FIELD(dest_id); + COPY_BITMAPSET_FIELD(candidates); + + return newnode; +} + /* * @Description: Copy hint fields. * @in from: Source hint. @@ -4421,6 +4506,7 @@ static HintState* _copyHintState(const HintState* from) COPY_NODE_FIELD(set_hint); COPY_NODE_FIELD(cache_plan_hint); COPY_NODE_FIELD(no_gpc_hint); + COPY_NODE_FIELD(predpush_same_level_hint); return newnode; } @@ -4728,6 +4814,27 @@ static GrantRoleStmt* _copyGrantRoleStmt(const GrantRoleStmt* from) return newnode; } +static DbPriv* _copyDbPriv(const DbPriv* from) +{ + DbPriv* newnode = makeNode(DbPriv); + + COPY_STRING_FIELD(db_priv_name); + + return newnode; +} + +static GrantDbStmt* _copyGrantDbStmt(const GrantDbStmt* from) +{ + GrantDbStmt* newnode = makeNode(GrantDbStmt); + + COPY_SCALAR_FIELD(is_grant); + COPY_NODE_FIELD(privileges); + COPY_NODE_FIELD(grantees); + COPY_SCALAR_FIELD(admin_opt); + + return newnode; +} + static AlterDefaultPrivilegesStmt* _copyAlterDefaultPrivilegesStmt(const AlterDefaultPrivilegesStmt* from) { AlterDefaultPrivilegesStmt* newnode = makeNode(AlterDefaultPrivilegesStmt); @@ -5956,6 +6063,10 @@ static LockStmt* _copyLockStmt(const LockStmt* from) COPY_NODE_FIELD(relations); COPY_SCALAR_FIELD(mode); COPY_SCALAR_FIELD(nowait); + COPY_SCALAR_FIELD(cancelable); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COPY_SCALAR_FIELD(waitSec); + } return newnode; } @@ -6212,6 +6323,16 @@ static DropSubscriptionStmt *_copyDropSubscriptionStmt(const DropSubscriptionStm return newnode; } +static PredictByFunction *_copyPredictByFunctionStmt(const PredictByFunction *from) +{ + PredictByFunction* newnode = makeNode(PredictByFunction); + COPY_STRING_FIELD(model_name); + COPY_SCALAR_FIELD(model_name_location); + COPY_NODE_FIELD(model_args); + COPY_SCALAR_FIELD(model_args_location); + return newnode; +} + /* **************************************************************** * pg_list.h copy functions * **************************************************************** @@ -6407,6 +6528,24 @@ static DropResourcePoolStmt* _copyDropResourcePoolStmt(const DropResourcePoolStm return newnode; } +static AlterGlobalConfigStmt* _copyAlterGlobalConfigStmt(const AlterGlobalConfigStmt* from) +{ + AlterGlobalConfigStmt* newnode = makeNode(AlterGlobalConfigStmt); + + COPY_NODE_FIELD(options); + + return newnode; +} + +static DropGlobalConfigStmt* _copyDropGlobalConfigStmt(const DropGlobalConfigStmt* from) +{ + DropGlobalConfigStmt* newnode = makeNode(DropGlobalConfigStmt); + + 
COPY_NODE_FIELD(options); + + return newnode; +} + static CreateWorkloadGroupStmt* _copyCreateWorkloadGroupStmt(const CreateWorkloadGroupStmt* from) { CreateWorkloadGroupStmt* newnode = makeNode(CreateWorkloadGroupStmt); @@ -7065,6 +7204,9 @@ void* copyObject(const void* from) case T_AddPartitionState: retval = _copyAddPartitionState((AddPartitionState*)from); break; + case T_AddSubPartitionState: + retval = _copyAddSubPartitionState((AddSubPartitionState*)from); + break; case T_RangePartitionStartEndDefState: retval = _copyRangePartitionStartEndDefState((RangePartitionStartEndDefState*)from); break; @@ -7165,6 +7307,9 @@ void* copyObject(const void* from) case T_GrantRoleStmt: retval = _copyGrantRoleStmt((GrantRoleStmt*)from); break; + case T_GrantDbStmt: + retval = _copyGrantDbStmt((GrantDbStmt*)from); + break; case T_AlterDefaultPrivilegesStmt: retval = _copyAlterDefaultPrivilegesStmt((AlterDefaultPrivilegesStmt*)from); break; @@ -7501,6 +7646,12 @@ void* copyObject(const void* from) case T_DropResourcePoolStmt: retval = _copyDropResourcePoolStmt((DropResourcePoolStmt*)from); break; + case T_AlterGlobalConfigStmt: + retval = _copyAlterGlobalConfigStmt((AlterGlobalConfigStmt*)from); + break; + case T_DropGlobalConfigStmt: + retval = _copyDropGlobalConfigStmt((DropGlobalConfigStmt*)from); + break; case T_CreateWorkloadGroupStmt: retval = _copyCreateWorkloadGroupStmt((CreateWorkloadGroupStmt*)from); break; @@ -7694,6 +7845,9 @@ void* copyObject(const void* from) case T_AccessPriv: retval = _copyAccessPriv((AccessPriv*)from); break; + case T_DbPriv: + retval = _copyDbPriv((DbPriv*)from); + break; case T_XmlSerialize: retval = _copyXmlSerialize((XmlSerialize*)from); break; @@ -7801,6 +7955,9 @@ void* copyObject(const void* from) case T_PredpushHint: retval = _copyPredpushHint((PredpushHint*)from); break; + case T_PredpushSameLevelHint: + retval = _copyPredpushSameLevelHint((PredpushSameLevelHint*)from); + break; case T_RewriteHint: retval = _copyRewriteHint((RewriteHint*)from); break; @@ -7840,6 +7997,9 @@ void* copyObject(const void* from) case T_AlterSubscriptionStmt: retval = _copyAlterSubscriptionStmt((AlterSubscriptionStmt *)from); break; + case T_PredictByFunction: + retval = _copyPredictByFunctionStmt((PredictByFunction *)from); + break; case T_DropSubscriptionStmt: retval = _copyDropSubscriptionStmt((DropSubscriptionStmt *)from); break; diff --git a/src/common/backend/nodes/equalfuncs.cpp b/src/common/backend/nodes/equalfuncs.cpp index c986b1a9f..ba789f587 100644 --- a/src/common/backend/nodes/equalfuncs.cpp +++ b/src/common/backend/nodes/equalfuncs.cpp @@ -710,6 +710,7 @@ static bool _equalUpsertExpr(const UpsertExpr* a, const UpsertExpr* b) COMPARE_NODE_FIELD(updateTlist); COMPARE_NODE_FIELD(exclRelTlist); COMPARE_SCALAR_FIELD(exclRelIndex); + COMPARE_NODE_FIELD(upsertWhere); return true; } @@ -1071,6 +1072,23 @@ static bool _equalGrantRoleStmt(const GrantRoleStmt* a, const GrantRoleStmt* b) return true; } +static bool _equalDbPriv(const DbPriv* a, const DbPriv* b) +{ + COMPARE_STRING_FIELD(db_priv_name); + + return true; +} + +static bool _equalGrantDbStmt(const GrantDbStmt* a, const GrantDbStmt* b) +{ + COMPARE_SCALAR_FIELD(is_grant); + COMPARE_NODE_FIELD(privileges); + COMPARE_NODE_FIELD(grantees); + COMPARE_SCALAR_FIELD(admin_opt); + + return true; +} + static bool _equalAlterDefaultPrivilegesStmt(const AlterDefaultPrivilegesStmt* a, const AlterDefaultPrivilegesStmt* b) { COMPARE_NODE_FIELD(options); @@ -1190,6 +1208,14 @@ static bool _equalAddPartitionState(const 
AddPartitionState* a, const AddPartiti return true; } +static bool _equalAddSubPartitionState(const AddSubPartitionState* a, const AddSubPartitionState* b) +{ + COMPARE_STRING_FIELD(partitionName); + COMPARE_NODE_FIELD(subPartitionList); + + return true; +} + static bool _equalIntervalPartitionDefState(const IntervalPartitionDefState* a, const IntervalPartitionDefState* b) { COMPARE_SCALAR_FIELD(partInterval); @@ -1203,6 +1229,7 @@ static bool _equalRangePartitionindexDefState( { COMPARE_STRING_FIELD(name); COMPARE_STRING_FIELD(tablespace); + COMPARE_NODE_FIELD(sublist); return true; } @@ -1214,6 +1241,8 @@ static bool _equalPartitionState(const PartitionState* a, const PartitionState* COMPARE_NODE_FIELD(partitionKey); COMPARE_NODE_FIELD(partitionList); COMPARE_SCALAR_FIELD(rowMovement); + COMPARE_NODE_FIELD(subPartitionState); + COMPARE_NODE_FIELD(partitionNameList); return true; } @@ -2130,6 +2159,9 @@ static bool _equalLockStmt(const LockStmt* a, const LockStmt* b) COMPARE_NODE_FIELD(relations); COMPARE_SCALAR_FIELD(mode); COMPARE_SCALAR_FIELD(nowait); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COMPARE_SCALAR_FIELD(waitSec); + } return true; } @@ -2580,6 +2612,9 @@ static bool _equalLockingClause(const LockingClause* a, const LockingClause* b) if (t_thrd.proc->workingVersionNum >= ENHANCED_TUPLE_LOCK_VERSION_NUM) { COMPARE_SCALAR_FIELD(strength); } + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COMPARE_SCALAR_FIELD(waitSec); + } return true; } @@ -2644,6 +2679,7 @@ static bool _equalRangeTblEntry(const RangeTblEntry* a, const RangeTblEntry* b) COMPARE_SCALAR_FIELD(isexcluded); COMPARE_SCALAR_FIELD(sublink_pull_up); COMPARE_SCALAR_FIELD(is_ustore); + COMPARE_SCALAR_FIELD(pulled_from_subquery); return true; } @@ -2724,6 +2760,10 @@ static bool _equalRowMarkClause(const RowMarkClause* a, const RowMarkClause* b) COMPARE_SCALAR_FIELD(rti); COMPARE_SCALAR_FIELD(forUpdate); COMPARE_SCALAR_FIELD(noWait); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + COMPARE_SCALAR_FIELD(waitSec); + } + COMPARE_SCALAR_FIELD(pushedDown); if (t_thrd.proc->workingVersionNum >= ENHANCED_TUPLE_LOCK_VERSION_NUM) { COMPARE_SCALAR_FIELD(strength); @@ -2759,6 +2799,7 @@ static bool _equalUpsertClause(const UpsertClause* a, const UpsertClause* b) { COMPARE_NODE_FIELD(targetList); COMPARE_LOCATION_FIELD(location); + COMPARE_NODE_FIELD(whereClause); return true; } @@ -3435,6 +3476,9 @@ bool equal(const void* a, const void* b) case T_GrantRoleStmt: retval = _equalGrantRoleStmt((GrantRoleStmt*)a, (GrantRoleStmt*)b); break; + case T_GrantDbStmt: + retval = _equalGrantDbStmt((GrantDbStmt*)a, (GrantDbStmt*)b); + break; case T_AlterDefaultPrivilegesStmt: retval = _equalAlterDefaultPrivilegesStmt((AlterDefaultPrivilegesStmt*)a, (AlterDefaultPrivilegesStmt*)b); break; @@ -3479,6 +3523,9 @@ bool equal(const void* a, const void* b) case T_AddPartitionState: retval = _equalAddPartitionState((AddPartitionState*)a, (AddPartitionState*)b); break; + case T_AddSubPartitionState: + retval = _equalAddSubPartitionState((AddSubPartitionState*)a, (AddSubPartitionState*)b); + break; case T_SplitInfo: retval = _equalSplitInfo((SplitInfo*)a, (SplitInfo*)b); break; @@ -3992,6 +4039,9 @@ bool equal(const void* a, const void* b) case T_AccessPriv: retval = _equalAccessPriv((AccessPriv*)a, (AccessPriv*)b); break; + case T_DbPriv: + retval = _equalDbPriv((DbPriv*)a, (DbPriv*)b); + break; case T_XmlSerialize: retval = _equalXmlSerialize((XmlSerialize*)a, 
(XmlSerialize*)b); break; diff --git a/src/common/backend/nodes/nodeFuncs.cpp b/src/common/backend/nodes/nodeFuncs.cpp index 768615a07..0ee58ebb9 100644 --- a/src/common/backend/nodes/nodeFuncs.cpp +++ b/src/common/backend/nodes/nodeFuncs.cpp @@ -49,6 +49,19 @@ Oid exprType(const Node* expr) } switch (nodeTag(expr)) { + case T_BoolExpr: + case T_BooleanTest: + case T_CurrentOfExpr: + case T_HashFilter: + case T_NullTest: + case T_ScalarArrayOpExpr: + case T_RowCompareExpr: + type = BOOLOID; + break; + case T_GroupingFunc: + case T_GroupingId: + type = INT4OID; + break; case T_Var: type = ((const Var*)expr)->vartype; break; @@ -61,9 +74,6 @@ Oid exprType(const Node* expr) case T_Aggref: type = ((const Aggref*)expr)->aggtype; break; - case T_GroupingFunc: - type = INT4OID; - break; case T_WindowFunc: type = ((const WindowFunc*)expr)->wintype; break; @@ -92,12 +102,6 @@ Oid exprType(const Node* expr) case T_NullIfExpr: type = ((const NullIfExpr*)expr)->opresulttype; break; - case T_ScalarArrayOpExpr: - type = BOOLOID; - break; - case T_BoolExpr: - type = BOOLOID; - break; case T_SubLink: { const SubLink* sublink = (const SubLink*)expr; @@ -187,9 +191,6 @@ Oid exprType(const Node* expr) case T_RowExpr: type = ((const RowExpr*)expr)->row_typeid; break; - case T_RowCompareExpr: - type = BOOLOID; - break; case T_CoalesceExpr: type = ((const CoalesceExpr*)expr)->coalescetype; break; @@ -205,13 +206,6 @@ Oid exprType(const Node* expr) type = XMLOID; } break; - case T_NullTest: - case T_HashFilter: - type = BOOLOID; - break; - case T_BooleanTest: - type = BOOLOID; - break; case T_CoerceToDomain: type = ((const CoerceToDomain*)expr)->resulttype; break; @@ -221,20 +215,15 @@ Oid exprType(const Node* expr) case T_SetToDefault: type = ((const SetToDefault*)expr)->typeId; break; - case T_CurrentOfExpr: - type = BOOLOID; - break; case T_PlaceHolderVar: type = exprType((Node*)((const PlaceHolderVar*)expr)->phexpr); break; - case T_GroupingId: - type = INT4OID; - break; case T_Rownum: - type = NUMERICOID; - break; - case T_GradientDescentExpr: - type = ((const GradientDescentExpr*)expr)->fieldtype; + if (ROWNUM_TYPE_COMPAT) { + type = NUMERICOID; + } else { + type = INT8OID; + } break; default: ereport(ERROR, @@ -849,9 +838,6 @@ Oid exprCollation(const Node* expr) case T_PlaceHolderVar: coll = exprCollation((Node*)((const PlaceHolderVar*)expr)->phexpr); break; - case T_GradientDescentExpr: - coll = InvalidOid; - break; default: ereport( ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("unrecognized node type: %d", (int)nodeTag(expr)))); @@ -1564,7 +1550,6 @@ bool expression_tree_walker(Node* node, bool (*walker)(), void* context) case T_Null: case T_PgFdwRemoteInfo: case T_Rownum: - case T_GradientDescentExpr: /* primitive node types with no expression subnodes */ break; case T_Aggref: { @@ -1809,6 +1794,8 @@ bool expression_tree_walker(Node* node, bool (*walker)(), void* context) UpsertExpr* upsertClause = (UpsertExpr*)node; if (p2walker(upsertClause->updateTlist, context)) return true; + if (p2walker(upsertClause->upsertWhere, context)) + return true; } break; case T_JoinExpr: { JoinExpr* join = (JoinExpr*)node; @@ -2515,6 +2502,7 @@ Node* expression_tree_mutator(Node* node, Node* (*mutator)(Node*, void*), void* FLATCOPY(newnode, upsertClause, UpsertExpr, isCopy); MUTATE(newnode->updateTlist, upsertClause->updateTlist, List*); + MUTATE(newnode->upsertWhere, upsertClause->upsertWhere, Node*); return (Node*)newnode; } break; case T_FromExpr: { diff --git a/src/common/backend/nodes/nodes.cpp 
b/src/common/backend/nodes/nodes.cpp index e35aa7bf2..6417ad7de 100755 --- a/src/common/backend/nodes/nodes.cpp +++ b/src/common/backend/nodes/nodes.cpp @@ -89,6 +89,8 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_CreateResourcePoolStmt, "CreateResourcePoolStmt"}, {T_AlterResourcePoolStmt, "AlterResourcePoolStmt"}, {T_DropResourcePoolStmt, "DropResourcePoolStmt"}, + {T_AlterGlobalConfigStmt, "AlterGlobalConfigStmt"}, + {T_DropGlobalConfigStmt, "DropGlobalConfigStmt"}, {T_CreateWorkloadGroupStmt, "CreateWorkloadGroupStmt"}, {T_AlterWorkloadGroupStmt, "AlterWorkloadGroupStmt"}, {T_DropWorkloadGroupStmt, "DropWorkloadGroupStmt"}, @@ -231,6 +233,7 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_RangePartitionindexDefState, "RangePartitionindexDefState"}, {T_SplitPartitionState, "SplitPartitionState"}, {T_AddPartitionState, "AddPartitionState"}, + {T_AddSubPartitionState, "AddSubPartitionState"}, {T_RangePartitionStartEndDefState, "RangePartitionStartEndDefState"}, {T_PlannerInfo, "PlannerInfo"}, {T_PlannerGlobal, "PlannerGlobal"}, @@ -299,6 +302,7 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_SetOperationStmt, "SetOperationStmt"}, {T_GrantStmt, "GrantStmt"}, {T_GrantRoleStmt, "GrantRoleStmt"}, + {T_GrantDbStmt, "GrantDbStmt"}, {T_AlterDefaultPrivilegesStmt, "AlterDefaultPrivilegesStmt"}, {T_ClosePortalStmt, "ClosePortalStmt"}, {T_ClusterStmt, "ClusterStmt"}, @@ -439,6 +443,7 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, {T_PrivGrantee, "PrivGrantee"}, {T_FuncWithArgs, "FuncWithArgs"}, {T_AccessPriv, "AccessPriv"}, + {T_DbPriv, "DbPriv"}, {T_CreateOpClassItem, "CreateOpClassItem"}, {T_TableLikeClause, "TableLikeClause"}, {T_FunctionParameter, "FunctionParameter"}, @@ -584,12 +589,9 @@ static const TagStr g_tagStrArr[] = {{T_Invalid, "Invalid"}, // DB4AI {T_CreateModelStmt, "CreateModelStmt"}, {T_PredictByFunction, "PredictByFunction"}, - {T_GradientDescent, "GradientDescent"}, - {T_GradientDescentState, "GradientDescentState"}, - {T_KMeans, "Kmeans"}, - {T_KMeansState, "KmeansState"}, - {T_GradientDescentExpr, "GradientDescentExpr"}, - {T_GradientDescentExprState, "GradientDescentExprState"}, + {T_TrainModel, "TrainModel"}, + {T_TrainModelState, "TrainModelState"}, + {T_ExplainModelStmt, "ExplainModelStmt"}, // End DB4AI {T_TdigestData, "TdigestData"}, {T_CentroidPoint, "CentroidPoint"} diff --git a/src/common/backend/nodes/outfuncs.cpp b/src/common/backend/nodes/outfuncs.cpp index 70dda9cca..1bd1ccb61 100755 --- a/src/common/backend/nodes/outfuncs.cpp +++ b/src/common/backend/nodes/outfuncs.cpp @@ -709,6 +709,9 @@ static void _outScanInfo(StringInfo str, Scan* node) WRITE_NODE_FIELD(tablesample); out_mem_info(str, &node->mem_info); + if (t_thrd.proc->workingVersionNum >= SCAN_BATCH_MODE_VERSION_NUM) { + WRITE_BOOL_FIELD(scanBatchMode); + } } /* @@ -776,11 +779,15 @@ static void _outModifyTable(StringInfo str, ModifyTable* node) WRITE_NODE_FIELD(exclRelTlist); WRITE_INT_FIELD(exclRelRTIndex); } + if (t_thrd.proc->workingVersionNum >= UPSERT_WHERE_VERSION_NUM) { + WRITE_NODE_FIELD(upsertWhere); + } #else WRITE_ENUM_FIELD(upsertAction, UpsertAction); WRITE_NODE_FIELD(updateTlist); WRITE_NODE_FIELD(exclRelTlist); WRITE_INT_FIELD(exclRelRTIndex); + WRITE_NODE_FIELD(upsertWhere); #endif } @@ -790,6 +797,9 @@ static void _outUpsertClause(StringInfo str, const UpsertClause* node) WRITE_NODE_FIELD(targetList); WRITE_INT_FIELD(location); + if (t_thrd.proc->workingVersionNum >= UPSERT_WHERE_VERSION_NUM) { + 
WRITE_NODE_FIELD(whereClause); + } } static void _outUpsertExpr(StringInfo str, const UpsertExpr* node) @@ -800,6 +810,7 @@ static void _outUpsertExpr(StringInfo str, const UpsertExpr* node) WRITE_NODE_FIELD(updateTlist); WRITE_NODE_FIELD(exclRelTlist); WRITE_INT_FIELD(exclRelIndex); + WRITE_NODE_FIELD(upsertWhere); } static void _outMergeWhenClause(StringInfo str, const MergeWhenClause* node) { @@ -936,7 +947,7 @@ static void _outBitmapAnd(StringInfo str, BitmapAnd* node) _outPlanInfo(str, (Plan*)node); WRITE_NODE_FIELD(bitmapplans); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -948,7 +959,7 @@ static void _outBitmapOr(StringInfo str, BitmapOr* node) _outPlanInfo(str, (Plan*)node); WRITE_NODE_FIELD(bitmapplans); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -1015,7 +1026,7 @@ static void _outIndexScan(StringInfo str, IndexScan* node) { WRITE_NODE_TYPE("INDEXSCAN"); _outCommonIndexScanPart(str, node); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -1305,7 +1316,7 @@ static void _outBitmapIndexScan(StringInfo str, BitmapIndexScan* node) _outToken(str, get_namespace_name(get_rel_namespace(node->indexid))); } #endif // STREAMPLAN - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -1969,6 +1980,9 @@ static void _outPlanRowMark(StringInfo str, PlanRowMark* node) WRITE_UINT_FIELD(rowmarkId); WRITE_ENUM_FIELD(markType, RowMarkType); WRITE_BOOL_FIELD(noWait); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + WRITE_INT_FIELD(waitSec); + } WRITE_BOOL_FIELD(isParent); WRITE_INT_FIELD(numAttrs); WRITE_BITMAPSET_FIELD(bms_nodeids); @@ -2921,7 +2935,7 @@ static void _outIndexPath(StringInfo str, IndexPath* node) WRITE_ENUM_FIELD(indexscandir, ScanDirection); WRITE_FLOAT_FIELD(indextotalcost, "%.2f"); WRITE_FLOAT_FIELD(indexselectivity, "%.4f"); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -2943,7 +2957,7 @@ static void _outBitmapAndPath(StringInfo str, BitmapAndPath* node) WRITE_NODE_FIELD(bitmapquals); WRITE_FLOAT_FIELD(bitmapselectivity, "%.4f"); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -2956,7 +2970,7 @@ static void _outBitmapOrPath(StringInfo str, BitmapOrPath* node) WRITE_NODE_FIELD(bitmapquals); WRITE_FLOAT_FIELD(bitmapselectivity, "%.4f"); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } } @@ -3507,6 +3521,8 @@ static void _outPartitionState(StringInfo str, PartitionState* node) WRITE_NODE_FIELD(partitionKey); WRITE_NODE_FIELD(partitionList); WRITE_ENUM_FIELD(rowMovement, RowMovementValue); + WRITE_NODE_FIELD(subPartitionState); + WRITE_NODE_FIELD(partitionNameList); } static void _outRangePartitionindexDefState(StringInfo str, RangePartitionindexDefState* node) @@ -3515,6 +3531,7 @@ 
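One rule runs through all of these outfuncs.cpp hunks: a field that older openGauss releases cannot parse (waitSec, scanBatchMode, upsertWhere, predpush_same_level_hint, and so on) is only serialized when t_thrd.proc->workingVersionNum shows the whole cluster has passed the feature's version threshold, so a downlevel peer never receives an unknown token during a rolling upgrade. A minimal sketch of the write side, using a hypothetical Foo node and NEW_FIELD_VERSION_NUM invented purely for illustration:

```
/* Hypothetical sketch: serialize Foo, gating the new field on the cluster's
 * working version. Foo, oldField, newField and NEW_FIELD_VERSION_NUM are
 * invented names; the WRITE_* macros and workingVersionNum check are the
 * ones used throughout this patch. */
static void _outFoo(StringInfo str, Foo* node)
{
    WRITE_NODE_TYPE("FOO");            /* node label later matched by MATCH() */
    WRITE_INT_FIELD(oldField);         /* pre-existing field: always written */
    if (t_thrd.proc->workingVersionNum >= NEW_FIELD_VERSION_NUM) {
        WRITE_INT_FIELD(newField);     /* new field: only once fully upgraded */
    }
}
```

The matching readers in readfuncs.cpp (further down in this patch) probe with IF_EXIST(newField) rather than re-checking the version, so one binary can parse strings produced both before and after the upgrade.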
static void _outRangePartitionindexDefState(StringInfo str, RangePartitionindexD WRITE_STRING_FIELD(name); WRITE_STRING_FIELD(tablespace); + WRITE_NODE_FIELD(sublist); } static void _outRangePartitionStartEndDefState(StringInfo str, RangePartitionStartEndDefState* node) @@ -3536,6 +3553,15 @@ static void _outAddPartitionState(StringInfo str, AddPartitionState* node) WRITE_BOOL_FIELD(isStartEnd); } +static void _outAddSubPartitionState(StringInfo str, const AddSubPartitionState* node) +{ + WRITE_NODE_TYPE("ADDSUBPARTITIONSTATE"); + + WRITE_STRING_FIELD(partitionName); + WRITE_NODE_FIELD(subPartitionList); +} + + static void _outCreateStmt(StringInfo str, const CreateStmt* node) { WRITE_NODE_TYPE("CREATESTMT"); @@ -3724,6 +3750,9 @@ static void _outLockingClause(StringInfo str, LockingClause* node) if (t_thrd.proc->workingVersionNum >= ENHANCED_TUPLE_LOCK_VERSION_NUM) { WRITE_ENUM_FIELD(strength, LockClauseStrength); } + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + WRITE_BOOL_FIELD(waitSec); + } } static void _outXmlSerialize(StringInfo str, XmlSerialize* node) @@ -3995,6 +4024,21 @@ static void _outPredpushHint(StringInfo str, PredpushHint* node) WRITE_BITMAPSET_FIELD(candidates); } +/* + * @Description: Predpush same level hint node to string. + * @out str: String buf. + * @in node: Predpush same level hint struct. + */ +static void _outPredpushSameLevelHint(StringInfo str, PredpushSameLevelHint* node) +{ + WRITE_NODE_TYPE("PREDPUSHSAMELEVELHINT"); + _outBaseHint(str, (Hint*)node); + WRITE_BOOL_FIELD(negative); + WRITE_STRING_FIELD(dest_name); + WRITE_INT_FIELD(dest_id); + WRITE_BITMAPSET_FIELD(candidates); +} + /* * @Description: Rewrite hint node to string. * @out str: String buf. @@ -4207,14 +4251,19 @@ static void _outHintState(StringInfo str, HintState* node) if (t_thrd.proc->workingVersionNum >= PREDPUSH_VERSION_NUM) { WRITE_NODE_FIELD(predpush_hint); } - WRITE_NODE_FIELD(rewrite_hint); - WRITE_NODE_FIELD(gather_hint); + if (t_thrd.proc->workingVersionNum >= EXECUTE_DIRECT_ON_MULTI_VERSION_NUM) { + WRITE_NODE_FIELD(rewrite_hint); + } if (t_thrd.proc->workingVersionNum >= HINT_ENHANCEMENT_VERSION_NUM) { + WRITE_NODE_FIELD(gather_hint); WRITE_NODE_FIELD(no_expand_hint); WRITE_NODE_FIELD(set_hint); WRITE_NODE_FIELD(cache_plan_hint); WRITE_NODE_FIELD(no_gpc_hint); } + if (t_thrd.proc->workingVersionNum >= PREDPUSH_SAME_LEVEL_VERSION_NUM) { + WRITE_NODE_FIELD(predpush_same_level_hint); + } } static void _outQuery(StringInfo str, Query* node) @@ -4366,6 +4415,9 @@ static void _outRowMarkClause(StringInfo str, RowMarkClause* node) WRITE_UINT_FIELD(rti); WRITE_BOOL_FIELD(forUpdate); WRITE_BOOL_FIELD(noWait); + if (t_thrd.proc->workingVersionNum >= WAIT_N_TUPLE_LOCK_VERSION_NUM) { + WRITE_INT_FIELD(waitSec); + } WRITE_BOOL_FIELD(pushedDown); if (t_thrd.proc->workingVersionNum >= ENHANCED_TUPLE_LOCK_VERSION_NUM) { WRITE_ENUM_FIELD(strength, LockClauseStrength); @@ -4618,7 +4670,7 @@ static void _outRangeTblEntry(StringInfo str, RangeTblEntry* node) WRITE_BOOL_FIELD(sublink_pull_up); } - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { WRITE_BOOL_FIELD(is_ustore); } @@ -5504,31 +5556,58 @@ static void _outIndexVar(StringInfo str, IndexVar* node) WRITE_BOOL_FIELD(indexpath); } -static void _outGradientDescent(StringInfo str, GradientDescent* node) +static void _outTrainModel(StringInfo str, TrainModel* node) { - WRITE_NODE_TYPE("SGD"); + AlgorithmAPI *api = 
get_algorithm_api(node->algorithm); + int num_hyperp; + const HyperparameterDefinition* definition = api->get_hyperparameters_definitions(api, &num_hyperp); + + if (node->configurations != 1) + elog(ERROR, "TODO_DB4AI_API: more than one hyperparameter configuration"); + + WRITE_NODE_TYPE("TrainModel"); _outPlanInfo(str, (Plan*)node); appendStringInfoString(str, " :algorithm "); - appendStringInfoString(str, gd_get_algorithm(node->algorithm)->name); - appendStringInfoString(str, " :optimizer "); - appendStringInfoString(str, gd_get_optimizer_name(node->optimizer)); - WRITE_INT_FIELD(targetcol); - WRITE_INT_FIELD(max_iterations); - WRITE_INT_FIELD(max_seconds); - WRITE_INT_FIELD(batch_size); - WRITE_BOOL_FIELD(verbose); - WRITE_FLOAT_FIELD(learning_rate, "%.16g"); - WRITE_FLOAT_FIELD(decay, "%.16g"); - WRITE_FLOAT_FIELD(tolerance, "%.16g"); - WRITE_INT_FIELD(seed); - WRITE_FLOAT_FIELD(lambda, "%.16g"); -} + appendStringInfoString(str, api->name); -static void _outGradientDescentExpr(StringInfo str, GradientDescentExpr* node) -{ - WRITE_NODE_TYPE("GradientDescentExpr"); - WRITE_UINT_FIELD(field); - WRITE_OID_FIELD(fieldtype); + HyperparametersGD *hyperp = (HyperparametersGD *)node->hyperparameters[0]; + while (num_hyperp-- > 0) { + switch (definition->type) { + case INT4OID: { + int32_t *value_addr = (int32_t *)((char *)hyperp + definition->offset); + appendStringInfo(str, " : %s %d", definition->name, *value_addr); + break; + } + case INT8OID: { + int64_t *value_addr = (int64_t *)((char *)hyperp + definition->offset); + appendStringInfo(str, " : %s %ld", definition->name, *value_addr); + break; + } + case FLOAT8OID: { + double *value_addr = (double *)((char *)hyperp + definition->offset); + appendStringInfo(str, " : %s %.16g", definition->name, *value_addr); + break; + } + case BOOLOID: { + bool *value_addr = (bool *)((char *)hyperp + definition->offset); + appendStringInfo(str, " : %s %s", definition->name, booltostr(*value_addr)); + break; + } + case CSTRINGOID: { + char **value_addr = (char **)((char *)hyperp + definition->offset); + appendStringInfo(str, " : %s %s", definition->name, *value_addr); + break; + } + case ANYENUMOID: { + void *value_addr = (void *)((char *)hyperp + definition->offset); + appendStringInfo(str, " : %s %s", definition->name, definition->validation.enum_getter(value_addr)); + break; + } + default: + break; + } + definition++; + } } /* @@ -6006,6 +6085,9 @@ static void _outNode(StringInfo str, const void* obj) case T_AddPartitionState: _outAddPartitionState(str, (AddPartitionState*)obj); break; + case T_AddSubPartitionState: + _outAddSubPartitionState(str, (AddSubPartitionState*)obj); + break; case T_CreateForeignTableStmt: _outCreateForeignTableStmt(str, (CreateForeignTableStmt*)obj); break; @@ -6375,6 +6457,9 @@ static void _outNode(StringInfo str, const void* obj) case T_PredpushHint: _outPredpushHint(str, (PredpushHint *)obj); break; + case T_PredpushSameLevelHint: + _outPredpushSameLevelHint(str, (PredpushSameLevelHint *)obj); + break; case T_RewriteHint: _outRewriteHint(str, (RewriteHint *)obj); break; @@ -6392,11 +6477,9 @@ static void _outNode(StringInfo str, const void* obj) break; case T_NoGPCHint: _outNoGPCHint(str, (NoGPCHint*) obj); - case T_GradientDescent: - _outGradientDescent(str, (GradientDescent*)obj); + case T_TrainModel: + _outTrainModel(str, (TrainModel*)obj); break; - case T_GradientDescentExpr: - _outGradientDescentExpr(str, (GradientDescentExpr*)obj); case T_PLDebug_variable: _outPLDebug_variable(str, (PLDebug_variable*) obj); break; diff 
--git a/src/common/backend/nodes/params.cpp b/src/common/backend/nodes/params.cpp index fdfb6e7f9..94ce9aa80 100644 --- a/src/common/backend/nodes/params.cpp +++ b/src/common/backend/nodes/params.cpp @@ -70,9 +70,14 @@ ParamListInfo copyParamList(ParamListInfo from) } get_typlenbyval(nprm->ptype, &typLen, &typByVal); nprm->value = datumCopy(nprm->value, typByVal, typLen); - nprm->tableOfIndexType = oprm->tableOfIndexType; - nprm->tableOfIndex = copyTableOfIndex(oprm->tableOfIndex); - nprm->isnestedtable = oprm->isnestedtable; + nprm->tabInfo = NULL; + if (oprm->tabInfo != NULL) { + nprm->tabInfo = (TableOfInfo*)palloc0(sizeof(TableOfInfo)); + nprm->tabInfo->tableOfIndexType = oprm->tabInfo->tableOfIndexType; + nprm->tabInfo->tableOfIndex = copyTableOfIndex(oprm->tabInfo->tableOfIndex); + nprm->tabInfo->isnestedtable = oprm->tabInfo->isnestedtable; + nprm->tabInfo->tableOfLayers = oprm->tabInfo->tableOfLayers; + } } return retval; diff --git a/src/common/backend/nodes/readfuncs.cpp b/src/common/backend/nodes/readfuncs.cpp index 3e067765b..01d04dad2 100755 --- a/src/common/backend/nodes/readfuncs.cpp +++ b/src/common/backend/nodes/readfuncs.cpp @@ -1050,8 +1050,8 @@ static JoinMethodHint* _readJoinHint(void) } /* - * @Description: Read string to join hint struct. - * @return: Join hint struct. + * @Description: Read string to predpush hint struct. + * @return: Predpush hint struct. */ static PredpushHint* _readPredpushHint(void) { @@ -1066,6 +1066,23 @@ static PredpushHint* _readPredpushHint(void) READ_DONE(); } +/* + * @Description: Read string to predpush same level hint struct. + * @return: Predpush same level hint struct. + */ +static PredpushSameLevelHint* _readPredpushSameLevelHint(void) +{ + READ_LOCALS(PredpushSameLevelHint); + + _readBaseHint(&(local_node->base)); + READ_BOOL_FIELD(negative); + READ_STRING_FIELD(dest_name); + READ_INT_FIELD(dest_id); + READ_BITMAPSET_FIELD(candidates); + + READ_DONE(); +} + /* * @Description: Read string to rewrite hint struct. * @return: rewrite hint struct. 
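The readfuncs.cpp changes that follow are the other half of that upgrade contract: every field the writer may omit is wrapped in IF_EXIST, which tests for the field's token in the input instead of assuming it is present. Dispatch on the node label itself is a plain length-plus-memcmp comparison; in the PostgreSQL lineage the MATCH macro looks roughly like this (a sketch of the upstream convention, not quoted from openGauss):

```
/* `token` and `length` are locals in parseNodeString() holding the node
 * label just consumed from the serialized tree; the length argument is
 * hard-coded by hand at each call site. */
#define MATCH(tokname, namelen) \
    (length == (namelen) && memcmp(token, (tokname), (namelen)) == 0)
```

That hand-counted length is why the new dispatch entries below pass 20 for "ADDSUBPARTITIONSTATE" and 21 for "PREDPUSHSAMELEVELHINT": each number is exactly the strlen of its literal, and a mismatch would silently break deserialization of that node type.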
@@ -1329,8 +1346,12 @@ static HintState* _readHintState() IF_EXIST(predpush_hint) { READ_NODE_FIELD(predpush_hint); } - READ_NODE_FIELD(rewrite_hint); - READ_NODE_FIELD(gather_hint); + IF_EXIST(rewrite_hint) { + READ_NODE_FIELD(rewrite_hint); + } + IF_EXIST(gather_hint) { + READ_NODE_FIELD(gather_hint); + } IF_EXIST(no_expand_hint) { READ_NODE_FIELD(no_expand_hint); } @@ -1343,6 +1364,9 @@ static HintState* _readHintState() IF_EXIST(no_gpc_hint) { READ_NODE_FIELD(no_gpc_hint); } + IF_EXIST(predpush_same_level_hint) { + READ_NODE_FIELD(predpush_same_level_hint); + } READ_DONE(); } @@ -1611,6 +1635,10 @@ static RowMarkClause* _readRowMarkClause(void) READ_UINT_FIELD(rti); READ_BOOL_FIELD(forUpdate); READ_BOOL_FIELD(noWait); + IF_EXIST(waitSec) { + READ_INT_FIELD(waitSec); + } + READ_BOOL_FIELD(pushedDown); IF_EXIST(strength) { READ_ENUM_FIELD(strength, LockClauseStrength); @@ -3339,6 +3367,9 @@ static Scan* _readScan(Scan* local_node) READ_NODE_FIELD(tablesample); } read_mem_info(&local_node->mem_info); + IF_EXIST(scanBatchMode) { + READ_BOOL_FIELD(scanBatchMode); + } READ_DONE(); } @@ -3945,6 +3976,10 @@ static ModifyTable* _readModifyTable(ModifyTable* local_node) READ_INT_FIELD(exclRelRTIndex); } + IF_EXIST(upsertWhere) { + READ_NODE_FIELD(upsertWhere); + } + READ_DONE(); } @@ -3956,6 +3991,9 @@ static UpsertExpr* _readUpsertExpr(void) READ_NODE_FIELD(updateTlist); READ_NODE_FIELD(exclRelTlist); READ_INT_FIELD(exclRelIndex); + IF_EXIST(upsertWhere) { + READ_NODE_FIELD(upsertWhere); + } READ_DONE(); } @@ -3966,7 +4004,9 @@ static UpsertClause* _readUpsertClause(void) READ_NODE_FIELD(targetList); READ_INT_FIELD(location); - + IF_EXIST(whereClause) { + READ_NODE_FIELD(whereClause); + } READ_DONE(); } @@ -4282,6 +4322,10 @@ static PlanRowMark* _readPlanRowMark(void) READ_UINT_FIELD(rowmarkId); READ_ENUM_FIELD(markType, RowMarkType); READ_BOOL_FIELD(noWait); + IF_EXIST(waitSec) { + READ_INT_FIELD(waitSec); + } + READ_BOOL_FIELD(isParent); READ_INT_FIELD(numAttrs); READ_BITMAPSET_FIELD(bms_nodeids); @@ -5522,6 +5566,8 @@ static PartitionState* _readPartitionState() READ_NODE_FIELD(partitionKey); READ_NODE_FIELD(partitionList); READ_ENUM_FIELD(rowMovement, RowMovementValue); + READ_NODE_FIELD(subPartitionState); + READ_NODE_FIELD(partitionNameList); if (local_node->partitionStrategy == '0') { local_node->partitionStrategy = 0; @@ -5541,6 +5587,7 @@ static RangePartitionindexDefState* _readRangePartitionindexDefState() READ_STRING_FIELD(name); READ_STRING_FIELD(tablespace); + READ_NODE_FIELD(sublist); READ_DONE(); } @@ -5601,6 +5648,16 @@ static AddPartitionState* _readAddPartitionState() READ_DONE(); } +static AddSubPartitionState* _readAddSubPartitionState() +{ + READ_LOCALS(AddSubPartitionState); + + READ_NODE_FIELD(subPartitionList); + READ_STRING_FIELD(partitionName); + + READ_DONE(); +} + static QualSkewInfo* _readQualSkewInfo() { READ_LOCALS_NO_FIELDS(QualSkewInfo); @@ -6056,6 +6113,8 @@ Node* parseNodeString(void) return_value = _readSplitPartitionState(); } else if (MATCH("ADDPARTITIONSTATE", 17)) { return_value = _readAddPartitionState(); + } else if (MATCH("ADDSUBPARTITIONSTATE", 20)) { + return_value = _readAddSubPartitionState(); } else if (MATCH("CLIENTLOGICCOLUMNREF", 20)) { return_value = _readClientLogicColumnRef(); } else if (MATCH("GLOBALPARAM", 11)) { @@ -6072,6 +6131,8 @@ Node* parseNodeString(void) return_value = _readUpsertClause(); } else if (MATCH("PREDPUSHHINT", 12)) { return_value = _readPredpushHint(); + } else if (MATCH("PREDPUSHSAMELEVELHINT", 21)) { + return_value = 
_readPredpushSameLevelHint(); } else if (MATCH("REWRITEHINT", 11)) { return_value = _readRewriteHint(); } else if (MATCH("GATHERHINT", 10)) { diff --git a/src/common/backend/parser/analyze.cpp b/src/common/backend/parser/analyze.cpp index 5d6412058..4c9bf3b49 100644 --- a/src/common/backend/parser/analyze.cpp +++ b/src/common/backend/parser/analyze.cpp @@ -75,6 +75,7 @@ #include "catalog/pgxc_node.h" #include "access/xact.h" #include "utils/distribute_test.h" +#include "tcop/utility.h" #endif #include "utils/rel.h" #include "utils/rel_gs.h" @@ -87,12 +88,14 @@ #include "db4ai/aifuncs.h" #include "db4ai/create_model.h" +#include "db4ai/hyperparameter_validation.h" #ifndef ENABLE_MULTIPLE_NODES #include "optimizer/clauses.h" #endif /* Hook for plugins to get control at end of parse analysis */ THR_LOCAL post_parse_analyze_hook_type post_parse_analyze_hook = NULL; +static const int MILLISECONDS_PER_SECONDS = 1000; static Query* transformDeleteStmt(ParseState* pstate, DeleteStmt* stmt); static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt); @@ -127,6 +130,16 @@ static void set_ancestor_ps_contain_foreigntbl(ParseState* subParseState); static bool include_groupingset(Node* groupClause); static void transformGroupConstToColumn(ParseState* pstate, Node* groupClause, List* targetList); static bool checkAllowedTableCombination(ParseState* pstate); +#ifdef ENABLE_MULTIPLE_NODES +static bool ContainSubLinkWalker(Node* node, void* context); +static bool ContainSubLink(Node* clause); +#endif /* ENABLE_MULTIPLE_NODES */ + +#ifndef ENABLE_MULTIPLE_NODES +static const char* NOKEYUPDATE_KEYSHARE_ERRMSG = "/NO KEY UPDATE/KEY SHARE"; +#else +static const char* NOKEYUPDATE_KEYSHARE_ERRMSG = ""; +#endif /* * parse_analyze @@ -154,7 +167,9 @@ Query* parse_analyze( parse_fixed_parameters(pstate, paramTypes, numParams); } + PUSH_SKIP_UNIQUE_SQL_HOOK(); query = transformTopLevelStmt(pstate, parseTree, isFirstNode, isCreateView); + POP_SKIP_UNIQUE_SQL_HOOK(); /* it's unsafe to deal with plugins hooks as dynamic lib may be released */ if (post_parse_analyze_hook && !(g_instance.status > NoShutdown)) { @@ -834,7 +849,7 @@ static Query* transformDeleteStmt(ParseState* pstate, DeleteStmt* stmt) // @Online expansion: check if the target relation is being redistributed in read only mode if (!u_sess->attr.attr_sql.enable_cluster_resize && pstate->p_target_relation && - RelationInClusterResizingReadOnly(pstate->p_target_relation)) { + RelationInClusterResizingWriteErrorMode(pstate->p_target_relation)) { ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), errmsg("%s is redistributing, please retry later.", pstate->p_target_relation->rd_rel->relname.data))); @@ -1456,7 +1471,7 @@ static Query* transformInsertStmt(ParseState* pstate, InsertStmt* stmt) * so we don't need to double check if target table is DFS table here anymore. */ if (!u_sess->attr.attr_sql.enable_cluster_resize && pstate->p_target_relation != NULL && - RelationInClusterResizingReadOnly(pstate->p_target_relation)) { + RelationInClusterResizingWriteErrorMode(pstate->p_target_relation)) { ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), errmsg("%s is redistributing, please retry later.", pstate->p_target_relation->rd_rel->relname.data))); @@ -1954,6 +1969,30 @@ static bool CheckRlsPolicyForUpsert(Relation targetrel) return false; } +/* + * The following check only affects distributed deployment. + * Sublink in upsert's where clause is supported in centralized mode.
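ContainSubLinkWalker, declared in the hunk above and defined just after this comment, is a textbook expression_tree_walker client: return true to abort the walk as soon as the node of interest is seen, return false to keep descending, and delegate every other node back to the walker. The same skeleton detects any node tag; for instance, a hypothetical aggregate probe (illustrative only, not part of the patch) would look identical in shape:

```
/* Hypothetical example, same shape as ContainSubLinkWalker below:
 * returning true aborts the traversal, false keeps descending. */
static bool ContainAggrefWalker(Node* node, void* context)
{
    if (node == NULL) {
        return false;                /* empty subtree: keep walking */
    }
    if (IsA(node, Aggref)) {
        return true;                 /* found an aggregate: stop here */
    }
    return expression_tree_walker(node, (bool (*)())ContainAggrefWalker, context);
}
```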
+ */ +#ifdef ENABLE_MULTIPLE_NODES +static bool ContainSubLinkWalker(Node* node, void* context) +{ + if (node == NULL) { + return false; + } + + if (IsA(node, SubLink)) { + return true; + } + + return expression_tree_walker(node, (bool (*)())ContainSubLinkWalker, (void*)context); +} + +static bool ContainSubLink(Node* clause) +{ + return ContainSubLinkWalker(clause, NULL); +} +#endif /* ENABLE_MULTIPLE_NODES */ + static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upsertClause, RangeVar* relation) { UpsertExpr* result = NULL; @@ -1961,6 +2000,7 @@ static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upser RangeTblEntry* exclRte = NULL; int exclRelIndex = 0; List* exclRelTlist = NIL; + Node* updateWhere = NULL; UpsertAction action = UPSERT_NOTHING; Relation targetrel = pstate->p_target_relation; @@ -1983,12 +2023,6 @@ static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upser "UPDATE."))); } - if (RelationIsSubPartitioned(targetrel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Subpartition is not supported for INSERT ON DUPLICATE KEY UPDATE."))); - } - if (upsertClause->targetList != NIL) { pstate->p_is_insert = false; action = UPSERT_UPDATE; @@ -2020,7 +2054,21 @@ static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upser addRTEtoQuery(pstate, pstate->p_target_rangetblentry, false, true, true); updateTlist = transformTargetList(pstate, upsertClause->targetList); + /* Done with select-like processing, move on transforming to match update set target column */ updateTlist = transformUpdateTargetList(pstate, updateTlist, upsertClause->targetList, relation); + updateWhere = transformWhereClause(pstate, upsertClause->whereClause, "WHERE"); +#ifdef ENABLE_MULTIPLE_NODES + /* Do not support sublinks in update where clause for now */ + if (ContainSubLink(updateWhere)) { + ereport(ERROR, + (errmodule(MOD_OPT_PLANNER), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Feature is not supported for INSERT ON DUPLICATE KEY UPDATE."), + errdetail("Do not support sublink in where clause."), + errcause("Unsupported syntax."), + erraction("Check if the query can be rewritten into MERGE INTO statement " + "or reduce the sublink in where clause."))); + } +#endif /* ENABLE_MULTIPLE_NODES */ /* We can't update primary or unique key in upsert, check it here */ #ifdef ENABLE_MULTIPLE_NODES if (IS_PGXC_COORDINATOR && !u_sess->attr.attr_sql.enable_upsert_to_merge) { @@ -2037,6 +2085,7 @@ static UpsertExpr* transformUpsertClause(ParseState* pstate, UpsertClause* upser result->exclRelIndex = exclRelIndex; result->exclRelTlist = exclRelTlist; result->upsertAction = action; + result->upsertWhere = updateWhere; return result; } @@ -3322,7 +3371,7 @@ static Query* transformUpdateStmt(ParseState* pstate, UpdateStmt* stmt) // check if the target relation is being redistributed in read only mode if (!u_sess->attr.attr_sql.enable_cluster_resize && pstate->p_target_relation != NULL && - RelationInClusterResizingReadOnly(pstate->p_target_relation)) { + RelationInClusterResizingWriteErrorMode(pstate->p_target_relation)) { ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), errmsg("%s is redistributing, please retry later.", pstate->p_target_relation->rd_rel->relname.data))); @@ -4181,39 +4230,40 @@ void CheckSelectLocking(Query* qry) if (qry->setOperations) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed " - "with 
UNION/INTERSECT/EXCEPT"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with UNION/INTERSECT/EXCEPT", + NOKEYUPDATE_KEYSHARE_ERRMSG))); } if (qry->distinctClause != NIL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed with DISTINCT clause"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with DISTINCT clause", NOKEYUPDATE_KEYSHARE_ERRMSG))); } if (qry->groupClause != NIL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed with GROUP BY clause"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with GROUP BY clause", NOKEYUPDATE_KEYSHARE_ERRMSG))); } if (qry->havingQual != NULL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed with HAVING clause"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with HAVING clause", NOKEYUPDATE_KEYSHARE_ERRMSG))); } if (qry->hasAggs) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed with aggregate functions"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with aggregate functions", + NOKEYUPDATE_KEYSHARE_ERRMSG))); } if (qry->hasWindowFuncs) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed with window functions"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with window functions", NOKEYUPDATE_KEYSHARE_ERRMSG))); } if (expression_returns_set((Node*)qry->targetList)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed with set-returning functions " - "in the target list"))); + errmsg("SELECT FOR UPDATE/SHARE%s is not allowed with set-returning functions in the target list", + NOKEYUPDATE_KEYSHARE_ERRMSG))); } } @@ -4244,11 +4294,24 @@ static void transformLockingClause(ParseState* pstate, Query* qry, LockingClause #ifdef ENABLE_MULTIPLE_NODES || true #endif - ) { + ) { lc->strength = lc->forUpdate ? LCS_FORUPDATE : LCS_FORSHARE; } allrels->strength = lc->strength; allrels->noWait = lc->noWait; + allrels->waitSec = lc->waitSec; + + /* The processing delay of the ProcSleep function is in milliseconds. Set the delay to int_max/1000. */ + if (lc->waitSec > (MAX_INT32 / MILLISECONDS_PER_SECONDS)) { + ereport(ERROR, + (errmodule(MOD_OPT_PLANNER), errcode(ERRCODE_INVALID_OPTION), + errmsg("The delay ranges from 0 to 2147483."), + errdetail("N/A"), + errcause("Invalid input parameter."), + erraction("Modify SQL statement according to the manual."))); + } + if (lockedRels == NIL) { /* all regular tables used in query */ @@ -4264,17 +4327,17 @@ static void transformLockingClause(ParseState* pstate, Query* qry, LockingClause heap_close(rel, AccessShareLock); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be used with " - "column table \"%s\"", rte->eref->aliasname))); + errmsg("SELECT FOR UPDATE/SHARE%s cannot be used with " + "column table \"%s\"", NOKEYUPDATE_KEYSHARE_ERRMSG, rte->eref->aliasname))); } heap_close(rel, AccessShareLock); - applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown); + applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown, lc->waitSec); rte->requiredPerms |= ACL_SELECT_FOR_UPDATE; break; case RTE_SUBQUERY: - applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown); + applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown, lc->waitSec); /* * FOR [KEY] UPDATE/SHARE of subquery is propagated to all of @@ -4304,8 +4367,8 @@ static void transformLockingClause(ParseState* pstate, Query* qry, LockingClause if (thisrel->catalogname || thisrel->schemaname) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE must specify unqualified " - "relation names"), + errmsg("SELECT FOR UPDATE/SHARE%s must specify unqualified " + "relation names", NOKEYUPDATE_KEYSHARE_ERRMSG), parser_errposition(pstate, thisrel->location))); } @@ -4322,46 +4385,48 @@ static void transformLockingClause(ParseState* pstate, Query* qry, LockingClause heap_close(rel, AccessShareLock); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be used with " - "column table \"%s\"", rte->eref->aliasname), + errmsg("SELECT FOR UPDATE/SHARE%s cannot be used with column table \"%s\"", + NOKEYUPDATE_KEYSHARE_ERRMSG, rte->eref->aliasname), parser_errposition(pstate, thisrel->location))); } heap_close(rel, AccessShareLock); - applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown); + applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown, + lc->waitSec); rte->requiredPerms |= ACL_SELECT_FOR_UPDATE; break; case RTE_SUBQUERY: - applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown); + applyLockingClause(qry, i, lc->strength, lc->noWait, pushedDown, + lc->waitSec); /* see comment above */ transformLockingClause(pstate, rte->subquery, allrels, true); break; case RTE_JOIN: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be applied " - "to a join"), + errmsg("SELECT FOR UPDATE/SHARE%s cannot be applied to a join", + NOKEYUPDATE_KEYSHARE_ERRMSG), parser_errposition(pstate, thisrel->location))); break; case RTE_FUNCTION: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be applied " - "to a function"), + errmsg("SELECT FOR UPDATE/SHARE%s cannot be applied to a function", + NOKEYUPDATE_KEYSHARE_ERRMSG), parser_errposition(pstate, thisrel->location))); break; case RTE_VALUES: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR
UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be applied " - "to VALUES"), + errmsg("SELECT FOR UPDATE/SHARE%s cannot be applied to VALUES", + NOKEYUPDATE_KEYSHARE_ERRMSG), parser_errposition(pstate, thisrel->location))); break; case RTE_CTE: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be applied " - "to a WITH query"), + errmsg("SELECT FOR UPDATE/SHARE%s cannot be applied to a WITH query", + NOKEYUPDATE_KEYSHARE_ERRMSG), parser_errposition(pstate, thisrel->location))); break; default: @@ -4387,7 +4452,8 @@ static void transformLockingClause(ParseState* pstate, Query* qry, LockingClause /* * Record locking info for a single rangetable item */ -void applyLockingClause(Query* qry, Index rtindex, LockClauseStrength strength, bool noWait, bool pushedDown) +void applyLockingClause(Query* qry, Index rtindex, LockClauseStrength strength, bool noWait, bool pushedDown, + int waitSec) { RowMarkClause* rc = NULL; @@ -4414,6 +4480,7 @@ void applyLockingClause(Query* qry, Index rtindex, LockClauseStrength strength, rc->strength = Max(rc->strength, strength); rc->forUpdate = rc->strength == LCS_FORUPDATE; rc->noWait = rc->noWait || noWait; + rc->waitSec = Max(rc->waitSec, waitSec); rc->pushedDown = rc->pushedDown && pushedDown; return; } @@ -4421,9 +4488,10 @@ void applyLockingClause(Query* qry, Index rtindex, LockClauseStrength strength, /* Make a new RowMarkClause */ rc = makeNode(RowMarkClause); rc->rti = rtindex; - rc->strength = strength; rc->forUpdate = strength == LCS_FORUPDATE; + rc->strength = strength; rc->noWait = noWait; + rc->waitSec = waitSec; rc->pushedDown = pushedDown; qry->rowMarks = lappend(qry->rowMarks, rc); } diff --git a/src/common/backend/parser/gram.y b/src/common/backend/parser/gram.y index 6a60f6efc..4d57dcb6b 100644 --- a/src/common/backend/parser/gram.y +++ b/src/common/backend/parser/gram.y @@ -9,6 +9,7 @@ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group + * Portions Copyright (c) 2021, openGauss Contributors * * * IDENTIFICATION @@ -236,11 +237,15 @@ static Node *make_node_from_scanbuf(int start_pos, int end_pos, core_yyscan_t yy static int64 SequenceStrGetInt64(const char *str); static int GetLoadType(int load_type_f, int load_type_s); static Node *MakeSqlLoadNode(char *colname); + +/* start with .. 
connect by related utilities */ static bool IsConnectByRootIdent(Node* node); static void ValidateTripleTuple(Node* node, core_yyscan_t yyscanner, int location, char* token); static Node* MakeConnectByRootNode(ColumnRef* cr, int location); static char* MakeConnectByRootColName(char* tabname, char* colname); static void FilterStartWithUseCases(SelectStmt* stmt, List* locking_clause, core_yyscan_t yyscanner, int location); +static FuncCall* MakePriorAsFunc(); + #ifndef ENABLE_MULTIPLE_NODES static bool CheckWhetherInColList(char *colname, List *col_list); #endif @@ -293,6 +298,7 @@ static int errstate; ResTarget *target; struct PrivTarget *privtarget; AccessPriv *accesspriv; + DbPriv *dbpriv; InsertStmt *istmt; VariableSetStmt *vsetstmt; /* PGXC_BEGIN */ @@ -303,7 +309,7 @@ static int errstate; MergeWhenClause *mergewhen; UpsertClause *upsert; EncryptionType algtype; - LockClauseStrength lockstrength; + LockClauseStrength lockstrength; } %type stmt schema_stmt @@ -329,10 +335,10 @@ static int errstate; DropAssertStmt DropSynonymStmt DropTrigStmt DropRuleStmt DropCastStmt DropRoleStmt DropRlsPolicyStmt DropUserStmt DropdbStmt DropTableSpaceStmt DropDataSourceStmt DropDirectoryStmt DropFdwStmt DropForeignServerStmt DropUserMappingStmt ExplainStmt ExecDirectStmt FetchStmt - GrantStmt GrantRoleStmt IndexStmt InsertStmt ListenStmt LoadStmt + GrantStmt GrantRoleStmt GrantDbStmt IndexStmt InsertStmt ListenStmt LoadStmt LockStmt NotifyStmt ExplainableStmt PreparableStmt CreateFunctionStmt CreateProcedureStmt CreatePackageStmt CreatePackageBodyStmt AlterFunctionStmt AlterProcedureStmt ReindexStmt RemoveAggrStmt - RemoveFuncStmt RemoveOperStmt RemovePackageStmt RenameStmt RevokeStmt RevokeRoleStmt + RemoveFuncStmt RemoveOperStmt RemovePackageStmt RenameStmt RevokeStmt RevokeRoleStmt RevokeDbStmt RuleActionStmt RuleActionStmtOrEmpty RuleStmt SecLabelStmt SelectStmt TimeCapsuleStmt TransactionStmt TruncateStmt CallFuncStmt UnlistenStmt UpdateStmt VacuumStmt @@ -351,6 +357,7 @@ static int errstate; CreateAppWorkloadGroupMappingStmt AlterAppWorkloadGroupMappingStmt DropAppWorkloadGroupMappingStmt MergeStmt PurgeStmt CreateMatViewStmt RefreshMatViewStmt CreateWeakPasswordDictionaryStmt DropWeakPasswordDictionaryStmt + AlterGlobalConfigStmt DropGlobalConfigStmt CreatePublicationStmt AlterPublicationStmt CreateSubscriptionStmt AlterSubscriptionStmt DropSubscriptionStmt @@ -391,11 +398,11 @@ static int errstate; transaction_mode_item create_extension_opt_item alter_extension_opt_item -%type opt_lock lock_type cast_context +%type opt_lock lock_type cast_context opt_wait %type vacuum_option_list vacuum_option_elem opt_verify_options %type opt_check opt_force opt_or_replace opt_grant_grant_option opt_grant_admin_option - opt_nowait opt_if_exists opt_with_data opt_large_seq + opt_nowait opt_if_exists opt_with_data opt_large_seq opt_cancelable %type OptRoleList AlterOptRoleList %type CreateOptRoleElem AlterOptRoleElem @@ -426,7 +433,7 @@ static int errstate; opt_class opt_inline_handler opt_validator validator_clause opt_collate -%type qualified_name OptConstrFromTable opt_index_name +%type qualified_name insert_target OptConstrFromTable opt_index_name insert_partition_clause update_delete_partition_clause %type all_Op MathOp @@ -439,7 +446,8 @@ static int errstate; %type grantee %type grantee_list %type privilege -%type privileges privilege_list +%type privileges privilege_list db_privileges db_privilege_list +%type db_privilege %type privilege_str %type privilege_target %type function_with_argtypes @@ -657,7 
+665,8 @@ static int errstate; interval_expr maxValueItem list_partitioning_clause hash_partitioning_clause range_start_end_item range_less_than_item list_partition_item hash_partition_item subpartitioning_clause range_subpartitioning_clause hash_subpartitioning_clause - list_subpartitioning_clause subpartition_item + list_subpartitioning_clause subpartition_item opt_subpartition_index_def + range_subpartition_index_list range_subpartition_index_item %type range_partition_definition_list list_partition_definition_list hash_partition_definition_list maxValueList column_item_list tablespaceList opt_interval_tablespaceList split_dest_partition_define_list split_dest_listsubpartition_define_list split_dest_rangesubpartition_define_list @@ -780,7 +789,7 @@ static int errstate; BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_INTEGER BIT BLANKS BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL - CACHE CALL CALLED CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P + CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P CHARACTER CHARACTERISTICS CHARACTERSET CHECK CHECKPOINT CLASS CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COMMENT COMMENTS COMMIT COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPRESS CONCURRENTLY CONDITION CONFIGURATION CONNECTION CONSTANT CONSTRAINT CONSTRAINTS @@ -829,7 +838,7 @@ static int errstate; OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER - PACKAGE PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION + PACKAGE PACKAGES PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION /* PGXC_BEGIN */ POOL PRECEDING PRECISION /* PGXC_END */ @@ -862,7 +871,7 @@ static int errstate; VACUUM VALID VALIDATE VALIDATION VALIDATOR VALUE_P VALUES VARCHAR VARCHAR2 VARIABLES VARIADIC VARRAY VARYING VCGROUP VERBOSE VERIFY VERSION_P VIEW VOLATILE - WEAK WHEN WHERE WHITESPACE_P WINDOW WITH WITHIN WITHOUT WORK WORKLOAD WRAPPER WRITE + WAIT WEAK WHEN WHERE WHITESPACE_P WINDOW WITH WITHIN WITHOUT WORK WORKLOAD WRAPPER WRITE XML_P XMLATTRIBUTES XMLCONCAT XMLELEMENT XMLEXISTS XMLFOREST XMLPARSE XMLPI XMLROOT XMLSERIALIZE @@ -879,19 +888,24 @@ static int errstate; %token NULLS_FIRST NULLS_LAST WITH_TIME INCLUDING_ALL RENAME_PARTITION PARTITION_FOR + SUBPARTITION_FOR ADD_PARTITION DROP_PARTITION REBUILD_PARTITION MODIFY_PARTITION + ADD_SUBPARTITION + DROP_SUBPARTITION NOT_ENFORCED VALID_BEGIN DECLARE_CURSOR + START_WITH CONNECT_BY /* Precedence: lowest to highest */ %nonassoc PARTIAL_EMPTY_PREC %nonassoc CLUSTER %nonassoc SET /* see relation_expr_opt_alias */ -%right FEATURES TARGET // DB4AI +%right PRIOR +%right FEATURES TARGET // DB4AI %left UNION EXCEPT MINUS_P %left INTERSECT %left OR @@ -942,7 +956,7 @@ static int errstate; /* Unary Operators */ %left AT /* sets precedence for AT TIME ZONE */ %left COLLATE -%right UMINUS +%right UMINUS BY NAME_P PASSING ROW TYPE_P VALUE_P %left '[' ']' %left '(' ')' %left TYPECAST @@ -1039,6 +1053,7 @@ stmt : | AlterOwnerStmt | AlterRlsPolicyStmt | AlterResourcePoolStmt + | AlterGlobalConfigStmt | AlterSeqStmt | AlterSchemaStmt | AlterSubscriptionStmt @@ -1142,6 +1157,7 @@ stmt : | 
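/*
 * A usage sketch of statements wired up by the token and statement-list
 * additions above; the syntax follows productions added later in this
 * patch, and all object names and values are illustrative:
 *
 *   ALTER GLOBAL CONFIGURATION WITH (param = 'value');
 *   DROP GLOBAL CONFIGURATION param;
 *   LOCK TABLE t1 IN ACCESS EXCLUSIVE MODE CANCELABLE;
 */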
DropRlsPolicyStmt | DropPLangStmt | DropResourcePoolStmt + | DropGlobalConfigStmt | DropRuleStmt | DropStmt | DropSubscriptionStmt @@ -1159,6 +1175,7 @@ stmt : | FetchStmt | GrantStmt | GrantRoleStmt + | GrantDbStmt | IndexStmt | InsertStmt | ListenStmt @@ -1178,6 +1195,7 @@ stmt : | RenameStmt | RevokeStmt | RevokeRoleStmt + | RevokeDbStmt | RuleStmt | SecLabelStmt | SelectStmt @@ -1977,7 +1995,7 @@ generic_set: n->name = $1; n->args = $3; /* if we are setting role, we switch to the new syntax which check the password of role */ - if(!strcmp("role", n->name) || !pg_strcasecmp("session_authorization", n->name)) + if(!pg_strcasecmp("role", n->name) || !pg_strcasecmp("session_authorization", n->name)) { const char* message = "SET TO rolename\" not yet supported"; InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); @@ -1999,7 +2017,7 @@ generic_set: n->name = $1; n->args = $3; /* if we are setting role, we switch to the new syntax which check the password of role */ - if(!strcmp("role", n->name) || !pg_strcasecmp("session_authorization", n->name)) + if(!pg_strcasecmp("role", n->name) || !pg_strcasecmp("session_authorization", n->name)) { const char* message = "SET TO rolename\" not yet supported"; InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); @@ -2942,6 +2960,151 @@ alter_partition_cmd: n->def = (Node*)s; $$ = (Node *)n; } + | ADD_PARTITION name VALUES '(' DEFAULT ')' OptTableSpace + { + ListPartitionDefState *p = makeNode(ListPartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddPartitionState *s = makeNode(AddPartitionState); + p->partitionName = $2; + Const *n_default = makeNode(Const); + n_default->ismaxvalue = true; + n_default->location = -1; + p->boundary = list_make1(n_default); + p->tablespacename = $7; + s->partitionList = list_make1(p); + s->isStartEnd = false; + n->subtype = AT_AddPartition; + n->def = (Node*)s; + $$ = (Node *)n; + } + | ADD_PARTITION name VALUES LESS THAN + '(' maxValueList ')' OptTableSpace '(' subpartition_definition_list ')' + { + RangePartitionDefState *p = makeNode(RangePartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddPartitionState *s = makeNode(AddPartitionState); + p->partitionName = $2; + p->boundary = $7; + p->tablespacename = $9; + p->subPartitionDefState = $11; + int i = 0; + ListCell *elem = NULL; + List *parts = p->subPartitionDefState; + foreach(elem, parts) { + if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) { + break; + } + HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem); + hashPart->boundary = list_make1(makeIntConst(i, -1)); + i++; + } + s->partitionList = list_make1(p); + s->isStartEnd = false; + n->subtype = AT_AddPartition; + n->def = (Node*)s; + $$ = (Node *)n; + } + | ADD_PARTITION name VALUES '(' expr_list ')' OptTableSpace '(' subpartition_definition_list ')' + { + ListPartitionDefState *p = makeNode(ListPartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddPartitionState *s = makeNode(AddPartitionState); + p->partitionName = $2; + p->boundary = $5; + p->tablespacename = $7; + p->subPartitionDefState = $9; + int i = 0; + ListCell *elem = NULL; + List *parts = p->subPartitionDefState; + foreach(elem, parts) { + if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) { + break; + } + HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem); + hashPart->boundary = list_make1(makeIntConst(i, -1)); + i++; + } + s->partitionList = list_make1(p); + s->isStartEnd = false; + n->subtype = AT_AddPartition; + n->def = 
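/*
 * Note on the boundary loops above: when the new partition carries hash
 * subpartitions, each HashPartitionDefState receives an implicit integer
 * boundary 0..n-1 in declaration order; the loop stops at the first
 * non-hash subpartition, which keeps its declared boundary. A sketch of
 * the syntax being built here (names illustrative; the subpartition item
 * syntax is defined elsewhere in gram.y):
 *
 *   ALTER TABLE sales ADD PARTITION p_default VALUES (DEFAULT);
 *   ALTER TABLE sales ADD PARTITION p2 VALUES LESS THAN ('2022-01-01')
 *       (SUBPARTITION p2_s1, SUBPARTITION p2_s2);
 */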
(Node*)s; + $$ = (Node *)n; + } + | ADD_PARTITION name VALUES '(' DEFAULT ')' OptTableSpace '(' subpartition_definition_list ')' + { + ListPartitionDefState *p = makeNode(ListPartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddPartitionState *s = makeNode(AddPartitionState); + p->partitionName = $2; + Const *n_default = makeNode(Const); + n_default->ismaxvalue = true; + n_default->location = -1; + p->boundary = list_make1(n_default); + p->tablespacename = $7; + p->subPartitionDefState = $9; + int i = 0; + ListCell *elem = NULL; + List *parts = p->subPartitionDefState; + foreach(elem, parts) { + if (!IsA((Node*)lfirst(elem), HashPartitionDefState)) { + break; + } + HashPartitionDefState *hashPart = (HashPartitionDefState*)lfirst(elem); + hashPart->boundary = list_make1(makeIntConst(i, -1)); + i++; + } + s->partitionList = list_make1(p); + s->isStartEnd = false; + n->subtype = AT_AddPartition; + n->def = (Node*)s; + $$ = (Node *)n; + } + /* ALTER TABLE MODIFY PARTITION ADD SUBPARTITION */ + | MODIFY_PARTITION name ADD_SUBPARTITION name VALUES LESS THAN '(' maxValueList ')' OptTableSpace + { + RangePartitionDefState *p = makeNode(RangePartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddSubPartitionState *s = makeNode(AddSubPartitionState); + p->partitionName = $4; + p->boundary = $9; + p->tablespacename = $11; + s->subPartitionList = list_make1(p); + s->partitionName = $2; + n->subtype = AT_AddSubPartition; + n->def = (Node*)s; + $$ = (Node *)n; + } + | MODIFY_PARTITION name ADD_SUBPARTITION name VALUES '(' expr_list ')' OptTableSpace + { + ListPartitionDefState *p = makeNode(ListPartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddSubPartitionState *s = makeNode(AddSubPartitionState); + p->partitionName = $4; + p->boundary = $7; + p->tablespacename = $9; + s->subPartitionList = list_make1(p); + s->partitionName = $2; + n->subtype = AT_AddSubPartition; + n->def = (Node*)s; + $$ = (Node *)n; + } + | MODIFY_PARTITION name ADD_SUBPARTITION name VALUES '(' DEFAULT ')' OptTableSpace + { + ListPartitionDefState *p = makeNode(ListPartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + AddSubPartitionState *s = makeNode(AddSubPartitionState); + p->partitionName = $4; + Const *n_default = makeNode(Const); + n_default->ismaxvalue = true; + n_default->location = -1; + p->boundary = list_make1(n_default); + p->tablespacename = $9; + s->subPartitionList = list_make1(p); + s->partitionName = $2; + n->subtype = AT_AddSubPartition; + n->def = (Node*)s; + $$ = (Node *)n; + } /* ALTER TABLE DROP PARTITION */ | DROP_PARTITION ColId OptGPI { @@ -2967,6 +3130,30 @@ alter_partition_cmd: n->alterGPI = $6; $$ = (Node *)n; } + /* ALTER TABLE DROP SUBPARTITION */ + | DROP_SUBPARTITION ColId OptGPI + { + AlterTableCmd *n = makeNode(AlterTableCmd); + n->subtype = AT_DropSubPartition; + n->name = $2; + n->behavior = DROP_CASCADE; + n->missing_ok = FALSE; + n->alterGPI = $3; + $$ = (Node *)n; + } + | DROP_SUBPARTITION FOR '(' expr_list ')' OptGPI + { + RangePartitionDefState *p = makeNode(RangePartitionDefState); + AlterTableCmd *n = makeNode(AlterTableCmd); + + p->boundary = $4; + n->subtype = AT_DropSubPartition; + n->def = (Node*)p; + n->behavior = DROP_CASCADE; + n->missing_ok = FALSE; + n->alterGPI = $6; + $$ = (Node *)n; + } /* merge 2 or more partitions into 1 partition */ | MERGE PARTITIONS name_list INTO PARTITION name OptTableSpace OptGPI { @@ -4870,6 +5057,15 @@ range_partitioning_clause: errmsg("Un-support feature"), errdetail("The partition 
key's length should be 1."))); } + if ($8 != NULL && $7 != NULL) { + const char* message = "Un-support feature"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support feature"), + errdetail("Subpartitions do not support interval partition."), + errcause("System error."), erraction("Contact engineer to support."))); + } n->partitionKey = $5; n->intervalPartDef = (IntervalPartitionDefState *)$7; n->partitionList = $10; @@ -5023,7 +5219,7 @@ range_subpartitioning_clause: list_subpartitioning_clause: SUBPARTITION BY LIST '(' column_item_list ')' { -#ifdef ENABLE_MULTIPLE_NODE +#ifdef ENABLE_MULTIPLE_NODES const char* message = "Un-support feature"; InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); ereport(ERROR, @@ -7556,9 +7752,14 @@ SeqOptElem: CACHE NumericOnly { $$ = makeDefElem("owned_by", (Node *)$3); } - | START opt_with NumericOnly + | START_WITH NumericOnly { - $$ = makeDefElem("start", (Node *)$3); + $$ = makeDefElem("start", (Node *)$2); + } + + | START NumericOnly + { + $$ = makeDefElem("start", (Node *)$2); } | RESTART { @@ -10919,14 +11120,6 @@ privilege: SELECT opt_column_list n->cols = $2; $$ = n; } - | CONNECT opt_column_list - { - AccessPriv *n = makeNode(AccessPriv); - n->priv_name = pstrdup($1); - n->cols = $2; - $$ = n; - } - ; @@ -11102,6 +11295,14 @@ privilege_target: n->objs = $5; $$ = n; } + | ALL PACKAGES IN_P SCHEMA name_list + { + PrivTarget *n = (PrivTarget *)palloc(sizeof(PrivTarget)); + n->targtype = ACL_TARGET_ALL_IN_SCHEMA; + n->objtype = ACL_OBJECT_PACKAGE; + n->objs = $5; + $$ = n; + } | DATA_P SOURCE_P name_list { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); @@ -11240,6 +11441,138 @@ opt_granted_by: GRANTED BY RoleId { $$ = $3; } | /*EMPTY*/ { $$ = NULL; } ; +/***************************************************************************** + * + * GRANT and REVOKE DATABASE PRIVILEGE statements + * + *****************************************************************************/ + +GrantDbStmt: + GRANT db_privileges TO grantee_list opt_grant_admin_option + { + GrantDbStmt *n = makeNode(GrantDbStmt); + n->is_grant = true; + n->privileges = $2; + n->grantees = $4; + n->admin_opt = $5; + $$ = (Node*)n; + } + ; + +RevokeDbStmt: + REVOKE db_privileges FROM grantee_list + { + GrantDbStmt *n = makeNode(GrantDbStmt); + n->is_grant = false; + n->privileges = $2; + n->grantees = $4; + n->admin_opt = false; + $$ = (Node*)n; + } + | REVOKE ADMIN OPTION FOR db_privileges FROM grantee_list + { + GrantDbStmt *n = makeNode(GrantDbStmt); + n->is_grant = false; + n->privileges = $5; + n->grantees = $7; + n->admin_opt = true; + $$ = (Node*)n; + } + ; + +db_privileges: db_privilege_list { $$ = $1; } + ; + +db_privilege_list: db_privilege { $$ = list_make1($1); } + | db_privilege_list ',' db_privilege { $$ = lappend($1, $3); } + ; + +db_privilege: CREATE ANY TABLE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("create any table"); + $$ = n; + } + | ALTER ANY TABLE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("alter any table"); + $$ = n; + } + | DROP ANY TABLE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("drop any table"); + $$ = n; + } + | SELECT ANY TABLE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("select any table"); + $$ = n; + } + | INSERT ANY TABLE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("insert any table"); + $$ = n; + } + | UPDATE ANY TABLE + { + DbPriv *n = 
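/*
 * Each db_privilege in this rule is carried as a lower-case name string
 * in a DbPriv node rather than as an AclMode bit. A usage sketch for the
 * GrantDbStmt/RevokeDbStmt productions above (role names illustrative):
 *
 *   GRANT SELECT ANY TABLE, UPDATE ANY TABLE TO app_admin WITH ADMIN OPTION;
 *   REVOKE ADMIN OPTION FOR SELECT ANY TABLE FROM app_admin;
 *   REVOKE UPDATE ANY TABLE FROM app_admin;
 */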
makeNode(DbPriv); + n->db_priv_name = pstrdup("update any table"); + $$ = n; + } + | DELETE_P ANY TABLE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("delete any table"); + $$ = n; + } + | CREATE ANY SEQUENCE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("create any sequence"); + $$ = n; + } + | CREATE ANY INDEX + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("create any index"); + $$ = n; + } + | CREATE ANY FUNCTION + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("create any function"); + $$ = n; + } + | EXECUTE ANY FUNCTION + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("execute any function"); + $$ = n; + } + | CREATE ANY PACKAGE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("create any package"); + $$ = n; + } + | EXECUTE ANY PACKAGE + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("execute any package"); + $$ = n; + } + | CREATE ANY TYPE_P + { + DbPriv *n = makeNode(DbPriv); + n->db_priv_name = pstrdup("create any type"); + $$ = n; + } + ; + /***************************************************************************** * * ALTER DEFAULT PRIVILEGES statement @@ -11331,6 +11664,7 @@ defacl_privilege_target: | TYPES_P { $$ = ACL_OBJECT_TYPE; } | CLIENT_MASTER_KEYS { $$ = ACL_OBJECT_GLOBAL_SETTING; } | COLUMN_ENCRYPTION_KEYS { $$ = ACL_OBJECT_COLUMN_SETTING; } + | PACKAGES { $$ = ACL_OBJECT_PACKAGE; } ; @@ -11373,7 +11707,7 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name } | CREATE opt_unique INDEX opt_concurrently opt_index_name ON qualified_name access_method_clause '(' index_params ')' - LOCAL opt_partition_index_def opt_reloptions OptTableSpace + LOCAL opt_partition_index_def opt_include opt_reloptions OptTableSpace { IndexStmt *n = makeNode(IndexStmt); @@ -11385,8 +11719,9 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name n->accessMethod = $8; n->indexParams = $10; n->partClause = $13; - n->options = $14; - n->tableSpace = $15; + n->indexIncludingParams = $14; + n->options = $15; + n->tableSpace = $16; n->isPartitioned = true; n->isGlobal = false; n->excludeOpNames = NIL; @@ -11402,7 +11737,7 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name } | CREATE opt_unique INDEX opt_concurrently opt_index_name ON qualified_name access_method_clause '(' index_params ')' - GLOBAL opt_reloptions OptTableSpace + GLOBAL opt_include opt_reloptions OptTableSpace { IndexStmt *n = makeNode(IndexStmt); @@ -11414,8 +11749,9 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name n->accessMethod = $8; n->indexParams = $10; n->partClause = NULL; - n->options = $13; - n->tableSpace = $14; + n->indexIncludingParams = $13; + n->options = $14; + n->tableSpace = $15; n->isPartitioned = true; n->isGlobal = true; n->excludeOpNames = NIL; @@ -11577,7 +11913,43 @@ range_partition_index_item: $$ = (Node*)def; } + | PARTITION index_name OptTableSpace opt_subpartition_index_def + { + RangePartitionindexDefState* def = makeNode(RangePartitionindexDefState); + def->name = $2; + def->tablespace = $3; + def->sublist = (List *)$4; + $$ = (Node*)def; + } ; + +opt_subpartition_index_def: + '(' range_subpartition_index_list ')' + { + $$ = (Node *)$2; + } + ; + +range_subpartition_index_list: + range_subpartition_index_item + { + $$ = (Node*)list_make1($1); + } + | range_subpartition_index_list ',' range_subpartition_index_item + { + $$ = (Node*)lappend((List*)$1, $3); + } + ; + +range_subpartition_index_item: + SUBPARTITION index_name 
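/*
 * A sketch of a LOCAL index with named subpartition indexes, per the
 * partition/subpartition index productions above (names and tablespace
 * illustrative):
 *
 *   CREATE INDEX idx_sales ON sales (amount) LOCAL (
 *       PARTITION p1_idx (SUBPARTITION p1s1_idx, SUBPARTITION p1s2_idx),
 *       PARTITION p2_idx TABLESPACE ts1
 *   );
 *
 * The LOCAL and GLOBAL CREATE INDEX forms above also gain opt_include,
 * so an INCLUDE (...) column list can follow the key columns as in the
 * non-partitioned form.
 */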
OptTableSpace + { + RangePartitionindexDefState* def = makeNode(RangePartitionindexDefState); + def->name = $2; + def->tablespace = $3; + $$ = (Node*)def; + } + ; /***************************************************************************** * * QUERY: @@ -11661,9 +12033,9 @@ CreateFunctionStmt: n->options = $8; n->options = lappend(n->options, makeDefElem("as", (Node *)list_make1(makeString(funSource->bodySrc)))); - n->options = lappend(n->options, makeDefElem("language", (Node *)makeString("plpgsql"))); + n->withClause = NIL; n->withClause = NIL; n->isProcedure = false; @@ -11673,11 +12045,19 @@ CreateFunctionStmt: ; CallFuncStmt: CALL func_name '(' ')' { +#ifndef ENABLE_MULTIPLE_NODES $$ = makeCallFuncStmt($2, NULL, enable_out_param_override()); +#else + $$ = makeCallFuncStmt($2, NULL, false); +#endif } | CALL func_name '(' callfunc_args ')' { +#ifndef ENABLE_MULTIPLE_NODES $$ = makeCallFuncStmt($2, $4, enable_out_param_override()); +#else + $$ = makeCallFuncStmt($2, $4, false); +#endif } ; callfunc_args: func_arg_expr @@ -11737,11 +12117,12 @@ CreateProcedureStmt: CreatePackageStmt: CREATE opt_or_replace PACKAGE pkg_name invoker_rights as_is {pg_yyget_extra(yyscanner)->core_yy_extra.include_ora_comment = true;} { - pg_yyget_extra(yyscanner)->core_yy_extra.in_slash_proc_body = true; u_sess->plsql_cxt.package_as_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @6); char *pkgNameBegin = NULL; char *pkgNameEnd = NULL; char *pkgName = NULL; + base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner); + yyextra->core_yy_extra.in_slash_proc_body = true; switch (list_length($4)) { case 1: pkgName = strVal(linitial($4)); @@ -11762,7 +12143,6 @@ CreatePackageStmt: pkgNameBegin = pg_strtolower(pkgNameBegin); int tok = YYEMPTY; - base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner); if (yychar == YYEOF || yychar == YYEMPTY) tok = YYLEX; else @@ -11790,7 +12170,7 @@ CreatePackageStmt: // append pkgname into toks toks = lappend_int(toks, tok); positions = lappend_int(positions, yylloc); - if (tok == IDENT) { + if (tok != ';') { // append ; into toks tok = YYLEX; toks = lappend_int(toks, tok); @@ -11811,11 +12191,14 @@ CreatePackageStmt: } pfree(name_info.data); pkg_name_temp = NULL; - } + } else { + parser_yyerror("package spec is not ended correctly"); + } } tok = YYLEX; } - + yyextra->core_yy_extra.query_string_locationlist = + lappend_int(yyextra->core_yy_extra.query_string_locationlist, yylloc); if (toks == NULL || toks->length < 1) { parser_yyerror("package spec is not ended correctly"); } @@ -11827,10 +12210,6 @@ CreatePackageStmt: if (toks->length < 2) { parser_yyerror("package spec is not ended correctly"); } - last_tok = list_nth_int(toks, toks->length - 2); - if (last_tok != IDENT) { - parser_yyerror("variable/func/proc declare in package spec is not ended correctly"); - } // Package spec content. int name_start_pos = list_nth_int(positions, positions->length - 2); @@ -11849,14 +12228,12 @@ CreatePackageStmt: } /* Reset the flag which mark whether we are in slash proc. */ - pg_yyget_extra(yyscanner)->core_yy_extra.in_slash_proc_body = false; + yyextra->core_yy_extra.in_slash_proc_body = false; yyextra->core_yy_extra.dolqstart = NULL; /* * Add the end location of slash proc to the locationlist for the multi-query * processed. 
*/ - yyextra->core_yy_extra.query_string_locationlist = - lappend_int(yyextra->core_yy_extra.query_string_locationlist, yylloc); u_sess->plsql_cxt.package_as_line = GetLineNumber(t_thrd.postgres_cxt.debug_query_string, @6); #ifndef ENABLE_MULTIPLE_NODES @@ -11908,11 +12285,11 @@ invoker_rights: AUTHID DEFINER } | { - if (PLSQL_SECURITY_DEFINER) { - $$ = true; - } else { - $$ = false; - } + if (PLSQL_SECURITY_DEFINER && u_sess->attr.attr_common.upgrade_mode == 0) { + $$ = true; + } else { + $$ = false; + } } ; @@ -12040,10 +12417,6 @@ pkg_body_subprogram: { if (toks->length < 2) { parser_yyerror("package is not ended correctly"); } - last_tok = list_nth_int(toks, toks->length - 2); - if (last_tok != IDENT) { - parser_yyerror("package is not ended correctly"); - } // package name. int name_start_pos = list_nth_int(positions, positions->length - 2); @@ -12530,7 +12903,7 @@ createfunc_opt_item: createproc_opt_item: - common_func_opt_item + common_func_opt_item { $$ = $1; } @@ -13215,7 +13588,7 @@ RenameStmt: ALTER AGGREGATE func_name aggr_args RENAME TO name n->missing_ok = false; $$ = (Node *)n; } - | ALTER DATA_P SOURCE_P name RENAME TO name + | ALTER DATA_P SOURCE_P name RENAME TO name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_DATA_SOURCE; @@ -14125,6 +14498,14 @@ AlterOwnerStmt: ALTER AGGREGATE func_name aggr_args OWNER TO RoleId n->newowner = $6; $$ = (Node *)n; } + | ALTER PACKAGE pkg_name OWNER TO RoleId + { + AlterOwnerStmt *n = makeNode(AlterOwnerStmt); + n->objectType = OBJECT_PACKAGE; + n->object = $3; + n->newowner = $6; + $$ = (Node *)n; + } | ALTER opt_procedural LANGUAGE name OWNER TO RoleId { AlterOwnerStmt *n = makeNode(AlterOwnerStmt); @@ -15764,7 +16145,7 @@ VacuumStmt: if ($5) { const char* message = "COMPACT can not be used with PARTITION"; - InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("COMPACT can not be used with PARTITION"))); } @@ -15775,6 +16156,28 @@ VacuumStmt: $6->partitionname = $9; $$ = (Node *)n; } + | VACUUM opt_full opt_freeze opt_verbose opt_compact qualified_name SUBPARTITION '('name')' + { + VacuumStmt *n = makeNode(VacuumStmt); + n->options = VACOPT_VACUUM; + if ($2) + n->options |= VACOPT_FULL; + if ($4) + n->options |= VACOPT_VERBOSE; + if ($5) + { + const char* message = "COMPACT can not be used with SUBPARTITION"; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(errstate, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("COMPACT can not be used with SUBPARTITION"))); + } + n->freeze_min_age = $3 ? 0 : -1; + n->freeze_table_age = $3 ? 
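/*
 * Usage sketch for the subpartition-level VACUUM added here (names
 * illustrative):
 *
 *   VACUUM FULL VERBOSE sales SUBPARTITION (p1s1);
 *
 * As in the partition form directly above, FREEZE maps both freeze ages
 * to 0, and COMPACT is rejected with a syntax error.
 */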
0 : -1; + n->relation = $6; + n->va_cols = NIL; + $6->subpartitionname = $9; + $$ = (Node *)n; + } | VACUUM opt_full opt_freeze opt_verbose opt_compact AnalyzeStmt { VacuumStmt *n = (VacuumStmt *) $6; @@ -15835,6 +16238,21 @@ VacuumStmt: $5->partitionname = $9; $$ = (Node *) n; } + | VACUUM '(' vacuum_option_list ')' qualified_name opt_name_list SUBPARTITION '('name')' + { + VacuumStmt *n = makeNode(VacuumStmt); + n->options = VACOPT_VACUUM | $3; + if (n->options & VACOPT_FREEZE) + n->freeze_min_age = n->freeze_table_age = 0; + else + n->freeze_min_age = n->freeze_table_age = -1; + n->relation = $5; + n->va_cols = $6; + if (n->va_cols != NIL) /* implies analyze */ + n->options |= VACOPT_ANALYZE; + $5->subpartitionname = $9; + $$ = (Node *) n; + } ; vacuum_option_list: @@ -17254,6 +17672,7 @@ CreateResourcePoolStmt: CREATE RESOURCE POOL resource_pool_name OptWith } ; + /***************************************************************************** * * QUERY: @@ -17275,6 +17694,14 @@ AlterResourcePoolStmt: ALTER RESOURCE POOL resource_pool_name OptWith } ; +AlterGlobalConfigStmt:ALTER GLOBAL CONFIGURATION OptWith + { + AlterGlobalConfigStmt *n = makeNode(AlterGlobalConfigStmt); + n->options = $4; + $$ = (Node *)n; + } + ; + /***************************************************************************** * * QUERY: @@ -17301,6 +17728,13 @@ DropResourcePoolStmt: DROP RESOURCE POOL resource_pool_name resource_pool_name: ColId { $$ = $1; }; +DropGlobalConfigStmt:DROP GLOBAL CONFIGURATION name_list + { + DropGlobalConfigStmt *n = makeNode(DropGlobalConfigStmt); + n->options = $4; + $$ = (Node *)n; + } + ; /***************************************************************************** * @@ -17784,6 +18218,50 @@ DeallocateStmt: DEALLOCATE name } ; +insert_partition_clause: update_delete_partition_clause + { +#ifdef ENABLE_MULTIPLE_NODES + const char* message = "In distributed mode, insert/update/delete does not support specified partitions."; + InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); + ereport(errstate, + (errmodule(MOD_PARSER), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("In distributed mode, insert/update/delete does not support specified partitions."), + errdetail("N/A"),errcause("Feature is not supported this operation."), + erraction("Contact engineer to support."))); +#endif + $$ = $1; + } + | /* EMPTY */ { $$ = NULL; } + ; + +update_delete_partition_clause: PARTITION '(' name ')' + { + $$ = makeRangeVar(NULL, NULL, @3); + $$->partitionname = $3; + $$->ispartition = true; + } + | SUBPARTITION '(' name ')' + { + $$ = makeRangeVar(NULL, NULL, @3); + $$->subpartitionname = $3; + $$->issubpartition = true; + } + | PARTITION_FOR '(' expr_list ')' + { + $$ = makeRangeVar(NULL, NULL, @3); + $$->partitionKeyValuesList = $3; + $$->ispartition = true; + } + | SUBPARTITION_FOR '(' expr_list ')' + { + $$ = makeRangeVar(NULL, NULL, @3); + $$->partitionKeyValuesList = $3; + $$->issubpartition = true; + } + ; + + /***************************************************************************** * * QUERY: @@ -17791,7 +18269,7 @@ DeallocateStmt: DEALLOCATE name * *****************************************************************************/ -InsertStmt: opt_with_clause INSERT hint_string INTO qualified_name insert_rest returning_clause +InsertStmt: opt_with_clause INSERT hint_string INTO insert_target insert_rest returning_clause { $6->relation = $5; $6->returningList = $7; @@ -17799,7 +18277,7 @@ InsertStmt: opt_with_clause INSERT hint_string INTO qualified_name insert_rest r 
$6->hintState = create_hintstate($3); $$ = (Node *) $6; } - | opt_with_clause INSERT hint_string INTO qualified_name insert_rest upsert_clause returning_clause + | opt_with_clause INSERT hint_string INTO insert_target insert_rest upsert_clause returning_clause { if ($8 != NIL) { const char* message = "RETURNING clause is not yet supported whithin INSERT ON DUPLICATE KEY UPDATE statement."; @@ -17843,6 +18321,7 @@ InsertStmt: opt_with_clause INSERT hint_string INTO qualified_name insert_rest r } } + MergeStmt *m = makeNode(MergeStmt); m->is_insert_update = true; @@ -17898,6 +18377,40 @@ InsertStmt: opt_with_clause INSERT hint_string INTO qualified_name insert_rest r } ; +/* + * It is difficult to use relation_expr_opt_alias as update or delete statement, + * because VALUES in insert_rest would have a shift/reduce conflict with VALUES + * as alias if as is optional. + * We tried to fix such conflict by adding noassoc/left priority to VALUES, but + * it will make delete statement with VALUES as alias name unable to be resolved. + * So AS is required for now. + */ +insert_target: + qualified_name insert_partition_clause + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + $$ = $1; + } + | qualified_name insert_partition_clause AS ColId + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + $1->alias = makeAlias($4, NIL); + $$ = $1; + } + ; + insert_rest: SelectStmt { @@ -17946,7 +18459,7 @@ returning_clause: ; upsert_clause: - ON DUPLICATE KEY UPDATE set_clause_list + ON DUPLICATE KEY UPDATE set_clause_list where_clause { if (u_sess->attr.attr_sql.enable_upsert_to_merge #ifdef ENABLE_MULTIPLE_NODES @@ -17970,14 +18483,16 @@ upsert_clause: InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc); ereport(errstate, (errmodule(MOD_PARSER), - errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Update with subquery is not yet supported whithin INSERT ON DUPLICATE KEY UPDATE statement."))); + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Update with subquery is not yet supported whithin INSERT ON DUPLICATE KEY UPDATE statement."))); } } #endif + UpsertClause *uc = makeNode(UpsertClause); uc->targetList = $5; uc->location = @1; + uc->whereClause = $6; $$ = (Node *) uc; } } @@ -18043,13 +18558,14 @@ using_clause: * *****************************************************************************/ -LockStmt: LOCK_P opt_table relation_expr_list opt_lock opt_nowait +LockStmt: LOCK_P opt_table relation_expr_list opt_lock opt_nowait opt_cancelable { LockStmt *n = makeNode(LockStmt); n->relations = $3; n->mode = $4; n->nowait = $5; + n->cancelable = $6; $$ = (Node *)n; } ; @@ -18072,6 +18588,12 @@ opt_nowait: NOWAIT { $$ = TRUE; } | /*EMPTY*/ { $$ = FALSE; } ; +opt_cancelable: CANCELABLE { $$ = TRUE; } + | /*EMPTY*/ { $$ = FALSE; } + ; + +opt_wait: WAIT Iconst { $$ = $2; } + ; /***************************************************************************** * @@ -18983,41 +19505,41 @@ having_clause: ; start_with_clause: - START WITH start_with_expr connect_by_expr + START_WITH start_with_expr connect_by_expr { StartWithClause *n = makeNode(StartWithClause); - n->startWithExpr = $3; - n->connectByExpr = $4; + 
n->startWithExpr = $2; + n->connectByExpr = $3; n->siblingsOrderBy = NULL; n->priorDirection = false; n->nocycle = false; $$ = (Node *) n; } - | START WITH start_with_expr CONNECT BY NOCYCLE a_expr + | START_WITH start_with_expr CONNECT_BY NOCYCLE a_expr { StartWithClause *n = makeNode(StartWithClause); - n->startWithExpr = $3; - n->connectByExpr = $7; + n->startWithExpr = $2; + n->connectByExpr = $5; n->siblingsOrderBy = NULL; n->priorDirection = false; n->nocycle = true; $$ = (Node *) n; } - | connect_by_expr START WITH start_with_expr + | connect_by_expr START_WITH start_with_expr { StartWithClause *n = makeNode(StartWithClause); - n->startWithExpr = $4; + n->startWithExpr = $3; n->connectByExpr = $1; n->siblingsOrderBy = NULL; n->priorDirection = false; n->nocycle = false; $$ = (Node *) n; } - | CONNECT BY NOCYCLE a_expr START WITH start_with_expr + | CONNECT_BY NOCYCLE a_expr START_WITH start_with_expr { StartWithClause *n = makeNode(StartWithClause); - n->startWithExpr = $7; - n->connectByExpr = $4; + n->startWithExpr = $5; + n->connectByExpr = $3; n->siblingsOrderBy = NULL; n->priorDirection = false; n->nocycle = true; @@ -19038,7 +19560,7 @@ start_with_expr: ; connect_by_expr: - CONNECT BY a_expr + CONNECT_BY a_expr { #ifdef ENABLE_MULTIPLE_NODES const char* message = "START WITH CONNECT BY is not yet supported."; @@ -19046,7 +19568,7 @@ connect_by_expr: ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("START WITH CONNECT BY is not yet supported."))); #endif - $$ = $3; + $$ = $2; } ; @@ -19073,6 +19595,22 @@ for_locking_item: n->forUpdate = TRUE; n->strength = LCS_FORUPDATE; n->noWait = $5; + n->waitSec = 0; + $$ = (Node *) n; + } + | FOR UPDATE hint_string locked_rels_list opt_wait + { + LockingClause *n = makeNode(LockingClause); + n->lockedRels = $4; + n->forUpdate = TRUE; + n->strength = LCS_FORUPDATE; + n->waitSec = $5; + /* When the delay time is 0, the processing is based on the nowait logic. 
*/ + if (n->waitSec == 0) { + n->noWait = true; + } else { + n->noWait = false; + } $$ = (Node *) n; } | for_locking_strength locked_rels_list opt_nowait @@ -19085,6 +19623,7 @@ for_locking_item: n->forUpdate = true; } n->noWait = $3; + n->waitSec = 0; $$ = (Node *) n; } ; @@ -19174,7 +19713,7 @@ table_ref: relation_expr if (catlist->n_members == 0 && rel == NULL) { char message[MAXFNAMELEN]; int rc = sprintf_s(message, MAXFNAMELEN, "relation \"%s\" does not exist", r->relname); - securec_check_ss_c(rc, "", ""); + securec_check_ss(rc, "", ""); ReleaseSysCacheList(catlist); InsertErrorMessage(message, u_sess->plsql_cxt.plpgsql_yylloc, true); ereport(ERROR, @@ -19237,13 +19776,18 @@ table_ref: relation_expr $1->isbucket = true; $$ = (Node *)$1; } - | relation_expr PARTITION_FOR '(' maxValueList ')' + | relation_expr PARTITION_FOR '(' expr_list ')' { $1->partitionKeyValuesList = $4; $1->ispartition = true; $$ = (Node *)$1; } - + | relation_expr SUBPARTITION_FOR '(' expr_list ')' + { + $1->partitionKeyValuesList = $4; + $1->issubpartition = true; + $$ = (Node *)$1; + } | relation_expr PARTITION '(' name ')' alias_clause { $1->partitionname = $4; @@ -19251,24 +19795,27 @@ table_ref: relation_expr $1->ispartition = true; $$ = (Node *)$1; } - - | relation_expr SUBPARTITION '(' name ')' alias_clause - { - $1->subpartitionname = $4; - $1->alias = $6; - $1->issubpartition = true; - $$ = (Node *)$1; - } - - - | relation_expr PARTITION_FOR '(' maxValueList ')' alias_clause + | relation_expr SUBPARTITION '(' name ')' alias_clause + { + $1->subpartitionname = $4; + $1->alias = $6; + $1->issubpartition = true; + $$ = (Node *)$1; + } + | relation_expr PARTITION_FOR '(' expr_list ')' alias_clause { $1->partitionKeyValuesList = $4; $1->alias = $6; $1->ispartition = true; $$ = (Node *)$1; } - + | relation_expr SUBPARTITION_FOR '(' expr_list ')' alias_clause + { + $1->partitionKeyValuesList = $4; + $1->alias = $6; + $1->issubpartition = true; + $$ = (Node *)$1; + } | func_table { RangeFunction *n = makeNode(RangeFunction); @@ -19582,6 +20129,45 @@ relation_expr_opt_alias: relation_expr %prec UMINUS $1->alias = alias; $$ = $1; } + | relation_expr update_delete_partition_clause %prec UMINUS + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + $$ = $1; + } + | relation_expr update_delete_partition_clause ColId + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + Alias *alias = makeNode(Alias); + alias->aliasname = $3; + $1->alias = alias; + $$ = $1; + } + | relation_expr update_delete_partition_clause AS ColId + { + if ($2 != NULL) { + $1->partitionname = $2->partitionname; + $1->ispartition = $2->ispartition; + $1->partitionKeyValuesList = $2->partitionKeyValuesList; + $1->subpartitionname = $2->subpartitionname; + $1->issubpartition = $2->issubpartition; + } + Alias *alias = makeNode(Alias); + alias->aliasname = $4; + $1->alias = alias; + $$ = $1; + } ; /* @@ -20085,7 +20671,7 @@ character: CHARACTER opt_varying | CHAR_P opt_varying { $$ = (char *)($2 ? 
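/*
 * Usage sketches for the partition-aware table references and locking
 * options handled above (names illustrative):
 *
 *   SELECT * FROM sales PARTITION (p1) s;
 *   SELECT * FROM sales SUBPARTITION FOR ('east', 5) AS s;
 *   UPDATE sales PARTITION (p1) s SET amount = 0 WHERE s.id = 1;
 *   SELECT * FROM sales FOR UPDATE WAIT 10;
 *
 * Per for_locking_item above, WAIT 0 degrades to NOWAIT. For DML
 * targets, the parse_clause.cpp change later in this patch resolves the
 * named (sub)partition to its OID on the RTE at parse time.
 */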
"varchar": "bpchar"); } | NVARCHAR - { $$ = "nvarchar2"; } + { $$ = "nvarchar2"; } | NVARCHAR2 { $$ = "nvarchar2"; } | VARCHAR @@ -20340,6 +20926,13 @@ client_logic_type: * it's factored out just to eliminate redundant coding. */ a_expr: c_expr { $$ = $1; } + | PRIOR '(' a_expr ')' + { + List *argList = list_make1($3); + FuncCall *funcNode = MakePriorAsFunc(); + funcNode->args = argList; + $$ = (Node *)funcNode; + } | a_expr TYPECAST Typename { $$ = makeTypeCast($1, $3, @2); } | a_expr COLLATE any_name @@ -20897,17 +21490,20 @@ b_expr: c_expr * inside parentheses, such as function arguments; that cannot introduce * ambiguity to the b_expr syntax. */ -c_expr: columnref { $$ = $1; } +c_expr: columnref %prec UMINUS { $$ = $1; } | AexprConst { $$ = $1; } - | PRIOR '(' c_expr ')' + | PRIOR '(' columnref ')' { - Node* n = $3; - AssertEreport(IsA(n, ColumnRef), - MOD_OPT, - "Inconsistent expression after PRIOR"); - ColumnRef *col = (ColumnRef *)$3; - col->prior = true; - $$ = n; + ColumnRef *col = (ColumnRef *)$3; + col->prior = true; + $$ = (Node *)col; + } + | PRIOR '(' c_expr ',' func_arg_list ')' + { + List* argList = list_concat(list_make1($3), $5); + FuncCall* funcNode = MakePriorAsFunc(); + funcNode->args = argList; + $$ = (Node *)funcNode; } | PRIOR columnref { @@ -22802,6 +23398,15 @@ target_el: a_expr AS ColLabel $$->indirection = NIL; $$->val = (Node *)$1; $$->location = @1; + + ColumnRef* cr = (ColumnRef*) $1; + /* PRIOR(x) in target list implies func call */ + if (IsA($1, ColumnRef) && cr->prior) { + FuncCall *fn = MakePriorAsFunc(); + cr->prior = false; + fn->args = list_make1(cr); + $$->val = (Node *)fn; + } } | '*' { @@ -23206,6 +23811,7 @@ unreserved_keyword: | CACHE | CALL | CALLED + | CANCELABLE | CASCADE | CASCADED | CATALOG_P @@ -23232,6 +23838,7 @@ unreserved_keyword: | COMPRESS | CONDITION | CONFIGURATION + | CONNECT | CONNECTION | CONSTANT | CONSTRAINTS @@ -23427,6 +24034,7 @@ unreserved_keyword: | OWNED | OWNER | PACKAGE + | PACKAGES | PARSER | PARTIAL %prec PARTIAL_EMPTY_PREC | PARTITION @@ -23450,6 +24058,7 @@ unreserved_keyword: | PREPARE | PREPARED | PRESERVE + | PRIOR | PRIVATE | PRIVILEGE | PRIVILEGES @@ -23517,6 +24126,7 @@ unreserved_keyword: | SHIPPABLE | SHOW | SHUTDOWN + | SIBLINGS | SIMPLE | SIZE | SKIP @@ -23529,6 +24139,7 @@ unreserved_keyword: | SPLIT | STABLE | STANDALONE_P + | START | STATEMENT | STATEMENT_ID | STATISTICS @@ -23591,6 +24202,7 @@ unreserved_keyword: | VERSION_P | VIEW | VOLATILE + | WAIT | WEAK | WHITESPACE_P | WITHIN @@ -23746,7 +24358,6 @@ reserved_keyword: | COLLATE | COLUMN | CONSTRAINT - | CONNECT | CREATE | CURRENT_CATALOG | CURRENT_DATE @@ -23796,7 +24407,6 @@ reserved_keyword: | PERFORMANCE | PLACING | PRIMARY - | PRIOR | PROCEDURE | REFERENCES | REJECT_P @@ -23804,9 +24414,7 @@ reserved_keyword: | ROWNUM | SELECT | SESSION_USER - | SIBLINGS | SOME - | START | SYMMETRIC | SYSDATE | TABLE @@ -24173,17 +24781,26 @@ check_indirection(List *indirection, core_yyscan_t yyscanner) static List * extractArgTypes(List *parameters) { - List *result = NIL; - ListCell *i; + List *result = NIL; + ListCell *i; - foreach(i, parameters) - { - FunctionParameter *p = (FunctionParameter *) lfirst(i); - - if (p->mode != FUNC_PARAM_OUT && p->mode != FUNC_PARAM_TABLE) - result = lappend(result, p->argType); - } - return result; + foreach(i, parameters) + { + FunctionParameter *p = (FunctionParameter *) lfirst(i); +#ifndef ENABLE_MULTIPLE_NODES + if ((p->mode == FUNC_PARAM_OUT && enable_out_param_override()) + || p->mode == FUNC_PARAM_IN + || p->mode == 
FUNC_PARAM_INOUT + || p->mode == FUNC_PARAM_VARIADIC) { +#else + if (p->mode == FUNC_PARAM_IN + || p->mode == FUNC_PARAM_INOUT + || p->mode == FUNC_PARAM_VARIADIC) { +#endif + result = lappend(result, p->argType); + } + } + return result; } /* insertSelectOptions() @@ -25795,6 +26412,21 @@ static void RemoveFillerCol(List *filler_list, List *col_list) return; } +static FuncCall* MakePriorAsFunc() +{ + List *funcName = list_make1(makeString("prior")); + FuncCall *n = makeNode(FuncCall); + n->funcname = funcName; + n->args = NIL; + n->agg_order = NIL; + n->agg_star = FALSE; + n->agg_distinct = FALSE; + n->func_variadic = FALSE; + n->over = NULL; + n->call_func = false; + return n; +} + /* * Must undefine this stuff before including scan.c, since it has different * definitions for these macros. @@ -25804,4 +26436,5 @@ static void RemoveFillerCol(List *filler_list, List *col_list) #undef yylloc #undef yylex +#undef yylex #include "scan.inc" diff --git a/src/common/backend/parser/hint_gram.y b/src/common/backend/parser/hint_gram.y index 2ef455612..d23662c1d 100755 --- a/src/common/backend/parser/hint_gram.y +++ b/src/common/backend/parser/hint_gram.y @@ -51,15 +51,16 @@ static double convert_to_numeric(Node *value); %type join_hint_item join_order_hint join_method_hint stream_hint row_hint scan_hint skew_hint expr_const - pred_push_hint rewrite_hint gather_hint set_hint plancache_hint guc_value no_expand_hint no_gpc_hint + pred_push_hint pred_push_same_level_hint rewrite_hint gather_hint set_hint plancache_hint guc_value no_expand_hint + no_gpc_hint %type relation_list join_hint_list relation_item relation_list_with_p ident_list skew_relist column_list_p column_list value_list_p value_list value_list_item value_type value_list_with_bracket %token IDENT FCONST SCONST BCONST XCONST %token ICONST %token NestLoop_P MergeJoin_P HashJoin_P No_P Leading_P Rows_P Broadcast_P Redistribute_P BlockName_P - TableScan_P IndexScan_P IndexOnlyScan_P Skew_P HINT_MULTI_NODE_P NULL_P TRUE_P FALSE_P Predpush_P Rewrite_P - Gather_P Set_P USE_CPLAN_P USE_GPLAN_P ON_P OFF_P No_expand_P NO_GPC_P + TableScan_P IndexScan_P IndexOnlyScan_P Skew_P HINT_MULTI_NODE_P NULL_P TRUE_P FALSE_P Predpush_P + PredpushSameLevel_P Rewrite_P Gather_P Set_P USE_CPLAN_P USE_GPLAN_P ON_P OFF_P No_expand_P NO_GPC_P %nonassoc IDENT NULL_P @@ -131,6 +132,10 @@ join_hint_item: $$ = (Node *) multi_node_hint; } | pred_push_hint + { + $$ = $1; + } + | pred_push_same_level_hint { $$ = $1; } @@ -305,6 +310,20 @@ pred_push_hint: $$ = (Node *) predpushHint; } +pred_push_same_level_hint: + PredpushSameLevel_P '(' ident_list ',' IDENT ')' + { + PredpushSameLevelHint *predpushSameLevelHint = makeNode(PredpushSameLevelHint); + predpushSameLevelHint->base.relnames = $3; + predpushSameLevelHint->base.hint_keyword = HINT_KEYWORD_PREDPUSH_SAME_LEVEL; + predpushSameLevelHint->base.state = HINT_STATE_NOTUSED; + predpushSameLevelHint->dest_name = $5; + predpushSameLevelHint->dest_id = 0; + predpushSameLevelHint->candidates = NULL; + predpushSameLevelHint->negative = false; + $$ = (Node *) predpushSameLevelHint; + } + join_order_hint: Leading_P '(' relation_list_with_p ')' { diff --git a/src/common/backend/parser/hint_scan.l b/src/common/backend/parser/hint_scan.l index 2c38083e9..efe2e80fb 100755 --- a/src/common/backend/parser/hint_scan.l +++ b/src/common/backend/parser/hint_scan.l @@ -41,6 +41,7 @@ static const hintKeyword parsers[] = {HINT_TRUE, TRUE_P}, {HINT_FALSE, FALSE_P}, {HINT_PRED_PUSH, Predpush_P}, + {HINT_PRED_PUSH_SAME_LEVEL, 
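/*
 * PRIOR is moved off the reserved-keyword list in this patch and now
 * works both as the hierarchical operator and as a function-style call.
 * Sketch (illustrative schema):
 *
 *   SELECT id, PRIOR(id) AS parent_id
 *   FROM org
 *   START WITH mgr IS NULL
 *   CONNECT BY PRIOR id = mgr;
 *
 * In the select list a prior-flagged ColumnRef is rewritten into a call
 * to the "prior" function via MakePriorAsFunc (see target_el above); in
 * the CONNECT BY condition it stays a ColumnRef with prior = true.
 */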
PredpushSameLevel_P}, {HINT_REWRITE, Rewrite_P}, {HINT_GATHER, Gather_P}, {HINT_SET, Set_P}, diff --git a/src/common/backend/parser/parse_clause.cpp b/src/common/backend/parser/parse_clause.cpp index 5e60326fb..76fd8f9e5 100644 --- a/src/common/backend/parser/parse_clause.cpp +++ b/src/common/backend/parser/parse_clause.cpp @@ -46,6 +46,7 @@ #include "storage/tcap.h" #include "utils/guc.h" #include "utils/lsyscache.h" +#include "utils/partitionkey.h" #include "utils/rel.h" #include "utils/rel_gs.h" #include "utils/syscache.h" @@ -219,6 +220,19 @@ int setTargetTable(ParseState* pstate, RangeVar* relation, bool inh, bool alsoSo * Now build an RTE. */ rte = addRangeTableEntryForRelation(pstate, pstate->p_target_relation, relation->alias, inh, false); + + /* IUD contain partition. */ + if (relation->ispartition) { + rte->isContainPartition = true; + rte->partitionOid = getPartitionOidForRTE(rte, relation, pstate, pstate->p_target_relation); + } + /* IUD contain subpartition. */ + if (relation->issubpartition) { + rte->isContainSubPartition = true; + rte->subpartitionOid = + GetSubPartitionOidForRTE(rte, relation, pstate, pstate->p_target_relation, &rte->partitionOid); + } + pstate->p_target_rangetblentry = rte; /* assume new rte is at end */ diff --git a/src/common/backend/parser/parse_compatibility.cpp b/src/common/backend/parser/parse_compatibility.cpp index 31f2f5bf9..f51ba92f3 100644 --- a/src/common/backend/parser/parse_compatibility.cpp +++ b/src/common/backend/parser/parse_compatibility.cpp @@ -143,7 +143,11 @@ static void insert_jointerm(OperatorPlusProcessContext* ctx, Expr* expr, RangeTb ListCell* lc = NULL; JoinTerm* jterm = NULL; +#ifdef ENABLE_MULTIPLE_NODES + Assert(IsA(expr, A_Expr)); +#else Assert(IsA(expr, A_Expr) || IsA(expr, NullTest)); +#endif /* lrte is the RTE with operator "(+)", it couldn't be NULL */ Assert(lrte != NULL); @@ -567,8 +571,13 @@ bool plus_outerjoin_precheck(const OperatorPlusProcessContext* ctx, Node* expr, return false; } +#ifdef ENABLE_MULTIPLE_NODES + /* Only support A_Expr with "(+)" for now */ + if (list_length(lhasplus) && !IsA(expr, A_Expr)) { +#else /* Only support A_Expr and NullTest with "(+)" for now */ if (list_length(lhasplus) && !IsA(expr, A_Expr) && !IsA(expr, NullTest)) { +#endif ereport( ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("Operator \"(+)\" can only be used in common expression."))); } diff --git a/src/common/backend/parser/parse_expr.cpp b/src/common/backend/parser/parse_expr.cpp index 7b77c3343..00589084f 100644 --- a/src/common/backend/parser/parse_expr.cpp +++ b/src/common/backend/parser/parse_expr.cpp @@ -5,6 +5,7 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors * * * IDENTIFICATION @@ -693,6 +694,10 @@ Node* transformColumnRef(ParseState* pstate, ColumnRef* cref) return node; } } + if (pstate->p_bind_describe_hook != NULL) { + node = (*pstate->p_bind_describe_hook)(pstate, cref); + return node; + } } } break; @@ -977,7 +982,7 @@ static bool isCol2Function(List* fields) catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname)); #endif for (int i = 0; i < catlist->n_members; i++) { - HeapTuple proctup = &catlist->members[i]->tuple; + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup); /* get function all args */ Oid *p_argtypes = NULL; @@ -1412,15 +1417,18 @@ static bool 
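/*
 * Hint-side counterpart of the planner changes: per the hint_gram.y
 * production above, the new keyword accepts an ident_list of candidate
 * relations plus one destination ident, i.e. a hint body of the form
 *
 *   predpush_same_level(t1, t2)
 *
 * written inside the usual plus-style hint comment after SELECT
 * (aliases illustrative; t2 is the destination). dest_id starts at 0
 * and is resolved later by transform_predpush_same_level_hint.
 */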
NeedExtractOutParam(FuncCall* fn, Node* result) * When proc_outparam_override is on, extract all but select func */ FuncExpr* funcexpr = (FuncExpr*)result; + if (is_function_with_plpgsql_language_and_outparam(funcexpr->funcid) && !fn->call_func) { + return true; + } char prokind = get_func_prokind(funcexpr->funcid); if (!PROC_IS_PRO(prokind) && !fn->call_func) { return false; } - - if (enable_out_param_override()) { - return true; - } +#ifndef ENABLE_MULTIPLE_NODES + return enable_out_param_override(); +#else return false; +#endif } static Node* transformFuncCall(ParseState* pstate, FuncCall* fn) @@ -2267,8 +2275,8 @@ static Node* transformCurrentOfExpr(ParseState* pstate, CurrentOfExpr* cexpr) } // Locate in the system catalog the information for a model name -static char* select_prediction_function(Model* model){ - +static char* select_prediction_function(const Model* model){ + char* result; switch(model->return_type){ case BOOLOID: @@ -2280,6 +2288,9 @@ static char* select_prediction_function(Model* model){ case FLOAT8OID: result = "db4ai_predict_by_float8"; break; + case FLOAT8ARRAYOID: + result = "db4ai_predict_by_float8_array"; + break; case INT1OID: case INT2OID: case INT4OID: @@ -2300,7 +2311,7 @@ static char* select_prediction_function(Model* model){ default: ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Cannot trigger prediction for model with oid %d", model->return_type))); + errmsg("Cannot trigger prediction for model with oid %u", model->return_type))); result = NULL; break; } @@ -2321,7 +2332,7 @@ static Node* transformPredictByFunction(ParseState* pstate, PredictByFunction* p errmsg("Model name for prediction cannot be null"))); } - Model* model = get_model(p->model_name, true); + const Model* model = get_model(p->model_name, true); if (model == NULL) { ereport(ERROR, (errmsg( "No model found with name %s", p->model_name))); diff --git a/src/common/backend/parser/parse_func.cpp b/src/common/backend/parser/parse_func.cpp index 73de82e59..b2c595164 100644 --- a/src/common/backend/parser/parse_func.cpp +++ b/src/common/backend/parser/parse_func.cpp @@ -1398,33 +1398,67 @@ FuncCandidateList func_select_candidate(int nargs, Oid* input_typeids, FuncCandi if (ncandidates == 1) return candidates; -#ifndef ENABLE_MULTIPLE_NODES - Oid caller_pkg_oid = InvalidOid; - if (u_sess->plsql_cxt.curr_compile_context != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL) { - caller_pkg_oid = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid; - } else { - caller_pkg_oid = u_sess->plsql_cxt.running_pkg_oid; - } - nbestMatch = 0; - ncandidates = 0; - last_candidate = NULL; - for (current_candidate = candidates; current_candidate != NULL; current_candidate = current_candidate->next) { - nmatch = 0; - if (current_candidate->packageOid == caller_pkg_oid) { - nmatch++; - } - keep_candidate(nmatch, nbestMatch, current_candidate, last_candidate, candidates, ncandidates); - } - if (last_candidate) /* terminate rebuilt list */ - last_candidate->next = NULL; - if (ncandidates == 1) - return candidates; -#endif return NULL; /* failed to select a best candidate */ } /* func_select_candidate() */ + +/* + * sort_candidate_func_list + * + * sort the candidate functions by function's all param num. 
+ */ +FuncCandidateList sort_candidate_func_list(FuncCandidateList oldCandidates) +{ + if (oldCandidates == NULL || oldCandidates->next == NULL) { + return oldCandidates; + } + + FuncCandidateList cur = oldCandidates; + int size = 0; + while (cur) { + size++; + cur = cur->next; + } + + cur = oldCandidates; + FuncCandidateList* candidates = (FuncCandidateList*)palloc0(sizeof(FuncCandidateList) * size); + int index = 0; + while (cur) { + candidates[index++] = cur; + cur = cur->next; + } + + FuncCandidateList sortedCandidates = NULL; + FuncCandidateList lastCandidate = NULL; + for (int i = 0; i < size; i++) { + if (candidates[i] == NULL) { + continue; + } + int smallestIndex = i; + for (int j = 0; j < size; j++) { + FuncCandidateList cur2 = candidates[j]; + if (cur2 != NULL && candidates[smallestIndex]->allArgNum > cur2->allArgNum) { + smallestIndex = j; + } + } + + FuncCandidateList smallest = candidates[smallestIndex]; + if (lastCandidate == NULL) { + lastCandidate = smallest; + sortedCandidates = smallest; + } else { + lastCandidate->next = smallest; + lastCandidate = lastCandidate->next; + smallest->next = NULL; + } + candidates[smallestIndex] = NULL; + } + + pfree(candidates); + return sortedCandidates; +} + /* func_get_detail() * * Find the named function in the system catalogs. @@ -1510,6 +1544,8 @@ FuncDetailCode func_get_detail(List* funcname, List* fargs, List* fargnames, int } } } + + raw_candidates = sort_candidate_func_list(raw_candidates); #else /* Get list of possible candidates from namespace search */ raw_candidates = FuncnameGetCandidates(funcname, nargs, fargnames, expand_variadic, expand_defaults, false); @@ -1533,6 +1569,7 @@ FuncDetailCode func_get_detail(List* funcname, List* fargs, List* fargnames, int } #endif + /* * Quickly check if there is an exact match to the input datatypes (there * can be only one) @@ -1969,7 +2006,7 @@ Oid LookupFuncName(List* funcname, int nargs, const Oid* argtypes, bool noError) clist->args[i] = cl_get_input_param_original_type(clist->oid, i); } } - if (memcmp(argtypes, clist->args, nargs * sizeof(Oid)) == 0) + if (memcmp(argtypes, clist->args, nargs * sizeof(Oid)) == 0 && OidIsValid(clist->oid)) return clist->oid; clist = clist->next; } diff --git a/src/common/backend/parser/parse_hint.cpp b/src/common/backend/parser/parse_hint.cpp index f33cc4927..a531309d7 100755 --- a/src/common/backend/parser/parse_hint.cpp +++ b/src/common/backend/parser/parse_hint.cpp @@ -92,6 +92,7 @@ static void drop_duplicate_gather_hint(PlannerInfo* root, HintState* hstate); static void drop_duplicate_scan_hint(PlannerInfo* root, HintState* hstate); static void drop_duplicate_skew_hint(PlannerInfo* root, HintState* hstate); static void drop_duplicate_predpush_hint(PlannerInfo* root, HintState* hstate); +static void drop_duplicate_predpush_same_level_hint(PlannerInfo* root, HintState* hstate); static int find_relid_aliasname(Query* parse, const char* aliasname, bool find_in_rtable = false); static Relids create_bms_of_relids( PlannerInfo* root, Query* parse, Hint* hint, List* relnamelist = NIL, Relids currelids = NULL); @@ -184,8 +185,8 @@ static void append_value(StringInfo buf, Value* value, Node* node) } } -#define HINT_NUM 15 -#define HINT_KEYWORD_NUM 20 +#define HINT_NUM 16 +#define HINT_KEYWORD_NUM 21 typedef struct { HintKeyword keyword; @@ -206,6 +207,7 @@ const char* G_HINT_KEYWORD[HINT_KEYWORD_NUM] = { (char*) HINT_INDEXONLYSCAN, (char*) HINT_SKEW, (char*) HINT_PRED_PUSH, + (char*) HINT_PRED_PUSH_SAME_LEVEL, (char*) HINT_REWRITE, (char*) HINT_GATHER, 
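/*
 * A scenario for sort_candidate_func_list above, which orders overload
 * candidates by allArgNum (total declared parameters) in ascending order
 * before matching. Sketch, assuming a centralized build with two
 * overloads (names illustrative):
 *
 *   CREATE PROCEDURE p(a int) AS BEGIN NULL; END;
 *   CREATE PROCEDURE p(a int, b int DEFAULT 0) AS BEGIN NULL; END;
 *   CALL p(1);  -- the one-parameter overload is considered first
 */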
 (char*) HINT_NO_EXPAND,
@@ -298,6 +300,32 @@ Relids predpush_candidates_same_level(PlannerInfo *root)
     return result;
 }
 
+/*
+ * is_predpush_same_level_matched
+ * Check whether the predpush same level hint matches the given rel.
+ * @param hint: the predpush same level hint; relids: relids of the base rel.
+ * @param ppi: param path info whose required outer rels are checked.
+ * @return true if matched.
+ */
+bool is_predpush_same_level_matched(PredpushSameLevelHint* hint, Relids relids, ParamPathInfo* ppi)
+{
+    if (ppi == NULL) {
+        return false;
+    }
+    if (hint->dest_id == 0 || hint->candidates == NULL) {
+        return false;
+    }
+
+    if (!bms_is_member(hint->dest_id, relids)) {
+        return false;
+    }
+
+    if (!bms_equal(ppi->ppi_req_outer, hint->candidates)) {
+        return false;
+    }
+    return true;
+}
+
 /*
  * @Description: get the prompts for no_gpc hint into subquery.
  * @in hint: subquery no_gpc hint.
@@ -380,6 +408,35 @@ static void PredpushHintDesc(PredpushHint* hint, StringInfo buf)
     appendStringInfoString(buf, ")");
 }
 
+/*
+ * @Description: get the prompts for the predicate pushdown same level hint.
+ * @in hint: predicate pushdown same level hint.
+ * @out buf: String buf.
+ */
+static void PredpushSameLevelHintDesc(PredpushSameLevelHint* hint, StringInfo buf)
+{
+    Hint base_hint = hint->base;
+
+    Assert(buf != NULL);
+
+    appendStringInfo(buf, " %s(", KeywordDesc(hint->base.hint_keyword));
+
+    if (hint->candidates != NULL) {
+        appendStringInfo(buf, "(");
+    }
+
+    relnamesToBuf(base_hint.relnames, buf);
+    if (hint->dest_name != NULL) {
+        appendStringInfo(buf, ", %s", hint->dest_name);
+    }
+
+    if (hint->candidates != NULL) {
+        appendStringInfo(buf, ")");
+    }
+
+    appendStringInfoString(buf, ")");
+}
+
 /*
  * @Description: get the prompts for rewrite hint into subquery.
  * @in hint: rewrite hint.
@@ -712,6 +769,9 @@ char* descHint(Hint* hint)
         case T_PredpushHint:
             PredpushHintDesc((PredpushHint*)hint, &str);
             break;
+        case T_PredpushSameLevelHint:
+            PredpushSameLevelHintDesc((PredpushSameLevelHint*)hint, &str);
+            break;
         case T_RewriteHint:
             RewriteHintDesc((RewriteHint*)hint, &str);
             break;
@@ -775,6 +835,7 @@ void desc_hint_in_state(PlannerInfo* root, HintState* hstate)
     find_unused_hint_to_buf(hstate->block_name_hint, &str_buf);
     find_unused_hint_to_buf(hstate->set_hint, &str_buf);
     find_unused_hint_to_buf(hstate->no_gpc_hint, &str_buf);
+    find_unused_hint_to_buf(hstate->predpush_same_level_hint, &str_buf);
 
     /* for skew hint */
     ListCell* lc = NULL;
@@ -810,6 +871,21 @@ static void PredpushHintDelete(PredpushHint* hint)
     pfree_ext(hint);
 }
 
+/*
+ * @Description: Delete predicate pushdown same level hint, free memory.
+ * @in hint: predicate pushdown same level hint.
+ */
+static void PredpushSameLevelHintDelete(PredpushSameLevelHint* hint)
+{
+    if (hint == NULL)
+        return;
+
+    HINT_FREE_RELNAMES(hint);
+
+    bms_free(hint->candidates);
+    pfree_ext(hint);
+}
+
 /*
  * @Description: Delete join method hint, free memory.
  * @in hint: Join hint.
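/*
 * Matching sketch for is_predpush_same_level_matched above: given
 * (aliases illustrative)
 *
 *   SELECT * FROM t1, t2 WHERE t1.a = t2.a
 *
 * and a hint body predpush_same_level(t1, t2), a parameterized path is
 * accepted only when the destination t2 is among the path's relids and
 * the path's required outer rels (ppi_req_outer) equal the hinted
 * candidate set {t1} exactly; bms_equal rejects supersets and subsets.
 */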
@@ -1023,6 +1099,9 @@ void hintDelete(Hint* hint) case T_PredpushHint: PredpushHintDelete((PredpushHint*)hint); break; + case T_PredpushSameLevelHint: + PredpushSameLevelHintDelete((PredpushSameLevelHint*)hint); + break; case T_RewriteHint: RewriteHintDelete((RewriteHint*)hint); break; @@ -1095,6 +1174,11 @@ void HintStateDelete(HintState* hintState) PredpushHint* hint = (PredpushHint*)lfirst(lc); PredpushHintDelete(hint); } + + foreach (lc, hintState->predpush_same_level_hint) { + PredpushSameLevelHint* hint = (PredpushSameLevelHint*)lfirst(lc); + PredpushSameLevelHintDelete(hint); + } } /* @@ -1117,6 +1201,7 @@ HintState* HintStateCreate() hstate->hint_warning = NIL; hstate->multi_node_hint = false; hstate->predpush_hint = NIL; + hstate->predpush_same_level_hint = NIL; hstate->rewrite_hint = NIL; hstate->gather_hint = NIL; hstate->set_hint = NIL; @@ -1256,6 +1341,11 @@ static void AddPredpushHint(HintState* hstate, Hint* hint) hstate->predpush_hint = lappend(hstate->predpush_hint, hint); } +static void AddPredpushSameLevelHint(HintState* hstate, Hint* hint) +{ + hstate->predpush_same_level_hint = lappend(hstate->predpush_same_level_hint, hint); +} + static void AddRewriteHint(HintState* hstate, Hint* hint) { hstate->rewrite_hint = lappend(hstate->rewrite_hint, hint); @@ -1303,6 +1393,7 @@ const AddHintFunc G_HINT_CREATOR[HINT_NUM] = { AddScanMethodHint, AddMultiNodeHint, AddPredpushHint, + AddPredpushSameLevelHint, AddSkewHint, AddRewriteHint, AddGatherHint, @@ -1454,7 +1545,7 @@ static Relids create_bms_of_relids(PlannerInfo* root, Query* parse, Hint* hint, /* For skew hint we will found relation from parse`s rtable. */ bool find_in_rtable = false; - if (IsA(hint, SkewHint) || IsA(hint, PredpushHint)) { + if (IsA(hint, SkewHint) || IsA(hint, PredpushHint) || IsA(hint, PredpushSameLevelHint)) { find_in_rtable = true; } @@ -1549,6 +1640,9 @@ static List* set_hint_relids(PlannerInfo* root, Query* parse, List* l) case T_PredpushHint: ((PredpushHint*)hint)->candidates = relids; break; + case T_PredpushSameLevelHint: + ((PredpushSameLevelHint*)hint)->candidates = relids; + break; default: break; } @@ -1972,6 +2066,38 @@ static void drop_duplicate_predpush_hint(PlannerInfo* root, HintState* hstate) } } +/* + * @Description: Delete duplicate predpush same level hint. + * @in hstate: Hint state. + */ +static void drop_duplicate_predpush_same_level_hint(PlannerInfo* root, HintState* hstate) +{ + bool hasError = false; + ListCell* lc = NULL; + + foreach (lc, hstate->predpush_same_level_hint) { + PredpushSameLevelHint* predpushSameLevelHint = (PredpushSameLevelHint*)lfirst(lc); + + if (predpushSameLevelHint->base.state != HINT_STATE_DUPLICATION) { + ListCell* lc_next = lnext(lc); + while (lc_next != NULL) { + PredpushSameLevelHint* predpush_same_level_hint = (PredpushSameLevelHint*)lfirst(lc_next); + + if (predpushSameLevelHint->dest_id == predpush_same_level_hint->dest_id) { + predpush_same_level_hint->base.state = HINT_STATE_DUPLICATION; + hasError = true; + } + + lc_next = lnext(lc_next); + } + } + } + + if (hasError) { + hstate->predpush_same_level_hint = delete_invalid_hint(root, hstate, hstate->predpush_same_level_hint); + } +} + /* * @Description: Delete duplicate rewrite hint. * @in hstate: Hint state. @@ -3204,7 +3330,7 @@ static void transform_skew_hint(PlannerInfo* root, Query* parse, List* skew_hint * transfrom subquery name * @in root: query level info. * @in parse: parse tree. - * @in skew_hint_list: SkewHint list. + * @in predpush_hint_list: predpush hint list. 
*/ static void transform_predpush_hint(PlannerInfo* root, Query* parse, List* predpush_hint_list) { @@ -3229,6 +3355,37 @@ static void transform_predpush_hint(PlannerInfo* root, Query* parse, List* predp return; } +/* + * @Description: Transform predpush same level hint into processable type, including: + * transform the destination name + * @in root: query level info. + * @in parse: parse tree. + * @in predpush_same_level_hint_list: predpush same level hint list. + */ +static void transform_predpush_same_level_hint(PlannerInfo* root, Query* parse, List* predpush_same_level_hint_list) +{ + if (predpush_same_level_hint_list == NIL) { + return; + } + + ListCell* lc = NULL; + foreach (lc, predpush_same_level_hint_list) { + PredpushSameLevelHint* predpush_same_level_hint = (PredpushSameLevelHint*)lfirst(lc); + if (predpush_same_level_hint->dest_name == NULL) { + continue; + } + + int relid = find_relid_aliasname(parse, predpush_same_level_hint->dest_name, true); + if (relid <= NOTFOUNDRELNAME) { + continue; + } + + predpush_same_level_hint->dest_id = relid; + } + + return; +} + /* * @Description: Transform rewrite hint into bitmap. * @in root: query level info. @@ -3392,11 +3549,13 @@ void transform_hints(PlannerInfo* root, Query* parse, HintState* hstate) hstate->scan_hint = set_hint_relids(root, parse, hstate->scan_hint); hstate->skew_hint = set_hint_relids(root, parse, hstate->skew_hint); hstate->predpush_hint = set_hint_relids(root, parse, hstate->predpush_hint); + hstate->predpush_same_level_hint = set_hint_relids(root, parse, hstate->predpush_same_level_hint); transform_leading_hint(root, parse, hstate); /* Transform predpush hint, for subquery name */ transform_predpush_hint(root, parse, hstate->predpush_hint); + transform_predpush_same_level_hint(root, parse, hstate->predpush_same_level_hint); transform_rewrite_hint(root, parse, hstate->rewrite_hint); @@ -3407,6 +3566,7 @@ void transform_hints(PlannerInfo* root, Query* parse, HintState* hstate) drop_duplicate_scan_hint(root, hstate); drop_duplicate_skew_hint(root, hstate); drop_duplicate_predpush_hint(root, hstate); + drop_duplicate_predpush_same_level_hint(root, hstate); drop_duplicate_rewrite_hint(root, hstate); drop_duplicate_gather_hint(root, hstate); @@ -3582,7 +3742,7 @@ bool permit_predpush(PlannerInfo *root) return !predpushHint->negative; } -const unsigned int G_NUM_SET_HINT_WHITE_LIST = 32; +const unsigned int G_NUM_SET_HINT_WHITE_LIST = 33; const char* G_SET_HINT_WHITE_LIST[G_NUM_SET_HINT_WHITE_LIST] = { /* keep in the ascending alphabetical order of frequency */ (char*)"best_agg_plan", @@ -3616,7 +3776,8 @@ const char* G_SET_HINT_WHITE_LIST[G_NUM_SET_HINT_WHITE_LIST] = { (char*)"node_name", (char*)"query_dop", (char*)"random_page_cost", - (char*)"seq_page_cost"}; + (char*)"seq_page_cost", + (char*)"try_vector_engine_strategy"}; static int param_str_cmp(const void *s1, const void *s2) { diff --git a/src/common/backend/parser/parse_merge.cpp b/src/common/backend/parser/parse_merge.cpp index 22d6c1a76..f7d961eff 100644 --- a/src/common/backend/parser/parse_merge.cpp +++ b/src/common/backend/parser/parse_merge.cpp @@ -49,9 +49,6 @@ static List* transformUpdateTargetList(ParseState* pstate, List* origTlist); static void checkUpdateOnJoinKey( ParseState* pstate, MergeWhenClause* clause, List* join_var_list, bool is_insert_update); static void checkUpdateOnDistributeKey(RangeTblEntry* rte, List* targetlist); -static bool contain_subquery(Node* clause); -static bool contain_subquery_walker(Node* node, void* context); -static void 
check_sublink_in_action(List* mergeActionList, bool is_insert_update); static void check_source_table_replicated(Node* source_relation); static void check_target_table_columns(ParseState* pstate, bool is_insert_update); static bool checkTargetTableReplicated(RangeTblEntry* rte); @@ -820,13 +817,6 @@ static void checkUnsupportedCases(ParseState* pstate, MergeStmt* stmt) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Target relation type is not supported for %s", stmt->is_insert_update ? "INSERT ... ON DUPLICATE KEY UPDATE" : "MERGE INTO"))); - - if (RelationIsSubPartitioned(pstate->p_target_relation)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Subpartition is not supported for %s", - stmt->is_insert_update ? "INSERT ... ON DUPLICATE KEY UPDATE" : "MERGE INTO"))); - } } static Query* tryTransformMergeInsertStmt(ParseState* pstate, MergeStmt* stmt) { @@ -846,6 +836,80 @@ static Query* tryTransformMergeInsertStmt(ParseState* pstate, MergeStmt* stmt) free_parsestate(insert_pstate); return insert_query; } +#ifdef ENABLE_MULTIPLE_NODES +static bool contain_subquery_walker(Node* node, void* context) +{ + if (node == NULL) { + return false; + } + if (IsA(node, SubLink)) { + return true; + } + return expression_tree_walker(node, (bool (*)())contain_subquery_walker, (void*)context); +} + +static bool contain_subquery(Node* clause) +{ + return contain_subquery_walker(clause, NULL); +} + +/* + * cannot have Subquery in action's qual and targetlist + * report an error if we find any. + */ +static void check_sublink_in_action(List* mergeActionList, bool is_insert_update) +{ + ListCell* lc = NULL; + /* check action's qual and target list */ + foreach (lc, mergeActionList) { + MergeAction* action = (MergeAction*)lfirst(lc); + if (contain_subquery((Node*)action->qual)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Subquery in WHERE clauses are not yet supported for %s", + is_insert_update ? "INSERT ... ON DUPLICATE KEY UPDATE" : "MERGE INTO"))); + } + if (contain_subquery((Node*)action->targetList)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Subquery in INSERT/UPDATE clauses are not yet supported for %s", + is_insert_update ? "INSERT ... ON DUPLICATE KEY UPDATE" : "MERGE INTO"))); + } + } +} +#endif + +/* + * get the current namespace for columns over relation and subquery RTEs + * param idx is the index within the RTE collection that includes only relations and subqueries + */ +List* get_varnamespace(ParseState* pstate, int idx = 0) +{ + List* result = NULL; + ListCell* lc = NULL; + int cur_idx = 1; + foreach (lc, pstate->p_rtable) { + RangeTblEntry* rte = (RangeTblEntry*)lfirst(lc); + if (rte->rtekind != RTE_RELATION && rte->rtekind != RTE_SUBQUERY) { + continue; + } + + if (idx <= 0) { // if idx is non-positive, all RTEs are obtained. 
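+ // Editorial note: cur_idx counts only relation/subquery RTEs, so it can differ + // from the RTE's absolute position in pstate->p_rtable.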
+ ParseNamespaceItem* tmp = makeNamespaceItem(rte, false, true); + result = lappend(result, tmp); + cur_idx += 1; + continue; + } + + if (cur_idx == idx) { // if idx is valid, only the specified RTE is obtained + ParseNamespaceItem* tmp = makeNamespaceItem(rte, false, true); + result = lappend(result, tmp); + break; + } + cur_idx += 1; + } + return result; +} /* * transformMergeStmt - @@ -1089,8 +1153,17 @@ Query* transformMergeStmt(ParseState* pstate, MergeStmt* stmt) List* icolumns = NIL; List* attrnos = NIL; List* save_relnamespace = NIL; + List* save_varnamespace = NIL; RangeTblEntry* sourceRelRTE = NULL; + /* + * For a MERGE INTO statement, the object with index 1 in the RTE list is the target table, and the + * object with index 2 is the source table (the index starts from 1). + * For the insert scenario, the var namespace needs to be restricted to the source table. + */ + const unsigned int merge_source_rte_index = 2; + List* tmp_varnamespace = get_varnamespace(pstate, merge_source_rte_index); + /* * Assume that the top-level join RTE is at the end. * The source relation is just before that. @@ -1102,8 +1175,9 @@ * source relation. */ save_relnamespace = pstate->p_relnamespace; + save_varnamespace = pstate->p_varnamespace; pstate->p_relnamespace = list_make1(makeNamespaceItem(sourceRelRTE, false, true)); - + pstate->p_varnamespace = tmp_varnamespace; /* * Transform the when condition. * @@ -1112,6 +1186,7 @@ Query* transformMergeStmt(ParseState* pstate, MergeStmt* stmt) * WHEN MATCHED or WHEN NOT MATCHED actions to execute. */ action->qual = transformWhereClause(pstate, mergeWhenClause->condition, "WHEN"); + pstate->p_varnamespace = save_varnamespace; pstate->p_is_insert = true; @@ -1202,6 +1277,12 @@ Query* transformMergeStmt(ParseState* pstate, MergeStmt* stmt) case CMD_UPDATE: { List* set_clause_list_copy = mergeWhenClause->targetList; + List* save_varnamespace = NIL; + List* tmp_varnamespace = get_varnamespace(pstate); + save_varnamespace = pstate->p_varnamespace; + pstate->p_varnamespace = tmp_varnamespace; + pstate->use_level = true; + /* * Transform the when condition. * @@ -1210,6 +1291,8 @@ Query* transformMergeStmt(ParseState* pstate, MergeStmt* stmt) * WHEN MATCHED or WHEN NOT MATCHED actions to execute. 
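+ * (Editorial note: p_varnamespace currently holds every relation/subquery RTE and + * pstate->use_level is set, so unqualified names resolve target-first; both are + * restored immediately after this call.)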
*/ action->qual = transformWhereClause(pstate, mergeWhenClause->condition, "WHEN"); + pstate->p_varnamespace = save_varnamespace; + pstate->use_level = false; fixResTargetListWithTableNameRef(pstate->p_target_relation, stmt->relation, set_clause_list_copy); mergeWhenClause->targetList = set_clause_list_copy; @@ -1227,10 +1310,10 @@ Query* transformMergeStmt(ParseState* pstate, MergeStmt* stmt) mergeActionList = lappend(mergeActionList, action); } - +#ifdef ENABLE_MULTIPLE_NODES /* we don't support subqueries in action */ check_sublink_in_action(mergeActionList, stmt->is_insert_update); - +#endif /* cannot reference system column */ check_system_column_reference(join_var_list, mergeActionList, stmt->is_insert_update); @@ -1625,48 +1708,6 @@ static void checkUpdateOnDistributeKey(RangeTblEntry* rte, List* targetlist) } } -static bool contain_subquery(Node* clause) -{ - return contain_subquery_walker(clause, NULL); -} - -static bool contain_subquery_walker(Node* node, void* context) -{ - if (node == NULL) { - return false; - } - if (IsA(node, SubLink)) { - return true; - } - return expression_tree_walker(node, (bool (*)())contain_subquery_walker, (void*)context); -} - -/* - * cannot have Subquery in action's qual and targetlist - * report error if we found any. - */ -static void check_sublink_in_action(List* mergeActionList, bool is_insert_update) -{ - ListCell* lc = NULL; - /* check action's qual and target list */ - foreach (lc, mergeActionList) { - MergeAction* action = (MergeAction*)lfirst(lc); - - if (contain_subquery((Node*)action->qual)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Subquery in WHERE clauses are not yet supported for %s", - is_insert_update ? "INSERT ... ON DUPLICATE KEY UPDATE" : "MERGE INTO"))); - } - if (contain_subquery((Node*)action->targetList)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Subquery in INSERT/UPDATE clauses are not yet supported for %s", - is_insert_update ? "INSERT ... ON DUPLICATE KEY UPDATE" : "MERGE INTO"))); - } - } -} - /* * cannot reference system column in * 1. 
on clause diff --git a/src/common/backend/parser/parse_node.cpp b/src/common/backend/parser/parse_node.cpp index c38b85e4a..3430ad52e 100644 --- a/src/common/backend/parser/parse_node.cpp +++ b/src/common/backend/parser/parse_node.cpp @@ -70,6 +70,10 @@ ParseState* make_parsestate(ParseState* parentParseState) pstate->p_create_proc_operator_hook = parentParseState->p_create_proc_operator_hook; pstate->p_create_proc_insert_hook = parentParseState->p_create_proc_insert_hook; pstate->p_cl_hook_state = parentParseState->p_cl_hook_state; + pstate->p_bind_variable_columnref_hook = parentParseState->p_bind_variable_columnref_hook; + pstate->p_bind_hook_state = parentParseState->p_bind_hook_state; + pstate->p_bind_describe_hook = parentParseState->p_bind_describe_hook; + pstate->p_describeco_hook_state = parentParseState->p_describeco_hook_state; } return pstate; diff --git a/src/common/backend/parser/parse_oper.cpp b/src/common/backend/parser/parse_oper.cpp index 19acb9dc7..cf35bc263 100644 --- a/src/common/backend/parser/parse_oper.cpp +++ b/src/common/backend/parser/parse_oper.cpp @@ -963,8 +963,8 @@ static Oid find_oper_cache_entry(OprCacheKey* key) hash_create("Operator lookup cache", 256, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); /* Arrange to flush cache on pg_operator and pg_cast changes */ - CacheRegisterSyscacheCallback(OPERNAMENSP, InvalidateOprCacheCallBack, (Datum)0); - CacheRegisterSyscacheCallback(CASTSOURCETARGET, InvalidateOprCacheCallBack, (Datum)0); + CacheRegisterSessionSyscacheCallback(OPERNAMENSP, InvalidateOprCacheCallBack, (Datum)0); + CacheRegisterSessionSyscacheCallback(CASTSOURCETARGET, InvalidateOprCacheCallBack, (Datum)0); } /* Look for an existing entry */ diff --git a/src/common/backend/parser/parse_relation.cpp b/src/common/backend/parser/parse_relation.cpp index 025341518..9acfa8817 100755 --- a/src/common/backend/parser/parse_relation.cpp +++ b/src/common/backend/parser/parse_relation.cpp @@ -37,6 +37,7 @@ #endif #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/partitionkey.h" #include "utils/rel.h" #include "utils/rel_gs.h" #include "utils/syscache.h" @@ -67,14 +68,11 @@ static void expandTupleDesc(TupleDesc tupdesc, Alias* eref, int rtindex, int sub bool include_dropped, List** colnames, List** colvars); static void setRteOrientation(Relation rel, RangeTblEntry* rte); static int32* getValuesTypmods(RangeTblEntry* rte); + #ifndef PGXC static int specialAttNum(const char* attname); #endif -static Oid getPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); -static Oid getSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel, - Oid *partOid); - static char *ReplaceSWCTEOutSrting(ParseState *pstate, RangeTblEntry *rte, char *label) { ListCell *lc = NULL; @@ -584,12 +582,13 @@ Node* scanRTEForColumn(ParseState* pstate, RangeTblEntry* rte, char* colname, in continue; } if (strcmp(strVal(lfirst(c)), colname) == 0) { + if (result != NULL) { ereport(ERROR, - (errcode(ERRCODE_AMBIGUOUS_COLUMN), - errmsg("column reference \"%s\" is ambiguous", colname), - parser_errposition(pstate, location))); + (errcode(ERRCODE_AMBIGUOUS_COLUMN), errmsg("column reference \"%s\" is ambiguous", colname), + parser_errposition(pstate, location))); } + /* * When user specifies a query on a hidden column. but it actually invisible to the user. * So just ignore this column, this only happens on timeseries table. 
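/* * Illustration for the colNameToVar() change below (editorial, not part of the original patch): * with pstate->use_level set, an unqualified column name present in both the target and the source * table of MERGE INTO ... WHEN MATCHED resolves to the target RTE, which precedes the source in the * RTE list, instead of raising "column reference is ambiguous". */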
@@ -684,6 +683,17 @@ Node* colNameToVar(ParseState* pstate, char* colname, bool localonly, int locati *final_rte = rte; } + /* + * Under normal circumstances, a column name that occurs in more than one RTE must be qualified with an + * alias, but in some cases it cannot be. For example, the MATCHED branch of MERGE INTO first searches + * the target RTE and stops if the column is found there; otherwise the source RTE is searched. This + * search order follows the order of the RTE list, and the use_level attribute of pstate indicates + * whether this rule is applied. + */ + if (result != NULL && pstate->use_level) { + break; + } + if (result != NULL) { ereport(ERROR, (errcode(ERRCODE_AMBIGUOUS_COLUMN), @@ -967,159 +977,7 @@ static void buildScalarFunctionAlias(Node* funcexpr, char* funcname, Alias* alia eref->colnames = list_make1(makeString(eref->aliasname)); } -/* - * @@GaussDB@@ - * Target : data partition - * Brief : select * from partition (partition_name) - * : or select from partition for (partition_values_list) - * Description : get partition oid for rte->partitionOid - */ -static Oid getPartitionOidForRTE(RangeTblEntry* rte, RangeVar* relation, ParseState* pstate, Relation rel) -{ - Oid partitionOid = InvalidOid; - if (!PointerIsValid(rte) || !PointerIsValid(relation) || !PointerIsValid(pstate) || !PointerIsValid(rel)) { - return InvalidOid; - } - - /* relation is not partitioned table. */ - if (!rte->ispartrel || rte->relkind != RELKIND_RELATION) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s\" is not partitioned table", relation->relname))); - } else { - /* relation is partitioned table, from clause is partition (partition_name). */ - if (PointerIsValid(relation->partitionname)) { - partitionOid = partitionNameGetPartitionOid(rte->relid, - relation->partitionname, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessShareLock, - true, - false, - NULL, - NULL, - NoLock); - /* partiton does not exist. */ - if (!OidIsValid(partitionOid)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("partition \"%s\" of relation \"%s\" does not exist", - relation->partitionname, - relation->relname))); - } - - rte->pname = makeAlias(relation->partitionname, NIL); - } else { - /* from clause is partition for (partition_key_values_list. 
*/ - if (rel->partMap->type == PART_TYPE_LIST) { - ListPartitionDefState* listPartDef = NULL; - listPartDef = makeNode(ListPartitionDefState); - listPartDef->boundary = relation->partitionKeyValuesList; - listPartDef->boundary = transformListPartitionValue(pstate, listPartDef->boundary, false, true); - listPartDef->boundary = transformIntoTargetType( - rel->rd_att->attrs, (((ListPartitionMap*)rel->partMap)->partitionKey)->values[0], listPartDef->boundary); - - rte->plist = listPartDef->boundary; - - partitionOid = - partitionValuesGetPartitionOid(rel, listPartDef->boundary, AccessShareLock, true, true, false); - - pfree_ext(listPartDef); - } else if (rel->partMap->type == PART_TYPE_HASH) { - HashPartitionDefState* hashPartDef = NULL; - hashPartDef = makeNode(HashPartitionDefState); - hashPartDef->boundary = relation->partitionKeyValuesList; - hashPartDef->boundary = transformListPartitionValue(pstate, hashPartDef->boundary, false, true); - hashPartDef->boundary = transformIntoTargetType( - rel->rd_att->attrs, (((HashPartitionMap*)rel->partMap)->partitionKey)->values[0], hashPartDef->boundary); - - rte->plist = hashPartDef->boundary; - - partitionOid = - partitionValuesGetPartitionOid(rel, hashPartDef->boundary, AccessShareLock, true, true, false); - - pfree_ext(hashPartDef); - } else { - RangePartitionDefState* rangePartDef = NULL; - rangePartDef = makeNode(RangePartitionDefState); - rangePartDef->boundary = relation->partitionKeyValuesList; - - transformPartitionValue(pstate, (Node*)rangePartDef, false); - - rangePartDef->boundary = transformConstIntoTargetType( - rel->rd_att->attrs, ((RangePartitionMap*)rel->partMap)->partitionKey, rangePartDef->boundary); - - rte->plist = rangePartDef->boundary; - - partitionOid = - partitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessShareLock, true, true, false); - - pfree_ext(rangePartDef); - } - - /* partition does not exist. */ - if (!OidIsValid(partitionOid)) { - if (rel->partMap->type == PART_TYPE_RANGE || - rel->partMap->type == PART_TYPE_LIST || rel->partMap->type == PART_TYPE_HASH) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("The partition number is invalid or out-of-range"))); - } else { - /* shouldn't happen */ - ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unsupported partition type"))); - } - } - } - } - - return partitionOid; -} - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : select * from subpartition (subpartition_name) - * Description : get partition oid for rte->partitionOid - */ -static Oid getSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel, - Oid *partOid) -{ - Oid subPartitionOid = InvalidOid; - - if (!PointerIsValid(rte) || !PointerIsValid(relation) || !PointerIsValid(pstate) || !PointerIsValid(rel)) { - return InvalidOid; - } - - /* relation is not partitioned table. */ - if (!rte->ispartrel || rte->relkind != RELKIND_RELATION) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s\" is not partitioned table", relation->relname))); - } else { - /* relation is partitioned table, from clause is subpartition (subpartition_name). */ - if (PointerIsValid(relation->subpartitionname)) { - subPartitionOid = partitionNameGetPartitionOid(rte->relid, - relation->subpartitionname, - PART_OBJ_TYPE_TABLE_SUB_PARTITION, - AccessShareLock, - true, - false, - NULL, - NULL, - NoLock, - partOid); - /* partiton does not exist. 
*/ - if (!OidIsValid(subPartitionOid)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("subpartition \"%s\" of relation \"%s\" does not exist", - relation->subpartitionname, - relation->relname))); - } - - rte->pname = makeAlias(relation->subpartitionname, NIL); - } - } - return subPartitionOid; -} /* * Open a table during parse analysis @@ -1355,7 +1213,7 @@ RangeTblEntry* addRangeTableEntry(ParseState* pstate, RangeVar* relation, Alias* /* select from clause contain subpartition. */ if (relation->issubpartition) { rte->isContainSubPartition = true; - rte->subpartitionOid = getSubPartitionOidForRTE(rte, relation, pstate, rel, &rte->partitionOid); + rte->subpartitionOid = GetSubPartitionOidForRTE(rte, relation, pstate, rel, &rte->partitionOid); } if (!rte->relhasbucket && relation->isbucket) { ereport(ERROR, (errmsg("table is normal,cannot contains buckets(0,1,2...)"))); @@ -2906,7 +2764,8 @@ int attnameAttNum(Relation rd, const char* attname, bool sysColOK) if (sysColOK) { if ((i = specialAttNum(attname)) != InvalidAttrNumber) { if ((i != ObjectIdAttributeNumber || rd->rd_rel->relhasoids) && - (i != BucketIdAttributeNumber || RELATION_HAS_BUCKET(rd))) { + (i != BucketIdAttributeNumber || RELATION_HAS_BUCKET(rd)) && + (i != UidAttributeNumber || RELATION_HAS_UIDS(rd))) { return i; } } @@ -2952,7 +2811,8 @@ Name attnumAttName(Relation rd, int attid) if (attid <= 0) { Form_pg_attribute sysatt; - sysatt = SystemAttributeDefinition(attid, rd->rd_rel->relhasoids, RELATION_HAS_BUCKET(rd)); + sysatt = SystemAttributeDefinition(attid, rd->rd_rel->relhasoids, + RELATION_HAS_BUCKET(rd), RELATION_HAS_UIDS(rd)); return &sysatt->attname; } if (attid > rd->rd_att->natts) { @@ -2973,7 +2833,8 @@ Oid attnumTypeId(Relation rd, int attid) if (attid <= 0) { Form_pg_attribute sysatt; - sysatt = SystemAttributeDefinition(attid, rd->rd_rel->relhasoids, RELATION_HAS_BUCKET(rd)); + sysatt = SystemAttributeDefinition(attid, rd->rd_rel->relhasoids, + RELATION_HAS_BUCKET(rd), RELATION_HAS_UIDS(rd)); return sysatt->atttypid; } if (attid > rd->rd_att->natts) { diff --git a/src/common/backend/parser/parse_startwith.cpp b/src/common/backend/parser/parse_startwith.cpp index 5ae0b92db..36ba6938a 100644 --- a/src/common/backend/parser/parse_startwith.cpp +++ b/src/common/backend/parser/parse_startwith.cpp @@ -526,13 +526,6 @@ static bool preSkipPLSQLParams(ParseState *pstate, ColumnRef *cref) } } - if (pstate->p_bind_variable_columnref_hook != NULL) { - node = (*pstate->p_bind_variable_columnref_hook)(pstate, cref); - if (node != NULL) { - return true; - } - } - return false; } @@ -776,6 +769,10 @@ static Node *replaceListFakeValue(List *lst) static Node *tryReplaceFakeValue(Node *node) { + if (node == NULL) { + return node; + } + if (IsA(node, Rownum)) { node = makeIntConst(CONNECT_BY_ROWNUM_FAKEVALUE, -1); } else if (is_cref_by_name(node, "level")) { @@ -1316,7 +1313,13 @@ static SelectStmt *CreateStartWithCTEInnerBranch(ParseState* pstate, if (whereClause != NULL) { JoinExpr *final_join = (JoinExpr *)origin_table; /* pushdown requires deep copying of the quals */ - final_join->quals = (Node *)copyObject(whereClause); + Node *whereCopy = (Node *)copyObject(whereClause); + if (final_join->quals == NULL) { + final_join->quals = whereCopy; + } else { + final_join->quals = + (Node *)makeA_Expr(AEXPR_AND, NULL, whereCopy, final_join->quals, -1); + } } } diff --git a/src/common/backend/parser/parse_type.cpp b/src/common/backend/parser/parse_type.cpp index 4f795f0f2..93ce52500 100644 --- 
a/src/common/backend/parser/parse_type.cpp +++ b/src/common/backend/parser/parse_type.cpp @@ -35,74 +35,6 @@ #include "utils/syscache.h" #include "utils/pl_package.h" -const static Oid cstoreSupportType[] = {BOOLOID, - HLL_OID, // same as BYTEA - BYTEAOID, - CHAROID, - HLL_HASHVAL_OID, // same as INT8 - INT8OID, - INT2OID, - INT4OID, - INT1OID, - NUMERICOID, - BPCHAROID, - VARCHAROID, - NVARCHAR2OID, - SMALLDATETIMEOID, - TEXTOID, - OIDOID, - FLOAT4OID, - FLOAT8OID, - ABSTIMEOID, - RELTIMEOID, - TINTERVALOID, - INETOID, - DATEOID, - TIMEOID, - TIMESTAMPOID, - TIMESTAMPTZOID, - INTERVALOID, - TIMETZOID, - CASHOID, - CIDROID, - BITOID, - VARBITOID, - CLOBOID, - BOOLARRAYOID, // array - HLL_ARRAYOID, - BYTEARRAYOID, - CHARARRAYOID, - HLL_HASHVAL_ARRAYOID, - INT8ARRAYOID, - INT2ARRAYOID, - INT4ARRAYOID, - INT1ARRAYOID, - ARRAYNUMERICOID, - BPCHARARRAYOID, - VARCHARARRAYOID, - NVARCHAR2ARRAYOID, - SMALLDATETIMEARRAYOID, - TEXTARRAYOID, - FLOAT4ARRAYOID, - FLOAT8ARRAYOID, - ABSTIMEARRAYOID, - RELTIMEARRAYOID, - ARRAYTINTERVALOID, - INETARRAYOID, - DATEARRAYOID, - TIMEARRAYOID, - TIMESTAMPARRAYOID, - TIMESTAMPTZARRAYOID, - ARRAYINTERVALOID, - ARRAYTIMETZOID, - CASHARRAYOID, - CIDRARRAYOID, - BITARRAYOID, - VARBITARRAYOID, - BYTEAWITHOUTORDERCOLOID, - BYTEAWITHOUTORDERWITHEQUALCOLOID -}; - static int32 typenameTypeMod(ParseState* pstate, const TypeName* typname, Type typ); static bool IsTypeInBlacklist(Oid typoid); @@ -408,8 +340,10 @@ Type LookupTypeNameExtended(ParseState* pstate, const TypeName* typname, int32* } if (u_sess->plsql_cxt.need_pkg_dependencies && OidIsValid(pkgOid) && !notPkgType) { + MemoryContext temp = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); u_sess->plsql_cxt.pkg_dependencies = list_append_unique_oid(u_sess->plsql_cxt.pkg_dependencies, pkgOid); + MemoryContextSwitchTo(temp); } if (!OidIsValid(typoid)) { @@ -982,19 +916,83 @@ fail: * The performance of this function relies on compiler to flat the branches. But * it is ok if compiler failed to do its job as it is not in critical code path. */ -bool IsTypeSupportedByCStore(_in_ Oid typeOid, _in_ int32 typeMod) +bool IsTypeSupportedByCStore(Oid typeOid) { - // we don't support user defined type. 
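+ // Editorial note: user-defined types (typeOid >= FirstNormalObjectId) match none of the + // cases below and are rejected through the default branch, preserving the old behavior.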
- // - if (typeOid >= FirstNormalObjectId) { - return false; + switch (typeOid) { + case BOOLOID: + case HLL_OID: // same as BYTEA + case BYTEAOID: + case CHAROID: + case HLL_HASHVAL_OID: // same as INT8 + case INT8OID: + case INT2OID: + case INT4OID: + case INT1OID: + case NUMERICOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + case SMALLDATETIMEOID: + case TEXTOID: + case OIDOID: + case FLOAT4OID: + case FLOAT8OID: + case ABSTIMEOID: + case RELTIMEOID: + case TINTERVALOID: + case INETOID: + case DATEOID: + case TIMEOID: + case TIMESTAMPOID: + case TIMESTAMPTZOID: + case INTERVALOID: + case TIMETZOID: + case CASHOID: + case CIDROID: + case BITOID: + case VARBITOID: + case CLOBOID: + case BOOLARRAYOID: // array + case HLL_ARRAYOID: + case BYTEARRAYOID: + case CHARARRAYOID: + case HLL_HASHVAL_ARRAYOID: + case INT8ARRAYOID: + case INT2ARRAYOID: + case INT4ARRAYOID: + case INT1ARRAYOID: + case ARRAYNUMERICOID: + case BPCHARARRAYOID: + case VARCHARARRAYOID: + case NVARCHAR2ARRAYOID: + case SMALLDATETIMEARRAYOID: + case TEXTARRAYOID: + case FLOAT4ARRAYOID: + case FLOAT8ARRAYOID: + case ABSTIMEARRAYOID: + case RELTIMEARRAYOID: + case ARRAYTINTERVALOID: + case INETARRAYOID: + case DATEARRAYOID: + case TIMEARRAYOID: + case TIMESTAMPARRAYOID: + case TIMESTAMPTZARRAYOID: + case ARRAYINTERVALOID: + case ARRAYTIMETZOID: + case CASHARRAYOID: + case CIDRARRAYOID: + case BITARRAYOID: + case VARBITARRAYOID: + case BYTEAWITHOUTORDERCOLOID: + case BYTEAWITHOUTORDERWITHEQUALCOLOID: + case VOIDOID: + return true; + default: + break; } - for (uint32 i = 0; i < sizeof(cstoreSupportType) / sizeof(Oid); ++i) { - if (cstoreSupportType[i] == typeOid) { - return true; - } - } + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("Vectorize plan failed due to unsupported type: %d", typeOid))); return false; } /* diff --git a/src/common/backend/parser/parse_utilcmd.cpp b/src/common/backend/parser/parse_utilcmd.cpp index f00a28ce6..ba6bb716f 100644 --- a/src/common/backend/parser/parse_utilcmd.cpp +++ b/src/common/backend/parser/parse_utilcmd.cpp @@ -237,7 +237,7 @@ Oid *namespaceid, bool isFirstNode) * preexisting relation in that namespace with the same name, and updates * stmt->relation->relpersistence if the select namespace is temporary. */ - *namespaceid = RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, &existing_relid); + *namespaceid = RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, &existing_relid, RELKIND_RELATION); /* * Check whether relation is in ledger schema. If it is, we add hash column. @@ -906,7 +906,11 @@ static void transformColumnDefinition(CreateStmtContext* cxt, ColumnDef* column, column->typname->names = NIL; column->typname->typeOid = NUMERICOID; large = true; +#ifdef ENABLE_MULTIPLE_NODES } else if ((is_enc_type(typname) && IS_MAIN_COORDINATOR) || +#else +} else if (is_enc_type(typname) || +#endif (!u_sess->attr.attr_common.enable_beta_features && strcmp(typname, "int16") == 0)) { ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), @@ -1276,6 +1280,96 @@ static DistributeBy* GetHideTagDistribution(TupleDesc tupleDesc) return distributeby; } +/* + * Support Create table like on a table with subpartitions + * In transform phase, we need to fill PartitionState + * 1. Recursively fill partitionList in PartitionState also including subpartitionList + * 2. 
Recursively fill PartitionState also including SubPartitionState + */ +static PartitionState *transformTblSubpartition(Relation relation, HeapTuple partitionTuple, + List* partitionList, List* subPartitionList) +{ + ListCell *lc1 = NULL; + ListCell *lc2 = NULL; + ListCell *lc3 = NULL; + + List *partKeyColumns = NIL; + List *partitionDefinitions = NIL; + PartitionState *partState = NULL; + PartitionState *subPartState = NULL; + Form_pg_partition tupleForm = NULL; + Form_pg_partition partitionForm = NULL; + Form_pg_partition subPartitionForm = NULL; + + tupleForm = (Form_pg_partition)GETSTRUCT(partitionTuple); + + /* prepare partition definitions */ + transformTableLikePartitionProperty( + relation, partitionTuple, &partKeyColumns, partitionList, &partitionDefinitions); + + partState = makeNode(PartitionState); + partState->partitionKey = partKeyColumns; + partState->partitionList = partitionDefinitions; + partState->partitionStrategy = tupleForm->partstrategy; + + partState->rowMovement = relation->rd_rel->relrowmovement ? ROWMOVEMENT_ENABLE : ROWMOVEMENT_DISABLE; + + /* prepare subpartition definitions */ + forboth(lc1, partitionList, lc2, subPartitionList) { + List *subPartKeyColumns = NIL; + List *subPartitionDefinitions = NIL; + RangePartitionDefState *partitionDef = NULL; + + HeapTuple partTuple = (HeapTuple)lfirst(lc1); + List *subPartitions = (List *)lfirst(lc2); + + HeapTuple subPartTuple = (HeapTuple)linitial(subPartitions); + subPartitionForm = (Form_pg_partition)GETSTRUCT(subPartTuple); + partitionForm = (Form_pg_partition)GETSTRUCT(partTuple); + + if (subPartitionForm->partstrategy != PART_STRATEGY_RANGE) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support feature"), + errdetail("Create Table like with subpartition only supports the range strategy."))); + } + + Oid partOid = HeapTupleGetOid(partTuple); + Partition part = partitionOpen(relation, partOid, AccessShareLock); + Relation partRel = partitionGetRelation(relation, part); + + transformTableLikePartitionProperty( + partRel, partTuple, &subPartKeyColumns, subPartitions, &subPartitionDefinitions); + + if (subPartState == NULL) { + subPartState = makeNode(PartitionState); + subPartState->partitionKey = subPartKeyColumns; + subPartState->partitionList = NULL; + subPartState->partitionStrategy = subPartitionForm->partstrategy; + } + + /* Do this here to preserve the original subpartition order */ + foreach(lc3, partitionDefinitions) { + RangePartitionDefState *rightDef = (RangePartitionDefState*)lfirst(lc3); + + if (pg_strcasecmp(NameStr(partitionForm->relname), rightDef->partitionName) == 0) { + partitionDef = rightDef; + break; + } + } + + Assert(partitionDef != NULL); + partitionDef->subPartitionDefState = subPartitionDefinitions; + + releaseDummyRelation(&partRel); + partitionClose(relation, part, NoLock); + } + + partState->subPartitionState = subPartState; + + return partState; +} + /* * transformTableLikeClause * @@ -1339,15 +1433,6 @@ static void transformTableLikeClause( cancel_parser_errposition_callback(&pcbstate); - if (RelationIsSubPartitioned(relation)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Subpartition table does not support create table like."), - errdetail("N/A."), - errcause("The feature is not currently supported."), erraction("Use other actions instead.")))); - } - // If specify 'INCLUDING ALL' for non-partitioned table, just remove the option 'INCLUDING PARTITION'. 
// Right shift 10 bits can handle both 'INCLUDING ALL' and 'INCLUDING ALL EXCLUDING option(s)'. // if add a new option, the number '10'(see marco 'MAX_TABLE_LIKE_OPTIONS') should be changed. @@ -1760,23 +1845,29 @@ static void transformTableLikeClause( HeapTuple partitionTableTuple = NULL; Form_pg_partition partitionForm = NULL; List* partitionList = NIL; + List* subPartitionList = NIL; // read out partitioned table tuple, and partition tuple list partitionTableTuple = searchPgPartitionByParentIdCopy(PART_OBJ_TYPE_PARTED_TABLE, ObjectIdGetDatum(relation->rd_id)); partitionList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, ObjectIdGetDatum(relation->rd_id)); + if (RelationIsSubPartitioned(relation)) { + subPartitionList = searchPgSubPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, partitionList); + } + if (partitionTableTuple != NULL) { partitionForm = (Form_pg_partition)GETSTRUCT(partitionTableTuple); - if (partitionForm->partstrategy == PART_STRATEGY_LIST || - partitionForm->partstrategy == PART_STRATEGY_HASH) { - freePartList(partitionList); - heap_freetuple_ext(partitionTableTuple); + + if (partitionForm->partstrategy == PART_STRATEGY_LIST || + partitionForm->partstrategy == PART_STRATEGY_HASH) { + freePartList(partitionList); + heap_freetuple_ext(partitionTableTuple); heap_close(relation, NoLock); - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Un-support feature"), - errdetail("The Like feature is not supported currently for List and Hash."))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support feature"), + errdetail("The Like feature is not supported currently for List and Hash."))); } bool value_partition_rel = (partitionForm->partstrategy == PART_STRATEGY_VALUE); @@ -1784,7 +1875,7 @@ static void transformTableLikeClause( * We only have to create PartitionState for a range partition table * with known partitions or a value partition table(HDFS). 
*/ - if ((NIL != partitionList) || value_partition_rel) { + if ((NIL != partitionList && subPartitionList == NIL) || value_partition_rel) { { List* partKeyColumns = NIL; List* partitionDefinitions = NIL; @@ -1815,6 +1906,17 @@ static void transformTableLikeClause( freePartList(partitionList); } + } else if (subPartitionList != NULL) { + n = transformTblSubpartition(relation, + partitionTableTuple, + partitionList, + subPartitionList); + + /* store the produced partition state in CreateStmtContext */ + cxt->csc_partTableState = n; + + freePartList(partitionList); + freePartList(subPartitionList); } heap_freetuple_ext(partitionTableTuple); @@ -3078,7 +3180,7 @@ static IndexStmt* transformIndexConstraint(Constraint* constraint, CreateStmtCon AssertEreport(attnum <= heap_rel->rd_att->natts, MOD_OPT, ""); attform = heap_rel->rd_att->attrs[attnum - 1]; } else - attform = SystemAttributeDefinition(attnum, heap_rel->rd_rel->relhasoids, RELATION_HAS_BUCKET(heap_rel)); + attform = SystemAttributeDefinition(attnum, heap_rel->rd_rel->relhasoids, RELATION_HAS_BUCKET(heap_rel), RELATION_HAS_UIDS(heap_rel)); attname = pstrdup(NameStr(attform->attname)); if (i < indnkeyatts) { @@ -3593,21 +3695,12 @@ IndexStmt* transformIndexStmt(Oid relid, IndexStmt* stmt, const char* queryStrin if (!isColStore && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_GIN_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_GIST_INDEX_TYPE)) && - (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_HASH_INDEX_TYPE)) && (0 != pg_strcasecmp(stmt->accessMethod, DEFAULT_USTORE_INDEX_TYPE))) { /* row store only support btree/ubtree/gin/gist index */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("access method \"%s\" does not support row store", stmt->accessMethod))); } - - if (0 == pg_strcasecmp(stmt->accessMethod, DEFAULT_HASH_INDEX_TYPE) && - t_thrd.proc->workingVersionNum < SUPPORT_HASH_XLOG_VERSION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("access method \"%s\" does not support row store", stmt->accessMethod))); - } - if (isColStore && (!isPsortMothed && !isCBtreeMethod && !isCGinBtreeMethod)) { /* column store support psort/cbtree/gin index */ ereport(ERROR, @@ -4025,6 +4118,7 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query AlterTableCmd* newcmd = NULL; Node* rangePartDef = NULL; AddPartitionState* addDefState = NULL; + AddSubPartitionState* addSubdefState = NULL; SplitPartitionState* splitDefState = NULL; ListCell* cell = NULL; @@ -4196,7 +4290,28 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query newcmds = lappend(newcmds, cmd); break; + case AT_AddSubPartition: + /* transform the boundary of subpartition, + * this step transforms it from A_Const into Const */ + addSubdefState = (AddSubPartitionState*)cmd->def; + if (!PointerIsValid(addSubdefState)) { + ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmodule(MOD_OPT), + errmsg("Missing definition of adding subpartition"), + errdetail("The AddSubPartitionState in ADD SUBPARTITION command is not found"), + errcause("Try ADD SUBPARTITION without subpartition definition"), + erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); + } + /* A_Const -->Const */ + foreach (cell, addSubdefState->subPartitionList) { + rangePartDef = (Node*)lfirst(cell); + transformPartitionValue(pstate, rangePartDef, true); + } + + newcmds = lappend(newcmds, cmd); + break; + case AT_DropPartition: 
+ case AT_DropSubPartition: case AT_TruncatePartition: case AT_ExchangePartition: case AT_TruncateSubPartition: @@ -4228,6 +4343,7 @@ List* transformAlterTableStmt(Oid relid, AlterTableStmt* stmt, const char* query ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not split LIST/HASH partition table"))); } + /* transform the boundary of range partition: from A_Const into Const */ splitDefState = (SplitPartitionState*)cmd->def; if (!PointerIsValid(splitDefState->split_point)) { diff --git a/src/common/backend/parser/parser.cpp b/src/common/backend/parser/parser.cpp index 31640d316..ba75d042c 100644 --- a/src/common/backend/parser/parser.cpp +++ b/src/common/backend/parser/parser.cpp @@ -272,6 +272,23 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) break; } break; + case SUBPARTITION: + + GET_NEXT_TOKEN(); + + switch (next_token) { + case FOR: + cur_token = SUBPARTITION_FOR; + break; + default: + /* save the lookahead token for next time */ + SET_LOOKAHEAD_TOKEN(); + /* and back up the output info to cur_token */ + lvalp->core_yystype = cur_yylval; + *llocp = cur_yylloc; + break; + } + break; case ADD_P: /* * ADD PARTITION must be reduced to one token @@ -282,6 +299,9 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) case PARTITION: cur_token = ADD_PARTITION; break; + case SUBPARTITION: + cur_token = ADD_SUBPARTITION; + break; default: /* save the lookahead token for next time */ SET_LOOKAHEAD_TOKEN(); @@ -303,6 +323,9 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) case PARTITION: cur_token = DROP_PARTITION; break; + case SUBPARTITION: + cur_token = DROP_SUBPARTITION; + break; default: /* save the lookahead token for next time */ SET_LOOKAHEAD_TOKEN(); @@ -411,7 +434,44 @@ int base_yylex(YYSTYPE* lvalp, YYLTYPE* llocp, core_yyscan_t yyscanner) break; } break; + case START: + /* + * START WITH must be reduced to one token, to allow START as table / column alias. + */ + GET_NEXT_TOKEN(); + switch (next_token) { + case WITH: + cur_token = START_WITH; + break; + default: + /* save the lookahead token for next time */ + SET_LOOKAHEAD_TOKEN(); + /* and back up the output info to cur_token */ + lvalp->core_yystype = cur_yylval; + *llocp = cur_yylloc; + break; + } + break; + case CONNECT: + /* + * CONNECT BY must be reduced to one token, to allow CONNECT as table / column alias. + */ + GET_NEXT_TOKEN(); + + switch (next_token) { + case BY: + cur_token = CONNECT_BY; + break; + default: + /* save the lookahead token for next time */ + SET_LOOKAHEAD_TOKEN(); + /* and back up the output info to cur_token */ + lvalp->core_yystype = cur_yylval; + *llocp = cur_yylloc; + break; + } + break; default: break; } diff --git a/src/common/backend/parser/scan.l b/src/common/backend/parser/scan.l index 75f3c8055..1b86965ab 100755 --- a/src/common/backend/parser/scan.l +++ b/src/common/backend/parser/scan.l @@ -51,6 +51,14 @@ */ #define YYSTYPE core_YYSTYPE +/* + * define core_yylex for flex >= 2.6 + */ +#if FLEX_MAJOR_VERSION >= 2 && FLEX_MINOR_VERSION >= 6 +#define YY_DECL int core_yylex \ + (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner) +#endif + /* * Set the type of yyextra. All state variables used by the scanner should * be in yyextra, *not* statically allocated. 
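/* * Illustration for the base_yylex() lookahead changes above (editorial, not part of the original * patch): folding START WITH and CONNECT BY into the single tokens START_WITH and CONNECT_BY keeps * START and CONNECT usable as ordinary identifiers, e.g. * select c1 start, c2 connect from t1; -- START/CONNECT as column aliases * select * from t1 start with id = 1 connect by prior id = pid; */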
diff --git a/src/common/backend/pgxc_single/CMakeLists.txt b/src/common/backend/pgxc_single/CMakeLists.txt index 765a53326..cec00e6c0 100755 --- a/src/common/backend/pgxc_single/CMakeLists.txt +++ b/src/common/backend/pgxc_single/CMakeLists.txt @@ -1,23 +1,24 @@ -#This is the main CMAKE for build bin. - -set(CMAKE_VERBOSE_MAKEFILE ON) -set(CMAKE_RULE_MESSAGES OFF) -set(CMAKE_SKIP_RPATH TRUE) - - -set(CMAKE_MODULE_PATH - ${CMAKE_CURRENT_SOURCE_DIR}/barrier - ${CMAKE_CURRENT_SOURCE_DIR}/copy - ${CMAKE_CURRENT_SOURCE_DIR}/locator - ${CMAKE_CURRENT_SOURCE_DIR}/nodemgr - ${CMAKE_CURRENT_SOURCE_DIR}/pool -) - -if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") - add_subdirectory(barrier) - add_subdirectory(copy) - add_subdirectory(locator) - add_subdirectory(nodemgr) - add_subdirectory(pool) -endif() - +#This is the main CMAKE for build bin. + +set(CMAKE_VERBOSE_MAKEFILE ON) +set(CMAKE_RULE_MESSAGES OFF) +set(CMAKE_SKIP_RPATH TRUE) + + +set(CMAKE_MODULE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/barrier + ${CMAKE_CURRENT_SOURCE_DIR}/copy + ${CMAKE_CURRENT_SOURCE_DIR}/locator + ${CMAKE_CURRENT_SOURCE_DIR}/nodemgr + ${CMAKE_CURRENT_SOURCE_DIR}/pool +) + +add_subdirectory(barrier) +add_subdirectory(copy) + +if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") + add_subdirectory(locator) + add_subdirectory(nodemgr) + add_subdirectory(pool) +endif() + diff --git a/src/common/backend/pgxc_single/Makefile b/src/common/backend/pgxc_single/Makefile index 6c187da3a..8b893fb5f 100644 --- a/src/common/backend/pgxc_single/Makefile +++ b/src/common/backend/pgxc_single/Makefile @@ -8,6 +8,10 @@ subdir = src/common/backend/pgxc_single top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global +ifeq ($(enable_multiple_nodes), yes) +SUBDIRS = barrier +else SUBDIRS = barrier locator copy nodemgr pool +endif include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/common/backend/pgxc_single/barrier/barrier.cpp b/src/common/backend/pgxc_single/barrier/barrier.cpp index e21db40bc..8d1adb3a5 100755 --- a/src/common/backend/pgxc_single/barrier/barrier.cpp +++ b/src/common/backend/pgxc_single/barrier/barrier.cpp @@ -22,6 +22,8 @@ #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "pgxc/barrier.h" +#include "postmaster/barrier_creator.h" +#include "postmaster/barrier_preparse.h" #include "pgxc/execRemote.h" #include "pgxc/locator.h" #include "pgxc/pgxc.h" @@ -32,14 +34,24 @@ #include "securec_check.h" #include "utils/elog.h" #include "replication/walreceiver.h" +#include "replication/archive_walreceiver.h" + +#define atolsn(x) ((XLogRecPtr)strtoul((x), NULL, 0)) #ifdef ENABLE_MULTIPLE_NODES -static const char* generate_barrier_id(const char* id); -static PGXCNodeAllHandles* PrepareBarrier(const char* id); -static void ExecuteBarrier(const char* id); -static void EndBarrier(PGXCNodeAllHandles* handles, const char* id); -static void WriteBarrierLSNFile(XLogRecPtr barrierLSN); +static void PrepareBarrier(PGXCNodeAllHandles* prepared_handles, const char* id, bool isSwitchoverBarrier); +static void ExecuteBarrier(const char* id, bool isSwitchoverBarrier = false); +static void EndBarrier(PGXCNodeAllHandles* handles, const char* id, bool isSwitchoverBarrier = false); +static void CommitBarrier(PGXCNodeAllHandles* prepared_handles, const char* id); +static void WriteBarrierLSNFile(XLogRecPtr barrierLSN, const char* barrier_id); +static void replace_barrier_id_compatible(const char* id, char** log_id); +static void RequestXLogFromStream(); +static void barrier_redo_pause(char* barrierId); +static bool 
TryBarrierLockWithTimeout(); +static void CheckBarrierCommandStatus(PGXCNodeAllHandles* conn_handles, const char* id, const char* command, bool isCn, + bool isSwitchoverBarrier = false); #endif +static const int BARRIER_LOCK_TIMEOUT_MS = 2000; // 2S /* * Prepare ourselves for an incoming BARRIER. We must disable all new 2PC * commits and let the ongoing commits to finish. We then remember the @@ -57,32 +69,35 @@ static void WriteBarrierLSNFile(XLogRecPtr barrierLSN); * set a timeout. The lock should be release after the timeout and the * barrier should be canceled. */ -void ProcessCreateBarrierPrepare(const char* id) +void ProcessCreateBarrierPrepare(const char* id, bool isSwitchoverBarrier) { #ifndef ENABLE_MULTIPLE_NODES DISTRIBUTED_FEATURE_NOT_SUPPORTED(); return; #else - StringInfoData buf; + ereport(DEBUG1, + (errmsg("Receive CREATE BARRIER <%s> PREPARE message on Coordinator", id))); if (!IS_PGXC_COORDINATOR || !IsConnFromCoord()) ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), errmsg("The CREATE BARRIER PREPARE message is expected to " "arrive at a Coordinator from another Coordinator"))); - - (void)LWLockAcquire(BarrierLock, LW_EXCLUSIVE); + if (isSwitchoverBarrier) { + if (!LWLockHeldByMe(BarrierLock)) + LWLockAcquire(BarrierLock, LW_EXCLUSIVE); + } else { + if (!TryBarrierLockWithTimeout()) + ereport(ERROR, + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), + errmsg("Wait Barrier lock timeout barrierId:%s", id))); + } pq_beginmessage(&buf, 'b'); pq_sendstring(&buf, id); pq_endmessage(&buf); pq_flush(); - - /* - * TODO Start a timer to terminate the pending barrier after a specified - * timeout - */ #endif } @@ -96,28 +111,53 @@ void ProcessCreateBarrierEnd(const char* id) DISTRIBUTED_FEATURE_NOT_SUPPORTED(); return; #else - StringInfoData buf; - + ereport(DEBUG1, + (errmsg("Receive CREATE BARRIER <%s> END message on Coordinator", id))); if (!IS_PGXC_COORDINATOR || !IsConnFromCoord()) ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), errmsg("The CREATE BARRIER END message is expected to " "arrive at a Coordinator from another Coordinator"))); - - LWLockRelease(BarrierLock); + if (LWLockHeldByMe(BarrierLock)) + LWLockRelease(BarrierLock); pq_beginmessage(&buf, 'b'); pq_sendstring(&buf, id); pq_endmessage(&buf); pq_flush(); - - /* - * TODO Stop the timer - */ #endif } + +void ProcessCreateBarrierCommit(const char* id) +{ +#ifndef ENABLE_MULTIPLE_NODES + DISTRIBUTED_FEATURE_NOT_SUPPORTED(); + return; +#else + + StringInfoData buf; + ereport(DEBUG1, + (errmsg("Receive CREATE BARRIER <%s> COMMIT message on Coordinator", id))); + if (!IS_PGXC_COORDINATOR || !IsConnFromCoord()) + ereport(ERROR, + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), + errmsg("The CREATE BARRIER COMMIT message is expected to " + "arrive at a Coordinator from another Coordinator"))); + XLogBeginInsert(); + XLogRegisterData((char*)id, strlen(id) + 1); + + XLogRecPtr recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_COMMIT, InvalidBktId); + XLogWaitFlush(recptr); + + + pq_beginmessage(&buf, 'b'); + pq_sendstring(&buf, id); + pq_endmessage(&buf); + pq_flush(); +#endif +} /* * Execute the CREATE BARRIER command. Write a BARRIER WAL record and flush the * WAL buffers to disk before returning to the caller. 
Writing the WAL record @@ -129,11 +169,36 @@ void ProcessCreateBarrierExecute(const char* id, bool isSwitchoverBarrier) DISTRIBUTED_FEATURE_NOT_SUPPORTED(); return; #else - StringInfoData buf; + XLogRecPtr recptr; + char* log_id = (char*)id; + char barrierLsn[BARRIER_LSN_LENGTH]; + int rc; + + ereport(DEBUG1, + (errmsg("Receive CREATE BARRIER <%s> EXECUTE message on Coordinator or Datanode", id))); + if (!IsConnFromCoord()) + ereport(ERROR, + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), + errmsg("The CREATE BARRIER EXECUTE message is expected to " + "arrive from a Coordinator"))); + + if (unlikely(t_thrd.proc->workingVersionNum < BACKUP_SLOT_VERSION_NUM)) { + replace_barrier_id_compatible(id, &log_id); + } + + if (IS_CSN_BARRIER(id)) { + CommitSeqNo csn = CsnBarrierNameGetCsn(id); + if (csn == 0) { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Wrong barrier CSN.")))); + } + UpdateNextMaxKnownCSN(csn); + } if (isSwitchoverBarrier == true) { - ereport(LOG, (errmsg("Handling DISASTER RECOVERY SWITCHOVER BARRIER."))); + ereport(LOG, (errmsg("Handling DISASTER RECOVERY SWITCHOVER BARRIER:<%s>.", id))); // The access of all users is not blocked temporarily. /* * Force a checkpoint before starting the switchover. This will force dirty @@ -142,88 +207,159 @@ void ProcessCreateBarrierExecute(const char* id, bool isSwitchoverBarrier) RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); } - if (!IsConnFromCoord()) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("The CREATE BARRIER EXECUTE message is expected to " - "arrive from a Coordinator"))); - { - XLogRecData rdata[1]; - XLogRecPtr recptr; + XLogBeginInsert(); + XLogRegisterData((char*)log_id, strlen(log_id) + 1); - rdata[0].data = (char*)id; - rdata[0].len = strlen(id) + 1; - rdata[0].buffer = InvalidBuffer; - rdata[0].next = NULL; + if (u_sess->attr.attr_storage.enable_cbm_tracking && IS_ROACH_BARRIER(id)) { + LWLockAcquire(CBMParseXlogLock, LW_EXCLUSIVE); + } + recptr = XLogInsert(RM_BARRIER_ID, isSwitchoverBarrier? 
XLOG_BARRIER_SWITCHOVER : XLOG_BARRIER_CREATE, + InvalidBktId); + XLogWaitFlush(recptr); - recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_CREATE, rdata, isSwitchoverBarrier); - XLogWaitFlush(recptr); + if (IS_CSN_BARRIER(id) && !isSwitchoverBarrier) { + SpinLockAcquire(&g_instance.streaming_dr_cxt.mutex); + rc = strncpy_s((char *)g_instance.streaming_dr_cxt.currentBarrierId, MAX_BARRIER_ID_LENGTH, + id, MAX_BARRIER_ID_LENGTH - 1); + securec_check(rc, "\0", "\0"); + SpinLockRelease(&g_instance.streaming_dr_cxt.mutex); + ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("refresh currentBarrier, barrier id %s", id))); + } + + // record disaster recovery barrier lsn + if (IS_CSN_BARRIER(id) || IS_HADR_BARRIER(id)) { + pg_atomic_init_u64(&g_instance.archive_obs_cxt.barrierLsn, recptr); + rc = snprintf_s(barrierLsn, BARRIER_LSN_LENGTH, BARRIER_LSN_LENGTH - 1, "0x%lx", recptr); + securec_check_ss_c(rc, "\0", "\0"); + } else { + if (u_sess->attr.attr_storage.enable_cbm_tracking) { + (void)ForceTrackCBMOnce(recptr, 0, false, true); + } + WriteBarrierLSNFile(recptr, id); + } + + if (isSwitchoverBarrier) { + g_instance.streaming_dr_cxt.switchoverBarrierLsn = recptr; } pq_beginmessage(&buf, 'b'); pq_sendstring(&buf, id); + pq_sendstring(&buf, barrierLsn); pq_endmessage(&buf); pq_flush(); + + if (unlikely(log_id != id)) { + pfree_ext(log_id); + } #endif } -void RequestBarrier(const char* id, char* completionTag, bool isSwitchoverBarrier) +#ifdef ENABLE_MULTIPLE_NODES +static void ExecBarrierOnFirstExecCnNode(const char* id, const char* firstExecNode, char* completionTag) +{ + int rc = 0; + char queryString[MAX_BARRIER_SQL_LENGTH] = {0}; + + rc = sprintf_s(queryString, MAX_BARRIER_SQL_LENGTH, "create barrier '%s';", id); + securec_check_ss(rc, "\0", "\0"); + ereport(LOG, (errmsg("Send <%s> to First Exec Coordinator <%s>", queryString, firstExecNode))); + + if (!IsConnFromCoord()) { + RemoteQuery* step = makeNode(RemoteQuery); + step->combine_type = COMBINE_TYPE_SAME; + step->sql_statement = pstrdup(queryString); + step->force_autocommit = false; + step->exec_type = EXEC_ON_COORDS; + step->is_temp = false; + ExecRemoteUtilityParallelBarrier(step, firstExecNode); + pfree_ext(step->sql_statement); + pfree_ext(step); + if (completionTag != NULL) { + rc = sprintf_s(completionTag, COMPLETION_TAG_BUFSIZE, "BARRIER %s", id); + securec_check_ss(rc, "\0", "\0"); + } + } +} +#endif + +void RequestBarrier(char* id, char* completionTag, bool isSwitchoverBarrier) { #ifndef ENABLE_MULTIPLE_NODES DISTRIBUTED_FEATURE_NOT_SUPPORTED(); return; #else + if (id == NULL) + ereport(ERROR, + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), errmsg("CREATE BARRIER with no barrier name."))); + PGXCNodeAllHandles* coord_handles = NULL; + char* barrier_id = id; + bool isCsnBarrier = (strcmp(id, CSN_BARRIER_NAME) == 0); + int rc = 0; - PGXCNodeAllHandles* prepared_handles = NULL; - const char* barrier_id = NULL; - - elog(DEBUG1, "CREATE BARRIER request received"); + ereport(DEBUG1, (errmsg("CREATE BARRIER request received"))); /* * Ensure that we are a Coordinator and the request is not from another * coordinator */ if (!IS_PGXC_COORDINATOR) - ereport( - ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("CREATE BARRIER command must be sent to a Coordinator"))); - - if (IsConnFromCoord()) ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("CREATE BARRIER command is not expected from another Coordinator"))); + (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), errmsg("CREATE BARRIER command must be sent to a Coordinator"))); + + /* only superuser 
or operation-admin user can create barrier */ + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode to create barrier.")))); + + ereport(LOG, (errmsg("CREATE BARRIER <%s>", barrier_id))); /* - * Get a barrier id if the user has not supplied it + * Ensure all barrier commands execute on the first coordinator */ - barrier_id = generate_barrier_id(id); - - elog(DEBUG1, "CREATE BARRIER <%s>", barrier_id); + char* firstExecNode = find_first_exec_cn(); + bool isFirstNode = (strcmp(firstExecNode, g_instance.attr.attr_common.PGXCNodeName) == 0); + if (isFirstNode == false) { + ExecBarrierOnFirstExecCnNode(barrier_id, firstExecNode, completionTag); + return; + } /* * Step One. Prepare all Coordinators for upcoming barrier request */ - prepared_handles = PrepareBarrier(barrier_id); + coord_handles = get_handles(NIL, GetAllCoordNodes(), true); + PrepareBarrier(coord_handles, barrier_id, isSwitchoverBarrier); + + if (isCsnBarrier) + GetCsnBarrierName(barrier_id, isSwitchoverBarrier); /* * Step two. Issue BARRIER command to all involved components, including * Coordinators and Datanodes */ - ExecuteBarrier(barrier_id); + ExecuteBarrier(barrier_id, isSwitchoverBarrier); /* - * Step three. Inform Coordinators about a successfully completed barrier + * Step three. Inform Coordinators to release barrier lock */ - EndBarrier(prepared_handles, barrier_id); + EndBarrier(coord_handles, barrier_id, isSwitchoverBarrier); + + /* + * Step four. Inform Coordinators about a successfully completed barrier + */ + if (!isSwitchoverBarrier) + CommitBarrier(coord_handles, barrier_id); + /* Finally report the barrier to GTM to backup its restart point */ - ReportBarrierGTM((char*)barrier_id); + ReportBarrierGTM(barrier_id); /* Free the handles */ - pfree_pgxc_all_handles(prepared_handles); + pfree_pgxc_all_handles(coord_handles); - if (completionTag) { - int rc = snprintf_s(completionTag, COMPLETION_TAG_BUFSIZE, COMPLETION_TAG_BUFSIZE - 1, "BARRIER %s", barrier_id); - securec_check_ss(rc, "", ""); + if (completionTag != NULL) { + rc = sprintf_s(completionTag, COMPLETION_TAG_BUFSIZE, "BARRIER %s", barrier_id); + securec_check_ss(rc, "\0", "\0"); } - + ereport(u_sess->attr.attr_storage.HaModuleDebug ? LOG : DEBUG2, (errmsg("Create Barrier Success %s", barrier_id))); #endif } @@ -244,7 +380,7 @@ void DisasterRecoveryRequestBarrier(const char* id, bool isSwitchoverBarrier) LWLockAcquire(BarrierLock, LW_EXCLUSIVE); if (isSwitchoverBarrier == true) { - ereport(LOG, (errmsg("This is DISASTER RECOVERY SWITCHOVER BARRIER."))); + ereport(LOG, (errmsg("This is DISASTER RECOVERY SWITCHOVER BARRIER:<%s>.", id))); // The access of all users is not blocked temporarily. /* * Force a checkpoint before starting the switchover. 
This will force dirty @@ -256,9 +392,13 @@ void DisasterRecoveryRequestBarrier(const char* id, bool isSwitchoverBarrier) XLogBeginInsert(); XLogRegisterData((char*)id, strlen(id) + 1); - recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_CREATE, false, InvalidBktId, isSwitchoverBarrier); + recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_CREATE, InvalidBktId); XLogWaitFlush(recptr); - +#ifndef ENABLE_LITE_MODE + if (t_thrd.role == BARRIER_CREATOR) { + UpdateGlobalBarrierListOnMedia(id, g_instance.attr.attr_common.PGXCNodeName); + } +#endif SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); pg_atomic_init_u64(&g_instance.archive_obs_cxt.barrierLsn, recptr); rc = memcpy_s(g_instance.archive_obs_cxt.barrierName, MAX_BARRIER_ID_LENGTH, id, strlen(id)); @@ -293,28 +433,613 @@ void CreateHadrSwitchoverBarrier() XLogBeginInsert(); XLogRegisterData((char*)barrier_id, strlen(barrier_id) + 1); - recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_CREATE, false, InvalidBktId, true); + recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_SWITCHOVER, InvalidBktId); XLogWaitFlush(recptr); g_instance.streaming_dr_cxt.switchoverBarrierLsn = recptr; } -static void barrier_redo_pause() +void barrier_redo(XLogReaderState* record) { +#ifdef ENABLE_MULTIPLE_NODES + int rc = 0; + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + if (info == XLOG_BARRIER_COMMIT) + return; + Assert(!XLogRecHasAnyBlockRefs(record)); volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - while (IS_DISASTER_RECOVER_MODE) { - RedoInterruptCallBack(); - if ((strncmp((char *)walrcv->lastRecoveredBarrierId, (char *)walrcv->recoveryTargetBarrierId, - BARRIER_ID_WITHOUT_TIMESTAMP_LEN) < 0 || - strcmp((char *)walrcv->lastRecoveredBarrierId + BARRIER_ID_WITHOUT_TIMESTAMP_LEN + 1, - (char *)walrcv->recoveryTargetBarrierId + BARRIER_ID_WITHOUT_TIMESTAMP_LEN + 1) < 0) || - strcmp((char *)walrcv->lastRecoveredBarrierId, (char *)walrcv->recoveryStopBarrierId) == 0 || - strcmp((char *)walrcv->lastRecoveredBarrierId, (char *)walrcv->recoverySwitchoverBarrierId) == 0) { + /* Nothing to do */ + XLogRecPtr barrierLSN = record->EndRecPtr; + char* barrierId = XLogRecGetData(record); + if (IS_HADR_BARRIER(barrierId) && IS_DISASTER_RECOVER_MODE) { + ereport(WARNING, (errmsg("The HADR barrier %s is not for streaming standby cluster", barrierId))); + return; + } + SpinLockAcquire(&walrcv->mutex); + if (BARRIER_LE(barrierId, (char *)walrcv->lastRecoveredBarrierId)) { + ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("The new redo barrier is smaller than the last one."))); + } + SpinLockRelease(&walrcv->mutex); + + if ((strncmp((barrierId + BARRIER_ID_WITHOUT_TIMESTAMP_LEN), + HADR_SWITCHOVER_BARRIER_TAIL, strlen(HADR_SWITCHOVER_BARRIER_TAIL)) == 0) && + (info == XLOG_BARRIER_SWITCHOVER)) { + walrcv->lastSwitchoverBarrierLSN = barrierLSN; + ereport(LOG, (errmsg("GET SWITCHOVER BARRIER <%s>, LSN <%X/%X>", barrierId, + (uint32)(walrcv->lastSwitchoverBarrierLSN >> 32), + (uint32)(walrcv->lastSwitchoverBarrierLSN)))); + } + + SpinLockAcquire(&walrcv->mutex); + walrcv->lastRecoveredBarrierLSN = barrierLSN; + rc = strncpy_s((char *)walrcv->lastRecoveredBarrierId, MAX_BARRIER_ID_LENGTH, barrierId, MAX_BARRIER_ID_LENGTH - 1); + securec_check_ss(rc, "\0", "\0"); + SpinLockRelease(&walrcv->mutex); + + if (info == XLOG_BARRIER_CREATE) { + WriteBarrierLSNFile(barrierLSN, barrierId); + } + + if (!GTM_FREE_MODE && IS_CSN_BARRIER(barrierId)) { + CommitSeqNo csn = CsnBarrierNameGetCsn(barrierId); + 
UpdateXLogMaxCSN(csn); + if (t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo < csn + 1) + t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo = csn + 1; + } + + if (!IS_DISASTER_RECOVER_MODE || XLogRecPtrIsInvalid(t_thrd.xlog_cxt.minRecoveryPoint) || + XLByteLT(barrierLSN, t_thrd.xlog_cxt.minRecoveryPoint) || + t_thrd.shemem_ptr_cxt.ControlFile->backupEndRequired) { + return; + } + + if (g_instance.csn_barrier_cxt.barrier_hash_table != NULL) { + LWLockAcquire(g_instance.csn_barrier_cxt.barrier_hashtbl_lock, LW_EXCLUSIVE); + BarrierCacheDeleteBarrierId(barrierId); + LWLockRelease(g_instance.csn_barrier_cxt.barrier_hashtbl_lock); + ereport(LOG, (errmsg("remove barrierID %s from hash table", barrierId))); + } + + SetXLogReplayRecPtr(record->ReadRecPtr, record->EndRecPtr); + CheckRecoveryConsistency(); + UpdateMinRecoveryPoint(barrierLSN, false); + + barrier_redo_pause(barrierId); +#else + int rc = 0; + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + /* Nothing to do */ + XLogRecPtr barrierLSN = record->EndRecPtr; + char* barrierId = XLogRecGetData(record); + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + if (info == XLOG_BARRIER_COMMIT) + return; + if (BARRIER_LE(barrierId, (char *)walrcv->lastRecoveredBarrierId)) { + ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("The new redo barrier is smaller than the last one."))); + } + + if (BARRIER_EQ(barrierId, HADR_SWITCHOVER_BARRIER_ID) && (info == XLOG_BARRIER_SWITCHOVER)) { + walrcv->lastSwitchoverBarrierLSN = barrierLSN; + ereport(LOG, (errmsg("GET SWITCHOVER BARRIER <%s>, LSN <%X/%X>", barrierId, + (uint32)(walrcv->lastSwitchoverBarrierLSN >> 32), + (uint32)(walrcv->lastSwitchoverBarrierLSN)))); + } + SpinLockAcquire(&walrcv->mutex); + walrcv->lastRecoveredBarrierLSN = barrierLSN; + rc = strncpy_s((char *)walrcv->lastRecoveredBarrierId, MAX_BARRIER_ID_LENGTH, barrierId, MAX_BARRIER_ID_LENGTH - 1); + securec_check_ss(rc, "\0", "\0"); + SpinLockRelease(&walrcv->mutex); +#endif +} + +bool is_barrier_pausable(const char* id) +{ + if (IS_CSN_BARRIER(id)) + return true; + else + return false; +} + +#ifdef ENABLE_MULTIPLE_NODES + + +static void SaveAllNodeBarrierLsnInfo(const char* id, const PGXCNodeAllHandles* connHandles) +{ + int conn; + for (conn = 0; conn < connHandles->co_conn_count + connHandles->dn_conn_count; conn++) { + PGXCNodeHandle* handle = NULL; + if (conn < connHandles->co_conn_count) + handle = connHandles->coord_handles[conn]; + else + handle = connHandles->datanode_handles[conn - connHandles->co_conn_count]; + + if (handle == NULL || handle->inBuffer == NULL) { + ereport(WARNING, (errmsg("SaveAllNodeBarrierLsnInfo get handle is NULL, conn: %d", conn))); + g_instance.archive_obs_cxt.barrier_lsn_info[conn].barrierLsn = 0; + g_instance.archive_obs_cxt.barrier_lsn_info[conn].nodeoid = 0; + break; + } + + char* lsn = handle->inBuffer + 5 + strlen(id) + 1; + g_instance.archive_obs_cxt.barrier_lsn_info[conn].barrierLsn = atolsn(lsn); + g_instance.archive_obs_cxt.barrier_lsn_info[conn].nodeoid = handle->nodeoid; + } + errno_t errorno = memcpy_s(g_instance.archive_obs_cxt.barrierName, MAX_BARRIER_ID_LENGTH, id, strlen(id) + 1); + securec_check(errorno, "\0", "\0"); +} + +void barrier_desc(StringInfo buf, uint8 xl_info, char* rec) +{ + Assert(xl_info == XLOG_BARRIER_CREATE); + appendStringInfo(buf, "BARRIER %s", rec); +} + +static void SendBarrierRequestToCns(PGXCNodeAllHandles* coord_handles, const char* id, const char* cmd, char type) +{ + int conn; + int msglen; + 
int barrier_idlen;
+    errno_t rc;
+
+    /* Ensure the list of coordinator handles does not include the current node itself */
+    if (u_sess->pgxc_cxt.NumCoords == coord_handles->co_conn_count) {
+        ereport(ERROR,
+            (errcode(ERRCODE_OPERATE_FAILED),
+                errmsg("Failed to send %s request, "
+                       "get all cn_conn: %d", cmd,
+                    coord_handles->co_conn_count)));
+    }
+
+    for (conn = 0; conn < coord_handles->co_conn_count; conn++) {
+        PGXCNodeHandle* handle = coord_handles->coord_handles[conn];
+
+        /* Invalid connection state, return error */
+        if (handle->state != DN_CONNECTION_STATE_IDLE) {
+            ereport(ERROR,
+                (errcode(ERRCODE_OPERATE_FAILED),
+                    errmsg("Failed to send %s request "
+                           "to the node", cmd)));
+        }
+
+        barrier_idlen = strlen(id) + 1;
+
+        msglen = 4; /* for the length itself */
+        msglen += barrier_idlen;
+        msglen += 1; /* for barrier command itself */
+
+        /* msgType + msgLen */
+        ensure_out_buffer_capacity(1 + msglen, handle);
+
+        Assert(handle->outBuffer != NULL);
+        handle->outBuffer[handle->outEnd++] = 'b';
+        msglen = htonl(msglen);
+        rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, &msglen, sizeof(int));
+        securec_check(rc, "\0", "\0");
+        handle->outEnd += 4;
+
+        handle->outBuffer[handle->outEnd++] = type;
+
+        rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, id, barrier_idlen);
+        securec_check(rc, "\0", "\0");
+        handle->outEnd += barrier_idlen;
+
+        handle->state = DN_CONNECTION_STATE_QUERY;
+
+        pgxc_node_flush(handle);
+    }
+}
+
+static void CheckBarrierCommandStatus(PGXCNodeAllHandles* conn_handles, const char* id, const char* command, bool isCn,
+    bool isSwitchoverBarrier)
+{
+    int conn;
+    int count = isCn ? conn_handles->co_conn_count : conn_handles->dn_conn_count;
+    RemoteQueryState* combiner = NULL;
+    struct timeval timeout;
+    timeout.tv_sec = ERROR_CHECK_TIMEOUT;
+    timeout.tv_usec = 0;
+    ereport(DEBUG1, (errmsg("Check CREATE BARRIER <%s> %s command status", id, command)));
+
+    combiner = CreateResponseCombiner(count, COMBINE_TYPE_NONE);
+
+    for (conn = 0; conn < count; conn++) {
+        PGXCNodeHandle* handle = NULL;
+
+        if (isCn)
+            handle = conn_handles->coord_handles[conn];
+        else
+            handle = conn_handles->datanode_handles[conn];
+
+        if (pgxc_node_receive(1, &handle, (isCn && !isSwitchoverBarrier) ? &timeout : NULL))
+            ereport(
+                ERROR, (errcode(ERRCODE_OPERATE_FAILED), errmsg("Failed to receive response from the remote side")));
+        if (handle_response(handle, combiner) != RESPONSE_BARRIER_OK)
+            ereport(ERROR,
+                (errcode(ERRCODE_OPERATE_FAILED),
+                    errmsg("CREATE BARRIER command %s failed with error %s", command, handle->error)));
+    }
+    CloseCombiner(combiner);
+
+    ereport(DEBUG1,
+        (errmsg("Successfully completed CREATE BARRIER <%s> %s command on "
+                "all nodes",
+            id,
+            command)));
+}
+
+static bool TryBarrierLockWithTimeout()
+{
+    bool getLock = false;
+    if (LWLockHeldByMe(BarrierLock)) {
+        getLock = true;
+    } else {
+        TimestampTz start_time = GetCurrentTimestamp();
+        do {
+            if (LWLockConditionalAcquire(BarrierLock, LW_EXCLUSIVE)) {
+                getLock = true;
+                break;
+            }
+            pg_usleep(1000L);
+        } while (ComputeTimeStamp(start_time) < BARRIER_LOCK_TIMEOUT_MS);
+    }
+    return getLock;
+}
+
+/*
+ * Prepare all Coordinators for barrier. During this step all the Coordinators
+ * are informed to suspend any new 2PC transactions. The Coordinators should
+ * disable new 2PC transactions and then wait for the existing transactions to
+ * complete. Once all "in-flight" 2PC transactions are over, the Coordinators
+ * respond back.
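+ * (Wire format, as implemented by SendBarrierRequestToCns above: each request
+ * is a 'b' message carrying a 4-byte length that counts itself, a one-byte
+ * barrier command such as CREATE_BARRIER_PREPARE, and the NUL-terminated
+ * barrier id.)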
+ *
+ * That completes the first step in barrier generation.
+ *
+ * Any errors will be reported via ereport.
+ */
+static void PrepareBarrier(PGXCNodeAllHandles* coord_handles, const char* id, bool isSwitchoverBarrier)
+{
+    ereport(DEBUG1, (errmsg("Preparing Coordinators for BARRIER")));
+    /*
+     * Send a CREATE BARRIER PREPARE message to all the Coordinators. We should
+     * send an asynchronous request so that we can disable local commits and
+     * then wait for the remote Coordinators to finish the work
+     */
+    SendBarrierRequestToCns(coord_handles, id, isSwitchoverBarrier ? "CREATE SWITCHOVER BARRIER PREPARE" :
+        "CREATE BARRIER PREPARE", isSwitchoverBarrier ? CREATE_SWITCHOVER_BARRIER_PREPARE : CREATE_BARRIER_PREPARE);
+
+    /*
+     * Disable local commits
+     */
+
+    if (isSwitchoverBarrier) {
+        if (!LWLockHeldByMe(BarrierLock))
+            LWLockAcquire(BarrierLock, LW_EXCLUSIVE);
+    } else {
+        if (!TryBarrierLockWithTimeout())
+            ereport(ERROR,
+                (errcode(ERRCODE_OPERATE_NOT_SUPPORTED),
+                    errmsg("Wait Barrier lock timeout barrierId:%s", id)));
+    }
+
+    ereport(DEBUG2, (errmsg("Disabled 2PC commits originating at the driving Coordinator")));
+
+    /*
+     * Future: start a timer to cancel the barrier request in case of a timeout
+     */
+
+    /*
+     * Local in-flight commits are now over. Check status of the remote
+     * Coordinators
+     */
+    CheckBarrierCommandStatus(coord_handles, id, "PREPARE", true, isSwitchoverBarrier);
+}
+
+
+static void ExecuteBarrierOnNodes(const char* id, bool isSwitchoverBarrier, PGXCNodeAllHandles* conn_handles, bool isCn)
+{
+    int msglen;
+    int barrier_idlen;
+    errno_t rc = 0;
+    /*
+     * Send a CREATE BARRIER request to all nodes
+     */
+    int handleNum = isCn ? conn_handles->co_conn_count : conn_handles->dn_conn_count;
+    for (int conn = 0; conn < handleNum; conn++) {
+        PGXCNodeHandle* handle = NULL;
+
+        if (isCn)
+            handle = conn_handles->coord_handles[conn];
+        else
+            handle = conn_handles->datanode_handles[conn];
+
+        /* Invalid connection state, return error */
+        if (handle->state != DN_CONNECTION_STATE_IDLE)
+            ereport(ERROR,
+                (errcode(ERRCODE_OPERATE_FAILED),
+                    errmsg("Failed to send CREATE BARRIER EXECUTE request "
+                           "to the node")));
+
+        barrier_idlen = strlen(id) + 1;
+
+        msglen = 4; /* for the length itself */
+        msglen += barrier_idlen;
+        msglen += 1; /* for barrier command itself */
+
+        /* msgType + msgLen */
+        ensure_out_buffer_capacity(1 + msglen, handle);
+
+        Assert(handle->outBuffer != NULL);
+        handle->outBuffer[handle->outEnd++] = 'b';
+        msglen = htonl(msglen);
+        rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, &msglen, sizeof(int));
+        securec_check(rc, "\0", "\0");
+        handle->outEnd += 4;
+
+        if (isSwitchoverBarrier) {
+            handle->outBuffer[handle->outEnd++] = CREATE_SWITCHOVER_BARRIER_EXECUTE;
+        } else {
+            handle->outBuffer[handle->outEnd++] = CREATE_BARRIER_EXECUTE;
+        }
+
+        rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, id, barrier_idlen);
+        securec_check(rc, "\0", "\0");
+        handle->outEnd += barrier_idlen;
+
+        handle->state = DN_CONNECTION_STATE_QUERY;
+        pgxc_node_flush(handle);
+    }
+
+    CheckBarrierCommandStatus(conn_handles, id, "EXECUTE", isCn, isSwitchoverBarrier);
+}
+
+
+/*
+ * Execute the barrier command on all the components, including Datanodes and
+ * Coordinators.
+ */ +static void ExecuteBarrier(const char* id, bool isSwitchoverBarrier) +{ + List* barrierDataNodeList = GetAllDataNodes(); + List* barrierCoordList = GetAllCoordNodes(); + PGXCNodeAllHandles* conn_handles = NULL; + XLogRecPtr recptr; + char* log_id = (char*)id; + int connCnt; + int rc; + conn_handles = get_handles(barrierDataNodeList, barrierCoordList, false); + connCnt = conn_handles->co_conn_count + conn_handles->dn_conn_count; + + ereport(DEBUG1, + (errmsg("Sending CREATE BARRIER <%s> EXECUTE message to " + "Datanodes and Coordinator", + id))); + // first write barrier xlog to all dns + ExecuteBarrierOnNodes(id, isSwitchoverBarrier, conn_handles, false); + // then write barrier xlog to all other cns + ExecuteBarrierOnNodes(id, isSwitchoverBarrier, conn_handles, true); + + if (unlikely(t_thrd.proc->workingVersionNum < BACKUP_SLOT_VERSION_NUM)) { + replace_barrier_id_compatible(id, &log_id); + } + + if (IS_CSN_BARRIER(id)) { + CommitSeqNo csn = CsnBarrierNameGetCsn(id); + UpdateNextMaxKnownCSN(csn); + } + + if (isSwitchoverBarrier == true) { + ereport(LOG, (errmsg("Sending DISASTER RECOVERY SWITCHOVER BARRIER:<%s>.", id))); + // The access of all users is not blocked temporarily. + /* + * Force a checkpoint before starting the switchover. This will force dirty + * buffers out to disk, to ensure source database is up-to-date on disk + */ + RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT); + } + + /* + * Also WAL log the BARRIER locally and flush the WAL buffers to disk + */ + XLogBeginInsert(); + XLogRegisterData((char*)log_id, strlen(log_id) + 1); + + if (u_sess->attr.attr_storage.enable_cbm_tracking && IS_ROACH_BARRIER(id)) { + LWLockAcquire(CBMParseXlogLock, LW_EXCLUSIVE); + } + recptr = XLogInsert(RM_BARRIER_ID, isSwitchoverBarrier? 
XLOG_BARRIER_SWITCHOVER : XLOG_BARRIER_CREATE,
+        InvalidBktId);
+    XLogWaitFlush(recptr);
+
+    if (IS_CSN_BARRIER(id) && !isSwitchoverBarrier) {
+        SpinLockAcquire(&g_instance.streaming_dr_cxt.mutex);
+        rc = strncpy_s((char *)g_instance.streaming_dr_cxt.currentBarrierId, MAX_BARRIER_ID_LENGTH,
+            id, MAX_BARRIER_ID_LENGTH - 1);
+        securec_check(rc, "\0", "\0");
+        SpinLockRelease(&g_instance.streaming_dr_cxt.mutex);
+        ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("refresh currentBarrier, barrier id %s", id)));
+    }
+
+    if (IS_ROACH_BARRIER(id)) {
+        if (u_sess->attr.attr_storage.enable_cbm_tracking) {
+            (void)ForceTrackCBMOnce(recptr, 0, false, true);
+        }
+        WriteBarrierLSNFile(recptr, id);
+    }
+
+    /* Only obs-based disaster recovery needs the following processing */
+    if (g_instance.archive_obs_cxt.archive_slot_num != 0 && g_instance.archive_obs_cxt.barrier_lsn_info != NULL) {
+        if (IS_HADR_BARRIER(id) || IS_CSN_BARRIER(id)) {
+            SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock);
+            SaveAllNodeBarrierLsnInfo(id, conn_handles);
+            g_instance.archive_obs_cxt.barrier_lsn_info[connCnt].barrierLsn = recptr;
+            SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock);
+        }
+
+        if (t_thrd.role == BARRIER_CREATOR && !isSwitchoverBarrier) {
+            UpdateGlobalBarrierListOnMedia(id, g_instance.attr.attr_common.PGXCNodeName);
+        }
+    }
+
+    if (isSwitchoverBarrier) {
+        g_instance.streaming_dr_cxt.switchoverBarrierLsn = recptr;
+    }
+
+    list_free(barrierCoordList);
+    list_free(barrierDataNodeList);
+    pfree_pgxc_all_handles(conn_handles);
+    if (unlikely(log_id != id)) {
+        pfree_ext(log_id);
+    }
+}
+
+void CleanupBarrierLock()
+{
+    List* barrierCoordList = GetAllCoordNodes();
+    PGXCNodeAllHandles* conn_handles = NULL;
+    const char* id = "cleanup";
+
+    conn_handles = get_handles(NIL, barrierCoordList, false);
+
+    if (LWLockHeldByMe(BarrierLock))
+        LWLockRelease(BarrierLock);
+
+    SendBarrierRequestToCns(conn_handles, id, "CREATE BARRIER CLEANUP", CREATE_BARRIER_END);
+
+    CheckBarrierCommandStatus(conn_handles, id, "CLEANUP", true, false);
+    pfree_pgxc_all_handles(conn_handles);
+}
+
+
+static void CommitBarrier(PGXCNodeAllHandles* prepared_handles, const char* id)
+{
+    SendBarrierRequestToCns(prepared_handles, id, "CREATE BARRIER COMMIT", CREATE_BARRIER_COMMIT);
+    CheckBarrierCommandStatus(prepared_handles, id, "COMMIT", true, false);
+    XLogBeginInsert();
+    XLogRegisterData((char*)id, strlen(id) + 1);
+
+    XLogRecPtr recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_COMMIT, InvalidBktId);
+    XLogWaitFlush(recptr);
+}
+
+/*
+ * Resume 2PC commits on the local as well as remote Coordinators.
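+ * The local BarrierLock acquired in PrepareBarrier is released first; the
+ * remote Coordinators are then sent CREATE_BARRIER_END and each response is
+ * checked for RESPONSE_BARRIER_OK.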
+ */ +static void EndBarrier(PGXCNodeAllHandles* prepared_handles, const char* id, bool isSwitchoverBarrier) +{ + /* Resume 2PC locally */ + LWLockRelease(BarrierLock); + + SendBarrierRequestToCns(prepared_handles, id, "CREATE BARRIER END", CREATE_BARRIER_END); + + CheckBarrierCommandStatus(prepared_handles, id, "END", true, isSwitchoverBarrier); +} + +static void WriteBarrierLSNFile(XLogRecPtr barrier_lsn, const char* barrier_id) +{ + char filename[MAXPGPATH] = {0}; + const char *prefix = NULL; + errno_t errorno = EOK; + FILE* fp = NULL; + + if (strncmp(barrier_id, ROACH_FULL_BAK_PREFIX, strlen(ROACH_FULL_BAK_PREFIX)) == 0) { + prefix = ROACH_FULL_BAK_PREFIX; + } else if (strncmp(barrier_id, ROACH_INC_BAK_PREFIX, strlen(ROACH_INC_BAK_PREFIX)) == 0) { + prefix = ROACH_INC_BAK_PREFIX; + } else { + return; + } + + errorno = snprintf_s(filename, sizeof(filename), sizeof(filename) - 1, "%s.%s", BARRIER_LSN_FILE, prefix); + securec_check_ss(errorno, "\0", "\0"); + + fp = AllocateFile(filename, PG_BINARY_W); + + if (fp == NULL) + ereport(ERROR, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", filename))); + + if (fprintf(fp, "%08X/%08X", (uint32)(barrier_lsn >> 32), (uint32)barrier_lsn) != BARRIER_LSN_FILE_LENGTH || + fflush(fp) != 0 || pg_fsync(fileno(fp)) != 0 || ferror(fp) || FreeFile(fp)) + ereport(ERROR, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", filename))); +} + +/* + * v5r1c20 barrier id: gs_roach_full_backupkey/gs_roach_inc_backupkey + * before: roach_barrier_backupkey + * To be compatible, during upgrade to v5r1c20, we still use old barrier name. + */ +void replace_barrier_id_compatible(const char* id, char** log_id) { + const char *prefix = NULL; + char *tmp_id = NULL; + int rc; + int len; + + if (strncmp(id, ROACH_FULL_BAK_PREFIX, strlen(ROACH_FULL_BAK_PREFIX)) == 0) { + prefix = ROACH_FULL_BAK_PREFIX; + } else if (strncmp(id, ROACH_INC_BAK_PREFIX, strlen(ROACH_INC_BAK_PREFIX)) == 0) { + prefix = ROACH_INC_BAK_PREFIX; + } else { + return; + } + + len = strlen(id) + strlen("roach_barrier"); + tmp_id = (char *)palloc0(len); + rc = snprintf_s(tmp_id, len, len - 1, "%s%s", "roach_barrier", id + strlen(prefix)); + securec_check_ss(rc, "", ""); + + *log_id = tmp_id; +} + +static void RequestXLogFromStream() +{ + XLogRecPtr replayEndPtr = GetXLogReplayRecPtr(NULL); + if (t_thrd.xlog_cxt.is_cascade_standby && (CheckForSwitchoverTrigger() || CheckForFailoverTrigger())) { + HandleCascadeStandbyPromote(&replayEndPtr); + return; + } + if (!WalRcvInProgress() && g_instance.pid_cxt.WalReceiverPID == 0) { + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + SpinLockAcquire(&walrcv->mutex); + walrcv->receivedUpto = 0; + SpinLockRelease(&walrcv->mutex); + if (t_thrd.xlog_cxt.readFile >= 0) { + close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + } + + RequestXLogStreaming(&replayEndPtr, t_thrd.xlog_cxt.PrimaryConnInfo, REPCONNTARGET_PRIMARY, + u_sess->attr.attr_storage.PrimarySlotName); + } + +} + +static void barrier_redo_pause(char* barrierId) +{ + if (!is_barrier_pausable(barrierId)) { + return; + } + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + + while (true) { + SpinLockAcquire(&walrcv->mutex); + if (BARRIER_LT((char *)walrcv->lastRecoveredBarrierId, (char *)walrcv->recoveryTargetBarrierId) || + BARRIER_LE((char *)walrcv->lastRecoveredBarrierId, (char *)walrcv->recoveryStopBarrierId)|| + BARRIER_EQ((char *)walrcv->lastRecoveredBarrierId, (char *)walrcv->recoverySwitchoverBarrierId)) { + 
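/* replay is still behind the target barrier, or a stop/switchover
+             * barrier has been reached: clear the pause flag and resume redo */
+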
walrcv->isPauseByTargetBarrier = false;
+            SpinLockRelease(&walrcv->mutex);
             break;
         } else {
+            walrcv->isPauseByTargetBarrier = true;
+            SpinLockRelease(&walrcv->mutex);
             pg_usleep(1000L);
-            update_recovery_barrier();
+            RedoInterruptCallBack();
+            if (IS_OBS_DISASTER_RECOVER_MODE) {
+                update_recovery_barrier();
+            } else if (IS_DISASTER_RECOVER_MODE) {
+                RequestXLogFromStream();
+            }
             ereport(DEBUG4,
                 ((errmodule(MOD_REDO),
                     errcode(ERRCODE_LOG),
                     errmsg("Sleeping to get a new target global barrier %s;"
                            "lastRecoveredBarrierId is %s; lastRecoveredBarrierLSN is %X/%X;"
@@ -331,359 +1056,4 @@
         }
     }
 }
-
-void barrier_redo(XLogReaderState* record)
-{
-    ereport(LOG, (errmsg("barrier_redo begin.")));
-    int rc = 0;
-    volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv;
-    /* Nothing to do */
-    XLogRecPtr barrierLSN = record->EndRecPtr;
-    char* barrierId = XLogRecGetData(record);
-
-    if (strcmp(barrierId, HADR_SWITCHOVER_BARRIER_ID) == 0) {
-        walrcv->lastSwitchoverBarrierLSN = barrierLSN;
-        ereport(LOG, (errmsg("GET SWITCHOVER BARRIER <%s>, LSN <%X/%X>", barrierId,
-            (uint32)(walrcv->lastSwitchoverBarrierLSN >> 32),
-            (uint32)(walrcv->lastSwitchoverBarrierLSN))));
-        return;
-    }
-
-    walrcv->lastRecoveredBarrierLSN = barrierLSN;
-    if (strcmp(barrierId, (char *)walrcv->lastRecoveredBarrierId) <= 0) {
-        ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-            errmsg("The new redo barrier is smaller than the last one.")));
-    }
-
-    rc = strncpy_s((char *)walrcv->lastRecoveredBarrierId, MAX_BARRIER_ID_LENGTH, barrierId, MAX_BARRIER_ID_LENGTH - 1);
-    securec_check_ss(rc, "\0", "\0");
-
-    if (XLByteLE(barrierLSN, t_thrd.xlog_cxt.minRecoveryPoint)) {
-        return;
-    }
-    UpdateMinRecoveryPoint(barrierLSN, false);
-    barrier_redo_pause();
-    return;
-}
-
-#ifdef ENABLE_MULTIPLE_NODES
-// PG source code that we removed, or dead code that fails to compile
-
-static const char* generate_barrier_id(const char* id)
-{
-    static const int LEN_GEN_ID = 1024;
-    char genid[LEN_GEN_ID];
-    TimestampTz ts;
-
-    /*
-     * If the caller can passed a NULL value, generate an id which is
-     * guaranteed to be unique across the cluster. We use a combination of
-     * the Coordinator node id and current timestamp.
- */ - - if (id) - return id; - - ts = GetCurrentTimestamp(); -#ifdef HAVE_INT64_TIMESTAMP - int rc = snprintf_s(genid, LEN_GEN_ID, LEN_GEN_ID - 1, "%s_" INT64_FORMAT, PGXCNodeName, ts); -#else - int rc = snprintf_s(genid, LEN_GEN_ID, LEN_GEN_ID - 1, "%s_%.0f", PGXCNodeName, ts); #endif - securec_check_ss(rc, "", ""); - return pstrdup(genid); -} - -void barrier_desc(StringInfo buf, uint8 xl_info, char* rec) -{ - Assert(xl_info == XLOG_BARRIER_CREATE); - appendStringInfo(buf, "BARRIER %s", rec); -} - -static PGXCNodeAllHandles* SendBarrierPrepareRequest(List* coords, const char* id) -{ - PGXCNodeAllHandles* coord_handles; - int conn; - int msglen; - int barrier_idlen; - - coord_handles = get_handles(NIL, coords, true); - - for (conn = 0; conn < coord_handles->co_conn_count; conn++) { - PGXCNodeHandle* handle = coord_handles->coord_handles[conn]; - - /* Invalid connection state, return error */ - if (handle->state != DN_CONNECTION_STATE_IDLE) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Failed to send CREATE BARRIER PREPARE request " - "to the node"))); - - barrier_idlen = strlen(id) + 1; - - msglen = 4; /* for the length itself */ - msglen += barrier_idlen; - msglen += 1; /* for barrier command itself */ - - /* msgType + msgLen */ - if (ensure_out_buffer_capacity(handle->outEnd + 1 + msglen, handle) != 0) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Out of memory"))); - } - - handle->outBuffer[handle->outEnd++] = 'b'; - msglen = htonl(msglen); - int rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, &msglen, 4); - securec_check(rc, "\0", "\0"); - handle->outEnd += 4; - - handle->outBuffer[handle->outEnd++] = CREATE_BARRIER_PREPARE; - - rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, id, barrier_idlen); - securec_check(rc, "\0", "\0"); - handle->outEnd += barrier_idlen; - - handle->state = DN_CONNECTION_STATE_QUERY; - - pgxc_node_flush(handle); - } - - return coord_handles; -} - -static void CheckBarrierCommandStatus(PGXCNodeAllHandles* conn_handles, const char* id, const char* command) -{ - int conn; - int count = conn_handles->co_conn_count + conn_handles->dn_conn_count; - - elog(DEBUG1, "Check CREATE BARRIER <%s> %s command status", id, command); - - for (conn = 0; conn < count; conn++) { - PGXCNodeHandle* handle = NULL; - - if (conn < conn_handles->co_conn_count) - handle = conn_handles->coord_handles[conn]; - else - handle = conn_handles->datanode_handles[conn - conn_handles->co_conn_count]; - - if (pgxc_node_receive(1, &handle, NULL)) - ereport( - ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Failed to receive response from the remote side"))); - - if (handle_response(handle, NULL) != RESPONSE_BARRIER_OK) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("CREATE BARRIER PREPARE command failed " - "with error %s", - handle->error))); - } - - elog(DEBUG1, - "Successfully completed CREATE BARRIER <%s> %s command on " - "all nodes", - id, - command); -} - -static void SendBarrierEndRequest(PGXCNodeAllHandles* coord_handles, const char* id) -{ - int conn; - int msglen; - int barrier_idlen; - - elog(DEBUG1, "Sending CREATE BARRIER <%s> END command to all Coordinators", id); - - for (conn = 0; conn < coord_handles->co_conn_count; conn++) { - PGXCNodeHandle* handle = coord_handles->coord_handles[conn]; - - /* Invalid connection state, return error */ - if (handle->state != DN_CONNECTION_STATE_IDLE) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Failed to send 
CREATE BARRIER PREPARE request " - "to the node"))); - - barrier_idlen = strlen(id) + 1; - - msglen = 4; /* for the length itself */ - msglen += barrier_idlen; - msglen += 1; /* for barrier command itself */ - - /* msgType + msgLen */ - if (ensure_out_buffer_capacity(handle->outEnd + 1 + msglen, handle) != 0) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Out of memory"))); - } - - handle->outBuffer[handle->outEnd++] = 'b'; - msglen = htonl(msglen); - int rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, &msglen, 4); - securec_check(rc, "\0", "\0"); - handle->outEnd += 4; - - handle->outBuffer[handle->outEnd++] = CREATE_BARRIER_END; - - rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, id, barrier_idlen); - securec_check(rc, "\0", "\0"); - handle->outEnd += barrier_idlen; - - handle->state = DN_CONNECTION_STATE_QUERY; - pgxc_node_flush(handle); - } -} - -/* - * Prepare all Coordinators for barrier. During this step all the Coordinators - * are informed to suspend any new 2PC transactions. The Coordinators should - * disable new 2PC transactions and then wait for the existing transactions to - * complete. Once all "in-flight" 2PC transactions are over, the Coordinators - * respond back. - * - * That completes the first step in barrier generation - * - * Any errors will be reported via ereport. - */ -static PGXCNodeAllHandles* PrepareBarrier(const char* id) -{ - PGXCNodeAllHandles* coord_handles = NULL; - - elog(DEBUG1, "Preparing Coordinators for BARRIER"); - - /* - * Send a CREATE BARRIER PREPARE message to all the Coordinators. We should - * send an asynchronous request so that we can disable local commits and - * then wait for the remote Coordinators to finish the work - */ - coord_handles = SendBarrierPrepareRequest(GetAllCoordNodes(), id); - - /* - * Disable local commits - */ - LWLockAcquire(BarrierLock, LW_EXCLUSIVE); - - elog(DEBUG2, "Disabled 2PC commits originating at the driving Coordinator"); - - /* - * TODO Start a timer to cancel the barrier request in case of a timeout - */ - - /* - * Local in-flight commits are now over. Check status of the remote - * Coordinators - */ - CheckBarrierCommandStatus(coord_handles, id, "PREPARE"); - - return coord_handles; -} - -/* - * Execute the barrier command on all the components, including Datanodes and - * Coordinators. 
- */ -static void ExecuteBarrier(const char* id) -{ - List* barrierDataNodeList = GetAllDataNodes(); - List* barrierCoordList = GetAllCoordNodes(); - PGXCNodeAllHandles* conn_handles; - int conn; - int msglen; - int barrier_idlen; - - conn_handles = get_handles(barrierDataNodeList, barrierCoordList, false); - - elog(DEBUG1, - "Sending CREATE BARRIER <%s> EXECUTE message to " - "Datanodes and Coordinator", - id); - /* - * Send a CREATE BARRIER request to all the Datanodes and the Coordinators - */ - for (conn = 0; conn < conn_handles->co_conn_count + conn_handles->dn_conn_count; conn++) { - PGXCNodeHandle* handle = NULL; - - if (conn < conn_handles->co_conn_count) - handle = conn_handles->coord_handles[conn]; - else - handle = conn_handles->datanode_handles[conn - conn_handles->co_conn_count]; - - /* Invalid connection state, return error */ - if (handle->state != DN_CONNECTION_STATE_IDLE) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Failed to send CREATE BARRIER EXECUTE request " - "to the node"))); - - barrier_idlen = strlen(id) + 1; - - msglen = 4; /* for the length itself */ - msglen += barrier_idlen; - msglen += 1; /* for barrier command itself */ - - /* msgType + msgLen */ - if (ensure_out_buffer_capacity(handle->outEnd + 1 + msglen, handle) != 0) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("Out of memory"))); - } - - handle->outBuffer[handle->outEnd++] = 'b'; - msglen = htonl(msglen); - int rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, &msglen, 4); - securec_check(rc, "\0", "\0"); - handle->outEnd += 4; - - handle->outBuffer[handle->outEnd++] = CREATE_BARRIER_EXECUTE; - - rc = memcpy_s(handle->outBuffer + handle->outEnd, handle->outSize - handle->outEnd, id, barrier_idlen); - securec_check(rc, "\0", "\0"); - handle->outEnd += barrier_idlen; - - handle->state = DN_CONNECTION_STATE_QUERY; - pgxc_node_flush(handle); - } - - CheckBarrierCommandStatus(conn_handles, id, "EXECUTE"); - - pfree_pgxc_all_handles(conn_handles); - - /* - * Also WAL log the BARRIER locally and flush the WAL buffers to disk - */ - { - XLogRecData rdata[1]; - XLogRecPtr recptr; - - rdata[0].data = (char*)id; - rdata[0].len = strlen(id) + 1; - rdata[0].buffer = InvalidBuffer; - rdata[0].next = NULL; - - recptr = XLogInsert(RM_BARRIER_ID, XLOG_BARRIER_CREATE, rdata); - XLogWaitFlush(recptr); - } -} - -/* - * Resume 2PC commits on the local as well as remote Coordinators. 
- */ -static void EndBarrier(PGXCNodeAllHandles* prepared_handles, const char* id) -{ - /* Resume 2PC locally */ - LWLockRelease(BarrierLock); - - SendBarrierEndRequest(prepared_handles, id); - - CheckBarrierCommandStatus(prepared_handles, id, "END"); -} - -static void WriteBarrierLSNFile(XLogRecPtr barrierLSN) -{ - FILE* fp = NULL; - - fp = AllocateFile(BARRIER_LSN_FILE, PG_BINARY_W); - if (fp == NULL) - ereport(ERROR, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", BARRIER_LSN_FILE))); - - if (fprintf(fp, "%08X/%08X", (uint32)(barrierLSN >> 32), (uint32)barrierLSN) != BARRIER_LSN_FILE_LENGTH || - fflush(fp) != 0 || pg_fsync(fileno(fp)) != 0 || ferror(fp) || FreeFile(fp)) - ereport(ERROR, (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", BARRIER_LSN_FILE))); -} - -#endif - diff --git a/src/common/backend/pgxc_single/copy/remotecopy.cpp b/src/common/backend/pgxc_single/copy/remotecopy.cpp index 197974565..e84d7f47b 100644 --- a/src/common/backend/pgxc_single/copy/remotecopy.cpp +++ b/src/common/backend/pgxc_single/copy/remotecopy.cpp @@ -68,8 +68,8 @@ void RemoteCopy_GetRelationLoc(RemoteCopyData* state, Relation rel, List* attnum } } - state->idx_dist_by_col = -1; - if (state->rel_loc && state->rel_loc->partAttrNum != 0) { + state->idx_dist_by_col = NULL; + if (state->rel_loc && state->rel_loc->partAttrNum != NULL) { /* * Find the column used as key for data distribution. * First scan attributes of tuple descriptor with the list @@ -79,15 +79,19 @@ void RemoteCopy_GetRelationLoc(RemoteCopyData* state, Relation rel, List* attnum */ if (attnums != NIL) { ListCell* cur = NULL; - foreach (cur, attnums) { + foreach (cur, state->rel_loc->partAttrNum) { int attnum = lfirst_int(cur); - if (state->rel_loc->partAttrNum == attnum) { - state->idx_dist_by_col = attnum - 1; - break; + if (list_member_int(attnums, attnum)) { + state->idx_dist_by_col = lappend_int(state->idx_dist_by_col, attnum - 1); } } } else { - state->idx_dist_by_col = state->rel_loc->partAttrNum - 1; + ListCell* cell = NULL; + AttrNumber num; + foreach (cell, state->rel_loc->partAttrNum) { + num = lfirst_int(cell); + state->idx_dist_by_col = lappend_int(state->idx_dist_by_col, num - 1); + } } } @@ -126,15 +130,7 @@ void RemoteCopy_BuildStatement( quote_qualified_identifier(get_namespace_name(RelationGetNamespace(rel)), RelationGetRelationName(rel))); if (attnamelist != NIL) { - ListCell* cell = NULL; - ListCell* prev = NULL; appendStringInfoString(&state->query_buf, " ("); - foreach (cell, attnamelist) { - if (prev != NULL) - appendStringInfoString(&state->query_buf, ", "); - appendStringInfoString(&state->query_buf, quote_identifier(strVal(lfirst(cell)))); - prev = cell; - } /* * For COPY FROM, we need to append unspecified attributes that have @@ -151,14 +147,25 @@ void RemoteCopy_BuildStatement( Expr* defexpr = (Expr*)build_column_default(rel, attnum); if (defexpr && ((!pgxc_is_expr_shippable(expression_planner(defexpr), NULL)) || (list_member_int(state->idx_dist_by_col, attnum - 1)))) { - appendStringInfoString(&state->query_buf, ", "); appendStringInfoString( &state->query_buf, quote_identifier(NameStr(tupDesc->attrs[attnum - 1]->attname))); + appendStringInfoString(&state->query_buf, ", "); } } } } + ListCell* cell = NULL; + foreach (cell, attnamelist) { + appendStringInfoString(&state->query_buf, quote_identifier(strVal(lfirst(cell)))); + appendStringInfoString(&state->query_buf, ", "); + } + int blankPos = 1; + int delimPos = 1; + state->query_buf.data[state->query_buf.len - 
blankPos] = '\0'; + state->query_buf.data[state->query_buf.len - blankPos - delimPos] = '\0'; + state->query_buf.len -= (blankPos + delimPos); + appendStringInfoChar(&state->query_buf, ')'); } @@ -288,6 +295,11 @@ void RemoteCopy_BuildStatement( if (options->rco_fill_missing_fields) appendStringInfoString(&state->query_buf, " FILL_MISSING_FIELDS"); + + if (options->transform_query_string) { + appendStringInfoChar(&state->query_buf, ' '); + appendStringInfoString(&state->query_buf, options->transform_query_string); + } } /* @@ -301,15 +313,24 @@ RemoteCopyOptions* makeRemoteCopyOptions(void) return NULL; #else RemoteCopyOptions* res = (RemoteCopyOptions*)palloc(sizeof(RemoteCopyOptions)); - res->rco_binary = false; + res->rco_format = FORMAT_UNKNOWN; res->rco_oids = false; - res->rco_csv_mode = false; + res->rco_without_escaping = false; res->rco_delim = NULL; res->rco_null_print = NULL; res->rco_quote = NULL; res->rco_escape = NULL; + res->rco_eol = NULL; res->rco_force_quote = NIL; res->rco_force_notnull = NIL; + res->rco_eol_type = EOL_NL; + res->rco_date_format = NULL; + res->rco_time_format = NULL; + res->rco_timestamp_format = NULL; + res->rco_smalldatetime_format = NULL; + res->rco_compatible_illegal_chars = false; + res->rco_ignore_extra_data = false; + res->rco_fill_missing_fields = false; return res; #endif } diff --git a/src/common/backend/pgxc_single/nodemgr/groupmgr.cpp b/src/common/backend/pgxc_single/nodemgr/groupmgr.cpp index 677e8bf31..e290bd3d7 100644 --- a/src/common/backend/pgxc_single/nodemgr/groupmgr.cpp +++ b/src/common/backend/pgxc_single/nodemgr/groupmgr.cpp @@ -59,6 +59,7 @@ #include "storage/proc.h" #include "utils/elog.h" #include "utils/snapmgr.h" +#include "utils/knl_relcache.h" #define CHAR_BUF_SIZE 512 #define BUCKET_MAP_SIZE 32 @@ -2691,7 +2692,7 @@ static void PgxcGroupSetSeqNodes(const char* group_name, bool allnodes) appendStringInfoString(&str, query); } - ReleaseCatCache(tp); + ReleaseSysCache(tp); relation_close(relseq, AccessShareLock); if (seqName != relName) @@ -3581,7 +3582,7 @@ char* PgxcGroupGetStmtExecGroupInRedis() static void BucketMapCacheAddEntry(Oid groupoid, Datum groupanme_datum, Datum bucketmap_datum, ItemPointer ctid) { /* BucketmapCache and its underlying element is allocated in u_sess.MEMORY_CONTEXT_EXECUTOR */ - MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)); + MemoryContext oldcontext = MemoryContextSwitchTo(LocalGBucketMapMemCxt()); /* Create bucketmap element */ BucketMapCache* bmc = (BucketMapCache*)palloc0(sizeof(BucketMapCache)); @@ -3598,7 +3599,7 @@ static void BucketMapCacheAddEntry(Oid groupoid, Datum groupanme_datum, Datum bu elog(DEBUG2, "Add [%s][%u]'s bucketmap to BucketMapCache", bmc->groupname, groupoid); /* Insert element into bucketmap */ - u_sess->relcache_cxt.g_bucketmap_cache = lappend(u_sess->relcache_cxt.g_bucketmap_cache, bmc); + AppendLocalRelCacheGBucketMapCache((ListCell *)bmc); /* Swith back to original memory context */ MemoryContextSwitchTo(oldcontext); @@ -3616,7 +3617,7 @@ static void BucketMapCacheAddEntry(Oid groupoid, Datum groupanme_datum, Datum bu */ static void BucketMapCacheRemoveEntry(Oid groupoid) { - if (u_sess->relcache_cxt.g_bucketmap_cache == NIL) { + if (LocalRelCacheGBucketMapCache() == NIL) { /* * We may run into here, when a new node group is drop but no table access * of its table happened. 
@@ -3631,7 +3632,7 @@ static void BucketMapCacheRemoveEntry(Oid groupoid) ListCell* next = NULL; bool found = false; - for (cell = list_head(u_sess->relcache_cxt.g_bucketmap_cache); cell; cell = next) { + for (cell = list_head(LocalRelCacheGBucketMapCache()); cell; cell = next) { BucketMapCache* bmc = (BucketMapCache*)lfirst(cell); next = lnext(cell); if (bmc->groupoid == groupoid) { @@ -3642,8 +3643,7 @@ static void BucketMapCacheRemoveEntry(Oid groupoid) pfree_ext(bmc->groupname); /* Remove it from global bucketmap cache list */ - u_sess->relcache_cxt.g_bucketmap_cache = - list_delete_cell(u_sess->relcache_cxt.g_bucketmap_cache, cell, prev); + DeteleLocalRelCacheGBucketMapCache(cell, prev); pfree_ext(bmc); break; } else { @@ -3704,7 +3704,7 @@ static void BucketMapCacheUpdate(BucketMapCache* bmc) /* * Name: ClearInvalidBucketMapCache() * - * Brief: clear invalid cell in t_thrd.pgxc_cxt.g_bucketmap_cache + * Brief: clear invalid cell in LocalRelCacheGBucketMapCache * * Parameters: * none @@ -3717,7 +3717,7 @@ static void ClearInvalidBucketMapCache(void) ListCell* cell = NULL; ListCell* next = NULL; - for (cell = list_head(u_sess->relcache_cxt.g_bucketmap_cache); cell; cell = next) { + for (cell = list_head(LocalRelCacheGBucketMapCache()); cell; cell = next) { next = lnext(cell); BucketMapCache* bmc = (BucketMapCache*)lfirst(cell); HeapTuple tuple = SearchSysCache1(PGXCGROUPOID, ObjectIdGetDatum(bmc->groupoid)); @@ -3728,8 +3728,8 @@ static void ClearInvalidBucketMapCache(void) } ReleaseSysCache(tuple); } - if ((unsigned int)list_length(u_sess->relcache_cxt.g_bucketmap_cache) >= u_sess->relcache_cxt.max_bucket_map_size) { - u_sess->relcache_cxt.max_bucket_map_size *= 2; + if ((unsigned int)list_length(LocalRelCacheGBucketMapCache()) >= LocalRelCacheMaxBucketMapSize()) { + EnlargeLocalRelCacheMaxBucketMapSize(2); } } @@ -3769,23 +3769,23 @@ uint2* BucketMapCacheGetBucketmap(Oid groupoid, int *bucketlen) { Assert(groupoid != InvalidOid); - if (u_sess->relcache_cxt.g_bucketmap_cache == NIL) { + if (LocalRelCacheGBucketMapCache() == NIL) { elog(DEBUG2, "Global bucketmap cache is not setup"); } - if ((unsigned int)list_length(u_sess->relcache_cxt.g_bucketmap_cache) >= u_sess->relcache_cxt.max_bucket_map_size) { + if ((unsigned int)list_length(LocalRelCacheGBucketMapCache()) >= LocalRelCacheMaxBucketMapSize()) { ClearInvalidBucketMapCache(); } - while (u_sess->relcache_cxt.max_bucket_map_size / 2 > - (unsigned int)list_length(u_sess->relcache_cxt.g_bucketmap_cache) && - u_sess->relcache_cxt.max_bucket_map_size / 2 > BUCKET_MAP_SIZE) { - u_sess->relcache_cxt.max_bucket_map_size /= 2; + while (LocalRelCacheMaxBucketMapSize() / 2 > + (unsigned int)list_length(LocalRelCacheGBucketMapCache()) && + LocalRelCacheMaxBucketMapSize() / 2 > BUCKET_MAP_SIZE) { + EnlargeLocalRelCacheMaxBucketMapSize(0.5); } ListCell* cell = NULL; uint2* bucketmap = NULL; /* Search bucketmap from cache */ - foreach (cell, u_sess->relcache_cxt.g_bucketmap_cache) { + foreach (cell, LocalRelCacheGBucketMapCache()) { BucketMapCache* bmc = (BucketMapCache*)lfirst(cell); if (bmc->groupoid == groupoid) { diff --git a/src/common/backend/pgxc_single/nodemgr/nodemgr.cpp b/src/common/backend/pgxc_single/nodemgr/nodemgr.cpp index 1ff247e5d..1d8987f76 100644 --- a/src/common/backend/pgxc_single/nodemgr/nodemgr.cpp +++ b/src/common/backend/pgxc_single/nodemgr/nodemgr.cpp @@ -742,6 +742,12 @@ void PgxcNodeGetOids(Oid** coOids, Oid** dnOids, int* num_coords, int* num_dns, LWLockRelease(NodeTableLock); } +void PgxcNodeGetOidsForInit(Oid** 
coOids, Oid** dnOids, int* num_coords, int* num_dns, int * num_primaries, bool update_preferred) +{ + Assert(false); + DISTRIBUTED_FEATURE_NOT_SUPPORTED(); +} + void PgxcNodeGetStandbyOids(Oid** coOids, Oid** dnOids, int* numCoords, int* numStandbyDns, bool needInitPGXC) { LWLockAcquire(NodeTableLock, LW_SHARED); diff --git a/src/common/backend/pgxc_single/pool/execRemote.cpp b/src/common/backend/pgxc_single/pool/execRemote.cpp index 7d9d8135c..5ea7e91f4 100755 --- a/src/common/backend/pgxc_single/pool/execRemote.cpp +++ b/src/common/backend/pgxc_single/pool/execRemote.cpp @@ -303,6 +303,8 @@ RemoteQueryState* CreateResponseCombiner(int node_count, CombineType combine_typ combiner->currentRow.msgnode = 0; combiner->row_store = RowStoreAlloc(CurrentMemoryContext, ROW_STORE_MAX_MEM, t_thrd.utils_cxt.CurrentResourceOwner); + combiner->maxCSN = InvalidCommitSeqNo; + combiner->hadrMainStandby = false; combiner->tapenodes = NULL; combiner->remoteCopyType = REMOTE_COPY_NONE; combiner->copy_file = NULL; @@ -2774,14 +2776,6 @@ void pgxc_node_remote_commit(bool barrierLockHeld) /* white-box test inject end */ } #endif - /* - * only send to the node whose command = commitCmd - * ignore the commit prepared situation which has been checked in PrepareTransaction() - */ - if (u_sess->pgxc_cxt.remoteXactState->remoteNodeStatus[i] == RXACT_NODE_NONE) { - pgxc_node_send_gxid(connections[i], t_thrd.xact_cxt.XactXidStoreForCheck, true); - } - if (pgxc_node_send_queryid(connections[i], u_sess->debug_query_id) != 0) { const int dest_max = 256; rc = sprintf_s(errMsg, @@ -3053,9 +3047,6 @@ int pgxc_node_remote_abort(void) new_connections[new_conn_count++] = connections[i]; } } else { - /* only send to the node whose command is rollbackCmd , not rollback prepared */ - pgxc_node_send_gxid(connections[i], t_thrd.xact_cxt.XactXidStoreForCheck, true); - if (pgxc_node_send_query(connections[i], rollbackCmd)) { rc = sprintf_s(errMsg, ERRMSG_BUFF_SIZE, @@ -6800,12 +6791,12 @@ HeapTuple* RecvRemoteSampleMessage( */ void PGXCNodeCleanAndRelease(int code, Datum arg) { - /* clean gpc cn refcount and plancache in shared memory */ - CNGPCCleanUpSession(); - /* Clean up prepared transactions before releasing connections */ DropAllPreparedStatements(); + /* clean saved plan but not save into gpc */ + GPCCleanUpSessionSavedPlan(); + /* Release Datanode connections */ release_handles(); diff --git a/src/common/backend/pgxc_single/pool/pgxcnode.cpp b/src/common/backend/pgxc_single/pool/pgxcnode.cpp index 80be29ceb..e1f0559cb 100644 --- a/src/common/backend/pgxc_single/pool/pgxcnode.cpp +++ b/src/common/backend/pgxc_single/pool/pgxcnode.cpp @@ -1444,6 +1444,8 @@ pgxc_node_all_free(void) u_sess->pgxc_cxt.NumCoords = 0; u_sess->pgxc_cxt.dn_handles = NULL; u_sess->pgxc_cxt.NumDataNodes = 0; + u_sess->pgxc_cxt.NumTotalDataNodes = 0; + u_sess->pgxc_cxt.NumStandbyDataNodes = 0; u_sess->pgxc_cxt.primary_data_node = InvalidOid; u_sess->pgxc_cxt.num_preferred_data_nodes = 0; } diff --git a/src/common/backend/pgxc_single/pool/poolutils.cpp b/src/common/backend/pgxc_single/pool/poolutils.cpp index 650096aec..55e64f67a 100755 --- a/src/common/backend/pgxc_single/pool/poolutils.cpp +++ b/src/common/backend/pgxc_single/pool/poolutils.cpp @@ -61,6 +61,30 @@ Datum pgxc_pool_check(PG_FUNCTION_ARGS) PG_RETURN_BOOL(PoolManagerCheckConnectionInfo()); } +Datum pgxc_disaster_read_set(PG_FUNCTION_ARGS) +{ + DISTRIBUTED_FEATURE_NOT_SUPPORTED(); + PG_RETURN_BOOL(false); +} + +Datum pgxc_disaster_read_init(PG_FUNCTION_ARGS) +{ + 
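/* single-node build: these disaster-read entry points are stubs and
+     * always report the distributed feature as unsupported */
+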
DISTRIBUTED_FEATURE_NOT_SUPPORTED(); + PG_RETURN_BOOL(false); +} + +Datum pgxc_disaster_read_clear(PG_FUNCTION_ARGS) +{ + DISTRIBUTED_FEATURE_NOT_SUPPORTED(); + PG_RETURN_BOOL(false); +} + +Datum pgxc_disaster_read_status(PG_FUNCTION_ARGS) +{ + DISTRIBUTED_FEATURE_NOT_SUPPORTED(); + PG_RETURN_NULL(); +} + /* * pgxc_pool_reload * diff --git a/src/common/backend/tsearch/ts_utils.cpp b/src/common/backend/tsearch/ts_utils.cpp index 3b00e6a8a..f12b01d7e 100644 --- a/src/common/backend/tsearch/ts_utils.cpp +++ b/src/common/backend/tsearch/ts_utils.cpp @@ -55,7 +55,7 @@ char* get_tsfile_prefix_tmp(bool isExecCN) "%s/tsearch_data/%ld%lu", tmpSharepath, GetCurrentTransactionStartTimestamp(), - (GTM_MODE) ? (GetCurrentTransactionId()) : (isExecCN ? GetCurrentTransactionId() : t_thrd.xact_cxt.cn_xid)); + (isExecCN ? GetCurrentTransactionId() : t_thrd.xact_cxt.cn_xid)); /* We need to check for my_exec_path */ check_backend_env(strinfo.data); diff --git a/src/common/backend/utils/adt/CMakeLists.txt b/src/common/backend/utils/adt/CMakeLists.txt index 78c07a9c9..b33909070 100755 --- a/src/common/backend/utils/adt/CMakeLists.txt +++ b/src/common/backend/utils/adt/CMakeLists.txt @@ -8,7 +8,6 @@ set(TGT_adt_INC ${PROJECT_TRUNK_DIR}/distribute/include ${PROJECT_SRC_DIR}/include ${PROJECT_SRC_DIR}/lib/gstrace - ${LIBXML_INCLUDE_PATH}/libxml2 ${LZ4_INCLUDE_PATH} ${ZLIB_INCLUDE_PATH} ${LIBOPENSSL_INCLUDE_PATH} diff --git a/src/common/backend/utils/adt/Makefile b/src/common/backend/utils/adt/Makefile index 59699cb22..c18e3f113 100644 --- a/src/common/backend/utils/adt/Makefile +++ b/src/common/backend/utils/adt/Makefile @@ -34,12 +34,12 @@ OBJS = acl.o arrayfuncs.o array_selfuncs.o array_typanalyze.o \ tid.o timestamp.o varbit.o varchar.o varlena.o version.o xid.o \ network.o mac.o inet_cidr_ntop.o inet_net_pton.o \ ri_triggers.o pg_lzcompress.o pg_lsn.o pg_locale.o formatting.o \ - ascii.o quote.o pgstatfuncs.o encode.o dbsize.o genfile.o trigfuncs.o \ + ascii.o quote.o pgxlogstatfuncs.o pgundostatfuncs.o pgstatfuncs.o encode.o dbsize.o genfile.o trigfuncs.o \ tsginidx.o tsgistidx.o tsquery.o tsquery_cleanup.o tsquery_gist.o \ tsquery_op.o tsquery_rewrite.o tsquery_util.o tsrank.o \ tsvector.o tsvector_op.o tsvector_parser.o \ txid.o uuid.o windowfuncs.o xml.o extended_statistics.o clientlogic_bytea.o clientlogicsettings.o \ - median_aggs.o expr_distinct.o nlssort.o first_last_agg.o + median_aggs.o expr_distinct.o nlssort.o memory_func.o first_last_agg.o like.o: like.cpp like_match.cpp diff --git a/src/common/backend/utils/adt/a_compat.cpp b/src/common/backend/utils/adt/a_compat.cpp index 3432b6eaf..9078e49cf 100644 --- a/src/common/backend/utils/adt/a_compat.cpp +++ b/src/common/backend/utils/adt/a_compat.cpp @@ -39,6 +39,11 @@ static text* dotrim(const char* string, int stringlen, const char* set, int setl Datum lower(PG_FUNCTION_ARGS) { text* in_string = PG_GETARG_TEXT_PP(0); + if (unlikely(VARATT_IS_HUGE_TOAST_POINTER(in_string))) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("lower() arguments cannot exceed 1GB"))); + } char* out_string = NULL; text* result = NULL; @@ -66,6 +71,11 @@ Datum lower(PG_FUNCTION_ARGS) Datum upper(PG_FUNCTION_ARGS) { text* in_string = PG_GETARG_TEXT_PP(0); + if (unlikely(VARATT_IS_HUGE_TOAST_POINTER(in_string))) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("upper() arguments cannot exceed 1GB"))); + } char* out_string = NULL; text* result = NULL; @@ -637,7 +647,15 @@ Datum rtrim1(PG_FUNCTION_ARGS) text* string = 
PG_GETARG_TEXT_PP(0);
     text* ret = NULL;

-    ret = dotrim(VARDATA_ANY(string), VARSIZE_ANY_EXHDR(string), " ", 1, false, true);
+    if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) {
+        /*
+         * char(n) will not ignore the trailing blanks in A_FORMAT compatibility.
+         * Here, we just return the original input.
+         */
+        PG_RETURN_TEXT_P(string);
+    } else {
+        ret = dotrim(VARDATA_ANY(string), VARSIZE_ANY_EXHDR(string), " ", 1, false, true);
+    }

     if ((ret == NULL || 0 == VARSIZE_ANY_EXHDR(ret)) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT)
         PG_RETURN_NULL();
diff --git a/src/common/backend/utils/adt/acl.cpp b/src/common/backend/utils/adt/acl.cpp
index b7518f376..6a30f0ed2 100644
--- a/src/common/backend/utils/adt/acl.cpp
+++ b/src/common/backend/utils/adt/acl.cpp
@@ -81,7 +81,6 @@ static AclResult pg_role_aclcheck(Oid role_oid, Oid roleid, AclMode mode);
 static void RoleMembershipCacheCallback(Datum arg, int cacheid, uint32 hashvalue);
 static Oid get_role_oid_or_public(const char* rolname);
-static List * roles_has_privs_of(Oid roleid);
 static Oid convert_cmk_name(text *keyname);
 static Oid convert_column_key_name(text *keyname);
 static AclMode convert_cmk_priv_string(text *priv_type_text);
@@ -1351,10 +1350,10 @@ static bool has_privs_of_role_without_sysadmin(Oid member, Oid role)

 /*
- * aclmask_dbe_perf --- compute bitmask of all privileges of held by roleid
- * when related to schema dbe_perf and objects in schema dbe_perf.
+ * aclmask_without_sysadmin --- compute bitmask of all privileges held by roleid
+ * when related to the schemas dbe_perf, snapshot and pg_catalog.
  */
-AclMode aclmask_dbe_perf(const Acl *acl, Oid roleid, Oid ownerId, AclMode mask, AclMaskHow how)
+AclMode aclmask_without_sysadmin(const Acl *acl, Oid roleid, Oid ownerId, AclMode mask, AclMaskHow how)
 {
     AclMode result;
     AclMode remaining;
@@ -5220,7 +5219,7 @@ void initialize_acl(void)
          * In normal mode, set a callback on any syscache invalidation of
          * pg_auth_members rows
          */
-        CacheRegisterSyscacheCallback(AUTHMEMROLEMEM, RoleMembershipCacheCallback, (Datum)0);
+        CacheRegisterSessionSyscacheCallback(AUTHMEMROLEMEM, RoleMembershipCacheCallback, (Datum)0);
     }
 }
@@ -5263,7 +5262,7 @@ static bool has_rolinherit(Oid roleid)
  * For the benefit of select_best_grantor, the result is defined to be
  * in breadth-first order, ie, closer relationships earlier.
*/ -static List* roles_has_privs_of(Oid roleid) +List* roles_has_privs_of(Oid roleid) { List* roles_list = NIL; ListCell* l = NULL; @@ -5299,7 +5298,7 @@ static List* roles_has_privs_of(Oid roleid) /* Find roles that memberid is directly a member of */ memlist = SearchSysCacheList1(AUTHMEMMEMROLE, ObjectIdGetDatum(memberid)); for (i = 0; i < memlist->n_members; i++) { - HeapTuple tup = &memlist->members[i]->tuple; + HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, i); Oid otherid = ((Form_pg_auth_members)GETSTRUCT(tup))->roleid; /* @@ -5373,7 +5372,7 @@ static List* roles_is_member_of(Oid roleid) /* Find roles that memberid is directly a member of */ memlist = SearchSysCacheList1(AUTHMEMMEMROLE, ObjectIdGetDatum(memberid)); for (i = 0; i < memlist->n_members; i++) { - HeapTuple tup = &memlist->members[i]->tuple; + HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, i); Oid otherid = ((Form_pg_auth_members)GETSTRUCT(tup))->roleid; /* @@ -5587,7 +5586,7 @@ bool is_admin_of_role(Oid member, Oid role) /* Find roles that memberid is directly a member of */ memlist = SearchSysCacheList1(AUTHMEMMEMROLE, ObjectIdGetDatum(memberid)); for (i = 0; i < memlist->n_members; i++) { - HeapTuple tup = &memlist->members[i]->tuple; + HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, i); Oid otherid = ((Form_pg_auth_members)GETSTRUCT(tup))->roleid; if (otherid == role && ((Form_pg_auth_members)GETSTRUCT(tup))->admin_option) { @@ -5644,12 +5643,13 @@ static int count_one_bits(AclMode mask) * *grantorId: receives the OID of the role to do the grant as * *grantOptions: receives the grant options actually held by grantorId * isDbePerf: if the object in question belonging to schema dbe_perf + * isPgCatalog: if the object in question belonging to schema pg_catalog * * If no grant options exist, we set grantorId to roleId, grantOptions to 0. 
*/ void select_best_grantor( Oid roleId, AclMode privileges, AclMode ddlPrivileges, const Acl* acl, Oid ownerId, - Oid* grantorId, AclMode* grantOptions, AclMode* grantDdlOptions, bool isDbePerf) + Oid* grantorId, AclMode* grantOptions, AclMode* grantDdlOptions, bool isDbePerf, bool isPgCatalog) { /* remove ddl privileges flag from Aclitem */ ddlPrivileges = REMOVE_DDL_FLAG(ddlPrivileges); @@ -5674,6 +5674,13 @@ void select_best_grantor( *grantDdlOptions = ddl_needed_goptions; return; } + } else if (isPgCatalog) { + if (roleId == ownerId || roleId == INITIAL_USER_ID) { + *grantorId = ownerId; + *grantOptions = needed_goptions; + *grantDdlOptions = ddl_needed_goptions; + return; + } } else { if (roleId == ownerId || (superuser_arg(roleId) && !is_role_independent(ownerId))) { *grantorId = ownerId; diff --git a/src/common/backend/utils/adt/arrayfuncs.cpp b/src/common/backend/utils/adt/arrayfuncs.cpp index 3ec36d6f2..60f8da6f0 100644 --- a/src/common/backend/utils/adt/arrayfuncs.cpp +++ b/src/common/backend/utils/adt/arrayfuncs.cpp @@ -96,6 +96,7 @@ static ArrayType* create_array_envelope(int ndims, int* dimv, const int* lbv, in static ArrayType* array_fill_internal( ArrayType* dims, ArrayType* lbs, Datum value, bool isnull, Oid elmtype, FunctionCallInfo fcinfo); static ArrayType* array_deleteidx_internal(ArrayType *v, int delIndex); +static void checkEnv(); /* * complex_array_in : @@ -1672,6 +1673,7 @@ Datum array_length(PG_FUNCTION_ARGS) Datum array_indexby_length(PG_FUNCTION_ARGS) { + checkEnv(); if (PG_ARGISNULL(0)) { PG_RETURN_INT32(0); } @@ -1712,6 +1714,9 @@ Datum array_indexby_length(PG_FUNCTION_ARGS) */ Datum array_exists(PG_FUNCTION_ARGS) { + if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) { + PG_RETURN_BOOL(false); + } ArrayType* v = PG_GETARG_ARRAYTYPE_P(0); int index = PG_GETARG_INT32(1); int* dimv = NULL; @@ -1781,7 +1786,7 @@ static bool array_index_exists_internal(ArrayType* v, HTAB* table_index, Oid tab Datum array_varchar_exists(PG_FUNCTION_ARGS) { checkEnv(); - if (PG_ARGISNULL(0)) { + if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) { PG_RETURN_BOOL(false); } ArrayType* v = PG_GETARG_ARRAYTYPE_P(0); @@ -1795,17 +1800,27 @@ Datum array_varchar_exists(PG_FUNCTION_ARGS) errmsg("array_varchar_exists must be call in procedure"))); } + /* transfer varchar format */ + bool isTran = false; + if (VARATT_IS_1B(index_datum)) { + index_datum = transVaratt1BTo4B(index_datum); + isTran = true; + } + bool result = array_index_exists_internal(v, u_sess->SPI_cxt.cur_tableof_index->tableOfIndex, u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType, index_datum); + if (isTran) { + pfree(DatumGetPointer(index_datum)); + } PG_RETURN_BOOL(result); } Datum array_integer_exists(PG_FUNCTION_ARGS) { checkEnv(); - if (PG_ARGISNULL(0)) { + if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) { PG_RETURN_BOOL(false); } ArrayType* v = PG_GETARG_ARRAYTYPE_P(0); @@ -1963,6 +1978,14 @@ Datum array_varchar_next(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("array_varchar_next must be call in procedure"))); } + + /* transfer varchar format */ + bool isTran = false; + if (VARATT_IS_1B(index_datum)) { + index_datum = transVaratt1BTo4B(index_datum); + isTran = true; + } + /* turn varchar index */ HTAB* table_index = u_sess->SPI_cxt.cur_tableof_index->tableOfIndex; TableOfIndexKey key; @@ -1971,9 +1994,15 @@ Datum array_varchar_next(PG_FUNCTION_ARGS) index = getTableOfIndexByDatumValue(key, table_index, NULL); /* if exist index? 
*/ if (index < 0) { + if (isTran) { + pfree(DatumGetPointer(index_datum)); + } PG_RETURN_NULL(); } Datum next_datum = tableOfIndexVarcharNextValue(table_index, &key); + if (isTran) { + pfree(DatumGetPointer(index_datum)); + } if (next_datum == Datum(0)) { PG_RETURN_NULL(); } else { @@ -1999,6 +2028,12 @@ Datum array_varchar_prior(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("array_varchar_prior must be call in procedure"))); } + /* transfer varchar format */ + bool isTran = false; + if (VARATT_IS_1B(index_datum)) { + index_datum = transVaratt1BTo4B(index_datum); + isTran = true; + } /* turn varchar index */ HTAB* table_index = u_sess->SPI_cxt.cur_tableof_index->tableOfIndex; TableOfIndexKey key; @@ -2006,10 +2041,15 @@ Datum array_varchar_prior(PG_FUNCTION_ARGS) key.exprdatum = index_datum; index = getTableOfIndexByDatumValue(key, table_index, NULL); if (index < 0) { + if (isTran) { + pfree(DatumGetPointer(index_datum)); + } PG_RETURN_NULL(); } Datum prior_datum = tableOfIndexVarcharPriorValue(table_index, &key); - /* check ? */ + if (isTran) { + pfree(DatumGetPointer(index_datum)); + } if (prior_datum == Datum(0)) { PG_RETURN_NULL(); } else { @@ -2274,7 +2314,8 @@ static ArrayType* array_index_delete_internal(ArrayType* v, HTAB* table_index, O TableOfIndexKey key; key.exprtypeid = tableOfIndexType; key.exprdatum = index_datum; - int index = getTableOfIndexByDatumValue(key, table_index, NULL); + PLpgSQL_var* var = NULL; + int index = getTableOfIndexByDatumValue(key, table_index, &var); if (index < 0) { return v; } @@ -2283,6 +2324,17 @@ static ArrayType* array_index_delete_internal(ArrayType* v, HTAB* table_index, O bool found = false; (void)hash_search(table_index, (const void*)&key, HASH_REMOVE, &found); + /* for nest table, need delete inner vars */ + if (var != NULL && var->tableOfIndex != NULL) { + HASH_SEQ_STATUS hashSeq; + hash_seq_init(&hashSeq, var->tableOfIndex); + TableOfIndexEntry* srcEntry = NULL; + while ((srcEntry = (TableOfIndexEntry*)hash_seq_search(&hashSeq)) != NULL) { + var->value = (Datum)array_index_delete_internal(DatumGetArrayTypeP(var->value), var->tableOfIndex, + var->tableOfIndexType, srcEntry->key.exprdatum); + } + } + return array; } @@ -2328,10 +2380,19 @@ Datum array_varchar_deleteidx(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("array_varchar_deleteidx must be call in procedure"))); } + /* transfer varchar format */ + bool isTran = false; + if (VARATT_IS_1B(index_datum)) { + index_datum = transVaratt1BTo4B(index_datum); + isTran = true; + } ArrayType* array = array_index_delete_internal(v, u_sess->SPI_cxt.cur_tableof_index->tableOfIndex, u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType, index_datum); + if (isTran) { + pfree(DatumGetPointer(index_datum)); + } PG_RETURN_ARRAYTYPE_P(array); } @@ -2500,6 +2561,37 @@ Datum array_delete(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(array); } +static void deleteTableOfIndexElement(HTAB* tableOfIndex) +{ + if (tableOfIndex == NULL) { + return; + } + HASH_SEQ_STATUS hashSeq; + hash_seq_init(&hashSeq, tableOfIndex); + TableOfIndexEntry* srcEntry = NULL; + bool found = false; + while ((srcEntry = (TableOfIndexEntry*)hash_seq_search(&hashSeq)) != NULL) { + if (srcEntry->var != NULL) { + deleteTableOfIndexElement(srcEntry->var->tableOfIndex); + } + (void)hash_search(tableOfIndex, (const void*)&srcEntry->key, HASH_REMOVE, &found); + } +} + +Datum array_indexby_delete(PG_FUNCTION_ARGS) +{ + checkEnv(); + ArrayType* v = PG_GETARG_ARRAYTYPE_P(0); + 
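/* delete() on an index-by collection empties it: every index entry is
+     * removed (recursively for nested tables) and an empty array is returned */
+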
ArrayType* array = construct_empty_array(ARR_ELEMTYPE(v)); + if (u_sess->SPI_cxt.cur_tableof_index->tableOfIndex == NULL) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("array_indexby_delete must be call in procedure"))); + } + deleteTableOfIndexElement(u_sess->SPI_cxt.cur_tableof_index->tableOfIndex); + + PG_RETURN_ARRAYTYPE_P(array); +} + Datum array_trim(PG_FUNCTION_ARGS) { ArrayType *v = PG_GETARG_ARRAYTYPE_P(0); @@ -5531,279 +5623,6 @@ Datum array_unnest(PG_FUNCTION_ARGS) } } -/* - * check if search is same as replace - */ -static bool array_same_replace(FunctionCallInfo locfcinfo, Datum search, bool search_isnull, - Datum replace, bool replace_isnull) -{ - if (search_isnull != replace_isnull) { - return false; - } else if (search_isnull == true) { - Assert(replace_isnull); - return true; - } else { - locfcinfo->arg[0] = search; - locfcinfo->arg[1] = replace; - locfcinfo->argnull[0] = search_isnull; - locfcinfo->argnull[1] = replace_isnull; - locfcinfo->isnull = false; - return DatumGetBool(FunctionCallInvoke(locfcinfo)); - } -} - -/* - * array_replace/array_remove support - * - * Find all array entries matching (not distinct from) search/search_isnull, - * and delete them if remove is true, else replace them with - * replace/replace_isnull. Comparisons are done using the specified - * collation. fcinfo is passed only for caching purposes. - */ -static ArrayType *array_replace_internal(ArrayType *array, Datum search, bool search_isnull, Datum replace, - bool replace_isnull, bool remove, Oid collation, FunctionCallInfo fcinfo) -{ - - ArrayType *result = NULL; - Oid element_type; - Datum *values = NULL; - bool *nulls = NULL; - int *dim = NULL; - int ndim; - int nitems; - int nresult; - int i; - int32 nbytes = 0; - int32 dataoffset; - bool hasnulls = false; - int typlen; - bool typbyval = false; - char typalign; - char *arraydataptr = NULL; - bits8 *bitmap = NULL; - int bitmask; - bool changed = false; - TypeCacheEntry *typentry = NULL; - FunctionCallInfoData locfcinfo; - errno_t rc = EOK; - - element_type = ARR_ELEMTYPE(array); - ndim = ARR_NDIM(array); - dim = ARR_DIMS(array); - nitems = ArrayGetNItems(ndim, dim); - /* Return input array unmodified if it is empty */ - if (nitems <= 0) - return array; - /* - * We can't remove elements from multi-dimensional arrays, since the - * result might not be rectangular. - */ - if (remove && ndim > 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("removing elements from multidimensional arrays is not supported"))); - /* - * We arrange to look up the equality function only once per series of - * calls, assuming the element type doesn't change underneath us. - */ - typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra; - if (typentry == NULL || typentry->type_id != element_type) { - typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR_FINFO); - if (!OidIsValid(typentry->eq_opr_finfo.fn_oid)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not identify an equality operator for type %s", - format_type_be(element_type)))); - fcinfo->flinfo->fn_extra = (void *) typentry; - } - typlen = typentry->typlen; - typbyval = typentry->typbyval; - typalign = typentry->typalign; - /* - * Detoast values if they are toasted. The replacement value must be - * detoasted for insertion into the result array, while detoasting the - * search value only once saves cycles. 
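deleteTableOfIndexElement, added earlier in this hunk, relies on a dynahash guarantee that is easy to miss: the entry most recently returned by hash_seq_search may be removed without breaking the scan, so a single pass empties the table. A standalone sketch of the idiom (drain_hash is an invented name; the entry/key layout follows the patch's TableOfIndexEntry):

```
/* drain a dynahash table in one sequential scan, mirroring
 * deleteTableOfIndexElement(): deleting the entry just returned by
 * hash_seq_search() is explicitly permitted by dynahash. */
static void drain_hash(HTAB *h)
{
    HASH_SEQ_STATUS scan;
    TableOfIndexEntry *entry = NULL;
    bool found = false;

    hash_seq_init(&scan, h);
    while ((entry = (TableOfIndexEntry *)hash_seq_search(&scan)) != NULL) {
        (void)hash_search(h, (const void *)&entry->key, HASH_REMOVE, &found);
    }
}
```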
- */ - if (typlen == -1) { - if (!search_isnull) - search = PointerGetDatum(PG_DETOAST_DATUM(search)); - if (!replace_isnull) - replace = PointerGetDatum(PG_DETOAST_DATUM(replace)); - } - /* Prepare to apply the comparison operator */ - InitFunctionCallInfoData(locfcinfo, &typentry->eq_opr_finfo, 2, collation, NULL, NULL); - /* directly return if search is same as replace */ - if (!remove && array_same_replace(&locfcinfo, search, search_isnull, replace, replace_isnull)) { - return array; - } - /* Allocate temporary arrays for new values */ - values = (Datum *) palloc(nitems * sizeof(Datum)); - nulls = (bool *) palloc(nitems * sizeof(bool)); - /* Loop over source data */ - arraydataptr = ARR_DATA_PTR(array); - bitmap = ARR_NULLBITMAP(array); - bitmask = 1; - hasnulls = false; - nresult = 0; - for (i = 0; i < nitems; i++) { - Datum elt; - bool isNull = false; - bool oprresult = false; - bool skip = false; - /* Get source element, checking for NULL */ - if (bitmap && (*bitmap & bitmask) == 0) { - isNull = true; - /* If searching for NULL, we have a match */ - if (search_isnull) { - if (remove) { - skip = true; - changed = true; - } else if (!replace_isnull) { - values[nresult] = replace; - isNull = false; - changed = true; - } - } - } else { - isNull = false; - elt = fetch_att(arraydataptr, typbyval, typlen); - arraydataptr = att_addlength_datum(arraydataptr, typlen, elt); - arraydataptr = (char *) att_align_nominal(arraydataptr, typalign); - if (search_isnull) { - /* no match possible, keep element */ - values[nresult] = elt; - } else { - /* Compare the pair of elements */ - locfcinfo.arg[0] = elt; - locfcinfo.arg[1] = search; - locfcinfo.argnull[0] = false; - locfcinfo.argnull[1] = false; - locfcinfo.isnull = false; - oprresult = DatumGetBool(FunctionCallInvoke(&locfcinfo)); - if (locfcinfo.isnull || !oprresult) { - /* no match, keep element */ - values[nresult] = elt; - } else { - /* match, so replace or delete */ - changed = true; - if (remove) { - skip = true; - } else { - values[nresult] = replace; - isNull = replace_isnull; - } - } - } - } - if (!skip) { - nulls[nresult] = isNull; - if (isNull) { - hasnulls = true; - } else { - /* Update total result size */ - nbytes = att_addlength_datum(nbytes, typlen, values[nresult]); - nbytes = att_align_nominal(nbytes, typalign); - /* check for overflow of total request */ - if (!AllocSizeIsValid(nbytes)) - ereport(ERROR, - (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("array size exceeds the maximum allowed (%d)", - (int) MaxAllocSize))); - } - nresult++; - } - /* advance bitmap pointer if any */ - if (bitmap) { - bitmask <<= 1; - if (bitmask == 0x100) { - bitmap++; - bitmask = 1; - } - } - } - /* - * If not changed just return the original array - */ - if (!changed) { - pfree(values); - pfree(nulls); - return array; - } - /* If all elements were removed return an empty array */ - if (nresult == 0) { - pfree(values); - pfree(nulls); - return construct_empty_array(element_type); - } - /* Allocate and initialize the result array */ - if (hasnulls) { - dataoffset = ARR_OVERHEAD_WITHNULLS(ndim, nresult); - nbytes += dataoffset; - } else { - dataoffset = 0; /* marker for no null bitmap */ - nbytes += ARR_OVERHEAD_NONULLS(ndim); - } - result = (ArrayType *) palloc0(nbytes); - SET_VARSIZE(result, nbytes); - result->ndim = ndim; - result->dataoffset = dataoffset; - result->elemtype = element_type; - rc = memcpy_s(ARR_DIMS(result), nbytes - sizeof(ArrayType), ARR_DIMS(array), ndim * sizeof(int)); - securec_check(rc, "\0", "\0"); - rc = 
memcpy_s(ARR_LBOUND(result), nbytes - (sizeof(ArrayType) + sizeof(int) * ndim), - ARR_LBOUND(array), ndim * sizeof(int)); - securec_check(rc, "\0", "\0"); - - if (remove) { - /* Adjust the result length */ - ARR_DIMS(result)[0] = nresult; - } - /* Insert data into result array */ - CopyArrayEls(result, values, nulls, nresult, typlen, typbyval, typalign, false); - pfree(values); - pfree(nulls); - return result; -} - -/* - * Remove any occurrences of an element from an array - * - * If used on a multi-dimensional array this will raise an error. - */ -Datum array_remove(PG_FUNCTION_ARGS) -{ - ArrayType *array = NULL; - Datum search = PG_GETARG_DATUM(1); - bool search_isnull = PG_ARGISNULL(1); - - if (PG_ARGISNULL(0)) - PG_RETURN_NULL(); - - array = PG_GETARG_ARRAYTYPE_P(0); - array = array_replace_internal(array, search, search_isnull, (Datum) 0, true, true, PG_GET_COLLATION(), fcinfo); - PG_RETURN_ARRAYTYPE_P(array); -} - -/* - * Replace any occurrences of an element in an array - */ -Datum array_replace(PG_FUNCTION_ARGS) -{ - ArrayType *array = NULL; - Datum search = PG_GETARG_DATUM(1); - bool search_isnull = PG_ARGISNULL(1); - Datum replace = PG_GETARG_DATUM(2); - bool replace_isnull = PG_ARGISNULL(2); - - if (PG_ARGISNULL(0)) - PG_RETURN_NULL(); - - array = PG_GETARG_ARRAYTYPE_P(0); - array = array_replace_internal(array, search, search_isnull, replace, replace_isnull, false, PG_GET_COLLATION(), - fcinfo); - PG_RETURN_ARRAYTYPE_P(array); -} - /* * The type, dimension needs to be checked during collection operations. */ @@ -6084,6 +5903,9 @@ Datum array_union(PG_FUNCTION_ARGS) rc = memcpy_s(ARR_LBOUND(result), ndims * sizeof(int), lbs, ndims * sizeof(int)); securec_check(rc, "", ""); + pfree_ext(dims); + pfree_ext(lbs); + /* data area is arg1 then arg2. And make sure the destMax of memcpy_s should never be zero. */ if (ndatabytes1 > 0) { rc = memcpy_s(ARR_DATA_PTR(result), ndatabytes1, dat1, ndatabytes1); @@ -6357,3 +6179,295 @@ Datum array_except_distinct(PG_FUNCTION_ARGS) ArrayType* result = array_except_internal(v1, v2, typentry, true); PG_RETURN_ARRAYTYPE_P(result); } + +/* + * check if search is same as replace + */ +static bool array_same_replace(FunctionCallInfo locfcinfo, Datum search, bool search_isnull, + Datum replace, bool replace_isnull) +{ + if (search_isnull != replace_isnull) { + return false; + } else if (search_isnull) { + Assert(replace_isnull); + return true; + } else { + locfcinfo->arg[0] = search; + locfcinfo->arg[1] = replace; + locfcinfo->argnull[0] = search_isnull; + locfcinfo->argnull[1] = replace_isnull; + locfcinfo->isnull = false; + return DatumGetBool(FunctionCallInvoke(locfcinfo)); + } +} + +/* + * array_replace/array_remove support + * + * Find all array entries matching (not distinct from) search/search_isnull, + * and delete them if remove is true, else replace them with + * replace/replace_isnull. Comparisons are done using the specified + * collation. fcinfo is passed only for caching purposes. 
+ */ +static ArrayType *array_replace_internal(ArrayType *array, Datum search, bool search_isnull, Datum replace, + bool replace_isnull, bool remove, Oid collation, FunctionCallInfo fcinfo) +{ + ArrayType *result = NULL; + Oid element_type; + Datum *values = NULL; + bool *nulls = NULL; + int *dim = NULL; + int ndim; + int nitems; + int nresult; + int i; + int32 nbytes = 0; + int32 dataoffset; + bool hasnulls = false; + int typlen; + bool typbyval = false; + char typalign; + char *arraydataptr = NULL; + bits8 *bitmap = NULL; + int bitmask; + bool changed = false; + TypeCacheEntry *typentry = NULL; + FunctionCallInfoData locfcinfo; + errno_t rc = EOK; + + element_type = ARR_ELEMTYPE(array); + ndim = ARR_NDIM(array); + dim = ARR_DIMS(array); + nitems = ArrayGetNItems(ndim, dim); + + /* Return input array unmodified if it is empty */ + if (nitems <= 0) + return array; + + /* + * We can't remove elements from multi-dimensional arrays, since the + * result might not be rectangular. + */ + if (remove && ndim > 1) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("removing elements from multidimensional arrays is not supported"))); + + /* + * We arrange to look up the equality function only once per series of + * calls, assuming the element type doesn't change underneath us. + */ + typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra; + if (typentry == NULL || typentry->type_id != element_type) { + typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR_FINFO); + if (!OidIsValid(typentry->eq_opr_finfo.fn_oid)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("could not identify an equality operator for type %s", + format_type_be(element_type)))); + fcinfo->flinfo->fn_extra = (void *) typentry; + } + typlen = typentry->typlen; + typbyval = typentry->typbyval; + typalign = typentry->typalign; + + /* + * Detoast values if they are toasted. The replacement value must be + * detoasted for insertion into the result array, while detoasting the + * search value only once saves cycles. 
+ */ + if (typlen == -1) { + if (!search_isnull) + search = PointerGetDatum(PG_DETOAST_DATUM(search)); + if (!replace_isnull) + replace = PointerGetDatum(PG_DETOAST_DATUM(replace)); + } + + /* Prepare to apply the comparison operator */ + InitFunctionCallInfoData(locfcinfo, &typentry->eq_opr_finfo, 2, collation, NULL, NULL); + + /* directly return if search is same as replace */ + if (!remove && array_same_replace(&locfcinfo, search, search_isnull, replace, replace_isnull)) { + return array; + } + + /* Allocate temporary arrays for new values */ + values = (Datum *) palloc(nitems * sizeof(Datum)); + nulls = (bool *) palloc(nitems * sizeof(bool)); + + /* Loop over source data */ + arraydataptr = ARR_DATA_PTR(array); + bitmap = ARR_NULLBITMAP(array); + bitmask = 1; + hasnulls = false; + nresult = 0; + + for (i = 0; i < nitems; i++) { + Datum elt; + bool isNull = false; + bool oprresult = false; + bool skip = false; + + /* Get source element, checking for NULL */ + if (bitmap && (*bitmap & bitmask) == 0) { + isNull = true; + /* If searching for NULL, we have a match */ + if (search_isnull) { + if (remove) { + skip = true; + changed = true; + } else if (!replace_isnull) { + values[nresult] = replace; + isNull = false; + changed = true; + } + } + } else { + isNull = false; + elt = fetch_att(arraydataptr, typbyval, typlen); + arraydataptr = att_addlength_datum(arraydataptr, typlen, elt); + arraydataptr = (char *) att_align_nominal(arraydataptr, typalign); + + if (search_isnull) { + /* no match possible, keep element */ + values[nresult] = elt; + } else { + /* Compare the pair of elements */ + locfcinfo.arg[0] = elt; + locfcinfo.arg[1] = search; + locfcinfo.argnull[0] = false; + locfcinfo.argnull[1] = false; + locfcinfo.isnull = false; + oprresult = DatumGetBool(FunctionCallInvoke(&locfcinfo)); + if (locfcinfo.isnull || !oprresult) { + /* no match, keep element */ + values[nresult] = elt; + } else { + /* match, so replace or delete */ + changed = true; + if (remove) { + skip = true; + } else { + values[nresult] = replace; + isNull = replace_isnull; + } + } + } + } + + if (!skip) { + nulls[nresult] = isNull; + if (isNull) { + hasnulls = true; + } else { + /* Update total result size */ + nbytes = att_addlength_datum(nbytes, typlen, values[nresult]); + nbytes = att_align_nominal(nbytes, typalign); + /* check for overflow of total request */ + if (!AllocSizeIsValid(nbytes)) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed (%d)", + (int) MaxAllocSize))); + } + nresult++; + } + + /* advance bitmap pointer if any */ + if (bitmap) { + bitmask <<= 1; + if (bitmask == 0x100) { + bitmap++; + bitmask = 1; + } + } + } + + /* + * If not changed just return the original array + */ + if (!changed) { + pfree(values); + pfree(nulls); + return array; + } + + /* If all elements were removed return an empty array */ + if (nresult == 0) { + pfree(values); + pfree(nulls); + return construct_empty_array(element_type); + } + + /* Allocate and initialize the result array */ + if (hasnulls) { + dataoffset = ARR_OVERHEAD_WITHNULLS(ndim, nresult); + nbytes += dataoffset; + } else { + dataoffset = 0; /* marker for no null bitmap */ + nbytes += ARR_OVERHEAD_NONULLS(ndim); + } + result = (ArrayType *) palloc0(nbytes); + SET_VARSIZE(result, nbytes); + result->ndim = ndim; + result->dataoffset = dataoffset; + result->elemtype = element_type; + rc = memcpy_s(ARR_DIMS(result), nbytes - sizeof(ArrayType), ARR_DIMS(array), ndim * sizeof(int)); + securec_check(rc, "\0", 
"\0"); + rc = memcpy_s(ARR_LBOUND(result), nbytes - (sizeof(ArrayType) + sizeof(int) * ndim), + ARR_LBOUND(array), ndim * sizeof(int)); + securec_check(rc, "\0", "\0"); + + if (remove) { + /* Adjust the result length */ + ARR_DIMS(result)[0] = nresult; + } + + /* Insert data into result array */ + CopyArrayEls(result, values, nulls, nresult, typlen, typbyval, typalign, false); + + pfree(values); + pfree(nulls); + + return result; +} + +/* + * Remove any occurrences of an element from an array + * + * If used on a multi-dimensional array this will raise an error. + */ +Datum array_remove(PG_FUNCTION_ARGS) +{ + ArrayType *array = NULL; + Datum search = PG_GETARG_DATUM(1); + bool search_isnull = PG_ARGISNULL(1); + + if (PG_ARGISNULL(0)) + PG_RETURN_NULL(); + array = PG_GETARG_ARRAYTYPE_P(0); + + array = array_replace_internal(array, search, search_isnull, (Datum) 0, true, true, PG_GET_COLLATION(), fcinfo); + PG_RETURN_ARRAYTYPE_P(array); +} + +/* + * Replace any occurrences of an element in an array + */ +Datum array_replace(PG_FUNCTION_ARGS) +{ + ArrayType *array = NULL; + Datum search = PG_GETARG_DATUM(1); + bool search_isnull = PG_ARGISNULL(1); + Datum replace = PG_GETARG_DATUM(2); + bool replace_isnull = PG_ARGISNULL(2); + + if (PG_ARGISNULL(0)) + PG_RETURN_NULL(); + array = PG_GETARG_ARRAYTYPE_P(0); + + array = array_replace_internal(array, search, search_isnull, replace, replace_isnull, false, PG_GET_COLLATION(), + fcinfo); + PG_RETURN_ARRAYTYPE_P(array); +} + diff --git a/src/common/backend/utils/adt/dbsize.cpp b/src/common/backend/utils/adt/dbsize.cpp index 3cfcbf594..7c49ad544 100644 --- a/src/common/backend/utils/adt/dbsize.cpp +++ b/src/common/backend/utils/adt/dbsize.cpp @@ -70,7 +70,6 @@ #include "storage/custorage.h" #include "storage/smgr/segment.h" #include "storage/cstore/cstore_compress.h" -#include "storage/page_compression.h" #include "vecexecutor/vecnodes.h" #ifdef PGXC @@ -793,7 +792,6 @@ int64 calculate_relation_size(RelFileNode* rfn, BackendId backend, ForkNumber fo relationpath = relpathbackend(*rfn, backend, forknum); - bool rowCompress = IS_COMPRESSED_RNODE((*rfn), forknum); for (segcount = 0;; segcount++) { struct stat fst; @@ -810,7 +808,7 @@ int64 calculate_relation_size(RelFileNode* rfn, BackendId backend, ForkNumber fo else ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", pathname))); } - totalsize += rowCompress ? CalculateMainForkSize((char*)pathname, rfn, forknum) : fst.st_size; + totalsize += fst.st_size; } pfree_ext(relationpath); @@ -1326,7 +1324,8 @@ static int64 CalculateIndexSize(Relation rel, int forkNumOption) Relation partIndexRel = NULL; Relation cstorePartIndexRel = NULL; - partOids = relationGetPartitionOidList(baseRel); + partOids = RelationIsSubPartitioned(baseRel) ? 
RelationGetSubPartitionOidList(baseRel) : + relationGetPartitionOidList(baseRel); foreach (cell, partOids) { partOid = lfirst_oid(cell); diff --git a/src/common/backend/utils/adt/extended_statistics.cpp b/src/common/backend/utils/adt/extended_statistics.cpp index eaf9ab977..287d7fd40 100644 --- a/src/common/backend/utils/adt/extended_statistics.cpp +++ b/src/common/backend/utils/adt/extended_statistics.cpp @@ -621,7 +621,7 @@ bool es_is_type_supported_by_cstore(VacAttrStats* stats) return false; for (unsigned int i = 0; i < stats->num_attrs; ++i) { - if (!IsTypeSupportedByCStore(stats->attrtypid[i], stats->attrtypmod[i])) { + if (!IsTypeSupportedByCStore(stats->attrtypid[i])) { return false; } } diff --git a/src/common/backend/utils/adt/float.cpp b/src/common/backend/utils/adt/float.cpp index a3c077ac7..77a1c3a89 100644 --- a/src/common/backend/utils/adt/float.cpp +++ b/src/common/backend/utils/adt/float.cpp @@ -25,7 +25,6 @@ #include "libpq/pqformat.h" #include "utils/array.h" #include "utils/builtins.h" -#include "utils/guc_sql.h" #include "optimizer/pgxcship.h" #include "miscadmin.h" @@ -369,20 +368,6 @@ Datum float4in(PG_FUNCTION_ARGS) Datum float4out(PG_FUNCTION_ARGS) { float4 num = PG_GETARG_FLOAT4(0); - - if (u_sess->attr.attr_sql.for_print_tuple) { - char *result = NULL; - double var = (double)num; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - if (result != NULL) { - PG_RETURN_CSTRING(result); - } - } - char* ascii = (char*)palloc(MAXFLOATWIDTH + 1); errno_t rc = EOK; @@ -597,20 +582,6 @@ Datum float8in(PG_FUNCTION_ARGS) Datum float8out(PG_FUNCTION_ARGS) { float8 num = PG_GETARG_FLOAT8(0); - - if (u_sess->attr.attr_sql.for_print_tuple) { - char *result = NULL; - double var = num; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - if (result != NULL) { - PG_RETURN_CSTRING(result); - } - } - char* ascii = (char*)palloc(MAXDOUBLEWIDTH + 1); errno_t rc = EOK; diff --git a/src/common/backend/utils/adt/genfile.cpp b/src/common/backend/utils/adt/genfile.cpp index a96365143..67322b8cb 100644 --- a/src/common/backend/utils/adt/genfile.cpp +++ b/src/common/backend/utils/adt/genfile.cpp @@ -316,132 +316,6 @@ Datum pg_read_binary_file_all(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(read_binary_file(filename, 0, -1, false)); } -struct CompressAddressItemState { - uint32 blkno; - int segmentNo; - ReadBlockChunksStruct rbStruct; - FILE *pcaFile; -}; - -static void ReadBinaryFileBlocksFirstCall(PG_FUNCTION_ARGS, int32 startBlockNum, int32 blockCount) -{ - char* path = convert_and_check_filename(PG_GETARG_TEXT_PP(0)); - int segmentNo = 0; - UndoFileType undoFileType = UNDO_INVALID; - if (!is_row_data_file(path, &segmentNo, &undoFileType)) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("%s is not a relation file.", path))); - } - /* create a function context for cross-call persistence */ - FuncCallContext* fctx = SRF_FIRSTCALL_INIT(); - - /* switch to memory context appropriate for multiple function calls */ - MemoryContext mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx); - - /* initialize file scanning code */ - CompressAddressItemState* itemState = (CompressAddressItemState*)palloc(sizeof(CompressAddressItemState)); - - /* save mmap to inter_call_data->pcMap */ - char 
pcaFilePath[MAXPGPATH]; - errno_t rc = snprintf_s(pcaFilePath, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - FILE* pcaFile = AllocateFile((const char*)pcaFilePath, "rb"); - if (pcaFile == NULL) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", pcaFilePath))); - } - PageCompressHeader* map = pc_mmap(fileno(pcaFile), ReadChunkSize(pcaFile, pcaFilePath, MAXPGPATH), true); - if (map == MAP_FAILED) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("Failed to mmap %s: %m", pcaFilePath))); - } - if ((BlockNumber)startBlockNum + (BlockNumber)blockCount > map->nblocks) { - auto blockNum = map->nblocks; - ReleaseMap(map, pcaFilePath); - ereport(ERROR, - (ERRCODE_INVALID_PARAMETER_VALUE, - errmsg("invalid blocknum \"%d\" and block count \"%d\", the max blocknum is \"%u\"", - startBlockNum, - blockCount, - blockNum))); - } - /* construct ReadBlockChunksStruct */ - char* pcdFilePath = (char*)palloc0(MAXPGPATH); - rc = snprintf_s(pcdFilePath, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - FILE* fp = AllocateFile(pcdFilePath, "rb"); - if (fp == NULL) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", pcdFilePath))); - } - char* pageBuffer = (char*)palloc(BLCKSZ); - itemState->pcaFile = pcaFile; - itemState->rbStruct.header = map; - itemState->rbStruct.pageBuffer = pageBuffer; - itemState->rbStruct.pageBufferLen = BLCKSZ; - itemState->rbStruct.fp = fp; - itemState->rbStruct.segmentNo = segmentNo; - itemState->rbStruct.fileName = pcdFilePath; - - /* - * build tupdesc for result tuples. This must match this function's - * pg_proc entry! - */ - TupleDesc tupdesc = CreateTemplateTupleDesc(4, false, TAM_HEAP); - TupleDescInitEntry(tupdesc, (AttrNumber)1, "path", TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)2, "blocknum", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)3, "len", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)4, "data", BYTEAOID, -1, 0); - fctx->tuple_desc = BlessTupleDesc(tupdesc); - - itemState->blkno = startBlockNum; - fctx->max_calls = blockCount; - fctx->user_fctx = itemState; - - MemoryContextSwitchTo(mctx); -} - -Datum pg_read_binary_file_blocks(PG_FUNCTION_ARGS) -{ - int32 startBlockNum = PG_GETARG_INT32(1); - int32 blockCount = PG_GETARG_INT32(2); - - if (startBlockNum < 0 || blockCount <= 0 || startBlockNum + blockCount > RELSEG_SIZE) { - ereport(ERROR, (ERRCODE_INVALID_PARAMETER_VALUE, - errmsg("invalid blocknum \"%d\" or block count \"%d\"", startBlockNum, blockCount))); - } - - /* stuff done only on the first call of the function */ - if (SRF_IS_FIRSTCALL()) { - ReadBinaryFileBlocksFirstCall(fcinfo, startBlockNum, blockCount); - } - - /* stuff done on every call of the function */ - FuncCallContext *fctx = SRF_PERCALL_SETUP(); - CompressAddressItemState *itemState = (CompressAddressItemState *)fctx->user_fctx; - - if (fctx->call_cntr < fctx->max_calls) { - bytea *buf = (bytea *)palloc(BLCKSZ + VARHDRSZ); - size_t len = ReadAllChunkOfBlock(VARDATA(buf), BLCKSZ, itemState->blkno, itemState->rbStruct); - SET_VARSIZE(buf, len + VARHDRSZ); - Datum values[4]; - values[0] = PG_GETARG_DATUM(0); - values[1] = Int32GetDatum(itemState->blkno); - values[2] = Int32GetDatum(len); - values[3] = PointerGetDatum(buf); - - /* Build and return the result tuple. 
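Both the pg_read_binary_file_blocks function removed here and the gs_get_*_memctx_detail functions added later in this patch follow the standard set-returning-function protocol. A minimal, self-contained sketch of that protocol (demo_srf and its counter payload are invented for illustration):

```
/* skeleton of a PostgreSQL set-returning function: per-query setup on
 * the first call, one datum per subsequent call, then done */
Datum demo_srf(PG_FUNCTION_ARGS)
{
    FuncCallContext *fctx = NULL;

    if (SRF_IS_FIRSTCALL()) {
        fctx = SRF_FIRSTCALL_INIT();
        /* cross-call state must live in the multi-call memory context */
        MemoryContext oldcxt = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
        fctx->max_calls = 3;
        fctx->user_fctx = palloc0(sizeof(int32));
        MemoryContextSwitchTo(oldcxt);
    }

    fctx = SRF_PERCALL_SETUP();
    if (fctx->call_cntr < fctx->max_calls) {
        int32 *counter = (int32 *)fctx->user_fctx;
        SRF_RETURN_NEXT(fctx, Int32GetDatum((*counter)++));
    }
    SRF_RETURN_DONE(fctx);
}
```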
*/ - bool nulls[4]; - securec_check(memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)), "\0", "\0"); - HeapTuple tuple = heap_form_tuple(fctx->tuple_desc, (Datum*)values, (bool*)nulls); - Datum result = HeapTupleGetDatum(tuple); - itemState->blkno++; - SRF_RETURN_NEXT(fctx, result); - } else { - if (itemState->rbStruct.header != NULL) { - pc_munmap(itemState->rbStruct.header); - } - FreeFile(itemState->pcaFile); - FreeFile(itemState->rbStruct.fp); - SRF_RETURN_DONE(fctx); - } -} /* * stat a file diff --git a/src/common/backend/utils/adt/int.cpp b/src/common/backend/utils/adt/int.cpp index 10eb1be59..c39654b13 100644 --- a/src/common/backend/utils/adt/int.cpp +++ b/src/common/backend/utils/adt/int.cpp @@ -39,7 +39,6 @@ #include "libpq/pqformat.h" #include "utils/array.h" #include "utils/builtins.h" -#include "utils/guc_sql.h" #define SAMESIGN(a, b) (((a) < 0) == ((b) < 0)) @@ -70,23 +69,10 @@ Datum int2in(PG_FUNCTION_ARGS) */ Datum int2out(PG_FUNCTION_ARGS) { - char* result = NULL; int16 arg1 = PG_GETARG_INT16(0); + char* result = (char*)palloc(7); /* sign, 5 digits, '\0' */ - if (u_sess->attr.attr_sql.for_print_tuple) { - double var = (double)arg1; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - } - - if (result == NULL) { - result = (char*)palloc(7); /* sign, 5 digits, '\0' */ - pg_itoa(arg1, result); - } - + pg_itoa(arg1, result); PG_RETURN_CSTRING(result); } @@ -318,23 +304,10 @@ Datum int4in(PG_FUNCTION_ARGS) */ Datum int4out(PG_FUNCTION_ARGS) { - char* result = NULL; int32 arg1 = PG_GETARG_INT32(0); + char* result = (char*)palloc(12); /* sign, 10 digits, '\0' */ - if (u_sess->attr.attr_sql.for_print_tuple) { - double var = (double)arg1; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - } - - if (result == NULL) { - result = (char*)palloc(12); /* sign, 10 digits, '\0' */ - pg_ltoa(arg1, result); - } - + pg_ltoa(arg1, result); PG_RETURN_CSTRING(result); } @@ -1252,23 +1225,10 @@ Datum int1in(PG_FUNCTION_ARGS) // int1out - converts uint8 to "num" Datum int1out(PG_FUNCTION_ARGS) { - char* result = NULL; uint8 arg1 = PG_GETARG_UINT8(0); + char* result = (char*)palloc(5); /* sign, 3 digits, '\0' */ - if (u_sess->attr.attr_sql.for_print_tuple) { - double var = (double)arg1; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - } - - if (result == NULL) { - result = (char*)palloc(5); /* sign, 3 digits, '\0' */ - pg_ctoa(arg1, result); - } - + pg_ctoa(arg1, result); PG_RETURN_CSTRING(result); } diff --git a/src/common/backend/utils/adt/int16.cpp b/src/common/backend/utils/adt/int16.cpp index 3b07fe2e5..dd27c8dd7 100644 --- a/src/common/backend/utils/adt/int16.cpp +++ b/src/common/backend/utils/adt/int16.cpp @@ -18,7 +18,6 @@ * Internal 128-bit integer operations. * * Portions Copyright (c) 2018, Huawei Tech. Co., Ltd. 
- * Portions Copyright (c) 2021, openGauss Contributors * * IDENTIFICATION * src/common/backend/utils/adt/int16.cpp diff --git a/src/common/backend/utils/adt/int8.cpp b/src/common/backend/utils/adt/int8.cpp index a05ed0653..6dface912 100644 --- a/src/common/backend/utils/adt/int8.cpp +++ b/src/common/backend/utils/adt/int8.cpp @@ -24,7 +24,6 @@ #include "libpq/pqformat.h" #include "utils/int8.h" #include "utils/builtins.h" -#include "utils/guc_sql.h" #define MAXINT8LEN 25 @@ -154,23 +153,11 @@ Datum int8in(PG_FUNCTION_ARGS) Datum int8out(PG_FUNCTION_ARGS) { int64 val = PG_GETARG_INT64(0); + char buf[MAXINT8LEN + 1]; char* result = NULL; - if (u_sess->attr.attr_sql.for_print_tuple) { - double var = (double)val; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - } - - if (result == NULL) { - char buf[MAXINT8LEN + 1]; - pg_lltoa(val, buf); - result = pstrdup(buf); - } - + pg_lltoa(val, buf); + result = pstrdup(buf); PG_RETURN_CSTRING(result); } diff --git a/src/common/backend/utils/adt/like.cpp b/src/common/backend/utils/adt/like.cpp index 811cb1952..9aafceeaf 100644 --- a/src/common/backend/utils/adt/like.cpp +++ b/src/common/backend/utils/adt/like.cpp @@ -252,6 +252,11 @@ Datum textlike(PG_FUNCTION_ARGS) { text* str = PG_GETARG_TEXT_PP(0); text* pat = PG_GETARG_TEXT_PP(1); + if (unlikely(VARATT_IS_HUGE_TOAST_POINTER(str))) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("like 'partten' cannot exceed 1GB"))); + } bool result = false; char *s, *p; int slen, plen; diff --git a/src/common/backend/utils/adt/lockfuncs.cpp b/src/common/backend/utils/adt/lockfuncs.cpp index e559daf96..a028373c1 100644 --- a/src/common/backend/utils/adt/lockfuncs.cpp +++ b/src/common/backend/utils/adt/lockfuncs.cpp @@ -54,7 +54,8 @@ const char* const LockTagTypeNames[] = {"relation", "userlock", "advisory", "filenode", - "subtransactionid"}; + "subtransactionid", + "tuple_uid"}; /* This must match enum PredicateLockTargetType (predicate_internals.h) */ static const char* const PredicateLockTagTypeNames[] = {"relation", "page", "tuple"}; @@ -347,6 +348,20 @@ Datum pg_lock_status(PG_FUNCTION_ARGS) values[5] = UInt16GetDatum(instance->locktag.locktag_field5); nulls[6] = true; nulls[7] = true; + nulls[8] = true; + nulls[9] = true; + nulls[10] = true; + break; + case LOCKTAG_UID: + values[1] = ObjectIdGetDatum(instance->locktag.locktag_field1); + values[2] = ObjectIdGetDatum(instance->locktag.locktag_field2); + nulls[3] = true; + nulls[4] = true; + nulls[5] = true; + nulls[6] = true; + values[7] = TransactionIdGetDatum((uint64)instance->locktag.locktag_field3 << 32 | + ((uint64)instance->locktag.locktag_field4)); + nulls[8] = true; nulls[9] = true; nulls[10] = true; @@ -639,9 +654,9 @@ static bool pgxc_advisory_lock(int64 key64, int32 key1, int32 key2, bool iskeybi * can not process SIGUSR1 of "pgxc_pool_reload" command immediately. */ #ifdef ENABLE_MULTIPLE_NODES - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } #endif PgxcNodeGetOids(&coOids, &dnOids, &numcoords, &numdnodes, false); @@ -1460,6 +1475,23 @@ Datum pgxc_lock_for_sp_database(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } +bool pg_try_advisory_lock_for_redis(Relation rel) +{ + LOCKMODE lockmode = u_sess->attr.attr_sql.enable_cluster_resize ? 
ExclusiveLock : ShareLock; + LockLevel locklevel = u_sess->attr.attr_sql.enable_cluster_resize ? SESSION_LOCK : TRANSACTION_LOCK; + TryType locktry = u_sess->attr.attr_sql.enable_cluster_resize ? WAIT : DONT_WAIT; + bool result = pgxc_advisory_lock(0, 65534, RelationGetRelCnOid(rel), false, lockmode, locklevel, locktry, NULL); + if (u_sess->attr.attr_sql.enable_cluster_resize && result) { + return true; + } else if (result) { + LOCKTAG tag; + SET_LOCKTAG_INT32_DB(tag, u_sess->proc_cxt.MyDatabaseId, 65534, RelationGetRelCnOid(rel)); + (void)LockRelease(&tag, ShareLock, false); + return true; + } + return false; +} + /* * pgxc_lock_for_backup * diff --git a/src/common/backend/utils/adt/memory_func.cpp b/src/common/backend/utils/adt/memory_func.cpp new file mode 100644 index 000000000..c193aebb0 --- /dev/null +++ b/src/common/backend/utils/adt/memory_func.cpp @@ -0,0 +1,522 @@ + +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ + +#include "memory_func.h" + +#ifdef MEMORY_CONTEXT_TRACK +/* + * for view gs_get_shared_memctx_detail; + */ +void gs_recursive_shared_memory_context(const MemoryContext context, + const char* ctx_name, StringInfoData* buf, bool isShared) +{ + bool checkLock = false; + + if (context == NULL) { + return; + } + + PG_TRY(); + { + CHECK_FOR_INTERRUPTS(); + check_stack_depth(); + + if (isShared) { + MemoryContextLock(context); + checkLock = true; + } + + if (context->type == T_SharedAllocSetContext && strcmp(ctx_name, context->name) == 0) { +#ifndef ENABLE_MEMORY_CHECK + GetAllocBlockInfo((AllocSet)context, buf); +#else + appendStringInfo(buf, "context : %s\n", context->name); + GetAsanBlockInfo((AsanSet)context, buf); +#endif + } + + /* recursive MemoryContext's child */ + for (MemoryContext child = context->firstchild; child != NULL; child = child->nextchild) { + if (child->is_shared) { + gs_recursive_shared_memory_context(child, ctx_name, buf, child->is_shared); + } + } + } + PG_CATCH(); + { + if (isShared && checkLock) { + MemoryContextUnlock(context); + } + PG_RE_THROW(); + } + PG_END_TRY(); + + if (isShared) { + MemoryContextUnlock(context); + } + + return; +} + +/* + * for view gs_get_thread_memctx_detail and gs_get_session_memctx_detail; + */ +void gs_recursive_unshared_memory_context(const MemoryContext context, + const char* ctx_name, StringInfoData* buf) +{ + if (context == NULL) { + return; + } + +#ifndef ENABLE_MEMORY_CHECK + if ((context->type == T_AllocSetContext) && strcmp(ctx_name, context->name) == 0) { + GetAllocBlockInfo((AllocSet)context, buf); + } +#else + if ((context->type == T_AsanSetContext) && strcmp(ctx_name, context->name) == 0) { + appendStringInfo(buf, "context : %s\n", context->name); + GetAsanBlockInfo((AsanSet)context, buf); + } +#endif + + CHECK_FOR_INTERRUPTS(); + check_stack_depth(); + + /* recursive MemoryContext's child */ + for (MemoryContext child = context->firstchild; child != NULL; child = child->nextchild) { + 
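+        /* depth-first descent into child contexts; CHECK_FOR_INTERRUPTS()
+         * and check_stack_depth() above keep the recursion cancellable
+         * and stack-safe */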
gs_recursive_unshared_memory_context(child, ctx_name, buf); + } + + return; +} + +/* + * file dictionary order, line number from small to large + */ +static int gs_alloc_chunk_cmp(const void* cmp_a, const void* cmp_b) +{ + if (cmp_a == NULL && cmp_b == NULL) { + return 0; + } else if (cmp_a == NULL) { + return 1; + } else if (cmp_b == NULL) { + return -1; + } + + AllocChunk chunk_a = (AllocChunk)cmp_a; + AllocChunk chunk_b = (AllocChunk)cmp_b; + + int cmp_file = strcmp(chunk_a->file, chunk_b->file); + if (cmp_file != 0) { + return cmp_file; + } + + if (chunk_a->line < chunk_b->line) { + return -1; + } else if (chunk_a->line == chunk_b->line) { + return 0; + } else { + return 1; + } + + return 0; +} + +/* + * file dictionary order, line number from small to large; the size of same file and line will be summation; + */ +static void gs_sort_memctx_info(AllocChunk memctx_info_res, int memctx_info_cnt, int* res_len) +{ + int i = 0; + int j = 1; + + qsort(memctx_info_res, memctx_info_cnt, sizeof(AllocChunkData), gs_alloc_chunk_cmp); + + while (j < memctx_info_cnt) { + AllocChunk chunk_i = &memctx_info_res[i]; + AllocChunk chunk_j = &memctx_info_res[j]; + if (strcmp(chunk_i->file, chunk_j->file) == 0 && chunk_i->line == chunk_j->line) { + chunk_i->size += chunk_j->size; + ++j; + continue; + } + + ++i; + chunk_i = &memctx_info_res[i]; + const char* tmp = chunk_i->file; + chunk_i->file = chunk_j->file; + chunk_j->file = tmp; + chunk_i->line = chunk_j->line; + chunk_i->size = chunk_j->size; + ++j; + } + *res_len = i + 1; +} + +/* + * collect the file and line info + */ +static AllocChunk gs_collate_memctx_info(StringInfo mem_info, int* res_len) +{ + if (mem_info == NULL) { + *res_len = 0; + return NULL; + } + + int i = 0; + int memctx_info_cnt = 0; + + /* find alloc chunk info count */ + for (int i = 0; i < mem_info->len; ++i) { + if (mem_info->data[i] == ':') { + ++memctx_info_cnt; + } + } + + if (memctx_info_cnt == 0) { + *res_len = 0; + return NULL; + } + + /* Traverse memory application information */ + AllocChunk memctx_info_res = (AllocChunk)palloc(sizeof(AllocChunkData) * memctx_info_cnt); + char* file_name = mem_info->data; + char* tmp_file_name = mem_info->data; + char* line = NULL; + char* real_size = NULL; + const int divide_size = 2; + for (i = 0; i < memctx_info_cnt; ++i) { + file_name = tmp_file_name; + line = strchr(file_name, ':'); + if (line == NULL) { + continue; + } + *line = '\0'; + ++line; + + real_size = strchr(line, ','); + if (real_size == NULL) { + continue; + } + *real_size = '\0'; + real_size += divide_size; + + tmp_file_name = strchr(real_size, '\n'); + if (tmp_file_name == NULL) { + continue; + } + *tmp_file_name = '\0'; + ++tmp_file_name; + + if (strcmp(file_name, "(null)") == 0 || strcmp(line, "0") == 0) { + continue; + } + + char* file_name_begin_pos = strrchr(file_name, '/'); + if (file_name_begin_pos != NULL) { + file_name = file_name_begin_pos + 1; + } + + AllocChunk chunk_res = &memctx_info_res[i]; + int file_name_len = strlen(file_name); + chunk_res->file = (char*)palloc(sizeof(char) * (file_name_len + 1)); + int rc = memcpy_s((char*)chunk_res->file, file_name_len, file_name, file_name_len); + securec_check_c(rc, "\0", "\0"); + ((char*)chunk_res->file)[file_name_len] = '\0'; + chunk_res->line = atoi(line); + chunk_res->size = atoi(real_size); + } + + gs_sort_memctx_info(memctx_info_res, memctx_info_cnt, res_len); + + return memctx_info_res; +} + +static TupleDesc get_memctx_view_first_row(const unsigned col_num) +{ + /* the col num of view */ + TupleDesc tupdesc 
= CreateTemplateTupleDesc(col_num, false); + + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_1, "file", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_2, "line", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_3, "size", INT8OID, -1, 0); + + return BlessTupleDesc(tupdesc); +} + +static HeapTuple fetch_memctx_view_values(FuncCallContext* funcctx, AllocChunk memctx_chunk, + const unsigned col_num) +{ + Datum values[col_num]; + bool nulls[col_num]; + + errno_t rc; + rc = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check_c(rc, "\0", "\0"); + rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check_c(rc, "\0", "\0"); + + values[ARG_0] = CStringGetTextDatum(memctx_chunk->file); + values[ARG_1] = Int64GetDatum(memctx_chunk->line); + values[ARG_2] = Int64GetDatum(memctx_chunk->size); + + return heap_form_tuple(funcctx->tuple_desc, values, nulls); +} + +void gs_check_context_name_valid(const char* ctx_name) +{ + if (!t_thrd.utils_cxt.gs_mp_inited) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported view for memory protection feature is disabled."))); + } + + if (ctx_name == NULL || strlen(ctx_name) == 0) { + ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), errmsg("invalid name of memory context: NULL or \"\""))); + } + + if (strlen(ctx_name) >= MEMORY_CONTEXT_NAME_LEN) { + ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), + errmsg("The name of memory context is too long(>=%dbytes)", MEMORY_CONTEXT_NAME_LEN))); + } +} +#endif +/* + * select gs_get_thread_memctx_detail(tid, 'CBBTopMemoryContext'); + * tid is from the 2sd item of pv_thread_memory_context(); + */ +Datum gs_get_thread_memctx_detail(PG_FUNCTION_ARGS) +{ +#ifndef MEMORY_CONTEXT_TRACK + FuncCallContext* funcctx = NULL; + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported view in lite mode or numa mode."))); + SRF_RETURN_DONE(funcctx); +#else + + if (!superuser() && !isMonitoradmin(GetUserId())) { + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_PROC, "gs_get_thread_memctx_detail"); + } + + const ThreadId tid = PG_GETARG_INT64(0); + char* ctx_name = TextDatumGetCString(PG_GETARG_TEXT_PP(1)); + + gs_check_context_name_valid(ctx_name); + + if (tid == 0) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid thread id %ld .", tid))); + } + +#define GS_THREAD_MEMCTX_VIEW_ATTRNUM 3 + FuncCallContext* funcctx = NULL; + AllocChunk memctx_info_res; + + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + StringInfoData mem_info; + initStringInfo(&mem_info); + + if (tid == PostmasterPid) { + if (!IsNormalProcessingMode()) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("the thread state is abnormal %ld\n", tid))); + } + gs_recursive_unshared_memory_context(PmTopMemoryContext, ctx_name, &mem_info); + } else { + bool find_thread = false; + MemoryContext thrd_top_cxt = NULL; + uint32 max_thread_count = g_instance.proc_base->allProcCount - + g_instance.attr.attr_storage.max_prepared_xacts * NUM_TWOPHASE_PARTITIONS; + volatile PGPROC* proc = NULL; + for (uint32 idx = 0; idx < max_thread_count; idx++) { + proc = g_instance.proc_base_all_procs[idx]; + if (proc->pid == tid) { + thrd_top_cxt = proc->topmcxt; + find_thread = true; + break; + } + } + + if (find_thread == false) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("can not find pid %ld\n", tid))); + } + + PG_TRY(); + { + 
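+            /* hold the target thread's deleMemContextMutex while walking its
+             * contexts so they cannot be deleted mid-traversal; the PG_CATCH
+             * below releases the lock before re-throwing */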
(void)syscalllockAcquire(&((PGPROC*)proc)->deleMemContextMutex); + gs_recursive_unshared_memory_context(thrd_top_cxt, ctx_name, &mem_info); + (void)syscalllockRelease(&((PGPROC*)proc)->deleMemContextMutex); + } + PG_CATCH(); + { + (void)syscalllockRelease(&((PGPROC*)proc)->deleMemContextMutex); + PG_RE_THROW(); + } + PG_END_TRY(); + } + + int memctx_info_len = 0; + memctx_info_res = gs_collate_memctx_info(&mem_info, &memctx_info_len); + + funcctx->tuple_desc = get_memctx_view_first_row(GS_THREAD_MEMCTX_VIEW_ATTRNUM); + funcctx->max_calls = memctx_info_len; + funcctx->user_fctx = memctx_info_res; + + FreeStringInfo(&mem_info); + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + memctx_info_res = (AllocChunk)(funcctx->user_fctx); + if (funcctx->call_cntr < funcctx->max_calls) { + AllocChunk memctx_chunk = memctx_info_res + funcctx->call_cntr; + HeapTuple tuple = fetch_memctx_view_values(funcctx, memctx_chunk, GS_THREAD_MEMCTX_VIEW_ATTRNUM); + pfree_ext(ctx_name); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + + pfree_ext(memctx_info_res); + pfree_ext(ctx_name); + SRF_RETURN_DONE(funcctx); +#endif +} + +/* + * select gs_get_session_memctx_detail('CBBTopMemoryContext'); + */ +Datum gs_get_session_memctx_detail(PG_FUNCTION_ARGS) +{ +#ifndef MEMORY_CONTEXT_TRACK + FuncCallContext* funcctx = NULL; + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported view in lite mode or numa mode."))); + SRF_RETURN_DONE(funcctx); +#else + + if (!superuser() && !isMonitoradmin(GetUserId())) { + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_PROC, "gs_get_session_memctx_detail"); + } + + char* ctx_name = TextDatumGetCString(PG_GETARG_TEXT_PP(0)); + + gs_check_context_name_valid(ctx_name); + + if (!ENABLE_THREAD_POOL) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported view for thread pool is disabled."))); + } + +#define GS_SESSION_MEMCTX_VIEW_ATTRNUM 3 + FuncCallContext* funcctx = NULL; + AllocChunk memctx_info_res; + knl_sess_control* sess = NULL; + + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + StringInfoData mem_info; + initStringInfo(&mem_info); + g_threadPoolControler->GetSessionCtrl()->getSessionMemoryContextInfo(ctx_name, &mem_info, &sess); + + int memctx_info_len = 0; + memctx_info_res = gs_collate_memctx_info(&mem_info, &memctx_info_len); + + funcctx->tuple_desc = get_memctx_view_first_row(GS_SESSION_MEMCTX_VIEW_ATTRNUM); + funcctx->max_calls = memctx_info_len; + funcctx->user_fctx = memctx_info_res; + + FreeStringInfo(&mem_info); + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + memctx_info_res = (AllocChunk)(funcctx->user_fctx); + if (funcctx->call_cntr < funcctx->max_calls) { + AllocChunk memctx_chunk = memctx_info_res + funcctx->call_cntr; + HeapTuple tuple = fetch_memctx_view_values(funcctx, memctx_chunk, GS_SESSION_MEMCTX_VIEW_ATTRNUM); + pfree_ext(ctx_name); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + + pfree_ext(memctx_info_res); + pfree_ext(ctx_name); + SRF_RETURN_DONE(funcctx); +#endif +} + +/* + * select gs_get_shared_memctx_detail('CBBTopMemoryContext'); + */ +Datum gs_get_shared_memctx_detail(PG_FUNCTION_ARGS) +{ +#ifndef MEMORY_CONTEXT_TRACK + FuncCallContext* funcctx = NULL; + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported view in lite mode or numa mode."))); + SRF_RETURN_DONE(funcctx); +#else + + if (!superuser() && 
!isMonitoradmin(GetUserId())) { + aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_PROC, "gs_get_shared_memctx_detail"); + } + + char* ctx_name = TextDatumGetCString(PG_GETARG_TEXT_PP(0)); + + gs_check_context_name_valid(ctx_name); + +#define GS_SHARED_MEMCTX_VIEW_ATTRNUM 3 + FuncCallContext* funcctx = NULL; + AllocChunk memctx_info_res; + + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + StringInfoData mem_info; + initStringInfo(&mem_info); + gs_recursive_shared_memory_context(g_instance.instance_context, ctx_name, &mem_info, true); + + int memctx_info_len = 0; + memctx_info_res = gs_collate_memctx_info(&mem_info, &memctx_info_len); + + funcctx->tuple_desc = get_memctx_view_first_row(GS_SHARED_MEMCTX_VIEW_ATTRNUM); + funcctx->max_calls = memctx_info_len; + funcctx->user_fctx = memctx_info_res; + + FreeStringInfo(&mem_info); + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + memctx_info_res = (AllocChunk)(funcctx->user_fctx); + if (funcctx->call_cntr < funcctx->max_calls) { + AllocChunk memctx_chunk = memctx_info_res + funcctx->call_cntr; + HeapTuple tuple = fetch_memctx_view_values(funcctx, memctx_chunk, GS_SHARED_MEMCTX_VIEW_ATTRNUM); + pfree_ext(ctx_name); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + + pfree_ext(memctx_info_res); + pfree_ext(ctx_name); + SRF_RETURN_DONE(funcctx); +#endif +} + diff --git a/src/common/backend/utils/adt/misc.cpp b/src/common/backend/utils/adt/misc.cpp index f577eb533..a544ca3e2 100644 --- a/src/common/backend/utils/adt/misc.cpp +++ b/src/common/backend/utils/adt/misc.cpp @@ -74,10 +74,15 @@ Datum current_database(PG_FUNCTION_ARGS) Datum current_query(PG_FUNCTION_ARGS) { /* there is no easy way to access the more concise 'query_string' */ - if (t_thrd.postgres_cxt.debug_query_string) - PG_RETURN_TEXT_P(cstring_to_text(t_thrd.postgres_cxt.debug_query_string)); - else + if (t_thrd.postgres_cxt.debug_query_string) { + char *mask_string = maskPassword(t_thrd.postgres_cxt.debug_query_string); + if (mask_string == NULL) { + mask_string = (char *)t_thrd.postgres_cxt.debug_query_string; + } + PG_RETURN_TEXT_P(cstring_to_text(mask_string)); + } else { PG_RETURN_NULL(); + } } /* @@ -274,11 +279,7 @@ Datum pg_cancel_invalid_query(PG_FUNCTION_ARGS) (errmsg("must be system admin to cancel invalid queries running in all server processes")))); PG_RETURN_BOOL(false); } else { - if (GTM_LITE_MODE) { - proc_cancel_invalid_gtm_lite_conn(); - } else { - pgstat_cancel_invalid_gtm_conn(); - } + proc_cancel_invalid_gtm_lite_conn(); PG_RETURN_BOOL(true); } #endif diff --git a/src/common/backend/utils/adt/nlssort.cpp b/src/common/backend/utils/adt/nlssort.cpp index 78195c725..5358be459 100644 --- a/src/common/backend/utils/adt/nlssort.cpp +++ b/src/common/backend/utils/adt/nlssort.cpp @@ -25,6 +25,7 @@ #include "postgres.h" #include "mb/pg_wchar.h" #include "utils/builtins.h" +#include "knl/knl_session.h" #include "../mb/nlssort/nlssort_pinyin_map1_simple.map" #include "../mb/nlssort/nlssort_pinyin_map1_complex.map" @@ -67,6 +68,13 @@ Datum nlssort(PG_FUNCTION_ARGS) if (PG_ARGISNULL(0)) { PG_RETURN_NULL(); } + if (VARSIZE_ANY_EXHDR(PG_GETARG_TEXT_P(0)) == 0) { + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + PG_RETURN_NULL(); + } else { + PG_RETURN_TEXT_P(cstring_to_text("\0")); + } + } /* encode the first argument with "gb18030" */ Datum tmp = DirectFunctionCall3(pg_convert, PG_GETARG_DATUM(0), 
CStringGetDatum(GetDatabaseEncodingName()), @@ -80,6 +88,8 @@ Datum nlssort(PG_FUNCTION_ARGS) errcause("Error in the nlssort parameter."), erraction("Please check and revise your parameter."))); } + pfree_ext(chars_to_be_encoded); + pfree_ext(nlssort_arg); pfree_ext(sort_method); PG_RETURN_TEXT_P(cstring_to_text(chars_encoded)); @@ -223,6 +233,7 @@ char *remove_trailing_spaces(const char *src_str) { bool is_all_space = true; int len = strlen(src_str); + Assert(len > 0); int buf_size = strlen(src_str) + 1; char *dst_str = (char *)palloc(buf_size); diff --git a/src/common/backend/utils/adt/numeric.cpp b/src/common/backend/utils/adt/numeric.cpp index 48471ec04..e77a2e38a 100644 --- a/src/common/backend/utils/adt/numeric.cpp +++ b/src/common/backend/utils/adt/numeric.cpp @@ -39,7 +39,6 @@ #include "utils/biginteger.h" #include "utils/gs_bitmap.h" #include "utils/guc.h" -#include "utils/guc_sql.h" #include "utils/int8.h" #include "utils/numeric.h" #include "utils/sortsupport.h" @@ -161,7 +160,6 @@ static void zero_var(NumericVar* var); static const char* set_var_from_str(const char* str, const char* cp, NumericVar* dest); static void set_var_from_num(Numeric value, NumericVar* dest); -static void init_var_from_num(Numeric num, NumericVar* dest); static void set_var_from_var(const NumericVar* value, NumericVar* dest); static char* get_str_from_var(NumericVar* var); static char* get_str_from_var_sci(NumericVar* var, int rscale); @@ -169,7 +167,6 @@ static char* get_str_from_var_sci(NumericVar* var, int rscale); static void apply_typmod(NumericVar* var, int32 typmod); static int32 numericvar_to_int32(const NumericVar* var); -static bool numericvar_to_int64(const NumericVar* var, int64* result); static double numeric_to_double_no_overflow(Numeric num); static double numericvar_to_double_no_overflow(NumericVar* var); @@ -336,22 +333,6 @@ Datum numeric_out(PG_FUNCTION_ARGS) char* str = NULL; int scale = 0; - if (u_sess->attr.attr_sql.for_print_tuple && !u_sess->attr.attr_sql.numeric_out_for_format) { - char *result = NULL; - /* to prevent stack overflow*/ - u_sess->attr.attr_sql.numeric_out_for_format = true; - double var = DatumGetFloat8(DirectFunctionCall1(numeric_float8, NumericGetDatum(num))); - u_sess->attr.attr_sql.numeric_out_for_format = false; - if (strcmp(u_sess->attr.attr_common.pset_num_format, "")) { - result = apply_num_format(var); - } else if (u_sess->attr.attr_common.pset_num_width > 0) { - result = apply_num_width(var); - } - if (result != NULL) { - PG_RETURN_CSTRING(result); - } - } - /* * Handle NaN */ @@ -1395,7 +1376,7 @@ static Datum numeric_abbrev_convert(Datum original_datum, SortSupport ssup) * This is to handle packed datums without needing a palloc/pfree cycle; * we keep and reuse a buffer large enough to handle any short datum. */ - if (VARATT_IS_SHORT(original_varatt)) { + if (!VARATT_IS_HUGE_TOAST_POINTER(original_varatt) && VARATT_IS_SHORT(original_varatt)) { void* buf = nss->buf; Size sz = VARSIZE_SHORT(original_varatt) - VARHDRSZ_SHORT; @@ -4122,9 +4103,7 @@ static const char* set_var_from_str(const char* str, const char* cp, NumericVar* static void set_var_from_num(Numeric num, NumericVar* dest) { Assert(!NUMERIC_IS_BI(num)); - int ndigits; - - ndigits = NUMERIC_NDIGITS(num); + int ndigits = NUMERIC_NDIGITS(num); alloc_var(dest, ndigits); @@ -4152,7 +4131,7 @@ static void set_var_from_num(Numeric num, NumericVar* dest) * propagate to the original Numeric! It's OK to use it as the destination * argument of one of the calculational functions, though. 
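These numeric.cpp hunks export init_var_from_num and numericvar_to_int64 (both previously static). A short usage sketch, assuming a non-bigint Numeric num in scope; the error wording is illustrative only:

```
NumericVar var;
int64 val;

init_var_from_num(num, &var);          /* no copy: var aliases num's digits */
if (!numericvar_to_int64(&var, &val))  /* returns false on int64 overflow */
    ereport(ERROR,
        (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
         errmsg("value does not fit in bigint")));
```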
*/ -static inline void init_var_from_num(Numeric num, NumericVar* dest) +void init_var_from_num(Numeric num, NumericVar* dest) { Assert(!NUMERIC_IS_BI(num)); dest->ndigits = NUMERIC_NDIGITS(num); @@ -4617,7 +4596,7 @@ static void apply_typmod(NumericVar* var, int32 typmod) * * If overflow, return false (no error is raised). Return true if okay. */ -static bool numericvar_to_int64(const NumericVar* var, int64* result) +bool numericvar_to_int64(const NumericVar* var, int64* result) { NumericDigit* digits = NULL; int ndigits; @@ -18565,7 +18544,7 @@ int convert_int128_to_short_numeric_byscale(_out_ char* outBuf, _in_ int128 v, _ * @IN value: input numeric value. * @return: Numeric - Datum points to fast numeric format */ -Datum try_convert_numeric_normal_to_fast(Datum value) +Datum try_convert_numeric_normal_to_fast(Datum value, ScalarVector *arr) { Numeric val = DatumGetNumeric(value); @@ -18580,7 +18559,7 @@ Datum try_convert_numeric_normal_to_fast(Datum value) // should be ( whole_scale <= MAXINT64DIGIT) if (CAN_CONVERT_BI64(whole_scale)) { int64 result = convert_short_numeric_to_int64_byscale(val, numVar.dscale); - return makeNumeric64(result, numVar.dscale); + return makeNumeric64(result, numVar.dscale, arr); } else if (CAN_CONVERT_BI128(whole_scale)) { int128 result = 0; convert_short_numeric_to_int128_byscale(val, numVar.dscale, result); diff --git a/src/common/backend/utils/adt/pg_lzcompress.cpp b/src/common/backend/utils/adt/pg_lzcompress.cpp index f78152f56..6ff680251 100644 --- a/src/common/backend/utils/adt/pg_lzcompress.cpp +++ b/src/common/backend/utils/adt/pg_lzcompress.cpp @@ -320,9 +320,6 @@ const PGLZ_Strategy* const PGLZ_strategy_always = &strategy_always_data; #define HIST_START_LEN (sizeof(PGLZ_HistEntry*) * PGLZ_HISTORY_LISTS) #define HIST_ENTRIES_LEN (sizeof(PGLZ_HistEntry) * PGLZ_HISTORY_SIZE) -#define MAX_GOOD_DROP 100 -#define MAX_NEED_RATE 99 - /* ---------- * pglz_find_match - * @@ -667,281 +664,3 @@ void pglz_decompress(const PGLZ_Header* source, char* dest) * That's it. */ } - -/* ---------- - * lz_compress - - * - * Compresses source into dest using strategy. Returns the number of - * bytes written in buffer dest, or -1 if compression fails. - * ---------- - */ -int32 lz_compress(const char* source, int32 slen, char* dest) -{ - unsigned char* bp = (unsigned char*) dest; - unsigned char* bstart = bp; - int hist_next = 0; - bool hist_recycle = false; - const char* dp = source; - const char* dend = source + slen; - unsigned char ctrl_dummy = 0; - unsigned char* ctrlp = &ctrl_dummy; - unsigned char ctrlb = 0; - unsigned char ctrl = 0; - bool found_match = false; - int32 match_len; - int32 match_off; - int32 good_match; - int32 good_drop; - int32 result_size; - int32 result_max; - int32 need_rate; - errno_t rc; - - const PGLZ_Strategy* strategy = PGLZ_strategy_always; - /* - * Our fallback strategy is the default. - */ - if (strategy == NULL) { - strategy = PGLZ_strategy_default; - } - - /* - * If the strategy forbids compression (at all or if source chunk size out - * of range), fail. - */ - if (strategy->match_size_good <= 0 || slen < strategy->min_input_size || slen > strategy->max_input_size) { - return -1; - } - - /* - * Limit the match parameters to the supported range. 
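One detail of the lz_compress body removed below is worth keeping in mind while reading it: the maximum-result-size cap reorders its arithmetic to stay inside int32. Restated standalone with the same semantics (need_rate is the clamped min_comp_rate from the strategy):

```
/* overflow-safe compression-rate cap: for slen > INT_MAX / 100,
 * slen * (100 - need_rate) would overflow int32, so divide first and
 * accept a slightly coarser bound */
int32 result_max = (slen > (INT_MAX / 100))
    ? (slen / 100) * (100 - need_rate)   /* approximate, overflow-free */
    : (slen * (100 - need_rate)) / 100;  /* exact */
```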
- */ - good_match = strategy->match_size_good; - if (good_match > PGLZ_MAX_MATCH) { - good_match = PGLZ_MAX_MATCH; - } else if (good_match < 17) { - good_match = 17; - } - - good_drop = strategy->match_size_drop; - if (good_drop < 0) { - good_drop = 0; - } else if (good_drop > MAX_GOOD_DROP) { - good_drop = MAX_GOOD_DROP; - } - - need_rate = strategy->min_comp_rate; - if (need_rate < 0) { - need_rate = 0; - } else if (need_rate > MAX_NEED_RATE) { - need_rate = MAX_NEED_RATE; - } - - /* - * Compute the maximum result size allowed by the strategy, namely the - * input size minus the minimum wanted compression rate. This had better - * be <= slen, else we might overrun the provided output buffer. - */ - if (slen > (INT_MAX / 100)) { - /* Approximate to avoid overflow */ - result_max = (slen / 100) * (100 - need_rate); - } else { - result_max = (slen * (100 - need_rate)) / 100; - } - - /* - * Initialize the history lists to empty. We do not need to zero the - * hist_entries[] array; its entries are initialized as they are used. - */ - rc = memset_s(u_sess->utils_cxt.hist_start, HIST_START_LEN, 0, HIST_START_LEN); - securec_check(rc, "\0", "\0"); - - /* - * Compress the source directly into the output buffer. - */ - while (dp < dend) { - /* - * If we already exceeded the maximum result size, fail. - * - * We check once per loop; since the loop body could emit as many as 4 - * bytes (a control byte and 3-byte tag), PGLZ_MAX_OUTPUT() had better - * allow 4 slop bytes. - */ - if (bp - bstart >= result_max) { - return -1; - } - - /* - * If we've emitted more than first_success_by bytes without finding - * anything compressible at all, fail. This lets us fall out - * reasonably quickly when looking at incompressible input (such as - * pre-compressed data). - */ - if (!found_match && bp - bstart >= strategy->first_success_by) { - return -1; - } - - /* - * Try to find a match in the history - */ - if (pglz_find_match(u_sess->utils_cxt.hist_start, dp, dend, &match_len, &match_off, good_match, good_drop)) { - /* - * Create the tag and add history entries for all matched - * characters. - */ - pglz_out_tag(ctrlp, ctrlb, ctrl, bp, match_len, match_off); - while (match_len--) { - pglz_hist_add( - u_sess->utils_cxt.hist_start, u_sess->utils_cxt.hist_entries, hist_next, hist_recycle, dp, - dend); - dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ - } - found_match = true; - } else { - /* - * No match found. Copy one literal byte. - */ - pglz_out_literal(ctrlp, ctrlb, ctrl, bp, *dp); - pglz_hist_add( - u_sess->utils_cxt.hist_start, u_sess->utils_cxt.hist_entries, hist_next, hist_recycle, dp, dend); - dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ - } - } - - /* - * Write out the last control byte and check that we haven't overrun the - * output size allowed by the strategy. - */ - *ctrlp = ctrlb; - result_size = bp - bstart; - if (result_size >= result_max) { - return -1; - } - - /* success */ - return result_size; -} - -/* ---------- - * pglz_decompress - - * - * Decompresses source into dest. Returns the number of bytes - * decompressed in the destination buffer, and *optionally* - * checks that both the source and dest buffers have been - * fully read and written to, respectively. 
- * ---------- - */ -int32 lz_decompress(const char* source, int32 slen, char* dest, int32 rawsize, bool check_complete) -{ - const unsigned char* sp; - const unsigned char* srcend; - unsigned char* dp; - unsigned char* destend; - errno_t rc = 0; - - sp = (const unsigned char*) source; - srcend = ((const unsigned char*) source) + slen; - dp = (unsigned char*) dest; - destend = dp + rawsize; - - while (sp < srcend && dp < destend) { - /* - * Read one control byte and process the next 8 items (or as many as - * remain in the compressed input). - */ - unsigned char ctrl = *sp++; - int ctrlc; - - for (ctrlc = 0; ctrlc < 8 && sp < srcend && dp < destend; ctrlc++) { - if (ctrl & 1) { - /* - * Set control bit means we must read a match tag. The match - * is coded with two bytes. First byte uses lower nibble to - * code length - 3. Higher nibble contains upper 4 bits of the - * offset. The next following byte contains the lower 8 bits - * of the offset. If the length is coded as 18, another - * extension tag byte tells how much longer the match really - * was (0-255). - */ - int32 len; - int32 off; - - len = (sp[0] & 0x0f) + 3; - off = ((sp[0] & 0xf0) << 4) | sp[1]; - sp += 2; - if (len == 18) { - len += *sp++; - } - - /* - * Now we copy the bytes specified by the tag from OUTPUT to - * OUTPUT (copy len bytes from dp - off to dp). The copied - * areas could overlap, to preven possible uncertainty, we - * copy only non-overlapping regions. - */ - len = Min(len, destend - dp); - while (off < len) { - /*--------- - * When offset is smaller than length - source and - * destination regions overlap. memmove() is resolving - * this overlap in an incompatible way with pglz. Thus we - * resort to memcpy()-ing non-overlapping regions. - * - * Consider input: 112341234123412341234 - * At byte 5 here ^ we have match with length 16 and - * offset 4. 11234M(len=16, off=4) - * We are decoding first period of match and rewrite match - * 112341234M(len=12, off=8) - * - * The same match is now at position 9, it points to the - * same start byte of output, but from another position: - * the offset is doubled. - * - * We iterate through this offset growth until we can - * proceed to usual memcpy(). If we would try to decode - * the match at byte 5 (len=16, off=4) by memmove() we - * would issue memmove(5, 1, 16) which would produce - * 112341234XXXXXXXXXXXX, where series of X is 12 - * undefined bytes, that were at bytes [5:17]. - * --------- - */ - errno_t rc = memcpy_s(dp, off + 1, dp - off, off); - securec_check(rc, "", ""); - len -= off; - dp += off; - off += off; - } - rc = memcpy_s(dp, len + 1, dp - off, len); - securec_check(rc, "", ""); - dp += len; - } else { - /* - * An unset control bit means LITERAL BYTE. So we just copy - * one from INPUT to OUTPUT. - */ - *dp++ = *sp++; - } - - /* - * Advance the control bit - */ - ctrl >>= 1; - } - } - - /* - * Check we decompressed the right amount. If we are slicing, then we - * won't necessarily be at the end of the source or dest buffers when we - * hit a stop, so we don't test them. - */ - if (check_complete && (dp != destend || sp != srcend)) { - return -1; - } - - /* - * That's it. 
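The match-copy loop being removed above is the subtle core of lz_decompress: when the match offset is smaller than its length, the source region is the output still being written, so bytes must be re-read as they appear; memmove would instead snapshot the source and produce the wrong expansion. A self-contained sketch of the same loop, using plain memcpy where the tree uses memcpy_s:

```
#include <cstring>
#include <cstdio>

// Copy len bytes from dp - off to dp when the regions may overlap,
// re-reading freshly written output, as in the removed lz_decompress.
static void OverlapSafeCopy(unsigned char* dp, int off, int len)
{
    while (off < len) {
        /* copy only the non-overlapping prefix; the usable offset doubles */
        memcpy(dp, dp - off, off);
        len -= off;
        dp += off;
        off += off;
    }
    memcpy(dp, dp - off, len);
}

int main()
{
    unsigned char buf[32] = "11234";
    /* match at byte 5: length 16, offset 4 -> keeps repeating "1234" */
    OverlapSafeCopy(buf + 5, 4, 16);
    buf[21] = '\0';
    printf("%s\n", buf); /* prints 112341234123412341234 */
    return 0;
}
```

Each round copies a non-overlapping region and doubles the offset, so the loop finishes in O(log(len/off)) iterations before one final ordinary copy.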
- */ - return (char*) dp - dest; -} diff --git a/src/common/backend/utils/adt/pgstatfuncs.cpp b/src/common/backend/utils/adt/pgstatfuncs.cpp index e2f3f8f65..d79c6ebe3 100644 --- a/src/common/backend/utils/adt/pgstatfuncs.cpp +++ b/src/common/backend/utils/adt/pgstatfuncs.cpp @@ -21,7 +21,10 @@ #include "access/ustore/undo/knl_uundoapi.h" #include "access/ustore/undo/knl_uundotxn.h" #include "access/ustore/undo/knl_uundozone.h" +#include "access/ubtree.h" #include "access/redo_statistic.h" +#include "access/xlog_internal.h" +#include "access/multi_redo_api.h" #include "connector.h" #include "catalog/namespace.h" #include "catalog/pg_database.h" @@ -32,6 +35,7 @@ #include "commands/dbcommands.h" #include "commands/user.h" #include "commands/vacuum.h" +#include "commands/verify.h" #include "funcapi.h" #include "gaussdb_version.h" #include "libpq/ip.h" @@ -72,6 +76,7 @@ #include "instruments/list.h" #include "replication/rto_statistic.h" #include "storage/lock/lock.h" +#include "nodes/makefuncs.h" #define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32*)&(var)))) #define NUM_PG_LOCKTAG_ID 12 @@ -82,9 +87,7 @@ #define LOWERCASE_LETTERS_ID 87 #define DISPLACEMENTS_VALUE 32 -const int PG_STAT_USP_PERSIST_META_COLS = 7; -const int STAT_UNDO_LOG_SIZE = 17; -const int PG_STAT_TRANSLOT_META_COLS = 5; +const uint32 INDEX_STATUS_VIEW_COL_NUM = 3; /* bogus ... these externs should be in a header file */ extern Datum pg_stat_get_numscans(PG_FUNCTION_ARGS); @@ -195,6 +198,7 @@ extern Datum pg_stat_get_buf_written_backend(PG_FUNCTION_ARGS); extern Datum pg_stat_get_buf_fsync_backend(PG_FUNCTION_ARGS); extern Datum pg_stat_get_buf_alloc(PG_FUNCTION_ARGS); +char g_dir[100] = {0}; typedef enum XactAction { XACTION_INSERT = 0, XACTION_UPDATE, @@ -259,8 +263,6 @@ extern Datum mot_global_memory_detail(PG_FUNCTION_ARGS); extern Datum mot_local_memory_detail(PG_FUNCTION_ARGS); extern Datum gs_total_nodegroup_memory_detail(PG_FUNCTION_ARGS); -/* ustore stat */ -extern Datum gs_stat_ustore(PG_FUNCTION_ARGS); extern Datum track_memory_context_detail(PG_FUNCTION_ARGS); @@ -285,6 +287,8 @@ extern Datum pg_autovac_timeout(PG_FUNCTION_ARGS); static int64 pgxc_exec_autoanalyze_timeout(Oid relOid, int32 coordnum, char* funcname); extern bool allow_autoanalyze(HeapTuple tuple); +int g_stat_file_id = -1; + /* the size of GaussDB_expr.ir */ #define IR_FILE_SIZE 29800 #define WAITSTATELEN 256 @@ -338,6 +342,11 @@ static const char* WaitStateDesc[] = { "wait data sync", // STATE_WAIT_DATASYNC "wait data sync queue", // STATE_WAIT_DATASYNC_QUEUE "flush data", // STATE_WAIT_FLUSH_DATA + "wait reserve td", // STATE_WAIT_RESERVE_TD + "wait td rollback", // STATE_WAIT_TD_ROLLBACK + "wait transaction rollback", // STATE_WAIT_TRANSACTION_ROLLBACK + "prune table", // STATE_PRUNE_TABLE + "prune index", // STATE_PRUNE_INDEX "stream get conn", // STATE_STREAM_WAIT_CONNECT_NODES "wait producer ready", // STATE_STREAM_WAIT_PRODUCER_READY "synchronize quit", // STATE_STREAM_WAIT_THREAD_SYNC_QUIT @@ -345,6 +354,7 @@ static const char* WaitStateDesc[] = { "wait active statement", // STATE_WAIT_ACTIVE_STATEMENT "wait memory", // STATE_WAIT_MEMORY "Sort", // STATE_EXEC_SORT + "Sort - fetch tuple", // STATE_EXEC_SORT_FETCH_TUPLE "Sort - write file", // STATE_EXEC_SORT_WRITE_FILE "Material", // STATE_EXEC_MATERIAL "Material - write file", // STATE_EXEC_MATERIAL_WRITE_FILE @@ -378,8 +388,13 @@ static const char* WaitStateDesc[] = { "gtm set sequence val", // STATE_GTM_SEQUENCE_SET_VAL "gtm drop sequence", // STATE_GTM_DROP_SEQUENCE "gtm 
rename sequence", // STATE_GTM_RENAME_SEQUENCE + "gtm set disaster cluster", // STATE_GTM_SET_DISASTER_CLUSTER + "gtm get disaster cluster", // STATE_GTM_GET_DISASTER_CLUSTER + "gtm del disaster cluster", // STATE_GTM_DEL_DISASTER_CLUSTER "wait sync consumer next step", // STATE_WAIT_SYNC_CONSUMER_NEXT_STEP - "wait sync producer next step" // STATE_WAIT_SYNC_PRODUCER_NEXT_STEP + "wait sync producer next step", // STATE_WAIT_SYNC_PRODUCER_NEXT_STEP + "gtm set consistency point", // STATE_GTM_SET_CONSISTENCY_POINT + "wait sync bgworkers" // STATE_WAIT_SYNC_BGWORKERS }; // description for WaitStatePhase enums. @@ -392,6 +407,7 @@ static const char* WaitStatePhaseDesc[] = { "autovacuum", // PHASE_AUTOVACUUM }; + /* ---------- * pgstat_get_waitstatusdesc() - * @@ -613,6 +629,45 @@ static TimestampTz pgxc_last_autovac_time(Oid relOid, char* funcname) return result; } +/* + * Build tuple desc and store for the caller result + * return the tuple store, the tupdesc would be return by pointer. + */ +Tuplestorestate *BuildTupleResult(FunctionCallInfo fcinfo, TupleDesc *tupdesc) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + Tuplestorestate *tupstore = NULL; + + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + /* check to see if caller supports returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + } + + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " + "allowed in this context"))); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, tupdesc) != TYPEFUNC_COMPOSITE) + ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("return type must be a row type"))); + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = *tupdesc; + + (void)MemoryContextSwitchTo(oldcontext); + + return tupstore; +} + Datum pg_stat_get_numscans(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); @@ -1314,13 +1369,6 @@ static Datum pg_stat_segment_space_info_internal(Oid spaceid, Oid dbid, PG_FUNCT SRF_RETURN_DONE(funcctx); } -Datum pg_stat_segment_space_info(PG_FUNCTION_ARGS) -{ - Oid spaceid = PG_GETARG_OID(0); - Oid dbid = PG_GETARG_OID(1); - return pg_stat_segment_space_info_internal(spaceid, dbid, fcinfo); -} - Oid get_tablespace_oid_by_name(const char *tablespacename) { Relation rel = heap_open(TableSpaceRelationId, AccessShareLock); @@ -2078,7 +2126,7 @@ Datum pg_stat_get_activity(PG_FUNCTION_ARGS) Datum pg_stat_get_activity_helper(PG_FUNCTION_ARGS, bool has_conninfo) { - const int ATT_COUNT = has_conninfo ? 20 : 19; + const int ATT_COUNT = has_conninfo ? 
22 : 21; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; MemoryContext oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); @@ -2106,9 +2154,13 @@ Datum pg_stat_get_activity_helper(PG_FUNCTION_ARGS, bool has_conninfo) TupleDescInitEntry(tupdesc, (AttrNumber)ARG_18, "connection_info", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)ARG_19, "srespool", NAMEOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)ARG_20, "global_sessionid", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_21, "unique_sql_id", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_22, "trace_id", TEXTOID, -1, 0); } else { TupleDescInitEntry(tupdesc, (AttrNumber)ARG_18, "srespool", NAMEOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)ARG_19, "global_sessionid", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_20, "unique_sql_id", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARG_21, "trace_id", TEXTOID, -1, 0); } rsinfo->returnMode = SFRM_Materialize; @@ -2154,7 +2206,7 @@ char* GetGlobalSessionStr(GlobalSessionId globalSessionId) void insert_pg_stat_get_activity_with_conninfo(Tuplestorestate *tupStore, TupleDesc tupDesc, const PgBackendStatus *beentry) { - const int ATT_NUM = 20; + const int ATT_NUM = 22; Datum values[ATT_NUM]; bool nulls[ATT_NUM]; @@ -2338,6 +2390,8 @@ void insert_pg_stat_get_activity_with_conninfo(Tuplestorestate *tupStore, TupleD char* gId = GetGlobalSessionStr(beentry->globalSessionId); values[ARR_19] = CStringGetTextDatum(gId); pfree(gId); + values[ARR_20] = Int64GetDatum(beentry->st_unique_sql_key.unique_sql_id); + values[ARR_21] = CStringGetTextDatum(beentry->trace_cxt.trace_id); } else { /* No permissions to view data about this session */ values[ARR_6] = CStringGetTextDatum(""); @@ -2361,6 +2415,8 @@ void insert_pg_stat_get_activity_with_conninfo(Tuplestorestate *tupStore, TupleD nulls[ARR_17] = true; nulls[ARR_18] = true; nulls[ARR_19] = true; + nulls[ARR_20] = true; + nulls[ARR_21] = true; } tuplestore_putvalues(tupStore, tupDesc, values, nulls); @@ -2368,7 +2424,7 @@ void insert_pg_stat_get_activity_with_conninfo(Tuplestorestate *tupStore, TupleD void insert_pg_stat_get_activity(Tuplestorestate *tupStore, TupleDesc tupDesc, const PgBackendStatus *beentry) { - const int ATT_NUM = 19; + const int ATT_NUM = 21; Datum values[ATT_NUM]; bool nulls[ATT_NUM]; @@ -2549,6 +2605,8 @@ void insert_pg_stat_get_activity(Tuplestorestate *tupStore, TupleDesc tupDesc, c (int)beentry->globalSessionId.nodeId, beentry->globalSessionId.sessionId, beentry->globalSessionId.seq); values[ARR_18] = CStringGetTextDatum(globalSessionId.data); pfree(globalSessionId.data); + values[ARR_19] = Int64GetDatum(beentry->st_unique_sql_key.unique_sql_id); + values[ARR_20] = CStringGetTextDatum(beentry->trace_cxt.trace_id); } else { /* No permissions to view data about this session */ values[ARR_6] = CStringGetTextDatum(""); @@ -2571,6 +2629,8 @@ void insert_pg_stat_get_activity(Tuplestorestate *tupStore, TupleDesc tupDesc, c nulls[ARR_16] = true; nulls[ARR_17] = true; nulls[ARR_18] = true; + nulls[ARR_19] = true; + nulls[ARR_20] = true; } tuplestore_putvalues(tupStore, tupDesc, values, nulls); @@ -3362,6 +3422,10 @@ static char* GetLocktagDecode(const char* locktag) appendStringInfo(&tag, "database:%u, relation:%u, page:=%u, tuple:%u, bucket:%u", locktagField1, locktagField2, locktagField3, locktagField4, locktagField5); break; + case LOCKTAG_UID: + appendStringInfo(&tag, "database:%u, relation:%u, uid:%lu", locktagField1, 
locktagField2, + (((uint64)locktagField3) << DISPLACEMENTS_VALUE) + locktagField4); + break; case LOCKTAG_TRANSACTION: appendStringInfo(&tag, "transactionid:%lu", TransactionIdGetDatum( (TransactionId)locktagField1 | ((TransactionId)locktagField2 << DISPLACEMENTS_VALUE))); @@ -4051,224 +4115,6 @@ Datum pg_stat_get_env(PG_FUNCTION_ARGS) } } -Datum gs_stat_ustore(PG_FUNCTION_ARGS) -{ - char result[9216] = {0}; -#ifdef DEBUG_UHEAP - errno_t ret; - LWLockAcquire(UHeapStatLock, LW_SHARED); - - ret = snprintf_s(result, sizeof(result), sizeof(result), "Prune Page \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tPrune Page (SUCCESS) = %lu \n", - UHeapStat_shared->prune_page[PRUNE_PAGE_SUCCESS]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tPrune Page (NO_SPACE) = %lu \n", - UHeapStat_shared->prune_page[PRUNE_PAGE_NO_SPACE]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tPrune Page (UPDATE_IN_PROGRESS)= %lu \n=========================\n\n", - UHeapStat_shared->prune_page[PRUNE_PAGE_UPDATE_IN_PROGRESS]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tPrune Page (PRUNE_PAGE_IN_RECOVERY)= %lu \n=========================\n\n", - UHeapStat_shared->prune_page[PRUNE_PAGE_IN_RECOVERY]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tPrune Page (PRUNE_PAGE_INVALID)= %lu \n=========================\n\n", - UHeapStat_shared->prune_page[PRUNE_PAGE_INVALID]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tPrune Page (PRUNE_PAGE_XID_FILTER)= %lu \n=========================\n\n", - UHeapStat_shared->prune_page[PRUNE_PAGE_XID_FILTER]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tPrune Page (PRUNE_PAGE_FILLFACTOR)= %lu \n=========================\n\n", - UHeapStat_shared->prune_page[PRUNE_PAGE_FILLFACTOR]); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Prune Page OPs profile \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\t Prune Page SUC: %u %u %u \n", - UHeapStat_shared->op_count_suc.ins, UHeapStat_shared->op_count_suc.del, UHeapStat_shared->op_count_suc.upd); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\t Prune Page TOT: %u %u %u \n", - UHeapStat_shared->op_count_tot.ins, UHeapStat_shared->op_count_tot.del, UHeapStat_shared->op_count_tot.upd); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Prune Page OPs freespace profile \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\t Prune Page FreeSpace TOT: %u %u %u \n", UHeapStat_shared->op_space_tot.ins, - UHeapStat_shared->op_space_tot.del, UHeapStat_shared->op_space_tot.upd); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "PageReserveTransactionSlot (where to get transaction slot) \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + 
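The new LOCKTAG_UID branch above rebuilds a 64-bit uid from the two 32-bit lock tag fields by shifting locktagField3 left DISPLACEMENTS_VALUE (32) bits and adding locktagField4. The same recombination in isolation, as a runnable sketch:

```
#include <cstdint>
#include <cstdio>

const uint32_t DISPLACEMENTS_VALUE = 32;

// Rebuild the 64-bit uid from the two 32-bit lock tag fields,
// as in the new LOCKTAG_UID case of GetLocktagDecode().
static uint64_t DecodeUid(uint32_t locktagField3, uint32_t locktagField4)
{
    return (((uint64_t)locktagField3) << DISPLACEMENTS_VALUE) + locktagField4;
}

int main()
{
    /* uid 0x0000000100000002 splits into hi = 1, lo = 2 */
    printf("%llu\n", (unsigned long long)DecodeUid(1, 2)); /* 4294967298 */
    return 0;
}
```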
strlen(result), sizeof(result), sizeof(result), - "\tSlot has been reserved by current xid: %lu \n", - UHeapStat_shared->get_transslot_from[TRANSSLOT_RESERVED_BY_CURRENT_XID]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tGot free slot after invalidating slots: %lu \n", - UHeapStat_shared->get_transslot_from[TRANSSLOT_FREE_AFTER_INVALIDATION]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tGot free slot after freezing slots: %lu \n", - UHeapStat_shared->get_transslot_from[TRANSSLOT_FREE_AFTER_FREEZING]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tCannot get a free transaction slot: %lu \n=========================\n\n", - UHeapStat_shared->get_transslot_from[TRANSSLOT_CANNOT_GET]); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Inplace Update Stats \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tINPLACE UPDATE: %lu \n", - UHeapStat_shared->update[INPLACE_UPDATE]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tNON INPLACE UPDATE: %lu \n", - UHeapStat_shared->update[NON_INPLACE_UPDATE]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tTotal: %lu \n=========================\n\n", - UHeapStat_shared->update[NON_INPLACE_UPDATE] + UHeapStat_shared->update[INPLACE_UPDATE]); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Non Inplace Update Reasons \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tIndex Updated: %lu \n", - UHeapStat_shared->noninplace_update_cause[INDEX_UPDATED]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tToast: %lu \n", - UHeapStat_shared->noninplace_update_cause[TOAST]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tPrune Page Failed: %lu \n", - UHeapStat_shared->noninplace_update_cause[PAGE_PRUNE_FAILED]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tSlot reused: %lu \n", - UHeapStat_shared->noninplace_update_cause[SLOT_REUSED]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tnblocks < NBLOCKS: %lu \n=========================\n\n", - UHeapStat_shared->noninplace_update_cause[nblocks_LESS_THAN_NBLOCKS]); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Slot status in UHeapTupleFetch \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tFROZEN_SLOT = %lu \n", - UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_FROZEN_SLOT]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "\tXID < Oldest XID in UNDO = %lu \n", - UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_OLDEST_XID]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result 
+ strlen(result), sizeof(result), sizeof(result), - "\tSlot is invalid and Xid is Visible in Snapshot = %lu \n", - UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_INVALID_SLOT]); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tFetch Trans Info From UNDO = %lu\n=========================\n\n", - UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_UNDO]); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Undo Chain Traversal Stat \n"); - securec_check_ss(ret, "\0", "\0"); - - double tuple_old_version_visit_rate = 0.0; - if (UHeapStat_shared->tuple_visits > 0) { - tuple_old_version_visit_rate = - 1.0 * UHeapStat_shared->tuple_old_version_visits / UHeapStat_shared->tuple_visits; - } - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "Tuple visits: %lu\tOld version visits: %lu\tOld version visit rate: %.6f \n", - UHeapStat_shared->tuple_visits, UHeapStat_shared->tuple_old_version_visits, tuple_old_version_visit_rate); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Undo Chain Traversal Length \n"); - securec_check_ss(ret, "\0", "\0"); - - double chain_visited_avg_len = 0.0; - if (UHeapStat_shared->undo_chain_visited_count > 0) { - chain_visited_avg_len = - UHeapStat_shared->undo_chain_visited_sum_len * 1.0 / UHeapStat_shared->undo_chain_visited_count; - } - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\t# Of undo_chain_visited_sum_len = %ld | visited_count = %ld | miss_count = %ld | visited_avg_len = %lf | " - "visited_max_len = %ld | visited_min_len = %ld \n", - UHeapStat_shared->undo_chain_visited_sum_len, UHeapStat_shared->undo_chain_visited_count, - UHeapStat_shared->undo_chain_visited_miss_count, chain_visited_avg_len, - UHeapStat_shared->undo_chain_visited_max_len, UHeapStat_shared->undo_chain_visited_min_len); - securec_check_ss(ret, "\0", "\0"); - - double page_visited_avg_len = 0.0; - if (UHeapStat_shared->undo_chain_visited_count > 0) { - page_visited_avg_len = - UHeapStat_shared->undo_page_visited_sum_len * 1.0 / UHeapStat_shared->undo_chain_visited_count; - } - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\t# Of undo_page_visited_sum_len = %ld | visited_count = %ld | page_visited_avg_len = %lf \n", - UHeapStat_shared->undo_page_visited_sum_len, UHeapStat_shared->undo_chain_visited_count, page_visited_avg_len); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "prepare undo record rzero count %lu nzero_count %lu \n", UHeapStat_shared->undo_record_prepare_rzero_count, - UHeapStat_shared->undo_record_prepare_nzero_count); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "groups allocated %lu released %lu \n", - UHeapStat_shared->undo_groups_allocate, UHeapStat_shared->undo_groups_release); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "slots allocated %lu released %lu \n", - UHeapStat_shared->undo_slots_allocate, UHeapStat_shared->undo_slots_recycle); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "space recycle %lu unrecycle %lu \n", - UHeapStat_shared->undo_space_recycle, 
UHeapStat_shared->undo_space_unrecycle); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "oldest xid delay %lu \n", - UHeapStat_shared->oldest_xid_having_undo_delay); - securec_check_ss(ret, "\0", "\0"); - - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), "Undo lock information: \n"); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tDiscard lock hold time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu \tavg %.6f \n", - UHeapStat_shared->undo_discard_lock_hold_time_sum, UHeapStat_shared->undo_discard_lock_hold_time_min, - UHeapStat_shared->undo_discard_lock_hold_time_max, UHeapStat_shared->undo_discard_lock_hold_cnt, - 1.0 * UHeapStat_shared->undo_discard_lock_hold_time_sum / Max(1, UHeapStat_shared->undo_discard_lock_hold_cnt)); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tDiscard lock wait time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu\tavg %.6f \n", - UHeapStat_shared->undo_discard_lock_wait_time_sum, UHeapStat_shared->undo_discard_lock_wait_time_min, - UHeapStat_shared->undo_discard_lock_wait_time_max, UHeapStat_shared->undo_discard_lock_wait_cnt, - 1.0 * UHeapStat_shared->undo_discard_lock_wait_time_sum / Max(1, UHeapStat_shared->undo_discard_lock_wait_cnt)); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tSpace lock hold time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu\tavg %.6f \n", - UHeapStat_shared->undo_space_lock_hold_time_sum, UHeapStat_shared->undo_space_lock_hold_time_min, - UHeapStat_shared->undo_space_lock_hold_time_max, UHeapStat_shared->undo_space_lock_hold_cnt, - 1.0 * UHeapStat_shared->undo_space_lock_hold_time_sum / Max(1, UHeapStat_shared->undo_space_lock_hold_cnt)); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "\tSpace lock wait time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu\tavg %.6f \n", - UHeapStat_shared->undo_space_lock_wait_time_sum, UHeapStat_shared->undo_space_lock_wait_time_min, - UHeapStat_shared->undo_space_lock_wait_time_max, UHeapStat_shared->undo_space_lock_wait_cnt, - 1.0 * UHeapStat_shared->undo_space_lock_wait_time_sum / Max(1, UHeapStat_shared->undo_space_lock_wait_cnt)); - securec_check_ss(ret, "\0", "\0"); - ret = snprintf_s(result + strlen(result), sizeof(result), sizeof(result), - "INSERT: %lu\tRetry: %lu\tRetry Time: %lu\tRetry Max: %lu\tRetry Time MAX: %lu \n", UHeapStat_shared->dml, - UHeapStat_shared->retry, UHeapStat_shared->retry_time, UHeapStat_shared->retry_max, - UHeapStat_shared->retry_time_max); - securec_check_ss(ret, "\0", "\0"); - - result[strlen(result)] = '\0'; - - LWLockRelease(UHeapStatLock); - -#endif - PG_RETURN_TEXT_P(cstring_to_text(result)); -} - Datum pg_backend_pid(PG_FUNCTION_ARGS) { PG_RETURN_INT64(t_thrd.proc_cxt.MyProcPid); @@ -5405,7 +5251,10 @@ Datum pg_stat_get_wlm_realtime_ec_operator_info(PG_FUNCTION_ARGS) } if (statistics->ec_query) { - values[++i] = CStringGetTextDatum(statistics->ec_query); + char* mask_string = NULL; + MASK_PASSWORD_START(mask_string, statistics->ec_query); + values[++i] = CStringGetTextDatum(mask_string); + MASK_PASSWORD_END(mask_string, statistics->ec_query); } else { nulls[++i] = true; } @@ -5623,7 +5472,10 @@ Datum pg_stat_get_wlm_statistics(PG_FUNCTION_ARGS) securec_check(rc, "\0", "\0"); /* Locking is probably not really 
necessary */ - values[++i] = CStringGetTextDatum(statistics->stmt); + char* mask_string = NULL; + MASK_PASSWORD_START(mask_string, statistics->stmt); + values[++i] = CStringGetTextDatum(mask_string); + MASK_PASSWORD_END(mask_string, statistics->stmt); values[++i] = Int64GetDatum(statistics->blocktime); values[++i] = Int64GetDatum(statistics->elapsedtime); values[++i] = Int64GetDatum(statistics->totalcputime); @@ -6568,11 +6420,14 @@ Datum pg_stat_get_wlm_session_info(PG_FUNCTION_ARGS) else nulls[++i] = true; values[++i] = Int64GetDatum(detail->debug_query_id); - if (detail->statement && detail->statement[0]) - values[++i] = CStringGetTextDatum(detail->statement); - else + if (detail->statement && detail->statement[0]) { + char* mask_string = NULL; + MASK_PASSWORD_START(mask_string, detail->statement); + values[++i] = CStringGetTextDatum(mask_string); + MASK_PASSWORD_END(mask_string, detail->statement); + } else { nulls[++i] = true; - + } if (t_thrd.proc->workingVersionNum >= SLOW_QUERY_VERSION) { values[++i] = CStringGetTextDatum(PlanListToString(detail->gendata.query_plan)); } else { @@ -6778,7 +6633,10 @@ Datum pg_stat_get_wlm_ec_operator_info(PG_FUNCTION_ARGS) } if (statistics->ec_query) { - values[++i] = CStringGetTextDatum(statistics->ec_query); + char* mask_string = NULL; + MASK_PASSWORD_START(mask_string, statistics->ec_query); + values[++i] = CStringGetTextDatum(mask_string); + MASK_PASSWORD_END(mask_string, statistics->ec_query); } else { nulls[++i] = true; } @@ -8797,7 +8655,7 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) fctx->record[i].forknum = bufHdr->tag.forkNum; fctx->record[i].blocknum = bufHdr->tag.blockNum; fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state); - fctx->record[i].pinning_backends = BUF_STATE_GET_REFCOUNT(buf_state); + fctx->record[i].pinning_backends = BUF_STATE_GET_REFCOUNT(buf_state); if (buf_state & BM_DIRTY) fctx->record[i].isdirty = true; @@ -8841,7 +8699,7 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS) * Set all fields except the bufferid to null if the buffer is unused * or not valid. 
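The WLM view changes in this region consistently wrap statement text in a MASK_PASSWORD_START/MASK_PASSWORD_END pair before building the text datum. The macros' definitions are not part of this diff; the sketch below only assumes the usual shape of such a pair (START yields a masked copy, or the original pointer when nothing needs masking, and END frees the copy), to show why the END must come after CStringGetTextDatum has copied the string:

```
#include <cstdlib>
#include <cstring>
#include <cstdio>

// Assumed behaviour of the masking helper: return a freshly allocated
// masked copy, or NULL when the query contains no secrets to hide.
static char* maskPassword(const char* query)
{
    char* p = NULL;
    if (strstr(query, "PASSWORD") == NULL) {
        return NULL; /* nothing to mask */
    }
    char* copy = strdup(query);
    p = strstr(copy, "PASSWORD");
    for (p += strlen("PASSWORD"); *p != '\0'; ++p) {
        *p = '*'; /* blot out everything after the keyword */
    }
    return copy;
}

/* Hypothetical stand-ins for the macro pair used in the diff. */
#define MASK_PASSWORD_START(mask, src) \
    ((mask) = maskPassword(src), (mask) = ((mask) != NULL) ? (mask) : (char*)(src))
#define MASK_PASSWORD_END(mask, src) \
    do { if ((mask) != (src)) free(mask); } while (0)

int main()
{
    const char* stmt = "CREATE USER u PASSWORD 'secret'";
    char* mask_string = NULL;
    MASK_PASSWORD_START(mask_string, stmt);
    printf("%s\n", mask_string); /* the datum would be built from this copy */
    MASK_PASSWORD_END(mask_string, stmt);
    return 0;
}
```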
*/
-    if (fctx->record[i].blocknum == InvalidBlockNumber || fctx->record[i].isvalid == false) {
+    if (fctx->record[i].blocknum == InvalidBlockNumber) {
        nulls[1] = true;
        nulls[2] = true;
        nulls[3] = true;
@@ -8873,7 +8731,7 @@ Datum pg_buffercache_pages(PG_FUNCTION_ARGS)
        nulls[8] = false;
        values[9] = BoolGetDatum(fctx->record[i].isvalid);
        nulls[9] = false;
-        values[10] = Int16GetDatum(fctx->record[i].usagecount);
+        values[10] = Int16GetDatum(fctx->record[i].usagecount);
        nulls[10] = false;
        values[11] = Int32GetDatum(fctx->record[i].pinning_backends);
        nulls[11] = false;
@@ -10959,25 +10817,26 @@ void insert_comm_client_info(Tuplestorestate *tupStore, TupleDesc tupDesc, const

void fill_callcxt_for_comm_check_connection_status(FuncCallContext *funcctx)
{
-    const int att_num = 5;
+    const int attNum = 6;
    int att_idx = 1;
    ConnectionStatus *conns_entry = NULL;
-
+
    MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
-    TupleDesc tupdesc = CreateTemplateTupleDesc(att_num, false);
-
+
+    TupleDesc tupdesc = CreateTemplateTupleDesc(attNum, false);
+
    TupleDescInitEntry(tupdesc, (AttrNumber) att_idx++, "node_name", TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) att_idx++, "remote_name", TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) att_idx++, "remote_host", TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) att_idx++, "remote_port", INT4OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) att_idx++, "is_connected", BOOLOID, -1, 0);
-
+    TupleDescInitEntry(tupdesc, (AttrNumber) att_idx++, "no_error_occur", BOOLOID, -1, 0);
+
    funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-
+
    funcctx->max_calls = check_connection_status(&conns_entry);
    funcctx->user_fctx = conns_entry;
-
+
    MemoryContextSwitchTo(oldcontext);
    return;
}
@@ -10994,6 +10853,7 @@ void fill_values_for_comm_check_connection_status(Datum *values,
    values[att_idx++] = CStringGetTextDatum(poolCon->remote_host);
    values[att_idx++] = (Datum)(poolCon->remote_port);
    values[att_idx++] = BoolGetDatum(poolCon->is_connected);
+    values[att_idx++] = BoolGetDatum(poolCon->no_error_occur);
    return;
}
@@ -11004,9 +10864,20 @@
 */
Datum comm_check_connection_status(PG_FUNCTION_ARGS)
{
+#ifndef ENABLE_MULTIPLE_NODES
+    FuncCallContext* funcctx = NULL;
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+        errmsg("unsupported function/view in single node mode.")));
+    SRF_RETURN_DONE(funcctx);
+#else
+    if (IS_PGXC_DATANODE) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("unsupported function/view on datanodes.")));
+    }
+
    FuncCallContext *funcctx = NULL;
    ConnectionStatus *conns_entry = NULL;
-    const int att_num = 5;
+    const int attNum = 6;

    if (SRF_IS_FIRSTCALL()) {
        funcctx = SRF_FIRSTCALL_INIT();
@@ -11019,8 +10890,8 @@ Datum comm_check_connection_status(PG_FUNCTION_ARGS)
    if (funcctx->call_cntr < funcctx->max_calls) {
        /* for each row */
-        Datum values[att_num] = {0};
-        bool nulls[att_num] = {false};
+        Datum values[attNum] = {0};
+        bool nulls[attNum] = {false};

        fill_values_for_comm_check_connection_status(values, funcctx, conns_entry);
        HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
@@ -11038,6 +10909,7 @@
        /* nothing left */
        SRF_RETURN_DONE(funcctx);
    }
+#endif
}
@@ -13901,10 +13773,64 @@ Datum remote_single_flush_dw_stat(PG_FUNCTION_ARGS)
#endif
}

+static void gs_stat_read_dw_batch(Tuplestorestate *tupStore, TupleDesc tupDesc)
+{
+
+    int i, j;
+    errno_t
rc = 0; + int row_num = g_instance.dw_batch_cxt.batch_meta_file.dw_file_num; + int col_num = DW_VIEW_COL_NUM; + + Datum values[col_num]; + bool nulls[col_num]; + + for (i = 0; i < row_num; i++) { + g_stat_file_id = i; + + rc = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check(rc, "\0", "\0"); + rc = memset_s(nulls, sizeof(nulls), 1, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + for (j = 0; j < col_num; j++) { + values[j] = g_dw_view_col_arr[j].get_data(); + nulls[j] = false; + } + + tuplestore_putvalues(tupStore, tupDesc, values, nulls); + } + + g_stat_file_id = -1; +} + Datum local_double_write_stat(PG_FUNCTION_ARGS) { - HeapTuple tuple = form_function_tuple(DW_VIEW_COL_NUM, DW_BATCH_FUNC); - PG_RETURN_DATUM(HeapTupleGetDatum(tuple)); + int i; + int col_num = DW_VIEW_COL_NUM; + TupleDesc tupdesc = NULL; + + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + + MemoryContext oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); + + tupdesc = CreateTemplateTupleDesc(DW_VIEW_COL_NUM, false, TAM_HEAP); + + for (i = 0; i < col_num; i++) { + TupleDescInitEntry(tupdesc, (AttrNumber)(i + 1), + g_dw_view_col_arr[i].name, g_dw_view_col_arr[i].data_type, -1, 0); + } + + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->setDesc = BlessTupleDesc(tupdesc); + + (void)gs_stat_read_dw_batch(rsinfo->setResult, rsinfo->setDesc); + + MemoryContextSwitchTo(oldcontext); + + tuplestore_donestoring(rsinfo->setResult); + + return (Datum) 0; } Datum remote_double_write_stat(PG_FUNCTION_ARGS) @@ -13969,6 +13895,72 @@ Datum remote_double_write_stat(PG_FUNCTION_ARGS) #endif } +Datum local_redo_time_count(PG_FUNCTION_ARGS) +{ + TupleDesc tupdesc; + Tuplestorestate *tupstore = BuildTupleResult(fcinfo, &tupdesc); + const uint32 xlog_redo_static_cols = 21; + Datum values[xlog_redo_static_cols]; + bool nulls[xlog_redo_static_cols]; + + RedoWorkerTimeCountsInfo *workerCountInfoList = NULL; + uint32 realNum = 0; + GetRedoWorkerTimeCount(&workerCountInfoList, &realNum); + + for (uint32 i = 0; i < realNum; ++i) { + uint32 k = 0; + values[k] = CStringGetTextDatum(workerCountInfoList[i].worker_name); + nulls[k++] = false; + pfree(workerCountInfoList[i].worker_name); + + for (uint32 j = 0; j < (uint32)TIME_COST_NUM; ++j) { + values[k] = Int64GetDatum(workerCountInfoList[i].time_cost[j].totalDuration); + nulls[k++] = false; + values[k] = Int64GetDatum(workerCountInfoList[i].time_cost[j].counter); + nulls[k++] = false; + } + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + if (workerCountInfoList != NULL) { + pfree(workerCountInfoList); + } + tuplestore_donestoring(tupstore); + return (Datum)0; +} + +Datum local_xlog_redo_statics(PG_FUNCTION_ARGS) +{ + TupleDesc tupdesc; + Tuplestorestate *tupstore = BuildTupleResult(fcinfo, &tupdesc); + const uint32 xlog_redo_static_cols = 5; + Datum values[xlog_redo_static_cols]; + bool nulls[xlog_redo_static_cols]; + const uint32 subtypeShiftSize = 4; + for (uint32 i = 0; i < RM_NEXT_ID; ++i) { + for (uint32 j = 0; j < MAX_XLOG_INFO_NUM; ++j) { + if (g_instance.comm_cxt.predo_cxt.xlogStatics[i][j].total_num == 0) { + continue; + } + uint32 k = 0; + values[k] = CStringGetTextDatum(RmgrTable[i].rm_type_name((j << subtypeShiftSize))); + nulls[k++] = false; + values[k] = Int32GetDatum(i); + nulls[k++] = false; + values[k] = Int32GetDatum(j << subtypeShiftSize); + nulls[k++] = false; + values[k] = 
Int64GetDatum(g_instance.comm_cxt.predo_cxt.xlogStatics[i][j].total_num); + nulls[k++] = false; + values[k] = Int64GetDatum(g_instance.comm_cxt.predo_cxt.xlogStatics[i][j].extra_num); + nulls[k++] = false; + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + } + + tuplestore_donestoring(tupstore); + + return (Datum)0; +} + Datum local_redo_stat(PG_FUNCTION_ARGS) { TupleDesc tupdesc = NULL; @@ -14552,7 +14544,8 @@ Datum gs_hadr_local_rto_and_rpo_stat(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, (AttrNumber)8, "target_rto", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)9, "current_rpo", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)10, "target_rpo", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)11, "current_sleep_time", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)11, "rto_sleep_time", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)12, "rpo_sleep_time", INT8OID, -1, 0); /* complete descriptor of the tupledesc */ funcctx->tuple_desc = BlessTupleDesc(tupdesc); @@ -14589,7 +14582,8 @@ Datum gs_hadr_local_rto_and_rpo_stat(PG_FUNCTION_ARGS) values[7] = Int64GetDatum(entry->target_rto); values[8] = Int64GetDatum(entry->current_rpo); values[9] = Int64GetDatum(entry->target_rpo); - values[10] = Int64GetDatum(entry->current_sleep_time); + values[10] = Int64GetDatum(entry->rto_sleep_time); + values[11] = Int64GetDatum(entry->rpo_sleep_time); tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); @@ -14628,7 +14622,8 @@ Datum gs_hadr_remote_rto_and_rpo_stat(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, (AttrNumber)8, "target_rto", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)9, "current_rpo", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber)10, "target_rpo", INT8OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)11, "current_sleep_time", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)11, "rto_sleep_time", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)12, "rpo_sleep_time", INT8OID, -1, 0); funcctx->tuple_desc = BlessTupleDesc(tupdesc); funcctx->max_calls = u_sess->pgxc_cxt.NumDataNodes + u_sess->pgxc_cxt.NumCoords; @@ -14731,610 +14726,135 @@ Datum track_memory_context_detail(PG_FUNCTION_ARGS) } } -using namespace undo; -static uint64 UndoSize(UndoSpaceType type) +#ifdef ENABLE_MULTIPLE_NODES +/* Get the head row of the view of index status */ +TupleDesc get_index_status_view_frist_row() { - uint64 used = 0; - for (auto idx = 0; idx < UNDO_ZONE_COUNT; idx++) { - undo::UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; - if (uzone == NULL) { - continue; - } - if (type == UNDO_LOG_SPACE) { - used += uzone->UndoSize(); + TupleDesc tupdesc = NULL; + tupdesc = CreateTemplateTupleDesc(INDEX_STATUS_VIEW_COL_NUM, false, TAM_HEAP); + TupleDescInitEntry(tupdesc, (AttrNumber)1, "node_name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)2, "indisready", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)3, "indisvalid", BOOLOID, -1, 0); + return BlessTupleDesc(tupdesc); +} + +HeapTuple fetch_local_index_status(FuncCallContext *funcctx, char *schname, char *idxname) +{ + if (funcctx->call_cntr < funcctx->max_calls) { + /* for local cn, get index status */ + Datum values[INDEX_STATUS_VIEW_COL_NUM]; + bool nulls[INDEX_STATUS_VIEW_COL_NUM] = {false}; + + Oid idx_oid = InvalidOid; + if (schname == NULL || strlen(schname) == 0) { + idx_oid = RangeVarGetRelid(makeRangeVar(NULL, idxname, -1), NoLock, false); 
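fetch_local_index_status above resolves the index in two ways: with a NULL schema, RangeVarGetRelid(makeRangeVar(NULL, idxname, -1), ...) searches along the session search path, while the else branch that follows pins the lookup to the given schema. A standalone analogue of that two-way lookup, with a toy catalog standing in for the real namespace machinery:

```
#include <cstdio>
#include <cstring>
#include <map>
#include <string>

typedef unsigned int Oid;
const Oid InvalidOid = 0;

// Toy catalog keyed by "schema.index"; searchPath plays the role of the
// server's search_path when no schema is supplied.
static std::map<std::string, Oid> catalog = {
    {"public.t_idx", 16385}, {"myschema.t_idx", 16401}};
static const char* searchPath[] = {"public", "myschema"};

// Analogue of RangeVarGetRelid(makeRangeVar(schname, idxname, -1), ...):
// NULL schema probes the search path; an explicit schema is an exact lookup.
static Oid LookupIndexOid(const char* schname, const char* idxname)
{
    if (schname == NULL || strlen(schname) == 0) {
        for (const char* ns : searchPath) {
            auto it = catalog.find(std::string(ns) + "." + idxname);
            if (it != catalog.end()) {
                return it->second;
            }
        }
        return InvalidOid;
    }
    auto it = catalog.find(std::string(schname) + "." + idxname);
    return it == catalog.end() ? InvalidOid : it->second;
}

int main()
{
    printf("%u %u %u\n", LookupIndexOid(NULL, "t_idx"),
           LookupIndexOid("myschema", "t_idx"), LookupIndexOid("bad", "t_idx"));
    return 0;
}
```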
} else { - used += uzone->SlotSize(); - } - } - return used; -} - -static void ReadUndoZoneMetaFromShared(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore) -{ - Assert(tupleDesc != NULL); - Assert(tupstore != NULL); - - uint32 startIdx = 0; - uint32 endIdx = 0; - uint64 used = 0; - char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; - if (id == INVALID_ZONE_ID) { - used = UndoSize(UNDO_LOG_SPACE) + UndoSize(UNDO_SLOT_SPACE); - endIdx = UNDO_ZONE_COUNT - 1; - } else { - used = UndoSize(UNDO_LOG_SPACE); - startIdx = id; - endIdx = id; - } - - for (auto idx = startIdx; idx <= endIdx; idx++) { - bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; - Datum values[PG_STAT_USP_PERSIST_META_COLS]; - UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; - if (uzone == NULL) { - continue; - } - values[0] = ObjectIdGetDatum((Oid)uzone->GetZoneId()); - values[1] = ObjectIdGetDatum((Oid)uzone->GetPersitentLevel()); - errno_t rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(uzone->GetInsert())); - securec_check_ss(rc, "\0", "\0"); - values[2] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, - UNDO_PTR_GET_OFFSET(uzone->GetDiscard())); - securec_check_ss(rc, "\0", "\0"); - values[3] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, - UNDO_PTR_GET_OFFSET(uzone->GetForceDiscard())); - securec_check_ss(rc, "\0", "\0"); - values[4] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, - used); - securec_check_ss(rc, "\0", "\0"); - values[5] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, - uzone->GetLSN()); - securec_check_ss(rc, "\0", "\0"); - values[6] = CStringGetTextDatum(textBuffer); - tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); - } - - tuplestore_donestoring(tupstore); -} - -void Checkfd(int fd) -{ - if (fd < 0) { - ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("could not open file \%s", UNDO_META_FILE))); - return; - } -} - -void Checkid(const int id, uint32* startIdx, uint32* endIdx) -{ - if (id == INVALID_ZONE_ID) { - *endIdx = UNDO_ZONE_COUNT - 1; - } else { - *startIdx = id; - *endIdx = id; - } -} - -void GetZoneMetaValues(Datum *values, char *textBuffer, - UndoZoneMetaInfo undoZoneMeta, uint32 idx, errno_t *rc) -{ - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoZoneMeta.insert)); - securec_check_ss(*rc, "\0", "\0"); - values[2] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoZoneMeta.discard)); - securec_check_ss(*rc, "\0", "\0"); - values[3] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoZoneMeta.forceDiscard)); - securec_check_ss(*rc, "\0", "\0"); - values[4] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, 0); - securec_check_ss(*rc, "\0", "\0"); - values[5] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, 
undoZoneMeta.lsn); - securec_check_ss(*rc, "\0", "\0"); - values[6] = CStringGetTextDatum(textBuffer); -} - -static void ReadUndoZoneMetaFromDisk(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore) -{ - Assert(tupleDesc != NULL); - Assert(tupstore != NULL); - int ret = 0; - uint32 startIdx = 0; - uint32 endIdx = 0; - char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; - int fd = BasicOpenFile(UNDO_META_FILE, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); - - Checkfd(fd); - Checkid(id, &startIdx, &endIdx); - for (auto idx = startIdx; idx <= endIdx; idx++) { - bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; - Datum values[PG_STAT_USP_PERSIST_META_COLS]; - uint32 readPos = 0; - UndoZoneMetaInfo undoZoneMeta; - errno_t rc; - - if (idx < PERSIST_ZONE_COUNT) { - readPos = (idx / UNDOZONE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE + - (idx % UNDOZONE_COUNT_PER_PAGE) * sizeof(UndoZoneMetaInfo); - lseek(fd, readPos, SEEK_SET); - ret = read(fd, &undoZoneMeta, sizeof(UndoZoneMetaInfo)); - if (ret != sizeof(UndoZoneMetaInfo)) { - ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), - errmsg("Read undo meta file fail, expect size(%lu), real size(%u)", - sizeof(UndoZoneMetaInfo), ret))); - break; - } - } else { - rc = memset_s(&undoZoneMeta, sizeof(UndoZoneMetaInfo), 0, sizeof(UndoZoneMetaInfo)); - securec_check(rc, "\0", "\0"); - } - DECLARE_NODE_COUNT(); - GET_UPERSISTENCE_BY_ZONEID((int)idx, nodeCount); - values[0] = ObjectIdGetDatum((Oid)idx); - values[1] = ObjectIdGetDatum((Oid)upersistence); - GetZoneMetaValues(values, textBuffer, undoZoneMeta, idx, &rc); - tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); - } - - tuplestore_donestoring(tupstore); - close(fd); -} - -static void ReadTransSlotMetaFromShared(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore) -{ - Assert(tupleDesc != NULL); - Assert(tupstore != NULL); - - uint32 startIdx = 0; - uint32 endIdx = 0; - char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; - - if (id == INVALID_ZONE_ID) { - endIdx = UNDO_ZONE_COUNT - 1; - } else { - startIdx = id; - endIdx = id; - } - - for (auto idx = startIdx; idx <= endIdx; idx++) { - bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; - Datum values[PG_STAT_USP_PERSIST_META_COLS]; - UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; - if (uzone == NULL) { - continue; - } - values[0] = ObjectIdGetDatum((Oid)idx); - values[1] = ObjectIdGetDatum((Oid)0); // unused - errno_t rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(uzone->GetAllocate())); - securec_check_ss(rc, "\0", "\0"); - values[2] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(uzone->GetRecycle())); - securec_check_ss(rc, "\0", "\0"); - values[3] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", - UNDO_PTR_GET_OFFSET(uzone->GetRecycleXid())); - securec_check_ss(rc, "\0", "\0"); - values[4] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", - GetOldestXminForUndo()); - securec_check_ss(rc, "\0", "\0"); - values[5] = CStringGetTextDatum(textBuffer); - rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", - pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo)); - securec_check_ss(rc, "\0", "\0"); - values[6] = CStringGetTextDatum(textBuffer); - tuplestore_putvalues(tupstore, *tupleDesc, values, 
nulls);
-    }
-
-    tuplestore_donestoring(tupstore);
-}
-
-void GetTransMetaValues(Datum *values, char *textBuffer,
-    UndoZoneMetaInfo undoZoneMeta, uint32 loop, errno_t *rc)
-{
-    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1,
-        UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoZoneMeta.allocate));
-    securec_check_ss(*rc, "\0", "\0");
-    values[2] = CStringGetTextDatum(textBuffer);
-    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1,
-        UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoZoneMeta.recycle));
-    securec_check_ss(*rc, "\0", "\0");
-    values[3] = CStringGetTextDatum(textBuffer);
-    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1,
-        "%016lu", undoZoneMeta.recycleXid);
-    securec_check_ss(*rc, "\0", "\0");
-    values[4] = CStringGetTextDatum(textBuffer);
-    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1,
-        "%016lu", GetOldestXminForUndo());
-    securec_check_ss(*rc, "\0", "\0");
-    values[5] = CStringGetTextDatum(textBuffer);
-    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1,
-        "%016lu", pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo));
-    securec_check_ss(*rc, "\0", "\0");
-    values[6] = CStringGetTextDatum(textBuffer);
-}
-
-static void ReadTransSlotMetaFromDisk(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore)
-{
-    Assert(tupleDesc != NULL);
-    Assert(tupstore != NULL);
-    int ret = 0;
-    uint32 startIdx = 0;
-    uint32 endIdx = 0;
-    char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'};
-    int fd = BasicOpenFile(UNDO_META_FILE, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
-
-    Checkfd(fd);
-    Checkid(id, &startIdx, &endIdx);
-    for (auto idx = startIdx; idx <= endIdx; idx++) {
-        bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false};
-        Datum values[PG_STAT_USP_PERSIST_META_COLS];
-        uint32 readPos = 0;
-        UndoZoneMetaInfo undoZoneMeta;
-        errno_t rc;
-
-        if (idx < PERSIST_ZONE_COUNT) {
-            readPos = (idx / UNDOZONE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE +
-                (idx % UNDOZONE_COUNT_PER_PAGE) * sizeof(UndoZoneMetaInfo);
-            lseek(fd, readPos, SEEK_SET);
-            ret = read(fd, &undoZoneMeta, sizeof(UndoZoneMetaInfo));
-            if (ret != sizeof(UndoZoneMetaInfo)) {
-                ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION),
-                    errmsg("Read undo meta file fail, expect size(%lu), real size(%u)",
-                        sizeof(UndoZoneMetaInfo), ret)));
-                break;
-            }
-        } else {
-            rc = memset_s(&undoZoneMeta, sizeof(UndoZoneMetaInfo), 0, sizeof(UndoZoneMetaInfo));
-            securec_check(rc, "\0", "\0");
+            idx_oid = RangeVarGetRelid(makeRangeVar(schname, idxname, -1), NoLock, false);
        }
-        DECLARE_NODE_COUNT();
-        GET_UPERSISTENCE_BY_ZONEID((int)idx, nodeCount);
-        values[0] = ObjectIdGetDatum((Oid)idx);
-        values[1] = ObjectIdGetDatum((Oid)upersistence);
-        GetTransMetaValues(values, textBuffer, undoZoneMeta, idx, &rc);
-        tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
-    }
-
-    tuplestore_donestoring(tupstore);
-    close(fd);
-}
-
-static uint64 UndoSpaceSize(UndoSpaceType type)
-{
-    uint64 used = 0;
-    for (auto idx = 0; idx < UNDO_ZONE_COUNT; idx++) {
-        UndoSpace *usp;
-        if (g_instance.undo_cxt.uZones[idx] == NULL) {
-            continue;
+        if (!OidIsValid(idx_oid)) {
+            ereport(ERROR, (errmodule(MOD_FUNCTION), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("The given schema or index name cannot be found."),
+                errdetail("Cannot find a valid oid from the given index name."),
+                errcause("An incorrect schema or index name was input."),
+                erraction("Check the input schema and index name.")));
        }
-        if (type == UNDO_LOG_SPACE) {
-            usp = ((UndoZone
*)g_instance.undo_cxt.uZones[idx])->GetUndoSpace();
-        } else {
-            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetSlotSpace();
+        HeapTuple indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(idx_oid));
+        if (!HeapTupleIsValid(indexTuple)) {
+            ereport(ERROR, (errmodule(MOD_CACHE), errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+                errmsg("cache lookup failed for index %u.", idx_oid),
+                errdetail("The index tuple is invalid."),
+                errcause("The index is not found in syscache."),
+                erraction("Retry this function.")));
        }
-        used += (uint64)usp->Used();
+        Form_pg_index indexForm = (Form_pg_index)GETSTRUCT(indexTuple);
+
+        values[0] = CStringGetTextDatum(g_instance.attr.attr_common.PGXCNodeName);
+        values[1] = BoolGetDatum(indexForm->indisready);
+        values[2] = BoolGetDatum(indexForm->indisvalid);
+        return heap_form_tuple(funcctx->tuple_desc, values, nulls);
    }
-    return used;
+    return NULL;
}

-static void ReadUndoSpaceFromShared(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore,
-    UndoSpaceType type)
+/*
+ * @Description : Get index status on all nodes.
+ * @in : schemaname, idxname
+ * @out : None
+ * @return : Datum.
+ */
+Datum gs_get_index_status(PG_FUNCTION_ARGS)
{
-    Assert(tupleDesc != NULL);
-    Assert(tupstore != NULL);
-
-    uint32 startIdx = 0;
-    uint32 endIdx = 0;
-    char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'};
-    uint64 used = UndoSpaceSize(type);
-
-    if (id == INVALID_ZONE_ID) {
-        used = UndoSpaceSize(UNDO_LOG_SPACE) + UndoSpaceSize(UNDO_SLOT_SPACE);
-        endIdx = UNDO_ZONE_COUNT - 1;
-    } else {
-        used = UndoSpaceSize(type);
-        startIdx = id;
-        endIdx = id;
-    }
-
-    for (auto idx = startIdx; idx <= endIdx; idx++) {
-        bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false};
-        Datum values[PG_STAT_USP_PERSIST_META_COLS];
-        UndoSpace *usp;
-        if (g_instance.undo_cxt.uZones[idx] == NULL) {
-            continue;
-        }
-        if (type == UNDO_LOG_SPACE) {
-            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetUndoSpace();
-        } else {
-            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetSlotSpace();
-        }
-        values[0] = ObjectIdGetDatum((Oid)idx);
-        values[1] = ObjectIdGetDatum((Oid)0); // unused
-        errno_t rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1,
-            UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(usp->Tail()));
-        securec_check_ss(rc, "\0", "\0");
-        values[2] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1,
-            UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(usp->Head()));
-        securec_check_ss(rc, "\0", "\0");
-        values[3] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1,
-            UNDO_REC_PTR_FORMAT, used);
-        securec_check_ss(rc, "\0", "\0");
-        values[4] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1,
-            UNDO_REC_PTR_FORMAT, usp->Used());
-        securec_check_ss(rc, "\0", "\0");
-        values[5] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1,
-            UNDO_REC_PTR_FORMAT, usp->LSN());
-        securec_check_ss(rc, "\0", "\0");
-        values[6] = CStringGetTextDatum(textBuffer);
-        tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
-    }
-
-    tuplestore_donestoring(tupstore);
-}
-
-void GetUndoSpaceValues(Datum *values, char *textBuffer,
-    UndoSpaceMetaInfo undoSpaceMeta, uint32 loop, errno_t *rc)
-{
-    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1,
-        UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoSpaceMeta.tail));
-    securec_check_ss(*rc, "\0", "\0");
-    values[2] =
CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, UNDO_PTR_GET_OFFSET(undoSpaceMeta.head)); - securec_check_ss(*rc, "\0", "\0"); - values[3] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, (uint64)0xFFFF); - securec_check_ss(*rc, "\0", "\0"); - values[4] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, (undoSpaceMeta.tail - undoSpaceMeta.head) / BLCKSZ); - securec_check_ss(*rc, "\0", "\0"); - values[5] = CStringGetTextDatum(textBuffer); - *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, - UNDO_REC_PTR_FORMAT, undoSpaceMeta.lsn); - securec_check_ss(*rc, "\0", "\0"); - values[6] = CStringGetTextDatum(textBuffer); -} - -static void ReadUndoSpaceFromDisk(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore, - UndoSpaceType type) -{ - Assert(tupleDesc != NULL); - Assert(tupstore != NULL); - int ret = 0; - uint32 startIdx = 0; - uint32 endIdx = 0; - uint32 undoSpaceBegin = 0; - uint32 undoZoneMetaPageCnt = 0; - uint32 undoSpaceMetaPageCnt = 0; - char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; - int fd = BasicOpenFile(UNDO_META_FILE, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); - - Checkfd(fd); - Checkid(id, &startIdx, &endIdx); - - /* Seek start position for writing transactionGroup meta. */ - UNDOZONE_META_PAGE_COUNT(UNDO_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, undoZoneMetaPageCnt); - UNDOZONE_META_PAGE_COUNT(UNDO_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, undoSpaceMetaPageCnt); - if (type == UNDO_LOG_SPACE) { - undoSpaceBegin = undoZoneMetaPageCnt * UNDO_META_PAGE_SIZE; - } else { - undoSpaceBegin = (undoZoneMetaPageCnt + undoSpaceMetaPageCnt) * UNDO_META_PAGE_SIZE; - } - - for (auto idx = startIdx; idx <= endIdx; idx++) { - bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; - Datum values[PG_STAT_USP_PERSIST_META_COLS]; - uint32 readPos = 0; - UndoSpaceMetaInfo undoSpaceMeta; - errno_t rc; - if (idx < PERSIST_ZONE_COUNT) { - readPos = undoSpaceBegin + (idx / UNDOZONE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE + - (idx % UNDOZONE_COUNT_PER_PAGE) * sizeof(UndoSpaceMetaInfo); - lseek(fd, readPos, SEEK_SET); - ret = read(fd, &undoSpaceMeta, sizeof(UndoSpaceMetaInfo)); - if (ret != sizeof(UndoSpaceMetaInfo)) { - ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), - errmsg("Read undo meta file fail, expect size(%lu), real size(%u)", - sizeof(UndoSpaceMetaInfo), ret))); - break; - } - } else { - rc = memset_s(&undoSpaceMeta, sizeof(UndoSpaceMetaInfo), 0, sizeof(UndoSpaceMetaInfo)); - securec_check(rc, "\0", "\0"); - } - DECLARE_NODE_COUNT(); - GET_UPERSISTENCE_BY_ZONEID((int)idx, nodeCount); - values[0] = ObjectIdGetDatum((Oid)idx); - values[1] = ObjectIdGetDatum((Oid)upersistence); - GetUndoSpaceValues(values, textBuffer, undoSpaceMeta, idx, &rc); - tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); - } - - tuplestore_donestoring(tupstore); - close(fd); -} - -bool Checkrsinfo(const ReturnSetInfo *rsinfo) -{ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - return true; - } - if (!(rsinfo->allowedModes & SFRM_Materialize)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - return true; - } - return false; -} - 
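The removed Checkrsinfo above, and gs_undo_meta just below, open-code exactly the ReturnSetInfo validation, tupdesc lookup, and tuplestore setup that this patch centralizes in the new BuildTupleResult helper (the pattern followed by the new local_redo_time_count and local_xlog_redo_statics). A sketch of a set-returning function written against that helper; the function name and columns are hypothetical, and the code assumes the usual backend headers and a matching pg_proc declaration rather than being a standalone program:

```
#include "postgres.h"
#include "funcapi.h"

/* Hypothetical example: a two-column SRF built on the new helper. */
extern Tuplestorestate *BuildTupleResult(FunctionCallInfo fcinfo, TupleDesc *tupdesc);

Datum my_counters_view(PG_FUNCTION_ARGS)
{
    TupleDesc tupdesc;
    /* one call replaces the rsinfo checks, tupdesc lookup and tuplestore setup */
    Tuplestorestate *tupstore = BuildTupleResult(fcinfo, &tupdesc);

    Datum values[2];
    bool nulls[2] = {false, false};
    for (int i = 0; i < 3; i++) {
        values[0] = Int32GetDatum(i);
        values[1] = Int64GetDatum((int64)i * 100);
        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    }

    tuplestore_donestoring(tupstore);
    return (Datum)0; /* the result is handed back through rsinfo */
}
```

Because BuildTupleResult has already set rsinfo->returnMode and rsinfo->setResult, the function returns (Datum)0 and the executor drains the tuplestore.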
-Datum gs_undo_meta(PG_FUNCTION_ARGS)
-{
-    int type = PG_GETARG_INT32(0); // Indicate meta data type(0:undozone, 1:group, 2:undoSpace, 3:slotSpace)
-    int id = PG_GETARG_INT32(1); // zoneId (-1 represents all)
-    int metaLocation = PG_GETARG_INT32(2); // meta location (0:shared , 1:disk)
-    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
-    TupleDesc tupDesc;
-    Tuplestorestate *tupstore = NULL;
-    MemoryContext per_query_ctx;
-    MemoryContext oldcontext;
-
-    if (Checkrsinfo(rsinfo)) {
-        PG_RETURN_VOID();
-    }
-    if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) {
-        elog(ERROR, "return type must be a row type");
+    if (IS_PGXC_DATANODE) {
+        ereport(ERROR, (errmodule(MOD_FUNCTION), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("unsupported function on datanodes."),
+            errdetail("Cannot execute a global function directly on datanodes."),
+            errcause("Execute a global function on datanodes."),
+            erraction("Execute this function on coordinators.")));
         PG_RETURN_VOID();
     }
-    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
-    oldcontext = MemoryContextSwitchTo(per_query_ctx);
-    tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
-    rsinfo->returnMode = SFRM_Materialize;
-    rsinfo->setResult = tupstore;
-    rsinfo->setDesc = tupDesc;
-    MemoryContextSwitchTo(oldcontext);
+    Datum values[INDEX_STATUS_VIEW_COL_NUM];
+    bool nulls[INDEX_STATUS_VIEW_COL_NUM] = {false};
-    switch (type) {
-        /* Undospace meta info. */
-        case 0:
-            if (metaLocation == 0) {
-                ReadUndoZoneMetaFromShared(id, &tupDesc, tupstore);
+    char* schname = PG_GETARG_CSTRING(0);
+    char* idxname = PG_GETARG_CSTRING(1);
+
+    if (schname == NULL || strlen(schname) == 0 || idxname == NULL || strlen(idxname) == 0) {
+        ereport(ERROR, (errmodule(MOD_INDEX), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("Invalid input schema name or index name."),
+            errdetail("The input schema or index name is null."),
+            errcause("Input parameters are empty or missing."),
+            erraction("Please input the correct schema name and index name.")));
+        PG_RETURN_VOID();
+    }
+
+    FuncCallContext *funcctx = NULL;
+    /* get the first row of the view */
+    if (SRF_IS_FIRSTCALL()) {
+        funcctx = SRF_FIRSTCALL_INIT();
+        MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+        funcctx->tuple_desc = get_index_status_view_frist_row();
+        /* for coordinator, get a view of all nodes */
+        funcctx->max_calls = u_sess->pgxc_cxt.NumDataNodes + u_sess->pgxc_cxt.NumCoords;
+        funcctx->user_fctx = get_remote_index_status(funcctx->tuple_desc, schname, idxname);
+        (void)MemoryContextSwitchTo(oldcontext);
+
+        if (funcctx->user_fctx == NULL) {
+            SRF_RETURN_DONE(funcctx);
+        }
+    }
+
+    /* stuff done on every call of the function */
+    funcctx = SRF_PERCALL_SETUP();
+
+    if (funcctx->user_fctx) {
+        Tuplestorestate* tupstore = ((TableDistributionInfo*)funcctx->user_fctx)->state->tupstore;
+        TupleTableSlot* slot = ((TableDistributionInfo*)funcctx->user_fctx)->slot;
+
+        if (!tuplestore_gettupleslot(tupstore, true, false, slot)) {
+            HeapTuple tuple = fetch_local_index_status(funcctx, schname, idxname);
+            if (tuple != NULL) {
+                SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
             } else {
-                ReadUndoZoneMetaFromDisk(id, &tupDesc, tupstore);
+                /* release context when all tuples are returned */
+                FreeParallelFunctionState(((TableDistributionInfo*)funcctx->user_fctx)->state);
+                ExecDropSingleTupleTableSlot(slot);
+                pfree_ext(funcctx->user_fctx);
+                funcctx->user_fctx = NULL;
+                SRF_RETURN_DONE(funcctx);
             }
-            break;
-            /* TransactionGroup
meta info. */ - case 1: - if (metaLocation == 0) { - ReadTransSlotMetaFromShared(id, &tupDesc, tupstore); - } else { - ReadTransSlotMetaFromDisk(id, &tupDesc, tupstore); - } - break; - case 2: - if (metaLocation == 0) { - ReadUndoSpaceFromShared(id, &tupDesc, tupstore, UNDO_LOG_SPACE); - } else { - ReadUndoSpaceFromDisk(id, &tupDesc, tupstore, UNDO_LOG_SPACE); - } - break; - case 3: - if (metaLocation == 0) { - ReadUndoSpaceFromShared(id, &tupDesc, tupstore, UNDO_SLOT_SPACE); - } else { - ReadUndoSpaceFromDisk(id, &tupDesc, tupstore, UNDO_SLOT_SPACE); - } - break; - default: - break; - } - - PG_RETURN_VOID(); -} - -Datum gs_undo_translot(PG_FUNCTION_ARGS) -{ - int rc = 0; - int type = PG_GETARG_INT32(0); // Indicates query meta from share memory or persistent file - int zoneId = PG_GETARG_INT32(1); // zone id - char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; - ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; - TupleDesc tupDesc; - Tuplestorestate *tupstore = NULL; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - PG_RETURN_VOID(); - } - if (!(rsinfo->allowedModes & SFRM_Materialize)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - PG_RETURN_VOID(); - } - if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) { - elog(ERROR, "return type must be a row type"); - PG_RETURN_VOID(); - } - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = tupDesc; - MemoryContextSwitchTo(oldcontext); - - if (type == 0 || type == 1) { - if (zoneId < 0 || zoneId >= UNDO_ZONE_COUNT) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Zone id is invalid %d", zoneId))); - PG_RETURN_VOID(); } - } - UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; - if (uzone == NULL) { - PG_RETURN_VOID(); - } - TransactionSlot *slot = NULL; - /* Query translot meta info from shared memory. 
*/
-    UndoSlotBuffer buf;
-    for (UndoSlotPtr slotPtr = uzone->GetRecycle(); slotPtr < uzone->GetAllocate();
-        slotPtr = GetNextSlotPtr(slotPtr)) {
-        buf.PrepareTransactionSlot(slotPtr);
-        slot = buf.FetchTransactionSlot(slotPtr);
-        bool nulls[PG_STAT_TRANSLOT_META_COLS] = {false};
-        Datum values[PG_STAT_TRANSLOT_META_COLS];
+        for (uint32 i = 0; i < INDEX_STATUS_VIEW_COL_NUM; i++) {
+            values[i] = tableam_tslot_getattr(slot, (i + 1), &nulls[i]);
+        }
-        rc = memset_s(textBuffer, STAT_UNDO_LOG_SIZE, 0, STAT_UNDO_LOG_SIZE);
-        securec_check(rc, "\0", "\0");
-        values[0] = ObjectIdGetDatum((Oid)zoneId);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu",
-            (uint64)slot->XactId());
-        securec_check_ss(rc, "\0", "\0");
-        values[1] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT,
-            UNDO_PTR_GET_OFFSET(slot->StartUndoPtr()));
-        securec_check_ss(rc, "\0", "\0");
-        values[2] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT,
-            UNDO_PTR_GET_OFFSET(slot->EndUndoPtr()));
-        securec_check_ss(rc, "\0", "\0");
-        values[3] = CStringGetTextDatum(textBuffer);
-        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT,
-            UNDO_PTR_GET_OFFSET(slotPtr));
-        securec_check_ss(rc, "\0", "\0");
-        values[4] = CStringGetTextDatum(textBuffer);
-        tuplestore_putvalues(tupstore, tupDesc, values, nulls);
+        HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+        (void)ExecClearTuple(slot);
+        SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
     }
-    buf.Release();
-    tuplestore_donestoring(tupstore);
-    PG_RETURN_VOID();
+    SRF_RETURN_DONE(funcctx);
 }
+
+#endif
diff --git a/src/common/backend/utils/adt/pgundostatfuncs.cpp b/src/common/backend/utils/adt/pgundostatfuncs.cpp
new file mode 100644
index 000000000..c3b2ce386
--- /dev/null
+++ b/src/common/backend/utils/adt/pgundostatfuncs.cpp
@@ -0,0 +1,1597 @@
+/* -------------------------------------------------------------------------
+ *
+ * pgundostatfuncs.cpp
+ *      Functions for accessing undo and ustore statistics data
+ *
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *      src/backend/utils/adt/pgundostatfuncs.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+#include "knl/knl_variable.h"
+#include <fcntl.h>
+
+#include "access/transam.h"
+#include "access/tableam.h"
+#include "access/ustore/undo/knl_uundoapi.h"
+#include "access/ustore/undo/knl_uundotxn.h"
+#include "access/ustore/undo/knl_uundozone.h"
+#include "access/ubtree.h"
+#include "access/redo_statistic.h"
+#include "access/xlog.h"
+#include "connector.h"
+#include "commands/user.h"
+#include "commands/vacuum.h"
+#include "funcapi.h"
+#include "gaussdb_version.h"
+#include "libpq/ip.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/globalplancache.h"
+#include "utils/inet.h"
+#include "utils/timestamp.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/memprot.h"
+#include "utils/typcache.h"
+#include "utils/syscache.h"
+#include "pgxc/pgxc.h"
+#include "pgxc/nodemgr.h"
+#include "storage/lock/lwlock.h"
+#include "storage/smgr/segment.h"
+#include "storage/proc.h"
+#include
"storage/procarray.h" +#include "storage/buf/buf_internals.h" +#include "workload/cpwlm.h" +#include "workload/workload.h" +#include "pgxc/pgxcnode.h" +#include "access/hash.h" +#include "libcomm/libcomm.h" +#include "pgxc/poolmgr.h" +#include "pgxc/execRemote.h" +#include "utils/elog.h" +#include "utils/memtrace.h" +#include "commands/user.h" +#include "instruments/gs_stat.h" +#include "instruments/list.h" +#include "replication/rto_statistic.h" +#include "storage/lock/lock.h" + +const int STAT_USTORE_BUFF_SIZE = 10240; +const int STAT_UNDO_COLS = 10; +const int STAT_UNDO_BUFFER_SIZE = 500; +const uint TOP_USED_ZONE_NUM = 3; +const float FORCE_RECYCLE_PERCENT = 0.8; +const int UNDO_SLOT_FILE_MAXSIZE = 1024 * 32; +const int MBYTES_TO_KBYTES = 1024; +const int UNDO_TOPUSED = 0; +const int UNDO_SECONDUSED = 1; +const int UNDO_THIRDUSED = 2; +const int PG_STAT_USP_PERSIST_META_COLS = 9; +const int STAT_UNDO_LOG_SIZE = 17; +const int PG_STAT_UBTREE_IDX_VERFIY_COLS = 4; +const int PG_STAT_UBTREE_RECYCLE_QUEUE_COLS = 6; +const int PG_STAT_TRANSLOT_META_COLS = 7; +const int COMMITED_STATUS = 0; +const int INPROCESS_STATUS = 1; +const int ABORTING_STATUS = 2; +const int ABORTED_STATUS = 3; + +const int TYPE_UNDO_ZONE = 0; +const int TYPE_GROUP = 1; +const int TYPE_UNDO_SPACE = 2; +const int TYPE_SLOT_SPACE = 3; + +/* ustore stat */ +extern Datum gs_stat_ustore(PG_FUNCTION_ARGS); + +typedef struct UndoHeader { + UndoRecordHeader whdr_; + UndoRecordBlock wblk_; + UndoRecordTransaction wtxn_; + UndoRecordPayload wpay_; + UndoRecordOldTd wtd_; + UndoRecordPartition wpart_; + UndoRecordTablespace wtspc_; +} UndoHeader; + +void Checkfd(int fd) +{ + if (fd < 0) { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("could not open file \%s", UNDO_META_FILE))); + return; + } +} + +Datum gs_stat_ustore(PG_FUNCTION_ARGS) +{ + char result[STAT_USTORE_BUFF_SIZE] = {0}; +#ifdef DEBUG_UHEAP + errno_t ret; + LWLockAcquire(UHeapStatLock, LW_SHARED); + + ret = snprintf_s(result, sizeof(result), sizeof(result) - 1, "Prune Page \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page (SUCCESS) = %lu \n", + UHeapStat_shared->prune_page[PRUNE_PAGE_SUCCESS]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page (NO_SPACE) = %lu \n", + UHeapStat_shared->prune_page[PRUNE_PAGE_NO_SPACE]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page (UPDATE_IN_PROGRESS)= %lu \n=========================\n\n", + UHeapStat_shared->prune_page[PRUNE_PAGE_UPDATE_IN_PROGRESS]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page (PRUNE_PAGE_IN_RECOVERY)= %lu \n=========================\n\n", + UHeapStat_shared->prune_page[PRUNE_PAGE_IN_RECOVERY]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page (PRUNE_PAGE_INVALID)= %lu \n=========================\n\n", + UHeapStat_shared->prune_page[PRUNE_PAGE_INVALID]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page 
(PRUNE_PAGE_XID_FILTER)= %lu \n=========================\n\n", + UHeapStat_shared->prune_page[PRUNE_PAGE_XID_FILTER]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page (PRUNE_PAGE_FILLFACTOR)= %lu \n=========================\n\n", + UHeapStat_shared->prune_page[PRUNE_PAGE_FILLFACTOR]); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Prune Page OPs profile \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\t Prune Page SUC: %u %u %u \n", + UHeapStat_shared->op_count_suc.ins, UHeapStat_shared->op_count_suc.del, UHeapStat_shared->op_count_suc.upd); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\t Prune Page TOT: %u %u %u \n", + UHeapStat_shared->op_count_tot.ins, UHeapStat_shared->op_count_tot.del, UHeapStat_shared->op_count_tot.upd); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Prune Page OPs freespace profile \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\t Prune Page FreeSpace TOT: %u %u %u \n", UHeapStat_shared->op_space_tot.ins, + UHeapStat_shared->op_space_tot.del, UHeapStat_shared->op_space_tot.upd); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "PageReserveTransactionSlot (where to get transaction slot) \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tSlot has been reserved by current xid: %lu \n", + UHeapStat_shared->get_transslot_from[TRANSSLOT_RESERVED_BY_CURRENT_XID]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tGot free slot after invalidating slots: %lu \n", + UHeapStat_shared->get_transslot_from[TRANSSLOT_FREE_AFTER_INVALIDATION]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tGot free slot after freezing slots: %lu \n", + UHeapStat_shared->get_transslot_from[TRANSSLOT_FREE_AFTER_FREEZING]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tCannot get a free transaction slot: %lu \n=========================\n\n", + UHeapStat_shared->get_transslot_from[TRANSSLOT_CANNOT_GET]); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Inplace Update Stats \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tINPLACE UPDATE: %lu \n", + UHeapStat_shared->update[INPLACE_UPDATE]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), 
sizeof(result) - strlen(result) -1, + "\tNON INPLACE UPDATE: %lu \n", + UHeapStat_shared->update[NON_INPLACE_UPDATE]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tTotal: %lu \n=========================\n\n", + UHeapStat_shared->update[NON_INPLACE_UPDATE] + UHeapStat_shared->update[INPLACE_UPDATE]); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Non Inplace Update Reasons \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tIndex Updated: %lu \n", + UHeapStat_shared->noninplace_update_cause[INDEX_UPDATED]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tToast: %lu \n", + UHeapStat_shared->noninplace_update_cause[TOAST]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tPrune Page Failed: %lu \n", + UHeapStat_shared->noninplace_update_cause[PAGE_PRUNE_FAILED]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tSlot reused: %lu \n", + UHeapStat_shared->noninplace_update_cause[SLOT_REUSED]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tnblocks < NBLOCKS: %lu \n=========================\n\n", + UHeapStat_shared->noninplace_update_cause[nblocks_LESS_THAN_NBLOCKS]); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Slot status in UHeapTupleFetch \n"); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tFROZEN_SLOT = %lu \n", + UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_FROZEN_SLOT]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tXID < Oldest XID in UNDO = %lu \n", + UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_OLDEST_XID]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tSlot is invalid and Xid is Visible in Snapshot = %lu \n", + UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_INVALID_SLOT]); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tFetch Trans Info From UNDO = %lu\n=========================\n\n", + UHeapStat_shared->visibility_check_with_xid[VISIBILITY_CHECK_SUCCESS_UNDO]); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Undo Chain Traversal Stat \n"); + securec_check_ss(ret, "\0", "\0"); + + double tuple_old_version_visit_rate = 0.0; + if (UHeapStat_shared->tuple_visits > 0) { + tuple_old_version_visit_rate = + 1.0 * 
UHeapStat_shared->tuple_old_version_visits / UHeapStat_shared->tuple_visits; + } + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Tuple visits: %lu\tOld version visits: %lu\tOld version visit rate: %.6f \n", + UHeapStat_shared->tuple_visits, UHeapStat_shared->tuple_old_version_visits, tuple_old_version_visit_rate); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Undo Chain Traversal Length \n"); + securec_check_ss(ret, "\0", "\0"); + + double chain_visited_avg_len = 0.0; + if (UHeapStat_shared->undo_chain_visited_count > 0) { + chain_visited_avg_len = + UHeapStat_shared->undo_chain_visited_sum_len * 1.0 / UHeapStat_shared->undo_chain_visited_count; + } + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\t# Of undo_chain_visited_sum_len = %ld | visited_count = %ld | miss_count = %ld | visited_avg_len = %lf | " + "visited_max_len = %ld | visited_min_len = %ld \n", + UHeapStat_shared->undo_chain_visited_sum_len, UHeapStat_shared->undo_chain_visited_count, + UHeapStat_shared->undo_chain_visited_miss_count, chain_visited_avg_len, + UHeapStat_shared->undo_chain_visited_max_len, UHeapStat_shared->undo_chain_visited_min_len); + securec_check_ss(ret, "\0", "\0"); + + double page_visited_avg_len = 0.0; + if (UHeapStat_shared->undo_chain_visited_count > 0) { + page_visited_avg_len = + UHeapStat_shared->undo_page_visited_sum_len * 1.0 / UHeapStat_shared->undo_chain_visited_count; + } + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\t# Of undo_page_visited_sum_len = %ld | visited_count = %ld | page_visited_avg_len = %lf \n", + UHeapStat_shared->undo_page_visited_sum_len, UHeapStat_shared->undo_chain_visited_count, page_visited_avg_len); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "prepare undo record rzero count %lu nzero_count %lu \n", UHeapStat_shared->undo_record_prepare_rzero_count, + UHeapStat_shared->undo_record_prepare_nzero_count); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "groups allocated %lu released %lu \n", + UHeapStat_shared->undo_groups_allocate, UHeapStat_shared->undo_groups_release); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "slots allocated %lu released %lu \n", + UHeapStat_shared->undo_slots_allocate, UHeapStat_shared->undo_slots_recycle); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "space recycle %lu unrecycle %lu \n", + UHeapStat_shared->undo_space_recycle, UHeapStat_shared->undo_space_unrecycle); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "oldest xid delay %lu \n", + UHeapStat_shared->oldest_xid_having_undo_delay); + securec_check_ss(ret, "\0", "\0"); + + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "Undo lock information: \n"); + securec_check_ss(ret, "\0", "\0"); + ret = 
snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tDiscard lock hold time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu \tavg %.6f \n", + UHeapStat_shared->undo_discard_lock_hold_time_sum, UHeapStat_shared->undo_discard_lock_hold_time_min, + UHeapStat_shared->undo_discard_lock_hold_time_max, UHeapStat_shared->undo_discard_lock_hold_cnt, + 1.0 * UHeapStat_shared->undo_discard_lock_hold_time_sum / Max(1, UHeapStat_shared->undo_discard_lock_hold_cnt)); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tDiscard lock wait time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu\tavg %.6f \n", + UHeapStat_shared->undo_discard_lock_wait_time_sum, UHeapStat_shared->undo_discard_lock_wait_time_min, + UHeapStat_shared->undo_discard_lock_wait_time_max, UHeapStat_shared->undo_discard_lock_wait_cnt, + 1.0 * UHeapStat_shared->undo_discard_lock_wait_time_sum / Max(1, UHeapStat_shared->undo_discard_lock_wait_cnt)); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tSpace lock hold time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu\tavg %.6f \n", + UHeapStat_shared->undo_space_lock_hold_time_sum, UHeapStat_shared->undo_space_lock_hold_time_min, + UHeapStat_shared->undo_space_lock_hold_time_max, UHeapStat_shared->undo_space_lock_hold_cnt, + 1.0 * UHeapStat_shared->undo_space_lock_hold_time_sum / Max(1, UHeapStat_shared->undo_space_lock_hold_cnt)); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "\tSpace lock wait time(us): total %lu\tmin %lu\tmax %lu\tcnt %lu\tavg %.6f \n", + UHeapStat_shared->undo_space_lock_wait_time_sum, UHeapStat_shared->undo_space_lock_wait_time_min, + UHeapStat_shared->undo_space_lock_wait_time_max, UHeapStat_shared->undo_space_lock_wait_cnt, + 1.0 * UHeapStat_shared->undo_space_lock_wait_time_sum / Max(1, UHeapStat_shared->undo_space_lock_wait_cnt)); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(result + strlen(result), sizeof(result) - strlen(result), sizeof(result) - strlen(result) -1, + "INSERT: %lu\tRetry: %lu\tRetry Time: %lu\tRetry Max: %lu\tRetry Time MAX: %lu \n", UHeapStat_shared->dml, + UHeapStat_shared->retry, UHeapStat_shared->retry_time, UHeapStat_shared->retry_max, + UHeapStat_shared->retry_time_max); + securec_check_ss(ret, "\0", "\0"); + + result[strlen(result)] = '\0'; + + LWLockRelease(UHeapStatLock); + +#endif + PG_RETURN_TEXT_P(cstring_to_text(result)); +} + +using namespace undo; + +bool ReadUndoBytes(char *destptr, int destlen, char **readeptr, char *endptr, int *myBytesRead, int *alreadyRead) +{ + if (*myBytesRead >= destlen) { + *myBytesRead -= destlen; + return true; + } + int remaining = destlen - *myBytesRead; + int maxReadOnCurrPage = endptr - *readeptr; + int canRead = Min(remaining, maxReadOnCurrPage); + if (canRead == 0) { + return false; + } + errno_t rc = memcpy_s(destptr + *myBytesRead, remaining, *readeptr, canRead); + securec_check(rc, "\0", "\0"); + *readeptr += canRead; + *alreadyRead += canRead; + *myBytesRead = 0; + return (canRead == remaining); +} +bool ReadUndoRecord(UndoHeader *urec, char *buffer, int startingByte, int *alreadyRead) +{ + char *readptr = buffer + startingByte; + char *endptr = buffer + BLCKSZ; + int myBytesRead = *alreadyRead; + if (!ReadUndoBytes((char 
*)&(urec->whdr_), SIZE_OF_UNDO_RECORD_HEADER, &readptr, endptr, &myBytesRead,
+        alreadyRead)) {
+        return false;
+    }
+    if (!ReadUndoBytes((char *)&(urec->wblk_), SIZE_OF_UNDO_RECORD_BLOCK, &readptr, endptr, &myBytesRead,
+        alreadyRead)) {
+        return false;
+    }
+    if ((urec->whdr_.uinfo & UNDO_UREC_INFO_TRANSAC) != 0) {
+        if (!ReadUndoBytes((char *)&urec->wtxn_, SIZE_OF_UNDO_RECORD_TRANSACTION, &readptr, endptr, &myBytesRead,
+            alreadyRead)) {
+            return false;
+        }
+    }
+    if ((urec->whdr_.uinfo & UNDO_UREC_INFO_OLDTD) != 0) {
+        if (!ReadUndoBytes((char *)&urec->wtd_, SIZE_OF_UNDO_RECORD_OLDTD, &readptr, endptr, &myBytesRead,
+            alreadyRead)) {
+            return false;
+        }
+    }
+    if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_PARTOID) != 0) {
+        if (!ReadUndoBytes((char *)&urec->wpart_, SIZE_OF_UNDO_RECORD_PARTITION, &readptr, endptr, &myBytesRead,
+            alreadyRead)) {
+            return false;
+        }
+    }
+    if ((urec->whdr_.uinfo & UNDO_UREC_INFO_HAS_TABLESPACEOID) != 0) {
+        if (!ReadUndoBytes((char *)&urec->wtspc_, SIZE_OF_UNDO_RECORD_TABLESPACE, &readptr, endptr, &myBytesRead,
+            alreadyRead)) {
+            return false;
+        }
+    }
+    if ((urec->whdr_.uinfo & UNDO_UREC_INFO_PAYLOAD) != 0) {
+        if (!ReadUndoBytes((char *)&urec->wpay_, SIZE_OF_UNDO_RECORD_PAYLOAD, &readptr, endptr, &myBytesRead,
+            alreadyRead)) {
+            return false;
+        }
+    }
+    return true;
+}
+static int OpenUndoBlock(int zoneId, BlockNumber blockno)
+{
+    char fileName[100] = {0};
+    errno_t rc = EOK;
+    int segno = blockno / UNDOSEG_SIZE;
+    rc = snprintf_s(fileName, sizeof(fileName), sizeof(fileName) - 1, "undo/permanent/%05X.%07zX", zoneId, segno);
+    securec_check_ss(rc, "\0", "\0");
+    int fd = open(fileName, O_RDONLY | PG_BINARY, S_IRUSR | S_IWUSR);
+    Checkfd(fd);
+
+    return fd;
+}
+static bool ParseUndoRecord(UndoRecPtr urp, Tuplestorestate *tupstore, TupleDesc tupDesc)
+{
+    char buffer[BLCKSZ] = {'\0'};
+    BlockNumber blockno = UNDO_PTR_GET_BLOCK_NUM(urp);
+    int zoneId = UNDO_PTR_GET_ZONE_ID(urp);
+    int startingByte = ((urp) & ((UINT64CONST(1) << 44) - 1)) % BLCKSZ;
+    int fd = -1;
+    int alreadyRead = 0;
+    off_t seekpos;
+    errno_t rc = EOK;
+    uint32 ret = 0;
+    UndoHeader *urec = (UndoHeader *)malloc(sizeof(UndoHeader));
+    if (urec == NULL) {
+        ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory")));
+        return false;
+    }
+    UndoRecPtr blkprev = INVALID_UNDO_REC_PTR;
+    rc = memset_s(urec, sizeof(UndoHeader), 0, sizeof(UndoHeader));
+    securec_check(rc, "\0", "\0");
+    do {
+        fd = OpenUndoBlock(zoneId, blockno);
+        if (fd < 0) {
+            free(urec);
+            return false;
+        }
+        seekpos = (off_t)BLCKSZ * (blockno % ((BlockNumber)UNDOSEG_SIZE));
+        lseek(fd, seekpos, SEEK_SET);
+        rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ);
+        securec_check(rc, "\0", "\0");
+        ret = read(fd, (char *)buffer, BLCKSZ);
+        if (ret != BLCKSZ) {
+            close(fd);
+            free(urec);
+            fprintf(stderr, "Read undo meta page failed, expect size(8192), real size(%u).\n", ret);
+            return false;
+        }
+        if (ReadUndoRecord(urec, buffer, startingByte, &alreadyRead)) {
+            break;
+        }
+        startingByte = UNDO_LOG_BLOCK_HEADER_SIZE;
+        blockno++;
+    } while (true);
+    blkprev = urec->wblk_.blkprev;
+    char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'};
+    bool nulls[11] = {false};
+    Datum values[11];
+
+    rc = memset_s(textBuffer, STAT_UNDO_LOG_SIZE, 0, STAT_UNDO_LOG_SIZE);
+    securec_check(rc, "\0", "\0");
+    values[ARR_0] = ObjectIdGetDatum(urp);
+    values[ARR_1] = ObjectIdGetDatum(urec->whdr_.xid);
+
+    rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->whdr_.cid);
+    securec_check_ss(rc, "\0", "\0");
+    values[ARR_2] = CStringGetTextDatum(textBuffer);
+
+    rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1,
UNDO_REC_PTR_FORMAT, urec->whdr_.reloid); + securec_check_ss(rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + urec->whdr_.relfilenode); + securec_check_ss(rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->whdr_.uinfo); + securec_check_ss(rc, "\0", "\0"); + values[ARR_5] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wblk_.blkprev); + securec_check_ss(rc, "\0", "\0"); + values[ARR_6] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wblk_.blkno); + securec_check_ss(rc, "\0", "\0"); + values[ARR_7] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wblk_.offset); + securec_check_ss(rc, "\0", "\0"); + values[ARR_8] = CStringGetTextDatum(textBuffer); + + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wtxn_.prevurp); + securec_check_ss(rc, "\0", "\0"); + values[ARR_9] = CStringGetTextDatum(textBuffer); + + rc = + snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, urec->wpay_.payloadlen); + securec_check_ss(rc, "\0", "\0"); + values[ARR_10] = CStringGetTextDatum(textBuffer); + + tuplestore_putvalues(tupstore, tupDesc, values, nulls); + free(urec); + close(fd); + if (blkprev != INVALID_UNDO_REC_PTR) { + ParseUndoRecord(blkprev, tupstore, tupDesc); + } + return true; +} + +void moveMaxUsedSpaceItem (uint startIdx, uint32 *maxUsedSpace, + uint32 *maxUsedSpaceZoneId, const uint size) +{ + for (uint usedSpaceIdx = (size - 1); usedSpaceIdx > startIdx; usedSpaceIdx--) { + maxUsedSpace[usedSpaceIdx] = maxUsedSpace[usedSpaceIdx-1]; + maxUsedSpaceZoneId[usedSpaceIdx] = maxUsedSpaceZoneId[usedSpaceIdx-1]; + } +} + +void UpdateMaxUsedSapceStat(uint32 usedSpace, uint32 idx, uint32 *maxUsedSpace, + uint32 *maxUsedSpaceZoneId, const uint size) +{ + for (uint usedSpaceIdx = 0; usedSpaceIdx < size; usedSpaceIdx++) { + if (usedSpace > maxUsedSpace[usedSpaceIdx]) { + if (maxUsedSpaceZoneId[usedSpaceIdx] != idx) { + moveMaxUsedSpaceItem(usedSpaceIdx, maxUsedSpace, maxUsedSpaceZoneId, size); + } + maxUsedSpace[usedSpaceIdx] = usedSpace; + maxUsedSpaceZoneId[usedSpaceIdx] = idx; + break; + } + } +} +Datum gs_stat_undo(PG_FUNCTION_ARGS) +{ +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in multiple nodes mode."))); + PG_RETURN_VOID(); +#else + uint32 undoTotalSize = g_instance.undo_cxt.undoTotalSize; + uint32 limitSize = (uint32)(u_sess->attr.attr_storage.undo_space_limit_size * FORCE_RECYCLE_PERCENT); + uint32 createdUndoFiles = 0; + uint32 discardedUndoFiles = 0; + uint32 zoneUsedCount = 0; + uint32 maxUsedSpace[TOP_USED_ZONE_NUM] = {0}; + uint32 maxUsedSpaceZoneId[TOP_USED_ZONE_NUM] = {0}; + + int rc = 0; + char textBuffer[STAT_UNDO_BUFFER_SIZE] = {'\0'}; + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + TupleDesc tupDesc; + Tuplestorestate *tupstore = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued 
function called in context that cannot accept a set"))); + PG_RETURN_VOID(); + } + if (!(rsinfo->allowedModes & SFRM_Materialize)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + PG_RETURN_VOID(); + } + if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) { + elog(ERROR, "return type must be a row type"); + PG_RETURN_VOID(); + } + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupDesc; + MemoryContextSwitchTo(oldcontext); + for (uint64 idx = 0; idx <= UNDO_ZONE_COUNT - 1; idx++) { + if (g_instance.undo_cxt.uZones == NULL) { + break; + } + UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; + if (uzone == NULL) { + continue; + } + uint32 usedSpace = uzone->UndoSize() * (BLCKSZ / MBYTES_TO_KBYTES) / MBYTES_TO_KBYTES; + if (usedSpace != 0) { + UpdateMaxUsedSapceStat(usedSpace, idx, maxUsedSpace, maxUsedSpaceZoneId, TOP_USED_ZONE_NUM); + } + zoneUsedCount += 1; + UndoSpace *undoSpace = uzone->GetUndoSpace(); + UndoSpace *slotSpace = uzone->GetSlotSpace(); + createdUndoFiles += (undoSpace->Tail() / UNDO_FILE_MAXSIZE + slotSpace->Tail() / UNDO_SLOT_FILE_MAXSIZE); + discardedUndoFiles += (undoSpace->Head() / UNDO_FILE_MAXSIZE + slotSpace->Head() / UNDO_SLOT_FILE_MAXSIZE); + } + bool nulls[STAT_UNDO_COLS] = {false}; + Datum values[STAT_UNDO_COLS]; + rc = memset_s(textBuffer, STAT_UNDO_BUFFER_SIZE, 0, STAT_UNDO_BUFFER_SIZE); + securec_check(rc, "\0", "\0"); + + values[ARR_0] = UInt32GetDatum((uint32)zoneUsedCount); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%u : %u, %u : %u, %u : %u", + maxUsedSpaceZoneId[UNDO_TOPUSED], maxUsedSpace[UNDO_TOPUSED], maxUsedSpaceZoneId[UNDO_SECONDUSED], + maxUsedSpace[UNDO_SECONDUSED], maxUsedSpaceZoneId[UNDO_THIRDUSED], maxUsedSpace[UNDO_THIRDUSED]); + securec_check_ss(rc, "\0", "\0"); + values[ARR_1] = CStringGetTextDatum(textBuffer); + values[ARR_2] = UInt32GetDatum((uint32)(undoTotalSize * (BLCKSZ / MBYTES_TO_KBYTES) / MBYTES_TO_KBYTES)); + values[ARR_3] = UInt32GetDatum((uint32)(limitSize * (BLCKSZ / MBYTES_TO_KBYTES) / MBYTES_TO_KBYTES)); + values[ARR_4] = UInt64GetDatum((uint64)g_instance.undo_cxt.oldestXidInUndo); + values[ARR_5] = UInt64GetDatum((uint64)GetGlobalOldestXmin()); + values[ARR_6] = Int64GetDatum((int64)g_instance.undo_cxt.undoChainTotalSize); + values[ARR_7] = Int64GetDatum((int64)g_instance.undo_cxt.maxChainSize); + values[ARR_8] = UInt32GetDatum((uint32)createdUndoFiles); + values[ARR_9] = UInt32GetDatum((uint32)discardedUndoFiles); + + tuplestore_putvalues(tupstore, tupDesc, values, nulls); + tuplestore_donestoring(tupstore); + PG_RETURN_VOID(); +#endif +} + +#ifndef ENABLE_MULTIPLE_NODES +static uint64 UndoSize(UndoSpaceType type) +{ + uint64 used = 0; + for (auto idx = 0; idx < UNDO_ZONE_COUNT; idx++) { + undo::UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; + if (uzone == NULL) { + continue; + } + if (type == UNDO_LOG_SPACE) { + used += uzone->UndoSize(); + } else { + used += uzone->SlotSize(); + } + } + return used; +} + +static void PutTranslotInfoToTuple(int zoneId, uint32 offset, TransactionSlot *slot, Tuplestorestate *tupstore, + TupleDesc tupDesc) +{ + if (slot->XactId() != InvalidTransactionId || slot->StartUndoPtr() 
!= INVALID_UNDO_REC_PTR) { + char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; + bool nulls[PG_STAT_TRANSLOT_META_COLS] = {false}; + Datum values[PG_STAT_TRANSLOT_META_COLS]; + int rc = 0; + + rc = memset_s(textBuffer, STAT_UNDO_LOG_SIZE, 0, STAT_UNDO_LOG_SIZE); + securec_check(rc, "\0", "\0"); + values[ARR_0] = ObjectIdGetDatum((Oid)zoneId); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", (uint64)slot->XactId()); + securec_check_ss(rc, "\0", "\0"); + values[ARR_1] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(slot->StartUndoPtr())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(slot->EndUndoPtr())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, offset); + securec_check_ss(rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + if (TransactionIdDidCommit((uint64)slot->XactId())) { + values[ARR_5] = COMMITED_STATUS; + } else if (TransactionIdIsInProgress((uint64)slot->XactId())) { + values[ARR_5] = INPROCESS_STATUS; + } else if (slot->NeedRollback()) { + values[ARR_5] = ABORTING_STATUS; + } else { + values[ARR_5] = ABORTED_STATUS; + } + tuplestore_putvalues(tupstore, tupDesc, values, nulls); + } +} + +static void GetTranslotFromOneSegFile(int fd, int zoneId, Tuplestorestate *tupstore, TupleDesc tupDesc) +{ + TransactionSlot *slot = NULL; + errno_t rc = EOK; + off_t seekpos; + uint32 ret = 0; + char buffer[BLCKSZ] = {'\0'}; + + for (uint32 loop = 0; loop < UNDO_META_SEG_SIZE; loop++) { + seekpos = (off_t)BLCKSZ * loop; + lseek(fd, seekpos, SEEK_SET); + rc = memset_s(buffer, BLCKSZ, 0, BLCKSZ); + securec_check(rc, "\0", "\0"); + + ret = read(fd, (char *)buffer, BLCKSZ); + if (ret != BLCKSZ) { + close(fd); + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("can't read a block"))); + } + + for (uint32 offset = UNDO_LOG_BLOCK_HEADER_SIZE; offset < BLCKSZ - MAXALIGN(sizeof(TransactionSlot)); + offset += MAXALIGN(sizeof(TransactionSlot))) { + slot = (TransactionSlot *)(buffer + offset); + PutTranslotInfoToTuple(zoneId, offset, slot, tupstore, tupDesc); + } + } +} + +static void GetTranslotFromSegFiles(int zoneId, int segnobegin, int segnoend, Tuplestorestate *tupstore, + TupleDesc tupDesc) +{ + for (int segcurrent = segnobegin; segcurrent <= segnoend; segcurrent++) { + errno_t rc = EOK; + char fileName[100] = {0}; + rc = snprintf_s(fileName, sizeof(fileName), sizeof(fileName) - 1, "undo/permanent/%05X.meta.%07zX", zoneId, + segcurrent); + securec_check_ss(rc, "\0", "\0"); + int fd = open(fileName, O_RDONLY | PG_BINARY, S_IRUSR | S_IWUSR); + Checkfd(fd); + GetTranslotFromOneSegFile(fd, zoneId, tupstore, tupDesc); + close(fd); + } +} + +static void ReadTranslotFromDisk(int startIdx, int endIdx, Tuplestorestate *tupstore, TupleDesc tupDesc) +{ + int fd = BasicOpenFile(UNDO_META_FILE, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); + for (auto idx = startIdx; idx <= endIdx; idx++) { + uint32 undoSpaceBegin = 0; + uint32 undoZoneMetaPageCnt = 0; + uint32 undoSpaceMetaPageCnt = 0; + UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, undoZoneMetaPageCnt); + UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, undoSpaceMetaPageCnt); 
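+        /*
+         * The undo meta file stores zone meta pages first, then undo space
+         * meta pages, then transaction slot space meta pages; the slot space
+         * region read here therefore begins after both preceding regions.
+         */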
+ undoSpaceBegin = (undoZoneMetaPageCnt + undoSpaceMetaPageCnt) * UNDO_META_PAGE_SIZE; + uint32 readPos = 0; + UndoSpaceMetaInfo undoSpaceMeta; + + readPos = undoSpaceBegin + (idx / UNDOSPACE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE + + (idx % UNDOSPACE_COUNT_PER_PAGE) * sizeof(UndoSpaceMetaInfo); + lseek(fd, readPos, SEEK_SET); + int ret = read(fd, &undoSpaceMeta, sizeof(UndoSpaceMetaInfo)); + if (ret != sizeof(UndoSpaceMetaInfo)) { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("Read undo meta file fail, expect size(%lu), real size(%u)", sizeof(UndoSpaceMetaInfo), ret))); + break; + } + + int segnobegin = undoSpaceMeta.head / UNDO_SLOT_FILE_MAXSIZE; + int segnoend = undoSpaceMeta.tail / UNDO_SLOT_FILE_MAXSIZE - 1; + + GetTranslotFromSegFiles(idx, segnobegin, segnoend, tupstore, tupDesc); + } + close(fd); +} + +static void ReadTranslotFromMemory(int startIdx, int endIdx, Tuplestorestate *tupstore, TupleDesc tupDesc) +{ + char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; + for (auto idx = startIdx; idx <= endIdx; idx++) { + UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[idx]; + if (uzone == NULL) { + continue; + } + for (UndoSlotPtr slotPtr = uzone->GetRecycle(); slotPtr < uzone->GetAllocate(); + slotPtr = GetNextSlotPtr(slotPtr)) { + /* Query translot meta info from shared memory. */ + UndoSlotBuffer buf; + buf.PrepareTransactionSlot(slotPtr); + TransactionSlot *slot = NULL; + slot = buf.FetchTransactionSlot(slotPtr); + bool nulls[PG_STAT_TRANSLOT_META_COLS] = {false}; + Datum values[PG_STAT_TRANSLOT_META_COLS]; + errno_t rc; + + rc = memset_s(textBuffer, STAT_UNDO_LOG_SIZE, 0, STAT_UNDO_LOG_SIZE); + securec_check(rc, "\0", "\0"); + values[ARR_0] = ObjectIdGetDatum((Oid)idx); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", (uint64)slot->XactId()); + securec_check_ss(rc, "\0", "\0"); + values[ARR_1] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(slot->StartUndoPtr())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(slot->EndUndoPtr())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(slotPtr)); + securec_check_ss(rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + if (TransactionIdDidCommit((uint64)slot->XactId())) { + values[ARR_5] = COMMITED_STATUS; + } else if (TransactionIdIsInProgress((uint64)slot->XactId())) { + values[ARR_5] = INPROCESS_STATUS; + } else if (slot->NeedRollback()) { + values[ARR_5] = ABORTING_STATUS; + } else { + values[ARR_5] = ABORTED_STATUS; + } + tuplestore_putvalues(tupstore, tupDesc, values, nulls); + buf.Release(); + uzone->ReleaseSlotBuffer(); + } + } + + tuplestore_donestoring(tupstore); +} + + +static void ReadUndoZoneMetaFromShared(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore) +{ + Assert(tupleDesc != NULL); + Assert(tupstore != NULL); + + uint32 startIdx = 0; + uint32 endIdx = 0; + uint64 used = 0; + char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; + if (id == INVALID_ZONE_ID) { + used = UndoSize(UNDO_LOG_SPACE) + UndoSize(UNDO_SLOT_SPACE); + endIdx = UNDO_ZONE_COUNT - 1; + } else { + used = UndoSize(UNDO_LOG_SPACE); + endIdx = id; + startIdx = id; + } + + for (auto idx 
= startIdx; idx <= endIdx; idx++) { + Datum values[PG_STAT_USP_PERSIST_META_COLS]; + bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; + UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; + if (uzone == NULL) { + continue; + } + values[ARR_0] = ObjectIdGetDatum((Oid)uzone->GetZoneId()); + values[ARR_1] = ObjectIdGetDatum((Oid)uzone->GetPersitentLevel()); + errno_t rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(uzone->GetInsert())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(uzone->GetDiscard())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(uzone->GetForceDiscard())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, used); + securec_check_ss(rc, "\0", "\0"); + values[ARR_5] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, uzone->GetLSN()); + securec_check_ss(rc, "\0", "\0"); + values[ARR_6] = CStringGetTextDatum(textBuffer); + values[ARR_7] = ObjectIdGetDatum((Oid)uzone->GetAttachPid()); + tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); + } + + tuplestore_donestoring(tupstore); +} + +void Checkid(const int id, uint32 *startIdx, uint32 *endIdx) +{ + if (id == INVALID_ZONE_ID) { + *endIdx = UNDO_ZONE_COUNT - 1; + } else { + *startIdx = id; + *endIdx = id; + } +} + +void GetZoneMetaValues(Datum *values, char *textBuffer, UndoZoneMetaInfo undoZoneMeta, uint32 idx, errno_t *rc) +{ + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(undoZoneMeta.insert)); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(undoZoneMeta.discard)); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(undoZoneMeta.forceDiscard)); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, 0); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_5] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, undoZoneMeta.lsn); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_6] = CStringGetTextDatum(textBuffer); +} + +static void ReadTransSlotMetaFromShared(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore) +{ + Assert(tupleDesc != NULL); + Assert(tupstore != NULL); + + uint32 startIdx = 0; + uint32 endIdx = 0; + char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; + TransactionId recycleXmin; + TransactionId oldestXmin = GetOldestXminForUndo(&recycleXmin); + elog(LOG, "oldestXmin %lu", oldestXmin); + + if (id == INVALID_ZONE_ID) { + endIdx = UNDO_ZONE_COUNT - 1; + } else { + startIdx = id; + endIdx = id; + } + + for (auto idx = startIdx; 
idx <= endIdx; idx++) { + bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; + Datum values[PG_STAT_USP_PERSIST_META_COLS]; + UndoZone *uzone = (undo::UndoZone *)g_instance.undo_cxt.uZones[idx]; + if (uzone == NULL) { + continue; + } + values[ARR_0] = ObjectIdGetDatum((Oid)idx); + values[ARR_1] = ObjectIdGetDatum((Oid)0); // unused + errno_t rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(uzone->GetAllocate())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(uzone->GetRecycle())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", + UNDO_PTR_GET_OFFSET(uzone->GetRecycleXid())); + securec_check_ss(rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", recycleXmin); + securec_check_ss(rc, "\0", "\0"); + values[ARR_5] = CStringGetTextDatum(textBuffer); + rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, "%016lu", + pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo)); + securec_check_ss(rc, "\0", "\0"); + values[ARR_6] = CStringGetTextDatum(textBuffer); + tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); + } + + tuplestore_donestoring(tupstore); +} + +void GetTransMetaValues(Datum *values, char *textBuffer, UndoZoneMetaInfo undoZoneMeta, uint32 loop, errno_t *rc) +{ + TransactionId recycleXmin; + TransactionId oldestXmin = GetOldestXminForUndo(&recycleXmin); + elog(LOG, "oldestXmin %lu", oldestXmin); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(undoZoneMeta.allocate)); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_2] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, + UNDO_PTR_GET_OFFSET(undoZoneMeta.recycle)); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_3] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, "%016lu", undoZoneMeta.recycleXid); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_4] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, "%016lu", recycleXmin); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_5] = CStringGetTextDatum(textBuffer); + *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, "%016lu", + pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo)); + securec_check_ss(*rc, "\0", "\0"); + values[ARR_6] = CStringGetTextDatum(textBuffer); +} + +static void ReadUndoMetaFromDisk(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore, const int type) +{ + Assert(tupleDesc != NULL); + Assert(tupstore != NULL); + int ret = 0; + uint32 startIdx = 0; + uint32 endIdx = 0; + char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'}; + int fd = BasicOpenFile(UNDO_META_FILE, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); + + Checkfd(fd); + Checkid(id, &startIdx, &endIdx); + for (auto idx = startIdx; idx <= endIdx; idx++) { + bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false}; + Datum values[PG_STAT_USP_PERSIST_META_COLS]; + uint32 readPos = 0; + UndoZoneMetaInfo undoZoneMeta; + errno_t rc; + + if (idx < PERSIST_ZONE_COUNT) { + 
readPos = (idx / UNDOZONE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE +
+                (idx % UNDOZONE_COUNT_PER_PAGE) * sizeof(UndoZoneMetaInfo);
+            lseek(fd, readPos, SEEK_SET);
+            ret = read(fd, &undoZoneMeta, sizeof(UndoZoneMetaInfo));
+            if (ret != sizeof(UndoZoneMetaInfo)) {
+                ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg(
+                    "Read undo meta file failed, expect size(%lu), real size(%u)", sizeof(UndoZoneMetaInfo), ret)));
+                break;
+            }
+        } else {
+            rc = memset_s(&undoZoneMeta, sizeof(UndoZoneMetaInfo), 0, sizeof(UndoZoneMetaInfo));
+            securec_check(rc, "\0", "\0");
+        }
+        DECLARE_NODE_COUNT();
+        GET_UPERSISTENCE_BY_ZONEID((int)idx, nodeCount);
+        values[ARR_0] = ObjectIdGetDatum((Oid)idx);
+        values[ARR_1] = ObjectIdGetDatum((Oid)upersistence);
+        if (type == TYPE_UNDO_ZONE) {
+            GetZoneMetaValues(values, textBuffer, undoZoneMeta, idx, &rc);
+        } else {
+            GetTransMetaValues(values, textBuffer, undoZoneMeta, idx, &rc);
+        }
+
+        tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
+    }
+
+    tuplestore_donestoring(tupstore);
+    close(fd);
+}
+
+
+static uint64 UndoSpaceSize(UndoSpaceType type)
+{
+    uint64 used = 0;
+    for (auto idx = 0; idx < UNDO_ZONE_COUNT; idx++) {
+        UndoSpace *usp;
+        if (g_instance.undo_cxt.uZones[idx] == NULL) {
+            continue;
+        }
+        if (type == UNDO_LOG_SPACE) {
+            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetUndoSpace();
+        } else {
+            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetSlotSpace();
+        }
+        used += (uint64)usp->Used();
+    }
+    return used;
+}
+
+static void ReadUndoSpaceFromShared(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore, UndoSpaceType type)
+{
+    Assert(tupleDesc != NULL);
+    Assert(tupstore != NULL);
+
+    uint32 startIdx = 0;
+    uint32 endIdx = 0;
+    char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'};
+    uint64 used = 0;
+
+    if (id == INVALID_ZONE_ID) {
+        used = UndoSpaceSize(UNDO_LOG_SPACE) + UndoSpaceSize(UNDO_SLOT_SPACE);
+        endIdx = UNDO_ZONE_COUNT - 1;
+    } else {
+        used = UndoSpaceSize(type);
+        startIdx = id;
+        endIdx = id;
+    }
+
+    for (auto idx = startIdx; idx <= endIdx; idx++) {
+        bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false};
+        Datum values[PG_STAT_USP_PERSIST_META_COLS];
+        UndoSpace *usp;
+        if (g_instance.undo_cxt.uZones[idx] == NULL) {
+            continue;
+        }
+        if (type == UNDO_LOG_SPACE) {
+            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetUndoSpace();
+        } else {
+            usp = ((UndoZone *)g_instance.undo_cxt.uZones[idx])->GetSlotSpace();
+        }
+        values[ARR_0] = ObjectIdGetDatum((Oid)idx);
+        values[ARR_1] = ObjectIdGetDatum((Oid)0); // unused
+        errno_t rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT,
+            UNDO_PTR_GET_OFFSET(usp->Tail()));
+        securec_check_ss(rc, "\0", "\0");
+        values[ARR_2] = CStringGetTextDatum(textBuffer);
+        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT,
+            UNDO_PTR_GET_OFFSET(usp->Head()));
+        securec_check_ss(rc, "\0", "\0");
+        values[ARR_3] = CStringGetTextDatum(textBuffer);
+        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, used);
+        securec_check_ss(rc, "\0", "\0");
+        values[ARR_4] = CStringGetTextDatum(textBuffer);
+        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, usp->Used());
+        securec_check_ss(rc, "\0", "\0");
+        values[ARR_5] = CStringGetTextDatum(textBuffer);
+        rc = snprintf_s(textBuffer, sizeof(textBuffer), sizeof(textBuffer) - 1, UNDO_REC_PTR_FORMAT, usp->LSN());
+        securec_check_ss(rc, "\0", "\0");
+        values[ARR_6] = CStringGetTextDatum(textBuffer);
+        tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
+    }
+
+    tuplestore_donestoring(tupstore);
+}
+
+void GetUndoSpaceValues(Datum *values, char *textBuffer, UndoSpaceMetaInfo undoSpaceMeta, uint32 loop, errno_t *rc)
+{
+    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT,
+        UNDO_PTR_GET_OFFSET(undoSpaceMeta.tail));
+    securec_check_ss(*rc, "\0", "\0");
+    values[ARR_2] = CStringGetTextDatum(textBuffer);
+    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT,
+        UNDO_PTR_GET_OFFSET(undoSpaceMeta.head));
+    securec_check_ss(*rc, "\0", "\0");
+    values[ARR_3] = CStringGetTextDatum(textBuffer);
+    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, (uint64)0xFFFF);
+    securec_check_ss(*rc, "\0", "\0");
+    values[ARR_4] = CStringGetTextDatum(textBuffer);
+    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT,
+        (undoSpaceMeta.tail - undoSpaceMeta.head) / BLCKSZ);
+    securec_check_ss(*rc, "\0", "\0");
+    values[ARR_5] = CStringGetTextDatum(textBuffer);
+    *rc = snprintf_s(textBuffer, STAT_UNDO_LOG_SIZE, STAT_UNDO_LOG_SIZE - 1, UNDO_REC_PTR_FORMAT, undoSpaceMeta.lsn);
+    securec_check_ss(*rc, "\0", "\0");
+    values[ARR_6] = CStringGetTextDatum(textBuffer);
+}
+
+static void ReadUndoSpaceFromDisk(int id, TupleDesc *tupleDesc, Tuplestorestate *tupstore, UndoSpaceType type)
+{
+    Assert(tupleDesc != NULL);
+    Assert(tupstore != NULL);
+    int ret = 0;
+    uint32 startIdx = 0;
+    uint32 endIdx = 0;
+    uint32 undoSpaceBegin = 0;
+    uint32 undoZoneMetaPageCnt = 0;
+    uint32 undoSpaceMetaPageCnt = 0;
+    char textBuffer[STAT_UNDO_LOG_SIZE] = {'\0'};
+    int fd = BasicOpenFile(UNDO_META_FILE, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
+
+    Checkfd(fd);
+    Checkid(id, &startIdx, &endIdx);
+
+    /* Seek start position for reading undo space meta. */
+    UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, undoZoneMetaPageCnt);
+    UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, undoSpaceMetaPageCnt);
+    if (type == UNDO_LOG_SPACE) {
+        undoSpaceBegin = undoZoneMetaPageCnt * UNDO_META_PAGE_SIZE;
+    } else {
+        undoSpaceBegin = (undoZoneMetaPageCnt + undoSpaceMetaPageCnt) * UNDO_META_PAGE_SIZE;
+    }
+
+    for (auto idx = startIdx; idx <= endIdx; idx++) {
+        bool nulls[PG_STAT_USP_PERSIST_META_COLS] = {false};
+        Datum values[PG_STAT_USP_PERSIST_META_COLS];
+        uint32 readPos = 0;
+        UndoSpaceMetaInfo undoSpaceMeta;
+        errno_t rc;
+        if (idx < PERSIST_ZONE_COUNT) {
+            readPos = undoSpaceBegin + (idx / UNDOZONE_COUNT_PER_PAGE) * UNDO_META_PAGE_SIZE +
+                (idx % UNDOZONE_COUNT_PER_PAGE) * sizeof(UndoSpaceMetaInfo);
+            lseek(fd, readPos, SEEK_SET);
+            ret = read(fd, &undoSpaceMeta, sizeof(UndoSpaceMetaInfo));
+            if (ret != sizeof(UndoSpaceMetaInfo)) {
+                ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg(
+                    "Read undo meta file failed, expect size(%lu), real size(%u)", sizeof(UndoSpaceMetaInfo), ret)));
+                break;
+            }
+        } else {
+            rc = memset_s(&undoSpaceMeta, sizeof(UndoSpaceMetaInfo), 0, sizeof(UndoSpaceMetaInfo));
+            securec_check(rc, "\0", "\0");
+        }
+        DECLARE_NODE_COUNT();
+        GET_UPERSISTENCE_BY_ZONEID((int)idx, nodeCount);
+        values[ARR_0] = ObjectIdGetDatum((Oid)idx);
+        values[ARR_1] = ObjectIdGetDatum((Oid)upersistence);
+        GetUndoSpaceValues(values, textBuffer, undoSpaceMeta, idx, &rc);
+        tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
+    }
+
+    tuplestore_donestoring(tupstore);
+    close(fd);
+}
+
+bool Checkrsinfo(const ReturnSetInfo *rsinfo)
+{
+    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("set-valued function called in context that cannot accept a set")));
+        return true;
+    }
+    if (!(rsinfo->allowedModes & SFRM_Materialize)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("materialize mode required, but it is not allowed in this context")));
+        return true;
+    }
+    return false;
+}
+#endif
+
+Datum gs_undo_meta(PG_FUNCTION_ARGS)
+{
+#ifdef ENABLE_MULTIPLE_NODES
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in multiple nodes mode.")));
+    PG_RETURN_VOID();
+#else
+    int type = PG_GETARG_INT32(0);         // Indicates the meta data type (0:undozone, 1:group, 2:undoSpace, 3:slotSpace)
+    int id = PG_GETARG_INT32(1);           // zoneId (-1 represents all)
+    int metaLocation = PG_GETARG_INT32(2); // meta location (0:shared, 1:disk)
+    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
+    TupleDesc tupDesc;
+    Tuplestorestate *tupstore = NULL;
+    MemoryContext per_query_ctx;
+    MemoryContext oldcontext;
+
+    if (g_instance.undo_cxt.uZones == NULL) {
+        elog(ERROR, "Haven't used Ustore");
+    }
+
+    if (id < -1 || id >= UNDO_ZONE_COUNT || (metaLocation != 0 && metaLocation != 1)) {
+        elog(ERROR, "Invalid input param");
+    }
+
+    if (Checkrsinfo(rsinfo)) {
+        PG_RETURN_VOID();
+    }
+
+    if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) {
+        elog(ERROR, "return type must be a row type");
+        PG_RETURN_VOID();
+    }
+
+    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+    oldcontext = MemoryContextSwitchTo(per_query_ctx);
+    tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
+    rsinfo->returnMode = SFRM_Materialize;
+    rsinfo->setResult = tupstore;
+    rsinfo->setDesc = tupDesc;
+    MemoryContextSwitchTo(oldcontext);
+
+    switch (type) {
+        /* Undo zone meta info. */
+        case TYPE_UNDO_ZONE:
+            if (metaLocation == 0) {
+                ReadUndoZoneMetaFromShared(id, &tupDesc, tupstore);
+            } else {
+                ReadUndoMetaFromDisk(id, &tupDesc, tupstore, TYPE_UNDO_ZONE);
+            }
+            break;
+        /* TransactionGroup meta info. */
+        case TYPE_GROUP:
+            if (metaLocation == 0) {
+                ReadTransSlotMetaFromShared(id, &tupDesc, tupstore);
+            } else {
+                ReadUndoMetaFromDisk(id, &tupDesc, tupstore, TYPE_GROUP);
+            }
+            break;
+        case TYPE_UNDO_SPACE:
+            if (metaLocation == 0) {
+                ReadUndoSpaceFromShared(id, &tupDesc, tupstore, UNDO_LOG_SPACE);
+            } else {
+                ReadUndoSpaceFromDisk(id, &tupDesc, tupstore, UNDO_LOG_SPACE);
+            }
+            break;
+        case TYPE_SLOT_SPACE:
+            if (metaLocation == 0) {
+                ReadUndoSpaceFromShared(id, &tupDesc, tupstore, UNDO_SLOT_SPACE);
+            } else {
+                ReadUndoSpaceFromDisk(id, &tupDesc, tupstore, UNDO_SLOT_SPACE);
+            }
+            break;
+        default:
+            elog(ERROR, "Invalid input param");
+            break;
+    }
+
+    PG_RETURN_VOID();
+#endif
+}
+
+Datum gs_undo_record(PG_FUNCTION_ARGS)
+{
+#ifdef ENABLE_MULTIPLE_NODES
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in multiple nodes mode.")));
+    PG_RETURN_VOID();
+#else
+    UndoRecPtr undoptr = DatumGetUInt64(PG_GETARG_DATUM(0));
+    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
+    TupleDesc tupDesc;
+    Tuplestorestate *tupstore = NULL;
+    MemoryContext per_query_ctx;
+    MemoryContext oldcontext;
+
+    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("set-valued function called in context that cannot accept a set")));
+        PG_RETURN_VOID();
+    }
+    if (!(rsinfo->allowedModes & SFRM_Materialize)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("materialize mode required, but it is not allowed in this context")));
+        PG_RETURN_VOID();
+    }
+    if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) {
+        elog(ERROR, "return type must be a row type");
+        PG_RETURN_VOID();
+    }
+
+    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+    oldcontext = MemoryContextSwitchTo(per_query_ctx);
+    tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
+    rsinfo->returnMode = SFRM_Materialize;
+    rsinfo->setResult = tupstore;
+    rsinfo->setDesc = tupDesc;
+    MemoryContextSwitchTo(oldcontext);
+
+    ParseUndoRecord(undoptr, tupstore, tupDesc);
+    tuplestore_donestoring(tupstore);
+
+    PG_RETURN_VOID();
+#endif
+}
+
+Datum gs_undo_translot(PG_FUNCTION_ARGS)
+{
+#ifdef ENABLE_MULTIPLE_NODES
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in multiple nodes mode.")));
+    PG_RETURN_VOID();
+#else
+    int type = PG_GETARG_INT32(0);     // Indicates whether to query meta from shared memory or the persistent file
+    int32 zoneId = PG_GETARG_INT32(1); // zone id
+    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
+    TupleDesc tupDesc;
+    Tuplestorestate *tupstore = NULL;
+    MemoryContext per_query_ctx;
+    MemoryContext oldcontext;
+
+    if (g_instance.undo_cxt.uZones == NULL) {
+        elog(ERROR, "Haven't used Ustore");
+    }
+
+    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("set-valued function called in context that cannot accept a set")));
+        PG_RETURN_VOID();
+    }
+    if (!(rsinfo->allowedModes & SFRM_Materialize)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("materialize mode required, but it is not allowed in this context")));
+        PG_RETURN_VOID();
+    }
+    if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) {
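/*
 * The materialize-mode boilerplate that every SQL-callable function in this
 * patch repeats (validate rsinfo, require SFRM_Materialize, resolve the
 * composite result type, create a tuplestore in the per-query context) can
 * be condensed into one helper. The name SetupMaterializedSrf is invented
 * here for illustration; each call inside it appears verbatim in the
 * surrounding functions:
 *
 *   static Tuplestorestate *SetupMaterializedSrf(FunctionCallInfo fcinfo, TupleDesc *tupDesc)
 *   {
 *       ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
 *       if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo) || !(rsinfo->allowedModes & SFRM_Materialize))
 *           ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 *               errmsg("set-valued function called in context that cannot accept a set")));
 *       if (get_call_result_type(fcinfo, NULL, tupDesc) != TYPEFUNC_COMPOSITE)
 *           elog(ERROR, "return type must be a row type");
 *       MemoryContext oldcxt = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
 *       Tuplestorestate *store = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
 *       rsinfo->returnMode = SFRM_Materialize;
 *       rsinfo->setResult = store;
 *       rsinfo->setDesc = *tupDesc;
 *       MemoryContextSwitchTo(oldcxt);
 *       return store;
 *   }
 */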
elog(ERROR, "return type must be a row type"); + PG_RETURN_VOID(); + } + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupDesc; + MemoryContextSwitchTo(oldcontext); + + if (type == 0 || type == 1) { + if (zoneId < -1 || zoneId >= UNDO_ZONE_COUNT) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Zone id is invalid %d", zoneId))); + PG_RETURN_VOID(); + } + } else { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Invalid input param"))); + PG_RETURN_VOID(); + } + + uint32 startIdx = 0; + uint32 endIdx = 0; + + if (zoneId == INVALID_ZONE_ID) { + endIdx = PERSIST_ZONE_COUNT - 1; + } else { + startIdx = zoneId; + endIdx = zoneId; + } + + if (type == 1) { + ReadTranslotFromDisk(startIdx, endIdx, tupstore, tupDesc); + } else { + ReadTranslotFromMemory(startIdx, endIdx, tupstore, tupDesc); + } + + PG_RETURN_VOID(); +#endif +} + +Datum gs_index_verify(PG_FUNCTION_ARGS) +{ +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in multiple nodes mode."))); + PG_RETURN_VOID(); +#else + Oid relationOid = PG_GETARG_OID(0); + uint32 blkno = (uint32)PG_GETARG_OID(1); + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + TupleDesc tupDesc; + Tuplestorestate *tupstore = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + PG_RETURN_VOID(); + } + if (!(rsinfo->allowedModes & SFRM_Materialize)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + PG_RETURN_VOID(); + } + if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) { + elog(ERROR, "return type must be a row type"); + PG_RETURN_VOID(); + } + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupDesc; + MemoryContextSwitchTo(oldcontext); + + if (relationOid == InvalidOid) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("relation oid is invalid %d", relationOid))); + PG_RETURN_VOID(); + } + + Relation relation = relation_open(relationOid, AccessShareLock); + Assert(relation->rd_isvalid); + if (!RelationIsUstoreIndex(relation)) { + relation_close(relation, AccessShareLock); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_USTORE), + errmsg("Relaiton corresponding to oid(%u) is not ubtree index.", relationOid), + errdetail("N/A"), + errcause("feature not supported"), + erraction("check defination of this rel"))); + PG_RETURN_VOID(); + } + + if (blkno == InvalidBlockNumber) { + relation_close(relation, AccessShareLock); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_USTORE), + errmsg("Block number(%u) is invalid.", blkno), + errdetail("N/A"), + errcause("Invalid block number."), + erraction("Check the blkno parameter."))); + PG_RETURN_VOID(); + } else if (blkno == 0) { + // Verfiy whole index tree + 
+        UBTreeVerifyIndex(relation, &tupDesc, tupstore, UBTREE_VERIFY_OUTPUT_PARAM_CNT);
+    } else {
+        // Verify single index page
+        uint32 verifyRes;
+        Page page = NULL;
+        bool nulls[UBTREE_VERIFY_OUTPUT_PARAM_CNT] = {false};
+        Datum values[UBTREE_VERIFY_OUTPUT_PARAM_CNT];
+        BTScanInsert cmpKeys = UBTreeMakeScanKey(relation, NULL);
+        Buffer buf = _bt_getbuf(relation, blkno, BT_READ);
+        if (BufferIsInvalid(buf)) {
+            relation_close(relation, AccessShareLock);
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Block number %u is invalid", blkno)));
+            PG_RETURN_VOID();
+        }
+
+        page = (Page)BufferGetPage(buf);
+        verifyRes = UBTreeVerifyOnePage(relation, page, cmpKeys, NULL);
+        _bt_relbuf(relation, buf);
+        pfree(cmpKeys);
+        values[ARR_0] = CStringGetTextDatum(UBTGetVerifiedPageTypeStr((uint32)VERIFY_MAIN_PAGE));
+        values[ARR_1] = ObjectIdGetDatum((Oid)blkno);
+        values[ARR_2] = CStringGetTextDatum(UBTGetVerifiedResultStr(verifyRes));
+        tuplestore_putvalues(tupstore, tupDesc, values, nulls);
+    }
+
+    tuplestore_donestoring(tupstore);
+    relation_close(relation, AccessShareLock);
+    PG_RETURN_VOID();
+#endif
+}
+
+Datum gs_index_recycle_queue(PG_FUNCTION_ARGS)
+{
+#ifdef ENABLE_MULTIPLE_NODES
+    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in multiple nodes mode.")));
+    PG_RETURN_VOID();
+#else
+    Oid relationOid = PG_GETARG_OID(0);
+    uint32 type = (uint32)PG_GETARG_OID(1);
+    uint32 blkno = (uint32)PG_GETARG_OID(2);
+    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
+    TupleDesc tupDesc;
+    Tuplestorestate *tupstore = NULL;
+    MemoryContext per_query_ctx;
+    MemoryContext oldcontext;
+
+    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("set-valued function called in context that cannot accept a set")));
+        PG_RETURN_VOID();
+    }
+    if (!(rsinfo->allowedModes & SFRM_Materialize)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("materialize mode required, but it is not allowed in this context")));
+        PG_RETURN_VOID();
+    }
+    if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) {
+        elog(ERROR, "return type must be a row type");
+        PG_RETURN_VOID();
+    }
+
+    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+    oldcontext = MemoryContextSwitchTo(per_query_ctx);
+    tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
+    rsinfo->returnMode = SFRM_Materialize;
+    rsinfo->setResult = tupstore;
+    rsinfo->setDesc = tupDesc;
+    MemoryContextSwitchTo(oldcontext);
+
+    if (relationOid == InvalidOid) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("relation oid is invalid %u", relationOid)));
+        PG_RETURN_VOID();
+    }
+    if (type > RECYCLE_NONE_FORK) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Invalid parse type %u", type)));
+        PG_RETURN_VOID();
+    }
+
+    Relation relation = relation_open(relationOid, AccessShareLock);
+    Assert(relation->rd_isvalid);
+    if (!RelationIsUstoreIndex(relation)) {
+        relation_close(relation, AccessShareLock);
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmodule(MOD_USTORE),
+            errmsg("Relation corresponding to oid(%u) is not a ubtree index.", relationOid),
+            errdetail("N/A"),
+            errcause("feature not supported"),
+            erraction("check definition of this rel")));
+        PG_RETURN_VOID();
+    }
+
+    if (type == RECYCLE_NONE_FORK) {
+        /*
+         * Check blkno first. Blkno 0 is the meta page of the ubtree index and does not
+         * need to be dumped; blkno -1 is an invalid block number.
+         */
+        if (blkno == 0 || blkno == InvalidBlockNumber) {
+            relation_close(relation, AccessShareLock);
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmodule(MOD_USTORE),
+                errmsg("Block number(%u) is invalid.", blkno),
+                errdetail("N/A"),
+                errcause("Invalid block number."),
+                erraction("Check the blkno parameter.")));
+            PG_RETURN_VOID();
+        }
+        Buffer buf = _bt_getbuf(relation, blkno, BT_READ);
+        if (BufferIsInvalid(buf)) {
+            relation_close(relation, AccessShareLock);
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Block number %u is invalid", blkno)));
+            PG_RETURN_VOID();
+        }
+        (void) UBTreeRecycleQueuePageDump(relation, buf, true, &tupDesc, tupstore, UBTREE_RECYCLE_OUTPUT_PARAM_CNT);
+        _bt_relbuf(relation, buf);
+    } else {
+        UBTreeDumpRecycleQueueFork(relation, (UBTRecycleForkNumber)type, &tupDesc, tupstore,
+            UBTREE_RECYCLE_OUTPUT_PARAM_CNT);
+    }
+
+    relation_close(relation, AccessShareLock);
+    // all rows were emitted into the tuplestore by the dump routines above
+    tuplestore_donestoring(tupstore);
+    PG_RETURN_VOID();
+#endif
+}
diff --git a/src/common/backend/utils/adt/pgxlogstatfuncs.cpp b/src/common/backend/utils/adt/pgxlogstatfuncs.cpp
new file mode 100644
index 000000000..1d90f358f
--- /dev/null
+++ b/src/common/backend/utils/adt/pgxlogstatfuncs.cpp
@@ -0,0 +1,363 @@
+/* -------------------------------------------------------------------------
+ *
+ * pgxlogstatfuncs.cpp
+ *    Functions for accessing xlog statistics data
+ *
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *    src/common/backend/utils/adt/pgxlogstatfuncs.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+#include "knl/knl_variable.h"
+#include 
+
+#include "access/transam.h"
+#include "access/tableam.h"
+#include "access/redo_statistic.h"
+#include "access/xlog.h"
+#include "connector.h"
+#include "catalog/namespace.h"
+#include "catalog/pg_database.h"
+#include "catalog/pg_tablespace.h"
+#include "catalog/pg_type.h"
+#include "catalog/pg_partition_fn.h"
+#include "catalog/pg_namespace.h"
+#include "commands/dbcommands.h"
+#include "commands/user.h"
+#include "commands/vacuum.h"
+#include "funcapi.h"
+#include "gaussdb_version.h"
+#include "libpq/ip.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/globalplancache.h"
+#include "utils/inet.h"
+#include "utils/timestamp.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/memprot.h"
+#include "utils/typcache.h"
+#include "utils/syscache.h"
+#include "pgxc/pgxc.h"
+#include "pgxc/nodemgr.h"
+#include "postmaster/autovacuum.h"
+#include "postmaster/postmaster.h"
+#include "storage/lock/lwlock.h"
+#include "postgres.h"
+#include "knl/knl_variable.h"
+#include "storage/smgr/segment.h"
+#include "storage/proc.h"
+#include "storage/procarray.h"
+#include "storage/buf/buf_internals.h"
+#include "workload/cpwlm.h"
+#include "workload/workload.h"
+#include "pgxc/pgxcnode.h"
+#include "access/hash.h"
+#include "libcomm/libcomm.h"
+#include "pgxc/poolmgr.h"
+#include "pgxc/execRemote.h"
+#include "utils/elog.h"
+#include "utils/memtrace.h"
+#include "commands/user.h"
+#include "instruments/gs_stat.h"
+#include "instruments/list.h"
+#include "replication/rto_statistic.h"
+#include "storage/lock/lock.h"
+
+const int STAT_XLOG_TBLENTRY_COLS = 4;
+const int STAT_XLOG_TEXT_BUFFER_SIZE = 1024;
+const int STAT_XLOG_FLUSH_LOCATION = 12;
+const int STAT_XLOG_FLUSH_STAT = 16;
+const int STAT_XLOG_FLUSH_STAT_OFF = -1;
+const int STAT_XLOG_FLUSH_STAT_ON = 0;
+const int STAT_XLOG_FLUSH_STAT_GET = 1;
+const int STAT_XLOG_FLUSH_STAT_CLEAR = 2;
+
+static void ReadAllWalInsertStatusTable(int64 walInsertStatusEntryCount, TupleDesc *tupleDesc,
+    Tuplestorestate *tupstore)
+{
+    Assert(tupleDesc != NULL);
+    Assert(tupstore != NULL);
+
+    volatile WALInsertStatusEntry *entry_ptr = NULL;
+    for (int64 idx = 0; idx < walInsertStatusEntryCount; idx++) {
+        entry_ptr = &g_instance.wal_cxt.walInsertStatusTable[GET_STATUS_ENTRY_INDEX(idx)];
+
+        bool nulls[STAT_XLOG_TBLENTRY_COLS] = {false};
+        Datum values[STAT_XLOG_TBLENTRY_COLS];
+
+        values[ARR_0] = UInt64GetDatum(idx);
+        values[ARR_1] = UInt64GetDatum(entry_ptr->endLSN);
+        values[ARR_2] = Int32GetDatum(entry_ptr->LRC);
+        values[ARR_3] = UInt32GetDatum(entry_ptr->status);
+        tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
+    }
+
+    tuplestore_donestoring(tupstore);
+}
+
+static void ReadWalInsertStatusTableByIdx(int64 idx, TupleDesc *tupleDesc, Tuplestorestate *tupstore)
+{
+    Assert(tupleDesc != NULL);
+    Assert(tupstore != NULL);
+
+    volatile WALInsertStatusEntry *entry_ptr = NULL;
+    entry_ptr = &g_instance.wal_cxt.walInsertStatusTable[GET_STATUS_ENTRY_INDEX(idx)];
+    bool nulls[STAT_XLOG_TBLENTRY_COLS] = {false};
+    Datum values[STAT_XLOG_TBLENTRY_COLS];
+    values[ARR_0] = UInt64GetDatum(idx);
+    values[ARR_1] = UInt64GetDatum(entry_ptr->endLSN);
+    values[ARR_2] = Int32GetDatum(entry_ptr->LRC);
+    values[ARR_3] = UInt32GetDatum(entry_ptr->status);
+    tuplestore_putvalues(tupstore, *tupleDesc, values, nulls);
+
+    tuplestore_donestoring(tupstore);
+}
+
+Datum gs_stat_wal_entrytable(PG_FUNCTION_ARGS)
+{
+    int64 idx = PG_GETARG_INT64(0); // -1: all walInsertStatus; n: walInsertStatusTable[n]
+
+    int64 walInsertStatusEntryCount =
+        GET_WAL_INSERT_STATUS_ENTRY_CNT(g_instance.attr.attr_storage.wal_insert_status_entries_power);
+    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
+    TupleDesc tupDesc;
+    Tuplestorestate *tupstore = NULL;
+    MemoryContext per_query_ctx;
+    MemoryContext oldcontext;
+
+    if (idx >= walInsertStatusEntryCount || idx < -1) {
+        elog(ERROR, "The idx is out of range.");
+        PG_RETURN_VOID();
+    }
+
+    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("set-valued function called in context that cannot accept a set")));
+        PG_RETURN_VOID();
+    }
+    if (!(rsinfo->allowedModes & SFRM_Materialize)) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("materialize mode required, but it is not allowed in this context")));
+        PG_RETURN_VOID();
+    }
+
+    if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) {
+        elog(ERROR, "return type must be a row type");
+        PG_RETURN_VOID();
+    }
+
+    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+    oldcontext = MemoryContextSwitchTo(per_query_ctx);
+    tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
+    rsinfo->returnMode = SFRM_Materialize;
+    rsinfo->setResult = tupstore;
+    rsinfo->setDesc = tupDesc;
+    MemoryContextSwitchTo(oldcontext);
+
+    if (idx == -1) {
+        ReadAllWalInsertStatusTable(walInsertStatusEntryCount, &tupDesc, tupstore);
+    } else {
+        ReadWalInsertStatusTableByIdx(idx, &tupDesc, tupstore);
+    }
+
+    PG_RETURN_VOID();
+}
+
+static void ReadFlushLocation(TupleDesc *tupleDesc, Tuplestorestate *tupstore)
+{
+    Assert(tupleDesc != NULL);
+    Assert(tupstore != NULL);
+
+    bool
nulls[STAT_XLOG_FLUSH_LOCATION] = {false}; + Datum values[STAT_XLOG_FLUSH_LOCATION]; + + volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; + volatile XLogwrtRqst *LogwrtRqst = &t_thrd.shemem_ptr_cxt.XLogCtl->LogwrtRqst; + volatile XLogwrtResult *LogwrtResult = &t_thrd.shemem_ptr_cxt.XLogCtl->LogwrtResult; + + values[ARR_0] = Int32GetDatum(g_instance.wal_cxt.lastWalStatusEntryFlushed); + values[ARR_1] = Int32GetDatum(g_instance.wal_cxt.lastLRCScanned); + values[ARR_2] = Int32GetDatum(Insert->CurrLRC); + values[ARR_3] = UInt64GetDatum(Insert->CurrBytePos); + values[ARR_4] = UInt32GetDatum(Insert->PrevByteSize); + values[ARR_5] = UInt64GetDatum(g_instance.wal_cxt.flushResult); + values[ARR_6] = UInt64GetDatum(g_instance.wal_cxt.sentResult); + values[ARR_7] = UInt64GetDatum(LogwrtRqst->Write); + values[ARR_8] = UInt64GetDatum(LogwrtRqst->Flush); + values[ARR_9] = UInt64GetDatum(LogwrtResult->Write); + values[ARR_10] = UInt64GetDatum(LogwrtResult->Flush); + values[ARR_11] = TimestampTzGetDatum(GetCurrentTimestamp()); + + tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); + tuplestore_donestoring(tupstore); +} + + +static void GetWalwriterFlushStat(TupleDesc *tupleDesc, Tuplestorestate *tupstore) +{ + Assert(tupleDesc != NULL); + Assert(tupstore != NULL); + + bool nulls[STAT_XLOG_FLUSH_STAT] = {false}; + Datum values[STAT_XLOG_FLUSH_STAT]; + + if (g_instance.wal_cxt.xlogFlushStats->writeTimes != 0) { + g_instance.wal_cxt.xlogFlushStats->avgActualWriteBytes = + g_instance.wal_cxt.xlogFlushStats->totalActualXlogSyncBytes / g_instance.wal_cxt.xlogFlushStats->writeTimes; + g_instance.wal_cxt.xlogFlushStats->avgWriteTime = + g_instance.wal_cxt.xlogFlushStats->totalWriteTime / g_instance.wal_cxt.xlogFlushStats->writeTimes; + g_instance.wal_cxt.xlogFlushStats->avgWriteBytes = + g_instance.wal_cxt.xlogFlushStats->totalXlogSyncBytes / g_instance.wal_cxt.xlogFlushStats->writeTimes; + } + + if (g_instance.wal_cxt.xlogFlushStats->syncTimes != 0) { + g_instance.wal_cxt.xlogFlushStats->avgSyncTime = + g_instance.wal_cxt.xlogFlushStats->totalSyncTime / g_instance.wal_cxt.xlogFlushStats->syncTimes; + g_instance.wal_cxt.xlogFlushStats->avgSyncBytes = + g_instance.wal_cxt.xlogFlushStats->totalXlogSyncBytes / g_instance.wal_cxt.xlogFlushStats->syncTimes; + g_instance.wal_cxt.xlogFlushStats->avgActualSyncBytes = + g_instance.wal_cxt.xlogFlushStats->totalActualXlogSyncBytes / g_instance.wal_cxt.xlogFlushStats->syncTimes; + } + + values[ARR_0] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->writeTimes); + values[ARR_1] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->syncTimes); + values[ARR_2] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->totalXlogSyncBytes); + values[ARR_3] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->totalActualXlogSyncBytes); + values[ARR_4] = UInt32GetDatum(g_instance.wal_cxt.xlogFlushStats->avgWriteBytes); + values[ARR_5] = UInt32GetDatum(g_instance.wal_cxt.xlogFlushStats->avgActualWriteBytes); + values[ARR_6] = UInt32GetDatum(g_instance.wal_cxt.xlogFlushStats->avgSyncBytes); + values[ARR_7] = UInt32GetDatum(g_instance.wal_cxt.xlogFlushStats->avgActualSyncBytes); + values[ARR_8] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->totalWriteTime); + values[ARR_9] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->totalSyncTime); + values[ARR_10] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->avgWriteTime); + values[ARR_11] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->avgSyncTime); + values[ARR_12] = 
UInt64GetDatum(GetNewestXLOGSegNo(t_thrd.proc_cxt.DataDir)); + values[ARR_13] = UInt64GetDatum(g_instance.wal_cxt.xlogFlushStats->currOpenXlogSegNo); + values[ARR_14] = TimestampTzGetDatum(g_instance.wal_cxt.xlogFlushStats->lastRestTime); + values[ARR_15] = TimestampTzGetDatum(GetCurrentTimestamp()); + tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); + tuplestore_donestoring(tupstore); +} + + +static void ClearWalwriterFlushStat(TupleDesc *tupleDesc, Tuplestorestate *tupstore) +{ + Assert(tupleDesc != NULL); + Assert(tupstore != NULL); + + g_instance.wal_cxt.xlogFlushStats->writeTimes = 0; + g_instance.wal_cxt.xlogFlushStats->syncTimes = 0; + g_instance.wal_cxt.xlogFlushStats->totalXlogSyncBytes = 0; + g_instance.wal_cxt.xlogFlushStats->totalActualXlogSyncBytes = 0; + g_instance.wal_cxt.xlogFlushStats->avgWriteBytes = 0; + g_instance.wal_cxt.xlogFlushStats->avgActualWriteBytes = 0; + g_instance.wal_cxt.xlogFlushStats->avgSyncBytes = 0; + g_instance.wal_cxt.xlogFlushStats->avgActualSyncBytes = 0; + g_instance.wal_cxt.xlogFlushStats->totalWriteTime = 0; + g_instance.wal_cxt.xlogFlushStats->totalSyncTime = 0; + g_instance.wal_cxt.xlogFlushStats->avgWriteTime = 0; + g_instance.wal_cxt.xlogFlushStats->avgSyncTime = 0; + g_instance.wal_cxt.xlogFlushStats->currOpenXlogSegNo = 0; + g_instance.wal_cxt.xlogFlushStats->lastRestTime = GetCurrentTimestamp(); + + GetWalwriterFlushStat(tupleDesc, tupstore); +} + +Datum gs_walwriter_flush_position(PG_FUNCTION_ARGS) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + TupleDesc tupDesc; + Tuplestorestate *tupstore = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + PG_RETURN_VOID(); + } + if (!(rsinfo->allowedModes & SFRM_Materialize)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + PG_RETURN_VOID(); + } + + if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) { + elog(ERROR, "return type must be a row type"); + PG_RETURN_VOID(); + } + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupDesc; + MemoryContextSwitchTo(oldcontext); + + ReadFlushLocation(&tupDesc, tupstore); + + PG_RETURN_VOID(); +} + +Datum gs_walwriter_flush_stat(PG_FUNCTION_ARGS) +{ + int operation = PG_GETARG_INT32(0); + + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + TupleDesc tupDesc; + Tuplestorestate *tupstore = NULL; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + PG_RETURN_VOID(); + } + if (!(rsinfo->allowedModes & SFRM_Materialize)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + PG_RETURN_VOID(); + } + + if (get_call_result_type(fcinfo, NULL, &tupDesc) != TYPEFUNC_COMPOSITE) { + elog(ERROR, "return type must be a row type"); + PG_RETURN_VOID(); + } + + per_query_ctx = 
rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupDesc; + MemoryContextSwitchTo(oldcontext); + + if (operation == STAT_XLOG_FLUSH_STAT_OFF) { + g_instance.wal_cxt.xlogFlushStats->statSwitch = false; + elog(INFO, "The xlogFlushStats switch is turned off."); + GetWalwriterFlushStat(&tupDesc, tupstore); + } else if (operation == STAT_XLOG_FLUSH_STAT_ON) { + g_instance.wal_cxt.xlogFlushStats->statSwitch = true; + elog(INFO, "The xlogFlushStats switch is turned on."); + GetWalwriterFlushStat(&tupDesc, tupstore); + } else if (operation == STAT_XLOG_FLUSH_STAT_GET) { + GetWalwriterFlushStat(&tupDesc, tupstore); + } else if (operation == STAT_XLOG_FLUSH_STAT_CLEAR) { + ClearWalwriterFlushStat(&tupDesc, tupstore); + } else { + elog(ERROR, "Parameter \"operation\" out of range."); + } + + PG_RETURN_VOID(); +} \ No newline at end of file diff --git a/src/common/backend/utils/adt/regexp.cpp b/src/common/backend/utils/adt/regexp.cpp index 83940e9f7..f1825a567 100644 --- a/src/common/backend/utils/adt/regexp.cpp +++ b/src/common/backend/utils/adt/regexp.cpp @@ -5,6 +5,7 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors * * * IDENTIFICATION @@ -39,6 +40,11 @@ #define PG_GETARG_TEXT_PP_IF_EXISTS(_n) ((PG_NARGS() > (_n)) ? PG_GETARG_TEXT_PP(_n) : NULL) +#define REGEX_COMPAT_MODE \ + ((u_sess->attr.attr_sql.sql_compatibility == A_FORMAT || \ + u_sess->attr.attr_sql.sql_compatibility == B_FORMAT) && \ + AFORMAT_REGEX_MATCH) + /* all the options of interest for regex functions */ typedef struct pg_re_flags { int cflags; /* compile flags for Spencer's regex code */ @@ -84,8 +90,9 @@ typedef struct regexp_matches_ctx { */ /* Local functions */ -static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, text* flags, Oid collation, - bool force_glob, bool use_subpatterns, bool ignore_degenerate); +static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, + pg_re_flags *re_flags, Oid collation, + bool use_subpatterns, bool ignore_degenerate, int start_search); static void cleanup_regexp_matches(regexp_matches_ctx* matchctx); static ArrayType* build_regexp_matches_result(regexp_matches_ctx* matchctx); static Datum build_regexp_split_result(regexp_matches_ctx* splitctx); @@ -299,6 +306,17 @@ static bool RE_compile_and_execute( return RE_execute(re, dat, dat_len, nmatch, pmatch); } +static void parse_re_set_n_flag(pg_re_flags* flags) +{ + if (REGEX_COMPAT_MODE) { + /* \n doesn't match . or [^ ] */ + flags->cflags &= ~REG_NLSTOP; + } else { + /* \n affects ^ $ . [^ */ + flags->cflags |= REG_NEWLINE; + } +} + /* * parse_re_flags - parse the options argument of regexp_matches and friends * @@ -314,6 +332,11 @@ static void parse_re_flags(pg_re_flags* flags, text* opts) flags->cflags = REG_ADVANCED; flags->glob = false; + if (REGEX_COMPAT_MODE) { + /* \n doesn't match . 
or [^ ] by default for compatibility */
+        flags->cflags |= REG_NLSTOP;
+    }
+
     if (opts != NULL) {
         char* opt_p = VARDATA_ANY(opts);
         int opt_len = VARSIZE_ANY_EXHDR(opts);
@@ -338,9 +361,11 @@ static void parse_re_flags(pg_re_flags* flags, text* opts)
                     flags->cflags |= REG_ICASE;
                     break;
                 case 'm': /* Perloid synonym for n */
-                case 'n': /* \n affects ^ $ . [^ */
                     flags->cflags |= REG_NEWLINE;
                     break;
+                case 'n':
+                    parse_re_set_n_flag(flags);
+                    break;
                 case 'p': /* ~Perl, \n affects . [^ */
                     flags->cflags |= REG_NLSTOP;
                     flags->cflags &= ~REG_NLANCH;
@@ -504,6 +529,108 @@ Datum textregexsubstr(PG_FUNCTION_ARGS)
     CHECK_RETNULL_RETURN_DATUM(result);
 }
 
+static void regexp_check_args(FunctionCallInfo fcinfo, int* arg, int init_val, int n, bool* has_null)
+{
+    if (PG_NARGS() > n) {
+        if (PG_ARGISNULL(n))
+            *has_null = true;
+        else
+            *arg = PG_GETARG_INT32(n);
+    }
+
+    if (*arg < init_val) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("argument '%d' is out of range", *arg),
+            errhint("should start from %d", init_val)));
+    }
+}
+
+static void regexp_get_re_flags(FunctionCallInfo fcinfo, pg_re_flags* re_flags, int n)
+{
+    text* flags = NULL;
+
+    /* match params */
+    if (PG_NARGS() > n && !PG_ARGISNULL(n))
+        flags = PG_GETARG_TEXT_PP(n);
+
+    parse_re_flags(re_flags, flags);
+    if (re_flags->glob) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("invalid regexp option: \"%c\"", 'g')));
+    }
+}
+/*
+ * Common implementation shared by the regexp_replace function family.
+ * regexp_replace(source, pattern [, replace_str [, position [, occurrence [, match_param]]]])
+ */
+Datum regexp_replace(PG_FUNCTION_ARGS)
+{
+    text* src = NULL;
+    text* pattern = NULL;
+    text* r = NULL;
+    int position = 1;
+    int occurrence = 0;
+    regex_t* re = NULL;
+    text* result = NULL;
+    pg_re_flags re_flags;
+    bool has_null = false;
+
+    /* source string */
+    if (!PG_ARGISNULL(ARG_0))
+        src = PG_GETARG_TEXT_PP(ARG_0);
+
+    /* pattern string */
+    if (!PG_ARGISNULL(1))
+        pattern = PG_GETARG_TEXT_PP(1);
+
+    /* replace string */
+    if (PG_NARGS() > ARG_2) {
+        if (!PG_ARGISNULL(ARG_2))
+            r = PG_GETARG_TEXT_PP(ARG_2);
+    }
+
+    regexp_check_args(fcinfo, &position, 1, ARG_3, &has_null);
+    regexp_check_args(fcinfo, &occurrence, 0, ARG_4, &has_null);
+    regexp_get_re_flags(fcinfo, &re_flags, ARG_5);
+
+    if (src == NULL || has_null)
+        PG_RETURN_NULL();
+
+    if (pattern == NULL)
+        PG_RETURN_TEXT_P(src);
+
+    re = RE_compile_and_cache(pattern, re_flags.cflags, PG_GET_COLLATION());
+    result = replace_text_regexp(src, (void*)re, r, position, occurrence);
+
+    if (VARHDRSZ == VARSIZE(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT)
+        PG_RETURN_NULL();
+    else
+        PG_RETURN_TEXT_P(result);
+}
+
+/* regexp_replace(source, pattern) */
+Datum regexp_replace_noopt(PG_FUNCTION_ARGS)
+{
+    return regexp_replace(fcinfo);
+}
+
+/* regexp_replace(source, pattern, replace_str, position) */
+Datum regexp_replace_position(PG_FUNCTION_ARGS)
+{
+    return regexp_replace(fcinfo);
+}
+
+/* regexp_replace(source, pattern, replace_str, position, occurrence) */
+Datum regexp_replace_occur(PG_FUNCTION_ARGS)
+{
+    return regexp_replace(fcinfo);
+}
+/* regexp_replace(source, pattern, replace_str, position, occurrence, flags) */
+Datum regexp_replace_matchopt(PG_FUNCTION_ARGS)
+{
+    return regexp_replace(fcinfo);
+}
+
 /*
  * textregexreplace_noopt()
  *    Return a string matched by a regular expression, with replacement.
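// The position/occurrence contract implemented above is easiest to see in a
// standalone sketch. Everything below is illustrative only: ReplaceOccurrence
// is an invented name and std::regex stands in for the Spencer engine used by
// replace_text_regexp; occurrence == 0 replaces every match at or after the
// 1-based start position, any other value replaces only that occurrence.
#include <iostream>
#include <regex>
#include <string>

static std::string ReplaceOccurrence(const std::string &src, const std::string &pattern,
                                     const std::string &repl, size_t position, int occurrence)
{
    if (position < 1 || position > src.size())
        return src;
    std::regex re(pattern);
    std::string head = src.substr(0, position - 1);
    std::string tail = src.substr(position - 1);
    if (occurrence == 0)
        return head + std::regex_replace(tail, re, repl); // replace all matches
    int count = 0;
    for (auto it = std::sregex_iterator(tail.begin(), tail.end(), re);
         it != std::sregex_iterator(); ++it) {
        if (++count == occurrence) { // replace only the requested occurrence
            tail.replace(it->position(0), it->length(0), repl);
            return head + tail;
        }
    }
    return src; // requested occurrence not found: input returned unchanged
}

int main()
{
    std::cout << ReplaceOccurrence("a1b22c333", "[0-9]+", "#", 1, 2) << '\n'; // a1b#c333
}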
@@ -518,6 +645,8 @@ Datum textregexreplace_noopt(PG_FUNCTION_ARGS) text* r = NULL; regex_t* re = NULL; text* result = NULL; + int occurrence = 1; + int cflags = REG_ADVANCED; if (PG_ARGISNULL(0)) PG_RETURN_NULL(); @@ -531,9 +660,17 @@ Datum textregexreplace_noopt(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(2)) r = PG_GETARG_TEXT_PP(2); - re = RE_compile_and_cache(p, REG_ADVANCED, PG_GET_COLLATION()); + if (REGEX_COMPAT_MODE) + cflags |= REG_NLSTOP; - result = replace_text_regexp(s, (void*)re, r, false); + re = RE_compile_and_cache(p, cflags, PG_GET_COLLATION()); + + if (REGEX_COMPAT_MODE) { + /* replace all the occurrence matched in O/M compatible mode */ + occurrence = 0; + } + + result = replace_text_regexp(s, (void*)re, r, 1, occurrence); if (VARHDRSZ == VARSIZE(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) PG_RETURN_NULL(); @@ -554,31 +691,40 @@ Datum textregexreplace(PG_FUNCTION_ARGS) regex_t* re = NULL; pg_re_flags flags; text* result = NULL; + int occurrence = 1; - if (PG_ARGISNULL(0)) + if (PG_ARGISNULL(ARG_0)) PG_RETURN_NULL(); - s = PG_GETARG_TEXT_PP(0); + s = PG_GETARG_TEXT_PP(ARG_0); - if (PG_ARGISNULL(1)) + if (PG_ARGISNULL(ARG_1)) PG_RETURN_TEXT_P(s); - p = PG_GETARG_TEXT_PP(1); + p = PG_GETARG_TEXT_PP(ARG_1); - if (!PG_ARGISNULL(2)) - r = PG_GETARG_TEXT_PP(2); + if (!PG_ARGISNULL(ARG_2)) + r = PG_GETARG_TEXT_PP(ARG_2); - if (!PG_ARGISNULL(3)) { - opt = PG_GETARG_TEXT_PP(3); + if (!PG_ARGISNULL(ARG_3)) { + opt = PG_GETARG_TEXT_PP(ARG_3); parse_re_flags(&flags, opt); } else { + if (REGEX_COMPAT_MODE) { + /* return null in O/M compatible mode */ + PG_RETURN_NULL(); + } flags.glob = false; flags.cflags = REG_ADVANCED; } + if (flags.glob) { + occurrence = 0; + } + re = RE_compile_and_cache(p, flags.cflags, PG_GET_COLLATION()); - result = replace_text_regexp(s, (void*)re, r, flags.glob); + result = replace_text_regexp(s, (void*)re, r, 1, occurrence); if (VARHDRSZ == VARSIZE(result) && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) PG_RETURN_NULL(); @@ -707,6 +853,184 @@ Datum similar_escape(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(result); } +/* function prototype: + * regexp_count(src , pattern [, position [, flags ]) + */ +Datum regexp_count(PG_FUNCTION_ARGS) +{ + text* src = NULL; + text* pattern = NULL; + int position = 1; + int count = 0; + pg_re_flags re_flags; + bool has_null = false; + + /* source string */ + if (!PG_ARGISNULL(ARG_0)) + src = PG_GETARG_TEXT_P_COPY(ARG_0); + + /* pattern string */ + if (!PG_ARGISNULL(ARG_1)) + pattern = PG_GETARG_TEXT_PP(ARG_1); + + regexp_check_args(fcinfo, &position, 1, ARG_2, &has_null); + regexp_get_re_flags(fcinfo, &re_flags, ARG_3); + + if (src == NULL || pattern == NULL || has_null) { + PG_RETURN_NULL(); + } + + re_flags.glob = true; + regexp_matches_ctx* matchctx = setup_regexp_matches(src, pattern, &re_flags, + PG_GET_COLLATION(), false, false, position - 1); + count = matchctx->nmatches; + + /* release space to avoid intraquery memory leak */ + cleanup_regexp_matches(matchctx); + PG_RETURN_INT32(count); +} + +Datum regexp_count_noopt(PG_FUNCTION_ARGS) +{ + return regexp_count(fcinfo); +} + +Datum regexp_count_position(PG_FUNCTION_ARGS) +{ + return regexp_count(fcinfo); +} + +Datum regexp_count_matchopt(PG_FUNCTION_ARGS) +{ + return regexp_count(fcinfo); +} + +Datum regexp_instr_core(text* src, text* pattern, int position, + int occurrence, int return_opt, + pg_re_flags* re_flags, Oid collation) +{ + int start = 0; + int end = 0; + int index = 0; + int start_search = position - 1; + + /* convert string to pg_wchar form 
for matching */ + int len = VARSIZE_ANY_EXHDR(src); + + pg_wchar* wide_str = (pg_wchar*)palloc(sizeof(pg_wchar) * (len + 1)); + int wide_len = pg_mb2wchar_with_len(VARDATA_ANY(src), wide_str, len); + if (position > wide_len) { + PG_RETURN_INT32(0); + } + /* set up the compiled pattern */ + regex_t* cpattern = RE_compile_and_cache(pattern, re_flags->cflags, collation); + + /* temporary output space for RE package */ + regmatch_t* pmatch = (regmatch_t*)palloc(sizeof(regmatch_t)); + + /* search for the pattern, perhaps repeatedly */ + while (index < occurrence && RE_wchar_execute(cpattern, wide_str, wide_len, + start_search, 1, pmatch)) { + start = pmatch[0].rm_so; + end = pmatch[0].rm_eo; + + /* + * Advance search position. Normally we start the next search at the + * end of the previous match; but if the match was of zero length, we + * have to advance by one character, or we'd just find the same match + * again. + */ + start_search = end; + index++; + if (pmatch[0].rm_so == pmatch[0].rm_eo) + start_search++; + if (start_search > wide_len) + break; + } + + /* Clean up temp storage */ + pfree_ext(wide_str); + pfree_ext(pmatch); + + /* return 0 if we do not find the specified occurrence */ + if (index < occurrence) { + PG_RETURN_INT32(0); + } + /* or we return the matched occurrence start or end index (start from 1) + * counting from the beginning of the origin string + */ + if (return_opt == 0) { + PG_RETURN_INT32(start + 1); + } else { + PG_RETURN_INT32(end + 1); + } +} + +/* function prototype: + * regexp_instr(src, + pattern + [, position + [, occurrence + [, return_opt + [, match_opt]]]]) + */ +Datum regexp_instr(PG_FUNCTION_ARGS) +{ + text* src = NULL; + text* pattern = NULL; + int position = 1; + int occurrence = 1; + int return_opt = 0; + pg_re_flags re_flags; + bool has_null = false; + + if (!PG_ARGISNULL(ARG_0)) + src = PG_GETARG_TEXT_PP(ARG_0); + + if (!PG_ARGISNULL(ARG_1)) + pattern = PG_GETARG_TEXT_PP(ARG_1); + + regexp_check_args(fcinfo, &position, 1, ARG_2, &has_null); + regexp_check_args(fcinfo, &occurrence, 1, ARG_3, &has_null); + /* return option: + * 0: returns the position of the first character of the occurrence. (default) + * non-0: returns the position of the character following the occurrence. + */ + regexp_check_args(fcinfo, &return_opt, 0, ARG_4, &has_null); + regexp_get_re_flags(fcinfo, &re_flags, ARG_5); + + if (pattern == NULL || src == NULL || VARSIZE_ANY_EXHDR(src) == 0 || has_null) + PG_RETURN_NULL(); + + return regexp_instr_core(src, pattern, position, occurrence, return_opt, + &re_flags, PG_GET_COLLATION()); +} + +Datum regexp_instr_noopt(PG_FUNCTION_ARGS) +{ + return regexp_instr(fcinfo); +} + +Datum regexp_instr_position(PG_FUNCTION_ARGS) +{ + return regexp_instr(fcinfo); +} + +Datum regexp_instr_occurren(PG_FUNCTION_ARGS) +{ + return regexp_instr(fcinfo); +} + +Datum regexp_instr_returnopt(PG_FUNCTION_ARGS) +{ + return regexp_instr(fcinfo); +} + +Datum regexp_instr_matchopt(PG_FUNCTION_ARGS) +{ + return regexp_instr(fcinfo); +} + /* * regexp_matches() * Return a table of matches of a pattern within a string. 
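// A matching sketch of the regexp_instr contract defined above (RegexpInstrSketch
// is an invented name; std::regex stands in for pg_regexec over pg_wchar buffers):
// return_opt == 0 yields the 1-based start of the n-th occurrence, any other value
// the position of the character just after it, and 0 means the occurrence does not
// exist. The empty-match step mirrors the start_search++ advance in the loop above.
#include <iostream>
#include <regex>
#include <string>

static int RegexpInstrSketch(const std::string &src, const std::string &pattern,
                             int position, int occurrence, int returnOpt)
{
    if (position < 1 || static_cast<size_t>(position - 1) > src.size())
        return 0;
    std::regex re(pattern);
    size_t searchStart = static_cast<size_t>(position - 1);
    int found = 0;
    std::smatch m;
    while (searchStart <= src.size()) {
        std::string rest = src.substr(searchStart);
        if (!std::regex_search(rest, m, re))
            break;
        size_t so = searchStart + static_cast<size_t>(m.position(0));
        size_t eo = so + static_cast<size_t>(m.length(0));
        if (++found == occurrence)
            return returnOpt == 0 ? static_cast<int>(so) + 1 : static_cast<int>(eo) + 1;
        // a zero-length match must advance by one character, or the loop
        // would keep rediscovering the same empty match
        searchStart = (so == eo) ? eo + 1 : eo;
    }
    return 0;
}

int main()
{
    std::cout << RegexpInstrSketch("a1b22c333", "[0-9]+", 1, 2, 0) << '\n'; // 4
    std::cout << RegexpInstrSketch("a1b22c333", "[0-9]+", 1, 2, 1) << '\n'; // 6
}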
@@ -719,14 +1043,19 @@ Datum regexp_matches(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { text* pattern = PG_GETARG_TEXT_PP(1); text* flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; MemoryContext oldcontext; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + /* Determine options */ + parse_re_flags(&re_flags, flags); + /* be sure to copy the input string into the multi-call ctx */ matchctx = - setup_regexp_matches(PG_GETARG_TEXT_P_COPY(0), pattern, flags, PG_GET_COLLATION(), false, true, false); + setup_regexp_matches(PG_GETARG_TEXT_P_COPY(0), pattern, &re_flags, + PG_GET_COLLATION(), true, false, 0); /* Pre-create workspace that build_regexp_matches_result needs */ matchctx->elems = (Datum*)palloc(sizeof(Datum) * matchctx->npatterns); @@ -771,21 +1100,23 @@ Datum regexp_matches_no_flags(PG_FUNCTION_ARGS) * but it seems clearer to distinguish the functionality this way than to * key it all off one "is_split" flag. */ -static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, text* flags, Oid collation, - bool force_glob, bool use_subpatterns, bool ignore_degenerate) +static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, + pg_re_flags *re_flags, + Oid collation, + bool use_subpatterns, + bool ignore_degenerate, + int start_search) { regexp_matches_ctx* matchctx = (regexp_matches_ctx*)palloc0(sizeof(regexp_matches_ctx)); int orig_len; pg_wchar* wide_str = NULL; int wide_len; - pg_re_flags re_flags; regex_t* cpattern = NULL; regmatch_t* pmatch = NULL; int pmatch_len; int array_len; int array_idx; int prev_match_end; - int start_search; /* save original string --- we'll extract result substrings from it */ matchctx->orig_str = orig_str; @@ -795,19 +1126,8 @@ static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, t wide_str = (pg_wchar*)palloc(sizeof(pg_wchar) * (orig_len + 1)); wide_len = pg_mb2wchar_with_len(VARDATA_ANY(orig_str), wide_str, orig_len); - /* determine options */ - parse_re_flags(&re_flags, flags); - if (force_glob) { - /* user mustn't specify 'g' for regexp_split */ - if (re_flags.glob) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("regexp_split does not support the global option"))); - /* but we find all the matches anyway */ - re_flags.glob = true; - } - /* set up the compiled pattern */ - cpattern = RE_compile_and_cache(pattern, re_flags.cflags, collation); + cpattern = RE_compile_and_cache(pattern, re_flags->cflags, collation); /* do we want to remember subpatterns? */ if (use_subpatterns && cpattern->re_nsub > 0) { @@ -823,13 +1143,12 @@ static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, t pmatch = (regmatch_t*)palloc(sizeof(regmatch_t) * pmatch_len); /* the real output space (grown dynamically if needed) */ - array_len = re_flags.glob ? 256 : 32; + array_len = re_flags->glob ? 
256 : 32; matchctx->match_locs = (int*)palloc(sizeof(int) * array_len); array_idx = 0; /* search for the pattern, perhaps repeatedly */ prev_match_end = 0; - start_search = 0; while (RE_wchar_execute(cpattern, wide_str, wide_len, start_search, pmatch_len, pmatch)) { /* * If requested, ignore degenerate matches, which are zero-length @@ -860,7 +1179,7 @@ static regexp_matches_ctx* setup_regexp_matches(text* orig_str, text* pattern, t prev_match_end = pmatch[0].rm_eo; /* if not glob, stop after one match */ - if (!re_flags.glob) + if (!re_flags->glob) break; /* @@ -935,7 +1254,7 @@ static ArrayType* build_regexp_matches_result(regexp_matches_ctx* matchctx) /* return value datatype must be text */ #define RESET_NULL_FLAG(_result) \ do { \ - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !RETURN_NS) { \ + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && !RETURN_NS) { \ if ((_result) == ((Datum)0)) { \ fcinfo->isnull = true; \ } else { \ @@ -962,14 +1281,26 @@ Datum regexp_split_to_table(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { text* pattern = PG_GETARG_TEXT_PP(1); text* flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; MemoryContext oldcontext; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + /* Determine options */ + parse_re_flags(&re_flags, flags); + /* User mustn't specify 'g' */ + if (re_flags.glob) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("regexp_split does not support the global option"))); + } + /* But we find all the matches anyway */ + re_flags.glob = true; + /* be sure to copy the input string into the multi-call ctx */ - splitctx = - setup_regexp_matches(PG_GETARG_TEXT_P_COPY(0), pattern, flags, PG_GET_COLLATION(), true, false, true); + splitctx = setup_regexp_matches(PG_GETARG_TEXT_P_COPY(0), pattern, &re_flags, + PG_GET_COLLATION(), false, true, 0); MemoryContextSwitchTo(oldcontext); funcctx->user_fctx = (void*)splitctx; @@ -1007,14 +1338,27 @@ Datum regexp_split_to_array(PG_FUNCTION_ARGS) { ArrayBuildState* astate = NULL; regexp_matches_ctx* splitctx = NULL; + text* flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; + + /* Determine options */ + parse_re_flags(&re_flags, flags); + /* User mustn't specify 'g' */ + if (re_flags.glob) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("regexp_split does not support the global option"))); + } + /* But we find all the matches anyway */ + re_flags.glob = true; splitctx = setup_regexp_matches(PG_GETARG_TEXT_PP(0), PG_GETARG_TEXT_PP(1), - PG_GETARG_TEXT_PP_IF_EXISTS(2), + &re_flags, PG_GET_COLLATION(), - true, false, - true); + true, + 0); while (splitctx->next_match <= splitctx->nmatches) { astate = accumArrayResult(astate, build_regexp_split_result(splitctx), false, TEXTOID, CurrentMemoryContext); @@ -1034,14 +1378,19 @@ Datum regexp_match_to_array(PG_FUNCTION_ARGS) { regexp_matches_ctx* splitctx = NULL; ArrayType* rs = NULL; + text* flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; + + /* Determine options */ + parse_re_flags(&re_flags, flags); splitctx = setup_regexp_matches(PG_GETARG_TEXT_PP(0), PG_GETARG_TEXT_PP(1), - PG_GETARG_TEXT_PP_IF_EXISTS(2), + &re_flags, PG_GET_COLLATION(), - false, true, - false); + false, + 0); if (splitctx->nmatches > 0) { splitctx->elems = (Datum*)palloc(sizeof(Datum) * splitctx->npatterns); @@ -1156,17 +1505,36 @@ char* regexp_fixed_prefix(text* text_re, bool case_insensitive, Oid collation, b return result; } + +static void 
regexp_get_match_position(regmatch_t *pmatch, size_t nsize, regex_t* re,
+    int *so, int *eo)
+{
+    if (u_sess->attr.attr_sql.enforce_a_behavior || re->re_nsub <= 0) {
+        /* A-format behavior enforced, or no parenthesized subexpression: use the whole match */
+        *so = pmatch[0].rm_so;
+        *eo = pmatch[0].rm_eo;
+    } else {
+        /* has parenthesized subexpressions, use the first one */
+        *so = pmatch[1].rm_so;
+        *eo = pmatch[1].rm_eo;
+    }
+}
+
 Datum textregexsubstr_enforce_a(PG_FUNCTION_ARGS)
 {
     text* s = PG_GETARG_TEXT_PP(0);
     text* p = PG_GETARG_TEXT_PP(1);
+    int cflags = REG_ADVANCED;
     regex_t* re = NULL;
     regmatch_t pmatch[2];
     int so = 0;
     int eo = 0;
 
     /* Compile RE */
-    re = RE_compile_and_cache(p, REG_ADVANCED, PG_GET_COLLATION());
+    if (REGEX_COMPAT_MODE) {
+        cflags |= REG_NLSTOP;
+    }
+    re = RE_compile_and_cache(p, cflags, PG_GET_COLLATION());
 
     if (!RE_execute(re, VARDATA_ANY(s), VARSIZE_ANY_EXHDR(s), 2, pmatch))
         PG_RETURN_NULL(); /* definitely no match */
@@ -1174,20 +1542,7 @@ Datum textregexsubstr_enforce_a(PG_FUNCTION_ARGS)
     // for adaptting A db's match rules, enforce_a_behavior must be true,
     // and use all-subexpression matches default. but the POSIX match rules
     // reserved for extension.
-    if (true == u_sess->attr.attr_sql.enforce_a_behavior) {
-        so = pmatch[0].rm_so;
-        eo = pmatch[0].rm_eo;
-    } else {
-        if (re->re_nsub > 0) {
-            /* has parenthesized subexpressions, use the first one */
-            so = pmatch[1].rm_so;
-            eo = pmatch[1].rm_eo;
-        } else {
-            /* no parenthesized subexpression, use whole match */
-            so = pmatch[0].rm_so;
-            eo = pmatch[0].rm_eo;
-        }
-    }
+    regexp_get_match_position(pmatch, sizeof(pmatch) / sizeof(regmatch_t), re, &so, &eo);
 
     /*
      * It is possible to have a match to the whole pattern but no match
@@ -1201,3 +1556,128 @@ Datum textregexsubstr_enforce_a(PG_FUNCTION_ARGS)
 
     return DirectFunctionCall3(text_substr, PointerGetDatum(s), Int32GetDatum(so + 1), Int32GetDatum(eo - so));
 }
+
+text* regexp_substr_get_occurrence(text* src, text* pattern,
+    pg_re_flags* re_flags,
+    int position,
+    int occurrence,
+    Oid collation)
+{
+    regex_t* re = NULL;
+    int so = 0;
+    int eo = 0;
+    pg_wchar* data = NULL;
+    int src_len = VARSIZE_ANY_EXHDR(src);
+    size_t data_len;
+    int search_start = position - 1;
+    int count = 0;
+    regmatch_t pmatch[2];
+    text* result = NULL;
+
+    /* Compile RE */
+    re = RE_compile_and_cache(pattern, re_flags->cflags, collation);
+
+    /* Convert data string to wide characters.
*/ + data = (pg_wchar*)palloc((src_len + 1) * sizeof(pg_wchar)); + data_len = pg_mb2wchar_with_len(VARDATA_ANY(src), data, src_len); + + while ((unsigned int)(search_start) <= data_len) { + int regexec_result; + + CHECK_FOR_INTERRUPTS(); + + regexec_result = pg_regexec(re, + data, + data_len, + search_start, + NULL, /* no details */ + sizeof(pmatch) / sizeof(regmatch_t), + pmatch, + 0); + + if (regexec_result == REG_NOMATCH) + break; + + if (regexec_result != REG_OKAY) { + char errMsg[100]; + + pg_regerror(regexec_result, re, errMsg, sizeof(errMsg)); + ereport(ERROR, + (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION), + errmsg("regular expression failed: %s", errMsg))); + } + + count++; + + regexp_get_match_position(pmatch, sizeof(pmatch) / sizeof(regmatch_t), re, &so, &eo); + + if (so < 0 || eo < 0) { + break; + } + + if (count == occurrence) { + result = text_substring(PointerGetDatum(src), + so + 1, + eo - so, + false); + break; + } + + if (so == eo) { + search_start++; + } else { + search_start = eo; + } + } + + pfree_ext(data); + return result; +} + +Datum regexp_substr_core(PG_FUNCTION_ARGS) +{ + text* pattern = NULL; + int position = 1; + int occurrence = 1; + pg_re_flags re_flags; + text* src = NULL; + bool has_null = false; + + if (!PG_ARGISNULL(ARG_0)) + src = PG_GETARG_TEXT_PP(ARG_0); + + if (!PG_ARGISNULL(ARG_1)) + pattern = PG_GETARG_TEXT_PP(ARG_1); + + regexp_check_args(fcinfo, &position, 1, ARG_2, &has_null); + regexp_check_args(fcinfo, &occurrence, 1, ARG_3, &has_null); + regexp_get_re_flags(fcinfo, &re_flags, ARG_4); + + if (pattern == NULL || src == NULL || VARSIZE_ANY_EXHDR(src) == 0 || has_null) + PG_RETURN_NULL(); + + text* ret = regexp_substr_get_occurrence(src, pattern, &re_flags, position, occurrence, + PG_GET_COLLATION()); + + if (ret == NULL || (VARHDRSZ == VARSIZE(ret) && + u_sess->attr.attr_sql.sql_compatibility == A_FORMAT)) { + PG_RETURN_NULL(); + } else { + PG_RETURN_TEXT_P(ret); + } +} + +Datum regexp_substr_with_position(PG_FUNCTION_ARGS) +{ + return regexp_substr_core(fcinfo); +} + +Datum regexp_substr_with_occur(PG_FUNCTION_ARGS) +{ + return regexp_substr_core(fcinfo); +} + +Datum regexp_substr_with_opt(PG_FUNCTION_ARGS) +{ + return regexp_substr_core(fcinfo); +} \ No newline at end of file diff --git a/src/common/backend/utils/adt/ri_triggers.cpp b/src/common/backend/utils/adt/ri_triggers.cpp index 3ca3ce7ad..6bccb2048 100644 --- a/src/common/backend/utils/adt/ri_triggers.cpp +++ b/src/common/backend/utils/adt/ri_triggers.cpp @@ -336,7 +336,7 @@ static Datum RI_FKey_check(PG_FUNCTION_ARGS) quoteRelationName(pkrelname, pk_rel); rc = snprintf_s(querystr, sizeof(querystr), sizeof(querystr) - 1, - IsShareLockForForeignKey(trigdata->tg_relation) ? "SELECT 1 FROM ONLY %s x FOR SHARE OF x" : + IsShareLockForForeignKey(pk_rel) ? "SELECT 1 FROM ONLY %s x FOR SHARE OF x" : "SELECT 1 FROM ONLY %s x FOR KEY SHARE OF x", pkrelname); securec_check_ss(rc, "\0", "\0"); @@ -476,7 +476,7 @@ static Datum RI_FKey_check(PG_FUNCTION_ARGS) queryoids[i] = fk_type; } - appendStringInfo(&querybuf, IsShareLockForForeignKey(trigdata->tg_relation) ? " FOR SHARE OF x" : + appendStringInfo(&querybuf, IsShareLockForForeignKey(pk_rel) ? " FOR SHARE OF x" : " FOR KEY SHARE OF x"); /* Prepare and save the plan */ @@ -798,7 +798,7 @@ Datum RI_FKey_noaction(PG_FUNCTION_ARGS) queryoids[i] = pk_type; } - appendStringInfo(&querybuf, IsShareLockForForeignKey(trigdata->tg_relation) ? + appendStringInfo(&querybuf, IsShareLockForForeignKey(fk_rel) ? 
" FOR SHARE OF x" : " FOR KEY SHARE OF x"); /* Prepare and save the plan */ @@ -1335,7 +1335,7 @@ Datum RI_FKey_restrict(PG_FUNCTION_ARGS) queryoids[i] = pk_type; } - appendStringInfo(&querybuf, IsShareLockForForeignKey(trigdata->tg_relation) ? + appendStringInfo(&querybuf, IsShareLockForForeignKey(fk_rel) ? " FOR SHARE OF x" : " FOR KEY SHARE OF x"); /* Prepare and save the plan */ diff --git a/src/common/backend/utils/adt/ruleutils.cpp b/src/common/backend/utils/adt/ruleutils.cpp index c5c2531c4..61a5d376d 100644 --- a/src/common/backend/utils/adt/ruleutils.cpp +++ b/src/common/backend/utils/adt/ruleutils.cpp @@ -123,6 +123,8 @@ #define MAXFLOATWIDTH 64 #define MAXDOUBLEWIDTH 128 +#define atooid(x) ((Oid)strtoul((x), NULL, 10)) + /* ---------- * Local data types * ---------- @@ -146,6 +148,7 @@ typedef struct { bool qrw_phase; /* for qrw phase, we support more deparse rule */ bool viewdef; /* just for dump viewdef */ bool is_fqs; /* just for fqs query */ + bool is_upsert_clause; /* just for upsert clause */ } deparse_context; /* @@ -203,6 +206,15 @@ typedef struct tableInfo { char* relname; } tableInfo; +typedef struct SubpartitionInfo { + bool issubpartition; + char subparttype; /* subpartition type, 'r'/'l'/'h' */ + Oid subparentid; + Oid subpartkeytype; /* the typeid of subpartkey */ + AttrNumber attnum; /* the attribute number of subpartkey in the relation */ + bool istypestring; +} SubpartitionInfo; + /* ---------- * Local functions * @@ -220,9 +232,9 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty); static void decompile_column_index_array(Datum column_index_array, Oid relId, StringInfo buf); static char* pg_get_ruledef_worker(Oid ruleoid, int prettyFlags); static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *excludeOps, bool attrsOnly, bool showTblSpc, - int prettyFlags, bool dumpSchemaOnly = false); + int prettyFlags, bool dumpSchemaOnly = false, bool showSubpartitionLocal = true); static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, - bool dumpSchemaOnly); + bool dumpSchemaOnly, bool showSubpartitionLocal); static char* pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, int prettyFlags); static text* pg_get_expr_worker(text* expr, Oid relid, const char* relname, int prettyFlags); static int print_function_arguments(StringInfo buf, HeapTuple proctup, bool print_table_args, bool print_defaults); @@ -290,7 +302,7 @@ static void get_sublink_expr(SubLink* sublink, deparse_context* context); static void get_from_clause(Query* query, const char* prefix, deparse_context* context, List* fromlist = NIL); static void get_from_clause_item(Node* jtnode, Query* query, deparse_context* context); static void get_from_clause_partition(RangeTblEntry* rte, StringInfo buf, deparse_context* context); -static void get_from_clause_subpartition(RangeTblEntry* rte, StringInfo buf); +static void get_from_clause_subpartition(RangeTblEntry* rte, StringInfo buf, deparse_context* context); static void get_from_clause_bucket(RangeTblEntry* rte, StringInfo buf, deparse_context* context); static void get_from_clause_alias(Alias* alias, RangeTblEntry* rte, deparse_context* context); static void get_from_clause_coldeflist( @@ -310,14 +322,15 @@ static char* flatten_reloptions(Oid relid); static Oid SearchSysTable(const char* query); static void replace_cl_types_in_argtypes(Oid func_id, int numargs, Oid* argtypes, bool *is_client_logic); +static void AppendSubPartitionByInfo(StringInfo buf, Oid 
tableoid, SubpartitionInfo *subpartinfo); +static void AppendSubPartitionDetail(StringInfo buf, tableInfo tableinfo, SubpartitionInfo *subpartinfo); static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, - Oid *iPartboundary); + Oid *iPartboundary, SubpartitionInfo *subpartinfo); static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, - Oid *iPartboundary); + Oid *iPartboundary, SubpartitionInfo *subpartinfo); static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, - Oid *iPartboundary); + Oid *iPartboundary, SubpartitionInfo *subpartinfo); static void AppendTablespaceInfo(const char *spcname, StringInfo buf, tableInfo tableinfo); -static char *FormatListPartitioninfo(char *pvalue, bool isString); /* from pgxcship */ Var* get_var_from_node(Node* node, bool (*func)(Oid) = func_oid_check_reject); @@ -952,11 +965,26 @@ static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoi SysScanDesc scan = NULL; HeapTuple tuple = NULL; char relkind = RELKIND_RELATION; /* set default */ - char partype = PART_STRATEGY_VALUE; /* set default */ + char partstrategy = PART_STRATEGY_VALUE; /* set default */ + char parttype = PARTTYPE_NON_PARTITIONED_RELATION; /* set default */ int partkeynum = 0; Oid* iPartboundary = NULL; Form_pg_partition partition = NULL; + HeapTuple ctuple = SearchSysCache1(RELOID, ObjectIdGetDatum(tableoid)); + if (!HeapTupleIsValid(ctuple)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for relid %u", tableoid))); + } + Form_pg_class reltuple = (Form_pg_class)GETSTRUCT(ctuple); + parttype = reltuple->parttype; + ReleaseSysCache(ctuple); + + if (parttype == PARTTYPE_NON_PARTITIONED_RELATION) { + return; + } + relation = heap_open(PartitionRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(relkind)); @@ -973,7 +1001,7 @@ static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoi if (tableinfo.relkind == RELKIND_FOREIGN_TABLE || tableinfo.relkind == RELKIND_STREAM) { appendStringInfo(buf, "PARTITION BY ("); } else { - partype = partition->partstrategy; + partstrategy = partition->partstrategy; switch (partition->partstrategy) { case PART_STRATEGY_RANGE: case PART_STRATEGY_INTERVAL: @@ -1012,6 +1040,7 @@ static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoi } firstFlag = false; appendStringInfo(buf, "%s", quote_identifier(attname)); + pfree_ext(attname); } } appendStringInfo(buf, ")"); @@ -1019,7 +1048,7 @@ static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoi systable_endscan(scan); heap_close(relation, AccessShareLock); - if (partype == PART_STRATEGY_INTERVAL) { + if (partstrategy == PART_STRATEGY_INTERVAL) { resetStringInfo(query); appendStringInfo(query, "SELECT p.interval[1] AS interval FROM pg_partition p " @@ -1034,33 +1063,200 @@ static void get_table_partitiondef(StringInfo query, StringInfo buf, Oid tableoi appendStringInfo(buf, "\nINTERVAL ('%s')", ivalue); } - if (partype == PART_STRATEGY_RANGE || partype == PART_STRATEGY_INTERVAL) { - AppendRangeIntervalPartitionInfo(buf, tableoid, tableinfo, partkeynum, iPartboundary); - } else if (partype == PART_STRATEGY_LIST) { - AppendListPartitionInfo(buf, tableoid, tableinfo, partkeynum, iPartboundary); - } else if (partype == PART_STRATEGY_HASH) { - AppendHashPartitionInfo(buf, 
tableoid, tableinfo, partkeynum, iPartboundary); - } else { /* If partype is 'value' or other type, no slice info */ + SubpartitionInfo *subpartinfo = (SubpartitionInfo *)palloc0(sizeof(SubpartitionInfo)); + if (parttype == PARTTYPE_SUBPARTITIONED_RELATION) { + AppendSubPartitionByInfo(buf, tableoid, subpartinfo); + } + + if (partstrategy == PART_STRATEGY_RANGE || partstrategy == PART_STRATEGY_INTERVAL) { + AppendRangeIntervalPartitionInfo(buf, tableoid, tableinfo, partkeynum, iPartboundary, subpartinfo); + } else if (partstrategy == PART_STRATEGY_LIST) { + AppendListPartitionInfo(buf, tableoid, tableinfo, partkeynum, iPartboundary, subpartinfo); + } else if (partstrategy == PART_STRATEGY_HASH) { + AppendHashPartitionInfo(buf, tableoid, tableinfo, partkeynum, iPartboundary, subpartinfo); + } else { /* If partstrategy is 'value' or other type, no slice info */ + pfree_ext(iPartboundary); + pfree_ext(subpartinfo); return; } if (tableinfo.relrowmovement) { appendStringInfo(buf, "\n%s", "ENABLE ROW MOVEMENT"); } + pfree_ext(iPartboundary); + pfree_ext(subpartinfo); +} + +static void AppendSubPartitionByInfo(StringInfo buf, Oid tableoid, SubpartitionInfo *subpartinfo) +{ + Relation partrel = NULL; + ScanKeyData key[2]; + SysScanDesc scan = NULL; + HeapTuple parttuple = NULL; + ScanKeyData subkey[2]; + SysScanDesc subscan = NULL; + HeapTuple subparttuple = NULL; + bool isnull = false; + + partrel = heap_open(PartitionRelationId, AccessShareLock); + ScanKeyInit(&key[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, + CharGetDatum(PARTTYPE_PARTITIONED_RELATION)); + ScanKeyInit(&key[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(tableoid)); + scan = systable_beginscan(partrel, PartitionParentOidIndexId, true, NULL, 2, key); + parttuple = systable_getnext(scan); + + if (!HeapTupleIsValid(parttuple)) { + systable_endscan(scan); + heap_close(partrel, AccessShareLock); + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("could not find partition tuple for subpartition relation %u", tableoid))); + } + + Datum datum = SysCacheGetAttr(PARTRELID, parttuple, Anum_pg_partition_partkey, &isnull); + Assert(!isnull); + int2vector *partVec = (int2vector *)DatumGetPointer(datum); + int partkeynum = partVec->dim1; + if (partkeynum != 1) { + systable_endscan(scan); + heap_close(partrel, AccessShareLock); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("only support one partkey in subpartition table"))); + } + char *attname = get_attname(tableoid, partVec->values[0]); + Oid subparentid = HeapTupleGetOid(parttuple); + + ScanKeyInit(&subkey[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, F_CHAREQ, + CharGetDatum(PARTTYPE_SUBPARTITIONED_RELATION)); + ScanKeyInit(&subkey[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(subparentid)); + subscan = systable_beginscan(partrel, PartitionParentOidIndexId, true, NULL, 2, subkey); + subparttuple = systable_getnext(subscan); + + if (!HeapTupleIsValid(subparttuple)) { + systable_endscan(scan); + systable_endscan(subscan); + heap_close(partrel, AccessShareLock); + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("could not find subpartition tuple for subpartition relation %u", tableoid))); + } + + Form_pg_partition part = (Form_pg_partition)GETSTRUCT(subparttuple); + switch (part->partstrategy) { + case PART_STRATEGY_RANGE: + appendStringInfo(buf, " SUBPARTITION BY RANGE ("); + break; + case PART_STRATEGY_LIST: + /* restructure list partitioned 
table definition */ + appendStringInfo(buf, " SUBPARTITION BY LIST ("); + break; + case PART_STRATEGY_HASH: + /* restructure hash partitioned table definition */ + appendStringInfo(buf, " SUBPARTITION BY HASH ("); + break; + default: + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("unrecognized subpartition type %c", part->partstrategy))); + } + appendStringInfo(buf, "%s", quote_identifier(attname)); + appendStringInfo(buf, ")"); + pfree_ext(attname); + + subpartinfo->issubpartition = true; + subpartinfo->attnum = partVec->values[0]; + subpartinfo->subparttype = part->partstrategy; + subpartinfo->subpartkeytype = get_atttype(tableoid, subpartinfo->attnum); + subpartinfo->istypestring = isTypeString(subpartinfo->subpartkeytype); + + systable_endscan(scan); + systable_endscan(subscan); + heap_close(partrel, AccessShareLock); +} + +static void AppendSubPartitionDetail(StringInfo buf, tableInfo tableinfo, SubpartitionInfo *subpartinfo) +{ + appendStringInfo(buf, "\n ("); + + StringInfo query = makeStringInfo(); + appendStringInfo(query, + "SELECT /*+ hashjoin(p t) */ p.relname AS partName, " + "array_to_string(p.boundaries, ',') as partbound, " + "array_to_string(p.boundaries, ''',''') as partboundstr, " + "t.spcname AS reltblspc " + "FROM pg_partition p LEFT JOIN pg_tablespace t " + "ON p.reltablespace = t.oid " + "WHERE p.parentid = %u AND p.parttype = '%c' AND p.partstrategy = '%c' " + "ORDER BY p.boundaries[1]::%s ASC", + subpartinfo->subparentid, PART_OBJ_TYPE_TABLE_SUB_PARTITION, subpartinfo->subparttype, + get_typename(subpartinfo->subpartkeytype)); + + (void)SPI_execute(query->data, true, INT_MAX); + int proc = SPI_processed; + SPITupleTable *spitup = SPI_tuptable; + for (int i = 0; i < proc; i++) { + if (i > 0) { + appendStringInfo(buf, ","); + } + HeapTuple spi_tuple = spitup->vals[i]; + TupleDesc spi_tupdesc = spitup->tupdesc; + char *pname = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partname")); + appendStringInfo(buf, "\n SUBPARTITION %s", quote_identifier(pname)); + + if (subpartinfo->subparttype == PART_STRATEGY_RANGE) { + appendStringInfo(buf, " VALUES LESS THAN ("); + char *pvalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partbound")); + if (pvalue == NULL || strlen(pvalue) == 0) { + appendStringInfo(buf, "MAXVALUE"); + } else if (subpartinfo->istypestring) { + char *svalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partboundstr")); + appendStringInfo(buf, "'%s'", svalue); + pfree_ext(svalue); + } else { + appendStringInfo(buf, "%s", pvalue); + } + appendStringInfo(buf, ")"); + pfree_ext(pvalue); + } else if (subpartinfo->subparttype == PART_STRATEGY_LIST) { + appendStringInfo(buf, " VALUES ("); + char *pvalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partbound")); + if (pvalue == NULL || strlen(pvalue) == 0) { + appendStringInfo(buf, "DEFAULT"); + } else if (subpartinfo->istypestring) { + char *svalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partboundstr")); + appendStringInfo(buf, "'%s'", svalue); + pfree_ext(svalue); + } else { + appendStringInfo(buf, "%s", pvalue); + } + appendStringInfo(buf, ")"); + pfree_ext(pvalue); + } + + /* + * Append partition tablespace. + * Skip it, if partition tablespace is the same as partitioned table. 
+ */ + int fno = SPI_fnumber(spi_tupdesc, "reltblspc"); + const char *spcname = SPI_getvalue(spi_tuple, spi_tupdesc, fno); + AppendTablespaceInfo(spcname, buf, tableinfo); + } + DestroyStringInfo(query); + + appendStringInfo(buf, "\n )"); } static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, - Oid *iPartboundary) + Oid *iPartboundary, SubpartitionInfo *subpartinfo) { appendStringInfo(buf, "\n( "); /* get table partitions info */ StringInfo query = makeStringInfo(); - appendStringInfo(query, "SELECT /*+ hashjoin(p t) */p.relname AS partName, "); + appendStringInfo(query, "SELECT /*+ hashjoin(p t) */p.relname AS partname, "); for (int i = 1; i <= partkeynum; i++) { appendStringInfo(query, "p.boundaries[%d] AS partboundary_%d, ", i, i); } appendStringInfo(query, + "p.oid AS partoid, " "t.spcname AS reltblspc " "FROM pg_partition p LEFT JOIN pg_tablespace t " "ON p.reltablespace = t.oid " @@ -1077,15 +1273,15 @@ static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, table (void)SPI_execute(query->data, true, INT_MAX); int proc = SPI_processed; + SPITupleTable *spitup = SPI_tuptable; for (int i = 0; i < proc; i++) { if (i > 0) { appendStringInfo(buf, ","); } - HeapTuple spi_tuple = SPI_tuptable->vals[i]; - TupleDesc spi_tupdesc = SPI_tuptable->tupdesc; + HeapTuple spi_tuple = spitup->vals[i]; + TupleDesc spi_tupdesc = spitup->tupdesc; char *pname = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partname")); - appendStringInfo(buf, "\n "); - appendStringInfo(buf, "PARTITION %s VALUES LESS THAN (", quote_identifier(pname)); + appendStringInfo(buf, "\n PARTITION %s VALUES LESS THAN (", quote_identifier(pname)); for (int j = 0; j < partkeynum; j++) { if (j > 0) { @@ -1120,13 +1316,19 @@ static void AppendRangeIntervalPartitionInfo(StringInfo buf, Oid tableoid, table int fno = SPI_fnumber(spi_tupdesc, "reltblspc"); const char *spcname = SPI_getvalue(spi_tuple, spi_tupdesc, fno); AppendTablespaceInfo(spcname, buf, tableinfo); + + if (subpartinfo->issubpartition) { + subpartinfo->subparentid = + atooid(SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partoid"))); + AppendSubPartitionDetail(buf, tableinfo, subpartinfo); + } } DestroyStringInfo(query); appendStringInfo(buf, "\n)"); } static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, - Oid *iPartboundary) + Oid *iPartboundary, SubpartitionInfo *subpartinfo) { appendStringInfo(buf, "\n( "); @@ -1136,34 +1338,41 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl /* get table partitions info */ StringInfo query = makeStringInfo(); appendStringInfo(query, - "SELECT /*+ hashjoin(p t) */p.relname AS partName, " - "p.boundaries AS partboundary, " + "SELECT /*+ hashjoin(p t) */p.relname AS partname, " + "array_to_string(p.boundaries, ',') as partbound, " + "array_to_string(p.boundaries, ''',''') as partboundstr, " + "p.oid AS partoid, " "t.spcname AS reltblspc " "FROM pg_partition p LEFT JOIN pg_tablespace t " "ON p.reltablespace = t.oid " "WHERE p.parentid = %u AND p.parttype = '%c' " - "AND p.partstrategy = '%c' ORDER BY p.relname::text ASC", - tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST); + "AND p.partstrategy = '%c' ORDER BY p.boundaries[1]::%s ASC", + tableoid, PART_OBJ_TYPE_TABLE_PARTITION, PART_STRATEGY_LIST, get_typename(*iPartboundary)); (void)SPI_execute(query->data, true, INT_MAX); int proc = SPI_processed; + SPITupleTable *spitup = 
SPI_tuptable; for (int i = 0; i < proc; i++) { if (i > 0) { appendStringInfo(buf, ","); } - HeapTuple spi_tuple = SPI_tuptable->vals[i]; - TupleDesc spi_tupdesc = SPI_tuptable->tupdesc; + HeapTuple spi_tuple = spitup->vals[i]; + TupleDesc spi_tupdesc = spitup->tupdesc; char *pname = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partname")); - appendStringInfo(buf, "\n "); - appendStringInfo(buf, "PARTITION %s VALUES (", quote_identifier(pname)); + appendStringInfo(buf, "\n PARTITION %s VALUES (", quote_identifier(pname)); - char *pvalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partboundary")); - pvalue[strlen(pvalue) - 1] = '\0'; - char *fvalue = FormatListPartitioninfo(pvalue + 1, isTypeString(*iPartboundary)); - appendStringInfo(buf, "%s", fvalue); + char *pvalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partbound")); + if (pvalue == NULL || strlen(pvalue) == 0) { + appendStringInfo(buf, "DEFAULT"); + } else if (isTypeString(*iPartboundary)) { + char *svalue = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partboundstr")); + appendStringInfo(buf, "'%s'", svalue); + pfree_ext(svalue); + } else { + appendStringInfo(buf, "%s", pvalue); + } appendStringInfo(buf, ")"); pfree_ext(pvalue); - pfree_ext(fvalue); /* * Append partition tablespace. @@ -1172,6 +1381,12 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl int fno = SPI_fnumber(spi_tupdesc, "reltblspc"); const char *spcname = SPI_getvalue(spi_tuple, spi_tupdesc, fno); AppendTablespaceInfo(spcname, buf, tableinfo); + + if (subpartinfo->issubpartition) { + subpartinfo->subparentid = + atooid(SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partoid"))); + AppendSubPartitionDetail(buf, tableinfo, subpartinfo); + } } DestroyStringInfo(query); @@ -1179,7 +1394,7 @@ static void AppendListPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl } static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tableinfo, int partkeynum, - Oid *iPartboundary) + Oid *iPartboundary, SubpartitionInfo *subpartinfo) { appendStringInfo(buf, "\n( "); @@ -1189,8 +1404,9 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl /* get table partitions info */ StringInfo query = makeStringInfo(); appendStringInfo(query, - "SELECT /*+ hashjoin(p t) */p.relname AS partName, " + "SELECT /*+ hashjoin(p t) */p.relname AS partname, " "p.boundaries[1] AS partboundary, " + "p.oid AS partoid, " "t.spcname AS reltblspc " "FROM pg_partition p LEFT JOIN pg_tablespace t " "ON p.reltablespace = t.oid " @@ -1201,15 +1417,15 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl (void)SPI_execute(query->data, true, INT_MAX); int proc = SPI_processed; + SPITupleTable *spitup = SPI_tuptable; for (int i = 0; i < proc; i++) { if (i > 0) { appendStringInfo(buf, ","); } - HeapTuple spi_tuple = SPI_tuptable->vals[i]; - TupleDesc spi_tupdesc = SPI_tuptable->tupdesc; + HeapTuple spi_tuple = spitup->vals[i]; + TupleDesc spi_tupdesc = spitup->tupdesc; char *pname = SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partname")); - appendStringInfo(buf, "\n "); - appendStringInfo(buf, "PARTITION %s", quote_identifier(pname)); + appendStringInfo(buf, "\n PARTITION %s", quote_identifier(pname)); /* * Append partition tablespace. 
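Worth calling out in the partition-printing hunks above: SPI_processed and SPI_tuptable are copied into locals (proc, spitup) before the row loop. AppendSubPartitionDetail runs its own SPI_execute once per partition, and that nested call replaces the global SPI state, so iterating the globals directly would walk the wrong result set after the first subpartition. A condensed sketch of the pattern as backend code (assumes an established SPI connection; walk_partitions and the query text are illustrative):

    #include "postgres.h"
    #include "executor/spi.h"
    #include "lib/stringinfo.h"
    #include "utils/builtins.h"

    static void walk_partitions(StringInfo buf, Oid parentid)
    {
        StringInfo query = makeStringInfo();
        appendStringInfo(query,
            "SELECT p.relname, p.oid FROM pg_partition p "
            "WHERE p.parentid = %u", parentid);

        (void)SPI_execute(query->data, true, 0);    /* read-only, unlimited */
        int nrows = SPI_processed;                  /* cache before nesting */
        SPITupleTable *spitup = SPI_tuptable;

        for (int i = 0; i < nrows; i++) {
            HeapTuple tup = spitup->vals[i];
            TupleDesc desc = spitup->tupdesc;
            char *pname = SPI_getvalue(tup, desc, SPI_fnumber(desc, "relname"));
            appendStringInfo(buf, "\n    PARTITION %s", quote_identifier(pname));
            /* a nested SPI_execute here (e.g. for subpartition details) no
             * longer disturbs this loop */
        }
        DestroyStringInfo(query);
    }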
@@ -1218,6 +1434,12 @@ static void AppendHashPartitionInfo(StringInfo buf, Oid tableoid, tableInfo tabl int fno = SPI_fnumber(spi_tupdesc, "reltblspc"); const char *spcname = SPI_getvalue(spi_tuple, spi_tupdesc, fno); AppendTablespaceInfo(spcname, buf, tableinfo); + + if (subpartinfo->issubpartition) { + subpartinfo->subparentid = + atooid(SPI_getvalue(spi_tuple, spi_tupdesc, SPI_fnumber(spi_tupdesc, "partoid"))); + AppendSubPartitionDetail(buf, tableinfo, subpartinfo); + } } DestroyStringInfo(query); @@ -1233,32 +1455,6 @@ static void AppendTablespaceInfo(const char *spcname, StringInfo buf, tableInfo } } -static char *FormatListPartitioninfo(char *pvalue, bool isString) -{ - StringInfoData str; - initStringInfo(&str); - - if (isString) { - char *slicevalue = NULL; - char *nextvalue = NULL; - bool firstFlag = true; - - slicevalue = strtok_s(pvalue, ",", &nextvalue); - while (slicevalue != NULL) { - if (!firstFlag) { - appendStringInfo(&str, ", "); - } - firstFlag = false; - appendStringInfo(&str, "'%s'", slicevalue); - slicevalue = strtok_s(NULL, ",", &nextvalue); - } - } else { - appendBinaryStringInfo(&str, pvalue, strlen(pvalue)); - } - - return str.data; -} - /* * @Description: get collation's namespace oid by collation oid. * @in colloid - collation oid. @@ -1340,7 +1536,7 @@ static int get_table_attribute( /* Format properly if not first attr */ actual_atts == 0 ? appendStringInfo(buf, " (") : appendStringInfo(buf, ","); - appendStringInfo(buf, "\n "); + appendStringInfo(buf, "\n "); actual_atts++; /* Attribute name */ @@ -2692,6 +2888,7 @@ static char* pg_get_triggerdef_worker(Oid trigid, bool pretty) context.wrapColumn = WRAP_COLUMN_DEFAULT; context.indentLevel = PRETTYINDENT_STD; context.qrw_phase = false; + context.is_upsert_clause = false; get_rule_expr(qual, &context, false); @@ -2762,7 +2959,8 @@ Datum pg_get_indexdef_for_dump(PG_FUNCTION_ARGS) Oid indexrelid = PG_GETARG_OID(0); bool dumpSchemaOnly = PG_GETARG_BOOL(1); - PG_RETURN_TEXT_P(string_to_text(pg_get_indexdef_worker(indexrelid, 0, NULL, false, true, 0, dumpSchemaOnly))); + PG_RETURN_TEXT_P(string_to_text(pg_get_indexdef_worker(indexrelid, 0, NULL, false, true, 0, dumpSchemaOnly, + false))); } Datum pg_get_indexdef_ext(PG_FUNCTION_ARGS) @@ -2773,7 +2971,8 @@ Datum pg_get_indexdef_ext(PG_FUNCTION_ARGS) int prettyFlags; prettyFlags = pretty ? 
(PRETTYFLAG_PAREN | PRETTYFLAG_INDENT) : 0; - PG_RETURN_TEXT_P(string_to_text(pg_get_indexdef_worker(indexrelid, colno, NULL, colno != 0, true, prettyFlags))); + PG_RETURN_TEXT_P(string_to_text(pg_get_indexdef_worker(indexrelid, colno, NULL, colno != 0, true, prettyFlags, + false, false))); } /** @@ -2797,7 +2996,7 @@ char* pg_get_indexdef_columns(Oid indexrelid, bool pretty) } static void AppendOnePartitionIndex(Oid indexRelId, Oid partOid, bool showTblSpc, bool *isFirst, - StringInfoData *buf) + StringInfoData *buf, bool isSub = false) { Oid partIdxOid = getPartitionIndexOid(indexRelId, partOid); @@ -2815,6 +3014,10 @@ static void AppendOnePartitionIndex(Oid indexRelId, Oid partOid, bool showTblSpc appendStringInfo(buf, ", "); } + if (isSub) { + appendStringInfo(buf, "\n "); + } + appendStringInfo(buf, "PARTITION %s", quote_identifier(partIdxTuple->relname.data)); if (showTblSpc && OidIsValid(partIdxTuple->reltablespace)) { char *tblspacName = get_tablespace_name(partIdxTuple->reltablespace); @@ -2848,7 +3051,7 @@ static void GetIndexdefForIntervalPartTabDumpSchemaOnly(Oid indexrelid, RangePar } static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, bool showTblSpc, StringInfoData *buf, - bool dumpSchemaOnly) + bool dumpSchemaOnly, bool showSubpartitionLocal) { Oid relid = idxrec->indrelid; /* @@ -2863,8 +3066,13 @@ static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, boo } appendStringInfo(buf, " LOCAL"); - /* subpartition don't support creating index local on specifying partitions */ - if (RelationIsSubPartitioned(rel)) { + /* + * The LOCAL index information of the subpartition table is more. + * And the meta-statements (e.g. \d \d+ \dS) are used more. + * Therefore, when the meta-statement is called, the subpartition LOCAL index information is not displayed. + */ + bool isSub = RelationIsSubPartitioned(rel); + if (isSub && !showSubpartitionLocal) { heap_close(rel, NoLock); return; } @@ -2872,18 +3080,24 @@ static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, boo List *partList = NIL; ListCell *lc = NULL; bool isFirst = true; - if (RelationIsSubPartitioned(rel)) { + if (isSub) { /* reserve this code, oneday we will support it */ partList = RelationGetSubPartitionOidList(rel); } else { partList = relationGetPartitionOidList(rel); } + if (isSub) { + appendStringInfo(buf, "\n"); + } appendStringInfoChar(buf, '('); foreach (lc, partList) { Oid partOid = DatumGetObjectId(lfirst(lc)); - AppendOnePartitionIndex(indexrelid, partOid, showTblSpc, &isFirst, buf); + AppendOnePartitionIndex(indexrelid, partOid, showTblSpc, &isFirst, buf, isSub); + } + if (isSub) { + appendStringInfo(buf, "\n"); } appendStringInfo(buf, ") "); @@ -2898,7 +3112,7 @@ static void pg_get_indexdef_partitions(Oid indexrelid, Form_pg_index idxrec, boo * NULL then it points to an array of exclusion operator OIDs. 
*/ static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *excludeOps, bool attrsOnly, bool showTblSpc, - int prettyFlags, bool dumpSchemaOnly) + int prettyFlags, bool dumpSchemaOnly, bool showSubpartitionLocal) { /* might want a separate isConstraint parameter later */ bool isConstraint = (excludeOps != NULL); @@ -3102,7 +3316,7 @@ static char *pg_get_indexdef_worker(Oid indexrelid, int colno, const Oid *exclud if (idxrelrec->parttype == PARTTYPE_PARTITIONED_RELATION && idxrelrec->relkind != RELKIND_GLOBAL_INDEX) { - pg_get_indexdef_partitions(indexrelid, idxrec, showTblSpc, &buf, dumpSchemaOnly); + pg_get_indexdef_partitions(indexrelid, idxrec, showTblSpc, &buf, dumpSchemaOnly, showSubpartitionLocal); } /* @@ -4035,8 +4249,16 @@ char* pg_get_functiondef_worker(Oid funcid, int* headerlines) } if (proc->proisstrict) appendStringInfoString(&buf, " STRICT"); - if (proc->prosecdef) - appendStringInfoString(&buf, " SECURITY DEFINER"); + if (PLSQL_SECURITY_DEFINER) { + if (proc->prosecdef) { + appendStringInfoString(&buf, " AUTHID DEFINER"); + } else { + appendStringInfoString(&buf, " AUTHID CURRENT_USER"); + } + } else { + if (proc->prosecdef) + appendStringInfoString(&buf, " SECURITY DEFINER"); + } if (proc->proleakproof) appendStringInfoString(&buf, " LEAKPROOF"); @@ -4443,6 +4665,7 @@ static char* deparse_expression_pretty( context.wrapColumn = WRAP_COLUMN_DEFAULT; context.indentLevel = startIndent; context.qrw_phase = false; + context.is_upsert_clause = false; get_rule_expr(expr, &context, showimplicit, no_alias); return buf.data; @@ -4594,22 +4817,35 @@ static void set_deparse_planstate(deparse_namespace* dpns, PlanState* ps) * reference sourceTargetList, which comes from outer plan of the join (source table) */ if (mps->operation == CMD_MERGE) { - dpns->inner_planstate = dpns->outer_planstate->lefttree; + PlanState* jplanstate = dpns->outer_planstate; + if (IsA(jplanstate, StreamState) || IsA(jplanstate, VecStreamState)) + jplanstate = jplanstate->lefttree; + if (jplanstate->plan != NULL && IsJoinPlan((Node*)jplanstate->plan) && + ((Join*)jplanstate->plan)->jointype == JOIN_RIGHT) { + dpns->inner_planstate = innerPlanState(jplanstate); + } else { + dpns->inner_planstate = outerPlanState(jplanstate); + } } } else dpns->inner_planstate = innerPlanState(ps); -#ifdef ENABLE_MULTIPLE_NODES - if (IsA(ps, ModifyTableState)) -#else - if (IsA(ps, ModifyTableState) && ((ModifyTableState*)ps)->mt_upsert != NULL && - ((ModifyTableState*)ps)->mt_upsert->us_action != UPSERT_NONE) -#endif + if (IsA(ps, ModifyTableState) && ((ModifyTableState*)ps)->mt_upsert->us_excludedtlist != NIL) { + /* For upsert deparse state. The second condition is somewhat ad-hoc but there's no flag to + * mark upsert clause under PlanState. + */ dpns->inner_tlist = ((ModifyTableState*)ps)->mt_upsert->us_excludedtlist; - else if (dpns->inner_planstate != NULL) - dpns->inner_tlist = dpns->inner_planstate->plan->targetlist; - else + } else if (dpns->inner_planstate != NULL) { + if ((IsA(ps, ModifyTableState) || IsA(ps, VecModifyTableState) || IsA(ps, DistInsertSelectState)) && + ((ModifyTableState *)ps)->operation == CMD_MERGE) { + /* For merge into statements, source relation is always the inner one. 
*/ + dpns->inner_tlist = ((ModifyTable*)(ps->plan))->mergeSourceTargetList; + } else { + dpns->inner_tlist = dpns->inner_planstate->plan->targetlist; + } + } else { dpns->inner_tlist = NIL; + } /* index_tlist is set only if it's an IndexOnlyScan */ if (IsA(ps->plan, IndexOnlyScan)) @@ -4954,6 +5190,7 @@ static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, i context.viewdef = false; context.is_fqs = false; #endif /* PGXC */ + context.is_upsert_clause = false; errno_t rc = memset_s(&dpns, sizeof(dpns), 0, sizeof(dpns)); securec_check(rc, "\0", "\0"); @@ -5114,6 +5351,11 @@ static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, i * The query generated has all object names schema-qualified. This is * done by temporarily setting search_path to NIL. * It calls get_query_def without pretty print flags. + * + * Caution: get_query_def calls AcquireRewriteLocks, which might modify the RTEs + * in place. So it is generally appropriate for the caller of this routine to + * have first done a copyObject() to make a writable copy of the querytree in + * the current memory context. * ---------- */ void deparse_query(Query* query, StringInfo buf, List* parentnamespace, bool finalise_aggs, bool sortgroup_colno, @@ -5230,6 +5472,7 @@ static void get_query_def(Query* query, StringInfo buf, List* parentnamespace, T context.qrw_phase = qrw_phase; context.viewdef = viewdef; context.is_fqs = is_fqs; + context.is_upsert_clause = false; errno_t rc = memset_s(&dpns, sizeof(dpns), 0, sizeof(dpns)); securec_check(rc, "", ""); @@ -5498,6 +5741,8 @@ static void get_select_query_def(Query* query, deparse_context* context, TupleDe appendStringInfo(buf, " OF %s", quote_identifier(rte->eref->aliasname)); if (rc->noWait) appendStringInfo(buf, " NOWAIT"); + if (rc->waitSec > 0) + appendStringInfo(buf, " WAIT %d", rc->waitSec); } } @@ -5593,6 +5838,7 @@ void get_hint_string(HintState* hstate, StringInfo buf) get_hint_string_internal(hstate->block_name_hint, buf); get_hint_string_internal(hstate->scan_hint, buf); get_hint_string_internal(hstate->predpush_hint, buf); + get_hint_string_internal(hstate->predpush_same_level_hint, buf); get_hint_string_internal(hstate->rewrite_hint, buf); get_hint_string_internal(hstate->gather_hint, buf); get_hint_string_internal(hstate->cache_plan_hint, buf); @@ -6515,6 +6761,17 @@ static void get_insert_query_def(Query* query, deparse_context* context) get_hint_string(query->hintState, buf); appendStringInfo(buf, "INTO %s ", generate_relation_name(rte->relid, NIL)); + /* During gray scale upgrade, do not deparse alias since old node cannot parse it. */ + if (t_thrd.proc->workingVersionNum >= UPSERT_WHERE_VERSION_NUM) { + if (rte->alias != NULL) { + /* Deparse alias if given */ + appendStringInfo(buf, "AS %s ", quote_identifier(rte->alias->aliasname)); + } else if (rte->eref != NULL && query->upsertClause != NULL) { + /* Deparse synonym as alias for upsert statement's target table */ + appendStringInfo(buf, "AS %s ", quote_identifier(rte->eref->aliasname)); + } + } + /* * Add the insert-column-names list. 
To handle indirection properly, we * need to look for indirection nodes in the top targetlist (if it's @@ -6704,7 +6961,21 @@ static void get_insert_query_def(Query* query, deparse_context* context) if (upsertClause->upsertAction == UPSERT_NOTHING) { appendStringInfoString(buf, "NOTHING"); } else { + Assert(!context->is_upsert_clause); /* upsert clause cannot be nested */ + context->is_upsert_clause = true; get_update_query_targetlist_def(query, upsertClause->updateTlist, rte, context); + context->is_upsert_clause = false; + /* Add WHERE clause for UPDATE clause in UPSERT statement if given */ + if (upsertClause->upsertWhere != NULL) { + appendContextKeyword(context, " WHERE ", PRETTYINDENT_STD, PRETTYINDENT_STD, 1); + if (IsA(upsertClause->upsertWhere, List)) { + /* Need to revert flattened ands */ + Expr* expr = make_ands_explicit((List*)upsertClause->upsertWhere); + get_rule_expr((Node*)expr, context, false); + } else { + get_rule_expr(upsertClause->upsertWhere, context, false); + } + } } } @@ -8281,8 +8552,9 @@ static RangeTblEntry* find_rte_by_refname(const char* refname, deparse_context* foreach (rtlist, dpns->rtable) { RangeTblEntry* rte = (RangeTblEntry*)lfirst(rtlist); - - if (strcmp(rte->eref->aliasname, refname) == 0) { + /* duplicately named pulled-up rtable in upsert clause will not lead to ambiguity */ + if (!(context->is_upsert_clause && rte->pulled_from_subquery) && + strcmp(rte->eref->aliasname, refname) == 0) { if (result != NULL) return NULL; /* it's ambiguous */ result = rte; @@ -9626,11 +9898,6 @@ static void get_rule_expr(Node* node, deparse_context* context, bool showimplici } } break; - case T_GradientDescentExpr: { - GradientDescentExpr* gdnode = (GradientDescentExpr*)node; - appendStringInfo(buf, "GD(%s)", gd_get_expr_name(gdnode->field)); - } break; - default: if (context->qrw_phase) appendStringInfo(buf, "", (int)nodeTag(node)); @@ -10637,7 +10904,7 @@ static void get_from_clause_partition(RangeTblEntry* rte, StringInfo buf, depars } } -static void get_from_clause_subpartition(RangeTblEntry* rte, StringInfo buf) +static void get_from_clause_subpartition(RangeTblEntry* rte, StringInfo buf, deparse_context* context) { Assert(rte->ispartrel); @@ -10646,6 +10913,20 @@ static void get_from_clause_subpartition(RangeTblEntry* rte, StringInfo buf) pfree(rte->pname->aliasname); rte->pname->aliasname = getPartitionName(rte->subpartitionOid, false); appendStringInfo(buf, " SUBPARTITION(%s)", quote_identifier(rte->pname->aliasname)); + } else { + ListCell* cell = NULL; + char* semicolon = ""; + + Assert(rte->plist); + appendStringInfo(buf, " SUBPARTITION FOR("); + foreach (cell, rte->plist) { + Node* col = (Node*)lfirst(cell); + + appendStringInfoString(buf, semicolon); + get_rule_expr(processIndirection(col, context, false), context, false); + semicolon = " ,"; + } + appendStringInfo(buf, ")"); } } @@ -10718,7 +10999,7 @@ static void get_from_clause_item(Node* jtnode, Query* query, deparse_context* co get_from_clause_partition(rte, buf, context); } if (rte->isContainSubPartition) { - get_from_clause_subpartition(rte, buf); + get_from_clause_subpartition(rte, buf, context); } if (rte->isbucket) { get_from_clause_bucket(rte, buf, context); diff --git a/src/common/backend/utils/adt/selfuncs.cpp b/src/common/backend/utils/adt/selfuncs.cpp index 3ab7f5870..35c572731 100755 --- a/src/common/backend/utils/adt/selfuncs.cpp +++ b/src/common/backend/utils/adt/selfuncs.cpp @@ -4737,9 +4737,8 @@ void examine_variable(PlannerInfo* root, Node* node, int varRelid, VariableStatD * to 
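Two details of the upsert deparse above deserve a gloss. First, is_upsert_clause is set only around the duplicate-key UPDATE targetlist so that find_rte_by_refname can skip pulled-from-subquery RTEs whose duplicated alias would otherwise look ambiguous. Second, upsertWhere may arrive either as a single expression node or as the rewriter's flattened implicit-AND List, so an explicit AND tree is rebuilt before printing. The qual-printing idiom in isolation, as it would read inside ruleutils.cpp (deparse_upsert_where is an illustrative name; deparse_context and get_rule_expr are the file-local type and helper used above):

    #include "optimizer/clauses.h"      /* make_ands_explicit */

    static void deparse_upsert_where(Node *qual, deparse_context *context)
    {
        Node *expr = qual;

        /* Rebuild an explicit AND tree from a flattened qual list so the
         * printed SQL reads "a AND b AND c". */
        if (IsA(qual, List))
            expr = (Node *)make_ands_explicit((List *)qual);

        get_rule_expr(expr, context, false);
    }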
identify which column(s) the index * depends on. */ - vardata->acl_ok = - (pg_class_aclcheck(rte->relid, GetUserId(), - ACL_SELECT) == ACLCHECK_OK); + vardata->acl_ok = (rte->securityQuals == NIL) && + (pg_class_aclcheck(rte->relid, GetUserId(), ACL_SELECT) == ACLCHECK_OK); } else { @@ -4827,11 +4826,9 @@ static void examine_simple_variable(PlannerInfo* root, Var* var, VariableStatDat if (HeapTupleIsValid(vardata->statsTuple)) { /* check if user has permission to read this column */ - vardata->acl_ok = - (pg_class_aclcheck(rte->relid, GetUserId(), - ACL_SELECT) == ACLCHECK_OK) || - (pg_attribute_aclcheck(rte->relid, var->varattno, GetUserId(), - ACL_SELECT) == ACLCHECK_OK); + vardata->acl_ok = (rte->securityQuals == NIL) && + ((pg_class_aclcheck(rte->relid, GetUserId(), ACL_SELECT) == ACLCHECK_OK) || + (pg_attribute_aclcheck(rte->relid, var->varattno, GetUserId(), ACL_SELECT) == ACLCHECK_OK)); } else { @@ -4894,8 +4891,12 @@ static void examine_simple_variable(PlannerInfo* root, Var* var, VariableStatDat * RelOptInfos. For instance, if any subquery pullup happened during * planning, Vars in the targetlist might have gotten replaced, and we * need to see the replacement expressions. + * This is a temporary fix for mislocated varattno after inlist2join + * optimization. */ - subquery = rel->subroot->parse; + if (!rel->subroot->parse->is_from_inlist2join_rewrite) { + subquery = rel->subroot->parse; + } Assert(IsA(subquery, Query)); /* Get the subquery output expression referenced by the upper Var */ @@ -5067,6 +5068,9 @@ double get_variable_numdistinct(VariableStatData* vardata, bool* isdefault, bool case BucketIdAttributeNumber: stadistinct = 1.0; /* only 1 value */ break; + case UidAttributeNumber: + stadistinct = 1.0; /* only 1 value */ + break; #endif default: stadistinct = 0.0; /* means "unknown" */ diff --git a/src/common/backend/utils/adt/txid.cpp b/src/common/backend/utils/adt/txid.cpp index 170fe700e..7b30dafc2 100644 --- a/src/common/backend/utils/adt/txid.cpp +++ b/src/common/backend/utils/adt/txid.cpp @@ -268,109 +268,10 @@ Datum pgxc_snapshot_status(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in single node mode."))); SRF_RETURN_DONE(funcctx); #else - if (!GTM_MODE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unsupported function or view in %s mode.", GTM_LITE_MODE ? 
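The selfuncs.cpp change just above is a security fix, not a refactor: optimizer statistics (most-common-value lists, histogram bounds) contain real cell values, so consulting them for a relation that carries securityQuals (row-level security or a security-barrier view) could leak rows the qual would have filtered out. The guard therefore requires both an ACL pass and an empty securityQuals list, in the shape the patch uses:

    /* Statistics may expose actual values: allow direct use of the stats
     * tuple only when no security quals guard the RTE and the user could
     * read the column anyway. */
    vardata->acl_ok = (rte->securityQuals == NIL) &&
        ((pg_class_aclcheck(rte->relid, GetUserId(), ACL_SELECT) == ACLCHECK_OK) ||
         (pg_attribute_aclcheck(rte->relid, var->varattno, GetUserId(),
                                ACL_SELECT) == ACLCHECK_OK));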
"GTM-Lite" : "GTM-Free"))); - } FuncCallContext* funcctx = NULL; - - if (SRF_IS_FIRSTCALL()) { - MemoryContext oldcontext; - TupleDesc tupdesc; - - funcctx = SRF_FIRSTCALL_INIT(); - - oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); - - tupdesc = CreateTemplateTupleDesc(10, false); - TupleDescInitEntry(tupdesc, (AttrNumber)1, "xmin", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)2, "xmax", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)3, "xcnt", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)4, "oldestxmin", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)5, "next_xid", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)6, "timeline", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)7, "active_thread_num", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)8, "max_active_thread_num", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)9, "snapshot_num", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)10, "snapshot_totalsize", INT8OID, -1, 0); - - funcctx->tuple_desc = BlessTupleDesc(tupdesc); - - funcctx->user_fctx = palloc0(sizeof(int)); - funcctx->max_calls = 1; - - MemoryContextSwitchTo(oldcontext); - } - - /* stuff done on every call of the function */ - funcctx = SRF_PERCALL_SETUP(); - - if (funcctx->call_cntr < funcctx->max_calls) { - /* for each row */ - Datum values[10]; - bool nulls[10]; - HeapTuple tuple; - GTM_SnapshotStatus sn_stat; - errno_t ss_rc = 0; - - ss_rc = memset_s(values, sizeof(values), 0, sizeof(values)); - securec_check(ss_rc, "\0", "\0"); - ss_rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); - securec_check(ss_rc, "\0", "\0"); - - sn_stat = GetGTMSnapshotStatus(GetCurrentTransactionKey()); - if (!sn_stat) { - ereport(ERROR, - (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg( - "GTM error, could not obtain snapshot_status, please check GTM is running or failovering."))); - } - /* log important result of transaction information */ - elog(LOG, - "GTM next_xid:%lu, GTM oldestxmin:%lu, GTM gtmtimeline:%u", - sn_stat->next_xid, - sn_stat->recent_global_xmin, - sn_stat->timeline); - /* simple check: check warplimit */ - TransactionId xid = sn_stat->next_xid; - /* - * Check oldestxmin and next_xid in case that on-fly scence. - * Notice: - * This is a simple check: Here we just check and log GTM oldestxmin - * and GTM next_xid without holding any locks. If GTM crash now, GTM - * will lost all information, and DN will get next_xid as oldestxmin - * to do vacuum and prune. It's very dangerous. - * diff_num_threadhold, which as checking threadhold, is always same - * as vacuum_defer_cleanup_age. - */ - int diff_num_threadhold = (u_sess->attr.attr_storage.vacuum_defer_cleanup_age > 2000) - ? 
u_sess->attr.attr_storage.vacuum_defer_cleanup_age - : 2000; - xid = sn_stat->recent_global_xmin + diff_num_threadhold; - if (TransactionIdFollowsOrEquals(sn_stat->next_xid, xid)) { - elog(WARNING, - "A very old transaction is running, recent_global_xmin: %lu, next_xid: %lu.", - sn_stat->recent_global_xmin, - sn_stat->next_xid); - } - /* Values available to all callers */ - values[0] = TransactionIdGetDatum(sn_stat->xmin); - values[1] = TransactionIdGetDatum(sn_stat->xmax); - values[2] = Int32GetDatum(0); - values[3] = TransactionIdGetDatum(sn_stat->recent_global_xmin); - values[4] = TransactionIdGetDatum(sn_stat->next_xid); - values[5] = Int32GetDatum(sn_stat->timeline); - values[6] = Int32GetDatum(sn_stat->active_thread_num); - values[7] = Int32GetDatum(sn_stat->max_thread_num); - values[8] = Int32GetDatum(sn_stat->snapshot_num); - values[9] = Int64GetDatum(sn_stat->snapshot_totalsize); - /* notice: we free memory using memory contxt, so don't have memory leak here.*/ - tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); - SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); - } else { - /* nothing left */ - SRF_RETURN_DONE(funcctx); - } + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported function or view in %s mode.", GTM_LITE_MODE ? "GTM-Lite" : "GTM-Free"))); + SRF_RETURN_DONE(funcctx); #endif } diff --git a/src/common/backend/utils/adt/varchar.cpp b/src/common/backend/utils/adt/varchar.cpp index affccc03b..998602bd6 100644 --- a/src/common/backend/utils/adt/varchar.cpp +++ b/src/common/backend/utils/adt/varchar.cpp @@ -142,6 +142,12 @@ static BpChar* bpchar_input(const char* s, size_t len, int32 atttypmod) maxlen = atttypmod - VARHDRSZ; if (len > maxlen) { + + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) + ereport(ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type character(%d)", (int)maxlen))); + /* Verify that extra characters are spaces, and clip them off */ size_t mbmaxlen = pg_mbcharcliplen(s, len, maxlen); size_t j; @@ -282,6 +288,12 @@ Datum bpchar(PG_FUNCTION_ARGS) PG_RETURN_BPCHAR_P(source); if (len > maxlen) { + + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) + ereport(ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type character(%d)", maxlen))); + /* Verify that extra characters are spaces, and clip them off */ size_t maxmblen; @@ -436,6 +448,11 @@ static VarChar* varchar_input(const char* s, size_t len, int32 atttypmod) maxlen = atttypmod - VARHDRSZ; + if (len > maxlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) + ereport(ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type character varying(%d)", (int)maxlen))); + if (atttypmod >= (int32)VARHDRSZ && len > maxlen) { /* Verify that extra characters are spaces, and clip them off */ size_t mbmaxlen = pg_mbcharcliplen(s, len, maxlen); @@ -578,6 +595,10 @@ Datum varchar(PG_FUNCTION_ARGS) PG_RETURN_VARCHAR_P(source); /* only reach here if string is too long... 
*/ + if (len > maxlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) + ereport(ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type character varying(%d)", maxlen))); /* truncate multibyte string preserving multibyte boundary */ maxmblen = pg_mbcharcliplen(s_data, len, maxlen); @@ -1071,6 +1092,11 @@ static NVarChar2* nvarchar2_input(const char* s, size_t len, int32 atttypmod) maxlen = atttypmod - VARHDRSZ; + if (len > maxlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) + ereport(ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type nvarchar2(%d)", (int)maxlen))); + if (atttypmod >= (int32)VARHDRSZ && len > maxlen) { /* Verify that extra characters are spaces, and clip them off */ size_t mbmaxlen = pg_mbcharcliplen_orig(s, len, maxlen); @@ -1181,6 +1207,11 @@ Datum nvarchar2(PG_FUNCTION_ARGS) if (maxlen < 0 || len <= maxlen) PG_RETURN_NVARCHAR2_P(source); + if (len > maxlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && CHAR_COERCE_COMPAT) + ereport(ERROR, + (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), + errmsg("value too long for type nvarchar2(%d)", maxlen))); + /* only reach here if string is too long... */ /* truncate multibyte string preserving multibyte boundary */ diff --git a/src/common/backend/utils/adt/varlena.cpp b/src/common/backend/utils/adt/varlena.cpp index 1d884e9a2..2ba2ce1ae 100644 --- a/src/common/backend/utils/adt/varlena.cpp +++ b/src/common/backend/utils/adt/varlena.cpp @@ -363,56 +363,6 @@ Datum byteain(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(result); } -static bytea* sub_blob(bytea* data, int32 amount) -{ - int32 len = 0; - int32 length = 0; - bytea* result = NULL; - char* ptr = NULL; - int offset = 0; - errno_t rc = EOK; - const int MAX_BATCH_SIZE = 32767; - length = VARSIZE_ANY_EXHDR(data); - if (amount < 1 || amount > MAX_BATCH_SIZE) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("argument is null, invalid, or out of range"))); - } - if (offset > length) { - return NULL; - } - if (length < amount) { - len = length + VARHDRSZ; - } else { - len = amount + VARHDRSZ; - } - /* Avoid the memcpy exceeding */ - if (len > ((length - offset) + VARHDRSZ + 1)) { - len = (length - offset) + VARHDRSZ + 1; - } - result = (bytea*)palloc0(len); - SET_VARSIZE(result, len); - ptr = VARDATA(result); - rc = memcpy_s(ptr, (len - VARHDRSZ), VARDATA_ANY(data) + (offset - 1), (len - VARHDRSZ)); - securec_check(rc, "\0", "\0"); - return result; -} -static text* sub_text(text* t1, int32 amount) -{ - int32 len; - int32 offset = 0; - const int MAX_BATCH_SIZE = 32767; - int32 len1 = text_length(PointerGetDatum(t1)); - if (amount < 1 || amount > MAX_BATCH_SIZE) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("argument is null, invalid, or out of range"))); - } - len = len1 < (amount + offset - 1) ? 
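The varchar.cpp hunks above all add one guard: under A_FORMAT compatibility with CHAR_COERCE_COMPAT enabled, an over-length value is rejected outright instead of having its trailing spaces verified and clipped, giving Oracle-style strict length semantics. Extracted for reference (the surrounding input function is abbreviated; only the guard is shown):

    if (len > maxlen && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT &&
        CHAR_COERCE_COMPAT) {
        ereport(ERROR,
            (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
             errmsg("value too long for type character varying(%d)", (int)maxlen)));
    }
    /* otherwise fall through to the pre-existing verify-spaces-and-clip path */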
(len1 - offset + 1) : amount; - return text_substring(PointerGetDatum(t1), Int32GetDatum(offset), Int32GetDatum(len), false); -} - - /* * byteaout - converts to printable representation of byte array * @@ -422,12 +372,6 @@ static text* sub_text(text* t1, int32 amount) Datum byteaout(PG_FUNCTION_ARGS) { bytea* vlena = PG_GETARG_BYTEA_PP(0); - - if (u_sess->attr.attr_sql.for_print_tuple && u_sess->attr.attr_common.pset_lob_length != 0) { - bytea* tmp = NULL; - tmp = sub_blob(vlena, u_sess->attr.attr_common.pset_lob_length); - vlena = tmp; - } char* result = NULL; char* rp = NULL; @@ -524,12 +468,6 @@ Datum rawout(PG_FUNCTION_ARGS) fcinfo->fncollation = DEFAULT_COLLATION_OID; bytea* data = PG_GETARG_BYTEA_P(0); - - if (u_sess->attr.attr_sql.for_print_tuple && u_sess->attr.attr_common.pset_lob_length != 0) { - bytea* tmp = NULL; - tmp = sub_blob(data, u_sess->attr.attr_common.pset_lob_length); - data = tmp; - } text* ans = NULL; int datalen = 0; int resultlen = 0; @@ -575,6 +513,10 @@ Datum rawtotext(PG_FUNCTION_ARGS) Datum texttoraw(PG_FUNCTION_ARGS) { Datum arg1 = PG_GETARG_DATUM(0); + if (VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg1))) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("texttoraw could not support more than 1GB clob/blob data"))); + } Datum result; Datum cstring_arg1; @@ -689,12 +631,13 @@ Datum textin(PG_FUNCTION_ARGS) Datum textout(PG_FUNCTION_ARGS) { Datum txt = PG_GETARG_DATUM(0); - - if (u_sess->attr.attr_sql.for_print_tuple && u_sess->attr.attr_common.pset_lob_length != 0) { - text* outputText = sub_text((text*)DatumGetPointer(txt), u_sess->attr.attr_common.pset_lob_length); - PG_RETURN_CSTRING(text_to_cstring(outputText)); + if (VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(txt))) { + int len = strlen("") + 1; + char *res = (char *)palloc(len); + errno_t rc = strcpy_s(res, len, ""); + securec_check_c(rc, "\0", "\0"); + PG_RETURN_CSTRING(res); } - PG_RETURN_CSTRING(TextDatumGetCString(txt)); } @@ -778,6 +721,22 @@ Datum unknownsend(PG_FUNCTION_ARGS) PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); } +static Datum text_length_huge(Datum str) +{ + if (pg_database_encoding_max_length() == 1) { + PG_RETURN_INT64(toast_raw_datum_size(str) - VARHDRSZ); + } else { + int64 result = 0; + text* t = DatumGetTextPP(str); + result = calculate_huge_length(t); + + if ((Pointer)(t) != (Pointer)(str)) + pfree_ext(t); + + PG_RETURN_INT64(result); + } +} + /* ========== PUBLIC ROUTINES ========== */ /* @@ -789,8 +748,12 @@ Datum textlen(PG_FUNCTION_ARGS) { Datum str = PG_GETARG_DATUM(0); - /* try to avoid decompressing argument */ - PG_RETURN_INT32(text_length(str)); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)DatumGetTextPP(str))) { + return text_length_huge(str); + } else { + /* try to avoid decompressing argument */ + PG_RETURN_INT32(text_length(str)); + } } /* @@ -882,12 +845,27 @@ Datum textcat(PG_FUNCTION_ARGS) static text* text_catenate(text* t1, text* t2) { text* result = NULL; - int len1, len2, len; + int64 len1, len2, len; char* ptr = NULL; int rc = 0; - len1 = VARSIZE_ANY_EXHDR(t1); - len2 = VARSIZE_ANY_EXHDR(t2); + if (VARATT_IS_HUGE_TOAST_POINTER(t1)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t1); + len1 = large_toast_pointer.va_rawsize; + } else { + len1 = VARSIZE_ANY_EXHDR(t1); + } + + if (VARATT_IS_HUGE_TOAST_POINTER(t2)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t2); + len2 = large_toast_pointer.va_rawsize; + } else { + 
len2 = VARSIZE_ANY_EXHDR(t2); + } /* paranoia ... probably should throw error instead? */ if (len1 < 0) @@ -896,20 +874,29 @@ static text* text_catenate(text* t1, text* t2) len2 = 0; len = len1 + len2 + VARHDRSZ; - result = (text*)palloc(len); + if (len > MAX_TOAST_CHUNK_SIZE + VARHDRSZ) { +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support clob/blob type more than 1GB for distributed system"))); +#endif + Oid toastOid = get_toast_oid(); + result = text_catenate_huge(t1, t2, toastOid); + } else { + result = (text*)palloc(len); - /* Set size of result string... */ - SET_VARSIZE(result, len); + /* Set size of result string... */ + SET_VARSIZE(result, len); - /* Fill data field of result string... */ - ptr = VARDATA(result); - if (len1 > 0) { - rc = memcpy_s(ptr, len1, VARDATA_ANY(t1), len1); - securec_check(rc, "\0", "\0"); - } - if (len2 > 0) { - rc = memcpy_s(ptr + len1, len2, VARDATA_ANY(t2), len2); - securec_check(rc, "\0", "\0"); + /* Fill data field of result string... */ + ptr = VARDATA(result); + if (len1 > 0) { + rc = memcpy_s(ptr, len1, VARDATA_ANY(t1), len1); + securec_check(rc, "\0", "\0"); + } + if (len2 > 0) { + rc = memcpy_s(ptr + len1, len2, VARDATA_ANY(t2), len2); + securec_check(rc, "\0", "\0"); + } } return result; @@ -1283,6 +1270,12 @@ Datum text_substr_orclcompat(PG_FUNCTION_ARGS) mblen_converter fun_mblen; fun_mblen = *pg_wchar_table[GetDatabaseEncoding()].mblen; + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)DatumGetPointer(str))) { + struct varlena* dest_ptr = (struct varlena*)DatumGetPointer(str); + struct varlena* res_ptr = heap_tuple_untoast_attr_slice(dest_ptr, start, length); + return PointerGetDatum(res_ptr); + } + is_compress = (VARATT_IS_COMPRESSED(DatumGetPointer(str)) || VARATT_IS_EXTERNAL(DatumGetPointer(str))); // orclcompat is true, withlen is true baseIdx = 6 + (int)is_compress + (eml - 1) * 8; @@ -1310,6 +1303,11 @@ Datum text_substr_no_len_orclcompat(PG_FUNCTION_ARGS) mblen_converter fun_mblen; fun_mblen = *pg_wchar_table[GetDatabaseEncoding()].mblen; + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)DatumGetPointer(str))) { + ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("The data is lob that larger than 1GB, you must use a substr with len(substr(str, start, len))"))); + } + is_compress = (VARATT_IS_COMPRESSED(DatumGetPointer(str)) || VARATT_IS_EXTERNAL(DatumGetPointer(str))); // orclcompat is true, withlen is false baseIdx = 4 + (int)is_compress + (eml - 1) * 8; @@ -1866,6 +1864,10 @@ Datum texteq(PG_FUNCTION_ARGS) { Datum arg1 = PG_GETARG_DATUM(0); Datum arg2 = PG_GETARG_DATUM(1); + if (VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg1)) && VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg2))) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("texteq could not support more than 1GB clob/blob data"))); + } bool result = false; Size len1, len2; @@ -1897,6 +1899,10 @@ Datum textne(PG_FUNCTION_ARGS) { Datum arg1 = PG_GETARG_DATUM(0); Datum arg2 = PG_GETARG_DATUM(1); + if (VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg1)) || VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(arg2))) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("textne could not support more than 1GB clob/blob data"))); + } bool result = false; Size len1, len2; @@ -2003,6 +2009,10 @@ Datum text_lt(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena 
*)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_lt could not support more than 1GB clob/blob data"))); + } bool result = false; result = (text_cmp(arg1, arg2, PG_GET_COLLATION()) < 0); @@ -2017,6 +2027,10 @@ Datum text_le(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_le could not support more than 1GB clob/blob data"))); + } bool result = false; result = (text_cmp(arg1, arg2, PG_GET_COLLATION()) <= 0); @@ -2031,6 +2045,10 @@ Datum text_gt(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_gt could not support more than 1GB clob/blob data"))); + } bool result = false; result = (text_cmp(arg1, arg2, PG_GET_COLLATION()) > 0); @@ -2045,6 +2063,10 @@ Datum text_ge(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_ge could not support more than 1GB clob/blob data"))); + } bool result = false; result = (text_cmp(arg1, arg2, PG_GET_COLLATION()) >= 0); @@ -2059,6 +2081,10 @@ Datum bttextcmp(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("bttextcmp could not support more than 1GB clob/blob data"))); + } int32 result; result = text_cmp(arg1, arg2, PG_GET_COLLATION()); @@ -2763,6 +2789,10 @@ Datum text_larger(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_larger could not support more than 1GB clob/blob data"))); + } text* result = NULL; result = ((text_cmp(arg1, arg2, PG_GET_COLLATION()) > 0) ? arg1 : arg2); @@ -2774,6 +2804,10 @@ Datum text_smaller(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_smaller could not support more than 1GB clob/blob data"))); + } text* result = NULL; result = ((text_cmp(arg1, arg2, PG_GET_COLLATION()) < 0) ? 
arg1 : arg2); @@ -2811,6 +2845,10 @@ Datum text_pattern_lt(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_pattern_lt could not support more than 1GB clob/blob data"))); + } int result; result = internal_text_pattern_compare(arg1, arg2); @@ -2825,6 +2863,10 @@ Datum text_pattern_le(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_pattern_le could not support more than 1GB clob/blob data"))); + } int result; result = internal_text_pattern_compare(arg1, arg2); @@ -2839,6 +2881,10 @@ Datum text_pattern_ge(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_pattern_ge could not support more than 1GB clob/blob data"))); + } int result; result = internal_text_pattern_compare(arg1, arg2); @@ -2853,6 +2899,10 @@ Datum text_pattern_gt(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_pattern_gt could not support more than 1GB clob/blob data"))); + } int result; result = internal_text_pattern_compare(arg1, arg2); @@ -2867,6 +2917,10 @@ Datum bttext_pattern_cmp(PG_FUNCTION_ARGS) { text* arg1 = PG_GETARG_TEXT_PP(0); text* arg2 = PG_GETARG_TEXT_PP(1); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg1) || VARATT_IS_HUGE_TOAST_POINTER((varlena *)arg2)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("bttext_pattern_cmp could not support more than 1GB clob/blob data"))); + } int result; result = internal_text_pattern_compare(arg1, arg2); @@ -2919,8 +2973,23 @@ static bytea* bytea_catenate(bytea* t1, bytea* t2) char* ptr = NULL; int rc = 0; - len1 = VARSIZE_ANY_EXHDR(t1); - len2 = VARSIZE_ANY_EXHDR(t2); + if (VARATT_IS_HUGE_TOAST_POINTER(t1)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t1); + len1 = large_toast_pointer.va_rawsize; + } else { + len1 = VARSIZE_ANY_EXHDR(t1); + } + + if (VARATT_IS_HUGE_TOAST_POINTER(t2)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t2); + len2 = large_toast_pointer.va_rawsize; + } else { + len2 = VARSIZE_ANY_EXHDR(t2); + } /* paranoia ... probably should throw error instead? */ if (len1 < 0) @@ -2929,20 +2998,30 @@ static bytea* bytea_catenate(bytea* t1, bytea* t2) len2 = 0; len = len1 + len2 + VARHDRSZ; - result = (bytea*)palloc(len); - /* Set size of result string... 
*/ - SET_VARSIZE(result, len); + if (len > MAX_TOAST_CHUNK_SIZE + VARHDRSZ) { +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support clob/blob type more than 1GB for distributed system"))); +#endif + Oid toastOid = get_toast_oid(); + result = text_catenate_huge(t1, t2, toastOid); + } else { + result = (bytea*)palloc(len); - /* Fill data field of result string... */ - ptr = VARDATA(result); - if (len1 > 0) { - rc = memcpy_s(ptr, len1, VARDATA_ANY(t1), len1); - securec_check(rc, "\0", "\0"); - } - if (len2 > 0) { - rc = memcpy_s(ptr + len1, len2, VARDATA_ANY(t2), len2); - securec_check(rc, "\0", "\0"); + /* Set size of result string... */ + SET_VARSIZE(result, len); + + /* Fill data field of result string... */ + ptr = VARDATA(result); + if (len1 > 0) { + rc = memcpy_s(ptr, len1, VARDATA_ANY(t1), len1); + securec_check(rc, "\0", "\0"); + } + if (len2 > 0) { + rc = memcpy_s(ptr + len1, len2, VARDATA_ANY(t2), len2); + securec_check(rc, "\0", "\0"); + } } return result; } @@ -4071,6 +4150,12 @@ Datum replace_text(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(2)) to_sub_text = PG_GETARG_TEXT_PP(2); + if (VARATT_IS_HUGE_TOAST_POINTER(src_text) || VARATT_IS_HUGE_TOAST_POINTER(from_sub_text)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("replace() arguments cannot exceed 1GB"))); + } + text_position_setup(src_text, from_sub_text, &state); /* @@ -4254,8 +4339,9 @@ static void appendStringInfoRegexpSubstr( * * Note: to avoid having to include regex.h in builtins.h, we declare * the regexp argument as void *, but really it's regex_t *. + * occur : the n-th matched occurrence, start from 1. */ -text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, bool glob) +text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, int position, int occur) { text* ret_text = NULL; regex_t* re = (regex_t*)regexp; @@ -4264,8 +4350,10 @@ text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, bool regmatch_t pmatch[REGEXP_REPLACE_BACKREF_CNT]; pg_wchar* data = NULL; size_t data_len; - int search_start; + int search_start = position - 1; int data_pos; + int count = 0; + int replace_len; char* start_ptr = NULL; bool have_escape = false; @@ -4274,7 +4362,9 @@ text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, bool /* Convert data string to wide characters. */ data = (pg_wchar*)palloc((src_text_len + 1) * sizeof(pg_wchar)); data_len = pg_mb2wchar_with_len(VARDATA_ANY(src_text), data, src_text_len); - + if ((unsigned int)(position) > data_len) { + return src_text; + } /* Check whether replace_text has escape char. */ if (replace_text != NULL) have_escape = check_replace_text_has_escape_char(replace_text); @@ -4283,7 +4373,6 @@ text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, bool start_ptr = (char*)VARDATA_ANY(src_text); data_pos = 0; - search_start = 0; while ((unsigned int)(search_start) <= data_len) { int regexec_result; @@ -4327,25 +4416,28 @@ text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, bool data_pos = pmatch[0].rm_so; } - /* - * Copy the replace_text. Process back references when the - * replace_text has escape characters. 
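The bytea_catenate hunk above first derives each input's length, taking va_rawsize from the TOAST pointer for huge values and the varlena header otherwise, and only then chooses between the inline palloc/memcpy_s path and text_catenate_huge. A minimal standalone sketch of that decision, using invented stand-in types rather than the kernel's varlena machinery:

```
#include <cstdint>
#include <vector>

// Stand-ins for varlena values: "huge" values carry only a size (like a
// TOAST pointer with va_rawsize), inline values carry their payload.
struct Value {
    bool is_huge;
    int64_t raw_size;
    std::vector<char> data;
};

static int64_t value_size(const Value& v)
{
    // Models the VARATT_IS_HUGE_TOAST_POINTER / VARSIZE_ANY_EXHDR split.
    return v.is_huge ? v.raw_size : static_cast<int64_t>(v.data.size());
}

Value catenate(const Value& a, const Value& b)
{
    const int64_t kChunkLimit = 1024LL * 1024 * 1024; // ~1GB stand-in threshold
    const int64_t len = value_size(a) + value_size(b);
    if (len > kChunkLimit) {
        // The kernel takes the huge path here (text_catenate_huge); the
        // sketch only records the decision as a huge descriptor.
        return Value{true, len, {}};
    }
    Value out{false, 0, {}};
    out.data.reserve(static_cast<size_t>(len));
    out.data.insert(out.data.end(), a.data.begin(), a.data.end());
    out.data.insert(out.data.end(), b.data.begin(), b.data.end());
    return out;
}
```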
- */ - if (replace_text != NULL && have_escape) - appendStringInfoRegexpSubstr(&buf, replace_text, pmatch, start_ptr, data_pos); - else if (replace_text != NULL) - appendStringInfoText(&buf, replace_text); + count++; + + replace_len = charlen_to_bytelen(start_ptr, pmatch[0].rm_eo - data_pos); + + if (occur == 0 || count == occur) { + /* + * Copy the replace_text. Process back references when the + * replace_text has escape characters. + */ + if (replace_text != NULL && have_escape) + appendStringInfoRegexpSubstr(&buf, replace_text, pmatch, start_ptr, data_pos); + else if (replace_text != NULL) + appendStringInfoText(&buf, replace_text); + } else { + /* not the n-th matched occurrence */ + appendBinaryStringInfo(&buf, start_ptr, replace_len); + } /* Advance start_ptr and data_pos over the matched text. */ - start_ptr += charlen_to_bytelen(start_ptr, pmatch[0].rm_eo - data_pos); + start_ptr += replace_len; data_pos = pmatch[0].rm_eo; - /* - * When global option is off, replace the first instance only. - */ - if (!glob) - break; - /* * Advance search position. Normally we start the next search at the * end of the previous match; but if the match was of zero length, we @@ -5719,6 +5811,15 @@ Datum interval_list_agg_noarg2_transfn(PG_FUNCTION_ARGS) PG_RETURN_POINTER(state); } +static void check_huge_toast_pointer(Datum value, Oid valtype) +{ + if ((valtype == TEXTOID || valtype == CLOBOID || valtype == BLOBOID) && + VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(value))) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("concat could not support more than 1GB clob/blob data"))); + } +} + /* * Implementation of both concat() and concat_ws(). * @@ -5779,6 +5880,7 @@ static text* concat_internal(const char* sepstr, int seplen, int argidx, Functio /* call the appropriate type output function, append the result */ valtype = get_fn_expr_argtype(fcinfo->flinfo, i); + check_huge_toast_pointer(value, valtype); if (!OidIsValid(valtype)) ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_DATATYPE), errmsg("could not determine data type of concat() input"))); @@ -5834,6 +5936,10 @@ Datum text_concat_ws(PG_FUNCTION_ARGS) Datum text_left(PG_FUNCTION_ARGS) { text* str = PG_GETARG_TEXT_PP(0); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)str)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_left could not support more than 1GB clob/blob data"))); + } const char* p = VARDATA_ANY(str); int len = VARSIZE_ANY_EXHDR(str); int n = PG_GETARG_INT32(1); @@ -5867,6 +5973,10 @@ Datum text_left(PG_FUNCTION_ARGS) Datum text_right(PG_FUNCTION_ARGS) { text* str = PG_GETARG_TEXT_PP(0); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)str)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_right could not support more than 1GB clob/blob data"))); + } const char* p = VARDATA_ANY(str); int len = VARSIZE_ANY_EXHDR(str); int n = PG_GETARG_INT32(1); @@ -5900,6 +6010,10 @@ Datum text_right(PG_FUNCTION_ARGS) Datum text_reverse(PG_FUNCTION_ARGS) { text* str = PG_GETARG_TEXT_PP(0); + if (VARATT_IS_HUGE_TOAST_POINTER((varlena *)str)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("text_reverse could not support more than 1GB clob/blob data"))); + } const char* p = VARDATA_ANY(str); int len = VARSIZE_ANY_EXHDR(str); const char* endp = p + len; diff --git a/src/common/backend/utils/cache/Makefile b/src/common/backend/utils/cache/Makefile index fa5261e48..c2ed85ef3 100644 --- a/src/common/backend/utils/cache/Makefile +++ 
b/src/common/backend/utils/cache/Makefile @@ -21,6 +21,11 @@ ifneq "$(MAKECMDGOALS)" "clean" endif OBJS = attoptcache.o catcache.o inval.o plancache.o relcache.o relmapper.o \ spccache.o syscache.o lsyscache.o typcache.o ts_cache.o partcache.o \ - relfilenodemap.o + relfilenodemap.o \ + knl_globalsysdbcache.o \ + knl_localsysdbcache.o \ + knl_globalsystabcache.o knl_globalsystupcache.o knl_globalbasedefcache.o knl_globaltabdefcache.o knl_globalpartdefcache.o \ + knl_localsystabcache.o knl_localsystupcache.o knl_localbasedefcache.o knl_localtabdefcache.o knl_localpartdefcache.o \ + knl_globalrelmapcache.o knl_globalbucketlist.o knl_globaldbstatmanager.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/common/backend/utils/cache/attoptcache.cpp b/src/common/backend/utils/cache/attoptcache.cpp index 6ff6dfc34..a68e12fe7 100644 --- a/src/common/backend/utils/cache/attoptcache.cpp +++ b/src/common/backend/utils/cache/attoptcache.cpp @@ -77,9 +77,9 @@ static void InitializeAttoptCache(void) ctl.hcxt = u_sess->cache_mem_cxt; u_sess->cache_cxt.att_opt_cache_hash = hash_create("Attopt cache", 256, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - + /* Watch for invalidation events. */ - CacheRegisterSyscacheCallback(ATTNUM, InvalidateAttoptCacheCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(ATTNUM, InvalidateAttoptCacheCallback, (Datum)0); } /* diff --git a/src/common/backend/utils/cache/catcache.cpp b/src/common/backend/utils/cache/catcache.cpp index 5bdb8f387..815bfdb41 100644 --- a/src/common/backend/utils/cache/catcache.cpp +++ b/src/common/backend/utils/cache/catcache.cpp @@ -49,6 +49,8 @@ #include "utils/fmgrtab.h" #include "utils/hashutils.h" #include "utils/inval.h" +#include "utils/knl_catcache.h" +#include "utils/knl_relcache.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" @@ -94,21 +96,23 @@ static inline HeapTuple SearchCatCacheInternal( static HeapTuple SearchCatCacheMiss( CatCache* cache, int nkeys, uint32 hashValue, Index hashIndex, Datum v1, Datum v2, Datum v3, Datum v4, int level); -static uint32 CatalogCacheComputeHashValue(CatCache* cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4); -static uint32 CatalogCacheComputeTupleHashValue(CatCache* cache, int nkeys, HeapTuple tuple); -static inline bool CatalogCacheCompareTuple( - const CatCache* cache, int nkeys, const Datum* cachekeys, const Datum* searchkeys); - #ifdef CATCACHE_STATS static void CatCachePrintStats(int code, Datum arg); #endif +static CatCList* SearchCatCacheListMiss(CatCache* cache, int nkeys, Datum* arguments, uint32 lHashValue); static void CatCacheRemoveCTup(CatCache* cache, CatCTup* ct); static void CatCacheRemoveCList(CatCache* cache, CatCList* cl); static void CatalogCacheInitializeCache(CatCache* cache); static CatCTup* CatalogCacheCreateEntry(CatCache* cache, HeapTuple ntp, Datum* arguments, uint32 hashValue, Index hashIndex, bool negative, bool isnailed = false); -static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Datum* keys); -static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Datum* srckeys, Datum* dstkeys); + + +#ifndef ENABLE_MULTIPLE_NODES +#define PROCALLARGS_KEY_NUM 4 +static uint32 CatalogCacheComputeTupleHashValueForProcAllArgs( + int* cc_keyno, TupleDesc cc_tupdesc, CCHashFN *cc_hashfunc, Oid cc_reloid, int nkeys, HeapTuple tuple); +#endif + /* * internal support functions @@ -242,7 +246,7 @@ static uint32 uuidhashfast(Datum datum) } /* Lookup support functions for a type. 
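The catcache hunks that follow rework the hashing helpers to take explicit hash-function arrays (CCHashFN*) and a Datum array instead of a whole CatCache*, so the new GSC code can call them without a cache object. As context for those hunks, a toy model of how CatalogCacheComputeHashValue combines up to four per-key hashes with shifted XORs (the hash functions here are stand-ins, not the kernel's CCHashFN table):

```
#include <cstdint>

using Datum = uint64_t;
using CCHashFN = uint32_t (*)(Datum);

uint32_t compute_hash(const CCHashFN* fns, int nkeys, const Datum* args)
{
    uint32_t hash = 0;
    // Mirrors the fallthrough switch in the patch: key 4 mixes at bit
    // offset 24, key 3 at 16, key 2 at 8, key 1 unshifted.
    for (int i = nkeys - 1; i >= 0; --i) {
        uint32_t one = fns[i](args[i]);
        int shift = 8 * i;
        hash ^= one << shift;
        if (shift != 0)
            hash ^= one >> (32 - shift);
    }
    return hash;
}
```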
*/ -static void GetCCHashEqFuncs(Oid keytype, CCHashFN* hashfunc, RegProcedure* eqfunc, CCFastEqualFN* fasteqfunc) +void GetCCHashEqFuncs(Oid keytype, CCHashFN* hashfunc, RegProcedure* eqfunc, CCFastEqualFN* fasteqfunc) { switch (keytype) { case BOOLOID: @@ -327,30 +331,29 @@ static void GetCCHashEqFuncs(Oid keytype, CCHashFN* hashfunc, RegProcedure* eqfu * * Compute the hash value associated with a given set of lookup keys */ -static uint32 CatalogCacheComputeHashValue(CatCache* cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4) +uint32 CatalogCacheComputeHashValue(CCHashFN *cc_hashfunc, int nkeys, Datum *arguments) { uint32 hashValue = 0; uint32 oneHash; - CCHashFN* cc_hashfunc = cache->cc_hashfunc; switch (nkeys) { case 4: - oneHash = (cc_hashfunc[3])(v4); + oneHash = (cc_hashfunc[3])(arguments[3]); hashValue ^= oneHash << 24; hashValue ^= oneHash >> 8; /* FALLTHROUGH */ case 3: - oneHash = (cc_hashfunc[2])(v3); + oneHash = (cc_hashfunc[2])(arguments[2]); hashValue ^= oneHash << 16; hashValue ^= oneHash >> 16; /* FALLTHROUGH */ case 2: - oneHash = (cc_hashfunc[1])(v2); + oneHash = (cc_hashfunc[1])(arguments[1]); hashValue ^= oneHash << 8; hashValue ^= oneHash >> 24; /* FALLTHROUGH */ case 1: - oneHash = (cc_hashfunc[0])(v1); + oneHash = (cc_hashfunc[0])(arguments[0]); hashValue ^= oneHash; break; default: @@ -361,26 +364,19 @@ static uint32 CatalogCacheComputeHashValue(CatCache* cache, int nkeys, Datum v1, return hashValue; } -/* - * CatalogCacheComputeTupleHashValue - * - * Compute the hash value associated with a given tuple to be cached - */ -static uint32 CatalogCacheComputeTupleHashValue(CatCache* cache, int nkeys, HeapTuple tuple) +uint32 CatalogCacheComputeTupleHashValueInternal( + int* cc_keyno, TupleDesc cc_tupdesc, CCHashFN *cc_hashfunc, Oid cc_reloid, int nkeys, HeapTuple tuple) { Datum v1 = 0, v2 = 0, v3 = 0, v4 = 0; bool isNull = false; - int* cc_keyno = cache->cc_keyno; - TupleDesc cc_tupdesc = cache->cc_tupdesc; - /* Now extract key fields from tuple, insert into scankey */ switch (nkeys) { case 4: v4 = (cc_keyno[3] == ObjectIdAttributeNumber) ? ObjectIdGetDatum(HeapTupleGetOid(tuple)) : fastgetattr(tuple, cc_keyno[3], cc_tupdesc, &isNull); #ifndef ENABLE_MULTIPLE_NODES - Assert(!isNull || (cache->cc_reloid == ProcedureRelationId)); + Assert(!isNull || ((cc_reloid == ProcedureRelationId) && v4 == 0)); #else Assert(!isNull); #endif @@ -393,10 +389,7 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache* cache, int nkeys, Heap v2 = (cc_keyno[1] == ObjectIdAttributeNumber) ? 
ObjectIdGetDatum(HeapTupleGetOid(tuple)) : fastgetattr(tuple, cc_keyno[1], cc_tupdesc, &isNull); #ifndef ENABLE_MULTIPLE_NODES - if (unlikely(cache->cc_indexoid == ProcedureNameAllArgsNspIndexId && isNull)) { - v2 = 0; - } - Assert(!isNull || (cache->cc_reloid == ProcedureRelationId)); + Assert(!isNull || ((cc_reloid == ProcedureRelationId) && v2 == 0)); #else Assert(!isNull); #endif @@ -411,7 +404,39 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache* cache, int nkeys, Heap break; } - return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4); + Datum arguments[CATCACHE_MAXKEYS]; + arguments[0] = v1; + arguments[1] = v2; + arguments[2] = v3; + arguments[3] = v4; + return CatalogCacheComputeHashValue(cc_hashfunc, nkeys, arguments); +} + +/* + * CatalogCacheComputeTupleHashValue + * + * Compute the hash value associated with a given tuple to be cached + */ +uint32 CatalogCacheComputeTupleHashValue( + int cc_id, int* cc_keyno, TupleDesc cc_tupdesc, CCHashFN *cc_hashfunc, Oid cc_reloid, int cc_nkeys, HeapTuple tup) +{ + uint32 hashValue; + + switch (cc_id) { +#ifndef ENABLE_MULTIPLE_NODES + case PROCALLARGS: { + hashValue = CatalogCacheComputeTupleHashValueForProcAllArgs( + cc_keyno, cc_tupdesc, cc_hashfunc, cc_reloid, cc_nkeys, tup); + break; + } +#endif + default: + hashValue = CatalogCacheComputeTupleHashValueInternal( + cc_keyno, cc_tupdesc, cc_hashfunc, cc_reloid, cc_nkeys, tup); + break; + } + + return hashValue; } /* @@ -419,10 +444,9 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache* cache, int nkeys, Heap * * Compare a tuple to the passed arguments. */ -static inline bool CatalogCacheCompareTuple( - const CatCache* cache, int nkeys, const Datum* cachekeys, const Datum* searchkeys) +bool CatalogCacheCompareTuple( + const CCFastEqualFN *cc_fastequal, int nkeys, const Datum* cachekeys, const Datum* searchkeys) { - const CCFastEqualFN* cc_fastequal = cache->cc_fastequal; int i; for (i = 0; i < nkeys; i++) { @@ -546,7 +570,7 @@ static void CatCacheRemoveCList(CatCache* cache, CatCList* cl) /* delink from member tuples */ for (i = cl->n_members; --i >= 0;) { - CatCTup* ct = cl->members[i]; + CatCTup* ct = cl->systups[i]; Assert(ct->c_list == cl); ct->c_list = NULL; @@ -587,6 +611,10 @@ static void CatCacheRemoveCList(CatCache* cache, CatCList* cl) */ void CatalogCacheIdInvalidate(int cacheId, uint32 hashValue) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->systabcache.CacheIdHashValueInvalidateLocal(cacheId, hashValue); + return; + } CatCache* ccp = NULL; CACHE3_elog(DEBUG2, "CatalogCacheIdInvalidate: called, cacheId %d, hashValue: %d", cacheId, hashValue); @@ -674,6 +702,10 @@ void CatalogCacheIdInvalidate(int cacheId, uint32 hashValue) */ void AtEOXact_CatCache(bool isCommit) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->systabcache.AtEOXact_CatCache(isCommit); + return; + } #ifdef USE_ASSERT_CHECKING if (assert_enabled) { CatCache* ccp = NULL; @@ -768,6 +800,10 @@ static void ResetCatalogCache(CatCache* cache) */ void ResetCatalogCaches(void) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->systabcache.ResetCatalogCaches(); + return; + } CatCache* cache = NULL; if (!RecoveryInProgress()) { @@ -796,6 +832,10 @@ void ResetCatalogCaches(void) */ void CatalogCacheFlushCatalog(Oid catId) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->systabcache.CatalogCacheFlushCatalogLocal(catId); + return; + } CatCache* cache = NULL; CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId); @@ -807,7 +847,7 @@ void 
CatalogCacheFlushCatalog(Oid catId) ResetCatalogCache(cache); /* Tell inval.c to call syscache callbacks for this cache */ - CallSyscacheCallbacks(cache->id, 0); + CallSessionSyscacheCallbacks(cache->id, 0); } } @@ -1121,9 +1161,9 @@ void InitCatCachePhase2(CatCache* cache, bool touch_index) * pg_authid and pg_auth_members syscaches for authentication even if * we don't yet have relcache entries for those catalogs' indexes. */ -static bool IndexScanOK(CatCache* cache, ScanKey cur_skey) +bool IndexScanOK(int cache_id) { - switch (cache->id) { + switch (cache_id) { case INDEXRELID: /* @@ -1132,7 +1172,7 @@ static bool IndexScanOK(CatCache* cache, ScanKey cur_skey) * just force all pg_index searches to be heap scans until we've * built the critical relcaches. */ - if (!u_sess->relcache_cxt.criticalRelcachesBuilt) + if (!LocalRelCacheCriticalRelcachesBuilt()) return false; break; @@ -1156,7 +1196,7 @@ static bool IndexScanOK(CatCache* cache, ScanKey cur_skey) * Protect authentication lookups occurring before relcache has * collected entries for shared indexes. */ - if (!u_sess->relcache_cxt.criticalSharedRelcachesBuilt) + if (!LocalRelCacheCriticalSharedRelcachesBuilt()) return false; break; default: @@ -1231,6 +1271,8 @@ void SearchCatCacheCheck(){ */ HeapTuple SearchCatCacheInternal(CatCache* cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level) { + /* don't call SearchCatCache; call SearchSysCache instead */ + Assert(!EnableLocalSysCache()); Datum arguments[CATCACHE_MAXKEYS]; uint32 hashValue; Index hashIndex; @@ -1260,7 +1302,7 @@ HeapTuple SearchCatCacheInternal(CatCache* cache, int nkeys, Datum v1, Datum v2, /* * find the hash bucket in which to look for the tuple */ - hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4); + hashValue = CatalogCacheComputeHashValue(cache->cc_hashfunc, nkeys, arguments); hashIndex = HASH_INDEX(hashValue, (uint32)cache->cc_nbuckets); /* @@ -1276,7 +1318,7 @@ HeapTuple SearchCatCacheInternal(CatCache* cache, int nkeys, Datum v1, Datum v2, if (ct->hash_value != hashValue) continue; /* quickly skip entry if wrong hash val */ - if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments)) + if (!CatalogCacheCompareTuple(cache->cc_fastequal, nkeys, ct->keys, arguments)) continue; /* @@ -1327,7 +1369,7 @@ HeapTuple CreateHeapTuple4BuiltinFunc(const Builtin_func* func, TupleDesc desc); * one builtin function if it exists. Then we create the heap tuple by the information we get from the builtin * function arrays, and return it. */ -static HeapTuple SearchBuiltinProcByNameArgNsp(CatCache* cache, int nkeys, Datum* arguments) +static HeapTuple SearchBuiltinProcByNameArgNsp(int nkeys, Datum* arguments) { char* funcname = NULL; oidvector* argtypes = NULL; @@ -1390,7 +1432,7 @@ static HeapTuple SearchBuiltinProcByNameArgNsp(CatCache* cache, int nkeys, Datum * oid is the unique index for builtin functions, so we can find only one builtin function if it exists. * Then we create the heap tuple by the information we get from the builtin function arrays, and return it. */ -static HeapTuple SearchBuiltinProcByOid(CatCache* cache, int nkeys, Datum* arguments) +static HeapTuple SearchBuiltinProcByOid(int nkeys, Datum* arguments) { const Builtin_func* bfunc = NULL; @@ -1417,18 +1459,18 @@ static HeapTuple SearchBuiltinProcByOid(CatCache* cache, int nkeys, Datum* argum * index for builtin functions.
We can get the search mode infomation from cache to determine which * function will be called for processing */ -static HeapTuple SearchBuiltinProcCacheMiss(CatCache* cache, int nkeys, Datum* arguments) +HeapTuple SearchBuiltinProcCacheMiss(int cache_id, int nkeys, Datum* arguments) { - if (CacheIsProcNameArgNsp(cache)) { - return SearchBuiltinProcByNameArgNsp(cache, nkeys, arguments); - } else if (CacheIsProcOid(cache)) { - return SearchBuiltinProcByOid(cache, nkeys, arguments); + if (CacheIsProcNameArgNsp(cache_id)) { + return SearchBuiltinProcByNameArgNsp(nkeys, arguments); + } else if (CacheIsProcOid(cache_id)) { + return SearchBuiltinProcByOid(nkeys, arguments); } else { return NULL; } } -static HeapTuple GetPgAttributeAttrTuple(TupleDesc tupleDesc, const Form_pg_attribute attr) +HeapTuple GetPgAttributeAttrTuple(TupleDesc tupleDesc, const Form_pg_attribute attr) { Datum values[Natts_pg_attribute]; bool isnull[Natts_pg_attribute]; @@ -1471,7 +1513,7 @@ static HeapTuple GetPgAttributeAttrTuple(TupleDesc tupleDesc, const Form_pg_attr return heap_form_tuple(tupleDesc, values, isnull); } -static HeapTuple SearchPgAttributeCacheMiss(CatCache* cache, int nkeys, const Datum* arguments) +HeapTuple SearchPgAttributeCacheMiss(int cache_id, TupleDesc cc_tupdesc, int nkeys, const Datum* arguments) { Assert(nkeys == 2); Oid relOid = DatumGetObjectId(arguments[0]); @@ -1481,15 +1523,16 @@ static HeapTuple SearchPgAttributeCacheMiss(CatCache* cache, int nkeys, const Da } const FormData_pg_attribute* catlogAttrs = catalogDesc.attrs; FormData_pg_attribute tempAttr; - if (cache->id == ATTNUM) { + if (cache_id == ATTNUM) { int16 attNum = DatumGetInt16(arguments[1]); Form_pg_attribute attr; if (attNum < 0) { - /* The system table does not have the bucket column, so incoming false */ - if ((attNum == ObjectIdAttributeNumber && !catalogDesc.hasoids) || attNum == BucketIdAttributeNumber) { + /* The system table does not have the bucket column or uids column, so incoming false */ + if ((attNum == ObjectIdAttributeNumber && !catalogDesc.hasoids) || attNum == BucketIdAttributeNumber || + attNum == UidAttributeNumber) { return NULL; } - attr = SystemAttributeDefinition(attNum, catalogDesc.hasoids, false); + attr = SystemAttributeDefinition(attNum, catalogDesc.hasoids, false, false); attr->attrelid = relOid; } else if (attNum <= catalogDesc.natts && attNum > 0) { tempAttr = catlogAttrs[attNum - 1]; @@ -1497,14 +1540,14 @@ static HeapTuple SearchPgAttributeCacheMiss(CatCache* cache, int nkeys, const Da } else { return NULL; } - return GetPgAttributeAttrTuple(cache->cc_tupdesc, attr); - } else if (cache->id == ATTNAME) { + return GetPgAttributeAttrTuple(cc_tupdesc, attr); + } else if (cache_id == ATTNAME) { Form_pg_attribute attr; for (int16 attnum = 0; attnum < catalogDesc.natts; attnum++) { tempAttr = catlogAttrs[attnum]; attr = &tempAttr; if (strcmp(NameStr(*DatumGetName(arguments[1])), NameStr(attr->attname)) == 0) { - return GetPgAttributeAttrTuple(cache->cc_tupdesc, attr); + return GetPgAttributeAttrTuple(cc_tupdesc, attr); } } attr = SystemAttributeByName(NameStr(*DatumGetName(arguments[1])), catalogDesc.hasoids); @@ -1512,11 +1555,11 @@ static HeapTuple SearchPgAttributeCacheMiss(CatCache* cache, int nkeys, const Da return NULL; } attr->attrelid = relOid; - return GetPgAttributeAttrTuple(cache->cc_tupdesc, attr); + return GetPgAttributeAttrTuple(cc_tupdesc, attr); } else { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_attribute does not have syscache with id %d", cache->id))); + 
errmsg("pg_attribute does not have syscache with id %d", cache_id))); } return NULL; } @@ -1557,10 +1600,10 @@ static HeapTuple SearchCatCacheMiss( cur_skey[3].sk_argument = v4; /* For search a function, we firstly try to search it in built-in function list */ - if (IsProcCache(cache) && u_sess->attr.attr_common.IsInplaceUpgrade == false) { + if (IsProcCache(cache->cc_reloid) && u_sess->attr.attr_common.IsInplaceUpgrade == false) { CACHE2_elog(DEBUG2, "SearchCatCacheMiss(%d): function not found in pg_proc", cache->id); - ntp = SearchBuiltinProcCacheMiss(cache, nkeys, arguments); + ntp = SearchBuiltinProcCacheMiss(cache->id, nkeys, arguments); if (HeapTupleIsValid(ntp)) { CACHE2_elog(DEBUG2, "SearchCatCacheMiss(%d): match a built-in function", cache->id); ct = CatalogCacheCreateEntry(cache, ntp, arguments, hashValue, hashIndex, false); @@ -1573,9 +1616,9 @@ static HeapTuple SearchCatCacheMiss( } /* Insert hardcoded system catalogs' attributes into pg_attribute's syscache. */ - if (IsAttributeCache(cache) && IsSystemObjOid(DatumGetObjectId(arguments[0]))) { + if (IsAttributeCache(cache->cc_reloid) && IsSystemObjOid(DatumGetObjectId(arguments[0]))) { CACHE2_elog(DEBUG2, "SearchCatCacheMiss: cat tuple not in cat cache %d", cache->id); - ntp = SearchPgAttributeCacheMiss(cache, nkeys, arguments); + ntp = SearchPgAttributeCacheMiss(cache->id, cache->cc_tupdesc, nkeys, arguments); if (HeapTupleIsValid(ntp)) { ct = CatalogCacheCreateEntry(cache, ntp, arguments, hashValue, hashIndex, false); heap_freetuple(ntp); @@ -1606,7 +1649,7 @@ static HeapTuple SearchCatCacheMiss( ereport(DEBUG1, (errmsg("cache->cc_reloid - %d", cache->cc_reloid))); scandesc = systable_beginscan( - relation, cache->cc_indexoid, IndexScanOK(cache, cur_skey), NULL, nkeys, cur_skey); + relation, cache->cc_indexoid, IndexScanOK(cache->id), NULL, nkeys, cur_skey); while (HeapTupleIsValid(ntp = systable_getnext(scandesc))) { ct = CatalogCacheCreateEntry(cache, ntp, arguments, hashValue, hashIndex, false); @@ -1667,6 +1710,8 @@ static HeapTuple SearchCatCacheMiss( return &ct->tuple; } + + /* * ReleaseCatCache * @@ -1680,6 +1725,8 @@ static HeapTuple SearchCatCacheMiss( */ void ReleaseCatCache(HeapTuple tuple) { + /* dont call ReleaseCatCache , call ReleaseSysCache instead */ + Assert(!EnableLocalSysCache()); CatCTup* ct = (CatCTup*)(((char*)tuple) - offsetof(CatCTup, tuple)); /* Safety checks to ensure we were handed a cache entry */ @@ -1717,7 +1764,12 @@ uint32 GetCatCacheHashValue(CatCache* cache, Datum v1, Datum v2, Datum v3, Datum /* * calculate the hash value */ - return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4); + Datum arguments[CATCACHE_MAXKEYS]; + arguments[0] = v1; + arguments[1] = v2; + arguments[2] = v3; + arguments[3] = v4; + return CatalogCacheComputeHashValue(cache->cc_hashfunc, cache->cc_nkeys, arguments); } HeapTuple CreateHeapTuple4BuiltinFuncDesc(const Builtin_func* func, TupleDesc desc) @@ -2054,7 +2106,7 @@ HeapTuple CreateHeapTuple4BuiltinFunc(const Builtin_func* func, TupleDesc desc) * generic arrays, but they support only one-dimensional arrays with no * nulls (and no null bitmap). 
*/ - oidvector* dummy = MakeMd5HashArgTypes((oidvector*)allParameterTypes); + oidvector* dummy = MakeMd5HashOids((oidvector*)allParameterTypes); values[Anum_pg_proc_allargtypes - 1] = PointerGetDatum(dummy); values[Anum_pg_proc_allargtypesext - 1] = PointerGetDatum(allParameterTypes); @@ -2169,7 +2221,8 @@ CatCTup* CreateCatCTup(CatCache* cache, Datum* arguments, HeapTuple ntp) Index hashIndex; Dlelem* elt = NULL; - hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp); + hashValue = CatalogCacheComputeTupleHashValue(cache->id, cache->cc_keyno, cache->cc_tupdesc, cache->cc_hashfunc, + cache->cc_reloid, cache->cc_nkeys, ntp); hashIndex = HASH_INDEX(hashValue, (uint32)cache->cc_nbuckets); for (elt = DLGetHead(&cache->cc_bucket[hashIndex]); elt; elt = DLGetSucc(elt)) { @@ -2227,13 +2280,14 @@ List* SearchPgAttributeCacheList(CatCache* cache, int nkey, Datum* arguments, Li if (!catalogDesc.hasoids && index == 1) { continue; } - /* The system table does not have the bucket column, so incoming false */ - attr = SystemAttributeDefinition(-(index + 1), catalogDesc.hasoids, false); + /* The system table does not have the bucket column or uid column, so incoming false */ + attr = SystemAttributeDefinition(-(index + 1), catalogDesc.hasoids, false, false); attr->attrelid = relOid; } heapTuple = GetPgAttributeAttrTuple(cache->cc_tupdesc, attr); cTup = NULL; - hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, heapTuple); + hashValue = CatalogCacheComputeTupleHashValue(cache->id, cache->cc_keyno, cache->cc_tupdesc, + cache->cc_hashfunc, cache->cc_reloid, cache->cc_nkeys, heapTuple); hashIndex = HASH_INDEX(hashValue, static_cast(cache->cc_nbuckets)); for (dlelem = DLGetHead(&cache->cc_bucket[hashIndex]); dlelem; dlelem = DLGetSucc(dlelem)) { @@ -2456,18 +2510,13 @@ void InsertBuiltinFuncDescInBootstrap() */ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4) { + /* dont call SearchCatCacheList , call SearchSysCacheList instead */ + Assert(!EnableLocalSysCache()); + Datum arguments[CATCACHE_MAXKEYS]; uint32 lHashValue; Dlelem* elt = NULL; CatCList* cl = NULL; - CatCTup* ct = NULL; - List* volatile ctlist = NULL; - ListCell* ctlist_item = NULL; - int nmembers; - bool ordered = false; - HeapTuple ntp; - MemoryContext oldcxt; - int i; SearchCatCacheCheck(); @@ -2490,12 +2539,11 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat arguments[3] = v4; /* - * compute a hash value of the given keys for faster search. We don't * presently divide the CatCList items into buckets, but this still lets * us skip non-matching items quickly most of the time. 
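SearchCatCacheList above computes a single hash over all the search keys and uses it purely as a cheap pre-filter while scanning the (unbucketed) list of CatCLists; only hash matches pay for the full key comparison. A toy version of that fast path, with stand-in structures in place of CatCList:

```
#include <array>
#include <cstdint>
#include <list>

struct CachedList {
    uint32_t hash_value;
    int nkeys;
    std::array<uint64_t, 4> keys;
};

const CachedList* find_list(const std::list<CachedList>& lists,
                            uint32_t hash, int nkeys,
                            const std::array<uint64_t, 4>& search)
{
    for (const CachedList& cl : lists) {
        if (cl.hash_value != hash)
            continue;               // cheap reject; most entries stop here
        if (cl.nkeys != nkeys)
            continue;
        bool match = true;          // models CatalogCacheCompareTuple
        for (int i = 0; i < nkeys; ++i) {
            if (cl.keys[i] != search[i]) { match = false; break; }
        }
        if (match)
            return &cl;
    }
    return nullptr;   // miss: caller builds the list from the relation
}
```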
*/ - lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4); + lHashValue = CatalogCacheComputeHashValue(cache->cc_hashfunc, nkeys, arguments); /* * scan the items until we find a match or exhaust our list @@ -2515,7 +2563,7 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat if (cl->nkeys != nkeys) continue; - if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments)) + if (!CatalogCacheCompareTuple(cache->cc_fastequal, nkeys, cl->keys, arguments)) continue; /* @@ -2541,6 +2589,22 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat return cl; } + return SearchCatCacheListMiss(cache, nkeys, arguments, lHashValue); +} + +static CatCList* SearchCatCacheListMiss(CatCache* cache, int nkeys, Datum* arguments, uint32 lHashValue) +{ + Dlelem* elt = NULL; + CatCList* cl = NULL; + CatCTup* ct = NULL; + List* volatile ctlist = NULL; + ListCell* ctlist_item = NULL; + int nmembers; + bool ordered = false; + HeapTuple ntp; + MemoryContext oldcxt; + int i; + /* * List was not found in cache, so we have to build it by reading the * relation. For each matching tuple found in the relation, use an @@ -2559,11 +2623,12 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat * which has the same name with the one we want to find, lappend it * into the ctlist */ - if (IsProcCache(cache) && CacheIsProcNameArgNsp(cache) && u_sess->attr.attr_common.IsInplaceUpgrade == false) { + if (IsProcCache(cache->cc_reloid) && CacheIsProcNameArgNsp(cache->id) && + u_sess->attr.attr_common.IsInplaceUpgrade == false) { ctlist = SearchBuiltinProcCacheList(cache, nkeys, arguments, ctlist); } - if (IsAttributeCache(cache) && IsSystemObjOid(DatumGetObjectId(arguments[0]))) { + if (IsAttributeCache(cache->cc_reloid) && IsSystemObjOid(DatumGetObjectId(arguments[0]))) { ctlist = SearchPgAttributeCacheList(cache, nkeys, arguments, ctlist); } @@ -2581,15 +2646,15 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat rc = memcpy_s( cur_skey, sizeof(ScanKeyData) * CATCACHE_MAXKEYS, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys); securec_check(rc, "", ""); - cur_skey[0].sk_argument = v1; - cur_skey[1].sk_argument = v2; - cur_skey[2].sk_argument = v3; - cur_skey[3].sk_argument = v4; + cur_skey[0].sk_argument = arguments[0]; + cur_skey[1].sk_argument = arguments[1]; + cur_skey[2].sk_argument = arguments[2]; + cur_skey[3].sk_argument = arguments[3]; relation = heap_open(cache->cc_reloid, AccessShareLock); scandesc = systable_beginscan( - relation, cache->cc_indexoid, IndexScanOK(cache, cur_skey), NULL, nkeys, cur_skey); + relation, cache->cc_indexoid, IndexScanOK(cache->id), NULL, nkeys, cur_skey); /* The list will be ordered iff we are doing an index scan */ ordered = (scandesc->irel != NULL); @@ -2598,14 +2663,14 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat uint32 hashValue; Index hashIndex; - if (IsProcCache(cache) && IsSystemObjOid(HeapTupleGetOid(ntp)) && + if (IsProcCache(cache->cc_reloid) && IsSystemObjOid(HeapTupleGetOid(ntp)) && u_sess->attr.attr_common.IsInplaceUpgrade == false) { continue; } - if (IsAttributeCache(cache)) { + if (IsAttributeCache(cache->cc_reloid)) { bool attIsNull = false; - Oid attrelid = DatumGetObjectId(SysCacheGetAttr(cache->id, ntp, - Anum_pg_attribute_attrelid, &attIsNull)); + Oid attrelid = DatumGetObjectId( + SysCacheGetAttr(cache->id, ntp, Anum_pg_attribute_attrelid, &attIsNull)); if (IsSystemObjOid(attrelid) && 
IsValidCatalogParam(GetCatalogParam(attrelid))) { continue; } @@ -2615,27 +2680,28 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat * See if there's an entry for this tuple already. */ ct = NULL; - hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp); + hashValue = CatalogCacheComputeTupleHashValue(cache->id, cache->cc_keyno, cache->cc_tupdesc, + cache->cc_hashfunc, cache->cc_reloid, cache->cc_nkeys, ntp); hashIndex = HASH_INDEX(hashValue, static_cast(cache->cc_nbuckets)); for (elt = DLGetHead(&cache->cc_bucket[hashIndex]); elt; elt = DLGetSucc(elt)) { ct = (CatCTup*)DLE_VAL(elt); - + /* ignore dead and negative entries */ if (ct->dead || ct->negative) - continue; /* ignore dead and negative entries */ - + continue; + /* quickly skip entry if wrong hash val */ if (ct->hash_value != hashValue) - continue; /* quickly skip entry if wrong hash val */ + continue; /* A built-in function is all in pg_proc, in upgrade senario, we skip searching * the builtin functions from builtin function array. In non-upgrade mode, the function * found from heap must exist in builtin array. */ - if (IsProcCache(cache) && IsSystemObjOid(HeapTupleGetOid(&(ct->tuple))) && + if (IsProcCache(cache->cc_reloid) && IsSystemObjOid(HeapTupleGetOid(&(ct->tuple))) && u_sess->attr.attr_common.IsInplaceUpgrade == false) { continue; } - if (IsAttributeCache(cache)) { + if (IsAttributeCache(cache->cc_reloid)) { bool attIsNull = false; Oid attrelid = DatumGetObjectId(SysCacheGetAttr(cache->id, &(ct->tuple), Anum_pg_attribute_attrelid, &attIsNull)); @@ -2684,7 +2750,8 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat */ oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); nmembers = list_length(ctlist); - cl = (CatCList*)palloc(offsetof(CatCList, members) + (nmembers + 1) * sizeof(CatCTup*)); + cl = (CatCList*)palloc(sizeof(CatCList) + (nmembers) * sizeof(CatCTup*)); + cl->systups = (CatCTup**)(((char *)cl) + sizeof(CatCList)); /* Extract key values */ CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno, arguments, cl->keys); @@ -2718,7 +2785,7 @@ CatCList* SearchCatCacheList(CatCache* cache, int nkeys, Datum v1, Datum v2, Dat i = 0; foreach (ctlist_item, ctlist) { - cl->members[i++] = ct = (CatCTup*)lfirst(ctlist_item); + cl->systups[i++] = ct = (CatCTup*)lfirst(ctlist_item); Assert(ct->c_list == NULL); ct->c_list = cl; /* release the temporary refcount on the member */ @@ -2769,6 +2836,8 @@ ct->refcount == 0 && (ct->c_list == NULL || ct->c_list->refcount == 0)) */ void ReleaseCatCacheList(CatCList* list) { + /* dont call ReleaseCatCacheList , call ReleaseSysCacheList instead */ + Assert(!EnableLocalSysCache()); /* Safety checks to ensure we were handed a cache entry */ Assert(list->cl_magic == CL_MAGIC); Assert(list->refcount > 0); @@ -2884,7 +2953,7 @@ static CatCTup* CatalogCacheCreateEntry( /* * Helper routine that frees keys stored in the keys array. 
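The hunks above change CatCList from a flexible members[] tail array to an explicit systups pointer aimed just past the header of the same allocation. A standalone illustration of that single-allocation layout, using plain malloc instead of palloc and invented types:

```
#include <cstdlib>
#include <new>

struct Tup { int payload; };

struct ListHeader {
    int n_members;
    Tup** systups;   // points into the same allocation, just past the header
};

ListHeader* make_list(int nmembers)
{
    void* raw = std::malloc(sizeof(ListHeader) + nmembers * sizeof(Tup*));
    if (raw == nullptr)
        throw std::bad_alloc();
    ListHeader* hdr = static_cast<ListHeader*>(raw);
    hdr->n_members = nmembers;
    hdr->systups = reinterpret_cast<Tup**>(
        static_cast<char*>(raw) + sizeof(ListHeader));
    return hdr;  // one std::free(hdr) releases header and array together
}
```

The appeal of this layout is that the header and the member-pointer array share one lifetime and one allocation, so freeing the list stays a single operation.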
*/ -static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Datum* keys) +void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Datum* keys) { int i; @@ -2897,7 +2966,8 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Da Assert(attnum > 0); if (!tupdesc->attrs[attnum - 1]->attbyval) { - pfree(DatumGetPointer(keys[i])); + void *ptr = DatumGetPointer(keys[i]); + pfree_ext(ptr); keys[i] = (Datum)NULL; } } @@ -2908,7 +2978,7 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Da * one, guaranteeing that the datums are fully allocated in the current memory * context. */ -static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Datum* srckeys, Datum* dstkeys) +void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Datum* srckeys, Datum* dstkeys) { int i; @@ -2980,6 +3050,11 @@ static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int* attnos, Da void PrepareToInvalidateCacheTuple( Relation relation, HeapTuple tuple, HeapTuple newtuple, void (*function)(int, uint32, Oid)) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->systabcache.PrepareToInvalidateCacheTuple( + relation, tuple, newtuple, function); + return; + } CatCache* ccp = NULL; Oid reloid; @@ -3015,7 +3090,8 @@ void PrepareToInvalidateCacheTuple( if (ccp->cc_tupdesc == NULL) CatalogCacheInitializeCache(ccp); - hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple); + hashvalue = CatalogCacheComputeTupleHashValue(ccp->id, ccp->cc_keyno, ccp->cc_tupdesc, ccp->cc_hashfunc, + ccp->cc_reloid, ccp->cc_nkeys, tuple); dbid = ccp->cc_relisshared ? (Oid)0 : u_sess->proc_cxt.MyDatabaseId; (*function)(ccp->id, hashvalue, dbid); @@ -3023,7 +3099,8 @@ if (newtuple) { uint32 newhashvalue; - newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple); + newhashvalue = CatalogCacheComputeTupleHashValue(ccp->id, ccp->cc_keyno, ccp->cc_tupdesc, + ccp->cc_hashfunc, ccp->cc_reloid, ccp->cc_nkeys, newtuple); if (newhashvalue != hashvalue) (*function)(ccp->id, newhashvalue, dbid); @@ -3059,3 +3136,288 @@ void PrintCatCacheListLeakWarning(CatCList* list) list->my_cache->id, list->refcount))); } + + +#ifndef ENABLE_MULTIPLE_NODES +/* + * Specific CatalogCacheComputeTupleHashValue function to support ProcedureCreate. + */ +static uint32 CatalogCacheComputeTupleHashValueForProcAllArgs( + int* cc_keyno, TupleDesc cc_tupdesc, CCHashFN *cc_hashfunc, Oid cc_reloid, int nkeys, HeapTuple tuple) +{ + Assert(nkeys == PROCALLARGS_KEY_NUM); + Assert(HeapTupleIsValid(tuple)); + + Datum v1 = 0, v2 = 0, v3 = 0, v4 = 0; + uint32 hashValue; + + bool isNull = false; + + /* Extract key fields from tuple */ + v1 = fastgetattr(tuple, cc_keyno[0], cc_tupdesc, &isNull); + Assert(!isNull); + + v2 = fastgetattr(tuple, cc_keyno[1], cc_tupdesc, &isNull); + Assert(!isNull); + + /* + * Compute the hash value by concatenating key2 and the argmodes. + * Functions with the same parameterTypes but different argmodes + * should have different hash keys.
+ */ + oidvector* allArgTypes = (oidvector*)DatumGetPointer(v2); + Assert(allArgTypes != NULL); + + Datum tmp = heap_getattr(tuple, Anum_pg_proc_proargmodes, cc_tupdesc, &isNull); + + oidvector* argModes = ConvertArgModesToMd5Vector(tmp); + oidvector* v2WithArgModes = MergeOidVector(allArgTypes, argModes); + + pfree_ext(argModes); + + Datum newKey2 = PointerGetDatum(v2WithArgModes); + + v3 = fastgetattr(tuple, cc_keyno[2], cc_tupdesc, &isNull); + Assert(!isNull); + + v4 = fastgetattr(tuple, cc_keyno[3], cc_tupdesc, &isNull); + Assert(!isNull); + + Datum arguments[CATCACHE_MAXKEYS]; + arguments[0] = v1; + arguments[1] = newKey2; + arguments[2] = v3; + arguments[3] = v4; + hashValue = CatalogCacheComputeHashValue(cc_hashfunc, nkeys, arguments); + + pfree_ext(v2WithArgModes); + return hashValue; +} + +static bool SearchHashBucketWithArgModes( + CatCache* cache, int nkeys, uint32 hashValue, const Datum* searchkeys, oidvector* argModes, HeapTuple *tuple) +{ + bool found = false; + Dlelem* elt = NULL; + CatCTup* ct = NULL; + + Index hashIndex = HASH_INDEX(hashValue, (uint32)cache->cc_nbuckets); + /* + * scan the hash bucket until we find a match or exhaust our tuples + */ + for (elt = DLGetHead(&cache->cc_bucket[hashIndex]); elt; elt = DLGetSucc(elt)) { + + ct = (CatCTup*)DLE_VAL(elt); + + if (ct->dead) + continue; /* ignore dead entries */ + + if (ct->hash_value != hashValue) + continue; /* quickly skip entry if wrong hash val */ + + if (!CatalogCacheCompareTuple(cache->cc_fastequal, nkeys, ct->keys, searchkeys)) + continue; + + /* + * The comparison of hashvalue and keys is not enough. + */ + if (!IsProArgModesEqualByTuple(&ct->tuple, cache->cc_tupdesc, argModes)) { + continue; + } + + /* + * We found a match in the cache. Move it to the front of the list + * for its hashbucket, in order to speed subsequent searches. (The + * most frequently accessed elements in any hashbucket will tend to be + * near the front of the hashbucket's list.) + */ + DLMoveToFront(&ct->cache_elem); + + /* + * If it's a positive entry, bump its refcount and return it. If it's + * negative, we can report failure to the caller. + */ + if (!ct->negative && t_thrd.utils_cxt.CurrentResourceOwner != NULL) { + ResourceOwnerEnlargeCatCacheRefs(t_thrd.utils_cxt.CurrentResourceOwner); + ct->refcount++; + ResourceOwnerRememberCatCacheRef(t_thrd.utils_cxt.CurrentResourceOwner, &ct->tuple); + + CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d", cache->cc_relname, hashIndex); + +#ifdef CATCACHE_STATS + cache->cc_hits++; +#endif + + *tuple = &ct->tuple; + found = true; + break; + } + } + return found; +} + +/* + * Specific SearchCatCacheMiss Function to support ProcedureCreate! + */ +static HeapTuple SearchCatCacheMissWithArgModes(CatCache* cache, int nkeys, uint32 hashValue, + Datum* arguments, oidvector* argModes) +{ + Assert(cache->id == PROCALLARGS); + + ScanKeyData cur_skey[CATCACHE_MAXKEYS]; + Relation relation; + SysScanDesc scandesc; + HeapTuple ntp; + CatCTup* ct = NULL; + errno_t rc = EOK; + + Index hashIndex = HASH_INDEX(hashValue, (uint32)cache->cc_nbuckets); + + /* + * Ok, need to make a lookup in the relation, copy the scankey and fill + * out any per-call fields. 
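CatalogCacheComputeTupleHashValueForProcAllArgs above folds a digest of proargmodes into the second key before hashing, so two overloads with identical argument types but different in/out modes end up with distinct hash keys. A conceptual sketch of that keying trick; the digest and mixer below are simplistic stand-ins for ConvertArgModesToMd5Vector and MergeOidVector:

```
#include <cstdint>
#include <string>
#include <vector>

static uint32_t mix(uint32_t h, uint32_t v) { return (h * 31u) ^ v; }

uint32_t hash_allargs_key(const std::vector<uint32_t>& argTypes,
                          const std::string& argModes)
{
    // Fold the modes into the same key stream as the type OIDs, so
    // (in, in) and (in, out) over identical types hash differently.
    uint32_t h = 0;
    for (uint32_t oid : argTypes)
        h = mix(h, oid);
    for (char mode : argModes)   // e.g. "iio" for (in, in, out)
        h = mix(h, static_cast<uint32_t>(mode));
    return h;
}
```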
+ */ + rc = memcpy_s(cur_skey, sizeof(ScanKeyData) * CATCACHE_MAXKEYS, cache->cc_skey, sizeof(ScanKeyData) * nkeys); + securec_check(rc, "", ""); + cur_skey[0].sk_argument = arguments[0]; + cur_skey[1].sk_argument = arguments[1]; + cur_skey[2].sk_argument = arguments[2]; + cur_skey[3].sk_argument = arguments[3]; + + relation = heap_open(cache->cc_reloid, AccessShareLock); + + ereport(DEBUG1, (errmsg("cache->cc_reloid - %d", cache->cc_reloid))); + + scandesc = systable_beginscan( + relation, cache->cc_indexoid, IndexScanOK(cache->id), NULL, nkeys, cur_skey); + + while (HeapTupleIsValid(ntp = systable_getnext(scandesc))) { + + /* + * The key2 (pg_proc_allargtypes) can be duplicated in the table. + * We need to compare the proargmodes to make sure the function is correct. + */ + if (!IsProArgModesEqualByTuple(ntp, cache->cc_tupdesc, argModes)) { + continue; + } + + ct = CatalogCacheCreateEntry(cache, ntp, arguments, hashValue, hashIndex, false); + + /* immediately set the refcount to 1 */ + ResourceOwnerEnlargeCatCacheRefs(t_thrd.utils_cxt.CurrentResourceOwner); + ct->refcount++; + ResourceOwnerRememberCatCacheRef(t_thrd.utils_cxt.CurrentResourceOwner, &ct->tuple); + break; /* assume only one match */ + } + + systable_endscan(scandesc); + + heap_close(relation, AccessShareLock); + /* + * If the tuple was not found, we would normally build a negative cache + * entry containing a fake tuple that has the correct key columns but + * nulls everywhere else. + * However, in this specific function for procallargs we no longer build + * negative entries, because when we create an overloaded pg-style + * function with the same number of in-type parameters and different + * out-type parameters, (a in int) vs (a in int, b out int), syscache + * finds no suitable cache tuple and would make a new negative cache + * entry; an error is then raised in that case and no one would free the + * negative cache entry. + * In fact, the case mentioned above should find a suitable tuple to + * return to the caller, but because we adopt a suite of specific + * functions to support ProcedureCreate, syscache cannot find the tuple. + * Someone may find a better method to solve this and refactor it. + */ + if (ct == NULL) { + return NULL; + } + + CACHE4_elog(DEBUG2, + "SearchCatCacheMissWithArgModes(%s): Contains %d/%d tuples", + cache->cc_relname, + cache->cc_ntup, + u_sess->cache_cxt.cache_header->ch_ntup); + CACHE3_elog(DEBUG2, "SearchCatCacheMissWithArgModes(%s): put in bucket %d", cache->cc_relname, hashIndex); + +#ifdef CATCACHE_STATS + cache->cc_newloads++; +#endif + + return &ct->tuple; +}
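SearchCatCacheMissWithArgModes above has to re-check proargmodes even after the hash and key comparisons succeed, because allargtypes alone is not unique in pg_proc. The bucket-scan helper SearchHashBucketWithArgModes applies the same extra filter on cached entries. A toy version of that filter; all types here are stand-ins, not the kernel structures:

```
#include <cstdint>
#include <string>
#include <vector>

struct Entry {
    uint32_t hash;
    uint64_t key;
    std::string argmodes;  // models the proargmodes comparison
    bool negative;
};

const Entry* find_with_argmodes(const std::vector<Entry>& bucket,
                                uint32_t hash, uint64_t key,
                                const std::string& modes)
{
    for (const Entry& e : bucket) {
        if (e.hash != hash || e.key != key)
            continue;
        if (e.argmodes != modes)
            continue;      // hash and key alone are not sufficient here
        return e.negative ? nullptr : &e;
    }
    return nullptr;        // caller falls through to the index scan
}
```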
+ +/* + * Specific SearchCatCache function to support ProcedureCreate. + */ +HeapTuple SearchSysCacheForProcAllArgs(Datum v1, Datum v2, Datum v3, Datum v4, Datum proArgModes) +{ + + if (EnableLocalSysCache()) { + HeapTuple tmp = t_thrd.lsc_cxt.lsc->systabcache.SearchTupleForProcAllArgs(v1, v2, v3, v4, proArgModes); + return tmp; + } + + Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[PROCALLARGS])); + Assert(u_sess->syscache_cxt.SysCache[PROCALLARGS]->cc_nkeys == PROCALLARGS_KEY_NUM); + + CatCache* cache = u_sess->syscache_cxt.SysCache[PROCALLARGS]; + int nkeys = cache->cc_nkeys; + Datum arguments[CATCACHE_MAXKEYS]; + uint32 hashValue; + + SearchCatCacheCheck(); + + /* + * one-time startup overhead for each cache + */ + if (unlikely(cache->cc_tupdesc == NULL)) + CatalogCacheInitializeCache(cache); + +#ifdef CATCACHE_STATS + cache->cc_searches++; +#endif + + /* + * Compute the hash value by concatenating key2 and the argmodes. + * Functions with the same parameterTypes but different argmodes + * should have different hash keys. + */ + oidvector* allArgTypes = (oidvector*)DatumGetPointer(v2); + + Assert(allArgTypes != NULL); + + /* remember to free the oidvectors when we no longer use them */ + oidvector* argModes = ConvertArgModesToMd5Vector(proArgModes); + oidvector* v2WithArgModes = MergeOidVector(allArgTypes, argModes); + Datum newKey2 = PointerGetDatum(v2WithArgModes); + + /* Initialize local parameter array */ + arguments[0] = v1; + arguments[1] = newKey2; + arguments[2] = v3; + arguments[3] = v4; + + /* + * find the hash bucket in which to look for the tuple + */ + hashValue = CatalogCacheComputeHashValue(cache->cc_hashfunc, nkeys, arguments); + + /* reset parameter array */ + pfree_ext(v2WithArgModes); + arguments[1] = v2; + + HeapTuple ret_tuple = NULL; + bool found = false; + found = SearchHashBucketWithArgModes(cache, nkeys, hashValue, arguments, argModes, &ret_tuple); + if (found) { + + pfree_ext(argModes); + return ret_tuple; + } + + ret_tuple = SearchCatCacheMissWithArgModes(cache, nkeys, hashValue, arguments, argModes); + + pfree_ext(argModes); + return ret_tuple; +} +#endif diff --git a/src/common/backend/utils/cache/inval.cpp b/src/common/backend/utils/cache/inval.cpp index 8a0f403a9..4b5852f7c 100644 --- a/src/common/backend/utils/cache/inval.cpp +++ b/src/common/backend/utils/cache/inval.cpp @@ -313,7 +313,8 @@ static void AddRelcacheInvalidationMessage(InvalidationListHeader* hdr, Oid dbId /* Don't add a duplicate item */ /* We assume dbId need not be checked because it will never change */ - ProcessMessageList(hdr->rclist, if (msg->rc.id == SHAREDINVALRELCACHE_ID && msg->rc.relId == relId) return ); + ProcessMessageList(hdr->rclist, if (msg->rc.id == SHAREDINVALRELCACHE_ID && + (msg->rc.relId == relId || msg->rc.relId == InvalidOid)) return); /* OK, add the item */ msg.rc.id = SHAREDINVALRELCACHE_ID; @@ -411,7 +412,7 @@ static void ProcessInvalidationMessagesMulti( */ static void RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId) { - AddCatcacheInvalidationMessage(&u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, cacheId, hashValue, dbId); + AddCatcacheInvalidationMessage(&GetInvalCxt()->transInvalInfo->CurrentCmdInvalidMsgs, cacheId, hashValue, dbId); } /* @@ -421,7 +422,7 @@ static void RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId */ static void RegisterCatalogInvalidation(Oid dbId, Oid catId) { - AddCatalogInvalidationMessage(&u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, dbId, catId); + AddCatalogInvalidationMessage(&GetInvalCxt()->transInvalInfo->CurrentCmdInvalidMsgs, dbId, catId); } /* @@
-431,7 +432,7 @@ static void RegisterCatalogInvalidation(Oid dbId, Oid catId) */ static void RegisterRelcacheInvalidation(Oid dbId, Oid relId) { - AddRelcacheInvalidationMessage(&u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, dbId, relId); + AddRelcacheInvalidationMessage(&GetInvalCxt()->transInvalInfo->CurrentCmdInvalidMsgs, dbId, relId); /* * Most of the time, relcache invalidation is associated with system @@ -455,7 +456,7 @@ static void RegisterRelcacheInvalidation(Oid dbId, Oid relId) static void RegisterPartcacheInvalidation(Oid dbId, Oid partId) { - AddPartcacheInvalidationMessage(&u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, dbId, partId); + AddPartcacheInvalidationMessage(&GetInvalCxt()->transInvalInfo->CurrentCmdInvalidMsgs, dbId, partId); /* * Most of the time, relcache invalidation is associated with system @@ -493,20 +494,141 @@ static void SendCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId) SendSharedInvalidMessages(&msg, 1); } +void LocalExecuteThreadAndSessionInvalidationMessage(SharedInvalidationMessage* msg) +{ + LocalExecuteThreadInvalidationMessage(msg); + LocalExecuteSessionInvalidationMessage(msg); +} + +static void ThreadInvalidCatalog(SharedInvalidationMessage* msg) +{ + if (msg->cat.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->cat.dbId == InvalidOid) { + CatalogCacheFlushCatalog(msg->cat.catId); + /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */ + } +} +static void ThreadInvalidRelCache(SharedInvalidationMessage* msg) +{ + if (msg->rc.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->rc.dbId == InvalidOid) { + RelationCacheInvalidateEntry(msg->rc.relId); + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (int i = 0; i < inval_cxt->relcache_callback_count; i++) { + struct RELCACHECALLBACK* ccitem = inval_cxt->relcache_callback_list + i; + (*ccitem->function)(ccitem->arg, msg->rc.relId); + } + } +} +static void ThreadInvalidSmgr(SharedInvalidationMessage* msg) +{ + /* + * We could have smgr entries for relations of other databases, so no + * short-circuit test is possible here. 
+ */ + RelFileNodeBackend rnode; + RelFileNodeCopy(rnode.node, msg->sm.rnode, InvalidBktId); + rnode.backend = (msg->sm.backend_hi << 16) | (int)msg->sm.backend_lo; + smgrclosenode(rnode); +} +static void ThreadInvalidHbktSmgr(SharedInvalidationMessage* msg) +{ + RelFileNodeBackend rnode; + /* Hash bucket table is always regular relations */ + rnode.backend = InvalidBackendId; + RelFileNodeCopy(rnode.node, msg->hbksm.rnode, (int) msg->hbksm.bucketId); + smgrclosenode(rnode); +} +static void ThreadInvalidRelmap(SharedInvalidationMessage* msg) +{ + /* We only care about our own database and shared catalogs */ + if (msg->rm.dbId == InvalidOid) { + RelationMapInvalidate(true); + } else if (msg->rm.dbId == t_thrd.lsc_cxt.lsc->my_database_id) { + RelationMapInvalidate(false); + } +} +static void ThreadInvalidPartCache(SharedInvalidationMessage* msg) +{ + if (msg->pc.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->pc.dbId == InvalidOid) { + PartitionCacheInvalidateEntry(msg->pc.partId); + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (int i = 0; i < inval_cxt->partcache_callback_count; i++) { + struct PARTCACHECALLBACK* ccitem = inval_cxt->partcache_callback_list + i; + (*ccitem->function)(ccitem->arg, msg->pc.partId); + } + } +} +static void ThreadInvalidCatCache(SharedInvalidationMessage* msg) +{ + if (msg->cc.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->cc.dbId == InvalidOid) { + CatalogCacheIdInvalidate(msg->cc.id, msg->cc.hashValue); + CallThreadSyscacheCallbacks(msg->cc.id, msg->cc.hashValue); + } +} /* - * LocalExecuteInvalidationMessage + * LocalExecuteThreadInvalidationMessage * * Process a single invalidation message (which could be of any type). * Only the local caches are flushed; this does not transmit the message * to other backends. */ -void LocalExecuteInvalidationMessage(SharedInvalidationMessage* msg) +void LocalExecuteThreadInvalidationMessage(SharedInvalidationMessage* msg) +{ + if (!EnableLocalSysCache()) { + return; + } + Assert(CheckMyDatabaseMatch()); + switch (msg->id) { + case SHAREDINVALCATALOG_ID: { /* reset system table */ + ThreadInvalidCatalog(msg); + break; + } + case SHAREDINVALRELCACHE_ID: { /* invalidate the table and call the callbacks registered on it */ + ThreadInvalidRelCache(msg); + break; + } + case SHAREDINVALSMGR_ID: { /* invalidate the smgr struct; we don't care which db it belongs to */ + ThreadInvalidSmgr(msg); + break; + } + case SHAREDINVALHBKTSMGR_ID: { /* invalidate the smgr struct; we don't care which db it belongs to */ + ThreadInvalidHbktSmgr(msg); + break; + } + case SHAREDINVALRELMAP_ID: { /* invalidate the relmap cxt */ + ThreadInvalidRelmap(msg); + break; + } + case SHAREDINVALPARTCACHE_ID: { /* invalidate the partcache and call the callbacks registered on the partition */ + ThreadInvalidPartCache(msg); + break; + } + case SHAREDINVALFUNC_ID: { /* the function hashtable is not thread-level, so it is invalidated by the session inval msg */ + break; + } + default:{ + if (msg->id >= 0) { /* invalidate the catcache; most cases are DDLs on a rel */ + ThreadInvalidCatCache(msg); + } else { + ereport(FATAL, (errmsg("unrecognized SI message ID: %d", msg->id))); + } + } + } +} + +/* + * LocalExecuteInvalidationMessage + * + * Process a single invalidation message (which could be of any type). + * Only the local caches are flushed; this does not transmit the message + * to other backends.
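LocalExecuteThreadInvalidationMessage above dispatches on the message ID: non-negative IDs address a specific catcache entry, while the negative sentinel IDs select catalog, relcache, smgr, relmap, partcache, or function-cache handling. A schematic of that dispatch shape; the message layout, ID values, and handlers below are invented stand-ins:

```
#include <cstdio>

enum MsgId { CATALOG = -1, RELCACHE = -2, SMGR = -3 }; // stand-in IDs

struct Msg { int id; unsigned db; unsigned obj; };

void dispatch(const Msg& m, unsigned my_db)
{
    if (m.id >= 0) {                       // catcache entry invalidation
        if (m.db == my_db || m.db == 0)    // 0 models InvalidOid (shared)
            std::printf("invalidate catcache %d hash %u\n", m.id, m.obj);
        return;
    }
    switch (m.id) {
        case CATALOG:  std::printf("flush whole catalog %u\n", m.obj); break;
        case RELCACHE: std::printf("invalidate relcache %u\n", m.obj); break;
        case SMGR:     std::printf("close smgr node %u\n", m.obj);     break;
        default:       std::printf("unrecognized SI message\n");       break;
    }
}
```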
+ */ +static void LocalExecuteInvalidationMessage(SharedInvalidationMessage* msg) { if (msg->id >= 0) { if (msg->cc.dbId == u_sess->proc_cxt.MyDatabaseId || msg->cc.dbId == InvalidOid) { CatalogCacheIdInvalidate(msg->cc.id, msg->cc.hashValue); - CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue); + CallSessionSyscacheCallbacks(msg->cc.id, msg->cc.hashValue); } } else if (msg->id == SHAREDINVALCATALOG_ID) { if (msg->cat.dbId == u_sess->proc_cxt.MyDatabaseId || msg->cat.dbId == InvalidOid) { @@ -563,7 +685,7 @@ void LocalExecuteInvalidationMessage(SharedInvalidationMessage* msg) } } else if (msg->id == SHAREDINVALFUNC_ID) { if (msg->fm.dbId == u_sess->proc_cxt.MyDatabaseId || msg->fm.dbId == InvalidOid) { - plpgsql_HashTableDeleteAndCheckFunc(msg->fm.cacheId, msg->fm.objId); + plpgsql_hashtable_delete_and_check_invalid_item(msg->fm.cacheId, msg->fm.objId); } } else { ereport(FATAL, (errmsg("unrecognized SI message ID: %d", msg->id))); @@ -581,6 +703,60 @@ void LocalExecuteInvalidationMessage(SharedInvalidationMessage* msg) } } +void LocalExecuteSessionInvalidationMessage(SharedInvalidationMessage* msg) +{ + if (!EnableLocalSysCache()) { + LocalExecuteInvalidationMessage(msg); + return; + } + Assert(CheckMyDatabaseMatch()); + if (msg->id >= 0) { + if (msg->cc.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->cc.dbId == InvalidOid) { + CallSessionSyscacheCallbacks(msg->cc.id, msg->cc.hashValue); + } + } else if (msg->id == SHAREDINVALCATALOG_ID) { + if (msg->cat.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->cat.dbId == InvalidOid) { + t_thrd.lsc_cxt.lsc->systabcache.SessionCatCacheCallBack(msg->cat.catId); + /* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */ + } + } else if (msg->id == SHAREDINVALRELCACHE_ID) { + if (msg->rc.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->rc.dbId == InvalidOid) { + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (int i = 0; i < inval_cxt->relcache_callback_count; i++) { + struct RELCACHECALLBACK* ccitem = inval_cxt->relcache_callback_list + i; + (*ccitem->function)(ccitem->arg, msg->rc.relId); + } + } + } else if (msg->id == SHAREDINVALSMGR_ID) { + /* on GSC mode, smgrcache is in thread memcxt */ + } else if (msg->id == SHAREDINVALHBKTSMGR_ID) { + /* on GSC mode, smgrcache is in thread memcxt */ + } else if (msg->id == SHAREDINVALRELMAP_ID) { + /* on GSC mode, relmap is in thread memcxt */ + } else if (msg->id == SHAREDINVALPARTCACHE_ID) { + if (msg->pc.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->pc.dbId == InvalidOid) { + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (int i = 0; i < inval_cxt->partcache_callback_count; i++) { + struct PARTCACHECALLBACK* ccitem = inval_cxt->partcache_callback_list + i; + (*ccitem->function)(ccitem->arg, msg->pc.partId); + } + } + } else if (msg->id == SHAREDINVALFUNC_ID) { + if (msg->fm.dbId == t_thrd.lsc_cxt.lsc->my_database_id || msg->fm.dbId == InvalidOid) { + plpgsql_hashtable_delete_and_check_invalid_item(msg->fm.cacheId, msg->fm.objId); + } + } else { + ereport(FATAL, (errmsg("unrecognized SI message ID: %d", msg->id))); + } + + if (ENABLE_GPC) { + bool check = GlobalPlanCache::MsgCheck(msg); + if (check == true && u_sess->pcache_cxt.gpc_remote_msg == false) { + u_sess->pcache_cxt.gpc_in_ddl = true; + } + } +} + /* * InvalidateSystemCaches * @@ -594,26 +770,31 @@ void LocalExecuteInvalidationMessage(SharedInvalidationMessage* msg) */ void InvalidateSystemCaches(void) { + InvalidateThreadSystemCaches(); + InvalidateSessionSystemCaches(); +} + +void 
InvalidateSessionSystemCaches(void) +{ + if (!EnableLocalSysCache()) { + ResetCatalogCaches(); + RelationCacheInvalidate(); + PartitionCacheInvalidate(); + } int i; - - ResetCatalogCaches(); - RelationCacheInvalidate(); /* gets smgr and relmap too */ - PartitionCacheInvalidate(); - for (i = 0; i < u_sess->inval_cxt.syscache_callback_count; i++) { - struct SYSCACHECALLBACK* ccitem = u_sess->inval_cxt.syscache_callback_list + i; - + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (i = 0; i < inval_cxt->syscache_callback_count; i++) { + struct SYSCACHECALLBACK* ccitem = inval_cxt->syscache_callback_list + i; (*ccitem->function)(ccitem->arg, ccitem->id, 0); } - for (i = 0; i < u_sess->inval_cxt.relcache_callback_count; i++) { - struct RELCACHECALLBACK* ccitem = u_sess->inval_cxt.relcache_callback_list + i; - + for (i = 0; i < inval_cxt->relcache_callback_count; i++) { + struct RELCACHECALLBACK* ccitem = inval_cxt->relcache_callback_list + i; (*ccitem->function)(ccitem->arg, InvalidOid); } - for (i = 0; i < u_sess->inval_cxt.partcache_callback_count; i++) { - struct PARTCACHECALLBACK* ccitem = u_sess->inval_cxt.partcache_callback_list + i; - + for (i = 0; i < inval_cxt->partcache_callback_count; i++) { + struct PARTCACHECALLBACK* ccitem = inval_cxt->partcache_callback_list + i; (*ccitem->function)(ccitem->arg, InvalidOid); } @@ -623,20 +804,34 @@ void InvalidateSystemCaches(void) } } -/* - * AcceptInvalidationMessages - * Read and process invalidation messages from the shared invalidation - * message queue. - * - * Note: - * This should be called as the first step in processing a transaction. - */ -void AcceptInvalidationMessages(void) +void InvalidateThreadSystemCaches(void) +{ + if (!EnableLocalSysCache()) { + return; + } + int i; + ResetCatalogCaches(); + RelationCacheInvalidate(); /* gets smgr and relmap too */ + PartitionCacheInvalidate(); + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (i = 0; i < inval_cxt->syscache_callback_count; i++) { + struct SYSCACHECALLBACK* ccitem = inval_cxt->syscache_callback_list + i; + (*ccitem->function)(ccitem->arg, ccitem->id, 0); + } + + for (i = 0; i < inval_cxt->relcache_callback_count; i++) { + struct RELCACHECALLBACK* ccitem = inval_cxt->relcache_callback_list + i; + (*ccitem->function)(ccitem->arg, InvalidOid); + } + + for (i = 0; i < inval_cxt->partcache_callback_count; i++) { + struct PARTCACHECALLBACK* ccitem = inval_cxt->partcache_callback_list + i; + (*ccitem->function)(ccitem->arg, InvalidOid); + } +} + +static void TestCodeToForceCacheFlushes() { - u_sess->pcache_cxt.gpc_remote_msg = true; - ++u_sess->inval_cxt.deepthInAcceptInvalidationMessage; - ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage, InvalidateSystemCaches); - u_sess->pcache_cxt.gpc_remote_msg = false; /* * Test code to force cache flushes anytime a flush could happen. * @@ -665,8 +860,48 @@ void AcceptInvalidationMessages(void) #elif defined(CLOBBER_CACHE_RECURSIVELY) InvalidateSystemCaches(); #endif +} - --u_sess->inval_cxt.deepthInAcceptInvalidationMessage; +/* + * AcceptInvalidationMessages + * Read and process invalidation messages from the shared invalidation + * message queue. + * + * Note: + * This should be called as the first step in processing a transaction. 
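+ *
+ * Under GSC (EnableLocalSysCache()), a thread-pool worker drains the queue in
+ * two passes, thread-level messages first and session-level messages second,
+ * which the body below implements roughly as:
+ *
+ *     ReceiveSharedInvalidMessages(LocalExecuteThreadInvalidationMessage,
+ *                                  InvalidateThreadSystemCaches, false);
+ *     ReceiveSharedInvalidMessages(LocalExecuteSessionInvalidationMessage,
+ *                                  InvalidateSessionSystemCaches, true);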
+ */ +void AcceptInvalidationMessages() +{ + if (EnableLocalSysCache()) { + u_sess->pcache_cxt.gpc_remote_msg = true; + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + ++inval_cxt->DeepthInAcceptInvalidationMessage; + if (!IS_THREAD_POOL_WORKER) { + ReceiveSharedInvalidMessages(LocalExecuteThreadAndSessionInvalidationMessage, + InvalidateSystemCaches, false); + } else { + ReceiveSharedInvalidMessages(LocalExecuteThreadInvalidationMessage, InvalidateThreadSystemCaches, false); + u_sess->pcache_cxt.gpc_remote_msg = false; + TestCodeToForceCacheFlushes(); + --inval_cxt->DeepthInAcceptInvalidationMessage; + + u_sess->pcache_cxt.gpc_remote_msg = true; + inval_cxt = &u_sess->inval_cxt; + ++inval_cxt->DeepthInAcceptInvalidationMessage; + ReceiveSharedInvalidMessages(LocalExecuteSessionInvalidationMessage, InvalidateSessionSystemCaches, true); + } + u_sess->pcache_cxt.gpc_remote_msg = false; + TestCodeToForceCacheFlushes(); + --inval_cxt->DeepthInAcceptInvalidationMessage; + return; + } + + u_sess->pcache_cxt.gpc_remote_msg = true; + ++u_sess->inval_cxt.DeepthInAcceptInvalidationMessage; + ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage, InvalidateSystemCaches, false); + u_sess->pcache_cxt.gpc_remote_msg = false; + TestCodeToForceCacheFlushes(); + --u_sess->inval_cxt.DeepthInAcceptInvalidationMessage; } /* @@ -675,12 +910,13 @@ void AcceptInvalidationMessages(void) */ void AtStart_Inval(void) { - Assert(u_sess->inval_cxt.transInvalInfo == NULL); - u_sess->inval_cxt.transInvalInfo = + knl_u_inval_context *inval_cxt = GetInvalCxt(); + Assert(inval_cxt->transInvalInfo == NULL); + inval_cxt->transInvalInfo = (TransInvalidationInfo*)MemoryContextAllocZero(u_sess->top_transaction_mem_cxt, sizeof(TransInvalidationInfo)); - u_sess->inval_cxt.transInvalInfo->my_level = GetCurrentTransactionNestLevel(); - u_sess->inval_cxt.SharedInvalidMessagesArray = NULL; - u_sess->inval_cxt.numSharedInvalidMessagesArray = 0; + inval_cxt->transInvalInfo->my_level = GetCurrentTransactionNestLevel(); + inval_cxt->SharedInvalidMessagesArray = NULL; + inval_cxt->numSharedInvalidMessagesArray = 0; } /* @@ -707,53 +943,54 @@ void PostPrepare_Inval(void) void AtSubStart_Inval(void) { TransInvalidationInfo* myInfo = NULL; - - Assert(u_sess->inval_cxt.transInvalInfo != NULL); + knl_u_inval_context *inval_cxt = GetInvalCxt(); + Assert(inval_cxt->transInvalInfo != NULL); myInfo = (TransInvalidationInfo*)MemoryContextAllocZero(u_sess->top_transaction_mem_cxt, sizeof(TransInvalidationInfo)); - myInfo->parent = u_sess->inval_cxt.transInvalInfo; + myInfo->parent = inval_cxt->transInvalInfo; myInfo->my_level = GetCurrentTransactionNestLevel(); - u_sess->inval_cxt.transInvalInfo = myInfo; + inval_cxt->transInvalInfo = myInfo; } /* - * Collect invalidation messages into u_sess->inval_cxt.SharedInvalidMessagesArray array. + * Collect invalidation messages into GetInvalCxt()->SharedInvalidMessagesArray array. 
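+ *
+ * The array starts at FIRSTCHUNKSIZE entries and doubles until the incoming
+ * chunk fits, so appending stays amortized O(1) per message:
+ *
+ *     while (num + n > max)
+ *         max *= 2;    // then repalloc to max * sizeof(SharedInvalidationMessage)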
*/ static void MakeSharedInvalidMessagesArray(const SharedInvalidationMessage* msgs, int n) { /* * Initialise array first time through in each commit */ - if (u_sess->inval_cxt.SharedInvalidMessagesArray == NULL) { - u_sess->inval_cxt.maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE; - u_sess->inval_cxt.numSharedInvalidMessagesArray = 0; + knl_u_inval_context *inval_cxt = GetInvalCxt(); + if (inval_cxt->SharedInvalidMessagesArray == NULL) { + inval_cxt->maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE; + inval_cxt->numSharedInvalidMessagesArray = 0; /* * Although this is being palloc'd we don't actually free it directly. * We're so close to EOXact that we now we're going to lose it anyhow. */ - u_sess->inval_cxt.SharedInvalidMessagesArray = (SharedInvalidationMessage*)palloc( - u_sess->inval_cxt.maxSharedInvalidMessagesArray * sizeof(SharedInvalidationMessage)); + inval_cxt->SharedInvalidMessagesArray = (SharedInvalidationMessage*)palloc( + inval_cxt->maxSharedInvalidMessagesArray * sizeof(SharedInvalidationMessage)); } - if ((u_sess->inval_cxt.numSharedInvalidMessagesArray + n) > u_sess->inval_cxt.maxSharedInvalidMessagesArray) { - while ((u_sess->inval_cxt.numSharedInvalidMessagesArray + n) > u_sess->inval_cxt.maxSharedInvalidMessagesArray) - u_sess->inval_cxt.maxSharedInvalidMessagesArray *= 2; + if ((inval_cxt->numSharedInvalidMessagesArray + n) > inval_cxt->maxSharedInvalidMessagesArray) { + while ((inval_cxt->numSharedInvalidMessagesArray + n) > inval_cxt->maxSharedInvalidMessagesArray) + inval_cxt->maxSharedInvalidMessagesArray *= 2; - u_sess->inval_cxt.SharedInvalidMessagesArray = - (SharedInvalidationMessage*)repalloc(u_sess->inval_cxt.SharedInvalidMessagesArray, - u_sess->inval_cxt.maxSharedInvalidMessagesArray * sizeof(SharedInvalidationMessage)); + inval_cxt->SharedInvalidMessagesArray = + (SharedInvalidationMessage*)repalloc(inval_cxt->SharedInvalidMessagesArray, + inval_cxt->maxSharedInvalidMessagesArray * sizeof(SharedInvalidationMessage)); } /* * Append the next chunk onto the array */ - int rc = memcpy_s(u_sess->inval_cxt.SharedInvalidMessagesArray + u_sess->inval_cxt.numSharedInvalidMessagesArray, + int rc = memcpy_s(inval_cxt->SharedInvalidMessagesArray + inval_cxt->numSharedInvalidMessagesArray, n * sizeof(SharedInvalidationMessage), msgs, n * sizeof(SharedInvalidationMessage)); securec_check(rc, "\0", "\0"); - u_sess->inval_cxt.numSharedInvalidMessagesArray += n; + inval_cxt->numSharedInvalidMessagesArray += n; } /* @@ -772,16 +1009,16 @@ static void MakeSharedInvalidMessagesArray(const SharedInvalidationMessage* msgs int xactGetCommittedInvalidationMessages(SharedInvalidationMessage** msgs, bool* RelcacheInitFileInval) { MemoryContext oldcontext; - + knl_u_inval_context *inval_cxt = GetInvalCxt(); /* Must be at top of stack */ - Assert(u_sess->inval_cxt.transInvalInfo != NULL && u_sess->inval_cxt.transInvalInfo->parent == NULL); + Assert(inval_cxt->transInvalInfo != NULL && inval_cxt->transInvalInfo->parent == NULL); /* * Relcache init file invalidation requires processing both before and * after we send the SI messages. However, we need not do anything unless * we committed. 
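+ *
+ * Caller sketch (the transaction-commit path, abridged); the returned array
+ * is attached to the commit record so replay can re-execute the messages:
+ *
+ *     SharedInvalidationMessage *invalMsgs;
+ *     bool relcacheInitFileInval;
+ *     int nmsgs = xactGetCommittedInvalidationMessages(&invalMsgs,
+ *                                                      &relcacheInitFileInval);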
*/ - *RelcacheInitFileInval = u_sess->inval_cxt.transInvalInfo->RelcacheInitFileInval; + *RelcacheInitFileInval = inval_cxt->transInvalInfo->RelcacheInitFileInval; /* * Walk through TransInvalidationInfo to collect all the messages into a @@ -794,17 +1031,17 @@ int xactGetCommittedInvalidationMessages(SharedInvalidationMessage** msgs, bool* oldcontext = MemoryContextSwitchTo(t_thrd.mem_cxt.cur_transaction_mem_cxt); ProcessInvalidationMessagesMulti( - &u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, MakeSharedInvalidMessagesArray); + &inval_cxt->transInvalInfo->CurrentCmdInvalidMsgs, MakeSharedInvalidMessagesArray); ProcessInvalidationMessagesMulti( - &u_sess->inval_cxt.transInvalInfo->PriorCmdInvalidMsgs, MakeSharedInvalidMessagesArray); + &inval_cxt->transInvalInfo->PriorCmdInvalidMsgs, MakeSharedInvalidMessagesArray); MemoryContextSwitchTo(oldcontext); Assert( - !(u_sess->inval_cxt.numSharedInvalidMessagesArray > 0 && u_sess->inval_cxt.SharedInvalidMessagesArray == NULL)); + !(inval_cxt->numSharedInvalidMessagesArray > 0 && inval_cxt->SharedInvalidMessagesArray == NULL)); - *msgs = u_sess->inval_cxt.SharedInvalidMessagesArray; + *msgs = inval_cxt->SharedInvalidMessagesArray; - return u_sess->inval_cxt.numSharedInvalidMessagesArray; + return inval_cxt->numSharedInvalidMessagesArray; } /* @@ -874,37 +1111,38 @@ void ProcessCommittedInvalidationMessages( */ void AtEOXact_Inval(bool isCommit) { + knl_u_inval_context *inval_cxt = GetInvalCxt(); if (isCommit) { /* Must be at top of stack */ - Assert(u_sess->inval_cxt.transInvalInfo != NULL && u_sess->inval_cxt.transInvalInfo->parent == NULL); + Assert(inval_cxt->transInvalInfo != NULL && inval_cxt->transInvalInfo->parent == NULL); /* * Relcache init file invalidation requires processing both before and * after we send the SI messages. However, we need not do anything * unless we committed. 
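+ * On commit the sequence below is: pre-invalidate the relcache init file,
+ * append the final command's messages to PriorCmdInvalidMsgs, broadcast them
+ * with SendSharedInvalidMessages, then post-invalidate the init file, so a
+ * concurrent backend can never cache a stale init file in between.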
*/ - if (u_sess->inval_cxt.transInvalInfo->RelcacheInitFileInval) { + if (inval_cxt->transInvalInfo->RelcacheInitFileInval) { RelationCacheInitFilePreInvalidate(); } - AppendInvalidationMessages(&u_sess->inval_cxt.transInvalInfo->PriorCmdInvalidMsgs, - &u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs); + + AppendInvalidationMessages(&inval_cxt->transInvalInfo->PriorCmdInvalidMsgs, + &inval_cxt->transInvalInfo->CurrentCmdInvalidMsgs); ProcessInvalidationMessagesMulti( - &u_sess->inval_cxt.transInvalInfo->PriorCmdInvalidMsgs, SendSharedInvalidMessages); + &inval_cxt->transInvalInfo->PriorCmdInvalidMsgs, SendSharedInvalidMessages); - if (u_sess->inval_cxt.transInvalInfo->RelcacheInitFileInval) { + if (inval_cxt->transInvalInfo->RelcacheInitFileInval) { RelationCacheInitFilePostInvalidate(); } - } else if (u_sess->inval_cxt.transInvalInfo != NULL) { + } else if (inval_cxt->transInvalInfo != NULL) { /* Must be at top of stack */ - Assert(u_sess->inval_cxt.transInvalInfo->parent == NULL); - + Assert(inval_cxt->transInvalInfo->parent == NULL); ProcessInvalidationMessages( - &u_sess->inval_cxt.transInvalInfo->PriorCmdInvalidMsgs, LocalExecuteInvalidationMessage); + &inval_cxt->transInvalInfo->PriorCmdInvalidMsgs, LocalExecuteThreadAndSessionInvalidationMessage); } /* Need not free anything explicitly */ - u_sess->inval_cxt.transInvalInfo = NULL; + inval_cxt->transInvalInfo = NULL; } /* @@ -928,7 +1166,8 @@ void AtEOXact_Inval(bool isCommit) void AtEOSubXact_Inval(bool isCommit) { int my_level = GetCurrentTransactionNestLevel(); - TransInvalidationInfo* myInfo = u_sess->inval_cxt.transInvalInfo; + knl_u_inval_context *inval_cxt = GetInvalCxt(); + TransInvalidationInfo* myInfo = inval_cxt->transInvalInfo; if (isCommit) { /* Must be at non-top of stack */ @@ -947,18 +1186,17 @@ void AtEOSubXact_Inval(bool isCommit) } /* Pop the transaction state stack */ - u_sess->inval_cxt.transInvalInfo = myInfo->parent; + inval_cxt->transInvalInfo = myInfo->parent; /* Need not free anything else explicitly */ pfree_ext(myInfo); } else if (myInfo != NULL && myInfo->my_level == my_level) { /* Must be at non-top of stack */ Assert(myInfo->parent != NULL); - - ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs, LocalExecuteInvalidationMessage); + ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs, LocalExecuteThreadAndSessionInvalidationMessage); /* Pop the transaction state stack */ - u_sess->inval_cxt.transInvalInfo = myInfo->parent; + inval_cxt->transInvalInfo = myInfo->parent; /* Need not free anything else explicitly */ pfree_ext(myInfo); @@ -982,19 +1220,21 @@ void AtEOSubXact_Inval(bool isCommit) */ void CommandEndInvalidationMessages(void) { + knl_u_inval_context *inval_cxt = GetInvalCxt(); /* * You might think this shouldn't be called outside any transaction, but * bootstrap does it, and also ABORT issued when not in a transaction. So * just quietly return if no state to work on. 
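+ * With GSC enabled, the current command's messages are applied to the shared
+ * global caches first (GlobalExecuteSharedInvalidMessages), then replayed on
+ * the local thread and session caches, and finally appended to
+ * PriorCmdInvalidMsgs for commit-time broadcast.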
*/ - if (u_sess->inval_cxt.transInvalInfo == NULL) { + if (inval_cxt->transInvalInfo == NULL) { return; } - + ProcessInvalidationMessagesMulti( + &inval_cxt->transInvalInfo->CurrentCmdInvalidMsgs, GlobalExecuteSharedInvalidMessages); ProcessInvalidationMessages( - &u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, LocalExecuteInvalidationMessage); - AppendInvalidationMessages(&u_sess->inval_cxt.transInvalInfo->PriorCmdInvalidMsgs, - &u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs); + &inval_cxt->transInvalInfo->CurrentCmdInvalidMsgs, LocalExecuteThreadAndSessionInvalidationMessage); + AppendInvalidationMessages(&inval_cxt->transInvalInfo->PriorCmdInvalidMsgs, + &inval_cxt->transInvalInfo->CurrentCmdInvalidMsgs); } /* @@ -1111,7 +1351,7 @@ void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newt void CacheInvalidateFunction(Oid funcId, Oid pkgId) { - AddFunctionCacheInvalidationMessage(&u_sess->inval_cxt.transInvalInfo->CurrentCmdInvalidMsgs, + AddFunctionCacheInvalidationMessage(&GetInvalCxt()->transInvalInfo->CurrentCmdInvalidMsgs, u_sess->proc_cxt.MyDatabaseId, funcId, pkgId); } @@ -1129,7 +1369,6 @@ void CacheInvalidateFunction(Oid funcId, Oid pkgId) void CacheInvalidateCatalog(Oid catalogId) { Oid databaseId; - if (IsSharedRelation(catalogId)) { databaseId = InvalidOid; } else { @@ -1340,7 +1579,7 @@ void CacheInvalidateHeapTupleInplace(Relation relation, HeapTuple tuple) } /* - * CacheRegisterSyscacheCallback + * CacheRegisterThreadSyscacheCallback * Register the specified function to be called for all future * invalidation events in the specified cache. The cache ID and the * hash value of the tuple being invalidated will be passed to the @@ -1352,21 +1591,62 @@ void CacheInvalidateHeapTupleInplace(Relation relation, HeapTuple tuple) * worth troubling over, especially since most of the current callees just * flush all cached state anyway. 
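+ *
+ * Registration sketch with a hypothetical callback; cache ids come from
+ * syscache.h and the signature is fixed by SyscacheCallbackFunction:
+ *
+ *     static void ProcInvalCallback(Datum arg, int cacheid, uint32 hashvalue)
+ *     {
+ *         // drop any thread-local state derived from pg_proc here
+ *     }
+ *     CacheRegisterThreadSyscacheCallback(PROCOID, ProcInvalCallback, (Datum)0);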
*/ -void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg) +void CacheRegisterThreadSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg) { - if (u_sess->inval_cxt.syscache_callback_count >= MAX_SYSCACHE_CALLBACKS) { + if (!EnableLocalSysCache()) { + CacheRegisterSessionSyscacheCallback(cacheid, func, arg); + return; + } + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (int i = 0; i < inval_cxt->syscache_callback_count; i++) { + if (inval_cxt->syscache_callback_list[i].id == cacheid && + inval_cxt->syscache_callback_list[i].function == func) { + Assert(IS_THREAD_POOL_STREAM); + if (inval_cxt->syscache_callback_list[i].arg != arg) { + inval_cxt->syscache_callback_list[i].arg = arg; + } + return; + } + } + if (inval_cxt->syscache_callback_count >= MAX_SYSCACHE_CALLBACKS) { + ereport(FATAL, (errmsg("out of syscache_callback_list slots"))); + } + Assert(func != NULL); + inval_cxt->syscache_callback_list[inval_cxt->syscache_callback_count].id = cacheid; + inval_cxt->syscache_callback_list[inval_cxt->syscache_callback_count].function = func; + inval_cxt->syscache_callback_list[inval_cxt->syscache_callback_count].arg = arg; + + ++inval_cxt->syscache_callback_count; +} + +/* + * CacheRegisterSessionSyscacheCallback + * make sure the cache the func flush is in u_sess, or you should use CacheRegisterThreadSyscacheCallback + */ +void CacheRegisterSessionSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg) +{ + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (int i = 0; i < inval_cxt->syscache_callback_count; i++) { + if (inval_cxt->syscache_callback_list[i].id == cacheid && + inval_cxt->syscache_callback_list[i].function == func) { + Assert(inval_cxt->syscache_callback_list[i].arg == arg); + inval_cxt->syscache_callback_list[i].arg = arg; + return; + } + } + if (inval_cxt->syscache_callback_count >= MAX_SYSCACHE_CALLBACKS) { ereport(FATAL, (errmsg("out of syscache_callback_list slots"))); } - u_sess->inval_cxt.syscache_callback_list[u_sess->inval_cxt.syscache_callback_count].id = cacheid; - u_sess->inval_cxt.syscache_callback_list[u_sess->inval_cxt.syscache_callback_count].function = func; - u_sess->inval_cxt.syscache_callback_list[u_sess->inval_cxt.syscache_callback_count].arg = arg; + inval_cxt->syscache_callback_list[inval_cxt->syscache_callback_count].id = cacheid; + inval_cxt->syscache_callback_list[inval_cxt->syscache_callback_count].function = func; + inval_cxt->syscache_callback_list[inval_cxt->syscache_callback_count].arg = arg; - ++u_sess->inval_cxt.syscache_callback_count; + ++inval_cxt->syscache_callback_count; } /* - * CacheRegisterRelcacheCallback + * CacheRegisterThreadRelcacheCallback * Register the specified function to be called for all future * relcache invalidation events. The OID of the relation being * invalidated will be passed to the function. @@ -1374,19 +1654,58 @@ void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, D * NOTE: InvalidOid will be passed if a cache reset request is received. * In this case the called routines should flush all cached state. 
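+ *
+ * Sketch with a hypothetical callback; relid is InvalidOid on a cache reset:
+ *
+ *     static void RelStateInvalCallback(Datum arg, Oid relid)
+ *     {
+ *         if (!OidIsValid(relid)) {
+ *             // full reset: flush all relation-derived state
+ *         }
+ *     }
+ *     CacheRegisterThreadRelcacheCallback(RelStateInvalCallback, (Datum)0);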
*/ -void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg) +void CacheRegisterThreadRelcacheCallback(RelcacheCallbackFunction func, Datum arg) { - if (u_sess->inval_cxt.relcache_callback_count >= MAX_RELCACHE_CALLBACKS) { + if (!EnableLocalSysCache()) { + CacheRegisterSessionRelcacheCallback(func, arg); + return; + } + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (int i = 0; i < inval_cxt->relcache_callback_count; i++) { + if (inval_cxt->relcache_callback_list[i].function == func) { + Assert(IS_THREAD_POOL_STREAM); + Assert(inval_cxt->relcache_callback_list[i].arg == arg); + if (inval_cxt->relcache_callback_list[i].arg != arg) { + inval_cxt->relcache_callback_list[i].arg = arg; + } + return; + } + } + if (inval_cxt->relcache_callback_count >= MAX_RELCACHE_CALLBACKS) { ereport(FATAL, (errmsg("out of relcache_callback_list slots"))); } + Assert(func != NULL); + inval_cxt->relcache_callback_list[inval_cxt->relcache_callback_count].function = func; + inval_cxt->relcache_callback_list[inval_cxt->relcache_callback_count].arg = arg; - u_sess->inval_cxt.relcache_callback_list[u_sess->inval_cxt.relcache_callback_count].function = func; - u_sess->inval_cxt.relcache_callback_list[u_sess->inval_cxt.relcache_callback_count].arg = arg; + ++inval_cxt->relcache_callback_count; +} - ++u_sess->inval_cxt.relcache_callback_count; +/* + * CacheRegisterSessionRelcacheCallback + * make sure the cache the func flush is in u_sess, or you should use CacheRegisterThreadRelcacheCallback + */ +void CacheRegisterSessionRelcacheCallback(RelcacheCallbackFunction func, Datum arg) +{ + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (int i = 0; i < inval_cxt->relcache_callback_count; i++) { + if (inval_cxt->relcache_callback_list[i].function == func) { + if (inval_cxt->relcache_callback_list[i].arg != arg) { + inval_cxt->relcache_callback_list[i].arg = arg; + } + return; + } + } + if (inval_cxt->relcache_callback_count >= MAX_RELCACHE_CALLBACKS) { + ereport(FATAL, (errmsg("out of relcache_callback_list slots"))); + } + inval_cxt->relcache_callback_list[inval_cxt->relcache_callback_count].function = func; + inval_cxt->relcache_callback_list[inval_cxt->relcache_callback_count].arg = arg; + + ++inval_cxt->relcache_callback_count; } /* - * CacheRegisterRelcacheCallback + * CacheRegisterThreadPartcacheCallback * Register the specified function to be called for all future * relcache invalidation events. The OID of the relation being * invalidated will be passed to the function. @@ -1394,30 +1713,86 @@ void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg) * NOTE: InvalidOid will be passed if a cache reset request is received. * In this case the called routines should flush all cached state. 
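+ *
+ * NOTE: on a pooled stream thread the same function may be registered more
+ * than once as the worker re-runs its init path; the body below treats a
+ * duplicate registration as an arg refresh rather than consuming a new slot.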
*/ -void CacheRegisterPartcacheCallback(PartcacheCallbackFunction func, Datum arg) +void CacheRegisterThreadPartcacheCallback(PartcacheCallbackFunction func, Datum arg) { - if (u_sess->inval_cxt.partcache_callback_count >= MAX_PARTCACHE_CALLBACKS) { + if (!EnableLocalSysCache()) { + CacheRegisterSessionPartcacheCallback(func, arg); + return; + } + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (int i = 0; i < inval_cxt->partcache_callback_count; i++) { + if (inval_cxt->partcache_callback_list[i].function == func) { + Assert(IS_THREAD_POOL_STREAM); + Assert(inval_cxt->partcache_callback_list[i].arg == arg); + if (inval_cxt->partcache_callback_list[i].arg != arg) { + inval_cxt->partcache_callback_list[i].arg = arg; + } + return; + } + } + if (inval_cxt->partcache_callback_count >= MAX_PARTCACHE_CALLBACKS) { ereport(FATAL, (errmsg("out of partcache_callback_list slots"))); } + Assert(func != NULL); + inval_cxt->partcache_callback_list[inval_cxt->partcache_callback_count].function = func; + inval_cxt->partcache_callback_list[inval_cxt->partcache_callback_count].arg = arg; - u_sess->inval_cxt.partcache_callback_list[u_sess->inval_cxt.partcache_callback_count].function = func; - u_sess->inval_cxt.partcache_callback_list[u_sess->inval_cxt.partcache_callback_count].arg = arg; - - ++u_sess->inval_cxt.partcache_callback_count; + ++inval_cxt->partcache_callback_count; } /* - * CallSyscacheCallbacks + * CacheRegisterSessionPartcacheCallback + * make sure the cache the func flush is in u_sess, or you should use CacheRegisterThreadPartcacheCallback + */ +void CacheRegisterSessionPartcacheCallback(PartcacheCallbackFunction func, Datum arg) +{ + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (int i = 0; i < inval_cxt->partcache_callback_count; i++) { + if (inval_cxt->partcache_callback_list[i].function == func) { + if (inval_cxt->partcache_callback_list[i].arg != arg) { + inval_cxt->partcache_callback_list[i].arg = arg; + } + return; + } + } + if (inval_cxt->partcache_callback_count >= MAX_PARTCACHE_CALLBACKS) { + ereport(FATAL, (errmsg("out of partcache_callback_list slots"))); + } + inval_cxt->partcache_callback_list[inval_cxt->partcache_callback_count].function = func; + inval_cxt->partcache_callback_list[inval_cxt->partcache_callback_count].arg = arg; + + ++inval_cxt->partcache_callback_count; +} + +/* + * CallThreadSyscacheCallbacks * * This is exported so that CatalogCacheFlushCatalog can call it, saving * this module from knowing which catcache IDs correspond to which catalogs. 
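+ *
+ * Only callbacks whose registered id matches cacheid fire; several callbacks
+ * may be attached to one cache id, and they run in registration order.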
*/ -void CallSyscacheCallbacks(int cacheid, uint32 hashvalue) +void CallThreadSyscacheCallbacks(int cacheid, uint32 hashvalue) +{ + Assert(EnableGlobalSysCache()); + int i; + knl_u_inval_context *inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + for (i = 0; i < inval_cxt->syscache_callback_count; i++) { + struct SYSCACHECALLBACK* ccitem = inval_cxt->syscache_callback_list + i; + if (ccitem->id == cacheid) { + (*ccitem->function)(ccitem->arg, cacheid, hashvalue); + } + } +} + +/* + * CallSessionSyscacheCallbacks + * make sure the cache the func flush is in u_sess, or you should use CallThreadSyscacheCallbacks + */ +void CallSessionSyscacheCallbacks(int cacheid, uint32 hashvalue) { int i; - - for (i = 0; i < u_sess->inval_cxt.syscache_callback_count; i++) { - struct SYSCACHECALLBACK* ccitem = u_sess->inval_cxt.syscache_callback_list + i; + knl_u_inval_context *inval_cxt = &u_sess->inval_cxt; + for (i = 0; i < inval_cxt->syscache_callback_count; i++) { + struct SYSCACHECALLBACK* ccitem = inval_cxt->syscache_callback_list + i; if (ccitem->id == cacheid) { (*ccitem->function)(ccitem->arg, cacheid, hashvalue); } @@ -1431,23 +1806,25 @@ void CallSyscacheCallbacks(int cacheid, uint32 hashvalue) static void PrepareInvalidationState(void) { TransInvalidationInfo *myInfo; + knl_u_inval_context *inval_cxt = GetInvalCxt(); - if (u_sess->inval_cxt.transInvalInfo != NULL && - u_sess->inval_cxt.transInvalInfo->my_level == GetCurrentTransactionNestLevel()) + if (inval_cxt->transInvalInfo != NULL && + inval_cxt->transInvalInfo->my_level == GetCurrentTransactionNestLevel()) { return; + } myInfo = (TransInvalidationInfo *)MemoryContextAllocZero(u_sess->top_transaction_mem_cxt, sizeof(TransInvalidationInfo)); - myInfo->parent = u_sess->inval_cxt.transInvalInfo; + myInfo->parent = inval_cxt->transInvalInfo; myInfo->my_level = GetCurrentTransactionNestLevel(); /* * If there's any previous entry, this one should be for a deeper nesting * level. */ - Assert(u_sess->inval_cxt.transInvalInfo == NULL || myInfo->my_level > u_sess->inval_cxt.transInvalInfo->my_level); + Assert(inval_cxt->transInvalInfo == NULL || myInfo->my_level > inval_cxt->transInvalInfo->my_level); - u_sess->inval_cxt.transInvalInfo = myInfo; + inval_cxt->transInvalInfo = myInfo; } /* diff --git a/src/common/backend/utils/cache/knl_globalbasedefcache.cpp b/src/common/backend/utils/cache/knl_globalbasedefcache.cpp new file mode 100644 index 000000000..a275640f1 --- /dev/null +++ b/src/common/backend/utils/cache/knl_globalbasedefcache.cpp @@ -0,0 +1,403 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + */ + +#include "access/transam.h" +#include "executor/executor.h" +#include "miscadmin.h" +#include "postgres.h" +#include "utils/knl_catcache.h" +#include "utils/knl_globalbasedefcache.h" +#include "utils/knl_globaldbstatmanager.h" +#include "utils/knl_partcache.h" +#include "utils/memutils.h" + +static uint64 GetBitmapSetSize(int nwords) +{ + return (offsetof(Bitmapset, words) + (nwords) * sizeof(bitmapword)); +} + +static uint64 GetRelEstimateSize(GlobalRelationEntry *entry) +{ + uint64 rel_size = + ((AllocSet)entry->rel_mem_manager)->totalSpace + /* memcxt total space */ + sizeof(GlobalRelationEntry) + CHUNK_ALGIN_PAD + /* palloc GlobalRelationEntry with chunk head */ + sizeof(AllocSetContext) + CHUNK_ALGIN_PAD; /* create MemoryContext with chunk head */ + return rel_size; +} + +static uint64 GetPartEstimateSize(GlobalPartitionEntry *entry) +{ + /* every palloc attached with a chunk head */ + uint64 part_size = + sizeof(GlobalPartitionEntry) + CHUNK_ALGIN_PAD + /* palloc GlobalPartitionEntry with chunk head */ + sizeof(PartitionData) + CHUNK_ALGIN_PAD + /* palloc PartitionData with chunk head */ + PARTITION_TUPLE_SIZE + CHUNK_ALGIN_PAD + /* palloc pd_part with chunk head */ + (entry->part->rd_options == NULL ? 0 : (VARSIZE(entry->part->rd_options) + CHUNK_ALGIN_PAD)) + + /* palloc rd_options with chunk head */ + (entry->part->pd_indexattr == NULL ? 0 : /* palloc pd_indexattr with chunk head */ + (GetBitmapSetSize(entry->part->pd_indexattr->nwords) + CHUNK_ALGIN_PAD)) + + (entry->part->pd_indexlist == NULL ? 0 : /* palloc pd_indexlist with chunk head, and its elements */ + sizeof(List) + CHUNK_ALGIN_PAD + + (sizeof(ListCell) + CHUNK_ALGIN_PAD) * entry->part->pd_indexlist->length); + return part_size; +} + +template +void GlobalBaseDefCache::RemoveElemFromBucket(GlobalBaseEntry *base) +{ + if (is_relation) { + GlobalRelationEntry *entry = (GlobalRelationEntry *)base; + uint64 rel_size = GetRelEstimateSize(entry); + pg_atomic_fetch_sub_u64(&m_base_space, rel_size); + m_db_entry->MemoryEstimateSub(rel_size); + } else { + GlobalPartitionEntry *entry = (GlobalPartitionEntry *)base; + uint64 part_size = GetPartEstimateSize(entry); + pg_atomic_fetch_sub_u64(&m_base_space, part_size); + m_db_entry->MemoryEstimateSub(part_size); + } + m_bucket_list.RemoveElemFromBucket(&base->cache_elem); +} +template +void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base) +{ + if (is_relation) { + GlobalRelationEntry *entry = (GlobalRelationEntry *)base; + uint64 rel_size = GetRelEstimateSize(entry); + pg_atomic_fetch_add_u64(&m_base_space, rel_size); + m_db_entry->MemoryEstimateAdd(rel_size); + } else { + GlobalPartitionEntry *entry = (GlobalPartitionEntry *)base; + uint64 part_size = GetPartEstimateSize(entry); + pg_atomic_fetch_add_u64(&m_base_space, part_size); + m_db_entry->MemoryEstimateAdd(part_size); + } + m_bucket_list.AddHeadToBucket(hash_index, &base->cache_elem); +} + +template void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base); +template void GlobalBaseDefCache::AddHeadToBucket(Index hash_index, GlobalBaseEntry *base); +template void GlobalBaseDefCache::RemoveElemFromBucket(GlobalBaseEntry *base); +template void GlobalBaseDefCache::RemoveElemFromBucket(GlobalBaseEntry *base); + + +template +void GlobalBaseEntry::Free(GlobalBaseEntry *entry) +{ + Assert(entry->refcount == 0); + if (is_relation) { + Assert(entry->type == GLOBAL_RELATION_ENTRY); + if 
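/* the relation variant owns a private memory context; drop it wholesale */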
(((GlobalRelationEntry *)entry)->rel_mem_manager != NULL) { + MemoryContextDelete(((GlobalRelationEntry *)entry)->rel_mem_manager); + } + } else { + Assert(entry->type == GLOBAL_PARTITION_ENTRY); + if (((GlobalPartitionEntry *)entry)->part != NULL) { + PartitionDestroyPartition(((GlobalPartitionEntry *)entry)->part); + } + } + pfree(entry); +} +template void GlobalBaseEntry::Free(GlobalBaseEntry *entry); +template void GlobalBaseEntry::Free(GlobalBaseEntry *entry); + +void GlobalBaseEntry::Release() +{ + /* we dont free entry here, free when search */ + Assert(this->refcount > 0); + (void)pg_atomic_fetch_sub_u64(&this->refcount, 1); +} + +void GlobalBaseDefCache::InitHashTable() +{ + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + m_bucket_list.Init(m_nbuckets); + m_obj_locks = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t) * m_nbuckets); + for (int i = 0; i < m_nbuckets; i++) { + PthreadRwLockInit(&m_obj_locks[i], NULL); + } + + m_is_swappingouts = (volatile uint32 *)palloc0(sizeof(volatile uint32) * m_nbuckets); + + /* acquire more oid locks for concurrent ddl */ + if (!m_is_shared) { + m_oid_locks = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t) * m_nbuckets); + for (int i = 0; i < m_nbuckets; i++) { + PthreadRwLockInit(&m_oid_locks[i], NULL); + } + } else { + m_oid_locks = NULL; + } + (void)MemoryContextSwitchTo(old); +} + +void GlobalBaseDefCache::Init(int nbucket) +{ + Assert(!m_is_inited); + m_nbuckets = ResizeHashBucket(nbucket, g_instance.global_sysdbcache.dynamic_hash_bucket_strategy); + InitHashTable(); +} + +GlobalBaseEntry *GlobalBaseDefCache::SearchReadOnly(Oid obj_oid, uint32 hash_value) +{ + pg_atomic_fetch_add_u64(m_searches, 1); + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + pthread_rwlock_t *obj_lock = &m_obj_locks[hash_index]; + + int location = INVALID_LOCATION; + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, obj_lock); + GlobalBaseEntry *entry = FindEntryWithIndex(obj_oid, hash_index, &location); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); + if (entry == NULL) { + return NULL; + } + pg_atomic_fetch_add_u64(m_hits, 1); + TopnLruMoveToFront(&entry->cache_elem, m_bucket_list.GetBucket(hash_index), obj_lock, location); + return entry; +} + +template +void GlobalBaseDefCache::FreeDeadEntrys() +{ + while (m_dead_entries.GetLength() > 0) { + Dlelem *elt = m_dead_entries.RemoveHead(); + if (elt == NULL) { + break; + } + GlobalBaseEntry *entry = (GlobalBaseEntry *)DLE_VAL(elt); + if (entry->refcount != 0) { + /* we move the active entry to tail of list and let next call free it */ + m_dead_entries.AddTail(&entry->cache_elem); + break; + } else { + entry->Free(entry); + } + } +} +template void GlobalBaseDefCache::FreeDeadEntrys(); +template void GlobalBaseDefCache::FreeDeadEntrys(); + +template +void GlobalBaseDefCache::Invalidate(Oid dbid, Oid obj_oid) +{ + uint32 hash_value = oid_hash((void *)&(obj_oid), sizeof(Oid)); + pthread_rwlock_t *oid_lock = NULL; + bool need_oid_lock = !is_relation || !IsSystemObjOid(obj_oid); + if (need_oid_lock) { + oid_lock = GetHashValueLock(hash_value); + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, oid_lock); + } + + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + pthread_rwlock_t *obj_lock = &m_obj_locks[hash_index]; + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, obj_lock); + for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL;) { + GlobalBaseEntry *entry = (GlobalBaseEntry *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (entry->oid != obj_oid) 
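/* skip entries for other objects */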
{ + continue; + } + HandleDeadEntry(entry); + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); + if (need_oid_lock) { + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, oid_lock); + } +} +template void GlobalBaseDefCache::Invalidate(Oid dbid, Oid obj_oid); +template void GlobalBaseDefCache::Invalidate(Oid dbid, Oid obj_oid); + +template +void GlobalBaseDefCache::InvalidateRelationNodeListBy(bool (*IsInvalidEntry)(GlobalBaseEntry *)) +{ + for (int hash_index = 0; hash_index < m_nbuckets; hash_index++) { + pthread_rwlock_t *obj_lock = &m_obj_locks[hash_index]; + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, obj_lock); + for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL;) { + GlobalBaseEntry *entry = (GlobalBaseEntry *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (IsInvalidEntry(entry)) { + HandleDeadEntry(entry); + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); + } +} +template void GlobalBaseDefCache::InvalidateRelationNodeListBy(bool (*IsInvalidEntry)(GlobalBaseEntry *)); + +template +void GlobalBaseDefCache::ResetCaches() +{ + for (int hash_index = 0; hash_index < m_nbuckets; hash_index++) { + /* if not force, we are swappingout, oid lock is not needed */ + if (force && !m_is_shared) { + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_oid_locks[hash_index]); + } + + pthread_rwlock_t *obj_lock = &m_obj_locks[hash_index]; + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, obj_lock); + for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt;) { + GlobalBaseEntry *entry = (GlobalBaseEntry *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (force || entry->refcount == 0) { + HandleDeadEntry(entry); + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); + + if (force && !m_is_shared) { + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_oid_locks[hash_index]); + } + } +} +template void GlobalBaseDefCache::ResetCaches(); +template void GlobalBaseDefCache::ResetCaches(); +template void GlobalBaseDefCache::ResetCaches(); +template void GlobalBaseDefCache::ResetCaches(); + + +template +void GlobalBaseDefCache::HandleDeadEntry(GlobalBaseEntry *entry) +{ + RemoveElemFromBucket(entry); + if (entry->refcount == 0) { + m_dead_entries.AddHead(&entry->cache_elem); + } else { + m_dead_entries.AddTail(&entry->cache_elem); + } +} +template void GlobalBaseDefCache::HandleDeadEntry(GlobalBaseEntry *entry); +template void GlobalBaseDefCache::HandleDeadEntry(GlobalBaseEntry *entry); + +GlobalBaseEntry *GlobalBaseDefCache::FindEntryWithIndex(Oid obj_oid, Index hash_index, int *location) +{ + int index = 0; + for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + index++; + GlobalBaseEntry *entry = (GlobalBaseEntry *)DLE_VAL(elt); + if (entry->oid != obj_oid) { + continue; + } + pg_atomic_fetch_add_u64(&entry->refcount, 1); + *location = index; + return entry; + } + return NULL; +} + +bool GlobalBaseDefCache::EntryExist(Oid obj_oid, Index hash_index) +{ + for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + GlobalBaseEntry *entry = (GlobalBaseEntry *)DLE_VAL(elt); + if (entry->oid != obj_oid) { + continue; + } + return true; + } + return false; +} + +template +void GlobalBaseDefCache::RemoveTailElements(Index hash_index) +{ + /* shared db never do lru on tabdef */ + if (m_is_shared) { + return; + } + + /* only one thread can do swapout for the bucket */ + ResourceOwnerEnlargeGlobalIsExclusive(LOCAL_SYSDB_RESOWNER); + if 
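/* CAS 0 -> 1 elects a single sweeper for this bucket */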
(!atomic_compare_exchange_u32(&m_is_swappingouts[hash_index], 0, 1)) { + return; + } + ResourceOwnerRememberGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_swappingouts[hash_index]); + + bool listBelowThreshold = m_bucket_list.GetBucket(hash_index)->dll_len < MAX_GSC_LIST_LENGTH; + + uint64 swapout_count_once = 0; + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_obj_locks[hash_index]); + uint64 max_swapout_count_once = GetSwapOutNum(listBelowThreshold, m_bucket_list.GetBucket(hash_index)->dll_len); + for (Dlelem *elt = DLGetTail(m_bucket_list.GetBucket(hash_index)); elt != NULL;) { + Dlelem *tmp = elt; + elt = DLGetPred(elt); + if (is_relation) { + GlobalRelationEntry *entry = (GlobalRelationEntry *)DLE_VAL(tmp); + if (g_instance.global_sysdbcache.RelationHasSysCache(entry->rel->rd_id) || + unlikely(!RelationHasReferenceCountZero(entry->rel))) { + Assert(entry->type == GLOBAL_RELATION_ENTRY); + DLMoveToFront(&entry->cache_elem); + break; + } + } + HandleDeadEntry((GlobalBaseEntry *)DLE_VAL(tmp)); + swapout_count_once++; + + /* keep elements as many as possible */ + if (swapout_count_once == max_swapout_count_once) { + break; + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_obj_locks[hash_index]); + + Assert(m_is_swappingouts[hash_index] == 1); + atomic_compare_exchange_u32(&m_is_swappingouts[hash_index], 1, 0); + ResourceOwnerForgetGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_swappingouts[hash_index]); +} +template void GlobalBaseDefCache::RemoveTailElements(Index hash_index); +template void GlobalBaseDefCache::RemoveTailElements(Index hash_index); + +template +void GlobalBaseDefCache::RemoveAllTailElements() +{ + /* shared db never do lru on tabdef */ + if (m_is_shared) { + return; + } + for (int hash_index = 0; hash_index < m_nbuckets; hash_index++) { + RemoveTailElements(hash_index); +#ifndef ENABLE_LITE_MODE + /* memory is under control, so stop swapout */ + if (g_instance.global_sysdbcache.MemoryUnderControl()) { + break; + } +#endif + } +} + +template void GlobalBaseDefCache::RemoveAllTailElements(); +template void GlobalBaseDefCache::RemoveAllTailElements(); + +GlobalBaseDefCache::GlobalBaseDefCache(Oid db_oid, bool is_shared, GlobalSysDBCacheEntry *entry, char relkind) +{ + m_db_oid = db_oid; + m_is_shared = is_shared; + m_is_inited = false; + + m_relkind = relkind; + if (m_relkind == PARTTYPE_PARTITIONED_RELATION) { + m_searches = &entry->m_dbstat->part_searches; + m_hits = &entry->m_dbstat->part_hits; + m_newloads = &entry->m_dbstat->part_newloads; + } else { + Assert(m_relkind == RELKIND_RELATION); + m_searches = &entry->m_dbstat->rel_searches; + m_hits = &entry->m_dbstat->rel_hits; + m_newloads = &entry->m_dbstat->rel_newloads; + } + m_base_space = 0; + m_obj_locks = NULL; + m_db_entry = entry; +} \ No newline at end of file diff --git a/src/common/backend/utils/cache/knl_globalbucketlist.cpp b/src/common/backend/utils/cache/knl_globalbucketlist.cpp new file mode 100644 index 000000000..c6367e552 --- /dev/null +++ b/src/common/backend/utils/cache/knl_globalbucketlist.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "utils/atomic.h" +#include "utils/knl_globalbucketlist.h" +#include "utils/knl_globaldbstatmanager.h" + +void GlobalBucketList::Init(int nbuckets) +{ + m_nbuckets = nbuckets; + Assert(m_bucket_entry == NULL); + m_bucket_entry = (BucketEntry *)palloc0(sizeof(BucketEntry) * (uint)m_nbuckets); + DLInitList(&m_active_bucket_list); + SpinLockInit(&m_active_bucket_list_lock); + for (int i = 0; i < nbuckets; i++) { + m_bucket_entry[i].elem.dle_val = &m_bucket_entry[i]; + } + m_elem_count = 0; +} +void GlobalBucketList::AddHeadToBucket(Index hash_index, Dlelem *elem) +{ + Dllist *bucket = &(m_bucket_entry[hash_index].cc_bucket); + /* active bucket */ + if (DLIsNIL(bucket)) { + SpinLockAcquire(&(m_active_bucket_list_lock)); + DLAddHead(&m_active_bucket_list, &m_bucket_entry[hash_index].elem); + SpinLockRelease(&(m_active_bucket_list_lock)); + } + DLAddHead(bucket, elem); + pg_atomic_fetch_add_u64(&m_elem_count, 1); +} + +void GlobalBucketList::RemoveElemFromBucket(Dlelem *elem) +{ + Dllist *bucket = elem->dle_list; + DLRemove(elem); + /* inactive bucket */ + if (DLIsNIL(bucket)) { + BucketEntry *bucket_obj = (BucketEntry *)(bucket); + Assert(&bucket_obj->cc_bucket == bucket); + Assert(DLGetListHdr(&bucket_obj->elem) == &m_active_bucket_list); + SpinLockAcquire(&(m_active_bucket_list_lock)); + DLRemove(&bucket_obj->elem); + SpinLockRelease(&(m_active_bucket_list_lock)); + } + pg_atomic_fetch_sub_u64(&m_elem_count, 1); +} + +/* + * @return INVALID_INDEX means there are no active bucket, no need weedout, otherwise return natural number + */ +Index GlobalBucketList::GetTailBucketIndex() +{ + Index tail_index = INVALID_INDEX; + SpinLockAcquire(&(m_active_bucket_list_lock)); + if (!DLIsNIL(&m_active_bucket_list)) { + Dlelem *elt = DLGetTail(&m_active_bucket_list); + BucketEntry *bucket_entry = (BucketEntry *)DLE_VAL(elt); + tail_index = bucket_entry - m_bucket_entry; + } + SpinLockRelease(&(m_active_bucket_list_lock)); + return tail_index; +} \ No newline at end of file diff --git a/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp b/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp new file mode 100644 index 000000000..437ba36a4 --- /dev/null +++ b/src/common/backend/utils/cache/knl_globaldbstatmanager.cpp @@ -0,0 +1,235 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + */ + +#include "utils/knl_globalsysdbcache.h" +#include "executor/executor.h" + +void GlobalSysDBCacheEntry::Free(GlobalSysDBCacheEntry *entry) +{ + GSC_CACHE2_elog("GlobalSysDBCacheEntry Free with db oid %u", entry->db_id); + /* in case palloc fail */ + for (uint i = 0; i < entry->m_memcxt_nums; i++) { + if (entry->m_mem_cxt_groups[i] != NULL) { + MemoryContextDelete(entry->m_mem_cxt_groups[i]); + } + } + pfree_ext(entry->m_dbstat); + pfree(entry); +} + +void GlobalSysDBCacheEntry::Release() +{ + g_instance.global_sysdbcache.ReleaseGSCEntry(this); +} + +template +void GlobalSysDBCacheEntry::ResetDBCache() +{ + m_systabCache->ResetCatCaches(); + m_tabdefCache->ResetRelCaches(); + if (m_dbOid != InvalidOid) { + m_partdefCache->ResetPartCaches(); + } +} + +void GlobalSysDBCacheEntry::RemoveTailElements() +{ + m_systabCache->RemoveAllTailElements(); + m_tabdefCache->RemoveAllTailElements(); + if (m_dbOid != InvalidOid) { + m_partdefCache->RemoveAllTailElements(); + } +} + +template void GlobalSysDBCacheEntry::ResetDBCache(); +template void GlobalSysDBCacheEntry::ResetDBCache(); + +void GlobalSysDBCacheEntry::MemoryEstimateAdd(uint64 size) +{ + (void)pg_atomic_fetch_add_u64(&m_rough_used_space, size); + (void)pg_atomic_fetch_add_u64(&g_instance.global_sysdbcache.gsc_rough_used_space, size); +} +void GlobalSysDBCacheEntry::MemoryEstimateSub(uint64 size) +{ + (void)pg_atomic_fetch_sub_u64(&m_rough_used_space, size); + (void)pg_atomic_fetch_sub_u64(&g_instance.global_sysdbcache.gsc_rough_used_space, size); +} + +GlobalDBStatManager::GlobalDBStatManager() +{ + m_nbuckets = 0; + m_dbstat_nbuckets= NULL; + m_dbstat_buckets = NULL; + + m_mydb_refs = NULL; + m_mydb_roles = NULL; + m_max_backend_id = 0; + m_backend_ref_lock = NULL; + + m_dbstat_memcxt = NULL; +} + +void GlobalDBStatManager::InitDBStat(int nbuckets, MemoryContext top) +{ + m_dbstat_memcxt = +#ifdef ENABLE_LITE_MODE + AllocSetContextCreate(top, "GlobalSysDBStatCache", ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, + ALLOCSET_SMALL_MAXSIZE, SHARED_CONTEXT); +#else + AllocSetContextCreate(top, "GlobalSysDBStatCache", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE, SHARED_CONTEXT); +#endif + MemoryContext old = MemoryContextSwitchTo(m_dbstat_memcxt); + /* dbstat use lock as dbentry */ + m_nbuckets = nbuckets; + m_dbstat_nbuckets = (int *)palloc0(nbuckets * sizeof(int)); + m_dbstat_buckets = (List**)palloc0(nbuckets * sizeof(List *)); + + m_backend_ref_lock = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t)); + PthreadRwLockInit(m_backend_ref_lock, NULL); +#ifdef ENABLE_LITE_MODE + m_max_backend_id = 64; +#else + m_max_backend_id = 1024; +#endif + m_mydb_refs = (GlobalSysDBCacheEntry **)palloc0(sizeof(GlobalSysDBCacheEntry *) * m_max_backend_id); + m_mydb_roles = (int *)palloc0(sizeof(int) * m_max_backend_id); + + (void)MemoryContextSwitchTo(old); +} + +void GlobalDBStatManager::RecordSwapOutDBEntry(GlobalSysDBCacheEntry *entry) +{ + List *cur_dbstat_bucket = m_dbstat_buckets[entry->m_hash_index]; + ListCell *lc; + foreach(lc, cur_dbstat_bucket) { + GlobalSysCacheStat *cur_dbstat = (GlobalSysCacheStat *)lfirst(lc); + if (cur_dbstat->db_oid == entry->m_dbOid) { + cur_dbstat->tup_searches += entry->m_dbstat->tup_searches; + cur_dbstat->tup_hits += entry->m_dbstat->tup_hits; + cur_dbstat->tup_newloads += entry->m_dbstat->tup_newloads; + + cur_dbstat->rel_searches += entry->m_dbstat->rel_searches; + 
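/* relcache and partcache counters fold into the retained record the same way */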
cur_dbstat->rel_hits += entry->m_dbstat->rel_hits; + cur_dbstat->rel_newloads += entry->m_dbstat->rel_newloads; + + cur_dbstat->part_searches += entry->m_dbstat->part_searches; + cur_dbstat->part_hits += entry->m_dbstat->part_hits; + cur_dbstat->part_newloads += entry->m_dbstat->part_newloads; + + cur_dbstat->swapout_count += 1; + + break; + } + } + if (lc != NULL) { + return; + } + MemoryContext old = MemoryContextSwitchTo(m_dbstat_memcxt); + m_dbstat_buckets[entry->m_hash_index] = lappend(m_dbstat_buckets[entry->m_hash_index], entry->m_dbstat); + entry->m_dbstat = NULL; + (void)MemoryContextSwitchTo(old); +} + +void GlobalDBStatManager::DropDB(Oid db_oid, Index hash_index) +{ + GlobalSysCacheStat *cur_dbstat = NULL; + + ListCell *lc = NULL; + foreach(lc, m_dbstat_buckets[hash_index]) { + cur_dbstat = (GlobalSysCacheStat *)lfirst(lc); + if (cur_dbstat->db_oid == db_oid) { + m_dbstat_buckets[hash_index] = list_delete_ptr(m_dbstat_buckets[hash_index], cur_dbstat); + pfree_ext(cur_dbstat); + break; + } + } +} + +void GlobalDBStatManager::GetDBStat(GlobalSysCacheStat *db_stat) +{ + List *cur_dbstat_bucket = m_dbstat_buckets[db_stat->hash_index]; + ListCell *lc = NULL; + foreach(lc, cur_dbstat_bucket) { + GlobalSysCacheStat *cur_dbstat = (GlobalSysCacheStat *)lfirst(lc); + if (cur_dbstat->db_oid == db_stat->db_oid) { + db_stat->tup_searches += cur_dbstat->tup_searches; + db_stat->tup_hits += cur_dbstat->tup_hits; + db_stat->tup_newloads += cur_dbstat->tup_newloads; + + db_stat->rel_searches += cur_dbstat->rel_searches; + db_stat->rel_hits += cur_dbstat->rel_hits; + db_stat->rel_newloads += cur_dbstat->rel_newloads; + + db_stat->part_searches += cur_dbstat->part_searches; + db_stat->part_hits += cur_dbstat->part_hits; + db_stat->part_newloads += cur_dbstat->part_newloads; + + db_stat->swapout_count = cur_dbstat->swapout_count; + break; + } + } +} + + +bool GlobalDBStatManager::IsDBUsedByProc(GlobalSysDBCacheEntry *entry) +{ + Assert(entry->m_dbOid != InvalidOid); + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); + bool used = false; + for (int i = 0; i < (int)m_max_backend_id; i++) { + if (m_mydb_refs[i] == entry) { + used = true; + break; + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); + return used; +} + +void GlobalDBStatManager::RepallocThreadEntryArray(Oid backend_id) +{ + while ((uint)backend_id >= m_max_backend_id) { + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); + if ((uint)backend_id < m_max_backend_id) { + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); + return; + } + + GSC_CACHE3_elog("MyBackendId %d is greater equal than m_max_backend_id %u", backend_id, m_max_backend_id); + uint max_backend_id = m_max_backend_id << 1; + int len_refs = sizeof(GlobalSysDBCacheEntry *) * max_backend_id; + m_mydb_refs = (GlobalSysDBCacheEntry **)repalloc(m_mydb_refs, len_refs); + int len_roles = sizeof(int) * max_backend_id; + m_mydb_roles = (int *)repalloc(m_mydb_roles, len_roles); + errno_t rc = memset_s(m_mydb_refs + m_max_backend_id, len_refs >> 1, 0, len_refs >> 1); + securec_check(rc, "\0", "\0"); + rc = memset_s(m_mydb_roles + m_max_backend_id, len_roles >> 1, 0, len_roles >> 1); + securec_check(rc, "\0", "\0"); + m_max_backend_id = max_backend_id; + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); + } +} + +void GlobalDBStatManager::ThreadHoldDB(GlobalSysDBCacheEntry *db) +{ + RepallocThreadEntryArray(t_thrd.proc_cxt.MyBackendId); + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); + BackendId 
MyBackendId = t_thrd.proc_cxt.MyBackendId; + m_mydb_refs[MyBackendId] = db; + m_mydb_roles[MyBackendId] = t_thrd.role; + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_backend_ref_lock); +} \ No newline at end of file diff --git a/src/common/backend/utils/cache/knl_globalpartdefcache.cpp b/src/common/backend/utils/cache/knl_globalpartdefcache.cpp new file mode 100644 index 000000000..4573ac53a --- /dev/null +++ b/src/common/backend/utils/cache/knl_globalpartdefcache.cpp @@ -0,0 +1,120 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ + +#include "executor/executor.h" +#include "miscadmin.h" +#include "utils/knl_catcache.h" +#include "utils/knl_globaldbstatmanager.h" +#include "utils/knl_globalpartdefcache.h" +#include "utils/knl_globaltabdefcache.h" +#include "utils/knl_partcache.h" +#include "utils/memutils.h" + +static void PartitionPointerToNULL(Partition part) +{ + part->pd_indexattr = NULL; + part->pd_indexlist = NULL; + part->pd_part = NULL; + part->pd_smgr = NULL; + part->rd_options = NULL; + part->pd_pgstat_info = NULL; +} + +void CopyPartitionData(Partition dest_partition, Partition src_partition) +{ + /* if you add variable to partition, please check if you need put it in gsc, + * if not, set it zero when copy, and reinit it when local get the copy result + * if the variable changed, there is no lock and no part inval msg, + * set it zero and reinit it when copy into local */ + Assert(sizeof(PartitionData) == 168); + *dest_partition = *src_partition; + /* init all pointers to NULL, so we can free memory correctly when meeting exception */ + PartitionPointerToNULL(dest_partition); + dest_partition->pd_indexattr = bms_copy(src_partition->pd_indexattr); + dest_partition->pd_indexlist = list_copy(src_partition->pd_indexlist); + Assert(src_partition->pd_refcnt == 0); + dest_partition->pd_refcnt = 0; + + /* We just copy fixed field */ + dest_partition->pd_part = (Form_pg_partition)palloc(PARTITION_TUPLE_SIZE); + memcpy_s(dest_partition->pd_part, PARTITION_TUPLE_SIZE, src_partition->pd_part, PARTITION_TUPLE_SIZE); + + dest_partition->pd_smgr = NULL; + Assert(src_partition->pd_isvalid); + dest_partition->pd_isvalid = true; + dest_partition->rd_options = CopyOption(src_partition->rd_options); + Assert(src_partition->pd_pgstat_info == NULL); + dest_partition->pd_pgstat_info = NULL; +} + +void GlobalPartDefCache::Insert(Partition part, uint32 hash_value) +{ + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + /* dllist is too long, swapout some */ + if (m_bucket_list.GetBucket(hash_index)->dll_len >= MAX_GSC_LIST_LENGTH) { + GlobalBaseDefCache::RemoveTailElements(hash_index); + /* maybe no element can be swappedout */ + return; + } + + pthread_rwlock_t *obj_lock = &m_obj_locks[hash_index]; + GlobalPartitionEntry *entry = CreateEntry(part); + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, obj_lock); + bool found = GlobalBaseDefCache::EntryExist(part->pd_id, hash_index); + 
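/* another thread may have inserted this partition while we built our copy; if so, discard ours */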
if (found) { + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); + entry->Free(entry); + return; + } + GlobalBaseDefCache::AddHeadToBucket(hash_index, entry); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock); + pg_atomic_fetch_add_u64(m_newloads, 1); +} + +GlobalPartitionEntry *GlobalPartDefCache::CreateEntry(Partition part) +{ + ResourceOwnerEnlargeGlobalBaseEntry(LOCAL_SYSDB_RESOWNER); + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + GlobalPartitionEntry *entry = (GlobalPartitionEntry *)palloc(sizeof(GlobalPartitionEntry)); + entry->type = GLOBAL_PARTITION_ENTRY; + entry->oid = part->pd_id; + entry->refcount = 0; + entry->part = NULL; + DLInitElem(&entry->cache_elem, (void *)entry); + ResourceOwnerRememberGlobalBaseEntry(LOCAL_SYSDB_RESOWNER, entry); + entry->part = (Partition)palloc0(sizeof(PartitionData)); + CopyPartitionData(entry->part, part); + ResourceOwnerForgetGlobalBaseEntry(LOCAL_SYSDB_RESOWNER, entry); + MemoryContextSwitchTo(old); + return entry; +} + +void GlobalPartDefCache::Init() +{ + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + GlobalBaseDefCache::Init(GLOBAL_INIT_PARTCACHE_SIZE); + MemoryContextSwitchTo(old); + m_is_inited = true; +} + +GlobalPartDefCache::GlobalPartDefCache(Oid dbOid, bool isShared, struct GlobalSysDBCacheEntry *entry) + : GlobalBaseDefCache(dbOid, isShared, entry, PARTTYPE_PARTITIONED_RELATION) +{ + m_is_inited = false; +} + +template void GlobalPartDefCache::ResetPartCaches(); +template void GlobalPartDefCache::ResetPartCaches(); \ No newline at end of file diff --git a/src/common/backend/utils/cache/knl_globalrelmapcache.cpp b/src/common/backend/utils/cache/knl_globalrelmapcache.cpp new file mode 100644 index 000000000..450282c0b --- /dev/null +++ b/src/common/backend/utils/cache/knl_globalrelmapcache.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
diff --git a/src/common/backend/utils/cache/knl_globalrelmapcache.cpp b/src/common/backend/utils/cache/knl_globalrelmapcache.cpp
new file mode 100644
index 000000000..450282c0b
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_globalrelmapcache.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "executor/executor.h"
+#include "knl/knl_session.h"
+#include "knl/knl_thread.h"
+#include "utils/knl_globalrelmapcache.h"
+#include "utils/relmapper.h"
+#include "postgres.h"
+#include "securec.h"
+#include "storage/lock/lwlock.h"
+#include "storage/smgr/fd.h"
+
+GlobalRelMapCache::GlobalRelMapCache(Oid dbOid, bool shared)
+{
+    m_dbOid = dbOid;
+    m_isInited = false;
+    m_isShared = shared;
+}
+
+GlobalRelMapCache::~GlobalRelMapCache()
+{
+    m_isInited = false;
+}
+
+/* RelMapCache initialization phase 1 */
+void GlobalRelMapCache::Init()
+{
+    errno_t rc = memset_s(&m_relmap, sizeof(RelMapFile), 0, sizeof(RelMapFile));
+    securec_check(rc, "\0", "\0");
+
+    PthreadRwLockInit(&m_lock, NULL);
+}
+
+/* RelMapCache initialization phase 2 */
+void GlobalRelMapCache::InitPhase2()
+{
+    if (m_isInited) {
+        return;
+    }
+
+    LWLockAcquire(RelationMappingLock, LW_SHARED);
+
+    PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_lock);
+    if (!m_isInited) {
+        load_relmap_file(m_isShared, &m_relmap);
+        pg_memory_barrier();
+
+        /* mark the relmap cache as initialized once the relmap is loaded */
+        m_isInited = true;
+    }
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_lock);
+
+    LWLockRelease(RelationMappingLock);
+}
+
+void GlobalRelMapCache::UpdateBy(RelMapFile *rel_map)
+{
+    /* the caller already holds RelationMappingLock in exclusive mode */
+    if (!m_isInited) {
+        return;
+    }
+
+    /*
+     * No need to take RelationMappingLock ourselves: the only call site runs
+     * after the caller has acquired it. TODO: refine this locking protocol.
+     */
+    PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_lock);
+
+    /* we don't validate the magic of rel_map here */
+    errno_t rc = memcpy_s(&m_relmap, sizeof(RelMapFile), rel_map, sizeof(RelMapFile));
+    securec_check(rc, "\0", "\0");
+
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_lock);
+}
+
+void GlobalRelMapCache::CopyInto(RelMapFile *dest_relmap)
+{
+    Assert(m_isInited);
+
+    /* obtain the lock and do the copy */
+    PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_lock);
+
+    /* crc and pad need not be copied;
+     * the caller must pass a zero-filled dest_relmap */
+    errno_t rc = memcpy_s(dest_relmap, sizeof(RelMapFile), &m_relmap,
+        offsetof(RelMapFile, mappings) + m_relmap.num_mappings * sizeof(RelMapping));
+    securec_check(rc, "\0", "\0");
+
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_lock);
+}
\ No newline at end of file
diff --git a/src/common/backend/utils/cache/knl_globalsysdbcache.cpp b/src/common/backend/utils/cache/knl_globalsysdbcache.cpp
new file mode 100644
index 000000000..3c4b6d562
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_globalsysdbcache.cpp
@@ -0,0 +1,1280 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "utils/knl_globalsysdbcache.h"
+#include "utils/memutils.h"
+#include "utils/builtins.h"
+#include "access/xlog.h"
+#include "catalog/pg_authid.h"
+#include "catalog/pg_auth_members.h"
+#include "catalog/pg_database.h"
+#include "catalog/pg_pltemplate.h"
+#include "catalog/pg_shdescription.h"
+#include "catalog/pg_shdepend.h"
+#include "catalog/pg_shseclabel.h"
+#include "catalog/pg_auth_history.h"
+#include "catalog/pg_user_status.h"
+#include "catalog/pgxc_group.h"
+#include "catalog/pg_workload_group.h"
+#include "catalog/pg_app_workloadgroup_mapping.h"
+#include "catalog/gs_global_config.h"
+#include "catalog/pg_db_role_setting.h"
+#include "catalog/pg_job.h"
+#include "catalog/pg_job_proc.h"
+#include "catalog/pg_extension_data_source.h"
+#include "catalog/gs_obsscaninfo.h"
+#include "catalog/indexing.h"
+#include "catalog/toasting.h"
+#include "catalog/pg_am.h"
+#include "postgres.h"
+#include "executor/executor.h"
+#include "utils/knl_globalsystupcache.h"
+#include "utils/knl_catcache.h"
+#include "knl/knl_session.h"
+#include "utils/syscache.h"
+#include "storage/proc.h"
+#include "funcapi.h"
+#include "commands/dbcommands.h"
+
+bool atomic_compare_exchange_u32(volatile uint32* ptr, uint32 expected, uint32 newval)
+{
+    Assert((expected == 0 && newval == 1) || (expected == 1 && newval == 0));
+    uint32 current = expected;
+    bool ret = pg_atomic_compare_exchange_u32(ptr, &current, newval);
+    Assert((ret && current == expected) || (!ret && current != expected));
+    Assert(expected == 0 || ret);
+    return ret;
+}
+
+void TopnLruMoveToFront(Dlelem *e, Dllist *list, pthread_rwlock_t *lock, int location)
+{
+    if (location <= GLOBAL_BUCKET_DEFAULT_TOP_N) {
+        return;
+    }
+    if (PthreadRWlockTryWrlock(LOCAL_SYSDB_RESOWNER, lock) != 0) {
+        return;
+    }
+    if (DLGetListHdr(e) == list) {
+        DLMoveToFront(e);
+    }
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, lock);
+}
+
+void GlobalSysDBCache::ReleaseGSCEntry(GlobalSysDBCacheEntry *entry)
+{
+    Assert(entry != NULL && entry->m_dbOid != InvalidOid);
+    Assert(entry->m_hash_index == HASH_INDEX(oid_hash((void *)&(entry->m_dbOid),
+        sizeof(Oid)), m_nbuckets));
+    Assert(entry->m_refcount > 0);
+
+    /* decrement the reference count */
+    pg_atomic_fetch_sub_u64(&entry->m_refcount, 1);
+}
+
+void GlobalSysDBCache::RemoveElemFromBucket(GlobalSysDBCacheEntry *entry)
+{
+    /* the shared db entry is never removed */
+    Assert(entry->m_dbOid != InvalidOid);
+    m_bucket_list.RemoveElemFromBucket(&entry->m_cache_elem);
+    m_dbstat_manager.RecordSwapOutDBEntry(entry);
+}
+
+void GlobalSysDBCache::AddHeadToBucket(Index hash_index, GlobalSysDBCacheEntry *entry)
+{
+    m_bucket_list.AddHeadToBucket(hash_index, &entry->m_cache_elem);
+
+    /* account for the hashtable element itself */
+    entry->MemoryEstimateAdd(GLOBAL_DB_MEMORY_MIN);
+    m_dbstat_manager.ThreadHoldDB(entry);
+}
+
+void GlobalSysDBCache::HandleDeadDB(GlobalSysDBCacheEntry *entry)
+{
+    RemoveElemFromBucket(entry);
+    if (entry->m_refcount == 0) {
+        m_dead_dbs.AddHead(&entry->m_cache_elem);
+    } else {
+        m_dead_dbs.AddTail(&entry->m_cache_elem);
+    }
+}
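GSCMemThresholdCheck further down guards its sweep with atomic_compare_exchange_u32 so that only one thread performs the cleanup at a time. A standalone model of that 0/1 gate, using std::atomic in place of the pg_atomic_* primitives (a hypothetical example, not part of this patch):

```cpp
#include <atomic>
#include <cstdint>

std::atomic<uint32_t> g_is_checking{0};

void MaybeRunSweep()
{
    uint32_t expected = 0;
    // only the thread that flips 0 -> 1 proceeds; everyone else returns at once
    if (!g_is_checking.compare_exchange_strong(expected, 1)) {
        return;
    }
    // ... perform the memory sweep ...
    g_is_checking.store(0); // reopen the gate for the next overflow
}
```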
GSC_CACHE1_elog("GlobalSysDBCacheEntry used can not be freed"); + /* clear memory, this proc may exit, and forget to call releasedb */ + dbEntry->ResetDBCache(); + /* we move the active entry to tail of list and let next call free it */ + m_dead_dbs.AddTail(&dbEntry->m_cache_elem); + break; + } else { + /* sub all to delete, make sure no one use the entry */ + dbEntry->MemoryEstimateSub(dbEntry->m_rough_used_space); + Assert(dbEntry->m_rough_used_space == 0); + dbEntry->Free(dbEntry); + } + } +} + +GlobalSysDBCacheEntry *GlobalSysDBCache::FindGSCEntryWithoutLock(Oid db_id, Index hash_index, int *location) +{ + int index = 0; + for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + index++; + GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt); + if (entry->m_dbOid != db_id) { + continue; + } + pg_atomic_fetch_add_u64(&entry->m_refcount, 1); + *location = index; + return entry; + } + return NULL; +} + +GlobalSysDBCacheEntry *GlobalSysDBCache::SearchGSCEntry(Oid db_id, Index hash_index, char *db_name) +{ + int location = INVALID_LOCATION; + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + GlobalSysDBCacheEntry *existDbEntry = FindGSCEntryWithoutLock(db_id, hash_index, &location); + if (existDbEntry != NULL) { + m_dbstat_manager.ThreadHoldDB(existDbEntry); + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + if (existDbEntry != NULL) { + TopnLruMoveToFront(&existDbEntry->m_cache_elem, m_bucket_list.GetBucket(hash_index), &m_db_locks[hash_index], + location); + return existDbEntry; + } + + /* create existDbEntry is a simple operator, so put the code in the write lock is ok */ + GlobalSysDBCacheEntry *newDbEntry = CreateGSCEntry(db_id, hash_index, db_name); + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + + /* otherwise other thread insert one db before */ + existDbEntry = FindGSCEntryWithoutLock(db_id, hash_index, &location); + if (existDbEntry != NULL) { + m_dbstat_manager.ThreadHoldDB(existDbEntry); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + newDbEntry->Free(newDbEntry); + return existDbEntry; + } + + AddHeadToBucket(hash_index, newDbEntry); + newDbEntry->m_refcount = 1; + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + return newDbEntry; +} + +GlobalSysDBCacheEntry *GlobalSysDBCache::GetGSCEntry(Oid db_id, char *db_name) +{ + Assert(db_id != InvalidOid); + uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid)); + Index hash_index = HASH_INDEX(hash_value, m_nbuckets); + GlobalSysDBCacheEntry *entry = SearchGSCEntry(db_id, hash_index, db_name); + + Refresh(entry); + return entry; +} + +GlobalSysDBCacheEntry *GlobalSysDBCache::GetSharedGSCEntry() +{ + if (unlikely(m_global_shared_db_entry->GetDBUsedSpace() > GLOBAL_DB_MEMORY_MAX)) { + m_global_shared_db_entry->ResetDBCache(); + } + + return m_global_shared_db_entry; +} + +void GlobalSysDBCache::ReleaseTempGSCEntry(GlobalSysDBCacheEntry *entry) +{ + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + ResourceOwnerForgetGlobalDBEntry(owner, entry); + pg_atomic_fetch_sub_u64(&entry->m_refcount, 1); + PthreadRWlockUnlock(owner, &m_db_locks[entry->m_hash_index]); +} + +GlobalSysDBCacheEntry *GlobalSysDBCache::FindTempGSCEntry(Oid db_id) +{ + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + ResourceOwnerEnlargeGlobalDBEntry(owner); + uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid)); + Index hash_index = HASH_INDEX(hash_value, m_nbuckets); + int location = 
+
+GlobalSysDBCacheEntry *GlobalSysDBCache::FindTempGSCEntry(Oid db_id)
+{
+    ResourceOwner owner = LOCAL_SYSDB_RESOWNER;
+    ResourceOwnerEnlargeGlobalDBEntry(owner);
+    uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid));
+    Index hash_index = HASH_INDEX(hash_value, m_nbuckets);
+    int location = INVALID_LOCATION;
+    PthreadRWlockRdlock(owner, &m_db_locks[hash_index]);
+    GlobalSysDBCacheEntry *exist_db = FindGSCEntryWithoutLock(db_id, hash_index, &location);
+    if (exist_db == NULL) {
+        PthreadRWlockUnlock(owner, &m_db_locks[hash_index]);
+        return NULL;
+    }
+    Assert(exist_db->m_hash_index == hash_index);
+    ResourceOwnerRememberGlobalDBEntry(owner, exist_db);
+    return exist_db;
+}
+
+void GlobalSysDBCache::DropDB(Oid db_id, bool need_clear)
+{
+    Assert(db_id != InvalidOid);
+    uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid));
+    Index hash_index = HASH_INDEX(hash_value, m_nbuckets);
+    PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]);
+    for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) {
+        GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt);
+        if (entry->m_dbOid != db_id) {
+            continue; /* skip other databases */
+        }
+        /* refcount can be ignored here: every DropDB caller guarantees that no other active backend
+         * still accesses this db; any new session touching a dead db reports FATAL and retries */
+        entry->m_isDead = true;
+        HandleDeadDB(entry);
+        if (need_clear) {
+            m_dbstat_manager.DropDB(db_id, hash_index);
+        }
+        break;
+    }
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]);
+    FreeDeadDBs();
+}
+
+static void InitGlobalSysDBCacheEntry(GlobalSysDBCacheEntry *entry, MemoryContext entry_parent, Oid db_id,
+    Index hash_index, char *db_name)
+{
+    entry->m_refcount = 0;
+    entry->m_isDead = false;
+    entry->m_hash_value = oid_hash((void *)&(db_id), sizeof(Oid));
+    entry->m_hash_index = hash_index;
+    entry->m_dbOid = db_id;
+    entry->m_dbstat =
+        (GlobalSysCacheStat *)MemoryContextAllocZero(entry_parent, sizeof(GlobalSysCacheStat));
+    entry->m_dbstat->hash_index = hash_index;
+    entry->m_dbstat->db_oid = db_id;
+    /* the following code may raise an exception (e.g. an error report), which will be escalated to FATAL */
+    const int MAX_CXT_NAME = 100;
+    char cxtname[MAX_CXT_NAME];
+    errno_t rc = sprintf_s(cxtname, MAX_CXT_NAME, "%s_%u", "GlobalSysDBCacheEntryMemCxt", db_id);
+    securec_check_ss(rc, "\0", "\0");
+    for (uint32 i = 0; i < entry->m_memcxt_nums; i++) {
+        entry->m_mem_cxt_groups[i] =
+#ifdef ENABLE_LITE_MODE
+            AllocSetContextCreate(entry_parent, cxtname, ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE,
+                ALLOCSET_SMALL_MAXSIZE, SHARED_CONTEXT);
+#else
+            AllocSetContextCreate(entry_parent, cxtname, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE,
+                ALLOCSET_DEFAULT_MAXSIZE, SHARED_CONTEXT);
+#endif
+    }
+    entry->m_dbName = MemoryContextStrdup(entry->m_mem_cxt_groups[0], db_name);
+
+    /* allocate global CatCache memory */
+    entry->m_systabCache = New(entry->m_mem_cxt_groups[0]) GlobalSysTabCache(db_id,
+        db_id == InvalidOid, entry);
+    entry->m_systabCache->Init();
+
+    /* allocate global RelCache memory */
+    entry->m_tabdefCache = New(entry->m_mem_cxt_groups[0]) GlobalTabDefCache(db_id,
+        db_id == InvalidOid, entry);
+    entry->m_tabdefCache->Init();
+
+    /* the shared db entry keeps no partdef cache for now; TODO: refine */
+    if (db_id == InvalidOid) {
+        entry->m_partdefCache = NULL;
+    } else {
+        entry->m_partdefCache = New(entry->m_mem_cxt_groups[0]) GlobalPartDefCache(db_id,
+            db_id == InvalidOid, entry);
+        entry->m_partdefCache->Init();
+    }
+
+    /* allocate global RelmapCache */
+    entry->m_relmapCache = New(entry->m_mem_cxt_groups[0]) GlobalRelMapCache(db_id,
+        db_id == InvalidOid);
+    entry->m_relmapCache->Init();
+}
+
+/**
+ * Assign an appropriate number of memory contexts to each db according to the number of active dbs.
+ * With few dbs and high concurrency, palloc contention per db is high, so more memory contexts are needed;
+ * with many dbs, contention inside any single db is low, so fewer memory contexts suffice.
+ **/
+static uint32 GetMemoryContextNum(uint32 active_db_num)
+{
+    uint32 memcxt_nums;
+    if (active_db_num < 16) {
+        memcxt_nums = 16;
+    } else if (active_db_num < 64) {
+        memcxt_nums = 8;
+    } else {
+        memcxt_nums = 4;
+    }
+    return memcxt_nums;
+}
+
+GlobalSysDBCacheEntry *GlobalSysDBCache::CreateGSCEntry(Oid db_id, Index hash_index, char *db_name)
+{
+    Assert(hash_index == HASH_INDEX(oid_hash((void *)&(db_id), sizeof(Oid)), m_nbuckets));
+    ResourceOwner owner = LOCAL_SYSDB_RESOWNER;
+    Assert(t_thrd.lsc_cxt.lsc != NULL);
+    Assert(owner != NULL);
+    ResourceOwnerEnlargeGlobalDBEntry(owner);
+    uint32 memcxt_nums = GetMemoryContextNum(m_bucket_list.GetActiveElementCount());
+
+    GlobalSysDBCacheEntry *entry =
+        (GlobalSysDBCacheEntry *)MemoryContextAllocZero(m_global_sysdb_mem_cxt,
+            offsetof(GlobalSysDBCacheEntry, m_mem_cxt_groups) + memcxt_nums * sizeof(MemoryContext));
+    entry->m_memcxt_nums = memcxt_nums;
+    ResourceOwnerRememberGlobalDBEntry(owner, entry);
+    InitGlobalSysDBCacheEntry(entry, m_global_sysdb_mem_cxt, db_id, hash_index, db_name);
+    ResourceOwnerForgetGlobalDBEntry(owner, entry);
+    DLInitElem(&entry->m_cache_elem, (void *)entry);
+
+    GSC_CACHE2_elog("GlobalSysDBCacheEntry Create with db oid %u", db_id);
+    return entry;
+}
+
+GlobalSysDBCacheEntry *GlobalSysDBCache::CreateSharedGSCEntry()
+{
+    Oid db_id = InvalidOid;
+    uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid));
+    Index hash_index = HASH_INDEX(hash_value, m_nbuckets);
+    uint32 memcxt_nums = GetMemoryContextNum(0);
+    GlobalSysDBCacheEntry *entry =
+        (GlobalSysDBCacheEntry *)MemoryContextAllocZero(m_global_sysdb_mem_cxt,
+            offsetof(GlobalSysDBCacheEntry, m_mem_cxt_groups) + memcxt_nums * sizeof(MemoryContext));
+    entry->m_memcxt_nums = memcxt_nums;
+    InitGlobalSysDBCacheEntry(entry, m_global_sysdb_mem_cxt, db_id, hash_index, "");
+
+    DLInitElem(&entry->m_cache_elem, (void *)entry);
+    return entry;
+}
+
+void GlobalSysDBCache::CalcDynamicHashBucketStrategy()
+{
+    uint64 expect_max_db_count = EXPECT_MAX_DB_COUNT;
+    uint64 rel_db_count = m_bucket_list.GetActiveElementCount();
+    if (rel_db_count < expect_max_db_count) {
+        dynamic_hash_bucket_strategy = DynamicHashBucketDefault;
+        return;
+    }
+
+    if (rel_db_count <= (expect_max_db_count << 1)) {
+        dynamic_hash_bucket_strategy = DynamicHashBucketHalf;
+        return;
+    }
+    if (rel_db_count <= (expect_max_db_count << 2)) {
+        dynamic_hash_bucket_strategy = DynamicHashBucketQuarter;
+        return;
+    }
+    if (rel_db_count <= (expect_max_db_count << 3)) {
+        dynamic_hash_bucket_strategy = DynamicHashBucketEighth;
+        return;
+    }
+    dynamic_hash_bucket_strategy = DynamicHashBucketMin;
+}
+
+DynamicGSCMemoryLevel GlobalSysDBCache::CalcDynamicGSCMemoryLevel(uint64 total_space)
+{
+    if (total_space < SAFETY_GSC_MEMORY_SPACE) {
+        return DynamicGSCMemoryLow;
+    }
+
+    if (total_space < REAL_GSC_MEMORY_SPACE) {
+        return DynamicGSCMemoryHigh;
+    }
+
+    if (total_space < MAX_GSC_MEMORY_SPACE) {
+        return DynamicGSCMemoryOver;
+    }
+    return DynamicGSCMemoryOutOfControl;
+}
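For a quick intuition of the sizing policy in GetMemoryContextNum, here is a tiny standalone program (hypothetical, mirroring the thresholds above) that prints how many memory contexts a db entry would get:

```cpp
#include <cstdio>
#include <initializer_list>

// mirrors GetMemoryContextNum: fewer active databases get more
// memory contexts so palloc contention is spread more widely
static unsigned ContextsFor(unsigned active_dbs)
{
    if (active_dbs < 16) {
        return 16;
    }
    if (active_dbs < 64) {
        return 8;
    }
    return 4;
}

int main()
{
    for (unsigned n : {1u, 15u, 16u, 63u, 64u, 200u}) {
        std::printf("%3u active dbs -> %u memory contexts\n", n, ContextsFor(n));
    }
    return 0;
}
```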
&m_is_memorychecking); + + CalcDynamicHashBucketStrategy(); + DynamicGSCMemoryLevel memory_level = CalcDynamicGSCMemoryLevel(gsc_rough_used_space); + + Index last_hash_index = m_swapout_hash_index; + switch (memory_level) { + /* clean all to recycle memory */ + case DynamicGSCMemoryOutOfControl: + while (last_hash_index < (Index)m_nbuckets) { + SwapoutGivenDBInstance(last_hash_index, ALL_DB_OID); + SwapOutGivenDBContent(last_hash_index, ALL_DB_OID, memory_level); + last_hash_index++; +#ifndef ENABLE_LITE_MODE + if (MemoryUnderControl()) { + break; + } +#endif + } + break; + /* swapout all to recycle memory */ + case DynamicGSCMemoryOver: + /* fall through */ + + /* swapout one to recycle memory */ + case DynamicGSCMemoryHigh: + while (last_hash_index < (Index)m_nbuckets) { +#ifdef ENABLE_LITE_MODE + SwapoutGivenDBInstance(last_hash_index, ALL_DB_OID); +#endif + SwapOutGivenDBContent(last_hash_index, ALL_DB_OID, memory_level); + last_hash_index++; +#ifndef ENABLE_LITE_MODE + if (MemoryUnderControl()) { + break; + } +#endif + } + break; + case DynamicGSCMemoryLow: + /* memory is enough, needn't swapout */ + break; + } + m_swapout_hash_index = last_hash_index % ((Index)m_nbuckets); + + Assert(m_is_memorychecking == 1); + atomic_compare_exchange_u32(&m_is_memorychecking, 1, 0); + ResourceOwnerForgetGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_memorychecking); + FreeDeadDBs(); +} + +void GlobalSysDBCache::InvalidAllRelations() +{ + for (Index hash_index = 0; hash_index < (Index)m_nbuckets; hash_index++) { + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + for (Dlelem *elt = DLGetTail(m_bucket_list.GetBucket(hash_index)); elt != NULL;) { + GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt); + elt = DLGetPred(elt); + entry->m_tabdefCache->ResetRelCaches(); + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + } + m_global_shared_db_entry->m_tabdefCache->ResetRelCaches(); +} + +void GlobalSysDBCache::SwapOutGivenDBContent(Index hash_index, Oid db_id, DynamicGSCMemoryLevel mem_level) +{ + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + for (Dlelem *elt = DLGetTail(m_bucket_list.GetBucket(hash_index)); elt != NULL;) { + GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt); + elt = DLGetPred(elt); + if (db_id != (Oid)ALL_DB_OID && entry->m_dbOid != db_id) { + continue; + } + + if (mem_level == DynamicGSCMemoryOutOfControl) { + /* we need drop the element, but dbentry is special, we clean it instead */ + entry->ResetDBCache(); + } else if (mem_level == DynamicGSCMemoryOver) { + entry->RemoveTailElements(); + } else if (mem_level == DynamicGSCMemoryHigh) { + if (entry->GetDBUsedSpace() > GLOBAL_DB_MEMORY_MAX) { + entry->RemoveTailElements(); + /* memory is under control, swapout one db is enough */ + break; + } + } else { + Assert(false); + } +#ifndef ENABLE_LITE_MODE + /* we are not in gs_gsc_clean, so break when memory is under control */ + if (mem_level != DynamicGSCMemoryOutOfControl && MemoryUnderControl()) { + break; + } +#endif + + /* clean given db only */ + if (db_id != (Oid)ALL_DB_OID) { + break; + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); +} + +void GlobalSysDBCache::SwapoutGivenDBInstance(Index hash_index, Oid db_id) +{ + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]); + for (Dlelem *elt = DLGetTail(m_bucket_list.GetBucket(hash_index)); elt != NULL;) { + GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt); + elt = 
+
+void GlobalSysDBCache::SwapoutGivenDBInstance(Index hash_index, Oid db_id)
+{
+    PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]);
+    for (Dlelem *elt = DLGetTail(m_bucket_list.GetBucket(hash_index)); elt != NULL;) {
+        GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt);
+        elt = DLGetPred(elt);
+        if (db_id != (Oid)ALL_DB_OID && entry->m_dbOid != db_id) {
+            continue;
+        }
+
+        if (entry->m_refcount == 0) {
+            GSC_CACHE2_elog("GlobalSysDBCacheEntry weeded out with db oid %u", entry->m_dbOid);
+            HandleDeadDB(entry);
+        }
+
+        /* clean the given db only */
+        if (db_id != (Oid)ALL_DB_OID) {
+            break;
+        }
+    }
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]);
+}
+
+void GlobalSysDBCache::InitRelStoreInSharedFlag()
+{
+    errno_t rc = memset_s(m_rel_store_in_shared, FirstBootstrapObjectId, 0, FirstBootstrapObjectId);
+    securec_check(rc, "\0", "\0");
+    m_rel_store_in_shared[AuthIdRelationId] = true;
+    m_rel_store_in_shared[AuthMemRelationId] = true;
+    m_rel_store_in_shared[DatabaseRelationId] = true;
+    m_rel_store_in_shared[PLTemplateRelationId] = true;
+    m_rel_store_in_shared[SharedDescriptionRelationId] = true;
+    m_rel_store_in_shared[SharedDependRelationId] = true;
+    m_rel_store_in_shared[SharedSecLabelRelationId] = true;
+    m_rel_store_in_shared[TableSpaceRelationId] = true;
+    m_rel_store_in_shared[AuthHistoryRelationId] = true;
+    m_rel_store_in_shared[UserStatusRelationId] = true;
+
+    m_rel_store_in_shared[PgxcGroupRelationId] = true;
+    m_rel_store_in_shared[PgxcNodeRelationId] = true;
+    m_rel_store_in_shared[ResourcePoolRelationId] = true;
+    m_rel_store_in_shared[WorkloadGroupRelationId] = true;
+    m_rel_store_in_shared[AppWorkloadGroupMappingRelationId] = true;
+    m_rel_store_in_shared[GsGlobalConfigRelationId] = true;
+
+    m_rel_store_in_shared[DbRoleSettingRelationId] = true;
+    m_rel_store_in_shared[PgJobRelationId] = true;
+    m_rel_store_in_shared[PgJobProcRelationId] = true;
+    m_rel_store_in_shared[DataSourceRelationId] = true;
+    m_rel_store_in_shared[GSObsScanInfoRelationId] = true;
+    m_rel_store_in_shared[AuthIdRolnameIndexId] = true;
+    m_rel_store_in_shared[AuthIdOidIndexId] = true;
+    m_rel_store_in_shared[AuthMemRoleMemIndexId] = true;
+    m_rel_store_in_shared[AuthMemMemRoleIndexId] = true;
+    m_rel_store_in_shared[DatabaseNameIndexId] = true;
+    m_rel_store_in_shared[DatabaseOidIndexId] = true;
+    m_rel_store_in_shared[PLTemplateNameIndexId] = true;
+    m_rel_store_in_shared[SharedDescriptionObjIndexId] = true;
+    m_rel_store_in_shared[SharedDependDependerIndexId] = true;
+    m_rel_store_in_shared[SharedDependReferenceIndexId] = true;
+    m_rel_store_in_shared[SharedSecLabelObjectIndexId] = true;
+    m_rel_store_in_shared[TablespaceOidIndexId] = true;
+    m_rel_store_in_shared[TablespaceNameIndexId] = true;
+    m_rel_store_in_shared[AuthHistoryIndexId] = true;
+    m_rel_store_in_shared[AuthHistoryOidIndexId] = true;
+    m_rel_store_in_shared[UserStatusRoleidIndexId] = true;
+    m_rel_store_in_shared[UserStatusOidIndexId] = true;
+
+    m_rel_store_in_shared[PgxcNodeNodeNameIndexId] = true;
+    m_rel_store_in_shared[PgxcNodeNodeNameIndexIdOld] = true;
+    m_rel_store_in_shared[PgxcNodeNodeIdIndexId] = true;
+    m_rel_store_in_shared[PgxcNodeOidIndexId] = true;
+    m_rel_store_in_shared[PgxcGroupGroupNameIndexId] = true;
+    m_rel_store_in_shared[PgxcGroupOidIndexId] = true;
+    m_rel_store_in_shared[PgxcGroupToastTable] = true;
+    m_rel_store_in_shared[PgxcGroupToastIndex] = true;
+    m_rel_store_in_shared[ResourcePoolPoolNameIndexId] = true;
+    m_rel_store_in_shared[ResourcePoolOidIndexId] = true;
+    m_rel_store_in_shared[WorkloadGroupGroupNameIndexId] = true;
+    m_rel_store_in_shared[WorkloadGroupOidIndexId] = true;
+    m_rel_store_in_shared[AppWorkloadGroupMappingNameIndexId] = true;
+    m_rel_store_in_shared[AppWorkloadGroupMappingOidIndexId] = true;
+
+    m_rel_store_in_shared[DbRoleSettingDatidRolidIndexId] = true;
+    m_rel_store_in_shared[PgJobOidIndexId] = true;
+    m_rel_store_in_shared[PgJobIdIndexId] = true;
+    m_rel_store_in_shared[PgJobProcOidIndexId] = true;
+    m_rel_store_in_shared[PgJobProcIdIndexId] = true;
+    m_rel_store_in_shared[DataSourceOidIndexId] = true;
+    m_rel_store_in_shared[DataSourceNameIndexId] = true;
+    m_rel_store_in_shared[PgShdescriptionToastTable] = true;
+    m_rel_store_in_shared[PgShdescriptionToastIndex] = true;
+    m_rel_store_in_shared[PgDbRoleSettingToastTable] = true;
+    m_rel_store_in_shared[PgDbRoleSettingToastIndex] = true;
+
+    m_rel_store_in_shared[SubscriptionRelationId] = true;
+    m_rel_store_in_shared[SubscriptionObjectIndexId] = true;
+    m_rel_store_in_shared[SubscriptionNameIndexId] = true;
+    m_rel_store_in_shared[ReplicationOriginRelationId] = true;
+    m_rel_store_in_shared[ReplicationOriginIdentIndex] = true;
+    m_rel_store_in_shared[ReplicationOriginNameIndex] = true;
+}
+
+void GlobalSysDBCache::InitRelForInitSysCacheFlag()
+{
+    errno_t rc = memset_s(m_rel_for_init_syscache, FirstNormalObjectId, 0, FirstNormalObjectId);
+    securec_check(rc, "\0", "\0");
+    m_rel_for_init_syscache[ClassOidIndexId] = true;
+    m_rel_for_init_syscache[AttributeRelidNumIndexId] = true;
+    m_rel_for_init_syscache[IndexRelidIndexId] = true;
+    m_rel_for_init_syscache[OpclassOidIndexId] = true;
+    m_rel_for_init_syscache[AccessMethodProcedureIndexId] = true;
+    m_rel_for_init_syscache[RewriteRelRulenameIndexId] = true;
+    m_rel_for_init_syscache[TriggerRelidNameIndexId] = true;
+    m_rel_for_init_syscache[DatabaseNameIndexId] = true;
+    m_rel_for_init_syscache[DatabaseOidIndexId] = true;
+    m_rel_for_init_syscache[AuthIdRolnameIndexId] = true;
+    m_rel_for_init_syscache[AuthIdOidIndexId] = true;
+    m_rel_for_init_syscache[AuthMemMemRoleIndexId] = true;
+    m_rel_for_init_syscache[UserStatusRoleidIndexId] = true;
+}
+
+void GlobalSysDBCache::InitSysCacheRelIds()
+{
+    errno_t rc = memset_s(m_syscache_relids, FirstNormalObjectId, 0, FirstNormalObjectId);
+    securec_check(rc, "\0", "\0");
+    for (int i = 0; i < SysCacheSize; i++) {
+        m_syscache_relids[cacheinfo[i].reloid] = true;
+    }
+}
+
+/*
+ * Whenever you change server_mode, please call RefreshHotStandby:
+ * we need to know the current mode to support the gsc feature.
+ */
+void GlobalSysDBCache::RefreshHotStandby()
+{
+    hot_standby = (t_thrd.postmaster_cxt.HaShmData->current_mode != STANDBY_MODE || XLogStandbyInfoActive());
+}
+
+void GlobalSysDBCache::Init(MemoryContext parent)
+{
+    /* every process should call this function exactly once */
+    Assert(!m_is_inited);
+    if (!EnableGlobalSysCache()) {
+        return;
+    }
+    Assert(m_global_sysdb_mem_cxt == NULL);
+    InitRelStoreInSharedFlag();
+    InitRelForInitSysCacheFlag();
+    InitSysCacheRelIds();
+#ifdef ENABLE_LITE_MODE
+    m_global_sysdb_mem_cxt = AllocSetContextCreate(parent, "GlobalSysDBCache", ALLOCSET_SMALL_MINSIZE,
+        ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE, SHARED_CONTEXT);
+#else
+    m_global_sysdb_mem_cxt = AllocSetContextCreate(parent, "GlobalSysDBCache", ALLOCSET_DEFAULT_MINSIZE,
+        ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, SHARED_CONTEXT);
+#endif
+
+    MemoryContext old = MemoryContextSwitchTo(m_global_sysdb_mem_cxt);
+    m_nbuckets = INIT_DB_SIZE;
+    m_bucket_list.Init(m_nbuckets);
+    m_db_locks = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t) * m_nbuckets);
+    for (int i = 0; i < m_nbuckets; i++) {
+        PthreadRwLockInit(&m_db_locks[i], NULL);
+    }
+    m_dbstat_manager.InitDBStat(m_nbuckets, m_global_sysdb_mem_cxt);
+    Assert(g_instance.attr.attr_memory.global_syscache_threshold != 0);
+    /* if the config file provides no gsc threshold, initialize it with the default value */
+    if (m_global_syscache_threshold == 0) {
+        UpdateGSCConfig(g_instance.attr.attr_memory.global_syscache_threshold);
+    }
+    m_global_shared_db_entry = CreateSharedGSCEntry();
+    MemoryContextSwitchTo(old);
+    dynamic_hash_bucket_strategy = DynamicHashBucketDefault;
+    m_is_inited = true;
+}
+
+GlobalSysDBCache::GlobalSysDBCache()
+{
+    m_db_locks = NULL;
+    m_global_sysdb_mem_cxt = NULL;
+    m_nbuckets = 0;
+    m_global_shared_db_entry = NULL;
+    m_global_syscache_threshold = 0;
+    m_is_memorychecking = 0;
+    hot_standby = true;
+    recovery_finished = false;
+    m_swapout_hash_index = 0;
+    m_is_inited = false;
+}
+
+void GlobalSysDBCache::Refresh(GlobalSysDBCacheEntry *entry)
+{
+    if (unlikely(gsc_rough_used_space > MAX_GSC_MEMORY_SPACE)) {
+        GSCMemThresholdCheck();
+    }
+    FreeDeadDBs();
+}
+
+struct GlobalDBStatInfo : GlobalSysCacheStat {
+    Datum db_name;
+    uint64 tup_count;
+    uint64 tup_dead;
+    uint64 tup_space;
+
+    uint64 rel_count;
+    uint64 rel_dead;
+    uint64 rel_space;
+
+    uint64 part_count;
+    uint64 part_dead;
+    uint64 part_space;
+    uint64 total_space;
+    uint64 refcount;
+};
+
+static GlobalDBStatInfo *ConstructGlobalDBStatInfo(GlobalSysDBCacheEntry *entry, GlobalDBStatManager *dbstat_manager)
+{
+    GlobalDBStatInfo *statinfo = (GlobalDBStatInfo *)palloc0(sizeof(GlobalDBStatInfo));
+    statinfo->db_oid = entry->m_dbOid;
+    statinfo->db_name = CStringGetTextDatum(entry->m_dbName);
+    statinfo->hash_index = entry->m_hash_index;
+    statinfo->swapout_count = 0;
+    statinfo->tup_searches = entry->m_dbstat->tup_searches;
+    statinfo->tup_hits = entry->m_dbstat->tup_hits;
+    statinfo->tup_newloads = entry->m_dbstat->tup_newloads;
+    statinfo->tup_count = entry->m_systabCache->GetActiveElementsNum();
+    statinfo->tup_dead = entry->m_systabCache->GetDeadElementsNum();
+    statinfo->tup_space = entry->m_systabCache->GetSysCacheSpaceNum();
+
+    statinfo->rel_searches = entry->m_dbstat->rel_searches;
+    statinfo->rel_hits = entry->m_dbstat->rel_hits;
+    statinfo->rel_newloads = entry->m_dbstat->rel_newloads;
+    statinfo->rel_count = entry->m_tabdefCache->GetActiveElementsNum();
+    statinfo->rel_dead = entry->m_tabdefCache->GetDeadElementsNum();
+    statinfo->rel_space = entry->m_tabdefCache->GetSysCacheSpaceNum();
+    if (entry->m_partdefCache == NULL) {
+        statinfo->part_searches = 0;
+        statinfo->part_hits = 0;
+        statinfo->part_newloads = 0;
+        statinfo->part_count = 0;
+        statinfo->part_dead = 0;
+        statinfo->part_space = 0;
+    } else {
+        statinfo->part_searches = entry->m_dbstat->part_searches;
+        statinfo->part_hits = entry->m_dbstat->part_hits;
+        statinfo->part_newloads = entry->m_dbstat->part_newloads;
+        statinfo->part_count = entry->m_partdefCache->GetActiveElementsNum();
+        statinfo->part_dead = entry->m_partdefCache->GetDeadElementsNum();
+        statinfo->part_space = entry->m_partdefCache->GetSysCacheSpaceNum();
+    }
+    statinfo->total_space = entry->GetDBUsedSpace();
+    statinfo->refcount = entry->m_refcount;
+    if (dbstat_manager != NULL) {
+        dbstat_manager->GetDBStat((GlobalSysCacheStat *)statinfo);
+    }
+    return statinfo;
+}
+
+/* @param db_id: 0 means fetch the shared db; >0 means fetch the given db plus the shared db;
+ *        ALL_DB_OID/null means fetch all dbs
+ * @param rel_id: ALL_REL_OID/null means fetch all caches; otherwise fetch the given cache */
+List *GlobalSysDBCache::GetGlobalDBStatDetail(Oid db_id, Oid rel_id, GscStatDetail stat_detail)
+{
+    List *stat_list = NIL;
+    /* collect stat info of the shared db */
+    GlobalSysDBCacheEntry *shared = GetSharedGSCEntry();
+    if (stat_detail == GscStatDetailDBInfo) {
+        stat_list = lappend(stat_list, ConstructGlobalDBStatInfo(shared, NULL));
+    } else if (stat_detail == GscStatDetailTuple) {
+        stat_list = lappend3(stat_list, shared->m_systabCache->GetCatalogTupleStats(rel_id));
+    } else {
+        Assert(stat_detail == GscStatDetailTable);
+        stat_list = lappend3(stat_list, shared->m_tabdefCache->GetTableStats(rel_id));
+    }
+    if (db_id == InvalidOid) {
+        return stat_list;
+    }
+    Index given_hash_index = ALL_DB_INDEX;
+    if (db_id != ALL_DB_OID) {
+        uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid));
+        given_hash_index = HASH_INDEX(hash_value, m_nbuckets);
+    }
+    for (int hash_index = 0; hash_index < m_nbuckets; hash_index++) {
+        if (DLIsNIL(m_bucket_list.GetBucket(hash_index))) {
+            continue;
+        }
+        if (given_hash_index != ALL_DB_INDEX && given_hash_index != (Index)hash_index) {
+            continue;
+        }
+        CHECK_FOR_INTERRUPTS();
+        PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]);
+        for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) {
+            GlobalSysDBCacheEntry *entry = (GlobalSysDBCacheEntry *)DLE_VAL(elt);
+            if (db_id != ALL_DB_OID && db_id != entry->m_dbOid) {
+                continue;
+            }
+            if (stat_detail == GscStatDetailDBInfo) {
+                stat_list = lappend(stat_list, ConstructGlobalDBStatInfo(entry, &m_dbstat_manager));
+            } else if (stat_detail == GscStatDetailTuple) {
+                stat_list = lappend3(stat_list, entry->m_systabCache->GetCatalogTupleStats(rel_id));
+            } else {
+                Assert(stat_detail == GscStatDetailTable);
+                stat_list = lappend3(stat_list, entry->m_tabdefCache->GetTableStats(rel_id));
+            }
+        }
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_db_locks[hash_index]);
+    }
+    return stat_list;
+}
+
+static void CheckDbOidValid(Oid dbOid)
+{
+    if (dbOid != InvalidOid && dbOid != ALL_DB_OID &&
+        !SearchSysCacheExists(DATABASEOID, ObjectIdGetDatum(dbOid), 0, 0, 0)) {
+        ereport(ERROR, (errmodule(MOD_GSC), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("dbOid doesn't exist."),
+            errdetail("dbOid is invalid, please pass a valid dbOid."), errcause("N/A"),
+            erraction("Please query the pg_database catalog for a valid dbOid.")));
+    }
+}
+
+static const int GLOBAL_STAT_INFO_NUM = 23;
+bool gs_gsc_dbstat_firstcall(PG_FUNCTION_ARGS)
+{
+    if (!superuser()) {
+        ereport(ERROR, (errmodule(MOD_GSC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("Failed to show gsc dbstat info."),
+            errdetail("Insufficient privilege to show gsc dbstat info."), errcause("N/A"),
+            erraction("Please log in as superuser or sysdba, or contact the database administrator.")));
+    }
+    Oid dbOid = PG_ARGISNULL(0) ?
ALL_DB_OID : PG_GETARG_OID(0); + CheckDbOidValid(dbOid); + FuncCallContext *funcctx = NULL; + if (!SRF_IS_FIRSTCALL()) { + return true; + } + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + funcctx->tuple_desc = CreateTemplateTupleDesc(GLOBAL_STAT_INFO_NUM, false); + int i = 1; + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "database_id", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "database_name", TEXTOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "tup_searches", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "tup_hit", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "tup_miss", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "tup_count", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "tup_dead", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "tup_memory", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_searches", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_hit", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_miss", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_count", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_dead", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_memory", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "part_searches", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "part_hit", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "part_miss", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "part_count", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "part_dead", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "part_memory", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "total_memory", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "swapout_count", INT8OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "refcount", INT8OID, -1, 0); + funcctx->tuple_desc = BlessTupleDesc(funcctx->tuple_desc); + + List *tmp_list = NIL; + if (EnableGlobalSysCache()) { + tmp_list = g_instance.global_sysdbcache.GetGlobalDBStatDetail(dbOid, ALL_REL_OID, GscStatDetailDBInfo); + } + if (list_length(tmp_list) == 0) { + MemoryContextSwitchTo(oldcontext); + return false; + } + funcctx->max_calls = list_length(tmp_list); + GlobalDBStatInfo **stat_infos = (GlobalDBStatInfo **)palloc(sizeof(GlobalDBStatInfo *) * funcctx->max_calls); + MemoryContextSwitchTo(oldcontext); + funcctx->user_fctx = stat_infos; + ListCell *cell = NULL; + foreach (cell, tmp_list) { + *stat_infos = (GlobalDBStatInfo *)lfirst(cell); + stat_infos++; + } + list_free_ext(tmp_list); + return true; +} + +Datum gs_gsc_dbstat_info(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx = NULL; + if (!gs_gsc_dbstat_firstcall(fcinfo)) { + SRF_RETURN_DONE(funcctx); + } + + funcctx = SRF_PERCALL_SETUP(); + if (funcctx->call_cntr >= funcctx->max_calls) { + SRF_RETURN_DONE(funcctx); + } + Datum values[GLOBAL_STAT_INFO_NUM]; + bool nulls[GLOBAL_STAT_INFO_NUM] = {0}; + GlobalDBStatInfo *stat_info = ((GlobalDBStatInfo 
**)funcctx->user_fctx)[funcctx->call_cntr]; + int i = 0; + values[i++] = Int64GetDatum(stat_info->db_oid); + values[i++] = stat_info->db_name; + + values[i++] = Int64GetDatum((int64)stat_info->tup_searches); + values[i++] = Int64GetDatum((int64)stat_info->tup_hits); + values[i++] = Int64GetDatum((int64)stat_info->tup_newloads); + values[i++] = Int64GetDatum((int64)stat_info->tup_count); + values[i++] = Int64GetDatum((int64)stat_info->tup_dead); + values[i++] = Int64GetDatum((int64)stat_info->tup_space); + + values[i++] = Int64GetDatum((int64)stat_info->rel_searches); + values[i++] = Int64GetDatum((int64)stat_info->rel_hits); + values[i++] = Int64GetDatum((int64)stat_info->rel_newloads); + values[i++] = Int64GetDatum((int64)stat_info->rel_count); + values[i++] = Int64GetDatum((int64)stat_info->rel_dead); + values[i++] = Int64GetDatum((int64)stat_info->rel_space); + + values[i++] = Int64GetDatum((int64)stat_info->part_searches); + values[i++] = Int64GetDatum((int64)stat_info->part_hits); + values[i++] = Int64GetDatum((int64)stat_info->part_newloads); + values[i++] = Int64GetDatum((int64)stat_info->part_count); + values[i++] = Int64GetDatum((int64)stat_info->part_dead); + values[i++] = Int64GetDatum((int64)stat_info->part_space); + + values[i++] = Int64GetDatum((int64)stat_info->total_space); + values[i++] = Int64GetDatum((int64)stat_info->swapout_count); + values[i++] = Int64GetDatum((int64)stat_info->refcount); + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + Datum result = HeapTupleGetDatum(tuple); + SRF_RETURN_NEXT(funcctx, result); +} + +/* @param db_id 0 means clean shared db, >0 means clean given db and shared db, ALL_DB_OID/null means clean all dbs */ +void GlobalSysDBCache::Clean(Oid db_id) +{ + m_global_shared_db_entry->ResetDBCache(); + if (db_id == InvalidOid) { + return; + } + Index hash_index = ALL_DB_INDEX; + if (db_id != ALL_DB_OID) { + uint32 hash_value = oid_hash((void *)&(db_id), sizeof(Oid)); + hash_index = HASH_INDEX(hash_value, m_nbuckets); + } + /* skip m_is_swappingout check here */ + if (hash_index == ALL_DB_INDEX) { + Assert(db_id == ALL_DB_OID); + for (Index hash_index = 0; hash_index < (Index)m_nbuckets; hash_index++) { + SwapoutGivenDBInstance(hash_index, ALL_DB_OID); + SwapOutGivenDBContent(hash_index, ALL_DB_OID, DynamicGSCMemoryOutOfControl); + } + } else { + SwapoutGivenDBInstance(hash_index, ALL_DB_OID); + SwapOutGivenDBContent(hash_index, ALL_DB_OID, DynamicGSCMemoryOutOfControl); + } + FreeDeadDBs(); +} + +Datum gs_gsc_clean(PG_FUNCTION_ARGS) +{ + if (!superuser()) { + ereport(ERROR, (errmodule(MOD_GSC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Fail to clean gsc."), + errdetail("Insufficient privilege to clean gsc."), errcause("N/A"), + erraction("Please login in with superuser or sysdba role or contact database administrator."))); + } + Oid dbOid = PG_ARGISNULL(0) ? ALL_DB_OID : PG_GETARG_OID(0); + CheckDbOidValid(dbOid); + if (EnableGlobalSysCache()) { + g_instance.global_sysdbcache.Clean(dbOid); + PG_RETURN_BOOL(true); + } + PG_RETURN_BOOL(false); +} + +static const int GLOBAL_CATALOG_INFO_NUM = 11; +bool gs_gsc_catalog_firstcall(PG_FUNCTION_ARGS) +{ + if (!superuser()) { + ereport(ERROR, (errmodule(MOD_GSC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Fail to see gsc catalog detail."), + errdetail("Insufficient privilege to see gsc catalog detail."), errcause("N/A"), + erraction("Please login in with superuser or sysdba role or contact database administrator."))); + } + Oid dbOid = PG_ARGISNULL(0) ? 
+
+static const int GLOBAL_CATALOG_INFO_NUM = 11;
+bool gs_gsc_catalog_firstcall(PG_FUNCTION_ARGS)
+{
+    if (!superuser()) {
+        ereport(ERROR, (errmodule(MOD_GSC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            errmsg("Failed to show gsc catalog detail."),
+            errdetail("Insufficient privilege to show gsc catalog detail."), errcause("N/A"),
+            erraction("Please log in as superuser or sysdba, or contact the database administrator.")));
+    }
+    Oid dbOid = PG_ARGISNULL(0) ? ALL_DB_OID : PG_GETARG_OID(0);
+    Oid relOid = PG_ARGISNULL(1) ? ALL_REL_OID : PG_GETARG_OID(1);
+    CheckDbOidValid(dbOid);
+    FuncCallContext *funcctx = NULL;
+    if (!SRF_IS_FIRSTCALL()) {
+        return true;
+    }
+    funcctx = SRF_FIRSTCALL_INIT();
+    MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
+    funcctx->tuple_desc = CreateTemplateTupleDesc(GLOBAL_CATALOG_INFO_NUM, false);
+    int i = 1;
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "database_id", INT8OID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "database_name", TEXTOID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_id", INT8OID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "rel_name", TEXTOID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "cache_id", INT8OID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "self", TEXTOID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "ctid", TEXTOID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "infomask", INT8OID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "infomask2", INT8OID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "hash_value", INT8OID, -1, 0);
+    TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)i++, "refcount", INT8OID, -1, 0);
+    funcctx->tuple_desc = BlessTupleDesc(funcctx->tuple_desc);
+
+    List *tmp_list = NIL;
+    if (EnableGlobalSysCache()) {
+        tmp_list = g_instance.global_sysdbcache.GetGlobalDBStatDetail(dbOid, relOid, GscStatDetailTuple);
+    }
+    if (list_length(tmp_list) == 0) {
+        MemoryContextSwitchTo(oldcontext);
+        return false;
+    }
+    funcctx->max_calls = list_length(tmp_list);
+    GlobalCatalogTupleStat **tuple_stat_list =
+        (GlobalCatalogTupleStat **)palloc(sizeof(GlobalCatalogTupleStat *) * funcctx->max_calls);
+    MemoryContextSwitchTo(oldcontext);
+    funcctx->user_fctx = tuple_stat_list;
+    ListCell *cell = NULL;
+    foreach (cell, tmp_list) {
+        *tuple_stat_list = (GlobalCatalogTupleStat *)lfirst(cell);
+        tuple_stat_list++;
+    }
+    list_free_ext(tmp_list);
+    return true;
+}
+
+Datum gs_gsc_catalog_detail(PG_FUNCTION_ARGS)
+{
+    FuncCallContext *funcctx = NULL;
+    if (!gs_gsc_catalog_firstcall(fcinfo)) {
+        SRF_RETURN_DONE(funcctx);
+    }
+
+    funcctx = SRF_PERCALL_SETUP();
+    if (funcctx->call_cntr >= funcctx->max_calls) {
+        SRF_RETURN_DONE(funcctx);
+    }
+    Datum values[GLOBAL_CATALOG_INFO_NUM];
+    bool nulls[GLOBAL_CATALOG_INFO_NUM] = {0};
+    GlobalCatalogTupleStat *tuple_stat = ((GlobalCatalogTupleStat **)funcctx->user_fctx)[funcctx->call_cntr];
+    int i = 0;
+    char str[NAMEDATALEN];
+    values[i++] = Int64GetDatum(tuple_stat->db_oid);
+    values[i++] = tuple_stat->db_name_datum;
+    values[i++] = Int64GetDatum((int64)tuple_stat->rel_oid);
+    values[i++] = tuple_stat->rel_name_datum;
+    values[i++] = Int64GetDatum((int64)tuple_stat->cache_id);
+
+    errno_t rc = sprintf_s(str, NAMEDATALEN, "(%u, %u)", BlockIdGetBlockNumber(&tuple_stat->self.ip_blkid),
+        (uint)tuple_stat->self.ip_posid);
+    securec_check_ss(rc, "\0", "\0");
+    values[i++] = CStringGetTextDatum(str);
+    rc = sprintf_s(str, NAMEDATALEN, "(%u, %u)", BlockIdGetBlockNumber(&tuple_stat->ctid.ip_blkid),
+        (uint)tuple_stat->ctid.ip_posid);
+    securec_check_ss(rc, "\0", "\0");
+    values[i++] = CStringGetTextDatum(str);
+
+    values[i++] = Int64GetDatum((int64)tuple_stat->infomask);
+    values[i++] = Int64GetDatum((int64)tuple_stat->infomask2);
+    values[i++] = Int64GetDatum((int64)tuple_stat->hash_value);
+    values[i++] = Int64GetDatum((int64)tuple_stat->refcount);
+    HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
+    Datum result = HeapTupleGetDatum(tuple);
+    SRF_RETURN_NEXT(funcctx, result);
+}
Int64GetDatum((int64)tuple_stat->refcount); + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + Datum result = HeapTupleGetDatum(tuple); + SRF_RETURN_NEXT(funcctx, result); +} + +static const int GLOBAL_TABLE_INFO_NUM = 21; +bool gs_gsc_table_firstcall(PG_FUNCTION_ARGS) +{ + if (!superuser()) { + ereport(ERROR, (errmodule(MOD_GSC), errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Fail to see gsc table detail."), + errdetail("Insufficient privilege to see gsc table detail."), errcause("N/A"), + erraction("Please login in with superuser or sysdba role or contact database administrator."))); + } + + Oid dbOid = PG_ARGISNULL(0) ? ALL_DB_OID : PG_GETARG_OID(0); + Oid relOid = PG_ARGISNULL(1) ? ALL_REL_OID : PG_GETARG_INT32(1); + CheckDbOidValid(dbOid); + + FuncCallContext *funcctx = NULL; + if (!SRF_IS_FIRSTCALL()) { + return true; + } + + funcctx = SRF_FIRSTCALL_INIT(); + MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + funcctx->tuple_desc = CreateTemplateTupleDesc(GLOBAL_TABLE_INFO_NUM, false); + int attrno = 1; + + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "database_id", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "database_name", TEXTOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "reloid", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relname", TEXTOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relnamespace", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "reltype", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "reloftype", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relowner", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relam", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relfilenode", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "reltablespace", OIDOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relhasindex", BOOLOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relisshared", BOOLOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relkind", CHAROID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relnatts", INT2OID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relhasoids", BOOLOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "relhaspkey", BOOLOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "parttype", CHAROID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "tdhasuids", BOOLOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "attnames", TEXTOID, -1, 0); + TupleDescInitEntry(funcctx->tuple_desc, (AttrNumber)attrno++, "extinfo", TEXTOID, -1, 0); + + funcctx->tuple_desc = BlessTupleDesc(funcctx->tuple_desc); + + List *tmp_list = NIL; + if (EnableGlobalSysCache()) { + tmp_list = g_instance.global_sysdbcache.GetGlobalDBStatDetail(dbOid, relOid, GscStatDetailTable); + } + + if (list_length(tmp_list) == 0) { + MemoryContextSwitchTo(oldcontext); + return false; + } + + funcctx->max_calls = list_length(tmp_list); + GlobalCatalogTableStat **table_stat_list = + (GlobalCatalogTableStat **)palloc(sizeof(GlobalCatalogTableStat *) * 
funcctx->max_calls); + MemoryContextSwitchTo(oldcontext); + funcctx->user_fctx = table_stat_list; + ListCell *cell = NULL; + foreach (cell, tmp_list) { + *table_stat_list = (GlobalCatalogTableStat *)lfirst(cell); + table_stat_list++; + } + list_free_ext(tmp_list); + + return true; +} + +int rd_rel_to_datum(Datum *values, Form_pg_class rd_rel) +{ + int attrno = 0; + values[attrno++] = rd_rel->relnamespace; + values[attrno++] = rd_rel->reltype; + values[attrno++] = rd_rel->reloftype; + values[attrno++] = rd_rel->relowner; + values[attrno++] = rd_rel->relam; + values[attrno++] = rd_rel->relfilenode; + values[attrno++] = rd_rel->reltablespace; + values[attrno++] = rd_rel->relhasindex; + values[attrno++] = rd_rel->relisshared; + values[attrno++] = rd_rel->relkind; + values[attrno++] = rd_rel->relnatts; + values[attrno++] = rd_rel->relhasoids; + values[attrno++] = rd_rel->relhaspkey; + values[attrno++] = rd_rel->parttype; + return attrno; +} + +int rd_att_to_datum(Datum *values, TupleDesc rd_att) +{ + int attrno = 0; + values[attrno++] = rd_att->tdhasuids; + StringInfoData strinfo; + initStringInfo(&strinfo); + + for (int i = 0; i < rd_att->natts; i++) { + if (i != 0) { + appendStringInfoString(&strinfo, ","); + } + appendStringInfoString(&strinfo, "'"); + appendStringInfoString(&strinfo, rd_att->attrs[i]->attname.data); + appendStringInfoString(&strinfo, "'"); + } + values[attrno++] = CStringGetTextDatum(strinfo.data); + pfree_ext(strinfo.data); + + return attrno; +} + +Datum gs_gsc_table_detail(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx = NULL; + if (!gs_gsc_table_firstcall(fcinfo)) { + SRF_RETURN_DONE(funcctx); + } + + funcctx = SRF_PERCALL_SETUP(); + if (funcctx->call_cntr >= funcctx->max_calls) { + SRF_RETURN_DONE(funcctx); + } + + Datum values[GLOBAL_TABLE_INFO_NUM]; + bool nulls[GLOBAL_TABLE_INFO_NUM] = {0}; + nulls[GLOBAL_TABLE_INFO_NUM - 1] = true; + GlobalCatalogTableStat *table_stat = ((GlobalCatalogTableStat **)funcctx->user_fctx)[funcctx->call_cntr]; + int attrno = 0; + + values[attrno++] = Int64GetDatum(table_stat->db_id); + values[attrno++] = table_stat->db_name; + values[attrno++] = Int64GetDatum((int64)table_stat->rel_id); + values[attrno++] = CStringGetTextDatum(table_stat->rd_rel->relname.data); + attrno = rd_rel_to_datum(values + attrno, table_stat->rd_rel) + attrno; + attrno = rd_att_to_datum(values + attrno, table_stat->rd_att) + attrno; + Assert(attrno == GLOBAL_TABLE_INFO_NUM - 1); + + /* form physical tuple and return as datum tuple */ + HeapTuple tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + Datum result = HeapTupleGetDatum(tuple); + + SRF_RETURN_NEXT(funcctx, result); +} + + +int ResizeHashBucket(int origin_nbucket, DynamicHashBucketStrategy strategy) +{ + int cc_nbuckets = origin_nbucket; + Assert(cc_nbuckets > 0 && (cc_nbuckets & -cc_nbuckets) == cc_nbuckets); + if (cc_nbuckets <= MinHashBucketSize) { + return cc_nbuckets; + } + switch (strategy) { + case DynamicHashBucketDefault: + break; + case DynamicHashBucketHalf: + cc_nbuckets = cc_nbuckets >> 1; + break; + case DynamicHashBucketQuarter: + cc_nbuckets = cc_nbuckets >> 2; + break; + case DynamicHashBucketEighth: + cc_nbuckets = cc_nbuckets >> 3; + break; + case DynamicHashBucketMin: + /* when off, dont do palloc */ + cc_nbuckets = MinHashBucketSize; + break; + } + if (cc_nbuckets <= MinHashBucketSize) { + return MinHashBucketSize; + } + return cc_nbuckets; +} + +void NotifyGscRecoveryFinished() +{ + if (EnableGlobalSysCache()) { + g_instance.global_sysdbcache.recovery_finished = true; + } 
diff --git a/src/common/backend/utils/cache/knl_globalsystabcache.cpp b/src/common/backend/utils/cache/knl_globalsystabcache.cpp
new file mode 100644
index 000000000..cc1400519
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_globalsystabcache.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "executor/executor.h"
+#include "utils/knl_globalsystabcache.h"
+#include "utils/knl_globaldbstatmanager.h"
+#include "knl/knl_instance.h"
+#include "knl/knl_session.h"
+#include "utils/memutils.h"
+#include "storage/lmgr.h"
+
+GlobalSysTabCache::GlobalSysTabCache(Oid dbOid, bool isShared, GlobalSysDBCacheEntry *dbEntry)
+{
+    Assert((isShared && dbOid == InvalidOid) || (!isShared && dbOid != InvalidOid));
+    m_dbOid = dbOid;
+    m_isShared = isShared;
+    m_dbEntry = dbEntry;
+    m_isInited = false;
+    m_global_systupcaches = NULL;
+    m_systab_locks = NULL;
+    m_tup_count = 0;
+    m_tup_space = 0;
+}
+
+/*
+ * @Description:
+ * Initialization function for the current GlobalSysTabCache object. In lite mode,
+ * to reduce memory consumption, initialization is lazy: a cache is only set up
+ * when it actually gets used.
+ *
+ * @param[IN] void
+ *
+ * @return: void
+ */
+void GlobalSysTabCache::Init()
+{
+    Assert(!m_isInited);
+    MemoryContext old = MemoryContextSwitchTo(m_dbEntry->GetRandomMemCxt());
+    m_systab_locks = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t) * SysCacheSize);
+    m_global_systupcaches = (GlobalSysTupCache **)palloc0(sizeof(GlobalSysTupCache *) * SysCacheSize);
+
+    /*
+     * In lite mode, we do not create GlobalSysTupCache objects eagerly, keeping
+     * memory consumption as low as possible
+     */
+#ifndef ENABLE_LITE_MODE
+    for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) {
+        PthreadRwLockInit(&m_systab_locks[cache_id], NULL);
+        /* the catcache is split between the shared db entry and per-db entries,
+         * so each db entry initializes only the systupcaches it actually needs */
+        if ((m_isShared && g_instance.global_sysdbcache.HashSearchSharedRelation(cacheinfo[cache_id].reloid)) ||
+            (!m_isShared && !g_instance.global_sysdbcache.HashSearchSharedRelation(cacheinfo[cache_id].reloid))) {
+            m_global_systupcaches[cache_id] = New(m_dbEntry->GetRandomMemCxt()) GlobalSysTupCache(
+                m_dbOid, cache_id, m_isShared, m_dbEntry);
+            m_global_systupcaches[cache_id]->SetStatInfoPtr(&m_tup_count, &m_tup_space);
+        }
+    }
+#endif
+    MemoryContextSwitchTo(old);
+
+    /* mark the initialization stage as done */
+    m_isInited = true;
+}
+
+/*
+ * @Description:
+ * Reset all non-shared catalog caches of the current database; here reset means
+ * unlinking each GlobalCTup from cc_list and cc_bucket and moving all systuples
+ * to the dead list
+ *
+ * @param[IN] void
+ *
+ * @return: void
+ */
+template <bool force>
+void GlobalSysTabCache::ResetCatCaches()
+{
+    /* a global reset does not touch the shared cache */
+    for (int i = 0; i < SysCacheSize; i++) {
+        /* shared tables are kept separately from the normal db */
+        if (m_global_systupcaches[i] == NULL) {
+            continue;
+        }
+
+        /* handle the CatCTup elements on each SysTupCache's cc_list and cc_bucket */
+        m_global_systupcaches[i]->ResetCatalogCache();
+    }
+}
+template void GlobalSysTabCache::ResetCatCaches<false>();
+template void GlobalSysTabCache::ResetCatCaches<true>();
+
+void GlobalSysTabCache::RemoveAllTailElements()
+{
+    for (int i = 0; i < SysCacheSize; i++) {
+        // shared tables are kept separately from the normal db
+        if (m_global_systupcaches[i] == NULL) {
+            continue;
+        }
+        m_global_systupcaches[i]->RemoveAllTailElements();
+#ifndef ENABLE_LITE_MODE
+        /* memory is under control, so stop swapping out */
+        if (g_instance.global_sysdbcache.MemoryUnderControl()) {
+            break;
+        }
+#endif
+    }
+}
+
+/*
+ * @Description:
+ * Find the GlobalSysTupCache (table level) for the given cache_id
+ *
+ * @param[IN] cache_id: GSTC's cache id (array index of SysTabCache)
+ *
+ * @return: target GlobalSysTupCache object
+ */
+GlobalSysTupCache *GlobalSysTabCache::CacheIdGetGlobalSysTupCache(int cache_id)
+{
+    Assert(m_isInited);
+    Assert((m_isShared && g_instance.global_sysdbcache.HashSearchSharedRelation(cacheinfo[cache_id].reloid)) ||
+        (!m_isShared && !g_instance.global_sysdbcache.HashSearchSharedRelation(cacheinfo[cache_id].reloid)));
+
+#ifdef ENABLE_LITE_MODE
+    PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+    if (unlikely(m_global_systupcaches[cache_id] == NULL)) {
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+        PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+        if (m_global_systupcaches[cache_id] == NULL) {
+            m_global_systupcaches[cache_id] = New(m_dbEntry->GetRandomMemCxt()) GlobalSysTupCache(
+                m_dbOid, cache_id, m_isShared, m_dbEntry);
+            m_global_systupcaches[cache_id]->SetStatInfoPtr(&m_tup_count, &m_tup_space);
+        }
+        m_global_systupcaches[cache_id]->Init();
+    }
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+#endif
+
+    if (likely(m_global_systupcaches[cache_id]->Inited())) {
+        return m_global_systupcaches[cache_id];
+    }
+
+    /* do the cache initialization if it is not set up yet */
+    PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+    m_global_systupcaches[cache_id]->Init();
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+
+    return m_global_systupcaches[cache_id];
+}
+
+/*
+ * @Description:
+ * Find the GlobalSysTupCache (table level) for the given cache_id and hash_value
+ * and mark the matching systuples as dead, moving them to the dead list
+ *
+ * Invalid: mark the systuples matching cache_id and hash_value as "dead"
+ * Reset: mark all systuples under cache_id as "dead"
+ *
+ * @param[IN] cache_id: GSTC's cache id (array index of SysTabCache), hash_value
+ *
+ * @return: void
+ */
+void GlobalSysTabCache::InvalidTuples(int cache_id, uint32 hash_value, bool reset)
+{
+#ifdef ENABLE_LITE_MODE
+    PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+    if (unlikely(m_global_systupcaches[cache_id] == NULL)) {
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+        return;
+    }
+    PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_systab_locks[cache_id]);
+#endif
+
+    /* possibly an upgrade from a version before V5R2C00, where cache ids are ordered
+     * differently; nothing but the relmap is cached then, so just ignore the catcache inval message */
+    if (unlikely(!g_instance.global_sysdbcache.recovery_finished) && m_global_systupcaches[cache_id] == NULL) {
+        return;
+    }
+
+    if (!m_global_systupcaches[cache_id]->Inited()) {
+        return;
+    }
+
+    if (reset) {
+
+/*
+ * @Description:
+ *    Fetch tuple stats for the given relOid from this GlobalSysTabCache object
+ *    (current database)
+ *
+ * @param[IN] relOid: OID of the catalog relation to report on, or ALL_REL_OID
+ *                    to report on every cached catalog relation
+ *
+ * @return: list of tuple stats as "GlobalCatalogTupleStat" entries
+ */
+List *GlobalSysTabCache::GetCatalogTupleStats(Oid relOid)
+{
+    List *tuple_stat_list = NIL;
+
+    /* Scan the GlobalCatCTup stat info held by each GlobalSysTupCache object */
+    for (int i = 0; i < SysCacheSize; i++) {
+        /*
+         * Skip GSC objects for shared tables, because a shared table's GSC by design
+         * is stored in a special GlobalSysTabCache(DB) whose entry here is NULL
+         */
+        if (m_global_systupcaches[i] == NULL) {
+            continue;
+        }
+
+        /* Skip those not initialized yet */
+        if (!m_global_systupcaches[i]->Inited()) {
+            continue;
+        }
+
+        Assert(cacheinfo[i].reloid == m_global_systupcaches[i]->GetCCRelOid());
+        /* skip undesired rels */
+        if (relOid != ALL_REL_OID && (Oid)relOid != cacheinfo[i].reloid) {
+            continue;
+        }
+
+        /* Fetch the current GSC's info */
+        List *tmp = m_global_systupcaches[i]->GetGlobalCatCTupStat();
+        tuple_stat_list = lappend3(tuple_stat_list, tmp);
+
+        pfree_ext(tmp);
+    }
+
+    return tuple_stat_list;
+}
diff --git a/src/common/backend/utils/cache/knl_globalsystupcache.cpp b/src/common/backend/utils/cache/knl_globalsystupcache.cpp
new file mode 100644
index 000000000..094efbf29
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_globalsystupcache.cpp
@@ -0,0 +1,1798 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * --------------------------------------------------------------------------------------- + */ + +#include "utils/knl_globalsystupcache.h" +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/genam.h" +#include "access/hash.h" +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/sysattr.h" +#include "access/transam.h" +#include "access/tuptoaster.h" +#include "access/valid.h" +#include "catalog/gs_obsscaninfo.h" +#include "catalog/gs_policy_label.h" +#include "catalog/indexing.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_aggregate.h" +#include "catalog/pg_amop.h" +#include "catalog/pg_amproc.h" +#include "catalog/pg_auth_members.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_obsscaninfo.h" +#include "catalog/pg_cast.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_conversion.h" +#include "catalog/pg_database.h" +#include "catalog/pg_db_role_setting.h" +#include "catalog/pg_default_acl.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_description.h" +#include "catalog/pg_directory.h" +#include "catalog/pg_enum.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_foreign_table.h" +#include "catalog/pg_job.h" +#include "catalog/pg_job_proc.h" +#include "catalog/pg_language.h" +#include "catalog/pg_namespace.h" +#include "catalog/pg_object.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_partition.h" +#include "catalog/pg_partition_fn.h" +#include "catalog/pg_hashbucket.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_proc_fn.h" +#include "catalog/pg_range.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_seclabel.h" +#include "catalog/pg_shseclabel.h" +#include "catalog/pg_shdescription.h" +#include "catalog/pg_shdepend.h" +#include "catalog/pg_statistic.h" +#include "catalog/pg_statistic_ext.h" +#include "catalog/pg_synonym.h" +#include "catalog/pg_tablespace.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_config_map.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pg_type.h" +#include "catalog/pg_user_mapping.h" +#include "catalog/pg_user_status.h" +#include "catalog/pg_extension_data_source.h" +#include "catalog/pg_streaming_stream.h" +#include "catalog/pg_streaming_cont_query.h" +#include "catalog/heap.h" +#include "executor/executor.h" +#include "funcapi.h" +#include "miscadmin.h" +#include "parser/parse_relation.h" +#include "parser/parse_type.h" +#include "pgstat.h" +#ifdef ENABLE_MULTIPLE_NODES +#include "catalog/pgxc_class.h" +#include "catalog/pgxc_node.h" +#include "catalog/pgxc_group.h" +#include "catalog/pg_resource_pool.h" +#include "catalog/pg_workload_group.h" +#include "catalog/pg_app_workloadgroup_mapping.h" +#endif +#ifdef CatCache_STATS +#include "storage/ipc.h" /* for on_proc_exit */ +#endif +#include "storage/lmgr.h" +#include "storage/sinvaladt.h" +#include "utils/acl.h" +#include "utils/atomic.h" +#include "utils/datum.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/extended_statistics.h" +#include "utils/fmgroids.h" +#include "utils/fmgrtab.h" +#include "utils/hashutils.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include 
"utils/rel_gs.h" +#include "utils/relcache.h" +#include "utils/resowner.h" +#include "utils/syscache.h" +#include "utils/snapmgr.h" +#include "utils/knl_relcache.h" +#include "utils/knl_catcache.h" +#include "utils/knl_globaltabdefcache.h" +#include "utils/sec_rls_utils.h" + +void GlobalCatCTup::Release() +{ + /* Decrement the reference count of a GlobalCatCache tuple */ + Assert(ct_magic == CT_MAGIC); + Assert(refcount > 0); + my_cache->ReleaseGlobalCatCTup(this); +} + +void GlobalCatCList::Release() +{ + /* Safety checks to ensure we were handed a cache entry */ + Assert(cl_magic == CL_MAGIC); + Assert(refcount > 0); + my_cache->ReleaseGlobalCatCList(this); +} + +void GlobalSysTupCache::ReleaseGlobalCatCTup(GlobalCatCTup *ct) +{ + if (unlikely(!ct->canInsertGSC)) { + pfree(ct); + return; + } + (void)pg_atomic_fetch_sub_u64(&ct->refcount, 1); +} + +void GlobalSysTupCache::ReleaseGlobalCatCList(GlobalCatCList *cl) +{ + if (unlikely(!cl->canInsertGSC)) { + FreeGlobalCatCList(cl); + return; + } + (void)pg_atomic_fetch_sub_u64(&cl->refcount, 1); +} + +static uint64 GetClEstimateSize(GlobalCatCList *cl) +{ + uint64 cl_size = offsetof(GlobalCatCList, members) + + (cl->n_members + 1) * sizeof(GlobalCatCTup *) + + cl->nkeys * (NAMEDATALEN + CHUNK_ALGIN_PAD) + /* estimate space of keys */ + CHUNK_ALGIN_PAD; + return cl_size; +} + +static uint64 GetCtEstimateSize(GlobalCatCTup *ct) +{ + uint64 ct_size = sizeof(GlobalCatCTup) + MAXIMUM_ALIGNOF + ct->tuple.t_len + CHUNK_ALGIN_PAD; + return ct_size; +} + +void GlobalSysTupCache::AddHeadToCCList(GlobalCatCList *cl) +{ + uint64 cl_size = GetClEstimateSize(cl); + /* record space of list */ + m_dbEntry->MemoryEstimateAdd(cl_size); + pg_atomic_fetch_add_u64(m_tup_space, cl_size); + DLAddHead(&cc_lists, &cl->cache_elem); +} +void GlobalSysTupCache::RemoveElemFromCCList(GlobalCatCList *cl) +{ + uint64 cl_size = GetClEstimateSize(cl); + m_dbEntry->MemoryEstimateSub(cl_size); + pg_atomic_fetch_sub_u64(m_tup_space, cl_size); + DLRemove(&cl->cache_elem); +} + +void GlobalSysTupCache::AddHeadToBucket(Index hash_index, GlobalCatCTup *ct) +{ + uint64 ct_size = GetCtEstimateSize(ct); + pg_atomic_fetch_add_u64(m_tup_space, ct_size); + /* record space of tup */ + m_dbEntry->MemoryEstimateAdd(ct_size); + pg_atomic_fetch_add_u64(m_tup_count, 1); + DLAddHead(&cc_buckets[hash_index], &ct->cache_elem); +} + +void GlobalSysTupCache::RemoveElemFromBucket(GlobalCatCTup *ct) +{ + uint64 ct_size = GetCtEstimateSize(ct); + pg_atomic_fetch_sub_u64(m_tup_space, ct_size); + /* free space of tup */ + m_dbEntry->MemoryEstimateSub(ct_size); + pg_atomic_fetch_sub_u64(m_tup_count, 1); + DLRemove(&ct->cache_elem); +} + +void GlobalSysTupCache::HandleDeadGlobalCatCTup(GlobalCatCTup *ct) +{ + /* this func run in wr lock, so dont call free directly */ + RemoveElemFromBucket(ct); + ct->dead = true; + if (ct->refcount == 0) { + m_dead_cts.AddHead(&ct->cache_elem); + } else { + m_dead_cts.AddTail(&ct->cache_elem); + } +} + +void GlobalSysTupCache::FreeDeadCts() +{ + while (m_dead_cts.GetLength() > 0) { + Dlelem *elt = m_dead_cts.RemoveHead(); + if (elt == NULL) { + break; + } + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + if (ct->refcount != 0) { + /* we move the active entry to tail of list and let next call free it */ + m_dead_cts.AddTail(&ct->cache_elem); + break; + } else { + pfree(ct); + } + } +} + +void GlobalSysTupCache::RemoveTailTupleElements(Index hash_index) +{ + /* only one thread can do swapout for the bucket */ + 
ResourceOwnerEnlargeGlobalIsExclusive(LOCAL_SYSDB_RESOWNER); + if (!atomic_compare_exchange_u32(&m_is_tup_swappingouts[hash_index], 0, 1)) { + return; + } + ResourceOwnerRememberGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_tup_swappingouts[hash_index]); + + bool listBelowThreshold = GetBucket(hash_index)->dll_len < MAX_GSC_LIST_LENGTH; + + uint64 swapout_count_once = 0; + /* we are the only one to do swapout, so acquire wrlock is ok */ + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_bucket_rw_locks[hash_index]); + + uint64 max_swapout_count_once = GetSwapOutNum(listBelowThreshold, GetBucket(hash_index)->dll_len); + for (Dlelem *elt = DLGetTail(GetBucket(hash_index)); elt != NULL;) { + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + elt = DLGetPred(elt); + if (ct->refcount != 0) { + /* we dont know how many ct are unused, maybe no one, so break to avoid meaningless work + * whatever, another thread does swappout again */ + DLMoveToFront(&ct->cache_elem); + break; + } + HandleDeadGlobalCatCTup(ct); + swapout_count_once++; + + /* swapout elements as many as possible */ + if (swapout_count_once == max_swapout_count_once) { + break; + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_bucket_rw_locks[hash_index]); + + Assert(m_is_tup_swappingouts[hash_index] == 1); + atomic_compare_exchange_u32(&m_is_tup_swappingouts[hash_index], 1, 0); + ResourceOwnerForgetGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_tup_swappingouts[hash_index]); +} + +void GlobalSysTupCache::FreeGlobalCatCList(GlobalCatCList *cl) +{ + Assert(cl->refcount == 0 || !cl->canInsertGSC); + for (int i = 0; i < cl->n_members; i++) { + cl->members[i]->Release(); + } + CatCacheFreeKeys(m_relinfo.cc_tupdesc, cl->nkeys, m_relinfo.cc_keyno, cl->keys); + pfree(cl); +} + +void GlobalSysTupCache::HandleDeadGlobalCatCList(GlobalCatCList *cl) +{ + /* this func run in wr lock, so dont call free directly */ + RemoveElemFromCCList(cl); + if (cl->refcount == 0) { + m_dead_cls.AddHead(&cl->cache_elem); + } else { + m_dead_cls.AddTail(&cl->cache_elem); + } +} + +void GlobalSysTupCache::FreeDeadCls() +{ + while (m_dead_cls.GetLength() > 0) { + Dlelem *elt = m_dead_cls.RemoveHead(); + if (elt == NULL) { + break; + } + GlobalCatCList *cl = (GlobalCatCList *)DLE_VAL(elt); + if (cl->refcount != 0) { + /* we move the active entry to tail of list and let next call free it */ + m_dead_cls.AddTail(&cl->cache_elem); + break; + } else { + FreeGlobalCatCList(cl); + } + } + FreeDeadCts(); +} + +void GlobalSysTupCache::RemoveTailListElements() +{ + /* only one thread can do swapout for the bucket */ + ResourceOwnerEnlargeGlobalIsExclusive(LOCAL_SYSDB_RESOWNER); + if (!atomic_compare_exchange_u32(&m_is_list_swappingout, 0, 1)) { + return; + } + ResourceOwnerRememberGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_list_swappingout); + + /* cllist search is slow, so limit cc_lists's length */ + bool listBelowThreshold = cc_lists.dll_len < MAX_GSC_LIST_LENGTH; + + uint64 swapout_count = 0; + /* the code here is exclusive by set m_cclist_in_swapping 1, so acquire wrlock is ok */ + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + uint64 max_swapout_count_once = GetSwapOutNum(listBelowThreshold, cc_lists.dll_len); + + for (Dlelem *elt = DLGetTail(&cc_lists); elt != NULL;) { + GlobalCatCList *cl = (GlobalCatCList *)DLE_VAL(elt); + elt = DLGetPred(elt); + if (cl->refcount != 0) { + DLMoveToFront(&cl->cache_elem); + /* we dont know how many cl are unused, maybe no one, so break to avoid meaningless work + * whatever, another thread does swappout again */ 
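Both RemoveTailTupleElements and RemoveTailListElements gate eviction with a 0-to-1 compare-and-swap, so at most one thread walks a given tail at a time while contending threads simply skip the pass instead of blocking behind it. A minimal sketch of that guard, with std::atomic standing in for atomic_compare_exchange_u32:

```
#include <atomic>
#include <cstdint>

static std::atomic<uint32_t> g_swapping{0};

void TrySwapOutTail()
{
    uint32_t expected = 0;
    if (!g_swapping.compare_exchange_strong(expected, 1)) {
        return;                 /* someone else is already evicting: skip */
    }

    /* ... walk the list tail under the write lock, evicting only
     * entries whose refcount is zero, up to the per-pass budget ... */

    g_swapping.store(0);        /* reopen the gate */
}
```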
+ break; + } + HandleDeadGlobalCatCList(cl); + swapout_count++; + + if (swapout_count > max_swapout_count_once) { + break; + } + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + + Assert(m_is_list_swappingout == 1); + atomic_compare_exchange_u32(&m_is_list_swappingout, 1, 0); + ResourceOwnerForgetGlobalIsExclusive(LOCAL_SYSDB_RESOWNER, &m_is_list_swappingout); +} + +void GlobalSysTupCache::RemoveAllTailElements() +{ + if (!m_isInited) { + return; + } + RemoveTailListElements(); +#ifndef ENABLE_LITE_MODE + /* memory is under control, so stop swapout */ + if (g_instance.global_sysdbcache.MemoryUnderControl()) { + return; + } +#endif + for (int hash_index = 0; hash_index < cc_nbuckets; hash_index++) { + RemoveTailTupleElements(hash_index); +#ifndef ENABLE_LITE_MODE + /* memory is under control, so stop swapout */ + if (g_instance.global_sysdbcache.MemoryUnderControl()) { + break; + } +#endif + } +} + +GlobalSysTupCache::GlobalSysTupCache(Oid dbOid, int cache_id, bool isShared, GlobalSysDBCacheEntry *entry) +{ + m_isInited = false; + + /* global systup share cxt group with global systab */ + m_searches = &entry->m_dbstat->tup_searches; + m_hits = &entry->m_dbstat->tup_hits; + m_newloads = &entry->m_dbstat->tup_newloads; + m_dbEntry = entry; + m_dbOid = dbOid; + m_cache_id = cache_id; + m_bucket_rw_locks = NULL; + m_list_rw_lock = NULL; + m_concurrent_lock = NULL; + cc_id = -1; + cc_nbuckets = -1; + m_relinfo.cc_tupdesc = NULL; + m_relinfo.cc_relname = NULL; + m_relinfo.cc_tupdesc = NULL; + m_relinfo.cc_reloid = -1; + m_relinfo.cc_indexoid = -1; + m_relinfo.cc_nkeys = -1; + m_relinfo.cc_relisshared = isShared; + Assert((isShared && m_dbOid == InvalidOid) || (!isShared && m_dbOid != InvalidOid)); + + for (int i = 0; i < CATCACHE_MAXKEYS; i++) { + m_relinfo.cc_keyno[i] = -1; + errno_t rc = memset_s(&cc_skey[i], sizeof(ScanKeyData), 0, sizeof(ScanKeyData)); + securec_check(rc, "", ""); + m_relinfo.cc_hashfunc[i] = NULL; + m_relinfo.cc_fastequal[i] = NULL; + } + cc_buckets = NULL; + m_is_tup_swappingouts = NULL; + m_is_list_swappingout = 0; + enable_rls = false; +} + +void GlobalSysTupCache::SetStatInfoPtr(volatile uint64 *tup_count, volatile uint64 *tup_space) +{ + m_tup_count = tup_count; + m_tup_space = tup_space; +} + +/*cat tuple***********************************************************************************************************/ +/* SEARCH_TUPLE_SKIP used by searchtupleinternal */ +GlobalCatCTup *GlobalSysTupCache::FindSearchKeyTupleFromCache(InsertCatTupInfo *tup_info, int *location) +{ + uint32 hash_value = tup_info->hash_value; + Index hash_index = tup_info->hash_index; + Datum *arguments = tup_info->arguments; + int index = 0; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + index++; + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + if (ct->hash_value != hash_value) { + continue; + } + if (!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, m_relinfo.cc_nkeys, ct->keys, arguments)) { + continue; + } + if (unlikely(u_sess->attr.attr_common.IsInplaceUpgrade) && ct->tuple.t_self.ip_posid == 0) { + Assert(ct->tuple.t_tableOid == InvalidOid); + continue; + } + pg_atomic_fetch_add_u64(&ct->refcount, 1); + *location = index; + return ct; + } + return NULL; +} + +/* SCAN_TUPLE_SKIP used by searchtupleinternal */ +GlobalCatCTup *GlobalSysTupCache::FindScanKeyTupleFromCache(InsertCatTupInfo *tup_info) +{ + uint32 hash_value = tup_info->hash_value; + Index hash_index = tup_info->hash_index; + Datum *arguments = 
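The Find*TupleFromCache helpers all share one probe shape: walk the bucket, reject on the cheap 32-bit hash first, run the full key comparison only on a hash match, and pin the entry with an atomic refcount bump before handing it out. A standalone sketch under those assumptions (a toy key in place of the Datum array):

```
#include <atomic>
#include <cstdint>
#include <vector>

struct CachedTup {
    uint32_t hash_value;
    uint64_t key;                       /* stand-in for the Datum keys */
    std::atomic<uint64_t> refcount{0};
};

CachedTup *ProbeBucket(std::vector<CachedTup *> &bucket, uint32_t hash, uint64_t key)
{
    for (CachedTup *ct : bucket) {
        if (ct->hash_value != hash) {
            continue;                   /* cheap reject on hash mismatch */
        }
        if (ct->key != key) {
            continue;                   /* hash collision: keys differ */
        }
        ct->refcount.fetch_add(1);      /* pin before returning */
        return ct;
    }
    return NULL;                        /* miss: caller scans the catalog */
}
```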
tup_info->arguments; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + if (ct->hash_value != hash_value) { + continue; + } + if (!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, m_relinfo.cc_nkeys, ct->keys, arguments)) { + continue; + } + if (unlikely(u_sess->attr.attr_common.IsInplaceUpgrade) && ct->tuple.t_self.ip_posid == 0) { + Assert(ct->tuple.t_tableOid == InvalidOid); + continue; + } + pg_atomic_fetch_add_u64(&ct->refcount, 1); + return ct; + } + return NULL; +} + +/* PROC_LIST_SKIP used by SearchBuiltinProcCacheList */ +GlobalCatCTup *GlobalSysTupCache::FindHashTupleFromCache(InsertCatTupInfo *tup_info) +{ + uint32 hash_value = tup_info->hash_value; + Index hash_index = tup_info->hash_index; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + if (ct->hash_value != hash_value) { + continue; /* quickly skip entry if wrong hash val */ + } + pg_atomic_fetch_add_u64(&ct->refcount, 1); + return ct; + } + return NULL; +} + +/* PGATTR_LIST_SKIP used by SearchPgAttributeCacheList */ +GlobalCatCTup *GlobalSysTupCache::FindPgAttrTupleFromCache(InsertCatTupInfo *tup_info) +{ + Index hash_index = tup_info->hash_index; + int16 attnum = tup_info->attnum; + CatalogRelationBuildParam *catalogDesc = tup_info->catalogDesc; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + bool attnumIsNull = false; + int curAttnum = DatumGetInt16(SysCacheGetAttr(cc_id, &ct->tuple, Anum_pg_attribute_attnum, &attnumIsNull)); + /* quickly skip entry if wrong tuple */ + if ((attnum < catalogDesc->natts && curAttnum != attnum) || + (attnum >= catalogDesc->natts && curAttnum != -(attnum - catalogDesc->natts + 1))) { + continue; + } + pg_atomic_fetch_add_u64(&ct->refcount, 1); + return ct; + } + return NULL; +} + +/* SCAN_LIST_SKIP used by searchlistinternal when scan */ +GlobalCatCTup *GlobalSysTupCache::FindSameTupleFromCache(InsertCatTupInfo *tup_info) +{ + uint32 hash_value = tup_info->hash_value; + Index hash_index = tup_info->hash_index; + HeapTuple ntp = tup_info->ntp; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) { + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + if (ct->hash_value != hash_value) { + continue; /* ignore dead entries */ + } + if (IsProcCache(m_relinfo.cc_reloid) && IsSystemObjOid(HeapTupleGetOid(&(ct->tuple))) && + likely(u_sess->attr.attr_common.IsInplaceUpgrade == false)) { + continue; + } + if (IsAttributeCache(m_relinfo.cc_reloid)) { + bool attIsNull = false; + Oid attrelid = + DatumGetObjectId(SysCacheGetAttr(cc_id, &(ct->tuple), Anum_pg_attribute_attrelid, &attIsNull)); + if (IsSystemObjOid(attrelid) && IsValidCatalogParam(GetCatalogParam(attrelid))) { + continue; + } + } + + if (!ItemPointerEqualsNoCheck(&(ct->tuple.t_self), &(ntp->t_self))) { + continue; /* not same tuple */ + } + pg_atomic_fetch_add_u64(&ct->refcount, 1); + return ct; + } + return NULL; +} + +/** + * insert a new tuple into globalcatcache, if there is already one, do nothing + */ +GlobalCatCTup *GlobalSysTupCache::InsertHeapTupleIntoGlobalCatCache(InsertCatTupInfo *tup_info) +{ + /* palloc before write lock */ + /* Allocate memory for GlobalCatCTup and the cached tuple in one go */ + MemoryContext oldcxt = MemoryContextSwitchTo(m_dbEntry->GetRandomMemCxt()); + GlobalCatCTup *new_ct = 
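The allocation immediately below, palloc(sizeof(GlobalCatCTup) + MAXIMUM_ALIGNOF + t_len), puts the header and the copied tuple body into a single block, with the body starting at the first max-aligned offset past the header (the other half of the scheme is in CopyTupleIntoGlobalCatCTup later in this file). A hypothetical standalone sketch of that layout, assuming 8-byte maximum alignment:

```
#include <cstdint>
#include <cstdlib>
#include <cstring>

#define ALIGN_UP(p) (((uintptr_t)(p) + 7) & ~(uintptr_t)7)  /* assumed 8-byte MAXALIGN */

struct EntryHeader {
    uint32_t len;
    /* ... refcount, hash value, list links ... */
};

EntryHeader *MakeEntry(const void *tupdata, uint32_t tuplen)
{
    /* one block: header + alignment pad + tuple body */
    char *block = (char *)malloc(sizeof(EntryHeader) + 8 + tuplen);
    if (block == NULL) {
        return NULL;
    }
    EntryHeader *h = (EntryHeader *)block;
    h->len = tuplen;
    char *body = (char *)ALIGN_UP(block + sizeof(EntryHeader));
    memcpy(body, tupdata, tuplen);      /* the tuple travels with its header */
    return h;
}
```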
(GlobalCatCTup *)palloc(sizeof(GlobalCatCTup) + MAXIMUM_ALIGNOF + tup_info->ntp->t_len); + MemoryContextSwitchTo(oldcxt); + new_ct->ct_magic = CT_MAGIC; + /* releases and free by my_cache when palloc fail */ + new_ct->my_cache = this; + new_ct->dead = false; + new_ct->hash_value = tup_info->hash_value; + DLInitElem(&new_ct->cache_elem, (void *)new_ct); + CopyTupleIntoGlobalCatCTup(new_ct, tup_info->ntp); + /* not find, now we insert the tuple into cache and unlock lock */ + new_ct->refcount = 1; + new_ct->canInsertGSC = true; + + /* insert bucket, only w bucket_lock, we has r m_concurrent_lock already */ + pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[tup_info->hash_index]; + /* find again, in case insert op when we create tuple */ + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + GlobalCatCTup *ct = FindTupleFromCache(tup_info); + if (unlikely(ct != NULL)) { + /* other thread has inserted one */ + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + pfree_ext(new_ct); + return ct; + } + + AddHeadToBucket(tup_info->hash_index, new_ct); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + return new_ct; +} + +static inline MemoryContext GetLSCMemcxt(int cache_id) +{ + /* use lsc's memcxt to palloc, and insert the element into lsc's hashtable */ + return t_thrd.lsc_cxt.lsc->systabcache.GetLocalTupCacheMemoryCxt(cache_id); +} +/** + * insert committed tuple into globalcatcache, uncommitted tuple into localcatcache + */ +GlobalCatCTup *GlobalSysTupCache::InsertHeapTupleIntoLocalCatCache(InsertCatTupInfo *tup_info) +{ + /* Allocate memory for GlobalCatCTup and the cached tuple in one struct */ + MemoryContext oldcxt = + MemoryContextSwitchTo(GetLSCMemcxt(m_cache_id)); + GlobalCatCTup *new_ct = (GlobalCatCTup *)palloc(sizeof(GlobalCatCTup) + MAXIMUM_ALIGNOF + tup_info->ntp->t_len); + MemoryContextSwitchTo(oldcxt); + new_ct->ct_magic = CT_MAGIC; + /* releases and free by my_cache when palloc fail */ + new_ct->my_cache = this; + new_ct->dead = false; + new_ct->hash_value = tup_info->hash_value; + DLInitElem(&new_ct->cache_elem, (void *)new_ct); + CopyTupleIntoGlobalCatCTup(new_ct, tup_info->ntp); + new_ct->refcount = 1; + new_ct->canInsertGSC = false; + return new_ct; +} +/** + * insert tuple into globalcatcache memory, others into localcatcache memory + */ +GlobalCatCTup *GlobalSysTupCache::InsertHeapTupleIntoCatCacheInSingle(InsertCatTupInfo *tup_info) +{ + HeapTuple ntp = tup_info->ntp; + if (HeapTupleHasExternal(ntp)) { + tup_info->ntp = toast_flatten_tuple(ntp, m_relinfo.cc_tupdesc); + Assert(tup_info->hash_value == + CatalogCacheComputeTupleHashValue(cc_id, m_relinfo.cc_keyno, m_relinfo.cc_tupdesc, m_relinfo.cc_hashfunc, + m_relinfo.cc_reloid, m_relinfo.cc_nkeys, tup_info->ntp)); + } + GlobalCatCTup *ct; + if (tup_info->canInsertGSC) { + /* the tuple must meet condition that xmin is committed and xmax == 0 */ + ct = InsertHeapTupleIntoGlobalCatCache(tup_info); + } else { + /* xmin uncommitted or xmax != 0, we dont care whether xmax is committed, just store it in localcatcache */ + ct = InsertHeapTupleIntoLocalCatCache(tup_info); + } + if (tup_info->ntp != ntp) { + heap_freetuple_ext(tup_info->ntp); + tup_info->ntp = ntp; + } + return ct; +} + +/** + * insert tuple into globalcatcache memory, others into localcatcache memory + * should call only by searchsyscachelist + * we do this check because when accept inval msg, we clear cc_list, even they are valid + */ +GlobalCatCTup *GlobalSysTupCache::InsertHeapTupleIntoCatCacheInList(InsertCatTupInfo *tup_info) +{ + 
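InsertHeapTupleIntoGlobalCatCache above allocates and fills the new entry before taking the bucket write lock, then searches the bucket again and throws its copy away if a concurrent inserter got there first; that keeps the critical section down to one probe plus one link. The same protocol in a minimal standalone form (illustrative names):

```
#include <pthread.h>
#include <list>

struct Entry {
    unsigned hash;
};

static pthread_rwlock_t g_bucket_lock = PTHREAD_RWLOCK_INITIALIZER;
static std::list<Entry *> g_bucket;

Entry *InsertUnique(unsigned hash)
{
    Entry *fresh = new Entry{hash};     /* build outside the lock */

    pthread_rwlock_wrlock(&g_bucket_lock);
    for (Entry *e : g_bucket) {
        if (e->hash == hash) {          /* a concurrent inserter won */
            pthread_rwlock_unlock(&g_bucket_lock);
            delete fresh;
            return e;
        }
    }
    g_bucket.push_front(fresh);         /* new entries go to the head */
    pthread_rwlock_unlock(&g_bucket_lock);
    return fresh;
}
```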
HeapTuple ntp = tup_info->ntp; + if (HeapTupleHasExternal(ntp)) { + tup_info->ntp = toast_flatten_tuple(ntp, m_relinfo.cc_tupdesc); + Assert(tup_info->hash_value == + CatalogCacheComputeTupleHashValue(cc_id, m_relinfo.cc_keyno, m_relinfo.cc_tupdesc, m_relinfo.cc_hashfunc, + m_relinfo.cc_reloid, m_relinfo.cc_nkeys, tup_info->ntp)); + } + GlobalCatCTup *ct = NULL; + if (tup_info->canInsertGSC) { + /* insert bucket, only rd bucket_lock, built in func never be changed */ + pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[tup_info->hash_index]; + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + ct = FindTupleFromCache(tup_info); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + if (unlikely(ct == NULL)) { + /* the tuple must meet condition that xmin is committed and xmax == 0 */ + ct = InsertHeapTupleIntoGlobalCatCache(tup_info); + } + } else { + /* the tuple must meet condition that xmin is committed and xmax == 0 */ + ct = InsertHeapTupleIntoLocalCatCache(tup_info); + } + + if (tup_info->ntp != ntp) { + heap_freetuple_ext(tup_info->ntp); + tup_info->ntp = ntp; + } + return ct; +} + +GlobalCatCTup *GlobalSysTupCache::SearchMissFromProcAndAttribute(InsertCatTupInfo *tup_info) +{ + /* for now, tup_info->type is SCAN_TUPLE_SKIP. it's ok */ + Datum *arguments = tup_info->arguments; + /* For search a function, we firstly try to search it in built-in function list */ + if (IsProcCache(m_relinfo.cc_reloid) && likely(u_sess->attr.attr_common.IsInplaceUpgrade == false)) { + CACHE2_elog(DEBUG2, "SearchGlobalCatCacheMiss(%d): function not found in pg_proc", cc_id); + HeapTuple ntp = SearchBuiltinProcCacheMiss(cc_id, m_relinfo.cc_nkeys, arguments); + if (HeapTupleIsValid(ntp)) { + CACHE2_elog(DEBUG2, "SearchGlobalCatCacheMiss(%d): match a built-in function", cc_id); + tup_info->ntp = ntp; + /* hard code, never change */ + Assert(ntp->t_tableOid == InvalidOid); + tup_info->canInsertGSC = true; + GlobalCatCTup *ct = InsertHeapTupleIntoCatCacheInSingle(tup_info); + heap_freetuple(ntp); + return ct; + } + } + + /* Insert hardcoded system catalogs' attributes into pg_attribute's syscache. 
*/ + if (IsAttributeCache(m_relinfo.cc_reloid) && IsSystemObjOid(DatumGetObjectId(arguments[0]))) { + CACHE2_elog(DEBUG2, "SearchGlobalCatCacheMiss: cat tuple not in cat cache %d", cc_id); + HeapTuple ntp = SearchPgAttributeCacheMiss(cc_id, m_relinfo.cc_tupdesc, m_relinfo.cc_nkeys, arguments); + if (HeapTupleIsValid(ntp)) { + tup_info->ntp = ntp; + /* hard code, never change */ + tup_info->canInsertGSC = true; + GlobalCatCTup *ct = InsertHeapTupleIntoCatCacheInSingle(tup_info); + heap_freetuple(ntp); + return ct; + } + } + return NULL; +} + +void AcquireGSCTableReadLock(bool *has_concurrent_lock, pthread_rwlock_t *concurrent_lock) +{ + int cur_index = t_thrd.lsc_cxt.lsc->rdlock_info.count; + if (unlikely(cur_index == MAX_GSC_READLOCK_COUNT) || + PthreadRWlockTryRdlock(LOCAL_SYSDB_RESOWNER, concurrent_lock) != 0) { + *has_concurrent_lock = false; + return; + } + *has_concurrent_lock = true; + t_thrd.lsc_cxt.lsc->rdlock_info.concurrent_lock[cur_index] = concurrent_lock; + t_thrd.lsc_cxt.lsc->rdlock_info.has_concurrent_lock[cur_index] = has_concurrent_lock; + t_thrd.lsc_cxt.lsc->rdlock_info.count++; +} +void ReleaseGSCTableReadLock(bool *has_concurrent_lock, pthread_rwlock_t *concurrent_lock) +{ + Assert(*has_concurrent_lock); + *has_concurrent_lock = false; + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, concurrent_lock); + Assert(t_thrd.lsc_cxt.lsc->rdlock_info.concurrent_lock[t_thrd.lsc_cxt.lsc->rdlock_info.count - 1] == + concurrent_lock); + Assert(t_thrd.lsc_cxt.lsc->rdlock_info.has_concurrent_lock[t_thrd.lsc_cxt.lsc->rdlock_info.count - 1] == + has_concurrent_lock); + t_thrd.lsc_cxt.lsc->rdlock_info.count--; +} + +GlobalCatCTup *GlobalSysTupCache::SearchTupleFromFile(uint32 hash_value, Datum *arguments, bool is_exclusive) +{ + InsertCatTupInfo tup_info; + tup_info.find_type = SCAN_TUPLE_SKIP; + tup_info.arguments = arguments; + tup_info.hash_value = hash_value; + tup_info.hash_index = HASH_INDEX(hash_value, (uint32)cc_nbuckets); + /* for cache read, make sure no one can clear syscache before we insert the result */ + tup_info.has_concurrent_lock = !is_exclusive; + tup_info.is_exclusive = is_exclusive; + Assert(is_exclusive); + GlobalCatCTup *ct = SearchTupleMiss(&tup_info); + if (ct != NULL) { + pg_atomic_fetch_add_u64(m_newloads, 1); + } + return ct; +} + +GlobalCatCTup *GlobalSysTupCache::SearchTupleMiss(InsertCatTupInfo *tup_info) +{ + GlobalCatCTup *ct = SearchMissFromProcAndAttribute(tup_info); + if (ct != NULL) { + return ct; + } + + Relation relation = heap_open(m_relinfo.cc_reloid, AccessShareLock); + ereport(DEBUG1, (errmsg("cache->cc_reloid - %d", m_relinfo.cc_reloid))); + /* + * Ok, need to make a lookup in the relation, copy the scankey and fill + * out any per-call fields. 
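AcquireGSCTableReadLock records every concurrent lock it takes in a small per-session array so error-recovery code can release outstanding read locks in LIFO order, and it reports failure instead of waiting when the array is full or tryrdlock loses to an invalidator. A simplified sketch of that bookkeeping, with thread_local standing in for the session-level structures:

```
#include <pthread.h>

enum { MAX_TRACKED_RDLOCKS = 4 };       /* stand-in for MAX_GSC_READLOCK_COUNT */

struct RdLockInfo {
    pthread_rwlock_t *locks[MAX_TRACKED_RDLOCKS];
    int count;
};

static thread_local RdLockInfo tls_rdlocks = {};

bool AcquireTracked(pthread_rwlock_t *lock)
{
    if (tls_rdlocks.count == MAX_TRACKED_RDLOCKS || pthread_rwlock_tryrdlock(lock) != 0) {
        return false;                   /* caller takes the no-GSC slow path */
    }
    tls_rdlocks.locks[tls_rdlocks.count++] = lock;
    return true;
}

void ReleaseTracked(pthread_rwlock_t *lock)
{
    /* must be the most recently acquired lock, mirroring the Asserts above */
    pthread_rwlock_unlock(lock);
    tls_rdlocks.count--;
}
```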
+ */ + ScanKeyData cur_skey[CATCACHE_MAXKEYS]; + errno_t rc = memcpy_s(cur_skey, sizeof(ScanKeyData) * CATCACHE_MAXKEYS, cc_skey, + sizeof(ScanKeyData) * m_relinfo.cc_nkeys); + securec_check(rc, "", ""); + Datum *arguments = tup_info->arguments; + cur_skey[0].sk_argument = arguments[0]; + cur_skey[1].sk_argument = arguments[1]; + cur_skey[2].sk_argument = arguments[2]; + cur_skey[3].sk_argument = arguments[3]; + SysScanDesc scandesc = + systable_beginscan(relation, m_relinfo.cc_indexoid, IndexScanOK(cc_id), NULL, + m_relinfo.cc_nkeys, cur_skey); + + if (tup_info->has_concurrent_lock) { + AcquireGSCTableReadLock(&tup_info->has_concurrent_lock, m_concurrent_lock); + } + HeapTuple ntp; + while (HeapTupleIsValid(ntp = systable_getnext(scandesc))) { + tup_info->ntp = ntp; + if (!tup_info->has_concurrent_lock) { + tup_info->canInsertGSC = false; + } else { + tup_info->canInsertGSC = CanTupleInertGSC(ntp); + if (!tup_info->canInsertGSC) { + /* unlock concurrent immediately, any one can invalid cache now */ + ReleaseGSCTableReadLock(&tup_info->has_concurrent_lock, m_concurrent_lock); + } + } + ct = InsertHeapTupleIntoCatCacheInSingle(tup_info); + break; /* assume only one match */ + } + /* unlock finally */ + if (tup_info->has_concurrent_lock) { + ReleaseGSCTableReadLock(&tup_info->has_concurrent_lock, m_concurrent_lock); + } + systable_endscan(scandesc); + heap_close(relation, AccessShareLock); + + /* + * global catcache match disk , not need negative tuple + */ + return ct; +} +/* + * SearchTupleInternal + * + * This call searches a system cache for a tuple, opening the relation + * if necessary (on the first access to a particular cache). + * + * The result is NULL if not found, or a pointer to a HeapTuple in + * the cache. The caller must not modify the tuple, and must call + * Release() when done with it. + * + * The search key values should be expressed as Datums of the key columns' + * datatype(s). (Pass zeroes for any unused parameters.) As a special + * exception, the passed-in key for a NAME column can be just a C string; + * the caller need not go to the trouble of converting it to a fully + * null-padded NAME. + */ +/* + * Work-horse for SearchGlobalCatCache/SearchGlobalCatCacheN. 
+ */ + +GlobalCatCTup *GlobalSysTupCache::SearchTupleInternal(uint32 hash_value, Datum *arguments) +{ + FreeDeadCts(); + pg_atomic_fetch_add_u64(m_searches, 1); + /* + * scan the hash bucket until we find a match or exhaust our tuples + */ + Index hash_index = HASH_INDEX(hash_value, (uint32)cc_nbuckets); + pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[hash_index]; + InsertCatTupInfo tup_info; + tup_info.find_type = SEARCH_TUPLE_SKIP; + tup_info.arguments = arguments; + tup_info.hash_value = hash_value; + tup_info.hash_index = hash_index; + int location = INVALID_LOCATION; + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + GlobalCatCTup *ct = FindSearchKeyTupleFromCache(&tup_info, &location); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + + if (ct != NULL) { + pg_atomic_fetch_add_u64(m_hits, 1); + TopnLruMoveToFront(&ct->cache_elem, GetBucket(hash_index), bucket_lock, location); + return ct; + } + + /* not match */ + tup_info.find_type = SCAN_TUPLE_SKIP; + bool canInsertGSC = !g_instance.global_sysdbcache.StopInsertGSC(); + if (unlikely(GetBucket(hash_index)->dll_len >= MAX_GSC_LIST_LENGTH)) { + RemoveTailTupleElements(hash_index); + /* maybe no element can be swappedout */ + canInsertGSC = canInsertGSC && GetBucket(hash_index)->dll_len < MAX_GSC_LIST_LENGTH; + } + tup_info.is_exclusive = !canInsertGSC; + tup_info.has_concurrent_lock = canInsertGSC; + + ct = SearchTupleMiss(&tup_info); + if (ct != NULL) { + pg_atomic_fetch_add_u64(m_newloads, 1); + } + return ct; +} + +GlobalCatCList *GlobalSysTupCache::FindListInternal(uint32 hash_value, int nkeys, Datum *arguments, int *location) +{ + int index = 0; + for (Dlelem *elt = DLGetHead(&cc_lists); elt; elt = DLGetSucc(elt)) { + if (index > MAX_GSC_LIST_LENGTH) { + /* cc_lists is too long, the tail elements should be swapout */ + break; + } + index++; + GlobalCatCList *cl = (GlobalCatCList *)DLE_VAL(elt); + if (likely(cl->hash_value != hash_value)) { + continue; /* quickly skip entry if wrong hash val */ + } + if (cl->nkeys != nkeys) { + continue; + } + if (!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, nkeys, cl->keys, arguments)) { + continue; + } + /* Bump the list's refcount */ + pg_atomic_fetch_add_u64(&cl->refcount, 1); + CACHE2_elog(DEBUG2, "SearchGlobalCatCacheList(%s): found list", m_relinfo.cc_relname); + *location = index; + return cl; + /* Global list never be negative */ + } + return NULL; +} +/* + * SearchGlobalCatCacheList + * + * Generate a list of all tuples matching a partial key (that is, + * a key specifying just the first K of the cache's N key columns). + * + * The caller must not modify the list object or the pointed-to tuples, + * and must call Release() when done with the list. 
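A hypothetical caller-side shape for SearchTupleInternal as defined above; FetchSomething is an assumed name, but SearchTupleInternal and Release are the functions in this file. Since the global catcache stores no negative entries, a NULL result simply means no matching catalog row exists:

```
#include "utils/knl_globalsystupcache.h"

/* assumed usage sketch; not a function from this patch */
void FetchSomething(GlobalSysTupCache *cache, uint32 hash_value, Datum *keys)
{
    GlobalCatCTup *ct = cache->SearchTupleInternal(hash_value, keys);
    if (ct == NULL) {
        return;             /* miss: no row, and nothing negative is cached */
    }
    /* ... read-only use of ct->tuple; callers must never modify it ... */
    ct->Release();          /* unpin so invalidation and swapout can reclaim it */
}
```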
+ */ +GlobalCatCList *GlobalSysTupCache::SearchListInternal(uint32 hash_value, int nkeys, Datum *arguments) +{ + FreeDeadCls(); + Assert(nkeys > 0 && nkeys < m_relinfo.cc_nkeys); + int location = INVALID_LOCATION; + PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + GlobalCatCList *cl = FindListInternal(hash_value, nkeys, arguments, &location); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + + if (cl != NULL) { + ResourceOwnerRememberGlobalCatCList(LOCAL_SYSDB_RESOWNER, cl); + TopnLruMoveToFront(&cl->cache_elem, &cc_lists, m_list_rw_lock, location); + return cl; + } + + bool canInsertGSC = !g_instance.global_sysdbcache.StopInsertGSC(); + /* if cc_lists is too long, swapout it */ + if (cc_lists.dll_len >= MAX_GSC_LIST_LENGTH) { + RemoveTailListElements(); + /* maybe no element can be swappedout */ + canInsertGSC = canInsertGSC && cc_lists.dll_len < MAX_GSC_LIST_LENGTH; + } + cl = SearchListFromFile(hash_value, nkeys, arguments, !canInsertGSC); + return cl; +} + +GlobalCatCList *GlobalSysTupCache::CreateCatCacheList(InsertCatListInfo *list_info) +{ + MemoryContext old; + if (list_info->canInsertGSC) { + old = MemoryContextSwitchTo(m_dbEntry->GetRandomMemCxt()); + } else { + old = MemoryContextSwitchTo(GetLSCMemcxt(m_cache_id)); + } + GlobalCatCList *new_cl = (GlobalCatCList *)palloc(offsetof(GlobalCatCList, members) + + (list_length(list_info->ctlist) + 1) * sizeof(GlobalCatCTup *)); + ResourceOwnerRememberGlobalCatCList(LOCAL_SYSDB_RESOWNER, new_cl); + new_cl->refcount = 1; + new_cl->canInsertGSC = false; + new_cl->nkeys = list_info->nkeys; + /* releases and free by my_cache when palloc fail */ + new_cl->my_cache = this; + new_cl->cl_magic = CL_MAGIC; + new_cl->hash_value = list_info->hash_value; + DLInitElem(&new_cl->cache_elem, new_cl); + new_cl->ordered = list_info->ordered; + new_cl->n_members = list_length(list_info->ctlist); + int index = 0; + ListCell *lc; + foreach (lc, list_info->ctlist) { + new_cl->members[index] = (GlobalCatCTup *)lfirst(lc); + index++; + } + /* + * Avoid double-free elemnts of list_info->ctlist by Resowner and Try-Catch(), + * to be safely we free info_list->ctlist here to avoid double-free happen, normally + * Resowner will do this + */ + list_free_ext(list_info->ctlist); + + Assert(index == new_cl->n_members); + errno_t rc = memset_s(new_cl->keys, list_info->nkeys * sizeof(Datum), 0, list_info->nkeys * sizeof(Datum)); + securec_check(rc, "", ""); + + /* palloc maybe fail */ + CatCacheCopyKeys(m_relinfo.cc_tupdesc, list_info->nkeys, m_relinfo.cc_keyno, list_info->arguments, new_cl->keys); + MemoryContextSwitchTo(old); + new_cl->canInsertGSC = list_info->canInsertGSC; + return new_cl; +} + +GlobalCatCList *GlobalSysTupCache::InsertListIntoCatCacheList(InsertCatListInfo *list_info, GlobalCatCList *cl) +{ + if (!cl->canInsertGSC) { + return cl; + } + + int location = INVALID_LOCATION; + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + GlobalCatCList *exist_cl = FindListInternal(list_info->hash_value, list_info->nkeys, list_info->arguments, + &location); + if (exist_cl != NULL) { + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + ResourceOwnerForgetGlobalCatCList(LOCAL_SYSDB_RESOWNER, cl); + ResourceOwnerRememberGlobalCatCList(LOCAL_SYSDB_RESOWNER, exist_cl); + /* we need mark clist's refcont to 0 then do real free up. 
*/ + cl->refcount = 0; + FreeGlobalCatCList(cl); + cl = exist_cl; + } else { + AddHeadToCCList(cl); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + } + return cl; +} + +GlobalCatCList *GlobalSysTupCache::SearchListMiss(InsertCatListInfo *list_info) +{ + Relation relation; + ScanKeyData cur_skey[CATCACHE_MAXKEYS]; + SysScanDesc scandesc; + errno_t rc; + + /* + * Ok, need to make a lookup in the relation, copy the scankey and + * fill out any per-call fields. + */ + rc = memcpy_s(cur_skey, sizeof(ScanKeyData) * CATCACHE_MAXKEYS, cc_skey, + sizeof(ScanKeyData) * m_relinfo.cc_nkeys); + securec_check(rc, "", ""); + cur_skey[0].sk_argument = list_info->arguments[0]; + cur_skey[1].sk_argument = list_info->arguments[1]; + cur_skey[2].sk_argument = list_info->arguments[2]; + cur_skey[3].sk_argument = list_info->arguments[3]; + + relation = heap_open(m_relinfo.cc_reloid, AccessShareLock); + + scandesc = systable_beginscan(relation, m_relinfo.cc_indexoid, IndexScanOK(cc_id), + NULL, list_info->nkeys, cur_skey); + if (list_info->has_concurrent_lock) { + AcquireGSCTableReadLock(&list_info->has_concurrent_lock, m_concurrent_lock); + } + + /* The list will be ordered iff we are doing an index scan */ + list_info->ordered = (scandesc->irel != NULL); + + HeapTuple ntp; + InsertCatTupInfo tup_info; + tup_info.find_type = SCAN_LIST_SKIP; + /* for list check, we cannot use arguments, instead we use t_self */ + tup_info.arguments = NULL; + while (HeapTupleIsValid(ntp = systable_getnext(scandesc))) { + if (IsProcCache(m_relinfo.cc_reloid) && IsSystemObjOid(HeapTupleGetOid(ntp)) && + likely(u_sess->attr.attr_common.IsInplaceUpgrade == false)) { + continue; + } + if (IsAttributeCache(m_relinfo.cc_reloid)) { + bool attIsNull = false; + Oid attrelid = DatumGetObjectId(SysCacheGetAttr(cc_id, ntp, Anum_pg_attribute_attrelid, &attIsNull)); + if (IsSystemObjOid(attrelid) && IsValidCatalogParam(GetCatalogParam(attrelid))) { + continue; + } + } + /* + * See if there's an entry for this tuple already. + */ + InitInsertCatTupInfo(&tup_info, ntp, list_info->arguments); + tup_info.canInsertGSC = list_info->has_concurrent_lock && CanTupleInertGSC(ntp); + GlobalCatCTup *ct = InsertHeapTupleIntoCatCacheInList(&tup_info); + list_info->canInsertGSC = list_info->canInsertGSC && ct->canInsertGSC; + /* + * Careful here: enlarge resource owner catref array, add entry to ctlist, then bump its refcount. + * This way leaves state correct if enlarge or lappend runs out of memory_context_list + * We use resource owner to track referenced cachetups for safety reasons. Because ctlist is now + * built from different sources, i.e. built-in catalogs and physical relation tuples. If failure in later + * sources is not caught, resource owner will clean up ref count for cachetups got from previous sources. 
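CreateCatCacheList and FreeGlobalCatCList fix the ownership rule for lists: each member tuple is pinned once on the list's behalf when appended, and releasing the list releases every member in turn. A compact sketch of that rule with simplified stand-ins:

```
#include <vector>

struct Tup {
    unsigned long refcount = 0;
    void Pin() { ++refcount; }
    void Release() { --refcount; }
};

struct TupList {
    std::vector<Tup *> members;

    void AddMember(Tup *t)
    {
        t->Pin();               /* the list holds one reference per member */
        members.push_back(t);
    }

    ~TupList()
    {
        for (Tup *t : members) {
            t->Release();       /* mirrors FreeGlobalCatCList */
        }
    }
};
```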
+ */ + list_info->ctlist = lappend(list_info->ctlist, ct); + } + systable_endscan(scandesc); + heap_close(relation, AccessShareLock); + if (list_info->has_concurrent_lock && (!list_info->canInsertGSC || list_length(list_info->ctlist) == 0)) { + ReleaseGSCTableReadLock(&list_info->has_concurrent_lock, m_concurrent_lock); + list_info->canInsertGSC = false; + } + GlobalCatCList *cl = CreateCatCacheList(list_info); + return cl; +} + +GlobalCatCList *GlobalSysTupCache::SearchListFromFile(uint32 hash_value, int nkeys, Datum *arguments, + bool is_exclusive) +{ + InsertCatListInfo list_info; + list_info.ordered = false; + list_info.ctlist = NIL; + list_info.nkeys = nkeys; + list_info.arguments = arguments; + list_info.hash_value = hash_value; + list_info.is_exclusive = is_exclusive; + list_info.has_concurrent_lock = !is_exclusive; + list_info.canInsertGSC = !is_exclusive; + /* + * List was not found in cache, so we have to build it by reading the + * relation. For each matching tuple found in the relation, use an + * existing cache entry if possible, else build a new one. + * + * We have to bump the member refcounts temporarily to ensure they won't + * get dropped from the cache while loading other members. We use a PG_TRY + * block to ensure we can undo those refcounts if we get an error before + * we finish constructing the CatCList. + */ + + /* Firstly, check the builtin functions. if there are functions + * which has the same name with the one we want to find, lappend it + * into the ctlist + */ + if (IsProcCache(m_relinfo.cc_reloid) && CacheIsProcNameArgNsp(cc_id) && + likely(u_sess->attr.attr_common.IsInplaceUpgrade == false)) { + SearchBuiltinProcCacheList(&list_info); + } + + if (IsAttributeCache(m_relinfo.cc_reloid) && IsSystemObjOid(DatumGetObjectId(arguments[0]))) { + SearchPgAttributeCacheList(&list_info); + } + GlobalCatCList *cl = NULL; + PG_TRY(); + { + cl = SearchListMiss(&list_info); + } + PG_CATCH(); + { + ReleaseAllGSCRdConcurrentLock(); + ReleaseTempList(list_info.ctlist); + PG_RE_THROW(); + } + PG_END_TRY(); + + cl = InsertListIntoCatCacheList(&list_info, cl); + if (list_info.has_concurrent_lock) { + ReleaseGSCTableReadLock(&list_info.has_concurrent_lock, m_concurrent_lock); + } + return cl; +} + +/*cat list***********************************************************************************************************/ +void GlobalSysTupCache::ReleaseTempList(const List *ctlist) +{ + ListCell *ctlist_item = NULL; + GlobalCatCTup *ct = NULL; + foreach (ctlist_item, ctlist) { + ct = (GlobalCatCTup *)lfirst(ctlist_item); + ct->Release(); + } +} + +void GlobalSysTupCache::InitInsertCatTupInfo(InsertCatTupInfo *tup_info, HeapTuple ntp, Datum *arguments) +{ + uint32 hash_value = CatalogCacheComputeTupleHashValue(cc_id, m_relinfo.cc_keyno, m_relinfo.cc_tupdesc, + m_relinfo.cc_hashfunc, m_relinfo.cc_reloid, m_relinfo.cc_nkeys, ntp); + Index hash_index = HASH_INDEX(hash_value, (uint32)(cc_nbuckets)); + tup_info->arguments = arguments; + tup_info->hash_value = hash_value; + tup_info->hash_index = hash_index; + tup_info->ntp = ntp; +} + +void GlobalSysTupCache::SearchPgAttributeCacheList(InsertCatListInfo *list_info) +{ + const FormData_pg_attribute *catlogAttrs = NULL; + Assert(list_info->nkeys == 1); + Oid relOid = ObjectIdGetDatum(list_info->arguments[0]); + CatalogRelationBuildParam catalogDesc = GetCatalogParam(relOid); + catlogAttrs = catalogDesc.attrs; + if (catalogDesc.oid == InvalidOid) { + return; + } + PG_TRY(); + { + bool hasBucketAttr = false; + for (int16 attnum = 0; attnum < 
catalogDesc.natts + GetSysAttLength(hasBucketAttr); attnum++) { + Form_pg_attribute attr; + FormData_pg_attribute tempAttr; + if (attnum < catalogDesc.natts) { + tempAttr = catlogAttrs[attnum]; + attr = &tempAttr; + } else { + int16 index = attnum - catalogDesc.natts; + if (!catalogDesc.hasoids && index == 1) { + continue; + } + /* The system table does not have the bucket column, so incoming false */ + attr = SystemAttributeDefinition(-(index + 1), catalogDesc.hasoids, false, false); + attr->attrelid = relOid; + } + HeapTuple ntp = GetPgAttributeAttrTuple(m_relinfo.cc_tupdesc, attr); + InsertCatTupInfo tup_info; + tup_info.find_type = PGATTR_LIST_SKIP; + InitInsertCatTupInfo(&tup_info, ntp, list_info->arguments); + /* hard code, never change */ + tup_info.is_exclusive = list_info->is_exclusive; + tup_info.canInsertGSC = true; + tup_info.attnum = attnum; + tup_info.catalogDesc = &catalogDesc; + GlobalCatCTup *ct = InsertHeapTupleIntoCatCacheInList(&tup_info); + heap_freetuple(ntp); + /* + * Careful here: enlarge resource owner catref array, add entry to ctlist, then bump its refcount. + * This way leaves state correct if enlarge or lappend runs out of memory_context_list + * We use resource owner to track referenced cachetups for safety reasons. Because ctlist is now + * built from different sources, i.e. built-in catalogs and physical relation tuples. If failure in later + * sources is not caught, resource owner will clean up ref count for cachetups got from previous sources. + */ + list_info->ctlist = lappend(list_info->ctlist, ct); + } + } + PG_CATCH(); + { + ReleaseAllGSCRdConcurrentLock(); + ReleaseTempList(list_info->ctlist); + PG_RE_THROW(); + } + PG_END_TRY(); +} + +void GlobalSysTupCache::SearchBuiltinProcCacheList(InsertCatListInfo *list_info) +{ + int i; + GlobalCatCTup *ct = NULL; + const FuncGroup *gfuncs = NULL; + Assert(list_info->nkeys == 1); + char *funcname = NameStr(*(DatumGetName(list_info->arguments[0]))); + gfuncs = SearchBuiltinFuncByName(funcname); + if (gfuncs == NULL) { + CACHE3_elog(DEBUG2, "%s: the function \"%s\" does not in built-in list", __FUNCTION__, funcname); + return; + } + PG_TRY(); + { + for (i = 0; i < gfuncs->fnums; i++) { + const Builtin_func *bfunc = &gfuncs->funcs[i]; + HeapTuple ntp = CreateHeapTuple4BuiltinFunc(bfunc, NULL); + InsertCatTupInfo tup_info; + tup_info.find_type = PROC_LIST_SKIP; + InitInsertCatTupInfo(&tup_info, ntp, list_info->arguments); + /* hard code, never change */ + tup_info.is_exclusive = list_info->is_exclusive; + tup_info.canInsertGSC = true; + ct = InsertHeapTupleIntoCatCacheInList(&tup_info); + heap_freetuple(ntp); + list_info->ctlist = lappend(list_info->ctlist, ct); + } + } + PG_CATCH(); + { + ReleaseAllGSCRdConcurrentLock(); + ReleaseTempList(list_info->ctlist); + PG_RE_THROW(); + } + PG_END_TRY(); +} + +/*create init cat****************************************************************************************************/ +/* + * CatalogCacheCreateEntry + * Create a new GlobalCatCTup entry, copying the given HeapTuple and other + * supplied data into it. The new entry initially has refcount 0. 
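SearchBuiltinProcCacheList above materializes tuples from a compiled-in function table rather than a disk scan, which is why those entries can be flagged canInsertGSC = true unconditionally: hard-coded rows are never updated or invalidated. A toy sketch of a compiled-in lookup; the two example rows reuse the stock PostgreSQL OIDs for lower and upper purely for illustration:

```
#include <cstring>

struct BuiltinFunc {
    const char *name;
    int oid;
};

static const BuiltinFunc kBuiltins[] = {
    {"lower", 870},     /* assumed example entries */
    {"upper", 871},
};

const BuiltinFunc *FindBuiltin(const char *name)
{
    for (const BuiltinFunc &f : kBuiltins) {
        if (strcmp(f.name, name) == 0) {
            return &f;  /* hard-coded: never stale, safe to share globally */
        }
    }
    return NULL;
}
```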
+ * this func makes sure the tuple has a shared memory context + * this func never fail + */ +void GlobalSysTupCache::CopyTupleIntoGlobalCatCTup(GlobalCatCTup *ct, HeapTuple dtp) +{ + Assert(!HeapTupleHasExternal(dtp)); + ct->tuple = *dtp; + Assert(dtp->tupTableType == HEAP_TUPLE); + ct->tuple.tupTableType = HEAP_TUPLE; + ct->tuple.t_data = (HeapTupleHeader)MAXALIGN(((char *)ct) + sizeof(GlobalCatCTup)); + /* copy tuple contents */ + errno_t rc = memcpy_s((char *)ct->tuple.t_data, dtp->t_len, (const char *)dtp->t_data, dtp->t_len); + securec_check(rc, "", ""); + + /* extract keys - they'll point into the tuple if not by-value */ + for (int i = 0; i < m_relinfo.cc_nkeys; i++) { + Datum atp; + bool isnull = false; + atp = heap_getattr(&ct->tuple, m_relinfo.cc_keyno[i], m_relinfo.cc_tupdesc, &isnull); +#ifndef ENABLE_MULTIPLE_NODES + Assert(!isnull || ((m_relinfo.cc_reloid == ProcedureRelationId) && atp == 0)); +#else + Assert(!isnull); +#endif + ct->keys[i] = atp; + } +} + +void GlobalSysTupCache::InitHashTable() +{ + MemoryContext old = MemoryContextSwitchTo(m_dbEntry->GetRandomMemCxt()); + /* this func allow palloc fail */ + size_t sz = (cc_nbuckets + 1) * sizeof(Dllist) + PG_CACHE_LINE_SIZE; + if (cc_buckets == NULL) { + cc_buckets = (Dllist *)CACHELINEALIGN(palloc0(sz)); + } + if (m_bucket_rw_locks == NULL) { + m_bucket_rw_locks = (pthread_rwlock_t *)palloc0((cc_nbuckets + 1) * sizeof(pthread_rwlock_t)); + for (int i = 0; i <= cc_nbuckets; i++) { + PthreadRwLockInit(&m_bucket_rw_locks[i], NULL); + } + } + if (m_is_tup_swappingouts == NULL) { + m_is_tup_swappingouts = (volatile uint32 *)palloc0(sizeof(volatile uint32) * (cc_nbuckets + 1)); + } + m_is_list_swappingout = 0; + + (void)MemoryContextSwitchTo(old); +} + +/* call on global memory context */ +void GlobalSysTupCache::InitCacheInfo(Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets) +{ + int i; + + /* + * nbuckets is the number of hash buckets to use in this GlobalCatCache. + * Currently we just use a hard-wired estimate of an appropriate size for + * each cache; maybe later make them dynamically resizable? + * + * nbuckets must be a power of two. We check this via Assert rather than + * a full runtime check because the values will be coming from constant + * tables. + * + * If you're confused by the power-of-two check, see comments in + * bitmapset.c for an explanation. + */ + Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets); + /* + * first switch to the cache context so our allocations do not vanish at + * the end of a transaction + */ + cc_nbuckets = ResizeHashBucket(nbuckets, g_instance.global_sysdbcache.dynamic_hash_bucket_strategy); + /* + * Allocate a new cache structure, aligning to a cacheline boundary + * + * Note: we assume zeroing initializes the Dllist headers correctly + */ + InitHashTable(); + MemoryContext old = MemoryContextSwitchTo(m_dbEntry->GetRandomMemCxt()); + DLInitList(&cc_lists); + /* + * initialize the cache's relation information for the relation + * corresponding to this cache, and initialize some of the new cache's + * other internal fields. But don't open the relation yet. 
+ */ + cc_id = m_cache_id; + m_relinfo.cc_relname = NULL; + m_relinfo.cc_reloid = reloid; + m_relinfo.cc_indexoid = indexoid; + m_relinfo.cc_tupdesc = (TupleDesc)NULL; + m_relinfo.cc_nkeys = nkeys; + for (i = 0; i < nkeys; ++i) { + m_relinfo.cc_keyno[i] = key[i]; + } + + if (m_list_rw_lock == NULL) { + m_list_rw_lock = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t)); + PthreadRwLockInit(m_list_rw_lock, NULL); + } + if (m_concurrent_lock == NULL) { + m_concurrent_lock = (pthread_rwlock_t *)palloc0(sizeof(pthread_rwlock_t)); + PthreadRwLockInit(m_concurrent_lock, NULL); + } + + (void)MemoryContextSwitchTo(old); +} + +void GlobalSysTupCache::InitRelationInfo() +{ + Relation relation; + int i; + + /* + * During inplace or online upgrade, the to-be-fabricated catalogs are still missing, + * for which we can not throw an ERROR. + */ + LockRelationOid(m_relinfo.cc_reloid, AccessShareLock); + + relation = RelationIdGetRelation(m_relinfo.cc_reloid); + if (!RelationIsValid(relation)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not open relation with OID %u", m_relinfo.cc_reloid))); + } + + pgstat_initstats(relation); + + /* + * switch to the cache context so our allocations do not vanish at the end + * of a transaction + */ + + MemoryContext old = MemoryContextSwitchTo(m_dbEntry->GetRandomMemCxt()); + + /* + * copy the relcache's tuple descriptor to permanent cache storage + */ + if (m_relinfo.cc_tupdesc == NULL) { + m_relinfo.cc_tupdesc = CopyTupleDesc(RelationGetDescr(relation)); + } + + /* + * save the relation's name and relisshared flag, too (cc_relname is used + * only for debugging purposes) + */ + if (m_relinfo.cc_relname == NULL) { + m_relinfo.cc_relname = pstrdup(RelationGetRelationName(relation)); + } + Assert(m_relinfo.cc_relisshared == RelationGetForm(relation)->relisshared); + + /* + * return to the caller's memory context and close the rel + */ + MemoryContextSwitchTo(old); + + enable_rls = RelationEnableRowSecurity(relation); + + heap_close(relation, AccessShareLock); + + /* + * initialize cache's key information + */ + for (i = 0; i < m_relinfo.cc_nkeys; ++i) { + Oid keytype; + RegProcedure eqfunc; + if (m_relinfo.cc_keyno[i] > 0) + keytype = m_relinfo.cc_tupdesc->attrs[m_relinfo.cc_keyno[i] - 1]->atttypid; + else { + if (m_relinfo.cc_keyno[i] != ObjectIdAttributeNumber) + ereport(FATAL, (errmsg("only sys attr supported in caches is OID"))); + keytype = OIDOID; + } + GetCCHashEqFuncs(keytype, &m_relinfo.cc_hashfunc[i], &eqfunc, &m_relinfo.cc_fastequal[i]); + /* + * Do equality-function lookup (we assume this won't need a catalog + * lookup for any supported type) + */ + pfree_ext(cc_skey[i].sk_func.fnLibPath); + fmgr_info_cxt(eqfunc, &cc_skey[i].sk_func, m_dbEntry->GetRandomMemCxt()); + /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */ + cc_skey[i].sk_attno = m_relinfo.cc_keyno[i]; + /* Fill in sk_strategy as well --- always standard equality */ + cc_skey[i].sk_strategy = BTEqualStrategyNumber; + cc_skey[i].sk_subtype = InvalidOid; + if (keytype == TEXTOID) { + cc_skey[i].sk_collation = DEFAULT_COLLATION_OID; + } else { + cc_skey[i].sk_collation = InvalidOid; + } + } +} + +/*class func*******************************************************************************************************/ +void GlobalSysTupCache::Init() +{ + if (m_isInited) { + return; + } + /* this init called by systab, when a write lock is aquired + * init func followed by new, so m_isInited must be false + * lock first when call this func */ + const 
cachedesc *cache_info = &cacheinfo[m_cache_id]; + InitCacheInfo(cache_info->reloid, cache_info->indoid, cache_info->nkeys, cache_info->key, cache_info->nbuckets); + InitRelationInfo(); + pg_memory_barrier(); + m_isInited = true; +} + +/* + * CatalogCacheIdInvalidate + * + * Invalidate entries in the specified cache, given a hash value. + * + * We delete cache entries that match the hash value. + * We don't care whether the invalidation is the result + * of a tuple insertion or a deletion. + * + * We used to try to match positive cache entries by TID, but that is + * unsafe after a VACUUM FULL on a system catalog: an inval event could + * be queued before VACUUM FULL, and then processed afterwards, when the + * target tuple that has to be invalidated has a different TID than it + * did when the event was created. So now we just compare hash values and + * accept the small risk of unnecessary invalidations due to false matches. + * + * This routine is only quasi-public: it should only be used by inval.c. + */ +/* + * HeapTupleInvalidate() + * + * pay attention: + * match disk one by one, so call this func when update , insert or delete every time + */ +void GlobalSysTupCache::HashValueInvalidate(uint32 hash_value) +{ + if (!m_isInited) { + return; + } + /* + * inspect caches to find the proper cache + * list of cache_header is never changed after init, so lock is not needed + * Invalidate *all *CatCLists in this cache; it's too hard to tell + * which searches might still be correct, so just zap 'em all. + */ + /* avoid other session read from table and havn't insert into gsc */ + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_concurrent_lock); + + if (cc_lists.dll_len > 0) { + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + for (Dlelem *elt = DLGetHead(&cc_lists); elt;) { + GlobalCatCList *cl = (GlobalCatCList *)DLE_VAL(elt); + elt = DLGetSucc(elt); + HandleDeadGlobalCatCList(cl); + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock); + } else { + /* no one can insert because we have m_concurrent_lock exclusive lock + * no one can scan table now */ + } + + uint32 hash_index = HASH_INDEX(hash_value, cc_nbuckets); + if (GetBucket(hash_index)->dll_len > 0) { + pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[hash_index]; + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt;) { + GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (hash_value != ct->hash_value) { + continue; + } + HandleDeadGlobalCatCTup(ct); + /* maybe multi dead tuples, find them all */ + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + } else { + /* no one can insert because we have m_concurrent_lock exclusive lock + * no one can scan table now */ + } + + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_concurrent_lock); +} + +/* + * ResetGlobalCatCache + * + * Reset one catalog cache to empty. + * + * This is not very efficient if the target cache is nearly empty. + * However, it shouldn't need to be efficient; we don't invoke it often. 
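As the comment above explains, invalidation events carry a hash value rather than a TID, so a consumer only needs the cache id and the key hash. A hypothetical handler under that assumption, wired through GlobalSysTabCache::InvalidTuples from earlier in this patch (OnCatCacheInvalMsg is an assumed name, not part of the patch):

```
#include "utils/knl_globalsystabcache.h"

/* assumed message-consumer sketch */
void OnCatCacheInvalMsg(GlobalSysTabCache *systab, int cache_id, uint32 hash_value)
{
    /* reset == false: only tuples whose key hash matches are marked dead;
     * every CatCList is dropped regardless, since membership may have changed */
    systab->InvalidTuples(cache_id, hash_value, false);
}
```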
+ */
+template <bool force>
+void GlobalSysTupCache::ResetCatalogCache()
+{
+    if (!m_isInited) {
+        return;
+    }
+
+    /* if not forced, we are merely swapping out, so m_concurrent_lock is not needed */
+    if (force) {
+        PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_concurrent_lock);
+    }
+
+    if (cc_lists.dll_len > 0) {
+        PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock);
+        for (Dlelem *elt = DLGetHead(&cc_lists); elt;) {
+            GlobalCatCList *cl = (GlobalCatCList *)DLE_VAL(elt);
+            elt = DLGetSucc(elt);
+            if (force || cl->refcount == 0) {
+                HandleDeadGlobalCatCList(cl);
+            }
+        }
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_list_rw_lock);
+    }
+
+    /* Remove each tuple in this cache */
+    for (int i = 0; i < cc_nbuckets; i++) {
+        if (GetBucket(i)->dll_len == 0) {
+            continue;
+        }
+        pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[i];
+        PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, bucket_lock);
+        for (Dlelem *elt = DLGetHead(&cc_buckets[i]); elt;) {
+            GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt);
+            elt = DLGetSucc(elt);
+            if (force || ct->refcount == 0) {
+                HandleDeadGlobalCatCTup(ct);
+            }
+        }
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock);
+    }
+
+    if (force) {
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, m_concurrent_lock);
+    }
+}
+template void GlobalSysTupCache::ResetCatalogCache<false>();
+template void GlobalSysTupCache::ResetCatalogCache<true>();
+
+List *GlobalSysTupCache::GetGlobalCatCTupStat()
+{
+    List *tuple_stat_list = NIL;
+    if (!m_isInited) {
+        return tuple_stat_list;
+    }
+
+    /* Walk each bucket and report every cached tuple */
+    for (int i = 0; i < cc_nbuckets; i++) {
+        if (GetBucket(i)->dll_len == 0) {
+            continue;
+        }
+        pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[i];
+        PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, bucket_lock);
+        for (Dlelem *elt = DLGetHead(&cc_buckets[i]); elt;) {
+            GlobalCatCTup *ct = (GlobalCatCTup *)DLE_VAL(elt);
+            elt = DLGetSucc(elt);
+            GlobalCatalogTupleStat *tup_stat = (GlobalCatalogTupleStat*)palloc(sizeof(GlobalCatalogTupleStat));
+            tup_stat->db_oid = m_dbOid;
+            tup_stat->db_name_datum = CStringGetTextDatum(m_dbEntry->m_dbName);
+            tup_stat->rel_oid = m_relinfo.cc_reloid;
+            tup_stat->rel_name_datum = CStringGetTextDatum(m_relinfo.cc_relname);
+            tup_stat->cache_id = m_cache_id;
+            tup_stat->self = ct->tuple.t_self;
+            tup_stat->ctid = ct->tuple.t_data->t_ctid;
+            tup_stat->infomask = ct->tuple.t_data->t_infomask;
+            tup_stat->infomask2 = ct->tuple.t_data->t_infomask2;
+            tup_stat->hash_value = ct->hash_value;
+            tup_stat->refcount = ct->refcount;
+            tuple_stat_list = lappend(tuple_stat_list, tup_stat);
+        }
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock);
+    }
+    return tuple_stat_list;
+}
+
+#ifndef ENABLE_MULTIPLE_NODES
+GlobalCatCTup *GlobalSysTupCache::SearchTupleWithArgModes(uint32 hash_value, Datum *arguments, oidvector* argModes)
+{
+    FreeDeadCts();
+    pg_atomic_fetch_add_u64(m_searches, 1);
+    /*
+     * scan the hash bucket until we find a match or exhaust our tuples
+     */
+    Index hash_index = HASH_INDEX(hash_value, (uint32)cc_nbuckets);
+    pthread_rwlock_t *bucket_lock = &m_bucket_rw_locks[hash_index];
+    InsertCatTupInfo tup_info;
+    tup_info.find_type = SEARCH_TUPLE_SKIP;
+    tup_info.arguments = arguments;
+    tup_info.hash_value = hash_value;
+    tup_info.hash_index = hash_index;
+    int location = INVALID_LOCATION;
+
+    int index = 0;
+    GlobalCatCTup *ct = NULL;
+    PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, bucket_lock);
+    for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) {
+        index++;
+        ct = (GlobalCatCTup *)DLE_VAL(elt);
+        if (ct->hash_value != hash_value) {
+            ct
= NULL; + continue; + } + if (!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, m_relinfo.cc_nkeys, ct->keys, arguments)) { + ct = NULL; + continue; + } + if (unlikely(u_sess->attr.attr_common.IsInplaceUpgrade) && ct->tuple.t_self.ip_posid == 0) { + Assert(ct->tuple.t_tableOid == InvalidOid); + ct = NULL; + continue; + } + /* + * Make sure that the in-out-info(argmodes) should be the same. + */ + if (!IsProArgModesEqualByTuple(&ct->tuple, m_relinfo.cc_tupdesc, argModes)) { + ct = NULL; + continue; + } + pg_atomic_fetch_add_u64(&ct->refcount, 1); + location = index; + break; + } + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, bucket_lock); + + if (ct != NULL) { + pg_atomic_fetch_add_u64(m_hits, 1); + TopnLruMoveToFront(&ct->cache_elem, GetBucket(hash_index), bucket_lock, location); + return ct; + } + + /* not match */ + tup_info.find_type = SCAN_TUPLE_SKIP; + bool canInsertGSC = !g_instance.global_sysdbcache.StopInsertGSC(); + /* if cc_lists is too long, swapout it */ + if (cc_lists.dll_len >= MAX_GSC_LIST_LENGTH) { + RemoveTailListElements(); + /* maybe no element can be swappedout */ + canInsertGSC = canInsertGSC && cc_lists.dll_len < MAX_GSC_LIST_LENGTH; + } + + tup_info.is_exclusive = !canInsertGSC; + tup_info.has_concurrent_lock = canInsertGSC; + ct = SearchTupleMissWithArgModes(&tup_info, argModes); + if (ct != NULL) { + pg_atomic_fetch_add_u64(m_newloads, 1); + } + return ct; +} + +GlobalCatCTup *GlobalSysTupCache::SearchTupleFromFileWithArgModes( + uint32 hash_value, Datum *arguments, oidvector* argModes, bool is_exclusive) +{ + InsertCatTupInfo tup_info; + tup_info.find_type = SCAN_TUPLE_SKIP; + tup_info.arguments = arguments; + tup_info.hash_value = hash_value; + tup_info.hash_index = HASH_INDEX(hash_value, (uint32)cc_nbuckets); + /* not match */ + tup_info.has_concurrent_lock = !is_exclusive; + tup_info.is_exclusive = is_exclusive; + Assert(is_exclusive); + GlobalCatCTup *ct = SearchTupleMissWithArgModes(&tup_info, argModes); + if (ct != NULL) { + pg_atomic_fetch_add_u64(m_newloads, 1); + } + return ct; +} + +GlobalCatCTup *GlobalSysTupCache::SearchTupleMissWithArgModes(InsertCatTupInfo *tup_info, oidvector* argModes) +{ + GlobalCatCTup *ct = SearchMissFromProcAndAttribute(tup_info); + if (ct != NULL) { + return ct; + } + Relation relation = heap_open(m_relinfo.cc_reloid, AccessShareLock); + ereport(DEBUG1, (errmsg("cache->cc_reloid - %d", m_relinfo.cc_reloid))); + /* + * Ok, need to make a lookup in the relation, copy the scankey and fill + * out any per-call fields. + */ + ScanKeyData cur_skey[CATCACHE_MAXKEYS]; + errno_t rc = memcpy_s(cur_skey, sizeof(ScanKeyData) * CATCACHE_MAXKEYS, cc_skey, + sizeof(ScanKeyData) * m_relinfo.cc_nkeys); + securec_check(rc, "", ""); + Datum *arguments = tup_info->arguments; + cur_skey[0].sk_argument = arguments[0]; + cur_skey[1].sk_argument = arguments[1]; + cur_skey[2].sk_argument = arguments[2]; + cur_skey[3].sk_argument = arguments[3]; + SysScanDesc scandesc = + systable_beginscan(relation, m_relinfo.cc_indexoid, IndexScanOK(cc_id), NULL, + m_relinfo.cc_nkeys, cur_skey); + /* for cache read, make sure no one can clear syscache before we insert the result */ + tup_info->has_concurrent_lock = !tup_info->is_exclusive; + + if (tup_info->has_concurrent_lock) { + AcquireGSCTableReadLock(&tup_info->has_concurrent_lock, m_concurrent_lock); + } + HeapTuple ntp; + while (HeapTupleIsValid(ntp = systable_getnext(scandesc))) { + /* + * The key2 (pg_proc_allargtypes) can be duplicate in table. 
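+         * (e.g. "f(IN a int, OUT b int)" and "f(IN a int, IN b int)" share the
+         * same allargtypes key and differ only in their proargmodes)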
+ * We need to compare the proargmodes to make sure the function is correct. + */ + if (!IsProArgModesEqualByTuple(ntp, m_relinfo.cc_tupdesc, argModes)) { + continue; + } + + tup_info->ntp = ntp; + if (!tup_info->has_concurrent_lock) { + tup_info->canInsertGSC = false; + } else { + tup_info->canInsertGSC = CanTupleInertGSC(ntp); + if (!tup_info->canInsertGSC) { + /* unlock concurrent immediately, any one can invalid cache now */ + ReleaseGSCTableReadLock(&tup_info->has_concurrent_lock, m_concurrent_lock); + } + } + ct = InsertHeapTupleIntoCatCacheInSingle(tup_info); + break; /* assume only one match */ + } + /* unlock finally */ + if (tup_info->has_concurrent_lock) { + ReleaseGSCTableReadLock(&tup_info->has_concurrent_lock, m_concurrent_lock); + } + systable_endscan(scandesc); + heap_close(relation, AccessShareLock); + + /* + * global catcache match disk , not need negative tuple + */ + return ct; +} +#endif diff --git a/src/common/backend/utils/cache/knl_globaltabdefcache.cpp b/src/common/backend/utils/cache/knl_globaltabdefcache.cpp new file mode 100644 index 000000000..20cddf93e --- /dev/null +++ b/src/common/backend/utils/cache/knl_globaltabdefcache.cpp @@ -0,0 +1,632 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ + +#include "executor/executor.h" +#include "utils/knl_globaltabdefcache.h" +#include "commands/trigger.h" +#include "rewrite/rewriteRlsPolicy.h" +#include "utils/builtins.h" +#include "knl/knl_session.h" +#include "utils/palloc.h" +#include "utils/memutils.h" +#include "utils/knl_relcache.h" +#include "utils/knl_catcache.h" +#include "utils/partitionmap.h" +#include "catalog/indexing.h" +#include "catalog/pg_publication.h" + +static void RelationPointerToNULL(Relation rel) +{ + rel->rd_smgr = NULL; + rel->rd_rel = NULL; + rel->rd_att = NULL; + rel->rd_rules = NULL; + rel->rd_rulescxt = NULL; + rel->trigdesc = NULL; + rel->rd_rlsdesc = NULL; + rel->rd_indexlist = NIL; + rel->rd_indexattr = NULL; + rel->rd_keyattr = NULL; + rel->rd_idattr = NULL; + rel->rd_pkattr = NULL; + rel->rd_pubactions = NULL; + rel->rd_options = NULL; + rel->rd_index = NULL; + rel->rd_indextuple = NULL; + rel->rd_am = NULL; + rel->rd_indexcxt = NULL; + rel->rd_aminfo = NULL; + rel->rd_opfamily = NULL; + rel->rd_opcintype = NULL; + rel->rd_support = NULL; + rel->rd_supportinfo = NULL; + rel->rd_indoption = NULL; + rel->rd_indexprs = NULL; + rel->rd_indpred = NULL; + rel->rd_exclops = NULL; + rel->rd_exclprocs = NULL; + rel->rd_exclstrats = NULL; + rel->rd_amcache = NULL; + rel->rd_indcollation = NULL; + rel->rd_fdwroutine = NULL; + rel->rd_bucketkey = NULL; + rel->partMap = NULL; + rel->pgstat_info = NULL; + rel->rd_locator_info = NULL; + rel->sliceMap = NULL; + rel->parent = NULL; + /* double linked list node, partition and bucket relation would be stored in fakerels list of resource owner */ + rel->node.prev = NULL; + rel->node.next = NULL; +} + +static void *CopyPubActions(void 
*pubActions) +{ + if (pubActions == NULL) { + return NULL; + } + PublicationActions *res = (PublicationActions*)palloc(sizeof(PublicationActions)); + errno_t rc = memcpy_s(res, sizeof(PublicationActions), pubActions, sizeof(PublicationActions)); + securec_check(rc, "", ""); + return res; +} + +static Form_pg_class CopyRelationRdrel(Relation rel) +{ + Assert(rel->rd_rel != NULL); + /* Copy the relation tuple form + * + * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. The + * variable-length fields (relacl, reloptions) are NOT stored in the + * relcache --- there'd be little point in it, since we don't copy the + * tuple's nulls bitmap and hence wouldn't know if the values are valid. + * Bottom line is that relacl *cannot* be retrieved from the relcache. Get + * it from the syscache if you need it. The same goes for the original + * form of reloptions (however, we do store the parsed form of reloptions + * in rd_options). + */ + Form_pg_class rd_rel = (Form_pg_class)palloc(sizeof(FormData_pg_class)); + errno_t rc = memcpy_s(rd_rel, sizeof(FormData_pg_class), rel->rd_rel, CLASS_TUPLE_SIZE); + securec_check(rc, "", ""); + return rd_rel; +} + +TupleDesc CopyTupleDesc(TupleDesc tupdesc) +{ + TupleDesc rd_att = CreateTupleDescCopyConstr(tupdesc); + if (tupdesc->constr == NULL) { + return rd_att; + } + /* TupleConstrCopy dont copy clusterKeys info, so we copy it manually */ + TupleConstr *dst = rd_att->constr; + TupleConstr *src = tupdesc->constr; + Assert(src != NULL); + dst->clusterKeyNum = src->clusterKeyNum; + Assert(dst->clusterKeys == NULL); + if (dst->clusterKeyNum == 0) { + return rd_att; + } + size_t len = sizeof(AttrNumber) * dst->clusterKeyNum; + dst->clusterKeys = (AttrNumber *)palloc(len); + errno_t rc = memcpy_s(dst->clusterKeys, len, src->clusterKeys, len); + securec_check(rc, "", ""); + return rd_att; +} + +static RuleLock *CopyRelationRules(Relation rel, MemoryContext rules_cxt) +{ + if (rel->rd_rules == NULL) { + return NULL; + } + MemoryContext old = MemoryContextSwitchTo(rules_cxt); + RuleLock *rd_rules = (RuleLock *)palloc(sizeof(RuleLock)); + rd_rules->numLocks = rel->rd_rules->numLocks; + RewriteRule **rules = (RewriteRule **)palloc(sizeof(RewriteRule *) * rel->rd_rules->numLocks); + rd_rules->rules = rules; + for (int i = 0; i < rel->rd_rules->numLocks; i++) { + rules[i] = (RewriteRule *)palloc(sizeof(RewriteRule)); + *rules[i] = *rel->rd_rules->rules[i]; + rules[i]->actions = (List *)copyObject(rel->rd_rules->rules[i]->actions); + rules[i]->qual = (Node *)copyObject(rel->rd_rules->rules[i]->qual); + } + MemoryContextSwitchTo(old); + return rd_rules; +} + +static RlsPoliciesDesc *CopyRelationRls(Relation rel, MemoryContext rls_cxt) +{ + if (rel->rd_rlsdesc == NULL) { + return NULL; + } + MemoryContext old = MemoryContextSwitchTo(rls_cxt); + RlsPoliciesDesc *rd_rlsdesc = (RlsPoliciesDesc *)palloc(sizeof(RlsPoliciesDesc)); + rd_rlsdesc->rlsCxt = rls_cxt; + rd_rlsdesc->rlsPolicies = NULL; + ListCell *lc; + foreach (lc, rel->rd_rlsdesc->rlsPolicies) { + RlsPolicy *dst_rls = (RlsPolicy *)palloc(sizeof(RlsPolicy)); + RlsPolicy *src_rls = (RlsPolicy *)lfirst(lc); + *dst_rls = *src_rls; + dst_rls->policyName = pstrdup(src_rls->policyName); + dst_rls->roles = DatumGetArrayTypePCopy(src_rls->roles); + dst_rls->usingExpr = (Expr *)copyObject(src_rls->usingExpr); + rd_rlsdesc->rlsPolicies = lappend(rd_rlsdesc->rlsPolicies, dst_rls); + } + MemoryContextSwitchTo(old); + return rd_rlsdesc; +} + +extern bytea *CopyOption(bytea *options) +{ + if (options == NULL) { + 
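/* no reloptions to copy; the cached copy keeps rd_options as NULL too */
+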
return NULL; + } + bytea *copy = (bytea *)palloc(VARSIZE(options)); + errno_t rc = memcpy_s(copy, VARSIZE(options), options, VARSIZE(options)); + /* this func called also by partcopy, which has no memcxt protect, so pfree when memcpy failed */ + securec_check(rc, (char *)copy, ""); + return copy; +} + +static Form_pg_am CopyRelationAm(Relation rel) +{ + if (rel->rd_am == NULL) { + return NULL; + } + Form_pg_am rd_am = (Form_pg_am)palloc(sizeof(FormData_pg_am)); + *rd_am = *rel->rd_am; + return rd_am; +} + +static void CopyRelationIndexAccessInfo(Relation newrel, Relation rel, MemoryContext index_cxt) +{ + if (!RelationIsIndex(rel)) { + Assert(rel->rd_indnkeyatts == 0); + Assert(rel->rd_indexcxt == NULL); + return; + } + + newrel->rd_indexcxt = index_cxt; + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); + int nsupport = rel->rd_am->amsupport * RelationGetNumberOfAttributes(rel); + newrel->rd_aminfo = NULL; + + newrel->rd_opfamily = (Oid*)MemoryContextAllocZero(index_cxt, indnkeyatts * sizeof(Oid)); + newrel->rd_opcintype = (Oid*)MemoryContextAllocZero(index_cxt, indnkeyatts * sizeof(Oid)); + + if (rel->rd_am->amsupport > 0) { + newrel->rd_support = (RegProcedure*)MemoryContextAllocZero(index_cxt, nsupport * sizeof(RegProcedure)); + newrel->rd_supportinfo = (FmgrInfo*)MemoryContextAllocZero(index_cxt, nsupport * sizeof(FmgrInfo)); + } else { + newrel->rd_support = NULL; + newrel->rd_supportinfo = NULL; + } + + newrel->rd_indcollation = (Oid*)MemoryContextAllocZero(index_cxt, indnkeyatts * sizeof(Oid)); + + newrel->rd_indoption = (int16*)MemoryContextAllocZero(index_cxt, indnkeyatts * sizeof(int16)); + + errno_t rc = memcpy_s(newrel->rd_opfamily, indnkeyatts * sizeof(Oid), rel->rd_opfamily, indnkeyatts * sizeof(Oid)); + securec_check(rc, "", ""); + rc = memcpy_s(newrel->rd_opcintype, indnkeyatts * sizeof(Oid), rel->rd_opcintype, indnkeyatts * sizeof(Oid)); + securec_check(rc, "", ""); + if (nsupport > 0) { + rc = memcpy_s(newrel->rd_support, nsupport * sizeof(RegProcedure), + rel->rd_support, nsupport * sizeof(RegProcedure)); + securec_check(rc, "", ""); + rc = memcpy_s(newrel->rd_supportinfo, nsupport * sizeof(FmgrInfo), + rel->rd_supportinfo, nsupport * sizeof(FmgrInfo)); + securec_check(rc, "", ""); + } + rc = memcpy_s(newrel->rd_indcollation, indnkeyatts * sizeof(Oid), rel->rd_indcollation, indnkeyatts * sizeof(Oid)); + securec_check(rc, "", ""); + rc = memcpy_s(newrel->rd_indoption, indnkeyatts * sizeof(int16), rel->rd_indoption, indnkeyatts * sizeof(int16)); + securec_check(rc, "", ""); + MemoryContext oldcxt = MemoryContextSwitchTo(index_cxt); + if (rel->rd_indexprs) { + newrel->rd_indexprs = (List *)copyObject(rel->rd_indexprs); + } + if (rel->rd_indpred) { + newrel->rd_indpred = (List *)copyObject(rel->rd_indpred); + } + (void)MemoryContextSwitchTo(oldcxt); + if (rel->rd_exclops == NULL) { + Assert(rel->rd_exclprocs == NULL); + Assert(rel->rd_exclstrats == NULL); + newrel->rd_exclops = NULL; + newrel->rd_exclprocs = NULL; + newrel->rd_exclstrats = NULL; + } else { + Assert(rel->rd_exclprocs != NULL); + Assert(rel->rd_exclstrats != NULL); + newrel->rd_exclops = (Oid*)MemoryContextAlloc(index_cxt, sizeof(Oid) * indnkeyatts); + newrel->rd_exclprocs = (Oid*)MemoryContextAlloc(index_cxt, sizeof(Oid) * indnkeyatts); + newrel->rd_exclstrats = (uint16*)MemoryContextAlloc(index_cxt, sizeof(uint16) * indnkeyatts); + + rc = memcpy_s(newrel->rd_exclops, sizeof(Oid) * indnkeyatts, rel->rd_exclops, sizeof(Oid) * indnkeyatts); + securec_check(rc, "", ""); + rc = 
memcpy_s(newrel->rd_exclprocs, sizeof(Oid) * indnkeyatts, rel->rd_exclprocs, sizeof(Oid) * indnkeyatts); + securec_check(rc, "", ""); + rc = memcpy_s(newrel->rd_exclstrats, sizeof(uint16) * indnkeyatts, + rel->rd_exclstrats, sizeof(uint16) * indnkeyatts); + securec_check(rc, "", ""); + } + + /* not inited by relcache */ + newrel->rd_amcache = NULL; +} + +static RelationBucketKey *CopyRelationBucketKey(Relation rel) +{ + if (rel->rd_bucketkey == NULL) { + return NULL; + } + RelationBucketKey *rd_bucketkey = (RelationBucketKey *)palloc(sizeof(RelationBucketKey)); + rd_bucketkey->bucketKey = int2vectorCopy(rel->rd_bucketkey->bucketKey); + int n_column = rel->rd_bucketkey->bucketKey->dim1; + int n_col_len = sizeof(Oid) * n_column; + rd_bucketkey->bucketKeyType = (Oid *)palloc(n_col_len); + errno_t rc = memcpy_s(rd_bucketkey->bucketKeyType, n_col_len, rel->rd_bucketkey->bucketKeyType, n_col_len); + securec_check(rc, "", ""); + return rd_bucketkey; +} + +static PartitionMap *CopyRangePartitionMap(RangePartitionMap *src_rpm) +{ + RangePartitionMap *dst_rpm = (RangePartitionMap *)palloc(sizeof(RangePartitionMap)); + + *dst_rpm = *src_rpm; + + dst_rpm->partitionKey = int2vectorCopy(src_rpm->partitionKey); + + size_t key_len = sizeof(Oid) * src_rpm->partitionKey->dim1; + dst_rpm->partitionKeyDataType = (Oid *)palloc(key_len); + errno_t rc = memcpy_s(dst_rpm->partitionKeyDataType, key_len, src_rpm->partitionKeyDataType, key_len); + securec_check(rc, "", ""); + + dst_rpm->rangeElements = + copyRangeElements(src_rpm->rangeElements, src_rpm->rangeElementsNum, src_rpm->partitionKey->dim1); + + if (src_rpm->type.type == PART_TYPE_INTERVAL) { + Assert(src_rpm->partitionKey->dim1 == 1); + dst_rpm->intervalValue = (Interval *)palloc(sizeof(Interval)); + *dst_rpm->intervalValue = *src_rpm->intervalValue; +#define OidVectorSize(n) (offsetof(oidvector, values) + (n) * sizeof(Oid)) + if (src_rpm->intervalTablespace != NULL) { + size_t interval_len = OidVectorSize(src_rpm->intervalTablespace->dim1); + dst_rpm->intervalTablespace = (oidvector *)palloc(interval_len); + rc = memcpy_s(dst_rpm->intervalTablespace, interval_len, src_rpm->intervalTablespace, interval_len); + securec_check(rc, "", ""); + } else { + Assert(dst_rpm->intervalTablespace == NULL); + } + } + return (PartitionMap *)dst_rpm; +} + +static PartitionMap *CopyPartitionMap(Relation rel) +{ + if (rel->partMap == NULL) { + return NULL; + } + switch (rel->partMap->type) { + case PART_TYPE_VALUE: { + ValuePartitionMap *dst_vpm = (ValuePartitionMap *)palloc0(sizeof(ValuePartitionMap)); + ValuePartitionMap *src_vpm = (ValuePartitionMap *)rel->partMap; + dst_vpm->type = src_vpm->type; + dst_vpm->relid = src_vpm->relid; + dst_vpm->partList = list_copy(src_vpm->partList); + return (PartitionMap *)dst_vpm; + } + case PART_TYPE_RANGE: + case PART_TYPE_INTERVAL: { + return (PartitionMap *)CopyRangePartitionMap((RangePartitionMap *)rel->partMap); + } + case PART_TYPE_LIST: { + ListPartitionMap *dst_lpm = (ListPartitionMap *)palloc(sizeof(ListPartitionMap)); + ListPartitionMap *src_lpm = (ListPartitionMap *)rel->partMap; + *dst_lpm = *src_lpm; + dst_lpm->partitionKey = int2vectorCopy(src_lpm->partitionKey); + size_t key_len = sizeof(Oid) * src_lpm->partitionKey->dim1; + dst_lpm->partitionKeyDataType = (Oid *)palloc(key_len); + errno_t rc = memcpy_s(dst_lpm->partitionKeyDataType, key_len, src_lpm->partitionKeyDataType, key_len); + securec_check(rc, "", ""); + dst_lpm->listElements = CopyListElements(src_lpm->listElements, src_lpm->listElementsNum); + return 
(PartitionMap *)dst_lpm; + } + case PART_TYPE_HASH: { + HashPartitionMap *dst_hpm = (HashPartitionMap *)palloc(sizeof(HashPartitionMap)); + HashPartitionMap *src_hpm = (HashPartitionMap *)rel->partMap; + *dst_hpm = *src_hpm; + dst_hpm->partitionKey = int2vectorCopy(src_hpm->partitionKey); + size_t key_len = sizeof(Oid) * src_hpm->partitionKey->dim1; + dst_hpm->partitionKeyDataType = (Oid *)palloc(key_len); + errno_t rc = memcpy_s(dst_hpm->partitionKeyDataType, key_len, src_hpm->partitionKeyDataType, key_len); + securec_check(rc, "", ""); + dst_hpm->hashElements = + CopyHashElements(src_hpm->hashElements, src_hpm->hashElementsNum, src_hpm->partitionKey->dim1); + return (PartitionMap *)dst_hpm; + } + default: + ereport(ERROR, + (errcode(ERRCODE_PARTITION_ERROR), + errmsg("Fail to copy partitionmap for partitioned table \"%u\".", RelationGetRelid(rel)), + errdetail("Incorrect partition strategy \"%c\" for partitioned table.", rel->partMap->type))); + return NULL; + } + return NULL; +} + +static RelationLocInfo *CopyRelationLocInfoWithOutBucketPtr(RelationLocInfo *srcInfo) +{ + /* locator info is used only for IS_PGXC_COORDINATOR */ + if (srcInfo == NULL || !IS_PGXC_COORDINATOR) { + return NULL; + } + RelationLocInfo *dst_info = CopyRelationLocInfo(srcInfo); + dst_info->buckets_ptr = NULL; + return dst_info; +} + +static PartitionMap *CopyRelationSliceMap(Relation rel) +{ + if (rel->sliceMap == NULL) { + return NULL; + } + return CopyRangePartitionMap((RangePartitionMap *)rel->sliceMap); +} + +static void SpecialWorkForGlobalRel(Relation rel) +{ + /* see define in rel.h */ + rel->rd_createSubid = InvalidSubTransactionId; + rel->rd_newRelfilenodeSubid = InvalidSubTransactionId; + Assert(rel->rd_isnailed ? rel->rd_refcnt == 1 : rel->rd_refcnt == 0); + /* see RelationInitLockInfo in lmgr.cpp */ + Assert(rel->rd_lockInfo.lockRelId.bktId == InvalidOid); + /* global cache never open file */ + Assert(rel->rd_smgr == NULL); + /* refcnt must be one if isnailed or zero */ + Assert(rel->rd_isnailed ? 
rel->rd_refcnt == 1 : rel->rd_refcnt == 0); + + Assert(!g_instance.global_sysdbcache.IsCritialForInitSysCache(rel->rd_id) || + (rel->rd_isnailed && rel->rd_refcnt == 1)); + + /* 0 invalid 1 yes 2 transaction tmp */ + Assert(rel->rd_indexvalid <= 1); + Assert((rel->rd_node.dbNode == InvalidOid && rel->rd_node.spcNode == GLOBALTABLESPACE_OID) || + (rel->rd_node.dbNode != InvalidOid && rel->rd_node.spcNode != GLOBALTABLESPACE_OID)); + Assert(rel->rd_node.spcNode != InvalidOid); + Assert(rel->rd_node.relNode != InvalidOid); + if (rel->rd_locator_info != NULL) { + Assert(IS_PGXC_COORDINATOR && rel->rd_id >= FirstNormalObjectId); + List *old_node_list = rel->rd_locator_info->nodeList; + ListCell *lc; + foreach (lc, old_node_list) { + lfirst_int(lc) = PGXCNodeGetNodeOid(lfirst_int(lc), PGXC_NODE_DATANODE); + } + } +} + +Relation CopyRelationData(Relation newrel, Relation rel, MemoryContext rules_cxt, MemoryContext rls_cxt, + MemoryContext index_cxt) +{ + /* if you add variable to relation, please check if you need put it in gsc, + * if not, set it zero when copy, and reinit it when local get the copy result + * otherwise, do the copy work here + * if the variable changed, there is no lock and no rel inval msg, + * set it zero and reinit it when copy into local */ + Assert(sizeof(RelationData) == 520); + /* all copied exclude pointer */ + *newrel = *rel; + Assert(rel->rd_createSubid == InvalidSubTransactionId); + Assert(rel->rd_newRelfilenodeSubid == InvalidSubTransactionId); + /* init all pointers to NULL, so we can free memory correctly when meeting exception */ + RelationPointerToNULL(newrel); + + newrel->rd_rel = CopyRelationRdrel(rel); + newrel->rd_att = CopyTupleDesc(rel->rd_att); + Assert(rel->rd_att->tdrefcount == 1); + newrel->rd_att->tdrefcount = 1; + Assert(rel->rd_att->tdhasoid == rel->rd_rel->relhasoids); + + newrel->rd_rulescxt = rules_cxt; + newrel->rd_rules = CopyRelationRules(rel, rules_cxt); + /* CopyTriggerDesc check null pointer */ + newrel->trigdesc = CopyTriggerDesc(rel->trigdesc); + newrel->rd_rlsdesc = CopyRelationRls(rel, rls_cxt); + + /* this is oid list, just copy list */ + newrel->rd_indexlist = list_copy(rel->rd_indexlist); + /* bms_copy check null pointer */ + newrel->rd_indexattr = bms_copy(rel->rd_indexattr); + newrel->rd_keyattr = bms_copy(rel->rd_keyattr); + newrel->rd_idattr = bms_copy(rel->rd_idattr); + newrel->rd_pkattr = bms_copy(rel->rd_pkattr); + + newrel->rd_pubactions = CopyPubActions(rel->rd_pubactions); + + newrel->rd_options = CopyOption(rel->rd_options); + + newrel->rd_indextuple = heap_copytuple(rel->rd_indextuple); + if (newrel->rd_indextuple != NULL) { + newrel->rd_index = (Form_pg_index)GETSTRUCT(newrel->rd_indextuple); + } else { + newrel->rd_index = NULL; + } + + newrel->rd_am = CopyRelationAm(rel); + + CopyRelationIndexAccessInfo(newrel, rel, index_cxt); + + /* not inited by relcache */ + newrel->rd_fdwroutine = NULL; + + newrel->rd_bucketkey = CopyRelationBucketKey(rel); + + newrel->partMap = (PartitionMap *)CopyPartitionMap(rel); + + newrel->pgstat_info = NULL; + + newrel->rd_locator_info = CopyRelationLocInfoWithOutBucketPtr(rel->rd_locator_info); + + Assert(rel->sliceMap == NULL || IsLocatorDistributedBySlice(rel->rd_locator_info->locatorType)); + newrel->sliceMap = CopyRelationSliceMap(rel); + + newrel->entry = NULL; + return newrel; +} + +void GlobalTabDefCache::Insert(Relation rel, uint32 hash_value) +{ + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + /* dllist is too long, swapout some */ + if 
(m_bucket_list.GetBucket(hash_index)->dll_len >= MAX_GSC_LIST_LENGTH) { + GlobalBaseDefCache::RemoveTailElements(hash_index); + /* maybe no element can be swappedout */ + return; + } + Assert((m_is_shared && g_instance.global_sysdbcache.HashSearchSharedRelation(rel->rd_id)) || + ((!m_is_shared && !g_instance.global_sysdbcache.HashSearchSharedRelation(rel->rd_id)))); + + Assert(rel->rd_bucketkey != (RelationBucketKey *)&(rel->rd_bucketkey)); + + GlobalRelationEntry *entry = CreateEntry(rel); + PthreadRWlockWrlock(LOCAL_SYSDB_RESOWNER, &m_obj_locks[hash_index]); + bool found = GlobalBaseDefCache::EntryExist(rel->rd_id, hash_index); + if (found) { + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_obj_locks[hash_index]); + entry->Free(entry); + return; + } + + GlobalBaseDefCache::AddHeadToBucket(hash_index, entry); + PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, &m_obj_locks[hash_index]); + pg_atomic_fetch_add_u64(m_newloads, 1); +} + +/* write lock need */ +GlobalRelationEntry *GlobalTabDefCache::CreateEntry(Relation rel) +{ + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + ResourceOwnerEnlargeGlobalBaseEntry(owner); + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + GlobalRelationEntry *entry = (GlobalRelationEntry *)palloc(sizeof(GlobalRelationEntry)); + entry->type = GLOBAL_RELATION_ENTRY; + entry->rel_mem_manager = NULL; + entry->oid = rel->rd_id; + entry->refcount = 0; + DLInitElem(&entry->cache_elem, (void *)entry); + ResourceOwnerRememberGlobalBaseEntry(owner, (GlobalBaseEntry *)entry); + entry->rel_mem_manager = + AllocSetContextCreate(CurrentMemoryContext, RelationGetRelationName(rel), ALLOCSET_SMALL_MINSIZE, + ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE, SHARED_CONTEXT); + (void)MemoryContextSwitchTo(entry->rel_mem_manager); + entry->rel = (Relation)palloc0(sizeof(RelationData)); + CopyRelationData(entry->rel, rel, entry->rel_mem_manager, entry->rel_mem_manager, entry->rel_mem_manager); + SpecialWorkForGlobalRel(entry->rel); + ResourceOwnerForgetGlobalBaseEntry(owner, (GlobalBaseEntry *)entry); + MemoryContextSwitchTo(old); + + return entry; +} + +void GlobalTabDefCache::Init() +{ + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + GlobalBaseDefCache::Init(GLOBAL_INIT_RELCACHE_SIZE); + m_catalog_lock = (pthread_mutex_t *)palloc(sizeof(pthread_mutex_t)); + pthread_mutex_init(m_catalog_lock, NULL); + MemoryContextSwitchTo(old); + m_is_inited = true; +} + +TupleDesc GlobalTabDefCache::GetPgClassDescriptor() +{ + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + PthreadMutexLock(owner, m_catalog_lock, true); + if (m_pgclassdesc != NULL) { + PthreadMutexUnlock(owner, m_catalog_lock, true); + return m_pgclassdesc; + } + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + TupleDesc tmp = BuildHardcodedDescriptor(Natts_pg_class, Desc_pg_class, true); + PthreadMutexUnlock(owner, m_catalog_lock, true); + m_pgclassdesc = tmp; + MemoryContextSwitchTo(old); + return m_pgclassdesc; +} + +TupleDesc GlobalTabDefCache::GetPgIndexDescriptor() +{ + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + PthreadMutexLock(owner, m_catalog_lock, true); + if (m_pgindexdesc != NULL) { + PthreadMutexUnlock(owner, m_catalog_lock, true); + return m_pgindexdesc; + } + MemoryContext old = MemoryContextSwitchTo(m_db_entry->GetRandomMemCxt()); + TupleDesc tmp = BuildHardcodedDescriptor(Natts_pg_index, Desc_pg_index, false); + PthreadMutexUnlock(owner, m_catalog_lock, true); + m_pgindexdesc = tmp; + MemoryContextSwitchTo(old); + return m_pgindexdesc; +} + 
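/*
+ * Both descriptor getters above share one pattern: take m_catalog_lock, return
+ * the descriptor if another thread already built it, otherwise build the
+ * hardcoded descriptor in a shared memory context and publish it for all
+ * threads.  A minimal caller sketch (hypothetical names, for illustration only):
+ *
+ *     GlobalTabDefCache *tab_cache = ...;  // e.g. taken from a GlobalSysDBCacheEntry
+ *     TupleDesc pg_class_desc = tab_cache->GetPgClassDescriptor();
+ *     // the descriptor is shared by all threads; callers must not modify or free it
+ */
+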
+GlobalTabDefCache::GlobalTabDefCache(Oid db_id, bool is_shared, struct GlobalSysDBCacheEntry *entry)
+    : GlobalBaseDefCache(db_id, is_shared, entry, RELKIND_RELATION)
+{
+    m_pgclassdesc = NULL;
+    m_pgindexdesc = NULL;
+    m_catalog_lock = NULL;
+    m_is_inited = false;
+}
+
+List *GlobalTabDefCache::GetTableStats(Oid rel_oid)
+{
+    List *table_stat_list = NIL;
+    if (!m_is_inited) {
+        return table_stat_list;
+    }
+
+    /* Collect the stat of each entry in this cache */
+    for (int hash_index = 0; hash_index < m_nbuckets; hash_index++) {
+        if (m_bucket_list.GetBucket(hash_index)->dll_len == 0) {
+            continue;
+        }
+        pthread_rwlock_t *obj_lock = &m_obj_locks[hash_index];
+        PthreadRWlockRdlock(LOCAL_SYSDB_RESOWNER, obj_lock);
+        for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt;) {
+            GlobalRelationEntry *entry = (GlobalRelationEntry *)DLE_VAL(elt);
+            elt = DLGetSucc(elt);
+            if (rel_oid != ALL_REL_OID && entry->rel->rd_id != rel_oid) {
+                continue;
+            }
+            GlobalCatalogTableStat *table_stat = (GlobalCatalogTableStat*)palloc(sizeof(GlobalCatalogTableStat));
+            table_stat->db_id = m_db_oid;
+            table_stat->db_name = CStringGetTextDatum(m_db_entry->m_dbName);
+            table_stat->rel_id = entry->rel->rd_id;
+            table_stat->rd_rel = CopyRelationRdrel(entry->rel);
+            table_stat->rd_att = CopyTupleDesc(entry->rel->rd_att);
+            table_stat_list = lappend(table_stat_list, table_stat);
+        }
+        PthreadRWlockUnlock(LOCAL_SYSDB_RESOWNER, obj_lock);
+    }
+    return table_stat_list;
+}
+
+template void GlobalTabDefCache::ResetRelCaches<false>();
+template void GlobalTabDefCache::ResetRelCaches<true>();
\ No newline at end of file
diff --git a/src/common/backend/utils/cache/knl_localbasedefcache.cpp b/src/common/backend/utils/cache/knl_localbasedefcache.cpp
new file mode 100644
index 000000000..9d1635d4d
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_localbasedefcache.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "utils/knl_localbasedefcache.h"
+#include "knl/knl_instance.h"
+#include "utils/knl_relcache.h"
+#include "utils/knl_catcache.h"
+#include "utils/knl_partcache.h"
+#include "utils/relmapper.h"
+#include "postmaster/autovacuum.h"
+#include "pgxc/bucketmap.h"
+
+LocalBaseEntry *LocalBaseDefCache::SearchEntryFromLocal(Oid oid, Index hash_index)
+{
+    for (Dlelem *elt = DLGetHead(m_bucket_list.GetBucket(hash_index)); elt != NULL; elt = DLGetSucc(elt)) {
+        LocalBaseEntry *entry = (LocalBaseEntry *)DLE_VAL(elt);
+        if (unlikely(entry->oid != oid)) {
+            continue;
+        }
+        DLMoveToFront(&entry->cache_elem);
+        return entry;
+    }
+    return NULL;
+}
+
+void LocalBaseDefCache::CreateDefBucket(size_t size)
+{
+    invalid_entries.Init();
+    m_nbuckets = ResizeHashBucket(size, g_instance.global_sysdbcache.dynamic_hash_bucket_strategy);
+    m_bucket_list.Init(m_nbuckets);
+}
+
+LocalBaseEntry *LocalBaseDefCache::CreateEntry(Index hash_index, size_t size)
+{
+    MemoryContext old = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
+    LocalBaseEntry *entry = (LocalBaseEntry *)palloc(size);
+    MemoryContextSwitchTo(old);
+    DLInitElem(&entry->cache_elem, (void *)entry);
+    m_bucket_list.AddHeadToBucket(hash_index, &entry->cache_elem);
+    return entry;
+}
+
+template <bool is_relation>
+void LocalBaseDefCache::RemoveTailDefElements()
+{
+    Index tail_index = m_bucket_list.GetTailBucketIndex();
+    if (unlikely(tail_index == INVALID_INDEX)) {
+        return;
+    }
+
+    int swapout_count_once = 0;
+    int max_swapout_count_once = m_bucket_list.GetBucket(tail_index)->dll_len >> 1;
+    for (Dlelem *elt = DLGetTail(m_bucket_list.GetBucket(tail_index)); elt != NULL;) {
+        Dlelem *tmp = elt;
+        elt = DLGetPred(elt);
+        if (is_relation) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(tmp);
+            /* don't remove a rel created by the current transaction, or a temp table */
+            if (RelationHasReferenceCountZero(entry->rel) && entry->rel->rd_createSubid == InvalidSubTransactionId &&
+                entry->rel->rd_newRelfilenodeSubid == InvalidSubTransactionId) {
+                Assert(!entry->rel->rd_isnailed);
+                Assert(entry->rel->entry == entry);
+                /* clearing with rebuild == false removes the relation entry */
+                RelationClearRelation(entry->rel, false);
+                swapout_count_once++;
+            } else {
+                DLMoveToFront(&entry->cache_elem);
+                /* the LSC uses an LRU strategy, so the entries in front of this one are probably all referenced; just break */
+                break;
+            }
+        } else {
+            LocalPartitionEntry *entry = (LocalPartitionEntry *)DLE_VAL(tmp);
+            /* don't remove a part created by the current transaction, or a temp table */
+            if (PartitionHasReferenceCountZero(entry->part) &&
+                entry->part->pd_newRelfilenodeSubid == InvalidSubTransactionId &&
+                entry->part->pd_createSubid == InvalidSubTransactionId) {
+                Assert(entry->part->entry == entry);
+                /* clearing with rebuild == false removes the partition entry */
+                PartitionClearPartition(entry->part, false);
+                swapout_count_once++;
+            } else {
+                DLMoveToFront(&entry->cache_elem);
+                /* the LSC uses an LRU strategy, so the entries in front of this one are probably all referenced; just break */
+                break;
+            }
+        }
+
+        /* keep as many elements as possible */
+        if (swapout_count_once == max_swapout_count_once) {
+            break;
+        }
+    }
+
+    if (!DLIsNIL(m_bucket_list.GetBucket(tail_index))) {
+        m_bucket_list.MoveBucketToHead(tail_index);
+    }
+}
+
+template void LocalBaseDefCache::RemoveTailDefElements<false>();
+template void LocalBaseDefCache::RemoveTailDefElements<true>();
diff --git a/src/common/backend/utils/cache/knl_localpartdefcache.cpp b/src/common/backend/utils/cache/knl_localpartdefcache.cpp
new file mode 100644
index 000000000..a54bd21a3
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_localpartdefcache.cpp
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "access/xact.h"
+#include "knl/knl_instance.h"
+#include "storage/smgr/smgr.h"
+#include "utils/knl_catcache.h"
+#include "utils/knl_localtabdefcache.h"
+#include "utils/knl_partcache.h"
+#include "utils/memutils.h"
+#include "utils/rel_gs.h"
+#include "utils/resowner.h"
+#include "utils/snapmgr.h"
+
+static bool IsPartitionStoreInglobal(Partition part);
+
+LocalPartDefCache::LocalPartDefCache()
+{
+    ResetInitFlag();
+}
+
+Partition LocalPartDefCache::SearchPartitionFromLocal(Oid part_oid)
+{
+    if (unlikely(!m_is_inited)) {
+        return NULL;
+    }
+    uint32 hash_value = oid_hash((void *)&(part_oid), sizeof(Oid));
+    Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets);
+    LocalPartitionEntry *entry = (LocalPartitionEntry *)LocalBaseDefCache::SearchEntryFromLocal(part_oid, hash_index);
+    if (unlikely(entry == NULL)) {
+        return NULL;
+    }
+    return entry->part;
+}
+
+static inline void SpecialWorkForLocalPart(Partition part)
+{
+    if (part->pd_part->parentid != InvalidOid || part->pd_part->relfilenode != InvalidOid) {
+        PartitionInitPhysicalAddr(part);
+    }
+}
+
+void LocalPartDefCache::CopyLocalPartition(Partition dest, Partition src)
+{
+    CopyPartitionData(dest, src);
+    SpecialWorkForLocalPart(dest);
+}
+
+template <bool insert_into_local>
+Partition LocalPartDefCache::SearchPartitionFromGlobalCopy(Oid part_oid)
+{
+    if (unlikely(!m_is_inited)) {
+        return NULL;
+    }
+    if (invalid_entries.ExistDefValue(part_oid)) {
+        return NULL;
+    }
+    if (HistoricSnapshotActive()) {
+        return NULL;
+    }
+
+    if (!g_instance.global_sysdbcache.hot_standby) {
+        return NULL;
+    }
+    if (unlikely(!g_instance.global_sysdbcache.recovery_finished)) {
+        return NULL;
+    }
+    uint32 hash_value = oid_hash((void *)&(part_oid), sizeof(Oid));
+    ResourceOwnerEnlargeGlobalBaseEntry(LOCAL_SYSDB_RESOWNER);
+    GlobalPartitionEntry *global = (GlobalPartitionEntry *)m_global_partdefcache->SearchReadOnly(part_oid, hash_value);
+    if (global == NULL) {
+        return NULL;
+    }
+    ResourceOwnerRememberGlobalBaseEntry(LOCAL_SYSDB_RESOWNER, global);
+
+    MemoryContext old = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
+    Partition copy = (Partition)palloc(sizeof(PartitionData));
+    CopyLocalPartition(copy, global->part);
+    MemoryContextSwitchTo(old);
+
+    ResourceOwnerForgetGlobalBaseEntry(LOCAL_SYSDB_RESOWNER, global);
+    global->Release();
+    if (insert_into_local) {
+        Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets);
+        RemovePartitionByOid(part_oid, hash_index);
+        CreateLocalPartEntry(copy, hash_index);
+    }
+    return copy;
+}
+
+template Partition LocalPartDefCache::SearchPartitionFromGlobalCopy<true>(Oid part_id);
+template Partition LocalPartDefCache::SearchPartitionFromGlobalCopy<false>(Oid part_id);
+
+Partition LocalPartDefCache::SearchPartition(Oid part_oid)
+{
+    if (unlikely(!m_is_inited)) {
+        return NULL;
+    }
+    Partition local = SearchPartitionFromLocal(part_oid);
+    if (likely(local != NULL || IsBootstrapProcessingMode())) {
+        return local;
+    }
+    return SearchPartitionFromGlobalCopy<true>(part_oid);
+}
+
+void LocalPartDefCache::RemovePartition(Partition part)
+{
+    m_bucket_list.RemoveElemFromBucket(&part->entry->cache_elem);
+    pfree_ext(part->entry);
+}
+
+void LocalPartDefCache::CreateLocalPartEntry(Partition part, Index hash_index)
+{
+    if (t_thrd.lsc_cxt.lsc->LocalSysDBCacheNeedSwapOut()) {
+        LocalBaseDefCache::RemoveTailDefElements<false>();
+    }
+    LocalPartitionEntry *entry =
+        (LocalPartitionEntry *)LocalBaseDefCache::CreateEntry(hash_index, sizeof(LocalPartitionEntry));
+    entry->part = part;
+    entry->oid = part->pd_id;
+    entry->obj_is_nailed = false;
+    part->entry = entry;
+}
+
+void LocalPartDefCache::InsertPartitionIntoLocal(Partition part)
+{
+    uint32 hash_value = oid_hash((void *)&(part->pd_id), sizeof(Oid));
+    Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets);
+    CreateLocalPartEntry(part, hash_index);
+}
+
+static bool IsPartOidStoreInGlobal(Oid part_oid)
+{
+    if (unlikely(IsBootstrapProcessingMode())) {
+        return false;
+    }
+    if (unlikely(t_thrd.lsc_cxt.lsc->GetThreadDefExclusive())) {
+        return false;
+    }
+
+    if (unlikely(t_thrd.lsc_cxt.lsc->partdefcache.invalid_entries.ExistDefValue(part_oid))) {
+        return false;
+    }
+
+    if (unlikely(u_sess->attr.attr_common.IsInplaceUpgrade)) {
+        return false;
+    }
+
+    if (HistoricSnapshotActive()) {
+        return false;
+    }
+
+    if (!g_instance.global_sysdbcache.hot_standby) {
+        return false;
+    }
+    if (unlikely(!g_instance.global_sysdbcache.recovery_finished)) {
+        return false;
+    }
+    if (g_instance.global_sysdbcache.StopInsertGSC()) {
+        return false;
+    }
+    return true;
+}
+
+static bool IsPartitionStoreInglobal(Partition part)
+{
+    Assert(part->pd_createSubid == InvalidSubTransactionId);
+    Assert(part->pd_newRelfilenodeSubid == InvalidSubTransactionId);
+    Assert(part->pd_isvalid);
+
+    return true;
+}
+
+void LocalPartDefCache::InsertPartitionIntoGlobal(Partition part, uint32 hash_value)
+{
+    if (!IsPartitionStoreInglobal(part)) {
+        return;
+    }
+    /* before inserting, we must make sure m_global_partdefcache is inited at least */
+    Assert(m_global_partdefcache != NULL);
+    m_global_partdefcache->Insert(part, hash_value);
+}
+
+Partition LocalPartDefCache::RemovePartitionByOid(Oid part_oid, Index hash_index)
+{
+    LocalPartitionEntry *entry = (LocalPartitionEntry *)LocalBaseDefCache::SearchEntryFromLocal(part_oid, hash_index);
+    if (entry == NULL) {
+        return NULL;
+    }
+    Partition old_part = entry->part;
+    RemovePartition(entry->part);
+    return old_part;
+}
+
+void LocalPartDefCache::Init()
+{
+    if (m_is_inited) {
+        return;
+    }
+    m_global_partdefcache = t_thrd.lsc_cxt.lsc->GetGlobalPartDefCache();
+    m_db_id = t_thrd.lsc_cxt.lsc->my_database_id;
+    part_cache_need_eoxact_work = false;
+    m_is_inited = true;
+}
+
+void LocalPartDefCache::InvalidateGlobalPartition(Oid db_id, Oid part_oid, bool is_commit)
+{
+    if (!is_commit) {
+        invalid_entries.InsertInvalidDefValue(part_oid);
+        return;
+    }
+    Assert(db_id != InvalidOid);
+    Assert(CheckMyDatabaseMatch());
+    if (m_global_partdefcache == NULL) {
+        Assert(!m_is_inited);
+        GlobalSysDBCacheEntry *entry = g_instance.global_sysdbcache.FindTempGSCEntry(db_id);
+        if (entry == NULL) {
+            return;
+        }
+        entry->m_partdefCache->Invalidate(db_id, part_oid);
+        g_instance.global_sysdbcache.ReleaseTempGSCEntry(entry);
+    } else {
+        Assert(db_id == t_thrd.lsc_cxt.lsc->my_database_id);
+
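/* the LSC is bound to this database, so route the invalidation through its own GSC partdef cache */
+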
m_global_partdefcache->Invalidate(db_id, part_oid); + } +} + +void LocalPartDefCache::InvalidateAll(void) +{ + if (unlikely(!m_is_inited)) { + return; + } + List *rebuildList = NIL; + Dlelem *bucket_elt; + forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) { + Dlelem *elt; + forloopbucket(elt, bucket_elt) { + LocalPartitionEntry *entry = (LocalPartitionEntry *)DLE_VAL(elt); + elt = DLGetSucc(elt); + Partition part = entry->part; + Assert(part->entry == entry); + + /* Must close all smgr references to avoid leaving dangling ptrs */ + PartitionCloseSmgr(part); + + /* Ignore new relations, since they are never cross-backend targets */ + if (part->pd_createSubid != InvalidSubTransactionId) + continue; + + if (PartitionHasReferenceCountZero(part)) { + /* Delete this entry immediately */ + PartitionClearPartition(part, false); + } else { + rebuildList = lappend(rebuildList, part); + } + } + } + + /* + * Now zap any remaining smgr cache entries. This must happen before we + * start to rebuild entries, since that may involve catalog fetches which + * will re-open catalog files. + */ + smgrcloseall(); + + ListCell *l = NULL; + /* Phase 2: rebuild the items found to need rebuild in phase 1 */ + foreach (l, rebuildList) { + Partition part = (Partition)lfirst(l); + PartitionClearPartition(part, true); + } + list_free_ext(rebuildList); +} + +void LocalPartDefCache::AtEOXact_PartitionCache(bool isCommit) +{ + invalid_entries.ResetInitFlag(); + /* + * To speed up transaction exit, we want to avoid scanning the partitioncache + * unless there is actually something for this routine to do. Other than + * the debug-only Assert checks, most transactions don't create any work + * for us to do here, so we keep a static flag that gets set if there is + * anything to do. (Currently, this means either a partition is created in + * the current xact, or one is given a new relfilenode, or an index list + * is forced.) For simplicity, the flag remains set till end of top-level + * transaction, even though we could clear it at subtransaction end in + * some cases. + */ + if (!part_cache_need_eoxact_work +#ifdef USE_ASSERT_CHECKING + && !assert_enabled +#endif + ) { + return; + } + + Dlelem *bucket_elt; + forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) { + Dlelem *elt; + forloopbucket(elt, bucket_elt) { + LocalPartitionEntry *entry = (LocalPartitionEntry *)DLE_VAL(elt); + elt = DLGetSucc(elt); + Partition part = entry->part; + + /* + * The relcache entry's ref count should be back to its normal + * not-in-a-transaction state: 0 unless it's nailed in cache. + * + * In bootstrap mode, this is NOT true, so don't check it --- the + * bootstrap code expects relations to stay open across start/commit + * transaction calls. (That seems bogus, but it's not worth fixing.) + */ +#ifdef USE_ASSERT_CHECKING + if (!IsBootstrapProcessingMode()) { + const int expected_refcnt = 0; + Assert(part->pd_refcnt == expected_refcnt); + } +#endif + + /* + * Is it a partition created in the current transaction? + * + * During commit, reset the flag to zero, since we are now out of the + * creating transaction. During abort, simply delete the relcache + * entry --- it isn't interesting any longer. (NOTE: if we have + * forgotten the new-ness of a new relation due to a forced cache + * flush, the entry will get deleted anyway by shared-cache-inval + * processing of the aborted pg_class insertion.) 
+ */ + if (part->pd_createSubid != InvalidSubTransactionId) { + if (isCommit) { + part->pd_createSubid = InvalidSubTransactionId; + } else { + PartitionClearPartition(part, false); + continue; + } + } + + /* + * Likewise, reset the hint about the relfilenode being new. + */ + part->pd_newRelfilenodeSubid = InvalidSubTransactionId; + } + } + + /* Once done with the transaction, we can reset need_eoxact_work */ + part_cache_need_eoxact_work = false; +} + +void LocalPartDefCache::AtEOSubXact_PartitionCache(bool isCommit, SubTransactionId mySubid, + SubTransactionId parentSubid) +{ + /* + * Skip the relcache scan if nothing to do --- see notes for + * AtEOXact_PartitionCache. + */ + if (!part_cache_need_eoxact_work) + return; + + Dlelem *bucket_elt; + forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) { + Dlelem *elt; + forloopbucket(elt, bucket_elt) { + LocalPartitionEntry *entry = (LocalPartitionEntry *)DLE_VAL(elt); + elt = DLGetSucc(elt); + Partition part = entry->part; + + /* + * Is it a partition created in the current subtransaction? + * + * During subcommit, mark it as belonging to the parent, instead. + * During subabort, simply delete the partition entry. + */ + if (part->pd_createSubid == mySubid) { + if (isCommit) + part->pd_createSubid = parentSubid; + else { + PartitionClearPartition(part, false); + continue; + } + } + + /* + * Likewise, update or drop any new-relfilenode-in-subtransaction + * hint. + */ + if (part->pd_newRelfilenodeSubid == mySubid) { + if (isCommit) + part->pd_newRelfilenodeSubid = parentSubid; + else + part->pd_newRelfilenodeSubid = InvalidSubTransactionId; + } + } + } +} + +Partition LocalPartDefCache::PartitionIdGetPartition(Oid part_oid, StorageType storage_type) +{ + Partition pd; + /* + * first try to find reldesc in the cache + */ + pd = SearchPartition(part_oid); + if (PartitionIsValid(pd)) { + PartitionIncrementReferenceCount(pd); + /* revalidate cache entry if necessary */ + if (!pd->pd_isvalid) { + /* + * Indexes only have a limited number of possible schema changes, + * and we don't want to use the full-blown procedure because it's + * a headache for indexes that reload itself depends on. + */ + if (pd->pd_part->parttype == PART_OBJ_TYPE_INDEX_PARTITION) { + PartitionReloadIndexInfo(pd); + } else { + PartitionClearPartition(pd, true); + } + } + return pd; + } + + /* + * no partdesc in the cache, so have PartitionBuildDesc() build one and add + * it. 
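+     * While building, the GSC read lock for this oid's hash value is held, so
+     * a concurrent invalidation cannot slip in between PartitionBuildDesc()
+     * and InsertPartitionIntoGlobal().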
+ */ + bool is_oid_store_in_global = IsPartOidStoreInGlobal(part_oid); + bool has_concurrent_lock = is_oid_store_in_global; + uint32 hash_value = oid_hash((void *)&(part_oid), sizeof(Oid)); + pthread_rwlock_t *oid_lock = NULL; + if (has_concurrent_lock) { + oid_lock = m_global_partdefcache->GetHashValueLock(hash_value); + AcquireGSCTableReadLock(&has_concurrent_lock, oid_lock); + } + pd = PartitionBuildDesc(part_oid, storage_type, true); + if (PartitionIsValid(pd) && is_oid_store_in_global && has_concurrent_lock) { + InsertPartitionIntoGlobal(pd, hash_value); + } + if (has_concurrent_lock) { + ReleaseGSCTableReadLock(&has_concurrent_lock, oid_lock); + } + + if (PartitionIsValid(pd)) { + PartitionIncrementReferenceCount(pd); + } + + return pd; +} \ No newline at end of file diff --git a/src/common/backend/utils/cache/knl_localsysdbcache.cpp b/src/common/backend/utils/cache/knl_localsysdbcache.cpp new file mode 100644 index 000000000..aa58fdc8a --- /dev/null +++ b/src/common/backend/utils/cache/knl_localsysdbcache.cpp @@ -0,0 +1,1072 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ + +#include "utils/knl_localsysdbcache.h" +#include "utils/knl_globalsysdbcache.h" +#include "utils/resowner.h" +#include "knl/knl_instance.h" +#include "executor/executor.h" +#include "utils/postinit.h" +#include "storage/sinvaladt.h" +#include "utils/hashutils.h" +#include "utils/knl_relcache.h" +#include "utils/acl.h" +#include "utils/spccache.h" +#include "commands/dbcommands.h" +#include "tsearch/ts_cache.h" +#include "optimizer/predtest.h" +#include "utils/attoptcache.h" +#include "parser/parse_oper.h" +#include "utils/typcache.h" +#include "utils/relfilenodemap.h" +#include "postmaster/bgworker.h" +#include "storage/lmgr.h" +#ifdef USE_ASSERT_CHECKING +class LSCCloseCheck { +public: + LSCCloseCheck() + { + m_lsc_closed = true; + } + ~LSCCloseCheck() + { + Assert(!EnableGlobalSysCache() || m_lsc_closed || g_instance.distribute_test_param_instance->elevel == PANIC); + } + void setCloseFlag(bool value) + { + m_lsc_closed = value; + } +private: + bool m_lsc_closed; +}; +thread_local LSCCloseCheck lsc_close_check = LSCCloseCheck(); +#endif + +void ReLoadLSCWhenWaitMission() +{ + if (!EnableLocalSysCache()) { + return; + } + LocalSysDBCache *lsc = t_thrd.lsc_cxt.lsc; + if (unlikely(lsc == NULL)) { + return; + } + if (lsc->GetMyGlobalDBEntry() != NULL && lsc->GetMyGlobalDBEntry()->m_isDead) { + t_thrd.proc_cxt.PostInit->InitLoadLocalSysCache(u_sess->proc_cxt.MyDatabaseId, + u_sess->proc_cxt.MyProcPort->database_name); + } + g_instance.global_sysdbcache.GSCMemThresholdCheck(); +} + +void RememberRelSonMemCxtSpace(Relation rel) +{ + if (rel->rd_rulescxt != NULL) { + t_thrd.lsc_cxt.lsc->rel_index_rule_space += ((AllocSet)rel->rd_rulescxt)->totalSpace; + } + if (rel->rd_indexcxt != NULL) { + t_thrd.lsc_cxt.lsc->rel_index_rule_space += ((AllocSet)rel->rd_indexcxt)->totalSpace; + } +} +void 
ForgetRelSonMemCxtSpace(Relation rel) +{ + if (rel->rd_rulescxt != NULL) { + t_thrd.lsc_cxt.lsc->rel_index_rule_space -= ((AllocSet)rel->rd_rulescxt)->totalSpace; + } + if (rel->rd_indexcxt != NULL) { + t_thrd.lsc_cxt.lsc->rel_index_rule_space -= ((AllocSet)rel->rd_indexcxt)->totalSpace; + } + if (t_thrd.lsc_cxt.lsc->rel_index_rule_space < 0) { + t_thrd.lsc_cxt.lsc->rel_index_rule_space = 0; + } +} + +/* call all access dbid after initsession */ +bool CheckMyDatabaseMatch() +{ + if (EnableLocalSysCache()) { + return u_sess->proc_cxt.MyDatabaseId == InvalidOid || + u_sess->proc_cxt.MyDatabaseId == t_thrd.lsc_cxt.lsc->my_database_id; + } else { + return true; + } +} +char *GetMyDatabasePath() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->my_database_path; + } else { + return u_sess->proc_cxt.DatabasePath; + } +} +Oid GetMyDatabaseId() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->my_database_id; + } else { + return u_sess->proc_cxt.MyDatabaseId; + } +} +Oid GetMyDatabaseTableSpace() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->my_database_tablespace; + } else { + return u_sess->proc_cxt.MyDatabaseTableSpace; + } +} + +bool IsGotPoolReload() +{ + if (EnableLocalSysCache()) { + return u_sess->sig_cxt.got_pool_reload || t_thrd.lsc_cxt.lsc->got_pool_reload; + } else { + return u_sess->sig_cxt.got_pool_reload; + } +} +void ResetGotPoolReload(bool value) +{ + if (EnableLocalSysCache()) { + u_sess->sig_cxt.got_pool_reload = value; + t_thrd.lsc_cxt.lsc->got_pool_reload = value; + } else { + u_sess->sig_cxt.got_pool_reload = value; + } +} + +bool DeepthInAcceptInvalidationMessageNotZero() +{ + if (EnableLocalSysCache()) { + return u_sess->inval_cxt.DeepthInAcceptInvalidationMessage > 0 || + t_thrd.lsc_cxt.lsc->inval_cxt.DeepthInAcceptInvalidationMessage > 0; + } else { + return u_sess->inval_cxt.DeepthInAcceptInvalidationMessage > 0; + } +} +void ResetDeepthInAcceptInvalidationMessage(int value) +{ + if (EnableLocalSysCache()) { + u_sess->inval_cxt.DeepthInAcceptInvalidationMessage = value; + t_thrd.lsc_cxt.lsc->inval_cxt.DeepthInAcceptInvalidationMessage = value; + } else { + u_sess->inval_cxt.DeepthInAcceptInvalidationMessage = value; + } +} + +static bool SwitchToSessionSysCache() +{ + if ( +#ifdef ENABLE_MULTIPLE_NODES + /* ts code dont use gsc */ + t_thrd.role != TS_COMPACTION && + t_thrd.role != TS_COMPACTION_CONSUMER && + t_thrd.role != TS_COMPACTION_AUXILIAY +#else + true +#endif + ) { + return false; + } + return true; +} + +/* after call close, you should never use syscache before rebuild it */ +void CloseLocalSysDBCache() +{ + if (!EnableLocalSysCache()) { + closeAllVfds(); + } + if (t_thrd.lsc_cxt.lsc == NULL) { + return; + } + t_thrd.lsc_cxt.lsc->CloseLocalSysDBCache(); +} + +static inline HeapTuple GetTupleFromLscCatList(CatCList *cl, int index) +{ + Assert(EnableLocalSysCache()); + return &(((GlobalCatCTup **)cl->systups)[index]->tuple); +} + +static inline HeapTuple GetTupleFromSessCatList(CatCList *cl, int index) +{ + Assert(!EnableLocalSysCache()); + return &(cl->systups[index]->tuple); +} + +void CreateLocalSysDBCache() +{ + /* every thread should call this func once */ + if (!EnableGlobalSysCache()) { + t_thrd.lsc_cxt.enable_lsc = false; + t_thrd.lsc_cxt.FetchTupleFromCatCList = GetTupleFromSessCatList; + return; + } + + Assert(t_thrd.lsc_cxt.lsc == NULL); + t_thrd.lsc_cxt.lsc = New(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT))LocalSysDBCache(); + /* use this object to invalid gsc, only work with timeseries worker */ + 
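/* CreateDBObject() just allocates the cache object; the sub-caches init themselves lazily via their m_is_inited flags (see LocalPartDefCache::Init) */
+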
t_thrd.lsc_cxt.lsc->CreateDBObject(); + t_thrd.lsc_cxt.FetchTupleFromCatCList = GetTupleFromLscCatList; + t_thrd.lsc_cxt.lsc->recovery_finished = g_instance.global_sysdbcache.recovery_finished; + t_thrd.lsc_cxt.enable_lsc = !SwitchToSessionSysCache(); + if (!t_thrd.lsc_cxt.enable_lsc) { + t_thrd.lsc_cxt.FetchTupleFromCatCList = GetTupleFromSessCatList; + t_thrd.lsc_cxt.lsc->is_closed = true; +#ifdef USE_ASSERT_CHECKING + lsc_close_check.setCloseFlag(true); +#endif + } else { +#ifdef USE_ASSERT_CHECKING + lsc_close_check.setCloseFlag(false); +#endif + } +} +static void ReleaseBadPtrList(bool isCommit); +static void ThreadNodeGroupCallback(Datum arg, int cacheid, uint32 hashvalue) +{ + RelationCacheInvalidateBuckets(); +} + +bool EnableGlobalSysCache() +{ + return g_instance.attr.attr_common.enable_global_syscache; +} + +MemoryContext LocalSharedCacheMemCxt() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->lsc_share_memcxt; + } else { + return u_sess->cache_mem_cxt; + } +} + +MemoryContext LocalMyDBCacheMemCxt() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + } else { + return u_sess->cache_mem_cxt; + } +} + +extern MemoryContext LocalGBucketMapMemCxt() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + } else { + return SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR); + } +} + +MemoryContext LocalSmgrStorageMemoryCxt() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + } else { + return SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE); + } +} + +struct HTAB *GetTypeCacheHash() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->TypeCacheHash; + } else { + return u_sess->tycache_cxt.TypeCacheHash; + } +} + +knl_u_inval_context *GetInvalCxt() +{ + if (EnableLocalSysCache()) { + return &t_thrd.lsc_cxt.lsc->inval_cxt; + } else { + return &u_sess->inval_cxt; + } +} + +void UnRegisterSysCacheCallBack(knl_u_inval_context *inval_cxt, int cacheid, SyscacheCallbackFunction func) +{ + for (int i = 0; i < inval_cxt->syscache_callback_count; i++) { + if (inval_cxt->syscache_callback_list[i].id != cacheid || + inval_cxt->syscache_callback_list[i].function != func) { + continue; + } + for (; i < inval_cxt->syscache_callback_count - 1; i++) { + inval_cxt->syscache_callback_list[i].id = inval_cxt->syscache_callback_list[i + 1].id; + inval_cxt->syscache_callback_list[i].function = inval_cxt->syscache_callback_list[i + 1].function; + inval_cxt->syscache_callback_list[i].arg = inval_cxt->syscache_callback_list[i + 1].arg; + } + --inval_cxt->syscache_callback_count; + break; + } +} + +void UnRegisterRelCacheCallBack(knl_u_inval_context *inval_cxt, RelcacheCallbackFunction func) +{ + for (int i = 0; i < inval_cxt->relcache_callback_count; i++) { + if (inval_cxt->relcache_callback_list[i].function != func) { + continue; + } + for (; i < inval_cxt->relcache_callback_count - 1; i++) { + inval_cxt->relcache_callback_list[i].function = inval_cxt->relcache_callback_list[i + 1].function; + inval_cxt->relcache_callback_list[i].arg = inval_cxt->relcache_callback_list[i + 1].arg; + } + --inval_cxt->relcache_callback_count; + break; + } +} + +void UnRegisterPartCacheCallBack(knl_u_inval_context *inval_cxt, PartcacheCallbackFunction func) +{ + for (int i = 0; i < inval_cxt->partcache_callback_count; i++) { + if (inval_cxt->partcache_callback_list[i].function != func) { + continue; + } + for (; i < inval_cxt->partcache_callback_count - 1; i++) { + inval_cxt->partcache_callback_list[i].function = 
inval_cxt->partcache_callback_list[i + 1].function; + inval_cxt->partcache_callback_list[i].arg = inval_cxt->partcache_callback_list[i + 1].arg; + } + --inval_cxt->partcache_callback_count; + break; + } +} + +static void ClearMyDBOfRelMapCxt(knl_u_relmap_context *relmap_cxt) +{ + relmap_cxt->local_map->magic = 0; + relmap_cxt->local_map->num_mappings = 0; + relmap_cxt->active_shared_updates->num_mappings = 0; + relmap_cxt->active_local_updates->num_mappings = 0; + relmap_cxt->pending_shared_updates->num_mappings = 0; + relmap_cxt->pending_local_updates->num_mappings = 0; + + /* since clear when switchdb, just set their memcxt lsc_mydb_cxt */ + relmap_cxt->RelfilenodeMapHash = NULL; + relmap_cxt->UHeapRelfilenodeMapHash = NULL; +} + +struct HTAB *GetTableSpaceCacheHash() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->TableSpaceCacheHash; + } else { + return u_sess->cache_cxt.TableSpaceCacheHash; + } +} + +struct HTAB *GetSMgrRelationHash() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->SMgrRelationHash; + } else { + return u_sess->storage_cxt.SMgrRelationHash; + } +} + +struct vfd *GetVfdCache() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->VfdCache; + } else { + return u_sess->storage_cxt.VfdCache; + } +} + +struct vfd **GetVfdCachePtr() +{ + if (EnableLocalSysCache()) { + return &t_thrd.lsc_cxt.lsc->VfdCache; + } else { + return &u_sess->storage_cxt.VfdCache; + } +} + +void SetVfdCache(vfd *value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->VfdCache = value; + } else { + u_sess->storage_cxt.VfdCache = value; + } +} + +void SetSizeVfdCache(Size value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->SizeVfdCache = value; + } else { + u_sess->storage_cxt.SizeVfdCache = value; + } +} + +Size GetSizeVfdCache() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->SizeVfdCache; + } else { + return u_sess->storage_cxt.SizeVfdCache; + } +} + +Size *GetSizeVfdCachePtr() +{ + if (EnableLocalSysCache()) { + return &t_thrd.lsc_cxt.lsc->SizeVfdCache; + } else { + return &u_sess->storage_cxt.SizeVfdCache; + } +} + + +int GetVfdNfile() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->nfile; + } else { + return u_sess->storage_cxt.nfile; + } +} +void AddVfdNfile(int n) +{ + Assert(n == 1 || n == -1); + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->nfile += n; + } else { + u_sess->storage_cxt.nfile += n; + } +} + +dlist_head *getUnownedReln() +{ + if (EnableLocalSysCache()) { + return &t_thrd.lsc_cxt.lsc->unowned_reln; + } else { + return &u_sess->storage_cxt.unowned_reln; + } +} + +knl_u_relmap_context *GetRelMapCxt() +{ + if (EnableLocalSysCache()) { + return &t_thrd.lsc_cxt.lsc->relmap_cxt; + } else { + return &u_sess->relmap_cxt; + } +} + +void LocalSysDBCache::LocalSysDBCacheReleaseGlobalReSource(bool is_commit) +{ + ResourceOwnerReleaseRWLock(local_sysdb_resowner, is_commit); + ResourceOwnerReleaseGlobalCatCList(local_sysdb_resowner, is_commit); + ResourceOwnerReleaseGlobalCatCTup(local_sysdb_resowner, is_commit); + ResourceOwnerReleaseGlobalBaseEntry(local_sysdb_resowner, is_commit); + ResourceOwnerReleaseGlobalDBEntry(local_sysdb_resowner, is_commit); + ResourceOwnerReleaseGlobalIsExclusive(local_sysdb_resowner, is_commit); +} + +void LocalSysDBCache::LocalSysDBCacheReleaseCritialReSource(bool include_shared) +{ + closeAllVfds(); + LocalSysDBCacheReleaseGlobalReSource(false); + ReleaseBadPtrList(false); + + systabcache.ReleaseGlobalRefcount(include_shared); + if (m_global_db != NULL) { + 
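/* release our pin on the global DB entry so the GSC can evict or rebuild it */
+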
m_global_db->Release();
+        m_global_db = NULL;
+    }
+
+    rdlock_info.count = 0;
+
+    tabdefcache.ResetInitFlag();
+    partdefcache.ResetInitFlag();
+    systabcache.ResetInitFlag(include_shared);
+
+    /* may be nonzero if ereport raised an error during a search */
+    SetThreadDefExclusive(IS_THREAD_POOL_STREAM || IsBgWorkerProcess());
+}
+
+/* called on database switch and ALTER DATABASE */
+bool LocalSysDBCache::LocalSysDBCacheNeedClearMyDB(Oid db_id, const char *db_name)
+{
+    if (likely(db_name != NULL && db_id == InvalidOid)) {
+        if (strcmp(my_database_name, db_name) != 0) {
+            return true;
+        }
+    } else if (unlikely(db_name == NULL && db_id != InvalidOid)) {
+        if (my_database_id != db_id) {
+            return true;
+        }
+    } else if (unlikely(db_name != NULL && db_id != InvalidOid)) {
+        if (strcmp(my_database_name, db_name) != 0 || my_database_id != db_id) {
+            return true;
+        }
+    } else {
+        Assert(t_thrd.role == AUTOVACUUM_LAUNCHER || t_thrd.role == UNDO_LAUNCHER || t_thrd.role == CATCHUP ||
+               (IsBootstrapProcessingMode() &&
+                t_thrd.role == MASTER_THREAD &&
+                strcmp(t_thrd.proc_cxt.MyProgName, "BootStrap") == 0
+               ));
+        return true;
+    }
+
+    /* cache hit; during session init we lock the db briefly to serialize against ALTER DATABASE */
+    Oid old_db_id = t_thrd.proc->databaseId;
+    if (u_sess->proc_cxt.MyDatabaseId == InvalidOid && IS_THREAD_POOL_WORKER) {
+        LockSharedObject(DatabaseRelationId, my_database_id, 0, RowExclusiveLock);
+        t_thrd.proc->databaseId = my_database_id;
+        UnlockSharedObject(DatabaseRelationId, my_database_id, 0, RowExclusiveLock);
+    }
+    Assert(m_global_db != NULL);
+    if (m_global_db->m_isDead) {
+        t_thrd.proc->databaseId = old_db_id;
+        return true;
+    }
+    return false;
+}
+
+/* clear every cache that lives in the mydb memory context */
+void LocalSysDBCache::LocalSysDBCacheClearMyDB(Oid db_id, const char *db_name)
+{
+    LocalSysDBCacheReleaseCritialReSource(false);
+
+    TypeCacheHash = NULL;
+    SMgrRelationHash = NULL;
+    VfdCache = NULL;
+    SizeVfdCache = 0;
+    Assert(nfile == 0);
+    nfile = 0;
+
+    my_database_id = InvalidOid;
+    my_database_tablespace = InvalidOid;
+    my_database_name[0] = '\0';
+    dlist_init(&unowned_reln);
+    pfree_ext(my_database_path);
+    ClearMyDBOfRelMapCxt(&relmap_cxt);
+    UnRegisterRelCacheCallBack(&inval_cxt, TypeCacheRelCallback);
+    UnRegisterRelCacheCallBack(&inval_cxt, RelfilenodeMapInvalidateCallback);
+    UnRegisterRelCacheCallBack(&inval_cxt, UHeapRelfilenodeMapInvalidateCallback);
+    MemoryContextResetAndDeleteChildren(lsc_mydb_memcxt);
+
+    is_inited = false;
+    other_space = ((AllocSet)lsc_top_memcxt)->totalSpace + ((AllocSet)lsc_share_memcxt)->totalSpace;
+    rel_index_rule_space = 0;
+}
+
+void LocalSysDBCache::LocalSysDBCacheReSet()
+{
+    LocalSysDBCacheReleaseCritialReSource(true);
+
+    /* reset flags */
+    abort_count = 0;
+    my_database_id = InvalidOid;
+    my_database_tablespace = InvalidOid;
+    my_database_name[0] = '\0';
+    my_database_path = NULL;
+    SMgrRelationHash = NULL;
+    TableSpaceCacheHash = NULL;
+    TypeCacheHash = NULL;
+    VfdCache = NULL;
+    SizeVfdCache = 0;
+    Assert(nfile == 0);
+    nfile = 0;
+    dlist_init(&unowned_reln);
+    bad_ptr_obj.ResetInitFlag();
+
+    /* unregister the callbacks whose caches live in the lsc */
+    UnRegisterRelCacheCallBack(&inval_cxt, TypeCacheRelCallback);
+    UnRegisterRelCacheCallBack(&inval_cxt, RelfilenodeMapInvalidateCallback);
+    UnRegisterSysCacheCallBack(&inval_cxt, TABLESPACEOID, InvalidateTableSpaceCacheCallback);
+
+    MemoryContextResetAndDeleteChildren(lsc_mydb_memcxt);
+    MemoryContextResetAndDeleteChildren(lsc_share_memcxt);
+
+    MemoryContext old = MemoryContextSwitchTo(lsc_share_memcxt);
+    knl_u_relmap_init(&relmap_cxt);
+    MemoryContextSwitchTo(old);
+
+ 
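/* recompute the memory baseline: everything that survives the reset lives
+     * in the top and shared contexts, and LocalSysDBCacheNeedReBuild()
+     * measures later growth against this figure */
+ 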
other_space = ((AllocSet)lsc_top_memcxt)->totalSpace + ((AllocSet)lsc_share_memcxt)->totalSpace;
+    rel_index_rule_space = 0;
+    is_inited = false;
+    is_closed = false;
+    is_lsc_catbucket_created = false;
+}
+
+static bool LSCMemoryOverflow(uint64 total_space)
+{
+    const uint32 kb_bit_double = 10;
+    if (unlikely(total_space < ((uint64)g_instance.attr.attr_memory.local_syscache_threshold << kb_bit_double))) {
+        return false;
+    }
+    return true;
+}
+
+bool LocalSysDBCache::LocalSysDBCacheNeedReBuild()
+{
+    /* we have just recovered from startup; redo may not have delivered all inval msgs, so discard the whole lsc */
+    if (unlikely(!recovery_finished && g_instance.global_sysdbcache.recovery_finished)) {
+        return true;
+    } else if (!g_instance.global_sysdbcache.hot_standby) {
+        /* on a standby that is not hot, the cache may be invalid */
+        return true;
+    }
+
+    /* it seems only fmgr_info_cxt has no resowner to guard against leaks;
+     * we assume about 1KB leaks per aborted transaction */
+    if (unlikely(abort_count > (uint64)g_instance.attr.attr_memory.local_syscache_threshold)) {
+        return true;
+    }
+
+    uint64 total_space = other_space + AllocSetContextUsedSpace((AllocSet)lsc_mydb_memcxt) +
+                         AllocSetContextUsedSpace((AllocSet)u_sess->cache_mem_cxt) + rel_index_rule_space;
+    return LSCMemoryOverflow(total_space);
+}
+
+/* rebuild the caches living in the mydb and shared memcxts; call this only before initsyscache */
+void LocalSysDBCache::LocalSysDBCacheReBuild()
+{
+    /* invalidate the session caches first */
+    int i;
+    knl_u_inval_context *inval_cxt = &u_sess->inval_cxt;
+    for (i = 0; i < inval_cxt->syscache_callback_count; i++) {
+        struct SYSCACHECALLBACK* ccitem = inval_cxt->syscache_callback_list + i;
+        (*ccitem->function)(ccitem->arg, ccitem->id, 0);
+    }
+
+    for (i = 0; i < inval_cxt->relcache_callback_count; i++) {
+        struct RELCACHECALLBACK* ccitem = inval_cxt->relcache_callback_list + i;
+        (*ccitem->function)(ccitem->arg, InvalidOid);
+    }
+
+    for (i = 0; i < inval_cxt->partcache_callback_count; i++) {
+        struct PARTCACHECALLBACK* ccitem = inval_cxt->partcache_callback_list + i;
+        (*ccitem->function)(ccitem->arg, InvalidOid);
+    }
+
+    LocalSysDBCacheReSet();
+    CreateCatBucket();
+}
+
+bool LocalSysDBCache::LocalSysDBCacheNeedSwapOut()
+{
+    uint64 total_space = other_space + rel_index_rule_space + ((AllocSet)lsc_mydb_memcxt)->totalSpace;
+    return LSCMemoryOverflow(total_space);
+}
+
+void LocalSysDBCache::CloseLocalSysDBCache()
+{
+    if (is_closed) {
+        return;
+    }
+    LocalSysDBCacheReleaseCritialReSource(true);
+    is_inited = false;
+    is_closed = true;
+#ifdef USE_ASSERT_CHECKING
+    lsc_close_check.setCloseFlag(true);
+#endif
+}
+
+/* every cache knows whether it has been initialized */
+void LocalSysDBCache::ClearSysCacheIfNecessary(Oid db_id, const char *db_name)
+{
+    CreateCatBucket();
+    if (unlikely(!is_inited)) {
+        return;
+    }
+    other_space = ((AllocSet)lsc_top_memcxt)->totalSpace + ((AllocSet)lsc_share_memcxt)->totalSpace;
+    /* rebuild if memory usage has overflowed the threshold */
+    if (unlikely(LocalSysDBCacheNeedReBuild())) {
+        recovery_finished = g_instance.global_sysdbcache.recovery_finished;
+        LocalSysDBCacheReBuild();
+        return;
+    }
+
+    if (LocalSysDBCacheNeedClearMyDB(db_id, db_name)) {
+        LocalSysDBCacheClearMyDB(db_id, db_name);
+        return;
+    }
+
+    /* only threads that switch sessions need to clean smgr block info */
+    /* actually, the ts task producer needs this too, but that code needs restructuring; 
ts is discarded */ + if (IS_THREAD_POOL_WORKER) { + smgrcleanblocknumall(); + } + g_instance.global_sysdbcache.Refresh(m_global_db); +} + +void LocalSysDBCache::CreateDBObject() +{ + Assert(lsc_top_memcxt == NULL); + lsc_top_memcxt = AllocSetContextCreate( + THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT), "LocalSysCacheTopMemoryContext", ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, STANDARD_CONTEXT); + lsc_share_memcxt = + AllocSetContextCreate(lsc_top_memcxt, "LocalSysCacheShareMemoryContext", ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, STANDARD_CONTEXT); + + lsc_mydb_memcxt = + AllocSetContextCreate(lsc_top_memcxt, "LocalSysCacheMyDBMemoryContext", ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, STANDARD_CONTEXT); + MemoryContext old = MemoryContextSwitchTo(lsc_top_memcxt); + systabcache.CreateObject(); + tabdefcache.CreateDefBucket(); + partdefcache.CreateDefBucket(); + knl_u_inval_init(&inval_cxt); + dlist_init(&unowned_reln); + MemoryContextSwitchTo(old); + if (IS_PGXC_COORDINATOR) { + CacheRegisterThreadSyscacheCallback(PGXCGROUPOID, ThreadNodeGroupCallback, (Datum)0); + } + /* init t_thrd resource owner */ + local_sysdb_resowner = + ResourceOwnerCreate(NULL, "InitLocalSysCache", lsc_top_memcxt); + + old = MemoryContextSwitchTo(lsc_share_memcxt); + knl_u_relmap_init(&relmap_cxt); + MemoryContextSwitchTo(old); + m_shared_global_db = g_instance.global_sysdbcache.GetSharedGSCEntry(); + other_space = ((AllocSet)lsc_top_memcxt)->totalSpace + ((AllocSet)lsc_share_memcxt)->totalSpace; + rel_index_rule_space = 0; + is_lsc_catbucket_created = false; +} + +void LocalSysDBCache::CreateCatBucket() +{ + if (likely(is_lsc_catbucket_created)) { + return; + } + MemoryContext old = MemoryContextSwitchTo(lsc_share_memcxt); + systabcache.CreateCatBuckets(); + MemoryContextSwitchTo(old); + other_space = ((AllocSet)lsc_top_memcxt)->totalSpace + ((AllocSet)lsc_share_memcxt)->totalSpace; + rel_index_rule_space = 0; + is_lsc_catbucket_created = true; +} + +void LocalSysDBCache::SetDatabaseName(const char *db_name) +{ + if (db_name != NULL && db_name[0] != '\0') { + size_t len = strlen(db_name); + Assert(len > 0 && len < NAMEDATALEN); + errno_t rc = memcpy_s(my_database_name, len + 1, db_name, len + 1); + securec_check(rc, "\0", "\0"); + return; + } + + if (my_database_name[0] == '\0') { + t_thrd.proc_cxt.PostInit->GetDatabaseName(my_database_name); + } + + if (my_database_name[0] == '\0') { + char *tmp = get_database_name(my_database_id); + size_t len = strlen(tmp); + errno_t rc = memcpy_s(my_database_name, len + 1, tmp, len + 1); + securec_check(rc, "\0", "\0"); + pfree_ext(tmp); + } +} + +void LocalSysDBCache::InitThreadDatabase(Oid db_id, const char *db_name, Oid db_tabspc) +{ + Assert(db_id != InvalidOid); + if (my_database_id == db_id) { + Assert(my_database_name[0] != '\0'); + Assert(my_database_tablespace == db_tabspc); + Assert(db_name == NULL || strcmp(my_database_name, db_name) == 0); + return; + } else if (my_database_id != InvalidOid) { + /* we has lock db and set thrd.proc.dbid, this should never happened */ + Assert(false); + ereport(FATAL, (errno, errmsg("lsc has some error, please try again!"))); + } + Assert(db_id == u_sess->proc_cxt.MyDatabaseId); + Assert(my_database_id == InvalidOid); + Assert(my_database_tablespace == InvalidOid); + Assert(my_database_name[0] == '\0'); + my_database_id = db_id; + my_database_tablespace = db_tabspc; + if (db_id == TemplateDbOid) { + 
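/* for the template database the name is deliberately left unset here;
+         * presumably it is resolved later through SetDatabaseName() once it
+         * is actually needed */
+ 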
my_database_name[0] = '\0';
+        return;
+    }
+    SetDatabaseName(db_name);
+}
+
+void LocalSysDBCache::InitSessionDatabase(Oid db_id, const char *db_name, Oid db_tabspc)
+{
+    if (m_global_db != NULL && m_global_db->m_isDead) {
+        ereport(FATAL, (errno, errmsg("It is too late to fix syscache, please try again!")));
+        return;
+    }
+    Assert(db_id != InvalidOid);
+    if (!is_inited) {
+        my_database_id = db_id;
+        if (db_name != NULL && !IsBootstrapProcessingMode()) {
+            SetDatabaseName(db_name);
+        }
+        my_database_tablespace = db_tabspc;
+    } else {
+        Assert(my_database_id == db_id);
+        Assert(my_database_tablespace == db_tabspc);
+        Assert(strcmp(my_database_name, db_name) == 0);
+    }
+}
+
+void LocalSysDBCache::InitDatabasePath(const char *db_path)
+{
+    Assert(db_path != NULL);
+    Assert(strcmp(u_sess->proc_cxt.DatabasePath, db_path) == 0);
+    if (my_database_path == NULL) {
+        my_database_path = MemoryContextStrdup(lsc_share_memcxt, db_path);
+    } else if (strcmp(my_database_path, db_path) != 0) {
+        pfree_ext(my_database_path);
+        my_database_path = MemoryContextStrdup(lsc_share_memcxt, db_path);
+    }
+}
+
+void LocalSysDBCache::Init()
+{
+    Assert(!is_inited);
+    Assert(m_global_db == NULL);
+    Assert(my_database_id != InvalidOid);
+    Assert(my_database_id == u_sess->proc_cxt.MyDatabaseId);
+    m_global_db = g_instance.global_sysdbcache.GetGSCEntry(my_database_id, my_database_name);
+    is_inited = true;
+}
+
+void LocalSysDBCache::InitRelMapPhase2()
+{
+    Assert(m_shared_global_db != NULL);
+    m_shared_global_db->m_relmapCache->InitPhase2();
+    if (!IS_MAGIC_EXIST(relmap_cxt.shared_map->magic)) {
+        m_shared_global_db->m_relmapCache->CopyInto(relmap_cxt.shared_map);
+    }
+}
+
+void LocalSysDBCache::InitRelMapPhase3()
+{
+    Assert(m_global_db != NULL);
+    m_global_db->m_relmapCache->InitPhase2();
+    if (!IS_MAGIC_EXIST(relmap_cxt.local_map->magic)) {
+        m_global_db->m_relmapCache->CopyInto(relmap_cxt.local_map);
+    }
+}
+
+void LocalSysDBCache::LoadRelMapFromGlobal(bool shared)
+{
+    GlobalSysDBCacheEntry *global_db = shared ? m_shared_global_db : m_global_db;
+    RelMapFile *rel_map = shared ? 
relmap_cxt.shared_map : relmap_cxt.local_map; + Assert(global_db != NULL); + global_db->m_relmapCache->CopyInto(rel_map); +} + +void LocalSysDBCache::InvalidateGlobalRelMap(bool shared, Oid db_id, RelMapFile *rel_map) +{ + if (shared) { + Assert(m_shared_global_db != NULL); + GlobalSysDBCacheEntry *global_db = m_shared_global_db; + global_db->m_relmapCache->UpdateBy(rel_map); + } else if (!is_inited) { + Assert(m_global_db == NULL && !is_inited); + GlobalSysDBCacheEntry *entry = g_instance.global_sysdbcache.FindTempGSCEntry(db_id); + if (entry == NULL) { + return; + } + entry->m_relmapCache->UpdateBy(rel_map); + g_instance.global_sysdbcache.ReleaseTempGSCEntry(entry); + } else { + Assert(my_database_id == db_id); + m_global_db->m_relmapCache->UpdateBy(rel_map); + } +} + +void LocalSysDBCache::SetThreadDefExclusive(bool is_exclusive) +{ + m_is_def_exclusive = is_exclusive; +} + +LocalSysDBCache::LocalSysDBCache() +{ + lsc_top_memcxt = NULL; + lsc_share_memcxt = NULL; + lsc_mydb_memcxt = NULL; + m_global_db = NULL; + my_database_id = InvalidOid; + my_database_name[0] = '\0'; + my_database_path = NULL; + my_database_tablespace = InvalidOid; + TableSpaceCacheHash = NULL; + TypeCacheHash = NULL; + SMgrRelationHash = NULL; + VfdCache = NULL; + SizeVfdCache = 0; + nfile = 0; + local_sysdb_resowner = NULL; + + abort_count = 0; + + rdlock_info.count = 0; + got_pool_reload = false; + m_shared_global_db = NULL; + + is_lsc_catbucket_created = false; + is_closed = false; + is_inited = false; +#ifdef USE_ASSERT_CHECKING + lsc_close_check.setCloseFlag(false); +#endif +} + +void AtEOXact_SysDBCache(bool is_commit) +{ + if (!EnableLocalSysCache()) { + return; + } + Assert(t_thrd.lsc_cxt.lsc != NULL); + ResourceOwnerReleaseLocalCatCList(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, is_commit); + ResourceOwnerReleaseLocalCatCTup(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, is_commit); + ResourceOwnerReleaseRelationRef(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, is_commit); + ResourceOwnerReleasePartitionRef(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, is_commit); + + t_thrd.lsc_cxt.lsc->LocalSysDBCacheReleaseGlobalReSource(is_commit); + + ReleaseBadPtrList(is_commit); + if (!is_commit) { + t_thrd.lsc_cxt.lsc->abort_count++; + } + /* resowner make sure the lock released */ + t_thrd.lsc_cxt.lsc->rdlock_info.count = 0; + + t_thrd.lsc_cxt.lsc->SetThreadDefExclusive(IS_THREAD_POOL_STREAM || IsBgWorkerProcess()); +} + +void ReBuildLSC() +{ + if (!EnableLocalSysCache()) { + return; + } + if (t_thrd.lsc_cxt.lsc == NULL || t_thrd.lsc_cxt.lsc->is_closed) { + return; + } + t_thrd.lsc_cxt.lsc->LocalSysDBCacheReBuild(); +} + +void AppendBadPtr(void *elem) +{ + BadPtrObj *obj = &t_thrd.lsc_cxt.lsc->bad_ptr_obj; + /* enlarge size of dad ptr list if necessary */ + int newmax = 0; + if (obj->nbadptr >= obj->maxbadptr) { + if (obj->bad_ptr_lists == NULL) { + newmax = 16; + obj->bad_ptr_lists = (void **)MemoryContextAlloc(t_thrd.lsc_cxt.lsc->lsc_share_memcxt, + newmax * sizeof(void *)); + obj->maxbadptr = newmax; + } else { + newmax = obj->maxbadptr * 2; + obj->bad_ptr_lists = (void **)repalloc(obj->bad_ptr_lists, newmax * sizeof(void *)); + obj->maxbadptr = newmax; + } + } + + /* remember bad ptr */ + Assert(obj->nbadptr < obj->maxbadptr); + obj->bad_ptr_lists[obj->nbadptr] = elem; + obj->nbadptr++; +} + +void RemoveBadPtr(void *elem) +{ + BadPtrObj *obj = &t_thrd.lsc_cxt.lsc->bad_ptr_obj; + void **bad_lists = obj->bad_ptr_lists; + int nc = obj->nbadptr - 1; + for (int i = nc; i >= 0; i--) { + if (bad_lists[i] == elem) { + while (i < nc) 
{
+                bad_lists[i] = bad_lists[i + 1];
+                i++;
+            }
+            obj->nbadptr = nc;
+            return;
+        }
+    }
+}
+
+static void ReleaseBadPtrList(bool isCommit)
+{
+    BadPtrObj *obj = &t_thrd.lsc_cxt.lsc->bad_ptr_obj;
+    while (obj->nbadptr > 0) {
+        if (isCommit) {
+            /* DFX: print some debug info here */
+        }
+        pfree_ext(obj->bad_ptr_lists[obj->nbadptr - 1]); /* only the pointer itself is freed */
+        obj->nbadptr--;
+    }
+}
+
+void StreamTxnContextSaveInvalidMsg(void *stc)
+{
+    if (!EnableLocalSysCache()) {
+        STCSaveElem(((StreamTxnContext *)stc)->lsc_dbcache, NULL);
+        return;
+    }
+    STCSaveElem(((StreamTxnContext *)stc)->lsc_dbcache, t_thrd.lsc_cxt.lsc);
+    /* we don't know what the bgworker will do, so just stop inserting
+     * rel/part entries into the gsc; each tuple carries its own flag
+     * that decides how its insert is handled */
+    t_thrd.lsc_cxt.lsc->SetThreadDefExclusive(true);
+}
+void StreamTxnContextRestoreInvalidMsg(void *stc)
+{
+    if (!EnableLocalSysCache()) {
+        return;
+    }
+    LocalSysDBCache *lsc_dbcache = ((StreamTxnContext *)stc)->lsc_dbcache;
+    InvalidBaseEntry *src_part = &lsc_dbcache->partdefcache.invalid_entries;
+    InvalidBaseEntry *dst_part = &t_thrd.lsc_cxt.lsc->partdefcache.invalid_entries;
+    for (int i = 0; i < src_part->count; i++) {
+        dst_part->InsertInvalidDefValue(src_part->invalid_values[i]);
+    }
+
+    InvalidBaseEntry *src_rel = &lsc_dbcache->tabdefcache.invalid_entries;
+    InvalidBaseEntry *dst_rel = &t_thrd.lsc_cxt.lsc->tabdefcache.invalid_entries;
+    for (int i = 0; i < src_rel->count; i++) {
+        dst_rel->InsertInvalidDefValue(src_rel->invalid_values[i]);
+    }
+
+    for (int i = 0; i < SysCacheSize; i++) {
+        InvalidBaseEntry *src_tup = &lsc_dbcache->systabcache.local_systupcaches[i]->invalid_entries;
+        InvalidBaseEntry *dst_tup = &t_thrd.lsc_cxt.lsc->systabcache.local_systupcaches[i]->invalid_entries;
+        for (int j = 0; j < src_tup->count; j++) {
+            dst_tup->InsertInvalidDefValue(src_tup->invalid_values[j]);
+            dst_tup->is_reset |= src_tup->is_reset;
+        }
+    }
+}
+
+void ReleaseAllGSCRdConcurrentLock()
+{
+    if (!EnableLocalSysCache() || t_thrd.lsc_cxt.lsc->rdlock_info.count == 0) {
+        return;
+    }
+    while (t_thrd.lsc_cxt.lsc->rdlock_info.count > 0) {
+        int cur_index = t_thrd.lsc_cxt.lsc->rdlock_info.count - 1;
+        ReleaseGSCTableReadLock(t_thrd.lsc_cxt.lsc->rdlock_info.has_concurrent_lock[cur_index],
+                                t_thrd.lsc_cxt.lsc->rdlock_info.concurrent_lock[cur_index]);
+    }
+}
\ No newline at end of file
diff --git a/src/common/backend/utils/cache/knl_localsystabcache.cpp b/src/common/backend/utils/cache/knl_localsystabcache.cpp
new file mode 100644
index 000000000..087de410d
--- /dev/null
+++ b/src/common/backend/utils/cache/knl_localsystabcache.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * --------------------------------------------------------------------------------------- + */ + +#include "utils/knl_globalsysdbcache.h" +#include "utils/knl_localsystabcache.h" +#include "utils/knl_localsystupcache.h" +#include "utils/memutils.h" +#include "utils/resowner.h" +#include "storage/ipc.h" +#include "utils/catcache.h" + +void LocalSysTabCache::CreateObject() +{ + local_systupcaches = (LocalSysTupCache **)palloc0(sizeof(LocalSysTupCache *) * SysCacheSize); + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + local_systupcaches[cache_id] = New(CurrentMemoryContext) LocalSysTupCache(cache_id); + } +} + +void LocalSysTabCache::CreateCatBuckets() +{ + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + local_systupcaches[cache_id]->CreateCatBucket(); + } +} \ No newline at end of file diff --git a/src/common/backend/utils/cache/knl_localsystupcache.cpp b/src/common/backend/utils/cache/knl_localsystupcache.cpp new file mode 100644 index 000000000..ddf196099 --- /dev/null +++ b/src/common/backend/utils/cache/knl_localsystupcache.cpp @@ -0,0 +1,830 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/genam.h" +#include "access/hash.h" +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/sysattr.h" +#include "access/transam.h" +#include "access/tuptoaster.h" +#include "access/valid.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_proc_fn.h" +#include "catalog/pg_type.h" +#include "catalog/pg_attribute.h" +#include "catalog/heap.h" +#include "funcapi.h" +#include "miscadmin.h" +#include "parser/parse_relation.h" +#include "parser/parse_type.h" +#include "pgstat.h" +#ifdef CatCache_STATS +#include "storage/ipc.h" /* for on_proc_exit */ +#endif +#include "storage/lmgr.h" +#include "utils/acl.h" +#include "utils/datum.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/extended_statistics.h" +#include "utils/fmgroids.h" +#include "utils/fmgrtab.h" +#include "utils/hashutils.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/rel_gs.h" +#include "utils/relcache.h" +#include "utils/resowner.h" +#include "utils/syscache.h" +#include "miscadmin.h" +#include "utils/knl_catcache.h" +#include "utils/knl_localsystupcache.h" +#include "utils/knl_globalsystabcache.h" + +void LocalSysTupCache::ResetInitFlag() +{ + DLInitList(&m_dead_cts); + DLInitList(&m_dead_cls); + DLInitList(&cc_lists); + invalid_entries.ResetInitFlag(); + /* DLInitList(Bucket); is already called by releaseglobalrefcount */ + m_global_systupcache = NULL; + m_relinfo.cc_relname = NULL; + m_relinfo.cc_tupdesc = NULL; + for (int i = 0; i < CATCACHE_MAXKEYS; i++) { + m_relinfo.cc_hashfunc[i] = NULL; + m_relinfo.cc_fastequal[i] = NULL; + } + m_db_id = InvalidOid; + 
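/* zero the per-cache statistics counters below (searches, hits, loads,
+     * invalidations); they restart from scratch the next time this tuple
+     * cache is initialized */
+ 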
cc_searches = 0; + cc_hits = 0; + cc_neg_hits = 0; + cc_newloads = 0; + cc_invals = 0; + cc_lsearches = 0; + cc_lhits = 0; + m_is_inited = false; + m_is_inited_phase2 = false; + m_rls_user = InvalidOid; +} + +void LocalSysTupCache::HandleDeadLocalCatCTup(LocalCatCTup *ct) +{ + DLRemove(&ct->cache_elem); + if (ct->refcount == 0) { + FreeLocalCatCTup(ct); + } else { + DLAddTail(&m_dead_cts, &ct->cache_elem); + } +} + +void LocalSysTupCache::FreeLocalCatCTup(LocalCatCTup *ct) +{ + Assert(ct->refcount == 0); + if (ct->global_ct == NULL) { + CatCacheFreeKeys(m_relinfo.cc_tupdesc, m_relinfo.cc_nkeys, m_relinfo.cc_keyno, ct->keys); + pfree_ext(ct); + return; + } + ct->global_ct->Release(); + pfree_ext(ct); +} + +void LocalSysTupCache::FreeDeadCts() +{ + while (unlikely(m_dead_cts.dll_len > 0)) { + Dlelem *elt = DLRemHead(&m_dead_cts); + LocalCatCTup *ct = (LocalCatCTup *)DLE_VAL(elt); + if (unlikely(ct->refcount != 0)) { + /* we move the active entry to tail of list and let next call free it */ + DLAddTail(&m_dead_cts, &ct->cache_elem); + break; + } else { + FreeLocalCatCTup(ct); + } + } +} + +void LocalSysTupCache::HandleDeadLocalCatCList(LocalCatCList *cl) +{ + DLRemove(&cl->cache_elem); + if (cl->refcount == 0) { + FreeLocalCatCList(cl); + } else { + DLAddTail(&m_dead_cls, &cl->cache_elem); + } +} + +void LocalSysTupCache::FreeLocalCatCList(LocalCatCList *cl) +{ + Assert(cl->refcount == 0); + cl->global_cl->Release(); + pfree_ext(cl); +} + +void LocalSysTupCache::FreeDeadCls() +{ + while (unlikely(m_dead_cls.dll_len > 0)) { + Dlelem *elt = DLRemHead(&m_dead_cls); + LocalCatCList *cl = (LocalCatCList *)DLE_VAL(elt); + if (unlikely(cl->refcount != 0)) { + /* we move the active entry to tail of list and let next call free it */ + DLAddTail(&m_dead_cls, &cl->cache_elem); + break; + } else { + FreeLocalCatCList(cl); + } + } +} + + +/* call when switch db, reset memcxt after it func */ +void LocalSysTupCache::ReleaseGlobalRefcount() +{ + /* not inited, nothing to do */ + if (unlikely(!m_is_inited_phase2)) { + return; + } + /* release cl */ + for (Dlelem *elt = DLGetHead(&cc_lists); elt; elt = DLGetSucc(elt)) { + LocalCatCList *cl = (LocalCatCList *)DLE_VAL(elt); + cl->global_cl->Release(); + } + DLInitList(&cc_lists); + while (m_dead_cls.dll_len > 0) { + Dlelem *elt = DLRemHead(&m_dead_cls); + LocalCatCList *cl = (LocalCatCList *)DLE_VAL(elt); + cl->global_cl->Release(); + } + + /* release ct */ + for (int hash_index = 0; hash_index < cc_nbuckets; hash_index++) { + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt;) { + LocalCatCTup *ct = (LocalCatCTup *)DLE_VAL(elt); + elt = DLGetSucc(elt); + Assert(ct->global_ct == NULL || ct->global_ct->refcount > 0); + /* we dont release exclusive tuple, because it is a local tuple */ + if (ct->global_ct != NULL && ct->global_ct->canInsertGSC) { + ct->global_ct->Release(); + } + } + DLInitList(GetBucket(hash_index)); + } + /* we dont reset mydb tuple buckets, instead we use bucket_db array to record them */ + while (m_dead_cts.dll_len > 0) { + Dlelem *elt = DLRemHead(&m_dead_cts); + LocalCatCTup *ct = (LocalCatCTup *)DLE_VAL(elt); + if (ct->global_ct != NULL) { + ct->global_ct->Release(); + } + } +} + +void LocalSysTupCache::ResetCatalogCache() +{ + /* not inited, nothing to do */ + if (unlikely(!m_is_inited_phase2)) { + return; + } + for (Dlelem *elt = DLGetHead(&cc_lists); elt;) { + LocalCatCList *cl = (LocalCatCList *)DLE_VAL(elt); + elt = DLGetSucc(elt); + HandleDeadLocalCatCList(cl); + } + /* Remove each tuple in this cache, now all cl are released. 
*/ + for (int hash_index = 0; hash_index < cc_nbuckets; hash_index++) { + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt;) { + LocalCatCTup *ct = (LocalCatCTup *)DLE_VAL(elt); + elt = DLGetSucc(elt); + Assert(ct->global_ct == NULL || ct->global_ct->refcount > 0); + HandleDeadLocalCatCTup(ct); + } + } +} + +void LocalSysTupCache::HashValueInvalidateLocal(uint32 hash_value) +{ + /* not inited, nothing to do */ + if (unlikely(!m_is_inited_phase2)) { + return; + } + /* + * inspect caches to find the proper cache + * list of cache_header is never changed after init, so lock is not needed + * Invalidate *all *CatCLists in this cache; it's too hard to tell + * which searches might still be correct, so just zap 'em all. + */ + for (Dlelem *elt = DLGetHead(&cc_lists); elt;) { + LocalCatCList *cl = (LocalCatCList *)DLE_VAL(elt); + elt = DLGetSucc(elt); + HandleDeadLocalCatCList(cl); + } + uint32 hash_index = HASH_INDEX(hash_value, cc_nbuckets); + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt;) { + LocalCatCTup *ct = (LocalCatCTup *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (hash_value == ct->hash_value) { + HandleDeadLocalCatCTup(ct); + } + } +} + +template +void LocalSysTupCache::FlushGlobalByInvalidMsg(Oid db_id, uint32 hash_value) +{ + if (db_id == InvalidOid) { + Assert(!m_is_inited_phase2 || m_relinfo.cc_relisshared); + GlobalSysTabCache *global_systab = t_thrd.lsc_cxt.lsc->GetSharedSysTabCache(); + global_systab->InvalidTuples(m_cache_id, hash_value, reset); + return; + } + if (m_is_inited) { + InitPhase2(); + Assert(db_id == m_db_id); + } + if (!m_is_inited_phase2) { + Assert(!m_is_inited); + /* redoxact meand !m_is_inited_phase2 */ + GlobalSysDBCacheEntry *entry = g_instance.global_sysdbcache.FindTempGSCEntry(db_id); + if (entry == NULL) { + return; + } + entry->m_systabCache->InvalidTuples(m_cache_id, hash_value, reset); + g_instance.global_sysdbcache.ReleaseTempGSCEntry(entry); + } else { + Assert(db_id == m_db_id); + if (reset) { + m_global_systupcache->ResetCatalogCache(); + } else { + m_global_systupcache->HashValueInvalidate(hash_value); + } + } +} + +template void LocalSysTupCache::FlushGlobalByInvalidMsg(Oid db_id, uint32 hash_value); +template void LocalSysTupCache::FlushGlobalByInvalidMsg(Oid db_id, uint32 hash_value); + + +void LocalSysTupCache::PrepareToInvalidateCacheTuple(HeapTuple tuple, HeapTuple newtuple, + void (*function)(int, uint32, Oid)) +{ + if (m_relinfo.cc_indexoid == ProcedureNameAllArgsNspIndexId + && t_thrd.proc->workingVersionNum < 92470) { + return; + } + InitPhase2(); + Assert(m_global_systupcache != NULL); + Assert(CheckMyDatabaseMatch()); + uint32 hash_value = CatalogCacheComputeTupleHashValue(cc_id, m_relinfo.cc_keyno, m_relinfo.cc_tupdesc, + m_relinfo.cc_hashfunc, m_relinfo.cc_reloid, m_relinfo.cc_nkeys, tuple); + Oid dbid = m_relinfo.cc_relisshared ? 
(Oid)0 : t_thrd.lsc_cxt.lsc->my_database_id; + /* for every session si msg */ + (*function)(cc_id, hash_value, dbid); + if (newtuple) { + uint32 new_hash_value = CatalogCacheComputeTupleHashValue(cc_id, m_relinfo.cc_keyno, m_relinfo.cc_tupdesc, + m_relinfo.cc_hashfunc, m_relinfo.cc_reloid, m_relinfo.cc_nkeys, newtuple); + if (new_hash_value != hash_value) { + (*function)(cc_id, new_hash_value, dbid); + } + } +} + +LocalSysTupCache::LocalSysTupCache(int cache_id) +{ + m_cache_id = cache_id; + cc_buckets = NULL; + m_local_mem_cxt = NULL; + ResetInitFlag(); + const cachedesc *cur_cache_info = &cacheinfo[m_cache_id]; + cc_nbuckets = ResizeHashBucket(cur_cache_info->nbuckets, g_instance.global_sysdbcache.dynamic_hash_bucket_strategy); + cc_id = m_cache_id; + m_relinfo.cc_reloid = cur_cache_info->reloid; + m_relinfo.cc_indexoid = cur_cache_info->indoid; + m_relinfo.cc_nkeys = cur_cache_info->nkeys; + for (int i = 0; i < m_relinfo.cc_nkeys; ++i) { + m_relinfo.cc_keyno[i] = cur_cache_info->key[i]; + } + m_relinfo.cc_relisshared = g_instance.global_sysdbcache.HashSearchSharedRelation(m_relinfo.cc_reloid); +} + +void LocalSysTupCache::CreateCatBucket() +{ + invalid_entries.Init(); + Assert(cc_nbuckets > 0 && (cc_nbuckets & -cc_nbuckets) == cc_nbuckets); + size_t sz = (cc_nbuckets) * (sizeof(Dllist)); + cc_buckets = (Dllist *)palloc0(sz); + if (m_relinfo.cc_relisshared) { + m_local_mem_cxt = t_thrd.lsc_cxt.lsc->lsc_share_memcxt; + } else { + m_local_mem_cxt = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + } +} + +void LocalSysTupCache::FlushRlsUserImpl() +{ + Oid cur_user = GetCurrentUserId(); + if (likely(m_rls_user == GetCurrentUserId()) || cur_user == InvalidOid) { + return; + } + /* we must flush the local cache, which dont belong to current user */ + ResetCatalogCache(); + m_rls_user = GetCurrentUserId(); +} + + +void LocalSysTupCache::InitPhase2Impl() +{ + Assert(m_is_inited); + Assert(!m_is_inited_phase2); + Assert(m_db_id == InvalidOid); + Assert(m_global_systupcache == NULL); + /* for now we even dont know which db to connect */ + if (m_relinfo.cc_relisshared) { + m_db_id = InvalidOid; + GlobalSysTabCache *global_shared_systab = t_thrd.lsc_cxt.lsc->GetSharedSysTabCache(); + m_global_systupcache = global_shared_systab->CacheIdGetGlobalSysTupCache(m_cache_id); + } else { + Assert(CheckMyDatabaseMatch()); + GlobalSysTabCache *global_systabcache = t_thrd.lsc_cxt.lsc->GetGlobalSysTabCache(); + m_db_id = t_thrd.lsc_cxt.lsc->my_database_id; + m_global_systupcache = global_systabcache->CacheIdGetGlobalSysTupCache(m_cache_id); + } + Assert(m_global_systupcache != NULL); + m_relinfo.cc_relname = m_global_systupcache->GetCCRelName(); + for (int i = 0; i < CATCACHE_MAXKEYS; i++) { + m_relinfo.cc_hashfunc[i] = m_global_systupcache->GetCCHashFunc()[i]; + m_relinfo.cc_fastequal[i] = m_global_systupcache->GetCCFastEqual()[i]; + } + m_relinfo.cc_tupdesc = m_global_systupcache->GetCCTupleDesc(); + m_is_inited_phase2 = true; +} + +void LocalSysTupCache::RemoveTailTupleElements(Index hash_index) +{ + bool listBelowThreshold = GetBucket(hash_index)->dll_len < MAX_LSC_LIST_LENGTH; + if (listBelowThreshold && !t_thrd.lsc_cxt.lsc->LocalSysDBCacheNeedSwapOut()) { + return; + } + Dllist *list = GetBucket(hash_index); + for (Dlelem *elt = DLGetTail(list); elt != NULL;) { + LocalCatCTup *ct = (LocalCatCTup *)DLE_VAL(elt); + elt = DLGetPred(elt); + if (ct->refcount != 0) { + DLMoveToFront(&ct->cache_elem); + /* lsc do lru strategy, so the front of this cl are all refered probably, just break */ + break; + } + 
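/* refcount is zero here, so HandleDeadLocalCatCTup unlinks the entry and
+             * frees it immediately; this loop is the LRU swap-out path */
+ 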
HandleDeadLocalCatCTup(ct); + } +} + +void LocalSysTupCache::RemoveTailListElements() +{ + bool listBelowThreshold = cc_lists.dll_len < MAX_LSC_LIST_LENGTH; + if (listBelowThreshold && !t_thrd.lsc_cxt.lsc->LocalSysDBCacheNeedSwapOut()) { + return; + } + int swapout_count = 0; + int max_swapout_count_once = cc_lists.dll_len >> 1; + for (Dlelem *elt = DLGetTail(&cc_lists); elt != NULL;) { + LocalCatCList *cl = (LocalCatCList *)DLE_VAL(elt); + elt = DLGetPred(elt); + if (cl->refcount != 0) { + DLMoveToFront(&cl->cache_elem); + /* lsc do lru strategy, so the front of this cl are all refered probably, just break */ + break; + } + HandleDeadLocalCatCList(cl); + swapout_count++; + /* lsc is limitted by local_syscache_threshold, so dont be aggressive to swapout */ + if (listBelowThreshold || swapout_count > max_swapout_count_once) { + break; + } + } +} + +LocalCatCTup *LocalSysTupCache::SearchTupleFromGlobal(Datum *arguments, uint32 hash_value, Index hash_index, int level) +{ + LocalCatCTup *ct = NULL; + ResourceOwnerEnlargeGlobalCatCTup(LOCAL_SYSDB_RESOWNER); + GlobalCatCTup *global_ct; + /* gsc only cache snapshotnow + * for rls, we cann't know how to store the rls info, so dont cache it */ + bool bypass_gsc = HistoricSnapshotActive() || + m_global_systupcache->enable_rls || + !g_instance.global_sysdbcache.hot_standby || + unlikely(!g_instance.global_sysdbcache.recovery_finished); + if (invalid_entries.ExistTuple(hash_value) || bypass_gsc) { + global_ct = m_global_systupcache->SearchTupleFromFile(hash_value, arguments, true); + } else { + global_ct = m_global_systupcache->SearchTuple(hash_value, arguments); + } + + /* In bootstrap mode, we don't build negative entries, because the cache + * invalidation mechanism isn't alive and can't clear them if the tuple + * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need + * cache inval for that.) + */ + if (global_ct == NULL) { + if (IsBootstrapProcessingMode()) { + return NULL; + } + ct = CreateLocalCatCTup(NULL, arguments, hash_value, hash_index); + } else { + Assert(global_ct->refcount > 0); + ResourceOwnerRememberGlobalCatCTup(LOCAL_SYSDB_RESOWNER, global_ct); + ct = CreateLocalCatCTup(global_ct, arguments, hash_value, hash_index); + ResourceOwnerForgetGlobalCatCTup(LOCAL_SYSDB_RESOWNER, global_ct); + } + return ct; +} + +/* + * Work-horse for SearchTuple/SearchTupleN. + */ +LocalCatCTup *LocalSysTupCache::SearchTupleInternal(int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level) +{ + Assert(m_relinfo.cc_nkeys == nkeys); + SearchCatCacheCheck(); + FreeDeadCts(); + FreeDeadCls(); + cc_searches++; + /* Initialize local parameter array */ + Datum arguments[CATCACHE_MAXKEYS]; + arguments[0] = v1; + arguments[1] = v2; + arguments[2] = v3; + arguments[3] = v4; + /* + * find the hash bucket in which to look for the tuple + */ + uint32 hash_value = CatalogCacheComputeHashValue(m_relinfo.cc_hashfunc, nkeys, arguments); + Index hash_index = HASH_INDEX(hash_value, (uint32)cc_nbuckets); + /* + * scan the hash bucket until we find a match or exhaust our tuples + * remove dead tuple by the way + */ + bool found = false; + LocalCatCTup *ct = NULL; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt;) { + ct = (LocalCatCTup *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (unlikely(ct->hash_value != hash_value)) { + continue; /* quickly skip entry if wrong hash val */ + } + if (unlikely(!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, nkeys, ct->keys, arguments))) { + continue; + } + /* + * We found a match in the cache. 
Move it to the front of the list + * for its hashbucket, in order to speed subsequent searches. (The + * most frequently accessed elements in any hashbucket will tend to be + * near the front of the hashbucket's list.) + */ + DLMoveToFront(&ct->cache_elem); + found = true; + break; + } + + /* if not found, search from global cache */ + if (unlikely(!found)) { + ct = SearchTupleFromGlobal(arguments, hash_value, hash_index, level); + if (ct == NULL) { + return NULL; + } + } + + /* + * If it's a positive entry, bump its refcount and return it. If it's + * negative, we can report failure to the caller. + */ + if (likely(ct->global_ct != NULL)) { + CACHE3_elog(DEBUG2, "SearchLocalCatCache(%s): found in bucket %d", m_relinfo.cc_relname, hash_index); + ResourceOwnerEnlargeLocalCatCTup(LOCAL_SYSDB_RESOWNER); + ct->refcount++; + cc_hits++; + ResourceOwnerRememberLocalCatCTup(LOCAL_SYSDB_RESOWNER, ct); + } else { + CACHE3_elog(DEBUG2, "SearchLocalCatCache(%s): found neg entry in bucket %d", m_relinfo.cc_relname, hash_index); + cc_neg_hits++; + ct = NULL; + } + + RemoveTailTupleElements(hash_index); + return ct; +} + +/* + * Create a new CatCTup entry, point to global_ct, The new entry initially has refcount 0. + */ +LocalCatCTup *LocalSysTupCache::CreateLocalCatCTup(GlobalCatCTup *global_ct, Datum *arguments, uint32 hash_value, + Index hash_index) +{ + MemoryContext oldcxt = MemoryContextSwitchTo(m_local_mem_cxt); + LocalCatCTup *ct = (LocalCatCTup *)palloc(sizeof(LocalCatCTup)); + + /* + * Finish initializing the CatCTup header, and add it to the cache's + * linked list and counts. + */ + ct->ct_magic = CT_MAGIC; + DLInitElem(&ct->cache_elem, (void *)ct); + ct->refcount = 0; + ct->hash_value = hash_value; + ct->global_ct = global_ct; + /* palloc maybe fail, but that means global_ct is null, so nothing need to do */ + if (global_ct != NULL) { + errno_t rc = memcpy_s(ct->keys, sizeof(Datum) * CATCACHE_MAXKEYS, + global_ct->keys, sizeof(Datum) * CATCACHE_MAXKEYS); + securec_check(rc, "", ""); + } else { + errno_t rc = memset_s(ct->keys, m_relinfo.cc_nkeys * sizeof(Datum), 0, m_relinfo.cc_nkeys * sizeof(Datum)); + securec_check(rc, "\0", "\0"); + CatCacheCopyKeys(m_relinfo.cc_tupdesc, m_relinfo.cc_nkeys, m_relinfo.cc_keyno, arguments, ct->keys); + } + MemoryContextSwitchTo(oldcxt); + DLAddHead(&cc_buckets[hash_index], &ct->cache_elem); + return ct; +} + +LocalCatCList *LocalSysTupCache::SearchListFromGlobal(int nkeys, Datum *arguments, uint32 hash_value, int level) +{ + ResourceOwnerEnlargeGlobalCatCList(LOCAL_SYSDB_RESOWNER); + /* + * List was not found in cache, so we have to build it by reading the + * relation. For each matching tuple found in the relation, use an + * existing cache entry if possible, else build a new one. + * + * We have to bump the member refcounts temporarily to ensure they won't + * get dropped from the cache while loading other members. We use a PG_TRY + * block to ensure we can undo those refcounts if we get an error before + * we finish constructing the CatCList. 
+ */ + bool bypass_gsc = HistoricSnapshotActive() || + m_global_systupcache->enable_rls || + !g_instance.global_sysdbcache.hot_standby || + unlikely(!g_instance.global_sysdbcache.recovery_finished); + GlobalCatCList *global_cl; + if (invalid_entries.ExistList() || bypass_gsc) { + global_cl = m_global_systupcache->SearchListFromFile(hash_value, nkeys, arguments, true); + } else { + global_cl = m_global_systupcache->SearchList(hash_value, nkeys, arguments); + } + Assert(global_cl != NULL); + + MemoryContext oldcxt = MemoryContextSwitchTo(m_local_mem_cxt); + LocalCatCList *new_cl = (LocalCatCList *)palloc0(sizeof(LocalCatCList)); + MemoryContextSwitchTo(oldcxt); + new_cl->cl_magic = CL_MAGIC; + new_cl->hash_value = hash_value; + DLInitElem(&new_cl->cache_elem, new_cl); + errno_t rc = memcpy_s(new_cl->keys, nkeys * sizeof(Datum), global_cl->keys, nkeys * sizeof(Datum)); + securec_check(rc, "", ""); + new_cl->refcount = 1; /* for the moment */ + new_cl->ordered = global_cl->ordered; + new_cl->nkeys = nkeys; + new_cl->n_members = global_cl->n_members; + new_cl->global_cl = global_cl; + new_cl->systups = (CatCTup **)global_cl->members; + ResourceOwnerForgetGlobalCatCList(LOCAL_SYSDB_RESOWNER, global_cl); + DLAddHead(&cc_lists, &new_cl->cache_elem); + CACHE3_elog(DEBUG2, "SearchLocalCatCacheList(%s): made list of %d members", + m_relinfo.cc_relname, new_cl->n_members); + return new_cl; +} + +/* + * SearchListInternal + * + * Generate a list of all tuples matching a partial key (that is, + * a key specifying just the first K of the cache's N key columns). + * + * The caller must not modify the list object or the pointed-to tuples, + * and must call ReleaseLocalCatCList() when done with the list. + */ +LocalCatCList *LocalSysTupCache::SearchListInternal(int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level) +{ + SearchCatCacheCheck(); + Assert(nkeys > 0 && nkeys < m_relinfo.cc_nkeys); + cc_lsearches++; + + /* Initialize local parameter array */ + Datum arguments[CATCACHE_MAXKEYS]; + arguments[0] = v1; + arguments[1] = v2; + arguments[2] = v3; + arguments[3] = v4; + /* + * compute a hash value of the given keys for faster search. We don't + * presently divide the CatCList items into buckets, but this still lets + * us skip non-matching items quickly most of the time. 
+ */ + uint32 hash_value = CatalogCacheComputeHashValue(m_relinfo.cc_hashfunc, nkeys, arguments); + + ResourceOwnerEnlargeLocalCatCList(LOCAL_SYSDB_RESOWNER); + /* + * scan the items until we find a match or exhaust our list + * remove dead list by the way + */ + bool found = false; + LocalCatCList *cl = NULL; + for (Dlelem *elt = DLGetHead(&cc_lists); elt; elt = DLGetSucc(elt)) { + cl = (LocalCatCList *)DLE_VAL(elt); + if (likely(cl->hash_value != hash_value)) { + continue; /* quickly skip entry if wrong hash val */ + } + if (unlikely(cl->nkeys != nkeys)) { + continue; + } + if (unlikely(!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, nkeys, cl->keys, arguments))) { + continue; + } + DLMoveToFront(&cl->cache_elem); + /* Bump the list's refcount */ + cl->refcount++; + CACHE2_elog(DEBUG2, "SearchLocalCatCacheList(%s): found list", m_relinfo.cc_relname); + cc_lhits++; + found = true; + break; + } + + if (unlikely(!found)) { + cl = SearchListFromGlobal(nkeys, arguments, hash_value, level); + } + ResourceOwnerRememberLocalCatCList(LOCAL_SYSDB_RESOWNER, cl); + + RemoveTailListElements(); + return cl; +} + +uint32 LocalSysTupCache::GetCatCacheHashValue(Datum v1, Datum v2, Datum v3, Datum v4) +{ + InitPhase2(); + Datum arguments[CATCACHE_MAXKEYS]; + arguments[0] = v1; + arguments[1] = v2; + arguments[2] = v3; + arguments[3] = v4; + return CatalogCacheComputeHashValue(m_relinfo.cc_hashfunc, m_relinfo.cc_nkeys, arguments); +} + +#ifndef ENABLE_MULTIPLE_NODES +LocalCatCTup *LocalSysTupCache::SearchTupleFromGlobalForProcAllArgs( + Datum *arguments, uint32 hash_value, Index hash_index, oidvector* argModes) +{ + LocalCatCTup *ct = NULL; + ResourceOwnerEnlargeGlobalCatCTup(LOCAL_SYSDB_RESOWNER); + GlobalCatCTup *global_ct; + /* gsc only cache snapshotnow + * for rls, we cann't know how to store the rls info, so dont cache it */ + bool bypass_gsc = HistoricSnapshotActive() || + m_global_systupcache->enable_rls || + !g_instance.global_sysdbcache.hot_standby || + unlikely(!g_instance.global_sysdbcache.recovery_finished); + if (invalid_entries.ExistTuple(hash_value) || bypass_gsc) { + global_ct = m_global_systupcache->SearchTupleFromFileWithArgModes(hash_value, arguments, argModes, true); + } else { + global_ct = m_global_systupcache->SearchTupleWithArgModes(hash_value, arguments, argModes); + } + + /* + * In this specific function for procallargs, we no longer build + * negative entry any more, because when we create a overload pg-style + * function with the same number intype parameters and different outtype + * parameters (a in int) vs (a in int, b out int), syscache will find no + * suitable cache tuple and make a new negative cache entry, but error will + * raise in such case, and no one will free the negative cache entry! + * In fact, the case metioned above should find a suitable tuple to return + * the caller, but for the reason we adapt a suit of Specific Function + * to support ProcedureCreate, syscache couldn't find the tuple. Someone + * may find new methods to solve the problem and refactor this! + */ + if (global_ct == NULL) { + return NULL; + } + + Assert(global_ct == NULL || global_ct->refcount > 0); + ResourceOwnerRememberGlobalCatCTup(LOCAL_SYSDB_RESOWNER, global_ct); + ct = CreateLocalCatCTup(global_ct, arguments, hash_value, hash_index); + ResourceOwnerForgetGlobalCatCTup(LOCAL_SYSDB_RESOWNER, global_ct); + return ct; +} + +/* + * Specific SearchLocalCatCTuple Function to support ProcedureCreate! 
+ */ +LocalCatCTup *LocalSysTupCache::SearchLocalCatCTupleForProcAllArgs( + Datum v1, Datum v2, Datum v3, Datum v4, Datum proArgModes) +{ + InitPhase2(); + + SearchCatCacheCheck(); + FreeDeadCts(); + FreeDeadCls(); + cc_searches++; + + Datum arguments[CATCACHE_MAXKEYS]; + /* + * Logic here is the same in Sys/CatCache Search + */ + oidvector* allArgTypes = (oidvector*)DatumGetPointer(v2); + + Assert(allArgTypes != NULL); + + oidvector* argModes = ConvertArgModesToMd5Vector(proArgModes); + oidvector* v2WithArgModes = MergeOidVector(allArgTypes, argModes); + Datum newKey2 = PointerGetDatum(v2WithArgModes); + + /* Initialize local parameter array */ + arguments[0] = v1; + arguments[1] = newKey2; + arguments[2] = v3; + arguments[3] = v4; + /* + * find the hash bucket in which to look for the tuple + */ + uint32 hash_value = CatalogCacheComputeHashValue(m_relinfo.cc_hashfunc, m_relinfo.cc_nkeys, arguments); + Index hash_index = HASH_INDEX(hash_value, (uint32)cc_nbuckets); + + /* reset parameter array */ + pfree_ext(v2WithArgModes); + arguments[1] = v2; + + /* + * scan the hash bucket until we find a match or exhaust our tuples + * remove dead tuple by the way + */ + bool found = false; + LocalCatCTup *ct = NULL; + for (Dlelem *elt = DLGetHead(GetBucket(hash_index)); elt;) { + ct = (LocalCatCTup *)DLE_VAL(elt); + elt = DLGetSucc(elt); + if (unlikely(ct->hash_value != hash_value)) { + continue; /* quickly skip entry if wrong hash val */ + } + if (unlikely(!CatalogCacheCompareTuple(m_relinfo.cc_fastequal, m_relinfo.cc_nkeys, ct->keys, arguments))) { + continue; + } + + /* + * The comparison of hashvalue and keys is not enough. + */ + if (!IsProArgModesEqualByTuple(&ct->global_ct->tuple, m_relinfo.cc_tupdesc, argModes)) { + continue; + } + /* + * We found a match in the cache. Move it to the front of the list + * for its hashbucket, in order to speed subsequent searches. (The + * most frequently accessed elements in any hashbucket will tend to be + * near the front of the hashbucket's list.) + */ + DLMoveToFront(&ct->cache_elem); + found = true; + break; + } + + /* if not found, search from global cache */ + if (unlikely(!found)) { + ct = SearchTupleFromGlobalForProcAllArgs(arguments, hash_value, hash_index, argModes); + if (ct == NULL) { + pfree_ext(argModes); + return NULL; + } + } + /* + * If it's a positive entry, bump its refcount and return it. If it's + * negative, we can report failure to the caller. + */ + if (likely(ct->global_ct != NULL)) { + CACHE3_elog(DEBUG2, "SearchLocalCatCache(%s): found in bucket %d", m_relinfo.cc_relname, hash_index); + ct->refcount++; + cc_hits++; + ResourceOwnerRememberLocalCatCTup(LOCAL_SYSDB_RESOWNER, ct); + } + + RemoveTailTupleElements(hash_index); + + pfree_ext(argModes); + return ct; +} +#endif diff --git a/src/common/backend/utils/cache/knl_localtabdefcache.cpp b/src/common/backend/utils/cache/knl_localtabdefcache.cpp new file mode 100644 index 000000000..46d3f77e0 --- /dev/null +++ b/src/common/backend/utils/cache/knl_localtabdefcache.cpp @@ -0,0 +1,1139 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + */ + + +#include "catalog/indexing.h" +#include "catalog/pg_amproc.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_trigger.h" +#include "catalog/storage_gtt.h" +#include "catalog/heap.h" +#include "commands/matview.h" +#include "commands/sec_rls_cmds.h" +#include "postmaster/autovacuum.h" +#include "pgxc/bucketmap.h" +#include "utils/knl_catcache.h" +#include "knl/knl_instance.h" +#include "utils/knl_localtabdefcache.h" +#include "utils/knl_relcache.h" +#include "utils/memutils.h" +#include "utils/relmapper.h" +#include "utils/sec_rls_utils.h" + + +bool has_locator_info(GlobalBaseEntry *entry) +{ + return ((GlobalRelationEntry *)entry)->rel->rd_locator_info != NULL; +} + +static bool IsRelationStoreInglobal(Relation rel); + +LocalTabDefCache::LocalTabDefCache() +{ + ResetInitFlag(); +} + +static void SetGttInfo(Relation rel) +{ + if (rel->rd_rel->relpersistence == RELPERSISTENCE_GLOBAL_TEMP && rel->rd_backend != BackendIdForTempRelations) { + RelationCloseSmgr(rel); + rel->rd_backend = BackendIdForTempRelations; + BlockNumber relpages = 0; + double reltuples = 0; + BlockNumber relallvisible = 0; + get_gtt_relstats(RelationGetRelid(rel), &relpages, &reltuples, &relallvisible, NULL); + rel->rd_rel->relpages = (float8)relpages; + rel->rd_rel->reltuples = (float8)reltuples; + rel->rd_rel->relallvisible = (int4)relallvisible; + } +} + +Relation LocalTabDefCache::SearchRelationFromLocal(Oid rel_oid) +{ + uint32 hash_value = oid_hash((void *)&(rel_oid), sizeof(Oid)); + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + LocalRelationEntry *entry = (LocalRelationEntry *)LocalBaseDefCache::SearchEntryFromLocal(rel_oid, hash_index); + + if (unlikely(entry == NULL)) { + return NULL; + } + Assert(entry->rel->rd_node.spcNode != InvalidOid); + Assert(entry->rel->rd_node.relNode != InvalidOid); + Assert(entry->rel->rd_islocaltemp == false); + SetGttInfo(entry->rel); + return entry->rel; +} + +template +Relation LocalTabDefCache::SearchRelationFromGlobalCopy(Oid rel_oid) +{ + if (unlikely(!m_is_inited)) { + return NULL; + } + if (invalid_entries.ExistDefValue(rel_oid)) { + return NULL; + } + if (HistoricSnapshotActive()) { + return NULL; + } + if (!g_instance.global_sysdbcache.hot_standby) { + return NULL; + } + if (unlikely(!g_instance.global_sysdbcache.recovery_finished)) { + return NULL; + } + uint32 hash_value = oid_hash((void *)&(rel_oid), sizeof(Oid)); + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + ResourceOwnerEnlargeGlobalBaseEntry(owner); + GlobalRelationEntry *global; + if (g_instance.global_sysdbcache.HashSearchSharedRelation(rel_oid)) { + global = m_global_shared_tabdefcache->SearchReadOnly(rel_oid, hash_value); + } else { + Assert(m_global_tabdefcache != NULL); + Assert(m_is_inited_phase2); + if (!m_is_inited_phase2 || m_global_tabdefcache == NULL) { + ereport(FATAL, + (errcode(ERRCODE_INVALID_STATUS), + errmsg("rel_oid %u is shared but search return false", rel_oid))); + } + global = m_global_tabdefcache->SearchReadOnly(rel_oid, 
hash_value); + } + + if (global == NULL) { + return NULL; + } + ResourceOwnerRememberGlobalBaseEntry(owner, global); + + MemoryContext old = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); + Relation copy = (Relation)palloc(sizeof(RelationData)); + CopyLocalRelation(copy, global->rel); + MemoryContextSwitchTo(old); + + ResourceOwnerForgetGlobalBaseEntry(owner, global); + global->Release(); + if (insert_into_local) { + Assert(RemoveRelationByOid(rel_oid, hash_index) == NULL); + CreateLocalRelEntry(copy, hash_index); + } + + Assert(copy->rd_node.spcNode != InvalidOid); + Assert(copy->rd_node.relNode != InvalidOid); + return copy; +} +template Relation LocalTabDefCache::SearchRelationFromGlobalCopy(Oid rel_oid); +template Relation LocalTabDefCache::SearchRelationFromGlobalCopy(Oid rel_oid); + +Relation LocalTabDefCache::SearchRelation(Oid rel_oid) +{ + Relation rel = SearchRelationFromLocal(rel_oid); + if (rel == NULL) { + rel = SearchRelationFromGlobalCopy(rel_oid); + } + return rel; +} + +void LocalTabDefCache::CreateLocalRelEntry(Relation rel, Index hash_index) +{ + if (t_thrd.lsc_cxt.lsc->LocalSysDBCacheNeedSwapOut()) { + LocalBaseDefCache::RemoveTailDefElements(); + } + Assert(rel->rd_fdwroutine == NULL); + Assert(rel->rd_isnailed ? rel->rd_refcnt == 1 : rel->rd_refcnt == 0); + Assert(rel->rd_att->tdrefcount > 0); + LocalRelationEntry *entry = + (LocalRelationEntry *)LocalBaseDefCache::CreateEntry(hash_index, sizeof(LocalRelationEntry)); + entry->rel = rel; + entry->oid = rel->rd_id; + entry->obj_is_nailed = rel->rd_isnailed; + rel->entry = entry; + RememberRelSonMemCxtSpace(rel); +} + +static bool IsRelOidStoreInGlobal(Oid rel_oid) +{ + /* pgxc node need reload, we invalid all relcache with locator info */ + if (unlikely(IsGotPoolReload() && IS_PGXC_COORDINATOR && !IsSystemObjOid(rel_oid))) { + return false; + } + if (unlikely(IsBootstrapProcessingMode())) { + return false; + } + if (unlikely(t_thrd.lsc_cxt.lsc->GetThreadDefExclusive())) { + return false; + } + + if (unlikely(t_thrd.lsc_cxt.lsc->tabdefcache.invalid_entries.ExistDefValue(rel_oid))) { + return false; + } + + if (unlikely(u_sess->attr.attr_common.IsInplaceUpgrade)) { + return false; + } + if (HistoricSnapshotActive()) { + return false; + } + if (!g_instance.global_sysdbcache.hot_standby) { + return false; + } + if (unlikely(!g_instance.global_sysdbcache.recovery_finished)) { + return false; + } + if (g_instance.global_sysdbcache.StopInsertGSC()) { + return false; + } + return true; +} + +static bool IsRelationStoreInglobal(Relation rel) +{ + Assert(rel->rd_createSubid == InvalidSubTransactionId); + Assert(rel->rd_newRelfilenodeSubid == InvalidSubTransactionId); + Assert(rel->rd_rel->relowner != InvalidOid); + Assert(rel->rd_isvalid); + /* 2 is tmp index */ + Assert(rel->rd_indexvalid != 2); + + return true; +} + +void LocalTabDefCache::InsertRelationIntoGlobal(Relation rel, uint32 hash_value) +{ + /* not insert tmp rel or creating rel of session into global cache */ + if (!IsRelationStoreInglobal(rel)) { + return; + } + /* when insert, we must make sure m_global_shared_tabdefcache is inited at least */ + Assert(m_global_tabdefcache != NULL || (rel->rd_rel->relisshared && m_global_shared_tabdefcache != NULL)); + /* upgrade mode never do insert */ + if (g_instance.global_sysdbcache.HashSearchSharedRelation(rel->rd_id)) { + Assert(rel->rd_rel->relisshared); + m_global_shared_tabdefcache->Insert(rel, hash_value); + } else { + Assert(!rel->rd_rel->relisshared); + m_global_tabdefcache->Insert(rel, hash_value); + } +} + +void 
LocalTabDefCache::InsertRelationIntoLocal(Relation rel) +{ + uint32 hash_value = oid_hash((void *)&(rel->rd_id), sizeof(Oid)); + Index hash_index = HASH_INDEX(hash_value, (uint32)m_nbuckets); + CreateLocalRelEntry(rel, hash_index); +} + +void LocalTabDefCache::RemoveRelation(Relation rel) +{ + ForgetRelSonMemCxtSpace(rel); + m_bucket_list.RemoveElemFromBucket(&rel->entry->cache_elem); + pfree_ext(rel->entry); +} + +Relation LocalTabDefCache::RemoveRelationByOid(Oid rel_oid, Index hash_index) +{ + LocalRelationEntry *entry = (LocalRelationEntry *)LocalBaseDefCache::SearchEntryFromLocal(rel_oid, hash_index); + if (entry == NULL) { + return NULL; + } + Relation old_rel = entry->rel; + RemoveRelation(old_rel); + return old_rel; +} + +static void SpecialWorkOfRelationLocInfo(Relation rel) +{ + if (rel->rd_locator_info == NULL) { + return; + } + Assert(IS_PGXC_COORDINATOR && rel->rd_id >= FirstNormalObjectId); + /* global store nodeoid, we need convert it to nodeid */ + ListCell *lc; + foreach (lc, rel->rd_locator_info->nodeList) { + Oid datanode_oid = lfirst_oid(lc); + int seqNum = PGXCNodeGetNodeId(datanode_oid, PGXC_NODE_DATANODE); + if (likely(seqNum >= 0)) { + lfirst_oid(lc) = seqNum; + } else { + /* convert failed, so free locator info and refrush global, and call RelationBuildLocator + * pgxc_pool_reload may cause this, we loss some datanodes after pgxc_pool_reload. + * actually we only need refrush rd_locator_info->nodeList. but we assumpt that + * pgxc_pool_reload is a small probability event + */ + t_thrd.lsc_cxt.lsc->tabdefcache.InvalidateGlobalRelationNodeList(); + FreeRelationLocInfo(rel->rd_locator_info); + RelationBuildLocator(rel); + return; + } + } + + /* code below can be packaged as a func */ + RelationLocInfo *rd_locator_info = rel->rd_locator_info; + Assert(rd_locator_info->buckets_ptr == NULL); + rd_locator_info->buckets_ptr = NULL; + rd_locator_info->buckets_cnt = 0; + if (!IsAutoVacuumWorkerProcess()) { + InitBuckets(rd_locator_info, rel); + } + + /* + * If the locator type is round robin, we set a node to + * use next time. In addition, if it is replicated, + * we choose a node to use for balancing reads. 
+
+static void SpecialWorkOfRelationLocInfo(Relation rel)
+{
+    if (rel->rd_locator_info == NULL) {
+        return;
+    }
+    Assert(IS_PGXC_COORDINATOR && rel->rd_id >= FirstNormalObjectId);
+    /* the global cache stores node OIDs; convert them to node ids here */
+    ListCell *lc;
+    foreach (lc, rel->rd_locator_info->nodeList) {
+        Oid datanode_oid = lfirst_oid(lc);
+        int seqNum = PGXCNodeGetNodeId(datanode_oid, PGXC_NODE_DATANODE);
+        if (likely(seqNum >= 0)) {
+            lfirst_oid(lc) = seqNum;
+        } else {
+            /* Conversion failed, so free the locator info, refresh the global copy, and call
+             * RelationBuildLocator. pgxc_pool_reload may cause this: we may lose some datanodes
+             * after pgxc_pool_reload. Strictly we only need to refresh rd_locator_info->nodeList,
+             * but we assume pgxc_pool_reload is a low-probability event.
+             */
+            t_thrd.lsc_cxt.lsc->tabdefcache.InvalidateGlobalRelationNodeList();
+            FreeRelationLocInfo(rel->rd_locator_info);
+            RelationBuildLocator(rel);
+            return;
+        }
+    }
+
+    /* the code below could be factored into a helper function */
+    RelationLocInfo *rd_locator_info = rel->rd_locator_info;
+    Assert(rd_locator_info->buckets_ptr == NULL);
+    rd_locator_info->buckets_ptr = NULL;
+    rd_locator_info->buckets_cnt = 0;
+    if (!IsAutoVacuumWorkerProcess()) {
+        InitBuckets(rd_locator_info, rel);
+    }
+
+    /*
+     * If the locator type is round robin, we set a node to
+     * use next time. In addition, if it is replicated,
+     * we choose a node to use for balancing reads.
+     */
+    if (rd_locator_info->locatorType == LOCATOR_TYPE_RROBIN ||
+        rd_locator_info->locatorType == LOCATOR_TYPE_REPLICATED) {
+        int offset;
+        /*
+         * pick a random one to start with,
+         * since each process will do this independently
+         */
+        srand(time(NULL)); /* seed before the first rand() call */
+        offset = compute_modulo(abs(rand()), list_length(rd_locator_info->nodeList));
+
+        rd_locator_info->roundRobinNode = rd_locator_info->nodeList->head; /* initialize */
+        for (int j = 0; j < offset && rd_locator_info->roundRobinNode->next != NULL; j++)
+            rd_locator_info->roundRobinNode = rd_locator_info->roundRobinNode->next;
+    }
+}
+
+static void SpecialWorkForLocalRel(Relation rel)
+{
+    if (RelationIsIndex(rel)) {
+        rel->rd_aminfo = (RelationAmInfo *)MemoryContextAllocZero(rel->rd_indexcxt, sizeof(RelationAmInfo));
+    }
+    SetGttInfo(rel);
+
+    if (unlikely(rel->rd_rel->relkind == RELKIND_MATVIEW) && !rel->rd_isscannable && !heap_is_matview_init_state(rel)) {
+        /* the matview may open smgr; that is harmless here */
+        rel->rd_isscannable = true;
+    }
+    SpecialWorkOfRelationLocInfo(rel);
+    Assert(rel->rd_mlogoid == InvalidOid ||
+        rel->rd_mlogoid == find_matview_mlog_table(rel->rd_id) ||
+        find_matview_mlog_table(rel->rd_id) == InvalidOid);
+    RelationInitPhysicalAddr(rel);
+    Assert(rel->rd_node.spcNode != InvalidOid);
+    Assert(rel->rd_node.relNode != InvalidOid);
+}
+
+void LocalTabDefCache::CopyLocalRelation(Relation dest, Relation src)
+{
+    MemoryContext rules_cxt = NULL;
+    if (src->rd_rules != NULL) {
+        rules_cxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(), RelationGetRelationName(src),
+            ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE);
+    }
+    MemoryContext rls_cxt = NULL;
+    if (src->rd_rlsdesc != NULL) {
+        rls_cxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(), RelationGetRelationName(src),
+            ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE);
+    }
+    MemoryContext index_cxt = NULL;
+    if (RelationIsIndex(src)) {
+        index_cxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(), RelationGetRelationName(src),
+            ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE);
+    }
+
+    CopyRelationData(dest, src, rules_cxt, rls_cxt, index_cxt);
+    SpecialWorkForLocalRel(dest);
+}
+
+void LocalTabDefCache::Init()
+{
+    if (m_is_inited) {
+        SetLocalRelCacheCriticalRelcachesBuilt(false);
+        SetLocalRelCacheCriticalSharedRelcachesBuilt(false);
+        return;
+    }
+    /* we don't know what the dbid is yet */
+
+    needNewLocalCacheFile = false;
+    criticalRelcachesBuilt = false;
+    criticalSharedRelcachesBuilt = false;
+
+    relcacheInvalsReceived = 0;
+    initFileRelationIds = NIL;
+    need_eoxact_work = false;
+
+    g_bucketmap_cache = NIL;
+    max_bucket_map_size = BUCKET_MAP_SIZE;
+
+    EOXactTupleDescArray = NULL;
+    NextEOXactTupleDescNum = 0;
+    EOXactTupleDescArrayLen = 0;
+
+    /*
+     * the relation mapper needs to be initialized too
+     */
+    RelationMapInitialize();
+    m_is_inited = true;
+}
+
+void LocalTabDefCache::FormrDesc(const char *relationName, Oid relationReltype, bool is_shared, bool hasoids, int natts,
+    const FormData_pg_attribute *attrs)
+{
+    if (SearchRelationFromLocal(attrs[0].attrelid) != NULL) {
+        return;
+    }
+    if (SearchRelationFromGlobalCopy<true>(attrs[0].attrelid) != NULL) {
+        return;
+    }
+    formrdesc(relationName, relationReltype, is_shared, hasoids, natts, attrs);
+    /* this is a faked-up rel; never insert it into the GSC */
+}
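The round-robin branch in SpecialWorkOfRelationLocInfo() above picks a random starting node once per descriptor so that backends spread their reads across replicas. A standalone sketch of that selection, assuming a plain singly linked list in place of the List/ListCell machinery:

```
// Pick a random starting node for round-robin reads.
#include <cstdlib>
#include <ctime>

struct ListNode {
    int nodeid;
    ListNode* next;
};

ListNode* pick_round_robin_start(ListNode* head, int length) {
    if (head == nullptr || length <= 0)
        return nullptr;
    srand((unsigned)time(nullptr));  // seed before the first rand()
    int offset = rand() % length;    // like compute_modulo(abs(rand()), length)
    ListNode* cur = head;
    for (int i = 0; i < offset && cur->next != nullptr; i++)
        cur = cur->next;
    return cur;                      // walk resumes from here next time
}
```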
+
+void LocalTabDefCache::LoadCriticalIndex(Oid indexoid, Oid heapoid)
+{
+    if (SearchRelationFromLocal(indexoid) != NULL) {
+        return;
+    }
+    if (SearchRelationFromGlobalCopy<true>(indexoid) != NULL) {
+        return;
+    }
+
+    Relation ird = load_critical_index(indexoid, heapoid);
+    if (IsRelOidStoreInGlobal(indexoid)) {
+        /* system tables never need a read-write lock when not in upgrade mode */
+        uint32 hash_value = oid_hash((void *)&indexoid, sizeof(Oid));
+        InsertRelationIntoGlobal(ird, hash_value);
+    }
+}
+
+void LocalTabDefCache::InitPhase2()
+{
+    if (m_is_inited_phase2) {
+        return;
+    }
+
+    /* load the shared relcache only */
+    m_global_shared_tabdefcache = t_thrd.lsc_cxt.lsc->GetSharedTabDefCache();
+
+    MemoryContext oldcxt;
+
+    /*
+     * the relation mapper needs to be initialized too
+     */
+    RelationMapInitializePhase2();
+
+    /*
+     * In bootstrap mode, the shared catalogs aren't there yet, so do nothing.
+     */
+    if (IsBootstrapProcessingMode()) {
+        m_is_inited_phase2 = true;
+        return;
+    }
+
+    /*
+     * switch to the cache memory context
+     */
+    oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
+
+    /*
+     * Try to load the shared relcache cache file. If unsuccessful, bootstrap
+     * the cache with pre-made descriptors for the critical shared catalogs.
+     */
+
+    FormrDesc("pg_database", DatabaseRelation_Rowtype_Id, true, true, Natts_pg_database, Desc_pg_database);
+    FormrDesc("pg_authid", AuthIdRelation_Rowtype_Id, true, true, Natts_pg_authid, Desc_pg_authid);
+    FormrDesc("pg_auth_members", AuthMemRelation_Rowtype_Id, true, false, Natts_pg_auth_members, Desc_pg_auth_members);
+    FormrDesc("pg_user_status", UserStatusRelation_Rowtype_Id, true, true, Natts_pg_user_status, Desc_pg_user_status);
+
+#define NUM_CRITICAL_SHARED_RELS 4 /* fix if you change list above */
+    (void)MemoryContextSwitchTo(oldcxt);
+    m_is_inited_phase2 = true;
+}
+
+static bool FlushInitRelation(Relation rel)
+{
+    bool restart = false;
+    /*
+     * If it's a faked-up entry, read the real pg_class tuple.
+     */
+    if (rel->rd_rel->relowner == InvalidOid) {
+        RelationCacheInvalidOid(rel);
+
+        /* relowner had better be OK now, else we'll loop forever */
+        if (rel->rd_rel->relowner == InvalidOid)
+            ereport(ERROR,
+                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("invalid relowner in pg_class entry for \"%s\"", RelationGetRelationName(rel))));
+        restart = true;
+    }
+
+    /*
+     * Fix data that isn't saved in the relcache cache file.
+     *
+     * relhasrules or relhastriggers could possibly be wrong or out of
+     * date. If we don't actually find any rules or triggers, clear the
+     * local copy of the flag so that we don't get into an infinite loop
+     * here. We don't make any attempt to fix the pg_class entry, though.
+     */
+    if (rel->rd_rel->relhasrules && rel->rd_rules == NULL) {
+        RelationBuildRuleLock(rel);
+        if (rel->rd_rules == NULL)
+            rel->rd_rel->relhasrules = false;
+        restart = true;
+    }
+    if (rel->rd_rel->relhastriggers && rel->trigdesc == NULL) {
+        RelationBuildTriggers(rel);
+        if (rel->trigdesc == NULL)
+            rel->rd_rel->relhastriggers = false;
+        restart = true;
+    }
+
+    /* get the row level security policies for this rel */
+    if (RelationEnableRowSecurity(rel) && rel->rd_rlsdesc == NULL) {
+        RelationBuildRlsPolicies(rel);
+        Assert(rel->rd_rlsdesc != NULL);
+        restart = true;
+    }
+    return restart;
+}
+
+void LocalTabDefCache::InitPhase3(void)
+{
+    if (m_is_inited_phase3) {
+        Assert(CheckMyDatabaseMatch());
+        SetLocalRelCacheCriticalRelcachesBuilt(true);
+        SetLocalRelCacheCriticalSharedRelcachesBuilt(true);
+        return;
+    }
+    m_global_tabdefcache = t_thrd.lsc_cxt.lsc->GetGlobalTabDefCache();
+    m_db_id = t_thrd.lsc_cxt.lsc->my_database_id;
+    m_pgclassdesc = NULL;
+    m_pgindexdesc = NULL;
+    MemoryContext oldcxt;
+    /*
+     * the relation mapper needs to be initialized too
+     */
+    RelationMapInitializePhase3();
+
+    /*
+     * switch to the cache memory context
+     */
+    oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
+    /*
+     * Try to load the local relcache cache file. If unsuccessful, bootstrap
+     * the cache with pre-made descriptors for the critical "nailed-in" system
+     * catalogs.
+     *
+     * Vacuum-full of pg_class will move the entries of indexes on pg_class to
+     * the end of pg_class. When bootstrapping critical catcaches and relcaches
+     * from scratch, we have to do sequential scans for these entries. For
+     * databases with millions of pg_class entries, this could take quite a long
+     * time. To make matters worse, when multiple backends do this in parallel,
+     * they may block one another severely due to locks on the same shared
+     * buffer partition. To avoid such cases, we only allow one backend to
+     * bootstrap critical catcaches at a time. When it is done, the other
+     * parallel backends would first try to load from the init file once again.
+     * Since shared catalogs usually have far fewer entries, we only do so for
+     * local catalogs at present.
+     */
+    FormrDesc("pg_class", RelationRelation_Rowtype_Id, false, true, Natts_pg_class, Desc_pg_class);
+    FormrDesc("pg_attribute", AttributeRelation_Rowtype_Id, false, false, Natts_pg_attribute, Desc_pg_attribute);
+    FormrDesc("pg_proc", ProcedureRelation_Rowtype_Id, false, true, Natts_pg_proc, Desc_pg_proc);
+    FormrDesc("pg_type", TypeRelation_Rowtype_Id, false, true, Natts_pg_type, Desc_pg_type);
+    (void)MemoryContextSwitchTo(oldcxt);
+#define NUM_CRITICAL_LOCAL_RELS 4 /* fix if you change list above */
+    if (IsBootstrapProcessingMode()) {
+        /* In bootstrap mode, the faked-up formrdesc info is all we'll have */
+        m_is_inited_phase3 = true;
+        return;
+    }
+
+    /*
+     * If we didn't get the critical system indexes loaded into the relcache,
+     * do so now. These are critical because the catcache and/or opclass cache
+     * depend on them for fetches done during relcache load. Thus, we have an
+     * infinite-recursion problem. We can break the recursion by doing
+     * heapscans instead of indexscans at certain key spots. To avoid hobbling
+     * performance, we only want to do that until we have the critical indexes
+     * loaded into the relcache. Thus, the flag u_sess->relcache_cxt.criticalRelcachesBuilt is used to
+     * decide whether to do heapscan or indexscan at the key spots, and we set
+     * it true after we've loaded the critical indexes.
+     *
+     * The critical indexes are marked as "nailed in cache", partly to make it
+     * easy for load_relcache_init_file to count them, but mainly because we
+     * cannot flush and rebuild them once we've set u_sess->relcache_cxt.criticalRelcachesBuilt to
+     * true. (NOTE: perhaps it would be possible to reload them by
+     * temporarily setting u_sess->relcache_cxt.criticalRelcachesBuilt to false again. For now,
+     * though, we just nail 'em in.)
+     *
+     * RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
+     * in the same way as the others, because the critical catalogs don't
+     * (currently) have any rules or triggers, and so these indexes can be
+     * rebuilt without inducing recursion. However they are used during
+     * relcache load when a rel does have rules or triggers, so we choose to
+     * nail them for performance reasons.
+     */
+    /* we don't load from the init file */
+    Assert(!LocalRelCacheCriticalRelcachesBuilt());
+    LoadCriticalIndex(ClassOidIndexId, RelationRelationId);
+    LoadCriticalIndex(AttributeRelidNumIndexId, AttributeRelationId);
+    LoadCriticalIndex(IndexRelidIndexId, IndexRelationId);
+    LoadCriticalIndex(OpclassOidIndexId, OperatorClassRelationId);
+    LoadCriticalIndex(AccessMethodProcedureIndexId, AccessMethodProcedureRelationId);
+    LoadCriticalIndex(RewriteRelRulenameIndexId, RewriteRelationId);
+    LoadCriticalIndex(TriggerRelidNameIndexId, TriggerRelationId);
+#define NUM_CRITICAL_LOCAL_INDEXES 7 /* fix if you change list above */
+    SetLocalRelCacheCriticalRelcachesBuilt(true);
+
+    /*
+     * Process critical shared indexes too.
+     *
+     * DatabaseNameIndexId isn't critical for relcache loading, but rather for
+     * initial lookup of u_sess->proc_cxt.MyDatabaseId, without which we'll never find any
+     * non-shared catalogs at all. Autovacuum calls InitPostgres with a
+     * database OID, so it instead depends on DatabaseOidIndexId. We also
+     * need to nail up some indexes on pg_authid and pg_auth_members for use
+     * during client authentication.
+     */
+    Assert(!LocalRelCacheCriticalSharedRelcachesBuilt());
+    LoadCriticalIndex(DatabaseNameIndexId, DatabaseRelationId);
+    LoadCriticalIndex(DatabaseOidIndexId, DatabaseRelationId);
+    LoadCriticalIndex(AuthIdRolnameIndexId, AuthIdRelationId);
+    LoadCriticalIndex(AuthIdOidIndexId, AuthIdRelationId);
+    LoadCriticalIndex(AuthMemMemRoleIndexId, AuthMemRelationId);
+    LoadCriticalIndex(UserStatusRoleidIndexId, UserStatusRelationId);
+#define NUM_CRITICAL_SHARED_INDEXES 6 /* fix if you change list above */
+    SetLocalRelCacheCriticalSharedRelcachesBuilt(true);
+
+    /*
+     * Now, scan all the relcache entries and update anything that might be
+     * wrong in the results from formrdesc or the relcache cache file. If we
+     * faked up relcache entries using formrdesc, then read the real pg_class
+     * rows and replace the fake entries with them. Also, if any of the
+     * relcache entries have rules or triggers, load that info the hard way
+     * since it isn't recorded in the cache file.
+     *
+     * Whenever we access the catalogs to read data, there is a possibility of
+     * a shared-inval cache flush causing relcache entries to be removed.
+     * Since hash_seq_search only guarantees to still work after the *current*
+     * entry is removed, it's unsafe to continue the hashtable scan afterward.
+     * We handle this by restarting the scan from scratch after each access.
+     * This is theoretically O(N^2), but the number of entries that actually
+     * need to be fixed is small enough that it doesn't matter.
+     */
+    Dlelem *bucket_elt;
+    forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) {
+        Dlelem *elt;
+        forloopbucket(elt, bucket_elt) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(elt);
+            Assert(entry->rel->entry == entry);
+            elt = DLGetSucc(elt);
+            Relation rel = entry->rel;
+            /*
+             * Make sure *this* entry doesn't get flushed while we work with it.
+             */
+            RelationIncrementReferenceCount(rel);
+            bool restart = FlushInitRelation(rel);
+            /* Release hold on the rel */
+            RelationDecrementReferenceCount(rel);
+
+            /* Now, restart the hashtable scan if needed */
+            if (restart) {
+                if (IsRelOidStoreInGlobal(rel->rd_id)) {
+                    /* system tables never need a read-write lock when not in upgrade mode */
+                    uint32 hash_value = oid_hash((void *)&(rel->rd_id), sizeof(Oid));
+                    InsertRelationIntoGlobal(rel, hash_value);
+                }
+                bucket_elt = DLGetHead(m_bucket_list.GetActiveBucketList());
+                elt = NULL;
+            }
+        }
+    }
+    m_is_inited_phase3 = true;
+#ifdef MEMORY_CONTEXT_CHECKING
+    MemoryContextCheck(LocalMyDBCacheMemCxt(), false);
+#endif
+}
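The fix-up loop that ends InitPhase3() relies on a scan-restart idiom: any catalog access may fire invalidations that delete arbitrary cache entries, so after doing real work the scan starts over from the head. A simplified sketch of the idiom; the container and entry types are stand-ins for the bucket list above.

```
// Iterate a collection whose members may be invalidated by the work itself.
#include <vector>

struct Entry { bool needs_fixup; };

static bool fixup(Entry& e) {
    // Touching the catalogs here may fire invalidations that delete
    // arbitrary entries, so the caller cannot trust its iterator afterward.
    bool did_work = e.needs_fixup;
    e.needs_fixup = false;
    return did_work;
}

void fixup_all(std::vector<Entry>& entries) {
    bool restart = true;
    while (restart) {                // O(N^2) worst case; fine in practice
        restart = false;
        for (Entry& e : entries) {
            if (!e.needs_fixup)
                continue;
            if (fixup(e)) {          // may have invalidated other entries
                restart = true;      // so restart from the beginning
                break;
            }
        }
    }
}
```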
+
+void LocalTabDefCache::InvalidateGlobalRelation(Oid db_id, Oid rel_oid, bool is_commit)
+{
+    if (unlikely(db_id == InvalidOid && rel_oid == InvalidOid)) {
+        /* This is used by ALTER PUBLICATION, since changes in publications may affect
+         * a large number of tables. See function CacheInvalidateRelcacheAll. */
+        g_instance.global_sysdbcache.InvalidAllRelations();
+        return;
+    }
+    if (!is_commit) {
+        invalid_entries.InsertInvalidDefValue(rel_oid);
+        return;
+    }
+    if (db_id == InvalidOid) {
+        t_thrd.lsc_cxt.lsc->GetSharedTabDefCache()->Invalidate(db_id, rel_oid);
+    } else if (m_global_tabdefcache == NULL) {
+        Assert(!m_is_inited_phase3);
+        Assert(CheckMyDatabaseMatch());
+        GlobalSysDBCacheEntry *entry = g_instance.global_sysdbcache.FindTempGSCEntry(db_id);
+        if (entry == NULL) {
+            return;
+        }
+        entry->m_tabdefCache->Invalidate(db_id, rel_oid);
+        g_instance.global_sysdbcache.ReleaseTempGSCEntry(entry);
+    } else {
+        Assert(CheckMyDatabaseMatch());
+        Assert(m_db_id == t_thrd.lsc_cxt.lsc->my_database_id);
+        Assert(m_db_id == db_id);
+        m_global_tabdefcache->Invalidate(db_id, rel_oid);
+    }
+}
+
+void LocalTabDefCache::InvalidateRelationAll()
+{
+    /*
+     * Reload relation mapping data before starting to reconstruct the cache.
+     */
+    RelationMapInvalidateAll();
+
+    List *rebuildFirstList = NIL;
+    List *rebuildList = NIL;
+    Dlelem *bucket_elt;
+    forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) {
+        Dlelem *elt;
+        forloopbucket(elt, bucket_elt) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(elt);
+            Assert(entry->rel->entry == entry);
+            elt = DLGetSucc(elt);
+            Relation rel = entry->rel;
+            /* Must close all smgr references to avoid leaving dangling ptrs */
+            RelationCloseSmgr(rel);
+
+            /* Ignore new rels, since they are never cross-backend targets */
+            if (rel->rd_createSubid != InvalidSubTransactionId) {
+                continue;
+            }
+            AddLocalRelCacheInvalsReceived(1);
+
+            if (RelationHasReferenceCountZero(rel)) {
+                /* Delete this entry immediately */
+                Assert(!rel->rd_isnailed);
+                RelationClearRelation(rel, false);
+            } else {
+                /*
+                 * If it's a mapped rel, immediately update its rd_node in
+                 * case its relfilenode changed. We must do this during phase 1
+                 * in case the rel is consulted during rebuild of other
+                 * relcache entries in phase 2. It's safe since consulting the
+                 * map doesn't involve any access to relcache entries.
+                 */
+                if (RelationIsMapped(rel))
+                    RelationInitPhysicalAddr(rel);
+
+                /*
+                 * Add this entry to the list of stuff to rebuild in the second
+                 * pass. pg_class goes to the front of rebuildFirstList while
+                 * pg_class_oid_index goes to the back of rebuildFirstList, so
+                 * they are done first and second respectively. Other nailed
+                 * rels go to the front of rebuildList, so they'll be done
+                 * next in no particular order; and everything else goes to the
+                 * back of rebuildList.
+                 */
+                if (RelationGetRelid(rel) == RelationRelationId)
+                    rebuildFirstList = lcons(rel, rebuildFirstList);
+                else if (RelationGetRelid(rel) == ClassOidIndexId)
+                    rebuildFirstList = lappend(rebuildFirstList, rel);
+                else if (rel->rd_isnailed)
+                    rebuildList = lcons(rel, rebuildList);
+                else
+                    rebuildList = lappend(rebuildList, rel);
+            }
+            /* RelationClearRelation calls RelationCacheDelete to remove a rel that is not rebuilt.
+             * It needn't insert into the global cache, which has already been updated. */
+        }
+    }
+
+    /*
+     * Now zap any remaining smgr cache entries. This must happen before we
+     * start to rebuild entries, since that may involve catalog fetches which
+     * will re-open catalog files.
+     */
+    smgrcloseall();
+    ListCell *l = NULL;
+    /* Phase 2: rebuild the items found to need rebuild in phase 1 */
+    foreach (l, rebuildFirstList) {
+        Relation rel = (Relation)lfirst(l);
+        /* RelationClearRelation calls RelationBuildDesc to rebuild the rel,
+         * and RelationBuildDesc calls SEARCH_RELATION_FROM_GLOBAL to search first */
+        RelationClearRelation(rel, true);
+    }
+    list_free_ext(rebuildFirstList);
+    foreach (l, rebuildList) {
+        Relation rel = (Relation)lfirst(l);
+        RelationClearRelation(rel, true);
+    }
+    list_free_ext(rebuildList);
+}
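InvalidateRelationAll() above splits the work into a classify phase and a rebuild phase, and orders the rebuild so that pg_class, then its OID index, then other nailed entries come first. A compact sketch of that ordering under stand-in types:

```
// Two-phase invalidation: classify first, rebuild in dependency order second.
#include <list>

struct Rel { int refcnt; bool nailed; bool is_pg_class; bool is_pg_class_index; };

static void drop(Rel*) { /* remove from the cache; stub */ }
static void rebuild(Rel*) { /* re-read catalogs and refresh; stub */ }

void invalidate_all(std::list<Rel*>& cache) {
    std::list<Rel*> first, rest;
    for (Rel* r : cache) {                        // phase 1: classify
        if (r->refcnt == 0) { drop(r); continue; }
        if (r->is_pg_class)            first.push_front(r);  // rebuilt 1st
        else if (r->is_pg_class_index) first.push_back(r);   // rebuilt 2nd
        else if (r->nailed)            rest.push_front(r);   // then nailed
        else                           rest.push_back(r);    // then the rest
    }
    for (Rel* r : first) rebuild(r);              // phase 2: ordered rebuild
    for (Rel* r : rest)  rebuild(r);
}
```

Rebuilding pg_class and its index first matters because every other rebuild consults them.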
+
+void LocalTabDefCache::InvalidateRelationNodeList()
+{
+    Dlelem *bucket_elt;
+    forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) {
+        Dlelem *elt;
+        forloopbucket(elt, bucket_elt) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(elt);
+            Assert(entry->rel->entry == entry);
+            elt = DLGetSucc(elt);
+            Relation rel = entry->rel;
+            if (rel->rd_locator_info != NULL) {
+                RelationClearRelation(rel, !RelationHasReferenceCountZero(rel));
+            }
+        }
+    }
+}
+
+void LocalTabDefCache::InvalidateRelationBucketsAll()
+{
+    Dlelem *bucket_elt;
+    forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) {
+        Dlelem *elt;
+        forloopbucket(elt, bucket_elt) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(elt);
+            Assert(entry->rel->entry == entry);
+            elt = DLGetSucc(elt);
+            Relation rel = entry->rel;
+            if (rel->rd_locator_info != NULL) {
+                InvalidateBuckets(rel->rd_locator_info);
+            }
+        }
+    }
+}
+
+void LocalTabDefCache::RememberToFreeTupleDescAtEOX(TupleDesc td)
+{
+    if (EOXactTupleDescArray == NULL) {
+        MemoryContext oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
+        EOXactTupleDescArrayLen = 16;
+        EOXactTupleDescArray = (TupleDesc *)palloc(EOXactTupleDescArrayLen * sizeof(TupleDesc));
+        NextEOXactTupleDescNum = 0;
+        MemoryContextSwitchTo(oldcxt);
+    } else if (NextEOXactTupleDescNum >= EOXactTupleDescArrayLen) {
+        int32 newlen = EOXactTupleDescArrayLen * 2;
+        Assert(EOXactTupleDescArrayLen > 0);
+        EOXactTupleDescArray = (TupleDesc *)repalloc(EOXactTupleDescArray, newlen * sizeof(TupleDesc));
+        EOXactTupleDescArrayLen = newlen;
+    }
+    EOXactTupleDescArray[NextEOXactTupleDescNum++] = td;
+}
+
+/* Free all tupleDescs remembered by RememberToFreeTupleDescAtEOX in one batch when a transaction ends */
+void LocalTabDefCache::AtEOXact_FreeTupleDesc()
+{
+    if (EOXactTupleDescArrayLen > 0) {
+        Assert(EOXactTupleDescArray != NULL);
+        for (int i = 0; i < NextEOXactTupleDescNum; i++) {
+            Assert(EOXactTupleDescArray[i]->tdrefcount == 0);
+            FreeTupleDesc(EOXactTupleDescArray[i]);
+        }
+        pfree_ext(EOXactTupleDescArray);
+    }
+    NextEOXactTupleDescNum = 0;
+    EOXactTupleDescArrayLen = 0;
+}
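RememberToFreeTupleDescAtEOX() and AtEOXact_FreeTupleDesc() above implement a classic deferred-free list: an array that doubles when full and is drained once per transaction. A self-contained sketch of the same shape (allocation-failure handling omitted for brevity):

```
// Remember pointers now, free them in one batch at end of transaction.
#include <cstdlib>

struct DeferredFree {
    void** items = nullptr;
    int len = 0;
    int cap = 0;

    void remember(void* p) {
        if (cap == 0) {
            cap = 16;                               // initial size
            items = (void**)malloc(cap * sizeof(void*));
        } else if (len >= cap) {
            cap *= 2;                               // amortized O(1) growth
            items = (void**)realloc(items, cap * sizeof(void*));
        }
        items[len++] = p;
    }

    void free_all_at_eoxact() {
        for (int i = 0; i < len; i++)
            free(items[i]);                          // the deferred frees
        free(items);
        items = nullptr;
        len = cap = 0;                               // ready for the next xact
    }
};
```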
+
+/*
+ * AtEOXact_RelationCache
+ *
+ * Clean up the relcache at main-transaction commit or abort.
+ *
+ * Note: this must be called *before* processing invalidation messages.
+ * In the case of abort, we don't want to try to rebuild any invalidated
+ * cache entries (since we can't safely do database accesses). Therefore
+ * we must reset refcnts before handling pending invalidations.
+ */
+void LocalTabDefCache::AtEOXact_RelationCache(bool isCommit)
+{
+    invalid_entries.ResetInitFlag();
+#ifdef MEMORY_CONTEXT_CHECKING
+    MemoryContextCheck(LocalMyDBCacheMemCxt(), false);
+#endif
+    /*
+     * To speed up transaction exit, we want to avoid scanning the relcache
+     * unless there is actually something for this routine to do. Other than
+     * the debug-only Assert checks, most transactions don't create any work
+     * for us to do here, so we keep a static flag that gets set if there is
+     * anything to do. (Currently, this means either a relation is created in
+     * the current xact, or one is given a new relfilenode, or an index list
+     * is forced.) For simplicity, the flag remains set till end of top-level
+     * transaction, even though we could clear it at subtransaction end in
+     * some cases.
+     */
+    if (!LocalRelCacheNeedEOXactWork()
+#ifdef USE_ASSERT_CHECKING
+        && !assert_enabled
+#endif
+    ) {
+        return;
+    }
+
+    Dlelem *bucket_elt;
+    forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) {
+        Dlelem *elt;
+        forloopbucket(elt, bucket_elt) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(elt);
+            elt = DLGetSucc(elt);
+            Relation rel = entry->rel;
+
+            /*
+             * The relcache entry's ref count should be back to its normal
+             * not-in-a-transaction state: 0 unless it's nailed in cache.
+             *
+             * In bootstrap mode, this is NOT true, so don't check it --- the
+             * bootstrap code expects rels to stay open across start/commit
+             * transaction calls. (That seems bogus, but it's not worth fixing.)
+             */
+            if (!IsBootstrapProcessingMode()) {
+                int expected_refcnt;
+
+                expected_refcnt = rel->rd_isnailed ? 1 : 0;
+                if (rel->rd_refcnt != expected_refcnt && IsolatedResourceOwner != NULL) {
+                    elog(WARNING, "relation \"%s\" rd_refcnt is %d but expected_refcnt %d. ",
+                        RelationGetRelationName(rel), rel->rd_refcnt, expected_refcnt);
+                    PrintResourceOwnerLeakWarning();
+                }
+#ifdef USE_ASSERT_CHECKING
+                Assert(rel->rd_refcnt == expected_refcnt);
+#endif
+            }
+
+            /*
+             * Is it a rel created in the current transaction?
+             *
+             * During commit, reset the flag to zero, since we are now out of the
+             * creating transaction. During abort, simply delete the relcache
+             * entry --- it isn't interesting any longer. (NOTE: if we have
+             * forgotten the new-ness of a new rel due to a forced cache
+             * flush, the entry will get deleted anyway by shared-cache-inval
+             * processing of the aborted pg_class insertion.)
+             */
+            if (rel->rd_createSubid != InvalidSubTransactionId) {
+                if (isCommit)
+                    rel->rd_createSubid = InvalidSubTransactionId;
+                else if (RelationHasReferenceCountZero(rel)) {
+                    RelationClearRelation(rel, false);
+                    continue;
+                } else {
+                    /*
+                     * Hmm, somewhere there's a (leaked?) reference to the rel.
+                     * We daren't remove the entry for fear of dereferencing a
+                     * dangling pointer later. Bleat, and mark it as not belonging to
+                     * the current transaction. Hopefully it'll get cleaned up
+                     * eventually. This must be just a WARNING to avoid
+                     * error-during-error-recovery loops.
+                     */
+                    rel->rd_createSubid = InvalidSubTransactionId;
+                    ereport(WARNING, (errmsg("cannot remove relcache entry for \"%s\" because it has nonzero refcount",
+                        RelationGetRelationName(rel))));
+                }
+            }
+
+            /*
+             * Likewise, reset the hint about the relfilenode being new.
+             */
+            rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+
+            /*
+             * Flush any temporary index list.
+             */
+            if (rel->rd_indexvalid == 2) {
+                list_free_ext(rel->rd_indexlist);
+                rel->rd_indexlist = NIL;
+                rel->rd_oidindex = InvalidOid;
+                rel->rd_indexvalid = 0;
+            }
+            if (rel->partMap != NULL && unlikely(rel->partMap->isDirty)) {
+                RelationClearRelation(rel, false);
+            }
+        }
+    }
+    /* Once done with the transaction, we can reset need_eoxact_work */
+    SetLocalRelCacheNeedEOXactWork(false);
+}
+
+/*
+ * AtEOSubXact_RelationCache
+ *
+ * Clean up the relcache at sub-transaction commit or abort.
+ *
+ * Note: this must be called *before* processing invalidation messages.
+ */
+void LocalTabDefCache::AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid)
+{
+    /* don't update global info here, even if we commit this relation-create operation */
+    /*
+     * Skip the relcache scan if nothing to do --- see notes for
+     * AtEOXact_RelationCache.
+     */
+    if (!LocalRelCacheNeedEOXactWork())
+        return;
+
+    Dlelem *bucket_elt;
+    forloopactivebucketlist(bucket_elt, m_bucket_list.GetActiveBucketList()) {
+        Dlelem *elt;
+        forloopbucket(elt, bucket_elt) {
+            LocalRelationEntry *entry = (LocalRelationEntry *)DLE_VAL(elt);
+            elt = DLGetSucc(elt);
+            Relation rel = entry->rel;
+
+            /*
+             * Is it a rel created in the current subtransaction?
+             *
+             * During subcommit, mark it as belonging to the parent, instead.
+             * During subabort, simply delete the relcache entry.
+             */
+            if (rel->rd_createSubid == mySubid) {
+                if (isCommit)
+                    rel->rd_createSubid = parentSubid;
+                else if (RelationHasReferenceCountZero(rel)) {
+                    RelationClearRelation(rel, false);
+                    continue;
+                } else {
+                    /*
+                     * Hmm, somewhere there's a (leaked?) reference to the rel.
+                     * We daren't remove the entry for fear of dereferencing a
+                     * dangling pointer later. Bleat, and transfer it to the parent
+                     * subtransaction so we can try again later. This must be just a
+                     * WARNING to avoid error-during-error-recovery loops.
+                     */
+                    rel->rd_createSubid = parentSubid;
+                    ereport(WARNING, (errmsg("cannot remove relcache entry for \"%s\" because it has nonzero refcount",
+                        RelationGetRelationName(rel))));
+                }
+            }
+
+            /*
+             * Likewise, update or drop any new-relfilenode-in-subtransaction
+             * hint.
+             */
+            if (rel->rd_newRelfilenodeSubid == mySubid) {
+                if (isCommit)
+                    rel->rd_newRelfilenodeSubid = parentSubid;
+                else
+                    rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+            }
+
+            /*
+             * Flush any temporary index list.
+             */
+            if (rel->rd_indexvalid == 2) {
+                list_free_ext(rel->rd_indexlist);
+                rel->rd_indexlist = NIL;
+                rel->rd_oidindex = InvalidOid;
+                rel->rd_indexvalid = 0;
+            }
+        }
+    }
+}
+
+Relation LocalTabDefCache::RelationIdGetRelation(Oid rel_oid)
+{
+    Assert(CheckMyDatabaseMatch());
+    Relation rd = SearchRelation(rel_oid);
+    if (RelationIsValid(rd)) {
+        RelationIncrementReferenceCount(rd);
+        /* revalidate cache entry if necessary */
+        if (!rd->rd_isvalid) {
+            /*
+             * Indexes only have a limited number of possible schema changes,
+             * and we don't want to use the full-blown procedure because it's
+             * a headache for indexes that reload itself depends on.
+             */
+            if (RelationIsIndex(rd)) {
+                RelationReloadIndexInfo(rd);
+            } else {
+                RelationClearRelation(rd, true);
+            }
+        }
+
+        /*
+         * In some cases, after the relcache is built, the temp table's node group is
+         * dropped because of cluster resizing, so we should do this check when getting
+         * the rel directly from the relcache.
+         */
+        if (rd->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
+            (void)checkGroup(rel_oid, RELATION_IS_OTHER_TEMP(rd));
+
+        return rd;
+    }
+
+    /*
+     * no reldesc in the cache, so have RelationBuildDesc() build one and add
+     * it.
+     */
+    Assert(m_is_inited_phase2);
+    bool is_oid_store_in_global = IsRelOidStoreInGlobal(rel_oid);
+    /* system tables don't need a rwlock */
+    Assert(m_global_tabdefcache != NULL || g_instance.global_sysdbcache.HashSearchSharedRelation(rel_oid));
+    bool has_concurrent_lock = is_oid_store_in_global &&
+        !IsSystemObjOid(rel_oid);
+    uint32 hash_value = oid_hash((void *)&(rel_oid), sizeof(Oid));
+    pthread_rwlock_t *oid_lock = NULL;
+    if (has_concurrent_lock) {
+        oid_lock = m_global_tabdefcache->GetHashValueLock(hash_value);
+        AcquireGSCTableReadLock(&has_concurrent_lock, oid_lock);
+    }
+    rd = RelationBuildDesc(rel_oid, true, true);
+    /* System tables are never modified except in upgrade mode, and we never insert into
+     * the global cache while upgrading, so they can be inserted without the concurrent lock. */
+    if (RelationIsValid(rd) && is_oid_store_in_global && (IsSystemObjOid(rel_oid) || has_concurrent_lock)) {
+        InsertRelationIntoGlobal(rd, hash_value);
+    }
+    if (has_concurrent_lock) {
+        ReleaseGSCTableReadLock(&has_concurrent_lock, oid_lock);
+    }
+
+    if (RelationIsValid(rd)) {
+        RelationIncrementReferenceCount(rd);
+        /* Insert the TDE key into the buffer cache for a TDE table */
+        if (g_instance.attr.attr_security.enable_tde && IS_PGXC_DATANODE && RelationisEncryptEnable(rd)) {
+            RelationInsertTdeInfoToCache(rd);
+        }
+    }
+
+    return rd;
+}
+
+void LocalTabDefCache::ResetInitFlag()
+{
+    m_bucket_list.ResetContent();
+    invalid_entries.ResetInitFlag();
+    needNewLocalCacheFile = false;
+    criticalRelcachesBuilt = false;
+    criticalSharedRelcachesBuilt = false;
+
+    relcacheInvalsReceived = 0;
+    initFileRelationIds = NIL;
+    need_eoxact_work = false;
+
+    g_bucketmap_cache = NIL;
+    max_bucket_map_size = 0;
+
+    EOXactTupleDescArray = NULL;
+    NextEOXactTupleDescNum = 0;
+    EOXactTupleDescArrayLen = 0;
+
+    m_global_tabdefcache = NULL;
+    m_pgclassdesc = NULL;
+    m_pgindexdesc = NULL;
+    m_global_shared_tabdefcache = NULL;
+
+    m_is_inited = false;
+    m_is_inited_phase2 = false;
+    m_is_inited_phase3 = false;
+
+    m_db_id = InvalidOid;
+}
\ No newline at end of file
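RelationIdGetRelation() above guards the build-then-publish step with a per-hash-value read-write lock, so concurrent invalidation cannot interleave with the insert. A sketch of that lock-striping pattern, using std::shared_mutex in place of the pthread rwlock helpers; all names here are illustrative, not the openGauss API.

```
// Lock striping: one rwlock per hash stripe guards build+publish.
#include <shared_mutex>
#include <cstdint>

constexpr int kNumLockStripes = 128;
static std::shared_mutex g_slot_locks[kNumLockStripes];

static std::shared_mutex& hash_value_lock(uint32_t hash_value) {
    return g_slot_locks[hash_value % kNumLockStripes];
}

struct Rel { uint32_t oid; };

static Rel* build_descriptor(uint32_t oid) {
    return new Rel{oid};            // stands in for RelationBuildDesc()
}
static void publish_to_global(Rel*, uint32_t) { /* insert into shared map */ }

Rel* build_and_publish(uint32_t oid, uint32_t hash_value) {
    // Shared (read) mode: many backends may build concurrently, while an
    // invalidator takes the same stripe exclusively to fence them all out.
    std::shared_lock<std::shared_mutex> guard(hash_value_lock(hash_value));
    Rel* rel = build_descriptor(oid);
    if (rel != nullptr)
        publish_to_global(rel, hash_value);
    return rel;
}
```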
diff --git a/src/common/backend/utils/cache/lsyscache.cpp b/src/common/backend/utils/cache/lsyscache.cpp
index 37cb7686a..4485b621b 100644
--- a/src/common/backend/utils/cache/lsyscache.cpp
+++ b/src/common/backend/utils/cache/lsyscache.cpp
@@ -233,7 +233,7 @@ bool get_ordering_op_properties(Oid opno, Oid* opfamily, Oid* opcintype, int16*
      */
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple tuple = &catlist->members[i]->tuple;
+        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop aform = (Form_pg_amop)GETSTRUCT(tuple);
         /* must be btree */
         if (!OID_IS_BTREE(aform->amopmethod)) {
@@ -404,7 +404,7 @@ Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
      */
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple tuple = &catlist->members[i]->tuple;
+        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop aform = (Form_pg_amop)GETSTRUCT(tuple);
         /* must be btree */
         if (!OID_IS_BTREE(aform->amopmethod)) {
@@ -456,7 +456,7 @@ List* get_mergejoin_opfamilies(Oid opno)
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
 
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple tuple = &catlist->members[i]->tuple;
+        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop aform = (Form_pg_amop)GETSTRUCT(tuple);
         /* must be btree equality */
         if (OID_IS_BTREE(aform->amopmethod) && aform->amopstrategy == BTEqualStrategyNumber) {
@@ -501,7 +501,7 @@ bool get_compatible_hash_operators(Oid opno, Oid* lhs_opno, Oid* rhs_opno)
      */
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple tuple = &catlist->members[i]->tuple;
+        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop aform = (Form_pg_amop)GETSTRUCT(tuple);
         if (aform->amopmethod == HASH_AM_OID && aform->amopstrategy == HTEqualStrategyNumber) {
             /* No extra lookup needed if given operator is single-type */
@@ -586,7 +586,7 @@ bool get_op_hash_functions(Oid opno, RegProcedure* lhs_procno, RegProcedure* rhs
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
 
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple tuple = &catlist->members[i]->tuple;
+        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop aform = (Form_pg_amop)GETSTRUCT(tuple);
 
         if (aform->amopmethod == HASH_AM_OID && aform->amopstrategy == HTEqualStrategyNumber) {
@@ -653,7 +653,7 @@ List* get_op_btree_interpretation(Oid opno)
      */
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple op_tuple = &catlist->members[i]->tuple;
+        HeapTuple op_tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop op_form = (Form_pg_amop)GETSTRUCT(op_tuple);
         StrategyNumber op_strategy;
         /* must be btree */
@@ -680,7 +680,7 @@ List* get_op_btree_interpretation(Oid opno)
         if (OidIsValid(op_negator)) {
             catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(op_negator));
             for (i = 0; i < catlist->n_members; i++) {
-                HeapTuple op_tuple = &catlist->members[i]->tuple;
+                HeapTuple op_tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
                 Form_pg_amop op_form = (Form_pg_amop)GETSTRUCT(op_tuple);
                 StrategyNumber op_strategy;
                 /* must be btree */
@@ -734,7 +734,7 @@ bool equality_ops_are_compatible(Oid opno1, Oid opno2)
     catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno1));
     result = false;
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple op_tuple = &catlist->members[i]->tuple;
+        HeapTuple op_tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_amop op_form = (Form_pg_amop)GETSTRUCT(op_tuple);
         /* must be btree or hash */
         if (OID_IS_BTREE(op_form->amopmethod) || op_form->amopmethod == HASH_AM_OID) {
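Every hunk in this file swaps a direct `&catlist->members[i]->tuple` dereference for t_thrd.lsc_cxt.FetchTupleFromCatCList(). A plausible reading, hedged since the accessor's body is not shown in this diff, is that list members may now come from either the session-local or the global syscache, so an accessor has to hide the representation from call sites. A sketch of that indirection under assumed types:

```
// Hide how a catcache-list member stores its tuple behind one accessor.
struct HeapTupleData { /* header + data in the real code */ };
typedef HeapTupleData* HeapTuple;

struct CatCMember {
    bool is_global;               // assumed discriminator, for illustration
    HeapTupleData local_tuple;    // embedded, as the old code expected
    HeapTupleData* global_tuple;  // points into the shared cache otherwise
};

struct CatCList {
    int n_members;
    CatCMember** members;
};

// Call sites stay identical no matter which kind of member they got.
static inline HeapTuple FetchTupleFromCatCListSketch(CatCList* cl, int i) {
    CatCMember* m = cl->members[i];
    return m->is_global ? m->global_tuple : &m->local_tuple;
}
```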
@@ -1518,7 +1518,7 @@ char* get_func_langname(Oid funcid)
         ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for function %u", funcid)));
     }
     Datum datum = heap_getattr(tp, Anum_pg_proc_prolang, RelationGetDescr(relation), &isNull);
-    langoid = DatumGetInt16(datum);
+    langoid = DatumGetObjectId(datum);
     heap_close(relation, NoLock);
     ReleaseSysCache(tp);
 
@@ -1729,7 +1729,6 @@ Oid get_valid_relname_relid(const char* relnamespace, const char* relname)
     Oid oldnspid = InvalidOid;
     Oid relid = InvalidOid;
     Oid oldrelid = InvalidOid;
-    uint64 inval_count = 0;
     bool retry = false;
     Assert(relnamespace != NULL);
     Assert(relname != NULL);
@@ -1741,8 +1740,13 @@ Oid get_valid_relname_relid(const char* relnamespace, const char* relname)
      * the answer doesn't change.
      *
      */
+    uint64 sess_inval_count;
+    uint64 thrd_inval_count = 0;
     for (;;) {
-        inval_count = u_sess->inval_cxt.SharedInvalidMessageCounter;
+        sess_inval_count = u_sess->inval_cxt.SIMCounter;
+        if (EnableLocalSysCache()) {
+            thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter;
+        }
         nspid = get_namespace_oid(relnamespace, false);
         relid = get_relname_relid(relname, nspid);
         /*
@@ -1797,8 +1801,15 @@ Oid get_valid_relname_relid(const char* relnamespace, const char* relname)
             LockDatabaseObject(NamespaceRelationId, nspid, 0, AccessShareLock);
         }
         /* If no invalidation message were processed, we're done! */
-        if (inval_count == u_sess->inval_cxt.SharedInvalidMessageCounter) {
-            break;
+        if (EnableLocalSysCache()) {
+            if (sess_inval_count == u_sess->inval_cxt.SIMCounter &&
+                thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) {
+                break;
+            }
+        } else {
+            if (sess_inval_count == u_sess->inval_cxt.SIMCounter) {
+                break;
+            }
         }
         /*
          * Let's repeat the name lookup, to make
@@ -2597,7 +2608,7 @@ Oid get_pgxc_nodeoid(const char* nodename)
     int i;
     memlist = SearchSysCacheList1(PGXCNODENAME, PointerGetDatum(nodename));
     for (i = 0; i < memlist->n_members; i++) {
-        HeapTuple tup = &memlist->members[i]->tuple;
+        HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, i);
         char node_type = ((Form_pgxc_node)GETSTRUCT(tup))->node_type;
         if (node_type == PGXC_NODE_COORDINATOR || node_type == PGXC_NODE_DATANODE) {
             node_oid = HeapTupleGetOid(tup);
@@ -2615,7 +2626,7 @@ Oid get_pgxc_datanodeoid(const char* nodename, bool missingOK)
     int i;
     memlist = SearchSysCacheList1(PGXCNODENAME, PointerGetDatum(nodename));
     for (i = 0; i < memlist->n_members; i++) {
-        HeapTuple tup = &memlist->members[i]->tuple;
+        HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, i);
         char node_type = ((Form_pgxc_node)GETSTRUCT(tup))->node_type;
         if (node_type == PGXC_NODE_DATANODE) {
             dn_oid = HeapTupleGetOid(tup);
@@ -2657,7 +2668,7 @@ bool check_pgxc_node_name_is_exist(
     Assert(host != NULL);
     memlist = SearchSysCacheList1(PGXCNODENAME, PointerGetDatum(nodename));
     for (i = 0; i < memlist->n_members; i++) {
-        HeapTuple tup = &memlist->members[i]->tuple;
+        HeapTuple tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, i);
         const char* host_exist = NameStr(((Form_pgxc_node)GETSTRUCT(tup))->node_host);
         int port_exist = ((Form_pgxc_node)GETSTRUCT(tup))->node_port;
         int comm_sctp_port_exist = ((Form_pgxc_node)GETSTRUCT(tup))->sctp_port;
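The get_valid_relname_relid() hunk above generalizes the retry loop to watch two invalidation counters (session-level and, when the local syscache is enabled, thread-level) instead of one. A self-contained sketch of the counter-snapshot retry pattern; the counters and the lookup helper are stand-ins:

```
// Snapshot invalidation counters, look up and lock, retry if anything moved.
#include <cstdint>

static uint64_t session_inval_counter = 0;  // like u_sess->inval_cxt.SIMCounter
static uint64_t thread_inval_counter = 0;   // thread-local counter stand-in
static bool local_syscache_enabled = false; // EnableLocalSysCache() stand-in

struct Ids { uint32_t nspid; uint32_t relid; };

// Placeholder for get_namespace_oid + get_relname_relid + object locking.
static Ids do_lookup_and_lock(const char*, const char*) { return {0, 0}; }

Ids lookup_until_stable(const char* nspname, const char* relname) {
    for (;;) {
        uint64_t sess = session_inval_counter;
        uint64_t thrd = local_syscache_enabled ? thread_inval_counter : 0;
        Ids ids = do_lookup_and_lock(nspname, relname);
        // If no invalidation message arrived between the snapshot and the
        // locks, the name->OID mapping cannot have changed under us.
        if (sess == session_inval_counter &&
            (!local_syscache_enabled || thrd == thread_inval_counter)) {
            return ids;
        }
        // Otherwise discard the answer and retry from scratch.
    }
}
```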
@@ -2745,7 +2756,7 @@ void get_node_info(const char* nodename, bool* node_is_ccn, ItemPointer tuple_po
         heap_close(rel, AccessShareLock);
         return;
     }
-    tuple = &memlist->members[0]->tuple;
+    tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, 0);
     /* get nodeis_central for the node */
     Datum centralDatum = SysCacheGetAttr(PGXCNODEOID, tuple, Anum_pgxc_node_is_central, &is_null);
     if (!is_null) {
@@ -2861,7 +2872,7 @@ bool is_pgxc_central_nodename(const char* nodename)
         is_central = false;
         goto result;
     }
-    tup = &memlist->members[0]->tuple;
+    tup = t_thrd.lsc_cxt.FetchTupleFromCatCList(memlist, 0);
     central_datum = ((Form_pgxc_node)GETSTRUCT(tup))->nodeis_central;
     is_central = DatumGetBool(central_datum);
 result:
@@ -5041,7 +5052,7 @@ Oid get_func_oid(const char* funcname, Oid funcnamespace, Expr* expr)
 
     /* Search syscache by name only */
 #ifndef ENABLE_MULTIPLE_NODES
-    if (u_sess->attr.attr_common.IsInplaceUpgrade) {
+    if (t_thrd.proc->workingVersionNum < 92470) {
         catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname));
     } else {
         catlist = SearchSysCacheList1(PROCALLARGS, CStringGetDatum(funcname));
@@ -5075,7 +5086,7 @@ Oid get_func_oid(const char* funcname, Oid funcnamespace, Expr* expr)
     catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname));
 #endif
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple proctup = &catlist->members[i]->tuple;
+        HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup);
         if (OidIsValid(funcnamespace)) {
             /* Consider only procs in specified namespace */
@@ -5138,8 +5149,9 @@ Oid get_func_oid(const char* funcname, Oid funcnamespace, Expr* expr)
             continue;
         }
 
+        Oid func_oid = HeapTupleGetOid(proctup);
         ReleaseSysCacheList(catlist);
-        return HeapTupleGetOid(proctup);
+        return func_oid;
     }
     ReleaseSysCacheList(catlist);
     return InvalidOid;
@@ -5160,7 +5172,7 @@ Oid get_func_oid_ext(const char* funcname, Oid funcnamespace, Oid funcrettype, i
     catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(funcname));
 #endif
     for (i = 0; i < catlist->n_members; i++) {
-        HeapTuple proctup = &catlist->members[i]->tuple;
+        HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i);
         Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(proctup);
         if (OidIsValid(funcnamespace)) {
             /* Consider only procs in specified namespace */
@@ -5186,8 +5198,9 @@ Oid get_func_oid_ext(const char* funcname, Oid funcnamespace, Oid funcrettype, i
         if (j < funcnargs) {
             continue;
         }
+        Oid func_oid = HeapTupleGetOid(proctup);
         ReleaseSysCacheList(catlist);
-        return HeapTupleGetOid(proctup);
+        return func_oid;
     }
     ReleaseSysCacheList(catlist);
     return InvalidOid;
diff --git a/src/common/backend/utils/cache/partcache.cpp b/src/common/backend/utils/cache/partcache.cpp
index 42f431fee..2979c7025 100644
--- a/src/common/backend/utils/cache/partcache.cpp
+++ b/src/common/backend/utils/cache/partcache.cpp
@@ -24,6 +24,7 @@
 #include "postgres.h"
 #include "knl/knl_variable.h"
 #include 
+#include "access/csnlog.h"
 #include "access/reloptions.h"
 #include "access/sysattr.h"
 #include "access/transam.h"
@@ -58,7 +59,6 @@
 #include "rewrite/rewriteDefine.h"
 #include "rewrite/rewriteHandler.h"
 #include "storage/lmgr.h"
-#include "storage/page_compression.h"
 #include "storage/smgr/smgr.h"
 #include "storage/smgr/segment.h"
 #include "catalog/storage.h"
@@ -78,46 +78,9 @@
 #include "catalog/pg_partition.h"
 #include "postmaster/autovacuum.h"
 #include "nodes/makefuncs.h"
-
-/*
- * part 1:macro definitions, global virables, and typedefs
- */
-typedef struct partidcacheent {
-    Oid partoid;
-    Partition partdesc;
-} PartIdCacheEnt;
-
-#define PartitionCacheInsert(PARTITION) \
-    do { \
-        PartIdCacheEnt* idhentry; \
-        bool found = true; \
-        idhentry = (PartIdCacheEnt*)hash_search( \
-            u_sess->cache_cxt.PartitionIdCache, (void*)&((PARTITION)->pd_id), HASH_ENTER, &found); \
-        /* used to give notice if found -- now just keep quiet */ \
-        idhentry->partdesc = PARTITION; \
-    } while (0)
-
-#define PartitionIdCacheLookup(ID, PARTITION) \
-    do { \
-        PartIdCacheEnt* hentry; \
-        hentry = (PartIdCacheEnt*)hash_search(u_sess->cache_cxt.PartitionIdCache, (void*)&(ID), HASH_FIND, NULL); \
-        if (hentry != NULL) \
-            (PARTITION) = hentry->partdesc; \
-        else \
-            (PARTITION) = NULL; \
-    } while (0)
-
-#define PartitionCacheDelete(PARTITION) \
-    do { \
-        PartIdCacheEnt* idhentry; \
-        idhentry = (PartIdCacheEnt*)hash_search( \
-            u_sess->cache_cxt.PartitionIdCache, (void*)&((PARTITION)->pd_id), HASH_REMOVE, NULL); \
-        if (idhentry == NULL) \
-            ereport(WARNING, \
-                (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("trying to delete a rd_id partdesc that does not exist"))); \
-    } while (0)
-
-#define INITPARTCACHESIZE 100
+#include "utils/knl_relcache.h"
+#include "utils/knl_partcache.h"
+#include "replication/walreceiver.h"
 
 /*
  *part 2: static functions used only in this c source file
@@ -126,12 +89,7 @@ typedef struct partidcacheent {
  */
 static HeapTuple ScanPgPartition(Oid targetPartId, bool indexOK, Snapshot snapshot);
 static Partition AllocatePartitionDesc(Form_pg_partition relp);
-static Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type, bool insertIt);
-static void PartitionInitPhysicalAddr(Partition partition);
-static void PartitionDestroyPartition(Partition partition);
 static void PartitionFlushPartition(Partition partition);
-static void PartitionClearPartition(Partition partition, bool rebuild);
-static void PartitionReloadIndexInfo(Partition part);
 
 static void PartitionParseRelOptions(Partition partition, HeapTuple tuple);
@@ -172,7 +130,7 @@ static HeapTuple ScanPgPartition(Oid targetPartId, bool indexOK, Snapshot snapsh
     pg_partition_desc = heap_open(PartitionRelationId, AccessShareLock);
     pg_partition_scan = systable_beginscan(pg_partition_desc,
         PartitionOidIndexId,
-        indexOK && u_sess->relcache_cxt.criticalRelcachesBuilt,
+        indexOK && LocalRelCacheCriticalRelcachesBuilt(),
         snapshot,
         1,
         key);
@@ -200,8 +158,8 @@ static Partition AllocatePartitionDesc(Form_pg_partition partp)
     Form_pg_partition partitionForm;
     errno_t rc = 0;
 
-    /* Relcache entries must live in u_sess->cache_mem_cxt */
-    oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt);
+    /* Relcache entries must live in LocalMyDBCacheMemCxt() */
+    oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
 
     /*
      * allocate and zero space for new relation descriptor
@@ -235,7 +193,7 @@ static Partition AllocatePartitionDesc(Form_pg_partition partp)
     return partition;
 }
 
-StorageType PartitionGetStorageType(Oid parentOid)
+StorageType PartitionGetStorageType(Partition partition, Oid parentOid)
 {
     HeapTuple pg_class_tuple;
     StorageType storageType;
@@ -261,7 +219,7 @@ StorageType PartitionGetStorageType(Partition partition, Oid parentOid)
     return storageType;
 }
 
-static Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type, bool insertIt)
+Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type, bool insertIt)
 {
     Partition partition;
     Oid partid;
@@ -291,7 +249,6 @@ static Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type,
      * to partition->pd_part.
      */
     partition = AllocatePartitionDesc(partp);
-
    /*
     * initialize the partition's partition id (partition->pd_id)
    */
@@ -324,7 +281,7 @@ Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type, bool in
             partitionTableOid = partition->pd_part->parentid;
         }
         if (storage_type == INVALID_STORAGE) {
-            storage_type = PartitionGetStorageType(partitionTableOid);
+            storage_type = PartitionGetStorageType(partition, partitionTableOid);
         }
     }
@@ -346,24 +303,30 @@ Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type, bool in
     /* Assign value in partitiongetrelation. */
     partition->partMap = NULL;
 
+    if (IS_DISASTER_RECOVER_MODE) {
+        TransactionId xmin = HeapTupleGetRawXmin(pg_partition_tuple);
+        partition->xmin_csn = CSNLogGetDRCommitSeqNo(xmin);
+    } else {
+        partition->xmin_csn = InvalidCommitSeqNo;
+    }
+
     /*
      * now we can free the memory allocated for pg_class_tuple
      */
     heap_freetuple_ext(pg_partition_tuple);
 
+    /* It's fully valid */
+    partition->pd_isvalid = true;
     /*
      * Insert newly created relation into partcache hash table, if requested.
      */
     if (insertIt) {
-        PartitionCacheInsert(partition);
+        PartitionIdCacheInsertIntoLocal(partition);
     }
 
-    /* It's fully valid */
-    partition->pd_isvalid = true;
-
     return partition;
 }
 
-static void PartitionInitPhysicalAddr(Partition partition)
+void PartitionInitPhysicalAddr(Partition partition)
 {
     partition->pd_node.spcNode = ConvertToRelfilenodeTblspcOid(partition->pd_part->reltablespace);
     if (partition->pd_node.spcNode == GLOBALTABLESPACE_OID) {
@@ -387,12 +350,6 @@ void PartitionInitPhysicalAddr(Partition partition)
                 partition->pd_id)));
         }
     }
-
-    partition->pd_node.opt = 0;
-    if (partition->rd_options) {
-        SetupPageCompressForRelation(&partition->pd_node, &((StdRdOptions*)(partition->rd_options))->compress,
-            PartitionGetPartitionName(partition));
-    }
 }
 
 /*
@@ -402,6 +359,9 @@
  */
 Partition PartitionIdGetPartition(Oid partitionId, StorageType storage_type)
 {
+    if (EnableLocalSysCache()) {
+        return t_thrd.lsc_cxt.lsc->partdefcache.PartitionIdGetPartition(partitionId, storage_type);
+    }
     Partition pd;
     /*
     * first try to find reldesc in the cache
@@ -481,7 +441,7 @@ void PartitionClose(Partition partition)
 }
 
 Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid partfilenode, Oid parttablespace,
-    StorageType storage_type, Datum reloptions)
+    StorageType storage_type)
 {
     Partition part;
     MemoryContext oldcxt;
@@ -489,7 +449,7 @@ Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid part
     /*
      * switch to the cache context to create the partcache entry.
      */
-    oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt);
+    oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
 
     /*
      * allocate a new relation descriptor and fill in basic state fields.
@@ -505,7 +465,7 @@ Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid part
     part->pd_newRelfilenodeSubid = InvalidSubTransactionId;
 
     /* must flag that we have rels created in this transaction */
-    u_sess->cache_cxt.part_cache_need_eoxact_work = true;
+    SetPartCacheNeedEoxactWork(true);
 
     /*
      * initialize partition tuple form (caller may add/override data later)
@@ -530,11 +490,6 @@ Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid part
 
     if (partfilenode != InvalidOid) {
         PartitionInitPhysicalAddr(part);
-        /* compressed option was set by PartitionInitPhysicalAddr if part->rd_options != NULL */
-        if (part->rd_options == NULL && reloptions) {
-            StdRdOptions* options = (StdRdOptions*)default_reloptions(reloptions, false, RELOPT_KIND_HEAP);
-            SetupPageCompressForRelation(&part->pd_node, &options->compress, PartitionGetPartitionName(part));
-        }
     }
 
     if (storage_type == SEGMENT_PAGE) {
@@ -543,16 +498,17 @@ Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid part
         part->pd_node.bucketNode = InvalidBktId;
     }
 
+    /* It's fully valid */
+    part->pd_isvalid = true;
     /*
      * Okay to insert into the partcache hash tables.
      */
-    PartitionCacheInsert(part);
+    PartitionIdCacheInsertIntoLocal(part);
 
     /*
      * done building partcache entry.
      */
     (void)MemoryContextSwitchTo(oldcxt);
-    /* It's fully valid */
-    part->pd_isvalid = true;
+
     /*
      * Caller expects us to pin the returned entry.
      */
@@ -566,7 +522,7 @@ Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid part
  * Physically delete a partition cache entry and all subsidiary data.
  * Caller must already have unhooked the entry from the hash table.
  */
-static void PartitionDestroyPartition(Partition partition)
+void PartitionDestroyPartition(Partition partition)
 {
     Assert(PartitionHasReferenceCountZero(partition));
 
@@ -580,13 +536,10 @@ void PartitionDestroyPartition(Partition partition)
      * Free all the subsidiary data structures of the partcache entry, then the
      * entry itself.
      */
-    if (partition->pd_part) {
-        pfree_ext(partition->pd_part);
-    }
+    pfree_ext(partition->pd_indexattr);
+    pfree_ext(partition->pd_part);
     list_free_ext(partition->pd_indexlist);
-    if (partition->rd_options) {
-        pfree_ext(partition->rd_options);
-    }
+    pfree_ext(partition->rd_options);
     if (partition->partrel) {
         /* in function releaseDummyRelation, owner->nfakerelrefs decrease one due to ResourceOwnerForgetFakerelRef,
          * which is not we expect, so we use ResourceOwnerRememberFakerelRef correspondingly */
@@ -627,7 +580,7 @@ void PartitionDestroyPartition(Partition partition)
  * to match the relation's refcnt status, but we keep it as a crosscheck
  * that we're doing what the caller expects.
 */
-static void PartitionClearPartition(Partition partition, bool rebuild)
+void PartitionClearPartition(Partition partition, bool rebuild)
 {
     /*
      * As per notes above, a rel to be rebuilt MUST have refcnt > 0; while of
@@ -681,7 +634,7 @@ void PartitionClearPartition(Partition partition, bool rebuild)
      */
     if (!rebuild) {
         /* Remove it from the hash table */
-        PartitionCacheDelete(partition);
+        PartitionIdCacheDeleteLocal(partition);
 
         /* And release storage */
         PartitionDestroyPartition(partition);
@@ -715,17 +668,23 @@ void PartitionClearPartition(Partition partition, bool rebuild)
          * is good because whatever ref counts the entry may have do not
         * necessarily belong to that resource owner.
         */
-        Partition newpart;
+        Partition newpart = NULL;
         Oid save_partid = PartitionGetPartid(partition);
         errno_t rc = 0;
 
         /* Build temporary entry, but don't link it into hashtable */
         int4 tempNode = partition->pd_node.bucketNode;
-        newpart = PartitionBuildDesc(save_partid, INVALID_STORAGE, false);
+        if (EnableLocalSysCache()) {
+            // a build request means the local cache doesn't contain the partition or it is invalid,
+            // so search the global cache directly
+            newpart = t_thrd.lsc_cxt.lsc->partdefcache.SearchPartitionFromGlobalCopy(save_partid);
+        }
+        if (!RelationIsValid(newpart)) {
+            newpart = PartitionBuildDesc(save_partid, INVALID_STORAGE, false);
+        }
         if (NULL == newpart) {
             /* Should only get here if partition was deleted */
-            PartitionCacheDelete(partition);
+            PartitionIdCacheDeleteLocal(partition);
 
             PartitionDestroyPartition(partition);
             ereport(ERROR,
                 (errcode(ERRCODE_OBJECT_IN_USE), errmsg("partition %u deleted while still in use", save_partid)));
@@ -790,6 +749,7 @@ void PartitionClearPartition(Partition partition, bool rebuild)
         SWAPFIELD(PartitionMap*, partMap);
     }
 
+    SWAPFIELD(LocalPartitionEntry*, entry);
 #undef SWAPFIELD
 
     /* And now we can throw away the temporary entry */
@@ -838,7 +798,7 @@ void PartitionForgetPartition(Oid partid)
 {
     Partition partition;
 
-    PartitionIdCacheLookup(partid, partition);
+    PartitionIdCacheLookupOnlyLocal(partid, partition);
 
     if (!PointerIsValid(partition)) {
         return; /* not in cache, nothing to do */
@@ -871,7 +831,7 @@ void PartitionCacheInvalidateEntry(Oid partitionId)
 {
     Partition partition;
 
-    PartitionIdCacheLookup(partitionId, partition);
+    PartitionIdCacheLookupOnlyLocal(partitionId, partition);
 
     if (PointerIsValid(partition)) {
         PartitionFlushPartition(partition);
@@ -908,8 +868,13 @@
  * items. This should ensure that system catalogs are up to date before
 * we attempt to use them to reload information about other open relations.
 */
+
 void PartitionCacheInvalidate(void)
 {
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->partdefcache.InvalidateAll();
+        return;
+    }
     HASH_SEQ_STATUS status;
     PartIdCacheEnt* idhentry = NULL;
     Partition partition;
@@ -966,7 +931,7 @@ void PartitionCloseSmgrByOid(Oid partitionId)
 {
     Partition partition;
 
-    PartitionIdCacheLookup(partitionId, partition);
+    PartitionIdCacheLookupOnlyLocal(partitionId, partition);
 
     if (!PointerIsValid(partition)) {
         return; /* not in cache, nothing to do */
@@ -990,6 +955,10 @@
 */
 void AtEOXact_PartitionCache(bool isCommit)
 {
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->partdefcache.AtEOXact_PartitionCache(isCommit);
+        return;
+    }
     HASH_SEQ_STATUS status;
     PartIdCacheEnt* idhentry = NULL;
@@ -1070,6 +1039,10 @@
 */
 void AtEOSubXact_PartitionCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid)
 {
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->partdefcache.AtEOSubXact_PartitionCache(isCommit, mySubid, parentSubid);
+        return;
+    }
     HASH_SEQ_STATUS status;
     PartIdCacheEnt* idhentry = NULL;
@@ -1127,6 +1100,10 @@
 void PartitionCacheInitialize(void)
 {
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->partdefcache.Init();
+        return;
+    }
     HASHCTL ctl;
     errno_t rc;
@@ -1140,6 +1117,7 @@ void PartitionCacheInitialize(void)
     ctl.entrysize = sizeof(PartIdCacheEnt);
     ctl.hash = oid_hash;
     ctl.hcxt = u_sess->cache_mem_cxt;
+    const int INITPARTCACHESIZE = 128;
     u_sess->cache_cxt.PartitionIdCache =
         hash_create("Partcache by OID", INITPARTCACHESIZE, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
 }
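Each partcache entry point in the hunks above gets the same prologue: if EnableLocalSysCache() is on, delegate to the per-thread partdefcache object and return, leaving the legacy session-hash path untouched below. The shape of that guard, with illustrative names standing in for the openGauss globals:

```
// Delegation guard: thread-local cache when enabled, legacy path otherwise.
struct PartDefCache {
    void InvalidateAll() { /* thread-local implementation */ }
};

static bool local_syscache_enabled = false;   // EnableLocalSysCache() stand-in
static PartDefCache thread_partdefcache;      // t_thrd...partdefcache stand-in

static void legacy_invalidate_all() { /* original u_sess hash-table walk */ }

void partition_cache_invalidate_sketch() {
    if (local_syscache_enabled) {
        thread_partdefcache.InvalidateAll();
        return;                               // legacy code below untouched
    }
    legacy_invalidate_all();
}
```

Putting the guard at the top of every entry point keeps the two implementations from ever interleaving within one call.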
@@ -1249,6 +1227,73 @@ bytea* merge_rel_part_reloption(Oid rel_oid, Oid part_oid)
     return merged_rd_options;
 }
 
+void UpdatePartrelPointer(Relation partrel, Relation rel, Partition part)
+{
+    Assert(partrel->rd_refcnt == part->pd_refcnt);
+    Assert(partrel->rd_isvalid == part->pd_isvalid);
+    Assert(partrel->rd_indexvalid == part->pd_indexvalid);
+    Assert(partrel->rd_createSubid == part->pd_createSubid);
+    Assert(partrel->rd_newRelfilenodeSubid == part->pd_newRelfilenodeSubid);
+
+    Assert(partrel->rd_rel->reltoastrelid == part->pd_part->reltoastrelid);
+    Assert(partrel->rd_rel->reltablespace == part->pd_part->reltablespace);
+    Assert(partrel->rd_rel->relfilenode == part->pd_part->relfilenode);
+    Assert(partrel->rd_rel->relpages == part->pd_part->relpages);
+    Assert(partrel->rd_rel->reltuples == part->pd_part->reltuples);
+    Assert(partrel->rd_rel->relallvisible == part->pd_part->relallvisible);
+    Assert(partrel->rd_rel->relcudescrelid == part->pd_part->relcudescrelid);
+    Assert(partrel->rd_rel->relcudescidx == part->pd_part->relcudescidx);
+    Assert(partrel->rd_rel->reldeltarelid == part->pd_part->reldeltarelid);
+    Assert(partrel->rd_rel->reldeltaidx == part->pd_part->reldeltaidx);
+    Assert(partrel->rd_bucketoid == rel->rd_bucketoid);
+
+    partrel->rd_att = rel->rd_att;
+    Assert(partrel->rd_partHeapOid == part->pd_part->indextblid);
+    partrel->rd_index = rel->rd_index;
+    partrel->rd_indextuple = rel->rd_indextuple;
+    partrel->rd_am = rel->rd_am;
+    partrel->rd_indnkeyatts = rel->rd_indnkeyatts;
+    Assert(partrel->rd_tam_type == rel->rd_tam_type);
+
+    partrel->rd_aminfo = rel->rd_aminfo;
+    partrel->rd_opfamily = rel->rd_opfamily;
+    partrel->rd_opcintype = rel->rd_opcintype;
+    partrel->rd_support = rel->rd_support;
+    partrel->rd_supportinfo = rel->rd_supportinfo;
+
+    partrel->rd_indoption = rel->rd_indoption;
+    partrel->rd_indexprs = rel->rd_indexprs;
+    partrel->rd_indpred = rel->rd_indpred;
+    partrel->rd_exclops = rel->rd_exclops;
+    partrel->rd_exclprocs = rel->rd_exclprocs;
+    partrel->rd_exclstrats = rel->rd_exclstrats;
+
+    partrel->rd_amcache = rel->rd_amcache;
+    partrel->rd_indcollation = rel->rd_indcollation;
+    Assert(partrel->rd_id == part->pd_id);
+    partrel->rd_indexlist = part->pd_indexlist;
+    Assert(partrel->rd_oidindex == part->pd_oidindex);
+    Assert(partrel->rd_toastoid == part->pd_toastoid);
+    Assert(partrel->subpartitiontype == part->pd_part->parttype);
+    partrel->pgstat_info = part->pd_pgstat_info;
+    Assert(partrel->parentId == rel->rd_id);
+    Assert(partrel->rd_isblockchain == rel->rd_isblockchain);
+    Assert(partrel->storage_type == rel->storage_type);
+}
+
+static void SetRelationPartitionMap(Relation relation, Partition part)
+{
+    if (!(PartitionHasSubpartition(part) && part->pd_part->parttype == PART_OBJ_TYPE_TABLE_PARTITION)) {
+        return;
+    }
+    if (part->partMap != NULL) {
+        relation->partMap = part->partMap;
+    } else {
+        RelationInitPartitionMap(relation, true);
+        part->partMap = relation->partMap;
+    }
+}
+
 /*
  * @@GaussDB@@
  * Target : data partition
@@ -1266,6 +1311,21 @@ Relation partitionGetRelation(Relation rel, Partition part)
     bytea* des_reloption = NULL;
 
     Assert(PointerIsValid(rel) && PointerIsValid(part));
+    /*
+     * If the rel is a subpartitioned table and the part is a subpartition, we need to open
+     * the level-1 partition to get the subpartition relation. By the time the caller gets
+     * the subpartition, the level-1 partition has already been locked, so partitionOpen
+     * uses NoLock here.
+     */
+    if (RelationIsSubPartitioned(rel) && rel->rd_id != part->pd_part->parentid) {
+        Assert(rel->rd_id == partid_get_parentid(part->pd_part->parentid));
+        Partition parentPart = partitionOpen(rel, part->pd_part->parentid, NoLock);
+        Relation parentPartRel = partitionGetRelation(rel, parentPart);
+        relation = partitionGetRelation(parentPartRel, part);
+        releaseDummyRelation(&parentPartRel);
+        partitionClose(rel, parentPart, NoLock);
+        return relation;
+    }
+    Assert(rel->rd_id == part->pd_part->parentid);
     /*
      * Memory malloced in merge_rel_part_reloption cannot mount in CacheMemoryContext,
@@ -1276,7 +1336,7 @@ Relation partitionGetRelation(Relation rel, Partition part)
         merge_reloption = merge_rel_part_reloption(RelationGetRelid(rel), PartitionGetPartid(part));
     }
 
-    oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt);
+    oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
     relation = (Relation)palloc0(sizeof(RelationData));
     if (!IsBootstrapProcessingMode()) {
         ResourceOwnerRememberFakerelRef(t_thrd.utils_cxt.CurrentResourceOwner, relation);
@@ -1324,7 +1384,7 @@ Relation partitionGetRelation(Relation rel, Partition part)
         relation->rd_indexcxt = NULL;
     } else {
         Assert(rel->rd_indexcxt != NULL);
-        relation->rd_indexcxt = AllocSetContextCreate(u_sess->cache_mem_cxt,
+        relation->rd_indexcxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(),
             PartitionGetPartitionName(part),
             ALLOCSET_SMALL_MINSIZE,
             ALLOCSET_SMALL_INITSIZE,
@@ -1385,14 +1445,8 @@ Relation partitionGetRelation(Relation rel, Partition part)
         securec_check(ret, "\0", "\0");
     }
 
-    if (PartitionHasSubpartition(part) && part->pd_part->parttype == PART_OBJ_TYPE_TABLE_PARTITION) {
-        if (part->partMap != NULL) {
-            relation->partMap = part->partMap;
-        } else {
-            RelationInitPartitionMap(relation, true);
-            part->partMap = relation->partMap;
-        }
-    }
+    SetRelationPartitionMap(relation, part);
+
     (void)MemoryContextSwitchTo(oldcxt);
 
     return relation;
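The new block in partitionGetRelation() handles subpartitions by recursing through the level-1 parent: open the parent partition, turn it into a dummy relation, then repeat the lookup against that. A sketch of the control flow, with stub helpers standing in for partitionOpen/partitionGetRelation/releaseDummyRelation:

```
// Resolve a level-2 subpartition by recursing through its level-1 parent.
struct Part { int id; int parent_id; };
struct Rel { int id; };

static Part* open_partition(Rel*, int) { return nullptr; }   // stub
static void close_partition(Rel*, Part*) {}                  // stub
static Rel* make_dummy_relation(Rel*, Part*) { return nullptr; } // stub
static void release_dummy(Rel*) {}                           // stub

Rel* partition_get_relation_sketch(Rel* rel, Part* part) {
    if (part->parent_id != rel->id) {
        // part is a subpartition: open the level-1 parent first, build its
        // dummy relation, then recurse using that as the new base relation
        Part* parent_part = open_partition(rel, part->parent_id);
        Rel* parent_rel = make_dummy_relation(rel, parent_part);
        Rel* result = partition_get_relation_sketch(parent_rel, part);
        release_dummy(parent_rel);           // unwind in reverse order
        close_partition(rel, parent_part);
        return result;
    }
    return make_dummy_relation(rel, part);   // part is a direct child
}
```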
+1500,7 @@ void releaseDummyRelation(Relation* relation) * Brief : reload minimal information for an open index partition * Description : */ -static void PartitionReloadIndexInfo(Partition part) +void PartitionReloadIndexInfo(Partition part) { HeapTuple pg_partition_tuple; Form_pg_partition partForm; @@ -1638,7 +1692,7 @@ void PartitionSetNewRelfilenode(Relation parent, Partition part, TransactionId f part->pd_newRelfilenodeSubid = GetCurrentSubTransactionId(); /* ... and now we have eoxact cleanup work to do */ - u_sess->cache_cxt.part_cache_need_eoxact_work = true; + SetPartCacheNeedEoxactWork(true); } static void PartitionParseRelOptions(Partition partition, HeapTuple tuple) @@ -1668,13 +1722,13 @@ static void PartitionParseRelOptions(Partition partition, HeapTuple tuple) options = heap_reloptions(RELKIND_RELATION, datum, false); /* - * Copy parsed data into u_sess->cache_mem_cxt. To guard against the + * Copy parsed data into LocalMyDBCacheMemCxt(). To guard against the * possibility of leaks in the reloptions code, we want to do the actual * parsing in the caller's memory context and copy the results into - * u_sess->cache_mem_cxt after the fact. + * LocalMyDBCacheMemCxt() after the fact. */ if (options != NULL) { - partition->rd_options = (bytea*)MemoryContextAlloc(u_sess->cache_mem_cxt, VARSIZE(options)); + partition->rd_options = (bytea*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), VARSIZE(options)); rc = memcpy_s(partition->rd_options, VARSIZE(options), options, VARSIZE(options)); securec_check(rc, "", ""); pfree_ext(options); @@ -1850,7 +1904,7 @@ Datum SetWaitCleanGpiRelOptions(Datum oldOptions, bool enable) } /* Update pg_partition's tuple attribute reloptions wait_clean_gpi */ -static void UpdateWaitCleanGpiRelOptions(Relation pgPartition, HeapTuple partTuple, bool enable, bool inplace) +void UpdateWaitCleanGpiRelOptions(Relation pgPartition, HeapTuple partTuple, bool enable, bool inplace) { HeapTuple newTuple; Datum partOptions; diff --git a/src/common/backend/utils/cache/plancache.cpp b/src/common/backend/utils/cache/plancache.cpp index 0197bae9f..c0b7bdd5e 100644 --- a/src/common/backend/utils/cache/plancache.cpp +++ b/src/common/backend/utils/cache/plancache.cpp @@ -133,12 +133,12 @@ bool IsStreamSupport() */ void InitPlanCache(void) { - CacheRegisterRelcacheCallback(PlanCacheRelCallback, (Datum)0); - CacheRegisterPartcacheCallback(PlanCacheRelCallback, (Datum)0); - CacheRegisterSyscacheCallback(PROCOID, PlanCacheFuncCallback, (Datum)0); - CacheRegisterSyscacheCallback(NAMESPACEOID, PlanCacheSysCallback, (Datum)0); - CacheRegisterSyscacheCallback(OPEROID, PlanCacheSysCallback, (Datum)0); - CacheRegisterSyscacheCallback(AMOPOPID, PlanCacheSysCallback, (Datum)0); + CacheRegisterSessionRelcacheCallback(PlanCacheRelCallback, (Datum)0); + CacheRegisterSessionPartcacheCallback(PlanCacheRelCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(PROCOID, PlanCacheFuncCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(NAMESPACEOID, PlanCacheSysCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(OPEROID, PlanCacheSysCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(AMOPOPID, PlanCacheSysCallback, (Datum)0); } /* ddl no need to global it */ @@ -1598,13 +1598,6 @@ static bool ChooseCustomPlan(CachedPlanSource* plansource, ParamListInfo boundPa return ret; } - /* Don't choose custom plan if using pbe optimization */ - bool isPbeAndFqs = u_sess->attr.attr_sql.enable_pbe_optimization && plansource->gplan_is_fqs; - if (isPbeAndFqs) { - 
ReportReasonForPlanChoose(PBE_OPTIMIZATION); - return false; - } - /* Let settings force the decision */ if (unlikely(PLAN_CACHE_MODE_AUTO != u_sess->attr.attr_sql.g_planCacheMode)) { if (PLAN_CACHE_MODE_FORCE_GENERIC_PLAN == u_sess->attr.attr_sql.g_planCacheMode) { @@ -1618,6 +1611,13 @@ static bool ChooseCustomPlan(CachedPlanSource* plansource, ParamListInfo boundPa } } + /* Don't choose custom plan if using pbe optimization */ + bool isPbeAndFqs = u_sess->attr.attr_sql.enable_pbe_optimization && plansource->gplan_is_fqs; + if (isPbeAndFqs) { + ReportReasonForPlanChoose(PBE_OPTIMIZATION); + return false; + } + /* Generate custom plans until we have done at least 5 (arbitrary) */ if (plansource->num_custom_plans < 5) { ReportReasonForPlanChoose(TRY_CPLAN); @@ -1994,6 +1994,178 @@ void ReleaseCachedPlan(CachedPlan* plan, bool useResOwner) } } +/* + * CachedPlanIsSimplyValid: quick check for plan still being valid + * + * This function must not be used unless CachedPlanAllowsSimpleValidityCheck + * previously said it was OK. + * + * If the plan is valid, and "owner" is not NULL, record a refcount on + * the plan in that resowner before returning. It is caller's responsibility + * to be sure that a refcount is held on any plan that's being actively used. + * + * The code here is unconditionally safe as long as the only use of this + * CachedPlanSource is in connection with the particular CachedPlan pointer + * that's passed in. If the plansource were being used for other purposes, + * it's possible that its generic plan could be invalidated and regenerated + * while the current caller wasn't looking, and then there could be a chance + * collision of address between this caller's now-stale plan pointer and the + * actual address of the new generic plan. For current uses, that scenario + * can't happen; but with a plansource shared across multiple uses, it'd be + * advisable to also save plan->generation and verify that that still matches. + */ +bool CachedPlanIsSimplyValid(CachedPlanSource *plansource, CachedPlan *plan, ResourceOwner owner) +{ + /* + * Careful here: since the caller doesn't necessarily hold a refcount on + * the plan to start with, it's possible that "plan" is a dangling + * pointer. Don't dereference it until we've verified that it still + * matches the plansource's gplan (which is either valid or NULL). + */ + Assert(plansource->magic == CACHEDPLANSOURCE_MAGIC); + + /* + * Has cache invalidation fired on this plan? We can check this right + * away since there are no locks that we'd need to acquire first. Note + * that here we *do* check plansource->is_valid, so as to force plan + * rebuild if that's become false. + */ + if (!plansource->is_valid || plan != plansource->gplan || !plan->is_valid) + return false; + + Assert(plan->magic == CACHEDPLAN_MAGIC); + + /* Is the search_path still the same as when we made it? */ + Assert(plansource->search_path != NULL); + if (!OverrideSearchPathMatchesCurrent(plansource->search_path)) + return false; + + /* It's still good. Bump refcount if requested. */ + if (owner) { + ResourceOwnerEnlargePlanCacheRefs(owner); + plan->refcount++; + ResourceOwnerRememberPlanCacheRef(owner, plan); + } + + return true; +} + +/* + * CachedPlanAllowsSimpleValidityCheck: can we use CachedPlanIsSimplyValid? + * + * This function, together with CachedPlanIsSimplyValid, provides a fast path + * for revalidating "simple" generic plans.
The core requirement to be simple + * is that the plan must not require taking any locks, which translates to + * not touching any tables; this happens to match up well with an important + * use-case in PL/pgSQL. This function tests whether that's true, along + * with checking some other corner cases that we'd rather not bother with + * handling in the fast path. (Note that it's still possible for such a plan + * to be invalidated, for example due to a change in a function that was + * inlined into the plan.) + * + * If the plan is simply valid, and "owner" is not NULL, record a refcount on + * the plan in that resowner before returning. It is caller's responsibility + * to be sure that a refcount is held on any plan that's being actively used. + * + * This must only be called on known-valid generic plans (eg, ones just + * returned by GetCachedPlan). If it returns true, the caller may re-use + * the cached plan as long as CachedPlanIsSimplyValid returns true; that + * check is much cheaper than the full revalidation done by GetCachedPlan. + * Nonetheless, no required checks are omitted. + */ +bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, CachedPlan *plan, ResourceOwner owner) +{ + ListCell *lc; + + /* + * Sanity-check that the caller gave us a validated generic plan. Notice + * that we *don't* assert plansource->is_valid as you might expect; that's + * because it's possible that that's already false when GetCachedPlan + * returns, e.g. because ResetPlanCache happened partway through. We + * should accept the plan as long as plan->is_valid is true, and expect to + * replan after the next CachedPlanIsSimplyValid call. + */ + Assert(plansource->magic == CACHEDPLANSOURCE_MAGIC); + Assert(plan->magic == CACHEDPLAN_MAGIC); + Assert(plan->is_valid); + Assert(plan == plansource->gplan); + Assert(plansource->search_path != NULL); + Assert(OverrideSearchPathMatchesCurrent(plansource->search_path)); + + /* We don't support oneshot plans here. */ + if (plansource->is_oneshot) + return false; + Assert(!plan->is_oneshot); + + /* + * If the plan is dependent on RLS considerations, or it's transient, + * reject. These things probably can't ever happen for table-free + * queries, but for safety's sake let's check. + */ + + if (plan->dependsOnRole) + return false; + if (TransactionIdIsValid(plan->saved_xmin)) + return false; + + /* + * Reject if AcquirePlannerLocks would have anything to do. This is + * simplistic, but there's no need to inquire any more carefully; indeed, + * for current callers it shouldn't even be possible to hit any of these + * checks. + */ + foreach(lc, plansource->query_list) { + Query *query = lfirst_node(Query, lc); + + if (query->commandType == CMD_UTILITY) + return false; + if (query->rtable || query->cteList || query->hasSubLinks) + return false; + } + + /* + * Reject if AcquireExecutorLocks would have anything to do. This is + * probably unnecessary given the previous check, but let's be safe. + */ + foreach(lc, plan->stmt_list) + { + PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc); + ListCell *lc2; + + if (plannedstmt->commandType == CMD_UTILITY) + return false; + + /* + * We have to grovel through the rtable because it's likely to contain + * an RTE_RESULT relation, rather than being totally empty. 
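A minimal caller-side sketch of how the two functions above pair up, assuming a PL-style statement cache; the variable names, the loop shape, and the use of GetCachedPlan as the slow path are illustrative assumptions, not code from this patch. The expensive qualification runs once; every later reuse pays only for the cheap check.

```
/* Sketch (assumed caller): qualify the generic plan once, then
 * revalidate cheaply on each reuse. */
CachedPlan* plan = GetCachedPlan(plansource, NULL, false);
bool fastpath = CachedPlanAllowsSimpleValidityCheck(plansource, plan, owner);

/* ... on each subsequent execution ... */
if (fastpath && CachedPlanIsSimplyValid(plansource, plan, owner)) {
    /* cheap check passed: "plan" is pinned in "owner"; execute directly */
} else {
    /* fast path unavailable or stale: full revalidation, may replan */
    plan = GetCachedPlan(plansource, NULL, false);
    fastpath = CachedPlanAllowsSimpleValidityCheck(plansource, plan, owner);
}
```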
+ */ + foreach(lc2, plannedstmt->rtable) + { + RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2); + + if (rte->rtekind == RTE_RELATION) + return false; + } + + if (!IsA(plannedstmt->planTree, BaseResult)) + return false; + } + + /* + * Okay, it's simple. Note that what we've primarily established here is + * that no locks need be taken before checking the plan's is_valid flag. + */ + + /* Bump refcount if requested. */ + if (owner) { + ResourceOwnerEnlargePlanCacheRefs(owner); + plan->refcount++; + ResourceOwnerRememberPlanCacheRef(owner, plan); + } + + return true; +} + /* * CachedPlanSetParentContext: move a CachedPlanSource to a new memory context * diff --git a/src/common/backend/utils/cache/relcache.cpp b/src/common/backend/utils/cache/relcache.cpp index 7a962f7f6..603df604e 100644 --- a/src/common/backend/utils/cache/relcache.cpp +++ b/src/common/backend/utils/cache/relcache.cpp @@ -114,6 +114,8 @@ #include "catalog/pg_ts_parser.h" #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" +#include "catalog/pg_uid.h" +#include "catalog/pg_uid_fn.h" #include "catalog/pg_user_mapping.h" #include "catalog/pg_user_status.h" #include "catalog/pg_workload_group.h" @@ -132,7 +134,7 @@ #include "catalog/gs_column_keys_args.h" #include "catalog/gs_client_global_keys.h" #include "catalog/gs_client_global_keys_args.h" - +#include "catalog/gs_db_privilege.h" #include "catalog/gs_matview.h" #include "catalog/gs_matview_dependency.h" #include "catalog/pg_snapshot.h" @@ -174,7 +176,6 @@ #include "rewrite/rewriteDefine.h" #include "rewrite/rewriteRlsPolicy.h" #include "storage/lmgr.h" -#include "storage/page_compression.h" #include "storage/smgr/smgr.h" #include "storage/smgr/segment.h" #include "threadpool/threadpool.h" @@ -195,12 +196,17 @@ #include "utils/partitionmap_gs.h" #include "utils/resowner.h" #include "utils/evp_cipher.h" +#include "access/csnlog.h" #include "access/cstore_am.h" #include "nodes/nodeFuncs.h" #include "nodes/makefuncs.h" +#include "replication/walreceiver.h" #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/common/ts_tablecmds.h" #endif /* ENABLE_MULTIPLE_NODES */ +#include "utils/knl_relcache.h" +#include "utils/knl_partcache.h" +#include "utils/knl_localtabdefcache.h" /* * name of relcache init file(s), used to speed up backend startup @@ -212,15 +218,15 @@ /* * hardcoded tuple descriptors, contents generated by genbki.pl */ -static const FormData_pg_attribute Desc_pg_class[Natts_pg_class] = {Schema_pg_class}; -static const FormData_pg_attribute Desc_pg_attribute[Natts_pg_attribute] = {Schema_pg_attribute}; -static const FormData_pg_attribute Desc_pg_proc[Natts_pg_proc] = {Schema_pg_proc}; -static const FormData_pg_attribute Desc_pg_type[Natts_pg_type] = {Schema_pg_type}; -static const FormData_pg_attribute Desc_pg_database[Natts_pg_database] = {Schema_pg_database}; -static const FormData_pg_attribute Desc_pg_authid[Natts_pg_authid] = {Schema_pg_authid}; -static const FormData_pg_attribute Desc_pg_auth_members[Natts_pg_auth_members] = {Schema_pg_auth_members}; -static const FormData_pg_attribute Desc_pg_index[Natts_pg_index] = {Schema_pg_index}; -static const FormData_pg_attribute Desc_pg_user_status[Natts_pg_user_status] = {Schema_pg_user_status}; +extern const FormData_pg_attribute Desc_pg_class[Natts_pg_class] = {Schema_pg_class}; +extern const FormData_pg_attribute Desc_pg_attribute[Natts_pg_attribute] = {Schema_pg_attribute}; +extern const FormData_pg_attribute Desc_pg_proc[Natts_pg_proc] = {Schema_pg_proc}; +extern const FormData_pg_attribute 
Desc_pg_type[Natts_pg_type] = {Schema_pg_type}; +extern const FormData_pg_attribute Desc_pg_database[Natts_pg_database] = {Schema_pg_database}; +extern const FormData_pg_attribute Desc_pg_authid[Natts_pg_authid] = {Schema_pg_authid}; +extern const FormData_pg_attribute Desc_pg_auth_members[Natts_pg_auth_members] = {Schema_pg_auth_members}; +extern const FormData_pg_attribute Desc_pg_index[Natts_pg_index] = {Schema_pg_index}; +extern const FormData_pg_attribute Desc_pg_user_status[Natts_pg_user_status] = {Schema_pg_user_status}; static const FormData_pg_attribute Desc_pg_default_acl[Natts_pg_default_acl] = {Schema_pg_default_acl}; static const FormData_pg_attribute Desc_pg_pltemplate[Natts_pg_pltemplate] = {Schema_pg_pltemplate}; @@ -288,6 +294,7 @@ static const FormData_pg_attribute Desc_gs_job_attribute[Natts_gs_job_attribute] static const FormData_pg_attribute Desc_pg_object[Natts_pg_object] = {Schema_pg_object}; static const FormData_pg_attribute Desc_pg_synonym[Natts_pg_synonym] = {Schema_pg_synonym}; static const FormData_pg_attribute Desc_pg_hashbucket[Natts_pg_hashbucket] = {Schema_pg_hashbucket}; +static const FormData_pg_attribute Desc_gs_uid[Natts_gs_uid] = {Schema_gs_uid}; static const FormData_pg_attribute Desc_pg_snapshot[Natts_pg_snapshot] = {Schema_gs_txn_snapshot}; static const FormData_pg_attribute Desc_pg_recyclebin[Natts_pg_recyclebin] = {Schema_gs_recyclebin}; static const FormData_pg_attribute Desc_gs_global_chain[Natts_gs_global_chain] = {Schema_gs_global_chain}; @@ -320,6 +327,7 @@ static const FormData_pg_attribute Desc_gs_opt_model[Natts_gs_opt_model] = {Sche static const FormData_pg_attribute Desc_gs_model_warehouse[Natts_gs_model_warehouse] = {Schema_gs_model_warehouse}; static const FormData_pg_attribute Desc_gs_package[Natts_gs_package] = {Schema_gs_package}; +static const FormData_pg_attribute Desc_gs_db_privilege[Natts_gs_db_privilege] = {Schema_gs_db_privilege}; static const FormData_pg_attribute Desc_pg_subscription[Natts_pg_subscription] = {Schema_pg_subscription}; static const FormData_pg_attribute Desc_pg_publication[Natts_pg_publication] = {Schema_pg_publication}; static const FormData_pg_attribute Desc_pg_publication_rel[Natts_pg_publication_rel] = {Schema_pg_publication_rel}; @@ -829,6 +837,15 @@ static struct CatalogRelationBuildParam catalogBuildParam[CATALOG_NUM] = {{Defau Desc_pg_directory, false, true}, + {DbPrivilegeId, + "gs_db_privilege", + DbPrivilege_Rowtype_Id, + false, + true, + Natts_gs_db_privilege, + Desc_gs_db_privilege, + false, + true}, {ObsScanInfoRelationId, "pg_obsscaninfo", ObsScanInfoRelation_Rowtype_Id, @@ -910,6 +927,15 @@ static struct CatalogRelationBuildParam catalogBuildParam[CATALOG_NUM] = {{Defau Desc_pg_snapshot, false, true}, + {UidRelationId, + "gs_uid", + UidRelationId_Rowtype_Id, + false, + false, + Natts_gs_uid, + Desc_gs_uid, + false, + true}, {PgxcClassRelationId, "pgxc_class", PgxcClassRelation_Rowtype_Id, @@ -1220,38 +1246,6 @@ typedef struct relidcacheent { */ THR_LOCAL bool needNewLocalCacheFile = false; -/* - * macros to manipulate the lookup hashtables - */ -#define RelationCacheInsert(RELATION) \ - do { \ - RelIdCacheEnt* idhentry = NULL; \ - bool found = false; \ - idhentry = (RelIdCacheEnt*)hash_search( \ - u_sess->relcache_cxt.RelationIdCache, (void*)&((RELATION)->rd_id), HASH_ENTER, &found); \ - /* used to give notice if found -- now just keep quiet */ \ - idhentry->reldesc = RELATION; \ - } while (0) - -#define RelationIdCacheLookup(ID, RELATION) \ - do { \ - RelIdCacheEnt* hentry = NULL; \ - hentry = 
(RelIdCacheEnt*)hash_search(u_sess->relcache_cxt.RelationIdCache, (void*)&(ID), HASH_FIND, NULL); \ - if (hentry != NULL) \ - (RELATION) = hentry->reldesc; \ - else \ - (RELATION) = NULL; \ - } while (0) - -#define RelationCacheDelete(RELATION) \ - do { \ - RelIdCacheEnt* idhentry; \ - idhentry = (RelIdCacheEnt*)hash_search( \ - u_sess->relcache_cxt.RelationIdCache, (void*)&((RELATION)->rd_id), HASH_REMOVE, NULL); \ - if (idhentry == NULL) \ - ereport(WARNING, (errmsg("trying to delete a rd_id reldesc that does not exist"))); \ - } while (0) - /* * Special cache for opclass-related information * @@ -1268,38 +1262,29 @@ typedef struct opclasscacheent { } OpClassCacheEnt; /* non-export function prototypes */ -static void RelationDestroyRelation(Relation relation, bool remember_tupdesc); static void RememberToFreeTupleDescAtEOX(TupleDesc td); -static void RelationClearRelation(Relation relation, bool rebuild); -static void RelationReloadIndexInfo(Relation relation); static void RelationFlushRelation(Relation relation); static bool load_relcache_init_file(bool shared); static void write_relcache_init_file(bool shared); static void write_item(const void* data, Size len, FILE* fp); -static void formrdesc(const char* relationName, Oid relationReltype, bool isshared, bool hasoids, int natts, - const FormData_pg_attribute* attrs); - static Relation AllocateRelationDesc(Form_pg_class relp); static void RelationParseRelOptions(Relation relation, HeapTuple tuple); static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal); -static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey = true); -static void RelationInitPhysicalAddr(Relation relation); + static void RelationInitBucketKey(Relation relation, HeapTuple tuple); static void RelationInitBucketInfo(Relation relation, HeapTuple tuple); -static void load_critical_index(Oid indexoid, Oid heapoid); -static TupleDesc GetPgClassDescriptor(void); -static TupleDesc GetPgIndexDescriptor(void); + static void AttrDefaultFetch(Relation relation); static void CheckConstraintFetch(Relation relation); static List* insert_ordered_oid(List* list, Oid datum); -static void IndexSupportInitialize(oidvector* indclass, RegProcedure* indexSupport, Oid* opFamily, Oid* opcInType, - StrategyNumber maxSupportNumber, AttrNumber maxAttributeNumber); -static OpClassCacheEnt* LookupOpclassInfo(Oid operatorClassOid, StrategyNumber numSupport); +static void IndexSupportInitialize(Relation relation, oidvector* indclass, StrategyNumber maxSupportNumber, + AttrNumber maxAttributeNumber); +static OpClassCacheEnt* LookupOpclassInfo(Relation relation, Oid operatorClassOid, StrategyNumber numSupport); static void RelationCacheInitFileRemoveInDir(const char* tblspcpath); static void unlink_initfile(const char* initfilename); -static void SetBackendId(Relation relation); + /* * ScanPgRelation * @@ -1319,15 +1304,15 @@ HeapTuple ScanPgRelation(Oid targetRelId, bool indexOK, bool force_non_historic) SysScanDesc pg_class_scan; ScanKeyData key[1]; Snapshot snapshot = NULL; - /* * If something goes wrong during backend startup, we might find ourselves * trying to read pg_class before we've selected a database. That ain't * gonna work, so bail out with a useful error message. If this happens, * it probably means a relcache entry that needs to be nailed isn't. 
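The three relcache hash-table macros deleted above are reworked by this patch into the RelationIdCacheInsertIntoLocal / RelationIdCacheLookupOnlyLocal / RelationCacheDeleteLocal helpers used throughout the rest of the file. For reference, a sketch of the lookup reconstructed from the removed macro body; the inline-function form is ours, not the patch's:

```
/* Reconstruction of the deleted RelationIdCacheLookup macro; the session's
 * RelationIdCache is a hash table keyed by relation OID. */
static inline Relation RelationIdCacheLookupSketch(Oid relationId)
{
    RelIdCacheEnt* hentry = (RelIdCacheEnt*)hash_search(
        u_sess->relcache_cxt.RelationIdCache, (void*)&relationId, HASH_FIND, NULL);
    return (hentry != NULL) ? hentry->reldesc : NULL;
}
```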
*/ - if (!OidIsValid(u_sess->proc_cxt.MyDatabaseId)) + if (!OidIsValid(GetMyDatabaseId())) { ereport(FATAL, (errmsg("cannot read pg_class without having selected a database"))); + } /* * form a scan key @@ -1359,7 +1344,7 @@ HeapTuple ScanPgRelation(Oid targetRelId, bool indexOK, bool force_non_historic) */ pg_class_scan = systable_beginscan( - pg_class_desc, ClassOidIndexId, indexOK && u_sess->relcache_cxt.criticalRelcachesBuilt, snapshot, 1, key); + pg_class_desc, ClassOidIndexId, indexOK && LocalRelCacheCriticalRelcachesBuilt(), snapshot, 1, key); #ifdef ENABLE_MULTIPLE_NODES /* If we do seqscan on pg_class, skip sync. */ @@ -1394,8 +1379,8 @@ static Relation AllocateRelationDesc(Form_pg_class relp) MemoryContext oldcxt; Form_pg_class relationForm; - /* Relcache entries must live in u_sess->cache_mem_cxt */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + /* Relcache entries must live in LocalMyDBCacheMemCxt */ + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); /* * allocate and zero space for new relation descriptor @@ -1423,7 +1408,8 @@ static Relation AllocateRelationDesc(Form_pg_class relp) */ relationForm = (Form_pg_class)palloc(sizeof(FormData_pg_class)); - MemCpy(relationForm, relp, CLASS_TUPLE_SIZE); + errno_t rc = memcpy_s(relationForm, CLASS_TUPLE_SIZE, relp, CLASS_TUPLE_SIZE); + securec_check(rc, "", ""); /* initialize relation tuple form */ relation->rd_rel = relationForm; @@ -1473,17 +1459,18 @@ static void RelationParseRelOptions(Relation relation, HeapTuple tuple) * we might not have any other for pg_class yet (consider executing this * code for pg_class itself) */ - options = extractRelOptions( - tuple, GetPgClassDescriptor(), RelationIsIndex(relation) ? relation->rd_am->amoptions : InvalidOid); + options = extractRelOptions(tuple, GetLSCPgClassDescriptor(), + RelationIsIndex(relation) ? relation->rd_am->amoptions : InvalidOid); /* - * Copy parsed data into u_sess->cache_mem_cxt. To guard against the + * Copy parsed data into LocalMyDBCacheMemCxt. To guard against the * possibility of leaks in the reloptions code, we want to do the actual * parsing in the caller's memory context and copy the results into - * u_sess->cache_mem_cxt after the fact. + * LocalMyDBCacheMemCxt after the fact. */ if (options != NULL) { - relation->rd_options = (bytea*)MemoryContextAlloc(u_sess->cache_mem_cxt, VARSIZE(options)); - MemCpy(relation->rd_options, options, VARSIZE(options)); + relation->rd_options = (bytea*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), VARSIZE(options)); + errno_t rc = memcpy_s(relation->rd_options, VARSIZE(options), options, VARSIZE(options)); + securec_check(rc, "", ""); pfree_ext(options); } } @@ -1520,7 +1507,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) relation->rd_att->tdtypmod = -1; /* unnecessary, but... 
*/ relation->rd_att->tdhasoid = relation->rd_rel->relhasoids; - constr = (TupleConstr*)MemoryContextAllocZero(u_sess->cache_mem_cxt, sizeof(TupleConstr)); + constr = (TupleConstr*)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), sizeof(TupleConstr)); constr->has_not_null = false; constr->has_generated_stored = false; } @@ -1553,7 +1540,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) */ pg_attribute_desc = heap_open(AttributeRelationId, AccessShareLock); pg_attribute_scan = systable_beginscan( - pg_attribute_desc, AttributeRelidNumIndexId, u_sess->relcache_cxt.criticalRelcachesBuilt, snapshot, 2, skey); + pg_attribute_desc, AttributeRelidNumIndexId, LocalRelCacheCriticalRelcachesBuilt(), snapshot, 2, skey); /* * add attribute data to relation->rd_att @@ -1567,7 +1554,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) return; /* set all the *TupInitDefVal* objects later. */ - initdvals = (TupInitDefVal*)MemoryContextAllocZero(u_sess->cache_mem_cxt, need * sizeof(TupInitDefVal)); + initdvals = (TupInitDefVal*)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), need * sizeof(TupInitDefVal)); while (HeapTupleIsValid(pg_attribute_tuple = systable_getnext(pg_attribute_scan))) { Form_pg_attribute attp; @@ -1611,8 +1598,9 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) /* fetch and copy the default value. */ bytea* val = DatumGetByteaP(dval); int len = VARSIZE(val) - VARHDRSZ; - char* buf = (char*)MemoryContextAlloc(u_sess->cache_mem_cxt, len); - MemCpy(buf, VARDATA(val), len); + char* buf = (char*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), len); + errno_t rc = memcpy_s(buf, len, VARDATA(val), len); + securec_check(rc, "", ""); initdvals[attp->attnum - 1].isNull = false; initdvals[attp->attnum - 1].datum = (Datum*)buf; @@ -1628,7 +1616,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) if (attp->atthasdef && !onlyLoadInitDefVal) { if (attrdef == NULL) attrdef = (AttrDefault*)MemoryContextAllocZero( - u_sess->cache_mem_cxt, RelationGetNumberOfAttributes(relation) * sizeof(AttrDefault)); + LocalMyDBCacheMemCxt(), RelationGetNumberOfAttributes(relation) * sizeof(AttrDefault)); attrdef[ndef].adnum = attp->attnum; attrdef[ndef].adbin = NULL; ndef++; @@ -1716,7 +1704,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) else constr->defval = attrdef; constr->num_defval = ndef; - constr->generatedCols = (char *)MemoryContextAllocZero(u_sess->cache_mem_cxt, + constr->generatedCols = (char *)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), RelationGetNumberOfAttributes(relation) * sizeof(char)); AttrDefaultFetch(relation); } else { @@ -1729,7 +1717,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) { constr->num_check = relation->rd_rel->relchecks; constr->check = - (ConstrCheck*)MemoryContextAllocZero(u_sess->cache_mem_cxt, constr->num_check * sizeof(ConstrCheck)); + (ConstrCheck*)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), constr->num_check * sizeof(ConstrCheck)); CheckConstraintFetch(relation); } else { constr->num_check = 0; @@ -1764,7 +1752,7 @@ static void RelationBuildTupleDesc(Relation relation, bool onlyLoadInitDefVal) * manageable. The other subsidiary data structures are simple enough * to be easy to free explicitly, anyway. 
*/ -static void RelationBuildRuleLock(Relation relation) +void RelationBuildRuleLock(Relation relation) { MemoryContext rulescxt; MemoryContext oldcxt; @@ -1782,11 +1770,13 @@ static void RelationBuildRuleLock(Relation relation) * Make the private context. Parameters are set on the assumption that * it'll probably not contain much data. */ - rulescxt = AllocSetContextCreate(u_sess->cache_mem_cxt, + + rulescxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(), RelationGetRelationName(relation), ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE); + relation->rd_rulescxt = rulescxt; /* @@ -1955,7 +1945,7 @@ static Relation CatalogRelationBuildDesc(const char* relationName, Oid relationR bool has_not_null = false; MemoryContext oldcxt; /* Relcache entries must live in t_thrd.mem_cxt.cache_mem_cxt */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); /* * allocate new relation desc, clear all fields of reldesc */ @@ -2054,7 +2044,7 @@ CatalogRelationBuildParam GetCatalogParam(Oid targetId) return result; } -static void SetBackendId(Relation relation) +void SetBackendId(Relation relation) { switch (relation->rd_rel->relpersistence) { case RELPERSISTENCE_UNLOGGED: @@ -2085,7 +2075,7 @@ static void SetBackendId(Relation relation) * (suggesting we are trying to access a just-deleted relation). * Any other error is reported via elog. */ -static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) +Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) { Relation relation; HeapTuple pg_class_tuple; @@ -2102,7 +2092,6 @@ static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) */ if (!HeapTupleIsValid(pg_class_tuple)) return NULL; - /* * get information from the pg_class_tuple */ @@ -2112,6 +2101,7 @@ static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) CatalogRelationBuildParam catalogParam = GetCatalogParam(targetRelId); if (catalogParam.oid != 0) { int natts = 0; + catalogParam.relationReltype = relp->reltype; relation = CatalogRelationBuildDesc(catalogParam.relationName, catalogParam.relationReltype, catalogParam.isshared, @@ -2199,6 +2189,11 @@ static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) /* extract reloptions if any */ RelationParseRelOptions(relation, pg_class_tuple); + relation->rd_att->tdhasuids = RELATION_HAS_UIDS(relation); + if (RELATION_HAS_UIDS(relation)) { + BuildUidHashCache(GetMyDatabaseId(), relid); + } + if (RelationIsRedistributeDest(relation)) relation->rd_att->tdisredistable = true; @@ -2259,6 +2254,12 @@ static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) relation->rd_createcsn = csnInfo.createcsn; relation->rd_changecsn = csnInfo.changecsn; + if (relation->rd_id >= FirstNormalObjectId && IS_DISASTER_RECOVER_MODE) { + TransactionId xmin = HeapTupleGetRawXmin(pg_class_tuple); + relation->xmin_csn = CSNLogGetDRCommitSeqNo(xmin); + } else { + relation->xmin_csn = InvalidCommitSeqNo; + } /* * now we can free the memory allocated for pg_class_tuple */ @@ -2284,14 +2285,14 @@ static Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey) relation->rd_isblockchain = false; } + /* It's fully valid */ + relation->rd_isvalid = true; /* * Insert newly created relation into relcache hash table, if requested. 
*/ - if (insertIt) - RelationCacheInsert(relation); - - /* It's fully valid */ - relation->rd_isvalid = true; + if (insertIt) { + RelationIdCacheInsertIntoLocal(relation); + } return relation; } @@ -2402,7 +2403,7 @@ RelationInitBucketKey(Relation relation, HeapTuple tuple) } /* build Bucket key */ - old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); /* Initialize int2verctor structure for attribute number array of bucket key*/ bkey = buildint2vector(NULL, nColumn); @@ -2433,13 +2434,16 @@ RelationInitBucketKey(Relation relation, HeapTuple tuple) * be treated as shared, even if relisshared isn't set. Hence we do not * look at relisshared here. */ -static void RelationInitPhysicalAddr(Relation relation) +void RelationInitPhysicalAddr(Relation relation) { relation->rd_node.spcNode = ConvertToRelfilenodeTblspcOid(relation->rd_rel->reltablespace); if (relation->rd_node.spcNode == GLOBALTABLESPACE_OID) relation->rd_node.dbNode = InvalidOid; - else - relation->rd_node.dbNode = u_sess->proc_cxt.MyDatabaseId; + else { + Assert(CheckMyDatabaseMatch()); + relation->rd_node.dbNode = GetMyDatabaseId(); + } + if (relation->rd_rel->relfilenode) { /* * Even if we are using a decoding snapshot that doesn't represent @@ -2495,12 +2499,6 @@ static void RelationInitPhysicalAddr(Relation relation) if (!RelationIsPartitioned(relation) && relation->storage_type == SEGMENT_PAGE) { relation->rd_node.bucketNode = SegmentBktId; } - - // setup page compression options - relation->rd_node.opt = 0; - if (relation->rd_options && REL_SUPPORT_COMPRESSED(relation)) { - SetupPageCompressForRelation(&relation->rd_node, &((StdRdOptions*)(relation->rd_options))->compress, RelationGetRelationName(relation)); - } } static void IndexRelationInitKeyNums(Relation relation) @@ -2513,7 +2511,7 @@ static void IndexRelationInitKeyNums(Relation relation) indnkeyatts = RelationGetNumberOfAttributes(relation); } else { Datum indkeyDatum = - heap_getattr(relation->rd_indextuple, Anum_pg_index_indnkeyatts, GetPgIndexDescriptor(), &isnull); + heap_getattr(relation->rd_indextuple, Anum_pg_index_indnkeyatts, GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indnkeyatts = DatumGetInt16(indkeyDatum); } @@ -2557,7 +2555,7 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for index %u", RelationGetRelid(relation)))); - oldcontext = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcontext = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); relation->rd_indextuple = heap_copytuple(tuple); relation->rd_index = (Form_pg_index)GETSTRUCT(relation->rd_indextuple); (void)MemoryContextSwitchTo(oldcontext); @@ -2576,7 +2574,7 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for access method %u", relation->rd_rel->relam))); - aform = (Form_pg_am)MemoryContextAlloc(u_sess->cache_mem_cxt, sizeof *aform); + aform = (Form_pg_am)MemoryContextAlloc(LocalMyDBCacheMemCxt(), sizeof *aform); rc = memcpy_s(aform, sizeof(*aform), GETSTRUCT(tuple), sizeof(*aform)); securec_check(rc, "\0", "\0"); ReleaseSysCache(tuple); @@ -2605,17 +2603,20 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) * Context parameters are set on the assumption that it'll probably not * contain much data. 
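A condensed restatement of the rd_node rules RelationInitPhysicalAddr applies above, pulled out of the diff for readability. The standalone-function form is illustrative; mapped relations (relfilenode == 0) resolve through the relation mapper, elided here.

```
/* Sketch: deriving a relcache entry's physical address. Shared catalogs
 * live in the global tablespace and carry no database OID. */
static void InitPhysicalAddrSketch(Relation relation)
{
    relation->rd_node.spcNode = ConvertToRelfilenodeTblspcOid(relation->rd_rel->reltablespace);
    if (relation->rd_node.spcNode == GLOBALTABLESPACE_OID) {
        relation->rd_node.dbNode = InvalidOid;        /* shared relation */
    } else {
        Assert(CheckMyDatabaseMatch());
        relation->rd_node.dbNode = GetMyDatabaseId(); /* database-local */
    }
    if (relation->rd_rel->relfilenode != InvalidOid) {
        relation->rd_node.relNode = relation->rd_rel->relfilenode;
    }
}
```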
*/ - indexcxt = AllocSetContextCreate(u_sess->cache_mem_cxt, + indexcxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(), RelationGetRelationName(relation), ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE); + relation->rd_indexcxt = indexcxt; /* * Allocate arrays to hold data. Opclasses are not used for included * columns, so allocate them for indnkeyatts only. */ + + relation->rd_aminfo = (RelationAmInfo*)MemoryContextAllocZero(indexcxt, sizeof(RelationAmInfo)); relation->rd_opfamily = (Oid*)MemoryContextAllocZero(indexcxt, indnkeyatts * sizeof(Oid)); @@ -2634,13 +2635,14 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) relation->rd_indcollation = (Oid*)MemoryContextAllocZero(indexcxt, indnkeyatts * sizeof(Oid)); relation->rd_indoption = (int16*)MemoryContextAllocZero(indexcxt, indnkeyatts * sizeof(int16)); - + /* * indcollation cannot be referenced directly through the C struct, * because it comes after the variable-width indkey field. Must extract * the datum the hard way... */ - indcollDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indcollation, GetPgIndexDescriptor(), &isnull); + indcollDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indcollation, + GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indcoll = (oidvector*)DatumGetPointer(indcollDatum); rc = memcpy_s(relation->rd_indcollation, indnkeyatts * sizeof(Oid), indcoll->values, indnkeyatts * sizeof(Oid)); @@ -2651,7 +2653,8 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) * comes after the variable-width indkey field. Must extract the datum * the hard way... */ - indclassDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indclass, GetPgIndexDescriptor(), &isnull); + indclassDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indclass, + GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indclass = (oidvector*)DatumGetPointer(indclassDatum); @@ -2660,13 +2663,14 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) * opfamilies and opclass input types. (aminfo and supportinfo are left * as zeroes, and are filled on-the-fly when used) */ - IndexSupportInitialize( - indclass, relation->rd_support, relation->rd_opfamily, relation->rd_opcintype, amsupport, indnkeyatts); + IndexSupportInitialize(relation, + indclass, amsupport, indnkeyatts); /* * Similarly extract indoption and copy it to the cache entry */ - indoptionDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indoption, GetPgIndexDescriptor(), &isnull); + indoptionDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indoption, + GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indoption = (int2vector*)DatumGetPointer(indoptionDatum); rc = memcpy_s(relation->rd_indoption, indnkeyatts * sizeof(int16), indoption->values, indnkeyatts * sizeof(int16)); @@ -2697,9 +2701,13 @@ void RelationInitIndexAccessInfo(Relation relation, HeapTuple index_tuple) * numbers must always match those obtainable from the system catalog entries * for the index and access method. 
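The "hard way" referred to above is worth seeing in one piece: indclass, indcollation, and indoption all sit after the variable-width indkey column of pg_index, so they cannot be read through the C struct and must be fetched as datums. A condensed sketch of the pattern used above (error handling trimmed):

```
/* Sketch: fetching a variable-width pg_index attribute by datum. */
bool isnull = false;
Datum indclassDatum = fastgetattr(relation->rd_indextuple, Anum_pg_index_indclass,
                                  GetLSCPgIndexDescriptor(), &isnull);
Assert(!isnull);
oidvector* indclass = (oidvector*)DatumGetPointer(indclassDatum);
/* indclass->values[0 .. indnkeyatts-1] now gives the opclass per key column */
```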
*/ -static void IndexSupportInitialize(oidvector* indclass, RegProcedure* indexSupport, Oid* opFamily, Oid* opcInType, - StrategyNumber maxSupportNumber, AttrNumber maxAttributeNumber) +static void IndexSupportInitialize(Relation relation, oidvector* indclass, StrategyNumber maxSupportNumber, + AttrNumber maxAttributeNumber) { + RegProcedure* indexSupport = relation->rd_support; + Oid* opFamily = relation->rd_opfamily; + Oid* opcInType = relation->rd_opcintype; + int attIndex; for (attIndex = 0; attIndex < maxAttributeNumber; attIndex++) { @@ -2709,7 +2717,7 @@ static void IndexSupportInitialize(oidvector* indclass, RegProcedure* indexSuppo ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("bogus pg_index tuple"))); /* look up the info for this opclass, using a cache */ - opcentry = LookupOpclassInfo(indclass->values[attIndex], maxSupportNumber); + opcentry = LookupOpclassInfo(relation, indclass->values[attIndex], maxSupportNumber); /* copy cached data into relcache entry */ opFamily[attIndex] = opcentry->opcfamily; @@ -2744,7 +2752,7 @@ static void IndexSupportInitialize(oidvector* indclass, RegProcedure* indexSuppo * be able to flush this cache as well as the contents of relcache entries * for indexes. */ -static OpClassCacheEnt* LookupOpclassInfo(Oid operatorClassOid, StrategyNumber numSupport) +static OpClassCacheEnt* LookupOpclassInfo(Relation relation, Oid operatorClassOid, StrategyNumber numSupport) { OpClassCacheEnt* opcentry = NULL; bool found = false; @@ -2757,7 +2765,6 @@ static OpClassCacheEnt* LookupOpclassInfo(Oid operatorClassOid, StrategyNumber n if (u_sess->relcache_cxt.OpClassCache == NULL) { /* First time through: initialize the opclass cache */ HASHCTL ctl; - MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(OpClassCacheEnt); @@ -2766,9 +2773,8 @@ static OpClassCacheEnt* LookupOpclassInfo(Oid operatorClassOid, StrategyNumber n u_sess->relcache_cxt.OpClassCache = hash_create("Operator class cache", 64, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); } - - opcentry = - (OpClassCacheEnt*)hash_search(u_sess->relcache_cxt.OpClassCache, (void*)&operatorClassOid, HASH_ENTER, &found); + opcentry = (OpClassCacheEnt *)hash_search( + u_sess->relcache_cxt.OpClassCache, (void *)&operatorClassOid, HASH_ENTER, &found); /* * After opcentry->supportProcs palloc failed, but opcentry has been inserted @@ -2811,7 +2817,7 @@ static OpClassCacheEnt* LookupOpclassInfo(Oid operatorClassOid, StrategyNumber n * looking up info for the opclasses used by the indexes we would like to * reference here. */ - indexOK = u_sess->relcache_cxt.criticalRelcachesBuilt || + indexOK = LocalRelCacheCriticalRelcachesBuilt() || (operatorClassOid != OID_BTREE_OPS_OID && operatorClassOid != INT2_BTREE_OPS_OID); /* @@ -2901,7 +2907,7 @@ static OpClassCacheEnt* LookupOpclassInfo(Oid operatorClassOid, StrategyNumber n * * NOTE: we assume we are already switched into u_sess->cache_mem_cxt. 
*/ -static void formrdesc(const char* relationName, Oid relationReltype, bool isshared, bool hasoids, int natts, +extern void formrdesc(const char* relationName, Oid relationReltype, bool isshared, bool hasoids, int natts, const FormData_pg_attribute* attrs) { Relation relation; @@ -2990,7 +2996,9 @@ static void formrdesc(const char* relationName, Oid relationReltype, bool isshar */ has_not_null = false; for (i = 0; i < natts; i++) { - MemCpy(relation->rd_att->attrs[i], &attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + errno_t rc = memcpy_s(relation->rd_att->attrs[i], ATTRIBUTE_FIXED_PART_SIZE, + &attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + securec_check(rc, "", ""); has_not_null = has_not_null || attrs[i].attnotnull; /* make sure attcacheoff is valid */ relation->rd_att->attrs[i]->attcacheoff = -1; @@ -3048,13 +3056,14 @@ static void formrdesc(const char* relationName, Oid relationReltype, bool isshar relation->rd_rel->relhasindex = true; } + /* It's fully valid */ + relation->rd_isvalid = true; + /* * add new reldesc to relcache */ - RelationCacheInsert(relation); - - /* It's fully valid */ - relation->rd_isvalid = true; + RelationIdCacheInsertIntoLocal(relation); + Assert(relation->rd_rel->relowner == InvalidOid); } /* ---------------------------------------------------------------- @@ -3080,6 +3089,10 @@ static void formrdesc(const char* relationName, Oid relationReltype, bool isshar */ Relation RelationIdGetRelation(Oid relationId) { + Assert(CheckMyDatabaseMatch()); + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.RelationIdGetRelation(relationId); + } Relation rd; /* @@ -3117,7 +3130,7 @@ Relation RelationIdGetRelation(Oid relationId) * no reldesc in the cache, so have RelationBuildDesc() build one and add * it. */ - rd = RelationBuildDesc(relationId, true); + rd = RelationBuildDesc(relationId, true, true); if (RelationIsValid(rd)) { RelationIncrementReferenceCount(rd); /* Insert TDE key to buffer cache for tde table */ @@ -3148,10 +3161,11 @@ void RelationIncrementReferenceCount(Relation rel) if (RelationIsPartition(rel) || RelationIsBucket(rel)) { return; } - ResourceOwnerEnlargeRelationRefs(t_thrd.utils_cxt.CurrentResourceOwner); + + ResourceOwnerEnlargeRelationRefs(LOCAL_SYSDB_RESOWNER); rel->rd_refcnt += 1; if (!IsBootstrapProcessingMode()) - ResourceOwnerRememberRelationRef(t_thrd.utils_cxt.CurrentResourceOwner, rel); + ResourceOwnerRememberRelationRef(LOCAL_SYSDB_RESOWNER, rel); } /* @@ -3167,7 +3181,7 @@ void RelationDecrementReferenceCount(Relation rel) Assert(rel->rd_refcnt > 0); rel->rd_refcnt -= 1; if (!IsBootstrapProcessingMode()) - ResourceOwnerForgetRelationRef(t_thrd.utils_cxt.CurrentResourceOwner, rel); + ResourceOwnerForgetRelationRef(LOCAL_SYSDB_RESOWNER, rel); } void RelationIncrementReferenceCount(Oid relationId) @@ -3178,7 +3192,7 @@ void RelationIncrementReferenceCount(Oid relationId) void RelationDecrementReferenceCount(Oid relationId) { Relation rd; - RelationIdCacheLookup(relationId, rd); + RelationIdCacheLookupOnlyLocal(relationId, rd); if (RelationIsValid(rd)) { RelationDecrementReferenceCount(rd); } @@ -3234,7 +3248,7 @@ void RelationClose(Relation relation) * the heap and index in that order. This is ensured in current usage by * only applying this to indexes being opened or having positive refcount. 
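With the refcount now remembered against LOCAL_SYSDB_RESOWNER instead of the thread's current resource owner, the pairing discipline itself is unchanged; a minimal usage sketch (normal callers go through relation_open/heap_open, which add locking on top):

```
/* Sketch: every successful RelationIdGetRelation must be matched by a
 * RelationClose, which drops the refcount tracked by the resowner. */
Relation rel = RelationIdGetRelation(relid);
if (RelationIsValid(rel)) {
    /* ... inspect rel->rd_rel, rel->rd_att, ... */
    RelationClose(rel);
}
```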
*/ -static void RelationReloadIndexInfo(Relation relation) +void RelationReloadIndexInfo(Relation relation) { bool indexOK = false; HeapTuple pg_class_tuple; @@ -3262,9 +3276,9 @@ static void RelationReloadIndexInfo(Relation relation) ereport(DEBUG1, (errmsg("relation->rd_rel->relisshared-%d criticalRelcachesBuilt-%d", relation->rd_rel->relisshared, - u_sess->relcache_cxt.criticalRelcachesBuilt))); + LocalRelCacheCriticalRelcachesBuilt()))); - if (relation->rd_rel->relisshared && !u_sess->relcache_cxt.criticalRelcachesBuilt) { + if (relation->rd_rel->relisshared && !LocalRelCacheCriticalRelcachesBuilt()) { relation->rd_isvalid = true; return; } @@ -3277,12 +3291,14 @@ static void RelationReloadIndexInfo(Relation relation) */ indexOK = (RelationGetRelid(relation) != ClassOidIndexId); pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK, false); - if (!HeapTupleIsValid(pg_class_tuple)) + if (!HeapTupleIsValid(pg_class_tuple)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not find pg_class tuple for index %u", RelationGetRelid(relation)))); + } relp = (Form_pg_class)GETSTRUCT(pg_class_tuple); - MemCpy(relation->rd_rel, relp, CLASS_TUPLE_SIZE); + errno_t rc = memcpy_s(relation->rd_rel, CLASS_TUPLE_SIZE, relp, CLASS_TUPLE_SIZE); + securec_check(rc, "", ""); /* Reload reloptions in case they changed */ if (relation->rd_options) pfree_ext(relation->rd_options); @@ -3357,13 +3373,37 @@ static void RelationDestroySliceMap(Relation relation) return; } + +static void RelationDestroyIndex(Relation rel) +{ + if (rel->rd_indexcxt != NULL) { + MemoryContextDelete(rel->rd_indexcxt); + rel->rd_indexcxt = NULL; + } +} + +void RelationDestroyRule(Relation rel) +{ + if (rel->rd_rulescxt != NULL) { + MemoryContextDelete(rel->rd_rulescxt); + rel->rd_rulescxt = NULL; + } +} + +void RelationDestroyRls(Relation rel) +{ + if (rel->rd_rlsdesc != NULL) { + MemoryContextDelete(rel->rd_rlsdesc->rlsCxt); + rel->rd_rlsdesc = NULL; + } +} /* * RelationDestroyRelation * * Physically delete a relation cache entry and all subsidiary data. * Caller must already have unhooked the entry from the hash table. */ -static void RelationDestroyRelation(Relation relation, bool remember_tupdesc) +void RelationDestroyRelation(Relation relation, bool remember_tupdesc) { Assert(RelationHasReferenceCountZero(relation)); @@ -3378,8 +3418,7 @@ static void RelationDestroyRelation(Relation relation, bool remember_tupdesc) * Free all the subsidiary data structures of the relcache entry, then the * entry itself. 
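The new RelationDestroyIndex/RelationDestroyRule/RelationDestroyRls helpers work because each subsidiary structure lives in its own small memory context, so teardown is a single MemoryContextDelete. Callers must also honor the unhook-before-destroy contract stated above; a sketch of that order, as the non-rebuild branch of RelationClearRelation performs it below:

```
/* Sketch: dropping a relcache entry outright (refcount already zero). */
RelationCacheDeleteLocal(relation);       /* unhook from the hash table first */
RelationDestroyRelation(relation, false); /* then free the entry and its
                                           * subsidiary data */
```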
*/ - if (relation->rd_rel) - pfree_ext(relation->rd_rel); + pfree_ext(relation->rd_rel); /* can't use DecrTupleDescRefCount here */ Assert(relation->rd_att->tdrefcount > 0); if (--relation->rd_att->tdrefcount == 0) { @@ -3399,29 +3438,18 @@ static void RelationDestroyRelation(Relation relation, bool remember_tupdesc) } list_free_ext(relation->rd_indexlist); bms_free_ext(relation->rd_indexattr); - bms_free_ext(relation->rd_pkattr); bms_free_ext(relation->rd_keyattr); + bms_free_ext(relation->rd_pkattr); bms_free_ext(relation->rd_idattr); - if (relation->rd_options) { - pfree_ext(relation->rd_pubactions); - } + pfree_ext(relation->rd_pubactions); FreeTriggerDesc(relation->trigdesc); - if (relation->rd_rlsdesc) { - MemoryContextDelete(relation->rd_rlsdesc->rlsCxt); - relation->rd_rlsdesc = NULL; - } - if (relation->rd_options) - pfree_ext(relation->rd_options); - if (relation->rd_indextuple) - pfree_ext(relation->rd_indextuple); - if (relation->rd_am) - pfree_ext(relation->rd_am); - if (relation->rd_indexcxt) - MemoryContextDelete(relation->rd_indexcxt); - if (relation->rd_rulescxt) - MemoryContextDelete(relation->rd_rulescxt); - if (relation->rd_fdwroutine) - pfree_ext(relation->rd_fdwroutine); + RelationDestroyRls(relation); + pfree_ext(relation->rd_options); + pfree_ext(relation->rd_indextuple); + pfree_ext(relation->rd_am); + RelationDestroyIndex(relation); + RelationDestroyRule(relation); + pfree_ext(relation->rd_fdwroutine); if (relation->partMap) { RelationDestroyPartitionMap(relation->partMap); } @@ -3461,7 +3489,7 @@ static void RelationDestroyRelation(Relation relation, bool remember_tupdesc) * to match the relation's refcnt status, but we keep it as a crosscheck * that we're doing what the caller expects. */ -static void RelationClearRelation(Relation relation, bool rebuild) +void RelationClearRelation(Relation relation, bool rebuild) { /* * As per notes above, a rel to be rebuilt MUST have refcnt > 0; while of @@ -3546,7 +3574,7 @@ static void RelationClearRelation(Relation relation, bool rebuild) */ if (!rebuild) { /* Remove it from the hash table */ - RelationCacheDelete(relation); + RelationCacheDeleteLocal(relation); /* And release storage */ RelationDestroyRelation(relation, false); @@ -3580,14 +3608,20 @@ static void RelationClearRelation(Relation relation, bool rebuild) * is good because whatever ref counts the entry may have do not * necessarily belong to that resource owner. 
*/ - Relation newrel; + Relation newrel = NULL; Oid save_relid = RelationGetRelid(relation); bool keep_tupdesc = false; bool keep_rules = false; bool buildkey = !REALTION_BUCKETKEY_INITED(relation); /* Build temporary entry, but don't link it into hashtable */ - newrel = RelationBuildDesc(save_relid, false, buildkey); + if (EnableLocalSysCache()) { + // reaching here means the local cache doesn't contain the relation or it is invalid, so search the global cache directly + newrel = t_thrd.lsc_cxt.lsc->tabdefcache.SearchRelationFromGlobalCopy(save_relid); + } + if (!RelationIsValid(newrel)) { + newrel = RelationBuildDesc(save_relid, false, buildkey); + } if (newrel == NULL) { /* * We can validly get here, if we're using a historic snapshot in @@ -3638,9 +3672,12 @@ static void RelationClearRelation(Relation relation, bool rebuild) { RelationData tmpstruct; - MemCpy(&tmpstruct, newrel, sizeof(RelationData)); - MemCpy(newrel, relation, sizeof(RelationData)); - MemCpy(relation, &tmpstruct, sizeof(RelationData)); + errno_t rc = memcpy_s(&tmpstruct, sizeof(RelationData), newrel, sizeof(RelationData)); + securec_check(rc, "", ""); + rc = memcpy_s(newrel, sizeof(RelationData), relation, sizeof(RelationData)); + securec_check(rc, "", ""); + rc = memcpy_s(relation, sizeof(RelationData), &tmpstruct, sizeof(RelationData)); + securec_check(rc, "", ""); } /* rd_smgr must not be swapped, due to back-links from smgr level */ @@ -3655,8 +3692,8 @@ static void RelationClearRelation(Relation relation, bool rebuild) /* un-swap rd_rel pointers, swap contents instead */ SWAPFIELD(Form_pg_class, rd_rel); /* ... but actually, we don't have to update newrel->rd_rel */ - MemCpy(relation->rd_rel, newrel->rd_rel, CLASS_TUPLE_SIZE); - + errno_t rc = memcpy_s(relation->rd_rel, CLASS_TUPLE_SIZE, newrel->rd_rel, CLASS_TUPLE_SIZE); + securec_check(rc, "", ""); if (newrel->partMap) { RebuildPartitonMap(newrel->partMap, relation->partMap); SWAPFIELD(PartitionMap*, partMap); @@ -3681,14 +3718,12 @@ static void RelationClearRelation(Relation relation, bool rebuild) /* pgstat_info must be preserved */ SWAPFIELD(struct PgStat_TableStatus*, pgstat_info); - /* mlog OID override must be preserved */ - SWAPFIELD(Oid, rd_mlogoid); - /* newcbi flag and its related information must be preserved */ if (newrel->newcbi) { SWAPFIELD(bool, newcbi); relation->rd_node.bucketNode = newrel->rd_node.bucketNode; } + SWAPFIELD(LocalRelationEntry*, entry); #undef SWAPFIELD /* And now we can throw away the temporary entry */ @@ -3737,7 +3772,7 @@ void RelationForgetRelation(Oid rid) { Relation relation; - RelationIdCacheLookup(rid, relation); + RelationIdCacheLookupOnlyLocal(rid, relation); if (!PointerIsValid(relation)) return; /* not in cache, nothing to do */ @@ -3766,12 +3801,16 @@ void RelationForgetRelation(Oid rid) */ void RelationCacheInvalidateEntry(Oid relationId) { + if (unlikely(relationId == InvalidOid)) { + RelationCacheInvalidate(); + return; + } Relation relation; - RelationIdCacheLookup(relationId, relation); + RelationIdCacheLookupOnlyLocal(relationId, relation); if (PointerIsValid(relation)) { - u_sess->relcache_cxt.relcacheInvalsReceived++; + AddLocalRelCacheInvalsReceived(1); RelationFlushRelation(relation); } } @@ -3806,8 +3845,13 @@ void RelationCacheInvalidateEntry(Oid relationId) * items. This should ensure that system catalogs are up to date before * we attempt to use them to reload information about other open relations.
*/ + void RelationCacheInvalidate(void) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.InvalidateRelationAll(); + return; + } HASH_SEQ_STATUS status; RelIdCacheEnt* idhentry = NULL; Relation relation; @@ -3896,8 +3940,13 @@ void RelationCacheInvalidate(void) * * Need to invalidate bucket_ptr after modify node group. */ + void RelationCacheInvalidateBuckets(void) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.InvalidateRelationBucketsAll(); + return; + } HASH_SEQ_STATUS status; RelIdCacheEnt* idhentry = NULL; Relation relation; @@ -3912,6 +3961,26 @@ void RelationCacheInvalidateBuckets(void) } } +void InvalidateRelationNodeList() +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.InvalidateRelationNodeList(); + return; + } + HASH_SEQ_STATUS status; + RelIdCacheEnt* idhentry = NULL; + Relation relation; + + hash_seq_init(&status, u_sess->relcache_cxt.RelationIdCache); + + while ((idhentry = (RelIdCacheEnt*)hash_seq_search(&status)) != NULL) { + relation = idhentry->reldesc; + if (relation->rd_locator_info != NULL) { + RelationClearRelation(relation, !RelationHasReferenceCountZero(relation)); + } + } +} + /* * RelationCloseSmgrByOid - close a relcache entry's smgr link * @@ -3922,7 +3991,7 @@ void RelationCloseSmgrByOid(Oid relationId) { Relation relation; - RelationIdCacheLookup(relationId, relation); + RelationIdCacheLookupOnlyLocal(relationId, relation); if (!PointerIsValid(relation)) return; /* not in cache, nothing to do */ @@ -3963,14 +4032,52 @@ TransactionId RelationGetRelFrozenxid64(Relation r) return relfrozenxid64; } +TransactionId PartGetRelFrozenxid64(Partition part) +{ + Relation partRel; + HeapTuple partTuple; + Datum datum; + bool isNull; + TransactionId relfrozenxid64; + + partTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(part->pd_id)); + if (!HeapTupleIsValid(partTuple)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("cache lookup failed for relation %u", part->pd_id))); + } + + partRel = heap_open(PartitionRelationId, AccessShareLock); + + datum = heap_getattr(partTuple, Anum_pg_partition_relfrozenxid64, RelationGetDescr(partRel), &isNull); + if (isNull) { + relfrozenxid64 = part->pd_part->relfrozenxid; + if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->nextXid, relfrozenxid64) || + !TransactionIdIsNormal(relfrozenxid64)) + relfrozenxid64 = FirstNormalTransactionId; + } else { + relfrozenxid64 = DatumGetTransactionId(datum); + } + + heap_close(partRel, AccessShareLock); + ReleaseSysCache(partTuple); + + return relfrozenxid64; +} + Oid RelationGetBucketOid(Relation relation) { return relation->rd_bucketoid; } /* Remember old tupleDescs when processing invalid messages */ + void RememberToFreeTupleDescAtEOX(TupleDesc td) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.RememberToFreeTupleDescAtEOX(td); + return; + } if (u_sess->relcache_cxt.EOXactTupleDescArray == NULL) { MemoryContext oldcxt = NULL; oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); @@ -3995,6 +4102,10 @@ void RememberToFreeTupleDescAtEOX(TupleDesc td) /* Free all tupleDescs remembered in RememberToFreeTupleDescAtEOX in a batch when a transaction ends */ void AtEOXact_FreeTupleDesc() { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.AtEOXact_FreeTupleDesc(); + return; + } if (u_sess->relcache_cxt.EOXactTupleDescArrayLen > 0) { Assert(u_sess->relcache_cxt.EOXactTupleDescArray != NULL); int i; @@ -4026,6 +4137,10 @@ void AtEOXact_FreeTupleDesc() */ void 
AtEOXact_RelationCache(bool isCommit) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.AtEOXact_RelationCache(isCommit); + return; + } HASH_SEQ_STATUS status; RelIdCacheEnt* idhentry = NULL; @@ -4124,6 +4239,11 @@ void AtEOXact_RelationCache(bool isCommit) relation->rd_pkindex = InvalidOid; relation->rd_indexvalid = 0; } + if (relation->partMap != NULL && relation->partMap->isDirty) { + RelationClearRelation(relation, false); + hash_seq_term(&status); + hash_seq_init(&status, u_sess->relcache_cxt.RelationIdCache); + } } /* Once done with the transaction, we can reset u_sess->relcache_cxt.need_eoxact_work */ @@ -4139,6 +4259,10 @@ void AtEOXact_RelationCache(bool isCommit) */ void AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.AtEOSubXact_RelationCache(isCommit, mySubid, parentSubid); + return; + } HASH_SEQ_STATUS status; RelIdCacheEnt* idhentry = NULL; @@ -4211,9 +4335,8 @@ void AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, SubTrans * and enter it into the relcache. */ Relation RelationBuildLocalRelation(const char* relname, Oid relnamespace, TupleDesc tupDesc, Oid relid, - Oid relfilenode, Oid reltablespace, bool shared_relation, bool mapped_relation, char relpersistence, - char relkind, int8 row_compress, Datum reloptions, TableAmType tam_type, int8 relindexsplit, - StorageType storage_type, Oid accessMethodObjectId) + Oid relfilenode, Oid reltablespace, bool shared_relation, bool mapped_relation, char relpersistence, char relkind, + int8 row_compress, TableAmType tam_type, int8 relindexsplit, StorageType storage_type) { Relation rel; MemoryContext oldcxt; @@ -4263,7 +4386,7 @@ Relation RelationBuildLocalRelation(const char* relname, Oid relnamespace, Tuple /* * switch to the cache context to create the relcache entry. */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); /* * allocate a new relation descriptor and fill in basic state fields. @@ -4286,7 +4409,7 @@ Relation RelationBuildLocalRelation(const char* relname, Oid relnamespace, Tuple rel->rd_newRelfilenodeSubid = InvalidSubTransactionId; /* must flag that we have rels created in this transaction */ - u_sess->relcache_cxt.need_eoxact_work = true; + SetLocalRelCacheNeedEOXactWork(true); /* * create a new tuple descriptor from the one passed in. 
We do this @@ -4329,7 +4452,6 @@ Relation RelationBuildLocalRelation(const char* relname, Oid relnamespace, Tuple rel->rd_rel->relowner = BOOTSTRAP_SUPERUSERID; rel->rd_rel->parttype = PARTTYPE_NON_PARTITIONED_RELATION; rel->rd_rel->relrowmovement = false; - rel->rd_rel->relam = accessMethodObjectId; /* set up persistence and relcache fields dependent on it */ rel->rd_rel->relpersistence = relpersistence; @@ -4386,31 +4508,26 @@ Relation RelationBuildLocalRelation(const char* relname, Oid relnamespace, Tuple RelationInitPhysicalAddr(rel); - /* compressed option was set by RelationInitPhysicalAddr if rel->rd_options != NULL */ - if (rel->rd_options == NULL && reloptions && SUPPORT_COMPRESSED(relkind, rel->rd_rel->relam)) { - StdRdOptions *options = (StdRdOptions *) default_reloptions(reloptions, false, RELOPT_KIND_HEAP); - SetupPageCompressForRelation(&rel->rd_node, &options->compress, RelationGetRelationName(rel)); - } - - /* materialized view not initially scannable */ if (relkind == RELKIND_MATVIEW) rel->rd_isscannable = false; else rel->rd_isscannable = true; + /* It's fully valid */ + rel->rd_isvalid = true; + /* * Okay to insert into the relcache hash tables. */ - RelationCacheInsert(rel); + RelationIdCacheInsertIntoLocal(rel); /* * done building relcache entry. */ (void)MemoryContextSwitchTo(oldcxt); - /* It's fully valid */ - rel->rd_isvalid = true; + /* * Caller expects us to pin the returned entry. @@ -4652,7 +4769,7 @@ void RelationSetNewRelfilenode(Relation relation, TransactionId freezeXid, Multi */ relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId(); /* ... and now we have eoxact cleanup work to do */ - u_sess->relcache_cxt.need_eoxact_work = true; + SetLocalRelCacheNeedEOXactWork(true); } RelFileNodeBackend CreateNewRelfilenode(Relation relation, TransactionId freezeXid) @@ -4680,6 +4797,34 @@ RelFileNodeBackend CreateNewRelfilenode(Relation relation, TransactionId freezeX return newrnode; } +RelFileNodeBackend CreateNewRelfilenodePart(Relation parent, Partition part) +{ + Oid newrelfilenode; + RelFileNodeBackend newrnode; + + /* Allocate a new relfilenode */ + /* + * Create storage for the main fork of the new relfilenode. + * + * NOTE: any conflict in relfilenode value will be caught here, if + * GetNewRelFileNode messes up for any reason. + */ + + newrelfilenode = GetNewRelFileNode(part->pd_part->reltablespace, NULL, parent->rd_rel->relpersistence); + + newrnode.node = part->pd_node; + newrnode.node.relNode = newrelfilenode; + newrnode.backend = parent->rd_backend; + if (RelationIsCrossBucketIndex(parent)) { + part->newcbi = true; + } + + partition_create_new_storage(parent, part, newrnode); + + return newrnode; +} + + void UpdatePgclass(Relation relation, TransactionId freezeXid, const RelFileNodeBackend *rnode) { Relation pg_class; @@ -4742,6 +4887,74 @@ void UpdatePgclass(Relation relation, TransactionId freezeXid, const RelFileNode return; } +void UpdatePartition(Relation parent, Partition part, TransactionId freezeXid, const RelFileNodeBackend *newrnode) +{ + Relation pg_partition; + HeapTuple tuple; + HeapTuple ntup; + Form_pg_partition partform; + Datum values[Natts_pg_partition]; + bool nulls[Natts_pg_partition]; + bool replaces[Natts_pg_partition]; + errno_t rc; + + /* + * Get a writable copy of the pg_partition tuple for the given relation. 
+ */ + pg_partition = heap_open(PartitionRelationId, RowExclusiveLock); + + tuple = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(PartitionGetPartid(part))); + + if (!HeapTupleIsValid(tuple)) { + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("could not find tuple for partition %u", PartitionGetPartid(part)))); + } + partform = (Form_pg_partition)GETSTRUCT(tuple); + + ereport(LOG, + (errmsg("Partition %s(%u) set newfilenode %u oldfilenode %u xid %lu", + PartitionGetPartitionName(part), + PartitionGetPartid(part), + newrnode->node.relNode, + part->pd_node.relNode, + GetCurrentTransactionIdIfAny()))); + + + Assert(!((part)->pd_part->relfilenode == InvalidOid)); + partform->relfilenode = newrnode->node.relNode; + + Assert(!RELKIND_IS_SEQUENCE(parent->rd_rel->relkind)); + partform->relpages = 0; /* it's empty until further notice */ + partform->reltuples = 0; + partform->relallvisible = 0; + + /* set relfrozenxid64 */ + partform->relfrozenxid = (ShortTransactionId)InvalidTransactionId; + + rc = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check(rc, "\0", "\0"); + rc = memset_s(nulls, sizeof(nulls), false, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + rc = memset_s(replaces, sizeof(replaces), false, sizeof(replaces)); + securec_check(rc, "\0", "\0"); + + replaces[Anum_pg_partition_relfrozenxid64 - 1] = true; + values[Anum_pg_partition_relfrozenxid64 - 1] = TransactionIdGetDatum(freezeXid); + + ntup = heap_modify_tuple(tuple, RelationGetDescr(pg_partition), values, nulls, replaces); + + simple_heap_update(pg_partition, &ntup->t_self, ntup); + CatalogUpdateIndexes(pg_partition, ntup); + + heap_freetuple_ext(ntup); + heap_freetuple_ext(tuple); + + heap_close(pg_partition, RowExclusiveLock); + + return; +} + /* * RelationCacheInitialize * @@ -4755,9 +4968,12 @@ void UpdatePgclass(Relation relation, TransactionId freezeXid, const RelFileNode */ #define INITRELCACHESIZE 400 - void RelationCacheInitialize(void) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.Init(); + return; + } HASHCTL ctl; /* @@ -4788,8 +5004,13 @@ void RelationCacheInitialize(void) * entries for the catalogs themselves. RelationCacheInitializePhase3 * will clean up as needed. */ + void RelationCacheInitializePhase2(void) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.InitPhase2(); + return; + } MemoryContext oldcxt; /* @@ -4845,8 +5066,7 @@ void RelationCacheInvalidOid(Relation relation) * Use need to use hardcoded info in schemapg.h to fix it. */ natts = relation->rd_rel->relnatts; - errno_t rc = EOK; - MemCpy((char*)relation->rd_rel, (char*)relp, CLASS_TUPLE_SIZE); + errno_t rc = memcpy_s((char*)relation->rd_rel, CLASS_TUPLE_SIZE, (char*)relp, CLASS_TUPLE_SIZE); securec_check(rc, "\0", "\0"); relation->rd_rel->relnatts = natts; @@ -4882,8 +5102,13 @@ void RelationCacheInvalidOid(Relation relation) * open any system catalog or use any catcache. The last step is to * rewrite the cache files if needed. */ + void RelationCacheInitializePhase3(void) { + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.InitPhase3(); + return; + } HASH_SEQ_STATUS status; RelIdCacheEnt* idhentry = NULL; MemoryContext oldcxt; @@ -5127,7 +5352,7 @@ retry: * indexoid is the OID of the target index, heapoid is the OID of the catalog * it belongs to. 
*/ -static void load_critical_index(Oid indexoid, Oid heapoid) +extern Relation load_critical_index(Oid indexoid, Oid heapoid) { Relation ird; int curRetryCnt = 0; @@ -5171,6 +5396,7 @@ retry_if_standby_mode: ird->rd_refcnt = 1; UnlockRelationOid(indexoid, AccessShareLock); UnlockRelationOid(heapoid, AccessShareLock); + return ird; } /* * GetPgClassDescriptor -- get a predefined tuple descriptor for pg_class @@ -5187,17 +5413,14 @@ retry_if_standby_mode: TupleDesc BuildHardcodedDescriptor(int natts, const FormData_pg_attribute* attrs, bool hasoids) { TupleDesc result; - MemoryContext oldcxt; int i; - - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); - result = CreateTemplateTupleDesc(natts, hasoids, TAM_HEAP); result->tdtypeid = RECORDOID; /* not right, but we don't care */ result->tdtypmod = -1; for (i = 0; i < natts; i++) { - MemCpy(result->attrs[i], &attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + errno_t rc = memcpy_s(result->attrs[i], ATTRIBUTE_FIXED_PART_SIZE, &attrs[i], ATTRIBUTE_FIXED_PART_SIZE); + securec_check(rc, "", ""); /* make sure attcacheoff is valid */ result->attrs[i]->attcacheoff = -1; } @@ -5207,16 +5430,17 @@ TupleDesc BuildHardcodedDescriptor(int natts, const FormData_pg_attribute* attrs /* Note: we don't bother to set up a TupleConstr entry */ - (void)MemoryContextSwitchTo(oldcxt); - return result; } -static TupleDesc GetPgClassDescriptor(void) +extern TupleDesc GetPgClassDescriptor(void) { /* Already done? */ - if (u_sess->relcache_cxt.pgclassdesc == NULL) + if (u_sess->relcache_cxt.pgclassdesc == NULL) { + MemoryContext oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); u_sess->relcache_cxt.pgclassdesc = BuildHardcodedDescriptor(Natts_pg_class, Desc_pg_class, true); + (void)MemoryContextSwitchTo(oldcxt); + } return u_sess->relcache_cxt.pgclassdesc; } @@ -5229,14 +5453,17 @@ static TupleDesc GetPgClassDescriptor(void) */ TupleDesc GetDefaultPgClassDesc(void) { - return GetPgClassDescriptor(); + return GetLSCPgClassDescriptor(); } -static TupleDesc GetPgIndexDescriptor(void) +TupleDesc GetPgIndexDescriptor(void) { /* Already done? 
*/ - if (u_sess->relcache_cxt.pgindexdesc == NULL) + if (u_sess->relcache_cxt.pgindexdesc == NULL) { + MemoryContext oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); u_sess->relcache_cxt.pgindexdesc = BuildHardcodedDescriptor(Natts_pg_index, Desc_pg_index, false); + (void)MemoryContextSwitchTo(oldcxt); + } return u_sess->relcache_cxt.pgindexdesc; } @@ -5249,7 +5476,7 @@ static TupleDesc GetPgIndexDescriptor(void) */ TupleDesc GetDefaultPgIndexDesc(void) { - return GetPgIndexDescriptor(); + return GetLSCPgIndexDescriptor(); } /* @@ -5315,7 +5542,7 @@ static void AttrDefaultFetch(Relation relation) ereport(WARNING, (errmsg("null adbin for attr %s of rel %s", NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname), RelationGetRelationName(relation)))); else - attrdef[i].adbin = MemoryContextStrdup(u_sess->cache_mem_cxt, TextDatumGetCString(val)); + attrdef[i].adbin = MemoryContextStrdup(LocalMyDBCacheMemCxt(), TextDatumGetCString(val)); break; } @@ -5371,7 +5598,7 @@ static void CheckConstraintFetch(Relation relation) check[found].ccvalid = conform->convalidated; check[found].ccnoinherit = conform->connoinherit; - check[found].ccname = MemoryContextStrdup(u_sess->cache_mem_cxt, NameStr(conform->conname)); + check[found].ccname = MemoryContextStrdup(LocalMyDBCacheMemCxt(), NameStr(conform->conname)); /* Grab and test conbin is actually set */ val = fastgetattr(htup, Anum_pg_constraint_conbin, conrel->rd_att, &isnull); @@ -5380,7 +5607,7 @@ static void CheckConstraintFetch(Relation relation) (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("null conbin for rel %s", RelationGetRelationName(relation)))); - check[found].ccbin = MemoryContextStrdup(u_sess->cache_mem_cxt, TextDatumGetCString(val)); + check[found].ccbin = MemoryContextStrdup(LocalMyDBCacheMemCxt(), TextDatumGetCString(val)); found++; } @@ -5396,7 +5623,7 @@ static void CheckConstraintFetch(Relation relation) void SaveCopyList(Relation relation, List* result, int oidIndex) { - MemoryContext oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + MemoryContext oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); if (relation->rd_indexlist) { list_free_ext(relation->rd_indexlist); } @@ -5564,7 +5791,7 @@ List* RelationGetIndexList(Relation relation, bool inc_unused) * because it comes after the variable-width indkey field. Must * extract the datum the hard way... */ - indclassDatum = heap_getattr(htup, Anum_pg_index_indclass, GetPgIndexDescriptor(), &isnull); + indclassDatum = heap_getattr(htup, Anum_pg_index_indclass, GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indclass = (oidvector*)DatumGetPointer(indclassDatum); /* @@ -5730,7 +5957,7 @@ int RelationGetIndexNum(Relation relation) * because it comes after the variable-width indkey field. Must * extract the datum the hard way... 
*/ - indclassDatum = heap_getattr(htup, Anum_pg_index_indclass, GetPgIndexDescriptor(), &isnull); + indclassDatum = heap_getattr(htup, Anum_pg_index_indclass, GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indclass = (oidvector*)DatumGetPointer(indclassDatum); @@ -5808,7 +6035,7 @@ void RelationSetIndexList(Relation relation, List* indexIds, Oid oidIndex) Assert(relation->rd_isnailed); /* Copy the list into the cache context (could fail for lack of mem) */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); indexIds = list_copy(indexIds); (void)MemoryContextSwitchTo(oldcxt); /* Okay to replace old list */ @@ -5822,7 +6049,7 @@ void RelationSetIndexList(Relation relation, List* indexIds, Oid oidIndex) relation->rd_pkindex = InvalidOid; relation->rd_indexvalid = 2; /* mark list as forced */ /* must flag that we have a forced index list */ - u_sess->relcache_cxt.need_eoxact_work = true; + SetLocalRelCacheNeedEOXactWork(true); } /* @@ -5947,7 +6174,8 @@ List* RelationGetIndexExpressions(Relation relation) * successfully completing the work, we copy it into the relcache entry. * This avoids problems if we get some sort of error partway through. */ - exprsDatum = heap_getattr(relation->rd_indextuple, Anum_pg_index_indexprs, GetPgIndexDescriptor(), &isnull); + exprsDatum = heap_getattr(relation->rd_indextuple, Anum_pg_index_indexprs, + GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); exprsString = TextDatumGetCString(exprsDatum); result = (List*)stringToNode(exprsString); @@ -5994,7 +6222,8 @@ List* RelationGetDummyIndexExpressions(Relation relation) } /* Extract raw node tree(s) from index tuple. */ - exprsDatum = heap_getattr(relation->rd_indextuple, Anum_pg_index_indexprs, GetPgIndexDescriptor(), &isnull); + exprsDatum = heap_getattr(relation->rd_indextuple, Anum_pg_index_indexprs, + GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); exprsString = TextDatumGetCString(exprsDatum); rawExprs = (List*)stringToNode(exprsString); @@ -6042,7 +6271,7 @@ List* RelationGetIndexPredicate(Relation relation) * successfully completing the work, we copy it into the relcache entry. * This avoids problems if we get some sort of error partway through. */ - predDatum = heap_getattr(relation->rd_indextuple, Anum_pg_index_indpred, GetPgIndexDescriptor(), &isnull); + predDatum = heap_getattr(relation->rd_indextuple, Anum_pg_index_indpred, GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); predString = TextDatumGetCString(predDatum); result = (List*)stringToNode(predString); @@ -6116,7 +6345,7 @@ static void ClusterConstraintFetch(__inout Relation relation) int numkeys = ARR_DIMS(arr)[0]; errno_t rc = EOK; - *pClusterKeys = (AttrNumber*)MemoryContextAllocZero(u_sess->cache_mem_cxt, numkeys * sizeof(AttrNumber)); + *pClusterKeys = (AttrNumber*)MemoryContextAllocZero(LocalMyDBCacheMemCxt(), numkeys * sizeof(AttrNumber)); rc = memcpy_s(*pClusterKeys, numkeys * sizeof(int16), ARR_DATA_PTR(arr), numkeys * sizeof(int16)); securec_check(rc, "\0", "\0"); @@ -6217,16 +6446,16 @@ Bitmapset* RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind att * won't be returned at all by RelationGetIndexList. 
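
The comment kept above records why indclass has to be fetched with heap_getattr() and a tuple descriptor rather than via GETSTRUCT: it sits after the variable-width indkey column, so its offset differs from tuple to tuple. A toy illustration of that constraint, assuming a simple length-prefixed variable-width encoding (the real varlena format differs in detail):

```
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

/* Toy row: fixed-width columns, then a variable-width one, then more
 * columns. Anything after the variable-width column has no compile-time
 * offset, which is why the real code walks the tuple with a descriptor
 * instead of using struct-offset (GETSTRUCT-style) access. */
struct ToyDescSketch {
    std::vector<bool> varwidth;   /* per column: variable width? */
    std::vector<size_t> fixedlen; /* width when fixed */
};

size_t toy_attr_offset(const ToyDescSketch& d, const uint8_t* row, size_t attno)
{
    size_t off = 0;
    for (size_t i = 0; i < attno; i++) {
        if (d.varwidth[i]) {
            uint32_t len; /* assumed length prefix; varlena differs in detail */
            std::memcpy(&len, row + off, sizeof(len));
            off += sizeof(len) + len;
        } else {
            off += d.fixedlen[i];
        }
    }
    return off; /* must be recomputed per tuple once a varwidth column precedes */
}
```
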
*/ indexattrs = NULL; - pkindexattrs = NULL; uindexattrs = NULL; + pkindexattrs = NULL; idindexattrs = NULL; foreach (l, indexoidlist) { Oid indexOid = lfirst_oid(l); Relation indexDesc; IndexInfo* indexInfo = NULL; int i; - bool isPK; /* primary key */ bool isKey = false; /* candidate key */ + bool isPK; /* primary key */ bool isIDKey = false; /* replica identity index */ indexDesc = index_open(indexOid, AccessShareLock); @@ -6234,10 +6463,10 @@ Bitmapset* RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind att /* Extract index key information from the index's pg_index row */ indexInfo = BuildIndexInfo(indexDesc); - /* Is this a primary key? */ - isPK = (indexOid == relpkindex); /* Can this index be referenced by a foreign key? */ isKey = indexInfo->ii_Unique && indexInfo->ii_Expressions == NIL && indexInfo->ii_Predicate == NIL; + /* Is this a primary key? */ + isPK = (indexOid == relpkindex); /* Is this index the configured (or default) replica identity? */ isIDKey = (indexOid == relreplindex); @@ -6294,9 +6523,9 @@ Bitmapset* RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind att * leave the relcache entry looking like the other ones are valid but * empty. */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); - relation->rd_pkattr = bms_copy(pkindexattrs); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); relation->rd_keyattr = bms_copy(uindexattrs); + relation->rd_pkattr = bms_copy(pkindexattrs); relation->rd_idattr = bms_copy(idindexattrs); relation->rd_indexattr = bms_copy(indexattrs); (void)MemoryContextSwitchTo(oldcxt); @@ -6365,7 +6594,7 @@ Bitmapset* IndexGetAttrBitmap(Relation relation, IndexInfo *indexInfo) pull_varattnos((Node*)indexInfo->ii_Predicate, 1, &indexattrs); /* Now save copies of the bitmaps in the relcache entry */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); relation->rd_indexattr = bms_copy(indexattrs); (void)MemoryContextSwitchTo(oldcxt); @@ -6404,9 +6633,12 @@ void RelationGetExclusionInfo(Relation indexRelation, Oid** operators, Oid** pro /* Quick exit if we have the data cached already */ if (indexRelation->rd_exclstrats != NULL) { - MemCpy(ops, indexRelation->rd_exclops, sizeof(Oid) * indnkeyatts); - MemCpy(funcs, indexRelation->rd_exclprocs, sizeof(Oid) * indnkeyatts); - MemCpy(strats, indexRelation->rd_exclstrats, sizeof(uint16) * indnkeyatts); + errno_t rc = memcpy_s(ops, sizeof(Oid) * indnkeyatts, indexRelation->rd_exclops, sizeof(Oid) * indnkeyatts); + securec_check(rc, "", ""); + rc = memcpy_s(funcs, sizeof(Oid) * indnkeyatts, indexRelation->rd_exclprocs, sizeof(Oid) * indnkeyatts); + securec_check(rc, "", ""); + rc = memcpy_s(strats, sizeof(uint16) * indnkeyatts, indexRelation->rd_exclstrats, sizeof(uint16) * indnkeyatts); + securec_check(rc, "", ""); return; } @@ -6484,12 +6716,17 @@ void RelationGetExclusionInfo(Relation indexRelation, Oid** operators, Oid** pro /* Save a copy of the results in the relcache entry. 
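
Another recurring change in these hunks swaps bare MemCpy() calls for memcpy_s() followed by securec_check() on the return code, so every copy states its destination capacity explicitly. A compilable sketch of the idiom with stand-in implementations; the real memcpy_s and securec_check come from the bundled securec library, and the names below are hypothetical:

```
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

/* Stand-in for the securec bounded copy: refuses to write past dstMax. */
static int memcpy_s_sketch(void* dst, size_t dstMax, const void* src, size_t n)
{
    if (dst == nullptr || src == nullptr || n > dstMax) {
        return -1;        /* error code instead of a silent overflow */
    }
    std::memcpy(dst, src, n);
    return 0;             /* EOK */
}

#define securec_check_sketch(rc)                              \
    do {                                                      \
        if ((rc) != 0) {                                      \
            std::fprintf(stderr, "secure function failed\n"); \
            std::abort();                                     \
        }                                                     \
    } while (0)

/* Shape of the converted call sites: capacity first, then the copy length. */
void copy_strategies_sketch(uint16_t* strats, const uint16_t* cached, size_t nkeyatts)
{
    int rc = memcpy_s_sketch(strats, sizeof(uint16_t) * nkeyatts,
                             cached, sizeof(uint16_t) * nkeyatts);
    securec_check_sketch(rc);
}
```
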
*/ oldcxt = MemoryContextSwitchTo(indexRelation->rd_indexcxt); + indexRelation->rd_exclops = (Oid*)palloc(sizeof(Oid) * indnkeyatts); indexRelation->rd_exclprocs = (Oid*)palloc(sizeof(Oid) * indnkeyatts); indexRelation->rd_exclstrats = (uint16*)palloc(sizeof(uint16) * indnkeyatts); - MemCpy(indexRelation->rd_exclops, ops, sizeof(Oid) * indnkeyatts); - MemCpy(indexRelation->rd_exclprocs, funcs, sizeof(Oid) * indnkeyatts); - MemCpy(indexRelation->rd_exclstrats, strats, sizeof(uint16) * indnkeyatts); + + errno_t rc = memcpy_s(indexRelation->rd_exclops, sizeof(Oid) * indnkeyatts, ops, sizeof(Oid) * indnkeyatts); + securec_check(rc, "", ""); + rc = memcpy_s(indexRelation->rd_exclprocs, sizeof(Oid) * indnkeyatts, funcs, sizeof(Oid) * indnkeyatts); + securec_check(rc, "", ""); + rc = memcpy_s(indexRelation->rd_exclstrats, sizeof(uint16) * indnkeyatts, strats, sizeof(uint16) * indnkeyatts); + securec_check(rc, "", ""); (void)MemoryContextSwitchTo(oldcxt); } @@ -6504,6 +6741,14 @@ struct PublicationActions* GetRelationPublicationActions(Relation relation) int rc; PublicationActions* pubactions = (PublicationActions*)palloc0(sizeof(PublicationActions)); + /* + * If not publishable, it publishes no actions. (pgoutput_change() will + * ignore it.) + */ + if (!is_publishable_relation(relation)) { + return pubactions; + } + if (relation->rd_pubactions) { errno_t rcs = memcpy_s(pubactions, sizeof(PublicationActions), relation->rd_pubactions, sizeof(PublicationActions)); @@ -6548,8 +6793,8 @@ struct PublicationActions* GetRelationPublicationActions(Relation relation) } /* Now save copy of the actions in the relcache entry. */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); - relation->rd_pubactions = (PublicationActions*)palloc(sizeof(PublicationActions)); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); + relation->rd_pubactions = palloc(sizeof(PublicationActions)); rc = memcpy_s(relation->rd_pubactions, sizeof(PublicationActions), pubactions, sizeof(PublicationActions)); securec_check(rc, "", ""); MemoryContextSwitchTo(oldcxt); @@ -6931,8 +7176,8 @@ static bool load_relcache_init_file(bool shared) rel->rd_oidindex = InvalidOid; rel->rd_pkindex = InvalidOid; rel->rd_indexattr = NULL; - rel->rd_pkattr = NULL; rel->rd_keyattr = NULL; + rel->rd_pkattr = NULL; rel->rd_idattr = NULL; rel->rd_pubactions = NULL; rel->rd_createSubid = InvalidSubTransactionId; @@ -6973,7 +7218,7 @@ static bool load_relcache_init_file(bool shared) * Now insert all the new relcache entries into the cache. */ for (relno = 0; relno < num_rels; relno++) { - RelationCacheInsert(rels[relno]); + RelationIdCacheInsertIntoLocal(rels[relno]); /* also make a list of their OIDs, for RelationIdIsInInitFile */ if (!shared) u_sess->relcache_cxt.initFileRelationIds = @@ -7216,7 +7461,7 @@ static void write_item(const void* data, Size len, FILE* fp) */ bool RelationIdIsInInitFile(Oid relationId) { - return list_member_oid(u_sess->relcache_cxt.initFileRelationIds, relationId); + return list_member_oid(LocalRelCacheInitFileRelationIds(), relationId); } /* @@ -7460,7 +7705,7 @@ List* PartitionGetPartIndexList(Partition part, bool inc_unused) * because it comes after the variable-width indkey field. Must * extract the datum the hard way... 
*/ - indclassDatum = heap_getattr(indexTup, Anum_pg_index_indclass, GetPgIndexDescriptor(), &isnull); + indclassDatum = heap_getattr(indexTup, Anum_pg_index_indclass, GetLSCPgIndexDescriptor(), &isnull); Assert(!isnull); indclass = (oidvector*)DatumGetPointer(indclassDatum); @@ -7485,7 +7730,7 @@ List* PartitionGetPartIndexList(Partition part, bool inc_unused) heap_close(partrel, AccessShareLock); /* Now save a copy of the completed list in the relcache entry. */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); if (part->pd_indexlist) { list_free_ext(part->pd_indexlist); } @@ -7761,6 +8006,15 @@ Relation tuple_get_rel(HeapTuple pg_class_tuple, LOCKMODE lockmode, TupleDesc tu if (lockmode != NoLock) { LockRelationOid(relid, lockmode); } + if (EnableLocalSysCache()) { + // local and global may have this relation + Relation rel = t_thrd.lsc_cxt.lsc->tabdefcache.SearchRelation(relid); + if (RelationIsValid(rel)) { + pgstat_initstats(rel); + RelationIncrementReferenceCount(rel); + return rel; + } + } Form_pg_class relp = (Form_pg_class)GETSTRUCT(pg_class_tuple); /* allocate storage for the relation descriptor, and copy pg_class_tuple to relation->rd_rel. */ Relation relation = AllocateRelationDesc(relp); @@ -7772,7 +8026,7 @@ Relation tuple_get_rel(HeapTuple pg_class_tuple, LOCKMODE lockmode, TupleDesc tu relation->rd_backend = InvalidBackendId; relation->rd_islocaltemp = false; /* initialize the tuple descriptor (relation->rd_att). */ - oldcxt = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + oldcxt = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); relation->rd_att = CreateTupleDescCopy(tuple_desc); relation->rd_att->tdtypeid = relation->rd_rel->reltype; @@ -7808,8 +8062,8 @@ Relation tuple_get_rel(HeapTuple pg_class_tuple, LOCKMODE lockmode, TupleDesc tu relation->rd_isvalid = true; pgstat_initstats(relation); - RelationCacheInsert(relation); relation->rd_att->tdrefcount = 1; + RelationIdCacheInsertIntoLocal(relation); if (RelationIsValid(relation)) { RelationIncrementReferenceCount(relation); } @@ -7878,44 +8132,3 @@ char RelationGetRelReplident(Relation r) return relreplident; } -void SetupPageCompressForRelation(RelFileNode* node, PageCompressOpts* compress_options, const char* relationName) -{ - uint1 algorithm = compress_options->compressType; - if (algorithm == COMPRESS_TYPE_NONE) { - node->opt = 0; - } else { - if (!SUPPORT_PAGE_COMPRESSION) { - ereport(ERROR, (errmsg("unsupported page compression on this platform"))); - } - - uint1 compressLevel; - bool symbol = false; - if (compress_options->compressLevel >= 0) { - symbol = true; - compressLevel = compress_options->compressLevel; - } else { - symbol = false; - compressLevel = -compress_options->compressLevel; - } - - bool success = false; - uint1 chunkSize = ConvertChunkSize(compress_options->compressChunkSize, &success); - if (!success) { - ereport(ERROR, (errmsg("invalid compress_chunk_size %d , must be one of %d, %d, %d or %d for %s", - compress_options->compressChunkSize, BLCKSZ / 16, BLCKSZ / 8, BLCKSZ / 4, BLCKSZ / 2, - relationName))); - } - - uint1 preallocChunks = 0; - if (compress_options->compressPreallocChunks >= BLCKSZ / compress_options->compressChunkSize) { - ereport(ERROR, (errmsg("invalid compress_prealloc_chunks %d , must be less than %d for %s", - compress_options->compressPreallocChunks, - BLCKSZ / compress_options->compressChunkSize, relationName))); - } else { - preallocChunks = (uint1)(compress_options->compressPreallocChunks); - } - node->opt = 0; 
- SET_COMPRESS_OPTION((*node), compress_options->compressByteConvert, compress_options->compressDiffConvert, - preallocChunks, symbol, compressLevel, algorithm, chunkSize); - } -} \ No newline at end of file diff --git a/src/common/backend/utils/cache/relfilenodemap.cpp b/src/common/backend/utils/cache/relfilenodemap.cpp index 82af0c38a..5e3181a2e 100644 --- a/src/common/backend/utils/cache/relfilenodemap.cpp +++ b/src/common/backend/utils/cache/relfilenodemap.cpp @@ -35,6 +35,7 @@ #include "utils/relmapper.h" #include "utils/snapmgr.h" #include "utils/syscache.h" +#include "utils/knl_localsysdbcache.h" const int HASH_ELEM_SIZE = 1024; @@ -52,14 +53,15 @@ typedef struct { * RelfilenodeMapInvalidateCallback * Flush mapping entries when pg_class is updated in a relevant fashion. */ -static void RelfilenodeMapInvalidateCallback(Datum arg, Oid relid) +void RelfilenodeMapInvalidateCallback(Datum arg, Oid relid) { HASH_SEQ_STATUS status; RelfilenodeMapEntry* entry = NULL; + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* callback only gets registered after creating the hash */ - Assert(u_sess->relmap_cxt.RelfilenodeMapHash != NULL); + Assert(relmap_cxt->RelfilenodeMapHash != NULL); - hash_seq_init(&status, u_sess->relmap_cxt.RelfilenodeMapHash); + hash_seq_init(&status, relmap_cxt->RelfilenodeMapHash); while ((entry = (RelfilenodeMapEntry*)hash_seq_search(&status)) != NULL) { /* * If relid is InvalidOid, signalling a complete reset, we must remove @@ -69,21 +71,22 @@ static void RelfilenodeMapInvalidateCallback(Datum arg, Oid relid) if (relid == InvalidOid || /* complete reset */ entry->relid == InvalidOid || /* negative cache entry */ entry->relid == relid) { /* individual flushed relation */ - if (hash_search(u_sess->relmap_cxt.RelfilenodeMapHash, (void *) &entry->key, HASH_REMOVE, NULL) == NULL) { + if (hash_search(relmap_cxt->RelfilenodeMapHash, (void *) &entry->key, HASH_REMOVE, NULL) == NULL) { ereport(ERROR, (errcode(ERRCODE_RELFILENODEMAP), errmsg("hash table corrupted"))); } } } } -static void UHeapRelfilenodeMapInvalidateCallback(Datum arg, Oid relid) +void UHeapRelfilenodeMapInvalidateCallback(Datum arg, Oid relid) { HASH_SEQ_STATUS status; RelfilenodeMapEntry* entry = NULL; + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* callback only gets registered after creating the hash */ - Assert(u_sess->relmap_cxt.UHeapRelfilenodeMapHash != NULL); + Assert(relmap_cxt->UHeapRelfilenodeMapHash != NULL); - hash_seq_init(&status, u_sess->relmap_cxt.UHeapRelfilenodeMapHash); + hash_seq_init(&status, relmap_cxt->UHeapRelfilenodeMapHash); while ((entry = (RelfilenodeMapEntry*)hash_seq_search(&status)) != NULL) { /* * If relid is InvalidOid, signalling a complete reset, we must remove @@ -93,7 +96,7 @@ static void UHeapRelfilenodeMapInvalidateCallback(Datum arg, Oid relid) if (relid == InvalidOid || /* complete reset */ entry->relid == InvalidOid || /* negative cache entry */ entry->relid == relid) { /* individual flushed relation */ - if (hash_search(u_sess->relmap_cxt.UHeapRelfilenodeMapHash, + if (hash_search(relmap_cxt->UHeapRelfilenodeMapHash, (void *) &entry->key, HASH_REMOVE, NULL) == NULL) { ereport(ERROR, (errcode(ERRCODE_RELFILENODEMAP), errmsg("hash table corrupted"))); } @@ -108,21 +111,21 @@ static void UHeapRelfilenodeMapInvalidateCallback(Datum arg, Oid relid) static void InitializeRelfilenodeMap() { int i; - + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* build skey */ - errno_t ret = memset_s(&u_sess->relmap_cxt.relfilenodeSkey, 
sizeof(u_sess->relmap_cxt.relfilenodeSkey), 0, - sizeof(u_sess->relmap_cxt.relfilenodeSkey)); + errno_t ret = memset_s(&relmap_cxt->relfilenodeSkey, sizeof(relmap_cxt->relfilenodeSkey), 0, + sizeof(relmap_cxt->relfilenodeSkey)); securec_check(ret, "\0", "\0"); for (i = 0; i < 2; i++) { - fmgr_info_cxt(F_OIDEQ, &u_sess->relmap_cxt.relfilenodeSkey[i].sk_func, u_sess->cache_mem_cxt); - u_sess->relmap_cxt.relfilenodeSkey[i].sk_strategy = BTEqualStrategyNumber; - u_sess->relmap_cxt.relfilenodeSkey[i].sk_subtype = InvalidOid; - u_sess->relmap_cxt.relfilenodeSkey[i].sk_collation = InvalidOid; + fmgr_info_cxt(F_OIDEQ, &relmap_cxt->relfilenodeSkey[i].sk_func, LocalMyDBCacheMemCxt()); + relmap_cxt->relfilenodeSkey[i].sk_strategy = BTEqualStrategyNumber; + relmap_cxt->relfilenodeSkey[i].sk_subtype = InvalidOid; + relmap_cxt->relfilenodeSkey[i].sk_collation = InvalidOid; } - u_sess->relmap_cxt.relfilenodeSkey[0].sk_attno = Anum_pg_class_reltablespace; - u_sess->relmap_cxt.relfilenodeSkey[1].sk_attno = Anum_pg_class_relfilenode; + relmap_cxt->relfilenodeSkey[0].sk_attno = Anum_pg_class_reltablespace; + relmap_cxt->relfilenodeSkey[1].sk_attno = Anum_pg_class_relfilenode; /* Initialize the hash table. */ HASHCTL ctl; @@ -131,40 +134,55 @@ static void InitializeRelfilenodeMap() ctl.keysize = sizeof(RelfilenodeMapKey); ctl.entrysize = sizeof(RelfilenodeMapEntry); ctl.hash = tag_hash; - ctl.hcxt = u_sess->cache_mem_cxt; + if (EnableLocalSysCache()) { + Assert(t_thrd.lsc_cxt.lsc->relmap_cxt.RelfilenodeMapHash == NULL); + ctl.hcxt = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; - /* - * Only create the u_sess->relmap_cxt.RelfilenodeMapHash now, so we don't end up partially - * initialized when fmgr_info_cxt() above ERRORs out with an out of memory - * error. - */ - u_sess->relmap_cxt.RelfilenodeMapHash = - hash_create("RelfilenodeMap cache", 1024, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + /* + * Only create the relmap_cxt->RelfilenodeMapHash now, so we don't end up partially + * initialized when fmgr_info_cxt() above ERRORs out with an out of memory + * error. + */ + t_thrd.lsc_cxt.lsc->relmap_cxt.RelfilenodeMapHash = + hash_create("RelfilenodeMap cache", 1024, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - /* Watch for invalidation events. */ - CacheRegisterRelcacheCallback(RelfilenodeMapInvalidateCallback, (Datum)0); + /* Watch for invalidation events. */ + CacheRegisterThreadRelcacheCallback(RelfilenodeMapInvalidateCallback, (Datum)0); + } else { + ctl.hcxt = u_sess->cache_mem_cxt; + /* + * Only create the relmap_cxt->RelfilenodeMapHash now, so we don't end up partially + * initialized when fmgr_info_cxt() above ERRORs out with an out of memory + * error. + */ + u_sess->relmap_cxt.RelfilenodeMapHash = + hash_create("RelfilenodeMap cache", 1024, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + + /* Watch for invalidation events. 
*/ + CacheRegisterSessionRelcacheCallback(RelfilenodeMapInvalidateCallback, (Datum)0); + } } static void UHeapInitRelfilenodeMap(void) { HASHCTL ctl; int i; - + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* build skey */ - errno_t ret = memset_s(&u_sess->relmap_cxt.uHeapRelfilenodeSkey, - sizeof(u_sess->relmap_cxt.relfilenodeSkey), 0, sizeof(u_sess->relmap_cxt.relfilenodeSkey)); + errno_t ret = memset_s(&relmap_cxt->uHeapRelfilenodeSkey, + sizeof(relmap_cxt->relfilenodeSkey), 0, sizeof(relmap_cxt->relfilenodeSkey)); securec_check(ret, "\0", "\0"); for (i = 0; i < 2; i++) { - fmgr_info_cxt(F_OIDEQ, &u_sess->relmap_cxt.uHeapRelfilenodeSkey[i].sk_func, - u_sess->cache_mem_cxt); - u_sess->relmap_cxt.uHeapRelfilenodeSkey[i].sk_strategy = BTEqualStrategyNumber; - u_sess->relmap_cxt.uHeapRelfilenodeSkey[i].sk_subtype = InvalidOid; - u_sess->relmap_cxt.uHeapRelfilenodeSkey[i].sk_collation = InvalidOid; + fmgr_info_cxt(F_OIDEQ, &relmap_cxt->uHeapRelfilenodeSkey[i].sk_func, + LocalMyDBCacheMemCxt()); + relmap_cxt->uHeapRelfilenodeSkey[i].sk_strategy = BTEqualStrategyNumber; + relmap_cxt->uHeapRelfilenodeSkey[i].sk_subtype = InvalidOid; + relmap_cxt->uHeapRelfilenodeSkey[i].sk_collation = InvalidOid; } - u_sess->relmap_cxt.uHeapRelfilenodeSkey[0].sk_attno = Anum_pg_class_reltablespace; - u_sess->relmap_cxt.uHeapRelfilenodeSkey[1].sk_attno = Anum_pg_class_relfilenode; + relmap_cxt->uHeapRelfilenodeSkey[0].sk_attno = Anum_pg_class_reltablespace; + relmap_cxt->uHeapRelfilenodeSkey[1].sk_attno = Anum_pg_class_relfilenode; /* Initialize the hash table. */ ret = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); @@ -172,18 +190,34 @@ static void UHeapInitRelfilenodeMap(void) ctl.keysize = sizeof(RelfilenodeMapKey); ctl.entrysize = sizeof(RelfilenodeMapEntry); ctl.hash = tag_hash; - ctl.hcxt = u_sess->cache_mem_cxt; + if (EnableLocalSysCache()) { + Assert(relmap_cxt->UHeapRelfilenodeMapHash == NULL); + ctl.hcxt = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; - /* - * Only create the u_sess->relmap_cxt.RelfilenodeMapHash now, so we don't end up partially - * initialized when fmgr_info_cxt() above ERRORs out with an out of memory - * error. - */ - u_sess->relmap_cxt.UHeapRelfilenodeMapHash = hash_create("UHeapRelfilenodeMap cache", - HASH_ELEM_SIZE, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + /* + * Only create the relmap_cxt->RelfilenodeMapHash now, so we don't end up partially + * initialized when fmgr_info_cxt() above ERRORs out with an out of memory + * error. + */ + relmap_cxt->UHeapRelfilenodeMapHash = hash_create("UHeapRelfilenodeMap cache", + HASH_ELEM_SIZE, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - /* Watch for invalidation events. */ - CacheRegisterRelcacheCallback(UHeapRelfilenodeMapInvalidateCallback, (Datum)0); + /* Watch for invalidation events. */ + CacheRegisterThreadRelcacheCallback(UHeapRelfilenodeMapInvalidateCallback, (Datum)0); + } else { + ctl.hcxt = u_sess->cache_mem_cxt; + + /* + * Only create the relmap_cxt->RelfilenodeMapHash now, so we don't end up partially + * initialized when fmgr_info_cxt() above ERRORs out with an out of memory + * error. + */ + u_sess->relmap_cxt.UHeapRelfilenodeMapHash = hash_create("UHeapRelfilenodeMap cache", + HASH_ELEM_SIZE, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + + /* Watch for invalidation events. 
*/ + CacheRegisterSessionRelcacheCallback(UHeapRelfilenodeMapInvalidateCallback, (Datum)0); + } } /* @@ -203,7 +237,8 @@ Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode, bool segment) ScanKeyData skey[2]; Oid relid; int rc = 0; - if (u_sess->relmap_cxt.RelfilenodeMapHash == NULL) { + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); + if (relmap_cxt->RelfilenodeMapHash == NULL) { InitializeRelfilenodeMap(); } @@ -228,7 +263,7 @@ Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode, bool segment) * since querying invalid values isn't supposed to be a frequent thing, * but it's basically free. */ - entry = (RelfilenodeMapEntry*)hash_search(u_sess->relmap_cxt.RelfilenodeMapHash, (void*)&key, HASH_FIND, &found); + entry = (RelfilenodeMapEntry*)hash_search(relmap_cxt->RelfilenodeMapHash, (void*)&key, HASH_FIND, &found); if (found) return entry->relid; @@ -254,7 +289,7 @@ Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode, bool segment) relation = heap_open(RelationRelationId, AccessShareLock); /* copy scankey to local copy, it will be modified during the scan */ - rc = memcpy_s(skey, sizeof(skey), u_sess->relmap_cxt.relfilenodeSkey, sizeof(skey)); + rc = memcpy_s(skey, sizeof(skey), relmap_cxt->relfilenodeSkey, sizeof(skey)); securec_check(rc, "", ""); /* set scan arguments */ @@ -313,7 +348,7 @@ Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode, bool segment) */ if (relid != InvalidOid) { entry = - (RelfilenodeMapEntry*)hash_search(u_sess->relmap_cxt.RelfilenodeMapHash, (void*)&key, HASH_ENTER, &found); + (RelfilenodeMapEntry*)hash_search(relmap_cxt->RelfilenodeMapHash, (void*)&key, HASH_ENTER, &found); entry->relid = relid; } if (found) @@ -333,7 +368,8 @@ Oid UHeapRelidByRelfilenode(Oid reltablespace, Oid relfilenode) ScanKeyData skey[2]; Oid relid; int rc = 0; - if (u_sess->relmap_cxt.UHeapRelfilenodeMapHash == NULL) + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); + if (relmap_cxt->UHeapRelfilenodeMapHash == NULL) UHeapInitRelfilenodeMap(); /* pg_class will show 0 when the value is actually u_sess->proc_cxt.MyDatabaseTableSpace */ @@ -357,7 +393,7 @@ Oid UHeapRelidByRelfilenode(Oid reltablespace, Oid relfilenode) * since querying invalid values isn't supposed to be a frequent thing, * but it's basically free. */ - entry = (RelfilenodeMapEntry*)hash_search(u_sess->relmap_cxt.UHeapRelfilenodeMapHash, + entry = (RelfilenodeMapEntry*)hash_search(relmap_cxt->UHeapRelfilenodeMapHash, (void*)&key, HASH_FIND, &found); if (found) @@ -374,7 +410,7 @@ Oid UHeapRelidByRelfilenode(Oid reltablespace, Oid relfilenode) relation = heap_open(RelationRelationId, AccessShareLock); /* copy scankey to local copy, it will be modified during the scan */ - rc = memcpy_s(skey, sizeof(skey), u_sess->relmap_cxt.uHeapRelfilenodeSkey, sizeof(skey)); + rc = memcpy_s(skey, sizeof(skey), relmap_cxt->uHeapRelfilenodeSkey, sizeof(skey)); securec_check(rc, "", ""); /* set scan arguments */ @@ -406,7 +442,7 @@ Oid UHeapRelidByRelfilenode(Oid reltablespace, Oid relfilenode) * new entry if we had entered it above. 
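
The relfilenodemap.cpp changes keep the original lookup flow and only relocate the hash and scan keys (thread-local under LSC, session-local otherwise). For orientation, a toy version of that flow: probe the (tablespace, relfilenode) hash, fall back to a catalog scan on a miss, cache only positive results, and let the invalidation callback flush matching entries, or everything on a full reset. All names are illustrative:

```
#include <cstdint>
#include <functional>
#include <unordered_map>

using OidSketch = uint32_t;

struct RfnKey {
    OidSketch tablespace;
    OidSketch relfilenode;
    bool operator==(const RfnKey& o) const
    {
        return tablespace == o.tablespace && relfilenode == o.relfilenode;
    }
};
struct RfnKeyHash {
    size_t operator()(const RfnKey& k) const
    {
        return std::hash<uint64_t>()((uint64_t)k.tablespace << 32 | k.relfilenode);
    }
};

static std::unordered_map<RfnKey, OidSketch, RfnKeyHash> rfn_map;

static OidSketch scan_pg_class_sketch(RfnKey) { return 16384; } /* pretend scan */

OidSketch RelidByRelfilenodeSketch(RfnKey key)
{
    auto it = rfn_map.find(key);
    if (it != rfn_map.end()) {
        return it->second;                       /* fast path: cache hit */
    }
    OidSketch relid = scan_pg_class_sketch(key); /* slow path: catalog scan */
    if (relid != 0) {
        rfn_map[key] = relid;                    /* cache positive results only */
    }
    return relid;
}

/* relid == 0 signals a complete reset, mirroring the InvalidOid case above. */
void RfnInvalidateSketch(OidSketch relid)
{
    for (auto it = rfn_map.begin(); it != rfn_map.end();) {
        it = (relid == 0 || it->second == relid) ? rfn_map.erase(it) : ++it;
    }
}
```
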
*/ if (relid != InvalidOid) { - entry = (RelfilenodeMapEntry*)hash_search(u_sess->relmap_cxt.UHeapRelfilenodeMapHash, + entry = (RelfilenodeMapEntry*)hash_search(relmap_cxt->UHeapRelfilenodeMapHash, (void*)&key, HASH_ENTER, &found); entry->relid = relid; } @@ -441,7 +477,7 @@ Oid PartitionRelidByRelfilenode(Oid reltablespace, Oid relfilenode, Oid &partati /* check plain relations by looking in pg_class */ relation = heap_open(PartitionRelationId, AccessShareLock); - rc = memcpy_s(skey, sizeof(skey), u_sess->relmap_cxt.relfilenodeSkey, sizeof(skey)); + rc = memcpy_s(skey, sizeof(skey), GetRelMapCxt()->relfilenodeSkey, sizeof(skey)); securec_check(rc, "", ""); ScanKeyInit(&skey[0], Anum_pg_partition_reltablespace, BTEqualStrategyNumber, F_OIDEQ, @@ -538,7 +574,7 @@ Oid UHeapPartitionRelidByRelfilenode(Oid reltablespace, Oid relfilenode, Oid& pa /* check plain relations by looking in pg_class */ relation = heap_open(PartitionRelationId, AccessShareLock); - rc = memcpy_s(skey, sizeof(skey), u_sess->relmap_cxt.uHeapRelfilenodeSkey, sizeof(skey)); + rc = memcpy_s(skey, sizeof(skey), GetRelMapCxt()->uHeapRelfilenodeSkey, sizeof(skey)); securec_check(rc, "", ""); skey[0].sk_attno = Anum_pg_partition_reltablespace; skey[1].sk_attno = Anum_pg_partition_relfilenode; diff --git a/src/common/backend/utils/cache/relmapper.cpp b/src/common/backend/utils/cache/relmapper.cpp index 4636f8d8f..f08d4363e 100644 --- a/src/common/backend/utils/cache/relmapper.cpp +++ b/src/common/backend/utils/cache/relmapper.cpp @@ -59,8 +59,6 @@ /* non-export function prototypes */ static void apply_map_update(RelMapFile* map, Oid relationId, Oid fileNode, bool add_okay); static void merge_map_updates(RelMapFile* map, const RelMapFile* updates, bool add_okay); -static void load_relmap_file(bool shared); -static void recover_relmap_file(bool shared, bool backupfile); static void write_relmap_file(bool shared, RelMapFile* newmap, bool write_wal, bool send_sinval, bool preserve_files, Oid dbid, Oid tsid, const char* dbpath); static void perform_relmap_update(bool shared, const RelMapFile* updates); @@ -73,6 +71,7 @@ static pg_crc32 RelmapCrcComp(RelMapFile* map); static int32 ReadRelMapFile(RelMapFile* map, int fd, bool isNewMap); static int32 WriteRelMapFile(RelMapFile* map, int fd); +static void recover_relmap_file(bool shared, bool backupfile, RelMapFile* real_map); /* * RelationMapOidToFilenode * @@ -89,29 +88,29 @@ Oid RelationMapOidToFilenode(Oid relationId, bool shared) { const RelMapFile* map = NULL; int32 i; - + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* If there are active updates, believe those over the main maps */ if (shared) { - map = u_sess->relmap_cxt.active_shared_updates; + map = relmap_cxt->active_shared_updates; for (i = 0; i < map->num_mappings; i++) { if (relationId == map->mappings[i].mapoid) { return map->mappings[i].mapfilenode; } } - map = u_sess->relmap_cxt.shared_map; + map = relmap_cxt->shared_map; for (i = 0; i < map->num_mappings; i++) { if (relationId == map->mappings[i].mapoid) { return map->mappings[i].mapfilenode; } } } else { - map = u_sess->relmap_cxt.active_local_updates; + map = relmap_cxt->active_local_updates; for (i = 0; i < map->num_mappings; i++) { if (relationId == map->mappings[i].mapoid) { return map->mappings[i].mapfilenode; } } - map = u_sess->relmap_cxt.local_map; + map = relmap_cxt->local_map; for (i = 0; i < map->num_mappings; i++) { if (relationId == map->mappings[i].mapoid) { return map->mappings[i].mapfilenode; @@ -138,29 +137,29 @@ Oid 
RelationMapFilenodeToOid(Oid filenode, bool shared) { const RelMapFile* map = NULL; int32 i; - + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* If there are active updates, believe those over the main maps */ if (shared) { - map = u_sess->relmap_cxt.active_shared_updates; + map = relmap_cxt->active_shared_updates; for (i = 0; i < map->num_mappings; i++) { if (filenode == map->mappings[i].mapfilenode) { return map->mappings[i].mapoid; } } - map = u_sess->relmap_cxt.shared_map; + map = relmap_cxt->shared_map; for (i = 0; i < map->num_mappings; i++) { if (filenode == map->mappings[i].mapfilenode) { return map->mappings[i].mapoid; } } } else { - map = u_sess->relmap_cxt.active_local_updates; + map = relmap_cxt->active_local_updates; for (i = 0; i < map->num_mappings; i++) { if (filenode == map->mappings[i].mapfilenode) { return map->mappings[i].mapoid; } } - map = u_sess->relmap_cxt.local_map; + map = relmap_cxt->local_map; for (i = 0; i < map->num_mappings; i++) { if (filenode == map->mappings[i].mapfilenode) { return map->mappings[i].mapoid; @@ -182,15 +181,15 @@ Oid RelationMapFilenodeToOid(Oid filenode, bool shared) void RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared, bool immediate) { RelMapFile* map = NULL; - + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); if (IsBootstrapProcessingMode()) { /* * In bootstrap mode, the mapping gets installed in permanent map. */ if (shared) { - map = u_sess->relmap_cxt.shared_map; + map = relmap_cxt->shared_map; } else { - map = u_sess->relmap_cxt.local_map; + map = relmap_cxt->local_map; } } else { /* @@ -206,16 +205,16 @@ void RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared, bool immedi if (immediate) { /* Make it active, but only locally */ if (shared) { - map = u_sess->relmap_cxt.active_shared_updates; + map = relmap_cxt->active_shared_updates; } else { - map = u_sess->relmap_cxt.active_local_updates; + map = relmap_cxt->active_local_updates; } } else { /* Make it pending */ if (shared) { - map = u_sess->relmap_cxt.pending_shared_updates; + map = relmap_cxt->pending_shared_updates; } else { - map = u_sess->relmap_cxt.pending_local_updates; + map = relmap_cxt->pending_local_updates; } } } @@ -291,7 +290,7 @@ static void merge_map_updates(RelMapFile* map, const RelMapFile* updates, bool a */ void RelationMapRemoveMapping(Oid relationId) { - RelMapFile* map = u_sess->relmap_cxt.active_local_updates; + RelMapFile* map = GetRelMapCxt()->active_local_updates; int32 i; for (i = 0; i < map->num_mappings; i++) { @@ -318,16 +317,25 @@ void RelationMapRemoveMapping(Oid relationId) */ void RelationMapInvalidate(bool shared) { + if (EnableLocalSysCache()) { + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); + RelMapFile *rel_map = shared ? 
relmap_cxt->shared_map : relmap_cxt->local_map; + if (IS_MAGIC_EXIST(rel_map->magic)) { + t_thrd.lsc_cxt.lsc->LoadRelMapFromGlobal(shared); + } + return; + } + if (shared) { if (IS_MAGIC_EXIST(u_sess->relmap_cxt.shared_map->magic)) { LWLockAcquire(RelationMappingLock, LW_SHARED); - load_relmap_file(true); + load_relmap_file(true, u_sess->relmap_cxt.shared_map); LWLockRelease(RelationMappingLock); } } else { if (IS_MAGIC_EXIST(u_sess->relmap_cxt.local_map->magic)) { LWLockAcquire(RelationMappingLock, LW_SHARED); - load_relmap_file(false); + load_relmap_file(false, u_sess->relmap_cxt.local_map); LWLockRelease(RelationMappingLock); } } @@ -342,12 +350,23 @@ void RelationMapInvalidate(bool shared) */ void RelationMapInvalidateAll(void) { + if (EnableLocalSysCache()) { + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); + if (IS_MAGIC_EXIST(relmap_cxt->shared_map->magic)) { + t_thrd.lsc_cxt.lsc->LoadRelMapFromGlobal(true); + } + if (IS_MAGIC_EXIST(relmap_cxt->local_map->magic)) { + t_thrd.lsc_cxt.lsc->LoadRelMapFromGlobal(false); + } + return; + } + LWLockAcquire(RelationMappingLock, LW_SHARED); if (IS_MAGIC_EXIST(u_sess->relmap_cxt.shared_map->magic)) { - load_relmap_file(true); + load_relmap_file(true, u_sess->relmap_cxt.shared_map); } if (IS_MAGIC_EXIST(u_sess->relmap_cxt.local_map->magic)) { - load_relmap_file(false); + load_relmap_file(false, u_sess->relmap_cxt.local_map); } LWLockRelease(RelationMappingLock); } @@ -359,13 +378,14 @@ void RelationMapInvalidateAll(void) */ void AtCCI_RelationMap(void) { - if (u_sess->relmap_cxt.pending_shared_updates->num_mappings != 0) { - merge_map_updates(u_sess->relmap_cxt.active_shared_updates, u_sess->relmap_cxt.pending_shared_updates, true); - u_sess->relmap_cxt.pending_shared_updates->num_mappings = 0; + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); + if (relmap_cxt->pending_shared_updates->num_mappings != 0) { + merge_map_updates(relmap_cxt->active_shared_updates, relmap_cxt->pending_shared_updates, true); + relmap_cxt->pending_shared_updates->num_mappings = 0; } - if (u_sess->relmap_cxt.pending_local_updates->num_mappings != 0) { - merge_map_updates(u_sess->relmap_cxt.active_local_updates, u_sess->relmap_cxt.pending_local_updates, true); - u_sess->relmap_cxt.pending_local_updates->num_mappings = 0; + if (relmap_cxt->pending_local_updates->num_mappings != 0) { + merge_map_updates(relmap_cxt->active_local_updates, relmap_cxt->pending_local_updates, true); + relmap_cxt->pending_local_updates->num_mappings = 0; } } @@ -386,32 +406,33 @@ void AtCCI_RelationMap(void) */ void AtEOXact_RelationMap(bool isCommit) { + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); if (isCommit) { /* * We should not get here with any "pending" updates. (We could * logically choose to treat such as committed, but in the current * code this should never happen.) */ - Assert(u_sess->relmap_cxt.pending_shared_updates->num_mappings == 0); - Assert(u_sess->relmap_cxt.pending_local_updates->num_mappings == 0); + Assert(relmap_cxt->pending_shared_updates->num_mappings == 0); + Assert(relmap_cxt->pending_local_updates->num_mappings == 0); /* * Write any active updates to the actual map files, then reset them. 
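
The rewritten AtEOXact_RelationMap() preserves the original commit/abort contract and merely reads the four update lists through the relmap_cxt accessor. A compact model of that contract, with toy types standing in for RelMapFile:

```
#include <utility>
#include <vector>

using OidSketch = unsigned;
struct RelMapUpdatesSketch {
    std::vector<std::pair<OidSketch, OidSketch>> mappings; /* oid -> filenode */
};

static RelMapUpdatesSketch active_shared, active_local;
static RelMapUpdatesSketch pending_shared, pending_local;

static void write_map_file_sketch(const RelMapUpdatesSketch&)
{
    /* the real code WAL-logs and durably rewrites the map file here */
}

void AtEOXact_RelationMapSketch(bool isCommit)
{
    if (isCommit) {
        /* By now CommandCounterIncrement has merged "pending" into "active",
         * so only the active lists can be non-empty. */
        if (!active_shared.mappings.empty()) {
            write_map_file_sketch(active_shared);
            active_shared.mappings.clear();
        }
        if (!active_local.mappings.empty()) {
            write_map_file_sketch(active_local);
            active_local.mappings.clear();
        }
    } else {
        /* Abort: every active and pending update is simply dropped. */
        active_shared.mappings.clear();
        active_local.mappings.clear();
        pending_shared.mappings.clear();
        pending_local.mappings.clear();
    }
}
```
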
*/ - if (u_sess->relmap_cxt.active_shared_updates->num_mappings != 0) { - perform_relmap_update(true, u_sess->relmap_cxt.active_shared_updates); - u_sess->relmap_cxt.active_shared_updates->num_mappings = 0; + if (relmap_cxt->active_shared_updates->num_mappings != 0) { + perform_relmap_update(true, relmap_cxt->active_shared_updates); + relmap_cxt->active_shared_updates->num_mappings = 0; } - if (u_sess->relmap_cxt.active_local_updates->num_mappings != 0) { - perform_relmap_update(false, u_sess->relmap_cxt.active_local_updates); - u_sess->relmap_cxt.active_local_updates->num_mappings = 0; + if (relmap_cxt->active_local_updates->num_mappings != 0) { + perform_relmap_update(false, relmap_cxt->active_local_updates); + relmap_cxt->active_local_updates->num_mappings = 0; } } else { /* Abort --- drop all local and pending updates */ - u_sess->relmap_cxt.active_shared_updates->num_mappings = 0; - u_sess->relmap_cxt.active_local_updates->num_mappings = 0; - u_sess->relmap_cxt.pending_shared_updates->num_mappings = 0; - u_sess->relmap_cxt.pending_local_updates->num_mappings = 0; + relmap_cxt->active_shared_updates->num_mappings = 0; + relmap_cxt->active_local_updates->num_mappings = 0; + relmap_cxt->pending_shared_updates->num_mappings = 0; + relmap_cxt->pending_local_updates->num_mappings = 0; } } @@ -427,10 +448,11 @@ void AtPrepare_RelationMap(void) if (u_sess->attr.attr_common.IsInplaceUpgrade) { return; } - if (u_sess->relmap_cxt.active_shared_updates->num_mappings != 0 || - u_sess->relmap_cxt.active_local_updates->num_mappings != 0 || - u_sess->relmap_cxt.pending_shared_updates->num_mappings != 0 || - u_sess->relmap_cxt.pending_local_updates->num_mappings != 0) { + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); + if (relmap_cxt->active_shared_updates->num_mappings != 0 || + relmap_cxt->active_local_updates->num_mappings != 0 || + relmap_cxt->pending_shared_updates->num_mappings != 0 || + relmap_cxt->pending_local_updates->num_mappings != 0) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot PREPARE a transaction that modified relation mapping"))); } @@ -464,16 +486,17 @@ void RelationMapFinishBootstrap(void) { Assert(IsBootstrapProcessingMode()); + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* Shouldn't be anything "pending" ... 
*/ - Assert(u_sess->relmap_cxt.active_shared_updates->num_mappings == 0); - Assert(u_sess->relmap_cxt.active_local_updates->num_mappings == 0); - Assert(u_sess->relmap_cxt.pending_shared_updates->num_mappings == 0); - Assert(u_sess->relmap_cxt.pending_local_updates->num_mappings == 0); + Assert(relmap_cxt->active_shared_updates->num_mappings == 0); + Assert(relmap_cxt->active_local_updates->num_mappings == 0); + Assert(relmap_cxt->pending_shared_updates->num_mappings == 0); + Assert(relmap_cxt->pending_local_updates->num_mappings == 0); /* Write the files; no WAL or sinval needed */ - write_relmap_file(true, u_sess->relmap_cxt.shared_map, false, false, false, InvalidOid, GLOBALTABLESPACE_OID, NULL); + write_relmap_file(true, relmap_cxt->shared_map, false, false, false, InvalidOid, GLOBALTABLESPACE_OID, NULL); write_relmap_file(false, - u_sess->relmap_cxt.local_map, + relmap_cxt->local_map, false, false, false, @@ -490,6 +513,11 @@ void RelationMapFinishBootstrap(void) */ void RelationMapInitialize(void) { + if (EnableLocalSysCache()) { + /* when we first init or rebuild the lsc, the maps are all palloc0'ed, so nothing needs to be done; + * when we switch db, we memset them to zero, so nothing needs to be done */ + return; + } /* The static variables should initialize to zeroes, but let's be sure */ u_sess->relmap_cxt.shared_map->magic = 0; /* mark it not loaded */ u_sess->relmap_cxt.local_map->magic = 0; @@ -518,8 +546,12 @@ void RelationMapInitializePhase2(void) /* * Load the shared map file, die on error. */ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->InitRelMapPhase2(); + return; + } LWLockAcquire(RelationMappingLock, LW_SHARED); - load_relmap_file(true); + load_relmap_file(true, u_sess->relmap_cxt.shared_map); LWLockRelease(RelationMappingLock); } @@ -540,8 +572,13 @@ void RelationMapInitializePhase3(void) /* * Load the local map file, die on error. */ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->InitRelMapPhase3(); + return; + } + LWLockAcquire(RelationMappingLock, LW_SHARED); - load_relmap_file(false); + load_relmap_file(false, u_sess->relmap_cxt.local_map); LWLockRelease(RelationMappingLock); } @@ -553,9 +590,8 @@ void RelationMapInitializePhase3(void) * * Note that the local case requires u_sess->proc_cxt.DatabasePath to be set up. 
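
The hunks below change load_relmap_file() to take the target map (shared_map or local_map) as an explicit parameter instead of selecting it internally. For context, a toy sketch of the load behavior described here: read the map file, verify a checksum, and survive on (and later repair from) the backup copy. The file names are placeholders and the checksum is FNV-1a rather than the real pg_crc32:

```
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct MapFileSketch {
    uint32_t magic;
    uint32_t payload[16];
    uint32_t checksum; /* covers everything before this field */
};

/* Placeholder checksum (FNV-1a); the real file carries a pg_crc32. */
static uint32_t checksum_sketch(const void* p, size_t n)
{
    uint32_t h = 2166136261u;
    const uint8_t* b = static_cast<const uint8_t*>(p);
    for (size_t i = 0; i < n; i++) {
        h = (h ^ b[i]) * 16777619u;
    }
    return h;
}

static bool read_and_verify_sketch(const char* path, MapFileSketch* out)
{
    FILE* f = std::fopen(path, "rb");
    if (f == nullptr) {
        return false;
    }
    bool ok = std::fread(out, sizeof(*out), 1, f) == 1;
    std::fclose(f);
    return ok && out->checksum == checksum_sketch(out, offsetof(MapFileSketch, checksum));
}

/* Prefer the primary copy; fall back to the backup if the primary is damaged.
 * The real code additionally rewrites the damaged copy (recover_relmap_file)
 * and reports FATAL when both copies fail verification. */
bool load_relmap_sketch(MapFileSketch* map)
{
    MapFileSketch primary, backup;
    bool p_ok = read_and_verify_sketch("filemap_sketch", &primary);
    bool b_ok = read_and_verify_sketch("filemap_sketch.backup", &backup);
    if (!p_ok && !b_ok) {
        return false;
    }
    *map = p_ok ? primary : backup;
    return true;
}
```
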
*/ -static void load_relmap_file(bool shared) +void load_relmap_file(bool shared, RelMapFile *map) { - RelMapFile* map = NULL; char map_file_name[2][MAXPGPATH]; char* file_name = NULL; pg_crc32 crc; @@ -574,13 +610,12 @@ static void load_relmap_file(bool shared) rc = snprintf_s(map_file_name[1], sizeof(map_file_name[1]), sizeof(map_file_name[1]) - 1, "global/%s", RELMAPPER_FILENAME_BAK); securec_check_ss(rc, "\0", "\0"); - map = u_sess->relmap_cxt.shared_map; } else { rc = snprintf_s(map_file_name[0], sizeof(map_file_name[0]), sizeof(map_file_name[0]) - 1, "%s/%s", - u_sess->proc_cxt.DatabasePath, + GetMyDatabasePath(), RELMAPPER_FILENAME); securec_check_ss(rc, "\0", "\0"); @@ -588,10 +623,9 @@ static void load_relmap_file(bool shared) sizeof(map_file_name[1]), sizeof(map_file_name[1]) - 1, "%s/%s", - u_sess->proc_cxt.DatabasePath, + GetMyDatabasePath(), RELMAPPER_FILENAME_BAK); securec_check_ss(rc, "\0", "\0"); - map = u_sess->relmap_cxt.local_map; } // check backup file @@ -660,9 +694,9 @@ loop: ereport(FATAL, (errmsg("relation mapping file \"%s\" contains invalid data", file_name))); } if (retry == true) { - recover_relmap_file(shared, false); + recover_relmap_file(shared, false, map); } else if (fix_backup == true) { - recover_relmap_file(shared, true); + recover_relmap_file(shared, true, map); } } @@ -724,15 +758,16 @@ static void write_relmap_file(bool shared, RelMapFile* newmap, bool write_wal, b fname[0] = RELMAPPER_FILENAME_BAK; fname[1] = RELMAPPER_FILENAME; + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); for (int i = 0; i < 2; i++) { if (shared) { rc = snprintf_s(map_file_name, sizeof(map_file_name), sizeof(map_file_name) - 1, "global/%s", fname[i]); securec_check_ss_c(rc, "\0", "\0"); - real_map = u_sess->relmap_cxt.shared_map; + real_map = relmap_cxt->shared_map; } else { rc = snprintf_s(map_file_name, sizeof(map_file_name), sizeof(map_file_name) - 1, "%s/%s", dbpath, fname[i]); securec_check_ss_c(rc, "\0", "\0"); - real_map = u_sess->relmap_cxt.local_map; + real_map = relmap_cxt->local_map; } fd = BasicOpenFile(map_file_name, O_WRONLY | O_CREAT | PG_BINARY, S_IRUSR | S_IWUSR); @@ -793,6 +828,9 @@ static void write_relmap_file(bool shared, RelMapFile* newmap, bool write_wal, b (errcode_for_file_access(), errmsg("could not close relation mapping file \"%s\": %m", map_file_name))); } } + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->InvalidateGlobalRelMap(shared, dbid, newmap); + } /* * Now that the file is safely on disk, send sinval message to let other * backends know to re-read it. We must do this inside the critical @@ -861,16 +899,20 @@ static void perform_relmap_update(bool shared, const RelMapFile* updates) * trouble. */ LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE); - + knl_u_relmap_context *relmap_cxt = GetRelMapCxt(); /* Be certain we see any other updates just made */ - load_relmap_file(shared); + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->LoadRelMapFromGlobal(shared); + } else { + load_relmap_file(shared, shared ? 
relmap_cxt->shared_map : relmap_cxt->local_map); + } /* Prepare updated data in a local variable */ if (shared) { - rc = memcpy_s(&new_map, sizeof(RelMapFile), u_sess->relmap_cxt.shared_map, sizeof(RelMapFile)); + rc = memcpy_s(&new_map, sizeof(RelMapFile), relmap_cxt->shared_map, sizeof(RelMapFile)); securec_check(rc, "", ""); } else { - rc = memcpy_s(&new_map, sizeof(RelMapFile), u_sess->relmap_cxt.local_map, sizeof(RelMapFile)); + rc = memcpy_s(&new_map, sizeof(RelMapFile), relmap_cxt->local_map, sizeof(RelMapFile)); securec_check(rc, "", ""); } @@ -882,6 +924,7 @@ static void perform_relmap_update(bool shared, const RelMapFile* updates) updates, (g_instance.attr.attr_common.allowSystemTableMods || u_sess->attr.attr_common.IsInplaceUpgrade)); + Assert(CheckMyDatabaseMatch()); /* Write out the updated map and do other necessary tasks */ write_relmap_file(shared, &new_map, @@ -901,10 +944,9 @@ static void perform_relmap_update(bool shared, const RelMapFile* updates) * we should recover the file using the content of backup file or, * if there is no backup file, we create it immediately. */ -static void recover_relmap_file(bool shared, bool backupfile) +static void recover_relmap_file(bool shared, bool backupfile, RelMapFile* real_map) { int fd; - RelMapFile* real_map = NULL; char map_file_name[MAXPGPATH]; char* file_name = NULL; int level; @@ -922,16 +964,14 @@ static void recover_relmap_file(bool shared, bool backupfile) if (shared) { rc = snprintf_s(map_file_name, sizeof(map_file_name), sizeof(map_file_name) - 1, "global/%s", file_name); securec_check_ss(rc, "\0", "\0"); - real_map = u_sess->relmap_cxt.shared_map; } else { rc = snprintf_s(map_file_name, sizeof(map_file_name), sizeof(map_file_name) - 1, "%s/%s", - u_sess->proc_cxt.DatabasePath, + GetMyDatabasePath(), file_name); securec_check_ss(rc, "\0", "\0"); - real_map = u_sess->relmap_cxt.local_map; } ereport(level, (errmsg("recover the relation mapping file %s", map_file_name))); @@ -969,6 +1009,10 @@ static void recover_relmap_file(bool shared, bool backupfile) (errcode_for_file_access(), errmsg("recover failed could not close relation mapping file \"%s\": %m", map_file_name))); } + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->InvalidateGlobalRelMap(shared, + shared ? InvalidOid : u_sess->proc_cxt.MyDatabaseId, real_map); + } } /* @@ -1023,7 +1067,6 @@ void relmap_redo(XLogReaderState* record) XLogRecPtr lsn = record->EndRecPtr; UpdateMinRecoveryPoint(lsn, false); write_relmap_file((xlrec->dbid == InvalidOid), &new_map, false, true, false, xlrec->dbid, xlrec->tsid, dbpath); - pfree_ext(dbpath); } else { ereport(PANIC, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("relmap_redo: unknown op code %u", info))); diff --git a/src/common/backend/utils/cache/spccache.cpp b/src/common/backend/utils/cache/spccache.cpp index 16f0beddd..6fd41b51b 100644 --- a/src/common/backend/utils/cache/spccache.cpp +++ b/src/common/backend/utils/cache/spccache.cpp @@ -43,16 +43,16 @@ typedef struct { * and easy and doesn't cost much, since there shouldn't be terribly many * tablespaces, nor do we expect them to be frequently modified. 
*/ -static void InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue) +void InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue) { HASH_SEQ_STATUS status; TableSpaceCacheEntry* spc = NULL; - - hash_seq_init(&status, u_sess->cache_cxt.TableSpaceCacheHash); + struct HTAB *TableSpaceCacheHash = GetTableSpaceCacheHash(); + hash_seq_init(&status, TableSpaceCacheHash); while ((spc = (TableSpaceCacheEntry*)hash_seq_search(&status)) != NULL) { if (spc->opts != NULL) pfree_ext(spc->opts); - if (hash_search(u_sess->cache_cxt.TableSpaceCacheHash, (void*)&spc->oid, HASH_REMOVE, NULL) == NULL) + if (hash_search(TableSpaceCacheHash, (void*)&spc->oid, HASH_REMOVE, NULL) == NULL) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("hash table corrupted"))); } } @@ -72,12 +72,20 @@ static void InitializeTableSpaceCache(void) ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(TableSpaceCacheEntry); ctl.hash = oid_hash; - ctl.hcxt = u_sess->cache_mem_cxt; - u_sess->cache_cxt.TableSpaceCacheHash = + if (EnableLocalSysCache()) { + ctl.hcxt = t_thrd.lsc_cxt.lsc->lsc_share_memcxt; + t_thrd.lsc_cxt.lsc->TableSpaceCacheHash = + hash_create("TableSpace cache", 16, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + /* Watch for invalidation events. */ + CacheRegisterThreadSyscacheCallback(TABLESPACEOID, InvalidateTableSpaceCacheCallback, (Datum)0); + } else { + ctl.hcxt = u_sess->cache_mem_cxt; + u_sess->cache_cxt.TableSpaceCacheHash = hash_create("TableSpace cache", 16, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - - /* Watch for invalidation events. */ - CacheRegisterSyscacheCallback(TABLESPACEOID, InvalidateTableSpaceCacheCallback, (Datum)0); + /* Watch for invalidation events. */ + CacheRegisterSessionSyscacheCallback(TABLESPACEOID, InvalidateTableSpaceCacheCallback, (Datum)0); + } + } /* @@ -100,9 +108,9 @@ static TableSpaceCacheEntry* get_tablespace(Oid spcid) spcid = ConvertToRelfilenodeTblspcOid(spcid); /* Find existing cache entry, if any. */ - if (!u_sess->cache_cxt.TableSpaceCacheHash) + if (!GetTableSpaceCacheHash()) InitializeTableSpaceCache(); - spc = (TableSpaceCacheEntry*)hash_search(u_sess->cache_cxt.TableSpaceCacheHash, (void*)&spcid, HASH_FIND, NULL); + spc = (TableSpaceCacheEntry*)hash_search(GetTableSpaceCacheHash(), (void*)&spcid, HASH_FIND, NULL); if (spc != NULL) return spc; @@ -129,7 +137,7 @@ static TableSpaceCacheEntry* get_tablespace(Oid spcid) ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), errmsg("Invalid tablespace relation option."))); } - opts = (TableSpaceOpts*)MemoryContextAlloc(u_sess->cache_mem_cxt, VARSIZE(bytea_opts)); + opts = (TableSpaceOpts*)MemoryContextAlloc(LocalSharedCacheMemCxt(), VARSIZE(bytea_opts)); rc = memcpy_s(opts, VARSIZE(bytea_opts), bytea_opts, VARSIZE(bytea_opts)); securec_check(rc, "", ""); } @@ -141,7 +149,7 @@ static TableSpaceCacheEntry* get_tablespace(Oid spcid) * reading the pg_tablespace entry, since doing so could cause a cache * flush. 
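* (For that reason opts was already copied into LocalSharedCacheMemCxt() above; the HASH_ENTER below only allocates the fixed-size TableSpaceCacheEntry and then stores the pointer.)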
*/ - spc = (TableSpaceCacheEntry*)hash_search(u_sess->cache_cxt.TableSpaceCacheHash, (void*)&spcid, HASH_ENTER, NULL); + spc = (TableSpaceCacheEntry*)hash_search(GetTableSpaceCacheHash(), (void*)&spcid, HASH_ENTER, NULL); spc->opts = opts; return spc; } diff --git a/src/common/backend/utils/cache/syscache.cpp b/src/common/backend/utils/cache/syscache.cpp index a7074deeb..f6b954fc2 100644 --- a/src/common/backend/utils/cache/syscache.cpp +++ b/src/common/backend/utils/cache/syscache.cpp @@ -32,6 +32,7 @@ #include "catalog/pg_aggregate.h" #include "catalog/gs_client_global_keys.h" #include "catalog/gs_column_keys.h" +#include "catalog/gs_db_privilege.h" #include "catalog/gs_encrypted_columns.h" #include "catalog/gs_encrypted_proc.h" #include "catalog/gs_job_argument.h" @@ -84,6 +85,7 @@ #include "catalog/pg_ts_parser.h" #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" +#include "catalog/pg_uid.h" #include "catalog/pg_user_mapping.h" #include "catalog/pg_extension_data_source.h" #include "catalog/pg_streaming_stream.h" @@ -102,6 +104,7 @@ #include "utils/rel_gs.h" #include "utils/syscache.h" #include "catalog/pg_user_status.h" +#include "lite/memory_lite.h" #include "catalog/pg_publication.h" #include "catalog/pg_publication_rel.h" #include "catalog/pg_replication_origin.h" @@ -141,19 +144,13 @@ /* * struct cachedesc: information defining a single syscache */ -struct cachedesc { - Oid reloid; /* OID of the relation being cached */ - Oid indoid; /* OID of index relation for this cache */ - int nkeys; /* # of keys needed for cache lookup */ - int key[4]; /* attribute numbers of key attrs */ - int nbuckets; /* number of hash buckets for this cache */ -}; -static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID */ - AggregateFnoidIndexId, - 1, - {Anum_pg_aggregate_aggfnoid, 0, 0, 0}, - 32}, +const cachedesc cacheinfo[] = { + {AggregateRelationId, /* AGGFNOID */ + AggregateFnoidIndexId, + 1, + {Anum_pg_aggregate_aggfnoid, 0, 0, 0}, + 32}, {AccessMethodRelationId, /* AMNAME */ AmNameIndexId, 1, @@ -458,24 +455,24 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * PartitionOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 1024}, + PARTITION_OID_INDEX_ID_NBUCKETS}, {PartitionRelationId, /* PARTPARTOID */ PartitionPartOidIndexId, 3, {Anum_pg_partition_relname, Anum_pg_partition_parttype, Anum_pg_partition_parentid, 0}, - 1024}, + PARTITION_PART_OID_INDEX_ID_NBUCKETS}, {PartitionRelationId, /* PARTINDEXTBLPARENTOID */ PartitionIndexTableIdParentOidIndexId, 3, {Anum_pg_partition_indextblid, Anum_pg_partition_parentid, ObjectIdAttributeNumber, 0}, - 1024}, + PARTITION_INDEX_TABLE_ID_PARENT_OID_INDEX_ID_NBUCKETS}, {PgJobRelationId, /* PGJOBID */ PgJobIdIndexId, 1, {Anum_pg_job_job_id, 0, 0, 0}, - 2048}, + PG_JOB_ID_INDEX_ID_NBUCKETS}, {PgJobProcRelationId, /* PGJOBPROCID */ PgJobProcIdIndexId, 1, @@ -485,39 +482,39 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * PgObjectIndex, 2, {Anum_pg_object_oid, Anum_pg_object_type, 0, 0}, - 2048}, + PG_OBJECT_INDEX_NBUCKETS}, #ifdef PGXC {PgxcClassRelationId, /* PGXCCLASSRELID */ PgxcClassPgxcRelIdIndexId, 1, {Anum_pgxc_class_pcrelid, 0, 0, 0}, - 1024}, + PGXC_CLASS_PGXC_REL_ID_INDEX_ID_NBUCKETS}, {PgxcGroupRelationId, /* PGXCGROUPNAME */ PgxcGroupGroupNameIndexId, 1, {Anum_pgxc_group_name, 0, 0, 0}, - 256}, + PGXC_GROUP_GROUP_NAME_INDEX_ID_NBUCKETS}, {PgxcGroupRelationId, /* PGXCGROUPOID */ PgxcGroupOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 
256}, + PGXC_GROUP_OID_INDEX_ID_NBUCKETS}, {PgxcNodeRelationId, /* PGXCNODENAMETYPE */ PgxcNodeNodeNameIndexId, 3, {Anum_pgxc_node_name, Anum_pgxc_node_type, ObjectIdAttributeNumber, 0}, - 256}, + PGXC_NODE_NODE_NAME_INDEX_ID_NBUCKETS}, {PgxcNodeRelationId, /* PGXCNODEOID */ PgxcNodeOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 256}, + PGXC_NODE_OID_INDEX_ID_NBUCKETS}, {PgxcNodeRelationId, /* PGXCNODEIDENTIFIER */ PgxcNodeNodeIdIndexId, 1, {Anum_pgxc_node_id, 0, 0, 0}, - 256}, + PGXC_NODE_NODE_ID_INDEX_ID_NBUCKETS}, {ResourcePoolRelationId, /* PGXCRESOURCEPOOLNAME */ ResourcePoolPoolNameIndexId, 1, @@ -532,12 +529,12 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * WorkloadGroupGroupNameIndexId, 1, {Anum_pg_workload_group_wgname, 0, 0, 0}, - 256}, + WORKLOAD_GROUP_GROUP_NAME_INDEX_ID_NBUCKETS}, {WorkloadGroupRelationId, /* PGXCWORKLOADGROUPOID */ WorkloadGroupOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 256}, + WORKLOAD_GROUP_OID_INDEX_ID_NBUCKETS}, {AppWorkloadGroupMappingRelationId, /* PGXCAPPWGMAPPINGNAME */ AppWorkloadGroupMappingNameIndexId, 1, @@ -552,8 +549,20 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * PgxcSliceIndexId, 4, {Anum_pgxc_slice_relid, Anum_pgxc_slice_type, Anum_pgxc_slice_relname, Anum_pgxc_slice_sindex}, - 1024}, + PGXC_SLICE_INDEX_ID_NBUCKETS}, #endif + {SubscriptionRelationId, /* SUBSCRIPTIONNAME */ + SubscriptionNameIndexId, + 2, + {Anum_pg_subscription_subdbid, Anum_pg_subscription_subname, 0, 0}, + 4 + }, + {SubscriptionRelationId, /* SUBSCRIPTIONOID */ + SubscriptionObjectIndexId, + 1, + {ObjectIdAttributeNumber, 0, 0, 0}, + 4 + }, #ifndef ENABLE_MULTIPLE_NODES {ProcedureRelationId, /* PROCNAMEARGSNSP */ ProcedureNameArgsNspNewIndexId, @@ -604,7 +613,7 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * Anum_pg_statistic_starelkind, Anum_pg_statistic_staattnum, Anum_pg_statistic_stainherit}, - 1024}, + STATISTIC_RELID_KIND_ATTNUM_INH_INDEX_ID_NBUCKETS}, {StatisticExtRelationId, /* STATEXTRELKINDKEYINH (For pg_statistic_ext multi-column stats) */ StatisticExtRelidKindInhKeyIndexId, 4, @@ -612,57 +621,63 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * Anum_pg_statistic_ext_starelkind, Anum_pg_statistic_ext_stainherit, Anum_pg_statistic_ext_stakey}, - 1024}, + STATISTIC_EXT_RELID_KIND_INH_KEY_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQDEFRELID */ StreamingContQueryDefrelidIndexId, 1, {Anum_streaming_cont_query_defrelid, 0, 0, 0}, - 2048}, + STREAMING_CONT_QUERY_DEFRELID_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQID */ StreamingContQueryIdIndexId, 1, {Anum_streaming_cont_query_id, 0, 0, 0}, - 2048}, + STREAMING_CONT_QUERY_ID_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQLOOKUPID */ StreamingContQueryLookupidxidIndexId, 1, {Anum_streaming_cont_query_lookupidxid, 0, 0, 0}, - 2048}, + STREAMING_CONT_QUERY_LOOKUP_ID_XID_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQMATRELID */ StreamingContQueryMatrelidIndexId, 1, {Anum_streaming_cont_query_matrelid, 0, 0, 0}, - 2048}, + STREAMING_CONT_QUERY_MATRELID_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQOID */ StreamingContQueryOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 2048}, + STREAMING_CONT_QUERY_OID_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQRELID */ StreamingContQueryRelidIndexId, 1, {Anum_streaming_cont_query_relid, 0, 0, 0}, - 2048}, + 
STREAMING_CONT_QUERY_RELID_INDEX_ID_NBUCKETS}, {StreamingContQueryRelationId, /* STREAMCQSCHEMACHANGE */ StreamingContQuerySchemaChangeIndexId, 2, {Anum_streaming_cont_query_matrelid, Anum_streaming_cont_query_active, 0, 0}, - 2048}, + STREAMING_CONT_QUERY_SCHEMA_CHANGE_INDEX_ID_NBUCKETS}, {StreamingStreamRelationId, /* STREAMOID */ StreamingStreamOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 2048}, + STREAMING_STREAM_OID_INDEX_ID_NBUCKETS}, {StreamingStreamRelationId, /* STREAMRELID */ StreamingStreamRelidIndexId, 1, {Anum_streaming_stream_relid, 0, 0, 0}, - 2048}, + STREAMING_STREAM_RELID_INDEX_ID_NBUCKETS}, {StreamingReaperStatusRelationId, /* REAPERCQOID */ StreamingReaperStatusOidIndexId, 1, {Anum_streaming_reaper_status_id, 0, 0, 0}, - 2048}, + STREAMING_REAPER_STATUS_OID_INDEX_ID_NBUCKETS}, + {PublicationRelRelationId, /* PUBLICATIONRELMAP */ + PublicationRelMapIndexId, + 2, + {Anum_pg_publication_rel_prrelid, Anum_pg_publication_rel_prpubid, 0, 0}, + 64 + }, {PgSynonymRelationId, /* SYNOID */ SynonymOidIndexId, 1, @@ -768,12 +783,12 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * PackageOidIndexId, 1, {ObjectIdAttributeNumber, 0, 0, 0}, - 2048}, + PACKAGE_OID_INDEX_ID_NBUCKETS}, {PackageRelationId, /* PKGNAMENSP */ PackageNameIndexId, 2, {Anum_gs_package_pkgname, Anum_gs_package_pkgnamespace, 0, 0}, - 2048}, + PACKAGE_NAME_INDEX_ID_NBUCKETS}, {PublicationRelationId, /* PUBLICATIONNAME */ PublicationNameIndexId, 1, @@ -783,8 +798,7 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * 0, 0 }, - 8 - }, + 8}, {PublicationRelationId, /* PUBLICATIONOID */ PublicationObjectIndexId, 1, @@ -794,52 +808,27 @@ static const struct cachedesc cacheinfo[] = {{AggregateRelationId, /* AGGFNOID * 0, 0 }, - 8 - }, - {PublicationRelRelationId, /* PUBLICATIONREL */ - PublicationRelObjectIndexId, + 8}, + {UidRelationId, /* UIDRELID */ + UidRelidIndexId, 1, - { - ObjectIdAttributeNumber, - 0, - 0, - 0 - }, - 64 - }, - {PublicationRelRelationId, /* PUBLICATIONRELMAP */ - PublicationRelMapIndexId, - 2, - { - Anum_pg_publication_rel_prrelid, - Anum_pg_publication_rel_prpubid, - 0, - 0 - }, - 64 - }, - {SubscriptionRelationId, /* SUBSCRIPTIONNAME */ - SubscriptionNameIndexId, - 2, - { - Anum_pg_subscription_subdbid, - Anum_pg_subscription_subname, - 0, - 0 - }, - 4 - }, - {SubscriptionRelationId, /* SUBSCRIPTIONOID */ - SubscriptionObjectIndexId, + {Anum_gs_uid_relid, 0, 0, 0}, + 64}, + {DbPrivilegeId, /* DBPRIVOID */ + DbPrivilegeOidIndexId, 1, - { - ObjectIdAttributeNumber, - 0, - 0, - 0 - }, - 4 - } + {ObjectIdAttributeNumber, 0, 0, 0}, + 128}, + {DbPrivilegeId, /* DBPRIVROLE */ + DbPrivilegeRoleidIndexId, + 1, + {Anum_gs_db_privilege_roleid, 0, 0, 0}, + 1024}, + {DbPrivilegeId, /* DBPRIVROLEPRIV */ + DbPrivilegeRoleidPrivilegeTypeIndexId, + 2, + {Anum_gs_db_privilege_roleid, Anum_gs_db_privilege_privilege_type, 0, 0}, + 128} }; int SysCacheSize = lengthof(cacheinfo); @@ -854,6 +843,12 @@ int SysCacheSize = lengthof(cacheinfo); */ void InitCatalogCache(void) { + if (EnableLocalSysCache()) { + /* use global catcache */ + Assert(!u_sess->syscache_cxt.CacheInitialized); + t_thrd.lsc_cxt.lsc->systabcache.Init(); + return; + } int cacheId; Assert(!u_sess->syscache_cxt.CacheInitialized); @@ -889,6 +884,8 @@ void InitCatalogCache(void) */ void InitCatalogCachePhase2(void) { + /* gsc never do this work */ + Assert(!EnableLocalSysCache()); int cacheId; Assert(u_sess->syscache_cxt.CacheInitialized); @@ -925,6 +922,10 @@ void 
InitCatalogCachePhase2(void) */ HeapTuple SearchSysCache(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4, int level) { + if (EnableLocalSysCache()) { + HeapTuple tmp = t_thrd.lsc_cxt.lsc->systabcache.SearchTuple(cacheId, key1, key2, key3, key4); + return tmp; + } Assert(cacheId >= 0); Assert(cacheId < SysCacheSize); Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])); @@ -934,6 +935,10 @@ HeapTuple SearchSysCache(int cacheId, Datum key1, Datum key2, Datum key3, Datum HeapTuple SearchSysCache1(int cacheId, Datum key1) { + if (EnableLocalSysCache()) { + HeapTuple tmp = t_thrd.lsc_cxt.lsc->systabcache.SearchTuple1(cacheId, key1); + return tmp; + } Assert(cacheId >= 0); Assert(cacheId < SysCacheSize); Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])); @@ -944,6 +949,10 @@ HeapTuple SearchSysCache1(int cacheId, Datum key1) HeapTuple SearchSysCache2(int cacheId, Datum key1, Datum key2) { + if (EnableLocalSysCache()) { + HeapTuple tmp = t_thrd.lsc_cxt.lsc->systabcache.SearchTuple2(cacheId, key1, key2); + return tmp; + } Assert(cacheId >= 0); Assert(cacheId < SysCacheSize); Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])); @@ -954,6 +963,10 @@ HeapTuple SearchSysCache2(int cacheId, Datum key1, Datum key2) HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3) { + if (EnableLocalSysCache()) { + HeapTuple tmp = t_thrd.lsc_cxt.lsc->systabcache.SearchTuple3(cacheId, key1, key2, key3); + return tmp; + } Assert(cacheId >= 0); Assert(cacheId < SysCacheSize); Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])); @@ -964,10 +977,18 @@ HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3) HeapTuple SearchSysCache4(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4) { + if (EnableLocalSysCache()) { + HeapTuple tmp = t_thrd.lsc_cxt.lsc->systabcache.SearchTuple4(cacheId, key1, key2, key3, key4); + return tmp; + } Assert(cacheId >= 0); Assert(cacheId < SysCacheSize); Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])); Assert(u_sess->syscache_cxt.SysCache[cacheId]->cc_nkeys == 4); +#ifndef ENABLE_MULTIPLE_NODES + /* For PROCALLARGS we have a suite of specific functions! */ + Assert(cacheId != PROCALLARGS); +#endif return SearchCatCache4(u_sess->syscache_cxt.SysCache[cacheId], key1, key2, key3, key4); } @@ -978,9 +999,25 @@ HeapTuple SearchSysCache4(int cacheId, Datum key1, Datum key2, Datum key3, Datum */ void ReleaseSysCache(HeapTuple tuple) { + if (EnableLocalSysCache()) { + LocalCatCTup *ct = ResourceOwnerForgetLocalCatCTup(LOCAL_SYSDB_RESOWNER, tuple); + Assert(ct != NULL); + ct->Release(); + return; + } ReleaseCatCache(tuple); } +extern void ReleaseSysCacheList(catclist *cl) +{ + if (EnableLocalSysCache()) { + LocalCatCList *tuples = (LocalCatCList *)cl; + ResourceOwnerForgetLocalCatCList(LOCAL_SYSDB_RESOWNER, tuples); + tuples->Release(); + return; + } + ReleaseCatCacheList(cl); +} /* * SearchSysCacheCopy * @@ -1010,6 +1047,11 @@ HeapTuple SearchSysCacheCopy(int cacheId, Datum key1, Datum key2, Datum key3, Da */ bool SearchSysCacheExists(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4) { +#ifndef ENABLE_MULTIPLE_NODES + /* For PROCALLARGS we have a suite of specific functions! 
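+ * Callers should use the dedicated SearchSysCacheForProcAllArgs()/SearchSysCacheExistsForProcAllArgs() helpers instead.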
*/ + Assert(cacheId != PROCALLARGS); +#endif + HeapTuple tuple; tuple = SearchSysCache(cacheId, key1, key2, key3, key4); @@ -1126,15 +1168,25 @@ Datum SysCacheGetAttr(int cacheId, HeapTuple tup, AttrNumber attributeNumber, bo * valid (because the caller recently fetched the tuple via this same * cache), but there are cases where we have to initialize the cache here. */ - if (cacheId < 0 || cacheId >= SysCacheSize || !PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid cache ID: %d", cacheId))); - } - if (!PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId]->cc_tupdesc)) { - InitCatCachePhase2(u_sess->syscache_cxt.SysCache[cacheId], false); - Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId]->cc_tupdesc)); - } + if (EnableLocalSysCache()) { + if (cacheId < 0 || cacheId >= SysCacheSize) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid cache ID: %d", cacheId))); + } + if (!PointerIsValid(t_thrd.lsc_cxt.lsc->systabcache.GetCCTupleDesc(cacheId))) { + Assert(PointerIsValid(t_thrd.lsc_cxt.lsc->systabcache.GetCCTupleDesc(cacheId))); + } + return heap_getattr(tup, attributeNumber, t_thrd.lsc_cxt.lsc->systabcache.GetCCTupleDesc(cacheId), isNull); + } else { + if (cacheId < 0 || cacheId >= SysCacheSize || !PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid cache ID: %d", cacheId))); + } + if (!PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId]->cc_tupdesc)) { + InitCatCachePhase2(u_sess->syscache_cxt.SysCache[cacheId], false); + Assert(PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId]->cc_tupdesc)); + } - return heap_getattr(tup, attributeNumber, u_sess->syscache_cxt.SysCache[cacheId]->cc_tupdesc, isNull); + return heap_getattr(tup, attributeNumber, u_sess->syscache_cxt.SysCache[cacheId]->cc_tupdesc, isNull); + } } /* @@ -1149,6 +1201,12 @@ Datum SysCacheGetAttr(int cacheId, HeapTuple tup, AttrNumber attributeNumber, bo */ uint32 GetSysCacheHashValue(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4) { + if (EnableLocalSysCache()) { + if (cacheId < 0 || cacheId >= SysCacheSize) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid cache ID: %d", cacheId))); + } + return t_thrd.lsc_cxt.lsc->systabcache.GetCatCacheHashValue(cacheId, key1, key2, key3, key4); + } if (cacheId < 0 || cacheId >= SysCacheSize || !PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid cache ID: %d", cacheId))); } @@ -1161,10 +1219,27 @@ uint32 GetSysCacheHashValue(int cacheId, Datum key1, Datum key2, Datum key3, Dat */ struct catclist* SearchSysCacheList(int cacheId, int nkeys, Datum key1, Datum key2, Datum key3, Datum key4) { - + if (EnableLocalSysCache()) { + catclist *cl = t_thrd.lsc_cxt.lsc->systabcache.SearchCatCList(cacheId, nkeys, key1, key2, key3, key4); + return cl; + } if (cacheId < 0 || cacheId >= SysCacheSize || !PointerIsValid(u_sess->syscache_cxt.SysCache[cacheId])) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid cache ID: %d", cacheId))); } return SearchCatCacheList(u_sess->syscache_cxt.SysCache[cacheId], nkeys, key1, key2, key3, key4); } + +#ifndef ENABLE_MULTIPLE_NODES +bool SearchSysCacheExistsForProcAllArgs(Datum key1, Datum key2, Datum key3, Datum key4, Datum proArgModes) +{ + HeapTuple tuple; + + tuple = SearchSysCacheForProcAllArgs(key1, key2, key3, key4, 
proArgModes); + if (!HeapTupleIsValid(tuple)) { + return false; + } + ReleaseSysCache(tuple); + return true; +} +#endif diff --git a/src/common/backend/utils/cache/ts_cache.cpp b/src/common/backend/utils/cache/ts_cache.cpp index 456473cf6..4091552ab 100644 --- a/src/common/backend/utils/cache/ts_cache.cpp +++ b/src/common/backend/utils/cache/ts_cache.cpp @@ -78,8 +78,8 @@ static void InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) hash_seq_init(&status, hash); while ((entry = (TSAnyCacheEntry*)hash_seq_search(&status)) != NULL) entry->isvalid = false; - /* Also invalidate the current-config cache if it's pg_ts_config */ + /* when detaching from a thread, mark u_sess->tscache_cxt.TSCurrentConfigCache as invalid */ if (hash == u_sess->tscache_cxt.TSConfigCacheHash) u_sess->tscache_cxt.TSCurrentConfigCache = InvalidOid; } @@ -98,8 +98,7 @@ void init_ts_parser_cache() ctl.hcxt = u_sess->cache_mem_cxt; u_sess->tscache_cxt.TSParserCacheHash = hash_create("Tsearch parser cache", 4, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - /* Flush cache on pg_ts_parser changes */ - CacheRegisterSyscacheCallback( + CacheRegisterSessionSyscacheCallback( TSPARSEROID, InvalidateTSCacheCallBack, PointerGetDatum(u_sess->tscache_cxt.TSParserCacheHash)); } @@ -200,10 +199,9 @@ void init_ts_distionary_cache() ctl.hcxt = u_sess->cache_mem_cxt; u_sess->tscache_cxt.TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - /* Flush cache on pg_ts_dict and pg_ts_template changes */ - CacheRegisterSyscacheCallback( + CacheRegisterSessionSyscacheCallback( TSDICTOID, InvalidateTSCacheCallBack, PointerGetDatum(u_sess->tscache_cxt.TSDictionaryCacheHash)); - CacheRegisterSyscacheCallback( + CacheRegisterSessionSyscacheCallback( TSTEMPLATEOID, InvalidateTSCacheCallBack, PointerGetDatum(u_sess->tscache_cxt.TSDictionaryCacheHash)); } @@ -353,9 +351,9 @@ static void init_ts_config_cache(void) u_sess->tscache_cxt.TSConfigCacheHash = hash_create("Tsearch configuration cache", 16, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); /* Flush cache on pg_ts_config and pg_ts_config_map changes */ - CacheRegisterSyscacheCallback( + CacheRegisterSessionSyscacheCallback( TSCONFIGOID, InvalidateTSCacheCallBack, PointerGetDatum(u_sess->tscache_cxt.TSConfigCacheHash)); - CacheRegisterSyscacheCallback( + CacheRegisterSessionSyscacheCallback( TSCONFIGMAP, InvalidateTSCacheCallBack, PointerGetDatum(u_sess->tscache_cxt.TSConfigCacheHash)); } diff --git a/src/common/backend/utils/cache/typcache.cpp b/src/common/backend/utils/cache/typcache.cpp index a2968d35c..31ffe9c15 100644 --- a/src/common/backend/utils/cache/typcache.cpp +++ b/src/common/backend/utils/cache/typcache.cpp @@ -118,7 +118,6 @@ static void cache_array_element_properties(TypeCacheEntry* typentry); static bool record_fields_have_equality(TypeCacheEntry* typentry); static bool record_fields_have_compare(TypeCacheEntry* typentry); static void cache_record_field_properties(TypeCacheEntry* typentry); -static void TypeCacheRelCallback(Datum arg, Oid relid); static void load_enum_cache_data(TypeCacheEntry* tcache); static EnumItem* find_enumitem(TypeCacheEnumData* enum_data, Oid arg); static int enum_oid_cmp(const void* left, const void* right); @@ -135,12 +134,19 @@ void init_type_cache() ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(TypeCacheEntry); ctl.hash = oid_hash; - ctl.hcxt = u_sess->cache_mem_cxt; - u_sess->tycache_cxt.TypeCacheHash = - hash_create("Type information cache", 64, &ctl, HASH_ELEM | 
HASH_FUNCTION | HASH_CONTEXT); - - /* Also set up a callback for relcache SI invalidations */ - CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum)0); + if (EnableLocalSysCache()) { + ctl.hcxt = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + t_thrd.lsc_cxt.lsc->TypeCacheHash = + hash_create("Type information cache", 64, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + /* Also set up a callback for relcache SI invalidations */ + CacheRegisterThreadRelcacheCallback(TypeCacheRelCallback, (Datum)0); + } else { + ctl.hcxt = u_sess->cache_mem_cxt; + u_sess->tycache_cxt.TypeCacheHash = + hash_create("Type information cache", 64, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + /* Also set up a callback for relcache SI invalidations */ + CacheRegisterSessionRelcacheCallback(TypeCacheRelCallback, (Datum)0); + } } /* * lookup_type_cache @@ -159,12 +165,12 @@ TypeCacheEntry* lookup_type_cache(Oid type_id, int flags) bool found = false; errno_t rc; - if (u_sess->tycache_cxt.TypeCacheHash == NULL) { + if (GetTypeCacheHash() == NULL) { init_type_cache(); } /* Try to look up an existing entry */ - typentry = (TypeCacheEntry*)hash_search(u_sess->tycache_cxt.TypeCacheHash, (void*)&type_id, HASH_FIND, NULL); + typentry = (TypeCacheEntry*)hash_search(GetTypeCacheHash(), (void*)&type_id, HASH_FIND, NULL); if (typentry == NULL) { /* * If we didn't find one, we want to make one. But first look up the @@ -187,7 +193,7 @@ TypeCacheEntry* lookup_type_cache(Oid type_id, int flags) /* Now make the typcache entry */ - typentry = (TypeCacheEntry*)hash_search(u_sess->tycache_cxt.TypeCacheHash, (void*)&type_id, HASH_ENTER, &found); + typentry = (TypeCacheEntry*)hash_search(GetTypeCacheHash(), (void*)&type_id, HASH_ENTER, &found); Assert(!found); /* it wasn't there a moment ago */ rc = memset_s(typentry, sizeof(TypeCacheEntry), 0, sizeof(TypeCacheEntry)); @@ -373,15 +379,15 @@ TypeCacheEntry* lookup_type_cache(Oid type_id, int flags) eq_opr_func = get_opcode(typentry->eq_opr); if (eq_opr_func != InvalidOid) - fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo, u_sess->cache_mem_cxt); + fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo, LocalMyDBCacheMemCxt()); } if ((flags & TYPECACHE_CMP_PROC_FINFO) && typentry->cmp_proc_finfo.fn_oid == InvalidOid && typentry->cmp_proc != InvalidOid) { - fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo, u_sess->cache_mem_cxt); + fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo, LocalMyDBCacheMemCxt()); } if ((flags & TYPECACHE_HASH_PROC_FINFO) && typentry->hash_proc_finfo.fn_oid == InvalidOid && typentry->hash_proc != InvalidOid) { - fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo, u_sess->cache_mem_cxt); + fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo, LocalMyDBCacheMemCxt()); } /* @@ -478,12 +484,12 @@ static void load_rangetype_info(TypeCacheEntry* typentry) } /* set up cached fmgrinfo structs */ - fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo, u_sess->cache_mem_cxt); + fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo, LocalMyDBCacheMemCxt()); if (OidIsValid(canonicalOid)) { - fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo, u_sess->cache_mem_cxt); + fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo, LocalMyDBCacheMemCxt()); } if (OidIsValid(subdiffOid)) { - fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo, u_sess->cache_mem_cxt); + fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo, LocalMyDBCacheMemCxt()); } /* Lastly, set up link to the element type --- this marks data valid */ @@ 
-815,13 +821,13 @@ void assign_record_type_typmod(TupleDesc tupDesc) * hashtable that indexes composite-type typcache entries by their typrelid. * But it's still not clear it's worth the trouble. */ -static void TypeCacheRelCallback(Datum arg, Oid relid) +void TypeCacheRelCallback(Datum arg, Oid relid) { HASH_SEQ_STATUS status; TypeCacheEntry* typentry = NULL; - /* u_sess->tycache_cxt.TypeCacheHash must exist, else this callback wouldn't be registered */ - hash_seq_init(&status, u_sess->tycache_cxt.TypeCacheHash); + /* GetTypeCacheHash() must exist, else this callback wouldn't be registered */ + hash_seq_init(&status, GetTypeCacheHash()); while ((typentry = (TypeCacheEntry*)hash_seq_search(&status)) != NULL) { if (typentry->typtype != TYPTYPE_COMPOSITE) { continue; /* skip non-composites */ diff --git a/src/common/backend/utils/errcodes.txt b/src/common/backend/utils/errcodes.txt index 931158935..f497becd4 100644 --- a/src/common/backend/utils/errcodes.txt +++ b/src/common/backend/utils/errcodes.txt @@ -397,7 +397,6 @@ Section: Class 42 - Syntax Error or Access Rule Violation 42705 E ERRCODE_UNDEFINED_KEY undefined_key 42711 E ERRCODE_DUPLICATE_KEY duplicate_key 42713 E ERRCODE_UNDEFINED_CL_COLUMN undefined_cl_column -42716 E ERRCODE_CL_FUNCTION_UPDATE internal - cl function being updatd Section: Class 44 - WITH CHECK OPTION Violation diff --git a/src/common/backend/utils/error/be_module.cpp b/src/common/backend/utils/error/be_module.cpp index e429edd27..21f0b527a 100755 --- a/src/common/backend/utils/error/be_module.cpp +++ b/src/common/backend/utils/error/be_module.cpp @@ -51,6 +51,7 @@ const module_data module_map[] = {{MOD_ALL, "ALL"}, {MOD_EXECUTOR, "EXECUTOR"}, {MOD_OPFUSION, "OPFUSION"}, {MOD_GPC, "GPC"}, + {MOD_GSC, "GSC"}, {MOD_VEC_EXECUTOR, "VEC_EXECUTOR"}, {MOD_STREAM, "STREAM"}, {MOD_LLVM, "LLVM"}, @@ -122,6 +123,8 @@ const module_data module_map[] = {{MOD_ALL, "ALL"}, {MOD_SPI, "SPI"}, {MOD_NEST_COMPILE, "NEST_COMPILE"}, {MOD_RESOWNER, "RESOWNER"}, + {MOD_LOGICAL_DECODE, "LOGICAL_DECODE"}, + {MOD_GPRC, "GPRC"}, /* add your module name above */ {MOD_MAX, "BACKEND"}}; diff --git a/src/common/backend/utils/error/elog.cpp b/src/common/backend/utils/error/elog.cpp index 509be4319..f517e3500 100644 --- a/src/common/backend/utils/error/elog.cpp +++ b/src/common/backend/utils/error/elog.cpp @@ -133,6 +133,8 @@ extern THR_LOCAL char* g_instance.attr.attr_common.event_source; static void write_eventlog(int level, const char* line, int len); #endif +static const int CREATE_ALTER_SUBSCRIPTION = 16; + /* Macro for checking t_thrd.log_cxt.errordata_stack_depth is reasonable */ #define CHECK_STACK_DEPTH() \ do { \ @@ -167,6 +169,27 @@ static int output_backtrace_to_log(StringInfoData* pOutBuf); static void write_asp_chunks(char *data, int len, bool end); static void write_asplog(char *data, int len, bool end); +#define MASK_OBS_PATH() \ + do { \ + char* childStmt = mask_funcs3_parameters(yylval.str); \ + if (childStmt != NULL) { \ + if (mask_string == NULL) { \ + mask_string = MemoryContextStrdup( \ + SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_SECURITY), query_string); \ + } \ + if (unlikely(yyextra.literallen != (int)strlen(childStmt))) { \ + ereport(ERROR, \ + (errcode(ERRCODE_SYNTAX_ERROR), \ + errmsg("parse error on statement %s.", childStmt))); \ + } \ + rc = memcpy_s(mask_string + yylloc + 1, yyextra.literallen, childStmt, yyextra.literallen); \ + securec_check(rc, "\0", "\0"); \ + rc = memset_s(childStmt, yyextra.literallen, 0, yyextra.literallen); \ + securec_check(rc, "", ""); \ + 
pfree(childStmt); \ + } \ + } while (0) + /* * in_error_recursion_trouble --- are we at risk of infinite error recursion? * @@ -2411,6 +2434,11 @@ static void log_line_prefix(StringInfo buf, ErrorData* edata) (int)u_sess->globalSessionId.nodeId, u_sess->globalSessionId.sessionId, u_sess->globalSessionId.seq); break; + case 'T': + if (u_sess->trace_cxt.trace_id[0]) { + appendStringInfo(buf, "%s", u_sess->trace_cxt.trace_id); + } + break; case '%': appendStringInfoChar(buf, '%'); break; @@ -2719,7 +2747,7 @@ static void write_csvlog(ErrorData* edata) * Unpack MAKE_SQLSTATE code. Note that this returns a pointer to a * static THR_LOCAL buffer. */ -char* unpack_sql_state(int sql_state) +const char* unpack_sql_state(int sql_state) { char* buf = t_thrd.buf_cxt.unpack_sql_state_buf; int i; @@ -2734,7 +2762,7 @@ char* unpack_sql_state(int sql_state) } /* if sqlcode is initialized by the database, it must be positive; if it is initialized by the user, then it must be negative */ -char *plpgsql_get_sqlstate(int sqlcode) +const char *plpgsql_get_sqlstate(int sqlcode) { if (sqlcode >= 0) { return unpack_sql_state(sqlcode); @@ -3091,9 +3119,8 @@ static void send_message_to_server_log(ErrorData* edata) } /* If in the syslogger process, try to write messages direct to file */ - if (t_thrd.role == SYSLOGGER && - t_thrd.log_cxt.Log_destination & LOG_DESTINATION_STDERR && - t_thrd.logger.syslogFile != NULL) { + FILE* logfile = LOG_DESTINATION_CSVLOG ? t_thrd.logger.csvlogFile : t_thrd.logger.syslogFile; + if (t_thrd.role == SYSLOGGER && logfile != NULL) { write_syslogger_file(buf.data, buf.len, LOG_DESTINATION_STDERR); } @@ -4197,6 +4224,14 @@ char* mask_funcs3_parameters(const char* query_string) return mask_string; } +static void inline ClearYylval(const core_YYSTYPE *yylval) +{ + int rc = memset_s(yylval->str, strlen(yylval->str), 0, strlen(yylval->str)); + securec_check(rc, "\0", "\0"); + rc = memset_s((char*)yylval->keyword, strlen(yylval->keyword), 0, strlen(yylval->keyword)); + securec_check(rc, "\0", "\0"); +} + /* * Mask the password in statement CREATE ROLE, CREATE USER, ALTER ROLE, ALTER USER, CREATE GROUP * SET ROLE, CREATE DATABASE LINK, and some function * @@ -4212,7 +4247,8 @@ static char* mask_Password_internal(const char* query_string) int currToken = 59; /* initialize prevToken as ';' */ bool isPassword = false; char* mask_string = NULL; - const char* funcs[] = {"dblink_connect"}; /* the function list need mask */ + /* the list of functions that need masking */ + const char* funcs[] = {"dblink_connect", "create_credential", "pg_create_physical_replication_slot_extern"}; int funcNum = sizeof(funcs) / sizeof(funcs[0]); int position[16] = {0}; int length[16] = {0}; @@ -4221,6 +4257,7 @@ static char* mask_Password_internal(const char* query_string) bool isChildStmt = false; errno_t rc = EOK; int truncateLen = 0; /* accumulate total length for each truncate */ + YYLTYPE conninfoStartPos = 0; /* connection start position for CreateSubscriptionStmt */ /* the functions need to mask all contents */ const char* funCrypt[] = {"gs_encrypt_aes128", "gs_decrypt_aes128", "gs_encrypt", "gs_decrypt"}; @@ -4256,6 +4293,7 @@ static char* mask_Password_internal(const char* query_string) * 13 - for funcs3 * 14 - create/alter text search dictionary * 15 - for funCrypt + * 16 - create/alter subscription (CREATE_ALTER_SUBSCRIPTION) */ int curStmtType = 0; int prevToken[5] = {0}; @@ -4317,7 +4355,30 @@ static char* mask_Password_internal(const char* query_string) SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_SECURITY), query_string); } - rc = 
memcpy_s(mask_string + yylloc + 1, yyextra.literallen, childStmt, strlen(childStmt)); + /* + * After masking the child statement, the child statement length may be larger than the original query + * statement length. So we should enlarge the buffer size. + */ + int childStmtLen = strlen(childStmt); + int subQueryLen = strlen(yylval.str); + int maskStringLen = strlen(mask_string); + if (subQueryLen < childStmtLen) { + /* Need more space; the enlarged length is (childStmtLen - subQueryLen) */ + maskStringLen += (childStmtLen - subQueryLen) + 1; + char* maskStrNew = (char*)selfpalloc0(maskStringLen); + rc = memcpy_s(maskStrNew, maskStringLen, mask_string, strlen(mask_string)); + securec_check(rc, "\0", "\0"); + selfpfree(mask_string); + mask_string = maskStrNew; + } + + /* + * After enlarging the buffer, the newly allocated bytes are '0', so strlen() would + * return a buffer length smaller than the real one. + * So use 'maskStringLen' here to indicate the real buffer length. + */ + rc = memcpy_s(mask_string + yylloc + 1, maskStringLen - yylloc, + childStmt, strlen(childStmt) + 1); securec_check(rc, "\0", "\0"); rc = memset_s(childStmt, strlen(childStmt), 0, strlen(childStmt)); securec_check(rc, "", ""); @@ -4344,8 +4405,19 @@ static char* mask_Password_internal(const char* query_string) if (ch == '\'' || ch == '\"') ++position[idx]; + + /* Calculate the difference between the original password length and the masked password length */ + position[idx] -= truncateLen; + length[idx] = strlen(yylval.str); ++idx; + + /* record the conninfo start pos; we will use it to calculate the actual length of conninfo */ + if (curStmtType == CREATE_ALTER_SUBSCRIPTION) { + conninfoStartPos = yylloc; + /* yylval stores the conninfo, so we clear it here */ + ClearYylval(&yylval); + } /* * use a fixed length of masked password. * For a matched token, position[idx] is query_string's position, but mask_string is truncated, @@ -4363,18 +4435,31 @@ static char* mask_Password_internal(const char* query_string) * the len of password may be shorter than actual, * we need to find the start position of password word by looking forward. */ - char wordHead = position[i] > 0 ? query_string[position[i] - 1] : '\0'; + char wordHead = position[i] > 0 ? mask_string[position[i] - 1] : '\0'; if (isPassword && wordHead != '\0' && wordHead != '\'' && wordHead != '\"') { while (position[i] > 0 && !isspace(wordHead) && wordHead != '\'' && wordHead != '\"') { position[i]--; - wordHead = query_string[position[i] - 1]; + wordHead = mask_string[position[i] - 1]; } - length[i] = strlen(query_string + position[i]); + length[i] = strlen(mask_string + position[i]); /* if the last char is ';', we should keep it */ - if (query_string[position[i] + length[i] - 1] == ';') { + if (mask_string[position[i] + length[i] - 1] == ';') { length[i]--; } } + + /* + * After core_yylex, doubled quotation marks will have been parsed down to a single quotation mark. + * Calculate the length of '\'' and double this length. + */ + int lengthOfQuote = 0; + for (int len = 0; len < length[i]; len++) { + if ((yylval.str != NULL) && (yylval.str[len] == '\'')) { + lengthOfQuote++; + } + } + length[i] += lengthOfQuote; + if (length[i] < maskLen) { /* need more space. 
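* (the buffer grows by maskLen - length[i] bytes, plus one byte for the terminator, matching the plen computed below) 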
*/ int plen = strlen(mask_string) + maskLen - length[i] + 1; @@ -4385,23 +4470,23 @@ static char* mask_Password_internal(const char* query_string) mask_string = maskStrNew; } - char* maskBegin = mask_string + position[i] - truncateLen; - int copySize = strlen(mask_string) - (position[i] - truncateLen) - length[i] + 1; + char* maskBegin = mask_string + position[i]; + int copySize = strlen(mask_string) - position[i] - length[i] + 1; rc = memmove_s(maskBegin + maskLen, copySize, maskBegin + length[i], copySize); securec_check(rc, "", ""); - if (length[i] > maskLen) { - truncateLen += (length[i] - maskLen); - } + /* + * After masking the password, the original password has been transformed to '*' characters, whose length equals + * u_sess->attr.attr_security.Password_min_length. + * So we should record the difference between the original password length and the masked password length. + */ + truncateLen = strlen(query_string) - strlen(mask_string); rc = memset_s(maskBegin, maskLen, '*', maskLen); securec_check(rc, "", ""); need_clear_yylval = true; } if (need_clear_yylval) { - rc = memset_s(yylval.str, strlen(yylval.str), 0, strlen(yylval.str)); - securec_check(rc, "\0", "\0"); - rc = memset_s((char*)yylval.keyword, strlen(yylval.keyword), 0, strlen(yylval.keyword)); - securec_check(rc, "\0", "\0"); + ClearYylval(&yylval); need_clear_yylval = false; } idx = 0; @@ -4701,6 +4786,13 @@ static char* mask_Password_internal(const char* query_string) } break; case IDENT: + if (curStmtType == 14) { + if (pg_strncasecmp(yylval.str, "obs", strlen("obs")) == 0) { + MASK_OBS_PATH(); + } + curStmtType = 0; + } + if ((prevToken[1] == SERVER && prevToken[2] == OPTIONS) || (prevToken[1] == FOREIGN && prevToken[2] == TABLE && prevToken[3] == OPTIONS)) { if (pg_strcasecmp(yylval.str, "secret_access_key") == 0) { @@ -4727,6 +4819,15 @@ static char* mask_Password_internal(const char* query_string) } else { curStmtType = 0; } + } else if (prevToken[1] == ALTER && prevToken[2] == SUBSCRIPTION) { + /* + * For SUBSCRIPTION, there are 3 cases that need to mask the conninfo (which holds username and password): + * 1. CREATE SUBSCRIPTION name CONNECTION Sconst. This is covered by case CONNECTION. + * 2. ALTER SUBSCRIPTION name CONNECTION Sconst. This is covered by case CONNECTION. + * 3. ALTER SUBSCRIPTION name SET (conninfo='xx'). Here we deal with this case. + */ + curStmtType = pg_strcasecmp(yylval.str, "conninfo") == 0 ? 
CREATE_ALTER_SUBSCRIPTION : 0; + idx = 0; } break; case SCONST: @@ -4745,25 +4846,68 @@ static char* mask_Password_internal(const char* query_string) } } else if (curStmtType == 14) { if (pg_strncasecmp(yylval.str, "obs", strlen("obs")) == 0) { - char* childStmt = mask_funcs3_parameters(yylval.str); - if (childStmt != NULL) { - if (mask_string == NULL) { - mask_string = MemoryContextStrdup( - SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_SECURITY), query_string); - } - if (unlikely(yyextra.literallen != (int)strlen(childStmt))) { - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("parse error on statement %s.", childStmt))); - } - rc = memcpy_s(mask_string + yylloc + 1, yyextra.literallen, childStmt, yyextra.literallen); - securec_check(rc, "\0", "\0"); - rc = memset_s(childStmt, yyextra.literallen, 0, yyextra.literallen); - securec_check(rc, "", ""); - pfree(childStmt); - } + MASK_OBS_PATH(); } curStmtType = 0; + } else if (curStmtType == CREATE_ALTER_SUBSCRIPTION && + (prevToken[0] == '=' || prevToken[1] == ALTER)) { + /* + * ALTER SUBSCRIPTION name SET (conninfo='xx') + * ALTER SUBSCRIPTION name CONNECTION Sconst + * mask connection info + */ + if (mask_string == NULL) { + mask_string = MemoryContextStrdup( + SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_SECURITY), query_string); + } + /* + * mask to the end of the string, because length[0] may be shorter than the actual length. + * For example: + * ALTER SUBSCRIPTION name SET (conninfo='host=''1.1.1.1'' password=''password_123'''); + * ALTER SUBSCRIPTION name CONNECTION 'host=''1.1.1.1'' password=''password_123'''; + */ + int maskLen = strlen(query_string + position[0]); + /* if the last char is ';', we should keep it */ + if (query_string[position[0] + maskLen - 1] == ';') { + maskLen--; + } + rc = memset_s(mask_string + position[0], maskLen, '*', maskLen); + securec_check(rc, "", ""); + /* yylval stores the conninfo, so we clear it here */ + ClearYylval(&yylval); + idx = 0; + curStmtType = 0; + } + break; + case SUBSCRIPTION: + if (prevToken[0] == CREATE || prevToken[0] == ALTER) { + prevToken[1] = prevToken[0]; + prevToken[2] = SUBSCRIPTION; + } + break; + case CONNECTION: + if (prevToken[2] == SUBSCRIPTION) { + curStmtType = CREATE_ALTER_SUBSCRIPTION; + prevToken[3] = CONNECTION; + } + break; + case PUBLICATION: + if (curStmtType == CREATE_ALTER_SUBSCRIPTION && prevToken[2] == SUBSCRIPTION && + prevToken[3] == CONNECTION) { + /* + * CREATE SUBSCRIPTION name CONNECTION Sconst PUBLICATION xxx, try to mask Sconst. + * It should not happen that conninfoStartPos < 0; if it does, we mask the + * string from the beginning to the current pos to ensure all sensitive info is masked. 
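+ * For example, a hypothetical + * CREATE SUBSCRIPTION sub1 CONNECTION 'host=1.1.1.1 password=xxx' PUBLICATION pub1; + * is recorded with everything from conninfoStartPos up to this PUBLICATION token + * overwritten by '*' characters. 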
+ */ + if (mask_string == NULL) { + mask_string = MemoryContextStrdup( + SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_SECURITY), query_string); + } + int maskLen = yylloc - conninfoStartPos; + rc = memset_s(mask_string + conninfoStartPos, maskLen, '*', maskLen); + securec_check(rc, "", ""); + idx = 0; + curStmtType = 0; } break; default: diff --git a/src/common/backend/utils/fmgr/fmgr.cpp b/src/common/backend/utils/fmgr/fmgr.cpp index e07d98dbd..7e1ca35cc 100755 --- a/src/common/backend/utils/fmgr/fmgr.cpp +++ b/src/common/backend/utils/fmgr/fmgr.cpp @@ -2515,7 +2515,7 @@ struct varlena* pg_detoast_datum_copy(struct varlena* datum) } } -struct varlena* pg_detoast_datum_slice(struct varlena* datum, int32 first, int32 count) +struct varlena* pg_detoast_datum_slice(struct varlena* datum, int64 first, int32 count) { /* Only get the specified portion from the toast rel */ return heap_tuple_untoast_attr_slice(datum, first, count); diff --git a/src/common/backend/utils/fmgr/funcapi.cpp b/src/common/backend/utils/fmgr/funcapi.cpp index ba1959f8e..db0383191 100644 --- a/src/common/backend/utils/fmgr/funcapi.cpp +++ b/src/common/backend/utils/fmgr/funcapi.cpp @@ -17,6 +17,7 @@ #include "knl/knl_variable.h" #include "catalog/gs_encrypted_proc.h" #include "catalog/namespace.h" +#include "catalog/pg_language.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "funcapi.h" @@ -173,6 +174,100 @@ static void shutdown_multi_func_call(Datum arg) MemoryContextDelete(funcctx->multi_call_memory_ctx); } +bool is_function_with_plpgsql_language_and_outparam(Oid funcid) +{ +#ifndef ENABLE_MULTIPLE_NODES + if (!enable_out_param_override() || u_sess->attr.attr_sql.sql_compatibility != A_FORMAT || funcid == InvalidOid) { + return false; + } +#else + return false; +#endif + char* funclang = get_func_langname(funcid); + if (strcasecmp(funclang, "plpgsql") != 0) { + pfree(funclang); + return false; + } + pfree(funclang); + + HeapTuple tp; + bool existOutParam = false; + bool isNull = false; + tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + if (!HeapTupleIsValid(tp)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + (errmsg("cache lookup failed for function %u", funcid), errdetail("N/A."), + errcause("System error."), erraction("Contact engineer to support.")))); + } + Datum pprokind = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_prokind, &isNull); + if ((!isNull && !PROC_IS_FUNC(pprokind))) { + ReleaseSysCache(tp); + return false; + } + isNull = false; + (void)SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_proallargtypes, &isNull); + if (!isNull) { + existOutParam = true; + } + ReleaseSysCache(tp); + return existOutParam; +} + +TupleDesc get_func_param_desc(HeapTuple tp, Oid resultTypeId, int* return_out_args_num) +{ + Oid *p_argtypes = NULL; + char **p_argnames = NULL; + char *p_argmodes = NULL; + int p_nargs = get_func_arg_info(tp, &p_argtypes, &p_argnames, &p_argmodes); + char* p_name = NameStr(((Form_pg_proc)GETSTRUCT(tp))->proname); + int out_args_num = 0; + for (int i = 0; i < p_nargs; i++) { + if (p_argmodes[i] == 'o' || p_argmodes[i] == 'b') { + out_args_num++; + } + } + if (return_out_args_num != NULL) { + *return_out_args_num = out_args_num; + } + /* The return field is in the first column, and the parameter starts in the second column. 
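+ * For example, a hypothetical plpgsql function f(a IN int, b OUT int, c INOUT int) + * returning int yields a three-column descriptor: (f, b, c). 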
*/ + TupleDesc resultTupleDesc = CreateTemplateTupleDesc(out_args_num + 1, false); + TupleDescInitEntry(resultTupleDesc, (AttrNumber)1, p_name, resultTypeId, -1, 0); + int attindex = 2; + for (int i = 0; i < p_nargs; i++) { + if (p_argmodes[i] == 'o' || p_argmodes[i] == 'b') { + TupleDescInitEntry(resultTupleDesc, (AttrNumber)attindex, p_argnames[i], p_argtypes[i], p_argmodes[i], + 0); + attindex++; + } + } + + return resultTupleDesc; +} + +void construct_func_param_desc(Oid funcid, TypeFuncClass* typclass, TupleDesc* tupdesc, Oid* resultTypeId) +{ + if (tupdesc == NULL || resultTypeId == NULL || typclass == NULL) { + return; + } + Oid paramTypeOid = is_function_with_plpgsql_language_and_outparam(funcid); + if (paramTypeOid == InvalidOid) { + return; + } + /* Construct argument tuple descriptor */ + HeapTuple tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); + if (!HeapTupleIsValid(tp)) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmodule(MOD_PLSQL), + errmsg("Cache lookup failed for function %u", funcid), + errdetail("Fail to get the function param type oid."))); + } + + *tupdesc = get_func_param_desc(tp, *resultTypeId); + *resultTypeId = RECORDOID; + *typclass = TYPEFUNC_COMPOSITE; + + ReleaseSysCache(tp); +} + /* * get_call_result_type * Given a function's call info record, determine the kind of datatype @@ -215,6 +310,9 @@ TypeFuncClass get_expr_result_type(Node* expr, Oid* resultTypeId, TupleDesc* res if (expr && IsA(expr, FuncExpr)) { result = internal_get_result_type(((FuncExpr*)expr)->funcid, expr, NULL, resultTypeId, resultTupleDesc, resultTypeId_orig); + + /* A_FORMAT return param separation */ + construct_func_param_desc(((FuncExpr*)expr)->funcid, &result, resultTupleDesc, resultTypeId); } else if (expr && IsA(expr, OpExpr)) { result = internal_get_result_type(get_opcode(((OpExpr*)expr)->opno), expr, NULL, resultTypeId, resultTupleDesc, NULL); @@ -732,6 +830,7 @@ static TypeFuncClass get_type_func_class(Oid typid) case TYPTYPE_DOMAIN: case TYPTYPE_ENUM: case TYPTYPE_RANGE: + case TYPTYPE_TABLEOF: return TYPEFUNC_SCALAR; case TYPTYPE_PSEUDO: diff --git a/src/common/backend/utils/hash/dynahash.cpp b/src/common/backend/utils/hash/dynahash.cpp index 0702cc5a6..537a95d20 100644 --- a/src/common/backend/utils/hash/dynahash.cpp +++ b/src/common/backend/utils/hash/dynahash.cpp @@ -235,6 +235,8 @@ HTAB* hash_create(const char* tabname, long nelem, HASHCTL* info, int flags) } } + MemoryContext currentDynaHashCxt = t_thrd.dyhash_cxt.CurrentDynaHashCxt; + /* Initialize the hash header, plus a copy of the table name */ hashp = (HTAB*)DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1); MemSet(hashp, 0, sizeof(HTAB)); @@ -318,7 +320,7 @@ HTAB* hash_create(const char* tabname, long nelem, HASHCTL* info, int flags) /* setup hash table defaults */ hashp->hctl = NULL; hashp->dir = NULL; - hashp->hcxt = t_thrd.dyhash_cxt.CurrentDynaHashCxt; + hashp->hcxt = currentDynaHashCxt; hashp->isshared = false; } diff --git a/src/common/backend/utils/init/globals.cpp b/src/common/backend/utils/init/globals.cpp index d08c64d36..95d047e12 100644 --- a/src/common/backend/utils/init/globals.cpp +++ b/src/common/backend/utils/init/globals.cpp @@ -59,9 +59,10 @@ bool open_join_children = true; bool will_shutdown = false; /* hard-wired binary version number */ -const uint32 GRAND_VERSION_NUM = 92507; +const uint32 GRAND_VERSION_NUM = 92602; -const uint32 SUPPORT_HASH_XLOG_VERSION_NUM = 92501; +const uint32 PREDPUSH_SAME_LEVEL_VERSION_NUM = 92522; +const uint32 
UPSERT_WHERE_VERSION_NUM = 92514; const uint32 FUNC_PARAM_COL_VERSION_NUM = 92500; const uint32 SUBPARTITION_VERSION_NUM = 92436; const uint32 DEFAULT_MAT_CTE_NUM = 92429; @@ -83,7 +84,7 @@ const uint32 RANGE_LIST_DISTRIBUTION_VERSION_NUM = 92272; const uint32 BACKUP_SLOT_VERSION_NUM = 92282; const uint32 ML_OPT_MODEL_VERSION_NUM = 92284; const uint32 FIX_SQL_ADD_RELATION_REF_COUNT = 92291; -const uint32 INPLACE_UPDATE_WERSION_NUM = 92350; +const uint32 INPLACE_UPDATE_VERSION_NUM = 92350; const uint32 GENERATED_COL_VERSION_NUM = 92355; const uint32 SEGMENT_PAGE_VERSION_NUM = 92360; const uint32 COMMENT_PROC_VERSION_NUM = 92372; @@ -96,25 +97,37 @@ const uint32 SWCB_VERSION_NUM = 92427; const uint32 COMMENT_ROWTYPE_TABLEOF_VERSION_NUM = 92513; const uint32 PRIVS_DIRECTORY_VERSION_NUM = 92460; const uint32 COMMENT_RECORD_PARAM_VERSION_NUM = 92484; -const uint32 ANALYZER_HOOK_VERSION_NUM = 92502; -const uint32 PUBLICATION_VERSION_NUM = 92504; +const uint32 SCAN_BATCH_MODE_VERSION_NUM = 92568; +const uint32 PUBLICATION_VERSION_NUM = 92580; /* Version number of the guc parameter backend_version added in V500R001C20 */ const uint32 V5R1C20_BACKEND_VERSION_NUM = 92305; -/* Version number starting from V500R002C00 */ +/* Version number starting from V500R002C10 */ const uint32 V5R2C00_START_VERSION_NUM = 92350; -/* Version number of the guc parameter backend_version added in V500R002C00 */ +/* Version number of the guc parameter backend_version added in V500R002C10 */ const uint32 V5R2C00_BACKEND_VERSION_NUM = 92412; +const uint32 ANALYZER_HOOK_VERSION_NUM = 92592; + /* This variable indicates whether the instance is in the process of upgrade as a whole */ uint32 volatile WorkingGrandVersionNum = GRAND_VERSION_NUM; -const uint32 ENHANCED_TUPLE_LOCK_VERSION_NUM = 92506; +const uint32 ENHANCED_TUPLE_LOCK_VERSION_NUM = 92583; const uint32 TWOPHASE_FILE_VERSION = 92414; +const uint32 HASUID_VERSION_NUM = 92550; +const uint32 WAIT_N_TUPLE_LOCK_VERSION_NUM = 92573; + +const uint32 PARALLEL_DECODE_VERSION_NUM = 92556; + +const uint32 CREATE_INDEX_CONCURRENTLY_DIST_VERSION_NUM = 92569; + +const uint32 SUPPORT_DATA_REPAIR = 92579; bool InplaceUpgradePrecommit = false; +const uint32 DISASTER_READ_VERSION_NUM = 92592; + #ifdef PGXC bool useLocalXid = false; #endif diff --git a/src/common/backend/utils/init/miscinit.cpp b/src/common/backend/utils/init/miscinit.cpp index c1c09411b..8664009a8 100644 --- a/src/common/backend/utils/init/miscinit.cpp +++ b/src/common/backend/utils/init/miscinit.cpp @@ -642,10 +642,12 @@ Oid get_pgxc_logic_groupoid(const char* rolename) * NodeGroupCallback * Syscache inval callback function */ -static void NodeGroupCallback(Datum arg, int cacheid, uint32 hashvalue) +static void SessionNodeGroupCallback(Datum arg, int cacheid, uint32 hashvalue) { u_sess->misc_cxt.current_nodegroup_mode = NG_UNKNOWN; - RelationCacheInvalidateBuckets(); + if (!EnableLocalSysCache()) { + RelationCacheInvalidateBuckets(); + } } /* @@ -655,7 +657,7 @@ static void NodeGroupCallback(Datum arg, int cacheid, uint32 hashvalue) static void RegisterNodeGroupCacheCallback() { if (IS_PGXC_COORDINATOR && !u_sess->misc_cxt.nodegroup_callback_registered) { - CacheRegisterSyscacheCallback(PGXCGROUPOID, NodeGroupCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(PGXCGROUPOID, SessionNodeGroupCallback, (Datum)0); u_sess->misc_cxt.nodegroup_callback_registered = true; } } @@ -790,7 +792,7 @@ bool has_rolvcadmin(Oid roleid) static void DecreaseUserCountReuse(Oid roleid, bool ispoolerreuse) { - if (ispoolerreuse == 
true && ENABLE_THREAD_POOL) { + if (ispoolerreuse == true && IS_THREAD_POOL_WORKER) { DecreaseUserCount(roleid); } } @@ -906,7 +908,7 @@ void InitializeSessionUserId(const char* rolename, bool ispoolerreuse, Oid usero /* Also mark our PGPROC entry with the authenticated user id */ /* (We assume this is an atomic store so no lock is needed) */ - DecreaseUserCountReuse(roleid, ispoolerreuse); + DecreaseUserCountReuse(t_thrd.proc->roleId, ispoolerreuse); t_thrd.proc->roleId = roleid; /* @@ -918,7 +920,7 @@ void InitializeSessionUserId(const char* rolename, bool ispoolerreuse, Oid usero /* * Is role allowed to login at all? */ - if (ENABLE_THREAD_POOL) { + if (IS_THREAD_POOL_WORKER) { IncreaseUserCount(roleid); } @@ -967,11 +969,12 @@ void InitializeSessionUserIdStandalone(void) #ifdef ENABLE_MULTIPLE_NODES AssertState(!IsUnderPostmaster || IsAutoVacuumWorkerProcess() || IsJobSchedulerProcess() || IsJobWorkerProcess() || AM_WAL_SENDER || IsTxnSnapCapturerProcess() || IsTxnSnapWorkerProcess() || IsUndoWorkerProcess() || - CompactionProcess::IsTsCompactionProcess() || IsRbCleanerProcess() || IsRbWorkerProcess()); + CompactionProcess::IsTsCompactionProcess() || IsRbCleanerProcess() || IsRbWorkerProcess() || + t_thrd.role == PARALLEL_DECODE || t_thrd.role == LOGICAL_READ_RECORD); #else /* ENABLE_MULTIPLE_NODES */ AssertState(!IsUnderPostmaster || IsAutoVacuumWorkerProcess() || IsJobSchedulerProcess() || IsJobWorkerProcess() || AM_WAL_SENDER || IsTxnSnapCapturerProcess() || IsTxnSnapWorkerProcess() || IsUndoWorkerProcess() || IsRbCleanerProcess() || - IsRbWorkerProcess()); + IsRbWorkerProcess() || t_thrd.role == PARALLEL_DECODE || t_thrd.role == LOGICAL_READ_RECORD); #endif /* ENABLE_MULTIPLE_NODES */ /* In pooler stateless reuse mode, to reset session userid */ @@ -1139,6 +1142,37 @@ static void UnlinkLockFile(int status, Datum filename) } } +/* + * proc_exit callback to unlock a lockfile. + */ +static void UnLockPidLockFile(int status, Datum fileDes) +{ + int fd = DatumGetInt32(fileDes); + + if (fd != -1) { + close(fd); + } +} + +static void CreatePidLockFile(const char* filename) +{ + int fd = -1; + char pid_lock_file[MAXPGPATH] = {0}; + int rc = snprintf_s(pid_lock_file, MAXPGPATH, MAXPGPATH - 1, "%s.lock", filename); + securec_check_ss(rc, "", ""); + + if ((fd = open(pid_lock_file, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR)) == -1) { + ereport(FATAL, (errcode_for_file_access(), errmsg("could not create or open lock file \"%s\": %m", pid_lock_file))); + } + + if (flock(fd, LOCK_EX | LOCK_NB) == -1) { + close(fd); + ereport(FATAL, (errcode_for_file_access(), errmsg("could not lock file \"%s\": %m", pid_lock_file))); + } + + on_proc_exit(UnLockPidLockFile, Int32GetDatum(fd)); +} + /* * Create a lockfile. 
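* (When isDDLock, CreateLockFile() now first calls CreatePidLockFile() above, which takes an exclusive flock() on "<filename>.lock"; a competing process therefore fails fast instead of racing the stale-PID checks below.) 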
* @@ -1157,6 +1191,11 @@ static void CreateLockFile(const char* filename, bool amPostmaster, bool isDDLoc pid_t my_pid, my_p_pid, my_gp_pid; const char* envvar = NULL; + /* Grab a file lock to establish our priority to process postmaster.pid */ + if (isDDLock) { + CreatePidLockFile(filename); + } + /* * If the PID in the lockfile is our own PID or our parent's or * grandparent's PID, then the file must be stale (probably left over from @@ -1863,4 +1902,4 @@ bool contain_backend_version(uint32 version_number) { return ((version_number >= V5R1C20_BACKEND_VERSION_NUM && version_number < V5R2C00_START_VERSION_NUM) || (version_number >= V5R2C00_BACKEND_VERSION_NUM)); -} \ No newline at end of file +} diff --git a/src/common/backend/utils/init/postinit.cpp b/src/common/backend/utils/init/postinit.cpp index c4e7b212e..f448b775a 100644 --- a/src/common/backend/utils/init/postinit.cpp +++ b/src/common/backend/utils/init/postinit.cpp @@ -59,6 +59,7 @@ #include "postmaster/snapcapturer.h" #include "postmaster/rbcleaner.h" #include "replication/catchup.h" +#include "replication/logicalfuncs.h" #include "replication/walsender.h" #include "storage/buf/bufmgr.h" #include "storage/smgr/fd.h" @@ -92,6 +93,7 @@ #include "instruments/percentile.h" #include "instruments/instr_workload.h" #include "gs_policy/policy_common.h" +#include "utils/knl_relcache.h" #ifndef WIN32_ONLY_COMPILER #include "dynloader.h" #else @@ -223,7 +225,7 @@ static HeapTuple GetDatabaseTuple(const char* dbname) */ relation = heap_open(DatabaseRelationId, AccessShareLock); scan = systable_beginscan( - relation, DatabaseNameIndexId, u_sess->relcache_cxt.criticalSharedRelcachesBuilt, NULL, 1, key); + relation, DatabaseNameIndexId, LocalRelCacheCriticalSharedRelcachesBuilt(), NULL, 1, key); tuple = systable_getnext(scan); @@ -260,7 +262,7 @@ static HeapTuple GetDatabaseTupleByOid(Oid dboid) */ relation = heap_open(DatabaseRelationId, AccessShareLock); scan = systable_beginscan( - relation, DatabaseOidIndexId, u_sess->relcache_cxt.criticalSharedRelcachesBuilt, NULL, 1, key); + relation, DatabaseOidIndexId, LocalRelCacheCriticalSharedRelcachesBuilt(), NULL, 1, key); tuple = systable_getnext(scan); @@ -324,6 +326,10 @@ static void PerformAuthentication(Port* port) else ereport(LOG, (errmsg("connection authorized: user=%s database=%s", port->user_name, port->database_name))); } + if (AM_WAL_DB_SENDER) { + Oid userId = get_role_oid(port->user_name, false); + CheckLogicalPremissions(userId); + } /* INSTR: update user login counter */ if (IsUnderPostmaster && !IsBootstrapProcessingMode() && !dummyStandbyMode) @@ -784,9 +790,9 @@ static void process_startup_options(Port* port, bool am_superuser) } /* check 2 -- forbid non-initial users, except gs_roach and during cluster resizing with gs_redis */ - if (!dummyStandbyMode && GetRoleOid(port->user_name) != INITIAL_USER_ID && + if (!dummyStandbyMode && GetRoleOid(port->user_name) != INITIAL_USER_ID && !AM_WAL_HADR_SENDER && !(ClusterResizingInProgress() && u_sess->proc_cxt.clientIsGsredis) && !u_sess->proc_cxt.clientIsGsroach && - !AM_WAL_HADR_SENDER) { + !AM_WAL_HADR_CN_SENDER) { ereport(FATAL, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Inner maintenance tools only for the initial user."))); } @@ -1151,12 +1157,10 @@ void PostgresInitializer::SetDatabaseAndUser(const char* in_dbname, Oid dboid, c void PostgresInitializer::InitFencedSysCache() { + m_dboid = u_sess->proc_cxt.MyDatabaseId; InitSysCache(); - u_sess->proc_cxt.MyDatabaseTableSpace = DEFAULTTABLESPACE_OID; - SetFencedMasterDatabase(); - 
@@ -1151,12 +1157,10 @@ void PostgresInitializer::SetDatabaseAndUser(const char* in_dbname, Oid dboid, c
 }
 
 void PostgresInitializer::InitFencedSysCache()
 {
+    m_dboid = u_sess->proc_cxt.MyDatabaseId;
     InitSysCache();
 
-    u_sess->proc_cxt.MyDatabaseTableSpace = DEFAULTTABLESPACE_OID;
-    SetFencedMasterDatabase();
-
     LoadSysCache();
 }
@@ -1371,6 +1375,42 @@ void PostgresInitializer::InitStatementWorker()
     FinishInit();
 }
 
+void PostgresInitializer::InitParallelDecode()
+{
+    /* Check replication permissions needed for walsender processes. */
+    Assert(!IsBootstrapProcessingMode());
+
+    InitThread();
+
+    InitSysCache();
+
+    /* Initialize stats collection --- must happen before first xact */
+    pgstat_initialize();
+
+    SetProcessExitCallback();
+
+    StartXact();
+
+    SetSuperUserStandalone();
+
+    if (!AM_WAL_DB_SENDER && !AM_PARALLEL_DECODE && !AM_LOGICAL_READ_RECORD) {
+        InitPlainWalSender();
+        return;
+    }
+
+    SetDatabase();
+
+    LoadSysCache();
+
+    InitDatabase();
+
+    InitPGXCPort();
+
+    InitSettings();
+
+    FinishInit();
+}
+
 void PostgresInitializer::InitPercentileWorker()
 {
     InitThread();
@@ -1701,8 +1741,6 @@ void PostgresInitializer::InitTxnSnapWorker()
 
     InitSettings();
 
-    InitExtensionVariable();
-
     FinishInit();
 
     AuditUserLogin();
@@ -1954,7 +1992,12 @@ void PostgresInitializer::InitThread()
      */
     t_thrd.proc_cxt.MyBackendId = InvalidBackendId;
 
-    SharedInvalBackendInit(IS_THREAD_POOL_WORKER, false);
+    if (EnableLocalSysCache()) {
+        SharedInvalBackendInit(false, false);
+    } else {
+        /* init invalid msg slot */
+        SharedInvalBackendInit(IS_THREAD_POOL_WORKER, false);
+    }
 
     if (t_thrd.proc_cxt.MyBackendId > g_instance.shmem_cxt.MaxBackends || t_thrd.proc_cxt.MyBackendId <= 0)
         ereport(FATAL, (errmsg("bad backend ID: %d", t_thrd.proc_cxt.MyBackendId)));
@@ -1989,6 +2032,56 @@ void PostgresInitializer::InitThread()
     }
 }
 
+void PostgresInitializer::InitLoadLocalSysCache(Oid db_oid, const char *db_name)
+{
+    if (!EnableLocalSysCache()) {
+        return;
+    }
+    ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner;
+    PG_TRY();
+    {
+        /* local_sysdb_resowner is never freed until proc exit */
+        t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.lsc_cxt.lsc->local_sysdb_resowner;
+        Assert(u_sess->proc_cxt.MyDatabaseId != InvalidOid);
+        t_thrd.lsc_cxt.lsc->ClearSysCacheIfNecessary(db_oid, db_name);
+        InitFileAccess();
+
+        /* Do local initialization of file, storage and buffer managers */
+        t_thrd.lsc_cxt.lsc->InitThreadDatabase(db_oid, db_name, u_sess->proc_cxt.MyDatabaseTableSpace);
+        t_thrd.lsc_cxt.lsc->InitDatabasePath(u_sess->proc_cxt.DatabasePath);
+
+        /* init the syscache that is mounted on this thread */
+        Assert(t_thrd.lsc_cxt.lsc != NULL);
+        /* this function is called by a thread worker; since u_sess is initialized, the db_id can be found */
+        Assert(u_sess->proc_cxt.MyDatabaseId != InvalidOid);
+        t_thrd.lsc_cxt.lsc->tabdefcache.Init();
+        t_thrd.lsc_cxt.lsc->tabdefcache.InitPhase2();
+
+        t_thrd.lsc_cxt.lsc->partdefcache.Init();
+        t_thrd.lsc_cxt.lsc->systabcache.Init();
+        t_thrd.lsc_cxt.lsc->tabdefcache.InitPhase3();
+    }
+    PG_CATCH();
+    {
+        /* loading the syscache failed and there is no way to recover, so release the resources here */
+        ResourceOwnerRelease(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true);
+        ResourceOwnerRelease(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, RESOURCE_RELEASE_LOCKS, false, true);
+        ResourceOwnerRelease(t_thrd.lsc_cxt.lsc->local_sysdb_resowner, RESOURCE_RELEASE_AFTER_LOCKS, false, true);
+
+        /* lwlocks are released at sigsetjmp */
+
+        /* restore CurrentResourceOwner */
+        t_thrd.utils_cxt.CurrentResourceOwner = currentOwner;
+        PG_RE_THROW();
+    }
+    PG_END_TRY();
+    /* local_sysdb_resowner should be empty */
+    Assert(CurrentResourceOwnerIsEmpty(t_thrd.lsc_cxt.lsc->local_sysdb_resowner));
+
+    /* restore CurrentResourceOwner */
+    t_thrd.utils_cxt.CurrentResourceOwner = currentOwner;
+}
+
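InitLoadLocalSysCache() above follows a strict save/swap/restore discipline around CurrentResourceOwner: swap in the long-lived local_sysdb_resowner, do the cache loads, and restore the saved owner on both the success path and the PG_CATCH() path. PostgreSQL codes this with setjmp-based PG_TRY macros; the RAII sketch below shows the same discipline in plain C++, with all types invented for illustration.

```cpp
#include <stdexcept>

// Stand-ins for the real types; t_thrd.utils_cxt.CurrentResourceOwner is
// modeled as a single global here.
struct ResourceOwnerT { /* bookkeeping elided */ };
static ResourceOwnerT *g_currentOwner = nullptr;
static ResourceOwnerT g_localSysdbOwner; // lives until process exit

class ScopedOwnerSwap {
public:
    explicit ScopedOwnerSwap(ResourceOwnerT *temp) : m_saved(g_currentOwner)
    {
        g_currentOwner = temp;
    }
    // Runs on normal exit and during unwinding alike, mirroring the restore
    // done both in the PG_CATCH() arm and after PG_END_TRY().
    ~ScopedOwnerSwap()
    {
        g_currentOwner = m_saved;
    }
private:
    ResourceOwnerT *m_saved;
};

static void LoadCachesUnderLocalOwner(bool fail)
{
    ScopedOwnerSwap guard(&g_localSysdbOwner);
    // ... cache initialization charged to g_localSysdbOwner ...
    if (fail) {
        throw std::runtime_error("load syscache failed");
    }
}

int main()
{
    try {
        LoadCachesUnderLocalOwner(true);
    } catch (const std::exception &) {
        // g_currentOwner was already restored by ~ScopedOwnerSwap().
    }
    return g_currentOwner == nullptr ? 0 : 1;
}
```

The destructor runs during stack unwinding as well, which is exactly the guarantee the PG_CATCH() arm has to re-create by hand.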
 void PostgresInitializer::InitSession()
 {
     /* Init rel cache for new session. */
@@ -2046,6 +2139,11 @@ void PostgresInitializer::InitStreamSession()
 
 void PostgresInitializer::InitSysCache()
 {
+    if (EnableLocalSysCache()) {
+        Assert(u_sess->proc_cxt.MyDatabaseId == InvalidOid);
+        t_thrd.lsc_cxt.lsc->ClearSysCacheIfNecessary(m_dboid, m_indbname);
+        InitFileAccess();
+    }
     /*
      * Initialize the relation cache and the system catalog caches. Note that
      * no catalog access happens here; we only set up the hashtable structure.
@@ -2059,12 +2157,13 @@ void PostgresInitializer::InitSysCache()
      */
     RelationCacheInitializePhase2();
 
-    PartitionCacheInitialize();
-    BucketCacheInitialize();
 
     InitCatalogCache();
 
     InitPlanCache();
+    if (!EnableLocalSysCache()) {
+        PartitionCacheInitialize();
+    }
 }
 
 void PostgresInitializer::SetProcessExitCallback()
@@ -2298,6 +2397,10 @@ void PostgresInitializer::SetDefaultDatabase()
     Assert(!u_sess->proc_cxt.DatabasePath);
     u_sess->proc_cxt.DatabasePath = MemoryContextStrdup(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR), m_fullpath);
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->InitSessionDatabase(TemplateDbOid, m_dbname, DEFAULTTABLESPACE_OID);
+        t_thrd.lsc_cxt.lsc->InitDatabasePath(m_fullpath);
+    }
 }
 
@@ -2307,6 +2410,11 @@ void PostgresInitializer::SetFencedMasterDatabase()
 {
     u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId;
     u_sess->proc_cxt.DatabasePath = MemoryContextStrdup(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR), m_fullpath);
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->InitSessionDatabase(u_sess->proc_cxt.MyDatabaseId,
+            NULL, u_sess->proc_cxt.MyDatabaseTableSpace);
+        t_thrd.lsc_cxt.lsc->InitDatabasePath(m_fullpath);
+    }
 }
 
 void PostgresInitializer::SetDatabase()
@@ -2352,7 +2460,11 @@ void PostgresInitializer::SetDatabaseByName()
     u_sess->proc_cxt.MyDatabaseId = HeapTupleGetOid(tuple);
     u_sess->proc_cxt.MyDatabaseTableSpace = dbform->dattablespace;
     /* take database name from the caller, just for paranoia */
+    m_dboid = u_sess->proc_cxt.MyDatabaseId;
     strlcpy(m_dbname, m_indbname, sizeof(m_dbname));
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->InitSessionDatabase(m_dboid, m_dbname, dbform->dattablespace);
+    }
 }
 
 void PostgresInitializer::SetDatabaseByOid()
@@ -2377,6 +2489,9 @@ void PostgresInitializer::SetDatabaseByOid()
     u_sess->proc_cxt.MyDatabaseTableSpace = dbform->dattablespace;
     Assert(u_sess->proc_cxt.MyDatabaseId == m_dboid);
     strlcpy(m_dbname, NameStr(dbform->datname), sizeof(m_dbname));
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->InitSessionDatabase(m_dboid, m_dbname, dbform->dattablespace);
+    }
 }
 
 void PostgresInitializer::LockDatabase()
@@ -2402,6 +2517,7 @@ void PostgresInitializer::LockDatabase()
      * AccessShareLock for such sessions and thereby not conflict against
      * CREATE DATABASE.
      */
+    LockSharedObject(DatabaseRelationId, u_sess->proc_cxt.MyDatabaseId, 0, RowExclusiveLock);
 
     /*
      * Now we can mark our PGPROC entry with the database ID.
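LockDatabase() now takes LockSharedObject(DatabaseRelationId, u_sess->proc_cxt.MyDatabaseId, 0, RowExclusiveLock) before the backend advertises its database in PGPROC, so a concurrent DROP DATABASE cannot remove the catalogs underneath a half-initialized session. The toy model below illustrates that race with std::shared_mutex standing in for the real shared lock manager; lock modes and names are simplified for illustration only.

```cpp
#include <map>
#include <mutex>
#include <shared_mutex>

using Oid_t = unsigned int;

static std::mutex g_tableMutex;
static std::map<Oid_t, std::shared_mutex> g_dbLocks; // one lock object per database oid

static std::shared_mutex &DbLock(Oid_t dboid)
{
    std::lock_guard<std::mutex> g(g_tableMutex);
    return g_dbLocks[dboid]; // std::map nodes are stable, so the reference stays valid
}

// A connecting backend holds the per-database lock in shared mode while it
// initializes: advertise the oid, recheck the database still exists, load caches.
static void ConnectTo(Oid_t dboid)
{
    std::shared_lock<std::shared_mutex> guard(DbLock(dboid));
    // ... session initialization for dboid ...
}

// DROP DATABASE takes the same lock exclusively, so it must wait until no
// backend is mid-connection to dboid before sweeping the files away.
static void DropDatabase(Oid_t dboid)
{
    std::unique_lock<std::shared_mutex> guard(DbLock(dboid));
    // ... safe to remove: nobody is half-connected to dboid ...
}

int main()
{
    ConnectTo(16384);
    DropDatabase(16384);
    return 0;
}
```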
@@ -2478,10 +2594,16 @@ void PostgresInitializer::SetDatabasePath()
     Assert(!u_sess->proc_cxt.DatabasePath);
     u_sess->proc_cxt.DatabasePath = MemoryContextStrdup(
         SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR), m_fullpath);
+    if (EnableLocalSysCache()) {
+        t_thrd.lsc_cxt.lsc->InitDatabasePath(m_fullpath);
+    }
 }
 
 void PostgresInitializer::LoadSysCache()
 {
+    if (EnableLocalSysCache()) {
+        PartitionCacheInitialize();
+    }
     /*
      * It's now possible to do real access to the system catalogs.
      *
@@ -2644,7 +2766,12 @@ void PostgresInitializer::InitCompactionThread()
      */
     t_thrd.proc_cxt.MyBackendId = InvalidBackendId;
 
-    SharedInvalBackendInit(IS_THREAD_POOL_WORKER, false);
+    if (EnableLocalSysCache()) {
+        SharedInvalBackendInit(false, false);
+    } else {
+        /* init invalid msg slot */
+        SharedInvalBackendInit(IS_THREAD_POOL_WORKER, false);
+    }
 
     if (t_thrd.proc_cxt.MyBackendId > g_instance.shmem_cxt.MaxBackends || t_thrd.proc_cxt.MyBackendId <= 0)
         ereport(FATAL, (errmsg("bad backend ID: %d", t_thrd.proc_cxt.MyBackendId)));
@@ -2694,6 +2821,8 @@ void PostgresInitializer::InitBarrierCreator()
 
     InitPGXCPort();
 
+    FinishInit();
+
     return;
 }
diff --git a/src/common/backend/utils/mb/Unicode/ISO10646-GB18030.TXT b/src/common/backend/utils/mb/Unicode/ISO10646-GB18030.TXT
deleted file mode 100644
index a26bab48a..000000000
--- a/src/common/backend/utils/mb/Unicode/ISO10646-GB18030.TXT
+++ /dev/null
@@ -1,63488 +0,0 @@
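The file removed here is the ISO 10646 (Unicode) to GB18030 mapping table, one "-<code point> <byte sequence>" line per entry, 63,488 lines in all. Its four-byte entries follow GB18030's linear layout: the four bytes range alternately over 0x81..0xFE and 0x30..0x39, and consecutive mapped code points receive consecutive sequences (the table's first four-byte rows: U+0080 -> 81308130, U+0081 -> 81308131, U+008A -> 81308230). The standalone sketch below converts a linear index into its four bytes and reproduces those rows; it documents the structure of the deleted data only, not whatever mechanism the build uses in its place.

```cpp
#include <array>
#include <cstdio>

// Turn a linear GB18030 four-byte index into its byte sequence.
// Index 0 corresponds to 81 30 81 30 (U+0080 in the deleted table).
static std::array<unsigned char, 4> Gb18030FromIndex(unsigned int idx)
{
    std::array<unsigned char, 4> b{};
    b[3] = static_cast<unsigned char>(0x30 + idx % 10);  idx /= 10;  // last byte: 0x30..0x39
    b[2] = static_cast<unsigned char>(0x81 + idx % 126); idx /= 126; // third byte: 0x81..0xFE
    b[1] = static_cast<unsigned char>(0x30 + idx % 10);  idx /= 10;  // second byte: 0x30..0x39
    b[0] = static_cast<unsigned char>(0x81 + idx);                   // first byte: valid while idx <= 0x7D
    return b;
}

int main()
{
    // Expect 81308130, 81308131, 81308230: matching the table's first entries.
    for (unsigned int idx : {0u, 1u, 10u}) {
        auto b = Gb18030FromIndex(idx);
        std::printf("index %u -> %02X%02X%02X%02X\n", idx,
                    unsigned(b[0]), unsigned(b[1]), unsigned(b[2]), unsigned(b[3]));
    }
    return 0;
}
```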
[the first stretch of the 63,488 deleted mapping entries, beginning "-0000 00", "-0001 01", ..., is trimmed here; the listing resumes below]
8132E636 -0EEF 8132E637 -0EF0 8132E638 -0EF1 8132E639 -0EF2 8132E730 -0EF3 8132E731 -0EF4 8132E732 -0EF5 8132E733 -0EF6 8132E734 -0EF7 8132E735 -0EF8 8132E736 -0EF9 8132E737 -0EFA 8132E738 -0EFB 8132E739 -0EFC 8132E830 -0EFD 8132E831 -0EFE 8132E832 -0EFF 8132E833 -0F00 8132E834 -0F01 8132E835 -0F02 8132E836 -0F03 8132E837 -0F04 8132E838 -0F05 8132E839 -0F06 8132E930 -0F07 8132E931 -0F08 8132E932 -0F09 8132E933 -0F0A 8132E934 -0F0B 8132E935 -0F0C 8132E936 -0F0D 8132E937 -0F0E 8132E938 -0F0F 8132E939 -0F10 8132EA30 -0F11 8132EA31 -0F12 8132EA32 -0F13 8132EA33 -0F14 8132EA34 -0F15 8132EA35 -0F16 8132EA36 -0F17 8132EA37 -0F18 8132EA38 -0F19 8132EA39 -0F1A 8132EB30 -0F1B 8132EB31 -0F1C 8132EB32 -0F1D 8132EB33 -0F1E 8132EB34 -0F1F 8132EB35 -0F20 8132EB36 -0F21 8132EB37 -0F22 8132EB38 -0F23 8132EB39 -0F24 8132EC30 -0F25 8132EC31 -0F26 8132EC32 -0F27 8132EC33 -0F28 8132EC34 -0F29 8132EC35 -0F2A 8132EC36 -0F2B 8132EC37 -0F2C 8132EC38 -0F2D 8132EC39 -0F2E 8132ED30 -0F2F 8132ED31 -0F30 8132ED32 -0F31 8132ED33 -0F32 8132ED34 -0F33 8132ED35 -0F34 8132ED36 -0F35 8132ED37 -0F36 8132ED38 -0F37 8132ED39 -0F38 8132EE30 -0F39 8132EE31 -0F3A 8132EE32 -0F3B 8132EE33 -0F3C 8132EE34 -0F3D 8132EE35 -0F3E 8132EE36 -0F3F 8132EE37 -0F40 8132EE38 -0F41 8132EE39 -0F42 8132EF30 -0F43 8132EF31 -0F44 8132EF32 -0F45 8132EF33 -0F46 8132EF34 -0F47 8132EF35 -0F48 8132EF36 -0F49 8132EF37 -0F4A 8132EF38 -0F4B 8132EF39 -0F4C 8132F030 -0F4D 8132F031 -0F4E 8132F032 -0F4F 8132F033 -0F50 8132F034 -0F51 8132F035 -0F52 8132F036 -0F53 8132F037 -0F54 8132F038 -0F55 8132F039 -0F56 8132F130 -0F57 8132F131 -0F58 8132F132 -0F59 8132F133 -0F5A 8132F134 -0F5B 8132F135 -0F5C 8132F136 -0F5D 8132F137 -0F5E 8132F138 -0F5F 8132F139 -0F60 8132F230 -0F61 8132F231 -0F62 8132F232 -0F63 8132F233 -0F64 8132F234 -0F65 8132F235 -0F66 8132F236 -0F67 8132F237 -0F68 8132F238 -0F69 8132F239 -0F6A 8132F330 -0F6B 8132F331 -0F6C 8132F332 -0F6D 8132F333 -0F6E 8132F334 -0F6F 8132F335 -0F70 8132F336 -0F71 8132F337 -0F72 8132F338 -0F73 8132F339 -0F74 8132F430 -0F75 8132F431 -0F76 8132F432 -0F77 8132F433 -0F78 8132F434 -0F79 8132F435 -0F7A 8132F436 -0F7B 8132F437 -0F7C 8132F438 -0F7D 8132F439 -0F7E 8132F530 -0F7F 8132F531 -0F80 8132F532 -0F81 8132F533 -0F82 8132F534 -0F83 8132F535 -0F84 8132F536 -0F85 8132F537 -0F86 8132F538 -0F87 8132F539 -0F88 8132F630 -0F89 8132F631 -0F8A 8132F632 -0F8B 8132F633 -0F8C 8132F634 -0F8D 8132F635 -0F8E 8132F636 -0F8F 8132F637 -0F90 8132F638 -0F91 8132F639 -0F92 8132F730 -0F93 8132F731 -0F94 8132F732 -0F95 8132F733 -0F96 8132F734 -0F97 8132F735 -0F98 8132F736 -0F99 8132F737 -0F9A 8132F738 -0F9B 8132F739 -0F9C 8132F830 -0F9D 8132F831 -0F9E 8132F832 -0F9F 8132F833 -0FA0 8132F834 -0FA1 8132F835 -0FA2 8132F836 -0FA3 8132F837 -0FA4 8132F838 -0FA5 8132F839 -0FA6 8132F930 -0FA7 8132F931 -0FA8 8132F932 -0FA9 8132F933 -0FAA 8132F934 -0FAB 8132F935 -0FAC 8132F936 -0FAD 8132F937 -0FAE 8132F938 -0FAF 8132F939 -0FB0 8132FA30 -0FB1 8132FA31 -0FB2 8132FA32 -0FB3 8132FA33 -0FB4 8132FA34 -0FB5 8132FA35 -0FB6 8132FA36 -0FB7 8132FA37 -0FB8 8132FA38 -0FB9 8132FA39 -0FBA 8132FB30 -0FBB 8132FB31 -0FBC 8132FB32 -0FBD 8132FB33 -0FBE 8132FB34 -0FBF 8132FB35 -0FC0 8132FB36 -0FC1 8132FB37 -0FC2 8132FB38 -0FC3 8132FB39 -0FC4 8132FC30 -0FC5 8132FC31 -0FC6 8132FC32 -0FC7 8132FC33 -0FC8 8132FC34 -0FC9 8132FC35 -0FCA 8132FC36 -0FCB 8132FC37 -0FCC 8132FC38 -0FCD 8132FC39 -0FCE 8132FD30 -0FCF 8132FD31 -0FD0 8132FD32 -0FD1 8132FD33 -0FD2 8132FD34 -0FD3 8132FD35 -0FD4 8132FD36 -0FD5 8132FD37 -0FD6 8132FD38 -0FD7 8132FD39 -0FD8 8132FE30 -0FD9 8132FE31 -0FDA 8132FE32 -0FDB 
8132FE33 -0FDC 8132FE34 -0FDD 8132FE35 -0FDE 8132FE36 -0FDF 8132FE37 -0FE0 8132FE38 -0FE1 8132FE39 -0FE2 81338130 -0FE3 81338131 -0FE4 81338132 -0FE5 81338133 -0FE6 81338134 -0FE7 81338135 -0FE8 81338136 -0FE9 81338137 -0FEA 81338138 -0FEB 81338139 -0FEC 81338230 -0FED 81338231 -0FEE 81338232 -0FEF 81338233 -0FF0 81338234 -0FF1 81338235 -0FF2 81338236 -0FF3 81338237 -0FF4 81338238 -0FF5 81338239 -0FF6 81338330 -0FF7 81338331 -0FF8 81338332 -0FF9 81338333 -0FFA 81338334 -0FFB 81338335 -0FFC 81338336 -0FFD 81338337 -0FFE 81338338 -0FFF 81338339 -1000 81338430 -1001 81338431 -1002 81338432 -1003 81338433 -1004 81338434 -1005 81338435 -1006 81338436 -1007 81338437 -1008 81338438 -1009 81338439 -100A 81338530 -100B 81338531 -100C 81338532 -100D 81338533 -100E 81338534 -100F 81338535 -1010 81338536 -1011 81338537 -1012 81338538 -1013 81338539 -1014 81338630 -1015 81338631 -1016 81338632 -1017 81338633 -1018 81338634 -1019 81338635 -101A 81338636 -101B 81338637 -101C 81338638 -101D 81338639 -101E 81338730 -101F 81338731 -1020 81338732 -1021 81338733 -1022 81338734 -1023 81338735 -1024 81338736 -1025 81338737 -1026 81338738 -1027 81338739 -1028 81338830 -1029 81338831 -102A 81338832 -102B 81338833 -102C 81338834 -102D 81338835 -102E 81338836 -102F 81338837 -1030 81338838 -1031 81338839 -1032 81338930 -1033 81338931 -1034 81338932 -1035 81338933 -1036 81338934 -1037 81338935 -1038 81338936 -1039 81338937 -103A 81338938 -103B 81338939 -103C 81338A30 -103D 81338A31 -103E 81338A32 -103F 81338A33 -1040 81338A34 -1041 81338A35 -1042 81338A36 -1043 81338A37 -1044 81338A38 -1045 81338A39 -1046 81338B30 -1047 81338B31 -1048 81338B32 -1049 81338B33 -104A 81338B34 -104B 81338B35 -104C 81338B36 -104D 81338B37 -104E 81338B38 -104F 81338B39 -1050 81338C30 -1051 81338C31 -1052 81338C32 -1053 81338C33 -1054 81338C34 -1055 81338C35 -1056 81338C36 -1057 81338C37 -1058 81338C38 -1059 81338C39 -105A 81338D30 -105B 81338D31 -105C 81338D32 -105D 81338D33 -105E 81338D34 -105F 81338D35 -1060 81338D36 -1061 81338D37 -1062 81338D38 -1063 81338D39 -1064 81338E30 -1065 81338E31 -1066 81338E32 -1067 81338E33 -1068 81338E34 -1069 81338E35 -106A 81338E36 -106B 81338E37 -106C 81338E38 -106D 81338E39 -106E 81338F30 -106F 81338F31 -1070 81338F32 -1071 81338F33 -1072 81338F34 -1073 81338F35 -1074 81338F36 -1075 81338F37 -1076 81338F38 -1077 81338F39 -1078 81339030 -1079 81339031 -107A 81339032 -107B 81339033 -107C 81339034 -107D 81339035 -107E 81339036 -107F 81339037 -1080 81339038 -1081 81339039 -1082 81339130 -1083 81339131 -1084 81339132 -1085 81339133 -1086 81339134 -1087 81339135 -1088 81339136 -1089 81339137 -108A 81339138 -108B 81339139 -108C 81339230 -108D 81339231 -108E 81339232 -108F 81339233 -1090 81339234 -1091 81339235 -1092 81339236 -1093 81339237 -1094 81339238 -1095 81339239 -1096 81339330 -1097 81339331 -1098 81339332 -1099 81339333 -109A 81339334 -109B 81339335 -109C 81339336 -109D 81339337 -109E 81339338 -109F 81339339 -10A0 81339430 -10A1 81339431 -10A2 81339432 -10A3 81339433 -10A4 81339434 -10A5 81339435 -10A6 81339436 -10A7 81339437 -10A8 81339438 -10A9 81339439 -10AA 81339530 -10AB 81339531 -10AC 81339532 -10AD 81339533 -10AE 81339534 -10AF 81339535 -10B0 81339536 -10B1 81339537 -10B2 81339538 -10B3 81339539 -10B4 81339630 -10B5 81339631 -10B6 81339632 -10B7 81339633 -10B8 81339634 -10B9 81339635 -10BA 81339636 -10BB 81339637 -10BC 81339638 -10BD 81339639 -10BE 81339730 -10BF 81339731 -10C0 81339732 -10C1 81339733 -10C2 81339734 -10C3 81339735 -10C4 81339736 -10C5 81339737 -10C6 81339738 -10C7 81339739 -10C8 
81339830 -10C9 81339831 -10CA 81339832 -10CB 81339833 -10CC 81339834 -10CD 81339835 -10CE 81339836 -10CF 81339837 -10D0 81339838 -10D1 81339839 -10D2 81339930 -10D3 81339931 -10D4 81339932 -10D5 81339933 -10D6 81339934 -10D7 81339935 -10D8 81339936 -10D9 81339937 -10DA 81339938 -10DB 81339939 -10DC 81339A30 -10DD 81339A31 -10DE 81339A32 -10DF 81339A33 -10E0 81339A34 -10E1 81339A35 -10E2 81339A36 -10E3 81339A37 -10E4 81339A38 -10E5 81339A39 -10E6 81339B30 -10E7 81339B31 -10E8 81339B32 -10E9 81339B33 -10EA 81339B34 -10EB 81339B35 -10EC 81339B36 -10ED 81339B37 -10EE 81339B38 -10EF 81339B39 -10F0 81339C30 -10F1 81339C31 -10F2 81339C32 -10F3 81339C33 -10F4 81339C34 -10F5 81339C35 -10F6 81339C36 -10F7 81339C37 -10F8 81339C38 -10F9 81339C39 -10FA 81339D30 -10FB 81339D31 -10FC 81339D32 -10FD 81339D33 -10FE 81339D34 -10FF 81339D35 -1100 81339D36 -1101 81339D37 -1102 81339D38 -1103 81339D39 -1104 81339E30 -1105 81339E31 -1106 81339E32 -1107 81339E33 -1108 81339E34 -1109 81339E35 -110A 81339E36 -110B 81339E37 -110C 81339E38 -110D 81339E39 -110E 81339F30 -110F 81339F31 -1110 81339F32 -1111 81339F33 -1112 81339F34 -1113 81339F35 -1114 81339F36 -1115 81339F37 -1116 81339F38 -1117 81339F39 -1118 8133A030 -1119 8133A031 -111A 8133A032 -111B 8133A033 -111C 8133A034 -111D 8133A035 -111E 8133A036 -111F 8133A037 -1120 8133A038 -1121 8133A039 -1122 8133A130 -1123 8133A131 -1124 8133A132 -1125 8133A133 -1126 8133A134 -1127 8133A135 -1128 8133A136 -1129 8133A137 -112A 8133A138 -112B 8133A139 -112C 8133A230 -112D 8133A231 -112E 8133A232 -112F 8133A233 -1130 8133A234 -1131 8133A235 -1132 8133A236 -1133 8133A237 -1134 8133A238 -1135 8133A239 -1136 8133A330 -1137 8133A331 -1138 8133A332 -1139 8133A333 -113A 8133A334 -113B 8133A335 -113C 8133A336 -113D 8133A337 -113E 8133A338 -113F 8133A339 -1140 8133A430 -1141 8133A431 -1142 8133A432 -1143 8133A433 -1144 8133A434 -1145 8133A435 -1146 8133A436 -1147 8133A437 -1148 8133A438 -1149 8133A439 -114A 8133A530 -114B 8133A531 -114C 8133A532 -114D 8133A533 -114E 8133A534 -114F 8133A535 -1150 8133A536 -1151 8133A537 -1152 8133A538 -1153 8133A539 -1154 8133A630 -1155 8133A631 -1156 8133A632 -1157 8133A633 -1158 8133A634 -1159 8133A635 -115A 8133A636 -115B 8133A637 -115C 8133A638 -115D 8133A639 -115E 8133A730 -115F 8133A731 -1160 8133A732 -1161 8133A733 -1162 8133A734 -1163 8133A735 -1164 8133A736 -1165 8133A737 -1166 8133A738 -1167 8133A739 -1168 8133A830 -1169 8133A831 -116A 8133A832 -116B 8133A833 -116C 8133A834 -116D 8133A835 -116E 8133A836 -116F 8133A837 -1170 8133A838 -1171 8133A839 -1172 8133A930 -1173 8133A931 -1174 8133A932 -1175 8133A933 -1176 8133A934 -1177 8133A935 -1178 8133A936 -1179 8133A937 -117A 8133A938 -117B 8133A939 -117C 8133AA30 -117D 8133AA31 -117E 8133AA32 -117F 8133AA33 -1180 8133AA34 -1181 8133AA35 -1182 8133AA36 -1183 8133AA37 -1184 8133AA38 -1185 8133AA39 -1186 8133AB30 -1187 8133AB31 -1188 8133AB32 -1189 8133AB33 -118A 8133AB34 -118B 8133AB35 -118C 8133AB36 -118D 8133AB37 -118E 8133AB38 -118F 8133AB39 -1190 8133AC30 -1191 8133AC31 -1192 8133AC32 -1193 8133AC33 -1194 8133AC34 -1195 8133AC35 -1196 8133AC36 -1197 8133AC37 -1198 8133AC38 -1199 8133AC39 -119A 8133AD30 -119B 8133AD31 -119C 8133AD32 -119D 8133AD33 -119E 8133AD34 -119F 8133AD35 -11A0 8133AD36 -11A1 8133AD37 -11A2 8133AD38 -11A3 8133AD39 -11A4 8133AE30 -11A5 8133AE31 -11A6 8133AE32 -11A7 8133AE33 -11A8 8133AE34 -11A9 8133AE35 -11AA 8133AE36 -11AB 8133AE37 -11AC 8133AE38 -11AD 8133AE39 -11AE 8133AF30 -11AF 8133AF31 -11B0 8133AF32 -11B1 8133AF33 -11B2 8133AF34 -11B3 8133AF35 -11B4 8133AF36 -11B5 
8133AF37 -11B6 8133AF38 -11B7 8133AF39 -11B8 8133B030 -11B9 8133B031 -11BA 8133B032 -11BB 8133B033 -11BC 8133B034 -11BD 8133B035 -11BE 8133B036 -11BF 8133B037 -11C0 8133B038 -11C1 8133B039 -11C2 8133B130 -11C3 8133B131 -11C4 8133B132 -11C5 8133B133 -11C6 8133B134 -11C7 8133B135 -11C8 8133B136 -11C9 8133B137 -11CA 8133B138 -11CB 8133B139 -11CC 8133B230 -11CD 8133B231 -11CE 8133B232 -11CF 8133B233 -11D0 8133B234 -11D1 8133B235 -11D2 8133B236 -11D3 8133B237 -11D4 8133B238 -11D5 8133B239 -11D6 8133B330 -11D7 8133B331 -11D8 8133B332 -11D9 8133B333 -11DA 8133B334 -11DB 8133B335 -11DC 8133B336 -11DD 8133B337 -11DE 8133B338 -11DF 8133B339 -11E0 8133B430 -11E1 8133B431 -11E2 8133B432 -11E3 8133B433 -11E4 8133B434 -11E5 8133B435 -11E6 8133B436 -11E7 8133B437 -11E8 8133B438 -11E9 8133B439 -11EA 8133B530 -11EB 8133B531 -11EC 8133B532 -11ED 8133B533 -11EE 8133B534 -11EF 8133B535 -11F0 8133B536 -11F1 8133B537 -11F2 8133B538 -11F3 8133B539 -11F4 8133B630 -11F5 8133B631 -11F6 8133B632 -11F7 8133B633 -11F8 8133B634 -11F9 8133B635 -11FA 8133B636 -11FB 8133B637 -11FC 8133B638 -11FD 8133B639 -11FE 8133B730 -11FF 8133B731 -1200 8133B732 -1201 8133B733 -1202 8133B734 -1203 8133B735 -1204 8133B736 -1205 8133B737 -1206 8133B738 -1207 8133B739 -1208 8133B830 -1209 8133B831 -120A 8133B832 -120B 8133B833 -120C 8133B834 -120D 8133B835 -120E 8133B836 -120F 8133B837 -1210 8133B838 -1211 8133B839 -1212 8133B930 -1213 8133B931 -1214 8133B932 -1215 8133B933 -1216 8133B934 -1217 8133B935 -1218 8133B936 -1219 8133B937 -121A 8133B938 -121B 8133B939 -121C 8133BA30 -121D 8133BA31 -121E 8133BA32 -121F 8133BA33 -1220 8133BA34 -1221 8133BA35 -1222 8133BA36 -1223 8133BA37 -1224 8133BA38 -1225 8133BA39 -1226 8133BB30 -1227 8133BB31 -1228 8133BB32 -1229 8133BB33 -122A 8133BB34 -122B 8133BB35 -122C 8133BB36 -122D 8133BB37 -122E 8133BB38 -122F 8133BB39 -1230 8133BC30 -1231 8133BC31 -1232 8133BC32 -1233 8133BC33 -1234 8133BC34 -1235 8133BC35 -1236 8133BC36 -1237 8133BC37 -1238 8133BC38 -1239 8133BC39 -123A 8133BD30 -123B 8133BD31 -123C 8133BD32 -123D 8133BD33 -123E 8133BD34 -123F 8133BD35 -1240 8133BD36 -1241 8133BD37 -1242 8133BD38 -1243 8133BD39 -1244 8133BE30 -1245 8133BE31 -1246 8133BE32 -1247 8133BE33 -1248 8133BE34 -1249 8133BE35 -124A 8133BE36 -124B 8133BE37 -124C 8133BE38 -124D 8133BE39 -124E 8133BF30 -124F 8133BF31 -1250 8133BF32 -1251 8133BF33 -1252 8133BF34 -1253 8133BF35 -1254 8133BF36 -1255 8133BF37 -1256 8133BF38 -1257 8133BF39 -1258 8133C030 -1259 8133C031 -125A 8133C032 -125B 8133C033 -125C 8133C034 -125D 8133C035 -125E 8133C036 -125F 8133C037 -1260 8133C038 -1261 8133C039 -1262 8133C130 -1263 8133C131 -1264 8133C132 -1265 8133C133 -1266 8133C134 -1267 8133C135 -1268 8133C136 -1269 8133C137 -126A 8133C138 -126B 8133C139 -126C 8133C230 -126D 8133C231 -126E 8133C232 -126F 8133C233 -1270 8133C234 -1271 8133C235 -1272 8133C236 -1273 8133C237 -1274 8133C238 -1275 8133C239 -1276 8133C330 -1277 8133C331 -1278 8133C332 -1279 8133C333 -127A 8133C334 -127B 8133C335 -127C 8133C336 -127D 8133C337 -127E 8133C338 -127F 8133C339 -1280 8133C430 -1281 8133C431 -1282 8133C432 -1283 8133C433 -1284 8133C434 -1285 8133C435 -1286 8133C436 -1287 8133C437 -1288 8133C438 -1289 8133C439 -128A 8133C530 -128B 8133C531 -128C 8133C532 -128D 8133C533 -128E 8133C534 -128F 8133C535 -1290 8133C536 -1291 8133C537 -1292 8133C538 -1293 8133C539 -1294 8133C630 -1295 8133C631 -1296 8133C632 -1297 8133C633 -1298 8133C634 -1299 8133C635 -129A 8133C636 -129B 8133C637 -129C 8133C638 -129D 8133C639 -129E 8133C730 -129F 8133C731 -12A0 8133C732 -12A1 8133C733 -12A2 
8133C734 -12A3 8133C735 -12A4 8133C736 -12A5 8133C737 -12A6 8133C738 -12A7 8133C739 -12A8 8133C830 -12A9 8133C831 -12AA 8133C832 -12AB 8133C833 -12AC 8133C834 -12AD 8133C835 -12AE 8133C836 -12AF 8133C837 -12B0 8133C838 -12B1 8133C839 -12B2 8133C930 -12B3 8133C931 -12B4 8133C932 -12B5 8133C933 -12B6 8133C934 -12B7 8133C935 -12B8 8133C936 -12B9 8133C937 -12BA 8133C938 -12BB 8133C939 -12BC 8133CA30 -12BD 8133CA31 -12BE 8133CA32 -12BF 8133CA33 -12C0 8133CA34 -12C1 8133CA35 -12C2 8133CA36 -12C3 8133CA37 -12C4 8133CA38 -12C5 8133CA39 -12C6 8133CB30 -12C7 8133CB31 -12C8 8133CB32 -12C9 8133CB33 -12CA 8133CB34 -12CB 8133CB35 -12CC 8133CB36 -12CD 8133CB37 -12CE 8133CB38 -12CF 8133CB39 -12D0 8133CC30 -12D1 8133CC31 -12D2 8133CC32 -12D3 8133CC33 -12D4 8133CC34 -12D5 8133CC35 -12D6 8133CC36 -12D7 8133CC37 -12D8 8133CC38 -12D9 8133CC39 -12DA 8133CD30 -12DB 8133CD31 -12DC 8133CD32 -12DD 8133CD33 -12DE 8133CD34 -12DF 8133CD35 -12E0 8133CD36 -12E1 8133CD37 -12E2 8133CD38 -12E3 8133CD39 -12E4 8133CE30 -12E5 8133CE31 -12E6 8133CE32 -12E7 8133CE33 -12E8 8133CE34 -12E9 8133CE35 -12EA 8133CE36 -12EB 8133CE37 -12EC 8133CE38 -12ED 8133CE39 -12EE 8133CF30 -12EF 8133CF31 -12F0 8133CF32 -12F1 8133CF33 -12F2 8133CF34 -12F3 8133CF35 -12F4 8133CF36 -12F5 8133CF37 -12F6 8133CF38 -12F7 8133CF39 -12F8 8133D030 -12F9 8133D031 -12FA 8133D032 -12FB 8133D033 -12FC 8133D034 -12FD 8133D035 -12FE 8133D036 -12FF 8133D037 -1300 8133D038 -1301 8133D039 -1302 8133D130 -1303 8133D131 -1304 8133D132 -1305 8133D133 -1306 8133D134 -1307 8133D135 -1308 8133D136 -1309 8133D137 -130A 8133D138 -130B 8133D139 -130C 8133D230 -130D 8133D231 -130E 8133D232 -130F 8133D233 -1310 8133D234 -1311 8133D235 -1312 8133D236 -1313 8133D237 -1314 8133D238 -1315 8133D239 -1316 8133D330 -1317 8133D331 -1318 8133D332 -1319 8133D333 -131A 8133D334 -131B 8133D335 -131C 8133D336 -131D 8133D337 -131E 8133D338 -131F 8133D339 -1320 8133D430 -1321 8133D431 -1322 8133D432 -1323 8133D433 -1324 8133D434 -1325 8133D435 -1326 8133D436 -1327 8133D437 -1328 8133D438 -1329 8133D439 -132A 8133D530 -132B 8133D531 -132C 8133D532 -132D 8133D533 -132E 8133D534 -132F 8133D535 -1330 8133D536 -1331 8133D537 -1332 8133D538 -1333 8133D539 -1334 8133D630 -1335 8133D631 -1336 8133D632 -1337 8133D633 -1338 8133D634 -1339 8133D635 -133A 8133D636 -133B 8133D637 -133C 8133D638 -133D 8133D639 -133E 8133D730 -133F 8133D731 -1340 8133D732 -1341 8133D733 -1342 8133D734 -1343 8133D735 -1344 8133D736 -1345 8133D737 -1346 8133D738 -1347 8133D739 -1348 8133D830 -1349 8133D831 -134A 8133D832 -134B 8133D833 -134C 8133D834 -134D 8133D835 -134E 8133D836 -134F 8133D837 -1350 8133D838 -1351 8133D839 -1352 8133D930 -1353 8133D931 -1354 8133D932 -1355 8133D933 -1356 8133D934 -1357 8133D935 -1358 8133D936 -1359 8133D937 -135A 8133D938 -135B 8133D939 -135C 8133DA30 -135D 8133DA31 -135E 8133DA32 -135F 8133DA33 -1360 8133DA34 -1361 8133DA35 -1362 8133DA36 -1363 8133DA37 -1364 8133DA38 -1365 8133DA39 -1366 8133DB30 -1367 8133DB31 -1368 8133DB32 -1369 8133DB33 -136A 8133DB34 -136B 8133DB35 -136C 8133DB36 -136D 8133DB37 -136E 8133DB38 -136F 8133DB39 -1370 8133DC30 -1371 8133DC31 -1372 8133DC32 -1373 8133DC33 -1374 8133DC34 -1375 8133DC35 -1376 8133DC36 -1377 8133DC37 -1378 8133DC38 -1379 8133DC39 -137A 8133DD30 -137B 8133DD31 -137C 8133DD32 -137D 8133DD33 -137E 8133DD34 -137F 8133DD35 -1380 8133DD36 -1381 8133DD37 -1382 8133DD38 -1383 8133DD39 -1384 8133DE30 -1385 8133DE31 -1386 8133DE32 -1387 8133DE33 -1388 8133DE34 -1389 8133DE35 -138A 8133DE36 -138B 8133DE37 -138C 8133DE38 -138D 8133DE39 -138E 8133DF30 -138F 
8133DF31 -1390 8133DF32 -1391 8133DF33 -1392 8133DF34 -1393 8133DF35 -1394 8133DF36 -1395 8133DF37 -1396 8133DF38 -1397 8133DF39 -1398 8133E030 -1399 8133E031 -139A 8133E032 -139B 8133E033 -139C 8133E034 -139D 8133E035 -139E 8133E036 -139F 8133E037 -13A0 8133E038 -13A1 8133E039 -13A2 8133E130 -13A3 8133E131 -13A4 8133E132 -13A5 8133E133 -13A6 8133E134 -13A7 8133E135 -13A8 8133E136 -13A9 8133E137 -13AA 8133E138 -13AB 8133E139 -13AC 8133E230 -13AD 8133E231 -13AE 8133E232 -13AF 8133E233 -13B0 8133E234 -13B1 8133E235 -13B2 8133E236 -13B3 8133E237 -13B4 8133E238 -13B5 8133E239 -13B6 8133E330 -13B7 8133E331 -13B8 8133E332 -13B9 8133E333 -13BA 8133E334 -13BB 8133E335 -13BC 8133E336 -13BD 8133E337 -13BE 8133E338 -13BF 8133E339 -13C0 8133E430 -13C1 8133E431 -13C2 8133E432 -13C3 8133E433 -13C4 8133E434 -13C5 8133E435 -13C6 8133E436 -13C7 8133E437 -13C8 8133E438 -13C9 8133E439 -13CA 8133E530 -13CB 8133E531 -13CC 8133E532 -13CD 8133E533 -13CE 8133E534 -13CF 8133E535 -13D0 8133E536 -13D1 8133E537 -13D2 8133E538 -13D3 8133E539 -13D4 8133E630 -13D5 8133E631 -13D6 8133E632 -13D7 8133E633 -13D8 8133E634 -13D9 8133E635 -13DA 8133E636 -13DB 8133E637 -13DC 8133E638 -13DD 8133E639 -13DE 8133E730 -13DF 8133E731 -13E0 8133E732 -13E1 8133E733 -13E2 8133E734 -13E3 8133E735 -13E4 8133E736 -13E5 8133E737 -13E6 8133E738 -13E7 8133E739 -13E8 8133E830 -13E9 8133E831 -13EA 8133E832 -13EB 8133E833 -13EC 8133E834 -13ED 8133E835 -13EE 8133E836 -13EF 8133E837 -13F0 8133E838 -13F1 8133E839 -13F2 8133E930 -13F3 8133E931 -13F4 8133E932 -13F5 8133E933 -13F6 8133E934 -13F7 8133E935 -13F8 8133E936 -13F9 8133E937 -13FA 8133E938 -13FB 8133E939 -13FC 8133EA30 -13FD 8133EA31 -13FE 8133EA32 -13FF 8133EA33 -1400 8133EA34 -1401 8133EA35 -1402 8133EA36 -1403 8133EA37 -1404 8133EA38 -1405 8133EA39 -1406 8133EB30 -1407 8133EB31 -1408 8133EB32 -1409 8133EB33 -140A 8133EB34 -140B 8133EB35 -140C 8133EB36 -140D 8133EB37 -140E 8133EB38 -140F 8133EB39 -1410 8133EC30 -1411 8133EC31 -1412 8133EC32 -1413 8133EC33 -1414 8133EC34 -1415 8133EC35 -1416 8133EC36 -1417 8133EC37 -1418 8133EC38 -1419 8133EC39 -141A 8133ED30 -141B 8133ED31 -141C 8133ED32 -141D 8133ED33 -141E 8133ED34 -141F 8133ED35 -1420 8133ED36 -1421 8133ED37 -1422 8133ED38 -1423 8133ED39 -1424 8133EE30 -1425 8133EE31 -1426 8133EE32 -1427 8133EE33 -1428 8133EE34 -1429 8133EE35 -142A 8133EE36 -142B 8133EE37 -142C 8133EE38 -142D 8133EE39 -142E 8133EF30 -142F 8133EF31 -1430 8133EF32 -1431 8133EF33 -1432 8133EF34 -1433 8133EF35 -1434 8133EF36 -1435 8133EF37 -1436 8133EF38 -1437 8133EF39 -1438 8133F030 -1439 8133F031 -143A 8133F032 -143B 8133F033 -143C 8133F034 -143D 8133F035 -143E 8133F036 -143F 8133F037 -1440 8133F038 -1441 8133F039 -1442 8133F130 -1443 8133F131 -1444 8133F132 -1445 8133F133 -1446 8133F134 -1447 8133F135 -1448 8133F136 -1449 8133F137 -144A 8133F138 -144B 8133F139 -144C 8133F230 -144D 8133F231 -144E 8133F232 -144F 8133F233 -1450 8133F234 -1451 8133F235 -1452 8133F236 -1453 8133F237 -1454 8133F238 -1455 8133F239 -1456 8133F330 -1457 8133F331 -1458 8133F332 -1459 8133F333 -145A 8133F334 -145B 8133F335 -145C 8133F336 -145D 8133F337 -145E 8133F338 -145F 8133F339 -1460 8133F430 -1461 8133F431 -1462 8133F432 -1463 8133F433 -1464 8133F434 -1465 8133F435 -1466 8133F436 -1467 8133F437 -1468 8133F438 -1469 8133F439 -146A 8133F530 -146B 8133F531 -146C 8133F532 -146D 8133F533 -146E 8133F534 -146F 8133F535 -1470 8133F536 -1471 8133F537 -1472 8133F538 -1473 8133F539 -1474 8133F630 -1475 8133F631 -1476 8133F632 -1477 8133F633 -1478 8133F634 -1479 8133F635 -147A 8133F636 -147B 8133F637 -147C 
8133F638 -147D 8133F639 -147E 8133F730 -147F 8133F731 -1480 8133F732 -1481 8133F733 -1482 8133F734 -1483 8133F735 -1484 8133F736 -1485 8133F737 -1486 8133F738 -1487 8133F739 -1488 8133F830 -1489 8133F831 -148A 8133F832 -148B 8133F833 -148C 8133F834 -148D 8133F835 -148E 8133F836 -148F 8133F837 -1490 8133F838 -1491 8133F839 -1492 8133F930 -1493 8133F931 -1494 8133F932 -1495 8133F933 -1496 8133F934 -1497 8133F935 -1498 8133F936 -1499 8133F937 -149A 8133F938 -149B 8133F939 -149C 8133FA30 -149D 8133FA31 -149E 8133FA32 -149F 8133FA33 -14A0 8133FA34 -14A1 8133FA35 -14A2 8133FA36 -14A3 8133FA37 -14A4 8133FA38 -14A5 8133FA39 -14A6 8133FB30 -14A7 8133FB31 -14A8 8133FB32 -14A9 8133FB33 -14AA 8133FB34 -14AB 8133FB35 -14AC 8133FB36 -14AD 8133FB37 -14AE 8133FB38 -14AF 8133FB39 -14B0 8133FC30 -14B1 8133FC31 -14B2 8133FC32 -14B3 8133FC33 -14B4 8133FC34 -14B5 8133FC35 -14B6 8133FC36 -14B7 8133FC37 -14B8 8133FC38 -14B9 8133FC39 -14BA 8133FD30 -14BB 8133FD31 -14BC 8133FD32 -14BD 8133FD33 -14BE 8133FD34 -14BF 8133FD35 -14C0 8133FD36 -14C1 8133FD37 -14C2 8133FD38 -14C3 8133FD39 -14C4 8133FE30 -14C5 8133FE31 -14C6 8133FE32 -14C7 8133FE33 -14C8 8133FE34 -14C9 8133FE35 -14CA 8133FE36 -14CB 8133FE37 -14CC 8133FE38 -14CD 8133FE39 -14CE 81348130 -14CF 81348131 -14D0 81348132 -14D1 81348133 -14D2 81348134 -14D3 81348135 -14D4 81348136 -14D5 81348137 -14D6 81348138 -14D7 81348139 -14D8 81348230 -14D9 81348231 -14DA 81348232 -14DB 81348233 -14DC 81348234 -14DD 81348235 -14DE 81348236 -14DF 81348237 -14E0 81348238 -14E1 81348239 -14E2 81348330 -14E3 81348331 -14E4 81348332 -14E5 81348333 -14E6 81348334 -14E7 81348335 -14E8 81348336 -14E9 81348337 -14EA 81348338 -14EB 81348339 -14EC 81348430 -14ED 81348431 -14EE 81348432 -14EF 81348433 -14F0 81348434 -14F1 81348435 -14F2 81348436 -14F3 81348437 -14F4 81348438 -14F5 81348439 -14F6 81348530 -14F7 81348531 -14F8 81348532 -14F9 81348533 -14FA 81348534 -14FB 81348535 -14FC 81348536 -14FD 81348537 -14FE 81348538 -14FF 81348539 -1500 81348630 -1501 81348631 -1502 81348632 -1503 81348633 -1504 81348634 -1505 81348635 -1506 81348636 -1507 81348637 -1508 81348638 -1509 81348639 -150A 81348730 -150B 81348731 -150C 81348732 -150D 81348733 -150E 81348734 -150F 81348735 -1510 81348736 -1511 81348737 -1512 81348738 -1513 81348739 -1514 81348830 -1515 81348831 -1516 81348832 -1517 81348833 -1518 81348834 -1519 81348835 -151A 81348836 -151B 81348837 -151C 81348838 -151D 81348839 -151E 81348930 -151F 81348931 -1520 81348932 -1521 81348933 -1522 81348934 -1523 81348935 -1524 81348936 -1525 81348937 -1526 81348938 -1527 81348939 -1528 81348A30 -1529 81348A31 -152A 81348A32 -152B 81348A33 -152C 81348A34 -152D 81348A35 -152E 81348A36 -152F 81348A37 -1530 81348A38 -1531 81348A39 -1532 81348B30 -1533 81348B31 -1534 81348B32 -1535 81348B33 -1536 81348B34 -1537 81348B35 -1538 81348B36 -1539 81348B37 -153A 81348B38 -153B 81348B39 -153C 81348C30 -153D 81348C31 -153E 81348C32 -153F 81348C33 -1540 81348C34 -1541 81348C35 -1542 81348C36 -1543 81348C37 -1544 81348C38 -1545 81348C39 -1546 81348D30 -1547 81348D31 -1548 81348D32 -1549 81348D33 -154A 81348D34 -154B 81348D35 -154C 81348D36 -154D 81348D37 -154E 81348D38 -154F 81348D39 -1550 81348E30 -1551 81348E31 -1552 81348E32 -1553 81348E33 -1554 81348E34 -1555 81348E35 -1556 81348E36 -1557 81348E37 -1558 81348E38 -1559 81348E39 -155A 81348F30 -155B 81348F31 -155C 81348F32 -155D 81348F33 -155E 81348F34 -155F 81348F35 -1560 81348F36 -1561 81348F37 -1562 81348F38 -1563 81348F39 -1564 81349030 -1565 81349031 -1566 81349032 -1567 81349033 -1568 81349034 -1569 
81349035 -156A 81349036 -156B 81349037 -156C 81349038 -156D 81349039 -156E 81349130 -156F 81349131 -1570 81349132 -1571 81349133 -1572 81349134 -1573 81349135 -1574 81349136 -1575 81349137 -1576 81349138 -1577 81349139 -1578 81349230 -1579 81349231 -157A 81349232 -157B 81349233 -157C 81349234 -157D 81349235 -157E 81349236 -157F 81349237 -1580 81349238 -1581 81349239 -1582 81349330 -1583 81349331 -1584 81349332 -1585 81349333 -1586 81349334 -1587 81349335 -1588 81349336 -1589 81349337 -158A 81349338 -158B 81349339 -158C 81349430 -158D 81349431 -158E 81349432 -158F 81349433 -1590 81349434 -1591 81349435 -1592 81349436 -1593 81349437 -1594 81349438 -1595 81349439 -1596 81349530 -1597 81349531 -1598 81349532 -1599 81349533 -159A 81349534 -159B 81349535 -159C 81349536 -159D 81349537 -159E 81349538 -159F 81349539 -15A0 81349630 -15A1 81349631 -15A2 81349632 -15A3 81349633 -15A4 81349634 -15A5 81349635 -15A6 81349636 -15A7 81349637 -15A8 81349638 -15A9 81349639 -15AA 81349730 -15AB 81349731 -15AC 81349732 -15AD 81349733 -15AE 81349734 -15AF 81349735 -15B0 81349736 -15B1 81349737 -15B2 81349738 -15B3 81349739 -15B4 81349830 -15B5 81349831 -15B6 81349832 -15B7 81349833 -15B8 81349834 -15B9 81349835 -15BA 81349836 -15BB 81349837 -15BC 81349838 -15BD 81349839 -15BE 81349930 -15BF 81349931 -15C0 81349932 -15C1 81349933 -15C2 81349934 -15C3 81349935 -15C4 81349936 -15C5 81349937 -15C6 81349938 -15C7 81349939 -15C8 81349A30 -15C9 81349A31 -15CA 81349A32 -15CB 81349A33 -15CC 81349A34 -15CD 81349A35 -15CE 81349A36 -15CF 81349A37 -15D0 81349A38 -15D1 81349A39 -15D2 81349B30 -15D3 81349B31 -15D4 81349B32 -15D5 81349B33 -15D6 81349B34 -15D7 81349B35 -15D8 81349B36 -15D9 81349B37 -15DA 81349B38 -15DB 81349B39 -15DC 81349C30 -15DD 81349C31 -15DE 81349C32 -15DF 81349C33 -15E0 81349C34 -15E1 81349C35 -15E2 81349C36 -15E3 81349C37 -15E4 81349C38 -15E5 81349C39 -15E6 81349D30 -15E7 81349D31 -15E8 81349D32 -15E9 81349D33 -15EA 81349D34 -15EB 81349D35 -15EC 81349D36 -15ED 81349D37 -15EE 81349D38 -15EF 81349D39 -15F0 81349E30 -15F1 81349E31 -15F2 81349E32 -15F3 81349E33 -15F4 81349E34 -15F5 81349E35 -15F6 81349E36 -15F7 81349E37 -15F8 81349E38 -15F9 81349E39 -15FA 81349F30 -15FB 81349F31 -15FC 81349F32 -15FD 81349F33 -15FE 81349F34 -15FF 81349F35 -1600 81349F36 -1601 81349F37 -1602 81349F38 -1603 81349F39 -1604 8134A030 -1605 8134A031 -1606 8134A032 -1607 8134A033 -1608 8134A034 -1609 8134A035 -160A 8134A036 -160B 8134A037 -160C 8134A038 -160D 8134A039 -160E 8134A130 -160F 8134A131 -1610 8134A132 -1611 8134A133 -1612 8134A134 -1613 8134A135 -1614 8134A136 -1615 8134A137 -1616 8134A138 -1617 8134A139 -1618 8134A230 -1619 8134A231 -161A 8134A232 -161B 8134A233 -161C 8134A234 -161D 8134A235 -161E 8134A236 -161F 8134A237 -1620 8134A238 -1621 8134A239 -1622 8134A330 -1623 8134A331 -1624 8134A332 -1625 8134A333 -1626 8134A334 -1627 8134A335 -1628 8134A336 -1629 8134A337 -162A 8134A338 -162B 8134A339 -162C 8134A430 -162D 8134A431 -162E 8134A432 -162F 8134A433 -1630 8134A434 -1631 8134A435 -1632 8134A436 -1633 8134A437 -1634 8134A438 -1635 8134A439 -1636 8134A530 -1637 8134A531 -1638 8134A532 -1639 8134A533 -163A 8134A534 -163B 8134A535 -163C 8134A536 -163D 8134A537 -163E 8134A538 -163F 8134A539 -1640 8134A630 -1641 8134A631 -1642 8134A632 -1643 8134A633 -1644 8134A634 -1645 8134A635 -1646 8134A636 -1647 8134A637 -1648 8134A638 -1649 8134A639 -164A 8134A730 -164B 8134A731 -164C 8134A732 -164D 8134A733 -164E 8134A734 -164F 8134A735 -1650 8134A736 -1651 8134A737 -1652 8134A738 -1653 8134A739 -1654 8134A830 -1655 8134A831 -1656 
8134A832 -1657 8134A833 -1658 8134A834 -1659 8134A835 -165A 8134A836 -165B 8134A837 -165C 8134A838 -165D 8134A839 -165E 8134A930 -165F 8134A931 -1660 8134A932 -1661 8134A933 -1662 8134A934 -1663 8134A935 -1664 8134A936 -1665 8134A937 -1666 8134A938 -1667 8134A939 -1668 8134AA30 -1669 8134AA31 -166A 8134AA32 -166B 8134AA33 -166C 8134AA34 -166D 8134AA35 -166E 8134AA36 -166F 8134AA37 -1670 8134AA38 -1671 8134AA39 -1672 8134AB30 -1673 8134AB31 -1674 8134AB32 -1675 8134AB33 -1676 8134AB34 -1677 8134AB35 -1678 8134AB36 -1679 8134AB37 -167A 8134AB38 -167B 8134AB39 -167C 8134AC30 -167D 8134AC31 -167E 8134AC32 -167F 8134AC33 -1680 8134AC34 -1681 8134AC35 -1682 8134AC36 -1683 8134AC37 -1684 8134AC38 -1685 8134AC39 -1686 8134AD30 -1687 8134AD31 -1688 8134AD32 -1689 8134AD33 -168A 8134AD34 -168B 8134AD35 -168C 8134AD36 -168D 8134AD37 -168E 8134AD38 -168F 8134AD39 -1690 8134AE30 -1691 8134AE31 -1692 8134AE32 -1693 8134AE33 -1694 8134AE34 -1695 8134AE35 -1696 8134AE36 -1697 8134AE37 -1698 8134AE38 -1699 8134AE39 -169A 8134AF30 -169B 8134AF31 -169C 8134AF32 -169D 8134AF33 -169E 8134AF34 -169F 8134AF35 -16A0 8134AF36 -16A1 8134AF37 -16A2 8134AF38 -16A3 8134AF39 -16A4 8134B030 -16A5 8134B031 -16A6 8134B032 -16A7 8134B033 -16A8 8134B034 -16A9 8134B035 -16AA 8134B036 -16AB 8134B037 -16AC 8134B038 -16AD 8134B039 -16AE 8134B130 -16AF 8134B131 -16B0 8134B132 -16B1 8134B133 -16B2 8134B134 -16B3 8134B135 -16B4 8134B136 -16B5 8134B137 -16B6 8134B138 -16B7 8134B139 -16B8 8134B230 -16B9 8134B231 -16BA 8134B232 -16BB 8134B233 -16BC 8134B234 -16BD 8134B235 -16BE 8134B236 -16BF 8134B237 -16C0 8134B238 -16C1 8134B239 -16C2 8134B330 -16C3 8134B331 -16C4 8134B332 -16C5 8134B333 -16C6 8134B334 -16C7 8134B335 -16C8 8134B336 -16C9 8134B337 -16CA 8134B338 -16CB 8134B339 -16CC 8134B430 -16CD 8134B431 -16CE 8134B432 -16CF 8134B433 -16D0 8134B434 -16D1 8134B435 -16D2 8134B436 -16D3 8134B437 -16D4 8134B438 -16D5 8134B439 -16D6 8134B530 -16D7 8134B531 -16D8 8134B532 -16D9 8134B533 -16DA 8134B534 -16DB 8134B535 -16DC 8134B536 -16DD 8134B537 -16DE 8134B538 -16DF 8134B539 -16E0 8134B630 -16E1 8134B631 -16E2 8134B632 -16E3 8134B633 -16E4 8134B634 -16E5 8134B635 -16E6 8134B636 -16E7 8134B637 -16E8 8134B638 -16E9 8134B639 -16EA 8134B730 -16EB 8134B731 -16EC 8134B732 -16ED 8134B733 -16EE 8134B734 -16EF 8134B735 -16F0 8134B736 -16F1 8134B737 -16F2 8134B738 -16F3 8134B739 -16F4 8134B830 -16F5 8134B831 -16F6 8134B832 -16F7 8134B833 -16F8 8134B834 -16F9 8134B835 -16FA 8134B836 -16FB 8134B837 -16FC 8134B838 -16FD 8134B839 -16FE 8134B930 -16FF 8134B931 -1700 8134B932 -1701 8134B933 -1702 8134B934 -1703 8134B935 -1704 8134B936 -1705 8134B937 -1706 8134B938 -1707 8134B939 -1708 8134BA30 -1709 8134BA31 -170A 8134BA32 -170B 8134BA33 -170C 8134BA34 -170D 8134BA35 -170E 8134BA36 -170F 8134BA37 -1710 8134BA38 -1711 8134BA39 -1712 8134BB30 -1713 8134BB31 -1714 8134BB32 -1715 8134BB33 -1716 8134BB34 -1717 8134BB35 -1718 8134BB36 -1719 8134BB37 -171A 8134BB38 -171B 8134BB39 -171C 8134BC30 -171D 8134BC31 -171E 8134BC32 -171F 8134BC33 -1720 8134BC34 -1721 8134BC35 -1722 8134BC36 -1723 8134BC37 -1724 8134BC38 -1725 8134BC39 -1726 8134BD30 -1727 8134BD31 -1728 8134BD32 -1729 8134BD33 -172A 8134BD34 -172B 8134BD35 -172C 8134BD36 -172D 8134BD37 -172E 8134BD38 -172F 8134BD39 -1730 8134BE30 -1731 8134BE31 -1732 8134BE32 -1733 8134BE33 -1734 8134BE34 -1735 8134BE35 -1736 8134BE36 -1737 8134BE37 -1738 8134BE38 -1739 8134BE39 -173A 8134BF30 -173B 8134BF31 -173C 8134BF32 -173D 8134BF33 -173E 8134BF34 -173F 8134BF35 -1740 8134BF36 -1741 8134BF37 -1742 8134BF38 -1743 
8134BF39 -1744 8134C030 -1745 8134C031 -1746 8134C032 -1747 8134C033 -1748 8134C034 -1749 8134C035 -174A 8134C036 -174B 8134C037 -174C 8134C038 -174D 8134C039 -174E 8134C130 -174F 8134C131 -1750 8134C132 -1751 8134C133 -1752 8134C134 -1753 8134C135 -1754 8134C136 -1755 8134C137 -1756 8134C138 -1757 8134C139 -1758 8134C230 -1759 8134C231 -175A 8134C232 -175B 8134C233 -175C 8134C234 -175D 8134C235 -175E 8134C236 -175F 8134C237 -1760 8134C238 -1761 8134C239 -1762 8134C330 -1763 8134C331 -1764 8134C332 -1765 8134C333 -1766 8134C334 -1767 8134C335 -1768 8134C336 -1769 8134C337 -176A 8134C338 -176B 8134C339 -176C 8134C430 -176D 8134C431 -176E 8134C432 -176F 8134C433 -1770 8134C434 -1771 8134C435 -1772 8134C436 -1773 8134C437 -1774 8134C438 -1775 8134C439 -1776 8134C530 -1777 8134C531 -1778 8134C532 -1779 8134C533 -177A 8134C534 -177B 8134C535 -177C 8134C536 -177D 8134C537 -177E 8134C538 -177F 8134C539 -1780 8134C630 -1781 8134C631 -1782 8134C632 -1783 8134C633 -1784 8134C634 -1785 8134C635 -1786 8134C636 -1787 8134C637 -1788 8134C638 -1789 8134C639 -178A 8134C730 -178B 8134C731 -178C 8134C732 -178D 8134C733 -178E 8134C734 -178F 8134C735 -1790 8134C736 -1791 8134C737 -1792 8134C738 -1793 8134C739 -1794 8134C830 -1795 8134C831 -1796 8134C832 -1797 8134C833 -1798 8134C834 -1799 8134C835 -179A 8134C836 -179B 8134C837 -179C 8134C838 -179D 8134C839 -179E 8134C930 -179F 8134C931 -17A0 8134C932 -17A1 8134C933 -17A2 8134C934 -17A3 8134C935 -17A4 8134C936 -17A5 8134C937 -17A6 8134C938 -17A7 8134C939 -17A8 8134CA30 -17A9 8134CA31 -17AA 8134CA32 -17AB 8134CA33 -17AC 8134CA34 -17AD 8134CA35 -17AE 8134CA36 -17AF 8134CA37 -17B0 8134CA38 -17B1 8134CA39 -17B2 8134CB30 -17B3 8134CB31 -17B4 8134CB32 -17B5 8134CB33 -17B6 8134CB34 -17B7 8134CB35 -17B8 8134CB36 -17B9 8134CB37 -17BA 8134CB38 -17BB 8134CB39 -17BC 8134CC30 -17BD 8134CC31 -17BE 8134CC32 -17BF 8134CC33 -17C0 8134CC34 -17C1 8134CC35 -17C2 8134CC36 -17C3 8134CC37 -17C4 8134CC38 -17C5 8134CC39 -17C6 8134CD30 -17C7 8134CD31 -17C8 8134CD32 -17C9 8134CD33 -17CA 8134CD34 -17CB 8134CD35 -17CC 8134CD36 -17CD 8134CD37 -17CE 8134CD38 -17CF 8134CD39 -17D0 8134CE30 -17D1 8134CE31 -17D2 8134CE32 -17D3 8134CE33 -17D4 8134CE34 -17D5 8134CE35 -17D6 8134CE36 -17D7 8134CE37 -17D8 8134CE38 -17D9 8134CE39 -17DA 8134CF30 -17DB 8134CF31 -17DC 8134CF32 -17DD 8134CF33 -17DE 8134CF34 -17DF 8134CF35 -17E0 8134CF36 -17E1 8134CF37 -17E2 8134CF38 -17E3 8134CF39 -17E4 8134D030 -17E5 8134D031 -17E6 8134D032 -17E7 8134D033 -17E8 8134D034 -17E9 8134D035 -17EA 8134D036 -17EB 8134D037 -17EC 8134D038 -17ED 8134D039 -17EE 8134D130 -17EF 8134D131 -17F0 8134D132 -17F1 8134D133 -17F2 8134D134 -17F3 8134D135 -17F4 8134D136 -17F5 8134D137 -17F6 8134D138 -17F7 8134D139 -17F8 8134D230 -17F9 8134D231 -17FA 8134D232 -17FB 8134D233 -17FC 8134D234 -17FD 8134D235 -17FE 8134D236 -17FF 8134D237 -1800 8134D238 -1801 8134D239 -1802 8134D330 -1803 8134D331 -1804 8134D332 -1805 8134D333 -1806 8134D334 -1807 8134D335 -1808 8134D336 -1809 8134D337 -180A 8134D338 -180B 8134D339 -180C 8134D430 -180D 8134D431 -180E 8134D432 -180F 8134D433 -1810 8134D434 -1811 8134D435 -1812 8134D436 -1813 8134D437 -1814 8134D438 -1815 8134D439 -1816 8134D530 -1817 8134D531 -1818 8134D532 -1819 8134D533 -181A 8134D534 -181B 8134D535 -181C 8134D536 -181D 8134D537 -181E 8134D538 -181F 8134D539 -1820 8134D630 -1821 8134D631 -1822 8134D632 -1823 8134D633 -1824 8134D634 -1825 8134D635 -1826 8134D636 -1827 8134D637 -1828 8134D638 -1829 8134D639 -182A 8134D730 -182B 8134D731 -182C 8134D732 -182D 8134D733 -182E 8134D734 -182F 8134D735 -1830 
8134D736 -1831 8134D737 -1832 8134D738 -1833 8134D739 -1834 8134D830 -1835 8134D831 -1836 8134D832 -1837 8134D833 -1838 8134D834 -1839 8134D835 -183A 8134D836 -183B 8134D837 -183C 8134D838 -183D 8134D839 -183E 8134D930 -183F 8134D931 -1840 8134D932 -1841 8134D933 -1842 8134D934 -1843 8134D935 -1844 8134D936 -1845 8134D937 -1846 8134D938 -1847 8134D939 -1848 8134DA30 -1849 8134DA31 -184A 8134DA32 -184B 8134DA33 -184C 8134DA34 -184D 8134DA35 -184E 8134DA36 -184F 8134DA37 -1850 8134DA38 -1851 8134DA39 -1852 8134DB30 -1853 8134DB31 -1854 8134DB32 -1855 8134DB33 -1856 8134DB34 -1857 8134DB35 -1858 8134DB36 -1859 8134DB37 -185A 8134DB38 -185B 8134DB39 -185C 8134DC30 -185D 8134DC31 -185E 8134DC32 -185F 8134DC33 -1860 8134DC34 -1861 8134DC35 -1862 8134DC36 -1863 8134DC37 -1864 8134DC38 -1865 8134DC39 -1866 8134DD30 -1867 8134DD31 -1868 8134DD32 -1869 8134DD33 -186A 8134DD34 -186B 8134DD35 -186C 8134DD36 -186D 8134DD37 -186E 8134DD38 -186F 8134DD39 -1870 8134DE30 -1871 8134DE31 -1872 8134DE32 -1873 8134DE33 -1874 8134DE34 -1875 8134DE35 -1876 8134DE36 -1877 8134DE37 -1878 8134DE38 -1879 8134DE39 -187A 8134DF30 -187B 8134DF31 -187C 8134DF32 -187D 8134DF33 -187E 8134DF34 -187F 8134DF35 -1880 8134DF36 -1881 8134DF37 -1882 8134DF38 -1883 8134DF39 -1884 8134E030 -1885 8134E031 -1886 8134E032 -1887 8134E033 -1888 8134E034 -1889 8134E035 -188A 8134E036 -188B 8134E037 -188C 8134E038 -188D 8134E039 -188E 8134E130 -188F 8134E131 -1890 8134E132 -1891 8134E133 -1892 8134E134 -1893 8134E135 -1894 8134E136 -1895 8134E137 -1896 8134E138 -1897 8134E139 -1898 8134E230 -1899 8134E231 -189A 8134E232 -189B 8134E233 -189C 8134E234 -189D 8134E235 -189E 8134E236 -189F 8134E237 -18A0 8134E238 -18A1 8134E239 -18A2 8134E330 -18A3 8134E331 -18A4 8134E332 -18A5 8134E333 -18A6 8134E334 -18A7 8134E335 -18A8 8134E336 -18A9 8134E337 -18AA 8134E338 -18AB 8134E339 -18AC 8134E430 -18AD 8134E431 -18AE 8134E432 -18AF 8134E433 -18B0 8134E434 -18B1 8134E435 -18B2 8134E436 -18B3 8134E437 -18B4 8134E438 -18B5 8134E439 -18B6 8134E530 -18B7 8134E531 -18B8 8134E532 -18B9 8134E533 -18BA 8134E534 -18BB 8134E535 -18BC 8134E536 -18BD 8134E537 -18BE 8134E538 -18BF 8134E539 -18C0 8134E630 -18C1 8134E631 -18C2 8134E632 -18C3 8134E633 -18C4 8134E634 -18C5 8134E635 -18C6 8134E636 -18C7 8134E637 -18C8 8134E638 -18C9 8134E639 -18CA 8134E730 -18CB 8134E731 -18CC 8134E732 -18CD 8134E733 -18CE 8134E734 -18CF 8134E735 -18D0 8134E736 -18D1 8134E737 -18D2 8134E738 -18D3 8134E739 -18D4 8134E830 -18D5 8134E831 -18D6 8134E832 -18D7 8134E833 -18D8 8134E834 -18D9 8134E835 -18DA 8134E836 -18DB 8134E837 -18DC 8134E838 -18DD 8134E839 -18DE 8134E930 -18DF 8134E931 -18E0 8134E932 -18E1 8134E933 -18E2 8134E934 -18E3 8134E935 -18E4 8134E936 -18E5 8134E937 -18E6 8134E938 -18E7 8134E939 -18E8 8134EA30 -18E9 8134EA31 -18EA 8134EA32 -18EB 8134EA33 -18EC 8134EA34 -18ED 8134EA35 -18EE 8134EA36 -18EF 8134EA37 -18F0 8134EA38 -18F1 8134EA39 -18F2 8134EB30 -18F3 8134EB31 -18F4 8134EB32 -18F5 8134EB33 -18F6 8134EB34 -18F7 8134EB35 -18F8 8134EB36 -18F9 8134EB37 -18FA 8134EB38 -18FB 8134EB39 -18FC 8134EC30 -18FD 8134EC31 -18FE 8134EC32 -18FF 8134EC33 -1900 8134EC34 -1901 8134EC35 -1902 8134EC36 -1903 8134EC37 -1904 8134EC38 -1905 8134EC39 -1906 8134ED30 -1907 8134ED31 -1908 8134ED32 -1909 8134ED33 -190A 8134ED34 -190B 8134ED35 -190C 8134ED36 -190D 8134ED37 -190E 8134ED38 -190F 8134ED39 -1910 8134EE30 -1911 8134EE31 -1912 8134EE32 -1913 8134EE33 -1914 8134EE34 -1915 8134EE35 -1916 8134EE36 -1917 8134EE37 -1918 8134EE38 -1919 8134EE39 -191A 8134EF30 -191B 8134EF31 -191C 8134EF32 -191D 
8134EF33 -191E 8134EF34 -191F 8134EF35 -1920 8134EF36 -1921 8134EF37 -1922 8134EF38 -1923 8134EF39 -1924 8134F030 -1925 8134F031 -1926 8134F032 -1927 8134F033 -1928 8134F034 -1929 8134F035 -192A 8134F036 -192B 8134F037 -192C 8134F038 -192D 8134F039 -192E 8134F130 -192F 8134F131 -1930 8134F132 -1931 8134F133 -1932 8134F134 -1933 8134F135 -1934 8134F136 -1935 8134F137 -1936 8134F138 -1937 8134F139 -1938 8134F230 -1939 8134F231 -193A 8134F232 -193B 8134F233 -193C 8134F234 -193D 8134F235 -193E 8134F236 -193F 8134F237 -1940 8134F238 -1941 8134F239 -1942 8134F330 -1943 8134F331 -1944 8134F332 -1945 8134F333 -1946 8134F334 -1947 8134F335 -1948 8134F336 -1949 8134F337 -194A 8134F338 -194B 8134F339 -194C 8134F430 -194D 8134F431 -194E 8134F432 -194F 8134F433 -1950 8134F434 -1951 8134F435 -1952 8134F436 -1953 8134F437 -1954 8134F438 -1955 8134F439 -1956 8134F530 -1957 8134F531 -1958 8134F532 -1959 8134F533 -195A 8134F534 -195B 8134F535 -195C 8134F536 -195D 8134F537 -195E 8134F538 -195F 8134F539 -1960 8134F630 -1961 8134F631 -1962 8134F632 -1963 8134F633 -1964 8134F634 -1965 8134F635 -1966 8134F636 -1967 8134F637 -1968 8134F638 -1969 8134F639 -196A 8134F730 -196B 8134F731 -196C 8134F732 -196D 8134F733 -196E 8134F734 -196F 8134F735 -1970 8134F736 -1971 8134F737 -1972 8134F738 -1973 8134F739 -1974 8134F830 -1975 8134F831 -1976 8134F832 -1977 8134F833 -1978 8134F834 -1979 8134F835 -197A 8134F836 -197B 8134F837 -197C 8134F838 -197D 8134F839 -197E 8134F930 -197F 8134F931 -1980 8134F932 -1981 8134F933 -1982 8134F934 -1983 8134F935 -1984 8134F936 -1985 8134F937 -1986 8134F938 -1987 8134F939 -1988 8134FA30 -1989 8134FA31 -198A 8134FA32 -198B 8134FA33 -198C 8134FA34 -198D 8134FA35 -198E 8134FA36 -198F 8134FA37 -1990 8134FA38 -1991 8134FA39 -1992 8134FB30 -1993 8134FB31 -1994 8134FB32 -1995 8134FB33 -1996 8134FB34 -1997 8134FB35 -1998 8134FB36 -1999 8134FB37 -199A 8134FB38 -199B 8134FB39 -199C 8134FC30 -199D 8134FC31 -199E 8134FC32 -199F 8134FC33 -19A0 8134FC34 -19A1 8134FC35 -19A2 8134FC36 -19A3 8134FC37 -19A4 8134FC38 -19A5 8134FC39 -19A6 8134FD30 -19A7 8134FD31 -19A8 8134FD32 -19A9 8134FD33 -19AA 8134FD34 -19AB 8134FD35 -19AC 8134FD36 -19AD 8134FD37 -19AE 8134FD38 -19AF 8134FD39 -19B0 8134FE30 -19B1 8134FE31 -19B2 8134FE32 -19B3 8134FE33 -19B4 8134FE34 -19B5 8134FE35 -19B6 8134FE36 -19B7 8134FE37 -19B8 8134FE38 -19B9 8134FE39 -19BA 81358130 -19BB 81358131 -19BC 81358132 -19BD 81358133 -19BE 81358134 -19BF 81358135 -19C0 81358136 -19C1 81358137 -19C2 81358138 -19C3 81358139 -19C4 81358230 -19C5 81358231 -19C6 81358232 -19C7 81358233 -19C8 81358234 -19C9 81358235 -19CA 81358236 -19CB 81358237 -19CC 81358238 -19CD 81358239 -19CE 81358330 -19CF 81358331 -19D0 81358332 -19D1 81358333 -19D2 81358334 -19D3 81358335 -19D4 81358336 -19D5 81358337 -19D6 81358338 -19D7 81358339 -19D8 81358430 -19D9 81358431 -19DA 81358432 -19DB 81358433 -19DC 81358434 -19DD 81358435 -19DE 81358436 -19DF 81358437 -19E0 81358438 -19E1 81358439 -19E2 81358530 -19E3 81358531 -19E4 81358532 -19E5 81358533 -19E6 81358534 -19E7 81358535 -19E8 81358536 -19E9 81358537 -19EA 81358538 -19EB 81358539 -19EC 81358630 -19ED 81358631 -19EE 81358632 -19EF 81358633 -19F0 81358634 -19F1 81358635 -19F2 81358636 -19F3 81358637 -19F4 81358638 -19F5 81358639 -19F6 81358730 -19F7 81358731 -19F8 81358732 -19F9 81358733 -19FA 81358734 -19FB 81358735 -19FC 81358736 -19FD 81358737 -19FE 81358738 -19FF 81358739 -1A00 81358830 -1A01 81358831 -1A02 81358832 -1A03 81358833 -1A04 81358834 -1A05 81358835 -1A06 81358836 -1A07 81358837 -1A08 81358838 -1A09 81358839 -1A0A 
81358930 -1A0B 81358931 -1A0C 81358932 -1A0D 81358933 -1A0E 81358934 -1A0F 81358935 -1A10 81358936 -1A11 81358937 -1A12 81358938 -1A13 81358939 -1A14 81358A30 -1A15 81358A31 -1A16 81358A32 -1A17 81358A33 -1A18 81358A34 -1A19 81358A35 -1A1A 81358A36 -1A1B 81358A37 -1A1C 81358A38 -1A1D 81358A39 -1A1E 81358B30 -1A1F 81358B31 -1A20 81358B32 -1A21 81358B33 -1A22 81358B34 -1A23 81358B35 -1A24 81358B36 -1A25 81358B37 -1A26 81358B38 -1A27 81358B39 -1A28 81358C30 -1A29 81358C31 -1A2A 81358C32 -1A2B 81358C33 -1A2C 81358C34 -1A2D 81358C35 -1A2E 81358C36 -1A2F 81358C37 -1A30 81358C38 -1A31 81358C39 -1A32 81358D30 -1A33 81358D31 -1A34 81358D32 -1A35 81358D33 -1A36 81358D34 -1A37 81358D35 -1A38 81358D36 -1A39 81358D37 -1A3A 81358D38 -1A3B 81358D39 -1A3C 81358E30 -1A3D 81358E31 -1A3E 81358E32 -1A3F 81358E33 -1A40 81358E34 -1A41 81358E35 -1A42 81358E36 -1A43 81358E37 -1A44 81358E38 -1A45 81358E39 -1A46 81358F30 -1A47 81358F31 -1A48 81358F32 -1A49 81358F33 -1A4A 81358F34 -1A4B 81358F35 -1A4C 81358F36 -1A4D 81358F37 -1A4E 81358F38 -1A4F 81358F39 -1A50 81359030 -1A51 81359031 -1A52 81359032 -1A53 81359033 -1A54 81359034 -1A55 81359035 -1A56 81359036 -1A57 81359037 -1A58 81359038 -1A59 81359039 -1A5A 81359130 -1A5B 81359131 -1A5C 81359132 -1A5D 81359133 -1A5E 81359134 -1A5F 81359135 -1A60 81359136 -1A61 81359137 -1A62 81359138 -1A63 81359139 -1A64 81359230 -1A65 81359231 -1A66 81359232 -1A67 81359233 -1A68 81359234 -1A69 81359235 -1A6A 81359236 -1A6B 81359237 -1A6C 81359238 -1A6D 81359239 -1A6E 81359330 -1A6F 81359331 -1A70 81359332 -1A71 81359333 -1A72 81359334 -1A73 81359335 -1A74 81359336 -1A75 81359337 -1A76 81359338 -1A77 81359339 -1A78 81359430 -1A79 81359431 -1A7A 81359432 -1A7B 81359433 -1A7C 81359434 -1A7D 81359435 -1A7E 81359436 -1A7F 81359437 -1A80 81359438 -1A81 81359439 -1A82 81359530 -1A83 81359531 -1A84 81359532 -1A85 81359533 -1A86 81359534 -1A87 81359535 -1A88 81359536 -1A89 81359537 -1A8A 81359538 -1A8B 81359539 -1A8C 81359630 -1A8D 81359631 -1A8E 81359632 -1A8F 81359633 -1A90 81359634 -1A91 81359635 -1A92 81359636 -1A93 81359637 -1A94 81359638 -1A95 81359639 -1A96 81359730 -1A97 81359731 -1A98 81359732 -1A99 81359733 -1A9A 81359734 -1A9B 81359735 -1A9C 81359736 -1A9D 81359737 -1A9E 81359738 -1A9F 81359739 -1AA0 81359830 -1AA1 81359831 -1AA2 81359832 -1AA3 81359833 -1AA4 81359834 -1AA5 81359835 -1AA6 81359836 -1AA7 81359837 -1AA8 81359838 -1AA9 81359839 -1AAA 81359930 -1AAB 81359931 -1AAC 81359932 -1AAD 81359933 -1AAE 81359934 -1AAF 81359935 -1AB0 81359936 -1AB1 81359937 -1AB2 81359938 -1AB3 81359939 -1AB4 81359A30 -1AB5 81359A31 -1AB6 81359A32 -1AB7 81359A33 -1AB8 81359A34 -1AB9 81359A35 -1ABA 81359A36 -1ABB 81359A37 -1ABC 81359A38 -1ABD 81359A39 -1ABE 81359B30 -1ABF 81359B31 -1AC0 81359B32 -1AC1 81359B33 -1AC2 81359B34 -1AC3 81359B35 -1AC4 81359B36 -1AC5 81359B37 -1AC6 81359B38 -1AC7 81359B39 -1AC8 81359C30 -1AC9 81359C31 -1ACA 81359C32 -1ACB 81359C33 -1ACC 81359C34 -1ACD 81359C35 -1ACE 81359C36 -1ACF 81359C37 -1AD0 81359C38 -1AD1 81359C39 -1AD2 81359D30 -1AD3 81359D31 -1AD4 81359D32 -1AD5 81359D33 -1AD6 81359D34 -1AD7 81359D35 -1AD8 81359D36 -1AD9 81359D37 -1ADA 81359D38 -1ADB 81359D39 -1ADC 81359E30 -1ADD 81359E31 -1ADE 81359E32 -1ADF 81359E33 -1AE0 81359E34 -1AE1 81359E35 -1AE2 81359E36 -1AE3 81359E37 -1AE4 81359E38 -1AE5 81359E39 -1AE6 81359F30 -1AE7 81359F31 -1AE8 81359F32 -1AE9 81359F33 -1AEA 81359F34 -1AEB 81359F35 -1AEC 81359F36 -1AED 81359F37 -1AEE 81359F38 -1AEF 81359F39 -1AF0 8135A030 -1AF1 8135A031 -1AF2 8135A032 -1AF3 8135A033 -1AF4 8135A034 -1AF5 8135A035 -1AF6 8135A036 -1AF7 
8135A037 -1AF8 8135A038 -1AF9 8135A039 -1AFA 8135A130 -1AFB 8135A131 -1AFC 8135A132 -1AFD 8135A133 -1AFE 8135A134 -1AFF 8135A135 -1B00 8135A136 -1B01 8135A137 -1B02 8135A138 -1B03 8135A139 -1B04 8135A230 -1B05 8135A231 -1B06 8135A232 -1B07 8135A233 -1B08 8135A234 -1B09 8135A235 -1B0A 8135A236 -1B0B 8135A237 -1B0C 8135A238 -1B0D 8135A239 -1B0E 8135A330 -1B0F 8135A331 -1B10 8135A332 -1B11 8135A333 -1B12 8135A334 -1B13 8135A335 -1B14 8135A336 -1B15 8135A337 -1B16 8135A338 -1B17 8135A339 -1B18 8135A430 -1B19 8135A431 -1B1A 8135A432 -1B1B 8135A433 -1B1C 8135A434 -1B1D 8135A435 -1B1E 8135A436 -1B1F 8135A437 -1B20 8135A438 -1B21 8135A439 -1B22 8135A530 -1B23 8135A531 -1B24 8135A532 -1B25 8135A533 -1B26 8135A534 -1B27 8135A535 -1B28 8135A536 -1B29 8135A537 -1B2A 8135A538 -1B2B 8135A539 -1B2C 8135A630 -1B2D 8135A631 -1B2E 8135A632 -1B2F 8135A633 -1B30 8135A634 -1B31 8135A635 -1B32 8135A636 -1B33 8135A637 -1B34 8135A638 -1B35 8135A639 -1B36 8135A730 -1B37 8135A731 -1B38 8135A732 -1B39 8135A733 -1B3A 8135A734 -1B3B 8135A735 -1B3C 8135A736 -1B3D 8135A737 -1B3E 8135A738 -1B3F 8135A739 -1B40 8135A830 -1B41 8135A831 -1B42 8135A832 -1B43 8135A833 -1B44 8135A834 -1B45 8135A835 -1B46 8135A836 -1B47 8135A837 -1B48 8135A838 -1B49 8135A839 -1B4A 8135A930 -1B4B 8135A931 -1B4C 8135A932 -1B4D 8135A933 -1B4E 8135A934 -1B4F 8135A935 -1B50 8135A936 -1B51 8135A937 -1B52 8135A938 -1B53 8135A939 -1B54 8135AA30 -1B55 8135AA31 -1B56 8135AA32 -1B57 8135AA33 -1B58 8135AA34 -1B59 8135AA35 -1B5A 8135AA36 -1B5B 8135AA37 -1B5C 8135AA38 -1B5D 8135AA39 -1B5E 8135AB30 -1B5F 8135AB31 -1B60 8135AB32 -1B61 8135AB33 -1B62 8135AB34 -1B63 8135AB35 -1B64 8135AB36 -1B65 8135AB37 -1B66 8135AB38 -1B67 8135AB39 -1B68 8135AC30 -1B69 8135AC31 -1B6A 8135AC32 -1B6B 8135AC33 -1B6C 8135AC34 -1B6D 8135AC35 -1B6E 8135AC36 -1B6F 8135AC37 -1B70 8135AC38 -1B71 8135AC39 -1B72 8135AD30 -1B73 8135AD31 -1B74 8135AD32 -1B75 8135AD33 -1B76 8135AD34 -1B77 8135AD35 -1B78 8135AD36 -1B79 8135AD37 -1B7A 8135AD38 -1B7B 8135AD39 -1B7C 8135AE30 -1B7D 8135AE31 -1B7E 8135AE32 -1B7F 8135AE33 -1B80 8135AE34 -1B81 8135AE35 -1B82 8135AE36 -1B83 8135AE37 -1B84 8135AE38 -1B85 8135AE39 -1B86 8135AF30 -1B87 8135AF31 -1B88 8135AF32 -1B89 8135AF33 -1B8A 8135AF34 -1B8B 8135AF35 -1B8C 8135AF36 -1B8D 8135AF37 -1B8E 8135AF38 -1B8F 8135AF39 -1B90 8135B030 -1B91 8135B031 -1B92 8135B032 -1B93 8135B033 -1B94 8135B034 -1B95 8135B035 -1B96 8135B036 -1B97 8135B037 -1B98 8135B038 -1B99 8135B039 -1B9A 8135B130 -1B9B 8135B131 -1B9C 8135B132 -1B9D 8135B133 -1B9E 8135B134 -1B9F 8135B135 -1BA0 8135B136 -1BA1 8135B137 -1BA2 8135B138 -1BA3 8135B139 -1BA4 8135B230 -1BA5 8135B231 -1BA6 8135B232 -1BA7 8135B233 -1BA8 8135B234 -1BA9 8135B235 -1BAA 8135B236 -1BAB 8135B237 -1BAC 8135B238 -1BAD 8135B239 -1BAE 8135B330 -1BAF 8135B331 -1BB0 8135B332 -1BB1 8135B333 -1BB2 8135B334 -1BB3 8135B335 -1BB4 8135B336 -1BB5 8135B337 -1BB6 8135B338 -1BB7 8135B339 -1BB8 8135B430 -1BB9 8135B431 -1BBA 8135B432 -1BBB 8135B433 -1BBC 8135B434 -1BBD 8135B435 -1BBE 8135B436 -1BBF 8135B437 -1BC0 8135B438 -1BC1 8135B439 -1BC2 8135B530 -1BC3 8135B531 -1BC4 8135B532 -1BC5 8135B533 -1BC6 8135B534 -1BC7 8135B535 -1BC8 8135B536 -1BC9 8135B537 -1BCA 8135B538 -1BCB 8135B539 -1BCC 8135B630 -1BCD 8135B631 -1BCE 8135B632 -1BCF 8135B633 -1BD0 8135B634 -1BD1 8135B635 -1BD2 8135B636 -1BD3 8135B637 -1BD4 8135B638 -1BD5 8135B639 -1BD6 8135B730 -1BD7 8135B731 -1BD8 8135B732 -1BD9 8135B733 -1BDA 8135B734 -1BDB 8135B735 -1BDC 8135B736 -1BDD 8135B737 -1BDE 8135B738 -1BDF 8135B739 -1BE0 8135B830 -1BE1 8135B831 -1BE2 8135B832 -1BE3 8135B833 -1BE4 
8135B834 -1BE5 8135B835 -1BE6 8135B836 -1BE7 8135B837 -1BE8 8135B838 -1BE9 8135B839 -1BEA 8135B930 -1BEB 8135B931 -1BEC 8135B932 -1BED 8135B933 -1BEE 8135B934 -1BEF 8135B935 -1BF0 8135B936 -1BF1 8135B937 -1BF2 8135B938 -1BF3 8135B939 -1BF4 8135BA30 -1BF5 8135BA31 -1BF6 8135BA32 -1BF7 8135BA33 -1BF8 8135BA34 -1BF9 8135BA35 -1BFA 8135BA36 -1BFB 8135BA37 -1BFC 8135BA38 -1BFD 8135BA39 -1BFE 8135BB30 -1BFF 8135BB31 -1C00 8135BB32 -1C01 8135BB33 -1C02 8135BB34 -1C03 8135BB35 -1C04 8135BB36 -1C05 8135BB37 -1C06 8135BB38 -1C07 8135BB39 -1C08 8135BC30 -1C09 8135BC31 -1C0A 8135BC32 -1C0B 8135BC33 -1C0C 8135BC34 -1C0D 8135BC35 -1C0E 8135BC36 -1C0F 8135BC37 -1C10 8135BC38 -1C11 8135BC39 -1C12 8135BD30 -1C13 8135BD31 -1C14 8135BD32 -1C15 8135BD33 -1C16 8135BD34 -1C17 8135BD35 -1C18 8135BD36 -1C19 8135BD37 -1C1A 8135BD38 -1C1B 8135BD39 -1C1C 8135BE30 -1C1D 8135BE31 -1C1E 8135BE32 -1C1F 8135BE33 -1C20 8135BE34 -1C21 8135BE35 -1C22 8135BE36 -1C23 8135BE37 -1C24 8135BE38 -1C25 8135BE39 -1C26 8135BF30 -1C27 8135BF31 -1C28 8135BF32 -1C29 8135BF33 -1C2A 8135BF34 -1C2B 8135BF35 -1C2C 8135BF36 -1C2D 8135BF37 -1C2E 8135BF38 -1C2F 8135BF39 -1C30 8135C030 -1C31 8135C031 -1C32 8135C032 -1C33 8135C033 -1C34 8135C034 -1C35 8135C035 -1C36 8135C036 -1C37 8135C037 -1C38 8135C038 -1C39 8135C039 -1C3A 8135C130 -1C3B 8135C131 -1C3C 8135C132 -1C3D 8135C133 -1C3E 8135C134 -1C3F 8135C135 -1C40 8135C136 -1C41 8135C137 -1C42 8135C138 -1C43 8135C139 -1C44 8135C230 -1C45 8135C231 -1C46 8135C232 -1C47 8135C233 -1C48 8135C234 -1C49 8135C235 -1C4A 8135C236 -1C4B 8135C237 -1C4C 8135C238 -1C4D 8135C239 -1C4E 8135C330 -1C4F 8135C331 -1C50 8135C332 -1C51 8135C333 -1C52 8135C334 -1C53 8135C335 -1C54 8135C336 -1C55 8135C337 -1C56 8135C338 -1C57 8135C339 -1C58 8135C430 -1C59 8135C431 -1C5A 8135C432 -1C5B 8135C433 -1C5C 8135C434 -1C5D 8135C435 -1C5E 8135C436 -1C5F 8135C437 -1C60 8135C438 -1C61 8135C439 -1C62 8135C530 -1C63 8135C531 -1C64 8135C532 -1C65 8135C533 -1C66 8135C534 -1C67 8135C535 -1C68 8135C536 -1C69 8135C537 -1C6A 8135C538 -1C6B 8135C539 -1C6C 8135C630 -1C6D 8135C631 -1C6E 8135C632 -1C6F 8135C633 -1C70 8135C634 -1C71 8135C635 -1C72 8135C636 -1C73 8135C637 -1C74 8135C638 -1C75 8135C639 -1C76 8135C730 -1C77 8135C731 -1C78 8135C732 -1C79 8135C733 -1C7A 8135C734 -1C7B 8135C735 -1C7C 8135C736 -1C7D 8135C737 -1C7E 8135C738 -1C7F 8135C739 -1C80 8135C830 -1C81 8135C831 -1C82 8135C832 -1C83 8135C833 -1C84 8135C834 -1C85 8135C835 -1C86 8135C836 -1C87 8135C837 -1C88 8135C838 -1C89 8135C839 -1C8A 8135C930 -1C8B 8135C931 -1C8C 8135C932 -1C8D 8135C933 -1C8E 8135C934 -1C8F 8135C935 -1C90 8135C936 -1C91 8135C937 -1C92 8135C938 -1C93 8135C939 -1C94 8135CA30 -1C95 8135CA31 -1C96 8135CA32 -1C97 8135CA33 -1C98 8135CA34 -1C99 8135CA35 -1C9A 8135CA36 -1C9B 8135CA37 -1C9C 8135CA38 -1C9D 8135CA39 -1C9E 8135CB30 -1C9F 8135CB31 -1CA0 8135CB32 -1CA1 8135CB33 -1CA2 8135CB34 -1CA3 8135CB35 -1CA4 8135CB36 -1CA5 8135CB37 -1CA6 8135CB38 -1CA7 8135CB39 -1CA8 8135CC30 -1CA9 8135CC31 -1CAA 8135CC32 -1CAB 8135CC33 -1CAC 8135CC34 -1CAD 8135CC35 -1CAE 8135CC36 -1CAF 8135CC37 -1CB0 8135CC38 -1CB1 8135CC39 -1CB2 8135CD30 -1CB3 8135CD31 -1CB4 8135CD32 -1CB5 8135CD33 -1CB6 8135CD34 -1CB7 8135CD35 -1CB8 8135CD36 -1CB9 8135CD37 -1CBA 8135CD38 -1CBB 8135CD39 -1CBC 8135CE30 -1CBD 8135CE31 -1CBE 8135CE32 -1CBF 8135CE33 -1CC0 8135CE34 -1CC1 8135CE35 -1CC2 8135CE36 -1CC3 8135CE37 -1CC4 8135CE38 -1CC5 8135CE39 -1CC6 8135CF30 -1CC7 8135CF31 -1CC8 8135CF32 -1CC9 8135CF33 -1CCA 8135CF34 -1CCB 8135CF35 -1CCC 8135CF36 -1CCD 8135CF37 -1CCE 8135CF38 -1CCF 8135CF39 -1CD0 8135D030 -1CD1 
8135D031 -1CD2 8135D032 -1CD3 8135D033 -1CD4 8135D034 -1CD5 8135D035 -1CD6 8135D036 -1CD7 8135D037 -1CD8 8135D038 -1CD9 8135D039 -1CDA 8135D130 -1CDB 8135D131 -1CDC 8135D132 -1CDD 8135D133 -1CDE 8135D134 -1CDF 8135D135 -1CE0 8135D136 -1CE1 8135D137 -1CE2 8135D138 -1CE3 8135D139 -1CE4 8135D230 -1CE5 8135D231 -1CE6 8135D232 -1CE7 8135D233 -1CE8 8135D234 -1CE9 8135D235 -1CEA 8135D236 -1CEB 8135D237 -1CEC 8135D238 -1CED 8135D239 -1CEE 8135D330 -1CEF 8135D331 -1CF0 8135D332 -1CF1 8135D333 -1CF2 8135D334 -1CF3 8135D335 -1CF4 8135D336 -1CF5 8135D337 -1CF6 8135D338 -1CF7 8135D339 -1CF8 8135D430 -1CF9 8135D431 -1CFA 8135D432 -1CFB 8135D433 -1CFC 8135D434 -1CFD 8135D435 -1CFE 8135D436 -1CFF 8135D437 -1D00 8135D438 -1D01 8135D439 -1D02 8135D530 -1D03 8135D531 -1D04 8135D532 -1D05 8135D533 -1D06 8135D534 -1D07 8135D535 -1D08 8135D536 -1D09 8135D537 -1D0A 8135D538 -1D0B 8135D539 -1D0C 8135D630 -1D0D 8135D631 -1D0E 8135D632 -1D0F 8135D633 -1D10 8135D634 -1D11 8135D635 -1D12 8135D636 -1D13 8135D637 -1D14 8135D638 -1D15 8135D639 -1D16 8135D730 -1D17 8135D731 -1D18 8135D732 -1D19 8135D733 -1D1A 8135D734 -1D1B 8135D735 -1D1C 8135D736 -1D1D 8135D737 -1D1E 8135D738 -1D1F 8135D739 -1D20 8135D830 -1D21 8135D831 -1D22 8135D832 -1D23 8135D833 -1D24 8135D834 -1D25 8135D835 -1D26 8135D836 -1D27 8135D837 -1D28 8135D838 -1D29 8135D839 -1D2A 8135D930 -1D2B 8135D931 -1D2C 8135D932 -1D2D 8135D933 -1D2E 8135D934 -1D2F 8135D935 -1D30 8135D936 -1D31 8135D937 -1D32 8135D938 -1D33 8135D939 -1D34 8135DA30 -1D35 8135DA31 -1D36 8135DA32 -1D37 8135DA33 -1D38 8135DA34 -1D39 8135DA35 -1D3A 8135DA36 -1D3B 8135DA37 -1D3C 8135DA38 -1D3D 8135DA39 -1D3E 8135DB30 -1D3F 8135DB31 -1D40 8135DB32 -1D41 8135DB33 -1D42 8135DB34 -1D43 8135DB35 -1D44 8135DB36 -1D45 8135DB37 -1D46 8135DB38 -1D47 8135DB39 -1D48 8135DC30 -1D49 8135DC31 -1D4A 8135DC32 -1D4B 8135DC33 -1D4C 8135DC34 -1D4D 8135DC35 -1D4E 8135DC36 -1D4F 8135DC37 -1D50 8135DC38 -1D51 8135DC39 -1D52 8135DD30 -1D53 8135DD31 -1D54 8135DD32 -1D55 8135DD33 -1D56 8135DD34 -1D57 8135DD35 -1D58 8135DD36 -1D59 8135DD37 -1D5A 8135DD38 -1D5B 8135DD39 -1D5C 8135DE30 -1D5D 8135DE31 -1D5E 8135DE32 -1D5F 8135DE33 -1D60 8135DE34 -1D61 8135DE35 -1D62 8135DE36 -1D63 8135DE37 -1D64 8135DE38 -1D65 8135DE39 -1D66 8135DF30 -1D67 8135DF31 -1D68 8135DF32 -1D69 8135DF33 -1D6A 8135DF34 -1D6B 8135DF35 -1D6C 8135DF36 -1D6D 8135DF37 -1D6E 8135DF38 -1D6F 8135DF39 -1D70 8135E030 -1D71 8135E031 -1D72 8135E032 -1D73 8135E033 -1D74 8135E034 -1D75 8135E035 -1D76 8135E036 -1D77 8135E037 -1D78 8135E038 -1D79 8135E039 -1D7A 8135E130 -1D7B 8135E131 -1D7C 8135E132 -1D7D 8135E133 -1D7E 8135E134 -1D7F 8135E135 -1D80 8135E136 -1D81 8135E137 -1D82 8135E138 -1D83 8135E139 -1D84 8135E230 -1D85 8135E231 -1D86 8135E232 -1D87 8135E233 -1D88 8135E234 -1D89 8135E235 -1D8A 8135E236 -1D8B 8135E237 -1D8C 8135E238 -1D8D 8135E239 -1D8E 8135E330 -1D8F 8135E331 -1D90 8135E332 -1D91 8135E333 -1D92 8135E334 -1D93 8135E335 -1D94 8135E336 -1D95 8135E337 -1D96 8135E338 -1D97 8135E339 -1D98 8135E430 -1D99 8135E431 -1D9A 8135E432 -1D9B 8135E433 -1D9C 8135E434 -1D9D 8135E435 -1D9E 8135E436 -1D9F 8135E437 -1DA0 8135E438 -1DA1 8135E439 -1DA2 8135E530 -1DA3 8135E531 -1DA4 8135E532 -1DA5 8135E533 -1DA6 8135E534 -1DA7 8135E535 -1DA8 8135E536 -1DA9 8135E537 -1DAA 8135E538 -1DAB 8135E539 -1DAC 8135E630 -1DAD 8135E631 -1DAE 8135E632 -1DAF 8135E633 -1DB0 8135E634 -1DB1 8135E635 -1DB2 8135E636 -1DB3 8135E637 -1DB4 8135E638 -1DB5 8135E639 -1DB6 8135E730 -1DB7 8135E731 -1DB8 8135E732 -1DB9 8135E733 -1DBA 8135E734 -1DBB 8135E735 -1DBC 8135E736 -1DBD 8135E737 -1DBE 
8135E738 -1DBF 8135E739 -1DC0 8135E830 -1DC1 8135E831 -1DC2 8135E832 -1DC3 8135E833 -1DC4 8135E834 -1DC5 8135E835 -1DC6 8135E836 -1DC7 8135E837 -1DC8 8135E838 -1DC9 8135E839 -1DCA 8135E930 -1DCB 8135E931 -1DCC 8135E932 -1DCD 8135E933 -1DCE 8135E934 -1DCF 8135E935 -1DD0 8135E936 -1DD1 8135E937 -1DD2 8135E938 -1DD3 8135E939 -1DD4 8135EA30 -1DD5 8135EA31 -1DD6 8135EA32 -1DD7 8135EA33 -1DD8 8135EA34 -1DD9 8135EA35 -1DDA 8135EA36 -1DDB 8135EA37 -1DDC 8135EA38 -1DDD 8135EA39 -1DDE 8135EB30 -1DDF 8135EB31 -1DE0 8135EB32 -1DE1 8135EB33 -1DE2 8135EB34 -1DE3 8135EB35 -1DE4 8135EB36 -1DE5 8135EB37 -1DE6 8135EB38 -1DE7 8135EB39 -1DE8 8135EC30 -1DE9 8135EC31 -1DEA 8135EC32 -1DEB 8135EC33 -1DEC 8135EC34 -1DED 8135EC35 -1DEE 8135EC36 -1DEF 8135EC37 -1DF0 8135EC38 -1DF1 8135EC39 -1DF2 8135ED30 -1DF3 8135ED31 -1DF4 8135ED32 -1DF5 8135ED33 -1DF6 8135ED34 -1DF7 8135ED35 -1DF8 8135ED36 -1DF9 8135ED37 -1DFA 8135ED38 -1DFB 8135ED39 -1DFC 8135EE30 -1DFD 8135EE31 -1DFE 8135EE32 -1DFF 8135EE33 -1E00 8135EE34 -1E01 8135EE35 -1E02 8135EE36 -1E03 8135EE37 -1E04 8135EE38 -1E05 8135EE39 -1E06 8135EF30 -1E07 8135EF31 -1E08 8135EF32 -1E09 8135EF33 -1E0A 8135EF34 -1E0B 8135EF35 -1E0C 8135EF36 -1E0D 8135EF37 -1E0E 8135EF38 -1E0F 8135EF39 -1E10 8135F030 -1E11 8135F031 -1E12 8135F032 -1E13 8135F033 -1E14 8135F034 -1E15 8135F035 -1E16 8135F036 -1E17 8135F037 -1E18 8135F038 -1E19 8135F039 -1E1A 8135F130 -1E1B 8135F131 -1E1C 8135F132 -1E1D 8135F133 -1E1E 8135F134 -1E1F 8135F135 -1E20 8135F136 -1E21 8135F137 -1E22 8135F138 -1E23 8135F139 -1E24 8135F230 -1E25 8135F231 -1E26 8135F232 -1E27 8135F233 -1E28 8135F234 -1E29 8135F235 -1E2A 8135F236 -1E2B 8135F237 -1E2C 8135F238 -1E2D 8135F239 -1E2E 8135F330 -1E2F 8135F331 -1E30 8135F332 -1E31 8135F333 -1E32 8135F334 -1E33 8135F335 -1E34 8135F336 -1E35 8135F337 -1E36 8135F338 -1E37 8135F339 -1E38 8135F430 -1E39 8135F431 -1E3A 8135F432 -1E3B 8135F433 -1E3C 8135F434 -1E3D 8135F435 -1E3E 8135F436 -1E3F 8135F437 -1E40 8135F438 -1E41 8135F439 -1E42 8135F530 -1E43 8135F531 -1E44 8135F532 -1E45 8135F533 -1E46 8135F534 -1E47 8135F535 -1E48 8135F536 -1E49 8135F537 -1E4A 8135F538 -1E4B 8135F539 -1E4C 8135F630 -1E4D 8135F631 -1E4E 8135F632 -1E4F 8135F633 -1E50 8135F634 -1E51 8135F635 -1E52 8135F636 -1E53 8135F637 -1E54 8135F638 -1E55 8135F639 -1E56 8135F730 -1E57 8135F731 -1E58 8135F732 -1E59 8135F733 -1E5A 8135F734 -1E5B 8135F735 -1E5C 8135F736 -1E5D 8135F737 -1E5E 8135F738 -1E5F 8135F739 -1E60 8135F830 -1E61 8135F831 -1E62 8135F832 -1E63 8135F833 -1E64 8135F834 -1E65 8135F835 -1E66 8135F836 -1E67 8135F837 -1E68 8135F838 -1E69 8135F839 -1E6A 8135F930 -1E6B 8135F931 -1E6C 8135F932 -1E6D 8135F933 -1E6E 8135F934 -1E6F 8135F935 -1E70 8135F936 -1E71 8135F937 -1E72 8135F938 -1E73 8135F939 -1E74 8135FA30 -1E75 8135FA31 -1E76 8135FA32 -1E77 8135FA33 -1E78 8135FA34 -1E79 8135FA35 -1E7A 8135FA36 -1E7B 8135FA37 -1E7C 8135FA38 -1E7D 8135FA39 -1E7E 8135FB30 -1E7F 8135FB31 -1E80 8135FB32 -1E81 8135FB33 -1E82 8135FB34 -1E83 8135FB35 -1E84 8135FB36 -1E85 8135FB37 -1E86 8135FB38 -1E87 8135FB39 -1E88 8135FC30 -1E89 8135FC31 -1E8A 8135FC32 -1E8B 8135FC33 -1E8C 8135FC34 -1E8D 8135FC35 -1E8E 8135FC36 -1E8F 8135FC37 -1E90 8135FC38 -1E91 8135FC39 -1E92 8135FD30 -1E93 8135FD31 -1E94 8135FD32 -1E95 8135FD33 -1E96 8135FD34 -1E97 8135FD35 -1E98 8135FD36 -1E99 8135FD37 -1E9A 8135FD38 -1E9B 8135FD39 -1E9C 8135FE30 -1E9D 8135FE31 -1E9E 8135FE32 -1E9F 8135FE33 -1EA0 8135FE34 -1EA1 8135FE35 -1EA2 8135FE36 -1EA3 8135FE37 -1EA4 8135FE38 -1EA5 8135FE39 -1EA6 81368130 -1EA7 81368131 -1EA8 81368132 -1EA9 81368133 -1EAA 81368134 -1EAB 
81368135 -1EAC 81368136 -1EAD 81368137 -1EAE 81368138 -1EAF 81368139 -1EB0 81368230 -1EB1 81368231 -1EB2 81368232 -1EB3 81368233 -1EB4 81368234 -1EB5 81368235 -1EB6 81368236 -1EB7 81368237 -1EB8 81368238 -1EB9 81368239 -1EBA 81368330 -1EBB 81368331 -1EBC 81368332 -1EBD 81368333 -1EBE 81368334 -1EBF 81368335 -1EC0 81368336 -1EC1 81368337 -1EC2 81368338 -1EC3 81368339 -1EC4 81368430 -1EC5 81368431 -1EC6 81368432 -1EC7 81368433 -1EC8 81368434 -1EC9 81368435 -1ECA 81368436 -1ECB 81368437 -1ECC 81368438 -1ECD 81368439 -1ECE 81368530 -1ECF 81368531 -1ED0 81368532 -1ED1 81368533 -1ED2 81368534 -1ED3 81368535 -1ED4 81368536 -1ED5 81368537 -1ED6 81368538 -1ED7 81368539 -1ED8 81368630 -1ED9 81368631 -1EDA 81368632 -1EDB 81368633 -1EDC 81368634 -1EDD 81368635 -1EDE 81368636 -1EDF 81368637 -1EE0 81368638 -1EE1 81368639 -1EE2 81368730 -1EE3 81368731 -1EE4 81368732 -1EE5 81368733 -1EE6 81368734 -1EE7 81368735 -1EE8 81368736 -1EE9 81368737 -1EEA 81368738 -1EEB 81368739 -1EEC 81368830 -1EED 81368831 -1EEE 81368832 -1EEF 81368833 -1EF0 81368834 -1EF1 81368835 -1EF2 81368836 -1EF3 81368837 -1EF4 81368838 -1EF5 81368839 -1EF6 81368930 -1EF7 81368931 -1EF8 81368932 -1EF9 81368933 -1EFA 81368934 -1EFB 81368935 -1EFC 81368936 -1EFD 81368937 -1EFE 81368938 -1EFF 81368939 -1F00 81368A30 -1F01 81368A31 -1F02 81368A32 -1F03 81368A33 -1F04 81368A34 -1F05 81368A35 -1F06 81368A36 -1F07 81368A37 -1F08 81368A38 -1F09 81368A39 -1F0A 81368B30 -1F0B 81368B31 -1F0C 81368B32 -1F0D 81368B33 -1F0E 81368B34 -1F0F 81368B35 -1F10 81368B36 -1F11 81368B37 -1F12 81368B38 -1F13 81368B39 -1F14 81368C30 -1F15 81368C31 -1F16 81368C32 -1F17 81368C33 -1F18 81368C34 -1F19 81368C35 -1F1A 81368C36 -1F1B 81368C37 -1F1C 81368C38 -1F1D 81368C39 -1F1E 81368D30 -1F1F 81368D31 -1F20 81368D32 -1F21 81368D33 -1F22 81368D34 -1F23 81368D35 -1F24 81368D36 -1F25 81368D37 -1F26 81368D38 -1F27 81368D39 -1F28 81368E30 -1F29 81368E31 -1F2A 81368E32 -1F2B 81368E33 -1F2C 81368E34 -1F2D 81368E35 -1F2E 81368E36 -1F2F 81368E37 -1F30 81368E38 -1F31 81368E39 -1F32 81368F30 -1F33 81368F31 -1F34 81368F32 -1F35 81368F33 -1F36 81368F34 -1F37 81368F35 -1F38 81368F36 -1F39 81368F37 -1F3A 81368F38 -1F3B 81368F39 -1F3C 81369030 -1F3D 81369031 -1F3E 81369032 -1F3F 81369033 -1F40 81369034 -1F41 81369035 -1F42 81369036 -1F43 81369037 -1F44 81369038 -1F45 81369039 -1F46 81369130 -1F47 81369131 -1F48 81369132 -1F49 81369133 -1F4A 81369134 -1F4B 81369135 -1F4C 81369136 -1F4D 81369137 -1F4E 81369138 -1F4F 81369139 -1F50 81369230 -1F51 81369231 -1F52 81369232 -1F53 81369233 -1F54 81369234 -1F55 81369235 -1F56 81369236 -1F57 81369237 -1F58 81369238 -1F59 81369239 -1F5A 81369330 -1F5B 81369331 -1F5C 81369332 -1F5D 81369333 -1F5E 81369334 -1F5F 81369335 -1F60 81369336 -1F61 81369337 -1F62 81369338 -1F63 81369339 -1F64 81369430 -1F65 81369431 -1F66 81369432 -1F67 81369433 -1F68 81369434 -1F69 81369435 -1F6A 81369436 -1F6B 81369437 -1F6C 81369438 -1F6D 81369439 -1F6E 81369530 -1F6F 81369531 -1F70 81369532 -1F71 81369533 -1F72 81369534 -1F73 81369535 -1F74 81369536 -1F75 81369537 -1F76 81369538 -1F77 81369539 -1F78 81369630 -1F79 81369631 -1F7A 81369632 -1F7B 81369633 -1F7C 81369634 -1F7D 81369635 -1F7E 81369636 -1F7F 81369637 -1F80 81369638 -1F81 81369639 -1F82 81369730 -1F83 81369731 -1F84 81369732 -1F85 81369733 -1F86 81369734 -1F87 81369735 -1F88 81369736 -1F89 81369737 -1F8A 81369738 -1F8B 81369739 -1F8C 81369830 -1F8D 81369831 -1F8E 81369832 -1F8F 81369833 -1F90 81369834 -1F91 81369835 -1F92 81369836 -1F93 81369837 -1F94 81369838 -1F95 81369839 -1F96 81369930 -1F97 81369931 -1F98 
81369932 -1F99 81369933 -1F9A 81369934 -1F9B 81369935 -1F9C 81369936 -1F9D 81369937 -1F9E 81369938 -1F9F 81369939 -1FA0 81369A30 -1FA1 81369A31 -1FA2 81369A32 -1FA3 81369A33 -1FA4 81369A34 -1FA5 81369A35 -1FA6 81369A36 -1FA7 81369A37 -1FA8 81369A38 -1FA9 81369A39 -1FAA 81369B30 -1FAB 81369B31 -1FAC 81369B32 -1FAD 81369B33 -1FAE 81369B34 -1FAF 81369B35 -1FB0 81369B36 -1FB1 81369B37 -1FB2 81369B38 -1FB3 81369B39 -1FB4 81369C30 -1FB5 81369C31 -1FB6 81369C32 -1FB7 81369C33 -1FB8 81369C34 -1FB9 81369C35 -1FBA 81369C36 -1FBB 81369C37 -1FBC 81369C38 -1FBD 81369C39 -1FBE 81369D30 -1FBF 81369D31 -1FC0 81369D32 -1FC1 81369D33 -1FC2 81369D34 -1FC3 81369D35 -1FC4 81369D36 -1FC5 81369D37 -1FC6 81369D38 -1FC7 81369D39 -1FC8 81369E30 -1FC9 81369E31 -1FCA 81369E32 -1FCB 81369E33 -1FCC 81369E34 -1FCD 81369E35 -1FCE 81369E36 -1FCF 81369E37 -1FD0 81369E38 -1FD1 81369E39 -1FD2 81369F30 -1FD3 81369F31 -1FD4 81369F32 -1FD5 81369F33 -1FD6 81369F34 -1FD7 81369F35 -1FD8 81369F36 -1FD9 81369F37 -1FDA 81369F38 -1FDB 81369F39 -1FDC 8136A030 -1FDD 8136A031 -1FDE 8136A032 -1FDF 8136A033 -1FE0 8136A034 -1FE1 8136A035 -1FE2 8136A036 -1FE3 8136A037 -1FE4 8136A038 -1FE5 8136A039 -1FE6 8136A130 -1FE7 8136A131 -1FE8 8136A132 -1FE9 8136A133 -1FEA 8136A134 -1FEB 8136A135 -1FEC 8136A136 -1FED 8136A137 -1FEE 8136A138 -1FEF 8136A139 -1FF0 8136A230 -1FF1 8136A231 -1FF2 8136A232 -1FF3 8136A233 -1FF4 8136A234 -1FF5 8136A235 -1FF6 8136A236 -1FF7 8136A237 -1FF8 8136A238 -1FF9 8136A239 -1FFA 8136A330 -1FFB 8136A331 -1FFC 8136A332 -1FFD 8136A333 -1FFE 8136A334 -1FFF 8136A335 -2000 8136A336 -2001 8136A337 -2002 8136A338 -2003 8136A339 -2004 8136A430 -2005 8136A431 -2006 8136A432 -2007 8136A433 -2008 8136A434 -2009 8136A435 -200A 8136A436 -200B 8136A437 -200C 8136A438 -200D 8136A439 -200E 8136A530 -200F 8136A531 -2010 A95C -2011 8136A532 -2012 8136A533 -2013 A843 -2014 A1AA -2015 A844 -2016 A1AC -2017 8136A534 -2018 A1AE -2019 A1AF -201A 8136A535 -201B 8136A536 -201C A1B0 -201D A1B1 -201E 8136A537 -201F 8136A538 -2020 8136A539 -2021 8136A630 -2022 8136A631 -2023 8136A632 -2024 8136A633 -2025 A845 -2026 A1AD -2027 8136A634 -2028 8136A635 -2029 8136A636 -202A 8136A637 -202B 8136A638 -202C 8136A639 -202D 8136A730 -202E 8136A731 -202F 8136A732 -2030 A1EB -2031 8136A733 -2032 A1E4 -2033 A1E5 -2034 8136A734 -2035 A846 -2036 8136A735 -2037 8136A736 -2038 8136A737 -2039 8136A738 -203A 8136A739 -203B A1F9 -203C 8136A830 -203D 8136A831 -203E 8136A832 -203F 8136A833 -2040 8136A834 -2041 8136A835 -2042 8136A836 -2043 8136A837 -2044 8136A838 -2045 8136A839 -2046 8136A930 -2047 8136A931 -2048 8136A932 -2049 8136A933 -204A 8136A934 -204B 8136A935 -204C 8136A936 -204D 8136A937 -204E 8136A938 -204F 8136A939 -2050 8136AA30 -2051 8136AA31 -2052 8136AA32 -2053 8136AA33 -2054 8136AA34 -2055 8136AA35 -2056 8136AA36 -2057 8136AA37 -2058 8136AA38 -2059 8136AA39 -205A 8136AB30 -205B 8136AB31 -205C 8136AB32 -205D 8136AB33 -205E 8136AB34 -205F 8136AB35 -2060 8136AB36 -2061 8136AB37 -2062 8136AB38 -2063 8136AB39 -2064 8136AC30 -2065 8136AC31 -2066 8136AC32 -2067 8136AC33 -2068 8136AC34 -2069 8136AC35 -206A 8136AC36 -206B 8136AC37 -206C 8136AC38 -206D 8136AC39 -206E 8136AD30 -206F 8136AD31 -2070 8136AD32 -2071 8136AD33 -2072 8136AD34 -2073 8136AD35 -2074 8136AD36 -2075 8136AD37 -2076 8136AD38 -2077 8136AD39 -2078 8136AE30 -2079 8136AE31 -207A 8136AE32 -207B 8136AE33 -207C 8136AE34 -207D 8136AE35 -207E 8136AE36 -207F 8136AE37 -2080 8136AE38 -2081 8136AE39 -2082 8136AF30 -2083 8136AF31 -2084 8136AF32 -2085 8136AF33 -2086 8136AF34 -2087 8136AF35 -2088 8136AF36 -2089 
8136AF37 -208A 8136AF38 -208B 8136AF39 -208C 8136B030 -208D 8136B031 -208E 8136B032 -208F 8136B033 -2090 8136B034 -2091 8136B035 -2092 8136B036 -2093 8136B037 -2094 8136B038 -2095 8136B039 -2096 8136B130 -2097 8136B131 -2098 8136B132 -2099 8136B133 -209A 8136B134 -209B 8136B135 -209C 8136B136 -209D 8136B137 -209E 8136B138 -209F 8136B139 -20A0 8136B230 -20A1 8136B231 -20A2 8136B232 -20A3 8136B233 -20A4 8136B234 -20A5 8136B235 -20A6 8136B236 -20A7 8136B237 -20A8 8136B238 -20A9 8136B239 -20AA 8136B330 -20AB 8136B331 -20AC A2E3 -20AD 8136B332 -20AE 8136B333 -20AF 8136B334 -20B0 8136B335 -20B1 8136B336 -20B2 8136B337 -20B3 8136B338 -20B4 8136B339 -20B5 8136B430 -20B6 8136B431 -20B7 8136B432 -20B8 8136B433 -20B9 8136B434 -20BA 8136B435 -20BB 8136B436 -20BC 8136B437 -20BD 8136B438 -20BE 8136B439 -20BF 8136B530 -20C0 8136B531 -20C1 8136B532 -20C2 8136B533 -20C3 8136B534 -20C4 8136B535 -20C5 8136B536 -20C6 8136B537 -20C7 8136B538 -20C8 8136B539 -20C9 8136B630 -20CA 8136B631 -20CB 8136B632 -20CC 8136B633 -20CD 8136B634 -20CE 8136B635 -20CF 8136B636 -20D0 8136B637 -20D1 8136B638 -20D2 8136B639 -20D3 8136B730 -20D4 8136B731 -20D5 8136B732 -20D6 8136B733 -20D7 8136B734 -20D8 8136B735 -20D9 8136B736 -20DA 8136B737 -20DB 8136B738 -20DC 8136B739 -20DD 8136B830 -20DE 8136B831 -20DF 8136B832 -20E0 8136B833 -20E1 8136B834 -20E2 8136B835 -20E3 8136B836 -20E4 8136B837 -20E5 8136B838 -20E6 8136B839 -20E7 8136B930 -20E8 8136B931 -20E9 8136B932 -20EA 8136B933 -20EB 8136B934 -20EC 8136B935 -20ED 8136B936 -20EE 8136B937 -20EF 8136B938 -20F0 8136B939 -20F1 8136BA30 -20F2 8136BA31 -20F3 8136BA32 -20F4 8136BA33 -20F5 8136BA34 -20F6 8136BA35 -20F7 8136BA36 -20F8 8136BA37 -20F9 8136BA38 -20FA 8136BA39 -20FB 8136BB30 -20FC 8136BB31 -20FD 8136BB32 -20FE 8136BB33 -20FF 8136BB34 -2100 8136BB35 -2101 8136BB36 -2102 8136BB37 -2103 A1E6 -2104 8136BB38 -2105 A847 -2106 8136BB39 -2107 8136BC30 -2108 8136BC31 -2109 A848 -210A 8136BC32 -210B 8136BC33 -210C 8136BC34 -210D 8136BC35 -210E 8136BC36 -210F 8136BC37 -2110 8136BC38 -2111 8136BC39 -2112 8136BD30 -2113 8136BD31 -2114 8136BD32 -2115 8136BD33 -2116 A1ED -2117 8136BD34 -2118 8136BD35 -2119 8136BD36 -211A 8136BD37 -211B 8136BD38 -211C 8136BD39 -211D 8136BE30 -211E 8136BE31 -211F 8136BE32 -2120 8136BE33 -2121 A959 -2122 8136BE34 -2123 8136BE35 -2124 8136BE36 -2125 8136BE37 -2126 8136BE38 -2127 8136BE39 -2128 8136BF30 -2129 8136BF31 -212A 8136BF32 -212B 8136BF33 -212C 8136BF34 -212D 8136BF35 -212E 8136BF36 -212F 8136BF37 -2130 8136BF38 -2131 8136BF39 -2132 8136C030 -2133 8136C031 -2134 8136C032 -2135 8136C033 -2136 8136C034 -2137 8136C035 -2138 8136C036 -2139 8136C037 -213A 8136C038 -213B 8136C039 -213C 8136C130 -213D 8136C131 -213E 8136C132 -213F 8136C133 -2140 8136C134 -2141 8136C135 -2142 8136C136 -2143 8136C137 -2144 8136C138 -2145 8136C139 -2146 8136C230 -2147 8136C231 -2148 8136C232 -2149 8136C233 -214A 8136C234 -214B 8136C235 -214C 8136C236 -214D 8136C237 -214E 8136C238 -214F 8136C239 -2150 8136C330 -2151 8136C331 -2152 8136C332 -2153 8136C333 -2154 8136C334 -2155 8136C335 -2156 8136C336 -2157 8136C337 -2158 8136C338 -2159 8136C339 -215A 8136C430 -215B 8136C431 -215C 8136C432 -215D 8136C433 -215E 8136C434 -215F 8136C435 -2160 A2F1 -2161 A2F2 -2162 A2F3 -2163 A2F4 -2164 A2F5 -2165 A2F6 -2166 A2F7 -2167 A2F8 -2168 A2F9 -2169 A2FA -216A A2FB -216B A2FC -216C 8136C436 -216D 8136C437 -216E 8136C438 -216F 8136C439 -2170 A2A1 -2171 A2A2 -2172 A2A3 -2173 A2A4 -2174 A2A5 -2175 A2A6 -2176 A2A7 -2177 A2A8 -2178 A2A9 -2179 A2AA -217A 8136C530 -217B 8136C531 -217C 8136C532 -217D 
8136C533 -217E 8136C534 -217F 8136C535 -2180 8136C536 -2181 8136C537 -2182 8136C538 -2183 8136C539 -2184 8136C630 -2185 8136C631 -2186 8136C632 -2187 8136C633 -2188 8136C634 -2189 8136C635 -218A 8136C636 -218B 8136C637 -218C 8136C638 -218D 8136C639 -218E 8136C730 -218F 8136C731 -2190 A1FB -2191 A1FC -2192 A1FA -2193 A1FD -2194 8136C732 -2195 8136C733 -2196 A849 -2197 A84A -2198 A84B -2199 A84C -219A 8136C734 -219B 8136C735 -219C 8136C736 -219D 8136C737 -219E 8136C738 -219F 8136C739 -21A0 8136C830 -21A1 8136C831 -21A2 8136C832 -21A3 8136C833 -21A4 8136C834 -21A5 8136C835 -21A6 8136C836 -21A7 8136C837 -21A8 8136C838 -21A9 8136C839 -21AA 8136C930 -21AB 8136C931 -21AC 8136C932 -21AD 8136C933 -21AE 8136C934 -21AF 8136C935 -21B0 8136C936 -21B1 8136C937 -21B2 8136C938 -21B3 8136C939 -21B4 8136CA30 -21B5 8136CA31 -21B6 8136CA32 -21B7 8136CA33 -21B8 8136CA34 -21B9 8136CA35 -21BA 8136CA36 -21BB 8136CA37 -21BC 8136CA38 -21BD 8136CA39 -21BE 8136CB30 -21BF 8136CB31 -21C0 8136CB32 -21C1 8136CB33 -21C2 8136CB34 -21C3 8136CB35 -21C4 8136CB36 -21C5 8136CB37 -21C6 8136CB38 -21C7 8136CB39 -21C8 8136CC30 -21C9 8136CC31 -21CA 8136CC32 -21CB 8136CC33 -21CC 8136CC34 -21CD 8136CC35 -21CE 8136CC36 -21CF 8136CC37 -21D0 8136CC38 -21D1 8136CC39 -21D2 8136CD30 -21D3 8136CD31 -21D4 8136CD32 -21D5 8136CD33 -21D6 8136CD34 -21D7 8136CD35 -21D8 8136CD36 -21D9 8136CD37 -21DA 8136CD38 -21DB 8136CD39 -21DC 8136CE30 -21DD 8136CE31 -21DE 8136CE32 -21DF 8136CE33 -21E0 8136CE34 -21E1 8136CE35 -21E2 8136CE36 -21E3 8136CE37 -21E4 8136CE38 -21E5 8136CE39 -21E6 8136CF30 -21E7 8136CF31 -21E8 8136CF32 -21E9 8136CF33 -21EA 8136CF34 -21EB 8136CF35 -21EC 8136CF36 -21ED 8136CF37 -21EE 8136CF38 -21EF 8136CF39 -21F0 8136D030 -21F1 8136D031 -21F2 8136D032 -21F3 8136D033 -21F4 8136D034 -21F5 8136D035 -21F6 8136D036 -21F7 8136D037 -21F8 8136D038 -21F9 8136D039 -21FA 8136D130 -21FB 8136D131 -21FC 8136D132 -21FD 8136D133 -21FE 8136D134 -21FF 8136D135 -2200 8136D136 -2201 8136D137 -2202 8136D138 -2203 8136D139 -2204 8136D230 -2205 8136D231 -2206 8136D232 -2207 8136D233 -2208 A1CA -2209 8136D234 -220A 8136D235 -220B 8136D236 -220C 8136D237 -220D 8136D238 -220E 8136D239 -220F A1C7 -2210 8136D330 -2211 A1C6 -2212 8136D331 -2213 8136D332 -2214 8136D333 -2215 A84D -2216 8136D334 -2217 8136D335 -2218 8136D336 -2219 8136D337 -221A A1CC -221B 8136D338 -221C 8136D339 -221D A1D8 -221E A1DE -221F A84E -2220 A1CF -2221 8136D430 -2222 8136D431 -2223 A84F -2224 8136D432 -2225 A1CE -2226 8136D433 -2227 A1C4 -2228 A1C5 -2229 A1C9 -222A A1C8 -222B A1D2 -222C 8136D434 -222D 8136D435 -222E A1D3 -222F 8136D436 -2230 8136D437 -2231 8136D438 -2232 8136D439 -2233 8136D530 -2234 A1E0 -2235 A1DF -2236 A1C3 -2237 A1CB -2238 8136D531 -2239 8136D532 -223A 8136D533 -223B 8136D534 -223C 8136D535 -223D A1D7 -223E 8136D536 -223F 8136D537 -2240 8136D538 -2241 8136D539 -2242 8136D630 -2243 8136D631 -2244 8136D632 -2245 8136D633 -2246 8136D634 -2247 8136D635 -2248 A1D6 -2249 8136D636 -224A 8136D637 -224B 8136D638 -224C A1D5 -224D 8136D639 -224E 8136D730 -224F 8136D731 -2250 8136D732 -2251 8136D733 -2252 A850 -2253 8136D734 -2254 8136D735 -2255 8136D736 -2256 8136D737 -2257 8136D738 -2258 8136D739 -2259 8136D830 -225A 8136D831 -225B 8136D832 -225C 8136D833 -225D 8136D834 -225E 8136D835 -225F 8136D836 -2260 A1D9 -2261 A1D4 -2262 8136D837 -2263 8136D838 -2264 A1DC -2265 A1DD -2266 A851 -2267 A852 -2268 8136D839 -2269 8136D930 -226A 8136D931 -226B 8136D932 -226C 8136D933 -226D 8136D934 -226E A1DA -226F A1DB -2270 8136D935 -2271 8136D936 -2272 8136D937 -2273 8136D938 -2274 8136D939 
-2275 8136DA30 -2276 8136DA31 -2277 8136DA32 -2278 8136DA33 -2279 8136DA34 -227A 8136DA35 -227B 8136DA36 -227C 8136DA37 -227D 8136DA38 -227E 8136DA39 -227F 8136DB30 -2280 8136DB31 -2281 8136DB32 -2282 8136DB33 -2283 8136DB34 -2284 8136DB35 -2285 8136DB36 -2286 8136DB37 -2287 8136DB38 -2288 8136DB39 -2289 8136DC30 -228A 8136DC31 -228B 8136DC32 -228C 8136DC33 -228D 8136DC34 -228E 8136DC35 -228F 8136DC36 -2290 8136DC37 -2291 8136DC38 -2292 8136DC39 -2293 8136DD30 -2294 8136DD31 -2295 A892 -2296 8136DD32 -2297 8136DD33 -2298 8136DD34 -2299 A1D1 -229A 8136DD35 -229B 8136DD36 -229C 8136DD37 -229D 8136DD38 -229E 8136DD39 -229F 8136DE30 -22A0 8136DE31 -22A1 8136DE32 -22A2 8136DE33 -22A3 8136DE34 -22A4 8136DE35 -22A5 A1CD -22A6 8136DE36 -22A7 8136DE37 -22A8 8136DE38 -22A9 8136DE39 -22AA 8136DF30 -22AB 8136DF31 -22AC 8136DF32 -22AD 8136DF33 -22AE 8136DF34 -22AF 8136DF35 -22B0 8136DF36 -22B1 8136DF37 -22B2 8136DF38 -22B3 8136DF39 -22B4 8136E030 -22B5 8136E031 -22B6 8136E032 -22B7 8136E033 -22B8 8136E034 -22B9 8136E035 -22BA 8136E036 -22BB 8136E037 -22BC 8136E038 -22BD 8136E039 -22BE 8136E130 -22BF A853 -22C0 8136E131 -22C1 8136E132 -22C2 8136E133 -22C3 8136E134 -22C4 8136E135 -22C5 8136E136 -22C6 8136E137 -22C7 8136E138 -22C8 8136E139 -22C9 8136E230 -22CA 8136E231 -22CB 8136E232 -22CC 8136E233 -22CD 8136E234 -22CE 8136E235 -22CF 8136E236 -22D0 8136E237 -22D1 8136E238 -22D2 8136E239 -22D3 8136E330 -22D4 8136E331 -22D5 8136E332 -22D6 8136E333 -22D7 8136E334 -22D8 8136E335 -22D9 8136E336 -22DA 8136E337 -22DB 8136E338 -22DC 8136E339 -22DD 8136E430 -22DE 8136E431 -22DF 8136E432 -22E0 8136E433 -22E1 8136E434 -22E2 8136E435 -22E3 8136E436 -22E4 8136E437 -22E5 8136E438 -22E6 8136E439 -22E7 8136E530 -22E8 8136E531 -22E9 8136E532 -22EA 8136E533 -22EB 8136E534 -22EC 8136E535 -22ED 8136E536 -22EE 8136E537 -22EF 8136E538 -22F0 8136E539 -22F1 8136E630 -22F2 8136E631 -22F3 8136E632 -22F4 8136E633 -22F5 8136E634 -22F6 8136E635 -22F7 8136E636 -22F8 8136E637 -22F9 8136E638 -22FA 8136E639 -22FB 8136E730 -22FC 8136E731 -22FD 8136E732 -22FE 8136E733 -22FF 8136E734 -2300 8136E735 -2301 8136E736 -2302 8136E737 -2303 8136E738 -2304 8136E739 -2305 8136E830 -2306 8136E831 -2307 8136E832 -2308 8136E833 -2309 8136E834 -230A 8136E835 -230B 8136E836 -230C 8136E837 -230D 8136E838 -230E 8136E839 -230F 8136E930 -2310 8136E931 -2311 8136E932 -2312 A1D0 -2313 8136E933 -2314 8136E934 -2315 8136E935 -2316 8136E936 -2317 8136E937 -2318 8136E938 -2319 8136E939 -231A 8136EA30 -231B 8136EA31 -231C 8136EA32 -231D 8136EA33 -231E 8136EA34 -231F 8136EA35 -2320 8136EA36 -2321 8136EA37 -2322 8136EA38 -2323 8136EA39 -2324 8136EB30 -2325 8136EB31 -2326 8136EB32 -2327 8136EB33 -2328 8136EB34 -2329 8136EB35 -232A 8136EB36 -232B 8136EB37 -232C 8136EB38 -232D 8136EB39 -232E 8136EC30 -232F 8136EC31 -2330 8136EC32 -2331 8136EC33 -2332 8136EC34 -2333 8136EC35 -2334 8136EC36 -2335 8136EC37 -2336 8136EC38 -2337 8136EC39 -2338 8136ED30 -2339 8136ED31 -233A 8136ED32 -233B 8136ED33 -233C 8136ED34 -233D 8136ED35 -233E 8136ED36 -233F 8136ED37 -2340 8136ED38 -2341 8136ED39 -2342 8136EE30 -2343 8136EE31 -2344 8136EE32 -2345 8136EE33 -2346 8136EE34 -2347 8136EE35 -2348 8136EE36 -2349 8136EE37 -234A 8136EE38 -234B 8136EE39 -234C 8136EF30 -234D 8136EF31 -234E 8136EF32 -234F 8136EF33 -2350 8136EF34 -2351 8136EF35 -2352 8136EF36 -2353 8136EF37 -2354 8136EF38 -2355 8136EF39 -2356 8136F030 -2357 8136F031 -2358 8136F032 -2359 8136F033 -235A 8136F034 -235B 8136F035 -235C 8136F036 -235D 8136F037 -235E 8136F038 -235F 8136F039 -2360 8136F130 -2361 8136F131 -2362 8136F132 
-2363 8136F133 -2364 8136F134 -2365 8136F135 -2366 8136F136 -2367 8136F137 -2368 8136F138 -2369 8136F139 -236A 8136F230 -236B 8136F231 -236C 8136F232 -236D 8136F233 -236E 8136F234 -236F 8136F235 -2370 8136F236 -2371 8136F237 -2372 8136F238 -2373 8136F239 -2374 8136F330 -2375 8136F331 -2376 8136F332 -2377 8136F333 -2378 8136F334 -2379 8136F335 -237A 8136F336 -237B 8136F337 -237C 8136F338 -237D 8136F339 -237E 8136F430 -237F 8136F431 -2380 8136F432 -2381 8136F433 -2382 8136F434 -2383 8136F435 -2384 8136F436 -2385 8136F437 -2386 8136F438 -2387 8136F439 -2388 8136F530 -2389 8136F531 -238A 8136F532 -238B 8136F533 -238C 8136F534 -238D 8136F535 -238E 8136F536 -238F 8136F537 -2390 8136F538 -2391 8136F539 -2392 8136F630 -2393 8136F631 -2394 8136F632 -2395 8136F633 -2396 8136F634 -2397 8136F635 -2398 8136F636 -2399 8136F637 -239A 8136F638 -239B 8136F639 -239C 8136F730 -239D 8136F731 -239E 8136F732 -239F 8136F733 -23A0 8136F734 -23A1 8136F735 -23A2 8136F736 -23A3 8136F737 -23A4 8136F738 -23A5 8136F739 -23A6 8136F830 -23A7 8136F831 -23A8 8136F832 -23A9 8136F833 -23AA 8136F834 -23AB 8136F835 -23AC 8136F836 -23AD 8136F837 -23AE 8136F838 -23AF 8136F839 -23B0 8136F930 -23B1 8136F931 -23B2 8136F932 -23B3 8136F933 -23B4 8136F934 -23B5 8136F935 -23B6 8136F936 -23B7 8136F937 -23B8 8136F938 -23B9 8136F939 -23BA 8136FA30 -23BB 8136FA31 -23BC 8136FA32 -23BD 8136FA33 -23BE 8136FA34 -23BF 8136FA35 -23C0 8136FA36 -23C1 8136FA37 -23C2 8136FA38 -23C3 8136FA39 -23C4 8136FB30 -23C5 8136FB31 -23C6 8136FB32 -23C7 8136FB33 -23C8 8136FB34 -23C9 8136FB35 -23CA 8136FB36 -23CB 8136FB37 -23CC 8136FB38 -23CD 8136FB39 -23CE 8136FC30 -23CF 8136FC31 -23D0 8136FC32 -23D1 8136FC33 -23D2 8136FC34 -23D3 8136FC35 -23D4 8136FC36 -23D5 8136FC37 -23D6 8136FC38 -23D7 8136FC39 -23D8 8136FD30 -23D9 8136FD31 -23DA 8136FD32 -23DB 8136FD33 -23DC 8136FD34 -23DD 8136FD35 -23DE 8136FD36 -23DF 8136FD37 -23E0 8136FD38 -23E1 8136FD39 -23E2 8136FE30 -23E3 8136FE31 -23E4 8136FE32 -23E5 8136FE33 -23E6 8136FE34 -23E7 8136FE35 -23E8 8136FE36 -23E9 8136FE37 -23EA 8136FE38 -23EB 8136FE39 -23EC 81378130 -23ED 81378131 -23EE 81378132 -23EF 81378133 -23F0 81378134 -23F1 81378135 -23F2 81378136 -23F3 81378137 -23F4 81378138 -23F5 81378139 -23F6 81378230 -23F7 81378231 -23F8 81378232 -23F9 81378233 -23FA 81378234 -23FB 81378235 -23FC 81378236 -23FD 81378237 -23FE 81378238 -23FF 81378239 -2400 81378330 -2401 81378331 -2402 81378332 -2403 81378333 -2404 81378334 -2405 81378335 -2406 81378336 -2407 81378337 -2408 81378338 -2409 81378339 -240A 81378430 -240B 81378431 -240C 81378432 -240D 81378433 -240E 81378434 -240F 81378435 -2410 81378436 -2411 81378437 -2412 81378438 -2413 81378439 -2414 81378530 -2415 81378531 -2416 81378532 -2417 81378533 -2418 81378534 -2419 81378535 -241A 81378536 -241B 81378537 -241C 81378538 -241D 81378539 -241E 81378630 -241F 81378631 -2420 81378632 -2421 81378633 -2422 81378634 -2423 81378635 -2424 81378636 -2425 81378637 -2426 81378638 -2427 81378639 -2428 81378730 -2429 81378731 -242A 81378732 -242B 81378733 -242C 81378734 -242D 81378735 -242E 81378736 -242F 81378737 -2430 81378738 -2431 81378739 -2432 81378830 -2433 81378831 -2434 81378832 -2435 81378833 -2436 81378834 -2437 81378835 -2438 81378836 -2439 81378837 -243A 81378838 -243B 81378839 -243C 81378930 -243D 81378931 -243E 81378932 -243F 81378933 -2440 81378934 -2441 81378935 -2442 81378936 -2443 81378937 -2444 81378938 -2445 81378939 -2446 81378A30 -2447 81378A31 -2448 81378A32 -2449 81378A33 -244A 81378A34 -244B 81378A35 -244C 81378A36 -244D 81378A37 -244E 81378A38 -244F 81378A39 
-2450 81378B30 -2451 81378B31 -2452 81378B32 -2453 81378B33 -2454 81378B34 -2455 81378B35 -2456 81378B36 -2457 81378B37 -2458 81378B38 -2459 81378B39 -245A 81378C30 -245B 81378C31 -245C 81378C32 -245D 81378C33 -245E 81378C34 -245F 81378C35 -2460 A2D9 -2461 A2DA -2462 A2DB -2463 A2DC -2464 A2DD -2465 A2DE -2466 A2DF -2467 A2E0 -2468 A2E1 -2469 A2E2 -246A 81378C36 -246B 81378C37 -246C 81378C38 -246D 81378C39 -246E 81378D30 -246F 81378D31 -2470 81378D32 -2471 81378D33 -2472 81378D34 -2473 81378D35 -2474 A2C5 -2475 A2C6 -2476 A2C7 -2477 A2C8 -2478 A2C9 -2479 A2CA -247A A2CB -247B A2CC -247C A2CD -247D A2CE -247E A2CF -247F A2D0 -2480 A2D1 -2481 A2D2 -2482 A2D3 -2483 A2D4 -2484 A2D5 -2485 A2D6 -2486 A2D7 -2487 A2D8 -2488 A2B1 -2489 A2B2 -248A A2B3 -248B A2B4 -248C A2B5 -248D A2B6 -248E A2B7 -248F A2B8 -2490 A2B9 -2491 A2BA -2492 A2BB -2493 A2BC -2494 A2BD -2495 A2BE -2496 A2BF -2497 A2C0 -2498 A2C1 -2499 A2C2 -249A A2C3 -249B A2C4 -249C 81378D36 -249D 81378D37 -249E 81378D38 -249F 81378D39 -24A0 81378E30 -24A1 81378E31 -24A2 81378E32 -24A3 81378E33 -24A4 81378E34 -24A5 81378E35 -24A6 81378E36 -24A7 81378E37 -24A8 81378E38 -24A9 81378E39 -24AA 81378F30 -24AB 81378F31 -24AC 81378F32 -24AD 81378F33 -24AE 81378F34 -24AF 81378F35 -24B0 81378F36 -24B1 81378F37 -24B2 81378F38 -24B3 81378F39 -24B4 81379030 -24B5 81379031 -24B6 81379032 -24B7 81379033 -24B8 81379034 -24B9 81379035 -24BA 81379036 -24BB 81379037 -24BC 81379038 -24BD 81379039 -24BE 81379130 -24BF 81379131 -24C0 81379132 -24C1 81379133 -24C2 81379134 -24C3 81379135 -24C4 81379136 -24C5 81379137 -24C6 81379138 -24C7 81379139 -24C8 81379230 -24C9 81379231 -24CA 81379232 -24CB 81379233 -24CC 81379234 -24CD 81379235 -24CE 81379236 -24CF 81379237 -24D0 81379238 -24D1 81379239 -24D2 81379330 -24D3 81379331 -24D4 81379332 -24D5 81379333 -24D6 81379334 -24D7 81379335 -24D8 81379336 -24D9 81379337 -24DA 81379338 -24DB 81379339 -24DC 81379430 -24DD 81379431 -24DE 81379432 -24DF 81379433 -24E0 81379434 -24E1 81379435 -24E2 81379436 -24E3 81379437 -24E4 81379438 -24E5 81379439 -24E6 81379530 -24E7 81379531 -24E8 81379532 -24E9 81379533 -24EA 81379534 -24EB 81379535 -24EC 81379536 -24ED 81379537 -24EE 81379538 -24EF 81379539 -24F0 81379630 -24F1 81379631 -24F2 81379632 -24F3 81379633 -24F4 81379634 -24F5 81379635 -24F6 81379636 -24F7 81379637 -24F8 81379638 -24F9 81379639 -24FA 81379730 -24FB 81379731 -24FC 81379732 -24FD 81379733 -24FE 81379734 -24FF 81379735 -2500 A9A4 -2501 A9A5 -2502 A9A6 -2503 A9A7 -2504 A9A8 -2505 A9A9 -2506 A9AA -2507 A9AB -2508 A9AC -2509 A9AD -250A A9AE -250B A9AF -250C A9B0 -250D A9B1 -250E A9B2 -250F A9B3 -2510 A9B4 -2511 A9B5 -2512 A9B6 -2513 A9B7 -2514 A9B8 -2515 A9B9 -2516 A9BA -2517 A9BB -2518 A9BC -2519 A9BD -251A A9BE -251B A9BF -251C A9C0 -251D A9C1 -251E A9C2 -251F A9C3 -2520 A9C4 -2521 A9C5 -2522 A9C6 -2523 A9C7 -2524 A9C8 -2525 A9C9 -2526 A9CA -2527 A9CB -2528 A9CC -2529 A9CD -252A A9CE -252B A9CF -252C A9D0 -252D A9D1 -252E A9D2 -252F A9D3 -2530 A9D4 -2531 A9D5 -2532 A9D6 -2533 A9D7 -2534 A9D8 -2535 A9D9 -2536 A9DA -2537 A9DB -2538 A9DC -2539 A9DD -253A A9DE -253B A9DF -253C A9E0 -253D A9E1 -253E A9E2 -253F A9E3 -2540 A9E4 -2541 A9E5 -2542 A9E6 -2543 A9E7 -2544 A9E8 -2545 A9E9 -2546 A9EA -2547 A9EB -2548 A9EC -2549 A9ED -254A A9EE -254B A9EF -254C 81379736 -254D 81379737 -254E 81379738 -254F 81379739 -2550 A854 -2551 A855 -2552 A856 -2553 A857 -2554 A858 -2555 A859 -2556 A85A -2557 A85B -2558 A85C -2559 A85D -255A A85E -255B A85F -255C A860 -255D A861 -255E A862 -255F A863 -2560 A864 -2561 A865 -2562 A866 -2563 
A867 -2564 A868 -2565 A869 -2566 A86A -2567 A86B -2568 A86C -2569 A86D -256A A86E -256B A86F -256C A870 -256D A871 -256E A872 -256F A873 -2570 A874 -2571 A875 -2572 A876 -2573 A877 -2574 81379830 -2575 81379831 -2576 81379832 -2577 81379833 -2578 81379834 -2579 81379835 -257A 81379836 -257B 81379837 -257C 81379838 -257D 81379839 -257E 81379930 -257F 81379931 -2580 81379932 -2581 A878 -2582 A879 -2583 A87A -2584 A87B -2585 A87C -2586 A87D -2587 A87E -2588 A880 -2589 A881 -258A A882 -258B A883 -258C A884 -258D A885 -258E A886 -258F A887 -2590 81379933 -2591 81379934 -2592 81379935 -2593 A888 -2594 A889 -2595 A88A -2596 81379936 -2597 81379937 -2598 81379938 -2599 81379939 -259A 81379A30 -259B 81379A31 -259C 81379A32 -259D 81379A33 -259E 81379A34 -259F 81379A35 -25A0 A1F6 -25A1 A1F5 -25A2 81379A36 -25A3 81379A37 -25A4 81379A38 -25A5 81379A39 -25A6 81379B30 -25A7 81379B31 -25A8 81379B32 -25A9 81379B33 -25AA 81379B34 -25AB 81379B35 -25AC 81379B36 -25AD 81379B37 -25AE 81379B38 -25AF 81379B39 -25B0 81379C30 -25B1 81379C31 -25B2 A1F8 -25B3 A1F7 -25B4 81379C32 -25B5 81379C33 -25B6 81379C34 -25B7 81379C35 -25B8 81379C36 -25B9 81379C37 -25BA 81379C38 -25BB 81379C39 -25BC A88B -25BD A88C -25BE 81379D30 -25BF 81379D31 -25C0 81379D32 -25C1 81379D33 -25C2 81379D34 -25C3 81379D35 -25C4 81379D36 -25C5 81379D37 -25C6 A1F4 -25C7 A1F3 -25C8 81379D38 -25C9 81379D39 -25CA 81379E30 -25CB A1F0 -25CC 81379E31 -25CD 81379E32 -25CE A1F2 -25CF A1F1 -25D0 81379E33 -25D1 81379E34 -25D2 81379E35 -25D3 81379E36 -25D4 81379E37 -25D5 81379E38 -25D6 81379E39 -25D7 81379F30 -25D8 81379F31 -25D9 81379F32 -25DA 81379F33 -25DB 81379F34 -25DC 81379F35 -25DD 81379F36 -25DE 81379F37 -25DF 81379F38 -25E0 81379F39 -25E1 8137A030 -25E2 A88D -25E3 A88E -25E4 A88F -25E5 A890 -25E6 8137A031 -25E7 8137A032 -25E8 8137A033 -25E9 8137A034 -25EA 8137A035 -25EB 8137A036 -25EC 8137A037 -25ED 8137A038 -25EE 8137A039 -25EF 8137A130 -25F0 8137A131 -25F1 8137A132 -25F2 8137A133 -25F3 8137A134 -25F4 8137A135 -25F5 8137A136 -25F6 8137A137 -25F7 8137A138 -25F8 8137A139 -25F9 8137A230 -25FA 8137A231 -25FB 8137A232 -25FC 8137A233 -25FD 8137A234 -25FE 8137A235 -25FF 8137A236 -2600 8137A237 -2601 8137A238 -2602 8137A239 -2603 8137A330 -2604 8137A331 -2605 A1EF -2606 A1EE -2607 8137A332 -2608 8137A333 -2609 A891 -260A 8137A334 -260B 8137A335 -260C 8137A336 -260D 8137A337 -260E 8137A338 -260F 8137A339 -2610 8137A430 -2611 8137A431 -2612 8137A432 -2613 8137A433 -2614 8137A434 -2615 8137A435 -2616 8137A436 -2617 8137A437 -2618 8137A438 -2619 8137A439 -261A 8137A530 -261B 8137A531 -261C 8137A532 -261D 8137A533 -261E 8137A534 -261F 8137A535 -2620 8137A536 -2621 8137A537 -2622 8137A538 -2623 8137A539 -2624 8137A630 -2625 8137A631 -2626 8137A632 -2627 8137A633 -2628 8137A634 -2629 8137A635 -262A 8137A636 -262B 8137A637 -262C 8137A638 -262D 8137A639 -262E 8137A730 -262F 8137A731 -2630 8137A732 -2631 8137A733 -2632 8137A734 -2633 8137A735 -2634 8137A736 -2635 8137A737 -2636 8137A738 -2637 8137A739 -2638 8137A830 -2639 8137A831 -263A 8137A832 -263B 8137A833 -263C 8137A834 -263D 8137A835 -263E 8137A836 -263F 8137A837 -2640 A1E2 -2641 8137A838 -2642 A1E1 -2643 8137A839 -2644 8137A930 -2645 8137A931 -2646 8137A932 -2647 8137A933 -2648 8137A934 -2649 8137A935 -264A 8137A936 -264B 8137A937 -264C 8137A938 -264D 8137A939 -264E 8137AA30 -264F 8137AA31 -2650 8137AA32 -2651 8137AA33 -2652 8137AA34 -2653 8137AA35 -2654 8137AA36 -2655 8137AA37 -2656 8137AA38 -2657 8137AA39 -2658 8137AB30 -2659 8137AB31 -265A 8137AB32 -265B 8137AB33 -265C 8137AB34 -265D 8137AB35 -265E 8137AB36 
-265F 8137AB37 -2660 8137AB38 -2661 8137AB39 -2662 8137AC30 -2663 8137AC31 -2664 8137AC32 -2665 8137AC33 -2666 8137AC34 -2667 8137AC35 -2668 8137AC36 -2669 8137AC37 -266A 8137AC38 -266B 8137AC39 -266C 8137AD30 -266D 8137AD31 -266E 8137AD32 -266F 8137AD33 -2670 8137AD34 -2671 8137AD35 -2672 8137AD36 -2673 8137AD37 -2674 8137AD38 -2675 8137AD39 -2676 8137AE30 -2677 8137AE31 -2678 8137AE32 -2679 8137AE33 -267A 8137AE34 -267B 8137AE35 -267C 8137AE36 -267D 8137AE37 -267E 8137AE38 -267F 8137AE39 -2680 8137AF30 -2681 8137AF31 -2682 8137AF32 -2683 8137AF33 -2684 8137AF34 -2685 8137AF35 -2686 8137AF36 -2687 8137AF37 -2688 8137AF38 -2689 8137AF39 -268A 8137B030 -268B 8137B031 -268C 8137B032 -268D 8137B033 -268E 8137B034 -268F 8137B035 -2690 8137B036 -2691 8137B037 -2692 8137B038 -2693 8137B039 -2694 8137B130 -2695 8137B131 -2696 8137B132 -2697 8137B133 -2698 8137B134 -2699 8137B135 -269A 8137B136 -269B 8137B137 -269C 8137B138 -269D 8137B139 -269E 8137B230 -269F 8137B231 -26A0 8137B232 -26A1 8137B233 -26A2 8137B234 -26A3 8137B235 -26A4 8137B236 -26A5 8137B237 -26A6 8137B238 -26A7 8137B239 -26A8 8137B330 -26A9 8137B331 -26AA 8137B332 -26AB 8137B333 -26AC 8137B334 -26AD 8137B335 -26AE 8137B336 -26AF 8137B337 -26B0 8137B338 -26B1 8137B339 -26B2 8137B430 -26B3 8137B431 -26B4 8137B432 -26B5 8137B433 -26B6 8137B434 -26B7 8137B435 -26B8 8137B436 -26B9 8137B437 -26BA 8137B438 -26BB 8137B439 -26BC 8137B530 -26BD 8137B531 -26BE 8137B532 -26BF 8137B533 -26C0 8137B534 -26C1 8137B535 -26C2 8137B536 -26C3 8137B537 -26C4 8137B538 -26C5 8137B539 -26C6 8137B630 -26C7 8137B631 -26C8 8137B632 -26C9 8137B633 -26CA 8137B634 -26CB 8137B635 -26CC 8137B636 -26CD 8137B637 -26CE 8137B638 -26CF 8137B639 -26D0 8137B730 -26D1 8137B731 -26D2 8137B732 -26D3 8137B733 -26D4 8137B734 -26D5 8137B735 -26D6 8137B736 -26D7 8137B737 -26D8 8137B738 -26D9 8137B739 -26DA 8137B830 -26DB 8137B831 -26DC 8137B832 -26DD 8137B833 -26DE 8137B834 -26DF 8137B835 -26E0 8137B836 -26E1 8137B837 -26E2 8137B838 -26E3 8137B839 -26E4 8137B930 -26E5 8137B931 -26E6 8137B932 -26E7 8137B933 -26E8 8137B934 -26E9 8137B935 -26EA 8137B936 -26EB 8137B937 -26EC 8137B938 -26ED 8137B939 -26EE 8137BA30 -26EF 8137BA31 -26F0 8137BA32 -26F1 8137BA33 -26F2 8137BA34 -26F3 8137BA35 -26F4 8137BA36 -26F5 8137BA37 -26F6 8137BA38 -26F7 8137BA39 -26F8 8137BB30 -26F9 8137BB31 -26FA 8137BB32 -26FB 8137BB33 -26FC 8137BB34 -26FD 8137BB35 -26FE 8137BB36 -26FF 8137BB37 -2700 8137BB38 -2701 8137BB39 -2702 8137BC30 -2703 8137BC31 -2704 8137BC32 -2705 8137BC33 -2706 8137BC34 -2707 8137BC35 -2708 8137BC36 -2709 8137BC37 -270A 8137BC38 -270B 8137BC39 -270C 8137BD30 -270D 8137BD31 -270E 8137BD32 -270F 8137BD33 -2710 8137BD34 -2711 8137BD35 -2712 8137BD36 -2713 8137BD37 -2714 8137BD38 -2715 8137BD39 -2716 8137BE30 -2717 8137BE31 -2718 8137BE32 -2719 8137BE33 -271A 8137BE34 -271B 8137BE35 -271C 8137BE36 -271D 8137BE37 -271E 8137BE38 -271F 8137BE39 -2720 8137BF30 -2721 8137BF31 -2722 8137BF32 -2723 8137BF33 -2724 8137BF34 -2725 8137BF35 -2726 8137BF36 -2727 8137BF37 -2728 8137BF38 -2729 8137BF39 -272A 8137C030 -272B 8137C031 -272C 8137C032 -272D 8137C033 -272E 8137C034 -272F 8137C035 -2730 8137C036 -2731 8137C037 -2732 8137C038 -2733 8137C039 -2734 8137C130 -2735 8137C131 -2736 8137C132 -2737 8137C133 -2738 8137C134 -2739 8137C135 -273A 8137C136 -273B 8137C137 -273C 8137C138 -273D 8137C139 -273E 8137C230 -273F 8137C231 -2740 8137C232 -2741 8137C233 -2742 8137C234 -2743 8137C235 -2744 8137C236 -2745 8137C237 -2746 8137C238 -2747 8137C239 -2748 8137C330 -2749 8137C331 -274A 8137C332 -274B 8137C333 
-274C 8137C334 -274D 8137C335 -274E 8137C336 -274F 8137C337 -2750 8137C338 -2751 8137C339 -2752 8137C430 -2753 8137C431 -2754 8137C432 -2755 8137C433 -2756 8137C434 -2757 8137C435 -2758 8137C436 -2759 8137C437 -275A 8137C438 -275B 8137C439 -275C 8137C530 -275D 8137C531 -275E 8137C532 -275F 8137C533 -2760 8137C534 -2761 8137C535 -2762 8137C536 -2763 8137C537 -2764 8137C538 -2765 8137C539 -2766 8137C630 -2767 8137C631 -2768 8137C632 -2769 8137C633 -276A 8137C634 -276B 8137C635 -276C 8137C636 -276D 8137C637 -276E 8137C638 -276F 8137C639 -2770 8137C730 -2771 8137C731 -2772 8137C732 -2773 8137C733 -2774 8137C734 -2775 8137C735 -2776 8137C736 -2777 8137C737 -2778 8137C738 -2779 8137C739 -277A 8137C830 -277B 8137C831 -277C 8137C832 -277D 8137C833 -277E 8137C834 -277F 8137C835 -2780 8137C836 -2781 8137C837 -2782 8137C838 -2783 8137C839 -2784 8137C930 -2785 8137C931 -2786 8137C932 -2787 8137C933 -2788 8137C934 -2789 8137C935 -278A 8137C936 -278B 8137C937 -278C 8137C938 -278D 8137C939 -278E 8137CA30 -278F 8137CA31 -2790 8137CA32 -2791 8137CA33 -2792 8137CA34 -2793 8137CA35 -2794 8137CA36 -2795 8137CA37 -2796 8137CA38 -2797 8137CA39 -2798 8137CB30 -2799 8137CB31 -279A 8137CB32 -279B 8137CB33 -279C 8137CB34 -279D 8137CB35 -279E 8137CB36 -279F 8137CB37 -27A0 8137CB38 -27A1 8137CB39 -27A2 8137CC30 -27A3 8137CC31 -27A4 8137CC32 -27A5 8137CC33 -27A6 8137CC34 -27A7 8137CC35 -27A8 8137CC36 -27A9 8137CC37 -27AA 8137CC38 -27AB 8137CC39 -27AC 8137CD30 -27AD 8137CD31 -27AE 8137CD32 -27AF 8137CD33 -27B0 8137CD34 -27B1 8137CD35 -27B2 8137CD36 -27B3 8137CD37 -27B4 8137CD38 -27B5 8137CD39 -27B6 8137CE30 -27B7 8137CE31 -27B8 8137CE32 -27B9 8137CE33 -27BA 8137CE34 -27BB 8137CE35 -27BC 8137CE36 -27BD 8137CE37 -27BE 8137CE38 -27BF 8137CE39 -27C0 8137CF30 -27C1 8137CF31 -27C2 8137CF32 -27C3 8137CF33 -27C4 8137CF34 -27C5 8137CF35 -27C6 8137CF36 -27C7 8137CF37 -27C8 8137CF38 -27C9 8137CF39 -27CA 8137D030 -27CB 8137D031 -27CC 8137D032 -27CD 8137D033 -27CE 8137D034 -27CF 8137D035 -27D0 8137D036 -27D1 8137D037 -27D2 8137D038 -27D3 8137D039 -27D4 8137D130 -27D5 8137D131 -27D6 8137D132 -27D7 8137D133 -27D8 8137D134 -27D9 8137D135 -27DA 8137D136 -27DB 8137D137 -27DC 8137D138 -27DD 8137D139 -27DE 8137D230 -27DF 8137D231 -27E0 8137D232 -27E1 8137D233 -27E2 8137D234 -27E3 8137D235 -27E4 8137D236 -27E5 8137D237 -27E6 8137D238 -27E7 8137D239 -27E8 8137D330 -27E9 8137D331 -27EA 8137D332 -27EB 8137D333 -27EC 8137D334 -27ED 8137D335 -27EE 8137D336 -27EF 8137D337 -27F0 8137D338 -27F1 8137D339 -27F2 8137D430 -27F3 8137D431 -27F4 8137D432 -27F5 8137D433 -27F6 8137D434 -27F7 8137D435 -27F8 8137D436 -27F9 8137D437 -27FA 8137D438 -27FB 8137D439 -27FC 8137D530 -27FD 8137D531 -27FE 8137D532 -27FF 8137D533 -2800 8137D534 -2801 8137D535 -2802 8137D536 -2803 8137D537 -2804 8137D538 -2805 8137D539 -2806 8137D630 -2807 8137D631 -2808 8137D632 -2809 8137D633 -280A 8137D634 -280B 8137D635 -280C 8137D636 -280D 8137D637 -280E 8137D638 -280F 8137D639 -2810 8137D730 -2811 8137D731 -2812 8137D732 -2813 8137D733 -2814 8137D734 -2815 8137D735 -2816 8137D736 -2817 8137D737 -2818 8137D738 -2819 8137D739 -281A 8137D830 -281B 8137D831 -281C 8137D832 -281D 8137D833 -281E 8137D834 -281F 8137D835 -2820 8137D836 -2821 8137D837 -2822 8137D838 -2823 8137D839 -2824 8137D930 -2825 8137D931 -2826 8137D932 -2827 8137D933 -2828 8137D934 -2829 8137D935 -282A 8137D936 -282B 8137D937 -282C 8137D938 -282D 8137D939 -282E 8137DA30 -282F 8137DA31 -2830 8137DA32 -2831 8137DA33 -2832 8137DA34 -2833 8137DA35 -2834 8137DA36 -2835 8137DA37 -2836 8137DA38 -2837 8137DA39 -2838 8137DB30 
-2839 8137DB31 -283A 8137DB32 -283B 8137DB33 -283C 8137DB34 -283D 8137DB35 -283E 8137DB36 -283F 8137DB37 -2840 8137DB38 -2841 8137DB39 -2842 8137DC30 -2843 8137DC31 -2844 8137DC32 -2845 8137DC33 -2846 8137DC34 -2847 8137DC35 -2848 8137DC36 -2849 8137DC37 -284A 8137DC38 -284B 8137DC39 -284C 8137DD30 -284D 8137DD31 -284E 8137DD32 -284F 8137DD33 -2850 8137DD34 -2851 8137DD35 -2852 8137DD36 -2853 8137DD37 -2854 8137DD38 -2855 8137DD39 -2856 8137DE30 -2857 8137DE31 -2858 8137DE32 -2859 8137DE33 -285A 8137DE34 -285B 8137DE35 -285C 8137DE36 -285D 8137DE37 -285E 8137DE38 -285F 8137DE39 -2860 8137DF30 -2861 8137DF31 -2862 8137DF32 -2863 8137DF33 -2864 8137DF34 -2865 8137DF35 -2866 8137DF36 -2867 8137DF37 -2868 8137DF38 -2869 8137DF39 -286A 8137E030 -286B 8137E031 -286C 8137E032 -286D 8137E033 -286E 8137E034 -286F 8137E035 -2870 8137E036 -2871 8137E037 -2872 8137E038 -2873 8137E039 -2874 8137E130 -2875 8137E131 -2876 8137E132 -2877 8137E133 -2878 8137E134 -2879 8137E135 -287A 8137E136 -287B 8137E137 -287C 8137E138 -287D 8137E139 -287E 8137E230 -287F 8137E231 -2880 8137E232 -2881 8137E233 -2882 8137E234 -2883 8137E235 -2884 8137E236 -2885 8137E237 -2886 8137E238 -2887 8137E239 -2888 8137E330 -2889 8137E331 -288A 8137E332 -288B 8137E333 -288C 8137E334 -288D 8137E335 -288E 8137E336 -288F 8137E337 -2890 8137E338 -2891 8137E339 -2892 8137E430 -2893 8137E431 -2894 8137E432 -2895 8137E433 -2896 8137E434 -2897 8137E435 -2898 8137E436 -2899 8137E437 -289A 8137E438 -289B 8137E439 -289C 8137E530 -289D 8137E531 -289E 8137E532 -289F 8137E533 -28A0 8137E534 -28A1 8137E535 -28A2 8137E536 -28A3 8137E537 -28A4 8137E538 -28A5 8137E539 -28A6 8137E630 -28A7 8137E631 -28A8 8137E632 -28A9 8137E633 -28AA 8137E634 -28AB 8137E635 -28AC 8137E636 -28AD 8137E637 -28AE 8137E638 -28AF 8137E639 -28B0 8137E730 -28B1 8137E731 -28B2 8137E732 -28B3 8137E733 -28B4 8137E734 -28B5 8137E735 -28B6 8137E736 -28B7 8137E737 -28B8 8137E738 -28B9 8137E739 -28BA 8137E830 -28BB 8137E831 -28BC 8137E832 -28BD 8137E833 -28BE 8137E834 -28BF 8137E835 -28C0 8137E836 -28C1 8137E837 -28C2 8137E838 -28C3 8137E839 -28C4 8137E930 -28C5 8137E931 -28C6 8137E932 -28C7 8137E933 -28C8 8137E934 -28C9 8137E935 -28CA 8137E936 -28CB 8137E937 -28CC 8137E938 -28CD 8137E939 -28CE 8137EA30 -28CF 8137EA31 -28D0 8137EA32 -28D1 8137EA33 -28D2 8137EA34 -28D3 8137EA35 -28D4 8137EA36 -28D5 8137EA37 -28D6 8137EA38 -28D7 8137EA39 -28D8 8137EB30 -28D9 8137EB31 -28DA 8137EB32 -28DB 8137EB33 -28DC 8137EB34 -28DD 8137EB35 -28DE 8137EB36 -28DF 8137EB37 -28E0 8137EB38 -28E1 8137EB39 -28E2 8137EC30 -28E3 8137EC31 -28E4 8137EC32 -28E5 8137EC33 -28E6 8137EC34 -28E7 8137EC35 -28E8 8137EC36 -28E9 8137EC37 -28EA 8137EC38 -28EB 8137EC39 -28EC 8137ED30 -28ED 8137ED31 -28EE 8137ED32 -28EF 8137ED33 -28F0 8137ED34 -28F1 8137ED35 -28F2 8137ED36 -28F3 8137ED37 -28F4 8137ED38 -28F5 8137ED39 -28F6 8137EE30 -28F7 8137EE31 -28F8 8137EE32 -28F9 8137EE33 -28FA 8137EE34 -28FB 8137EE35 -28FC 8137EE36 -28FD 8137EE37 -28FE 8137EE38 -28FF 8137EE39 -2900 8137EF30 -2901 8137EF31 -2902 8137EF32 -2903 8137EF33 -2904 8137EF34 -2905 8137EF35 -2906 8137EF36 -2907 8137EF37 -2908 8137EF38 -2909 8137EF39 -290A 8137F030 -290B 8137F031 -290C 8137F032 -290D 8137F033 -290E 8137F034 -290F 8137F035 -2910 8137F036 -2911 8137F037 -2912 8137F038 -2913 8137F039 -2914 8137F130 -2915 8137F131 -2916 8137F132 -2917 8137F133 -2918 8137F134 -2919 8137F135 -291A 8137F136 -291B 8137F137 -291C 8137F138 -291D 8137F139 -291E 8137F230 -291F 8137F231 -2920 8137F232 -2921 8137F233 -2922 8137F234 -2923 8137F235 -2924 8137F236 -2925 8137F237 
-2926 8137F238 -2927 8137F239 -2928 8137F330 -2929 8137F331 -292A 8137F332 -292B 8137F333 -292C 8137F334 -292D 8137F335 -292E 8137F336 -292F 8137F337 -2930 8137F338 -2931 8137F339 -2932 8137F430 -2933 8137F431 -2934 8137F432 -2935 8137F433 -2936 8137F434 -2937 8137F435 -2938 8137F436 -2939 8137F437 -293A 8137F438 -293B 8137F439 -293C 8137F530 -293D 8137F531 -293E 8137F532 -293F 8137F533 -2940 8137F534 -2941 8137F535 -2942 8137F536 -2943 8137F537 -2944 8137F538 -2945 8137F539 -2946 8137F630 -2947 8137F631 -2948 8137F632 -2949 8137F633 -294A 8137F634 -294B 8137F635 -294C 8137F636 -294D 8137F637 -294E 8137F638 -294F 8137F639 -2950 8137F730 -2951 8137F731 -2952 8137F732 -2953 8137F733 -2954 8137F734 -2955 8137F735 -2956 8137F736 -2957 8137F737 -2958 8137F738 -2959 8137F739 -295A 8137F830 -295B 8137F831 -295C 8137F832 -295D 8137F833 -295E 8137F834 -295F 8137F835 -2960 8137F836 -2961 8137F837 -2962 8137F838 -2963 8137F839 -2964 8137F930 -2965 8137F931 -2966 8137F932 -2967 8137F933 -2968 8137F934 -2969 8137F935 -296A 8137F936 -296B 8137F937 -296C 8137F938 -296D 8137F939 -296E 8137FA30 -296F 8137FA31 -2970 8137FA32 -2971 8137FA33 -2972 8137FA34 -2973 8137FA35 -2974 8137FA36 -2975 8137FA37 -2976 8137FA38 -2977 8137FA39 -2978 8137FB30 -2979 8137FB31 -297A 8137FB32 -297B 8137FB33 -297C 8137FB34 -297D 8137FB35 -297E 8137FB36 -297F 8137FB37 -2980 8137FB38 -2981 8137FB39 -2982 8137FC30 -2983 8137FC31 -2984 8137FC32 -2985 8137FC33 -2986 8137FC34 -2987 8137FC35 -2988 8137FC36 -2989 8137FC37 -298A 8137FC38 -298B 8137FC39 -298C 8137FD30 -298D 8137FD31 -298E 8137FD32 -298F 8137FD33 -2990 8137FD34 -2991 8137FD35 -2992 8137FD36 -2993 8137FD37 -2994 8137FD38 -2995 8137FD39 -2996 8137FE30 -2997 8137FE31 -2998 8137FE32 -2999 8137FE33 -299A 8137FE34 -299B 8137FE35 -299C 8137FE36 -299D 8137FE37 -299E 8137FE38 -299F 8137FE39 -29A0 81388130 -29A1 81388131 -29A2 81388132 -29A3 81388133 -29A4 81388134 -29A5 81388135 -29A6 81388136 -29A7 81388137 -29A8 81388138 -29A9 81388139 -29AA 81388230 -29AB 81388231 -29AC 81388232 -29AD 81388233 -29AE 81388234 -29AF 81388235 -29B0 81388236 -29B1 81388237 -29B2 81388238 -29B3 81388239 -29B4 81388330 -29B5 81388331 -29B6 81388332 -29B7 81388333 -29B8 81388334 -29B9 81388335 -29BA 81388336 -29BB 81388337 -29BC 81388338 -29BD 81388339 -29BE 81388430 -29BF 81388431 -29C0 81388432 -29C1 81388433 -29C2 81388434 -29C3 81388435 -29C4 81388436 -29C5 81388437 -29C6 81388438 -29C7 81388439 -29C8 81388530 -29C9 81388531 -29CA 81388532 -29CB 81388533 -29CC 81388534 -29CD 81388535 -29CE 81388536 -29CF 81388537 -29D0 81388538 -29D1 81388539 -29D2 81388630 -29D3 81388631 -29D4 81388632 -29D5 81388633 -29D6 81388634 -29D7 81388635 -29D8 81388636 -29D9 81388637 -29DA 81388638 -29DB 81388639 -29DC 81388730 -29DD 81388731 -29DE 81388732 -29DF 81388733 -29E0 81388734 -29E1 81388735 -29E2 81388736 -29E3 81388737 -29E4 81388738 -29E5 81388739 -29E6 81388830 -29E7 81388831 -29E8 81388832 -29E9 81388833 -29EA 81388834 -29EB 81388835 -29EC 81388836 -29ED 81388837 -29EE 81388838 -29EF 81388839 -29F0 81388930 -29F1 81388931 -29F2 81388932 -29F3 81388933 -29F4 81388934 -29F5 81388935 -29F6 81388936 -29F7 81388937 -29F8 81388938 -29F9 81388939 -29FA 81388A30 -29FB 81388A31 -29FC 81388A32 -29FD 81388A33 -29FE 81388A34 -29FF 81388A35 -2A00 81388A36 -2A01 81388A37 -2A02 81388A38 -2A03 81388A39 -2A04 81388B30 -2A05 81388B31 -2A06 81388B32 -2A07 81388B33 -2A08 81388B34 -2A09 81388B35 -2A0A 81388B36 -2A0B 81388B37 -2A0C 81388B38 -2A0D 81388B39 -2A0E 81388C30 -2A0F 81388C31 -2A10 81388C32 -2A11 81388C33 -2A12 81388C34 
-2A13 81388C35 -2A14 81388C36 -2A15 81388C37 -2A16 81388C38 -2A17 81388C39 -2A18 81388D30 -2A19 81388D31 -2A1A 81388D32 -2A1B 81388D33 -2A1C 81388D34 -2A1D 81388D35 -2A1E 81388D36 -2A1F 81388D37 -2A20 81388D38 -2A21 81388D39 -2A22 81388E30 -2A23 81388E31 -2A24 81388E32 -2A25 81388E33 -2A26 81388E34 -2A27 81388E35 -2A28 81388E36 -2A29 81388E37 -2A2A 81388E38 -2A2B 81388E39 -2A2C 81388F30 -2A2D 81388F31 -2A2E 81388F32 -2A2F 81388F33 -2A30 81388F34 -2A31 81388F35 -2A32 81388F36 -2A33 81388F37 -2A34 81388F38 -2A35 81388F39 -2A36 81389030 -2A37 81389031 -2A38 81389032 -2A39 81389033 -2A3A 81389034 -2A3B 81389035 -2A3C 81389036 -2A3D 81389037 -2A3E 81389038 -2A3F 81389039 -2A40 81389130 -2A41 81389131 -2A42 81389132 -2A43 81389133 -2A44 81389134 -2A45 81389135 -2A46 81389136 -2A47 81389137 -2A48 81389138 -2A49 81389139 -2A4A 81389230 -2A4B 81389231 -2A4C 81389232 -2A4D 81389233 -2A4E 81389234 -2A4F 81389235 -2A50 81389236 -2A51 81389237 -2A52 81389238 -2A53 81389239 -2A54 81389330 -2A55 81389331 -2A56 81389332 -2A57 81389333 -2A58 81389334 -2A59 81389335 -2A5A 81389336 -2A5B 81389337 -2A5C 81389338 -2A5D 81389339 -2A5E 81389430 -2A5F 81389431 -2A60 81389432 -2A61 81389433 -2A62 81389434 -2A63 81389435 -2A64 81389436 -2A65 81389437 -2A66 81389438 -2A67 81389439 -2A68 81389530 -2A69 81389531 -2A6A 81389532 -2A6B 81389533 -2A6C 81389534 -2A6D 81389535 -2A6E 81389536 -2A6F 81389537 -2A70 81389538 -2A71 81389539 -2A72 81389630 -2A73 81389631 -2A74 81389632 -2A75 81389633 -2A76 81389634 -2A77 81389635 -2A78 81389636 -2A79 81389637 -2A7A 81389638 -2A7B 81389639 -2A7C 81389730 -2A7D 81389731 -2A7E 81389732 -2A7F 81389733 -2A80 81389734 -2A81 81389735 -2A82 81389736 -2A83 81389737 -2A84 81389738 -2A85 81389739 -2A86 81389830 -2A87 81389831 -2A88 81389832 -2A89 81389833 -2A8A 81389834 -2A8B 81389835 -2A8C 81389836 -2A8D 81389837 -2A8E 81389838 -2A8F 81389839 -2A90 81389930 -2A91 81389931 -2A92 81389932 -2A93 81389933 -2A94 81389934 -2A95 81389935 -2A96 81389936 -2A97 81389937 -2A98 81389938 -2A99 81389939 -2A9A 81389A30 -2A9B 81389A31 -2A9C 81389A32 -2A9D 81389A33 -2A9E 81389A34 -2A9F 81389A35 -2AA0 81389A36 -2AA1 81389A37 -2AA2 81389A38 -2AA3 81389A39 -2AA4 81389B30 -2AA5 81389B31 -2AA6 81389B32 -2AA7 81389B33 -2AA8 81389B34 -2AA9 81389B35 -2AAA 81389B36 -2AAB 81389B37 -2AAC 81389B38 -2AAD 81389B39 -2AAE 81389C30 -2AAF 81389C31 -2AB0 81389C32 -2AB1 81389C33 -2AB2 81389C34 -2AB3 81389C35 -2AB4 81389C36 -2AB5 81389C37 -2AB6 81389C38 -2AB7 81389C39 -2AB8 81389D30 -2AB9 81389D31 -2ABA 81389D32 -2ABB 81389D33 -2ABC 81389D34 -2ABD 81389D35 -2ABE 81389D36 -2ABF 81389D37 -2AC0 81389D38 -2AC1 81389D39 -2AC2 81389E30 -2AC3 81389E31 -2AC4 81389E32 -2AC5 81389E33 -2AC6 81389E34 -2AC7 81389E35 -2AC8 81389E36 -2AC9 81389E37 -2ACA 81389E38 -2ACB 81389E39 -2ACC 81389F30 -2ACD 81389F31 -2ACE 81389F32 -2ACF 81389F33 -2AD0 81389F34 -2AD1 81389F35 -2AD2 81389F36 -2AD3 81389F37 -2AD4 81389F38 -2AD5 81389F39 -2AD6 8138A030 -2AD7 8138A031 -2AD8 8138A032 -2AD9 8138A033 -2ADA 8138A034 -2ADB 8138A035 -2ADC 8138A036 -2ADD 8138A037 -2ADE 8138A038 -2ADF 8138A039 -2AE0 8138A130 -2AE1 8138A131 -2AE2 8138A132 -2AE3 8138A133 -2AE4 8138A134 -2AE5 8138A135 -2AE6 8138A136 -2AE7 8138A137 -2AE8 8138A138 -2AE9 8138A139 -2AEA 8138A230 -2AEB 8138A231 -2AEC 8138A232 -2AED 8138A233 -2AEE 8138A234 -2AEF 8138A235 -2AF0 8138A236 -2AF1 8138A237 -2AF2 8138A238 -2AF3 8138A239 -2AF4 8138A330 -2AF5 8138A331 -2AF6 8138A332 -2AF7 8138A333 -2AF8 8138A334 -2AF9 8138A335 -2AFA 8138A336 -2AFB 8138A337 -2AFC 8138A338 -2AFD 8138A339 -2AFE 8138A430 -2AFF 8138A431 
-2B00 8138A432 -2B01 8138A433 -2B02 8138A434 -2B03 8138A435 -2B04 8138A436 -2B05 8138A437 -2B06 8138A438 -2B07 8138A439 -2B08 8138A530 -2B09 8138A531 -2B0A 8138A532 -2B0B 8138A533 -2B0C 8138A534 -2B0D 8138A535 -2B0E 8138A536 -2B0F 8138A537 -2B10 8138A538 -2B11 8138A539 -2B12 8138A630 -2B13 8138A631 -2B14 8138A632 -2B15 8138A633 -2B16 8138A634 -2B17 8138A635 -2B18 8138A636 -2B19 8138A637 -2B1A 8138A638 -2B1B 8138A639 -2B1C 8138A730 -2B1D 8138A731 -2B1E 8138A732 -2B1F 8138A733 -2B20 8138A734 -2B21 8138A735 -2B22 8138A736 -2B23 8138A737 -2B24 8138A738 -2B25 8138A739 -2B26 8138A830 -2B27 8138A831 -2B28 8138A832 -2B29 8138A833 -2B2A 8138A834 -2B2B 8138A835 -2B2C 8138A836 -2B2D 8138A837 -2B2E 8138A838 -2B2F 8138A839 -2B30 8138A930 -2B31 8138A931 -2B32 8138A932 -2B33 8138A933 -2B34 8138A934 -2B35 8138A935 -2B36 8138A936 -2B37 8138A937 -2B38 8138A938 -2B39 8138A939 -2B3A 8138AA30 -2B3B 8138AA31 -2B3C 8138AA32 -2B3D 8138AA33 -2B3E 8138AA34 -2B3F 8138AA35 -2B40 8138AA36 -2B41 8138AA37 -2B42 8138AA38 -2B43 8138AA39 -2B44 8138AB30 -2B45 8138AB31 -2B46 8138AB32 -2B47 8138AB33 -2B48 8138AB34 -2B49 8138AB35 -2B4A 8138AB36 -2B4B 8138AB37 -2B4C 8138AB38 -2B4D 8138AB39 -2B4E 8138AC30 -2B4F 8138AC31 -2B50 8138AC32 -2B51 8138AC33 -2B52 8138AC34 -2B53 8138AC35 -2B54 8138AC36 -2B55 8138AC37 -2B56 8138AC38 -2B57 8138AC39 -2B58 8138AD30 -2B59 8138AD31 -2B5A 8138AD32 -2B5B 8138AD33 -2B5C 8138AD34 -2B5D 8138AD35 -2B5E 8138AD36 -2B5F 8138AD37 -2B60 8138AD38 -2B61 8138AD39 -2B62 8138AE30 -2B63 8138AE31 -2B64 8138AE32 -2B65 8138AE33 -2B66 8138AE34 -2B67 8138AE35 -2B68 8138AE36 -2B69 8138AE37 -2B6A 8138AE38 -2B6B 8138AE39 -2B6C 8138AF30 -2B6D 8138AF31 -2B6E 8138AF32 -2B6F 8138AF33 -2B70 8138AF34 -2B71 8138AF35 -2B72 8138AF36 -2B73 8138AF37 -2B74 8138AF38 -2B75 8138AF39 -2B76 8138B030 -2B77 8138B031 -2B78 8138B032 -2B79 8138B033 -2B7A 8138B034 -2B7B 8138B035 -2B7C 8138B036 -2B7D 8138B037 -2B7E 8138B038 -2B7F 8138B039 -2B80 8138B130 -2B81 8138B131 -2B82 8138B132 -2B83 8138B133 -2B84 8138B134 -2B85 8138B135 -2B86 8138B136 -2B87 8138B137 -2B88 8138B138 -2B89 8138B139 -2B8A 8138B230 -2B8B 8138B231 -2B8C 8138B232 -2B8D 8138B233 -2B8E 8138B234 -2B8F 8138B235 -2B90 8138B236 -2B91 8138B237 -2B92 8138B238 -2B93 8138B239 -2B94 8138B330 -2B95 8138B331 -2B96 8138B332 -2B97 8138B333 -2B98 8138B334 -2B99 8138B335 -2B9A 8138B336 -2B9B 8138B337 -2B9C 8138B338 -2B9D 8138B339 -2B9E 8138B430 -2B9F 8138B431 -2BA0 8138B432 -2BA1 8138B433 -2BA2 8138B434 -2BA3 8138B435 -2BA4 8138B436 -2BA5 8138B437 -2BA6 8138B438 -2BA7 8138B439 -2BA8 8138B530 -2BA9 8138B531 -2BAA 8138B532 -2BAB 8138B533 -2BAC 8138B534 -2BAD 8138B535 -2BAE 8138B536 -2BAF 8138B537 -2BB0 8138B538 -2BB1 8138B539 -2BB2 8138B630 -2BB3 8138B631 -2BB4 8138B632 -2BB5 8138B633 -2BB6 8138B634 -2BB7 8138B635 -2BB8 8138B636 -2BB9 8138B637 -2BBA 8138B638 -2BBB 8138B639 -2BBC 8138B730 -2BBD 8138B731 -2BBE 8138B732 -2BBF 8138B733 -2BC0 8138B734 -2BC1 8138B735 -2BC2 8138B736 -2BC3 8138B737 -2BC4 8138B738 -2BC5 8138B739 -2BC6 8138B830 -2BC7 8138B831 -2BC8 8138B832 -2BC9 8138B833 -2BCA 8138B834 -2BCB 8138B835 -2BCC 8138B836 -2BCD 8138B837 -2BCE 8138B838 -2BCF 8138B839 -2BD0 8138B930 -2BD1 8138B931 -2BD2 8138B932 -2BD3 8138B933 -2BD4 8138B934 -2BD5 8138B935 -2BD6 8138B936 -2BD7 8138B937 -2BD8 8138B938 -2BD9 8138B939 -2BDA 8138BA30 -2BDB 8138BA31 -2BDC 8138BA32 -2BDD 8138BA33 -2BDE 8138BA34 -2BDF 8138BA35 -2BE0 8138BA36 -2BE1 8138BA37 -2BE2 8138BA38 -2BE3 8138BA39 -2BE4 8138BB30 -2BE5 8138BB31 -2BE6 8138BB32 -2BE7 8138BB33 -2BE8 8138BB34 -2BE9 8138BB35 -2BEA 8138BB36 -2BEB 8138BB37 -2BEC 8138BB38 
[deleted hunk (collapsed): Unicode-to-GB18030 mapping table, one "UNICODE GB18030" hex pair per removed line. This stretch deletes the entries for U+2BED through U+3DD4, where the excerpt cuts off mid-entry. Most code points in the range map to 4-byte GB18030 sequences (e.g. 2BED 8138BB39, 34A3 82308130); interspersed entries map to legacy 2-byte codes, including 3000 A1A1 (ideographic space) and much of the surrounding CJK punctuation, hiragana 3041-3093 to A4A1-A4F3, katakana 30A1-30F6 to A5A1-A5F6, bopomofo 3105-3129 to A8C5-A8E9, parenthesized digits 3220-3229 to A2E5-A2EE, and scattered FE5x-FE7x and A94x-A95x codes for CJK compatibility forms and unit symbols.]
8231ED31 -3DD5 8231ED32 -3DD6 8231ED33 -3DD7 8231ED34 -3DD8 8231ED35 -3DD9 8231ED36 -3DDA 8231ED37 -3DDB 8231ED38 -3DDC 8231ED39 -3DDD 8231EE30 -3DDE 8231EE31 -3DDF 8231EE32 -3DE0 8231EE33 -3DE1 8231EE34 -3DE2 8231EE35 -3DE3 8231EE36 -3DE4 8231EE37 -3DE5 8231EE38 -3DE6 8231EE39 -3DE7 8231EF30 -3DE8 8231EF31 -3DE9 8231EF32 -3DEA 8231EF33 -3DEB 8231EF34 -3DEC 8231EF35 -3DED 8231EF36 -3DEE 8231EF37 -3DEF 8231EF38 -3DF0 8231EF39 -3DF1 8231F030 -3DF2 8231F031 -3DF3 8231F032 -3DF4 8231F033 -3DF5 8231F034 -3DF6 8231F035 -3DF7 8231F036 -3DF8 8231F037 -3DF9 8231F038 -3DFA 8231F039 -3DFB 8231F130 -3DFC 8231F131 -3DFD 8231F132 -3DFE 8231F133 -3DFF 8231F134 -3E00 8231F135 -3E01 8231F136 -3E02 8231F137 -3E03 8231F138 -3E04 8231F139 -3E05 8231F230 -3E06 8231F231 -3E07 8231F232 -3E08 8231F233 -3E09 8231F234 -3E0A 8231F235 -3E0B 8231F236 -3E0C 8231F237 -3E0D 8231F238 -3E0E 8231F239 -3E0F 8231F330 -3E10 8231F331 -3E11 8231F332 -3E12 8231F333 -3E13 8231F334 -3E14 8231F335 -3E15 8231F336 -3E16 8231F337 -3E17 8231F338 -3E18 8231F339 -3E19 8231F430 -3E1A 8231F431 -3E1B 8231F432 -3E1C 8231F433 -3E1D 8231F434 -3E1E 8231F435 -3E1F 8231F436 -3E20 8231F437 -3E21 8231F438 -3E22 8231F439 -3E23 8231F530 -3E24 8231F531 -3E25 8231F532 -3E26 8231F533 -3E27 8231F534 -3E28 8231F535 -3E29 8231F536 -3E2A 8231F537 -3E2B 8231F538 -3E2C 8231F539 -3E2D 8231F630 -3E2E 8231F631 -3E2F 8231F632 -3E30 8231F633 -3E31 8231F634 -3E32 8231F635 -3E33 8231F636 -3E34 8231F637 -3E35 8231F638 -3E36 8231F639 -3E37 8231F730 -3E38 8231F731 -3E39 8231F732 -3E3A 8231F733 -3E3B 8231F734 -3E3C 8231F735 -3E3D 8231F736 -3E3E 8231F737 -3E3F 8231F738 -3E40 8231F739 -3E41 8231F830 -3E42 8231F831 -3E43 8231F832 -3E44 8231F833 -3E45 8231F834 -3E46 8231F835 -3E47 8231F836 -3E48 8231F837 -3E49 8231F838 -3E4A 8231F839 -3E4B 8231F930 -3E4C 8231F931 -3E4D 8231F932 -3E4E 8231F933 -3E4F 8231F934 -3E50 8231F935 -3E51 8231F936 -3E52 8231F937 -3E53 8231F938 -3E54 8231F939 -3E55 8231FA30 -3E56 8231FA31 -3E57 8231FA32 -3E58 8231FA33 -3E59 8231FA34 -3E5A 8231FA35 -3E5B 8231FA36 -3E5C 8231FA37 -3E5D 8231FA38 -3E5E 8231FA39 -3E5F 8231FB30 -3E60 8231FB31 -3E61 8231FB32 -3E62 8231FB33 -3E63 8231FB34 -3E64 8231FB35 -3E65 8231FB36 -3E66 8231FB37 -3E67 8231FB38 -3E68 8231FB39 -3E69 8231FC30 -3E6A 8231FC31 -3E6B 8231FC32 -3E6C 8231FC33 -3E6D 8231FC34 -3E6E 8231FC35 -3E6F 8231FC36 -3E70 8231FC37 -3E71 8231FC38 -3E72 8231FC39 -3E73 8231FD30 -3E74 8231FD31 -3E75 8231FD32 -3E76 8231FD33 -3E77 8231FD34 -3E78 8231FD35 -3E79 8231FD36 -3E7A 8231FD37 -3E7B 8231FD38 -3E7C 8231FD39 -3E7D 8231FE30 -3E7E 8231FE31 -3E7F 8231FE32 -3E80 8231FE33 -3E81 8231FE34 -3E82 8231FE35 -3E83 8231FE36 -3E84 8231FE37 -3E85 8231FE38 -3E86 8231FE39 -3E87 82328130 -3E88 82328131 -3E89 82328132 -3E8A 82328133 -3E8B 82328134 -3E8C 82328135 -3E8D 82328136 -3E8E 82328137 -3E8F 82328138 -3E90 82328139 -3E91 82328230 -3E92 82328231 -3E93 82328232 -3E94 82328233 -3E95 82328234 -3E96 82328235 -3E97 82328236 -3E98 82328237 -3E99 82328238 -3E9A 82328239 -3E9B 82328330 -3E9C 82328331 -3E9D 82328332 -3E9E 82328333 -3E9F 82328334 -3EA0 82328335 -3EA1 82328336 -3EA2 82328337 -3EA3 82328338 -3EA4 82328339 -3EA5 82328430 -3EA6 82328431 -3EA7 82328432 -3EA8 82328433 -3EA9 82328434 -3EAA 82328435 -3EAB 82328436 -3EAC 82328437 -3EAD 82328438 -3EAE 82328439 -3EAF 82328530 -3EB0 82328531 -3EB1 82328532 -3EB2 82328533 -3EB3 82328534 -3EB4 82328535 -3EB5 82328536 -3EB6 82328537 -3EB7 82328538 -3EB8 82328539 -3EB9 82328630 -3EBA 82328631 -3EBB 82328632 -3EBC 82328633 -3EBD 82328634 -3EBE 82328635 -3EBF 82328636 -3EC0 82328637 -3EC1 
82328638 -3EC2 82328639 -3EC3 82328730 -3EC4 82328731 -3EC5 82328732 -3EC6 82328733 -3EC7 82328734 -3EC8 82328735 -3EC9 82328736 -3ECA 82328737 -3ECB 82328738 -3ECC 82328739 -3ECD 82328830 -3ECE 82328831 -3ECF 82328832 -3ED0 82328833 -3ED1 82328834 -3ED2 82328835 -3ED3 82328836 -3ED4 82328837 -3ED5 82328838 -3ED6 82328839 -3ED7 82328930 -3ED8 82328931 -3ED9 82328932 -3EDA 82328933 -3EDB 82328934 -3EDC 82328935 -3EDD 82328936 -3EDE 82328937 -3EDF 82328938 -3EE0 82328939 -3EE1 82328A30 -3EE2 82328A31 -3EE3 82328A32 -3EE4 82328A33 -3EE5 82328A34 -3EE6 82328A35 -3EE7 82328A36 -3EE8 82328A37 -3EE9 82328A38 -3EEA 82328A39 -3EEB 82328B30 -3EEC 82328B31 -3EED 82328B32 -3EEE 82328B33 -3EEF 82328B34 -3EF0 82328B35 -3EF1 82328B36 -3EF2 82328B37 -3EF3 82328B38 -3EF4 82328B39 -3EF5 82328C30 -3EF6 82328C31 -3EF7 82328C32 -3EF8 82328C33 -3EF9 82328C34 -3EFA 82328C35 -3EFB 82328C36 -3EFC 82328C37 -3EFD 82328C38 -3EFE 82328C39 -3EFF 82328D30 -3F00 82328D31 -3F01 82328D32 -3F02 82328D33 -3F03 82328D34 -3F04 82328D35 -3F05 82328D36 -3F06 82328D37 -3F07 82328D38 -3F08 82328D39 -3F09 82328E30 -3F0A 82328E31 -3F0B 82328E32 -3F0C 82328E33 -3F0D 82328E34 -3F0E 82328E35 -3F0F 82328E36 -3F10 82328E37 -3F11 82328E38 -3F12 82328E39 -3F13 82328F30 -3F14 82328F31 -3F15 82328F32 -3F16 82328F33 -3F17 82328F34 -3F18 82328F35 -3F19 82328F36 -3F1A 82328F37 -3F1B 82328F38 -3F1C 82328F39 -3F1D 82329030 -3F1E 82329031 -3F1F 82329032 -3F20 82329033 -3F21 82329034 -3F22 82329035 -3F23 82329036 -3F24 82329037 -3F25 82329038 -3F26 82329039 -3F27 82329130 -3F28 82329131 -3F29 82329132 -3F2A 82329133 -3F2B 82329134 -3F2C 82329135 -3F2D 82329136 -3F2E 82329137 -3F2F 82329138 -3F30 82329139 -3F31 82329230 -3F32 82329231 -3F33 82329232 -3F34 82329233 -3F35 82329234 -3F36 82329235 -3F37 82329236 -3F38 82329237 -3F39 82329238 -3F3A 82329239 -3F3B 82329330 -3F3C 82329331 -3F3D 82329332 -3F3E 82329333 -3F3F 82329334 -3F40 82329335 -3F41 82329336 -3F42 82329337 -3F43 82329338 -3F44 82329339 -3F45 82329430 -3F46 82329431 -3F47 82329432 -3F48 82329433 -3F49 82329434 -3F4A 82329435 -3F4B 82329436 -3F4C 82329437 -3F4D 82329438 -3F4E 82329439 -3F4F 82329530 -3F50 82329531 -3F51 82329532 -3F52 82329533 -3F53 82329534 -3F54 82329535 -3F55 82329536 -3F56 82329537 -3F57 82329538 -3F58 82329539 -3F59 82329630 -3F5A 82329631 -3F5B 82329632 -3F5C 82329633 -3F5D 82329634 -3F5E 82329635 -3F5F 82329636 -3F60 82329637 -3F61 82329638 -3F62 82329639 -3F63 82329730 -3F64 82329731 -3F65 82329732 -3F66 82329733 -3F67 82329734 -3F68 82329735 -3F69 82329736 -3F6A 82329737 -3F6B 82329738 -3F6C 82329739 -3F6D 82329830 -3F6E 82329831 -3F6F 82329832 -3F70 82329833 -3F71 82329834 -3F72 82329835 -3F73 82329836 -3F74 82329837 -3F75 82329838 -3F76 82329839 -3F77 82329930 -3F78 82329931 -3F79 82329932 -3F7A 82329933 -3F7B 82329934 -3F7C 82329935 -3F7D 82329936 -3F7E 82329937 -3F7F 82329938 -3F80 82329939 -3F81 82329A30 -3F82 82329A31 -3F83 82329A32 -3F84 82329A33 -3F85 82329A34 -3F86 82329A35 -3F87 82329A36 -3F88 82329A37 -3F89 82329A38 -3F8A 82329A39 -3F8B 82329B30 -3F8C 82329B31 -3F8D 82329B32 -3F8E 82329B33 -3F8F 82329B34 -3F90 82329B35 -3F91 82329B36 -3F92 82329B37 -3F93 82329B38 -3F94 82329B39 -3F95 82329C30 -3F96 82329C31 -3F97 82329C32 -3F98 82329C33 -3F99 82329C34 -3F9A 82329C35 -3F9B 82329C36 -3F9C 82329C37 -3F9D 82329C38 -3F9E 82329C39 -3F9F 82329D30 -3FA0 82329D31 -3FA1 82329D32 -3FA2 82329D33 -3FA3 82329D34 -3FA4 82329D35 -3FA5 82329D36 -3FA6 82329D37 -3FA7 82329D38 -3FA8 82329D39 -3FA9 82329E30 -3FAA 82329E31 -3FAB 82329E32 -3FAC 82329E33 -3FAD 82329E34 -3FAE 
82329E35 -3FAF 82329E36 -3FB0 82329E37 -3FB1 82329E38 -3FB2 82329E39 -3FB3 82329F30 -3FB4 82329F31 -3FB5 82329F32 -3FB6 82329F33 -3FB7 82329F34 -3FB8 82329F35 -3FB9 82329F36 -3FBA 82329F37 -3FBB 82329F38 -3FBC 82329F39 -3FBD 8232A030 -3FBE 8232A031 -3FBF 8232A032 -3FC0 8232A033 -3FC1 8232A034 -3FC2 8232A035 -3FC3 8232A036 -3FC4 8232A037 -3FC5 8232A038 -3FC6 8232A039 -3FC7 8232A130 -3FC8 8232A131 -3FC9 8232A132 -3FCA 8232A133 -3FCB 8232A134 -3FCC 8232A135 -3FCD 8232A136 -3FCE 8232A137 -3FCF 8232A138 -3FD0 8232A139 -3FD1 8232A230 -3FD2 8232A231 -3FD3 8232A232 -3FD4 8232A233 -3FD5 8232A234 -3FD6 8232A235 -3FD7 8232A236 -3FD8 8232A237 -3FD9 8232A238 -3FDA 8232A239 -3FDB 8232A330 -3FDC 8232A331 -3FDD 8232A332 -3FDE 8232A333 -3FDF 8232A334 -3FE0 8232A335 -3FE1 8232A336 -3FE2 8232A337 -3FE3 8232A338 -3FE4 8232A339 -3FE5 8232A430 -3FE6 8232A431 -3FE7 8232A432 -3FE8 8232A433 -3FE9 8232A434 -3FEA 8232A435 -3FEB 8232A436 -3FEC 8232A437 -3FED 8232A438 -3FEE 8232A439 -3FEF 8232A530 -3FF0 8232A531 -3FF1 8232A532 -3FF2 8232A533 -3FF3 8232A534 -3FF4 8232A535 -3FF5 8232A536 -3FF6 8232A537 -3FF7 8232A538 -3FF8 8232A539 -3FF9 8232A630 -3FFA 8232A631 -3FFB 8232A632 -3FFC 8232A633 -3FFD 8232A634 -3FFE 8232A635 -3FFF 8232A636 -4000 8232A637 -4001 8232A638 -4002 8232A639 -4003 8232A730 -4004 8232A731 -4005 8232A732 -4006 8232A733 -4007 8232A734 -4008 8232A735 -4009 8232A736 -400A 8232A737 -400B 8232A738 -400C 8232A739 -400D 8232A830 -400E 8232A831 -400F 8232A832 -4010 8232A833 -4011 8232A834 -4012 8232A835 -4013 8232A836 -4014 8232A837 -4015 8232A838 -4016 8232A839 -4017 8232A930 -4018 8232A931 -4019 8232A932 -401A 8232A933 -401B 8232A934 -401C 8232A935 -401D 8232A936 -401E 8232A937 -401F 8232A938 -4020 8232A939 -4021 8232AA30 -4022 8232AA31 -4023 8232AA32 -4024 8232AA33 -4025 8232AA34 -4026 8232AA35 -4027 8232AA36 -4028 8232AA37 -4029 8232AA38 -402A 8232AA39 -402B 8232AB30 -402C 8232AB31 -402D 8232AB32 -402E 8232AB33 -402F 8232AB34 -4030 8232AB35 -4031 8232AB36 -4032 8232AB37 -4033 8232AB38 -4034 8232AB39 -4035 8232AC30 -4036 8232AC31 -4037 8232AC32 -4038 8232AC33 -4039 8232AC34 -403A 8232AC35 -403B 8232AC36 -403C 8232AC37 -403D 8232AC38 -403E 8232AC39 -403F 8232AD30 -4040 8232AD31 -4041 8232AD32 -4042 8232AD33 -4043 8232AD34 -4044 8232AD35 -4045 8232AD36 -4046 8232AD37 -4047 8232AD38 -4048 8232AD39 -4049 8232AE30 -404A 8232AE31 -404B 8232AE32 -404C 8232AE33 -404D 8232AE34 -404E 8232AE35 -404F 8232AE36 -4050 8232AE37 -4051 8232AE38 -4052 8232AE39 -4053 8232AF30 -4054 8232AF31 -4055 8232AF32 -4056 FE6F -4057 8232AF33 -4058 8232AF34 -4059 8232AF35 -405A 8232AF36 -405B 8232AF37 -405C 8232AF38 -405D 8232AF39 -405E 8232B030 -405F 8232B031 -4060 8232B032 -4061 8232B033 -4062 8232B034 -4063 8232B035 -4064 8232B036 -4065 8232B037 -4066 8232B038 -4067 8232B039 -4068 8232B130 -4069 8232B131 -406A 8232B132 -406B 8232B133 -406C 8232B134 -406D 8232B135 -406E 8232B136 -406F 8232B137 -4070 8232B138 -4071 8232B139 -4072 8232B230 -4073 8232B231 -4074 8232B232 -4075 8232B233 -4076 8232B234 -4077 8232B235 -4078 8232B236 -4079 8232B237 -407A 8232B238 -407B 8232B239 -407C 8232B330 -407D 8232B331 -407E 8232B332 -407F 8232B333 -4080 8232B334 -4081 8232B335 -4082 8232B336 -4083 8232B337 -4084 8232B338 -4085 8232B339 -4086 8232B430 -4087 8232B431 -4088 8232B432 -4089 8232B433 -408A 8232B434 -408B 8232B435 -408C 8232B436 -408D 8232B437 -408E 8232B438 -408F 8232B439 -4090 8232B530 -4091 8232B531 -4092 8232B532 -4093 8232B533 -4094 8232B534 -4095 8232B535 -4096 8232B536 -4097 8232B537 -4098 8232B538 -4099 8232B539 -409A 8232B630 -409B 
8232B631 -409C 8232B632 -409D 8232B633 -409E 8232B634 -409F 8232B635 -40A0 8232B636 -40A1 8232B637 -40A2 8232B638 -40A3 8232B639 -40A4 8232B730 -40A5 8232B731 -40A6 8232B732 -40A7 8232B733 -40A8 8232B734 -40A9 8232B735 -40AA 8232B736 -40AB 8232B737 -40AC 8232B738 -40AD 8232B739 -40AE 8232B830 -40AF 8232B831 -40B0 8232B832 -40B1 8232B833 -40B2 8232B834 -40B3 8232B835 -40B4 8232B836 -40B5 8232B837 -40B6 8232B838 -40B7 8232B839 -40B8 8232B930 -40B9 8232B931 -40BA 8232B932 -40BB 8232B933 -40BC 8232B934 -40BD 8232B935 -40BE 8232B936 -40BF 8232B937 -40C0 8232B938 -40C1 8232B939 -40C2 8232BA30 -40C3 8232BA31 -40C4 8232BA32 -40C5 8232BA33 -40C6 8232BA34 -40C7 8232BA35 -40C8 8232BA36 -40C9 8232BA37 -40CA 8232BA38 -40CB 8232BA39 -40CC 8232BB30 -40CD 8232BB31 -40CE 8232BB32 -40CF 8232BB33 -40D0 8232BB34 -40D1 8232BB35 -40D2 8232BB36 -40D3 8232BB37 -40D4 8232BB38 -40D5 8232BB39 -40D6 8232BC30 -40D7 8232BC31 -40D8 8232BC32 -40D9 8232BC33 -40DA 8232BC34 -40DB 8232BC35 -40DC 8232BC36 -40DD 8232BC37 -40DE 8232BC38 -40DF 8232BC39 -40E0 8232BD30 -40E1 8232BD31 -40E2 8232BD32 -40E3 8232BD33 -40E4 8232BD34 -40E5 8232BD35 -40E6 8232BD36 -40E7 8232BD37 -40E8 8232BD38 -40E9 8232BD39 -40EA 8232BE30 -40EB 8232BE31 -40EC 8232BE32 -40ED 8232BE33 -40EE 8232BE34 -40EF 8232BE35 -40F0 8232BE36 -40F1 8232BE37 -40F2 8232BE38 -40F3 8232BE39 -40F4 8232BF30 -40F5 8232BF31 -40F6 8232BF32 -40F7 8232BF33 -40F8 8232BF34 -40F9 8232BF35 -40FA 8232BF36 -40FB 8232BF37 -40FC 8232BF38 -40FD 8232BF39 -40FE 8232C030 -40FF 8232C031 -4100 8232C032 -4101 8232C033 -4102 8232C034 -4103 8232C035 -4104 8232C036 -4105 8232C037 -4106 8232C038 -4107 8232C039 -4108 8232C130 -4109 8232C131 -410A 8232C132 -410B 8232C133 -410C 8232C134 -410D 8232C135 -410E 8232C136 -410F 8232C137 -4110 8232C138 -4111 8232C139 -4112 8232C230 -4113 8232C231 -4114 8232C232 -4115 8232C233 -4116 8232C234 -4117 8232C235 -4118 8232C236 -4119 8232C237 -411A 8232C238 -411B 8232C239 -411C 8232C330 -411D 8232C331 -411E 8232C332 -411F 8232C333 -4120 8232C334 -4121 8232C335 -4122 8232C336 -4123 8232C337 -4124 8232C338 -4125 8232C339 -4126 8232C430 -4127 8232C431 -4128 8232C432 -4129 8232C433 -412A 8232C434 -412B 8232C435 -412C 8232C436 -412D 8232C437 -412E 8232C438 -412F 8232C439 -4130 8232C530 -4131 8232C531 -4132 8232C532 -4133 8232C533 -4134 8232C534 -4135 8232C535 -4136 8232C536 -4137 8232C537 -4138 8232C538 -4139 8232C539 -413A 8232C630 -413B 8232C631 -413C 8232C632 -413D 8232C633 -413E 8232C634 -413F 8232C635 -4140 8232C636 -4141 8232C637 -4142 8232C638 -4143 8232C639 -4144 8232C730 -4145 8232C731 -4146 8232C732 -4147 8232C733 -4148 8232C734 -4149 8232C735 -414A 8232C736 -414B 8232C737 -414C 8232C738 -414D 8232C739 -414E 8232C830 -414F 8232C831 -4150 8232C832 -4151 8232C833 -4152 8232C834 -4153 8232C835 -4154 8232C836 -4155 8232C837 -4156 8232C838 -4157 8232C839 -4158 8232C930 -4159 8232C931 -415A 8232C932 -415B 8232C933 -415C 8232C934 -415D 8232C935 -415E 8232C936 -415F FE70 -4160 8232C937 -4161 8232C938 -4162 8232C939 -4163 8232CA30 -4164 8232CA31 -4165 8232CA32 -4166 8232CA33 -4167 8232CA34 -4168 8232CA35 -4169 8232CA36 -416A 8232CA37 -416B 8232CA38 -416C 8232CA39 -416D 8232CB30 -416E 8232CB31 -416F 8232CB32 -4170 8232CB33 -4171 8232CB34 -4172 8232CB35 -4173 8232CB36 -4174 8232CB37 -4175 8232CB38 -4176 8232CB39 -4177 8232CC30 -4178 8232CC31 -4179 8232CC32 -417A 8232CC33 -417B 8232CC34 -417C 8232CC35 -417D 8232CC36 -417E 8232CC37 -417F 8232CC38 -4180 8232CC39 -4181 8232CD30 -4182 8232CD31 -4183 8232CD32 -4184 8232CD33 -4185 8232CD34 -4186 8232CD35 -4187 8232CD36 -4188 
8232CD37 -4189 8232CD38 -418A 8232CD39 -418B 8232CE30 -418C 8232CE31 -418D 8232CE32 -418E 8232CE33 -418F 8232CE34 -4190 8232CE35 -4191 8232CE36 -4192 8232CE37 -4193 8232CE38 -4194 8232CE39 -4195 8232CF30 -4196 8232CF31 -4197 8232CF32 -4198 8232CF33 -4199 8232CF34 -419A 8232CF35 -419B 8232CF36 -419C 8232CF37 -419D 8232CF38 -419E 8232CF39 -419F 8232D030 -41A0 8232D031 -41A1 8232D032 -41A2 8232D033 -41A3 8232D034 -41A4 8232D035 -41A5 8232D036 -41A6 8232D037 -41A7 8232D038 -41A8 8232D039 -41A9 8232D130 -41AA 8232D131 -41AB 8232D132 -41AC 8232D133 -41AD 8232D134 -41AE 8232D135 -41AF 8232D136 -41B0 8232D137 -41B1 8232D138 -41B2 8232D139 -41B3 8232D230 -41B4 8232D231 -41B5 8232D232 -41B6 8232D233 -41B7 8232D234 -41B8 8232D235 -41B9 8232D236 -41BA 8232D237 -41BB 8232D238 -41BC 8232D239 -41BD 8232D330 -41BE 8232D331 -41BF 8232D332 -41C0 8232D333 -41C1 8232D334 -41C2 8232D335 -41C3 8232D336 -41C4 8232D337 -41C5 8232D338 -41C6 8232D339 -41C7 8232D430 -41C8 8232D431 -41C9 8232D432 -41CA 8232D433 -41CB 8232D434 -41CC 8232D435 -41CD 8232D436 -41CE 8232D437 -41CF 8232D438 -41D0 8232D439 -41D1 8232D530 -41D2 8232D531 -41D3 8232D532 -41D4 8232D533 -41D5 8232D534 -41D6 8232D535 -41D7 8232D536 -41D8 8232D537 -41D9 8232D538 -41DA 8232D539 -41DB 8232D630 -41DC 8232D631 -41DD 8232D632 -41DE 8232D633 -41DF 8232D634 -41E0 8232D635 -41E1 8232D636 -41E2 8232D637 -41E3 8232D638 -41E4 8232D639 -41E5 8232D730 -41E6 8232D731 -41E7 8232D732 -41E8 8232D733 -41E9 8232D734 -41EA 8232D735 -41EB 8232D736 -41EC 8232D737 -41ED 8232D738 -41EE 8232D739 -41EF 8232D830 -41F0 8232D831 -41F1 8232D832 -41F2 8232D833 -41F3 8232D834 -41F4 8232D835 -41F5 8232D836 -41F6 8232D837 -41F7 8232D838 -41F8 8232D839 -41F9 8232D930 -41FA 8232D931 -41FB 8232D932 -41FC 8232D933 -41FD 8232D934 -41FE 8232D935 -41FF 8232D936 -4200 8232D937 -4201 8232D938 -4202 8232D939 -4203 8232DA30 -4204 8232DA31 -4205 8232DA32 -4206 8232DA33 -4207 8232DA34 -4208 8232DA35 -4209 8232DA36 -420A 8232DA37 -420B 8232DA38 -420C 8232DA39 -420D 8232DB30 -420E 8232DB31 -420F 8232DB32 -4210 8232DB33 -4211 8232DB34 -4212 8232DB35 -4213 8232DB36 -4214 8232DB37 -4215 8232DB38 -4216 8232DB39 -4217 8232DC30 -4218 8232DC31 -4219 8232DC32 -421A 8232DC33 -421B 8232DC34 -421C 8232DC35 -421D 8232DC36 -421E 8232DC37 -421F 8232DC38 -4220 8232DC39 -4221 8232DD30 -4222 8232DD31 -4223 8232DD32 -4224 8232DD33 -4225 8232DD34 -4226 8232DD35 -4227 8232DD36 -4228 8232DD37 -4229 8232DD38 -422A 8232DD39 -422B 8232DE30 -422C 8232DE31 -422D 8232DE32 -422E 8232DE33 -422F 8232DE34 -4230 8232DE35 -4231 8232DE36 -4232 8232DE37 -4233 8232DE38 -4234 8232DE39 -4235 8232DF30 -4236 8232DF31 -4237 8232DF32 -4238 8232DF33 -4239 8232DF34 -423A 8232DF35 -423B 8232DF36 -423C 8232DF37 -423D 8232DF38 -423E 8232DF39 -423F 8232E030 -4240 8232E031 -4241 8232E032 -4242 8232E033 -4243 8232E034 -4244 8232E035 -4245 8232E036 -4246 8232E037 -4247 8232E038 -4248 8232E039 -4249 8232E130 -424A 8232E131 -424B 8232E132 -424C 8232E133 -424D 8232E134 -424E 8232E135 -424F 8232E136 -4250 8232E137 -4251 8232E138 -4252 8232E139 -4253 8232E230 -4254 8232E231 -4255 8232E232 -4256 8232E233 -4257 8232E234 -4258 8232E235 -4259 8232E236 -425A 8232E237 -425B 8232E238 -425C 8232E239 -425D 8232E330 -425E 8232E331 -425F 8232E332 -4260 8232E333 -4261 8232E334 -4262 8232E335 -4263 8232E336 -4264 8232E337 -4265 8232E338 -4266 8232E339 -4267 8232E430 -4268 8232E431 -4269 8232E432 -426A 8232E433 -426B 8232E434 -426C 8232E435 -426D 8232E436 -426E 8232E437 -426F 8232E438 -4270 8232E439 -4271 8232E530 -4272 8232E531 -4273 8232E532 -4274 8232E533 -4275 
8232E534 -4276 8232E535 -4277 8232E536 -4278 8232E537 -4279 8232E538 -427A 8232E539 -427B 8232E630 -427C 8232E631 -427D 8232E632 -427E 8232E633 -427F 8232E634 -4280 8232E635 -4281 8232E636 -4282 8232E637 -4283 8232E638 -4284 8232E639 -4285 8232E730 -4286 8232E731 -4287 8232E732 -4288 8232E733 -4289 8232E734 -428A 8232E735 -428B 8232E736 -428C 8232E737 -428D 8232E738 -428E 8232E739 -428F 8232E830 -4290 8232E831 -4291 8232E832 -4292 8232E833 -4293 8232E834 -4294 8232E835 -4295 8232E836 -4296 8232E837 -4297 8232E838 -4298 8232E839 -4299 8232E930 -429A 8232E931 -429B 8232E932 -429C 8232E933 -429D 8232E934 -429E 8232E935 -429F 8232E936 -42A0 8232E937 -42A1 8232E938 -42A2 8232E939 -42A3 8232EA30 -42A4 8232EA31 -42A5 8232EA32 -42A6 8232EA33 -42A7 8232EA34 -42A8 8232EA35 -42A9 8232EA36 -42AA 8232EA37 -42AB 8232EA38 -42AC 8232EA39 -42AD 8232EB30 -42AE 8232EB31 -42AF 8232EB32 -42B0 8232EB33 -42B1 8232EB34 -42B2 8232EB35 -42B3 8232EB36 -42B4 8232EB37 -42B5 8232EB38 -42B6 8232EB39 -42B7 8232EC30 -42B8 8232EC31 -42B9 8232EC32 -42BA 8232EC33 -42BB 8232EC34 -42BC 8232EC35 -42BD 8232EC36 -42BE 8232EC37 -42BF 8232EC38 -42C0 8232EC39 -42C1 8232ED30 -42C2 8232ED31 -42C3 8232ED32 -42C4 8232ED33 -42C5 8232ED34 -42C6 8232ED35 -42C7 8232ED36 -42C8 8232ED37 -42C9 8232ED38 -42CA 8232ED39 -42CB 8232EE30 -42CC 8232EE31 -42CD 8232EE32 -42CE 8232EE33 -42CF 8232EE34 -42D0 8232EE35 -42D1 8232EE36 -42D2 8232EE37 -42D3 8232EE38 -42D4 8232EE39 -42D5 8232EF30 -42D6 8232EF31 -42D7 8232EF32 -42D8 8232EF33 -42D9 8232EF34 -42DA 8232EF35 -42DB 8232EF36 -42DC 8232EF37 -42DD 8232EF38 -42DE 8232EF39 -42DF 8232F030 -42E0 8232F031 -42E1 8232F032 -42E2 8232F033 -42E3 8232F034 -42E4 8232F035 -42E5 8232F036 -42E6 8232F037 -42E7 8232F038 -42E8 8232F039 -42E9 8232F130 -42EA 8232F131 -42EB 8232F132 -42EC 8232F133 -42ED 8232F134 -42EE 8232F135 -42EF 8232F136 -42F0 8232F137 -42F1 8232F138 -42F2 8232F139 -42F3 8232F230 -42F4 8232F231 -42F5 8232F232 -42F6 8232F233 -42F7 8232F234 -42F8 8232F235 -42F9 8232F236 -42FA 8232F237 -42FB 8232F238 -42FC 8232F239 -42FD 8232F330 -42FE 8232F331 -42FF 8232F332 -4300 8232F333 -4301 8232F334 -4302 8232F335 -4303 8232F336 -4304 8232F337 -4305 8232F338 -4306 8232F339 -4307 8232F430 -4308 8232F431 -4309 8232F432 -430A 8232F433 -430B 8232F434 -430C 8232F435 -430D 8232F436 -430E 8232F437 -430F 8232F438 -4310 8232F439 -4311 8232F530 -4312 8232F531 -4313 8232F532 -4314 8232F533 -4315 8232F534 -4316 8232F535 -4317 8232F536 -4318 8232F537 -4319 8232F538 -431A 8232F539 -431B 8232F630 -431C 8232F631 -431D 8232F632 -431E 8232F633 -431F 8232F634 -4320 8232F635 -4321 8232F636 -4322 8232F637 -4323 8232F638 -4324 8232F639 -4325 8232F730 -4326 8232F731 -4327 8232F732 -4328 8232F733 -4329 8232F734 -432A 8232F735 -432B 8232F736 -432C 8232F737 -432D 8232F738 -432E 8232F739 -432F 8232F830 -4330 8232F831 -4331 8232F832 -4332 8232F833 -4333 8232F834 -4334 8232F835 -4335 8232F836 -4336 8232F837 -4337 FE72 -4338 8232F838 -4339 8232F839 -433A 8232F930 -433B 8232F931 -433C 8232F932 -433D 8232F933 -433E 8232F934 -433F 8232F935 -4340 8232F936 -4341 8232F937 -4342 8232F938 -4343 8232F939 -4344 8232FA30 -4345 8232FA31 -4346 8232FA32 -4347 8232FA33 -4348 8232FA34 -4349 8232FA35 -434A 8232FA36 -434B 8232FA37 -434C 8232FA38 -434D 8232FA39 -434E 8232FB30 -434F 8232FB31 -4350 8232FB32 -4351 8232FB33 -4352 8232FB34 -4353 8232FB35 -4354 8232FB36 -4355 8232FB37 -4356 8232FB38 -4357 8232FB39 -4358 8232FC30 -4359 8232FC31 -435A 8232FC32 -435B 8232FC33 -435C 8232FC34 -435D 8232FC35 -435E 8232FC36 -435F 8232FC37 -4360 8232FC38 -4361 8232FC39 -4362 
8232FD30 -4363 8232FD31 -4364 8232FD32 -4365 8232FD33 -4366 8232FD34 -4367 8232FD35 -4368 8232FD36 -4369 8232FD37 -436A 8232FD38 -436B 8232FD39 -436C 8232FE30 -436D 8232FE31 -436E 8232FE32 -436F 8232FE33 -4370 8232FE34 -4371 8232FE35 -4372 8232FE36 -4373 8232FE37 -4374 8232FE38 -4375 8232FE39 -4376 82338130 -4377 82338131 -4378 82338132 -4379 82338133 -437A 82338134 -437B 82338135 -437C 82338136 -437D 82338137 -437E 82338138 -437F 82338139 -4380 82338230 -4381 82338231 -4382 82338232 -4383 82338233 -4384 82338234 -4385 82338235 -4386 82338236 -4387 82338237 -4388 82338238 -4389 82338239 -438A 82338330 -438B 82338331 -438C 82338332 -438D 82338333 -438E 82338334 -438F 82338335 -4390 82338336 -4391 82338337 -4392 82338338 -4393 82338339 -4394 82338430 -4395 82338431 -4396 82338432 -4397 82338433 -4398 82338434 -4399 82338435 -439A 82338436 -439B 82338437 -439C 82338438 -439D 82338439 -439E 82338530 -439F 82338531 -43A0 82338532 -43A1 82338533 -43A2 82338534 -43A3 82338535 -43A4 82338536 -43A5 82338537 -43A6 82338538 -43A7 82338539 -43A8 82338630 -43A9 82338631 -43AA 82338632 -43AB 82338633 -43AC FE78 -43AD 82338634 -43AE 82338635 -43AF 82338636 -43B0 82338637 -43B1 FE77 -43B2 82338638 -43B3 82338639 -43B4 82338730 -43B5 82338731 -43B6 82338732 -43B7 82338733 -43B8 82338734 -43B9 82338735 -43BA 82338736 -43BB 82338737 -43BC 82338738 -43BD 82338739 -43BE 82338830 -43BF 82338831 -43C0 82338832 -43C1 82338833 -43C2 82338834 -43C3 82338835 -43C4 82338836 -43C5 82338837 -43C6 82338838 -43C7 82338839 -43C8 82338930 -43C9 82338931 -43CA 82338932 -43CB 82338933 -43CC 82338934 -43CD 82338935 -43CE 82338936 -43CF 82338937 -43D0 82338938 -43D1 82338939 -43D2 82338A30 -43D3 82338A31 -43D4 82338A32 -43D5 82338A33 -43D6 82338A34 -43D7 82338A35 -43D8 82338A36 -43D9 82338A37 -43DA 82338A38 -43DB 82338A39 -43DC 82338B30 -43DD FE7A -43DE 82338B31 -43DF 82338B32 -43E0 82338B33 -43E1 82338B34 -43E2 82338B35 -43E3 82338B36 -43E4 82338B37 -43E5 82338B38 -43E6 82338B39 -43E7 82338C30 -43E8 82338C31 -43E9 82338C32 -43EA 82338C33 -43EB 82338C34 -43EC 82338C35 -43ED 82338C36 -43EE 82338C37 -43EF 82338C38 -43F0 82338C39 -43F1 82338D30 -43F2 82338D31 -43F3 82338D32 -43F4 82338D33 -43F5 82338D34 -43F6 82338D35 -43F7 82338D36 -43F8 82338D37 -43F9 82338D38 -43FA 82338D39 -43FB 82338E30 -43FC 82338E31 -43FD 82338E32 -43FE 82338E33 -43FF 82338E34 -4400 82338E35 -4401 82338E36 -4402 82338E37 -4403 82338E38 -4404 82338E39 -4405 82338F30 -4406 82338F31 -4407 82338F32 -4408 82338F33 -4409 82338F34 -440A 82338F35 -440B 82338F36 -440C 82338F37 -440D 82338F38 -440E 82338F39 -440F 82339030 -4410 82339031 -4411 82339032 -4412 82339033 -4413 82339034 -4414 82339035 -4415 82339036 -4416 82339037 -4417 82339038 -4418 82339039 -4419 82339130 -441A 82339131 -441B 82339132 -441C 82339133 -441D 82339134 -441E 82339135 -441F 82339136 -4420 82339137 -4421 82339138 -4422 82339139 -4423 82339230 -4424 82339231 -4425 82339232 -4426 82339233 -4427 82339234 -4428 82339235 -4429 82339236 -442A 82339237 -442B 82339238 -442C 82339239 -442D 82339330 -442E 82339331 -442F 82339332 -4430 82339333 -4431 82339334 -4432 82339335 -4433 82339336 -4434 82339337 -4435 82339338 -4436 82339339 -4437 82339430 -4438 82339431 -4439 82339432 -443A 82339433 -443B 82339434 -443C 82339435 -443D 82339436 -443E 82339437 -443F 82339438 -4440 82339439 -4441 82339530 -4442 82339531 -4443 82339532 -4444 82339533 -4445 82339534 -4446 82339535 -4447 82339536 -4448 82339537 -4449 82339538 -444A 82339539 -444B 82339630 -444C 82339631 -444D 82339632 -444E 82339633 -444F 82339634 
-4450 82339635 -4451 82339636 -4452 82339637 -4453 82339638 -4454 82339639 -4455 82339730 -4456 82339731 -4457 82339732 -4458 82339733 -4459 82339734 -445A 82339735 -445B 82339736 -445C 82339737 -445D 82339738 -445E 82339739 -445F 82339830 -4460 82339831 -4461 82339832 -4462 82339833 -4463 82339834 -4464 82339835 -4465 82339836 -4466 82339837 -4467 82339838 -4468 82339839 -4469 82339930 -446A 82339931 -446B 82339932 -446C 82339933 -446D 82339934 -446E 82339935 -446F 82339936 -4470 82339937 -4471 82339938 -4472 82339939 -4473 82339A30 -4474 82339A31 -4475 82339A32 -4476 82339A33 -4477 82339A34 -4478 82339A35 -4479 82339A36 -447A 82339A37 -447B 82339A38 -447C 82339A39 -447D 82339B30 -447E 82339B31 -447F 82339B32 -4480 82339B33 -4481 82339B34 -4482 82339B35 -4483 82339B36 -4484 82339B37 -4485 82339B38 -4486 82339B39 -4487 82339C30 -4488 82339C31 -4489 82339C32 -448A 82339C33 -448B 82339C34 -448C 82339C35 -448D 82339C36 -448E 82339C37 -448F 82339C38 -4490 82339C39 -4491 82339D30 -4492 82339D31 -4493 82339D32 -4494 82339D33 -4495 82339D34 -4496 82339D35 -4497 82339D36 -4498 82339D37 -4499 82339D38 -449A 82339D39 -449B 82339E30 -449C 82339E31 -449D 82339E32 -449E 82339E33 -449F 82339E34 -44A0 82339E35 -44A1 82339E36 -44A2 82339E37 -44A3 82339E38 -44A4 82339E39 -44A5 82339F30 -44A6 82339F31 -44A7 82339F32 -44A8 82339F33 -44A9 82339F34 -44AA 82339F35 -44AB 82339F36 -44AC 82339F37 -44AD 82339F38 -44AE 82339F39 -44AF 8233A030 -44B0 8233A031 -44B1 8233A032 -44B2 8233A033 -44B3 8233A034 -44B4 8233A035 -44B5 8233A036 -44B6 8233A037 -44B7 8233A038 -44B8 8233A039 -44B9 8233A130 -44BA 8233A131 -44BB 8233A132 -44BC 8233A133 -44BD 8233A134 -44BE 8233A135 -44BF 8233A136 -44C0 8233A137 -44C1 8233A138 -44C2 8233A139 -44C3 8233A230 -44C4 8233A231 -44C5 8233A232 -44C6 8233A233 -44C7 8233A234 -44C8 8233A235 -44C9 8233A236 -44CA 8233A237 -44CB 8233A238 -44CC 8233A239 -44CD 8233A330 -44CE 8233A331 -44CF 8233A332 -44D0 8233A333 -44D1 8233A334 -44D2 8233A335 -44D3 8233A336 -44D4 8233A337 -44D5 8233A338 -44D6 FE7B -44D7 8233A339 -44D8 8233A430 -44D9 8233A431 -44DA 8233A432 -44DB 8233A433 -44DC 8233A434 -44DD 8233A435 -44DE 8233A436 -44DF 8233A437 -44E0 8233A438 -44E1 8233A439 -44E2 8233A530 -44E3 8233A531 -44E4 8233A532 -44E5 8233A533 -44E6 8233A534 -44E7 8233A535 -44E8 8233A536 -44E9 8233A537 -44EA 8233A538 -44EB 8233A539 -44EC 8233A630 -44ED 8233A631 -44EE 8233A632 -44EF 8233A633 -44F0 8233A634 -44F1 8233A635 -44F2 8233A636 -44F3 8233A637 -44F4 8233A638 -44F5 8233A639 -44F6 8233A730 -44F7 8233A731 -44F8 8233A732 -44F9 8233A733 -44FA 8233A734 -44FB 8233A735 -44FC 8233A736 -44FD 8233A737 -44FE 8233A738 -44FF 8233A739 -4500 8233A830 -4501 8233A831 -4502 8233A832 -4503 8233A833 -4504 8233A834 -4505 8233A835 -4506 8233A836 -4507 8233A837 -4508 8233A838 -4509 8233A839 -450A 8233A930 -450B 8233A931 -450C 8233A932 -450D 8233A933 -450E 8233A934 -450F 8233A935 -4510 8233A936 -4511 8233A937 -4512 8233A938 -4513 8233A939 -4514 8233AA30 -4515 8233AA31 -4516 8233AA32 -4517 8233AA33 -4518 8233AA34 -4519 8233AA35 -451A 8233AA36 -451B 8233AA37 -451C 8233AA38 -451D 8233AA39 -451E 8233AB30 -451F 8233AB31 -4520 8233AB32 -4521 8233AB33 -4522 8233AB34 -4523 8233AB35 -4524 8233AB36 -4525 8233AB37 -4526 8233AB38 -4527 8233AB39 -4528 8233AC30 -4529 8233AC31 -452A 8233AC32 -452B 8233AC33 -452C 8233AC34 -452D 8233AC35 -452E 8233AC36 -452F 8233AC37 -4530 8233AC38 -4531 8233AC39 -4532 8233AD30 -4533 8233AD31 -4534 8233AD32 -4535 8233AD33 -4536 8233AD34 -4537 8233AD35 -4538 8233AD36 -4539 8233AD37 -453A 8233AD38 -453B 8233AD39 -453C 8233AE30 
-453D 8233AE31 -453E 8233AE32 -453F 8233AE33 -4540 8233AE34 -4541 8233AE35 -4542 8233AE36 -4543 8233AE37 -4544 8233AE38 -4545 8233AE39 -4546 8233AF30 -4547 8233AF31 -4548 8233AF32 -4549 8233AF33 -454A 8233AF34 -454B 8233AF35 -454C 8233AF36 -454D 8233AF37 -454E 8233AF38 -454F 8233AF39 -4550 8233B030 -4551 8233B031 -4552 8233B032 -4553 8233B033 -4554 8233B034 -4555 8233B035 -4556 8233B036 -4557 8233B037 -4558 8233B038 -4559 8233B039 -455A 8233B130 -455B 8233B131 -455C 8233B132 -455D 8233B133 -455E 8233B134 -455F 8233B135 -4560 8233B136 -4561 8233B137 -4562 8233B138 -4563 8233B139 -4564 8233B230 -4565 8233B231 -4566 8233B232 -4567 8233B233 -4568 8233B234 -4569 8233B235 -456A 8233B236 -456B 8233B237 -456C 8233B238 -456D 8233B239 -456E 8233B330 -456F 8233B331 -4570 8233B332 -4571 8233B333 -4572 8233B334 -4573 8233B335 -4574 8233B336 -4575 8233B337 -4576 8233B338 -4577 8233B339 -4578 8233B430 -4579 8233B431 -457A 8233B432 -457B 8233B433 -457C 8233B434 -457D 8233B435 -457E 8233B436 -457F 8233B437 -4580 8233B438 -4581 8233B439 -4582 8233B530 -4583 8233B531 -4584 8233B532 -4585 8233B533 -4586 8233B534 -4587 8233B535 -4588 8233B536 -4589 8233B537 -458A 8233B538 -458B 8233B539 -458C 8233B630 -458D 8233B631 -458E 8233B632 -458F 8233B633 -4590 8233B634 -4591 8233B635 -4592 8233B636 -4593 8233B637 -4594 8233B638 -4595 8233B639 -4596 8233B730 -4597 8233B731 -4598 8233B732 -4599 8233B733 -459A 8233B734 -459B 8233B735 -459C 8233B736 -459D 8233B737 -459E 8233B738 -459F 8233B739 -45A0 8233B830 -45A1 8233B831 -45A2 8233B832 -45A3 8233B833 -45A4 8233B834 -45A5 8233B835 -45A6 8233B836 -45A7 8233B837 -45A8 8233B838 -45A9 8233B839 -45AA 8233B930 -45AB 8233B931 -45AC 8233B932 -45AD 8233B933 -45AE 8233B934 -45AF 8233B935 -45B0 8233B936 -45B1 8233B937 -45B2 8233B938 -45B3 8233B939 -45B4 8233BA30 -45B5 8233BA31 -45B6 8233BA32 -45B7 8233BA33 -45B8 8233BA34 -45B9 8233BA35 -45BA 8233BA36 -45BB 8233BA37 -45BC 8233BA38 -45BD 8233BA39 -45BE 8233BB30 -45BF 8233BB31 -45C0 8233BB32 -45C1 8233BB33 -45C2 8233BB34 -45C3 8233BB35 -45C4 8233BB36 -45C5 8233BB37 -45C6 8233BB38 -45C7 8233BB39 -45C8 8233BC30 -45C9 8233BC31 -45CA 8233BC32 -45CB 8233BC33 -45CC 8233BC34 -45CD 8233BC35 -45CE 8233BC36 -45CF 8233BC37 -45D0 8233BC38 -45D1 8233BC39 -45D2 8233BD30 -45D3 8233BD31 -45D4 8233BD32 -45D5 8233BD33 -45D6 8233BD34 -45D7 8233BD35 -45D8 8233BD36 -45D9 8233BD37 -45DA 8233BD38 -45DB 8233BD39 -45DC 8233BE30 -45DD 8233BE31 -45DE 8233BE32 -45DF 8233BE33 -45E0 8233BE34 -45E1 8233BE35 -45E2 8233BE36 -45E3 8233BE37 -45E4 8233BE38 -45E5 8233BE39 -45E6 8233BF30 -45E7 8233BF31 -45E8 8233BF32 -45E9 8233BF33 -45EA 8233BF34 -45EB 8233BF35 -45EC 8233BF36 -45ED 8233BF37 -45EE 8233BF38 -45EF 8233BF39 -45F0 8233C030 -45F1 8233C031 -45F2 8233C032 -45F3 8233C033 -45F4 8233C034 -45F5 8233C035 -45F6 8233C036 -45F7 8233C037 -45F8 8233C038 -45F9 8233C039 -45FA 8233C130 -45FB 8233C131 -45FC 8233C132 -45FD 8233C133 -45FE 8233C134 -45FF 8233C135 -4600 8233C136 -4601 8233C137 -4602 8233C138 -4603 8233C139 -4604 8233C230 -4605 8233C231 -4606 8233C232 -4607 8233C233 -4608 8233C234 -4609 8233C235 -460A 8233C236 -460B 8233C237 -460C 8233C238 -460D 8233C239 -460E 8233C330 -460F 8233C331 -4610 8233C332 -4611 8233C333 -4612 8233C334 -4613 8233C335 -4614 8233C336 -4615 8233C337 -4616 8233C338 -4617 8233C339 -4618 8233C430 -4619 8233C431 -461A 8233C432 -461B 8233C433 -461C 8233C434 -461D 8233C435 -461E 8233C436 -461F 8233C437 -4620 8233C438 -4621 8233C439 -4622 8233C530 -4623 8233C531 -4624 8233C532 -4625 8233C533 -4626 8233C534 -4627 8233C535 -4628 8233C536 -4629 8233C537 
-462A 8233C538 -462B 8233C539 -462C 8233C630 -462D 8233C631 -462E 8233C632 -462F 8233C633 -4630 8233C634 -4631 8233C635 -4632 8233C636 -4633 8233C637 -4634 8233C638 -4635 8233C639 -4636 8233C730 -4637 8233C731 -4638 8233C732 -4639 8233C733 -463A 8233C734 -463B 8233C735 -463C 8233C736 -463D 8233C737 -463E 8233C738 -463F 8233C739 -4640 8233C830 -4641 8233C831 -4642 8233C832 -4643 8233C833 -4644 8233C834 -4645 8233C835 -4646 8233C836 -4647 8233C837 -4648 8233C838 -4649 8233C839 -464A 8233C930 -464B 8233C931 -464C FE7D -464D 8233C932 -464E 8233C933 -464F 8233C934 -4650 8233C935 -4651 8233C936 -4652 8233C937 -4653 8233C938 -4654 8233C939 -4655 8233CA30 -4656 8233CA31 -4657 8233CA32 -4658 8233CA33 -4659 8233CA34 -465A 8233CA35 -465B 8233CA36 -465C 8233CA37 -465D 8233CA38 -465E 8233CA39 -465F 8233CB30 -4660 8233CB31 -4661 FE7C -4662 8233CB32 -4663 8233CB33 -4664 8233CB34 -4665 8233CB35 -4666 8233CB36 -4667 8233CB37 -4668 8233CB38 -4669 8233CB39 -466A 8233CC30 -466B 8233CC31 -466C 8233CC32 -466D 8233CC33 -466E 8233CC34 -466F 8233CC35 -4670 8233CC36 -4671 8233CC37 -4672 8233CC38 -4673 8233CC39 -4674 8233CD30 -4675 8233CD31 -4676 8233CD32 -4677 8233CD33 -4678 8233CD34 -4679 8233CD35 -467A 8233CD36 -467B 8233CD37 -467C 8233CD38 -467D 8233CD39 -467E 8233CE30 -467F 8233CE31 -4680 8233CE32 -4681 8233CE33 -4682 8233CE34 -4683 8233CE35 -4684 8233CE36 -4685 8233CE37 -4686 8233CE38 -4687 8233CE39 -4688 8233CF30 -4689 8233CF31 -468A 8233CF32 -468B 8233CF33 -468C 8233CF34 -468D 8233CF35 -468E 8233CF36 -468F 8233CF37 -4690 8233CF38 -4691 8233CF39 -4692 8233D030 -4693 8233D031 -4694 8233D032 -4695 8233D033 -4696 8233D034 -4697 8233D035 -4698 8233D036 -4699 8233D037 -469A 8233D038 -469B 8233D039 -469C 8233D130 -469D 8233D131 -469E 8233D132 -469F 8233D133 -46A0 8233D134 -46A1 8233D135 -46A2 8233D136 -46A3 8233D137 -46A4 8233D138 -46A5 8233D139 -46A6 8233D230 -46A7 8233D231 -46A8 8233D232 -46A9 8233D233 -46AA 8233D234 -46AB 8233D235 -46AC 8233D236 -46AD 8233D237 -46AE 8233D238 -46AF 8233D239 -46B0 8233D330 -46B1 8233D331 -46B2 8233D332 -46B3 8233D333 -46B4 8233D334 -46B5 8233D335 -46B6 8233D336 -46B7 8233D337 -46B8 8233D338 -46B9 8233D339 -46BA 8233D430 -46BB 8233D431 -46BC 8233D432 -46BD 8233D433 -46BE 8233D434 -46BF 8233D435 -46C0 8233D436 -46C1 8233D437 -46C2 8233D438 -46C3 8233D439 -46C4 8233D530 -46C5 8233D531 -46C6 8233D532 -46C7 8233D533 -46C8 8233D534 -46C9 8233D535 -46CA 8233D536 -46CB 8233D537 -46CC 8233D538 -46CD 8233D539 -46CE 8233D630 -46CF 8233D631 -46D0 8233D632 -46D1 8233D633 -46D2 8233D634 -46D3 8233D635 -46D4 8233D636 -46D5 8233D637 -46D6 8233D638 -46D7 8233D639 -46D8 8233D730 -46D9 8233D731 -46DA 8233D732 -46DB 8233D733 -46DC 8233D734 -46DD 8233D735 -46DE 8233D736 -46DF 8233D737 -46E0 8233D738 -46E1 8233D739 -46E2 8233D830 -46E3 8233D831 -46E4 8233D832 -46E5 8233D833 -46E6 8233D834 -46E7 8233D835 -46E8 8233D836 -46E9 8233D837 -46EA 8233D838 -46EB 8233D839 -46EC 8233D930 -46ED 8233D931 -46EE 8233D932 -46EF 8233D933 -46F0 8233D934 -46F1 8233D935 -46F2 8233D936 -46F3 8233D937 -46F4 8233D938 -46F5 8233D939 -46F6 8233DA30 -46F7 8233DA31 -46F8 8233DA32 -46F9 8233DA33 -46FA 8233DA34 -46FB 8233DA35 -46FC 8233DA36 -46FD 8233DA37 -46FE 8233DA38 -46FF 8233DA39 -4700 8233DB30 -4701 8233DB31 -4702 8233DB32 -4703 8233DB33 -4704 8233DB34 -4705 8233DB35 -4706 8233DB36 -4707 8233DB37 -4708 8233DB38 -4709 8233DB39 -470A 8233DC30 -470B 8233DC31 -470C 8233DC32 -470D 8233DC33 -470E 8233DC34 -470F 8233DC35 -4710 8233DC36 -4711 8233DC37 -4712 8233DC38 -4713 8233DC39 -4714 8233DD30 -4715 8233DD31 -4716 8233DD32 -4717 
8233DD33 -4718 8233DD34 -4719 8233DD35 -471A 8233DD36 -471B 8233DD37 -471C 8233DD38 -471D 8233DD39 -471E 8233DE30 -471F 8233DE31 -4720 8233DE32 -4721 8233DE33 -4722 8233DE34 -4723 FE80 -4724 8233DE35 -4725 8233DE36 -4726 8233DE37 -4727 8233DE38 -4728 8233DE39 -4729 FE81 -472A 8233DF30 -472B 8233DF31 -472C 8233DF32 -472D 8233DF33 -472E 8233DF34 -472F 8233DF35 -4730 8233DF36 -4731 8233DF37 -4732 8233DF38 -4733 8233DF39 -4734 8233E030 -4735 8233E031 -4736 8233E032 -4737 8233E033 -4738 8233E034 -4739 8233E035 -473A 8233E036 -473B 8233E037 -473C 8233E038 -473D 8233E039 -473E 8233E130 -473F 8233E131 -4740 8233E132 -4741 8233E133 -4742 8233E134 -4743 8233E135 -4744 8233E136 -4745 8233E137 -4746 8233E138 -4747 8233E139 -4748 8233E230 -4749 8233E231 -474A 8233E232 -474B 8233E233 -474C 8233E234 -474D 8233E235 -474E 8233E236 -474F 8233E237 -4750 8233E238 -4751 8233E239 -4752 8233E330 -4753 8233E331 -4754 8233E332 -4755 8233E333 -4756 8233E334 -4757 8233E335 -4758 8233E336 -4759 8233E337 -475A 8233E338 -475B 8233E339 -475C 8233E430 -475D 8233E431 -475E 8233E432 -475F 8233E433 -4760 8233E434 -4761 8233E435 -4762 8233E436 -4763 8233E437 -4764 8233E438 -4765 8233E439 -4766 8233E530 -4767 8233E531 -4768 8233E532 -4769 8233E533 -476A 8233E534 -476B 8233E535 -476C 8233E536 -476D 8233E537 -476E 8233E538 -476F 8233E539 -4770 8233E630 -4771 8233E631 -4772 8233E632 -4773 8233E633 -4774 8233E634 -4775 8233E635 -4776 8233E636 -4777 8233E637 -4778 8233E638 -4779 8233E639 -477A 8233E730 -477B 8233E731 -477C FE82 -477D 8233E732 -477E 8233E733 -477F 8233E734 -4780 8233E735 -4781 8233E736 -4782 8233E737 -4783 8233E738 -4784 8233E739 -4785 8233E830 -4786 8233E831 -4787 8233E832 -4788 8233E833 -4789 8233E834 -478A 8233E835 -478B 8233E836 -478C 8233E837 -478D FE83 -478E 8233E838 -478F 8233E839 -4790 8233E930 -4791 8233E931 -4792 8233E932 -4793 8233E933 -4794 8233E934 -4795 8233E935 -4796 8233E936 -4797 8233E937 -4798 8233E938 -4799 8233E939 -479A 8233EA30 -479B 8233EA31 -479C 8233EA32 -479D 8233EA33 -479E 8233EA34 -479F 8233EA35 -47A0 8233EA36 -47A1 8233EA37 -47A2 8233EA38 -47A3 8233EA39 -47A4 8233EB30 -47A5 8233EB31 -47A6 8233EB32 -47A7 8233EB33 -47A8 8233EB34 -47A9 8233EB35 -47AA 8233EB36 -47AB 8233EB37 -47AC 8233EB38 -47AD 8233EB39 -47AE 8233EC30 -47AF 8233EC31 -47B0 8233EC32 -47B1 8233EC33 -47B2 8233EC34 -47B3 8233EC35 -47B4 8233EC36 -47B5 8233EC37 -47B6 8233EC38 -47B7 8233EC39 -47B8 8233ED30 -47B9 8233ED31 -47BA 8233ED32 -47BB 8233ED33 -47BC 8233ED34 -47BD 8233ED35 -47BE 8233ED36 -47BF 8233ED37 -47C0 8233ED38 -47C1 8233ED39 -47C2 8233EE30 -47C3 8233EE31 -47C4 8233EE32 -47C5 8233EE33 -47C6 8233EE34 -47C7 8233EE35 -47C8 8233EE36 -47C9 8233EE37 -47CA 8233EE38 -47CB 8233EE39 -47CC 8233EF30 -47CD 8233EF31 -47CE 8233EF32 -47CF 8233EF33 -47D0 8233EF34 -47D1 8233EF35 -47D2 8233EF36 -47D3 8233EF37 -47D4 8233EF38 -47D5 8233EF39 -47D6 8233F030 -47D7 8233F031 -47D8 8233F032 -47D9 8233F033 -47DA 8233F034 -47DB 8233F035 -47DC 8233F036 -47DD 8233F037 -47DE 8233F038 -47DF 8233F039 -47E0 8233F130 -47E1 8233F131 -47E2 8233F132 -47E3 8233F133 -47E4 8233F134 -47E5 8233F135 -47E6 8233F136 -47E7 8233F137 -47E8 8233F138 -47E9 8233F139 -47EA 8233F230 -47EB 8233F231 -47EC 8233F232 -47ED 8233F233 -47EE 8233F234 -47EF 8233F235 -47F0 8233F236 -47F1 8233F237 -47F2 8233F238 -47F3 8233F239 -47F4 8233F330 -47F5 8233F331 -47F6 8233F332 -47F7 8233F333 -47F8 8233F334 -47F9 8233F335 -47FA 8233F336 -47FB 8233F337 -47FC 8233F338 -47FD 8233F339 -47FE 8233F430 -47FF 8233F431 -4800 8233F432 -4801 8233F433 -4802 8233F434 -4803 8233F435 -4804 8233F436 -4805 
8233F437 -4806 8233F438 -4807 8233F439 -4808 8233F530 -4809 8233F531 -480A 8233F532 -480B 8233F533 -480C 8233F534 -480D 8233F535 -480E 8233F536 -480F 8233F537 -4810 8233F538 -4811 8233F539 -4812 8233F630 -4813 8233F631 -4814 8233F632 -4815 8233F633 -4816 8233F634 -4817 8233F635 -4818 8233F636 -4819 8233F637 -481A 8233F638 -481B 8233F639 -481C 8233F730 -481D 8233F731 -481E 8233F732 -481F 8233F733 -4820 8233F734 -4821 8233F735 -4822 8233F736 -4823 8233F737 -4824 8233F738 -4825 8233F739 -4826 8233F830 -4827 8233F831 -4828 8233F832 -4829 8233F833 -482A 8233F834 -482B 8233F835 -482C 8233F836 -482D 8233F837 -482E 8233F838 -482F 8233F839 -4830 8233F930 -4831 8233F931 -4832 8233F932 -4833 8233F933 -4834 8233F934 -4835 8233F935 -4836 8233F936 -4837 8233F937 -4838 8233F938 -4839 8233F939 -483A 8233FA30 -483B 8233FA31 -483C 8233FA32 -483D 8233FA33 -483E 8233FA34 -483F 8233FA35 -4840 8233FA36 -4841 8233FA37 -4842 8233FA38 -4843 8233FA39 -4844 8233FB30 -4845 8233FB31 -4846 8233FB32 -4847 8233FB33 -4848 8233FB34 -4849 8233FB35 -484A 8233FB36 -484B 8233FB37 -484C 8233FB38 -484D 8233FB39 -484E 8233FC30 -484F 8233FC31 -4850 8233FC32 -4851 8233FC33 -4852 8233FC34 -4853 8233FC35 -4854 8233FC36 -4855 8233FC37 -4856 8233FC38 -4857 8233FC39 -4858 8233FD30 -4859 8233FD31 -485A 8233FD32 -485B 8233FD33 -485C 8233FD34 -485D 8233FD35 -485E 8233FD36 -485F 8233FD37 -4860 8233FD38 -4861 8233FD39 -4862 8233FE30 -4863 8233FE31 -4864 8233FE32 -4865 8233FE33 -4866 8233FE34 -4867 8233FE35 -4868 8233FE36 -4869 8233FE37 -486A 8233FE38 -486B 8233FE39 -486C 82348130 -486D 82348131 -486E 82348132 -486F 82348133 -4870 82348134 -4871 82348135 -4872 82348136 -4873 82348137 -4874 82348138 -4875 82348139 -4876 82348230 -4877 82348231 -4878 82348232 -4879 82348233 -487A 82348234 -487B 82348235 -487C 82348236 -487D 82348237 -487E 82348238 -487F 82348239 -4880 82348330 -4881 82348331 -4882 82348332 -4883 82348333 -4884 82348334 -4885 82348335 -4886 82348336 -4887 82348337 -4888 82348338 -4889 82348339 -488A 82348430 -488B 82348431 -488C 82348432 -488D 82348433 -488E 82348434 -488F 82348435 -4890 82348436 -4891 82348437 -4892 82348438 -4893 82348439 -4894 82348530 -4895 82348531 -4896 82348532 -4897 82348533 -4898 82348534 -4899 82348535 -489A 82348536 -489B 82348537 -489C 82348538 -489D 82348539 -489E 82348630 -489F 82348631 -48A0 82348632 -48A1 82348633 -48A2 82348634 -48A3 82348635 -48A4 82348636 -48A5 82348637 -48A6 82348638 -48A7 82348639 -48A8 82348730 -48A9 82348731 -48AA 82348732 -48AB 82348733 -48AC 82348734 -48AD 82348735 -48AE 82348736 -48AF 82348737 -48B0 82348738 -48B1 82348739 -48B2 82348830 -48B3 82348831 -48B4 82348832 -48B5 82348833 -48B6 82348834 -48B7 82348835 -48B8 82348836 -48B9 82348837 -48BA 82348838 -48BB 82348839 -48BC 82348930 -48BD 82348931 -48BE 82348932 -48BF 82348933 -48C0 82348934 -48C1 82348935 -48C2 82348936 -48C3 82348937 -48C4 82348938 -48C5 82348939 -48C6 82348A30 -48C7 82348A31 -48C8 82348A32 -48C9 82348A33 -48CA 82348A34 -48CB 82348A35 -48CC 82348A36 -48CD 82348A37 -48CE 82348A38 -48CF 82348A39 -48D0 82348B30 -48D1 82348B31 -48D2 82348B32 -48D3 82348B33 -48D4 82348B34 -48D5 82348B35 -48D6 82348B36 -48D7 82348B37 -48D8 82348B38 -48D9 82348B39 -48DA 82348C30 -48DB 82348C31 -48DC 82348C32 -48DD 82348C33 -48DE 82348C34 -48DF 82348C35 -48E0 82348C36 -48E1 82348C37 -48E2 82348C38 -48E3 82348C39 -48E4 82348D30 -48E5 82348D31 -48E6 82348D32 -48E7 82348D33 -48E8 82348D34 -48E9 82348D35 -48EA 82348D36 -48EB 82348D37 -48EC 82348D38 -48ED 82348D39 -48EE 82348E30 -48EF 82348E31 -48F0 82348E32 -48F1 82348E33 -48F2 
82348E34 -48F3 82348E35 -48F4 82348E36 -48F5 82348E37 -48F6 82348E38 -48F7 82348E39 -48F8 82348F30 -48F9 82348F31 -48FA 82348F32 -48FB 82348F33 -48FC 82348F34 -48FD 82348F35 -48FE 82348F36 -48FF 82348F37 -4900 82348F38 -4901 82348F39 -4902 82349030 -4903 82349031 -4904 82349032 -4905 82349033 -4906 82349034 -4907 82349035 -4908 82349036 -4909 82349037 -490A 82349038 -490B 82349039 -490C 82349130 -490D 82349131 -490E 82349132 -490F 82349133 -4910 82349134 -4911 82349135 -4912 82349136 -4913 82349137 -4914 82349138 -4915 82349139 -4916 82349230 -4917 82349231 -4918 82349232 -4919 82349233 -491A 82349234 -491B 82349235 -491C 82349236 -491D 82349237 -491E 82349238 -491F 82349239 -4920 82349330 -4921 82349331 -4922 82349332 -4923 82349333 -4924 82349334 -4925 82349335 -4926 82349336 -4927 82349337 -4928 82349338 -4929 82349339 -492A 82349430 -492B 82349431 -492C 82349432 -492D 82349433 -492E 82349434 -492F 82349435 -4930 82349436 -4931 82349437 -4932 82349438 -4933 82349439 -4934 82349530 -4935 82349531 -4936 82349532 -4937 82349533 -4938 82349534 -4939 82349535 -493A 82349536 -493B 82349537 -493C 82349538 -493D 82349539 -493E 82349630 -493F 82349631 -4940 82349632 -4941 82349633 -4942 82349634 -4943 82349635 -4944 82349636 -4945 82349637 -4946 82349638 -4947 FE85 -4948 82349639 -4949 82349730 -494A 82349731 -494B 82349732 -494C 82349733 -494D 82349734 -494E 82349735 -494F 82349736 -4950 82349737 -4951 82349738 -4952 82349739 -4953 82349830 -4954 82349831 -4955 82349832 -4956 82349833 -4957 82349834 -4958 82349835 -4959 82349836 -495A 82349837 -495B 82349838 -495C 82349839 -495D 82349930 -495E 82349931 -495F 82349932 -4960 82349933 -4961 82349934 -4962 82349935 -4963 82349936 -4964 82349937 -4965 82349938 -4966 82349939 -4967 82349A30 -4968 82349A31 -4969 82349A32 -496A 82349A33 -496B 82349A34 -496C 82349A35 -496D 82349A36 -496E 82349A37 -496F 82349A38 -4970 82349A39 -4971 82349B30 -4972 82349B31 -4973 82349B32 -4974 82349B33 -4975 82349B34 -4976 82349B35 -4977 82349B36 -4978 82349B37 -4979 82349B38 -497A FE86 -497B 82349B39 -497C 82349C30 -497D FE87 -497E 82349C31 -497F 82349C32 -4980 82349C33 -4981 82349C34 -4982 FE88 -4983 FE89 -4984 82349C35 -4985 FE8A -4986 FE8B -4987 82349C36 -4988 82349C37 -4989 82349C38 -498A 82349C39 -498B 82349D30 -498C 82349D31 -498D 82349D32 -498E 82349D33 -498F 82349D34 -4990 82349D35 -4991 82349D36 -4992 82349D37 -4993 82349D38 -4994 82349D39 -4995 82349E30 -4996 82349E31 -4997 82349E32 -4998 82349E33 -4999 82349E34 -499A 82349E35 -499B FE8D -499C 82349E36 -499D 82349E37 -499E 82349E38 -499F FE8C -49A0 82349E39 -49A1 82349F30 -49A2 82349F31 -49A3 82349F32 -49A4 82349F33 -49A5 82349F34 -49A6 82349F35 -49A7 82349F36 -49A8 82349F37 -49A9 82349F38 -49AA 82349F39 -49AB 8234A030 -49AC 8234A031 -49AD 8234A032 -49AE 8234A033 -49AF 8234A034 -49B0 8234A035 -49B1 8234A036 -49B2 8234A037 -49B3 8234A038 -49B4 8234A039 -49B5 8234A130 -49B6 FE8F -49B7 FE8E -49B8 8234A131 -49B9 8234A132 -49BA 8234A133 -49BB 8234A134 -49BC 8234A135 -49BD 8234A136 -49BE 8234A137 -49BF 8234A138 -49C0 8234A139 -49C1 8234A230 -49C2 8234A231 -49C3 8234A232 -49C4 8234A233 -49C5 8234A234 -49C6 8234A235 -49C7 8234A236 -49C8 8234A237 -49C9 8234A238 -49CA 8234A239 -49CB 8234A330 -49CC 8234A331 -49CD 8234A332 -49CE 8234A333 -49CF 8234A334 -49D0 8234A335 -49D1 8234A336 -49D2 8234A337 -49D3 8234A338 -49D4 8234A339 -49D5 8234A430 -49D6 8234A431 -49D7 8234A432 -49D8 8234A433 -49D9 8234A434 -49DA 8234A435 -49DB 8234A436 -49DC 8234A437 -49DD 8234A438 -49DE 8234A439 -49DF 8234A530 -49E0 8234A531 -49E1 8234A532 
-49E2 8234A533 -49E3 8234A534 -49E4 8234A535 -49E5 8234A536 -49E6 8234A537 -49E7 8234A538 -49E8 8234A539 -49E9 8234A630 -49EA 8234A631 -49EB 8234A632 -49EC 8234A633 -49ED 8234A634 -49EE 8234A635 -49EF 8234A636 -49F0 8234A637 -49F1 8234A638 -49F2 8234A639 -49F3 8234A730 -49F4 8234A731 -49F5 8234A732 -49F6 8234A733 -49F7 8234A734 -49F8 8234A735 -49F9 8234A736 -49FA 8234A737 -49FB 8234A738 -49FC 8234A739 -49FD 8234A830 -49FE 8234A831 -49FF 8234A832 -4A00 8234A833 -4A01 8234A834 -4A02 8234A835 -4A03 8234A836 -4A04 8234A837 -4A05 8234A838 -4A06 8234A839 -4A07 8234A930 -4A08 8234A931 -4A09 8234A932 -4A0A 8234A933 -4A0B 8234A934 -4A0C 8234A935 -4A0D 8234A936 -4A0E 8234A937 -4A0F 8234A938 -4A10 8234A939 -4A11 8234AA30 -4A12 8234AA31 -4A13 8234AA32 -4A14 8234AA33 -4A15 8234AA34 -4A16 8234AA35 -4A17 8234AA36 -4A18 8234AA37 -4A19 8234AA38 -4A1A 8234AA39 -4A1B 8234AB30 -4A1C 8234AB31 -4A1D 8234AB32 -4A1E 8234AB33 -4A1F 8234AB34 -4A20 8234AB35 -4A21 8234AB36 -4A22 8234AB37 -4A23 8234AB38 -4A24 8234AB39 -4A25 8234AC30 -4A26 8234AC31 -4A27 8234AC32 -4A28 8234AC33 -4A29 8234AC34 -4A2A 8234AC35 -4A2B 8234AC36 -4A2C 8234AC37 -4A2D 8234AC38 -4A2E 8234AC39 -4A2F 8234AD30 -4A30 8234AD31 -4A31 8234AD32 -4A32 8234AD33 -4A33 8234AD34 -4A34 8234AD35 -4A35 8234AD36 -4A36 8234AD37 -4A37 8234AD38 -4A38 8234AD39 -4A39 8234AE30 -4A3A 8234AE31 -4A3B 8234AE32 -4A3C 8234AE33 -4A3D 8234AE34 -4A3E 8234AE35 -4A3F 8234AE36 -4A40 8234AE37 -4A41 8234AE38 -4A42 8234AE39 -4A43 8234AF30 -4A44 8234AF31 -4A45 8234AF32 -4A46 8234AF33 -4A47 8234AF34 -4A48 8234AF35 -4A49 8234AF36 -4A4A 8234AF37 -4A4B 8234AF38 -4A4C 8234AF39 -4A4D 8234B030 -4A4E 8234B031 -4A4F 8234B032 -4A50 8234B033 -4A51 8234B034 -4A52 8234B035 -4A53 8234B036 -4A54 8234B037 -4A55 8234B038 -4A56 8234B039 -4A57 8234B130 -4A58 8234B131 -4A59 8234B132 -4A5A 8234B133 -4A5B 8234B134 -4A5C 8234B135 -4A5D 8234B136 -4A5E 8234B137 -4A5F 8234B138 -4A60 8234B139 -4A61 8234B230 -4A62 8234B231 -4A63 8234B232 -4A64 8234B233 -4A65 8234B234 -4A66 8234B235 -4A67 8234B236 -4A68 8234B237 -4A69 8234B238 -4A6A 8234B239 -4A6B 8234B330 -4A6C 8234B331 -4A6D 8234B332 -4A6E 8234B333 -4A6F 8234B334 -4A70 8234B335 -4A71 8234B336 -4A72 8234B337 -4A73 8234B338 -4A74 8234B339 -4A75 8234B430 -4A76 8234B431 -4A77 8234B432 -4A78 8234B433 -4A79 8234B434 -4A7A 8234B435 -4A7B 8234B436 -4A7C 8234B437 -4A7D 8234B438 -4A7E 8234B439 -4A7F 8234B530 -4A80 8234B531 -4A81 8234B532 -4A82 8234B533 -4A83 8234B534 -4A84 8234B535 -4A85 8234B536 -4A86 8234B537 -4A87 8234B538 -4A88 8234B539 -4A89 8234B630 -4A8A 8234B631 -4A8B 8234B632 -4A8C 8234B633 -4A8D 8234B634 -4A8E 8234B635 -4A8F 8234B636 -4A90 8234B637 -4A91 8234B638 -4A92 8234B639 -4A93 8234B730 -4A94 8234B731 -4A95 8234B732 -4A96 8234B733 -4A97 8234B734 -4A98 8234B735 -4A99 8234B736 -4A9A 8234B737 -4A9B 8234B738 -4A9C 8234B739 -4A9D 8234B830 -4A9E 8234B831 -4A9F 8234B832 -4AA0 8234B833 -4AA1 8234B834 -4AA2 8234B835 -4AA3 8234B836 -4AA4 8234B837 -4AA5 8234B838 -4AA6 8234B839 -4AA7 8234B930 -4AA8 8234B931 -4AA9 8234B932 -4AAA 8234B933 -4AAB 8234B934 -4AAC 8234B935 -4AAD 8234B936 -4AAE 8234B937 -4AAF 8234B938 -4AB0 8234B939 -4AB1 8234BA30 -4AB2 8234BA31 -4AB3 8234BA32 -4AB4 8234BA33 -4AB5 8234BA34 -4AB6 8234BA35 -4AB7 8234BA36 -4AB8 8234BA37 -4AB9 8234BA38 -4ABA 8234BA39 -4ABB 8234BB30 -4ABC 8234BB31 -4ABD 8234BB32 -4ABE 8234BB33 -4ABF 8234BB34 -4AC0 8234BB35 -4AC1 8234BB36 -4AC2 8234BB37 -4AC3 8234BB38 -4AC4 8234BB39 -4AC5 8234BC30 -4AC6 8234BC31 -4AC7 8234BC32 -4AC8 8234BC33 -4AC9 8234BC34 -4ACA 8234BC35 -4ACB 8234BC36 -4ACC 8234BC37 -4ACD 8234BC38 -4ACE 8234BC39 
[... deleted GB18030 mapping table, elided: one "-<Unicode code point> <GB18030 byte sequence>" removal line per entry, covering the consecutive code points U+4ACF through U+6429 (6,491 entries). Two-byte GBK codes (e.g. "-4E00 D2BB" for 一) interleave with four-byte linear sequences ("-4ACF 8234BD30", "-4AD0 8234BD31", ...) whose trailing bytes advance in lockstep with the code point ...]
-642A CCC2 -642B 9384 -642C B0E1 -642D B4EE -642E 9385 -642F 9386 -6430 9387 -6431 9388 -6432 9389 -6433 938A -6434 E5BA -6435 938B -6436 938C -6437 938D -6438 938E -6439 938F -643A D0AF -643B 9390 -643C 9391 -643D B2EB -643E 9392 -643F EBA1 -6440 9393 -6441 DEF4 -6442 9394 -6443 9395 -6444 C9E3 -6445 DEF3 -6446 B0DA -6447 D2A1 -6448 B1F7 -6449 9396 -644A CCAF -644B 9397 -644C 9398 -644D 9399 -644E 939A -644F 939B -6450 939C -6451 939D -6452 DEF0 -6453 939E -6454 CBA4 -6455 939F -6456 93A0 -6457 93A1 -6458 D5AA -6459 93A2 -645A 93A3 -645B 93A4 -645C 93A5 -645D 93A6 -645E DEFB -645F 93A7 -6460 93A8 -6461 93A9 -6462 93AA -6463 93AB -6464 93AC -6465 93AD -6466 93AE -6467 B4DD -6468 93AF -6469 C4A6 -646A 93B0 -646B 93B1 -646C 93B2 -646D DEFD -646E 93B3 -646F 93B4 -6470 93B5 -6471 93B6 -6472 93B7 -6473 93B8 -6474 93B9 -6475 93BA -6476 93BB -6477 93BC -6478 C3FE -6479 C4A1 -647A DFA1 -647B 93BD -647C 93BE -647D 93BF -647E 93C0 -647F 93C1 -6480 93C2 -6481 93C3 -6482 C1CC -6483 93C4 -6484 DEFC -6485 BEEF -6486 93C5 -6487 C6B2 -6488 93C6 -6489 93C7 -648A 93C8 -648B 93C9 -648C 93CA -648D 93CB -648E 93CC -648F 93CD -6490 93CE -6491 B3C5 -6492 C8F6 -6493 93CF -6494 93D0 -6495 CBBA -6496 DEFE -6497 93D1 -6498 93D2 -6499 DFA4 -649A 93D3 -649B 93D4 -649C 93D5 -649D 93D6 -649E D7B2 -649F 93D7 -64A0 93D8 -64A1 93D9 -64A2 93DA -64A3 93DB -64A4 B3B7 -64A5 93DC -64A6 93DD -64A7 93DE -64A8 93DF -64A9 C1C3 -64AA 93E0 -64AB 93E1 -64AC C7CB -64AD B2A5 -64AE B4E9 -64AF 93E2 -64B0 D7AB -64B1 93E3 -64B2 93E4 -64B3 93E5 -64B4 93E6 -64B5 C4EC -64B6 93E7 -64B7 DFA2 -64B8 DFA3 -64B9 93E8 -64BA DFA5 -64BB 93E9 -64BC BAB3 -64BD 93EA -64BE 93EB -64BF 93EC -64C0 DFA6 -64C1 93ED -64C2 C0DE -64C3 93EE -64C4 93EF -64C5 C9C3 -64C6 93F0 -64C7 93F1 -64C8 93F2 -64C9 93F3 -64CA 93F4 -64CB 93F5 -64CC 93F6 -64CD B2D9 -64CE C7E6 -64CF 93F7 -64D0 DFA7 -64D1 93F8 -64D2 C7DC -64D3 93F9 -64D4 93FA -64D5 93FB -64D6 93FC -64D7 DFA8 -64D8 EBA2 -64D9 93FD -64DA 93FE -64DB 9440 -64DC 9441 -64DD 9442 -64DE CBD3 -64DF 9443 -64E0 9444 -64E1 9445 -64E2 DFAA -64E3 9446 -64E4 DFA9 -64E5 9447 -64E6 B2C1 -64E7 9448 -64E8 9449 -64E9 944A -64EA 944B -64EB 944C -64EC 944D -64ED 944E -64EE 944F -64EF 9450 -64F0 9451 -64F1 9452 -64F2 9453 -64F3 9454 -64F4 9455 -64F5 9456 -64F6 9457 -64F7 9458 -64F8 9459 -64F9 945A -64FA 945B -64FB 945C -64FC 945D -64FD 945E -64FE 945F -64FF 9460 -6500 C5CA -6501 9461 -6502 9462 -6503 9463 -6504 9464 -6505 9465 -6506 9466 -6507 9467 -6508 9468 -6509 DFAB -650A 9469 -650B 946A -650C 946B -650D 946C -650E 946D -650F 946E -6510 946F -6511 9470 -6512 D4DC -6513 9471 -6514 9472 -6515 9473 -6516 9474 -6517 9475 -6518 C8C1 -6519 9476 -651A 9477 -651B 9478 -651C 9479 -651D 947A -651E 947B -651F 947C -6520 947D -6521 947E -6522 9480 -6523 9481 -6524 9482 -6525 DFAC -6526 9483 -6527 9484 -6528 9485 -6529 9486 -652A 9487 -652B BEF0 -652C 9488 -652D 9489 -652E DFAD -652F D6A7 -6530 948A -6531 948B -6532 948C -6533 948D -6534 EAB7 -6535 EBB6 -6536 CAD5 -6537 948E -6538 D8FC -6539 B8C4 -653A 948F -653B B9A5 -653C 9490 -653D 9491 -653E B7C5 -653F D5FE -6540 9492 -6541 9493 -6542 9494 -6543 9495 -6544 9496 -6545 B9CA -6546 9497 -6547 9498 -6548 D0A7 -6549 F4CD -654A 9499 -654B 949A -654C B5D0 -654D 949B -654E 949C -654F C3F4 -6550 949D -6551 BEC8 -6552 949E -6553 949F -6554 94A0 -6555 EBB7 -6556 B0BD -6557 94A1 -6558 94A2 -6559 BDCC -655A 94A3 -655B C1B2 -655C 94A4 -655D B1D6 -655E B3A8 -655F 94A5 -6560 94A6 -6561 94A7 -6562 B8D2 -6563 C9A2 -6564 94A8 -6565 94A9 -6566 B6D8 -6567 94AA -6568 94AB -6569 94AC -656A 94AD -656B EBB8 -656C BEB4 
-656D 94AE -656E 94AF -656F 94B0 -6570 CAFD -6571 94B1 -6572 C7C3 -6573 94B2 -6574 D5FB -6575 94B3 -6576 94B4 -6577 B7F3 -6578 94B5 -6579 94B6 -657A 94B7 -657B 94B8 -657C 94B9 -657D 94BA -657E 94BB -657F 94BC -6580 94BD -6581 94BE -6582 94BF -6583 94C0 -6584 94C1 -6585 94C2 -6586 94C3 -6587 CEC4 -6588 94C4 -6589 94C5 -658A 94C6 -658B D5AB -658C B1F3 -658D 94C7 -658E 94C8 -658F 94C9 -6590 ECB3 -6591 B0DF -6592 94CA -6593 ECB5 -6594 94CB -6595 94CC -6596 94CD -6597 B6B7 -6598 94CE -6599 C1CF -659A 94CF -659B F5FA -659C D0B1 -659D 94D0 -659E 94D1 -659F D5E5 -65A0 94D2 -65A1 CED3 -65A2 94D3 -65A3 94D4 -65A4 BDEF -65A5 B3E2 -65A6 94D5 -65A7 B8AB -65A8 94D6 -65A9 D5B6 -65AA 94D7 -65AB EDBD -65AC 94D8 -65AD B6CF -65AE 94D9 -65AF CBB9 -65B0 D0C2 -65B1 94DA -65B2 94DB -65B3 94DC -65B4 94DD -65B5 94DE -65B6 94DF -65B7 94E0 -65B8 94E1 -65B9 B7BD -65BA 94E2 -65BB 94E3 -65BC ECB6 -65BD CAA9 -65BE 94E4 -65BF 94E5 -65C0 94E6 -65C1 C5D4 -65C2 94E7 -65C3 ECB9 -65C4 ECB8 -65C5 C2C3 -65C6 ECB7 -65C7 94E8 -65C8 94E9 -65C9 94EA -65CA 94EB -65CB D0FD -65CC ECBA -65CD 94EC -65CE ECBB -65CF D7E5 -65D0 94ED -65D1 94EE -65D2 ECBC -65D3 94EF -65D4 94F0 -65D5 94F1 -65D6 ECBD -65D7 C6EC -65D8 94F2 -65D9 94F3 -65DA 94F4 -65DB 94F5 -65DC 94F6 -65DD 94F7 -65DE 94F8 -65DF 94F9 -65E0 CEDE -65E1 94FA -65E2 BCC8 -65E3 94FB -65E4 94FC -65E5 C8D5 -65E6 B5A9 -65E7 BEC9 -65E8 D6BC -65E9 D4E7 -65EA 94FD -65EB 94FE -65EC D1AE -65ED D0F1 -65EE EAB8 -65EF EAB9 -65F0 EABA -65F1 BAB5 -65F2 9540 -65F3 9541 -65F4 9542 -65F5 9543 -65F6 CAB1 -65F7 BFF5 -65F8 9544 -65F9 9545 -65FA CDFA -65FB 9546 -65FC 9547 -65FD 9548 -65FE 9549 -65FF 954A -6600 EAC0 -6601 954B -6602 B0BA -6603 EABE -6604 954C -6605 954D -6606 C0A5 -6607 954E -6608 954F -6609 9550 -660A EABB -660B 9551 -660C B2FD -660D 9552 -660E C3F7 -660F BBE8 -6610 9553 -6611 9554 -6612 9555 -6613 D2D7 -6614 CEF4 -6615 EABF -6616 9556 -6617 9557 -6618 9558 -6619 EABC -661A 9559 -661B 955A -661C 955B -661D EAC3 -661E 955C -661F D0C7 -6620 D3B3 -6621 955D -6622 955E -6623 955F -6624 9560 -6625 B4BA -6626 9561 -6627 C3C1 -6628 D7F2 -6629 9562 -662A 9563 -662B 9564 -662C 9565 -662D D5D1 -662E 9566 -662F CAC7 -6630 9567 -6631 EAC5 -6632 9568 -6633 9569 -6634 EAC4 -6635 EAC7 -6636 EAC6 -6637 956A -6638 956B -6639 956C -663A 956D -663B 956E -663C D6E7 -663D 956F -663E CFD4 -663F 9570 -6640 9571 -6641 EACB -6642 9572 -6643 BBCE -6644 9573 -6645 9574 -6646 9575 -6647 9576 -6648 9577 -6649 9578 -664A 9579 -664B BDFA -664C C9CE -664D 957A -664E 957B -664F EACC -6650 957C -6651 957D -6652 C9B9 -6653 CFFE -6654 EACA -6655 D4CE -6656 EACD -6657 EACF -6658 957E -6659 9580 -665A CDED -665B 9581 -665C 9582 -665D 9583 -665E 9584 -665F EAC9 -6660 9585 -6661 EACE -6662 9586 -6663 9587 -6664 CEEE -6665 9588 -6666 BBDE -6667 9589 -6668 B3BF -6669 958A -666A 958B -666B 958C -666C 958D -666D 958E -666E C6D5 -666F BEB0 -6670 CEFA -6671 958F -6672 9590 -6673 9591 -6674 C7E7 -6675 9592 -6676 BEA7 -6677 EAD0 -6678 9593 -6679 9594 -667A D6C7 -667B 9595 -667C 9596 -667D 9597 -667E C1C0 -667F 9598 -6680 9599 -6681 959A -6682 D4DD -6683 959B -6684 EAD1 -6685 959C -6686 959D -6687 CFBE -6688 959E -6689 959F -668A 95A0 -668B 95A1 -668C EAD2 -668D 95A2 -668E 95A3 -668F 95A4 -6690 95A5 -6691 CAEE -6692 95A6 -6693 95A7 -6694 95A8 -6695 95A9 -6696 C5AF -6697 B0B5 -6698 95AA -6699 95AB -669A 95AC -669B 95AD -669C 95AE -669D EAD4 -669E 95AF -669F 95B0 -66A0 95B1 -66A1 95B2 -66A2 95B3 -66A3 95B4 -66A4 95B5 -66A5 95B6 -66A6 95B7 -66A7 EAD3 -66A8 F4DF -66A9 95B8 -66AA 95B9 -66AB 95BA -66AC 95BB -66AD 95BC -66AE C4BA -66AF 95BD 
-66B0 95BE -66B1 95BF -66B2 95C0 -66B3 95C1 -66B4 B1A9 -66B5 95C2 -66B6 95C3 -66B7 95C4 -66B8 95C5 -66B9 E5DF -66BA 95C6 -66BB 95C7 -66BC 95C8 -66BD 95C9 -66BE EAD5 -66BF 95CA -66C0 95CB -66C1 95CC -66C2 95CD -66C3 95CE -66C4 95CF -66C5 95D0 -66C6 95D1 -66C7 95D2 -66C8 95D3 -66C9 95D4 -66CA 95D5 -66CB 95D6 -66CC 95D7 -66CD 95D8 -66CE 95D9 -66CF 95DA -66D0 95DB -66D1 95DC -66D2 95DD -66D3 95DE -66D4 95DF -66D5 95E0 -66D6 95E1 -66D7 95E2 -66D8 95E3 -66D9 CAEF -66DA 95E4 -66DB EAD6 -66DC EAD7 -66DD C6D8 -66DE 95E5 -66DF 95E6 -66E0 95E7 -66E1 95E8 -66E2 95E9 -66E3 95EA -66E4 95EB -66E5 95EC -66E6 EAD8 -66E7 95ED -66E8 95EE -66E9 EAD9 -66EA 95EF -66EB 95F0 -66EC 95F1 -66ED 95F2 -66EE 95F3 -66EF 95F4 -66F0 D4BB -66F1 95F5 -66F2 C7FA -66F3 D2B7 -66F4 B8FC -66F5 95F6 -66F6 95F7 -66F7 EAC2 -66F8 95F8 -66F9 B2DC -66FA 95F9 -66FB 95FA -66FC C2FC -66FD 95FB -66FE D4F8 -66FF CCE6 -6700 D7EE -6701 95FC -6702 95FD -6703 95FE -6704 9640 -6705 9641 -6706 9642 -6707 9643 -6708 D4C2 -6709 D3D0 -670A EBC3 -670B C5F3 -670C 9644 -670D B7FE -670E 9645 -670F 9646 -6710 EBD4 -6711 9647 -6712 9648 -6713 9649 -6714 CBB7 -6715 EBDE -6716 964A -6717 C0CA -6718 964B -6719 964C -671A 964D -671B CDFB -671C 964E -671D B3AF -671E 964F -671F C6DA -6720 9650 -6721 9651 -6722 9652 -6723 9653 -6724 9654 -6725 9655 -6726 EBFC -6727 9656 -6728 C4BE -6729 9657 -672A CEB4 -672B C4A9 -672C B1BE -672D D4FD -672E 9658 -672F CAF5 -6730 9659 -6731 D6EC -6732 965A -6733 965B -6734 C6D3 -6735 B6E4 -6736 965C -6737 965D -6738 965E -6739 965F -673A BBFA -673B 9660 -673C 9661 -673D D0E0 -673E 9662 -673F 9663 -6740 C9B1 -6741 9664 -6742 D4D3 -6743 C8A8 -6744 9665 -6745 9666 -6746 B8CB -6747 9667 -6748 E8BE -6749 C9BC -674A 9668 -674B 9669 -674C E8BB -674D 966A -674E C0EE -674F D0D3 -6750 B2C4 -6751 B4E5 -6752 966B -6753 E8BC -6754 966C -6755 966D -6756 D5C8 -6757 966E -6758 966F -6759 9670 -675A 9671 -675B 9672 -675C B6C5 -675D 9673 -675E E8BD -675F CAF8 -6760 B8DC -6761 CCF5 -6762 9674 -6763 9675 -6764 9676 -6765 C0B4 -6766 9677 -6767 9678 -6768 D1EE -6769 E8BF -676A E8C2 -676B 9679 -676C 967A -676D BABC -676E 967B -676F B1AD -6770 BDDC -6771 967C -6772 EABD -6773 E8C3 -6774 967D -6775 E8C6 -6776 967E -6777 E8CB -6778 9680 -6779 9681 -677A 9682 -677B 9683 -677C E8CC -677D 9684 -677E CBC9 -677F B0E5 -6780 9685 -6781 BCAB -6782 9686 -6783 9687 -6784 B9B9 -6785 9688 -6786 9689 -6787 E8C1 -6788 968A -6789 CDF7 -678A 968B -678B E8CA -678C 968C -678D 968D -678E 968E -678F 968F -6790 CEF6 -6791 9690 -6792 9691 -6793 9692 -6794 9693 -6795 D5ED -6796 9694 -6797 C1D6 -6798 E8C4 -6799 9695 -679A C3B6 -679B 9696 -679C B9FB -679D D6A6 -679E E8C8 -679F 9697 -67A0 9698 -67A1 9699 -67A2 CAE0 -67A3 D4E6 -67A4 969A -67A5 E8C0 -67A6 969B -67A7 E8C5 -67A8 E8C7 -67A9 969C -67AA C7B9 -67AB B7E3 -67AC 969D -67AD E8C9 -67AE 969E -67AF BFDD -67B0 E8D2 -67B1 969F -67B2 96A0 -67B3 E8D7 -67B4 96A1 -67B5 E8D5 -67B6 BCDC -67B7 BCCF -67B8 E8DB -67B9 96A2 -67BA 96A3 -67BB 96A4 -67BC 96A5 -67BD 96A6 -67BE 96A7 -67BF 96A8 -67C0 96A9 -67C1 E8DE -67C2 96AA -67C3 E8DA -67C4 B1FA -67C5 96AB -67C6 96AC -67C7 96AD -67C8 96AE -67C9 96AF -67CA 96B0 -67CB 96B1 -67CC 96B2 -67CD 96B3 -67CE 96B4 -67CF B0D8 -67D0 C4B3 -67D1 B8CC -67D2 C6E2 -67D3 C8BE -67D4 C8E1 -67D5 96B5 -67D6 96B6 -67D7 96B7 -67D8 E8CF -67D9 E8D4 -67DA E8D6 -67DB 96B8 -67DC B9F1 -67DD E8D8 -67DE D7F5 -67DF 96B9 -67E0 C4FB -67E1 96BA -67E2 E8DC -67E3 96BB -67E4 96BC -67E5 B2E9 -67E6 96BD -67E7 96BE -67E8 96BF -67E9 E8D1 -67EA 96C0 -67EB 96C1 -67EC BCED -67ED 96C2 -67EE 96C3 -67EF BFC2 -67F0 E8CD -67F1 D6F9 -67F2 96C4 
-67F3 C1F8 -67F4 B2F1 -67F5 96C5 -67F6 96C6 -67F7 96C7 -67F8 96C8 -67F9 96C9 -67FA 96CA -67FB 96CB -67FC 96CC -67FD E8DF -67FE 96CD -67FF CAC1 -6800 E8D9 -6801 96CE -6802 96CF -6803 96D0 -6804 96D1 -6805 D5A4 -6806 96D2 -6807 B1EA -6808 D5BB -6809 E8CE -680A E8D0 -680B B6B0 -680C E8D3 -680D 96D3 -680E E8DD -680F C0B8 -6810 96D4 -6811 CAF7 -6812 96D5 -6813 CBA8 -6814 96D6 -6815 96D7 -6816 C6DC -6817 C0F5 -6818 96D8 -6819 96D9 -681A 96DA -681B 96DB -681C 96DC -681D E8E9 -681E 96DD -681F 96DE -6820 96DF -6821 D0A3 -6822 96E0 -6823 96E1 -6824 96E2 -6825 96E3 -6826 96E4 -6827 96E5 -6828 96E6 -6829 E8F2 -682A D6EA -682B 96E7 -682C 96E8 -682D 96E9 -682E 96EA -682F 96EB -6830 96EC -6831 96ED -6832 E8E0 -6833 E8E1 -6834 96EE -6835 96EF -6836 96F0 -6837 D1F9 -6838 BACB -6839 B8F9 -683A 96F1 -683B 96F2 -683C B8F1 -683D D4D4 -683E E8EF -683F 96F3 -6840 E8EE -6841 E8EC -6842 B9F0 -6843 CCD2 -6844 E8E6 -6845 CEA6 -6846 BFF2 -6847 96F4 -6848 B0B8 -6849 E8F1 -684A E8F0 -684B 96F5 -684C D7C0 -684D 96F6 -684E E8E4 -684F 96F7 -6850 CDA9 -6851 C9A3 -6852 96F8 -6853 BBB8 -6854 BDDB -6855 E8EA -6856 96F9 -6857 96FA -6858 96FB -6859 96FC -685A 96FD -685B 96FE -685C 9740 -685D 9741 -685E 9742 -685F 9743 -6860 E8E2 -6861 E8E3 -6862 E8E5 -6863 B5B5 -6864 E8E7 -6865 C7C5 -6866 E8EB -6867 E8ED -6868 BDB0 -6869 D7AE -686A 9744 -686B E8F8 -686C 9745 -686D 9746 -686E 9747 -686F 9748 -6870 9749 -6871 974A -6872 974B -6873 974C -6874 E8F5 -6875 974D -6876 CDB0 -6877 E8F6 -6878 974E -6879 974F -687A 9750 -687B 9751 -687C 9752 -687D 9753 -687E 9754 -687F 9755 -6880 9756 -6881 C1BA -6882 9757 -6883 E8E8 -6884 9758 -6885 C3B7 -6886 B0F0 -6887 9759 -6888 975A -6889 975B -688A 975C -688B 975D -688C 975E -688D 975F -688E 9760 -688F E8F4 -6890 9761 -6891 9762 -6892 9763 -6893 E8F7 -6894 9764 -6895 9765 -6896 9766 -6897 B9A3 -6898 9767 -6899 9768 -689A 9769 -689B 976A -689C 976B -689D 976C -689E 976D -689F 976E -68A0 976F -68A1 9770 -68A2 C9D2 -68A3 9771 -68A4 9772 -68A5 9773 -68A6 C3CE -68A7 CEE0 -68A8 C0E6 -68A9 9774 -68AA 9775 -68AB 9776 -68AC 9777 -68AD CBF3 -68AE 9778 -68AF CCDD -68B0 D0B5 -68B1 9779 -68B2 977A -68B3 CAE1 -68B4 977B -68B5 E8F3 -68B6 977C -68B7 977D -68B8 977E -68B9 9780 -68BA 9781 -68BB 9782 -68BC 9783 -68BD 9784 -68BE 9785 -68BF 9786 -68C0 BCEC -68C1 9787 -68C2 E8F9 -68C3 9788 -68C4 9789 -68C5 978A -68C6 978B -68C7 978C -68C8 978D -68C9 C3DE -68CA 978E -68CB C6E5 -68CC 978F -68CD B9F7 -68CE 9790 -68CF 9791 -68D0 9792 -68D1 9793 -68D2 B0F4 -68D3 9794 -68D4 9795 -68D5 D7D8 -68D6 9796 -68D7 9797 -68D8 BCAC -68D9 9798 -68DA C5EF -68DB 9799 -68DC 979A -68DD 979B -68DE 979C -68DF 979D -68E0 CCC4 -68E1 979E -68E2 979F -68E3 E9A6 -68E4 97A0 -68E5 97A1 -68E6 97A2 -68E7 97A3 -68E8 97A4 -68E9 97A5 -68EA 97A6 -68EB 97A7 -68EC 97A8 -68ED 97A9 -68EE C9AD -68EF 97AA -68F0 E9A2 -68F1 C0E2 -68F2 97AB -68F3 97AC -68F4 97AD -68F5 BFC3 -68F6 97AE -68F7 97AF -68F8 97B0 -68F9 E8FE -68FA B9D7 -68FB 97B1 -68FC E8FB -68FD 97B2 -68FE 97B3 -68FF 97B4 -6900 97B5 -6901 E9A4 -6902 97B6 -6903 97B7 -6904 97B8 -6905 D2CE -6906 97B9 -6907 97BA -6908 97BB -6909 97BC -690A 97BD -690B E9A3 -690C 97BE -690D D6B2 -690E D7B5 -690F 97BF -6910 E9A7 -6911 97C0 -6912 BDB7 -6913 97C1 -6914 97C2 -6915 97C3 -6916 97C4 -6917 97C5 -6918 97C6 -6919 97C7 -691A 97C8 -691B 97C9 -691C 97CA -691D 97CB -691E 97CC -691F E8FC -6920 E8FD -6921 97CD -6922 97CE -6923 97CF -6924 E9A1 -6925 97D0 -6926 97D1 -6927 97D2 -6928 97D3 -6929 97D4 -692A 97D5 -692B 97D6 -692C 97D7 -692D CDD6 -692E 97D8 -692F 97D9 -6930 D2AC -6931 97DA -6932 97DB -6933 97DC -6934 E9B2 -6935 97DD 
-6936 97DE -6937 97DF -6938 97E0 -6939 E9A9 -693A 97E1 -693B 97E2 -693C 97E3 -693D B4AA -693E 97E4 -693F B4BB -6940 97E5 -6941 97E6 -6942 E9AB -6943 97E7 -6944 97E8 -6945 97E9 -6946 97EA -6947 97EB -6948 97EC -6949 97ED -694A 97EE -694B 97EF -694C 97F0 -694D 97F1 -694E 97F2 -694F 97F3 -6950 97F4 -6951 97F5 -6952 97F6 -6953 97F7 -6954 D0A8 -6955 97F8 -6956 97F9 -6957 E9A5 -6958 97FA -6959 97FB -695A B3FE -695B 97FC -695C 97FD -695D E9AC -695E C0E3 -695F 97FE -6960 E9AA -6961 9840 -6962 9841 -6963 E9B9 -6964 9842 -6965 9843 -6966 E9B8 -6967 9844 -6968 9845 -6969 9846 -696A 9847 -696B E9AE -696C 9848 -696D 9849 -696E E8FA -696F 984A -6970 984B -6971 E9A8 -6972 984C -6973 984D -6974 984E -6975 984F -6976 9850 -6977 BFAC -6978 E9B1 -6979 E9BA -697A 9851 -697B 9852 -697C C2A5 -697D 9853 -697E 9854 -697F 9855 -6980 E9AF -6981 9856 -6982 B8C5 -6983 9857 -6984 E9AD -6985 9858 -6986 D3DC -6987 E9B4 -6988 E9B5 -6989 E9B7 -698A 9859 -698B 985A -698C 985B -698D E9C7 -698E 985C -698F 985D -6990 985E -6991 985F -6992 9860 -6993 9861 -6994 C0C6 -6995 E9C5 -6996 9862 -6997 9863 -6998 E9B0 -6999 9864 -699A 9865 -699B E9BB -699C B0F1 -699D 9866 -699E 9867 -699F 9868 -69A0 9869 -69A1 986A -69A2 986B -69A3 986C -69A4 986D -69A5 986E -69A6 986F -69A7 E9BC -69A8 D5A5 -69A9 9870 -69AA 9871 -69AB E9BE -69AC 9872 -69AD E9BF -69AE 9873 -69AF 9874 -69B0 9875 -69B1 E9C1 -69B2 9876 -69B3 9877 -69B4 C1F1 -69B5 9878 -69B6 9879 -69B7 C8B6 -69B8 987A -69B9 987B -69BA 987C -69BB E9BD -69BC 987D -69BD 987E -69BE 9880 -69BF 9881 -69C0 9882 -69C1 E9C2 -69C2 9883 -69C3 9884 -69C4 9885 -69C5 9886 -69C6 9887 -69C7 9888 -69C8 9889 -69C9 988A -69CA E9C3 -69CB 988B -69CC E9B3 -69CD 988C -69CE E9B6 -69CF 988D -69D0 BBB1 -69D1 988E -69D2 988F -69D3 9890 -69D4 E9C0 -69D5 9891 -69D6 9892 -69D7 9893 -69D8 9894 -69D9 9895 -69DA 9896 -69DB BCF7 -69DC 9897 -69DD 9898 -69DE 9899 -69DF E9C4 -69E0 E9C6 -69E1 989A -69E2 989B -69E3 989C -69E4 989D -69E5 989E -69E6 989F -69E7 98A0 -69E8 98A1 -69E9 98A2 -69EA 98A3 -69EB 98A4 -69EC 98A5 -69ED E9CA -69EE 98A6 -69EF 98A7 -69F0 98A8 -69F1 98A9 -69F2 E9CE -69F3 98AA -69F4 98AB -69F5 98AC -69F6 98AD -69F7 98AE -69F8 98AF -69F9 98B0 -69FA 98B1 -69FB 98B2 -69FC 98B3 -69FD B2DB -69FE 98B4 -69FF E9C8 -6A00 98B5 -6A01 98B6 -6A02 98B7 -6A03 98B8 -6A04 98B9 -6A05 98BA -6A06 98BB -6A07 98BC -6A08 98BD -6A09 98BE -6A0A B7AE -6A0B 98BF -6A0C 98C0 -6A0D 98C1 -6A0E 98C2 -6A0F 98C3 -6A10 98C4 -6A11 98C5 -6A12 98C6 -6A13 98C7 -6A14 98C8 -6A15 98C9 -6A16 98CA -6A17 E9CB -6A18 E9CC -6A19 98CB -6A1A 98CC -6A1B 98CD -6A1C 98CE -6A1D 98CF -6A1E 98D0 -6A1F D5C1 -6A20 98D1 -6A21 C4A3 -6A22 98D2 -6A23 98D3 -6A24 98D4 -6A25 98D5 -6A26 98D6 -6A27 98D7 -6A28 E9D8 -6A29 98D8 -6A2A BAE1 -6A2B 98D9 -6A2C 98DA -6A2D 98DB -6A2E 98DC -6A2F E9C9 -6A30 98DD -6A31 D3A3 -6A32 98DE -6A33 98DF -6A34 98E0 -6A35 E9D4 -6A36 98E1 -6A37 98E2 -6A38 98E3 -6A39 98E4 -6A3A 98E5 -6A3B 98E6 -6A3C 98E7 -6A3D E9D7 -6A3E E9D0 -6A3F 98E8 -6A40 98E9 -6A41 98EA -6A42 98EB -6A43 98EC -6A44 E9CF -6A45 98ED -6A46 98EE -6A47 C7C1 -6A48 98EF -6A49 98F0 -6A4A 98F1 -6A4B 98F2 -6A4C 98F3 -6A4D 98F4 -6A4E 98F5 -6A4F 98F6 -6A50 E9D2 -6A51 98F7 -6A52 98F8 -6A53 98F9 -6A54 98FA -6A55 98FB -6A56 98FC -6A57 98FD -6A58 E9D9 -6A59 B3C8 -6A5A 98FE -6A5B E9D3 -6A5C 9940 -6A5D 9941 -6A5E 9942 -6A5F 9943 -6A60 9944 -6A61 CFF0 -6A62 9945 -6A63 9946 -6A64 9947 -6A65 E9CD -6A66 9948 -6A67 9949 -6A68 994A -6A69 994B -6A6A 994C -6A6B 994D -6A6C 994E -6A6D 994F -6A6E 9950 -6A6F 9951 -6A70 9952 -6A71 B3F7 -6A72 9953 -6A73 9954 -6A74 9955 -6A75 9956 -6A76 9957 -6A77 9958 -6A78 9959 
-6A79 E9D6 -6A7A 995A -6A7B 995B -6A7C E9DA -6A7D 995C -6A7E 995D -6A7F 995E -6A80 CCB4 -6A81 995F -6A82 9960 -6A83 9961 -6A84 CFAD -6A85 9962 -6A86 9963 -6A87 9964 -6A88 9965 -6A89 9966 -6A8A 9967 -6A8B 9968 -6A8C 9969 -6A8D 996A -6A8E E9D5 -6A8F 996B -6A90 E9DC -6A91 E9DB -6A92 996C -6A93 996D -6A94 996E -6A95 996F -6A96 9970 -6A97 E9DE -6A98 9971 -6A99 9972 -6A9A 9973 -6A9B 9974 -6A9C 9975 -6A9D 9976 -6A9E 9977 -6A9F 9978 -6AA0 E9D1 -6AA1 9979 -6AA2 997A -6AA3 997B -6AA4 997C -6AA5 997D -6AA6 997E -6AA7 9980 -6AA8 9981 -6AA9 E9DD -6AAA 9982 -6AAB E9DF -6AAC C3CA -6AAD 9983 -6AAE 9984 -6AAF 9985 -6AB0 9986 -6AB1 9987 -6AB2 9988 -6AB3 9989 -6AB4 998A -6AB5 998B -6AB6 998C -6AB7 998D -6AB8 998E -6AB9 998F -6ABA 9990 -6ABB 9991 -6ABC 9992 -6ABD 9993 -6ABE 9994 -6ABF 9995 -6AC0 9996 -6AC1 9997 -6AC2 9998 -6AC3 9999 -6AC4 999A -6AC5 999B -6AC6 999C -6AC7 999D -6AC8 999E -6AC9 999F -6ACA 99A0 -6ACB 99A1 -6ACC 99A2 -6ACD 99A3 -6ACE 99A4 -6ACF 99A5 -6AD0 99A6 -6AD1 99A7 -6AD2 99A8 -6AD3 99A9 -6AD4 99AA -6AD5 99AB -6AD6 99AC -6AD7 99AD -6AD8 99AE -6AD9 99AF -6ADA 99B0 -6ADB 99B1 -6ADC 99B2 -6ADD 99B3 -6ADE 99B4 -6ADF 99B5 -6AE0 99B6 -6AE1 99B7 -6AE2 99B8 -6AE3 99B9 -6AE4 99BA -6AE5 99BB -6AE6 99BC -6AE7 99BD -6AE8 99BE -6AE9 99BF -6AEA 99C0 -6AEB 99C1 -6AEC 99C2 -6AED 99C3 -6AEE 99C4 -6AEF 99C5 -6AF0 99C6 -6AF1 99C7 -6AF2 99C8 -6AF3 99C9 -6AF4 99CA -6AF5 99CB -6AF6 99CC -6AF7 99CD -6AF8 99CE -6AF9 99CF -6AFA 99D0 -6AFB 99D1 -6AFC 99D2 -6AFD 99D3 -6AFE 99D4 -6AFF 99D5 -6B00 99D6 -6B01 99D7 -6B02 99D8 -6B03 99D9 -6B04 99DA -6B05 99DB -6B06 99DC -6B07 99DD -6B08 99DE -6B09 99DF -6B0A 99E0 -6B0B 99E1 -6B0C 99E2 -6B0D 99E3 -6B0E 99E4 -6B0F 99E5 -6B10 99E6 -6B11 99E7 -6B12 99E8 -6B13 99E9 -6B14 99EA -6B15 99EB -6B16 99EC -6B17 99ED -6B18 99EE -6B19 99EF -6B1A 99F0 -6B1B 99F1 -6B1C 99F2 -6B1D 99F3 -6B1E 99F4 -6B1F 99F5 -6B20 C7B7 -6B21 B4CE -6B22 BBB6 -6B23 D0C0 -6B24 ECA3 -6B25 99F6 -6B26 99F7 -6B27 C5B7 -6B28 99F8 -6B29 99F9 -6B2A 99FA -6B2B 99FB -6B2C 99FC -6B2D 99FD -6B2E 99FE -6B2F 9A40 -6B30 9A41 -6B31 9A42 -6B32 D3FB -6B33 9A43 -6B34 9A44 -6B35 9A45 -6B36 9A46 -6B37 ECA4 -6B38 9A47 -6B39 ECA5 -6B3A C6DB -6B3B 9A48 -6B3C 9A49 -6B3D 9A4A -6B3E BFEE -6B3F 9A4B -6B40 9A4C -6B41 9A4D -6B42 9A4E -6B43 ECA6 -6B44 9A4F -6B45 9A50 -6B46 ECA7 -6B47 D0AA -6B48 9A51 -6B49 C7B8 -6B4A 9A52 -6B4B 9A53 -6B4C B8E8 -6B4D 9A54 -6B4E 9A55 -6B4F 9A56 -6B50 9A57 -6B51 9A58 -6B52 9A59 -6B53 9A5A -6B54 9A5B -6B55 9A5C -6B56 9A5D -6B57 9A5E -6B58 9A5F -6B59 ECA8 -6B5A 9A60 -6B5B 9A61 -6B5C 9A62 -6B5D 9A63 -6B5E 9A64 -6B5F 9A65 -6B60 9A66 -6B61 9A67 -6B62 D6B9 -6B63 D5FD -6B64 B4CB -6B65 B2BD -6B66 CEE4 -6B67 C6E7 -6B68 9A68 -6B69 9A69 -6B6A CDE1 -6B6B 9A6A -6B6C 9A6B -6B6D 9A6C -6B6E 9A6D -6B6F 9A6E -6B70 9A6F -6B71 9A70 -6B72 9A71 -6B73 9A72 -6B74 9A73 -6B75 9A74 -6B76 9A75 -6B77 9A76 -6B78 9A77 -6B79 B4F5 -6B7A 9A78 -6B7B CBC0 -6B7C BCDF -6B7D 9A79 -6B7E 9A7A -6B7F 9A7B -6B80 9A7C -6B81 E9E2 -6B82 E9E3 -6B83 D1EA -6B84 E9E5 -6B85 9A7D -6B86 B4F9 -6B87 E9E4 -6B88 9A7E -6B89 D1B3 -6B8A CAE2 -6B8B B2D0 -6B8C 9A80 -6B8D E9E8 -6B8E 9A81 -6B8F 9A82 -6B90 9A83 -6B91 9A84 -6B92 E9E6 -6B93 E9E7 -6B94 9A85 -6B95 9A86 -6B96 D6B3 -6B97 9A87 -6B98 9A88 -6B99 9A89 -6B9A E9E9 -6B9B E9EA -6B9C 9A8A -6B9D 9A8B -6B9E 9A8C -6B9F 9A8D -6BA0 9A8E -6BA1 E9EB -6BA2 9A8F -6BA3 9A90 -6BA4 9A91 -6BA5 9A92 -6BA6 9A93 -6BA7 9A94 -6BA8 9A95 -6BA9 9A96 -6BAA E9EC -6BAB 9A97 -6BAC 9A98 -6BAD 9A99 -6BAE 9A9A -6BAF 9A9B -6BB0 9A9C -6BB1 9A9D -6BB2 9A9E -6BB3 ECAF -6BB4 C5B9 -6BB5 B6CE -6BB6 9A9F -6BB7 D2F3 -6BB8 9AA0 -6BB9 9AA1 -6BBA 9AA2 -6BBB 9AA3 
-6BBC 9AA4 -6BBD 9AA5 -6BBE 9AA6 -6BBF B5EE -6BC0 9AA7 -6BC1 BBD9 -6BC2 ECB1 -6BC3 9AA8 -6BC4 9AA9 -6BC5 D2E3 -6BC6 9AAA -6BC7 9AAB -6BC8 9AAC -6BC9 9AAD -6BCA 9AAE -6BCB CEE3 -6BCC 9AAF -6BCD C4B8 -6BCE 9AB0 -6BCF C3BF -6BD0 9AB1 -6BD1 9AB2 -6BD2 B6BE -6BD3 D8B9 -6BD4 B1C8 -6BD5 B1CF -6BD6 B1D1 -6BD7 C5FE -6BD8 9AB3 -6BD9 B1D0 -6BDA 9AB4 -6BDB C3AB -6BDC 9AB5 -6BDD 9AB6 -6BDE 9AB7 -6BDF 9AB8 -6BE0 9AB9 -6BE1 D5B1 -6BE2 9ABA -6BE3 9ABB -6BE4 9ABC -6BE5 9ABD -6BE6 9ABE -6BE7 9ABF -6BE8 9AC0 -6BE9 9AC1 -6BEA EBA4 -6BEB BAC1 -6BEC 9AC2 -6BED 9AC3 -6BEE 9AC4 -6BEF CCBA -6BF0 9AC5 -6BF1 9AC6 -6BF2 9AC7 -6BF3 EBA5 -6BF4 9AC8 -6BF5 EBA7 -6BF6 9AC9 -6BF7 9ACA -6BF8 9ACB -6BF9 EBA8 -6BFA 9ACC -6BFB 9ACD -6BFC 9ACE -6BFD EBA6 -6BFE 9ACF -6BFF 9AD0 -6C00 9AD1 -6C01 9AD2 -6C02 9AD3 -6C03 9AD4 -6C04 9AD5 -6C05 EBA9 -6C06 EBAB -6C07 EBAA -6C08 9AD6 -6C09 9AD7 -6C0A 9AD8 -6C0B 9AD9 -6C0C 9ADA -6C0D EBAC -6C0E 9ADB -6C0F CACF -6C10 D8B5 -6C11 C3F1 -6C12 9ADC -6C13 C3A5 -6C14 C6F8 -6C15 EBAD -6C16 C4CA -6C17 9ADD -6C18 EBAE -6C19 EBAF -6C1A EBB0 -6C1B B7D5 -6C1C 9ADE -6C1D 9ADF -6C1E 9AE0 -6C1F B7FA -6C20 9AE1 -6C21 EBB1 -6C22 C7E2 -6C23 9AE2 -6C24 EBB3 -6C25 9AE3 -6C26 BAA4 -6C27 D1F5 -6C28 B0B1 -6C29 EBB2 -6C2A EBB4 -6C2B 9AE4 -6C2C 9AE5 -6C2D 9AE6 -6C2E B5AA -6C2F C2C8 -6C30 C7E8 -6C31 9AE7 -6C32 EBB5 -6C33 9AE8 -6C34 CBAE -6C35 E3DF -6C36 9AE9 -6C37 9AEA -6C38 D3C0 -6C39 9AEB -6C3A 9AEC -6C3B 9AED -6C3C 9AEE -6C3D D9DB -6C3E 9AEF -6C3F 9AF0 -6C40 CDA1 -6C41 D6AD -6C42 C7F3 -6C43 9AF1 -6C44 9AF2 -6C45 9AF3 -6C46 D9E0 -6C47 BBE3 -6C48 9AF4 -6C49 BABA -6C4A E3E2 -6C4B 9AF5 -6C4C 9AF6 -6C4D 9AF7 -6C4E 9AF8 -6C4F 9AF9 -6C50 CFAB -6C51 9AFA -6C52 9AFB -6C53 9AFC -6C54 E3E0 -6C55 C9C7 -6C56 9AFD -6C57 BAB9 -6C58 9AFE -6C59 9B40 -6C5A 9B41 -6C5B D1B4 -6C5C E3E1 -6C5D C8EA -6C5E B9AF -6C5F BDAD -6C60 B3D8 -6C61 CEDB -6C62 9B42 -6C63 9B43 -6C64 CCC0 -6C65 9B44 -6C66 9B45 -6C67 9B46 -6C68 E3E8 -6C69 E3E9 -6C6A CDF4 -6C6B 9B47 -6C6C 9B48 -6C6D 9B49 -6C6E 9B4A -6C6F 9B4B -6C70 CCAD -6C71 9B4C -6C72 BCB3 -6C73 9B4D -6C74 E3EA -6C75 9B4E -6C76 E3EB -6C77 9B4F -6C78 9B50 -6C79 D0DA -6C7A 9B51 -6C7B 9B52 -6C7C 9B53 -6C7D C6FB -6C7E B7DA -6C7F 9B54 -6C80 9B55 -6C81 C7DF -6C82 D2CA -6C83 CED6 -6C84 9B56 -6C85 E3E4 -6C86 E3EC -6C87 9B57 -6C88 C9F2 -6C89 B3C1 -6C8A 9B58 -6C8B 9B59 -6C8C E3E7 -6C8D 9B5A -6C8E 9B5B -6C8F C6E3 -6C90 E3E5 -6C91 9B5C -6C92 9B5D -6C93 EDB3 -6C94 E3E6 -6C95 9B5E -6C96 9B5F -6C97 9B60 -6C98 9B61 -6C99 C9B3 -6C9A 9B62 -6C9B C5E6 -6C9C 9B63 -6C9D 9B64 -6C9E 9B65 -6C9F B9B5 -6CA0 9B66 -6CA1 C3BB -6CA2 9B67 -6CA3 E3E3 -6CA4 C5BD -6CA5 C1A4 -6CA6 C2D9 -6CA7 B2D7 -6CA8 9B68 -6CA9 E3ED -6CAA BBA6 -6CAB C4AD -6CAC 9B69 -6CAD E3F0 -6CAE BEDA -6CAF 9B6A -6CB0 9B6B -6CB1 E3FB -6CB2 E3F5 -6CB3 BAD3 -6CB4 9B6C -6CB5 9B6D -6CB6 9B6E -6CB7 9B6F -6CB8 B7D0 -6CB9 D3CD -6CBA 9B70 -6CBB D6CE -6CBC D5D3 -6CBD B9C1 -6CBE D5B4 -6CBF D1D8 -6CC0 9B71 -6CC1 9B72 -6CC2 9B73 -6CC3 9B74 -6CC4 D0B9 -6CC5 C7F6 -6CC6 9B75 -6CC7 9B76 -6CC8 9B77 -6CC9 C8AA -6CCA B2B4 -6CCB 9B78 -6CCC C3DA -6CCD 9B79 -6CCE 9B7A -6CCF 9B7B -6CD0 E3EE -6CD1 9B7C -6CD2 9B7D -6CD3 E3FC -6CD4 E3EF -6CD5 B7A8 -6CD6 E3F7 -6CD7 E3F4 -6CD8 9B7E -6CD9 9B80 -6CDA 9B81 -6CDB B7BA -6CDC 9B82 -6CDD 9B83 -6CDE C5A2 -6CDF 9B84 -6CE0 E3F6 -6CE1 C5DD -6CE2 B2A8 -6CE3 C6FC -6CE4 9B85 -6CE5 C4E0 -6CE6 9B86 -6CE7 9B87 -6CE8 D7A2 -6CE9 9B88 -6CEA C0E1 -6CEB E3F9 -6CEC 9B89 -6CED 9B8A -6CEE E3FA -6CEF E3FD -6CF0 CCA9 -6CF1 E3F3 -6CF2 9B8B -6CF3 D3BE -6CF4 9B8C -6CF5 B1C3 -6CF6 EDB4 -6CF7 E3F1 -6CF8 E3F2 -6CF9 9B8D -6CFA E3F8 -6CFB D0BA -6CFC C6C3 -6CFD D4F3 -6CFE E3FE 
-6CFF 9B8E -6D00 9B8F -6D01 BDE0 -6D02 9B90 -6D03 9B91 -6D04 E4A7 -6D05 9B92 -6D06 9B93 -6D07 E4A6 -6D08 9B94 -6D09 9B95 -6D0A 9B96 -6D0B D1F3 -6D0C E4A3 -6D0D 9B97 -6D0E E4A9 -6D0F 9B98 -6D10 9B99 -6D11 9B9A -6D12 C8F7 -6D13 9B9B -6D14 9B9C -6D15 9B9D -6D16 9B9E -6D17 CFB4 -6D18 9B9F -6D19 E4A8 -6D1A E4AE -6D1B C2E5 -6D1C 9BA0 -6D1D 9BA1 -6D1E B6B4 -6D1F 9BA2 -6D20 9BA3 -6D21 9BA4 -6D22 9BA5 -6D23 9BA6 -6D24 9BA7 -6D25 BDF2 -6D26 9BA8 -6D27 E4A2 -6D28 9BA9 -6D29 9BAA -6D2A BAE9 -6D2B E4AA -6D2C 9BAB -6D2D 9BAC -6D2E E4AC -6D2F 9BAD -6D30 9BAE -6D31 B6FD -6D32 D6DE -6D33 E4B2 -6D34 9BAF -6D35 E4AD -6D36 9BB0 -6D37 9BB1 -6D38 9BB2 -6D39 E4A1 -6D3A 9BB3 -6D3B BBEE -6D3C CDDD -6D3D C7A2 -6D3E C5C9 -6D3F 9BB4 -6D40 9BB5 -6D41 C1F7 -6D42 9BB6 -6D43 E4A4 -6D44 9BB7 -6D45 C7B3 -6D46 BDAC -6D47 BDBD -6D48 E4A5 -6D49 9BB8 -6D4A D7C7 -6D4B B2E2 -6D4C 9BB9 -6D4D E4AB -6D4E BCC3 -6D4F E4AF -6D50 9BBA -6D51 BBEB -6D52 E4B0 -6D53 C5A8 -6D54 E4B1 -6D55 9BBB -6D56 9BBC -6D57 9BBD -6D58 9BBE -6D59 D5E3 -6D5A BFA3 -6D5B 9BBF -6D5C E4BA -6D5D 9BC0 -6D5E E4B7 -6D5F 9BC1 -6D60 E4BB -6D61 9BC2 -6D62 9BC3 -6D63 E4BD -6D64 9BC4 -6D65 9BC5 -6D66 C6D6 -6D67 9BC6 -6D68 9BC7 -6D69 BAC6 -6D6A C0CB -6D6B 9BC8 -6D6C 9BC9 -6D6D 9BCA -6D6E B8A1 -6D6F E4B4 -6D70 9BCB -6D71 9BCC -6D72 9BCD -6D73 9BCE -6D74 D4A1 -6D75 9BCF -6D76 9BD0 -6D77 BAA3 -6D78 BDFE -6D79 9BD1 -6D7A 9BD2 -6D7B 9BD3 -6D7C E4BC -6D7D 9BD4 -6D7E 9BD5 -6D7F 9BD6 -6D80 9BD7 -6D81 9BD8 -6D82 CDBF -6D83 9BD9 -6D84 9BDA -6D85 C4F9 -6D86 9BDB -6D87 9BDC -6D88 CFFB -6D89 C9E6 -6D8A 9BDD -6D8B 9BDE -6D8C D3BF -6D8D 9BDF -6D8E CFD1 -6D8F 9BE0 -6D90 9BE1 -6D91 E4B3 -6D92 9BE2 -6D93 E4B8 -6D94 E4B9 -6D95 CCE9 -6D96 9BE3 -6D97 9BE4 -6D98 9BE5 -6D99 9BE6 -6D9A 9BE7 -6D9B CCCE -6D9C 9BE8 -6D9D C0D4 -6D9E E4B5 -6D9F C1B0 -6DA0 E4B6 -6DA1 CED0 -6DA2 9BE9 -6DA3 BBC1 -6DA4 B5D3 -6DA5 9BEA -6DA6 C8F3 -6DA7 BDA7 -6DA8 D5C7 -6DA9 C9AC -6DAA B8A2 -6DAB E4CA -6DAC 9BEB -6DAD 9BEC -6DAE E4CC -6DAF D1C4 -6DB0 9BED -6DB1 9BEE -6DB2 D2BA -6DB3 9BEF -6DB4 9BF0 -6DB5 BAAD -6DB6 9BF1 -6DB7 9BF2 -6DB8 BAD4 -6DB9 9BF3 -6DBA 9BF4 -6DBB 9BF5 -6DBC 9BF6 -6DBD 9BF7 -6DBE 9BF8 -6DBF E4C3 -6DC0 B5ED -6DC1 9BF9 -6DC2 9BFA -6DC3 9BFB -6DC4 D7CD -6DC5 E4C0 -6DC6 CFFD -6DC7 E4BF -6DC8 9BFC -6DC9 9BFD -6DCA 9BFE -6DCB C1DC -6DCC CCCA -6DCD 9C40 -6DCE 9C41 -6DCF 9C42 -6DD0 9C43 -6DD1 CAE7 -6DD2 9C44 -6DD3 9C45 -6DD4 9C46 -6DD5 9C47 -6DD6 C4D7 -6DD7 9C48 -6DD8 CCD4 -6DD9 E4C8 -6DDA 9C49 -6DDB 9C4A -6DDC 9C4B -6DDD E4C7 -6DDE E4C1 -6DDF 9C4C -6DE0 E4C4 -6DE1 B5AD -6DE2 9C4D -6DE3 9C4E -6DE4 D3D9 -6DE5 9C4F -6DE6 E4C6 -6DE7 9C50 -6DE8 9C51 -6DE9 9C52 -6DEA 9C53 -6DEB D2F9 -6DEC B4E3 -6DED 9C54 -6DEE BBB4 -6DEF 9C55 -6DF0 9C56 -6DF1 C9EE -6DF2 9C57 -6DF3 B4BE -6DF4 9C58 -6DF5 9C59 -6DF6 9C5A -6DF7 BBEC -6DF8 9C5B -6DF9 D1CD -6DFA 9C5C -6DFB CCED -6DFC EDB5 -6DFD 9C5D -6DFE 9C5E -6DFF 9C5F -6E00 9C60 -6E01 9C61 -6E02 9C62 -6E03 9C63 -6E04 9C64 -6E05 C7E5 -6E06 9C65 -6E07 9C66 -6E08 9C67 -6E09 9C68 -6E0A D4A8 -6E0B 9C69 -6E0C E4CB -6E0D D7D5 -6E0E E4C2 -6E0F 9C6A -6E10 BDA5 -6E11 E4C5 -6E12 9C6B -6E13 9C6C -6E14 D3E6 -6E15 9C6D -6E16 E4C9 -6E17 C9F8 -6E18 9C6E -6E19 9C6F -6E1A E4BE -6E1B 9C70 -6E1C 9C71 -6E1D D3E5 -6E1E 9C72 -6E1F 9C73 -6E20 C7FE -6E21 B6C9 -6E22 9C74 -6E23 D4FC -6E24 B2B3 -6E25 E4D7 -6E26 9C75 -6E27 9C76 -6E28 9C77 -6E29 CEC2 -6E2A 9C78 -6E2B E4CD -6E2C 9C79 -6E2D CEBC -6E2E 9C7A -6E2F B8DB -6E30 9C7B -6E31 9C7C -6E32 E4D6 -6E33 9C7D -6E34 BFCA -6E35 9C7E -6E36 9C80 -6E37 9C81 -6E38 D3CE -6E39 9C82 -6E3A C3EC -6E3B 9C83 -6E3C 9C84 -6E3D 9C85 -6E3E 9C86 -6E3F 9C87 -6E40 9C88 -6E41 9C89 
-6E42 9C8A -6E43 C5C8 -6E44 E4D8 -6E45 9C8B -6E46 9C8C -6E47 9C8D -6E48 9C8E -6E49 9C8F -6E4A 9C90 -6E4B 9C91 -6E4C 9C92 -6E4D CDC4 -6E4E E4CF -6E4F 9C93 -6E50 9C94 -6E51 9C95 -6E52 9C96 -6E53 E4D4 -6E54 E4D5 -6E55 9C97 -6E56 BAFE -6E57 9C98 -6E58 CFE6 -6E59 9C99 -6E5A 9C9A -6E5B D5BF -6E5C 9C9B -6E5D 9C9C -6E5E 9C9D -6E5F E4D2 -6E60 9C9E -6E61 9C9F -6E62 9CA0 -6E63 9CA1 -6E64 9CA2 -6E65 9CA3 -6E66 9CA4 -6E67 9CA5 -6E68 9CA6 -6E69 9CA7 -6E6A 9CA8 -6E6B E4D0 -6E6C 9CA9 -6E6D 9CAA -6E6E E4CE -6E6F 9CAB -6E70 9CAC -6E71 9CAD -6E72 9CAE -6E73 9CAF -6E74 9CB0 -6E75 9CB1 -6E76 9CB2 -6E77 9CB3 -6E78 9CB4 -6E79 9CB5 -6E7A 9CB6 -6E7B 9CB7 -6E7C 9CB8 -6E7D 9CB9 -6E7E CDE5 -6E7F CAAA -6E80 9CBA -6E81 9CBB -6E82 9CBC -6E83 C0A3 -6E84 9CBD -6E85 BDA6 -6E86 E4D3 -6E87 9CBE -6E88 9CBF -6E89 B8C8 -6E8A 9CC0 -6E8B 9CC1 -6E8C 9CC2 -6E8D 9CC3 -6E8E 9CC4 -6E8F E4E7 -6E90 D4B4 -6E91 9CC5 -6E92 9CC6 -6E93 9CC7 -6E94 9CC8 -6E95 9CC9 -6E96 9CCA -6E97 9CCB -6E98 E4DB -6E99 9CCC -6E9A 9CCD -6E9B 9CCE -6E9C C1EF -6E9D 9CCF -6E9E 9CD0 -6E9F E4E9 -6EA0 9CD1 -6EA1 9CD2 -6EA2 D2E7 -6EA3 9CD3 -6EA4 9CD4 -6EA5 E4DF -6EA6 9CD5 -6EA7 E4E0 -6EA8 9CD6 -6EA9 9CD7 -6EAA CFAA -6EAB 9CD8 -6EAC 9CD9 -6EAD 9CDA -6EAE 9CDB -6EAF CBDD -6EB0 9CDC -6EB1 E4DA -6EB2 E4D1 -6EB3 9CDD -6EB4 E4E5 -6EB5 9CDE -6EB6 C8DC -6EB7 E4E3 -6EB8 9CDF -6EB9 9CE0 -6EBA C4E7 -6EBB E4E2 -6EBC 9CE1 -6EBD E4E1 -6EBE 9CE2 -6EBF 9CE3 -6EC0 9CE4 -6EC1 B3FC -6EC2 E4E8 -6EC3 9CE5 -6EC4 9CE6 -6EC5 9CE7 -6EC6 9CE8 -6EC7 B5E1 -6EC8 9CE9 -6EC9 9CEA -6ECA 9CEB -6ECB D7CC -6ECC 9CEC -6ECD 9CED -6ECE 9CEE -6ECF E4E6 -6ED0 9CEF -6ED1 BBAC -6ED2 9CF0 -6ED3 D7D2 -6ED4 CCCF -6ED5 EBF8 -6ED6 9CF1 -6ED7 E4E4 -6ED8 9CF2 -6ED9 9CF3 -6EDA B9F6 -6EDB 9CF4 -6EDC 9CF5 -6EDD 9CF6 -6EDE D6CD -6EDF E4D9 -6EE0 E4DC -6EE1 C2FA -6EE2 E4DE -6EE3 9CF7 -6EE4 C2CB -6EE5 C0C4 -6EE6 C2D0 -6EE7 9CF8 -6EE8 B1F5 -6EE9 CCB2 -6EEA 9CF9 -6EEB 9CFA -6EEC 9CFB -6EED 9CFC -6EEE 9CFD -6EEF 9CFE -6EF0 9D40 -6EF1 9D41 -6EF2 9D42 -6EF3 9D43 -6EF4 B5CE -6EF5 9D44 -6EF6 9D45 -6EF7 9D46 -6EF8 9D47 -6EF9 E4EF -6EFA 9D48 -6EFB 9D49 -6EFC 9D4A -6EFD 9D4B -6EFE 9D4C -6EFF 9D4D -6F00 9D4E -6F01 9D4F -6F02 C6AF -6F03 9D50 -6F04 9D51 -6F05 9D52 -6F06 C6E1 -6F07 9D53 -6F08 9D54 -6F09 E4F5 -6F0A 9D55 -6F0B 9D56 -6F0C 9D57 -6F0D 9D58 -6F0E 9D59 -6F0F C2A9 -6F10 9D5A -6F11 9D5B -6F12 9D5C -6F13 C0EC -6F14 D1DD -6F15 E4EE -6F16 9D5D -6F17 9D5E -6F18 9D5F -6F19 9D60 -6F1A 9D61 -6F1B 9D62 -6F1C 9D63 -6F1D 9D64 -6F1E 9D65 -6F1F 9D66 -6F20 C4AE -6F21 9D67 -6F22 9D68 -6F23 9D69 -6F24 E4ED -6F25 9D6A -6F26 9D6B -6F27 9D6C -6F28 9D6D -6F29 E4F6 -6F2A E4F4 -6F2B C2FE -6F2C 9D6E -6F2D E4DD -6F2E 9D6F -6F2F E4F0 -6F30 9D70 -6F31 CAFE -6F32 9D71 -6F33 D5C4 -6F34 9D72 -6F35 9D73 -6F36 E4F1 -6F37 9D74 -6F38 9D75 -6F39 9D76 -6F3A 9D77 -6F3B 9D78 -6F3C 9D79 -6F3D 9D7A -6F3E D1FA -6F3F 9D7B -6F40 9D7C -6F41 9D7D -6F42 9D7E -6F43 9D80 -6F44 9D81 -6F45 9D82 -6F46 E4EB -6F47 E4EC -6F48 9D83 -6F49 9D84 -6F4A 9D85 -6F4B E4F2 -6F4C 9D86 -6F4D CEAB -6F4E 9D87 -6F4F 9D88 -6F50 9D89 -6F51 9D8A -6F52 9D8B -6F53 9D8C -6F54 9D8D -6F55 9D8E -6F56 9D8F -6F57 9D90 -6F58 C5CB -6F59 9D91 -6F5A 9D92 -6F5B 9D93 -6F5C C7B1 -6F5D 9D94 -6F5E C2BA -6F5F 9D95 -6F60 9D96 -6F61 9D97 -6F62 E4EA -6F63 9D98 -6F64 9D99 -6F65 9D9A -6F66 C1CA -6F67 9D9B -6F68 9D9C -6F69 9D9D -6F6A 9D9E -6F6B 9D9F -6F6C 9DA0 -6F6D CCB6 -6F6E B3B1 -6F6F 9DA1 -6F70 9DA2 -6F71 9DA3 -6F72 E4FB -6F73 9DA4 -6F74 E4F3 -6F75 9DA5 -6F76 9DA6 -6F77 9DA7 -6F78 E4FA -6F79 9DA8 -6F7A E4FD -6F7B 9DA9 -6F7C E4FC -6F7D 9DAA -6F7E 9DAB -6F7F 9DAC -6F80 9DAD -6F81 9DAE -6F82 9DAF -6F83 9DB0 -6F84 B3CE 
-6F85 9DB1 -6F86 9DB2 -6F87 9DB3 -6F88 B3BA -6F89 E4F7 -6F8A 9DB4 -6F8B 9DB5 -6F8C E4F9 -6F8D E4F8 -6F8E C5EC -6F8F 9DB6 -6F90 9DB7 -6F91 9DB8 -6F92 9DB9 -6F93 9DBA -6F94 9DBB -6F95 9DBC -6F96 9DBD -6F97 9DBE -6F98 9DBF -6F99 9DC0 -6F9A 9DC1 -6F9B 9DC2 -6F9C C0BD -6F9D 9DC3 -6F9E 9DC4 -6F9F 9DC5 -6FA0 9DC6 -6FA1 D4E8 -6FA2 9DC7 -6FA3 9DC8 -6FA4 9DC9 -6FA5 9DCA -6FA6 9DCB -6FA7 E5A2 -6FA8 9DCC -6FA9 9DCD -6FAA 9DCE -6FAB 9DCF -6FAC 9DD0 -6FAD 9DD1 -6FAE 9DD2 -6FAF 9DD3 -6FB0 9DD4 -6FB1 9DD5 -6FB2 9DD6 -6FB3 B0C4 -6FB4 9DD7 -6FB5 9DD8 -6FB6 E5A4 -6FB7 9DD9 -6FB8 9DDA -6FB9 E5A3 -6FBA 9DDB -6FBB 9DDC -6FBC 9DDD -6FBD 9DDE -6FBE 9DDF -6FBF 9DE0 -6FC0 BCA4 -6FC1 9DE1 -6FC2 E5A5 -6FC3 9DE2 -6FC4 9DE3 -6FC5 9DE4 -6FC6 9DE5 -6FC7 9DE6 -6FC8 9DE7 -6FC9 E5A1 -6FCA 9DE8 -6FCB 9DE9 -6FCC 9DEA -6FCD 9DEB -6FCE 9DEC -6FCF 9DED -6FD0 9DEE -6FD1 E4FE -6FD2 B1F4 -6FD3 9DEF -6FD4 9DF0 -6FD5 9DF1 -6FD6 9DF2 -6FD7 9DF3 -6FD8 9DF4 -6FD9 9DF5 -6FDA 9DF6 -6FDB 9DF7 -6FDC 9DF8 -6FDD 9DF9 -6FDE E5A8 -6FDF 9DFA -6FE0 E5A9 -6FE1 E5A6 -6FE2 9DFB -6FE3 9DFC -6FE4 9DFD -6FE5 9DFE -6FE6 9E40 -6FE7 9E41 -6FE8 9E42 -6FE9 9E43 -6FEA 9E44 -6FEB 9E45 -6FEC 9E46 -6FED 9E47 -6FEE E5A7 -6FEF E5AA -6FF0 9E48 -6FF1 9E49 -6FF2 9E4A -6FF3 9E4B -6FF4 9E4C -6FF5 9E4D -6FF6 9E4E -6FF7 9E4F -6FF8 9E50 -6FF9 9E51 -6FFA 9E52 -6FFB 9E53 -6FFC 9E54 -6FFD 9E55 -6FFE 9E56 -6FFF 9E57 -7000 9E58 -7001 9E59 -7002 9E5A -7003 9E5B -7004 9E5C -7005 9E5D -7006 9E5E -7007 9E5F -7008 9E60 -7009 9E61 -700A 9E62 -700B 9E63 -700C 9E64 -700D 9E65 -700E 9E66 -700F 9E67 -7010 9E68 -7011 C6D9 -7012 9E69 -7013 9E6A -7014 9E6B -7015 9E6C -7016 9E6D -7017 9E6E -7018 9E6F -7019 9E70 -701A E5AB -701B E5AD -701C 9E71 -701D 9E72 -701E 9E73 -701F 9E74 -7020 9E75 -7021 9E76 -7022 9E77 -7023 E5AC -7024 9E78 -7025 9E79 -7026 9E7A -7027 9E7B -7028 9E7C -7029 9E7D -702A 9E7E -702B 9E80 -702C 9E81 -702D 9E82 -702E 9E83 -702F 9E84 -7030 9E85 -7031 9E86 -7032 9E87 -7033 9E88 -7034 9E89 -7035 E5AF -7036 9E8A -7037 9E8B -7038 9E8C -7039 E5AE -703A 9E8D -703B 9E8E -703C 9E8F -703D 9E90 -703E 9E91 -703F 9E92 -7040 9E93 -7041 9E94 -7042 9E95 -7043 9E96 -7044 9E97 -7045 9E98 -7046 9E99 -7047 9E9A -7048 9E9B -7049 9E9C -704A 9E9D -704B 9E9E -704C B9E0 -704D 9E9F -704E 9EA0 -704F E5B0 -7050 9EA1 -7051 9EA2 -7052 9EA3 -7053 9EA4 -7054 9EA5 -7055 9EA6 -7056 9EA7 -7057 9EA8 -7058 9EA9 -7059 9EAA -705A 9EAB -705B 9EAC -705C 9EAD -705D 9EAE -705E E5B1 -705F 9EAF -7060 9EB0 -7061 9EB1 -7062 9EB2 -7063 9EB3 -7064 9EB4 -7065 9EB5 -7066 9EB6 -7067 9EB7 -7068 9EB8 -7069 9EB9 -706A 9EBA -706B BBF0 -706C ECE1 -706D C3F0 -706E 9EBB -706F B5C6 -7070 BBD2 -7071 9EBC -7072 9EBD -7073 9EBE -7074 9EBF -7075 C1E9 -7076 D4EE -7077 9EC0 -7078 BEC4 -7079 9EC1 -707A 9EC2 -707B 9EC3 -707C D7C6 -707D 9EC4 -707E D4D6 -707F B2D3 -7080 ECBE -7081 9EC5 -7082 9EC6 -7083 9EC7 -7084 9EC8 -7085 EAC1 -7086 9EC9 -7087 9ECA -7088 9ECB -7089 C2AF -708A B4B6 -708B 9ECC -708C 9ECD -708D 9ECE -708E D1D7 -708F 9ECF -7090 9ED0 -7091 9ED1 -7092 B3B4 -7093 9ED2 -7094 C8B2 -7095 BFBB -7096 ECC0 -7097 9ED3 -7098 9ED4 -7099 D6CB -709A 9ED5 -709B 9ED6 -709C ECBF -709D ECC1 -709E 9ED7 -709F 9ED8 -70A0 9ED9 -70A1 9EDA -70A2 9EDB -70A3 9EDC -70A4 9EDD -70A5 9EDE -70A6 9EDF -70A7 9EE0 -70A8 9EE1 -70A9 9EE2 -70AA 9EE3 -70AB ECC5 -70AC BEE6 -70AD CCBF -70AE C5DA -70AF BEBC -70B0 9EE4 -70B1 ECC6 -70B2 9EE5 -70B3 B1FE -70B4 9EE6 -70B5 9EE7 -70B6 9EE8 -70B7 ECC4 -70B8 D5A8 -70B9 B5E3 -70BA 9EE9 -70BB ECC2 -70BC C1B6 -70BD B3E3 -70BE 9EEA -70BF 9EEB -70C0 ECC3 -70C1 CBB8 -70C2 C0C3 -70C3 CCFE -70C4 9EEC -70C5 9EED -70C6 9EEE -70C7 9EEF 
-70C8 C1D2 -70C9 9EF0 -70CA ECC8 -70CB 9EF1 -70CC 9EF2 -70CD 9EF3 -70CE 9EF4 -70CF 9EF5 -70D0 9EF6 -70D1 9EF7 -70D2 9EF8 -70D3 9EF9 -70D4 9EFA -70D5 9EFB -70D6 9EFC -70D7 9EFD -70D8 BAE6 -70D9 C0D3 -70DA 9EFE -70DB D6F2 -70DC 9F40 -70DD 9F41 -70DE 9F42 -70DF D1CC -70E0 9F43 -70E1 9F44 -70E2 9F45 -70E3 9F46 -70E4 BFBE -70E5 9F47 -70E6 B7B3 -70E7 C9D5 -70E8 ECC7 -70E9 BBE2 -70EA 9F48 -70EB CCCC -70EC BDFD -70ED C8C8 -70EE 9F49 -70EF CFA9 -70F0 9F4A -70F1 9F4B -70F2 9F4C -70F3 9F4D -70F4 9F4E -70F5 9F4F -70F6 9F50 -70F7 CDE9 -70F8 9F51 -70F9 C5EB -70FA 9F52 -70FB 9F53 -70FC 9F54 -70FD B7E9 -70FE 9F55 -70FF 9F56 -7100 9F57 -7101 9F58 -7102 9F59 -7103 9F5A -7104 9F5B -7105 9F5C -7106 9F5D -7107 9F5E -7108 9F5F -7109 D1C9 -710A BAB8 -710B 9F60 -710C 9F61 -710D 9F62 -710E 9F63 -710F 9F64 -7110 ECC9 -7111 9F65 -7112 9F66 -7113 ECCA -7114 9F67 -7115 BBC0 -7116 ECCB -7117 9F68 -7118 ECE2 -7119 B1BA -711A B7D9 -711B 9F69 -711C 9F6A -711D 9F6B -711E 9F6C -711F 9F6D -7120 9F6E -7121 9F6F -7122 9F70 -7123 9F71 -7124 9F72 -7125 9F73 -7126 BDB9 -7127 9F74 -7128 9F75 -7129 9F76 -712A 9F77 -712B 9F78 -712C 9F79 -712D 9F7A -712E 9F7B -712F ECCC -7130 D1E6 -7131 ECCD -7132 9F7C -7133 9F7D -7134 9F7E -7135 9F80 -7136 C8BB -7137 9F81 -7138 9F82 -7139 9F83 -713A 9F84 -713B 9F85 -713C 9F86 -713D 9F87 -713E 9F88 -713F 9F89 -7140 9F8A -7141 9F8B -7142 9F8C -7143 9F8D -7144 9F8E -7145 ECD1 -7146 9F8F -7147 9F90 -7148 9F91 -7149 9F92 -714A ECD3 -714B 9F93 -714C BBCD -714D 9F94 -714E BCE5 -714F 9F95 -7150 9F96 -7151 9F97 -7152 9F98 -7153 9F99 -7154 9F9A -7155 9F9B -7156 9F9C -7157 9F9D -7158 9F9E -7159 9F9F -715A 9FA0 -715B 9FA1 -715C ECCF -715D 9FA2 -715E C9B7 -715F 9FA3 -7160 9FA4 -7161 9FA5 -7162 9FA6 -7163 9FA7 -7164 C3BA -7165 9FA8 -7166 ECE3 -7167 D5D5 -7168 ECD0 -7169 9FA9 -716A 9FAA -716B 9FAB -716C 9FAC -716D 9FAD -716E D6F3 -716F 9FAE -7170 9FAF -7171 9FB0 -7172 ECD2 -7173 ECCE -7174 9FB1 -7175 9FB2 -7176 9FB3 -7177 9FB4 -7178 ECD4 -7179 9FB5 -717A ECD5 -717B 9FB6 -717C 9FB7 -717D C9BF -717E 9FB8 -717F 9FB9 -7180 9FBA -7181 9FBB -7182 9FBC -7183 9FBD -7184 CFA8 -7185 9FBE -7186 9FBF -7187 9FC0 -7188 9FC1 -7189 9FC2 -718A D0DC -718B 9FC3 -718C 9FC4 -718D 9FC5 -718E 9FC6 -718F D1AC -7190 9FC7 -7191 9FC8 -7192 9FC9 -7193 9FCA -7194 C8DB -7195 9FCB -7196 9FCC -7197 9FCD -7198 ECD6 -7199 CEF5 -719A 9FCE -719B 9FCF -719C 9FD0 -719D 9FD1 -719E 9FD2 -719F CAEC -71A0 ECDA -71A1 9FD3 -71A2 9FD4 -71A3 9FD5 -71A4 9FD6 -71A5 9FD7 -71A6 9FD8 -71A7 9FD9 -71A8 ECD9 -71A9 9FDA -71AA 9FDB -71AB 9FDC -71AC B0BE -71AD 9FDD -71AE 9FDE -71AF 9FDF -71B0 9FE0 -71B1 9FE1 -71B2 9FE2 -71B3 ECD7 -71B4 9FE3 -71B5 ECD8 -71B6 9FE4 -71B7 9FE5 -71B8 9FE6 -71B9 ECE4 -71BA 9FE7 -71BB 9FE8 -71BC 9FE9 -71BD 9FEA -71BE 9FEB -71BF 9FEC -71C0 9FED -71C1 9FEE -71C2 9FEF -71C3 C8BC -71C4 9FF0 -71C5 9FF1 -71C6 9FF2 -71C7 9FF3 -71C8 9FF4 -71C9 9FF5 -71CA 9FF6 -71CB 9FF7 -71CC 9FF8 -71CD 9FF9 -71CE C1C7 -71CF 9FFA -71D0 9FFB -71D1 9FFC -71D2 9FFD -71D3 9FFE -71D4 ECDC -71D5 D1E0 -71D6 A040 -71D7 A041 -71D8 A042 -71D9 A043 -71DA A044 -71DB A045 -71DC A046 -71DD A047 -71DE A048 -71DF A049 -71E0 ECDB -71E1 A04A -71E2 A04B -71E3 A04C -71E4 A04D -71E5 D4EF -71E6 A04E -71E7 ECDD -71E8 A04F -71E9 A050 -71EA A051 -71EB A052 -71EC A053 -71ED A054 -71EE DBC6 -71EF A055 -71F0 A056 -71F1 A057 -71F2 A058 -71F3 A059 -71F4 A05A -71F5 A05B -71F6 A05C -71F7 A05D -71F8 A05E -71F9 ECDE -71FA A05F -71FB A060 -71FC A061 -71FD A062 -71FE A063 -71FF A064 -7200 A065 -7201 A066 -7202 A067 -7203 A068 -7204 A069 -7205 A06A -7206 B1AC -7207 A06B -7208 A06C -7209 A06D -720A A06E 
-720B A06F -720C A070 -720D A071 -720E A072 -720F A073 -7210 A074 -7211 A075 -7212 A076 -7213 A077 -7214 A078 -7215 A079 -7216 A07A -7217 A07B -7218 A07C -7219 A07D -721A A07E -721B A080 -721C A081 -721D ECDF -721E A082 -721F A083 -7220 A084 -7221 A085 -7222 A086 -7223 A087 -7224 A088 -7225 A089 -7226 A08A -7227 A08B -7228 ECE0 -7229 A08C -722A D7A6 -722B A08D -722C C5C0 -722D A08E -722E A08F -722F A090 -7230 EBBC -7231 B0AE -7232 A091 -7233 A092 -7234 A093 -7235 BEF4 -7236 B8B8 -7237 D2AF -7238 B0D6 -7239 B5F9 -723A A094 -723B D8B3 -723C A095 -723D CBAC -723E A096 -723F E3DD -7240 A097 -7241 A098 -7242 A099 -7243 A09A -7244 A09B -7245 A09C -7246 A09D -7247 C6AC -7248 B0E6 -7249 A09E -724A A09F -724B A0A0 -724C C5C6 -724D EBB9 -724E A0A1 -724F A0A2 -7250 A0A3 -7251 A0A4 -7252 EBBA -7253 A0A5 -7254 A0A6 -7255 A0A7 -7256 EBBB -7257 A0A8 -7258 A0A9 -7259 D1C0 -725A A0AA -725B C5A3 -725C A0AB -725D EAF2 -725E A0AC -725F C4B2 -7260 A0AD -7261 C4B5 -7262 C0CE -7263 A0AE -7264 A0AF -7265 A0B0 -7266 EAF3 -7267 C4C1 -7268 A0B1 -7269 CEEF -726A A0B2 -726B A0B3 -726C A0B4 -726D A0B5 -726E EAF0 -726F EAF4 -7270 A0B6 -7271 A0B7 -7272 C9FC -7273 A0B8 -7274 A0B9 -7275 C7A3 -7276 A0BA -7277 A0BB -7278 A0BC -7279 CCD8 -727A CEFE -727B A0BD -727C A0BE -727D A0BF -727E EAF5 -727F EAF6 -7280 CFAC -7281 C0E7 -7282 A0C0 -7283 A0C1 -7284 EAF7 -7285 A0C2 -7286 A0C3 -7287 A0C4 -7288 A0C5 -7289 A0C6 -728A B6BF -728B EAF8 -728C A0C7 -728D EAF9 -728E A0C8 -728F EAFA -7290 A0C9 -7291 A0CA -7292 EAFB -7293 A0CB -7294 A0CC -7295 A0CD -7296 A0CE -7297 A0CF -7298 A0D0 -7299 A0D1 -729A A0D2 -729B A0D3 -729C A0D4 -729D A0D5 -729E A0D6 -729F EAF1 -72A0 A0D7 -72A1 A0D8 -72A2 A0D9 -72A3 A0DA -72A4 A0DB -72A5 A0DC -72A6 A0DD -72A7 A0DE -72A8 A0DF -72A9 A0E0 -72AA A0E1 -72AB A0E2 -72AC C8AE -72AD E1EB -72AE A0E3 -72AF B7B8 -72B0 E1EC -72B1 A0E4 -72B2 A0E5 -72B3 A0E6 -72B4 E1ED -72B5 A0E7 -72B6 D7B4 -72B7 E1EE -72B8 E1EF -72B9 D3CC -72BA A0E8 -72BB A0E9 -72BC A0EA -72BD A0EB -72BE A0EC -72BF A0ED -72C0 A0EE -72C1 E1F1 -72C2 BFF1 -72C3 E1F0 -72C4 B5D2 -72C5 A0EF -72C6 A0F0 -72C7 A0F1 -72C8 B1B7 -72C9 A0F2 -72CA A0F3 -72CB A0F4 -72CC A0F5 -72CD E1F3 -72CE E1F2 -72CF A0F6 -72D0 BAFC -72D1 A0F7 -72D2 E1F4 -72D3 A0F8 -72D4 A0F9 -72D5 A0FA -72D6 A0FB -72D7 B9B7 -72D8 A0FC -72D9 BED1 -72DA A0FD -72DB A0FE -72DC AA40 -72DD AA41 -72DE C4FC -72DF AA42 -72E0 BADD -72E1 BDC6 -72E2 AA43 -72E3 AA44 -72E4 AA45 -72E5 AA46 -72E6 AA47 -72E7 AA48 -72E8 E1F5 -72E9 E1F7 -72EA AA49 -72EB AA4A -72EC B6C0 -72ED CFC1 -72EE CAA8 -72EF E1F6 -72F0 D5F8 -72F1 D3FC -72F2 E1F8 -72F3 E1FC -72F4 E1F9 -72F5 AA4B -72F6 AA4C -72F7 E1FA -72F8 C0EA -72F9 AA4D -72FA E1FE -72FB E2A1 -72FC C0C7 -72FD AA4E -72FE AA4F -72FF AA50 -7300 AA51 -7301 E1FB -7302 AA52 -7303 E1FD -7304 AA53 -7305 AA54 -7306 AA55 -7307 AA56 -7308 AA57 -7309 AA58 -730A E2A5 -730B AA59 -730C AA5A -730D AA5B -730E C1D4 -730F AA5C -7310 AA5D -7311 AA5E -7312 AA5F -7313 E2A3 -7314 AA60 -7315 E2A8 -7316 B2FE -7317 E2A2 -7318 AA61 -7319 AA62 -731A AA63 -731B C3CD -731C B2C2 -731D E2A7 -731E E2A6 -731F AA64 -7320 AA65 -7321 E2A4 -7322 E2A9 -7323 AA66 -7324 AA67 -7325 E2AB -7326 AA68 -7327 AA69 -7328 AA6A -7329 D0C9 -732A D6ED -732B C3A8 -732C E2AC -732D AA6B -732E CFD7 -732F AA6C -7330 AA6D -7331 E2AE -7332 AA6E -7333 AA6F -7334 BAEF -7335 AA70 -7336 AA71 -7337 E9E0 -7338 E2AD -7339 E2AA -733A AA72 -733B AA73 -733C AA74 -733D AA75 -733E BBAB -733F D4B3 -7340 AA76 -7341 AA77 -7342 AA78 -7343 AA79 -7344 AA7A -7345 AA7B -7346 AA7C -7347 AA7D -7348 AA7E -7349 AA80 -734A AA81 -734B AA82 -734C AA83 -734D E2B0 
-734E AA84 -734F AA85 -7350 E2AF -7351 AA86 -7352 E9E1 -7353 AA87 -7354 AA88 -7355 AA89 -7356 AA8A -7357 E2B1 -7358 AA8B -7359 AA8C -735A AA8D -735B AA8E -735C AA8F -735D AA90 -735E AA91 -735F AA92 -7360 E2B2 -7361 AA93 -7362 AA94 -7363 AA95 -7364 AA96 -7365 AA97 -7366 AA98 -7367 AA99 -7368 AA9A -7369 AA9B -736A AA9C -736B AA9D -736C E2B3 -736D CCA1 -736E AA9E -736F E2B4 -7370 AA9F -7371 AAA0 -7372 AB40 -7373 AB41 -7374 AB42 -7375 AB43 -7376 AB44 -7377 AB45 -7378 AB46 -7379 AB47 -737A AB48 -737B AB49 -737C AB4A -737D AB4B -737E E2B5 -737F AB4C -7380 AB4D -7381 AB4E -7382 AB4F -7383 AB50 -7384 D0FE -7385 AB51 -7386 AB52 -7387 C2CA -7388 AB53 -7389 D3F1 -738A AB54 -738B CDF5 -738C AB55 -738D AB56 -738E E7E0 -738F AB57 -7390 AB58 -7391 E7E1 -7392 AB59 -7393 AB5A -7394 AB5B -7395 AB5C -7396 BEC1 -7397 AB5D -7398 AB5E -7399 AB5F -739A AB60 -739B C2EA -739C AB61 -739D AB62 -739E AB63 -739F E7E4 -73A0 AB64 -73A1 AB65 -73A2 E7E3 -73A3 AB66 -73A4 AB67 -73A5 AB68 -73A6 AB69 -73A7 AB6A -73A8 AB6B -73A9 CDE6 -73AA AB6C -73AB C3B5 -73AC AB6D -73AD AB6E -73AE E7E2 -73AF BBB7 -73B0 CFD6 -73B1 AB6F -73B2 C1E1 -73B3 E7E9 -73B4 AB70 -73B5 AB71 -73B6 AB72 -73B7 E7E8 -73B8 AB73 -73B9 AB74 -73BA E7F4 -73BB B2A3 -73BC AB75 -73BD AB76 -73BE AB77 -73BF AB78 -73C0 E7EA -73C1 AB79 -73C2 E7E6 -73C3 AB7A -73C4 AB7B -73C5 AB7C -73C6 AB7D -73C7 AB7E -73C8 E7EC -73C9 E7EB -73CA C9BA -73CB AB80 -73CC AB81 -73CD D5E4 -73CE AB82 -73CF E7E5 -73D0 B7A9 -73D1 E7E7 -73D2 AB83 -73D3 AB84 -73D4 AB85 -73D5 AB86 -73D6 AB87 -73D7 AB88 -73D8 AB89 -73D9 E7EE -73DA AB8A -73DB AB8B -73DC AB8C -73DD AB8D -73DE E7F3 -73DF AB8E -73E0 D6E9 -73E1 AB8F -73E2 AB90 -73E3 AB91 -73E4 AB92 -73E5 E7ED -73E6 AB93 -73E7 E7F2 -73E8 AB94 -73E9 E7F1 -73EA AB95 -73EB AB96 -73EC AB97 -73ED B0E0 -73EE AB98 -73EF AB99 -73F0 AB9A -73F1 AB9B -73F2 E7F5 -73F3 AB9C -73F4 AB9D -73F5 AB9E -73F6 AB9F -73F7 ABA0 -73F8 AC40 -73F9 AC41 -73FA AC42 -73FB AC43 -73FC AC44 -73FD AC45 -73FE AC46 -73FF AC47 -7400 AC48 -7401 AC49 -7402 AC4A -7403 C7F2 -7404 AC4B -7405 C0C5 -7406 C0ED -7407 AC4C -7408 AC4D -7409 C1F0 -740A E7F0 -740B AC4E -740C AC4F -740D AC50 -740E AC51 -740F E7F6 -7410 CBF6 -7411 AC52 -7412 AC53 -7413 AC54 -7414 AC55 -7415 AC56 -7416 AC57 -7417 AC58 -7418 AC59 -7419 AC5A -741A E8A2 -741B E8A1 -741C AC5B -741D AC5C -741E AC5D -741F AC5E -7420 AC5F -7421 AC60 -7422 D7C1 -7423 AC61 -7424 AC62 -7425 E7FA -7426 E7F9 -7427 AC63 -7428 E7FB -7429 AC64 -742A E7F7 -742B AC65 -742C E7FE -742D AC66 -742E E7FD -742F AC67 -7430 E7FC -7431 AC68 -7432 AC69 -7433 C1D5 -7434 C7D9 -7435 C5FD -7436 C5C3 -7437 AC6A -7438 AC6B -7439 AC6C -743A AC6D -743B AC6E -743C C7ED -743D AC6F -743E AC70 -743F AC71 -7440 AC72 -7441 E8A3 -7442 AC73 -7443 AC74 -7444 AC75 -7445 AC76 -7446 AC77 -7447 AC78 -7448 AC79 -7449 AC7A -744A AC7B -744B AC7C -744C AC7D -744D AC7E -744E AC80 -744F AC81 -7450 AC82 -7451 AC83 -7452 AC84 -7453 AC85 -7454 AC86 -7455 E8A6 -7456 AC87 -7457 E8A5 -7458 AC88 -7459 E8A7 -745A BAF7 -745B E7F8 -745C E8A4 -745D AC89 -745E C8F0 -745F C9AA -7460 AC8A -7461 AC8B -7462 AC8C -7463 AC8D -7464 AC8E -7465 AC8F -7466 AC90 -7467 AC91 -7468 AC92 -7469 AC93 -746A AC94 -746B AC95 -746C AC96 -746D E8A9 -746E AC97 -746F AC98 -7470 B9E5 -7471 AC99 -7472 AC9A -7473 AC9B -7474 AC9C -7475 AC9D -7476 D1FE -7477 E8A8 -7478 AC9E -7479 AC9F -747A ACA0 -747B AD40 -747C AD41 -747D AD42 -747E E8AA -747F AD43 -7480 E8AD -7481 E8AE -7482 AD44 -7483 C1A7 -7484 AD45 -7485 AD46 -7486 AD47 -7487 E8AF -7488 AD48 -7489 AD49 -748A AD4A -748B E8B0 -748C AD4B -748D AD4C -748E E8AC -748F AD4D -7490 E8B4 
-7491 AD4E
[... deleted mapping lines elided: one "UNICODE GBK" hex pair per line for each consecutive code point from U+7491 through U+8F0F (roughly 6,800 entries), removed from a Unicode-to-GBK code-page conversion table ...]
-8F0F DD6A
-8F10 DD6B -8F11 DD6C -8F12 DD6D -8F13 DD6E -8F14 DD6F -8F15 DD70 -8F16 DD71 -8F17 DD72 -8F18 DD73 -8F19 DD74 -8F1A DD75 -8F1B DD76 -8F1C DD77 -8F1D DD78 -8F1E DD79 -8F1F DD7A -8F20 DD7B -8F21 DD7C -8F22 DD7D -8F23 DD7E -8F24 DD80 -8F25 DD81 -8F26 DD82 -8F27 DD83 -8F28 DD84 -8F29 DD85 -8F2A DD86 -8F2B DD87 -8F2C DD88 -8F2D DD89 -8F2E DD8A -8F2F DD8B -8F30 DD8C -8F31 DD8D -8F32 DD8E -8F33 DD8F -8F34 DD90 -8F35 DD91 -8F36 DD92 -8F37 DD93 -8F38 DD94 -8F39 DD95 -8F3A DD96 -8F3B DD97 -8F3C DD98 -8F3D DD99 -8F3E DD9A -8F3F DD9B -8F40 DD9C -8F41 DD9D -8F42 DD9E -8F43 DD9F -8F44 DDA0 -8F45 DE40 -8F46 DE41 -8F47 DE42 -8F48 DE43 -8F49 DE44 -8F4A DE45 -8F4B DE46 -8F4C DE47 -8F4D DE48 -8F4E DE49 -8F4F DE4A -8F50 DE4B -8F51 DE4C -8F52 DE4D -8F53 DE4E -8F54 DE4F -8F55 DE50 -8F56 DE51 -8F57 DE52 -8F58 DE53 -8F59 DE54 -8F5A DE55 -8F5B DE56 -8F5C DE57 -8F5D DE58 -8F5E DE59 -8F5F DE5A -8F60 DE5B -8F61 DE5C -8F62 DE5D -8F63 DE5E -8F64 DE5F -8F65 DE60 -8F66 B3B5 -8F67 D4FE -8F68 B9EC -8F69 D0F9 -8F6A DE61 -8F6B E9ED -8F6C D7AA -8F6D E9EE -8F6E C2D6 -8F6F C8ED -8F70 BAE4 -8F71 E9EF -8F72 E9F0 -8F73 E9F1 -8F74 D6E1 -8F75 E9F2 -8F76 E9F3 -8F77 E9F5 -8F78 E9F4 -8F79 E9F6 -8F7A E9F7 -8F7B C7E1 -8F7C E9F8 -8F7D D4D8 -8F7E E9F9 -8F7F BDCE -8F80 DE62 -8F81 E9FA -8F82 E9FB -8F83 BDCF -8F84 E9FC -8F85 B8A8 -8F86 C1BE -8F87 E9FD -8F88 B1B2 -8F89 BBD4 -8F8A B9F5 -8F8B E9FE -8F8C DE63 -8F8D EAA1 -8F8E EAA2 -8F8F EAA3 -8F90 B7F8 -8F91 BCAD -8F92 DE64 -8F93 CAE4 -8F94 E0CE -8F95 D4AF -8F96 CFBD -8F97 D5B7 -8F98 EAA4 -8F99 D5DE -8F9A EAA5 -8F9B D0C1 -8F9C B9BC -8F9D DE65 -8F9E B4C7 -8F9F B1D9 -8FA0 DE66 -8FA1 DE67 -8FA2 DE68 -8FA3 C0B1 -8FA4 DE69 -8FA5 DE6A -8FA6 DE6B -8FA7 DE6C -8FA8 B1E6 -8FA9 B1E7 -8FAA DE6D -8FAB B1E8 -8FAC DE6E -8FAD DE6F -8FAE DE70 -8FAF DE71 -8FB0 B3BD -8FB1 C8E8 -8FB2 DE72 -8FB3 DE73 -8FB4 DE74 -8FB5 DE75 -8FB6 E5C1 -8FB7 DE76 -8FB8 DE77 -8FB9 B1DF -8FBA DE78 -8FBB DE79 -8FBC DE7A -8FBD C1C9 -8FBE B4EF -8FBF DE7B -8FC0 DE7C -8FC1 C7A8 -8FC2 D3D8 -8FC3 DE7D -8FC4 C6F9 -8FC5 D1B8 -8FC6 DE7E -8FC7 B9FD -8FC8 C2F5 -8FC9 DE80 -8FCA DE81 -8FCB DE82 -8FCC DE83 -8FCD DE84 -8FCE D3AD -8FCF DE85 -8FD0 D4CB -8FD1 BDFC -8FD2 DE86 -8FD3 E5C2 -8FD4 B7B5 -8FD5 E5C3 -8FD6 DE87 -8FD7 DE88 -8FD8 BBB9 -8FD9 D5E2 -8FDA DE89 -8FDB BDF8 -8FDC D4B6 -8FDD CEA5 -8FDE C1AC -8FDF B3D9 -8FE0 DE8A -8FE1 DE8B -8FE2 CCF6 -8FE3 DE8C -8FE4 E5C6 -8FE5 E5C4 -8FE6 E5C8 -8FE7 DE8D -8FE8 E5CA -8FE9 E5C7 -8FEA B5CF -8FEB C6C8 -8FEC DE8E -8FED B5FC -8FEE E5C5 -8FEF DE8F -8FF0 CAF6 -8FF1 DE90 -8FF2 DE91 -8FF3 E5C9 -8FF4 DE92 -8FF5 DE93 -8FF6 DE94 -8FF7 C3D4 -8FF8 B1C5 -8FF9 BCA3 -8FFA DE95 -8FFB DE96 -8FFC DE97 -8FFD D7B7 -8FFE DE98 -8FFF DE99 -9000 CDCB -9001 CBCD -9002 CACA -9003 CCD3 -9004 E5CC -9005 E5CB -9006 C4E6 -9007 DE9A -9008 DE9B -9009 D1A1 -900A D1B7 -900B E5CD -900C DE9C -900D E5D0 -900E DE9D -900F CDB8 -9010 D6F0 -9011 E5CF -9012 B5DD -9013 DE9E -9014 CDBE -9015 DE9F -9016 E5D1 -9017 B6BA -9018 DEA0 -9019 DF40 -901A CDA8 -901B B9E4 -901C DF41 -901D CAC5 -901E B3D1 -901F CBD9 -9020 D4EC -9021 E5D2 -9022 B7EA -9023 DF42 -9024 DF43 -9025 DF44 -9026 E5CE -9027 DF45 -9028 DF46 -9029 DF47 -902A DF48 -902B DF49 -902C DF4A -902D E5D5 -902E B4FE -902F E5D6 -9030 DF4B -9031 DF4C -9032 DF4D -9033 DF4E -9034 DF4F -9035 E5D3 -9036 E5D4 -9037 DF50 -9038 D2DD -9039 DF51 -903A DF52 -903B C2DF -903C B1C6 -903D DF53 -903E D3E2 -903F DF54 -9040 DF55 -9041 B6DD -9042 CBEC -9043 DF56 -9044 E5D7 -9045 DF57 -9046 DF58 -9047 D3F6 -9048 DF59 -9049 DF5A -904A DF5B -904B DF5C -904C DF5D -904D B1E9 -904E DF5E -904F B6F4 -9050 E5DA -9051 E5D8 -9052 E5D9 
-9053 B5C0 -9054 DF5F -9055 DF60 -9056 DF61 -9057 D2C5 -9058 E5DC -9059 DF62 -905A DF63 -905B E5DE -905C DF64 -905D DF65 -905E DF66 -905F DF67 -9060 DF68 -9061 DF69 -9062 E5DD -9063 C7B2 -9064 DF6A -9065 D2A3 -9066 DF6B -9067 DF6C -9068 E5DB -9069 DF6D -906A DF6E -906B DF6F -906C DF70 -906D D4E2 -906E D5DA -906F DF71 -9070 DF72 -9071 DF73 -9072 DF74 -9073 DF75 -9074 E5E0 -9075 D7F1 -9076 DF76 -9077 DF77 -9078 DF78 -9079 DF79 -907A DF7A -907B DF7B -907C DF7C -907D E5E1 -907E DF7D -907F B1DC -9080 D1FB -9081 DF7E -9082 E5E2 -9083 E5E4 -9084 DF80 -9085 DF81 -9086 DF82 -9087 DF83 -9088 E5E3 -9089 DF84 -908A DF85 -908B E5E5 -908C DF86 -908D DF87 -908E DF88 -908F DF89 -9090 DF8A -9091 D2D8 -9092 DF8B -9093 B5CB -9094 DF8C -9095 E7DF -9096 DF8D -9097 DAF5 -9098 DF8E -9099 DAF8 -909A DF8F -909B DAF6 -909C DF90 -909D DAF7 -909E DF91 -909F DF92 -90A0 DF93 -90A1 DAFA -90A2 D0CF -90A3 C4C7 -90A4 DF94 -90A5 DF95 -90A6 B0EE -90A7 DF96 -90A8 DF97 -90A9 DF98 -90AA D0B0 -90AB DF99 -90AC DAF9 -90AD DF9A -90AE D3CA -90AF BAAA -90B0 DBA2 -90B1 C7F1 -90B2 DF9B -90B3 DAFC -90B4 DAFB -90B5 C9DB -90B6 DAFD -90B7 DF9C -90B8 DBA1 -90B9 D7DE -90BA DAFE -90BB C1DA -90BC DF9D -90BD DF9E -90BE DBA5 -90BF DF9F -90C0 DFA0 -90C1 D3F4 -90C2 E040 -90C3 E041 -90C4 DBA7 -90C5 DBA4 -90C6 E042 -90C7 DBA8 -90C8 E043 -90C9 E044 -90CA BDBC -90CB E045 -90CC E046 -90CD E047 -90CE C0C9 -90CF DBA3 -90D0 DBA6 -90D1 D6A3 -90D2 E048 -90D3 DBA9 -90D4 E049 -90D5 E04A -90D6 E04B -90D7 DBAD -90D8 E04C -90D9 E04D -90DA E04E -90DB DBAE -90DC DBAC -90DD BAC2 -90DE E04F -90DF E050 -90E0 E051 -90E1 BFA4 -90E2 DBAB -90E3 E052 -90E4 E053 -90E5 E054 -90E6 DBAA -90E7 D4C7 -90E8 B2BF -90E9 E055 -90EA E056 -90EB DBAF -90EC E057 -90ED B9F9 -90EE E058 -90EF DBB0 -90F0 E059 -90F1 E05A -90F2 E05B -90F3 E05C -90F4 B3BB -90F5 E05D -90F6 E05E -90F7 E05F -90F8 B5A6 -90F9 E060 -90FA E061 -90FB E062 -90FC E063 -90FD B6BC -90FE DBB1 -90FF E064 -9100 E065 -9101 E066 -9102 B6F5 -9103 E067 -9104 DBB2 -9105 E068 -9106 E069 -9107 E06A -9108 E06B -9109 E06C -910A E06D -910B E06E -910C E06F -910D E070 -910E E071 -910F E072 -9110 E073 -9111 E074 -9112 E075 -9113 E076 -9114 E077 -9115 E078 -9116 E079 -9117 E07A -9118 E07B -9119 B1C9 -911A E07C -911B E07D -911C E07E -911D E080 -911E DBB4 -911F E081 -9120 E082 -9121 E083 -9122 DBB3 -9123 DBB5 -9124 E084 -9125 E085 -9126 E086 -9127 E087 -9128 E088 -9129 E089 -912A E08A -912B E08B -912C E08C -912D E08D -912E E08E -912F DBB7 -9130 E08F -9131 DBB6 -9132 E090 -9133 E091 -9134 E092 -9135 E093 -9136 E094 -9137 E095 -9138 E096 -9139 DBB8 -913A E097 -913B E098 -913C E099 -913D E09A -913E E09B -913F E09C -9140 E09D -9141 E09E -9142 E09F -9143 DBB9 -9144 E0A0 -9145 E140 -9146 DBBA -9147 E141 -9148 E142 -9149 D3CF -914A F4FA -914B C7F5 -914C D7C3 -914D C5E4 -914E F4FC -914F F4FD -9150 F4FB -9151 E143 -9152 BEC6 -9153 E144 -9154 E145 -9155 E146 -9156 E147 -9157 D0EF -9158 E148 -9159 E149 -915A B7D3 -915B E14A -915C E14B -915D D4CD -915E CCAA -915F E14C -9160 E14D -9161 F5A2 -9162 F5A1 -9163 BAA8 -9164 F4FE -9165 CBD6 -9166 E14E -9167 E14F -9168 E150 -9169 F5A4 -916A C0D2 -916B E151 -916C B3EA -916D E152 -916E CDAA -916F F5A5 -9170 F5A3 -9171 BDB4 -9172 F5A8 -9173 E153 -9174 F5A9 -9175 BDCD -9176 C3B8 -9177 BFE1 -9178 CBE1 -9179 F5AA -917A E154 -917B E155 -917C E156 -917D F5A6 -917E F5A7 -917F C4F0 -9180 E157 -9181 E158 -9182 E159 -9183 E15A -9184 E15B -9185 F5AC -9186 E15C -9187 B4BC -9188 E15D -9189 D7ED -918A E15E -918B B4D7 -918C F5AB -918D F5AE -918E E15F -918F E160 -9190 F5AD -9191 F5AF -9192 D0D1 -9193 E161 -9194 E162 -9195 E163 
-9196 E164 -9197 E165 -9198 E166 -9199 E167 -919A C3D1 -919B C8A9 -919C E168 -919D E169 -919E E16A -919F E16B -91A0 E16C -91A1 E16D -91A2 F5B0 -91A3 F5B1 -91A4 E16E -91A5 E16F -91A6 E170 -91A7 E171 -91A8 E172 -91A9 E173 -91AA F5B2 -91AB E174 -91AC E175 -91AD F5B3 -91AE F5B4 -91AF F5B5 -91B0 E176 -91B1 E177 -91B2 E178 -91B3 E179 -91B4 F5B7 -91B5 F5B6 -91B6 E17A -91B7 E17B -91B8 E17C -91B9 E17D -91BA F5B8 -91BB E17E -91BC E180 -91BD E181 -91BE E182 -91BF E183 -91C0 E184 -91C1 E185 -91C2 E186 -91C3 E187 -91C4 E188 -91C5 E189 -91C6 E18A -91C7 B2C9 -91C8 E18B -91C9 D3D4 -91CA CACD -91CB E18C -91CC C0EF -91CD D6D8 -91CE D2B0 -91CF C1BF -91D0 E18D -91D1 BDF0 -91D2 E18E -91D3 E18F -91D4 E190 -91D5 E191 -91D6 E192 -91D7 E193 -91D8 E194 -91D9 E195 -91DA E196 -91DB E197 -91DC B8AA -91DD E198 -91DE E199 -91DF E19A -91E0 E19B -91E1 E19C -91E2 E19D -91E3 E19E -91E4 E19F -91E5 E1A0 -91E6 E240 -91E7 E241 -91E8 E242 -91E9 E243 -91EA E244 -91EB E245 -91EC E246 -91ED E247 -91EE E248 -91EF E249 -91F0 E24A -91F1 E24B -91F2 E24C -91F3 E24D -91F4 E24E -91F5 E24F -91F6 E250 -91F7 E251 -91F8 E252 -91F9 E253 -91FA E254 -91FB E255 -91FC E256 -91FD E257 -91FE E258 -91FF E259 -9200 E25A -9201 E25B -9202 E25C -9203 E25D -9204 E25E -9205 E25F -9206 E260 -9207 E261 -9208 E262 -9209 E263 -920A E264 -920B E265 -920C E266 -920D E267 -920E E268 -920F E269 -9210 E26A -9211 E26B -9212 E26C -9213 E26D -9214 E26E -9215 E26F -9216 E270 -9217 E271 -9218 E272 -9219 E273 -921A E274 -921B E275 -921C E276 -921D E277 -921E E278 -921F E279 -9220 E27A -9221 E27B -9222 E27C -9223 E27D -9224 E27E -9225 E280 -9226 E281 -9227 E282 -9228 E283 -9229 E284 -922A E285 -922B E286 -922C E287 -922D E288 -922E E289 -922F E28A -9230 E28B -9231 E28C -9232 E28D -9233 E28E -9234 E28F -9235 E290 -9236 E291 -9237 E292 -9238 E293 -9239 E294 -923A E295 -923B E296 -923C E297 -923D E298 -923E E299 -923F E29A -9240 E29B -9241 E29C -9242 E29D -9243 E29E -9244 E29F -9245 E2A0 -9246 E340 -9247 E341 -9248 E342 -9249 E343 -924A E344 -924B E345 -924C E346 -924D E347 -924E E348 -924F E349 -9250 E34A -9251 E34B -9252 E34C -9253 E34D -9254 E34E -9255 E34F -9256 E350 -9257 E351 -9258 E352 -9259 E353 -925A E354 -925B E355 -925C E356 -925D E357 -925E E358 -925F E359 -9260 E35A -9261 E35B -9262 E35C -9263 E35D -9264 E35E -9265 E35F -9266 E360 -9267 E361 -9268 E362 -9269 E363 -926A E364 -926B E365 -926C E366 -926D E367 -926E E368 -926F E369 -9270 E36A -9271 E36B -9272 E36C -9273 E36D -9274 BCF8 -9275 E36E -9276 E36F -9277 E370 -9278 E371 -9279 E372 -927A E373 -927B E374 -927C E375 -927D E376 -927E E377 -927F E378 -9280 E379 -9281 E37A -9282 E37B -9283 E37C -9284 E37D -9285 E37E -9286 E380 -9287 E381 -9288 E382 -9289 E383 -928A E384 -928B E385 -928C E386 -928D E387 -928E F6C6 -928F E388 -9290 E389 -9291 E38A -9292 E38B -9293 E38C -9294 E38D -9295 E38E -9296 E38F -9297 E390 -9298 E391 -9299 E392 -929A E393 -929B E394 -929C E395 -929D E396 -929E E397 -929F E398 -92A0 E399 -92A1 E39A -92A2 E39B -92A3 E39C -92A4 E39D -92A5 E39E -92A6 E39F -92A7 E3A0 -92A8 E440 -92A9 E441 -92AA E442 -92AB E443 -92AC E444 -92AD E445 -92AE F6C7 -92AF E446 -92B0 E447 -92B1 E448 -92B2 E449 -92B3 E44A -92B4 E44B -92B5 E44C -92B6 E44D -92B7 E44E -92B8 E44F -92B9 E450 -92BA E451 -92BB E452 -92BC E453 -92BD E454 -92BE E455 -92BF E456 -92C0 E457 -92C1 E458 -92C2 E459 -92C3 E45A -92C4 E45B -92C5 E45C -92C6 E45D -92C7 E45E -92C8 F6C8 -92C9 E45F -92CA E460 -92CB E461 -92CC E462 -92CD E463 -92CE E464 -92CF E465 -92D0 E466 -92D1 E467 -92D2 E468 -92D3 E469 -92D4 E46A -92D5 E46B -92D6 E46C -92D7 E46D -92D8 E46E 
-92D9 E46F -92DA E470 -92DB E471 -92DC E472 -92DD E473 -92DE E474 -92DF E475 -92E0 E476 -92E1 E477 -92E2 E478 -92E3 E479 -92E4 E47A -92E5 E47B -92E6 E47C -92E7 E47D -92E8 E47E -92E9 E480 -92EA E481 -92EB E482 -92EC E483 -92ED E484 -92EE E485 -92EF E486 -92F0 E487 -92F1 E488 -92F2 E489 -92F3 E48A -92F4 E48B -92F5 E48C -92F6 E48D -92F7 E48E -92F8 E48F -92F9 E490 -92FA E491 -92FB E492 -92FC E493 -92FD E494 -92FE E495 -92FF E496 -9300 E497 -9301 E498 -9302 E499 -9303 E49A -9304 E49B -9305 E49C -9306 E49D -9307 E49E -9308 E49F -9309 E4A0 -930A E540 -930B E541 -930C E542 -930D E543 -930E E544 -930F E545 -9310 E546 -9311 E547 -9312 E548 -9313 E549 -9314 E54A -9315 E54B -9316 E54C -9317 E54D -9318 E54E -9319 E54F -931A E550 -931B E551 -931C E552 -931D E553 -931E E554 -931F E555 -9320 E556 -9321 E557 -9322 E558 -9323 E559 -9324 E55A -9325 E55B -9326 E55C -9327 E55D -9328 E55E -9329 E55F -932A E560 -932B E561 -932C E562 -932D E563 -932E E564 -932F E565 -9330 E566 -9331 E567 -9332 E568 -9333 E569 -9334 E56A -9335 E56B -9336 E56C -9337 E56D -9338 E56E -9339 E56F -933A E570 -933B E571 -933C E572 -933D E573 -933E F6C9 -933F E574 -9340 E575 -9341 E576 -9342 E577 -9343 E578 -9344 E579 -9345 E57A -9346 E57B -9347 E57C -9348 E57D -9349 E57E -934A E580 -934B E581 -934C E582 -934D E583 -934E E584 -934F E585 -9350 E586 -9351 E587 -9352 E588 -9353 E589 -9354 E58A -9355 E58B -9356 E58C -9357 E58D -9358 E58E -9359 E58F -935A E590 -935B E591 -935C E592 -935D E593 -935E E594 -935F E595 -9360 E596 -9361 E597 -9362 E598 -9363 E599 -9364 E59A -9365 E59B -9366 E59C -9367 E59D -9368 E59E -9369 E59F -936A F6CA -936B E5A0 -936C E640 -936D E641 -936E E642 -936F E643 -9370 E644 -9371 E645 -9372 E646 -9373 E647 -9374 E648 -9375 E649 -9376 E64A -9377 E64B -9378 E64C -9379 E64D -937A E64E -937B E64F -937C E650 -937D E651 -937E E652 -937F E653 -9380 E654 -9381 E655 -9382 E656 -9383 E657 -9384 E658 -9385 E659 -9386 E65A -9387 E65B -9388 E65C -9389 E65D -938A E65E -938B E65F -938C E660 -938D E661 -938E E662 -938F F6CC -9390 E663 -9391 E664 -9392 E665 -9393 E666 -9394 E667 -9395 E668 -9396 E669 -9397 E66A -9398 E66B -9399 E66C -939A E66D -939B E66E -939C E66F -939D E670 -939E E671 -939F E672 -93A0 E673 -93A1 E674 -93A2 E675 -93A3 E676 -93A4 E677 -93A5 E678 -93A6 E679 -93A7 E67A -93A8 E67B -93A9 E67C -93AA E67D -93AB E67E -93AC E680 -93AD E681 -93AE E682 -93AF E683 -93B0 E684 -93B1 E685 -93B2 E686 -93B3 E687 -93B4 E688 -93B5 E689 -93B6 E68A -93B7 E68B -93B8 E68C -93B9 E68D -93BA E68E -93BB E68F -93BC E690 -93BD E691 -93BE E692 -93BF E693 -93C0 E694 -93C1 E695 -93C2 E696 -93C3 E697 -93C4 E698 -93C5 E699 -93C6 E69A -93C7 E69B -93C8 E69C -93C9 E69D -93CA F6CB -93CB E69E -93CC E69F -93CD E6A0 -93CE E740 -93CF E741 -93D0 E742 -93D1 E743 -93D2 E744 -93D3 E745 -93D4 E746 -93D5 E747 -93D6 F7E9 -93D7 E748 -93D8 E749 -93D9 E74A -93DA E74B -93DB E74C -93DC E74D -93DD E74E -93DE E74F -93DF E750 -93E0 E751 -93E1 E752 -93E2 E753 -93E3 E754 -93E4 E755 -93E5 E756 -93E6 E757 -93E7 E758 -93E8 E759 -93E9 E75A -93EA E75B -93EB E75C -93EC E75D -93ED E75E -93EE E75F -93EF E760 -93F0 E761 -93F1 E762 -93F2 E763 -93F3 E764 -93F4 E765 -93F5 E766 -93F6 E767 -93F7 E768 -93F8 E769 -93F9 E76A -93FA E76B -93FB E76C -93FC E76D -93FD E76E -93FE E76F -93FF E770 -9400 E771 -9401 E772 -9402 E773 -9403 E774 -9404 E775 -9405 E776 -9406 E777 -9407 E778 -9408 E779 -9409 E77A -940A E77B -940B E77C -940C E77D -940D E77E -940E E780 -940F E781 -9410 E782 -9411 E783 -9412 E784 -9413 E785 -9414 E786 -9415 E787 -9416 E788 -9417 E789 -9418 E78A -9419 E78B -941A E78C -941B E78D 
-941C E78E -941D E78F -941E E790 -941F E791 -9420 E792 -9421 E793 -9422 E794 -9423 E795 -9424 E796 -9425 E797 -9426 E798 -9427 E799 -9428 E79A -9429 E79B -942A E79C -942B E79D -942C E79E -942D E79F -942E E7A0 -942F E840 -9430 E841 -9431 E842 -9432 E843 -9433 E844 -9434 E845 -9435 E846 -9436 E847 -9437 E848 -9438 E849 -9439 E84A -943A E84B -943B E84C -943C E84D -943D E84E -943E F6CD -943F E84F -9440 E850 -9441 E851 -9442 E852 -9443 E853 -9444 E854 -9445 E855 -9446 E856 -9447 E857 -9448 E858 -9449 E859 -944A E85A -944B E85B -944C E85C -944D E85D -944E E85E -944F E85F -9450 E860 -9451 E861 -9452 E862 -9453 E863 -9454 E864 -9455 E865 -9456 E866 -9457 E867 -9458 E868 -9459 E869 -945A E86A -945B E86B -945C E86C -945D E86D -945E E86E -945F E86F -9460 E870 -9461 E871 -9462 E872 -9463 E873 -9464 E874 -9465 E875 -9466 E876 -9467 E877 -9468 E878 -9469 E879 -946A E87A -946B F6CE -946C E87B -946D E87C -946E E87D -946F E87E -9470 E880 -9471 E881 -9472 E882 -9473 E883 -9474 E884 -9475 E885 -9476 E886 -9477 E887 -9478 E888 -9479 E889 -947A E88A -947B E88B -947C E88C -947D E88D -947E E88E -947F E88F -9480 E890 -9481 E891 -9482 E892 -9483 E893 -9484 E894 -9485 EEC4 -9486 EEC5 -9487 EEC6 -9488 D5EB -9489 B6A4 -948A EEC8 -948B EEC7 -948C EEC9 -948D EECA -948E C7A5 -948F EECB -9490 EECC -9491 E895 -9492 B7B0 -9493 B5F6 -9494 EECD -9495 EECF -9496 E896 -9497 EECE -9498 E897 -9499 B8C6 -949A EED0 -949B EED1 -949C EED2 -949D B6DB -949E B3AE -949F D6D3 -94A0 C4C6 -94A1 B1B5 -94A2 B8D6 -94A3 EED3 -94A4 EED4 -94A5 D4BF -94A6 C7D5 -94A7 BEFB -94A8 CED9 -94A9 B9B3 -94AA EED6 -94AB EED5 -94AC EED8 -94AD EED7 -94AE C5A5 -94AF EED9 -94B0 EEDA -94B1 C7AE -94B2 EEDB -94B3 C7AF -94B4 EEDC -94B5 B2A7 -94B6 EEDD -94B7 EEDE -94B8 EEDF -94B9 EEE0 -94BA EEE1 -94BB D7EA -94BC EEE2 -94BD EEE3 -94BE BCD8 -94BF EEE4 -94C0 D3CB -94C1 CCFA -94C2 B2AC -94C3 C1E5 -94C4 EEE5 -94C5 C7A6 -94C6 C3AD -94C7 E898 -94C8 EEE6 -94C9 EEE7 -94CA EEE8 -94CB EEE9 -94CC EEEA -94CD EEEB -94CE EEEC -94CF E899 -94D0 EEED -94D1 EEEE -94D2 EEEF -94D3 E89A -94D4 E89B -94D5 EEF0 -94D6 EEF1 -94D7 EEF2 -94D8 EEF4 -94D9 EEF3 -94DA E89C -94DB EEF5 -94DC CDAD -94DD C2C1 -94DE EEF6 -94DF EEF7 -94E0 EEF8 -94E1 D5A1 -94E2 EEF9 -94E3 CFB3 -94E4 EEFA -94E5 EEFB -94E6 E89D -94E7 EEFC -94E8 EEFD -94E9 EFA1 -94EA EEFE -94EB EFA2 -94EC B8F5 -94ED C3FA -94EE EFA3 -94EF EFA4 -94F0 BDC2 -94F1 D2BF -94F2 B2F9 -94F3 EFA5 -94F4 EFA6 -94F5 EFA7 -94F6 D2F8 -94F7 EFA8 -94F8 D6FD -94F9 EFA9 -94FA C6CC -94FB E89E -94FC EFAA -94FD EFAB -94FE C1B4 -94FF EFAC -9500 CFFA -9501 CBF8 -9502 EFAE -9503 EFAD -9504 B3FA -9505 B9F8 -9506 EFAF -9507 EFB0 -9508 D0E2 -9509 EFB1 -950A EFB2 -950B B7E6 -950C D0BF -950D EFB3 -950E EFB4 -950F EFB5 -9510 C8F1 -9511 CCE0 -9512 EFB6 -9513 EFB7 -9514 EFB8 -9515 EFB9 -9516 EFBA -9517 D5E0 -9518 EFBB -9519 B4ED -951A C3AA -951B EFBC -951C E89F -951D EFBD -951E EFBE -951F EFBF -9520 E8A0 -9521 CEFD -9522 EFC0 -9523 C2E0 -9524 B4B8 -9525 D7B6 -9526 BDF5 -9527 E940 -9528 CFC7 -9529 EFC3 -952A EFC1 -952B EFC2 -952C EFC4 -952D B6A7 -952E BCFC -952F BEE2 -9530 C3CC -9531 EFC5 -9532 EFC6 -9533 E941 -9534 EFC7 -9535 EFCF -9536 EFC8 -9537 EFC9 -9538 EFCA -9539 C7C2 -953A EFF1 -953B B6CD -953C EFCB -953D E942 -953E EFCC -953F EFCD -9540 B6C6 -9541 C3BE -9542 EFCE -9543 E943 -9544 EFD0 -9545 EFD1 -9546 EFD2 -9547 D5F2 -9548 E944 -9549 EFD3 -954A C4F7 -954B E945 -954C EFD4 -954D C4F8 -954E EFD5 -954F EFD6 -9550 B8E4 -9551 B0F7 -9552 EFD7 -9553 EFD8 -9554 EFD9 -9555 E946 -9556 EFDA -9557 EFDB -9558 EFDC -9559 EFDD -955A E947 -955B EFDE -955C BEB5 -955D EFE1 -955E EFDF 
-955F EFE0 -9560 E948 -9561 EFE2 -9562 EFE3 -9563 C1CD -9564 EFE4 -9565 EFE5 -9566 EFE6 -9567 EFE7 -9568 EFE8 -9569 EFE9 -956A EFEA -956B EFEB -956C EFEC -956D C0D8 -956E E949 -956F EFED -9570 C1AD -9571 EFEE -9572 EFEF -9573 EFF0 -9574 E94A -9575 E94B -9576 CFE2 -9577 E94C -9578 E94D -9579 E94E -957A E94F -957B E950 -957C E951 -957D E952 -957E E953 -957F B3A4 -9580 E954 -9581 E955 -9582 E956 -9583 E957 -9584 E958 -9585 E959 -9586 E95A -9587 E95B -9588 E95C -9589 E95D -958A E95E -958B E95F -958C E960 -958D E961 -958E E962 -958F E963 -9590 E964 -9591 E965 -9592 E966 -9593 E967 -9594 E968 -9595 E969 -9596 E96A -9597 E96B -9598 E96C -9599 E96D -959A E96E -959B E96F -959C E970 -959D E971 -959E E972 -959F E973 -95A0 E974 -95A1 E975 -95A2 E976 -95A3 E977 -95A4 E978 -95A5 E979 -95A6 E97A -95A7 E97B -95A8 E97C -95A9 E97D -95AA E97E -95AB E980 -95AC E981 -95AD E982 -95AE E983 -95AF E984 -95B0 E985 -95B1 E986 -95B2 E987 -95B3 E988 -95B4 E989 -95B5 E98A -95B6 E98B -95B7 E98C -95B8 E98D -95B9 E98E -95BA E98F -95BB E990 -95BC E991 -95BD E992 -95BE E993 -95BF E994 -95C0 E995 -95C1 E996 -95C2 E997 -95C3 E998 -95C4 E999 -95C5 E99A -95C6 E99B -95C7 E99C -95C8 E99D -95C9 E99E -95CA E99F -95CB E9A0 -95CC EA40 -95CD EA41 -95CE EA42 -95CF EA43 -95D0 EA44 -95D1 EA45 -95D2 EA46 -95D3 EA47 -95D4 EA48 -95D5 EA49 -95D6 EA4A -95D7 EA4B -95D8 EA4C -95D9 EA4D -95DA EA4E -95DB EA4F -95DC EA50 -95DD EA51 -95DE EA52 -95DF EA53 -95E0 EA54 -95E1 EA55 -95E2 EA56 -95E3 EA57 -95E4 EA58 -95E5 EA59 -95E6 EA5A -95E7 EA5B -95E8 C3C5 -95E9 E3C5 -95EA C9C1 -95EB E3C6 -95EC EA5C -95ED B1D5 -95EE CECA -95EF B4B3 -95F0 C8F2 -95F1 E3C7 -95F2 CFD0 -95F3 E3C8 -95F4 BCE4 -95F5 E3C9 -95F6 E3CA -95F7 C3C6 -95F8 D5A2 -95F9 C4D6 -95FA B9EB -95FB CEC5 -95FC E3CB -95FD C3F6 -95FE E3CC -95FF EA5D -9600 B7A7 -9601 B8F3 -9602 BAD2 -9603 E3CD -9604 E3CE -9605 D4C4 -9606 E3CF -9607 EA5E -9608 E3D0 -9609 D1CB -960A E3D1 -960B E3D2 -960C E3D3 -960D E3D4 -960E D1D6 -960F E3D5 -9610 B2FB -9611 C0BB -9612 E3D6 -9613 EA5F -9614 C0AB -9615 E3D7 -9616 E3D8 -9617 E3D9 -9618 EA60 -9619 E3DA -961A E3DB -961B EA61 -961C B8B7 -961D DAE2 -961E EA62 -961F B6D3 -9620 EA63 -9621 DAE4 -9622 DAE3 -9623 EA64 -9624 EA65 -9625 EA66 -9626 EA67 -9627 EA68 -9628 EA69 -9629 EA6A -962A DAE6 -962B EA6B -962C EA6C -962D EA6D -962E C8EE -962F EA6E -9630 EA6F -9631 DAE5 -9632 B7C0 -9633 D1F4 -9634 D2F5 -9635 D5F3 -9636 BDD7 -9637 EA70 -9638 EA71 -9639 EA72 -963A EA73 -963B D7E8 -963C DAE8 -963D DAE7 -963E EA74 -963F B0A2 -9640 CDD3 -9641 EA75 -9642 DAE9 -9643 EA76 -9644 B8BD -9645 BCCA -9646 C2BD -9647 C2A4 -9648 B3C2 -9649 DAEA -964A EA77 -964B C2AA -964C C4B0 -964D BDB5 -964E EA78 -964F EA79 -9650 CFDE -9651 EA7A -9652 EA7B -9653 EA7C -9654 DAEB -9655 C9C2 -9656 EA7D -9657 EA7E -9658 EA80 -9659 EA81 -965A EA82 -965B B1DD -965C EA83 -965D EA84 -965E EA85 -965F DAEC -9660 EA86 -9661 B6B8 -9662 D4BA -9663 EA87 -9664 B3FD -9665 EA88 -9666 EA89 -9667 DAED -9668 D4C9 -9669 CFD5 -966A C5E3 -966B EA8A -966C DAEE -966D EA8B -966E EA8C -966F EA8D -9670 EA8E -9671 EA8F -9672 DAEF -9673 EA90 -9674 DAF0 -9675 C1EA -9676 CCD5 -9677 CFDD -9678 EA91 -9679 EA92 -967A EA93 -967B EA94 -967C EA95 -967D EA96 -967E EA97 -967F EA98 -9680 EA99 -9681 EA9A -9682 EA9B -9683 EA9C -9684 EA9D -9685 D3E7 -9686 C2A1 -9687 EA9E -9688 DAF1 -9689 EA9F -968A EAA0 -968B CBE5 -968C EB40 -968D DAF2 -968E EB41 -968F CBE6 -9690 D2FE -9691 EB42 -9692 EB43 -9693 EB44 -9694 B8F4 -9695 EB45 -9696 EB46 -9697 DAF3 -9698 B0AF -9699 CFB6 -969A EB47 -969B EB48 -969C D5CF -969D EB49 -969E EB4A -969F EB4B -96A0 EB4C -96A1 EB4D 
-96A2 EB4E -96A3 EB4F -96A4 EB50 -96A5 EB51 -96A6 EB52 -96A7 CBED -96A8 EB53 -96A9 EB54 -96AA EB55 -96AB EB56 -96AC EB57 -96AD EB58 -96AE EB59 -96AF EB5A -96B0 DAF4 -96B1 EB5B -96B2 EB5C -96B3 E3C4 -96B4 EB5D -96B5 EB5E -96B6 C1A5 -96B7 EB5F -96B8 EB60 -96B9 F6BF -96BA EB61 -96BB EB62 -96BC F6C0 -96BD F6C1 -96BE C4D1 -96BF EB63 -96C0 C8B8 -96C1 D1E3 -96C2 EB64 -96C3 EB65 -96C4 D0DB -96C5 D1C5 -96C6 BCAF -96C7 B9CD -96C8 EB66 -96C9 EFF4 -96CA EB67 -96CB EB68 -96CC B4C6 -96CD D3BA -96CE F6C2 -96CF B3FB -96D0 EB69 -96D1 EB6A -96D2 F6C3 -96D3 EB6B -96D4 EB6C -96D5 B5F1 -96D6 EB6D -96D7 EB6E -96D8 EB6F -96D9 EB70 -96DA EB71 -96DB EB72 -96DC EB73 -96DD EB74 -96DE EB75 -96DF EB76 -96E0 F6C5 -96E1 EB77 -96E2 EB78 -96E3 EB79 -96E4 EB7A -96E5 EB7B -96E6 EB7C -96E7 EB7D -96E8 D3EA -96E9 F6A7 -96EA D1A9 -96EB EB7E -96EC EB80 -96ED EB81 -96EE EB82 -96EF F6A9 -96F0 EB83 -96F1 EB84 -96F2 EB85 -96F3 F6A8 -96F4 EB86 -96F5 EB87 -96F6 C1E3 -96F7 C0D7 -96F8 EB88 -96F9 B1A2 -96FA EB89 -96FB EB8A -96FC EB8B -96FD EB8C -96FE CEED -96FF EB8D -9700 D0E8 -9701 F6AB -9702 EB8E -9703 EB8F -9704 CFF6 -9705 EB90 -9706 F6AA -9707 D5F0 -9708 F6AC -9709 C3B9 -970A EB91 -970B EB92 -970C EB93 -970D BBF4 -970E F6AE -970F F6AD -9710 EB94 -9711 EB95 -9712 EB96 -9713 C4DE -9714 EB97 -9715 EB98 -9716 C1D8 -9717 EB99 -9718 EB9A -9719 EB9B -971A EB9C -971B EB9D -971C CBAA -971D EB9E -971E CFBC -971F EB9F -9720 EBA0 -9721 EC40 -9722 EC41 -9723 EC42 -9724 EC43 -9725 EC44 -9726 EC45 -9727 EC46 -9728 EC47 -9729 EC48 -972A F6AF -972B EC49 -972C EC4A -972D F6B0 -972E EC4B -972F EC4C -9730 F6B1 -9731 EC4D -9732 C2B6 -9733 EC4E -9734 EC4F -9735 EC50 -9736 EC51 -9737 EC52 -9738 B0D4 -9739 C5F9 -973A EC53 -973B EC54 -973C EC55 -973D EC56 -973E F6B2 -973F EC57 -9740 EC58 -9741 EC59 -9742 EC5A -9743 EC5B -9744 EC5C -9745 EC5D -9746 EC5E -9747 EC5F -9748 EC60 -9749 EC61 -974A EC62 -974B EC63 -974C EC64 -974D EC65 -974E EC66 -974F EC67 -9750 EC68 -9751 EC69 -9752 C7E0 -9753 F6A6 -9754 EC6A -9755 EC6B -9756 BEB8 -9757 EC6C -9758 EC6D -9759 BEB2 -975A EC6E -975B B5E5 -975C EC6F -975D EC70 -975E B7C7 -975F EC71 -9760 BFBF -9761 C3D2 -9762 C3E6 -9763 EC72 -9764 EC73 -9765 D8CC -9766 EC74 -9767 EC75 -9768 EC76 -9769 B8EF -976A EC77 -976B EC78 -976C EC79 -976D EC7A -976E EC7B -976F EC7C -9770 EC7D -9771 EC7E -9772 EC80 -9773 BDF9 -9774 D1A5 -9775 EC81 -9776 B0D0 -9777 EC82 -9778 EC83 -9779 EC84 -977A EC85 -977B EC86 -977C F7B0 -977D EC87 -977E EC88 -977F EC89 -9780 EC8A -9781 EC8B -9782 EC8C -9783 EC8D -9784 EC8E -9785 F7B1 -9786 EC8F -9787 EC90 -9788 EC91 -9789 EC92 -978A EC93 -978B D0AC -978C EC94 -978D B0B0 -978E EC95 -978F EC96 -9790 EC97 -9791 F7B2 -9792 F7B3 -9793 EC98 -9794 F7B4 -9795 EC99 -9796 EC9A -9797 EC9B -9798 C7CA -9799 EC9C -979A EC9D -979B EC9E -979C EC9F -979D ECA0 -979E ED40 -979F ED41 -97A0 BECF -97A1 ED42 -97A2 ED43 -97A3 F7B7 -97A4 ED44 -97A5 ED45 -97A6 ED46 -97A7 ED47 -97A8 ED48 -97A9 ED49 -97AA ED4A -97AB F7B6 -97AC ED4B -97AD B1DE -97AE ED4C -97AF F7B5 -97B0 ED4D -97B1 ED4E -97B2 F7B8 -97B3 ED4F -97B4 F7B9 -97B5 ED50 -97B6 ED51 -97B7 ED52 -97B8 ED53 -97B9 ED54 -97BA ED55 -97BB ED56 -97BC ED57 -97BD ED58 -97BE ED59 -97BF ED5A -97C0 ED5B -97C1 ED5C -97C2 ED5D -97C3 ED5E -97C4 ED5F -97C5 ED60 -97C6 ED61 -97C7 ED62 -97C8 ED63 -97C9 ED64 -97CA ED65 -97CB ED66 -97CC ED67 -97CD ED68 -97CE ED69 -97CF ED6A -97D0 ED6B -97D1 ED6C -97D2 ED6D -97D3 ED6E -97D4 ED6F -97D5 ED70 -97D6 ED71 -97D7 ED72 -97D8 ED73 -97D9 ED74 -97DA ED75 -97DB ED76 -97DC ED77 -97DD ED78 -97DE ED79 -97DF ED7A -97E0 ED7B -97E1 ED7C -97E2 ED7D -97E3 ED7E -97E4 ED80 
-97E5 ED81 -97E6 CEA4 -97E7 C8CD -97E8 ED82 -97E9 BAAB -97EA E8B8 -97EB E8B9 -97EC E8BA -97ED BEC2 -97EE ED83 -97EF ED84 -97F0 ED85 -97F1 ED86 -97F2 ED87 -97F3 D2F4 -97F4 ED88 -97F5 D4CF -97F6 C9D8 -97F7 ED89 -97F8 ED8A -97F9 ED8B -97FA ED8C -97FB ED8D -97FC ED8E -97FD ED8F -97FE ED90 -97FF ED91 -9800 ED92 -9801 ED93 -9802 ED94 -9803 ED95 -9804 ED96 -9805 ED97 -9806 ED98 -9807 ED99 -9808 ED9A -9809 ED9B -980A ED9C -980B ED9D -980C ED9E -980D ED9F -980E EDA0 -980F EE40 -9810 EE41 -9811 EE42 -9812 EE43 -9813 EE44 -9814 EE45 -9815 EE46 -9816 EE47 -9817 EE48 -9818 EE49 -9819 EE4A -981A EE4B -981B EE4C -981C EE4D -981D EE4E -981E EE4F -981F EE50 -9820 EE51 -9821 EE52 -9822 EE53 -9823 EE54 -9824 EE55 -9825 EE56 -9826 EE57 -9827 EE58 -9828 EE59 -9829 EE5A -982A EE5B -982B EE5C -982C EE5D -982D EE5E -982E EE5F -982F EE60 -9830 EE61 -9831 EE62 -9832 EE63 -9833 EE64 -9834 EE65 -9835 EE66 -9836 EE67 -9837 EE68 -9838 EE69 -9839 EE6A -983A EE6B -983B EE6C -983C EE6D -983D EE6E -983E EE6F -983F EE70 -9840 EE71 -9841 EE72 -9842 EE73 -9843 EE74 -9844 EE75 -9845 EE76 -9846 EE77 -9847 EE78 -9848 EE79 -9849 EE7A -984A EE7B -984B EE7C -984C EE7D -984D EE7E -984E EE80 -984F EE81 -9850 EE82 -9851 EE83 -9852 EE84 -9853 EE85 -9854 EE86 -9855 EE87 -9856 EE88 -9857 EE89 -9858 EE8A -9859 EE8B -985A EE8C -985B EE8D -985C EE8E -985D EE8F -985E EE90 -985F EE91 -9860 EE92 -9861 EE93 -9862 EE94 -9863 EE95 -9864 EE96 -9865 EE97 -9866 EE98 -9867 EE99 -9868 EE9A -9869 EE9B -986A EE9C -986B EE9D -986C EE9E -986D EE9F -986E EEA0 -986F EF40 -9870 EF41 -9871 EF42 -9872 EF43 -9873 EF44 -9874 EF45 -9875 D2B3 -9876 B6A5 -9877 C7EA -9878 F1FC -9879 CFEE -987A CBB3 -987B D0EB -987C E7EF -987D CDE7 -987E B9CB -987F B6D9 -9880 F1FD -9881 B0E4 -9882 CBCC -9883 F1FE -9884 D4A4 -9885 C2AD -9886 C1EC -9887 C6C4 -9888 BEB1 -9889 F2A1 -988A BCD5 -988B EF46 -988C F2A2 -988D F2A3 -988E EF47 -988F F2A4 -9890 D2C3 -9891 C6B5 -9892 EF48 -9893 CDC7 -9894 F2A5 -9895 EF49 -9896 D3B1 -9897 BFC5 -9898 CCE2 -9899 EF4A -989A F2A6 -989B F2A7 -989C D1D5 -989D B6EE -989E F2A8 -989F F2A9 -98A0 B5DF -98A1 F2AA -98A2 F2AB -98A3 EF4B -98A4 B2FC -98A5 F2AC -98A6 F2AD -98A7 C8A7 -98A8 EF4C -98A9 EF4D -98AA EF4E -98AB EF4F -98AC EF50 -98AD EF51 -98AE EF52 -98AF EF53 -98B0 EF54 -98B1 EF55 -98B2 EF56 -98B3 EF57 -98B4 EF58 -98B5 EF59 -98B6 EF5A -98B7 EF5B -98B8 EF5C -98B9 EF5D -98BA EF5E -98BB EF5F -98BC EF60 -98BD EF61 -98BE EF62 -98BF EF63 -98C0 EF64 -98C1 EF65 -98C2 EF66 -98C3 EF67 -98C4 EF68 -98C5 EF69 -98C6 EF6A -98C7 EF6B -98C8 EF6C -98C9 EF6D -98CA EF6E -98CB EF6F -98CC EF70 -98CD EF71 -98CE B7E7 -98CF EF72 -98D0 EF73 -98D1 ECA9 -98D2 ECAA -98D3 ECAB -98D4 EF74 -98D5 ECAC -98D6 EF75 -98D7 EF76 -98D8 C6AE -98D9 ECAD -98DA ECAE -98DB EF77 -98DC EF78 -98DD EF79 -98DE B7C9 -98DF CAB3 -98E0 EF7A -98E1 EF7B -98E2 EF7C -98E3 EF7D -98E4 EF7E -98E5 EF80 -98E6 EF81 -98E7 E2B8 -98E8 F7CF -98E9 EF82 -98EA EF83 -98EB EF84 -98EC EF85 -98ED EF86 -98EE EF87 -98EF EF88 -98F0 EF89 -98F1 EF8A -98F2 EF8B -98F3 EF8C -98F4 EF8D -98F5 EF8E -98F6 EF8F -98F7 EF90 -98F8 EF91 -98F9 EF92 -98FA EF93 -98FB EF94 -98FC EF95 -98FD EF96 -98FE EF97 -98FF EF98 -9900 EF99 -9901 EF9A -9902 EF9B -9903 EF9C -9904 EF9D -9905 EF9E -9906 EF9F -9907 EFA0 -9908 F040 -9909 F041 -990A F042 -990B F043 -990C F044 -990D F7D0 -990E F045 -990F F046 -9910 B2CD -9911 F047 -9912 F048 -9913 F049 -9914 F04A -9915 F04B -9916 F04C -9917 F04D -9918 F04E -9919 F04F -991A F050 -991B F051 -991C F052 -991D F053 -991E F054 -991F F055 -9920 F056 -9921 F057 -9922 F058 -9923 F059 -9924 F05A -9925 F05B -9926 F05C -9927 F05D 
-9928 F05E -9929 F05F -992A F060 -992B F061 -992C F062 -992D F063 -992E F7D1 -992F F064 -9930 F065 -9931 F066 -9932 F067 -9933 F068 -9934 F069 -9935 F06A -9936 F06B -9937 F06C -9938 F06D -9939 F06E -993A F06F -993B F070 -993C F071 -993D F072 -993E F073 -993F F074 -9940 F075 -9941 F076 -9942 F077 -9943 F078 -9944 F079 -9945 F07A -9946 F07B -9947 F07C -9948 F07D -9949 F07E -994A F080 -994B F081 -994C F082 -994D F083 -994E F084 -994F F085 -9950 F086 -9951 F087 -9952 F088 -9953 F089 -9954 F7D3 -9955 F7D2 -9956 F08A -9957 F08B -9958 F08C -9959 F08D -995A F08E -995B F08F -995C F090 -995D F091 -995E F092 -995F F093 -9960 F094 -9961 F095 -9962 F096 -9963 E2BB -9964 F097 -9965 BCA2 -9966 F098 -9967 E2BC -9968 E2BD -9969 E2BE -996A E2BF -996B E2C0 -996C E2C1 -996D B7B9 -996E D2FB -996F BDA4 -9970 CACE -9971 B1A5 -9972 CBC7 -9973 F099 -9974 E2C2 -9975 B6FC -9976 C8C4 -9977 E2C3 -9978 F09A -9979 F09B -997A BDC8 -997B F09C -997C B1FD -997D E2C4 -997E F09D -997F B6F6 -9980 E2C5 -9981 C4D9 -9982 F09E -9983 F09F -9984 E2C6 -9985 CFDA -9986 B9DD -9987 E2C7 -9988 C0A1 -9989 F0A0 -998A E2C8 -998B B2F6 -998C F140 -998D E2C9 -998E F141 -998F C1F3 -9990 E2CA -9991 E2CB -9992 C2F8 -9993 E2CC -9994 E2CD -9995 E2CE -9996 CAD7 -9997 D8B8 -9998 D9E5 -9999 CFE3 -999A F142 -999B F143 -999C F144 -999D F145 -999E F146 -999F F147 -99A0 F148 -99A1 F149 -99A2 F14A -99A3 F14B -99A4 F14C -99A5 F0A5 -99A6 F14D -99A7 F14E -99A8 DCB0 -99A9 F14F -99AA F150 -99AB F151 -99AC F152 -99AD F153 -99AE F154 -99AF F155 -99B0 F156 -99B1 F157 -99B2 F158 -99B3 F159 -99B4 F15A -99B5 F15B -99B6 F15C -99B7 F15D -99B8 F15E -99B9 F15F -99BA F160 -99BB F161 -99BC F162 -99BD F163 -99BE F164 -99BF F165 -99C0 F166 -99C1 F167 -99C2 F168 -99C3 F169 -99C4 F16A -99C5 F16B -99C6 F16C -99C7 F16D -99C8 F16E -99C9 F16F -99CA F170 -99CB F171 -99CC F172 -99CD F173 -99CE F174 -99CF F175 -99D0 F176 -99D1 F177 -99D2 F178 -99D3 F179 -99D4 F17A -99D5 F17B -99D6 F17C -99D7 F17D -99D8 F17E -99D9 F180 -99DA F181 -99DB F182 -99DC F183 -99DD F184 -99DE F185 -99DF F186 -99E0 F187 -99E1 F188 -99E2 F189 -99E3 F18A -99E4 F18B -99E5 F18C -99E6 F18D -99E7 F18E -99E8 F18F -99E9 F190 -99EA F191 -99EB F192 -99EC F193 -99ED F194 -99EE F195 -99EF F196 -99F0 F197 -99F1 F198 -99F2 F199 -99F3 F19A -99F4 F19B -99F5 F19C -99F6 F19D -99F7 F19E -99F8 F19F -99F9 F1A0 -99FA F240 -99FB F241 -99FC F242 -99FD F243 -99FE F244 -99FF F245 -9A00 F246 -9A01 F247 -9A02 F248 -9A03 F249 -9A04 F24A -9A05 F24B -9A06 F24C -9A07 F24D -9A08 F24E -9A09 F24F -9A0A F250 -9A0B F251 -9A0C F252 -9A0D F253 -9A0E F254 -9A0F F255 -9A10 F256 -9A11 F257 -9A12 F258 -9A13 F259 -9A14 F25A -9A15 F25B -9A16 F25C -9A17 F25D -9A18 F25E -9A19 F25F -9A1A F260 -9A1B F261 -9A1C F262 -9A1D F263 -9A1E F264 -9A1F F265 -9A20 F266 -9A21 F267 -9A22 F268 -9A23 F269 -9A24 F26A -9A25 F26B -9A26 F26C -9A27 F26D -9A28 F26E -9A29 F26F -9A2A F270 -9A2B F271 -9A2C F272 -9A2D F273 -9A2E F274 -9A2F F275 -9A30 F276 -9A31 F277 -9A32 F278 -9A33 F279 -9A34 F27A -9A35 F27B -9A36 F27C -9A37 F27D -9A38 F27E -9A39 F280 -9A3A F281 -9A3B F282 -9A3C F283 -9A3D F284 -9A3E F285 -9A3F F286 -9A40 F287 -9A41 F288 -9A42 F289 -9A43 F28A -9A44 F28B -9A45 F28C -9A46 F28D -9A47 F28E -9A48 F28F -9A49 F290 -9A4A F291 -9A4B F292 -9A4C F293 -9A4D F294 -9A4E F295 -9A4F F296 -9A50 F297 -9A51 F298 -9A52 F299 -9A53 F29A -9A54 F29B -9A55 F29C -9A56 F29D -9A57 F29E -9A58 F29F -9A59 F2A0 -9A5A F340 -9A5B F341 -9A5C F342 -9A5D F343 -9A5E F344 -9A5F F345 -9A60 F346 -9A61 F347 -9A62 F348 -9A63 F349 -9A64 F34A -9A65 F34B -9A66 F34C -9A67 F34D -9A68 F34E -9A69 F34F -9A6A F350 
-9A6B F351 -9A6C C2ED -9A6D D4A6 -9A6E CDD4 -9A6F D1B1 -9A70 B3DB -9A71 C7FD -9A72 F352 -9A73 B2B5 -9A74 C2BF -9A75 E6E0 -9A76 CABB -9A77 E6E1 -9A78 E6E2 -9A79 BED4 -9A7A E6E3 -9A7B D7A4 -9A7C CDD5 -9A7D E6E5 -9A7E BCDD -9A7F E6E4 -9A80 E6E6 -9A81 E6E7 -9A82 C2EE -9A83 F353 -9A84 BDBE -9A85 E6E8 -9A86 C2E6 -9A87 BAA7 -9A88 E6E9 -9A89 F354 -9A8A E6EA -9A8B B3D2 -9A8C D1E9 -9A8D F355 -9A8E F356 -9A8F BFA5 -9A90 E6EB -9A91 C6EF -9A92 E6EC -9A93 E6ED -9A94 F357 -9A95 F358 -9A96 E6EE -9A97 C6AD -9A98 E6EF -9A99 F359 -9A9A C9A7 -9A9B E6F0 -9A9C E6F1 -9A9D E6F2 -9A9E E5B9 -9A9F E6F3 -9AA0 E6F4 -9AA1 C2E2 -9AA2 E6F5 -9AA3 E6F6 -9AA4 D6E8 -9AA5 E6F7 -9AA6 F35A -9AA7 E6F8 -9AA8 B9C7 -9AA9 F35B -9AAA F35C -9AAB F35D -9AAC F35E -9AAD F35F -9AAE F360 -9AAF F361 -9AB0 F7BB -9AB1 F7BA -9AB2 F362 -9AB3 F363 -9AB4 F364 -9AB5 F365 -9AB6 F7BE -9AB7 F7BC -9AB8 BAA1 -9AB9 F366 -9ABA F7BF -9ABB F367 -9ABC F7C0 -9ABD F368 -9ABE F369 -9ABF F36A -9AC0 F7C2 -9AC1 F7C1 -9AC2 F7C4 -9AC3 F36B -9AC4 F36C -9AC5 F7C3 -9AC6 F36D -9AC7 F36E -9AC8 F36F -9AC9 F370 -9ACA F371 -9ACB F7C5 -9ACC F7C6 -9ACD F372 -9ACE F373 -9ACF F374 -9AD0 F375 -9AD1 F7C7 -9AD2 F376 -9AD3 CBE8 -9AD4 F377 -9AD5 F378 -9AD6 F379 -9AD7 F37A -9AD8 B8DF -9AD9 F37B -9ADA F37C -9ADB F37D -9ADC F37E -9ADD F380 -9ADE F381 -9ADF F7D4 -9AE0 F382 -9AE1 F7D5 -9AE2 F383 -9AE3 F384 -9AE4 F385 -9AE5 F386 -9AE6 F7D6 -9AE7 F387 -9AE8 F388 -9AE9 F389 -9AEA F38A -9AEB F7D8 -9AEC F38B -9AED F7DA -9AEE F38C -9AEF F7D7 -9AF0 F38D -9AF1 F38E -9AF2 F38F -9AF3 F390 -9AF4 F391 -9AF5 F392 -9AF6 F393 -9AF7 F394 -9AF8 F395 -9AF9 F7DB -9AFA F396 -9AFB F7D9 -9AFC F397 -9AFD F398 -9AFE F399 -9AFF F39A -9B00 F39B -9B01 F39C -9B02 F39D -9B03 D7D7 -9B04 F39E -9B05 F39F -9B06 F3A0 -9B07 F440 -9B08 F7DC -9B09 F441 -9B0A F442 -9B0B F443 -9B0C F444 -9B0D F445 -9B0E F446 -9B0F F7DD -9B10 F447 -9B11 F448 -9B12 F449 -9B13 F7DE -9B14 F44A -9B15 F44B -9B16 F44C -9B17 F44D -9B18 F44E -9B19 F44F -9B1A F450 -9B1B F451 -9B1C F452 -9B1D F453 -9B1E F454 -9B1F F7DF -9B20 F455 -9B21 F456 -9B22 F457 -9B23 F7E0 -9B24 F458 -9B25 F459 -9B26 F45A -9B27 F45B -9B28 F45C -9B29 F45D -9B2A F45E -9B2B F45F -9B2C F460 -9B2D F461 -9B2E F462 -9B2F DBCB -9B30 F463 -9B31 F464 -9B32 D8AA -9B33 F465 -9B34 F466 -9B35 F467 -9B36 F468 -9B37 F469 -9B38 F46A -9B39 F46B -9B3A F46C -9B3B E5F7 -9B3C B9ED -9B3D F46D -9B3E F46E -9B3F F46F -9B40 F470 -9B41 BFFD -9B42 BBEA -9B43 F7C9 -9B44 C6C7 -9B45 F7C8 -9B46 F471 -9B47 F7CA -9B48 F7CC -9B49 F7CB -9B4A F472 -9B4B F473 -9B4C F474 -9B4D F7CD -9B4E F475 -9B4F CEBA -9B50 F476 -9B51 F7CE -9B52 F477 -9B53 F478 -9B54 C4A7 -9B55 F479 -9B56 F47A -9B57 F47B -9B58 F47C -9B59 F47D -9B5A F47E -9B5B F480 -9B5C F481 -9B5D F482 -9B5E F483 -9B5F F484 -9B60 F485 -9B61 F486 -9B62 F487 -9B63 F488 -9B64 F489 -9B65 F48A -9B66 F48B -9B67 F48C -9B68 F48D -9B69 F48E -9B6A F48F -9B6B F490 -9B6C F491 -9B6D F492 -9B6E F493 -9B6F F494 -9B70 F495 -9B71 F496 -9B72 F497 -9B73 F498 -9B74 F499 -9B75 F49A -9B76 F49B -9B77 F49C -9B78 F49D -9B79 F49E -9B7A F49F -9B7B F4A0 -9B7C F540 -9B7D F541 -9B7E F542 -9B7F F543 -9B80 F544 -9B81 F545 -9B82 F546 -9B83 F547 -9B84 F548 -9B85 F549 -9B86 F54A -9B87 F54B -9B88 F54C -9B89 F54D -9B8A F54E -9B8B F54F -9B8C F550 -9B8D F551 -9B8E F552 -9B8F F553 -9B90 F554 -9B91 F555 -9B92 F556 -9B93 F557 -9B94 F558 -9B95 F559 -9B96 F55A -9B97 F55B -9B98 F55C -9B99 F55D -9B9A F55E -9B9B F55F -9B9C F560 -9B9D F561 -9B9E F562 -9B9F F563 -9BA0 F564 -9BA1 F565 -9BA2 F566 -9BA3 F567 -9BA4 F568 -9BA5 F569 -9BA6 F56A -9BA7 F56B -9BA8 F56C -9BA9 F56D -9BAA F56E -9BAB F56F -9BAC F570 -9BAD F571 
-9BAE F572 -9BAF F573 -9BB0 F574 -9BB1 F575 -9BB2 F576 -9BB3 F577 -9BB4 F578 -9BB5 F579 -9BB6 F57A -9BB7 F57B -9BB8 F57C -9BB9 F57D -9BBA F57E -9BBB F580 -9BBC F581 -9BBD F582 -9BBE F583 -9BBF F584 -9BC0 F585 -9BC1 F586 -9BC2 F587 -9BC3 F588 -9BC4 F589 -9BC5 F58A -9BC6 F58B -9BC7 F58C -9BC8 F58D -9BC9 F58E -9BCA F58F -9BCB F590 -9BCC F591 -9BCD F592 -9BCE F593 -9BCF F594 -9BD0 F595 -9BD1 F596 -9BD2 F597 -9BD3 F598 -9BD4 F599 -9BD5 F59A -9BD6 F59B -9BD7 F59C -9BD8 F59D -9BD9 F59E -9BDA F59F -9BDB F5A0 -9BDC F640 -9BDD F641 -9BDE F642 -9BDF F643 -9BE0 F644 -9BE1 F645 -9BE2 F646 -9BE3 F647 -9BE4 F648 -9BE5 F649 -9BE6 F64A -9BE7 F64B -9BE8 F64C -9BE9 F64D -9BEA F64E -9BEB F64F -9BEC F650 -9BED F651 -9BEE F652 -9BEF F653 -9BF0 F654 -9BF1 F655 -9BF2 F656 -9BF3 F657 -9BF4 F658 -9BF5 F659 -9BF6 F65A -9BF7 F65B -9BF8 F65C -9BF9 F65D -9BFA F65E -9BFB F65F -9BFC F660 -9BFD F661 -9BFE F662 -9BFF F663 -9C00 F664 -9C01 F665 -9C02 F666 -9C03 F667 -9C04 F668 -9C05 F669 -9C06 F66A -9C07 F66B -9C08 F66C -9C09 F66D -9C0A F66E -9C0B F66F -9C0C F670 -9C0D F671 -9C0E F672 -9C0F F673 -9C10 F674 -9C11 F675 -9C12 F676 -9C13 F677 -9C14 F678 -9C15 F679 -9C16 F67A -9C17 F67B -9C18 F67C -9C19 F67D -9C1A F67E -9C1B F680 -9C1C F681 -9C1D F682 -9C1E F683 -9C1F F684 -9C20 F685 -9C21 F686 -9C22 F687 -9C23 F688 -9C24 F689 -9C25 F68A -9C26 F68B -9C27 F68C -9C28 F68D -9C29 F68E -9C2A F68F -9C2B F690 -9C2C F691 -9C2D F692 -9C2E F693 -9C2F F694 -9C30 F695 -9C31 F696 -9C32 F697 -9C33 F698 -9C34 F699 -9C35 F69A -9C36 F69B -9C37 F69C -9C38 F69D -9C39 F69E -9C3A F69F -9C3B F6A0 -9C3C F740 -9C3D F741 -9C3E F742 -9C3F F743 -9C40 F744 -9C41 F745 -9C42 F746 -9C43 F747 -9C44 F748 -9C45 F749 -9C46 F74A -9C47 F74B -9C48 F74C -9C49 F74D -9C4A F74E -9C4B F74F -9C4C F750 -9C4D F751 -9C4E F752 -9C4F F753 -9C50 F754 -9C51 F755 -9C52 F756 -9C53 F757 -9C54 F758 -9C55 F759 -9C56 F75A -9C57 F75B -9C58 F75C -9C59 F75D -9C5A F75E -9C5B F75F -9C5C F760 -9C5D F761 -9C5E F762 -9C5F F763 -9C60 F764 -9C61 F765 -9C62 F766 -9C63 F767 -9C64 F768 -9C65 F769 -9C66 F76A -9C67 F76B -9C68 F76C -9C69 F76D -9C6A F76E -9C6B F76F -9C6C F770 -9C6D F771 -9C6E F772 -9C6F F773 -9C70 F774 -9C71 F775 -9C72 F776 -9C73 F777 -9C74 F778 -9C75 F779 -9C76 F77A -9C77 F77B -9C78 F77C -9C79 F77D -9C7A F77E -9C7B F780 -9C7C D3E3 -9C7D F781 -9C7E F782 -9C7F F6CF -9C80 F783 -9C81 C2B3 -9C82 F6D0 -9C83 F784 -9C84 F785 -9C85 F6D1 -9C86 F6D2 -9C87 F6D3 -9C88 F6D4 -9C89 F786 -9C8A F787 -9C8B F6D6 -9C8C F788 -9C8D B1AB -9C8E F6D7 -9C8F F789 -9C90 F6D8 -9C91 F6D9 -9C92 F6DA -9C93 F78A -9C94 F6DB -9C95 F6DC -9C96 F78B -9C97 F78C -9C98 F78D -9C99 F78E -9C9A F6DD -9C9B F6DE -9C9C CFCA -9C9D F78F -9C9E F6DF -9C9F F6E0 -9CA0 F6E1 -9CA1 F6E2 -9CA2 F6E3 -9CA3 F6E4 -9CA4 C0F0 -9CA5 F6E5 -9CA6 F6E6 -9CA7 F6E7 -9CA8 F6E8 -9CA9 F6E9 -9CAA F790 -9CAB F6EA -9CAC F791 -9CAD F6EB -9CAE F6EC -9CAF F792 -9CB0 F6ED -9CB1 F6EE -9CB2 F6EF -9CB3 F6F0 -9CB4 F6F1 -9CB5 F6F2 -9CB6 F6F3 -9CB7 F6F4 -9CB8 BEA8 -9CB9 F793 -9CBA F6F5 -9CBB F6F6 -9CBC F6F7 -9CBD F6F8 -9CBE F794 -9CBF F795 -9CC0 F796 -9CC1 F797 -9CC2 F798 -9CC3 C8FA -9CC4 F6F9 -9CC5 F6FA -9CC6 F6FB -9CC7 F6FC -9CC8 F799 -9CC9 F79A -9CCA F6FD -9CCB F6FE -9CCC F7A1 -9CCD F7A2 -9CCE F7A3 -9CCF F7A4 -9CD0 F7A5 -9CD1 F79B -9CD2 F79C -9CD3 F7A6 -9CD4 F7A7 -9CD5 F7A8 -9CD6 B1EE -9CD7 F7A9 -9CD8 F7AA -9CD9 F7AB -9CDA F79D -9CDB F79E -9CDC F7AC -9CDD F7AD -9CDE C1DB -9CDF F7AE -9CE0 F79F -9CE1 F7A0 -9CE2 F7AF -9CE3 F840 -9CE4 F841 -9CE5 F842 -9CE6 F843 -9CE7 F844 -9CE8 F845 -9CE9 F846 -9CEA F847 -9CEB F848 -9CEC F849 -9CED F84A -9CEE F84B -9CEF F84C -9CF0 F84D 
-9CF1 F84E -9CF2 F84F -9CF3 F850 -9CF4 F851 -9CF5 F852 -9CF6 F853 -9CF7 F854 -9CF8 F855 -9CF9 F856 -9CFA F857 -9CFB F858 -9CFC F859 -9CFD F85A -9CFE F85B -9CFF F85C -9D00 F85D -9D01 F85E -9D02 F85F -9D03 F860 -9D04 F861 -9D05 F862 -9D06 F863 -9D07 F864 -9D08 F865 -9D09 F866 -9D0A F867 -9D0B F868 -9D0C F869 -9D0D F86A -9D0E F86B -9D0F F86C -9D10 F86D -9D11 F86E -9D12 F86F -9D13 F870 -9D14 F871 -9D15 F872 -9D16 F873 -9D17 F874 -9D18 F875 -9D19 F876 -9D1A F877 -9D1B F878 -9D1C F879 -9D1D F87A -9D1E F87B -9D1F F87C -9D20 F87D -9D21 F87E -9D22 F880 -9D23 F881 -9D24 F882 -9D25 F883 -9D26 F884 -9D27 F885 -9D28 F886 -9D29 F887 -9D2A F888 -9D2B F889 -9D2C F88A -9D2D F88B -9D2E F88C -9D2F F88D -9D30 F88E -9D31 F88F -9D32 F890 -9D33 F891 -9D34 F892 -9D35 F893 -9D36 F894 -9D37 F895 -9D38 F896 -9D39 F897 -9D3A F898 -9D3B F899 -9D3C F89A -9D3D F89B -9D3E F89C -9D3F F89D -9D40 F89E -9D41 F89F -9D42 F8A0 -9D43 F940 -9D44 F941 -9D45 F942 -9D46 F943 -9D47 F944 -9D48 F945 -9D49 F946 -9D4A F947 -9D4B F948 -9D4C F949 -9D4D F94A -9D4E F94B -9D4F F94C -9D50 F94D -9D51 F94E -9D52 F94F -9D53 F950 -9D54 F951 -9D55 F952 -9D56 F953 -9D57 F954 -9D58 F955 -9D59 F956 -9D5A F957 -9D5B F958 -9D5C F959 -9D5D F95A -9D5E F95B -9D5F F95C -9D60 F95D -9D61 F95E -9D62 F95F -9D63 F960 -9D64 F961 -9D65 F962 -9D66 F963 -9D67 F964 -9D68 F965 -9D69 F966 -9D6A F967 -9D6B F968 -9D6C F969 -9D6D F96A -9D6E F96B -9D6F F96C -9D70 F96D -9D71 F96E -9D72 F96F -9D73 F970 -9D74 F971 -9D75 F972 -9D76 F973 -9D77 F974 -9D78 F975 -9D79 F976 -9D7A F977 -9D7B F978 -9D7C F979 -9D7D F97A -9D7E F97B -9D7F F97C -9D80 F97D -9D81 F97E -9D82 F980 -9D83 F981 -9D84 F982 -9D85 F983 -9D86 F984 -9D87 F985 -9D88 F986 -9D89 F987 -9D8A F988 -9D8B F989 -9D8C F98A -9D8D F98B -9D8E F98C -9D8F F98D -9D90 F98E -9D91 F98F -9D92 F990 -9D93 F991 -9D94 F992 -9D95 F993 -9D96 F994 -9D97 F995 -9D98 F996 -9D99 F997 -9D9A F998 -9D9B F999 -9D9C F99A -9D9D F99B -9D9E F99C -9D9F F99D -9DA0 F99E -9DA1 F99F -9DA2 F9A0 -9DA3 FA40 -9DA4 FA41 -9DA5 FA42 -9DA6 FA43 -9DA7 FA44 -9DA8 FA45 -9DA9 FA46 -9DAA FA47 -9DAB FA48 -9DAC FA49 -9DAD FA4A -9DAE FA4B -9DAF FA4C -9DB0 FA4D -9DB1 FA4E -9DB2 FA4F -9DB3 FA50 -9DB4 FA51 -9DB5 FA52 -9DB6 FA53 -9DB7 FA54 -9DB8 FA55 -9DB9 FA56 -9DBA FA57 -9DBB FA58 -9DBC FA59 -9DBD FA5A -9DBE FA5B -9DBF FA5C -9DC0 FA5D -9DC1 FA5E -9DC2 FA5F -9DC3 FA60 -9DC4 FA61 -9DC5 FA62 -9DC6 FA63 -9DC7 FA64 -9DC8 FA65 -9DC9 FA66 -9DCA FA67 -9DCB FA68 -9DCC FA69 -9DCD FA6A -9DCE FA6B -9DCF FA6C -9DD0 FA6D -9DD1 FA6E -9DD2 FA6F -9DD3 FA70 -9DD4 FA71 -9DD5 FA72 -9DD6 FA73 -9DD7 FA74 -9DD8 FA75 -9DD9 FA76 -9DDA FA77 -9DDB FA78 -9DDC FA79 -9DDD FA7A -9DDE FA7B -9DDF FA7C -9DE0 FA7D -9DE1 FA7E -9DE2 FA80 -9DE3 FA81 -9DE4 FA82 -9DE5 FA83 -9DE6 FA84 -9DE7 FA85 -9DE8 FA86 -9DE9 FA87 -9DEA FA88 -9DEB FA89 -9DEC FA8A -9DED FA8B -9DEE FA8C -9DEF FA8D -9DF0 FA8E -9DF1 FA8F -9DF2 FA90 -9DF3 FA91 -9DF4 FA92 -9DF5 FA93 -9DF6 FA94 -9DF7 FA95 -9DF8 FA96 -9DF9 FA97 -9DFA FA98 -9DFB FA99 -9DFC FA9A -9DFD FA9B -9DFE FA9C -9DFF FA9D -9E00 FA9E -9E01 FA9F -9E02 FAA0 -9E03 FB40 -9E04 FB41 -9E05 FB42 -9E06 FB43 -9E07 FB44 -9E08 FB45 -9E09 FB46 -9E0A FB47 -9E0B FB48 -9E0C FB49 -9E0D FB4A -9E0E FB4B -9E0F FB4C -9E10 FB4D -9E11 FB4E -9E12 FB4F -9E13 FB50 -9E14 FB51 -9E15 FB52 -9E16 FB53 -9E17 FB54 -9E18 FB55 -9E19 FB56 -9E1A FB57 -9E1B FB58 -9E1C FB59 -9E1D FB5A -9E1E FB5B -9E1F C4F1 -9E20 F0AF -9E21 BCA6 -9E22 F0B0 -9E23 C3F9 -9E24 FB5C -9E25 C5B8 -9E26 D1BB -9E27 FB5D -9E28 F0B1 -9E29 F0B2 -9E2A F0B3 -9E2B F0B4 -9E2C F0B5 -9E2D D1BC -9E2E FB5E -9E2F D1EC -9E30 FB5F -9E31 F0B7 -9E32 F0B6 -9E33 D4A7 
-9E34 FB60 -9E35 CDD2 -9E36 F0B8 -9E37 F0BA -9E38 F0B9 -9E39 F0BB -9E3A F0BC -9E3B FB61 -9E3C FB62 -9E3D B8EB -9E3E F0BD -9E3F BAE8 -9E40 FB63 -9E41 F0BE -9E42 F0BF -9E43 BEE9 -9E44 F0C0 -9E45 B6EC -9E46 F0C1 -9E47 F0C2 -9E48 F0C3 -9E49 F0C4 -9E4A C8B5 -9E4B F0C5 -9E4C F0C6 -9E4D FB64 -9E4E F0C7 -9E4F C5F4 -9E50 FB65 -9E51 F0C8 -9E52 FB66 -9E53 FB67 -9E54 FB68 -9E55 F0C9 -9E56 FB69 -9E57 F0CA -9E58 F7BD -9E59 FB6A -9E5A F0CB -9E5B F0CC -9E5C F0CD -9E5D FB6B -9E5E F0CE -9E5F FB6C -9E60 FB6D -9E61 FB6E -9E62 FB6F -9E63 F0CF -9E64 BAD7 -9E65 FB70 -9E66 F0D0 -9E67 F0D1 -9E68 F0D2 -9E69 F0D3 -9E6A F0D4 -9E6B F0D5 -9E6C F0D6 -9E6D F0D8 -9E6E FB71 -9E6F FB72 -9E70 D3A5 -9E71 F0D7 -9E72 FB73 -9E73 F0D9 -9E74 FB74 -9E75 FB75 -9E76 FB76 -9E77 FB77 -9E78 FB78 -9E79 FB79 -9E7A FB7A -9E7B FB7B -9E7C FB7C -9E7D FB7D -9E7E F5BA -9E7F C2B9 -9E80 FB7E -9E81 FB80 -9E82 F7E4 -9E83 FB81 -9E84 FB82 -9E85 FB83 -9E86 FB84 -9E87 F7E5 -9E88 F7E6 -9E89 FB85 -9E8A FB86 -9E8B F7E7 -9E8C FB87 -9E8D FB88 -9E8E FB89 -9E8F FB8A -9E90 FB8B -9E91 FB8C -9E92 F7E8 -9E93 C2B4 -9E94 FB8D -9E95 FB8E -9E96 FB8F -9E97 FB90 -9E98 FB91 -9E99 FB92 -9E9A FB93 -9E9B FB94 -9E9C FB95 -9E9D F7EA -9E9E FB96 -9E9F F7EB -9EA0 FB97 -9EA1 FB98 -9EA2 FB99 -9EA3 FB9A -9EA4 FB9B -9EA5 FB9C -9EA6 C2F3 -9EA7 FB9D -9EA8 FB9E -9EA9 FB9F -9EAA FBA0 -9EAB FC40 -9EAC FC41 -9EAD FC42 -9EAE FC43 -9EAF FC44 -9EB0 FC45 -9EB1 FC46 -9EB2 FC47 -9EB3 FC48 -9EB4 F4F0 -9EB5 FC49 -9EB6 FC4A -9EB7 FC4B -9EB8 F4EF -9EB9 FC4C -9EBA FC4D -9EBB C2E9 -9EBC FC4E -9EBD F7E1 -9EBE F7E2 -9EBF FC4F -9EC0 FC50 -9EC1 FC51 -9EC2 FC52 -9EC3 FC53 -9EC4 BBC6 -9EC5 FC54 -9EC6 FC55 -9EC7 FC56 -9EC8 FC57 -9EC9 D9E4 -9ECA FC58 -9ECB FC59 -9ECC FC5A -9ECD CAF2 -9ECE C0E8 -9ECF F0A4 -9ED0 FC5B -9ED1 BADA -9ED2 FC5C -9ED3 FC5D -9ED4 C7AD -9ED5 FC5E -9ED6 FC5F -9ED7 FC60 -9ED8 C4AC -9ED9 FC61 -9EDA FC62 -9EDB F7EC -9EDC F7ED -9EDD F7EE -9EDE FC63 -9EDF F7F0 -9EE0 F7EF -9EE1 FC64 -9EE2 F7F1 -9EE3 FC65 -9EE4 FC66 -9EE5 F7F4 -9EE6 FC67 -9EE7 F7F3 -9EE8 FC68 -9EE9 F7F2 -9EEA F7F5 -9EEB FC69 -9EEC FC6A -9EED FC6B -9EEE FC6C -9EEF F7F6 -9EF0 FC6D -9EF1 FC6E -9EF2 FC6F -9EF3 FC70 -9EF4 FC71 -9EF5 FC72 -9EF6 FC73 -9EF7 FC74 -9EF8 FC75 -9EF9 EDE9 -9EFA FC76 -9EFB EDEA -9EFC EDEB -9EFD FC77 -9EFE F6BC -9EFF FC78 -9F00 FC79 -9F01 FC7A -9F02 FC7B -9F03 FC7C -9F04 FC7D -9F05 FC7E -9F06 FC80 -9F07 FC81 -9F08 FC82 -9F09 FC83 -9F0A FC84 -9F0B F6BD -9F0C FC85 -9F0D F6BE -9F0E B6A6 -9F0F FC86 -9F10 D8BE -9F11 FC87 -9F12 FC88 -9F13 B9C4 -9F14 FC89 -9F15 FC8A -9F16 FC8B -9F17 D8BB -9F18 FC8C -9F19 DCB1 -9F1A FC8D -9F1B FC8E -9F1C FC8F -9F1D FC90 -9F1E FC91 -9F1F FC92 -9F20 CAF3 -9F21 FC93 -9F22 F7F7 -9F23 FC94 -9F24 FC95 -9F25 FC96 -9F26 FC97 -9F27 FC98 -9F28 FC99 -9F29 FC9A -9F2A FC9B -9F2B FC9C -9F2C F7F8 -9F2D FC9D -9F2E FC9E -9F2F F7F9 -9F30 FC9F -9F31 FCA0 -9F32 FD40 -9F33 FD41 -9F34 FD42 -9F35 FD43 -9F36 FD44 -9F37 F7FB -9F38 FD45 -9F39 F7FA -9F3A FD46 -9F3B B1C7 -9F3C FD47 -9F3D F7FC -9F3E F7FD -9F3F FD48 -9F40 FD49 -9F41 FD4A -9F42 FD4B -9F43 FD4C -9F44 F7FE -9F45 FD4D -9F46 FD4E -9F47 FD4F -9F48 FD50 -9F49 FD51 -9F4A FD52 -9F4B FD53 -9F4C FD54 -9F4D FD55 -9F4E FD56 -9F4F FD57 -9F50 C6EB -9F51 ECB4 -9F52 FD58 -9F53 FD59 -9F54 FD5A -9F55 FD5B -9F56 FD5C -9F57 FD5D -9F58 FD5E -9F59 FD5F -9F5A FD60 -9F5B FD61 -9F5C FD62 -9F5D FD63 -9F5E FD64 -9F5F FD65 -9F60 FD66 -9F61 FD67 -9F62 FD68 -9F63 FD69 -9F64 FD6A -9F65 FD6B -9F66 FD6C -9F67 FD6D -9F68 FD6E -9F69 FD6F -9F6A FD70 -9F6B FD71 -9F6C FD72 -9F6D FD73 -9F6E FD74 -9F6F FD75 -9F70 FD76 -9F71 FD77 -9F72 FD78 -9F73 FD79 -9F74 FD7A -9F75 FD7B -9F76 FD7C 
-9F77 FD7D -9F78 FD7E -9F79 FD80 -9F7A FD81 -9F7B FD82 -9F7C FD83 -9F7D FD84 -9F7E FD85 -9F7F B3DD -9F80 F6B3 -9F81 FD86 -9F82 FD87 -9F83 F6B4 -9F84 C1E4 -9F85 F6B5 -9F86 F6B6 -9F87 F6B7 -9F88 F6B8 -9F89 F6B9 -9F8A F6BA -9F8B C8A3 -9F8C F6BB -9F8D FD88 -9F8E FD89 -9F8F FD8A -9F90 FD8B -9F91 FD8C -9F92 FD8D -9F93 FD8E -9F94 FD8F -9F95 FD90 -9F96 FD91 -9F97 FD92 -9F98 FD93 -9F99 C1FA -9F9A B9A8 -9F9B EDE8 -9F9C FD94 -9F9D FD95 -9F9E FD96 -9F9F B9EA -9FA0 D9DF -9FA1 FD97 -9FA2 FD98 -9FA3 FD99 -9FA4 FD9A -9FA5 FD9B -9FA6 82358F33 -9FA7 82358F34 -9FA8 82358F35 -9FA9 82358F36 -9FAA 82358F37 -9FAB 82358F38 -9FAC 82358F39 -9FAD 82359030 -9FAE 82359031 -9FAF 82359032 -9FB0 82359033 -9FB1 82359034 -9FB2 82359035 -9FB3 82359036 -9FB4 82359037 -9FB5 82359038 -9FB6 82359039 -9FB7 82359130 -9FB8 82359131 -9FB9 82359132 -9FBA 82359133 -9FBB 82359134 -9FBC 82359135 -9FBD 82359136 -9FBE 82359137 -9FBF 82359138 -9FC0 82359139 -9FC1 82359230 -9FC2 82359231 -9FC3 82359232 -9FC4 82359233 -9FC5 82359234 -9FC6 82359235 -9FC7 82359236 -9FC8 82359237 -9FC9 82359238 -9FCA 82359239 -9FCB 82359330 -9FCC 82359331 -9FCD 82359332 -9FCE 82359333 -9FCF 82359334 -9FD0 82359335 -9FD1 82359336 -9FD2 82359337 -9FD3 82359338 -9FD4 82359339 -9FD5 82359430 -9FD6 82359431 -9FD7 82359432 -9FD8 82359433 -9FD9 82359434 -9FDA 82359435 -9FDB 82359436 -9FDC 82359437 -9FDD 82359438 -9FDE 82359439 -9FDF 82359530 -9FE0 82359531 -9FE1 82359532 -9FE2 82359533 -9FE3 82359534 -9FE4 82359535 -9FE5 82359536 -9FE6 82359537 -9FE7 82359538 -9FE8 82359539 -9FE9 82359630 -9FEA 82359631 -9FEB 82359632 -9FEC 82359633 -9FED 82359634 -9FEE 82359635 -9FEF 82359636 -9FF0 82359637 -9FF1 82359638 -9FF2 82359639 -9FF3 82359730 -9FF4 82359731 -9FF5 82359732 -9FF6 82359733 -9FF7 82359734 -9FF8 82359735 -9FF9 82359736 -9FFA 82359737 -9FFB 82359738 -9FFC 82359739 -9FFD 82359830 -9FFE 82359831 -9FFF 82359832 -A000 82359833 -A001 82359834 -A002 82359835 -A003 82359836 -A004 82359837 -A005 82359838 -A006 82359839 -A007 82359930 -A008 82359931 -A009 82359932 -A00A 82359933 -A00B 82359934 -A00C 82359935 -A00D 82359936 -A00E 82359937 -A00F 82359938 -A010 82359939 -A011 82359A30 -A012 82359A31 -A013 82359A32 -A014 82359A33 -A015 82359A34 -A016 82359A35 -A017 82359A36 -A018 82359A37 -A019 82359A38 -A01A 82359A39 -A01B 82359B30 -A01C 82359B31 -A01D 82359B32 -A01E 82359B33 -A01F 82359B34 -A020 82359B35 -A021 82359B36 -A022 82359B37 -A023 82359B38 -A024 82359B39 -A025 82359C30 -A026 82359C31 -A027 82359C32 -A028 82359C33 -A029 82359C34 -A02A 82359C35 -A02B 82359C36 -A02C 82359C37 -A02D 82359C38 -A02E 82359C39 -A02F 82359D30 -A030 82359D31 -A031 82359D32 -A032 82359D33 -A033 82359D34 -A034 82359D35 -A035 82359D36 -A036 82359D37 -A037 82359D38 -A038 82359D39 -A039 82359E30 -A03A 82359E31 -A03B 82359E32 -A03C 82359E33 -A03D 82359E34 -A03E 82359E35 -A03F 82359E36 -A040 82359E37 -A041 82359E38 -A042 82359E39 -A043 82359F30 -A044 82359F31 -A045 82359F32 -A046 82359F33 -A047 82359F34 -A048 82359F35 -A049 82359F36 -A04A 82359F37 -A04B 82359F38 -A04C 82359F39 -A04D 8235A030 -A04E 8235A031 -A04F 8235A032 -A050 8235A033 -A051 8235A034 -A052 8235A035 -A053 8235A036 -A054 8235A037 -A055 8235A038 -A056 8235A039 -A057 8235A130 -A058 8235A131 -A059 8235A132 -A05A 8235A133 -A05B 8235A134 -A05C 8235A135 -A05D 8235A136 -A05E 8235A137 -A05F 8235A138 -A060 8235A139 -A061 8235A230 -A062 8235A231 -A063 8235A232 -A064 8235A233 -A065 8235A234 -A066 8235A235 -A067 8235A236 -A068 8235A237 -A069 8235A238 -A06A 8235A239 -A06B 8235A330 -A06C 8235A331 -A06D 8235A332 -A06E 8235A333 -A06F 8235A334 -A070 
-[GB18030 ↔ Unicode mapping table, removed lines condensed: the hunk lists one `-<Unicode> <GB18030 four-byte code>` pair per line, running from U+A070 ↔ 0x8235A335 through U+B11A. Both columns advance in lockstep (the trailing byte cycles 0x30..0x39, then the third byte 0x81..0xFE, and so on up the four-byte enumeration), so the whole block is the contiguous segment of the linear GB18030-2000 range U+9FA6..U+D7FF ↔ 0x82358F33..0x8336C738 that falls inside this hunk.]
8238D431 -B11B 8238D432 -B11C 8238D433 -B11D 8238D434 -B11E 8238D435 -B11F 8238D436 -B120 8238D437 -B121 8238D438 -B122 8238D439 -B123 8238D530 -B124 8238D531 -B125 8238D532 -B126 8238D533 -B127 8238D534 -B128 8238D535 -B129 8238D536 -B12A 8238D537 -B12B 8238D538 -B12C 8238D539 -B12D 8238D630 -B12E 8238D631 -B12F 8238D632 -B130 8238D633 -B131 8238D634 -B132 8238D635 -B133 8238D636 -B134 8238D637 -B135 8238D638 -B136 8238D639 -B137 8238D730 -B138 8238D731 -B139 8238D732 -B13A 8238D733 -B13B 8238D734 -B13C 8238D735 -B13D 8238D736 -B13E 8238D737 -B13F 8238D738 -B140 8238D739 -B141 8238D830 -B142 8238D831 -B143 8238D832 -B144 8238D833 -B145 8238D834 -B146 8238D835 -B147 8238D836 -B148 8238D837 -B149 8238D838 -B14A 8238D839 -B14B 8238D930 -B14C 8238D931 -B14D 8238D932 -B14E 8238D933 -B14F 8238D934 -B150 8238D935 -B151 8238D936 -B152 8238D937 -B153 8238D938 -B154 8238D939 -B155 8238DA30 -B156 8238DA31 -B157 8238DA32 -B158 8238DA33 -B159 8238DA34 -B15A 8238DA35 -B15B 8238DA36 -B15C 8238DA37 -B15D 8238DA38 -B15E 8238DA39 -B15F 8238DB30 -B160 8238DB31 -B161 8238DB32 -B162 8238DB33 -B163 8238DB34 -B164 8238DB35 -B165 8238DB36 -B166 8238DB37 -B167 8238DB38 -B168 8238DB39 -B169 8238DC30 -B16A 8238DC31 -B16B 8238DC32 -B16C 8238DC33 -B16D 8238DC34 -B16E 8238DC35 -B16F 8238DC36 -B170 8238DC37 -B171 8238DC38 -B172 8238DC39 -B173 8238DD30 -B174 8238DD31 -B175 8238DD32 -B176 8238DD33 -B177 8238DD34 -B178 8238DD35 -B179 8238DD36 -B17A 8238DD37 -B17B 8238DD38 -B17C 8238DD39 -B17D 8238DE30 -B17E 8238DE31 -B17F 8238DE32 -B180 8238DE33 -B181 8238DE34 -B182 8238DE35 -B183 8238DE36 -B184 8238DE37 -B185 8238DE38 -B186 8238DE39 -B187 8238DF30 -B188 8238DF31 -B189 8238DF32 -B18A 8238DF33 -B18B 8238DF34 -B18C 8238DF35 -B18D 8238DF36 -B18E 8238DF37 -B18F 8238DF38 -B190 8238DF39 -B191 8238E030 -B192 8238E031 -B193 8238E032 -B194 8238E033 -B195 8238E034 -B196 8238E035 -B197 8238E036 -B198 8238E037 -B199 8238E038 -B19A 8238E039 -B19B 8238E130 -B19C 8238E131 -B19D 8238E132 -B19E 8238E133 -B19F 8238E134 -B1A0 8238E135 -B1A1 8238E136 -B1A2 8238E137 -B1A3 8238E138 -B1A4 8238E139 -B1A5 8238E230 -B1A6 8238E231 -B1A7 8238E232 -B1A8 8238E233 -B1A9 8238E234 -B1AA 8238E235 -B1AB 8238E236 -B1AC 8238E237 -B1AD 8238E238 -B1AE 8238E239 -B1AF 8238E330 -B1B0 8238E331 -B1B1 8238E332 -B1B2 8238E333 -B1B3 8238E334 -B1B4 8238E335 -B1B5 8238E336 -B1B6 8238E337 -B1B7 8238E338 -B1B8 8238E339 -B1B9 8238E430 -B1BA 8238E431 -B1BB 8238E432 -B1BC 8238E433 -B1BD 8238E434 -B1BE 8238E435 -B1BF 8238E436 -B1C0 8238E437 -B1C1 8238E438 -B1C2 8238E439 -B1C3 8238E530 -B1C4 8238E531 -B1C5 8238E532 -B1C6 8238E533 -B1C7 8238E534 -B1C8 8238E535 -B1C9 8238E536 -B1CA 8238E537 -B1CB 8238E538 -B1CC 8238E539 -B1CD 8238E630 -B1CE 8238E631 -B1CF 8238E632 -B1D0 8238E633 -B1D1 8238E634 -B1D2 8238E635 -B1D3 8238E636 -B1D4 8238E637 -B1D5 8238E638 -B1D6 8238E639 -B1D7 8238E730 -B1D8 8238E731 -B1D9 8238E732 -B1DA 8238E733 -B1DB 8238E734 -B1DC 8238E735 -B1DD 8238E736 -B1DE 8238E737 -B1DF 8238E738 -B1E0 8238E739 -B1E1 8238E830 -B1E2 8238E831 -B1E3 8238E832 -B1E4 8238E833 -B1E5 8238E834 -B1E6 8238E835 -B1E7 8238E836 -B1E8 8238E837 -B1E9 8238E838 -B1EA 8238E839 -B1EB 8238E930 -B1EC 8238E931 -B1ED 8238E932 -B1EE 8238E933 -B1EF 8238E934 -B1F0 8238E935 -B1F1 8238E936 -B1F2 8238E937 -B1F3 8238E938 -B1F4 8238E939 -B1F5 8238EA30 -B1F6 8238EA31 -B1F7 8238EA32 -B1F8 8238EA33 -B1F9 8238EA34 -B1FA 8238EA35 -B1FB 8238EA36 -B1FC 8238EA37 -B1FD 8238EA38 -B1FE 8238EA39 -B1FF 8238EB30 -B200 8238EB31 -B201 8238EB32 -B202 8238EB33 -B203 8238EB34 -B204 8238EB35 -B205 8238EB36 -B206 8238EB37 -B207 
8238EB38 -B208 8238EB39 -B209 8238EC30 -B20A 8238EC31 -B20B 8238EC32 -B20C 8238EC33 -B20D 8238EC34 -B20E 8238EC35 -B20F 8238EC36 -B210 8238EC37 -B211 8238EC38 -B212 8238EC39 -B213 8238ED30 -B214 8238ED31 -B215 8238ED32 -B216 8238ED33 -B217 8238ED34 -B218 8238ED35 -B219 8238ED36 -B21A 8238ED37 -B21B 8238ED38 -B21C 8238ED39 -B21D 8238EE30 -B21E 8238EE31 -B21F 8238EE32 -B220 8238EE33 -B221 8238EE34 -B222 8238EE35 -B223 8238EE36 -B224 8238EE37 -B225 8238EE38 -B226 8238EE39 -B227 8238EF30 -B228 8238EF31 -B229 8238EF32 -B22A 8238EF33 -B22B 8238EF34 -B22C 8238EF35 -B22D 8238EF36 -B22E 8238EF37 -B22F 8238EF38 -B230 8238EF39 -B231 8238F030 -B232 8238F031 -B233 8238F032 -B234 8238F033 -B235 8238F034 -B236 8238F035 -B237 8238F036 -B238 8238F037 -B239 8238F038 -B23A 8238F039 -B23B 8238F130 -B23C 8238F131 -B23D 8238F132 -B23E 8238F133 -B23F 8238F134 -B240 8238F135 -B241 8238F136 -B242 8238F137 -B243 8238F138 -B244 8238F139 -B245 8238F230 -B246 8238F231 -B247 8238F232 -B248 8238F233 -B249 8238F234 -B24A 8238F235 -B24B 8238F236 -B24C 8238F237 -B24D 8238F238 -B24E 8238F239 -B24F 8238F330 -B250 8238F331 -B251 8238F332 -B252 8238F333 -B253 8238F334 -B254 8238F335 -B255 8238F336 -B256 8238F337 -B257 8238F338 -B258 8238F339 -B259 8238F430 -B25A 8238F431 -B25B 8238F432 -B25C 8238F433 -B25D 8238F434 -B25E 8238F435 -B25F 8238F436 -B260 8238F437 -B261 8238F438 -B262 8238F439 -B263 8238F530 -B264 8238F531 -B265 8238F532 -B266 8238F533 -B267 8238F534 -B268 8238F535 -B269 8238F536 -B26A 8238F537 -B26B 8238F538 -B26C 8238F539 -B26D 8238F630 -B26E 8238F631 -B26F 8238F632 -B270 8238F633 -B271 8238F634 -B272 8238F635 -B273 8238F636 -B274 8238F637 -B275 8238F638 -B276 8238F639 -B277 8238F730 -B278 8238F731 -B279 8238F732 -B27A 8238F733 -B27B 8238F734 -B27C 8238F735 -B27D 8238F736 -B27E 8238F737 -B27F 8238F738 -B280 8238F739 -B281 8238F830 -B282 8238F831 -B283 8238F832 -B284 8238F833 -B285 8238F834 -B286 8238F835 -B287 8238F836 -B288 8238F837 -B289 8238F838 -B28A 8238F839 -B28B 8238F930 -B28C 8238F931 -B28D 8238F932 -B28E 8238F933 -B28F 8238F934 -B290 8238F935 -B291 8238F936 -B292 8238F937 -B293 8238F938 -B294 8238F939 -B295 8238FA30 -B296 8238FA31 -B297 8238FA32 -B298 8238FA33 -B299 8238FA34 -B29A 8238FA35 -B29B 8238FA36 -B29C 8238FA37 -B29D 8238FA38 -B29E 8238FA39 -B29F 8238FB30 -B2A0 8238FB31 -B2A1 8238FB32 -B2A2 8238FB33 -B2A3 8238FB34 -B2A4 8238FB35 -B2A5 8238FB36 -B2A6 8238FB37 -B2A7 8238FB38 -B2A8 8238FB39 -B2A9 8238FC30 -B2AA 8238FC31 -B2AB 8238FC32 -B2AC 8238FC33 -B2AD 8238FC34 -B2AE 8238FC35 -B2AF 8238FC36 -B2B0 8238FC37 -B2B1 8238FC38 -B2B2 8238FC39 -B2B3 8238FD30 -B2B4 8238FD31 -B2B5 8238FD32 -B2B6 8238FD33 -B2B7 8238FD34 -B2B8 8238FD35 -B2B9 8238FD36 -B2BA 8238FD37 -B2BB 8238FD38 -B2BC 8238FD39 -B2BD 8238FE30 -B2BE 8238FE31 -B2BF 8238FE32 -B2C0 8238FE33 -B2C1 8238FE34 -B2C2 8238FE35 -B2C3 8238FE36 -B2C4 8238FE37 -B2C5 8238FE38 -B2C6 8238FE39 -B2C7 82398130 -B2C8 82398131 -B2C9 82398132 -B2CA 82398133 -B2CB 82398134 -B2CC 82398135 -B2CD 82398136 -B2CE 82398137 -B2CF 82398138 -B2D0 82398139 -B2D1 82398230 -B2D2 82398231 -B2D3 82398232 -B2D4 82398233 -B2D5 82398234 -B2D6 82398235 -B2D7 82398236 -B2D8 82398237 -B2D9 82398238 -B2DA 82398239 -B2DB 82398330 -B2DC 82398331 -B2DD 82398332 -B2DE 82398333 -B2DF 82398334 -B2E0 82398335 -B2E1 82398336 -B2E2 82398337 -B2E3 82398338 -B2E4 82398339 -B2E5 82398430 -B2E6 82398431 -B2E7 82398432 -B2E8 82398433 -B2E9 82398434 -B2EA 82398435 -B2EB 82398436 -B2EC 82398437 -B2ED 82398438 -B2EE 82398439 -B2EF 82398530 -B2F0 82398531 -B2F1 82398532 -B2F2 82398533 -B2F3 82398534 -B2F4 
82398535 -B2F5 82398536 -B2F6 82398537 -B2F7 82398538 -B2F8 82398539 -B2F9 82398630 -B2FA 82398631 -B2FB 82398632 -B2FC 82398633 -B2FD 82398634 -B2FE 82398635 -B2FF 82398636 -B300 82398637 -B301 82398638 -B302 82398639 -B303 82398730 -B304 82398731 -B305 82398732 -B306 82398733 -B307 82398734 -B308 82398735 -B309 82398736 -B30A 82398737 -B30B 82398738 -B30C 82398739 -B30D 82398830 -B30E 82398831 -B30F 82398832 -B310 82398833 -B311 82398834 -B312 82398835 -B313 82398836 -B314 82398837 -B315 82398838 -B316 82398839 -B317 82398930 -B318 82398931 -B319 82398932 -B31A 82398933 -B31B 82398934 -B31C 82398935 -B31D 82398936 -B31E 82398937 -B31F 82398938 -B320 82398939 -B321 82398A30 -B322 82398A31 -B323 82398A32 -B324 82398A33 -B325 82398A34 -B326 82398A35 -B327 82398A36 -B328 82398A37 -B329 82398A38 -B32A 82398A39 -B32B 82398B30 -B32C 82398B31 -B32D 82398B32 -B32E 82398B33 -B32F 82398B34 -B330 82398B35 -B331 82398B36 -B332 82398B37 -B333 82398B38 -B334 82398B39 -B335 82398C30 -B336 82398C31 -B337 82398C32 -B338 82398C33 -B339 82398C34 -B33A 82398C35 -B33B 82398C36 -B33C 82398C37 -B33D 82398C38 -B33E 82398C39 -B33F 82398D30 -B340 82398D31 -B341 82398D32 -B342 82398D33 -B343 82398D34 -B344 82398D35 -B345 82398D36 -B346 82398D37 -B347 82398D38 -B348 82398D39 -B349 82398E30 -B34A 82398E31 -B34B 82398E32 -B34C 82398E33 -B34D 82398E34 -B34E 82398E35 -B34F 82398E36 -B350 82398E37 -B351 82398E38 -B352 82398E39 -B353 82398F30 -B354 82398F31 -B355 82398F32 -B356 82398F33 -B357 82398F34 -B358 82398F35 -B359 82398F36 -B35A 82398F37 -B35B 82398F38 -B35C 82398F39 -B35D 82399030 -B35E 82399031 -B35F 82399032 -B360 82399033 -B361 82399034 -B362 82399035 -B363 82399036 -B364 82399037 -B365 82399038 -B366 82399039 -B367 82399130 -B368 82399131 -B369 82399132 -B36A 82399133 -B36B 82399134 -B36C 82399135 -B36D 82399136 -B36E 82399137 -B36F 82399138 -B370 82399139 -B371 82399230 -B372 82399231 -B373 82399232 -B374 82399233 -B375 82399234 -B376 82399235 -B377 82399236 -B378 82399237 -B379 82399238 -B37A 82399239 -B37B 82399330 -B37C 82399331 -B37D 82399332 -B37E 82399333 -B37F 82399334 -B380 82399335 -B381 82399336 -B382 82399337 -B383 82399338 -B384 82399339 -B385 82399430 -B386 82399431 -B387 82399432 -B388 82399433 -B389 82399434 -B38A 82399435 -B38B 82399436 -B38C 82399437 -B38D 82399438 -B38E 82399439 -B38F 82399530 -B390 82399531 -B391 82399532 -B392 82399533 -B393 82399534 -B394 82399535 -B395 82399536 -B396 82399537 -B397 82399538 -B398 82399539 -B399 82399630 -B39A 82399631 -B39B 82399632 -B39C 82399633 -B39D 82399634 -B39E 82399635 -B39F 82399636 -B3A0 82399637 -B3A1 82399638 -B3A2 82399639 -B3A3 82399730 -B3A4 82399731 -B3A5 82399732 -B3A6 82399733 -B3A7 82399734 -B3A8 82399735 -B3A9 82399736 -B3AA 82399737 -B3AB 82399738 -B3AC 82399739 -B3AD 82399830 -B3AE 82399831 -B3AF 82399832 -B3B0 82399833 -B3B1 82399834 -B3B2 82399835 -B3B3 82399836 -B3B4 82399837 -B3B5 82399838 -B3B6 82399839 -B3B7 82399930 -B3B8 82399931 -B3B9 82399932 -B3BA 82399933 -B3BB 82399934 -B3BC 82399935 -B3BD 82399936 -B3BE 82399937 -B3BF 82399938 -B3C0 82399939 -B3C1 82399A30 -B3C2 82399A31 -B3C3 82399A32 -B3C4 82399A33 -B3C5 82399A34 -B3C6 82399A35 -B3C7 82399A36 -B3C8 82399A37 -B3C9 82399A38 -B3CA 82399A39 -B3CB 82399B30 -B3CC 82399B31 -B3CD 82399B32 -B3CE 82399B33 -B3CF 82399B34 -B3D0 82399B35 -B3D1 82399B36 -B3D2 82399B37 -B3D3 82399B38 -B3D4 82399B39 -B3D5 82399C30 -B3D6 82399C31 -B3D7 82399C32 -B3D8 82399C33 -B3D9 82399C34 -B3DA 82399C35 -B3DB 82399C36 -B3DC 82399C37 -B3DD 82399C38 -B3DE 82399C39 -B3DF 82399D30 -B3E0 82399D31 -B3E1 
82399D32 -B3E2 82399D33 -B3E3 82399D34 -B3E4 82399D35 -B3E5 82399D36 -B3E6 82399D37 -B3E7 82399D38 -B3E8 82399D39 -B3E9 82399E30 -B3EA 82399E31 -B3EB 82399E32 -B3EC 82399E33 -B3ED 82399E34 -B3EE 82399E35 -B3EF 82399E36 -B3F0 82399E37 -B3F1 82399E38 -B3F2 82399E39 -B3F3 82399F30 -B3F4 82399F31 -B3F5 82399F32 -B3F6 82399F33 -B3F7 82399F34 -B3F8 82399F35 -B3F9 82399F36 -B3FA 82399F37 -B3FB 82399F38 -B3FC 82399F39 -B3FD 8239A030 -B3FE 8239A031 -B3FF 8239A032 -B400 8239A033 -B401 8239A034 -B402 8239A035 -B403 8239A036 -B404 8239A037 -B405 8239A038 -B406 8239A039 -B407 8239A130 -B408 8239A131 -B409 8239A132 -B40A 8239A133 -B40B 8239A134 -B40C 8239A135 -B40D 8239A136 -B40E 8239A137 -B40F 8239A138 -B410 8239A139 -B411 8239A230 -B412 8239A231 -B413 8239A232 -B414 8239A233 -B415 8239A234 -B416 8239A235 -B417 8239A236 -B418 8239A237 -B419 8239A238 -B41A 8239A239 -B41B 8239A330 -B41C 8239A331 -B41D 8239A332 -B41E 8239A333 -B41F 8239A334 -B420 8239A335 -B421 8239A336 -B422 8239A337 -B423 8239A338 -B424 8239A339 -B425 8239A430 -B426 8239A431 -B427 8239A432 -B428 8239A433 -B429 8239A434 -B42A 8239A435 -B42B 8239A436 -B42C 8239A437 -B42D 8239A438 -B42E 8239A439 -B42F 8239A530 -B430 8239A531 -B431 8239A532 -B432 8239A533 -B433 8239A534 -B434 8239A535 -B435 8239A536 -B436 8239A537 -B437 8239A538 -B438 8239A539 -B439 8239A630 -B43A 8239A631 -B43B 8239A632 -B43C 8239A633 -B43D 8239A634 -B43E 8239A635 -B43F 8239A636 -B440 8239A637 -B441 8239A638 -B442 8239A639 -B443 8239A730 -B444 8239A731 -B445 8239A732 -B446 8239A733 -B447 8239A734 -B448 8239A735 -B449 8239A736 -B44A 8239A737 -B44B 8239A738 -B44C 8239A739 -B44D 8239A830 -B44E 8239A831 -B44F 8239A832 -B450 8239A833 -B451 8239A834 -B452 8239A835 -B453 8239A836 -B454 8239A837 -B455 8239A838 -B456 8239A839 -B457 8239A930 -B458 8239A931 -B459 8239A932 -B45A 8239A933 -B45B 8239A934 -B45C 8239A935 -B45D 8239A936 -B45E 8239A937 -B45F 8239A938 -B460 8239A939 -B461 8239AA30 -B462 8239AA31 -B463 8239AA32 -B464 8239AA33 -B465 8239AA34 -B466 8239AA35 -B467 8239AA36 -B468 8239AA37 -B469 8239AA38 -B46A 8239AA39 -B46B 8239AB30 -B46C 8239AB31 -B46D 8239AB32 -B46E 8239AB33 -B46F 8239AB34 -B470 8239AB35 -B471 8239AB36 -B472 8239AB37 -B473 8239AB38 -B474 8239AB39 -B475 8239AC30 -B476 8239AC31 -B477 8239AC32 -B478 8239AC33 -B479 8239AC34 -B47A 8239AC35 -B47B 8239AC36 -B47C 8239AC37 -B47D 8239AC38 -B47E 8239AC39 -B47F 8239AD30 -B480 8239AD31 -B481 8239AD32 -B482 8239AD33 -B483 8239AD34 -B484 8239AD35 -B485 8239AD36 -B486 8239AD37 -B487 8239AD38 -B488 8239AD39 -B489 8239AE30 -B48A 8239AE31 -B48B 8239AE32 -B48C 8239AE33 -B48D 8239AE34 -B48E 8239AE35 -B48F 8239AE36 -B490 8239AE37 -B491 8239AE38 -B492 8239AE39 -B493 8239AF30 -B494 8239AF31 -B495 8239AF32 -B496 8239AF33 -B497 8239AF34 -B498 8239AF35 -B499 8239AF36 -B49A 8239AF37 -B49B 8239AF38 -B49C 8239AF39 -B49D 8239B030 -B49E 8239B031 -B49F 8239B032 -B4A0 8239B033 -B4A1 8239B034 -B4A2 8239B035 -B4A3 8239B036 -B4A4 8239B037 -B4A5 8239B038 -B4A6 8239B039 -B4A7 8239B130 -B4A8 8239B131 -B4A9 8239B132 -B4AA 8239B133 -B4AB 8239B134 -B4AC 8239B135 -B4AD 8239B136 -B4AE 8239B137 -B4AF 8239B138 -B4B0 8239B139 -B4B1 8239B230 -B4B2 8239B231 -B4B3 8239B232 -B4B4 8239B233 -B4B5 8239B234 -B4B6 8239B235 -B4B7 8239B236 -B4B8 8239B237 -B4B9 8239B238 -B4BA 8239B239 -B4BB 8239B330 -B4BC 8239B331 -B4BD 8239B332 -B4BE 8239B333 -B4BF 8239B334 -B4C0 8239B335 -B4C1 8239B336 -B4C2 8239B337 -B4C3 8239B338 -B4C4 8239B339 -B4C5 8239B430 -B4C6 8239B431 -B4C7 8239B432 -B4C8 8239B433 -B4C9 8239B434 -B4CA 8239B435 -B4CB 8239B436 -B4CC 8239B437 -B4CD 8239B438 -B4CE 
8239B439 -B4CF 8239B530 -B4D0 8239B531 -B4D1 8239B532 -B4D2 8239B533 -B4D3 8239B534 -B4D4 8239B535 -B4D5 8239B536 -B4D6 8239B537 -B4D7 8239B538 -B4D8 8239B539 -B4D9 8239B630 -B4DA 8239B631 -B4DB 8239B632 -B4DC 8239B633 -B4DD 8239B634 -B4DE 8239B635 -B4DF 8239B636 -B4E0 8239B637 -B4E1 8239B638 -B4E2 8239B639 -B4E3 8239B730 -B4E4 8239B731 -B4E5 8239B732 -B4E6 8239B733 -B4E7 8239B734 -B4E8 8239B735 -B4E9 8239B736 -B4EA 8239B737 -B4EB 8239B738 -B4EC 8239B739 -B4ED 8239B830 -B4EE 8239B831 -B4EF 8239B832 -B4F0 8239B833 -B4F1 8239B834 -B4F2 8239B835 -B4F3 8239B836 -B4F4 8239B837 -B4F5 8239B838 -B4F6 8239B839 -B4F7 8239B930 -B4F8 8239B931 -B4F9 8239B932 -B4FA 8239B933 -B4FB 8239B934 -B4FC 8239B935 -B4FD 8239B936 -B4FE 8239B937 -B4FF 8239B938 -B500 8239B939 -B501 8239BA30 -B502 8239BA31 -B503 8239BA32 -B504 8239BA33 -B505 8239BA34 -B506 8239BA35 -B507 8239BA36 -B508 8239BA37 -B509 8239BA38 -B50A 8239BA39 -B50B 8239BB30 -B50C 8239BB31 -B50D 8239BB32 -B50E 8239BB33 -B50F 8239BB34 -B510 8239BB35 -B511 8239BB36 -B512 8239BB37 -B513 8239BB38 -B514 8239BB39 -B515 8239BC30 -B516 8239BC31 -B517 8239BC32 -B518 8239BC33 -B519 8239BC34 -B51A 8239BC35 -B51B 8239BC36 -B51C 8239BC37 -B51D 8239BC38 -B51E 8239BC39 -B51F 8239BD30 -B520 8239BD31 -B521 8239BD32 -B522 8239BD33 -B523 8239BD34 -B524 8239BD35 -B525 8239BD36 -B526 8239BD37 -B527 8239BD38 -B528 8239BD39 -B529 8239BE30 -B52A 8239BE31 -B52B 8239BE32 -B52C 8239BE33 -B52D 8239BE34 -B52E 8239BE35 -B52F 8239BE36 -B530 8239BE37 -B531 8239BE38 -B532 8239BE39 -B533 8239BF30 -B534 8239BF31 -B535 8239BF32 -B536 8239BF33 -B537 8239BF34 -B538 8239BF35 -B539 8239BF36 -B53A 8239BF37 -B53B 8239BF38 -B53C 8239BF39 -B53D 8239C030 -B53E 8239C031 -B53F 8239C032 -B540 8239C033 -B541 8239C034 -B542 8239C035 -B543 8239C036 -B544 8239C037 -B545 8239C038 -B546 8239C039 -B547 8239C130 -B548 8239C131 -B549 8239C132 -B54A 8239C133 -B54B 8239C134 -B54C 8239C135 -B54D 8239C136 -B54E 8239C137 -B54F 8239C138 -B550 8239C139 -B551 8239C230 -B552 8239C231 -B553 8239C232 -B554 8239C233 -B555 8239C234 -B556 8239C235 -B557 8239C236 -B558 8239C237 -B559 8239C238 -B55A 8239C239 -B55B 8239C330 -B55C 8239C331 -B55D 8239C332 -B55E 8239C333 -B55F 8239C334 -B560 8239C335 -B561 8239C336 -B562 8239C337 -B563 8239C338 -B564 8239C339 -B565 8239C430 -B566 8239C431 -B567 8239C432 -B568 8239C433 -B569 8239C434 -B56A 8239C435 -B56B 8239C436 -B56C 8239C437 -B56D 8239C438 -B56E 8239C439 -B56F 8239C530 -B570 8239C531 -B571 8239C532 -B572 8239C533 -B573 8239C534 -B574 8239C535 -B575 8239C536 -B576 8239C537 -B577 8239C538 -B578 8239C539 -B579 8239C630 -B57A 8239C631 -B57B 8239C632 -B57C 8239C633 -B57D 8239C634 -B57E 8239C635 -B57F 8239C636 -B580 8239C637 -B581 8239C638 -B582 8239C639 -B583 8239C730 -B584 8239C731 -B585 8239C732 -B586 8239C733 -B587 8239C734 -B588 8239C735 -B589 8239C736 -B58A 8239C737 -B58B 8239C738 -B58C 8239C739 -B58D 8239C830 -B58E 8239C831 -B58F 8239C832 -B590 8239C833 -B591 8239C834 -B592 8239C835 -B593 8239C836 -B594 8239C837 -B595 8239C838 -B596 8239C839 -B597 8239C930 -B598 8239C931 -B599 8239C932 -B59A 8239C933 -B59B 8239C934 -B59C 8239C935 -B59D 8239C936 -B59E 8239C937 -B59F 8239C938 -B5A0 8239C939 -B5A1 8239CA30 -B5A2 8239CA31 -B5A3 8239CA32 -B5A4 8239CA33 -B5A5 8239CA34 -B5A6 8239CA35 -B5A7 8239CA36 -B5A8 8239CA37 -B5A9 8239CA38 -B5AA 8239CA39 -B5AB 8239CB30 -B5AC 8239CB31 -B5AD 8239CB32 -B5AE 8239CB33 -B5AF 8239CB34 -B5B0 8239CB35 -B5B1 8239CB36 -B5B2 8239CB37 -B5B3 8239CB38 -B5B4 8239CB39 -B5B5 8239CC30 -B5B6 8239CC31 -B5B7 8239CC32 -B5B8 8239CC33 -B5B9 8239CC34 -B5BA 8239CC35 -B5BB 
8239CC36 -B5BC 8239CC37 -B5BD 8239CC38 -B5BE 8239CC39 -B5BF 8239CD30 -B5C0 8239CD31 -B5C1 8239CD32 -B5C2 8239CD33 -B5C3 8239CD34 -B5C4 8239CD35 -B5C5 8239CD36 -B5C6 8239CD37 -B5C7 8239CD38 -B5C8 8239CD39 -B5C9 8239CE30 -B5CA 8239CE31 -B5CB 8239CE32 -B5CC 8239CE33 -B5CD 8239CE34 -B5CE 8239CE35 -B5CF 8239CE36 -B5D0 8239CE37 -B5D1 8239CE38 -B5D2 8239CE39 -B5D3 8239CF30 -B5D4 8239CF31 -B5D5 8239CF32 -B5D6 8239CF33 -B5D7 8239CF34 -B5D8 8239CF35 -B5D9 8239CF36 -B5DA 8239CF37 -B5DB 8239CF38 -B5DC 8239CF39 -B5DD 8239D030 -B5DE 8239D031 -B5DF 8239D032 -B5E0 8239D033 -B5E1 8239D034 -B5E2 8239D035 -B5E3 8239D036 -B5E4 8239D037 -B5E5 8239D038 -B5E6 8239D039 -B5E7 8239D130 -B5E8 8239D131 -B5E9 8239D132 -B5EA 8239D133 -B5EB 8239D134 -B5EC 8239D135 -B5ED 8239D136 -B5EE 8239D137 -B5EF 8239D138 -B5F0 8239D139 -B5F1 8239D230 -B5F2 8239D231 -B5F3 8239D232 -B5F4 8239D233 -B5F5 8239D234 -B5F6 8239D235 -B5F7 8239D236 -B5F8 8239D237 -B5F9 8239D238 -B5FA 8239D239 -B5FB 8239D330 -B5FC 8239D331 -B5FD 8239D332 -B5FE 8239D333 -B5FF 8239D334 -B600 8239D335 -B601 8239D336 -B602 8239D337 -B603 8239D338 -B604 8239D339 -B605 8239D430 -B606 8239D431 -B607 8239D432 -B608 8239D433 -B609 8239D434 -B60A 8239D435 -B60B 8239D436 -B60C 8239D437 -B60D 8239D438 -B60E 8239D439 -B60F 8239D530 -B610 8239D531 -B611 8239D532 -B612 8239D533 -B613 8239D534 -B614 8239D535 -B615 8239D536 -B616 8239D537 -B617 8239D538 -B618 8239D539 -B619 8239D630 -B61A 8239D631 -B61B 8239D632 -B61C 8239D633 -B61D 8239D634 -B61E 8239D635 -B61F 8239D636 -B620 8239D637 -B621 8239D638 -B622 8239D639 -B623 8239D730 -B624 8239D731 -B625 8239D732 -B626 8239D733 -B627 8239D734 -B628 8239D735 -B629 8239D736 -B62A 8239D737 -B62B 8239D738 -B62C 8239D739 -B62D 8239D830 -B62E 8239D831 -B62F 8239D832 -B630 8239D833 -B631 8239D834 -B632 8239D835 -B633 8239D836 -B634 8239D837 -B635 8239D838 -B636 8239D839 -B637 8239D930 -B638 8239D931 -B639 8239D932 -B63A 8239D933 -B63B 8239D934 -B63C 8239D935 -B63D 8239D936 -B63E 8239D937 -B63F 8239D938 -B640 8239D939 -B641 8239DA30 -B642 8239DA31 -B643 8239DA32 -B644 8239DA33 -B645 8239DA34 -B646 8239DA35 -B647 8239DA36 -B648 8239DA37 -B649 8239DA38 -B64A 8239DA39 -B64B 8239DB30 -B64C 8239DB31 -B64D 8239DB32 -B64E 8239DB33 -B64F 8239DB34 -B650 8239DB35 -B651 8239DB36 -B652 8239DB37 -B653 8239DB38 -B654 8239DB39 -B655 8239DC30 -B656 8239DC31 -B657 8239DC32 -B658 8239DC33 -B659 8239DC34 -B65A 8239DC35 -B65B 8239DC36 -B65C 8239DC37 -B65D 8239DC38 -B65E 8239DC39 -B65F 8239DD30 -B660 8239DD31 -B661 8239DD32 -B662 8239DD33 -B663 8239DD34 -B664 8239DD35 -B665 8239DD36 -B666 8239DD37 -B667 8239DD38 -B668 8239DD39 -B669 8239DE30 -B66A 8239DE31 -B66B 8239DE32 -B66C 8239DE33 -B66D 8239DE34 -B66E 8239DE35 -B66F 8239DE36 -B670 8239DE37 -B671 8239DE38 -B672 8239DE39 -B673 8239DF30 -B674 8239DF31 -B675 8239DF32 -B676 8239DF33 -B677 8239DF34 -B678 8239DF35 -B679 8239DF36 -B67A 8239DF37 -B67B 8239DF38 -B67C 8239DF39 -B67D 8239E030 -B67E 8239E031 -B67F 8239E032 -B680 8239E033 -B681 8239E034 -B682 8239E035 -B683 8239E036 -B684 8239E037 -B685 8239E038 -B686 8239E039 -B687 8239E130 -B688 8239E131 -B689 8239E132 -B68A 8239E133 -B68B 8239E134 -B68C 8239E135 -B68D 8239E136 -B68E 8239E137 -B68F 8239E138 -B690 8239E139 -B691 8239E230 -B692 8239E231 -B693 8239E232 -B694 8239E233 -B695 8239E234 -B696 8239E235 -B697 8239E236 -B698 8239E237 -B699 8239E238 -B69A 8239E239 -B69B 8239E330 -B69C 8239E331 -B69D 8239E332 -B69E 8239E333 -B69F 8239E334 -B6A0 8239E335 -B6A1 8239E336 -B6A2 8239E337 -B6A3 8239E338 -B6A4 8239E339 -B6A5 8239E430 -B6A6 8239E431 -B6A7 8239E432 -B6A8 
8239E433 -B6A9 8239E434 -B6AA 8239E435 -B6AB 8239E436 -B6AC 8239E437 -B6AD 8239E438 -B6AE 8239E439 -B6AF 8239E530 -B6B0 8239E531 -B6B1 8239E532 -B6B2 8239E533 -B6B3 8239E534 -B6B4 8239E535 -B6B5 8239E536 -B6B6 8239E537 -B6B7 8239E538 -B6B8 8239E539 -B6B9 8239E630 -B6BA 8239E631 -B6BB 8239E632 -B6BC 8239E633 -B6BD 8239E634 -B6BE 8239E635 -B6BF 8239E636 -B6C0 8239E637 -B6C1 8239E638 -B6C2 8239E639 -B6C3 8239E730 -B6C4 8239E731 -B6C5 8239E732 -B6C6 8239E733 -B6C7 8239E734 -B6C8 8239E735 -B6C9 8239E736 -B6CA 8239E737 -B6CB 8239E738 -B6CC 8239E739 -B6CD 8239E830 -B6CE 8239E831 -B6CF 8239E832 -B6D0 8239E833 -B6D1 8239E834 -B6D2 8239E835 -B6D3 8239E836 -B6D4 8239E837 -B6D5 8239E838 -B6D6 8239E839 -B6D7 8239E930 -B6D8 8239E931 -B6D9 8239E932 -B6DA 8239E933 -B6DB 8239E934 -B6DC 8239E935 -B6DD 8239E936 -B6DE 8239E937 -B6DF 8239E938 -B6E0 8239E939 -B6E1 8239EA30 -B6E2 8239EA31 -B6E3 8239EA32 -B6E4 8239EA33 -B6E5 8239EA34 -B6E6 8239EA35 -B6E7 8239EA36 -B6E8 8239EA37 -B6E9 8239EA38 -B6EA 8239EA39 -B6EB 8239EB30 -B6EC 8239EB31 -B6ED 8239EB32 -B6EE 8239EB33 -B6EF 8239EB34 -B6F0 8239EB35 -B6F1 8239EB36 -B6F2 8239EB37 -B6F3 8239EB38 -B6F4 8239EB39 -B6F5 8239EC30 -B6F6 8239EC31 -B6F7 8239EC32 -B6F8 8239EC33 -B6F9 8239EC34 -B6FA 8239EC35 -B6FB 8239EC36 -B6FC 8239EC37 -B6FD 8239EC38 -B6FE 8239EC39 -B6FF 8239ED30 -B700 8239ED31 -B701 8239ED32 -B702 8239ED33 -B703 8239ED34 -B704 8239ED35 -B705 8239ED36 -B706 8239ED37 -B707 8239ED38 -B708 8239ED39 -B709 8239EE30 -B70A 8239EE31 -B70B 8239EE32 -B70C 8239EE33 -B70D 8239EE34 -B70E 8239EE35 -B70F 8239EE36 -B710 8239EE37 -B711 8239EE38 -B712 8239EE39 -B713 8239EF30 -B714 8239EF31 -B715 8239EF32 -B716 8239EF33 -B717 8239EF34 -B718 8239EF35 -B719 8239EF36 -B71A 8239EF37 -B71B 8239EF38 -B71C 8239EF39 -B71D 8239F030 -B71E 8239F031 -B71F 8239F032 -B720 8239F033 -B721 8239F034 -B722 8239F035 -B723 8239F036 -B724 8239F037 -B725 8239F038 -B726 8239F039 -B727 8239F130 -B728 8239F131 -B729 8239F132 -B72A 8239F133 -B72B 8239F134 -B72C 8239F135 -B72D 8239F136 -B72E 8239F137 -B72F 8239F138 -B730 8239F139 -B731 8239F230 -B732 8239F231 -B733 8239F232 -B734 8239F233 -B735 8239F234 -B736 8239F235 -B737 8239F236 -B738 8239F237 -B739 8239F238 -B73A 8239F239 -B73B 8239F330 -B73C 8239F331 -B73D 8239F332 -B73E 8239F333 -B73F 8239F334 -B740 8239F335 -B741 8239F336 -B742 8239F337 -B743 8239F338 -B744 8239F339 -B745 8239F430 -B746 8239F431 -B747 8239F432 -B748 8239F433 -B749 8239F434 -B74A 8239F435 -B74B 8239F436 -B74C 8239F437 -B74D 8239F438 -B74E 8239F439 -B74F 8239F530 -B750 8239F531 -B751 8239F532 -B752 8239F533 -B753 8239F534 -B754 8239F535 -B755 8239F536 -B756 8239F537 -B757 8239F538 -B758 8239F539 -B759 8239F630 -B75A 8239F631 -B75B 8239F632 -B75C 8239F633 -B75D 8239F634 -B75E 8239F635 -B75F 8239F636 -B760 8239F637 -B761 8239F638 -B762 8239F639 -B763 8239F730 -B764 8239F731 -B765 8239F732 -B766 8239F733 -B767 8239F734 -B768 8239F735 -B769 8239F736 -B76A 8239F737 -B76B 8239F738 -B76C 8239F739 -B76D 8239F830 -B76E 8239F831 -B76F 8239F832 -B770 8239F833 -B771 8239F834 -B772 8239F835 -B773 8239F836 -B774 8239F837 -B775 8239F838 -B776 8239F839 -B777 8239F930 -B778 8239F931 -B779 8239F932 -B77A 8239F933 -B77B 8239F934 -B77C 8239F935 -B77D 8239F936 -B77E 8239F937 -B77F 8239F938 -B780 8239F939 -B781 8239FA30 -B782 8239FA31 -B783 8239FA32 -B784 8239FA33 -B785 8239FA34 -B786 8239FA35 -B787 8239FA36 -B788 8239FA37 -B789 8239FA38 -B78A 8239FA39 -B78B 8239FB30 -B78C 8239FB31 -B78D 8239FB32 -B78E 8239FB33 -B78F 8239FB34 -B790 8239FB35 -B791 8239FB36 -B792 8239FB37 -B793 8239FB38 -B794 8239FB39 -B795 
8239FC30 -B796 8239FC31 -B797 8239FC32 -B798 8239FC33 -B799 8239FC34 -B79A 8239FC35 -B79B 8239FC36 -B79C 8239FC37 -B79D 8239FC38 -B79E 8239FC39 -B79F 8239FD30 -B7A0 8239FD31 -B7A1 8239FD32 -B7A2 8239FD33 -B7A3 8239FD34 -B7A4 8239FD35 -B7A5 8239FD36 -B7A6 8239FD37 -B7A7 8239FD38 -B7A8 8239FD39 -B7A9 8239FE30 -B7AA 8239FE31 -B7AB 8239FE32 -B7AC 8239FE33 -B7AD 8239FE34 -B7AE 8239FE35 -B7AF 8239FE36 -B7B0 8239FE37 -B7B1 8239FE38 -B7B2 8239FE39 -B7B3 83308130 -B7B4 83308131 -B7B5 83308132 -B7B6 83308133 -B7B7 83308134 -B7B8 83308135 -B7B9 83308136 -B7BA 83308137 -B7BB 83308138 -B7BC 83308139 -B7BD 83308230 -B7BE 83308231 -B7BF 83308232 -B7C0 83308233 -B7C1 83308234 -B7C2 83308235 -B7C3 83308236 -B7C4 83308237 -B7C5 83308238 -B7C6 83308239 -B7C7 83308330 -B7C8 83308331 -B7C9 83308332 -B7CA 83308333 -B7CB 83308334 -B7CC 83308335 -B7CD 83308336 -B7CE 83308337 -B7CF 83308338 -B7D0 83308339 -B7D1 83308430 -B7D2 83308431 -B7D3 83308432 -B7D4 83308433 -B7D5 83308434 -B7D6 83308435 -B7D7 83308436 -B7D8 83308437 -B7D9 83308438 -B7DA 83308439 -B7DB 83308530 -B7DC 83308531 -B7DD 83308532 -B7DE 83308533 -B7DF 83308534 -B7E0 83308535 -B7E1 83308536 -B7E2 83308537 -B7E3 83308538 -B7E4 83308539 -B7E5 83308630 -B7E6 83308631 -B7E7 83308632 -B7E8 83308633 -B7E9 83308634 -B7EA 83308635 -B7EB 83308636 -B7EC 83308637 -B7ED 83308638 -B7EE 83308639 -B7EF 83308730 -B7F0 83308731 -B7F1 83308732 -B7F2 83308733 -B7F3 83308734 -B7F4 83308735 -B7F5 83308736 -B7F6 83308737 -B7F7 83308738 -B7F8 83308739 -B7F9 83308830 -B7FA 83308831 -B7FB 83308832 -B7FC 83308833 -B7FD 83308834 -B7FE 83308835 -B7FF 83308836 -B800 83308837 -B801 83308838 -B802 83308839 -B803 83308930 -B804 83308931 -B805 83308932 -B806 83308933 -B807 83308934 -B808 83308935 -B809 83308936 -B80A 83308937 -B80B 83308938 -B80C 83308939 -B80D 83308A30 -B80E 83308A31 -B80F 83308A32 -B810 83308A33 -B811 83308A34 -B812 83308A35 -B813 83308A36 -B814 83308A37 -B815 83308A38 -B816 83308A39 -B817 83308B30 -B818 83308B31 -B819 83308B32 -B81A 83308B33 -B81B 83308B34 -B81C 83308B35 -B81D 83308B36 -B81E 83308B37 -B81F 83308B38 -B820 83308B39 -B821 83308C30 -B822 83308C31 -B823 83308C32 -B824 83308C33 -B825 83308C34 -B826 83308C35 -B827 83308C36 -B828 83308C37 -B829 83308C38 -B82A 83308C39 -B82B 83308D30 -B82C 83308D31 -B82D 83308D32 -B82E 83308D33 -B82F 83308D34 -B830 83308D35 -B831 83308D36 -B832 83308D37 -B833 83308D38 -B834 83308D39 -B835 83308E30 -B836 83308E31 -B837 83308E32 -B838 83308E33 -B839 83308E34 -B83A 83308E35 -B83B 83308E36 -B83C 83308E37 -B83D 83308E38 -B83E 83308E39 -B83F 83308F30 -B840 83308F31 -B841 83308F32 -B842 83308F33 -B843 83308F34 -B844 83308F35 -B845 83308F36 -B846 83308F37 -B847 83308F38 -B848 83308F39 -B849 83309030 -B84A 83309031 -B84B 83309032 -B84C 83309033 -B84D 83309034 -B84E 83309035 -B84F 83309036 -B850 83309037 -B851 83309038 -B852 83309039 -B853 83309130 -B854 83309131 -B855 83309132 -B856 83309133 -B857 83309134 -B858 83309135 -B859 83309136 -B85A 83309137 -B85B 83309138 -B85C 83309139 -B85D 83309230 -B85E 83309231 -B85F 83309232 -B860 83309233 -B861 83309234 -B862 83309235 -B863 83309236 -B864 83309237 -B865 83309238 -B866 83309239 -B867 83309330 -B868 83309331 -B869 83309332 -B86A 83309333 -B86B 83309334 -B86C 83309335 -B86D 83309336 -B86E 83309337 -B86F 83309338 -B870 83309339 -B871 83309430 -B872 83309431 -B873 83309432 -B874 83309433 -B875 83309434 -B876 83309435 -B877 83309436 -B878 83309437 -B879 83309438 -B87A 83309439 -B87B 83309530 -B87C 83309531 -B87D 83309532 -B87E 83309533 -B87F 83309534 -B880 83309535 -B881 83309536 -B882 
83309537 -B883 83309538 -B884 83309539 -B885 83309630 -B886 83309631 -B887 83309632 -B888 83309633 -B889 83309634 -B88A 83309635 -B88B 83309636 -B88C 83309637 -B88D 83309638 -B88E 83309639 -B88F 83309730 -B890 83309731 -B891 83309732 -B892 83309733 -B893 83309734 -B894 83309735 -B895 83309736 -B896 83309737 -B897 83309738 -B898 83309739 -B899 83309830 -B89A 83309831 -B89B 83309832 -B89C 83309833 -B89D 83309834 -B89E 83309835 -B89F 83309836 -B8A0 83309837 -B8A1 83309838 -B8A2 83309839 -B8A3 83309930 -B8A4 83309931 -B8A5 83309932 -B8A6 83309933 -B8A7 83309934 -B8A8 83309935 -B8A9 83309936 -B8AA 83309937 -B8AB 83309938 -B8AC 83309939 -B8AD 83309A30 -B8AE 83309A31 -B8AF 83309A32 -B8B0 83309A33 -B8B1 83309A34 -B8B2 83309A35 -B8B3 83309A36 -B8B4 83309A37 -B8B5 83309A38 -B8B6 83309A39 -B8B7 83309B30 -B8B8 83309B31 -B8B9 83309B32 -B8BA 83309B33 -B8BB 83309B34 -B8BC 83309B35 -B8BD 83309B36 -B8BE 83309B37 -B8BF 83309B38 -B8C0 83309B39 -B8C1 83309C30 -B8C2 83309C31 -B8C3 83309C32 -B8C4 83309C33 -B8C5 83309C34 -B8C6 83309C35 -B8C7 83309C36 -B8C8 83309C37 -B8C9 83309C38 -B8CA 83309C39 -B8CB 83309D30 -B8CC 83309D31 -B8CD 83309D32 -B8CE 83309D33 -B8CF 83309D34 -B8D0 83309D35 -B8D1 83309D36 -B8D2 83309D37 -B8D3 83309D38 -B8D4 83309D39 -B8D5 83309E30 -B8D6 83309E31 -B8D7 83309E32 -B8D8 83309E33 -B8D9 83309E34 -B8DA 83309E35 -B8DB 83309E36 -B8DC 83309E37 -B8DD 83309E38 -B8DE 83309E39 -B8DF 83309F30 -B8E0 83309F31 -B8E1 83309F32 -B8E2 83309F33 -B8E3 83309F34 -B8E4 83309F35 -B8E5 83309F36 -B8E6 83309F37 -B8E7 83309F38 -B8E8 83309F39 -B8E9 8330A030 -B8EA 8330A031 -B8EB 8330A032 -B8EC 8330A033 -B8ED 8330A034 -B8EE 8330A035 -B8EF 8330A036 -B8F0 8330A037 -B8F1 8330A038 -B8F2 8330A039 -B8F3 8330A130 -B8F4 8330A131 -B8F5 8330A132 -B8F6 8330A133 -B8F7 8330A134 -B8F8 8330A135 -B8F9 8330A136 -B8FA 8330A137 -B8FB 8330A138 -B8FC 8330A139 -B8FD 8330A230 -B8FE 8330A231 -B8FF 8330A232 -B900 8330A233 -B901 8330A234 -B902 8330A235 -B903 8330A236 -B904 8330A237 -B905 8330A238 -B906 8330A239 -B907 8330A330 -B908 8330A331 -B909 8330A332 -B90A 8330A333 -B90B 8330A334 -B90C 8330A335 -B90D 8330A336 -B90E 8330A337 -B90F 8330A338 -B910 8330A339 -B911 8330A430 -B912 8330A431 -B913 8330A432 -B914 8330A433 -B915 8330A434 -B916 8330A435 -B917 8330A436 -B918 8330A437 -B919 8330A438 -B91A 8330A439 -B91B 8330A530 -B91C 8330A531 -B91D 8330A532 -B91E 8330A533 -B91F 8330A534 -B920 8330A535 -B921 8330A536 -B922 8330A537 -B923 8330A538 -B924 8330A539 -B925 8330A630 -B926 8330A631 -B927 8330A632 -B928 8330A633 -B929 8330A634 -B92A 8330A635 -B92B 8330A636 -B92C 8330A637 -B92D 8330A638 -B92E 8330A639 -B92F 8330A730 -B930 8330A731 -B931 8330A732 -B932 8330A733 -B933 8330A734 -B934 8330A735 -B935 8330A736 -B936 8330A737 -B937 8330A738 -B938 8330A739 -B939 8330A830 -B93A 8330A831 -B93B 8330A832 -B93C 8330A833 -B93D 8330A834 -B93E 8330A835 -B93F 8330A836 -B940 8330A837 -B941 8330A838 -B942 8330A839 -B943 8330A930 -B944 8330A931 -B945 8330A932 -B946 8330A933 -B947 8330A934 -B948 8330A935 -B949 8330A936 -B94A 8330A937 -B94B 8330A938 -B94C 8330A939 -B94D 8330AA30 -B94E 8330AA31 -B94F 8330AA32 -B950 8330AA33 -B951 8330AA34 -B952 8330AA35 -B953 8330AA36 -B954 8330AA37 -B955 8330AA38 -B956 8330AA39 -B957 8330AB30 -B958 8330AB31 -B959 8330AB32 -B95A 8330AB33 -B95B 8330AB34 -B95C 8330AB35 -B95D 8330AB36 -B95E 8330AB37 -B95F 8330AB38 -B960 8330AB39 -B961 8330AC30 -B962 8330AC31 -B963 8330AC32 -B964 8330AC33 -B965 8330AC34 -B966 8330AC35 -B967 8330AC36 -B968 8330AC37 -B969 8330AC38 -B96A 8330AC39 -B96B 8330AD30 -B96C 8330AD31 -B96D 8330AD32 -B96E 8330AD33 -B96F 
8330AD34 -B970 8330AD35 -B971 8330AD36 -B972 8330AD37 -B973 8330AD38 -B974 8330AD39 -B975 8330AE30 -B976 8330AE31 -B977 8330AE32 -B978 8330AE33 -B979 8330AE34 -B97A 8330AE35 -B97B 8330AE36 -B97C 8330AE37 -B97D 8330AE38 -B97E 8330AE39 -B97F 8330AF30 -B980 8330AF31 -B981 8330AF32 -B982 8330AF33 -B983 8330AF34 -B984 8330AF35 -B985 8330AF36 -B986 8330AF37 -B987 8330AF38 -B988 8330AF39 -B989 8330B030 -B98A 8330B031 -B98B 8330B032 -B98C 8330B033 -B98D 8330B034 -B98E 8330B035 -B98F 8330B036 -B990 8330B037 -B991 8330B038 -B992 8330B039 -B993 8330B130 -B994 8330B131 -B995 8330B132 -B996 8330B133 -B997 8330B134 -B998 8330B135 -B999 8330B136 -B99A 8330B137 -B99B 8330B138 -B99C 8330B139 -B99D 8330B230 -B99E 8330B231 -B99F 8330B232 -B9A0 8330B233 -B9A1 8330B234 -B9A2 8330B235 -B9A3 8330B236 -B9A4 8330B237 -B9A5 8330B238 -B9A6 8330B239 -B9A7 8330B330 -B9A8 8330B331 -B9A9 8330B332 -B9AA 8330B333 -B9AB 8330B334 -B9AC 8330B335 -B9AD 8330B336 -B9AE 8330B337 -B9AF 8330B338 -B9B0 8330B339 -B9B1 8330B430 -B9B2 8330B431 -B9B3 8330B432 -B9B4 8330B433 -B9B5 8330B434 -B9B6 8330B435 -B9B7 8330B436 -B9B8 8330B437 -B9B9 8330B438 -B9BA 8330B439 -B9BB 8330B530 -B9BC 8330B531 -B9BD 8330B532 -B9BE 8330B533 -B9BF 8330B534 -B9C0 8330B535 -B9C1 8330B536 -B9C2 8330B537 -B9C3 8330B538 -B9C4 8330B539 -B9C5 8330B630 -B9C6 8330B631 -B9C7 8330B632 -B9C8 8330B633 -B9C9 8330B634 -B9CA 8330B635 -B9CB 8330B636 -B9CC 8330B637 -B9CD 8330B638 -B9CE 8330B639 -B9CF 8330B730 -B9D0 8330B731 -B9D1 8330B732 -B9D2 8330B733 -B9D3 8330B734 -B9D4 8330B735 -B9D5 8330B736 -B9D6 8330B737 -B9D7 8330B738 -B9D8 8330B739 -B9D9 8330B830 -B9DA 8330B831 -B9DB 8330B832 -B9DC 8330B833 -B9DD 8330B834 -B9DE 8330B835 -B9DF 8330B836 -B9E0 8330B837 -B9E1 8330B838 -B9E2 8330B839 -B9E3 8330B930 -B9E4 8330B931 -B9E5 8330B932 -B9E6 8330B933 -B9E7 8330B934 -B9E8 8330B935 -B9E9 8330B936 -B9EA 8330B937 -B9EB 8330B938 -B9EC 8330B939 -B9ED 8330BA30 -B9EE 8330BA31 -B9EF 8330BA32 -B9F0 8330BA33 -B9F1 8330BA34 -B9F2 8330BA35 -B9F3 8330BA36 -B9F4 8330BA37 -B9F5 8330BA38 -B9F6 8330BA39 -B9F7 8330BB30 -B9F8 8330BB31 -B9F9 8330BB32 -B9FA 8330BB33 -B9FB 8330BB34 -B9FC 8330BB35 -B9FD 8330BB36 -B9FE 8330BB37 -B9FF 8330BB38 -BA00 8330BB39 -BA01 8330BC30 -BA02 8330BC31 -BA03 8330BC32 -BA04 8330BC33 -BA05 8330BC34 -BA06 8330BC35 -BA07 8330BC36 -BA08 8330BC37 -BA09 8330BC38 -BA0A 8330BC39 -BA0B 8330BD30 -BA0C 8330BD31 -BA0D 8330BD32 -BA0E 8330BD33 -BA0F 8330BD34 -BA10 8330BD35 -BA11 8330BD36 -BA12 8330BD37 -BA13 8330BD38 -BA14 8330BD39 -BA15 8330BE30 -BA16 8330BE31 -BA17 8330BE32 -BA18 8330BE33 -BA19 8330BE34 -BA1A 8330BE35 -BA1B 8330BE36 -BA1C 8330BE37 -BA1D 8330BE38 -BA1E 8330BE39 -BA1F 8330BF30 -BA20 8330BF31 -BA21 8330BF32 -BA22 8330BF33 -BA23 8330BF34 -BA24 8330BF35 -BA25 8330BF36 -BA26 8330BF37 -BA27 8330BF38 -BA28 8330BF39 -BA29 8330C030 -BA2A 8330C031 -BA2B 8330C032 -BA2C 8330C033 -BA2D 8330C034 -BA2E 8330C035 -BA2F 8330C036 -BA30 8330C037 -BA31 8330C038 -BA32 8330C039 -BA33 8330C130 -BA34 8330C131 -BA35 8330C132 -BA36 8330C133 -BA37 8330C134 -BA38 8330C135 -BA39 8330C136 -BA3A 8330C137 -BA3B 8330C138 -BA3C 8330C139 -BA3D 8330C230 -BA3E 8330C231 -BA3F 8330C232 -BA40 8330C233 -BA41 8330C234 -BA42 8330C235 -BA43 8330C236 -BA44 8330C237 -BA45 8330C238 -BA46 8330C239 -BA47 8330C330 -BA48 8330C331 -BA49 8330C332 -BA4A 8330C333 -BA4B 8330C334 -BA4C 8330C335 -BA4D 8330C336 -BA4E 8330C337 -BA4F 8330C338 -BA50 8330C339 -BA51 8330C430 -BA52 8330C431 -BA53 8330C432 -BA54 8330C433 -BA55 8330C434 -BA56 8330C435 -BA57 8330C436 -BA58 8330C437 -BA59 8330C438 -BA5A 8330C439 -BA5B 8330C530 -BA5C 
8330C531 -BA5D 8330C532 -BA5E 8330C533 -BA5F 8330C534 -BA60 8330C535 -BA61 8330C536 -BA62 8330C537 -BA63 8330C538 -BA64 8330C539 -BA65 8330C630 -BA66 8330C631 -BA67 8330C632 -BA68 8330C633 -BA69 8330C634 -BA6A 8330C635 -BA6B 8330C636 -BA6C 8330C637 -BA6D 8330C638 -BA6E 8330C639 -BA6F 8330C730 -BA70 8330C731 -BA71 8330C732 -BA72 8330C733 -BA73 8330C734 -BA74 8330C735 -BA75 8330C736 -BA76 8330C737 -BA77 8330C738 -BA78 8330C739 -BA79 8330C830 -BA7A 8330C831 -BA7B 8330C832 -BA7C 8330C833 -BA7D 8330C834 -BA7E 8330C835 -BA7F 8330C836 -BA80 8330C837 -BA81 8330C838 -BA82 8330C839 -BA83 8330C930 -BA84 8330C931 -BA85 8330C932 -BA86 8330C933 -BA87 8330C934 -BA88 8330C935 -BA89 8330C936 -BA8A 8330C937 -BA8B 8330C938 -BA8C 8330C939 -BA8D 8330CA30 -BA8E 8330CA31 -BA8F 8330CA32 -BA90 8330CA33 -BA91 8330CA34 -BA92 8330CA35 -BA93 8330CA36 -BA94 8330CA37 -BA95 8330CA38 -BA96 8330CA39 -BA97 8330CB30 -BA98 8330CB31 -BA99 8330CB32 -BA9A 8330CB33 -BA9B 8330CB34 -BA9C 8330CB35 -BA9D 8330CB36 -BA9E 8330CB37 -BA9F 8330CB38 -BAA0 8330CB39 -BAA1 8330CC30 -BAA2 8330CC31 -BAA3 8330CC32 -BAA4 8330CC33 -BAA5 8330CC34 -BAA6 8330CC35 -BAA7 8330CC36 -BAA8 8330CC37 -BAA9 8330CC38 -BAAA 8330CC39 -BAAB 8330CD30 -BAAC 8330CD31 -BAAD 8330CD32 -BAAE 8330CD33 -BAAF 8330CD34 -BAB0 8330CD35 -BAB1 8330CD36 -BAB2 8330CD37 -BAB3 8330CD38 -BAB4 8330CD39 -BAB5 8330CE30 -BAB6 8330CE31 -BAB7 8330CE32 -BAB8 8330CE33 -BAB9 8330CE34 -BABA 8330CE35 -BABB 8330CE36 -BABC 8330CE37 -BABD 8330CE38 -BABE 8330CE39 -BABF 8330CF30 -BAC0 8330CF31 -BAC1 8330CF32 -BAC2 8330CF33 -BAC3 8330CF34 -BAC4 8330CF35 -BAC5 8330CF36 -BAC6 8330CF37 -BAC7 8330CF38 -BAC8 8330CF39 -BAC9 8330D030 -BACA 8330D031 -BACB 8330D032 -BACC 8330D033 -BACD 8330D034 -BACE 8330D035 -BACF 8330D036 -BAD0 8330D037 -BAD1 8330D038 -BAD2 8330D039 -BAD3 8330D130 -BAD4 8330D131 -BAD5 8330D132 -BAD6 8330D133 -BAD7 8330D134 -BAD8 8330D135 -BAD9 8330D136 -BADA 8330D137 -BADB 8330D138 -BADC 8330D139 -BADD 8330D230 -BADE 8330D231 -BADF 8330D232 -BAE0 8330D233 -BAE1 8330D234 -BAE2 8330D235 -BAE3 8330D236 -BAE4 8330D237 -BAE5 8330D238 -BAE6 8330D239 -BAE7 8330D330 -BAE8 8330D331 -BAE9 8330D332 -BAEA 8330D333 -BAEB 8330D334 -BAEC 8330D335 -BAED 8330D336 -BAEE 8330D337 -BAEF 8330D338 -BAF0 8330D339 -BAF1 8330D430 -BAF2 8330D431 -BAF3 8330D432 -BAF4 8330D433 -BAF5 8330D434 -BAF6 8330D435 -BAF7 8330D436 -BAF8 8330D437 -BAF9 8330D438 -BAFA 8330D439 -BAFB 8330D530 -BAFC 8330D531 -BAFD 8330D532 -BAFE 8330D533 -BAFF 8330D534 -BB00 8330D535 -BB01 8330D536 -BB02 8330D537 -BB03 8330D538 -BB04 8330D539 -BB05 8330D630 -BB06 8330D631 -BB07 8330D632 -BB08 8330D633 -BB09 8330D634 -BB0A 8330D635 -BB0B 8330D636 -BB0C 8330D637 -BB0D 8330D638 -BB0E 8330D639 -BB0F 8330D730 -BB10 8330D731 -BB11 8330D732 -BB12 8330D733 -BB13 8330D734 -BB14 8330D735 -BB15 8330D736 -BB16 8330D737 -BB17 8330D738 -BB18 8330D739 -BB19 8330D830 -BB1A 8330D831 -BB1B 8330D832 -BB1C 8330D833 -BB1D 8330D834 -BB1E 8330D835 -BB1F 8330D836 -BB20 8330D837 -BB21 8330D838 -BB22 8330D839 -BB23 8330D930 -BB24 8330D931 -BB25 8330D932 -BB26 8330D933 -BB27 8330D934 -BB28 8330D935 -BB29 8330D936 -BB2A 8330D937 -BB2B 8330D938 -BB2C 8330D939 -BB2D 8330DA30 -BB2E 8330DA31 -BB2F 8330DA32 -BB30 8330DA33 -BB31 8330DA34 -BB32 8330DA35 -BB33 8330DA36 -BB34 8330DA37 -BB35 8330DA38 -BB36 8330DA39 -BB37 8330DB30 -BB38 8330DB31 -BB39 8330DB32 -BB3A 8330DB33 -BB3B 8330DB34 -BB3C 8330DB35 -BB3D 8330DB36 -BB3E 8330DB37 -BB3F 8330DB38 -BB40 8330DB39 -BB41 8330DC30 -BB42 8330DC31 -BB43 8330DC32 -BB44 8330DC33 -BB45 8330DC34 -BB46 8330DC35 -BB47 8330DC36 -BB48 8330DC37 -BB49 
8330DC38 -BB4A 8330DC39 -BB4B 8330DD30 -BB4C 8330DD31 -BB4D 8330DD32 -BB4E 8330DD33 -BB4F 8330DD34 -BB50 8330DD35 -BB51 8330DD36 -BB52 8330DD37 -BB53 8330DD38 -BB54 8330DD39 -BB55 8330DE30 -BB56 8330DE31 -BB57 8330DE32 -BB58 8330DE33 -BB59 8330DE34 -BB5A 8330DE35 -BB5B 8330DE36 -BB5C 8330DE37 -BB5D 8330DE38 -BB5E 8330DE39 -BB5F 8330DF30 -BB60 8330DF31 -BB61 8330DF32 -BB62 8330DF33 -BB63 8330DF34 -BB64 8330DF35 -BB65 8330DF36 -BB66 8330DF37 -BB67 8330DF38 -BB68 8330DF39 -BB69 8330E030 -BB6A 8330E031 -BB6B 8330E032 -BB6C 8330E033 -BB6D 8330E034 -BB6E 8330E035 -BB6F 8330E036 -BB70 8330E037 -BB71 8330E038 -BB72 8330E039 -BB73 8330E130 -BB74 8330E131 -BB75 8330E132 -BB76 8330E133 -BB77 8330E134 -BB78 8330E135 -BB79 8330E136 -BB7A 8330E137 -BB7B 8330E138 -BB7C 8330E139 -BB7D 8330E230 -BB7E 8330E231 -BB7F 8330E232 -BB80 8330E233 -BB81 8330E234 -BB82 8330E235 -BB83 8330E236 -BB84 8330E237 -BB85 8330E238 -BB86 8330E239 -BB87 8330E330 -BB88 8330E331 -BB89 8330E332 -BB8A 8330E333 -BB8B 8330E334 -BB8C 8330E335 -BB8D 8330E336 -BB8E 8330E337 -BB8F 8330E338 -BB90 8330E339 -BB91 8330E430 -BB92 8330E431 -BB93 8330E432 -BB94 8330E433 -BB95 8330E434 -BB96 8330E435 -BB97 8330E436 -BB98 8330E437 -BB99 8330E438 -BB9A 8330E439 -BB9B 8330E530 -BB9C 8330E531 -BB9D 8330E532 -BB9E 8330E533 -BB9F 8330E534 -BBA0 8330E535 -BBA1 8330E536 -BBA2 8330E537 -BBA3 8330E538 -BBA4 8330E539 -BBA5 8330E630 -BBA6 8330E631 -BBA7 8330E632 -BBA8 8330E633 -BBA9 8330E634 -BBAA 8330E635 -BBAB 8330E636 -BBAC 8330E637 -BBAD 8330E638 -BBAE 8330E639 -BBAF 8330E730 -BBB0 8330E731 -BBB1 8330E732 -BBB2 8330E733 -BBB3 8330E734 -BBB4 8330E735 -BBB5 8330E736 -BBB6 8330E737 -BBB7 8330E738 -BBB8 8330E739 -BBB9 8330E830 -BBBA 8330E831 -BBBB 8330E832 -BBBC 8330E833 -BBBD 8330E834 -BBBE 8330E835 -BBBF 8330E836 -BBC0 8330E837 -BBC1 8330E838 -BBC2 8330E839 -BBC3 8330E930 -BBC4 8330E931 -BBC5 8330E932 -BBC6 8330E933 -BBC7 8330E934 -BBC8 8330E935 -BBC9 8330E936 -BBCA 8330E937 -BBCB 8330E938 -BBCC 8330E939 -BBCD 8330EA30 -BBCE 8330EA31 -BBCF 8330EA32 -BBD0 8330EA33 -BBD1 8330EA34 -BBD2 8330EA35 -BBD3 8330EA36 -BBD4 8330EA37 -BBD5 8330EA38 -BBD6 8330EA39 -BBD7 8330EB30 -BBD8 8330EB31 -BBD9 8330EB32 -BBDA 8330EB33 -BBDB 8330EB34 -BBDC 8330EB35 -BBDD 8330EB36 -BBDE 8330EB37 -BBDF 8330EB38 -BBE0 8330EB39 -BBE1 8330EC30 -BBE2 8330EC31 -BBE3 8330EC32 -BBE4 8330EC33 -BBE5 8330EC34 -BBE6 8330EC35 -BBE7 8330EC36 -BBE8 8330EC37 -BBE9 8330EC38 -BBEA 8330EC39 -BBEB 8330ED30 -BBEC 8330ED31 -BBED 8330ED32 -BBEE 8330ED33 -BBEF 8330ED34 -BBF0 8330ED35 -BBF1 8330ED36 -BBF2 8330ED37 -BBF3 8330ED38 -BBF4 8330ED39 -BBF5 8330EE30 -BBF6 8330EE31 -BBF7 8330EE32 -BBF8 8330EE33 -BBF9 8330EE34 -BBFA 8330EE35 -BBFB 8330EE36 -BBFC 8330EE37 -BBFD 8330EE38 -BBFE 8330EE39 -BBFF 8330EF30 -BC00 8330EF31 -BC01 8330EF32 -BC02 8330EF33 -BC03 8330EF34 -BC04 8330EF35 -BC05 8330EF36 -BC06 8330EF37 -BC07 8330EF38 -BC08 8330EF39 -BC09 8330F030 -BC0A 8330F031 -BC0B 8330F032 -BC0C 8330F033 -BC0D 8330F034 -BC0E 8330F035 -BC0F 8330F036 -BC10 8330F037 -BC11 8330F038 -BC12 8330F039 -BC13 8330F130 -BC14 8330F131 -BC15 8330F132 -BC16 8330F133 -BC17 8330F134 -BC18 8330F135 -BC19 8330F136 -BC1A 8330F137 -BC1B 8330F138 -BC1C 8330F139 -BC1D 8330F230 -BC1E 8330F231 -BC1F 8330F232 -BC20 8330F233 -BC21 8330F234 -BC22 8330F235 -BC23 8330F236 -BC24 8330F237 -BC25 8330F238 -BC26 8330F239 -BC27 8330F330 -BC28 8330F331 -BC29 8330F332 -BC2A 8330F333 -BC2B 8330F334 -BC2C 8330F335 -BC2D 8330F336 -BC2E 8330F337 -BC2F 8330F338 -BC30 8330F339 -BC31 8330F430 -BC32 8330F431 -BC33 8330F432 -BC34 8330F433 -BC35 8330F434 -BC36 
8330F435 -BC37 8330F436 -BC38 8330F437 -BC39 8330F438 -BC3A 8330F439 -BC3B 8330F530 -BC3C 8330F531 -BC3D 8330F532 -BC3E 8330F533 -BC3F 8330F534 -BC40 8330F535 -BC41 8330F536 -BC42 8330F537 -BC43 8330F538 -BC44 8330F539 -BC45 8330F630 -BC46 8330F631 -BC47 8330F632 -BC48 8330F633 -BC49 8330F634 -BC4A 8330F635 -BC4B 8330F636 -BC4C 8330F637 -BC4D 8330F638 -BC4E 8330F639 -BC4F 8330F730 -BC50 8330F731 -BC51 8330F732 -BC52 8330F733 -BC53 8330F734 -BC54 8330F735 -BC55 8330F736 -BC56 8330F737 -BC57 8330F738 -BC58 8330F739 -BC59 8330F830 -BC5A 8330F831 -BC5B 8330F832 -BC5C 8330F833 -BC5D 8330F834 -BC5E 8330F835 -BC5F 8330F836 -BC60 8330F837 -BC61 8330F838 -BC62 8330F839 -BC63 8330F930 -BC64 8330F931 -BC65 8330F932 -BC66 8330F933 -BC67 8330F934 -BC68 8330F935 -BC69 8330F936 -BC6A 8330F937 -BC6B 8330F938 -BC6C 8330F939 -BC6D 8330FA30 -BC6E 8330FA31 -BC6F 8330FA32 -BC70 8330FA33 -BC71 8330FA34 -BC72 8330FA35 -BC73 8330FA36 -BC74 8330FA37 -BC75 8330FA38 -BC76 8330FA39 -BC77 8330FB30 -BC78 8330FB31 -BC79 8330FB32 -BC7A 8330FB33 -BC7B 8330FB34 -BC7C 8330FB35 -BC7D 8330FB36 -BC7E 8330FB37 -BC7F 8330FB38 -BC80 8330FB39 -BC81 8330FC30 -BC82 8330FC31 -BC83 8330FC32 -BC84 8330FC33 -BC85 8330FC34 -BC86 8330FC35 -BC87 8330FC36 -BC88 8330FC37 -BC89 8330FC38 -BC8A 8330FC39 -BC8B 8330FD30 -BC8C 8330FD31 -BC8D 8330FD32 -BC8E 8330FD33 -BC8F 8330FD34 -BC90 8330FD35 -BC91 8330FD36 -BC92 8330FD37 -BC93 8330FD38 -BC94 8330FD39 -BC95 8330FE30 -BC96 8330FE31 -BC97 8330FE32 -BC98 8330FE33 -BC99 8330FE34 -BC9A 8330FE35 -BC9B 8330FE36 -BC9C 8330FE37 -BC9D 8330FE38 -BC9E 8330FE39 -BC9F 83318130 -BCA0 83318131 -BCA1 83318132 -BCA2 83318133 -BCA3 83318134 -BCA4 83318135 -BCA5 83318136 -BCA6 83318137 -BCA7 83318138 -BCA8 83318139 -BCA9 83318230 -BCAA 83318231 -BCAB 83318232 -BCAC 83318233 -BCAD 83318234 -BCAE 83318235 -BCAF 83318236 -BCB0 83318237 -BCB1 83318238 -BCB2 83318239 -BCB3 83318330 -BCB4 83318331 -BCB5 83318332 -BCB6 83318333 -BCB7 83318334 -BCB8 83318335 -BCB9 83318336 -BCBA 83318337 -BCBB 83318338 -BCBC 83318339 -BCBD 83318430 -BCBE 83318431 -BCBF 83318432 -BCC0 83318433 -BCC1 83318434 -BCC2 83318435 -BCC3 83318436 -BCC4 83318437 -BCC5 83318438 -BCC6 83318439 -BCC7 83318530 -BCC8 83318531 -BCC9 83318532 -BCCA 83318533 -BCCB 83318534 -BCCC 83318535 -BCCD 83318536 -BCCE 83318537 -BCCF 83318538 -BCD0 83318539 -BCD1 83318630 -BCD2 83318631 -BCD3 83318632 -BCD4 83318633 -BCD5 83318634 -BCD6 83318635 -BCD7 83318636 -BCD8 83318637 -BCD9 83318638 -BCDA 83318639 -BCDB 83318730 -BCDC 83318731 -BCDD 83318732 -BCDE 83318733 -BCDF 83318734 -BCE0 83318735 -BCE1 83318736 -BCE2 83318737 -BCE3 83318738 -BCE4 83318739 -BCE5 83318830 -BCE6 83318831 -BCE7 83318832 -BCE8 83318833 -BCE9 83318834 -BCEA 83318835 -BCEB 83318836 -BCEC 83318837 -BCED 83318838 -BCEE 83318839 -BCEF 83318930 -BCF0 83318931 -BCF1 83318932 -BCF2 83318933 -BCF3 83318934 -BCF4 83318935 -BCF5 83318936 -BCF6 83318937 -BCF7 83318938 -BCF8 83318939 -BCF9 83318A30 -BCFA 83318A31 -BCFB 83318A32 -BCFC 83318A33 -BCFD 83318A34 -BCFE 83318A35 -BCFF 83318A36 -BD00 83318A37 -BD01 83318A38 -BD02 83318A39 -BD03 83318B30 -BD04 83318B31 -BD05 83318B32 -BD06 83318B33 -BD07 83318B34 -BD08 83318B35 -BD09 83318B36 -BD0A 83318B37 -BD0B 83318B38 -BD0C 83318B39 -BD0D 83318C30 -BD0E 83318C31 -BD0F 83318C32 -BD10 83318C33 -BD11 83318C34 -BD12 83318C35 -BD13 83318C36 -BD14 83318C37 -BD15 83318C38 -BD16 83318C39 -BD17 83318D30 -BD18 83318D31 -BD19 83318D32 -BD1A 83318D33 -BD1B 83318D34 -BD1C 83318D35 -BD1D 83318D36 -BD1E 83318D37 -BD1F 83318D38 -BD20 83318D39 -BD21 83318E30 -BD22 83318E31 -BD23 
83318E32 -BD24 83318E33 -BD25 83318E34 -BD26 83318E35 -BD27 83318E36 -BD28 83318E37 -BD29 83318E38 -BD2A 83318E39 -BD2B 83318F30 -BD2C 83318F31 -BD2D 83318F32 -BD2E 83318F33 -BD2F 83318F34 -BD30 83318F35 -BD31 83318F36 -BD32 83318F37 -BD33 83318F38 -BD34 83318F39 -BD35 83319030 -BD36 83319031 -BD37 83319032 -BD38 83319033 -BD39 83319034 -BD3A 83319035 -BD3B 83319036 -BD3C 83319037 -BD3D 83319038 -BD3E 83319039 -BD3F 83319130 -BD40 83319131 -BD41 83319132 -BD42 83319133 -BD43 83319134 -BD44 83319135 -BD45 83319136 -BD46 83319137 -BD47 83319138 -BD48 83319139 -BD49 83319230 -BD4A 83319231 -BD4B 83319232 -BD4C 83319233 -BD4D 83319234 -BD4E 83319235 -BD4F 83319236 -BD50 83319237 -BD51 83319238 -BD52 83319239 -BD53 83319330 -BD54 83319331 -BD55 83319332 -BD56 83319333 -BD57 83319334 -BD58 83319335 -BD59 83319336 -BD5A 83319337 -BD5B 83319338 -BD5C 83319339 -BD5D 83319430 -BD5E 83319431 -BD5F 83319432 -BD60 83319433 -BD61 83319434 -BD62 83319435 -BD63 83319436 -BD64 83319437 -BD65 83319438 -BD66 83319439 -BD67 83319530 -BD68 83319531 -BD69 83319532 -BD6A 83319533 -BD6B 83319534 -BD6C 83319535 -BD6D 83319536 -BD6E 83319537 -BD6F 83319538 -BD70 83319539 -BD71 83319630 -BD72 83319631 -BD73 83319632 -BD74 83319633 -BD75 83319634 -BD76 83319635 -BD77 83319636 -BD78 83319637 -BD79 83319638 -BD7A 83319639 -BD7B 83319730 -BD7C 83319731 -BD7D 83319732 -BD7E 83319733 -BD7F 83319734 -BD80 83319735 -BD81 83319736 -BD82 83319737 -BD83 83319738 -BD84 83319739 -BD85 83319830 -BD86 83319831 -BD87 83319832 -BD88 83319833 -BD89 83319834 -BD8A 83319835 -BD8B 83319836 -BD8C 83319837 -BD8D 83319838 -BD8E 83319839 -BD8F 83319930 -BD90 83319931 -BD91 83319932 -BD92 83319933 -BD93 83319934 -BD94 83319935 -BD95 83319936 -BD96 83319937 -BD97 83319938 -BD98 83319939 -BD99 83319A30 -BD9A 83319A31 -BD9B 83319A32 -BD9C 83319A33 -BD9D 83319A34 -BD9E 83319A35 -BD9F 83319A36 -BDA0 83319A37 -BDA1 83319A38 -BDA2 83319A39 -BDA3 83319B30 -BDA4 83319B31 -BDA5 83319B32 -BDA6 83319B33 -BDA7 83319B34 -BDA8 83319B35 -BDA9 83319B36 -BDAA 83319B37 -BDAB 83319B38 -BDAC 83319B39 -BDAD 83319C30 -BDAE 83319C31 -BDAF 83319C32 -BDB0 83319C33 -BDB1 83319C34 -BDB2 83319C35 -BDB3 83319C36 -BDB4 83319C37 -BDB5 83319C38 -BDB6 83319C39 -BDB7 83319D30 -BDB8 83319D31 -BDB9 83319D32 -BDBA 83319D33 -BDBB 83319D34 -BDBC 83319D35 -BDBD 83319D36 -BDBE 83319D37 -BDBF 83319D38 -BDC0 83319D39 -BDC1 83319E30 -BDC2 83319E31 -BDC3 83319E32 -BDC4 83319E33 -BDC5 83319E34 -BDC6 83319E35 -BDC7 83319E36 -BDC8 83319E37 -BDC9 83319E38 -BDCA 83319E39 -BDCB 83319F30 -BDCC 83319F31 -BDCD 83319F32 -BDCE 83319F33 -BDCF 83319F34 -BDD0 83319F35 -BDD1 83319F36 -BDD2 83319F37 -BDD3 83319F38 -BDD4 83319F39 -BDD5 8331A030 -BDD6 8331A031 -BDD7 8331A032 -BDD8 8331A033 -BDD9 8331A034 -BDDA 8331A035 -BDDB 8331A036 -BDDC 8331A037 -BDDD 8331A038 -BDDE 8331A039 -BDDF 8331A130 -BDE0 8331A131 -BDE1 8331A132 -BDE2 8331A133 -BDE3 8331A134 -BDE4 8331A135 -BDE5 8331A136 -BDE6 8331A137 -BDE7 8331A138 -BDE8 8331A139 -BDE9 8331A230 -BDEA 8331A231 -BDEB 8331A232 -BDEC 8331A233 -BDED 8331A234 -BDEE 8331A235 -BDEF 8331A236 -BDF0 8331A237 -BDF1 8331A238 -BDF2 8331A239 -BDF3 8331A330 -BDF4 8331A331 -BDF5 8331A332 -BDF6 8331A333 -BDF7 8331A334 -BDF8 8331A335 -BDF9 8331A336 -BDFA 8331A337 -BDFB 8331A338 -BDFC 8331A339 -BDFD 8331A430 -BDFE 8331A431 -BDFF 8331A432 -BE00 8331A433 -BE01 8331A434 -BE02 8331A435 -BE03 8331A436 -BE04 8331A437 -BE05 8331A438 -BE06 8331A439 -BE07 8331A530 -BE08 8331A531 -BE09 8331A532 -BE0A 8331A533 -BE0B 8331A534 -BE0C 8331A535 -BE0D 8331A536 -BE0E 8331A537 -BE0F 8331A538 -BE10 
[mapping data elided — this stretch of the diff consists solely of deleted entries from a GB18030 four-byte ↔ Unicode mapping table, one `-<UCS> <GB18030>` line per code point, running consecutively from U+BE11 (0x8331A630) through U+CEBA; the leading 0x8331A539 completes the previous line's U+BE10 entry. The mapping is strictly linear across the span: each successive Unicode code point (Hangul syllables in this region) pairs with the next four-byte GB18030 code.]
8334D635 -CEBB 8334D636 -CEBC 8334D637 -CEBD 8334D638 -CEBE 8334D639 -CEBF 8334D730 -CEC0 8334D731 -CEC1 8334D732 -CEC2 8334D733 -CEC3 8334D734 -CEC4 8334D735 -CEC5 8334D736 -CEC6 8334D737 -CEC7 8334D738 -CEC8 8334D739 -CEC9 8334D830 -CECA 8334D831 -CECB 8334D832 -CECC 8334D833 -CECD 8334D834 -CECE 8334D835 -CECF 8334D836 -CED0 8334D837 -CED1 8334D838 -CED2 8334D839 -CED3 8334D930 -CED4 8334D931 -CED5 8334D932 -CED6 8334D933 -CED7 8334D934 -CED8 8334D935 -CED9 8334D936 -CEDA 8334D937 -CEDB 8334D938 -CEDC 8334D939 -CEDD 8334DA30 -CEDE 8334DA31 -CEDF 8334DA32 -CEE0 8334DA33 -CEE1 8334DA34 -CEE2 8334DA35 -CEE3 8334DA36 -CEE4 8334DA37 -CEE5 8334DA38 -CEE6 8334DA39 -CEE7 8334DB30 -CEE8 8334DB31 -CEE9 8334DB32 -CEEA 8334DB33 -CEEB 8334DB34 -CEEC 8334DB35 -CEED 8334DB36 -CEEE 8334DB37 -CEEF 8334DB38 -CEF0 8334DB39 -CEF1 8334DC30 -CEF2 8334DC31 -CEF3 8334DC32 -CEF4 8334DC33 -CEF5 8334DC34 -CEF6 8334DC35 -CEF7 8334DC36 -CEF8 8334DC37 -CEF9 8334DC38 -CEFA 8334DC39 -CEFB 8334DD30 -CEFC 8334DD31 -CEFD 8334DD32 -CEFE 8334DD33 -CEFF 8334DD34 -CF00 8334DD35 -CF01 8334DD36 -CF02 8334DD37 -CF03 8334DD38 -CF04 8334DD39 -CF05 8334DE30 -CF06 8334DE31 -CF07 8334DE32 -CF08 8334DE33 -CF09 8334DE34 -CF0A 8334DE35 -CF0B 8334DE36 -CF0C 8334DE37 -CF0D 8334DE38 -CF0E 8334DE39 -CF0F 8334DF30 -CF10 8334DF31 -CF11 8334DF32 -CF12 8334DF33 -CF13 8334DF34 -CF14 8334DF35 -CF15 8334DF36 -CF16 8334DF37 -CF17 8334DF38 -CF18 8334DF39 -CF19 8334E030 -CF1A 8334E031 -CF1B 8334E032 -CF1C 8334E033 -CF1D 8334E034 -CF1E 8334E035 -CF1F 8334E036 -CF20 8334E037 -CF21 8334E038 -CF22 8334E039 -CF23 8334E130 -CF24 8334E131 -CF25 8334E132 -CF26 8334E133 -CF27 8334E134 -CF28 8334E135 -CF29 8334E136 -CF2A 8334E137 -CF2B 8334E138 -CF2C 8334E139 -CF2D 8334E230 -CF2E 8334E231 -CF2F 8334E232 -CF30 8334E233 -CF31 8334E234 -CF32 8334E235 -CF33 8334E236 -CF34 8334E237 -CF35 8334E238 -CF36 8334E239 -CF37 8334E330 -CF38 8334E331 -CF39 8334E332 -CF3A 8334E333 -CF3B 8334E334 -CF3C 8334E335 -CF3D 8334E336 -CF3E 8334E337 -CF3F 8334E338 -CF40 8334E339 -CF41 8334E430 -CF42 8334E431 -CF43 8334E432 -CF44 8334E433 -CF45 8334E434 -CF46 8334E435 -CF47 8334E436 -CF48 8334E437 -CF49 8334E438 -CF4A 8334E439 -CF4B 8334E530 -CF4C 8334E531 -CF4D 8334E532 -CF4E 8334E533 -CF4F 8334E534 -CF50 8334E535 -CF51 8334E536 -CF52 8334E537 -CF53 8334E538 -CF54 8334E539 -CF55 8334E630 -CF56 8334E631 -CF57 8334E632 -CF58 8334E633 -CF59 8334E634 -CF5A 8334E635 -CF5B 8334E636 -CF5C 8334E637 -CF5D 8334E638 -CF5E 8334E639 -CF5F 8334E730 -CF60 8334E731 -CF61 8334E732 -CF62 8334E733 -CF63 8334E734 -CF64 8334E735 -CF65 8334E736 -CF66 8334E737 -CF67 8334E738 -CF68 8334E739 -CF69 8334E830 -CF6A 8334E831 -CF6B 8334E832 -CF6C 8334E833 -CF6D 8334E834 -CF6E 8334E835 -CF6F 8334E836 -CF70 8334E837 -CF71 8334E838 -CF72 8334E839 -CF73 8334E930 -CF74 8334E931 -CF75 8334E932 -CF76 8334E933 -CF77 8334E934 -CF78 8334E935 -CF79 8334E936 -CF7A 8334E937 -CF7B 8334E938 -CF7C 8334E939 -CF7D 8334EA30 -CF7E 8334EA31 -CF7F 8334EA32 -CF80 8334EA33 -CF81 8334EA34 -CF82 8334EA35 -CF83 8334EA36 -CF84 8334EA37 -CF85 8334EA38 -CF86 8334EA39 -CF87 8334EB30 -CF88 8334EB31 -CF89 8334EB32 -CF8A 8334EB33 -CF8B 8334EB34 -CF8C 8334EB35 -CF8D 8334EB36 -CF8E 8334EB37 -CF8F 8334EB38 -CF90 8334EB39 -CF91 8334EC30 -CF92 8334EC31 -CF93 8334EC32 -CF94 8334EC33 -CF95 8334EC34 -CF96 8334EC35 -CF97 8334EC36 -CF98 8334EC37 -CF99 8334EC38 -CF9A 8334EC39 -CF9B 8334ED30 -CF9C 8334ED31 -CF9D 8334ED32 -CF9E 8334ED33 -CF9F 8334ED34 -CFA0 8334ED35 -CFA1 8334ED36 -CFA2 8334ED37 -CFA3 8334ED38 -CFA4 8334ED39 -CFA5 8334EE30 -CFA6 8334EE31 -CFA7 
8334EE32 -CFA8 8334EE33 -CFA9 8334EE34 -CFAA 8334EE35 -CFAB 8334EE36 -CFAC 8334EE37 -CFAD 8334EE38 -CFAE 8334EE39 -CFAF 8334EF30 -CFB0 8334EF31 -CFB1 8334EF32 -CFB2 8334EF33 -CFB3 8334EF34 -CFB4 8334EF35 -CFB5 8334EF36 -CFB6 8334EF37 -CFB7 8334EF38 -CFB8 8334EF39 -CFB9 8334F030 -CFBA 8334F031 -CFBB 8334F032 -CFBC 8334F033 -CFBD 8334F034 -CFBE 8334F035 -CFBF 8334F036 -CFC0 8334F037 -CFC1 8334F038 -CFC2 8334F039 -CFC3 8334F130 -CFC4 8334F131 -CFC5 8334F132 -CFC6 8334F133 -CFC7 8334F134 -CFC8 8334F135 -CFC9 8334F136 -CFCA 8334F137 -CFCB 8334F138 -CFCC 8334F139 -CFCD 8334F230 -CFCE 8334F231 -CFCF 8334F232 -CFD0 8334F233 -CFD1 8334F234 -CFD2 8334F235 -CFD3 8334F236 -CFD4 8334F237 -CFD5 8334F238 -CFD6 8334F239 -CFD7 8334F330 -CFD8 8334F331 -CFD9 8334F332 -CFDA 8334F333 -CFDB 8334F334 -CFDC 8334F335 -CFDD 8334F336 -CFDE 8334F337 -CFDF 8334F338 -CFE0 8334F339 -CFE1 8334F430 -CFE2 8334F431 -CFE3 8334F432 -CFE4 8334F433 -CFE5 8334F434 -CFE6 8334F435 -CFE7 8334F436 -CFE8 8334F437 -CFE9 8334F438 -CFEA 8334F439 -CFEB 8334F530 -CFEC 8334F531 -CFED 8334F532 -CFEE 8334F533 -CFEF 8334F534 -CFF0 8334F535 -CFF1 8334F536 -CFF2 8334F537 -CFF3 8334F538 -CFF4 8334F539 -CFF5 8334F630 -CFF6 8334F631 -CFF7 8334F632 -CFF8 8334F633 -CFF9 8334F634 -CFFA 8334F635 -CFFB 8334F636 -CFFC 8334F637 -CFFD 8334F638 -CFFE 8334F639 -CFFF 8334F730 -D000 8334F731 -D001 8334F732 -D002 8334F733 -D003 8334F734 -D004 8334F735 -D005 8334F736 -D006 8334F737 -D007 8334F738 -D008 8334F739 -D009 8334F830 -D00A 8334F831 -D00B 8334F832 -D00C 8334F833 -D00D 8334F834 -D00E 8334F835 -D00F 8334F836 -D010 8334F837 -D011 8334F838 -D012 8334F839 -D013 8334F930 -D014 8334F931 -D015 8334F932 -D016 8334F933 -D017 8334F934 -D018 8334F935 -D019 8334F936 -D01A 8334F937 -D01B 8334F938 -D01C 8334F939 -D01D 8334FA30 -D01E 8334FA31 -D01F 8334FA32 -D020 8334FA33 -D021 8334FA34 -D022 8334FA35 -D023 8334FA36 -D024 8334FA37 -D025 8334FA38 -D026 8334FA39 -D027 8334FB30 -D028 8334FB31 -D029 8334FB32 -D02A 8334FB33 -D02B 8334FB34 -D02C 8334FB35 -D02D 8334FB36 -D02E 8334FB37 -D02F 8334FB38 -D030 8334FB39 -D031 8334FC30 -D032 8334FC31 -D033 8334FC32 -D034 8334FC33 -D035 8334FC34 -D036 8334FC35 -D037 8334FC36 -D038 8334FC37 -D039 8334FC38 -D03A 8334FC39 -D03B 8334FD30 -D03C 8334FD31 -D03D 8334FD32 -D03E 8334FD33 -D03F 8334FD34 -D040 8334FD35 -D041 8334FD36 -D042 8334FD37 -D043 8334FD38 -D044 8334FD39 -D045 8334FE30 -D046 8334FE31 -D047 8334FE32 -D048 8334FE33 -D049 8334FE34 -D04A 8334FE35 -D04B 8334FE36 -D04C 8334FE37 -D04D 8334FE38 -D04E 8334FE39 -D04F 83358130 -D050 83358131 -D051 83358132 -D052 83358133 -D053 83358134 -D054 83358135 -D055 83358136 -D056 83358137 -D057 83358138 -D058 83358139 -D059 83358230 -D05A 83358231 -D05B 83358232 -D05C 83358233 -D05D 83358234 -D05E 83358235 -D05F 83358236 -D060 83358237 -D061 83358238 -D062 83358239 -D063 83358330 -D064 83358331 -D065 83358332 -D066 83358333 -D067 83358334 -D068 83358335 -D069 83358336 -D06A 83358337 -D06B 83358338 -D06C 83358339 -D06D 83358430 -D06E 83358431 -D06F 83358432 -D070 83358433 -D071 83358434 -D072 83358435 -D073 83358436 -D074 83358437 -D075 83358438 -D076 83358439 -D077 83358530 -D078 83358531 -D079 83358532 -D07A 83358533 -D07B 83358534 -D07C 83358535 -D07D 83358536 -D07E 83358537 -D07F 83358538 -D080 83358539 -D081 83358630 -D082 83358631 -D083 83358632 -D084 83358633 -D085 83358634 -D086 83358635 -D087 83358636 -D088 83358637 -D089 83358638 -D08A 83358639 -D08B 83358730 -D08C 83358731 -D08D 83358732 -D08E 83358733 -D08F 83358734 -D090 83358735 -D091 83358736 -D092 83358737 -D093 83358738 -D094 
83358739 -D095 83358830 -D096 83358831 -D097 83358832 -D098 83358833 -D099 83358834 -D09A 83358835 -D09B 83358836 -D09C 83358837 -D09D 83358838 -D09E 83358839 -D09F 83358930 -D0A0 83358931 -D0A1 83358932 -D0A2 83358933 -D0A3 83358934 -D0A4 83358935 -D0A5 83358936 -D0A6 83358937 -D0A7 83358938 -D0A8 83358939 -D0A9 83358A30 -D0AA 83358A31 -D0AB 83358A32 -D0AC 83358A33 -D0AD 83358A34 -D0AE 83358A35 -D0AF 83358A36 -D0B0 83358A37 -D0B1 83358A38 -D0B2 83358A39 -D0B3 83358B30 -D0B4 83358B31 -D0B5 83358B32 -D0B6 83358B33 -D0B7 83358B34 -D0B8 83358B35 -D0B9 83358B36 -D0BA 83358B37 -D0BB 83358B38 -D0BC 83358B39 -D0BD 83358C30 -D0BE 83358C31 -D0BF 83358C32 -D0C0 83358C33 -D0C1 83358C34 -D0C2 83358C35 -D0C3 83358C36 -D0C4 83358C37 -D0C5 83358C38 -D0C6 83358C39 -D0C7 83358D30 -D0C8 83358D31 -D0C9 83358D32 -D0CA 83358D33 -D0CB 83358D34 -D0CC 83358D35 -D0CD 83358D36 -D0CE 83358D37 -D0CF 83358D38 -D0D0 83358D39 -D0D1 83358E30 -D0D2 83358E31 -D0D3 83358E32 -D0D4 83358E33 -D0D5 83358E34 -D0D6 83358E35 -D0D7 83358E36 -D0D8 83358E37 -D0D9 83358E38 -D0DA 83358E39 -D0DB 83358F30 -D0DC 83358F31 -D0DD 83358F32 -D0DE 83358F33 -D0DF 83358F34 -D0E0 83358F35 -D0E1 83358F36 -D0E2 83358F37 -D0E3 83358F38 -D0E4 83358F39 -D0E5 83359030 -D0E6 83359031 -D0E7 83359032 -D0E8 83359033 -D0E9 83359034 -D0EA 83359035 -D0EB 83359036 -D0EC 83359037 -D0ED 83359038 -D0EE 83359039 -D0EF 83359130 -D0F0 83359131 -D0F1 83359132 -D0F2 83359133 -D0F3 83359134 -D0F4 83359135 -D0F5 83359136 -D0F6 83359137 -D0F7 83359138 -D0F8 83359139 -D0F9 83359230 -D0FA 83359231 -D0FB 83359232 -D0FC 83359233 -D0FD 83359234 -D0FE 83359235 -D0FF 83359236 -D100 83359237 -D101 83359238 -D102 83359239 -D103 83359330 -D104 83359331 -D105 83359332 -D106 83359333 -D107 83359334 -D108 83359335 -D109 83359336 -D10A 83359337 -D10B 83359338 -D10C 83359339 -D10D 83359430 -D10E 83359431 -D10F 83359432 -D110 83359433 -D111 83359434 -D112 83359435 -D113 83359436 -D114 83359437 -D115 83359438 -D116 83359439 -D117 83359530 -D118 83359531 -D119 83359532 -D11A 83359533 -D11B 83359534 -D11C 83359535 -D11D 83359536 -D11E 83359537 -D11F 83359538 -D120 83359539 -D121 83359630 -D122 83359631 -D123 83359632 -D124 83359633 -D125 83359634 -D126 83359635 -D127 83359636 -D128 83359637 -D129 83359638 -D12A 83359639 -D12B 83359730 -D12C 83359731 -D12D 83359732 -D12E 83359733 -D12F 83359734 -D130 83359735 -D131 83359736 -D132 83359737 -D133 83359738 -D134 83359739 -D135 83359830 -D136 83359831 -D137 83359832 -D138 83359833 -D139 83359834 -D13A 83359835 -D13B 83359836 -D13C 83359837 -D13D 83359838 -D13E 83359839 -D13F 83359930 -D140 83359931 -D141 83359932 -D142 83359933 -D143 83359934 -D144 83359935 -D145 83359936 -D146 83359937 -D147 83359938 -D148 83359939 -D149 83359A30 -D14A 83359A31 -D14B 83359A32 -D14C 83359A33 -D14D 83359A34 -D14E 83359A35 -D14F 83359A36 -D150 83359A37 -D151 83359A38 -D152 83359A39 -D153 83359B30 -D154 83359B31 -D155 83359B32 -D156 83359B33 -D157 83359B34 -D158 83359B35 -D159 83359B36 -D15A 83359B37 -D15B 83359B38 -D15C 83359B39 -D15D 83359C30 -D15E 83359C31 -D15F 83359C32 -D160 83359C33 -D161 83359C34 -D162 83359C35 -D163 83359C36 -D164 83359C37 -D165 83359C38 -D166 83359C39 -D167 83359D30 -D168 83359D31 -D169 83359D32 -D16A 83359D33 -D16B 83359D34 -D16C 83359D35 -D16D 83359D36 -D16E 83359D37 -D16F 83359D38 -D170 83359D39 -D171 83359E30 -D172 83359E31 -D173 83359E32 -D174 83359E33 -D175 83359E34 -D176 83359E35 -D177 83359E36 -D178 83359E37 -D179 83359E38 -D17A 83359E39 -D17B 83359F30 -D17C 83359F31 -D17D 83359F32 -D17E 83359F33 -D17F 83359F34 -D180 83359F35 -D181 
83359F36 -D182 83359F37 -D183 83359F38 -D184 83359F39 -D185 8335A030 -D186 8335A031 -D187 8335A032 -D188 8335A033 -D189 8335A034 -D18A 8335A035 -D18B 8335A036 -D18C 8335A037 -D18D 8335A038 -D18E 8335A039 -D18F 8335A130 -D190 8335A131 -D191 8335A132 -D192 8335A133 -D193 8335A134 -D194 8335A135 -D195 8335A136 -D196 8335A137 -D197 8335A138 -D198 8335A139 -D199 8335A230 -D19A 8335A231 -D19B 8335A232 -D19C 8335A233 -D19D 8335A234 -D19E 8335A235 -D19F 8335A236 -D1A0 8335A237 -D1A1 8335A238 -D1A2 8335A239 -D1A3 8335A330 -D1A4 8335A331 -D1A5 8335A332 -D1A6 8335A333 -D1A7 8335A334 -D1A8 8335A335 -D1A9 8335A336 -D1AA 8335A337 -D1AB 8335A338 -D1AC 8335A339 -D1AD 8335A430 -D1AE 8335A431 -D1AF 8335A432 -D1B0 8335A433 -D1B1 8335A434 -D1B2 8335A435 -D1B3 8335A436 -D1B4 8335A437 -D1B5 8335A438 -D1B6 8335A439 -D1B7 8335A530 -D1B8 8335A531 -D1B9 8335A532 -D1BA 8335A533 -D1BB 8335A534 -D1BC 8335A535 -D1BD 8335A536 -D1BE 8335A537 -D1BF 8335A538 -D1C0 8335A539 -D1C1 8335A630 -D1C2 8335A631 -D1C3 8335A632 -D1C4 8335A633 -D1C5 8335A634 -D1C6 8335A635 -D1C7 8335A636 -D1C8 8335A637 -D1C9 8335A638 -D1CA 8335A639 -D1CB 8335A730 -D1CC 8335A731 -D1CD 8335A732 -D1CE 8335A733 -D1CF 8335A734 -D1D0 8335A735 -D1D1 8335A736 -D1D2 8335A737 -D1D3 8335A738 -D1D4 8335A739 -D1D5 8335A830 -D1D6 8335A831 -D1D7 8335A832 -D1D8 8335A833 -D1D9 8335A834 -D1DA 8335A835 -D1DB 8335A836 -D1DC 8335A837 -D1DD 8335A838 -D1DE 8335A839 -D1DF 8335A930 -D1E0 8335A931 -D1E1 8335A932 -D1E2 8335A933 -D1E3 8335A934 -D1E4 8335A935 -D1E5 8335A936 -D1E6 8335A937 -D1E7 8335A938 -D1E8 8335A939 -D1E9 8335AA30 -D1EA 8335AA31 -D1EB 8335AA32 -D1EC 8335AA33 -D1ED 8335AA34 -D1EE 8335AA35 -D1EF 8335AA36 -D1F0 8335AA37 -D1F1 8335AA38 -D1F2 8335AA39 -D1F3 8335AB30 -D1F4 8335AB31 -D1F5 8335AB32 -D1F6 8335AB33 -D1F7 8335AB34 -D1F8 8335AB35 -D1F9 8335AB36 -D1FA 8335AB37 -D1FB 8335AB38 -D1FC 8335AB39 -D1FD 8335AC30 -D1FE 8335AC31 -D1FF 8335AC32 -D200 8335AC33 -D201 8335AC34 -D202 8335AC35 -D203 8335AC36 -D204 8335AC37 -D205 8335AC38 -D206 8335AC39 -D207 8335AD30 -D208 8335AD31 -D209 8335AD32 -D20A 8335AD33 -D20B 8335AD34 -D20C 8335AD35 -D20D 8335AD36 -D20E 8335AD37 -D20F 8335AD38 -D210 8335AD39 -D211 8335AE30 -D212 8335AE31 -D213 8335AE32 -D214 8335AE33 -D215 8335AE34 -D216 8335AE35 -D217 8335AE36 -D218 8335AE37 -D219 8335AE38 -D21A 8335AE39 -D21B 8335AF30 -D21C 8335AF31 -D21D 8335AF32 -D21E 8335AF33 -D21F 8335AF34 -D220 8335AF35 -D221 8335AF36 -D222 8335AF37 -D223 8335AF38 -D224 8335AF39 -D225 8335B030 -D226 8335B031 -D227 8335B032 -D228 8335B033 -D229 8335B034 -D22A 8335B035 -D22B 8335B036 -D22C 8335B037 -D22D 8335B038 -D22E 8335B039 -D22F 8335B130 -D230 8335B131 -D231 8335B132 -D232 8335B133 -D233 8335B134 -D234 8335B135 -D235 8335B136 -D236 8335B137 -D237 8335B138 -D238 8335B139 -D239 8335B230 -D23A 8335B231 -D23B 8335B232 -D23C 8335B233 -D23D 8335B234 -D23E 8335B235 -D23F 8335B236 -D240 8335B237 -D241 8335B238 -D242 8335B239 -D243 8335B330 -D244 8335B331 -D245 8335B332 -D246 8335B333 -D247 8335B334 -D248 8335B335 -D249 8335B336 -D24A 8335B337 -D24B 8335B338 -D24C 8335B339 -D24D 8335B430 -D24E 8335B431 -D24F 8335B432 -D250 8335B433 -D251 8335B434 -D252 8335B435 -D253 8335B436 -D254 8335B437 -D255 8335B438 -D256 8335B439 -D257 8335B530 -D258 8335B531 -D259 8335B532 -D25A 8335B533 -D25B 8335B534 -D25C 8335B535 -D25D 8335B536 -D25E 8335B537 -D25F 8335B538 -D260 8335B539 -D261 8335B630 -D262 8335B631 -D263 8335B632 -D264 8335B633 -D265 8335B634 -D266 8335B635 -D267 8335B636 -D268 8335B637 -D269 8335B638 -D26A 8335B639 -D26B 8335B730 -D26C 8335B731 -D26D 8335B732 -D26E 
8335B733 -D26F 8335B734 -D270 8335B735 -D271 8335B736 -D272 8335B737 -D273 8335B738 -D274 8335B739 -D275 8335B830 -D276 8335B831 -D277 8335B832 -D278 8335B833 -D279 8335B834 -D27A 8335B835 -D27B 8335B836 -D27C 8335B837 -D27D 8335B838 -D27E 8335B839 -D27F 8335B930 -D280 8335B931 -D281 8335B932 -D282 8335B933 -D283 8335B934 -D284 8335B935 -D285 8335B936 -D286 8335B937 -D287 8335B938 -D288 8335B939 -D289 8335BA30 -D28A 8335BA31 -D28B 8335BA32 -D28C 8335BA33 -D28D 8335BA34 -D28E 8335BA35 -D28F 8335BA36 -D290 8335BA37 -D291 8335BA38 -D292 8335BA39 -D293 8335BB30 -D294 8335BB31 -D295 8335BB32 -D296 8335BB33 -D297 8335BB34 -D298 8335BB35 -D299 8335BB36 -D29A 8335BB37 -D29B 8335BB38 -D29C 8335BB39 -D29D 8335BC30 -D29E 8335BC31 -D29F 8335BC32 -D2A0 8335BC33 -D2A1 8335BC34 -D2A2 8335BC35 -D2A3 8335BC36 -D2A4 8335BC37 -D2A5 8335BC38 -D2A6 8335BC39 -D2A7 8335BD30 -D2A8 8335BD31 -D2A9 8335BD32 -D2AA 8335BD33 -D2AB 8335BD34 -D2AC 8335BD35 -D2AD 8335BD36 -D2AE 8335BD37 -D2AF 8335BD38 -D2B0 8335BD39 -D2B1 8335BE30 -D2B2 8335BE31 -D2B3 8335BE32 -D2B4 8335BE33 -D2B5 8335BE34 -D2B6 8335BE35 -D2B7 8335BE36 -D2B8 8335BE37 -D2B9 8335BE38 -D2BA 8335BE39 -D2BB 8335BF30 -D2BC 8335BF31 -D2BD 8335BF32 -D2BE 8335BF33 -D2BF 8335BF34 -D2C0 8335BF35 -D2C1 8335BF36 -D2C2 8335BF37 -D2C3 8335BF38 -D2C4 8335BF39 -D2C5 8335C030 -D2C6 8335C031 -D2C7 8335C032 -D2C8 8335C033 -D2C9 8335C034 -D2CA 8335C035 -D2CB 8335C036 -D2CC 8335C037 -D2CD 8335C038 -D2CE 8335C039 -D2CF 8335C130 -D2D0 8335C131 -D2D1 8335C132 -D2D2 8335C133 -D2D3 8335C134 -D2D4 8335C135 -D2D5 8335C136 -D2D6 8335C137 -D2D7 8335C138 -D2D8 8335C139 -D2D9 8335C230 -D2DA 8335C231 -D2DB 8335C232 -D2DC 8335C233 -D2DD 8335C234 -D2DE 8335C235 -D2DF 8335C236 -D2E0 8335C237 -D2E1 8335C238 -D2E2 8335C239 -D2E3 8335C330 -D2E4 8335C331 -D2E5 8335C332 -D2E6 8335C333 -D2E7 8335C334 -D2E8 8335C335 -D2E9 8335C336 -D2EA 8335C337 -D2EB 8335C338 -D2EC 8335C339 -D2ED 8335C430 -D2EE 8335C431 -D2EF 8335C432 -D2F0 8335C433 -D2F1 8335C434 -D2F2 8335C435 -D2F3 8335C436 -D2F4 8335C437 -D2F5 8335C438 -D2F6 8335C439 -D2F7 8335C530 -D2F8 8335C531 -D2F9 8335C532 -D2FA 8335C533 -D2FB 8335C534 -D2FC 8335C535 -D2FD 8335C536 -D2FE 8335C537 -D2FF 8335C538 -D300 8335C539 -D301 8335C630 -D302 8335C631 -D303 8335C632 -D304 8335C633 -D305 8335C634 -D306 8335C635 -D307 8335C636 -D308 8335C637 -D309 8335C638 -D30A 8335C639 -D30B 8335C730 -D30C 8335C731 -D30D 8335C732 -D30E 8335C733 -D30F 8335C734 -D310 8335C735 -D311 8335C736 -D312 8335C737 -D313 8335C738 -D314 8335C739 -D315 8335C830 -D316 8335C831 -D317 8335C832 -D318 8335C833 -D319 8335C834 -D31A 8335C835 -D31B 8335C836 -D31C 8335C837 -D31D 8335C838 -D31E 8335C839 -D31F 8335C930 -D320 8335C931 -D321 8335C932 -D322 8335C933 -D323 8335C934 -D324 8335C935 -D325 8335C936 -D326 8335C937 -D327 8335C938 -D328 8335C939 -D329 8335CA30 -D32A 8335CA31 -D32B 8335CA32 -D32C 8335CA33 -D32D 8335CA34 -D32E 8335CA35 -D32F 8335CA36 -D330 8335CA37 -D331 8335CA38 -D332 8335CA39 -D333 8335CB30 -D334 8335CB31 -D335 8335CB32 -D336 8335CB33 -D337 8335CB34 -D338 8335CB35 -D339 8335CB36 -D33A 8335CB37 -D33B 8335CB38 -D33C 8335CB39 -D33D 8335CC30 -D33E 8335CC31 -D33F 8335CC32 -D340 8335CC33 -D341 8335CC34 -D342 8335CC35 -D343 8335CC36 -D344 8335CC37 -D345 8335CC38 -D346 8335CC39 -D347 8335CD30 -D348 8335CD31 -D349 8335CD32 -D34A 8335CD33 -D34B 8335CD34 -D34C 8335CD35 -D34D 8335CD36 -D34E 8335CD37 -D34F 8335CD38 -D350 8335CD39 -D351 8335CE30 -D352 8335CE31 -D353 8335CE32 -D354 8335CE33 -D355 8335CE34 -D356 8335CE35 -D357 8335CE36 -D358 8335CE37 -D359 8335CE38 -D35A 8335CE39 -D35B 
8335CF30 -D35C 8335CF31 -D35D 8335CF32 -D35E 8335CF33 -D35F 8335CF34 -D360 8335CF35 -D361 8335CF36 -D362 8335CF37 -D363 8335CF38 -D364 8335CF39 -D365 8335D030 -D366 8335D031 -D367 8335D032 -D368 8335D033 -D369 8335D034 -D36A 8335D035 -D36B 8335D036 -D36C 8335D037 -D36D 8335D038 -D36E 8335D039 -D36F 8335D130 -D370 8335D131 -D371 8335D132 -D372 8335D133 -D373 8335D134 -D374 8335D135 -D375 8335D136 -D376 8335D137 -D377 8335D138 -D378 8335D139 -D379 8335D230 -D37A 8335D231 -D37B 8335D232 -D37C 8335D233 -D37D 8335D234 -D37E 8335D235 -D37F 8335D236 -D380 8335D237 -D381 8335D238 -D382 8335D239 -D383 8335D330 -D384 8335D331 -D385 8335D332 -D386 8335D333 -D387 8335D334 -D388 8335D335 -D389 8335D336 -D38A 8335D337 -D38B 8335D338 -D38C 8335D339 -D38D 8335D430 -D38E 8335D431 -D38F 8335D432 -D390 8335D433 -D391 8335D434 -D392 8335D435 -D393 8335D436 -D394 8335D437 -D395 8335D438 -D396 8335D439 -D397 8335D530 -D398 8335D531 -D399 8335D532 -D39A 8335D533 -D39B 8335D534 -D39C 8335D535 -D39D 8335D536 -D39E 8335D537 -D39F 8335D538 -D3A0 8335D539 -D3A1 8335D630 -D3A2 8335D631 -D3A3 8335D632 -D3A4 8335D633 -D3A5 8335D634 -D3A6 8335D635 -D3A7 8335D636 -D3A8 8335D637 -D3A9 8335D638 -D3AA 8335D639 -D3AB 8335D730 -D3AC 8335D731 -D3AD 8335D732 -D3AE 8335D733 -D3AF 8335D734 -D3B0 8335D735 -D3B1 8335D736 -D3B2 8335D737 -D3B3 8335D738 -D3B4 8335D739 -D3B5 8335D830 -D3B6 8335D831 -D3B7 8335D832 -D3B8 8335D833 -D3B9 8335D834 -D3BA 8335D835 -D3BB 8335D836 -D3BC 8335D837 -D3BD 8335D838 -D3BE 8335D839 -D3BF 8335D930 -D3C0 8335D931 -D3C1 8335D932 -D3C2 8335D933 -D3C3 8335D934 -D3C4 8335D935 -D3C5 8335D936 -D3C6 8335D937 -D3C7 8335D938 -D3C8 8335D939 -D3C9 8335DA30 -D3CA 8335DA31 -D3CB 8335DA32 -D3CC 8335DA33 -D3CD 8335DA34 -D3CE 8335DA35 -D3CF 8335DA36 -D3D0 8335DA37 -D3D1 8335DA38 -D3D2 8335DA39 -D3D3 8335DB30 -D3D4 8335DB31 -D3D5 8335DB32 -D3D6 8335DB33 -D3D7 8335DB34 -D3D8 8335DB35 -D3D9 8335DB36 -D3DA 8335DB37 -D3DB 8335DB38 -D3DC 8335DB39 -D3DD 8335DC30 -D3DE 8335DC31 -D3DF 8335DC32 -D3E0 8335DC33 -D3E1 8335DC34 -D3E2 8335DC35 -D3E3 8335DC36 -D3E4 8335DC37 -D3E5 8335DC38 -D3E6 8335DC39 -D3E7 8335DD30 -D3E8 8335DD31 -D3E9 8335DD32 -D3EA 8335DD33 -D3EB 8335DD34 -D3EC 8335DD35 -D3ED 8335DD36 -D3EE 8335DD37 -D3EF 8335DD38 -D3F0 8335DD39 -D3F1 8335DE30 -D3F2 8335DE31 -D3F3 8335DE32 -D3F4 8335DE33 -D3F5 8335DE34 -D3F6 8335DE35 -D3F7 8335DE36 -D3F8 8335DE37 -D3F9 8335DE38 -D3FA 8335DE39 -D3FB 8335DF30 -D3FC 8335DF31 -D3FD 8335DF32 -D3FE 8335DF33 -D3FF 8335DF34 -D400 8335DF35 -D401 8335DF36 -D402 8335DF37 -D403 8335DF38 -D404 8335DF39 -D405 8335E030 -D406 8335E031 -D407 8335E032 -D408 8335E033 -D409 8335E034 -D40A 8335E035 -D40B 8335E036 -D40C 8335E037 -D40D 8335E038 -D40E 8335E039 -D40F 8335E130 -D410 8335E131 -D411 8335E132 -D412 8335E133 -D413 8335E134 -D414 8335E135 -D415 8335E136 -D416 8335E137 -D417 8335E138 -D418 8335E139 -D419 8335E230 -D41A 8335E231 -D41B 8335E232 -D41C 8335E233 -D41D 8335E234 -D41E 8335E235 -D41F 8335E236 -D420 8335E237 -D421 8335E238 -D422 8335E239 -D423 8335E330 -D424 8335E331 -D425 8335E332 -D426 8335E333 -D427 8335E334 -D428 8335E335 -D429 8335E336 -D42A 8335E337 -D42B 8335E338 -D42C 8335E339 -D42D 8335E430 -D42E 8335E431 -D42F 8335E432 -D430 8335E433 -D431 8335E434 -D432 8335E435 -D433 8335E436 -D434 8335E437 -D435 8335E438 -D436 8335E439 -D437 8335E530 -D438 8335E531 -D439 8335E532 -D43A 8335E533 -D43B 8335E534 -D43C 8335E535 -D43D 8335E536 -D43E 8335E537 -D43F 8335E538 -D440 8335E539 -D441 8335E630 -D442 8335E631 -D443 8335E632 -D444 8335E633 -D445 8335E634 -D446 8335E635 -D447 8335E636 -D448 
8335E637 -D449 8335E638 -D44A 8335E639 -D44B 8335E730 -D44C 8335E731 -D44D 8335E732 -D44E 8335E733 -D44F 8335E734 -D450 8335E735 -D451 8335E736 -D452 8335E737 -D453 8335E738 -D454 8335E739 -D455 8335E830 -D456 8335E831 -D457 8335E832 -D458 8335E833 -D459 8335E834 -D45A 8335E835 -D45B 8335E836 -D45C 8335E837 -D45D 8335E838 -D45E 8335E839 -D45F 8335E930 -D460 8335E931 -D461 8335E932 -D462 8335E933 -D463 8335E934 -D464 8335E935 -D465 8335E936 -D466 8335E937 -D467 8335E938 -D468 8335E939 -D469 8335EA30 -D46A 8335EA31 -D46B 8335EA32 -D46C 8335EA33 -D46D 8335EA34 -D46E 8335EA35 -D46F 8335EA36 -D470 8335EA37 -D471 8335EA38 -D472 8335EA39 -D473 8335EB30 -D474 8335EB31 -D475 8335EB32 -D476 8335EB33 -D477 8335EB34 -D478 8335EB35 -D479 8335EB36 -D47A 8335EB37 -D47B 8335EB38 -D47C 8335EB39 -D47D 8335EC30 -D47E 8335EC31 -D47F 8335EC32 -D480 8335EC33 -D481 8335EC34 -D482 8335EC35 -D483 8335EC36 -D484 8335EC37 -D485 8335EC38 -D486 8335EC39 -D487 8335ED30 -D488 8335ED31 -D489 8335ED32 -D48A 8335ED33 -D48B 8335ED34 -D48C 8335ED35 -D48D 8335ED36 -D48E 8335ED37 -D48F 8335ED38 -D490 8335ED39 -D491 8335EE30 -D492 8335EE31 -D493 8335EE32 -D494 8335EE33 -D495 8335EE34 -D496 8335EE35 -D497 8335EE36 -D498 8335EE37 -D499 8335EE38 -D49A 8335EE39 -D49B 8335EF30 -D49C 8335EF31 -D49D 8335EF32 -D49E 8335EF33 -D49F 8335EF34 -D4A0 8335EF35 -D4A1 8335EF36 -D4A2 8335EF37 -D4A3 8335EF38 -D4A4 8335EF39 -D4A5 8335F030 -D4A6 8335F031 -D4A7 8335F032 -D4A8 8335F033 -D4A9 8335F034 -D4AA 8335F035 -D4AB 8335F036 -D4AC 8335F037 -D4AD 8335F038 -D4AE 8335F039 -D4AF 8335F130 -D4B0 8335F131 -D4B1 8335F132 -D4B2 8335F133 -D4B3 8335F134 -D4B4 8335F135 -D4B5 8335F136 -D4B6 8335F137 -D4B7 8335F138 -D4B8 8335F139 -D4B9 8335F230 -D4BA 8335F231 -D4BB 8335F232 -D4BC 8335F233 -D4BD 8335F234 -D4BE 8335F235 -D4BF 8335F236 -D4C0 8335F237 -D4C1 8335F238 -D4C2 8335F239 -D4C3 8335F330 -D4C4 8335F331 -D4C5 8335F332 -D4C6 8335F333 -D4C7 8335F334 -D4C8 8335F335 -D4C9 8335F336 -D4CA 8335F337 -D4CB 8335F338 -D4CC 8335F339 -D4CD 8335F430 -D4CE 8335F431 -D4CF 8335F432 -D4D0 8335F433 -D4D1 8335F434 -D4D2 8335F435 -D4D3 8335F436 -D4D4 8335F437 -D4D5 8335F438 -D4D6 8335F439 -D4D7 8335F530 -D4D8 8335F531 -D4D9 8335F532 -D4DA 8335F533 -D4DB 8335F534 -D4DC 8335F535 -D4DD 8335F536 -D4DE 8335F537 -D4DF 8335F538 -D4E0 8335F539 -D4E1 8335F630 -D4E2 8335F631 -D4E3 8335F632 -D4E4 8335F633 -D4E5 8335F634 -D4E6 8335F635 -D4E7 8335F636 -D4E8 8335F637 -D4E9 8335F638 -D4EA 8335F639 -D4EB 8335F730 -D4EC 8335F731 -D4ED 8335F732 -D4EE 8335F733 -D4EF 8335F734 -D4F0 8335F735 -D4F1 8335F736 -D4F2 8335F737 -D4F3 8335F738 -D4F4 8335F739 -D4F5 8335F830 -D4F6 8335F831 -D4F7 8335F832 -D4F8 8335F833 -D4F9 8335F834 -D4FA 8335F835 -D4FB 8335F836 -D4FC 8335F837 -D4FD 8335F838 -D4FE 8335F839 -D4FF 8335F930 -D500 8335F931 -D501 8335F932 -D502 8335F933 -D503 8335F934 -D504 8335F935 -D505 8335F936 -D506 8335F937 -D507 8335F938 -D508 8335F939 -D509 8335FA30 -D50A 8335FA31 -D50B 8335FA32 -D50C 8335FA33 -D50D 8335FA34 -D50E 8335FA35 -D50F 8335FA36 -D510 8335FA37 -D511 8335FA38 -D512 8335FA39 -D513 8335FB30 -D514 8335FB31 -D515 8335FB32 -D516 8335FB33 -D517 8335FB34 -D518 8335FB35 -D519 8335FB36 -D51A 8335FB37 -D51B 8335FB38 -D51C 8335FB39 -D51D 8335FC30 -D51E 8335FC31 -D51F 8335FC32 -D520 8335FC33 -D521 8335FC34 -D522 8335FC35 -D523 8335FC36 -D524 8335FC37 -D525 8335FC38 -D526 8335FC39 -D527 8335FD30 -D528 8335FD31 -D529 8335FD32 -D52A 8335FD33 -D52B 8335FD34 -D52C 8335FD35 -D52D 8335FD36 -D52E 8335FD37 -D52F 8335FD38 -D530 8335FD39 -D531 8335FE30 -D532 8335FE31 -D533 8335FE32 -D534 8335FE33 -D535 
8335FE34 -D536 8335FE35 -D537 8335FE36 -D538 8335FE37 -D539 8335FE38 -D53A 8335FE39 -D53B 83368130 -D53C 83368131 -D53D 83368132 -D53E 83368133 -D53F 83368134 -D540 83368135 -D541 83368136 -D542 83368137 -D543 83368138 -D544 83368139 -D545 83368230 -D546 83368231 -D547 83368232 -D548 83368233 -D549 83368234 -D54A 83368235 -D54B 83368236 -D54C 83368237 -D54D 83368238 -D54E 83368239 -D54F 83368330 -D550 83368331 -D551 83368332 -D552 83368333 -D553 83368334 -D554 83368335 -D555 83368336 -D556 83368337 -D557 83368338 -D558 83368339 -D559 83368430 -D55A 83368431 -D55B 83368432 -D55C 83368433 -D55D 83368434 -D55E 83368435 -D55F 83368436 -D560 83368437 -D561 83368438 -D562 83368439 -D563 83368530 -D564 83368531 -D565 83368532 -D566 83368533 -D567 83368534 -D568 83368535 -D569 83368536 -D56A 83368537 -D56B 83368538 -D56C 83368539 -D56D 83368630 -D56E 83368631 -D56F 83368632 -D570 83368633 -D571 83368634 -D572 83368635 -D573 83368636 -D574 83368637 -D575 83368638 -D576 83368639 -D577 83368730 -D578 83368731 -D579 83368732 -D57A 83368733 -D57B 83368734 -D57C 83368735 -D57D 83368736 -D57E 83368737 -D57F 83368738 -D580 83368739 -D581 83368830 -D582 83368831 -D583 83368832 -D584 83368833 -D585 83368834 -D586 83368835 -D587 83368836 -D588 83368837 -D589 83368838 -D58A 83368839 -D58B 83368930 -D58C 83368931 -D58D 83368932 -D58E 83368933 -D58F 83368934 -D590 83368935 -D591 83368936 -D592 83368937 -D593 83368938 -D594 83368939 -D595 83368A30 -D596 83368A31 -D597 83368A32 -D598 83368A33 -D599 83368A34 -D59A 83368A35 -D59B 83368A36 -D59C 83368A37 -D59D 83368A38 -D59E 83368A39 -D59F 83368B30 -D5A0 83368B31 -D5A1 83368B32 -D5A2 83368B33 -D5A3 83368B34 -D5A4 83368B35 -D5A5 83368B36 -D5A6 83368B37 -D5A7 83368B38 -D5A8 83368B39 -D5A9 83368C30 -D5AA 83368C31 -D5AB 83368C32 -D5AC 83368C33 -D5AD 83368C34 -D5AE 83368C35 -D5AF 83368C36 -D5B0 83368C37 -D5B1 83368C38 -D5B2 83368C39 -D5B3 83368D30 -D5B4 83368D31 -D5B5 83368D32 -D5B6 83368D33 -D5B7 83368D34 -D5B8 83368D35 -D5B9 83368D36 -D5BA 83368D37 -D5BB 83368D38 -D5BC 83368D39 -D5BD 83368E30 -D5BE 83368E31 -D5BF 83368E32 -D5C0 83368E33 -D5C1 83368E34 -D5C2 83368E35 -D5C3 83368E36 -D5C4 83368E37 -D5C5 83368E38 -D5C6 83368E39 -D5C7 83368F30 -D5C8 83368F31 -D5C9 83368F32 -D5CA 83368F33 -D5CB 83368F34 -D5CC 83368F35 -D5CD 83368F36 -D5CE 83368F37 -D5CF 83368F38 -D5D0 83368F39 -D5D1 83369030 -D5D2 83369031 -D5D3 83369032 -D5D4 83369033 -D5D5 83369034 -D5D6 83369035 -D5D7 83369036 -D5D8 83369037 -D5D9 83369038 -D5DA 83369039 -D5DB 83369130 -D5DC 83369131 -D5DD 83369132 -D5DE 83369133 -D5DF 83369134 -D5E0 83369135 -D5E1 83369136 -D5E2 83369137 -D5E3 83369138 -D5E4 83369139 -D5E5 83369230 -D5E6 83369231 -D5E7 83369232 -D5E8 83369233 -D5E9 83369234 -D5EA 83369235 -D5EB 83369236 -D5EC 83369237 -D5ED 83369238 -D5EE 83369239 -D5EF 83369330 -D5F0 83369331 -D5F1 83369332 -D5F2 83369333 -D5F3 83369334 -D5F4 83369335 -D5F5 83369336 -D5F6 83369337 -D5F7 83369338 -D5F8 83369339 -D5F9 83369430 -D5FA 83369431 -D5FB 83369432 -D5FC 83369433 -D5FD 83369434 -D5FE 83369435 -D5FF 83369436 -D600 83369437 -D601 83369438 -D602 83369439 -D603 83369530 -D604 83369531 -D605 83369532 -D606 83369533 -D607 83369534 -D608 83369535 -D609 83369536 -D60A 83369537 -D60B 83369538 -D60C 83369539 -D60D 83369630 -D60E 83369631 -D60F 83369632 -D610 83369633 -D611 83369634 -D612 83369635 -D613 83369636 -D614 83369637 -D615 83369638 -D616 83369639 -D617 83369730 -D618 83369731 -D619 83369732 -D61A 83369733 -D61B 83369734 -D61C 83369735 -D61D 83369736 -D61E 83369737 -D61F 83369738 -D620 83369739 -D621 83369830 -D622 
83369831 -D623 83369832 -D624 83369833 -D625 83369834 -D626 83369835 -D627 83369836 -D628 83369837 -D629 83369838 -D62A 83369839 -D62B 83369930 -D62C 83369931 -D62D 83369932 -D62E 83369933 -D62F 83369934 -D630 83369935 -D631 83369936 -D632 83369937 -D633 83369938 -D634 83369939 -D635 83369A30 -D636 83369A31 -D637 83369A32 -D638 83369A33 -D639 83369A34 -D63A 83369A35 -D63B 83369A36 -D63C 83369A37 -D63D 83369A38 -D63E 83369A39 -D63F 83369B30 -D640 83369B31 -D641 83369B32 -D642 83369B33 -D643 83369B34 -D644 83369B35 -D645 83369B36 -D646 83369B37 -D647 83369B38 -D648 83369B39 -D649 83369C30 -D64A 83369C31 -D64B 83369C32 -D64C 83369C33 -D64D 83369C34 -D64E 83369C35 -D64F 83369C36 -D650 83369C37 -D651 83369C38 -D652 83369C39 -D653 83369D30 -D654 83369D31 -D655 83369D32 -D656 83369D33 -D657 83369D34 -D658 83369D35 -D659 83369D36 -D65A 83369D37 -D65B 83369D38 -D65C 83369D39 -D65D 83369E30 -D65E 83369E31 -D65F 83369E32 -D660 83369E33 -D661 83369E34 -D662 83369E35 -D663 83369E36 -D664 83369E37 -D665 83369E38 -D666 83369E39 -D667 83369F30 -D668 83369F31 -D669 83369F32 -D66A 83369F33 -D66B 83369F34 -D66C 83369F35 -D66D 83369F36 -D66E 83369F37 -D66F 83369F38 -D670 83369F39 -D671 8336A030 -D672 8336A031 -D673 8336A032 -D674 8336A033 -D675 8336A034 -D676 8336A035 -D677 8336A036 -D678 8336A037 -D679 8336A038 -D67A 8336A039 -D67B 8336A130 -D67C 8336A131 -D67D 8336A132 -D67E 8336A133 -D67F 8336A134 -D680 8336A135 -D681 8336A136 -D682 8336A137 -D683 8336A138 -D684 8336A139 -D685 8336A230 -D686 8336A231 -D687 8336A232 -D688 8336A233 -D689 8336A234 -D68A 8336A235 -D68B 8336A236 -D68C 8336A237 -D68D 8336A238 -D68E 8336A239 -D68F 8336A330 -D690 8336A331 -D691 8336A332 -D692 8336A333 -D693 8336A334 -D694 8336A335 -D695 8336A336 -D696 8336A337 -D697 8336A338 -D698 8336A339 -D699 8336A430 -D69A 8336A431 -D69B 8336A432 -D69C 8336A433 -D69D 8336A434 -D69E 8336A435 -D69F 8336A436 -D6A0 8336A437 -D6A1 8336A438 -D6A2 8336A439 -D6A3 8336A530 -D6A4 8336A531 -D6A5 8336A532 -D6A6 8336A533 -D6A7 8336A534 -D6A8 8336A535 -D6A9 8336A536 -D6AA 8336A537 -D6AB 8336A538 -D6AC 8336A539 -D6AD 8336A630 -D6AE 8336A631 -D6AF 8336A632 -D6B0 8336A633 -D6B1 8336A634 -D6B2 8336A635 -D6B3 8336A636 -D6B4 8336A637 -D6B5 8336A638 -D6B6 8336A639 -D6B7 8336A730 -D6B8 8336A731 -D6B9 8336A732 -D6BA 8336A733 -D6BB 8336A734 -D6BC 8336A735 -D6BD 8336A736 -D6BE 8336A737 -D6BF 8336A738 -D6C0 8336A739 -D6C1 8336A830 -D6C2 8336A831 -D6C3 8336A832 -D6C4 8336A833 -D6C5 8336A834 -D6C6 8336A835 -D6C7 8336A836 -D6C8 8336A837 -D6C9 8336A838 -D6CA 8336A839 -D6CB 8336A930 -D6CC 8336A931 -D6CD 8336A932 -D6CE 8336A933 -D6CF 8336A934 -D6D0 8336A935 -D6D1 8336A936 -D6D2 8336A937 -D6D3 8336A938 -D6D4 8336A939 -D6D5 8336AA30 -D6D6 8336AA31 -D6D7 8336AA32 -D6D8 8336AA33 -D6D9 8336AA34 -D6DA 8336AA35 -D6DB 8336AA36 -D6DC 8336AA37 -D6DD 8336AA38 -D6DE 8336AA39 -D6DF 8336AB30 -D6E0 8336AB31 -D6E1 8336AB32 -D6E2 8336AB33 -D6E3 8336AB34 -D6E4 8336AB35 -D6E5 8336AB36 -D6E6 8336AB37 -D6E7 8336AB38 -D6E8 8336AB39 -D6E9 8336AC30 -D6EA 8336AC31 -D6EB 8336AC32 -D6EC 8336AC33 -D6ED 8336AC34 -D6EE 8336AC35 -D6EF 8336AC36 -D6F0 8336AC37 -D6F1 8336AC38 -D6F2 8336AC39 -D6F3 8336AD30 -D6F4 8336AD31 -D6F5 8336AD32 -D6F6 8336AD33 -D6F7 8336AD34 -D6F8 8336AD35 -D6F9 8336AD36 -D6FA 8336AD37 -D6FB 8336AD38 -D6FC 8336AD39 -D6FD 8336AE30 -D6FE 8336AE31 -D6FF 8336AE32 -D700 8336AE33 -D701 8336AE34 -D702 8336AE35 -D703 8336AE36 -D704 8336AE37 -D705 8336AE38 -D706 8336AE39 -D707 8336AF30 -D708 8336AF31 -D709 8336AF32 -D70A 8336AF33 -D70B 8336AF34 -D70C 8336AF35 -D70D 8336AF36 -D70E 8336AF37 -D70F 
8336AF38 -D710 8336AF39 -D711 8336B030 -D712 8336B031 -D713 8336B032 -D714 8336B033 -D715 8336B034 -D716 8336B035 -D717 8336B036 -D718 8336B037 -D719 8336B038 -D71A 8336B039 -D71B 8336B130 -D71C 8336B131 -D71D 8336B132 -D71E 8336B133 -D71F 8336B134 -D720 8336B135 -D721 8336B136 -D722 8336B137 -D723 8336B138 -D724 8336B139 -D725 8336B230 -D726 8336B231 -D727 8336B232 -D728 8336B233 -D729 8336B234 -D72A 8336B235 -D72B 8336B236 -D72C 8336B237 -D72D 8336B238 -D72E 8336B239 -D72F 8336B330 -D730 8336B331 -D731 8336B332 -D732 8336B333 -D733 8336B334 -D734 8336B335 -D735 8336B336 -D736 8336B337 -D737 8336B338 -D738 8336B339 -D739 8336B430 -D73A 8336B431 -D73B 8336B432 -D73C 8336B433 -D73D 8336B434 -D73E 8336B435 -D73F 8336B436 -D740 8336B437 -D741 8336B438 -D742 8336B439 -D743 8336B530 -D744 8336B531 -D745 8336B532 -D746 8336B533 -D747 8336B534 -D748 8336B535 -D749 8336B536 -D74A 8336B537 -D74B 8336B538 -D74C 8336B539 -D74D 8336B630 -D74E 8336B631 -D74F 8336B632 -D750 8336B633 -D751 8336B634 -D752 8336B635 -D753 8336B636 -D754 8336B637 -D755 8336B638 -D756 8336B639 -D757 8336B730 -D758 8336B731 -D759 8336B732 -D75A 8336B733 -D75B 8336B734 -D75C 8336B735 -D75D 8336B736 -D75E 8336B737 -D75F 8336B738 -D760 8336B739 -D761 8336B830 -D762 8336B831 -D763 8336B832 -D764 8336B833 -D765 8336B834 -D766 8336B835 -D767 8336B836 -D768 8336B837 -D769 8336B838 -D76A 8336B839 -D76B 8336B930 -D76C 8336B931 -D76D 8336B932 -D76E 8336B933 -D76F 8336B934 -D770 8336B935 -D771 8336B936 -D772 8336B937 -D773 8336B938 -D774 8336B939 -D775 8336BA30 -D776 8336BA31 -D777 8336BA32 -D778 8336BA33 -D779 8336BA34 -D77A 8336BA35 -D77B 8336BA36 -D77C 8336BA37 -D77D 8336BA38 -D77E 8336BA39 -D77F 8336BB30 -D780 8336BB31 -D781 8336BB32 -D782 8336BB33 -D783 8336BB34 -D784 8336BB35 -D785 8336BB36 -D786 8336BB37 -D787 8336BB38 -D788 8336BB39 -D789 8336BC30 -D78A 8336BC31 -D78B 8336BC32 -D78C 8336BC33 -D78D 8336BC34 -D78E 8336BC35 -D78F 8336BC36 -D790 8336BC37 -D791 8336BC38 -D792 8336BC39 -D793 8336BD30 -D794 8336BD31 -D795 8336BD32 -D796 8336BD33 -D797 8336BD34 -D798 8336BD35 -D799 8336BD36 -D79A 8336BD37 -D79B 8336BD38 -D79C 8336BD39 -D79D 8336BE30 -D79E 8336BE31 -D79F 8336BE32 -D7A0 8336BE33 -D7A1 8336BE34 -D7A2 8336BE35 -D7A3 8336BE36 -D7A4 8336BE37 -D7A5 8336BE38 -D7A6 8336BE39 -D7A7 8336BF30 -D7A8 8336BF31 -D7A9 8336BF32 -D7AA 8336BF33 -D7AB 8336BF34 -D7AC 8336BF35 -D7AD 8336BF36 -D7AE 8336BF37 -D7AF 8336BF38 -D7B0 8336BF39 -D7B1 8336C030 -D7B2 8336C031 -D7B3 8336C032 -D7B4 8336C033 -D7B5 8336C034 -D7B6 8336C035 -D7B7 8336C036 -D7B8 8336C037 -D7B9 8336C038 -D7BA 8336C039 -D7BB 8336C130 -D7BC 8336C131 -D7BD 8336C132 -D7BE 8336C133 -D7BF 8336C134 -D7C0 8336C135 -D7C1 8336C136 -D7C2 8336C137 -D7C3 8336C138 -D7C4 8336C139 -D7C5 8336C230 -D7C6 8336C231 -D7C7 8336C232 -D7C8 8336C233 -D7C9 8336C234 -D7CA 8336C235 -D7CB 8336C236 -D7CC 8336C237 -D7CD 8336C238 -D7CE 8336C239 -D7CF 8336C330 -D7D0 8336C331 -D7D1 8336C332 -D7D2 8336C333 -D7D3 8336C334 -D7D4 8336C335 -D7D5 8336C336 -D7D6 8336C337 -D7D7 8336C338 -D7D8 8336C339 -D7D9 8336C430 -D7DA 8336C431 -D7DB 8336C432 -D7DC 8336C433 -D7DD 8336C434 -D7DE 8336C435 -D7DF 8336C436 -D7E0 8336C437 -D7E1 8336C438 -D7E2 8336C439 -D7E3 8336C530 -D7E4 8336C531 -D7E5 8336C532 -D7E6 8336C533 -D7E7 8336C534 -D7E8 8336C535 -D7E9 8336C536 -D7EA 8336C537 -D7EB 8336C538 -D7EC 8336C539 -D7ED 8336C630 -D7EE 8336C631 -D7EF 8336C632 -D7F0 8336C633 -D7F1 8336C634 -D7F2 8336C635 -D7F3 8336C636 -D7F4 8336C637 -D7F5 8336C638 -D7F6 8336C639 -D7F7 8336C730 -D7F8 8336C731 -D7F9 8336C732 -D7FA 8336C733 -D7FB 8336C734 -D7FC 
8336C735 -D7FD 8336C736 -D7FE 8336C737 -D7FF 8336C738 -E000 AAA1 -E001 AAA2 -E002 AAA3 -E003 AAA4 -E004 AAA5 -E005 AAA6 -E006 AAA7 -E007 AAA8 -E008 AAA9 -E009 AAAA -E00A AAAB -E00B AAAC -E00C AAAD -E00D AAAE -E00E AAAF -E00F AAB0 -E010 AAB1 -E011 AAB2 -E012 AAB3 -E013 AAB4 -E014 AAB5 -E015 AAB6 -E016 AAB7 -E017 AAB8 -E018 AAB9 -E019 AABA -E01A AABB -E01B AABC -E01C AABD -E01D AABE -E01E AABF -E01F AAC0 -E020 AAC1 -E021 AAC2 -E022 AAC3 -E023 AAC4 -E024 AAC5 -E025 AAC6 -E026 AAC7 -E027 AAC8 -E028 AAC9 -E029 AACA -E02A AACB -E02B AACC -E02C AACD -E02D AACE -E02E AACF -E02F AAD0 -E030 AAD1 -E031 AAD2 -E032 AAD3 -E033 AAD4 -E034 AAD5 -E035 AAD6 -E036 AAD7 -E037 AAD8 -E038 AAD9 -E039 AADA -E03A AADB -E03B AADC -E03C AADD -E03D AADE -E03E AADF -E03F AAE0 -E040 AAE1 -E041 AAE2 -E042 AAE3 -E043 AAE4 -E044 AAE5 -E045 AAE6 -E046 AAE7 -E047 AAE8 -E048 AAE9 -E049 AAEA -E04A AAEB -E04B AAEC -E04C AAED -E04D AAEE -E04E AAEF -E04F AAF0 -E050 AAF1 -E051 AAF2 -E052 AAF3 -E053 AAF4 -E054 AAF5 -E055 AAF6 -E056 AAF7 -E057 AAF8 -E058 AAF9 -E059 AAFA -E05A AAFB -E05B AAFC -E05C AAFD -E05D AAFE -E05E ABA1 -E05F ABA2 -E060 ABA3 -E061 ABA4 -E062 ABA5 -E063 ABA6 -E064 ABA7 -E065 ABA8 -E066 ABA9 -E067 ABAA -E068 ABAB -E069 ABAC -E06A ABAD -E06B ABAE -E06C ABAF -E06D ABB0 -E06E ABB1 -E06F ABB2 -E070 ABB3 -E071 ABB4 -E072 ABB5 -E073 ABB6 -E074 ABB7 -E075 ABB8 -E076 ABB9 -E077 ABBA -E078 ABBB -E079 ABBC -E07A ABBD -E07B ABBE -E07C ABBF -E07D ABC0 -E07E ABC1 -E07F ABC2 -E080 ABC3 -E081 ABC4 -E082 ABC5 -E083 ABC6 -E084 ABC7 -E085 ABC8 -E086 ABC9 -E087 ABCA -E088 ABCB -E089 ABCC -E08A ABCD -E08B ABCE -E08C ABCF -E08D ABD0 -E08E ABD1 -E08F ABD2 -E090 ABD3 -E091 ABD4 -E092 ABD5 -E093 ABD6 -E094 ABD7 -E095 ABD8 -E096 ABD9 -E097 ABDA -E098 ABDB -E099 ABDC -E09A ABDD -E09B ABDE -E09C ABDF -E09D ABE0 -E09E ABE1 -E09F ABE2 -E0A0 ABE3 -E0A1 ABE4 -E0A2 ABE5 -E0A3 ABE6 -E0A4 ABE7 -E0A5 ABE8 -E0A6 ABE9 -E0A7 ABEA -E0A8 ABEB -E0A9 ABEC -E0AA ABED -E0AB ABEE -E0AC ABEF -E0AD ABF0 -E0AE ABF1 -E0AF ABF2 -E0B0 ABF3 -E0B1 ABF4 -E0B2 ABF5 -E0B3 ABF6 -E0B4 ABF7 -E0B5 ABF8 -E0B6 ABF9 -E0B7 ABFA -E0B8 ABFB -E0B9 ABFC -E0BA ABFD -E0BB ABFE -E0BC ACA1 -E0BD ACA2 -E0BE ACA3 -E0BF ACA4 -E0C0 ACA5 -E0C1 ACA6 -E0C2 ACA7 -E0C3 ACA8 -E0C4 ACA9 -E0C5 ACAA -E0C6 ACAB -E0C7 ACAC -E0C8 ACAD -E0C9 ACAE -E0CA ACAF -E0CB ACB0 -E0CC ACB1 -E0CD ACB2 -E0CE ACB3 -E0CF ACB4 -E0D0 ACB5 -E0D1 ACB6 -E0D2 ACB7 -E0D3 ACB8 -E0D4 ACB9 -E0D5 ACBA -E0D6 ACBB -E0D7 ACBC -E0D8 ACBD -E0D9 ACBE -E0DA ACBF -E0DB ACC0 -E0DC ACC1 -E0DD ACC2 -E0DE ACC3 -E0DF ACC4 -E0E0 ACC5 -E0E1 ACC6 -E0E2 ACC7 -E0E3 ACC8 -E0E4 ACC9 -E0E5 ACCA -E0E6 ACCB -E0E7 ACCC -E0E8 ACCD -E0E9 ACCE -E0EA ACCF -E0EB ACD0 -E0EC ACD1 -E0ED ACD2 -E0EE ACD3 -E0EF ACD4 -E0F0 ACD5 -E0F1 ACD6 -E0F2 ACD7 -E0F3 ACD8 -E0F4 ACD9 -E0F5 ACDA -E0F6 ACDB -E0F7 ACDC -E0F8 ACDD -E0F9 ACDE -E0FA ACDF -E0FB ACE0 -E0FC ACE1 -E0FD ACE2 -E0FE ACE3 -E0FF ACE4 -E100 ACE5 -E101 ACE6 -E102 ACE7 -E103 ACE8 -E104 ACE9 -E105 ACEA -E106 ACEB -E107 ACEC -E108 ACED -E109 ACEE -E10A ACEF -E10B ACF0 -E10C ACF1 -E10D ACF2 -E10E ACF3 -E10F ACF4 -E110 ACF5 -E111 ACF6 -E112 ACF7 -E113 ACF8 -E114 ACF9 -E115 ACFA -E116 ACFB -E117 ACFC -E118 ACFD -E119 ACFE -E11A ADA1 -E11B ADA2 -E11C ADA3 -E11D ADA4 -E11E ADA5 -E11F ADA6 -E120 ADA7 -E121 ADA8 -E122 ADA9 -E123 ADAA -E124 ADAB -E125 ADAC -E126 ADAD -E127 ADAE -E128 ADAF -E129 ADB0 -E12A ADB1 -E12B ADB2 -E12C ADB3 -E12D ADB4 -E12E ADB5 -E12F ADB6 -E130 ADB7 -E131 ADB8 -E132 ADB9 -E133 ADBA -E134 ADBB -E135 ADBC -E136 ADBD -E137 ADBE -E138 ADBF -E139 ADC0 -E13A ADC1 -E13B ADC2 -E13C ADC3 -E13D ADC4 
-E13E ADC5 -E13F ADC6 -E140 ADC7 -E141 ADC8 -E142 ADC9 -E143 ADCA -E144 ADCB -E145 ADCC -E146 ADCD -E147 ADCE -E148 ADCF -E149 ADD0 -E14A ADD1 -E14B ADD2 -E14C ADD3 -E14D ADD4 -E14E ADD5 -E14F ADD6 -E150 ADD7 -E151 ADD8 -E152 ADD9 -E153 ADDA -E154 ADDB -E155 ADDC -E156 ADDD -E157 ADDE -E158 ADDF -E159 ADE0 -E15A ADE1 -E15B ADE2 -E15C ADE3 -E15D ADE4 -E15E ADE5 -E15F ADE6 -E160 ADE7 -E161 ADE8 -E162 ADE9 -E163 ADEA -E164 ADEB -E165 ADEC -E166 ADED -E167 ADEE -E168 ADEF -E169 ADF0 -E16A ADF1 -E16B ADF2 -E16C ADF3 -E16D ADF4 -E16E ADF5 -E16F ADF6 -E170 ADF7 -E171 ADF8 -E172 ADF9 -E173 ADFA -E174 ADFB -E175 ADFC -E176 ADFD -E177 ADFE -E178 AEA1 -E179 AEA2 -E17A AEA3 -E17B AEA4 -E17C AEA5 -E17D AEA6 -E17E AEA7 -E17F AEA8 -E180 AEA9 -E181 AEAA -E182 AEAB -E183 AEAC -E184 AEAD -E185 AEAE -E186 AEAF -E187 AEB0 -E188 AEB1 -E189 AEB2 -E18A AEB3 -E18B AEB4 -E18C AEB5 -E18D AEB6 -E18E AEB7 -E18F AEB8 -E190 AEB9 -E191 AEBA -E192 AEBB -E193 AEBC -E194 AEBD -E195 AEBE -E196 AEBF -E197 AEC0 -E198 AEC1 -E199 AEC2 -E19A AEC3 -E19B AEC4 -E19C AEC5 -E19D AEC6 -E19E AEC7 -E19F AEC8 -E1A0 AEC9 -E1A1 AECA -E1A2 AECB -E1A3 AECC -E1A4 AECD -E1A5 AECE -E1A6 AECF -E1A7 AED0 -E1A8 AED1 -E1A9 AED2 -E1AA AED3 -E1AB AED4 -E1AC AED5 -E1AD AED6 -E1AE AED7 -E1AF AED8 -E1B0 AED9 -E1B1 AEDA -E1B2 AEDB -E1B3 AEDC -E1B4 AEDD -E1B5 AEDE -E1B6 AEDF -E1B7 AEE0 -E1B8 AEE1 -E1B9 AEE2 -E1BA AEE3 -E1BB AEE4 -E1BC AEE5 -E1BD AEE6 -E1BE AEE7 -E1BF AEE8 -E1C0 AEE9 -E1C1 AEEA -E1C2 AEEB -E1C3 AEEC -E1C4 AEED -E1C5 AEEE -E1C6 AEEF -E1C7 AEF0 -E1C8 AEF1 -E1C9 AEF2 -E1CA AEF3 -E1CB AEF4 -E1CC AEF5 -E1CD AEF6 -E1CE AEF7 -E1CF AEF8 -E1D0 AEF9 -E1D1 AEFA -E1D2 AEFB -E1D3 AEFC -E1D4 AEFD -E1D5 AEFE -E1D6 AFA1 -E1D7 AFA2 -E1D8 AFA3 -E1D9 AFA4 -E1DA AFA5 -E1DB AFA6 -E1DC AFA7 -E1DD AFA8 -E1DE AFA9 -E1DF AFAA -E1E0 AFAB -E1E1 AFAC -E1E2 AFAD -E1E3 AFAE -E1E4 AFAF -E1E5 AFB0 -E1E6 AFB1 -E1E7 AFB2 -E1E8 AFB3 -E1E9 AFB4 -E1EA AFB5 -E1EB AFB6 -E1EC AFB7 -E1ED AFB8 -E1EE AFB9 -E1EF AFBA -E1F0 AFBB -E1F1 AFBC -E1F2 AFBD -E1F3 AFBE -E1F4 AFBF -E1F5 AFC0 -E1F6 AFC1 -E1F7 AFC2 -E1F8 AFC3 -E1F9 AFC4 -E1FA AFC5 -E1FB AFC6 -E1FC AFC7 -E1FD AFC8 -E1FE AFC9 -E1FF AFCA -E200 AFCB -E201 AFCC -E202 AFCD -E203 AFCE -E204 AFCF -E205 AFD0 -E206 AFD1 -E207 AFD2 -E208 AFD3 -E209 AFD4 -E20A AFD5 -E20B AFD6 -E20C AFD7 -E20D AFD8 -E20E AFD9 -E20F AFDA -E210 AFDB -E211 AFDC -E212 AFDD -E213 AFDE -E214 AFDF -E215 AFE0 -E216 AFE1 -E217 AFE2 -E218 AFE3 -E219 AFE4 -E21A AFE5 -E21B AFE6 -E21C AFE7 -E21D AFE8 -E21E AFE9 -E21F AFEA -E220 AFEB -E221 AFEC -E222 AFED -E223 AFEE -E224 AFEF -E225 AFF0 -E226 AFF1 -E227 AFF2 -E228 AFF3 -E229 AFF4 -E22A AFF5 -E22B AFF6 -E22C AFF7 -E22D AFF8 -E22E AFF9 -E22F AFFA -E230 AFFB -E231 AFFC -E232 AFFD -E233 AFFE -E234 F8A1 -E235 F8A2 -E236 F8A3 -E237 F8A4 -E238 F8A5 -E239 F8A6 -E23A F8A7 -E23B F8A8 -E23C F8A9 -E23D F8AA -E23E F8AB -E23F F8AC -E240 F8AD -E241 F8AE -E242 F8AF -E243 F8B0 -E244 F8B1 -E245 F8B2 -E246 F8B3 -E247 F8B4 -E248 F8B5 -E249 F8B6 -E24A F8B7 -E24B F8B8 -E24C F8B9 -E24D F8BA -E24E F8BB -E24F F8BC -E250 F8BD -E251 F8BE -E252 F8BF -E253 F8C0 -E254 F8C1 -E255 F8C2 -E256 F8C3 -E257 F8C4 -E258 F8C5 -E259 F8C6 -E25A F8C7 -E25B F8C8 -E25C F8C9 -E25D F8CA -E25E F8CB -E25F F8CC -E260 F8CD -E261 F8CE -E262 F8CF -E263 F8D0 -E264 F8D1 -E265 F8D2 -E266 F8D3 -E267 F8D4 -E268 F8D5 -E269 F8D6 -E26A F8D7 -E26B F8D8 -E26C F8D9 -E26D F8DA -E26E F8DB -E26F F8DC -E270 F8DD -E271 F8DE -E272 F8DF -E273 F8E0 -E274 F8E1 -E275 F8E2 -E276 F8E3 -E277 F8E4 -E278 F8E5 -E279 F8E6 -E27A F8E7 -E27B F8E8 -E27C F8E9 -E27D F8EA -E27E F8EB -E27F F8EC -E280 F8ED 
-E281 F8EE -E282 F8EF -E283 F8F0 -E284 F8F1 -E285 F8F2 -E286 F8F3 -E287 F8F4 -E288 F8F5 -E289 F8F6 -E28A F8F7 -E28B F8F8 -E28C F8F9 -E28D F8FA -E28E F8FB -E28F F8FC -E290 F8FD -E291 F8FE -E292 F9A1 -E293 F9A2 -E294 F9A3 -E295 F9A4 -E296 F9A5 -E297 F9A6 -E298 F9A7 -E299 F9A8 -E29A F9A9 -E29B F9AA -E29C F9AB -E29D F9AC -E29E F9AD -E29F F9AE -E2A0 F9AF -E2A1 F9B0 -E2A2 F9B1 -E2A3 F9B2 -E2A4 F9B3 -E2A5 F9B4 -E2A6 F9B5 -E2A7 F9B6 -E2A8 F9B7 -E2A9 F9B8 -E2AA F9B9 -E2AB F9BA -E2AC F9BB -E2AD F9BC -E2AE F9BD -E2AF F9BE -E2B0 F9BF -E2B1 F9C0 -E2B2 F9C1 -E2B3 F9C2 -E2B4 F9C3 -E2B5 F9C4 -E2B6 F9C5 -E2B7 F9C6 -E2B8 F9C7 -E2B9 F9C8 -E2BA F9C9 -E2BB F9CA -E2BC F9CB -E2BD F9CC -E2BE F9CD -E2BF F9CE -E2C0 F9CF -E2C1 F9D0 -E2C2 F9D1 -E2C3 F9D2 -E2C4 F9D3 -E2C5 F9D4 -E2C6 F9D5 -E2C7 F9D6 -E2C8 F9D7 -E2C9 F9D8 -E2CA F9D9 -E2CB F9DA -E2CC F9DB -E2CD F9DC -E2CE F9DD -E2CF F9DE -E2D0 F9DF -E2D1 F9E0 -E2D2 F9E1 -E2D3 F9E2 -E2D4 F9E3 -E2D5 F9E4 -E2D6 F9E5 -E2D7 F9E6 -E2D8 F9E7 -E2D9 F9E8 -E2DA F9E9 -E2DB F9EA -E2DC F9EB -E2DD F9EC -E2DE F9ED -E2DF F9EE -E2E0 F9EF -E2E1 F9F0 -E2E2 F9F1 -E2E3 F9F2 -E2E4 F9F3 -E2E5 F9F4 -E2E6 F9F5 -E2E7 F9F6 -E2E8 F9F7 -E2E9 F9F8 -E2EA F9F9 -E2EB F9FA -E2EC F9FB -E2ED F9FC -E2EE F9FD -E2EF F9FE -E2F0 FAA1 -E2F1 FAA2 -E2F2 FAA3 -E2F3 FAA4 -E2F4 FAA5 -E2F5 FAA6 -E2F6 FAA7 -E2F7 FAA8 -E2F8 FAA9 -E2F9 FAAA -E2FA FAAB -E2FB FAAC -E2FC FAAD -E2FD FAAE -E2FE FAAF -E2FF FAB0 -E300 FAB1 -E301 FAB2 -E302 FAB3 -E303 FAB4 -E304 FAB5 -E305 FAB6 -E306 FAB7 -E307 FAB8 -E308 FAB9 -E309 FABA -E30A FABB -E30B FABC -E30C FABD -E30D FABE -E30E FABF -E30F FAC0 -E310 FAC1 -E311 FAC2 -E312 FAC3 -E313 FAC4 -E314 FAC5 -E315 FAC6 -E316 FAC7 -E317 FAC8 -E318 FAC9 -E319 FACA -E31A FACB -E31B FACC -E31C FACD -E31D FACE -E31E FACF -E31F FAD0 -E320 FAD1 -E321 FAD2 -E322 FAD3 -E323 FAD4 -E324 FAD5 -E325 FAD6 -E326 FAD7 -E327 FAD8 -E328 FAD9 -E329 FADA -E32A FADB -E32B FADC -E32C FADD -E32D FADE -E32E FADF -E32F FAE0 -E330 FAE1 -E331 FAE2 -E332 FAE3 -E333 FAE4 -E334 FAE5 -E335 FAE6 -E336 FAE7 -E337 FAE8 -E338 FAE9 -E339 FAEA -E33A FAEB -E33B FAEC -E33C FAED -E33D FAEE -E33E FAEF -E33F FAF0 -E340 FAF1 -E341 FAF2 -E342 FAF3 -E343 FAF4 -E344 FAF5 -E345 FAF6 -E346 FAF7 -E347 FAF8 -E348 FAF9 -E349 FAFA -E34A FAFB -E34B FAFC -E34C FAFD -E34D FAFE -E34E FBA1 -E34F FBA2 -E350 FBA3 -E351 FBA4 -E352 FBA5 -E353 FBA6 -E354 FBA7 -E355 FBA8 -E356 FBA9 -E357 FBAA -E358 FBAB -E359 FBAC -E35A FBAD -E35B FBAE -E35C FBAF -E35D FBB0 -E35E FBB1 -E35F FBB2 -E360 FBB3 -E361 FBB4 -E362 FBB5 -E363 FBB6 -E364 FBB7 -E365 FBB8 -E366 FBB9 -E367 FBBA -E368 FBBB -E369 FBBC -E36A FBBD -E36B FBBE -E36C FBBF -E36D FBC0 -E36E FBC1 -E36F FBC2 -E370 FBC3 -E371 FBC4 -E372 FBC5 -E373 FBC6 -E374 FBC7 -E375 FBC8 -E376 FBC9 -E377 FBCA -E378 FBCB -E379 FBCC -E37A FBCD -E37B FBCE -E37C FBCF -E37D FBD0 -E37E FBD1 -E37F FBD2 -E380 FBD3 -E381 FBD4 -E382 FBD5 -E383 FBD6 -E384 FBD7 -E385 FBD8 -E386 FBD9 -E387 FBDA -E388 FBDB -E389 FBDC -E38A FBDD -E38B FBDE -E38C FBDF -E38D FBE0 -E38E FBE1 -E38F FBE2 -E390 FBE3 -E391 FBE4 -E392 FBE5 -E393 FBE6 -E394 FBE7 -E395 FBE8 -E396 FBE9 -E397 FBEA -E398 FBEB -E399 FBEC -E39A FBED -E39B FBEE -E39C FBEF -E39D FBF0 -E39E FBF1 -E39F FBF2 -E3A0 FBF3 -E3A1 FBF4 -E3A2 FBF5 -E3A3 FBF6 -E3A4 FBF7 -E3A5 FBF8 -E3A6 FBF9 -E3A7 FBFA -E3A8 FBFB -E3A9 FBFC -E3AA FBFD -E3AB FBFE -E3AC FCA1 -E3AD FCA2 -E3AE FCA3 -E3AF FCA4 -E3B0 FCA5 -E3B1 FCA6 -E3B2 FCA7 -E3B3 FCA8 -E3B4 FCA9 -E3B5 FCAA -E3B6 FCAB -E3B7 FCAC -E3B8 FCAD -E3B9 FCAE -E3BA FCAF -E3BB FCB0 -E3BC FCB1 -E3BD FCB2 -E3BE FCB3 -E3BF FCB4 -E3C0 FCB5 -E3C1 FCB6 -E3C2 FCB7 -E3C3 FCB8 
-E3C4 FCB9 -E3C5 FCBA -E3C6 FCBB -E3C7 FCBC -E3C8 FCBD -E3C9 FCBE -E3CA FCBF -E3CB FCC0 -E3CC FCC1 -E3CD FCC2 -E3CE FCC3 -E3CF FCC4 -E3D0 FCC5 -E3D1 FCC6 -E3D2 FCC7 -E3D3 FCC8 -E3D4 FCC9 -E3D5 FCCA -E3D6 FCCB -E3D7 FCCC -E3D8 FCCD -E3D9 FCCE -E3DA FCCF -E3DB FCD0 -E3DC FCD1 -E3DD FCD2 -E3DE FCD3 -E3DF FCD4 -E3E0 FCD5 -E3E1 FCD6 -E3E2 FCD7 -E3E3 FCD8 -E3E4 FCD9 -E3E5 FCDA -E3E6 FCDB -E3E7 FCDC -E3E8 FCDD -E3E9 FCDE -E3EA FCDF -E3EB FCE0 -E3EC FCE1 -E3ED FCE2 -E3EE FCE3 -E3EF FCE4 -E3F0 FCE5 -E3F1 FCE6 -E3F2 FCE7 -E3F3 FCE8 -E3F4 FCE9 -E3F5 FCEA -E3F6 FCEB -E3F7 FCEC -E3F8 FCED -E3F9 FCEE -E3FA FCEF -E3FB FCF0 -E3FC FCF1 -E3FD FCF2 -E3FE FCF3 -E3FF FCF4 -E400 FCF5 -E401 FCF6 -E402 FCF7 -E403 FCF8 -E404 FCF9 -E405 FCFA -E406 FCFB -E407 FCFC -E408 FCFD -E409 FCFE -E40A FDA1 -E40B FDA2 -E40C FDA3 -E40D FDA4 -E40E FDA5 -E40F FDA6 -E410 FDA7 -E411 FDA8 -E412 FDA9 -E413 FDAA -E414 FDAB -E415 FDAC -E416 FDAD -E417 FDAE -E418 FDAF -E419 FDB0 -E41A FDB1 -E41B FDB2 -E41C FDB3 -E41D FDB4 -E41E FDB5 -E41F FDB6 -E420 FDB7 -E421 FDB8 -E422 FDB9 -E423 FDBA -E424 FDBB -E425 FDBC -E426 FDBD -E427 FDBE -E428 FDBF -E429 FDC0 -E42A FDC1 -E42B FDC2 -E42C FDC3 -E42D FDC4 -E42E FDC5 -E42F FDC6 -E430 FDC7 -E431 FDC8 -E432 FDC9 -E433 FDCA -E434 FDCB -E435 FDCC -E436 FDCD -E437 FDCE -E438 FDCF -E439 FDD0 -E43A FDD1 -E43B FDD2 -E43C FDD3 -E43D FDD4 -E43E FDD5 -E43F FDD6 -E440 FDD7 -E441 FDD8 -E442 FDD9 -E443 FDDA -E444 FDDB -E445 FDDC -E446 FDDD -E447 FDDE -E448 FDDF -E449 FDE0 -E44A FDE1 -E44B FDE2 -E44C FDE3 -E44D FDE4 -E44E FDE5 -E44F FDE6 -E450 FDE7 -E451 FDE8 -E452 FDE9 -E453 FDEA -E454 FDEB -E455 FDEC -E456 FDED -E457 FDEE -E458 FDEF -E459 FDF0 -E45A FDF1 -E45B FDF2 -E45C FDF3 -E45D FDF4 -E45E FDF5 -E45F FDF6 -E460 FDF7 -E461 FDF8 -E462 FDF9 -E463 FDFA -E464 FDFB -E465 FDFC -E466 FDFD -E467 FDFE -E468 FEA1 -E469 FEA2 -E46A FEA3 -E46B FEA4 -E46C FEA5 -E46D FEA6 -E46E FEA7 -E46F FEA8 -E470 FEA9 -E471 FEAA -E472 FEAB -E473 FEAC -E474 FEAD -E475 FEAE -E476 FEAF -E477 FEB0 -E478 FEB1 -E479 FEB2 -E47A FEB3 -E47B FEB4 -E47C FEB5 -E47D FEB6 -E47E FEB7 -E47F FEB8 -E480 FEB9 -E481 FEBA -E482 FEBB -E483 FEBC -E484 FEBD -E485 FEBE -E486 FEBF -E487 FEC0 -E488 FEC1 -E489 FEC2 -E48A FEC3 -E48B FEC4 -E48C FEC5 -E48D FEC6 -E48E FEC7 -E48F FEC8 -E490 FEC9 -E491 FECA -E492 FECB -E493 FECC -E494 FECD -E495 FECE -E496 FECF -E497 FED0 -E498 FED1 -E499 FED2 -E49A FED3 -E49B FED4 -E49C FED5 -E49D FED6 -E49E FED7 -E49F FED8 -E4A0 FED9 -E4A1 FEDA -E4A2 FEDB -E4A3 FEDC -E4A4 FEDD -E4A5 FEDE -E4A6 FEDF -E4A7 FEE0 -E4A8 FEE1 -E4A9 FEE2 -E4AA FEE3 -E4AB FEE4 -E4AC FEE5 -E4AD FEE6 -E4AE FEE7 -E4AF FEE8 -E4B0 FEE9 -E4B1 FEEA -E4B2 FEEB -E4B3 FEEC -E4B4 FEED -E4B5 FEEE -E4B6 FEEF -E4B7 FEF0 -E4B8 FEF1 -E4B9 FEF2 -E4BA FEF3 -E4BB FEF4 -E4BC FEF5 -E4BD FEF6 -E4BE FEF7 -E4BF FEF8 -E4C0 FEF9 -E4C1 FEFA -E4C2 FEFB -E4C3 FEFC -E4C4 FEFD -E4C5 FEFE -E4C6 A140 -E4C7 A141 -E4C8 A142 -E4C9 A143 -E4CA A144 -E4CB A145 -E4CC A146 -E4CD A147 -E4CE A148 -E4CF A149 -E4D0 A14A -E4D1 A14B -E4D2 A14C -E4D3 A14D -E4D4 A14E -E4D5 A14F -E4D6 A150 -E4D7 A151 -E4D8 A152 -E4D9 A153 -E4DA A154 -E4DB A155 -E4DC A156 -E4DD A157 -E4DE A158 -E4DF A159 -E4E0 A15A -E4E1 A15B -E4E2 A15C -E4E3 A15D -E4E4 A15E -E4E5 A15F -E4E6 A160 -E4E7 A161 -E4E8 A162 -E4E9 A163 -E4EA A164 -E4EB A165 -E4EC A166 -E4ED A167 -E4EE A168 -E4EF A169 -E4F0 A16A -E4F1 A16B -E4F2 A16C -E4F3 A16D -E4F4 A16E -E4F5 A16F -E4F6 A170 -E4F7 A171 -E4F8 A172 -E4F9 A173 -E4FA A174 -E4FB A175 -E4FC A176 -E4FD A177 -E4FE A178 -E4FF A179 -E500 A17A -E501 A17B -E502 A17C -E503 A17D -E504 A17E -E505 A180 -E506 A181 
[... ~4,700 removed mapping lines elided: this hunk deletes Unicode→GB18030 map entries for U+E507 (GB18030 A182) through U+F76D, one "-<Unicode hex> <GB18030 hex>" pair per removed line, with two-byte GB18030 codes (e.g. A182) over most of the Private Use Area and four-byte codes (e.g. 8336C739) interspersed ...]
8339D638 -F76E 8339D639 -F76F 8339D730 -F770 8339D731 -F771 8339D732 -F772 8339D733 -F773 8339D734 -F774 8339D735 -F775 8339D736 -F776 8339D737 -F777 8339D738 -F778 8339D739 -F779 8339D830 -F77A 8339D831 -F77B 8339D832 -F77C 8339D833 -F77D 8339D834 -F77E 8339D835 -F77F 8339D836 -F780 8339D837 -F781 8339D838 -F782 8339D839 -F783 8339D930 -F784 8339D931 -F785 8339D932 -F786 8339D933 -F787 8339D934 -F788 8339D935 -F789 8339D936 -F78A 8339D937 -F78B 8339D938 -F78C 8339D939 -F78D 8339DA30 -F78E 8339DA31 -F78F 8339DA32 -F790 8339DA33 -F791 8339DA34 -F792 8339DA35 -F793 8339DA36 -F794 8339DA37 -F795 8339DA38 -F796 8339DA39 -F797 8339DB30 -F798 8339DB31 -F799 8339DB32 -F79A 8339DB33 -F79B 8339DB34 -F79C 8339DB35 -F79D 8339DB36 -F79E 8339DB37 -F79F 8339DB38 -F7A0 8339DB39 -F7A1 8339DC30 -F7A2 8339DC31 -F7A3 8339DC32 -F7A4 8339DC33 -F7A5 8339DC34 -F7A6 8339DC35 -F7A7 8339DC36 -F7A8 8339DC37 -F7A9 8339DC38 -F7AA 8339DC39 -F7AB 8339DD30 -F7AC 8339DD31 -F7AD 8339DD32 -F7AE 8339DD33 -F7AF 8339DD34 -F7B0 8339DD35 -F7B1 8339DD36 -F7B2 8339DD37 -F7B3 8339DD38 -F7B4 8339DD39 -F7B5 8339DE30 -F7B6 8339DE31 -F7B7 8339DE32 -F7B8 8339DE33 -F7B9 8339DE34 -F7BA 8339DE35 -F7BB 8339DE36 -F7BC 8339DE37 -F7BD 8339DE38 -F7BE 8339DE39 -F7BF 8339DF30 -F7C0 8339DF31 -F7C1 8339DF32 -F7C2 8339DF33 -F7C3 8339DF34 -F7C4 8339DF35 -F7C5 8339DF36 -F7C6 8339DF37 -F7C7 8339DF38 -F7C8 8339DF39 -F7C9 8339E030 -F7CA 8339E031 -F7CB 8339E032 -F7CC 8339E033 -F7CD 8339E034 -F7CE 8339E035 -F7CF 8339E036 -F7D0 8339E037 -F7D1 8339E038 -F7D2 8339E039 -F7D3 8339E130 -F7D4 8339E131 -F7D5 8339E132 -F7D6 8339E133 -F7D7 8339E134 -F7D8 8339E135 -F7D9 8339E136 -F7DA 8339E137 -F7DB 8339E138 -F7DC 8339E139 -F7DD 8339E230 -F7DE 8339E231 -F7DF 8339E232 -F7E0 8339E233 -F7E1 8339E234 -F7E2 8339E235 -F7E3 8339E236 -F7E4 8339E237 -F7E5 8339E238 -F7E6 8339E239 -F7E7 8339E330 -F7E8 8339E331 -F7E9 8339E332 -F7EA 8339E333 -F7EB 8339E334 -F7EC 8339E335 -F7ED 8339E336 -F7EE 8339E337 -F7EF 8339E338 -F7F0 8339E339 -F7F1 8339E430 -F7F2 8339E431 -F7F3 8339E432 -F7F4 8339E433 -F7F5 8339E434 -F7F6 8339E435 -F7F7 8339E436 -F7F8 8339E437 -F7F9 8339E438 -F7FA 8339E439 -F7FB 8339E530 -F7FC 8339E531 -F7FD 8339E532 -F7FE 8339E533 -F7FF 8339E534 -F800 8339E535 -F801 8339E536 -F802 8339E537 -F803 8339E538 -F804 8339E539 -F805 8339E630 -F806 8339E631 -F807 8339E632 -F808 8339E633 -F809 8339E634 -F80A 8339E635 -F80B 8339E636 -F80C 8339E637 -F80D 8339E638 -F80E 8339E639 -F80F 8339E730 -F810 8339E731 -F811 8339E732 -F812 8339E733 -F813 8339E734 -F814 8339E735 -F815 8339E736 -F816 8339E737 -F817 8339E738 -F818 8339E739 -F819 8339E830 -F81A 8339E831 -F81B 8339E832 -F81C 8339E833 -F81D 8339E834 -F81E 8339E835 -F81F 8339E836 -F820 8339E837 -F821 8339E838 -F822 8339E839 -F823 8339E930 -F824 8339E931 -F825 8339E932 -F826 8339E933 -F827 8339E934 -F828 8339E935 -F829 8339E936 -F82A 8339E937 -F82B 8339E938 -F82C 8339E939 -F82D 8339EA30 -F82E 8339EA31 -F82F 8339EA32 -F830 8339EA33 -F831 8339EA34 -F832 8339EA35 -F833 8339EA36 -F834 8339EA37 -F835 8339EA38 -F836 8339EA39 -F837 8339EB30 -F838 8339EB31 -F839 8339EB32 -F83A 8339EB33 -F83B 8339EB34 -F83C 8339EB35 -F83D 8339EB36 -F83E 8339EB37 -F83F 8339EB38 -F840 8339EB39 -F841 8339EC30 -F842 8339EC31 -F843 8339EC32 -F844 8339EC33 -F845 8339EC34 -F846 8339EC35 -F847 8339EC36 -F848 8339EC37 -F849 8339EC38 -F84A 8339EC39 -F84B 8339ED30 -F84C 8339ED31 -F84D 8339ED32 -F84E 8339ED33 -F84F 8339ED34 -F850 8339ED35 -F851 8339ED36 -F852 8339ED37 -F853 8339ED38 -F854 8339ED39 -F855 8339EE30 -F856 8339EE31 -F857 8339EE32 -F858 8339EE33 -F859 8339EE34 -F85A 
8339EE35 -F85B 8339EE36 -F85C 8339EE37 -F85D 8339EE38 -F85E 8339EE39 -F85F 8339EF30 -F860 8339EF31 -F861 8339EF32 -F862 8339EF33 -F863 8339EF34 -F864 8339EF35 -F865 8339EF36 -F866 8339EF37 -F867 8339EF38 -F868 8339EF39 -F869 8339F030 -F86A 8339F031 -F86B 8339F032 -F86C 8339F033 -F86D 8339F034 -F86E 8339F035 -F86F 8339F036 -F870 8339F037 -F871 8339F038 -F872 8339F039 -F873 8339F130 -F874 8339F131 -F875 8339F132 -F876 8339F133 -F877 8339F134 -F878 8339F135 -F879 8339F136 -F87A 8339F137 -F87B 8339F138 -F87C 8339F139 -F87D 8339F230 -F87E 8339F231 -F87F 8339F232 -F880 8339F233 -F881 8339F234 -F882 8339F235 -F883 8339F236 -F884 8339F237 -F885 8339F238 -F886 8339F239 -F887 8339F330 -F888 8339F331 -F889 8339F332 -F88A 8339F333 -F88B 8339F334 -F88C 8339F335 -F88D 8339F336 -F88E 8339F337 -F88F 8339F338 -F890 8339F339 -F891 8339F430 -F892 8339F431 -F893 8339F432 -F894 8339F433 -F895 8339F434 -F896 8339F435 -F897 8339F436 -F898 8339F437 -F899 8339F438 -F89A 8339F439 -F89B 8339F530 -F89C 8339F531 -F89D 8339F532 -F89E 8339F533 -F89F 8339F534 -F8A0 8339F535 -F8A1 8339F536 -F8A2 8339F537 -F8A3 8339F538 -F8A4 8339F539 -F8A5 8339F630 -F8A6 8339F631 -F8A7 8339F632 -F8A8 8339F633 -F8A9 8339F634 -F8AA 8339F635 -F8AB 8339F636 -F8AC 8339F637 -F8AD 8339F638 -F8AE 8339F639 -F8AF 8339F730 -F8B0 8339F731 -F8B1 8339F732 -F8B2 8339F733 -F8B3 8339F734 -F8B4 8339F735 -F8B5 8339F736 -F8B6 8339F737 -F8B7 8339F738 -F8B8 8339F739 -F8B9 8339F830 -F8BA 8339F831 -F8BB 8339F832 -F8BC 8339F833 -F8BD 8339F834 -F8BE 8339F835 -F8BF 8339F836 -F8C0 8339F837 -F8C1 8339F838 -F8C2 8339F839 -F8C3 8339F930 -F8C4 8339F931 -F8C5 8339F932 -F8C6 8339F933 -F8C7 8339F934 -F8C8 8339F935 -F8C9 8339F936 -F8CA 8339F937 -F8CB 8339F938 -F8CC 8339F939 -F8CD 8339FA30 -F8CE 8339FA31 -F8CF 8339FA32 -F8D0 8339FA33 -F8D1 8339FA34 -F8D2 8339FA35 -F8D3 8339FA36 -F8D4 8339FA37 -F8D5 8339FA38 -F8D6 8339FA39 -F8D7 8339FB30 -F8D8 8339FB31 -F8D9 8339FB32 -F8DA 8339FB33 -F8DB 8339FB34 -F8DC 8339FB35 -F8DD 8339FB36 -F8DE 8339FB37 -F8DF 8339FB38 -F8E0 8339FB39 -F8E1 8339FC30 -F8E2 8339FC31 -F8E3 8339FC32 -F8E4 8339FC33 -F8E5 8339FC34 -F8E6 8339FC35 -F8E7 8339FC36 -F8E8 8339FC37 -F8E9 8339FC38 -F8EA 8339FC39 -F8EB 8339FD30 -F8EC 8339FD31 -F8ED 8339FD32 -F8EE 8339FD33 -F8EF 8339FD34 -F8F0 8339FD35 -F8F1 8339FD36 -F8F2 8339FD37 -F8F3 8339FD38 -F8F4 8339FD39 -F8F5 8339FE30 -F8F6 8339FE31 -F8F7 8339FE32 -F8F8 8339FE33 -F8F9 8339FE34 -F8FA 8339FE35 -F8FB 8339FE36 -F8FC 8339FE37 -F8FD 8339FE38 -F8FE 8339FE39 -F8FF 84308130 -F900 84308131 -F901 84308132 -F902 84308133 -F903 84308134 -F904 84308135 -F905 84308136 -F906 84308137 -F907 84308138 -F908 84308139 -F909 84308230 -F90A 84308231 -F90B 84308232 -F90C 84308233 -F90D 84308234 -F90E 84308235 -F90F 84308236 -F910 84308237 -F911 84308238 -F912 84308239 -F913 84308330 -F914 84308331 -F915 84308332 -F916 84308333 -F917 84308334 -F918 84308335 -F919 84308336 -F91A 84308337 -F91B 84308338 -F91C 84308339 -F91D 84308430 -F91E 84308431 -F91F 84308432 -F920 84308433 -F921 84308434 -F922 84308435 -F923 84308436 -F924 84308437 -F925 84308438 -F926 84308439 -F927 84308530 -F928 84308531 -F929 84308532 -F92A 84308533 -F92B 84308534 -F92C FD9C -F92D 84308535 -F92E 84308536 -F92F 84308537 -F930 84308538 -F931 84308539 -F932 84308630 -F933 84308631 -F934 84308632 -F935 84308633 -F936 84308634 -F937 84308635 -F938 84308636 -F939 84308637 -F93A 84308638 -F93B 84308639 -F93C 84308730 -F93D 84308731 -F93E 84308732 -F93F 84308733 -F940 84308734 -F941 84308735 -F942 84308736 -F943 84308737 -F944 84308738 -F945 84308739 -F946 84308830 -F947 
84308831 -F948 84308832 -F949 84308833 -F94A 84308834 -F94B 84308835 -F94C 84308836 -F94D 84308837 -F94E 84308838 -F94F 84308839 -F950 84308930 -F951 84308931 -F952 84308932 -F953 84308933 -F954 84308934 -F955 84308935 -F956 84308936 -F957 84308937 -F958 84308938 -F959 84308939 -F95A 84308A30 -F95B 84308A31 -F95C 84308A32 -F95D 84308A33 -F95E 84308A34 -F95F 84308A35 -F960 84308A36 -F961 84308A37 -F962 84308A38 -F963 84308A39 -F964 84308B30 -F965 84308B31 -F966 84308B32 -F967 84308B33 -F968 84308B34 -F969 84308B35 -F96A 84308B36 -F96B 84308B37 -F96C 84308B38 -F96D 84308B39 -F96E 84308C30 -F96F 84308C31 -F970 84308C32 -F971 84308C33 -F972 84308C34 -F973 84308C35 -F974 84308C36 -F975 84308C37 -F976 84308C38 -F977 84308C39 -F978 84308D30 -F979 FD9D -F97A 84308D31 -F97B 84308D32 -F97C 84308D33 -F97D 84308D34 -F97E 84308D35 -F97F 84308D36 -F980 84308D37 -F981 84308D38 -F982 84308D39 -F983 84308E30 -F984 84308E31 -F985 84308E32 -F986 84308E33 -F987 84308E34 -F988 84308E35 -F989 84308E36 -F98A 84308E37 -F98B 84308E38 -F98C 84308E39 -F98D 84308F30 -F98E 84308F31 -F98F 84308F32 -F990 84308F33 -F991 84308F34 -F992 84308F35 -F993 84308F36 -F994 84308F37 -F995 FD9E -F996 84308F38 -F997 84308F39 -F998 84309030 -F999 84309031 -F99A 84309032 -F99B 84309033 -F99C 84309034 -F99D 84309035 -F99E 84309036 -F99F 84309037 -F9A0 84309038 -F9A1 84309039 -F9A2 84309130 -F9A3 84309131 -F9A4 84309132 -F9A5 84309133 -F9A6 84309134 -F9A7 84309135 -F9A8 84309136 -F9A9 84309137 -F9AA 84309138 -F9AB 84309139 -F9AC 84309230 -F9AD 84309231 -F9AE 84309232 -F9AF 84309233 -F9B0 84309234 -F9B1 84309235 -F9B2 84309236 -F9B3 84309237 -F9B4 84309238 -F9B5 84309239 -F9B6 84309330 -F9B7 84309331 -F9B8 84309332 -F9B9 84309333 -F9BA 84309334 -F9BB 84309335 -F9BC 84309336 -F9BD 84309337 -F9BE 84309338 -F9BF 84309339 -F9C0 84309430 -F9C1 84309431 -F9C2 84309432 -F9C3 84309433 -F9C4 84309434 -F9C5 84309435 -F9C6 84309436 -F9C7 84309437 -F9C8 84309438 -F9C9 84309439 -F9CA 84309530 -F9CB 84309531 -F9CC 84309532 -F9CD 84309533 -F9CE 84309534 -F9CF 84309535 -F9D0 84309536 -F9D1 84309537 -F9D2 84309538 -F9D3 84309539 -F9D4 84309630 -F9D5 84309631 -F9D6 84309632 -F9D7 84309633 -F9D8 84309634 -F9D9 84309635 -F9DA 84309636 -F9DB 84309637 -F9DC 84309638 -F9DD 84309639 -F9DE 84309730 -F9DF 84309731 -F9E0 84309732 -F9E1 84309733 -F9E2 84309734 -F9E3 84309735 -F9E4 84309736 -F9E5 84309737 -F9E6 84309738 -F9E7 FD9F -F9E8 84309739 -F9E9 84309830 -F9EA 84309831 -F9EB 84309832 -F9EC 84309833 -F9ED 84309834 -F9EE 84309835 -F9EF 84309836 -F9F0 84309837 -F9F1 FDA0 -F9F2 84309838 -F9F3 84309839 -F9F4 84309930 -F9F5 84309931 -F9F6 84309932 -F9F7 84309933 -F9F8 84309934 -F9F9 84309935 -F9FA 84309936 -F9FB 84309937 -F9FC 84309938 -F9FD 84309939 -F9FE 84309A30 -F9FF 84309A31 -FA00 84309A32 -FA01 84309A33 -FA02 84309A34 -FA03 84309A35 -FA04 84309A36 -FA05 84309A37 -FA06 84309A38 -FA07 84309A39 -FA08 84309B30 -FA09 84309B31 -FA0A 84309B32 -FA0B 84309B33 -FA0C FE40 -FA0D FE41 -FA0E FE42 -FA0F FE43 -FA10 84309B34 -FA11 FE44 -FA12 84309B35 -FA13 FE45 -FA14 FE46 -FA15 84309B36 -FA16 84309B37 -FA17 84309B38 -FA18 FE47 -FA19 84309B39 -FA1A 84309C30 -FA1B 84309C31 -FA1C 84309C32 -FA1D 84309C33 -FA1E 84309C34 -FA1F FE48 -FA20 FE49 -FA21 FE4A -FA22 84309C35 -FA23 FE4B -FA24 FE4C -FA25 84309C36 -FA26 84309C37 -FA27 FE4D -FA28 FE4E -FA29 FE4F -FA2A 84309C38 -FA2B 84309C39 -FA2C 84309D30 -FA2D 84309D31 -FA2E 84309D32 -FA2F 84309D33 -FA30 84309D34 -FA31 84309D35 -FA32 84309D36 -FA33 84309D37 -FA34 84309D38 -FA35 84309D39 -FA36 84309E30 -FA37 84309E31 -FA38 84309E32 -FA39 
84309E33 -FA3A 84309E34 -FA3B 84309E35 -FA3C 84309E36 -FA3D 84309E37 -FA3E 84309E38 -FA3F 84309E39 -FA40 84309F30 -FA41 84309F31 -FA42 84309F32 -FA43 84309F33 -FA44 84309F34 -FA45 84309F35 -FA46 84309F36 -FA47 84309F37 -FA48 84309F38 -FA49 84309F39 -FA4A 8430A030 -FA4B 8430A031 -FA4C 8430A032 -FA4D 8430A033 -FA4E 8430A034 -FA4F 8430A035 -FA50 8430A036 -FA51 8430A037 -FA52 8430A038 -FA53 8430A039 -FA54 8430A130 -FA55 8430A131 -FA56 8430A132 -FA57 8430A133 -FA58 8430A134 -FA59 8430A135 -FA5A 8430A136 -FA5B 8430A137 -FA5C 8430A138 -FA5D 8430A139 -FA5E 8430A230 -FA5F 8430A231 -FA60 8430A232 -FA61 8430A233 -FA62 8430A234 -FA63 8430A235 -FA64 8430A236 -FA65 8430A237 -FA66 8430A238 -FA67 8430A239 -FA68 8430A330 -FA69 8430A331 -FA6A 8430A332 -FA6B 8430A333 -FA6C 8430A334 -FA6D 8430A335 -FA6E 8430A336 -FA6F 8430A337 -FA70 8430A338 -FA71 8430A339 -FA72 8430A430 -FA73 8430A431 -FA74 8430A432 -FA75 8430A433 -FA76 8430A434 -FA77 8430A435 -FA78 8430A436 -FA79 8430A437 -FA7A 8430A438 -FA7B 8430A439 -FA7C 8430A530 -FA7D 8430A531 -FA7E 8430A532 -FA7F 8430A533 -FA80 8430A534 -FA81 8430A535 -FA82 8430A536 -FA83 8430A537 -FA84 8430A538 -FA85 8430A539 -FA86 8430A630 -FA87 8430A631 -FA88 8430A632 -FA89 8430A633 -FA8A 8430A634 -FA8B 8430A635 -FA8C 8430A636 -FA8D 8430A637 -FA8E 8430A638 -FA8F 8430A639 -FA90 8430A730 -FA91 8430A731 -FA92 8430A732 -FA93 8430A733 -FA94 8430A734 -FA95 8430A735 -FA96 8430A736 -FA97 8430A737 -FA98 8430A738 -FA99 8430A739 -FA9A 8430A830 -FA9B 8430A831 -FA9C 8430A832 -FA9D 8430A833 -FA9E 8430A834 -FA9F 8430A835 -FAA0 8430A836 -FAA1 8430A837 -FAA2 8430A838 -FAA3 8430A839 -FAA4 8430A930 -FAA5 8430A931 -FAA6 8430A932 -FAA7 8430A933 -FAA8 8430A934 -FAA9 8430A935 -FAAA 8430A936 -FAAB 8430A937 -FAAC 8430A938 -FAAD 8430A939 -FAAE 8430AA30 -FAAF 8430AA31 -FAB0 8430AA32 -FAB1 8430AA33 -FAB2 8430AA34 -FAB3 8430AA35 -FAB4 8430AA36 -FAB5 8430AA37 -FAB6 8430AA38 -FAB7 8430AA39 -FAB8 8430AB30 -FAB9 8430AB31 -FABA 8430AB32 -FABB 8430AB33 -FABC 8430AB34 -FABD 8430AB35 -FABE 8430AB36 -FABF 8430AB37 -FAC0 8430AB38 -FAC1 8430AB39 -FAC2 8430AC30 -FAC3 8430AC31 -FAC4 8430AC32 -FAC5 8430AC33 -FAC6 8430AC34 -FAC7 8430AC35 -FAC8 8430AC36 -FAC9 8430AC37 -FACA 8430AC38 -FACB 8430AC39 -FACC 8430AD30 -FACD 8430AD31 -FACE 8430AD32 -FACF 8430AD33 -FAD0 8430AD34 -FAD1 8430AD35 -FAD2 8430AD36 -FAD3 8430AD37 -FAD4 8430AD38 -FAD5 8430AD39 -FAD6 8430AE30 -FAD7 8430AE31 -FAD8 8430AE32 -FAD9 8430AE33 -FADA 8430AE34 -FADB 8430AE35 -FADC 8430AE36 -FADD 8430AE37 -FADE 8430AE38 -FADF 8430AE39 -FAE0 8430AF30 -FAE1 8430AF31 -FAE2 8430AF32 -FAE3 8430AF33 -FAE4 8430AF34 -FAE5 8430AF35 -FAE6 8430AF36 -FAE7 8430AF37 -FAE8 8430AF38 -FAE9 8430AF39 -FAEA 8430B030 -FAEB 8430B031 -FAEC 8430B032 -FAED 8430B033 -FAEE 8430B034 -FAEF 8430B035 -FAF0 8430B036 -FAF1 8430B037 -FAF2 8430B038 -FAF3 8430B039 -FAF4 8430B130 -FAF5 8430B131 -FAF6 8430B132 -FAF7 8430B133 -FAF8 8430B134 -FAF9 8430B135 -FAFA 8430B136 -FAFB 8430B137 -FAFC 8430B138 -FAFD 8430B139 -FAFE 8430B230 -FAFF 8430B231 -FB00 8430B232 -FB01 8430B233 -FB02 8430B234 -FB03 8430B235 -FB04 8430B236 -FB05 8430B237 -FB06 8430B238 -FB07 8430B239 -FB08 8430B330 -FB09 8430B331 -FB0A 8430B332 -FB0B 8430B333 -FB0C 8430B334 -FB0D 8430B335 -FB0E 8430B336 -FB0F 8430B337 -FB10 8430B338 -FB11 8430B339 -FB12 8430B430 -FB13 8430B431 -FB14 8430B432 -FB15 8430B433 -FB16 8430B434 -FB17 8430B435 -FB18 8430B436 -FB19 8430B437 -FB1A 8430B438 -FB1B 8430B439 -FB1C 8430B530 -FB1D 8430B531 -FB1E 8430B532 -FB1F 8430B533 -FB20 8430B534 -FB21 8430B535 -FB22 8430B536 -FB23 8430B537 -FB24 8430B538 -FB25 8430B539 -FB26 
8430B630 -FB27 8430B631 -FB28 8430B632 -FB29 8430B633 -FB2A 8430B634 -FB2B 8430B635 -FB2C 8430B636 -FB2D 8430B637 -FB2E 8430B638 -FB2F 8430B639 -FB30 8430B730 -FB31 8430B731 -FB32 8430B732 -FB33 8430B733 -FB34 8430B734 -FB35 8430B735 -FB36 8430B736 -FB37 8430B737 -FB38 8430B738 -FB39 8430B739 -FB3A 8430B830 -FB3B 8430B831 -FB3C 8430B832 -FB3D 8430B833 -FB3E 8430B834 -FB3F 8430B835 -FB40 8430B836 -FB41 8430B837 -FB42 8430B838 -FB43 8430B839 -FB44 8430B930 -FB45 8430B931 -FB46 8430B932 -FB47 8430B933 -FB48 8430B934 -FB49 8430B935 -FB4A 8430B936 -FB4B 8430B937 -FB4C 8430B938 -FB4D 8430B939 -FB4E 8430BA30 -FB4F 8430BA31 -FB50 8430BA32 -FB51 8430BA33 -FB52 8430BA34 -FB53 8430BA35 -FB54 8430BA36 -FB55 8430BA37 -FB56 8430BA38 -FB57 8430BA39 -FB58 8430BB30 -FB59 8430BB31 -FB5A 8430BB32 -FB5B 8430BB33 -FB5C 8430BB34 -FB5D 8430BB35 -FB5E 8430BB36 -FB5F 8430BB37 -FB60 8430BB38 -FB61 8430BB39 -FB62 8430BC30 -FB63 8430BC31 -FB64 8430BC32 -FB65 8430BC33 -FB66 8430BC34 -FB67 8430BC35 -FB68 8430BC36 -FB69 8430BC37 -FB6A 8430BC38 -FB6B 8430BC39 -FB6C 8430BD30 -FB6D 8430BD31 -FB6E 8430BD32 -FB6F 8430BD33 -FB70 8430BD34 -FB71 8430BD35 -FB72 8430BD36 -FB73 8430BD37 -FB74 8430BD38 -FB75 8430BD39 -FB76 8430BE30 -FB77 8430BE31 -FB78 8430BE32 -FB79 8430BE33 -FB7A 8430BE34 -FB7B 8430BE35 -FB7C 8430BE36 -FB7D 8430BE37 -FB7E 8430BE38 -FB7F 8430BE39 -FB80 8430BF30 -FB81 8430BF31 -FB82 8430BF32 -FB83 8430BF33 -FB84 8430BF34 -FB85 8430BF35 -FB86 8430BF36 -FB87 8430BF37 -FB88 8430BF38 -FB89 8430BF39 -FB8A 8430C030 -FB8B 8430C031 -FB8C 8430C032 -FB8D 8430C033 -FB8E 8430C034 -FB8F 8430C035 -FB90 8430C036 -FB91 8430C037 -FB92 8430C038 -FB93 8430C039 -FB94 8430C130 -FB95 8430C131 -FB96 8430C132 -FB97 8430C133 -FB98 8430C134 -FB99 8430C135 -FB9A 8430C136 -FB9B 8430C137 -FB9C 8430C138 -FB9D 8430C139 -FB9E 8430C230 -FB9F 8430C231 -FBA0 8430C232 -FBA1 8430C233 -FBA2 8430C234 -FBA3 8430C235 -FBA4 8430C236 -FBA5 8430C237 -FBA6 8430C238 -FBA7 8430C239 -FBA8 8430C330 -FBA9 8430C331 -FBAA 8430C332 -FBAB 8430C333 -FBAC 8430C334 -FBAD 8430C335 -FBAE 8430C336 -FBAF 8430C337 -FBB0 8430C338 -FBB1 8430C339 -FBB2 8430C430 -FBB3 8430C431 -FBB4 8430C432 -FBB5 8430C433 -FBB6 8430C434 -FBB7 8430C435 -FBB8 8430C436 -FBB9 8430C437 -FBBA 8430C438 -FBBB 8430C439 -FBBC 8430C530 -FBBD 8430C531 -FBBE 8430C532 -FBBF 8430C533 -FBC0 8430C534 -FBC1 8430C535 -FBC2 8430C536 -FBC3 8430C537 -FBC4 8430C538 -FBC5 8430C539 -FBC6 8430C630 -FBC7 8430C631 -FBC8 8430C632 -FBC9 8430C633 -FBCA 8430C634 -FBCB 8430C635 -FBCC 8430C636 -FBCD 8430C637 -FBCE 8430C638 -FBCF 8430C639 -FBD0 8430C730 -FBD1 8430C731 -FBD2 8430C732 -FBD3 8430C733 -FBD4 8430C734 -FBD5 8430C735 -FBD6 8430C736 -FBD7 8430C737 -FBD8 8430C738 -FBD9 8430C739 -FBDA 8430C830 -FBDB 8430C831 -FBDC 8430C832 -FBDD 8430C833 -FBDE 8430C834 -FBDF 8430C835 -FBE0 8430C836 -FBE1 8430C837 -FBE2 8430C838 -FBE3 8430C839 -FBE4 8430C930 -FBE5 8430C931 -FBE6 8430C932 -FBE7 8430C933 -FBE8 8430C934 -FBE9 8430C935 -FBEA 8430C936 -FBEB 8430C937 -FBEC 8430C938 -FBED 8430C939 -FBEE 8430CA30 -FBEF 8430CA31 -FBF0 8430CA32 -FBF1 8430CA33 -FBF2 8430CA34 -FBF3 8430CA35 -FBF4 8430CA36 -FBF5 8430CA37 -FBF6 8430CA38 -FBF7 8430CA39 -FBF8 8430CB30 -FBF9 8430CB31 -FBFA 8430CB32 -FBFB 8430CB33 -FBFC 8430CB34 -FBFD 8430CB35 -FBFE 8430CB36 -FBFF 8430CB37 -FC00 8430CB38 -FC01 8430CB39 -FC02 8430CC30 -FC03 8430CC31 -FC04 8430CC32 -FC05 8430CC33 -FC06 8430CC34 -FC07 8430CC35 -FC08 8430CC36 -FC09 8430CC37 -FC0A 8430CC38 -FC0B 8430CC39 -FC0C 8430CD30 -FC0D 8430CD31 -FC0E 8430CD32 -FC0F 8430CD33 -FC10 8430CD34 -FC11 8430CD35 -FC12 8430CD36 -FC13 
8430CD37 -FC14 8430CD38 -FC15 8430CD39 -FC16 8430CE30 -FC17 8430CE31 -FC18 8430CE32 -FC19 8430CE33 -FC1A 8430CE34 -FC1B 8430CE35 -FC1C 8430CE36 -FC1D 8430CE37 -FC1E 8430CE38 -FC1F 8430CE39 -FC20 8430CF30 -FC21 8430CF31 -FC22 8430CF32 -FC23 8430CF33 -FC24 8430CF34 -FC25 8430CF35 -FC26 8430CF36 -FC27 8430CF37 -FC28 8430CF38 -FC29 8430CF39 -FC2A 8430D030 -FC2B 8430D031 -FC2C 8430D032 -FC2D 8430D033 -FC2E 8430D034 -FC2F 8430D035 -FC30 8430D036 -FC31 8430D037 -FC32 8430D038 -FC33 8430D039 -FC34 8430D130 -FC35 8430D131 -FC36 8430D132 -FC37 8430D133 -FC38 8430D134 -FC39 8430D135 -FC3A 8430D136 -FC3B 8430D137 -FC3C 8430D138 -FC3D 8430D139 -FC3E 8430D230 -FC3F 8430D231 -FC40 8430D232 -FC41 8430D233 -FC42 8430D234 -FC43 8430D235 -FC44 8430D236 -FC45 8430D237 -FC46 8430D238 -FC47 8430D239 -FC48 8430D330 -FC49 8430D331 -FC4A 8430D332 -FC4B 8430D333 -FC4C 8430D334 -FC4D 8430D335 -FC4E 8430D336 -FC4F 8430D337 -FC50 8430D338 -FC51 8430D339 -FC52 8430D430 -FC53 8430D431 -FC54 8430D432 -FC55 8430D433 -FC56 8430D434 -FC57 8430D435 -FC58 8430D436 -FC59 8430D437 -FC5A 8430D438 -FC5B 8430D439 -FC5C 8430D530 -FC5D 8430D531 -FC5E 8430D532 -FC5F 8430D533 -FC60 8430D534 -FC61 8430D535 -FC62 8430D536 -FC63 8430D537 -FC64 8430D538 -FC65 8430D539 -FC66 8430D630 -FC67 8430D631 -FC68 8430D632 -FC69 8430D633 -FC6A 8430D634 -FC6B 8430D635 -FC6C 8430D636 -FC6D 8430D637 -FC6E 8430D638 -FC6F 8430D639 -FC70 8430D730 -FC71 8430D731 -FC72 8430D732 -FC73 8430D733 -FC74 8430D734 -FC75 8430D735 -FC76 8430D736 -FC77 8430D737 -FC78 8430D738 -FC79 8430D739 -FC7A 8430D830 -FC7B 8430D831 -FC7C 8430D832 -FC7D 8430D833 -FC7E 8430D834 -FC7F 8430D835 -FC80 8430D836 -FC81 8430D837 -FC82 8430D838 -FC83 8430D839 -FC84 8430D930 -FC85 8430D931 -FC86 8430D932 -FC87 8430D933 -FC88 8430D934 -FC89 8430D935 -FC8A 8430D936 -FC8B 8430D937 -FC8C 8430D938 -FC8D 8430D939 -FC8E 8430DA30 -FC8F 8430DA31 -FC90 8430DA32 -FC91 8430DA33 -FC92 8430DA34 -FC93 8430DA35 -FC94 8430DA36 -FC95 8430DA37 -FC96 8430DA38 -FC97 8430DA39 -FC98 8430DB30 -FC99 8430DB31 -FC9A 8430DB32 -FC9B 8430DB33 -FC9C 8430DB34 -FC9D 8430DB35 -FC9E 8430DB36 -FC9F 8430DB37 -FCA0 8430DB38 -FCA1 8430DB39 -FCA2 8430DC30 -FCA3 8430DC31 -FCA4 8430DC32 -FCA5 8430DC33 -FCA6 8430DC34 -FCA7 8430DC35 -FCA8 8430DC36 -FCA9 8430DC37 -FCAA 8430DC38 -FCAB 8430DC39 -FCAC 8430DD30 -FCAD 8430DD31 -FCAE 8430DD32 -FCAF 8430DD33 -FCB0 8430DD34 -FCB1 8430DD35 -FCB2 8430DD36 -FCB3 8430DD37 -FCB4 8430DD38 -FCB5 8430DD39 -FCB6 8430DE30 -FCB7 8430DE31 -FCB8 8430DE32 -FCB9 8430DE33 -FCBA 8430DE34 -FCBB 8430DE35 -FCBC 8430DE36 -FCBD 8430DE37 -FCBE 8430DE38 -FCBF 8430DE39 -FCC0 8430DF30 -FCC1 8430DF31 -FCC2 8430DF32 -FCC3 8430DF33 -FCC4 8430DF34 -FCC5 8430DF35 -FCC6 8430DF36 -FCC7 8430DF37 -FCC8 8430DF38 -FCC9 8430DF39 -FCCA 8430E030 -FCCB 8430E031 -FCCC 8430E032 -FCCD 8430E033 -FCCE 8430E034 -FCCF 8430E035 -FCD0 8430E036 -FCD1 8430E037 -FCD2 8430E038 -FCD3 8430E039 -FCD4 8430E130 -FCD5 8430E131 -FCD6 8430E132 -FCD7 8430E133 -FCD8 8430E134 -FCD9 8430E135 -FCDA 8430E136 -FCDB 8430E137 -FCDC 8430E138 -FCDD 8430E139 -FCDE 8430E230 -FCDF 8430E231 -FCE0 8430E232 -FCE1 8430E233 -FCE2 8430E234 -FCE3 8430E235 -FCE4 8430E236 -FCE5 8430E237 -FCE6 8430E238 -FCE7 8430E239 -FCE8 8430E330 -FCE9 8430E331 -FCEA 8430E332 -FCEB 8430E333 -FCEC 8430E334 -FCED 8430E335 -FCEE 8430E336 -FCEF 8430E337 -FCF0 8430E338 -FCF1 8430E339 -FCF2 8430E430 -FCF3 8430E431 -FCF4 8430E432 -FCF5 8430E433 -FCF6 8430E434 -FCF7 8430E435 -FCF8 8430E436 -FCF9 8430E437 -FCFA 8430E438 -FCFB 8430E439 -FCFC 8430E530 -FCFD 8430E531 -FCFE 8430E532 -FCFF 8430E533 -FD00 
8430E534 -FD01 8430E535 -FD02 8430E536 -FD03 8430E537 -FD04 8430E538 -FD05 8430E539 -FD06 8430E630 -FD07 8430E631 -FD08 8430E632 -FD09 8430E633 -FD0A 8430E634 -FD0B 8430E635 -FD0C 8430E636 -FD0D 8430E637 -FD0E 8430E638 -FD0F 8430E639 -FD10 8430E730 -FD11 8430E731 -FD12 8430E732 -FD13 8430E733 -FD14 8430E734 -FD15 8430E735 -FD16 8430E736 -FD17 8430E737 -FD18 8430E738 -FD19 8430E739 -FD1A 8430E830 -FD1B 8430E831 -FD1C 8430E832 -FD1D 8430E833 -FD1E 8430E834 -FD1F 8430E835 -FD20 8430E836 -FD21 8430E837 -FD22 8430E838 -FD23 8430E839 -FD24 8430E930 -FD25 8430E931 -FD26 8430E932 -FD27 8430E933 -FD28 8430E934 -FD29 8430E935 -FD2A 8430E936 -FD2B 8430E937 -FD2C 8430E938 -FD2D 8430E939 -FD2E 8430EA30 -FD2F 8430EA31 -FD30 8430EA32 -FD31 8430EA33 -FD32 8430EA34 -FD33 8430EA35 -FD34 8430EA36 -FD35 8430EA37 -FD36 8430EA38 -FD37 8430EA39 -FD38 8430EB30 -FD39 8430EB31 -FD3A 8430EB32 -FD3B 8430EB33 -FD3C 8430EB34 -FD3D 8430EB35 -FD3E 8430EB36 -FD3F 8430EB37 -FD40 8430EB38 -FD41 8430EB39 -FD42 8430EC30 -FD43 8430EC31 -FD44 8430EC32 -FD45 8430EC33 -FD46 8430EC34 -FD47 8430EC35 -FD48 8430EC36 -FD49 8430EC37 -FD4A 8430EC38 -FD4B 8430EC39 -FD4C 8430ED30 -FD4D 8430ED31 -FD4E 8430ED32 -FD4F 8430ED33 -FD50 8430ED34 -FD51 8430ED35 -FD52 8430ED36 -FD53 8430ED37 -FD54 8430ED38 -FD55 8430ED39 -FD56 8430EE30 -FD57 8430EE31 -FD58 8430EE32 -FD59 8430EE33 -FD5A 8430EE34 -FD5B 8430EE35 -FD5C 8430EE36 -FD5D 8430EE37 -FD5E 8430EE38 -FD5F 8430EE39 -FD60 8430EF30 -FD61 8430EF31 -FD62 8430EF32 -FD63 8430EF33 -FD64 8430EF34 -FD65 8430EF35 -FD66 8430EF36 -FD67 8430EF37 -FD68 8430EF38 -FD69 8430EF39 -FD6A 8430F030 -FD6B 8430F031 -FD6C 8430F032 -FD6D 8430F033 -FD6E 8430F034 -FD6F 8430F035 -FD70 8430F036 -FD71 8430F037 -FD72 8430F038 -FD73 8430F039 -FD74 8430F130 -FD75 8430F131 -FD76 8430F132 -FD77 8430F133 -FD78 8430F134 -FD79 8430F135 -FD7A 8430F136 -FD7B 8430F137 -FD7C 8430F138 -FD7D 8430F139 -FD7E 8430F230 -FD7F 8430F231 -FD80 8430F232 -FD81 8430F233 -FD82 8430F234 -FD83 8430F235 -FD84 8430F236 -FD85 8430F237 -FD86 8430F238 -FD87 8430F239 -FD88 8430F330 -FD89 8430F331 -FD8A 8430F332 -FD8B 8430F333 -FD8C 8430F334 -FD8D 8430F335 -FD8E 8430F336 -FD8F 8430F337 -FD90 8430F338 -FD91 8430F339 -FD92 8430F430 -FD93 8430F431 -FD94 8430F432 -FD95 8430F433 -FD96 8430F434 -FD97 8430F435 -FD98 8430F436 -FD99 8430F437 -FD9A 8430F438 -FD9B 8430F439 -FD9C 8430F530 -FD9D 8430F531 -FD9E 8430F532 -FD9F 8430F533 -FDA0 8430F534 -FDA1 8430F535 -FDA2 8430F536 -FDA3 8430F537 -FDA4 8430F538 -FDA5 8430F539 -FDA6 8430F630 -FDA7 8430F631 -FDA8 8430F632 -FDA9 8430F633 -FDAA 8430F634 -FDAB 8430F635 -FDAC 8430F636 -FDAD 8430F637 -FDAE 8430F638 -FDAF 8430F639 -FDB0 8430F730 -FDB1 8430F731 -FDB2 8430F732 -FDB3 8430F733 -FDB4 8430F734 -FDB5 8430F735 -FDB6 8430F736 -FDB7 8430F737 -FDB8 8430F738 -FDB9 8430F739 -FDBA 8430F830 -FDBB 8430F831 -FDBC 8430F832 -FDBD 8430F833 -FDBE 8430F834 -FDBF 8430F835 -FDC0 8430F836 -FDC1 8430F837 -FDC2 8430F838 -FDC3 8430F839 -FDC4 8430F930 -FDC5 8430F931 -FDC6 8430F932 -FDC7 8430F933 -FDC8 8430F934 -FDC9 8430F935 -FDCA 8430F936 -FDCB 8430F937 -FDCC 8430F938 -FDCD 8430F939 -FDCE 8430FA30 -FDCF 8430FA31 -FDD0 8430FA32 -FDD1 8430FA33 -FDD2 8430FA34 -FDD3 8430FA35 -FDD4 8430FA36 -FDD5 8430FA37 -FDD6 8430FA38 -FDD7 8430FA39 -FDD8 8430FB30 -FDD9 8430FB31 -FDDA 8430FB32 -FDDB 8430FB33 -FDDC 8430FB34 -FDDD 8430FB35 -FDDE 8430FB36 -FDDF 8430FB37 -FDE0 8430FB38 -FDE1 8430FB39 -FDE2 8430FC30 -FDE3 8430FC31 -FDE4 8430FC32 -FDE5 8430FC33 -FDE6 8430FC34 -FDE7 8430FC35 -FDE8 8430FC36 -FDE9 8430FC37 -FDEA 8430FC38 -FDEB 8430FC39 -FDEC 8430FD30 -FDED 
8430FD31 -FDEE 8430FD32 -FDEF 8430FD33 -FDF0 8430FD34 -FDF1 8430FD35 -FDF2 8430FD36 -FDF3 8430FD37 -FDF4 8430FD38 -FDF5 8430FD39 -FDF6 8430FE30 -FDF7 8430FE31 -FDF8 8430FE32 -FDF9 8430FE33 -FDFA 8430FE34 -FDFB 8430FE35 -FDFC 8430FE36 -FDFD 8430FE37 -FDFE 8430FE38 -FDFF 8430FE39 -FE00 84318130 -FE01 84318131 -FE02 84318132 -FE03 84318133 -FE04 84318134 -FE05 84318135 -FE06 84318136 -FE07 84318137 -FE08 84318138 -FE09 84318139 -FE0A 84318230 -FE0B 84318231 -FE0C 84318232 -FE0D 84318233 -FE0E 84318234 -FE0F 84318235 -FE10 84318236 -FE11 84318237 -FE12 84318238 -FE13 84318239 -FE14 84318330 -FE15 84318331 -FE16 84318332 -FE17 84318333 -FE18 84318334 -FE19 84318335 -FE1A 84318336 -FE1B 84318337 -FE1C 84318338 -FE1D 84318339 -FE1E 84318430 -FE1F 84318431 -FE20 84318432 -FE21 84318433 -FE22 84318434 -FE23 84318435 -FE24 84318436 -FE25 84318437 -FE26 84318438 -FE27 84318439 -FE28 84318530 -FE29 84318531 -FE2A 84318532 -FE2B 84318533 -FE2C 84318534 -FE2D 84318535 -FE2E 84318536 -FE2F 84318537 -FE30 A955 -FE31 A6F2 -FE32 84318538 -FE33 A6F4 -FE34 A6F5 -FE35 A6E0 -FE36 A6E1 -FE37 A6F0 -FE38 A6F1 -FE39 A6E2 -FE3A A6E3 -FE3B A6EE -FE3C A6EF -FE3D A6E6 -FE3E A6E7 -FE3F A6E4 -FE40 A6E5 -FE41 A6E8 -FE42 A6E9 -FE43 A6EA -FE44 A6EB -FE45 84318539 -FE46 84318630 -FE47 84318631 -FE48 84318632 -FE49 A968 -FE4A A969 -FE4B A96A -FE4C A96B -FE4D A96C -FE4E A96D -FE4F A96E -FE50 A96F -FE51 A970 -FE52 A971 -FE53 84318633 -FE54 A972 -FE55 A973 -FE56 A974 -FE57 A975 -FE58 84318634 -FE59 A976 -FE5A A977 -FE5B A978 -FE5C A979 -FE5D A97A -FE5E A97B -FE5F A97C -FE60 A97D -FE61 A97E -FE62 A980 -FE63 A981 -FE64 A982 -FE65 A983 -FE66 A984 -FE67 84318635 -FE68 A985 -FE69 A986 -FE6A A987 -FE6B A988 -FE6C 84318636 -FE6D 84318637 -FE6E 84318638 -FE6F 84318639 -FE70 84318730 -FE71 84318731 -FE72 84318732 -FE73 84318733 -FE74 84318734 -FE75 84318735 -FE76 84318736 -FE77 84318737 -FE78 84318738 -FE79 84318739 -FE7A 84318830 -FE7B 84318831 -FE7C 84318832 -FE7D 84318833 -FE7E 84318834 -FE7F 84318835 -FE80 84318836 -FE81 84318837 -FE82 84318838 -FE83 84318839 -FE84 84318930 -FE85 84318931 -FE86 84318932 -FE87 84318933 -FE88 84318934 -FE89 84318935 -FE8A 84318936 -FE8B 84318937 -FE8C 84318938 -FE8D 84318939 -FE8E 84318A30 -FE8F 84318A31 -FE90 84318A32 -FE91 84318A33 -FE92 84318A34 -FE93 84318A35 -FE94 84318A36 -FE95 84318A37 -FE96 84318A38 -FE97 84318A39 -FE98 84318B30 -FE99 84318B31 -FE9A 84318B32 -FE9B 84318B33 -FE9C 84318B34 -FE9D 84318B35 -FE9E 84318B36 -FE9F 84318B37 -FEA0 84318B38 -FEA1 84318B39 -FEA2 84318C30 -FEA3 84318C31 -FEA4 84318C32 -FEA5 84318C33 -FEA6 84318C34 -FEA7 84318C35 -FEA8 84318C36 -FEA9 84318C37 -FEAA 84318C38 -FEAB 84318C39 -FEAC 84318D30 -FEAD 84318D31 -FEAE 84318D32 -FEAF 84318D33 -FEB0 84318D34 -FEB1 84318D35 -FEB2 84318D36 -FEB3 84318D37 -FEB4 84318D38 -FEB5 84318D39 -FEB6 84318E30 -FEB7 84318E31 -FEB8 84318E32 -FEB9 84318E33 -FEBA 84318E34 -FEBB 84318E35 -FEBC 84318E36 -FEBD 84318E37 -FEBE 84318E38 -FEBF 84318E39 -FEC0 84318F30 -FEC1 84318F31 -FEC2 84318F32 -FEC3 84318F33 -FEC4 84318F34 -FEC5 84318F35 -FEC6 84318F36 -FEC7 84318F37 -FEC8 84318F38 -FEC9 84318F39 -FECA 84319030 -FECB 84319031 -FECC 84319032 -FECD 84319033 -FECE 84319034 -FECF 84319035 -FED0 84319036 -FED1 84319037 -FED2 84319038 -FED3 84319039 -FED4 84319130 -FED5 84319131 -FED6 84319132 -FED7 84319133 -FED8 84319134 -FED9 84319135 -FEDA 84319136 -FEDB 84319137 -FEDC 84319138 -FEDD 84319139 -FEDE 84319230 -FEDF 84319231 -FEE0 84319232 -FEE1 84319233 -FEE2 84319234 -FEE3 84319235 -FEE4 84319236 -FEE5 84319237 -FEE6 84319238 -FEE7 84319239 
-FEE8 84319330 -FEE9 84319331 -FEEA 84319332 -FEEB 84319333 -FEEC 84319334 -FEED 84319335 -FEEE 84319336 -FEEF 84319337 -FEF0 84319338 -FEF1 84319339 -FEF2 84319430 -FEF3 84319431 -FEF4 84319432 -FEF5 84319433 -FEF6 84319434 -FEF7 84319435 -FEF8 84319436 -FEF9 84319437 -FEFA 84319438 -FEFB 84319439 -FEFC 84319530 -FEFD 84319531 -FEFE 84319532 -FEFF 84319533 -FF00 84319534 -FF01 A3A1 -FF02 A3A2 -FF03 A3A3 -FF04 A1E7 -FF05 A3A5 -FF06 A3A6 -FF07 A3A7 -FF08 A3A8 -FF09 A3A9 -FF0A A3AA -FF0B A3AB -FF0C A3AC -FF0D A3AD -FF0E A3AE -FF0F A3AF -FF10 A3B0 -FF11 A3B1 -FF12 A3B2 -FF13 A3B3 -FF14 A3B4 -FF15 A3B5 -FF16 A3B6 -FF17 A3B7 -FF18 A3B8 -FF19 A3B9 -FF1A A3BA -FF1B A3BB -FF1C A3BC -FF1D A3BD -FF1E A3BE -FF1F A3BF -FF20 A3C0 -FF21 A3C1 -FF22 A3C2 -FF23 A3C3 -FF24 A3C4 -FF25 A3C5 -FF26 A3C6 -FF27 A3C7 -FF28 A3C8 -FF29 A3C9 -FF2A A3CA -FF2B A3CB -FF2C A3CC -FF2D A3CD -FF2E A3CE -FF2F A3CF -FF30 A3D0 -FF31 A3D1 -FF32 A3D2 -FF33 A3D3 -FF34 A3D4 -FF35 A3D5 -FF36 A3D6 -FF37 A3D7 -FF38 A3D8 -FF39 A3D9 -FF3A A3DA -FF3B A3DB -FF3C A3DC -FF3D A3DD -FF3E A3DE -FF3F A3DF -FF40 A3E0 -FF41 A3E1 -FF42 A3E2 -FF43 A3E3 -FF44 A3E4 -FF45 A3E5 -FF46 A3E6 -FF47 A3E7 -FF48 A3E8 -FF49 A3E9 -FF4A A3EA -FF4B A3EB -FF4C A3EC -FF4D A3ED -FF4E A3EE -FF4F A3EF -FF50 A3F0 -FF51 A3F1 -FF52 A3F2 -FF53 A3F3 -FF54 A3F4 -FF55 A3F5 -FF56 A3F6 -FF57 A3F7 -FF58 A3F8 -FF59 A3F9 -FF5A A3FA -FF5B A3FB -FF5C A3FC -FF5D A3FD -FF5E A1AB -FF5F 84319535 -FF60 84319536 -FF61 84319537 -FF62 84319538 -FF63 84319539 -FF64 84319630 -FF65 84319631 -FF66 84319632 -FF67 84319633 -FF68 84319634 -FF69 84319635 -FF6A 84319636 -FF6B 84319637 -FF6C 84319638 -FF6D 84319639 -FF6E 84319730 -FF6F 84319731 -FF70 84319732 -FF71 84319733 -FF72 84319734 -FF73 84319735 -FF74 84319736 -FF75 84319737 -FF76 84319738 -FF77 84319739 -FF78 84319830 -FF79 84319831 -FF7A 84319832 -FF7B 84319833 -FF7C 84319834 -FF7D 84319835 -FF7E 84319836 -FF7F 84319837 -FF80 84319838 -FF81 84319839 -FF82 84319930 -FF83 84319931 -FF84 84319932 -FF85 84319933 -FF86 84319934 -FF87 84319935 -FF88 84319936 -FF89 84319937 -FF8A 84319938 -FF8B 84319939 -FF8C 84319A30 -FF8D 84319A31 -FF8E 84319A32 -FF8F 84319A33 -FF90 84319A34 -FF91 84319A35 -FF92 84319A36 -FF93 84319A37 -FF94 84319A38 -FF95 84319A39 -FF96 84319B30 -FF97 84319B31 -FF98 84319B32 -FF99 84319B33 -FF9A 84319B34 -FF9B 84319B35 -FF9C 84319B36 -FF9D 84319B37 -FF9E 84319B38 -FF9F 84319B39 -FFA0 84319C30 -FFA1 84319C31 -FFA2 84319C32 -FFA3 84319C33 -FFA4 84319C34 -FFA5 84319C35 -FFA6 84319C36 -FFA7 84319C37 -FFA8 84319C38 -FFA9 84319C39 -FFAA 84319D30 -FFAB 84319D31 -FFAC 84319D32 -FFAD 84319D33 -FFAE 84319D34 -FFAF 84319D35 -FFB0 84319D36 -FFB1 84319D37 -FFB2 84319D38 -FFB3 84319D39 -FFB4 84319E30 -FFB5 84319E31 -FFB6 84319E32 -FFB7 84319E33 -FFB8 84319E34 -FFB9 84319E35 -FFBA 84319E36 -FFBB 84319E37 -FFBC 84319E38 -FFBD 84319E39 -FFBE 84319F30 -FFBF 84319F31 -FFC0 84319F32 -FFC1 84319F33 -FFC2 84319F34 -FFC3 84319F35 -FFC4 84319F36 -FFC5 84319F37 -FFC6 84319F38 -FFC7 84319F39 -FFC8 8431A030 -FFC9 8431A031 -FFCA 8431A032 -FFCB 8431A033 -FFCC 8431A034 -FFCD 8431A035 -FFCE 8431A036 -FFCF 8431A037 -FFD0 8431A038 -FFD1 8431A039 -FFD2 8431A130 -FFD3 8431A131 -FFD4 8431A132 -FFD5 8431A133 -FFD6 8431A134 -FFD7 8431A135 -FFD8 8431A136 -FFD9 8431A137 -FFDA 8431A138 -FFDB 8431A139 -FFDC 8431A230 -FFDD 8431A231 -FFDE 8431A232 -FFDF 8431A233 -FFE0 A1E9 -FFE1 A1EA -FFE2 A956 -FFE3 A3FE -FFE4 A957 -FFE5 A3A4 -FFE6 8431A234 -FFE7 8431A235 -FFE8 8431A236 -FFE9 8431A237 -FFEA 8431A238 -FFEB 8431A239 -FFEC 8431A330 -FFED 8431A331 -FFEE 8431A332 -FFEF 
diff --git a/src/common/backend/utils/mb/mbutils.cpp b/src/common/backend/utils/mb/mbutils.cpp
index 546cfa89b..981331330 100644
--- a/src/common/backend/utils/mb/mbutils.cpp
+++ b/src/common/backend/utils/mb/mbutils.cpp
@@ -825,6 +825,24 @@ int pg_mbstrlen_with_len_eml(const char* mbstr, int limit, int eml)
     return len;
 }
 
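+/*
+ * Note: unlike pg_mbstrlen_with_len(), 'limit' is in/out here: the loop
+ * decrements *limit by the bytes it consumes while the return value counts
+ * characters (for single-byte encodings *limit is returned unchanged).
+ */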
"), NULL}, + &g_instance.attr.attr_common.enable_global_syscache, + true, + NULL, + NULL, + NULL}, + {{"enable_router", PGC_SIGHUP, NODE_DISTRIBUTE, @@ -1783,6 +1793,19 @@ static void InitConfigureNamesBool() NULL, NULL }, + {{"enable_ustore", + PGC_POSTMASTER, + NODE_SINGLENODE, + QUERY_TUNING_METHOD, + gettext_noop("Enable ustore storage engine"), + NULL}, + &g_instance.attr.attr_storage.enable_ustore, + true, + NULL, + NULL, + NULL, + NULL + }, /* End-of-list marker */ {{NULL, (GucContext)0, @@ -2070,7 +2093,11 @@ static void InitConfigureNamesInt() GUC_SUPERUSER_ONLY}, &g_instance.attr.attr_common.asp_sample_num, 100000, +#ifdef ENABLE_MULTIPLE_NODES 10000, +#else + 10, +#endif 100000, NULL, NULL, @@ -2229,32 +2256,6 @@ static void InitConfigureNamesInt() NULL, NULL, NULL}, - {{"pset_lob_length", - PGC_USERSET, - NODE_ALL, - CLIENT_CONN_STATEMENT, - gettext_noop("GUC parameter of pset_lob_length."), - NULL}, - &u_sess->attr.attr_common.pset_lob_length, - 0, - 0, - INT_MAX, - NULL, - NULL, - NULL}, - {{"pset_num_width", - PGC_USERSET, - NODE_ALL, - CLIENT_CONN_STATEMENT, - gettext_noop("GUC parameter of pset_num_width."), - NULL}, - &u_sess->attr.attr_common.pset_num_width, - 0, - 0, - 128, - NULL, - NULL, - NULL}, {{"tcp_keepalives_idle", PGC_USERSET, NODE_ALL, @@ -3043,7 +3044,6 @@ static void InitConfigureNamesString() NULL, NULL, NULL}, - {{"thread_pool_attr", PGC_POSTMASTER, NODE_ALL, @@ -3056,6 +3056,19 @@ static void InitConfigureNamesString() NULL, NULL, NULL}, + + {{"thread_pool_stream_attr", + PGC_POSTMASTER, + NODE_ALL, + CLIENT_CONN, + gettext_noop("Spare Cpu that can not be used in thread pool stream."), + NULL, + GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY}, + &g_instance.attr.attr_common.thread_pool_stream_attr, + "16, 0.2, 2, (nobind)", + NULL, + NULL, + NULL}, {{"comm_proxy_attr", PGC_POSTMASTER, @@ -3769,7 +3782,7 @@ static void InitConfigureNamesString() &u_sess->attr.attr_common.ts_compaction_strategy, "3,6,6,12,0", check_compaciton_strategy, - NULL}, + NULL}, #endif {{NULL, (GucContext)0, @@ -4045,7 +4058,7 @@ static void InitConfigureNamesEnum() PGC_POSTMASTER, NODE_SINGLENODE, PRESET_OPTIONS, - gettext_noop("Sets how binary values are to be encoded in XML."), + gettext_noop("Sets the type of shared storage cluster."), NULL}, &g_instance.attr.attr_common.cluster_run_mode, RUN_MODE_PRIMARY, @@ -4053,6 +4066,18 @@ static void InitConfigureNamesEnum() NULL, NULL, NULL}, + {{"stream_cluster_run_mode", + PGC_POSTMASTER, + NODE_DISTRIBUTE, + PRESET_OPTIONS, + gettext_noop("Sets the type of streaming cluster."), + NULL}, + &g_instance.attr.attr_common.stream_cluster_run_mode, + RUN_MODE_PRIMARY, + cluster_run_mode_options, + NULL, + NULL, + NULL}, /* End-of-list marker */ {{NULL, (GucContext)0, @@ -8215,7 +8240,7 @@ static void CheckAlterSystemSetPrivilege(const char* name) "query_log_directory", "ssl_ca_file", "ssl_cert_file", "ssl_crl_file", "ssl_key_file", "stats_temp_directory", "unix_socket_directory", "unix_socket_group", "unix_socket_permissions", "krb_caseins_users", "krb_server_keyfile", "krb_srvname", "allow_system_table_mods", "enableSeparationOfDuty", - "modify_initial_password", "password_encryption_type", "password_policy", + "modify_initial_password", "password_encryption_type", "password_policy", "audit_xid_info", NULL }; for (int i = 0; blackList[i] != NULL; i++) { @@ -11298,7 +11323,8 @@ static const char* show_tcp_keepalives_interval(void) const int maxBufLen = 16; static char nbuf[maxBufLen]; - errno_t rc = snprintf_s(nbuf, maxBufLen, 
+        if (EnableGlobalSysCache()) {
+            g_instance.global_sysdbcache.UpdateGSCConfig(g_instance.attr.attr_memory.global_syscache_threshold);
+        }
 
     /* Remember when we last successfully loaded the config file. */
     t_thrd.time_cxt.pg_reload_time = GetCurrentTimestamp();
diff --git a/src/common/backend/utils/misc/guc.cpp b/src/common/backend/utils/misc/guc.cpp
index 562792662..d74d096b9 100755
--- a/src/common/backend/utils/misc/guc.cpp
+++ b/src/common/backend/utils/misc/guc.cpp
@@ -8,8 +8,8 @@
  *
  * Copyright (c) 2000-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
- * Written by Peter Eisentraut <peter_e@gmx.net>.
  * Portions Copyright (c) 2021, openGauss Contributors
+ * Written by Peter Eisentraut <peter_e@gmx.net>.
  *
  * IDENTIFICATION
  * src/backend/utils/misc/guc.c
@@ -183,7 +183,6 @@
 #define INVALID_LINES_IDX (int)(~0)
 #define MAX_PARAM_LEN 1024
 #define WRITE_CONFIG_LOCK_LEN (1024 * 1024)
-#define CONFIG_BAK_FILENAME "postgresql.conf.bak"
 
 #ifdef EXEC_BACKEND
 #define CONFIG_EXEC_PARAMS "global/config_exec_params"
@@ -413,6 +412,7 @@ const char* sync_guc_variable_namelist[] = {"work_mem",
     "sql_beta_feature",
 #ifndef ENABLE_MULTIPLE_NODES
     "plsql_show_all_error",
+    "uppercase_attribute_name",
 #endif
     "track_stmt_session_slot",
     "track_stmt_stat_level",
@@ -971,7 +971,6 @@ const char* const config_group_names[] = {
     /* INSTRUMENTS_OPTIONS */
     gettext_noop("Instruments Options"),
     gettext_noop("Column Encryption"),
-    gettext_noop("Compress Options"),
 #ifdef PGXC
     /* DATA_NODES */
     gettext_noop("Datanodes and Connection Pooling"),
@@ -1645,6 +1644,17 @@ static void InitConfigureNamesBool()
         NULL,
         NULL},
 #endif
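+    /* postmaster-only switch; runtime resizing goes through
+     * global_syscache_threshold (PGC_SIGHUP, guc_memory.cpp) */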
+    {{"enable_global_syscache",
+        PGC_POSTMASTER,
+        NODE_ALL,
+        CLIENT_CONN,
+        gettext_noop("enable to use global system cache. "), NULL},
+        &g_instance.attr.attr_common.enable_global_syscache,
+        true,
+        NULL,
+        NULL,
+        NULL},
+
     {{"enable_router",
         PGC_SIGHUP,
         NODE_DISTRIBUTE,
@@ -1783,6 +1793,19 @@ static void InitConfigureNamesBool()
         NULL,
         NULL
     },
+    {{"enable_ustore",
+        PGC_POSTMASTER,
+        NODE_SINGLENODE,
+        QUERY_TUNING_METHOD,
+        gettext_noop("Enable ustore storage engine"),
+        NULL},
+        &g_instance.attr.attr_storage.enable_ustore,
+        true,
+        NULL,
+        NULL,
+        NULL,
+        NULL
+    },
     /* End-of-list marker */
     {{NULL,
         (GucContext)0,
@@ -2070,7 +2093,11 @@ static void InitConfigureNamesInt()
         GUC_SUPERUSER_ONLY},
         &g_instance.attr.attr_common.asp_sample_num,
         100000,
+#ifdef ENABLE_MULTIPLE_NODES
         10000,
+#else
+        10,
+#endif
         100000,
         NULL,
         NULL,
@@ -2229,32 +2256,6 @@
         NULL,
         NULL,
         NULL},
-    {{"pset_lob_length",
-        PGC_USERSET,
-        NODE_ALL,
-        CLIENT_CONN_STATEMENT,
-        gettext_noop("GUC parameter of pset_lob_length."),
-        NULL},
-        &u_sess->attr.attr_common.pset_lob_length,
-        0,
-        0,
-        INT_MAX,
-        NULL,
-        NULL,
-        NULL},
-    {{"pset_num_width",
-        PGC_USERSET,
-        NODE_ALL,
-        CLIENT_CONN_STATEMENT,
-        gettext_noop("GUC parameter of pset_num_width."),
-        NULL},
-        &u_sess->attr.attr_common.pset_num_width,
-        0,
-        0,
-        128,
-        NULL,
-        NULL,
-        NULL},
     {{"tcp_keepalives_idle",
         PGC_USERSET,
         NODE_ALL,
@@ -3043,7 +3044,6 @@ static void InitConfigureNamesString()
         NULL,
         NULL,
         NULL},
-
     {{"thread_pool_attr",
         PGC_POSTMASTER,
         NODE_ALL,
@@ -3056,6 +3056,19 @@
         NULL,
         NULL,
         NULL},
+
+    {{"thread_pool_stream_attr",
+        PGC_POSTMASTER,
+        NODE_ALL,
+        CLIENT_CONN,
+        gettext_noop("Spare Cpu that can not be used in thread pool stream."),
+        NULL,
+        GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY},
+        &g_instance.attr.attr_common.thread_pool_stream_attr,
+        "16, 0.2, 2, (nobind)",
+        NULL,
+        NULL,
+        NULL},
 
     {{"comm_proxy_attr",
         PGC_POSTMASTER,
@@ -3769,7 +3782,7 @@ static void InitConfigureNamesString()
         &u_sess->attr.attr_common.ts_compaction_strategy,
         "3,6,6,12,0",
         check_compaciton_strategy,
-            NULL},
+        NULL},
 #endif
     {{NULL,
         (GucContext)0,
@@ -4045,7 +4058,7 @@ static void InitConfigureNamesEnum()
         PGC_POSTMASTER,
         NODE_SINGLENODE,
         PRESET_OPTIONS,
-        gettext_noop("Sets how binary values are to be encoded in XML."),
+        gettext_noop("Sets the type of shared storage cluster."),
         NULL},
         &g_instance.attr.attr_common.cluster_run_mode,
         RUN_MODE_PRIMARY,
@@ -4053,6 +4066,18 @@
         NULL,
         NULL,
         NULL},
+    {{"stream_cluster_run_mode",
+        PGC_POSTMASTER,
+        NODE_DISTRIBUTE,
+        PRESET_OPTIONS,
+        gettext_noop("Sets the type of streaming cluster."),
+        NULL},
+        &g_instance.attr.attr_common.stream_cluster_run_mode,
+        RUN_MODE_PRIMARY,
+        cluster_run_mode_options,
+        NULL,
+        NULL,
+        NULL},
     /* End-of-list marker */
     {{NULL,
         (GucContext)0,
@@ -8215,7 +8240,7 @@ static void CheckAlterSystemSetPrivilege(const char* name)
         "query_log_directory", "ssl_ca_file", "ssl_cert_file", "ssl_crl_file", "ssl_key_file", "stats_temp_directory",
         "unix_socket_directory", "unix_socket_group", "unix_socket_permissions", "krb_caseins_users",
         "krb_server_keyfile", "krb_srvname", "allow_system_table_mods", "enableSeparationOfDuty",
-        "modify_initial_password", "password_encryption_type", "password_policy",
+        "modify_initial_password", "password_encryption_type", "password_policy", "audit_xid_info",
         NULL
     };
     for (int i = 0; blackList[i] != NULL; i++) {
@@ -11298,7 +11323,8 @@ static const char* show_tcp_keepalives_interval(void)
     const int maxBufLen = 16;
     static char nbuf[maxBufLen];
 
-    errno_t rc = snprintf_s(nbuf, maxBufLen, maxBufLen - 1, "%d", pq_getkeepalivesinterval(u_sess->proc_cxt.MyProcPort));
+    errno_t rc = snprintf_s(nbuf, maxBufLen, maxBufLen - 1, "%d",
+        pq_getkeepalivesinterval(u_sess->proc_cxt.MyProcPort));
     securec_check_ss(rc, "\0", "\0");
     return nbuf;
 }
@@ -12055,6 +12081,56 @@ ErrCode copy_guc_lines(char** copy_to_line, char** optlines, const char** opt_na
     return CODE_OK;
 }
 
+/*
+ * @@GaussDB@@
+ * Brief            : void modify_guc_one_line
+ * Description      : modify one guc config
+ * Notes            :
+ */
+void modify_guc_one_line(char*** guc_optlines, const char* opt_name, const char* copy_from_line)
+{
+    int opt_name_index = 0;
+    int optvalue_off = 0;
+    int optvalue_len = 0;
+    char **optlines = *guc_optlines;
+
+    opt_name_index = find_guc_option(optlines, opt_name, NULL, NULL, &optvalue_off, &optvalue_len, false);
+    if (INVALID_LINES_IDX != opt_name_index) {
+        pfree(optlines[opt_name_index]);
+    } else {
+        int lines = 0;
+        /* get optlines row number */
+        for (lines = 0; optlines[lines] != NULL; ++lines) {}
+        /* add one line guc item, and set a end flag NULL. */
+        char **optlines_copy = (char**)pg_malloc((lines + 2) * sizeof(char*));
+        errno_t rc = memset_s(optlines_copy, (lines + 2) * sizeof(char*), 0, (lines + 2) * sizeof(char*));
+        securec_check(rc, "\0", "\0");
+        for (int cnt = 0; cnt < lines; cnt++) {
+            optlines_copy[cnt] = optlines[cnt];
+        }
+        pfree(optlines);
+        *guc_optlines = optlines_copy;
+        optlines = *guc_optlines;
+        optlines_copy = NULL;
+
+        Assert(optlines[lines] == NULL);
+        optlines[lines + 1] = NULL;
+        opt_name_index = lines;
+    }
+    int newsize = strlen(copy_from_line) + 1;
+
+    optlines[opt_name_index] = (char*)pg_malloc(newsize);
+
+    errno_t rc = strncpy_s(optlines[opt_name_index], newsize, copy_from_line, newsize - 1);
+    securec_check(rc, "\0", "\0");
+
+    if (newsize > MAX_PARAM_LEN) {
+        ereport(WARNING, (errmsg("modify_guc_one_line:opt len '%s' is out of 1024", optlines[opt_name_index])));
+    }
+
+}
+
 /*
  * @@GaussDB@@
  * Brief            : static void modify_config_file
diff --git a/src/common/backend/utils/misc/guc/guc_memory.cpp b/src/common/backend/utils/misc/guc/guc_memory.cpp
index d946a9797..79f087462 100644
--- a/src/common/backend/utils/misc/guc/guc_memory.cpp
+++ b/src/common/backend/utils/misc/guc/guc_memory.cpp
@@ -264,7 +264,22 @@ static void InitMemoryConfigureNamesBool()
         NULL,
         NULL,
         NULL},
-
+
+#ifdef MEMORY_CONTEXT_CHECKING
+    // variable to enable memory check
+    {{"enable_memory_context_check_debug",
+        PGC_POSTMASTER,
+        NODE_ALL,
+        RESOURCES_MEM,
+        gettext_noop("check the memory context info on debug mode."),
+        NULL},
+        &g_instance.attr.attr_memory.enable_memory_context_check_debug,
+        false,
+        NULL,
+        NULL,
+        NULL},
+#endif
+
     /* End-of-list marker */
     {{NULL,
         (GucContext)0,
@@ -318,7 +333,7 @@ static void InitMemoryConfigureNamesInt()
         NULL,
         NULL},
     {{"local_syscache_threshold",
-        PGC_POSTMASTER,
+        PGC_SIGHUP,
         NODE_ALL,
         RESOURCES_MEM,
         gettext_noop("Sets the maximum threshold for cleaning cache."),
@@ -331,6 +346,20 @@
         check_syscache_threshold_gpc,
         NULL,
         NULL},
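+    /* sized in KB (GUC_UNIT_KB); PGC_SIGHUP changes reach the running cache
+     * via UpdateGSCConfig() in guc-file.l above */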
+    {{"global_syscache_threshold",
+        PGC_SIGHUP,
+        NODE_ALL,
+        RESOURCES_MEM,
+        gettext_noop("Sets the maximum threshold for cleaning global syscache."),
+        NULL,
+        GUC_UNIT_KB},
+        &g_instance.attr.attr_memory.global_syscache_threshold,
+        160 * 1024,
+        16 * 1024,
+        1024 * 1024 * 1024,
+        NULL,
+        NULL,
+        NULL},
     {{"work_mem",
         PGC_USERSET,
         NODE_ALL,
@@ -633,7 +662,6 @@ static bool check_syscache_threshold_gpc(int* newval, void** extra, GucSource so
             *newval / 1024)));
         g_instance.attr.attr_memory.local_syscache_threshold = 16 * 1024;
     }
-
     return true;
 }
diff --git a/src/common/backend/utils/misc/guc/guc_security.cpp b/src/common/backend/utils/misc/guc/guc_security.cpp
index 690118fef..e9ec81dc6 100755
--- a/src/common/backend/utils/misc/guc/guc_security.cpp
+++ b/src/common/backend/utils/misc/guc/guc_security.cpp
@@ -798,7 +798,7 @@ static void InitSecurityConfigureNamesInt()
         &u_sess->attr.attr_security.Audit_DDL,
         12295,
         0,
-        2097151,
+        67108863,
         NULL,
         NULL,
         NULL},
@@ -889,6 +889,36 @@
         NULL,
         NULL,
         NULL},
+
+    {{"audit_xid_info",
+        PGC_SIGHUP,
+        NODE_ALL,
+        AUDIT_OPTIONS,
+        gettext_noop("whether record xid info in audit log."),
+        NULL,
+        0},
+        &u_sess->attr.attr_security.audit_xid_info,
+        0,
+        0,
+        1,
+        NULL,
+        NULL,
+        NULL},
+
+    {{"audit_thread_num",
+        PGC_POSTMASTER,
+        NODE_ALL,
+        AUDIT_OPTIONS,
+        gettext_noop("Sets the number of audit threads."),
+        NULL,
+        0},
+        &g_instance.attr.attr_security.audit_thread_num,
+        1,
+        1,
+        48,
+        NULL,
+        NULL,
+        NULL},
     /* End-of-list marker */
     {{NULL,
@@ -1262,18 +1292,17 @@ static bool check_ssl_ciphers(char** newval, void** extra, GucSource)
     int i = 0;
     char* ptok = NULL;
     const char* ssl_ciphers_list[] = {
-        "DHE-RSA-AES256-GCM-SHA384",
-        "DHE-RSA-AES128-GCM-SHA256",
-        "DHE-RSA-AES256-CCM",
-        "DHE-RSA-AES128-CCM",
-        "ECDHE-RSA-AES256-GCM-SHA384",
         "ECDHE-RSA-AES128-GCM-SHA256",
-        "ECDHE-ECDSA-AES256-GCM-SHA384",
+        "ECDHE-RSA-AES256-GCM-SHA384",
         "ECDHE-ECDSA-AES128-GCM-SHA256",
-        NULL
+        "ECDHE-ECDSA-AES256-GCM-SHA384",
+        "DHE-RSA-AES128-GCM-SHA256",
+        "DHE-RSA-AES256-GCM-SHA384"
     };
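+    /* the trimmed list drops the trailing NULL sentinel, so size it with lengthof() */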
+    int maxCnt = lengthof(ssl_ciphers_list);
+
     if (*newval == NULL || **newval == '\0' || **newval == ';') {
-        ereport(ERROR, (errmsg("sslciphers can not be null")));
+        return false;
     } else if (strcasecmp(*newval, "ALL") == 0) {
         return true;
     } else {
@@ -1286,25 +1315,19 @@
                 break;
             }
             if (cipherStr == strchr(cipherStr, ';')) {
-                ereport(ERROR, (errmsg("unrecognized ssl ciphers name: \"%s\"", *newval)));
+                return false;
             }
             cipherStr = strchr(cipherStr, ';');
         }
+
+        if (counter > maxCnt) {
+            return false;
+        }
         ciphers_list = static_cast<char**>(palloc(counter * sizeof(char*)));
-
-        Assert(ciphers_list != NULL);
-        if (ciphers_list == NULL) {
-            ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("malloc failed")));
-        }
-
         cipherStr_tmp = pstrdup(*newval);
-        if (cipherStr_tmp == NULL) {
-            pfree_ext(ciphers_list);
-            ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("malloc failed")));
-        }
         token = strtok_r(cipherStr_tmp, ";", &ptok);
         while (token != NULL) {
-            for (int j = 0; ssl_ciphers_list[j] != NULL; j++) {
+            for (int j = 0; j < maxCnt; j++) {
                 if (strlen(ssl_ciphers_list[j]) == strlen(token) &&
                     strncmp(ssl_ciphers_list[j], token, strlen(token)) == 0) {
                     ciphers_list[i] = const_cast<char*>(ssl_ciphers_list[j]);
@@ -1312,16 +1335,11 @@
                     break;
                 }
             }
+
             if (!find_ciphers_in_list) {
-                const int maxCipherStrLen = 64;
-                char errormessage[maxCipherStrLen] = {0};
-                errno_t errorno = EOK;
-                errorno = strncpy_s(errormessage, sizeof(errormessage), token, sizeof(errormessage) - 1);
-                securec_check(errorno, cipherStr_tmp, ciphers_list, "\0");
-                errormessage[maxCipherStrLen - 1] = '\0';
                 pfree_ext(cipherStr_tmp);
                 pfree_ext(ciphers_list);
-                ereport(ERROR, (errmsg("unrecognized ssl ciphers name: \"%s\"", errormessage)));
+                return false;
             }
             token = strtok_r(NULL, ";", &ptok);
             i++;
@@ -1332,3 +1350,4 @@ static bool check_ssl_ciphers(char** newval, void** extra, GucSource)
     pfree_ext(ciphers_list);
     return true;
 }
+
diff --git a/src/common/backend/utils/misc/guc/guc_sql.cpp b/src/common/backend/utils/misc/guc/guc_sql.cpp
index fca448ca5..9139bb39d 100755
--- a/src/common/backend/utils/misc/guc/guc_sql.cpp
+++ b/src/common/backend/utils/misc/guc/guc_sql.cpp
@@ -8,8 +8,8 @@
  *
  * Copyright (c) 2000-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
- * Written by Peter Eisentraut <peter_e@gmx.net>.
  * Portions Copyright (c) 2021, openGauss Contributors
+ * Written by Peter Eisentraut <peter_e@gmx.net>.
  *
  * IDENTIFICATION
  * src/backend/utils/misc/guc/guc_sql.cpp
@@ -165,7 +165,6 @@ static void assign_convert_string_to_digit(bool newval, void* extra);
 static void AssignUStoreAttr(const char* newval, void* extra);
 static bool check_snapshot_delimiter(char** newval, void** extra, GucSource source);
 static bool check_snapshot_separator(char** newval, void** extra, GucSource source);
-static bool check_numformat_arg(char** numformat, void** extra, GucSource source);
 
 static void InitSqlConfigureNamesBool();
@@ -174,15 +173,7 @@
 static void InitSqlConfigureNamesInt64();
 static void InitSqlConfigureNamesReal();
 static void InitSqlConfigureNamesString();
 static void InitSqlConfigureNamesEnum();
-
-double str_to_num(char *format_str);
-double my_pow(double base, int exp);
-void extract_deci_part_width(double num, int width, int int_places, StringInfo result, bool is_negative);
-void extract_part(double num, StringInfo result, bool is_negative, bool extract_deci, int int_places, int deci_places);
-void extract_int_part(double num, int places, StringInfo space_part, StringInfo int_part);
-void extract_deci_part(double num, int deci_places, StringInfo deci_part);
-void out_of_range(StringInfo result, int len, bool amend_length);
-
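+/* number of entries in ForbidDistributeParameter()'s forbidList below */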
"rownum_type_compat" + }; + for (int i = 0; i < FORBID_GUC_NUM; i++) { + if (strcmp(forbidList[i], elem) == 0) { + return true; + } + } + return false; +} +#endif /* * check_behavior_compat_options: GUC check_hook for behavior compat options */ @@ -3119,12 +3156,20 @@ static bool check_behavior_compat_options(char** newval, void** extra, GucSource bool nfound = true; for (start = 0; start < OPT_MAX; start++) { +#ifdef ENABLE_MULTIPLE_NODES + if (ForbidDistributeParameter(item)) { + GUC_check_errdetail("behavior compat option %s can not use" + " in distributed database system", item); + pfree(rawstring); + list_free(elemlist); + return false; + } +#endif if (strcmp(item, behavior_compat_options[start].name) == 0) { nfound = false; break; } } - if (nfound) { GUC_check_errdetail("invalid behavior compat option \"%s\"", item); pfree(rawstring); @@ -3316,274 +3361,3 @@ static bool check_snapshot_separator(char** newval, void** extra, GucSource sour return (strlen(*newval) == 1 && (!u_sess->attr.attr_sql.db4ai_snapshot_version_delimiter || **newval != *u_sess->attr.attr_sql.db4ai_snapshot_version_delimiter)); } - -/* - * @@GaussDB@@ - * Brief : Check numformat (strlen(numformat) is known to greater than 0). - * Description : If numformat is not right, then return false. - */ -static bool check_numformat_arg(char** numformat, void** extra, GucSource source) -{ - if (NULL == numformat) { - return false; - } - - if (strlen(*numformat) > 128) { - GUC_check_errdetail("The numformat is too long!"); - return false; - } - - bool point_appeared = false; - char *numformat_copy = *numformat; - while (*numformat_copy != '\0') { - if (*numformat_copy != '9' && *numformat_copy != '.') { - return false; - } - if (*numformat_copy == '.') { - if (unlikely(point_appeared)) { - return false; - } - point_appeared = true; - } - numformat_copy++; - } - return true; -} - -char* apply_num_width(double num) -{ - StringInfoData result; - initStringInfo(&result); - bool is_negative = false; - int width = u_sess->attr.attr_common.pset_num_width; - const int decimal = 10; - if (num < 0) { - num = -num; - is_negative = true; - width--; - } - - int int_places = 0; - double num_copy = num; - while ((int)num_copy > 0) { - int_places++; - num_copy = num_copy / decimal; - } - - if (int_places > width) { - out_of_range(&result, width, is_negative); - } else if (int_places == width || int_places == width - 1) { - int value = (int)(num * decimal) % decimal; - if (value >= 5) { - num += 1; - } - if ((int)num == (int)my_pow(decimal, width) && int_places == width) { - out_of_range(&result, width, is_negative); - } else { - if (unlikely(is_negative)) { - appendStringInfo(&result, "%s", "-"); - } - appendStringInfo(&result, "%d", (int)num); - } - } else { - extract_deci_part_width(num, width, int_places, &result, is_negative); - } - - return result.data; -} - -void extract_deci_part_width(double num, int width, int int_places, StringInfo result, bool is_negative) -{ - int deci_places = width - 1 - int_places; - const int decimal = 10; - int value = (int)(num * my_pow(decimal, deci_places + 1)) % decimal; - if (value >= 5) { - num += my_pow(decimal, -deci_places); - int_places = 0; - double num_copy = num; - while ((int)num_copy > 0) { - int_places++; - num_copy = num_copy / decimal; - } - deci_places = width - 1 - int_places; - } - - StringInfoData int_part, deci_part; - initStringInfo(&int_part); - initStringInfo(&deci_part); - - if (unlikely(is_negative)) { - appendStringInfo(&int_part, "%s", "-"); - } - - for (int i = int_places - 1; 
i >= 0; i--) { - int value = (int)(num / my_pow(decimal, i)) % decimal; - appendStringInfo(&int_part, "%d", value); - } - - int last_not_zero = 0; - for (int i = 1; i <= deci_places; i++) { - int value = (int)(num * my_pow(decimal, i)) % decimal; - if (value != 0) { - last_not_zero = i; - } - } - - if (last_not_zero == 0) { - for (int i = 0; i < deci_places + 1; i++) { - appendStringInfo(result, "%s", " "); - } - if (int_part.len == 0) { - appendStringInfo(&int_part, "%s", "0"); - } - appendStringInfo(result, "%s", int_part.data); - } else { - for (int i = 0; i < deci_places - last_not_zero; i++) { - appendStringInfo(result, "%s", " "); - } - appendStringInfo(result, "%s", int_part.data); - appendStringInfo(result, "%s", "."); - extract_deci_part(num, last_not_zero, &deci_part); - appendStringInfo(result, "%s", deci_part.data); - } - - pfree(int_part.data); - pfree(deci_part.data); -} - -char* apply_num_format(double num) -{ - char *format_str = u_sess->attr.attr_common.pset_num_format; - double format_num = str_to_num(format_str); - const int decimal = 10; - - StringInfoData result; - initStringInfo(&result); - bool is_negative = false; - - if (num < 0) { - num = -num; - is_negative = true; - } - - char *decimal_str = strrchr(format_str, '.'); - if (decimal_str == NULL) { - int remainder = (int)(num * decimal) % decimal; - if (remainder >= 5) { - num += 1; - } - if ((int)num > (int)format_num) { - out_of_range(&result, strlen(format_str) + 1, false); - } else { - extract_part(num, &result, is_negative, false, strlen(format_str), 0); - } - } else { - int deci_places = strlen(decimal_str) - 1; - int remainder = (int)(num * my_pow(decimal, deci_places + 1)) % decimal; - if (remainder >= 5) { - num += my_pow(decimal, -deci_places); - } - num = (double)(int)(num * my_pow(decimal, deci_places)) / my_pow(decimal, deci_places); - if (num > format_num) { - out_of_range(&result, strlen(format_str) + 1, false); - } else { - extract_part(num, &result, is_negative, true, strlen(format_str) - strlen(decimal_str), deci_places); - } - } - - return result.data; -} - -double str_to_num(char *format_str) -{ - double num; - const int decimal = 10; - - char *decimal_str = strrchr(format_str, '.'); - if (decimal_str == NULL) { - num = my_pow(decimal, strlen(format_str)) - 1; - } else { - num = my_pow(decimal, strlen(format_str) - strlen(decimal_str)) - 1 + (1 - my_pow(decimal, 1 - strlen(decimal_str))); - } - - return num; -} - -double my_pow(double base, int exp) -{ - double result = 1; - bool is_negative = false; - if (exp < 0) { - exp = -exp; - is_negative = true; - } - while (exp) { - if (exp & 1) { - result *= base; - } - exp >>= 1; - base *= base; - } - return is_negative ? 
(1 / result) : result; -} - -void extract_part(double num, StringInfo result, bool is_negative, bool extract_deci, int int_places, int deci_places) -{ - StringInfoData space_part, int_part, deci_part; - initStringInfo(&space_part); - initStringInfo(&int_part); - initStringInfo(&deci_part); - - extract_int_part(num, int_places, &space_part, &int_part); - - appendStringInfo(result, "%s", space_part.data); - if (unlikely(is_negative)) { - appendStringInfo(result, "%s", "-"); - } - appendStringInfo(result, "%s", int_part.data); - - if (extract_deci) { - extract_deci_part(num, deci_places, &deci_part); - appendStringInfo(result, "%s", "."); - appendStringInfo(result, "%s", deci_part.data); - } - - pfree(space_part.data); - pfree(int_part.data); - pfree(deci_part.data); -} - -void extract_int_part(double num, int places, StringInfo space_part, StringInfo int_part) -{ - bool can_append_space = true; - const int decimal = 10; - for (int i = places - 1; i >= 0; i--) { - int value = (int)(num / my_pow(decimal, i)) % decimal; - if (value == 0 && unlikely(can_append_space)) { - appendStringInfo(space_part, "%s", " "); - } else { - appendStringInfo(int_part, "%d", value); - can_append_space = false; - } - } -} - -void extract_deci_part(double num, int deci_places, StringInfo deci_part) -{ - const int decimal = 10; - - for (int i = 1; i <= deci_places; i++) { - int value = (int)(num * my_pow(decimal, i)) % decimal; - appendStringInfo(deci_part, "%d", value); - } -} - -void out_of_range(StringInfo result, int len, bool amend_length) -{ - if (unlikely(amend_length)) { - len++; - } - for (int i = 0; i < len; i++) { - appendStringInfo(result, "%s", "#"); - } -} diff --git a/src/common/backend/utils/misc/guc/guc_storage.cpp b/src/common/backend/utils/misc/guc/guc_storage.cpp index a60ae55de..b53d83820 100755 --- a/src/common/backend/utils/misc/guc/guc_storage.cpp +++ b/src/common/backend/utils/misc/guc/guc_storage.cpp @@ -209,6 +209,8 @@ static int GetLengthAndCheckReplConn(const char* ConnInfoList); #ifndef ENABLE_MULTIPLE_NODES static void assign_dcf_election_timeout(int newval, void* extra); +static void assign_dcf_auto_elc_priority_en(int newval, void* extra); +static void assign_dcf_election_switch_threshold(int newval, void* extra); static void assign_dcf_run_mode(int newval, void* extra); static void assign_dcf_log_level(const char* newval, void* extra); static void assign_dcf_max_log_file_size(int newval, void* extra); @@ -605,6 +607,18 @@ static void InitStorageConfigureNamesBool() NULL, NULL}, + {{"enable_wal_shipping_compression", + PGC_SIGHUP, + NODE_ALL, + REPLICATION_SENDING, + gettext_noop("Enables xlog compression during xlog shipping."), + NULL}, + &g_instance.attr.attr_storage.enable_wal_shipping_compression, + false, + NULL, + NULL, + NULL}, + {{"enable_mix_replication", PGC_POSTMASTER, NODE_ALL, @@ -641,7 +655,6 @@ static void InitStorageConfigureNamesBool() NULL, NULL}, #endif - {{"ha_module_debug", PGC_USERSET, NODE_ALL, @@ -879,6 +892,17 @@ static void InitStorageConfigureNamesBool() NULL, NULL, NULL}, + {{"enable_defer_calculate_snapshot", + PGC_SIGHUP, + NODE_ALL, + UNGROUPED, + gettext_noop("Enables deferred calculation of the MVCC snapshot at commit."), + NULL}, + &u_sess->attr.attr_storage.enable_defer_calculate_snapshot, + true, + NULL, + NULL, + NULL}, #ifdef USE_ASSERT_CHECKING {{"enable_segment", PGC_SIGHUP, @@ -943,6 +967,20 @@ static void InitStorageConfigureNamesBool() NULL, NULL}, #endif + +#ifdef ENABLE_MULTIPLE_NODES + {{"auto_csn_barrier", + PGC_SIGHUP, + NODE_DISTRIBUTE, + WAL, +
gettext_noop("Enable auto csn barrier creation."), + NULL}, + &g_instance.attr.attr_storage.auto_csn_barrier, + false, + NULL, + NULL, + NULL}, +#endif /* End-of-list marker */ {{NULL, (GucContext)0, @@ -1288,6 +1326,32 @@ static void InitStorageConfigureNamesInt() NULL, assign_dcf_election_timeout, NULL}, + {{"dcf_enable_auto_election_priority", + PGC_SIGHUP, + NODE_SINGLENODE, + REPLICATION_PAXOS, + gettext_noop("Sets whether automatic election priority adjustment is enabled for the local DCF node."), + NULL}, + &u_sess->attr.attr_storage.dcf_attr.dcf_auto_elc_priority_en, + 1, + 0, + 1, + NULL, + assign_dcf_auto_elc_priority_en, + NULL}, + {{"dcf_election_switch_threshold", + PGC_SIGHUP, + NODE_SINGLENODE, + REPLICATION_PAXOS, + gettext_noop("Sets the election switch threshold of the local DCF node."), + NULL}, + &u_sess->attr.attr_storage.dcf_attr.dcf_election_switch_threshold, + 0, + 0, + INT_MAX, + NULL, + assign_dcf_election_switch_threshold, + NULL}, {{"dcf_max_log_file_size", PGC_SIGHUP, NODE_SINGLENODE, @@ -2035,7 +2099,34 @@ static void InitStorageConfigureNamesInt() NULL, NULL, NULL}, - + {{"wal_flush_timeout", + PGC_SIGHUP, + NODE_ALL, + WAL_SETTINGS, + gettext_noop("Sets the timeout when iterating over table entries."), + NULL, + GUC_NOT_IN_SAMPLE}, + &g_instance.attr.attr_storage.wal_flush_timeout, + 2, + 0, + 90000000, + NULL, + NULL, + NULL}, + {{"wal_flush_delay", + PGC_SIGHUP, + NODE_ALL, + WAL_SETTINGS, + gettext_noop("Sets the delay time when iterating over table entries."), + NULL, + GUC_NOT_IN_SAMPLE}, + &g_instance.attr.attr_storage.wal_flush_delay, + 1, + 0, + 90000000, + NULL, + NULL, + NULL}, {{"checkpoint_wait_timeout", PGC_SIGHUP, NODE_ALL, @@ -2080,6 +2171,20 @@ static void InitStorageConfigureNamesInt() NULL, NULL, NULL}, + {{"archive_interval", + PGC_SIGHUP, + NODE_ALL, + WAL_ARCHIVING, + gettext_noop("Sets the OBS archive time interval."), + NULL, + GUC_UNIT_S}, + &u_sess->attr.attr_storage.archive_interval, + 1, + 1, + 1000, + NULL, + NULL, + NULL}, /* see max_connections */ {{"max_wal_senders", PGC_POSTMASTER, @@ -2094,7 +2199,7 @@ 16, #endif 0, - MAX_BACKENDS, + 1024, NULL, NULL, NULL}, @@ -2108,7 +2213,7 @@ &g_instance.attr.attr_storage.max_replication_slots, 8, 0, - MAX_BACKENDS, /* XXX? */ + 1024, /* XXX?
*/ NULL, NULL, NULL}, @@ -2729,6 +2834,34 @@ static void InitStorageConfigureNamesInt() NULL, NULL, NULL}, + {{"dw_file_num", + PGC_POSTMASTER, + NODE_ALL, + WAL_CHECKPOINTS, + gettext_noop("Sets the number of dw batch files."), + NULL, + 0}, + &g_instance.attr.attr_storage.dw_file_num, + 1, + 1, + 16, + NULL, + NULL, + NULL}, + {{"dw_file_size", + PGC_POSTMASTER, + NODE_ALL, + WAL_CHECKPOINTS, + gettext_noop("Sets the size of each dw batch file."), + NULL, + 0}, + &g_instance.attr.attr_storage.dw_file_size, + 256, + 32, + 256, + NULL, + NULL, + NULL}, {{"recovery_parse_workers", PGC_POSTMASTER, NODE_ALL, @@ -2889,27 +3022,27 @@ static void InitStorageConfigureNamesInt() NULL, NULL}, {{"undo_space_limit_size", - PGC_POSTMASTER, + PGC_SIGHUP, NODE_SINGLENODE, RESOURCES_DISK, gettext_noop("Undo space limit size for force recycle."), NULL, GUC_UNIT_BLOCKS}, - &g_instance.attr.attr_storage.undo_space_limit_size, - 4194304, /* 32 GB */ + &u_sess->attr.attr_storage.undo_space_limit_size, + 33554432, /* 256 GB */ 102400, /* 800 MB */ INT_MAX, NULL, NULL, NULL}, {{"undo_limit_size_per_transaction", - PGC_POSTMASTER, + PGC_SIGHUP, NODE_SINGLENODE, RESOURCES_DISK, gettext_noop("Undo limit size per transaction."), NULL, GUC_UNIT_BLOCKS}, - &g_instance.attr.attr_storage.undo_limit_size_transaction, + &u_sess->attr.attr_storage.undo_limit_size_transaction, 4194304, /* 32 GB */ 256, /* 2 MB */ INT_MAX, @@ -2990,7 +3123,7 @@ static void InitStorageConfigureNamesInt() gettext_noop("Set gtm option, 0 for GTM ,1 GTMLite and 2 GTMFree"), NULL}, &g_instance.attr.attr_storage.gtm_option, - GTMOPTION_GTM, + GTMOPTION_GTMLITE, GTMOPTION_GTM, GTMOPTION_GTMFREE, NULL, @@ -3044,13 +3177,27 @@ static void InitStorageConfigureNamesInt() gettext_noop("Sets the maximum retention time of objects in recyclebin."), NULL, GUC_UNIT_S}, - &u_sess->attr.attr_storage.recyclebin_retention, + &u_sess->attr.attr_storage.recyclebin_retention_time, 900, 1, INT_MAX, NULL, NULL, NULL}, + {{"undo_retention_time", + PGC_SIGHUP, + NODE_SINGLENODE, + UNGROUPED, + gettext_noop("Sets the maximum retention time of undo record."), + NULL, + GUC_UNIT_S}, + &u_sess->attr.attr_storage.undo_retention_time, + 0, + 0, + INT_MAX, + NULL, + NULL, + NULL}, /* End-of-list marker */ {{NULL, (GucContext)0, @@ -3743,7 +3890,7 @@ static void InitStorageConfigureNamesString() NULL, GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY}, &g_instance.attr.attr_storage.num_internal_lock_partitions_str, - "CLOG_PART=256,CSNLOG_PART=512,LOG2_LOCKTABLE_PART=4,TWOPHASE_PART=1", + "CLOG_PART=256,CSNLOG_PART=512,LOG2_LOCKTABLE_PART=4,TWOPHASE_PART=1,FASTPATH_PART=20", NULL, NULL, NULL}, @@ -3889,6 +4036,18 @@ static void InitStorageConfigureNamesString() NULL, NULL, NULL}, + {{"redo_bind_cpu_attr", + PGC_POSTMASTER, + NODE_ALL, + WAL_SETTINGS, + gettext_noop("bind redo worker threads to specified cpus"), + NULL, + GUC_LIST_INPUT | GUC_LIST_QUOTE | GUC_SUPERUSER_ONLY}, + &g_instance.attr.attr_storage.redo_bind_cpu_attr, + "nobind", + NULL, + NULL, + NULL}, {{NULL, (GucContext)0, (GucNodeType)0, @@ -4130,6 +4289,7 @@ bool check_enable_gtm_free(bool* newval, void** extra, GucSource source) /* Initialize storage critical lwlock partition num */ void InitializeNumLwLockPartitions(void) { + Assert(lengthof(LWLockPartInfo) == LWLOCK_PART_KIND); /* set default values */ SetLWLockPartDefaultNum(); /* Do str copy and remove space. 
*/ @@ -4300,6 +4460,118 @@ static inline bool GetReplCurArrayIsNull() } #endif +static int IsReplConnInfoChanged(const char* replConnInfo, const char* newval) +{ + char* temptok = NULL; + char* toker = NULL; + char* temp = NULL; + char* token = NULL; + char* tmpToken = NULL; + char* oldReplStr = NULL; + char* newReplStr = NULL; + int repl_length = 0; + replconninfo* newReplInfo = NULL; + replconninfo* ReplInfo_1 = t_thrd.postmaster_cxt.ReplConnArray[1]; + if (replConnInfo == NULL || newval == NULL) { + return NO_CHANGE; + } + + newReplInfo = ParseReplConnInfo(newval, &repl_length); + oldReplStr = pstrdup(replConnInfo); + newReplStr = pstrdup(newval); + + /* Added replication info and enabled new ip or port */ + if (strcmp(oldReplStr, "") == 0) { + if (strcmp(newReplStr, "") != 0) { + /* ReplConnInfo_1 is not configured, it is considered to be new repconninfo */ + if (ReplInfo_1 == NULL) { + pfree_ext(newReplInfo); + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT; + } + + if (strcmp(ReplInfo_1->localhost, newReplInfo->localhost) != 0 || + ReplInfo_1->localport != newReplInfo->localport || + ReplInfo_1->localheartbeatport != newReplInfo->localheartbeatport) { + pfree_ext(newReplInfo); + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT; + } else { + pfree_ext(newReplInfo); + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT; + } + } else { + pfree_ext(newReplInfo); + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return NO_CHANGE; + } + } + + pfree_ext(newReplInfo); + temp = strstr(oldReplStr, "iscascade"); + if (temp == NULL) { + temptok = strstr(newReplStr, "iscascade"); + if (temptok == NULL) { + /* Modify the old replication info, + excluding disaster recovery configuration information */ + if (strcmp(oldReplStr, newReplStr) == 0) { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return NO_CHANGE; + } else { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return OLD_REPL_CHANGE_IP_OR_PORT; + } + } else { + toker = strstr(newReplStr, "iscrossregion"); + if (toker == NULL) { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return OLD_REPL_CHANGE_IP_OR_PORT; + } else { + /* Modify the old replication info and + add disaster recovery configuration information */ + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return ADD_DISASTER_RECOVERY_INFO; + } + } + } else { + temptok = strstr(newReplStr, "iscascade"); + if (temptok == NULL) { + /* Modify the replication info message, + the new message does not carry disaster recovery information */ + token = strtok_r(oldReplStr, "d", &tmpToken); + if (strncasecmp(token, newReplStr, strlen(newReplStr)) == 0) { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return NO_CHANGE; + } else { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return OLD_REPL_CHANGE_IP_OR_PORT; + } + } else { + /* Modify the replication info carrying disaster recovery information */ + if (strcmp(oldReplStr, newReplStr) == 0) { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return NO_CHANGE; + } else { + pfree_ext(oldReplStr); + pfree_ext(newReplStr); + return OLD_REPL_CHANGE_IP_OR_PORT; + } + } + } +} + /* * @@GaussDB@@ * Brief : Parse replconninfo1. @@ -4316,9 +4588,9 @@ static void assign_replconninfo1(const char* newval, void* extra) * At present, ReplConnArray is only used by PM, so it is safe. 
*/ t_thrd.postmaster_cxt.ReplConnArray[1] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[1] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[1], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[1] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[1] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[1] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[1], newval); #ifndef ENABLE_MULTIPLE_NODES /* perceive single --> primary_standby */ @@ -4326,6 +4598,7 @@ static void assign_replconninfo1(const char* newval, void* extra) t_thrd.postmaster_cxt.HaShmData->current_mode == NORMAL_MODE && !GetReplCurArrayIsNull()) { t_thrd.postmaster_cxt.HaShmData->current_mode = PRIMARY_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); } #endif } @@ -4343,9 +4616,9 @@ static void assign_replconninfo2(const char* newval, void* extra) pfree(t_thrd.postmaster_cxt.ReplConnArray[2]); t_thrd.postmaster_cxt.ReplConnArray[2] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[2] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[2], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[2] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[2] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[2] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[2], newval); } } @@ -4357,9 +4630,9 @@ static void assign_replconninfo3(const char* newval, void* extra) pfree(t_thrd.postmaster_cxt.ReplConnArray[3]); t_thrd.postmaster_cxt.ReplConnArray[3] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[3] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[3], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[3] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[3] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[3] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[3], newval); } } @@ -4371,9 +4644,9 @@ static void assign_replconninfo4(const char* newval, void* extra) pfree(t_thrd.postmaster_cxt.ReplConnArray[4]); t_thrd.postmaster_cxt.ReplConnArray[4] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[4] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[4], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[4] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[4] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[4] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[4], newval); } } @@ -4385,9 +4658,9 @@ static void assign_replconninfo5(const char* newval, void* extra) pfree(t_thrd.postmaster_cxt.ReplConnArray[5]); t_thrd.postmaster_cxt.ReplConnArray[5] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[5] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[5], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[5] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[5] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[5] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[5], newval); } } @@ -4399,9 +4672,9 @@ static void assign_replconninfo6(const char* newval, void* extra) pfree(t_thrd.postmaster_cxt.ReplConnArray[6]); 
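/*
 * Illustrative sketch, not part of this patch: these assign_replconninfoN hooks
 * no longer record a plain boolean in ReplConnChanged[]; they store the
 * classification returned by IsReplConnInfoChanged() in ReplConnChangeType[].
 * A consumer can then distinguish changes that move the local endpoint (which
 * need the local listener to be rebuilt) from changes that merely extend the
 * topology. The handler names below are hypothetical, shown only to make that
 * distinction concrete.
 */
#if 0
static void ReactToReplConnChange(int idx)
{
    switch (t_thrd.postmaster_cxt.ReplConnChangeType[idx]) {
        case NO_CHANGE:
            break;
        case ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT:
        case OLD_REPL_CHANGE_IP_OR_PORT:
            RebuildLocalReplListener(idx);  /* hypothetical: local ip/port moved */
            break;
        case ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT:
        case ADD_DISASTER_RECOVERY_INFO:
            RefreshReplTopology(idx);       /* hypothetical: peers added, keep connections */
            break;
        default:
            break;
    }
}
#endif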
t_thrd.postmaster_cxt.ReplConnArray[6] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[6] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[6], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[6] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[6] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[6] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[6], newval); } } @@ -4413,9 +4686,9 @@ static void assign_replconninfo7(const char* newval, void* extra) pfree(t_thrd.postmaster_cxt.ReplConnArray[7]); t_thrd.postmaster_cxt.ReplConnArray[7] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[7] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[7], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[7] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[7] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[7] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[7], newval); } } @@ -4428,9 +4701,9 @@ static void assign_replconninfo8(const char* newval, void* extra) } t_thrd.postmaster_cxt.ReplConnArray[8] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[8] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[8], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[8] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[8] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[8] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[8], newval); } } @@ -4443,9 +4716,9 @@ static void assign_replconninfo9(const char* newval, void* extra) } t_thrd.postmaster_cxt.ReplConnArray[9] = ParseReplConnInfo(newval, &repl_length); - if (u_sess->attr.attr_storage.ReplConnInfoArr[9] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[9], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[9] = true; + if (u_sess->attr.attr_storage.ReplConnInfoArr[9] != NULL && newval != NULL) { + t_thrd.postmaster_cxt.ReplConnChangeType[9] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[9], newval); } } @@ -4459,8 +4732,9 @@ static void assign_replconninfo10(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[10] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[10] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[10], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[10] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[10], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[10] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[10], newval); } } @@ -4474,8 +4748,9 @@ static void assign_replconninfo11(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[11] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[11] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[11], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[11] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[11], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[11] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[11], newval); } } @@ -4489,8 +4764,9 @@ static void assign_replconninfo12(const char* newval, 
void* extra) t_thrd.postmaster_cxt.ReplConnArray[12] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[12] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[12], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[12] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[12], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[12] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[12], newval); } } @@ -4504,8 +4780,9 @@ static void assign_replconninfo13(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[13] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[13] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[13], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[13] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[13], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[13] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[13], newval); } } @@ -4519,8 +4796,9 @@ static void assign_replconninfo14(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[14] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[14] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[14], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[14] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[14], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[14] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[14], newval); } } @@ -4534,8 +4812,9 @@ static void assign_replconninfo15(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[15] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[15] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[15], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[15] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[15], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[15] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[15], newval); } } @@ -4549,8 +4828,9 @@ static void assign_replconninfo16(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[16] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[16] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[16], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[16] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[16], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[16] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[16], newval); } } @@ -4564,8 +4844,9 @@ static void assign_replconninfo17(const char* newval, void* extra) t_thrd.postmaster_cxt.ReplConnArray[17] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[17] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[17], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[17] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[17], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[17] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[17], newval); } } @@ -4579,8 +4860,9 @@ static void assign_replconninfo18(const char* newval, void* extra) 
t_thrd.postmaster_cxt.ReplConnArray[18] = ParseReplConnInfo(newval, &repl_length); if (u_sess->attr.attr_storage.ReplConnInfoArr[18] != NULL && newval != NULL && - strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[18], newval) != 0) { - t_thrd.postmaster_cxt.ReplConnChanged[18] = true; + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[18], newval)) { + t_thrd.postmaster_cxt.ReplConnChangeType[18] = + IsReplConnInfoChanged(u_sess->attr.attr_storage.ReplConnInfoArr[18], newval); } } @@ -5029,6 +5311,33 @@ static ReplConnInfo* ParseReplConnInfo(const char* ConnInfoList, int* InfoLength } } +#ifdef ENABLE_LITE_MODE + iter = strstr(token, "sslmode"); + if (iter != NULL) { + iter += sizeof("sslmode"); + while (*iter == ' ' || *iter == '=') { + iter++; + } + + pNext = iter; + iplen = 0; + while (*pNext != ' ' && *pNext != '\0') { + iplen++; + pNext++; + } + errorno = strncpy_s(repl[parsed].sslmode, SSL_MODE_LEN - 1, iter, iplen); + securec_check(errorno, "\0", "\0"); + repl[parsed].sslmode[iplen] = '\0'; + + if (strcmp(repl[parsed].sslmode, "disable") != 0 && strcmp(repl[parsed].sslmode, "allow") != 0 && + strcmp(repl[parsed].sslmode, "prefer") != 0 && strcmp(repl[parsed].sslmode, "require") != 0 && + strcmp(repl[parsed].sslmode, "verify-ca") != 0 && strcmp(repl[parsed].sslmode, "verify-full") != 0) { + errorno = strcpy_s(repl[parsed].sslmode, SSL_MODE_LEN, "prefer"); + securec_check(errorno, "\0", "\0"); + } + } +#endif + token = strtok_r(NULL, ",", &tmp_token); parsed++; } @@ -5150,6 +5459,20 @@ static void assign_dcf_election_timeout(int newval, void* extra) dcf_set_param("ELECTION_TIMEOUT", std::to_string(newval).c_str()); } +static void assign_dcf_auto_elc_priority_en(int newval, void* extra) +{ + u_sess->attr.attr_storage.dcf_attr.dcf_auto_elc_priority_en = newval; + if (t_thrd.proc_cxt.MyProcPid == PostmasterPid) + dcf_set_param("AUTO_ELC_PRIORITY_EN", std::to_string(newval).c_str()); +} + +static void assign_dcf_election_switch_threshold(int newval, void* extra) +{ + u_sess->attr.attr_storage.dcf_attr.dcf_election_switch_threshold = newval; + if (t_thrd.proc_cxt.MyProcPid == PostmasterPid) + dcf_set_param("ELECTION_SWITCH_THRESHOLD", std::to_string(newval).c_str()); +} + static void assign_dcf_run_mode(int newval, void* extra) { u_sess->attr.attr_storage.dcf_attr.dcf_run_mode = newval; diff --git a/src/common/backend/utils/misc/pg_controldata.cpp b/src/common/backend/utils/misc/pg_controldata.cpp index e5624c337..91f18c8d8 100644 --- a/src/common/backend/utils/misc/pg_controldata.cpp +++ b/src/common/backend/utils/misc/pg_controldata.cpp @@ -234,7 +234,7 @@ Datum pg_control_checkpoint(PG_FUNCTION_ARGS) */ XLByteToSeg(controlFile->checkPointCopy.redo, segno); - XLogFileName(xlogfilename, controlFile->checkPointCopy.ThisTimeLineID, segno); + XLogFileName(xlogfilename, MAXFNAMELEN, controlFile->checkPointCopy.ThisTimeLineID, segno); i = 0; /* Populate the values and null arrays */ diff --git a/src/common/backend/utils/misc/postgresql_distribute.conf.sample b/src/common/backend/utils/misc/postgresql_distribute.conf.sample index de1abba8f..00cbf2fe4 100644 --- a/src/common/backend/utils/misc/postgresql_distribute.conf.sample +++ b/src/common/backend/utils/misc/postgresql_distribute.conf.sample @@ -470,7 +470,7 @@ log_min_duration_statement = 1800000 # -1 is disabled, 0 logs all statements #log_pagewriter = off log_connections = off # log connection requirement from client log_disconnections = off # log disconnection from client -log_duration = on # log the execution time of each 
query +log_duration = off # log the execution time of each query # when log_duration is on and log_min_duration_statement # is larger than zero, log the ones whose execution time # is larger than this threshold @@ -802,6 +802,8 @@ audit_enabled = on #audit_function_exec = 0 #audit_copy_exec = 0 #audit_set_parameter = 1 # whether audit set parameter operation +#audit_xid_info = 0 # whether record xid info in audit log +#audit_thread_num = 1 #Choose which style to print the explain info, normal,pretty,summary,run #explain_perf_mode = normal diff --git a/src/common/backend/utils/misc/postgresql_single.conf.sample b/src/common/backend/utils/misc/postgresql_single.conf.sample index 76ff11c30..2033781df 100644 --- a/src/common/backend/utils/misc/postgresql_single.conf.sample +++ b/src/common/backend/utils/misc/postgresql_single.conf.sample @@ -476,7 +476,7 @@ log_min_duration_statement = 1800000 # -1 is disabled, 0 logs all statements #log_pagewriter = off log_connections = off # log connection requirement from client log_disconnections = off # log disconnection from client -log_duration = on # log the execution time of each query +log_duration = off # log the execution time of each query # when log_duration is on and log_min_duration_statement # is larger than zero, log the ones whose execution time # is larger than this threshold @@ -737,6 +737,8 @@ audit_enabled = on #audit_function_exec = 0 #audit_copy_exec = 0 #audit_set_parameter = 1 # whether audit set parameter operation +#audit_xid_info = 0 # whether record xid info in audit log +#audit_thread_num = 1 #Choose which style to print the explain info, normal,pretty,summary,run #explain_perf_mode = normal diff --git a/src/common/backend/utils/misc/superuser.cpp b/src/common/backend/utils/misc/superuser.cpp index c24429d4a..01ed52d07 100644 --- a/src/common/backend/utils/misc/superuser.cpp +++ b/src/common/backend/utils/misc/superuser.cpp @@ -283,7 +283,6 @@ bool CheckExecDirectPrivilege(const char* query) static void cacheSuperOrSysadmin(Oid roleid) { HeapTuple rtup = NULL; - u_sess->sec_cxt.last_roleid_is_super = false; u_sess->sec_cxt.last_roleid_is_sysdba = false; u_sess->sec_cxt.last_roleid_is_auditadmin = false; @@ -343,7 +342,7 @@ static void cacheSuperOrSysadmin(Oid roleid) /* If first time through, set up callback for cache flushes */ if (!u_sess->sec_cxt.roleid_callback_registered) { - CacheRegisterSyscacheCallback(AUTHOID, RoleidCallback, (Datum)0); + CacheRegisterSessionSyscacheCallback(AUTHOID, RoleidCallback, (Datum)0); u_sess->sec_cxt.roleid_callback_registered = true; } diff --git a/src/common/backend/utils/mmgr/AsanMemoryAllocator.cpp b/src/common/backend/utils/mmgr/AsanMemoryAllocator.cpp index 4a919f06d..b8f712501 100644 --- a/src/common/backend/utils/mmgr/AsanMemoryAllocator.cpp +++ b/src/common/backend/utils/mmgr/AsanMemoryAllocator.cpp @@ -824,4 +824,15 @@ void dumpAsanBlock(AsanSet set, StringInfoData* memoryBuf) return; } + +void GetAsanBlockInfo(AsanSet set, StringInfoData* memoryBuf) +{ + for (AsanBlock blk = set->blocks; blk != NULL; blk = blk->next) { + uint32 realSize = ASAN_BLOCKRELSZ(blk->requestSize); + appendStringInfo(memoryBuf, "%s:%d, %u\n", blk->file, blk->line, realSize); + } + + return; +} + #endif diff --git a/src/common/backend/utils/mmgr/aset.cpp b/src/common/backend/utils/mmgr/aset.cpp index cb954ea76..d24d354f1 100644 --- a/src/common/backend/utils/mmgr/aset.cpp +++ b/src/common/backend/utils/mmgr/aset.cpp @@ -125,7 +125,6 @@ * -------------------- */ -#define ALLOC_CHUNKHDRSZ 
MAXALIGN(sizeof(AllocChunkData)) #ifdef MEMORY_CONTEXT_CHECKING #define ALLOC_MAGICHDRSZ MAXALIGN(sizeof(AllocMagicData)) #else @@ -143,27 +142,6 @@ typedef struct AllocMagicData { } AllocMagicData; #endif -/* - * AllocChunk - * The prefix of each piece of memory in an AllocBlock - * - * NB: this MUST match StandardChunkHeader as defined by utils/memutils.h. - */ -typedef struct AllocChunkData { - /* aset is the owning aset if allocated, or the freelist link if free */ - void* aset; - /* size is always the size of the usable space in the chunk */ - Size size; -#ifdef MEMORY_CONTEXT_CHECKING - /* when debugging memory usage, also store actual requested size */ - /* this is zero in a free chunk */ - Size requested_size; - const char* file; /* __FILE__ of palloc/palloc0 call */ - int line; /* __LINE__ of palloc/palloc0 call */ - uint32 prenum; /* prefix magic number */ -#endif -} AllocChunkData; - /* * AllocPointerIsValid * True iff pointer is valid allocation pointer. @@ -232,7 +210,7 @@ static const unsigned char LogTable256[256] = {0, #ifdef MEMORY_CONTEXT_CHECKING #define CHECK_CONTEXT_OWNER(context) \ - Assert((context->session_id == u_sess->session_id) || (context->thread_id == gs_thread_self())) + Assert((context->thread_id == gs_thread_self() || (context->session_id == u_sess->session_id))) #else #define CHECK_CONTEXT_OWNER(context) ((void)0) #endif @@ -836,6 +814,8 @@ void GenericMemoryAllocator::AllocSetDelete(MemoryContext context) template void* GenericMemoryAllocator::AllocSetAlloc(MemoryContext context, Size align, Size size, const char* file, int line) { + Assert(file != NULL); + Assert(line != 0); AllocSet set = (AllocSet)context; AllocBlock block; AllocChunk chunk; @@ -908,10 +888,12 @@ void* GenericMemoryAllocator::AllocSetAlloc(MemoryContext context, Size align, S chunk = (AllocChunk)(((char*)block) + ALLOC_BLOCKHDRSZ); chunk->aset = set; chunk->size = chunk_size; -#ifdef MEMORY_CONTEXT_CHECKING - chunk->requested_size = size; +#ifdef MEMORY_CONTEXT_TRACK chunk->file = file; chunk->line = line; +#endif +#ifdef MEMORY_CONTEXT_CHECKING + chunk->requested_size = size; chunk->prenum = PremagicNum; /* set mark to catch clobber of "unused" space */ if (size < chunk_size) @@ -973,11 +955,12 @@ void* GenericMemoryAllocator::AllocSetAlloc(MemoryContext context, Size align, S chunk->aset = (void*)set; set->freeSpace -= (chunk->size + ALLOC_CHUNKHDRSZ); - -#ifdef MEMORY_CONTEXT_CHECKING - chunk->requested_size = size; +#ifdef MEMORY_CONTEXT_TRACK chunk->file = file; chunk->line = line; +#endif +#ifdef MEMORY_CONTEXT_CHECKING + chunk->requested_size = size; chunk->prenum = PremagicNum; /* set mark to catch clobber of "unused" space */ if (size < chunk->size) @@ -1051,11 +1034,13 @@ void* GenericMemoryAllocator::AllocSetAlloc(MemoryContext context, Size align, S availspace -= (availchunk + ALLOC_CHUNKHDRSZ); chunk->size = availchunk; +#ifdef MEMORY_CONTEXT_TRACK + chunk->file = NULL; + chunk->line = 0; +#endif #ifdef MEMORY_CONTEXT_CHECKING chunk->requested_size = 0; /* mark it free */ chunk->prenum = 0; - chunk->file = NULL; - chunk->line = 0; #endif chunk->aset = (void*)set->freelist[a_fidx]; set->freelist[a_fidx] = chunk; @@ -1146,10 +1131,12 @@ void* GenericMemoryAllocator::AllocSetAlloc(MemoryContext context, Size align, S chunk->aset = (void*)set; chunk->size = chunk_size; -#ifdef MEMORY_CONTEXT_CHECKING - chunk->requested_size = size; +#ifdef MEMORY_CONTEXT_TRACK chunk->file = file; chunk->line = line; +#endif +#ifdef MEMORY_CONTEXT_CHECKING + chunk->requested_size = size; 
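/*
 * Illustrative sketch, not part of this patch: the hunks in this file split the
 * chunk-header bookkeeping in two. The allocation site (file/line) now lives
 * under MEMORY_CONTEXT_TRACK, so leak attribution stays available without full
 * debug checking, while requested_size and the magic numbers remain under
 * MEMORY_CONTEXT_CHECKING. The AllocChunkData definition removed from this file
 * above would therefore take roughly the following shape in its new shared
 * header; the exact field order is an assumption.
 */
#if 0
typedef struct AllocChunkData {
    void* aset;          /* owning set if allocated, or freelist link if free */
    Size size;           /* usable space in the chunk */
#ifdef MEMORY_CONTEXT_TRACK
    const char* file;    /* __FILE__ of palloc/palloc0 call */
    int line;            /* __LINE__ of palloc/palloc0 call */
#endif
#ifdef MEMORY_CONTEXT_CHECKING
    Size requested_size; /* actual requested size; zero in a free chunk */
    uint32 prenum;       /* prefix magic number */
#endif
} AllocChunkData;
#endif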
chunk->prenum = PremagicNum; /* set mark to catch clobber of "unused" space */ if (size < chunk->size) @@ -1288,17 +1275,18 @@ void GenericMemoryAllocator::AllocSetFree(MemoryContext context, void* pointer) int fidx = AllocSetFreeIndex(chunk->size); chunk->aset = (void*)set->freelist[fidx]; - set->freeSpace += chunk->size + ALLOC_CHUNKHDRSZ; -#ifdef MEMORY_CONTEXT_CHECKING - /* Reset requested_size to 0 in chunks that are on freelist */ - AllocMagicData* magic = - (AllocMagicData*)(((char*)chunk) + ALLOC_CHUNKHDRSZ + MAXALIGN(chunk->requested_size) - ALLOC_MAGICHDRSZ); - chunk->requested_size = 0; +#ifdef MEMORY_CONTEXT_TRACK chunk->file = NULL; chunk->line = 0; +#endif +#ifdef MEMORY_CONTEXT_CHECKING + /* Reset requested_size to 0 in chunks that are on freelist */ + chunk->requested_size = 0; chunk->prenum = 0; + AllocMagicData* magic = + (AllocMagicData*)(((char*)chunk) + ALLOC_CHUNKHDRSZ + MAXALIGN(chunk->requested_size) - ALLOC_MAGICHDRSZ); magic->aset = NULL; magic->size = 0; magic->posnum = 0; @@ -1371,16 +1359,17 @@ void* GenericMemoryAllocator::AllocSetRealloc( */ if (oldsize >= (size + ALLOC_MAGICHDRSZ)) { size += ALLOC_MAGICHDRSZ; +#ifdef MEMORY_CONTEXT_TRACK + chunk->file = file; + chunk->line = line; +#endif #ifdef MEMORY_CONTEXT_CHECKING #ifdef RANDOMIZE_ALLOCATED_MEMORY /* We can only fill the extra space if we know the prior request */ if (size > chunk->requested_size) randomize_mem((char*)AllocChunkGetPointer(chunk) + chunk->requested_size, size - chunk->requested_size); #endif - chunk->requested_size = size; - chunk->file = file; - chunk->line = line; chunk->prenum = PremagicNum; /* set mark to catch clobber of "unused" space */ if (size < oldsize) @@ -1459,16 +1448,16 @@ void* GenericMemoryAllocator::AllocSetRealloc( if (block->next) block->next->prev = block; chunk->size = chksize; - +#ifdef MEMORY_CONTEXT_TRACK + chunk->file = file; + chunk->line = line; +#endif #ifdef MEMORY_CONTEXT_CHECKING #ifdef RANDOMIZE_ALLOCATED_MEMORY /* We can only fill the extra space if we know the prior request */ randomize_mem((char*)AllocChunkGetPointer(chunk) + chunk->requested_size, size - chunk->requested_size); #endif - chunk->requested_size = size; - chunk->file = file; - chunk->line = line; chunk->prenum = PremagicNum; /* set mark to catch clobber of "unused" space */ if (size < chunk->size) @@ -1793,6 +1782,49 @@ void dumpAllocBlock(AllocSet set, StringInfoData* memoryBuf) #endif /* MEMORY_CONTEXT_CHECKING */ +#ifdef MEMORY_CONTEXT_TRACK +/* + * chunk walker + */ +static void GetAllocChunkInfo(AllocSet set, AllocBlock blk, StringInfoData* memoryBuf) +{ + char* bpoz = ((char*)blk) + ALLOC_BLOCKHDRSZ; + while (bpoz < blk->freeptr) { + AllocChunk chunk = (AllocChunk)bpoz; + Size chsize = chunk->size; + + /* the chunk is free, so skip it */ + if (chunk->aset != set) { + bpoz += ALLOC_CHUNKHDRSZ + chsize; + continue; + } + + if (memoryBuf != NULL) { + appendStringInfo(memoryBuf, "%s:%d, %lu\n", chunk->file, chunk->line, chunk->size); + } + + bpoz += ALLOC_CHUNKHDRSZ + chsize; + } +} + +void GetAllocBlockInfo(AllocSet set, StringInfoData* memoryBuf) +{ + for (AllocBlock blk = set->blocks; blk != NULL; blk = blk->next) { + char* bpoz = ((char*)blk) + ALLOC_BLOCKHDRSZ; + long blk_used = blk->freeptr - bpoz; + + /* empty block - empty can be keeper-block only (from AllocSetCheck()) */ + if (!blk_used) + continue; + + /* there are chunks in block */ + GetAllocChunkInfo(set, blk, memoryBuf); + } + + return; +} + +#endif /* * alloc_trunk_size * Given a width, calculate how many bytes are 
actually allocated diff --git a/src/common/backend/utils/mmgr/mcxt.cpp b/src/common/backend/utils/mmgr/mcxt.cpp index 348a03df8..f4b722e12 100644 --- a/src/common/backend/utils/mmgr/mcxt.cpp +++ b/src/common/backend/utils/mmgr/mcxt.cpp @@ -64,7 +64,6 @@ THR_LOCAL MemoryContext AlignMemoryContext = NULL; static void MemoryContextStatsInternal(MemoryContext context, int level); static void FreeMemoryContextList(List* context_list); -static void* MemoryAllocFromContext(MemoryContext context, Size size, const char* file, int line); #ifdef PGXC void* allocTopCxt(size_t s); @@ -73,7 +72,6 @@ void* allocTopCxt(size_t s); /***************************************************************************** * EXPORTED ROUTINES * *****************************************************************************/ - static inline void InsertMemoryAllocInfo(const void* pointer, MemoryContext context, const char* file, int line, Size size) { @@ -115,7 +113,6 @@ static inline void RemoveMemoryContextInfo(MemoryContext context) } } } - /* * MemoryContextInit @@ -370,6 +367,13 @@ void MemoryContextDelete(MemoryContext context) if (!IsTopMemCxt(context)) { PreventActionOnSealedContext(context); + } else { +#ifdef MEMORY_CONTEXT_CHECKING + /* before delete top memcxt, you should close lsc */ + if (EnableGlobalSysCache() && context == t_thrd.top_mem_cxt && t_thrd.lsc_cxt.lsc != NULL) { + Assert(t_thrd.lsc_cxt.lsc->is_closed); + } +#endif } MemoryContext old_context = MemoryContextSwitchTo(t_thrd.top_mem_cxt); @@ -379,7 +383,7 @@ void MemoryContextDelete(MemoryContext context) (void)MemoryContextSwitchTo(old_context); /* u_sess->top_mem_cxt may be reused by other threads, set it null before the memory it points to be freed. */ - if (context == u_sess->top_mem_cxt) { + if (u_sess != NULL && context == u_sess->top_mem_cxt) { u_sess->top_mem_cxt = NULL; } @@ -430,6 +434,13 @@ void MemoryContextResetAndDeleteChildren(MemoryContext context) AssertArg(MemoryContextIsValid(context)); if (!IsTopMemCxt(context)) { PreventActionOnSealedContext(context); + } else { +#ifdef MEMORY_CONTEXT_CHECKING + /* before delete top memcxt, you should close lsc */ + if (EnableGlobalSysCache() && context == t_thrd.top_mem_cxt && t_thrd.lsc_cxt.lsc != NULL) { + Assert(t_thrd.lsc_cxt.lsc->is_closed); + } +#endif } List context_list = {T_List, 0, NULL, NULL}; @@ -665,6 +676,10 @@ static void MemoryContextStatsInternal(MemoryContext context, int level) void MemoryContextCheck(MemoryContext context, bool own_by_session) { MemoryContext child; + + if (!g_instance.attr.attr_memory.enable_memory_context_check_debug) { + return; + } if (unlikely(context == NULL)) { elog(PANIC, "Switch to Invalid memory context"); } @@ -952,16 +967,17 @@ void MemoryContextCheckMaxSize(MemoryContext context, Size size, const char* fil void MemoryContextCheckSessionMemory(MemoryContext context, Size size, const char* file, int line) { /* libcomm permanent thread don't need to check session memory */ - if (STATEMENT_MAX_MEM && (t_thrd.shemem_ptr_cxt.mySessionMemoryEntry != NULL) && + if ((t_thrd.shemem_ptr_cxt.mySessionMemoryEntry != NULL) && (t_thrd.comm_cxt.LibcommThreadType == LIBCOMM_NONE) && (context->level >= MEMORY_CONTEXT_CONTROL_LEVEL) && !t_thrd.int_cxt.CritSectionCount && !(AmPostmasterProcess()) && IsNormalProcessingMode()) { int used = (t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->queryMemInChunks << (chunkSizeInBits - BITS_IN_MB)) << BITS_IN_KB; - if (STATEMENT_MAX_MEM < used) + if (u_sess->attr.attr_sql.statement_max_mem < used) ereport(ERROR, 
(errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg( - "Session used memory %d Kbytes is beyond the limitation %d Kbytes.", used, STATEMENT_MAX_MEM), + "Session used memory %d Kbytes is beyond the limitation %d Kbytes.", used, + u_sess->attr.attr_sql.statement_max_mem), errdetail("Session estimated memory is %d Mbytes and MemoryContext %s request of size %lu " "bytes.[file:%s,line:%d]", t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->estimate_memory, @@ -972,7 +988,7 @@ void MemoryContextCheckSessionMemory(MemoryContext context, Size size, const cha } } -static void* MemoryAllocFromContext(MemoryContext context, Size size, const char* file, int line) +void* MemoryAllocFromContext(MemoryContext context, Size size, const char* file, int line) { void* ret = NULL; if (!AllocSizeIsValid(size)) { @@ -1000,7 +1016,9 @@ static void* MemoryAllocFromContext(MemoryContext context, Size size, const char #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } InsertMemoryAllocInfo(ret, context, file, line, size); @@ -1016,11 +1034,10 @@ static void* MemoryAllocFromContext(MemoryContext context, Size size, const char void* MemoryContextAllocDebug(MemoryContext context, Size size, const char* file, int line) { AssertArg(MemoryContextIsValid(context)); - PreventActionOnSealedContext(context); + return MemoryAllocFromContext(context, size, file, line); } - /* * MemoryContextAllocZero * Like MemoryContextAlloc, but clears allocated memory @@ -1033,9 +1050,9 @@ void* MemoryContextAllocZeroDebug(MemoryContext context, Size size, const char* void* ret = NULL; AssertArg(MemoryContextIsValid(context)); - +#ifdef MEMORY_CONTEXT_CHECKING PreventActionOnSealedContext(context); - +#endif if (!AllocSizeIsValid(size)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -1061,8 +1078,9 @@ void* MemoryContextAllocZeroDebug(MemoryContext context, Size size, const char* #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } MemSetAligned(ret, 0, size); InsertMemoryAllocInfo(ret, context, file, line, size); @@ -1110,8 +1128,9 @@ void* MemoryContextAllocZeroAlignedDebug(MemoryContext context, Size size, const #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } MemSetLoop(ret, 0, size); InsertMemoryAllocInfo(ret, context, file, line, size); @@ -1128,8 +1147,9 @@ void* palloc_extended(Size size, int flags) void* ret = NULL; AssertArg(MemoryContextIsValid(CurrentMemoryContext)); +#ifdef MEMORY_CONTEXT_CHECKING PreventActionOnSealedContext(CurrentMemoryContext); - +#endif if (!AllocSizeIsValid(size)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -1150,8 +1170,9 @@ void* palloc_extended(Size size, int flags) MemSetAligned(ret, 0, size); /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(CurrentMemoryContext, size, __FILE__, __LINE__); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(CurrentMemoryContext, size, __FILE__, __LINE__); + } InsertMemoryAllocInfo(ret, CurrentMemoryContext, __FILE__, 
__LINE__, size); return ret; @@ -1194,11 +1215,12 @@ void pfree(void* pointer) #endif AssertArg(MemoryContextIsValid(context)); +#ifdef MEMORY_CONTEXT_CHECKING if (!IsTopMemCxt(context)) { /* No prevent on top memory context as they may be sealed. */ PreventActionOnSealedContext(context); } - +#endif RemoveMemoryAllocInfo(pointer, context); (*context->methods->free_p)(context, pointer); @@ -1246,8 +1268,9 @@ void* repalloc_noexcept_Debug(void* pointer, Size size, const char* file, int li return NULL; } /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } InsertMemoryAllocInfo(ret, context, file, line, size); return ret; @@ -1308,8 +1331,9 @@ void* repallocDebug(void* pointer, Size size, const char* file, int line) #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } InsertMemoryAllocInfo(ret, context, file, line, size); return ret; @@ -1350,8 +1374,9 @@ void* MemoryContextMemalignAllocDebug(MemoryContext context, Size align, Size si #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } InsertMemoryAllocInfo(ret, context, file, line, size); return ret; @@ -1395,8 +1420,9 @@ void* MemoryContextAllocHugeDebug(MemoryContext context, Size size, const char* #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); - + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } InsertMemoryAllocInfo(ret, context, file, line, size); return ret; @@ -1460,7 +1486,9 @@ void* repallocHugeDebug(void* pointer, Size size, const char* file, int line) #endif /* check if the session used memory is beyond the limitation */ - MemoryContextCheckSessionMemory(context, size, file, line); + if (unlikely(STATEMENT_MAX_MEM)) { + MemoryContextCheckSessionMemory(context, size, file, line); + } InsertMemoryAllocInfo(ret, context, file, line, size); @@ -1621,6 +1649,13 @@ void MemoryContextDestroyAtThreadExit(MemoryContext context) MemoryContext pContext = context; if (!IsTopMemCxt(context)) { PreventActionOnSealedContext(context); + } else { +#ifdef MEMORY_CONTEXT_CHECKING + /* before delete top memcxt, you should close lsc */ + if (EnableGlobalSysCache() && context == t_thrd.top_mem_cxt && t_thrd.lsc_cxt.lsc != NULL) { + Assert(t_thrd.lsc_cxt.lsc->is_closed); + } +#endif } if (pContext != NULL) { diff --git a/src/common/backend/utils/mmgr/memprot.cpp b/src/common/backend/utils/mmgr/memprot.cpp index 0749658e2..75522c1d3 100755 --- a/src/common/backend/utils/mmgr/memprot.cpp +++ b/src/common/backend/utils/mmgr/memprot.cpp @@ -114,17 +114,6 @@ bool gs_memory_enjection(void) } #endif -bool compressed_mem_reserve(Size sz, bool protect) -{ - return MemoryProtectFunctions::gs_memprot_reserve(sz, protect); -} - -void compressed_mem_release(Size sz) -{ - MemoryProtectFunctions::gs_memprot_release(sz); - -} - /* * check if the node is on heavy memory status now? * is strict is true, we'll do some pre-judgement. 
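/*
 * Illustrative sketch, not part of this patch: the mcxt.cpp hunks above all
 * rewrite the unconditional call
 *     MemoryContextCheckSessionMemory(context, size, file, line);
 * into one guarded by if (unlikely(STATEMENT_MAX_MEM)). The check walks
 * per-session accounting state on every allocation, so hoisting the cheap
 * "is a statement memory limit configured at all" predicate in front of the
 * call, and marking it unlikely(), keeps the common allocation fast path free
 * of the function call when no limit is set. Generic form of the idiom, with
 * hypothetical names:
 */
#if 0
if (unlikely(limit_is_configured)) {
    expensive_limit_check(context, size, file, line);
}
#endif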
@@ -918,36 +907,6 @@ int MemoryProtectFunctions::gs_posix_memalign(void** memptr, Size alignment, Siz return ENOMEM; /* insufficient memory */ } -/** - * reseve memory for mmap of compressed table - * @tparam mem_type MEM_SHRD is supported only - * @param sz reserved size(bytes) - * @param needProtect - * @return success or not - */ -template -bool MemoryProtectFunctions::gs_memprot_reserve(Size sz, bool needProtect) -{ - if (type != MEM_SHRD) { - return false; - } - return memTracker_ReserveMem(sz, needProtect); -} - -/** - * release the momery allocated by gs_memprot_reserve - * @tparam type MEM_SHRD is supported only - * @param sz free size(bytes) - */ -template -void MemoryProtectFunctions::gs_memprot_release(Size sz) -{ - if (type != MEM_SHRD) { - return; - } - memTracker_ReleaseMem(sz); -} - /* thread level initialization */ void gs_memprot_thread_init(void) { diff --git a/src/common/backend/utils/mmgr/portalmem.cpp b/src/common/backend/utils/mmgr/portalmem.cpp index cb8cef96f..fddc2e39d 100755 --- a/src/common/backend/utils/mmgr/portalmem.cpp +++ b/src/common/backend/utils/mmgr/portalmem.cpp @@ -206,7 +206,7 @@ Portal CreatePortal(const char* name, bool allowDup, bool dupSilent, bool is_fro ereport(ERROR, (errcode(ERRCODE_DUPLICATE_CURSOR), errmsg("cursor \"%s\" already exists", name))); if (dupSilent == false) ereport(WARNING, (errcode(ERRCODE_DUPLICATE_CURSOR), errmsg("closing existing cursor \"%s\"", name))); - PortalDrop(portal, false, true); + PortalDrop(portal, false); } /* make new portal structure */ @@ -222,7 +222,6 @@ Portal CreatePortal(const char* name, bool allowDup, bool dupSilent, bool is_fro /* create a resource owner for the portal */ portal->resowner = ResourceOwnerCreate(t_thrd.utils_cxt.CurTransactionResourceOwner, "Portal", THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)); - RESOWNER_LOG("create", portal->resowner); /* initialize portal fields that don't start off zero */ portal->status = PORTAL_NEW; @@ -244,8 +243,11 @@ Portal CreatePortal(const char* name, bool allowDup, bool dupSilent, bool is_fro CURSOR_ATTRIBUTE_NUMBER * sizeof(void*)); securec_check(rc, "\0", "\0"); portal->funcUseCount = 0; + portal->hasStreamForPlpgsql = false; #ifndef ENABLE_MULTIPLE_NODES portal->streamInfo.Reset(); + portal->isAutoOutParam = false; + portal->isPkgCur = false; #endif /* put portal in table (sets portal->name) */ PortalHashTableInsert(portal, name); @@ -370,11 +372,23 @@ void PortalCreateHoldStore(Portal portal) * Create the memory context that is used for storage of the tuple set. * Note this is NOT a child of the portal's heap memory. */ +#ifndef ENABLE_MULTIPLE_NODES + if (portal->isAutoOutParam) { + portal->holdContext = GetAvailableHoldContext(u_sess->plsql_cxt.auto_parent_session_pkgs->portalContext); + } else { + portal->holdContext = AllocSetContextCreate(u_sess->top_portal_cxt, + "PortalHoldContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + } +#else portal->holdContext = AllocSetContextCreate(u_sess->top_portal_cxt, "PortalHoldContext", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); +#endif /* * Create the tuple store, selecting cross-transaction temp files, and @@ -495,7 +509,7 @@ void MarkPortalFailed(Portal portal) * PortalDrop * Destroy the portal. 
*/ -void PortalDrop(Portal portal, bool isTopCommit, bool isInCreate) +void PortalDrop(Portal portal, bool isTopCommit) { if (!PortalIsValid(portal)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("portal is NULL"))); @@ -553,19 +567,10 @@ void PortalDrop(Portal portal, bool isTopCommit, bool isInCreate) */ if (portal->resowner && (!isTopCommit || portal->status == PORTAL_FAILED)) { bool isCommit = (portal->status != PORTAL_FAILED); - if (portal->resowner && isInCreate && ENABLE_SQL_BETA_FEATURE(RESOWNER_DEBUG)) { - RESOWNER_LOG("delete for create", portal->resowner); - ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, false); - ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_LOCKS, isCommit, false); - ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_AFTER_LOCKS, isCommit, false); - portal->resowner = NULL; - } else { - RESOWNER_LOG("delete", portal->resowner); - ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, false); - ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_LOCKS, isCommit, false); - ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_AFTER_LOCKS, isCommit, false); - ResourceOwnerDelete(portal->resowner); - } + ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, false); + ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_LOCKS, isCommit, false); + ResourceOwnerRelease(portal->resowner, RESOURCE_RELEASE_AFTER_LOCKS, isCommit, false); + ResourceOwnerDelete(portal->resowner); } portal->resowner = NULL; @@ -574,7 +579,12 @@ void PortalDrop(Portal portal, bool isTopCommit, bool isInCreate) * conditions; since the tuplestore would have been using cross- * transaction storage, its temp files need to be explicitly deleted. */ +#ifndef ENABLE_MULTIPLE_NODES + /* autonomous transactions procedure out param portal cleaned by its parent session */ + if (portal->holdStore && !portal->isAutoOutParam) { +#else if (portal->holdStore) { +#endif MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(portal->holdContext); @@ -599,7 +609,11 @@ void PortalDrop(Portal portal, bool isTopCommit, bool isInCreate) #endif /* delete tuplestore storage, if any */ +#ifndef ENABLE_MULTIPLE_NODES + if (portal->holdContext && !portal->isAutoOutParam) +#else if (portal->holdContext) +#endif MemoryContextDelete(portal->holdContext); /* release subsidiary storage */ @@ -614,6 +628,10 @@ void PortalDrop(Portal portal, bool isTopCommit, bool isInCreate) u_sess->parser_cxt.param_info = NULL; } +#ifndef ENABLE_MULTIPLE_NODES + /* reset portal cursor attribute */ + ResetCursorAtrribute(portal); +#endif /* release portal struct (it's in u_sess->portal_mem_cxt) */ pfree(portal); } @@ -650,7 +668,7 @@ void PortalHashTableDeleteAll(void) /* * "Hold" a portal. Prepare it for access by later transactions. 
 */
-static void HoldPortal(Portal portal)
+void HoldPortal(Portal portal)
 {
     /*
      * Note that PersistHoldablePortal() must release all resources
@@ -1211,6 +1229,7 @@ Datum pg_cursor(PG_FUNCTION_ARGS)
         Portal portal = hentry->portal;
         Datum values[6];
         bool nulls[6];
+        char* mask_string = NULL;
 
         /* report only "visible" entries */
         if (!portal->visible)
@@ -1220,7 +1239,13 @@
         securec_check(ss_rc, "\0", "\0");
 
         values[0] = CStringGetTextDatum(portal->name);
-        values[1] = CStringGetTextDatum(portal->sourceText);
+        mask_string = maskPassword(portal->sourceText);
+        if (mask_string == NULL) {
+            values[1] = CStringGetTextDatum(portal->sourceText);
+        } else {
+            values[1] = CStringGetTextDatum(mask_string);
+            pfree_ext(mask_string);
+        }
         values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
         values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
         values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
@@ -1262,9 +1287,10 @@ bool ThereAreNoReadyPortals(void)
  * @in mySubid: current transaction Id.
  * @in funOid: function oid, 0 if in exception block.
  * @in funUseCount:times of function had been used, 0 if in exception block.
+ * @in reset: whether to reset the cursor's options.
  * Return: void
  */
-void ResetPortalCursor(SubTransactionId mySubid, Oid funOid, int funUseCount)
+void ResetPortalCursor(SubTransactionId mySubid, Oid funOid, int funUseCount, bool reset)
 {
     HASH_SEQ_STATUS status;
     PortalHashEnt *hentry = NULL;
@@ -1290,7 +1316,7 @@ void ResetPortalCursor(SubTransactionId mySubid, Oid funOid, int funUseCount)
             continue;
         }
 
-        ResetCursorOption(portal, true);
+        ResetCursorOption(portal, reset);
     }
 }
 
@@ -1337,7 +1363,7 @@ HoldPinnedPortals(void)
         if (portal->status != PORTAL_READY)
             ereport(ERROR, (errmsg("pinned portal(%s) is not ready to be auto-held, with status[%d]",
                 portal->name, portal->status)));
-
+
         HoldPortal(portal);
         portal->autoHeld = true;
     }
diff --git a/src/common/backend/utils/resowner/resowner.cpp b/src/common/backend/utils/resowner/resowner.cpp
index d6499d7f6..6dbb7a099 100755
--- a/src/common/backend/utils/resowner/resowner.cpp
+++ b/src/common/backend/utils/resowner/resowner.cpp
@@ -28,6 +28,8 @@
 #include "storage/predicate.h"
 #include "storage/proc.h"
 #include "storage/smgr/segment.h"
+#include "utils/knl_partcache.h"
+#include "utils/knl_relcache.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 #include "utils/rel_gs.h"
@@ -50,6 +52,35 @@ typedef struct ResourceOwnerData {
     Buffer* buffers; /* dynamically allocated array */
     int maxbuffers;  /* currently allocated array size */
+
+    int nlocalcatclist;
+    LocalCatCList** localcatclists;
+    int maxlocalcatclists;
+
+    int nlocalcatctup;
+    LocalCatCTup** localcatctups;
+    int maxlocalcatctups;
+
+    int nglobalcatctup;
+    GlobalCatCTup** globalcatctups;
+    int maxglobalcatctups;
+
+    int nglobalcatclist;
+    GlobalCatCList** globalcatclists;
+    int maxglobalcatclist;
+
+    int nglobalbaseentry;
+    GlobalBaseEntry** globalbaseentries;
+    int maxglobalbaseentry;
+
+    int nglobaldbentry;
+    GlobalSysDBCacheEntry** globaldbentries;
+    int maxglobaldbentry;
+
+    int nglobalisexclusive;
+    volatile uint32** globalisexclusives;
+    int maxglobalisexclusive;
+
     /* We have built-in support for remembering catcache references */
     int ncatrefs;       /* number of owned catcache pins */
     HeapTuple* catrefs; /* dynamically allocated array */
@@ -111,6 +142,11 @@ typedef struct ResourceOwnerData {
     pthread_mutex_t** pThdMutexs;
     int maxPThdMutexs;
 
+    /* We have built-in support for remembering pthread_rwlock */
+    int 
nPthreadRWlock; + pthread_rwlock_t** pThdRWlocks; + int maxPThdRWlocks; + /* We have built-in support for remembering partition map references */ int npartmaprefs; PartitionMap** partmaprefs; @@ -128,15 +164,16 @@ typedef struct ResourceOwnerData { } ResourceOwnerData; THR_LOCAL ResourceOwner IsolatedResourceOwner = NULL; - -/* - * List of add-on callbacks for resource releasing - */ -typedef struct ResourceReleaseCallbackItem { - struct ResourceReleaseCallbackItem* next; - ResourceReleaseCallback callback; - void* arg; -} ResourceReleaseCallbackItem; +#ifdef MEMORY_CONTEXT_CHECKING +#define PrintGlobalSysCacheLeakWarning(owner, strinfo) \ +do { \ + if (EnableLocalSysCache() && LOCAL_SYSDB_RESOWNER != (owner)) { \ + ereport(WARNING, (errmsg("global syscache reference leak %s %s %d", strinfo, __FILE__, __LINE__))); \ + } \ +} while(0) +#else +#define PrintGlobalSysCacheLeakWarning(owner, strinfo) +#endif /* Internal routines */ static void ResourceOwnerReleaseInternal( @@ -167,18 +204,22 @@ ResourceOwner ResourceOwnerCreate(ResourceOwner parent, const char* name, Memory { ResourceOwner owner; - owner = (ResourceOwner)MemoryContextAllocZero(memCxt, sizeof(ResourceOwnerData)); + MemoryContext context = AllocSetContextCreate(memCxt, + "ResourceOwnerCxt", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + owner = (ResourceOwner)MemoryContextAllocZero(context, sizeof(ResourceOwnerData)); owner->name = name; - owner->memCxt = memCxt; + owner->memCxt = context; owner->valid = true; - if (parent) { owner->parent = parent; owner->nextchild = parent->firstchild; parent->firstchild = owner; } - if (parent == NULL && strcmp(name, "TopTransaction") != 0) + if (parent == NULL && strcmp(name, "TopTransaction") != 0 && strcmp(name, "InitLocalSysCache") != 0) IsolatedResourceOwner = owner; return owner; @@ -234,7 +275,6 @@ static void ResourceOwnerReleaseInternal( { ResourceOwner child; ResourceOwner save; - ResourceReleaseCallbackItem* item = NULL; /* Recurse to handle descendants */ for (child = owner->firstchild; child != NULL; child = child->nextchild) { @@ -316,16 +356,8 @@ static void ResourceOwnerReleaseInternal( * As with buffer pins, warn if any are left at commit time, and * release back-to-front for speed. */ - while (owner->nrelrefs > 0) { - if (isCommit) - PrintRelCacheLeakWarning(owner->relrefs[owner->nrelrefs - 1]); - RelationClose(owner->relrefs[owner->nrelrefs - 1]); - } - while (owner->npartrefs > 0) { - if (isCommit) - PrintPartCacheLeakWarning(owner->partrefs[owner->npartrefs - 1]); - PartitionClose(owner->partrefs[owner->npartrefs - 1]); - } + ResourceOwnerReleaseRelationRef(owner, isCommit); + ResourceOwnerReleasePartitionRef(owner, isCommit); // Ditto for pthread mutex // while (owner->nPthreadMutex > 0) { @@ -333,6 +365,12 @@ static void ResourceOwnerReleaseInternal( PrintPthreadMutexLeakWarning(owner->pThdMutexs[owner->nPthreadMutex - 1]); PthreadMutexUnlock(owner, owner->pThdMutexs[owner->nPthreadMutex - 1]); } + ResourceOwnerReleaseRWLock(owner, isCommit); + ResourceOwnerReleaseGlobalCatCList(owner, isCommit); + ResourceOwnerReleaseGlobalCatCTup(owner, isCommit); + ResourceOwnerReleaseGlobalBaseEntry(owner, isCommit); + ResourceOwnerReleaseGlobalDBEntry(owner, isCommit); + ResourceOwnerReleaseGlobalIsExclusive(owner, isCommit); } else if (phase == RESOURCE_RELEASE_LOCKS) { if (isTopLevel) { /* @@ -367,15 +405,19 @@ static void ResourceOwnerReleaseInternal( * release back-to-front for speed. 
*/ while (owner->ncatrefs > 0) { + Assert(!EnableLocalSysCache()); if (isCommit) PrintCatCacheLeakWarning(owner->catrefs[owner->ncatrefs - 1]); ReleaseCatCache(owner->catrefs[owner->ncatrefs - 1]); } + ResourceOwnerReleaseLocalCatCList(owner, isCommit); + ResourceOwnerReleaseLocalCatCTup(owner, isCommit); /* Ditto for catcache lists */ while (owner->ncatlistrefs > 0) { + Assert(!EnableLocalSysCache()); if (isCommit) PrintCatCacheListLeakWarning(owner->catlistrefs[owner->ncatlistrefs - 1]); - ReleaseCatCacheList(owner->catlistrefs[owner->ncatlistrefs - 1]); + ReleaseSysCacheList(owner->catlistrefs[owner->ncatlistrefs - 1]); } /* Ditto for plancache references */ while (owner->nplanrefs > 0) { @@ -410,11 +452,10 @@ static void ResourceOwnerReleaseInternal( MemoryContextDelete(memContext); ResourceOwnerForgetGMemContext(t_thrd.utils_cxt.TopTransactionResourceOwner, memContext); } - } - /* Let add-on modules get a chance too */ - for (item = t_thrd.utils_cxt.ResourceRelease_callbacks; item; item = item->next) - (*item->callback)(phase, isCommit, isTopLevel, item->arg); + /* Clean up index scans too */ + ReleaseResources_hash(); + } t_thrd.utils_cxt.CurrentResourceOwner = save; } @@ -452,9 +493,17 @@ static void ResourceOwnerFreeOwner(ResourceOwner owner, bool whole) pfree(owner->fakepartrefs); if (owner->globalMemContexts) pfree(owner->globalMemContexts); + pfree_ext(owner->localcatclists); + pfree_ext(owner->localcatctups); + pfree_ext(owner->globalcatctups); + pfree_ext(owner->globalcatclists); + pfree_ext(owner->globalbaseentries); + pfree_ext(owner->globaldbentries); + pfree_ext(owner->globalisexclusives); + pfree_ext(owner->pThdRWlocks); } - if (whole) - pfree(owner); + if (whole && owner->memCxt) + MemoryContextDelete(owner->memCxt); } /* @@ -469,20 +518,7 @@ void ResourceOwnerDelete(ResourceOwner owner) Assert(owner != t_thrd.utils_cxt.CurrentResourceOwner); /* And it better not own any resources, either */ - Assert(owner->nbuffers == 0); - Assert(owner->ncatrefs == 0); - Assert(owner->ncatlistrefs == 0); - Assert(owner->nrelrefs == 0); - Assert(owner->npartrefs == 0); - Assert(owner->nfakerelrefs == 0); - Assert(owner->nfakepartrefs == 0); - Assert(owner->nplanrefs == 0); - Assert(owner->ntupdescs == 0); - Assert(owner->nsnapshots == 0); - Assert(owner->nfiles == 0); - Assert(owner->nDataCacheSlots == 0); - Assert(owner->nMetaCacheSlots == 0); - Assert(owner->nPthreadMutex == 0); + Assert(CurrentResourceOwnerIsEmpty(owner)); /* * Delete children. The recursive call will delink the child from me, so @@ -575,45 +611,6 @@ void ResourceOwnerNewParent(ResourceOwner owner, ResourceOwner newparent) } } -/* - * Register or deregister callback functions for resource cleanup - * - * These functions are intended for use by dynamically loaded modules. - * For built-in modules we generally just hardwire the appropriate calls. - * - * Note that the callback occurs post-commit or post-abort, so the callback - * functions can only do noncritical cleanup. 
- */ -void RegisterResourceReleaseCallback(ResourceReleaseCallback callback, void* arg) -{ - ResourceReleaseCallbackItem* item = NULL; - - item = (ResourceReleaseCallbackItem*)MemoryContextAlloc( - THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(ResourceReleaseCallbackItem)); - item->callback = callback; - item->arg = arg; - item->next = t_thrd.utils_cxt.ResourceRelease_callbacks; - t_thrd.utils_cxt.ResourceRelease_callbacks = item; -} - -void UnregisterResourceReleaseCallback(ResourceReleaseCallback callback, const void* arg) -{ - ResourceReleaseCallbackItem* item = NULL; - ResourceReleaseCallbackItem* prev = NULL; - - prev = NULL; - for (item = t_thrd.utils_cxt.ResourceRelease_callbacks; item; prev = item, item = item->next) { - if (item->callback == callback && item->arg == arg) { - if (prev != NULL) - prev->next = item->next; - else - t_thrd.utils_cxt.ResourceRelease_callbacks = item->next; - pfree(item); - break; - } - } -} - /* * Make sure there is room for at least one more entry in a ResourceOwner's * buffer array. @@ -1159,7 +1156,6 @@ void ResourceOwnerEnlargeFakepartRefs(ResourceOwner owner) void ResourceOwnerRememberFakepartRef(ResourceOwner owner, Partition fakepart) { - ResourceOwnerEnlargeFakepartRefs(owner); Assert(owner->nfakepartrefs < owner->maxfakepartrefs); owner->fakepartrefs[owner->nfakepartrefs] = fakepart; owner->nfakepartrefs++; @@ -1272,7 +1268,8 @@ void ResourceOwnerForgetPlanCacheRef(ResourceOwner owner, CachedPlan* plan) return; } } - ereport(ERROR, + int elevel = t_thrd.proc_cxt.proc_exit_inprogress ? WARNING : ERROR; + ereport(elevel, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), errmsg("plancache reference is not owned by resource owner %s", owner->name))); } @@ -1420,15 +1417,13 @@ bool ResourceOwnerForgetSnapshot(ResourceOwner owner, Snapshot snapshot, bool er } } - bool allValid = true; for (ResourceOwner child = owner->firstchild; child != NULL; child = child->nextchild) { - allValid = (allValid && child->valid); if (ResourceOwnerForgetSnapshot(child, snapshot, false)) { return true; } } - if (ereport && allValid) { + if (ereport && u_sess->plsql_cxt.spi_xact_context != NULL) { ereport(ERROR, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), errmsg("snapshot is not owned by resource owner %s", owner->name))); @@ -1801,3 +1796,543 @@ bool ResourceOwnerIsValid(ResourceOwner owner) return owner->valid; } +/* Make sure there is room for at least one more entry in a ResourceOwner's + * pthread rwlock array. + * This is separate from actually inserting an entry because if we run out + * of memory, it's critical to do so *before* acquiring the resource. 
+ */
+void ResourceOwnerEnlargePthreadRWlock(ResourceOwner owner)
+{
+    int newmax;
+    if (owner->nPthreadRWlock < owner->maxPThdRWlocks)
+        return; /* nothing to do */
+    if (owner->pThdRWlocks == NULL) {
+        newmax = 16;
+        owner->pThdRWlocks = (pthread_rwlock_t**)MemoryContextAlloc(owner->memCxt,
+            newmax * sizeof(pthread_rwlock_t*));
+        owner->maxPThdRWlocks = newmax;
+    } else {
+        newmax = owner->maxPThdRWlocks * 2;
+        owner->pThdRWlocks = (pthread_rwlock_t**)repalloc(owner->pThdRWlocks, newmax * sizeof(pthread_rwlock_t*));
+        owner->maxPThdRWlocks = newmax;
+    }
+}
+
+/* ResourceOwnerRememberPthreadRWlock
+ * Remember that a pthread rwlock is owned by a ResourceOwner
+ */
+void ResourceOwnerRememberPthreadRWlock(ResourceOwner owner, pthread_rwlock_t* pRWlock)
+{
+    Assert(owner->nPthreadRWlock < owner->maxPThdRWlocks);
+    owner->pThdRWlocks[owner->nPthreadRWlock] = pRWlock;
+    owner->nPthreadRWlock++;
+}
+/* ResourceOwnerForgetPthreadRWlock
+ * Forget that a pthread rwlock is owned by a ResourceOwner
+ */
+void ResourceOwnerForgetPthreadRWlock(ResourceOwner owner, pthread_rwlock_t* pRWlock)
+{
+    pthread_rwlock_t** rwlocks = owner->pThdRWlocks;
+    int ns1 = owner->nPthreadRWlock - 1;
+    int i;
+    for (i = ns1; i >= 0; i--) {
+        if (rwlocks[i] == pRWlock) {
+            while (i < ns1) {
+                rwlocks[i] = rwlocks[i + 1];
+                i++;
+            }
+            owner->nPthreadRWlock = ns1;
+            return;
+        }
+    }
+    ereport(ERROR,
+        (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
+            errmsg("pthread rwlock is not owned by resource owner %s", owner->name)));
+}
+
+void ResourceOwnerEnlargeLocalCatCList(ResourceOwner owner)
+{
+    int newmax;
+    if (owner->nlocalcatclist < owner->maxlocalcatclists)
+        return; /* nothing to do */
+    if (owner->localcatclists == NULL) {
+        newmax = 16;
+        owner->localcatclists = (LocalCatCList**)MemoryContextAlloc(owner->memCxt,
+            newmax * sizeof(LocalCatCList*));
+        owner->maxlocalcatclists = newmax;
+    } else {
+        newmax = owner->maxlocalcatclists * 2;
+        owner->localcatclists = (LocalCatCList**)repalloc(owner->localcatclists, newmax * sizeof(LocalCatCList*));
+        owner->maxlocalcatclists = newmax;
+    }
+}
+
+void ResourceOwnerRememberLocalCatCList(ResourceOwner owner, LocalCatCList* list)
+{
+    Assert(owner->nlocalcatclist < owner->maxlocalcatclists);
+    owner->localcatclists[owner->nlocalcatclist] = list;
+    owner->nlocalcatclist++;
+}
+void ResourceOwnerForgetLocalCatCList(ResourceOwner owner, LocalCatCList* list)
+{
+    LocalCatCList** localcatclist = owner->localcatclists;
+    int nc1 = owner->nlocalcatclist - 1;
+    int i;
+    for (i = nc1; i >= 0; i--) {
+        if (localcatclist[i] == list) {
+            while (i < nc1) {
+                localcatclist[i] = localcatclist[i + 1];
+                i++;
+            }
+            owner->nlocalcatclist = nc1;
+            return;
+        }
+    }
+    ereport(ERROR,
+        (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
+            errmsg("localcatcache list is not owned by resource owner %s", owner->name)));
+}
+void ResourceOwnerEnlargeLocalCatCTup(ResourceOwner owner)
+{
+    int newmax;
+    if (owner->nlocalcatctup < owner->maxlocalcatctups)
+        return; /* nothing to do */
+    if (owner->localcatctups == NULL) {
+        newmax = 16;
+        owner->localcatctups = (LocalCatCTup**)MemoryContextAlloc(owner->memCxt,
+            newmax * sizeof(LocalCatCTup*));
+        owner->maxlocalcatctups = newmax;
+    } else {
+        newmax = owner->maxlocalcatctups * 2;
+        owner->localcatctups = (LocalCatCTup**)repalloc(owner->localcatctups, newmax * sizeof(LocalCatCTup*));
+        owner->maxlocalcatctups = newmax;
+    }
+}
+void ResourceOwnerRememberLocalCatCTup(ResourceOwner owner, LocalCatCTup* tup)
+{
+    Assert(owner->nlocalcatctup < 
owner->maxlocalcatctups); + owner->localcatctups[owner->nlocalcatctup] = tup; + owner->nlocalcatctup++; +} +LocalCatCTup* ResourceOwnerForgetLocalCatCTup(ResourceOwner owner, HeapTuple tup) +{ + LocalCatCTup** localcatctup = owner->localcatctups; + LocalCatCTup* find = NULL; + int nc1 = owner->nlocalcatctup - 1; + int i; + for (i = nc1; i >= 0; i--) { + if (&localcatctup[i]->global_ct->tuple == tup) { + find = localcatctup[i]; + while (i < nc1) { + localcatctup[i] = localcatctup[i + 1]; + i++; + } + owner->nlocalcatctup = nc1; + return find; + } + } + ereport(ERROR, + (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), + errmsg("localcatcache tuple is not owned by resource owner %s", owner->name))); + return NULL; /* keep compiler quiet */ +} + +void ResourceOwnerEnlargeGlobalCatCTup(ResourceOwner owner) +{ + int newmax; + if (owner->nglobalcatctup < owner->maxglobalcatctups) + return; /* nothing to do */ + if (owner->globalcatctups == NULL) { + newmax = 16; + owner->globalcatctups = (GlobalCatCTup**)MemoryContextAlloc(owner->memCxt, + newmax * sizeof(GlobalCatCTup*)); + owner->maxglobalcatctups = newmax; + } else { + newmax = owner->maxglobalcatctups * 2; + owner->globalcatctups = (GlobalCatCTup**)repalloc(owner->globalcatctups, newmax * sizeof(GlobalCatCTup*)); + owner->maxglobalcatctups = newmax; + } +} +void ResourceOwnerRememberGlobalCatCTup(ResourceOwner owner, GlobalCatCTup* tup) +{ + Assert(owner->nglobalcatctup < owner->maxglobalcatctups); + owner->globalcatctups[owner->nglobalcatctup] = tup; + owner->nglobalcatctup++; +} +void ResourceOwnerForgetGlobalCatCTup(ResourceOwner owner, GlobalCatCTup* tup) +{ + GlobalCatCTup** global_cts = owner->globalcatctups; + int nc1 = owner->nglobalcatctup - 1; + int i; + for (i = nc1; i >= 0; i--) { + if (global_cts[i] == tup) { + while (i < nc1) { + global_cts[i] = global_cts[i + 1]; + i++; + } + owner->nglobalcatctup = nc1; + return; + } + } + ereport(ERROR, + (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), + errmsg("bad global tuple is not owned by resource owner %s", owner->name))); +} +void ResourceOwnerEnlargeGlobalCatCList(ResourceOwner owner) +{ + int newmax; + if (owner->nglobalcatclist < owner->maxglobalcatclist) + return; /* nothing to do */ + if (owner->globalcatclists == NULL) { + newmax = 16; + owner->globalcatclists = (GlobalCatCList**)MemoryContextAlloc(owner->memCxt, + newmax * sizeof(GlobalCatCList*)); + owner->maxglobalcatclist = newmax; + } else { + newmax = owner->maxglobalcatclist * 2; + owner->globalcatclists = (GlobalCatCList**)repalloc(owner->globalcatclists, newmax * sizeof(GlobalCatCList*)); + owner->maxglobalcatclist = newmax; + } +} +void ResourceOwnerRememberGlobalCatCList(ResourceOwner owner, GlobalCatCList* list) +{ + Assert(owner->nglobalcatclist < owner->maxglobalcatclist); + owner->globalcatclists[owner->nglobalcatclist] = list; + owner->nglobalcatclist++; +} +void ResourceOwnerForgetGlobalCatCList(ResourceOwner owner, GlobalCatCList* list) +{ + GlobalCatCList** global_lists = owner->globalcatclists; + int nc1 = owner->nglobalcatclist - 1; + int i; + for (i = nc1; i >= 0; i--) { + if (global_lists[i] == list) { + while (i < nc1) { + global_lists[i] = global_lists[i + 1]; + i++; + } + owner->nglobalcatclist = nc1; + return; + } + } + ereport(ERROR, + (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), + errmsg("bad global list is not owned by resource owner %s", owner->name))); +} +void ResourceOwnerEnlargeGlobalBaseEntry(ResourceOwner owner) +{ + int newmax; + if (owner->nglobalbaseentry < owner->maxglobalbaseentry) 
+        return; /* nothing to do */
+    if (owner->globalbaseentries == NULL) {
+        newmax = 16;
+        owner->globalbaseentries = (GlobalBaseEntry **)MemoryContextAlloc(owner->memCxt,
+            newmax * sizeof(GlobalBaseEntry *));
+        owner->maxglobalbaseentry = newmax;
+    } else {
+        newmax = owner->maxglobalbaseentry * 2;
+        owner->globalbaseentries =
+            (GlobalBaseEntry **)repalloc(owner->globalbaseentries, newmax * sizeof(GlobalBaseEntry *));
+        owner->maxglobalbaseentry = newmax;
+    }
+}
+void ResourceOwnerRememberGlobalBaseEntry(ResourceOwner owner, GlobalBaseEntry* entry)
+{
+    Assert(owner->nglobalbaseentry < owner->maxglobalbaseentry);
+    owner->globalbaseentries[owner->nglobalbaseentry] = entry;
+    owner->nglobalbaseentry++;
+}
+void ResourceOwnerForgetGlobalBaseEntry(ResourceOwner owner, GlobalBaseEntry* entry)
+{
+    GlobalBaseEntry** global_entries = owner->globalbaseentries;
+    int nc1 = owner->nglobalbaseentry - 1;
+    int i;
+    for (i = nc1; i >= 0; i--) {
+        if (global_entries[i] == entry) {
+            while (i < nc1) {
+                global_entries[i] = global_entries[i + 1];
+                i++;
+            }
+            owner->nglobalbaseentry = nc1;
+            return;
+        }
+    }
+    ereport(ERROR,
+        (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
+            errmsg("the global base entry is not owned by resource owner %s", owner->name)));
+}
+
+void ResourceOwnerReleaseRWLock(ResourceOwner owner, bool isCommit)
+{
+    while (owner->nPthreadRWlock > 0) {
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "RWLock");
+        }
+        /* the unlock call also forgets the lock from this owner, decrementing nPthreadRWlock */
+        PthreadRWlockUnlock(owner, owner->pThdRWlocks[owner->nPthreadRWlock - 1]);
+    }
+}
+
+void ResourceOwnerReleaseLocalCatCTup(ResourceOwner owner, bool isCommit)
+{
+    while (owner->nlocalcatctup > 0) {
+        LocalCatCTup *ct = owner->localcatctups[owner->nlocalcatctup - 1];
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "LocalCatCTup");
+        }
+        ct->Release();
+        owner->nlocalcatctup--;
+    }
+}
+
+void ResourceOwnerReleaseLocalCatCList(ResourceOwner owner, bool isCommit)
+{
+    while (owner->nlocalcatclist > 0) {
+        LocalCatCList *cl = owner->localcatclists[owner->nlocalcatclist - 1];
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "LocalCatCList");
+        }
+        cl->Release();
+        owner->nlocalcatclist--;
+    }
+}
+
+void ResourceOwnerReleaseRelationRef(ResourceOwner owner, bool isCommit)
+{
+    while (owner->nrelrefs > 0) {
+        Relation rel = owner->relrefs[owner->nrelrefs - 1];
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "Relation");
+            if (!EnableLocalSysCache()) {
+                PrintRelCacheLeakWarning(rel);
+            }
+        }
+        /* RelationClose() also forgets the ref from this owner, decrementing nrelrefs */
+        RelationClose(rel);
+    }
+}
+
+void ResourceOwnerReleasePartitionRef(ResourceOwner owner, bool isCommit)
+{
+    while (owner->npartrefs > 0) {
+        Partition part = owner->partrefs[owner->npartrefs - 1];
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "Partition");
+            PrintPartCacheLeakWarning(part);
+        }
+        /* PartitionClose() also forgets the ref from this owner, decrementing npartrefs */
+        PartitionClose(part);
+    }
+}
+
+void ResourceOwnerReleaseGlobalCatCTup(ResourceOwner owner, bool isCommit)
+{
+    while (owner->nglobalcatctup > 0) {
+        GlobalCatCTup* global_ct = owner->globalcatctups[owner->nglobalcatctup - 1];
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "GlobalCatCTup");
+        }
+        global_ct->Release();
+        owner->nglobalcatctup--;
+    }
+}
+
+void ResourceOwnerReleaseGlobalCatCList(ResourceOwner owner, bool isCommit)
+{
+    while (owner->nglobalcatclist > 0) {
+        GlobalCatCList* global_cl = owner->globalcatclists[owner->nglobalcatclist - 1];
+        if (isCommit) {
+            PrintGlobalSysCacheLeakWarning(owner, "GlobalCatCList");
+        }
+        global_cl->Release();
+        
owner->nglobalcatclist--; +} +} + +void ResourceOwnerReleaseGlobalBaseEntry(ResourceOwner owner, bool isCommit) +{ + while (owner->nglobalbaseentry > 0) { + GlobalBaseEntry *entry = owner->globalbaseentries[owner->nglobalbaseentry - 1]; + if (isCommit) { + PrintGlobalSysCacheLeakWarning(owner, "GlobalBaseEntry"); + } + if (unlikely(entry->refcount == 0)) { + /* palloc fail */ + entry->FreeError(); + } else { + entry->Release(); + } + owner->nglobalbaseentry--; + } +} + +void ResourceOwnerEnlargeGlobalDBEntry(ResourceOwner owner) +{ + int newmax; + if (owner->nglobaldbentry < owner->maxglobaldbentry) + return; /* nothing to do */ + if (owner->globaldbentries == NULL) { + newmax = 16; + owner->globaldbentries = (GlobalSysDBCacheEntry **)MemoryContextAlloc(owner->memCxt, + newmax * sizeof(GlobalSysDBCacheEntry *)); + owner->maxglobaldbentry = newmax; + } else { + newmax = owner->maxglobaldbentry * 2; + owner->globaldbentries = + (GlobalSysDBCacheEntry **)repalloc(owner->globaldbentries, newmax * sizeof(GlobalSysDBCacheEntry *)); + owner->maxglobaldbentry = newmax; + } +} + +extern void ResourceOwnerRememberGlobalDBEntry(ResourceOwner owner, GlobalSysDBCacheEntry* entry) +{ + Assert(owner->nglobaldbentry < owner->maxglobaldbentry); + owner->globaldbentries[owner->nglobaldbentry] = entry; + owner->nglobaldbentry++; +} + +extern void ResourceOwnerForgetGlobalDBEntry(ResourceOwner owner, GlobalSysDBCacheEntry* entry) +{ + Assert(entry->m_dbOid!= InvalidOid); + GlobalSysDBCacheEntry** global_entries = owner->globaldbentries; + int nc1 = owner->nglobaldbentry - 1; + int i; + for (i = nc1; i >= 0; i--) { + if (global_entries[i] == entry) { + while (i < nc1) { + global_entries[i] = global_entries[i + 1]; + i++; + } + owner->nglobaldbentry = nc1; + return; + } + } + ereport(ERROR, + (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), + errmsg("the global rel entry is not owned by resource owner %s", owner->name))); +} + +extern void ResourceOwnerReleaseGlobalDBEntry(ResourceOwner owner, bool isCommit) +{ + Assert(owner->nglobaldbentry <= 1); + while (owner->nglobaldbentry > 0) { + GlobalSysDBCacheEntry *entry = owner->globaldbentries[owner->nglobaldbentry - 1]; + if (isCommit) { + /* print some debug info */ + PrintGlobalSysCacheLeakWarning(owner, "GlobalDBEntry"); + } + if (unlikely(entry->m_refcount == 0)) { + // palloc failed entry + entry->Free(entry); + } else { + entry->Release(); + } + owner->nglobaldbentry--; + } +} + +void ResourceOwnerEnlargeGlobalIsExclusive(ResourceOwner owner) +{ + int newmax; + if (owner->nglobalisexclusive < owner->maxglobalisexclusive) + return; /* nothing to do */ + if (owner->globalisexclusives == NULL) { + newmax = 16; + owner->globalisexclusives = (volatile uint32 **)MemoryContextAlloc(owner->memCxt, newmax * sizeof(uint32 *)); + owner->maxglobalisexclusive = newmax; + } else { + newmax = owner->maxglobalisexclusive * 2; + owner->globalisexclusives = + (volatile uint32 **)repalloc(owner->globalisexclusives, newmax * sizeof(uint32 *)); + owner->maxglobalisexclusive = newmax; + } +} + +extern void ResourceOwnerRememberGlobalIsExclusive(ResourceOwner owner, volatile uint32 *isexclusive) +{ + Assert(owner->nglobalisexclusive < owner->maxglobalisexclusive); + Assert(*isexclusive == 1); + owner->globalisexclusives[owner->nglobalisexclusive] = isexclusive; + owner->nglobalisexclusive++; +} + +extern void ResourceOwnerForgetGlobalIsExclusive(ResourceOwner owner, volatile uint32 *isexclusive) +{ + volatile uint32 **global_isexclusives = owner->globalisexclusives; + int nc1 = 
owner->nglobalisexclusive - 1; + int i; + for (i = nc1; i >= 0; i--) { + if (global_isexclusives[i] == isexclusive) { + while (i < nc1) { + global_isexclusives[i] = global_isexclusives[i + 1]; + i++; + } + owner->nglobalisexclusive = nc1; + return; + } + } + ereport(ERROR, + (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), + errmsg("the global isexclusive is not owned by resource owner %s", owner->name))); +} + +extern void ResourceOwnerReleaseGlobalIsExclusive(ResourceOwner owner, bool isCommit) +{ + Assert(owner->nglobalisexclusive <= 1); + while (owner->nglobalisexclusive > 0) { + volatile uint32 *isexclusive = owner->globalisexclusives[owner->nglobalisexclusive - 1]; + if (isCommit) { + /* print some debug info */ + PrintGlobalSysCacheLeakWarning(owner, "Global IsExclusive"); + } + Assert(*isexclusive == 1); + atomic_compare_exchange_u32(isexclusive, 1, 0); + owner->nglobalisexclusive--; + } +} + +bool CurrentResourceOwnerIsEmpty(ResourceOwner owner) +{ + if (owner == NULL || !owner->valid) { + return true; + } + Assert(owner->nbuffers == 0); + Assert(owner->nlocalcatclist == 0); + Assert(owner->nlocalcatctup == 0); + Assert(owner->nglobalcatctup == 0); + Assert(owner->nglobalcatclist == 0); + Assert(owner->nglobalbaseentry == 0); + Assert(owner->nglobaldbentry == 0); + Assert(owner->nglobalisexclusive == 0); + Assert(owner->ncatrefs == 0); + Assert(owner->ncatlistrefs == 0); + Assert(owner->nrelrefs == 0); + Assert(owner->npartrefs == 0); + Assert(owner->nfakerelrefs == 0); + Assert(owner->nfakepartrefs == 0); + Assert(owner->nplanrefs == 0); + Assert(owner->ntupdescs == 0); + Assert(owner->nsnapshots == 0); + Assert(owner->nfiles == 0); + Assert(owner->nDataCacheSlots == 0); + Assert(owner->nMetaCacheSlots == 0); + Assert(owner->nPthreadMutex == 0); + Assert(owner->nPthreadRWlock == 0); + Assert(owner->npartmaprefs == 0); + Assert(owner->nglobalMemContext == 0); + return true; +} +/* + * ResourceOwnerReleaseAllPlanCacheRefs + * Release the plancache references (only) held by this owner. + * + * We might eventually add similar functions for other resource types, + * but for now, only this is needed. + */ +void ResourceOwnerReleaseAllPlanCacheRefs(ResourceOwner owner) +{ + ResourceOwner save = t_thrd.utils_cxt.CurrentResourceOwner; + t_thrd.utils_cxt.CurrentResourceOwner = owner; + ResourceOwnerDecrementNPlanRefs(owner, true); + t_thrd.utils_cxt.CurrentResourceOwner = save; +} diff --git a/src/common/backend/utils/sort/tuplesort.cpp b/src/common/backend/utils/sort/tuplesort.cpp index 860cc1e9b..cbe46b212 100644 --- a/src/common/backend/utils/sort/tuplesort.cpp +++ b/src/common/backend/utils/sort/tuplesort.cpp @@ -110,7 +110,6 @@ #include #include "access/nbtree.h" -#include "access/hash.h" #include "access/tableam.h" #include "access/ustore/knl_utuple.h" #include "access/tableam.h" @@ -416,7 +415,6 @@ struct Tuplesortstate { * These variables are specific to the IndexTuple case; they are set by * tuplesort_begin_index_xxx and used only by the IndexTuple routines. 
*/ - Relation heapRel; /* table the index is being built on */ Relation indexRel; /* index being built */ /* These are specific to the index_btree subcase: */ @@ -424,9 +422,7 @@ struct Tuplesortstate { bool enforceUnique; /* complain if we find duplicate tuples */ /* These are specific to the index_hash subcase: */ - uint32 high_mask; /* masks for sortable part of hash code */ - uint32 low_mask; - uint32 max_buckets; + uint32 hash_mask; /* mask for sortable part of hash code */ /* * These variables are specific to the Datum case; they are set by @@ -974,8 +970,7 @@ Tuplesortstate* tuplesort_begin_index_btree( } Tuplesortstate* tuplesort_begin_index_hash( - Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, - uint32 max_buckets, int workMem, bool randomAccess, int maxMem) + Relation indexRel, uint32 hash_mask, int workMem, bool randomAccess, int maxMem) { Tuplesortstate* state = tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; @@ -985,12 +980,11 @@ Tuplesortstate* tuplesort_begin_index_hash( #ifdef TRACE_SORT if (u_sess->attr.attr_common.trace_sort) { elog(LOG, - "begin index sort: high_mask = 0x%x, low_mask = 0x%x, " - "max_buckets = 0x%x, workMem = %d, randomAccess = %c", - high_mask, - low_mask, - max_buckets, - workMem, randomAccess ? 't' : 'f'); + "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c, maxMem = %d", + hash_mask, + workMem, + randomAccess ? 't' : 'f', + maxMem); } #endif @@ -1005,12 +999,9 @@ Tuplesortstate* tuplesort_begin_index_hash( #endif state->reversedirection = reversedirection_index_hash; - state->heapRel = heapRel; state->indexRel = indexRel; - state->high_mask = high_mask; - state->low_mask = low_mask; - state->max_buckets = max_buckets; + state->hash_mask = hash_mask; state->maxMem = maxMem * 1024L; (void)MemoryContextSwitchTo(oldcontext); @@ -1445,21 +1436,12 @@ void TuplesortPutheaptuple(Tuplesortstate* state, HeapTuple tup) * it from caller-supplied values. */ void tuplesort_putindextuplevalues( - Tuplesortstate* state, Relation rel, ItemPointer self, Datum* values, const bool* isnull, IndexTransInfo* transInfo) + Tuplesortstate* state, Relation rel, ItemPointer self, Datum* values, const bool* isnull) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; stup.tupindex = 0; stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull); - if (transInfo != NULL) { - /* create a larger IndexTuple with corresponding xmin/xmax */ - IndexTuple itup = CopyIndexTupleAndReserveSpace((IndexTuple)stup.tuple, sizeof(TransactionId) * 2); - IndexTransInfo *tupleInfo = (IndexTransInfo*)(((char*)itup) + IndexTupleSize((IndexTuple)stup.tuple)); - *tupleInfo = *transInfo; - /* replace the original IndexTuple */ - pfree(stup.tuple); - stup.tuple = itup; - } ((IndexTuple)stup.tuple)->t_tid = *self; USEMEM(state, GetMemoryChunkSpace(stup.tuple)); @@ -3828,8 +3810,8 @@ static int comparetup_index_btree(const SortTuple* a, const SortTuple* b, Tuples static int comparetup_index_hash(const SortTuple* a, const SortTuple* b, Tuplesortstate* state) { - Bucket bucket1; - Bucket bucket2; + uint32 hash1; + uint32 hash2; IndexTuple tuple1; IndexTuple tuple2; @@ -3838,17 +3820,13 @@ static int comparetup_index_hash(const SortTuple* a, const SortTuple* b, Tupleso * that the first column of the index tuple is the hash key. 
*/ Assert(!a->isnull1); - bucket1 = _hash_hashkey2bucket(DatumGetUInt32(a->datum1), - state->max_buckets, state->high_mask, - state->low_mask); + hash1 = DatumGetUInt32(a->datum1) & state->hash_mask; Assert(!b->isnull1); - bucket2 = _hash_hashkey2bucket(DatumGetUInt32(b->datum1), - state->max_buckets, state->high_mask, - state->low_mask); + hash2 = DatumGetUInt32(b->datum1) & state->hash_mask; - if (bucket1 > bucket2) { + if (hash1 > hash2) { return 1; - } else if (bucket1 < bucket2) { + } else if (hash1 < hash2) { return -1; } diff --git a/src/common/backend/utils/time/combocid.cpp b/src/common/backend/utils/time/combocid.cpp index 998d28f51..0a9535a31 100644 --- a/src/common/backend/utils/time/combocid.cpp +++ b/src/common/backend/utils/time/combocid.cpp @@ -89,7 +89,6 @@ CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup, Page page) { CommandId cid = HeapTupleHeaderGetRawCommandId(tup); - Assert(!(tup->t_infomask & HEAP_MOVED)); Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tup))); if (tup->t_infomask & HEAP_COMBOCID) @@ -115,7 +114,6 @@ CommandId HeapTupleGetCmax(HeapTuple tup) HeapTupleHeader htup = tup->t_data; CommandId cid = HeapTupleHeaderGetRawCommandId(htup); - Assert(!(htup->t_infomask & HEAP_MOVED)); Assert(TransactionIdIsCurrentTransactionId(HeapTupleGetUpdateXid(tup))); if (htup->t_infomask & HEAP_COMBOCID) @@ -128,7 +126,6 @@ CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup, Page page) { CommandId cid = HeapTupleHeaderGetRawCommandId(tup); - Assert(!(tup->t_infomask & HEAP_MOVED)); Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(page, tup))); if (tup->t_infomask & HEAP_COMBOCID) @@ -142,7 +139,6 @@ bool CheckStreamCombocid(HeapTupleHeader tup, CommandId current_cid, Page page) { CommandId cid = HeapTupleHeaderGetRawCommandId(tup); - Assert(!(tup->t_infomask & HEAP_MOVED)); Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tup))); /* diff --git a/src/common/backend/utils/time/snapmgr.cpp b/src/common/backend/utils/time/snapmgr.cpp index 77ae806a5..754bc54f3 100644 --- a/src/common/backend/utils/time/snapmgr.cpp +++ b/src/common/backend/utils/time/snapmgr.cpp @@ -64,7 +64,6 @@ #endif SnapshotData CatalogSnapshotData = {SNAPSHOT_MVCC}; -extern THR_LOCAL bool need_reset_xmin; /* * Elements of the active snapshot stack. * @@ -201,18 +200,6 @@ bool XidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdSta snapshot->xmax))); #endif - /* - * Any xid >= xmax is in-progress (or aborted, but we don't distinguish - * that here). - * - * We can't do anything useful with xmin, because the xmin only tells us - * whether we see it as completed. We have to check the transaction log to - * see if the transaction committed or aborted, in any case. - */ - if (GTM_MODE && TransactionIdFollowsOrEquals(xid, snapshot->xmax)) { - return false; - } - loop: csn = TransactionIdGetCommitSeqNo(xid, false, true, false, snapshot); @@ -283,6 +270,51 @@ loop: } } +bool UHeapXidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, + TransactionIdStatus *hintstatus, Buffer buffer, bool *sync) +{ + if (!GTM_LITE_MODE || snapshot->gtm_snapshot_type == GTM_SNAPSHOT_TYPE_LOCAL) { + /* + * Make a quick range check to eliminate most XIDs without looking at the + * CSN log. + */ + if (TransactionIdPrecedes(xid, snapshot->xmin)) { + return true; + } + + /* + * Any xid >= xmax is in-progress (or aborted, but we don't distinguish + * that here. 
+ */ + if (GTM_MODE && TransactionIdFollowsOrEquals(xid, snapshot->xmax)) { + return false; + } + } + + return XidVisibleInSnapshot(xid, snapshot, hintstatus, buffer, sync); +} + +bool XidVisibleInDecodeSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdStatus* hintstatus, Buffer buffer) +{ + volatile CommitSeqNo csn; + *hintstatus = XID_INPROGRESS; + + csn = TransactionIdGetCommitSeqNo(xid, false, true, false, snapshot); + if (COMMITSEQNO_IS_COMMITTED(csn)) { + *hintstatus = XID_COMMITTED; + if (csn < snapshot->snapshotcsn) { + return true; + } else { + return false; + } + } else { + if (csn == COMMITSEQNO_ABORTED) { + *hintstatus = XID_ABORTED; + } + } + return false; +} + /* * CommittedXidVisibleInSnapshot * Is the given XID visible according to the snapshot? @@ -304,14 +336,6 @@ bool CommittedXidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, Buffer */ if (TransactionIdPrecedes(xid, snapshot->xmin)) return true; - - /* - * Any xid >= xmax is in-progress (or aborted, but we don't distinguish - * that here. - */ - if (GTM_MODE && TransactionIdFollowsOrEquals(xid, snapshot->xmax)) { - return false; - } } loop: @@ -377,6 +401,32 @@ loop: return false; } +bool CommittedXidVisibleInDecodeSnapshot(TransactionId xid, Snapshot snapshot, Buffer buffer) +{ + CommitSeqNo csn; + + csn = TransactionIdGetCommitSeqNo(xid, true, true, false, snapshot); + if (COMMITSEQNO_IS_COMMITTING(csn)) { + return false; + } else if (!COMMITSEQNO_IS_COMMITTED(csn)) { + ereport(WARNING, + (errmsg("transaction/csn %lu/%lu was hinted as " + "committed, but was not marked as committed in " + "the transaction log", + xid, csn))); + /* + * We have contradicting evidence on whether the transaction committed or + * not. Let's assume that it did. That seems better than erroring out. + */ + return true; + } + + if (csn < snapshot->snapshotcsn) { + return true; + } else { + return false; + } +} /* * GetTransactionSnapshot @@ -966,7 +1016,7 @@ static void SnapshotResetXmin(void) if (u_sess->utils_cxt.RegisteredSnapshots == 0 && u_sess->utils_cxt.ActiveSnapshot == NULL) { t_thrd.pgxact->xmin = InvalidTransactionId; t_thrd.pgxact->csn_min = InvalidCommitSeqNo; - need_reset_xmin = true; + t_thrd.pgxact->csn_dr = InvalidCommitSeqNo; } } diff --git a/src/common/interfaces/ecpg/preproc/parse.pl b/src/common/interfaces/ecpg/preproc/parse.pl index eadaa6494..14877b3e6 100644 --- a/src/common/interfaces/ecpg/preproc/parse.pl +++ b/src/common/interfaces/ecpg/preproc/parse.pl @@ -48,10 +48,13 @@ my %replace_string = ( 'INCLUDING_ALL' => 'including all', 'RENAME_PARTITION' => 'rename partition', 'PARTITION_FOR' => 'partition for', + 'SUBPARTITION_FOR' => 'subpartition for', 'ADD_PARTITION' => 'add partition', 'DROP_PARTITION' => 'drop partition', 'REBUILD_PARTITION' => 'rebuild partition', 'MODIFY_PARTITION' => 'modify partition', + 'ADD_SUBPARTITION' => 'add subpartition', + 'DROP_SUBPARTITION' => 'drop subpartition', 'TYPECAST' => '::', 'DOT_DOT' => '..', 'COLON_EQUALS' => ':=',); diff --git a/src/common/interfaces/libpq/CMakeLists.txt b/src/common/interfaces/libpq/CMakeLists.txt index d30140599..1528af3b3 100755 --- a/src/common/interfaces/libpq/CMakeLists.txt +++ b/src/common/interfaces/libpq/CMakeLists.txt @@ -1,9 +1,10 @@ #This is the main CMAKE for build all components. 
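+# Note: the ENABLE_LITE_MODE guards added below trim the lite client build:
+# the jdbc subdirectory and JAVA_HOME include paths, the Kerberos (krb5_gauss
+# family) link libraries, the libpq_ce targets, and the gs_ktool dependency
+# are all skipped when lite mode is ON.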
+if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") set(CMAKE_MODULE_PATH ${${CMAKE_CURRENT_SOURCE_DIR}}/jdbc ) add_subdirectory(jdbc) - +endif() #execute_process( # COMMAND flex -CF -b -p -o scan.cpp scan.l # WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/frontend_parser @@ -97,8 +98,10 @@ set(TGT_pq_INC ${LIBEDIT_INCLUDE_PATH} ${ZLIB_INCLUDE_PATH} ${PROJECT_SRC_DIR}/include/libpq +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") ${JAVA_HOME}/include ${JAVA_HOME}/include/linux +endif() ) set(pq_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND -DFRONTEND_PARSER -DUNSAFE_STAT_OK -DSO_MAJOR_VERSION=5) @@ -108,7 +111,11 @@ add_static_libtarget(pq TGT_pq_SRC TGT_pq_INC "${pq_DEF_OPTIONS}" "${pq_COMPILE_ # so pq set(pq_LINK_OPTIONS ${LIB_LINK_OPTIONS}) add_shared_libtarget(pq TGT_pq_SRC TGT_pq_INC "${pq_DEF_OPTIONS}" "${pq_COMPILE_OPTIONS}" "${pq_LINK_OPTIONS}") -target_link_libraries(pq PRIVATE com_err_gauss crypto gssapi_krb5_gauss gssrpc_gauss k5crypto_gauss krb5_gauss krb5support_gauss securec ssl) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + target_link_libraries(pq PRIVATE com_err_gauss crypto gssapi_krb5_gauss gssrpc_gauss k5crypto_gauss krb5_gauss krb5support_gauss securec ssl) +else() + target_link_libraries(pq PRIVATE crypto securec ssl) +endif() target_link_directories(pq PUBLIC ${LIBOPENSSL_LIB_PATH} ${KERBEROS_LIB_PATH} ${SECURE_LIB_PATH} ${PROJECT_SRC_DIR}/common/port ${PROJECT_SRC_DIR}/gstrace/common @@ -122,6 +129,7 @@ install(TARGETS pq LIBRARY DESTINATION lib) install(TARGETS pq_static ARCHIVE DESTINATION lib) # libpq_ce.so +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") execute_process(COMMAND rm -rf ${CMAKE_CURRENT_SOURCE_DIR}/libpq_ce) execute_process(COMMAND mkdir -p ${CMAKE_CURRENT_SOURCE_DIR}/libpq_ce) execute_process( @@ -193,7 +201,12 @@ add_static_objtarget(libpq_ce TGT_libpq_ce_SRC TGT_pq_ce_INC "${libpq_ce_DEF_OPT AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks TGT_cmk_entity_manager_hooks_SRC) if(NOT "${ENABLE_UT}" STREQUAL "ON") if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - list(REMOVE_ITEM TGT_cmk_entity_manager_hooks_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks/register_local_kms.cpp) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(REMOVE_ITEM TGT_cmk_entity_manager_hooks_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks/register_local_kms.cpp) + else() + list(REMOVE_ITEM TGT_cmk_entity_manager_hooks_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.cpp) + list(REMOVE_ITEM TGT_cmk_entity_manager_hooks_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp) + endif() else() list(REMOVE_ITEM TGT_cmk_entity_manager_hooks_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.cpp) list(REMOVE_ITEM TGT_cmk_entity_manager_hooks_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.cpp) @@ -267,9 +280,15 @@ set(pq_ce_LINK_OPTIONS ${LIB_LINK_OPTIONS}) add_shared_libtarget(pq_ce TGT_pq_ce_SRC TGT_pq_ce_INC "${pq_ce_DEF_OPTIONS}" "${pq_ce_COMPILE_OPTIONS}" "${pq_ce_LINK_OPTIONS}") set(pq_ce_LINK_LIBS libpq_ce cmk_entity_manager_hooks encryption_hooks client_logic_common client_logic_expressions client_logic_cache client_logic_processor client_logic_fmt client_logic_hooks client_logic_data_fetcher frontend_parser) if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - 
set(pq_ce_LINK_LIBS ${pq_ce_LINK_LIBS} -lgs_ktool -lkmc) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(pq_ce_LINK_LIBS ${pq_ce_LINK_LIBS} -lgs_ktool -lkmc) + endif() +endif() +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + target_link_libraries(pq_ce PRIVATE ${pq_ce_LINK_LIBS} -lcurl -lcjson -lssl -lcrypto -l${SECURE_C_CHECK} -lpthread -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) +else() + target_link_libraries(pq_ce PRIVATE ${pq_ce_LINK_LIBS} -lcurl -lcjson -lssl -lcrypto -l${SECURE_C_CHECK} -lpthread) endif() -target_link_libraries(pq_ce PRIVATE ${pq_ce_LINK_LIBS} -lcurl -lcjson -lssl -lcrypto -lsecurec -lpthread -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss) add_dependencies(pq_ce libpq_ce cmk_entity_manager_hooks encryption_hooks client_logic_common client_logic_expressions client_logic_cache client_logic_processor client_logic_fmt client_logic_hooks client_logic_data_fetcher frontend_parser) target_link_directories(pq_ce PUBLIC ${SECURE_LIB_PATH} @@ -281,7 +300,9 @@ target_link_directories(pq_ce PUBLIC ${CMAKE_BINARY_DIR}/lib ) if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - add_dependencies(pq_ce gs_ktool) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + add_dependencies(pq_ce gs_ktool) + endif() endif() SET_TARGET_PROPERTIES(pq_ce PROPERTIES VERSION 5.5) add_custom_command(TARGET pq_ce POST_BUILD @@ -289,6 +310,21 @@ add_custom_command(TARGET pq_ce POST_BUILD WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/lib ) install(TARGETS pq_ce LIBRARY DESTINATION lib) +else() +SET(TGT_pq_ce_INC + ${PROJECT_SRC_DIR}/include/libpq + ${LIBOPENSSL_INCLUDE_PATH} + ${ZLIB_INCLUDE_PATH} + ${LIBCURL_INCLUDE_PATH} + ${CMAKE_CURRENT_SOURCE_DIR} +) + +set(pq_ce_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-all) +AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/client_logic_common TGT_client_logic_common_SRC) +set(TGT_client_logic_common_cstrings_map_SRC ${CMAKE_CURRENT_SOURCE_DIR}/client_logic_common/cstrings_map.cpp) +set(client_logic_common_DEF_OPTIONS ${MACRO_OPTIONS} -DFRONTEND -DFRONTEND_PARSER -DHAVE_CE) +add_static_objtarget(libpq_cstrings_map TGT_client_logic_common_cstrings_map_SRC TGT_pq_ce_INC "${client_logic_common_DEF_OPTIONS}" "${pq_ce_COMPILE_OPTIONS}" "") +endif() #end of not build pq_ce # gauss pq ogject list(APPEND TGT_libpq_SRC @@ -324,6 +360,8 @@ endif() install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/pg_service.conf.sample DESTINATION share/postgresql/ ) -install_symlink(libpq_ce.so.5.5 libpq_ce.so.5 ${prefix_home}/lib) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install_symlink(libpq_ce.so.5.5 libpq_ce.so.5 ${prefix_home}/lib) +endif() install_symlink(libpq.so.5.5 libpq.so.5 ${prefix_home}/lib) diff --git a/src/common/interfaces/libpq/Makefile b/src/common/interfaces/libpq/Makefile index 625caebbf..58ce13ee7 100644 --- a/src/common/interfaces/libpq/Makefile +++ b/src/common/interfaces/libpq/Makefile @@ -26,12 +26,16 @@ override CPPFLAGS += -DPGXC -I$(srcdir) -I$(top_builddir)/src/ -I$(top_builddir) override CPPFLAGS += -I$(top_builddir)/$(subdir)/client_logic_hooks override CPPFLAGS += -I$(top_builddir)/$(subdir)/client_logic_hooks/cmk_entity_manager_hooks override CPPFLAGS += -I$(top_builddir)/$(subdir)/client_logic_hooks/encryption_hooks +ifeq ($(enable_lite_mode), no) override CPPFLAGS += -I$(CJSON_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) 
-L$(CJSON_LIB_PATH) -L$(LIBCURL_LIB_PATH) -lcjson -lcurl +endif ifeq "$(ENABLE_CE)" "1" override CPPFLAGS += -DHAVE_CE -DWORDS_BIGENDIAN ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) -override CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -L$(LIBKMC_LIB_PATH) -lkmc + ifneq ($(enable_lite_mode), yes) + override CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -L$(LIBKMC_LIB_PATH) -lkmc + endif endif endif @@ -98,9 +102,12 @@ endif # shared library link. (The order in which you list them here doesn't # matter.) ifneq ($(PORTNAME), win32) -SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lssl -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lsocket -lnsl -lresolv -lintl -lsecurec, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lssl -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lsocket -lnsl -lresolv -lintl -l$(SECURE_C_CHECK), $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) else -SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lssl -lk5crypto -lkrb5 -lgssapi32 -lsocket -lnsl -lresolv -lintl -lsecurec $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE) -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss +SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lssl -lk5crypto -lkrb5 -lgssapi32 -lsocket -lnsl -lresolv -lintl -l$(SECURE_C_CHECK) $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE) +endif +ifeq ($(enable_lite_mode), no) + SHLIB_LINK += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss endif ifeq ($(PORTNAME), win32) SHLIB_LINK += -lshfolder -lwsock32 -lws2_32 -lsecur32 $(filter -leay32 -lssleay32 -lcomerr32 -lkrb5_32, $(LIBS)) @@ -123,7 +130,12 @@ endif ifneq ($(enable_ut), yes) ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) -OBJS := $(filter-out client_logic_hooks/cmk_entity_manager_hooks/register_local_kms.o, $(OBJS)) +ifneq ($(enable_lite_mode), yes) + OBJS := $(filter-out client_logic_hooks/cmk_entity_manager_hooks/register_local_kms.o, $(OBJS)) +else + OBJS := $(filter-out client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.o, $(OBJS)) + OBJS := $(filter-out client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.o, $(OBJS)) +endif else OBJS := $(filter-out client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.o, $(OBJS)) OBJS := $(filter-out client_logic_hooks/cmk_entity_manager_hooks/register_gs_ktool.o, $(OBJS)) @@ -146,9 +158,11 @@ OBJS:=$(CE_OBJS) NAME:=pq_ce gs_ktool: +ifneq ($(enable_lite_mode), yes) ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) $(MAKE) -C $(top_builddir)/../distribute/bin/gs_ktool endif +endif libpq_ce/fe-protocol3.o libpq_ce/fe-exec.o: client_logic_cache/types_to_oid.h libpq_ce/fe-%.o: fe-%.cpp diff --git a/src/common/interfaces/libpq/cl_state.cpp b/src/common/interfaces/libpq/cl_state.cpp index 7b12a3649..5f6e6409b 100644 --- a/src/common/interfaces/libpq/cl_state.cpp +++ b/src/common/interfaces/libpq/cl_state.cpp @@ -49,7 +49,6 @@ PGClientLogic::PGClientLogic(PGconn *conn, JNIEnv *java_env, jobject jdbc_handle droppedColumnSettings_size(0), droppedColumnSettings_allocated(0), isInvalidOperationOnColumn(false), - should_refresh_function(false), isDuringRefreshCacheOnError(false), is_external_err(false), cacheRefreshType(CacheRefreshType::CACHE_ALL), diff --git 
a/src/common/interfaces/libpq/client_logic_cache/cache_loader.cpp b/src/common/interfaces/libpq/client_logic_cache/cache_loader.cpp index 38d228ddf..078f7c1cf 100644 --- a/src/common/interfaces/libpq/client_logic_cache/cache_loader.cpp +++ b/src/common/interfaces/libpq/client_logic_cache/cache_loader.cpp @@ -21,6 +21,7 @@ * ------------------------------------------------------------------------- */ #include +#include #include "cache_loader.h" #include "client_logic_common/client_logic_utils.h" #include "cache_refresh_type.h" @@ -444,7 +445,6 @@ bool CacheLoader::fill_global_settings_map(PGconn *conn) int arg_key_num = data_fetcher.get_column_index("key"); int arg_value_num = data_fetcher.get_column_index("value"); int change_epoch_num = data_fetcher.get_column_index("change_epoch"); - CachedGlobalSetting *cached_global_setting(NULL); Oid object_oid_prev(0); while (data_fetcher.next()) { @@ -471,7 +471,7 @@ bool CacheLoader::fill_global_settings_map(PGconn *conn) bool is_new_object(false); if (!object_oid_prev || object_oid != object_oid_prev) { cached_global_setting = - new (std::nothrow) CachedGlobalSetting(object_oid, get_database_name(), object_name_space, object_name); + new(std::nothrow) CachedGlobalSetting(object_oid, get_database_name(), object_name_space, object_name); if (cached_global_setting == NULL) { fprintf(stderr, "failed to allocate memory for client master key\n"); return false; @@ -577,7 +577,7 @@ bool CacheLoader::fill_column_settings_info_cache(PGconn *conn) bool is_new_object(false); if (!object_oid_prev || object_oid != object_oid_prev) { column_setting = - new (std::nothrow) CachedColumnSetting(object_oid, get_database_name(), object_name_space, object_name); + new(std::nothrow) CachedColumnSetting(object_oid, get_database_name(), object_name_space, object_name); if (column_setting == NULL) { fprintf(stderr, "failed to allocate memory for column encryption key\n"); return false; @@ -692,7 +692,7 @@ bool CacheLoader::fill_cached_columns(PGconn *conn) Oid data_type_oid = (Oid)atoi(data_fetcher[orig_type_oid_num]); int data_type_mod = atoi(data_fetcher[orig_type_mod_num]); - CachedColumn *cached_column = new (std::nothrow) CachedColumn(my_oid, table_oid, database_name, schema_name, + CachedColumn *cached_column = new(std::nothrow) CachedColumn(my_oid, table_oid, database_name, schema_name, table_name, column_name, column_position, data_type_oid, data_type_mod); if (cached_column == NULL) { fprintf(stderr, "failed to new CachedColumn object\n"); @@ -736,7 +736,7 @@ const bool CacheLoader::fill_cached_types(PGconn *conn) m_cached_types_list.clear(); while (data_fetcher.next()) { Oid oid_value = (Oid)atoi(data_fetcher[typid_num]); - CachedType *curr_type = new (std::nothrow) CachedType(oid_value, data_fetcher[typname_num], + CachedType *curr_type = new(std::nothrow) CachedType(oid_value, data_fetcher[typname_num], data_fetcher[schema_num], data_fetcher[db_num], conn->client_logic->m_cached_column_manager); if (curr_type == NULL) { fprintf(stderr, "failed to new CachedType object\n"); @@ -749,11 +749,11 @@ const bool CacheLoader::fill_cached_types(PGconn *conn) bool CacheLoader::fill_cached_procs(PGconn *conn) { - const char *query = "select func_id, prorettype_orig, proargcachedcol, proallargtypes_orig,proname," - "pronargs,proargtypes,proallargtypes,proargnames,nspname,current_database() as dbname " + const char *query = "select func_id, proargcachedcol, proallargtypes_orig,proname," + 
"pronargs,proargtypes,proallargtypes,proargnames,proargmodes,nspname,current_database() as dbname, " + "EXTRACT(epoch from gs_proc.last_change) as change_epoch " "from gs_encrypted_proc gs_proc join pg_proc on gs_proc.func_id = pg_proc.oid " " join pg_namespace ON (pg_namespace.oid = pronamespace);"; - DataFetcher data_fetcher = conn->client_logic->m_data_fetcher_manager->get_data_fetcher(); if (!data_fetcher.load(query)) { fprintf(stderr, "fill_cached_procs - query to initialize failed or did not return data\n"); @@ -761,50 +761,54 @@ bool CacheLoader::fill_cached_procs(PGconn *conn) } // getting the index of each column in the response int func_oid_num = data_fetcher.get_column_index("func_id"); - int origin_ret_type_num = data_fetcher.get_column_index("prorettype_orig"); int cached_col_num = data_fetcher.get_column_index("proargcachedcol"); int alltypes_orig_num = data_fetcher.get_column_index("proallargtypes_orig"); int proname_num = data_fetcher.get_column_index("proname"); int pronargs_num = data_fetcher.get_column_index("pronargs"); int proargtypes_num = data_fetcher.get_column_index("proargtypes"); int proallargtypes_num = data_fetcher.get_column_index("proallargtypes"); + int proargmodes_num = data_fetcher.get_column_index("proargmodes"); int proargnames_num = data_fetcher.get_column_index("proargnames"); int dbname_num = data_fetcher.get_column_index("dbname"); int schema_num = data_fetcher.get_column_index("nspname"); - + int change_epoch_num = data_fetcher.get_column_index("change_epoch"); m_proc_list.clear(); while (data_fetcher.next()) { - CachedProc *curr_proc = new (std::nothrow) CachedProc(); + update_last_change_epoch(data_fetcher[change_epoch_num]); + CachedProc *curr_proc = new(std::nothrow) CachedProc(conn); if (curr_proc == NULL) { fprintf(stderr, "failed to new CachedProc object\n"); return false; } - /* 10: base is decimal */ - curr_proc->m_func_id = strtoul(data_fetcher[func_oid_num], NULL, 10); + curr_proc->m_func_id = strtoul(data_fetcher[func_oid_num], NULL, NUMBER_BASE); curr_proc->m_proname = strdup(data_fetcher[proname_num]); curr_proc->m_pronargs = atoi(data_fetcher[pronargs_num]); - curr_proc->m_prorettype_orig = strtoul(data_fetcher[origin_ret_type_num], NULL, 10); curr_proc->m_dbname = strdup(data_fetcher[dbname_num]); curr_proc->m_schema_name = strdup(data_fetcher[schema_num]); if (data_fetcher[cached_col_num] != NULL && strlen(data_fetcher[cached_col_num]) > 0) { - parse_oid_array(data_fetcher[cached_col_num], &curr_proc->m_proargcachedcol); + parse_oid_array(conn, data_fetcher[cached_col_num], &curr_proc->m_proargcachedcol); } if (data_fetcher[proargtypes_num] != NULL && strlen(data_fetcher[proargtypes_num]) > 0) { - parse_oid_array(data_fetcher[proargtypes_num], &curr_proc->m_proargtypes); + parse_oid_array(conn, data_fetcher[proargtypes_num], &curr_proc->m_proargtypes); } if (data_fetcher[alltypes_orig_num] != NULL && strlen(data_fetcher[alltypes_orig_num]) > 0) { - curr_proc->m_nallargtypes_orig = - parse_oid_array(data_fetcher[alltypes_orig_num], &curr_proc->m_proallargtypes_orig); + curr_proc->m_nallargtypes = + parse_oid_array(conn, data_fetcher[alltypes_orig_num], &curr_proc->m_proallargtypes_orig); } if (data_fetcher[proallargtypes_num] != NULL && strlen(data_fetcher[proallargtypes_num]) > 0) { - parse_oid_array(data_fetcher[proallargtypes_num], &curr_proc->m_proallargtypes); + parse_oid_array(conn, data_fetcher[proallargtypes_num], &curr_proc->m_proallargtypes); } if (data_fetcher[proargnames_num] != NULL && 
strlen(data_fetcher[proargnames_num]) > 0) { - curr_proc->m_nallargtypes = parse_char_array(data_fetcher[proargnames_num], &curr_proc->m_proargnames); + curr_proc->m_nargnames = + parse_string_array(conn, data_fetcher[proargnames_num], &curr_proc->m_proargnames); + } + if (data_fetcher[proargmodes_num] != NULL && strlen(data_fetcher[proargmodes_num]) > 0) { + parse_char_array(conn, data_fetcher[proargmodes_num], &curr_proc->m_proargmodes); } curr_proc->set_original_ids(); - /* add to maps */ + + // add to maps m_proc_list.add(curr_proc); } return true; diff --git a/src/common/interfaces/libpq/client_logic_cache/cache_loader.h b/src/common/interfaces/libpq/client_logic_cache/cache_loader.h index 842c47e0e..afa500f50 100644 --- a/src/common/interfaces/libpq/client_logic_cache/cache_loader.h +++ b/src/common/interfaces/libpq/client_logic_cache/cache_loader.h @@ -167,7 +167,6 @@ private: tables from the server */ DatabaseType m_compat_type; /* server SQL compatibility */ NameData m_current_database_name; - double m_change_epoch = 0; /* time stamp of the latest client logic configuration fetched from the server */ void update_last_change_epoch(const char *time_since_epoc); double get_local_max_time_stamp() const; @@ -175,6 +174,7 @@ private: static const int m_FQDN_MAX_SIZE = NAMEDATALEN * 4; + static const int NUMBER_BASE = 10; }; #endif /* CACHE_LOADER_H */ diff --git a/src/common/interfaces/libpq/client_logic_cache/cached_proc.cpp b/src/common/interfaces/libpq/client_logic_cache/cached_proc.cpp index 3c9406839..9f7960252 100755 --- a/src/common/interfaces/libpq/client_logic_cache/cached_proc.cpp +++ b/src/common/interfaces/libpq/client_logic_cache/cached_proc.cpp @@ -25,10 +25,20 @@ #include "libpq-int.h" #include "client_logic_common/client_logic_utils.h" +/* + * @Description checks if param is used as output parameter + * @Param param_mode + * @Return true if param is output param, else false + */ +static const bool is_output_param(const char param_mode) +{ + return param_mode == FUNC_PARAM_OUT || param_mode == FUNC_PARAM_INOUT || param_mode == FUNC_PARAM_TABLE; +} + CachedProc::~CachedProc() { if (m_proargnames) { - for (size_t i = 0; i < m_nallargtypes; i++) { + for (size_t i = 0; i < m_nargnames; i++) { if (m_proargnames[i]) libpq_free(m_proargnames[i]); } @@ -41,6 +51,7 @@ CachedProc::~CachedProc() libpq_free(m_proargtypes); libpq_free(m_proallargtypes); libpq_free(m_proallargtypes_orig); + libpq_free(m_proargmodes); if (m_original_ids) { libpq_free(m_original_ids); m_original_ids = NULL; } @@ -52,8 +63,8 @@ void CachedProc::set_original_ids() if (m_original_ids == NULL) { m_original_ids = (int*)malloc(get_num_processed_args() * sizeof(int)); if (m_original_ids == NULL) { - fprintf(stderr, "cannot allocate memory for m_original_ids\n"); - exit(EXIT_FAILURE); + printfPQExpBuffer(&m_conn->errorMessage, libpq_gettext("cannot allocate memory for m_original_ids\n")); + return; } for (size_t i = 0; i < get_num_processed_args(); i++) { m_original_ids[i] = get_original_id(i); } @@ -63,12 +74,13 @@ const Oid CachedProc::get_original_id(const size_t idx) const { - if (idx >= m_nallargtypes_orig || !m_proallargtypes) { + if (idx >= m_nallargtypes || !m_proallargtypes) { return InvalidOid; } size_t index = 0; - for (size_t i = 0; i < m_nallargtypes_orig; i++) { - if (is_clientlogic_datatype(m_proallargtypes[i])) { + for (size_t i = 0; i < m_nallargtypes; i++) { + /* Since this function is used on the response, for deprocessing the result set, input params
should be skipped */ + if (is_clientlogic_datatype(m_proallargtypes[i]) && is_output_param(m_proargmodes[i])) { if (index == idx) { return m_proallargtypes_orig[i]; } diff --git a/src/common/interfaces/libpq/client_logic_cache/cached_proc.h b/src/common/interfaces/libpq/client_logic_cache/cached_proc.h index 3596d9316..393cd40b6 100755 --- a/src/common/interfaces/libpq/client_logic_cache/cached_proc.h +++ b/src/common/interfaces/libpq/client_logic_cache/cached_proc.h @@ -28,24 +28,25 @@ #include "cached_column.h" #include "icached_rec.h" /* - * select func_id, prorettype_orig, proargcachedcol, proallargtypes_orig, + * select func_id, proargcachedcol, proallargtypes_orig, * proname,pronargs,proargtypes,proallargtypes,proargnames,nspname from gs_encrypted_proc gs_proc join pg_proc on * gs_proc.func_id = pg_proc.oid join pg_namespace ON (pg_namespace.oid = pronamespace); */ class CachedProc : public ICachedRec { public: - CachedProc() - : m_func_id(0), + CachedProc(PGconn* const conn) + : m_conn(conn), + m_func_id(0), m_proname(NULL), m_pronargs(0), - m_prorettype_orig(0), m_proargcachedcol(NULL), m_proargtypes(NULL), m_nallargtypes(0), - m_nallargtypes_orig(0), m_proallargtypes(NULL), m_proallargtypes_orig(NULL), + m_nargnames(0), m_proargnames(NULL), + m_proargmodes(NULL), m_schema_name(NULL), m_dbname(NULL), m_refcount(0) {}; @@ -59,22 +60,23 @@ public: void set_original_ids(); const size_t get_num_processed_args() const override { - return m_nallargtypes_orig; + return m_nallargtypes; } + PGconn* const m_conn; Oid m_func_id; char* m_proname; int m_pronargs; - Oid m_prorettype_orig; Oid* m_proargcachedcol; Oid* m_proargtypes; size_t m_nallargtypes; - size_t m_nallargtypes_orig; Oid* m_proallargtypes; Oid* m_proallargtypes_orig; + size_t m_nargnames; char** m_proargnames; + char* m_proargmodes; /* the argmode of a param may be OUT, INOUT, IN, VARIADIC or TABLE */ char* m_schema_name; char* m_dbname; size_t m_refcount; }; -#endif \ No newline at end of file +#endif diff --git a/src/common/interfaces/libpq/client_logic_common/client_logic_utils.cpp b/src/common/interfaces/libpq/client_logic_common/client_logic_utils.cpp index a716225ca..065a6848c 100755 --- a/src/common/interfaces/libpq/client_logic_common/client_logic_utils.cpp +++ b/src/common/interfaces/libpq/client_logic_common/client_logic_utils.cpp @@ -1,312 +1,323 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * client_logic_utils.cpp - * - * IDENTIFICATION - * src\common\interfaces\libpq\client_logic_common\client_logic_utils.cpp - * - * ------------------------------------------------------------------------- - */ - -#include -#include -#include "client_logic_utils.h" -#include "securec.h" -typedef uintptr_t Datum; -#include "nodes/primnodes.h" -#include "libpq-int.h" -#define HTONLL(x) ((1 == htonl(1)) ? (x) : ((((uint64_t)htonl((x)&0xFFFFFFFFUL)) << 32) | htonl((uint32_t)((x) >> 32)))) -#define NTOHLL(x) ((1 == ntohl(1)) ?
(x) : ((((uint64_t)ntohl((x)&0xFFFFFFFFUL)) << 32) | ntohl((uint32_t)((x) >> 32)))) - -bool is_clientlogic_datatype(const Oid o) -{ - return (o == BYTEAWITHOUTORDERWITHEQUALCOLOID || o == BYTEAWITHOUTORDERCOLOID); -} -/* - * Get the string without space - */ -char *del_blanks(char *str, const int str_len) -{ - char *source = NULL; - char *dest = NULL; - char *cpy_str = NULL; - errno_t rc = 0; - - if (str == NULL || str_len <= 0) { - return NULL; - } - - cpy_str = (char *)malloc(str_len + 1); - if (cpy_str == NULL) { - return NULL; - } - rc = memset_s(cpy_str, str_len + 1, 0, str_len + 1); - securec_check_c(rc, "\0", "\0"); - source = str; - dest = cpy_str; - while (source != NULL && isspace((int)*source)) { - source++; - } - if (*source == '\0') { - free(cpy_str); - cpy_str = NULL; - return NULL; - } - - for (; *source != '\0'; source++) { - if (!isspace((int)*source)) { - *dest = *source; - dest++; - } - } - - *dest = '\0'; - return cpy_str; -} - - -bool concat_col_fqdn(const char *catalogname, const char *schemaname, const char *relname, const char *colname, - char *fqdn) -{ - if (!fqdn || !colname) { - return false; - } - bool ret = concat_table_fqdn(catalogname, schemaname, relname, fqdn); - if (ret) { - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, ".", 1)); - } else { - fqdn[0] = '\0'; - } - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, colname, strlen(colname))); - return true; -} - -bool concat_table_fqdn(const char *catalogname, const char *schemaname, const char *relname, char *fqdn) -{ - if (!fqdn) { - return false; - } - - fqdn[0] = '\0'; - if (catalogname && catalogname[0] != '\0') { - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, catalogname, strlen(catalogname))); - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, ".", 1)); - } else { - fqdn[0] = '\0'; - return false; - } - - if (schemaname && schemaname[0] != '\0') { - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, schemaname, strlen(schemaname))); - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, ".", 1)); - } else { - fqdn[0] = '\0'; - return false; - } - - if (relname && relname[0] != '\0') { - check_strncat_s(strncat_s(fqdn, NAMEDATALEN, relname, strlen(relname))); - } else { - fqdn[0] = '\0'; - return false; - } - - return true; -} - -void free_obj_list(ObjName *obj_list) -{ - ObjName *cur_obj = obj_list; - ObjName *to_free = NULL; - - while (cur_obj != NULL) { - to_free = cur_obj; - cur_obj = cur_obj->next; - free(to_free); - to_free = NULL; - } - - obj_list = NULL; -} - -ObjName *obj_list_append(ObjName *obj_list, const char *new_obj_name) -{ - ObjName *new_obj = NULL; - ObjName *last_obj = obj_list; - errno_t rc = 0; - - if (new_obj_name == NULL || strlen(new_obj_name) >= OBJ_NAME_BUF_LEN) { - free_obj_list(obj_list); - return NULL; - } - - new_obj = (ObjName *)malloc(sizeof(ObjName)); - if (new_obj == NULL) { - free_obj_list(obj_list); - return NULL; - } - - rc = strcpy_s(new_obj->obj_name, sizeof(new_obj->obj_name), new_obj_name); - securec_check_c(rc, "", ""); - new_obj->next = NULL; - - if (obj_list == NULL) { - return new_obj; - } - - while (last_obj->next != NULL) { - last_obj = last_obj->next; - } - - last_obj->next = new_obj; - return obj_list; -} -/** - * helper function to parse a string array of Oids and types. 
- * It searches for possible separator with are ',' or ' ' space and count its occurrences - * @param input input array - * @return the numbers of seperators in the input string - */ -size_t count_sep_in_str(const char *input) -{ - size_t result = 0; - if (input == NULL || strlen(input) == 0) { - return result; - } - for (size_t index = 0; index < strlen(input); ++index) { - if (input[index] == ',' || input[index] == ' ') { - ++result; - } - } - return result; -} -/** - * Parses a char array coming from the database server when loading the cache - * It is in the form of {elem,elem,elem ...} or "elem elem elem elem" - * Note that this method allocates the memory for the items_out parameters and it is up to the caller to free it - * @param[in] input the input array string - * @param[out] items_out vector of items allocated by this method - * @return the numbers of items in items_out - */ -size_t parse_char_array(const char *input, char ***items_out) -{ - *items_out = NULL; - char **items = NULL; - size_t output_length = 0; - - if (input == NULL || - strlen(input) == 0) { /* there are 2 characters for opening and closing brakets {item1,item2....itemn} */ - return output_length; - } - int start_offset = 0; - if (input[0] == '{') { - start_offset = 1; - /* if input length < 3, it has no item */ - if (strlen(input) < 3) { - return output_length; - } - } - size_t count_of_column = count_sep_in_str(input); - output_length = count_of_column + 1; - size_t elem_index = 0; - items = (char **)malloc(output_length * sizeof(char *)); - if (items == NULL) { - fprintf(stderr, "Error: out of memory\n"); - output_length = 0; - return output_length; - } - *items_out = items; - check_memset_s(memset_s(items, output_length * sizeof(char *), 0, output_length * sizeof(char *))); - size_t begin_index = start_offset; - size_t last_index = 0; - /* Ignoring the brackets if exists {item1,item2....itemn} */ - for (size_t index = start_offset; index < strlen(input); ++index) { - last_index = index; - if (input[index] == ',' || input[index] == ' ' || input[index] == '}') { - if (elem_index < output_length) { - size_t item_len = index - begin_index; - items[elem_index] = (char *)malloc((item_len + 1) * sizeof(char)); - if (items[elem_index] == NULL) { - fprintf(stderr, "Error: out of memory\n"); - for (size_t i = 0; i < elem_index; i++) { - libpq_free(items[i]); - } - libpq_free(items); - output_length = 0; - return output_length; - } - check_strncpy_s(strncpy_s(items[elem_index], item_len + 1, input + begin_index, item_len)); - items[elem_index][index - begin_index] = 0; - } else { - fprintf(stderr, "Error: index out of bound on parse_char_array %s\n", input); - } - begin_index = index + 1; - ++elem_index; - } - } - if (start_offset == 0) { /* Handle the last item */ - if (elem_index < output_length) { - size_t item_len = last_index - begin_index + 1; - items[elem_index] = (char *)malloc((item_len + 1) * sizeof(char)); - if (items[elem_index] == NULL) { - fprintf(stderr, "Error: out of memory\n"); - for (size_t i = 0; i < elem_index; i++) { - libpq_free(items[i]); - } - libpq_free(items); - output_length = 0; - return output_length; - } - check_strncpy_s(strncpy_s(items[elem_index], item_len + 1, input + begin_index, item_len)); - items[elem_index][last_index - begin_index + 1] = 0; - } - } - return output_length; -} -/** - * Parses a char array coming from the database server when loading the cache - * It is in the form of {oid1,oid2,...oidn) or "oid1 oid2 ... 
oidn" - * Note that this method allocates the memory for the items_out parameters and it is up to the caller to free it - * @param[in] input the input array string - * @param[out] items_out vector of items allocated by this method - * @return the numbers of items in items_out* - */ -size_t parse_oid_array(const char *input, Oid **items_out) -{ - *items_out = NULL; - char **items_char = NULL; - Oid *items = NULL; - size_t output_length = parse_char_array(input, &items_char); - if (output_length > 0) { - items = (Oid *)malloc(output_length * sizeof(Oid)); - if (items == NULL) { - fprintf(stderr, "Error: out of memory\n"); - for (size_t index = 0; index < output_length; ++index) { - free(items_char[index]); - } - free(items_char); - output_length = 0; - return output_length; - } - *items_out = items; - for (size_t index = 0; index < output_length; ++index) { - items[index] = (Oid)atoi(items_char[index]); - free(items_char[index]); - } - } - free(items_char); - return output_length; -} +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * client_logic_utils.cpp + * + * IDENTIFICATION + * src\common\interfaces\libpq\client_logic_common\client_logic_utils.cpp + * + * ------------------------------------------------------------------------- + */ + +#include +#include +#include "client_logic_utils.h" +#include "securec.h" +typedef uintptr_t Datum; +#include "nodes/primnodes.h" +#include "libpq-int.h" +#define HTONLL(x) ((1 == htonl(1)) ? (x) : ((((uint64_t)htonl((x)&0xFFFFFFFFUL)) << 32) | htonl((uint32_t)((x) >> 32)))) +#define NTOHLL(x) ((1 == ntohl(1)) ? 
(x) : ((((uint64_t)ntohl((x)&0xFFFFFFFFUL)) << 32) | ntohl((uint32_t)((x) >> 32)))) + +static const size_t MIN_SIZE_WITH_ELEMENTS = 3; + +bool is_clientlogic_datatype(const Oid o) +{ + return (o == BYTEAWITHOUTORDERWITHEQUALCOLOID || o == BYTEAWITHOUTORDERCOLOID); +} +/* + * Get the string without space + */ +char *del_blanks(char *str, const int str_len) +{ + char *source = NULL; + char *dest = NULL; + char *cpy_str = NULL; + errno_t rc = 0; + + if (str == NULL || str_len <= 0) { + return NULL; + } + + cpy_str = (char *)malloc(str_len + 1); + if (cpy_str == NULL) { + return NULL; + } + rc = memset_s(cpy_str, str_len + 1, 0, str_len + 1); + securec_check_c(rc, "\0", "\0"); + source = str; + dest = cpy_str; + while (source != NULL && isspace((int)*source)) { + source++; + } + if (*source == '\0') { + free(cpy_str); + cpy_str = NULL; + return NULL; + } + + for (; *source != '\0'; source++) { + if (!isspace((int)*source)) { + *dest = *source; + dest++; + } + } + + *dest = '\0'; + return cpy_str; +} + + +bool concat_col_fqdn(const char *catalogname, const char *schemaname, const char *relname, const char *colname, + char *fqdn) +{ + if (!fqdn || !colname) { + return false; + } + bool ret = concat_table_fqdn(catalogname, schemaname, relname, fqdn); + if (ret) { + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, ".", 1)); + } else { + fqdn[0] = '\0'; + } + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, colname, strlen(colname))); + return true; +} + +bool concat_table_fqdn(const char *catalogname, const char *schemaname, const char *relname, char *fqdn) +{ + if (!fqdn) { + return false; + } + + fqdn[0] = '\0'; + if (catalogname && catalogname[0] != '\0') { + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, catalogname, strlen(catalogname))); + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, ".", 1)); + } else { + fqdn[0] = '\0'; + return false; + } + + if (schemaname && schemaname[0] != '\0') { + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, schemaname, strlen(schemaname))); + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, ".", 1)); + } else { + fqdn[0] = '\0'; + return false; + } + + if (relname && relname[0] != '\0') { + check_strncat_s(strncat_s(fqdn, NAMEDATALEN, relname, strlen(relname))); + } else { + fqdn[0] = '\0'; + return false; + } + + return true; +} + +void free_obj_list(ObjName *obj_list) +{ + ObjName *cur_obj = obj_list; + ObjName *to_free = NULL; + + while (cur_obj != NULL) { + to_free = cur_obj; + cur_obj = cur_obj->next; + free(to_free); + to_free = NULL; + } + + obj_list = NULL; +} + +ObjName *obj_list_append(ObjName *obj_list, const char *new_obj_name) +{ + ObjName *new_obj = NULL; + ObjName *last_obj = obj_list; + errno_t rc = 0; + + if (new_obj_name == NULL || strlen(new_obj_name) >= OBJ_NAME_BUF_LEN) { + free_obj_list(obj_list); + return NULL; + } + + new_obj = (ObjName *)malloc(sizeof(ObjName)); + if (new_obj == NULL) { + free_obj_list(obj_list); + return NULL; + } + + rc = strcpy_s(new_obj->obj_name, sizeof(new_obj->obj_name), new_obj_name); + securec_check_c(rc, "", ""); + new_obj->next = NULL; + + if (obj_list == NULL) { + return new_obj; + } + + while (last_obj->next != NULL) { + last_obj = last_obj->next; + } + + last_obj->next = new_obj; + return obj_list; +} +/** + * helper function to parse a string array of Oids and types. 
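+ * For example (illustrative only): count_sep_in_str("{10,20,30}") returns 2 for the two commas, so the array parsers below derive 2 + 1 = 3 items.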
+ * It searches for possible separators, which are ',' or the space character ' ', and counts their occurrences + * @param input input array + * @return the number of separators in the input string + */ +size_t count_sep_in_str(const char *input) +{ + size_t result = 0; + if (input == NULL || strlen(input) == 0) { + return result; + } + for (size_t index = 0; index < strlen(input); ++index) { + if (input[index] == ',' || input[index] == ' ') { + ++result; + } + } + return result; +} + +/** + * Parses a string array coming from the database server when loading the cache + * It is in the form of {elem,elem,elem ...} or "elem elem elem elem" + * Note that this method allocates the memory for the items_out parameter and it is up to the caller to free it + * @param[in] input the input array string + * @param[out] items_out vector of items allocated by this method + * @return the number of items in items_out + */ +size_t parse_string_array(PGconn* const conn, const char *input, char ***items_out) +{ + *items_out = NULL; + char **items = NULL; + size_t output_length = 0; + + if (input == NULL || strlen(input) == 0) { + /* there are 2 characters for the opening and closing brackets {item1,item2....itemn} */ + return output_length; + } + size_t start_offset = 0; + if (input[0] == '{') { + start_offset = 1; + if (strlen(input) < MIN_SIZE_WITH_ELEMENTS) { + return output_length; + } + } + size_t count_of_column = count_sep_in_str(input); + output_length = count_of_column + 1; + size_t elem_index = 0; + items = (char **)malloc(output_length * sizeof(char*)); + *items_out = items; + if (items == NULL) { + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("Error: out of memory\n")); + output_length = 0; + return output_length; + } + check_memset_s(memset_s(items, output_length * sizeof(char *), 0, output_length * sizeof(char*))); + size_t begin_index = start_offset; + size_t last_index = start_offset; + for (size_t index = start_offset; index < strlen(input); ++index) { + // Ignore the brackets if present {item1,item2....itemn} + last_index = index; + if (input[index] == ',' || input[index] == ' ' || input[index] == '}') { + if (elem_index < output_length) { + size_t item_len = index - begin_index; + items[elem_index] = (char *)malloc((item_len + 1) * sizeof(char)); + if (items[elem_index] == NULL) { + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("Error: out of memory\n")); + for (size_t i = 0; i < elem_index; i++) { + libpq_free(items[i]); + } + libpq_free(items); + *items_out = NULL; + return 0; + } + check_strncpy_s(strncpy_s(items[elem_index], item_len + 1, input + begin_index, item_len)); + items[elem_index][index - begin_index] = 0; + } else { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("Error: index out of bound on parse_string_array %s\n"), input); + } + begin_index = index + 1; + ++elem_index; + } + } + if (start_offset == 0) { // Handle the last item + if (elem_index < output_length) { + size_t item_len = last_index + 1 - begin_index; + items[elem_index] = (char *)malloc((item_len + 1) * sizeof(char)); + if (items[elem_index] == NULL) { + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("Error: out of memory\n")); + for (size_t i = 0; i < elem_index; i++) { + libpq_free(items[i]); + } + libpq_free(items); + *items_out = NULL; + return 0; + } + check_strncpy_s(strncpy_s(items[elem_index], item_len + 1, input + begin_index, item_len)); + items[elem_index][item_len] = 0; + } + } + return output_length; +} + +/** + * Parses a char array coming from the database server when loading the cache + * It is in the form of {'c','i',..'o'} or "c i ...
o" + * Note that this method allocates the memory for the items_out parameters and it is up to the caller to free it + * @param[in] input the input array string + * @param[out] items_out vector of items allocated by this method + * @return the numbers of items in items_out* + */ +size_t parse_char_array(PGconn* const conn, const char *input, char **items_out) +{ + *items_out = NULL; + char **items_char = NULL; + char *items = NULL; + size_t output_length = parse_string_array(conn, input, &items_char); + if (output_length > 0) { + items = (char*)malloc(output_length * sizeof(char)); + *items_out = items; + for (size_t index = 0; index < output_length; ++index) { + items[index] = *(items_char[index]); + free(items_char[index]); + } + } + free(items_char); + return output_length; +} +/** + * Parses a oid array coming from the database server when loading the cache + * It is in the form of {oid1,oid2,...oidn) or "oid1 oid2 ... oidn" + * Note that this method allocates the memory for the items_out parameters and it is up to the caller to free it + * @param[in] input the input array string + * @param[out] items_out vector of items allocated by this method + * @return the numbers of items in items_out* + */ +size_t parse_oid_array(PGconn* const conn, const char *input, Oid **items_out) +{ + *items_out = NULL; + char **items_char = NULL; + Oid *items = NULL; + size_t output_length = parse_string_array(conn, input, &items_char); + if (output_length > 0) { + items = (Oid *)malloc(output_length * sizeof(Oid)); + if (items == NULL) { + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("Error: out of memory\n")); + for (size_t index = 0; index < output_length; ++index) { + free(items_char[index]); + } + free(items_char); + output_length = 0; + return output_length; + } + *items_out = items; + for (size_t index = 0; index < output_length; ++index) { + items[index] = (Oid)atoi(items_char[index]); + free(items_char[index]); + } + } + free(items_char); + return output_length; +} diff --git a/src/common/interfaces/libpq/client_logic_common/client_logic_utils.h b/src/common/interfaces/libpq/client_logic_common/client_logic_utils.h index edb0a1df3..3ef8b19a4 100755 --- a/src/common/interfaces/libpq/client_logic_common/client_logic_utils.h +++ b/src/common/interfaces/libpq/client_logic_common/client_logic_utils.h @@ -29,7 +29,7 @@ #include "client_logic_cache/dataTypes.def" struct RangeVar; typedef unsigned int Oid; - +typedef struct pg_conn PGconn; const int OBJ_NAME_BUF_LEN = 256; /* Array header structures */ typedef struct { @@ -62,9 +62,10 @@ template inline bool is_const(T const & x) return true; } char *del_blanks(char *str, const int str_len); -size_t parse_char_array(const char *input, char ***items_out); -size_t parse_oid_array(const char *input, Oid **items_out); - +size_t count_char_in_str(const char *input, char niddle); +size_t parse_string_array(PGconn* const conn, const char *input, char ***items_out); +size_t parse_char_array(PGconn* const conn, const char *input, char **items_out); +size_t parse_oid_array(PGconn* const conn, const char *input, Oid **items_out); extern void free_obj_list(ObjName *obj_list); extern ObjName *obj_list_append(ObjName *obj_list, const char *new_obj_name); diff --git a/src/common/interfaces/libpq/client_logic_common/cstrings_map.cpp b/src/common/interfaces/libpq/client_logic_common/cstrings_map.cpp index 1ec1fa422..b702f7525 100644 --- a/src/common/interfaces/libpq/client_logic_common/cstrings_map.cpp +++ 
b/src/common/interfaces/libpq/client_logic_common/cstrings_map.cpp @@ -68,7 +68,7 @@ void CStringsMap::set(const char *key, const char *value, size_t valsize) { Assert(key && value); size_t key_index = index(key); - if (valsize == 0) { + if (valsize == SIZE_MAX) { valsize = strlen(value); } if (valsize > MAX_VAL_LEN) { diff --git a/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.cpp b/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.cpp index 6a6d007ca..e88f9f132 100644 --- a/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.cpp +++ b/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.cpp @@ -21,95 +21,20 @@ * ------------------------------------------------------------------------- */ -#include "libpq-int.h" #include "client_logic_common/pg_client_logic_params.h" #include -PGClientLogicParams::PGClientLogicParams(const PGClientLogicParams &other) -{ - init(other); -} - -void PGClientLogicParams::init(const PGClientLogicParams &other) -{ - nParams = other.nParams; - new_param_values = NULL; - adjusted_query = other.adjusted_query; - adjusted_query_size = other.adjusted_query_size; - adjusted_paramTypes = NULL; - copy_sizes = NULL; - adjusted_param_lengths = NULL; - adjusted_param_values = NULL; - new_query = NULL; - new_query_size = other.new_query_size; - if (other.new_query != NULL && other.new_query_size > 0) { - new_query = (char *)calloc(new_query_size + 1, sizeof(char)); - if (new_query == NULL) { - printf("out of memory\n"); - exit(EXIT_FAILURE); - } - check_strncpy_s(strncpy_s(new_query, new_query_size + 1, other.new_query, other.new_query_size)); - new_query[new_query_size] = '\0'; - } - if (other.nParams && other.copy_sizes) { - copy_sizes = (size_t *)calloc(other.nParams, sizeof(size_t)); - if (copy_sizes == NULL) { - printf("out of memory\n"); - exit(EXIT_FAILURE); - } - check_memcpy_s( - memcpy_s(copy_sizes, nParams * sizeof(size_t), other.copy_sizes, other.nParams * sizeof(size_t))); - } - if (other.new_param_values) { - new_param_values = (unsigned char **)calloc(other.nParams, sizeof(unsigned char *)); - if (new_param_values == NULL) { - printf("out of memory\n"); - exit(EXIT_FAILURE); - } - for (size_t i = 0; i < other.nParams; ++i) { - if (copy_sizes != NULL && copy_sizes[i]) { - new_param_values[i] = (unsigned char *)calloc(other.copy_sizes[i], sizeof(unsigned char)); - if (new_param_values[i] == NULL) { - printf("out of memory\n"); - exit(EXIT_FAILURE); - } - check_memcpy_s(memcpy_s(new_param_values[i], copy_sizes[i] * sizeof(unsigned char), - other.new_param_values[i], other.copy_sizes[i])); - } else { - new_param_values[i] = NULL; - } - } - } - if (other.adjusted_paramTypes) { - adjusted_paramTypes = (Oid *)calloc(other.nParams, sizeof(Oid)); - if (adjusted_paramTypes == NULL) { - printf("out of memory\n"); - exit(EXIT_FAILURE); - } - check_memcpy_s(memcpy_s(adjusted_paramTypes, nParams * sizeof(Oid), other.adjusted_paramTypes, - other.nParams * sizeof(Oid))); - } - if (other.adjusted_param_lengths) { - adjusted_param_lengths = (int *)calloc(other.nParams, sizeof(int)); - if (adjusted_param_lengths == NULL) { - printf("out of memory\n"); - exit(EXIT_FAILURE); - } - check_memcpy_s(memcpy_s(adjusted_param_lengths, nParams * sizeof(int), other.adjusted_param_lengths, - other.nParams * sizeof(int))); - } -} - PGClientLogicParams::~PGClientLogicParams() { for (size_t i = 0; i < nParams; i++) { - libpq_free(new_param_values[i]); + if (copy_sizes && copy_sizes[i]) { + 
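+            /* free only the entries that recorded a copy size; slots without one are assumed not to own their buffers */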
libpq_free(new_param_values[i]); + } } libpq_free(new_param_values); libpq_free(copy_sizes); libpq_free(adjusted_paramTypes); - libpq_free(adjusted_param_values); libpq_free(adjusted_param_lengths); libpq_free(new_query); -} \ No newline at end of file +} diff --git a/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.h b/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.h index 7e72076c4..6268cbdd4 100644 --- a/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.h +++ b/src/common/interfaces/libpq/client_logic_common/pg_client_logic_params.h @@ -37,8 +37,7 @@ typedef struct PGClientLogicParams { adjusted_param_values(NULL), adjusted_param_lengths(NULL), copy_sizes(NULL) {}; - PGClientLogicParams(const PGClientLogicParams &other); - void init(const PGClientLogicParams &other); + PGClientLogicParams(const PGClientLogicParams &other) = delete; ~PGClientLogicParams(); char *new_query; size_t new_query_size; diff --git a/src/common/interfaces/libpq/client_logic_data_fetcher/jni_conn_cursor.cpp b/src/common/interfaces/libpq/client_logic_data_fetcher/jni_conn_cursor.cpp index 76505db57..b74e8910f 100644 --- a/src/common/interfaces/libpq/client_logic_data_fetcher/jni_conn_cursor.cpp +++ b/src/common/interfaces/libpq/client_logic_data_fetcher/jni_conn_cursor.cpp @@ -23,7 +23,9 @@ #include "jni_conn_cursor.h" #include "libpq-int.h" +#ifndef ENABLE_LITE_MODE #include +#endif static const int DATA_INDEX = 2; diff --git a/src/common/interfaces/libpq/client_logic_data_fetcher/lib_pq_cursor.cpp b/src/common/interfaces/libpq/client_logic_data_fetcher/lib_pq_cursor.cpp index 30cd75b56..7ae94600c 100644 --- a/src/common/interfaces/libpq/client_logic_data_fetcher/lib_pq_cursor.cpp +++ b/src/common/interfaces/libpq/client_logic_data_fetcher/lib_pq_cursor.cpp @@ -24,6 +24,8 @@ #include "lib_pq_cursor.h" #include "stdio.h" #include "libpq/libpq-fe.h" +#include "libpq/libpq-int.h" + /* * * Constructor @@ -54,6 +56,11 @@ bool LibPQCursor::load(const char *query) { m_conn->client_logic->disable_once = true; m_data_handler = PQexec(m_conn, query); + if (!m_data_handler) { + printfPQExpBuffer(&m_conn->errorMessage, libpq_gettext("Client encryption cache query: '%s' failed\n"), query); + m_data_handler = NULL; + return false; + } /* check status */ if (PQresultStatus(m_data_handler) != PGRES_TUPLES_OK) { fprintf(stderr, "Client encryption cache query: '%s' failed with error : %d, '%s'\n", query, @@ -116,4 +123,4 @@ void LibPQCursor::clear_result() PQclear(m_data_handler); m_data_handler = NULL; } -} \ No newline at end of file +} diff --git a/src/common/interfaces/libpq/client_logic_fmt/biginteger.cpp b/src/common/interfaces/libpq/client_logic_fmt/biginteger.cpp deleted file mode 100644 index dae1d22de..000000000 --- a/src/common/interfaces/libpq/client_logic_fmt/biginteger.cpp +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- * ------------------------------------------------------------------------- - * - * biginteger.cpp - * - * IDENTIFICATION - * src\common\interfaces\libpq\client_logic_fmt\biginteger.cpp - * - * ------------------------------------------------------------------------- - */ - -#include "biginteger.h" -#define MAXBI64LEN 25 -#define MAXBI128LEN 45 - -static const char *int64_min_str = "-9223372036854775808"; -static const char *int128_min_str = "-170141183460469231731687303715884105728"; -bool bi64_out(int64 data, int scale, char *buf) -{ - Assert(scale >= 0 && scale <= MAXINT64DIGIT); - uint64 val_u64 = 0; - - errno_t rc = EOK; - /* data == INT64_MIN */ - if (unlikely(data == (-INT64CONST(0x7FFFFFFFFFFFFFFF) - 1))) { - /* - * Avoid problems with the most negative integer not being representable - * as a positive integer. - */ - if (scale > 0) { - int len = strlen(int64_min_str) - scale; - rc = memcpy_s(buf, MAXBI64LEN, int64_min_str, len); - /* check the return value of security function */ - securec_check_c(rc, "\0", "\0"); - buf[len] = '.'; - rc = memcpy_s(buf + len + 1, MAXBI64LEN - len - 1, int64_min_str + len, scale + 1); - /* check the return value of security function */ - securec_check_c(rc, "\0", "\0"); - } else { - rc = memcpy_s(buf, MAXBI64LEN, int64_min_str, strlen(int64_min_str) + 1); - /* check the return value of security function */ - securec_check_c(rc, "\0", "\0"); - } - return true; - } - - int64 pre_val = 0; - int64 post_val = 0; - - /* data is positive */ - if (data >= 0) { - val_u64 = data; - pre_val = val_u64 / (uint64)get_scale_multiplier(scale); - post_val = val_u64 % (uint64)get_scale_multiplier(scale); - if (likely(scale > 0)) { - /* ignore preceding 0, eg: value is 0.1, output is '.1' */ - if (pre_val == 0 && post_val != 0) { - rc = sprintf_s(buf, MAXBI64LEN, ".%0*ld", scale, post_val); - } else { - rc = sprintf_s(buf, MAXBI64LEN, "%ld.%0*ld", pre_val, scale, post_val); - } - } else { - rc = sprintf_s(buf, MAXBI64LEN, "%ld", pre_val); - } - /* check the return value of security function */ - securec_check_ss_c(rc, "\0", "\0"); - } else { - /* data is negative */ - val_u64 = -data; - pre_val = val_u64 / (uint64)get_scale_multiplier(scale); - post_val = val_u64 % (uint64)get_scale_multiplier(scale); - if (likely(scale > 0)) { - /* ignore preceding 0, eg: value is -0.1, output is '-.1' */ - if (pre_val == 0 && post_val != 0) { - rc = sprintf_s(buf, MAXBI64LEN, "-.%0*ld", scale, post_val); - } else { - rc = sprintf_s(buf, MAXBI64LEN, "-%ld.%0*ld", pre_val, scale, post_val); - } - } else { - rc = sprintf_s(buf, MAXBI64LEN, "-%ld", pre_val); - } - /* check the return value of security function */ - securec_check_ss_c(rc, "\0", "\0"); - } - - return true; -} -/* - * @Description: print int128 data to string, because of current GCC doesn't provide - * solution to print int128 data, we provide this function. 
- * - * @IN preceding_zero: mark whether print preceding zero or not - * @IN data: int128 data - * @OUT str: the output string buffer - * @IN len: the length of string buffer - * @IN scale:when preceding_zero is true, scale is the standard output width size - * @Return: print succeed or not - */ -template static int int128_to_string(int128 data, char *str, int len, int scale) -{ - Assert(data >= 0); - Assert(scale >= 0 && scale <= MAXINT128DIGIT); - - errno_t rc = EOK; - /* turn to int64 */ - if (INT128_INT64_EQ(data)) { - if (preceding_zero) { - rc = sprintf_s(str, len, "%0*ld", scale, (int64)data); - } else { - rc = sprintf_s(str, len, "%ld", (int64)data); - } - securec_check_ss_c(rc, "\0", "\0"); - return rc; - } - - /* get the absolute value of data, it's useful for sprintf */ - int128 num = data; - int64 leading = 0; - int64 trailing = 0; - trailing = num % P10_INT64; - num = num / P10_INT64; - /* two int64 num can represent the int128 data */ - if (INT128_INT64_EQ(num)) { - leading = (int64)num; - if (preceding_zero) { - const int trailing_digits = 18; - Assert(scale > trailing_digits); - rc = sprintf_s(str, len, "%0*ld%018ld", scale - trailing_digits, leading, trailing); - } else { - rc = sprintf_s(str, len, "%ld%018ld", leading, trailing); - } - securec_check_ss_c(rc, "\0", "\0"); - return rc; - } - - /* two int64 num can't represent int128data, use 3 int64 numbers */ - int64 middle = num % P10_INT64; - num = num / P10_INT64; - leading = (int64)num; - /* both the middle and trailing digits have 18 digits */ - if (preceding_zero) { - const int middle_trailing_digits = 36; - Assert(scale > middle_trailing_digits); - rc = sprintf_s(str, len, "%0*ld%018ld%018ld", scale - middle_trailing_digits, leading, middle, trailing); - } else { - rc = sprintf_s(str, len, "%ld%018ld%018ld", leading, middle, trailing); - } - securec_check_ss_c(rc, "\0", "\0"); - return rc; -} - -/* - * @Description: print bi128 data to string like numeric_out. - * - * @IN data: int128 data - * @IN scale: the scale of bi128 data - * @Return: Output string for numeric data type - */ -bool bi128_out(int128 data, int scale, char *buf) -{ - Assert(scale >= 0 && scale <= MAXINT128DIGIT); - - /* data == INT128_MIN */ - if (unlikely(data == INT128_MIN)) { - errno_t rc = EOK; - /* - * Avoid problems with the most negative integer not being representable - * as a positive integer. 
- */ - if (scale > 0) { - int len = strlen(int128_min_str) - scale; - rc = memcpy_s(buf, MAXBI128LEN, int128_min_str, len); - /* check the return value of security function */ - securec_check_c(rc, "\0", "\0"); - buf[len] = '.'; - rc = memcpy_s(buf + len + 1, MAXBI128LEN - len - 1, int128_min_str + len, scale + 1); - /* check the return value of security function */ - securec_check_c(rc, "\0", "\0"); - } else { - rc = memcpy_s(buf, MAXBI128LEN, int128_min_str, strlen(int128_min_str) + 1); - /* check the return value of security function */ - securec_check_c(rc, "\0", "\0"); - } - return true; - } - - int128 pre_val = 0; - int128 post_val = 0; - - /* data is positive */ - if (data >= 0) { - pre_val = data / get_scale_multiplier(scale); - post_val = data % get_scale_multiplier(scale); - if (likely(scale > 0)) { - if (pre_val == 0 && post_val != 0) { - buf[0] = '.'; - int128_to_string(post_val, buf + 1, MAXBI128LEN - 1, scale); - } else { - int128_to_string(pre_val, buf, MAXBI128LEN, 0); - int len = strlen(buf); - buf[len] = '.'; - int128_to_string(post_val, buf + len + 1, MAXBI128LEN - len - 1, scale); - } - } else { - int128_to_string(pre_val, buf, MAXBI128LEN, 0); - } - } else { /* data is negative */ - data = -data; - pre_val = data / get_scale_multiplier(scale); - post_val = data % get_scale_multiplier(scale); - buf[0] = '-'; - if (likely(scale > 0)) { - if (pre_val == 0 && post_val != 0) { - buf[1] = '.'; - int128_to_string(post_val, buf + 2, MAXBI128LEN - 2, scale); /* 2 is buf[0] and buf[1] */ - } else { - int128_to_string(pre_val, buf + 1, MAXBI128LEN - 1, 0); - int len = strlen(buf); - buf[len] = '.'; - int128_to_string(post_val, buf + len + 1, MAXBI128LEN - len - 1, scale); - } - } else { - int128_to_string(pre_val, buf + 1, MAXBI128LEN - 1, 0); - } - } - return true; -} diff --git a/src/common/interfaces/libpq/client_logic_fmt/biginteger.h b/src/common/interfaces/libpq/client_logic_fmt/biginteger.h deleted file mode 100644 index 395091c70..000000000 --- a/src/common/interfaces/libpq/client_logic_fmt/biginteger.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- * ------------------------------------------------------------------------- - * - * biginteger.h - * - * IDENTIFICATION - * src\common\interfaces\libpq\client_logic_fmt\biginteger.h - * - * ------------------------------------------------------------------------- - */ - -#ifndef BIGINTEGER_H -#define BIGINTEGER_H - -#include "postgres_fe.h" -#define MAXINT64DIGIT 19 -#define MAXINT128DIGIT 38 -#define INT128_INT64_EQ(data) ((data) == (int128)((int64)(data))) -#define P10_INT64 1000000000000000000LL /* 18 zeroes */ - -inline int128 get_scale_multiplier(int scale) -{ - Assert(scale >= 0 && scale <= MAXINT128DIGIT); - static const int128 values[] = { - static_cast(1LL), - static_cast(10LL), - static_cast(100LL), - static_cast(1000LL), - static_cast(10000LL), - static_cast(100000LL), - static_cast(1000000LL), - static_cast(10000000LL), - static_cast(100000000LL), - static_cast(1000000000LL), - static_cast(10000000000LL), - static_cast(100000000000LL), - static_cast(1000000000000LL), - static_cast(10000000000000LL), - static_cast(100000000000000LL), - static_cast(1000000000000000LL), - static_cast(10000000000000000LL), - static_cast(100000000000000000LL), - static_cast(1000000000000000000LL), - static_cast(1000000000000000000LL) * 10LL, - static_cast(1000000000000000000LL) * 100LL, - static_cast(1000000000000000000LL) * 1000LL, - static_cast(1000000000000000000LL) * 10000LL, - static_cast(1000000000000000000LL) * 100000LL, - static_cast(1000000000000000000LL) * 1000000LL, - static_cast(1000000000000000000LL) * 10000000LL, - static_cast(1000000000000000000LL) * 100000000LL, - static_cast(1000000000000000000LL) * 1000000000LL, - static_cast(1000000000000000000LL) * 10000000000LL, - static_cast(1000000000000000000LL) * 100000000000LL, - static_cast(1000000000000000000LL) * 1000000000000LL, - static_cast(1000000000000000000LL) * 10000000000000LL, - static_cast(1000000000000000000LL) * 100000000000000LL, - static_cast(1000000000000000000LL) * 1000000000000000LL, - static_cast(1000000000000000000LL) * 10000000000000000LL, - static_cast(1000000000000000000LL) * 100000000000000000LL, - static_cast(1000000000000000000LL) * 100000000000000000LL * 10LL, - static_cast(1000000000000000000LL) * 100000000000000000LL * 100LL, - static_cast(1000000000000000000LL) * 100000000000000000LL * 1000LL - }; - return values[scale]; -} - - -bool bi64_out(int64 data, int scale, char *buf); -bool bi128_out(int128 data, int scale, char *buf); - -#endif \ No newline at end of file diff --git a/src/common/interfaces/libpq/client_logic_fmt/float.cpp b/src/common/interfaces/libpq/client_logic_fmt/float.cpp index bfde39fdb..0265c9d2e 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/float.cpp +++ b/src/common/interfaces/libpq/client_logic_fmt/float.cpp @@ -349,9 +349,9 @@ bool float4toa(float4 num, char *ascii) * Delete 0 before decimal. 
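 * Only strip the zero when one was actually emitted (hence the added ascii[0]/ascii[1] checks); exponent-style output has none.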
* For Example: convert 0.123 to .123, or -0.123 to -.123 */ - if (num > 0 && num < 1) { + if (num > 0 && num < 1 && ascii[0] == '0') { check_memmove_s(memmove_s(ascii, MAXFLOATWIDTH + 1, ascii + 1, MAXFLOATWIDTH)); - } else if (num > -1 && num < 0) { + } else if (num > -1 && num < 0 && ascii[1] == '0') { check_memmove_s(memmove_s(ascii + 1, MAXFLOATWIDTH, ascii + 2, MAXFLOATWIDTH - 1)); } return true; diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_bool.cpp b/src/common/interfaces/libpq/client_logic_fmt/gs_bool.cpp index 83eab470a..a5a79cb63 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/gs_bool.cpp +++ b/src/common/interfaces/libpq/client_logic_fmt/gs_bool.cpp @@ -26,8 +26,7 @@ bool scan_bool(const char *bool_val, bool *res, const char *err_msg); bool parse_bool_with_len(const char *value, size_t len, bool *result); -unsigned char *bool_bin(const char *text, const Oid typelem, const int atttypmod, size_t *binary_size, - const char *err_msg) +unsigned char *bool_bin(const char *text, size_t *binary_size, const char *err_msg) { unsigned char *binary = (unsigned char *)malloc(sizeof(bool)); if (binary == NULL) { @@ -41,7 +40,7 @@ unsigned char *bool_bin(const char *text, const Oid typelem, const int atttypmod return binary; } -char *bool_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size) +char *bool_bout(const unsigned char *binary, size_t size, size_t *result_size) { Assert(size == sizeof(bool)); const size_t text_size = 2; diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_bool.h b/src/common/interfaces/libpq/client_logic_fmt/gs_bool.h index 1c31e5f81..57863a58d 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/gs_bool.h +++ b/src/common/interfaces/libpq/client_logic_fmt/gs_bool.h @@ -27,11 +27,8 @@ #include "postgres_fe.h" #include -unsigned char *bool_bin(const char *text, const Oid typelem, const int atttypmod, size_t *binary_size, +unsigned char *bool_bin(const char *text, size_t *binary_size, const char *err_msg); -char *bool_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size); -unsigned char *int1_badjust(unsigned char *binary, Oid typelem, int atttypmod, char *err_msg); -bool int1_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, unsigned char *res, - char *err_msg); +char *bool_bout(const unsigned char *binary, size_t size, size_t *result_size); #endif \ No newline at end of file diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_char.cpp b/src/common/interfaces/libpq/client_logic_fmt/gs_char.cpp index b750d3b27..a17fff563 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/gs_char.cpp +++ b/src/common/interfaces/libpq/client_logic_fmt/gs_char.cpp @@ -33,7 +33,7 @@ int pg_mbcharcliplen_orig(const char *mbstr, int len, int limit); * char_bin - * converts char style to binary array */ -unsigned char *char_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, const char *err_msg) +unsigned char *char_bin(const char *text, size_t *binary_size, const char *err_msg) { unsigned char *binary = (unsigned char *)malloc(1); if (binary == NULL) { @@ -48,7 +48,7 @@ unsigned char *char_bin(const char *text, Oid typelem, int atttypmod, size_t *bi * char_bout - * converts binary array to char style */ -char *char_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size) +char *char_bout(const unsigned char *binary, size_t size, size_t *result_size) { if (size != 1) { 
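        /* a "char" value occupies exactly one byte of binary data; reject any other size */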
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size))); @@ -67,7 +67,7 @@ char *char_bout(const unsigned char *binary, size_t size, Oid typelem, int attty * bytea_bin - * converts hex format/escaped style byte array to binary array */ -unsigned char *bytea_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, char *err_msg) +unsigned char *bytea_bin(const char *text, size_t *binary_size, char *err_msg) { return byteain(text, binary_size, err_msg); } @@ -76,12 +76,12 @@ unsigned char *bytea_bin(const char *text, Oid typelem, int atttypmod, size_t *b * bytea_bout - * converts binary array to hex format/escaped style byte array */ -char *bytea_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size) +char *bytea_bout(const unsigned char *binary, size_t size, size_t *result_size) { return byteaout(binary, size, result_size); } -unsigned char *fallback_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size) +unsigned char *fallback_bin(const char *text, size_t *binary_size) { size_t text_size = strlen(text); unsigned char *binary = (unsigned char *)malloc(text_size + 1); @@ -96,7 +96,7 @@ unsigned char *fallback_bin(const char *text, Oid typelem, int atttypmod, size_t return binary; } -char *fallback_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size) +char *fallback_bout(const unsigned char *binary, size_t size, size_t *result_size) { char *text = (char *)malloc(size + 1); if (text == NULL) { @@ -108,8 +108,7 @@ char *fallback_bout(const unsigned char *binary, size_t size, Oid typelem, int a return text; } -unsigned char *fallback_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, - size_t *result_size, const char *err_msg) +unsigned char *fallback_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg) { unsigned char *result = (unsigned char *)malloc(size + 1); if (result == NULL) { @@ -121,7 +120,7 @@ unsigned char *fallback_brestore(const unsigned char *binary, size_t size, Oid t return result; } -unsigned char *varchar_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg) +unsigned char *varchar_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg) { size_t maxlen; size_t len = *binary_size; @@ -149,7 +148,7 @@ unsigned char *varchar_badjust(unsigned char *binary, size_t *binary_size, Oid t return binary; } -unsigned char *nvarchar2_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg) +unsigned char *nvarchar2_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg) { size_t maxlen; size_t len = *binary_size; @@ -177,7 +176,7 @@ unsigned char *nvarchar2_badjust(unsigned char *binary, size_t *binary_size, Oid return binary; } -unsigned char *bpchar_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg) +unsigned char *bpchar_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg) { size_t maxlen; size_t len = *binary_size; diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_char.h b/src/common/interfaces/libpq/client_logic_fmt/gs_char.h index 913d86147..6bbf78cc9 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/gs_char.h +++ b/src/common/interfaces/libpq/client_logic_fmt/gs_char.h @@ -27,16 +27,15 @@ #include #include "postgres_fe.h" -unsigned char 
*char_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, const char *err_msg); -char *char_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size); -unsigned char *bytea_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, char *err_msg); -char *bytea_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size); -unsigned char *fallback_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size); -char *fallback_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size); -unsigned char *fallback_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, - size_t *result_size, const char *err_msg); -unsigned char *varchar_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg); -unsigned char *nvarchar2_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg); -unsigned char *bpchar_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg); +unsigned char *char_bin(const char *text, size_t *binary_size, const char *err_msg); +char *char_bout(const unsigned char *binary, size_t size, size_t *result_size); +unsigned char *bytea_bin(const char *text, size_t *binary_size, char *err_msg); +char *bytea_bout(const unsigned char *binary, size_t size, size_t *result_size); +unsigned char *fallback_bin(const char *text, size_t *binary_size); +char *fallback_bout(const unsigned char *binary, size_t size, size_t *result_size); +unsigned char *fallback_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg); +unsigned char *varchar_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg); +unsigned char *nvarchar2_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg); +unsigned char *bpchar_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg); #endif \ No newline at end of file diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_copy.cpp b/src/common/interfaces/libpq/client_logic_fmt/gs_copy.cpp index 46be5b00b..1da9ed987 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/gs_copy.cpp +++ b/src/common/interfaces/libpq/client_logic_fmt/gs_copy.cpp @@ -46,6 +46,10 @@ #define OCTVALUE(c) ((c) - '0') #define INTEGER_SIZE 64 +#ifdef ENABLE_UT +#define static +#endif + /* non-export function prototypes */ static CopyStateData *begin_copy(bool is_from, bool is_rel, Node *raw_query, const char *query_string, List *attnamelist, List *options); @@ -114,11 +118,16 @@ int deprocess_copy_line(PGconn *conn, const char *in_buffer, int msg_length, cha msg_length = remove_line_end(cstate, in_buffer, msg_length); + int decrypted_msg_length = 0; if (cstate->csv_mode) { - return deprocess_csv_line(conn, entry, in_buffer, msg_length, buffer); + decrypted_msg_length = deprocess_csv_line(conn, entry, in_buffer, msg_length, buffer); } else { - return deprocess_txt_line(conn, entry, in_buffer, msg_length, buffer); + decrypted_msg_length = deprocess_txt_line(conn, entry, in_buffer, msg_length, buffer); } + if (decrypted_msg_length < 0) { + libpq_free(*buffer); + } + return decrypted_msg_length; } } @@ -177,6 +186,9 @@ int process_copy_chunk(PGconn *conn, const char *in_buffer, int msg_length, char } else { ret = process_txt_chunk(conn, entry, in_buffer, msg_length, buffer); } + if (ret < 0) { + 
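+        /* the chunk parsers no longer free *buffer on their error paths, so release it centrally here */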
libpq_free(*buffer); + } if (full_chunk != NULL) { free(full_chunk); } @@ -392,9 +404,7 @@ static int deprocess_txt_line(PGconn *conn, PreparedStatement *entry, const char /* Make sure there is enough space for the next value */ if (cstate->fieldno >= (int)entry->original_data_types_oids_size) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } @@ -540,9 +550,7 @@ static int deprocess_txt_line(PGconn *conn, PreparedStatement *entry, const char } else { if (!deprocess_and_replace(conn, entry, data, data_size, false, buffer, written, res_length)) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } if (!append_buffer(buffer, res_length, written, end_ptr, cur_ptr - end_ptr)) { @@ -580,6 +588,7 @@ static int deprocess_txt_line(PGconn *conn, PreparedStatement *entry, const char static int process_txt_chunk(PGconn *conn, PreparedStatement *entry, const char *in_buffer, int msg_length, char **buffer) { + *buffer = NULL; if (!entry) { return -1; } @@ -592,7 +601,6 @@ static int process_txt_chunk(PGconn *conn, PreparedStatement *entry, const char } size_t data_size = 0; - *buffer = NULL; CopyStateData *cstate = entry->copy_state; char delimc = cstate->delim; @@ -788,9 +796,7 @@ static int process_txt_chunk(PGconn *conn, PreparedStatement *entry, const char /* process */ if (!process_and_replace(conn, entry, data, data_size, false, buffer, written, res_length)) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } if (!append_buffer(buffer, res_length, written, end_ptr, cur_ptr - end_ptr)) { @@ -897,9 +903,7 @@ static int deprocess_csv_line(PGconn *conn, PreparedStatement *entry, const char /* Make sure there is enough space for the next value */ if (cstate->fieldno >= (int)entry->original_data_types_oids_size) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } @@ -954,9 +958,7 @@ static int deprocess_csv_line(PGconn *conn, PreparedStatement *entry, const char end_ptr = cur_ptr; if (cur_ptr >= line_end_ptr) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } @@ -1027,9 +1029,7 @@ static int deprocess_csv_line(PGconn *conn, PreparedStatement *entry, const char } else { if (!deprocess_and_replace(conn, entry, data, data_size, saw_quote, buffer, written, res_length)) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } if (!append_buffer(buffer, res_length, written, end_ptr, cur_ptr - end_ptr)) { @@ -1167,10 +1167,8 @@ int process_csv_chunk(PGconn *conn, PreparedStatement *entry, const char *in_buf end_ptr = cur_ptr; if (cur_ptr >= line_end_ptr) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; - return 0; + return -1; } c = *cur_ptr++; @@ -1236,15 +1234,13 @@ int process_csv_chunk(PGconn *conn, PreparedStatement *entry, const char *in_buf if (!append_buffer(buffer, res_length, written, start_ptr, cur_ptr - start_ptr)) { free(data); data = NULL; - return 0; + return -1; } } else if (found_delim || found_eol || (end_ptr == line_end_ptr)) { /* process */ if (!process_and_replace(conn, entry, data, data_size, false, buffer, written, res_length)) { free(data); - free(*buffer); data = NULL; - *buffer = NULL; return -1; } if (!append_buffer(buffer, res_length, written, end_ptr, cur_ptr - end_ptr)) { diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp b/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp index 6153eed6c..93782e7af 100644 --- a/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp +++ 
diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp b/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp
index 6153eed6c..93782e7af 100644
--- a/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp
+++ b/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.cpp
@@ -31,20 +31,20 @@
 * type_char_bin -
 *    converts char style to binary array (unsigned char)
 */
-unsigned char *Format::type_char_bin(const char *text, Oid type, Oid typelem, int atttypmod, size_t *binary_size,
+unsigned char *Format::type_char_bin(const char *text, Oid type, int atttypmod, size_t *binary_size,
     char *err_msg)
 {
-    unsigned char *binary = fallback_bin(text, typelem, atttypmod, binary_size);
+    unsigned char *binary = fallback_bin(text, binary_size);
     if (binary != NULL) {
         switch (type) {
             case VARCHAROID: {
-                return varchar_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+                return varchar_badjust(binary, binary_size, atttypmod, err_msg);
             }
             case NVARCHAR2OID: {
-                return nvarchar2_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+                return nvarchar2_badjust(binary, binary_size, atttypmod, err_msg);
             }
             case BPCHAROID: {
-                return bpchar_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+                return bpchar_badjust(binary, binary_size, atttypmod, err_msg);
             }
             default:
                 break;
@@ -57,7 +57,7 @@ unsigned char *Format::type_char_bin(const char *text, Oid type, Oid typelem, in
 * text_to_binary -
 *    converts an ascii string to a binary array
 */
-unsigned char *Format::text_to_binary(const PGconn* conn, const char *text, Oid type, Oid typelem,
+unsigned char *Format::text_to_binary(const PGconn* conn, const char *text, Oid type,
     int atttypmod, size_t *binary_size, char *err_msg)
 {
     if (!text || !binary_size) {
@@ -66,42 +66,42 @@ unsigned char *Format::text_to_binary(const PGconn* conn, const char *text, Oid
     switch (type) {
         case BYTEAOID: {
-            return bytea_bin(text, typelem, atttypmod, binary_size, err_msg);
+            return bytea_bin(text, binary_size, err_msg);
         }
         case CHAROID: {
-            return char_bin(text, typelem, atttypmod, binary_size, err_msg);
+            return char_bin(text, binary_size, err_msg);
         }
         case INT8OID: {
-            return int8_bin(conn, text, typelem, atttypmod, binary_size, err_msg);
+            return int8_bin(conn, text, binary_size, err_msg);
         }
         case INT2OID: {
-            return int2_bin(conn, text, typelem, atttypmod, binary_size, err_msg);
+            return int2_bin(conn, text, binary_size, err_msg);
         }
         case INT1OID: {
-            return int1_bin(conn, text, typelem, atttypmod, binary_size, err_msg);
+            return int1_bin(conn, text, binary_size, err_msg);
        }
         case INT4OID: {
-            return int4_bin(conn, text, typelem, atttypmod, binary_size, err_msg);
+            return int4_bin(conn, text, binary_size, err_msg);
         }
         case FLOAT4OID: {
-            return float4_bin(text, typelem, atttypmod, binary_size, err_msg);
+            return float4_bin(text, binary_size, err_msg);
         }
         case FLOAT8OID: {
-            return float8_bin(text, typelem, atttypmod, binary_size, err_msg);
+            return float8_bin(text, binary_size, err_msg);
         }
         case NUMERICOID: {
-            return numeric_bin(conn, text, typelem, atttypmod, binary_size, err_msg);
+            return numeric_bin(conn, text, atttypmod, binary_size, err_msg);
         }
         case VARCHAROID:
         case NVARCHAR2OID:
         case BPCHAROID: {
-            return type_char_bin(text, type, typelem, atttypmod, binary_size, err_msg);
+            return type_char_bin(text, type, atttypmod, binary_size, err_msg);
         }
         case BOOLOID: {
-            return bool_bin(text, typelem, atttypmod, binary_size, err_msg);
+            return bool_bin(text, binary_size, err_msg);
         }
         default: {
-            return fallback_bin(text, typelem, atttypmod, binary_size);
+            return fallback_bin(text, binary_size);
         }
     }
     return NULL;
@@ -111,45 +111,44 @@ unsigned char *Format::text_to_binary(const PGconn* conn, const char *text, Oid
 * binary_to_text -
 *    converts a binary array to an ascii string
 */
-char *Format::binary_to_text(const unsigned char *binary, size_t length, Oid type, Oid typelem, int atttypmod,
-    size_t *result_size)
+char *Format::binary_to_text(const unsigned char *binary, size_t length, Oid type, size_t *result_size)
 {
     if (binary == NULL || result_size == NULL) {
         return NULL;
     }
     switch (type) {
         case BYTEAOID: {
-            return bytea_bout(binary, length, typelem, atttypmod, result_size);
+            return bytea_bout(binary, length, result_size);
         }
         case CHAROID: {
-            return char_bout(binary, length, typelem, atttypmod, result_size);
+            return char_bout(binary, length, result_size);
         }
         case INT8OID: {
-            return int8_bout(binary, length, typelem, atttypmod, result_size);
+            return int8_bout(binary, length, result_size);
         }
         case INT2OID: {
-            return int2_bout(binary, length, typelem, atttypmod, result_size);
+            return int2_bout(binary, length, result_size);
         }
         case INT1OID: {
-            return int1_bout(binary, length, typelem, atttypmod, result_size);
+            return int1_bout(binary, length, result_size);
         }
         case INT4OID: {
-            return int4_bout(binary, length, typelem, atttypmod, result_size);
+            return int4_bout(binary, length, result_size);
         }
         case FLOAT4OID: {
-            return float4_bout(binary, length, typelem, atttypmod, result_size);
+            return float4_bout(binary, length, result_size);
         }
         case FLOAT8OID: {
-            return float8_bout(binary, length, typelem, atttypmod, result_size);
+            return float8_bout(binary, length, result_size);
         }
         case NUMERICOID: {
-            return numeric_bout(binary, length, typelem, atttypmod, result_size);
+            return numeric_bout(binary, length, result_size);
         }
         case BOOLOID: {
-            return bool_bout(binary, length, typelem, atttypmod, result_size);
+            return bool_bout(binary, length, result_size);
         }
         default: {
-            return fallback_bout(binary, length, typelem, atttypmod, result_size);
+            return fallback_bout(binary, length, result_size);
         }
     }
 }
@@ -160,36 +159,38 @@ char *Format::binary_to_text(const unsigned char *binary, size_t length, Oid typ
 * realloc the memory for data, and because how the data is saved on disk and
 * how the client/application expects to retrieve it are different, change the endianness
 */
-unsigned char *Format::verify_and_adjust_binary(unsigned char *binary, size_t *binary_size, Oid type, Oid typelem,
+unsigned char *Format::verify_and_adjust_binary(unsigned char *binary, size_t *binary_size, Oid type,
     int atttypmod, char *err_msg)
 {
     if (!binary || !binary_size) {
         return NULL;
     }
     switch (type) {
+        case FLOAT8OID:
         case INT8OID: {
-            return int8_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return int8_badjust(binary, binary_size, err_msg);
         }
         case INT2OID: {
-            return int2_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return int2_badjust(binary, binary_size, err_msg);
         }
         case INT1OID: {
-            return int1_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return int1_badjust(binary, binary_size, err_msg);
        }
+        case FLOAT4OID:
         case INT4OID: {
-            return int4_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return int4_badjust(binary, binary_size, err_msg);
         }
         case VARCHAROID: {
-            return varchar_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return varchar_badjust(binary, binary_size, atttypmod, err_msg);
         }
         case NVARCHAR2OID: {
-            return nvarchar2_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return nvarchar2_badjust(binary, binary_size, atttypmod, err_msg);
         }
         case BPCHAROID: {
-            return bpchar_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return bpchar_badjust(binary, binary_size, atttypmod, err_msg);
         }
         case NUMERICOID: {
-            return numeric_badjust(binary, binary_size, typelem, atttypmod, err_msg);
+            return numeric_badjust(binary, binary_size, atttypmod, err_msg);
         }
         default: {
             return binary;
@@ -202,24 +203,26 @@ unsigned char *Format::verify_and_adjust_binary(unsigned char *binary, size_t *b
 * restore_binary -
 *    restore binary array, change the endianness
 */
-unsigned char *Format::restore_binary(const unsigned char *binary, size_t size, Oid type, Oid typelem, int atttypmod,
-    size_t *result_size, const char *err_msg)
+unsigned char *Format::restore_binary(const unsigned char *binary, size_t size, Oid type, size_t *result_size,
+    const char *err_msg)
 {
     switch (type) {
+        case FLOAT8OID:
         case INT8OID: {
-            return int8_brestore(binary, size, typelem, atttypmod, result_size, err_msg);
+            return int8_brestore(binary, size, result_size, err_msg);
         }
         case INT2OID: {
-            return int2_brestore(binary, size, typelem, atttypmod, result_size, err_msg);
+            return int2_brestore(binary, size, result_size, err_msg);
         }
         case INT1OID: {
-            return int1_brestore(binary, size, typelem, atttypmod, result_size, err_msg);
+            return int1_brestore(binary, size, result_size, err_msg);
         }
+        case FLOAT4OID:
         case INT4OID: {
-            return int4_brestore(binary, size, typelem, atttypmod, result_size, err_msg);
+            return int4_brestore(binary, size, result_size, err_msg);
         }
         default: {
-            return fallback_brestore(binary, size, typelem, atttypmod, result_size, err_msg);
+            return fallback_brestore(binary, size, result_size, err_msg);
         }
     }
     return NULL;
diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.h b/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.h
index 929306811..f5034038f 100644
--- a/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.h
+++ b/src/common/interfaces/libpq/client_logic_fmt/gs_fmt.h
@@ -31,17 +31,16 @@ typedef struct pg_conn PGconn;
 
 class Format {
 public:
-    static unsigned char *text_to_binary(const PGconn* conn, const char *text, Oid type, Oid typelem, int atttypmod,
+    static unsigned char *text_to_binary(const PGconn* conn, const char *text, Oid type, int atttypmod,
         size_t *binary_size, char *err_msg);
-    static char *binary_to_text(const unsigned char *binary, size_t size, Oid type, Oid typelem, int atttypmod,
-        size_t *result_size);
-    static unsigned char *verify_and_adjust_binary(unsigned char *binary, size_t *binary_size, Oid type, Oid typelem,
+    static char *binary_to_text(const unsigned char *binary, size_t size, Oid type, size_t *result_size);
+    static unsigned char *verify_and_adjust_binary(unsigned char *binary, size_t *binary_size, Oid type,
         int atttypmod, char *err_msg);
-    static unsigned char *restore_binary(const unsigned char *binary, size_t size, Oid type, Oid typelem, int atttypmod,
-        size_t *binary_size, const char *err_msg);
+    static unsigned char *restore_binary(const unsigned char *binary, size_t size, Oid type, size_t *binary_size,
+        const char *err_msg);
 
 private:
-    static unsigned char *type_char_bin(const char *text, Oid type, Oid typelem, int atttypmod, size_t *binary_size,
+    static unsigned char *type_char_bin(const char *text, Oid type, int atttypmod, size_t *binary_size,
         char *err_msg);
 };
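The Format class above is a plain type-Oid dispatch: one switch per direction, with the unused `typelem` parameter dropped everywhere. A self-contained sketch of that dispatch shape (the `*_demo` names are hypothetical; the Oid values 23 and 16 are the standard PostgreSQL catalog values for int4 and bool):

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

typedef unsigned int Oid;
static const Oid INT4OID = 23;
static const Oid BOOLOID = 16;

/* Minimal stand-ins for the per-type *_bin converters with the slimmed-down
 * signature (text, binary_size, err_msg) used after this patch. */
static unsigned char *int4_bin_demo(const char *text, size_t *binary_size, char *err_msg)
{
    long long *v = (long long *)calloc(1, sizeof(long long)); /* widened to 8 bytes, as in the patch */
    if (v == NULL) {
        return NULL;
    }
    *v = strtoll(text, NULL, 10);
    *binary_size = sizeof(long long);
    (void)err_msg;
    return (unsigned char *)v;
}

static unsigned char *bool_bin_demo(const char *text, size_t *binary_size, char *err_msg)
{
    unsigned char *v = (unsigned char *)calloc(1, 1);
    if (v == NULL) {
        return NULL;
    }
    *v = (strcmp(text, "t") == 0 || strcmp(text, "true") == 0) ? 1 : 0;
    *binary_size = 1;
    (void)err_msg;
    return v;
}

/* The dispatch shape of Format::text_to_binary: one switch over the type Oid. */
static unsigned char *text_to_binary_demo(const char *text, Oid type, size_t *binary_size, char *err_msg)
{
    switch (type) {
        case INT4OID:
            return int4_bin_demo(text, binary_size, err_msg);
        case BOOLOID:
            return bool_bin_demo(text, binary_size, err_msg);
        default:
            return NULL; /* the real code falls back to fallback_bin() */
    }
}

int main()
{
    size_t n = 0;
    char err[256] = {0};
    unsigned char *bin = text_to_binary_demo("42", INT4OID, &n, err);
    printf("encoded %zu bytes\n", n);
    free(bin);
    return 0;
}
```

Note also the new `case FLOAT8OID:`/`case FLOAT4OID:` fallthroughs in verify_and_adjust_binary and restore_binary: floats now reuse the integer adjust/restore paths because they share the same fixed-width buffer layout.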
diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_num.cpp b/src/common/interfaces/libpq/client_logic_fmt/gs_num.cpp
index ebde361f7..88a5d0c9d 100644
--- a/src/common/interfaces/libpq/client_logic_fmt/gs_num.cpp
+++ b/src/common/interfaces/libpq/client_logic_fmt/gs_num.cpp
@@ -45,8 +45,7 @@
 * int1_bin -
 *    convert string to integer
 */
-unsigned char *int1_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod,
-    size_t *binary_size, char *err_msg)
+unsigned char *int1_bin(const PGconn* conn, const char *text, size_t *binary_size, char *err_msg)
 {
     unsigned char *binary = (unsigned char *)malloc(sizeof(int64));
     if (binary == NULL) {
@@ -67,7 +66,7 @@ unsigned char *int1_bin(const PGconn* conn, const char *text, Oid typelem, int a
 * int1_bout -
 *    converts an unsigned 8-bit integer to its string representation
 */
-char *int1_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *int1_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -84,7 +83,7 @@ char *int1_bout(const unsigned char *binary, size_t size, Oid typelem, int attty
     return text;
 }
 
-unsigned char *int1_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, const char *err_msg)
+unsigned char *int1_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg)
 {
     binary = (unsigned char *)libpq_realloc(binary, *binary_size, sizeof(int64));
     if (binary == NULL) {
@@ -97,19 +96,15 @@ unsigned char *int1_badjust(unsigned char *binary, size_t *binary_size, Oid type
     return binary;
 }
 
-unsigned char *int1_brestore(const unsigned char *binary_in, size_t size, Oid typelem, int atttypmod,
-    size_t *result_size, const char *err_msg)
+unsigned char *int1_brestore(const unsigned char *binary_in, size_t size, size_t *result_size, const char *err_msg)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
     }
-    unsigned char *result = (unsigned char *)malloc(sizeof(int8));
+    unsigned char *result = (unsigned char *)calloc(1, sizeof(int8));
     if (result == NULL) {
         return NULL;
     }
-    errno_t rc = EOK;
-    rc = memset_s(result, sizeof(int8), 0, sizeof(int8));
-    securec_check_c(rc, "\0", "\0");
     *(int8 *)result = *(const int8 *)binary_in;
     *result_size = sizeof(int8);
     return result;
@@ -119,7 +114,7 @@ unsigned char *int1_brestore(const unsigned char *binary, size_t size, Oid ty
 * int2_bin -
 *    Convert input string to a signed 16 bit integer.
 */
-unsigned char *int2_bin(const PGconn *conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
+unsigned char *int2_bin(const PGconn *conn, const char *text, size_t *binary_size,
     char *err_msg)
 {
     unsigned char *binary = (unsigned char *)malloc(sizeof(int64));
@@ -141,7 +136,7 @@ unsigned char *int2_bin(const PGconn *conn, const char *text, Oid typelem, int a
 * int2_bout -
 *    converts a signed 16-bit integer to its string representation
 */
-char *int2_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *int2_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -158,7 +153,7 @@ char *int2_bout(const unsigned char *binary, size_t size, Oid typelem, int attty
     return text;
 }
 
-unsigned char *int2_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, const char *err_msg)
+unsigned char *int2_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg)
 {
     binary = (unsigned char *)libpq_realloc(binary, *binary_size, sizeof(int64));
     if (binary == NULL) {
@@ -173,7 +168,7 @@ unsigned char *int2_badjust(unsigned char *binary, size_t *binary_size, Oid type
     return binary;
 }
 
-unsigned char *int2_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
+unsigned char *int2_brestore(const unsigned char *binary, size_t size, size_t *result_size,
     const char *err_msg)
 {
     if (size != sizeof(int64)) {
@@ -195,7 +190,7 @@ unsigned char *int2_brestore(const unsigned char *binary, size_t size, Oid typel
 * int4_bin -
 *    Convert input string to a signed 32 bit integer.
 */
-unsigned char *int4_bin(const PGconn *conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
+unsigned char *int4_bin(const PGconn *conn, const char *text, size_t *binary_size,
     char *err_msg)
 {
     unsigned char *binary = (unsigned char *)malloc(sizeof(int64));
@@ -217,7 +212,7 @@ unsigned char *int4_bin(const PGconn *conn, const char *text, Oid typelem, int a
 * int4_bout -
 *    converts a signed 32-bit integer to its string representation
 */
-char *int4_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *int4_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -235,7 +230,7 @@ char *int4_bout(const unsigned char *binary, size_t size, Oid typelem, int attty
     return text;
 }
 
-unsigned char *int4_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, const char *err_msg)
+unsigned char *int4_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg)
 {
     binary = (unsigned char *)libpq_realloc(binary, *binary_size, sizeof(int64));
     if (binary == NULL) {
@@ -250,8 +245,7 @@ unsigned char *int4_badjust(unsigned char *binary, size_t *binary_size, Oid type
     return binary;
 }
 
-unsigned char *int4_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
-    const char *err_msg)
+unsigned char *int4_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -272,7 +266,7 @@ unsigned char *int4_brestore(const unsigned char *binary, size_t size, Oid typel
 * int8_bin -
 *    try to parse a string into a 64-bit integer.
 */
-unsigned char *int8_bin(const PGconn *conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
+unsigned char *int8_bin(const PGconn *conn, const char *text, size_t *binary_size,
     char *err_msg)
 {
     unsigned char *binary = (unsigned char *)malloc(sizeof(int64));
@@ -294,7 +288,7 @@ unsigned char *int8_bin(const PGconn *conn, const char *text, Oid typelem, int a
 * int8_bout -
 *    convert a signed 64-bit integer to its string representation
 */
-char *int8_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *int8_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -317,8 +311,7 @@ char *int8_bout(const unsigned char *binary, size_t size, Oid typelem, int attty
 * because how the data is saved on disk and how the client/application expects to retrieve it
 * are different, change the endianness
 */
-unsigned char *int8_badjust(unsigned char *binary, size_t *binary_size,
-    Oid typelem, int atttypmod, const char *err_msg)
+unsigned char *int8_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg)
 {
     *binary_size = sizeof(int64);
     long &n = *(long *)binary;
@@ -330,8 +323,7 @@ unsigned char *int8_badjust(unsigned char *binary, size_t *binary_size,
 * restore_binary -
 *    restore binary array, change the endianness
 */
-unsigned char *int8_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
-    const char *err_msg)
+unsigned char *int8_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg)
 {
     if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -352,20 +344,20 @@ unsigned char *int8_brestore(const unsigned char *binary, size_t size, Oid typel
 * float4_bin -
 *    try to parse a string into a float.
 */
-unsigned char *float4_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, char *err_msg)
+unsigned char *float4_bin(const char *text, size_t *binary_size, char *err_msg)
 {
-    unsigned char *binary = (unsigned char *)malloc(sizeof(float4));
+    unsigned char *binary = (unsigned char *)malloc(sizeof(int64));
     if (binary == NULL) {
         return NULL;
     }
     errno_t rc = EOK;
-    rc = memset_s(binary, sizeof(float4), 0, sizeof(float4));
+    rc = memset_s(binary, sizeof(int64), 0, sizeof(int64));
     securec_check_c(rc, "\0", "\0");
     if (!scan_float4(text, (float4 *)binary, err_msg)) {
         free(binary);
         return NULL;
     }
-    *binary_size = sizeof(float4);
+    *binary_size = sizeof(int64);
     return binary;
 }
 
@@ -373,9 +365,9 @@ unsigned char *float4_bin(const char *text, Oid typelem, int atttypmod, size_t *
 * float4_bout -
 *    converts a float4 number to an ascii string
 */
-char *float4_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *float4_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
-    if (size != sizeof(float4)) {
+    if (size != sizeof(int64)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
     }
     char *text = (char *)malloc(MAXFLOATWIDTH + 1);
@@ -394,7 +386,7 @@ char *float4_bout(const unsigned char *binary, size_t size, Oid typelem, int att
 * float8_bin -
 *    try to parse a string into a float8.
 */
-unsigned char *float8_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, char *err_msg)
+unsigned char *float8_bin(const char *text, size_t *binary_size, char *err_msg)
 {
     unsigned char *binary = (unsigned char *)malloc(sizeof(float8));
     if (binary == NULL) {
@@ -415,7 +407,7 @@ unsigned char *float8_bin(const char *text, Oid typelem, int atttypmod, size_t *
 * float8_bout -
 *    converts a float8 number to an ascii string
 */
-char *float8_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *float8_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
     if (size != sizeof(float8)) {
         ereport_null(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported result size: %zu", size)));
@@ -441,8 +433,7 @@ char *float8_bout(const unsigned char *binary, size_t size, Oid typelem, int att
 *    converts a numeric data type string to
 *    a NumericChoice struct (binary array)
 */
-unsigned char *numeric_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod,
-    size_t *binary_size, char *err_msg)
+unsigned char *numeric_bin(const PGconn* conn, const char *text, int atttypmod, size_t *binary_size, char *err_msg)
 {
     return scan_numeric(conn, text, atttypmod, binary_size, err_msg);
 }
@@ -453,7 +444,7 @@ unsigned char *numeric_bin(const PGconn* conn, const char *text, Oid typelem, in
 *    converts a NumericChoice struct (binary array)
 *    to a numeric data type string
 */
-char *numeric_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size)
+char *numeric_bout(const unsigned char *binary, size_t size, size_t *result_size)
 {
     size_t len = NUMERIC_MAX_PRECISION + NUMERIC_MAX_RESULT_SCALE + 1;
     if (size > len) {
@@ -467,7 +458,7 @@
     errno_t rc = EOK;
     rc = memset_s(text, len, 0, len);
     securec_check_c(rc, "\0", "\0");
-    if (!numerictoa((NumericChoice *)binary, text, len)) {
+    if (!numerictoa((NumericData *)binary, text, len)) {
         free(text);
         return NULL;
     }
@@ -475,7 +466,7 @@ char *numeric_bout(const unsigned char *binary, size_t size, Oid typelem, int at
     return text;
 }
 
-unsigned char *numeric_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg)
+unsigned char *numeric_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg)
 {
     if (apply_typmod((NumericVar *)binary, atttypmod, err_msg)) {
         NumericVar nv_copy = *(NumericVar *)binary;
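A pattern worth noting in gs_num.cpp: every integer narrower than 64 bits (and now float4 as well) is carried in a fixed 8-byte buffer, with the `*_badjust` functions widening and byte-swapping toward the on-disk order and the `*_brestore` functions undoing it, per the "change the endianness" comments. A hypothetical standalone illustration of that widen/swap round trip (the swap helper is illustrative; the real code uses its own conversion path):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t byteswap64(uint64_t v)
{
    uint64_t r = 0;
    for (int i = 0; i < 8; i++) {
        r = (r << 8) | ((v >> (8 * i)) & 0xFF); /* reverse the byte order */
    }
    return r;
}

/* "badjust": widen a host value into the canonical 8-byte wire buffer. */
static void int_badjust_demo(int32_t value, unsigned char out[8])
{
    uint64_t wide = byteswap64((uint64_t)(int64_t)value); /* sign-extend, then swap */
    memcpy(out, &wide, sizeof(wide));
}

/* "brestore": undo the swap and narrow back to the declared width. */
static int32_t int_brestore_demo(const unsigned char in[8])
{
    uint64_t wide = 0;
    memcpy(&wide, in, sizeof(wide));
    return (int32_t)(int64_t)byteswap64(wide);
}

int main()
{
    unsigned char buf[8];
    int_badjust_demo(-42, buf);
    printf("round trip: %d\n", int_brestore_demo(buf)); /* prints -42 */
    return 0;
}
```

The fixed `sizeof(int64)` buffer is also why the `size != sizeof(int64)` guards appear in every `*_bout`/`*_brestore`, and why float4_bin switches its allocation from `sizeof(float4)` to `sizeof(int64)` in this patch.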
diff --git a/src/common/interfaces/libpq/client_logic_fmt/gs_num.h b/src/common/interfaces/libpq/client_logic_fmt/gs_num.h
index 5118430c1..3007c4a82 100644
--- a/src/common/interfaces/libpq/client_logic_fmt/gs_num.h
+++ b/src/common/interfaces/libpq/client_logic_fmt/gs_num.h
@@ -29,41 +29,28 @@ typedef struct pg_conn PGconn;
 
-unsigned char *int1_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
-    char *err_msg);
-char *int1_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *int1_badjust(unsigned char *binary, size_t *binary_size,
-    Oid typelem, int atttypmod, const char *err_msg);
-unsigned char *int1_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
-    const char *err_msg);
-unsigned char *int2_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
-    char *err_msg);
-char *int2_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *int2_badjust(unsigned char *binary, size_t *binary_size,
-    Oid typelem, int atttypmod, const char *err_msg);
-unsigned char *int2_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
-    const char *err_msg);
-unsigned char *int4_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
-    char *err_msg);
-char *int4_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *int4_badjust(unsigned char *binary, size_t *binary_size,
-    Oid typelem, int atttypmod, const char *err_msg);
-unsigned char *int4_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
-    const char *err_msg);
-unsigned char *int8_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod, size_t *binary_size,
-    char *err_msg);
-char *int8_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *int8_badjust(unsigned char *binary, size_t *binary_size,
-    Oid typelem, int atttypmod, const char *err_msg);
-unsigned char *int8_brestore(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size,
-    const char *err_msg);
-unsigned char *float4_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, char *err_msg);
-char *float4_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *float8_bin(const char *text, Oid typelem, int atttypmod, size_t *binary_size, char *err_msg);
-char *float8_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *numeric_bin(const PGconn* conn, const char *text, Oid typelem, int atttypmod,
-    size_t *binary_size, char *err_msg);
-char *numeric_bout(const unsigned char *binary, size_t size, Oid typelem, int atttypmod, size_t *result_size);
-unsigned char *numeric_badjust(unsigned char *binary, size_t *binary_size, Oid typelem, int atttypmod, char *err_msg);
+unsigned char *int1_bin(const PGconn* conn, const char *text, size_t *binary_size, char *err_msg);
+char *int1_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *int1_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg);
+unsigned char *int1_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg);
+unsigned char *int2_bin(const PGconn* conn, const char *text, size_t *binary_size, char *err_msg);
+char *int2_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *int2_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg);
+unsigned char *int2_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg);
+unsigned char *int4_bin(const PGconn* conn, const char *text, size_t *binary_size, char *err_msg);
+char *int4_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *int4_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg);
+unsigned char *int4_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg);
+unsigned char *int8_bin(const PGconn* conn, const char *text, size_t *binary_size, char *err_msg);
+char *int8_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *int8_badjust(unsigned char *binary, size_t *binary_size, const char *err_msg);
+unsigned char *int8_brestore(const unsigned char *binary, size_t size, size_t *result_size, const char *err_msg);
+unsigned char *float4_bin(const char *text, size_t *binary_size, char *err_msg);
+char *float4_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *float8_bin(const char *text, size_t *binary_size, char *err_msg);
+char *float8_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *numeric_bin(const PGconn* conn, const char *text, int atttypmod, size_t *binary_size, char *err_msg);
+char *numeric_bout(const unsigned char *binary, size_t size, size_t *result_size);
+unsigned char *numeric_badjust(unsigned char *binary, size_t *binary_size, int atttypmod, char *err_msg);
 #endif
\ No newline at end of file
diff --git a/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp b/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp
index 2e187a8a3..36967763f 100644
--- a/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp
+++ b/src/common/interfaces/libpq/client_logic_fmt/numeric.cpp
@@ -22,46 +22,32 @@
 */
 #include "numeric.h"
-#include "biginteger.h"
 #include "client_logic_cache/icached_column_manager.h"
 #include "client_logic_common/client_logic_utils.h"
 #include "libpq-int.h"
-#define palloc(sz) malloc(sz)
+#define palloc(sz) calloc(sz, sizeof(unsigned char))
 #define pfree free
 #define ereport(a, b) return false;
-#define NUMERIC_HDRSZ (sizeof(uint16) + sizeof(int16))
-
-#define NUMERIC_HDRSZ_SHORT (sizeof(uint16))
+#define NUMERIC_HDRSZ (VARHDRSZ + sizeof(uint16) + sizeof(int16))
+#define NUMERIC_BI_MASK 0xF000
+#define NUMERIC_IS_NAN(n) (NUMERIC_NB_FLAGBITS(n) == NUMERIC_NAN)
+#define NUMERIC_HDRSZ_SHORT (VARHDRSZ + sizeof(uint16))
 #define VEC_TO_CHOICE(res) ((NumericChoice *)(res))
 #define NUMERIC_FLAGBITS(n) ((n)->choice.n_header & NUMERIC_SIGN_MASK)
-#define CNUMERIC_FLAGBITS(n) ((n)->n_header & NUMERIC_SIGN_MASK)
-#define VNUMERIC_FLAGBITS(n) (VEC_TO_CHOICE(n)->n_header & NUMERIC_SIGN_MASK)
 #define NUMERIC_IS_SHORT(n) (NUMERIC_FLAGBITS(n) == NUMERIC_SHORT)
-#define CNUMERIC_IS_SHORT(n) (CNUMERIC_FLAGBITS(n) == NUMERIC_SHORT)
-#define VNUMERIC_IS_SHORT(n) (VNUMERIC_FLAGBITS(n) == NUMERIC_SHORT)
 #define NUMERIC_SHORT_SIGN_MASK 0x2000
 #define NUMERIC_DSCALE_MASK 0x3FFF
 #define NUMERIC_SIGN(n) \
     (NUMERIC_IS_SHORT(n) ? (((n)->choice.n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) : \
         NUMERIC_FLAGBITS(n))
-#define CNUMERIC_SIGN(n) \
-    (CNUMERIC_IS_SHORT(n) ? (((n)->n_short.n_header & NUMERIC_SHORT_SIGN_MASK) ? NUMERIC_NEG : NUMERIC_POS) : \
-        CNUMERIC_FLAGBITS(n))
 #define NUMERIC_DSCALE(n) \
     (NUMERIC_IS_SHORT((n)) ? \
         ((n)->choice.n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT : \
         ((n)->choice.n_long.n_sign_dscale & NUMERIC_DSCALE_MASK))
-#define CNUMERIC_DSCALE(n) \
-    (CNUMERIC_IS_SHORT((n)) ? ((n)->n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT : \
-        ((n)->n_long.n_sign_dscale & NUMERIC_DSCALE_MASK))
-#define VNUMERIC_DSCALE(n) \
-    (VNUMERIC_IS_SHORT((n)) ? \
-        (VEC_TO_CHOICE(n)->n_short.n_header & NUMERIC_SHORT_DSCALE_MASK) >> NUMERIC_SHORT_DSCALE_SHIFT : \
-        (VEC_TO_CHOICE(n)->n_long.n_sign_dscale & NUMERIC_DSCALE_MASK))
 #define NUMERIC_WEIGHT(n) \
     (NUMERIC_IS_SHORT((n)) ? \
@@ -69,26 +55,9 @@
         ((n)->choice.n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) : \
         ((n)->choice.n_long.n_weight))
-#define CNUMERIC_WEIGHT(n) \
-    (CNUMERIC_IS_SHORT((n)) ? \
-        (((n)->n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \
-        ((n)->n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) : \
-        ((n)->n_long.n_weight))
-#define VNUMERIC_WEIGHT(n) \
-    (VNUMERIC_IS_SHORT(n) ? \
-        ((VEC_TO_CHOICE(n)->n_short.n_header & NUMERIC_SHORT_WEIGHT_SIGN_MASK ? ~NUMERIC_SHORT_WEIGHT_MASK : 0) | \
-        (VEC_TO_CHOICE(n)->n_short.n_header & NUMERIC_SHORT_WEIGHT_MASK)) : \
-        (VEC_TO_CHOICE(n)->n_long.n_weight))
-
-#define VNUMERIC_DIGITS(num) \
-    (VNUMERIC_IS_SHORT(num) ? VEC_TO_CHOICE(num)->n_short.n_data : VEC_TO_CHOICE(num)->n_long.n_data)
 #define NUMERIC_DIGITS(num) (NUMERIC_IS_SHORT(num) ? (num)->choice.n_short.n_data : (num)->choice.n_long.n_data)
-#define CNUMERIC_DIGITS(num) (CNUMERIC_IS_SHORT(num) ? (num)->n_short.n_data : (num)->n_long.n_data)
 #define NUMERIC_64 0xD000
 #define NUMERIC_64VALUE(n) (*((int64 *)((n)->choice.n_bi.n_data)))
-#define CNUMERIC_64VALUE(n) (*((int64 *)((n)->n_bi.n_data)))
-#define NUMERIC_BI_MASK 0xF000
-#define NUMERIC_BI_SCALEMASK 0x00FF
 #define NUMERIC_SHORT_DSCALE_MASK 0x1F80
 #define NUMERIC_SHORT_DSCALE_SHIFT 7
 #define NUMERIC_SHORT_WEIGHT_SIGN_MASK 0x0040
@@ -99,22 +68,23 @@
 #define NUMERIC_SHORT_DSCALE_MAX (NUMERIC_SHORT_DSCALE_MASK >> NUMERIC_SHORT_DSCALE_SHIFT)
 #ifndef WORDS_LITTLEENDIAN
 #define VARSIZE_4B(PTR) (((varattrib_4b_fe *)(PTR))->va_4byte.va_header & 0x3FFFFFFF)
+#define SET_VARSIZE_4B(PTR, len) (((varattrib_4b_fe*)(PTR))->va_4byte.va_header = (len)&0x3FFFFFFF)
 #else
-#define VyARSIZE_4B(PTR) ((((varattrib_4b_fe *)(PTR))->va_4byte.va_header >> 2) & 0x3FFFFFFF)
+#define VARSIZE_4B(PTR) ((((varattrib_4b_fe *)(PTR))->va_4byte.va_header >> 2) & 0x3FFFFFFF)
+#define SET_VARSIZE_4B(PTR, len) (((varattrib_4b_fe*)(PTR))->va_4byte.va_header = (((uint32)(len)) << 2))
 #endif
+#define SET_VARSIZE(PTR, len) do { \
+        SET_VARSIZE_4B(PTR, len);  \
+        *binary_size = len;        \
+    } while (0)
 
-#define CNUMERIC_NDIGITS(num) ((VARSIZE(num) - sizeof(int16)) / sizeof(NumericDigit))
 #define NUMERIC_NDIGITS(num) ((VARSIZE(num) - NUMERIC_HEADER_SIZE(num)) / sizeof(NumericDigit))
-#define VNUMERIC_NDIGITS(num) \
-    ((num.size() - sizeof(uint16) - (((VNUMERIC_FLAGBITS(num) & 0x8000) == 0) ? sizeof(int16) : 0)) / \
-    sizeof(NumericDigit))
 #define VARSIZE(PTR) VARSIZE_4B(PTR)
 #define NUMERIC_HEADER_SIZE(n) (sizeof(uint16) + (((NUMERIC_FLAGBITS(n) & 0x8000) == 0) ? sizeof(int16) : 0))
-#define NUMERIC_BI_SCALE(n) ((n)->choice.n_header & NUMERIC_BI_SCALEMASK)
-#define CNUMERIC_BI_SCALE(n) ((n)->n_header & NUMERIC_BI_SCALEMASK)
+#define NUMERIC_NB_FLAGBITS(n) ((n)->choice.n_header & NUMERIC_BI_MASK) // nan or biginteger
 #define NUMERIC_IS_BI(n) (NUMERIC_NB_FLAGBITS(n) > NUMERIC_NAN)
-#define CNUMERIC_IS_BI(n) (CNUMERIC_NB_FLAGBITS(n) > NUMERIC_NAN)
+
 #define FREE_POINTER(ptr) do { \
         if ((ptr) != NULL) {    \
             pfree((void *)ptr); \
@@ -123,9 +93,7 @@
         } \
     } while (0)
 #define pfree_ext(__p) FREE_POINTER(__p)
-#define NUMERIC_NB_FLAGBITS(n) ((n)->choice.n_header & NUMERIC_BI_MASK) // nan or biginteger
-#define CNUMERIC_NB_FLAGBITS(n) ((n)->n_header & NUMERIC_BI_MASK) // nan or biginteger
-#define NUMERIC_IS_NAN(n) (NUMERIC_NB_FLAGBITS(n) == NUMERIC_NAN)
+#define CNUMERIC_NB_FLAGBITS(n) ((n)->n_header & 0xF000) // nan or biginteger
 #define CNUMERIC_IS_NAN(n) (CNUMERIC_NB_FLAGBITS(n) == NUMERIC_NAN)
 #define NUMERIC_128 0xE000
 #define NUMERIC_FLAG_IS_BI128(n) (n == NUMERIC_128)
@@ -135,12 +103,12 @@ typedef union {
     struct { /* Normal varlena (4-byte length) */
         uint32 va_header;
-        char va_data[1];
+        char va_data[FLEXIBLE_ARRAY_MEMBER];
     } va_4byte;
     struct { /* Compressed-in-line format */
         uint32 va_header;
         uint32 va_rawsize; /* Original data size (excludes header) */
-        char va_data[1]; /* Compressed data */
+        char va_data[FLEXIBLE_ARRAY_MEMBER]; /* Compressed data */
     } va_compressed;
 } varattrib_4b_fe;
 
@@ -161,7 +129,7 @@ static void strip_var(NumericVar *var);
 static const char *set_var_from_str(const char *str, const char *cp, NumericVar *dest, char *err_msg);
 static void free_var(NumericVar *var);
-static void init_var_from_num(NumericChoice *num, NumericVar *dest);
+static void init_var_from_num(NumericData* num, NumericVar *dest);
 static bool get_str_from_var(const NumericVar *var, char *str, size_t max_size);
 
 static NumericVar const_nan = { 0, 0, NUMERIC_NAN, 0, NULL, NULL };
@@ -170,7 +138,7 @@ static const int round_powers[4] = {0, 1000, 100, 10};
 #endif
 
 #ifdef NUMERIC_DEBUG
-static void dump_numeric(const char *str, Numeric num);
+static void dump_numeric(const char *str, NumericData* num);
 #else
 #define dump_numeric(s, n)
 #endif
@@ -431,38 +399,20 @@ unsigned char *scan_numeric(const PGconn* conn, const char *num, int typmod, siz
 }
 
-bool numerictoa(NumericChoice *num, char *ascii, size_t max_size)
+bool numerictoa(NumericData* num, char *ascii, size_t max_size)
 {
     NumericVar x;
-    int scale = 0;
 
     /*
      * Handle NaN
      */
-    if (CNUMERIC_IS_NAN(num)) {
+    if (NUMERIC_IS_NAN(num)) {
         errno_t rc = EOK;
-        rc = memcpy_s(ascii, strlen("NaN") + 1, "NaN", strlen("NaN"));
+        rc = memcpy_s(ascii, max_size, "NaN", strlen("NaN"));
         securec_check_c(rc, "\0", "\0");
         return true;
     }
 
-    /*
-     * If numeric is big integer, call int64_out/int128_out
-     */
-    uint16 numFlags = CNUMERIC_NB_FLAGBITS(num);
-    if (NUMERIC_FLAG_IS_BI64(numFlags)) {
-        int64 val64 = CNUMERIC_64VALUE(num);
-        scale = CNUMERIC_BI_SCALE(num);
-        return bi64_out(val64, scale, ascii);
-    } else if (NUMERIC_FLAG_IS_BI128(numFlags)) {
-        int128 val128 = 0;
-        errno_t rc = EOK;
-        rc = memcpy_s(&val128, sizeof(int128), num->n_bi.n_data, sizeof(int128));
-        securec_check_c(rc, "\0", "\0");
-        scale = CNUMERIC_BI_SCALE(num);
-        return bi128_out(val128, scale, ascii);
-    }
-
     /*
      * Get the number in the variable format
      */
@@ -550,7 +500,7 @@ bool apply_typmod(NumericVar *var, int32 typmod, char *err_msg)
 */
 unsigned char *make_result(NumericVar *var, size_t *binary_size, char *err_msg)
 {
-    unsigned char *binary = NULL;
+    NumericData* result;
     NumericDigit *digits = var->digits;
     int weight = var->weight;
     int sign = var->sign;
@@ -558,16 +508,14 @@ unsigned char *make_result(NumericVar *var, size_t *binary_size, char *err_msg)
     Size len;
 
     if (sign == NUMERIC_NAN) {
-        binary = (unsigned char *)malloc(NUMERIC_HDRSZ_SHORT);
-        if (binary == NULL) {
+        result = (NumericData*)palloc(NUMERIC_HDRSZ_SHORT);
+        if (result == NULL) {
             return NULL;
         }
-        errno_t rc = EOK;
-        rc = memset_s(binary, NUMERIC_HDRSZ_SHORT, 0, NUMERIC_HDRSZ_SHORT);
-        securec_check_c(rc, "\0", "\0");
-        *binary_size = NUMERIC_HDRSZ_SHORT;
-        ((NumericChoice *)binary)->n_header = NUMERIC_NAN;
-        return binary;
+
+        SET_VARSIZE(result, NUMERIC_HDRSZ_SHORT);
+        result->choice.n_header = NUMERIC_NAN;
+        return (unsigned char*) result;
     }
 
     n = var->ndigits;
@@ -592,45 +540,38 @@ unsigned char *make_result(NumericVar *var, size_t *binary_size, char *err_msg)
     /* Build the result */
     if (NUMERIC_CAN_BE_SHORT(var->dscale, weight)) {
         len = NUMERIC_HDRSZ_SHORT + n * sizeof(NumericDigit);
-        binary = (unsigned char *)malloc(len);
-        if (binary == NULL) {
+        result = (NumericData*)palloc(len);
+        if (result == NULL) {
             return NULL;
         }
-        errno_t rc = EOK;
-        rc = memset_s(binary, len, 0, len);
-        securec_check_c(rc, "\0", "\0");
-        *binary_size = len;
-        ((NumericChoice *)(binary))->n_short.n_header =
+        SET_VARSIZE(result, len);
+        result->choice.n_short.n_header =
            (sign == NUMERIC_NEG ? (NUMERIC_SHORT | NUMERIC_SHORT_SIGN_MASK) : NUMERIC_SHORT) |
            (var->dscale << NUMERIC_SHORT_DSCALE_SHIFT) |
           (weight < 0 ? NUMERIC_SHORT_WEIGHT_SIGN_MASK : 0) |
           (weight & NUMERIC_SHORT_WEIGHT_MASK);
     } else {
         len = NUMERIC_HDRSZ + n * sizeof(NumericDigit);
-        binary = (unsigned char *)malloc(len);
-        if (binary == NULL) {
+        result = (NumericData*)palloc(len);
+        if (result == NULL) {
             return NULL;
         }
-        errno_t rc = EOK;
-        rc = memset_s(binary, len, 0, len);
-        securec_check_c(rc, "\0", "\0");
-        *binary_size = len;
-        ((NumericChoice *)binary)->n_long.n_sign_dscale = sign | (var->dscale & NUMERIC_DSCALE_MASK);
-        ((NumericChoice *)binary)->n_long.n_weight = weight;
+        SET_VARSIZE(result, len);
+        result->choice.n_long.n_sign_dscale = sign | (var->dscale & NUMERIC_DSCALE_MASK);
+        result->choice.n_long.n_weight = weight;
     }
 
     if (n != 0) {
-        errno_t rc = memcpy_s(VNUMERIC_DIGITS(binary), n * sizeof(NumericDigit), digits, n * sizeof(NumericDigit));
+        errno_t rc = memcpy_s(NUMERIC_DIGITS(result), n * sizeof(NumericDigit), digits, n * sizeof(NumericDigit));
         securec_check_c(rc, "\0", "\0");
     }
 
     /* Check for overflow of int16 fields */
-    if (VNUMERIC_WEIGHT(binary) != weight || VNUMERIC_DSCALE(binary) != var->dscale) {
-        free(binary);
-        binary = NULL;
+    if (NUMERIC_WEIGHT(result) != weight || NUMERIC_DSCALE(result) != var->dscale) {
+        libpq_free(result);
        check_sprintf_s(sprintf_s(err_msg, MAX_ERRMSG_LENGTH, "value overflows numeric format\n"));
        return NULL;
     }
 
-    return binary;
+    return (unsigned char*) result;
 }
 
 /*
 * set_var_from_str()
@@ -798,25 +739,25 @@ static const char *set_var_from_str(const char *str, const char *cp, NumericVar
 /*
 * init_var_from_num() -
 *
- *	Initialize a variable from packed db format. The digits array is not
- *	copied, which saves some cycles when the resulting var is not modified.
- *	Also, there's no need to call free_var(), as long as you don't assign any
- *	other value to it (with set_var_* functions, or by using the var as the
- *	destination of a function like add_var())
+ *    Initialize a variable from packed db format. The digits array is not
+ *    copied, which saves some cycles when the resulting var is not modified.
+ *    Also, there's no need to call free_var(), as long as you don't assign any
+ *    other value to it (with set_var_* functions, or by using the var as the
+ *    destination of a function like add_var())
 *
- *	CAUTION: Do not modify the digits buffer of a var initialized with this
- *	function, e.g. by calling round_var() or trunc_var(), as the changes will
- *	propagate to the original Numeric! It's OK to use it as the destination
- *	argument of one of the calculational functions, though.
+ *    CAUTION: Do not modify the digits buffer of a var initialized with this
+ *    function, e.g. by calling round_var() or trunc_var(), as the changes will
+ *    propagate to the original NumericData*! It's OK to use it as the destination
+ *    argument of one of the calculational functions, though.
 */
-static inline void init_var_from_num(NumericChoice *num, NumericVar *dest)
+static inline void init_var_from_num(NumericData* num, NumericVar* dest)
 {
-    Assert(!CNUMERIC_IS_BI(num));
-    dest->ndigits = CNUMERIC_NDIGITS(num);
-    dest->weight = CNUMERIC_WEIGHT(num);
-    dest->sign = CNUMERIC_SIGN(num);
-    dest->dscale = CNUMERIC_DSCALE(num);
-    dest->digits = CNUMERIC_DIGITS(num);
+    Assert(!NUMERIC_IS_BI(num));
+    dest->ndigits = NUMERIC_NDIGITS(num);
+    dest->weight = NUMERIC_WEIGHT(num);
+    dest->sign = NUMERIC_SIGN(num);
+    dest->dscale = NUMERIC_DSCALE(num);
+    dest->digits = NUMERIC_DIGITS(num);
     dest->buf = NULL; /* digits array is not palloc'd */
 }
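The numeric.cpp rework above switches the client-side numeric buffer to the server's `NumericData` layout: a 4-byte varlena length word (`VARHDRSZ`) now precedes the numeric flag word, and the new `SET_VARSIZE` macro stamps that length while also updating `*binary_size`. A simplified, hypothetical model of that header math (field names and the `_demo` helpers are illustrative; 0xC000 is the standard PostgreSQL `NUMERIC_NAN` flag value):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const uint32_t VARHDRSZ_DEMO = 4; /* 4-byte varlena length header */

struct DemoNumeric {
    uint32_t va_header;   /* total length, stored in the low 30 bits */
    uint16_t n_header;    /* sign/dscale flag word (e.g. NUMERIC_NAN) */
};

static void set_varsize_demo(DemoNumeric *n, uint32_t len)
{
    n->va_header = len & 0x3FFFFFFF; /* the big-endian build variant of SET_VARSIZE_4B */
}

static uint32_t varsize_demo(const DemoNumeric *n)
{
    return n->va_header & 0x3FFFFFFF;
}

int main()
{
    /* palloc() in numeric.cpp is now calloc(), so headers start zeroed and the
     * explicit memset_s calls could be dropped. */
    DemoNumeric *nan_result = (DemoNumeric *)calloc(1, sizeof(DemoNumeric));
    if (nan_result == NULL) {
        return 1;
    }
    set_varsize_demo(nan_result, VARHDRSZ_DEMO + sizeof(uint16_t)); /* NUMERIC_HDRSZ_SHORT */
    nan_result->n_header = 0xC000;                                  /* NUMERIC_NAN flag bits */
    printf("varlena size: %u bytes\n", (unsigned)varsize_demo(nan_result));
    free(nan_result);
    return 0;
}
```

Sharing the server layout is also why numerictoa() now takes `NumericData*` and can use the regular `NUMERIC_*` accessor macros instead of the removed `CNUMERIC_*`/`VNUMERIC_*` duplicates, and why the big-integer (bi64/bi128) fast path could be deleted along with biginteger.h.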
diff --git a/src/common/interfaces/libpq/client_logic_fmt/numeric.h b/src/common/interfaces/libpq/client_logic_fmt/numeric.h
index 2d451d076..c7f327c7f 100644
--- a/src/common/interfaces/libpq/client_logic_fmt/numeric.h
+++ b/src/common/interfaces/libpq/client_logic_fmt/numeric.h
@@ -93,8 +93,8 @@ typedef struct NumericData *Numeric;
 typedef struct pg_conn PGconn;
 
 unsigned char *scan_numeric(const PGconn* conn, const char *num, int atttypmod, size_t *binary_size, char *err_msg);
-bool numerictoa(NumericChoice *num, char *ascii, size_t max_size);
+bool numerictoa(NumericData* num, char *ascii, size_t max_size);
 bool apply_typmod(NumericVar *var, int32 typmod, char *err_msg);
 unsigned char *make_result(NumericVar *var, size_t *binary_size, char *err_msg);
-#endif
\ No newline at end of file
+#endif
diff --git a/src/common/interfaces/libpq/client_logic_hooks/Makefile b/src/common/interfaces/libpq/client_logic_hooks/Makefile
index 6c20a5113..ab71ba0b0 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/Makefile
+++ b/src/common/interfaces/libpq/client_logic_hooks/Makefile
@@ -20,8 +20,10 @@ override CPPFLAGS := -DFRONTEND -DFRONTEND_PARSER -DPGXC -fstack-protector-all -
 override CPPFLAGS += -I$(top_builddir)/src/common/interfaces/libpq/ -I$(top_builddir)/src/include/libpq/ -I$(top_builddir)/src/include/libpq/client_logic_cache
 override CPPFLAGS += -I$(top_builddir)/src/common/interfaces/libpq/client_logic_hooks -I$(encryption_hooks_dir)
 ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no)
-override CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
-override CPPFLAGS += -I$(CJSON_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -L$(CJSON_LIB_PATH) -L$(LIBCURL_LIB_PATH) -lcjson -lcurl
+    ifneq ($(enable_lite_mode), yes)
+        override CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -l$(SECURE_C_CHECK) -L$(LIBKMC_LIB_PATH) -lkmc
+    endif
+    override CPPFLAGS += -I$(CJSON_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -L$(CJSON_LIB_PATH) -L$(LIBCURL_LIB_PATH) -lcjson -lcurl
 endif
 override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -shared
 override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -shared
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/Makefile b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/Makefile
index 5c3b23348..a99cd312a 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/Makefile
+++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/Makefile
@@ -13,7 +13,7 @@ include $(top_builddir)/src/Makefile.global
 
 override CPPFLAGS += -I. -I$(top_builddir)/src/include -I../encryption_hooks
 override LDFLAGS += -L. -L$(LIBOPENSSL_LIB_PATH)
-override LDLIBS += -lsecurec -lssl -lcrypto
+override LDLIBS += -l$(SECURE_C_CHECK) -lssl -lcrypto
 
 CMKEM_OBJS = common.o cmkem_comm_algorithm.o reg_hook_frame.o
 override CPPFLAGS += -ftrapv -fstack-protector-strong
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/cmkem_version_control.h b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/cmkem_version_control.h
index cecaa8195..c67d1835c 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/cmkem_version_control.h
+++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/cmkem_version_control.h
@@ -25,23 +25,16 @@
 
 #include "pg_config.h"
 
-#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS)))
+#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE))))
 #define ENABLE_GS_KTOOL
 #define ENABLE_HUAWEI_KMS
+
+#ifdef ENABLE_UT
+#define ENABLE_LOCAL_KMS
+#endif /* ENABLE_UT */
+
 #else
 #define ENABLE_LOCAL_KMS
 #endif
 
-#ifdef ENABLE_UT
-#ifndef ENABLE_GS_KTOOL
-#define ENABLE_GS_KTOOL
-#endif
-#ifndef ENABLE_HUAWEI_KMS
-#define ENABLE_HUAWEI_KMS
-#endif
-#ifndef ENABLE_LOCAL_KMS
-#define ENABLE_LOCAL_KMS
-#endif
-#endif /* ENABLE_UT */
-
 #endif /* CMKEM_VERSION_CONTROL_H */
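The cmkem_version_control.h change above narrows the key-management backend selection: lite-mode privategauss builds now fall through to the local KMS instead of requiring gs_ktool/kmc, matching the conditional linking added in the Makefiles. A tiny, hypothetical compile-time probe of that gate (DEMO_BACKEND is illustrative only):

```cpp
#include <cstdio>

/* Mirrors the #if in cmkem_version_control.h: exactly one key-management
 * backend family is selected from the build flags. */
#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE))))
#define DEMO_BACKEND "gs_ktool / huawei_kms"
#else
#define DEMO_BACKEND "local_kms"
#endif

int main()
{
    /* With ENABLE_LITE_MODE defined, a privategauss build prints "local_kms". */
    printf("client master key backend: %s\n", DEMO_BACKEND);
    return 0;
}
```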
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/Makefile b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/Makefile
deleted file mode 100644
index 25b06d108..000000000
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-#-------------------------------------------------------------------------
-#
-# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
-#
-# src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/Makefile
-#
-#-------------------------------------------------------------------------
-
-subdir = src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile
-top_builddir = ../../../../../../../
-
-include $(top_builddir)/src/Makefile.global
-
-override CPPFLAGS += -I.. -I. -I$(CJSON_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH)
-override CPPFLAGS += -g
-override LDFLAGS += -L. -L$(CJSON_LIB_PATH) -L$(LIBCURL_LIB_PATH)
-override LDLIBS += -lcurl -lcjson -lsecurec
-
-override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -shared
-
-OBJS = ../common.o convjson_common.o convjson.o
-
-all: convjson
-
-convjson: $(OBJS)
-	cp $(CJSON_LIB_PATH)/* ./
-	gcc -fPIE -pie $(CPPLAGS) $^ $(LDFLAGS) $(LDLIBS) -o $@
-
-clean distclean maintainer-clean:
-	rm -f ./*.o ./convjson libcjson* kms_httpmsg_temp.ini
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.cpp b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.cpp
deleted file mode 100644
index 12cee63da..000000000
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * -------------------------------------------------------------------------
- *
- * convjson.cpp
- *    we need to construct HTTP packages to connect to servers with restful style interfaces.
- *    however, it's hard to construct json strings directly in the code.
- *    so, we provide this bin tool to convert json file into code file: 'convjson'.
- *
- * IDENTIFICATION
- *    src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.cpp
- *
- * -------------------------------------------------------------------------
- */
-
-#include "convjson.h"
-
-#include
-#include
-#include
-#include "securec.h"
-#include "securec_check.h"
-#include "cmkem_comm.h"
-#include "convjson_common.h"
-
-/*
- * rule (1) : str -> "str"
- * rule (2) : "str" -> "\"str\""
- * rule (3) : \n -> "\"\n\""
- *
- * e.g.
- * input:
- *     line 1
- *     line 2
- *     "line3"
- * output :
- *     "line1"
- *     ""
- *     "line2"
- *     ""
- *     "\"line3\""
- */
-CmkemStr *conv_str_to_code(const char *var, CmkemStr *var_val)
-{
-    errno_t rc = 0;
-
-    /*
-     * input : var, var_val
-     * output : "const char *var = \"var_val\";"
-     * to make sure the buffer is enough, we consider the worst situation, such as :
-     * if var_val = "\n\n\n",
-     * according to the rule(3), we should convert it to "\"\n\"\"\n\"\"\n\"" (= var_val * 3)
-     */
-    size_t code_len = strlen("const char *$ = \"") + strlen(var) + 3 * var_val->str_len + strlen("\";");
-    CmkemStr *code = malloc_cmkem_str(code_len);
-    if (code == NULL) {
-        return NULL;
-    }
-
-    /* code = "const char *var = \"" */
-    rc = sprintf_s(code->str_val, code_len, "const char *%s = \"", var);
-    securec_check_ss_c(rc, "", "");
-    code->str_len += strlen(var) + strlen("const char * = \"");
-
-    /* code = "var = \"val_val" */
-    for (size_t i = 0; i < var_val->str_len; i++) {
-        char var_val_chr = var_val->str_val[i];
-        if (var_val_chr == '\"') {
-            push_char(code, '\\');
-            push_char(code, var_val_chr);
-        } else if (var_val_chr == '\n') {
-            push_char(code, '\"');
-            push_char(code, var_val_chr);
-            push_char(code, '\"');
-        } else {
-            push_char(code, var_val_chr);
-        }
-    }
-
-    /* code = "var = \"val_val\";" */
-    push_char(code, '\"');
-    push_char(code, ';');
-    push_char(code, '\n');
-    push_char(code, '\n');
-
-    return code;
-}
-
-CmkemStr *conv_jsontree_to_advstr(cJSON *json_tree)
-{
-    char *json_str = NULL;
-    CmkemStr *json_advstr = NULL;
-
-    json_str = cJSON_Print(json_tree);
-    if (json_str == NULL) {
-        cmkem_errmsg("failed to parse json tree.");
-        return NULL;
-    }
-
-    json_advstr = conv_str_to_cmkem_str(json_str);
-    cJSON_free(json_str);
-    if (json_advstr == NULL) {
-        return NULL;
-    }
-
-    return json_advstr;
-}
-
-CmkemStr *conv_jsontree_to_code(cJSON *json_tree)
-{
-    char *json_str = NULL;
-    CmkemStr *json_advstr = NULL;
-
-    json_str = cJSON_Print(json_tree);
-    if (json_str == NULL) {
-        cmkem_errmsg("failed to parse json tree.");
-        return NULL;
-    }
-
-    json_advstr = conv_str_to_cmkem_str(json_str);
-    cJSON_free(json_str);
-    if (json_advstr == NULL) {
-        cmkem_errmsg("failed to malloc memory.");
-        return NULL;
-    }
-
-    return conv_str_to_code(json_tree->valuestring, json_advstr);
-}
-
-cJSON *conv_jsonfile_to_cjson(const char *json_file)
-{
-    CmkemStr *json_file_content = NULL;
-
-    json_file_content = read_file(json_file);
-    if (json_file_content == NULL) {
-        return NULL;
-    }
-
-    return cJSON_Parse(json_file_content->str_val);
-}
-
-CmkemErrCode conv_jsonfile_to_headfile_with_resolve(const char *json_file, const char *header_file,
-    const char *child_json_list[])
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    cJSON *father = NULL;
-    cJSON *cur_child = NULL;
-    CmkemStr *cur_kmsstr = NULL;
-    CmkemStr *cur_code = NULL; /* cur_code = "const char *cur_var = \"cur_json\";" */
-
-    ret = remove_file(header_file);
-    if (ret != CMKEM_SUCCEED) {
-        return ret;
-    }
-
-    father = conv_jsonfile_to_cjson(json_file);
-    if (father == NULL) {
-        return CMKEM_UNKNOWN_ERR;
-    }
-
-    ret = create_file(header_file);
-    if (ret != CMKEM_SUCCEED) {
-        return ret;
-    }
-
-    for (size_t i = 0; child_json_list[i] != NULL; i++) {
-        cur_child = cJSON_GetObjectItem(father, child_json_list[i]);
-        if (cur_child == NULL) {
-            return CMKEM_FIND_CSJON_ERR;
-        }
-
-        cur_kmsstr = conv_jsontree_to_advstr(cur_child);
-
-        cur_code = conv_str_to_code(child_json_list[i], cur_kmsstr);
-        if (cur_code == NULL) {
-            return CMKEM_MALLOC_MEM_ERR;
-        }
-        free_cmkem_str(cur_kmsstr);
-
-        ret = write_content_to_tail(header_file, cur_code->str_val, cur_code->str_len);
-        if (ret != CMKEM_SUCCEED) {
-            return ret;
-        }
-        free_cmkem_str(cur_code);
-    }
-
-    return ret;
-}
-
-int main()
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-
-    system("rm -rf ./kms_httpmsg_temp.ini");
-
-    const char *in_file = "./kms_httpmsg_temp.json";
-    const char *out_file = "./kms_httpmsg_temp.ini";
-    const char *temp_type_list[] = {
-        "temp_iam_auth_req",
-        "temp_kms_select_key_req",
-        "temp_kms_select_key_res",
-        "temp_kms_enc_key_req",
-        "temp_kms_enc_key_res",
-        "temp_kms_dec_key_req",
-        "temp_kms_dec_key_res",
-        "temp_iam_err_res",
-        "temp_kms_err_res",
-        NULL
-    };
-
-    ret = conv_jsonfile_to_headfile_with_resolve(in_file, out_file, temp_type_list);
-    if (ret != CMKEM_SUCCEED) {
-        print_cmkem_errmsg_buf();
-    }
-
-    return 0;
-}
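The escaping rules documented in the deleted conv_str_to_code() are easy to restate compactly: wrap the value in quotes, escape embedded double quotes, and break the literal at newlines so each source line stays a valid C string. A self-contained sketch of those three rules (to_c_literal is a hypothetical name, not from the removed tool):

```cpp
#include <iostream>
#include <string>

static std::string to_c_literal(const std::string &name, const std::string &value)
{
    std::string out = "const char *" + name + " = \"";
    for (char c : value) {
        if (c == '"') {
            out += "\\\"";            /* rule (2): " -> \" */
        } else if (c == '\n') {
            out += "\"\n\"";          /* rule (3): close, break, reopen the literal */
        } else {
            out += c;                 /* rule (1): plain characters pass through */
        }
    }
    out += "\";\n";
    return out;
}

int main()
{
    std::cout << to_c_literal("demo", "line 1\n\"line 2\"");
    return 0;
}
```

The worst-case sizing comment in the original follows directly from rule (3): a value consisting only of newlines triples in length, hence the `3 * var_val->str_len` buffer bound.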
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.h b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.h
deleted file mode 100644
index 0daa97875..000000000
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * -------------------------------------------------------------------------
- *
- * convjson.h
- *    we need to construct HTTP packages to connect to servers with restful style interfaces.
- *    however, it's hard to construct json strings directly in the code.
- *    so, we provide this bin tool to convert json file into code file: 'convjson'.
- *
- * IDENTIFICATION
- *    src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson.h
- *
- * -------------------------------------------------------------------------
- */
-
-#ifndef C_H
-#define C_H
-
-#include "../cmkem_comm.h"
-#include "cjson/cJSON.h"
-
-CmkemStr *conv_str_to_code(const char *var, CmkemStr *var_val);
-CmkemStr *conv_jsontree_to_advstr(cJSON *json_tree);
-CmkemStr *conv_jsontree_to_code(cJSON *json_tree);
-cJSON *conv_jsonfile_to_jsontree(const char *json_file);
-CmkemErrCode conv_jsonfile_to_headfile_with_resolve(const char *json_file, const char *header_file,
-    const char *child_json_list[]);
-
-#endif
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson_common.cpp b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson_common.cpp
deleted file mode 100644
index 119724431..000000000
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson_common.cpp
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * -------------------------------------------------------------------------
- *
- * convjson_common.cpp
- *    functions to create/read/write/remove files, used to convert a json file into a code file.
- *
- * IDENTIFICATION
- *    src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson_common.cpp
- *
- * -------------------------------------------------------------------------
- */
-
-#include "convjson_common.h"
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "securec.h"
-#include "securec_check.h"
-
-CmkemErrCode check_file_path(const char *file_path)
-{
-    const char danger_char_list[] = {'|', ';', '&', '$', '<', '>', '`', '\\', '\'', '\"', '{', '}', '(', ')', '[',']',
-        '~', '*', '?', '!'};
-
-    for (size_t i = 0; i < strlen(file_path); i++) {
-        for (size_t j = 0; j < sizeof(danger_char_list); j++) {
-            if (file_path[i] == danger_char_list[j]) {
-                cmkem_errmsg("the path '%s' contains invalid character '%c'.", file_path, file_path[i]);
-                return CMKEM_CHECK_ENV_VAL_ERR;
-            }
-        }
-    }
-
-    return CMKEM_SUCCEED;
-}
-
-CmkemErrCode check_and_realpath(const char *file_path, char *real_file_path, size_t real_file_path_len)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-
-    if (real_file_path_len < PATH_MAX) {
-        cmkem_errmsg("failed to convert '%s' to real path, the length of dest buffer is shorter than PATH_MAX.",
-            file_path);
-        return CMKEM_CHECK_BUF_LEN_ERR;
-    }
-
-    ret = check_file_path(file_path);
-    check_kmsret(ret, ret);
-
-    realpath(file_path, real_file_path);
-
-    return ret;
-}
-
-CmkemErrCode create_file(const char *file_path)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    char real_file_path[PATH_MAX] = {0};
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    int fd = open(real_file_path, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
-    if (fd == -1) {
-        cmkem_errmsg("failed to create file '%s'.", real_file_path);
-        return CMKEM_CREATE_FILE_ERR;
-    }
-
-    close(fd);
-    return ret;
-}
-
-CmkemErrCode read_file_size(const char *file_path, size_t *file_size)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    struct stat file_stat = {0};
-    char real_file_path[PATH_MAX] = {0};
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    if (stat(real_file_path, &file_stat) != 0) {
-        cmkem_errmsg("failed to read status of file '%s'.", real_file_path);
-        return CMKEM_READ_FILE_STATUS_ERR;
-    }
-
-    *file_size = file_stat.st_size;
-    return ret;
-}
-
-CmkemErrCode read_file_content(const char *file_path, size_t read_len, char *buf, size_t buf_len)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    int fd = 0;
-    char real_file_path[PATH_MAX] = {0};
-
-    if (buf_len < read_len) {
-        cmkem_errmsg("failed to read file '%s', content buffer is too short.", file_path);
-        return CMKEM_CHECK_BUF_LEN_ERR;
-    }
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    fd = open(real_file_path, O_RDONLY, 0);
-    if (fd < 0) {
-        cmkem_errmsg("failed to open file '%s'.", real_file_path);
-        return CMKEM_OPEN_FILE_ERR;
-    }
-
-    if (read(fd, buf, read_len) < 0) {
-        cmkem_errmsg("failed to read file '%s'.", real_file_path);
-        close(fd);
-        return CMKEM_READ_FILE_ERR;
-    }
-
-    close(fd);
-    return ret;
-}
-
-CmkemStr *read_file(const char *file_path)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    int fd = 0;
-    char real_file_path[PATH_MAX] = {0};
-    size_t file_size = 0;
-    CmkemStr *file_content = NULL;
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, NULL);
-
-    ret = read_file_size(real_file_path, &file_size);
-    check_kmsret(ret, NULL);
-
-    file_content = malloc_cmkem_str(file_size + 1);
-    if (file_content == NULL) {
-        return NULL;
-    }
-
-    fd = open(real_file_path, O_RDONLY, 0);
-    if (fd < 0) {
-        cmkem_errmsg("failed to open file '%s'.", real_file_path);
-        return NULL;
-    }
-
-    if (read(fd, file_content->str_val, file_size) < 0) {
-        cmkem_errmsg("failed to read file '%s'.\n", real_file_path);
-        close(fd);
-        return NULL;
-    }
-
-    close(fd);
-    file_content->str_len = file_size;
-    return file_content;
-}
-
-CmkemErrCode write_content(const char *file_path, const char* buf, size_t buf_len)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    int fd = 0;
-    char real_file_path[PATH_MAX] = {0};
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    fd = open(real_file_path, O_WRONLY, 0);
-    if (fd < 0) {
-        cmkem_errmsg("failed to open file '%s'.", real_file_path);
-        return CMKEM_OPEN_FILE_ERR;
-    }
-
-    if (write(fd, buf, buf_len) < 0) {
-        cmkem_errmsg("failed to write to file '%s'.\n", real_file_path);
-        close(fd);
-        return CMKEM_WRITE_FILE_ERR;
-    }
-
-    close(fd);
-    return ret;
-}
-
-CmkemErrCode write_content_with_create(const char *file_path, const char* buf, size_t buf_len)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    int fd = 0;
-    char real_file_path[PATH_MAX] = {0};
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    fd = open(real_file_path, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
-    if (fd < 0) {
-        cmkem_errmsg("failed to create file '%s'.", real_file_path);
-        return CMKEM_CREATE_FILE_ERR;
-    }
-
-    if (write(fd, buf, buf_len) < 0) {
-        cmkem_errmsg("failed to write to file '%s'.\n", real_file_path);
-        close(fd);
-        return CMKEM_WRITE_FILE_ERR;
-    }
-
-    close(fd);
-    return ret;
-}
-
-CmkemErrCode write_content_to_tail(const char *file_path, const char* buf, size_t buf_len)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    int fd = 0;
-    char real_file_path[PATH_MAX] = {0};
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    fd = open(real_file_path, O_WRONLY | O_APPEND, 0);
-    if (fd < 0) {
-        cmkem_errmsg("failed to open file '%s'.", real_file_path);
-        return CMKEM_OPEN_FILE_ERR;
-    }
-
-    if (write(fd, buf, buf_len) < 0) {
-        cmkem_errmsg("failed to write to file '%s'.\n", real_file_path);
-        close(fd);
-        return CMKEM_WRITE_FILE_ERR;
-    }
-
-    close(fd);
-    return ret;
-}
-
-CmkemErrCode remove_file(const char *file_path)
-{
-    CmkemErrCode ret = CMKEM_SUCCEED;
-    char real_file_path[PATH_MAX] = {0};
-
-    ret = check_and_realpath(file_path, real_file_path, PATH_MAX);
-    check_kmsret(ret, ret);
-
-    if (remove(real_file_path) != 0) {
-        cmkem_errmsg("failed to remove file '%s' ", real_file_path);
-    }
-
-    return ret;
-}
- * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * convjson_common.h - * functions to create/read/write/remove files, used for convert json file into code file. - * - * IDENTIFICATION - * src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/convjson_common.h - * - * ------------------------------------------------------------------------- - */ - -#ifndef CONVJSON_COMMON_H -#define CONVJSON_COMMON_H - -#include "../cmkem_comm.h" - -CmkemErrCode check_file_path(const char *file_path); -CmkemErrCode check_and_realpath(const char *file_path, char *real_file_path, size_t real_file_path_len); -CmkemErrCode create_file(const char *file_path); -CmkemErrCode read_file_size(const char *file_path, size_t *file_size); -CmkemErrCode read_file_content(const char *file_path, size_t read_len, char *buf, size_t buf_len); -CmkemStr *read_file(const char *file_path); -CmkemErrCode write_content(const char *file_path, const char* buf, size_t buf_len); -CmkemErrCode write_content_with_create(const char *file_path, const char* buf, size_t buf_len); -CmkemErrCode write_content_to_tail(const char *file_path, const char* buf, size_t buf_len); -CmkemErrCode remove_file(const char *file_path); - -#endif /* CONVJSON_COMMON_H */ diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/kms_httpmsg_temp.ini b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/kms_httpmsg_temp.ini deleted file mode 100644 index 82f2313c3..000000000 --- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/kms_httpmsg_temp.ini +++ /dev/null @@ -1,110 +0,0 @@ -const char *temp_iam_auth_req = "{" -" \"auth\": {" -" \"identity\": {" -" \"methods\": [\"password\"]," -" \"password\": {" -" \"user\": {" -" \"name\": \"$user_name$\"," -" \"password\": \"$password$\"," -" \"domain\": {" -" \"name\": \"$domain_name$\"" -" }" -" }" -" }" -" }," -" \"scope\": {" -" \"project\": {" -" \"name\": \"$project_name$\"" -" }" -" }" -" }" -"}"; - -const char *temp_kms_select_key_req = "{" -" \"key_id\": \"$cmk_id$\"" -"}"; - -const char *temp_kms_select_key_res = "{" -" \"key_info\": {" -" \"key_id\": \"0d0466b0-e727-4d9c-b35d-f84bb474a37f\"," -" \"domain_id\": \"b168fe00ff56492495a7d22974df2d0b\"," -" \"key_alias\": \"kms_test\"," -" \"realm\": \"aaa\"," -" \"key_description\": \"\"," -" \"creation_date\": \"1472442386000\"," -" \"scheduled_deletion_date\": \"\"," -" \"key_state\": \"2\"," -" \"default_key_flag\": \"0\"," -" \"key_type\": \"1\"," -" \"expiration_time\": \"1501578672000\"," -" \"origin\": \"kms\"," -" \"key_rotation_enabled\": \"false\"," -" \"sys_enterprise_project_id \": \"0\"" -" }" -"}"; - -const char *temp_kms_enc_key_req = "{" -" \"key_id\": \"$cmk_id$\"," -" \"plain_text\": \"$cek_plain$\"," -" \"datakey_plain_length\": \"$cek_plain_len$\"" -"}"; - -const char *temp_kms_enc_key_res = "{" -" \"key_id\": \"0d0466b0-e727-4d9c-b35d-f84bb474a37f\"," -" \"plain_text\": \"00000000000000000079B43003D2320D9F0E8EA9831A92759FB4B\"," -" \"datakey_plain_length\": \"64\"" -"}"; - -const char *temp_kms_dec_key_req = "{" -" \"key_id\": \"$cmk_id$\"," -" \"cipher_text\": \"$cek_cipher$\"," -" \"datakey_cipher_length\": \"$cek_cipher_len$\"" -"}"; - -const char *temp_kms_dec_key_res = "{" -" \"key_id\": \"0d0466b0-e727-4d9c-b35d-f84bb474a37f\"," -" \"cipher_text\": 
\"00000000000098EF6ED309979B43003D2320D9F0E8EA9831A92759FB4B\"," -" \"datakey_plain_length\": \"64\"" -"}"; - -const char *temp_iam_err_res = "{" -" \"error\": {" -" \"message\": \"The request you have made requires authentication.\"," -" \"title\": \"Unauthorized\"" -" }" -"}"; - -const char *temp_kms_err_res = "{" -" \"error\": {" -" \"error_code\": \"KMS.XXXX\"," -" \"error_msg\": \"XXX\"" -" }" -"}"; - -const char *iam_agency_token_req = "{" -" \"auth\": {" -" \"identity\": {" -" \"methods\": [\"assume_role\"]," -" \"assume_role\": {" -" \"domain_name\": \"$domain_name$\"," -" \"agency_name\": \"$agency_name$\"" -" }" -" }," -" \"scope\": {" -" \"project\": {" -" \"name\": \"$project_name$\"" -" }" -" }" -" }" -"}"; - -const char *kms_create_dek_req = "{" -" \"datakey_length\": \"128\"," -" \"key_id\": \"$cmk_id$\"" -"}"; - -const char *kms_decrypt_dek_req = "{" -" \"cipher_text\": \"$dek_cipher$\"," -" \"datakey_cipher_length\": \"16\"," -" \"key_id\": \"$cmk_id$\"" -"}"; diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/kms_httpmsg_temp.json b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/kms_restful_temp.ini similarity index 76% rename from src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/kms_httpmsg_temp.json rename to src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/kms_restful_temp.ini index 24e7d0d92..3f87d164e 100644 --- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/convert_jsonfile/kms_httpmsg_temp.json +++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/kms_restful_temp.ini @@ -1,5 +1,7 @@ -{ - "temp_iam_auth_req" : { +#define JS_TO_STR(R) #R + +const char *temp_iam_auth_req = JS_TO_STR(( + { "auth": { "identity": { "methods": ["password"], @@ -19,16 +21,17 @@ } } } - }, + } +)); - "aa" : { - "auth": {"identity": {"methods": ["password"],"password": {"user": {"domain": {"name": "DBS_gauss_hw05056319_01"},"name": "DBS_gauss_hw05056319_01","password": "Gauss_234"}},},"scope": {"domain": {"name": "DBS_gauss_hw05056319_01"}}}}, - - "temp_kms_select_key_req" : { +const char *temp_kms_select_key_req = JS_TO_STR(( + { "key_id": "$cmk_id$" - }, + } +)); - "temp_kms_select_key_res" : { +const char *temp_kms_select_key_res = JS_TO_STR(( + { "key_info": { "key_id": "0d0466b0-e727-4d9c-b35d-f84bb474a37f", "domain_id": "b168fe00ff56492495a7d22974df2d0b", @@ -44,44 +47,56 @@ "origin":"kms", "key_rotation_enabled":"false", "sys_enterprise_project_id ": "0" - } - }, + } + } +)); - "temp_kms_enc_key_req" : { +const char *temp_kms_enc_key_req = JS_TO_STR(( + { "key_id": "$cmk_id$", "plain_text":"$cek_plain$", "datakey_plain_length": "$cek_plain_len$" - }, + } +)); - "temp_kms_enc_key_res" : { +const char *temp_kms_enc_key_res = JS_TO_STR(( + { "key_id": "0d0466b0-e727-4d9c-b35d-f84bb474a37f", "plain_text":"00000000000000000079B43003D2320D9F0E8EA9831A92759FB4B", "datakey_plain_length": "64" - }, + } +)); - "temp_kms_dec_key_req" : { +const char *temp_kms_dec_key_req = JS_TO_STR(( + { "key_id": "$cmk_id$", "cipher_text":"$cek_cipher$", "datakey_plain_length": "$cek_cipher_len$" - }, + } +)); - "temp_kms_dec_key_res" : { +const char *temp_kms_dec_key_res = JS_TO_STR(( + { "key_id": "0d0466b0-e727-4d9c-b35d-f84bb474a37f", "cipher_text":"00000000000098EF6ED309979B43003D2320D9F0E8EA9831A92759FB4B", "datakey_plain_length": "64" - }, - - "temp_kms_err_res" : { + } +)); + +const char *temp_kms_err_res = 
JS_TO_STR((
+    {
         "error": {
             "error_code": "KMS.XXXX",
             "error_msg": "XXX"
         }
-    },
+    }
+));
 
-    "temp_iam_err_res" : {
+const char *temp_iam_err_res = JS_TO_STR((
+    {
         "error": {
             "message": "The request you have made requires authentication.",
             "title": "Unauthorized"
         }
     }
 }
-}
+));
\ No newline at end of file
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.cpp b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.cpp
index 7916db3e7..a656fac95 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.cpp
+++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.cpp
@@ -43,7 +43,7 @@
 #include "cmkem_comm_algorithm.h"
 #include "reg_hook_frame.h"
 
-#include "./convert_jsonfile/kms_httpmsg_temp.ini"
+#include "./kms_restful_temp.ini"
 
 #ifdef ENABLE_UT
 #define static
@@ -90,20 +90,31 @@ static ProcessPolicy decrypt_cek_cipher_hookfunc(CmkemUStr *cek_cipher, CmkIdent
  */
 cJSON *get_json_temp(KmsHttpMsgType json_tree_type)
 {
-    switch (json_tree_type) {
-        case IAM_AUTH_REQ:
-            return cJSON_Parse(temp_iam_auth_req);
-        case KMS_SELECT_CMK_REQ:
-            return cJSON_Parse(temp_kms_select_key_req);
-        case KMS_ENC_CEK_REQ:
-            return cJSON_Parse(temp_kms_enc_key_req);
-        case KMS_DEC_CEK_REQ:
-            return cJSON_Parse(temp_kms_dec_key_req);
-        default:
-            break;
-    }
+    const char *temp_in = NULL;
+    cJSON *json_out = NULL;
+
+    typedef struct {
+        KmsHttpMsgType type;
+        const char *temp;
+    } JsonTemp;
 
-    return NULL;
+    const JsonTemp json_tbl[] = {
+        {IAM_AUTH_REQ, temp_iam_auth_req},
+        {KMS_SELECT_CMK_REQ, temp_kms_select_key_req},
+        {KMS_ENC_CEK_REQ, temp_kms_enc_key_req},
+        {KMS_DEC_CEK_REQ, temp_kms_dec_key_req},
+    };
+
+    temp_in = json_tbl[json_tree_type].temp;
+
+    /*
+     * the format of temp_in is "(str_with_brackets)"; we need to strip the enclosing brackets:
+     * temp_in + 1 : skip the left bracket
+     * strlen(temp_in) - 2 : drop the right bracket
+     */
+    json_out = cJSON_ParseWithLength(temp_in + 1, strlen(temp_in) - 2);
+
+    return json_out;
 }
 
 char *get_iam_auth_req_jsontemp(const char *user_name, const char *password, const char *domain_name,
@@ -325,6 +336,10 @@ void free_kms_cache(size_t cache_id)
     if (cache_id >= KMS_CACHE_TBL_LEN) {
         return;
     }
+
+    errno_t rc = memset_s(kms_cache_tbl[cache_id], sizeof(CachedAuthInfo), 0, sizeof(CachedAuthInfo));
+    securec_check_c(rc, "", "");
+
     cmkem_free(kms_cache_tbl[cache_id]);
 }
 
diff --git a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.h b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.h
index fb7164f2a..c4c8f3c7c 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.h
+++ b/src/common/interfaces/libpq/client_logic_hooks/cmk_entity_manager_hooks/register_huawei_kms.h
@@ -42,12 +42,9 @@ const int KMS_PLAIN_PACKET_LEN = 16;
 
 typedef enum {
     IAM_AUTH_REQ = 0,
-    IAM_AGENCY_TOKEN_REQ,
     KMS_SELECT_CMK_REQ,
     KMS_ENC_CEK_REQ,
     KMS_DEC_CEK_REQ,
-    KMS_GEN_DEK_REQ,
-    KMS_DEC_DEK_REQ,
 } KmsHttpMsgType;
 
 extern CmkemErrCode get_kms_err_type(const char *kms_errmsg_body);
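Note on the template mechanism above: the JSON templates now live in an .ini file that is #include'd as C source. Wrapping each template in (( ... )) lets the preprocessor's stringizing operator turn the whole JSON body, embedded commas and double quotes included, into one string literal, and get_json_temp() strips the outer parentheses before parsing. Also note that the table-driven lookup indexes json_tbl directly with the enum value, so callers must pass only the four request types left in KmsHttpMsgType; the old switch returned NULL for anything else. A minimal self-contained sketch of the technique (demo_temp and parse_demo are illustrative names, not part of the patch; cJSON_ParseWithLength requires cJSON 1.7.13 or newer):

    #include <string.h>
    #include "cjson/cJSON.h"

    #define JS_TO_STR(R) #R /* stringize: embedded quotes arrive pre-escaped */

    /* expands to "( { \"key_id\": \"$cmk_id$\" } )" */
    static const char *demo_temp = JS_TO_STR(( { "key_id": "$cmk_id$" } ));

    static cJSON *parse_demo(void)
    {
        /* demo_temp + 1 skips the leading '(' and len - 2 drops both parentheses */
        return cJSON_ParseWithLength(demo_temp + 1, strlen(demo_temp) - 2);
    }

The double parentheses in the macro invocation are what keep the preprocessor from splitting the JSON body at its commas.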
diff --git a/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/Makefile b/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/Makefile
index 43aaaf54f..0afa75adc 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/Makefile
+++ b/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/Makefile
@@ -16,9 +16,11 @@ top_builddir = ../../../../../../
 NAME=client_logic_encryption_hooks
 
 override CPPFLAGS := -DFRONTEND -DPGXC -Wno-write-strings -fstack-protector-all -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/ -I$(top_builddir)/src/include -I$(top_builddir)/src/include/libpq -I$(top_builddir)/src/common/interfaces/libpq -I$(top_builddir)/src/common/interfaces/libpq/client_logic_hooks/ -I.
-override CPPFLAGS += -lsecurec -lssl -lcrypto
+override CPPFLAGS += -l$(SECURE_C_CHECK) -lssl -lcrypto
 ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no)
-override CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
+    ifneq ($(enable_lite_mode), yes)
+        override CPPFLAGS += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -l$(SECURE_C_CHECK) -L$(LIBKMC_LIB_PATH) -lkmc
+    endif
 endif
 override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -shared
 override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -shared
diff --git a/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/encryption_column_hook_executor.cpp b/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/encryption_column_hook_executor.cpp
index 6596c788b..e11baedf3 100644
--- a/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/encryption_column_hook_executor.cpp
+++ b/src/common/interfaces/libpq/client_logic_hooks/encryption_hooks/encryption_column_hook_executor.cpp
@@ -327,7 +327,7 @@ bool EncryptionColumnHookExecutor::pre_create(PGClientLogic &column_encryption,
     StringArgs &new_args)
 {
     bool has_user_set_cek = false;
-    
+
     EncryptionGlobalHookExecutor *encryption_global_hook_executor =
         dynamic_cast<EncryptionGlobalHookExecutor *>(m_global_hook_executor);
     if (!encryption_global_hook_executor) {
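The pre_create context above depends on a checked downcast: the column-level executor holds a generic global hook executor pointer and must verify at runtime that it is paired with the encryption-specific variant before using it. A short standalone sketch of the pattern (the class bodies are hypothetical stand-ins for the executor hierarchy in this patch):

    /* Hypothetical stand-ins for the executor types used above. */
    class GlobalHookExecutor {
    public:
        virtual ~GlobalHookExecutor() {}
    };
    class EncryptionGlobalHookExecutor : public GlobalHookExecutor {};

    static bool pre_create_demo(GlobalHookExecutor *m_global_hook_executor)
    {
        /* dynamic_cast returns nullptr on a type mismatch, so an unexpected
         * executor type is rejected cleanly instead of causing undefined behavior */
        EncryptionGlobalHookExecutor *enc =
            dynamic_cast<EncryptionGlobalHookExecutor *>(m_global_hook_executor);
        if (enc == nullptr) {
            return false;
        }
        return true;
    }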
diff --git a/src/common/interfaces/libpq/client_logic_processor/Makefile b/src/common/interfaces/libpq/client_logic_processor/Makefile
index 69d00c82e..703a62f40 100644
--- a/src/common/interfaces/libpq/client_logic_processor/Makefile
+++ b/src/common/interfaces/libpq/client_logic_processor/Makefile
@@ -22,7 +22,7 @@ override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -fstack-protector-all
 
 # We can't use Makefile variables here because the MSVC build system scrapes
 # OBJS from this file.
-OBJS=raw_value.o raw_values_cont.o processor_utils.o stmt_processor.o ../frontend_parser/parser.o ../client_logic_common/client_logic_utils.o where_clause_processor.o values_processor.o create_stmt_processor.o
+OBJS=raw_value.o raw_values_cont.o processor_utils.o post_stmt_processor.o stmt_processor.o func_processor.o ../frontend_parser/parser.o ../client_logic_common/client_logic_utils.o where_clause_processor.o values_processor.o create_stmt_processor.o
 OBJS+=prepared_statements_list.o prepared_statement.o func_hardcoded_values.o
 
 include $(top_builddir)/src/Makefile.global
diff --git a/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.cpp b/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.cpp
index 497082516..876fb9d8f 100644
--- a/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.cpp
@@ -79,9 +79,6 @@ bool FuncHardcodedValues::process(const List *options, StatementData *statement_
     /* process hardcoded values in each query */
     size_t body_location_offset = 0;
     for (size_t i = 0; i < bodies_size; ++i) {
-        if (bodies[i] == NULL) {
-            return false;
-        }
         if (!parse_body(language, bodies[i], delimiter_location, delimiter_size, &body_location_offset,
             statement_data)) {
             return false;
diff --git a/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.h b/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.h
index 0e31beab9..07a8f347f 100644
--- a/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.h
+++ b/src/common/interfaces/libpq/client_logic_processor/func_hardcoded_values.h
@@ -114,4 +114,4 @@ private:
     static constexpr const char *EMPTY_DOLLAR_TAG = "$$";
 };
 
-#endif
\ No newline at end of file
+#endif
diff --git a/src/common/interfaces/libpq/client_logic_processor/func_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/func_processor.cpp
index c72158a94..edc2dcf01 100755
--- a/src/common/interfaces/libpq/client_logic_processor/func_processor.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/func_processor.cpp
@@ -31,10 +31,17 @@
 #include "values_processor.h"
 #include "func_hardcoded_values.h"
 
-bool func_processor::run_pre_create_function_stmt(const List* options, StatementData* statement_data, bool is_do_stmt)
+/**
+    process the query in CREATE FUNCTION, CREATE PROCEDURE or DO
+    @param[in] list of DefElem nodes - they are derived either from the CreateFunctionStmt object or the DoStmt object
+    @param[in] StatementData - the current client logic state machine for the query
+    @return boolean; returns false only on severe, unexpected errors
+*/
+
+bool func_processor::run_pre_create_function_stmt(const List* options, StatementData *statement_data, bool is_do_stmt)
 {
     FuncHardcodedValues::process(options, statement_data, is_do_stmt);
-    return true;
+    return true; // errors from hardcoded-value processing are not fatal here
 }
 
 bool func_processor::process(const ExprPartsList* target_expr_vec, StatementData* statement_data)
diff --git a/src/common/interfaces/libpq/client_logic_processor/post_stmt_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/post_stmt_processor.cpp
new file mode 100644
index 000000000..ebbb36c33
--- /dev/null
+++ b/src/common/interfaces/libpq/client_logic_processor/post_stmt_processor.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * post_stmt_processor.cpp + * + * IDENTIFICATION + * src\common\interfaces\libpq\client_logic_processor\post_stmt_processor.cpp + * + * ------------------------------------------------------------------------- + */ + +#include "pg_config.h" + +#include "stmt_processor.h" +#include "cl_state.h" +#include "client_logic_cache/cache_refresh_type.h" +#include "client_logic_hooks/hooks_manager.h" +#include "libpq-fe.h" +#include "libpq-int.h" +#include "prepared_statement.h" +#include "prepared_statements_list.h" + +#include + +void Processor::remove_dropped_column_settings(PGconn *conn, const bool is_success) +{ + Assert(conn->client_logic && conn->client_logic->enable_client_encryption); + if (conn->client_logic->droppedColumnSettings_size == 0) { + return; + } + + if (!is_success) { + conn->client_logic->droppedColumnSettings_size = 0; + return; + } + + for (size_t i = 0; i < conn->client_logic->droppedColumnSettings_size; ++i) { + const char *object_name = conn->client_logic->droppedColumnSettings[i].data; + HooksManager::ColumnSettings::set_deletion_expected(*conn->client_logic, object_name, false); + } + conn->client_logic->droppedColumnSettings_size = 0; +} + +void Processor::remove_dropped_global_settings(PGconn *conn, const bool is_success) +{ + ObjName *cur_cmk = conn->client_logic->droppedGlobalSettings; + ObjName *to_free = NULL; + + Assert(conn->client_logic && conn->client_logic->enable_client_encryption); + if (cur_cmk == NULL) { + return; + } + + if (!is_success) { + free_obj_list(conn->client_logic->droppedGlobalSettings); + conn->client_logic->droppedGlobalSettings = NULL; + return; + } + + while (cur_cmk != NULL) { + HooksManager::GlobalSettings::set_deletion_expected(*conn->client_logic, cur_cmk->obj_name, false); + to_free = cur_cmk; + cur_cmk = cur_cmk->next; + libpq_free(to_free); + } + conn->client_logic->droppedGlobalSettings = NULL; +} + +const bool Processor::remove_droppend_schemas(PGconn *conn, const bool is_success) +{ + Assert(conn->client_logic && conn->client_logic->enable_client_encryption); + if (conn->client_logic->droppedSchemas_size == 0) { + return true; + } + + if (!is_success) { + conn->client_logic->droppedSchemas_size = 0; + return true; + } + + for (size_t i = 0; i < conn->client_logic->droppedSchemas_size; ++i) { + const char *object_name = conn->client_logic->droppedSchemas[i].data; + HooksManager::GlobalSettings::set_deletion_expected(*conn->client_logic, object_name, true); + HooksManager::ColumnSettings::set_deletion_expected(*conn->client_logic, object_name, true); + conn->client_logic->m_cached_column_manager->remove_schema(object_name); + } + conn->client_logic->droppedSchemas_size = 0; + return true; +} +bool Processor::accept_pending_statements(PGconn *conn, bool is_success) +{ + Assert(conn->client_logic && conn->client_logic->enable_client_encryption); + if (!is_success) { + conn->client_logic->pendingStatements->clear(); + return true; + } + + 
conn->client_logic->preparedStatements->merge(conn->client_logic->pendingStatements);
+    conn->client_logic->pendingStatements->clear();
+    return true;
+}
+
+void handle_post_set_stmt(PGconn *conn, const bool is_success)
+{
+    if (!is_success) {
+        return;
+    }
+    if (conn->client_logic->val_to_update & updateGucValues::GUC_ROLE) {
+        conn->client_logic->m_cached_column_manager->set_user_schema(conn->client_logic->tmpGucParams.role.c_str());
+        conn->client_logic->gucParams.role = conn->client_logic->tmpGucParams.role;
+        conn->client_logic->val_to_update ^= updateGucValues::GUC_ROLE;
+    }
+    if (conn->client_logic->val_to_update & updateGucValues::SEARCH_PATH) {
+        conn->client_logic->m_cached_column_manager->load_search_path(
+            conn->client_logic->tmpGucParams.searchpathStr.c_str(),
+            conn->client_logic->gucParams.role.c_str());
+        conn->client_logic->val_to_update ^= updateGucValues::SEARCH_PATH;
+    }
+    if (conn->client_logic->val_to_update & updateGucValues::BACKSLASH_QUOTE) {
+        conn->client_logic->gucParams.backslash_quote = conn->client_logic->tmpGucParams.backslash_quote;
+        conn->client_logic->val_to_update ^= updateGucValues::BACKSLASH_QUOTE;
+    }
+    if (conn->client_logic->val_to_update & updateGucValues::CONFORMING) {
+        conn->client_logic->gucParams.standard_conforming_strings =
+            conn->client_logic->tmpGucParams.standard_conforming_strings;
+        conn->client_logic->val_to_update ^= updateGucValues::CONFORMING;
+    }
+    if (conn->client_logic->val_to_update & updateGucValues::ESCAPE_STRING) {
+        conn->client_logic->gucParams.escape_string_warning = conn->client_logic->tmpGucParams.escape_string_warning;
+        conn->client_logic->val_to_update ^= updateGucValues::ESCAPE_STRING;
+    }
+}
+
+bool Processor::run_post_query(PGconn *conn, bool force_error)
+{
+    if (!conn) {
+        return false;
+    }
+
+    Assert(conn->client_logic && conn->client_logic->enable_client_encryption);
+    if (conn->client_logic->rawValuesForReplace) {
+        conn->client_logic->rawValuesForReplace->clear();
+    }
+
+    if (!conn->client_logic->raw_values_for_post_query.empty()) {
+        conn->client_logic->raw_values_for_post_query.clear();
+    }
+    char last_stmt_name[NAMEDATALEN];
+    errno_t rc = EOK;
+    rc = memset_s(last_stmt_name, NAMEDATALEN, 0, NAMEDATALEN);
+    securec_check_c(rc, "\0", "\0");
+    /* swap local lastStmtName with lastStmtName object on connection */
+
+    check_memcpy_s(
+        memcpy_s(last_stmt_name, NAMEDATALEN, conn->client_logic->lastStmtName, NAMEDATALEN));
+    check_memset_s(memset_s(conn->client_logic->lastStmtName, NAMEDATALEN, 0, NAMEDATALEN));
+
+    int last_result = conn->client_logic->m_lastResultStatus;
+    conn->client_logic->m_lastResultStatus = PGRES_EMPTY_QUERY;
+
+    bool is_success = (last_result == PGRES_COMMAND_OK && !force_error);
+
+    accept_pending_statements(conn, is_success);
+    remove_droppend_schemas(conn, is_success);
+    remove_dropped_global_settings(conn, is_success);
+    remove_dropped_column_settings(conn, is_success);
+    handle_post_set_stmt(conn, is_success);
+    conn->client_logic->clear_functions_list();
+    if (!is_success || (conn->queryclass != PGQUERY_SIMPLE && conn->queryclass != PGQUERY_EXTENDED)) {
+        /* we only want to process successful queries that were actually executed */
+        return true;
+    }
+
+    PreparedStatement *prepared_statement = conn->client_logic->preparedStatements->get_or_create(last_stmt_name);
+    if (!prepared_statement) {
+        return false;
+    }
+
+    if ((prepared_statement->cacheRefresh & CacheRefreshType::GLOBAL_SETTING) == CacheRefreshType::GLOBAL_SETTING &&
+        prepared_statement->m_function_name[0] != '\0') {
+        /*
+         * run the post_create hook after the arguments have been validated and the Global Setting is sure to
+         * be created. We conduct this operation here to support incremental executions of post_create. So we only call
+         * post_create for the new Global Setting and not for existing Global Settings. We actually create a copy of the
+         * hook here but we don't save it in memory - we toss it right away. The hook will be saved in memory when the
+         * CacheLoader loads it from the catalog table
+         */
+        bool ret = HooksManager::GlobalSettings::post_create(*conn->client_logic, prepared_statement->m_function_name,
+            prepared_statement->m_string_args);
+        if (!ret) {
+            return false;
+        }
+    }
+    /*
+     * we override the cacheRefreshType from the (prepared) statement object
+     * otherwise, the cacheRefreshType is filled with the value from the getReadyForQuery
+     */
+    conn->client_logic->cacheRefreshType = prepared_statement->cacheRefresh;
+    prepared_statement->cacheRefresh = CacheRefreshType::CACHE_NONE;
+    conn->client_logic->m_cached_column_manager->load_cache(conn);
+    return true;
+}
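The force_error parameter added to run_post_query() above turns the post-query pass into a client-side rollback hook as well: run_pre_query() (see the stmt_processor.cpp hunk later in this patch) calls run_post_query(conn, true) when pre-processing of an outer query fails, so pending prepared statements, dropped-object bookkeeping, and temporary GUC values are discarded exactly as they would be after a server-reported error. An illustrative calling sequence (process_one_query is a hypothetical driver, not part of the patch):

    /* Sketch, assuming a PGconn with client logic enabled. */
    static bool process_one_query(PGconn *conn, StatementData *statement_data)
    {
        if (!Processor::run_pre_query(statement_data, false)) {
            /* run_pre_query already invoked run_post_query(conn, true) for this
             * outer query, rolling back the pending client-logic state */
            return false;
        }
        /* ... send the rewritten query and collect the results here ... */
        return Processor::run_post_query(conn); /* force_error defaults to false */
    }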
diff --git a/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp b/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp
index c94cfe649..2e2bb8b52 100644
--- a/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/raw_value.cpp
@@ -104,7 +104,7 @@ bool RawValue::process(const ICachedColumn *cached_column, char *err_msg)
         allocated = true;
     }
 
-    binary = Format::text_to_binary((const PGconn*)m_conn, text_value, cached_column->get_origdatatype_oid(), 0,
+    binary = Format::text_to_binary((const PGconn*)m_conn, text_value, cached_column->get_origdatatype_oid(),
         cached_column->get_origdatatype_mod(), &binary_size, err_msg);
     if (allocated) {
         free(text_value);
@@ -129,7 +129,7 @@ bool RawValue::process(const ICachedColumn *cached_column, char *err_msg)
     rcs = memcpy_s(binary, binary_size, m_data_value, m_data_size);
     securec_check_c(rcs, "", "");
     unsigned char *result = Format::verify_and_adjust_binary(binary, &binary_size,
-        cached_column->get_origdatatype_oid(), 0, cached_column->get_origdatatype_mod(), err_msg);
+        cached_column->get_origdatatype_oid(), cached_column->get_origdatatype_mod(), err_msg);
     if (!result) {
         if (strlen(err_msg) == 0) {
             check_sprintf_s(sprintf_s(err_msg, MAX_ERRMSG_LENGTH, "failed to convert text to binary"));
diff --git a/src/common/interfaces/libpq/client_logic_processor/raw_value.h b/src/common/interfaces/libpq/client_logic_processor/raw_value.h
index 510aa2d8a..f70489a5e 100644
--- a/src/common/interfaces/libpq/client_logic_processor/raw_value.h
+++ b/src/common/interfaces/libpq/client_logic_processor/raw_value.h
@@ -80,7 +80,6 @@ public:
 
     /* the connection object */
     PGconn *m_conn;
-
 private:
     void free_processed_data();
     int ref_count;
diff --git a/src/common/interfaces/libpq/client_logic_processor/raw_values_list.cpp b/src/common/interfaces/libpq/client_logic_processor/raw_values_list.cpp
index 3d90c9304..718abdcf4 100644
--- a/src/common/interfaces/libpq/client_logic_processor/raw_values_list.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/raw_values_list.cpp
@@ -40,19 +40,14 @@ void RawValuesList::clear()
 {
     if (m_should_free_values) {
         for (size_t i = 0; i < m_raw_values_size; ++i) {
-            if
(m_raw_values[i] == NULL) { - continue; - } - - m_raw_values[i]->dec_ref_count(); - if (m_raw_values[i]->safe_to_delete()) { - delete m_raw_values[i]; - m_raw_values[i] = NULL; - } + safe_delete(i); } } - free(m_raw_values); - m_raw_values = NULL; + + if (m_raw_values != NULL) { + free(m_raw_values); + m_raw_values = NULL; + } m_raw_values_size = 0; } @@ -126,17 +121,8 @@ bool RawValuesList::set(size_t pos, RawValue *raw_value) return true; } -bool RawValuesList::erase(size_t pos, bool is_delete) +void RawValuesList::safe_delete(size_t pos) { - if (pos >= m_raw_values_size) { - return false; - } - - if (!is_delete) { - m_raw_values[pos] = NULL; - return true; - } - if (m_raw_values[pos]) { m_raw_values[pos]->dec_ref_count(); if (m_raw_values[pos]->safe_to_delete()) { @@ -144,13 +130,26 @@ bool RawValuesList::erase(size_t pos, bool is_delete) m_raw_values[pos] = NULL; } } +} + +bool RawValuesList::erase(size_t pos, bool is_delete) +{ + if (pos >= m_raw_values_size) { + return false; + } + + safe_delete(pos); + if (!is_delete) { + m_raw_values[pos] = NULL; + return true; + } + std::swap(m_raw_values[pos], m_raw_values[m_raw_values_size - 1]); m_raw_values[m_raw_values_size - 1] = NULL; --m_raw_values_size; return true; } - int RawValuesList::partition_by_location(int lo, int hi) { RawValue *raw_value_pivot = m_raw_values[hi]; @@ -221,4 +220,4 @@ bool RawValuesList::gen_values_from_statement(const StatementData *statement_dat set(param_num, raw_value); } return true; -} \ No newline at end of file +} diff --git a/src/common/interfaces/libpq/client_logic_processor/raw_values_list.h b/src/common/interfaces/libpq/client_logic_processor/raw_values_list.h index 868598c90..6db3d22fa 100644 --- a/src/common/interfaces/libpq/client_logic_processor/raw_values_list.h +++ b/src/common/interfaces/libpq/client_logic_processor/raw_values_list.h @@ -49,6 +49,7 @@ public: private: void quicksort_by_location(int lo, int high); int partition_by_location(int lo, int high); + void safe_delete(size_t pos); size_t m_raw_values_size; bool m_should_free_values; }; diff --git a/src/common/interfaces/libpq/client_logic_processor/record_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/record_processor.cpp index 804a11e26..8bc6a3812 100644 --- a/src/common/interfaces/libpq/client_logic_processor/record_processor.cpp +++ b/src/common/interfaces/libpq/client_logic_processor/record_processor.cpp @@ -29,8 +29,9 @@ #include "client_logic_cache/icached_rec.h" #include "client_logic_cache/dataTypes.def" #include "client_logic_cache/icached_column_manager.h" + bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, size_t processed_data_size, - const int* original_typesid, int format, unsigned char** plain_text, + const int* original_typesid, const size_t original_typesid_size, int format, unsigned char** plain_text, size_t& plain_text_size, bool* is_decrypted) { DecryptDataRes dec_dat_res = DEC_DATA_ERR; @@ -52,13 +53,13 @@ bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, const char* end = NULL; int idx = 0; bool first = true; - while (pdata) { + while (pdata && (size_t) idx < original_typesid_size) { if (!first) { /* if this is not the first variable in the row, we need to add comma and space */ size_t cur_size = strlen(result); - /* 2: the count of char ', ' between 2 results, such as 'a, b' */ - const int char_num = 2; - new_res = (char*)libpq_realloc(result, cur_size, cur_size + char_num + 1); + /* 1: the count of char ',' between 2 results, such as 
'a,b' */ + const int char_num = m_NULL_TERMINATION_SIZE; + new_res = (char*)libpq_realloc(result, cur_size, cur_size + char_num + m_NULL_TERMINATION_SIZE); if (new_res == NULL) { libpq_free(result); fprintf(stderr, "allocation failure when trying to deprocess record\n"); @@ -66,7 +67,6 @@ bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, } result = new_res; result[cur_size] = ','; - result[cur_size + 1] = ' '; result[cur_size + char_num] = '\0'; } first = false; @@ -83,13 +83,12 @@ bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, &plain, plain_size, process_status); if (dec_dat_res == DEC_DATA_SUCCEED) { *is_decrypted = true; - size_t oldsize = strlen(result); - newsize = oldsize + plain_size + 1 + 1; + newsize = oldsize + plain_size + m_NULL_TERMINATION_SIZE; if (original_id == BYTEAOID) { - newsize += 1; + newsize += m_BYTEA_PREFIX_SIZE; } - new_res = (char*)libpq_realloc(result, oldsize + 1, newsize); + new_res = (char*)libpq_realloc(result, oldsize + m_NULL_TERMINATION_SIZE, newsize); if (new_res == NULL) { libpq_free(result); libpq_free(plain); @@ -100,9 +99,11 @@ bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, if (original_id != BYTEAOID) { check_strncat_s(strncat_s(result, newsize, (char*)plain, plain_size)); } else { - size_t result_size = strlen(result); + size_t result_size = strlen(result) + m_NULL_TERMINATION_SIZE; result[result_size] = '"'; - result[result_size + 1] = '\0'; + result[result_size + m_NULL_TERMINATION_SIZE] = '\0'; + const char* bytea_begin = "\"\\"; + check_strncat_s(strncat_s(result, newsize, bytea_begin, m_BYTEA_PREFIX_SIZE)); check_strncat_s(strncat_s(result, newsize, (char*)plain, plain_size)); end--; /* for the quote will be copied */ } @@ -119,7 +120,7 @@ bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, if (end) { end++; size_t oldsize = strlen(result); - newsize = oldsize + processed_data_size - (end - processed_data) + 1; + newsize = oldsize + processed_data_size - (end - processed_data) + m_NULL_TERMINATION_SIZE; new_res = (char*)libpq_realloc(result, oldsize + 1, newsize); if (new_res == NULL) { libpq_free(result); @@ -128,8 +129,8 @@ bool RecordProcessor::DeProcessRecord(PGconn* conn, const char* processed_data, } result = new_res; check_strncat_s(strncat_s(result, newsize, end, processed_data_size - (end - processed_data))); + result[newsize - m_NULL_TERMINATION_SIZE] = '\0'; } - result[newsize - 1] = '\0'; *plain_text = (unsigned char*)result; plain_text_size = newsize; return true; diff --git a/src/common/interfaces/libpq/client_logic_processor/record_processor.h b/src/common/interfaces/libpq/client_logic_processor/record_processor.h index 5ebe0284f..6a6ae6ecd 100644 --- a/src/common/interfaces/libpq/client_logic_processor/record_processor.h +++ b/src/common/interfaces/libpq/client_logic_processor/record_processor.h @@ -30,6 +30,12 @@ typedef struct pg_conn PGconn; class RecordProcessor { public: static bool DeProcessRecord(PGconn* conn, const char* processedData, size_t processedDataSize, - const int* original_typesid, int format, unsigned char** plainText, size_t& plainTextSize, bool* is_decrypted); + const int* original_typesid, const size_t original_typesid_size, int format, + unsigned char** plainText, size_t& plainTextSize, bool* is_decrypted); + +private: + static const size_t m_NULL_TERMINATION_SIZE = 1; + static const size_t m_BYTEA_PREFIX_SIZE = 2; + }; -#endif \ No newline at end of file +#endif diff --git 
a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp
index 90a643fae..9535e2094 100755
--- a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp
+++ b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.cpp
@@ -1704,6 +1704,29 @@ bool Processor::run_pre_rlspolicy_using(const Node *stmt, StatementData *stateme
     return true;
 }
 
+bool Processor::run_pre_create_function_stmt(const CreateFunctionStmt *stmt, StatementData *statement_data)
+{
+    if (stmt == NULL || statement_data == NULL) {
+        return false;
+    }
+
+    if (stmt->parameters == NULL) {
+        return true;
+    }
+
+    foreach_cell (lc, stmt->parameters) {
+        FunctionParameter* fp = (FunctionParameter*) lfirst(lc);
+        const char* p_name = strVal(llast(fp->argType->names));
+        if (strcmp(p_name, "byteawithoutordercol") == 0 || strcmp(p_name, "byteawithoutorderwithequalcol") == 0) {
+            printfPQExpBuffer(&statement_data->conn->errorMessage,
+                libpq_gettext("ERROR(CLIENT): could not support functions when full encryption is on.\n"));
+            return false;
+        }
+    }
+
+    return true;
+}
+
 bool Processor::run_pre_create_rlspolicy_stmt(const CreateRlsPolicyStmt *stmt, StatementData *statement_data)
 {
     if (statement_data->GetCacheManager()->is_cache_empty()) {
@@ -1923,7 +1946,6 @@ bool Processor::run_pre_statement(const Node * const stmt, StatementData *statem
             /*
              * rewrite query in the CREATE VIEW clause if query has relevant columns
              */
-
             return run_pre_select_statement((SelectStmt *)((ViewStmt *)stmt)->query, statement_data);
         case T_DropStmt:
             return run_pre_drop_statement((DropStmt *)stmt, statement_data);
@@ -1939,9 +1961,17 @@
             return run_pre_create_rlspolicy_stmt((CreateRlsPolicyStmt *)stmt, statement_data);
         case T_AlterRlsPolicyStmt:
             return run_pre_alter_rlspolicy_stmt((AlterRlsPolicyStmt *)stmt, statement_data);
-        case T_CreateFunctionStmt:
-            current_statement->cacheRefresh |= CacheRefreshType::PROCEDURES;
-            return func_processor::run_pre_create_function_stmt(((CreateFunctionStmt*)stmt)->options, statement_data);
+        case T_CreateFunctionStmt: {
+            if (!run_pre_create_function_stmt((const CreateFunctionStmt *)stmt, statement_data)) {
+                return false;
+            }
+            List* options = ((CreateFunctionStmt*)stmt)->options;
+            bool res = func_processor::run_pre_create_function_stmt(options, statement_data);
+            if (res) {
+                current_statement->cacheRefresh |= CacheRefreshType::PROCEDURES;
+            }
+            return res;
+        }
         case T_DoStmt:
             return func_processor::run_pre_create_function_stmt(((DoStmt*)stmt)->args, statement_data, true);
         case T_MergeStmt:
@@ -2095,7 +2125,10 @@ bool Processor::run_pre_query(StatementData *statement_data, bool is_inner_query
     foreach (stmt_iter, stmts) {
         Node *stmt = (Node *)lfirst(stmt_iter);
         if (!run_pre_statement(stmt, statement_data)) {
-            conn->client_logic->pendingStatements->clear();
+            /* an inner query parsed from a function body may legitimately fail here, so only outer queries need the cleanup */
+            if (!is_inner_query) {
+                run_post_query(conn, true);
+            }
             return false;
         }
     }
@@ -2146,185 +2179,3 @@ bool Processor::run_pre_exec(StatementData *statement_data)
     statement_data->replace_raw_values();
     return true;
 }
-
-void Processor::remove_dropped_column_settings(PGconn *conn, const bool is_success)
-{
-    Assert(conn->client_logic && conn->client_logic->enable_client_encryption);
-    if (conn->client_logic->droppedColumnSettings_size == 0) {
-        return;
-    }
-
-    if (!is_success) {
-
conn->client_logic->droppedColumnSettings_size = 0; - return; - } - - for (size_t i = 0; i < conn->client_logic->droppedColumnSettings_size; ++i) { - const char *object_name = conn->client_logic->droppedColumnSettings[i].data; - HooksManager::ColumnSettings::set_deletion_expected(*conn->client_logic, object_name, false); - } - conn->client_logic->droppedColumnSettings_size = 0; -} - -void Processor::remove_dropped_global_settings(PGconn *conn, const bool is_success) -{ - ObjName *cur_cmk = conn->client_logic->droppedGlobalSettings; - ObjName *to_free = NULL; - - Assert(conn->client_logic && conn->client_logic->enable_client_encryption); - if (cur_cmk == NULL) { - return; - } - - if (!is_success) { - free_obj_list(conn->client_logic->droppedGlobalSettings); - conn->client_logic->droppedGlobalSettings = NULL; - return; - } - - while (cur_cmk != NULL) { - HooksManager::GlobalSettings::set_deletion_expected(*conn->client_logic, cur_cmk->obj_name, false); - to_free = cur_cmk; - cur_cmk = cur_cmk->next; - libpq_free(to_free); - } - conn->client_logic->droppedGlobalSettings = NULL; -} - -const bool Processor::remove_droppend_schemas(PGconn *conn, const bool is_success) -{ - Assert(conn->client_logic && conn->client_logic->enable_client_encryption); - if (conn->client_logic->droppedSchemas_size == 0) { - return true; - } - - if (!is_success) { - conn->client_logic->droppedSchemas_size = 0; - return true; - } - - for (size_t i = 0; i < conn->client_logic->droppedSchemas_size; ++i) { - const char *object_name = conn->client_logic->droppedSchemas[i].data; - HooksManager::GlobalSettings::set_deletion_expected(*conn->client_logic, object_name, true); - HooksManager::ColumnSettings::set_deletion_expected(*conn->client_logic, object_name, true); - conn->client_logic->m_cached_column_manager->remove_schema(object_name); - } - conn->client_logic->droppedSchemas_size = 0; - return true; -} -bool Processor::accept_pending_statements(PGconn *conn, bool is_success) -{ - Assert(conn->client_logic && conn->client_logic->enable_client_encryption); - if (!is_success) { - conn->client_logic->pendingStatements->clear(); - return true; - } - - conn->client_logic->preparedStatements->merge(conn->client_logic->pendingStatements); - conn->client_logic->pendingStatements->clear(); - return true; -} - -void handle_post_set_stmt(PGconn *conn, const bool is_success) -{ - if (!is_success) { - return; - } - if (conn->client_logic->val_to_update & updateGucValues::GUC_ROLE) { - conn->client_logic->m_cached_column_manager->set_user_schema(conn->client_logic->tmpGucParams.role.c_str()); - conn->client_logic->gucParams.role = conn->client_logic->tmpGucParams.role; - conn->client_logic->val_to_update ^= updateGucValues::GUC_ROLE; - } - if (conn->client_logic->val_to_update & updateGucValues::SEARCH_PATH) { - conn->client_logic->m_cached_column_manager->load_search_path( - conn->client_logic->tmpGucParams.searchpathStr.c_str(), - conn->client_logic->gucParams.role.c_str()); - conn->client_logic->val_to_update ^= updateGucValues::SEARCH_PATH; - } - if (conn->client_logic->val_to_update & updateGucValues::BACKSLASH_QUOTE) { - conn->client_logic->gucParams.backslash_quote = conn->client_logic->tmpGucParams.backslash_quote; - conn->client_logic->val_to_update ^= updateGucValues::BACKSLASH_QUOTE; - } - if (conn->client_logic->val_to_update & updateGucValues::CONFORMING) { - conn->client_logic->gucParams.standard_conforming_strings = - conn->client_logic->tmpGucParams.standard_conforming_strings; - conn->client_logic->val_to_update ^= 
updateGucValues::CONFORMING; - } - if (conn->client_logic->val_to_update & updateGucValues::ESCAPE_STRING) { - conn->client_logic->gucParams.escape_string_warning = conn->client_logic->tmpGucParams.escape_string_warning; - conn->client_logic->val_to_update ^= updateGucValues::ESCAPE_STRING; - } -} - -bool Processor::run_post_query(PGconn *conn) -{ - if (!conn) { - return false; - } - - Assert(conn->client_logic && conn->client_logic->enable_client_encryption); - if (conn->client_logic->rawValuesForReplace) { - conn->client_logic->rawValuesForReplace->clear(); - } - - conn->client_logic->raw_values_for_post_query.clear(); - char last_stmt_name[NAMEDATALEN]; - errno_t rc = EOK; - rc = memset_s(last_stmt_name, NAMEDATALEN, 0, NAMEDATALEN); - securec_check_c(rc, "\0", "\0"); - /* swap local lastStmtName with lastStmtName object on connection */ - - check_memcpy_s( - memcpy_s(last_stmt_name, NAMEDATALEN, conn->client_logic->lastStmtName, NAMEDATALEN)); - check_memset_s(memset_s(conn->client_logic->lastStmtName, NAMEDATALEN, 0, NAMEDATALEN)); - - int last_result = conn->client_logic->m_lastResultStatus; - conn->client_logic->m_lastResultStatus = PGRES_EMPTY_QUERY; - - bool is_success = (last_result == PGRES_COMMAND_OK); - - accept_pending_statements(conn, is_success); - remove_droppend_schemas(conn, is_success); - remove_dropped_global_settings(conn, is_success); - remove_dropped_column_settings(conn, is_success); - handle_post_set_stmt(conn, is_success); - conn->client_logic->clear_functions_list(); - if (!is_success || (conn->queryclass != PGQUERY_SIMPLE && conn->queryclass != PGQUERY_EXTENDED)) { - /* we only want to process successful queries that were actually executed */ - return true; - } - - PreparedStatement *prepared_statement = conn->client_logic->preparedStatements->get_or_create(last_stmt_name); - if (!prepared_statement) { - return false; - } - - if (!is_success || (conn->queryclass != PGQUERY_SIMPLE && conn->queryclass != PGQUERY_EXTENDED)) { - /* we only want to process successful queries that were actually executed */ - return true; - } - - if ((prepared_statement->cacheRefresh & CacheRefreshType::GLOBAL_SETTING) == CacheRefreshType::GLOBAL_SETTING && - prepared_statement->m_function_name[0] != '\0') { - /* - * run post_create hook requirements after the arguments have been validaity and the Global Setting is sure to - * be created We conduct this operation here to support incremental executions of post_create. So we only call - * post_create for the new Global Setting and not for existing Global Settings. We actually create a copy of the - * hook here but we don't save it in memory - we toss it right away. 
The hook will be saved in memory when the - * CacheLoader loads it from the catalog table - */ - bool ret = HooksManager::GlobalSettings::post_create(*conn->client_logic, prepared_statement->m_function_name, - prepared_statement->m_string_args); - if (!ret) { - return false; - } - } - /* - * we override the cacheRefreshType from the (prepared) statement object - * otherwise, the cacheRefreshType is filled with the value from the getReadyForQuery - */ - conn->client_logic->cacheRefreshType = prepared_statement->cacheRefresh; - prepared_statement->cacheRefresh = CacheRefreshType::CACHE_NONE; - conn->client_logic->m_cached_column_manager->load_cache(conn); - return true; -} diff --git a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h index 9c313d97f..bfae01832 100644 --- a/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h +++ b/src/common/interfaces/libpq/client_logic_processor/stmt_processor.h @@ -45,7 +45,7 @@ public: /* * * CREATE commands require running functions later to update local cache */ - static bool run_post_query(PGconn *conn); + static bool run_post_query(PGconn *conn, bool force_error = false); static bool accept_pending_statements(PGconn *conn, bool isSuccess = true); static bool deal_order_by_statement(const SelectStmt * const select_stmt, ICachedColumns *select_cached_columns, StatementData *statement_data); @@ -107,6 +107,7 @@ private: static bool run_pre_drop_schema_statement(const DropStmt *stmt, StatementData *statement_data); static bool run_pre_set_statement(const VariableSetStmt *stmt, StatementData *statement_data); static bool run_pre_exec_direct_statement(const ExecDirectStmt *stmt, StatementData *statement_data); + static bool run_pre_create_function_stmt(const CreateFunctionStmt *stmt, StatementData *statement_data); static bool run_pre_create_rlspolicy_stmt(const CreateRlsPolicyStmt *stmt, StatementData *statement_data); static bool run_pre_alter_rlspolicy_stmt(const AlterRlsPolicyStmt *stmt, StatementData *statement_data); static bool run_pre_rlspolicy_using(const Node *stmt, StatementData *statement_data); diff --git a/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp b/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp index 49f41ce0c..e6cb66c13 100644 --- a/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp +++ b/src/common/interfaces/libpq/client_logic_processor/values_processor.cpp @@ -45,10 +45,10 @@ static bool process_get_value_index(const ICachedColumns *cached_columns, size_t values_per_row_count, size_t m, size_t i, size_t &raw_value_index) { - const ICachedColumn *cached_column = cached_columns->at(i); if (cached_columns->is_in_scheme_order()) { /* this can happen when "INSERT INTO TABLE VALUES" contains less values than the number of columns in * the original table */ + const ICachedColumn *cached_column = cached_columns->at(i); if (cached_column->get_col_idx() > values_per_row_count) { return false; } @@ -86,13 +86,13 @@ static void process_prepare_state(const RawValue *raw_value, StatementData *stat if (!statement_data->params.new_param_values) { Assert(!statement_data->params.copy_sizes); statement_data->params.new_param_values = - (unsigned char **)calloc(sizeof(unsigned char *), statement_data->nParams); - statement_data->params.copy_sizes = (size_t *)calloc(sizeof(size_t), statement_data->nParams); + (unsigned char **)calloc(statement_data->nParams, sizeof(unsigned char 
*));
+        statement_data->params.copy_sizes = (size_t *)calloc(statement_data->nParams, sizeof(size_t));
         statement_data->params.nParams = statement_data->nParams;
     }
     Assert(!statement_data->params.new_param_values[raw_value->m_location]);
     statement_data->params.new_param_values[raw_value->m_location] =
-        (unsigned char *)calloc(copy_size, sizeof(unsigned char));
+        (unsigned char *)calloc(copy_size + 1, sizeof(unsigned char));
     if (statement_data->params.new_param_values[raw_value->m_location] == NULL) {
         fprintf(stderr, "ERROR(CLIENT): out of memory when processing state\n");
         return;
@@ -103,14 +103,14 @@ static void process_prepare_state(const RawValue *raw_value, StatementData *stat
         raw_value->m_processed_data, copy_size));
     if (!statement_data->params.adjusted_param_values) {
         statement_data->params.adjusted_param_values =
-            (const char **)calloc(statement_data->nParams, sizeof(const char *));
+            (const char **)calloc(statement_data->nParams, sizeof(const char *));
         if (statement_data->params.adjusted_param_values == NULL) {
             fprintf(stderr, "ERROR(CLIENT): out of memory when processing state\n");
             return;
         }
         statement_data->params.nParams = statement_data->nParams;
     }
-    
+
     if (statement_data->params.new_param_values[raw_value->m_location] != NULL) {
         statement_data->params.adjusted_param_values[raw_value->m_location] =
             (const char*)statement_data->params.new_param_values[raw_value->m_location];
@@ -254,17 +254,19 @@ bool ValuesProcessor::process_values(StatementData *statement_data, const ICache
         if (!process_inside_value(statement_data, raw_value, cached_column)) {
             return false;
         }
-
-        /* process the data for prepare statement */
-        process_prepare_state(raw_value, statement_data);
-
-        /*
-         * 1. realign locations inside the rawValue after data was processed and probably enlarged
-         * (does not apply if this is param)
-         * 2. add the rawValue to the list of rawValues intended for replacement in the original query to be sent to
-         * the client
-         */
-        if (!raw_value->m_is_param) {
+        if (raw_value->m_is_param) {
+            process_prepare_state(raw_value, statement_data);
+            /*
+             * add it to the raw values list; adding it to rawValuesForReplace is unnecessary,
+             * since the replacement of a param in the params array is done by the parameter index
+             * and we do not need to replace them all in one place
+             */
+            statement_data->conn->client_logic->raw_values_for_post_query.add(raw_value);
+        } else {
+            /* 1. realign locations inside the rawValue after data was processed and probably enlarged
+             * (does not apply if this is param)
+             * 2.
add the rawValue to the list of rawValues intended for replacement in the original + * query to be sent to the client */ int size_diff = (int)raw_value->m_processed_data_size - (int)raw_value->m_data_size; statement_data->offset += size_diff; for (size_t j = 1 + (raw_value_index + (m * values_per_row_count)); j < raw_values->size(); ++j) { @@ -272,8 +274,6 @@ bool ValuesProcessor::process_values(StatementData *statement_data, const ICache } statement_data->conn->client_logic->rawValuesForReplace->add(raw_value); raw_values->erase(raw_value_index, false); - } else { - statement_data->conn->client_logic->raw_values_for_post_query.add(raw_value); } } } @@ -426,7 +426,7 @@ DecryptDataRes ValuesProcessor::deprocess_value(PGconn *conn, const unsigned cha rc = memset_s(err_msg, MAX_ERRMSG_LENGTH, 0, MAX_ERRMSG_LENGTH); securec_check_c(rc, "\0", "\0"); unsigned char *result = - Format::restore_binary(*plain_text, plain_text_size, original_typeid, 0, -1, &result_size, err_msg); + Format::restore_binary(*plain_text, plain_text_size, original_typeid, &result_size, err_msg); if (result == NULL) { return DEC_DATA_ERR; } @@ -457,7 +457,7 @@ void ValuesProcessor::process_text_format(unsigned char **plain_text, size_t &pl ProcessStatus process_status, int original_typeid) { size_t result_size = 0; - char *res = Format::binary_to_text(*plain_text, plain_text_size, original_typeid, 0, -1, &result_size); + char *res = Format::binary_to_text(*plain_text, plain_text_size, original_typeid, &result_size); if (res == NULL) { fprintf(stderr, "ERROR(CLIENT): failed to convert binary to text\n"); return; diff --git a/src/common/interfaces/libpq/fe-auth.cpp b/src/common/interfaces/libpq/fe-auth.cpp index ced78655e..c28bca692 100644 --- a/src/common/interfaces/libpq/fe-auth.cpp +++ b/src/common/interfaces/libpq/fe-auth.cpp @@ -46,7 +46,9 @@ #include "libpq/sha2.h" #include "utils/syscall_lock.h" #ifndef WIN32 +#ifdef ENABLE_GSS #include "gssapi/gssapi_krb5.h" +#endif /* ENABLE_GSS */ #endif // WIN32 #ifdef KRB5 /* @@ -177,10 +179,11 @@ static int pg_krb5_sendauth(PGconn* conn) krb5_auth_context auth_context = NULL; krb5_error* err_ret = NULL; struct krb5_info info; + char* host = (conn->connhost) ? conn->connhost[conn->whichhost].host : NULL; info.pg_krb5_initialised = 0; - if (!((conn->pghost != NULL) && conn->pghost[0] != '\0')) { + if (!((host != NULL) && host[0] != '\0')) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("host name must be specified\n")); return STATUS_ERROR; } @@ -189,7 +192,7 @@ static int pg_krb5_sendauth(PGconn* conn) if (ret != STATUS_OK) return ret; - retval = krb5_sname_to_principal(info.pg_krb5_context, conn->pghost, conn->krbsrvname, KRB5_NT_SRV_HST, &server); + retval = krb5_sname_to_principal(info.pg_krb5_context, host, conn->krbsrvname, KRB5_NT_SRV_HST, &server); if (retval) { printfPQExpBuffer(&conn->errorMessage, "pg_krb5_sendauth: krb5_sname_to_principal: %s, err: %s\n", @@ -425,8 +428,9 @@ static int pg_GSS_startup(PGconn* conn) char* krbsrvname = NULL; char* krbhostname = NULL; errno_t rc = EOK; + char* host = (conn->connhost) ? conn->connhost[conn->whichhost].host : NULL; - if (!((conn->pghost != NULL) && conn->pghost[0] != '\0')) { + if (!((host != NULL) && host[0] != '\0')) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("host name must be specified\n")); return STATUS_ERROR; } @@ -620,6 +624,7 @@ static int pg_SSPI_startup(PGconn* conn, int use_negotiate) { SECURITY_STATUS r; TimeStamp expire; + char* host = (conn->connhost) ? 
conn->connhost[conn->whichhost].host : NULL; conn->sspictx = NULL; @@ -656,13 +661,13 @@ static int pg_SSPI_startup(PGconn* conn, int use_negotiate) * but not more complex. We can skip the @REALM part, because Windows will * fill that in for us automatically. */ - if (!((conn->pghost != NULL) && conn->pghost[0] != '\0')) { + if (!((host != NULL) && host[0] != '\0')) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("host name must be specified\n")); return STATUS_ERROR; } int krbsrvnameLen = strlen(conn->krbsrvname); - int pghostLen = strlen(conn->pghost); + int pghostLen = strlen(host); #ifndef WIN32 if (unlikely(krbsrvnameLen > PG_INT32_MAX - pghostLen - 2)) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("krb server name or host string is too long\n")); @@ -676,7 +681,7 @@ static int pg_SSPI_startup(PGconn* conn, int use_negotiate) #endif - int sspitarget_len = strlen(conn->krbsrvname) + strlen(conn->pghost) + 2; + int sspitarget_len = strlen(conn->krbsrvname) + strlen(host) + 2; #ifdef WIN32 conn->sspitarget = (char*)malloc(sspitarget_len); #else @@ -686,7 +691,7 @@ static int pg_SSPI_startup(PGconn* conn, int use_negotiate) printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return STATUS_ERROR; } - check_sprintf_s(sprintf_s(conn->sspitarget, sspitarget_len, "%s/%s", conn->krbsrvname, conn->pghost)); + check_sprintf_s(sprintf_s(conn->sspitarget, sspitarget_len, "%s/%s", conn->krbsrvname, host)); /* * Indicate that we're in SSPI authentication mode to make sure that @@ -848,15 +853,32 @@ static int pg_password_sendauth(PGconn* conn, const char* password, AuthRequest break; } +#ifdef ENABLE_LITE_MODE + case AUTH_REQ_SHA256_RFC: +#endif case AUTH_REQ_SHA256: { char* crypt_pwd2 = NULL; +#ifdef ENABLE_LITE_MODE + if ((SHA256_PASSWORD == conn->password_stored_method) || + (SHA256_PASSWORD_RFC == conn->password_stored_method) || + (PLAIN_PASSWORD == conn->password_stored_method)) { + if (areq == AUTH_REQ_SHA256_RFC) { + if (!pg_sha256_encrypt_v1( + password, conn->salt, strlen(conn->salt), (char*)buf, client_key_buf)) + return STATUS_ERROR; + } else { + if (!pg_sha256_encrypt( + password, conn->salt, strlen(conn->salt), (char*)buf, client_key_buf, conn->iteration_count)) + return STATUS_ERROR; + } +#else if (SHA256_PASSWORD == conn->password_stored_method || PLAIN_PASSWORD == conn->password_stored_method) { if (!pg_sha256_encrypt( password, conn->salt, strlen(conn->salt), (char*)buf, client_key_buf, conn->iteration_count)) return STATUS_ERROR; - +#endif rc = strncpy_s(server_key_string, sizeof(server_key_string), &buf[SHA256_LENGTH + SALT_STRING_LENGTH], @@ -1018,9 +1040,6 @@ static int pg_password_sendauth(PGconn* conn, const char* password, AuthRequest break; } - case AUTH_REQ_PASSWORD: - pwd_to_send = password; - break; /* * Notice: Authentication of send password directly are not currently supported. 
* need to: We remove the branch of AUTH_REQ_PASSWORD here for both implication and @@ -1190,20 +1209,32 @@ int pg_fe_sendauth(AuthRequest areq, PGconn* conn) case AUTH_REQ_MD5: case AUTH_REQ_MD5_SHA256: case AUTH_REQ_SHA256: +#ifdef ENABLE_LITE_MODE + case AUTH_REQ_SHA256_RFC: +#endif case AUTH_REQ_SM3: - case AUTH_REQ_PASSWORD: - int status; - conn->password_needed = true; - if (conn->pgpass == NULL || conn->pgpass[0] == '\0') { - printfPQExpBuffer( - &conn->errorMessage, libpq_gettext("FATAL: Invalid username/password,login denied.\n")); - return STATUS_ERROR; + { + int status; + char *password = NULL; + conn->password_needed = true; + if (conn->connhost != NULL) + password = conn->connhost[conn->whichhost].password; + + if (password == NULL) { + password = conn->pgpass; + } + + if (password == NULL || password[0] == '\0') { + printfPQExpBuffer( + &conn->errorMessage, libpq_gettext("FATAL: Invalid username/password,login denied.\n")); + return STATUS_ERROR; + } + if ((status = pg_password_sendauth(conn, password, areq)) != STATUS_OK) { + printfPQExpBuffer(&conn->errorMessage, "fe_sendauth: error sending password authentication\n"); + return STATUS_ERROR; + } + break; } - if ((status = pg_password_sendauth(conn, conn->pgpass, areq)) != STATUS_OK) { - printfPQExpBuffer(&conn->errorMessage, "fe_sendauth: error sending password authentication\n"); - return STATUS_ERROR; - } - break; case AUTH_REQ_SCM_CREDS: if (pg_local_sendauth(conn) != STATUS_OK) diff --git a/src/common/interfaces/libpq/fe-connect.cpp b/src/common/interfaces/libpq/fe-connect.cpp index cb050cdb5..76d1bce74 100644 --- a/src/common/interfaces/libpq/fe-connect.cpp +++ b/src/common/interfaces/libpq/fe-connect.cpp @@ -127,6 +127,7 @@ extern const char* libpqVersionString; #define DefaultOption "" #define DefaultAuthtype "" #define DefaultPassword "" +#define DefaultTargetSessionAttrs "any" #ifdef USE_SSL #define DefaultSSLMode "prefer" #else @@ -254,6 +255,10 @@ static const PQconninfoOption PQconninfoOptions[] = { */ {"connection_info", NULL, NULL, NULL, "Connection-Info", "", 8192, 0}, {"connectionExtraInfo", NULL, NULL, NULL, "Connection-Extra-Info", "", 1, 0}, + {"target_session_attrs", "PGTARGETSESSIONATTRS", + DefaultTargetSessionAttrs, NULL, + "Target-Session-Attrs", "", 15, /* sizeof("prefer-standby") = 15 */ + offsetof(struct pg_conn, target_session_attrs)}, /* Terminating entry --- MUST BE LAST */ {NULL, NULL, NULL, NULL, NULL, NULL, 0, 0}}; @@ -277,6 +282,24 @@ static int connectDBComplete(PGconn* conn); static void connectSetConninfo(PGconn* conn); static PGPing internal_ping(PGconn* conn); static void fillPGconn(PGconn* conn, PQconninfoOption* connOptions); +static void release_conn_addrinfo(PGconn* conn); +static void sendTerminateConn(PGconn* conn); +static void pqDropConnection(PGconn* conn, bool flushInput); +static void pqDropServerData(PGconn *conn); +static int count_comma_separated_elems(const char *input); +static char *parse_comma_separated_list(char **startptr, bool *more); +static bool mutiHostlOptions(PGconn* conn); +static bool parse_int_param(const char *value, int *result, PGconn *conn, const char *context); +static bool resolve_host_addr(PGconn *conn); +static void reset_connection_state_machine(PGconn *conn); +static void reset_physical_connection(PGconn *conn); +static void try_next_host(PGconn *conn); +static bool saveErrorMessage(PGconn *conn, PQExpBuffer savedMessage); +static void restoreErrorMessage(PGconn *conn, PQExpBuffer savedMessage); +static PostgresPollingStatusType 
connection_check_target(PGconn* conn); +static PostgresPollingStatusType connection_consume(PGconn* conn); +static PostgresPollingStatusType connection_check_writable(PGconn* conn); +static PostgresPollingStatusType connection_check_standby(PGconn* conn); static PQconninfoOption* conninfo_init(PQExpBuffer errorMessage); static PQconninfoOption* parse_connection_string(const char* conninfo, PQExpBuffer errorMessage, bool use_defaults); static int uri_prefix_length(const char* connstr); @@ -684,6 +707,13 @@ PGconn* PQconnectStartParams(const char* const* keywords, const char* const* val if (conn == NULL) return NULL; + /* Check for params keywords-values */ + if (keywords == NULL || values == NULL) { + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid params keywords or values.\n")); + conn->status = CONNECTION_BAD; + return conn; + } + /* * Parse the conninfo arrays */ @@ -899,6 +929,8 @@ static void fillPGconn(PGconn* conn, PQconninfoOption* connOptions) conn->sslrootcert = (tmp != NULL) ? strdup(tmp) : NULL; tmp = conninfo_getval(connOptions, "sslcrl"); conn->sslcrl = (tmp != NULL) ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "target_session_attrs"); + conn->target_session_attrs = (tmp != NULL) ? strdup(tmp) : NULL; tmp = conninfo_getval(connOptions, "requirepeer"); conn->requirepeer = (tmp != NULL) ? strdup(tmp) : NULL; #if defined(KRB5) || defined(ENABLE_GSS) || defined(ENABLE_SSPI) @@ -988,6 +1020,10 @@ static bool connectOptions1(PGconn* conn, const char* conninfo) */ static bool connectOptions2(PGconn* conn) { + if (!mutiHostlOptions(conn)) { + return false; + } + /* * If database name was not given, default it to equal user name */ @@ -995,21 +1031,38 @@ static bool connectOptions2(PGconn* conn) if (conn->dbName != NULL) free(conn->dbName); conn->dbName = strdup(conn->pguser); + if (!conn->dbName) + goto oom_error; } #ifdef SUPPORT_PGPASSFILE /* - * Supply default password if none given + * Supply default password if none given. Note that the password might + * be different for each host/port pair. */ if (conn->pgpass == NULL || conn->pgpass[0] == '\0') { if (conn->pgpass != NULL) { erase_string(conn->pgpass); - free(conn->pgpass); + libpq_free(conn->pgpass); + } + + for (int i = 0; i < conn->nconnhost; ++i) { + /* + * Try to get a password for this host from file. We use host + * for the hostname search key if given, else hostaddr (at + * least one of them is guaranteed nonempty by now).
+ */ + const char *pwhost = conn->connhost[i].host; + + if (pwhost == NULL || pwhost[0] == '\0') + pwhost = conn->connhost[i].hostaddr; + conn->connhost[i].password = PasswordFromFile(pwhost, + conn->connhost[i].port, + conn->dbName, conn->pguser); + if (conn->connhost[i].password == NULL) + conn->pgpass = strdup(DefaultPassword); + else + conn->dot_pgpass_used = true; } - conn->pgpass = PasswordFromFile(conn->pghost, conn->pgport, conn->dbName, conn->pguser); - if (conn->pgpass == NULL) - conn->pgpass = strdup(DefaultPassword); - else - conn->dot_pgpass_used = true; } #else if (conn->pgpass == NULL) { @@ -1017,16 +1070,6 @@ static bool connectOptions2(PGconn* conn) } #endif - /* - * Allow unix socket specification in the host name - */ - if ((conn->pghost != NULL) && is_absolute_path(conn->pghost)) { - if (conn->pgunixsocket != NULL) - free(conn->pgunixsocket); - conn->pgunixsocket = conn->pghost; - conn->pghost = NULL; - } - /* * validate sslmode option */ @@ -1070,6 +1113,34 @@ static bool connectOptions2(PGconn* conn) conn->client_encoding_initial = strdup(pg_encoding_to_char(pg_get_encoding_from_locale(NULL, true))); } + /* + * Validate target_session_attrs option, and set target_server_type + */ + if (conn->target_session_attrs) { + if (strcmp(conn->target_session_attrs, "any") == 0) + conn->target_server_type = SERVER_TYPE_ANY; + else if (strcmp(conn->target_session_attrs, "read-write") == 0) + conn->target_server_type = SERVER_TYPE_READ_WRITE; + else if (strcmp(conn->target_session_attrs, "read-only") == 0) + conn->target_server_type = SERVER_TYPE_READ_ONLY; + else if (strcmp(conn->target_session_attrs, "primary") == 0) + conn->target_server_type = SERVER_TYPE_PRIMARY; + else if (strcmp(conn->target_session_attrs, "standby") == 0) + conn->target_server_type = SERVER_TYPE_STANDBY; + else if (strcmp(conn->target_session_attrs, "prefer-standby") == 0) + conn->target_server_type = SERVER_TYPE_PREFER_STANDBY; + else { + conn->status = CONNECTION_BAD; + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("invalid %s value: \"%s\"\n"), + "target_session_attrs", + conn->target_session_attrs); + return false; + } + } else { + conn->target_server_type = SERVER_TYPE_ANY; + } + /* * Only if we get this far is it appropriate to try to connect. (We need a * state flag, rather than just the boolean result of this function, in @@ -1078,6 +1149,11 @@ static bool connectOptions2(PGconn* conn) conn->options_valid = true; return true; + +oom_error: + conn->status = CONNECTION_BAD; + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory \n")); + return false; } /* @@ -1277,6 +1353,7 @@ static void connectFailureMessage(PGconn* conn, int errorno) char host_addr[NI_MAXHOST]; int rcs = 0; const char* displayed_host = NULL; + const char* displayed_port = NULL; struct sockaddr_storage* addr = &conn->raddr.addr; @@ -1284,8 +1361,9 @@ static void connectFailureMessage(PGconn* conn, int errorno) * Optionally display the network address with the hostname. This is * useful to distinguish between IPv4 and IPv6 connections.
*/ - if (conn->pghostaddr != NULL) { - check_strncpy_s(strncpy_s(host_addr, NI_MAXHOST, conn->pghostaddr, strlen(conn->pghostaddr))); + if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0') { + check_strncpy_s(strncpy_s(host_addr, NI_MAXHOST, conn->connhost[conn->whichhost].hostaddr, + strlen(conn->connhost[conn->whichhost].hostaddr))); } else if (addr->ss_family == AF_INET) { #if defined(WIN32) || defined(_WIN64) rcs = strcpy_s(host_addr, NI_MAXHOST, "inet_net_ntop() unsupported on Windows"); @@ -1314,18 +1392,23 @@ static void connectFailureMessage(PGconn* conn, int errorno) securec_check_c(rcs, "\0", "\0"); if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0') - displayed_host = conn->pghostaddr; + displayed_host = conn->connhost[conn->whichhost].hostaddr; else if (conn->pghost != NULL && conn->pghost[0] != '\0') - displayed_host = conn->pghost; + displayed_host = conn->connhost[conn->whichhost].host; else displayed_host = DefaultHost; + displayed_port = conn->connhost[conn->whichhost].port; + if (displayed_port == NULL || displayed_port[0] == '\0') { + displayed_port = DEF_PGPORT_STR; + } + /* * If the user did not supply an IP address using 'hostaddr', and * 'host' was missing or does not match our lookup, display the * looked-up IP address. */ - if ((conn->pghostaddr == NULL) && (conn->pghost == NULL || strcmp(conn->pghost, host_addr) != 0)) + if ((conn->pghostaddr == NULL) && (conn->pghost == NULL || strcmp(displayed_host, host_addr) != 0)) appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not connect to server: %s\n" "\tIs the server running on host \"%s\" (%s) and accepting\n" @@ -1333,7 +1416,7 @@ static void connectFailureMessage(PGconn* conn, int errorno) strerror(errorno), displayed_host, host_addr, - conn->pgport); + displayed_port); else appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not connect to server: %s\n" @@ -1341,7 +1424,7 @@ static void connectFailureMessage(PGconn* conn, int errorno) "\tTCP/IP connections on port %s?\n"), strerror(errorno), displayed_host, - conn->pgport); + displayed_port); } } @@ -1506,14 +1589,6 @@ static int setKeepalivesWin32(PGconn* conn) */ static int connectDBStart(PGconn* conn) { - int portnum; - char portstr[MAXPGPATH]; - struct addrinfo* addrs = NULL; - struct addrinfo hint; - const char* node = NULL; - int ret; - int nRet = 0; - if (conn == NULL) return 0; @@ -1524,112 +1599,38 @@ static int connectDBStart(PGconn* conn) conn->inStart = conn->inCursor = conn->inEnd = 0; conn->outCount = 0; - /* - * Determine the parameters to pass to pg_getaddrinfo_all. 
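The connectOptions2() changes above give libpq comma-separated multi-host support plus a target_session_attrs filter over the candidates. A minimal caller-side sketch of how an application would exercise them (the host addresses, user, and dbname below are illustrative placeholders, not values from this patch):

```
/* Sketch: ask libpq to pick the writable node among two candidates. */
#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    PGconn *conn = PQconnectdb(
        "host=10.10.0.1,10.10.0.2 port=5432,5432 "
        "dbname=postgres user=omm target_session_attrs=read-write");

    if (PQstatus(conn) != CONNECTION_OK) {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }
    /* PQhost()/PQport() report the connhost entry actually chosen. */
    printf("connected to %s:%s\n", PQhost(conn), PQport(conn));
    PQfinish(conn);
    return 0;
}
```

Any value outside the six recognized ones fails fast in connectOptions2() with the invalid-value error added above, before any network I/O happens.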
- */ - - /* Initialize hint structure */ - nRet = memset_s(&hint, sizeof(hint), 0, sizeof(hint)); - securec_check_c(nRet, "\0", "\0"); - hint.ai_socktype = SOCK_STREAM; - hint.ai_family = AF_UNSPEC; - - /* Set up port number as a string */ - if (conn->pgport != NULL && conn->pgport[0] != '\0') { - portnum = atoi(conn->pgport); - if (portnum < 1 || portnum > 65535) { - appendPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid port number: \"%s\"\n"), conn->pgport); - conn->options_valid = false; + if (conn->connhost == NULL) { + if (!mutiHostlOptions(conn)) { goto connect_errReturn; } - } else - portnum = DEF_PGPORT; - nRet = snprintf_s(portstr, MAXPGPATH, MAXPGPATH - 1, "%d", portnum); - securec_check_ss_c(nRet, "\0", "\0"); - - if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0') { - /* Using pghostaddr avoids a hostname lookup */ - node = conn->pghostaddr; - hint.ai_family = AF_UNSPEC; - hint.ai_flags = AI_NUMERICHOST; - } else if (conn->pghost != NULL && conn->pghost[0] != '\0') { - /* Using pghost, so we have to look-up the hostname */ - node = conn->pghost; - hint.ai_family = AF_UNSPEC; - } else { -#ifdef HAVE_UNIX_SOCKETS - /* pghostaddr and pghost are NULL, so use Unix domain socket */ - node = NULL; - hint.ai_family = AF_UNIX; - - if (!conn->fencedUdfRPCMode) - UNIXSOCK_PATH(portstr, portnum, conn->pgunixsocket); - else - UNIXSOCK_FENCED_MASTER_PATH(portstr, conn->pgunixsocket); - - if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN) { - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"), - portstr, - (int)(UNIXSOCK_PATH_BUFLEN - 1)); - conn->options_valid = false; - goto connect_errReturn; - } -#else - /* Without Unix sockets, default to localhost instead */ - node = DefaultHost; - hint.ai_family = AF_UNSPEC; -#endif /* HAVE_UNIX_SOCKETS */ } - /* Use pg_getaddrinfo_all() to resolve the address */ - ret = pg_getaddrinfo_all(node, portstr, &hint, &addrs); - if (ret || addrs == NULL) { - if (node != NULL) - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not translate host name \"%s\" to address: %s\n"), - node, - gai_strerror(ret)); - else - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"), - portstr, - gai_strerror(ret)); - if (addrs != NULL) - pg_freeaddrinfo_all(hint.ai_family, addrs); - conn->options_valid = false; + conn->whichhost = -1; + /* Also reset the target_server_type state if needed */ + if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY_PASS2) + conn->target_server_type = SERVER_TYPE_PREFER_STANDBY; + +nexthost: + try_next_host(conn); + + if (conn->whichhost >= conn->nconnhost) { + /* Keep whichhost's value within the range of the array */ + conn->whichhost--; goto connect_errReturn; } -#ifdef USE_SSL - /* setup values based on SSL mode */ - if (conn->sslmode[0] == 'd') /* "disable" */ - conn->allow_ssl_try = false; - else if (conn->sslmode[0] == 'a') /* "allow" */ - conn->wait_ssl_try = true; -#endif - /* - * Set up to try to connect, with protocol 3.0 as the first attempt.
- */ - conn->addrlist = addrs; - conn->addr_cur = addrs; - conn->addrlist_family = hint.ai_family; - - /* Whenever change the authenication process, change the version here for compatible.*/ - conn->pversion = PG_PROTOCOL(3, 51); - conn->send_appname = true; - conn->status = CONNECTION_NEEDED; - - /* - * The code for processing CONNECTION_NEEDED state is in PQconnectPoll(), - * so that it can easily be re-executed if needed again during the - * asynchronous startup process. However, we must run it once here, - * because callers expect a success return from this routine to mean that - * we are in PGRES_POLLING_WRITING connection state. - */ - if (PQconnectPoll(conn) == PGRES_POLLING_WRITING) + * The code for processing CONNECTION_NEEDED state is in PQconnectPoll(), + * so that it can easily be re-executed if needed again during the + * asynchronous startup process. However, we must run it once here, + * because callers expect a success return from this routine to mean that + * we are in PGRES_POLLING_WRITING connection state. + */ + if (PQconnectPoll(conn) == PGRES_POLLING_WRITING) { return 1; + } else { + goto nexthost; + } connect_errReturn: if (conn->sock >= 0) { @@ -1652,6 +1653,12 @@ static int connectDBComplete(PGconn* conn) { PostgresPollingStatusType flag = PGRES_POLLING_WRITING; time_t finish_time = ((time_t)-1); + int last_whichhost = -2; /* certainly different from whichhost */ + struct addrinfo *last_addr_cur = NULL; + int timeout = 0; +#ifdef ENABLE_LITE_MODE + unsigned char runTime = 0; +#endif if (conn == NULL || conn->status == CONNECTION_BAD) return 0; @@ -1660,7 +1667,7 @@ static int connectDBComplete(PGconn* conn) * Set up a time limit, if connect_timeout isn't zero. */ if (conn->connect_timeout != NULL) { - int timeout = atoi(conn->connect_timeout); + timeout = atoi(conn->connect_timeout); if (timeout > 0) { /* @@ -1672,7 +1679,33 @@ static int connectDBComplete(PGconn* conn) finish_time = time(NULL) + timeout; } } + +#ifdef ENABLE_LITE_MODE + PQExpBuffer errMsgBuf = createPQExpBuffer(); + if (PQExpBufferBroken(errMsgBuf)) { + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); + conn->status = CONNECTION_BAD; + return 0; + } +#endif + for (;;) { + int err_ret = 0; + + /* + * (Re)start the connect_timeout timer if it's active and we are + * considering a different host than we were last time through. If + * we've already succeeded, though, needn't recalculate. + */ + if (flag != PGRES_POLLING_OK && + timeout > 0 && + (conn->whichhost != last_whichhost || + conn->addr_cur != last_addr_cur)) { + finish_time = time(NULL) + timeout; + last_whichhost = conn->whichhost; + last_addr_cur = conn->addr_cur; + } + /* * Wait, if necessary. Note that the initial state (just after * PQconnectStart) is to wait for the socket to select for writing. @@ -1685,26 +1718,89 @@ static int connectDBComplete(PGconn* conn) * connection */ resetPQExpBuffer(&conn->errorMessage); +#ifdef ENABLE_LITE_MODE + destroyPQExpBuffer(errMsgBuf); +#endif return 1; /* success! 
*/ case PGRES_POLLING_READING: if (pqWaitTimed(1, 0, conn, finish_time)) { - conn->status = CONNECTION_BAD; - return 0; + err_ret = 1; } break; case PGRES_POLLING_WRITING: if (pqWaitTimed(0, 1, conn, finish_time)) { - conn->status = CONNECTION_BAD; - return 0; + err_ret = 1; } break; default: + err_ret = 1; + break; + } + + if (err_ret == 1) { +#ifdef ENABLE_LITE_MODE + if (runTime % 2 == 1) { + appendPQExpBuffer(errMsgBuf, "(connect to V1 server) "); + } else { + appendPQExpBuffer(errMsgBuf, "(connect to V5 server) "); + } + appendPQExpBufferStr(errMsgBuf, PQerrorMessage(conn)); + resetPQExpBuffer(&conn->errorMessage); + + runTime++; + /* ENABLE_LITE_MODE is compatible with the V1 server, */ + /* whose database names are uppercase letters. */ + /* So we need to attempt to connect to the server twice. */ + if (runTime % 2 == 1) { + for (; conn->whichhost < conn->nconnhost; conn->whichhost++) { + reset_physical_connection(conn); + + reset_connection_state_machine(conn); + + if (resolve_host_addr(conn)) { + break; + } + } + conn->pversion = PG_PROTOCOL(3, 0); + conn->dbName = pg_strtoupper(conn->dbName); + } else { + /* + * Give up on current server/address, try the next one. + */ + try_next_host(conn); + + if (conn->whichhost >= conn->nconnhost) { + /* Keep whichhost's value within the range of the array */ + conn->whichhost--; + /* Just in case we failed to set it in PQconnectPoll */ + conn->status = CONNECTION_BAD; + + resetPQExpBuffer(&conn->errorMessage); + appendPQExpBufferStr(&conn->errorMessage, errMsgBuf->data); + destroyPQExpBuffer(errMsgBuf); + + return 0; + } + runTime = 0; + conn->dbName = pg_strtolower(conn->dbName); + } +#else + /* + * Give up on current server/address, try the next one. + */ + try_next_host(conn); + + if (conn->whichhost >= conn->nconnhost) { + /* Keep whichhost's value within the range of the array */ + conn->whichhost--; /* Just in case we failed to set it in PQconnectPoll */ conn->status = CONNECTION_BAD; return 0; + } +#endif } /* @@ -1872,7 +1968,10 @@ PostgresPollingStatusType PQconnectPoll(PGconn* conn) /* These are reading states */ case CONNECTION_AWAITING_RESPONSE: - case CONNECTION_AUTH_OK: { + case CONNECTION_AUTH_OK: + case CONNECTION_CHECK_WRITABLE: + case CONNECTION_CONSUME: + case CONNECTION_CHECK_STANDBY: { /* Load waiting data */ int n = pqReadData(conn); @@ -1896,6 +1995,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn* conn) /* Special cases: proceed without waiting.
*/ case CONNECTION_SSL_STARTUP: case CONNECTION_NEEDED: + case CONNECTION_CHECK_TARGET: break; default: @@ -2075,8 +2175,18 @@ keep_going: /* We will come back to here until there is continue; } } - + char *bind_addr = tcp_link_addr; + +#ifdef ENABLE_LITE_MODE + /* For replication connection request under lite mode, client ip bind is forced */ + if ((bind_addr == NULL || strcmp(bind_addr, "0.0.0.0") == 0) && + conn->replication != NULL && strcmp(conn->pglocalhost, "0.0.0.0") != 0) { + comm_client_bind = true; + bind_addr = conn->pglocalhost; + } +#endif + if (!IS_AF_UNIX(addr_cur->ai_family) && comm_client_bind && bind_addr != NULL @@ -2406,7 +2516,8 @@ keep_going: /* We will come back to here until there is libpq_gettext("could not send startup packet: %s\n" "localhost: %s, localport: %s, remotehost: %s, remoteaddr: %s, remoteport:%s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), - conn->pglocalhost, conn->pglocalport, conn->pghost, conn->pghostaddr, conn->pgport); + conn->pglocalhost, conn->pglocalport, conn->connhost[conn->whichhost].host, + conn->connhost[conn->whichhost].hostaddr, conn->connhost[conn->whichhost].port); libpq_free(startpacket); goto error_return; } @@ -2735,12 +2846,20 @@ keep_going: /* We will come back to here until there is return PGRES_POLLING_READING; } } - if (areq == AUTH_REQ_SHA256) { + if ((areq == AUTH_REQ_SHA256) +#ifdef ENABLE_LITE_MODE + || (areq == AUTH_REQ_SHA256_RFC) +#endif + ) { if (pqGetInt((int*)(&(conn->password_stored_method)), 4, conn)) { return PGRES_POLLING_READING; } - if (SHA256_PASSWORD == conn->password_stored_method) { + if ((SHA256_PASSWORD == conn->password_stored_method) +#ifdef ENABLE_LITE_MODE + || (SHA256_PASSWORD_RFC == conn->password_stored_method) +#endif + ) { if (pqGetnchar(conn->salt, SALT_LENGTH * 2, conn)) { /* We'll come back when there are more data */ return PGRES_POLLING_READING; @@ -2963,9 +3082,7 @@ keep_going: /* We will come back to here until there is } /* We can release the address list now. */ - pg_freeaddrinfo_all(conn->addrlist_family, conn->addrlist); - conn->addrlist = NULL; - conn->addr_cur = NULL; + release_conn_addrinfo(conn); /* Fire up post-connection housekeeping if needed */ if (PG_PROTOCOL_MAJOR(conn->pversion) < 3) { @@ -2976,8 +3093,8 @@ keep_going: /* We will come back to here until there is } /* Otherwise, we are open for business! */ - conn->status = CONNECTION_OK; - return PGRES_POLLING_OK; + conn->status = CONNECTION_CHECK_TARGET; + goto keep_going; } case CONNECTION_SETENV: @@ -3007,8 +3124,35 @@ keep_going: /* We will come back to here until there is } /* We are open for business! 
*/ - conn->status = CONNECTION_OK; - return PGRES_POLLING_OK; + conn->status = CONNECTION_CHECK_TARGET; + goto keep_going; + + case CONNECTION_CHECK_TARGET: + return connection_check_target(conn); + case CONNECTION_CONSUME: { + PostgresPollingStatusType ret = connection_consume(conn); + if (ret == PGRES_POLLING_ACTIVE) { + goto keep_going; + } else { + return ret; + } + } + case CONNECTION_CHECK_WRITABLE: { + PostgresPollingStatusType ret = connection_check_writable(conn); + if (ret == PGRES_POLLING_ACTIVE) { + goto keep_going; + } else { + return ret; + } + } + case CONNECTION_CHECK_STANDBY: { + PostgresPollingStatusType ret = connection_check_standby(conn); + if (ret == PGRES_POLLING_ACTIVE) { + goto keep_going; + } else { + return ret; + } + } default: appendPQExpBuffer(&conn->errorMessage, @@ -3143,6 +3287,9 @@ PGconn* makeEmptyPGconn(void) conn->setenv_state = SETENV_STATE_IDLE; conn->client_encoding = PG_SQL_ASCII; conn->std_strings = false; /* unless server says differently */ + conn->whichhost = -1; + conn->default_transaction_read_only = PG_BOOL_UNKNOWN; + conn->in_hot_standby = PG_BOOL_UNKNOWN; conn->verbosity = PQERRORS_DEFAULT; conn->sock = -1; conn->auth_req_received = false; @@ -3209,13 +3356,29 @@ void freePGconn(PGconn* conn) libpq_free(conn->events[i].name); } + /* clean up pg_conn_host structures */ + if (conn->connhost != NULL) { + for (i = 0; i < conn->nconnhost; ++i) { + if (conn->connhost[i].host != NULL) + libpq_free(conn->connhost[i].host); + if (conn->connhost[i].hostaddr != NULL) + libpq_free(conn->connhost[i].hostaddr); + if (conn->connhost[i].port != NULL) + libpq_free(conn->connhost[i].port); + if (conn->connhost[i].password != NULL) { + erase_string(conn->connhost[i].password); + libpq_free(conn->connhost[i].password); + } + } + libpq_free(conn->connhost); + } + libpq_free(conn->client_encoding_initial); libpq_free(conn->events); libpq_free(conn->pghost); libpq_free(conn->remote_nodename); libpq_free(conn->pghostaddr); libpq_free(conn->pgport); - libpq_free(conn->pgunixsocket); libpq_free(conn->pgtty); libpq_free(conn->connect_timeout); libpq_free(conn->pgoptions); @@ -3255,6 +3418,8 @@ void freePGconn(PGconn* conn) libpq_free(conn->pglocalhost); libpq_free(conn->pglocalport); libpq_free(conn->connection_info); + if (conn->target_session_attrs) + libpq_free(conn->target_session_attrs); termPQExpBuffer(&conn->errorMessage); termPQExpBuffer(&conn->workBuffer); #ifdef HAVE_CE @@ -3319,9 +3484,8 @@ void closePGconn(PGconn* conn) * absent */ conn->asyncStatus = PGASYNC_IDLE; pqClearAsyncResult(conn); /* deallocate result */ - pg_freeaddrinfo_all(conn->addrlist_family, conn->addrlist); - conn->addrlist = NULL; - conn->addr_cur = NULL; + resetPQExpBuffer(&conn->errorMessage); + release_conn_addrinfo(conn); notify = conn->notifyHead; while (notify != NULL) { PGnotify* prev = notify; @@ -5067,6 +5231,16 @@ static bool conninfo_uri_parse_options(PQconninfoOption* options, const char* ur char* user = NULL; char* host = NULL; bool retval = false; + PQExpBufferData hostbuf; + PQExpBufferData portbuf; + + initPQExpBuffer(&hostbuf); + initPQExpBuffer(&portbuf); + if (PQExpBufferDataBroken(hostbuf) || PQExpBufferDataBroken(portbuf)) { + appendPQExpBufferStr(errorMessage, + libpq_gettext("out of memory\n")); + goto cleanup; + } if (buf == NULL) { printfPQExpBuffer(errorMessage, libpq_gettext("out of memory\n")); @@ -5126,74 +5300,99 @@ static bool conninfo_uri_parse_options(PQconninfoOption* options, const char* ur } /* - * "p" has been incremented past optional URI credential 
information at - * this point and now points at the "netloc" part of the URI. - * - * Look for IPv6 address. + * There may be multiple netloc[:port] pairs, each separated from the next + * by a comma. When we initially enter this loop, "p" has been + * incremented past optional URI credential information at this point and + * now points at the "netloc" part of the URI. On subsequent loop + * iterations, "p" has been incremented past the comma separator and now + * points at the start of the next "netloc". */ - if (*p == '[') { - host = ++p; - while (*p && *p != ']') - ++p; - if (!*p) { - printfPQExpBuffer(errorMessage, - libpq_gettext( - "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n"), - uri); - goto cleanup; - } - if (p == host) { - printfPQExpBuffer(errorMessage, libpq_gettext("IPv6 host address may not be empty in URI: \"%s\"\n"), uri); - goto cleanup; - } - - /* Cut off the bracket and advance */ - *(p++) = '\0'; - + for (;;) { /* - * The address may be followed by a port specifier or a slash or a - * query. + * Look for IPv6 address. */ - if (*p && *p != ':' && *p != '/' && *p != '?') { - printfPQExpBuffer(errorMessage, - libpq_gettext("unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n"), - *p, - (int)(p - buf + 1), - uri); - goto cleanup; + if (*p == '[') { + host = ++p; + while (*p && *p != ']') + ++p; + if (!*p) { + printfPQExpBuffer(errorMessage, + libpq_gettext( + "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n"), + uri); + goto cleanup; + } + if (p == host) { + printfPQExpBuffer(errorMessage, libpq_gettext("IPv6 host address may not be empty in URI: \"%s\"\n"), uri); + goto cleanup; + } + + /* Cut off the bracket and advance */ + *(p++) = '\0'; + + /* + * The address may be followed by a port specifier or a slash or a + * query. + */ + if (*p && *p != ':' && *p != '/' && *p != '?') { + printfPQExpBuffer(errorMessage, + libpq_gettext("unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n"), + *p, + (int)(p - buf + 1), + uri); + goto cleanup; + } + } else { + /* not an IPv6 address: DNS-named or IPv4 netloc */ + host = p; + + /* + * Look for port specifier (colon) or end of host specifier (slash), + * or query (question mark). + */ + while (*p && *p != ':' && *p != '/' && *p != '?') + ++p; } - } else { - /* not an IPv6 address: DNS-named or IPv4 netloc */ - host = p; - - /* - * Look for port specifier (colon) or end of host specifier (slash), - * or query (question mark). - */ - while (*p && *p != ':' && *p != '/' && *p != '?') - ++p; - } - - /* Save the hostname terminator before we null it */ - prevchar = *p; - *p = '\0'; - - if (*host && conninfo_storeval(options, "host", host, errorMessage, false, true) == NULL) - goto cleanup; - - if (prevchar == ':') { - const char* port = ++p; /* advance past host terminator */ - - while (*p && *p != '/' && *p != '?') - ++p; + /* Save the hostname terminator before we null it */ prevchar = *p; *p = '\0'; - if (*port && conninfo_storeval(options, "port", port, errorMessage, false, true) == NULL) - goto cleanup; + appendPQExpBufferStr(&hostbuf, host); + + if (prevchar == ':') { + const char *port = ++p; /* advance past host terminator */ + + while (*p && *p != '/' && *p != '?' 
&& *p != ',') + ++p; + + prevchar = *p; + *p = '\0'; + + appendPQExpBufferStr(&portbuf, port); + } + + if (prevchar != ',') { + break; + } + + ++p; /* advance past comma separator */ + appendPQExpBufferChar(&hostbuf, ','); + appendPQExpBufferChar(&portbuf, ','); } + /* Save final values for host and port. */ + if (PQExpBufferDataBroken(hostbuf) || PQExpBufferDataBroken(portbuf)) + goto cleanup; + if (hostbuf.data[0] && + !conninfo_storeval(options, "host", hostbuf.data, + errorMessage, false, true)) + goto cleanup; + if (portbuf.data[0] && + !conninfo_storeval(options, "port", portbuf.data, + errorMessage, false, true)) + goto cleanup; + if (prevchar && prevchar != '?') { const char* dbname = ++p; /* advance past host terminator */ @@ -5227,6 +5426,8 @@ cleanup: check_memset_s(memset_s(buf, strlen(buf) + 1, 0, strlen(buf) + 1)); p = NULL; libpq_free(buf); + termPQExpBuffer(&hostbuf); + termPQExpBuffer(&portbuf); return retval; } @@ -5557,22 +5758,42 @@ char* PQuser(const PGconn* conn) char* PQpass(const PGconn* conn) { + char *password = NULL; + if (conn == NULL) return NULL; - return conn->pgpass; + if (conn->connhost != NULL) + password = conn->connhost[conn->whichhost].password; + if (password == NULL) + password = conn->pgpass; + return password; } char* PQhost(const PGconn* conn) { if (conn == NULL) return NULL; - return conn->pghost != NULL ? conn->pghost : conn->pgunixsocket; + if (conn->connhost != NULL) { + /* + * Return the verbatim host value provided by user, or hostaddr in its + * lack. + */ + if (conn->connhost[conn->whichhost].host != NULL && + conn->connhost[conn->whichhost].host[0] != '\0') + return conn->connhost[conn->whichhost].host; + else if (conn->connhost[conn->whichhost].hostaddr != NULL && + conn->connhost[conn->whichhost].hostaddr[0] != '\0') + return conn->connhost[conn->whichhost].hostaddr; + } + return NULL; } char* PQport(const PGconn* conn) { if (conn == NULL) return NULL; + if (conn->connhost != NULL) + return conn->connhost[conn->whichhost].port; return conn->pgport; } @@ -5684,9 +5905,13 @@ int PQbackendPID(const PGconn* conn) int PQconnectionNeedsPassword(const PGconn* conn) { + char* password = NULL; + if (conn == NULL) return false; - if (conn->password_needed && (conn->pgpass == NULL || conn->pgpass[0] == '\0')) + + password = PQpass(conn); + if (conn->password_needed && (password == NULL || password[0] == '\0')) return true; else return false; @@ -6140,6 +6365,1014 @@ static void set_libpq_stat_info(Oid nodeid, int count) } } +/* + * pqDropConnection + * + * Close any physical connection to the server, and reset associated + * state inside the connection object. We don't release state that + * would be needed to reconnect, though. + * + * We can always flush the output buffer, since there's no longer any hope + * of sending that data. However, unprocessed input data might still be + * valuable, so the caller must tell us whether to flush that or not. 
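Because PQpass() now prefers the per-host password resolved from the password file, PQconnectionNeedsPassword() stays accurate even when each host carries its own credentials. A common retry pattern, sketched here assuming a prompt_password() helper that is not part of libpq (and ignoring quoting of special characters in the password):

```
/* Sketch: reconnect with a prompted password only when libpq asks for one. */
#include <stdio.h>
#include <libpq-fe.h>

extern char *prompt_password(void);   /* hypothetical helper */

PGconn *connect_with_prompt(const char *conninfo)
{
    PGconn *conn = PQconnectdb(conninfo);

    if (PQstatus(conn) == CONNECTION_BAD && PQconnectionNeedsPassword(conn)) {
        char buf[1024];
        int n = snprintf(buf, sizeof(buf), "%s password=%s",
                         conninfo, prompt_password());
        PQfinish(conn);
        if (n < 0 || (size_t)n >= sizeof(buf))
            return NULL;              /* conninfo too long for this sketch */
        conn = PQconnectdb(buf);
    }
    return conn;
}
```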
+ */ +static void pqDropConnection(PGconn* conn, bool flushInput) +{ + conn->nonblocking = false; + + /* Close the socket itself */ + if (conn->sock != PGINVALID_SOCKET && (!conn->is_logic_conn)) { + pqsecure_close(conn); + closesocket(conn->sock); + } + conn->sock = PGINVALID_SOCKET; + + /* Optionally discard any unread data */ + if (flushInput) { + conn->inStart = conn->inCursor = conn->inEnd = 0; + } + + /* Always discard any unsent data */ + conn->outCount = 0; + + /* Free authentication state */ +#ifdef ENABLE_GSS + { + OM_uint32 min_s; + + if (conn->gctx != NULL) + gss_delete_sec_context(&min_s, &conn->gctx, GSS_C_NO_BUFFER); + if (conn->gtarg_nam != NULL) + gss_release_name(&min_s, &conn->gtarg_nam); + if (conn->ginbuf.length) + gss_release_buffer(&min_s, &conn->ginbuf); + if (conn->goutbuf.length) + gss_release_buffer(&min_s, &conn->goutbuf); + } +#endif +#ifdef ENABLE_SSPI + if (conn->ginbuf.length) + libpq_free(conn->ginbuf.value); + conn->ginbuf.length = 0; + conn->ginbuf.value = NULL; + libpq_free(conn->sspitarget); + conn->sspitarget = NULL; + if (conn->sspicred != NULL) { + FreeCredentialsHandle(conn->sspicred); + libpq_free(conn->sspicred); + } + if (conn->sspictx != NULL) { + DeleteSecurityContext(conn->sspictx); + libpq_free(conn->sspictx); + } + conn->usesspi = 0; +#endif +} + +/* + * pqDropServerData + * + * Clear all connection state data that was received from (or deduced about) + * the server. This is essential to do between connection attempts to + * different servers, else we may incorrectly hold over some data from the + * old server. + * + * It would be better to merge this into pqDropConnection, perhaps, but + * right now we cannot because that function is called immediately on + * detection of connection loss (cf. pqReadData, for instance). This data + * should be kept until we are actually starting a new connection. + */ +static void pqDropServerData(PGconn *conn) +{ + PGnotify *notify; + pgParameterStatus *pstatus; + + /* Forget pending notifies */ + notify = conn->notifyHead; + while (notify != NULL) { + PGnotify *prev = notify; + + notify = notify->next; + libpq_free(prev); + } + conn->notifyHead = conn->notifyTail = NULL; + + /* Reset ParameterStatus data, as well as variables deduced from it */ + pstatus = conn->pstatus; + while (pstatus != NULL) { + pgParameterStatus *prev = pstatus; + + pstatus = pstatus->next; + libpq_free(prev); + } + conn->pstatus = NULL; + conn->client_encoding = PG_SQL_ASCII; + conn->std_strings = false; + conn->setenv_state = SETENV_STATE_IDLE; + conn->next_eo = NULL; + conn->default_transaction_read_only = PG_BOOL_UNKNOWN; + conn->in_hot_standby = PG_BOOL_UNKNOWN; + conn->sversion = 0; + + /* Drop large-object lookup data */ + if (conn->lobjfuncs) + libpq_free(conn->lobjfuncs); + + /* Reset assorted other per-connection state */ + conn->last_sqlstate[0] = '\0'; + conn->auth_req_received = false; + conn->password_needed = false; + conn->be_pid = 0; + conn->be_key = 0; +} + +/* + * Count the number of elements in a simple comma-separated list. + */ +static int count_comma_separated_elems(const char *input) +{ + int n; + + n = 1; + for (; *input != '\0'; input++) { + if (*input == ',') { + n++; + } + + } + + return n; +} + +/* + * Parse a simple comma-separated list. + * + * On each call, returns a malloc'd copy of the next element, and sets *more + * to indicate whether there are any more elements in the list after this, + * and updates *startptr to point to the next element, if any. + * + * On out of memory, returns NULL. 
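A short sketch of how the two list helpers cooperate, mirroring the loops in mutiHostlOptions() below. Both functions are static, so this only compiles inside fe-connect.cpp; demo_split() and the host names are hypothetical:

```
/* Sketch: split "node1,node2,node3" one malloc'd element at a time. */
static void demo_split(void)
{
    char list[] = "node1,node2,node3";
    int n = count_comma_separated_elems(list);   /* n == 3 */
    char *cursor = list;
    bool more = true;

    for (int i = 0; i < n && more; i++) {
        char *elem = parse_comma_separated_list(&cursor, &more);
        if (elem == NULL)
            break;                               /* out of memory */
        printf("element %d: %s\n", i, elem);
        free(elem);
    }
}
```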
+ */ +static char *parse_comma_separated_list(char **startptr, bool *more) +{ + char *p; + char *s = *startptr; + char *e; + int len; + + /* + * Search for the end of the current element; a comma or end-of-string + * acts as a terminator. + */ + e = s; + while (*e != '\0' && *e != ',') { + ++e; + } + + *more = (*e == ','); + + len = e - s; + p = (char *) malloc(sizeof(char) * (len + 1)); + if (p) { + check_memcpy_s(memcpy_s(p, len + 1, s, len)); + p[len] = '\0'; + } + *startptr = e + 1; + + return p; +} + +static bool mutiHostlOptions(PGconn* conn) +{ + int i = 0; + /* + * Allocate memory for details about each host to which we might possibly + * try to connect. For that, count the number of elements in the hostaddr + * or host options. If neither is given, assume one host. + */ + conn->whichhost = 0; + if (conn->pghostaddr && conn->pghostaddr[0] != '\0') { + conn->nconnhost = count_comma_separated_elems(conn->pghostaddr); + } else if (conn->pghost && conn->pghost[0] != '\0') { + conn->nconnhost = count_comma_separated_elems(conn->pghost); + } else { + conn->nconnhost = 1; + } + +#ifdef ENABLE_MULTIPLE_NODES + if (conn->nconnhost > 1) { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("do not support multiple hosts\n")); + return false; + + } +#endif + + conn->connhost = (pg_conn_host *) + calloc(conn->nconnhost, sizeof(pg_conn_host)); + if (conn->connhost == NULL) { + goto oom_error; + } + + /* + * We now have one pg_conn_host structure per possible host. Fill in the + * host and hostaddr fields for each, by splitting the parameter strings. + */ + if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0') { + char *s = conn->pghostaddr; + bool more = true; + + for (i = 0; i < conn->nconnhost && more; i++) { + conn->connhost[i].hostaddr = parse_comma_separated_list(&s, &more); + if (conn->connhost[i].hostaddr == NULL) + goto oom_error; + } + + /* + * If hostaddr was given, the array was allocated according to the + * number of elements in the hostaddr list, so it really should be the + * right size. + */ + Assert(!more); + Assert(i == conn->nconnhost); + } + + if (conn->pghost != NULL && conn->pghost[0] != '\0') { + char *s = conn->pghost; + bool more = true; + + for (i = 0; i < conn->nconnhost && more; i++) { + conn->connhost[i].host = parse_comma_separated_list(&s, &more); + if (conn->connhost[i].host == NULL) + goto oom_error; + } + + /* Check for wrong number of host items. */ + if (more || i != conn->nconnhost) { + conn->status = CONNECTION_BAD; + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not match %d host names to %d hostaddr values\n"), + count_comma_separated_elems(conn->pghost), conn->nconnhost); + return false; + } + } + + /* + * Now, for each host slot, identify the type of address spec, and fill in + * the default address if nothing was given. 
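The port-matching rules implemented just below are easy to misread, so here is the observable behavior: a single port fans out to every host, while any other mismatch in list lengths is rejected before any network I/O. A sketch (connection strings are illustrative; note that ENABLE_MULTIPLE_NODES builds refuse more than one host outright):

```
/* Sketch: list-length rules enforced by mutiHostlOptions(). */
#include <stdio.h>
#include <libpq-fe.h>

int main(void)
{
    /* one port, three hosts: 5432 is duplicated for every host */
    PGconn *ok = PQconnectdb("host=a,b,c port=5432 dbname=postgres");
    PQfinish(ok);

    /* two ports, three hosts: fails in option processing */
    PGconn *bad = PQconnectdb("host=a,b,c port=5432,5433 dbname=postgres");
    if (PQstatus(bad) == CONNECTION_BAD)
        fprintf(stderr, "%s", PQerrorMessage(bad));
    /* -> could not match 2 port numbers to 3 hosts */
    PQfinish(bad);
    return 0;
}
```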
+ */ + for (i = 0; i < conn->nconnhost; i++) { + pg_conn_host *ch = &conn->connhost[i]; + + if (ch->hostaddr != NULL && ch->hostaddr[0] != '\0') + ch->type = CHT_HOST_ADDRESS; + else if (ch->host != NULL && ch->host[0] != '\0') { + ch->type = CHT_HOST_NAME; +#ifdef HAVE_UNIX_SOCKETS + if (is_absolute_path(ch->host)) + ch->type = CHT_UNIX_SOCKET; +#endif + } else { + if (ch->host) + libpq_free(ch->host); +#ifdef HAVE_UNIX_SOCKETS + if (DEFAULT_PGSOCKET_DIR[0]) { + ch->host = strdup(DEFAULT_PGSOCKET_DIR); + ch->type = CHT_UNIX_SOCKET; + } else +#endif + { + ch->host = strdup(DefaultHost); + ch->type = CHT_HOST_NAME; + } + if (ch->host == NULL) + goto oom_error; + } + } + /* + * Next, work out the port number corresponding to each host name. + * + * Note: unlike the above for host names, this could leave the port fields + * as null or empty strings. We will substitute DEF_PGPORT whenever we + * read such a port field. + */ + if (conn->pgport != NULL && conn->pgport[0] != '\0') { + char *s = conn->pgport; + bool more = true; + + for (i = 0; i < conn->nconnhost && more; i++) { + conn->connhost[i].port = parse_comma_separated_list(&s, &more); + if (conn->connhost[i].port == NULL) + goto oom_error; + } + + /* + * If exactly one port was given, use it for every host. Otherwise, + * there must be exactly as many ports as there were hosts. + */ + if (i == 1 && !more) { + for (i = 1; i < conn->nconnhost; i++) { + conn->connhost[i].port = strdup(conn->connhost[0].port); + if (conn->connhost[i].port == NULL) + goto oom_error; + } + } else if (more || i != conn->nconnhost) { + conn->status = CONNECTION_BAD; + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not match %d port numbers to %d hosts\n"), + count_comma_separated_elems(conn->pgport), conn->nconnhost); + return false; + } + } + + /* + * If user name was not given, fetch it. (Most likely, the fetch will + * fail, since the only way we get here is if pg_fe_getauthname() failed + * during conninfo_add_defaults(). But now we want an error message.) + */ + if (conn->pguser == NULL || conn->pguser[0] == '\0') { + if (conn->pguser) + libpq_free(conn->pguser); + conn->pguser = pg_fe_getauthname(&conn->errorMessage); + if (!conn->pguser) { + conn->status = CONNECTION_BAD; + return false; + } + } + + return true; + +oom_error: + conn->status = CONNECTION_BAD; + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory \n")); + return false; + +} + +/* + * sendTerminateConn + * - Send a terminate message to backend. + */ +static void sendTerminateConn(PGconn *conn) +{ + /* + * Note that the protocol doesn't allow us to send Terminate messages + * during the startup phase. + */ + if (conn->sock != PGINVALID_SOCKET && + conn->status == CONNECTION_OK && + (!conn->is_logic_conn)) { + /* + * Try to send "close connection" message to backend. Ignore any + * error. + */ + pqPutMsgStart('X', false, conn); + pqPutMsgEnd(conn); + (void) pqFlush(conn); + } +} + +/* + * Parse and try to interpret "value" as an integer value, and if successful, + * store it in *result, complaining if there is any trailing garbage or an + * overflow. This allows any number of leading and trailing whitespaces. 
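The parse_int_param() contract just described, condensed into a sketch (demo_parse() is a hypothetical function and assumes a live PGconn for error reporting):

```
/* Sketch: parse_int_param() tolerates surrounding whitespace, nothing else. */
static void demo_parse(PGconn *conn)
{
    int port = 0;
    parse_int_param(" 5432 ", &port, conn, "port");      /* true,  port == 5432 */
    parse_int_param("5432x", &port, conn, "port");       /* false: trailing junk */
    parse_int_param("99999999999", &port, conn, "port"); /* false: overflows int */
}
```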
+ */ +static bool parse_int_param(const char *value, int *result, PGconn *conn, + const char *context) +{ + char *end; + long numval; + + Assert(value != NULL); + + *result = 0; + + /* strtol(3) skips leading whitespace */ + errno = 0; + numval = strtol(value, &end, 10); /* base 10 */ + + /* + * If no progress was made during parsing or an error happened, fail. + * This tests properly for overflows of the result. + */ + if (value == end || errno != 0 || numval != (int)numval) { + goto error; + } + + /* + * Skip any trailing whitespace; if anything but whitespace remains before + * the terminating character, fail + */ + while (*end != '\0' && isspace((unsigned char) *end)) { + end++; + } + + if (*end != '\0') { + goto error; + } + + *result = numval; + return true; + +error: + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("invalid integer value \"%s\" for connection option \"%s\"\n"), + value, context); + return false; +} + +/* + * release_conn_addrinfo + * - Free any addrinfo list in the PGconn. + */ +static void release_conn_addrinfo(PGconn *conn) +{ + if (conn->addrlist) { + pg_freeaddrinfo_all(conn->addrlist_family, conn->addrlist); + conn->addrlist = NULL; + conn->addr_cur = NULL; /* for safety */ + } +} + +/* Resolve the address list for the current connhost[] entry. */ +static bool resolve_host_addr(PGconn *conn) +{ + pg_conn_host *ch; + struct addrinfo hint; + int thisport = 0; + int ret = 0; + char portstr[MAXPGPATH]; + int nRet = 0; + errno_t error_ret = 0; + + if (conn->whichhost >= conn->nconnhost) { + return false; + } + + /* Drop any address info for previous host */ + release_conn_addrinfo(conn); + + ch = &conn->connhost[conn->whichhost]; + + /* Initialize hint structure */ + nRet = memset_s(&hint, sizeof(hint), 0, sizeof(hint)); + securec_check_c(nRet, "\0", "\0"); + hint.ai_socktype = SOCK_STREAM; + conn->addrlist_family = hint.ai_family = AF_UNSPEC; + + /* Figure out the port number we're going to use.
*/ + if (ch->port == NULL || ch->port[0] == '\0') { + thisport = DEF_PGPORT; + } else { + if (!parse_int_param(ch->port, &thisport, conn, "port")) + return false; + + if (thisport < 1 || thisport > 65535) { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("invalid port number: \"%s\"\n"), + ch->port); + return false; + } + } + error_ret = snprintf_s(portstr, sizeof(portstr), sizeof(portstr) - 1, "%d", thisport); + securec_check_ss_c(error_ret, "", ""); + + /* Use pg_getaddrinfo_all() to resolve the address */ + switch (ch->type) { + case CHT_HOST_NAME: + ret = pg_getaddrinfo_all(ch->host, portstr, &hint, + &conn->addrlist); + if (ret || !conn->addrlist) { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not translate host name \"%s\" to address: %s\n"), + ch->host, gai_strerror(ret)); + return false; + } + break; + + case CHT_HOST_ADDRESS: + hint.ai_flags = AI_NUMERICHOST; + ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint, + &conn->addrlist); + if (ret || !conn->addrlist) { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not translate host name \"%s\" to address: %s\n"), + ch->hostaddr, gai_strerror(ret)); + return false; + } + break; + + case CHT_UNIX_SOCKET: +#ifdef HAVE_UNIX_SOCKETS + conn->addrlist_family = hint.ai_family = AF_UNIX; + if (!conn->fencedUdfRPCMode) + UNIXSOCK_PATH(portstr, thisport, ch->host); + else + UNIXSOCK_FENCED_MASTER_PATH(portstr, ch->host); + + if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN) { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"), + portstr, + (int) (UNIXSOCK_PATH_BUFLEN - 1)); + return false; + } + + /* + * NULL hostname tells pg_getaddrinfo_all to parse the service + * name as a Unix-domain socket path. + */ + ret = pg_getaddrinfo_all(NULL, portstr, &hint, + &conn->addrlist); + if (ret || !conn->addrlist) { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"), + portstr, gai_strerror(ret)); + return false; + } +#else + Assert(false); +#endif + break; + } + + /* OK, scan this addrlist for a working server address */ + conn->addr_cur = conn->addrlist; + return true; +} + + + /* Reset connection state machine? */ +static void reset_connection_state_machine(PGconn *conn) +{ + /* + * (Re) initialize our connection control variables for a set of + * connection attempts to a single server address. These variables + * must persist across individual connection attempts, but we must + * reset them when we start to consider a new server. + */ + conn->pversion = PG_PROTOCOL(3, 51); + conn->send_appname = true; + +#ifdef USE_SSL + /* setup values based on SSL mode */ + conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */ + conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */ +#endif + + /* Reset conn->status to put the state machine in the right state */ + conn->status = CONNECTION_NEEDED; + +} + + /* Force a new connection (perhaps to the same server as before)? 
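Resolving the address list is only the last of three steps taken when moving to a new candidate server; the full sequence, as wired together in try_next_host() below, is essentially this condensed sketch:

```
/* Sketch: the per-host advance loop inside try_next_host(). */
for (; conn->whichhost < conn->nconnhost; conn->whichhost++) {
    reset_physical_connection(conn);      /* drop socket and server-derived state */
    reset_connection_state_machine(conn); /* protocol version, SSL flags, CONNECTION_NEEDED */
    if (resolve_host_addr(conn))          /* build conn->addrlist for this host */
        break;                            /* ready for PQconnectPoll() */
}
```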
*/ +static void reset_physical_connection(PGconn *conn) +{ + /* Drop any existing connection */ + pqDropConnection(conn, true); + + /* Reset all state obtained from old server */ + pqDropServerData(conn); + + /* Drop any PGresult we might have, too */ + conn->asyncStatus = PGASYNC_IDLE; + conn->xactStatus = PQTRANS_IDLE; + pqClearAsyncResult(conn); + +} + +/* Time to advance to next host */ +static void try_next_host(PGconn *conn) +{ + if (conn->whichhost + 1 < conn->nconnhost) { + conn->whichhost++; + } else { + /* + * Oops, no more hosts. + * + * If we are trying to connect in "prefer-standby" mode, then drop + * the standby requirement and start over. + */ + if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY && + conn->nconnhost > 0) { + conn->target_server_type = SERVER_TYPE_PREFER_STANDBY_PASS2; + conn->whichhost = 0; + } else { + conn->whichhost++; + return; + } + } + + for (; conn->whichhost < conn->nconnhost; conn->whichhost++) { + reset_physical_connection(conn); + + reset_connection_state_machine(conn); + + if (resolve_host_addr(conn)) { + break; + } + } +} + +/* + * This subroutine saves conn->errorMessage, which will be restored by + * the restoreErrorMessage subroutine. + */ +static bool saveErrorMessage(PGconn *conn, PQExpBuffer savedMessage) +{ + initPQExpBuffer(savedMessage); + if (PQExpBufferBroken(savedMessage)) { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory\n")); + return false; + } + + appendPQExpBufferStr(savedMessage, + conn->errorMessage.data); + resetPQExpBuffer(&conn->errorMessage); + return true; +} + +/* + * Restores saved error messages back to conn->errorMessage. + */ +static void restoreErrorMessage(PGconn *conn, PQExpBuffer savedMessage) +{ + appendPQExpBufferStr(savedMessage, conn->errorMessage.data); + resetPQExpBuffer(&conn->errorMessage); + appendPQExpBufferStr(&conn->errorMessage, savedMessage->data); + termPQExpBuffer(savedMessage); +} + +static PostgresPollingStatusType connection_check_target(PGconn* conn) +{ + PQExpBufferData savedMessage; + + if (conn->target_server_type == SERVER_TYPE_READ_WRITE || + conn->target_server_type == SERVER_TYPE_READ_ONLY) { + bool read_only_server; + + /* + * We are yet to make a connection. Save all existing error + * messages until we reach a successful connection state. + * This is important because PQsendQuery is going to reset + * conn->errorMessage and we will lose error messages + * related to previous hosts we tried to connect to and + * failed. + */ + if (!saveErrorMessage(conn, &savedMessage)) + goto omm_return; + + /* + * If the server didn't report + * "default_transaction_read_only" or "in_hot_standby" at + * startup, we must determine its state by sending the + * query "SHOW transaction_read_only". This GUC exists in + * all server versions that support 3.0 protocol. + */ + if (conn->default_transaction_read_only == PG_BOOL_UNKNOWN || + conn->in_hot_standby == PG_BOOL_UNKNOWN) { + conn->status = CONNECTION_OK; + if (!PQsendQuery(conn, + "SHOW transaction_read_only")) + goto error_return; + /* We'll return to this state when we have the answer */ + conn->status = CONNECTION_CHECK_WRITABLE; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_READING; + } + + /* OK, we can make the test */ + read_only_server = + (conn->default_transaction_read_only == PG_BOOL_YES || + conn->in_hot_standby == PG_BOOL_YES); + + if ((conn->target_server_type == SERVER_TYPE_READ_WRITE) ?
+ read_only_server : !read_only_server) { + /* Wrong server state, reject and try the next host */ + if (conn->target_server_type == SERVER_TYPE_READ_WRITE) + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("session is read-only\n")); + else + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("session is not read-only\n")); + + /* Close connection politely. */ + conn->status = CONNECTION_OK; + sendTerminateConn(conn); + + /* + * Try next host if any, but we don't want to consider + * additional addresses for this host. + */ + goto error_return; + } + } else if (conn->target_server_type == SERVER_TYPE_PRIMARY || + conn->target_server_type == SERVER_TYPE_STANDBY || + conn->target_server_type == SERVER_TYPE_PREFER_STANDBY) { + if (!saveErrorMessage(conn, &savedMessage)) + goto omm_return; + + /* + * If the server didn't report "in_hot_standby" at + * startup, we must determine its state by sending the + * query "SELECT pg_catalog.pg_is_in_recovery()". Servers + * before 9.0 don't have that function, but by the same + * token they don't have any standby mode, so we may just + * assume the result. + */ + if (conn->sversion < 90000) /* server version 9.0 = 90000 */ + conn->in_hot_standby = PG_BOOL_NO; + + if (conn->in_hot_standby == PG_BOOL_UNKNOWN) { + conn->status = CONNECTION_OK; + if (!PQsendQuery(conn, + "SELECT pg_catalog.pg_is_in_recovery()")) + goto error_return; + /* We'll return to this state when we have the answer */ + conn->status = CONNECTION_CHECK_STANDBY; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_READING; + } + + /* OK, we can make the test */ + if ((conn->target_server_type == SERVER_TYPE_PRIMARY) ? + (conn->in_hot_standby == PG_BOOL_YES) : + (conn->in_hot_standby == PG_BOOL_NO)) { + /* Wrong server state, reject and try the next host */ + if (conn->target_server_type == SERVER_TYPE_PRIMARY) + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("server is in hot standby mode\n")); + else + appendPQExpBufferStr(&conn->errorMessage, + libpq_gettext("server is not in hot standby mode\n")); + + /* Close connection politely. */ + conn->status = CONNECTION_OK; + sendTerminateConn(conn); + + /* + * Try next host if any, but we don't want to consider + * additional addresses for this host. + */ + goto error_return; + } + } + + /* We can release the address list now. */ + release_conn_addrinfo(conn); + + /* We are open for business! */ + conn->status = CONNECTION_OK; + if (conn->target_server_type != SERVER_TYPE_ANY + && conn->target_server_type != SERVER_TYPE_PREFER_STANDBY_PASS2) { + restoreErrorMessage(conn, &savedMessage); + } + + return PGRES_POLLING_OK; + +/* Unreachable */ + +error_return: + + restoreErrorMessage(conn, &savedMessage); + +omm_return: + /* + * We used to close the socket at this point, but that makes it awkward + * for those above us if they wish to remove this socket from their own + * records (an fd_set for example). We'll just have this socket closed + * when PQfinish is called (which is compulsory even after an error, since + * the connection structure must be freed). + */ + conn->status = CONNECTION_BAD; + return PGRES_POLLING_FAILED; +} + +static PostgresPollingStatusType connection_consume(PGconn* conn) +{ + PGresult *res; + PQExpBufferData savedMessage; + + if (!saveErrorMessage(conn, &savedMessage)) + goto omm_return; + + /* + * This state just makes sure the connection is idle after + * we've obtained the result of a SHOW or SELECT query. 
Once + we're clear, return to CONNECTION_CHECK_TARGET state to + decide what to do next. We must transiently set status = + CONNECTION_OK in order to use the result-consuming + subroutines. + */ + conn->status = CONNECTION_OK; + if (!PQconsumeInput(conn)) + goto error_return; + + if (PQisBusy(conn)) { + conn->status = CONNECTION_CONSUME; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_READING; + } + + /* Call PQgetResult() again until we get a NULL result */ + res = PQgetResult(conn); + if (res != NULL) { + PQclear(res); + conn->status = CONNECTION_CONSUME; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_READING; + } + + conn->status = CONNECTION_CHECK_TARGET; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_ACTIVE; + +/* Unreachable */ + +error_return: + + restoreErrorMessage(conn, &savedMessage); + +omm_return: + + /* + * We used to close the socket at this point, but that makes it awkward + * for those above us if they wish to remove this socket from their own + * records (an fd_set for example). We'll just have this socket closed + * when PQfinish is called (which is compulsory even after an error, since + * the connection structure must be freed). + */ + conn->status = CONNECTION_BAD; + return PGRES_POLLING_FAILED; +} + +static PostgresPollingStatusType connection_check_writable(PGconn* conn) +{ + PGresult *res; + PQExpBufferData savedMessage; + + /* + * We have not made a connection yet. Save any existing error + * messages until we reach a successful connection state. + * This is important because PQsendQuery is going to reset + * conn->errorMessage and we would otherwise lose the error + * messages related to previous hosts we tried and failed to + * connect to. + */ + if (!saveErrorMessage(conn, &savedMessage)) + goto omm_return; + + /* + * Waiting for result of "SHOW transaction_read_only". We + * must transiently set status = CONNECTION_OK in order to use + * the result-consuming subroutines. + */ + conn->status = CONNECTION_OK; + if (!PQconsumeInput(conn)) + goto error_return; + + if (PQisBusy(conn)) { + conn->status = CONNECTION_CHECK_WRITABLE; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_READING; + } + + res = PQgetResult(conn); + if (res && PQresultStatus(res) == PGRES_TUPLES_OK && + PQntuples(res) == 1) { + char *val = PQgetvalue(res, 0, 0); + + /* + * "transaction_read_only = on" proves that at least one + * of default_transaction_read_only and in_hot_standby is + * on, but we don't actually know which. We don't care + * though for the purpose of identifying a read-only + * session, so satisfy the CONNECTION_CHECK_TARGET code by + * claiming they are both on. On the other hand, if it's + * a read-write session, they are certainly both off. + */ + if (strncmp(val, "on", 2) == 0) { /* strlen("on") = 2 */ + conn->default_transaction_read_only = PG_BOOL_YES; + conn->in_hot_standby = PG_BOOL_YES; + } else { + conn->default_transaction_read_only = PG_BOOL_NO; + conn->in_hot_standby = PG_BOOL_NO; + } + PQclear(res); + + /* Finish reading messages before continuing */ + conn->status = CONNECTION_CONSUME; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_ACTIVE; + } + + /* Something went wrong with "SHOW transaction_read_only". */ + if (res) + PQclear(res); + + /* Append error report to conn->errorMessage. */ + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("\"%s\" failed\n"), + "SHOW transaction_read_only"); + + /* Close connection politely.
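connection_consume above implements the standard libpq drain pattern. Pulled out of the state machine, it reduces to the minimal sketch below (the helper name is ours, not part of the patch).

```
/* Drain pattern: consume input, and once libpq is no longer busy, pop
 * results until PQgetResult() returns NULL, marking the end of the
 * current query cycle. */
#include "libpq-fe.h"

static int drain_results(PGconn *conn)
{
    PGresult *res;

    if (!PQconsumeInput(conn))
        return -1;              /* I/O error */
    if (PQisBusy(conn))
        return 0;               /* caller must wait for socket readability */
    while ((res = PQgetResult(conn)) != NULL)
        PQclear(res);
    return 1;                   /* idle: safe to issue the next query */
}
```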
*/ + conn->status = CONNECTION_OK; + sendTerminateConn(conn); + + +/* Unreachable */ + +error_return: + + restoreErrorMessage(conn, &savedMessage); + +omm_return: + + /* + * We used to close the socket at this point, but that makes it awkward + * for those above us if they wish to remove this socket from their own + * records (an fd_set for example). We'll just have this socket closed + * when PQfinish is called (which is compulsory even after an error, since + * the connection structure must be freed). + */ + conn->status = CONNECTION_BAD; + return PGRES_POLLING_FAILED; +} + +static PostgresPollingStatusType connection_check_standby(PGconn* conn) +{ + PGresult *res; + PQExpBufferData savedMessage; + + if (!saveErrorMessage(conn, &savedMessage)) + goto omm_return; + + + /* + * Waiting for result of "SELECT pg_is_in_recovery()". We + * must transiently set status = CONNECTION_OK in order to use + * the result-consuming subroutines. + */ + conn->status = CONNECTION_OK; + if (!PQconsumeInput(conn)) + goto error_return; + + if (PQisBusy(conn)) { + conn->status = CONNECTION_CHECK_STANDBY; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_READING; + } + + res = PQgetResult(conn); + if (res && PQresultStatus(res) == PGRES_TUPLES_OK && + PQntuples(res) == 1) { + char *val = PQgetvalue(res, 0, 0); + + if (strncmp(val, "t", 1) == 0) + conn->in_hot_standby = PG_BOOL_YES; + else + conn->in_hot_standby = PG_BOOL_NO; + PQclear(res); + + /* Finish reading messages before continuing */ + conn->status = CONNECTION_CONSUME; + restoreErrorMessage(conn, &savedMessage); + return PGRES_POLLING_ACTIVE; + } + + /* Something went wrong with "SELECT pg_is_in_recovery()". */ + if (res) + PQclear(res); + + /* Append error report to conn->errorMessage. */ + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("\"%s\" failed\n"), + "SELECT pg_is_in_recovery()"); + + /* Close connection politely. */ + conn->status = CONNECTION_OK; + sendTerminateConn(conn); + + +/* Unreachable */ + +error_return: + + restoreErrorMessage(conn, &savedMessage); + +omm_return: + /* + * We used to close the socket at this point, but that makes it awkward + * for those above us if they wish to remove this socket from their own + * records (an fd_set for example). We'll just have this socket closed + * when PQfinish is called (which is compulsory even after an error, since + * the connection structure must be freed). 
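From the application side, all of these polling states are driven through PQconnectPoll(). A minimal sketch, assuming the upstream-style multi-host conninfo syntax (e.g. "host=a,b target_session_attrs=read-write"); the conninfo string and helper name are illustrative:

```
/* Drive the nonblocking connection state machine to completion. */
#include <sys/select.h>
#include "libpq-fe.h"

static PGconn *connect_nonblocking(const char *conninfo)
{
    PGconn *conn = PQconnectStart(conninfo);
    PostgresPollingStatusType st = PGRES_POLLING_WRITING;

    if (conn == NULL || PQstatus(conn) == CONNECTION_BAD)
        return conn;
    while (st != PGRES_POLLING_OK && st != PGRES_POLLING_FAILED) {
        if (st == PGRES_POLLING_READING || st == PGRES_POLLING_WRITING) {
            fd_set fds;
            int sock = PQsocket(conn);

            FD_ZERO(&fds);
            FD_SET(sock, &fds);
            /* Wait for the direction the state machine asked for. */
            if (select(sock + 1,
                       st == PGRES_POLLING_READING ? &fds : NULL,
                       st == PGRES_POLLING_WRITING ? &fds : NULL,
                       NULL, NULL) < 0)
                break;
        }
        /* PGRES_POLLING_ACTIVE means "call again immediately". */
        st = PQconnectPoll(conn);
    }
    return conn;   /* caller checks PQstatus(conn) == CONNECTION_OK */
}
```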
+ */ + conn->status = CONNECTION_BAD; + return PGRES_POLLING_FAILED; +} + #ifdef ENABLE_UT void uttest_parseServiceInfo(PQconninfoOption* options, PQExpBuffer errorMessage) { diff --git a/src/common/interfaces/libpq/fe-exec.cpp b/src/common/interfaces/libpq/fe-exec.cpp index 04ae5af19..707c64183 100644 --- a/src/common/interfaces/libpq/fe-exec.cpp +++ b/src/common/interfaces/libpq/fe-exec.cpp @@ -1127,8 +1127,8 @@ int pqRowProcessor(PGconn* conn, const char** errmsgp) bool isValueDecrypted = false; dec_dat_res = DEC_DATA_ERR; if (RecordProcessor::DeProcessRecord(conn, columns[i].value, length, - res->attDescs[i].rec->get_original_ids(), res->attDescs[i].format, &deProcessed, length, - &isValueDecrypted)) { + res->attDescs[i].rec->get_original_ids(), res->attDescs[i].rec->get_num_processed_args(), + res->attDescs[i].format, &deProcessed, length, &isValueDecrypted)) { clen = (int)length; if (isValueDecrypted) { dec_dat_res = DEC_DATA_SUCCEED; @@ -1136,7 +1136,6 @@ int pqRowProcessor(PGconn* conn, const char** errmsgp) } } else { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("failed to deprocess data in record")); - conn->client_logic->isInvalidOperationOnColumn = true; // FAILED TO READ goto fail; } } @@ -2130,16 +2129,11 @@ PGresult* PQgetResult(PGconn* conn) } #ifdef HAVE_CE - if (conn->client_logic->enable_client_encryption) { - Assert(conn->client_logic && (conn->client_logic->enable_client_encryption)); - /* - * when res is NULL then it means ReadyForQuery was received - */ - if (res) + /* + * when res is NULL then it means ReadyForQuery was received + */ + if (conn->client_logic->enable_client_encryption && res) { conn->client_logic->m_lastResultStatus = res->resultStatus; - else if (conn->asyncStatus != PGASYNC_BUSY) { - Processor::run_post_query(conn); - } } #endif @@ -2400,8 +2394,7 @@ PGresult* checkRefreshCacheOnError(PGconn* conn) { PGresult* res = NULL; if (conn->client_logic->enable_client_encryption) { - if ((conn->client_logic->isInvalidOperationOnColumn || conn->client_logic->should_refresh_function) && - !conn->client_logic->isDuringRefreshCacheOnError) { + if (conn->client_logic->isInvalidOperationOnColumn && !conn->client_logic->isDuringRefreshCacheOnError) { /* copy query because it will be overwritten */ char* query_to_resend = NULL; size_t last_query_size = 0; @@ -2413,17 +2406,6 @@ PGresult* checkRefreshCacheOnError(PGconn* conn) check_strncpy_s(strncpy_s(query_to_resend, last_query_size + 1, conn->last_query, last_query_size)); query_to_resend[last_query_size] = '\0'; } - if (conn->client_logic->should_refresh_function) { - conn->client_logic->should_refresh_function = false; - if (query_to_resend != NULL) { - conn->client_logic->disable_once = true; - res = PQexec(conn, query_to_resend); - conn->client_logic->disable_once = false; - libpq_free(query_to_resend); - } - conn->client_logic->isDuringRefreshCacheOnError = false; /* reset variable */ - return res; - } /* * if an "invalid operation on a column" was detected it probably means that a DML operation was made on * "encrypted" special column. 
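checkRefreshCacheOnError() (continued just below) re-executes the failed statement after reloading the client-logic cache, and with this patch it now returns that result to the caller instead of discarding it with PQclear(). The general shape of the retry-after-refresh pattern is sketched here; should_retry() and refresh_cache() are hypothetical stand-ins, not functions from this patch.

```
/* Retry-once-after-refresh sketch. The two helpers are hypothetical
 * placeholders for the client-logic checks in the code above. */
#include "libpq-fe.h"

extern int should_retry(PGresult *res);    /* hypothetical */
extern void refresh_cache(PGconn *conn);   /* hypothetical */

static PGresult *exec_with_refresh(PGconn *conn, const char *query)
{
    PGresult *res = PQexec(conn, query);

    if (should_retry(res)) {        /* e.g. DML hit an encrypted column */
        PQclear(res);
        refresh_cache(conn);        /* reload column-encryption metadata */
        res = PQexec(conn, query);  /* result now propagates to the caller */
    }
    return res;
}
```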
however, the client that performed the DML operation was under the @@ -2433,14 +2415,14 @@ PGresult* checkRefreshCacheOnError(PGconn* conn) conn->client_logic->cacheRefreshType = CacheRefreshType::CACHE_ALL; conn->client_logic->m_cached_column_manager->load_cache(conn); if (query_to_resend != NULL) { - PQclear(PQexec(conn, query_to_resend)); + res = PQexec(conn, query_to_resend); free(query_to_resend); } conn->client_logic->isInvalidOperationOnColumn = false; // reset variable conn->client_logic->isDuringRefreshCacheOnError = false; // reset variable } } - return NULL; + return res; } #endif diff --git a/src/common/interfaces/libpq/fe-misc.cpp b/src/common/interfaces/libpq/fe-misc.cpp index 7f25b5e70..82ed47bf1 100644 --- a/src/common/interfaces/libpq/fe-misc.cpp +++ b/src/common/interfaces/libpq/fe-misc.cpp @@ -993,7 +993,7 @@ int pqWaitTimed(int forRead, int forWrite, PGconn* conn, time_t finish_time) if (result == 0) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("wait %s:%s timeout expired\n"), - conn->pghost, conn->pgport); + PQhost(conn), PQport(conn)); return EOF; } diff --git a/src/common/interfaces/libpq/fe-protocol3.cpp b/src/common/interfaces/libpq/fe-protocol3.cpp index dda3861d3..bb49d9d89 100644 --- a/src/common/interfaces/libpq/fe-protocol3.cpp +++ b/src/common/interfaces/libpq/fe-protocol3.cpp @@ -380,6 +380,18 @@ void pqParseInput3(PGconn* conn) /* trust the specified message length as what to skip */ conn->inStart += 5 + msgLength; } +#ifdef HAVE_CE + /* post processing we are now executing after the message has been handled */ + switch (id) { + case 'Z': /* backend is ready for new query */ + if (conn->client_logic->enable_client_encryption) { + Processor::run_post_query(conn); + } + break; + default: + break; + } +#endif // HAVE_CE } } @@ -756,8 +768,74 @@ set_error_result: return 0; } +#ifdef HAVE_CE #define ERRCODE_INVALID_ENCRYPTED_COLUMN_DATA "2200Z" -#define ERRCODE_CL_FUNCTION_UPDATE "42716" +#define ERRCODE_UNDEFINED_FUNCTION "42883" + +typedef struct CLRefreshParams { + CLRefreshParams() + : check_cl_refresh(true), found_internal_cl_type(false), check_function_hint(false) {}; + bool check_cl_refresh; + bool found_internal_cl_type; + bool check_function_hint; +} CLRefreshParams; + +/* + * based on the error response, checking whether the CL cache in the libpq needs to be refreshed + * @param[OUT] - client_logic - session wide state machine + * @param[IN] - id - response token from server + * @param[IN] - data - string response from server + * @param[INOUT] - cl_refresh_params - internal state of the loop this function is in. 
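The pqWaitTimed() fix above matters for multi-host connection strings: PQhost() and PQport() report the connhost entry actually in use, whereas the raw conn->pghost/conn->pgport may still hold the full comma-separated lists. For illustration:

```
/* Report the host actually reached rather than the (possibly
 * comma-separated) host list from the conninfo string. */
#include <stdio.h>
#include "libpq-fe.h"

static void report_endpoint(PGconn *conn)
{
    fprintf(stderr, "connected to %s:%s\n", PQhost(conn), PQport(conn));
}
```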
+ */ +bool cl_refresh(PGClientLogic *client_logic, const char id, const char *data, CLRefreshParams *cl_refresh_params) +{ + if (!client_logic || !data || !cl_refresh_params) { + return false; + } + + switch (id) { + /* + * Severity: the field contents are ERROR, FATAL, or PANIC (in an error message), + * or WARNING, NOTICE, DEBUG, INFO, or LOG (in a notice message) + */ + case 'S': + if (strcmp(data, "ERROR") != 0) { + cl_refresh_params->check_cl_refresh = false; + } + break; + case 'M': /* Message: the primary human-readable error message */ + if (strstr(data, "byteawithoutorder") != 0) { + /* it's only used for the internal state of the while clause */ + cl_refresh_params->found_internal_cl_type = true; + } + break; + case 'C': /* Code: the SQLSTATE code for the error */ + if (strcmp(data, ERRCODE_INVALID_ENCRYPTED_COLUMN_DATA) == 0) { + client_logic->isInvalidOperationOnColumn = true; // failed to WRITE + } else if (strcmp(data, ERRCODE_UNDEFINED_FUNCTION) == 0) { + if (cl_refresh_params->found_internal_cl_type) { + client_logic->isInvalidOperationOnColumn = true; // failed to WRITE + } else { + cl_refresh_params->check_function_hint = true; + } + } + break; + case 'H': /* Hint: an optional suggestion what to do about the problem. */ + if (cl_refresh_params->check_function_hint) { + const char* hint = "No function matches the given name and argument types. " + "You might need to add explicit type casts."; + if (strcmp(data, hint) == 0) { + client_logic->isInvalidOperationOnColumn = true; // failed to WRITE + } + } + break; + default: + break; + } + return true; +} +#endif // HAVE_CE + /* * Attempt to read an Error or Notice response message. * This is possible in several places, so we break it out as a subroutine. @@ -774,6 +852,9 @@ int pqGetErrorNotice3(PGconn* conn, bool isError) const char* querytext = NULL; int querypos = 0; int errcodes = 0; +#ifdef HAVE_CE + CLRefreshParams cl_refresh_params; +#endif /* dbms_output */ /* @@ -805,13 +886,9 @@ int pqGetErrorNotice3(PGconn* conn, bool isError) if (pqGets(&workBuf, conn)) goto fail; #ifdef HAVE_CE - if (strcmp(workBuf.data, ERRCODE_INVALID_ENCRYPTED_COLUMN_DATA)==0) { - conn->client_logic->isInvalidOperationOnColumn = true; // failed to WRITE - } else if (strcmp(workBuf.data, ERRCODE_CL_FUNCTION_UPDATE)==0) { - conn->client_logic->should_refresh_function = true; // failed to WRITE - goto fail; + if (cl_refresh_params.check_cl_refresh) { + (void)cl_refresh(conn->client_logic, id, workBuf.data, &cl_refresh_params); } - pqSaveMessageField(res, id, workBuf.data, conn); #else pqSaveMessageField(res, id, workBuf.data); diff --git a/src/common/interfaces/libpq/fe-secure.cpp b/src/common/interfaces/libpq/fe-secure.cpp index 01f8ac25c..764ba08c5 100644 --- a/src/common/interfaces/libpq/fe-secure.cpp +++ b/src/common/interfaces/libpq/fe-secure.cpp @@ -113,6 +113,10 @@ static int SSL_CTX_set_cipher_list_ex(SSL_CTX* ctx, const char* ciphers[], const static THR_LOCAL bool pq_init_ssl_lib = true; static THR_LOCAL bool pq_init_crypto_lib = true; +#ifdef ENABLE_UT +#define static +#endif +static THR_LOCAL bool g_client_crl_err = false; #ifndef ENABLE_UT static bool set_client_ssl_ciphers(); /* set client security cipherslist*/ #else @@ -141,14 +145,13 @@ static long win32_ssl_create_mutex = 0; /* security ciphers suites in SSL connection */ static const char* ssl_ciphers_map[] = { - TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 */ - TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 
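The cl_refresh() routine above is fed one ErrorResponse field at a time while pqGetErrorNotice3() parses the message; the field ids ('S', 'C', 'M', 'H') follow the frontend/backend protocol. A sketch of a sequence that marks the cache for refresh, with illustrative field contents (the types come from the patch itself, so this is not standalone code):

```
/* Illustrative call sequence: an ERROR mentioning a byteawithoutorder
 * type with SQLSTATE 42883 flags an invalid operation on an encrypted
 * column, so the client-logic cache will be refreshed. */
static void example_feed_error_fields(PGClientLogic *client_logic)
{
    CLRefreshParams params;  /* constructor leaves check_cl_refresh = true */

    (void)cl_refresh(client_logic, 'S', "ERROR", &params);
    (void)cl_refresh(client_logic, 'M',
                     "function byteawithoutorderwithequalcol_eq(...) does not exist",
                     &params);
    (void)cl_refresh(client_logic, 'C', "42883", &params);
    /* client_logic->isInvalidOperationOnColumn is now true, so
     * checkRefreshCacheOnError() will reload the cache and retry. */
}
```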
*/ - TLS1_TXT_DHE_RSA_WITH_AES_128_CCM, /* TLS_DHE_RSA_WITH_AES_128_CCM */ - TLS1_TXT_DHE_RSA_WITH_AES_256_CCM, /* TLS_DHE_RSA_WITH_AES_256_CCM */ - TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 */ - TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ + TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 */ + TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ + /* The following are compatible with earlier versions of the server. */ + TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, */ + TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 */ NULL}; #endif /* SSL */ @@ -895,51 +898,49 @@ static #endif int verify_cb(int ok, X509_STORE_CTX* ctx) { - int cert_error = X509_STORE_CTX_get_error(ctx); + if (ok) { + return ok; + } - if (!ok) - { - switch (cert_error) - { - case X509_V_ERR_CRL_HAS_EXPIRED: - ok = 1; - break; - case X509_V_ERR_UNABLE_TO_GET_CRL: - ok = 1; - break; - case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE: - ok = 1; - break; - case X509_V_ERR_CRL_SIGNATURE_FAILURE: - ok = 1; - break; - case X509_V_ERR_CRL_NOT_YET_VALID: - ok = 1; - break; - case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD: - ok = 1; - break; - case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD: - ok = 1; - break; - case X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER: - ok = 1; - break; - case X509_V_ERR_KEYUSAGE_NO_CRL_SIGN: - ok = 1; - break; - case X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION: - ok = 1; - break; - case X509_V_ERR_DIFFERENT_CRL_SCOPE: - ok = 1; - break; - case X509_V_ERR_CRL_PATH_VALIDATION_ERROR: - ok = 1; - break; - default: + /* + * When the CRL is abnormal, it won't be used to check whether the certificate is revoked, + * and the services shouldn't be affected due to the CRL exception. + */ + const int crl_err_scenarios[] = { + X509_V_ERR_CRL_HAS_EXPIRED, + X509_V_ERR_UNABLE_TO_GET_CRL, + X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE, + X509_V_ERR_CRL_SIGNATURE_FAILURE, + X509_V_ERR_CRL_NOT_YET_VALID, + X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD, + X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD, + X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER, + X509_V_ERR_KEYUSAGE_NO_CRL_SIGN, + X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION, + X509_V_ERR_DIFFERENT_CRL_SCOPE, + X509_V_ERR_CRL_PATH_VALIDATION_ERROR + }; + bool ignore_crl_err = false; + int err_code = X509_STORE_CTX_get_error(ctx); + + if (!g_client_crl_err) { + for (size_t i = 0; i < sizeof(crl_err_scenarios) / sizeof(crl_err_scenarios[0]); i++) { + if (err_code == crl_err_scenarios[i]) { + g_client_crl_err = true; + ignore_crl_err = true; break; + } } + } else { + if (err_code == X509_V_ERR_CERT_REVOKED) { + g_client_crl_err = false; /* reset */ + ignore_crl_err = true; + } + } + + if (ignore_crl_err) { + X509_STORE_CTX_set_error(ctx, X509_V_OK); + ok = 1; } return ok; @@ -1007,6 +1008,7 @@ static bool verify_peer_name_matches_certificate(PGconn* conn) int r; int len; bool result = false; + char* host = conn->connhost[conn->whichhost].host; /* * If told not to verify the peer name, don't do it. Return true @@ -1050,22 +1052,22 @@ static bool verify_peer_name_matches_certificate(PGconn* conn) * We got the peer's common name. Now compare it against the originally * given hostname. 
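verify_cb above is only consulted when OpenSSL has already failed a check; downgrading the listed CRL errors (and a subsequent X509_V_ERR_CERT_REVOKED verdict, since it was derived from the broken CRL) keeps connections usable when the CRL file is stale or malformed. A sketch of where such a callback is installed, using standard OpenSSL calls:

```
/* The callback is registered per SSL_CTX; OpenSSL invokes it once per
 * certificate in the chain during handshake verification. */
#include <openssl/ssl.h>

static void install_verify_callback(SSL_CTX *ctx,
                                    int (*cb)(int, X509_STORE_CTX *))
{
    /* SSL_VERIFY_PEER requests peer-certificate verification; returning 1
     * from the callback (after X509_STORE_CTX_set_error(..., X509_V_OK))
     * tolerates the downgraded error. */
    SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, cb);
}
```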
*/ - if (!((conn->pghost != NULL) && conn->pghost[0] != '\0')) { + if (!((host != NULL) && host[0] != '\0')) { printfPQExpBuffer( &conn->errorMessage, libpq_gettext("host name must be specified for a verified SSL connection\n")); result = false; } else { - if (pg_strcasecmp(peer_cn, conn->pghost) == 0) + if (pg_strcasecmp(peer_cn, host) == 0) /* Exact name match */ result = true; - else if (wildcard_certificate_match(peer_cn, conn->pghost)) + else if (wildcard_certificate_match(peer_cn, host)) /* Matched wildcard certificate */ result = true; else { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("server common name \"%s\" does not match host name \"%s\"\n"), peer_cn, - conn->pghost); + host); result = false; } } @@ -1438,6 +1440,44 @@ int LoadSslKeyFile(PGconn* conn, bool have_homedir, const PathData *homedir, boo return 0; } +void LoadSslCrlFile(PGconn* conn, bool have_homedir, const PathData *homedir) +{ + struct stat buf; + char fnbuf[MAXPGPATH] = {0}; + errno_t rc = 0; + int nRet = 0; + bool userSetSslCrl = false; + X509_STORE* cvstore = SSL_CTX_get_cert_store(SSL_context); + if (cvstore == NULL) { + return; + } + + if ((conn->sslcrl != NULL) && strlen(conn->sslcrl) > 0) { + rc = strncpy_s(fnbuf, MAXPGPATH, conn->sslcrl, strlen(conn->sslcrl)); + securec_check_c(rc, "\0", "\0"); + fnbuf[MAXPGPATH - 1] = '\0'; + userSetSslCrl = true; + } else if (have_homedir) { + nRet = snprintf_s(fnbuf, MAXPGPATH, MAXPGPATH - 1, "%s/%s", homedir->data, ROOT_CRL_FILE); + securec_check_ss_c(nRet, "\0", "\0"); + } else { + fnbuf[0] = '\0'; + } + + if (fnbuf[0] == '\0') { + return; + } + + /* Set the flags to check against the complete CRL chain */ + if (stat(fnbuf, &buf) == 0 && X509_STORE_load_locations(cvstore, fnbuf, NULL) == 1) { + (void)X509_STORE_set_flags(cvstore, X509_V_FLAG_CRL_CHECK); + } else if (userSetSslCrl) { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not load SSL certificate revocation list (file \"%s\")\n"), fnbuf); + fprintf(stdout, "Warning: could not load SSL certificate revocation list (file \"%s\")\n", fnbuf); + } +} + #define MAX_CERTIFICATE_DEPTH_SUPPORTED 20 /* The max certificate depth supported. */ int LoadRootCertFile(PGconn* conn, bool have_homedir, const PathData *homedir) { @@ -1476,31 +1516,7 @@ int LoadRootCertFile(PGconn* conn, bool have_homedir, const PathData *homedir) return -1; } #endif - if (SSL_CTX_get_cert_store(SSL_context) != NULL) { - if ((conn->sslcrl != NULL) && strlen(conn->sslcrl) > 0) { - rc = strncpy_s(fnbuf, MAXPGPATH, conn->sslcrl, strlen(conn->sslcrl)); - securec_check_c(rc, "\0", "\0"); - fnbuf[MAXPGPATH - 1] = '\0'; - } else if (have_homedir) { - nRet = snprintf_s(fnbuf, MAXPGPATH, MAXPGPATH - 1, "%s/%s", homedir->data, ROOT_CRL_FILE); - securec_check_ss_c(nRet, "\0", "\0"); - } else - fnbuf[0] = '\0'; - - /* Set the flags to check against the complete CRL chain */ - if (fnbuf[0] != '\0' && stat(fnbuf, &buf) == 0) { - if (X509_STORE_load_locations(SSL_CTX_get_cert_store(SSL_context), fnbuf, NULL) == 1) { - (void)X509_STORE_set_flags( - SSL_CTX_get_cert_store(SSL_context), X509_V_FLAG_CRL_CHECK); - } else { - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not load SSL certificate revocation list (file \"%s\")\n"), - fnbuf); - return -1; - } - } - } - + LoadSslCrlFile(conn, have_homedir, homedir); /* Check the DH length to make sure it's at least 2048. 
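The new LoadSslCrlFile() boils down to the OpenSSL sequence sketched below. Note that X509_V_FLAG_CRL_CHECK only checks the leaf certificate against the CRL; X509_V_FLAG_CRL_CHECK_ALL would cover the whole chain. The path argument is illustrative.

```
/* Load a CRL file into the context's certificate store and enable CRL
 * checking for the peer (leaf) certificate. */
#include <openssl/ssl.h>
#include <openssl/x509_vfy.h>

static int load_crl_file(SSL_CTX *ctx, const char *crl_path)
{
    X509_STORE *store = SSL_CTX_get_cert_store(ctx);

    if (store == NULL)
        return 0;
    if (X509_STORE_load_locations(store, crl_path, NULL) != 1)
        return 0;   /* missing or unreadable CRL file */
    return X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK);
}
```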
*/ SSL_set_security_callback(conn->ssl, ssl_security_DH_ECDH_cb); @@ -1547,6 +1563,7 @@ int initialize_SSL(PGconn* conn) errno_t rc = 0; int retval = 0; + g_client_crl_err = false; /* * We'll need the home directory if any of the relevant parameters are * defaulted. If pqGetHomeDirectory fails, act as though none of the diff --git a/src/common/interfaces/libpq/frontend_parser/gram.y b/src/common/interfaces/libpq/frontend_parser/gram.y index bd097f36c..1e8a32eab 100755 --- a/src/common/interfaces/libpq/frontend_parser/gram.y +++ b/src/common/interfaces/libpq/frontend_parser/gram.y @@ -291,7 +291,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; %type func_type %type opt_nowait -%type OptTemp +%type OptTemp opt_wait %type OnCommitOption %type for_locking_item @@ -499,7 +499,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; BACKWARD BARRIER BEFORE BEGIN_NON_ANOYBLOCK BEGIN_P BETWEEN BIGINT BINARY BINARY_DOUBLE BINARY_INTEGER BIT BLANKS BLOB_P BLOCKCHAIN BODY_P BOGUS BOOLEAN_P BOTH BUCKETCNT BUCKETS BY BYTEAWITHOUTORDER BYTEAWITHOUTORDERWITHEQUAL - CACHE CALL CALLED CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P + CACHE CALL CALLED CANCELABLE CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P CHARACTER CHARACTERISTICS CHARACTERSET CHECK CHECKPOINT CLASS CLEAN CLIENT CLIENT_MASTER_KEY CLIENT_MASTER_KEYS CLOB CLOSE CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMN_ENCRYPTION_KEY COLUMN_ENCRYPTION_KEYS COMMENT COMMENTS COMMIT CONNECT COMMITTED COMPACT COMPATIBLE_ILLEGAL_CHARS COMPLETE COMPRESS CONDITION CONCURRENTLY CONFIGURATION CONNECTBY CONNECTION CONSTANT CONSTRAINT CONSTRAINTS @@ -548,7 +548,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; OBJECT_P OF OFF OFFSET OIDS ON ONLY OPERATOR OPTIMIZATION OPTION OPTIONALLY OPTIONS OR ORDER OUT_P OUTER_P OVER OVERLAPS OVERLAY OWNED OWNER - PACKAGE PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION + PACKAGE PACKAGES PARSER PARTIAL PARTITION PARTITIONS PASSING PASSWORD PCTFREE PER_P PERCENT PERFORMANCE PERM PLACING PLAN PLANS POLICY POSITION /* PGXC_BEGIN */ POOL PRECEDING PRECISION /* PGXC_END */ @@ -581,7 +581,7 @@ extern THR_LOCAL bool stmt_contains_operator_plus; VACUUM VALID VALIDATE VALIDATION VALIDATOR VALUE_P VALUES VARCHAR VARCHAR2 VARIABLES VARIADIC VARRAY VARYING VCGROUP VERBOSE VERIFY VERSION_P VIEW VOLATILE - WEAK WHEN WHERE WHITESPACE_P WINDOW WITH WITHIN WITHOUT WORK WORKLOAD WRAPPER WRITE + WAIT WEAK WHEN WHERE WHITESPACE_P WINDOW WITH WITHIN WITHOUT WORK WORKLOAD WRAPPER WRITE XML_P XMLATTRIBUTES XMLCONCAT XMLELEMENT XMLEXISTS XMLFOREST XMLPARSE XMLPI XMLROOT XMLSERIALIZE @@ -7339,6 +7339,22 @@ for_locking_item: n->lockedRels = $3; n->forUpdate = TRUE; n->noWait = $4; + n->waitSec = 0; + $$ = (Node *) n; + } + | FOR UPDATE locked_rels_list opt_wait + { + LockingClause *n = makeNode(LockingClause); + n->lockedRels = $3; + n->forUpdate = TRUE; + n->noWait = false; + n->waitSec = $4; + /* When the delay time is 0, the processing is based on the nowait logic. 
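For reference, the new opt_wait production accepts row-lock statements of the shape shown below, where WAIT 0 degrades to the NOWAIT behavior per the grammar action above. The table name and connection are hypothetical.

```
/* Client-side illustration of the new lock-wait syntax: wait up to five
 * seconds for the row lock before giving up. */
#include "libpq-fe.h"

static PGresult *lock_row_with_wait(PGconn *conn)
{
    return PQexec(conn,
        "SELECT balance FROM accounts WHERE id = 1 FOR UPDATE WAIT 5");
}
```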
*/ + if (n->waitSec == 0) { + n->noWait = true; + } else { + n->noWait = false; + } $$ = (Node *) n; } | FOR SHARE locked_rels_list opt_nowait @@ -7347,6 +7363,7 @@ for_locking_item: n->lockedRels = $3; n->forUpdate = FALSE; n->noWait = $4; + n->waitSec = 0; $$ = (Node *) n; } ; @@ -7355,6 +7372,9 @@ opt_nowait: NOWAIT { $$ = TRUE; } | /*EMPTY*/ { $$ = FALSE; } ; +opt_wait: WAIT Iconst { $$ = $2; } + ; + locked_rels_list: OF qualified_name_list { $$ = $2; } | /* EMPTY */ { $$ = NIL; } @@ -8215,7 +8235,7 @@ character: CHARACTER opt_varying | CHAR_P opt_varying { $$ = (char *)($2 ? "varchar": "bpchar"); } | NVARCHAR - { $$ = "nvarchar2"; } + { $$ = "nvarchar2"; } | NVARCHAR2 { $$ = "nvarchar2"; } | VARCHAR @@ -11024,6 +11044,7 @@ unreserved_keyword: | CACHE | CALL | CALLED + | CANCELABLE | CASCADE | CASCADED | CATALOG_P @@ -11393,6 +11414,7 @@ unreserved_keyword: | VERSION_P | VIEW | VOLATILE + | WAIT | WEAK | WHITESPACE_P | WITHIN diff --git a/src/common/interfaces/libpq/jdbc/CMakeLists.txt b/src/common/interfaces/libpq/jdbc/CMakeLists.txt index 7784f1bf5..793b570d8 100644 --- a/src/common/interfaces/libpq/jdbc/CMakeLists.txt +++ b/src/common/interfaces/libpq/jdbc/CMakeLists.txt @@ -20,12 +20,16 @@ set(gauss_cl_jni_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPT set(gauss_cl_jni_LINK_OPTIONS ${LIB_LINK_OPTIONS}) add_shared_libtarget(gauss_cl_jni TGT_gauss_cl_jni_SRC TGT_gauss_cl_jni_INC "${gauss_cl_jni_DEF_OPTIONS}" "${gauss_cl_jni_COMPILE_OPTIONS}" "${gauss_cl_jni_LINK_OPTIONS}") if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - add_dependencies(gauss_cl_jni gs_ktool) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + add_dependencies(gauss_cl_jni gs_ktool) + endif() endif() add_dependencies(gauss_cl_jni pq_ce) -target_link_libraries(gauss_cl_jni PRIVATE -lsecurec -lrt -lz -lpq_ce -lcjson -lcurl) +target_link_libraries(gauss_cl_jni PRIVATE -l${SECURE_C_CHECK} -lrt -lz -lpq_ce -lcjson -lcurl) if(NOT "${ENABLE_MULTIPLE_NODES}_${ENABLE_PRIVATEGAUSS}" STREQUAL "OFF_OFF") - target_link_libraries(gauss_cl_jni PRIVATE -lgs_ktool -lkmc) + if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + target_link_libraries(gauss_cl_jni PRIVATE -lgs_ktool -lkmc) + endif() endif() target_link_directories(gauss_cl_jni PUBLIC ${KMC_LIB_PATH} ${CJSON_LIB_PATH} ${LIBCURL_LIB_PATH} ${LIBOPENSSL_LIB_PATH} ${SECURE_LIB_PATH} ${CMAKE_BINARY_DIR}/lib ${ZLIB_LIB_PATH}) INSTALL(TARGETS gauss_cl_jni LIBRARY DESTINATION lib) diff --git a/src/common/interfaces/libpq/jdbc/Makefile b/src/common/interfaces/libpq/jdbc/Makefile index 369fc34b6..b586d941e 100644 --- a/src/common/interfaces/libpq/jdbc/Makefile +++ b/src/common/interfaces/libpq/jdbc/Makefile @@ -4,6 +4,7 @@ backenddir = $(top_builddir)/src/common/backend include $(top_builddir)/src/Makefile.global SUBDIRS=client_logic_jni - +clean: + rm -f client_logic_jni/*.o $(recurse) diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/Makefile b/src/common/interfaces/libpq/jdbc/client_logic_jni/Makefile index db8210cec..e52814bee 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/Makefile +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/Makefile @@ -16,7 +16,9 @@ NAME= libgauss_cl_jni INCLUDES = -I$(top_builddir)/src/include -I"$(JAVA_HOME)/include" -I"$(JAVA_HOME)/include/linux" -I$(top_builddir)/src/include/libpq -I../../ -I$(LIBOPENSSL_INCLUDE_PATH) -I$(KERBEROS_INCLUDE_PATH) SHLIB_LINK := -L"../../" -lpq_ce -lrt -lz -L$(CJSON_LIB_PATH) -L$(LIBCURL_LIB_PATH) -lcjson -lcurl ifneq 
($(enable_multiple_nodes)_$(enable_privategauss), no_no) -SHLIB_LINK += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc + ifneq ($(enable_lite_mode), yes) + SHLIB_LINK += -L$(top_builddir)/../distribute/bin/gs_ktool/ -lgs_ktool -l$(SECURE_C_CHECK) -L$(LIBKMC_LIB_PATH) -lkmc + endif endif override CPPFLAGS := ${INCLUDES} -I$(CJSON_INCLUDE_PATH) -I$(LIBCURL_INCLUDE_PATH) -DHAVE_CE @@ -26,8 +28,7 @@ driver_error.cpp \ jni_string_convertor.cpp \ jni_util.cpp \ jni_logger.cpp \ -org_postgresql_jdbc_ClientLogicImpl.cpp \ -com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.cpp +org_postgresql_jdbc_ClientLogicImpl.cpp OBJS = $(CPP_SRCS:.cpp=.o) diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.cpp b/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.cpp index 911863e96..4aa8a8a94 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.cpp +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.cpp @@ -75,7 +75,7 @@ void clean_empty_conn_4cl(PGconn *conn) } libpq_free(conn->dbName); if (conn->client_logic != NULL) { -#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS))) +#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE)))) free_kms_cache(conn->client_logic->client_cache_id); #endif delete conn->client_logic; @@ -148,7 +148,7 @@ bool ClientLogicJNI::set_kms_info(const char *key, const char *value) return false; /* should never happen */ } -#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS))) +#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE)))) CmkemErrCode ret = CMKEM_SUCCEED; ret = set_kms_cache_auth_info(m_stub_conn->client_logic->client_cache_id, key, value); if (ret != CMKEM_SUCCEED) { @@ -290,14 +290,15 @@ const int* ClientLogicJNI::get_record_data_oids(int oid, const char *column_name } bool ClientLogicJNI::process_record_data(const char *data_2_process, const int *original_oids, - unsigned char **proccessed_data, bool *is_client_logic, size_t &length_output, DriverError *status) + size_t original_oids_length, unsigned char **proccessed_data, + bool *is_client_logic, size_t &length_output, DriverError *status) { if (status == NULL || data_2_process == NULL || proccessed_data == NULL) { return false; } size_t converted_len = strlen(data_2_process); - if (!RecordProcessor::DeProcessRecord(m_stub_conn, data_2_process, converted_len, original_oids, 0, - proccessed_data, length_output, is_client_logic)) { + if (!RecordProcessor::DeProcessRecord(m_stub_conn, data_2_process, converted_len, original_oids, + original_oids_length, 0, proccessed_data, length_output, is_client_logic)) { status->set_error(JNI_SYSTEM_ERROR_CODES::CLIENT_LOGIC_FAILED); return false; } @@ -310,10 +311,11 @@ bool ClientLogicJNI::process_record_data(const char *data_2_process, const int * /* * runs the post query client logic function to clean its state machine * @param status + * @param statement_name when issued for prepared statement contains the statement name, otherwise an empty string * @param[out] status error information if any * @return true on success or false on failure */ -bool ClientLogicJNI::run_post_query(DriverError *status) +bool ClientLogicJNI::run_post_query(const char *statement_name, DriverError *status) { if (status == NULL) { return false; @@ -324,6 +326,11 @@ bool ClientLogicJNI::run_post_query(DriverError *status) return false; } 
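A hypothetical JNI-layer caller of the changed run_post_query() interface, passing the prepared statement's name so post-query processing can bind to it (an empty string keeps the old simple-query behavior); ClientLogicJNI and DriverError come from the surrounding sources:

```
/* Sketch only: finish client-logic processing for a named prepared
 * statement ("stmt1" is an illustrative name). */
static bool finish_prepared(ClientLogicJNI *client_logic, const char *stmt_name)
{
    DriverError status(0, "");

    /* Pass the statement name ("" for simple queries) so the client-logic
     * state machine can bind post-query processing to it. */
    return client_logic->run_post_query(stmt_name, &status);
}
```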
m_stub_conn->client_logic->m_lastResultStatus = PGRES_COMMAND_OK; + if (strlen(statement_name) > 0) { + check_strncpy_s(strncpy_s(m_stub_conn->client_logic->lastStmtName, NAMEDATALEN, statement_name, + strlen(statement_name))); + } + Processor::run_post_query(m_stub_conn); m_post_query_needed = false; return true; @@ -362,8 +369,9 @@ bool ClientLogicJNI::preare_statement(const char *query, const char *statement_n } // run_post_query is required here to make replace parameters work m_stub_conn->client_logic->m_lastResultStatus = PGRES_COMMAND_OK; + m_stub_conn->queryclass = PGQUERY_PREPARE; Processor::run_post_query(m_stub_conn); - m_post_query_needed = false; + m_stub_conn->queryclass = PGQUERY_SIMPLE; // Setting it back to PGQUERY_SIMPLE as the default return true; } /* @@ -617,3 +625,13 @@ void ClientLogicJNI::set_jni_env_and_cl_impl(JNIEnv *env, jobject jni_cl_impl) { m_stub_conn->client_logic->m_data_fetcher_manager->set_jni_env_and_cl_impl(env, jni_cl_impl); } + +/** + * Reloads the client logic cache, called when there is an error related to missing client logic cache + */ +void ClientLogicJNI::reload_cache() const +{ + JNI_LOG_DEBUG("in reload_cache"); + m_stub_conn->client_logic->cacheRefreshType = CacheRefreshType::CACHE_ALL; + m_stub_conn->client_logic->m_cached_column_manager->load_cache(m_stub_conn); // Load the cache +} diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.h b/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.h index a90a92b48..e10fe4117 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.h +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/client_logic_jni.h @@ -52,7 +52,7 @@ public: bool set_kms_info(const char *key, const char *value); static bool from_handle(long handle, ClientLogicJNI **handle_ptr, DriverError *status); bool run_pre_query(const char *original_query, DriverError *status); - bool run_post_query(DriverError *status); + bool run_post_query(const char *statement_name, DriverError *status); bool preare_statement(const char *query, const char *statement_name, size_t parameter_count, DriverError *status); bool replace_statement_params(const char *statement_name, const char * const param_values[], size_t parameter_count, DriverError *status); @@ -62,12 +62,13 @@ public: size_t &length_output, DriverError *status) const; size_t get_record_data_oid_length(int oid, const char* column_name); const int *get_record_data_oids(int oid, const char* column_name); - bool process_record_data(const char *data_2_process, const int *original_oids, unsigned char **proccessed_data, - bool *is_encreypted, size_t &length_output, DriverError *status); + bool process_record_data(const char *data_2_process, const int *original_oids, size_t original_oids_length, + unsigned char **proccessed_data, bool *is_encreypted, size_t &length_output, DriverError *status); bool replace_message(const char *original_message, char **new_message, DriverError *status) const; const char *get_new_query(const char *query); void set_jni_env_and_cl_impl(JNIEnv *env, jobject jni_cl_impl); void reload_cache_if_needed() const; + void reload_cache() const; private: PGconn *m_stub_conn = NULL; diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.cpp b/src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.cpp deleted file mode 100644 index b48a11dda..000000000 --- 
a/src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.cpp - * - * IDENTIFICATION - * src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.cpp - * - * ------------------------------------------------------------------------- - */ - -#include "com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.h" -#include "org_postgresql_jdbc_ClientLogicImpl.h" - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_linkClientLogicImpl(JNIEnv *env, - jobject jdbc_cl_impl, jstring database_name_java) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_linkClientLogicImpl(env, jdbc_cl_impl, database_name_java); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_setKmsInfo(JNIEnv *env, - jobject java_object, jlong handle, jstring key, jstring value) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_setKmsInfoImpl(env, java_object, handle, key, value); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runQueryPreProcessImpl(JNIEnv *env, - jobject java_object, jlong handle, jstring original_query_java) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_runQueryPreProcessImpl(env, java_object, handle, - original_query_java); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runQueryPostProcessImpl(JNIEnv *env, - jobject java_object, jlong handle) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_runQueryPostProcessImpl(env, java_object, handle); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runClientLogicImpl(JNIEnv *env, - jobject java_object, jlong handle, jstring data_to_process_java, jint data_type) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_runClientLogicImpl(env, java_object, handle, data_to_process_java, - data_type); -} - -JNIEXPORT jobjectArray JNICALL JJava_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_getRecordIDsImpl (JNIEnv *env, - jobject java_object, jlong handle, jstring column_name_java, jint oid) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_getRecordIDsImpl(env, java_object, handle, column_name_java, oid); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runClientLogic4RecordImpl(JNIEnv *env, - jobject java_object, jlong handle, jstring data_to_process_java, jintArray original_oids_java) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_runClientLogic4RecordImpl(env, java_object, handle, - data_to_process_java, original_oids_java); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_prepareQueryImpl(JNIEnv *env, - jobject java_object, jlong handle, jstring query_java, jstring statement_name_java, jint parameter_count) -{ - return 
Java_org_postgresql_jdbc_ClientLogicImpl_prepareQueryImpl(env, java_object, handle, query_java, - statement_name_java, parameter_count); -} - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_replaceStatementParamsImpl( - JNIEnv *env, jobject java_object, jlong handle, jstring statement_name_java, jobjectArray parameters_java) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_replaceStatementParamsImpl(env, java_object, handle, - statement_name_java, parameters_java); -} - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: replaceErrorMessageImpl - * Signature: (JLjava/lang/String;)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_replaceErrorMessageImpl(JNIEnv *env, - jobject java_object, jlong handle, jstring message) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_replaceErrorMessageImpl(env, java_object, handle, message); -} - - -JNIEXPORT void JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_destroy(JNIEnv *env, jobject java_object, - jlong handle) -{ - return Java_org_postgresql_jdbc_ClientLogicImpl_destroy(env, java_object, handle); -} diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.h b/src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.h deleted file mode 100644 index 21400bc3d..000000000 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- * ------------------------------------------------------------------------- - * - * com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.h - * - * IDENTIFICATION - * src/common/interfaces/libpq/jdbc/client_logic_jni/com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl.h - * - * ------------------------------------------------------------------------- - */ - -#ifndef _Included_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl -#define _Included_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: org_postgresql_jdbc_ClientLogicImpl - * Method: linkClientLogicImpl - * Signature: ()[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_linkClientLogicImpl - (JNIEnv *, jobject, jstring); - -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_setKmsInfoImpl(JNIEnv *env, - jobject java_object, jlong handle, jstring key, jstring info); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: runQueryPreProcessImpl - * Signature: (JLjava/lang/String;)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runQueryPreProcessImpl(JNIEnv *, - jobject, jlong, jstring); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: runQueryPostProcessImpl - * Signature: (J)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runQueryPostProcessImpl(JNIEnv *, - jobject, jlong); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: runClientLogicImpl - * Signature: (JLjava/lang/String;I)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runClientLogicImpl(JNIEnv *, jobject, - jlong, jstring, jint); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: getRecordIDsImpl - * Signature: (JLjava/lang/String;I)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL JJava_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_getRecordIDsImpl(JNIEnv *, - jobject, jlong, jstring, jint); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: runClientLogic4RecordImpl - * Signature: (JLjava/lang/String;[I)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_runClientLogic4RecordImpl(JNIEnv *, - jobject, jlong, jstring, jintArray); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: prepareQueryImpl - * Signature: (JLjava/lang/String;Ljava/lang/String;I)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_prepareQueryImpl(JNIEnv *, jobject, - jlong, jstring, jstring, jint); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: replaceStatementParamsImpl - * Signature: (JLjava/lang/String;[Ljava/lang/String;)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_replaceStatementParamsImpl(JNIEnv *, - jobject, jlong, jstring, jobjectArray); - -/* - * Class: com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: replaceErrorMessageImpl - * Signature: (JLjava/lang/String;)[Ljava/lang/Object; - */ -JNIEXPORT jobjectArray JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_replaceErrorMessageImpl(JNIEnv *, - jobject, jlong, jstring); - -/* - * Class: 
com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl - * Method: destroy - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_com_huawei_gauss200_jdbc_jdbc_ClientLogicImpl_destroy(JNIEnv *, jobject, jlong); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_string_convertor.h b/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_string_convertor.h index 1a85d2dbe..81d85aa97 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_string_convertor.h +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_string_convertor.h @@ -23,8 +23,9 @@ #ifndef JNI_STRING_CONVERTOR_H_ #define JNI_STRING_CONVERTOR_H_ - +#ifndef ENABLE_LITE_MODE #include +#endif class JNIStringConvertor { public: diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_util.h b/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_util.h index 9c14392ad..eacc51de4 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_util.h +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/jni_util.h @@ -23,7 +23,9 @@ #ifndef JNI_UTIL_H_ #define JNI_UTIL_H_ +#ifndef ENABLE_LITE_MODE #include +#endif const int ARRAY_SIZE = 2; /* array size */ void set_no_error(JNIEnv *env, jclass objectClass, jobjectArray arrayObject); diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.cpp b/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.cpp index fe56d9834..5a835a2a3 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.cpp +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.cpp @@ -23,7 +23,9 @@ #include "client_logic_jni.h" #include "org_postgresql_jdbc_ClientLogicImpl.h" +#ifndef ENABLE_LITE_MODE #include +#endif #include #include "libpq-fe.h" #include "jni_logger.h" @@ -86,7 +88,7 @@ struct JniResult { return; } set_error(m_env, object_class, array, status->get_error_code(), - status->get_error_message() ? status->get_error_message() : ""); + status->get_error_message() ? status->get_error_message() : ""); } /* * * sets to return OK success @@ -104,7 +106,7 @@ struct JniResult { * @return true for success and false for failures */ bool convert_string(JNIStringConvertor *string_convertor, jstring java_str, DriverError *status, - const char *failure_message) const + const char *failure_message) const { if (string_convertor == NULL || java_str == NULL || status == NULL) { return false; @@ -196,7 +198,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_setKmsIn if (!result.from_handle((long)handle, &client_logic_jni, &status, "setKmsInfoImpl")) { return result.array; } - + if (!result.convert_string(&key, key_java, &status, "setKmsInfo dump kms info")) { return result.array; } @@ -249,7 +251,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runQuery status.set_error(JNI_SYSTEM_ERROR_CODES::STRING_CREATION_FAILED); result.set_error_return(&status); JNI_LOG_ERROR("Java_org_postgresql_jdbc_ClientLogicImpl_runQueryPreProcessImpl error code:%d text:'%s'", - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? 
status.get_error_message() : ""); return result.array; } const char *original_query_dup = client_logic->get_new_query(original_query.c_str); @@ -257,7 +259,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runQuery status.set_error(JNI_SYSTEM_ERROR_CODES::STRING_CREATION_FAILED); result.set_error_return(&status); JNI_LOG_ERROR("Java_org_postgresql_jdbc_ClientLogicImpl_runQueryPreProcessImpl error code:%d text:'%s'", - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); return result.array; } @@ -280,6 +282,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runQuery * @param env pointer to jvm * @param * @param handle pointer to ClientLogicJNI instance + * @param statament_name_java when issued for prepared statement contains the statement name, otherwise an empty string * @return java array * [0][0] - int status code - zero for success * [0][1] - string status description @@ -300,7 +303,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runQuery if (JNI_TRUE == env->IsSameObject(jdbc_cl_impl, NULL)) { fprintf(stderr, "Client encryption run_post_query failed jobject %p was invalid\n", jdbc_cl_impl); } - if (!client_logic->run_post_query(&status)) { + if (!client_logic->run_post_query("", &status)) { JNI_LOG_ERROR("run_post_query failed: %ld, error code: %d error: '%s'", (long)handle, status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); @@ -349,7 +352,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runClien if (!client_logic->deprocess_value(data_to_process.c_str, data_type, &proccessed_data, length_output, &status)) { libpq_free(proccessed_data); JNI_LOG_ERROR("Java_org_postgresql_jdbc_ClientLogicImpl_runClientLogicImpl failed:error code: %d error: '%s'", - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); return result.array; } @@ -448,8 +451,8 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runClien for (size_t index = 0; index < number_of_oids; ++index) { original_oids[index] = oids_java[index]; } - if (!client_logic->process_record_data(data_to_process.c_str, original_oids, &proccessed_data, - &is_client_logic, length_output, &status)) { + if (!client_logic->process_record_data(data_to_process.c_str, original_oids, number_of_oids, + &proccessed_data, &is_client_logic, length_output, &status)) { libpq_free(proccessed_data); JNI_LOG_ERROR( "Java_org_postgresql_jdbc_ClientLogicImpl_runClientLogic4RecordImpl failed:error code: %d error: '%s'", @@ -506,7 +509,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_prepareQ status.set_error(JNI_SYSTEM_ERROR_CODES::STRING_CREATION_FAILED); result.set_error_return(&status); JNI_LOG_ERROR("prepareQuery failed getting the query string error code:%d text:'%s'", status.get_error_code(), - status.get_error_message() ? status.get_error_message() : ""); + status.get_error_message() ? 
status.get_error_message() : ""); return result.array; } JNIStringConvertor statement_name; @@ -516,14 +519,14 @@ client_logic->set_jni_env_and_cl_impl(env, jdbc_cl_impl); if (!client_logic->preare_statement(original_query.c_str, statement_name.c_str, parameter_count, &status)) { JNI_LOG_ERROR("preare_statement call failed: %ld, error code: %d error: '%s'", (long)handle, - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); return result.array; } if (client_logic->get_statement_data() == NULL) { status.set_error(STATEMENT_DATA_EMPTY); JNI_LOG_ERROR("preare_statement get_statement_data call failed: %ld, error code: %d error: '%s'", (long)handle, - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); return result.array; } @@ -609,7 +612,7 @@ client_logic->set_jni_env_and_cl_impl(env, jdbc_cl_impl); if (!client_logic->replace_statement_params(statement_name.c_str, param_values, parameter_count, &status)) { JNI_LOG_ERROR("replace_statement_params failed: %ld, error code: %d error: '%s'", (long)handle, - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); delete[] param_values; delete[] string_convertors; @@ -634,7 +637,7 @@ } } - for (int i = 0; i < parameter_count; ++i) { + for (int i = 0; i < parameter_count && !convert_failure; ++i) { /* * rawValue in INSERT could be NULL or empty string, * we recognize it in the preare_statement routine and make adjusted_param_values[idx] @@ -705,7 +708,7 @@ } if (status.get_error_code() != 0) { JNI_LOG_ERROR("replaceErrorMessage failed: %ld, error code: %d error: '%s'", (long)handle, - status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); + status.get_error_code(), status.get_error_message() ? status.get_error_message() : ""); result.set_error_return(&status); return result.array; } @@ -726,6 +729,25 @@ } return result.array; } +/** + * reloads the client logic cache + * @param env java environment + * @param jdbc_cl_impl pointer back to the Java client logic impl instance + * @param handle client logic instance handle + */ +JNIEXPORT void JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_reloadCacheImpl (JNIEnv *env, + jobject jdbc_cl_impl, jlong handle) +{ + ClientLogicJNI *client_logic = NULL; + DriverError status(0, ""); + if (!ClientLogicJNI::from_handle(handle, &client_logic, &status) || client_logic == NULL) { + JNI_LOG_DEBUG("reloadCacheImpl failed: %ld, error code: %d error: '%s'", (long)handle, status.get_error_code(), + status.get_error_message() ?
status.get_error_message() : ""); + return; + } + client_logic->set_jni_env_and_cl_impl(env, jdbc_cl_impl); + client_logic->reload_cache(); +} /** * reloads the client logic cache ONLY if the server timestamp is later than the client timestamp @@ -759,7 +781,7 @@ JNIEXPORT void JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_destroy(JNIEnv * DriverError status(0, ""); if (!ClientLogicJNI::from_handle(handle, &client_logic, &status) || client_logic == NULL) { JNI_LOG_DEBUG("Destroy failed: %ld, error code: %d error: '%s'", (long)handle, status.get_error_code(), - status.get_error_message() ? status.get_error_message() : ""); + status.get_error_message() ? status.get_error_message() : ""); return; } else { delete client_logic; diff --git a/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.h b/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.h index 3feb3154d..e5acf85df 100644 --- a/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.h +++ b/src/common/interfaces/libpq/jdbc/client_logic_jni/org_postgresql_jdbc_ClientLogicImpl.h @@ -25,7 +25,9 @@ #define _Included_org_postgresql_jdbc_ClientLogicImpl /* DO NOT EDIT THIS FILE - it is machine generated */ +#ifndef ENABLE_LITE_MODE #include +#endif #ifdef __cplusplus extern "C" { #endif @@ -52,7 +54,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runQuery /* * Class: org_postgresql_jdbc_ClientLogicImpl * Method: runQueryPostProcessImpl - * Signature: (J)[Ljava/lang/Object; + * Signature: (JLjava/lang/String;)[Ljava/lang/Object; */ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_runQueryPostProcessImpl(JNIEnv *, jobject, jlong); @@ -112,6 +114,23 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_replaceE */ JNIEXPORT void JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_reloadCacheIfNeededImpl (JNIEnv *, jobject, jlong); +/* + * Class: org_postgresql_jdbc_ClientLogicImpl + * Method: reloadCache + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_reloadCacheImpl + (JNIEnv *, jobject, jlong); + +/* + * Class: Java_org_postgresql_jdbc_ClientLogicImpl_reloadCacheIfNeededImpl + * Method: reloadCache + * Signature: (J)V + */ + +JNIEXPORT void JNICALL Java_org_postgresql_jdbc_ClientLogicImpl_reloadCacheIfNeededImpl + (JNIEnv *, jobject, jlong); + /* * Class: org_postgresql_jdbc_ClientLogicImpl * Method: destroy diff --git a/src/common/pl/plpgsql/src/gram.y b/src/common/pl/plpgsql/src/gram.y index 5f9c8d140..17580149d 100755 --- a/src/common/pl/plpgsql/src/gram.y +++ b/src/common/pl/plpgsql/src/gram.y @@ -5,7 +5,6 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * * IDENTIFICATION @@ -44,6 +43,7 @@ #include "utils/pl_package.h" #include "utils/syscache.h" #include "utils/typcache.h" +#include "knl/knl_session.h" #include @@ -113,7 +113,9 @@ static void cword_is_not_variable(PLcword *cword, int location); static void current_token_is_not_variable(int tok); static void yylex_inparam(StringInfoData* func_inparam, int *params, - int * tok); + int * tok, + int *tableof_func_dno, + int *tableof_var_dno); static int yylex_outparam(char** fieldnames, int *varnos, int nfields, @@ -124,12 +126,12 @@ static int yylex_outparam(char** fieldnames, bool overload = false); static bool 
is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNameList = NULL); -static bool is_logfunction(ScanKeyword* keyword, bool no_parenthesis, const char *name); +static bool is_unreservedkeywordfunction(ScanKeyword* keyword, bool no_parenthesis, const char *name); static bool is_paren_friendly_datatype(TypeName *name); static void plpgsql_parser_funcname(const char *s, char **output, int numidents); static PLpgSQL_stmt *make_callfunc_stmt(const char *sqlstart, - int location, bool is_assign, bool eaten_first_token, List* funcNameList = NULL, int arrayFuncDno = -1); + int location, bool is_assign, bool eaten_first_token, List* funcNameList = NULL, int arrayFuncDno = -1, bool isCallFunc = false); static PLpgSQL_stmt *make_callfunc_stmt_no_arg(const char *sqlstart, int location, bool withsemicolon = false, List* funcNameList = NULL); static PLpgSQL_expr *read_sql_construct6(int until, int until2, @@ -208,8 +210,6 @@ static PLpgSQL_expr *read_cursor_args(PLpgSQL_var *cursor, int until, const char *expected); static List *read_raise_options(void); static int errstate = ERROR; -static bool pragma_autonomous; -static bool pragma_exception_init; static char* get_proc_str(int tok); static char* get_init_proc(int tok); static char* get_attrname(int tok); @@ -219,7 +219,7 @@ static void checkFuncName(List* funcname); static void IsInPublicNamespace(char* varname); static void SetErrorState(); static void AddNamespaceIfNeed(int dno, char* ident); -static void AddNamespaceIfPkgVar(const char* ident); +static void AddNamespaceIfPkgVar(const char* ident, IdentifierLookup save_IdentifierLookup); bool plpgsql_is_token_keyword(int tok); static void check_bulk_into_type(PLpgSQL_row* row); static void check_table_index(PLpgSQL_datum* datum, char* funcName); @@ -243,14 +243,21 @@ static int read_assignlist(bool is_push_back, int* token); static void plpgsql_cast_reference_list(List* idents, StringInfoData* ds, bool isPkgVar); static bool PkgVarNeedCast(List* idents); static void CastArrayNameToArrayFunc(StringInfoData* ds, List* idents, bool needDot = true); -static Oid get_table_index_type(PLpgSQL_datum* datum); +static Oid get_table_index_type(PLpgSQL_datum* datum, int *tableof_func_dno); +static int get_nest_tableof_layer(PLpgSQL_var *var, const char *typname, int errstate); static void SetErrorState(); static void CheckDuplicateFunctionName(List* funcNameList); +static void check_autonomous_nest_tablevar(PLpgSQL_var* var); #ifndef ENABLE_MULTIPLE_NODES static PLpgSQL_type* build_type_from_cursor_var(PLpgSQL_var* var); static bool checkAllAttrName(TupleDesc tupleDesc); +static void BuildForQueryVariable(PLpgSQL_expr* expr, PLpgSQL_row **row, PLpgSQL_rec **rec, + const char* refname, int lineno); #endif static Oid createCompositeTypeForCursor(PLpgSQL_var* var, PLpgSQL_expr* expr); +static void check_record_nest_tableof_index(PLpgSQL_datum* datum); +static void check_tableofindex_args(int tableof_var_dno, Oid argtype); +static bool need_build_row_for_func_arg(PLpgSQL_rec **rec, PLpgSQL_row **row, int out_arg_num, int all_arg, int *varnos, char *p_argmodes); %} %expect 0 @@ -373,6 +380,7 @@ static Oid createCompositeTypeForCursor(PLpgSQL_var* var, PLpgSQL_expr* expr); %type record_attr %type opt_save_exceptions +%type unreserved_keyword_func /* * Basic non-keyword token types. These are hard-wired into the core lexer. 
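The widened `yylex_inparam` signature declared above threads two discovery slots through argument parsing: `tableof_func_dno` records the dno of the first table of index-by variable that calls a function inside the expression being read, and `tableof_var_dno` reports the table of variable passed as the current argument. A runnable sketch of the sentinel contract, assuming -1 means "nothing claimed yet" (the helper `claim_tableof_func` and the `main` driver are illustrative, not patch code):

```
#include <cstdio>

/* -1 mirrors the patch's "no table of index-by variable seen yet" sentinel. */
static bool claim_tableof_func(int var_dno, int *tableof_func_dno)
{
    if (*tableof_func_dno != -1 && *tableof_func_dno != var_dno) {
        /* a second, different variable in the same expression is rejected */
        return false;
    }
    *tableof_func_dno = var_dno;
    return true;
}

int main()
{
    int tableof_func_dno = -1; /* per-expression state */
    std::printf("%d\n", claim_tableof_func(7, &tableof_func_dno)); /* 1: first variable claims the slot */
    std::printf("%d\n", claim_tableof_func(7, &tableof_func_dno)); /* 1: the same dno may recur */
    std::printf("%d\n", claim_tableof_func(9, &tableof_func_dno)); /* 0: a second variable is rejected */
    return 0;
}
```

This single-claimant rule is what the later hunks enforce with the "do not support more than 2 table of index by variables" errors in `yylex_inparam`, `get_table_index_type`, and `read_sql_construct6`.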
@@ -673,8 +681,8 @@ decl_sect : opt_block_label $$.label = $1; /* Remember variables declared in decl_stmts */ $$.n_initvars = plpgsql_add_initdatums(&($$.initvarnos)); - $$.isAutonomous = pragma_autonomous; - pragma_autonomous = false; + $$.isAutonomous = u_sess->plsql_cxt.pragma_autonomous; + u_sess->plsql_cxt.pragma_autonomous = false; } ; @@ -682,7 +690,7 @@ decl_start : K_DECLARE { /* Forget any variables created before block */ plpgsql_add_initdatums(NULL); - pragma_autonomous = false; + u_sess->plsql_cxt.pragma_autonomous = false; /* * Disable scanner lookup of identifiers while * we process the decl_stmts @@ -1024,6 +1032,22 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull } | K_TYPE decl_varname as_is K_TABLE K_OF decl_datatype decl_notnull ';' { +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("table of is not supported in distributed database"), + errdetail("N/A"), errcause("PL/SQL uses unsupported feature."), + erraction("Modify SQL statement according to the manual."))); +#endif + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("table of is only supported in A-format database."), + errdetail("N/A"), errcause("PL/SQL uses unsupported feature."), + erraction("Modify SQL statement according to the manual."))); + } IsInPublicNamespace($2.name); $6->collectionType = PLPGSQL_COLLECTION_TABLE; @@ -1057,17 +1081,17 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull | K_TYPE decl_varname as_is K_TABLE K_OF table_var decl_notnull ';' { IsInPublicNamespace($2.name); - - PLpgSQL_type *var_type = ((PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6])->datatype; + PLpgSQL_var *check_var = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6]; + /* get and check nest tableof's depth */ + int depth = get_nest_tableof_layer(check_var, $2.name, errstate); PLpgSQL_type *nest_type = plpgsql_build_nested_datatype(); nest_type->tableOfIndexType = INT4OID; nest_type->collectionType = PLPGSQL_COLLECTION_TABLE; PLpgSQL_var* var = (PLpgSQL_var*)plpgsql_build_tableType($2.name, $2.lineno, nest_type, true); /* nested table type */ var->nest_table = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6]; - if (IS_PACKAGE) { - plpgsql_build_package_array_type($2.name, var_type->typoid, TYPCATEGORY_TABLEOF); - } + var->nest_layers = depth; + var->isIndexByTblOf = false; pfree_ext($2.name); } | K_TYPE decl_varname as_is K_TABLE K_OF record_var decl_notnull ';' @@ -1082,6 +1106,15 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull erraction("check define of table type"))); u_sess->plsql_cxt.have_error = true; #endif + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("table of is only supported in A-format database."), + errdetail("N/A"), errcause("PL/SQL uses unsupported feature."), + erraction("Modify SQL statement according to the manual."))); + } + if (IS_ANONYMOUS_BLOCK) { ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1156,6 +1189,22 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull | K_TYPE decl_varname as_is K_TABLE K_OF decl_datatype decl_notnull K_INDEX K_BY decl_datatype ';' { +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR,
+ (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("table of is not supported in distributed database"), + errdetail("N/A"), errcause("PL/SQL uses unsupported feature."), + erraction("Modify SQL statement according to the manual."))); +#endif + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("table of is only supported in A-format database."), + errdetail("N/A"), errcause("PL/SQL uses unsupported feature."), + erraction("Modify SQL statement according to the manual."))); + } IsInPublicNamespace($2.name); $6->collectionType = PLPGSQL_COLLECTION_TABLE; @@ -1188,12 +1237,14 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull erraction("check define of table type"))); u_sess->plsql_cxt.have_error = true; } - plpgsql_build_tableType($2.name, $2.lineno, $6, true); + PLpgSQL_var* var = (PLpgSQL_var*)plpgsql_build_tableType($2.name, $2.lineno, $6, true); + var->isIndexByTblOf = true; + if (IS_PACKAGE) { if ($10->typoid == VARCHAROID) { plpgsql_build_package_array_type($2.name, $6->typoid, TYPCATEGORY_TABLEOF_VARCHAR); } else { - plpgsql_build_package_array_type($2.name, $6->typoid, TYPTYPE_TABLEOF); + plpgsql_build_package_array_type($2.name, $6->typoid, TYPCATEGORY_TABLEOF_INTEGER); } } pfree_ext($2.name); @@ -1210,21 +1261,17 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull errmsg("unsupported table index type"))); u_sess->plsql_cxt.have_error = true; } - PLpgSQL_type *var_type = ((PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6])->datatype; - var_type->tableOfIndexType = $10->typoid; + PLpgSQL_var *check_var = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6]; + /* get and check nest tableof's depth */ + int depth = get_nest_tableof_layer(check_var, $2.name, errstate); PLpgSQL_type *nest_type = plpgsql_build_nested_datatype(); nest_type->tableOfIndexType = $10->typoid; nest_type->collectionType = PLPGSQL_COLLECTION_TABLE; PLpgSQL_var* var = (PLpgSQL_var*)plpgsql_build_tableType($2.name, $2.lineno, nest_type, true); /* nested table type */ var->nest_table = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[$6]; - if (IS_PACKAGE) { - if ($10->typoid == VARCHAROID) { - plpgsql_build_package_array_type($2.name, $10->typoid, TYPCATEGORY_TABLEOF_VARCHAR); - } else { - plpgsql_build_package_array_type($2.name, $10->typoid, TYPTYPE_TABLEOF); - } - } + var->nest_layers = depth; + var->isIndexByTblOf = true; pfree_ext($2.name); } | K_TYPE decl_varname as_is K_TABLE K_OF record_var decl_notnull K_INDEX K_BY decl_datatype ';' @@ -1239,6 +1286,14 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull erraction("check define of table type"))); u_sess->plsql_cxt.have_error = true; #endif + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("table of is only supported in A-format database."), + errdetail("N/A"), errcause("PL/SQL uses unsupported feature."), + erraction("Modify SQL statement according to the manual."))); + } if (IS_ANONYMOUS_BLOCK) { ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1263,12 +1318,14 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull u_sess->plsql_cxt.have_error = true; } newp->tableOfIndexType = $10->typoid; - plpgsql_build_tableType($2.name, $2.lineno,
newp, true); + PLpgSQL_var *var = (PLpgSQL_var*)plpgsql_build_tableType($2.name, $2.lineno, newp, true); + var->isIndexByTblOf = true; + if (IS_PACKAGE) { if ($10->typoid == VARCHAROID) { - plpgsql_build_package_array_type($2.name, $10->typoid, TYPCATEGORY_TABLEOF_VARCHAR); + plpgsql_build_package_array_type($2.name, newp->typoid, TYPCATEGORY_TABLEOF_VARCHAR); } else { - plpgsql_build_package_array_type($2.name, $10->typoid, TYPTYPE_TABLEOF); + plpgsql_build_package_array_type($2.name, newp->typoid, TYPCATEGORY_TABLEOF_INTEGER); } } pfree_ext($2.name); @@ -1302,6 +1359,7 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull if (table_type->nest_table != NULL) { newp->nest_table = plpgsql_build_nested_variable(table_type->nest_table, $2, $1.name, $1.lineno); + newp->nest_layers = table_type->nest_layers; } pfree_ext($1.name); } @@ -1368,8 +1426,12 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull } | K_PRAGMA any_identifier ';' { - if (pg_strcasecmp($2, "autonomous_transaction") == 0) - pragma_autonomous = true; + if (pg_strcasecmp($2, "autonomous_transaction") == 0) { + u_sess->plsql_cxt.pragma_autonomous = true; + if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile !=NULL) { + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->is_autonomous = true; + } + } else { elog(errstate, "invalid pragma"); u_sess->plsql_cxt.have_error = true; @@ -1378,7 +1440,6 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull | K_PRAGMA any_identifier '(' any_identifier ',' error_code ')' ';' { if (pg_strcasecmp($2, "exception_init") == 0) { - pragma_exception_init = true; if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { plpgsql_set_variable($4, $6); } @@ -2016,14 +2077,24 @@ label_stmt : stmt_assign { $$ = $1; } ; -stmt_perform : K_PERFORM expr_until_semi +stmt_perform : K_PERFORM { +#ifndef ENABLE_MULTIPLE_NODES + if (enable_out_param_override()) { + const char* message = "not support perform when behavior_compat_options=\"proc_outparam_override\""; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("not support perform when behavior_compat_options=\"proc_outparam_override\""), + parser_errposition(@1))); + } +#endif + } expr_until_semi { PLpgSQL_stmt_perform *newp; - newp = (PLpgSQL_stmt_perform *)palloc0(sizeof(PLpgSQL_stmt_perform)); newp->cmd_type = PLPGSQL_STMT_PERFORM; newp->lineno = plpgsql_location_to_lineno(@1); - newp->expr = $2; + newp->expr = $3; newp->sqlString = plpgsql_get_curline_query(); $$ = (PLpgSQL_stmt *)newp; @@ -2292,6 +2363,9 @@ assign_var | T_TABLE_VAR assign_list { check_assignable($1.datum, @1); + if ($1.datum->dtype == PLPGSQL_DTYPE_VAR) { + check_autonomous_nest_tablevar((PLpgSQL_var*)$1.datum); + } if ($2 == NIL) { $$ = $1.dno; } else { @@ -2322,6 +2396,7 @@ assign_var { check_assignable($1.datum, @1); if ($2 == NIL) { + check_record_nest_tableof_index($1.datum); $$ = $1.dno; } else { if(IsA((Node*)linitial($2), A_Indices) && list_length($2) == 1) { @@ -2376,6 +2451,7 @@ assign_var plpgsql_adddatum((PLpgSQL_datum *) newptr); $$ = newptr->dno; } else { + check_record_nest_tableof_index($1.datum); $$ = $1.dno; } } @@ -2711,7 +2787,7 @@ for_control : for_variable K_IN $$ = (PLpgSQL_stmt *) newp; } - else if (tok == T_DATUM && + else if ((tok == T_DATUM || tok == T_PACKAGE_VARIABLE) && yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_VAR && ((PLpgSQL_var *) yylval.wdatum.datum)->datatype->typoid == 
REFCURSOROID) { @@ -2881,35 +2957,61 @@ for_control : for_variable K_IN newp->cmd_type = PLPGSQL_STMT_FORS; if ($1.rec) { +#ifndef ENABLE_MULTIPLE_NODES + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && IMPLICIT_FOR_LOOP_VARIABLE) { + BuildForQueryVariable(expr1, &newp->row, &newp->rec, $1.name, $1.lineno); + check_assignable((PLpgSQL_datum *)newp->rec ? + (PLpgSQL_datum *)newp->rec : (PLpgSQL_datum *)newp->row, @1); + } else { + newp->rec = $1.rec; + check_assignable((PLpgSQL_datum *) newp->rec, @1); + } +#else newp->rec = $1.rec; check_assignable((PLpgSQL_datum *) newp->rec, @1); +#endif } else if ($1.row) { +#ifndef ENABLE_MULTIPLE_NODES + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && IMPLICIT_FOR_LOOP_VARIABLE) { + BuildForQueryVariable(expr1, &newp->row, &newp->rec, $1.name, $1.lineno); + check_assignable((PLpgSQL_datum *)newp->rec ? + (PLpgSQL_datum *)newp->rec : (PLpgSQL_datum *)newp->row, @1); + } else { + newp->row = $1.row; + check_assignable((PLpgSQL_datum *) newp->row, @1); + } +#else newp->row = $1.row; check_assignable((PLpgSQL_datum *) newp->row, @1); +#endif } else if ($1.scalar) { +#ifndef ENABLE_MULTIPLE_NODES + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && IMPLICIT_FOR_LOOP_VARIABLE) { + BuildForQueryVariable(expr1, &newp->row, &newp->rec, $1.name, $1.lineno); + check_assignable((PLpgSQL_datum *)newp->rec ? + (PLpgSQL_datum *)newp->rec : (PLpgSQL_datum *)newp->row, @1); + } else { + /* convert single scalar to list */ + newp->row = make_scalar_list1($1.name, $1.scalar, $1.dno, $1.lineno, @1); + /* no need for check_assignable */ + } +#else /* convert single scalar to list */ newp->row = make_scalar_list1($1.name, $1.scalar, $1.dno, $1.lineno, @1); /* no need for check_assignable */ +#endif } else { -#ifndef ENABLE_MULTIPLE_NODES - if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { - TupleDesc desc = getCursorTupleDesc(expr1, true); - if (desc == NULL || desc->natts == 0 || checkAllAttrName(desc)) { - PLpgSQL_type dtype; - dtype.ttype = PLPGSQL_TTYPE_REC; - newp->rec = (PLpgSQL_rec *) - plpgsql_build_variable($1.name,$1.lineno, &dtype, true); - check_assignable((PLpgSQL_datum *) newp->rec, @1); - } else { - newp->row = build_row_from_tuple_desc($1.name, $1.lineno, desc); - check_assignable((PLpgSQL_datum*)newp->row, @1); - } +#ifndef ENABLE_MULTIPLE_NODES + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && ALLOW_PROCEDURE_COMPILE_CHECK) { + BuildForQueryVariable(expr1, &newp->row, &newp->rec, $1.name, $1.lineno); + check_assignable((PLpgSQL_datum *)newp->rec ? 
+ (PLpgSQL_datum *)newp->rec : (PLpgSQL_datum *)newp->row, @1); } else { PLpgSQL_type dtype; dtype.ttype = PLPGSQL_TTYPE_REC; @@ -3178,6 +3280,9 @@ for_variable : T_DATUM int tok; $$.scalar = $1.datum; + if ($1.datum->dtype == PLPGSQL_DTYPE_VAR) { + check_autonomous_nest_tablevar((PLpgSQL_var*)$1.datum); + } $$.rec = NULL; $$.row = NULL; $$.dno = $1.dno; @@ -3622,7 +3727,7 @@ stmt_execsql : K_ALTER } } } - | K_LOG + | unreserved_keyword_func { int tok = -1; bool isCallFunc = false; @@ -3888,7 +3993,7 @@ stmt_execsql : K_ALTER | T_ARRAY_DELETE { StringInfoData sqlBuf; - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, NULL); List* idents = yylval.wdatum.idents; int dno = yylval.wdatum.dno; initStringInfo(&sqlBuf); @@ -3897,9 +4002,16 @@ stmt_execsql : K_ALTER if (';' == tok) { - appendStringInfo(&sqlBuf, "array_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents, false); - appendStringInfo(&sqlBuf, ")"); + if (indexType == VARCHAROID || indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_indexby_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents, false); + appendStringInfo(&sqlBuf, ")"); + } else { + appendStringInfo(&sqlBuf, "array_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents, false); + appendStringInfo(&sqlBuf, ")"); + } + $$ = make_callfunc_stmt(sqlBuf.data, @1, false, false, NULL, dno); } else { if('(' == tok) { @@ -3907,82 +4019,137 @@ stmt_execsql : K_ALTER if (tok1 == ')') { int tok2 = yylex(); if (tok2 == ';') { - appendStringInfo(&sqlBuf, "array_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents, false); - appendStringInfo(&sqlBuf, ")"); + if (indexType == VARCHAROID || indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_indexby_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents, false); + appendStringInfo(&sqlBuf, ")"); + } else { + appendStringInfo(&sqlBuf, "array_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents, false); + appendStringInfo(&sqlBuf, ")"); + } + $$ = make_callfunc_stmt(sqlBuf.data, @1, false, false, NULL, dno); } else { plpgsql_push_back_token(tok); $$ = NULL; yyerror("syntax error"); } - } else if (tok1 != ICONST && tok1 != T_WORD && tok1 != SCONST && tok1 != T_DATUM) { + } else if (tok1 != ICONST && tok1 != T_WORD && tok1 != SCONST && tok1 != T_DATUM && tok1 != '-') { plpgsql_push_back_token(tok); $$ = NULL; yyerror("syntax error"); } else { - if (ICONST == tok1) { + if (tok1 == '-') { if (indexType == VARCHAROID) { - appendStringInfo(&sqlBuf, "array_varchar_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%d\')", yylval.ival); - } else if (indexType == INT4OID) { - appendStringInfo(&sqlBuf, "array_integer_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%d\')", yylval.ival); - } else { - appendStringInfo(&sqlBuf, "array_deleteidx("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "%d)", yylval.ival); - } - } else if (SCONST == tok1) { - if (indexType == VARCHAROID) { - appendStringInfo(&sqlBuf, "array_varchar_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%s\')", yylval.str); - } else if (indexType == INT4OID) { - appendStringInfo(&sqlBuf, "array_integer_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%s\')", yylval.str); - } else { - appendStringInfo(&sqlBuf, "array_deleteidx("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "%s)", yylval.str); - } - 
- } else if (T_WORD == tok1) { - if (indexType == VARCHAROID) { - appendStringInfo(&sqlBuf, "array_varchar_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%s\')", yylval.word.ident); - } else if (indexType == INT4OID) { - appendStringInfo(&sqlBuf, "array_integer_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%s\')", yylval.word.ident); - } else { - appendStringInfo(&sqlBuf, "array_deleteidx("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "%s)", yylval.word.ident); - } - - } else { - char *datName = NameOfDatum(&yylval.wdatum); - if (indexType == VARCHAROID) { - appendStringInfo(&sqlBuf, "array_varchar_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%s\')", datName); - } else if (indexType == INT4OID) { - appendStringInfo(&sqlBuf, "array_integer_delete("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "\'%s\')", datName); - } else { - appendStringInfo(&sqlBuf, "array_deleteidx("); - CastArrayNameToArrayFunc(&sqlBuf, idents); - appendStringInfo(&sqlBuf, "%s)", datName); + yyerror("syntax error"); } - pfree_ext(datName); + int tok3 = yylex(); + if (tok3 != ICONST && tok3 != T_WORD && tok3 != T_DATUM) { + yyerror("syntax error"); + } + + if (ICONST == tok3) { + if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'-%d\')", yylval.ival); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "-%d)", yylval.ival); + } + } else if (T_WORD == tok3) { + if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'-%s\')", yylval.word.ident); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "-%s)", yylval.word.ident); + } + + } else { + char *datName = NameOfDatum(&yylval.wdatum); + if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'-%s\')", datName); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "-%s)", datName); + } + + pfree_ext(datName); + } + } else { + if (ICONST == tok1) { + if (indexType == VARCHAROID) { + appendStringInfo(&sqlBuf, "array_varchar_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%d\')", yylval.ival); + } else if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%d\')", yylval.ival); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "%d)", yylval.ival); + } + } else if (SCONST == tok1) { + if (indexType == VARCHAROID) { + appendStringInfo(&sqlBuf, "array_varchar_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%s\')", yylval.str); + } else if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%s\')", yylval.str); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + 
CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "%s)", yylval.str); + } + + } else if (T_WORD == tok1) { + if (indexType == VARCHAROID) { + appendStringInfo(&sqlBuf, "array_varchar_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%s\')", yylval.word.ident); + } else if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%s\')", yylval.word.ident); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "%s)", yylval.word.ident); + } + + } else { + char *datName = NameOfDatum(&yylval.wdatum); + if (indexType == VARCHAROID) { + appendStringInfo(&sqlBuf, "array_varchar_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%s\')", datName); + } else if (indexType == INT4OID) { + appendStringInfo(&sqlBuf, "array_integer_delete("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "\'%s\')", datName); + } else { + appendStringInfo(&sqlBuf, "array_deleteidx("); + CastArrayNameToArrayFunc(&sqlBuf, idents); + appendStringInfo(&sqlBuf, "%s)", datName); + } + + pfree_ext(datName); + } } + int tok2 = yylex(); if (tok2 != ')') { plpgsql_push_back_token(tok); @@ -4159,7 +4326,6 @@ stmt_open : K_OPEN cursor_variable read_sql_expression2(K_USING, ';', "USING or ;", &endtoken); - /* If we found "USING", collect argument(s) */ if(K_USING == endtoken) { @@ -4176,6 +4342,13 @@ stmt_open : K_OPEN cursor_variable yyerror("syntax error"); } } + if (newp->query != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && ALLOW_PROCEDURE_COMPILE_CHECK) { + (void)getCursorTupleDesc(newp->query, false, true); + } + else if (newp->dynquery != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && ALLOW_PROCEDURE_COMPILE_CHECK) + { + (void)getCursorTupleDesc(newp->dynquery, false, true); + } } else { @@ -4456,6 +4629,20 @@ cursor_variable : T_DATUM ((PLpgSQL_var *) $1.datum)->refname), parser_errposition(@1))); } + if (((PLpgSQL_var *) $1.datum)->ispkg) { + if ((u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->is_autonomous) || + u_sess->is_autonomous_session) { + const char* message = + "package cursor referenced in autonomous procedure is not supported yet"; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cursor referenced by \"%s\" in autonomous procedure is not supported yet", + $1.ident), + parser_errposition(@1))); + } + } $$ = $1.dno; } | T_PACKAGE_VARIABLE @@ -4478,6 +4665,28 @@ cursor_variable : T_DATUM ((PLpgSQL_var *) $1.datum)->refname), parser_errposition(@1))); } + if (list_length($1.idents) == 3) { + const char* message = + "cursor referenced in schema.package.cursor format is not supported yet"; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cursor referenced by \"%s\" is not supported yet", + $1.ident), + parser_errposition(@1))); + } + if ((u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->is_autonomous) || + u_sess->is_autonomous_session) { + const char* message = + "package cursor referenced in autonomous procedure is not supported yet"; + InsertErrorMessage(message, 
plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cursor referenced by \"%s\" in autonomous procedure is not supported yet", + $1.ident), + parser_errposition(@1))); + } $$ = $1.dno; } | T_WORD @@ -4653,7 +4862,7 @@ expr_until_semi : if (isCallFunc) { - stmt = make_callfunc_stmt(name, yylloc, true, false, funcNameList); + stmt = make_callfunc_stmt(name, yylloc, true, false, funcNameList, -1, isCallFunc); if (PLPGSQL_STMT_EXECSQL == stmt->cmd_type) expr = ((PLpgSQL_stmt_execsql*)stmt)->sqlstmt; else if (PLPGSQL_STMT_PERFORM == stmt->cmd_type) @@ -4746,6 +4955,54 @@ any_identifier : T_WORD } ; +unreserved_keyword_func : + K_ABSOLUTE + | K_ALIAS + | K_BACKWARD + | K_CONSTANT + | K_CURRENT + | K_DEBUG + | K_DETAIL + | K_DUMP + | K_ERRCODE + | K_ERROR + | K_EXCEPTIONS + | K_FIRST + | K_FORWARD + | K_HINT + | K_INDEX + | K_INFO + | K_LAST + | K_LOG + | K_MESSAGE + | K_MESSAGE_TEXT + | K_MULTISET + | K_NEXT + | K_NO + | K_NOTICE + | K_OPTION + | K_PACKAGE + | K_INSTANTIATION + | K_PG_EXCEPTION_CONTEXT + | K_PG_EXCEPTION_DETAIL + | K_PG_EXCEPTION_HINT + | K_QUERY + | K_RECORD + | K_RELATIVE + | K_RESULT_OID + | K_RETURNED_SQLSTATE + | K_REVERSE + | K_ROW_COUNT + | K_SCROLL + | K_SLICE + | K_STACKED + | K_SYS_REFCURSOR + | K_USE_COLUMN + | K_USE_VARIABLE + | K_VARIABLE_CONFLICT + | K_VARRAY + | K_WARNING + ; unreserved_keyword : K_ABSOLUTE | K_ALIAS @@ -4907,7 +5164,9 @@ current_token_is_not_variable(int tok) static void yylex_inparam(StringInfoData* func_inparam, int *nparams, - int * tok) + int * tok, + int *tableof_func_dno, + int *tableof_var_dno) { PLpgSQL_expr * expr = NULL; @@ -4935,6 +5194,13 @@ yylex_inparam(StringInfoData* func_inparam, expr = read_sql_construct(',', ')', 0, ",|)", "", true, false, false, NULL, tok); } + if (*tableof_func_dno != -1 && *tableof_func_dno != expr->tableof_func_dno) { + yyerror("do not support more than 2 table of index by variables call functions in function"); + } else { + *tableof_func_dno = expr->tableof_func_dno; + } + + *tableof_var_dno = expr->tableof_var_dno; /* * handle the problem that the function * arguments can only be variable. 
After revising, the arguments can be any @@ -5280,7 +5546,7 @@ static int get_func_out_arg_num(char* p_argmodes, int all_arg) * Notes : */ static PLpgSQL_stmt * -make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eaten_first_token, List* funcNameList, int arrayFuncDno) +make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eaten_first_token, List* funcNameList, int arrayFuncDno, bool isCallFunc) { int nparams = 0; int nfields = 0; @@ -5296,10 +5562,12 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate int pos_outer = 0; /* pos_inner is the position got by its real postion in function invoke */ int pos_inner = -1; - int varnos[FUNC_MAX_ARGS]; + int varnos[FUNC_MAX_ARGS] = {-1}; bool namedarg[FUNC_MAX_ARGS]; char* namedargnamses[FUNC_MAX_ARGS]; char *fieldnames[FUNC_MAX_ARGS]; + bool outParamInvalid = false; + bool is_plpgsql_func_with_outparam = false; List *funcname = NIL; PLpgSQL_row *row = NULL; @@ -5315,10 +5583,12 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate int j = 0; int placeholders = 0; char *quoted_sqlstart = NULL; + bool is_have_tableof_index_func = false; MemoryContext oldCxt = NULL; bool multi_func = false; const char *varray_delete = "array_delete(\""; + const char *varray_indexby_delete = "array_indexby_delete(\""; const char *varray_deleteidx = "array_deleteidx(\""; const char *varray_deletevarchar = "array_varchar_delete(\""; const char *varray_deleteinteger = "array_integer_delete(\""; @@ -5331,6 +5601,7 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate cp[2] = NULL; if (sqlstart != NULL && (strncmp(sqlstart, varray_delete, strlen(varray_delete)) == 0 + || strncmp(sqlstart, varray_indexby_delete, strlen(varray_indexby_delete)) == 0 || strncmp(sqlstart, varray_deleteidx, strlen(varray_deleteidx)) == 0 || strncmp(sqlstart, varray_extend, strlen(varray_extend)) == 0 || strncmp(sqlstart, varray_trim, strlen(varray_trim)) == 0 @@ -5340,12 +5611,17 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate if (strncmp(sqlstart, varray_delete, strlen(varray_delete)) == 0) { chIdx = strlen(varray_delete); + } else if (strncmp(sqlstart, varray_indexby_delete, strlen(varray_indexby_delete)) == 0) { + chIdx = strlen(varray_indexby_delete); + is_have_tableof_index_func = true; } else if (strncmp(sqlstart, varray_deleteidx, strlen(varray_deleteidx)) == 0) { chIdx = strlen(varray_deleteidx); } else if (strncmp(sqlstart, varray_deletevarchar, strlen(varray_deletevarchar)) == 0) { chIdx = strlen(varray_deletevarchar); + is_have_tableof_index_func = true; } else if (strncmp(sqlstart, varray_deleteinteger, strlen(varray_deleteinteger)) == 0) { chIdx = strlen(varray_deleteinteger); + is_have_tableof_index_func = true; } else if (strncmp(sqlstart, varray_trim, strlen(varray_trim)) == 0) { chIdx = strlen(varray_trim); } else { @@ -5360,13 +5636,15 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate return NULL; } - expr = (PLpgSQL_expr *)palloc(sizeof(PLpgSQL_expr)); + expr = (PLpgSQL_expr *)palloc0(sizeof(PLpgSQL_expr)); expr->dtype = PLPGSQL_DTYPE_EXPR; expr->query = pstrdup(func_inparas.data); expr->plan = NULL; expr->paramnos = NULL; expr->ns = plpgsql_ns_top(); expr->idx = (uint32)-1; + expr->out_param_dno = -1; + expr->is_have_tableof_index_func = is_have_tableof_index_func; PLpgSQL_stmt_assign *perform = (PLpgSQL_stmt_assign*)palloc0(sizeof(PLpgSQL_stmt_assign)); perform->cmd_type 
= PLPGSQL_STMT_ASSIGN; @@ -5472,6 +5750,10 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate } plpgsql_push_back_token(tok); + if (isCallFunc && is_function_with_plpgsql_language_and_outparam(clist->oid)) { + is_assign = false; + is_plpgsql_func_with_outparam = true; + } /* has any "out" parameters, user execsql stmt */ if (is_assign) { @@ -5482,6 +5764,8 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate appendStringInfoString(&func_inparas, "CALL "); } + int tableof_func_dno = -1; + int tableof_var_dno = -1; /* * Properly double-quote schema name and function name to handle uppercase * and special characters when making 'CALL func_name;' statement. @@ -5527,6 +5811,7 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate * search the p_argnames to match the right argname with current arg, * so the real postion of current arg can be determined */ + bool argMatch = false; for (j = 0; j < narg; j++) { if ('o' == p_argmodes[j] || 'b' == p_argmodes[j]) @@ -5540,7 +5825,8 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate switch (p_argmodes[j]) { case 'i': - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); + check_tableofindex_args(tableof_var_dno, p_argtypes[j]); break; case 'o': case 'b': @@ -5571,12 +5857,17 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate /* pass => */ (void)yylex(); yylex_outparam(fieldnames, varnos, pos_outer, &row, &rec, &tok, true); - if (T_DATUM == tok || T_VARRAY_VAR == tok || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok) + if ((!enable_out_param_override() && p_argmodes[j] == 'b') + || T_DATUM == tok || T_VARRAY_VAR == tok + || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok) { nfields++; plpgsql_push_back_token(tok); /* don't need inparam add ',' */ - yylex_inparam(&func_inparas, NULL, &tok); + yylex_inparam(&func_inparas, NULL, &tok, &tableof_func_dno, &tableof_var_dno); + check_tableofindex_args(tableof_var_dno, p_argtypes[j]); + } else { + outParamInvalid = true; } nparams++; break; @@ -5587,9 +5878,24 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate (errcode(ERRCODE_UNEXPECTED_NODE_STATE), errmsg("parameter mode %c doesn't exist", p_argmodes[j]))); } + argMatch = true; break; } } + if (!argMatch) { + const char* message = "invoking function error, check function"; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("when invoking function %s, no arguments match \"%s\"", sqlstart, argname.data))); + } + if (outParamInvalid) { + const char* message = "invoking function error, check function"; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("when invoking function %s, no destination for arguments \"%s\"", sqlstart, argname.data))); + } } else { @@ -5601,7 +5907,8 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate if (T_PLACEHOLDER == tok) placeholders++; plpgsql_push_back_token(tok); - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); + check_tableofindex_args(tableof_var_dno, p_argtypes[i]); break; case 'o': /* * if the function is in an assign expr, just ignore the out * parameters. */ if (is_assign) { - if (T_DATUM == tok ||
T_PLACEHOLDER == tok) + if (T_DATUM == tok || T_VARRAY_VAR == tok || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok || T_PLACEHOLDER == tok) { plpgsql_push_back_token(tok); (void)read_sql_expression2(',', ')', ",|)", &tok); @@ -5624,27 +5931,29 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate { placeholders++; } - else if (T_DATUM == tok || T_VARRAY_VAR == tok || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok) + else if (T_DATUM == tok || T_VARRAY_VAR == tok || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok || T_PLACEHOLDER == tok) { nfields++; + } else { + outParamInvalid = true; } plpgsql_push_back_token(tok); yylex_outparam(fieldnames, varnos, pos_inner, &row, &rec, &tok, true); plpgsql_push_back_token(tok); - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); + check_tableofindex_args(tableof_var_dno, p_argtypes[i]); break; case 'b': if (is_assign) { - /* - * if the function is in an assign expr, read the inout - * parameters. - */ - if (T_DATUM == tok) + if (!enable_out_param_override() + || (enable_out_param_override() + && (T_DATUM == tok || T_VARRAY_VAR == tok + || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok))) { plpgsql_push_back_token(tok); - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); } else yyerror("syntax error"); @@ -5657,11 +5966,14 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate yylex_outparam(fieldnames, varnos, pos_inner, &row, &rec, &tok, true); if (T_DATUM == tok || T_VARRAY_VAR == tok || T_TABLE_VAR == tok || T_PACKAGE_VARIABLE == tok) { nfields++; + } else { + outParamInvalid = true; } plpgsql_push_back_token(tok); - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); + check_tableofindex_args(tableof_var_dno, p_argtypes[i]); break; default: const char* message = "parameter mode not exist"; @@ -5670,6 +5982,14 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate (errcode(ERRCODE_UNEXPECTED_NODE_STATE), errmsg("parameter mode %c doesn't exist", p_argmodes[i]))); } + if (is_plpgsql_func_with_outparam && outParamInvalid) { + const char* message = "invoking function error, check function"; + InsertErrorMessage(message, plpgsql_yylloc); + ereport(errstate, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("when invoking function %s, no destination for arguments \"%s\"", sqlstart, argname.data))); + } + } if (')' == tok) @@ -5705,7 +6025,7 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate placeholders++; plpgsql_push_back_token(tok); - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); if (')' == tok) { @@ -5750,6 +6070,31 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate (void)yylex(); int loc = yylex_outparam(fieldnames, varnos, nfields, &row, &rec, &tok, false, true); + if (tok == '(') { + int brackets = 1; + bool syntaxErr = false; + // ignore record const value.
+ while(brackets != 0 && !syntaxErr) { + tok = yylex(); + switch(tok) { + case '(': + brackets++; + break; + case ')': + brackets--; + break; + case 0: + syntaxErr = true; + break; + default: + ; + } + } + + if (syntaxErr) { + yyerror("Record/Composite type format error, brackets does't match."); + } + } tok = yylex(); int curloc = yylloc; plpgsql_push_back_token(tok); @@ -5763,7 +6108,7 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate { yylex_outparam(fieldnames, varnos, nfields, &row, &rec, &tok, true, true); plpgsql_push_back_token(tok); - yylex_inparam(&func_inparas, &nparams, &tok); + yylex_inparam(&func_inparas, &nparams, &tok, &tableof_func_dno, &tableof_var_dno); namedarg[nfields] = false; namedargnamses[nfields] = NULL; } @@ -5816,6 +6161,8 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate expr->paramnos = NULL; expr->ns = plpgsql_ns_top(); expr->idx = (uint32)-1; + expr->out_param_dno = -1; + expr->is_have_tableof_index_func = tableof_func_dno != -1 ? true : false; if (multi_func) { @@ -5872,6 +6219,10 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate narg = procStruct->pronargs; ReleaseSysCache(proctup); + for (int k = 0; k < all_arg; k++) { + check_tableofindex_args(varnos[k], p_argtypes[k]); + } + /* if there is no "out" parameters ,use perform stmt,others use exesql */ if ((0 == all_arg || NULL == p_argmodes)) { @@ -5886,22 +6237,13 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate else if (all_arg >= narg) { int out_arg_num = get_func_out_arg_num(p_argmodes, all_arg); - /* In multiset function, row maybe not the out arg */ - if (all_arg > 1 && out_arg_num == 1 && row != NULL) { - for (int k = 0; k < all_arg; k++) - { - if (p_argmodes[k] == 'i' && row->dno == varnos[k]) { - row = NULL; - break; - } - } - } - /* out arg number > 1 should build a row */ - if ((NULL == rec && NULL == row) || out_arg_num > 1) + bool need_build_row = need_build_row_for_func_arg(&rec, &row, out_arg_num, all_arg, varnos, p_argmodes); + if (need_build_row) { int new_nfields = 0; int i = 0, j = 0; + bool varnosInvalid = false; for (i = 0; i < all_arg; i++) { if (p_argmodes[i] == 'i') @@ -5931,6 +6273,8 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate row->fieldnames[j] = fieldnames[i]; row->varnos[j] = varnos[i]; j++; + } else { + varnosInvalid = true; } } else @@ -5979,15 +6323,25 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate (errcode(ERRCODE_SYNTAX_ERROR), errmsg("parameter \"%s\" is assigned more than once", row->fieldnames[pos_outer]))); } - if (varnos[i] >= 0) { row->fieldnames[pos_outer] = fieldnames[i]; row->varnos[pos_outer] = varnos[i]; + } else { + varnosInvalid = true; } } } - plpgsql_adddatum((PLpgSQL_datum *)row); + if (varnosInvalid) { + pfree_ext(row->refname); + pfree_ext(row->fieldnames); + pfree_ext(row->varnos); + pfree_ext(row); + } else if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + expr->out_param_dno = plpgsql_adddatum((PLpgSQL_datum *)row); + } else { + plpgsql_adddatum((PLpgSQL_datum *)row); + } } PLpgSQL_stmt_execsql * execsql = NULL; @@ -6021,8 +6375,7 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate * if have out parameters and no row/rec, generate row type to store the results. * if have row/rec and more than 2 out parameters , also generate row type. 
*/ - if ((nfields && NULL == rec && NULL == row) || - nfields > 1) + if (((nfields && NULL == rec && NULL == row) || nfields > 1) && !outParamInvalid) { row = (PLpgSQL_row*)palloc0(sizeof(PLpgSQL_row)); row->dtype = PLPGSQL_DTYPE_ROW; @@ -6038,7 +6391,17 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate row->fieldnames[nfields] = fieldnames[nfields]; row->varnos[nfields] = varnos[nfields]; } - plpgsql_adddatum((PLpgSQL_datum *)row); + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + expr->out_param_dno = plpgsql_adddatum((PLpgSQL_datum *)row); + } else { + plpgsql_adddatum((PLpgSQL_datum *)row); + } + } + /* has invalid out param, set it to null */ + if ((rec != NULL || row != NULL) && outParamInvalid) + { + rec = NULL; + row = NULL; } /* if there is no "out" parameters ,use perform stmt,others use exesql */ @@ -6076,6 +6439,19 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate return NULL; } + +static bool +is_unreserved_keyword_func(const char *name) +{ + int i; + char *func_name[] = {"absolute", "alias", "backward", "constant","current", "debug", "detail","dump" ,"errcode","error", "exceptions", "first", "forward", "hint", "index", "info", "last", "log","message", "message_text", "multiset", "next", "no", "notice", "option", "package", "instantiation","pg_exception_context", "pg_exception_detail", "pg_exception_hint", "query", "record", "relative", "result_oid", "returned_sqlstate", "reverse","row_count","sroll", "slice", "stacked","sys_refcursor","use_column","use_variable","variable_conflict","varray","warning"}; + for (i = 0; i <= 45; i++) { + if (pg_strcasecmp(func_name[i], name) == 0) { + return true; + } + } + return false; +} /* * Brief : check if it is an log function invoke * Description : @@ -6086,9 +6462,9 @@ make_callfunc_stmt(const char *sqlstart, int location, bool is_assign, bool eate * Notes : */ static bool -is_logfunction(ScanKeyword* keyword, bool no_parenthesis, const char *name) +is_unreservedkeywordfunction(ScanKeyword* keyword, bool no_parenthesis, const char *name) { - if (keyword && no_parenthesis && UNRESERVED_KEYWORD == keyword->category && pg_strcasecmp("log", name) != 0) + if (keyword && no_parenthesis && UNRESERVED_KEYWORD == keyword->category && !is_unreserved_keyword_func(name)) return false; else return true; @@ -6110,6 +6486,7 @@ is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNam List *funcname = NIL; FuncCandidateList clist = NULL; bool have_outargs = false; + bool have_inoutargs = false; char **p_argnames = NULL; char *p_argmodes = NULL; Oid *p_argtypes = NULL; @@ -6145,7 +6522,7 @@ is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNam if (keyword && RESERVED_KEYWORD == keyword->category) return false; /* function name can not be unreserved keyword when no-parenthesis function is called. 
except log function*/ - if (!is_logfunction(keyword, no_parenthesis, cp[0])) + if (!is_unreservedkeywordfunction(keyword, no_parenthesis, cp[0])) { return false; } @@ -6208,10 +6585,20 @@ is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNam break; } } + for (i = 0; i < narg; i++) + { + if ('b' == p_argmodes[i]) + { + have_inoutargs = true; + break; + } + } } ReleaseSysCache(proctup); } + if (have_inoutargs && is_function_with_plpgsql_language_and_outparam(clist->oid)) + return true; if (!have_outargs && is_assign) return false; @@ -6265,10 +6652,20 @@ is_function(const char *name, bool is_assign, bool no_parenthesis, List* funcNam break; } } + for (i = 0; i < narg; i++) + { + if ('b' == p_argmodes[i]) + { + have_inoutargs = true; + break; + } + } } ReleaseSysCache(proctup); } + if (have_inoutargs && is_function_with_plpgsql_language_and_outparam(clist->oid)) + return true; if (!have_outargs && is_assign) return false; @@ -6324,7 +6721,8 @@ static bool is_paren_friendly_datatype(TypeName *name) char category = TYPCATEGORY_INVALID; get_type_category_preferred(typoid, &category, &preferred); if (category != TYPCATEGORY_ARRAY && category != TYPCATEGORY_COMPOSITE - && category != TYPCATEGORY_TABLEOF && category != TYPCATEGORY_TABLEOF_VARCHAR) { + && category != TYPCATEGORY_TABLEOF && category != TYPCATEGORY_TABLEOF_VARCHAR + && category != TYPCATEGORY_TABLEOF_INTEGER) { return false; } return true; @@ -6650,6 +7048,11 @@ read_sql_construct6(int until, int loc = 0; int curloc = 0; int brack_cnt = 0; + /* mark if there are 2 table of index by var call functions in an expr */ + int tableof_func_dno = -1; + int tableof_var_dno = -1; + bool is_have_tableof_index_var = false; + List* tableof_index_list = NIL; PLpgSQL_compile_context* curr_compile = u_sess->plsql_cxt.curr_compile_context; /* @@ -6782,7 +7185,7 @@ read_sql_construct6(int until, case T_VARRAY_VAR: idents = yylval.wdatum.idents; if (idents == NIL) { - AddNamespaceIfPkgVar(yylval.wdatum.ident); + AddNamespaceIfPkgVar(yylval.wdatum.ident, save_IdentifierLookup); } tok = yylex(); if (tok == '(' || tok == '[') { @@ -6801,7 +7204,7 @@ read_sql_construct6(int until, } case T_ARRAY_FIRST: { - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); if (indexType == VARCHAROID) { appendStringInfo(&ds, "ARRAY_VARCHAR_FIRST("); CastArrayNameToArrayFunc(&ds, yylval.wdatum.idents, false); @@ -6829,7 +7232,7 @@ read_sql_construct6(int until, } case T_ARRAY_LAST: { - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); if (indexType == VARCHAROID) { appendStringInfo(&ds, "ARRAY_VARCHAR_LAST("); CastArrayNameToArrayFunc(&ds, yylval.wdatum.idents, false); @@ -6857,7 +7260,7 @@ read_sql_construct6(int until, } case T_ARRAY_COUNT: { - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); if (indexType == VARCHAROID || indexType == INT4OID) { appendStringInfo(&ds, "ARRAY_INDEXBY_LENGTH("); CastArrayNameToArrayFunc(&ds, yylval.wdatum.idents); @@ -6883,7 +7286,7 @@ read_sql_construct6(int until, } case T_ARRAY_EXISTS: { - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); if (indexType == VARCHAROID) { appendStringInfo(&ds, "ARRAY_VARCHAR_EXISTS("); CastArrayNameToArrayFunc(&ds, 
yylval.wdatum.idents); @@ -6909,7 +7312,7 @@ read_sql_construct6(int until, } case T_ARRAY_PRIOR: { - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); if (indexType == VARCHAROID) { appendStringInfo(&ds, "ARRAY_VARCHAR_PRIOR("); CastArrayNameToArrayFunc(&ds, yylval.wdatum.idents); @@ -6935,7 +7338,7 @@ read_sql_construct6(int until, } case T_ARRAY_NEXT: { - Oid indexType = get_table_index_type(yylval.wdatum.datum); + Oid indexType = get_table_index_type(yylval.wdatum.datum, &tableof_func_dno); if (indexType == VARCHAROID) { appendStringInfo(&ds, "ARRAY_VARCHAR_NEXT("); CastArrayNameToArrayFunc(&ds, yylval.wdatum.idents); @@ -7083,19 +7486,35 @@ read_sql_construct6(int until, char tableName1[tablevar_namelen] = {0}; idents = yylval.wdatum.idents; if (idents == NIL) { - AddNamespaceIfPkgVar(yylval.wdatum.ident); + AddNamespaceIfPkgVar(yylval.wdatum.ident, save_IdentifierLookup); } copy_table_var_indents(tableName1, yylval.wdatum.ident, tablevar_namelen); PLpgSQL_datum* datum = yylval.wdatum.datum; + int var_dno = yylval.wdatum.dno; + if (datum->dtype == PLPGSQL_DTYPE_VAR) { + check_autonomous_nest_tablevar((PLpgSQL_var*)datum); + } tok = yylex(); if('(' == tok) { push_array_parse_stack(&context, parenlevel, ARRAY_ACCESS); + tableof_index_list = lappend_int(tableof_index_list, ((PLpgSQL_var*)datum)->dno); } else if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && K_MULTISET == tok) { Oid typeOid = get_table_type(datum); read_multiset(&ds, tableName1, typeOid); ds_changed = true; break; + } else { + PLpgSQL_var* var = (PLpgSQL_var*)datum; + if (OidIsValid(var->datatype->tableOfIndexType) && + (',' == tok || ')' == tok || ';' == tok)) { + is_have_tableof_index_var = true; + /* tableof_var_dno is only used for args */ + if (',' == tok || ')' == tok) { + tableof_var_dno = var_dno; + } + } } + curloc = yylloc; plpgsql_push_back_token(tok); if (list_length(idents) >= 3) { @@ -7174,6 +7593,36 @@ read_sql_construct6(int until, } } case T_TABLE: + { + int dno = yylval.wdatum.datum->dno; + PLpgSQL_var *var = (PLpgSQL_var *)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[dno]; + /* table of index by only supports: a = a(); */ + if ((OidIsValid(var->datatype->tableOfIndexType) && var->nest_table == NULL) || + (var->nest_table != NULL && var->isIndexByTblOf)) { + TokenData* temptokendata = build_token_data(tok); + int first_tok = yylex(); + DList* tokenstack = NULL; + TokenData* temptokendata1 = build_token_data(first_tok); + tokenstack = dlappend(tokenstack, temptokendata1); + if (first_tok != '(') { + yyerror("unsupported syntax for table of index by"); + } + + int second_tok = yylex(); + temptokendata1 = build_token_data(second_tok); + if (second_tok != ')') { + yyerror("unsupported syntax for table of index by"); + } + tokenstack = dlappend(tokenstack, temptokendata1); + /* restore yylex */ + push_back_token_stack(tokenstack); + yylloc = temptokendata->lloc; + yylval = temptokendata->lval; + u_sess->plsql_cxt.curr_compile_context->plpgsql_yyleng = temptokendata->leng; + } + ds_changed = construct_array_start(&ds, &context, var->datatype, &tok, parenlevel, loc); + break; + } case T_VARRAY: { if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { @@ -7210,7 +7659,7 @@ read_sql_construct6(int until, break; } case T_WORD: - AddNamespaceIfPkgVar(yylval.word.ident); + AddNamespaceIfPkgVar(yylval.word.ident, save_IdentifierLookup); ds_changed = construct_word(&ds, &context, &tok, parenlevel, loc);
break; case T_CWORD: @@ -7298,6 +7747,17 @@ read_sql_construct6(int until, oldCxt = MemoryContextSwitchTo(curr_compile->compile_cxt); } + if (tableof_index_list != NULL && tableof_func_dno != -1) { + ListCell* cell = NULL; + foreach (cell, tableof_index_list) { + int dno = lfirst_int(cell); + if (dno != tableof_func_dno) { + yyerror("do not support more than 2 table of index by variables calling functions in an expr"); + } + } + } + list_free_ext(tableof_index_list); + expr = (PLpgSQL_expr *)palloc0(sizeof(PLpgSQL_expr)); expr->dtype = PLPGSQL_DTYPE_EXPR; expr->query = pstrdup(ds.data); @@ -7306,6 +7766,11 @@ read_sql_construct6(int until, expr->ns = plpgsql_ns_top(); expr->isouttype = false; expr->idx = (uint32)-1; + expr->out_param_dno = -1; + expr->is_have_tableof_index_var = is_have_tableof_index_var; + expr->tableof_var_dno = tableof_var_dno; + expr->is_have_tableof_index_func = tableof_func_dno != -1 ? true : false; + expr->tableof_func_dno = tableof_func_dno; pfree_ext(ds.data); @@ -7835,7 +8300,6 @@ read_datatype(int tok) plpgsql_append_source_text(&ds, startlocation, yylloc); type_name = ds.data; - // do such change just for DTS2021090817171 if (type_name[0] == '\0') { #ifndef ENABLE_MULTIPLE_NODES if (u_sess->attr.attr_common.plsql_show_all_error) { @@ -8059,6 +8523,10 @@ make_execsql_stmt(int firsttoken, int location) continue; } else if (PLPGSQL_DTYPE_REC == datum->dtype) { rec_data = (PLpgSQL_rec*)datum; + if (rec_data->tupdesc == NULL) { + yyerror("unsupported insert into table from record type without a tuple descriptor, " + "may need to set behavior_compat_options to allow_procedure_compile_check."); + } continue; } @@ -8301,10 +8769,13 @@ make_execsql_stmt(int firsttoken, int location) expr->paramnos = NULL; expr->ns = plpgsql_ns_top(); expr->idx = (uint32)-1; + expr->out_param_dno = -1; pfree_ext(ds.data); check_sql_expr(expr->query, location, 0); - + if (firsttoken == K_SELECT && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && ALLOW_PROCEDURE_COMPILE_CHECK) { + (void)getCursorTupleDesc(expr, false, true); + } execsql = (PLpgSQL_stmt_execsql *)palloc(sizeof(PLpgSQL_stmt_execsql)); execsql->cmd_type = PLPGSQL_STMT_EXECSQL; execsql->lineno = plpgsql_location_to_lineno(location); @@ -8411,7 +8882,7 @@ read_fetch_direction(void) /* empty direction */ check_FROM = false; } - else if (tok == T_DATUM) + else if (tok == T_DATUM || tok == T_PACKAGE_VARIABLE) { /* Assume there's no direction clause and tok is a cursor name */ plpgsql_push_back_token(tok); @@ -8580,7 +9051,13 @@ make_return_stmt(int location) * Note that a well-formed expression is _required_ here; * anything else is a compile-time error.
*/ + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + newp->retvarno = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->out_param_varno; + } newp->expr = read_sql_expression(';', ";"); + if (newp->expr->is_have_tableof_index_var) { + yyerror("table of index type is not supported as function return type."); + } } } @@ -8627,6 +9104,7 @@ make_return_next_stmt(int location) { case T_DATUM: if (yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_ROW || + yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_RECORD || yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_REC) newp->retvarno = yylval.wdatum.dno; else { @@ -8732,6 +9210,28 @@ CopyNameOfDatum(PLwdatum *wdatum) return NameListToString(wdatum->idents); } +static void check_record_nest_tableof_index(PLpgSQL_datum* datum) +{ + if (datum->dtype == PLPGSQL_DTYPE_RECORD || datum->dtype == PLPGSQL_DTYPE_ROW) { + PLpgSQL_row* row = (PLpgSQL_row*)datum; + for (int i = 0; i < row->nfields; i++) { + PLpgSQL_datum* row_element = NULL; + if (row->ispkg) { + row_element = (PLpgSQL_datum*)(row->pkg->datums[row->varnos[i]]); + } else { + row_element = (PLpgSQL_datum*)(u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[row->varnos[i]]); + } + /* Notice: do not handle a record nested in a record nested table of index, because the table of index type cannot be obtained */ + if (row_element->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var_element = (PLpgSQL_var*)row_element; + if (OidIsValid(var_element->datatype->tableOfIndexType)) { + yyerror("record nested table of index variable does not support entire assignment"); + } + } + } + } +} + static void check_assignable(PLpgSQL_datum *datum, int location) { @@ -8791,9 +9291,17 @@ check_assignable(PLpgSQL_datum *datum, int location) } } -static Oid get_table_index_type(PLpgSQL_datum* datum) +static Oid get_table_index_type(PLpgSQL_datum* datum, int *tableof_func_dno) { PLpgSQL_var* var = (PLpgSQL_var*)datum; + if (OidIsValid(var->datatype->tableOfIndexType) && tableof_func_dno != NULL) { + if (*tableof_func_dno == -1) { + *tableof_func_dno = var->dno; + } else if (*tableof_func_dno != var->dno) { + yyerror("do not support more than 2 table of index by variables calling functions in an expr"); + } + } + return var->datatype->tableOfIndexType; } @@ -8821,6 +9329,30 @@ static void check_bulk_into_type(PLpgSQL_row* row) } } +static void check_tableofindex_args(int tableof_var_dno, Oid argtype) +{ + if (tableof_var_dno < 0 || u_sess->plsql_cxt.curr_compile_context == NULL) { + return; + } + PLpgSQL_datum* tableof_var_datum = u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[tableof_var_dno]; + if (tableof_var_datum == NULL) { + return; + } else if (tableof_var_datum->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = (PLpgSQL_var*)tableof_var_datum; + Oid base_oid = InvalidOid; + Oid indexby_oid = InvalidOid; + + if (isTableofType(argtype, &base_oid, &indexby_oid)) { + if (var->datatype->tableOfIndexType != indexby_oid || + var->datatype->typoid != base_oid) { + yyerror("procedure table of arg types do not match"); + } + } + } else if (tableof_var_datum->dtype == PLPGSQL_DTYPE_RECORD) { + check_record_nest_tableof_index(tableof_var_datum); + } +} + static void check_table_index(PLpgSQL_datum* datum, char* funcName) { PLpgSQL_var* var = (PLpgSQL_var*)datum; @@ -8948,14 +9480,13 @@ static AttrNumber get_assign_attrno(PLpgSQL_datum* target, char* attrname) if (elemtupledesc == NULL){ const char* message = "array element type is not composite in assignment"; InsertErrorMessage(message, plpgsql_yylloc); - ereport(errstate, + ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmodule(MOD_PLSQL), errmsg("array element type is not composite in assignment"), errdetail("array variable \"%s\" must be composite when assign value to attibute", var->refname), errcause("incorrectly referencing variables"), erraction("modify assign variable"))); - u_sess->plsql_cxt.have_error = true; } /* search the matched attribute */ @@ -9143,6 +9674,9 @@ read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, bool *strict, bool bulk_c { *strict = true; tok = yylex(); + } else if (strict && bulk_collect) { + /* bulk into target can be assigned null */ + *strict = false; } /* @@ -9909,12 +10443,33 @@ static PLpgSQL_var* plpgsql_build_nested_variable(PLpgSQL_var *nest_table, bool build_nest_table = (PLpgSQL_var *)plpgsql_build_variable(nestname, lineno, new_var_type, true); build_nest_table->isconst = isconst; build_nest_table->default_val = NULL; + build_nest_table->nest_layers = nest_table->nest_layers; if (nest_table->nest_table != NULL) { build_nest_table->nest_table = plpgsql_build_nested_variable(nest_table->nest_table, isconst, name, lineno); } return build_nest_table; } +static int get_nest_tableof_layer(PLpgSQL_var *var, const char *typname, int errstate) +{ + int depth = 0; + while (var != NULL) { + depth++; + var = var->nest_table; + } + if (depth + 1 > MAXDIM) { + ereport(errstate, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmodule(MOD_PLSQL), + errmsg("Layer number of nest tableof type exceeds the maximum allowed."), + errdetail("Define nest tableof type \"%s\" layers (%d) exceeds the maximum allowed (%d).", typname, depth + 1, MAXDIM), + errcause("too many nested layers"), + erraction("check define of table of type"))); + u_sess->plsql_cxt.have_error = true; + } + return depth + 1; +} + static void getPkgFuncTypeName(char* typname, char** functypname, char** pkgtypname) { if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL) { @@ -10246,6 +10801,10 @@ static PLpgSQL_type* build_type_from_record_var(int dno) /* we have already build one, just take it from datums. 
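`get_nest_tableof_layer` above counts the already-defined layers by walking the `nest_table` chain and rejects a definition that would push the total past `MAXDIM`. A self-contained sketch of the same walk, assuming `MAXDIM` is the usual PostgreSQL array-dimension cap of 6:

```cpp
#include <cstdio>

constexpr int MAXDIM = 6;  /* assumption: mirrors PostgreSQL's dimension cap */

/* Stand-in for PLpgSQL_var's nest_table chain. */
struct Var {
    Var* nest_table;  /* inner table-of layer, or nullptr */
};

/* Count existing layers; defining one more must stay within MAXDIM,
 * which is why the check above tests depth + 1. */
static int nest_tableof_layer(const Var* var)
{
    int depth = 0;
    for (; var != nullptr; var = var->nest_table) {
        depth++;
    }
    if (depth + 1 > MAXDIM) {
        std::fprintf(stderr, "nest tableof layers (%d) exceed the maximum (%d)\n",
                     depth + 1, MAXDIM);
    }
    return depth + 1;
}

int main()
{
    Var inner = {nullptr};
    Var outer = {&inner};
    std::printf("new type would have %d layers\n", nest_tableof_layer(&outer));
}
```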
*/ if (ns->itemtype == PLPGSQL_NSTYPE_COMPOSITE) { newp = (PLpgSQL_type*)(u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[ns->itemno]); + /* build a new one, avoid conflict by array or table of */ + newp = plpgsql_build_datatype(newp->typoid, -1, InvalidOid); + newp->dtype = PLPGSQL_DTYPE_COMPOSITE; + newp->ttype = PLPGSQL_TTYPE_ROW; } else { ereport(errstate, (errcode(ERRCODE_DUPLICATE_OBJECT), @@ -10387,18 +10946,26 @@ static void plpgsql_build_package_array_type(const char* typname,Oid elemtypoid } } - if (arraytype == TYPCATEGORY_TABLEOF || arraytype == TYPCATEGORY_TABLEOF_VARCHAR) { + if (arraytype == TYPCATEGORY_TABLEOF || + arraytype == TYPCATEGORY_TABLEOF_VARCHAR || + arraytype == TYPCATEGORY_TABLEOF_INTEGER) { elemtypoid = get_array_type(elemtypoid); typtyp = TYPTYPE_TABLEOF; } else { typtyp = TYPTYPE_BASE; } + Oid ownerId = InvalidOid; + ownerId = GetUserIdFromNspId(pkgNamespaceOid); + if (!OidIsValid(ownerId)) { + ownerId = GetUserId(); + } + Oid typoid = TypeCreate(InvalidOid, /* force the type's OID to this */ casttypename, /* Array type name */ pkgNamespaceOid, /* Same namespace as parent */ InvalidOid, /* Not composite, no relationOid */ 0, /* relkind, also N/A here */ - GetUserId(), /* owner's ID */ + ownerId, /* owner's ID */ -1, /* Internal size (varlena) */ typtyp, /* Not composite - typelem is */ arraytype, /* type-category (array or table of) */ @@ -10651,10 +11218,30 @@ read_cursor_args(PLpgSQL_var *cursor, int until, const char *expected) parser_errposition(yylloc))); } + bool isPkgCur = cursor->ispkg && + (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL + || u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid != cursor->pkg->pkg_oid); + if (isPkgCur) { + ereport(errstate, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("package cursor with arguments is only supported to be opened in the same package."), + errdetail("cursor \"%s.%s\" is only supported to be opened in the package \"%s\"", + cursor->pkg->pkg_signature, + cursor->varname == NULL ? cursor->refname : cursor->varname, + cursor->pkg->pkg_signature), + errcause("feature not supported"), + erraction("define this cursor without arguments or open this cursor in same package"), + parser_errposition(yylloc))); + } + /* * Read the arguments, one by one. 
*/ - row = (PLpgSQL_row *) u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[cursor->cursor_explicit_argrow]; + if (cursor->ispkg) { + row = (PLpgSQL_row *) cursor->pkg->datums[cursor->cursor_explicit_argrow]; + } else { + row = (PLpgSQL_row *) u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[cursor->cursor_explicit_argrow]; + } argv = (char **) palloc0(row->nfields * sizeof(char *)); for (argc = 0; argc < row->nfields; argc++) @@ -10787,6 +11374,7 @@ read_cursor_args(PLpgSQL_var *cursor, int until, const char *expected) expr->paramnos = NULL; expr->ns = plpgsql_ns_top(); expr->idx = (uint32)-1; + expr->out_param_dno = -1; pfree_ext(ds.data); /* Next we'd better find the until token */ @@ -11026,6 +11614,7 @@ make_callfunc_stmt_no_arg(const char *sqlstart, int location, bool withsemicolon expr->paramnos = NULL; expr->ns = plpgsql_ns_top(); expr->idx = (uint32)-1; + expr->out_param_dno = -1; PLpgSQL_stmt_perform *perform = NULL; perform = (PLpgSQL_stmt_perform*)palloc0(sizeof(PLpgSQL_stmt_perform)); @@ -11101,6 +11690,7 @@ parse_lob_open_close(int location) expr->paramnos = NULL; expr->ns = plpgsql_ns_top(); expr->idx = (uint32)-1; + expr->out_param_dno = -1; perform = (PLpgSQL_stmt_perform*)palloc0(sizeof(PLpgSQL_stmt_perform)); perform->cmd_type = PLPGSQL_STMT_PERFORM; @@ -11219,12 +11809,16 @@ static void AddNamespaceIfNeed(int dno, char* ident) return; } -static void AddNamespaceIfPkgVar(const char* ident) +static void AddNamespaceIfPkgVar(const char* ident, IdentifierLookup save_IdentifierLookup) { if (getCompileStatus() != COMPILIE_PKG_FUNC) { return; } + /* only declare session need to add */ + if (save_IdentifierLookup != IDENTIFIER_LOOKUP_DECLARE) { + return; + } if (ident == NULL) { yyerror("null string when add package variable to procedure namespace"); @@ -11373,6 +11967,17 @@ static bool PkgVarNeedCast(List* idents) } +static void check_autonomous_nest_tablevar(PLpgSQL_var* var) +{ + if (unlikely(var->ispkg && var->nest_table != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->is_autonomous)) { + ereport(errstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support feature: nest tableof variable %s not support pass through autonm function", + var->varname))); + } +} + static void SetErrorState() { #ifndef ENABLE_MULTIPLE_NODES @@ -11384,4 +11989,85 @@ static void SetErrorState() #else errstate = ERROR; #endif -} +} + +static bool need_build_row_for_func_arg(PLpgSQL_rec **rec, PLpgSQL_row **row, int out_arg_num, int all_arg, int *varnos, char *p_argmodes) +{ + /* out arg number > 1 should build a row */ + if (out_arg_num > 1) { + return true; + } + + /* no row or rec, should build a row */ + if (*rec == NULL && *row == NULL) { + return true; + } + + /* no out arg, need not build */ + if (out_arg_num == 0) { + return false; + } + + PLpgSQL_compile_context* curr_compile = u_sess->plsql_cxt.curr_compile_context; + PLpgSQL_datum *tempDatum = NULL; + for (int i = 0; i < all_arg; i++) { + if (p_argmodes[i] == 'i') { + continue; + } + + /* out param no destination */ + if (varnos[i] == -1) { + *rec = NULL; + *row = NULL; + return false; + } + + tempDatum = curr_compile->plpgsql_Datums[varnos[i]]; + /* the out param is the rec or row */ + if (tempDatum == (PLpgSQL_datum*)(*rec) || tempDatum == (PLpgSQL_datum*)(*row)) { + return false; + } + /* the out param is scalar, need build row */ + if (tempDatum->dtype == PLPGSQL_DTYPE_VAR + || tempDatum->dtype == 
PLPGSQL_DTYPE_RECFIELD + || tempDatum->dtype == PLPGSQL_DTYPE_ASSIGNLIST) { + return true; + } + + /* need not build a new row, but need to replace the correct row */ + if (tempDatum->dtype == PLPGSQL_DTYPE_ROW || tempDatum->dtype == PLPGSQL_DTYPE_RECORD) { + *row = (PLpgSQL_row *)tempDatum; + return false; + } + if (tempDatum->dtype == PLPGSQL_DTYPE_REC) { + *rec = (PLpgSQL_rec *)tempDatum; + return false; + } + + /* arrive here, means out param invalid, set row and rec to null*/ + *rec = NULL; + *row = NULL; + return false; + } + + /* should not arrive here */ + *rec = NULL; + *row = NULL; + return false; +} + +#ifndef ENABLE_MULTIPLE_NODES +static void BuildForQueryVariable(PLpgSQL_expr* expr, PLpgSQL_row **row, PLpgSQL_rec **rec, + const char* refname, int lineno) +{ + TupleDesc desc = getCursorTupleDesc(expr, true); + if (desc == NULL || desc->natts == 0 || checkAllAttrName(desc)) { + PLpgSQL_type dtype; + dtype.ttype = PLPGSQL_TTYPE_REC; + *rec = (PLpgSQL_rec *) + plpgsql_build_variable(refname, lineno, &dtype, true); + } else { + *row = build_row_from_tuple_desc(refname, lineno, desc); + } +} +#endif diff --git a/src/common/pl/plpgsql/src/pl_comp.cpp b/src/common/pl/plpgsql/src/pl_comp.cpp index 614d0154a..3d8b90fbc 100644 --- a/src/common/pl/plpgsql/src/pl_comp.cpp +++ b/src/common/pl/plpgsql/src/pl_comp.cpp @@ -91,7 +91,7 @@ static PLpgSQL_function* plpgsql_HashTableLookup(PLpgSQL_func_hashkey* func_key) static void plpgsql_HashTableInsert(PLpgSQL_function* func, PLpgSQL_func_hashkey* func_key); static void plpgsql_append_dlcell(plpgsql_HashEnt* entity); static bool plpgsql_lookup_tripword_datum(int itemno, const char* word2, const char* word3, - PLwdatum* wdatum, int* tok_flag); + PLwdatum* wdatum, int* tok_flag, bool isPkgVar); static void get_datum_tok_type(PLpgSQL_datum* target, int* tok_flag); static PLpgSQL_type* plpgsql_get_cursor_type_relid(const char* cursorname, const char* colname, MemoryContext oldCxt); static int find_package_rowfield(PLpgSQL_datum* datum, const char* pkgName, const char* schemaName = NULL); @@ -103,7 +103,8 @@ extern bool plpgsql_check_insert_colocate( Query* query, List* qry_part_attr_num, List* trig_part_attr_num, PLpgSQL_function* func); typedef int (*plsql_parser)(void); -static inline plsql_parser PlsqlParser(){ +static inline plsql_parser PlsqlParser() +{ int (*plsql_parser_hook)(void) = plpgsql_yyparse; #ifndef ENABLE_MULTIPLE_NODES if (u_sess->attr.attr_sql.enable_custom_parser) { @@ -241,19 +242,24 @@ recheck: * Do the hard part. */ PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + int save_compile_status = getCompileStatus(); PG_TRY(); { func = do_compile(fcinfo, proc_tup, func, &hashkey, for_validator); + (void)CompileStatusSwtichTo(save_compile_status); } PG_CATCH(); { #ifndef ENABLE_MULTIPLE_NODES - bool show_all_error = u_sess->attr.attr_common.plsql_show_all_error && u_sess->plsql_cxt.isCreateFunction; - if (show_all_error) { + bool insertError = (u_sess->attr.attr_common.plsql_show_all_error || + u_sess->attr.attr_sql.check_function_bodies) && + u_sess->plsql_cxt.isCreateFunction; + if (insertError) { InsertError(func_oid); } #endif popToOldCompileContext(save_compile_context); + (void)CompileStatusSwtichTo(save_compile_status); PG_RE_THROW(); } PG_END_TRY(); @@ -300,15 +306,18 @@ PLpgSQL_function* plpgsql_compile_nohashkey(FunctionCallInfo fcinfo) * set true by plpgsql_validator in CREATE FUNCTION or DO command. 
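`need_build_row_for_func_arg` boils down to a small set of rules about when a wrapper row must be built for a call's OUT arguments. A condensed, illustrative restatement of those rules (plain enums replace the PL/pgSQL datum types; this is not the actual openGauss signature):

```cpp
/* Condensed decision rules from need_build_row_for_func_arg above. */
enum class DatumKind { Scalar, Row, Rec, Invalid };

static bool need_build_row(int out_arg_num, bool has_target, DatumKind first_out)
{
    if (out_arg_num > 1) return true;    /* several OUT args: wrap them in a row */
    if (!has_target) return true;        /* no row/rec destination yet: build one */
    if (out_arg_num == 0) return false;  /* nothing to return into */

    switch (first_out) {
        case DatumKind::Scalar: return true;   /* a scalar OUT target still needs a row */
        case DatumKind::Row:                   /* an existing row/rec can be reused */
        case DatumKind::Rec:    return false;
        default:                return false;  /* invalid target: caller clears row/rec */
    }
}
```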
*/ PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + int save_compile_status = getCompileStatus(); PG_TRY(); { if (func == NULL) { func = do_compile(fcinfo, proc_tup, func, NULL, false); } + (void)CompileStatusSwtichTo(save_compile_status); } PG_CATCH(); { popToOldCompileContext(save_compile_context); + (void)CompileStatusSwtichTo(save_compile_status); PG_RE_THROW(); } PG_END_TRY(); @@ -398,6 +407,10 @@ MemoryContext getCompileContext(char* const context_name) return AllocSetContextCreate(u_sess->top_mem_cxt, context_name, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); } break; + case COMPILIE_PKG_ANON_BLOCK_FUNC: { + return AllocSetContextCreate(u_sess->top_mem_cxt, context_name, + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + } break; default: { ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized compile type: %d", u_sess->plsql_cxt.compile_status))); @@ -419,6 +432,7 @@ static void initCompileContext(PLpgSQL_compile_context* compile_cxt, MemoryConte compile_cxt->plpgsql_error_pkgname = NULL; compile_cxt->plpgsql_parse_result = NULL; compile_cxt->plpgsql_Datums = NULL; + compile_cxt->datum_need_free = NULL; compile_cxt->plpgsql_curr_compile = NULL; compile_cxt->plpgsql_DumpExecTree = false; compile_cxt->plpgsql_pkg_DumpExecTree = false; @@ -498,8 +512,7 @@ PLpgSQL_compile_context* createCompileContext(char* const context_name) void pushCompileContext() { if (u_sess->plsql_cxt.curr_compile_context != NULL) { - checkCompileMemoryContext(u_sess->plsql_cxt.curr_compile_context->compile_cxt); - MemoryContext oldcontext = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_cxt); + MemoryContext oldcontext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); u_sess->plsql_cxt.compile_context_list = lcons(u_sess->plsql_cxt.curr_compile_context, u_sess->plsql_cxt.compile_context_list); MemoryContextSwitchTo(oldcontext); @@ -509,7 +522,7 @@ void pushCompileContext() PLpgSQL_compile_context* popCompileContext() { if (u_sess->plsql_cxt.compile_context_list != NIL) { - u_sess->plsql_cxt.compile_context_list = list_delete_first_nofree(u_sess->plsql_cxt.compile_context_list); + u_sess->plsql_cxt.compile_context_list = list_delete_first(u_sess->plsql_cxt.compile_context_list); if (list_length(u_sess->plsql_cxt.compile_context_list) == 0) { u_sess->plsql_cxt.compile_context_list = NIL; return NULL; @@ -628,6 +641,14 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, } else { proisprivatedatum = BoolGetDatum(false); } + + /* get function's prokind */ + Datum prokindDatum = SysCacheGetAttr(PROCOID, proc_tup, Anum_pg_proc_prokind, &isnull); + bool isFunc = false; + if (isnull || PROC_IS_FUNC(DatumGetChar(prokindDatum))) { + /* Null prokind items are created when there is no procedure */ + isFunc = true; + } /* * Setup error traceback support for ereport() */ @@ -658,12 +679,32 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup, rc = snprintf_s( context_name, NAMEDATALEN, NAMEDATALEN - 1, "%s_%lu", "PL/pgSQL function context", u_sess->debug_query_id); securec_check_ss(rc, "", ""); + int save_compile_status = getCompileStatus(); + PLpgSQL_compile_context* curr_compile = NULL; + /* If has other function in package body header, should switch compile status to function, + in case compile function in package's context. 
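Both compile paths above apply the same save-and-restore discipline: the compile status is captured before `PG_TRY()` and switched back both on the success path and in `PG_CATCH()`. In standalone C++ the equivalent guarantee can be written as an RAII guard; this is only an analogy to show why both paths must restore (openGauss itself must use PG_TRY/PG_CATCH because backend errors unwind via longjmp):

```cpp
#include <stdexcept>

/* RAII analogy for the save/CompileStatusSwtichTo pattern above. */
struct CompileStatusGuard {
    int& slot;
    int saved;
    explicit CompileStatusGuard(int& s) : slot(s), saved(s) {}
    ~CompileStatusGuard() { slot = saved; }  /* restored even if compile throws */
};

static int compile_status = 0;

static void do_compile_like()
{
    CompileStatusGuard guard(compile_status);
    compile_status = 42;                        /* e.g. switch to a nested mode */
    throw std::runtime_error("compile error");  /* status is still restored */
}

int main()
{
    try { do_compile_like(); } catch (const std::exception&) {}
    return compile_status;  /* back to 0, restored by the guard */
}
```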
+    */
+    if (u_sess->plsql_cxt.compile_status == COMPILIE_PKG_ANON_BLOCK) {
+        if (!OidIsValid(pkgoid)) {
+            /* has function in package body header */
+            save_compile_status = CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK_FUNC);
+        } else if (u_sess->plsql_cxt.curr_compile_context != NULL &&
+                   u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL &&
+                   u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid != pkgoid) {
+            /* must not compile another package's function here in the package body header */
+            ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNEXPECTED_NODE_STATE),
+                errmsg("current compile context's package %u does not match current package %u",
+                    u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid, pkgoid),
+                errdetail("N/A"), errcause("nested compilation switch context error"),
+                erraction("check logic of compilation")));
+        }
+    }

     /*
      * if the function belong to a package,then the function memorycontext will use the package memorycontext,
      * because function may use package variable
      */
-    PLpgSQL_compile_context* curr_compile = createCompileContext(context_name);
+    curr_compile = createCompileContext(context_name);
     SPI_NESTCOMPILE_LOG(curr_compile->compile_cxt);
+    MemoryContext temp = NULL;
     if (u_sess->plsql_cxt.curr_compile_context != NULL) {
         checkCompileMemoryContext(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt);
@@ -710,6 +751,7 @@ static PLpgSQL_function* do_compile(FunctionCallInfo fcinfo, HeapTuple proc_tup,
     func->out_param_varno = -1; /* set up for no OUT param */
     func->resolve_option = GetResolveOption();
     func->invalItems = NIL;
+    func->is_autonomous = false;
     func->pkg_oid = pkgoid;
     func->fn_searchpath->addCatalog = true;
@@ -795,8 +837,13 @@
     /* This is short-lived, so needn't allocate in function's cxt */
     curr_compile->plpgsql_Datums = (PLpgSQL_datum**)MemoryContextAlloc(
         curr_compile->compile_tmp_cxt, sizeof(PLpgSQL_datum*) * curr_compile->datums_alloc);
+    curr_compile->datum_need_free = (bool*)MemoryContextAlloc(
+        curr_compile->compile_tmp_cxt, sizeof(bool) * curr_compile->datums_alloc);
     curr_compile->datums_last = 0;
-    add_pkg_compile();
+    add_pkg_compile();
+    Oid base_oid = InvalidOid;
+    bool isHaveTableOfIndexArgs = false;
+    bool isHaveOutRefCursorArgs = false;

     switch ((int)is_trigger) {
         case false:
@@ -832,6 +879,9 @@
                 int arg_item_type;
                 errno_t err = EOK;

+                /* check whether any procedure arg has a table of index-by type */
+                isHaveTableOfIndexArgs = isHaveTableOfIndexArgs || isTableofIndexbyType(arg_types[i]);
+
                 /* Create $n name for variable */
                 err = snprintf_s(buf, sizeof(buf), sizeof(buf) - 1, "$%d", i + 1);
                 securec_check_ss(err, "", "");
@@ -872,6 +922,12 @@
                     out_arg_variables[num_out_args++] = argvariable;
                 }

+                if (arg_type_id == REFCURSOROID
+                    && (arg_mode == PROARGMODE_OUT || arg_mode == PROARGMODE_INOUT)) {
+                    isHaveOutRefCursorArgs = true;
+                }
+
+                /* Add to namespace under the $n name */
                 add_parameter_name(arg_item_type, argvariable->dno, buf);
@@ -927,10 +983,20 @@
                 }
             }

+            if (isTableofType(rettypeid, &base_oid, NULL)) {
+                func->fn_rettype = base_oid;
+            } else {
+                func->fn_rettype = rettypeid;
+            }
+
+            if (rettypeid == REFCURSOROID) {
+                isHaveOutRefCursorArgs = true;
+            }
+
             /*
              * Normal
              * function has a defined returntype
              */
-            func->fn_rettype = rettypeid;
+
             func->fn_retset = proc_struct->proretset;

             /*
@@ -1090,7 +1156,6 @@
     } else {
         var = plpgsql_build_variable("sqlcode", 0, plpgsql_build_datatype(INT4OID, -1, InvalidOid), true, true);
     }
-
     func->sqlcode_varno = var->dno;
     var = plpgsql_build_variable("sqlstate", 0, plpgsql_build_datatype(TEXTOID, -1, InvalidOid), true, true);
     func->sqlstate_varno = var->dno;
@@ -1152,6 +1217,33 @@
                 erraction("Modify SQL statement according to the manual.")));
     }

+    if (isHaveTableOfIndexArgs && func->action->isAutonomous) {
+        ereport(ERROR,
+            (errmodule(MOD_PLSQL),
+                errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Autonomous transactions do not support table of index or record nested table of index as in or out args."),
+                errdetail("N/A"), errcause("PL/SQL uses unsupported feature."),
+                erraction("Modify SQL statement according to the manual.")));
+    }
+
+    if (isHaveTableOfIndexArgs && isFunc) {
+        ereport(ERROR,
+            (errmodule(MOD_PLSQL),
+                errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Functions do not support table of index or record nested table of index as in or out args."),
+                errdetail("N/A"), errcause("PL/SQL uses unsupported feature."),
+                erraction("Modify SQL statement according to the manual.")));
+    }
+
+    if (isHaveOutRefCursorArgs && isFunc && func->action->isAutonomous) {
+        ereport(ERROR,
+            (errmodule(MOD_PLSQL),
+                errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Autonomous functions do not support ref cursor as return types or out or inout arguments."),
+                errdetail("N/A"), errcause("PL/SQL uses unsupported feature."),
+                erraction("Use procedure instead.")));
+    }
+
 #ifdef ENABLE_MULTIPLE_NODES
     if (proc_struct->provolatile != PROVOLATILE_VOLATILE && func->action->isAutonomous) {
         ereport(ERROR, (errmodule(MOD_PLSQL),
@@ -1202,8 +1294,10 @@
     }
     func->ndatums = curr_compile->plpgsql_nDatums;
     func->datums = (PLpgSQL_datum**)palloc(sizeof(PLpgSQL_datum*) * curr_compile->plpgsql_nDatums);
+    func->datum_need_free = (bool*)palloc(sizeof(bool) * curr_compile->plpgsql_nDatums);
     for (i = 0; i < curr_compile->plpgsql_nDatums; i++) {
         func->datums[i] = curr_compile->plpgsql_Datums[i];
+        func->datum_need_free[i] = curr_compile->datum_need_free[i];
     }
     /* Debug dump for completed functions */
     if (curr_compile->plpgsql_DumpExecTree) {
@@ -1226,11 +1320,13 @@
     curr_compile->plpgsql_curr_compile = NULL;
     curr_compile->plpgsql_check_syntax = false;

-    if (curr_compile->plpgsql_curr_compile_package != NULL) {
+    if (curr_compile->plpgsql_curr_compile_package != NULL &&
+        func->fn_cxt == curr_compile->plpgsql_curr_compile_package->pkg_cxt) {
         List* proc_compiled_list = curr_compile->plpgsql_curr_compile_package->proc_compiled_list;
         curr_compile->plpgsql_curr_compile_package->proc_compiled_list = lappend(proc_compiled_list, func);
     }
     MemoryContextSwitchTo(curr_compile->compile_tmp_cxt);
+    CompileStatusSwtichTo(save_compile_status);
     if (curr_compile->plpgsql_curr_compile_package == NULL)
         curr_compile->compile_tmp_cxt = NULL;
     ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG),
@@ -1313,6 +1409,7 @@ PLpgSQL_function* plpgsql_compile_inline(char* proc_source)
     curr_compile->datums_alloc = alloc_size;
     curr_compile->plpgsql_nDatums = 0;
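The `datum_need_free` additions above give each compiled function a bool array parallel to its datum array, recording which datums the function owns and must free. A simplified sketch of that snapshot step (the `Datum_` and `Func` types are stand-ins for the PL/pgSQL structures):

```cpp
#include <cstdlib>

struct Datum_ { int dno; };

struct Func {
    int ndatums;
    Datum_** datums;
    bool* datum_need_free;
};

/* Copy the compile-time datum pointers together with their ownership flags,
 * keeping the two arrays the same length and in lockstep. */
static void snapshot_datums(Func* func, Datum_** compile_datums,
                            const bool* compile_need_free, int n)
{
    func->ndatums = n;
    func->datums = (Datum_**)malloc(sizeof(Datum_*) * n);
    func->datum_need_free = (bool*)malloc(sizeof(bool) * n);
    for (int i = 0; i < n; i++) {
        func->datums[i] = compile_datums[i];
        func->datum_need_free[i] = compile_need_free[i];  /* flag i describes datum i */
    }
}
```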
curr_compile->plpgsql_Datums = (PLpgSQL_datum**)palloc(sizeof(PLpgSQL_datum*) * curr_compile->datums_alloc); + curr_compile->datum_need_free = (bool*)palloc(sizeof(bool) * curr_compile->datums_alloc); curr_compile->datums_last = 0; /* Set up as though in a function returning VOID */ @@ -1322,6 +1419,7 @@ PLpgSQL_function* plpgsql_compile_inline(char* proc_source) /* a bit of hardwired knowledge about type VOID here */ func->fn_retbyval = true; func->fn_rettyplen = sizeof(int32); + func->is_autonomous = false; getTypeInputInfo(VOIDOID, &typinput, &func->fn_rettypioparam); fmgr_info(typinput, &(func->fn_retinput)); @@ -1361,9 +1459,8 @@ PLpgSQL_function* plpgsql_compile_inline(char* proc_source) if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { var = plpgsql_build_variable("sqlcode", 0, plpgsql_build_datatype(TEXTOID, -1, InvalidOid), true, true); } else { - var = plpgsql_build_variable("sqlcode", 0, plpgsql_build_datatype(INT4OID, -1, InvalidOid), true, true); + var = plpgsql_build_variable("sqlcode", 0, plpgsql_build_datatype(INT4OID, -1, InvalidOid), true, true); } - func->sqlcode_varno = var->dno; var = plpgsql_build_variable("sqlstate", 0, plpgsql_build_datatype(TEXTOID, -1, InvalidOid), true, true); func->sqlstate_varno = var->dno; @@ -1405,8 +1502,10 @@ PLpgSQL_function* plpgsql_compile_inline(char* proc_source) func->fn_nargs = 0; func->ndatums = curr_compile->plpgsql_nDatums; func->datums = (PLpgSQL_datum**)palloc(sizeof(PLpgSQL_datum*) * curr_compile->plpgsql_nDatums); + func->datum_need_free = (bool*)palloc(sizeof(bool) * curr_compile->plpgsql_nDatums); for (int i = 0; i < curr_compile->plpgsql_nDatums; i++) { func->datums[i] = curr_compile->plpgsql_Datums[i]; + func->datum_need_free[i] = curr_compile->datum_need_free[i]; } /* * Pop the error context stack @@ -2314,7 +2413,7 @@ static bool isVarrayWord(const char *compWord, const char *firstWord, const char int tok_flag = -1; PLwdatum wdatum; wdatum.datum = NULL; - plpgsql_lookup_tripword_datum(ns->itemno, secondWord, thirdWord, &wdatum, &tok_flag); + plpgsql_lookup_tripword_datum(ns->itemno, secondWord, thirdWord, &wdatum, &tok_flag, false); if (wdatum.datum == NULL) { return false; } @@ -2581,7 +2680,7 @@ bool plpgsql_parse_tripword(char* word1, char* word2, char* word3, PLwdatum* wda } if (ns != NULL && nnames == 1 && ns->itemtype == PLPGSQL_NSTYPE_ROW) { - if (plpgsql_lookup_tripword_datum(ns->itemno, word2, word3, wdatum, tok_flag)) { + if (plpgsql_lookup_tripword_datum(ns->itemno, word2, word3, wdatum, tok_flag, false)) { wdatum->idents = idents; wdatum->ident = pstrdup(NameListToString(idents)); return true; @@ -2686,7 +2785,7 @@ bool plpgsql_parse_quadword(char* word1, char* word2, char* word3, char* word4, * words 1/2 are a row name, so word 3/4 word could be * fields in this row. 
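The `plpgsql_adddatum` change further below grows the datum array and the new `datum_need_free` array in the same doubling step, so `datum_need_free[i]` always describes `plpgsql_Datums[i]`. A generic sketch of that lockstep growth, assuming the list starts with a non-zero allocation:

```cpp
#include <cstdlib>

struct DatumList {
    void** items;
    bool* need_free;
    int count;
    int alloc;
};

/* Append one item; when full, double both arrays in a single step so the
 * flag array never falls out of sync with the item array. */
static int add_datum(DatumList* list, void* item, bool owned)
{
    if (list->count == list->alloc) {
        list->alloc *= 2;
        list->items = (void**)realloc(list->items, sizeof(void*) * list->alloc);
        list->need_free = (bool*)realloc(list->need_free, sizeof(bool) * list->alloc);
    }
    list->items[list->count] = item;
    list->need_free[list->count] = owned;
    return list->count++;
}

int main()
{
    DatumList list = { (void**)malloc(sizeof(void*)), (bool*)malloc(sizeof(bool)), 0, 1 };
    int dummy = 0;
    add_datum(&list, &dummy, false);  /* fits in the initial allocation */
    add_datum(&list, &dummy, true);   /* triggers the doubling */
}
```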
*/ - if (plpgsql_lookup_tripword_datum(ns->itemno, word3, word4, wdatum, tok_flag)) { + if (plpgsql_lookup_tripword_datum(ns->itemno, word3, word4, wdatum, tok_flag, true)) { wdatum->idents = idents; wdatum->ident = pstrdup(NameListToString(idents)); return true; @@ -2705,7 +2804,7 @@ bool plpgsql_parse_quadword(char* word1, char* word2, char* word3, char* word4, row = (PLpgSQL_row*)(curr_compile->plpgsql_Datums[ns->itemno]); for (i = 0; i < row->nfields; i++) { if (row->fieldnames[i] && strcmp(row->fieldnames[i], word2) == 0) { - if (plpgsql_lookup_tripword_datum(row->varnos[i], word3, word4, wdatum, tok_flag)) { + if (plpgsql_lookup_tripword_datum(row->varnos[i], word3, word4, wdatum, tok_flag, false)) { wdatum->idents = idents; wdatum->ident = pstrdup(NameListToString(idents)); return true; @@ -2767,7 +2866,7 @@ bool plpgsql_parse_quadword(char* word1, char* word2, char* word3, char* word4, List *idents2 = list_make2(makeString(word1), makeString(word2)); dno = plpgsql_pkg_add_unknown_var_to_namespace(idents2); if (dno != -1) { - if (plpgsql_lookup_tripword_datum(dno, word3, word4, wdatum, tok_flag)) { + if (plpgsql_lookup_tripword_datum(dno, word3, word4, wdatum, tok_flag, true)) { wdatum->idents = idents; wdatum->ident = pstrdup(NameListToString(idents)); return true; @@ -3201,7 +3300,7 @@ static void build_cursor_variable(int varno) pfree_ext(str.data); } -char *plpgsql_code_int2cstring(const int sqlcode) +const char *plpgsql_code_int2cstring(int sqlcode) { if (sqlcode >= 0) { ereport(ERROR, @@ -3211,17 +3310,14 @@ char *plpgsql_code_int2cstring(const int sqlcode) } char* buf = t_thrd.buf_cxt.unpack_sql_state_buf; - char codename[12] = ""; errno_t rc = EOK; - rc = sprintf_s(codename, sizeof(codename) - 1, "%d", sqlcode); + rc = snprintf_s(buf, SQL_STATE_BUF_LEN, SQL_STATE_BUF_LEN - 1, "%d", sqlcode); securec_check_ss(rc, "\0", "\0"); - rc = strcpy_s(buf, sizeof(t_thrd.buf_cxt.unpack_sql_state_buf), codename); - securec_check_c(rc, "\0", "\0"); return buf; } -int plpgsql_code_cstring2int(const char *codename) +const int plpgsql_code_cstring2int(const char *codename) { Assert(codename != NULL); int sqlcode = atoi(codename); @@ -3240,8 +3336,11 @@ int plpgsql_code_cstring2int(const char *codename) return sqlcode; } -void plpgsql_set_variable(const char* varname, const int value) +void plpgsql_set_variable(const char* varname, int value) { +#ifdef ENABLE_MULTIPLE_NODES + return; +#else if (value >= 0) { ereport(ERROR, (errmodule(MOD_PLSQL), @@ -3272,6 +3371,8 @@ void plpgsql_set_variable(const char* varname, const int value) (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("Undefined exception name '%s' in EXCEPTION_INIT", varname))); + return; +#endif } /* @@ -3312,7 +3413,9 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ var->isnull = true; var->freeval = false; var->nest_table = NULL; + var->nest_layers = 0; var->tableOfIndexType = dtype->tableOfIndexType; + var->isIndexByTblOf = (OidIsValid(var->tableOfIndexType)); var->addNamespace = add2namespace; var->varname = varname == NULL ? 
NULL : pstrdup(varname); varno = plpgsql_adddatum((PLpgSQL_datum*)var); @@ -3326,6 +3429,7 @@ PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQ } if (dtype->typoid == REFCURSOROID) { build_cursor_variable(varno); + var->cursor_closed = false; } result = (PLpgSQL_variable*)var; break; @@ -3465,6 +3569,7 @@ PLpgSQL_variable* plpgsql_build_tableType(const char* refname, int lineno, PLpgS var->isnull = true; var->freeval = false; var->isImplicit = true; + var->isIndexByTblOf = false; varno = plpgsql_adddatum((PLpgSQL_datum*)var); if (add2namespace) { @@ -3821,6 +3926,8 @@ PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation) typ->collectionType = PLPGSQL_COLLECTION_TABLE; if (((Form_pg_type)GETSTRUCT(type_tup))->typcategory == TYPCATEGORY_TABLEOF_VARCHAR) { typ->tableOfIndexType = VARCHAROID; + } else if (((Form_pg_type)GETSTRUCT(type_tup))->typcategory == TYPCATEGORY_TABLEOF_INTEGER) { + typ->tableOfIndexType = INT4OID; } ReleaseSysCache(base_type_tup); } else { @@ -3922,22 +4029,30 @@ int plpgsql_adddatum(PLpgSQL_datum* newm, bool isChange) curr_compile->datums_pkg_alloc *= 2; curr_compile->plpgsql_Datums = (PLpgSQL_datum**)repalloc( curr_compile->plpgsql_Datums, sizeof(PLpgSQL_datum*) * curr_compile->datums_pkg_alloc); + curr_compile->datum_need_free = (bool*)repalloc( + curr_compile->datum_need_free, sizeof(bool) * curr_compile->datums_pkg_alloc); } if (isChange) { newm->dno = curr_compile->plpgsql_pkg_nDatums; } - curr_compile->plpgsql_Datums[curr_compile->plpgsql_pkg_nDatums++] = newm; + curr_compile->plpgsql_Datums[curr_compile->plpgsql_pkg_nDatums] = newm; + curr_compile->datum_need_free[curr_compile->plpgsql_pkg_nDatums] = isChange; + curr_compile->plpgsql_pkg_nDatums++; return curr_compile->plpgsql_pkg_nDatums - 1; } else { if (curr_compile->plpgsql_nDatums == curr_compile->datums_alloc) { curr_compile->datums_alloc *= 2; curr_compile->plpgsql_Datums = (PLpgSQL_datum**)repalloc( curr_compile->plpgsql_Datums, sizeof(PLpgSQL_datum*) * curr_compile->datums_alloc); + curr_compile->datum_need_free = (bool*)repalloc( + curr_compile->datum_need_free, sizeof(bool) * curr_compile->datums_alloc); } if (isChange) { newm->dno = curr_compile->plpgsql_nDatums; } - curr_compile->plpgsql_Datums[curr_compile->plpgsql_nDatums++] = newm; + curr_compile->plpgsql_Datums[curr_compile->plpgsql_nDatums] = newm; + curr_compile->datum_need_free[curr_compile->plpgsql_nDatums] = isChange; + curr_compile->plpgsql_nDatums++; return curr_compile->plpgsql_nDatums - 1; } } @@ -4240,7 +4355,7 @@ static void plpgsql_resolve_polymorphic_argtypes( * pointers to the same function cache. Hence be careful not to do things * twice. 
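The rewritten `plpgsql_code_int2cstring` in this hunk replaces a format-into-temporary-then-copy sequence with one bounded format straight into the shared thread buffer. A standalone equivalent, with standard `snprintf` in place of the secure-library `snprintf_s` and an assumed buffer length:

```cpp
#include <cstdio>

#define SQL_STATE_BUF_LEN 12  /* assumption: fits "-2147483648" plus NUL */
static char unpack_sql_state_buf[SQL_STATE_BUF_LEN];

/* One bounded format into the shared buffer, instead of sprintf into a
 * temporary array followed by strcpy. */
static const char* code_int2cstring(int sqlcode)
{
    std::snprintf(unpack_sql_state_buf, sizeof(unpack_sql_state_buf), "%d", sqlcode);
    return unpack_sql_state_buf;
}

int main()
{
    std::puts(code_int2cstring(-103));
}
```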
*/ -void delete_function(PLpgSQL_function* func) +void delete_function(PLpgSQL_function* func, bool fromPackage) { u_sess->plsql_cxt.is_delete_function = true; @@ -4250,7 +4365,7 @@ void delete_function(PLpgSQL_function* func) plpgsql_HashTableDelete(func); /* release the function's storage if safe and not done already */ if (func->use_count == 0) { - plpgsql_free_function_memory(func); + plpgsql_free_function_memory(func, fromPackage); } u_sess->plsql_cxt.is_delete_function = false; @@ -4509,7 +4624,7 @@ bool plpgsql_check_insert_colocate( /* Try to find field word2 and word3 of a row variable, if find, return ture */ static bool plpgsql_lookup_tripword_datum(int itemno, const char* word2, const char* word3, - PLwdatum* wdatum, int* tok_flag) + PLwdatum* wdatum, int* tok_flag, bool isPkgVar) { PLpgSQL_compile_context* curr_compile = u_sess->plsql_cxt.curr_compile_context; @@ -4523,7 +4638,7 @@ static bool plpgsql_lookup_tripword_datum(int itemno, const char* word2, const c int i; bool isPkg = false; row = (PLpgSQL_row*)(curr_compile->plpgsql_Datums[itemno]); - isPkg = row->ispkg; + isPkg = row->ispkg && isPkgVar; /* find field word2 first */ for (i = 0; i < row->nfields; i++) { @@ -4602,9 +4717,9 @@ static void get_datum_tok_type(PLpgSQL_datum* target, int* tok_flag) } } -TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect) +TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyParse) { - if (expr == NULL) { + if (expr == NULL || !ALLOW_PROCEDURE_COMPILE_CHECK) { return NULL; } if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) { @@ -4616,6 +4731,9 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect) expr->func->fn_cxt = CurrentMemoryContext; } else { expr->func = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile; + if (expr->func->fn_is_trigger) { + return NULL; + } } MemoryContext current_context = CurrentMemoryContext; bool temp_pre_parse_trig = expr->func->pre_parse_trig ; @@ -4657,7 +4775,9 @@ TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect) } Query* query = (Query*)linitial(queryList); Assert(IsA(query, Query)); - tupleDesc = ExecCleanTypeFromTL(query->targetList, false); + if (!isOnlyParse) { + tupleDesc = ExecCleanTypeFromTL(query->targetList, false); + } expr->func->pre_parse_trig = temp_pre_parse_trig; expr->func->datums = NULL; expr->func->ndatums = 0; diff --git a/src/common/pl/plpgsql/src/pl_debugger.cpp b/src/common/pl/plpgsql/src/pl_debugger.cpp index 56e4a0918..22f342625 100644 --- a/src/common/pl/plpgsql/src/pl_debugger.cpp +++ b/src/common/pl/plpgsql/src/pl_debugger.cpp @@ -184,7 +184,9 @@ void check_debug(PLpgSQL_function* func, PLpgSQL_execstate* estate) func->debug->cur_opt = DEBUG_CONTINUE_HEADER_AFTER; } } else { - entry->func = func; + if (entry != NULL) { + entry->func = func; + } /* maintain session's debug server is on base turn on function */ u_sess->plsql_cxt.cur_debug_server = func->debug; } @@ -259,7 +261,8 @@ bool handle_debug_msg(DebugInfo* debug, char* firstChar, PLpgSQL_execstate* esta switch (*firstChar) { case DEBUG_ATTACH_HEADER: debug_server_attach(debug, estate); - need_wait = true; + /* if already get to function's end when attach, no need to wait for next command. */ + need_wait = debug->stop_next_stmt ? 
true : false; break; case DEBUG_LOCALS_HEADER: debug_server_local_variables(debug); @@ -477,7 +480,7 @@ PLDebug_variable* get_debug_variable_var(PLpgSQL_var* node, const char* target) } else { var->value = OidOutputFunctionCall(form->typoutput, node->value); } - if (node->pkg != NULL) { + if (node->ispkg && node->pkg != NULL) { NameData* pkgName = GetPackageName(node->pkg->pkg_oid); var->pkgname = AssignStr(NameStr(*pkgName)); } else { @@ -539,7 +542,7 @@ PLDebug_variable* get_debug_variable_row(PLpgSQL_row* node, PLpgSQL_execstate* e heap_freetuple_ext(tuple); pfree(buf); } - if (node->pkg != NULL) { + if (node->ispkg && node->pkg != NULL) { NameData* pkgName = GetPackageName(node->pkg->pkg_oid); var->pkgname = AssignStr(NameStr(*pkgName)); } else { @@ -573,7 +576,7 @@ PLDebug_variable* get_debug_variable_rec(PLpgSQL_rec* node, const char* target) var->value = pstrdup(buf->data); pfree(buf); } - if (node->pkg != NULL) { + if (node->ispkg && node->pkg != NULL) { NameData* pkgName = GetPackageName(node->pkg->pkg_oid); var->pkgname = AssignStr(NameStr(*pkgName)); } else { @@ -891,7 +894,12 @@ static bool get_cur_info(StringInfo str, PLpgSQL_execstate* estate, DebugInfo* d /* turn to show code's lineno */ if (query) { + char* maskquery = maskPassword(query); + query = (maskquery == NULL) ? query : maskquery; appendStringInfo(str, "%u:%s:%d:%s", funcoid, pkgfuncname, lineno, query); + if (maskquery != query) { + pfree_ext(maskquery); + } } else { if (debug->debugStackIdx == 0) { set_debugger_procedure_state(debug->comm->comm_idx, false); @@ -1008,6 +1016,11 @@ static void debug_server_add_breakpoint(DebugInfo* debug) } int lineno = (int)pg_strtouint64(new_fir, NULL, int64Size); char* query = pstrdup(psave); + char* maskquery = (query == NULL) ? NULL : maskPassword(query); + if (maskquery != NULL && maskquery != query) { + pfree_ext(query); + query = maskquery; + } int newIndex = -1; @@ -1146,7 +1159,10 @@ PLDebug_frame* get_frame(DebugInfo* debug) frame->frameno = debug->debugStackIdx; frame->funcname = quote_qualified_identifier(pkgname, funcname); frame->lineno = debug->cur_stmt->lineno; - frame->query = get_stmt_query(debug->cur_stmt); + char* query = get_stmt_query(debug->cur_stmt); + char* maskquery = (query == NULL) ? NULL : maskPassword(query); + query = (maskquery == NULL) ? query : maskquery; + frame->query = query; frame->funcoid = debug->func->fn_oid; pfree(funcname); return frame; @@ -1181,6 +1197,7 @@ static PLpgSQL_expr* ConstructAssignExpr(char* value, PLpgSQL_nsitem* ns_top) expr->ns = ns_top; expr->dno = -1; expr->idx = -1; + expr->out_param_dno = -1; pfree(str.data); return expr; } diff --git a/src/common/pl/plpgsql/src/pl_debugger_api.cpp b/src/common/pl/plpgsql/src/pl_debugger_api.cpp index 8e17ddd83..be78dbc51 100644 --- a/src/common/pl/plpgsql/src/pl_debugger_api.cpp +++ b/src/common/pl/plpgsql/src/pl_debugger_api.cpp @@ -330,6 +330,7 @@ static Datum get_info_local_data(const char* var_name, const int frameno, Functi */ Datum debug_client_info_code(PG_FUNCTION_ARGS) { + InterfaceCheck("info_code", false); Oid funcid = PG_GETARG_OID(0); const int DEBUG_LOCAL_VAR_TUPLE_ATTR_NUM = 3; @@ -385,7 +386,15 @@ Datum debug_client_info_code(PG_FUNCTION_ARGS) } else { nulls[i++] = true; } - values[i++] = CStringGetTextDatum(entry->code); + if (entry->code != NULL) { + char* maskcode = maskPassword(entry->code); + char* code = (maskcode == NULL) ? 
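The debugger hunks above all apply the same mask-then-free idiom around `maskPassword`: use the masked copy when one is returned, fall back to the original on NULL, and free the copy once it has been used. A self-contained sketch with a toy `mask_password` stand-in (not the real openGauss function):

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

/* Toy stand-in: returns a freshly allocated masked copy, or NULL when
 * nothing in the query needed masking. */
static char* mask_password(const char* query)
{
    if (std::strstr(query, "PASSWORD") == nullptr) {
        return nullptr;  /* nothing sensitive: caller keeps the original */
    }
    const char* masked = "CREATE USER u PASSWORD '********';";
    char* copy = (char*)std::malloc(std::strlen(masked) + 1);
    std::strcpy(copy, masked);
    return copy;
}

/* Mask, fall back on NULL, free the copy after use. */
static void emit_query(const char* query)
{
    char* masked = mask_password(query);
    const char* shown = (masked == nullptr) ? query : masked;
    std::printf("%s\n", shown);
    if (masked != nullptr) {
        std::free(masked);
    }
}

int main()
{
    emit_query("SELECT 1;");
    emit_query("CREATE USER u PASSWORD 'secret';");
}
```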
entry->code : maskcode; + values[i++] = CStringGetTextDatum(code); + if (code != maskcode) + pfree_ext(maskcode); + } else { + nulls[i++] = true; + } values[i++] = BoolGetDatum(entry->canBreak); tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); diff --git a/src/common/pl/plpgsql/src/pl_exec.cpp b/src/common/pl/plpgsql/src/pl_exec.cpp index b2e55305f..41ab2a0d4 100644 --- a/src/common/pl/plpgsql/src/pl_exec.cpp +++ b/src/common/pl/plpgsql/src/pl_exec.cpp @@ -19,8 +19,10 @@ #include +#include "access/tuptoaster.h" #include "access/transam.h" #include "access/tupconvert.h" +#include "access/xact.h" #include "auditfuncs.h" #include "catalog/pg_proc.h" #include "catalog/gs_package.h" @@ -31,7 +33,10 @@ #include "gssignal/gs_signal.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" #include "parser/parse_coerce.h" +#include "parser/parse_expr.h" #include "parser/scansup.h" #include "pgaudit.h" #include "pgstat.h" @@ -143,8 +148,9 @@ void exec_eval_cleanup(PLpgSQL_execstate* estate); static void exec_prepare_plan(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, int cursorOptions); static bool exec_simple_check_node(Node* node); -static void exec_simple_check_plan(PLpgSQL_expr* expr); +static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr* expr); static void exec_simple_recheck_plan(PLpgSQL_expr* expr, CachedPlan* cplan); +static void exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan); static bool exec_eval_simple_expr( PLpgSQL_execstate* estate, PLpgSQL_expr* expr, Datum* result, bool* isNull, Oid* rettype, HTAB** tableOfIndex); @@ -161,8 +167,8 @@ static int exec_run_select(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, long m static int exec_for_query(PLpgSQL_execstate* estate, PLpgSQL_stmt_forq* stmt, Portal portal, bool prefetch_ok, int dno); static ParamListInfo setup_param_list(PLpgSQL_execstate* estate, PLpgSQL_expr* expr); static void plpgsql_param_fetch(ParamListInfo params, int paramid); -static void exec_move_row( - PLpgSQL_execstate* estate, PLpgSQL_rec* rec, PLpgSQL_row* row, HeapTuple tup, TupleDesc tupdesc); +static void exec_move_row(PLpgSQL_execstate* estate, + PLpgSQL_rec* rec, PLpgSQL_row* row, HeapTuple tup, TupleDesc tupdesc, bool fromExecSql = false); static void exec_read_bulk_collect(PLpgSQL_execstate *estate, PLpgSQL_row *row, SPITupleTable *tuptab); static char* convert_value_to_string(PLpgSQL_execstate* estate, Datum value, Oid valtype); static Datum pl_coerce_type_typmod(Datum value, Oid targetTypeId, int32 targetTypMod); @@ -181,17 +187,20 @@ static void exec_set_sql_cursor_found(PLpgSQL_execstate* estate, PLpgSQL_state s static void exec_set_sql_notfound(PLpgSQL_execstate* estate, PLpgSQL_state state); static void exec_set_sql_isopen(PLpgSQL_execstate* estate, bool state); static void exec_set_sql_rowcount(PLpgSQL_execstate* estate, int rowcount); -static void plpgsql_create_econtext(PLpgSQL_execstate *estate); +static void plpgsql_create_econtext(PLpgSQL_execstate *estate, MemoryContext saveCxt = NULL); static void plpgsql_destroy_econtext(PLpgSQL_execstate *estate); static void free_var(PLpgSQL_var* var); -static void assign_text_var(PLpgSQL_var* var, const char* str); static PreparedParamsData* exec_eval_using_params(PLpgSQL_execstate* estate, List* params); static void free_params_data(PreparedParamsData* ppd); static Portal exec_dynquery_with_params( PLpgSQL_execstate* estate, PLpgSQL_expr* dynquery, List* 
params, const char* portalname, int cursorOptions); static void exec_set_sqlcode(PLpgSQL_execstate* estate, int sqlcode); static void exec_set_prev_sqlcode(PLpgSQL_execstate* estate, PLpgSQL_execstate* estate_prev); - +#ifndef ENABLE_MULTIPLE_NODES +static void exec_set_cursor_att_var(PLpgSQL_execstate* estate, PLpgSQL_execstate* estate_prev); +static bool IsAutoOutParam(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt, int dno = -1); +static void CheckAssignTarget(PLpgSQL_execstate* estate, int dno); +#endif static int search_for_valid_line(PLpgSQL_stmt* stmt, int linenum, int); static int check_line_validity(List* stmts, int linenum, int); static int check_line_validity_in_block(PLpgSQL_stmt_block* block, int linenum, int); @@ -207,7 +216,7 @@ static void BindCursorWithPortal(Portal portal, PLpgSQL_execstate *estate, int v static char* transformAnonymousBlock(char* query); static bool needRecompilePlan(SPIPlanPtr plan); -static void rebuild_exception_subtransaction_chain(PLpgSQL_execstate* estate); +static void rebuild_exception_subtransaction_chain(PLpgSQL_execstate* estate, List* transactionList); static void stp_check_transaction_and_set_resource_owner(ResourceOwner oldResourceOwner,TransactionId oldTransactionId); static void stp_check_transaction_and_create_econtext(PLpgSQL_execstate* estate,TransactionId oldTransactionId); @@ -221,7 +230,7 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar static PLpgSQL_datum* get_indirect_target(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntarget, const char* attrname); static void evalSubscriptList(PLpgSQL_execstate* estate, const List* subscripts, - int* subscriptvals, int nsubscripts, PLpgSQL_datum** target); + int* subscriptvals, int nsubscripts, PLpgSQL_datum** target, HTAB** elemTableOfIndex); static PLpgSQL_temp_assignvar* build_temp_assignvar_from_datum(PLpgSQL_datum* target, int* subscriptvals, int nsubscripts); static PLpgSQL_temp_assignvar* extractArrayElem(PLpgSQL_execstate* estate, PLpgSQL_temp_assignvar* target, @@ -241,11 +250,14 @@ void plpgsql_restore_current_value_stp_with_exception(bool saved_current_stp_wit static int addNewNestedTable(PLpgSQL_execstate* estate, TableOfIndexKey key, PLpgSQL_var* base_table); static PLpgSQL_var* evalSubsciptsNested(PLpgSQL_execstate* estate, PLpgSQL_var* tablevar, PLpgSQL_expr** subscripts, int nsubscripts, int pos, int* subscriptvals, Oid subscriptType, HTAB* tableOfIndex); +static void assignNestTableOfValue(PLpgSQL_execstate* estate, PLpgSQL_var* var, Datum oldvalue, HTAB* tableOfIndex); static PLpgSQL_row* copyPLpgsqlRow(PLpgSQL_row* src); static PLpgSQL_type* copyPLpgsqlType(PLpgSQL_type* src); static PLpgSQL_rec* copyPLpgsqlRec(PLpgSQL_rec* src); static PLpgSQL_recfield* copyPLpgsqlRecfield(PLpgSQL_recfield* src); +static List* invalid_depend_func_and_packgae(Oid pkgOid); +static void ReportCompileConcurrentError(const char* objName, bool isPackage); /* ---------- * plpgsql_check_line_validity Called by the debugger plugin for @@ -473,8 +485,11 @@ static char* AssembleAutomnousStatement(PLpgSQL_function* func, FunctionCallInfo Form_pg_proc procform = (Form_pg_proc)GETSTRUCT(procTup); char* proname = NameStr(procform->proname); int nargs = procform->pronargs; - - appendStringInfoString(&buf, "SELECT "); + if (is_function_with_plpgsql_language_and_outparam(func->fn_oid)) { + appendStringInfoString(&buf, "SELECT * from "); + } else { + appendStringInfoString(&buf, "SELECT "); + } /* * Would this proc be found (given the right args) by regprocedurein? 
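`AssembleAutomnousStatement` above now emits `SELECT * from` rather than a plain `SELECT` when the target is a plpgsql function with OUT parameters, so the autonomous session gets the OUT columns back as a row. A toy illustration of that choice, using `std::string` instead of the backend's `StringInfo`:

```cpp
#include <cstdio>
#include <string>

/* Illustrative only: real code appends to a StringInfo and then renders
 * each argument with TypeValueToString. */
static std::string assemble_call(const std::string& qualified_name, bool has_out_params)
{
    std::string buf = has_out_params ? "SELECT * from " : "SELECT ";
    buf += qualified_name;
    buf += "(...)";  /* argument list elided */
    return buf;
}

int main()
{
    std::printf("%s\n", assemble_call("public.f", true).c_str());
    std::printf("%s\n", assemble_call("public.f", false).c_str());
}
```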
@@ -522,7 +537,11 @@ static char* AssembleAutomnousStatement(PLpgSQL_function* func, FunctionCallInfo
     errno_t rc = memcpy_s(argTypes, allNumArgs * sizeof(Oid), ARR_DATA_PTR(arr), allNumArgs * sizeof(Oid));
     securec_check(rc, "\0", "\0");
     for (int i = 0; i < allNumArgs; i++) {
+#ifndef ENABLE_MULTIPLE_NODES
+        if (argTypes[i] == REFCURSOROID && (argmodes[i] == PROARGMODE_IN || argmodes[i] == PROARGMODE_INOUT)) {
+#else
         if (argTypes[i] == REFCURSOROID) {
+#endif
             pfree_ext(argTypes);
             ReleaseSysCache(procTup);
             ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED),
@@ -538,7 +557,8 @@ static char* AssembleAutomnousStatement(PLpgSQL_function* func, FunctionCallInfo
     bool prokindIsNULL = false;
     Datum prokindDatum = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_prokind, &prokindIsNULL);
     char prokind = CharGetDatum(prokindDatum);
-    if (!prokindIsNULL && PROC_IS_PRO(prokind) && enable_out_param_override()) {
+    if ((!prokindIsNULL && PROC_IS_PRO(prokind) && enable_out_param_override()) ||
+        (PROC_IS_FUNC(prokind) && is_function_with_plpgsql_language_and_outparam(procedureOid))) {
         if (i > 0)
             appendStringInfoChar(&buf, ',');
         TypeValueToString(&buf, argTypes[i], (Datum)0, true);
@@ -551,7 +571,11 @@ static char* AssembleAutomnousStatement(PLpgSQL_function* func, FunctionCallInfo
         for (int i = 0; i < nargs; i++) {
             if (i > 0)
                 appendStringInfoChar(&buf, ',');
-
+            if (fcinfo->argTypes[i] == REFCURSOROID) {
+                ReleaseSysCache(procTup);
+                ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+                    errmsg("Un-support: ref_cursor parameter is not supported for autonomous transactions.")));
+            }
             TypeValueToString(&buf, fcinfo->argTypes[i], fcinfo->arg[i], fcinfo->argnull[i]);
         }
     }
@@ -573,6 +597,175 @@ static char* AssembleAutomnousStatement(PLpgSQL_function* func, FunctionCallInfo
     return buf.data;
 }

+/*
+ * A subtransaction's resowner can't be released right away, since its
+ * invoker may still refer to it. We keep such resowners and their scope
+ * in one list, and then try to release them later.
+ */
+typedef struct {
+    /* reserved subxact's resourceowner */
+    ResourceOwner resowner;
+
+    /*
+     * resourceowner's scope; no one refers to it before this stackId.
+     * '-1' means it can be released at any time.
+     */
+    int64 stackId;
+
+    /* next item in the list */
+    void *next;
+} XactContextItem;
+
+/*
+ * reserve the current subxact's resowner into the session list.
+ *
+ * NOTE: the resowner's scope (u_sess->plsql_cxt.minSubxactStackId) should be ready in advance.
+ */
+void stp_reserve_subxact_resowner(ResourceOwner resowner)
+{
+    MemoryContext oldcxt = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE));
+    XactContextItem *item = (XactContextItem*)palloc(sizeof(XactContextItem));
+    item->resowner = resowner;
+    item->stackId = u_sess->plsql_cxt.minSubxactStackId;
+
+    ResourceOwnerNewParent(item->resowner, NULL);
+    ResourceOwnerMarkInvalid(item->resowner);
+    item->next = u_sess->plsql_cxt.spi_xact_context;
+    MemoryContextSwitchTo(oldcxt);
+
+    u_sess->plsql_cxt.spi_xact_context = item;
+}
+
+/*
+ * release reserved subxact resowners after 'stackId'.
+ */
+void stp_cleanup_subxact_resowner(int64 stackId)
+{
+    XactContextItem *item = (XactContextItem*)u_sess->plsql_cxt.spi_xact_context;
+    XactContextItem *preItem = NULL;
+
+    while (item != NULL) {
+        /*
+         * (1) those at an upper stack level can be cleaned at this level.
+         * (2) -1 means it can be cleaned at any time.
+         */
+        if (item->stackId > stackId || item->stackId == -1) {
+            ResourceOwnerDelete(item->resowner);

+            /* remove it and process the next one.
*/ + if (preItem != NULL) { + preItem->next = item->next; + pfree(item); + item = (XactContextItem*)preItem->next; + } else { + u_sess->plsql_cxt.spi_xact_context = item->next; + pfree(item); + item = (XactContextItem*)u_sess->plsql_cxt.spi_xact_context; + } + } else { + preItem = item; + item = (XactContextItem*)item->next; + } + } +} + +/* reset store procedure context for each transaction. */ +void stp_reset_xact() +{ + /* cleanup all reserved subxact resourceowner. */ + stp_cleanup_subxact_resowner(-1); + + /* reset stack counter for xact. */ + u_sess->plsql_cxt.nextStackEntryId = 0; +} + +/* reset store procedure context for each statement. */ +void stp_reset_stmt() +{ + stp_reset_opt_values(); + + /* any reserved subxact resowner in previous statement can be released now. */ + stp_cleanup_subxact_resowner(-1); +} + +bool recheckTableofType(TupleDesc tupdesc, TupleDesc retdesc) +{ + int n = tupdesc->natts; + bool has_change = false; + + for (int i = 0; i < n; i++) { + Form_pg_attribute att = tupdesc->attrs[i]; + if (att->attisdropped) + continue; + Oid baseOid = InvalidOid; + if (isTableofType(att->atttypid, &baseOid, NULL)) { + Oid typOid = baseOid; + char colname[NAMEDATALEN] = {0}; + errno_t rc = memcpy_s(colname, NAMEDATALEN, tupdesc->attrs[i]->attname.data, NAMEDATALEN); + securec_check(rc, "\0", "\0"); + TupleDescInitEntry(tupdesc, i + 1, colname, typOid, retdesc->attrs[i]->atttypmod, 0); + has_change = true; + } + } + return has_change; +} + +static void free_func_tableof_index() +{ + if (u_sess->plsql_cxt.func_tableof_index == NULL) { + return; + } + ListCell* l = NULL; + foreach (l, u_sess->plsql_cxt.func_tableof_index) { + PLpgSQL_func_tableof_index* func_tableof = (PLpgSQL_func_tableof_index*)lfirst(l); + hash_destroy(func_tableof->tableOfIndex); + } + + list_free_deep(u_sess->plsql_cxt.func_tableof_index); + u_sess->plsql_cxt.func_tableof_index = NIL; +} + +static void init_implicit_cursor_attr(PLpgSQL_execstate *estate) +{ + /* + * Set the magic variable FOUND to false + */ + exec_set_found(estate, false); + /* Set the magic implicit cursor attribute variable FOUND to false */ + exec_set_sql_cursor_found(estate, PLPGSQL_NULL); + + /* Set the magic implicit cursor attribute variable NOTFOUND to true */ + exec_set_sql_notfound(estate, PLPGSQL_NULL); + + /* Set the magic implicit cursor attribute variable ISOPEN to false */ + exec_set_sql_isopen(estate, false); + + /* Set the magic implicit cursor attribute variable ROWCOUNT to 0 */ + exec_set_sql_rowcount(estate, -1); + +} + +#ifndef ENABLE_MULTIPLE_NODES +static void reset_implicit_cursor_attr(PLpgSQL_execstate *estate) +{ + /* + * Set the magic variable FOUND to false + */ + exec_set_found(estate, false); + /* Set the magic implicit cursor attribute variable FOUND to false */ + exec_set_sql_cursor_found(estate, PLPGSQL_FALSE); + + /* Set the magic implicit cursor attribute variable NOTFOUND to true */ + exec_set_sql_notfound(estate, PLPGSQL_TRUE); + + /* Set the magic implicit cursor attribute variable ISOPEN to false */ + exec_set_sql_isopen(estate, false); + + /* Set the magic implicit cursor attribute variable ROWCOUNT to 0 */ + exec_set_sql_rowcount(estate, 0); +} +#endif + /* ---------- * plpgsql_exec_autonm_function Called by the call handler for * autonomous function execution. 
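`stp_reserve_subxact_resowner` and `stp_cleanup_subxact_resowner` above maintain a singly linked list of resource owners, each tagged with the stack level below which no caller can still reference it; `cleanup(-1)` drains everything. A generic restatement of the pair, with a plain `free` standing in for `ResourceOwnerDelete`:

```cpp
#include <cstdlib>

struct Item {
    long stackId;  /* -1: releasable at any time */
    Item* next;
};

static Item* reserved = nullptr;

/* Push a reserved owner with the scope below which nobody refers to it. */
static void reserve(long stackId)
{
    Item* it = (Item*)std::malloc(sizeof(Item));
    it->stackId = stackId;
    it->next = reserved;
    reserved = it;
}

/* Release every item scoped above 'stackId', plus any -1 item. */
static void cleanup(long stackId)
{
    Item** link = &reserved;
    while (*link != nullptr) {
        Item* it = *link;
        if (it->stackId > stackId || it->stackId == -1) {
            *link = it->next;  /* unlink and release */
            std::free(it);
        } else {
            link = &it->next;
        }
    }
}

int main()
{
    reserve(3);
    reserve(-1);
    cleanup(2);  /* releases both: stackId 3 > 2, and -1 is always releasable */
}
```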
@@ -582,7 +775,34 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, char* sourceText) { TupleDesc tupDesc = NULL; - + PLpgSQL_execstate estate; + FormatCallStack plcallstack; + /* + * Setup the execution state + */ + plpgsql_estate_setup(&estate, func, (ReturnSetInfo*)fcinfo->resultinfo); + func->debug = NULL; + + /* setup call stack for format_call_stack */ + plcallstack.elem = &estate; + plcallstack.prev = t_thrd.log_cxt.call_stack; + t_thrd.log_cxt.call_stack = &plcallstack; + /* + * Make local execution copies of all the datums + */ + estate.err_text = gettext_noop("during initialization of execution state"); + /* + * if the datum type is unknown, it means it's a package variable,so we need + * compile the package , and replace the unknown type. + */ + for (int i = 0; i < estate.ndatums; i++) { + if (!func->datums[i]->ispkg) { + estate.datums[i] = copy_plpgsql_datum(func->datums[i]); + } else { + estate.datums[i] = func->datums[i]; + } + } + #ifdef ENABLE_MULTIPLE_NODES if (IS_PGXC_DATANODE) { ereport(ERROR, @@ -592,34 +812,52 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, #endif #ifndef ENABLE_MULTIPLE_NODES - uint64 sessionId = IS_THREAD_POOL_WORKER ? u_sess->session_id : t_thrd.proc_cxt.MyProcPid; - /* add session package values to global for autonm session, to restore package values */ - if (OidIsValid(func->fn_oid)) { - BuildSessionPackageRuntime(sessionId, u_sess->autonomous_parent_sessionid); + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_cursor_att_var(&estate, estate_tmp); + } else { +#endif + init_implicit_cursor_attr(&estate); +#ifndef ENABLE_MULTIPLE_NODES } #endif - /* - * libpq link establishment - * If no, create it. If yes, check its link status. + + if (estate.sqlcode_varno) { + if (plcallstack.prev != NULL) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_prev_sqlcode(&estate, estate_tmp); + } + } + + /* + * libpq link establishment + * If no, create it. If yes, check its link status. */ CreateAutonomousSession(); - if (func->fn_rettype == REFCURSOROID) { - ereport(ERROR, - (errmodule(MOD_PLSQL), - errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ref_cursor parameter is not supported for autonomous transactions."), - errdetail("N/A"), - errcause("PL/SQL uses unsupported feature."), - erraction("Modify SQL statement according to the manual."))); +#ifndef ENABLE_MULTIPLE_NODES + uint64 sessionId = IS_THREAD_POOL_WORKER ? u_sess->session_id : t_thrd.proc_cxt.MyProcPid; + /* add session package values to global for autonm session, to restore package values */ + if (OidIsValid(func->fn_oid)) { + BuildSessionPackageRuntimeForAutoSession(sessionId, u_sess->autonomous_parent_sessionid, &estate, func); } +#endif /* Statement concatenation. If the block is an anonymous block, the entire anonymous block is returned. */ char* sql = AssembleAutomnousStatement(func, fcinfo, sourceText); /* If the return value of the function is of the record type, add the function to the temporary cache. 
*/ - bool returnTypeNotMatch = sourceText == NULL && func->fn_retistuple - && get_func_result_type(fcinfo->flinfo->fn_oid, NULL, &tupDesc) != TYPEFUNC_COMPOSITE; + bool returnTypeNotMatch = false; + if (is_function_with_plpgsql_language_and_outparam(func->fn_oid)) { + Oid fn_rettype = func->fn_rettype; + TypeFuncClass typclass; + construct_func_param_desc(fcinfo->flinfo->fn_oid, &typclass, &tupDesc, &fn_rettype); + returnTypeNotMatch = sourceText == NULL && func->fn_retistuple; + } else { + returnTypeNotMatch = sourceText == NULL && func->fn_retistuple + && get_func_result_type(fcinfo->flinfo->fn_oid, NULL, &tupDesc) != TYPEFUNC_COMPOSITE; + } + if (returnTypeNotMatch) { ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), @@ -650,13 +888,29 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, pfree(buf.data); } } + StringInfoData buf; + initStringInfo(&buf); +#ifndef ENABLE_MULTIPLE_NODES + if (is_function_with_plpgsql_language_and_outparam(func->fn_oid)) { + appendStringInfo(&buf, "set behavior_compat_options='proc_outparam_override';"); + } +#endif + if (COMPAT_CURSOR) { + appendStringInfo(&buf, "set behavior_compat_options='COMPAT_CURSOR';"); + } + appendStringInfo(&buf, "start transaction;"); + (void)u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery(buf.data, NULL, 0); + if (buf.data != NULL) { + pfree(buf.data); + } - (void)u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery("start transaction;", NULL, 0); - - int64 automnXid = u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery("select txid_current();", NULL, 0); + ATResult res = u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery("select txid_current();", NULL, 0); + int64 automnXid = res.ResTup; /* Call the libpq interface to send function names and parameters. */ - Datum res = u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery(sql, tupDesc, automnXid, true); + res = u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery( + sql, tupDesc, automnXid, true, is_function_with_plpgsql_language_and_outparam(func->fn_oid)); + fcinfo->isnull = res.resisnull; /* Process the information whose return value is of the record type. */ if (sourceText == NULL && func->fn_retistuple) { @@ -665,7 +919,7 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, * expected result type. XXX would be better to cache the tupdesc * instead of repeating get_call_result_type() */ - HeapTuple retTup = (HeapTuple)DatumGetPointer(res); + HeapTuple retTup = (HeapTuple)DatumGetPointer(res.ResTup); TupleDesc outTupdesc; TupleConversionMap* tupMap = NULL; switch (get_call_result_type(fcinfo, NULL, &outTupdesc)) { @@ -691,7 +945,7 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, * Copy tuple to upper executor memory, as a tuple Datum. Make * sure it is labeled with the caller-supplied tuple type. 
*/ - res = PointerGetDatum(SPI_returntuple(retTup, outTupdesc)); + res.ResTup = PointerGetDatum(SPI_returntuple(retTup, outTupdesc)); } else { /* * If the function's return type isn't by value, copy the value @@ -702,19 +956,38 @@ Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, void* tmp = NULL; errno_t rc = EOK; - len = datumGetSize(res, false, func->fn_rettyplen); + len = datumGetSize(res.ResTup, false, func->fn_rettyplen); tmp = SPI_palloc(len); - rc = memcpy_s(tmp, len, DatumGetPointer(res), len); + rc = memcpy_s(tmp, len, DatumGetPointer(res.ResTup), len); securec_check(rc, "\0", "\0"); - res = PointerGetDatum(tmp); + res.ResTup = PointerGetDatum(tmp); } } if (sql != NULL) { pfree(sql); } - return res; +#ifndef ENABLE_MULTIPLE_NODES + /* for restore parent session and automn session package var values */ + List *autonmsList = processAutonmSessionPkgs(func, NULL, true); + if (autonmsList != NULL) { + reset_implicit_cursor_attr(&estate); + } + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_cursor_att_var(estate_tmp, &estate); + } +#endif + /* Clean up any leftover temporary memory */ + plpgsql_destroy_econtext(&estate); + exec_eval_cleanup(&estate); + /* pop the call stack */ + t_thrd.log_cxt.call_stack = plcallstack.prev; + /* + * Return the function's result + */ + return res.ResTup; } /* ---------- @@ -738,6 +1011,12 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo #ifndef ENABLE_MULTIPLE_NODES check_debug(func, &estate); + bool isExecAutoFunc = OidIsValid(func->fn_oid) && + u_sess->is_autonomous_session == true && u_sess->SPI_cxt._connected == 0; + /* when exec autonomous transaction procedure, need update package values by parent session */ + if (isExecAutoFunc) { + initAutoSessionPkgsValue(u_sess->autonomous_parent_sessionid); + } #endif saved_current_stp_with_exception = plpgsql_get_current_value_stp_with_exception(); @@ -777,6 +1056,21 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo * Store the actual call argument values into the appropriate variables */ estate.err_text = gettext_noop("while storing call arguments into local variables"); + int outArgCnt = 0; + bool isNULL = false; + char *argmodes = NULL; + HeapTuple procTup = NULL; + Datum proArgModes = 0; + bool is_plpgsql_function_with_outparam = is_function_with_plpgsql_language_and_outparam(func->fn_oid); + if (func->fn_nargs != fcinfo->nargs && is_plpgsql_function_with_outparam) { + procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(func->fn_oid)); + proArgModes = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_proargmodes, &isNULL); + Assert(!isNULL); + if (!isNULL) { + ArrayType *arr = DatumGetArrayTypeP(proArgModes); + argmodes = (char *)ARR_DATA_PTR(arr); + } + } for (i = 0; i < func->fn_nargs; i++) { int n = func->fn_argvarnos[i]; @@ -785,10 +1079,30 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo case PLPGSQL_DTYPE_TABLE: case PLPGSQL_DTYPE_VAR: { PLpgSQL_var* var = (PLpgSQL_var*)estate.datums[n]; - - var->value = fcinfo->arg[i]; - var->isnull = fcinfo->argnull[i]; - var->freeval = false; + if (func->fn_nargs != fcinfo->nargs && is_plpgsql_function_with_outparam) { + while (argmodes[outArgCnt] == PROARGMODE_OUT) { + outArgCnt++; + } + var->value = fcinfo->arg[outArgCnt]; + var->isnull = fcinfo->argnull[outArgCnt]; + var->freeval = false; + outArgCnt++; + 
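+ /* here fcinfo appears to hold one slot per declared parameter (including pure OUT ones), so the + * loop above skips the OUT placeholders to line the next IN argument up with its variable */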
} else { + var->value = fcinfo->arg[i]; + var->isnull = fcinfo->argnull[i]; + var->freeval = false; + } + if (u_sess->plsql_cxt.func_tableof_index != NULL) { + ListCell* l = NULL; + foreach (l, u_sess->plsql_cxt.func_tableof_index) { + PLpgSQL_func_tableof_index* func_tableof = (PLpgSQL_func_tableof_index*)lfirst(l); + if (func_tableof->varno == i) { + var->tableOfIndexType = func_tableof->tableOfIndexType; + var->tableOfIndex = copyTableOfIndex(func_tableof->tableOfIndex); + } + } + } + } break; case PLPGSQL_DTYPE_ROW: { @@ -826,6 +1140,9 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo } break; default: + if (procTup != NULL) { + ReleaseSysCache(procTup); + } ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmodule(MOD_PLSQL), @@ -833,37 +1150,46 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo break; } } - + if (procTup != NULL) { + ReleaseSysCache(procTup); + } + free_func_tableof_index(); estate.err_text = gettext_noop("during function entry"); estate.cursor_return_data = fcinfo->refcursor_data.returnCursor; estate.cursor_return_numbers = fcinfo->refcursor_data.return_number; - /* - * Set the magic variable FOUND to false - */ - exec_set_found(&estate, false); - /* Set the magic implicit cursor attribute variable FOUND to false */ - exec_set_sql_cursor_found(&estate, PLPGSQL_NULL); - - /* Set the magic implicit cursor attribute variable NOTFOUND to true */ - exec_set_sql_notfound(&estate, PLPGSQL_NULL); - - /* Set the magic implicit cursor attribute variable ISOPEN to false */ - exec_set_sql_isopen(&estate, false); - - /* Set the magic implicit cursor attribute variable ROWCOUNT to 0 */ - exec_set_sql_rowcount(&estate, -1); +#ifndef ENABLE_MULTIPLE_NODES + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_cursor_att_var(&estate, estate_tmp); + } else if (isExecAutoFunc && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + init_implicit_cursor_attr(&estate); + initAutoSessionFuncInfoValue(u_sess->autonomous_parent_sessionid, &estate); + } else { +#endif + init_implicit_cursor_attr(&estate); +#ifndef ENABLE_MULTIPLE_NODES + } +#endif if (estate.sqlcode_varno) { - if (plcallstack.prev != NULL) { + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) { PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); exec_set_prev_sqlcode(&estate, estate_tmp); - } else { +#ifndef ENABLE_MULTIPLE_NODES + } else if (isExecAutoFunc && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + /* + * No processing is performed because the SQL code has been updated by + * the initAutoSessionFuncInfoValue function. 
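+ * (initAutoSessionFuncInfoValue has already pulled the parent session's SQLCODE into this estate)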
+ */ +#endif + } else if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { exec_set_sqlcode(&estate, 0); } } + /* * Let the instrumentation plugin peek at this function */ @@ -906,6 +1232,8 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo } } + pfree_ext(u_sess->plsql_cxt.pass_func_tupdesc); + /* * We got a return value - process it */ @@ -939,7 +1267,7 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo } estate.retval = (Datum)0; fcinfo->isnull = true; - } else if (!estate.retisnull) { + } else if (!estate.retisnull || !estate.paramisnull) { if (estate.retistuple) { /* * We have to check that the returned tuple actually matches the @@ -949,11 +1277,12 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo HeapTuple rettup = (HeapTuple)DatumGetPointer(estate.retval); TupleDesc tupdesc; TupleConversionMap* tupmap = NULL; + TupleDesc retdesc = (estate.paramtupdesc) ? estate.paramtupdesc : estate.rettupdesc; switch (get_call_result_type(fcinfo, NULL, &tupdesc)) { case TYPEFUNC_COMPOSITE: /* got the expected result rowtype, now check it */ - if (estate.rettupdesc == NULL && estate.func->out_param_varno >= 0) { + if (retdesc == NULL && estate.func->out_param_varno >= 0) { PLpgSQL_datum* out_param_datum = estate.datums[estate.func->out_param_varno]; if ((out_param_datum->dtype == PLPGSQL_DTYPE_VARRAY || out_param_datum->dtype == PLPGSQL_DTYPE_TABLE || @@ -966,7 +1295,14 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo ((PLpgSQL_var*)out_param_datum)->datatype->typname))); } } - tupmap = convert_tuples_by_position(estate.rettupdesc, + /* if tuple has tableof type, we replace its base type, and pass to upper function's */ + if (recheckTableofType(tupdesc, retdesc)) { + MemoryContext old = MemoryContextSwitchTo(u_sess->temp_mem_cxt); + u_sess->plsql_cxt.pass_func_tupdesc = CreateTupleDescCopy(tupdesc); + MemoryContextSwitchTo(old); + } + + tupmap = convert_tuples_by_position(retdesc, tupdesc, gettext_noop("returned record type does not match expected record type"), estate.func->fn_oid); @@ -985,7 +1321,7 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo * so we don't really need to be restrictive. Pass back * the generated result type, instead. 
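 * (With separated OUT parameters, retdesc above prefers estate.paramtupdesc over estate.rettupdesc.)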
*/ - tupdesc = estate.rettupdesc; + tupdesc = retdesc; if (tupdesc == NULL) { /* shouldn't happen */ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), @@ -1008,8 +1344,8 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo estate.retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); } else { if (estate.rettype == InvalidOid && estate.func->out_param_varno >= 0) { - PLpgSQL_datum* out_param_datum = estate.datums[estate.func->out_param_varno]; - if (out_param_datum->dtype == PLPGSQL_DTYPE_ROW && ((PLpgSQL_row*)out_param_datum)->nfields > 1) { + PLpgSQL_datum *out_param_datum = estate.datums[estate.func->out_param_varno]; + if (out_param_datum->dtype == PLPGSQL_DTYPE_ROW && ((PLpgSQL_row *)out_param_datum)->nfields > 1) { ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmodule(MOD_PLSQL), @@ -1042,6 +1378,35 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo estate.retval = PointerGetDatum(tmp); } } + + if (estate.paramtupdesc != NULL || OidIsValid(estate.paramtype)) { + HeapTuple tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(func->fn_oid)); + if (!HeapTupleIsValid(tp)) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmodule(MOD_PLSQL), + errmsg("Cache lookup failed for function %u", func->fn_oid), + errdetail("Fail to get the function param type oid."))); + } + int out_args_num = 0; + TupleDesc tupdesc = get_func_param_desc(tp, func->fn_rettype, &out_args_num); + if (OidIsValid(estate.paramtype) || out_args_num == 1) { // For func has one out param + Datum values[] = {estate.retval, estate.paramval}; + bool nulls[] = {estate.retisnull, estate.paramisnull}; + HeapTuple rettup = heap_form_tuple(tupdesc, values, nulls); + estate.retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + } else { // For func has multiple out params + Datum *values = (Datum*)palloc(sizeof(Datum) * (out_args_num + 1)); + bool *nulls = (bool*)palloc(sizeof(bool) * (out_args_num + 1)); + heap_deform_tuple((HeapTuple)DatumGetPointer(estate.paramval), estate.paramtupdesc, (values + 1), + (nulls + 1)); + values[0] = estate.retval; + nulls[0] = estate.retisnull; + HeapTuple rettup = heap_form_tuple(tupdesc, values, nulls); + estate.retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + pfree(values); + pfree(nulls); + } + ReleaseSysCache(tp); + } } estate.err_text = gettext_noop("during function exit"); @@ -1073,8 +1438,19 @@ Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, boo } } +#ifndef ENABLE_MULTIPLE_NODES + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_cursor_att_var(estate_tmp, &estate); + } +#endif estate.cursor_return_data = NULL; estate.cursor_return_numbers = 0; + +#ifndef ENABLE_MULTIPLE_NODES + /* for restore parent session and automn session package var values */ + (void)processAutonmSessionPkgs(func, &estate); +#endif /* Clean up any leftover temporary memory */ plpgsql_destroy_econtext(&estate); exec_eval_cleanup(&estate); @@ -1143,6 +1519,7 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) { PLpgSQL_execstate estate; ErrorContextCallback plerrcontext; + FormatCallStack plcallstack; int i; int rc; PLpgSQL_var* var = NULL; @@ -1163,6 +1540,11 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) plerrcontext.previous = 
t_thrd.log_cxt.error_context_stack; t_thrd.log_cxt.error_context_stack = &plerrcontext; + /* setup call stack for format_call_stack */ + plcallstack.elem = &estate; + plcallstack.prev = t_thrd.log_cxt.call_stack; + t_thrd.log_cxt.call_stack = &plcallstack; + /* * Make local execution copies of all the datums */ @@ -1335,25 +1717,37 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) } estate.err_text = gettext_noop("during function entry"); +#ifndef ENABLE_MULTIPLE_NODES + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_cursor_att_var(&estate, estate_tmp); + exec_set_prev_sqlcode(&estate, estate_tmp); + } else { +#endif + /* + * Set the magic variable FOUND to false + */ + exec_set_found(&estate, false); - /* - * Set the magic variable FOUND to false - */ - exec_set_found(&estate, false); + /* Set the magic implicit cursor attribute variable FOUND to false */ + exec_set_sql_cursor_found(&estate, PLPGSQL_NULL); - /* Set the magic implicit cursor attribute variable FOUND to false */ - exec_set_sql_cursor_found(&estate, PLPGSQL_NULL); + /* Set the magic implicit cursor attribute variable NOTFOUND to true */ + exec_set_sql_notfound(&estate, PLPGSQL_NULL); - /* Set the magic implicit cursor attribute variable NOTFOUND to true */ - exec_set_sql_notfound(&estate, PLPGSQL_NULL); + /* Set the magic implicit cursor attribute variable ISOPEN to false */ + exec_set_sql_isopen(&estate, false); - /* Set the magic implicit cursor attribute variable ISOPEN to false */ - exec_set_sql_isopen(&estate, false); + /* Set the magic implicit cursor attribute variable ROWCOUNT to 0 */ + exec_set_sql_rowcount(&estate, -1); - /* Set the magic implicit cursor attribute variable ROWCOUNT to 0 */ - exec_set_sql_rowcount(&estate, -1); + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT) { + exec_set_sqlcode(&estate, 0); + } - exec_set_sqlcode(&estate, 0); +#ifndef ENABLE_MULTIPLE_NODES + } +#endif /* * Let the instrumentation plugin peek at this function @@ -1452,6 +1846,13 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) ((*u_sess->plsql_cxt.plugin_ptr)->func_end)(&estate, func); } +#ifndef ENABLE_MULTIPLE_NODES + if (plcallstack.prev != NULL && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + PLpgSQL_execstate* estate_tmp = (PLpgSQL_execstate*)(plcallstack.prev->elem); + exec_set_cursor_att_var(estate_tmp, &estate); + } +#endif + // Check for interrupts sent by pl debugger and other plugins. // CHECK_FOR_INTERRUPTS(); @@ -1465,6 +1866,9 @@ HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata) */ t_thrd.log_cxt.error_context_stack = plerrcontext.previous; + /* pop the call stack */ + t_thrd.log_cxt.call_stack = plcallstack.prev; + /* * Return the trigger's result */ @@ -1966,22 +2370,6 @@ static ResourceOwner get_last_transaction_resourceowner() return oldowner; } -/* Context for exception block */ -typedef struct { - MemoryContext oldMemCxt; /* CurrentMemoryContext saved at exception's entry */ - ResourceOwner oldResOwner; /* CurrentResourceOwner saved at exception's entry */ - TransactionId oldTransactionId; /* top transaction id saved at exception entry */ - SubTransactionId subXid; /* exception subtransaction's id */ - - int curExceptionCounter; /* serial number for this exception block */ - bool hasReleased; /* whehter or not exception subtransaction has released. 
*/ - - ErrorData* cur_edata; /* ErrorData captured by this exception block. */ - ErrorData* old_edata; /* saved ErrorData before this Exception block. */ - - int spi_connected; /* SPI connected level before exception. */ -} ExceptionContext; - /* * Action at exception block's entry * @@ -1999,6 +2387,7 @@ static void exec_exception_begin(PLpgSQL_execstate* estate, ExceptionContext *co context->oldMemCxt = CurrentMemoryContext; context->oldResOwner = t_thrd.utils_cxt.CurrentResourceOwner; context->oldTransactionId = SPI_get_top_transaction_id(); + context->stackId = u_sess->plsql_cxt.nextStackEntryId; /* recording stmt's Top Portal ResourceOwner before any subtransaction. */ if (u_sess->SPI_cxt.portal_stp_exception_counter == 0 && u_sess->plsql_cxt.stp_savepoint_cnt == 0) { @@ -2038,7 +2427,7 @@ static void exec_exception_end(PLpgSQL_execstate* estate, ExceptionContext *cont */ SPI_savepoint_release(NULL); - XactResumeSPIContext(true); + stp_cleanup_subxact_resowner(context->stackId); } /* @@ -2094,7 +2483,15 @@ static void exec_exception_cleanup(PLpgSQL_execstate* estate, ExceptionContext * edata->message ? edata->message : " "))); } - bool hasAbort = false; + /* + * Process resource leaks (such as ActiveSnapshot, catrefs, etc.) between subXid and the latest + * subtransaction. If the top transaction has changed, clean up all the subtransactions. + * + * NOTE: More consideration is required to clean up subtransactions on the DN to support multinode. + */ + XactCleanExceptionSubTransaction( + context->oldTransactionId != SPI_get_top_transaction_id() ? 0 : context->subXid); + char *txnName = GetCurrentTransactionName(); /* Abort the inner transaction */ @@ -2109,20 +2506,18 @@ static void exec_exception_cleanup(PLpgSQL_execstate* estate, ExceptionContext * SPI_savepoint_release(NULL); } else { SPI_savepoint_rollbackAndRelease(NULL, InvalidTransactionId); - hasAbort = true; } /* * None should has references to this ResouceOwner of exception's subtransaction. Since ResourceOwner * is reserved during above destorying, Deal it specially to release its memroy as soon as poosible. */ - XactResumeSPIContext(true); + stp_cleanup_subxact_resowner(context->stackId); } else if (!PLSTMT_IMPLICIT_SAVEPOINT) { /* * rollback to the lastest savepoint which would be user's savepoint or exception's. */ exec_savepoint_rollback(estate, txnName); - hasAbort = true; } /* Since above AbortSubTransaction may has destory connections, we can't go ahead. */ @@ -2132,14 +2527,6 @@ static void exec_exception_cleanup(PLpgSQL_execstate* estate, ExceptionContext * "failed. error message is: %s", edata->message ? edata->message : " "))); } - /* - * process resource's leak between subXid and latest subtransaction between subXid and the latest - * subtransaction, such as ActiveSnapshot, catrefs and etc. while the top transaction has changed, - * clean all the subtransaction. - */ - XactCleanExceptionSubTransaction( - context->oldTransactionId != SPI_get_top_transaction_id() ? 0 : context->subXid, hasAbort); - /* destory SPI connects created in this exception block. */ SPI_disconnect(context->spi_connected + 1); @@ -2155,6 +2542,8 @@ static void exec_exception_cleanup(PLpgSQL_execstate* estate, ExceptionContext * */ SPI_restore_connection(); + t_thrd.xact_cxt.isSelectInto = false; + /* Get last transaction's ResourceOwner.
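 * (stp_check_transaction_and_set_resource_owner() below reinstates context->oldResOwner, using context->oldTransactionId to detect whether the top transaction changed.)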
*/ stp_check_transaction_and_set_resource_owner(context->oldResOwner, context->oldTransactionId); @@ -2175,6 +2564,7 @@ static int exec_exception_handler(PLpgSQL_execstate* estate, PLpgSQL_stmt_block* ErrorData* edata = context->cur_edata; int rc = -1; + estate->is_exception = true; /* no error can be ignored once connection was destoryed */ if (t_thrd.xact_cxt.handlesDestroyedInCancelQuery) { estate->cur_error = context->old_edata; @@ -2209,7 +2599,11 @@ static int exec_exception_handler(PLpgSQL_execstate* estate, PLpgSQL_stmt_block* exec_set_sqlcode(estate, edata->sqlerrcode); + ExceptionContext* saved_cxt = u_sess->plsql_cxt.cur_exception_cxt; + u_sess->plsql_cxt.cur_exception_cxt = context; rc = exec_stmts(estate, exception->action); + u_sess->plsql_cxt.cur_exception_cxt = saved_cxt; + saved_cxt = NULL; free_var(state_var); state_var->value = (Datum)0; @@ -2371,7 +2765,8 @@ static int exec_stmt_block(PLpgSQL_execstate* estate, PLpgSQL_stmt_block* block) } #ifndef ENABLE_MULTIPLE_NODES - if (initPackageVar && u_sess->is_autonomous_session) { + bool needInitAutoPkg = initPackageVar && u_sess->is_autonomous_session; + if (needInitAutoPkg) { /* autonomous session, init package values from parent session */ initAutonomousPkgValue(u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package, u_sess->autonomous_parent_sessionid); @@ -2437,6 +2832,9 @@ static int exec_stmt_block(PLpgSQL_execstate* estate, PLpgSQL_stmt_block* block) estate->cursor_return_data = saved_cursor_data; estate->cursor_return_numbers = saved_cursor_numbers; + /* reset stream for-loop flag */ + u_sess->SPI_cxt.has_stream_in_cursor_or_forloop_sql = false; + /* gs_signal_handle maybe block sigusr2 when accept SIGINT */ gs_signal_unblock_sigusr2(); @@ -2690,6 +3088,11 @@ static int exec_stmt(PLpgSQL_execstate* estate, PLpgSQL_stmt* stmt) estate->func->debug->debugCallback(estate->func, estate); } #endif + /* clear the table-of index before every stmt */ + if (u_sess->SPI_cxt.cur_tableof_index != NULL) { + u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = InvalidOid; + u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = NULL; + } switch ((enum PLpgSQL_stmt_types)stmt->cmd_type) { case PLPGSQL_STMT_BLOCK: @@ -2837,6 +3240,105 @@ static int exec_stmt(PLpgSQL_execstate* estate, PLpgSQL_stmt* stmt) return rc; } +static Oid get_func_oid_from_expr(PLpgSQL_expr *expr) +{ + Assert(expr != NULL); + if (!expr->is_funccall) { + return InvalidOid; + } + + if (expr->plan != NULL && expr->plan->stmt_list != NULL) { + Query *query = (Query *)linitial(expr->plan->stmt_list); + Assert(IsA(query, Query)); + if (query->rtable != NULL) { + RangeTblEntry *rte = (RangeTblEntry *)linitial(query->rtable); + if (rte->rtekind == RTE_FUNCTION) { + return ((FuncExpr *)rte->funcexpr)->funcid; + } + } + } + + if (expr->expr_simple_expr != NULL && IsA(expr->expr_simple_expr, FuncExpr)) { + return ((FuncExpr*)expr->expr_simple_expr)->funcid; + } + + return InvalidOid; +} + +/* + * @brief plpgsql_set_outparam_value + * Separate OUT param values from the RETURN value of a function call, if any exist.
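+ * Example (assumed A_FORMAT behavior): for a function f(a IN int, b OUT int) RETURN int, the + * evaluated result arrives as the nested tuple (RETURN, (b)); this routine peels off the leading + * RETURN value and moves the OUT tuple back into the caller's datums.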
+ * @param estate execution state + * @param expr expr of an execsql, perform, or assign stmt + * @param value raw RETURN value + * @param return_is_null raw RETURN value null flag + */ +static void plpgsql_set_outparam_value(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, + Datum* return_value = NULL, bool* return_is_null = NULL) +{ + /* Use this before exec_eval_cleanup */ + if (estate->eval_tuptable == NULL) { + return; + } + + /* make sure we are dealing with a function call with OUT params under A_FORMAT compatibility */ + if (u_sess->attr.attr_sql.sql_compatibility != A_FORMAT || expr->out_param_dno < 0 || !expr->is_funccall) { + return; + } + + Oid funcoid = get_func_oid_from_expr(expr); + if (!is_function_with_plpgsql_language_and_outparam(funcoid)) { + return; + } + + /* + * Separate the OUT param tuple from the RETURN tuple. + * We should always have the value of this form at this point: + * (RETURN, (OUT, OUT, ..., OUT)) + */ + TupleDesc tupdesc = estate->eval_tuptable->tupdesc; + int attrsnum = estate->eval_tuptable->tupdesc->natts; + Datum *values = (Datum*)palloc(sizeof(Datum) * attrsnum); + bool *nulls = (bool*)palloc(sizeof(bool) * attrsnum); + heap_deform_tuple(estate->eval_tuptable->vals[0], tupdesc, values, nulls); + + /* Set true RETURN value */ + if (return_value) { + *return_value = values[0]; + } + if (return_is_null) { + *return_is_null = nulls[0]; + } + + /* Set OUT param value */ + HeapTuple tuple = NULL; + TupleDesc paramtupdesc = NULL; + if (attrsnum == 2) { + Datum paramval = values[1]; + bool paramisnull = nulls[1]; + /* If we have a single OUT param, it is not going to be a RECORD */ + paramtupdesc = CreateTemplateTupleDesc(1, false, TAM_HEAP); + TupleDescInitEntry(paramtupdesc, (AttrNumber)1, NameStr(tupdesc->attrs[1]->attname), + tupdesc->attrs[1]->atttypid, + tupdesc->attrs[1]->atttypmod, 0); + Datum vals[] = {paramval}; + bool ns[] = {paramisnull}; + tuple = heap_form_tuple(paramtupdesc, vals, ns); + } else { + /* Multiple OUT params */ + paramtupdesc = CreateTemplateTupleDesc(attrsnum - 1, false, TAM_HEAP); + for (int i = 1; i < attrsnum; i++) { + TupleDescInitEntry(paramtupdesc, (AttrNumber)i, NameStr(tupdesc->attrs[i]->attname), + tupdesc->attrs[i]->atttypid, tupdesc->attrs[i]->atttypmod, 0); + } + tuple = heap_form_tuple(paramtupdesc, (values + 1), (nulls + 1)); + } + PLpgSQL_row* row = (PLpgSQL_row*)estate->datums[expr->out_param_dno]; + exec_move_row(estate, NULL, row, tuple, paramtupdesc); + pfree(values); + pfree(nulls); +} + /* ---------- * exec_stmt_assign Evaluate an expression and * put the result into a variable. * ---------- */ @@ -2848,7 +3350,9 @@ static int exec_stmt_assign(PLpgSQL_execstate* estate, PLpgSQL_stmt_assign* stmt ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmodule(MOD_PLSQL), errmsg("It should be valid var number."))); } - +#ifndef ENABLE_MULTIPLE_NODES + CheckAssignTarget(estate, stmt->varno); +#endif exec_assign_expr(estate, estate->datums[stmt->varno], stmt->expr); return PLPGSQL_RC_OK; @@ -2872,6 +3376,8 @@ static int exec_stmt_perform(PLpgSQL_execstate* estate, PLpgSQL_stmt_perform* st (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmodule(MOD_PLSQL), errmsg("exec_run_select returns %d", rc))); } + plpgsql_set_outparam_value(estate, stmt->expr); + /* * This is used for nested STP. If the transaction Id changed, * then need to create new econtext for the TopTransaction.
@@ -2881,6 +3387,9 @@ static int exec_stmt_perform(PLpgSQL_execstate* estate, PLpgSQL_stmt_perform* st exec_set_found(estate, (estate->eval_processed != 0)); exec_eval_cleanup(estate); + /* reset the flag, used for restoreAutonmSessionCursors */ + u_sess->plsql_cxt.call_after_auto = false; + return PLPGSQL_RC_OK; } @@ -3225,6 +3734,7 @@ static Datum FormBulkExceptionDatum(int index, int errcode, const char* errmsg) values[i] = CStringGetTextDatum(errmsg); isnull[i++] = false; HeapTuple result = heap_form_tuple(tupDesc, values, isnull); + ReleaseTupleDesc(tupDesc); return HeapTupleGetDatum(result); } @@ -3270,6 +3780,7 @@ static int exec_stmt_fori(PLpgSQL_execstate* estate, PLpgSQL_stmt_fori* stmt) bool exception_saved = false; int rc = PLPGSQL_RC_OK; MemoryContext oldcontext = CurrentMemoryContext; + int64 stackId = u_sess->plsql_cxt.nextStackEntryId; var = (PLpgSQL_var*)(estate->datums[stmt->var->dno]); @@ -3399,10 +3910,10 @@ static int exec_stmt_fori(PLpgSQL_execstate* estate, PLpgSQL_stmt_fori* stmt) PG_TRY(); { - plpgsql_create_econtext(estate); rc = exec_stmts(estate, stmt->body); SPI_savepoint_release(SE_SAVEPOINT_NAME); - XactResumeSPIContext(true); + plpgsql_create_econtext(estate); + stp_cleanup_subxact_resowner(stackId); t_thrd.utils_cxt.CurrentResourceOwner = oldowner; SPI_restore_connection(); } @@ -3423,7 +3934,7 @@ static int exec_stmt_fori(PLpgSQL_execstate* estate, PLpgSQL_stmt_fori* stmt) FlushErrorState(); SPI_savepoint_rollbackAndRelease(SE_SAVEPOINT_NAME, InvalidTransactionId); - XactResumeSPIContext(true); + stp_cleanup_subxact_resowner(stackId); t_thrd.utils_cxt.CurrentResourceOwner = oldowner; exception_saved = true; } @@ -3959,6 +4470,56 @@ static int exec_stmt_exit(PLpgSQL_execstate* estate, PLpgSQL_stmt_exit* stmt) } } +static void set_outparam_info_of_record_type(PLpgSQL_execstate* estate, PLpgSQL_row* row) +{ + HeapTuple tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(estate->func->fn_oid)); + if (!HeapTupleIsValid(tp)) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmodule(MOD_PLSQL), + errmsg("Cache lookup failed for function %u", estate->func->fn_oid), + errdetail("Fail to get the function param type oid."))); + } + Oid *p_argtypes = NULL; + char **p_argnames = NULL; + char *p_argmodes = NULL; + int p_nargs = get_func_arg_info(tp, &p_argtypes, &p_argnames, &p_argmodes); + int out_args_num = 0; + int out_args_index = 0; + for (int i = 0; i < p_nargs; i++) { + if (p_argmodes[i] == 'o' || p_argmodes[i] == 'b') { + out_args_num++; + out_args_index = i; + } + if (out_args_num > 1) { + break; + } + } + /* + * When a function has an out parameter, the parameter information needs to be supplemented + * because the build_row_from_vars function does not handle this situation. 
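+ * (For a single OUT parameter whose row variable matches the parameter name, the row's tuple + * descriptor is patched with the parameter's type before the tuple is formed.)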
+ */ + if (out_args_num == 1 && row->varname != NULL && strcmp(row->varname, p_argnames[out_args_index]) == 0) { + row->rowtupdesc->tdtypeid = p_argtypes[out_args_index]; + row->rowtupdesc->tdtypmod = p_argmodes[out_args_index]; + estate->paramval = PointerGetDatum(make_tuple_from_row(estate, row, row->rowtupdesc)); + HeapTuple rettup = (HeapTuple)DatumGetPointer(estate->paramval); + estate->paramval = PointerGetDatum(SPI_returntuple(rettup, row->rowtupdesc)); + estate->paramtupdesc = row->rowtupdesc; + } else { + estate->paramval = PointerGetDatum(make_tuple_from_row(estate, row, row->rowtupdesc)); + estate->paramtupdesc = row->rowtupdesc; + } + estate->paramisnull = false; + pfree_ext(p_argtypes); + pfree_ext(p_argmodes); + if (p_argnames != NULL) { + for (int i = 0; i < p_nargs; i++) { + pfree_ext(p_argnames[i]); + } + pfree_ext(p_argnames); + } + ReleaseSysCache(tp); +} + /* ---------- * exec_stmt_return Evaluate an expression and start * returning from the function. @@ -3966,6 +4527,7 @@ static int exec_stmt_exit(PLpgSQL_execstate* estate, PLpgSQL_stmt_exit* stmt) */ static int exec_stmt_return(PLpgSQL_execstate* estate, PLpgSQL_stmt_return* stmt) { + free_func_tableof_index(); /* * If processing a set-returning PL/pgSQL function, the final RETURN * indicates that the function is finished producing tuples. The rest of @@ -3975,11 +4537,41 @@ static int exec_stmt_return(PLpgSQL_execstate* estate, PLpgSQL_stmt_return* stmt return PLPGSQL_RC_RETURN; } + bool need_param_seperation = estate->func->is_plpgsql_func_with_outparam; + if (need_param_seperation && stmt->expr == NULL) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_PLSQL), + errmsg("Value assignment for the out parameter in plpgsql language functions, Unsupported " + "return nothing in PL/pgSQL function"), + errdetail("N/A"), errcause("Missing return value"), erraction("Please add return value"))); + } + /* initialize for null result (possibly a tuple) */ estate->retval = (Datum)0; estate->rettupdesc = NULL; estate->retisnull = true; + if (stmt->expr != NULL) { + if (estate->retistuple) { + exec_run_select(estate, stmt->expr, 1, NULL); + if (estate->eval_processed > 0) { + estate->retval = PointerGetDatum(estate->eval_tuptable->vals[0]); + estate->rettupdesc = estate->eval_tuptable->tupdesc; + estate->retisnull = false; + } + } else { + /* Normal case for scalar results */ + estate->retval = exec_eval_expr(estate, stmt->expr, &(estate->retisnull), &(estate->rettype)); + plpgsql_set_outparam_value(estate, stmt->expr); + if (estate->rettype == REFCURSOROID) { + CopyCursorInfoData(estate->cursor_return_data, &estate->eval_econtext->cursor_data); + } + } + + if (!need_param_seperation) { + return PLPGSQL_RC_RETURN; + } + } + if (stmt->retvarno >= 0) { PLpgSQL_datum* retvar = estate->datums[stmt->retvarno]; @@ -3987,13 +4579,63 @@ static int exec_stmt_return(PLpgSQL_execstate* estate, PLpgSQL_stmt_return* stmt case PLPGSQL_DTYPE_VARRAY: case PLPGSQL_DTYPE_VAR: { PLpgSQL_var* var = (PLpgSQL_var*)retvar; - - estate->retval = var->value; - estate->retisnull = var->isnull; - estate->rettype = var->datatype->typoid; + Datum value = var->value; + if (is_external_clob(var->datatype->typoid, var->isnull, value)) { + bool is_null = false; + bool is_have_huge_clob = false; + struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(value)); + value = fetch_lob_value_from_tuple(lob_pointer, InvalidOid, &is_null, &is_have_huge_clob); + if (is_have_huge_clob) { + ereport(ERROR, + 
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("huge clob do not support as return parameter"))); + } + } + if (need_param_seperation) { + estate->paramval = value; + estate->paramtype = var->datatype->typoid; + estate->paramisnull = var->isnull; + } else { + estate->retval = value; + estate->retisnull = var->isnull; + estate->rettype = var->datatype->typoid; + if (estate->is_exception && estate->rettype == REFCURSOROID) { + if (DatumGetPointer(estate->retval) != NULL) { + char* curname = NULL; + curname = TextDatumGetCString(estate->retval); + Portal portal = SPI_cursor_find(curname); + if (portal == NULL || portal->status == PORTAL_FAILED) { + estate->retval = (Datum)0; + estate->retisnull = true; + estate->rettype = 0; + } + ereport(DEBUG3, (errmodule(MOD_PLSQL), errcode(ERRCODE_LOG), + errmsg("RESET CURSOR NULL LOG: function: %s, set cursor: %s to null due to exception", + estate->func->fn_signature, curname))); + pfree_ext(curname); + } else { + estate->retval = (Datum)0; + estate->retisnull = true; + estate->rettype = 0; + } + } + } if (estate->rettype == REFCURSOROID) { ExecCopyDataFromDatum(estate->datums, var->dno, estate->cursor_return_data); } + bool isTableVal = var->datatype != NULL && + var->datatype->collectionType == PLPGSQL_COLLECTION_TABLE && + OidIsValid(var->tableOfIndexType) && + var->tableOfIndex != NULL; + if (isTableVal) { + MemoryContext oldCxt = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + PLpgSQL_func_tableof_index* func_tableof = (PLpgSQL_func_tableof_index*)palloc0(sizeof(PLpgSQL_func_tableof_index)); + func_tableof->varno = 0; + func_tableof->tableOfIndexType = var->tableOfIndexType; + func_tableof->tableOfIndex = copyTableOfIndex(var->tableOfIndex); + u_sess->plsql_cxt.func_tableof_index = lappend(u_sess->plsql_cxt.func_tableof_index, func_tableof); + MemoryContextSwitchTo(oldCxt); + } } break; @@ -4001,9 +4643,15 @@ static int exec_stmt_return(PLpgSQL_execstate* estate, PLpgSQL_stmt_return* stmt PLpgSQL_rec* rec = (PLpgSQL_rec*)retvar; if (HeapTupleIsValid(rec->tup)) { - estate->retval = PointerGetDatum(rec->tup); - estate->rettupdesc = rec->tupdesc; - estate->retisnull = false; + if (need_param_seperation) { + estate->paramval = PointerGetDatum(rec->tup); + estate->paramtupdesc = rec->tupdesc; + estate->paramisnull = false; + } else { + estate->retval = PointerGetDatum(rec->tup); + estate->rettupdesc = rec->tupdesc; + estate->retisnull = false; + } } } break; @@ -4013,15 +4661,52 @@ static int exec_stmt_return(PLpgSQL_execstate* estate, PLpgSQL_stmt_return* stmt PLpgSQL_row* row = (PLpgSQL_row*)retvar; AssertEreport(row->rowtupdesc != NULL, MOD_PLSQL, "row's tuple description is required."); - estate->retval = PointerGetDatum(make_tuple_from_row(estate, row, row->rowtupdesc)); - if (DatumGetPointer(estate->retval) == NULL) { /* should not happen */ + if (need_param_seperation) { + set_outparam_info_of_record_type(estate, row); + } else { + estate->retval = PointerGetDatum(make_tuple_from_row(estate, row, row->rowtupdesc)); + estate->rettupdesc = row->rowtupdesc; + estate->retisnull = false; + } + /* should not happen */ + if (DatumGetPointer(estate->retval) == NULL && DatumGetPointer(estate->paramval) == NULL) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmodule(MOD_PLSQL), errmsg("row not compatible with its own tupdesc in RETURN statement."))); } - estate->rettupdesc = row->rowtupdesc; - estate->retisnull = false; + + /* table of index */ + for (int i = 0; i < row->rowtupdesc->natts; i++) { + int dno = 
row->varnos[i]; + PLpgSQL_var* tableof_var = NULL; + PLpgSQL_datum* var_datum = NULL; + if (row->ispkg) { + var_datum = row->pkg->datums[dno]; + } else { + var_datum = estate->datums[dno]; + } + if (var_datum->dtype == PLPGSQL_DTYPE_TABLE || var_datum->dtype == PLPGSQL_DTYPE_VAR) { + tableof_var = (PLpgSQL_var*)var_datum; + } else { + continue; + } + bool isTableVar = tableof_var->datatype != NULL && + tableof_var->datatype->collectionType == PLPGSQL_COLLECTION_TABLE && + OidIsValid(tableof_var->tableOfIndexType) && + tableof_var->tableOfIndex != NULL; + if (isTableVar) { + MemoryContext oldCxt = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + PLpgSQL_func_tableof_index* func_tableof = + (PLpgSQL_func_tableof_index*)palloc0(sizeof(PLpgSQL_func_tableof_index)); + func_tableof->varno = i; + func_tableof->tableOfIndexType = tableof_var->tableOfIndexType; + func_tableof->tableOfIndex = copyTableOfIndex(tableof_var->tableOfIndex); + u_sess->plsql_cxt.func_tableof_index = + lappend(u_sess->plsql_cxt.func_tableof_index, func_tableof); + MemoryContextSwitchTo(oldCxt); + } + } if (estate->cursor_return_data != NULL) { for (int i = 0,j = 0; i < row->rowtupdesc->natts; i++) { @@ -4045,25 +4730,6 @@ static int exec_stmt_return(PLpgSQL_execstate* estate, PLpgSQL_stmt_return* stmt return PLPGSQL_RC_RETURN; } - if (stmt->expr != NULL) { - if (estate->retistuple) { - exec_run_select(estate, stmt->expr, 1, NULL); - if (estate->eval_processed > 0) { - estate->retval = PointerGetDatum(estate->eval_tuptable->vals[0]); - estate->rettupdesc = estate->eval_tuptable->tupdesc; - estate->retisnull = false; - } - } else { - /* Normal case for scalar results */ - estate->retval = exec_eval_expr(estate, stmt->expr, &(estate->retisnull), &(estate->rettype)); - if (estate->rettype == REFCURSOROID) { - CopyCursorInfoData(estate->cursor_return_data, &estate->eval_econtext->cursor_data); - } - } - - return PLPGSQL_RC_RETURN; - } - /* * Special hack for function returning VOID: instead of NULL, return a * non-null VOID value. 
This is of dubious importance but is kept for @@ -4159,7 +4825,8 @@ static int exec_stmt_return_next(PLpgSQL_execstate* estate, PLpgSQL_stmt_return_ } } break; - case PLPGSQL_DTYPE_ROW: { + case PLPGSQL_DTYPE_ROW: + case PLPGSQL_DTYPE_RECORD: { PLpgSQL_row* row = (PLpgSQL_row*)retvar; tuple = make_tuple_from_row(estate, row, tupdesc); @@ -4327,6 +4994,18 @@ static void exec_init_tuple_store(PLpgSQL_execstate* estate) estate->rettupdesc = rsi->expectedDesc; } +static void checkNestTableOfLayer() +{ + if (u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer >= 0 && + u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer + 1 < + u_sess->SPI_cxt.cur_tableof_index->tableOfNestLayer) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_PLSQL), + errmsg("Don't print entire nest table of value in raise statement"))); + } +} + /* ---------- * exec_stmt_raise Build a message and throw it with elog() * ---------- @@ -4392,8 +5071,13 @@ static int exec_stmt_raise(PLpgSQL_execstate* estate, PLpgSQL_stmt_raise* stmt) errmsg("too few parameters specified for RAISE"))); } PLpgSQL_execstate* save_plpgsql_estate = plpgsql_estate; + u_sess->SPI_cxt.cur_tableof_index->tableOfNestLayer = -1; + u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer = -1; plpgsql_estate = estate; paramvalue = exec_eval_expr(estate, (PLpgSQL_expr*)lfirst(current_param), ¶misnull, ¶mtypeid); + checkNestTableOfLayer(); + u_sess->SPI_cxt.cur_tableof_index->tableOfNestLayer = -1; + u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer = -1; plpgsql_estate = save_plpgsql_estate; if (paramisnull) { extval = ""; @@ -4508,6 +5192,21 @@ static int exec_stmt_raise(PLpgSQL_execstate* estate, PLpgSQL_stmt_raise* stmt) err_message = pstrdup(plpgsql_get_sqlstate(err_code)); } } + /* + * If has EXCEPTION_INIT, err_message is generated by err_code + * for example, if sqlcode = -60, then sqlerrm = " 60: non-GaussDB Exception" + * the length of sqlerrm is: 1 + sizeof(-sqlcode) + sizeof (msg_tail) + */ + if (stmt->hasExceptionInit) { + Assert(err_code < 0); + pfree(err_message); + + const char *msg_tail = ": non-GaussDB Exception"; + int msg_len = SQL_STATE_BUF_LEN + strlen(msg_tail) + 1; + err_message = (char *)palloc(msg_len); + errno_t rc = snprintf_s(err_message, msg_len, msg_len - 1, " %d%s", -err_code, msg_tail); + securec_check_ss(rc, "\0", "\0"); + } /* * Throw the error (may or may not come back) @@ -4552,6 +5251,9 @@ static void plpgsql_estate_setup(PLpgSQL_execstate* estate, PLpgSQL_function* fu estate->retval = (Datum)0; estate->retisnull = true; estate->rettype = InvalidOid; + estate->paramval = (Datum)0; + estate->paramisnull = true; + estate->paramtype = InvalidOid; estate->fn_rettype = func->fn_rettype; estate->retistuple = func->fn_retistuple; @@ -4560,6 +5262,7 @@ static void plpgsql_estate_setup(PLpgSQL_execstate* estate, PLpgSQL_function* fu estate->readonly_func = func->fn_readonly; estate->rettupdesc = NULL; + estate->paramtupdesc = NULL; estate->exitlabel = NULL; estate->cur_error = NULL; estate->tuple_store = NULL; @@ -4602,6 +5305,7 @@ static void plpgsql_estate_setup(PLpgSQL_execstate* estate, PLpgSQL_function* fu estate->plugin_info = NULL; estate->stack_entry_start = u_sess->plsql_cxt.nextStackEntryId + 1; estate->curr_nested_table_type = InvalidOid; + estate->is_exception = false; /* * Create an EState and ExprContext for evaluation of simple expressions. 
@@ -4714,7 +5418,7 @@ static void exec_prepare_plan(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, int u_sess->SPI_cxt._current->visit_id = (uint32)-1; /* Check to see if it's a simple expression */ - exec_simple_check_plan(expr); + exec_simple_check_plan(estate, expr); } /* ---------- @@ -4781,7 +5485,7 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st * to enforce strictness. */ if (stmt->into) { - if (!stmt->mod_stmt) { + if (!stmt->mod_stmt && !stmt->bulk_collect) { stmt->strict = true; } @@ -4809,6 +5513,10 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st plpgsql_estate = estate; +#ifndef ENABLE_MULTIPLE_NODES + t_thrd.xact_cxt.isSelectInto = stmt->into; +#endif + /* * Execute the plan */ @@ -4832,6 +5540,11 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st switch (rc) { case SPI_OK_SELECT: AssertEreport(!stmt->mod_stmt, MOD_PLSQL, "It should not be mod stmt."); +#ifndef ENABLE_MULTIPLE_NODES + if (stmt->sqlstmt->is_funccall && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) { + break; + } +#endif exec_set_found(estate, (SPI_processed != 0)); exec_set_sql_cursor_found(estate, (SPI_processed != 0) ? PLPGSQL_TRUE : PLPGSQL_FALSE); exec_set_sql_notfound(estate, (0 == SPI_processed) ? PLPGSQL_TRUE : PLPGSQL_FALSE); @@ -4947,8 +5660,12 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st errmodule(MOD_PLSQL), errmsg("query returned no rows when process INTO"))); } - /* set the target to NULL(s) */ - exec_move_row(estate, rec, row, NULL, tuptab->tupdesc); + if (stmt->bulk_collect) { + exec_read_bulk_collect(estate, row, tuptab); + } else { + /* set the target to NULL(s) */ + exec_move_row(estate, rec, row, NULL, tuptab->tupdesc, true); + } } else { if (n > 1 && (stmt->strict || stmt->mod_stmt) && !stmt->bulk_collect) { ereport(ERROR, @@ -4958,15 +5675,30 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st } if (stmt->bulk_collect) { exec_read_bulk_collect(estate, row, tuptab); + } else if (expr->out_param_dno > 0 && + is_function_with_plpgsql_language_and_outparam(get_func_oid_from_expr(expr))) { + estate->eval_tuptable = tuptab; + plpgsql_set_outparam_value(estate, stmt->sqlstmt); + estate->eval_tuptable = NULL; } else { /* Put the first result row into the target */ - exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc); + exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc, true); +#ifndef ENABLE_MULTIPLE_NODES + if (stmt->sqlstmt->is_funccall && row != NULL) { + restoreAutonmSessionCursors(estate, row); + } +#endif } } /* Clean up */ exec_eval_cleanup(estate); - SPI_freetuptable(SPI_tuptable); + /* + * SPI_tuptable will be modified by the subsequent non-simple expression. + * Therefore, use the saved tuptab pointer to free the tuple table.
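+ * (SPI_tuptable is session-global state and may no longer point at this result.)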
+ */ + SPI_freetuptable(tuptab); + SPI_tuptable = NULL; } else { /* If the statement returned a tuple table, complain */ if (SPI_tuptable != NULL) { @@ -4989,6 +5721,7 @@ static int exec_stmt_execsql(PLpgSQL_execstate* estate, PLpgSQL_stmt_execsql* st estate->cursor_return_data = saved_cursor_data; estate->cursor_return_numbers = saved_cursor_numbers; + t_thrd.xact_cxt.isSelectInto = false; return PLPGSQL_RC_OK; } @@ -5162,12 +5895,20 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate* estate, PLpgSQL_stmt_dynexecu stmtblock.cmd_type = stmt->cmd_type; stmtblock.lineno = stmt->lineno; TransactionId oldTransactionId = SPI_get_top_transaction_id(); - +#ifndef ENABLE_MULTIPLE_NODES + // forbid commit/rollback in an STP that is called to compute a value + bool savedisAllowCommitRollback = false; + bool needResetErrMsg = false; + needResetErrMsg = stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_USED_AS_EXPR); +#endif /* * First we evaluate the string expression after the EXECUTE keyword. Its * result is the querystring we have to execute. */ query = exec_eval_expr(estate, stmt->query, &isnull, &restype); +#ifndef ENABLE_MULTIPLE_NODES + stp_reset_xact_state_and_err_msg(savedisAllowCommitRollback, needResetErrMsg); +#endif if (isnull) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), @@ -5220,6 +5961,8 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate* estate, PLpgSQL_stmt_dynexecu } PG_END_TRY(); + /* Mark the packages the function uses, so they can't be deleted out from under us */ + AddPackageUseCount(func); /* Mark the function as busy, just pro forma */ func->use_count++; @@ -5249,9 +5992,14 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate* estate, PLpgSQL_stmt_dynexecu PG_CATCH(); { FormatCallStack* plcallstack = t_thrd.log_cxt.call_stack; +#ifndef ENABLE_MULTIPLE_NODES + estate_cursor_set(plcallstack); +#endif if (plcallstack != NULL) { t_thrd.log_cxt.call_stack = plcallstack->prev; } + /* Decrement package use-count */ + DecreasePackageUseCount(func); PG_RE_THROW(); } PG_END_TRY(); @@ -5269,6 +6017,8 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate* estate, PLpgSQL_stmt_dynexecu /* Function should now have no remaining use-counts ... */ func->use_count--; + /* Decrement package use-count */ + DecreasePackageUseCount(func); if (unlikely(func->use_count != 0)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -5306,6 +6056,10 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate* estate, PLpgSQL_stmt_dynexecu } plpgsql_estate = estate; +#ifndef ENABLE_MULTIPLE_NODES + t_thrd.xact_cxt.isSelectInto = stmt->into; +#endif + /* * Execute the query without preparing a saved plan.
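 * (isSelectInto, set above, lets exec_assign_value accept a clob value produced by EXECUTE ... INTO.)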
@@ -5358,6 +6112,7 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate* estate, PLpgSQL_stmt_dynexecu estate->cursor_return_data = saved_cursor_data; estate->cursor_return_numbers = saved_cursor_numbers; + t_thrd.xact_cxt.isSelectInto = false; return PLPGSQL_RC_OK; } @@ -5638,6 +6393,64 @@ static void BindCursorWithPortal(Portal portal, PLpgSQL_execstate *estate, int v } } +#ifndef ENABLE_MULTIPLE_NODES +/* check whether the cursor var is an autonomous session procedure's OUT param cursor */ +static bool IsAutoOutParam(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt, int dno) +{ + int curVarDno = -1; + /* only consider autonomous transaction procedures */ + if (u_sess->is_autonomous_session != true || u_sess->SPI_cxt._connected != 0) { + return false; + } + + /* means no out param */ + if (estate->func->out_param_varno == -1) { + return false; + } + + /* only consider OPEN cursor FOR SELECT... or OPEN cursor FOR EXECUTE... */ + if (stmt != NULL) { + if (stmt->query == NULL && stmt->dynquery == NULL) { + return false; + } + curVarDno = stmt->curvar; + } else { + curVarDno = dno; + } + + PLpgSQL_datum* outDatum = estate->datums[estate->func->out_param_varno]; + if (outDatum->dtype == PLPGSQL_DTYPE_VAR) { + if (curVarDno == estate->func->out_param_varno) { + return true; + } + return false; + } + + PLpgSQL_row* outRow = (PLpgSQL_row*)outDatum; + if (outRow->refname != NULL) { + /* means out param is just one normal row variable */ + return false; + } + for (int i = 0; i < outRow->nfields; i++) { + if (outRow->varnos[i] == curVarDno) { + return true; + } + } + + return false; +} +#endif + +#ifdef ENABLE_MULTIPLE_NODES +static void hold_portal_if_necessary(Portal portal) +{ + if (IS_PGXC_COORDINATOR && ENABLE_SQL_BETA_FEATURE(PLPGSQL_STREAM_FETCHALL) && + portal->hasStreamForPlpgsql) { + _SPI_hold_cursor(); + } +} +#endif + /* ---------- * exec_stmt_open Execute an OPEN cursor statement * ---------- @@ -5656,16 +6469,12 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) */ curvar = (PLpgSQL_var*)(estate->datums[stmt->curvar]); if (!curvar->isnull) { - if (OidIsValid(u_sess->plsql_cxt.running_pkg_oid)) { - if (curvar->pkg != NULL) { - MemoryContext temp; - PLpgSQL_package* pkg = curvar->pkg; - temp = MemoryContextSwitchTo(pkg->pkg_cxt); - curname = TextDatumGetCString(curvar->value); - MemoryContextSwitchTo(temp); - } else { - curname = TextDatumGetCString(curvar->value); - } + if (curvar->pkg != NULL) { + MemoryContext temp; + PLpgSQL_package* pkg = curvar->pkg; + temp = MemoryContextSwitchTo(pkg->pkg_cxt); + curname = TextDatumGetCString(curvar->value); + MemoryContextSwitchTo(temp); } else { curname = TextDatumGetCString(curvar->value); } @@ -5716,6 +6525,15 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) exec_set_isopen(estate, true, stmt->curvar + CURSOR_ISOPEN); exec_set_rowcount(estate, 0, true, stmt->curvar + CURSOR_ROWCOUNT); PinPortal(portal); +#ifndef ENABLE_MULTIPLE_NODES + if (IsAutoOutParam(estate, stmt)) { + portal->isAutoOutParam = true; + } + if (curvar->ispkg) { + portal->isPkgCur = true; + } +#endif + curvar->cursor_closed = false; return PLPGSQL_RC_OK; } else { /* ---------- @@ -5749,7 +6567,11 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) set_args.sqlstmt = stmt->argquery; set_args.into = true; /* XXX historically this has not been STRICT */ - set_args.row = (PLpgSQL_row*)(estate->datums[curvar->cursor_explicit_argrow]); + if (curvar->ispkg) { + set_args.row =
(PLpgSQL_row*)(curvar->pkg->datums[curvar->cursor_explicit_argrow]); + } else { + set_args.row = (PLpgSQL_row*)(estate->datums[curvar->cursor_explicit_argrow]); + } if (exec_stmt_execsql(estate, &set_args) != PLPGSQL_RC_OK) { ereport(ERROR, @@ -5798,14 +6620,10 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) * If cursor variable was NULL, store the generated portal name in it */ if (curname == NULL) { - if (OidIsValid(u_sess->plsql_cxt.running_pkg_oid)) { - if (curvar->pkg != NULL) { - MemoryContext temp = MemoryContextSwitchTo(curvar->pkg->pkg_cxt); - assign_text_var(curvar, portal->name); - MemoryContextSwitchTo(temp); - } else { - assign_text_var(curvar, portal->name); - } + if (curvar->pkg != NULL) { + MemoryContext temp = MemoryContextSwitchTo(curvar->pkg->pkg_cxt); + assign_text_var(curvar, portal->name); + MemoryContextSwitchTo(temp); } else { assign_text_var(curvar, portal->name); } @@ -5822,6 +6640,15 @@ static int exec_stmt_open(PLpgSQL_execstate* estate, PLpgSQL_stmt_open* stmt) exec_set_isopen(estate, true, stmt->curvar + CURSOR_ISOPEN); exec_set_rowcount(estate, 0, true, stmt->curvar + CURSOR_ROWCOUNT); PinPortal(portal); +#ifndef ENABLE_MULTIPLE_NODES + if (IsAutoOutParam(estate, stmt)) { + portal->isAutoOutParam = true; + } + if (curvar->ispkg) { + portal->isPkgCur = true; + } +#endif + curvar->cursor_closed = false; return PLPGSQL_RC_OK; } @@ -5855,6 +6682,7 @@ static int exec_stmt_fetch(PLpgSQL_execstate* estate, PLpgSQL_stmt_fetch* stmt) errmodule(MOD_PLSQL), errmsg("cursor variable \"%s\" is null in FETCH statement.", curvar->refname))); } + curname = TextDatumGetCString(curvar->value); portal = SPI_cursor_find(curname); @@ -5883,6 +6711,13 @@ static int exec_stmt_fetch(PLpgSQL_execstate* estate, PLpgSQL_stmt_fetch* stmt) exec_eval_cleanup(estate); } +#ifdef ENABLE_MULTIPLE_NODES + /* For redistribute and broadcast streams, if the cursor's SQL in a loop needs to communicate with a DN + * that is waiting for a stream operator, a hang will occur. + * To avoid this, we need to get all tuples for this fetch SQL.
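+ * (hold_portal_if_necessary() materializes the portal via _SPI_hold_cursor() on the CN when the + * PLPGSQL_STREAM_FETCHALL beta feature is enabled and the portal carries a stream plan.)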
*/ + hold_portal_if_necessary(portal); +#endif + if (!stmt->is_move) { /* ---------- * Determine if we fetch into a record or a row @@ -5927,6 +6762,10 @@ static int exec_stmt_fetch(PLpgSQL_execstate* estate, PLpgSQL_stmt_fetch* stmt) } else { exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc); } + } else { + if (stmt->bulk_collect) { + exec_read_bulk_collect(estate, row, tuptab); + } } exec_eval_cleanup(estate); @@ -5956,7 +6795,6 @@ static int exec_stmt_fetch(PLpgSQL_execstate* estate, PLpgSQL_stmt_fetch* stmt) static int exec_stmt_close(PLpgSQL_execstate* estate, PLpgSQL_stmt_close* stmt) { PLpgSQL_var* curvar = NULL; - PLpgSQL_var* curRowcountVar = NULL; Portal portal = NULL; char* curname = NULL; @@ -5971,17 +6809,18 @@ static int exec_stmt_close(PLpgSQL_execstate* estate, PLpgSQL_stmt_close* stmt) errmodule(MOD_PLSQL), errmsg("cursor variable \"%s\" is null in CLOSE statement.", curvar->refname))); } + curname = TextDatumGetCString(curvar->value); portal = SPI_cursor_find(curname); - curRowcountVar = (PLpgSQL_var*)(estate->datums[stmt->curvar + CURSOR_ROWCOUNT]); /* sometime close cursor can be called after exception, don't report error */ - if (portal == NULL && !curRowcountVar->isnull && estate->cur_error != NULL) { + if (portal == NULL && !curvar->cursor_closed && estate->cur_error != NULL) { pfree_ext(curname); exec_set_isopen(estate, false, stmt->curvar + CURSOR_ISOPEN); exec_set_cursor_found(estate, PLPGSQL_NULL, stmt->curvar + CURSOR_FOUND); exec_set_notfound(estate, PLPGSQL_NULL, stmt->curvar + CURSOR_NOTFOUND); exec_set_rowcount(estate, -1, true, stmt->curvar + CURSOR_ROWCOUNT); + curvar->cursor_closed = true; return PLPGSQL_RC_OK; } else if (portal == NULL) { ereport(ERROR, @@ -5996,25 +6835,31 @@ static int exec_stmt_close(PLpgSQL_execstate* estate, PLpgSQL_stmt_close* stmt) * ---------- */ UnpinPortal(portal); +#ifndef ENABLE_MULTIPLE_NODES + if (portal->isAutoOutParam) { + ResetAutoPortalConext(portal); + } +#endif SPI_cursor_close(portal); exec_set_isopen(estate, false, stmt->curvar + CURSOR_ISOPEN); exec_set_cursor_found(estate, PLPGSQL_NULL, stmt->curvar + CURSOR_FOUND); exec_set_notfound(estate, PLPGSQL_NULL, stmt->curvar + CURSOR_NOTFOUND); exec_set_rowcount(estate, -1, true, stmt->curvar + CURSOR_ROWCOUNT); - + curvar->cursor_closed = true; return PLPGSQL_RC_OK; } -static void rebuild_exception_subtransaction_chain(PLpgSQL_execstate* estate) +static void rebuild_exception_subtransaction_chain(PLpgSQL_execstate* estate, List* transactionList) { // Rebuild ResourceOwner chain, link Portal ResourceOwner to Top ResourceOwner. ResourceOwnerNewParent(t_thrd.utils_cxt.STPSavedResourceOwner, t_thrd.utils_cxt.CurrentResourceOwner); t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.utils_cxt.STPSavedResourceOwner; int subTransactionCount = u_sess->SPI_cxt.portal_stp_exception_counter; while(subTransactionCount > 0) { + Oid savedCurrentUser = InvalidOid; + int saveSecContext = 0; if(u_sess->SPI_cxt.portal_stp_exception_counter > 0) { MemoryContext oldcontext = CurrentMemoryContext; - estate->err_text = gettext_noop("during statement block entry"); #ifdef ENABLE_MULTIPLE_NODES /* CN should send savepoint command to remote nodes to begin sub transaction remotely. 
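 * (pgxc_node_remote_savepoint() below replays the savepoint on the datanodes.)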
*/ @@ -6023,19 +6868,32 @@ static void rebuild_exception_subtransaction_chain(PLpgSQL_execstate* estate) pgxc_node_remote_savepoint("Savepoint s1", EXEC_ON_DATANODES, true, true); } #endif - BeginInternalSubTransaction(NULL); - - /* Want to run statements inside function's memory context */ - MemoryContextSwitchTo(oldcontext); - - plpgsql_create_econtext(estate); - estate->err_text = NULL; + PG_TRY(); + { + GetUserIdAndSecContext(&savedCurrentUser, &saveSecContext); + transactionNode* node = (transactionNode*)lfirst(list_head(transactionList)); + SetUserIdAndSecContext(node->userId, node->secContext); + list_delete(transactionList, node); + pfree(node); + BeginInternalSubTransaction(NULL); + } + PG_CATCH(); + { + SetUserIdAndSecContext(savedCurrentUser, saveSecContext); + PG_RE_THROW(); + } + PG_END_TRY(); + SetUserIdAndSecContext(savedCurrentUser, saveSecContext); + /* Want to run statements inside function's memory context */ + MemoryContextSwitchTo(oldcontext); + plpgsql_create_econtext(estate); + estate->err_text = NULL; } - subTransactionCount--; } } + /* * exec_stmt_transaction * @@ -6048,7 +6906,8 @@ static int exec_stmt_transaction(PLpgSQL_execstate *estate, PLpgSQL_stmt* stmt) bool needResetErrMsg = false; // 1. Check whether the calling context is supported. SPI_stp_transaction_check(estate->readonly_func); - + List* transactionHead = NULL; + transactionHead = GetTransactionList(transactionHead); const char *PORTAL = "Portal"; if(strcmp(PORTAL, ResourceOwnerGetName(t_thrd.utils_cxt.CurrentResourceOwner)) == 0) { if(ResourceOwnerGetNextChild(t_thrd.utils_cxt.CurrentResourceOwner) @@ -6058,68 +6917,73 @@ static int exec_stmt_transaction(PLpgSQL_execstate *estate, PLpgSQL_stmt* stmt) errmsg("commit with PE is not supported"))); } } - + // 2. Hold portals. needResetErrMsg = stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_OPEN_FOR); _SPI_hold_cursor(); stp_reset_xact_state_and_err_msg(savedisAllowCommitRollback, needResetErrMsg); - if (u_sess->SPI_cxt.portal_stp_exception_counter == 0 && u_sess->plsql_cxt.stp_savepoint_cnt == 0) { // Recording Portal's ResourceOwner for rebuilding resource chain // when procedure contain transaction statement. // Current ResourceOwner is Portal ResourceOwner. t_thrd.utils_cxt.STPSavedResourceOwner = t_thrd.utils_cxt.CurrentResourceOwner; } - // 3. Save current transaction state for the outer transaction started by user's // begin/start statement. SPI_save_current_stp_transaction_state(); - + /* Saving es_query_cxt, transaction commit, or rollback will no longer delete es_query_cxt information */ + MemoryContext saveCxt = NULL; +#ifndef ENABLE_MULTIPLE_NODES + saveCxt = u_sess->plsql_cxt.simple_eval_estate->es_query_cxt; + MemoryContextSetParent(saveCxt, t_thrd.mem_cxt.portal_mem_cxt); +#endif // 4. Commit/rollback switch((PLpgSQL_stmt_types)stmt->cmd_type) { case PLPGSQL_STMT_COMMIT: SPI_commit(); break; - case PLPGSQL_STMT_ROLLBACK: SPI_rollback(); break; - default: ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("unsupported transaction statement type."))); } - /* all savepoint in PL has destoryed or released. */ u_sess->plsql_cxt.stp_savepoint_cnt = 0; - /* Update transaction start time. */ SetCurrentStatementStartTimestamp(); - // 5. Start a new transaction. - SPI_start_transaction(); - + SPI_start_transaction(transactionHead); // 6. Restore the outer transaction state. SPI_restore_current_stp_transaction_state(); // 7. Rebuild estate's context. 
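 // (The previous simple-expression EState belonged to the transaction that just ended; clearing these fields makes plpgsql_create_econtext() below rebuild it inside saveCxt.)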
     u_sess->plsql_cxt.simple_eval_estate = NULL;
-    plpgsql_create_econtext(estate);
+    u_sess->plsql_cxt.shared_simple_eval_resowner = NULL;
+    plpgsql_create_econtext(estate, saveCxt);
+#ifndef ENABLE_MULTIPLE_NODES
+    /* the old savedcxt has been freed, use the new context */
+    for (int i = u_sess->SPI_cxt._connected; i > 0; i--) {
+        u_sess->SPI_cxt._stack[i].savedcxt = u_sess->SPI_cxt._stack[i - 1].execCxt;
+    }
+    /* implicit cursor attribute variables should be reset on commit/rollback */
+    if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) {
+        reset_implicit_cursor_attr(estate);
+    }
+#endif
     // 8. Rebuild subtransaction chain for exception.
-    rebuild_exception_subtransaction_chain(estate);
-
+    rebuild_exception_subtransaction_chain(estate, transactionHead);
     // 9. move old subtransaction' remain resource into Current Transaction
-    XactResumeSPIContext(false);
-
+    stp_cleanup_subxact_resowner(estate->stack_entry_start);
     if (u_sess->SPI_cxt.portal_stp_exception_counter == 0) {
         t_thrd.utils_cxt.CurrentResourceOwner = t_thrd.utils_cxt.STPSavedResourceOwner;
     }
-
     return PLPGSQL_RC_OK;
-
 }
+
 /* ----------
  * exec_stmt_null            Locate the execution status.
  * ----------
@@ -6183,6 +7047,9 @@ void exec_assign_expr(PLpgSQL_execstate* estate, PLpgSQL_datum* target, PLpgSQL_
     value = exec_eval_expr(estate, expr, &isnull, &valtype, &tableOfIndex);

+    /* Under A_FORMAT compatibility, we need to separate the OUT param from RETURN */
+    plpgsql_set_outparam_value(estate, expr, &value, &isnull);
+
     /* copy cursor data to estate->datums */
     if (valtype == REFCURSOROID && target->dtype == PLPGSQL_DTYPE_VAR) {
         PLpgSQL_var* var = (PLpgSQL_var*)target;
@@ -6250,6 +7117,12 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
             PLpgSQL_var* var = (PLpgSQL_var*)target;
             Datum newvalue;

+            if (!t_thrd.xact_cxt.isSelectInto && is_external_clob(valtype, *isNull, value)) {
+                ereport(ERROR,
+                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("clob from execute into does not support assignment.")));
+            }
+
             newvalue = exec_cast_value(estate,
                 value,
                 valtype,
@@ -6296,8 +7169,9 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
             if (isByReference) {
                 var->freeval = true;
             }
-
-            if (tableOfIndex != NULL) {
+            if (var->nest_table != NULL && !var->isnull) {
+                assignNestTableOfValue(estate, var, var->value, tableOfIndex);
+            } else if (tableOfIndex != NULL) {
                 if (var->ispkg) {
                     MemoryContext temp = MemoryContextSwitchTo(var->pkg->pkg_cxt);
                     HTAB* newTableOfIndex = copyTableOfIndex(tableOfIndex);
@@ -6521,6 +7395,17 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
             MemoryContext oldcontext = NULL;
             AttrNumber attrno = ((PLpgSQL_arrayelem*)target)->assignattrno;

+            if (is_external_clob(valtype, *isNull, value)) {
+                bool is_null = false;
+                bool is_have_huge_clob = false;
+                struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(value));
+                value = fetch_lob_value_from_tuple(lob_pointer, InvalidOid, &is_null, &is_have_huge_clob);
+                if (is_have_huge_clob) {
+                    ereport(ERROR,
+                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("huge clob is not supported as an array element.")));
+                }
+            }
             /*
              * We need to do subscript evaluation, which might require
              * evaluating general expressions; and the caller might have
@@ -6798,6 +7683,18 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
             MemoryContext oldcontext = NULL;
             AttrNumber attrno = ((PLpgSQL_tableelem*)target)->assignattrno;

+            if (is_external_clob(valtype, *isNull, value)) {
+                bool is_null = false;
+                bool is_have_huge_clob = false;
+                struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(value));
+                value = fetch_lob_value_from_tuple(lob_pointer, InvalidOid, &is_null, &is_have_huge_clob);
+                if (is_have_huge_clob) {
+                    ereport(ERROR,
+                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("huge clob is not supported as a table-of element.")));
+                }
+            }
+
             /*
              * We need to do subscript evaluation, which might require
              * evaluating general expressions; and the caller might have
@@ -6893,10 +7790,17 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
                         valtype, isNull, innerVar->tableOfIndex);
                     break;
                 } else {
+                    MemoryContext temp = NULL;
+                    if (((PLpgSQL_var*)target)->ispkg) {
+                        temp = MemoryContextSwitchTo(((PLpgSQL_var*)target)->pkg->pkg_cxt);
+                    }
                     for (int i = 0; i < nsubscripts; i++) {
                         elemTableOfIndex = evalSubscipts(estate, nsubscripts, i, subscriptvals,
                             subscripts, subscriptType, elemTableOfIndex);
                     }
+                    if (((PLpgSQL_var*)target)->ispkg) {
+                        MemoryContextSwitchTo(temp);
+                    }
                 }
                 /* Now we can restore caller's SPI_execute result if any. */
                 AssertEreport(estate->eval_tuptable == NULL, MOD_PLSQL, "eval tuptable should not be null");
@@ -7051,8 +7955,9 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
              */
             *isNull = false;
             ((PLpgSQL_var*)target)->tableOfIndexType = subscriptType;
+            ((PLpgSQL_var*)target)->tableOfIndex = elemTableOfIndex;
             exec_assign_value(estate, target, PointerGetDatum(newtableval),
-                tableelem->tabletypoid, isNull, elemTableOfIndex);
+                tableelem->tabletypoid, isNull, NULL);
             break;
         }

@@ -7060,6 +7965,17 @@ void exec_assign_value(PLpgSQL_execstate* estate, PLpgSQL_datum* target, Datum v
             /*
              * Target has a assign list
              */
+            if (is_external_clob(valtype, *isNull, value)) {
+                bool is_null = false;
+                bool is_have_huge_clob = false;
+                struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(value));
+                value = fetch_lob_value_from_tuple(lob_pointer, InvalidOid, &is_null, &is_have_huge_clob);
+                if (is_have_huge_clob) {
+                    ereport(ERROR,
+                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("huge clob is not supported as a record element.")));
+                }
+            }
             PLpgSQL_assignlist* assignvar = (PLpgSQL_assignlist*)target;
             List* assignlist = assignvar->assignlist;
             PLpgSQL_datum* assigntarget = estate->datums[assignvar->targetno];
@@ -7087,6 +8003,7 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar
     List* subscripts = NIL;
     ListCell* lc = NULL;
     DList* targetlist = NULL;
+    HTAB* elemTableOfIndex = NULL;
     bool hasarray = false;
     int subscriptvals[MAXDIM];
     int nsubscripts;
@@ -7101,7 +8018,8 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar
             if (resultvar == NULL) {
                 nsubscripts = list_length(subscripts);
                 PLpgSQL_datum* oldresulttarget = resulttarget;
-                evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts, &resulttarget);
+                evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts,
+                    &resulttarget, &elemTableOfIndex);
                 /* nested table get inner target, only need one subscripts */
                 if (resulttarget != oldresulttarget) {
                     subscriptvals[0] = subscriptvals[nsubscripts - 1];
@@ -7111,7 +8029,8 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar
                 targetlist = dlappend(targetlist, resultvar);
             } else {
                 nsubscripts = list_length(subscripts);
-                evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts, &dummytarget);
+                evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts,
+                    &dummytarget, &elemTableOfIndex);
             }
             resultvar = extractArrayElem(estate, resultvar, subscriptvals, nsubscripts);
             targetlist = dlappend(targetlist, resultvar);
@@ -7128,7 +8047,7 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar
         if (resultvar == NULL) {
             nsubscripts = list_length(subscripts);
             PLpgSQL_datum* oldresulttarget = resulttarget;
-            evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts, &resulttarget);
+            evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts, &resulttarget, &elemTableOfIndex);
            /* nested table already get inner target, only need one subscripts */
             if (resulttarget != oldresulttarget) {
                 subscriptvals[0] = subscriptvals[nsubscripts - 1];
@@ -7138,7 +8057,7 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar
             targetlist = dlappend(targetlist, resultvar);
         } else {
             nsubscripts = list_length(subscripts);
-            evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts, &dummytarget);
+            evalSubscriptList(estate, subscripts, subscriptvals, nsubscripts, &dummytarget, &elemTableOfIndex);
         }
         resultvar = extractArrayElem(estate, resultvar, subscriptvals, nsubscripts);
         targetlist = dlappend(targetlist, resultvar);
@@ -7148,9 +8067,13 @@ static void exec_assign_list(PLpgSQL_execstate* estate, PLpgSQL_datum* assigntar
     if (targetlist != NULL) {
         resultval = formDatumFromTargetList(estate, targetlist, value, valtype, &resultvaltype, isNull);
     }
-
-    exec_assign_value(estate, resulttarget, resultval, resultvaltype, isNull,
-        ((PLpgSQL_var*)resulttarget)->tableOfIndex);
+    ((PLpgSQL_var*)resulttarget)->tableOfIndex = elemTableOfIndex;
+    exec_assign_value(estate, resulttarget, resultval, resultvaltype, isNull, NULL);
+    /* deep free target list */
+    for (DListCell* dlc = dlist_tail_cell(targetlist); dlc != NULL && lprev(dlc) != NULL; dlc = lprev(dlc)) {
+        PLpgSQL_temp_assignvar* curresultvar = (PLpgSQL_temp_assignvar*)lfirst(dlc);
+        pfree_ext(curresultvar->subscriptvals);
+    }
     dlist_free(targetlist, true);
 }

@@ -7436,7 +8359,7 @@ static PLpgSQL_temp_assignvar* build_temp_assignvar_from_datum(PLpgSQL_datum* ta
 }

 static void evalSubscriptList(PLpgSQL_execstate* estate, const List* subscripts,
-    int* subscriptvals, int nsubscripts, PLpgSQL_datum** target)
+    int* subscriptvals, int nsubscripts, PLpgSQL_datum** target, HTAB** elemTableOfIndex)
 {
     if (nsubscripts > MAXDIM) {
         ereport(ERROR,
@@ -7480,19 +8403,28 @@ static void evalSubscriptList(PLpgSQL_execstate* estate, const List* subscripts,
             /* nest tableof value's nest level should match subexprs's number.
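+             * Each subscript consumes one nesting layer; too many or too few
+             * subscripts is reported as a datatype mismatch below.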
              */
             if (i > tableof_level) {
                 ereport(ERROR,
-                    (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                         errmodule(MOD_PLSQL),
                         errmsg("subscripts list has members more than tableof value %s expected", valname)));
             }
+            MemoryContext temp = NULL;
+            if (((PLpgSQL_var*)(*target))->ispkg) {
+                temp = MemoryContextSwitchTo(((PLpgSQL_var*)(*target))->pkg->pkg_cxt);
+            }
             tableOfIndex = evalSubscipts(estate, nsubscripts, i, subscriptvals, subexprs, subscriptType, tableOfIndex);
-            ((PLpgSQL_var*)(*target))->tableOfIndex = tableOfIndex;
+            if (((PLpgSQL_var*)(*target))->ispkg) {
+                MemoryContextSwitchTo(temp);
+            }
+            if (*elemTableOfIndex == NULL) {
+                *elemTableOfIndex = tableOfIndex;
+            }
         }
         i++;
     }
     if (i - tableof_level != 1) {
         ereport(ERROR,
-            (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+            (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmodule(MOD_PLSQL),
                 errmsg("subscripts list has members less than tableof value %s expected", valname)));
     }
@@ -7602,8 +8534,12 @@ static PLpgSQL_var* evalSubsciptsNested(PLpgSQL_execstate* estate, PLpgSQL_var*
     }
     estate->eval_tuptable = NULL;

-    PLpgSQL_var* nest_var = (PLpgSQL_var*)estate->datums[index];
-
+    PLpgSQL_var* nest_var = NULL;
+    if (tablevar->ispkg) {
+        nest_var = (PLpgSQL_var*)tablevar->pkg->datums[index];
+    } else {
+        nest_var = (PLpgSQL_var*)estate->datums[index];
+    }
     return nest_var;
 }

@@ -7618,8 +8554,15 @@ static HTAB* evalSubscipts(PLpgSQL_execstate* estate, int nsubscripts, int pos,
     Oid exprtypeid;
     /* subcript type is index by varchar/integer */
     if (OidIsValid(subscriptType)) {
+        bool isTran = false;
+        MemoryContext savedContext = CurrentMemoryContext;
         Datum exprdatum = exec_eval_expr(estate, subscripts[nsubscripts - 1 - pos], &subisnull, &exprtypeid);
+        MemoryContextSwitchTo(savedContext);
         exprdatum = exec_simple_cast_value(estate, exprdatum, exprtypeid, subscriptType, -1, subisnull);
+        if (subscriptType == VARCHAROID && !subisnull && VARATT_IS_1B(exprdatum)) {
+            exprdatum = transVaratt1BTo4B(exprdatum);
+            isTran = true;
+        }
         if (subisnull) {
             ereport(ERROR,
                 (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
@@ -7637,6 +8580,9 @@ static HTAB* evalSubscipts(PLpgSQL_execstate* estate, int nsubscripts, int pos,
         } else {
             subscriptvals[pos] = insertTableOfIndexByDatumValue(key, &tableOfIndex);
         }
+        if (isTran) {
+            pfree(DatumGetPointer(exprdatum));
+        }
     } else {
         /* subcript type is integer */
         subscriptvals[pos] = exec_eval_integer(estate, subscripts[nsubscripts - 1 - pos], &subisnull);
@@ -7662,6 +8608,20 @@ static HTAB* evalSubscipts(PLpgSQL_execstate* estate, int nsubscripts, int pos,
     return tableOfIndex;
 }

+Datum transVaratt1BTo4B(Datum value)
+{
+    Size data_size = VARSIZE_SHORT(value) - VARHDRSZ_SHORT;
+    Size new_size = data_size + VARHDRSZ;
+    struct varlena *new_value;
+    errno_t rc = EOK;
+
+    new_value = (struct varlena *)palloc(new_size);
+    SET_VARSIZE(new_value, new_size);
+    rc = memcpy_s(VARDATA(new_value), new_size, VARDATA_SHORT(value), data_size);
+    securec_check(rc, "", "");
+    return PointerGetDatum(new_value);
+}
+
 static uint32 tableOfIndexHashFunc(const void* key, Size keysize)
 {
     const TableOfIndexKey *item = (const TableOfIndexKey *) key;
@@ -7704,11 +8664,21 @@ static HTAB* createTableOfIndex()
     ctl.entrysize = sizeof(TableOfIndexEntry);
     ctl.hash = (HashValueFunc)tableOfIndexHashFunc;
     ctl.match = (HashCompareFunc)tableOfIndexKeyMatch;
-    MemoryContext indexContext = AllocSetContextCreate(CurrentMemoryContext,
-        "tableOfIndexContext",
-        ALLOCSET_DEFAULT_MINSIZE,
-        ALLOCSET_DEFAULT_INITSIZE,
-        ALLOCSET_DEFAULT_MAXSIZE);
+    MemoryContext indexContext = NULL;
+    if (CurrentMemoryContext->is_shared) {
+        indexContext = AllocSetContextCreate(CurrentMemoryContext,
+            "tableOfIndexContext",
+            ALLOCSET_DEFAULT_MINSIZE,
+            ALLOCSET_DEFAULT_INITSIZE,
+            ALLOCSET_DEFAULT_MAXSIZE,
+            SHARED_CONTEXT);
+    } else {
+        indexContext = AllocSetContextCreate(CurrentMemoryContext,
+            "tableOfIndexContext",
+            ALLOCSET_DEFAULT_MINSIZE,
+            ALLOCSET_DEFAULT_INITSIZE,
+            ALLOCSET_DEFAULT_MAXSIZE);
+    }
     ctl.hcxt = indexContext;
     int flags = HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE | HASH_EXTERN_CONTEXT;
     HTAB* hashTable = hash_create("tableof_index", TABLEOFINDEXBUCKETNUM, &ctl, flags);
@@ -7773,6 +8743,9 @@ static PLpgSQL_var* makeNewNestedPlpgsqlVar(PLpgSQL_var* src)
     PLpgSQL_var* dest = (PLpgSQL_var*)palloc(sizeof(PLpgSQL_var));
     dest->dtype = src->dtype;
     dest->dno = 0;
+    dest->pkg = src->pkg;
+    dest->pkg_name = list_copy(src->pkg_name);
+    dest->varname = pstrdup(src->varname);
     dest->ispkg = src->ispkg;
     dest->refname = pstrdup(src->refname);
     dest->lineno = src->lineno;
@@ -7786,11 +8759,10 @@ static PLpgSQL_var* makeNewNestedPlpgsqlVar(PLpgSQL_var* src)
     dest->freeval = src->freeval;
     dest->is_cursor_var = src->is_cursor_var;
     dest->is_cursor_open = src->is_cursor_open;
-    dest->pkg_name = NIL;
-    dest->pkg = NULL;
     dest->tableOfIndexType = src->tableOfIndexType;
     dest->tableOfIndex = NULL;
     dest->nest_table = makeNewNestedPlpgsqlVar(src->nest_table);
+    dest->nest_layers = src->nest_layers;
     return dest;
 }

@@ -7801,12 +8773,18 @@ static int addNewNestedTable(PLpgSQL_execstate* estate, TableOfIndexKey key, PLpgSQL_var* base_table)
 {
+    MemoryContext old = NULL;
+    if (base_table->ispkg) {
+        old = MemoryContextSwitchTo(base_table->pkg->pkg_cxt);
+    }
     PLpgSQL_var* origin_table = base_table->nest_table;
     PLpgSQL_var* new_nest_table = makeNewNestedPlpgsqlVar(origin_table);
     Oid elemtypoid = INT4OID;
     ArrayType* arrayval = NULL;
-    if (base_table->value == InvalidOid) {
+    if (base_table->value == 0) {
         arrayval = construct_empty_array(elemtypoid);
+        base_table->freeval = true;
+        base_table->isnull = false;
     } else {
         arrayval = (ArrayType*)base_table->value;
     }
@@ -7816,6 +8794,9 @@ static int addNewNestedTable(PLpgSQL_execstate* estate, TableOfIndexKey key, PLpgSQL_var* base_table)
     base_table->value = fillNestedTableArray(arrayval, base_table->datatype->typoid, INT4OID, dno, idx);
     base_table->isnull = false;
+    if (base_table->ispkg) {
+        (void)MemoryContextSwitchTo(old);
+    }
     return dno;
 }

@@ -7843,6 +8824,65 @@ Datum fillNestedTableArray(ArrayType* arrayval, Oid parenttypoid, Oid elemtypoid
     return (Datum)resultArray;
 }

+/* recursively set a nested table's inner value */
+static void assignNestTableOfValue(PLpgSQL_execstate* estate, PLpgSQL_var* var, Datum oldvalue, HTAB* tableOfIndex)
+{
+    /* for the last layer of tableof, assign the array value and copy the index */
+    if (var->nest_table == NULL) {
+        exec_assign_value(estate, (PLpgSQL_datum*)var, oldvalue,
+            var->datatype->typoid, &var->isnull, tableOfIndex);
+        return;
+    }
+    if (tableOfIndex == NULL) {
+        return;
+    }
+    HASH_SEQ_STATUS hashSeq;
+    hash_seq_init(&hashSeq, tableOfIndex);
+    /* clear var's old index and value */
+    bool freevalLater = false;
+    bool freeindexLater = false;
+    if (var->value != oldvalue) {
+        free_var(var);
+    } else {
+        var->value = (Datum)0;
+        var->freeval = false;
+        freevalLater = true;
+    }
+    if (var->tableOfIndex != tableOfIndex) {
+        hash_destroy(var->tableOfIndex);
+        var->tableOfIndex = NULL;
+    } else {
+        var->tableOfIndex = NULL;
+        freeindexLater = true;
+    }
+    TableOfIndexEntry* srcEntry = NULL;
+    while ((srcEntry = (TableOfIndexEntry*)hash_seq_search(&hashSeq)) != NULL) {
+        PLpgSQL_var* oldvar = srcEntry->var;
+        PLpgSQL_var* nest_var = NULL;
+        if (unlikely(oldvar == NULL || oldvar->nest_layers != var->nest_table->nest_layers)) {
+            ereport(ERROR,
+                (errcode(ERRCODE_DATATYPE_MISMATCH),
+                    errmodule(MOD_PLSQL),
+                    errmsg("Nested tableof var %s assigned does not match the expected nest layers.", var->refname)));
+        }
+        /* first add the new var */
+        int index = addNewNestedTable(estate, srcEntry->key, var);
+        if (var->ispkg) {
+            nest_var = (PLpgSQL_var*)var->pkg->datums[index];
+        } else {
+            nest_var = (PLpgSQL_var*)estate->datums[index];
+        }
+        Assert(nest_var->nest_layers == oldvar->nest_layers);
+        nest_var->isnull = oldvar->isnull;
+        /* recursively assign the inner nested table value */
+        assignNestTableOfValue(estate, nest_var, oldvar->value, oldvar->tableOfIndex);
+    }
+    if (freevalLater && oldvalue != 0)
+        pfree(DatumGetPointer(oldvalue));
+    if (freeindexLater)
+        hash_destroy(tableOfIndex);
+}
+
 static int insertTableOfIndexByDatumValue(TableOfIndexKey key, HTAB** tableOfIndex, PLpgSQL_var* var)
 {
     if (*tableOfIndex == NULL) {
@@ -8270,13 +9310,12 @@ static Datum exec_eval_expr(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, bool*
 {
     Datum result = 0;
     int rc;
-#ifndef ENABLE_PRIVATEGAUSS
+#ifdef ENABLE_MULTIPLE_NODES
     // forbid commit/rollback in the stp which is called to get value
     bool savedisAllowCommitRollback = false;
     bool needResetErrMsg = false;
     needResetErrMsg = stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_USED_AS_EXPR);
-#endif
-#ifdef ENABLE_PRIVATEGAUSS
+#else
     TransactionId oldTransactionId = SPI_get_top_transaction_id();
 #endif
     /*
@@ -8296,10 +9335,10 @@ static Datum exec_eval_expr(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, bool*
      * directly
      */
     if (exec_eval_simple_expr(estate, expr, &result, isNull, rettype, tableOfIndex)) {
-#ifndef ENABLE_PRIVATEGAUSS
+#ifdef ENABLE_MULTIPLE_NODES
         stp_reset_xact_state_and_err_msg(savedisAllowCommitRollback, needResetErrMsg);
 #else
-        stp_check_transaction_and_create_econtext(estate,oldTransactionId);
+        stp_check_transaction_and_create_econtext(estate, oldTransactionId);
 #endif
         return result;
     }
@@ -8308,9 +9347,12 @@ static Datum exec_eval_expr(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, bool*
     /*
      * Else do it the hard way via exec_run_select
      */
     rc = exec_run_select(estate, expr, 2, NULL);
-#ifndef ENABLE_PRIVATEGAUSS
+#ifdef ENABLE_MULTIPLE_NODES
     stp_reset_xact_state_and_err_msg(savedisAllowCommitRollback, needResetErrMsg);
+#else
+    stp_check_transaction_and_create_econtext(estate, oldTransactionId);
 #endif
+
     if (rc != SPI_OK_SELECT) {
         ereport(ERROR,
             (errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -8318,10 +9360,15 @@ static Datum exec_eval_expr(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, bool*
                 errmsg("query \"%s\" did not return data when evaluate expression", expr->query)));
     }

+    bool allowMultiColumn = false;
+    if (is_function_with_plpgsql_language_and_outparam(get_func_oid_from_expr(expr))) {
+        allowMultiColumn = true;
+    }
+
     /*
      * Check that the expression returns exactly one column...
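+     * (more than one column is tolerated only when the target is a PL/pgSQL
+     * function with OUT parameters under A_FORMAT; see allowMultiColumn above)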
      */
-    if (estate->eval_tuptable->tupdesc->natts != 1) {
+    if (estate->eval_tuptable->tupdesc->natts != 1 && !allowMultiColumn) {
         ereport(ERROR,
             (errcode(ERRCODE_SYNTAX_ERROR),
                 errmodule(MOD_PLSQL),
@@ -8338,6 +9385,7 @@ static Datum exec_eval_expr(PLpgSQL_execstate* estate, PLpgSQL_expr* expr, bool*
     *rettype = SPI_gettypeid(estate->eval_tuptable->tupdesc, 1);
     if (plpgsql_estate && plpgsql_estate->curr_nested_table_type != InvalidOid) {
         *rettype = plpgsql_estate->curr_nested_table_type;
+        plpgsql_estate->curr_nested_table_type = InvalidOid;
     }

     /*
@@ -8491,6 +9539,13 @@ static int exec_for_query(PLpgSQL_execstate* estate, PLpgSQL_stmt_forq* stmt, Po
     tuptab = SPI_tuptable;
     n = SPI_processed;

+#ifdef ENABLE_MULTIPLE_NODES
+    /* For redistribute and broadcast streams, if sql in the loop needs to communicate with a dn
+     * that is waiting for a stream operator, a hang will occur.
+     * To avoid this, we need to get all tuples for this fetch sql. */
+    hold_portal_if_necessary(portal);
+#endif
+
     /*
      * If the query didn't return any rows, set the target to NULL and fall
      * through with found = false.
@@ -8644,9 +9699,8 @@ static bool exec_eval_simple_expr(
     ParamListInfo paramLI = NULL;
     PLpgSQL_expr* save_cur_expr = NULL;
     MemoryContext oldcontext = NULL;
-#ifdef ENABLE_PRIVATEGAUSS
+#ifndef ENABLE_MULTIPLE_NODES
     TransactionId oldTransactionId = SPI_get_top_transaction_id();
-    ResourceOwner oldOwner = t_thrd.utils_cxt.CurrentResourceOwner;
 #endif
     /*
      * Forget it if expression wasn't simple before.
@@ -8663,38 +9717,93 @@ static bool exec_eval_simple_expr(
     }

     /*
-     * Revalidate cached plan, so that we will notice if it became stale. (We
-     * need to hold a refcount while using the plan, anyway.)
+     * Check to see if the cached plan has been invalidated. If not, and this
+     * is the first use in the current transaction, save a plan refcount in
+     * the simple-expression resowner.
      */
-    cplan = SPI_plan_get_cached_plan(expr->plan);
+    if (likely(CachedPlanIsSimplyValid(expr->expr_simple_plansource,
+        expr->expr_simple_plan,
+        (expr->expr_simple_plan_lxid != curlxid ?
+        u_sess->plsql_cxt.shared_simple_eval_resowner : NULL)))) {
+        /*
+         * It's still good, so just remember that we have a refcount on the
+         * plan in the current transaction. (If we already had one, this
+         * assignment is a no-op.)
+         */
+        expr->expr_simple_plan_lxid = curlxid;
+    } else {
+        /*
+         * If we have a valid refcount on some previous version of the plan,
+         * release it, so we don't leak plans intra-transaction.
+         */
+        if (expr->expr_simple_plan_lxid == curlxid) {
+            ResourceOwner saveResourceOwner = t_thrd.utils_cxt.CurrentResourceOwner;
+            t_thrd.utils_cxt.CurrentResourceOwner = u_sess->plsql_cxt.shared_simple_eval_resowner;
+            ReleaseCachedPlan(expr->expr_simple_plan, true);
+            t_thrd.utils_cxt.CurrentResourceOwner = saveResourceOwner;
+            expr->expr_simple_plan = NULL;
+            expr->expr_simple_plan_lxid = InvalidLocalTransactionId;
+        }

-    if (cplan == NULL) {
-        return false;
-    }
-
-    /*
-     * We can't get a failure here, because the number of CachedPlanSources in
-     * the SPI plan can't change from what exec_simple_check_plan saw; it's a
-     * property of the raw parsetree generated from the query text.
-     */
-    AssertEreport(estate->eval_tuptable == NULL, MOD_PLSQL, "eval tuptable should be null");
-
-    if (ENABLE_CN_GPC && !expr->is_cachedplan_shared && cplan->isShared()) {
-        exec_simple_recheck_plan(expr, cplan);
-        if (expr->expr_simple_expr == NULL) {
-            /* Ooops, release refcount and fail */
-            ReleaseCachedPlan(cplan, true);
+        /*
+         * Revalidate cached plan, so that we will notice if it became stale. (We
+         * need to hold a refcount while using the plan, anyway.)
+         */
+        cplan = SPI_plan_get_cached_plan(expr->plan);
+        if (cplan == NULL) {
             return false;
         }
-    }
-    if (cplan->generation != expr->expr_simple_generation) {
-        /* It got replanned ... is it still simple? */
-        exec_simple_recheck_plan(expr, cplan);
-        if (expr->expr_simple_expr == NULL) {
-            /* Ooops, release refcount and fail */
+
+        /*
+         * This test probably can't fail either, but if it does, cope by
+         * declaring the plan to be non-simple. On success, we'll acquire a
+         * refcount on the new plan, stored in simple_eval_resowner.
+         */
+        if (CachedPlanAllowsSimpleValidityCheck(expr->expr_simple_plansource, cplan,
+            u_sess->plsql_cxt.shared_simple_eval_resowner)) {
+            /* Remember that we have the refcount */
+            expr->expr_simple_plan = cplan;
+            expr->expr_simple_plan_lxid = curlxid;
+        } else {
+            /* Release SPI_plan_get_cached_plan's refcount */
             ReleaseCachedPlan(cplan, true);
+            /* Mark expression as non-simple, and fail */
+            expr->expr_simple_expr = NULL;
             return false;
         }
+
+        /*
+         * SPI_plan_get_cached_plan acquired a plan refcount stored in the
+         * active resowner. We don't need that anymore, so release it.
+         */
+        ReleaseCachedPlan(cplan, true);
+
+        /* Extract desired scalar expression from cached plan */
+        exec_save_simple_expr(expr, cplan);
+
+        /*
+         * We can't get a failure here, because the number of CachedPlanSources in
+         * the SPI plan can't change from what exec_simple_check_plan saw; it's a
+         * property of the raw parsetree generated from the query text.
+         */
+        AssertEreport(estate->eval_tuptable == NULL, MOD_PLSQL, "eval tuptable should be null");
+        if (ENABLE_CN_GPC && !expr->is_cachedplan_shared && cplan->isShared()) {
+            exec_simple_recheck_plan(expr, cplan);
+            if (expr->expr_simple_expr == NULL) {
+                /* Ooops, release refcount and fail */
+                ReleaseCachedPlan(cplan, true);
+                return false;
+            }
+        }
+
+        if (cplan->generation != expr->expr_simple_generation) {
+            /* It got replanned ... is it still simple? */
+            exec_simple_recheck_plan(expr, cplan);
+            if (expr->expr_simple_expr == NULL) {
+                /* Ooops, release refcount and fail */
+                ReleaseCachedPlan(cplan, true);
+                return false;
+            }
+        }
     }
     /*
      * Pass back previously-determined result type.
@@ -8706,17 +9815,7 @@ static bool exec_eval_simple_expr(
      * the current transaction. (This will be forced to happen if we called
      * exec_simple_recheck_plan above.)
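+     * (The refcount on the cached plan itself now lives in the session's
+     * shared_simple_eval_resowner, acquired in the validity check above.)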
      */
-#ifdef ENABLE_PRIVATEGAUSS
-    if (t_thrd.mem_cxt.portal_mem_cxt == NULL) {
-        oldcontext = MemoryContextSwitchTo(u_sess->plsql_cxt.simple_eval_estate->es_query_cxt);
-    } else {
-        oldcontext = MemoryContextSwitchTo(t_thrd.mem_cxt.portal_mem_cxt);
-    }
-    expr->expr_simple_state = ExecInitExpr(expr->expr_simple_expr, NULL);
-    expr->expr_simple_in_use = false;
-    expr->expr_simple_lxid = curlxid;
-    MemoryContextSwitchTo(oldcontext);
-#else
+
     if (expr->expr_simple_lxid != curlxid) {
         oldcontext = MemoryContextSwitchTo(u_sess->plsql_cxt.simple_eval_estate->es_query_cxt);
         expr->expr_simple_state = ExecInitExpr(expr->expr_simple_expr, NULL);
@@ -8724,7 +9823,6 @@ static bool exec_eval_simple_expr(
         expr->expr_simple_lxid = curlxid;
         MemoryContextSwitchTo(oldcontext);
     }
-#endif

     /*
      * We have to do some of the things SPI_execute_plan would do, in
@@ -8744,7 +9842,8 @@ static bool exec_eval_simple_expr(
     SPI_push();

     oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
-    if (!estate->readonly_func && expr->expr_simple_need_snapshot) {
+    bool need_snapshot = (expr->expr_simple_mutable && !estate->readonly_func);
+    if (need_snapshot) {
         CommandCounterIncrement();
         PushActiveSnapshot(GetTransactionSnapshot());
     }
@@ -8780,21 +9879,26 @@ static bool exec_eval_simple_expr(
         econtext->is_cursor = true;
     }
     /* get tableof index from param */
-    Oid tableOfIndexType = InvalidOid;
-    bool isNestedtable;
-    HTAB* tableOfIndexParam = ExecEvalParamExternTableOfIndex(expr->expr_simple_state, econtext,
-        &tableOfIndexType, &isNestedtable);
-    if (tableOfIndex != NULL) {
-        *tableOfIndex = tableOfIndexParam;
-    }
+    if (expr->is_have_tableof_index_var || expr->is_have_tableof_index_func) {
+        ExecTableOfIndexInfo execTableOfIndexInfo;
+        initExecTableOfIndexInfo(&execTableOfIndexInfo, econtext);
+        ExecEvalParamExternTableOfIndex((Node*)expr->expr_simple_state->expr, &execTableOfIndexInfo);

-    u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = tableOfIndexType;
-    u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = tableOfIndexParam;
+        if (tableOfIndex != NULL) {
+            *tableOfIndex = execTableOfIndexInfo.tableOfIndex;
+        }
+
+        u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = execTableOfIndexInfo.tableOfIndexType;
+        u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = execTableOfIndexInfo.tableOfIndex;
+        u_sess->SPI_cxt.cur_tableof_index->tableOfNestLayer = execTableOfIndexInfo.tableOfLayers;
+        /* For nested table-of output, save this var's layer in tableOfGetNestLayer for ExecEvalArrayRef,
+           or set it to zero to get the whole nested table. */
+        u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer =
+            (execTableOfIndexInfo.tableOfLayers > 0 && IsA(expr->expr_simple_state->expr, Param)) ? 0 : -1;
+    }
+    plpgsql_estate->curr_nested_table_type = InvalidOid;
     *result = ExecEvalExpr(expr->expr_simple_state, econtext, isNull, NULL);
-#ifdef ENABLE_PRIVATEGAUSS
-    stp_check_transaction_and_create_econtext(estate,oldTransactionId);
-#endif

     /* for nested table, we need use nested table type as result type */
     if (plpgsql_estate && plpgsql_estate->curr_nested_table_type != InvalidOid) {
         HeapTuple tp;
@@ -8808,8 +9912,9 @@ static bool exec_eval_simple_expr(
         if (typtup->typtype != TYPTYPE_COMPOSITE)
             *rettype = plpgsql_estate->curr_nested_table_type;
         ReleaseSysCache(tp);
+        plpgsql_estate->curr_nested_table_type = InvalidOid;
     }
-#ifdef ENABLE_PRIVATEGAUSS
+#ifndef ENABLE_MULTIPLE_NODES
     if (SPI_get_top_transaction_id() == oldTransactionId) {
         econtext->is_cursor = false;
     }
@@ -8823,7 +9928,7 @@ static bool exec_eval_simple_expr(

     estate->cur_expr = save_cur_expr;

-    if (!estate->readonly_func && expr->expr_simple_need_snapshot) {
+    if (need_snapshot) {
         PopActiveSnapshot();
     }

@@ -8832,23 +9937,6 @@ static bool exec_eval_simple_expr(
     SPI_STACK_LOG("pop", NULL, NULL);
     SPI_pop();

-#ifdef ENABLE_PRIVATEGAUSS
-    /* When commit/rollback occurs
-     * the plan cache will be release refcount by resourceowner(except for oneshot plan) */
-    CachedPlanSource *plansource = (CachedPlanSource *)linitial(expr->plan->plancache_list);
-    oldOwner = AddCplanRefAgainIfNecessary(expr->plan, plansource, cplan, oldTransactionId, oldOwner);
-
-    ResourceOwner tmp = t_thrd.utils_cxt.CurrentResourceOwner;
-    t_thrd.utils_cxt.CurrentResourceOwner = oldOwner;
-#endif
-    /*
-     * Now we can release our refcount on the cached plan.
-     */
-    ReleaseCachedPlan(cplan, true);
-#ifdef ENABLE_PRIVATEGAUSS
-    t_thrd.utils_cxt.CurrentResourceOwner = tmp;
-#endif
-
     /*
      * That's it.
      */
@@ -8913,7 +10001,7 @@ static ParamListInfo setup_param_list(PLpgSQL_execstate* estate, PLpgSQL_expr* e
             prm->value = var->value;
             prm->isnull = var->isnull;
             prm->pflags = PARAM_FLAG_CONST;
-            prm->isnestedtable = (var->nest_table != NULL);
+            prm->tabInfo = NULL;
             if (var->nest_table) {
                 PLpgSQL_var* nest = var->nest_table;
                 while (nest != NULL && nest->datatype != NULL) {
@@ -8923,8 +10011,14 @@ static ParamListInfo setup_param_list(PLpgSQL_execstate* estate, PLpgSQL_expr* e
             } else {
                 prm->ptype = var->datatype->typoid;
             }
-            prm->tableOfIndexType = var->tableOfIndexType;
-            prm->tableOfIndex = var->tableOfIndex;
+            if (var->tableOfIndexType != InvalidOid) {
+                prm->tabInfo = (TableOfInfo*)palloc0(sizeof(TableOfInfo));
+                prm->tabInfo->isnestedtable = (var->nest_table != NULL);
+                prm->tabInfo->tableOfIndexType = var->tableOfIndexType;
+                prm->tabInfo->tableOfIndex = var->tableOfIndex;
+                prm->tabInfo->tableOfLayers = var->nest_layers;
+            }
+
             /* cursor as a parameter, its option is also needed */
             if (var->datatype->typoid == REFCURSOROID) {
@@ -9303,52 +10397,57 @@ static void exec_read_bulk_collect(PLpgSQL_execstate* estate, PLpgSQL_row* row,
         /* get associated var -- the target varray */
         context.elemtype = InvalidOid;
         context.var = get_bulk_collect_target(estate, row, fnum, &context.elemtype);
+        if (context.ntup == 0) {
+            context.valtype = context.elemtype;
+            exec_assign_bulk_collect(estate, context);
+        } else {
+            /* Get all datums for the corresponding field */
+            context.values = (Datum*)palloc0(sizeof(Datum) * context.ntup); /* all datums are stored here */
+            context.isnull = (bool*)palloc0(sizeof(bool) * context.ntup);   /* mark null datums */
+            for (int i = 0; i < context.ntup; i++) {
+                int t_natts = 0;
+                HeapTuple tup = tuptab->vals[i];
+                if (need_entire_row) {
+                    context.valtype = context.elemtype;
+                    context.values[i] = exec_tuple_get_composite(estate, context.tupdesc, tup, context.elemtype);
+                    context.isnull[i] = false;
+                } else {
+                    /* skip dropped column in tuple */
+                    t_natts = (HeapTupleIsValid(tup)) ? HeapTupleHeaderGetNatts(tup->t_data, context.tupdesc) : 0;
+                    while (anum < td_natts && context.tupdesc->attrs[anum]->attisdropped) {
+                        anum++;
+                    }

-        /* Get all datums for the corresponding field */
-        context.values = (Datum*)palloc0(sizeof(Datum) * context.ntup); /* all datums are stored here */
-        context.isnull = (bool*)palloc0(sizeof(bool) * context.ntup); /* mark null datums */
-        for (int i = 0; i < context.ntup; i++) {
-            int t_natts = 0;
-            HeapTuple tup = tuptab->vals[i];
-            if (need_entire_row) {
-                context.valtype = context.elemtype;
-                context.values[i] = exec_tuple_get_composite(estate, context.tupdesc, tup, context.elemtype);
-                context.isnull[i] = false;
-            } else {
-                /* skip dropped column in tuple */
-                t_natts = (HeapTupleIsValid(tup)) ? HeapTupleHeaderGetNatts(tup->t_data, context.tupdesc) : 0;
-                while (anum < td_natts && context.tupdesc->attrs[anum]->attisdropped) {
-                    anum++;
-                }
-
-                if (anum < td_natts) {
-                    if (anum < t_natts) {
-                        context.values[i] = SPI_getbinval(tup, context.tupdesc, anum + 1, &context.isnull[i]);
+                    if (anum < td_natts) {
+                        if (anum < t_natts) {
+                            context.values[i] = SPI_getbinval(tup, context.tupdesc, anum + 1, &context.isnull[i]);
+                        } else {
+                            context.values[i] = (Datum)0;
+                            context.isnull[i] = true;
+                        }
+                        context.valtype = SPI_gettypeid(context.tupdesc, anum + 1);
                     } else {
                         context.values[i] = (Datum)0;
                         context.isnull[i] = true;
                     }
-                    context.valtype = SPI_gettypeid(context.tupdesc, anum + 1);
-                } else {
-                    context.values[i] = (Datum)0;
-                    context.isnull[i] = true;
-                }
-                if (context.isnull[i] == false && CheckTypeIsCursor(row, context.valtype, fnum) &&
-                    estate->cursor_return_data != NULL) {
-                    /* error out for cursors */
-                    ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_PLSQL),
-                        errmsg("Unsupported bulk collect into target"),
-                        errdetail("Cursors in bulk collect are not supported"), errcause("N/A"),
-                        erraction("Please modify bulk collect into target")));
+                    if (context.isnull[i] == false && CheckTypeIsCursor(row, context.valtype, fnum) &&
+                        estate->cursor_return_data != NULL) {
+                        /* error out for cursors */
+                        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_PLSQL),
+                            errmsg("Unsupported bulk collect into target"),
+                            errdetail("Cursors in bulk collect are not supported"), errcause("N/A"),
+                            erraction("Please modify bulk collect into target")));
+                    }
                 }
             }
-        }

-        /* make a single array datum from the datums we collected before, and assign to var */
-        exec_assign_bulk_collect(estate, context);
+            /* make a single array datum from the datums we collected before, and assign to var */
+            exec_assign_bulk_collect(estate, context);

-        pfree_ext(context.values);
-        pfree_ext(context.isnull);
+            pfree_ext(context.values);
+            pfree_ext(context.isnull);
+        }
+        anum++; /* anum increments last */
     }
 }

@@ -9360,8 +10459,8 @@ static void exec_read_bulk_collect(PLpgSQL_execstate* estate, PLpgSQL_row* row,
 * exec_eval_cleanup to prevent long-term memory leaks.
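+ * fromExecSql is set when the row comes from an executed SQL statement, in
+ * which case the tuple descriptor and table-of indexes stashed by the called
+ * function (pass_func_tupdesc, func_tableof_index) are consumed and freed here.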
 * ----------
 */
-static void exec_move_row(
-    PLpgSQL_execstate* estate, PLpgSQL_rec* rec, PLpgSQL_row* row, HeapTuple tup, TupleDesc tupdesc)
+static void exec_move_row(PLpgSQL_execstate* estate,
+    PLpgSQL_rec* rec, PLpgSQL_row* row, HeapTuple tup, TupleDesc tupdesc, bool fromExecSql)
 {
     /*
      * Record is simple - just copy the tuple and its descriptor into the
@@ -9485,6 +10584,10 @@ static void exec_move_row(PLpgSQL_execstate* estate,
         bool needSplitByNattrs = (td_natts == 1 && row->nfields > 1) || (td_natts == 1 && row->nfields == 1);
         bool needSplitByType = tupTypeOid == rowTypeOid && tupTypeOid != InvalidOid;
+        if (u_sess->plsql_cxt.pass_func_tupdesc && tupdesc &&
+            u_sess->plsql_cxt.pass_func_tupdesc->natts == tupdesc->natts && fromExecSql) {
+            tupdesc = u_sess->plsql_cxt.pass_func_tupdesc;
+        }
         /*
          * in this case, we have a tuple of a composite type and its type is same as
          * row type(in rowtupdesc), but the row has more attrs than tuple, so we need
@@ -9543,13 +10646,33 @@ static void exec_move_row(PLpgSQL_execstate* estate,
                     valtype = InvalidOid;
                 }

+                if (valtype == CLOBOID && !isnull && VARATT_IS_HUGE_TOAST_POINTER(value)) {
+                    ereport(ERROR,
+                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                            errmsg("huge clob is not supported as a record element.")));
+                }
+
                 /* accept function's return value for cursor */
                 if (isnull == false && CheckTypeIsCursor(row, valtype, fnum) &&
                     estate->cursor_return_data != NULL) {
                     ExecCopyDataToDatum(estate->datums, var->dno, &estate->cursor_return_data[fnum]);
                 }

-                exec_assign_value(estate, (PLpgSQL_datum*)var, value, valtype, &isnull);
+                HTAB* tableOfIndex = NULL;
+                if (u_sess->plsql_cxt.func_tableof_index != NULL && fromExecSql) {
+                    ListCell* l = NULL;
+                    foreach (l, u_sess->plsql_cxt.func_tableof_index) {
+                        PLpgSQL_func_tableof_index* func_tableof = (PLpgSQL_func_tableof_index*)lfirst(l);
+                        if (func_tableof->varno == fnum) {
+                            tableOfIndex = func_tableof->tableOfIndex;
+                        }
+                    }
+                }
+                exec_assign_value(estate, (PLpgSQL_datum*)var, value, valtype, &isnull, tableOfIndex);
+            }
+            if (fromExecSql) {
+                free_func_tableof_index();
+                pfree_ext(u_sess->plsql_cxt.pass_func_tupdesc);
             }
         }
     } else if (row->intoplaceholders > 0 && row->intodatums != NULL) {
@@ -9649,6 +10772,38 @@ HeapTuple make_tuple_from_row(PLpgSQL_execstate* estate, PLpgSQL_row* row, Tuple
         } else {
             exec_eval_datum(estate, estate->datums[row->varnos[i]], &fieldtypeid, &fieldtypmod, &dvalues[i], &nulls[i]);
         }
+
+        if (is_external_clob(fieldtypeid, nulls[i], dvalues[i])) {
+            bool is_null = false;
+            bool is_have_huge_clob = false;
+            struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(dvalues[i]));
+            dvalues[i] = fetch_lob_value_from_tuple(lob_pointer, InvalidOid, &is_null, &is_have_huge_clob);
+            if (is_have_huge_clob) {
+                ereport(ERROR,
+                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("huge clob is not supported when concatenating a row")));
+            }
+            nulls[i] = is_null;
+        }
+
+        if (estate->is_exception && fieldtypeid == REFCURSOROID) {
+            if (DatumGetPointer(dvalues[i]) != NULL) {
+                char* curname = NULL;
+                curname = TextDatumGetCString(dvalues[i]);
+                Portal portal = SPI_cursor_find(curname);
+                if (portal == NULL || portal->status == PORTAL_FAILED) {
+                    dvalues[i] = 0;
+                    nulls[i] = true;
+                }
+                ereport(DEBUG3, (errmodule(MOD_PLSQL), errcode(ERRCODE_LOG),
+                    errmsg("RESET CURSOR NULL LOG: function: %s, set cursor: %s to null due to exception",
+                        estate->func->fn_signature, curname)));
+                pfree_ext(curname);
+            } else {
+                dvalues[i] = 0;
+                nulls[i] = true;
+            }
+        }

         if (fieldtypeid != tupdesc->attrs[i]->atttypid) {
             /* if table of type should check its array type */
             HeapTuple type_tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(tupdesc->attrs[i]->atttypid));
@@ -10135,7 +11290,7 @@ static bool exec_simple_check_node(Node* node)
  * of SPI.
  * ----------
  */
-static void exec_simple_check_plan(PLpgSQL_expr* expr)
+static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr* expr)
 {
     List* plansources = NIL;
     CachedPlanSource* plansource = NULL;
@@ -10196,10 +11351,19 @@ static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr* expr)
         query->cteList || query->jointree->quals || query->groupClause || query->havingQual ||
         query->windowClause || query->distinctClause || query->sortClause || query->limitOffset ||
         query->limitCount || query->setOperations;
+
     if (has_subplans) {
         return;
     }

+    /* Check whether any target entry is non-simple, e.g. the function's return value is a setof; if so, give up. */
+    for (int i = 0; i < list_length(query->targetList); i++) {
+        TargetEntry* tle = (TargetEntry*)list_nth(query->targetList, i);
+        if (!exec_simple_check_node((Node*)tle->expr)) {
+            return;
+        }
+    }
+
     /*
      * 4. The query must have a single attribute as result
      */
@@ -10217,13 +11381,94 @@ static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr* expr)
     /* Can't fail, because we checked for a single CachedPlanSource above */
     AssertEreport(cplan != NULL, MOD_PLSQL, "cplan should not be null");

-    /* Share the remaining work with recheck code path */
-    exec_simple_recheck_plan(expr, cplan);
+    /*
+     * Verify that plancache.c thinks the plan is simple enough to use
+     * CachedPlanIsSimplyValid. Given the restrictions above, it's unlikely
+     * that this could fail, but if it does, just treat plan as not simple. On
+     * success, save a refcount on the plan in the simple-expression resowner.
+     */
+    if (estate != NULL &&
+        CachedPlanAllowsSimpleValidityCheck(plansource, cplan, u_sess->plsql_cxt.shared_simple_eval_resowner)) {
+        /* Remember that we have the refcount */
+        expr->expr_simple_plansource = plansource;
+        expr->expr_simple_plan = cplan;
+        expr->expr_simple_plan_lxid = t_thrd.proc->lxid;
+
+        /* Share the remaining work with the replan code path */
+        exec_save_simple_expr(expr, cplan);
+    }
+    if (estate == NULL) {
+        /* Share the remaining work with recheck code path */
+        exec_simple_recheck_plan(expr, cplan);
+    }

     /* Release our plan refcount */
     ReleaseCachedPlan(cplan, true);
 }

+/*
+ * exec_save_simple_expr --- extract simple expression from CachedPlan
+ */
+static void exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan)
+{
+    PlannedStmt *stmt;
+    Plan *plan;
+    Expr *tle_expr;
+
+    /*
+     * Given the checks that exec_simple_check_plan did, none of the Asserts
+     * here should ever fail.
+     */
+
+    /* Extract the single PlannedStmt */
+    Assert(list_length(cplan->stmt_list) == 1);
+    stmt = linitial_node(PlannedStmt, cplan->stmt_list);
+    Assert(stmt->commandType == CMD_SELECT);
+
+    /*
+     * Ordinarily, the plan node should be a simple Result. However, if
+     * force_parallel_mode is on, the planner might've stuck a Gather node
+     * atop that. The simplest way to deal with this is to look through the
+     * Gather node. The Gather node's tlist would normally contain a Var
+     * referencing the child node's output, but it could also be a Param, or
+     * it could be a Const that setrefs.c copied as-is.
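+     * (In this port only a bare BaseResult node is accepted; any other plan
+     * node type raises "unexpected plan node type" below.)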
+     */
+    plan = stmt->planTree;
+    for (;;) {
+        /* Extract the single tlist expression */
+        Assert(list_length(plan->targetlist) == 1);
+        tle_expr = castNode(TargetEntry, linitial(plan->targetlist))->expr;
+
+        if (IsA(plan, BaseResult)) {
+            Assert(plan->lefttree == NULL &&
+                plan->righttree == NULL &&
+                plan->initPlan == NULL &&
+                plan->qual == NULL &&
+                ((BaseResult*) plan)->resconstantqual == NULL);
+            break;
+        } else {
+            ereport(ERROR,
+                (errcode(ERRCODE_SYNTAX_ERROR),
+                    errmodule(MOD_PLSQL),
+                    errmsg("unexpected plan node type: %d", (int)nodeTag(plan))));
+        }
+    }
+
+    /*
+     * Save the simple expression, and initialize state to "not valid in current transaction".
+     */
+    expr->expr_simple_expr = tle_expr;
+    expr->expr_simple_state = NULL;
+    expr->expr_simple_in_use = false;
+    expr->expr_simple_lxid = InvalidLocalTransactionId;
+    /* Also stash away the expression result type */
+    expr->expr_simple_type = exprType((Node *) tle_expr);
+    expr->expr_simple_typmod = exprTypmod((Node *) tle_expr);
+    /* We also want to remember if it is immutable or not */
+    expr->expr_simple_mutable = contain_mutable_functions((Node *) tle_expr);
+}
+
 /*
  * exec_simple_recheck_plan --- check for simple plan once we have CachedPlan
  */
@@ -10459,7 +11704,7 @@ static void exec_set_sql_rowcount(PLpgSQL_execstate* estate, int rowcount)
  * already for the current transaction. The EState will be cleaned up at
  * transaction end.
  */
-static void plpgsql_create_econtext(PLpgSQL_execstate* estate)
+static void plpgsql_create_econtext(PLpgSQL_execstate* estate, MemoryContext saveCxt)
 {
     SimpleEcontextStackEntry* entry = NULL;

@@ -10472,10 +11717,25 @@ static void plpgsql_create_econtext(PLpgSQL_execstate* estate)
         MemoryContext oldcontext;

         oldcontext = MemoryContextSwitchTo(u_sess->top_transaction_mem_cxt);
-        u_sess->plsql_cxt.simple_eval_estate = CreateExecutorState();
+        u_sess->plsql_cxt.simple_eval_estate = CreateExecutorState(saveCxt);
+
+        if (saveCxt != NULL) {
+            MemoryContextSetParent(saveCxt, u_sess->top_transaction_mem_cxt);
+        }
         MemoryContextSwitchTo(oldcontext);
     }

+    /*
+     * Likewise for the simple-expression resource owner.
+     */
+    if (u_sess->plsql_cxt.shared_simple_eval_resowner == NULL)
+    {
+        u_sess->plsql_cxt.shared_simple_eval_resowner =
+            ResourceOwnerCreate(t_thrd.utils_cxt.TopTransactionResourceOwner,
+                "PL/pgSQL simple expressions",
+                SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER));
+    }
+
     /*
      * Create a child econtext for the current function.
     */
@@ -10485,8 +11745,8 @@ static void plpgsql_create_econtext(PLpgSQL_execstate* estate)
      * Make a stack entry so we can clean up the econtext at subxact end.
      * Stack entries are kept in u_sess->top_transaction_mem_cxt for simplicity.
      */
-    entry = (SimpleEcontextStackEntry*)MemoryContextAlloc(
-        u_sess->top_transaction_mem_cxt, sizeof(SimpleEcontextStackEntry));
+    entry = (SimpleEcontextStackEntry*)MemoryContextAlloc(u_sess->top_transaction_mem_cxt,
+        sizeof(SimpleEcontextStackEntry));

     entry->stack_econtext = estate->eval_econtext;
     entry->xact_subxid = GetCurrentSubTransactionId();
@@ -10535,6 +11795,8 @@ static void plpgsql_destroy_econtext(PLpgSQL_execstate* estate)
         pfree_ext(plcontext);
         estate->eval_econtext = NULL;
     }
+
+    stp_cleanup_subxact_resowner(estate->stack_entry_start);
 }

 /*
@@ -10568,8 +11830,12 @@ void plpgsql_xact_cb(XactEvent event, void* arg)
         if (u_sess->plsql_cxt.simple_eval_estate)
             FreeExecutorState(u_sess->plsql_cxt.simple_eval_estate);
         u_sess->plsql_cxt.simple_eval_estate = NULL;
+        if (u_sess->plsql_cxt.shared_simple_eval_resowner)
+            ResourceOwnerReleaseAllPlanCacheRefs(u_sess->plsql_cxt.shared_simple_eval_resowner);
+        u_sess->plsql_cxt.shared_simple_eval_resowner = NULL;
     } else {
         u_sess->plsql_cxt.simple_eval_estate = NULL;
+        u_sess->plsql_cxt.shared_simple_eval_resowner = NULL;
     }
 }

@@ -10587,15 +11853,33 @@ void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid, SubTransac
         return;
     }

+    /*
+     * "-1" means it can be freed anywhere.
+     *
+     * 1. Subtransactions started outside this statement, except the last one, can be
+     *    freed at any time, for example:
+     *
+     *    BEGIN; savepoint s1; savepoint s2; proc1(); savepoint s3; savepoint s4; proc2();
+     *
+     *    1.1: s1's resowner can be freed anywhere in proc1();
+     *    1.2: those before s4 can be freed anywhere in proc2();
+     *
+     * 2. subtransactions in PL without a SimpleEcontextStackEntry
+     */
+    int64 minStackId = -1;
+
     while (u_sess->plsql_cxt.simple_econtext_stack != NULL &&
         u_sess->plsql_cxt.simple_econtext_stack->xact_subxid == mySubid) {
         SimpleEcontextStackEntry* next = NULL;

+        minStackId = u_sess->plsql_cxt.simple_econtext_stack->statckEntryId;
         FreeExprContext(u_sess->plsql_cxt.simple_econtext_stack->stack_econtext, (event == SUBXACT_EVENT_COMMIT_SUB));
         next = u_sess->plsql_cxt.simple_econtext_stack->next;
         pfree_ext(u_sess->plsql_cxt.simple_econtext_stack);
         u_sess->plsql_cxt.simple_econtext_stack = next;
     }
+
+    u_sess->plsql_cxt.minSubxactStackId = minStackId;
 }

 /*
@@ -10615,7 +11899,7 @@ static void free_var(PLpgSQL_var* var)
 /*
  * free old value of a text variable and assign new value from C string
  */
-static void assign_text_var(PLpgSQL_var* var, const char* str)
+void assign_text_var(PLpgSQL_var* var, const char* str)
 {
     free_var(var);
     var->value = CStringGetTextDatum(str);
@@ -10826,9 +12110,50 @@ static void exec_set_prev_sqlcode(PLpgSQL_execstate* estate, PLpgSQL_execstate*
             sqlcode_var->isnull = false;
         }
     }
-
 }

+#ifndef ENABLE_MULTIPLE_NODES
+static void exec_set_cursor_att_var(PLpgSQL_execstate* estate, PLpgSQL_execstate* estate_prev)
+{
+    PLpgSQL_var* var = NULL;
+    PLpgSQL_var* prev_var = NULL;
+
+    var = (PLpgSQL_var*)(estate->datums[estate->found_varno]);
+    prev_var = (PLpgSQL_var*)(estate_prev->datums[estate_prev->found_varno]);
+    var->value = prev_var->value;
+    var->isnull = prev_var->isnull;
+
+    var = (PLpgSQL_var*)(estate->datums[estate->sql_cursor_found_varno]);
+    prev_var = (PLpgSQL_var*)(estate_prev->datums[estate_prev->sql_cursor_found_varno]);
+    var->value = prev_var->value;
+    var->isnull = prev_var->isnull;
+
+    var = (PLpgSQL_var*)(estate->datums[estate->sql_notfound_varno]);
+    prev_var = (PLpgSQL_var*)(estate_prev->datums[estate_prev->sql_notfound_varno]);
+    var->value = prev_var->value;
+    var->isnull = prev_var->isnull;
+
+    var = (PLpgSQL_var*)(estate->datums[estate->sql_isopen_varno]);
+    prev_var = (PLpgSQL_var*)(estate_prev->datums[estate_prev->sql_isopen_varno]);
+    var->value = prev_var->value;
+    var->isnull = prev_var->isnull;
+
+    var = (PLpgSQL_var*)(estate->datums[estate->sql_rowcount_varno]);
+    prev_var = (PLpgSQL_var*)(estate_prev->datums[estate_prev->sql_rowcount_varno]);
+    var->value = prev_var->value;
+    var->isnull = prev_var->isnull;
+}
+
+void estate_cursor_set(FormatCallStack* plcallstack)
+{
+    if (plcallstack != NULL && plcallstack->prev != NULL &&
+        u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) {
+        PLpgSQL_execstate* estate_pre = (PLpgSQL_execstate*)(plcallstack->prev->elem);
+        PLpgSQL_execstate* estate_exec = (PLpgSQL_execstate*)plcallstack->elem;
+        exec_set_cursor_att_var(estate_pre, estate_exec);
+    }
+}
+#endif
+
 /* transform anonymous block in dynamic statments. */
 #define SQLSTART "SELECT \'"
 #define SQLEND "\'"
@@ -10938,13 +12263,43 @@ void ResetCursorOption(Portal portal, bool reset)
     }

     /* unbind cursor attributes with portal */
-    for (int i = 0; i < CURSOR_ATTRIBUTE_NUMBER; i++) {
-        portal->cursorAttribute[i] = NULL;
+#ifndef ENABLE_MULTIPLE_NODES
+    if (!portal->isPkgCur) {
+#endif
+        for (int i = 0; i < CURSOR_ATTRIBUTE_NUMBER; i++) {
+            portal->cursorAttribute[i] = NULL;
+        }
+#ifndef ENABLE_MULTIPLE_NODES
     }
+#endif

     portal->funcOid = InvalidOid;
     portal->funcUseCount = 0;
 }

+#ifndef ENABLE_MULTIPLE_NODES
+void ResetCursorAtrribute(Portal portal)
+{
+    if (!portal->isPkgCur) {
+        return;
+    }
+    if (portal->cursorAttribute[0] == NULL) {
+        return;
+    }
+
+    PLpgSQL_var *var = (PLpgSQL_var*)portal->cursorAttribute[CURSOR_ISOPEN - 1];
+    var->isnull = false;
+    var->value = 0;
+    var = (PLpgSQL_var*)portal->cursorAttribute[CURSOR_FOUND - 1];
+    var->isnull = true;
+    var->value = (Datum)0;
+    var = (PLpgSQL_var*)portal->cursorAttribute[CURSOR_NOTFOUND - 1];
+    var->isnull = true;
+    var->value = (Datum)0;
+    var = (PLpgSQL_var*)portal->cursorAttribute[CURSOR_ROWCOUNT - 1];
+    var->isnull = true;
+    var->value = (Datum)0;
+}
+#endif

 static void stp_check_transaction_and_set_resource_owner(ResourceOwner oldResourceOwner, TransactionId oldTransactionId)
 {
@@ -10983,7 +12338,7 @@ void plpgsql_HashTableDeleteAll()
     }
 }

-static bool CheckInvalItemDependencyForFunc(List* invalItems, int cacheid, uint32 objid)
+static bool plpgsql_check_invalid_by_dependency(List* invalItems, int cacheid, uint32 objid)
 {
     if (invalItems == NULL)
         return false;
@@ -11005,68 +12360,156 @@ static bool plpgsql_check_invalid_by_dependency(List* invalItems, int cacheid, uint32 objid)
 }

 /*
- * Check dependency for function and package's hash table
+ * Check dependency for function and package's hash table,
+ * and delete the invalid package or function from the session.
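+ * A function is dropped from the session cache directly; a package is
+ * deleted and the invalidation then cascades to dependent functions and
+ * packages via invalid_depend_func_and_packgae().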
  * cacheId: oid of object's type
  * objId: oid of object
  */
-void plpgsql_HashTableDeleteAndCheckFunc(int cacheId, Oid objId)
+void plpgsql_hashtable_delete_and_check_invalid_item(int classId, Oid objId)
 {
-    if (unlikely(u_sess->plsql_cxt.plpgsql_HashTable == NULL))
-        return;
-    bool isPkg = false;
-    if (cacheId == PACKAGEOID) {
-        isPkg = true;
-    }
-    u_sess->plsql_cxt.is_delete_function = true;
-    HASH_SEQ_STATUS hash_seq;
-    hash_seq_init(&hash_seq, u_sess->plsql_cxt.plpgsql_HashTable);
-    plpgsql_HashEnt* hentry = NULL;
-    while ((hentry = (plpgsql_HashEnt *)hash_seq_search(&hash_seq)) != NULL) {
-        PLpgSQL_function* func = hentry->function;
-        /* check dependency */
-        bool isInvalid = CheckInvalItemDependencyForFunc(func->invalItems, cacheId, objId);
-        if (isInvalid || (!isPkg && hentry->key.funcOid == objId)) {
-            if (func->use_count == 0) {
-                uint32 key = SPICacheHashFunc((void*)(&hentry->key), sizeof(PLpgSQL_func_hashkey));
-                plpgsql_HashTableDelete(func);
-                SPIPlanCacheTableDelete(key);
-                plpgsql_free_function_memory(func);
+    if (classId == PROCOID) {
+        if (likely(u_sess->plsql_cxt.plpgsql_HashTable != NULL)) {
+            HASH_SEQ_STATUS hash_seq;
+            hash_seq_init(&hash_seq, u_sess->plsql_cxt.plpgsql_HashTable);
+            plpgsql_HashEnt* hentry = NULL;
+            while ((hentry = (plpgsql_HashEnt *)hash_seq_search(&hash_seq)) != NULL) {
+                PLpgSQL_function* func = hentry->function;
+                /* a func in a package will be invalidated by the package */
+                if (hentry->key.funcOid == objId) {
+                    if (!OidIsValid(func->pkg_oid)) {
+                        delete_function(func);
+                    }
+                    hash_seq_term(&hash_seq);
+                    return;
+                }
             }
         }
-        /* sub refcount for dependency pkg */
-        if (isInvalid && isPkg) {
-            PLpgSQL_pkg_hashkey hashkey;
-            hashkey.pkgOid = objId;
-            PLpgSQL_package* getpkg = plpgsql_pkg_HashTableLookup(&hashkey);
-            if (getpkg)
-                getpkg->use_count--;
+        return;
+    }
+
+    /*
+     * When compiling, invalidating the package may cause a conflict, so ignore it.
+     * A better way may be to record it and handle it later; this will be supported
+     * in the future.
+     */
+    if (u_sess->plsql_cxt.curr_compile_context != NULL) {
+        return;
+    }
+
+    PLpgSQL_pkg_hashkey hashkey;
+    hashkey.pkgOid = objId;
+    PLpgSQL_package* getpkg = plpgsql_pkg_HashTableLookup(&hashkey);
+    if (getpkg == NULL) {
+        return;
+    }
+
+    /* delete the invalid package from the session */
+    delete_package(getpkg);
+
+    /*
+     * find packages and funcs that depend on this package;
+     * funcs will be invalidated in invalid_depend_func_and_packgae(),
+     * packages will be marked and returned as a list
+     */
+    List* invalidPkgList = invalid_depend_func_and_packgae(objId);
+    ListCell* cell = NULL;
+
+    /* delete the marked packages */
+    foreach(cell, invalidPkgList) {
+        plpgsql_hashtable_delete_and_check_invalid_item(PACKAGEOID, lfirst_oid(cell));
+    }
+    list_free_ext(invalidPkgList);
+}
+
+/*
+ * Check dependency for package's hash table,
+ * and delete the invalid package or function from the session.
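+ * Reports a concurrent-compile error if the package is being compiled in
+ * this session, and checks the current compilation for dependencies via
+ * CheckCurrCompileDependOnPackage() before deleting.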
+ * cacheId: oid of object's type + * objId: oid of object + */ +void delete_package_and_check_invalid_item(Oid pkgOid) +{ + /* if compile the package now, report error */ + if (u_sess->plsql_cxt.curr_compile_context != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid == pkgOid) { + ReportCompileConcurrentError( + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_signature, true); + } + + PLpgSQL_pkg_hashkey hashkey; + hashkey.pkgOid = pkgOid; + PLpgSQL_package* getpkg = plpgsql_pkg_HashTableLookup(&hashkey); + if (getpkg != NULL) { + /* check if have conflict with current compile */ + CheckCurrCompileDependOnPackage(pkgOid); + delete_package(getpkg); + } else { + return; + } + + /* + * find packages and funcs depend on this package, + * funcs will be invalided in invalid_depend_func_and_packgae(), + * packages will be marked and return as a list + */ + List* invalidPkgList = invalid_depend_func_and_packgae(pkgOid); + ListCell* cell = NULL; + + /* delete the marked packages */ + foreach(cell, invalidPkgList) { + delete_package_and_check_invalid_item(lfirst_oid(cell)); + } + list_free_ext(invalidPkgList); +} + +/* + * find packages and funcs depend on this package, + * funcs will be invalided in invalid_depend_func_and_packgae(), + * packages will be marked and return as a list + */ +static List* invalid_depend_func_and_packgae(Oid pkgOid) +{ + List* invalidPkgList = NIL; + + /* search depend func first */ + if (likely(u_sess->plsql_cxt.plpgsql_HashTable != NULL)) { + HASH_SEQ_STATUS hash_seq; + hash_seq_init(&hash_seq, u_sess->plsql_cxt.plpgsql_HashTable); + plpgsql_HashEnt* hentry = NULL; + while ((hentry = (plpgsql_HashEnt *)hash_seq_search(&hash_seq)) != NULL) { + PLpgSQL_function* func = hentry->function; + /* check invalid by dependency, func->invalItems record the dependency */ + bool funcIsInvalid = plpgsql_check_invalid_by_dependency(func->invalItems, PACKAGEOID, pkgOid); + /* ignore func in this package */ + if (funcIsInvalid && func->pkg_oid != pkgOid) { + if (func->use_count == 0 && !OidIsValid(func->pkg_oid)) { + delete_function(func); + } else if (OidIsValid(func->pkg_oid)) { + /* mark the package, and delete them later */ + invalidPkgList = list_append_unique_oid(invalidPkgList, func->pkg_oid); + } + } } } - if (isPkg && u_sess->plsql_cxt.plpgsql_pkg_HashTable != NULL) { + + /* search depend package then */ + if (u_sess->plsql_cxt.plpgsql_pkg_HashTable != NULL) { HASH_SEQ_STATUS hash_pkgseq; hash_seq_init(&hash_pkgseq, u_sess->plsql_cxt.plpgsql_pkg_HashTable); plpgsql_pkg_HashEnt* pkghentry = NULL; while ((pkghentry = (plpgsql_pkg_HashEnt *)hash_seq_search(&hash_pkgseq)) != NULL) { PLpgSQL_package* pkg = pkghentry->package; - /* check dependency */ - bool isInvalid = CheckInvalItemDependencyForFunc(pkg->invalItems, cacheId, objId); - if (isInvalid || pkg->pkg_oid == objId) { - delete_package(pkg); - /* Restart the iteration */ - hash_seq_term(&hash_pkgseq); - hash_seq_init(&hash_pkgseq, u_sess->plsql_cxt.plpgsql_pkg_HashTable); - } - /* sub refcount for dependency pkg */ - if (isInvalid) { - PLpgSQL_pkg_hashkey hashkey; - hashkey.pkgOid = objId; - PLpgSQL_package* getpkg = plpgsql_pkg_HashTableLookup(&hashkey); - if (getpkg) - getpkg->use_count--; + /* check invalid by dependency, pkg->invalItems record the dependency */ + bool pkgIsInvalid = plpgsql_check_invalid_by_dependency(pkg->invalItems, PACKAGEOID, pkgOid); + /* delete the 
+/*
+ * find packages and funcs that depend on this package;
+ * funcs will be invalidated in invalid_depend_func_and_packgae(),
+ * packages will be marked and returned as a list
+ */
+static List* invalid_depend_func_and_packgae(Oid pkgOid)
+{
+    List* invalidPkgList = NIL;
+
+    /* search dependent funcs first */
+    if (likely(u_sess->plsql_cxt.plpgsql_HashTable != NULL)) {
+        HASH_SEQ_STATUS hash_seq;
+        hash_seq_init(&hash_seq, u_sess->plsql_cxt.plpgsql_HashTable);
+        plpgsql_HashEnt* hentry = NULL;
+        while ((hentry = (plpgsql_HashEnt *)hash_seq_search(&hash_seq)) != NULL) {
+            PLpgSQL_function* func = hentry->function;
+            /* check invalidation by dependency; func->invalItems records the dependencies */
+            bool funcIsInvalid = plpgsql_check_invalid_by_dependency(func->invalItems, PACKAGEOID, pkgOid);
+            /* ignore funcs in this package */
+            if (funcIsInvalid && func->pkg_oid != pkgOid) {
+                if (func->use_count == 0 && !OidIsValid(func->pkg_oid)) {
+                    delete_function(func);
+                } else if (OidIsValid(func->pkg_oid)) {
+                    /* mark the package, and delete them later */
+                    invalidPkgList = list_append_unique_oid(invalidPkgList, func->pkg_oid);
+                }
+            }
         }
     }
-    if (isPkg && u_sess->plsql_cxt.plpgsql_pkg_HashTable != NULL) {
+
+    /* then search dependent packages */
+    if (u_sess->plsql_cxt.plpgsql_pkg_HashTable != NULL) {
         HASH_SEQ_STATUS hash_pkgseq;
         hash_seq_init(&hash_pkgseq, u_sess->plsql_cxt.plpgsql_pkg_HashTable);
         plpgsql_pkg_HashEnt* pkghentry = NULL;
         while ((pkghentry = (plpgsql_pkg_HashEnt *)hash_seq_search(&hash_pkgseq)) != NULL) {
             PLpgSQL_package* pkg = pkghentry->package;
-            /* check dependency */
-            bool isInvalid = CheckInvalItemDependencyForFunc(pkg->invalItems, cacheId, objId);
-            if (isInvalid || pkg->pkg_oid == objId) {
-                delete_package(pkg);
-                /* Restart the iteration */
-                hash_seq_term(&hash_pkgseq);
-                hash_seq_init(&hash_pkgseq, u_sess->plsql_cxt.plpgsql_pkg_HashTable);
-            }
-            /* sub refcount for dependency pkg */
-            if (isInvalid) {
-                PLpgSQL_pkg_hashkey hashkey;
-                hashkey.pkgOid = objId;
-                PLpgSQL_package* getpkg = plpgsql_pkg_HashTableLookup(&hashkey);
-                if (getpkg)
-                    getpkg->use_count--;
+            /* check invalidation by dependency; pkg->invalItems records the dependencies */
+            bool pkgIsInvalid = plpgsql_check_invalid_by_dependency(pkg->invalItems, PACKAGEOID, pkgOid);
+            /* delete the invalid package, skipping the original one, which we have already deleted */
+            if (pkgIsInvalid && pkg->pkg_oid != pkgOid) {
+                /* mark the package, and delete them later */
+                invalidPkgList = list_append_unique_oid(invalidPkgList, pkg->pkg_oid);
             }
         }
     }
-    u_sess->plsql_cxt.is_delete_function = false;
+    return invalidPkgList;
 }
 
 /*
@@ -11171,7 +12614,7 @@ static DynParamsData* validate_using_params(List* params, PLpgSQL_function* func
         param->plan = plan;
     }
     /* Check to see if it's a simple expression */
-    exec_simple_check_plan(param);
+    exec_simple_check_plan(NULL, param);
 
     ppd->types[i] = param->expr_simple_type;
 
@@ -11463,6 +12906,7 @@ void pl_validate_stmt_block_in_subtransaction(PLpgSQL_stmt_block *block, PLpgSQL
 static void exec_savepoint_rollback(PLpgSQL_execstate *estate, const char *spName)
 {
     SPI_savepoint_rollback(spName);
+    stp_cleanup_subxact_resowner(estate->stack_entry_start);
 
     plpgsql_create_econtext(estate);
 
@@ -11485,6 +12929,12 @@ static int exec_stmt_savepoint(PLpgSQL_execstate *estate, PLpgSQL_stmt* stmt)
     if (u_sess->SPI_cxt.portal_stp_exception_counter == 0 && u_sess->plsql_cxt.stp_savepoint_cnt == 0) {
         t_thrd.utils_cxt.STPSavedResourceOwner = t_thrd.utils_cxt.CurrentResourceOwner;
     }
+#ifndef ENABLE_MULTIPLE_NODES
+    /* implicit cursor attribute variables should be reset on savepoint */
+    if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && COMPAT_CURSOR) {
+        reset_implicit_cursor_attr(estate);
+    }
+#endif
 
     switch (spstmt->opType) {
         case PLPGSQL_SAVEPOINT_CREATE:
@@ -11496,6 +12946,7 @@ static int exec_stmt_savepoint(PLpgSQL_execstate *estate, PLpgSQL_stmt* stmt)
             break;
         case PLPGSQL_SAVEPOINT_RELEASE:
             SPI_savepoint_release(spstmt->spName);
+            stp_cleanup_subxact_resowner(estate->stack_entry_start);
             /*
              * Revert to outer eval_econtext. (The inner one was
              * automatically cleaned up during subxact exit.)
              */
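Both the rollback and release paths above now call stp_cleanup_subxact_resowner(estate->stack_entry_start), freeing subtransaction resource owners created above the frame's starting stack entry. A minimal sketch of that stack-mark cleanup idea, with a toy owner stack rather than the real resource-owner machinery:

```
#include <cstdio>
#include <string>
#include <vector>

// Toy resource-owner stack: one entry per live subtransaction.
static std::vector<std::string> g_subxactOwners;

// Mirrors the idea behind stp_cleanup_subxact_resowner(stack_entry_start):
// free every owner created at or above the frame's starting stack mark.
static void cleanup_subxact_resowner(size_t stackEntryStart)
{
    while (g_subxactOwners.size() > stackEntryStart) {
        std::printf("releasing %s\n", g_subxactOwners.back().c_str());
        g_subxactOwners.pop_back();
    }
}

int main()
{
    size_t frameStart = g_subxactOwners.size();   // mark taken when the frame starts
    g_subxactOwners.push_back("savepoint sp1");
    g_subxactOwners.push_back("savepoint sp2");
    // RELEASE (or rollback) of the savepoints: everything above the mark goes.
    cleanup_subxact_resowner(frameStart);
    return 0;
}
```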
@@ -11522,14 +12973,29 @@ static int exec_stmt_savepoint(PLpgSQL_execstate *estate, PLpgSQL_stmt* stmt)
 int plpgsql_estate_adddatum(PLpgSQL_execstate* estate, PLpgSQL_datum* newm)
 {
     /* resize if necessary */
-    if (estate->datums_alloc == estate->ndatums) {
-        estate->datums_alloc *= 2;
-        estate->datums = (PLpgSQL_datum**)repalloc(
-            estate->datums, sizeof(PLpgSQL_datum*) * estate->datums_alloc);
+    if (newm->ispkg) {
+        PLpgSQL_var* var = (PLpgSQL_var*)newm;
+        if (var->pkg->ndatums == var->pkg->datums_alloc) {
+            var->pkg->datums_alloc *= 2;
+            var->pkg->datums = (PLpgSQL_datum**)repalloc(
+                var->pkg->datums, sizeof(PLpgSQL_datum*) * var->pkg->datums_alloc);
+            var->pkg->datum_need_free = (bool*)repalloc(
+                var->pkg->datum_need_free, sizeof(bool) * var->pkg->datums_alloc);
+        }
+        newm->dno = var->pkg->ndatums;
+        var->pkg->datums[newm->dno] = newm;
+        var->pkg->datum_need_free[newm->dno] = true;
+        var->pkg->ndatums++;
+    } else {
+        if (estate->datums_alloc == estate->ndatums) {
+            estate->datums_alloc *= 2;
+            estate->datums = (PLpgSQL_datum**)repalloc(
+                estate->datums, sizeof(PLpgSQL_datum*) * estate->datums_alloc);
+        }
+        newm->dno = estate->ndatums;
+        estate->datums[newm->dno] = newm;
+        estate->ndatums++;
     }
-    newm->dno = estate->ndatums;
-    estate->datums[estate->ndatums] = newm;
-    estate->ndatums++;
 
     return newm->dno;
 }
@@ -11559,6 +13025,7 @@ PLpgSQL_var* copyPlpgsqlVar(PLpgSQL_var* src)
     dest->tableOfIndexType = src->tableOfIndexType;
     dest->tableOfIndex = copyTableOfIndex(src->tableOfIndex);
     dest->nest_table = NULL;
+    dest->nest_layers = src->nest_layers;
     return dest;
 }
@@ -11737,3 +13204,102 @@ static PLpgSQL_recfield* copyPLpgsqlRecfield(PLpgSQL_recfield* src)
     dest->recparentno = src->recparentno;
     return dest;
 }
+
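plpgsql_estate_adddatum() now appends into one of two owners, the package's datum array or the estate's, doubling the allocation when full and tracking per-slot ownership in a parallel datum_need_free array. A compact sketch of that doubling append with a parallel needs-free flag, using malloc/realloc as a toy analog of repalloc (error handling omitted for brevity):

```
#include <cstdio>
#include <cstdlib>

// Toy datum array with doubling growth and a parallel "needs free" flag,
// mirroring datums/datum_need_free in the patch (not the real structures).
struct DatumArray {
    void** datums;
    bool*  needFree;
    int    ndatums;
    int    alloc;
};

static int datum_array_add(DatumArray* da, void* d, bool needFree)
{
    if (da->ndatums == da->alloc) {              // resize if necessary
        da->alloc *= 2;                          // amortized O(1) appends
        da->datums = (void**)realloc(da->datums, sizeof(void*) * da->alloc);
        da->needFree = (bool*)realloc(da->needFree, sizeof(bool) * da->alloc);
    }
    da->datums[da->ndatums] = d;
    da->needFree[da->ndatums] = needFree;
    return da->ndatums++;                        // dno of the new datum
}

int main()
{
    DatumArray da = {(void**)malloc(sizeof(void*)), (bool*)malloc(sizeof(bool)), 0, 1};
    int x = 1, y = 2;
    std::printf("dno=%d\n", datum_array_add(&da, &x, true));
    std::printf("dno=%d\n", datum_array_add(&da, &y, false));  // triggers growth
    free(da.datums);
    free(da.needFree);
    return 0;
}
```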
+void CheckCurrCompileDependOnPackage(Oid pkgOid)
+{
+    /* not compiling, just return */
+    if (u_sess->plsql_cxt.curr_compile_context == NULL) {
+        return;
+    }
+
+    PLpgSQL_compile_context* curr_compile = u_sess->plsql_cxt.curr_compile_context;
+    if (curr_compile->plpgsql_curr_compile_package != NULL) {
+        /* compiling this package now, report an error */
+        if (pkgOid == curr_compile->plpgsql_curr_compile_package->pkg_oid) {
+            ReportCompileConcurrentError(curr_compile->plpgsql_curr_compile_package->pkg_signature, true);
+        }
+        ListCell* l;
+        PLpgSQL_function* package_func = NULL;
+        /* check whether the currently compiled package's funcs depend on this package */
+        foreach(l, curr_compile->plpgsql_curr_compile_package->proc_compiled_list) {
+            package_func = (PLpgSQL_function*)lfirst(l);
+            List *invalItems = package_func->invalItems;
+            ListCell *lc = NULL;
+            FuncInvalItem* item = NULL;
+            foreach(lc, invalItems) {
+                item = (FuncInvalItem*)lfirst(lc);
+                if (item->objId == pkgOid) {
+                    ReportCompileConcurrentError(package_func->fn_signature, false);
+                }
+            }
+        }
+    }
+
+    /* check the currently compiled function */
+    if (curr_compile->plpgsql_curr_compile != NULL) {
+        /* check whether the function belongs to this package */
+        if (curr_compile->plpgsql_curr_compile->pkg_oid == pkgOid) {
+            ReportCompileConcurrentError(curr_compile->plpgsql_curr_compile->fn_signature, false);
+        }
+
+        /* check whether the function depends on this package */
+        List *invalItems = curr_compile->plpgsql_curr_compile->invalItems;
+        ListCell *lc = NULL;
+        FuncInvalItem* item = NULL;
+        foreach(lc, invalItems) {
+            item = (FuncInvalItem*)lfirst(lc);
+            if (item->objId == pkgOid) {
+                ReportCompileConcurrentError(curr_compile->plpgsql_curr_compile->fn_signature, false);
+            }
+        }
+    }
+}
+
+void ReportCompileConcurrentError(const char* objName, bool isPackage)
+{
+    const char *className = isPackage ? "package" : "procedure";
+    ereport(ERROR,
+        (errmodule(MOD_PLSQL), errcode(ERRCODE_PLPGSQL_ERROR),
+            errmsg("concurrent error when compiling package or procedure."),
+            errdetail("while compiling %s \"%s\", it was invalidated by another session.",
+                className, objName),
+            errcause("excessive concurrency"),
+            erraction("reduce concurrency and retry")));
+}
+
+/*
+ * check whether the assign target is valid;
+ * currently the only check is that the assign target cannot be an
+ * autonomous procedure's out ref_cursor argument
+ */
+#ifndef ENABLE_MULTIPLE_NODES
+static void CheckAssignTarget(PLpgSQL_execstate* estate, int dno)
+{
+    /* only consider autonomous transaction procedures */
+    if (u_sess->is_autonomous_session != true || u_sess->SPI_cxt._connected != 0) {
+        return;
+    }
+
+    /* only consider ref cursor vars */
+    PLpgSQL_datum* target = estate->datums[dno];
+    if (target->dtype != PLPGSQL_DTYPE_VAR) {
+        return;
+    }
+
+    PLpgSQL_var* targetVar = (PLpgSQL_var*)target;
+    if (targetVar->datatype->typoid != REFCURSOROID) {
+        return;
+    }
+
+    if (IsAutoOutParam(estate, NULL, dno)) {
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmodule(MOD_PLSQL),
+                errmsg("autonomous procedure out ref cursor parameter cannot be assigned"),
+                errdetail("procedure \"%s\" out parameter \"%s\" does not support being assigned a value",
+                    estate->func->fn_signature, targetVar->varname == NULL ?
targetVar->refname : targetVar->varname), + errcause("feature not supported"), + erraction("use open cursor instead of assign value"))); + } +} +#endif diff --git a/src/common/pl/plpgsql/src/pl_funcs.cpp b/src/common/pl/plpgsql/src/pl_funcs.cpp index 05395119d..07b64b4c8 100644 --- a/src/common/pl/plpgsql/src/pl_funcs.cpp +++ b/src/common/pl/plpgsql/src/pl_funcs.cpp @@ -162,6 +162,7 @@ void plpgsql_ns_additem(int itemtype, int itemno, const char* name, const char* AssertEreport(curr_compile->ns_top != NULL || itemtype == PLPGSQL_NSTYPE_LABEL, MOD_PLSQL, ""); if (curr_compile->plpgsql_curr_compile_package != NULL) { + checkCompileMemoryContext(curr_compile->plpgsql_curr_compile_package->pkg_cxt); temp = MemoryContextSwitchTo(curr_compile->plpgsql_curr_compile_package->pkg_cxt); } @@ -268,7 +269,11 @@ PLpgSQL_nsitem* plpgsql_ns_lookup( char* currCompilePackageName = NULL; char* currCompilePackageSchemaName = NULL; rc = CompileWhich(); - if (rc == PLPGSQL_COMPILE_PACKAGE_PROC || rc == PLPGSQL_COMPILE_PACKAGE) { + if (OidIsValid(u_sess->plsql_cxt.running_pkg_oid)) { + NameData* CompilePackageName = GetPackageName(u_sess->plsql_cxt.running_pkg_oid); + currCompilePackageName = CompilePackageName->data; + currCompilePackageSchemaName = GetPackageSchemaName(u_sess->plsql_cxt.running_pkg_oid); + } else if (rc == PLPGSQL_COMPILE_PACKAGE_PROC || rc == PLPGSQL_COMPILE_PACKAGE) { currCompilePackageName = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_signature; currCompilePackageSchemaName = GetPackageSchemaName(); } @@ -280,11 +285,6 @@ PLpgSQL_nsitem* plpgsql_ns_lookup( continue; } bool isSamePackage = false; - if (OidIsValid(u_sess->plsql_cxt.running_pkg_oid) && currCompilePackageName == NULL) { - NameData* CompilePackageName = GetPackageName(u_sess->plsql_cxt.running_pkg_oid); - currCompilePackageName = CompilePackageName->data; - currCompilePackageSchemaName = GetPackageSchemaName(u_sess->plsql_cxt.running_pkg_oid); - } if (currCompilePackageName != NULL && nsitem->pkgname != NULL) { if (strcmp(currCompilePackageName, nsitem->pkgname) == 0) { if (nsitem->schemaName == NULL) { @@ -888,7 +888,7 @@ void free_expr(PLpgSQL_expr* expr) } } -void plpgsql_free_function_memory(PLpgSQL_function* func) +void plpgsql_free_function_memory(PLpgSQL_function* func, bool fromPackage) { int i; @@ -897,8 +897,7 @@ void plpgsql_free_function_memory(PLpgSQL_function* func) /* * function which in package not free memory alone */ - if (u_sess->plsql_cxt.curr_compile_context != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL) { + if (OidIsValid(func->pkg_oid) && !fromPackage) { return; } @@ -906,7 +905,7 @@ void plpgsql_free_function_memory(PLpgSQL_function* func) for (i = 0; i < func->ndatums; i++) { PLpgSQL_datum* d = func->datums[i]; if (d != NULL) { - if (d->ispkg == true) { + if (!func->datum_need_free[i]) { continue; } } else { @@ -985,6 +984,10 @@ void plpgsql_free_function_memory(PLpgSQL_function* func) } pfree_ext(func->fn_searchpath); } + if (func->invalItems != NULL) { + list_free_deep(func->invalItems); + func->invalItems = NULL; + } /* * And finally, release all memory except the PLpgSQL_function struct @@ -1010,17 +1013,11 @@ void plpgsql_free_function_memory(PLpgSQL_function* func) void plpgsql_free_package_memory(PLpgSQL_package* pkg) { int i; - ListCell* l; - foreach(l, pkg->proc_compiled_list) { - if (((PLpgSQL_function*)lfirst(l))->use_count > 0) - return; - } - foreach(l, pkg->proc_compiled_list) { - PLpgSQL_function* func = 
(PLpgSQL_function*)lfirst(l); - plpgsql_free_function_memory(func); - } /* Release plans associated with variable declarations */ for (i = 0; i < pkg->ndatums; i++) { + if (!pkg->datum_need_free[i]) { + continue; + } PLpgSQL_datum* d = pkg->datums[i]; switch (d->dtype) { case PLPGSQL_DTYPE_VARRAY: @@ -1073,6 +1070,12 @@ void plpgsql_free_package_memory(PLpgSQL_package* pkg) } pfree_ext(pkg->pkg_searchpath); } + + if (pkg->invalItems != NULL) { + list_free_deep(pkg->invalItems); + pkg->invalItems = NULL; + } + /* * And finally, release all memory except the PLpgSQL_function struct * itself (which has to be kept around because there may be multiple diff --git a/src/common/pl/plpgsql/src/pl_global_package_runtime_cache.cpp b/src/common/pl/plpgsql/src/pl_global_package_runtime_cache.cpp index eef8406cc..05e08da4a 100644 --- a/src/common/pl/plpgsql/src/pl_global_package_runtime_cache.cpp +++ b/src/common/pl/plpgsql/src/pl_global_package_runtime_cache.cpp @@ -38,45 +38,43 @@ static int getPartition(uint32 hashCode) return hashCode % NUM_GPRC_PARTITIONS; } -static uint32 gprcHash(const void* key, Size keysize) +static uint32 GprcHash(const void* key, Size keysize) { return DatumGetUInt32(DirectFunctionCall1(hashint8, Int64GetDatumFast(*((uint64*)key)))); } -static int gprcMatch(const void *left, const void *right, Size keysize) +static int GprcMatch(const void *left, const void *right, Size keysize) { uint64 *leftItem = (uint64*)left; uint64 *rightItem = (uint64*)right; - return *leftItem == *rightItem; + if (*leftItem == *rightItem) { + return 0; + } else { + return 1; + } } -PLGlobalPackageRuntimeCache::PLGlobalPackageRuntimeCache() -{ - init(); -} +volatile bool PLGlobalPackageRuntimeCache::inited = false; -PLGlobalPackageRuntimeCache::~PLGlobalPackageRuntimeCache() -{ -} - -void PLGlobalPackageRuntimeCache::init() +void PLGlobalPackageRuntimeCache::Init() { + inited = true; HASHCTL ctl; errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); securec_check(rc, "\0", "\0"); ctl.keysize = sizeof(uint64); ctl.entrysize = sizeof(GPRCValue); - ctl.hash = gprcHash; - ctl.match = gprcMatch; + ctl.hash = GprcHash; + ctl.match = GprcMatch; const int hashSize = 1024; int flags = HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE | HASH_EXTERN_CONTEXT | HASH_NOEXCEPT; hashArray = (GPRCHashCtl*) - MemoryContextAllocZero(GLOBAL_PLANCACHE_MEMCONTEXT, sizeof(GPRCHashCtl) * NUM_GPRC_PARTITIONS); + MemoryContextAllocZero(GLOBAL_PRC_MEMCONTEXT, sizeof(GPRCHashCtl) * NUM_GPRC_PARTITIONS); for (uint32 i = 0; i < NUM_GPRC_PARTITIONS; i++) { hashArray[i].lockId = FirstGPRCMappingLock + i; hashArray[i].context = AllocSetContextCreate( - GLOBAL_PLANCACHE_MEMCONTEXT, + GLOBAL_PRC_MEMCONTEXT, "GPRC_Bucket_Context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, @@ -87,7 +85,7 @@ void PLGlobalPackageRuntimeCache::init() } } -static List* copyPackageRuntimeStates(SessionPackageRuntime *runtime) +static List* CopyPackageRuntimeStates(SessionPackageRuntime *runtime) { ListCell* cell = NULL; List* result = NULL; @@ -107,25 +105,92 @@ static List* copyPackageRuntimeStates(SessionPackageRuntime *runtime) } result = lappend(result, newRuntimeState); } + return result; } -static SessionPackageRuntime* copySessionPackageRuntime(SessionPackageRuntime *runtime) +List* CopyPortalDatas(SessionPackageRuntime *runtime) +{ + ListCell* cell = NULL; + List* result = NIL; + foreach(cell, runtime->portalData) { + AutoSessionPortalData* portalData = (AutoSessionPortalData*)lfirst(cell); + AutoSessionPortalData* newportalData = 
(AutoSessionPortalData*)palloc0(sizeof(AutoSessionPortalData)); + newportalData->outParamIndex = portalData->outParamIndex; + newportalData->strategy = portalData->strategy; + newportalData->cursorOptions = portalData->cursorOptions; + newportalData->commandTag = portalData->commandTag; + newportalData->atEnd = portalData->atEnd; + newportalData->atStart = portalData->atStart; + newportalData->portalPos = portalData->portalPos; + newportalData->holdStore = portalData->holdStore; + newportalData->holdContext = portalData->holdContext; + newportalData->tupDesc = portalData->tupDesc; + newportalData->is_open = portalData->is_open; + newportalData->found = portalData->found; + newportalData->not_found = portalData->not_found; + newportalData->row_count = portalData->row_count; + newportalData->null_open = portalData->null_open; + newportalData->null_fetch = portalData->null_fetch; + result = lappend(result, newportalData); + } + return result; +} + +List* CopyFuncInfoDatas(SessionPackageRuntime *runtime) +{ + ListCell* cell = NULL; + List* result = NIL; + foreach(cell, runtime->funcValInfo) { + AutoSessionFuncValInfo* funcInfo = (AutoSessionFuncValInfo*)lfirst(cell); + AutoSessionFuncValInfo *newfuncInfo = (AutoSessionFuncValInfo*)palloc0(sizeof(AutoSessionFuncValInfo)); + newfuncInfo->found = funcInfo->found; + newfuncInfo->sql_cursor_found = funcInfo->sql_cursor_found; + newfuncInfo->sql_notfound = funcInfo->sql_notfound; + newfuncInfo->sql_isopen = funcInfo->sql_isopen; + newfuncInfo->sql_rowcount = funcInfo->sql_rowcount; + newfuncInfo->sqlcode = funcInfo->sqlcode; + newfuncInfo->sqlcode_isnull = funcInfo->sqlcode_isnull; + result = lappend(result, newfuncInfo); + } + return result; +} + +List* CopyPortalContexts(List *portalContexts) +{ + ListCell* cell = NULL; + List* result = NIL; + foreach(cell, portalContexts) { + AutoSessionPortalContextData* portalContext = (AutoSessionPortalContextData*)lfirst(cell); + AutoSessionPortalContextData* newPortalContext = + (AutoSessionPortalContextData*)palloc0(sizeof(AutoSessionPortalContextData)); + newPortalContext->status = portalContext->status; + newPortalContext->portalHoldContext = portalContext->portalHoldContext; + result = lappend(result, newPortalContext); + } + return result; +} + +static SessionPackageRuntime* CopySessionPackageRuntime(SessionPackageRuntime *runtime, bool isShare) { MemoryContext pkgRuntimeCtx = AllocSetContextCreate(CurrentMemoryContext, "SessionPackageRuntime", ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_MAXSIZE, + isShare? 
SHARED_CONTEXT : STANDARD_CONTEXT);
     MemoryContext oldCtx = MemoryContextSwitchTo(pkgRuntimeCtx);
 
     SessionPackageRuntime *sessPkgRuntime = (SessionPackageRuntime*)palloc0(sizeof(SessionPackageRuntime));
-    sessPkgRuntime->runtimes = copyPackageRuntimeStates(runtime);
+    sessPkgRuntime->runtimes = CopyPackageRuntimeStates(runtime);
     sessPkgRuntime->context = pkgRuntimeCtx;
+    sessPkgRuntime->portalContext = CopyPortalContexts(runtime->portalContext);
+    sessPkgRuntime->portalData = CopyPortalDatas(runtime);
+    sessPkgRuntime->funcValInfo = CopyFuncInfoDatas(runtime);
     MemoryContextSwitchTo(oldCtx);
     return sessPkgRuntime;
 }
 
-bool PLGlobalPackageRuntimeCache::add(uint64 sessionId, SessionPackageRuntime* runtime)
+bool PLGlobalPackageRuntimeCache::Add(uint64 sessionId, SessionPackageRuntime* runtime)
 {
     if (runtime == NULL) {
         ereport(WARNING,
@@ -133,10 +198,6 @@ bool PLGlobalPackageRuntimeCache::add(uint64 sessionId, SessionPackageRuntime* r
         return false;
     }
 
-    ereport(DEBUG3, (errmodule(MOD_PLSQL), errcode(ERRCODE_LOG),
-        errmsg("PLGlobalPackageRuntimeCache LOG: current session id: %lu , add session id: %lu",
-            IS_THREAD_POOL_WORKER ? u_sess->session_id : t_thrd.proc_cxt.MyProcPid, sessionId)));
-
     uint32 hashCode = DirectFunctionCall1(hashint8, Int64GetDatumFast(sessionId));
     int partitionIndex = getPartition(hashCode);
     GPRCHashCtl hashTblCtl = hashArray[partitionIndex];
@@ -148,7 +209,22 @@ bool PLGlobalPackageRuntimeCache::add(uint64 sessionId, SessionPackageRuntime* r
     GPRCValue *entry = (GPRCValue*)hash_search_with_hash_value(
         hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_FIND, &found);
     if (found) {
-        MemoryContextDelete(entry->sessPkgRuntime->context);
+        /* should not happen */
+        if (sessionId != entry->sessionId) {
+            MemoryContextSwitchTo(oldcontext);
+            LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
+            ereport(ERROR, (errmodule(MOD_GPRC), errcode(ERRCODE_LOG),
+                errmsg("session id does not match when adding to PLGlobalPackageRuntimeCache"),
+                errdetail("add to PLGlobalPackageRuntimeCache failed, current session id does not match")));
+        }
+
+        if (entry->sessPkgRuntime != NULL) {
+            ereport(DEBUG3, (errmodule(MOD_GPRC), errcode(ERRCODE_LOG),
+                errmsg("PLGlobalPackageRuntimeCache: remove context(%p) when add, context(%s), parent(%s), shared(%d)",
+                    entry->sessPkgRuntime->context, entry->sessPkgRuntime->context->name,
+                    entry->sessPkgRuntime->context->parent->name, entry->sessPkgRuntime->context->is_shared)));
+            MemoryContextDelete(entry->sessPkgRuntime->context);
+        }
         entry->sessPkgRuntime = NULL;
         hash_search_with_hash_value(
             hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_REMOVE, &found);
     }
@@ -157,61 +233,74 @@ bool PLGlobalPackageRuntimeCache::add(uint64 sessionId, SessionPackageRuntime* r
     entry = (GPRCValue*)hash_search_with_hash_value(
         hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_ENTER, &found);
     if (entry == NULL) {
-        ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+        ereport(ERROR, (errmodule(MOD_GPRC), errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
            errmsg("palloc hash element GPRCValue failed"),
            errdetail("failed to add hash element for PLGlobalPackageRuntimeCache"),
            errcause("out of memory"),
            erraction("set more memory")));
     }
-    entry->sessPkgRuntime = copySessionPackageRuntime(runtime);
+    entry->sessPkgRuntime = CopySessionPackageRuntime(runtime, true);
+    entry->sessionId = sessionId;
     MemoryContextSwitchTo(oldcontext);
     LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
     return true;
 }
 
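Add() above illustrates the cache's concurrency scheme: sessions hash into NUM_GPRC_PARTITIONS buckets, each guarded by its own LWLock and memory context, and the runtime is copied into cache-owned shared memory, with any stale snapshot for the same session dropped first. A toy lock-striped analog using std::mutex in place of LWLocks (the names here are illustrative, not openGauss APIs):

```
#include <array>
#include <cstdint>
#include <mutex>
#include <string>
#include <unordered_map>

// Lock-striped session cache: a toy analog of the NUM_GPRC_PARTITIONS
// buckets, each guarded by its own lock (LWLocks in the real code).
constexpr size_t kPartitions = 16;

struct Partition {
    std::mutex lock;
    std::unordered_map<uint64_t, std::string> entries;  // sessionId -> state snapshot
};

static std::array<Partition, kPartitions> g_parts;

static Partition& partition_for(uint64_t sessionId)
{
    return g_parts[std::hash<uint64_t>{}(sessionId) % kPartitions];
}

// Copy-in on add: the cache stores its own copy, replacing any stale entry,
// like Add() copying into a cache-owned shared memory context.
static void cache_add(uint64_t sessionId, const std::string& runtime)
{
    Partition& p = partition_for(sessionId);
    std::lock_guard<std::mutex> guard(p.lock);
    p.entries[sessionId] = runtime;   // overwrite drops the old snapshot
}

int main()
{
    cache_add(1001, "pkg state v1");
    cache_add(1001, "pkg state v2");  // replaces the stale snapshot
    return 0;
}
```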
-SessionPackageRuntime* PLGlobalPackageRuntimeCache::fetch(uint64 sessionId)
+SessionPackageRuntime* PLGlobalPackageRuntimeCache::Fetch(uint64 sessionId)
 {
-    ereport(DEBUG3, (errmodule(MOD_PLSQL), errcode(ERRCODE_LOG),
-        errmsg("PLGlobalPackageRuntimeCache LOG: current session id: %lu , fetch session id: %lu",
-            IS_THREAD_POOL_WORKER ? u_sess->session_id : t_thrd.proc_cxt.MyProcPid, sessionId)));
-
     uint32 hashCode = DirectFunctionCall1(hashint8, Int64GetDatumFast(sessionId));
     int partitionIndex = getPartition(hashCode);
     GPRCHashCtl hashTblCtl = hashArray[partitionIndex];
 
     (void)LWLockAcquire(GetMainLWLockByIndex(hashTblCtl.lockId), LW_EXCLUSIVE);
+    MemoryContext oldcontext = MemoryContextSwitchTo(hashTblCtl.context);
 
     bool found = false;
     GPRCValue *entry = (GPRCValue*)hash_search_with_hash_value(
         hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_FIND, &found);
     if (!found) {
+        MemoryContextSwitchTo(oldcontext);
         LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
         return NULL;
     }
 
-    SessionPackageRuntime *sessPkgRuntime = copySessionPackageRuntime(entry->sessPkgRuntime);
+    MemoryContextSwitchTo(oldcontext);
+    SessionPackageRuntime *sessPkgRuntime = CopySessionPackageRuntime(entry->sessPkgRuntime, false);
     LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
     return sessPkgRuntime;
 }
 
-bool PLGlobalPackageRuntimeCache::remove(uint64 sessionId)
+bool PLGlobalPackageRuntimeCache::Remove(uint64 sessionId)
 {
-    ereport(DEBUG3, (errmodule(MOD_PLSQL), errcode(ERRCODE_LOG),
-        errmsg("PLGlobalPackageRuntimeCache LOG: current session id: %lu , remove session id: %lu",
-            IS_THREAD_POOL_WORKER ? u_sess->session_id : t_thrd.proc_cxt.MyProcPid, sessionId)));
     uint32 hashCode = DirectFunctionCall1(hashint8, Int64GetDatumFast(sessionId));
     int partitionIndex = getPartition(hashCode);
     GPRCHashCtl hashTblCtl = hashArray[partitionIndex];
 
     (void)LWLockAcquire(GetMainLWLockByIndex(hashTblCtl.lockId), LW_EXCLUSIVE);
+    MemoryContext oldcontext = MemoryContextSwitchTo(hashTblCtl.context);
 
     bool found = false;
-    hash_search_with_hash_value(
-        hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_REMOVE, &found);
-    if (!found) {
+    GPRCValue *entry = (GPRCValue*)hash_search_with_hash_value(
+        hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_FIND, &found);
+    if (found) {
+        if (sessionId != entry->sessionId) {
+            /* should not happen */
+            MemoryContextSwitchTo(oldcontext);
+            LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
+            ereport(ERROR, (errmodule(MOD_GPRC), errcode(ERRCODE_LOG),
+                errmsg("session id does not match when removing from PLGlobalPackageRuntimeCache"),
+                errdetail("remove from PLGlobalPackageRuntimeCache failed, current session id does not match")));
+        }
+        MemoryContextDelete(entry->sessPkgRuntime->context);
+        entry->sessPkgRuntime = NULL;
+        hash_search_with_hash_value(
+            hashTblCtl.hashTbl, (const void*)&sessionId, hashCode, HASH_REMOVE, &found);
+        MemoryContextSwitchTo(oldcontext);
         LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
-        return false;
+        return true;
     }
+    MemoryContextSwitchTo(oldcontext);
     LWLockRelease(GetMainLWLockByIndex(hashTblCtl.lockId));
-    return true;
+
+    return false;
 }
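Fetch() returns a private copy built in the caller's non-shared context, and Remove() deletes the cache-owned context, so a caller never holds pointers into cache memory that Remove() could free. A small sketch of those copy-out semantics under a lock (toy single-map cache, not the partitioned one above):

```
#include <cstdint>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

// Toy analog of Fetch()/Remove(): the caller always gets a private copy,
// never a pointer into cache-owned memory, so Remove() can free safely.
static std::mutex g_lock;
static std::unordered_map<uint64_t, std::string> g_cache;

static std::optional<std::string> cache_fetch(uint64_t sessionId)
{
    std::lock_guard<std::mutex> guard(g_lock);
    auto it = g_cache.find(sessionId);
    if (it == g_cache.end()) {
        return std::nullopt;          // like Fetch() returning NULL
    }
    return it->second;                // copy-out: a snapshot for the caller
}

static bool cache_remove(uint64_t sessionId)
{
    std::lock_guard<std::mutex> guard(g_lock);
    return g_cache.erase(sessionId) != 0;   // frees the cache-owned copy
}

int main()
{
    g_cache[7] = "runtime";
    auto snap = cache_fetch(7);       // private copy survives removal
    cache_remove(7);
    return snap.has_value() ? 0 : 1;
}
```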
"executor/spi_priv.h" @@ -94,25 +95,45 @@ extern void restoreCallFromPkgOid(Oid pkgOid) u_sess->plsql_cxt.running_pkg_oid = pkgOid; } -static void processError(CreateFunctionStmt* stmt) +#ifndef ENABLE_MULTIPLE_NODES +static void processError(CreateFunctionStmt* stmt, enum FunctionErrorType ErrorType) { int lines = stmt->startLineNumber + u_sess->plsql_cxt.package_first_line - 1; - InsertErrorMessage("function declared in package specification " + InsertErrorMessage("function declared in package specification " "and package body must be the same", 0, false, lines); - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmodule(MOD_PLSQL), - errmsg("function declared in package specification and " - "package body must be the same, function: %s", - NameListToString(stmt->funcname)))); + if (ErrorType == FuncitonDefineError) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_PLSQL), + errmsg("function declared in package specification and " + "package body must be the same, function: %s", + NameListToString(stmt->funcname)))); + } else if (ErrorType == FunctionDuplicate) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_PLSQL), + errmsg("function declared duplicate: %s", + NameListToString(stmt->funcname)))); + } else if (ErrorType == FunctionUndefined) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_PLSQL), + errmsg("function undefined: %s", + NameListToString(stmt->funcname)))); + } else if (ErrorType == FunctionReturnTypeError) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmodule(MOD_PLSQL), + errmsg("function return type must be consistent: %s", + NameListToString(stmt->funcname)))); + } } -#ifndef ENABLE_MULTIPLE_NODES static void InsertGsSource(Oid objId, Oid nspid, const char* name, const char* type, bool status) { bool notInsert = u_sess->attr.attr_common.upgrade_mode != 0 || IsSystemNamespace(nspid) || IsToastNamespace(nspid) || IsCStoreNamespace(nspid) || - IsPackageSchemaOid(nspid) || SKIP_GS_SOURCE || IsTransactionBlock(); + IsPackageSchemaOid(nspid) || SKIP_GS_SOURCE; if (notInsert) { return; } @@ -127,9 +148,12 @@ static void InsertGsSource(Oid objId, Oid nspid, const char* name, const char* t Oid userId = (Oid)u_sess->misc_cxt.CurrentUserId; char statusChr = status ? 
     char statusChr = status ? 't' : 'f';
 
     /* User statement may contain sensitive information like password */
-    const char* source = maskPassword(u_sess->plsql_cxt.sourceText);
+    if (u_sess->plsql_cxt.debug_query_string == NULL) {
+        return;
+    }
+    const char* source = maskPassword(u_sess->plsql_cxt.debug_query_string);
     if (source == NULL) {
-        source = u_sess->plsql_cxt.sourceText;
+        source = u_sess->plsql_cxt.debug_query_string;
     }
 
     /* Execute autonomous transaction call for logging purpose */
     StringInfoData str;
@@ -214,9 +238,6 @@ static void InsertGsSource(Oid objId, Oid nspid, const char* name, const char* t
         MemoryContextSwitchTo(temp);
     }
     pfree_ext(str.data);
-    if (source != t_thrd.postgres_cxt.debug_query_string) {
-        pfree_ext(source);
-    }
 }
 
 static void PkgInsertGsSource(Oid pkgOid, bool isSpec, bool status)
 {
@@ -245,13 +266,14 @@ static void ProcInsertGsSource(Oid funcOid, bool status)
     bool isnull = false;
 
     /* Skip nested create function stmt within package body */
-    Datum packageDatum = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_package, &isnull);
+    Datum packageIdDatum = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_packageid, &isnull);
     if (isnull) {
         ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT),
             errmsg("The prokind of the function is null"),
             errhint("Check whether the definition of the function is complete in the pg_proc system table.")));
     }
-    if (DatumGetBool(packageDatum)) {
+    Oid packageOid = DatumGetObjectId(packageIdDatum);
+    if (OidIsValid(packageOid)) {
         ReleaseSysCache(procTup);
         return;
     }
@@ -296,10 +318,11 @@ static void ProcInsertGsSource(Oid funcOid, bool status)
  * recordArr[0] = 1; it means the location 0 is pairing location 1, so the location 1 value must be 0;
  * so recordArr[record[1]] = 1;
  */
+#ifndef ENABLE_MULTIPLE_NODES
 void processPackageProcList(PLpgSQL_package* pkg)
 {
     ListCell* cell = NULL;
-    CreateFunctionStmt** stmtArray = (CreateFunctionStmt**)palloc0(list_length(pkg->proc_list) *
+    CreateFunctionStmt** stmtArray = (CreateFunctionStmt**)palloc0(list_length(pkg->proc_list) *
         sizeof(CreateFunctionStmt*));
     int* recordArr = (int*)palloc0(list_length(pkg->proc_list) * sizeof(int));
     int i = 0;
@@ -320,50 +343,69 @@ void processPackageProcList(PLpgSQL_package* pkg)
      */
     if (funcStmtLength <= 1) {
         if (stmtArray[0]->isFunctionDeclare && pkg->is_bodycompiled) {
-            processError(stmtArray[0]);
+            processError(stmtArray[0], FunctionUndefined);
         } else if (stmtArray[0]->isFunctionDeclare && !pkg->is_bodycompiled) {
             return;
         }
-    }
+    }
     for (int j = 0; j < funcStmtLength - 1; j++) {
         for (int k = j + 1; k < funcStmtLength; k++) {
-            char* funcname1 = NULL;
             char* funcname2 = NULL;
+            char* returnType1 = NULL;
+            char* returnType2 = NULL;
             CreateFunctionStmt* funcStmt1 = stmtArray[j];
             CreateFunctionStmt* funcStmt2 = stmtArray[k];
             List* funcnameList1 = funcStmt1->funcname;
             List* funcnameList2 = funcStmt2->funcname;
+            bool returnTypeSame = false;
             funcname1 = getFuncName(funcnameList1);
             funcname2 = getFuncName(funcnameList2);
-
-            if (strcmp(funcname1, funcname2)) {
+            if (strcmp(funcname1, funcname2) != 0) {
+                /* ignore the return type when the two procedures do not have the same name */
                 continue;
             }
-
+            if (funcStmt1->returnType != NULL) {
+                returnType1 = TypeNameToString((TypeName*)funcStmt1->returnType);
+            } else {
+                returnType1 = "void";
+            }
+            if (funcStmt2->returnType != NULL) {
+                returnType2 = TypeNameToString((TypeName*)funcStmt2->returnType);
+            } else {
+                returnType2 = "void";
+            }
             if (!isSameArgList(funcStmt1, funcStmt2)) {
                 continue;
            }
            if (!isSameParameterList(funcStmt1->options, funcStmt2->options)) {
-
processError(funcStmt1); + processError(funcStmt1, FuncitonDefineError); + } + if (strcmp(returnType1, returnType2) != 0) { + returnTypeSame = false; + } else { + returnTypeSame = true; + } + if (!returnTypeSame && (funcStmt1->isFunctionDeclare^funcStmt2->isFunctionDeclare)) { + processError(funcStmt1, FunctionReturnTypeError); } if (funcStmt1->isProcedure != funcStmt2->isProcedure) { - processError(funcStmt1); + processError(funcStmt1, FuncitonDefineError); } if (funcStmt1->isFunctionDeclare && funcStmt2->isFunctionDeclare) { - processError(funcStmt1); + processError(funcStmt1, FunctionDuplicate); } if (!(funcStmt1->isFunctionDeclare || funcStmt2->isFunctionDeclare)) { - processError(funcStmt1); + processError(funcStmt1, FuncitonDefineError); } if (recordArr[j] == 0) { if (recordArr[k] != 0) { - processError(stmtArray[j]); + processError(stmtArray[j], FuncitonDefineError); } recordArr[j] = k; recordArr[k] = j; } else if(recordArr[recordArr[j]] != j) { - processError(stmtArray[j]); + processError(stmtArray[j], FuncitonDefineError); } if (!funcStmt1->isPrivate || !funcStmt2->isPrivate) { funcStmt1->isPrivate = false; @@ -371,16 +413,14 @@ void processPackageProcList(PLpgSQL_package* pkg) } } } - i = 0; - for(i = 0; i < funcStmtLength; i++) { if (stmtArray[i]->isFunctionDeclare && pkg->is_bodycompiled) { if (recordArr[i] == 0) { errno_t rc; - char message[MAXSTRLEN]; - rc = sprintf_s(message, MAXSTRLEN, - "Function definition not found: %s", + char message[MAXSTRLEN]; + rc = sprintf_s(message, MAXSTRLEN, + "Function definition not found: %s", NameListToString(stmtArray[i]->funcname)); securec_check_ss_c(rc, "\0", "\0"); InsertErrorMessage(message, stmtArray[i]->startLineNumber); @@ -394,6 +434,8 @@ void processPackageProcList(PLpgSQL_package* pkg) pfree(recordArr); pfree(stmtArray); } +#endif + /* @@ -456,8 +498,6 @@ void _PG_init(void) UnregisterXactCallback(plpgsql_xact_cb, NULL); UnregisterSubXactCallback(plpgsql_subxact_cb, NULL); pfree_ext(u_sess->SPI_cxt.cur_tableof_index); - u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = InvalidOid; - u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = NULL; PG_RE_THROW(); } PG_END_TRY(); @@ -657,6 +697,7 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS) MemoryContext oldContext = CurrentMemoryContext; FormatCallStack* plcallstack = NULL; int pkgDatumsNumber = 0; + bool savedisAllowCommitRollback = true; /* * if the atomic stored in fcinfo is false means allow * commit/rollback within stored procedure. 
@@ -678,7 +719,8 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
     Oid package_oid = get_package_id(func_oid);
     if (OidIsValid(package_oid)) {
         if (u_sess->plsql_cxt.curr_compile_context == NULL ||
-            u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL) {
+            u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL ||
+            u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid != package_oid) {
             pkg = PackageInstantiation(package_oid);
         }
     }
@@ -691,6 +733,7 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
             }
         }
     }
+
     int fun_arg = fcinfo->nargs;
 #ifdef ENABLE_MULTIPLE_NODES
     bool outer_is_stream = false;
@@ -732,6 +775,10 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
     if (func == NULL) {
         func = plpgsql_compile(fcinfo, false);
     }
+    if (func->fn_readonly) {
+        stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_IMMUTABLE);
+    }
+
     func->is_plpgsql_func_with_outparam = is_function_with_plpgsql_language_and_outparam(func->fn_oid);
 
     restoreCallFromPkgOid(firstLevelPkgOid);
@@ -763,6 +810,8 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
     saved_Pseudo_CurrentUserId = u_sess->misc_cxt.Pseudo_CurrentUserId;
     u_sess->misc_cxt.Pseudo_CurrentUserId = &func->fn_owner;
 
+    /* Mark the packages the function uses, so they can't be deleted from under us */
+    AddPackageUseCount(func);
     /* Mark the function as busy, so it can't be deleted from under us */
     func->use_count++;
 
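The handler now pins both the function (use_count++) and, via AddPackageUseCount(), every package it depends on, and the error path later undoes both before re-throwing. The C code does this manually with PG_TRY/PG_CATCH; in C++ terms the same invariant is what an RAII guard provides, as in this toy sketch:

```
#include <cstdio>
#include <stdexcept>

// Toy pin guard: increments a use count on entry and always decrements on
// exit, which is what the paired use_count++ / use_count-- in PG_TRY and
// PG_CATCH achieve in the C code above.
struct PinGuard {
    int& useCount;
    explicit PinGuard(int& c) : useCount(c) { ++useCount; }
    ~PinGuard() { --useCount; }                 // runs on return or exception
};

static int g_funcUseCount = 0;

static void call_function(bool fail)
{
    PinGuard pin(g_funcUseCount);               // pinned: can't be freed under us
    if (fail) {
        throw std::runtime_error("error inside the function");
    }
}

int main()
{
    try {
        call_function(true);
    } catch (const std::exception&) {
        // stack unwinding already dropped the pin
    }
    std::printf("use count after error: %d\n", g_funcUseCount);   // prints 0
    return 0;
}
```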
@@ -772,7 +821,6 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
     PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context;
     int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list);
     int save_compile_status = u_sess->plsql_cxt.compile_status;
-    bool isExecFunc = false;
     PG_TRY();
     {
         /*
@@ -801,36 +849,43 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
                     erraction("redefine package")));
         }
 
-        isExecFunc = true;
         if (IsAutonomousTransaction(func->action->isAutonomous)) {
             retval = plpgsql_exec_autonm_function(func, fcinfo, NULL);
         } else {
             retval = plpgsql_exec_function(func, fcinfo, false);
         }
-#ifndef ENABLE_MULTIPLE_NODES
-        /* for restore parent session and automn session package var values */
-        processAutonmSessionPkgs(func);
-#endif
         /* Disconnecting and releasing resources */
         DestoryAutonomousSession(false);
     }
     restoreCallFromPkgOid(secondLevelPkgOid);
+    if (func->fn_readonly) {
+        stp_retore_old_xact_stmt_state(savedisAllowCommitRollback);
+    }
 }
 PG_CATCH();
 {
-#ifndef ENABLE_MULTIPLE_NODES
-    /* for restore parent session and automn session package var values */
-    if (isExecFunc) {
-        processAutonmSessionPkgs(func);
-    }
-#endif
+    /* reset cur_exception_cxt */
+    u_sess->plsql_cxt.cur_exception_cxt = NULL;
+
+    /* Put new code after this snippet; otherwise, unexpected errors may occur. */
     plcallstack = t_thrd.log_cxt.call_stack;
-    if (!IsAutonomousTransaction(func->action->isAutonomous) && plcallstack != NULL) {
+
+#ifndef ENABLE_MULTIPLE_NODES
+    estate_cursor_set(plcallstack);
+
+#endif
+
+    if (plcallstack != NULL) {
         t_thrd.log_cxt.call_stack = plcallstack->prev;
     }
+
+#ifndef ENABLE_MULTIPLE_NODES
+    /* for restoring parent session and autonomous session package var values */
+    processAutonmSessionPkgsInException(func);
+#endif
     /* Decrement use-count, restore cur_estate, and propagate error */
     func->use_count--;
     func->cur_estate = save_cur_estate;
+    DecreasePackageUseCount(func);
 #ifndef ENABLE_MULTIPLE_NODES
     /* debug finished, close debug resource */
     if (func->debug) {
@@ -858,7 +913,10 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
     u_sess->misc_cxt.Pseudo_CurrentUserId = saved_Pseudo_CurrentUserId;
     /* AutonomousSession Disconnecting and releasing resources */
     DestoryAutonomousSession(true);
-
+    pfree_ext(u_sess->plsql_cxt.pass_func_tupdesc);
+    if (func->fn_readonly) {
+        stp_retore_old_xact_stmt_state(savedisAllowCommitRollback);
+    }
     PG_RE_THROW();
 }
 PG_END_TRY();
@@ -890,10 +948,10 @@ Datum plpgsql_call_handler(PG_FUNCTION_ARGS)
     }
 
     /* set cursor optin to null which opened in this procedure */
-    ResetPortalCursor(GetCurrentSubTransactionId(), func->fn_oid, func->use_count);
+    ResetPortalCursor(GetCurrentSubTransactionId(), func->fn_oid, func->use_count, false);
 
     func->use_count--;
-
+    DecreasePackageUseCount(func);
     func->cur_estate = save_cur_estate;
     func->debug = save_debug_info;
@@ -1017,6 +1075,8 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
     PG_END_TRY();
     PGSTAT_END_PLSQL_TIME_RECORD(PL_COMPILATION_TIME);
 
+    /* Mark the packages the function uses, so they can't be deleted from under us */
+    AddPackageUseCount(func);
     /* Mark the function as busy, just pro forma */
     func->use_count++;
 
@@ -1048,21 +1108,25 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
         retval = plpgsql_exec_function(func, &fake_fcinfo, false);
         restoreCallFromPkgOid(old_value);
     }
-#ifndef ENABLE_MULTIPLE_NODES
-    /* for restore parent session and automn session package var values */
-    processAutonmSessionPkgs(func);
-#endif
 }
 PG_CATCH();
 {
-#ifndef ENABLE_MULTIPLE_NODES
-    /* for restore parent session and automn session package var values */
-    processAutonmSessionPkgs(func);
-#endif
+    /* Put new code after this snippet; otherwise, unexpected errors may occur. */
     FormatCallStack* plcallstack = t_thrd.log_cxt.call_stack;
-    if (!IsAutonomousTransaction(func->action->isAutonomous) && plcallstack != NULL) {
+#ifndef ENABLE_MULTIPLE_NODES
+    estate_cursor_set(plcallstack);
+#endif
+
+    if (plcallstack != NULL) {
         t_thrd.log_cxt.call_stack = plcallstack->prev;
     }
+    /* Decrement package use-count */
+    DecreasePackageUseCount(func);
+
+#ifndef ENABLE_MULTIPLE_NODES
+    /* for restoring parent session and autonomous session package var values */
+    (void)processAutonmSessionPkgsInException(func);
+#endif
     ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG),
         errmsg("%s clear curr_compile_context because of error.", __func__)));
     /* reset nest plpgsql compile */
@@ -1086,10 +1150,11 @@ Datum plpgsql_inline_handler(PG_FUNCTION_ARGS)
 
     /* set cursor optin to null which opened in this block */
-    ResetPortalCursor(GetCurrentSubTransactionId(), func->fn_oid, func->use_count);
+    ResetPortalCursor(GetCurrentSubTransactionId(), func->fn_oid, func->use_count, false);
 
     /* Function should now have no remaining use-counts ...
*/ func->use_count--; + DecreasePackageUseCount(func); AssertEreport(func->use_count == 0, MOD_PLSQL, "Function should now have no remaining use-counts"); /* ... so we can free subsidiary storage */ @@ -1236,13 +1301,17 @@ Datum plpgsql_validator(PG_FUNCTION_ARGS) PG_CATCH(); { #ifndef ENABLE_MULTIPLE_NODES - if (u_sess->attr.attr_common.plsql_show_all_error && u_sess->plsql_cxt.isCreateFunction) { + bool insertError = (u_sess->attr.attr_common.plsql_show_all_error || + !u_sess->attr.attr_sql.check_function_bodies) && + u_sess->plsql_cxt.isCreateFunction; + if (insertError) { if (!IsInitdb) { ProcInsertGsSource(funcoid, false); int rc = CompileWhich(); if (rc == PLPGSQL_COMPILE_PACKAGE || rc == PLPGSQL_COMPILE_PACKAGE_PROC) { InsertError(u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package->pkg_oid); } + u_sess->plsql_cxt.isCreateFunction = false; } } #endif @@ -1257,7 +1326,13 @@ Datum plpgsql_validator(PG_FUNCTION_ARGS) /* reset nest plpgsql compile */ u_sess->plsql_cxt.curr_compile_context = save_compile_context; u_sess->plsql_cxt.compile_status = save_compile_status; +#ifndef ENABLE_MULTIPLE_NODES + if (!u_sess->attr.attr_common.plsql_show_all_error) { + clearCompileContextList(save_compile_list_length); + } +#else clearCompileContextList(save_compile_list_length); +#endif u_sess->parser_cxt.isCreateFuncOrProc = false; PG_RE_THROW(); } @@ -1327,8 +1402,10 @@ PLpgSQL_package* plpgsql_package_validator(Oid packageOid, bool isSpec, bool isC PG_CATCH(); { #ifndef ENABLE_MULTIPLE_NODES - - if (u_sess->attr.attr_common.plsql_show_all_error && isCreate) { + bool insertError = (u_sess->attr.attr_common.plsql_show_all_error || + !u_sess->attr.attr_sql.check_function_bodies) && + isCreate; + if (insertError) { SPI_STACK_LOG("finish", NULL, NULL); SPI_finish(); if (!IsInitdb) { @@ -1427,12 +1504,9 @@ void FunctionInPackageCompile(PLpgSQL_package* pkg) * init the package.including package variable. 
 * ----------
 */
-
+#ifndef ENABLE_MULTIPLE_NODES
 void PackageInit(PLpgSQL_package* pkg, bool isCreate)
 {
-#ifdef ENABLE_MULTIPLE_NODES
-    return;
-#endif
     if (likely(pkg != NULL)) {
         if (likely(pkg->isInit)) {
             return;
@@ -1506,7 +1580,6 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate)
             (void)CompileStatusSwtichTo(oldCompileStatus);
         }
     }
-#ifndef ENABLE_MULTIPLE_NODES
     if (u_sess->attr.attr_common.plsql_show_all_error) {
         PopOverrideSearchPath();
         ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG),
@@ -1520,7 +1593,6 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate)
         }
         return;
     }
-#endif
     cell = NULL;
     bool oldStatus = false;
     bool needResetErrMsg = stp_disable_xact_and_set_err_msg(&oldStatus, STP_XACT_PACKAGE_INSTANTIATION);
@@ -1539,8 +1611,7 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate)
             if (!doStmt->isExecuted) {
                 (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK);
                 if (u_sess->SPI_cxt._connected > -1 &&
-                    u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid &&
-                    !ENABLE_SQL_BETA_FEATURE(SPI_DEBUG)) {
+                    u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) {
                     SPI_STACK_LOG("begin", NULL, NULL);
                     _SPI_begin_call(false);
                     ExecuteDoStmt(doStmt, true);
@@ -1557,17 +1628,16 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate)
                     doStmt->isExecuted = true;
                 }
             } else {
-                if (doStmt->isSpec) {
+                if (doStmt->isSpec && !doStmt->isExecuted) {
                     (void)CompileStatusSwtichTo(COMPILIE_PKG_ANON_BLOCK);
                     if (u_sess->SPI_cxt._connected > -1 &&
-                        u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid &&
-                        !ENABLE_SQL_BETA_FEATURE(SPI_DEBUG)) {
+                        u_sess->SPI_cxt._connected != u_sess->SPI_cxt._curid) {
                         SPI_STACK_LOG("begin", NULL, NULL);
                         _SPI_begin_call(false);
                         ExecuteDoStmt(doStmt, true);
                         SPI_STACK_LOG("end", NULL, NULL);
                         _SPI_end_call(false);
-                    } else {
+                    } else if (!doStmt->isExecuted) {
                         ExecuteDoStmt(doStmt, true);
                     }
@@ -1602,7 +1672,7 @@ void PackageInit(PLpgSQL_package* pkg, bool isCreate)
     PG_END_TRY();
     PopOverrideSearchPath();
 }
-
+#endif
 void record_pkg_function_dependency(PLpgSQL_package* pkg, List** invalItems, Oid funcid, Oid pkgid)
 {
     /*
@@ -1629,7 +1699,93 @@ void record_pkg_function_dependency(PLpgSQL_package* pkg, List** invalItems, Oid
             inval_item->objId = pkgid;
         }
         *invalItems = lappend(*invalItems, inval_item);
-        pkg->use_count++;
+    }
+}
+
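record_pkg_function_dependency() above only records FuncInvalItem entries; AddPackageUseCount() below is where those recorded dependencies are turned into pins, and it does so in two phases: first verify that every depended-on package is still in the session cache, and only then bump all their use counts, so a failure never leaves partial pins behind. A toy sketch of that validate-then-pin pattern:

```
#include <unordered_map>
#include <vector>

// Toy validate-then-pin: check that every dependency is present before
// touching any use count, so an error leaves no partial pins behind.
static std::unordered_map<int, int> g_pkgUseCount;   // loaded pkg oid -> pins

static bool pin_dependencies(const std::vector<int>& depPkgOids)
{
    // Phase 1: validate only; no side effects yet.
    for (int oid : depPkgOids) {
        if (g_pkgUseCount.find(oid) == g_pkgUseCount.end()) {
            return false;            // a dependency vanished: caller reports error
        }
    }
    // Phase 2: all present, commit the pins.
    for (int oid : depPkgOids) {
        ++g_pkgUseCount[oid];
    }
    return true;
}

int main()
{
    g_pkgUseCount[1] = 0;
    g_pkgUseCount[2] = 0;
    bool ok = pin_dependencies({1, 2});        // pins both packages
    bool bad = pin_dependencies({1, 3});       // pkg 3 missing: pins nothing
    return (ok && !bad && g_pkgUseCount[1] == 1) ? 0 : 1;
}
```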
errmsg("concurrent error when call function."), + errdetail("when call function \"%s\", packages it depends on has been invalidated by other session.", + func->fn_signature), + errcause("excessive concurrency"), + erraction("reduce concurrency and retry"))); + } + + /* add package use count */ + foreach(lc, pkgList) { + pkg = (PLpgSQL_package*)lfirst(lc); + pkg->use_count++; + } + list_free(pkgList); +} + +void DecreasePackageUseCount(PLpgSQL_function* func) +{ + List *invalItems = func->invalItems; + if (invalItems == NIL) { + return; + } + + ListCell* lc = NULL; + FuncInvalItem* item = NULL; + PLpgSQL_pkg_hashkey hashkey; + PLpgSQL_package* pkg = NULL; + + foreach(lc, invalItems) { + pkg = NULL; + item = (FuncInvalItem*)lfirst(lc); + if (item->cacheId == PROCOID) { + continue; + } + hashkey.pkgOid = item->objId; + pkg = plpgsql_pkg_HashTableLookup(&hashkey); + if (pkg != NULL) { + pkg->use_count--; + } } } diff --git a/src/common/pl/plpgsql/src/pl_package.cpp b/src/common/pl/plpgsql/src/pl_package.cpp index 5918447c0..4b25f72ea 100644 --- a/src/common/pl/plpgsql/src/pl_package.cpp +++ b/src/common/pl/plpgsql/src/pl_package.cpp @@ -1,1596 +1,1652 @@ -/* ------------------------------------------------------------------------- - * - * Portions Copyright (c) 2021, openGauss Contributors - - * IDENTIFICATION - * src/common/pl/plpgsql/src/pl_package.cpp - * ------------------------------------------------------------------------- - */ - -#include - -#include "catalog/namespace.h" -#include "catalog/pg_namespace.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" -#include "catalog/gs_package.h" -#include "catalog/gs_package_fn.h" -#include "catalog/pg_type.h" -#include "executor/spi.h" -#include "funcapi.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/nodes.h" -#include "nodes/pg_list.h" -#include "optimizer/clauses.h" -#include "optimizer/subselect.h" -#include "parser/parse_type.h" -#include "pgxc/locator.h" -#include "utils/pl_package.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/globalplancore.h" -#include "utils/guc.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/inval.h" -#include "utils/rel.h" -#include "utils/rel_gs.h" -#include "utils/syscache.h" -#include "utils/acl.h" -#include "miscadmin.h" -#include "parser/scanner.h" -#include "parser/parser.h" - - - -static void plpgsql_pkg_append_dlcell(plpgsql_pkg_HashEnt* entity); - -static void plpgsql_pkg_HashTableInsert(PLpgSQL_package* pkg, PLpgSQL_pkg_hashkey* pkg_key); - -extern PLpgSQL_package* plpgsql_pkg_HashTableLookup(PLpgSQL_pkg_hashkey* pkg_key); - -extern void plpgsql_compile_error_callback(void* arg); - -static Node* plpgsql_bind_variable_column_ref(ParseState* pstate, ColumnRef* cref); - -/* - * plpgsql_parser_setup_bind set up parser hooks for dynamic parameters - * only support DBE_SQL. - */ -void plpgsql_parser_setup_bind(struct ParseState* pstate, List** expr) -{ - pstate->p_bind_variable_columnref_hook = plpgsql_bind_variable_column_ref; - pstate->p_ref_hook_state = (void*)expr; -} - -/* - * plpgsql_bind_variable_column_ref parser callback after parsing a ColumnRef - * only support DBE_SQL. 
- */ -static Node* plpgsql_bind_variable_column_ref(ParseState* pstate, ColumnRef* cref) -{ - List** expr = (List**)pstate->p_ref_hook_state; - - /* get column name */ - Node* field1 = (Node*)linitial(cref->fields); - AssertEreport(IsA(field1, String), MOD_PLSQL, "string type is required."); - const char* name1 = NULL; - name1 = strVal(field1); - - /* get column type */ - int len = 1; - ListCell* lc_name = NULL; - ListCell* lc_type = NULL; - Oid argtypes = 0; - forboth (lc_type, expr[0], lc_name, expr[1]) { - if (pg_strcasecmp((char *)lfirst(lc_name), name1) != 0) { - len++; - continue; - } - argtypes = lfirst_oid(lc_type); - break; - } - - if (argtypes == 0) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_AMBIGUOUS_COLUMN), - errmsg("argtypes is not valid"), errdetail("Confirm function input parameters."), - errcause("parameters error."), erraction("Confirm function input parameters."))); - } - - /* Generate param and Fill by index(len) */ - Param* param = NULL; - param = makeNode(Param); - param->paramkind = PARAM_EXTERN; - param->paramid = len; - param->paramtype = argtypes; - param->paramtypmod = -1; - param->paramcollid = get_typcollation(param->paramtype); - param->location = cref->location; - return (Node*)param; -} - -static void build_pkg_row_variable(int varno, PLpgSQL_package* pkg, const char* pkgName, const char* nspName); - -bool IsOnlyCompilePackage() -{ - if (u_sess->plsql_cxt.curr_compile_context != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) { - return true; - } else { - return false; - } - return false; -} - - -PLpgSQL_datum* plpgsql_pkg_adddatum(const List* wholeName, char** objname, char** pkgname) -{ - char* nspname = NULL; - PLpgSQL_package* pkg = NULL; - struct PLpgSQL_nsitem* nse = NULL; - PLpgSQL_datum* datum = NULL; - PLpgSQL_pkg_hashkey hashkey; - Oid pkgOid; - Oid namespaceId = InvalidOid; - DeconstructQualifiedName(wholeName, &nspname, objname, pkgname); - if (nspname != NULL) { - namespaceId = LookupExplicitNamespace(nspname); - } - if (*pkgname == NULL) { - return NULL; - } - /* - * Lookup the gs_package tuple by Oid; we'll need it in any case - */ - pkgOid = PackageNameGetOid(*pkgname, namespaceId); - hashkey.pkgOid = pkgOid; - - pkg = plpgsql_pkg_HashTableLookup(&hashkey); - - if (pkg == NULL) { - pkg = PackageInstantiation(pkgOid); - } - /* - * find package variable and return package datum,if not found, - * return NULL - */ - if (pkg != NULL) { - nse = plpgsql_ns_lookup(pkg->public_ns, false, *objname, NULL, NULL, NULL); - if (nse == NULL) { - return NULL; - } - } else { - return NULL; - } - datum = pkg->datums[nse->itemno]; - return datum; -} - -/* - * add package vairable to namespace - */ -int plpgsql_pkg_adddatum2ns(const List* name) -{ - PLpgSQL_datum* datum = NULL; - int varno; - char* objname = NULL; - char* pkgname = NULL; - - datum = plpgsql_pkg_adddatum(name, &objname, &pkgname); - - if (datum == NULL) { - return -1; - } else { - varno = plpgsql_adddatum(datum); - switch (datum->dtype) - { - case PLPGSQL_DTYPE_VAR: - plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, varno, objname, pkgname); - break; - case PLPGSQL_DTYPE_ROW: - plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, varno, objname, pkgname); - break; - case PLPGSQL_DTYPE_RECORD: - plpgsql_ns_additem(PLPGSQL_NSTYPE_RECORD, varno, objname, pkgname); - break; - default: - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized type: %d, 
when build variable in PLSQL, this situation should not occur.", - datum->dtype))); - break; - } - } - return varno; -} - -static void build_pkg_cursor_variable(int varno, PLpgSQL_package* pkg, const char* pkgName, const char* nspName) -{ - int cursorAttrNum = 4; - int dno = -1; - char* refname = NULL; - for (int i = 1; i <= cursorAttrNum; i++) { - dno = plpgsql_adddatum(pkg->datums[varno + i], false); - refname = ((PLpgSQL_variable*)(pkg->datums[varno + i]))->refname; - plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, dno, refname, pkgName, nspName); - } -} - -static void build_pkg_row_variable(int varno, PLpgSQL_package* pkg, const char* pkgName, const char* nspName) -{ - PLpgSQL_row* row = (PLpgSQL_row*)pkg->datums[varno]; - int dno = -1; - PLpgSQL_datum* datum = NULL; - char* refName = NULL; - - for (int i = 0; i < row->nfields; i++) { - datum = row->pkg->datums[row->varnos[i]]; - if (datum != NULL) { - refName = ((PLpgSQL_variable*)datum)->refname; - dno = plpgsql_adddatum(pkg->datums[row->varnos[i]], false); - if (datum->dtype == PLPGSQL_DTYPE_VAR) { - plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, dno, refName, pkgName, nspName); - } else { - plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, dno, refName, pkgName, nspName); - } - } - } -} - - -int plpgsql_build_pkg_variable(List* name, PLpgSQL_datum* datum, bool isSamePkg) -{ - int varno = 0; - char* objname = NULL; - char* pkgname = NULL; - char* nspname = NULL; - - DeconstructQualifiedName(name, &nspname, &objname, &pkgname); - - switch (datum->dtype) { - case PLPGSQL_DTYPE_VAR: { - /* Ordinary scalar datatype */ - PLpgSQL_var* var = (PLpgSQL_var*)datum; - varno = isSamePkg ? var->dno : plpgsql_adddatum(datum, false); - if (var->addNamespace) { - plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, varno, var->refname, pkgname, nspname); - } - if (var->datatype->typoid == REFCURSOROID && !isSamePkg) { - build_pkg_cursor_variable(var->dno, var->pkg, pkgname, nspname); - } - return varno; - } - case PLPGSQL_DTYPE_ROW: { - /* Ordinary scalar datatype */ - PLpgSQL_row* row = (PLpgSQL_row*)datum; - varno = isSamePkg ? row->dno : plpgsql_adddatum(datum, false); - if (row->addNamespace) { - plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, varno, row->refname, pkgname, nspname); - } - if (!isSamePkg) { - build_pkg_row_variable(row->dno, row->pkg, pkgname, nspname); - } - return varno; - } - case PLPGSQL_DTYPE_RECORD: { - /* "record" type -- build a record variable */ - PLpgSQL_row* row = (PLpgSQL_row*)datum; - - varno = isSamePkg ? row->dno : plpgsql_adddatum(datum, false); - if (row->addNamespace) { - plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, varno, row->refname, pkgname, nspname); - } - if (!isSamePkg) { - build_pkg_row_variable(row->dno, row->pkg, pkgname, nspname); - } - return varno; - } - - } - return -1; -} - -/* - * use unknown type when compile a function which has package variable - * because gauss doesn't support compile multiple function at the same time - * so we have a fake compile when compile other functions which has package - * variable.we only check if the package is exist,if not exist,return -1, - * else return the variable number. - */ - -int plpgsql_pkg_add_unknown_var_to_namespace(List* name) -{ -#ifdef ENABLE_MULTIPLE_NODES - return -1; -#endif - - if (list_length(name) >= 4) { - return -1; - } - - bool isSamePkg = false; - PLpgSQL_datum* datum = GetPackageDatum(name, &isSamePkg); - if (datum != NULL) { - return plpgsql_build_pkg_variable(name, datum, isSamePkg); - } else { - return -1; - } - return -1; -} - -/* - * @Description : Check co-location for opexpr. 
we walk through the lists of opexpr - * to check where they are from new/old with all the distributeion keys of both queries. - * - * @in query : Query struct of the query with trigger. - * @in qry_part_attr_num : list of attr no of the dist keys of triggering query. - * @in trig_part_attr_num : list of attr no of the dist keys of query in trigger. - * @in func : trigger function body information. - * @return : when co-location return true. - */ -extern bool plpgsql_check_opexpr_colocate( - Query* query, List* qry_part_attr_num, List* trig_part_attr_num, PLpgSQL_function* func, List* opexpr_list) -{ - ListCell* lc1 = NULL; - ListCell* lc2 = NULL; - bool is_colocate = true; - - forboth(lc1, qry_part_attr_num, lc2, trig_part_attr_num) - { - Expr* expr = NULL; - Param* param = NULL; - PLpgSQL_recfield* recfield = NULL; - PLpgSQL_rec* rec = NULL; - int fno; - AttrNumber attnum1 = lfirst_int(lc1); - AttrNumber attnum2 = lfirst_int(lc2); - ListCell* opexpr_cell = NULL; - - /* Only all distribute column can colocate, we can ship. */ - if (!is_colocate) { - return false; - } - - foreach (opexpr_cell, opexpr_list) { - Expr* qual_expr = (Expr*)lfirst(opexpr_cell); - - /* Check all opexpr with distribute column */ - expr = pgxc_check_distcol_opexpr(query->resultRelation, attnum2, (OpExpr*)qual_expr); - if (expr == NULL) { - is_colocate = false; - continue; - } - - /* NEW/OLD is replaced with param by parser */ - if (!IsA(expr, Param)) { - is_colocate = false; - continue; - } - param = (Param*)expr; - - /* This param should point to datum in func */ - recfield = (PLpgSQL_recfield*)func->datums[param->paramid - 1]; - - /* From there we get the new or old rec */ - rec = (PLpgSQL_rec*)func->datums[recfield->recparentno]; - if (strcmp(rec->refname, "new") != 0 && strcmp(rec->refname, "old") != 0) { - is_colocate = false; - continue; - } - - /* We should already set tupdesc at the very beginning */ - if (rec->tupdesc == NULL) { - is_colocate = false; - continue; - } - - /* - * Find field index of new.a1 and only if it matches to - * current distribution key of src table, we could call - * both tables are DML colocated - */ - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno != attnum1) { - is_colocate = false; - continue; - } - - is_colocate = true; - break; - } - } - - return is_colocate; -} - -/* - * @Description : Check co-location for update or delete command. To check co-location of the - * update or delete query in trigger and the triggering query, we walk through the lists of - * of attribute no of distribution keys of both queries. For each pair, we check - * if the distribute key of trigger table exist in its where clause and its expression is - * from new/old and is the distribute key of the table the triggering query work on. - * - * For example, - * tables: - * create table t1(a1 int, b1 int, c1 int,d1 varchar(100)) distribute by hash(a1,b1); - * create table t2(a2 int, b2 int, c2 int,d2 varchar(100)) distribute by hash(a2,b2); - * update in trigger body: - * update t2 set d2=new.d1 where a2=old.a1 and b2=old.b1; - * triggering event: - * an insert or update on t1 - * - * we walk through two pairs: (a1, a2) and (b1, b2), and check if a2 is in the - * where clause "a2=old.a1 and b2=old.b1", and if its expression is from new/old - * and is a1; if b2 is in the where clause too and its expression is from new/old - * and is b1. If the above two checks are satified, co-location is true. - * - * @in query : Query struct of the query with trigger. 
- * @in qry_part_attr_num : list of attr no of the dist keys of triggering query. - * @in trig_part_attr_num : list of attr no of the dist keys of query in trigger. - * @in func : trigger function body information. - * @return : when co-location return true. - */ -bool plpgsql_check_updel_colocate( - Query* query, List* qry_part_attr_num, List* trig_part_attr_num, PLpgSQL_function* func) -{ - Node* whereClause = NULL; - List* opexpr_list = NIL; - bool is_colocate = true; - - if (query->jointree == NULL || query->jointree->quals == NULL) { - return false; - } - - /* Recursively get a list of opexpr from quals. */ - opexpr_list = pull_opExpr((Node*)query->jointree->quals); - if (opexpr_list == NIL) { - return false; - } - /* Flatten AND/OR expression for checking or expr. */ - whereClause = eval_const_expressions(NULL, (Node*)query->jointree->quals); - - /* If it is or clause, we break the clause to check colocation of each expr */ - if (or_clause(whereClause)) { - List* temp_list = NIL; - ListCell* opexpr_cell = NULL; - - /* For or condtion, we can ship only when all opexpr are colocated. */ - foreach (opexpr_cell, opexpr_list) { - temp_list = list_make1((Expr*)lfirst(opexpr_cell)); - is_colocate = plpgsql_check_opexpr_colocate(query, qry_part_attr_num, trig_part_attr_num, func, temp_list); - - if (temp_list != NIL) { - list_free_ext(temp_list); - } - - if (!is_colocate) { - break; - } - } - - if (opexpr_list != NIL) { - list_free_ext(opexpr_list); - } - return is_colocate; - } else { - /* For and with no or condition, we can ship when any opexpr is colocated. */ - is_colocate = plpgsql_check_opexpr_colocate(query, qry_part_attr_num, trig_part_attr_num, func, opexpr_list); - } - - if (opexpr_list != NIL) { - list_free_ext(opexpr_list); - } - return is_colocate; -} - -/* - * @Description : Check co-location for INSERT/UPDATE/DELETE statement and the - * the statment which it triggered. - * - * @in query : Query struct info about the IUD statement. - * @in rte : range table entry for the insert/update/delete statement. - * @in plpgsql_func : information for the insert/update/delete trigger function. - * @return : true when statement and the triggered statement are co-location. - */ -bool plpgsql_check_colocate(Query* query, RangeTblEntry* rte, void* plpgsql_func) -{ - List* query_partAttrNum = NIL; - List* trig_partAttrNum = NIL; - Relation qe_relation = NULL; /* triggering query's rel */ - Relation tg_relation = NULL; - RelationLocInfo* qe_rel_loc_info = NULL; /* triggering query's rel loc info */ - RelationLocInfo* tg_rel_loc_info = NULL; /* trigger body's rel loc info */ - int qe_rel_nodelist_len = 0; - int tg_rel_nodelist_len = 0; - - Assert(query->commandType == CMD_INSERT || query->commandType == CMD_DELETE || query->commandType == CMD_UPDATE); - - PLpgSQL_function* func = (PLpgSQL_function*)plpgsql_func; - - /* Get event query relation and trigger body's relation. */ - qe_relation = func->tg_relation; - tg_relation = relation_open(rte->relid, AccessShareLock); - - query_partAttrNum = qe_relation->rd_locator_info->partAttrNum; - trig_partAttrNum = rte->partAttrNum; - - /* Check if trigger query table and trigger body table are on the same node list. */ - qe_rel_loc_info = qe_relation->rd_locator_info; - tg_rel_loc_info = tg_relation->rd_locator_info; - - qe_rel_nodelist_len = list_length(qe_rel_loc_info->nodeList); - tg_rel_nodelist_len = list_length(tg_rel_loc_info->nodeList); - - /* Cannot ship whenever target table is not row type. 
*/ - if (!RelationIsRowFormat(tg_relation)) { - relation_close(tg_relation, AccessShareLock); - return false; - } - - /* The query table and trigger table must in a same group. */ - if (0 != strcmp(qe_rel_loc_info->gname.data, tg_rel_loc_info->gname.data)) { - relation_close(tg_relation, AccessShareLock); - return false; - } - relation_close(tg_relation, AccessShareLock); - - /* If distribution key list lengths are different they both are not colocated. */ - if (list_length(trig_partAttrNum) != list_length(query_partAttrNum)) { - return false; - } - - /* - * Used difference check function between INSERT and UPDATE/DELETE here because we use - * targetlist to check INSERT and where clause to check UPDATE/DELETE. - */ - if (query->commandType == CMD_UPDATE || query->commandType == CMD_DELETE) { - return plpgsql_check_updel_colocate(query, query_partAttrNum, trig_partAttrNum, func); - } else { - return plpgsql_check_insert_colocate(query, query_partAttrNum, trig_partAttrNum, func); - } -} - - -bool check_search_path_interface(List *schemas, HeapTuple proc_tup) -{ - if (!SUPPORT_BIND_SEARCHPATH) { - return true; - } - bool isOidListSame = true; - Form_pg_proc proc_struct = (Form_pg_proc)GETSTRUCT(proc_tup); - if (proc_struct->pronamespace != PG_CATALOG_NAMESPACE) { - /* Get lastest search_path if baseSearchPathValid is false */ - recomputeNamespacePath(); - - int len1 = list_length(u_sess->catalog_cxt.baseSearchPath); - /* - * The first element of func->fn_searchpath->schemas is - * namespace the current function belongs to. - */ - int len2 = list_length(schemas) - 1; - Assert(len2 >= 0); - - if (len1 == len2) { - ListCell* lc1 = NULL; - ListCell* lc2 = NULL; - - /* Check whether search_path has changed */ - lc1 = list_head(u_sess->catalog_cxt.baseSearchPath); - lc2 = list_head(schemas); - - /* Check function schema from list second position to list tail */ - lc2 = lnext(lc2); - for (; lc1 && lc2; lc1 = lnext(lc1), lc2 = lnext(lc2)) { - if (lfirst_oid(lc1) != lfirst_oid(lc2)) { - isOidListSame = false; - break; - } - } - } else if (len1 == list_length(schemas)) { - /* in some case, same length maybe same oid list */ - ListCell* lc1 = NULL; - foreach(lc1, schemas) { - if (!list_member_oid(u_sess->catalog_cxt.baseSearchPath, lfirst_oid(lc1))) { - isOidListSame = false; - break; - } - } - } else { - /* If length is different, two lists are different. */ - isOidListSame = false; - } - } - - return isOidListSame; -} - - -void plpgsql_pkg_HashTableDelete(PLpgSQL_package* pkg) -{ - plpgsql_pkg_HashEnt* hentry = NULL; - /* do nothing if not in table */ - if (pkg->pkg_hashkey == NULL) { - return; - } - hentry = (plpgsql_pkg_HashEnt*)hash_search( - u_sess->plsql_cxt.plpgsql_pkg_HashTable, (void*)pkg->pkg_hashkey, HASH_REMOVE, NULL); - if (hentry == NULL) { - elog(WARNING, "trying to delete function that does not exist"); - } else { - /* delete the cell from the list. 
*/ - u_sess->plsql_cxt.plpgsqlpkg_dlist_objects = - dlist_delete_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell, false); - } - /* remove back link, which no longer points to allocated storage */ - pkg->pkg_hashkey = NULL; -} - -void delete_package(PLpgSQL_package* pkg) -{ - pkg->use_count--; - if (pkg->use_count > 0) - return; - ListCell* l = NULL; - foreach(l, pkg->proc_compiled_list) { - PLpgSQL_function* func = (PLpgSQL_function*)lfirst(l); - delete_function(func); - } - /* free package memory,*/ - plpgsql_pkg_HashTableDelete(pkg); - plpgsql_free_package_memory(pkg); -} - -static void plpgsql_pkg_append_dlcell(plpgsql_pkg_HashEnt* entity) -{ - MemoryContext oldctx; - PLpgSQL_package* pkg = NULL; - oldctx = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); - u_sess->plsql_cxt.plpgsqlpkg_dlist_objects = dlappend(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, entity); - (void)MemoryContextSwitchTo(oldctx); - - entity->cell = u_sess->plsql_cxt.plpgsqlpkg_dlist_objects->tail; - while (dlength(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects) > g_instance.attr.attr_sql.max_compile_functions) { - DListCell* headcell = u_sess->plsql_cxt.plpgsqlpkg_dlist_objects->head; - plpgsql_pkg_HashEnt* head_entity = (plpgsql_pkg_HashEnt*)lfirst(headcell); - - pkg = head_entity->package; - - /* delete from the hash and delete the function's compile */ - delete_package(pkg); - pfree_ext(pkg); - } -} - - -void delete_pkg_in_HashTable(Oid pkgOid) -{ - PLpgSQL_pkg_hashkey hashkey; - plpgsql_pkg_HashEnt* hentry = NULL; - hashkey.pkgOid = pkgOid; - bool found = false; - hentry = (plpgsql_pkg_HashEnt*)hash_search(u_sess->plsql_cxt.plpgsql_pkg_HashTable, &hashkey, HASH_REMOVE, NULL); - if (found) { - u_sess->plsql_cxt.plpgsqlpkg_dlist_objects = - dlist_delete_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell, false); - } -} - -static void plpgsql_pkg_HashTableInsert(PLpgSQL_package* pkg, PLpgSQL_pkg_hashkey* pkg_key) -{ - plpgsql_pkg_HashEnt* hentry = NULL; - bool found = false; - hentry = (plpgsql_pkg_HashEnt*)hash_search(u_sess->plsql_cxt.plpgsql_pkg_HashTable, (void*)pkg_key, HASH_ENTER, &found); - if (found) { - /* move cell to the tail of the package list. */ - dlist_add_tail_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell); - elog(WARNING, "trying to insert a package that already exists"); - } else { - /* append the current compiling entity to the end of the compile results list. */ - plpgsql_pkg_append_dlcell(hentry); - } - hentry->package = pkg; - /* prepare back link from function to hashtable key */ - pkg->pkg_hashkey = &hentry->key; - pkg->use_count++; -} - -extern PLpgSQL_package* plpgsql_pkg_HashTableLookup(PLpgSQL_pkg_hashkey* pkg_key) -{ - if (unlikely(u_sess->plsql_cxt.plpgsql_pkg_HashTable == NULL)) - return NULL; - plpgsql_pkg_HashEnt* hentry = NULL; - hentry = (plpgsql_pkg_HashEnt*)hash_search(u_sess->plsql_cxt.plpgsql_pkg_HashTable, (void*)pkg_key, HASH_FIND, NULL); - if (hentry != NULL) { - /* add cell to the tail of the function list. 
*/ - dlist_add_tail_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell); - return hentry->package; - } else { - return NULL; - } -} - -static PLpgSQL_package* do_pkg_compile(Oid pkgOid, HeapTuple pkg_tup, PLpgSQL_package* pkg, PLpgSQL_pkg_hashkey* hashkey, bool isSpec) -{ - Form_gs_package pkg_struct = (Form_gs_package)GETSTRUCT(pkg_tup); - Datum pkgsrcdatum; - Datum pkginitdatum; - bool isnull = false; - char* pkg_source = NULL; - char* pkg_init_source = NULL; - int i; - ErrorContextCallback pl_err_context; - int parse_rc; - Oid* saved_pseudo_current_userId = NULL; - char* signature = NULL; - List* current_searchpath = NIL; - char* namespace_name = NULL; - char context_name[NAMEDATALEN] = {0}; - int rc = 0; - const int alloc_size = 256; - Datum namespaceOidDatum; - Oid namespaceOid = InvalidOid; - /* - * Setup the scanner input and error info. We assume that this function - * cannot be invoked recursively, so there's no need to save and restore - * the static variables used here. - */ - if (isSpec) { - pkgsrcdatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgspecsrc, &isnull); - } else { - pkgsrcdatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgbodydeclsrc, &isnull); - } - - if (isnull) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("The definition of the package is null"), - errhint("Check whether the definition of the function is complete in the pg_proc system table."))); - } - - if (!isSpec) { - pkginitdatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgbodyinitsrc, &isnull); - } else { - isnull = true; - } - if (isnull) { - pkg_init_source = "INSTANTIATION DECLARE BEGIN NULL; END"; - } else if (!isnull && !isSpec) { - pkg_init_source = TextDatumGetCString(pkginitdatum); - } - pkg_source = TextDatumGetCString(pkgsrcdatum); - /* - * Setup error traceback support for ereport() - */ - pl_err_context.callback = plpgsql_compile_error_callback; - pl_err_context.arg = NULL; - pl_err_context.previous = t_thrd.log_cxt.error_context_stack; - t_thrd.log_cxt.error_context_stack = &pl_err_context; - signature = pstrdup(NameStr(pkg_struct->pkgname)); - /* - * All the permanent output of compilation (e.g. parse tree) is kept in a - * per-function memory context, so it can be reclaimed easily. - */ - rc = snprintf_s( - context_name, NAMEDATALEN, NAMEDATALEN - 1, "%s_%lu", "PL/pgSQL package context", u_sess->debug_query_id); - securec_check_ss(rc, "", ""); - /* - * Create the new function struct, if not done already. The function - * structs are never thrown away, so keep them in session memory context. 
- */ - PLpgSQL_compile_context* curr_compile = createCompileContext(context_name); - SPI_NESTCOMPILE_LOG(curr_compile->compile_cxt); - if (pkg == NULL) { - pkg = (PLpgSQL_package*)MemoryContextAllocZero( - SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER), sizeof(PLpgSQL_package)); - pkg->pkg_cxt = curr_compile->compile_cxt; - pkg->pkg_signature = pstrdup(signature); - pkg->pkg_owner = pkg_struct->pkgowner; - pkg->pkg_oid = pkgOid; - pkg->pkg_tid = pkg_tup->t_self; - pkg->proc_list = NULL; - pkg->invalItems = NIL; - } - saved_pseudo_current_userId = u_sess->misc_cxt.Pseudo_CurrentUserId; - u_sess->misc_cxt.Pseudo_CurrentUserId = &pkg->pkg_owner; - pkg->is_spec_compiling = isSpec; - MemoryContext temp = NULL; - if (u_sess->plsql_cxt.curr_compile_context != NULL) { - checkCompileMemoryContext(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); - temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); - } - u_sess->plsql_cxt.curr_compile_context = curr_compile; - pushCompileContext(); - plpgsql_scanner_init(pkg_source); - curr_compile->plpgsql_error_pkgname = pstrdup(NameStr(pkg_struct->pkgname)); - - namespaceOidDatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgnamespace, &isnull); - if (!isnull) { - namespaceOid = DatumGetObjectId(namespaceOidDatum); - } - if (OidIsValid(namespaceOid)) { - pkg->namespaceOid = namespaceOid; - } else { - pkg->namespaceOid = InvalidOid; - } - pkg->is_spec_compiling = isSpec; - if (isSpec) { - u_sess->plsql_cxt.plpgsql_IndexErrorVariable = 0; - } - /* - * compile_tmp_cxt is a short temp context that will be detroyed after - * function compile or execute. - * func_cxt is a long term context that will stay until thread exit. So - * malloc on func_cxt should be very careful. - * signature is stored on a StringInfoData which is 1K byte at least, but - * most signature will not be so long originally, so we should do a strdup. - */ - curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); - pkg->pkg_signature = pstrdup(signature); - pkg->pkg_searchpath = (OverrideSearchPath*)palloc0(sizeof(OverrideSearchPath)); - pkg->pkg_searchpath->addCatalog = true; - pkg->pkg_searchpath->addTemp = true; - pkg->pkg_xmin = HeapTupleGetRawXmin(pkg_tup); - pkg->proc_compiled_list = NULL; - curr_compile->plpgsql_curr_compile_package = pkg; - if (pkg_struct->pkgnamespace == PG_CATALOG_NAMESPACE) { - current_searchpath = fetch_search_path(false); - if (current_searchpath == NIL) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("the search_path is empty while the porc belongs to pg_catalog "))); - } - namespace_name = get_namespace_name(linitial_oid(current_searchpath)); - if (namespace_name == NULL) { - list_free_ext(current_searchpath); - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("cannot find the namespace according to search_path"))); - } - pkg->pkg_searchpath->schemas = current_searchpath; - } else { - /* Assign namespace of current function to fn_searchpath */ - pkg->pkg_searchpath->schemas = list_make1_oid(pkg_struct->pkgnamespace); - if (SUPPORT_BIND_SEARCHPATH) { - /* - * If SUPPORT_BIND_SEARCHPATH is true, - * add system's search_path to fn_searchpath. - * When the relation of other objects cannot be - * found in the namespace of current function, - * find them in search_path list. - * Otherwise, we only find objects in the namespace - * of current function. 
- */
-            ListCell* l = NULL;
-            /* If u_sess->catalog_cxt.namespaceUser and roleid are not equeal,
-             * then u_sess->catalog_cxt.baseSearchPath doesn't
-             * contain currentUser schema.currenUser schema will be added in
-             * PushOverrideSearchPath.
-             *
-             * It can happen executing following statements.
-             *
-             * create temp table t1(a int);
-             * \d t1 --(get schema pg_temp_xxx)
-             * drop table t1;
-             * drop schema pg_temp_xxx cascade;
-             * call proc1() --(proc1 contains create temp table statement)
-             */
-            Oid roleid = GetUserId();
-            if (u_sess->catalog_cxt.namespaceUser != roleid) {
-                pkg->pkg_searchpath->addUser = true;
-            }
-            /* Use baseSearchPath not activeSearchPath. */
-            foreach (l, u_sess->catalog_cxt.baseSearchPath) {
-                Oid namespaceId = lfirst_oid(l);
-                /*
-                 * Append namespaceId to fn_searchpath directly.
-                 */
-                pkg->pkg_searchpath->schemas = lappend_oid(pkg->pkg_searchpath->schemas, namespaceId);
-            }
-        }
-    }
-    pfree_ext(signature);
-    curr_compile->plpgsql_curr_compile_package->proc_compiled_list = NULL;
-    /*
-     * Initialize the compiler, particularly the namespace stack. The
-     * outermost namespace contains function parameters and other special
-     * variables (such as FOUND), and is named after the function itself.
-     */
-    curr_compile->datums_pkg_alloc = alloc_size;
-    curr_compile->plpgsql_pkg_nDatums = 0;
-    /* This is short-lived, so needn't allocate in function's cxt */
-    curr_compile->plpgsql_Datums = (PLpgSQL_datum**)MemoryContextAlloc(
-        curr_compile->compile_tmp_cxt, sizeof(PLpgSQL_datum*) * curr_compile->datums_pkg_alloc);
-    curr_compile->datums_last = 0;
-    PushOverrideSearchPath(pkg->pkg_searchpath);
-    plpgsql_ns_init();
-    plpgsql_ns_push(NameStr(pkg_struct->pkgname));
-    add_pkg_compile();
-    curr_compile->datums_last = curr_compile->plpgsql_nDatums;
-    curr_compile->plpgsql_pkg_DumpExecTree = false;
-    /*
-     * Now parse the function's text
-     */
-    bool saved_flag = u_sess->plsql_cxt.have_error;
-    u_sess->plsql_cxt.have_error = false;
-    parse_rc = plpgsql_yyparse();
-    if (parse_rc != 0) {
-        ereport(ERROR,
-            (errmodule(MOD_PLSQL),
-                errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
-                errmsg("Syntax parsing error, plpgsql parser returned %d", parse_rc)));
-    }
-    plpgsql_scanner_finish();
-    pfree_ext(pkg_source);
-
-    if (pkg_init_source != NULL) {
-        plpgsql_scanner_init(pkg_init_source);
-        parse_rc = plpgsql_yyparse();
-        if (parse_rc != 0) {
-            ereport(ERROR,
-                (errmodule(MOD_PLSQL),
-                    errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
-                    errmsg("Syntax parsing error, plpgsql parser returned %d", parse_rc)));
-        }
-        plpgsql_scanner_finish();
-    }
-
-    PopOverrideSearchPath();
-    u_sess->misc_cxt.Pseudo_CurrentUserId = saved_pseudo_current_userId;
-#ifndef ENABLE_MULTIPLE_NODES
-    if (u_sess->plsql_cxt.have_error && u_sess->attr.attr_common.plsql_show_all_error) {
-        u_sess->plsql_cxt.have_error = false;
-        ereport(ERROR,
-            (errmodule(MOD_PLSQL),
-                errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                errmsg("Debug mod,create procedure has error."),
-                errdetail("N/A"),
-                errcause("compile procedure error."),
-                erraction("check procedure error and redefine procedure")));
-    }
-#endif
-    u_sess->plsql_cxt.have_error = saved_flag;
-    pkg->ndatums = curr_compile->plpgsql_pkg_nDatums;
-    pkg->datums = (PLpgSQL_datum**)palloc(sizeof(PLpgSQL_datum*) * curr_compile->plpgsql_pkg_nDatums);
-    for (i = 0; i < curr_compile->plpgsql_pkg_nDatums; i++) {
-        pkg->datums[i] = curr_compile->plpgsql_Datums[i];
-        if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_VAR) {
-            PLpgSQL_var* var = reinterpret_cast<PLpgSQL_var*>(pkg->datums[i]);
-            if (var->pkg == NULL) {
-                var->pkg = pkg;
-
var->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); - } - } else if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_ROW) { - PLpgSQL_row* row = (PLpgSQL_row*)pkg->datums[i]; - if (row->pkg == NULL) { - row->pkg = pkg; - row->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); - } - } else if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_RECORD) { - PLpgSQL_row* row = (PLpgSQL_row*)pkg->datums[i]; - if (row->pkg == NULL) { - row->pkg = pkg; - row->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); - } - } else if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_REC) { - PLpgSQL_rec* rec = (PLpgSQL_rec*)pkg->datums[i]; - if (rec->pkg == NULL) { - rec->pkg = pkg; - rec->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); - } - } - } - - if (isSpec) { - pkg->public_ns = curr_compile->ns_top; - pkg->is_bodycompiled = false; - } else { - pkg->private_ns = curr_compile->ns_top; - pkg->is_bodycompiled = true; - } - pkg->proc_list = curr_compile->plpgsql_curr_compile_package->proc_list; - if (!isSpec) { - pkg->is_bodycompiled = true; - } - MemoryContext oldcxt = MemoryContextSwitchTo(pkg->pkg_cxt); - pkg->proc_compiled_list = curr_compile->plpgsql_curr_compile_package->proc_compiled_list; - if (hashkey && isSpec) { - plpgsql_pkg_HashTableInsert(pkg, hashkey); - } - t_thrd.log_cxt.error_context_stack = pl_err_context.previous; - curr_compile->plpgsql_error_funcname = NULL; - curr_compile->plpgsql_check_syntax = false; - MemoryContextSwitchTo(oldcxt); - MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); - curr_compile->compile_tmp_cxt = NULL; - curr_compile->plpgsql_curr_compile = NULL; - ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), - errmsg("%s finish compile, level: %d", __func__, list_length(u_sess->plsql_cxt.compile_context_list)))); - u_sess->plsql_cxt.curr_compile_context = popCompileContext(); - clearCompileContext(curr_compile); - if (temp != NULL) { - MemoryContextSwitchTo(temp); - } - return pkg; -} - -List* GetPackageListName(const char* pkgName, const Oid nspOid) -{ - StringInfoData nameData; - List* nameList = NULL; - initStringInfo(&nameData); - char* schemaName = get_namespace_name(nspOid); - appendStringInfoString(&nameData, schemaName); - appendStringInfoString(&nameData, "."); - appendStringInfoString(&nameData, pkgName); - nameList = stringToQualifiedNameList(nameData.data); - pfree_ext(nameData.data); - return nameList; -} - -/* - * compile and init package by package oid - */ -PLpgSQL_package* plpgsql_pkg_compile(Oid pkgOid, bool for_validator, bool isSpec, bool isCreate) -{ -#ifdef ENABLE_MULTIPLE_NODES - ereport(ERROR, (errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), - errmsg("not support create package in distributed database"))); -#endif - HeapTuple pkg_tup = NULL; - Form_gs_package pkg_struct = NULL; - PLpgSQL_package* pkg = NULL; - PLpgSQL_pkg_hashkey hashkey; - bool pkg_valid = false; - /* - * Lookup the gs_package tuple by Oid; we'll need it in any case - */ - pkg_tup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkgOid)); - AclResult aclresult = pg_package_aclcheck(pkgOid, GetUserId(), ACL_EXECUTE); - Form_gs_package pkgForm = (Form_gs_package)GETSTRUCT(pkg_tup); - NameData pkgname = pkgForm->pkgname; - if (aclresult != ACLCHECK_OK) { - aclcheck_error(aclresult, ACL_KIND_PACKAGE, pkgname.data); - } - if (!HeapTupleIsValid(pkg_tup)) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("cache lookup failed for package %u, while compile 
package", pkgOid))); - } - pkg_struct = (Form_gs_package)GETSTRUCT(pkg_tup); - hashkey.pkgOid = pkgOid; - pkg = plpgsql_pkg_HashTableLookup(&hashkey); - - if (pkg != NULL) { - Assert(pkg->pkg_oid == pkgOid); - if (pkg->pkg_xmin == HeapTupleGetRawXmin(pkg_tup) && - ItemPointerEquals(&pkg->pkg_tid, &pkg_tup->t_self)) { - pkg_valid = true; - } else { - /* need reuse pkg slot in hash table later, we need clear all refcount for this pkg and delete it here */ - plpgsql_HashTableDeleteAndCheckFunc(PACKAGEOID, pkgOid); - pkg_valid = false; - } - } - PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; - PG_TRY(); - { - if (!pkg_valid) { - pkg = NULL; - pkg = do_pkg_compile(pkgOid, pkg_tup, pkg, &hashkey, true); - PackageInit(pkg, isCreate); - if (!isSpec && pkg != NULL) { - pkg = do_pkg_compile(pkgOid, pkg_tup, pkg, &hashkey, false); - if (pkg == NULL) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("package %u not found", pkgOid))); - } - ReleaseSysCache(pkg_tup); - pkg_tup = NULL; - PackageInit(pkg, isCreate); - } else if(!isSpec && pkg == NULL) { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("package spec %u not found", pkgOid))); - } - } else { - if (!pkg->is_bodycompiled && !isSpec) { - pkg = do_pkg_compile(pkgOid, pkg_tup, pkg, &hashkey, false); - } - - /* package must be compiled befor init */ - if (pkg != NULL) { - PackageInit(pkg, isCreate); - } else { - ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), - errmsg("package spec %u not found", pkgOid))); - } - } - } - PG_CATCH(); - { -#ifndef ENABLE_MULTIPLE_NODES - if (u_sess->attr.attr_common.plsql_show_all_error && isCreate) { - InsertError(pkgOid); - } -#endif - popToOldCompileContext(save_compile_context); - PG_RE_THROW(); - } - PG_END_TRY(); - if (HeapTupleIsValid(pkg_tup)) { - ReleaseSysCache(pkg_tup); - pkg_tup = NULL; - } - /* - * Finally return the compiled function - */ - return pkg; -} - - -/* - * error context callback to let us supply a call-stack traceback. - * If we are validating or executing an anonymous code block, the function - * source text is passed as an argument. - */ -void plpgsql_compile_error_callback(void* arg) -{ - if (arg != NULL) { - /* - * Try to convert syntax error position to reference text of original - * CREATE FUNCTION or DO command. - */ - if (function_parse_error_transpose((const char*)arg)) { - return; - } - /* - * Done if a syntax error position was reported; otherwise we have to - * fall back to a "near line N" report. 
- */ - } - int rc = CompileWhich(); - if (rc == PLPGSQL_COMPILE_PROC) { - if (u_sess->plsql_cxt.curr_compile_context != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_error_funcname) { - errcontext("compilation of PL/pgSQL function \"%s\" near line %d", - u_sess->plsql_cxt.curr_compile_context->plpgsql_error_funcname, plpgsql_latest_lineno()); - } - } else if (rc != PLPGSQL_COMPILE_NULL) { - errcontext("compilation of PL/pgSQL package near line %d", - plpgsql_latest_lineno()); - } -} - -Oid findPackageParameter(const char* objname) -{ - Oid toid = InvalidOid; - if (u_sess->plsql_cxt.curr_compile_context != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL) { - PLpgSQL_package* pkg = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package; - PLpgSQL_nsitem* ns = NULL; - ns = plpgsql_ns_lookup(pkg->public_ns, false, objname, NULL, NULL, NULL); - if (ns == NULL) { - ns = plpgsql_ns_lookup(pkg->private_ns, false, objname, NULL, NULL, NULL); - } - if (ns != NULL) { - switch (ns->itemtype) { - case PLPGSQL_NSTYPE_REFCURSOR: - toid = REFCURSOROID; - break; - case PLPGSQL_NSTYPE_RECORD: - toid = RECORDOID; - break; - case PLPGSQL_NSTYPE_VAR: { - PLpgSQL_var* var = (PLpgSQL_var*)pkg->datums[ns->itemno]; - if (var->datatype->typoid == REFCURSOROID && OidIsValid(var->datatype->cursorCompositeOid)) { - toid = var->datatype->cursorCompositeOid; - } else { - toid = InvalidOid; - } - break; - } - default: - toid = InvalidOid; - } - } - } - return toid; -} - -int GetLineNumber(const char* procedureStr, int loc) -{ -#ifdef ENABLE_MULTIPLE_NODES - return 0; -#endif - int lines = 1; - int jumpWords = 0; - if (procedureStr == nullptr) { - return 0; - } - if (!strncmp(procedureStr, " DECLARE ", strlen(" DECLARE "))) { - if (strlen(procedureStr) > strlen(" DECLARE ")) { - if (procedureStr[strlen(" DECLARE ")] == '\n') { - jumpWords = strlen(" DECLARE "); - } - } - } else if (!strncmp(procedureStr, " PACKAGE DECLARE ", strlen(" PACKAGE DECLARE "))) { - if (strlen(procedureStr) > strlen(" PACKAGE DECLARE ")) { - if (procedureStr[strlen(" PACKAGE DECLARE ")] == '\n') { - jumpWords = strlen(" PACKAGE DECLARE "); - } - } - } - if (procedureStr == NULL || loc < 0) { - return 0; - } - if (jumpWords > loc) { - return 0; - } - int i = jumpWords; - while (i-- >= 0) { - procedureStr++; - } - for (int i = jumpWords; i < loc; i++) { - if (*procedureStr == '\n') { - lines++; - } - procedureStr++; - } - return lines; -} - -/* - get the correct line number in package or procedure,it's line number - start with "Create" -*/ -int GetProcedureLineNumberInPackage(const char* procedureStr, int loc) -{ -#ifndef ENABLE_MULTIPLE_NODES - if (!u_sess->attr.attr_common.plsql_show_all_error) { - return 0; - } -#else - return 0; -#endif - int lines = GetLineNumber(procedureStr, loc); - int rc = CompileWhich(); - if (rc == PLPGSQL_COMPILE_PACKAGE_PROC) { - lines = u_sess->plsql_cxt.package_first_line + u_sess->plsql_cxt.procedure_start_line + u_sess->plsql_cxt.procedure_first_line + lines - 3; - return lines > 0 ? lines : 1; - } else if (rc == PLPGSQL_COMPILE_PACKAGE) { - if (u_sess->plsql_cxt.procedure_start_line > 0) { - lines = u_sess->plsql_cxt.procedure_start_line + u_sess->plsql_cxt.package_first_line - 1; - return lines > 0 ? lines : 1; - } else { - if (lines <= 1) { - lines = u_sess->plsql_cxt.package_first_line; - } else { - lines = u_sess->plsql_cxt.package_first_line + lines - 1; - } - return lines > 0 ? 
lines : 1; - } - } - else if (rc == PLPGSQL_COMPILE_PROC) { - lines = u_sess->plsql_cxt.procedure_first_line + lines; - return lines > 0 ? lines : 1; - } else { - ereport(ERROR, - (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_PACKAGE), - errmsg("not found package or procedure."), - errdetail("package may has error"), - errcause("not found package and procedure"), - erraction("retry"))); - } - return lines; -} - -/* - insert error line number and message into DBE_PLDEVELOPER.gs_errors -*/ -void InsertError(Oid objId) -{ - if (u_sess->attr.attr_common.upgrade_mode != 0 || u_sess->plsql_cxt.errorList == NULL) { - return; - } -#ifdef ENABLE_MULTIPLE_NODES - return; -#else - Oid id = InvalidOid; - Oid nspid = InvalidOid; - char* name = NULL; - char* type = NULL; - Oid userId = (Oid)u_sess->misc_cxt.CurrentUserId; - int rc = CompileWhich(); - if (u_sess->plsql_cxt.errorList == NULL || - !u_sess->attr.attr_common.plsql_show_all_error || IsTransactionBlock()) { - return; - } - if (rc == PLPGSQL_COMPILE_PROC) { - id = objId; - HeapTuple tuple; - bool isnull = false; - PLpgSQL_function* func = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile; - tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(func->fn_oid)); - Form_pg_proc procStruct = (Form_pg_proc)GETSTRUCT(tuple); - nspid = procStruct->pronamespace; - name = NameStr(procStruct->proname); - Datum prokindDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_prokind, &isnull); - /* prokind maybe null */ - char prokind; - if (isnull) { - prokind = 'f'; - } else { - prokind = CharGetDatum(prokindDatum); - } - if (PROC_IS_PRO(prokind)) { - type = "procedure"; - } else { - type = "function"; - } - ReleaseSysCache(tuple); - } else if ((rc == PLPGSQL_COMPILE_PACKAGE || - rc == PLPGSQL_COMPILE_PACKAGE_PROC)) { - HeapTuple tuple; - PLpgSQL_package* pkg = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package; - id = pkg->pkg_oid; - tuple = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkg->pkg_oid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_PACKAGE), - errmsg("not found package."), - errdetail("package may has error"), - errcause("create package may has error"), - erraction("please check package"))); - } - Form_gs_package pkgStruct = (Form_gs_package)GETSTRUCT(tuple); - nspid = pkgStruct->pkgnamespace; - name = NameStr(pkgStruct->pkgname); - if (pkg->is_spec_compiling) { - type = "package"; - } else { - type = "package body"; - } - ReleaseSysCache(tuple); - } - StringInfoData ds; - initStringInfo(&ds); - appendStringInfoString(&ds, - "declare\n" - "PRAGMA AUTONOMOUS_TRANSACTION;\n" - "oldId int:=0;" - "objId int:=0;" - "allNum int:=0;\n" - "begin\n "); - appendStringInfo(&ds, - "select count(*) from dbe_pldeveloper.gs_source into allNum where " - "nspid=%u and name=\'%s\' and type=\'%s\';", nspid, name, type); - appendStringInfo(&ds, - "if allNum > 0 then " - "select id from dbe_pldeveloper.gs_source into oldId where " - "nspid=%u and name=\'%s\' and type=\'%s\';" - "objId := oldId; " - "else " - "objId := %u;" - "end if;", nspid, name, type, objId); - appendStringInfo(&ds, - "delete from DBE_PLDEVELOPER.gs_errors where nspid=%u and name=\'%s\' and type = \'%s\';\n", - nspid, name, type); - char* errmsg = NULL; - int line = 0; - if (rc != PLPGSQL_COMPILE_NULL) { - ListCell* cell = NULL; - foreach (cell, u_sess->plsql_cxt.errorList) { - PLpgSQL_error* item = (PLpgSQL_error*)lfirst(cell); - errmsg = item->errmsg; - line = item->line; - appendStringInfoString(&ds, "insert 
into DBE_PLDEVELOPER.gs_errors "); - appendStringInfo(&ds, "values(objId,%u,%u,\'%s\',\'%s\',%d,$gserrors$%s$gserrors$);\n", - userId, nspid, name, type, line, errmsg); - } - } - appendStringInfo(&ds, "EXCEPTION WHEN OTHERS THEN NULL; \n"); - appendStringInfo(&ds, "end;"); - List* rawParserList = NULL; - rawParserList = raw_parser(ds.data); - DoStmt* stmt = (DoStmt *)linitial(rawParserList); - u_sess->plsql_cxt.insertError = true; - int save_compile_status = getCompileStatus(); - int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); - PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; - MemoryContext temp = NULL; - if (u_sess->plsql_cxt.curr_compile_context != NULL) { - temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); - } - PG_TRY(); - { - (void)CompileStatusSwtichTo(NONE_STATUS); - u_sess->plsql_cxt.curr_compile_context = NULL; - ExecuteDoStmt(stmt, true); - } - PG_CATCH(); - { - if (temp != NULL) { - MemoryContextSwitchTo(temp); - } - (void)CompileStatusSwtichTo(save_compile_status); - u_sess->plsql_cxt.curr_compile_context = save_compile_context; - clearCompileContextList(save_compile_list_length); - PG_RE_THROW(); - } - PG_END_TRY(); - u_sess->plsql_cxt.curr_compile_context = save_compile_context; - (void)CompileStatusSwtichTo(save_compile_status); - if (temp != NULL) { - MemoryContextSwitchTo(temp); - } - u_sess->plsql_cxt.insertError = false; - pfree_ext(ds.data); - list_free_deep(u_sess->plsql_cxt.errorList); - u_sess->plsql_cxt.errorList = NULL; -#endif -} - -/* - insert error line number and message into DBE_PLDEVELOPER.gs_errors -*/ -void DropErrorByOid(int objtype, Oid objoid) -{ - bool notInsert = u_sess->attr.attr_common.upgrade_mode != 0 || SKIP_GS_SOURCE || IsTransactionBlock(); - if (notInsert) { - return; - } - -#ifdef ENABLE_MULTIPLE_NODES - return; -#else - char* name = NULL; - char* type = NULL; - Oid nspid = InvalidOid; - if (objtype == PLPGSQL_PROC) { - HeapTuple tuple; - bool isnull = false; - tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(objoid)); - Form_pg_proc procStruct = (Form_pg_proc)GETSTRUCT(tuple); - nspid = procStruct->pronamespace; - name = NameStr(procStruct->proname); - Datum prokindDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_prokind, &isnull); - /* prokind maybe null */ - char prokind; - if (isnull) { - prokind = 'f'; - } else { - prokind = CharGetDatum(prokindDatum); - } - if (PROC_IS_PRO(prokind)) { - type = "procedure"; - } else { - type = "function"; - } - ReleaseSysCache(tuple); - } else if ((objtype == PLPGSQL_PACKAGE || - objtype == PLPGSQL_PACKAGE_BODY)) { - HeapTuple tuple = NULL; - tuple = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(objoid)); - if (!HeapTupleIsValid(tuple)) { - ereport(ERROR, - (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_PACKAGE), - errmsg("not found package."), - errdetail("package may has error"), - errcause("create package may has error"), - erraction("please check package"))); - } - Form_gs_package pkgStruct = (Form_gs_package)GETSTRUCT(tuple); - nspid = pkgStruct->pkgnamespace; - name = NameStr(pkgStruct->pkgname); - if (objtype == PLPGSQL_PACKAGE) { - type = "package"; - } else { - type = "package body"; - } - ReleaseSysCache(tuple); - } - StringInfoData ds; - initStringInfo(&ds); - appendStringInfoString(&ds, " declare begin "); - if (objtype == PLPGSQL_PACKAGE_BODY) { - appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_errors " - "where nspid=%u and name = \'%s\' and type = \'%s\';", - nspid, 
name, type); - appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_source where " - "nspid=%u and name = \'%s\' and type = \'%s\';", - nspid, name, type); - } else { - appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_errors " - "where nspid=%u and name = \'%s\' and type = \'%s\';", - nspid, name, type); - appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_errors " - "where nspid=%u and name = \'%s\' and type = \'package body\';", - nspid, name); - appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_source where " - "nspid=%u and name = \'%s\' and type = \'%s\';", - nspid, name, type); - appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_source where " - "nspid=%u and name = \'%s\' and type = \'package body\';", - nspid, name); - } - appendStringInfo(&ds, " EXCEPTION WHEN OTHERS THEN NULL; \n"); - appendStringInfo(&ds, " END; "); - List* rawParserList = raw_parser(ds.data); - DoStmt* stmt = (DoStmt *)linitial(rawParserList);; - int save_compile_status = getCompileStatus(); - int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); - PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; - MemoryContext temp = NULL; - if (u_sess->plsql_cxt.curr_compile_context != NULL) { - temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); - } - PG_TRY(); - { - (void)CompileStatusSwtichTo(NONE_STATUS); - u_sess->plsql_cxt.curr_compile_context = NULL; - ExecuteDoStmt(stmt, true); - } - PG_CATCH(); - { - if (temp != NULL) { - MemoryContextSwitchTo(temp); - } - (void)CompileStatusSwtichTo(save_compile_status); - u_sess->plsql_cxt.curr_compile_context = save_compile_context; - clearCompileContextList(save_compile_list_length); - PG_RE_THROW(); - } - PG_END_TRY(); - u_sess->plsql_cxt.curr_compile_context = save_compile_context; - (void)CompileStatusSwtichTo(save_compile_status); - if (temp != NULL) { - MemoryContextSwitchTo(temp); - } - pfree_ext(ds.data); -#endif -} - -int CompileWhich() -{ - if (u_sess->plsql_cxt.curr_compile_context == NULL) { - return PLPGSQL_COMPILE_NULL; - } - - if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL) { - if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_signature == NULL) { - return PLPGSQL_COMPILE_NULL; - } else { - return PLPGSQL_COMPILE_PACKAGE_PROC; - } - } else if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) { - return PLPGSQL_COMPILE_PACKAGE; - } else if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL && - u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL) { - if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_signature == NULL) { - return PLPGSQL_COMPILE_NULL; - } else { - return PLPGSQL_COMPILE_PROC; - } - } else { - return PLPGSQL_COMPILE_NULL; - } - return PLPGSQL_COMPILE_NULL; -} - -void InsertErrorMessage(const char* message, int yyloc, bool isQueryString, int lines) -{ -#ifdef ENABLE_MULTIPLE_NODES - return; -#else - int rc = CompileWhich(); - if (rc == PLPGSQL_COMPILE_NULL || - !u_sess->attr.attr_common.plsql_show_all_error) { - return; - } -#endif - u_sess->plsql_cxt.have_error = true; - if (!isQueryString && lines == 0) { - lines = GetProcedureLineNumberInPackage(u_sess->plsql_cxt.curr_compile_context->core_yy->scanbuf, yyloc); - } else if (lines == 0) { - 
lines = GetProcedureLineNumberInPackage(t_thrd.postgres_cxt.debug_query_string, yyloc);
-    }
-    addErrorList(message, lines);
-}
+/* -------------------------------------------------------------------------
+ *
+ * Portions Copyright (c) 2021, openGauss Contributors
+ *
+ * IDENTIFICATION
+ *    src/common/pl/plpgsql/src/pl_package.cpp
+ * -------------------------------------------------------------------------
+ */
+
+#include
+
+#include "catalog/namespace.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_proc_fn.h"
+#include "catalog/gs_package.h"
+#include "catalog/gs_package_fn.h"
+#include "catalog/pg_type.h"
+#include "executor/spi.h"
+#include "funcapi.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/nodes.h"
+#include "nodes/pg_list.h"
+#include "optimizer/clauses.h"
+#include "optimizer/subselect.h"
+#include "parser/parse_type.h"
+#include "pgxc/locator.h"
+#include "utils/pl_package.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/globalplancore.h"
+#include "utils/guc.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/inval.h"
+#include "utils/rel.h"
+#include "utils/rel_gs.h"
+#include "utils/syscache.h"
+#include "utils/acl.h"
+#include "miscadmin.h"
+#include "parser/scanner.h"
+#include "parser/parser.h"
+
+
+
+static void plpgsql_pkg_append_dlcell(plpgsql_pkg_HashEnt* entity);
+
+static void plpgsql_pkg_HashTableInsert(PLpgSQL_package* pkg, PLpgSQL_pkg_hashkey* pkg_key);
+
+extern PLpgSQL_package* plpgsql_pkg_HashTableLookup(PLpgSQL_pkg_hashkey* pkg_key);
+
+extern void plpgsql_compile_error_callback(void* arg);
+
+static Node* plpgsql_bind_variable_column_ref(ParseState* pstate, ColumnRef* cref);
+static Node* plpgsql_describe_ref(ParseState* pstate, ColumnRef* cref);
+
+/*
+ * plpgsql_parser_setup_bind sets up parser hooks for dynamic parameters;
+ * only supported for DBE_SQL.
+ */
+void plpgsql_parser_setup_bind(struct ParseState* pstate, List** expr)
+{
+    pstate->p_bind_variable_columnref_hook = plpgsql_bind_variable_column_ref;
+    pstate->p_bind_hook_state = (void*)expr;
+}
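The hook registered above rewrites every named bind variable into a positional `PARAM_EXTERN` parameter: the callback defined next scans the parallel type/name lists carried in `p_bind_hook_state`, and the 1-based position of the first case-insensitive name match becomes `paramid`. A minimal standalone sketch of just that mapping rule (simplified stand-in types, not the server code):

```
#include <cassert>
#include <string>
#include <strings.h>
#include <vector>

/* Model of the paramid assignment done by the bind-variable column-ref hook:
 * walk the bind-variable names in order and return the 1-based index of the
 * first case-insensitive match, or 0 when the name is unknown. */
static int bind_param_index(const std::vector<std::string>& names, const char* ref)
{
    int idx = 1;
    for (const auto& n : names) {
        if (strcasecmp(n.c_str(), ref) == 0) {
            return idx; /* becomes param->paramid */
        }
        idx++;
    }
    return 0; /* corresponds to the "argtypes is not valid" error path */
}

int main()
{
    std::vector<std::string> names = {"id", "name", "salary"};
    assert(bind_param_index(names, "NAME") == 2);    /* case-insensitive, 1-based */
    assert(bind_param_index(names, "missing") == 0); /* unknown name -> error     */
    return 0;
}
```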
+
+/*
+ * plpgsql_bind_variable_column_ref: parser callback invoked after a ColumnRef is parsed;
+ * only supported for DBE_SQL.
+ */
+static Node* plpgsql_bind_variable_column_ref(ParseState* pstate, ColumnRef* cref)
+{
+    List** expr = (List**)pstate->p_bind_hook_state;
+
+    /* get column name */
+    Node* field1 = (Node*)linitial(cref->fields);
+    AssertEreport(IsA(field1, String), MOD_PLSQL, "string type is required.");
+    const char* name1 = NULL;
+    name1 = strVal(field1);
+
+    /* get column type */
+    int len = 1;
+    ListCell* lc_name = NULL;
+    ListCell* lc_type = NULL;
+    Oid argtypes = 0;
+    forboth (lc_type, expr[0], lc_name, expr[1]) {
+        if (pg_strcasecmp((char *)lfirst(lc_name), name1) != 0) {
+            len++;
+            continue;
+        }
+        argtypes = lfirst_oid(lc_type);
+        break;
+    }
+
+    if (argtypes == 0) {
+        ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_AMBIGUOUS_COLUMN),
+            errmsg("argtypes is not valid"), errdetail("Confirm function input parameters."),
+            errcause("parameters error."), erraction("Confirm function input parameters.")));
+    }
+
+    /* Generate the Param and fill it in by index (len) */
+    Param* param = NULL;
+    param = makeNode(Param);
+    param->paramkind = PARAM_EXTERN;
+    param->paramid = len;
+    param->paramtype = argtypes;
+    param->paramtypmod = -1;
+    param->paramcollid = get_typcollation(param->paramtype);
+    param->location = cref->location;
+    return (Node*)param;
+}
+
+/*
+ * plpgsql_parser_setup_describe sets up parser hooks for dynamic parameters;
+ * only supported for DBE_SQL.
+ */
+void plpgsql_parser_setup_describe(struct ParseState* pstate, List** expr)
+{
+    pstate->p_bind_describe_hook = plpgsql_describe_ref;
+    pstate->p_describeco_hook_state = (void*)expr;
+}
+
+/*
+ * plpgsql_describe_ref: parser callback invoked after a ColumnRef is parsed;
+ * only supported for DBE_SQL.
+ */
+static Node* plpgsql_describe_ref(ParseState* pstate, ColumnRef* cref)
+{
+    Assert(pstate);
+    /* Generate a placeholder Param; the real type is resolved later */
+    Param* param = NULL;
+    param = makeNode(Param);
+    param->paramkind = PARAM_EXTERN;
+    param->paramid = 0;
+    param->paramtype = 20; /* INT8OID */
+    param->paramtypmod = -1;
+    param->location = cref->location;
+    return (Node*)param;
+}
+
+static void build_pkg_row_variable(int varno, PLpgSQL_package* pkg, const char* pkgName, const char* nspName);
+
+bool IsOnlyCompilePackage()
+{
+    if (u_sess->plsql_cxt.curr_compile_context != NULL &&
+        u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL &&
+        u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) {
+        return true;
+    } else {
+        return false;
+    }
+}
+
+
+PLpgSQL_datum* plpgsql_pkg_adddatum(const List* wholeName, char** objname, char** pkgname)
+{
+    char* nspname = NULL;
+    PLpgSQL_package* pkg = NULL;
+    struct PLpgSQL_nsitem* nse = NULL;
+    PLpgSQL_datum* datum = NULL;
+    PLpgSQL_pkg_hashkey hashkey;
+    Oid pkgOid;
+    Oid namespaceId = InvalidOid;
+    DeconstructQualifiedName(wholeName, &nspname, objname, pkgname);
+    if (nspname != NULL) {
+        namespaceId = LookupExplicitNamespace(nspname);
+    }
+    if (*pkgname == NULL) {
+        return NULL;
+    }
+    /*
+     * Lookup the gs_package tuple by Oid; we'll need it in any case
+     */
+    pkgOid = PackageNameGetOid(*pkgname, namespaceId);
+    hashkey.pkgOid = pkgOid;
+
+    pkg = plpgsql_pkg_HashTableLookup(&hashkey);
+
+    if (pkg == NULL) {
+        pkg = PackageInstantiation(pkgOid);
+    }
+    /*
+     * Find the package variable and return the package datum; if it is not
+     * found, return NULL.
+     */
+    if (pkg != NULL) {
+        nse = plpgsql_ns_lookup(pkg->public_ns, false, *objname, NULL, NULL, NULL);
+        if (nse == NULL) {
+            return NULL;
+        }
+    } else {
+        return NULL;
+    }
+    datum = pkg->datums[nse->itemno];
+    return datum;
+}
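`plpgsql_pkg_adddatum` above resolves a qualified reference such as `schema.pkg.var` in three steps: deconstruct the name, fetch the package from the session hash table (instantiating it on a miss), then search only the package's *public* namespace, so private package variables stay invisible to outside callers. A compact model of that lookup order, with simplified stand-in types rather than the server structures:

```
#include <cassert>
#include <map>
#include <string>

/* Stand-in for a compiled package: only its public namespace matters here. */
struct Package {
    std::map<std::string, int> public_ns; /* variable name -> datum index */
};

static std::map<unsigned int, Package> session_cache; /* models the session package hash table */

/* Mirrors the lookup order: session cache first, instantiate on a miss,
 * then search only the public namespace. */
static int resolve_pkg_var(unsigned int pkgOid, const std::string& var, Package (*instantiate)(unsigned int))
{
    auto it = session_cache.find(pkgOid);
    if (it == session_cache.end()) {
        it = session_cache.emplace(pkgOid, instantiate(pkgOid)).first;
    }
    auto nse = it->second.public_ns.find(var);
    return nse == it->second.public_ns.end() ? -1 : nse->second;
}

int main()
{
    auto instantiate = [](unsigned int) { return Package{{{"g_counter", 3}}}; };
    assert(resolve_pkg_var(42, "g_counter", instantiate) == 3);
    assert(resolve_pkg_var(42, "hidden", instantiate) == -1); /* not public -> caller sees nothing */
    return 0;
}
```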
+
+/*
+ * add a package variable to the namespace
+ */
+int plpgsql_pkg_adddatum2ns(const List* name)
+{
+    PLpgSQL_datum* datum = NULL;
+    int varno;
+    char* objname = NULL;
+    char* pkgname = NULL;
+
+    datum = plpgsql_pkg_adddatum(name, &objname, &pkgname);
+
+    if (datum == NULL) {
+        return -1;
+    } else {
+        varno = plpgsql_adddatum(datum);
+        switch (datum->dtype) {
+            case PLPGSQL_DTYPE_VAR:
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, varno, objname, pkgname);
+                break;
+            case PLPGSQL_DTYPE_ROW:
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, varno, objname, pkgname);
+                break;
+            case PLPGSQL_DTYPE_RECORD:
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_RECORD, varno, objname, pkgname);
+                break;
+            default:
+                ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
+                    errmsg("unrecognized type: %d when building a variable in PLSQL; this situation should not occur.",
+                        datum->dtype)));
+                break;
+        }
+    }
+    return varno;
+}
+
+static void build_pkg_cursor_variable(int varno, PLpgSQL_package* pkg, const char* pkgName, const char* nspName)
+{
+    int cursorAttrNum = 4;
+    int dno = -1;
+    char* refname = NULL;
+    for (int i = 1; i <= cursorAttrNum; i++) {
+        dno = plpgsql_adddatum(pkg->datums[varno + i], false);
+        refname = ((PLpgSQL_variable*)(pkg->datums[varno + i]))->refname;
+        plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, dno, refname, pkgName, nspName);
+    }
+}
+
+static void build_pkg_row_variable(int varno, PLpgSQL_package* pkg, const char* pkgName, const char* nspName)
+{
+    PLpgSQL_row* row = (PLpgSQL_row*)pkg->datums[varno];
+    int dno = -1;
+    PLpgSQL_datum* datum = NULL;
+    char* refName = NULL;
+
+    for (int i = 0; i < row->nfields; i++) {
+        datum = row->pkg->datums[row->varnos[i]];
+        if (datum != NULL) {
+            refName = ((PLpgSQL_variable*)datum)->refname;
+            dno = plpgsql_adddatum(pkg->datums[row->varnos[i]], false);
+            if (datum->dtype == PLPGSQL_DTYPE_VAR) {
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, dno, refName, pkgName, nspName);
+            } else {
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, dno, refName, pkgName, nspName);
+            }
+        }
+    }
+}
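`build_pkg_cursor_variable` above relies on a fixed datum layout: a ref-cursor variable is followed by exactly four attribute datums at `varno + 1` through `varno + 4` (`cursorAttrNum`), presumably the implicit cursor attributes such as `%ISOPEN`, `%FOUND`, `%NOTFOUND` and `%ROWCOUNT` — the source fixes only the count and positions, not the names. A toy illustration of that layout, under that assumption:

```
#include <cassert>
#include <string>
#include <vector>

int main()
{
    /* Toy datum table: the cursor variable is allocated together with four
     * trailing attribute datums, so attribute i of the cursor at index varno
     * lives at datums[varno + i] for i in 1..4. Attribute names are assumed;
     * the source only fixes the count (4) and the positions. */
    std::vector<std::string> datums = {
        "c1",                                                /* the REFCURSOR variable */
        "c1%ISOPEN", "c1%FOUND", "c1%NOTFOUND", "c1%ROWCOUNT"
    };
    const int varno = 0;
    const int cursorAttrNum = 4; /* same constant as build_pkg_cursor_variable */
    for (int i = 1; i <= cursorAttrNum; i++) {
        assert(datums[varno + i].compare(0, 3, "c1%") == 0); /* attribute trails its cursor */
    }
    return 0;
}
```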
+
+
+int plpgsql_build_pkg_variable(List* name, PLpgSQL_datum* datum, bool isSamePkg)
+{
+    int varno = 0;
+    char* objname = NULL;
+    char* pkgname = NULL;
+    char* nspname = NULL;
+
+    DeconstructQualifiedName(name, &nspname, &objname, &pkgname);
+
+    switch (datum->dtype) {
+        case PLPGSQL_DTYPE_VAR: {
+            /* Ordinary scalar datatype */
+            PLpgSQL_var* var = (PLpgSQL_var*)datum;
+            varno = isSamePkg ? var->dno : plpgsql_adddatum(datum, false);
+            if (var->addNamespace) {
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR, varno, var->refname, pkgname, nspname);
+            }
+            if (var->datatype->typoid == REFCURSOROID && !isSamePkg) {
+                build_pkg_cursor_variable(var->dno, var->pkg, pkgname, nspname);
+            }
+            return varno;
+        }
+        case PLPGSQL_DTYPE_ROW: {
+            /* Row datatype */
+            PLpgSQL_row* row = (PLpgSQL_row*)datum;
+            varno = isSamePkg ? row->dno : plpgsql_adddatum(datum, false);
+            if (row->addNamespace) {
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, varno, row->refname, pkgname, nspname);
+            }
+            if (!isSamePkg) {
+                build_pkg_row_variable(row->dno, row->pkg, pkgname, nspname);
+            }
+            return varno;
+        }
+        case PLPGSQL_DTYPE_RECORD: {
+            /* "record" type -- build a record variable */
+            PLpgSQL_row* row = (PLpgSQL_row*)datum;
+
+            varno = isSamePkg ? row->dno : plpgsql_adddatum(datum, false);
+            if (row->addNamespace) {
+                plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, varno, row->refname, pkgname, nspname);
+            }
+            if (!isSamePkg) {
+                build_pkg_row_variable(row->dno, row->pkg, pkgname, nspname);
+            }
+            return varno;
+        }
+
+    }
+    return -1;
+}
+
+/*
+ * Use an unknown type when compiling a function that references a package
+ * variable: because gauss doesn't support compiling multiple functions at the
+ * same time, we do a fake compile when compiling other functions that
+ * reference package variables. We only check whether the package exists; if it
+ * does not exist, return -1, otherwise return the variable number.
+ */
+
+int plpgsql_pkg_add_unknown_var_to_namespace(List* name)
+{
+#ifdef ENABLE_MULTIPLE_NODES
+    return -1;
+#endif
+
+    if (list_length(name) >= 4) {
+        return -1;
+    }
+
+    bool isSamePkg = false;
+    PLpgSQL_datum* datum = GetPackageDatum(name, &isSamePkg);
+    if (datum != NULL) {
+        return plpgsql_build_pkg_variable(name, datum, isSamePkg);
+    } else {
+        return -1;
+    }
+}
+
+/*
+ * @Description : Check co-location for opexpr. We walk through the lists of opexpr
+ * to check whether they come from new/old, against all the distribution keys of both queries.
+ *
+ * @in query : Query struct of the query with trigger.
+ * @in qry_part_attr_num : list of attr no of the dist keys of triggering query.
+ * @in trig_part_attr_num : list of attr no of the dist keys of query in trigger.
+ * @in func : trigger function body information.
+ * @return : true when co-located.
+ */
+extern bool plpgsql_check_opexpr_colocate(
+    Query* query, List* qry_part_attr_num, List* trig_part_attr_num, PLpgSQL_function* func, List* opexpr_list)
+{
+    ListCell* lc1 = NULL;
+    ListCell* lc2 = NULL;
+    bool is_colocate = true;
+
+    forboth(lc1, qry_part_attr_num, lc2, trig_part_attr_num)
+    {
+        Expr* expr = NULL;
+        Param* param = NULL;
+        PLpgSQL_recfield* recfield = NULL;
+        PLpgSQL_rec* rec = NULL;
+        int fno;
+        AttrNumber attnum1 = lfirst_int(lc1);
+        AttrNumber attnum2 = lfirst_int(lc2);
+        ListCell* opexpr_cell = NULL;
+
+        /* We can ship only if all distribution columns are colocated. */
+        if (!is_colocate) {
+            return false;
+        }
+
+        foreach (opexpr_cell, opexpr_list) {
+            Expr* qual_expr = (Expr*)lfirst(opexpr_cell);
+
+            /* Check every opexpr against the distribution column */
+            expr = pgxc_check_distcol_opexpr(query->resultRelation, attnum2, (OpExpr*)qual_expr);
+            if (expr == NULL) {
+                is_colocate = false;
+                continue;
+            }
+
+            /* NEW/OLD is replaced with a param by the parser */
+            if (!IsA(expr, Param)) {
+                is_colocate = false;
+                continue;
+            }
+            param = (Param*)expr;
+
+            /* This param should point to a datum in func */
+            recfield = (PLpgSQL_recfield*)func->datums[param->paramid - 1];
+
+            /* From there we get the new or old rec */
+            rec = (PLpgSQL_rec*)func->datums[recfield->recparentno];
+            if (strcmp(rec->refname, "new") != 0 && strcmp(rec->refname, "old") != 0) {
+                is_colocate = false;
+                continue;
+            }
+
+            /* We should already have set tupdesc at the very beginning */
+            if (rec->tupdesc == NULL) {
+                is_colocate = false;
+                continue;
+            }
+
+            /*
+             * Find the field index of new.a1; only if it matches the current
+             * distribution key of the source table can we call the two tables
+             * DML-colocated.
+             */
+            fno = SPI_fnumber(rec->tupdesc, recfield->fieldname);
+            if (fno != attnum1) {
+                is_colocate = false;
+                continue;
+            }
+
+            is_colocate = true;
+            break;
+        }
+    }
+
+    return is_colocate;
+}
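The walk above succeeds only if every pair of distribution keys — one from the triggering query, one from the trigger-body table — finds at least one equality opexpr binding the trigger table's key to the matching NEW/OLD field; a single unmatched pair makes the statement non-shippable. A simplified standalone model of that all-pairs rule (attribute numbers only, hypothetical types):

```
#include <cassert>
#include <utility>
#include <vector>

/* One qual of the form trig_col = NEW/OLD.field, reduced to attribute numbers. */
struct QualOp {
    int trig_attno;   /* column of the table updated inside the trigger */
    int newold_attno; /* field of NEW/OLD, i.e. the triggering table    */
};

/* All-pairs rule: every distribution-key pair (attnum1 from the triggering
 * query, attnum2 from the trigger body) must be matched by some qual. */
static bool colocated(const std::vector<std::pair<int, int>>& key_pairs, const std::vector<QualOp>& quals)
{
    for (auto [attnum1, attnum2] : key_pairs) {
        bool matched = false;
        for (const auto& q : quals) {
            if (q.trig_attno == attnum2 && q.newold_attno == attnum1) {
                matched = true;
                break;
            }
        }
        if (!matched) {
            return false; /* one unmatched key pair fails the whole check */
        }
    }
    return true;
}

int main()
{
    /* t1(a1,b1) -> t2(a2,b2): "where a2 = old.a1 and b2 = old.b1" */
    std::vector<std::pair<int, int>> keys = {{1, 1}, {2, 2}};
    assert(colocated(keys, {{1, 1}, {2, 2}}));
    assert(!colocated(keys, {{1, 1}})); /* b2 never constrained -> not shippable */
    return 0;
}
```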
+
+/*
+ * @Description : Check co-location for an update or delete command. To check co-location of the
+ * update or delete query in the trigger and the triggering query, we walk through the lists
+ * of attribute numbers of the distribution keys of both queries. For each pair, we check
+ * whether the distribution key of the trigger table exists in its where clause, and whether its
+ * expression comes from new/old and is the distribution key of the table the triggering query works on.
+ *
+ * For example,
+ * tables:
+ *     create table t1(a1 int, b1 int, c1 int,d1 varchar(100)) distribute by hash(a1,b1);
+ *     create table t2(a2 int, b2 int, c2 int,d2 varchar(100)) distribute by hash(a2,b2);
+ * update in trigger body:
+ *     update t2 set d2=new.d1 where a2=old.a1 and b2=old.b1;
+ * triggering event:
+ *     an insert or update on t1
+ *
+ * We walk through two pairs: (a1, a2) and (b1, b2), and check if a2 is in the
+ * where clause "a2=old.a1 and b2=old.b1", and if its expression is from new/old
+ * and is a1; and if b2 is in the where clause too, and its expression is from new/old
+ * and is b1. If the above two checks are satisfied, co-location is true.
+ *
+ * @in query : Query struct of the query with trigger.
+ * @in qry_part_attr_num : list of attr no of the dist keys of triggering query.
+ * @in trig_part_attr_num : list of attr no of the dist keys of query in trigger.
+ * @in func : trigger function body information.
+ * @return : true when co-located.
+ */
+bool plpgsql_check_updel_colocate(
+    Query* query, List* qry_part_attr_num, List* trig_part_attr_num, PLpgSQL_function* func)
+{
+    Node* whereClause = NULL;
+    List* opexpr_list = NIL;
+    bool is_colocate = true;
+
+    if (query->jointree == NULL || query->jointree->quals == NULL) {
+        return false;
+    }
+
+    /* Recursively get a list of opexpr from the quals. */
+    opexpr_list = pull_opExpr((Node*)query->jointree->quals);
+    if (opexpr_list == NIL) {
+        return false;
+    }
+    /* Flatten the AND/OR expression to check for an OR expr. */
+    whereClause = eval_const_expressions(NULL, (Node*)query->jointree->quals);
+
+    /* If it is an OR clause, we break the clause apart to check the colocation of each expr */
+    if (or_clause(whereClause)) {
+        List* temp_list = NIL;
+        ListCell* opexpr_cell = NULL;
+
+        /* For an OR condition, we can ship only when all opexpr are colocated. */
+        foreach (opexpr_cell, opexpr_list) {
+            temp_list = list_make1((Expr*)lfirst(opexpr_cell));
+            is_colocate = plpgsql_check_opexpr_colocate(query, qry_part_attr_num, trig_part_attr_num, func, temp_list);
+
+            if (temp_list != NIL) {
+                list_free_ext(temp_list);
+            }
+
+            if (!is_colocate) {
+                break;
+            }
+        }
+
+        if (opexpr_list != NIL) {
+            list_free_ext(opexpr_list);
+        }
+        return is_colocate;
+    } else {
+        /* For an AND clause with no OR condition, we can ship when any opexpr is colocated. */
+        is_colocate = plpgsql_check_opexpr_colocate(query, qry_part_attr_num, trig_part_attr_num, func, opexpr_list);
+    }
+
+    if (opexpr_list != NIL) {
+        list_free_ext(opexpr_list);
+    }
+    return is_colocate;
+}
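The branch above encodes an asymmetry: under an OR, every disjunct must independently pass the colocation check, since any single non-colocated disjunct could route rows elsewhere, while a pure AND ships once the check over the conjunct list passes — as the comment puts it, any colocated opexpr suffices. A compact restatement of just that rule, with each opexpr reduced to a precomputed colocation flag (the real per-key matching is modeled earlier):

```
#include <algorithm>
#include <cassert>
#include <vector>

/* OR/AND shippability rule, with each opexpr reduced to a boolean flag. */
static bool shippable(bool is_or_clause, const std::vector<bool>& colocated)
{
    if (is_or_clause) {
        /* OR: every disjunct must be colocated on its own. */
        return std::all_of(colocated.begin(), colocated.end(), [](bool b) { return b; });
    }
    /* AND with no OR inside: one colocated opexpr is enough. */
    return std::any_of(colocated.begin(), colocated.end(), [](bool b) { return b; });
}

int main()
{
    assert(!shippable(true, {true, false})); /* a2=old.a1 OR c2=5  -> cannot ship */
    assert(shippable(false, {true, false})); /* a2=old.a1 AND c2=5 -> can ship    */
    return 0;
}
```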
+
+/*
+ * @Description : Check co-location for an INSERT/UPDATE/DELETE statement and
+ * the statement which it triggered.
+ *
+ * @in query : Query struct info about the IUD statement.
+ * @in rte : range table entry for the insert/update/delete statement.
+ * @in plpgsql_func : information for the insert/update/delete trigger function.
+ * @return : true when the statement and the triggered statement are co-located.
+ */
+bool plpgsql_check_colocate(Query* query, RangeTblEntry* rte, void* plpgsql_func)
+{
+    List* query_partAttrNum = NIL;
+    List* trig_partAttrNum = NIL;
+    Relation qe_relation = NULL; /* triggering query's rel */
+    Relation tg_relation = NULL;
+    RelationLocInfo* qe_rel_loc_info = NULL; /* triggering query's rel loc info */
+    RelationLocInfo* tg_rel_loc_info = NULL; /* trigger body's rel loc info */
+    int qe_rel_nodelist_len = 0;
+    int tg_rel_nodelist_len = 0;
+
+    Assert(query->commandType == CMD_INSERT || query->commandType == CMD_DELETE || query->commandType == CMD_UPDATE);
+
+    PLpgSQL_function* func = (PLpgSQL_function*)plpgsql_func;
+
+    /* Get the event query's relation and the trigger body's relation. */
+    qe_relation = func->tg_relation;
+    tg_relation = relation_open(rte->relid, AccessShareLock);
+
+    query_partAttrNum = qe_relation->rd_locator_info->partAttrNum;
+    trig_partAttrNum = rte->partAttrNum;
+
+    /* Check if the trigger query table and the trigger body table are on the same node list. */
+    qe_rel_loc_info = qe_relation->rd_locator_info;
+    tg_rel_loc_info = tg_relation->rd_locator_info;
+
+    qe_rel_nodelist_len = list_length(qe_rel_loc_info->nodeList);
+    tg_rel_nodelist_len = list_length(tg_rel_loc_info->nodeList);
+
+    /* Cannot ship whenever the target table is not row-store. */
+    if (!RelationIsRowFormat(tg_relation)) {
+        relation_close(tg_relation, AccessShareLock);
+        return false;
+    }
+
+    /* The query table and the trigger table must be in the same group. */
+    if (0 != strcmp(qe_rel_loc_info->gname.data, tg_rel_loc_info->gname.data)) {
+        relation_close(tg_relation, AccessShareLock);
+        return false;
+    }
+    relation_close(tg_relation, AccessShareLock);
+
+    /* If the distribution key list lengths are different, the tables are not colocated. */
+    if (list_length(trig_partAttrNum) != list_length(query_partAttrNum)) {
+        return false;
+    }
+
+    /*
+     * We use different check functions for INSERT and UPDATE/DELETE because we
+     * use the targetlist to check INSERT and the where clause to check UPDATE/DELETE.
+     */
+    if (query->commandType == CMD_UPDATE || query->commandType == CMD_DELETE) {
+        return plpgsql_check_updel_colocate(query, query_partAttrNum, trig_partAttrNum, func);
+    } else {
+        return plpgsql_check_insert_colocate(query, query_partAttrNum, trig_partAttrNum, func);
+    }
+}
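`plpgsql_check_colocate` above runs its cheap gates before any expression analysis: the trigger-body table must be row-store, both tables must live in the same node group, and the two distribution-key lists must have the same length; only then does the per-command walk run. The gate logic as a tiny standalone predicate (simplified stand-in fields, not the server structures):

```
#include <cassert>
#include <string>

struct RelInfo {
    bool row_format;        /* row store, not column store          */
    std::string node_group; /* locator group name (gname)           */
    int dist_key_count;     /* length of the distribution key list  */
};

/* Cheap gates checked before the per-command colocation walk. */
static bool may_be_colocated(const RelInfo& query_rel, const RelInfo& trig_rel)
{
    return trig_rel.row_format &&
           query_rel.node_group == trig_rel.node_group &&
           query_rel.dist_key_count == trig_rel.dist_key_count;
}

int main()
{
    RelInfo t1{true, "group1", 2};
    RelInfo t2{true, "group1", 2};
    assert(may_be_colocated(t1, t2));
    t2.node_group = "group2";
    assert(!may_be_colocated(t1, t2)); /* different node group -> cannot ship */
    return 0;
}
```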
+ */ + int len2 = list_length(schemas) - 1; + Assert(len2 >= 0); + + if (len1 == len2) { + ListCell* lc1 = NULL; + ListCell* lc2 = NULL; + + /* Check whether search_path has changed */ + lc1 = list_head(u_sess->catalog_cxt.baseSearchPath); + lc2 = list_head(schemas); + + /* Check function schemas from the list's second position to its tail */ + lc2 = lnext(lc2); + for (; lc1 && lc2; lc1 = lnext(lc1), lc2 = lnext(lc2)) { + if (lfirst_oid(lc1) != lfirst_oid(lc2)) { + isOidListSame = false; + break; + } + } + } else if (len1 == list_length(schemas)) { + /* in some cases, lists of equal length may contain the same OIDs */ + ListCell* lc1 = NULL; + foreach(lc1, schemas) { + if (!list_member_oid(u_sess->catalog_cxt.baseSearchPath, lfirst_oid(lc1))) { + isOidListSame = false; + break; + } + } + } else { + /* If the lengths are different, the two lists are different. */ + isOidListSame = false; + } + } + + return isOidListSame; +} + + +void plpgsql_pkg_HashTableDelete(PLpgSQL_package* pkg) +{ + plpgsql_pkg_HashEnt* hentry = NULL; + /* do nothing if not in table */ + if (pkg->pkg_hashkey == NULL) { + return; + } + hentry = (plpgsql_pkg_HashEnt*)hash_search( + u_sess->plsql_cxt.plpgsql_pkg_HashTable, (void*)pkg->pkg_hashkey, HASH_REMOVE, NULL); + if (hentry == NULL) { + elog(WARNING, "trying to delete package that does not exist"); + } else { + /* delete the cell from the list. */ + u_sess->plsql_cxt.plpgsqlpkg_dlist_objects = + dlist_delete_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell, false); + } + /* remove back link, which no longer points to allocated storage */ + pkg->pkg_hashkey = NULL; +} + +void delete_package(PLpgSQL_package* pkg) +{ + if (pkg->use_count > 0) + return; + ListCell* l = NULL; + foreach(l, pkg->proc_compiled_list) { + if (((PLpgSQL_function*)lfirst(l))->use_count > 0) + return; + } + foreach(l, pkg->proc_compiled_list) { + PLpgSQL_function* func = (PLpgSQL_function*)lfirst(l); + delete_function(func, true); + } + /* free package memory */ + plpgsql_pkg_HashTableDelete(pkg); + plpgsql_free_package_memory(pkg); +} + +static void plpgsql_pkg_append_dlcell(plpgsql_pkg_HashEnt* entity) +{ + MemoryContext oldctx; + PLpgSQL_package* pkg = NULL; + oldctx = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + u_sess->plsql_cxt.plpgsqlpkg_dlist_objects = dlappend(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, entity); + (void)MemoryContextSwitchTo(oldctx); + + entity->cell = u_sess->plsql_cxt.plpgsqlpkg_dlist_objects->tail; + while (dlength(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects) > g_instance.attr.attr_sql.max_compile_functions) { + DListCell* headcell = u_sess->plsql_cxt.plpgsqlpkg_dlist_objects->head; + plpgsql_pkg_HashEnt* head_entity = (plpgsql_pkg_HashEnt*)lfirst(headcell); + + pkg = head_entity->package; + + /* delete from the hash and delete the package's compiled functions */ + CheckCurrCompileDependOnPackage(pkg->pkg_oid); + delete_package(pkg); + pfree_ext(pkg); + } +} + + +void delete_pkg_in_HashTable(Oid pkgOid) +{ + PLpgSQL_pkg_hashkey hashkey; + plpgsql_pkg_HashEnt* hentry = NULL; + hashkey.pkgOid = pkgOid; + bool found = false; + hentry = (plpgsql_pkg_HashEnt*)hash_search(u_sess->plsql_cxt.plpgsql_pkg_HashTable, &hashkey, HASH_REMOVE, &found); + if (found) { + u_sess->plsql_cxt.plpgsqlpkg_dlist_objects = + dlist_delete_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell, false); + } +} + +static void plpgsql_pkg_HashTableInsert(PLpgSQL_package* pkg, PLpgSQL_pkg_hashkey* pkg_key) +{ + plpgsql_pkg_HashEnt* hentry = NULL; + bool found = false; + hentry =
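plpgsql_pkg_append_dlcell above keeps the compiled-package list bounded: new entries go on the tail, and the head is evicted while the list exceeds max_compile_functions. A self-contained sketch of that policy, with `std::deque` standing in for the session DList:

```
#include <deque>
#include <string>

// Append a newly compiled package and evict from the head when over the cap.
void append_bounded(std::deque<std::string>& cache, std::string pkg, size_t cap)
{
    cache.push_back(std::move(pkg)); /* newly compiled package at the tail */
    while (cache.size() > cap) {
        /* stands in for CheckCurrCompileDependOnPackage + delete_package + pfree */
        cache.pop_front();
    }
}
```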
(plpgsql_pkg_HashEnt*)hash_search(u_sess->plsql_cxt.plpgsql_pkg_HashTable, (void*)pkg_key, HASH_ENTER, &found); + if (found) { + /* move cell to the tail of the package list. */ + dlist_add_tail_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell); + elog(WARNING, "trying to insert a package that already exists"); + } else { + /* append the current compiling entity to the end of the compile results list. */ + plpgsql_pkg_append_dlcell(hentry); + } + hentry->package = pkg; + /* prepare back link from package to hashtable key */ + pkg->pkg_hashkey = &hentry->key; +} + +extern PLpgSQL_package* plpgsql_pkg_HashTableLookup(PLpgSQL_pkg_hashkey* pkg_key) +{ + if (unlikely(u_sess->plsql_cxt.plpgsql_pkg_HashTable == NULL)) + return NULL; + plpgsql_pkg_HashEnt* hentry = NULL; + hentry = (plpgsql_pkg_HashEnt*)hash_search(u_sess->plsql_cxt.plpgsql_pkg_HashTable, (void*)pkg_key, HASH_FIND, NULL); + if (hentry != NULL) { + /* add cell to the tail of the package list. */ + dlist_add_tail_cell(u_sess->plsql_cxt.plpgsqlpkg_dlist_objects, hentry->cell); + return hentry->package; + } else { + return NULL; + } +} + +static PLpgSQL_package* do_pkg_compile(Oid pkgOid, HeapTuple pkg_tup, PLpgSQL_package* pkg, PLpgSQL_pkg_hashkey* hashkey, bool isSpec) +{ + Form_gs_package pkg_struct = (Form_gs_package)GETSTRUCT(pkg_tup); + Datum pkgsrcdatum; + Datum pkginitdatum; + bool isnull = false; + char* pkg_source = NULL; + char* pkg_init_source = NULL; + int i; + ErrorContextCallback pl_err_context; + int parse_rc; + Oid* saved_pseudo_current_userId = NULL; + char* signature = NULL; + List* current_searchpath = NIL; + char* namespace_name = NULL; + char context_name[NAMEDATALEN] = {0}; + int rc = 0; + const int alloc_size = 256; + Datum namespaceOidDatum; + Oid namespaceOid = InvalidOid; + /* + * Setup the scanner input and error info. We assume that this function + * cannot be invoked recursively, so there's no need to save and restore + * the static variables used here. + */ + if (isSpec) { + pkgsrcdatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgspecsrc, &isnull); + } else { + pkgsrcdatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgbodydeclsrc, &isnull); + } + + if (isnull) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The definition of the package is null"), + errhint("Check whether the definition of the package is complete in the gs_package system table."))); + } + + if (!isSpec) { + pkginitdatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgbodyinitsrc, &isnull); + } else { + isnull = true; + } + if (isnull) { + pkg_init_source = "INSTANTIATION DECLARE BEGIN NULL; END"; + } else if (!isnull && !isSpec) { + pkg_init_source = TextDatumGetCString(pkginitdatum); + } + pkg_source = TextDatumGetCString(pkgsrcdatum); + /* + * Setup error traceback support for ereport() + */ + pl_err_context.callback = plpgsql_compile_error_callback; + pl_err_context.arg = NULL; + pl_err_context.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &pl_err_context; + signature = pstrdup(NameStr(pkg_struct->pkgname)); + /* + * All the permanent output of compilation (e.g. parse tree) is kept in a + * per-package memory context, so it can be reclaimed easily. + */ + rc = snprintf_s( + context_name, NAMEDATALEN, NAMEDATALEN - 1, "%s_%lu", "PL/pgSQL package context", u_sess->debug_query_id); + securec_check_ss(rc, "", ""); + /* + * Create the new package struct, if not done already.
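do_pkg_compile wires up an ErrorContextCallback before parsing so that any ereport() raised during compilation carries a "compilation of ..." context line. The push/pop idiom, isolated with hypothetical stand-in types so it is self-contained:

```
struct ErrCtx {
    void (*callback)(void* arg);
    void* arg;
    ErrCtx* previous;
};

/* stands in for t_thrd.log_cxt.error_context_stack */
static ErrCtx* g_error_context_stack = nullptr;

void with_error_context(void (*cb)(void*), void* arg, void (*work)(void))
{
    ErrCtx frame = {cb, arg, g_error_context_stack};
    g_error_context_stack = &frame;           /* push before the risky work */
    work();                                   /* parse/compile; may report errors */
    g_error_context_stack = frame.previous;   /* pop on the success path */
}
```

In the patch the pop happens near the end of do_pkg_compile by restoring pl_err_context.previous.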
The package + * structs are never thrown away, so keep them in session memory context. + */ + PLpgSQL_compile_context* curr_compile = createCompileContext(context_name); + SPI_NESTCOMPILE_LOG(curr_compile->compile_cxt); + if (pkg == NULL) { + pkg = (PLpgSQL_package*)MemoryContextAllocZero( + SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER), sizeof(PLpgSQL_package)); + pkg->pkg_cxt = curr_compile->compile_cxt; + pkg->pkg_signature = pstrdup(signature); + pkg->pkg_owner = pkg_struct->pkgowner; + pkg->pkg_oid = pkgOid; + pkg->pkg_tid = pkg_tup->t_self; + pkg->proc_list = NULL; + pkg->invalItems = NIL; + pkg->use_count = 0; + } + saved_pseudo_current_userId = u_sess->misc_cxt.Pseudo_CurrentUserId; + u_sess->misc_cxt.Pseudo_CurrentUserId = &pkg->pkg_owner; + pkg->is_spec_compiling = isSpec; + MemoryContext temp = NULL; + if (u_sess->plsql_cxt.curr_compile_context != NULL) { + checkCompileMemoryContext(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); + temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); + } + u_sess->plsql_cxt.curr_compile_context = curr_compile; + pushCompileContext(); + plpgsql_scanner_init(pkg_source); + curr_compile->plpgsql_error_pkgname = pstrdup(NameStr(pkg_struct->pkgname)); + + namespaceOidDatum = SysCacheGetAttr(PACKAGEOID, pkg_tup, Anum_gs_package_pkgnamespace, &isnull); + if (!isnull) { + namespaceOid = DatumGetObjectId(namespaceOidDatum); + } + if (OidIsValid(namespaceOid)) { + pkg->namespaceOid = namespaceOid; + } else { + pkg->namespaceOid = InvalidOid; + } + pkg->is_spec_compiling = isSpec; + if (isSpec) { + u_sess->plsql_cxt.plpgsql_IndexErrorVariable = 0; + } + /* + * compile_tmp_cxt is a short temp context that will be destroyed after + * function compile or execute. + * func_cxt is a long term context that will stay until thread exit. So + * allocations on func_cxt must be done very carefully. + * signature is stored on a StringInfoData which is 1K bytes at least, but + * most signatures will not be so long originally, so we should do a strdup. + */ + curr_compile->compile_tmp_cxt = MemoryContextSwitchTo(pkg->pkg_cxt); + pkg->pkg_signature = pstrdup(signature); + pkg->pkg_searchpath = (OverrideSearchPath*)palloc0(sizeof(OverrideSearchPath)); + pkg->pkg_searchpath->addCatalog = true; + pkg->pkg_searchpath->addTemp = true; + pkg->pkg_xmin = HeapTupleGetRawXmin(pkg_tup); + pkg->proc_compiled_list = NULL; + curr_compile->plpgsql_curr_compile_package = pkg; + if (pkg_struct->pkgnamespace == PG_CATALOG_NAMESPACE) { + current_searchpath = fetch_search_path(false); + if (current_searchpath == NIL) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_SCHEMA), + errmsg("the search_path is empty while the proc belongs to pg_catalog "))); + } + namespace_name = get_namespace_name(linitial_oid(current_searchpath)); + if (namespace_name == NULL) { + list_free_ext(current_searchpath); + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_SCHEMA), + errmsg("cannot find the namespace according to search_path"))); + } + pkg->pkg_searchpath->schemas = current_searchpath; + } else { + /* Assign namespace of current function to fn_searchpath */ + pkg->pkg_searchpath->schemas = list_make1_oid(pkg_struct->pkgnamespace); + if (SUPPORT_BIND_SEARCHPATH) { + /* + * If SUPPORT_BIND_SEARCHPATH is true, + * add system's search_path to fn_searchpath. + * When the relations of other objects cannot be + * found in the namespace of the current function, + * find them in the search_path list.
+ * Otherwise, we only find objects in the namespace + * of the current function. + */ + ListCell* l = NULL; + /* If u_sess->catalog_cxt.namespaceUser and roleid are not equal, + * then u_sess->catalog_cxt.baseSearchPath doesn't + * contain the current user's schema. The current user's schema will be added in + * PushOverrideSearchPath. + * + * It can happen executing following statements. + * + * create temp table t1(a int); + * \d t1 --(get schema pg_temp_xxx) + * drop table t1; + * drop schema pg_temp_xxx cascade; + * call proc1() --(proc1 contains create temp table statement) + */ + Oid roleid = GetUserId(); + if (u_sess->catalog_cxt.namespaceUser != roleid) { + pkg->pkg_searchpath->addUser = true; + } + /* Use baseSearchPath not activeSearchPath. */ + foreach (l, u_sess->catalog_cxt.baseSearchPath) { + Oid namespaceId = lfirst_oid(l); + /* + * Append namespaceId to fn_searchpath directly. + */ + pkg->pkg_searchpath->schemas = lappend_oid(pkg->pkg_searchpath->schemas, namespaceId); + } + } + } + pfree_ext(signature); + curr_compile->plpgsql_curr_compile_package->proc_compiled_list = NULL; + /* + * Initialize the compiler, particularly the namespace stack. The + * outermost namespace contains function parameters and other special + * variables (such as FOUND), and is named after the function itself. + */ + curr_compile->datums_pkg_alloc = alloc_size; + curr_compile->plpgsql_pkg_nDatums = 0; + /* This is short-lived, so needn't allocate in function's cxt */ + curr_compile->plpgsql_Datums = (PLpgSQL_datum**)MemoryContextAlloc( + curr_compile->compile_tmp_cxt, sizeof(PLpgSQL_datum*) * curr_compile->datums_pkg_alloc); + curr_compile->datum_need_free = (bool*)MemoryContextAlloc( + curr_compile->compile_tmp_cxt, sizeof(bool) * curr_compile->datums_pkg_alloc); + curr_compile->datums_last = 0; + PushOverrideSearchPath(pkg->pkg_searchpath); + plpgsql_ns_init(); + plpgsql_ns_push(NameStr(pkg_struct->pkgname)); + add_pkg_compile(); + curr_compile->datums_last = curr_compile->plpgsql_nDatums; + curr_compile->plpgsql_pkg_DumpExecTree = false; + /* + * Now parse the package's text + */ + bool saved_flag = u_sess->plsql_cxt.have_error; + u_sess->plsql_cxt.have_error = false; + parse_rc = plpgsql_yyparse(); + if (parse_rc != 0) { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("Syntax parsing error, plpgsql parser returned %d", parse_rc))); + } + plpgsql_scanner_finish(); + pfree_ext(pkg_source); + + if (pkg_init_source != NULL) { + plpgsql_scanner_init(pkg_init_source); + parse_rc = plpgsql_yyparse(); + if (parse_rc != 0) { + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("Syntax parsing error, plpgsql parser returned %d", parse_rc))); + } + plpgsql_scanner_finish(); + } + + PopOverrideSearchPath(); + u_sess->misc_cxt.Pseudo_CurrentUserId = saved_pseudo_current_userId; +#ifndef ENABLE_MULTIPLE_NODES + if (u_sess->plsql_cxt.have_error && u_sess->attr.attr_common.plsql_show_all_error) { + u_sess->plsql_cxt.have_error = false; + ereport(ERROR, + (errmodule(MOD_PLSQL), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Debug mode, create procedure has errors."), + errdetail("N/A"), + errcause("compile procedure error."), + erraction("check procedure error and redefine procedure"))); + } +#endif + u_sess->plsql_cxt.have_error = saved_flag; + pkg->ndatums = curr_compile->plpgsql_pkg_nDatums; + pkg->datums = (PLpgSQL_datum**)palloc(sizeof(PLpgSQL_datum*) * curr_compile->plpgsql_pkg_nDatums); + pkg->datum_need_free =
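The branch above builds the package's bound search path: the package's own schema leads, and when SUPPORT_BIND_SEARCHPATH is set the session's base search_path is appended behind it as a fallback for unqualified names. An illustrative sketch with hypothetical types:

```
#include <vector>
using Oid = unsigned int;

// Own namespace first, session search_path appended as the fallback.
std::vector<Oid> bind_search_path(Oid own_schema, const std::vector<Oid>& base_path)
{
    std::vector<Oid> bound;
    bound.push_back(own_schema);  /* the package's own namespace wins */
    for (Oid nsp : base_path) {
        bound.push_back(nsp);     /* session search_path as fallback */
    }
    return bound;
}
```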
(bool*)palloc(sizeof(bool) * curr_compile->plpgsql_pkg_nDatums); + pkg->datums_alloc = pkg->ndatums; + for (i = 0; i < curr_compile->plpgsql_pkg_nDatums; i++) { + pkg->datums[i] = curr_compile->plpgsql_Datums[i]; + pkg->datum_need_free[i] = curr_compile->datum_need_free[i]; + if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_VAR) { + PLpgSQL_var* var = reinterpret_cast<PLpgSQL_var*>(pkg->datums[i]); + if (var->pkg == NULL) { + var->pkg = pkg; + var->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); + } + } else if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_ROW) { + PLpgSQL_row* row = (PLpgSQL_row*)pkg->datums[i]; + if (row->pkg == NULL) { + row->pkg = pkg; + row->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); + } + } else if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_RECORD) { + PLpgSQL_row* row = (PLpgSQL_row*)pkg->datums[i]; + if (row->pkg == NULL) { + row->pkg = pkg; + row->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); + } + } else if (pkg->datums[i]->dtype == PLPGSQL_DTYPE_REC) { + PLpgSQL_rec* rec = (PLpgSQL_rec*)pkg->datums[i]; + if (rec->pkg == NULL) { + rec->pkg = pkg; + rec->pkg_name = GetPackageListName(NameStr(pkg_struct->pkgname), namespaceOid); + } + } + } + + if (isSpec) { + pkg->public_ns = curr_compile->ns_top; + pkg->is_bodycompiled = false; + pkg->public_ndatums = curr_compile->plpgsql_pkg_nDatums; + } else { + pkg->private_ns = curr_compile->ns_top; + pkg->is_bodycompiled = true; + } + pkg->proc_list = curr_compile->plpgsql_curr_compile_package->proc_list; + if (!isSpec) { + pkg->is_bodycompiled = true; + } + MemoryContext oldcxt = MemoryContextSwitchTo(pkg->pkg_cxt); + pkg->proc_compiled_list = curr_compile->plpgsql_curr_compile_package->proc_compiled_list; + if (hashkey && isSpec) { + plpgsql_pkg_HashTableInsert(pkg, hashkey); + } + t_thrd.log_cxt.error_context_stack = pl_err_context.previous; + curr_compile->plpgsql_error_funcname = NULL; + curr_compile->plpgsql_check_syntax = false; + MemoryContextSwitchTo(oldcxt); + MemoryContextSwitchTo(curr_compile->compile_tmp_cxt); + curr_compile->compile_tmp_cxt = NULL; + curr_compile->plpgsql_curr_compile = NULL; + ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), + errmsg("%s finish compile, level: %d", __func__, list_length(u_sess->plsql_cxt.compile_context_list)))); + u_sess->plsql_cxt.curr_compile_context = popCompileContext(); + clearCompileContext(curr_compile); + if (temp != NULL) { + MemoryContextSwitchTo(temp); + } + return pkg; +} + +List* GetPackageListName(const char* pkgName, const Oid nspOid) +{ + StringInfoData nameData; + List* nameList = NULL; + initStringInfo(&nameData); + char* schemaName = get_namespace_name(nspOid); + if (schemaName == NULL) { + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_PLPGSQL_ERROR), + errmsg("failed to find package schema name"), + errdetail("when compiling package \"%s\", failed to find its schema name; "\ "the schema may have been dropped by another session", pkgName), + errcause("excessive concurrency"), + erraction("reduce concurrency and retry"))); + } + appendStringInfoString(&nameData, schemaName); + appendStringInfoString(&nameData, "."); + appendStringInfoString(&nameData, pkgName); + nameList = stringToQualifiedNameList(nameData.data); + pfree_ext(nameData.data); + return nameList; +} + +/* + * compile and init package by package oid + */ +PLpgSQL_package* plpgsql_pkg_compile(Oid pkgOid, bool for_validator, bool isSpec, bool isCreate) +{ +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR,
(errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), + errmsg("creating a package is not supported in a distributed database"))); +#endif + HeapTuple pkg_tup = NULL; + Form_gs_package pkg_struct = NULL; + PLpgSQL_package* pkg = NULL; + PLpgSQL_pkg_hashkey hashkey; + bool pkg_valid = false; + /* + * Lookup the gs_package tuple by Oid; we'll need it in any case + */ + pkg_tup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkgOid)); + if (!HeapTupleIsValid(pkg_tup)) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for package %u, while compiling package", pkgOid))); + } + AclResult aclresult = pg_package_aclcheck(pkgOid, GetUserId(), ACL_EXECUTE); + Form_gs_package pkgForm = (Form_gs_package)GETSTRUCT(pkg_tup); + NameData pkgname = pkgForm->pkgname; + if (aclresult != ACLCHECK_OK) { + aclcheck_error(aclresult, ACL_KIND_PACKAGE, pkgname.data); + } + pkg_struct = (Form_gs_package)GETSTRUCT(pkg_tup); + hashkey.pkgOid = pkgOid; + pkg = plpgsql_pkg_HashTableLookup(&hashkey); + + if (pkg != NULL) { + Assert(pkg->pkg_oid == pkgOid); + if (pkg->pkg_xmin == HeapTupleGetRawXmin(pkg_tup) && + ItemPointerEquals(&pkg->pkg_tid, &pkg_tup->t_self)) { + pkg_valid = true; + } else { + /* we will reuse this pkg slot in the hash table later, so clear all refcounts for this pkg and delete it here */ + delete_package_and_check_invalid_item(pkgOid); + pkg_valid = false; + } + } + PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + PG_TRY(); + { + if (!pkg_valid) { + pkg = NULL; + pkg = do_pkg_compile(pkgOid, pkg_tup, pkg, &hashkey, true); +#ifndef ENABLE_MULTIPLE_NODES + PackageInit(pkg, isCreate); +#endif + if (!isSpec && pkg != NULL) { + pkg = do_pkg_compile(pkgOid, pkg_tup, pkg, &hashkey, false); + if (pkg == NULL) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("package %u not found", pkgOid))); + } + ReleaseSysCache(pkg_tup); + pkg_tup = NULL; +#ifndef ENABLE_MULTIPLE_NODES + PackageInit(pkg, isCreate); +#endif + } else if (!isSpec && pkg == NULL) { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("package spec %u not found", pkgOid))); + } + } else { + if (!pkg->is_bodycompiled && !isSpec) { + pkg = do_pkg_compile(pkgOid, pkg_tup, pkg, &hashkey, false); + } + + /* package must be compiled before init */ +#ifndef ENABLE_MULTIPLE_NODES + if (pkg != NULL) { + PackageInit(pkg, isCreate); + } else { + ereport(ERROR, (errmodule(MOD_PLSQL), errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("package spec %u not found", pkgOid))); + } +#endif + } + } + PG_CATCH(); + { +#ifndef ENABLE_MULTIPLE_NODES + bool insertError = u_sess->attr.attr_common.plsql_show_all_error || + !u_sess->attr.attr_sql.check_function_bodies; + if (insertError) { + InsertError(pkgOid); + } +#endif + popToOldCompileContext(save_compile_context); + PG_RE_THROW(); + } + PG_END_TRY(); + if (HeapTupleIsValid(pkg_tup)) { + ReleaseSysCache(pkg_tup); + pkg_tup = NULL; + } + /* + * Finally return the compiled package + */ + return pkg; +} + + +/* + * error context callback to let us supply a call-stack traceback. + * If we are validating or executing an anonymous code block, the function + * source text is passed as an argument. + */ +void plpgsql_compile_error_callback(void* arg) +{ + if (arg != NULL) { + /* + * Try to convert syntax error position to reference text of original + * CREATE FUNCTION or DO command.
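The reuse test in plpgsql_pkg_compile — same raw xmin and same tuple id as the gs_package row the cache entry was built from — is the entire staleness check. Restated as a helper for clarity only (it uses the same openGauss calls as the code above):

```
// A cached package is reusable only if the catalog row it was compiled
// from is untouched: any update changes xmin or moves the tuple.
bool cached_pkg_is_valid(PLpgSQL_package* pkg, HeapTuple pkg_tup)
{
    return pkg->pkg_xmin == HeapTupleGetRawXmin(pkg_tup) &&
           ItemPointerEquals(&pkg->pkg_tid, &pkg_tup->t_self);
}
```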
+ */ + if (function_parse_error_transpose((const char*)arg)) { + return; + } + /* + * Done if a syntax error position was reported; otherwise we have to + * fall back to a "near line N" report. + */ + } + int rc = CompileWhich(); + if (rc == PLPGSQL_COMPILE_PROC) { + if (u_sess->plsql_cxt.curr_compile_context != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_error_funcname) { + errcontext("compilation of PL/pgSQL function \"%s\" near line %d", + u_sess->plsql_cxt.curr_compile_context->plpgsql_error_funcname, plpgsql_latest_lineno()); + } + } else if (rc != PLPGSQL_COMPILE_NULL) { + errcontext("compilation of PL/pgSQL package near line %d", + plpgsql_latest_lineno()); + } +} + +Oid findPackageParameter(const char* objname) +{ + Oid toid = InvalidOid; + if (u_sess->plsql_cxt.curr_compile_context != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL) { + PLpgSQL_package* pkg = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package; + PLpgSQL_nsitem* ns = NULL; + ns = plpgsql_ns_lookup(pkg->public_ns, false, objname, NULL, NULL, NULL); + if (ns == NULL) { + ns = plpgsql_ns_lookup(pkg->private_ns, false, objname, NULL, NULL, NULL); + } + if (ns != NULL) { + switch (ns->itemtype) { + case PLPGSQL_NSTYPE_REFCURSOR: + toid = REFCURSOROID; + break; + case PLPGSQL_NSTYPE_RECORD: + toid = RECORDOID; + break; + case PLPGSQL_NSTYPE_VAR: { + PLpgSQL_var* var = (PLpgSQL_var*)pkg->datums[ns->itemno]; + if (var->datatype->typoid == REFCURSOROID && OidIsValid(var->datatype->cursorCompositeOid)) { + toid = var->datatype->cursorCompositeOid; + } else { + toid = InvalidOid; + } + break; + } + default: + toid = InvalidOid; + } + } + } + return toid; +} + +int GetLineNumber(const char* procedureStr, int loc) +{ +#ifdef ENABLE_MULTIPLE_NODES + return 0; +#endif + int lines = 1; + int jumpWords = 0; + if (procedureStr == nullptr) { + return 0; + } + if (!strncmp(procedureStr, " DECLARE ", strlen(" DECLARE "))) { + if (strlen(procedureStr) > strlen(" DECLARE ")) { + if (procedureStr[strlen(" DECLARE ")] == '\n') { + jumpWords = strlen(" DECLARE "); + } + } + } else if (!strncmp(procedureStr, " PACKAGE DECLARE ", strlen(" PACKAGE DECLARE "))) { + if (strlen(procedureStr) > strlen(" PACKAGE DECLARE ")) { + if (procedureStr[strlen(" PACKAGE DECLARE ")] == '\n') { + jumpWords = strlen(" PACKAGE DECLARE "); + } + } + } + if (procedureStr == NULL || loc < 0) { + return 0; + } + if (jumpWords > loc) { + return 0; + } + int i = jumpWords; + while (i-- >= 0) { + procedureStr++; + } + for (int i = jumpWords; i < loc; i++) { + if (*procedureStr == '\n') { + lines++; + } + procedureStr++; + } + return lines; +} + +/* + Get the correct line number in a package or procedure; the line numbering + starts at the CREATE statement. +*/ +int GetProcedureLineNumberInPackage(const char* procedureStr, int loc) +{ +#ifndef ENABLE_MULTIPLE_NODES + if (!u_sess->attr.attr_common.plsql_show_all_error) { + return 0; + } +#else + return 0; +#endif + int lines = GetLineNumber(procedureStr, loc); + int rc = CompileWhich(); + if (rc == PLPGSQL_COMPILE_PACKAGE_PROC) { + lines = u_sess->plsql_cxt.package_first_line + u_sess->plsql_cxt.procedure_start_line + u_sess->plsql_cxt.procedure_first_line + lines - 3; + return lines > 0 ? lines : 1; + } else if (rc == PLPGSQL_COMPILE_PACKAGE) { + if (u_sess->plsql_cxt.procedure_start_line > 0) { + lines = u_sess->plsql_cxt.procedure_start_line + u_sess->plsql_cxt.package_first_line - 1; + return lines > 0 ?
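At its core, GetLineNumber above is newline counting up to a byte offset (plus skipping the injected " DECLARE " header when present). The core mapping as a self-contained sketch:

```
/* Map a 0-based byte offset in src to a 1-based line number. */
static int line_of_offset(const char* src, int loc)
{
    if (src == nullptr || loc < 0) {
        return 0;
    }
    int lines = 1;
    for (int i = 0; i < loc && src[i] != '\0'; i++) {
        if (src[i] == '\n') {
            lines++;
        }
    }
    return lines;
}
```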
lines : 1; + } else { + if (lines <= 1) { + lines = u_sess->plsql_cxt.package_first_line; + } else { + lines = u_sess->plsql_cxt.package_first_line + lines - 1; + } + return lines > 0 ? lines : 1; + } + } + else if (rc == PLPGSQL_COMPILE_PROC) { + lines = u_sess->plsql_cxt.procedure_first_line + lines; + return lines > 0 ? lines : 1; + } else { + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_PACKAGE), + errmsg("package or procedure not found."), + errdetail("package may have errors"), + errcause("package or procedure not found"), + erraction("retry"))); + } + return lines; +} + +/* + insert error line number and message into DBE_PLDEVELOPER.gs_errors +*/ +void InsertError(Oid objId) +{ +#ifdef ENABLE_MULTIPLE_NODES + return; +#else + if (u_sess->plsql_cxt.errorList == NULL || + (!u_sess->attr.attr_common.plsql_show_all_error && + u_sess->attr.attr_sql.check_function_bodies)) { + return; + } + Oid id = InvalidOid; + Oid nspid = InvalidOid; + char* name = NULL; + char* type = NULL; + Oid userId = (Oid)u_sess->misc_cxt.CurrentUserId; + int rc = CompileWhich(); + if (rc == PLPGSQL_COMPILE_PROC) { + id = objId; + HeapTuple tuple; + bool isnull = false; + PLpgSQL_function* func = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile; + tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(func->fn_oid)); + Form_pg_proc procStruct = (Form_pg_proc)GETSTRUCT(tuple); + nspid = procStruct->pronamespace; + name = NameStr(procStruct->proname); + Datum prokindDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_prokind, &isnull); + /* prokind may be null */ + char prokind; + if (isnull) { + prokind = 'f'; + } else { + prokind = DatumGetChar(prokindDatum); + } + if (PROC_IS_PRO(prokind)) { + type = "procedure"; + } else { + type = "function"; + } + ReleaseSysCache(tuple); + } else if ((rc == PLPGSQL_COMPILE_PACKAGE || + rc == PLPGSQL_COMPILE_PACKAGE_PROC)) { + HeapTuple tuple; + PLpgSQL_package* pkg = u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package; + id = pkg->pkg_oid; + tuple = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkg->pkg_oid)); + if (!HeapTupleIsValid(tuple)) { + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_PACKAGE), + errmsg("package not found."), + errdetail("package may have errors"), + errcause("create package may have errors"), + erraction("please check package"))); + } + Form_gs_package pkgStruct = (Form_gs_package)GETSTRUCT(tuple); + nspid = pkgStruct->pkgnamespace; + name = NameStr(pkgStruct->pkgname); + if (pkg->is_spec_compiling) { + type = "package"; + } else { + type = "package body"; + } + ReleaseSysCache(tuple); + } + StringInfoData ds; + initStringInfo(&ds); + appendStringInfoString(&ds, + "declare\n" + "PRAGMA AUTONOMOUS_TRANSACTION;\n" + "oldId int:=0;" + "objId int:=0;" + "allNum int:=0;\n" + "begin\n "); + appendStringInfo(&ds, + "select count(*) from dbe_pldeveloper.gs_source into allNum where " + "nspid=%u and name=\'%s\' and type=\'%s\';", nspid, name, type); + appendStringInfo(&ds, + "if allNum > 0 then " + "select id from dbe_pldeveloper.gs_source into oldId where " + "nspid=%u and name=\'%s\' and type=\'%s\';" + "objId := oldId; " + "else " + "objId := %u;" + "end if;", nspid, name, type, objId); + appendStringInfo(&ds, + "delete from DBE_PLDEVELOPER.gs_errors where nspid=%u and name=\'%s\' and type = \'%s\';\n", + nspid, name, type); + char* errmsg = NULL; + int line = 0; + if (rc != PLPGSQL_COMPILE_NULL) { + ListCell* cell = NULL; + foreach (cell, u_sess->plsql_cxt.errorList) { + PLpgSQL_error* item =
(PLpgSQL_error*)lfirst(cell); + errmsg = item->errmsg; + line = item->line; + appendStringInfoString(&ds, "insert into DBE_PLDEVELOPER.gs_errors "); + appendStringInfo(&ds, "values(objId,%u,%u,\'%s\',\'%s\',%d,$gserrors$%s$gserrors$);\n", + userId, nspid, name, type, line, errmsg); + } + } + appendStringInfo(&ds, "end;"); + List* rawParserList = NULL; + rawParserList = raw_parser(ds.data); + DoStmt* stmt = (DoStmt *)linitial(rawParserList); + u_sess->plsql_cxt.insertError = true; + int save_compile_status = getCompileStatus(); + int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); + PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + MemoryContext temp = NULL; + if (u_sess->plsql_cxt.curr_compile_context != NULL) { + temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); + } + PG_TRY(); + { + (void)CompileStatusSwtichTo(NONE_STATUS); + u_sess->plsql_cxt.curr_compile_context = NULL; + ExecuteDoStmt(stmt, true); + } + PG_CATCH(); + { + if (temp != NULL) { + MemoryContextSwitchTo(temp); + } + (void)CompileStatusSwtichTo(save_compile_status); + u_sess->plsql_cxt.curr_compile_context = save_compile_context; + u_sess->plsql_cxt.isCreateFunction = false; + PG_RE_THROW(); + } + PG_END_TRY(); + u_sess->plsql_cxt.curr_compile_context = save_compile_context; + if (rc != PLPGSQL_COMPILE_PACKAGE_PROC) { + clearCompileContextList(save_compile_list_length); + } + (void)CompileStatusSwtichTo(save_compile_status); + if (temp != NULL) { + MemoryContextSwitchTo(temp); + } + u_sess->plsql_cxt.insertError = false; + pfree_ext(ds.data); + list_free_deep(u_sess->plsql_cxt.errorList); + u_sess->plsql_cxt.errorList = NULL; +#endif +} + +/* + delete error and source records of the object from DBE_PLDEVELOPER.gs_errors and gs_source +*/ +void DropErrorByOid(int objtype, Oid objoid) +{ + bool notInsert = u_sess->attr.attr_common.upgrade_mode != 0 || SKIP_GS_SOURCE; + if (notInsert) { + return; + } + +#ifdef ENABLE_MULTIPLE_NODES + return; +#else + char* name = NULL; + char* type = NULL; + Oid nspid = InvalidOid; + if (objtype == PLPGSQL_PROC) { + HeapTuple tuple; + bool isnull = false; + tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(objoid)); + Form_pg_proc procStruct = (Form_pg_proc)GETSTRUCT(tuple); + nspid = procStruct->pronamespace; + name = NameStr(procStruct->proname); + Datum prokindDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_prokind, &isnull); + /* prokind may be null */ + char prokind; + if (isnull) { + prokind = 'f'; + } else { + prokind = DatumGetChar(prokindDatum); + } + if (PROC_IS_PRO(prokind)) { + type = "procedure"; + } else { + type = "function"; + } + ReleaseSysCache(tuple); + } else if ((objtype == PLPGSQL_PACKAGE || + objtype == PLPGSQL_PACKAGE_BODY)) { + HeapTuple tuple = NULL; + tuple = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(objoid)); + if (!HeapTupleIsValid(tuple)) { + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_UNDEFINED_PACKAGE), + errmsg("package not found."), + errdetail("package may have errors"), + errcause("create package may have errors"), + erraction("please check package"))); + } + Form_gs_package pkgStruct = (Form_gs_package)GETSTRUCT(tuple); + nspid = pkgStruct->pkgnamespace; + name = NameStr(pkgStruct->pkgname); + if (objtype == PLPGSQL_PACKAGE) { + type = "package"; + } else { + type = "package body"; + } + ReleaseSysCache(tuple); + } + StringInfoData ds; + initStringInfo(&ds); + appendStringInfoString(&ds, " declare begin "); + if (objtype == PLPGSQL_PACKAGE_BODY) { +
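Both InsertError and DropErrorByOid reuse one idiom: assemble an anonymous block as text, parse it, and execute the single resulting DoStmt while the PL compile context is parked. Isolated below with the same calls the patch uses; the compile-status save/restore and error handling that surround it above are omitted:

```
void run_generated_block(const char* block_text)
{
    List* parsed = raw_parser(block_text);     /* text of "declare ... begin ... end;" */
    DoStmt* stmt = (DoStmt*)linitial(parsed);  /* exactly one statement is expected */
    ExecuteDoStmt(stmt, true);                 /* same flag value as the patch passes */
}
```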
appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_errors " + "where nspid=%u and name = \'%s\' and type = \'%s\';", + nspid, name, type); + appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_source where " + "nspid=%u and name = \'%s\' and type = \'%s\';", + nspid, name, type); + } else { + appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_errors " + "where nspid=%u and name = \'%s\' and type = \'%s\';", + nspid, name, type); + appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_errors " + "where nspid=%u and name = \'%s\' and type = \'package body\';", + nspid, name); + appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_source where " + "nspid=%u and name = \'%s\' and type = \'%s\';", + nspid, name, type); + appendStringInfo(&ds, " delete from DBE_PLDEVELOPER.gs_source where " + "nspid=%u and name = \'%s\' and type = \'package body\';", + nspid, name); + } + appendStringInfo(&ds, " EXCEPTION WHEN OTHERS THEN NULL; \n"); + appendStringInfo(&ds, " END; "); + List* rawParserList = raw_parser(ds.data); + DoStmt* stmt = (DoStmt *)linitial(rawParserList); + int save_compile_status = getCompileStatus(); + int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); + PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; + MemoryContext temp = NULL; + if (u_sess->plsql_cxt.curr_compile_context != NULL) { + temp = MemoryContextSwitchTo(u_sess->plsql_cxt.curr_compile_context->compile_tmp_cxt); + } + PG_TRY(); + { + (void)CompileStatusSwtichTo(NONE_STATUS); + u_sess->plsql_cxt.curr_compile_context = NULL; + ExecuteDoStmt(stmt, true); + } + PG_CATCH(); + { + if (temp != NULL) { + MemoryContextSwitchTo(temp); + } + (void)CompileStatusSwtichTo(save_compile_status); + u_sess->plsql_cxt.curr_compile_context = save_compile_context; + clearCompileContextList(save_compile_list_length); + PG_RE_THROW(); + } + PG_END_TRY(); + u_sess->plsql_cxt.curr_compile_context = save_compile_context; + (void)CompileStatusSwtichTo(save_compile_status); + if (temp != NULL) { + MemoryContextSwitchTo(temp); + } + pfree_ext(ds.data); +#endif +} + +int CompileWhich() +{ + if (u_sess->plsql_cxt.curr_compile_context == NULL) { + return PLPGSQL_COMPILE_NULL; + } + + if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL) { + if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_signature == NULL) { + return PLPGSQL_COMPILE_NULL; + } else { + return PLPGSQL_COMPILE_PACKAGE_PROC; + } + } else if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile == NULL) { + return PLPGSQL_COMPILE_PACKAGE; + } else if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package == NULL && + u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL) { + if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile->fn_signature == NULL) { + return PLPGSQL_COMPILE_NULL; + } else { + return PLPGSQL_COMPILE_PROC; + } + } else { + return PLPGSQL_COMPILE_NULL; + } + return PLPGSQL_COMPILE_NULL; +} + +void InsertErrorMessage(const char* message, int yyloc, bool isQueryString, int lines) +{ +#ifdef ENABLE_MULTIPLE_NODES + return; +#else + int rc = CompileWhich(); + if (rc == PLPGSQL_COMPILE_NULL || + (!u_sess->attr.attr_common.plsql_show_all_error && u_sess->attr.attr_sql.check_function_bodies)) { + return; + } +#endif + u_sess->plsql_cxt.have_error =
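CompileWhich above is a four-way decision table over the two "currently compiling" pointers. A self-contained restatement with hypothetical enum names:

```
enum CompileKind { K_NULL, K_PROC, K_PACKAGE, K_PACKAGE_PROC };

CompileKind which(const void* curr_pkg, const void* curr_proc)
{
    if (curr_pkg != nullptr && curr_proc != nullptr) {
        return K_PACKAGE_PROC; /* a procedure inside a package body */
    }
    if (curr_pkg != nullptr) {
        return K_PACKAGE;
    }
    if (curr_proc != nullptr) {
        return K_PROC;         /* subject to fn_signature being set, as above */
    }
    return K_NULL;
}
```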
true; + if (!isQueryString && lines == 0) { + lines = GetProcedureLineNumberInPackage(u_sess->plsql_cxt.curr_compile_context->core_yy->scanbuf, yyloc); + } else if (lines == 0) { + lines = GetProcedureLineNumberInPackage(t_thrd.postgres_cxt.debug_query_string, yyloc); + } + addErrorList(message, lines); +} diff --git a/src/common/pl/plpgsql/src/pl_scanner.cpp b/src/common/pl/plpgsql/src/pl_scanner.cpp index d18d913f0..8e0da2bab 100644 --- a/src/common/pl/plpgsql/src/pl_scanner.cpp +++ b/src/common/pl/plpgsql/src/pl_scanner.cpp @@ -1026,6 +1026,14 @@ static int plpgsql_parse_cursor_attribute(int* loc) case T_PACKAGE_CURSOR_ROWCOUNT: /* check the valid of cursor variable */ ns = plpgsql_ns_lookup(plpgsql_ns_top(), false, aux1.lval.str, aux3.lval.str, NULL, NULL); + if (ns == NULL) { + List *idents = list_make2(makeString(aux1.lval.str), makeString(aux3.lval.str)); + int dno = plpgsql_pkg_add_unknown_var_to_namespace(idents); + if (dno != -1) { + ns = plpgsql_ns_lookup(plpgsql_ns_top(), false, aux1.lval.str, aux3.lval.str, NULL, NULL); + } + list_free_deep(idents); + } if (ns != NULL && ns->itemtype == PLPGSQL_NSTYPE_VAR) { PLpgSQL_var* var = (PLpgSQL_var*)u_sess->plsql_cxt.curr_compile_context->plpgsql_Datums[ns->itemno]; if (!(var != NULL && var->datatype && var->datatype->typoid == REFCURSOROID)) { @@ -1130,6 +1138,9 @@ bool plpgsql_is_token_keyword(int token) static PLpgSQL_package* compilePackageSpec(Oid pkgOid) { int oldCompileStatus = getCompileStatus(); + HeapTuple pkgTuple = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkgOid)); + bool isnull = false; + PLpgSQL_package* pkg = NULL; if (u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile_package != NULL || u_sess->plsql_cxt.curr_compile_context->plpgsql_curr_compile != NULL) { CompileStatusSwtichTo(COMPILIE_PKG); @@ -1144,7 +1155,16 @@ static PLpgSQL_package* compilePackageSpec(Oid pkgOid) YYSTYPE temp_lval = plpgsql_yylval; YYLTYPE temp_lloc = plpgsql_yylloc; /* compile package spec */ - PLpgSQL_package* pkg = plpgsql_package_validator(pkgOid, true, false); + if (HeapTupleIsValid(pkgTuple)) { + (void)SysCacheGetAttr(PACKAGEOID, pkgTuple, Anum_gs_package_pkgbodyinitsrc, &isnull); + } + /* if the package doesn't have an instantiation string, we don't need to compile the package body */ + if (isnull) { + pkg = plpgsql_package_validator(pkgOid, true, false); + } else { + pkg = plpgsql_package_validator(pkgOid, false, false); + } + ReleaseSysCache(pkgTuple); CompileStatusSwtichTo(oldCompileStatus); for (int i = 0; i < MAX_PUSHBACKS; i++) { pushback_auxdata[i] = temp_pushback_auxdata[i]; @@ -1162,6 +1182,7 @@ PLpgSQL_datum* GetPackageDatum(List* name, bool* isSamePackage) PLpgSQL_package* pkg = NULL; Oid pkgOid = InvalidOid; Oid namespaceId = InvalidOid; + Oid currentCompilePkgOid = InvalidOid; DeconstructQualifiedName(name, &schemaname, &objname, &pkgname); if (schemaname != NULL) { @@ -1175,7 +1196,7 @@ PLpgSQL_datum* GetPackageDatum(List* name, bool* isSamePackage) PLpgSQL_compile_context* curr_compile = u_sess->plsql_cxt.curr_compile_context; if (curr_compile->plpgsql_curr_compile_package != NULL) { - Oid currentCompilePkgOid = curr_compile->plpgsql_curr_compile_package->pkg_oid; + currentCompilePkgOid = curr_compile->plpgsql_curr_compile_package->pkg_oid; if (currentCompilePkgOid == pkgOid) { pkg = curr_compile->plpgsql_curr_compile_package; if (isSamePackage != NULL) { @@ -1189,8 +1210,10 @@ } if (u_sess->plsql_cxt.need_pkg_dependencies) { + MemoryContext temp =
MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); u_sess->plsql_cxt.pkg_dependencies = list_append_unique_oid(u_sess->plsql_cxt.pkg_dependencies, pkgOid); + MemoryContextSwitchTo(temp); } if (pkg == NULL) { @@ -1199,6 +1222,19 @@ MemoryContextSwitchTo(temp); } + /* during compilePackageSpec, the current compile package may be invalidated, so check it */ + bool compilePackage = curr_compile->plpgsql_curr_compile_package != NULL; + if (compilePackage && currentCompilePkgOid != pkgOid) { + if (curr_compile->plpgsql_curr_compile_package->pkg_cxt == NULL) { + ereport(ERROR, + (errmodule(MOD_PLSQL), errcode(ERRCODE_PLPGSQL_ERROR), + errmsg("concurrency error when compiling package."), + errdetail("while compiling the package, it was invalidated by another session."), + errcause("excessive concurrency"), + erraction("reduce concurrency and retry"))); + } + } + struct PLpgSQL_nsitem* nse = plpgsql_ns_lookup(pkg->public_ns, false, pkgname, objname, NULL, NULL); if (nse == NULL) { return NULL; diff --git a/src/common/pl/plpgsql/src/pl_sql_validator.cpp b/src/common/pl/plpgsql/src/pl_sql_validator.cpp index a2960d04d..7c2ff876d 100755 --- a/src/common/pl/plpgsql/src/pl_sql_validator.cpp +++ b/src/common/pl/plpgsql/src/pl_sql_validator.cpp @@ -23,6 +23,7 @@ #include "utils/plpgsql.h" #include "pgstat.h" +#include "catalog/pg_proc.h" #include "parser/parse_node.h" #include "client_logic/client_logic_proc.h" #ifdef STREAMPLAN @@ -37,11 +38,11 @@ PG_MODULE_MAGIC; #endif -typedef struct pl_validate_expr_info { - PLpgSQL_expr* expr; +typedef struct pl_validate_expr_info : PLpgSQL_expr { SQLFunctionParseInfoPtr pinfo; } pl_validate_expr_info; + static void plpgsql_crete_proc_parser_setup(struct ParseState* pstate, pl_validate_expr_info* pl_validate_info); static ResourceOwnerData* create_temp_resourceowner(const char* name); static void release_temp_resourceowner(ResourceOwnerData* resource_owner, bool is_commit); @@ -53,12 +54,63 @@ static void pl_validate_stmt(PLpgSQL_stmt* stmt, PLpgSQL_function* func, SQLFunc SPIPlanPtr* plan, List** dynexec_list); static void pl_validate_stmts(List* stmts, PLpgSQL_function* func, SQLFunctionParseInfoPtr pinfo, SPIPlanPtr* plan, List** dynexec_list); +static void plpgsql_fn_parser_replace_param_type_for_insert(struct ParseState* pstate, int param_no, + Oid param_new_type, Oid relid, const char *col_name); +static int find_input_param_count(char *argmodes, int n_modes, int param_no); + +static int find_input_param_count(char *argmodes, int n_modes, int param_no) +{ + int real_param_no = 0; + Assert(argmodes[param_no - 1] == PROARGMODE_IN || argmodes[param_no - 1] == PROARGMODE_INOUT || + argmodes[param_no - 1] == PROARGMODE_VARIADIC); + for (int i = 0; i < param_no && i < n_modes; i++) { + if (argmodes[i] == PROARGMODE_IN || argmodes[i] == PROARGMODE_INOUT || + argmodes[i] == PROARGMODE_VARIADIC) { + real_param_no++; + } + } + return real_param_no; +} + +static void plpgsql_fn_parser_replace_param_type_for_insert(struct ParseState* pstate, int param_no, + Oid param_new_type, Oid relid, const char *col_name) +{ + int real_param_no = 0; + /* Find real param no - skip out params in count */ + if (param_no > 1) { + PLpgSQL_expr *expr = (PLpgSQL_expr*)pstate->p_ref_hook_state; + HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(expr->func->fn_oid)); + if (!HeapTupleIsValid(tuple)) { + return; + } + char *argmodes = NULL; + bool isNull; + Datum proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP,
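The typedef change above turns pl_validate_expr_info from a wrapper holding a PLpgSQL_expr pointer into a struct deriving from PLpgSQL_expr, which is why pl_validate_expression (later in this hunk) can seed it by copying the base sub-object. The pattern, with hypothetical stand-in types:

```
struct Expr { int field_a; int field_b; };    /* stands in for PLpgSQL_expr */
struct ValidateInfo : Expr { void* pinfo; };  /* stands in for pl_validate_expr_info */

void seed(ValidateInfo& info, const Expr& src)
{
    *(Expr*)&info = src;  /* copy only the Expr part, as the patch does */
    info.pinfo = nullptr; /* extra members are assigned separately */
}
```

The payoff is that the struct can now travel through parser hooks that expect the base type while still carrying its extra state.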
tuple, Anum_pg_proc_proargmodes, &isNull); + ArrayType *arr = NULL; + if (!isNull && proargmodes != PointerGetDatum(NULL) && (arr = DatumGetArrayTypeP(proargmodes)) != NULL) { + int n_modes = ARR_DIMS(arr)[0]; + argmodes = (char*)ARR_DATA_PTR(arr); + if (param_no > n_modes) { + /* parameter is a plpgsql variable - nothing to do */ + return; + } + /* just verify that an input parameter is used */ + real_param_no = find_input_param_count(argmodes, n_modes, param_no); + } + ReleaseSysCache(tuple); + } + if (real_param_no == 0) { + real_param_no = param_no; + } + /* call the sql function hook */ + sql_fn_parser_replace_param_type_for_insert(pstate, real_param_no, param_new_type, relid, col_name); +} void plpgsql_crete_proc_parser_setup(struct ParseState* pstate, pl_validate_expr_info* pl_validate_info) { - plpgsql_parser_setup(pstate, pl_validate_info->expr); + plpgsql_parser_setup(pstate, pl_validate_info); pstate->p_create_proc_operator_hook = plpgsql_create_proc_operator_ref; - pstate->p_create_proc_insert_hook = sql_fn_parser_replace_param_type_for_insert; + pstate->p_create_proc_insert_hook = plpgsql_fn_parser_replace_param_type_for_insert; pstate->p_cl_hook_state = pl_validate_info->pinfo; } @@ -68,9 +120,9 @@ void pl_validate_expression(PLpgSQL_expr* expr, PLpgSQL_function* func, SQLFunct return; } pl_validate_expr_info pl_validate_info; - pl_validate_info.expr = expr; - pl_validate_info.expr->func = func; - pl_validate_info.expr->func->pre_parse_trig = true; + *(PLpgSQL_expr*)&pl_validate_info = *expr; + pl_validate_info.func = func; + pl_validate_info.func->pre_parse_trig = true; pl_validate_info.pinfo = pinfo; /* * Generate plan @@ -109,10 +161,15 @@ void pl_validate_function_sql(PLpgSQL_function* func, bool is_replace) /* we cannot handle functions with client logic on the server side */ return; } + /* + * since a function with the same func_id may already exist, + * first delete old gs_cl_proc info related to the previous create function call + */ + + delete_proc_client_info(func->fn_oid); if (func->fn_nargs == 0 && !is_fn_retval_handled(func->fn_rettype)) { /* Nothing to validate - no input, no return */ /* if there is some info in gs_cl_proc - remove it */ - delete_proc_client_info(func->fn_oid); return; } bool stored_prepase_trig = func->pre_parse_trig; diff --git a/src/common/pl/plpython/plpy_plpymodule.cpp b/src/common/pl/plpython/plpy_plpymodule.cpp index 110fc4d5f..13ce37f1c 100644 --- a/src/common/pl/plpython/plpy_plpymodule.cpp +++ b/src/common/pl/plpython/plpy_plpymodule.cpp @@ -1,6 +1,5 @@ /* * Portions Copyright (c) 2021, openGauss Contributors - * * the plpy module * * src/common/pl/plpython/plpy_plpymodule.cpp diff --git a/src/common/port/cipher.cpp b/src/common/port/cipher.cpp index 3c676dce4..b840f560b 100644 --- a/src/common/port/cipher.cpp +++ b/src/common/port/cipher.cpp @@ -343,7 +343,7 @@ static bool ReadKeyContentFromFile(KeyMode mode, const char* cipherkeyfile, cons /* Note: Data Source use initdb key file by default (datasource.key.* not given) */ global_rand_file = &g_rand_file_content[INITDB_NOCLOUDOBS_TYPE]; global_cipher_file = &g_cipher_file_content[INITDB_NOCLOUDOBS_TYPE]; - } else if (mode == SOURCE_MODE || mode == HADR_MODE || mode == USER_MAPPING_MODE || mode == SUBSCRIPTION_MODE) { + } else if (mode == SOURCE_MODE || mode == HADR_MODE|| mode == USER_MAPPING_MODE || mode == SUBSCRIPTION_MODE) { /* * For Data Source: * read key from file (datasource.key.*): we do not cache these keys here @@ -835,6 +835,11 @@ void decode_cipher_files( securec_check_ss_c(ret, "\0",
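find_input_param_count maps a positional parameter index onto its rank among input parameters, skipping OUT arguments. A minimal sketch — 'i'/'b'/'v' mirror PROARGMODE_IN/INOUT/VARIADIC under the usual catalog encoding, an assumption for illustration:

```
static int input_rank(const char* modes, int n_modes, int param_no)
{
    int rank = 0;
    for (int i = 0; i < param_no && i < n_modes; i++) {
        if (modes[i] == 'i' || modes[i] == 'b' || modes[i] == 'v') {
            rank++; /* count only IN/INOUT/VARIADIC slots */
        }
    }
    return rank;
}
```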
"\0"); ret = snprintf_s(randfile, MAXPGPATH, MAXPGPATH - 1, "%s/subscription%s", datadir, RAN_KEY_FILE); securec_check_ss_c(ret, "\0", "\0"); + } else if (mode == HADR_MODE) { + ret = snprintf_s(cipherkeyfile, MAXPGPATH, MAXPGPATH - 1, "%s/hadr%s", datadir, CIPHER_KEY_FILE); + securec_check_ss_c(ret, "\0", "\0"); + ret = snprintf_s(randfile, MAXPGPATH, MAXPGPATH - 1, "%s/hadr%s", datadir, RAN_KEY_FILE); + securec_check_ss_c(ret, "\0", "\0"); } /* * in client_mode,check with the user name is appointed.if so, read the files begin @@ -852,13 +857,6 @@ void decode_cipher_files( ret = snprintf_s(randfile, MAXPGPATH, MAXPGPATH - 1, "%s/%s%s", datadir, user_name, RAN_KEY_FILE); securec_check_ss_c(ret, "\0", "\0"); } - } else if (mode == HADR_MODE) { - ret = snprintf_s(cipherkeyfile, MAXPGPATH, MAXPGPATH - 1, "%s/cipher/%s/server.key.cipher", - user_name, datadir); - securec_check_ss_c(ret, "\0", "\0"); - ret = snprintf_s(randfile, MAXPGPATH, MAXPGPATH - 1, "%s/rand/%s/server.key.rand", - user_name, datadir); - securec_check_ss_c(ret, "\0", "\0"); } /* firstly we get key from memory, and then try to read from file. */ diff --git a/src/common/port/gs_thread.cpp b/src/common/port/gs_thread.cpp index 574a544b5..9cf521594 100644 --- a/src/common/port/gs_thread.cpp +++ b/src/common/port/gs_thread.cpp @@ -51,7 +51,12 @@ #include "gssignal/gs_signal.h" #include "utils/pg_locale.h" #include "gs_policy/policy_common.h" +#ifdef ENABLE_GSS +#include "gssapi/gssapi_krb5.h" +#endif /* ENABLE_GSS */ +#ifdef KRB5 #include "krb5.h" +#endif #ifndef WIN32_ONLY_COMPILER #include "dynloader.h" #else @@ -473,8 +478,10 @@ void gs_thread_exit(int code) /* free the locale cache */ freeLocaleCache(true); +#ifdef ENABLE_LLVM_COMPILE /* release llvm context memory */ CodeGenThreadTearDown(); +#endif CancelAutoAnalyze(); @@ -482,6 +489,12 @@ void gs_thread_exit(int code) if (t_thrd.bn != NULL) { t_thrd.bn->dead_end = true; + } else if (!t_thrd.is_inited) { + /* if thread has error befor get backend, get backend from childSlot. 
*/ + Backend* bn = GetBackend(t_thrd.child_slot); + if (bn != NULL) { + bn->dead_end = true; + } } /* release the signal slot in signal_base */ diff --git a/src/common/port/pgstrcasecmp.cpp b/src/common/port/pgstrcasecmp.cpp index e89957659..0d155d2d9 100644 --- a/src/common/port/pgstrcasecmp.cpp +++ b/src/common/port/pgstrcasecmp.cpp @@ -144,3 +144,17 @@ char* pg_strtolower(char* str) } return str; } + +char* pg_strtoupper(char* str) +{ + char* ptrout = str; + + if (str == NULL) { + return NULL; + } + while (*ptrout) { + *ptrout = pg_toupper(*ptrout); + ptrout++; + } + return str; +} diff --git a/src/common/timezone/CMakeLists.txt b/src/common/timezone/CMakeLists.txt index 22b596525..58b51a4d1 100755 --- a/src/common/timezone/CMakeLists.txt +++ b/src/common/timezone/CMakeLists.txt @@ -14,7 +14,7 @@ set(TGT_zic_INC set(zic_DEF_OPTIONS ${MACRO_OPTIONS}) set(zic_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(zic_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(zic_LINK_LIBS libpgport.a -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt -lz -lminiunz) +set(zic_LINK_LIBS libpgport.a -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz) add_bintarget(zic TGT_zic_SRC TGT_zic_INC "${zic_DEF_OPTIONS}" "${zic_COMPILE_OPTIONS}" "${zic_LINK_OPTIONS}" "${zic_LINK_LIBS}") add_dependencies(zic pgport_static) target_link_directories(zic PUBLIC diff --git a/src/common/timezone/Makefile b/src/common/timezone/Makefile index 322d3531b..3d309dcc4 100644 --- a/src/common/timezone/Makefile +++ b/src/common/timezone/Makefile @@ -60,8 +60,10 @@ zic: $(ZICOBJS) | submake-libpgport $(CC) $(CFLAGS) $(ZICOBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) install: all installdirs +ifneq ($(with_openeuler_os), yes) cp $(LIBOPENSSL_LIB_PATH)/libssl.so* '$(DESTDIR)$(libdir)/' cp $(LIBOPENSSL_LIB_PATH)/libcrypto.so* '$(DESTDIR)$(libdir)/' +endif ifeq (,$(with_system_tzdata)) $(ZIC) -d '$(DESTDIR)$(datadir)/timezone' -p '$(POSIXRULES)' $(TZDATAFILES) endif diff --git a/src/common/timezone/data/yearistype.sh b/src/common/timezone/data/yearistype.sh deleted file mode 100644 index e3de67326..000000000 --- a/src/common/timezone/data/yearistype.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -e - -: 'This file is in the public domain, so clarified as of' -: '2006-07-17 by Arthur David Olson.' 
- -case $#-$1 in - 2-|2-0*|2-*[!0-9]*) - echo "$0: wild year - $1" >&2 - exit 1 ;; -esac - -case $#-$2 in - 2-even) - case $1 in - *[24680]) exit 0 ;; - *) exit 1 ;; - esac ;; - 2-nonpres|2-nonuspres) - case $1 in - *[02468][048]|*[13579][26]) exit 1 ;; - *) exit 0 ;; - esac ;; - 2-odd) - case $1 in - *[13579]) exit 0 ;; - *) exit 1 ;; - esac ;; - 2-uspres) - case $1 in - *[02468][048]|*[13579][26]) exit 0 ;; - *) exit 1 ;; - esac ;; - 2-*) - echo "$0: wild type - $2" >&2 ;; -esac - -echo "$0: usage is $0 year even|odd|uspres|nonpres|nonuspres" >&2 -exit 1 diff --git a/src/gausskernel/CMakeLists.txt b/src/gausskernel/CMakeLists.txt index 949d1005b..491cf45ee 100755 --- a/src/gausskernel/CMakeLists.txt +++ b/src/gausskernel/CMakeLists.txt @@ -115,11 +115,8 @@ set(gaussdb_objects $ $ $ + $ $ - $ - $ - $ - $ $ $ $ @@ -130,10 +127,8 @@ set(gaussdb_objects $ $ $ - $ $ $ - $ $ $ ) @@ -153,8 +148,6 @@ endif() if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") list(APPEND gaussdb_objects $ - $ - $ $ $ $ @@ -163,6 +156,8 @@ if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") endif() list(APPEND gaussdb_objects + $ + $ $ $ $ @@ -173,7 +168,6 @@ list(APPEND gaussdb_objects $ $ $ - $ $ $ $ @@ -190,7 +184,6 @@ list(APPEND gaussdb_objects $ $ $ - $ $ $ $ @@ -242,6 +235,25 @@ if("${ENABLE_MOT}" STREQUAL "ON") ) endif() +if("${ENABLE_UT}" STREQUAL "ON") + list(APPEND gaussdb_objects + $ + ) +endif() + +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND gaussdb_objects + $ + $ + $ + $ + $ + $ + $ + $ + ) +endif() + set(gaussdb_objects ${gaussdb_objects} ${gaussdb_server_objects}) list(APPEND gaussdb_objects @@ -254,22 +266,26 @@ set(gaussdb_LINK_DIRS "") message("******${gaussdb_server_lib}******") set(gaussdb_DEPEND_LIST config_static pgport_srv_static alarmclient_static plpgsql_static pq_static) set(gaussdb_LINK_LIBS libconfig.a libpgport_srv.a libalarmclient.a libpq.a libplpgsql.a) -if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" OR "${ENABLE_PRIVATEGAUSS}" STREQUAL "ON") - list(APPEND gaussdb_LINK_LIBS ${gaussdb_server_lib} libhotpatchclient.a) - list(APPEND gaussdb_DEPEND_LIST ${gaussdb_server_target} hotpatchclient_static) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON" OR "${ENABLE_PRIVATEGAUSS}" STREQUAL "ON") + list(APPEND gaussdb_LINK_LIBS ${gaussdb_server_lib} libhotpatchclient.a) + list(APPEND gaussdb_DEPEND_LIST ${gaussdb_server_target} hotpatchclient_static) + endif() endif() -list(APPEND gaussdb_LINK_LIBS -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt) +list(APPEND gaussdb_LINK_LIBS -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt) if(${ENABLE_MEMORY_CHECK}) list(APPEND gaussdb_LINK_OPTIONS ${MEMCHECK_FLAGS}) list(APPEND gaussdb_LINK_LIBS ${MEMCHECK_LIBS}) list(APPEND gaussdb_LINK_DIRS ${MEMCHECK_LINK_DIRECTORIES}) endif() -list(APPEND gaussdb_LINK_LIBS -lz -lminiunz -leSDKOBS -leSDKLogAPI -lpcre -liconv -lnghttp2 -llog4cpp -lcurl -llz4 -lcjson -l${JEMALLOC_LIB_NAME} -lcgroup -lorc -lparquet -larrow -lthrift -lsnappy -lzstd -lprotobuf -lcom_err_gauss -lgssapi_krb5_gauss -lkrb5_gauss -lgssrpc_gauss -lk5crypto_gauss -lkadm5clnt_mit -lkadm5srv_mit -lkdb5 -lkrb5support_gauss -lstdc++ -lboost_thread -lboost_chrono -lboost_system -lboost_atomic -lxml2 -laio -lncurses -ltinfo) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND gaussdb_LINK_LIBS -lz -lminiunz -leSDKOBS -leSDKLogAPI -lpcre -liconv -lnghttp2 -llog4cpp -lcurl -llz4 -lcjson -l${JEMALLOC_LIB_NAME} -lcgroup -lorc -lparquet -larrow -lthrift -lsnappy -lzstd -lprotobuf 
-lcom_err_gauss -lgssapi_krb5_gauss -lkrb5_gauss -lgssrpc_gauss -lk5crypto_gauss -lkadm5clnt_mit -lkadm5srv_mit -lkdb5 -lkrb5support_gauss -lstdc++ -lboost_thread -lboost_chrono -lboost_system -lboost_atomic -lxml2 -laio -lncurses -ltinfo) +else() + list(APPEND gaussdb_LINK_LIBS -lz -lminiunz -lcurl -llz4 -lcjson -l${JEMALLOC_LIB_NAME} -lcgroup -lzstd -lprotobuf -lncurses -ltinfo -lboost_thread -lboost_chrono -lboost_system -lboost_atomic) +endif() include_directories( - ${BOOST_INCLUDE_PATH} - ${LIBXML_INCLUDE_PATH} ${LIBCURL_INCLUDE_PATH} ${LIBOPENSSL_INCLUDE_PATH} ${LIBLLVM_INCLUDE_PATH} @@ -278,16 +294,12 @@ include_directories( ${LZ4_INCLUDE_PATH} ${CJSON_INCLUDE_PATH} ${JEMALLOC_INCLUDE_PATH} - ${LIBORC_INCLUDE_PATH} - ${LIBPARQUET_INCLUDE_PATH} - ${KERBEROS_INCLUDE_PATH} ${ZSTD_INCLUDE_PATH} ${PROTOBUF_INCLUDE_PATH} - ${LIBTHRIFT_INCLUDE_PATH} - ${SNAPPY_INCLUDE_PATH} ${DCF_INCLUDE_PATH} - ${LIBOBS_INCLUDE_PATH} + ${BOOST_INCLUDE_PATH} ${NUMA_INCLUDE_PATH} + ${XGBOOST_INCLUDE_PATH} ${PROJECT_SRC_DIR}/include ${PROJECT_SRC_DIR}/lib/gstrace ${LIBCGROUP_INCLUDE_PATH} @@ -305,33 +317,50 @@ include_directories( ${PROJECT_SRC_DIR}/gausskernel/storage/replication ) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + include_directories( + ${LIBXML_INCLUDE_PATH} + ${LIBORC_INCLUDE_PATH} + ${LIBPARQUET_INCLUDE_PATH} + ${LIBTHRIFT_INCLUDE_PATH} + ${SNAPPY_INCLUDE_PATH} + ${LIBOBS_INCLUDE_PATH} + ${KERBEROS_INCLUDE_PATH} + ) +endif() + list(APPEND gaussdb_LINK_DIRS ${LIBLLVM_LIB_PATH} - ${BOOST_LIB_PATH} - ${LIBXML_LIB_PATH} ${LIBCURL_LIB_PATH} ${LIBOPENSSL_LIB_PATH} ${ZLIB_LIB_PATH} ${LZ4_LIB_PATH} ${CJSON_LIB_PATH} ${JEMALLOC_LIB_PATH} - ${LIBORC_LIB_PATH} - ${LIBPARQUET_LIB_PATH} - ${LIBPARQUET_ARROW_LIB_PATH} - ${KERBEROS_LIB_PATH} ${ZSTD_LIB_PATH} ${PROTOBUF_LIB_PATH} - ${LIBTHRIFT_LIB_PATH} - ${SNAPPY_LIB_PATH} ${LIBCGROUP_LIB_PATH} ${NCURSES_LIB_PATH} ${CMAKE_BINARY_DIR}/lib ${DCF_LIB_PATH} - ${LIBOBS_LIB_PATH} ${NUMA_LIB_PATH} ${SECURE_LIB_PATH} + ${BOOST_LIB_PATH} ) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND gaussdb_LINK_DIRS + ${LIBXML_LIB_PATH} + ${LIBORC_LIB_PATH} + ${SNAPPY_LIB_PATH} + ${LIBPARQUET_LIB_PATH} + ${LIBPARQUET_ARROW_LIB_PATH} + ${LIBTHRIFT_LIB_PATH} + ${LIBOBS_LIB_PATH} + ${KERBEROS_LIB_PATH} + ) +endif() + if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") list(APPEND gaussdb_LINK_LIBS dcf) endif() @@ -367,8 +396,10 @@ if(NOT "${ENABLE_UT}" STREQUAL "ON") endif() install(TARGETS gaussdb RUNTIME DESTINATION bin) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") install(CODE "execute_process(COMMAND ln -sf gaussdb gs_encrypt WORKING_DIRECTORY \$ENV\{DESTDIR\}${CMAKE_INSTALL_PREFIX}/bin)") install(CODE "message(\"-- Created symlink: gs_encrypt -> gaussdb\")") +endif() if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON") install_symlink(gaussdb gaussmaster ${prefix_home}/bin) endif() diff --git a/src/gausskernel/Makefile b/src/gausskernel/Makefile index bdfca5dfd..7d73798e6 100755 --- a/src/gausskernel/Makefile +++ b/src/gausskernel/Makefile @@ -28,6 +28,10 @@ SUBDIRS = ../common/backend bootstrap cbb optimizer process dbmind runtime secu $(top_builddir)/contrib/log_fdw $(top_builddir)/contrib/test_decoding $(top_builddir)/contrib/mppdb_decoding \ $(top_builddir)/contrib/postgres_fdw +ifeq ($(enable_db4ai_mock), yes) + SUBDIRS += $(top_builddir)/src/test/db4ai +endif + ifeq ($(enable_mysql_fdw), yes) SUBDIRS += $(top_builddir)/contrib/mysql_fdw endif @@ -84,8 +88,10 @@ ifeq ($(enable_gstrace), yes) endif ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) - OBJS += 
$(top_builddir)/../distribute/lib/hotpatch/hotpatch.o \ - $(top_builddir)/../distribute/lib/hotpatch/common/libhotpatchcommon.a + ifneq ($(enable_lite_mode), yes) + OBJS += $(top_builddir)/../distribute/lib/hotpatch/hotpatch.o \ + $(top_builddir)/../distribute/lib/hotpatch/common/libhotpatchcommon.a + endif endif ifeq ($(enable_multiple_nodes), yes) @@ -93,7 +99,9 @@ ifeq ($(enable_multiple_nodes), yes) $(top_builddir)/../distribute/gtm/common/libgtmcommon.a endif -OBJS += $(top_builddir)/src/lib/hotpatch/client/libhotpatchclient.a +ifneq ($(enable_lite_mode), yes) + OBJS += $(top_builddir)/src/lib/hotpatch/client/libhotpatchclient.a +endif # We put libpgport into OBJS, so remove it from LIBS; also add libldap LIBS := $(filter-out -lpgport, $(LIBS)) $(LDAP_LIBS_BE) @@ -106,14 +114,18 @@ LIBS := $(filter-out -lreadline -ledit -ltermcap -lncurses -lcurses, $(LIBS)) ifeq ($(enable_libnet), yes) override CXXFLAGS := $(CXXFLAGS) -DUSE_LIBNET - LIBS += -lnuma -lsecurec -lconfig $(LIBNET_LIBS) + LIBS += -lnuma -l$(SECURE_C_CHECK) -lconfig $(LIBNET_LIBS) endif ############################################################################## # libobs component ############################################################################### LIBS += -L$(LIBCURL_LIB_PATH) -L$(LIBOPENSSL_LIB_PATH) -LIBS += -leSDKOBS -leSDKLogAPI -lssl -lcrypto -lpcre -liconv -lnghttp2 -llog4cpp -lsecurec -lcurl +ifeq ($(enable_lite_mode), no) +LIBS += -leSDKOBS -leSDKLogAPI -lssl -lcrypto -lpcre -liconv -lnghttp2 -llog4cpp -l$(SECURE_C_CHECK) -lcurl +else +LIBS += -lssl -l$(SECURE_C_CHECK) -lcurl +endif ############################################################################## # libobs component @@ -143,6 +155,12 @@ LIBS += -llz4 ############################################################################ LIBS += -lcjson +########################################################################## +# append xgboost for xgboost algorithm : libxgboost.so attached by dlopen +############################################################################ +#LIBS += -L$(XGBOOST_LIB_PATH) -lxgboost +LDFLAGS += -L$(XGBOOST_LIB_PATH) +CXXFLAGS+= -I$(XGBOOST_INCLUDE_PATH) ############################################################################# # PLJava component ############################################################################### @@ -181,13 +199,17 @@ LIBCGROUP=libcgroup.so ############################################################################ # link orc for gaussdb ############################################################################ +ifeq ($(enable_lite_mode), no) LIBS += -lorc +endif ############################################################################ # link parquet for gaussdb ############################################################################ +ifeq ($(enable_lite_mode), no) LDFLAGS += -L$(LIBPARQUET_LIB_PATH) -L$(LIBPARQUET_ARROW_LIB_PATH) LIBS += -lparquet -larrow +endif ############################################################################ # link carbondata for gaussdb @@ -208,8 +230,10 @@ endif ############################################################################ # link thrift for gaussdb ############################################################################ +ifeq ($(enable_lite_mode), no) LDFLAGS += -L$(LIBTHRIFT_LIB_PATH) LIBS += -lthrift +endif ############################################################################ # link snappy for gaussdb @@ -224,7 +248,9 @@ else SNAPPY_LIB_NAME=snappy endif endif +ifeq ($(enable_lite_mode), no) LIBS += 
-l${SNAPPY_LIB_NAME} +endif ############################################################################ # link zstd for gaussdb @@ -251,17 +277,22 @@ LIBS += -l${PROTOBUF_LIB_NAME} ################################################################################## # kerberos ################################################################################## +ifeq ($(enable_lite_mode), no) LIBS += -lcom_err_gauss -lgssapi_krb5_gauss -lkrb5_gauss -lgssrpc_gauss -lk5crypto_gauss -lkadm5clnt_mit -lkadm5srv_mit -lkdb5 -lkrb5support_gauss +endif ################################################################################## # libstd ################################################################################# +ifeq ($(enable_lite_mode), no) LIBSTD_LIB_NAME=stdc++ LIBS += -l${LIBSTD_LIB_NAME} +endif ################################################################################## # boost ################################################################################### +ifeq ($(enable_lite_mode), no) ifeq ($(enable_llt), yes) LIBS += -lboost_thread_pic -lboost_chrono_pic -lboost_system_pic -lboost_atomic else @@ -271,10 +302,12 @@ else LIBS += -lboost_thread -lboost_chrono -lboost_system -lboost_atomic endif endif +endif ################################################################################ # libxml2 ################################################################################ +ifeq ($(enable_lite_mode), no) ifeq ($(enable_llt), yes) LIBS += -lxml2_pic else @@ -284,6 +317,7 @@ else LIBS += -lxml2 endif endif +endif ################################################################################ #openssl @@ -291,19 +325,24 @@ endif LIBS += -lssl -lcrypto #for ADIO +ifeq ($(enable_lite_mode), no) LIBS += -laio +endif ################################################################################ # for 3rd support ################################################################################ LDFLAGS += -L$(LIBLLVM_LIB_PATH) -L$(LIBTINFO_LIB_PATH) \ - -L$(LIBOBS_LIB_PATH) \ - -L$(LIBCGROUP_LIB_PATH) -L$(LIBORC_LIB_PATH) \ - -L$(SNAPPY_LIB_PATH) -L$(PROTOBUF_LIB_PATH) \ - -L$(KERBEROS_LIB_PATH) -L$(LIBSTD_LIB_PATH) -L$(BOOST_LIB_PATH) \ - -L$(LIBXML_LIB_PATH) -L$(LIBOPENSSL_LIB_PATH) \ - -L$(LIBCARBONDATA_LIB_PATH) + -L$(LIBCGROUP_LIB_PATH) -L$(PROTOBUF_LIB_PATH) \ + -L$(LIBOPENSSL_LIB_PATH) +ifeq ($(enable_lite_mode), no) +LDFLAGS += -L$(LIBOBS_LIB_PATH) -L$(LIBORC_LIB_PATH) \ + -L$(SNAPPY_LIB_PATH) -L$(BOOST_LIB_PATH) -L$(LIBXML_LIB_PATH) \ + -L$(KERBEROS_LIB_PATH) -L$(LIBSTD_LIB_PATH) \ + -L$(LIBCARBONDATA_LIB_PATH) +endif + # append lz4 for compression: lz4 LDFLAGS += -L$(LZ4_LIB_PATH) CXXFLAGS+= -I$(LZ4_INCLUDE_PATH) @@ -341,6 +380,11 @@ db: $(OBJS) $(CC) -fPIC -shared $(CXXFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(export_dynamic) $(call expand_subsys,$^) $(LIBS) $(LLVMLIBS) -o libdb.so mv libdb.so $(top_builddir)/../distribute/test/ut/lib +ifeq ($(enable_db4ai_mock), yes) +db4ai_mock: $(OBJS) + $(CC) $(CXXFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(export_dynamic) -z muldefs $(call expand_subsys,../test/db4ai/objfiles.txt) $(call expand_subsys,$^) $(LIBS) $(LLVMLIBS) $(HOTPATCH_ATOMIC_LDS) -o $@ +endif + endif endif endif @@ -425,9 +469,11 @@ submake-lwlocknames: $(top_builddir)/src/include/storage/lwlocknames.h # libplgsql.a needs a convenient way to force fmgroids.h to get built submake-fmgroids: $(top_builddir)/src/common/backend/utils/fmgroids.h $(top_builddir)/src/include/utils/fmgroids.h +ifeq ($(enable_lite_mode), no) # run this to build pljava submake-libpljava: | submake-fmgroids 
$(top_builddir)/third_party/dependency/pljava/build.sh -m only_so +endif # run this to build carbondata-sdk $(top_builddir)/../contrib/carbondata/libcarbondata.a: @@ -450,6 +496,7 @@ $(top_builddir)/src/common/interfaces/libpq/libpq.a: $(MAKE) -C $(top_builddir)/src/common/interfaces/libpq ifneq ($(enable_multiple_nodes)_$(enable_privategauss), no_no) +ifneq ($(enable_lite_mode), yes) $(top_builddir)/../distribute/gtm/common/libgtmcommon.a: $(MAKE) -C $(top_builddir)/../distribute/gtm/common libgtmcommon.a @@ -459,6 +506,8 @@ $(top_builddir)/../distribute/gtm/client/libgtmclient.a: $(top_builddir)/../distribute/lib/hotpatch/common/libhotpatchcommon.a: $(MAKE) -C $(top_builddir)/../distribute/lib/hotpatch/common libhotpatchcommon.a endif +endif + $(top_builddir)/src/lib/hotpatch/client/libhotpatchclient.a: $(MAKE) -C $(top_builddir)/src/lib/hotpatch/client libhotpatchclient.a @@ -622,12 +671,12 @@ endif $(INSTALL_DATA) $(srcdir)/storage/access/transam/recovery.conf.sample '$(DESTDIR)$(datadir)/recovery.conf.sample' $(INSTALL_DATA) $(srcdir)/cbb/communication/comm_proxy/gs_gazelle.conf.sample '$(DESTDIR)$(datadir)/gs_gazelle.conf.sample' +ifeq ($(enable_lite_mode), no) cd $(KERBEROS_LIB_PATH) && tar -cpf - ./* | ( cd $(DESTDIR)$(libdir); tar -xpf - ) mkdir -p '$(DESTDIR)$(libdir)/../temp/' rm -f $(DESTDIR)$(libdir)/libpcre* cp $(LIBOBS_LIB_PATH)/libpcre* '$(DESTDIR)$(libdir)/../temp/' mv $(DESTDIR)$(libdir)/../temp/* '$(DESTDIR)$(libdir)/' - cp $(SECUREDYNAMICLIB_HOME)/libsecurec* '$(DESTDIR)$(libdir)/' cp $(LIBOBS_LIB_PATH)/liblog4* '$(DESTDIR)$(libdir)/' cp $(LIBOBS_LIB_PATH)/libeSDK* '$(DESTDIR)$(libdir)/' cp $(LIBOBS_LIB_PATH)/libxml2* '$(DESTDIR)$(libdir)/' @@ -637,10 +686,15 @@ endif cp $(LIBOBS_LIB_PATH)/libnghttp* '$(DESTDIR)$(libdir)/' cp $(LIBPARQUET_LIB_PATH)/libparquet* '$(DESTDIR)$(libdir)/' cp $(LIBPARQUET_LIB_PATH)/libarrow* '$(DESTDIR)$(libdir)/' +endif cp -d $(ZLIB_LIB_PATH)/libz* '$(DESTDIR)$(libdir)/' - cp -d $(LZ4_LIB_PATH)/liblz4* '$(DESTDIR)$(libdir)/' + cp -d $(XGBOOST_LIB_PATH)/libxgboost* '$(DESTDIR)$(libdir)/' cp -d $(CJSON_LIB_PATH)/libcjson* '$(DESTDIR)$(libdir)/' + +ifneq ($(with_openeuler_os), yes) + cp -d $(LZ4_LIB_PATH)/liblz4* '$(DESTDIR)$(libdir)/' cp -d $(with_3rd)/$(BINARYPATH)/event/$(LIB_SUPPORT_LLT)/lib/libevent* '$(DESTDIR)$(libdir)/' + cp $(SECUREDYNAMICLIB_HOME)/libsecurec* '$(DESTDIR)$(libdir)/' ifneq (, $(findstring __USE_NUMA, $(CFLAGS))) cp $(NUMA_LIB_PATH)/* '$(DESTDIR)$(libdir)/' endif @@ -650,6 +704,7 @@ endif ifeq ($(with_3rd), NONE) cp $(top_builddir)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libstdc++.so.6 '$(DESTDIR)$(libdir)/' cp $(top_builddir)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libgcc_s.so.1 '$(DESTDIR)$(libdir)/' + cp $(top_builddir)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libgomp.so* '$(DESTDIR)$(libdir)/' ifeq ($(enable_mot), yes) cp -d $(top_builddir)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libatomic.so* '$(DESTDIR)$(libdir)/' endif @@ -661,6 +716,7 @@ endif else cp $(with_3rd)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libstdc++.so.6 '$(DESTDIR)$(libdir)/' cp $(with_3rd)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libgcc_s.so.1 '$(DESTDIR)$(libdir)/' + cp $(with_3rd)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libgomp.so* '$(DESTDIR)$(libdir)/' ifeq ($(enable_mot), yes) cp -d $(with_3rd)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libatomic.so* '$(DESTDIR)$(libdir)/' endif @@ -670,46 
+726,54 @@ ifeq ($(enable_thread_check), yes) cp $(with_3rd)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libtsan.so.0.0.0 '$(DESTDIR)$(libdir)/' endif endif +endif + +ifeq ($(enable_lite_mode), no) cp -r $(with_jdk)/jre/* '$(DESTDIR)$(bindir)/../jre/' cp $(PLJAVA_LIB_PATH)/* '$(DESTDIR)$(libdir)/' cp $(PLJAVA_JAR_PATH)/$(JARPLJAVA) '$(DESTDIR)$(pkglibdir)/java/' cp $(PLJAVA_HOME)/udstools.py '$(DESTDIR)$(datadir)/tmp/' +endif cp '$(top_builddir)/src/include/ssl/openssl_gsql.cnf' '$(DESTDIR)$(datadir)/../sslcert/gsql/openssl.cnf' +ifeq ($(enable_lite_mode), no) cp '$(top_builddir)/src/include/ssl/openssl_gsql.cnf' '$(DESTDIR)$(datadir)/../sslcert/gds/openssl.cnf' cp '$(top_builddir)/src/include/ssl/openssl_om.cnf' '$(DESTDIR)$(datadir)/../sslcert/om/openssl.cnf' - cp '$(top_builddir)/src/include/ssl/openssl_grpc.cnf' '$(DESTDIR)$(datadir)/../sslcert/grpc/openssl.cnf' +endif + +ifneq ($(with_openeuler_os), yes) cp '$(LIBOPENSSL_BIN_PATH)/openssl' '$(DESTDIR)$(bindir)/openssl' - cp '$(LIBCURL_LIB_PATH)/libcurl.so' '$(DESTDIR)$(libdir)/libcurl.so' - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4' '$(DESTDIR)$(libdir)/libcurl.so.4' ifeq ($(enable_multiple_nodes), no) cp '$(DCF_LIB_PATH)/libdcf.so' '$(DESTDIR)$(libdir)/libdcf.so' endif cp '$(ZSTD_LIB_PATH)/libzstd.so' '$(DESTDIR)$(libdir)/libzstd.so' cp '$(ZSTD_LIB_PATH)/libzstd.so.1' '$(DESTDIR)$(libdir)/libzstd.so.1' - cp '$(ZSTD_LIB_PATH)/libzstd.so.1.4.4' '$(DESTDIR)$(libdir)/libzstd.so.1.4.4' + cp '$(ZSTD_LIB_PATH)/libzstd.so.1.5.0' '$(DESTDIR)$(libdir)/libzstd.so.1.5.0' ifeq ($(PLAT_FORM_STR), euleros2.0_sp2_x86_64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), euleros2.0_sp5_x86_64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), euleros2.0_sp8_aarch64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), euleros2.0_sp9_aarch64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), openeuler_aarch64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), openeuler_x86_64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), centos7.6_x86_64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), kylinv10_sp1_aarch64) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else ifeq ($(PLAT_FORM_STR), kylinv10_sp1_x86_64_intel) - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.6.0' '$(DESTDIR)$(libdir)/libcurl.so.4.6.0' + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' +else ifeq ($(PLAT_FORM_STR), ubuntu18.04_x86_64) + cp -d '$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' else - cp '$(LIBCURL_LIB_PATH)/libcurl.so.4.5.0' '$(DESTDIR)$(libdir)/libcurl.so.4.5.0' + cp -d 
'$(LIBCURL_LIB_PATH)'/libcurl.so* '$(DESTDIR)$(libdir)/' +endif endif rm -rf '$(DESTDIR)$(libdir)/../temp' @@ -720,6 +784,7 @@ endif install-bin: gaussdb $(POSTGRES_IMP) installdirs libcgroup $(INSTALL_PROGRAM) gaussdb$(X) '$(DESTDIR)$(bindir)/gaussdb$(X)' +ifeq ($(enable_lite_mode), no) @if test -d $(KERBEROS_BIN_PATH); then cp -rf $(KERBEROS_BIN_PATH)/klist $(DESTDIR)$(bindir)/; fi @if test -d $(KERBEROS_BIN_PATH); then cp -rf $(KERBEROS_BIN_PATH)/kinit $(DESTDIR)$(bindir)/; fi @if test -d $(KERBEROS_BIN_PATH); then cp -rf $(KERBEROS_BIN_PATH)/kdestroy $(DESTDIR)$(bindir)/; fi @@ -736,6 +801,9 @@ endif @rm -f '$(DESTDIR)$(bindir)/gs_encrypt$(X)' ln -s gaussdb$(X) '$(DESTDIR)$(bindir)/gs_encrypt$(X)' cp -rf $(srcdir)/dbmind/tools '$(DESTDIR)$(bindir)/dbmind' + cp -rf $(srcdir)/dbmind/gs_dbmind '$(DESTDIR)$(bindir)/gs_dbmind' + chmod +x '$(DESTDIR)$(bindir)/gs_dbmind' +endif ifeq ($(enable_multiple_nodes), yes) @rm -f '$(DESTDIR)$(bindir)/gaussmaster$(X)' ln -s gaussdb$(X) '$(DESTDIR)$(bindir)/gaussmaster$(X)' @@ -753,21 +821,22 @@ endif #todo: need to use pg_config to create etc directory installdirs: $(MKDIR_P) '$(DESTDIR)$(bindir)' '$(DESTDIR)$(datadir)' '$(DESTDIR)$(bindir)/../etc' - cp '$(top_builddir)/src/gausskernel/storage/access/dfs/region_map' '$(DESTDIR)$(bindir)/../etc/' - @if test -d '$(top_builddir)/src/common/backend/libpq/kerberos'; then cp -rf '$(top_builddir)/src/common/backend/libpq/kerberos' '$(DESTDIR)$(bindir)/../etc/'; fi $(MKDIR_P) '$(DESTDIR)$(llvmirdir)' $(MKDIR_P) '$(DESTDIR)$(libdir)/postgresql/pg_plugin' $(MKDIR_P) '$(DESTDIR)$(libdir)/postgresql/proc_srclib' $(MKDIR_P) '$(DESTDIR)$(libdir)/libsimsearch' - $(MKDIR_P) '$(DESTDIR)$(pkglibdir)/java' - $(MKDIR_P) '$(DESTDIR)$(bindir)/../jre' $(MKDIR_P) '$(DESTDIR)$(datadir)/tmp' $(MKDIR_P) '$(DESTDIR)$(datadir)/../sslcert' $(MKDIR_P) '$(DESTDIR)$(datadir)/../sslcert/gsql' +ifeq ($(enable_lite_mode), no) + cp '$(top_builddir)/src/gausskernel/storage/access/dfs/region_map' '$(DESTDIR)$(bindir)/../etc/' + @if test -d '$(top_builddir)/src/common/backend/libpq/kerberos'; then cp -rf '$(top_builddir)/src/common/backend/libpq/kerberos' '$(DESTDIR)$(bindir)/../etc/'; fi $(MKDIR_P) '$(DESTDIR)$(datadir)/../sslcert/etcd' - $(MKDIR_P) '$(DESTDIR)$(datadir)/../sslcert/grpc' $(MKDIR_P) '$(DESTDIR)$(datadir)/../sslcert/gds' $(MKDIR_P) '$(DESTDIR)$(datadir)/../sslcert/om' + $(MKDIR_P) '$(DESTDIR)$(pkglibdir)/java' + $(MKDIR_P) '$(DESTDIR)$(bindir)/../jre' +endif ifeq ($(PORTNAME), cygwin) ifeq ($(MAKE_DLL), true) @@ -786,8 +855,10 @@ endif libcgroup: cd $(DESTDIR)$(libdir) && rm -f libcgroup.* +ifneq ($(with_openeuler_os), yes) cp $(LIBCGROUP_LIB_PATH)/$(LIBCGROUP) '$(DESTDIR)$(libdir)/$(LIBCGROUP)' cd $(DESTDIR)$(libdir) && $(LN_S) $(LIBCGROUP) libcgroup.so.1 +endif ########################################################################## diff --git a/src/gausskernel/Makefile_for_llt b/src/gausskernel/Makefile_for_llt index 796b9efdb..52e5848e7 100755 --- a/src/gausskernel/Makefile_for_llt +++ b/src/gausskernel/Makefile_for_llt @@ -84,7 +84,7 @@ LIBS := $(filter-out -lreadline -ledit -ltermcap -lncurses -lcurses, $(LIBS)) # libobs component ############################################################################### LIBS += -L$(LIBCURL_LIB_PATH) -L$(LIBOPENSSL_LIB_PATH) -LIBS += -leSDKOBS -leSDKLogAPI -lssl -lcrypto -lpcre -liconv -lnghttp2 -llog4cpp -lsecurec -lcurl +LIBS += -leSDKOBS -leSDKLogAPI -lssl -lcrypto -lpcre -liconv -lnghttp2 -llog4cpp -l$(SECURE_C_CHECK) -lcurl LDFLAGS += -L$(LIBOBS_LIB_PATH) CXXFLAGS += 
-I$(LIBOBS_INCLUDE_PATH) @@ -95,6 +95,7 @@ LIBS += -llz4 LDFLAGS += -L$(LZ4_LIB_PATH) CXXFLAGS += -I$(LZ4_INCLUDE_PATH) + ############################################################################## # libcjson component ############################################################################### @@ -117,6 +118,13 @@ LIBS += -l$(JEMALLOC_LIB_NAME) LDFLAGS += -L$(JEMALLOC_LIB_PATH) CXXFLAGS += -I$(JEMALLOC_INCLUDE_PATH) +########################################################################## +# append xgboost for the xgboost algorithm: libxgboost.so +############################################################################ +LIBS += -lxgboost +LDFLAGS += -L$(XGBOOST_LIB_PATH) +CXXFLAGS+= -I$(XGBOOST_INCLUDE_PATH) + ############################################################################# # cgroup library ############################################################################## @@ -475,6 +483,7 @@ endif cp $(LIBOBS_LIB_PATH)/libnghttp* '$(DESTDIR)$(libdir)/' cp -d $(ZLIB_LIB_PATH)/libz* '$(DESTDIR)$(libdir)/' cp -d $(LZ4_LIB_PATH)/liblz4* '$(DESTDIR)$(libdir)/' + cp -d $(XGBOOST_LIB_PATH)/libxgboost* '$(DESTDIR)$(libdir)/' cp -d $(CJSON_LIB_PATH)/libcjson* '$(DESTDIR)$(libdir)/' cp $(PLJAVA_LIB_PATH)/* '$(DESTDIR)$(libdir)/' cp $(PLJAVA_JAR_PATH)/$(JARPLJAVA) '$(DESTDIR)$(pkglibdir)/java/' diff --git a/src/gausskernel/bootstrap/bootparse.y b/src/gausskernel/bootstrap/bootparse.y index 843b131b0..8fa94afef 100755 --- a/src/gausskernel/bootstrap/bootparse.y +++ b/src/gausskernel/bootstrap/bootparse.y @@ -234,7 +234,6 @@ Boot_CreateStmt: mapped_relation, true, REL_CMPRS_NOT_SUPPORT, - (Datum)0, BOOTSTRAP_SUPERUSERID, false, TAM_HEAP, diff --git a/src/gausskernel/bootstrap/bootstrap.cpp b/src/gausskernel/bootstrap/bootstrap.cpp index 9e678ae81..f9bb0b14d 100755 --- a/src/gausskernel/bootstrap/bootstrap.cpp +++ b/src/gausskernel/bootstrap/bootstrap.cpp @@ -320,6 +320,8 @@ void BootStrapProcessMain(int argc, char* argv[]) } InitializeNumLwLockPartitions(); } + g_instance.global_sysdbcache.Init(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); + CreateLocalSysDBCache(); /* Validate we have been given a reasonable-looking t_thrd.proc_cxt.DataDir */ Assert(t_thrd.proc_cxt.DataDir); diff --git a/src/gausskernel/cbb/bbox/bbox_elf_dump.cpp b/src/gausskernel/cbb/bbox/bbox_elf_dump.cpp index 4a9b209d3..2862c30ca 100644 --- a/src/gausskernel/cbb/bbox/bbox_elf_dump.cpp +++ b/src/gausskernel/cbb/bbox/bbox_elf_dump.cpp @@ -166,7 +166,9 @@ static int BBOX_SetMappingVDSOFlag( int* piGetChar, struct BBOX_READ_FILE_IO* pstReadIO, struct BBOX_VM_MAPS* pstSegmentMapping) { int iIsMappingVdsoFlag = BBOX_FALSE; + int iIsMappingVvarFlag = BBOX_TRUE; const char* pszVdso = VDSO_NAME_STRING; + const char* pszVvar = VVAR_NAME_STRING; if (NULL == piGetChar || NULL == pstReadIO || NULL == pstSegmentMapping) { @@ -178,6 +180,10 @@ } while (*pszVdso && *piGetChar == *pszVdso) { + if (iIsMappingVvarFlag == BBOX_TRUE) { + iIsMappingVvarFlag = (*piGetChar == *pszVvar) ? 
BBOX_TRUE : BBOX_FALSE; + pszVvar++; + } *piGetChar = BBOX_GetCharFromFile(pstReadIO); if (RET_ERR == *piGetChar) { bbox_print(PRINT_ERR, "BBOX_GetCharFromFile is failed, *piGetChar= %d.\n", *piGetChar); @@ -198,6 +204,27 @@ pstSegmentMapping->uiEndAddress); } + if (iIsMappingVvarFlag == BBOX_TRUE) { + while (*pszVdso && *piGetChar == *pszVvar) { + *piGetChar = BBOX_GetCharFromFile(pstReadIO); + if (RET_ERR == *piGetChar) { + bbox_print(PRINT_ERR, "BBOX_GetCharFromFile is failed, *piGetChar= %d.\n", *piGetChar); + + return RET_ERR; + } + + pszVvar++; + } + + if (*pszVvar == '\0' && (*piGetChar == '\n' || *piGetChar == ' ' || *piGetChar == '\0')) { + pstSegmentMapping->iFlags |= PF_VVAR; /* set VVAR flag */ + + bbox_print(PRINT_DBG, + " Get VVAR StartAddr = %zu, EndAddr = %zu.\n", + pstSegmentMapping->uiStartAddress, pstSegmentMapping->uiEndAddress); + } + } + return RET_OK; } @@ -688,8 +715,9 @@ static int BBOX_VmMappingSizeDump(struct BBOX_VM_MAPS* pstVmMappingSegment, int } /* mark a segment not write into core file - if it cannot be read, discribes equipment segment or segment size is 0 */ + if it cannot be read, is a device segment, has size 0, or is a vvar segment */ if (((pstVmMapping->iFlags & PF_R) == 0) || pstVmMapping->uiStartAddress == pstVmMapping->uiEndAddress || + (pstVmMapping->iFlags & PF_VVAR) || (pstVmMapping->iFlags & PF_DEVICE)) { pstVmMapping->uiWriteSize = 0; (*piValidSegmentNum)--; diff --git a/src/gausskernel/cbb/bbox/bbox_elf_dump.h b/src/gausskernel/cbb/bbox/bbox_elf_dump.h index cfa3323d2..21c675e93 100644 --- a/src/gausskernel/cbb/bbox/bbox_elf_dump.h +++ b/src/gausskernel/cbb/bbox/bbox_elf_dump.h @@ -31,7 +31,11 @@ #include #include #include +#ifndef WITH_OPENEULER_OS #include +#else +#include +#endif #include #include #include @@ -96,6 +100,7 @@ extern "C" { #define THREAD_SELF_COMMAND_LINE_FILE "/proc/self/cmdline" #define VDSO_NAME_STRING "[vdso]" +#define VVAR_NAME_STRING "[vvar]" #define DEVICE_ZERO_NAME_STRING "/dev/zero" #define DEVICE_PREFIX_LEN 5 #define DEVICE_AND_NODE_FIELD_NUM 2 @@ -109,6 +114,7 @@ extern "C" { #define PF_DEVICE 0x40000000 #define PF_VDSO 0x20000000 #define PF_MAPPEDFILE 0x10000000 +#define PF_VVAR 0x08000000 #define BBOX_SECTION_NUM 3 #define BBOX_SHSTR_INDEX 2 diff --git a/src/gausskernel/cbb/bbox/bbox_syscall_support.h b/src/gausskernel/cbb/bbox/bbox_syscall_support.h index 4ebce8a6a..c131a573d 100644 --- a/src/gausskernel/cbb/bbox/bbox_syscall_support.h +++ b/src/gausskernel/cbb/bbox/bbox_syscall_support.h @@ -54,7 +54,11 @@ #include #include #include +#ifndef WITH_OPENEULER_OS #include +#else +#include +#endif #include #include diff --git a/src/gausskernel/cbb/bbox/bbox_threads.cpp b/src/gausskernel/cbb/bbox/bbox_threads.cpp index a224269a6..41e25693d 100644 --- a/src/gausskernel/cbb/bbox/bbox_threads.cpp +++ b/src/gausskernel/cbb/bbox/bbox_threads.cpp @@ -236,6 +236,7 @@ s32 BBOX_PtraceAttachPid(struct TASK_ATTACH_INFO* pstTaskInfo, s32 iPidCount, s3 /* walk through all the threads and debug the trace. 
*/ pid = pstTaskInfo[i].pid; if (sys_ptrace(PTRACE_ATTACH, pid, (void*)0, (void*)0) < 0) { + bbox_print(PRINT_ERR, "ptrace failed, pid = %d, errno = %d\n", pid, errno); continue; } @@ -254,10 +255,11 @@ ret = sys_ptrace(PTRACE_PEEKDATA, pid, &m, &n); /* check that if the trace is valid */ if (ret || (m != n)) { + bbox_print(PRINT_ERR, + "ptrace peek data failed, pid = %d, ret = %d, m = %lu, n = %lu, errno = %d\n", + pid, ret, m, n, errno); + sys_ptrace(PTRACE_DETACH, pid, 0, 0); - - bbox_print(PRINT_ERR, "ptrace peek data failed, pid = %d, ret = %d,m = %lu, n = %lu, errno = %d\n", pid, ret, m, n, errno); - continue; } } diff --git a/src/gausskernel/cbb/communication/libcomm.cpp b/src/gausskernel/cbb/communication/libcomm.cpp index 08ff12036..139d58703 100755 --- a/src/gausskernel/cbb/communication/libcomm.cpp +++ b/src/gausskernel/cbb/communication/libcomm.cpp @@ -2787,6 +2787,9 @@ void commPoolCleanerMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resources held by lsc */ + AtEOXact_SysDBCache(false); + (void)MemoryContextSwitchTo(poolCleaner_context); FlushErrorState(); diff --git a/src/gausskernel/cbb/communication/libcomm_common.h b/src/gausskernel/cbb/communication/libcomm_common.h index 04e36940c..3a69e3ff8 100644 --- a/src/gausskernel/cbb/communication/libcomm_common.h +++ b/src/gausskernel/cbb/communication/libcomm_common.h @@ -12,23 +12,6 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * ------------------------------------------------------------------------- - * - * Some of the function may be from software package using GNU license, - * so, included GNU license here. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it - * will be useful, but WITHOUT ANY WARRANTY; without even the implied - * ^^^^^^^^^^^^^^^^^^^^^^^^ - * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * See the GNU General Public License for more details. - * - * ------------------------------------------------------------------------- - * * libcomm_common.h * * IDENTIFICATION diff --git a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_adapter.cpp b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_adapter.cpp index 8777dac1c..034326d6c 100644 --- a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_adapter.cpp +++ b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_adapter.cpp @@ -904,6 +904,7 @@ static int libcomm_build_tcp_connection(libcommaddrinfo* libcomm_addrinfo, int n return -1; } +#ifdef ENABLE_GSS /* Client side gss kerberos authentication for data connection. 
*/ if (g_instance.comm_cxt.localinfo_cxt.gs_krb_keyfile != NULL && GssClientAuth(sock, libcomm_addrinfo->host) < 0) { LIBCOMM_ELOG(WARNING, @@ -918,6 +919,8 @@ static int libcomm_build_tcp_connection(libcommaddrinfo* libcomm_addrinfo, int n mc_tcp_close(sock); return -1; } +#endif + g_instance.attr.attr_network.comm_data_channel_conn[node_idx - 1]->socket = sock; #ifdef USE_SSL if (g_instance.attr.attr_network.comm_enable_SSL) { @@ -1125,6 +1128,7 @@ int gs_s_build_tcp_ctrl_connection(libcommaddrinfo* libcomm_addrinfo, int node_i return -1; } +#ifdef ENABLE_GSS /* Client side gss kerberos authentication for tcp connection. */ if (g_instance.comm_cxt.localinfo_cxt.gs_krb_keyfile != NULL && GssClientAuth(tcp_sock, remote_host) < 0) { mc_tcp_close(tcp_sock); @@ -1143,6 +1147,7 @@ int gs_s_build_tcp_ctrl_connection(libcommaddrinfo* libcomm_addrinfo, int node_i remote_tcp_port, mc_strerror(errno)); } +#endif g_instance.attr.attr_network.comm_ctrl_channel_conn[node_idx - 1]->socket = tcp_sock; #ifdef USE_SSL diff --git a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_client_ssl.cpp b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_client_ssl.cpp index 860afe793..3676939e8 100644 --- a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_client_ssl.cpp +++ b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_client_ssl.cpp @@ -51,10 +51,10 @@ void LibCommErrFree(void* buf) { /* security ciphers suites in SSL connection */ static const char* ssl_ciphers_map[] = { - TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 */ - TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 */ - TLS1_TXT_DHE_RSA_WITH_AES_128_CCM, /* TLS_DHE_RSA_WITH_AES_128_CCM */ - TLS1_TXT_DHE_RSA_WITH_AES_256_CCM, /* TLS_DHE_RSA_WITH_AES_256_CCM */ + TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 */ + TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ + TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 */ + TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ NULL}; static int LibCommClientCheckPermissionCipherFile(const char* parent_dir, LibCommConn * conn, const char* username); @@ -402,8 +402,43 @@ static int LibCommClientSSLLoadKeyFile(LibCommConn* conn, bool have_homedir, con LIBCOMM_ELOG(LOG, "LibCommClientSSLLoadKeyFile sslkey %s", conn->sslkey); return 0; } + +static void LibCommClientSSLLoadCrlFile(LibCommConn* conn, bool have_homedir, const PathData *homedir) +{ + struct stat buf; + char fnBuf[MAXPATH] = {0}; + int nRet = 0; + bool userSetSslCrl = false; + X509_STORE* cvstore = SSL_CTX_get_cert_store(g_libCommClientSSLContext); + if (cvstore == NULL) { + return; + } + + if ((conn->sslcrl != NULL) && strlen(conn->sslcrl) > 0) { + nRet = snprintf_s(fnBuf, MAXPATH, MAXPATH - 1, "%s/%s", getcwd(NULL,0), conn->sslcrl); + securec_check_ss_c(nRet, "\0", "\0"); + userSetSslCrl = true; + } else if (have_homedir) { + nRet = snprintf_s(fnBuf, MAXPATH, MAXPATH - 1, "%s/%s", homedir->data, ROOT_CRL_FILE); + securec_check_ss_c(nRet, "\0", "\0"); + } else { + fnBuf[0] = '\0'; + } + + if (fnBuf[0] == '\0') { + return; + } + /* Set the flags to check against the complete CRL chain */ + if (stat(fnBuf, &buf) == 0 && X509_STORE_load_locations(cvstore, fnBuf, NULL) == 1) { + (void)X509_STORE_set_flags(cvstore, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL); + } else if (userSetSslCrl) { + 
LIBCOMM_ELOG(WARNING, "could not load SSL certificate revocation list (file \"%s\")\n", fnBuf); + } +} + #define MAX_CERTIFICATE_DEPTH_SUPPORTED 20 /* The max certificate depth supported. */ -static int LibCommClientSSLLoadRootCertFile(LibCommConn* conn, bool have_homedir, const PathData *homedir) { +static int LibCommClientSSLLoadRootCertFile(LibCommConn* conn, bool have_homedir, const PathData *homedir) +{ struct stat buf; char fnBuf[MAXPATH] = {0}; int nRet = 0; @@ -436,29 +471,7 @@ static int LibCommClientSSLLoadRootCertFile(LibCommConn* conn, bool have_homedir return -1; } #endif - /* check root cert file permission */ - if (SSL_CTX_get_cert_store(g_libCommClientSSLContext) != NULL) { - if ((conn->sslcrl != NULL) && strlen(conn->sslcrl) > 0) { - nRet = snprintf_s(fnBuf, MAXPATH, MAXPATH - 1, "%s/%s", getcwd(NULL,0), conn->sslcrl); - securec_check_ss_c(nRet, "\0", "\0"); - } else if (have_homedir) { - nRet = snprintf_s(fnBuf, MAXPATH, MAXPATH - 1, "%s/%s", homedir->data, ROOT_CRL_FILE); - securec_check_ss_c(nRet, "\0", "\0"); - } else { - fnBuf[0] = '\0'; - } - /* Set the flags to check against the complete CRL chain */ - if (fnBuf[0] != '\0' && stat(fnBuf, &buf) == 0) { - if (X509_STORE_load_locations(SSL_CTX_get_cert_store(g_libCommClientSSLContext), fnBuf, NULL) == 1) { - (void)X509_STORE_set_flags(SSL_CTX_get_cert_store(g_libCommClientSSLContext), - X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL); - } else { - LIBCOMM_ELOG(ERROR,"could not load SSL certificate revocation list (file \"%s\")\n", conn->sslcrl); - return -1; - } - } - } - + LibCommClientSSLLoadCrlFile(conn, have_homedir, homedir); /* Check the DH length to make sure it's at least 2048. */ SSL_set_security_callback(conn->ssl, LibCommClientSSLDHVerifyCb); diff --git a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_server_ssl.cpp b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_server_ssl.cpp index 5c4e77330..0422c95da 100644 --- a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_server_ssl.cpp +++ b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_server_ssl.cpp @@ -1,1071 +1,1071 @@ -/* - * Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- * ------------------------------------------------------------------------- - * - * libcomm_server_ssl.cpp - * - * IDENTIFICATION - * src/gausskernel/cbb/communication/libcomm_utils/libcomm_server_ssl.cpp - * - * ------------------------------------------------------------------------- - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../libcomm_core/mc_tcp.h" -#include "../libcomm_core/mc_poller.h" -#include "libcomm_thread.h" -#include "libcomm_lqueue.h" -#include "libcomm_queue.h" -#include "libcomm_lock_free_queue.h" -#include "distributelayer/streamCore.h" -#include "distributelayer/streamProducer.h" -#include "pgxc/poolmgr.h" -#include "libpq/auth.h" -#include "libpq/pqsignal.h" -#include "storage/ipc.h" -#include "utils/ps_status.h" -#include "utils/dynahash.h" - -#include "vecexecutor/vectorbatch.h" -#include "vecexecutor/vecnodes.h" -#include "executor/exec/execStream.h" -#include "miscadmin.h" -#include "gssignal/gs_signal.h" -#include "pgxc/pgxc.h" -#include "libcomm/libcomm.h" -#include "../libcomm_common.h" - -#ifdef USE_SSL -#include "openssl/err.h" -#include "openssl/ssl.h" -#include "openssl/rand.h" -#include "openssl/ossl_typ.h" -#include "openssl/sslerr.h" -#include "openssl/obj_mac.h" -#include "openssl/dh.h" -#include "openssl/bn.h" -#include "openssl/x509.h" -#include "openssl/x509_vfy.h" -#include "openssl/opensslconf.h" -#include "openssl/crypto.h" -#include "openssl/bio.h" - -#define MAX_CERTIFICATE_DEPTH_SUPPORTED 20 /* The max certificate depth supported. */ - -/* ------------------------------------------------------------ */ -/* SSL specific code */ -/* ------------------------------------------------------------ */ - -typedef enum { - DHKey768 = 1, - DHKey1024, - DHKey1536, - DHKey2048, - DHKey3072, - DHKey4096, - DHKey6144, - DHKey8192 -} COMM_SSL_DHKeyLength; - -/* security ciphers suites in SSL connection */ -const char* comm_ssl_ciphers_map[] = { - TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 */ - TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 */ - TLS1_TXT_DHE_RSA_WITH_AES_128_CCM, /* TLS_DHE_RSA_WITH_AES_128_CCM */ - TLS1_TXT_DHE_RSA_WITH_AES_256_CCM, /* TLS_DHE_RSA_WITH_AES_256_CCM */ - NULL}; - -/* - * Certificate verification callback - * - * This callback allows us to log intermediate problems during - * verification, but for now we'll see if the final error message - * contains enough information. - * - * This callback also allows us to override the default acceptance - * criteria (e.g., accepting self-signed or expired certs), but - * for now we accept the default checks. - */ -int comm_ssl_verify_cb(int code, X509_STORE_CTX* ctx) -{ - return code; -} - -/* - * This callback is used to copy SSL information messages - * into the PostgreSQL log. 
- */ -static void comm_ssl_info_cb(const SSL* ssl, int type, int args) -{ - switch (type) { - case SSL_CB_HANDSHAKE_START: - LIBCOMM_ELOG(DEBUG4, "SSL: handshake start"); - break; - case SSL_CB_HANDSHAKE_DONE: - LIBCOMM_ELOG(DEBUG4, "SSL: handshake done"); - break; - case SSL_CB_ACCEPT_LOOP: - LIBCOMM_ELOG(DEBUG4, "SSL: accept loop"); - break; - case SSL_CB_ACCEPT_EXIT: - LIBCOMM_ELOG(DEBUG4, "SSL: accept exit (%d)", args); - break; - case SSL_CB_CONNECT_LOOP: - LIBCOMM_ELOG(DEBUG4, "SSL: connect loop"); - break; - case SSL_CB_CONNECT_EXIT: - LIBCOMM_ELOG(DEBUG4, "SSL: connect exit (%d)", args); - break; - case SSL_CB_READ_ALERT: - LIBCOMM_ELOG(DEBUG4, "SSL: read alert (0x%04x)", args); - break; - case SSL_CB_WRITE_ALERT: - LIBCOMM_ELOG(DEBUG4, "SSL: write alert (0x%04x)", args); - break; - default: - break; - } -} - -char* comm_ssl_get_cipher_string(const char* ciphers[], const int num) -{ - int i; - int catlen = 0; - char* cipher_buf = NULL; - errno_t errorno = EOK; - - size_t CIPHER_BUF_SIZE = 0; - for (i = 0; i < num; i++) { - CIPHER_BUF_SIZE += (strlen(ciphers[i]) + 1); - } - - cipher_buf = (char*)OPENSSL_zalloc(CIPHER_BUF_SIZE); - if (cipher_buf == NULL) { - return NULL; - } - - for (i = 0; i < num; i++) { - errorno = memcpy_s(cipher_buf + catlen, strlen(ciphers[i]), ciphers[i], strlen(ciphers[i])); - securec_check(errorno, "\0", "\0"); - - catlen += strlen(ciphers[i]); - - if (i < num - 1) { - errorno = memcpy_s(cipher_buf + catlen, CIPHER_BUF_SIZE - catlen, ":", 1); - securec_check(errorno, "\0", "\0"); - catlen += 1; - } - } - - cipher_buf[catlen] = 0; - return cipher_buf; -} - -/* - * Brief : static int comm_ssl_set_cipher_list(SSL_CTX *ctx, const char* ciphers[], const int num) - * Description : set ssl ciphers. - */ -static int comm_ssl_set_cipher_list(SSL_CTX* ctx, const char* ciphers[], const int num) -{ - int ret = 0; - int rc = 0; - char* cipher_buf = NULL; - - if (ctx == NULL) { - return 0; - } - - cipher_buf = comm_ssl_get_cipher_string(ciphers, num); - if (cipher_buf == NULL) { - return 0; - } - - ret = SSL_CTX_set_cipher_list(ctx, cipher_buf); - rc = memset_s(cipher_buf, strlen(cipher_buf) + 1, 0, strlen(cipher_buf) + 1); - securec_check(rc, "\0", "\0"); - OPENSSL_free(cipher_buf); - return ret; -} - -/* - * Obtain reason string for last SSL error - * - * Some caution is needed here since ERR_reason_error_string will - * return NULL if it doesn't recognize the error code. We don't - * want to return NULL ever. - */ -static const char* comm_ssl_get_errmsg(void) -{ - unsigned long errcode; - const char* errreason = NULL; - static char errbuf[32]; - - errcode = ERR_get_error(); - if (errcode == 0) - return _("no SSL error reported"); - errreason = ERR_reason_error_string(errcode); - if (errreason != NULL) - return errreason; - int rcs = snprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, _("SSL error code %lu"), errcode); - securec_check_ss(rcs, "\0", "\0"); - return errbuf; -} - -/* - * Close SSL connection. - */ -void comm_ssl_close(SSL_INFO* port) -{ - LIBCOMM_ELOG(LOG, "comm_ssl_close, sock is %d", port->sock); - if (port->ssl != NULL) { - SSL_shutdown(port->ssl); - SSL_free(port->ssl); - port->ssl = NULL; - } - - if (port->peer != NULL) { - X509_free(port->peer); - port->peer = NULL; - } - - if (port->peer_cn != NULL) { - pfree(port->peer_cn); - port->peer_cn = NULL; - } -} - -ssize_t comm_ssl_read(SSL_INFO* port, void* ptr, size_t len) -{ - ssize_t n; - - /* - * Try to read from the socket without blocking. 
If it succeeds we're - * done, otherwise we'll wait for the socket using the latch mechanism. - */ -#ifdef WIN32 - pgwin32_noblock = true; -#endif - PGSTAT_INIT_TIME_RECORD(); - PGSTAT_START_TIME_RECORD(); - n = recv(port->sock, ptr, len, 0); - END_NET_RECV_INFO(n); -#ifdef WIN32 - pgwin32_noblock = false; -#endif - - return n; -} - -ssize_t comm_ssl_write(SSL_INFO* port, const void* ptr, size_t len) -{ - ssize_t n; - -#ifdef WIN32 - pgwin32_noblock = true; -#endif - PGSTAT_INIT_TIME_RECORD(); - PGSTAT_START_TIME_RECORD(); - n = send(port->sock, ptr, len, 0); - END_NET_SEND_INFO(n); -#ifdef WIN32 - pgwin32_noblock = false; -#endif - - return n; -} - -static int my_sock_read(BIO* h, char* buf, int size) -{ - int res = 0; - - if (buf != NULL) { - SSL_INFO* myPort = (SSL_INFO*)BIO_get_data(h); - if (myPort == NULL) { - return 0; - } - - prepare_for_client_read(); - res = comm_ssl_read(myPort, buf, size); - client_read_ended(); - BIO_clear_retry_flags(h); - if (res <= 0) { - /* If we were interrupted, tell caller to retry */ - if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) { - BIO_set_retry_read(h); - } - } - } - - return res; -} - -static int my_sock_write(BIO* h, const char* buf, int size) -{ - int res = 0; - SSL_INFO* myPort = (SSL_INFO*)BIO_get_data(h); - if (myPort == NULL) { - return 0; - } - - res = comm_ssl_write(myPort, (void*)buf, size); - BIO_clear_retry_flags(h); - if (res <= 0) { - /* If we were interrupted, tell caller to retry */ - if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) { - BIO_set_retry_write(h); - } - } - - return res; -} - -static BIO_METHOD* comm_ssl_get_BIO_socket(void) -{ - static BIO_METHOD* my_bio_methods = NULL; - if (my_bio_methods == NULL) { - int my_bio_index; - - my_bio_index = BIO_get_new_index(); - if (my_bio_index == -1) - return NULL; - - BIO_METHOD* biom = (BIO_METHOD*)BIO_s_socket(); - my_bio_methods = BIO_meth_new(my_bio_index, "socket"); - if (my_bio_methods == NULL) { - return NULL; - } - - if (!BIO_meth_set_write(my_bio_methods, my_sock_write) || !BIO_meth_set_read(my_bio_methods, my_sock_read) || - !BIO_meth_set_gets(my_bio_methods, BIO_meth_get_gets(biom)) || - !BIO_meth_set_puts(my_bio_methods, BIO_meth_get_puts(biom)) || - !BIO_meth_set_ctrl(my_bio_methods, BIO_meth_get_ctrl(biom)) || - !BIO_meth_set_create(my_bio_methods, BIO_meth_get_create(biom)) || - !BIO_meth_set_destroy(my_bio_methods, BIO_meth_get_destroy(biom)) || - !BIO_meth_set_callback_ctrl(my_bio_methods, BIO_meth_get_callback_ctrl(biom))) { - BIO_meth_free(my_bio_methods); - my_bio_methods = NULL; - return NULL; - } - } - return my_bio_methods; -} - -/* This should exactly match openssl's SSL_set_fd except for using my BIO */ -static int comm_ssl_set_fd(SSL_INFO* port, int fd) -{ - BIO* bio = NULL; - BIO_METHOD* bio_method = NULL; - - bio_method = comm_ssl_get_BIO_socket(); - if (bio_method == NULL) { - SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB); - return 0; - } - - bio = BIO_new(bio_method); - if (bio == NULL) { - SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB); - return 0; - } - BIO_set_data(bio, port); - - BIO_set_fd(bio, fd, BIO_NOCLOSE); - SSL_set_bio(port->ssl, bio, bio); - return 1; -} - -/* - * Brief : int open_server_SSL(SSL_INFO *port) - * Description : Attempt to negotiate SSL connection. 
- */ -int comm_ssl_open_server(SSL_INFO* port, int fd) -{ - int r; - int err; - - Assert(port->ssl == NULL); - Assert(port->peer == NULL); - - if (port->ssl != NULL) { - LIBCOMM_ELOG(WARNING, "comm_ssl_open_server port->ssl is not null(%p), fd is %d", port->ssl, fd); - return -1; - } - - if (port->peer != NULL) { - LIBCOMM_ELOG(WARNING, "comm_ssl_open_server port->peer is not null(%p), fd is %d", port->peer, fd); - return -1; - } - - if (g_instance.attr.attr_network.SSL_server_context == NULL) { - comm_initialize_SSL(); - if (g_instance.attr.attr_network.SSL_server_context != NULL) { - LIBCOMM_ELOG(LOG, "comm_initialize_SSL in comm_ssl_open_server success"); - } else { - LIBCOMM_ELOG(WARNING, "comm_initialize_SSL in comm_ssl_open_server failed"); - return -1; - } - } - - port->ssl = SSL_new(g_instance.attr.attr_network.SSL_server_context); - if (port->ssl == NULL) { - LIBCOMM_ELOG(WARNING, "SSL_new in comm_ssl_open_server failed"); - return -2; - } - LIBCOMM_ELOG(LOG, "SSL_new in comm_ssl_open_server success"); - - int ret_bind = comm_ssl_set_fd(port, fd); - if (1 != ret_bind) { - LIBCOMM_ELOG(WARNING, "SSL_set_fd in comm_ssl_open_server failed"); - return -2; - } else { - LIBCOMM_ELOG(LOG, "SSL_set_fd in comm_ssl_open_server success"); - } - SSL_set_accept_state(port->ssl); -aloop: - ERR_clear_error(); - r = SSL_accept(port->ssl); - if (r != 1) { - err = SSL_get_error(port->ssl, r); - switch (err) { - case SSL_ERROR_WANT_READ: // 2 - case SSL_ERROR_WANT_WRITE: // 3 -#ifdef WIN32 - pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl), - (err == SSL_ERROR_WANT_READ) ? (FD_READ | FD_CLOSE | FD_ACCEPT) : (FD_WRITE | FD_CLOSE), - INFINITE); -#endif - goto aloop; - case SSL_ERROR_SYSCALL: { // 5 - LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return SSL_ERROR_SYSCALL, %s %d", - comm_ssl_get_errmsg(), errno); - if (r < 0) { - LIBCOMM_ELOG(LOG, "could not accept SSL connection"); - } else { - LIBCOMM_ELOG(LOG, "could not accept SSL connection: EOF detected"); - } - break; - } - case SSL_ERROR_SSL: { // 1 - LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return SSL_ERROR_SSL, %s", - comm_ssl_get_errmsg()); - break; - } - case SSL_ERROR_ZERO_RETURN: { // 6 - LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return SSL_ERROR_ZERO_RETURN, %s", - comm_ssl_get_errmsg()); - break; - } - default: { - LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return default, %s", comm_ssl_get_errmsg()); - break; - } - } - return -2; - } - LIBCOMM_ELOG(LOG, "after SSL_accept with encryption"); - port->count = 0; - - /* Get client certificate, if available. */ - port->peer = SSL_get_peer_certificate(port->ssl); - - /* and extract the Common Name from it. */ - port->peer_cn = NULL; - if (port->peer != NULL) { - int rt; - int len; - char* peer_cn = NULL; - LIBCOMM_ELOG(LOG, "SSL_get_peer_certificate in comm_ssl_open_server return peer not null"); - - /* First find out the name's length and allocate a buffer for it. */ - len = X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer), NID_commonName, NULL, 0); - LIBCOMM_ELOG(LOG, "X509_NAME_get_text_by_NID in comm_ssl_open_server return %d", len); - if (len != -1) { - peer_cn = (char*)palloc(len + 1); - - rt = X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer), NID_commonName, peer_cn, len + 1); - if (rt != len) { - /* shouldn't happen */ - pfree(peer_cn); - return -2; - } - /* - * Reject embedded NULLs in certificate common name to prevent - * attacks like CVE-2009-4034. 
- */ - if ((size_t)(unsigned)len != strlen(peer_cn)) { - LIBCOMM_ELOG(WARNING, "SSL certificate's common name contains embedded null"); - pfree(peer_cn); - return -2; - } - port->peer_cn = peer_cn; - } - } - LIBCOMM_ELOG(DEBUG2, "SSL connection from \"%s\"", port->peer_cn ? port->peer_cn : "(anonymous)"); - LIBCOMM_ELOG(LOG, "comm_ssl_open_server SSL connection from \"%s\"", - (port->peer_cn ? port->peer_cn : "(anonymous)")); - - /* set up debugging/info callback */ - if (port->peer != NULL) { - SSL_set_info_callback(port->ssl, comm_ssl_info_cb); - LIBCOMM_ELOG(LOG, "SSL_set_info_callback in comm_ssl_open_server"); - } - - return 0; -} - -/* - * Brief : DH* comm_ssl_genDHKeyPair(COMM_SSL_DHKeyLength dhType) - * Notes : function to generate DH key pair - */ -static DH* comm_ssl_genDHKeyPair(COMM_SSL_DHKeyLength dhType) -{ - int ret = 0; - DH* dh = NULL; - BIGNUM* bn_prime = NULL; - unsigned char GENERATOR_2[] = {DH_GENERATOR_2}; - BIGNUM* bn_genenrator_2 = BN_bin2bn(GENERATOR_2, sizeof(GENERATOR_2), NULL); - if (bn_genenrator_2 == NULL) { - return NULL; - } - - switch (dhType) { - case DHKey768: - bn_prime = BN_get_rfc2409_prime_768(NULL); - break; - case DHKey1024: - bn_prime = BN_get_rfc2409_prime_1024(NULL); - break; - case DHKey1536: - bn_prime = BN_get_rfc3526_prime_1536(NULL); - break; - case DHKey2048: - bn_prime = BN_get_rfc3526_prime_2048(NULL); - break; - case DHKey3072: - bn_prime = BN_get_rfc3526_prime_3072(NULL); - break; - case DHKey4096: - bn_prime = BN_get_rfc3526_prime_4096(NULL); - break; - case DHKey6144: - bn_prime = BN_get_rfc3526_prime_6144(NULL); - break; - case DHKey8192: - bn_prime = BN_get_rfc3526_prime_8192(NULL); - break; - default: - break; - } - - if (bn_prime == NULL) { - BN_free(bn_genenrator_2); - return NULL; - } - - dh = DH_new(); - if (dh == NULL) { - BN_free(bn_prime); - BN_free(bn_genenrator_2); - return NULL; - } - - ret = DH_set0_pqg(dh, bn_prime, NULL, bn_genenrator_2); - if (!ret) { - BN_free(bn_prime); - BN_free(bn_genenrator_2); - DH_free(dh); - return NULL; - } - - ret = DH_generate_key(dh); - if (!ret) { - BN_free(bn_prime); - BN_free(bn_genenrator_2); - DH_free(dh); - return NULL; - } - - return dh; -} - -/* - * Brief : comm_ssl_set_default_ssl_ciphers,set default ssl ciphers - * Description : SEC.CNF.004 - */ -static void comm_ssl_set_default_ssl_ciphers() -{ - int default_ciphers_count = 0; - - for (int i = 0; comm_ssl_ciphers_map[i] != NULL; i++) { - default_ciphers_count++; - } - - if (comm_ssl_set_cipher_list(g_instance.attr.attr_network.SSL_server_context, - comm_ssl_ciphers_map, default_ciphers_count) != 1) { - LIBCOMM_ELOG(WARNING, "could not set the cipher list (no valid ciphers available)"); - Assert(0); - return; - } -} - -/* - * Brief : comm_ssl_set_user_config_ssl_ciphers,set the specified ssl ciphers by user - * Description : SEC.CNF.004 - */ -static void comm_ssl_set_user_config_ssl_ciphers(const char* sslciphers) -{ - char* cipherStr = NULL; - char* cipherStr_tmp = NULL; - char* token = NULL; - int counter = 1; - char** ciphers_list = NULL; - bool find_ciphers_in_list = false; - int i = 0; - char* ptok = NULL; - if (sslciphers == NULL) { - LIBCOMM_ELOG(WARNING, "ssl ciphers can not be null"); - Assert(0); - return; - } else { - cipherStr = (char*)strchr(sslciphers, ';'); // if the sslciphers does not contain character ';', the count is 1 - while (cipherStr != NULL) { - counter++; - cipherStr++; - if (*cipherStr == '\0') { - break; - } - cipherStr = strchr(cipherStr, ';'); - } - ciphers_list = (char**)palloc(counter * 
sizeof(char*)); - - Assert(ciphers_list != NULL); - if (ciphers_list == NULL) { - LIBCOMM_ELOG(WARNING, "comm_ssl_set_user_config_ssl_ciphers malloc ciphers_list failed"); - Assert(0); - return; - } - - cipherStr_tmp = pstrdup(sslciphers); - if (cipherStr_tmp == NULL) { - if (ciphers_list != NULL) - pfree(ciphers_list); - ciphers_list = NULL; - LIBCOMM_ELOG(WARNING, "comm_ssl_set_user_config_ssl_ciphers malloc cipherStr_tmp failed"); - Assert(0); - return; - } - token = strtok_r(cipherStr_tmp, ";", &ptok); - while (token != NULL) { - for (int j = 0; comm_ssl_ciphers_map[j] != NULL; j++) { - if (strlen(comm_ssl_ciphers_map[j]) == strlen(token) && - strncmp(comm_ssl_ciphers_map[j], token, strlen(token)) == 0) { - ciphers_list[i] = (char*)comm_ssl_ciphers_map[j]; - find_ciphers_in_list = true; - break; - } - } - if (!find_ciphers_in_list) { - errno_t errorno = EOK; - const int maxCipherStrLen = 64; - char errormessage[maxCipherStrLen] = {0}; - errorno = strncpy_s(errormessage, sizeof(errormessage), token, sizeof(errormessage) - 1); - securec_check(errorno, cipherStr_tmp, ciphers_list, "\0"); - errormessage[maxCipherStrLen - 1] = '\0'; - if (cipherStr_tmp != NULL) { - pfree(cipherStr_tmp); - cipherStr_tmp = NULL; - } - if (ciphers_list != NULL) { - pfree(ciphers_list); - ciphers_list = NULL; - } - LIBCOMM_ELOG(WARNING, "unrecognized ssl ciphers name: \"%s\"", errormessage); - Assert(0); - return; - } - token = strtok_r(NULL, ";", &ptok); - i++; - find_ciphers_in_list = false; - } - } - if (comm_ssl_set_cipher_list( - g_instance.attr.attr_network.SSL_server_context, (const char**)ciphers_list, counter) != 1) { - if (cipherStr_tmp != NULL) { - pfree(cipherStr_tmp); - cipherStr_tmp = NULL; - } - if (ciphers_list != NULL) { - pfree(ciphers_list); - ciphers_list = NULL; - } - LIBCOMM_ELOG(WARNING, "could not set the cipher list (no valid ciphers available)"); - Assert(0); - return; - } - if (cipherStr_tmp != NULL) { - pfree(cipherStr_tmp); - cipherStr_tmp = NULL; - } - if (ciphers_list != NULL) { - pfree(ciphers_list); - ciphers_list = NULL; - } -} - -/* Check permissions of cipher file and rand file in server */ -static void comm_ssl_comm_check_cipher_file(const char* parent_dir) -{ - char cipher_file[MAXPGPATH] = {0}; - char rand_file[MAXPGPATH] = {0}; - struct stat cipherbuf; - struct stat randbuf; - int rcs = snprintf_s(cipher_file, MAXPGPATH, MAXPGPATH - 1, "%s/server%s", parent_dir, CIPHER_KEY_FILE); - securec_check_ss(rcs, "\0", "\0"); - rcs = snprintf_s(rand_file, MAXPGPATH, MAXPGPATH - 1, "%s/server%s", parent_dir, RAN_KEY_FILE); - securec_check_ss(rcs, "\0", "\0"); - if (lstat(cipher_file, &cipherbuf) != 0 || lstat(rand_file, &randbuf) != 0) - return; -#if !defined(WIN32) && !defined(__CYGWIN__) - if (!S_ISREG(cipherbuf.st_mode) || - (cipherbuf.st_mode & (S_IRWXG | S_IRWXO)) || ((cipherbuf.st_mode & S_IRWXU) == S_IRWXU)) { - LIBCOMM_ELOG(WARNING, "cipher file \"%s\" has group or world access. 
" - "Permissions should be u=rw (0600) or less.", cipher_file); - Assert(0); - return; - } - if (!S_ISREG(randbuf.st_mode) || - (randbuf.st_mode & (S_IRWXG | S_IRWXO)) || ((randbuf.st_mode & S_IRWXU) == S_IRWXU)) { - LIBCOMM_ELOG(WARNING, "rand file \"%s\" has group or world access" - "Permissions should be u=rw (0600) or less.", rand_file); - Assert(0); - return; - } - -#endif -} - -/* set the default password for certificate/private key loading */ -static void comm_ssl_comm_init_server_ssl_pwd(SSL_CTX* sslContext) -{ - char* parentdir = NULL; - KeyMode keymode = SERVER_MODE; - if (g_instance.attr.attr_security.ssl_key_file == NULL) { - LIBCOMM_ELOG(LOG, "In comm_ssl_comm_init_server_ssl_pwd, ssl_key_file is NULL"); - return; - } - if (is_absolute_path(g_instance.attr.attr_security.ssl_key_file)) { - parentdir = pstrdup(g_instance.attr.attr_security.ssl_key_file); - get_parent_directory(parentdir); - decode_cipher_files(keymode, NULL, parentdir, g_instance.attr.attr_network.server_key); - } else { - decode_cipher_files(keymode, NULL, t_thrd.proc_cxt.DataDir, g_instance.attr.attr_network.server_key); - parentdir = pstrdup(t_thrd.proc_cxt.DataDir); - } - - comm_ssl_comm_check_cipher_file(parentdir); - pfree_ext(parentdir); - - SSL_CTX_set_default_passwd_cb_userdata(sslContext, (char*)g_instance.attr.attr_network.server_key); -} - -void comm_initialize_SSL() -{ - struct stat buf; - STACK_OF(X509_NAME)* root_cert_list = NULL; - errno_t errorno = EOK; - - /* Already initialized SSL, return here */ - if (g_instance.attr.attr_network.ssl_initialized) { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, ssl_initialized is true, return"); - return; - } else { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, ssl_initialized is false"); - } - - if (!g_instance.attr.attr_network.SSL_server_context) { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, SSL_server_context is null"); - if (OPENSSL_init_ssl(0, NULL) != 1) { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, OPENSSL_init_ssl is failed"); - return; - } else { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, OPENSSL_init_ssl success"); - } - SSL_load_error_strings(); - - g_instance.attr.attr_network.SSL_server_context = SSL_CTX_new(TLS_method()); - if (!g_instance.attr.attr_network.SSL_server_context) { - LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not create SSL context"); - Assert(0); - return; - } else { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, create SSL context success"); - } - if (g_instance.attr.attr_network.server_key == NULL) { - g_instance.attr.attr_network.server_key = (GS_UCHAR*)palloc0((CIPHER_LEN + 1) * sizeof(GS_UCHAR)); - } - if (g_instance.attr.attr_network.server_key == NULL) { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, palloc server_key failed"); - } else { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, palloc server_key success"); - } - /* - * Disable moving-write-buffer sanity check, because it - * causes unnecessary failures in nonblocking send cases. 
- */ - SSL_CTX_set_mode(g_instance.attr.attr_network.SSL_server_context, \ - SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, SSL_CTX_set_mode"); - - /* set the default password for certificate/private key loading */ - comm_ssl_comm_init_server_ssl_pwd(g_instance.attr.attr_network.SSL_server_context); - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, comm_ssl_comm_init_server_ssl_pwd"); - char *buffer; - if((buffer = getcwd(NULL,0)) == NULL){ - LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, getcwd error"); - } else{ - free(buffer); - } - - /* Load and verify server's certificate and private key */ - if (SSL_CTX_use_certificate_chain_file(g_instance.attr.attr_network.SSL_server_context, \ - g_instance.attr.attr_security.ssl_cert_file) != 1) { - LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not load server certificate file %s", - g_instance.attr.attr_security.ssl_cert_file); - Assert(0); - return; - } - - /* check certificate file permission */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if (stat(g_instance.attr.attr_security.ssl_cert_file, &buf) == 0){ - if (!S_ISREG(buf.st_mode) || - (buf.st_mode & (S_IRWXG | S_IRWXO)) || ((buf.st_mode & S_IRWXU) == S_IRWXU)) { - LIBCOMM_ELOG(WARNING, "certificate file \"%s\" has group or world access" - "Permissions should be u=rw (0600) or less.", g_instance.attr.attr_security.ssl_cert_file); - Assert(0); - return; - } - } -#endif - - if (stat(g_instance.attr.attr_security.ssl_key_file, &buf) != 0) { - LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not access private key file %s", - g_instance.attr.attr_security.ssl_key_file); - Assert(0); - return; - } - - /* - * Require no public access to key file. - * - * XXX temporarily suppress check when on Windows, because there may - * not be proper support for Unix-y file permissions. Need to think - * of a reasonable check to apply on Windows. 
(See also the data - * directory permission check in postmaster.c) - */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if (!S_ISREG(buf.st_mode) || - (buf.st_mode & (S_IRWXG | S_IRWXO)) || ((buf.st_mode & S_IRWXU) == S_IRWXU)) { - LIBCOMM_ELOG(WARNING, - "private key file \"%s\" has group or world access Permissions should be u=rw (0600) or less.", - g_instance.attr.attr_security.ssl_key_file); - } -#endif - - if (SSL_CTX_use_PrivateKey_file(g_instance.attr.attr_network.SSL_server_context, \ - g_instance.attr.attr_security.ssl_key_file, \ - SSL_FILETYPE_PEM) != 1) { - LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not load private key file \"%s\"", - g_instance.attr.attr_security.ssl_key_file); - Assert(0); - return; - } - - if (SSL_CTX_check_private_key(g_instance.attr.attr_network.SSL_server_context) != 1) { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, check of private key(%s) failed", - g_instance.attr.attr_security.ssl_key_file); - } - } - - /* check ca certificate file permission */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if (stat(g_instance.attr.attr_security.ssl_ca_file, &buf) == 0){ - if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) || ((buf.st_mode & S_IRWXU) == S_IRWXU)) { - LIBCOMM_ELOG(WARNING, - "ca certificate file \"%s\" has group or world access Permissions should be u=rw (0600) or less.", - g_instance.attr.attr_security.ssl_ca_file); - Assert(0); - return; - } - } -#endif - - /* Check the signature algorithm.*/ - if (check_certificate_signature_algrithm(g_instance.attr.attr_network.SSL_server_context)) { - LIBCOMM_ELOG(LOG, "In initialize_SSL, The server certificate contain a Low-intensity signature algorithm"); - } - - /* Check the certificate expires time. */ - long leftspan = check_certificate_time(g_instance.attr.attr_network.SSL_server_context, - u_sess->attr.attr_security.ssl_cert_notify_time); - if (leftspan > 0) { - int leftdays = (leftspan / 86400 > 0) ? (leftspan / 86400) : 1; - if (leftdays > 1) { - LIBCOMM_ELOG(WARNING, "The server certificate will expire in %d days", leftdays); - } else { - LIBCOMM_ELOG(WARNING, "The server certificate will expire in %d day", leftdays); - } - } - - /* set up ephemeral DH keys, and disallow SSL v2 while at it - * free the dh directly safe as there is reference counts in DH - */ - DH* dhkey = comm_ssl_genDHKeyPair(DHKey3072); - if (dhkey == NULL) { - LIBCOMM_ELOG(WARNING, "DH: generating parameters (3072 bits) failed"); - Assert(0); - return; - } - SSL_CTX_set_tmp_dh(g_instance.attr.attr_network.SSL_server_context, dhkey); - DH_free(dhkey); - - /* SSL2.0/SSL3.0/TLS1.0/TLS1.1 is forbidden here. */ - SSL_CTX_set_options(g_instance.attr.attr_network.SSL_server_context, - SSL_OP_SINGLE_DH_USE | SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1); - - /* set up the allowed cipher list */ - if (strcasecmp(g_instance.attr.attr_security.SSLCipherSuites, "ALL") == 0) { - comm_ssl_set_default_ssl_ciphers(); - } else { - comm_ssl_set_user_config_ssl_ciphers(g_instance.attr.attr_security.SSLCipherSuites); - } - - /* Load CA store, so we can verify client certificates if needed. 
*/ - if (g_instance.attr.attr_security.ssl_ca_file[0]) { - if (SSL_CTX_load_verify_locations(g_instance.attr.attr_network.SSL_server_context, - g_instance.attr.attr_security.ssl_ca_file, NULL) != 1) { - LIBCOMM_ELOG(WARNING, "could not load the ca certificate file"); - } - - root_cert_list = SSL_load_client_CA_file(g_instance.attr.attr_security.ssl_ca_file); - if (root_cert_list == NULL) { - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, could not load root certificate file %s", - g_instance.attr.attr_security.ssl_ca_file); - } - } - - /* Load the Certificate Revocation List (CRL). */ - if (g_instance.attr.attr_security.ssl_crl_file[0]) { - X509_STORE* cvstore = SSL_CTX_get_cert_store(g_instance.attr.attr_network.SSL_server_context); - if (cvstore != NULL) { - /* Set the flags to check against the complete CRL chain */ - if (1 == X509_STORE_load_locations(cvstore, g_instance.attr.attr_security.ssl_crl_file, NULL)) { - (void)X509_STORE_set_flags(cvstore, X509_V_FLAG_CRL_CHECK); - } else { - LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not load SSL certificate revocation list file %s", - g_instance.attr.attr_security.ssl_crl_file); - } - } - } - - if (g_instance.attr.attr_security.ssl_ca_file[0]) { - /* - * Always ask for SSL client cert, but don't fail if it's not - * presented. We might fail such connections later, depending on - * what we find in pg_hba.conf. - */ - SSL_CTX_set_verify(g_instance.attr.attr_network.SSL_server_context, - (SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE), - comm_ssl_verify_cb); - - /* Increase the depth to support multi-level certificate. */ - SSL_CTX_set_verify_depth(g_instance.attr.attr_network.SSL_server_context, - (MAX_CERTIFICATE_DEPTH_SUPPORTED - 2)); - - /* - * send the list of root certs we trust to clients in - * CertificateRequests. This lets a client with a keystore select the - * appropriate client certificate to send to us. - */ - SSL_CTX_set_client_CA_list(g_instance.attr.attr_network.SSL_server_context, root_cert_list); - } - - /* clear the sensitive info in server_key */ - errorno = memset_s(g_instance.attr.attr_network.server_key, CIPHER_LEN + 1, 0, CIPHER_LEN + 1); - securec_check(errorno, "\0", "\0"); - - g_instance.attr.attr_network.ssl_initialized = true; - LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, set ssl_initialized true"); -} - -libcomm_sslinfo** comm_ssl_find_port(libcomm_sslinfo** head, int sock) -{ - Assert(head != NULL); - - libcomm_sslinfo** p; - for (p = head; *p != NULL && (*p)->node.sock != sock; p = &(*p)->next) { - // find node until sock is same - } - if (*p == NULL) { - return p; - } - if ((*p)->node.sock == sock) { - return p; - } - // should never go here - LIBCOMM_ELOG(LOG, "In comm_ssl_find_port, find info of sock[%d] failed", sock); - return head; -} - -SSL* comm_ssl_find_ssl_by_fd(int sock) -{ - libcomm_sslinfo* port = g_instance.comm_cxt.libcomm_ctrl_port_list; - while (port != NULL) { - if (port->node.sock == sock) { - return port->node.ssl; - } else { - port = port->next; - } - } - port = g_instance.comm_cxt.libcomm_data_port_list; - while (port != NULL) { - if (port->node.sock == sock) { - return port->node.ssl; - } else { - port = port->next; - } - } - LIBCOMM_ELOG(LOG, "In comm_ssl_find_ssl_by_fd, find ssl of sock[%d] failed", sock); - return NULL; -} - +/* + * Copyright (c) 2021 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * libcomm_server_ssl.cpp + * + * IDENTIFICATION + * src/gausskernel/cbb/communication/libcomm_utils/libcomm_server_ssl.cpp + * + * ------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../libcomm_core/mc_tcp.h" +#include "../libcomm_core/mc_poller.h" +#include "libcomm_thread.h" +#include "libcomm_lqueue.h" +#include "libcomm_queue.h" +#include "libcomm_lock_free_queue.h" +#include "distributelayer/streamCore.h" +#include "distributelayer/streamProducer.h" +#include "pgxc/poolmgr.h" +#include "libpq/auth.h" +#include "libpq/pqsignal.h" +#include "storage/ipc.h" +#include "utils/ps_status.h" +#include "utils/dynahash.h" + +#include "vecexecutor/vectorbatch.h" +#include "vecexecutor/vecnodes.h" +#include "executor/exec/execStream.h" +#include "miscadmin.h" +#include "gssignal/gs_signal.h" +#include "pgxc/pgxc.h" +#include "libcomm/libcomm.h" +#include "../libcomm_common.h" + +#ifdef USE_SSL +#include "openssl/err.h" +#include "openssl/ssl.h" +#include "openssl/rand.h" +#include "openssl/ossl_typ.h" +#include "openssl/sslerr.h" +#include "openssl/obj_mac.h" +#include "openssl/dh.h" +#include "openssl/bn.h" +#include "openssl/x509.h" +#include "openssl/x509_vfy.h" +#include "openssl/opensslconf.h" +#include "openssl/crypto.h" +#include "openssl/bio.h" + +#define MAX_CERTIFICATE_DEPTH_SUPPORTED 20 /* The max certificate depth supported. */ + +/* ------------------------------------------------------------ */ +/* SSL specific code */ +/* ------------------------------------------------------------ */ + +typedef enum { + DHKey768 = 1, + DHKey1024, + DHKey1536, + DHKey2048, + DHKey3072, + DHKey4096, + DHKey6144, + DHKey8192 +} COMM_SSL_DHKeyLength; + +/* security ciphers suites in SSL connection */ +const char* comm_ssl_ciphers_map[] = { + TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 */ + TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 */ + TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, /* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 */ + TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, /* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 */ + NULL}; + +/* + * Certificate verification callback + * + * This callback allows us to log intermediate problems during + * verification, but for now we'll see if the final error message + * contains enough information. + * + * This callback also allows us to override the default acceptance + * criteria (e.g., accepting self-signed or expired certs), but + * for now we accept the default checks. + */ +int comm_ssl_verify_cb(int code, X509_STORE_CTX* ctx) +{ + return code; +} + +/* + * This callback is used to copy SSL information messages + * into the PostgreSQL log. 
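+ * The callback is installed via SSL_set_info_callback() in
+ * comm_ssl_open_server(); OpenSSL then invokes it with an SSL_CB_*
+ * type mask for every state transition, which is translated into
+ * the DEBUG4-level messages below.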
+ */ +static void comm_ssl_info_cb(const SSL* ssl, int type, int args) +{ + switch (type) { + case SSL_CB_HANDSHAKE_START: + LIBCOMM_ELOG(DEBUG4, "SSL: handshake start"); + break; + case SSL_CB_HANDSHAKE_DONE: + LIBCOMM_ELOG(DEBUG4, "SSL: handshake done"); + break; + case SSL_CB_ACCEPT_LOOP: + LIBCOMM_ELOG(DEBUG4, "SSL: accept loop"); + break; + case SSL_CB_ACCEPT_EXIT: + LIBCOMM_ELOG(DEBUG4, "SSL: accept exit (%d)", args); + break; + case SSL_CB_CONNECT_LOOP: + LIBCOMM_ELOG(DEBUG4, "SSL: connect loop"); + break; + case SSL_CB_CONNECT_EXIT: + LIBCOMM_ELOG(DEBUG4, "SSL: connect exit (%d)", args); + break; + case SSL_CB_READ_ALERT: + LIBCOMM_ELOG(DEBUG4, "SSL: read alert (0x%04x)", args); + break; + case SSL_CB_WRITE_ALERT: + LIBCOMM_ELOG(DEBUG4, "SSL: write alert (0x%04x)", args); + break; + default: + break; + } +} + +char* comm_ssl_get_cipher_string(const char* ciphers[], const int num) +{ + int i; + int catlen = 0; + char* cipher_buf = NULL; + errno_t errorno = EOK; + + size_t CIPHER_BUF_SIZE = 0; + for (i = 0; i < num; i++) { + CIPHER_BUF_SIZE += (strlen(ciphers[i]) + 1); + } + + cipher_buf = (char*)OPENSSL_zalloc(CIPHER_BUF_SIZE); + if (cipher_buf == NULL) { + return NULL; + } + + for (i = 0; i < num; i++) { + errorno = memcpy_s(cipher_buf + catlen, strlen(ciphers[i]), ciphers[i], strlen(ciphers[i])); + securec_check(errorno, "\0", "\0"); + + catlen += strlen(ciphers[i]); + + if (i < num - 1) { + errorno = memcpy_s(cipher_buf + catlen, CIPHER_BUF_SIZE - catlen, ":", 1); + securec_check(errorno, "\0", "\0"); + catlen += 1; + } + } + + cipher_buf[catlen] = 0; + return cipher_buf; +} + +/* + * Brief : static int comm_ssl_set_cipher_list(SSL_CTX *ctx, const char* ciphers[], const int num) + * Description : set ssl ciphers. + */ +static int comm_ssl_set_cipher_list(SSL_CTX* ctx, const char* ciphers[], const int num) +{ + int ret = 0; + int rc = 0; + char* cipher_buf = NULL; + + if (ctx == NULL) { + return 0; + } + + cipher_buf = comm_ssl_get_cipher_string(ciphers, num); + if (cipher_buf == NULL) { + return 0; + } + + ret = SSL_CTX_set_cipher_list(ctx, cipher_buf); + rc = memset_s(cipher_buf, strlen(cipher_buf) + 1, 0, strlen(cipher_buf) + 1); + securec_check(rc, "\0", "\0"); + OPENSSL_free(cipher_buf); + return ret; +} + +/* + * Obtain reason string for last SSL error + * + * Some caution is needed here since ERR_reason_error_string will + * return NULL if it doesn't recognize the error code. We don't + * want to return NULL ever. + */ +static const char* comm_ssl_get_errmsg(void) +{ + unsigned long errcode; + const char* errreason = NULL; + static char errbuf[32]; + + errcode = ERR_get_error(); + if (errcode == 0) + return _("no SSL error reported"); + errreason = ERR_reason_error_string(errcode); + if (errreason != NULL) + return errreason; + int rcs = snprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, _("SSL error code %lu"), errcode); + securec_check_ss(rcs, "\0", "\0"); + return errbuf; +} + +/* + * Close SSL connection. + */ +void comm_ssl_close(SSL_INFO* port) +{ + LIBCOMM_ELOG(LOG, "comm_ssl_close, sock is %d", port->sock); + if (port->ssl != NULL) { + SSL_shutdown(port->ssl); + SSL_free(port->ssl); + port->ssl = NULL; + } + + if (port->peer != NULL) { + X509_free(port->peer); + port->peer = NULL; + } + + if (port->peer_cn != NULL) { + pfree(port->peer_cn); + port->peer_cn = NULL; + } +} + +ssize_t comm_ssl_read(SSL_INFO* port, void* ptr, size_t len) +{ + ssize_t n; + + /* + * Try to read from the socket without blocking. 
If it succeeds we're + * done, otherwise we'll wait for the socket using the latch mechanism. + */ +#ifdef WIN32 + pgwin32_noblock = true; +#endif + PGSTAT_INIT_TIME_RECORD(); + PGSTAT_START_TIME_RECORD(); + n = recv(port->sock, ptr, len, 0); + END_NET_RECV_INFO(n); +#ifdef WIN32 + pgwin32_noblock = false; +#endif + + return n; +} + +ssize_t comm_ssl_write(SSL_INFO* port, const void* ptr, size_t len) +{ + ssize_t n; + +#ifdef WIN32 + pgwin32_noblock = true; +#endif + PGSTAT_INIT_TIME_RECORD(); + PGSTAT_START_TIME_RECORD(); + n = send(port->sock, ptr, len, 0); + END_NET_SEND_INFO(n); +#ifdef WIN32 + pgwin32_noblock = false; +#endif + + return n; +} + +static int my_sock_read(BIO* h, char* buf, int size) +{ + int res = 0; + + if (buf != NULL) { + SSL_INFO* myPort = (SSL_INFO*)BIO_get_data(h); + if (myPort == NULL) { + return 0; + } + + prepare_for_client_read(); + res = comm_ssl_read(myPort, buf, size); + client_read_ended(); + BIO_clear_retry_flags(h); + if (res <= 0) { + /* If we were interrupted, tell caller to retry */ + if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) { + BIO_set_retry_read(h); + } + } + } + + return res; +} + +static int my_sock_write(BIO* h, const char* buf, int size) +{ + int res = 0; + SSL_INFO* myPort = (SSL_INFO*)BIO_get_data(h); + if (myPort == NULL) { + return 0; + } + + res = comm_ssl_write(myPort, (void*)buf, size); + BIO_clear_retry_flags(h); + if (res <= 0) { + /* If we were interrupted, tell caller to retry */ + if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) { + BIO_set_retry_write(h); + } + } + + return res; +} + +static BIO_METHOD* comm_ssl_get_BIO_socket(void) +{ + static BIO_METHOD* my_bio_methods = NULL; + if (my_bio_methods == NULL) { + int my_bio_index; + + my_bio_index = BIO_get_new_index(); + if (my_bio_index == -1) + return NULL; + + BIO_METHOD* biom = (BIO_METHOD*)BIO_s_socket(); + my_bio_methods = BIO_meth_new(my_bio_index, "socket"); + if (my_bio_methods == NULL) { + return NULL; + } + + if (!BIO_meth_set_write(my_bio_methods, my_sock_write) || !BIO_meth_set_read(my_bio_methods, my_sock_read) || + !BIO_meth_set_gets(my_bio_methods, BIO_meth_get_gets(biom)) || + !BIO_meth_set_puts(my_bio_methods, BIO_meth_get_puts(biom)) || + !BIO_meth_set_ctrl(my_bio_methods, BIO_meth_get_ctrl(biom)) || + !BIO_meth_set_create(my_bio_methods, BIO_meth_get_create(biom)) || + !BIO_meth_set_destroy(my_bio_methods, BIO_meth_get_destroy(biom)) || + !BIO_meth_set_callback_ctrl(my_bio_methods, BIO_meth_get_callback_ctrl(biom))) { + BIO_meth_free(my_bio_methods); + my_bio_methods = NULL; + return NULL; + } + } + return my_bio_methods; +} + +/* This should exactly match openssl's SSL_set_fd except for using my BIO */ +static int comm_ssl_set_fd(SSL_INFO* port, int fd) +{ + BIO* bio = NULL; + BIO_METHOD* bio_method = NULL; + + bio_method = comm_ssl_get_BIO_socket(); + if (bio_method == NULL) { + SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB); + return 0; + } + + bio = BIO_new(bio_method); + if (bio == NULL) { + SSLerr(SSL_F_SSL_SET_FD, ERR_R_BUF_LIB); + return 0; + } + BIO_set_data(bio, port); + + BIO_set_fd(bio, fd, BIO_NOCLOSE); + SSL_set_bio(port->ssl, bio, bio); + return 1; +} + +/* + * Brief : int open_server_SSL(SSL_INFO *port) + * Description : Attempt to negotiate SSL connection. 
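+ * Returns 0 on success, -1 if the port already carries SSL state or the
+ * server SSL context cannot be initialized, and -2 on SSL_new/SSL_set_fd
+ * failure, a failed SSL_accept, or a malformed peer common name.
+ * SSL_ERROR_WANT_READ/WANT_WRITE on a nonblocking socket simply retries
+ * SSL_accept.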
+ */ +int comm_ssl_open_server(SSL_INFO* port, int fd) +{ + int r; + int err; + + Assert(port->ssl == NULL); + Assert(port->peer == NULL); + + if (port->ssl != NULL) { + LIBCOMM_ELOG(WARNING, "comm_ssl_open_server port->ssl is not null(%p), fd is %d", port->ssl, fd); + return -1; + } + + if (port->peer != NULL) { + LIBCOMM_ELOG(WARNING, "comm_ssl_open_server port->peer is not null(%p), fd is %d", port->peer, fd); + return -1; + } + + if (g_instance.attr.attr_network.SSL_server_context == NULL) { + comm_initialize_SSL(); + if (g_instance.attr.attr_network.SSL_server_context != NULL) { + LIBCOMM_ELOG(LOG, "comm_initialize_SSL in comm_ssl_open_server success"); + } else { + LIBCOMM_ELOG(WARNING, "comm_initialize_SSL in comm_ssl_open_server failed"); + return -1; + } + } + + port->ssl = SSL_new(g_instance.attr.attr_network.SSL_server_context); + if (port->ssl == NULL) { + LIBCOMM_ELOG(WARNING, "SSL_new in comm_ssl_open_server failed"); + return -2; + } + LIBCOMM_ELOG(LOG, "SSL_new in comm_ssl_open_server success"); + + int ret_bind = comm_ssl_set_fd(port, fd); + if (1 != ret_bind) { + LIBCOMM_ELOG(WARNING, "SSL_set_fd in comm_ssl_open_server failed"); + return -2; + } else { + LIBCOMM_ELOG(LOG, "SSL_set_fd in comm_ssl_open_server success"); + } + SSL_set_accept_state(port->ssl); +aloop: + ERR_clear_error(); + r = SSL_accept(port->ssl); + if (r != 1) { + err = SSL_get_error(port->ssl, r); + switch (err) { + case SSL_ERROR_WANT_READ: // 2 + case SSL_ERROR_WANT_WRITE: // 3 +#ifdef WIN32 + pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl), + (err == SSL_ERROR_WANT_READ) ? (FD_READ | FD_CLOSE | FD_ACCEPT) : (FD_WRITE | FD_CLOSE), + INFINITE); +#endif + goto aloop; + case SSL_ERROR_SYSCALL: { // 5 + LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return SSL_ERROR_SYSCALL, %s %d", + comm_ssl_get_errmsg(), errno); + if (r < 0) { + LIBCOMM_ELOG(LOG, "could not accept SSL connection"); + } else { + LIBCOMM_ELOG(LOG, "could not accept SSL connection: EOF detected"); + } + break; + } + case SSL_ERROR_SSL: { // 1 + LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return SSL_ERROR_SSL, %s", + comm_ssl_get_errmsg()); + break; + } + case SSL_ERROR_ZERO_RETURN: { // 6 + LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return SSL_ERROR_ZERO_RETURN, %s", + comm_ssl_get_errmsg()); + break; + } + default: { + LIBCOMM_ELOG(LOG, "SSL_get_error in comm_ssl_open_server return default, %s", comm_ssl_get_errmsg()); + break; + } + } + return -2; + } + LIBCOMM_ELOG(LOG, "after SSL_accept with encryption"); + port->count = 0; + + /* Get client certificate, if available. */ + port->peer = SSL_get_peer_certificate(port->ssl); + + /* and extract the Common Name from it. */ + port->peer_cn = NULL; + if (port->peer != NULL) { + int rt; + int len; + char* peer_cn = NULL; + LIBCOMM_ELOG(LOG, "SSL_get_peer_certificate in comm_ssl_open_server return peer not null"); + + /* First find out the name's length and allocate a buffer for it. */ + len = X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer), NID_commonName, NULL, 0); + LIBCOMM_ELOG(LOG, "X509_NAME_get_text_by_NID in comm_ssl_open_server return %d", len); + if (len != -1) { + peer_cn = (char*)palloc(len + 1); + + rt = X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer), NID_commonName, peer_cn, len + 1); + if (rt != len) { + /* shouldn't happen */ + pfree(peer_cn); + return -2; + } + /* + * Reject embedded NULLs in certificate common name to prevent + * attacks like CVE-2009-4034. 
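+ * (A common name such as "trusted.host\0evil.host" would match
+ * "trusted.host" under string comparison while X509 reports the full
+ * length, so any strlen()/X509 length mismatch is rejected here.)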
+ */ + if ((size_t)(unsigned)len != strlen(peer_cn)) { + LIBCOMM_ELOG(WARNING, "SSL certificate's common name contains embedded null"); + pfree(peer_cn); + return -2; + } + port->peer_cn = peer_cn; + } + } + LIBCOMM_ELOG(DEBUG2, "SSL connection from \"%s\"", port->peer_cn ? port->peer_cn : "(anonymous)"); + LIBCOMM_ELOG(LOG, "comm_ssl_open_server SSL connection from \"%s\"", + (port->peer_cn ? port->peer_cn : "(anonymous)")); + + /* set up debugging/info callback */ + if (port->peer != NULL) { + SSL_set_info_callback(port->ssl, comm_ssl_info_cb); + LIBCOMM_ELOG(LOG, "SSL_set_info_callback in comm_ssl_open_server"); + } + + return 0; +} + +/* + * Brief : DH* comm_ssl_genDHKeyPair(COMM_SSL_DHKeyLength dhType) + * Notes : function to generate DH key pair + */ +static DH* comm_ssl_genDHKeyPair(COMM_SSL_DHKeyLength dhType) +{ + int ret = 0; + DH* dh = NULL; + BIGNUM* bn_prime = NULL; + unsigned char GENERATOR_2[] = {DH_GENERATOR_2}; + BIGNUM* bn_genenrator_2 = BN_bin2bn(GENERATOR_2, sizeof(GENERATOR_2), NULL); + if (bn_genenrator_2 == NULL) { + return NULL; + } + + switch (dhType) { + case DHKey768: + bn_prime = BN_get_rfc2409_prime_768(NULL); + break; + case DHKey1024: + bn_prime = BN_get_rfc2409_prime_1024(NULL); + break; + case DHKey1536: + bn_prime = BN_get_rfc3526_prime_1536(NULL); + break; + case DHKey2048: + bn_prime = BN_get_rfc3526_prime_2048(NULL); + break; + case DHKey3072: + bn_prime = BN_get_rfc3526_prime_3072(NULL); + break; + case DHKey4096: + bn_prime = BN_get_rfc3526_prime_4096(NULL); + break; + case DHKey6144: + bn_prime = BN_get_rfc3526_prime_6144(NULL); + break; + case DHKey8192: + bn_prime = BN_get_rfc3526_prime_8192(NULL); + break; + default: + break; + } + + if (bn_prime == NULL) { + BN_free(bn_genenrator_2); + return NULL; + } + + dh = DH_new(); + if (dh == NULL) { + BN_free(bn_prime); + BN_free(bn_genenrator_2); + return NULL; + } + + ret = DH_set0_pqg(dh, bn_prime, NULL, bn_genenrator_2); + if (!ret) { + BN_free(bn_prime); + BN_free(bn_genenrator_2); + DH_free(dh); + return NULL; + } + + ret = DH_generate_key(dh); + if (!ret) { + BN_free(bn_prime); + BN_free(bn_genenrator_2); + DH_free(dh); + return NULL; + } + + return dh; +} + +/* + * Brief : comm_ssl_set_default_ssl_ciphers,set default ssl ciphers + * Description : SEC.CNF.004 + */ +static void comm_ssl_set_default_ssl_ciphers() +{ + int default_ciphers_count = 0; + + for (int i = 0; comm_ssl_ciphers_map[i] != NULL; i++) { + default_ciphers_count++; + } + + if (comm_ssl_set_cipher_list(g_instance.attr.attr_network.SSL_server_context, + comm_ssl_ciphers_map, default_ciphers_count) != 1) { + LIBCOMM_ELOG(WARNING, "could not set the cipher list (no valid ciphers available)"); + Assert(0); + return; + } +} + +/* + * Brief : comm_ssl_set_user_config_ssl_ciphers,set the specified ssl ciphers by user + * Description : SEC.CNF.004 + */ +static void comm_ssl_set_user_config_ssl_ciphers(const char* sslciphers) +{ + char* cipherStr = NULL; + char* cipherStr_tmp = NULL; + char* token = NULL; + int counter = 1; + char** ciphers_list = NULL; + bool find_ciphers_in_list = false; + int i = 0; + char* ptok = NULL; + if (sslciphers == NULL) { + LIBCOMM_ELOG(WARNING, "ssl ciphers can not be null"); + Assert(0); + return; + } else { + cipherStr = (char*)strchr(sslciphers, ';'); // if the sslciphers does not contain character ';', the count is 1 + while (cipherStr != NULL) { + counter++; + cipherStr++; + if (*cipherStr == '\0') { + break; + } + cipherStr = strchr(cipherStr, ';'); + } + ciphers_list = (char**)palloc(counter * 
sizeof(char*)); + + Assert(ciphers_list != NULL); + if (ciphers_list == NULL) { + LIBCOMM_ELOG(WARNING, "comm_ssl_set_user_config_ssl_ciphers malloc ciphers_list failed"); + Assert(0); + return; + } + + cipherStr_tmp = pstrdup(sslciphers); + if (cipherStr_tmp == NULL) { + if (ciphers_list != NULL) + pfree(ciphers_list); + ciphers_list = NULL; + LIBCOMM_ELOG(WARNING, "comm_ssl_set_user_config_ssl_ciphers malloc cipherStr_tmp failed"); + Assert(0); + return; + } + token = strtok_r(cipherStr_tmp, ";", &ptok); + while (token != NULL) { + for (int j = 0; comm_ssl_ciphers_map[j] != NULL; j++) { + if (strlen(comm_ssl_ciphers_map[j]) == strlen(token) && + strncmp(comm_ssl_ciphers_map[j], token, strlen(token)) == 0) { + ciphers_list[i] = (char*)comm_ssl_ciphers_map[j]; + find_ciphers_in_list = true; + break; + } + } + if (!find_ciphers_in_list) { + errno_t errorno = EOK; + const int maxCipherStrLen = 64; + char errormessage[maxCipherStrLen] = {0}; + errorno = strncpy_s(errormessage, sizeof(errormessage), token, sizeof(errormessage) - 1); + securec_check(errorno, cipherStr_tmp, ciphers_list, "\0"); + errormessage[maxCipherStrLen - 1] = '\0'; + if (cipherStr_tmp != NULL) { + pfree(cipherStr_tmp); + cipherStr_tmp = NULL; + } + if (ciphers_list != NULL) { + pfree(ciphers_list); + ciphers_list = NULL; + } + LIBCOMM_ELOG(WARNING, "unrecognized ssl ciphers name: \"%s\"", errormessage); + Assert(0); + return; + } + token = strtok_r(NULL, ";", &ptok); + i++; + find_ciphers_in_list = false; + } + } + if (comm_ssl_set_cipher_list( + g_instance.attr.attr_network.SSL_server_context, (const char**)ciphers_list, counter) != 1) { + if (cipherStr_tmp != NULL) { + pfree(cipherStr_tmp); + cipherStr_tmp = NULL; + } + if (ciphers_list != NULL) { + pfree(ciphers_list); + ciphers_list = NULL; + } + LIBCOMM_ELOG(WARNING, "could not set the cipher list (no valid ciphers available)"); + Assert(0); + return; + } + if (cipherStr_tmp != NULL) { + pfree(cipherStr_tmp); + cipherStr_tmp = NULL; + } + if (ciphers_list != NULL) { + pfree(ciphers_list); + ciphers_list = NULL; + } +} + +/* Check permissions of cipher file and rand file in server */ +static void comm_ssl_comm_check_cipher_file(const char* parent_dir) +{ + char cipher_file[MAXPGPATH] = {0}; + char rand_file[MAXPGPATH] = {0}; + struct stat cipherbuf; + struct stat randbuf; + int rcs = snprintf_s(cipher_file, MAXPGPATH, MAXPGPATH - 1, "%s/server%s", parent_dir, CIPHER_KEY_FILE); + securec_check_ss(rcs, "\0", "\0"); + rcs = snprintf_s(rand_file, MAXPGPATH, MAXPGPATH - 1, "%s/server%s", parent_dir, RAN_KEY_FILE); + securec_check_ss(rcs, "\0", "\0"); + if (lstat(cipher_file, &cipherbuf) != 0 || lstat(rand_file, &randbuf) != 0) + return; +#if !defined(WIN32) && !defined(__CYGWIN__) + if (!S_ISREG(cipherbuf.st_mode) || + (cipherbuf.st_mode & (S_IRWXG | S_IRWXO)) || ((cipherbuf.st_mode & S_IRWXU) == S_IRWXU)) { + LIBCOMM_ELOG(WARNING, "cipher file \"%s\" has group or world access. 
" + "Permissions should be u=rw (0600) or less.", cipher_file); + Assert(0); + return; + } + if (!S_ISREG(randbuf.st_mode) || + (randbuf.st_mode & (S_IRWXG | S_IRWXO)) || ((randbuf.st_mode & S_IRWXU) == S_IRWXU)) { + LIBCOMM_ELOG(WARNING, "rand file \"%s\" has group or world access" + "Permissions should be u=rw (0600) or less.", rand_file); + Assert(0); + return; + } + +#endif +} + +/* set the default password for certificate/private key loading */ +static void comm_ssl_comm_init_server_ssl_pwd(SSL_CTX* sslContext) +{ + char* parentdir = NULL; + KeyMode keymode = SERVER_MODE; + if (g_instance.attr.attr_security.ssl_key_file == NULL) { + LIBCOMM_ELOG(LOG, "In comm_ssl_comm_init_server_ssl_pwd, ssl_key_file is NULL"); + return; + } + if (is_absolute_path(g_instance.attr.attr_security.ssl_key_file)) { + parentdir = pstrdup(g_instance.attr.attr_security.ssl_key_file); + get_parent_directory(parentdir); + decode_cipher_files(keymode, NULL, parentdir, g_instance.attr.attr_network.server_key); + } else { + decode_cipher_files(keymode, NULL, t_thrd.proc_cxt.DataDir, g_instance.attr.attr_network.server_key); + parentdir = pstrdup(t_thrd.proc_cxt.DataDir); + } + + comm_ssl_comm_check_cipher_file(parentdir); + pfree_ext(parentdir); + + SSL_CTX_set_default_passwd_cb_userdata(sslContext, (char*)g_instance.attr.attr_network.server_key); +} + +void comm_initialize_SSL() +{ + struct stat buf; + STACK_OF(X509_NAME)* root_cert_list = NULL; + errno_t errorno = EOK; + + /* Already initialized SSL, return here */ + if (g_instance.attr.attr_network.ssl_initialized) { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, ssl_initialized is true, return"); + return; + } else { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, ssl_initialized is false"); + } + + if (!g_instance.attr.attr_network.SSL_server_context) { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, SSL_server_context is null"); + if (OPENSSL_init_ssl(0, NULL) != 1) { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, OPENSSL_init_ssl is failed"); + return; + } else { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, OPENSSL_init_ssl success"); + } + SSL_load_error_strings(); + + g_instance.attr.attr_network.SSL_server_context = SSL_CTX_new(TLS_method()); + if (!g_instance.attr.attr_network.SSL_server_context) { + LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not create SSL context"); + Assert(0); + return; + } else { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, create SSL context success"); + } + if (g_instance.attr.attr_network.server_key == NULL) { + g_instance.attr.attr_network.server_key = (GS_UCHAR*)palloc0((CIPHER_LEN + 1) * sizeof(GS_UCHAR)); + } + if (g_instance.attr.attr_network.server_key == NULL) { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, palloc server_key failed"); + } else { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, palloc server_key success"); + } + /* + * Disable moving-write-buffer sanity check, because it + * causes unnecessary failures in nonblocking send cases. 
+ */
+ SSL_CTX_set_mode(g_instance.attr.attr_network.SSL_server_context, \
+ SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+ LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, SSL_CTX_set_mode");
+
+ /* set the default password for certificate/private key loading */
+ comm_ssl_comm_init_server_ssl_pwd(g_instance.attr.attr_network.SSL_server_context);
+ LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, comm_ssl_comm_init_server_ssl_pwd");
+ char *buffer;
+ if ((buffer = getcwd(NULL, 0)) == NULL) {
+ LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, getcwd error");
+ } else {
+ free(buffer);
+ }
+
+ /* Load and verify server's certificate and private key */
+ if (SSL_CTX_use_certificate_chain_file(g_instance.attr.attr_network.SSL_server_context, \
+ g_instance.attr.attr_security.ssl_cert_file) != 1) {
+ LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not load server certificate file %s",
+ g_instance.attr.attr_security.ssl_cert_file);
+ Assert(0);
+ return;
+ }
+
+ /* check certificate file permission */
+#if !defined(WIN32) && !defined(__CYGWIN__)
+ if (stat(g_instance.attr.attr_security.ssl_cert_file, &buf) == 0) {
+ if (!S_ISREG(buf.st_mode) ||
+ (buf.st_mode & (S_IRWXG | S_IRWXO)) || ((buf.st_mode & S_IRWXU) == S_IRWXU)) {
+ LIBCOMM_ELOG(WARNING, "certificate file \"%s\" has group or world access. "
+ "Permissions should be u=rw (0600) or less.", g_instance.attr.attr_security.ssl_cert_file);
+ Assert(0);
+ return;
+ }
+ }
+#endif
+
+ if (stat(g_instance.attr.attr_security.ssl_key_file, &buf) != 0) {
+ LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not access private key file %s",
+ g_instance.attr.attr_security.ssl_key_file);
+ Assert(0);
+ return;
+ }
+
+ /*
+ * Require no public access to key file.
+ *
+ * XXX temporarily suppress check when on Windows, because there may
+ * not be proper support for Unix-y file permissions. Need to think
+ * of a reasonable check to apply on Windows. (See also the data
+ * directory permission check in postmaster.c)
+ */
+#if !defined(WIN32) && !defined(__CYGWIN__)
+ if (!S_ISREG(buf.st_mode) ||
+ (buf.st_mode & (S_IRWXG | S_IRWXO)) || ((buf.st_mode & S_IRWXU) == S_IRWXU)) {
+ LIBCOMM_ELOG(WARNING,
+ "private key file \"%s\" has group or world access. Permissions should be u=rw (0600) or less.",
+ g_instance.attr.attr_security.ssl_key_file);
+ }
+#endif
+
+ if (SSL_CTX_use_PrivateKey_file(g_instance.attr.attr_network.SSL_server_context, \
+ g_instance.attr.attr_security.ssl_key_file, \
+ SSL_FILETYPE_PEM) != 1) {
+ LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not load private key file \"%s\"",
+ g_instance.attr.attr_security.ssl_key_file);
+ Assert(0);
+ return;
+ }
+
+ if (SSL_CTX_check_private_key(g_instance.attr.attr_network.SSL_server_context) != 1) {
+ LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, check of private key(%s) failed",
+ g_instance.attr.attr_security.ssl_key_file);
+ }
+ }
+
+ /* check ca certificate file permission */
+#if !defined(WIN32) && !defined(__CYGWIN__)
+ if (stat(g_instance.attr.attr_security.ssl_ca_file, &buf) == 0) {
+ if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) || ((buf.st_mode & S_IRWXU) == S_IRWXU)) {
+ LIBCOMM_ELOG(WARNING,
+ "ca certificate file \"%s\" has group or world access. Permissions should be u=rw (0600) or less.",
+ g_instance.attr.attr_security.ssl_ca_file);
+ Assert(0);
+ return;
+ }
+ }
+#endif
+
+ /* Check the signature algorithm. */
+ if (check_certificate_signature_algrithm(g_instance.attr.attr_network.SSL_server_context)) {
+ LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, the server certificate contains a low-intensity signature algorithm");
+ }
+
+ /* Check the certificate expiration time. */
+ long leftspan = check_certificate_time(g_instance.attr.attr_network.SSL_server_context,
+ u_sess->attr.attr_security.ssl_cert_notify_time);
+ if (leftspan > 0) {
+ int leftdays = (leftspan / 86400 > 0) ? (leftspan / 86400) : 1;
+ if (leftdays > 1) {
+ LIBCOMM_ELOG(WARNING, "The server certificate will expire in %d days", leftdays);
+ } else {
+ LIBCOMM_ELOG(WARNING, "The server certificate will expire in %d day", leftdays);
+ }
+ }
+
+ /* set up ephemeral DH keys, and disallow SSL v2 while at it;
+ * freeing the DH directly is safe as DH objects are reference counted
+ */
+ DH* dhkey = comm_ssl_genDHKeyPair(DHKey3072);
+ if (dhkey == NULL) {
+ LIBCOMM_ELOG(WARNING, "DH: generating parameters (3072 bits) failed");
+ Assert(0);
+ return;
+ }
+ SSL_CTX_set_tmp_dh(g_instance.attr.attr_network.SSL_server_context, dhkey);
+ DH_free(dhkey);
+
+ /* SSL2.0/SSL3.0/TLS1.0/TLS1.1 are forbidden here. */
+ SSL_CTX_set_options(g_instance.attr.attr_network.SSL_server_context,
+ SSL_OP_SINGLE_DH_USE | SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1);
+
+ /* set up the allowed cipher list */
+ if (strcasecmp(g_instance.attr.attr_security.SSLCipherSuites, "ALL") == 0) {
+ comm_ssl_set_default_ssl_ciphers();
+ } else {
+ comm_ssl_set_user_config_ssl_ciphers(g_instance.attr.attr_security.SSLCipherSuites);
+ }
+
+ /* Load CA store, so we can verify client certificates if needed.
*/ + if (g_instance.attr.attr_security.ssl_ca_file[0]) { + if (SSL_CTX_load_verify_locations(g_instance.attr.attr_network.SSL_server_context, + g_instance.attr.attr_security.ssl_ca_file, NULL) != 1) { + LIBCOMM_ELOG(WARNING, "could not load the ca certificate file"); + } + + root_cert_list = SSL_load_client_CA_file(g_instance.attr.attr_security.ssl_ca_file); + if (root_cert_list == NULL) { + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, could not load root certificate file %s", + g_instance.attr.attr_security.ssl_ca_file); + } + } + + /* Load the Certificate Revocation List (CRL). */ + if (g_instance.attr.attr_security.ssl_crl_file[0]) { + X509_STORE* cvstore = SSL_CTX_get_cert_store(g_instance.attr.attr_network.SSL_server_context); + if (cvstore != NULL) { + /* Set the flags to check against the complete CRL chain */ + if (1 == X509_STORE_load_locations(cvstore, g_instance.attr.attr_security.ssl_crl_file, NULL)) { + (void)X509_STORE_set_flags(cvstore, X509_V_FLAG_CRL_CHECK); + } else { + LIBCOMM_ELOG(WARNING, "In comm_initialize_SSL, could not load SSL certificate revocation list file %s", + g_instance.attr.attr_security.ssl_crl_file); + } + } + } + + if (g_instance.attr.attr_security.ssl_ca_file[0]) { + /* + * Always ask for SSL client cert, but don't fail if it's not + * presented. We might fail such connections later, depending on + * what we find in pg_hba.conf. + */ + SSL_CTX_set_verify(g_instance.attr.attr_network.SSL_server_context, + (SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE), + comm_ssl_verify_cb); + + /* Increase the depth to support multi-level certificate. */ + SSL_CTX_set_verify_depth(g_instance.attr.attr_network.SSL_server_context, + (MAX_CERTIFICATE_DEPTH_SUPPORTED - 2)); + + /* + * send the list of root certs we trust to clients in + * CertificateRequests. This lets a client with a keystore select the + * appropriate client certificate to send to us. 
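+ * (Without SSL_CTX_set_client_CA_list() the CertificateRequest would
+ * carry an empty CA name list, giving a client that holds several
+ * certificates no hint as to which one this server trusts.)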
+ */ + SSL_CTX_set_client_CA_list(g_instance.attr.attr_network.SSL_server_context, root_cert_list); + } + + /* clear the sensitive info in server_key */ + errorno = memset_s(g_instance.attr.attr_network.server_key, CIPHER_LEN + 1, 0, CIPHER_LEN + 1); + securec_check(errorno, "\0", "\0"); + + g_instance.attr.attr_network.ssl_initialized = true; + LIBCOMM_ELOG(LOG, "In comm_initialize_SSL, set ssl_initialized true"); +} + +libcomm_sslinfo** comm_ssl_find_port(libcomm_sslinfo** head, int sock) +{ + Assert(head != NULL); + + libcomm_sslinfo** p; + for (p = head; *p != NULL && (*p)->node.sock != sock; p = &(*p)->next) { + // find node until sock is same + } + if (*p == NULL) { + return p; + } + if ((*p)->node.sock == sock) { + return p; + } + // should never go here + LIBCOMM_ELOG(LOG, "In comm_ssl_find_port, find info of sock[%d] failed", sock); + return head; +} + +SSL* comm_ssl_find_ssl_by_fd(int sock) +{ + libcomm_sslinfo* port = g_instance.comm_cxt.libcomm_ctrl_port_list; + while (port != NULL) { + if (port->node.sock == sock) { + return port->node.ssl; + } else { + port = port->next; + } + } + port = g_instance.comm_cxt.libcomm_data_port_list; + while (port != NULL) { + if (port->node.sock == sock) { + return port->node.ssl; + } else { + port = port->next; + } + } + LIBCOMM_ELOG(LOG, "In comm_ssl_find_ssl_by_fd, find ssl of sock[%d] failed", sock); + return NULL; +} + #endif \ No newline at end of file diff --git a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_thread.cpp b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_thread.cpp index ad6d68549..1aef6612a 100644 --- a/src/gausskernel/cbb/communication/libcomm_utils/libcomm_thread.cpp +++ b/src/gausskernel/cbb/communication/libcomm_utils/libcomm_thread.cpp @@ -1317,10 +1317,12 @@ static void CommReceiverFlowerAcceptNewConn(const struct sock_id* tFdId, int ltk return; } #endif + /* Server side gss kerberos authentication for tcp connection. * if GSS authentication SUCC, no IP authentication is required. */ if (g_instance.comm_cxt.localinfo_cxt.gs_krb_keyfile != NULL) { +#ifdef ENABLE_GSS if (GssServerAuth(ctk, g_instance.comm_cxt.localinfo_cxt.gs_krb_keyfile) < 0) { mc_tcp_close(ctk); errno = ECOMMTCPGSSAUTHFAIL; @@ -1334,6 +1336,12 @@ static void CommReceiverFlowerAcceptNewConn(const struct sock_id* tFdId, int ltk COMM_DEBUG_LOG("(r|flow ctrl)\tControl channel GSS authentication SUCC, listen socket[%d]:%s.", ltk, mc_strerror(errno)); +#else + mc_tcp_close(ctk); + LIBCOMM_ELOG(WARNING, + "(r|flow ctrl)\tControl channel GSS authentication is disable."); + return; +#endif } else { /* send signal to postmaster thread to reload hba */ if (gs_reload_hba(ltk, ctrlClient)) { @@ -1750,6 +1758,7 @@ static int CommReceiverAcceptNewConnect(const struct sock_id *fdId, int selfid) LIBCOMM_ELOG( LOG, "(r|recv loop)\tDetect incoming connection, socket[%d] from [%s].", clientSocket, ipstr); +#ifdef ENABLE_GSS /* * server side gss kerberos authentication for data connection. * authentication for tcp mode after accept. 
@@ -1770,6 +1779,8 @@ static int CommReceiverAcceptNewConnect(const struct sock_id *fdId, int selfid) "(r|recv loop)\tData channel GSS authentication SUCC, listen socket[%d].", fdId->fd); } +#endif + #ifdef USE_SSL if (g_instance.attr.attr_network.comm_enable_SSL) { LIBCOMM_ELOG(LOG, "CommReceiverAcceptNewConnect call comm_ssl_open_server, fd is %d, id is %d, sock is %d", fdId->fd, fdId->id, clientSocket); diff --git a/src/gausskernel/cbb/extension/foreign/foreign.cpp b/src/gausskernel/cbb/extension/foreign/foreign.cpp index 2fa31d313..15c856db9 100644 --- a/src/gausskernel/cbb/extension/foreign/foreign.cpp +++ b/src/gausskernel/cbb/extension/foreign/foreign.cpp @@ -32,6 +32,7 @@ #include "utils/rel_gs.h" #include "utils/syscache.h" #include "cipher.h" +#include "utils/knl_relcache.h" extern Datum pg_options_to_table(PG_FUNCTION_ARGS); extern Datum postgresql_fdw_validator(PG_FUNCTION_ARGS); @@ -610,8 +611,8 @@ FdwRoutine* GetFdwRoutineForRelation(Relation relation, bool makecopy) /* Get the info by consulting the catalogs and the FDW code */ fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(relation)); - /* Save the data for later reuse in u_sess->cache_mem_cxt */ - cfdwroutine = (FdwRoutine*)MemoryContextAlloc(u_sess->cache_mem_cxt, sizeof(FdwRoutine)); + /* Save the data for later reuse in LocalMyDBCacheMemCxt() */ + cfdwroutine = (FdwRoutine*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), sizeof(FdwRoutine)); rc = memcpy_s(cfdwroutine, sizeof(FdwRoutine), fdwroutine, sizeof(FdwRoutine)); securec_check(rc, "", ""); diff --git a/src/gausskernel/cbb/extension/foreign/regioninfo.cpp b/src/gausskernel/cbb/extension/foreign/regioninfo.cpp index 4f3d04932..ec937a859 100644 --- a/src/gausskernel/cbb/extension/foreign/regioninfo.cpp +++ b/src/gausskernel/cbb/extension/foreign/regioninfo.cpp @@ -176,6 +176,7 @@ char* readDataFromJsonFile(char* region) static bool clean_region_info() { +#ifndef ENABLE_LITE_MODE Relation rel; TableScanDesc scan; HeapTuple tuple; @@ -211,6 +212,10 @@ static bool clean_region_info() ereport(LOG, (errmodule(MOD_DFS), errmsg("clean %d region info.", cleanNum))); return ret; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + return true; +#endif } /** diff --git a/src/gausskernel/cbb/grpc/remote_read_client.cpp b/src/gausskernel/cbb/grpc/remote_read_client.cpp index a60b8c4e6..a2a9946ac 100755 --- a/src/gausskernel/cbb/grpc/remote_read_client.cpp +++ b/src/gausskernel/cbb/grpc/remote_read_client.cpp @@ -22,6 +22,7 @@ * * ------------------------------------------------------------------------- */ + #include "securec.h" #include "service/remote_read_client.h" #include "catalog/catalog.h" @@ -29,12 +30,16 @@ #include "utils/elog.h" #include "libpq/libpq-fe.h" #include "libpq/libpq-int.h" +#include "replication/walreceiver.h" + +#define atou64(x) ((uint64)strtoul((x), NULL, 0)) +const int DEFAULT_WAIT_TIMES = 60; /* same as max CU size */ const static int RPC_MAX_MESSAGE_SIZE = 1024 * 1024 * 1024; -int standby_recv_timeout = 120; /* 120 sec = default */ -#define IP_LEN 64 +const int standby_recv_timeout = 120; /* 120 sec = default */ +static int MAX_IP_LEN = 64; /* default ip len */ #define disconnect_and_return_null(tempconn) \ do { \ @@ -88,6 +93,8 @@ int RemoteGetCU(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relno struct replconninfo *conninfo = NULL; char* remoteHost; int remotePort = 0; + char* ipNoZone = NULL; + char ipNoZoneData[MAX_IP_LEN] = {0}; if (size < 0 || size > RPC_MAX_MESSAGE_SIZE) { errCode = REMOTE_READ_SIZE_ERROR; @@ -97,13 +104,17 @@ int 
RemoteGetCU(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relno tnRet = memset_s(remoteReadConnInfo, MAXPGPATH, 0, MAXPGPATH); securec_check(tnRet, "\0", "\0"); - remoteHost = strtok_s(remoteAddress, ":", &tempToken); + remoteHost = strtok_s(remoteAddress, "@", &tempToken); for (int i = 0; i < MAX_REPLNODE_NUM; i++) { conninfo = t_thrd.postmaster_cxt.ReplConnArray[i]; if (conninfo == NULL) { continue; } - if (strcmp(remoteHost, conninfo->remotehost) == 0) { + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(conninfo->remotehost, ipNoZoneData, MAX_IP_LEN); + + if (strcmp(remoteHost, ipNoZone) == 0) { remotePort = conninfo->remoteport; break; } @@ -118,7 +129,7 @@ int RemoteGetCU(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relno "connect_timeout=5 rw_timeout=%d", t_thrd.postmaster_cxt.ReplConnArray[1]->localhost, t_thrd.postmaster_cxt.ReplConnArray[1]->localport, - remoteHost, + (remotePort == 0) ? remoteHost : conninfo->remotehost, remotePort, standby_recv_timeout); securec_check_ss(tnRet, "", ""); @@ -129,8 +140,8 @@ int RemoteGetCU(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relno } tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, - "SELECT gs_read_block_from_remote(%u, %u, %u, %d, %d, '%lu', %d, '%lu', true);", - spcnode, dbnode, relnode, 0, colid, offset, size, lsn); + "SELECT gs_read_block_from_remote(%u, %u, %u, %d, %d, '%lu', %d, '%lu', true, %d);", + spcnode, dbnode, relnode, 0, colid, offset, size, lsn, DEFAULT_WAIT_TIMES); securec_check_ss(tnRet, "", ""); res = PQexecParams(conGet, (const char*)sqlCommands, 0, NULL, NULL, NULL, NULL, 1); @@ -167,7 +178,7 @@ int RemoteGetCU(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relno return errCode; } - tnRet = memcpy_s(cuData, len + 1, PQgetvalue(res, 0, 0), len); + tnRet = memcpy_s(cuData, len, PQgetvalue(res, 0, 0), len); securec_check(tnRet, "\0", "\0"); PQclear(res); res = NULL; @@ -177,40 +188,22 @@ int RemoteGetCU(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relno return errCode; } -/* - * @Description: remote read page - * @IN/OUT remote_address:remote address - * @IN spcnode: tablespace id - * @IN dbnode: database id - * @IN relnode: relfilenode - * @IN bucketnode: bucketnode - * @IN opt: compressed table options - * @IN/OUT forknum: forknum - * @IN/OUT blocknum: block number - * @IN/OUT blocksize: block size - * @IN/OUT lsn: current lsn - * @IN/OUT page_data: pointer of page data - * @Return: remote read error code - */ -int RemoteGetPage(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 relnode, int2 bucketnode, uint2 opt, - int32 forknum, uint32 blocknum, uint32 blocksize, uint64 lsn, char* pageData) +int GetRemoteConnInfo(char* remoteAddress, char* remoteReadConnInfo, int len) { - PGconn* conGet = NULL; - PGresult* res = NULL; int errCode = REMOTE_READ_OK; - int len = 0; - char remoteReadConnInfo[MAXPGPATH]; - char sqlCommands[MAX_PATH_LEN] = {0}; int tnRet = 0; char* tempToken = NULL; struct replconninfo *conninfo = NULL; char* remoteHost = NULL; int remotePort = 0; + char* ipNoZone = NULL; + char ipNoZoneData[MAX_IP_LEN] = {0}; + int timeout = standby_recv_timeout; - tnRet = memset_s(remoteReadConnInfo, MAXPGPATH, 0, MAXPGPATH); + tnRet = memset_s(remoteReadConnInfo, len, 0, len); securec_check(tnRet, "\0", "\0"); - remoteHost = strtok_s(remoteAddress, ":", &tempToken); + remoteHost = strtok_s(remoteAddress, "@", &tempToken); if (remoteHost == NULL) { errCode = REMOTE_READ_RPC_ERROR; return 
errCode; @@ -220,34 +213,79 @@ int RemoteGetPage(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 rel if (conninfo == NULL) { continue; } - if (strcmp(remoteHost, conninfo->remotehost) == 0) { + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(conninfo->remotehost, ipNoZoneData, MAX_IP_LEN); + if (strcmp(remoteHost, ipNoZone) == 0) { remotePort = conninfo->remoteport; break; } } - tnRet = snprintf_s(remoteReadConnInfo, - sizeof(remoteReadConnInfo), - sizeof(remoteReadConnInfo) - 1, + if (t_thrd.storage_cxt.timeoutRemoteOpera != 0) { + timeout += t_thrd.storage_cxt.timeoutRemoteOpera; + } + + tnRet = snprintf_s(remoteReadConnInfo, len, len - 1, "localhost=%s localport=%d host=%s port=%d " - "application_name=remote_read " - "dbname=postgres " + "application_name=remote_read dbname=postgres " "connect_timeout=5 rw_timeout=%d", t_thrd.postmaster_cxt.ReplConnArray[1]->localhost, t_thrd.postmaster_cxt.ReplConnArray[1]->localport, - remoteHost, + (remotePort == 0) ? remoteHost : conninfo->remotehost, remotePort, - standby_recv_timeout); + timeout); securec_check_ss(tnRet, "", ""); + return errCode; +} + + +/* + * @Description: remote read page + * @IN/OUT remote_address:remote address + * @IN spcnode: tablespace id + * @IN dbnode: database id + * @IN relnode: relfilenode + * @IN/OUT forknum: forknum + * @IN/OUT blocknum: block number + * @IN/OUT blocksize: block size + * @IN/OUT lsn: current lsn + * @IN/OUT page_data: pointer of page data + * @Return: remote read error code + */ +extern int RemoteGetPage(char* remoteAddress, RepairBlockKey *key, uint32 blocksize, uint64 lsn, char* pageData, + const XLogPhyBlock *pblk, int timeout) +{ + PGconn* conGet = NULL; + PGresult* res = NULL; + int errCode = REMOTE_READ_OK; + int len = 0; + char remoteReadConnInfo[MAXPGPATH]; + char sqlCommands[MAX_PATH_LEN] = {0}; + int tnRet = 0; + + errCode = GetRemoteConnInfo(remoteAddress, remoteReadConnInfo, MAXPGPATH); + if (errCode != REMOTE_READ_OK) { + return errCode; + } conGet = RemoteReadGetConn(remoteReadConnInfo); if (conGet == NULL) { errCode = REMOTE_READ_RPC_ERROR; return errCode; } - tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, - "SELECT gs_read_block_from_remote(%u, %u, %u, %d, %d, %d, '%lu', %u, '%lu', false);", spcnode, - dbnode, relnode, bucketnode, (int2)opt, forknum, blocknum, blocksize, lsn); + if (pblk != NULL) { + tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, + "SELECT gs_read_segment_block_from_remote(%u, %u, %u, %d, %d, '%lu', %u, '%lu', %u, %u, %d);", + key->relfilenode.spcNode, key->relfilenode.dbNode, key->relfilenode.relNode, + key->relfilenode.bucketNode, key->forknum, key->blocknum, blocksize, lsn, pblk->relNode, + pblk->block, timeout); + } else { + tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, + "SELECT gs_read_block_from_remote(%u, %u, %u, %d, %d, '%lu', %u, '%lu', false, %d);", + key->relfilenode.spcNode, key->relfilenode.dbNode, key->relfilenode.relNode, + key->relfilenode.bucketNode, key->forknum, key->blocknum, blocksize, lsn, timeout); + } securec_check_ss(tnRet, "", ""); @@ -255,7 +293,7 @@ int RemoteGetPage(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 rel if (PQresultStatus(res) != PGRES_TUPLES_OK) { ereport(WARNING, (errmodule(MOD_REMOTE), - errmsg("could not get remote buffer: %s", PQresultErrorMessage(res)))); + errmsg("could not get remote buffer: %s", PQresultErrorMessage(res)))); errCode = REMOTE_READ_RPC_ERROR; PQclear(res); res = NULL; @@ -264,9 +302,9 
@@ int RemoteGetPage(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 rel return errCode; } - if (PQntuples(res) != 1 || PQgetisnull(res, 0, 0)) { + if (PQgetisnull(res, 0, 0)) { ereport(WARNING, (errmodule(MOD_REMOTE), - errmsg("remote file get null: %s", PQresultErrorMessage(res)))); + errmsg("remote get page, the executed res is null: %s", PQresultErrorMessage(res)))); errCode = REMOTE_READ_RPC_ERROR; PQclear(res); res = NULL; @@ -274,16 +312,17 @@ int RemoteGetPage(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 rel conGet = NULL; return errCode; } + len = PQgetlength(res, 0, 0); - if (len < 0 || (uint32)len > blocksize) { + if (len < 0 || (uint32)len != blocksize) { ereport(WARNING, (errmodule(MOD_REMOTE), errmsg("remote request get incorrect length: %s", PQresultErrorMessage(res)))); - errCode = REMOTE_READ_SIZE_ERROR; - PQclear(res); - res = NULL; - PQfinish(conGet); - conGet = NULL; - return errCode; + errCode = REMOTE_READ_SIZE_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; } tnRet = memcpy_s(pageData, len + 1, PQgetvalue(res, 0, 0), len); @@ -295,3 +334,193 @@ int RemoteGetPage(char* remoteAddress, uint32 spcnode, uint32 dbnode, uint32 rel return errCode; } + +int RemoteGetFileSize(char* remoteAddress, RemoteReadFileKey *key, uint64 lsn, int64 *size, int timeout) +{ + PGconn* conGet = NULL; + PGresult* res = NULL; + int errCode = REMOTE_READ_OK; + char remoteReadConnInfo[MAXPGPATH]; + char sqlCommands[MAX_PATH_LEN] = {0}; + int tnRet = 0; + + errCode = GetRemoteConnInfo(remoteAddress, remoteReadConnInfo, MAXPGPATH); + if (errCode != REMOTE_READ_OK) { + return errCode; + } + conGet = RemoteReadGetConn(remoteReadConnInfo); + if (conGet == NULL) { + errCode = REMOTE_READ_CONN_ERROR; + return errCode; + } + + tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, + "SELECT gs_read_file_size_from_remote(%u, %u, %u, %d, %d, '%lu', %d);", + key->relfilenode.spcNode, key->relfilenode.dbNode, key->relfilenode.relNode, key->relfilenode.bucketNode, + key->forknum, lsn, timeout); + securec_check_ss(tnRet, "", ""); + + res = PQexecParams(conGet, (const char*)sqlCommands, 0, NULL, NULL, NULL, NULL, 0); + if (PQresultStatus(res) != PGRES_TUPLES_OK) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("could not get remote file size: %s", PQresultErrorMessage(res)))); + errCode = REMOTE_READ_RPC_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + if (PQgetisnull(res, 0, 0)) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("remote get file size, remote request return null: %s", PQresultErrorMessage(res)))); + errCode = REMOTE_READ_RPC_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + *size = atou64(PQgetvalue(res, 0, 0)); + if (*size >= 0 && (*size % BLCKSZ != 0)) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("remote get file size, size is %lu, remote request get incorrect length: %s", + *size, PQresultErrorMessage(res)))); + errCode = REMOTE_READ_SIZE_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + + return errCode; +} + +const int CLOG_NODE = 1; +const int CSN_NODE = 2; +int RemoteGetFile(char* remoteAddress, RemoteReadFileKey* key, uint64 lsn, uint32 size, char* fileData, + XLogRecPtr *remote_lsn, uint32 *remote_size, int timeout) +{ + PGconn* conGet = NULL; + PGresult* res = NULL; + int 
errCode = REMOTE_READ_OK; + int32 len = 0; + char remoteReadConnInfo[MAXPGPATH]; + char sqlCommands[MAX_PATH_LEN] = {0}; + int tnRet = 0; + + errCode = GetRemoteConnInfo(remoteAddress, remoteReadConnInfo, MAXPGPATH); + if (errCode != REMOTE_READ_OK) { + return errCode; + } + conGet = RemoteReadGetConn(remoteReadConnInfo); + if (conGet == NULL) { + errCode = REMOTE_READ_RPC_ERROR; + return errCode; + } + + tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, + "SELECT * from gs_read_file_from_remote(%u, %u, %u, %d, %d, %d, '%lu', %d);", + key->relfilenode.spcNode, key->relfilenode.dbNode, key->relfilenode.relNode, key->relfilenode.bucketNode, + key->forknum, key->blockstart, lsn, timeout); + securec_check_ss(tnRet, "", ""); + + res = PQexecParams(conGet, (const char*)sqlCommands, 0, NULL, NULL, NULL, NULL, 1); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("could not get remote file: %s", PQresultErrorMessage(res)))); + errCode = REMOTE_READ_RPC_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + if (PQgetisnull(res, 0, 0)) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("remote get file, remote request return null: %s", PQresultErrorMessage(res)))); + errCode = REMOTE_READ_RPC_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + len = PQgetlength(res, 0, 0); + *remote_size = len; + + /* primary get the file from standby, the len need same with size obtained from the standby DN */ + if (!RecoveryInProgress() && len != (int)size && (key->relfilenode.spcNode != CLOG_NODE && + key->relfilenode.spcNode != CSN_NODE)) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("remote request get incorrect length %u request size is %u : %s", len, size, + PQresultErrorMessage(res)))); + errCode = REMOTE_READ_SIZE_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + if (len < 0 || (uint32)len > MAX_BATCH_READ_BLOCKNUM * BLCKSZ) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("remote request get incorrect length %u : %s", len, PQresultErrorMessage(res)))); + errCode = REMOTE_READ_SIZE_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + if (len != 0) { + int copylen = len <= (int)size ? 
len : size; + tnRet = memcpy_s(fileData, size, PQgetvalue(res, 0, 0), copylen); + securec_check(tnRet, "\0", "\0"); + } + + if (RecoveryInProgress()) { + tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1, + "SELECT * from gs_current_xlog_insert_end_location();"); + securec_check_ss(tnRet, "", ""); + + res = PQexecParams(conGet, (const char*)sqlCommands, 0, NULL, NULL, NULL, NULL, 0); + if (PQresultStatus(res) != PGRES_TUPLES_OK || PQgetisnull(res, 0, 0)) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("could not get remote lsn or retrun null: %s", PQresultErrorMessage(res)))); + errCode = REMOTE_READ_RPC_ERROR; + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + return errCode; + } + + uint32 hi = 0; + uint32 lo = 0; + /* get remote lsn location */ + if (sscanf_s(PQgetvalue(res, 0, 0), "%X/%X", &hi, &lo) != 2) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse log location \"%s\"", PQgetvalue(res, 0, 0)))); + + *remote_lsn = (((uint64)hi) << 32) | lo; + } + PQclear(res); + res = NULL; + PQfinish(conGet); + conGet = NULL; + + return errCode; +} diff --git a/src/gausskernel/cbb/instruments/ash/ash.cpp b/src/gausskernel/cbb/instruments/ash/ash.cpp index 18ffb49c4..b4e5e016f 100755 --- a/src/gausskernel/cbb/instruments/ash/ash.cpp +++ b/src/gausskernel/cbb/instruments/ash/ash.cpp @@ -1065,9 +1065,9 @@ static void ReloadInfo() t_thrd.ash_cxt.got_SIGHUP = false; ProcessConfigFile(PGC_SIGHUP); } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } } diff --git a/src/gausskernel/cbb/instruments/ash/wait_event_info.cpp b/src/gausskernel/cbb/instruments/ash/wait_event_info.cpp index ab1926fa0..58bb95fab 100644 --- a/src/gausskernel/cbb/instruments/ash/wait_event_info.cpp +++ b/src/gausskernel/cbb/instruments/ash/wait_event_info.cpp @@ -30,7 +30,7 @@ typedef struct EventInfo { char* type; char* event; } EventInfo; -#define WAIT_EVENT_SIZE 250 +#define WAIT_EVENT_SIZE 256 struct EventInfo waitEventInfo[WAIT_EVENT_SIZE] = { {"none", "STATUS", "none"}, {"LWLock", "STATUS", "acquire lwlock"}, @@ -52,6 +52,11 @@ struct EventInfo waitEventInfo[WAIT_EVENT_SIZE] = { {"Transaction", "STATUS", "wait data sync"}, {"Transaction", "STATUS", "wait data sync queue"}, {"COMM", "STATUS", "flush data"}, + {"Transaction", "STATUS", "wait reserve td"}, + {"Transaction", "STATUS", "wait td rollback"}, + {"Transaction", "STATUS", "wait transaction rollback"}, + {"Data file", "STATUS", "prune table"}, + {"Data file", "STATUS", "prune index"}, {"COMM", "STATUS", "stream get conn"}, {"COMM", "STATUS", "wait producer ready"}, {"Stream", "STATUS", "synchronize quit"}, @@ -284,7 +289,8 @@ struct EventInfo waitEventInfo[WAIT_EVENT_SIZE] = { {"Plugin", "LWLOCK_EVENT", "GeneralExtendedLock"}, {"plugin", "LWLOCK_EVENT", "extension"}, {"plugin", "LWLOCK_EVENT", "extension"}, - {"Transaction", "LWLOCK_EVENT", "TwoPhaseStatePartLock"} + {"Transaction", "LWLOCK_EVENT", "TwoPhaseStatePartLock"}, + {"Relation", "LWLOCK_EVENT", "NgroupDestoryLock"} }; Datum get_wait_event_info(PG_FUNCTION_ARGS) diff --git a/src/gausskernel/cbb/instruments/percentile/percentile.cpp b/src/gausskernel/cbb/instruments/percentile/percentile.cpp index 31450916f..04122c37d 100755 --- a/src/gausskernel/cbb/instruments/percentile/percentile.cpp +++ b/src/gausskernel/cbb/instruments/percentile/percentile.cpp @@ -207,9 +207,9 @@ NON_EXEC_STATIC void PercentileMain() 
pgstat_report_activity(STATE_IDLE, NULL); while (!t_thrd.percentile_cxt.need_exit) { - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } if (t_thrd.percentile_cxt.got_SIGHUP) { t_thrd.percentile_cxt.got_SIGHUP = false; @@ -341,9 +341,9 @@ void PercentileSpace::SubPercentileMain(void) pgstat_report_activity(STATE_RUNNING, NULL); t_thrd.percentile_cxt.need_reset_timer = false; } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } if (t_thrd.percentile_cxt.got_SIGHUP) { t_thrd.percentile_cxt.got_SIGHUP = false; diff --git a/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp b/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp index 8d1ba3935..e61eee98d 100644 --- a/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp +++ b/src/gausskernel/cbb/instruments/slow_query/gs_stat.cpp @@ -170,6 +170,25 @@ void gs_stat_get_timeout_beentry(int timeout_threshold, Tuplestorestate* tupStor } } +/* + * Free a PgBackendStatusNode list and its underlying palloc-ed data, including st_appname, st_clienthostname, + * st_conninfo and st_activity. These four fields are the only ones palloc-ed in function 'gs_stat_encap_status_info'. + */ +void FreeBackendStatusNodeMemory(PgBackendStatusNode* node) +{ + while (node != NULL) { + if (node->data != NULL) { + pfree_ext(node->data->st_appname); + pfree_ext(node->data->st_clienthostname); + pfree_ext(node->data->st_conninfo); + pfree_ext(node->data->st_activity); + } + pfree_ext(node->data); + PgBackendStatusNode* tempNode = node; + node = node->next; + pfree_ext(tempNode); + } +} bool gs_stat_encap_status_info(PgBackendStatus* localentry, PgBackendStatus* beentry) { @@ -211,6 +230,7 @@ bool gs_stat_encap_status_info(PgBackendStatus* localentry, PgBackendStatus* bee securec_check(rc, "", ""); localentry->st_block_sessionid = beentry->st_block_sessionid; localentry->globalSessionId = beentry->globalSessionId; + localentry->st_unique_sql_key = beentry->st_unique_sql_key; } pgstat_save_changecount_after(beentry, after_changecount); diff --git a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp index 6d9fd1b1f..ed9d1a371 100644 --- a/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_handle_mgr.cpp @@ -68,6 +68,10 @@ void statement_init_metric_context() { StatementStatContext *reusedHandle = NULL; + /* do not assign a handle when the statement flush thread has not been started */ + if (g_instance.pid_cxt.StatementPID == 0) { + return; + } CHECK_STMT_TRACK_ENABLED(); /* create context under TopMemoryContext */ diff --git a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp index d717ee9bf..89e2d8ab2 100755 --- a/src/gausskernel/cbb/instruments/statement/instr_statement.cpp +++ b/src/gausskernel/cbb/instruments/statement/instr_statement.cpp @@ -73,6 +73,7 @@ #define STATEMENT_DETAILS_HEAD_SIZE (1 + 1) /* [VERSION] + [TRUNCATED] */ #define INSTR_STMT_UNIX_DOMAIN_PORT (-1) +#define INSTR_STATEMENT_ATTRNUM 52 /* lock/lwlock's detail information */ typedef struct { @@ -206,9 +207,9 @@ static void ReloadInfo() ProcessConfigFile(PGC_SIGHUP); } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); -
u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } } @@ -314,8 +315,8 @@ static HeapTuple GetStatementTuple(Relation rel, StatementStatContext* statement const knl_u_statement_context* statementCxt) { int i = 0; - Datum values[51]; - bool nulls[51] = {false}; + Datum values[INSTR_STATEMENT_ATTRNUM]; + bool nulls[INSTR_STATEMENT_ATTRNUM] = {false}; errno_t rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); securec_check(rc, "\0", "\0"); @@ -373,6 +374,8 @@ static HeapTuple GetStatementTuple(Relation rel, StatementStatContext* statement values[i++] = BoolGetDatum( (statementInfo->finish_time - statementInfo->start_time >= statementInfo->slow_query_threshold && statementInfo->slow_query_threshold >= 0) ? true : false); + SET_TEXT_VALUES(statementInfo->trace_id, i++); + Assert(INSTR_STATEMENT_ATTRNUM == i); return heap_form_tuple(RelationGetDescr(rel), values, nulls); } @@ -1440,10 +1443,19 @@ void instr_stmt_report_debug_query_id(uint64 debug_query_id) CURRENT_STMT_METRIC_HANDLE->debug_query_id = debug_query_id; } +void instr_stmt_report_trace_id(char *trace_id) +{ + CHECK_STMT_HANDLE(); + errno_t rc = + memcpy_s(CURRENT_STMT_METRIC_HANDLE->trace_id, MAX_TRACE_ID_SIZE, trace_id, strlen(trace_id) + 1); + securec_check(rc, "\0", "\0"); +} + inline void instr_stmt_track_param_query(const char *query) { - if(query == NULL) + if (query == NULL) { return; + } if (CURRENT_STMT_METRIC_HANDLE->params == NULL) { CURRENT_STMT_METRIC_HANDLE->query = pstrdup(query); } else { diff --git a/src/gausskernel/cbb/instruments/unique_sql/instr_unique_sql.cpp b/src/gausskernel/cbb/instruments/unique_sql/instr_unique_sql.cpp index bf9a6832e..d1cb635a3 100755 --- a/src/gausskernel/cbb/instruments/unique_sql/instr_unique_sql.cpp +++ b/src/gausskernel/cbb/instruments/unique_sql/instr_unique_sql.cpp @@ -1691,6 +1691,12 @@ Datum get_instr_unique_sql(PG_FUNCTION_ARGS) } } +bool CheckSkipSQL(Query* query) +{ + return query->utilityStmt != NULL && (IsA(query->utilityStmt, ExplainStmt) || + (IsA(query->utilityStmt, RemoteQuery) && + (((RemoteQuery*)query->utilityStmt)->exec_direct_type != EXEC_DIRECT_NONE))); +} /* * GenerateUniqueSQLInfo - generate unique sql info * @@ -1710,8 +1716,8 @@ void GenerateUniqueSQLInfo(const char* sql, Query* query) * refer to the assert in method "relation_open" */ if (sql == NULL || query == NULL || g_instance.stat_cxt.UniqueSQLHashtbl == NULL || !is_local_unique_sql() || - IsAbortedTransactionBlockState() || - (query->utilityStmt != NULL && IsA(query->utilityStmt, ExplainStmt))) { + IsAbortedTransactionBlockState() || CheckSkipSQL(query) || + u_sess->unique_sql_cxt.skipUniqueSQLCount != 0) { return; } @@ -1759,6 +1765,7 @@ void GenerateUniqueSQLInfo(const char* sql, Query* query) UpdateUniqueSQLStat(query, current_sql, 0); instr_stmt_report_query(u_sess->unique_sql_cxt.unique_sql_id); + pgstat_report_unique_sql_id(false); /* if track top enabled, only TOP SQL will generate unique sql id */ if (IS_UNIQUE_SQL_TRACK_TOP) { @@ -1822,6 +1829,7 @@ static void SetLocalUniqueSQLId(List* query_list) } #endif instr_stmt_report_query(u_sess->unique_sql_cxt.unique_sql_id); + pgstat_report_unique_sql_id(false); /* dynamic enable statement tracking */ instr_stmt_dynamic_change_level(); @@ -2510,7 +2518,7 @@ static bool AutoRecycleUniqueSQLEntry() return false; } const double cleanRatio = 0.1; - int cleanCount = Max(int(cleanRatio * instr_unique_sql_count + totalCount - instr_unique_sql_count), 1); + int cleanCount = Max(int(cleanRatio * instr_unique_sql_count + (totalCount - 
instr_unique_sql_count)), 1); /* get remove entry list */ KeyUpdatedtime* removeList = GetSortedEntryList(); if (removeList == NULL) { @@ -2662,6 +2670,7 @@ void ResetCurrentUniqueSQL(bool need_reset_cn_id) #ifndef ENABLE_MULTIPLE_NODES u_sess->unique_sql_cxt.unique_sql_text = NULL; #endif + u_sess->unique_sql_cxt.skipUniqueSQLCount = 0; } void FindUniqueSQL(UniqueSQLKey key, char* unique_sql) diff --git a/src/gausskernel/cbb/instruments/wdr/generate_report.cpp b/src/gausskernel/cbb/instruments/wdr/generate_report.cpp index 8cae36ff4..9cd75eec6 100644 --- a/src/gausskernel/cbb/instruments/wdr/generate_report.cpp +++ b/src/gausskernel/cbb/instruments/wdr/generate_report.cpp @@ -46,6 +46,7 @@ const char* g_metric_name = "Metric"; const char* g_per_second_str = "Per Second"; const char* g_per_trx_str = "Per Transaction"; const char* g_per_exec_str = "Per Exec"; +const char* g_value_str = "Value"; const char* g_per_sec_io_rw = "Read + Write Per Sec"; const char* g_per_sec_io_r = "Read Per Sec"; @@ -78,6 +79,9 @@ typedef struct _report_params { int64 snap_diff_rollback_trx_count; /* diff commit trx */ List* Contents; /* structured data pool */ } report_params; + +static bool is_cluster_report(report_params* params); + /* * Declare function */ @@ -86,7 +90,7 @@ namespace GenReport { void add_data(dashboard* spec, List** Contents); /* Generate a report in Html format */ -char* GenerateHtmlReport(List* Contents); +char* GenerateHtmlReport(report_params* params); /* Generate index for h1 title */ void GenerateListByHtml(uint32 detailrow, List* Contents, StringInfoData& listHtml); @@ -127,6 +131,7 @@ void GetTimeModelData(report_params* params); void get_summary_database_stat(report_params* params); void get_summary_load_profile(report_params* params); } // namespace GenReport + static dashboard* CreateDash(void) { dashboard* dash = NULL; @@ -141,10 +146,10 @@ static dashboard* CreateDash(void) /* * generate the type of html report */ -char* GenReport::GenerateHtmlReport(List* Contents) +char* GenReport::GenerateHtmlReport(report_params* params) { StringInfoData result; - initStringInfo(&result); + /* * declare css for html */ @@ -171,10 +176,9 @@ char* GenReport::GenerateHtmlReport(List* Contents) "background:#F4F6F6; vertical-align:top;word-break: break-word; max-width: 300px;}\n" "td.wdrc {font:8pt Arial,Helvetica,Geneva,sans-serif;color:black;" "background:White; vertical-align:top;word-break: break-word; max-width: 300px;}\n" - "td.wdrtext {font:8pt Arial,Helvetica,Geneva,sans-serif;color:black;background:White;vertical-align:top;}" - ""; + "\n"; - const char* js = "\n" ""; - appendStringInfo(&result, "%s%s

Workload Diagnosis Report

", css, js); + + initStringInfo(&result); + appendStringInfoString(&result, "\n"); + + const char* specialStyle = result.data; + + initStringInfo(&result); + appendStringInfo(&result, "%s%s%s

Workload Diagnosis Report

", css, specialStyle, js); + pfree_ext(specialStyle); + for (uint32 i = 0; i < NUM_REPROT_PART; i++) - GenReport::GenerateSummaryHtml(g_reportpart[i], Contents, result); - GenReport::GenerateDetailHtml(Contents, result); + GenReport::GenerateSummaryHtml(g_reportpart[i], params->Contents, result); + GenReport::GenerateDetailHtml(params->Contents, result); appendStringInfoString(&result, "\n"); return result.data; } @@ -316,7 +331,7 @@ static void GetOsInfo(List** osInfoList, report_params* params, List** typeList) List* cpusList = NIL; appendStringInfo(&query, - "select 'CPUS', x.snap_value from (select * from pg_node_env) t," + "select 'CPUs', x.snap_value from (select * from pg_node_env) t," " (select * from snapshot.snap_global_os_runtime) x where x.snap_node_name = t.node_name and" " x.snapshot_id = %ld and (x.snap_name = 'NUM_CPUS');", params->end_snap_id); @@ -326,7 +341,7 @@ static void GetOsInfo(List** osInfoList, report_params* params, List** typeList) List* cpuCoresList = NIL; resetStringInfo(&query); appendStringInfo(&query, - "select 'CPU Cores', x.snap_value from (select * from pg_node_env) t," + "select 'Cores', x.snap_value from (select * from pg_node_env) t," " (select * from snapshot.snap_global_os_runtime) x where x.snap_node_name = t.node_name and" " x.snapshot_id = %ld and x.snap_name = 'NUM_CPU_CORES';", params->end_snap_id); @@ -336,7 +351,7 @@ static void GetOsInfo(List** osInfoList, report_params* params, List** typeList) List* cpuSocketsList = NIL; resetStringInfo(&query); appendStringInfo(&query, - "select 'CPU Sockets', x.snap_value from (select * from pg_node_env) t," + "select 'Sockets', x.snap_value from (select * from pg_node_env) t," " (select * from snapshot.snap_global_os_runtime) x where x.snap_node_name = t.node_name and" " x.snapshot_id = %ld and x.snap_name = 'NUM_CPU_SOCKETS';", params->end_snap_id); @@ -538,6 +553,26 @@ void GenReport::DescToHtml(List* descList, StringInfoData& descHtml) } appendStringInfo(&descHtml, "\n"); } +/* + * Turn the space in the table title into an underscore, + * and use the converted String for ID generation in HTML elements. + * This method calls pstrdup method so you need to use pfree to free memory. + * Input parameters: + * table -- the structed data + */ +static char* SpaceToUnderline(char* tableTitle) +{ + if (!tableTitle) { + return tableTitle; + } + char* resultStr = pstrdup(tableTitle); + for (unsigned i = 0; i < strlen(resultStr); i++) { + if (resultStr[i] == ' ') { + resultStr[i] = '_'; + } + } + return resultStr; +} /* * Convert one dashboard data to html format * Input parameters: @@ -545,6 +580,7 @@ void GenReport::DescToHtml(List* descList, StringInfoData& descHtml) */ void GenReport::dashboad_to_html(List* rowList, StringInfoData& dashboadHtml) { + char* prevTableTitle = NULL; foreach_cell(cell, rowList) { if (lfirst(cell) == NULL) { @@ -552,28 +588,41 @@ void GenReport::dashboad_to_html(List* rowList, StringInfoData& dashboadHtml) } dashboard* dash = (dashboard*)lfirst(cell); char* tableTitle = (char*)dash->tableTitle; - appendStringInfo(&dashboadHtml, "

-%s

\n", - tableTitle, - tableTitle, - tableTitle, - tableTitle); + char* htmlId = SpaceToUnderline(tableTitle); + if (prevTableTitle == NULL || strcmp(tableTitle, prevTableTitle) != 0) { + appendStringInfo(&dashboadHtml, "

-%s

\n", + htmlId, + htmlId, + htmlId, + tableTitle); - /* Convert the description corresponding to the table to HTML format */ - appendStringInfo(&dashboadHtml, - "
\n\n
\n
\n", - tableTitle, - tableTitle); + /* Convert the description corresponding to the table to HTML format */ + appendStringInfo(&dashboadHtml, + "
\n\n
\n
\n", + htmlId, + htmlId); + } else { + appendStringInfo(&dashboadHtml, "
"); + } + pfree_ext(htmlId); GenReport::DescToHtml(dash->desc, dashboadHtml); /* Convert the table data to HTML format (include the colname corresponding to the data) */ if (dash->table == NULL) { ereport(WARNING, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("there is no available data in table"))); - continue; + } else { + GenReport::TableToHtml(dash, dashboadHtml); + } + prevTableTitle = tableTitle; + if (lnext(cell) != NULL) { + dashboard* dashNext = (dashboard*)lfirst(lnext(cell)); + if (strcmp(tableTitle, dashNext->tableTitle) == 0) { + continue; + } } - GenReport::TableToHtml(dash, dashboadHtml); appendStringInfoString(&dashboadHtml, "
\n

\n


\n" ""); } else if (j == 0 && COL_HAVE == HAVE_SQLTEXT) { appendStringInfo( - &tableHtml, "%s", cellData, cellData); + &tableHtml, "%s>%s", tableRowClz, cellData, cellData); appendStringInfoString(&tableHtml, ""); } else { if (*(Oid*)lfirst(typeCell) == NAME_TYPE || *(Oid*)lfirst(typeCell) == TEXT_TYPE) { @@ -1096,7 +1145,7 @@ static void SQLNodeTotalElapsedTime(report_params* params) /* SQL ordered by Total Elapsed Time */ appendStringInfo(&query, - "select /*+ hashjoin(c1 c2)*/t2.snap_unique_sql_id as \"Unique SQL Id\", t2.snap_user_name as \"User Name\"," + "select t2.snap_unique_sql_id as \"Unique SQL Id\", t2.snap_user_name as \"User Name\"," " (t2.snap_total_elapse_time - coalesce(t1.snap_total_elapse_time, 0)) as \"Total Elapse Time(us)\", " " (t2.snap_n_calls - coalesce(t1.snap_n_calls, 0)) as \"Calls\", " " round(\"Total Elapse Time(us)\"/greatest(\"Calls\", 1), 0) as \"Avg Elapse Time(us)\", " @@ -1123,9 +1172,9 @@ static void SQLNodeTotalElapsedTime(report_params* params) " (t2.snap_hash_spill_count - coalesce(t1.snap_hash_spill_count, 0)) as \"Hash Spill Count\", " " (t2.snap_hash_spill_size - coalesce(t1.snap_hash_spill_size, 0)) as \"Hash Spill Size(KB)\", " " LEFT(t2.snap_query, 25) as \"SQL Text\" " - " from (select * from snapshot.snap_summary_statement as c1 where snapshot_id = %ld and snap_node_name = '%s') t1" + " from (select * from snapshot.snap_summary_statement where snapshot_id = %ld and snap_node_name = '%s') t1" " right join " - " (select * from snapshot.snap_summary_statement as c2 where snapshot_id = %ld and snap_node_name = '%s') t2" + " (select * from snapshot.snap_summary_statement where snapshot_id = %ld and snap_node_name = '%s') t2" " on t1.snap_unique_sql_id = t2.snap_unique_sql_id and t1.snap_user_id = t2.snap_user_id order by \"Total " "Elapse Time(us)\" desc limit 200;", params->begin_snap_id, @@ -2116,7 +2165,7 @@ static void GlobalTableIndex(report_params* params) "WHERE i.snapshot_id = %ld) AS snap_1" " ON (snap_2.snap_relid = snap_1.snap_relid AND snap_2.snap_indexrelid = snap_1.snap_indexrelid AND " " snap_2.db_name = snap_1.db_name AND snap_2.snap_schemaname = snap_1.snap_schemaname) " - " order by snap_2.db_name, snap_2.snap_schemaname limit 200;", + " order by \"Index Tuple Read\" limit 200;", params->end_snap_id, params->end_snap_id, params->begin_snap_id, @@ -2179,50 +2228,6 @@ static void GetObjectStatData(report_params* params) } } -/* - * BackgroundWriter Stat will be reset when the stat_reset is updated - */ -static void BackgroundWriterStat(report_params* params) -{ - dashboard* dash = CreateDash(); - char* desc = NULL; - StringInfoData query; - initStringInfo(&query); - appendStringInfo(&query, - "select (snap_2.snap_checkpoints_timed - coalesce(snap_1.snap_checkpoints_timed, 0)) " - "AS \"Checkpoints Timed\"," - " (snap_2.snap_checkpoints_req - coalesce(snap_1.snap_checkpoints_req, 0)) AS \"Checkpoints Require\"," - " (snap_2.snap_checkpoint_write_time - coalesce(snap_1.snap_checkpoint_write_time, 0))" - " AS \"Checkpoint Write Time(ms)\"," - " (snap_2.snap_checkpoint_sync_time - coalesce(snap_1.snap_checkpoint_sync_time, 0))" - " AS \"Checkpoint Sync Time(ms)\"," - " (snap_2.snap_buffers_checkpoint - coalesce(snap_1.snap_buffers_checkpoint, 0)) AS \"Buffers Checkpoint\"," - " (snap_2.snap_buffers_clean - coalesce(snap_1.snap_buffers_clean, 0)) AS \"Buffers Clean\"," - " (snap_2.snap_maxwritten_clean - coalesce(snap_1.snap_maxwritten_clean, 0)) AS \"Maxwritten Clean\"," - " (snap_2.snap_buffers_backend - 
coalesce(snap_1.snap_buffers_backend, 0)) AS \"Buffers Backend\"," - " (snap_2.snap_buffers_backend_fsync - coalesce(snap_1.snap_buffers_backend_fsync, 0))" - " AS \"Buffers Backend Fsync\"," - " (snap_2.snap_buffers_alloc - coalesce(snap_1.snap_buffers_alloc, 0)) AS \"Buffers Alloc\"," - " to_char(snap_2.snap_stats_reset, 'YYYY-MM-DD HH24:MI:SS') AS \"Stats Reset\" from" - " (select * from snapshot.snap_global_bgwriter_stat where snapshot_id = %ld and snap_node_name = '%s') snap_2 " - " LEFT JOIN (select * from snapshot.snap_global_bgwriter_stat where " - " snapshot_id = %ld and snap_node_name = '%s') snap_1 on snap_2.snapshot_id = snap_1.snapshot_id and" - " snap_2.snap_node_name = snap_1.snap_node_name and snap_2.snap_stats_reset = snap_1.snap_stats_reset" - " limit 200;", - params->end_snap_id, - params->report_node, - params->begin_snap_id, - params->report_node); - - GenReport::get_query_data(query.data, true, &dash->table, &dash->type); - dash->dashTitle = "Utility status"; - dash->tableTitle = "Background writer stat"; - desc = "The information of background writer statistics"; - dash->desc = lappend(dash->desc, desc); - GenReport::add_data(dash, ¶ms->Contents); - - pfree_ext(query.data); -} static void ReplicationStat(report_params* params) { @@ -2263,9 +2268,6 @@ static void GetUtilityStatus(report_params* params) StringInfoData query; initStringInfo(&query); - /* background writer stat */ - BackgroundWriterStat(params); - /* replication slot */ appendStringInfo(&query, "SELECT snap_slot_name as \"Slot Name\", snap_slot_type as \"Slot Type\"," @@ -2362,7 +2364,9 @@ static void GetClusterSqlDetailData(report_params* params) initStringInfo(&query); appendStringInfo(&query, - "select (t2.snap_unique_sql_id) as \"Unique SQL Id\", (t2.snap_query) as \"SQL Text\" " + "select (t2.snap_unique_sql_id) as \"Unique SQL Id\", " + "(t2.snap_node_name) as \"Node Name\", (t2.snap_user_name) as \"User Name\", " + "(t2.snap_query) as \"SQL Text\" " "from snapshot.snap_summary_statement t2 where snapshot_id = %ld ", params->end_snap_id); char* uniqueIDStr = GetUniqueIDStr(params->Contents); @@ -2388,7 +2392,9 @@ static void GetNodeSqlDetailData(report_params* params) initStringInfo(&query); appendStringInfo(&query, - "select (t2.snap_unique_sql_id) as \"Unique SQL Id\", (t2.snap_query) as \"SQL Text\" " + "select (t2.snap_unique_sql_id) as \"Unique SQL Id\", " + "(t2.snap_user_name) as \"User Name\", " + "(t2.snap_query) as \"SQL Text\" " " from snapshot.snap_summary_statement t2 where snapshot_id = %ld " "and snap_node_name = '%s' ", params->end_snap_id, @@ -2436,8 +2442,7 @@ void GenReport::GetTimeModelData(report_params* params) /* Time Model order by value */ appendStringInfo(&query, - "select t2.snap_node_name as \"Node Name\"," - " t2.snap_stat_name as \"Stat Name\", (t2.snap_value - coalesce(t1.snap_value, 0)) as \"Value(us)\" " + "select t2.snap_stat_name as \"Stat Name\", (t2.snap_value - coalesce(t1.snap_value, 0)) as \"Value(us)\" " " from (select * from snapshot.snap_global_instance_time where snapshot_id = %ld and snap_node_name = '%s') t1" " right join (select * from snapshot.snap_global_instance_time where snapshot_id = %ld and" " snap_node_name = '%s') t2 on t1.snap_stat_name = t2.snap_stat_name order by \"Value(us)\" desc limit 200;", @@ -2447,7 +2452,7 @@ void GenReport::GetTimeModelData(report_params* params) params->report_node); GenReport::get_query_data(query.data, true, &dash->table, &dash->type); dash->dashTitle = "Time Model"; - dash->tableTitle = "Time model(node)"; + 
dash->tableTitle = "Time model"; desc = "time model order by value in node"; dash->desc = lappend(dash->desc, desc); GenReport::add_data(dash, ¶ms->Contents); @@ -2979,13 +2984,15 @@ void GenReport::get_summary_database_stat(report_params* params) " (snap_2.snap_blk_write_time - coalesce(snap_1.snap_blk_write_time, 0)) as \"Blk Write Time\", " " to_char(snap_2.snap_stats_reset, 'YYYY-MM-DD HH24:MI:SS') AS \"Stats Reset\" "); appendStringInfo(&query, - "from (select * from snapshot.snap_summary_stat_database where snapshot_id = %ld) snap_2 ", + "from (select * from snapshot.snap_summary_stat_database where snapshot_id = %ld " + "and snap_datname != 'template0' and snap_datname != 'template1') snap_2 ", params->end_snap_id); appendStringInfo(&query, "left join (select * from snapshot.snap_summary_stat_database " "where snapshot_id = %ld) snap_1 ", params->begin_snap_id); - appendStringInfo(&query, "on snap_1.snap_datname = snap_2.snap_datname order by snap_2.snap_datname;"); + appendStringInfo(&query, "on snap_1.snap_datname = snap_2.snap_datname order by " + "\"Xact Commit\" desc;"); GenReport::get_query_data(query.data, true, &dash->table, &dash->type); pfree(query.data); @@ -3033,7 +3040,7 @@ static void get_summary_load_profile_db_cpu_time(report_params* params, dashboar initStringInfo(&query); // diff (snap_2 db cpu and snap_1 db cpu) - appendStringInfo(&query, "select 'CPU Time(microseconds)' as \"%s\", ", g_metric_name); + appendStringInfo(&query, "select 'CPU Time(us)' as \"%s\", ", g_metric_name); appendStringInfo(&query, "((snap_2.cpu_time - snap_1.cpu_time) / (%ld))::int8 as \"%s\", ", get_report_snap_gap(params), @@ -3073,7 +3080,7 @@ static void get_summary_load_profile_db_time(report_params* params, dashboard* d initStringInfo(&query); // diff (snap_2 db time and snap_1 db time) - appendStringInfo(&query, "select 'DB Time(microseconds)' as \"%s\", ", g_metric_name); + appendStringInfo(&query, "select 'DB Time(us)' as \"%s\", ", g_metric_name); appendStringInfo(&query, "((snap_2.db_time - snap_1.db_time) / (%ld))::int8 as \"%s\", ", get_report_snap_gap(params), @@ -3244,7 +3251,7 @@ static void get_summary_load_profile_logins(report_params* params, dashboard* da initStringInfo(&query); appendStringInfo(&query, - "select 'Logons' as \"%s\", " + "select 'Logins' as \"%s\", " " round((snap_2.login_counter - snap_1.login_counter) / %ld)" " as \"%s\", " " round((snap_2.login_counter - snap_1.login_counter) / %ld)" @@ -3348,19 +3355,12 @@ static void get_summary_load_profile_sql_resp_time(report_params* params, dashbo initStringInfo(&query); appendStringInfo(&query, "select " - "unnest(array['SQL response time P95', 'SQL response time P80']) as \"%s\", " - "round(unnest(array[snap_P95, snap_P80]) / %ld) as \"%s\", " - "round(unnest(array[snap_P95, snap_P80]) / %ld) as \"%s\", " - "round(unnest(array[snap_P95, snap_P80]) / %lu) as \"%s\" " + "unnest(array['SQL response time P95(us)', 'SQL response time P80(us)']) as \"%s\", " + "round(unnest(array[snap_P95, snap_P80])) as \"%s\" " "from " "snapshot.snap_statement_responsetime_percentile where snapshot_id = %ld", g_metric_name, - get_report_snap_gap(params), - g_per_second_str, - get_report_snap_diff_trx_count(params), - g_per_trx_str, - get_report_snap_diff_sql_count(params), - g_per_exec_str, + g_value_str, params->end_snap_id); GenReport::get_query_data(query.data, !list_length(dash->table), &query_result, &dash->type); @@ -3416,7 +3416,11 @@ static void get_summary_load_profile_part(report_params* params, dashboard* dash if 
(update_report_snap_gap_param(params, "snap_summary_workload_transaction")) { get_summary_load_profile_trx(params, dash); } +} +/* Used to store tables with only one column of values */ +static void get_summary_load_profile_part_single_value(report_params* params, dashboard* dash) +{ /* SQL response time P90/P85 */ if (update_report_snap_gap_param(params, "snap_statement_responsetime_percentile")) { get_summary_load_profile_sql_resp_time(params, dash); @@ -3461,6 +3465,14 @@ void GenReport::get_summary_load_profile(report_params* params) params->snap_diff_sql_count = snap_diff_sql_count; get_summary_load_profile_part(params, dash); GenReport::add_data(dash, ¶ms->Contents); + + dash = CreateDash(); + desc = "SQL response time P80/P95"; + dash->dashTitle = "Summary"; + dash->tableTitle = "Load Profile"; + dash->desc = lappend(dash->desc, (void*)desc); + get_summary_load_profile_part_single_value(params, dash); + GenReport::add_data(dash, ¶ms->Contents); } #ifdef ENABLE_MULTIPLE_NODES @@ -3636,9 +3648,9 @@ static void get_summary_top10event_waitevent(report_params* params) appendStringInfo(&query, "select snap_event as \"Event\", snap_wait as \"Waits\", " - " snap_total_wait_time as \"Total Wait Times(us)\", " - " round(snap_total_wait_time/snap_wait) as \"Wait Avg(us)\", " - " snap_type as \"Wait Class\" " + " snap_total_wait_time as \"Total Wait Time(us)\", " + " round(snap_total_wait_time/snap_wait) as \"Avg Wait Time(us)\", " + " snap_type as \"Type\" " "from (" " select snap_2.snap_event as snap_event, snap_2.snap_type snap_type, " " snap_2.snap_wait - snap_1.snap_wait as snap_wait, " @@ -3696,12 +3708,12 @@ static void get_summary_wait_classes(report_params* params) appendStringInfo(&query, "select " - " snap_2.type as \"Wait Class\", " + " snap_2.type as \"Type\", " " (snap_2.wait - snap_1.wait) as \"Waits\", " " (snap_2.total_wait_time - snap_1.total_wait_time) " " as \"Total Wait Time(us)\", " " round((snap_2.total_wait_time - snap_1.total_wait_time) / " - " greatest((snap_2.wait - snap_1.wait), 1)) as \"Wait Avg(us)\" " + " greatest((snap_2.wait - snap_1.wait), 1)) as \"Avg Wait Time(us)\" " "from " " (select " " snap_type as type, " @@ -3732,7 +3744,7 @@ static void AppendQueryOne(StringInfoData& query) { appendStringInfo(&query, "select " - " snap_2.cpus as \"Cpus\", " + " snap_2.cpus as \"CPUs\", " " snap_2.cores as \"Cores\", " " snap_2.sockets as \"Sockets\", " " snap_1.load as \"Load Average Begin\", " @@ -4406,7 +4418,7 @@ Datum generate_wdr_report(PG_FUNCTION_ARGS) GenReport::get_report_data(¶ms); MemoryContext spi_context = MemoryContextSwitchTo(old_context); - char* result_str = GenReport::GenerateHtmlReport(params.Contents); + char* result_str = GenReport::GenerateHtmlReport(¶ms); (void)MemoryContextSwitchTo(spi_context); SPI_STACK_LOG("finish", NULL, NULL); (void)SPI_finish(); diff --git a/src/gausskernel/cbb/instruments/wdr/snapshot.cpp b/src/gausskernel/cbb/instruments/wdr/snapshot.cpp index 44bc40e2c..918da9291 100755 --- a/src/gausskernel/cbb/instruments/wdr/snapshot.cpp +++ b/src/gausskernel/cbb/instruments/wdr/snapshot.cpp @@ -90,7 +90,7 @@ void init_curr_snapid(void); void CreateTable(const char** views, int numViews, bool ismultidbtable); void InitTables(void); void CreateSnapStatTables(void); -void CreateIndexes(void); +void CreateIndexes(const char* views); void CreateSequence(void); void UpdateSnapEndTime(uint64 curr_snapid); void GetQueryStr(StringInfoData& query, const char* viewname, uint64 curr_snapid, const char* dbname); @@ -390,9 +390,9 @@ static void 
ReloadInfo() t_thrd.perf_snap_cxt.got_SIGHUP = false; ProcessConfigFile(PGC_SIGHUP); } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } } @@ -1041,6 +1041,20 @@ void SnapshotNameSpace::CreateSnapStatTables(void) CreateStatTable(createSnapshot, tablename2); } +static void DropIndexes(const char* indexName) +{ + StringInfoData query; + initStringInfo(&query); + appendStringInfo(&query, "drop index IF EXISTS snapshot.%s", indexName); + if (!SnapshotNameSpace::ExecuteQuery(query.data, SPI_OK_UTILITY)) { + pfree_ext(query.data); + ereport(ERROR, (errmodule(MOD_WDR_SNAPSHOT), errcode(ERRCODE_DATA_EXCEPTION), + errmsg("drop index failed"), errdetail("drop index snapshot.%s execute error", indexName), + errcause("System error."), erraction("Check whether the query can be executed"))); + } + pfree_ext(query.data); +} + void SnapshotNameSpace::InitTables() { SnapshotNameSpace::CreateSnapStatTables(); @@ -1053,7 +1067,10 @@ void SnapshotNameSpace::InitTables() SnapshotNameSpace::CreateTable(lastDbRelatedViews, numViews, false); numViews = COUNT_ARRAY_SIZE(lastStatViews); SnapshotNameSpace::CreateTable(lastStatViews, numViews, true); - SnapshotNameSpace::CreateIndexes(); + DropIndexes("snap_summary_statio_indexes_name"); + DropIndexes("snap_summary_statio_tables_name"); + DropIndexes("snap_summary_stat_indexes_name"); + DropIndexes("snap_class_info_name"); #if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS))) SnapshotView(); #endif @@ -1094,6 +1111,8 @@ void SnapshotNameSpace::CreateTable(const char** views, int numViews, bool isSha } pfree(snapColAttrType); } + /* create index on snapshot table */ + SnapshotNameSpace::CreateIndexes(views[i]); } pfree_ext(query.data); } @@ -1260,78 +1279,31 @@ void SnapshotNameSpace::InsertDatabaseData(const char* dbname, uint64 curr_snapi pfree_ext(sql.data); } -static bool IsNeedCreateIndex(const char* indexName) -{ - Datum colval; - bool isNull = false; - StringInfoData query; - - initStringInfo(&query); - /* check the index which is existing or not */ - appendStringInfo(&query, "select count(*) from pg_class where relname = '%s' and relkind = 'i'", indexName); - colval = GetDatumValue(query.data, 0, 0, &isNull); - if (DatumGetInt32(colval)) { - return false; - } - pfree_ext(query.data); - return true; -} /* To accelerate AWR report queries, indexes are needed on some snapshot tables. Each index is created at the start phase, immediately after its table exists */ -void SnapshotNameSpace::CreateIndexes(void) +void SnapshotNameSpace::CreateIndexes(const char* views) { + bool isnull = false; StringInfoData query; initStringInfo(&query); - - /* snap_summary_statio_all_indexes */ - if (IsNeedCreateIndex("snap_summary_statio_indexes_name")) { - appendStringInfo(&query, "create index snap_summary_statio_indexes_name on" - " snapshot.snap_summary_statio_all_indexes(db_name, snap_schemaname, snap_relname, snap_indexrelname);"); - - if (!SnapshotNameSpace::ExecuteQuery(query.data, SPI_OK_UTILITY)) { - ereport(ERROR, (errmodule(MOD_WDR_SNAPSHOT), errcode(ERRCODE_DATA_EXCEPTION), - errmsg("create index failed"), errdetail("query(%s) execute error", query.data), - errcause("System error."), erraction("Check whether the query can be executed"))); - } - } - - /* snap_summary_statio_all_tables */ - if (IsNeedCreateIndex("snap_summary_statio_tables_name")) { + appendStringInfo(&query, + "select count(*) from pg_indexes where schemaname = 
'snapshot' and " + "tablename = 'snap_%s' and indexname = 'snap_%s_idx'", + views, views); + Datum indexNum = GetDatumValue(query.data, 0, 0, &isnull); + if (!DatumGetInt32(indexNum)) { resetStringInfo(&query); - appendStringInfo(&query, "create index snap_summary_statio_tables_name on" - " snapshot.snap_summary_statio_all_tables(db_name, snap_schemaname, snap_relname);"); - + appendStringInfo(&query, "create index snapshot.snap_%s_idx on snapshot.snap_%s(snapshot_id)", + views, views); if (!SnapshotNameSpace::ExecuteQuery(query.data, SPI_OK_UTILITY)) { + pfree_ext(query.data); ereport(ERROR, (errmodule(MOD_WDR_SNAPSHOT), errcode(ERRCODE_DATA_EXCEPTION), - errmsg("create index failed"), errdetail("query(%s) execute failed", query.data), - errcause("System error."), erraction("Check whether the query can be executed"))); - } - } - - /* snap_summary_stat_all_indexes */ - if (IsNeedCreateIndex("snap_summary_stat_indexes_name")) { - resetStringInfo(&query); - appendStringInfo(&query, "create index snap_summary_stat_indexes_name on" - " snapshot.snap_summary_stat_all_indexes(db_name, snap_schemaname, snap_relname, snap_indexrelname);"); - - if (!SnapshotNameSpace::ExecuteQuery(query.data, SPI_OK_UTILITY)) { - ereport(ERROR, (errmodule(MOD_WDR_SNAPSHOT), errcode(ERRCODE_DATA_EXCEPTION), - errmsg("create index failed"), errdetail("query(%s) execute failed", query.data), - errcause("System error."), erraction("Check whether the query can be executed"))); - } - } - - /* snap_class_vital_info */ - if (IsNeedCreateIndex("snap_class_info_name")) { - resetStringInfo(&query); - appendStringInfo(&query, "create index snap_class_info_name on" - " snapshot.snap_class_vital_info(db_name, snap_schemaname, snap_relname);"); - if (!SnapshotNameSpace::ExecuteQuery(query.data, SPI_OK_UTILITY)) { - ereport(ERROR, (errmodule(MOD_WDR_SNAPSHOT), errcode(ERRCODE_DATA_EXCEPTION), - errmsg("create index failed"), errdetail("query(%s) execute failed", query.data), - errcause("System error."), erraction("Check whether the query can be executed"))); + errmsg("create WDR snapshot index failed"), + errdetail("create index snapshot.snap_%s_idx execute error", views), + errcause("System error."), + erraction("Check whether the query can be executed"))); } } pfree_ext(query.data); @@ -1602,6 +1574,9 @@ void SnapshotNameSpace::SubSnapshotMain(void) SnapshotNameSpace::take_snapshot(); PopActiveSnapshot(); finish_xact_command(); + if (OidIsValid(u_sess->proc_cxt.MyDatabaseId)) { + pgstat_report_stat(true); + } ereport(LOG, (errcode(ERRCODE_SUCCESSFUL_COMPLETION), errmsg("WDR snapshot end"))); /* a snapshot has been taken at next_timestamp, so we need to get the next one */ diff --git a/src/gausskernel/cbb/utils/aes/cipherfn.cpp b/src/gausskernel/cbb/utils/aes/cipherfn.cpp index ea6cc51bf..99b6d3721 100644 --- a/src/gausskernel/cbb/utils/aes/cipherfn.cpp +++ b/src/gausskernel/cbb/utils/aes/cipherfn.cpp @@ -35,7 +35,9 @@ #include "getopt_long.h" #include "pgxc/pgxc.h" #include "gaussdb_version.h" +#ifdef ENABLE_GSS #include "gssapi/gssapi_krb5.h" +#endif /* ENABLE_GSS */ #include "tde_key_management/tde_key_manager.h" @@ -47,6 +49,12 @@ #ifdef ENABLE_UT #define static + +/* ugly hook for tde().get_key, it should be mocked in UT test cases */ +const char *fake_tde_get_key(const char *str) +{ + return str; +} #endif const char* pgname = "gs_encrypt"; @@ -796,7 +804,7 @@ bool gs_decrypt_aes_speed( } } -static inline const char* GetCipherPrefix(KeyMode mode) +static inline const char* GetCipherPrefix(int mode) { switch (mode) { case SOURCE_MODE:
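The next hunks widen the key-mode parameter from KeyMode to int and add an HADR_MODE branch that maps to the "hadr" cipher-file prefix; further down, decryptECString treats HADR cipher text as a bare base64 payload, skipping the EC_ENCRYPT_PREFIX only for the other modes. A minimal standalone sketch of that prefix handling (the enum values and the "encryptOpt" prefix literal are illustrative stand-ins, not the kernel's definitions):

```
#include <cstring>

/* illustrative stand-ins; the real KeyMode enum and EC_ENCRYPT_PREFIX live in the kernel headers */
enum SketchKeyMode { SKETCH_SOURCE_MODE, SKETCH_USER_MAPPING_MODE, SKETCH_SUBSCRIPTION_MODE, SKETCH_HADR_MODE };
static const char kSketchPrefix[] = "encryptOpt";

/* return the part of the cipher text that should be handed to the base64 decoder */
static const char* CipherBodyToDecode(const char* srcCipherText, int mode)
{
    if (mode != SKETCH_HADR_MODE) {
        /* non-HADR cipher text carries the marker prefix; skip it before decoding */
        return srcCipherText + strlen(kSketchPrefix);
    }
    /* HADR cipher text is already the bare base64 payload */
    return srcCipherText;
}
```

The split presumably exists so that HADR key material, which is exchanged without the marker prefix, can reuse the same decryption path as the prefixed datasource, usermapping and subscription secrets.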
@@ -805,6 +813,8 @@ static inline const char* GetCipherPrefix(KeyMode mode) return "usermapping"; case SUBSCRIPTION_MODE: return "subscription"; + case HADR_MODE: + return "hadr"; default: ereport(ERROR, (errmsg("unknown key mode: %d", mode))); return NULL; @@ -820,13 +830,12 @@ static inline const char* GetCipherPrefix(KeyMode mode) */ static GS_UCHAR* getECKeyString(KeyMode mode) { - Assert(mode == SOURCE_MODE || mode == USER_MAPPING_MODE || mode == SUBSCRIPTION_MODE); + Assert(mode == SOURCE_MODE || mode == USER_MAPPING_MODE || mode == SUBSCRIPTION_MODE || mode == HADR_MODE); GS_UCHAR* plainkey = NULL; char* gshome = NULL; char cipherdir[MAXPGPATH] = {0}; char cipherfile[MAXPGPATH] = {0}; const char *cipherPrefix = GetCipherPrefix(mode); - int ret = 0; /* @@ -884,7 +893,7 @@ static GS_UCHAR* getECKeyString(KeyMode mode) * @IN mode: key mode * @RETURN: void */ -void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_cipher_length, KeyMode mode) +void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_cipher_length, int mode) { GS_UINT32 ciphertextlen = 0; GS_UCHAR ciphertext[1024]; @@ -897,7 +906,7 @@ void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_c } /* First, get encrypt key */ - cipherkey = getECKeyString(mode); + cipherkey = getECKeyString((KeyMode)mode); /* Clear cipher buffer which will be used later */ ret = memset_s(ciphertext, sizeof(ciphertext), 0, sizeof(ciphertext)); @@ -984,7 +993,7 @@ void encryptECString(char* src_plain_text, char* dest_cipher_text, uint32 dest_c * @RETURN: bool, true if encrypt success, false if not */ bool decryptECString(const char* src_cipher_text, char* dest_plain_text, - uint32 dest_plain_length, KeyMode mode) + uint32 dest_plain_length, int mode) { GS_UCHAR* ciphertext = NULL; GS_UINT32 ciphertextlen = 0; @@ -993,15 +1002,18 @@ bool decryptECString(const char* src_cipher_text, char* dest_plain_text, GS_UCHAR* cipherkey = NULL; errno_t ret = EOK; - if (NULL == src_cipher_text || !IsECEncryptedString(src_cipher_text)) { + if (NULL == src_cipher_text || (!IsECEncryptedString(src_cipher_text) && mode != HADR_MODE)) { return false; } /* Get key string */ - cipherkey = getECKeyString(mode); + cipherkey = getECKeyString((KeyMode)mode); /* Step-1: Decode */ - ciphertext = (GS_UCHAR*)(SEC_decodeBase64((char*)(src_cipher_text + strlen(EC_ENCRYPT_PREFIX)), &ciphertextlen)); + if (mode != HADR_MODE) { + src_cipher_text += strlen(EC_ENCRYPT_PREFIX); + } + ciphertext = (GS_UCHAR*)(SEC_decodeBase64(src_cipher_text, &ciphertextlen)); plaintext = (GS_UCHAR*)palloc(ciphertextlen); ret = memset_s(plaintext, ciphertextlen, 0, ciphertextlen); securec_check(ret, "\0", "\0"); @@ -1086,7 +1098,7 @@ bool IsECEncryptedString(const char* src_cipher_text) * @IN src_options: source options to be encrypted * @RETURN: void */ -void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray, int arrayLength, KeyMode mode) +void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray, int arrayLength, int mode) { int i; char* srcString = NULL; @@ -1158,7 +1170,7 @@ void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray, in * @IN mode: key mode * @RETURN: void */ -void DecryptOptions(List *options, const char** sensitiveOptionsArray, int arrayLength, KeyMode mode) +void DecryptOptions(List *options, const char** sensitiveOptionsArray, int arrayLength, int mode) { if (options == NULL) { return; @@ -1359,8 +1371,8 @@ void encryptBlockOrCUData(const char* 
plainText, const size_t plainLength, tde_key_manager->init(); plain_key = tde_key_manager->get_key(tdeInfo->cmk_id, tdeInfo->dek_cipher); #else - /* ugly hook for TDEKeyManager->get_key() function, gs_strdup should be mocked in ut */ - plain_key = (const char*)gs_strdup("0123456789abcdef0123456789abcdef"); + /* ugly hook for TDEKeyManager->get_key() function, fake_tde_get_key should be mocked in ut */ + plain_key = fake_tde_get_key(plain_key); #endif Assert(strlen(plain_key) == KEY_128BIT_LEN * 2); @@ -1416,8 +1428,8 @@ void decryptBlockOrCUData(const char* cipherText, const size_t cipherLength, tde_key_manager->init(); plain_key = tde_key_manager->get_key(tdeInfo->cmk_id, tdeInfo->dek_cipher); #else - /* ugly hook for TDEKeyManager->get_key() function, gs_strdup should be mocked in ut */ - plain_key = (const char*)gs_strdup("0123456789abcdef0123456789abcdef"); + /* ugly hook for TDEKeyManager->get_key() function, fake_tde_get_key should be mocked in ut */ + plain_key = fake_tde_get_key(plain_key); #endif Assert(strlen(plain_key) == KEY_128BIT_LEN * 2); diff --git a/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp b/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp index 0b5e7ad19..aebff1ef3 100644 --- a/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp +++ b/src/gausskernel/cbb/utils/gssignal/gs_signal.cpp @@ -52,6 +52,11 @@ #define RES_SIGNAL SIGUSR2 #define FUNC_NAME_LEN 32 +#ifdef ENABLE_UT +#define STATIC +#else +#define STATIC static +#endif extern volatile ThreadId PostmasterPid; extern bool IsPostmasterEnvironment; @@ -69,7 +74,7 @@ static gs_sigfunc gs_signal_register_handler(GsSignal* gs_signal, int signo, gs_ static int gs_signal_thread_kill(ThreadId tid, int signo); static GsSignalSlot* gs_signal_alloc_slot_for_new_thread(char* name, ThreadId thread_id); static void gs_res_signal_handler(int signo, siginfo_t* siginfo, void* context); -static gs_sigaction_func gs_signal_install_handler(void); +STATIC gs_sigaction_func gs_signal_install_handler(void); static GsSignalSlot* gs_signal_find_slot(ThreadId thread_id); static void gs_signal_reset_signal(GsSignal* gssignal); @@ -955,7 +960,7 @@ void gs_signal_startup_siginfo(char* thread_name) * Description : * Notes : */ -static gs_sigaction_func gs_signal_install_handler(void) +STATIC gs_sigaction_func gs_signal_install_handler(void) { struct sigaction act, oact; diff --git a/src/gausskernel/cbb/utils/partition/partitionkey.cpp b/src/gausskernel/cbb/utils/partition/partitionkey.cpp index 4d1acb7bb..022d6ad97 100644 --- a/src/gausskernel/cbb/utils/partition/partitionkey.cpp +++ b/src/gausskernel/cbb/utils/partition/partitionkey.cpp @@ -29,8 +29,11 @@ #include "catalog/pg_partition_fn.h" #include "catalog/pg_type.h" +#include "commands/tablecmds.h" +#include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "nodes/value.h" +#include "parser/parse_utilcmd.h" #include "utils/array.h" #include "utils/lsyscache.h" #include "utils/builtins.h" @@ -49,6 +52,10 @@ */ #define constIsMaxValue(value) ((value)->ismaxvalue) +static Oid GetPartitionOidFromPartitionKeyValuesList(Relation rel, List *partitionKeyValuesList, ParseState *pstate, + RangeTblEntry *rte); +static void CheckPartitionValuesList(Relation rel, List *subPartitionKeyValuesList); + Datum transformPartitionBoundary(List* bondary, const bool* isTimestamptz) { Datum result; @@ -432,3 +439,243 @@ int2vector* GetPartitionKey(const PartitionMap* partMap) return ((RangePartitionMap*)partMap)->partitionKey; } } + +/* + * @@GaussDB@@ + * Target : data partition + * Brief : select * 
from partition (partition_name) + * : or select from partition for (partition_values_list) + * Description : get partition oid for rte->partitionOid + */ +Oid getPartitionOidForRTE(RangeTblEntry* rte, RangeVar* relation, ParseState* pstate, Relation rel) +{ + Oid partitionOid = InvalidOid; + + if (!PointerIsValid(rte) || !PointerIsValid(relation) || !PointerIsValid(pstate) || !PointerIsValid(rel)) { + return InvalidOid; + } + + /* relation is not a partitioned table. */ + if (!rte->ispartrel || rte->relkind != RELKIND_RELATION) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), + (errmsg("relation \"%s\" is not a partitioned table", relation->relname), errdetail("N/A."), + errcause("System error."), erraction("Contact engineer to support.")))); + } else { + /* relation is a partitioned table; the from clause is partition (partition_name). */ + if (PointerIsValid(relation->partitionname)) { + partitionOid = partitionNameGetPartitionOid(rte->relid, + relation->partitionname, + PART_OBJ_TYPE_TABLE_PARTITION, + AccessShareLock, + true, + false, + NULL, + NULL, + NoLock); + /* partition does not exist. */ + if (!OidIsValid(partitionOid)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("partition \"%s\" of relation \"%s\" does not exist", + relation->partitionname, + relation->relname))); + } + + rte->pname = makeAlias(relation->partitionname, NIL); + } else { + partitionOid = + GetPartitionOidFromPartitionKeyValuesList(rel, relation->partitionKeyValuesList, pstate, rte); + } + } + + return partitionOid; +} + + +static Oid GetPartitionOidFromPartitionKeyValuesList(Relation rel, List *partitionKeyValuesList, ParseState *pstate, + RangeTblEntry *rte) +{ + CheckPartitionValuesList(rel, partitionKeyValuesList); + + Oid partitionOid = InvalidOid; + + if (rel->partMap->type == PART_TYPE_LIST) { + ListPartitionDefState *listPartDef = NULL; + listPartDef = makeNode(ListPartitionDefState); + listPartDef->boundary = partitionKeyValuesList; + listPartDef->boundary = transformListPartitionValue(pstate, listPartDef->boundary, false, true); + listPartDef->boundary = transformIntoTargetType( + rel->rd_att->attrs, (((ListPartitionMap *)rel->partMap)->partitionKey)->values[0], listPartDef->boundary); + + rte->plist = listPartDef->boundary; + + partitionOid = partitionValuesGetPartitionOid(rel, listPartDef->boundary, AccessShareLock, true, true, false); + + pfree_ext(listPartDef); + } else if (rel->partMap->type == PART_TYPE_HASH) { + HashPartitionDefState *hashPartDef = NULL; + hashPartDef = makeNode(HashPartitionDefState); + hashPartDef->boundary = partitionKeyValuesList; + hashPartDef->boundary = transformListPartitionValue(pstate, hashPartDef->boundary, false, true); + hashPartDef->boundary = transformIntoTargetType( + rel->rd_att->attrs, (((HashPartitionMap *)rel->partMap)->partitionKey)->values[0], hashPartDef->boundary); + + rte->plist = hashPartDef->boundary; + + partitionOid = partitionValuesGetPartitionOid(rel, hashPartDef->boundary, AccessShareLock, true, true, false); + + pfree_ext(hashPartDef); + } else if (rel->partMap->type == PART_TYPE_RANGE || rel->partMap->type == PART_TYPE_INTERVAL) { + RangePartitionDefState *rangePartDef = NULL; + rangePartDef = makeNode(RangePartitionDefState); + rangePartDef->boundary = partitionKeyValuesList; + + transformPartitionValue(pstate, (Node *)rangePartDef, false); + + rangePartDef->boundary = transformConstIntoTargetType( + rel->rd_att->attrs, ((RangePartitionMap *)rel->partMap)->partitionKey, rangePartDef->boundary); + + rte->plist =
rangePartDef->boundary; + + partitionOid = partitionValuesGetPartitionOid(rel, rangePartDef->boundary, AccessShareLock, true, true, false); + + pfree_ext(rangePartDef); + } else { + /* shouldn't happen. */ + ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + (errmsg("unsupported partition type"), errdetail("N/A."), errcause("System error."), + erraction("Contact engineer to support.")))); + } + + /* partition does not exist. */ + if (!OidIsValid(partitionOid)) { + if (rel->partMap->type == PART_TYPE_RANGE || rel->partMap->type == PART_TYPE_LIST || + rel->partMap->type == PART_TYPE_HASH) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), + (errmsg("Cannot find partition by the value"), errdetail("N/A."), + errcause("The value is incorrect."), erraction("Use the correct value.")))); + } + } + + return partitionOid; +} + +static void SplitValuesList(List *ValuesList, List **partitionKeyValuesList, List **subPartitionKeyValuesList, + Relation rel) +{ + uint len = 0; + uint cnt = 0; + Node* elem = NULL; + ListCell *cell = NULL; + + if (rel->partMap->type == PART_TYPE_RANGE) { + len = (((RangePartitionMap *)rel->partMap)->partitionKey)->dim1; + } else if (rel->partMap->type == PART_TYPE_LIST) { + len = (((ListPartitionMap *)rel->partMap)->partitionKey)->dim1; + } else if (rel->partMap->type == PART_TYPE_HASH) { + len = (((HashPartitionMap *)rel->partMap)->partitionKey)->dim1; + } else { + /* shouldn't happen */ + ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + (errmsg("unsupported partition type"), errdetail("N/A."), errcause("System error."), + erraction("Contact engineer to support.")))); + } + /* The subpartition table supports only one partition key. */ + Assert(len == 1); + + foreach(cell, ValuesList) { + elem = (Node*)lfirst(cell); + cnt++; + if (cnt <= len) { + *partitionKeyValuesList = lappend(*partitionKeyValuesList, elem); + } else { + *subPartitionKeyValuesList = lappend(*subPartitionKeyValuesList, elem); + } + } +} + +static void CheckPartitionValuesList(Relation rel, List *subPartitionKeyValuesList) +{ + int len = 0; + + if (rel->partMap->type == PART_TYPE_RANGE) { + len = (((RangePartitionMap *)rel->partMap)->partitionKey)->dim1; + } else if (rel->partMap->type == PART_TYPE_LIST) { + len = (((ListPartitionMap *)rel->partMap)->partitionKey)->dim1; + } else if (rel->partMap->type == PART_TYPE_HASH) { + len = (((HashPartitionMap *)rel->partMap)->partitionKey)->dim1; + } else { + /* shouldn't happen */ + ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + (errmsg("unsupported partition type"), errdetail("N/A."), errcause("System error."), + erraction("Contact engineer to support.")))); + } + + if (subPartitionKeyValuesList == NIL || len != subPartitionKeyValuesList->length) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), + (errmsg("number of partitionkey values is not equal to the number of partitioning columns"), + errdetail("N/A."), errcause("The value is incorrect."), erraction("Use the correct value.")))); + } +} + +/* + * @@GaussDB@@ + * Target : data partition + * Brief : select * from subpartition (subpartition_name) + * Description : get partition oid for rte->partitionOid + */ +Oid GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel, Oid *partOid) +{ + Oid subPartitionOid = InvalidOid; + + if (!PointerIsValid(rte) || !PointerIsValid(relation) || !PointerIsValid(pstate) || !PointerIsValid(rel)) { + return InvalidOid; + } + + /* relation is not partitioned table. 
*/ + if (!rte->ispartrel || rte->relkind != RELKIND_RELATION || !RelationIsSubPartitioned(rel)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), + (errmsg("relation \"%s\" is not a subpartitioned table", relation->relname), errdetail("N/A."), + errcause("System error."), erraction("Contact engineer to support.")))); + } else { + /* relation is a subpartitioned table; the from clause is subpartition (subpartition_name). */ + if (PointerIsValid(relation->subpartitionname)) { + subPartitionOid = partitionNameGetPartitionOid(rte->relid, + relation->subpartitionname, + PART_OBJ_TYPE_TABLE_SUB_PARTITION, + AccessShareLock, + true, + false, + NULL, + NULL, + NoLock, + partOid); + /* subpartition does not exist. */ + if (!OidIsValid(subPartitionOid)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("subpartition \"%s\" of relation \"%s\" does not exist", + relation->subpartitionname, + relation->relname))); + } + + rte->pname = makeAlias(relation->subpartitionname, NIL); + } else { + List *partitionKeyValuesList = NIL; + List *subPartitionKeyValuesList = NIL; + List *tmpList = NIL; + SplitValuesList(relation->partitionKeyValuesList, &partitionKeyValuesList, &subPartitionKeyValuesList, rel); + *partOid = GetPartitionOidFromPartitionKeyValuesList(rel, partitionKeyValuesList, pstate, rte); + tmpList = rte->plist; + Partition part = partitionOpen(rel, *partOid, AccessShareLock); + Relation partRel = partitionGetRelation(rel, part); + CheckPartitionValuesList(partRel, subPartitionKeyValuesList); + subPartitionOid = + GetPartitionOidFromPartitionKeyValuesList(partRel, subPartitionKeyValuesList, pstate, rte); + releaseDummyRelation(&partRel); + partitionClose(rel, part, AccessShareLock); + rte->plist = list_concat(tmpList, rte->plist); + } + } + return subPartitionOid; +} diff --git a/src/gausskernel/cbb/utils/partition/partitionmap.cpp b/src/gausskernel/cbb/utils/partition/partitionmap.cpp index a0c54d113..1f2989690 100644 --- a/src/gausskernel/cbb/utils/partition/partitionmap.cpp +++ b/src/gausskernel/cbb/utils/partition/partitionmap.cpp @@ -59,6 +59,7 @@ #include "fmgr.h" #include "utils/memutils.h" #include "utils/datum.h" +#include "utils/knl_relcache.h" #define SAMESIGN(a, b) (((a) < 0) == ((b) < 0)) #define overFlowCheck(arg) \ @@ -268,7 +269,6 @@ break; \ } \ } while (0) - /* * @Description: partition value routing * @Param[IN] compare: returned value @@ -397,8 +397,6 @@ static void BuildHashPartitionMap(Relation relation, Form_pg_partition partition Relation pg_partition, List* partition_list); static void BuildListPartitionMap(Relation relation, Form_pg_partition partitioned_form, HeapTuple partitioned_tuple, Relation pg_partition, List* partition_list); -static ListPartElement* CopyListElements(ListPartElement* src, int elementNum); -static HashPartElement* CopyHashElements(HashPartElement* src, int elementNum, int partkeyNum); ValuePartitionMap* buildValuePartitionMap(Relation relation, Relation pg_partition, HeapTuple partitioned_tuple); /* @@ -781,14 +779,13 @@ void RelationInitPartitionMap(Relation relation, bool isSubPartition) (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("could not find tuple with partition OID %u.", relation->rd_id))); } - partitioned_form = (Form_pg_partition)GETSTRUCT(partitioned_tuple); /* * For value based partition-table, we only have to retrieve partkeys */ if (partitioned_form->partstrategy == PART_STRATEGY_VALUE) { /* create ValuePartitionMap */ - (void)MemoryContextSwitchTo(u_sess->cache_mem_cxt); + (void)MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
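/* the map built below is allocated in LocalMyDBCacheMemCxt(), the thread-local database cache context that this patch uses in place of u_sess->cache_mem_cxt throughout partitionmap.cpp */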
relation->partMap = (PartitionMap*)buildValuePartitionMap(relation, pg_partition, partitioned_tuple); @@ -939,6 +936,7 @@ void RebuildPartitonMap(PartitionMap* oldMap, PartitionMap* newMap) } } else { oldMap->isDirty = true; + SetLocalRelCacheNeedEOXactWork(true); elog(LOG, "map refcount is not zero when RebuildPartitonMap "); } } @@ -998,7 +996,7 @@ static void RebuildListPartitionMap(ListPartitionMap* oldMap, ListPartitionMap* PARTITIONMAP_SWAPFIELD(Oid*, partitionKeyDataType); } -static ListPartElement* CopyListElements(ListPartElement* src, int elementNum) +ListPartElement* CopyListElements(ListPartElement* src, int elementNum) { int i = 0; int j = 0; @@ -1171,7 +1169,7 @@ void RelationDestroyPartitionMap(PartitionMap* partMap) return; } -static HashPartElement* CopyHashElements(HashPartElement* src, int elementNum, int partkeyNum) +HashPartElement* CopyHashElements(HashPartElement* src, int elementNum, int partkeyNum) { int i = 0; int j = 0; @@ -1324,7 +1322,7 @@ static void BuildListPartitionMap(Relation relation, Form_pg_partition partition partitionKey = getPartitionKeyAttrNo( &(partitionKeyDataType), partitioned_tuple, RelationGetDescr(pg_partition), RelationGetDescr(relation)); /* copy the partitionKey */ - old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); list_map->partitionKey = (int2vector*)palloc(Int2VectorSize(partitionKey->dim1)); rc = memcpy_s( @@ -1374,7 +1372,7 @@ static void BuildListPartitionMap(Relation relation, Form_pg_partition partition } /* list element array back in RangePartitionMap */ - old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); list_map->listElements = CopyListElements(list_eles, list_map->listElementsNum); relation->partMap = (PartitionMap*)palloc(sizeof(ListPartitionMap)); @@ -1433,7 +1431,7 @@ static void BuildHashPartitionMap(Relation relation, Form_pg_partition partition partitionKey = getPartitionKeyAttrNo( &(partitionKeyDataType), partitioned_tuple, RelationGetDescr(pg_partition), RelationGetDescr(relation)); /* copy the partitionKey */ - old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); hash_map->partitionKey = (int2vector*)palloc(Int2VectorSize(partitionKey->dim1)); rc = memcpy_s( @@ -1487,8 +1485,7 @@ static void BuildHashPartitionMap(Relation relation, Form_pg_partition partition Assert(CheckHashPartitionMap(hash_eles, hash_map->hashElementsNum)); /* hash element array back in RangePartitionMap */ - old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt); - + old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); hash_map->hashElements = CopyHashElements(hash_eles, hash_map->hashElementsNum, partitionKey->dim1); relation->partMap = (PartitionMap*)palloc(sizeof(HashPartitionMap)); rc = memcpy_s(relation->partMap, sizeof(HashPartitionMap), hash_map, sizeof(HashPartitionMap)); @@ -1530,7 +1527,7 @@ static void buildRangePartitionMap(Relation relation, Form_pg_partition partitio partitionKey = getPartitionKeyAttrNo( &(partitionKeyDataType), partitioned_tuple, RelationGetDescr(pg_partition), RelationGetDescr(relation)); /* copy the partitionKey */ - old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt); + old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt()); range_map->partitionKey = (int2vector*)palloc(Int2VectorSize(partitionKey->dim1)); rc = memcpy_s( @@ -1591,7 +1588,7 @@ static void 
buildRangePartitionMap(Relation relation, Form_pg_partition partitio
     qsort(range_eles, range_map->rangeElementsNum, sizeof(RangeElement), rangeElementCmp);

     /* range element array back in RangePartitionMap */
-    old_context = MemoryContextSwitchTo(u_sess->cache_mem_cxt);
+    old_context = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
     range_map->rangeElements = copyRangeElements(range_eles, range_map->rangeElementsNum, partitionKey->dim1);

     relation->partMap = (PartitionMap*)palloc(sizeof(RangePartitionMap));
@@ -1666,6 +1663,7 @@ List* getRangePartitionBoundaryList(Relation rel, int sequence)
             result = lappend(result, (Const*)copyObject(srcBound[i]));
         }
     } else {
+        decre_partmap_refcount(rel->partMap);
         ereport(ERROR,
             (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid partition sequence: %d of relation \"%s\", check whether the table name and partition "
@@ -1695,6 +1693,7 @@ List* getListPartitionBoundaryList(Relation rel, int sequence)
             result = lappend(result, (Const*)copyObject(srcBound[i]));
         }
     } else {
+        decre_partmap_refcount(rel->partMap);
         ereport(ERROR,
             (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid partition sequence: %d of relation \"%s\", check whether the table name and partition "
@@ -1724,6 +1723,7 @@ List* getHashPartitionBoundaryList(Relation rel, int sequence)
             result = lappend(result, (Const*)copyObject(srcBound[i]));
         }
     } else {
+        decre_partmap_refcount(rel->partMap);
         ereport(ERROR,
             (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid partition sequence: %d of relation \"%s\", check whether the table name and partition "
@@ -1746,6 +1746,12 @@ Oid partitionKeyValueListGetPartitionOid(Relation rel, List* partKeyValueList, b
     ListCell* cell = NULL;
     int len = 0;

+    if (list_length(partKeyValueList) > PARTKEY_VALUE_MAXNUM) {
+        ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
+            (errmsg("too many partition keys; at most %d are allowed", PARTKEY_VALUE_MAXNUM), errdetail("N/A"),
+            errcause("too many partition keys for this syntax."), erraction("Check that the syntax is correct."))));
+    }
+
     foreach (cell, partKeyValueList) {
         t_thrd.utils_cxt.valueItemArr[len++] = (Const*)lfirst(cell);
     }
diff --git a/src/gausskernel/cbb/utils/ssl/gs_openssl_client.cpp b/src/gausskernel/cbb/utils/ssl/gs_openssl_client.cpp
index 9ebd345d4..82998bf08 100644
--- a/src/gausskernel/cbb/utils/ssl/gs_openssl_client.cpp
+++ b/src/gausskernel/cbb/utils/ssl/gs_openssl_client.cpp
@@ -72,7 +72,7 @@ static char* ssl_cipher_list2string(const char* ciphers[], const int num);
 static int SSL_CTX_set_cipher_list_ex(SSL_CTX* ctx, const char* ciphers[], const int num);

 /* security ciphers suites in SSL connection */
-static const char* ossl_cipher_list[] = {TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, NULL};
+static const char* ossl_cipher_list[] = {TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, NULL};

 /* VPP SSL client configuration information */
 struct gs_openssl_client {
diff --git a/src/gausskernel/cbb/workload/cpwlm.cpp b/src/gausskernel/cbb/workload/cpwlm.cpp
index 2186dc8f4..4b3478bff 100755
--- a/src/gausskernel/cbb/workload/cpwlm.cpp
+++ b/src/gausskernel/cbb/workload/cpwlm.cpp
@@ -1953,6 +1953,8 @@ NON_EXEC_STATIC void CPmonitorMain(void)
             t_thrd.wlm_cxt.wlm_init_done = false;
             AbortCurrentTransaction();
         }
+        /* release resource held by lsc */
+        AtEOXact_SysDBCache(false);

         /*
          * Notice: at the most time it isn't necessary to call because
diff --git a/src/gausskernel/cbb/workload/ioschdl.cpp b/src/gausskernel/cbb/workload/ioschdl.cpp
index 111718418..1c045a0c3 100644
---
a/src/gausskernel/cbb/workload/ioschdl.cpp +++ b/src/gausskernel/cbb/workload/ioschdl.cpp @@ -950,9 +950,9 @@ static void WLMmonitor_MainLoop(void) ProcessConfigFile(PGC_SIGHUP); } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } ResetLatch(&t_thrd.wlm_cxt.wlm_mainloop_latch); @@ -1237,6 +1237,8 @@ NON_EXEC_STATIC void WLMmonitorMain(void) t_thrd.wlm_cxt.wlm_xact_start = false; t_thrd.wlm_cxt.wlm_init_done = false; } + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * Notice: at the most time it isn't necessary to call because @@ -1383,9 +1385,9 @@ static void WLMarbiter_MainLoop(void) ProcessConfigFile(PGC_SIGHUP); } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } ResetLatch(&t_thrd.wlm_cxt.wlm_mainloop_latch); @@ -1562,6 +1564,8 @@ NON_EXEC_STATIC void WLMarbiterMain(void) t_thrd.wlm_cxt.wlm_xact_start = false; t_thrd.wlm_cxt.wlm_init_done = false; } + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * Notice: at the most time it isn't necessary to call because diff --git a/src/gausskernel/cbb/workload/parctl.cpp b/src/gausskernel/cbb/workload/parctl.cpp index a4e606c09..85acb3d11 100644 --- a/src/gausskernel/cbb/workload/parctl.cpp +++ b/src/gausskernel/cbb/workload/parctl.cpp @@ -2045,9 +2045,8 @@ void WLMCheckDefaultXactReadOnly(void) { int save_errno = errno; - /* set XactReadOnly for transaction,if IDLEINTRANSACTION no need to cancel stmt */ + /* if IDLEINTRANSACTION no need to cancel stmt */ if (!u_sess->attr.attr_storage.DefaultXactReadOnly) { - u_sess->attr.attr_storage.DefaultXactReadOnly = true; if (t_thrd.shemem_ptr_cxt.MyBEEntry == NULL || t_thrd.shemem_ptr_cxt.MyBEEntry->st_state == STATE_IDLEINTRANSACTION || t_thrd.shemem_ptr_cxt.MyBEEntry->st_state == STATE_IDLEINTRANSACTION_ABORTED) { @@ -2086,6 +2085,7 @@ void WLMCheckDefaultXactReadOnly(void) } } + u_sess->sig_cxt.got_SIGHUP = true; /* If we're still here, waken anything waiting on the process latch */ if (t_thrd.proc) { SetLatch(&t_thrd.proc->procLatch); diff --git a/src/gausskernel/cbb/workload/statctl.cpp b/src/gausskernel/cbb/workload/statctl.cpp index 1c3992c64..7f5fca98a 100644 --- a/src/gausskernel/cbb/workload/statctl.cpp +++ b/src/gausskernel/cbb/workload/statctl.cpp @@ -7856,6 +7856,8 @@ int WLMProcessThreadMain(void) AbortCurrentTransaction(); t_thrd.wlm_cxt.wlm_xact_start = false; } + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * Notice: at the most time it isn't necessary to call because @@ -8033,6 +8035,10 @@ int WLMProcessThreadMain(void) const int SEVEN_DAYS = 7 * 24 * 60 * 60 * 1000; TimestampTz get_thread_status_last_time = GetCurrentTimestamp(); +#ifdef ENABLE_MULTIPLE_NODES + const int ONE_HOURS = 60 * 60; + TimestampTz cgroupCheckLast = GetCurrentTimestamp(); +#endif while (g_instance.wlm_cxt->stat_manager.stop == 0) { if (!t_thrd.wlm_cxt.wlm_xact_start) { StartTransactionCommand(); @@ -8044,9 +8050,9 @@ int WLMProcessThreadMain(void) ProcessConfigFile(PGC_SIGHUP); } - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } /* timer is triggerred, start to get session info from the database. 
*/ @@ -8055,6 +8061,17 @@ int WLMProcessThreadMain(void) t_thrd.wlm_cxt.wlmalarm_dump_active = false; } +#ifdef ENABLE_MULTIPLE_NODES + /* if cgroup not init, retry init it */ + if (!g_instance.wlm_cxt->gscgroup_config_parsed) { + TimestampTz cgroupCheckNow = GetCurrentTimestamp(); + if (cgroupCheckNow > cgroupCheckLast + ONE_HOURS * USECS_PER_SEC) { + gscgroup_init(); + cgroupCheckLast = cgroupCheckNow; + } + } +#endif + /* Fetch collect info from each data nodes. */ WLMCollectInfoScanner(); diff --git a/src/gausskernel/dbmind/CMakeLists.txt b/src/gausskernel/dbmind/CMakeLists.txt index 399b2fa18..950b6928d 100755 --- a/src/gausskernel/dbmind/CMakeLists.txt +++ b/src/gausskernel/dbmind/CMakeLists.txt @@ -14,4 +14,9 @@ set(CMAKE_MODULE_PATH add_subdirectory(kernel) add_subdirectory(db4ai) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/tools/ DESTINATION bin/dbmind) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/tools/ DESTINATION bin/dbmind) + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/gs_dbmind + PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + DESTINATION bin) +endif() diff --git a/src/gausskernel/dbmind/db4ai/catalog/Makefile b/src/gausskernel/dbmind/db4ai/catalog/Makefile index b8c604ca3..26e06b69b 100644 --- a/src/gausskernel/dbmind/db4ai/catalog/Makefile +++ b/src/gausskernel/dbmind/db4ai/catalog/Makefile @@ -17,6 +17,6 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -OBJS = model_warehouse.o +OBJS = aifuncs.o model_warehouse.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/dbmind/db4ai/catalog/aifuncs.cpp b/src/gausskernel/dbmind/db4ai/catalog/aifuncs.cpp new file mode 100644 index 000000000..e08ad0a87 --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/catalog/aifuncs.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ *---------------------------------------------------------------------------------------
+ *
+ * aifuncs.cpp
+ *
+ * IDENTIFICATION
+ *        src/gausskernel/dbmind/db4ai/catalog/aifuncs.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "db4ai/aifuncs.h"
+#include "db4ai/db4ai_api.h"
+#include "db4ai/gd.h"
+#include "db4ai/kmeans.h"
+#include "db4ai/xgboost.h"
+
+// when adding new prediction types, make sure to update the corresponding enum (positions must match)
+const char *prediction_types_str[] = {[TYPE_BOOL] = "Boolean",
+                                      [TYPE_BYTEA] = "Binary",
+                                      [TYPE_INT32] = "Int32",
+                                      [TYPE_INT64] = "Int64",
+                                      [TYPE_FLOAT32] = "Float32",
+                                      [TYPE_FLOAT64] = "Float64",
+                                      [TYPE_FLOAT64ARRAY] = "Float64[]",
+                                      [TYPE_NUMERIC] = "Numeric",
+                                      [TYPE_TEXT] = "Text",
+                                      [TYPE_VARCHAR] = "Varchar",
+                                      [TYPE_INVALID_PREDICTION] = "Invalid"};
+
+const int32_t prediction_type_str_size = ARRAY_LENGTH(prediction_types_str);
+ASSERT_ELEMENTS_ENUM_TO_STR(prediction_types_str, TYPE_INVALID_PREDICTION);
+
+const char* prediction_type_to_string(PredictionType x)
+{
+    return enum_to_string(x);
+}
+
+const char* algorithm_ml_to_string(AlgorithmML x)
+{
+    return get_algorithm_api(x)->name;
+}
+
+AlgorithmML get_algorithm_ml(const char *str)
+{
+    for (int a = 0; a < INVALID_ALGORITHM_ML; a++) {
+        AlgorithmML x = (AlgorithmML)a;
+        AlgorithmAPI* api = get_algorithm_api(x);
+        if (strcmp(str, api->name) == 0)
+            return x;
+    }
+
+    ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+        errmsg("Invalid ML algorithm '%s'", str)));
+    return INVALID_ALGORITHM_ML;
+}
+
+// when adding new algorithms, make sure to update the corresponding enum (positions must match)
+const char* kmeans_distance_functions_str[] = {
+    [KMEANS_L1] = "L1",
+    [KMEANS_L2] = "L2",
+    [KMEANS_L2_SQUARED] = "L2_Squared",
+    [KMEANS_LINF] = "Linf"
+};
+const int32_t kmeans_distance_functions_str_size = ARRAY_LENGTH(kmeans_distance_functions_str);
+ASSERT_ELEMENTS_ENUM_TO_STR(kmeans_distance_functions_str, KMEANS_LINF);
+
+const char* kmeans_distance_to_string(DistanceFunction x)
+{
+    return enum_to_string(x);
+}
+
+DistanceFunction get_kmeans_distance(const char *str)
+{
+    return string_to_enum(str, "No known distance function");
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// when adding new algorithms, make sure to update the corresponding enum (positions must match)
+const char* kmeans_seeding_str[] = {
+    [KMEANS_RANDOM_SEED] = "Random++",
+    [KMEANS_BB] = "KMeans||"
+};
+const int32_t kmeans_seeding_str_size = ARRAY_LENGTH(kmeans_seeding_str);
+ASSERT_ELEMENTS_ENUM_TO_STR(kmeans_seeding_str, KMEANS_BB);
+
+const char* kmean_seeding_to_string(SeedingFunction x)
+{
+    return enum_to_string(x);
+}
+
+SeedingFunction get_kmeans_seeding(const char *str)
+{
+    return string_to_enum(str, "No known seeding function");
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// when adding new algorithms, make sure to update the corresponding enum (positions must match)
+const char *metric_ml_str[] = {[METRIC_ML_ACCURACY] = "accuracy",
+                               [METRIC_ML_F1] = "f1",
+                               [METRIC_ML_PRECISION] = "precision",
+                               [METRIC_ML_RECALL] = "recall",
+                               [METRIC_ML_LOSS] = "loss",
+                               [METRIC_ML_MSE] = "mse",
+                               [METRIC_ML_DISTANCE_L1] = "l1",
+                               [METRIC_ML_DISTANCE_L2] = "l2",
+                               [METRIC_ML_DISTANCE_L2_SQUARED] = "l2_squared",
+                               [METRIC_ML_DISTANCE_L_INF] = "l_inf",
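// The designated initializers in these tables pin each string to its enum value,
// and the ASSERT_ELEMENTS_ENUM_TO_STR(...) after each table turns an out-of-sync
// table into a compile-time failure. A minimal sketch of the same guard (the
// Color names below are illustrative, not from this patch):
//
//     enum Color { COLOR_RED, COLOR_BLUE, COLOR_INVALID };
//     const char *color_str[] = {[COLOR_RED] = "red",
//                                [COLOR_BLUE] = "blue",
//                                [COLOR_INVALID] = "invalid"};
//     static_assert(sizeof(color_str) / sizeof(color_str[0]) == COLOR_INVALID + 1,
//                   "color_str and Color are out of sync");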
[METRIC_ML_AUC] = "auc", + [METRIC_ML_AUC_PR] = "aucpr", + [METRIC_ML_MAP] = "map", + [METRIC_ML_RMSE] = "rmse", + [METRIC_ML_RMSLE] = "rmsle", + [METRIC_ML_MAE] = "mae", + [METRIC_ML_INVALID] = "metric_invalid"}; +const int32_t metric_ml_str_size = ARRAY_LENGTH(metric_ml_str); +ASSERT_ELEMENTS_ENUM_TO_STR(metric_ml_str, METRIC_ML_INVALID); + +const char *metric_ml_to_string(MetricML x) +{ + return enum_to_string(x); +} + +MetricML get_metric_ml(const char *str) +{ + return string_to_enum(str, "No known metric"); +} + +/////////////////////////////////////////////////////////////////////////////// + +extern KMeans kmeans; +extern GradientDescent gd_pca; +extern XGBoost xg_reg_logistic; +extern XGBoost xg_bin_logistic; +extern XGBoost xg_reg_sqe; +extern XGBoost xg_reg_gamma; + + +// when adding new algorithms make sure of changing the corresponding position +static AlgorithmAPI *algorithm_apis[] = {&gd_logistic_regression.algo, + &gd_svm_classification.algo, + &gd_linear_regression.algo, + &gd_pca.algo, + &kmeans.algo, + &xg_reg_logistic.algo, // xgboost_regression_logistic + &xg_bin_logistic.algo, // xgboost_binary_logistic + &xg_reg_sqe.algo, // xgboost_regression_squarederror + &xg_reg_gamma.algo, // xgboost_regression_gamma + &gd_multiclass.algo}; + +AlgorithmAPI *get_algorithm_api(AlgorithmML algorithm) +{ + if (algorithm >= INVALID_ALGORITHM_ML || algorithm_apis[algorithm] == nullptr) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid ML algorithm: %d", algorithm))); + + return algorithm_apis[algorithm]; +} + +bool is_supervised(AlgorithmML algorithm) +{ + return ((get_algorithm_api(algorithm)->flags & ALGORITHM_ML_UNSUPERVISED) == 0); +} + diff --git a/src/gausskernel/dbmind/db4ai/catalog/model_warehouse.cpp b/src/gausskernel/dbmind/db4ai/catalog/model_warehouse.cpp index 6396d23a6..4f1ecaf55 100644 --- a/src/gausskernel/dbmind/db4ai/catalog/model_warehouse.cpp +++ b/src/gausskernel/dbmind/db4ai/catalog/model_warehouse.cpp @@ -13,7 +13,7 @@ * See the Mulan PSL v2 for more details. 
*--------------------------------------------------------------------------------------- * - * command.h + * model_warehouse.cpp * * IDENTIFICATION * src/gausskernel/catalog/model_warehouse.cpp @@ -24,6 +24,7 @@ #include "db4ai/model_warehouse.h" #include "db4ai/gd.h" #include "db4ai/aifuncs.h" +#include "db4ai/db4ai_api.h" #include "access/tableam.h" #include "catalog/gs_model.h" #include "catalog/indexing.h" @@ -33,6 +34,7 @@ #include "utils/fmgroids.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/bytea.h" typedef enum ListType { HYPERPARAMETERS = 0, @@ -47,16 +49,6 @@ template void TupleToList(Model *model, Datum *names, Datum template static void add_model_parameter(Model *model, const char *name, Oid type, Datum value); -static Datum string_to_datum(const char *str, Oid datatype); - -void store_SGD(Datum *values, bool *nulls, ModelGradientDescent *SGDmodel); -void get_SGD(HeapTuple *tuple, ModelGradientDescent *resGD, Form_gs_model_warehouse tuplePointer); - -void store_kmeans(Datum *values, bool *nulls, ModelKMeans *kmeansModel); -void get_kmeans(HeapTuple *tuple, ModelKMeans *modelKmeans); -void splitStringFillCentroid(WHCentroid *curseCent, char *strDescribe); -char *splitStringFillCoordinates(WHCentroid *curseCent, char *strCoordinates, int dimension); - // Store the model in the catalog tables void store_model(const Model *model) { @@ -91,6 +83,8 @@ void store_model(const Model *model) values[Anum_gs_model_outputType - 1] = ObjectIdGetDatum(model->return_type); values[Anum_gs_model_query - 1] = CStringGetTextDatum(model->sql); + values[Anum_gs_model_model_type - 1] = CStringGetTextDatum(algorithm_ml_to_string(model->algorithm)); + if (model->hyperparameters == nullptr) { nulls[Anum_gs_model_hyperparametersNames - 1] = true; nulls[Anum_gs_model_hyperparametersValues - 1] = true; @@ -102,17 +96,6 @@ void store_model(const Model *model) values[Anum_gs_model_hyperparametersOids - 1] = ListOids; } - if (model->train_info == nullptr) { - nulls[Anum_gs_model_coefNames - 1] = true; - nulls[Anum_gs_model_coefValues - 1] = true; - nulls[Anum_gs_model_coefOids - 1] = true; - } else { - ListToTuple(model->train_info, &ListNames, &ListValues, &ListOids); - values[Anum_gs_model_coefNames - 1] = ListNames; - values[Anum_gs_model_coefValues - 1] = ListValues; - values[Anum_gs_model_coefOids - 1] = ListOids; - } - if (model->scores == nullptr) { nulls[Anum_gs_model_trainingScoresName - 1] = true; nulls[Anum_gs_model_trainingScoresValue - 1] = true; @@ -122,21 +105,41 @@ void store_model(const Model *model) values[Anum_gs_model_trainingScoresValue - 1] = ListValues; } - switch (model->algorithm) { - case LOGISTIC_REGRESSION: - case SVM_CLASSIFICATION: - case LINEAR_REGRESSION: { - store_SGD(values, nulls, (ModelGradientDescent *)model); - } break; - case KMEANS: { - store_kmeans(values, nulls, (ModelKMeans *)model); - } break; - default: - // do not cache - ereport(NOTICE, (errmsg("clone model for type %d", (int)model->algorithm))); - break; + if (model->data.version != DB4AI_MODEL_UNDEFINED) { + if (model->data.version >= DB4AI_MODEL_INVALID) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS), + errmsg("Invalid model version %d", model->data.version))); + + // prepare an array with the version and the content in hexadecimal format + // add extra two for '\x', two for version + int bc = VARHDRSZ + (model->data.size * 2) + 4; + bytea* arr = (bytea*)palloc(bc); + SET_VARSIZE(arr, bc); + + char* pdata = (char*)VARDATA(arr); + *pdata++ = '\\'; + 
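// Layout of the bytea being assembled here, which is why bc above is
// VARHDRSZ + (model->data.size * 2) + 4:
//
//     [varlena header][ '\' 'x' ][2 hex chars: version byte][2 * size hex chars: raw model data]
//
// setup_model_data_v1() below reverses this encoding: it skips the two '\x'
// characters, hex-decodes one byte into data.version, and then hex-decodes the
// remaining (VARSIZE(arr) - 4 - VARHDRSZ) / 2 payload bytes.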
*pdata++ = 'x'; + + char ch = (char)model->data.version; + pdata += hex_encode(&ch, 1, pdata); + hex_encode((char*)model->data.raw_data, model->data.size, pdata); + + values[Anum_gs_model_modelData - 1] = PointerGetDatum(arr); + } else { + if (model->model_data == 0) + nulls[Anum_gs_model_modelData - 1] = true; + else + values[Anum_gs_model_modelData - 1] = model->model_data; } + // DEPRECATED + nulls[Anum_gs_model_weight - 1] = true; + nulls[Anum_gs_model_modeldescribe - 1] = true; + nulls[Anum_gs_model_coefNames - 1] = true; + nulls[Anum_gs_model_coefValues - 1] = true; + nulls[Anum_gs_model_coefOids - 1] = true; + + // create tuple and insert into model warehouse tuple = heap_form_tuple(rel->rd_att, values, nulls); (void)simple_heap_insert(rel, tuple); CatalogUpdateIndexes(rel, tuple); @@ -144,8 +147,225 @@ void store_model(const Model *model) heap_close(rel, RowExclusiveLock); } +/* get SGD model */ +void get_sgd_model_data(HeapTuple *tuple, ModelGradientDescent *resGD, Form_gs_model_warehouse tuplePointer) +{ + char *strValues; + Datum dtValues; + ArrayBuildState *astate = NULL; + bool isnull = false; + + /* weight */ + resGD->weights = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_weight, &isnull); + + /* categories */ + resGD->ncategories = 0; + Datum dtCat = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_coefValues, &isnull); + + if (!isnull) { + ArrayType *arrValues = DatumGetArrayTypeP(dtCat); + ArrayIterator itValue = array_create_iterator(arrValues, 0); + while (array_iterate(itValue, &dtValues, &isnull)) { + resGD->ncategories++; + strValues = TextDatumGetCString(dtValues); + dtValues = string_to_datum(strValues, tuplePointer->outputtype); + astate = accumArrayResult(astate, dtValues, false, tuplePointer->outputtype, CurrentMemoryContext); + } + resGD->categories = makeArrayResult(astate, CurrentMemoryContext); + } else { + resGD->categories = PointerGetDatum(NULL); + } +} + +char *splitStringFillCoordinates(WHCentroid *curseCent, char *strCoordinates, int dimension) +{ + char *cur, *context = NULL; + Datum dtCur; + int iter = 0; + double *res = (double *)palloc0(dimension * sizeof(double)); + + while (iter < dimension) { + if (iter == 0) { + cur = strtok_r(strCoordinates, ")(,", &context); + } else { + cur = strtok_r(NULL, ")(,", &context); + } + + if (cur != NULL) { + dtCur = string_to_datum(cur, FLOAT8OID); + res[iter] = DatumGetFloat8(dtCur); + iter++; + } else { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("the Coordinates result seems not match their dimension or actual_num_centroids."))); + } + } + curseCent->coordinates = res; + + return context; +} + +void splitStringFillCentroid(WHCentroid *curseCent, char *strDescribe) +{ + char *cur, *name, *context = NULL; + Datum dtCur; + + name = strtok_r(strDescribe, ":,", &context); + cur = strtok_r(NULL, ":,", &context); + while (cur != NULL and name != NULL) { + if (strcmp(name, "id") == 0) { + dtCur = string_to_datum(cur, INT8OID); + curseCent->id = DatumGetUInt32(dtCur); + } else if (strcmp(name, "objective_function") == 0) { + dtCur = string_to_datum(cur, FLOAT8OID); + curseCent->objective_function = DatumGetFloat8(dtCur); + } else if (strcmp(name, "avg_distance_to_centroid") == 0) { + dtCur = string_to_datum(cur, FLOAT8OID); + curseCent->avg_distance_to_centroid = DatumGetFloat8(dtCur); + } else if (strcmp(name, "min_distance_to_centroid") == 0) { + dtCur = string_to_datum(cur, FLOAT8OID); + curseCent->min_distance_to_centroid = DatumGetFloat8(dtCur); + } else if 
(strcmp(name, "max_distance_to_centroid") == 0) { + dtCur = string_to_datum(cur, FLOAT8OID); + curseCent->max_distance_to_centroid = DatumGetFloat8(dtCur); + } else if (strcmp(name, "std_dev_distance_to_centroid") == 0) { + dtCur = string_to_datum(cur, FLOAT8OID); + curseCent->std_dev_distance_to_centroid = DatumGetFloat8(dtCur); + } else if (strcmp(name, "cluster_size") == 0) { + dtCur = string_to_datum(cur, INT8OID); + curseCent->cluster_size = DatumGetUInt64(dtCur); + } else { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INTERNAL_ERROR), + errmsg("this description should not be here in KMEANS: %s", cur))); + } + name = strtok_r(NULL, ":,", &context); + cur = strtok_r(NULL, ":,", &context); + } +} + +void get_kmeans_model_data(HeapTuple *tuple, ModelKMeans *modelKmeans) +{ + Datum dtValue, dtName; + bool isnull; + char *strValue, *strName, *coordinates = NULL; + uint32_t coefContainer; + int offset = 0; + WHCentroid *curseCent; + + modelKmeans->model.algorithm = KMEANS; + + /* coef */ + Datum dtCoefValues = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_coefValues, &isnull); + ArrayType *arrValues = DatumGetArrayTypeP(dtCoefValues); + ArrayIterator itValue = array_create_iterator(arrValues, 0); + + Datum dtCoefNames = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_coefNames, &isnull); + ArrayType *arrNames = DatumGetArrayTypeP(dtCoefNames); + ArrayIterator itName = array_create_iterator(arrNames, 0); + + int decimal_scale = 10; + while (array_iterate(itName, &dtName, &isnull)) { + array_iterate(itValue, &dtValue, &isnull); + strName = TextDatumGetCString(dtName); + strValue = TextDatumGetCString(dtValue); + coefContainer = strtol(strValue, NULL, decimal_scale); + if (strcmp(strName, "original_num_centroids") == 0) { + modelKmeans->original_num_centroids = coefContainer; + } else if (strcmp(strName, "actual_num_centroids") == 0) { + modelKmeans->actual_num_centroids = coefContainer; + } else if (strcmp(strName, "seed") == 0) { + modelKmeans->seed = coefContainer; + } else if (strcmp(strName, "dimension") == 0) { + modelKmeans->dimension = coefContainer; + } else if (strcmp(strName, "distance_function_id") == 0) { + modelKmeans->distance_function_id = coefContainer; + } else if (strcmp(strName, "coordinates") == 0) { + coordinates = strValue; + } else { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INTERNAL_ERROR), + errmsg("the coef should not be here in KMEANS: %s", strName))); + } + } + + modelKmeans->centroids = + reinterpret_cast(palloc0(sizeof(WHCentroid) * modelKmeans->actual_num_centroids)); + + /* describe */ + Datum dtDescribe = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_modeldescribe, &isnull); + ArrayType *arrDescribe = DatumGetArrayTypeP(dtDescribe); + ArrayIterator itDescribe = array_create_iterator(arrDescribe, 0); + + while (array_iterate(itDescribe, &dtName, &isnull)) { + curseCent = modelKmeans->centroids + offset; + strName = TextDatumGetCString(dtName); + coordinates = splitStringFillCoordinates(curseCent, coordinates, modelKmeans->dimension); + splitStringFillCentroid(curseCent, strName); + offset++; + } +} + +void setup_model_data_v0(Model *model, const bool &only_model, const AlgorithmML &algorithm, const char *modelType, + HeapTuple tuple, Form_gs_model_warehouse tuplePointer, void *result) +{ + switch (algorithm) { + case LOGISTIC_REGRESSION: + case SVM_CLASSIFICATION: + case LINEAR_REGRESSION: { + result = palloc0(sizeof(ModelGradientDescent)); + ModelGradientDescent *resGD = (ModelGradientDescent *)result; + 
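// Version-0 (legacy) models spread their payload across the now-deprecated
// weight/coef catalog columns, so every algorithm needs a dedicated reader such
// as get_sgd_model_data() (called on the next line) or get_kmeans_model_data();
// version-1 models are instead decoded uniformly from the single hex-encoded
// modelData blob by setup_model_data_v1().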
get_sgd_model_data(&tuple, resGD, tuplePointer); + model = &(resGD->model); + } break; + case KMEANS: { + result = palloc0(sizeof(ModelKMeans)); + ModelKMeans *resKmeans = (ModelKMeans *)result; + get_kmeans_model_data(&tuple, resKmeans); + model = &(resKmeans->model); + } break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("the type of model is invalid: %s", modelType))); + break; + } +} + +void setup_model_data_v1(Model *model, HeapTuple &tuple, const bool only_model, bool &isnull) +{ + if (!only_model) { + model->model_data = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_modelData, &isnull); + if (isnull) { + model->model_data = 0; + return; + } + + bytea *arr = (bytea *)pg_detoast_datum((struct varlena *)DatumGetPointer(model->model_data)); + char *pdata = (char *)VARDATA(arr); + + char ch; + hex_decode(pdata + 2, 2, &ch); // skip '\x' + model->data.version = (SerializedModelVersion)ch; + if (model->data.version == DB4AI_MODEL_UNDEFINED || model->data.version >= DB4AI_MODEL_INVALID) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS), + errmsg("Invalid model version %d", model->data.version))); + + model->data.size = (VARSIZE(arr) - 4 - VARHDRSZ) / 2; + model->data.raw_data = palloc(model->data.size); + hex_decode(pdata + 4, model->data.size * 2, (char *)model->data.raw_data); + + if (PointerGetDatum(arr) != model->model_data) pfree(arr); + } +} + +static const size_t model_gradient_descent_size = sizeof(ModelGradientDescent); +static const size_t model_kmeans_size = sizeof(ModelKMeans); +static const size_t model_size[] = {[LOGISTIC_REGRESSION] = model_gradient_descent_size, + [SVM_CLASSIFICATION] = model_gradient_descent_size, + [LINEAR_REGRESSION] = model_gradient_descent_size, + [PCA] = 0, + [KMEANS] = model_kmeans_size}; + // Get the model from the catalog tables -Model *get_model(const char *model_name, bool only_model) +const Model *get_model(const char *model_name, bool only_model) { void *result = NULL; Model *model = NULL; @@ -161,46 +381,38 @@ Model *get_model(const char *model_name, bool only_model) } HeapTuple tuple = SearchSysCache1(DB4AI_MODEL, CStringGetDatum(model_name)); - if (!HeapTupleIsValid(tuple)) { + if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("There is no model called \"%s\".", model_name))); - return NULL; - } + Form_gs_model_warehouse tuplePointer = (Form_gs_model_warehouse)GETSTRUCT(tuple); const char *modelType = TextDatumGetCString(SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_model_type, &isnull)); algorithm = get_algorithm_ml(modelType); - if (algorithm == INVALID_ALGORITHM_ML) { + if (algorithm >= INVALID_ALGORITHM_ML) { ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("the type of model is invalid: %s", modelType))); } - if (only_model) { - result = palloc0(sizeof(Model)); - model = (Model *)result; - } else { - switch (algorithm) { - case LOGISTIC_REGRESSION: - case SVM_CLASSIFICATION: - case LINEAR_REGRESSION: { - result = palloc0(sizeof(ModelGradientDescent)); - ModelGradientDescent *resGD = (ModelGradientDescent *)result; - get_SGD(&tuple, resGD, tuplePointer); - model = &(resGD->model); - } break; - case KMEANS: { - result = palloc0(sizeof(ModelKMeans)); - ModelKMeans *resKmeans = (ModelKMeans *)result; - get_kmeans(&tuple, resKmeans); - model = &(resKmeans->model); - } break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), 
errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("the type of model is invalid: %s", modelType))); - break; + model = (Model *) palloc0(sizeof(Model)); + setup_model_data_v1(model, tuple, only_model, isnull); + if (model->model_data == 0) { + pfree(model); + if (only_model) { + result = palloc0(sizeof(Model)); + model = (Model *)result; + } else if (algorithm == LOGISTIC_REGRESSION || algorithm == SVM_CLASSIFICATION || + algorithm == LINEAR_REGRESSION || algorithm == KMEANS) { + result = palloc0(model_size[algorithm]); + setup_model_data_v0(model, only_model, algorithm, modelType, tuple, tuplePointer, result); + model->data.version = DB4AI_MODEL_V00; + } else { + result = palloc0(sizeof(Model)); + model = (Model *)result; } } + model->algorithm = algorithm; model->model_name = model_name; model->exec_time_secs = tuplePointer->exectime; @@ -210,6 +422,7 @@ Model *get_model(const char *model_name, bool only_model) model->return_type = tuplePointer->outputtype; model->num_actual_iterations = tuplePointer->iterations; model->sql = TextDatumGetCString(SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_query, &isnull)); + model->memory_context = CurrentMemoryContext; ListNames = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_hyperparametersNames, &isnull); ListValues = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_hyperparametersValues, &isnullValue); @@ -218,21 +431,17 @@ Model *get_model(const char *model_name, bool only_model) TupleToList(model, &ListNames, &ListValues, &ListOids); } - ListNames = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_coefNames, &isnull); - ListValues = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_coefValues, &isnullValue); - ListOids = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_coefOids, &isnullOid); - if (!isnull && !isnullValue && !isnullOid) { - TupleToList(model, &ListNames, &ListValues, &ListOids); - } - ListNames = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_trainingScoresName, &isnull); ListValues = SysCacheGetAttr(DB4AI_MODEL, tuple, Anum_gs_model_trainingScoresValue, &isnullValue); if (!isnull && !isnullValue) { TupleToList(model, &ListNames, &ListValues, NULL); } + // DEPRECATED + model->weights = 0; + ReleaseSysCache(tuple); - return (Model *)result; + return model; } void elog_model(int level, const Model *model) @@ -245,7 +454,8 @@ void elog_model(int level, const Model *model) const char* model_type = algorithm_ml_to_string(model->algorithm); appendStringInfo(&buf, "\n:type %s", model_type); - appendStringInfo(&buf, "\n:sql %s", model->sql); + if (model->sql != NULL) + appendStringInfo(&buf, "\n:sql %s", model->sql); if (model->hyperparameters != nullptr) { appendStringInfoString(&buf, "\n:hyperparameters"); foreach (lc, model->hyperparameters) { @@ -259,7 +469,6 @@ void elog_model(int level, const Model *model) appendStringInfo(&buf, "\n:exec time %lf s", model->exec_time_secs); appendStringInfo(&buf, "\n:processed %ld tuples", model->processed_tuples); appendStringInfo(&buf, "\n:discarded %ld tuples", model->discarded_tuples); - appendStringInfo(&buf, "\n:actual number of iterations %d", model->num_actual_iterations); if (model->train_info != nullptr) { appendStringInfoString(&buf, "\n:info"); foreach (lc, model->train_info) { @@ -275,37 +484,20 @@ void elog_model(int level, const Model *model) appendStringInfo(&buf, "\n :%s %.16g", score->name, score->value); } } - if (model->algorithm == LOGISTIC_REGRESSION || - model->algorithm == SVM_CLASSIFICATION || - model->algorithm == LINEAR_REGRESSION) { - ModelGradientDescent 
*model_gd = (ModelGradientDescent *)model; - appendStringInfoString(&buf, "\n:gradient_descent:"); - appendStringInfo(&buf, "\n :algorithm %s", gd_get_algorithm(model_gd->model.algorithm)->name); - getTypeOutputInfo(FLOAT4ARRAYOID, &typoutput, &typIsVarlena); - appendStringInfo(&buf, "\n :weights %s", OidOutputFunctionCall(typoutput, model_gd->weights)); - if (model_gd->ncategories > 0) { - Datum dt; - bool isnull; - bool first = true; - struct varlena *src_arr = (struct varlena *)DatumGetPointer(model_gd->categories); - ArrayType *arr = (ArrayType *)pg_detoast_datum(src_arr); - Assert(arr->elemtype == model->return_type); - ArrayIterator it = array_create_iterator(arr, 0); - getTypeOutputInfo(model->return_type, &typoutput, &typIsVarlena); - appendStringInfo(&buf, "\n :categories %d {", model_gd->ncategories); - while (array_iterate(it, &dt, &isnull)) { - Assert(!isnull); - appendStringInfo(&buf, "%s%s", first ? "" : ",", OidOutputFunctionCall(typoutput, dt)); - first = false; - } - appendStringInfoString(&buf, "}"); - - array_free_iterator(it); - if (arr != (ArrayType *)src_arr) - pfree(arr); + AlgorithmAPI* api = get_algorithm_api(model->algorithm); + if (api->explain != nullptr) { + Oid typoutput; + bool typIsVarlena; + ListCell* lc; + List* infos = api->explain(api, &model->data, model->return_type); + foreach(lc, infos) { + TrainingInfo* info = lfirst_node(TrainingInfo, lc); + getTypeOutputInfo(info->type, &typoutput, &typIsVarlena); + appendStringInfo(&buf, "\n:%s %s", info->name, OidOutputFunctionCall(typoutput, info->value)); } } + elog(level, "Model=%s%s", model->model_name, buf.data); pfree(buf.data); } @@ -333,9 +525,14 @@ template void ListToTuple(List *list, Datum *name, Datum *value iter++; } break; case ListType::COEFS: { + Oid typeOut; + bool isvarlena; TrainingInfo *cell = (TrainingInfo *)lfirst(it); t_names = cstring_to_text(cell->name); - t_values = cstring_to_text(Datum_to_string(cell->value, cell->type, false)); + + getTypeOutputInfo(cell->type, &typeOut, &isvarlena); + t_values = cstring_to_text(OidOutputFunctionCall(typeOut, cell->value)); + array_container[iter] = ObjectIdGetDatum(cell->type); astateName = accumArrayResult(astateName, PointerGetDatum(t_names), false, TEXTOID, CurrentMemoryContext); @@ -413,8 +610,13 @@ template void TupleToList(Model *model, Datum *names, Datum array_iterate(itOid, &dtOid, &isnull); strNames = TextDatumGetCString(dtNames); strValues = TextDatumGetCString(dtValues); + + Oid typInput, typIOParam; + getTypeInputInfo(dtOid, &typInput, &typIOParam); + + dtValues = OidInputFunctionCall(typInput, strValues, typIOParam, -1); + tranOids = DatumGetObjectId(dtOid); - dtValues = string_to_datum(strValues, tranOids); add_model_parameter(model, strNames, tranOids, dtValues); } break; case ListType::SCORES: { @@ -460,301 +662,4 @@ template static void add_model_parameter(Model *model, const ch return; } } -} - -static Datum string_to_datum(const char *str, Oid datatype) -{ - switch (datatype) { - case BOOLOID: - return DirectFunctionCall1(boolin, CStringGetDatum(str)); - case INT1OID: - case INT2OID: - case INT4OID: - return Int32GetDatum(atoi(str)); - case INT8OID: - return Int64GetDatum(atoi(str)); - case VARCHAROID: - case BPCHAROID: - case CHAROID: - case TEXTOID: - return CStringGetTextDatum(str); - case FLOAT4OID: - case FLOAT8OID: - return DirectFunctionCall1(float8in, CStringGetDatum(str)); - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("The type is not supported: %d", datatype))); - 
return CStringGetTextDatum(str); - } -} - -/* store SGD model */ -void store_SGD(Datum *values, bool *nulls, ModelGradientDescent *SGDmodel) -{ - nulls[Anum_gs_model_modelData - 1] = true; - nulls[Anum_gs_model_modeldescribe - 1] = true; - - values[Anum_gs_model_model_type - 1] = CStringGetTextDatum(algorithm_ml_to_string(SGDmodel->model.algorithm)); - values[Anum_gs_model_weight - 1] = SGDmodel->weights; - if (SGDmodel->ncategories > 0) { - text *categoriesName, *categoriesValue; - ArrayBuildState *astate = NULL; - Datum dt; - bool isnull; - - nulls[Anum_gs_model_coefNames - 1] = false; - categoriesName = cstring_to_text("categories"); - astate = accumArrayResult(astate, PointerGetDatum(categoriesName), false, TEXTOID, CurrentMemoryContext); - values[Anum_gs_model_coefNames - 1] = makeArrayResult(astate, CurrentMemoryContext); - astate = NULL; - - ArrayType *arr = (ArrayType *)pg_detoast_datum((struct varlena *)DatumGetPointer(SGDmodel->categories)); - ArrayIterator it = array_create_iterator(arr, 0); - while (array_iterate(it, &dt, &isnull)) { - categoriesValue = cstring_to_text(Datum_to_string(dt, SGDmodel->model.return_type, false)); - astate = accumArrayResult(astate, PointerGetDatum(categoriesValue), false, TEXTOID, CurrentMemoryContext); - } - values[Anum_gs_model_coefValues - 1] = makeArrayResult(astate, CurrentMemoryContext); - nulls[Anum_gs_model_coefValues - 1] = false; - } -} - -/* get SGD model */ -void get_SGD(HeapTuple *tuple, ModelGradientDescent *resGD, Form_gs_model_warehouse tuplePointer) -{ - char *strValues; - Datum dtValues; - ArrayBuildState *astate = NULL; - bool isnull = false; - - /* weight */ - resGD->weights = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_weight, &isnull); - - /* categories */ - resGD->ncategories = 0; - Datum dtCat = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_coefValues, &isnull); - - if (!isnull) { - ArrayType *arrValues = DatumGetArrayTypeP(dtCat); - ArrayIterator itValue = array_create_iterator(arrValues, 0); - while (array_iterate(itValue, &dtValues, &isnull)) { - resGD->ncategories++; - strValues = TextDatumGetCString(dtValues); - dtValues = string_to_datum(strValues, tuplePointer->outputtype); - astate = accumArrayResult(astate, dtValues, false, tuplePointer->outputtype, CurrentMemoryContext); - } - resGD->categories = makeArrayResult(astate, CurrentMemoryContext); - } else { - resGD->categories = PointerGetDatum(NULL); - } -} - -/* get kmeans model */ -void store_kmeans(Datum *values, bool *nulls, ModelKMeans *kmeansModel) -{ - ArrayBuildState *astateName = NULL, *astateValue = NULL, *astateDescribe = NULL; - text *tValue, *txDescribe, *txCoodinate; - int lenDescribe = 200 * sizeof(char), lengthD = 0, lengthC = 0; - int lenCoodinate = 15 * kmeansModel->dimension * kmeansModel->actual_num_centroids; - WHCentroid *centroid; - double *coordinateContainer; - char *describeElem, *strCoordinates = (char *)palloc0(lenCoodinate); - - values[Anum_gs_model_outputType - 1] = ObjectIdGetDatum(kmeansModel->model.return_type); - nulls[Anum_gs_model_modelData - 1] = true; - nulls[Anum_gs_model_weight - 1] = true; - values[Anum_gs_model_model_type - 1] = CStringGetTextDatum(algorithm_ml_to_string(KMEANS)); - - tValue = cstring_to_text(Datum_to_string(Int64GetDatum(kmeansModel->original_num_centroids), INT8OID, false)); - astateName = accumArrayResult(astateName, CStringGetTextDatum("original_num_centroids"), false, TEXTOID, - CurrentMemoryContext); - astateValue = accumArrayResult(astateValue, PointerGetDatum(tValue), false, TEXTOID, 
CurrentMemoryContext); - - tValue = cstring_to_text(Datum_to_string(Int64GetDatum(kmeansModel->actual_num_centroids), INT8OID, false)); - astateName = - accumArrayResult(astateName, CStringGetTextDatum("actual_num_centroids"), false, TEXTOID, CurrentMemoryContext); - astateValue = accumArrayResult(astateValue, PointerGetDatum(tValue), false, TEXTOID, CurrentMemoryContext); - - tValue = cstring_to_text(Datum_to_string(Int64GetDatum(kmeansModel->dimension), INT8OID, false)); - astateName = accumArrayResult(astateName, CStringGetTextDatum("dimension"), false, TEXTOID, CurrentMemoryContext); - astateValue = accumArrayResult(astateValue, PointerGetDatum(tValue), false, TEXTOID, CurrentMemoryContext); - - tValue = cstring_to_text(Datum_to_string(Int64GetDatum(kmeansModel->distance_function_id), INT8OID, false)); - astateName = - accumArrayResult(astateName, CStringGetTextDatum("distance_function_id"), false, TEXTOID, CurrentMemoryContext); - astateValue = accumArrayResult(astateValue, PointerGetDatum(tValue), false, TEXTOID, CurrentMemoryContext); - - tValue = cstring_to_text(Datum_to_string(Int64GetDatum(kmeansModel->seed), INT8OID, false)); - astateName = accumArrayResult(astateName, CStringGetTextDatum("seed"), false, TEXTOID, CurrentMemoryContext); - astateValue = accumArrayResult(astateValue, PointerGetDatum(tValue), false, TEXTOID, CurrentMemoryContext); - - astateName = accumArrayResult(astateName, CStringGetTextDatum("coordinates"), false, TEXTOID, CurrentMemoryContext); - - for (uint32_t i = 0; i < kmeansModel->actual_num_centroids; i++) { - lengthD = 0; - describeElem = (char *)palloc0(lenDescribe); - centroid = kmeansModel->centroids + i; - - lengthD = sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "id:%d,", centroid->id); - lengthD += sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "objective_function:%f,", - centroid->objective_function); - lengthD += sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "avg_distance_to_centroid:%f,", - centroid->avg_distance_to_centroid); - lengthD += sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "min_distance_to_centroid:%f,", - centroid->min_distance_to_centroid); - lengthD += sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "max_distance_to_centroid:%f,", - centroid->max_distance_to_centroid); - lengthD += sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "std_dev_distance_to_centroid:%f,", - centroid->std_dev_distance_to_centroid); - lengthD += sprintf_s(describeElem + lengthD, lenDescribe - lengthD, "cluster_size:%d", centroid->cluster_size); - - txDescribe = cstring_to_text(describeElem); - astateDescribe = - accumArrayResult(astateDescribe, PointerGetDatum(txDescribe), false, TEXTOID, CurrentMemoryContext); - - coordinateContainer = centroid->coordinates; - lengthC += sprintf_s(strCoordinates + lengthC, lenCoodinate - lengthC, "("); - for (uint32_t j = 0; j < kmeansModel->dimension; j++) { - lengthC += sprintf_s(strCoordinates + lengthC, lenCoodinate - lengthC, "%f,", coordinateContainer[j]); - } - lengthC--; - lengthC += sprintf_s(strCoordinates + lengthC, lenCoodinate - lengthC, ")"); - } - txCoodinate = cstring_to_text(strCoordinates); - astateValue = accumArrayResult(astateValue, PointerGetDatum(txCoodinate), false, TEXTOID, CurrentMemoryContext); - - values[Anum_gs_model_modeldescribe - 1] = makeArrayResult(astateDescribe, CurrentMemoryContext); - values[Anum_gs_model_coefValues - 1] = makeArrayResult(astateValue, CurrentMemoryContext); - values[Anum_gs_model_coefNames - 1] = 
makeArrayResult(astateName, CurrentMemoryContext); - - nulls[Anum_gs_model_coefValues - 1] = false; - nulls[Anum_gs_model_coefNames - 1] = false; - - return; -} - -void get_kmeans(HeapTuple *tuple, ModelKMeans *modelKmeans) -{ - Datum dtValue, dtName; - bool isnull; - char *strValue, *strName, *coordinates = NULL; - uint32_t coefContainer; - int offset = 0; - WHCentroid *curseCent; - - modelKmeans->model.algorithm = KMEANS; - - /* coef */ - Datum dtCoefValues = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_coefValues, &isnull); - ArrayType *arrValues = DatumGetArrayTypeP(dtCoefValues); - ArrayIterator itValue = array_create_iterator(arrValues, 0); - - Datum dtCoefNames = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_coefNames, &isnull); - ArrayType *arrNames = DatumGetArrayTypeP(dtCoefNames); - ArrayIterator itName = array_create_iterator(arrNames, 0); - - while (array_iterate(itName, &dtName, &isnull)) { - array_iterate(itValue, &dtValue, &isnull); - strName = TextDatumGetCString(dtName); - strValue = TextDatumGetCString(dtValue); - coefContainer = atoi(strValue); - if (strcmp(strName, "original_num_centroids") == 0) { - modelKmeans->original_num_centroids = coefContainer; - } else if (strcmp(strName, "actual_num_centroids") == 0) { - modelKmeans->actual_num_centroids = coefContainer; - } else if (strcmp(strName, "seed") == 0) { - modelKmeans->seed = coefContainer; - } else if (strcmp(strName, "dimension") == 0) { - modelKmeans->dimension = coefContainer; - } else if (strcmp(strName, "distance_function_id") == 0) { - modelKmeans->distance_function_id = coefContainer; - } else if (strcmp(strName, "coordinates") == 0) { - coordinates = strValue; - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INTERNAL_ERROR), - errmsg("the coef should not be here in KMEANS: %s", strName))); - } - } - - modelKmeans->centroids = - reinterpret_cast(palloc0(sizeof(WHCentroid) * modelKmeans->actual_num_centroids)); - - /* describe */ - Datum dtDescribe = SysCacheGetAttr(DB4AI_MODEL, *tuple, Anum_gs_model_modeldescribe, &isnull); - ArrayType *arrDescribe = DatumGetArrayTypeP(dtDescribe); - ArrayIterator itDescribe = array_create_iterator(arrDescribe, 0); - - while (array_iterate(itDescribe, &dtName, &isnull)) { - curseCent = modelKmeans->centroids + offset; - strName = TextDatumGetCString(dtName); - coordinates = splitStringFillCoordinates(curseCent, coordinates, modelKmeans->dimension); - splitStringFillCentroid(curseCent, strName); - offset++; - } -} - -void splitStringFillCentroid(WHCentroid *curseCent, char *strDescribe) -{ - char *cur, *name, *context = NULL; - Datum dtCur; - - name = strtok_r(strDescribe, ":,", &context); - cur = strtok_r(NULL, ":,", &context); - while (cur != NULL and name != NULL) { - if (strcmp(name, "id") == 0) { - dtCur = string_to_datum(cur, INT8OID); - curseCent->id = DatumGetUInt32(dtCur); - } else if (strcmp(name, "objective_function") == 0) { - dtCur = string_to_datum(cur, FLOAT8OID); - curseCent->objective_function = DatumGetFloat8(dtCur); - } else if (strcmp(name, "avg_distance_to_centroid") == 0) { - dtCur = string_to_datum(cur, FLOAT8OID); - curseCent->avg_distance_to_centroid = DatumGetFloat8(dtCur); - } else if (strcmp(name, "min_distance_to_centroid") == 0) { - dtCur = string_to_datum(cur, FLOAT8OID); - curseCent->min_distance_to_centroid = DatumGetFloat8(dtCur); - } else if (strcmp(name, "max_distance_to_centroid") == 0) { - dtCur = string_to_datum(cur, FLOAT8OID); - curseCent->max_distance_to_centroid = DatumGetFloat8(dtCur); - } else if 
(strcmp(name, "std_dev_distance_to_centroid") == 0) { - dtCur = string_to_datum(cur, FLOAT8OID); - curseCent->std_dev_distance_to_centroid = DatumGetFloat8(dtCur); - } else if (strcmp(name, "cluster_size") == 0) { - dtCur = string_to_datum(cur, INT8OID); - curseCent->cluster_size = DatumGetUInt64(dtCur); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INTERNAL_ERROR), - errmsg("this description should not be here in KMEANS: %s", cur))); - } - name = strtok_r(NULL, ":,", &context); - cur = strtok_r(NULL, ":,", &context); - } -} - -char *splitStringFillCoordinates(WHCentroid *curseCent, char *strCoordinates, int dimension) -{ - char *cur, *context = NULL; - Datum dtCur; - int iter = 0; - double *res = (double *)palloc0(dimension * sizeof(double)); - - while (iter < dimension) { - if (iter == 0) { - cur = strtok_r(strCoordinates, ")(,", &context); - } else { - cur = strtok_r(NULL, ")(,", &context); - } - - if (cur != NULL) { - dtCur = string_to_datum(cur, FLOAT8OID); - res[iter] = DatumGetFloat8(dtCur); - iter++; - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("the Coordinates result seems not match their dimension or actual_num_centroids."))); - } - } - curseCent->coordinates = res; - - return context; -} +} \ No newline at end of file diff --git a/src/gausskernel/dbmind/db4ai/commands/Makefile b/src/gausskernel/dbmind/db4ai/commands/Makefile index a2dd83c64..eef60fb35 100644 --- a/src/gausskernel/dbmind/db4ai/commands/Makefile +++ b/src/gausskernel/dbmind/db4ai/commands/Makefile @@ -17,6 +17,6 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -OBJS = create_model.o predict_by.o +OBJS = create_model.o predict_by.o explain_model.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/dbmind/db4ai/commands/create_model.cpp b/src/gausskernel/dbmind/db4ai/commands/create_model.cpp index df8072e88..240684ed4 100644 --- a/src/gausskernel/dbmind/db4ai/commands/create_model.cpp +++ b/src/gausskernel/dbmind/db4ai/commands/create_model.cpp @@ -29,7 +29,6 @@ #include "db4ai/hyperparameter_validation.h" #include "catalog/indexing.h" #include "executor/executor.h" -#include "executor/node/nodeKMeans.h" #include "nodes/value.h" #include "parser/analyze.h" #include "rewrite/rewriteHandler.h" @@ -38,24 +37,27 @@ #include "utils/lsyscache.h" #include "utils/rel.h" #include "workload/workload.h" -#include "executor/node/nodeGD.h" +#include "executor/node/nodeTrainModel.h" #include "db4ai/aifuncs.h" +#include "db4ai/db4ai_api.h" #include "utils/builtins.h" +#include "db4ai/gd.h" +#include "db4ai/fp_ops.h" +#include "optimizer/planmain.h" extern void exec_simple_plan(PlannedStmt *plan); // defined in postgres.cpp -bool verify_pgarray(ArrayType const * pg_array, int32_t n); // defined in kmeans.cpp /* * Common setup needed by both normal execution and EXPLAIN ANALYZE. * This setup is adapted from SetupForCreateTableAs */ static Query *setup_for_create_model(Query *query, /* IntoClause *into, */ const char *queryString, - ParamListInfo params /* , DestReceiver *dest */) + ParamListInfo params /* , DestReceiver *dest */) { List *rewritten = NIL; - + Assert(query->commandType == CMD_SELECT); - + /* * Parse analysis was done already, but we still have to run the rule * rewriter. We do not do AcquireRewriteLocks: we assume the query either @@ -68,289 +70,162 @@ static Query *setup_for_create_model(Query *query, /* IntoClause *into, */ const * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) 
*/ rewritten = QueryRewrite((Query *)copyObject(query)); - + /* SELECT should never rewrite to more or less than one SELECT query */ if (list_length(rewritten) != 1) { ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Unexpected rewrite result for CREATE MODEL statement"))); + errmsg("Unexpected rewrite result for CREATE MODEL statement"))); } - + query = (Query *)linitial(rewritten); - + return query; } -// Create an GradientDescent execution node with a given configuration -static GradientDescent *create_gd_node(AlgorithmML algorithm, List *hyperparameters, DestReceiverTrainModel *dest) -{ - GradientDescent *gd_node = makeNode(GradientDescent); - gd_node->algorithm = algorithm; - configure_hyperparameters(algorithm, hyperparameters, dest->model, gd_node); - if (gd_node->seed == 0) { - gd_node->seed = time(NULL); // it is not set to zero again (zero is the epoch in the past) - update_model_hyperparameter(dest->model, "seed", INT4OID, Int32GetDatum(gd_node->seed)); +static void add_operator_to_plan(PlannedStmt *plan, Plan *plan_operator) +{ + plan_operator->lefttree = plan->planTree; + plan->planTree = plan_operator; +} + + +// Optimize the original query subplan, acoording to the train model operation +static void optimize_train_model_subplan(PlannedStmt *plan, TrainModel *train_model, AlgorithmAPI *api) +{ + Plan *original_plan = plan->planTree; + + // Check if the plan could be improved by adding a materialize node before train model + bool already_materializes = ExecMaterializesOutput(original_plan->type) || + (IsA(original_plan, SubqueryScan) && + ExecMaterializesOutput(((SubqueryScan *) original_plan)->subplan->type)); + if ((api->flags & ALGORITHM_ML_RESCANS_DATA) && + u_sess->attr.attr_sql.enable_material && !already_materializes) { + + Plan *materialized_plan = materialize_finished_plan(plan->planTree, true); + add_operator_to_plan(plan, materialized_plan); } - - gd_node->plan.type = T_GradientDescent; - gd_node->plan.targetlist = makeGradientDescentExpr(algorithm, nullptr, 1); - dest->targetlist = gd_node->plan.targetlist; - - return gd_node; } -// Add a GradientDescent operator at the root of the plan -static PlannedStmt *add_GradientDescent_to_plan(PlannedStmt *plan, AlgorithmML algorithm, List *hyperparameters, - DestReceiverTrainModel *dest) + +// Add a train model operator at the root of the plan +static void add_train_model_to_plan(PlannedStmt *plan, AlgorithmML algorithm, List *hyperparameters, + MemoryContext cxt, DestReceiverTrainModel *dest) { - GradientDescent *gd_node = create_gd_node(algorithm, hyperparameters, dest); - gd_node->plan.lefttree = plan->planTree; - plan->planTree = &gd_node->plan; - - return plan; -} - -static DistanceFunction get_kmeans_distance(const char *distance_func) -{ - DistanceFunction distance = KMEANS_L2_SQUARED; - - if (strcmp(distance_func, "L1") == 0) - distance = KMEANS_L1; - else if (strcmp(distance_func, "L2") == 0) - distance = KMEANS_L2; - else if (strcmp(distance_func, "L2_Squared") == 0) - distance = KMEANS_L2_SQUARED; - else if (strcmp(distance_func, "Linf") == 0) - distance = KMEANS_LINF; - else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("No known distance function chosen. 
Current candidates are: " - "L1, L2, L2_Squared (default), Linf"))); - } - - return distance; -} - -static SeedingFunction get_kmeans_seeding(const char *seeding_func) -{ - SeedingFunction seeding = KMEANS_RANDOM_SEED; - - if (strcmp(seeding_func, "Random++") == 0) - seeding = KMEANS_RANDOM_SEED; - else if (strcmp(seeding_func, "KMeans||") == 0) - seeding = KMEANS_BB; - else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("No known seeding function chosen. Current candidates are: Random++ (default), KMeans||"))); - } - return seeding; -} - -static KMeans *create_kmeans_node(AlgorithmML const algorithm, List *hyperparameters, DestReceiverTrainModel *dest) -{ - KMeans *kmeans_node = makeNode(KMeans); - char *distance_func = nullptr; - char *seeding_func = nullptr; - double tolerance = 0.; - int32_t num_iterations = 0; - int32_t num_centroids = 0; - int32_t const max_num_centroids = 1000000; - int32_t batch_size = 0; - int32_t num_features = 0; - int32_t external_seed = 0; - int32_t verbosity = 0; - - auto kmeans_model = reinterpret_cast(dest->model); - HyperparameterValidation validation; - errno_t rc = memset_s(&validation, sizeof(HyperparameterValidation), 0, sizeof(HyperparameterValidation)); - securec_check(rc, "\0", "\0"); + TrainModel *pnode = makeNode(TrainModel); + pnode->algorithm = algorithm; + pnode->configurations = 1; + pnode->hyperparameters = (const ModelHyperparameters **)palloc(sizeof(ModelHyperparameters *)); + pnode->cxt = cxt; - kmeans_node->algorithm = algorithm; - kmeans_node->plan.type = T_KMeans; - kmeans_model->model.return_type = INT4OID; + AlgorithmAPI *api = get_algorithm_api(algorithm); + Assert(api->make_hyperparameters != nullptr); + Assert(api->get_hyperparameters_definitions != nullptr); + + ModelHyperparameters *hyperp = api->make_hyperparameters(api); + pnode->hyperparameters[0] = hyperp; + + // put the hyperparameters into the node using default values when needed + // and register the hyperparameter values also into the output structures + int32_t definitions_size; + const HyperparameterDefinition *definitions = api->get_hyperparameters_definitions(api, &definitions_size); + init_hyperparameters_with_defaults(definitions, definitions_size, hyperp); + configure_hyperparameters_vset(definitions, definitions_size, hyperparameters, hyperp); + if (api->update_hyperparameters != nullptr) + api->update_hyperparameters(api, hyperp); - set_hyperparameter("max_iterations", &num_iterations, hyperparameters, 10, dest->model, &validation); + dest->hyperparameters = prepare_model_hyperparameters(definitions, definitions_size, hyperp, dest->memcxt); - if (unlikely(num_iterations <= 0)) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("max_iterations must be in [1, %d]", INT_MAX))); - } else { - kmeans_node->parameters.num_iterations = num_iterations; - } - set_hyperparameter("num_centroids", &num_centroids, hyperparameters, 10, dest->model, &validation); - - if (unlikely((num_centroids <= 0) || (num_centroids > max_num_centroids))) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("num_centroids must be in [1, %d]", max_num_centroids))); - } else { - kmeans_node->parameters.num_centroids = num_centroids; - } - set_hyperparameter("tolerance", &tolerance, hyperparameters, 0.00001, dest->model, &validation); - - if (unlikely((tolerance <= 0.) 
|| (tolerance > 1.))) { - ereport(ERROR, - (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("tolerance must be in (0, 1.0]"))); - } else { - kmeans_node->parameters.tolerance = tolerance; - } - set_hyperparameter("batch_size", &batch_size, hyperparameters, 10, dest->model, &validation); - - if (unlikely((batch_size <= 0) || (batch_size > max_num_centroids))) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("batch_size must be in [1, %d]", max_num_centroids))); - } else { - kmeans_node->description.batch_size = batch_size; - } - set_hyperparameter("num_features", &num_features, hyperparameters, 2, dest->model, &validation); - - if (unlikely((num_features <= 0) || (num_features > max_num_centroids))) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("num_features must be in [1, %d]", max_num_centroids))); - } else { - kmeans_node->description.n_features = num_features; - } - set_hyperparameter("distance_function", &distance_func, hyperparameters, "L2_Squared", dest->model, - &validation); - - kmeans_node->description.distance = get_kmeans_distance(distance_func); - - set_hyperparameter("seeding_function", &seeding_func, hyperparameters, "Random++", dest->model, - &validation); - - kmeans_node->description.seeding = get_kmeans_seeding(seeding_func); - - set_hyperparameter("verbose", &verbosity, hyperparameters, false, dest->model, &validation); - - if (verbosity < 0 || verbosity > 2) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Verbosity level must be between 0 (no output), 1 (less output), or 2 (full output)"))); - else - kmeans_node->description.verbosity = static_cast(verbosity); - - /* - * unfortunately the system parses an int64_t as T_Float whenever the it does not fit into a int32_t - * thus, an int64_t might be internally parsed as a T_Integer or a T_Float depending on whether - * the precision fits into an int32_t. thus, we accept a small seed (int32_t) that is xor'ed with - * a random long internal seed. - */ - set_hyperparameter("seed", &external_seed, hyperparameters, 0, dest->model, &validation); - - /* - * the seed used for the algorithm is the xor of the seed provided by the user with - * a random (but fixed) internal seed. 
as long as the internal seed is kept unchanged - * results will be reproducible (see nodeKMeans.cpp) - */ - if (external_seed < 0) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("seed must be in [0, %d]", INT_MAX))); - else - kmeans_node->parameters.external_seed = static_cast(external_seed); - - /* - * these fields are propagated all the way to store_model and used for prediction - * the value of fields of ModelKMeans not set here change during execution - * and thus are set in the very end, when the model is about to be stored - */ - kmeans_model->dimension = kmeans_node->description.n_features; - kmeans_model->original_num_centroids = kmeans_node->parameters.num_centroids; - kmeans_model->distance_function_id = kmeans_node->description.distance; - - pfree(distance_func); - pfree(seeding_func); - return kmeans_node; + optimize_train_model_subplan(plan, pnode, api); + + add_operator_to_plan(plan, &pnode->plan); } -// Add a k-means operator at the root of the plan -static PlannedStmt *add_kmeans_to_plan(PlannedStmt *plan, AlgorithmML algorithm, List *hyperparameters, - DestReceiverTrainModel *dest) -{ - if (unlikely(algorithm != KMEANS)) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Algorithm is not the expected %u (k-means). Provided %u", KMEANS, algorithm))); - } - - KMeans *kmeans_node = create_kmeans_node(algorithm, hyperparameters, dest); - - kmeans_node->plan.lefttree = plan->planTree; - plan->planTree = &kmeans_node->plan; - - return plan; -} - - // Add the ML algorithm at the root of the plan according to the CreateModelStmt -static PlannedStmt *add_create_model_to_plan(CreateModelStmt *stmt, PlannedStmt *plan, DestReceiverTrainModel *dest) +static void add_create_model_to_plan(CreateModelStmt *stmt, PlannedStmt *plan, + DestReceiverTrainModel *dest, MemoryContext cxt) { - PlannedStmt *result = NULL; - switch (stmt->algorithm) { - case LOGISTIC_REGRESSION: - case SVM_CLASSIFICATION: - case LINEAR_REGRESSION: { - result = add_GradientDescent_to_plan(plan, stmt->algorithm, stmt->hyperparameters, dest); - break; - } - case KMEANS: { - result = add_kmeans_to_plan(plan, stmt->algorithm, stmt->hyperparameters, dest); - break; - } - case INVALID_ALGORITHM_ML: - default: { - char *s = "logistic_regression, svm_classification, linear_regression, kmeans"; - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + if (stmt->algorithm >= INVALID_ALGORITHM_ML) { + char *s = "logistic_regression, svm_classification, linear_regression, kmeans, pca, " + "xgboost_regression_logistic, xgboost_binary_logistic, xgboost_regression_squarederror, " + "xgboost_regression_gamma, dectree_classification, dectree_regression"; + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Architecture %s is not supported. 
Supported architectures: %s", stmt->architecture, s))); - } } - return result; + + add_train_model_to_plan(plan, stmt->algorithm, stmt->hyperparameters, cxt, dest); } // Create the query plan with the appropriate machine learning model PlannedStmt *plan_create_model(CreateModelStmt *stmt, const char *query_string, ParamListInfo params, - DestReceiver *dest) + DestReceiver *dest, MemoryContext cxt) { Query *query = (Query *)stmt->select_query; PlannedStmt *plan = NULL; - + query = setup_for_create_model(query, query_string, params); - + /* plan the query */ plan = pg_plan_query(query, 0, params); - + // Inject the GradientDescent node at the root of the plan - plan = add_create_model_to_plan(stmt, plan, (DestReceiverTrainModel *)dest); - + DestReceiverTrainModel *dest_train_model = (DestReceiverTrainModel *)dest; + + add_create_model_to_plan(stmt, plan, dest_train_model, cxt); + return plan; } // Prepare the DestReceiver for training -void configure_dest_receiver_train_model(DestReceiverTrainModel *dest, AlgorithmML algorithm, const char *model_name, - const char *sql) +void configure_dest_receiver_train_model(DestReceiverTrainModel *dest, MemoryContext context, AlgorithmML algorithm, + const char *model_name, const char *sql, bool automatic_save) { - switch (algorithm) { - case LOGISTIC_REGRESSION: - case LINEAR_REGRESSION: - case SVM_CLASSIFICATION: { - dest->model = (Model *)palloc0(sizeof(ModelGradientDescent)); - break; - } - case KMEANS: { - dest->model = (Model *)palloc0(sizeof(ModelKMeans)); - break; - } - default: { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + if (algorithm >= INVALID_ALGORITHM_ML) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Unsupported model type in model warehouse %d", algorithm))); - } - } - dest->model->algorithm = algorithm; - dest->model->model_name = pstrdup(model_name); - dest->model->sql = sql; + + MemoryContext old_context = MemoryContextSwitchTo(context); + dest->memcxt = context; + dest->algorithm = algorithm; + dest->model_name = pstrdup(model_name); + dest->sql = sql; + dest->hyperparameters = NULL; dest->targetlist = nullptr; + dest->save_model = automatic_save; + MemoryContextSwitchTo(old_context); +} + + +void exec_create_model_planned(QueryDesc *queryDesc, char *completionTag) +{ + /* call ExecutorStart to prepare the plan for execution */ + ExecutorStart(queryDesc, 0); + + /* workload client manager */ + if (ENABLE_WORKLOAD_CONTROL) { + WLMInitQueryPlan(queryDesc); + dywlm_client_manager(queryDesc); + } + + ScanDirection dir = ForwardScanDirection; + + /* run the plan */ + ExecutorRun(queryDesc, dir, 0L); + + /* save the rowcount if we're given a completionTag to fill */ + if (completionTag != NULL) { + errno_t rc; + rc = snprintf_s(completionTag, COMPLETION_TAG_BUFSIZE, COMPLETION_TAG_BUFSIZE - 1, + "MODEL CREATED. 
PROCESSED %lu", queryDesc->estate->es_processed); + securec_check_ss(rc, "\0", "\0"); + } + + /* and clean up */ + ExecutorFinish(queryDesc); + ExecutorEnd(queryDesc); } -/* -* ExecCreateTableAs -- execute a CREATE TABLE AS command -*/ void exec_create_model(CreateModelStmt *stmt, const char *queryString, ParamListInfo params, char *completionTag) { #ifdef ENABLE_MULTIPLE_NODES @@ -358,19 +233,29 @@ void exec_create_model(CreateModelStmt *stmt, const char *queryString, ParamList errmsg("No support for distributed scenarios yet."))); #endif DestReceiverTrainModel *dest = NULL; - + PlannedStmt *plan = NULL; QueryDesc *queryDesc = NULL; - ScanDirection dir; - + + // We create the model, and all AutoML structures in the db4ai context. This context + // is normally allocated as a subcontext of portal context, and lasts longer than the execution + // of the query plan for training + MemoryContext db4ai_context = AllocSetContextCreate(CurrentMemoryContext, "db4ai_context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext old = MemoryContextSwitchTo(db4ai_context); + /* * Create the tuple receiver object and insert hyperp it will need */ dest = (DestReceiverTrainModel *)CreateDestReceiver(DestTrainModel); - configure_dest_receiver_train_model(dest, (AlgorithmML)stmt->algorithm, stmt->model, queryString); - - plan = plan_create_model(stmt, queryString, params, (DestReceiver *)dest); - + configure_dest_receiver_train_model(dest, CurrentMemoryContext, + (AlgorithmML)stmt->algorithm, stmt->model, queryString, + true); + + plan = plan_create_model(stmt, queryString, params, (DestReceiver *)dest, db4ai_context); + /* * Use a snapshot with an updated command ID to ensure this query sees * results of any previously executed queries. (This could only matter if @@ -380,7 +265,7 @@ void exec_create_model(CreateModelStmt *stmt, const char *queryString, ParamList */ PushCopiedSnapshot(GetActiveSnapshot()); UpdateActiveSnapshotCommandId(); - + /* Create a QueryDesc, redirecting output to our tuple receiver */ queryDesc = CreateQueryDesc(plan, queryString, GetActiveSnapshot(), InvalidSnapshot, &dest->dest, params, 0); @@ -392,338 +277,32 @@ void exec_create_model(CreateModelStmt *stmt, const char *queryString, ParamList /* Check if need track resource */ u_sess->exec_cxt.need_track_resource = WLMNeedTrackResource(queryDesc); } - - /* call ExecutorStart to prepare the plan for execution */ - ExecutorStart(queryDesc, 0); - - /* workload client manager */ - if (ENABLE_WORKLOAD_CONTROL) { - WLMInitQueryPlan(queryDesc); - dywlm_client_manager(queryDesc); - } - - dir = ForwardScanDirection; - - /* run the plan */ - ExecutorRun(queryDesc, dir, 0L); - - /* save the rowcount if we're given a completionTag to fill */ - if (completionTag != NULL) { - errno_t rc; - rc = snprintf_s(completionTag, COMPLETION_TAG_BUFSIZE, COMPLETION_TAG_BUFSIZE - 1, - "MODEL CREATED. 
PROCESSED %lu", queryDesc->estate->es_processed); - securec_check_ss(rc, "\0", "\0"); - } - - /* and clean up */ - ExecutorFinish(queryDesc); - ExecutorEnd(queryDesc); - + print_hyperparameters(DEBUG1, dest->hyperparameters); + exec_create_model_planned(queryDesc, completionTag); + FreeQueryDesc(queryDesc); - PopActiveSnapshot(); + + MemoryContextSwitchTo(old); } -static void store_gd_expr_in_model(Datum dt, Oid type, int col, GradientDescentExprField field, Model *model, - ModelGradientDescent *model_gd, TupleDesc tupdesc) -{ - switch (field) { - case GD_EXPR_ALGORITHM: - Assert(type == INT4OID); - model_gd->model.algorithm = (AlgorithmML)DatumGetInt32(dt); - break; - case GD_EXPR_OPTIMIZER: - break; // Ignore field - case GD_EXPR_RESULT_TYPE: - Assert(type == OIDOID); - model->return_type = DatumGetUInt32(dt); - break; - case GD_EXPR_NUM_ITERATIONS: - Assert(type == INT4OID); - model->num_actual_iterations = DatumGetInt32(dt); - break; - case GD_EXPR_EXEC_TIME_MSECS: - Assert(type == FLOAT4OID); - model->exec_time_secs = DatumGetFloat4(dt) / 1000.0; - break; - case GD_EXPR_PROCESSED_TUPLES: - Assert(type == INT4OID); - model->processed_tuples = DatumGetInt32(dt); - break; - case GD_EXPR_DISCARDED_TUPLES: - Assert(type == INT4OID); - model->discarded_tuples = DatumGetInt32(dt); - break; - case GD_EXPR_WEIGHTS: - Assert(type == FLOAT4ARRAYOID); - model_gd->weights = datumCopy(dt, tupdesc->attrs[col]->attbyval, tupdesc->attrs[col]->attlen); - break; - case GD_EXPR_CATEGORIES: { - ArrayType *arr = (ArrayType *)DatumGetPointer(dt); - model_gd->ncategories = ARR_DIMS(arr)[0]; - model_gd->categories = - datumCopy(dt, tupdesc->attrs[col]->attbyval, tupdesc->attrs[col]->attlen); - } break; - default: - (void)type; - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Model warehouse for GradientDescent field %d not implemented", field))); - break; - } -} - -static void store_tuple_gd_in_model_warehouse(TupleTableSlot *slot, DestReceiverTrainModel *dest) -{ - Assert(dest->targetlist != nullptr); - - TupleDesc tupdesc = slot->tts_tupleDescriptor; - Model *model = dest->model; - model->pre_time_secs = 0.0; - - ModelGradientDescent *model_gd = (ModelGradientDescent *)model; - model_gd->ncategories = 0; - model_gd->model.algorithm = INVALID_ALGORITHM_ML; // undefined - - int col = 0; - ListCell *lc; - foreach (lc, dest->targetlist) { - TargetEntry *target = lfirst_node(TargetEntry, lc); - GradientDescentExpr *expr = (GradientDescentExpr *)target->expr; - if (!slot->tts_isnull[col]) { - Datum dt = slot->tts_values[col]; - Oid type = tupdesc->attrs[col]->atttypid; - if ((expr->field & GD_EXPR_SCORE) != 0) { - Assert(type == FLOAT4OID); - TrainingScore *score = (TrainingScore *)palloc0(sizeof(TrainingScore)); - score->name = pstrdup(target->resname); - score->value = DatumGetFloat4(dt); - model->scores = lappend(model->scores, score); - } else { - store_gd_expr_in_model(dt, type, col, expr->field, model, model_gd, tupdesc); - } - } - col++; - } -} - -static void store_kmeans_data_in_model(uint32_t natts, TupleDesc tupdesc, Datum *values, bool *nulls, - Model *model, ModelKMeans *model_kmeans) -{ - ArrayType *centroid_ids = nullptr; - ArrayType *centroid_coordinates = nullptr; - ArrayType *objective_functions = nullptr; - ArrayType *avg_distances = nullptr; - ArrayType *min_distances = nullptr; - ArrayType *max_distances = nullptr; - ArrayType *std_dev_distances = nullptr; - ArrayType *cluster_sizes = nullptr; - /* these are the inner-facing arrays */ - int32_t 
*centroid_ids_data = nullptr; - double *centroid_coordiates_data = nullptr; - double *objective_functions_data = nullptr; - double *avg_distances_data = nullptr; - double *min_distances_data = nullptr; - double *max_distances_data = nullptr; - double *std_dev_distances_data = nullptr; - int64_t *cluster_sizes_data = nullptr; - Oid oid = 0; - Datum attr = 0; - - /* - * for tuple at a time we only use one centroid at a time - */ - for (uint32_t a = 0; a < natts; ++a) { - if (unlikely(nulls[a])) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("Encountered null attribute %u when serializing k-means model when it should not", a))); - } - - oid = tupdesc->attrs[a]->atttypid; - attr = values[a]; - - /* - * this switch has to match exactly the schema of the row we return (see nodeKMeans.cpp) - * there is a single row (quite big in general) thus the switch executes only once - */ - switch (a) { - case 0: - // centroids ids of type INT4ARRAYOID - Assert(oid == INT4ARRAYOID); - centroid_ids = reinterpret_cast(DatumGetPointer(attr)); - centroid_ids_data = reinterpret_cast(ARR_DATA_PTR(centroid_ids)); - break; - case 1: - // centroids coordinates of type FLOAT8ARRAYOID - Assert(oid == FLOAT8ARRAYOID); - /* - * in tuple at a time, this memory reference is valid until we store the centroid - * in the model warehouse - */ - centroid_coordinates = reinterpret_cast(DatumGetPointer(attr)); - centroid_coordiates_data = reinterpret_cast(ARR_DATA_PTR(centroid_coordinates)); - break; - case 2: - // value of the objective functions (per cluster) of type FLOAT8ARRAYOID - Assert(oid == FLOAT8ARRAYOID); - objective_functions = reinterpret_cast(DatumGetPointer(attr)); - objective_functions_data = reinterpret_cast(ARR_DATA_PTR(objective_functions)); - break; - case 3: - // avg distance of the clusters of type FLOAT8ARRAYOID - Assert(oid == FLOAT8ARRAYOID); - avg_distances = reinterpret_cast(DatumGetPointer(attr)); - avg_distances_data = reinterpret_cast(ARR_DATA_PTR(avg_distances)); - break; - case 4: - // min distance of the clusters of type FLOAT8ARRAYOID - Assert(oid == FLOAT8ARRAYOID); - min_distances = reinterpret_cast(DatumGetPointer(attr)); - min_distances_data = reinterpret_cast(ARR_DATA_PTR(min_distances)); - break; - case 5: - // max distance of the clusters of type FLOAT8ARRAYOID - Assert(oid == FLOAT8ARRAYOID); - max_distances = reinterpret_cast(DatumGetPointer(attr)); - max_distances_data = reinterpret_cast(ARR_DATA_PTR(max_distances)); - break; - case 6: - // standard deviation of clusters of type FLOAT8ARRAYOID - Assert(oid == FLOAT8ARRAYOID); - std_dev_distances = reinterpret_cast(DatumGetPointer(attr)); - std_dev_distances_data = reinterpret_cast(ARR_DATA_PTR(std_dev_distances)); - break; - case 7: - // cluster sizes of type INT8ARRAYOID - Assert(oid == INT8ARRAYOID); - cluster_sizes = reinterpret_cast(DatumGetPointer(attr)); - cluster_sizes_data = reinterpret_cast(ARR_DATA_PTR(cluster_sizes)); - break; - case 8: - // num good points of type INT8OID - Assert(oid == INT8OID); - model->processed_tuples = DatumGetInt64(attr); - break; - case 9: - // num bad point of type INT8OID - Assert(oid == INT8OID); - model->discarded_tuples = DatumGetInt64(attr); - break; - case 10: - // seedings time (secs) of type FLOAT8OID - Assert(oid == FLOAT8OID); - model->pre_time_secs = DatumGetFloat8(attr); - break; - case 11: - // execution time (secs) of type FLOAT8OID - Assert(oid == FLOAT8OID); - model->exec_time_secs = DatumGetFloat8(attr); - break; - case 12: - // actual 
number of iterations INT4OID - Assert(oid == INT4OID); - model->num_actual_iterations = DatumGetInt32(attr); - break; - case 13: - // actual number of centroids INT4OID - Assert(oid == INT4OID); - model_kmeans->actual_num_centroids = DatumGetInt32(attr); - break; - case 14: - // seed used for computations - Assert(oid == INT8OID); - model_kmeans->seed = DatumGetInt64(attr); - break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Unknown attribute %u when serializing k-means model", a))); - } - } - - uint32_t const actual_num_centroids = model_kmeans->actual_num_centroids; - uint32_t const dimension = model_kmeans->dimension; - uint32_t centroid_coordinates_offset = 0; - WHCentroid *current_centroid = nullptr; - - /* - * at this point we have extracted all the attributes and the memory representation - * of the model can be constructed so that it can be stored in the model warehouse - */ - model_kmeans->centroids = reinterpret_cast(palloc0(sizeof(WHCentroid) * actual_num_centroids)); - - /* - * we fill in the information of every centroid - */ - for (uint32_t current_centroid_idx = 0; current_centroid_idx < actual_num_centroids; ++current_centroid_idx) { - current_centroid = model_kmeans->centroids + current_centroid_idx; - current_centroid->id = centroid_ids_data[current_centroid_idx]; - current_centroid->objective_function = objective_functions_data[current_centroid_idx]; - current_centroid->avg_distance_to_centroid = avg_distances_data[current_centroid_idx]; - current_centroid->min_distance_to_centroid = min_distances_data[current_centroid_idx]; - current_centroid->max_distance_to_centroid = max_distances_data[current_centroid_idx]; - current_centroid->std_dev_distance_to_centroid = std_dev_distances_data[current_centroid_idx]; - current_centroid->cluster_size = cluster_sizes_data[current_centroid_idx]; - current_centroid->coordinates = centroid_coordiates_data + centroid_coordinates_offset; - centroid_coordinates_offset += dimension; - } -} - -static void store_tuple_kmeans_in_model_warehouse(TupleTableSlot *slot, DestReceiverTrainModel *dest) -{ - /* - * sanity checks - */ - Assert(slot != NULL); - Assert(!slot->tts_isempty); - Assert(slot->tts_nvalid == NUM_ATTR_OUTPUT); - Assert(slot->tts_tupleDescriptor != NULL); - Assert(!TTS_HAS_PHYSICAL_TUPLE(slot)); - - TupleDesc tupdesc = slot->tts_tupleDescriptor; - auto model_kmeans = reinterpret_cast(dest->model); - Model *model = &model_kmeans->model; - - if (unlikely(slot->tts_isempty)) - return; - - uint32_t const natts = slot->tts_nvalid; - /* - * the slot contains a virtual tuple and thus we can access its attributs directly - */ - Datum *values = slot->tts_values; - bool *nulls = slot->tts_isnull; - - if (unlikely(!values && !nulls)) { - ereport(ERROR, - (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Empty arrays values and nulls"))); - } - - store_kmeans_data_in_model(natts, tupdesc, values, nulls, model, model_kmeans); -} static void store_tuple_in_model_warehouse(TupleTableSlot *slot, DestReceiver *self) { DestReceiverTrainModel *dest = (DestReceiverTrainModel *)self; - Model *model = dest->model; + + if (dest->algorithm >= INVALID_ALGORITHM_ML) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Unsupported model type %d", static_cast(dest->algorithm)))); - switch (model->algorithm) { - case LOGISTIC_REGRESSION: - case SVM_CLASSIFICATION: - case LINEAR_REGRESSION: - store_tuple_gd_in_model_warehouse(slot, dest); 
- break; - - case KMEANS: - store_tuple_kmeans_in_model_warehouse(slot, dest); - break; - - case INVALID_ALGORITHM_ML: - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Unsupported model type %d", static_cast(model->algorithm)))); - break; - } - - store_model(model); + Model *model = (Model*)DatumGetPointer(slot->tts_values[0]); + model->algorithm = dest->algorithm; + model->model_name = dest->model_name; + model->sql = dest->sql; + model->hyperparameters = dest->hyperparameters; + + if (dest->save_model) + store_model(model); } @@ -742,12 +321,12 @@ DestReceiver *CreateTrainModelDestReceiver() { DestReceiverTrainModel *dr = (DestReceiverTrainModel *)palloc0(sizeof(DestReceiverTrainModel)); DestReceiver *result = &dr->dest; - + result->rStartup = do_nothing_startup; result->receiveSlot = store_tuple_in_model_warehouse; result->rShutdown = do_nothing_cleanup; result->rDestroy = do_nothing_cleanup; result->mydest = DestTrainModel; - + return result; } diff --git a/src/gausskernel/dbmind/db4ai/commands/explain_model.cpp b/src/gausskernel/dbmind/db4ai/commands/explain_model.cpp new file mode 100644 index 000000000..7a91f12ef --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/commands/explain_model.cpp @@ -0,0 +1,69 @@ +/* +* Copyright (c) 2020 Huawei Technologies Co.,Ltd. +* +* openGauss is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*--------------------------------------------------------------------------------------- +* +* explain_model.cpp +* Obtain the results of the model by parsing the model. 
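+*
+* Usage sketch (hedged; assumes the SQL surface implied by the version check
+* below, which names gs_explain_model):
+*     SELECT gs_explain_model('my_model');
+* where 'my_model' is a hypothetical model previously trained with CREATE MODEL;
+* the call returns the textual report built by ExecExplainModel().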
+*
+* IDENTIFICATION
+*        src/gausskernel/dbmind/db4ai/commands/explain_model.cpp
+*
+* ---------------------------------------------------------------------------------------
+*/
+
+#include "db4ai/explain_model.h"
+
+#include "c.h"
+#include "commands/explain.h"
+#include "db4ai/model_warehouse.h"
+#include "utils/builtins.h"
+Datum db4ai_explain_model(PG_FUNCTION_ARGS)
+{
+    // This function is not supported in distributed deployment mode currently
+#ifdef ENABLE_MULTIPLE_NODES
+    ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("No support for distributed scenarios yet.")));
+#endif
+    if (t_thrd.proc->workingVersionNum < 92582) {
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Before GRAND VERSION NUM 92582, we do not support gs_explain_model.")));
+    }
+
+    text* in_string = PG_GETARG_TEXT_PP(0);
+    char* out_string = str_tolower(VARDATA_ANY(in_string), VARSIZE_ANY_EXHDR(in_string), PG_GET_COLLATION());
+    text* result = ExecExplainModel(out_string);
+    PG_RETURN_TEXT_P(result);
+}
+
+extern void do_model_explain(ExplainState *es, const Model *model);
+text* ExecExplainModel(char* model_name)
+{
+    ExplainState explain_state;
+    errno_t rc = memset_s(&explain_state, sizeof(ExplainState), 0, sizeof(ExplainState));
+    securec_check(rc, "\0", "\0");
+
+    explain_state.str = makeStringInfo();
+    explain_state.format = EXPLAIN_FORMAT_TEXT;
+    explain_state.opt_model_name = model_name;
+
+    const Model* model = get_model(model_name, false);
+    if (!model)
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("empty model obtained for EXPLAIN MODEL")));
+
+    do_model_explain(&explain_state, model);
+    text* result = cstring_to_text(explain_state.str->data);
+    pfree_ext(explain_state.str->data);
+    return result;
+}
\ No newline at end of file
diff --git a/src/gausskernel/dbmind/db4ai/commands/predict_by.cpp b/src/gausskernel/dbmind/db4ai/commands/predict_by.cpp
index d189025c1..350b9fadb 100644
--- a/src/gausskernel/dbmind/db4ai/commands/predict_by.cpp
+++ b/src/gausskernel/dbmind/db4ai/commands/predict_by.cpp
@@ -29,54 +29,95 @@
 #include "utils/array.h"
 #include "utils/builtins.h"
 #include "db4ai/gd.h"
+#include "utils/lsyscache.h"
 
 #define DEBUG_MODEL_RETURN_TYPE INT4OID // Set manually the return type of model, until available from catalog
 
 /*
- * functions relevant to k-means and defined in kmeans.cpp
+ * functions relevant to xgboost and defined in xgboost.cpp
  */
-ModelPredictor kmeans_predict_prepare(Model const * model);
-Datum kmeans_predict(ModelPredictor model, Datum *data, bool *nulls, Oid *types, int nargs);
+ModelPredictor xgboost_predict_prepare(AlgorithmAPI *, Model const *model);
+Datum xgboost_predict(AlgorithmAPI*, ModelPredictor model, Datum* values, bool* isnull, Oid* types, int ncolumns);
+
 struct PredictionByData {
     PredictorInterface *api;
     ModelPredictor model_predictor;
+    AlgorithmAPI *algorithm;
};
+typedef enum funcType {
+    INVALID_FUNC_TYPE = 0,
+    BOOL,
+    FLOAT4,
+    FLOAT8,
+    FLOAT8ARRAY,
+    INT32,
+    INT64,
+    NUMERIC,
+    TEXT
+} funcType;
+
+static void check_func_oid(Oid model_retype, funcType func)
+{
+    funcType mft = INVALID_FUNC_TYPE;
+    switch (model_retype) {
+        case BOOLOID:
+            mft = BOOL;
+            break;
+        case FLOAT4OID:
+            mft = FLOAT4;
+            break;
+        case FLOAT8OID:
+            mft = FLOAT8;
+            break;
+        case FLOAT8ARRAYOID:
+            mft = FLOAT8ARRAY;
+            break;
+        case INT1OID:
+        case INT2OID:
+        case INT4OID:
+            mft = INT32;
+            break;
+        case INT8OID:
+            mft = INT64;
+            break;
+        case NUMERICOID:
+            mft = 
NUMERIC; + break; + case VARCHAROID: + case BPCHAROID: + case CHAROID: + case TEXTOID: + mft = TEXT; + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Cannot trigger prediction for model with oid %u", model_retype))); + } + if (mft != func) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("The function return type is mismatched with model result type: %u.", model_retype))); +} -static PredictionByData *initialize_predict_by_data(Model *model) +static PredictionByData *initialize_predict_by_data(const Model *model) { PredictionByData *result = (PredictionByData *)palloc0(sizeof(PredictionByData)); // Initialize the API handlers - switch (model->algorithm) { - case LOGISTIC_REGRESSION: - case SVM_CLASSIFICATION: - case LINEAR_REGRESSION: { - result->api = (PredictorInterface *)palloc0(sizeof(PredictorInterface)); - result->api->predict = gd_predict; - result->api->prepare = gd_predict_prepare; - break; - } - case KMEANS: { - result->api = (PredictorInterface *)palloc0(sizeof(PredictorInterface)); - result->api->predict = kmeans_predict; - result->api->prepare = kmeans_predict_prepare; - break; - } - case INVALID_ALGORITHM_ML: - default: { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Model type %d is not supported for prediction", (int)model->algorithm))); - break; - } - } + result->algorithm = get_algorithm_api(model->algorithm); + result->api = (PredictorInterface*)palloc0(sizeof(PredictorInterface)); + Assert(result->algorithm->predict != nullptr); + Assert(result->algorithm->prepare_predict != nullptr); + result->api->predict = result->algorithm->predict; + result->api->prepare = NULL; // Prepare the in memory version of the model for efficient prediction - result->model_predictor = result->api->prepare(model); + result->model_predictor = result->algorithm->prepare_predict(result->algorithm, &model->data, model->return_type); return result; } +template Datum db4ai_predict_by(PG_FUNCTION_ARGS) { // First argument is the model, the following ones are the inputs to the model predictor @@ -101,16 +142,16 @@ Datum db4ai_predict_by(PG_FUNCTION_ARGS) text *model_name_text = PG_GETARG_TEXT_P(0); char *model_name = text_to_cstring(model_name_text); - Model *model = get_model(model_name, false); + const Model *model = get_model(model_name, false); prediction_by_data = initialize_predict_by_data(model); fcinfo->flinfo->fn_extra = prediction_by_data; pfree(model_name); - + check_func_oid(model->return_type, ft); MemoryContextSwitchTo(oldContext); } - Datum result = - prediction_by_data->api->predict(prediction_by_data->model_predictor, var_args, nulls, types, var_args_size); + Datum result = prediction_by_data->api->predict(prediction_by_data->algorithm, prediction_by_data->model_predictor, + var_args, nulls, types, var_args_size); PG_RETURN_DATUM(result); } @@ -119,35 +160,41 @@ Datum db4ai_predict_by(PG_FUNCTION_ARGS) // to be compliant with openGauss type system that specifies the return type Datum db4ai_predict_by_bool(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return db4ai_predict_by(fcinfo); } Datum db4ai_predict_by_int32(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return db4ai_predict_by(fcinfo); } Datum db4ai_predict_by_int64(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return db4ai_predict_by(fcinfo); } Datum db4ai_predict_by_float4(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return 
db4ai_predict_by(fcinfo); } Datum db4ai_predict_by_float8(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return db4ai_predict_by(fcinfo); } +Datum db4ai_predict_by_float8_array(PG_FUNCTION_ARGS) +{ + return db4ai_predict_by(fcinfo); +} + + Datum db4ai_predict_by_numeric(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return db4ai_predict_by(fcinfo); } Datum db4ai_predict_by_text(PG_FUNCTION_ARGS) { - return db4ai_predict_by(fcinfo); + return db4ai_predict_by(fcinfo); } diff --git a/src/gausskernel/dbmind/db4ai/executor/CMakeLists.txt b/src/gausskernel/dbmind/db4ai/executor/CMakeLists.txt index fd3c5fe1d..ca1f8582c 100755 --- a/src/gausskernel/dbmind/db4ai/executor/CMakeLists.txt +++ b/src/gausskernel/dbmind/db4ai/executor/CMakeLists.txt @@ -3,7 +3,10 @@ set(TGT_executor_SRC ${CMAKE_CURRENT_SOURCE_DIR}/distance_functions.cpp ${CMAKE_CURRENT_SOURCE_DIR}/fp_ops.cpp ${CMAKE_CURRENT_SOURCE_DIR}/hyperparameter_validation.cpp -) + ${CMAKE_CURRENT_SOURCE_DIR}/kernel.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/matrix.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/direct.cpp +) set(TGT_executor_INC ${PROJECT_OPENGS_DIR}/contrib/log_fdw ${PROJECT_TRUNK_DIR}/distribute/bin/gds @@ -27,3 +30,4 @@ add_static_objtarget(gausskernel_db4ai_executor TGT_executor_SRC TGT_executor_IN add_subdirectory(gd) add_subdirectory(kmeans) +add_subdirectory(xgboost) diff --git a/src/gausskernel/dbmind/db4ai/executor/Makefile b/src/gausskernel/dbmind/db4ai/executor/Makefile index 69fb5bd3f..245367593 100644 --- a/src/gausskernel/dbmind/db4ai/executor/Makefile +++ b/src/gausskernel/dbmind/db4ai/executor/Makefile @@ -24,10 +24,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -SUBDIRS = gd kmeans -OBJS = fp_ops.o distance_functions.o hyperparameter_validation.o +SUBDIRS = gd kmeans xgboost +OBJS = fp_ops.o distance_functions.o hyperparameter_validation.o direct.o kernel.o matrix.o include $(top_srcdir)/src/gausskernel/common.mk - - - diff --git a/src/gausskernel/dbmind/db4ai/executor/direct.cpp b/src/gausskernel/dbmind/db4ai/executor/direct.cpp new file mode 100644 index 000000000..d893a16e0 --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/direct.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
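+ *
+ * Hedged usage sketch of the direct (non-SQL) API defined in this file;
+ * "m1", the fetch/rescan callbacks and the column descriptors are
+ * placeholders the caller must supply:
+ *
+ *     Model *m = model_fit("m1", LOGISTIC_REGRESSION, hyperp, nhyperp,
+ *                          typid, typbyval, typlen, ncolumns,
+ *                          fetch_cb, rescan_cb, callback_data);
+ *     model_store(m);
+ *     ModelPredictor pred = model_prepare_predict(model_load("m1"));
+ *     Datum result = model_predict(pred, values, isnull, typid, ncolumns);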
+ * --------------------------------------------------------------------------------------- + * + * direct.cpp + * + * IDENTIFICATION + * src/gausskernel/dbmind/db4ai/executor/direct.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "db4ai/db4ai_api.h" + +Model *model_fit(const char *name, AlgorithmML algorithm, const Hyperparameter *hyperparameters, int nhyperp, + Oid *typid, bool *typbyval, int16 *typlen, int ncolumns, callback_ml_fetch fetch, + callback_ml_rescan rescan, void *callback_data) +{ + Assert(nhyperp == 0 || hyperparameters != nullptr); + Assert(ncolumns > 0); + Assert(typid != nullptr); + Assert(typbyval != nullptr); + Assert(typlen != nullptr); + Assert(fetch != NULL); + + AlgorithmAPI *palgo = get_algorithm_api(algorithm); + + // prepare the hyperparameters + int32_t definitions_size; + const HyperparameterDefinition* definitions = palgo->get_hyperparameters_definitions(palgo, &definitions_size); + ModelHyperparameters *hyperp = palgo->make_hyperparameters(palgo); + init_hyperparameters_with_defaults(definitions, definitions_size, hyperp); + if (nhyperp > 0) + configure_hyperparameters(definitions, definitions_size, hyperparameters, nhyperp, hyperp); + if (palgo->update_hyperparameters != nullptr) + palgo->update_hyperparameters(palgo, hyperp); + + // create configuration + TrainModel config; + config.algorithm = algorithm; + config.configurations = 1; + config.hyperparameters = (const ModelHyperparameters **) &hyperp; + config.cxt = CurrentMemoryContext; + + // create state structure + Assert(palgo->create != nullptr); + TrainModelState *pstate = palgo->create(palgo, &config); + pstate->config = &config; + pstate->algorithm = palgo; + pstate->finished = 0; + pstate->fetch = fetch; + pstate->rescan = rescan; + pstate->callback_data = callback_data; + pstate->row_allocated = false; + pstate->tuple.ncolumns = ncolumns; + pstate->tuple.typid = typid; + pstate->tuple.typbyval = typbyval; + pstate->tuple.typlen = typlen; + + // fit, with a single configuration only a single Model is expected + Model *model = (Model *)palloc0(sizeof(Model)); + model->memory_context = CurrentMemoryContext; + model->algorithm = algorithm; + model->model_name = name; + model->sql = nullptr; + model->hyperparameters = + prepare_model_hyperparameters(definitions, definitions_size, hyperp, model->memory_context); + + Assert(palgo->run != nullptr); + palgo->run(palgo, pstate, &model); + + // done + Assert(palgo->end != nullptr); + palgo->end(palgo, pstate); + pfree(pstate); + + return model; +} + +struct DirectModelPredictor { + AlgorithmAPI *palgo; + ModelPredictor predictor; +}; + +ModelPredictor model_prepare_predict(const Model* model) +{ + DirectModelPredictor *pred = (DirectModelPredictor*) palloc(sizeof(DirectModelPredictor)); + pred->palgo = get_algorithm_api(model->algorithm); + pred->predictor = pred->palgo->prepare_predict(pred->palgo, &model->data, model->return_type); + return (ModelPredictor)pred; +} + +Datum model_predict(ModelPredictor predictor, Datum *values, bool *isnull, Oid *typid, int num_columns) +{ + DirectModelPredictor *pred = (DirectModelPredictor*) predictor; + return pred->palgo->predict(pred->palgo, pred->predictor, values, isnull, typid, num_columns); +} + +void model_store(const Model *model) +{ + store_model(model); +} + +const Model *model_load(const char *model_name) +{ + return get_model(model_name, false); +} diff --git a/src/gausskernel/dbmind/db4ai/executor/fp_ops.cpp 
b/src/gausskernel/dbmind/db4ai/executor/fp_ops.cpp index 65772edfc..9fe9ab972 100644 --- a/src/gausskernel/dbmind/db4ai/executor/fp_ops.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/fp_ops.cpp @@ -192,21 +192,11 @@ double IncrementalStatistics::getMin() const return min_value; } -void IncrementalStatistics::setMin(double min) -{ - min_value = min; -} - double IncrementalStatistics::getMax() const { return max_value; } -void IncrementalStatistics::setMax(double max) -{ - max_value = max; -} - double IncrementalStatistics::getTotal() const { return total; @@ -214,7 +204,8 @@ double IncrementalStatistics::getTotal() const void IncrementalStatistics::setTotal(double t) { - total = t; + total = max_value = min_value = t; + population = 1ULL; } uint64_t IncrementalStatistics::getPopulation() const @@ -222,11 +213,6 @@ uint64_t IncrementalStatistics::getPopulation() const return population; } -void IncrementalStatistics::setPopulation(uint64_t pop) -{ - population = pop; -} - double IncrementalStatistics::getEmpiricalMean() const { double mean = 0.; @@ -245,7 +231,10 @@ double IncrementalStatistics::getEmpiricalVariance() const double IncrementalStatistics::getEmpiricalStdDev() const { - return std::sqrt(getEmpiricalVariance()); + double const std_dev = getEmpiricalVariance(); + // round-off errors might happen and we don't want to take + // sqrt of a negative number (that technically should be zero for example) + return std_dev < 0. ? 0. : std::sqrt(getEmpiricalVariance()); } bool IncrementalStatistics::reset() diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/CMakeLists.txt b/src/gausskernel/dbmind/db4ai/executor/gd/CMakeLists.txt index 63d18bf11..2b054c829 100755 --- a/src/gausskernel/dbmind/db4ai/executor/gd/CMakeLists.txt +++ b/src/gausskernel/dbmind/db4ai/executor/gd/CMakeLists.txt @@ -3,7 +3,10 @@ set(TGT_gd_SRC ${CMAKE_CURRENT_SOURCE_DIR}/gd.cpp ${CMAKE_CURRENT_SOURCE_DIR}/linregr.cpp ${CMAKE_CURRENT_SOURCE_DIR}/logregr.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/matrix.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/pca.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/optimizer_pca.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/optimizer_ova.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/multiclass.cpp ${CMAKE_CURRENT_SOURCE_DIR}/optimizer_gd.cpp ${CMAKE_CURRENT_SOURCE_DIR}/optimizer_ngd.cpp ${CMAKE_CURRENT_SOURCE_DIR}/predict.cpp diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/Makefile b/src/gausskernel/dbmind/db4ai/executor/gd/Makefile index 649b342e2..1b7370a8b 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/Makefile +++ b/src/gausskernel/dbmind/db4ai/executor/gd/Makefile @@ -17,6 +17,8 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -OBJS = gd.o matrix.o optimizer_gd.o optimizer_ngd.o shuffle_cache.o logregr.o svm.o linregr.o predict.o +OBJS = gd.o shuffle_cache.o predict.o \ + logregr.o svm.o linregr.o pca.o multiclass.o \ + optimizer_gd.o optimizer_ngd.o optimizer_pca.o optimizer_ova.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/gd.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/gd.cpp index ad9250cee..293221fa7 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/gd.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/gd.cpp @@ -13,7 +13,8 @@ * See the Mulan PSL v2 for more details. *--------------------------------------------------------------------------------------- * - * gd.cpp + * gd.cpp + * Gradient descent is used for algorithm optimization. 
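+ * In its textbook form, each iteration updates the model weights as
+ *     w := w - learning_rate * grad L(w; batch)
+ * over shuffled (mini-)batches; the "gd" and "ngd" optimizers below are
+ * variants of this basic update rule.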
* * IDENTIFICATION * src/gausskernel/dbmind/db4ai/executor/gd/gd.cpp @@ -22,147 +23,33 @@ */ #include "postgres.h" +#include "executor/executor.h" #include "utils/builtins.h" #include "nodes/makefuncs.h" #include "db4ai/gd.h" +#include "db4ai/db4ai_common.h" #include "nodes/primnodes.h" #include "utils/array.h" +#include "utils/lsyscache.h" +#include "db4ai/hyperparameter_validation.h" +#include "db4ai/kernel.h" + +#define GD_TARGET_COL 0 // predefined, always the first const char *gd_get_optimizer_name(OptimizerML optimizer) { static const char* names[] = { "gd", "ngd" }; - if (optimizer > OPTIMIZER_NGD) + if (optimizer >= INVALID_OPTIMIZER) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Invalid optimizer %d", optimizer))); return names[optimizer]; } -const char *gd_get_expr_name(GradientDescentExprField field) +GradientDescent *gd_get_algorithm(AlgorithmML algorithm) { - const char* names[] = { - "algorithm", - "optimizer", - "result_type", - "num_iterations", - "exec_time_msecs", - "processed", - "discarded", - "weights", - "categories", - }; - - if ((field & GD_EXPR_SCORE) != 0) - return gd_get_metric_name(field & ~GD_EXPR_SCORE); - - if (field > GD_EXPR_CATEGORIES) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid GD expression field %d", field))); - - return names[field]; -} - -Datum gd_float_get_datum(Oid type, gd_float value) -{ - Datum datum = 0; - switch (type) { - case BOOLOID: - datum = BoolGetDatum(value != 0.0); - break; - case INT1OID: - datum = Int8GetDatum(value); - break; - case INT2OID: - datum = Int16GetDatum(value); - break; - case INT4OID: - datum = Int32GetDatum(value); - break; - case INT8OID: - datum = Int64GetDatum(value); - break; - case FLOAT4OID: - datum = Float4GetDatum(value); - break; - case FLOAT8OID: - datum = Float8GetDatum(value); - break; - case NUMERICOID: - datum = DirectFunctionCall1(float4_numeric, Float4GetDatum(value)); - break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Oid type %d not yet supported", type))); - break; - } - return datum; -} - -gd_float gd_datum_get_float(Oid type, Datum datum) -{ - gd_float value = 0; - switch (type) { - case BOOLOID: - value = DatumGetBool(datum) ? 
1.0 : 0.0; - break; - case INT1OID: - value = DatumGetInt8(datum); - break; - case INT2OID: - value = DatumGetInt16(datum); - break; - case INT4OID: - value = DatumGetInt32(datum); - break; - case INT8OID: - value = DatumGetInt64(datum); - break; - case FLOAT4OID: - value = DatumGetFloat4(datum); - break; - case FLOAT8OID: - value = DatumGetFloat8(datum); - break; - case NUMERICOID: - value = DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow, datum)); - break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Oid type %d not yet supported", type))); - break; - } - return value; -} - -char *gd_get_metric_name(int metric) -{ - switch (metric) { - case METRIC_ACCURACY: - return "accuracy"; - case METRIC_F1: - return "f1"; - case METRIC_PRECISION: - return "precision"; - case METRIC_RECALL: - return "recall"; - case METRIC_LOSS: - return "loss"; - case METRIC_MSE: - return "mse"; - default: - Assert(false); - } - return nullptr; -} - -extern GradientDescentAlgorithm gd_logistic_regression; -extern GradientDescentAlgorithm gd_svm_classification; -extern GradientDescentAlgorithm gd_linear_regression; - -GradientDescentAlgorithm *gd_get_algorithm(AlgorithmML algorithm) -{ - GradientDescentAlgorithm *gd_algorithm = nullptr; + GradientDescent *gd_algorithm = nullptr; switch (algorithm) { case LOGISTIC_REGRESSION: gd_algorithm = &gd_logistic_regression; @@ -173,6 +60,9 @@ GradientDescentAlgorithm *gd_get_algorithm(AlgorithmML algorithm) case LINEAR_REGRESSION: gd_algorithm = &gd_linear_regression; break; + case PCA: + gd_algorithm = &gd_pca; + break; default: ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Invalid algorithm %d", algorithm))); @@ -181,177 +71,1036 @@ GradientDescentAlgorithm *gd_get_algorithm(AlgorithmML algorithm) return gd_algorithm; } -// //////////////////////////////////////////////////////////////////////////// -// expressions for projections - -static struct { - GradientDescentExprField field; - Oid fieldtype; - char *name; -} GradientDescentExpr_fields[] = { - { GD_EXPR_ALGORITHM, INT4OID, "algorithm"}, - { GD_EXPR_OPTIMIZER, INT4OID, "optimizer"}, - { GD_EXPR_RESULT_TYPE, OIDOID, "result_type"}, - { GD_EXPR_NUM_ITERATIONS, INT4OID, "num_iterations"}, - { GD_EXPR_EXEC_TIME_MSECS, FLOAT4OID, "exec_time_msecs"}, - { GD_EXPR_PROCESSED_TUPLES, INT4OID, "processed_tuples"}, - { GD_EXPR_DISCARDED_TUPLES, INT4OID, "discarded_tuples"}, - { GD_EXPR_WEIGHTS, FLOAT4ARRAYOID, "weights"}, -}; - -static GradientDescentExpr *makeGradientDescentExpr(GradientDescentExprField field, Oid fieldtype) +// usage: svmc, logreg +MetricML *gd_metrics_accuracy(AlgorithmAPI *self, int *num_metrics) { - GradientDescentExpr *xpr = makeNode(GradientDescentExpr); - xpr->field = field; - xpr->fieldtype = fieldtype; - return xpr; + Assert(num_metrics != nullptr); + static MetricML metrics[] = { + METRIC_ML_ACCURACY, METRIC_ML_F1, METRIC_ML_PRECISION, METRIC_ML_RECALL, METRIC_ML_LOSS + }; + *num_metrics = sizeof(metrics) / sizeof(MetricML); + return metrics; } -List *makeGradientDescentExpr(AlgorithmML algorithm, List *list, int field) +// usage: linreg +MetricML *gd_metrics_mse(AlgorithmAPI *self, int *num_metrics) { - Expr *expr; - for (size_t i = 0; i < sizeof(GradientDescentExpr_fields) / sizeof(GradientDescentExpr_fields[0]); i++) { - expr = (Expr *)makeGradientDescentExpr(GradientDescentExpr_fields[i].field, - GradientDescentExpr_fields[i].fieldtype); - list = lappend(list, makeTargetEntry(expr, 
field++, GradientDescentExpr_fields[i].name, false)); - } - - // add metrics - GradientDescentAlgorithm *palgo = gd_get_algorithm(algorithm); - int metrics = palgo->metrics; - int metric = 1; - while (metrics != 0) { - if (metrics & metric) { - expr = (Expr *)makeGradientDescentExpr(makeGradientDescentExprFieldScore(metric), FLOAT4OID); - list = lappend(list, makeTargetEntry(expr, field++, gd_get_metric_name(metric), false)); - metrics &= ~metric; - } - metric <<= 1; - } - - // binary value mappings - if (dep_var_is_binary(palgo)) { - expr = (Expr *)makeGradientDescentExpr(GD_EXPR_CATEGORIES, TEXTARRAYOID); - list = lappend(list, makeTargetEntry(expr, field++, "categories", false)); - } - - return list; + Assert(num_metrics != nullptr); + static MetricML metrics[] = { METRIC_ML_MSE }; + *num_metrics = sizeof(metrics) / sizeof(MetricML); + return metrics; } -Datum ExecGDExprScore(GradientDescentExprState *mlstate, bool *isNull) +// usage: pca +MetricML *gd_metrics_loss(AlgorithmAPI *self, int *num_metrics) { - Datum dt = 0; + Assert(num_metrics != nullptr); + static MetricML metrics[] = { METRIC_ML_LOSS }; + *num_metrics = sizeof(metrics) / sizeof(MetricML); + return metrics; +} + +static double gd_get_score(GradientDescentState *gd_state, MetricML metric, bool *isNull) +{ + double score = 0; bool hasp, hasr; double precision, recall; - GradientDescentState *gd_state = (GradientDescentState *)mlstate->ps; - - switch (mlstate->xpr->field & ~GD_EXPR_SCORE) { - case METRIC_LOSS: - dt = Float4GetDatum(gd_state->loss); - break; - case METRIC_ACCURACY: - dt = Float4GetDatum(get_accuracy(&gd_state->scores)); - break; - case METRIC_F1: // 2 * (precision * recall) / (precision + recall) - precision = get_precision(&gd_state->scores, &hasp); - recall = get_recall(&gd_state->scores, &hasr); - if ((hasp && precision > 0) || (hasr && recall > 0)) { - dt = Float4GetDatum(2.0 * precision * recall / (precision + recall)); - } else - *isNull = true; - break; - case METRIC_PRECISION: - precision = get_precision(&gd_state->scores, &hasp); - if (hasp) { - dt = Float4GetDatum(precision); - } else - *isNull = true; - break; - case METRIC_RECALL: - recall = get_recall(&gd_state->scores, &hasr); - if (hasr) { - dt = Float4GetDatum(recall); - } else - *isNull = true; - break; - case METRIC_MSE: - dt = Float4GetDatum(gd_state->scores.mse); - break; - default: - *isNull = true; - break; - } - - return dt; -} - -Datum ExecNonGDExprScore(GradientDescentExprState *mlstate, ExprContext *econtext, bool *isNull) -{ - Datum dt = 0; - Oid typoutput; - GradientDescentState *gd_state = (GradientDescentState *)mlstate->ps; - GradientDescent *gd_node = (GradientDescent *)gd_state->ss.ps.plan; - ArrayBuildState *astate = NULL; - - switch (mlstate->xpr->field) { - case GD_EXPR_ALGORITHM: - dt = Int32GetDatum(gd_node->algorithm); - break; - case GD_EXPR_OPTIMIZER: - dt = Int32GetDatum(gd_node->optimizer); - break; - case GD_EXPR_RESULT_TYPE: - typoutput = get_atttypid(gd_state, gd_node->targetcol); - dt = UInt32GetDatum(typoutput); - break; - case GD_EXPR_NUM_ITERATIONS: - dt = Int32GetDatum(gd_state->n_iterations); - break; - case GD_EXPR_EXEC_TIME_MSECS: - dt = Float4GetDatum(gd_state->usecs / 1000.0); - break; - case GD_EXPR_PROCESSED_TUPLES: - dt = Int32GetDatum(gd_state->processed); - break; - case GD_EXPR_DISCARDED_TUPLES: - dt = Int32GetDatum(gd_state->discarded); - break; - case GD_EXPR_WEIGHTS: { - gd_float *pw = gd_state->weights.data; - for (int i = 0; i < gd_state->weights.rows; i++) - astate = accumArrayResult(astate, 
Float4GetDatum(*pw++), false, FLOAT4OID, CurrentMemoryContext); - dt = makeArrayResult(astate, econtext->ecxt_per_query_memory); - } break; - case GD_EXPR_CATEGORIES: - typoutput = get_atttypid(gd_state, gd_node->targetcol); - for (int i = 0; i < gd_state->num_classes; i++) - astate = - accumArrayResult(astate, gd_state->binary_classes[i], false, typoutput, CurrentMemoryContext); - dt = makeArrayResult(astate, econtext->ecxt_per_query_memory); - break; - default: - *isNull = true; - break; - } - - return dt; -} - -Datum ExecEvalGradientDescent(GradientDescentExprState *mlstate, ExprContext *econtext, bool *isNull, - ExprDoneCond *isDone) -{ - Datum dt = 0; - - if (isDone != NULL) - *isDone = ExprSingleResult; *isNull = false; - if ((mlstate->xpr->field & GD_EXPR_SCORE) != 0) { - dt = ExecGDExprScore(mlstate, isNull); - } else { - dt = ExecNonGDExprScore(mlstate, econtext, isNull); + switch (metric) { + case METRIC_ML_LOSS: + score = gd_state->loss; + break; + case METRIC_ML_ACCURACY: + score = get_accuracy(&gd_state->scores, &hasr); + if (!hasr) + *isNull = true; + break; + case METRIC_ML_F1: // 2 * (precision * recall) / (precision + recall) + precision = get_precision(&gd_state->scores, &hasp); + recall = get_recall(&gd_state->scores, &hasr); + if ((hasp && precision > 0) || (hasr && recall > 0)) + score = 2.0 * precision * recall / (precision + recall); + else + *isNull = true; + break; + case METRIC_ML_PRECISION: + score = get_precision(&gd_state->scores, &hasp); + *isNull = !hasp; + break; + case METRIC_ML_RECALL: + score = get_recall(&gd_state->scores, &hasr); + *isNull = !hasr; + break; + case METRIC_ML_MSE: + score = gd_state->scores.mse; + break; + default: + *isNull = true; + break; } - return dt; + return score; } +void gd_copy_pg_array_data(float8 *dest, Datum const source_datum, int32_t const num_entries) +{ + ArrayType *pg_array = DatumGetArrayTypeP(source_datum); + Oid array_type = ARR_ELEMTYPE(pg_array); + /* + * We expect the input to be an n-element array of float4/float8; verify that. We + * don't need to use deconstruct_array() since the array data is just + * going to look like a C array of n double values. 
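+ *
+ * Note that DatumGetArrayTypeP() may detoast the datum into a freshly
+ * allocated copy; the pointer comparison below (release_point) detects that
+ * case so the temporary copy can be pfree'd once the values are transferred.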
+ */ + if (unlikely((ARR_NDIM(pg_array) != 1) || (ARR_DIMS(pg_array)[0] != num_entries) || ARR_HASNULL(pg_array) || + !((array_type == FLOAT4OID) || (array_type == FLOAT8OID)))) + ereport(ERROR, (errmodule(MOD_DB4AI), + errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Input array must be 1-dimensional of %d elements, must not contain nulls, " + "and must be of type float8 or float.", num_entries))); + + bool const release_point = PointerGetDatum(pg_array) != source_datum; + float8 value = 0.; + if (array_type == FLOAT8OID) { + int32_t const bytes = num_entries * sizeof(float8); + int32_t const rc = memcpy_s(dest, bytes, ARR_DATA_PTR(pg_array), bytes); + securec_check(rc, "\0", "\0"); + } else if (array_type == FLOAT4OID) { + auto feature_array = reinterpret_cast(ARR_DATA_PTR(pg_array)); + for (int32_t f = 0; f < num_entries; ++f) { + value = feature_array[f]; + dest[f] = value; + } + } + // if we end up having a copy we gotta release it as to not leak memory + if (unlikely(release_point)) + pfree(pg_array); +} + +// when adding new algorithms make sure of changing the corresponding enum correctly (positions must match) +const char* optimizer_ml_str[] = { + [OPTIMIZER_GD] = "gd", + [OPTIMIZER_NGD] = "ngd", + [INVALID_OPTIMIZER] = "INVALID_OPTIMIZER" +}; +const int32_t optimizer_ml_str_size = ARRAY_LENGTH(optimizer_ml_str); +ASSERT_ELEMENTS_ENUM_TO_STR(optimizer_ml_str, INVALID_OPTIMIZER); + +const char* optimizer_ml_to_string(OptimizerML optimizer_ml) +{ + return enum_to_string(optimizer_ml); +} + + +OptimizerML get_optimizer_ml(const char* str) +{ + return string_to_enum(str, "Invalid optimizer"); +} + +const char* gd_optimizer_ml[GD_NUM_OPTIMIZERS] = {"gd", "ngd"}; + +OptimizerGD *gd_init_optimizer(const GradientDescentState *pstate, HyperparametersGD *hyperp) +{ + OptimizerGD* poptimizer = nullptr; + switch (hyperp->optimizer) { + case OPTIMIZER_GD: + poptimizer = gd_init_optimizer_gd(pstate, hyperp); + break; + case OPTIMIZER_NGD: + poptimizer = gd_init_optimizer_ngd(pstate, hyperp); + break; + default: + ereport(ERROR, + (errmodule(MOD_DB4AI), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Optimizer %d not supported", hyperp->optimizer))); + break; + } + return poptimizer; +} + +void gd_multiclass_target(GradientDescent *algorithm, int target, float8 *dest, const float8 *src, int count) +{ + for (int f = 0; f < count; f++, dest++, src++) + *dest = (*src == target ? 
algorithm->max_class : algorithm->min_class); +} + +ModelHyperparameters *gd_make_hyperparameters(AlgorithmAPI *self) +{ + HyperparametersGD *hyperp = (HyperparametersGD *) palloc0(sizeof(HyperparametersGD)); + return &hyperp->mhp; +} + +void gd_update_hyperparameters(AlgorithmAPI *self, ModelHyperparameters *hyperp) +{ + HyperparametersGD *gd_hyperp = (HyperparametersGD *)hyperp; + + // choose a random seed for default (value 0) + // for sure it is not set to zero again because zero is the starting epoch + if (gd_hyperp->seed == 0) + gd_hyperp->seed = time(NULL); +} + +TrainModelState* gd_create(AlgorithmAPI *self, const TrainModel *pnode) +{ + if (pnode->configurations != 1) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("multiple hyperparameter configurations for gradient descent not yet supported"))); + + HyperparametersGD *hyperp = (HyperparametersGD *) pnode->hyperparameters[0]; + + // create state structure + GradientDescentState *pstate = + (GradientDescentState *)makeNodeWithSize(TrainModelState, sizeof(GradientDescentState)); + + // training state initialization + pstate->init = false; + pstate->learning_rate = hyperp->learning_rate; + pstate->n_iterations = 0; + pstate->loss = 0; + pstate->allocated_classes = 0; + pstate->kernel = nullptr; // default, linear kernel + + // the tupdesc sometimes does not contain enough information, e.g. array size + // then the remaining part of the initialization is deferred to the + // first iteration to avoid the execution of the whole subplan during the initialization, + // for example in EXPLAIN + + return &pstate->tms; +} + +static bool transfer_slot(GradientDescentState *pstate, int ith_tuple, Matrix *features, Matrix *dep_var, + HyperparametersGD *hyperp) +{ + Assert(ith_tuple < (int)features->rows); + + GradientDescent *gd_algo = (GradientDescent*)pstate->tms.algorithm; + Assert(!gd_is_supervised(gd_algo) || dep_var != nullptr); + + ModelTuple *tuple = &pstate->tms.tuple; + float8 value = 0; + int feature = 0; + int const n_features = features->columns; + float8 *w; + + if (pstate->kernel == nullptr) { + // just put the values into the destination vector + w = features->data + ith_tuple * n_features; + } else { + // first put the values into a temporary vector + w = pstate->aux_input.data; + } + + for (int i = 0; i < tuple->ncolumns; i++) { + Oid typid = tuple->typid[i]; + bool isnull = tuple->isnull[i]; + Datum datum = tuple->values[i]; + + if (i == GD_TARGET_COL && gd_is_supervised(gd_algo)) { + // ignore row when target column is null for supervised + if (isnull) + return false; + + if (gd_dep_var_is_binary(gd_algo)) { + bool byval = tuple->typbyval[i]; + int attlen = tuple->typlen[i]; + + if (pstate->target_typid == BOOLOID) + value = DatumGetBool(datum) ? gd_algo->max_class : gd_algo->min_class; + else { + bool found = false; + for (int v = 0; v < pstate->num_classes && !found; v++) { + found = DatumImageEq(datum, pstate->value_classes[v], byval, attlen); + if (found) + value = (v == 1 ? gd_algo->max_class : gd_algo->min_class); + } + if (!found) { + if (pstate->num_classes == 2) + ereport(ERROR, + (errmodule(MOD_DB4AI), + errcode(ERRCODE_TOO_MANY_ARGUMENTS), + errmsg("too many target values for binary operator"))); + + value = (pstate->num_classes == 1 ? 
gd_algo->max_class : gd_algo->min_class); + pstate->value_classes[pstate->num_classes++] = datumCopy(datum, byval, attlen); + } + } + } else { + if (gd_dep_var_is_continuous(gd_algo)) { + // continuous, get just the float value + value = datum_get_float8(typid, datum); + } else { + bool byval = tuple->typbyval[i]; + int attlen = tuple->typlen[i]; + + Oid typoutput; + bool typIsVarlena; + getTypeOutputInfo(tuple->typid[i], &typoutput, &typIsVarlena); + + bool found = false; + for (int v = 0; v < pstate->num_classes && !found; v++) { + if (DatumImageEq(datum, pstate->value_classes[v], byval, attlen)) { + value = v; + found = true; + } + } + if (!found) { + // new category + if (pstate->num_classes == pstate->allocated_classes) { + pstate->allocated_classes += pstate->allocated_classes / 2; // at least one + pstate->value_classes = + (Datum *)repalloc(pstate->value_classes, pstate->allocated_classes * sizeof(Datum)); + } + pstate->value_classes[pstate->num_classes] = datumCopy(datum, byval, attlen); + value = pstate->num_classes++; + } + } + } + + dep_var->data[ith_tuple] = value; + } else { + if (typid == FLOAT8ARRAYOID || typid == FLOAT4ARRAYOID) { + /* + * in here there is a single attribute - which is an array - and all + * features are read directly from it + * + * depending on the current state of the array, we might end up having a copy + */ + Assert(feature == 0); + + // ignore row when there are no features + if (isnull) + return false; + + // expected array length, the intercept/bias does not come with the data + feature = n_features; + if (gd_algo->add_bias) + feature--; + + gd_copy_pg_array_data(w, datum, feature); + w += feature; + } else { + if (isnull) + value = gd_algo->def_feature; + else + value = datum_get_float8(typid, datum); + + *w++ = value; + feature++; + } + } + } + + if (pstate->kernel != nullptr) { + // now apply the kernel transformation + Assert(feature == pstate->aux_input.rows); + + // transform using a fake vector + Matrix tmp; + matrix_init(&tmp, pstate->kernel->coefficients); + w = features->data + ith_tuple * n_features; + + // make sure the transformation points to the destination vector + float8 *p = matrix_swap_data(&tmp, w); + pstate->kernel->transform(pstate->kernel, &pstate->aux_input, &tmp); + matrix_swap_data(&tmp, p); + matrix_release(&tmp); + + // update counters and pointers of data + feature = pstate->kernel->coefficients; + w += feature; + } + + if (gd_algo->add_bias) { + Assert(feature == pstate->n_features - 1); + *w = 1.0; // bias + feature++; + } + + Assert(feature == pstate->n_features); + return true; +} + +static void exec_gd_batch(GradientDescentState* pstate, int iter, bool has_snapshot, HyperparametersGD *hyperp) +{ + GradientDescent *gd_algo = (GradientDescent*)pstate->tms.algorithm; + + void *algo_data = nullptr; + if (gd_algo->start_iteration != nullptr) + algo_data = gd_algo->start_iteration(pstate, iter); + + Matrix* features; + Matrix* dep_var; + bool more = true; + do { + // prepare next batch + features = pstate->shuffle->get(pstate->shuffle, &dep_var); + + int ith_tuple = 0; + if (!has_snapshot) { + while (more && ith_tuple < hyperp->batch_size) { + if (pstate->reuse_tuple) + pstate->reuse_tuple = false; + else + more = pstate->tms.fetch(pstate->tms.callback_data, &pstate->tms.tuple); + + if (more) { + if (transfer_slot(pstate, ith_tuple, features, dep_var, hyperp)) { + if (iter == 0) + pstate->processed++; + + ith_tuple++; + } else { + if (iter == 0) + pstate->discarded++; + } + } + } + } else { + Assert(iter > 0); + if 
(features != nullptr) + ith_tuple = features->rows; + else + more = false; + } + + // use the batch to test now in case the shuffle algorithm + // releases it during unget + if (iter > 0 && ith_tuple > 0) { + if (ith_tuple < hyperp->batch_size) { + matrix_resize(features, ith_tuple, pstate->n_features); + if (gd_is_supervised(gd_algo)) + matrix_resize(dep_var, ith_tuple, 1); + } + + double loss = gd_algo->test(pstate, features, dep_var, &pstate->weights, &pstate->scores); + pstate->loss += loss; + ereport(DEBUG1, + (errmodule(MOD_DB4AI), + errmsg("iteration %d loss = %.6f (total %.6g)", iter, loss, pstate->loss))); + + if (ith_tuple < hyperp->batch_size) { + matrix_resize(features, hyperp->batch_size, pstate->n_features); + if (gd_is_supervised(gd_algo)) + matrix_resize(dep_var, hyperp->batch_size, 1); + } + } + + if (ith_tuple > 0 || !has_snapshot) { + // give back the batch to the shuffle algorithm + pstate->shuffle->unget(pstate->shuffle, ith_tuple); + } + } while (more); + + if (gd_algo->end_iteration != nullptr) + gd_algo->end_iteration(algo_data); +} + +static void exec_gd_start_iteration(GradientDescentState* pstate) +{ + if (pstate->optimizer->start_iteration != nullptr) + pstate->optimizer->start_iteration(pstate->optimizer); + + if (pstate->shuffle->start_iteration != nullptr) + pstate->shuffle->start_iteration(pstate->shuffle); +} + +static bool exec_gd_end_iteration(GradientDescentState* pstate) +{ + if (pstate->shuffle->end_iteration != nullptr) + pstate->shuffle->end_iteration(pstate->shuffle); + + if (pstate->optimizer->end_iteration != nullptr) + pstate->optimizer->end_iteration(pstate->optimizer); + + if (pstate->shuffle->has_snapshot != nullptr) + return pstate->shuffle->has_snapshot(pstate->shuffle); + + return false; +} + +static bool exec_gd_stop_iterating(GradientDescentState const *pstate, double const prev_loss, double const tolerance) +{ + return (fabs(prev_loss - pstate->loss) < tolerance); +} + +static void exec_gd_finalize(GradientDescentState *pstate) +{ + if (pstate->optimizer->finalize != nullptr) + pstate->optimizer->finalize(pstate->optimizer); +} + +static void deferred_init(TrainModelState *pstate, HyperparametersGD *hyperp) +{ + GradientDescentState *gd_state = (GradientDescentState *)pstate; + Assert(!gd_state->init); + + GradientDescent *gd_algo = (GradientDescent*)pstate->algorithm; + + if (gd_is_supervised(gd_algo)) { + gd_state->allocated_classes = 2; // minimum for binary + gd_state->value_classes = (Datum*)palloc(gd_state->allocated_classes * sizeof(Datum)); + } + + // read the first tuple + if (!pstate->fetch(pstate->callback_data, &pstate->tuple)) + ereport(ERROR, + (errmodule(MOD_DB4AI), + errcode(ERRCODE_NO_DATA_FOUND), + errmsg("Input data is empty"))); + + gd_state->reuse_tuple = true; + + // compute the number of features and validate the dependent var + // of supervised algorithms + gd_state->n_features = 0; + gd_state->target_typid = gd_algo->def_ret_typid; + for (int i = 0; i < pstate->tuple.ncolumns; i++) { + Oid oidtype = pstate->tuple.typid[i]; + if (gd_is_supervised(gd_algo) && i == GD_TARGET_COL) { + gd_state->target_typid = oidtype; + switch (oidtype) { + case BITOID: + case VARBITOID: + case BYTEAOID: + case CHAROID: + case RAWOID: + case NAMEOID: + case TEXTOID: + case BPCHAROID: + case VARCHAROID: + case NVARCHAR2OID: + case CSTRINGOID: + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case FLOAT4OID: + case FLOAT8OID: + case NUMERICOID: + case ABSTIMEOID: + case DATEOID: + case TIMEOID: + case TIMESTAMPOID: + 
case TIMESTAMPTZOID: + case TIMETZOID: + case SMALLDATETIMEOID: + // detect the different values while reading the data + gd_state->num_classes = 0; + break; + + case BOOLOID: + // values are known in advance + gd_state->value_classes[0] = BoolGetDatum(false); + gd_state->value_classes[1] = BoolGetDatum(true); + gd_state->num_classes = 2; + break; + + default: + // unsupported datatypes + ereport(ERROR, + (errmodule(MOD_DB4AI), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Datatype of target not supported"))); + break; + } + } else { + if (oidtype == FLOAT8ARRAYOID || oidtype == FLOAT4ARRAYOID) { + if (gd_state->n_features == 0) { + if (pstate->tuple.isnull[i]) + ereport(ERROR, (errmodule(MOD_DB4AI), + errmsg("Input array required"))); + + Datum dt = pstate->tuple.values[i]; + ArrayType *parray = DatumGetArrayTypeP(dt); + if (((ARR_NDIM(parray) != 1) || ARR_HASNULL(parray))) + ereport(ERROR, (errmodule(MOD_DB4AI), + errmsg("Input array must be 1-dimensional and must not contain nulls"))); + + gd_state->n_features = ARR_DIMS(parray)[0]; + + // if we end up having a copy we gotta release it as to not leak memory + if (PointerGetDatum(parray) != dt) + pfree(parray); + } else + ereport(ERROR, + (errmodule(MOD_DB4AI), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Input array cannot be combined with other features"))); + } else { + if (oidtype != BOOLOID + && oidtype != INT1OID + && oidtype != INT2OID + && oidtype != INT4OID + && oidtype != INT8OID + && oidtype != FLOAT4OID + && oidtype != FLOAT8OID + && oidtype != NUMERICOID) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Oid type %d not yet supported", oidtype))); + + gd_state->n_features++; + } + } + } + + if (gd_state->n_features == 0) + ereport(ERROR, + (errmodule(MOD_DB4AI), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("At least one feature is required"))); + + // now it is possible to prepare the kernel + if (gd_algo->prepare_kernel != nullptr) { + gd_state->kernel = gd_algo->prepare_kernel(gd_state->n_features, hyperp); + if (gd_state->kernel != nullptr) { + matrix_init(&gd_state->aux_input, gd_state->n_features); + gd_state->n_features = gd_state->kernel->coefficients; + } + } + + // extra bias/intercept column fixed to 1 + if (gd_algo->add_bias) + gd_state->n_features++; + + // some algorithms require a previous scan over the data + // it has to be done before the optimizer is initialized + if (gd_algo->scan != nullptr) { + Matrix feat_temp, depvar_temp; + matrix_init(&feat_temp, gd_state->n_features); + matrix_init(&depvar_temp, gd_state->n_features); + + // just read the rows and call + bool stop = false; + do { + if (gd_state->reuse_tuple) + gd_state->reuse_tuple = false; + else { + if (!pstate->fetch(pstate->callback_data, &pstate->tuple)) + break; + } + + if (transfer_slot(gd_state, 0, &feat_temp, &depvar_temp, hyperp)) + stop = gd_algo->scan(&feat_temp, &depvar_temp); + } while (!stop); + + matrix_release(&feat_temp); + matrix_release(&depvar_temp); + pstate->rescan(pstate->callback_data); + } + + // initiate the optimizer + gd_state->optimizer = gd_algo->prepare_optimizer(gd_state, hyperp); + + // initiate the cache + gd_state->shuffle = gd_init_shuffle_cache(gd_state, hyperp); + gd_state->shuffle->optimizer = gd_state->optimizer; + + matrix_init_clone(&gd_state->weights, &gd_state->optimizer->weights); + + gd_state->init = true; +} + +void check_gd_state(GradientDescentState *gd_state) +{ + int dt_cnt = gd_state->weights.rows * gd_state->weights.columns; + for (int i 
= 0; i < dt_cnt; i++) { + if (isnan(gd_state->weights.data[i])) { + ereport(NOTICE, (errmodule(MOD_DB4AI), errmsg("Training warning: weights are out of range, try again with " + "another hyperparameter configuration"))); + break; + } + } +} + +/* Training and test are interleaved to avoid a double scan over the data + * for training and test. Iteration 0 only computes the initial weights, and + * at each following iteration the model is tested with the current weights + * and new weights are updated with the gradients. The optimization is clear: + * for N iterations, the basic algorithm requires N*2 data scans, while the + * interleaved train&test requires only N+1 data scans. When N=1 the number + * of scans is the same (N*2 = N+1) + */ + +void gd_run(AlgorithmAPI *self, TrainModelState* pstate, Model **models) +{ + Assert(pstate->finished == 0); + + GradientDescent *gd_algo = (GradientDescent*)pstate->algorithm; + + // get information from the node + GradientDescentState *gd_state = (GradientDescentState *)pstate; + Assert(pstate->config->configurations == 1); + HyperparametersGD *hyperp = (HyperparametersGD *) pstate->config->hyperparameters[0]; + + // deferred initialization, read and process the first row + deferred_init(pstate, hyperp); + + // for counting execution time + uint64_t start, finish; + uint64_t iter_start, iter_finish; + + // iterations + double prev_loss = DBL_MAX; + + gd_state->processed = 0; + gd_state->discarded = 0; + + uint64_t max_usecs = ULLONG_MAX; + if (hyperp->max_seconds > 0) + max_usecs = hyperp->max_seconds * 1000000ULL; + + bool has_snapshot = false; + bool stop = false; + start = gd_get_clock_usecs(); + int32_t const current_bytes = + gd_state->optimizer->weights.rows * gd_state->optimizer->weights.columns * sizeof(float8); + int32_t const max_bytes = gd_state->weights.allocated * sizeof(float8); + for (int iter = 0; !stop && iter <= hyperp->max_iterations; iter++) { + iter_start = gd_get_clock_usecs(); + + // init loss & scores + scores_init(&gd_state->scores); + gd_state->loss = 0; + + exec_gd_start_iteration(gd_state); + exec_gd_batch(gd_state, iter, has_snapshot, hyperp); + has_snapshot = exec_gd_end_iteration(gd_state); + + if (iter == 0) { + if (gd_state->processed == 0) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_NO_DATA_FOUND), + errmsg("Input data is empty"))); + + if (gd_is_supervised(gd_algo) && !gd_dep_var_is_continuous(gd_algo) && gd_state->num_classes < 2) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_NO_DATA_FOUND), + errmsg("At least two categories are needed"))); + } + + iter_finish = gd_get_clock_usecs(); + + // delta loss < loss tolerance? 
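
The comparison guarded by `iter > 0` just below is the convergence test: training stops once the absolute loss change between consecutive iterations falls under `tolerance`. As a minimal standalone sketch of the interleaved test-then-update loop described in the comment above (illustrative names only, not the DB4AI API; one callback stands in for a full pass over the data):

```
// Sketch: interleaved train & test with a delta-loss stopping criterion.
// test_and_train_scan() is a hypothetical stand-in for one data scan that
// first tests the previous weights and then applies the new gradients.
#include <cfloat>
#include <cmath>
#include <functional>

int run_interleaved(int max_iterations, double tolerance,
                    const std::function<double()> &test_and_train_scan)
{
    double prev_loss = DBL_MAX;
    for (int iter = 0; iter <= max_iterations; iter++) {
        // one scan serves both purposes, so N iterations cost N + 1 scans
        // instead of the 2 * N scans of separate train and test passes
        double loss = test_and_train_scan();
        if (iter > 0 && std::fabs(prev_loss - loss) < tolerance)
            return iter; // converged
        prev_loss = (iter > 0) ? loss : DBL_MAX; // iteration 0 only builds weights
    }
    return max_iterations;
}
```
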
+        if (iter > 0)
+            stop = exec_gd_stop_iterating(gd_state, prev_loss, hyperp->tolerance);
+
+        if (!stop) {
+            // continue with another iteration with the new weights
+            int rc = memcpy_s(gd_state->weights.data, max_bytes,
+                              gd_state->optimizer->weights.data, current_bytes);
+            securec_check(rc, "", "");
+
+            if (iter > 0)
+                gd_state->n_iterations++;
+
+            // timeout || max_iterations
+            stop = (gd_get_clock_usecs() - start >= max_usecs)
+                || (iter == hyperp->max_iterations);
+        }
+
+        if (hyperp->verbose && iter > 0) {
+            StringInfoData buf;
+            initStringInfo(&buf);
+
+            appendStringInfo(&buf, "test_loss=%.6f delta_loss=", gd_state->loss);
+
+            if (iter > 1)
+                appendStringInfo(&buf, "%.16g", fabs(prev_loss - gd_state->loss));
+            else
+                appendStringInfoChar(&buf, '-');
+
+            bool has;
+            double accuracy = get_accuracy(&gd_state->scores, &has);
+            appendStringInfo(&buf, " tolerance=%.3f accuracy=%.3f tuples=%d coef=", hyperp->tolerance,
+                             has ? accuracy : -1, gd_state->processed);
+
+            matrix_print(&gd_state->weights, &buf, false);
+
+            ereport(NOTICE, (errmodule(MOD_DB4AI), errmsg("ITERATION %d: %s", iter, buf.data)));
+            pfree(buf.data);
+        }
+
+        prev_loss = iter > 0 ? gd_state->loss : DBL_MAX;
+
+        if (!stop && !has_snapshot) {
+            // for the next iteration
+            gd_state->tms.rescan(gd_state->tms.callback_data);
+        }
+    }
+
+    exec_gd_finalize(gd_state);
+
+    finish = gd_get_clock_usecs();
+
+    gd_state->usecs = finish - start;
+    pstate->finished++;
+
+    // return trained model
+    Model *model = models[0];
+    MemoryContext oldcxt = MemoryContextSwitchTo(model->memory_context);
+
+    model->return_type = gd_state->target_typid;
+    model->exec_time_secs = gd_state->usecs / 1.0e6;
+    model->pre_time_secs = 0;
+    model->processed_tuples = gd_state->processed;
+    model->discarded_tuples = gd_state->discarded;
+
+    model->num_actual_iterations = gd_state->n_iterations;
+
+    // scores
+    bool isNull;
+    int nmetrics;
+    double score;
+    MetricML* metrics = self->get_metrics(self, &nmetrics);
+    model->scores = nullptr;
+    for (int m = 0; m < nmetrics; m++) {
+        score = gd_get_score(gd_state, metrics[m], &isNull);
+        if (!isNull) {
+            TrainingScore *pscore = (TrainingScore *)palloc0(sizeof(TrainingScore));
+            pscore->name = metric_ml_to_string(metrics[m]);
+            pscore->value = score;
+            model->scores = lappend(model->scores, pscore);
+        }
+    }
+
+    // serialize internal data:
+    Datum dt;
+    struct varlena *categories = NULL;
+    Assert(!gd_state->weights.transposed);
+    float8 *pw = gd_state->weights.data;
+    int count = gd_state->weights.rows * gd_state->weights.columns;
+
+    model->data.version = DB4AI_MODEL_V01;
+    model->data.size = sizeof(GradientDescentModelV01) +
+                       sizeof(float8) * count;
+
+    // prepare categories for supervised categorical/binary
+    if (gd_is_supervised(gd_algo) && !gd_dep_var_is_continuous(gd_algo)) {
+        ArrayBuildState *astate = NULL;
+        for (int i = 0; i < gd_state->num_classes; i++)
+            astate = accumArrayResult(astate, gd_state->value_classes[i], false, gd_state->target_typid,
+                                      CurrentMemoryContext);
+
+        dt = makeArrayResult(astate, CurrentMemoryContext);
+        categories = pg_detoast_datum((struct varlena *)DatumGetPointer(dt));
+        model->data.size += VARSIZE(categories);
+    }
+
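At this point `model->data.size` is final, and the code that follows lays the blob out sequentially: the fixed `GradientDescentModelV01` header, then the weight coefficients, then the optional detoasted categories array, and finally any algorithm-specific extra data. A self-contained sketch of the same append-style layout (hypothetical header type and plain `memcpy`, without the securec bookkeeping):

```
// Sketch: sequential model blob - header, weights, categories, extra data.
// BlobHeader is illustrative; the real header is GradientDescentModelV01.
#include <cstdint>
#include <cstring>
#include <vector>

struct BlobHeader {
    uint32_t weights_bytes;
    uint32_t categories_size;
};

std::vector<uint8_t> serialize_model(const std::vector<double> &weights,
                                     const std::vector<uint8_t> &categories,
                                     const std::vector<uint8_t> &extra)
{
    BlobHeader hdr;
    hdr.weights_bytes = (uint32_t)(weights.size() * sizeof(double));
    hdr.categories_size = (uint32_t)categories.size();

    std::vector<uint8_t> blob(sizeof(hdr) + hdr.weights_bytes + categories.size() + extra.size());
    uint8_t *ptr = blob.data();

    memcpy(ptr, &hdr, sizeof(hdr));
    ptr += sizeof(hdr);
    if (!weights.empty())
        memcpy(ptr, weights.data(), hdr.weights_bytes);
    ptr += hdr.weights_bytes; // optional sections always start after the weights
    if (!categories.empty()) {
        memcpy(ptr, categories.data(), categories.size());
        ptr += categories.size();
    }
    if (!extra.empty())
        memcpy(ptr, extra.data(), extra.size()); // extra data is read back from the tail
    return blob;
}
```
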
+    // does it require extra data?
+    int extra_size = 0;
+    void *extra_data = nullptr;
+    if (gd_algo->get_extra_data != nullptr) {
+        extra_data = gd_algo->get_extra_data(gd_state, &extra_size);
+        model->data.size += extra_size;
+    }
+
+    // serialize data
+    GradientDescentModelV01 *mdata = (GradientDescentModelV01*)palloc(model->data.size);
+    model->data.raw_data = mdata;
+
+    errno_t rc = memcpy_s(&mdata->hyperparameters, sizeof(HyperparametersGD), hyperp, sizeof(HyperparametersGD));
+    securec_check_ss(rc, "\0", "\0");
+
+    mdata->features = gd_state->n_features;
+    if (gd_state->kernel != nullptr)
+        mdata->input = gd_state->aux_input.rows;
+    else {
+        mdata->input = mdata->features;
+        if (gd_algo->add_bias)
+            mdata->input--;
+    }
+    mdata->dimensions[0] = gd_state->weights.rows;
+    mdata->dimensions[1] = gd_state->weights.columns;
+
+    int8_t *ptr = (int8_t *)(mdata + 1);
+    int avail = model->data.size - sizeof(GradientDescentModelV01);
+    count *= sizeof(float8);
+    rc = memcpy_s(ptr, avail, pw, count);
+    securec_check_ss(rc, "\0", "\0");
+
+    // advance past the weights unconditionally, so that the optional sections
+    // that follow land at the correct offsets even when there are no categories
+    ptr += count;
+    avail -= count;
+
+    if (categories != NULL) {
+        mdata->categories_size = VARSIZE(categories);
+
+        rc = memcpy_s(ptr, avail, categories, mdata->categories_size);
+        securec_check_ss(rc, "\0", "\0");
+
+        if (categories != (struct varlena *)DatumGetPointer(dt))
+            pfree(categories);
+    } else
+        mdata->categories_size = 0;
+
+    // extra data for prediction
+    if (extra_size > 0) {
+        Assert(extra_data != nullptr);
+        ptr += mdata->categories_size;
+        avail -= mdata->categories_size;
+        rc = memcpy_s(ptr, avail, extra_data, extra_size);
+        securec_check_ss(rc, "\0", "\0");
+    }
+
+    // DEPRECATED
+    model->weights = 0;
+    model->train_info = nullptr;
+    model->model_data = 0;
+
+    model->status = ERRCODE_SUCCESSFUL_COMPLETION;
+    check_gd_state(gd_state);
+    MemoryContextSwitchTo(oldcxt);
+}
+
+void gd_end(AlgorithmAPI *self, TrainModelState* pstate)
+{
+    GradientDescentState *gd_state = (GradientDescentState *)pstate;
+
+    if (gd_state->allocated_classes > 0)
+        pfree(gd_state->value_classes);
+
+    // release state
+    if (gd_state->init) {
+        matrix_release(&gd_state->weights);
+
+        if (gd_state->kernel != nullptr) {
+            gd_state->kernel->release(gd_state->kernel);
+            pfree(gd_state->kernel);
+            matrix_release(&gd_state->aux_input);
+        }
+
+        gd_state->shuffle->release(gd_state->shuffle);
+
+        matrix_release(&gd_state->optimizer->gradients);
+        matrix_release(&gd_state->optimizer->weights);
+        gd_state->optimizer->release(gd_state->optimizer);
+
+        gd_state->init = false;
+    }
+}
+
+void gd_deserialize(GradientDescent *gd, const SerializedModel *model, Oid return_type, SerializedModelGD *gdm)
+{
+    gdm->algorithm = gd;
+    gdm->return_type = return_type;
+
+    int avail = model->size;
+    if (model->version == DB4AI_MODEL_V01) {
+        if (avail < (int)sizeof(GradientDescentModelV01))
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS),
+                    errmsg("Model data corrupted reading header")));
+
+        GradientDescentModelV01 *mdata = (GradientDescentModelV01 *)model->raw_data;
+        avail -= sizeof(GradientDescentModelV01);
+
+        gdm->input = mdata->input;
+        gdm->kernel = nullptr;
+
+        errno_t rc = memcpy_s(&gdm->hyperparameters, sizeof(HyperparametersGD), &mdata->hyperparameters,
+                              sizeof(HyperparametersGD));
+        securec_check_ss(rc, "\0", "\0");
+
+        matrix_init(&gdm->features, mdata->features);
+        matrix_init(&gdm->weights, mdata->dimensions[0], mdata->dimensions[1]);
+
+        // read the coefficients
+        int bc = (gdm->weights.rows * gdm->weights.columns) * sizeof(float8);
+        if (avail < bc)
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS),
+                    
errmsg("Model data corrupted reading coefficient"))); + + uint8_t *ptr = (uint8_t *)(mdata + 1); + rc = memcpy_s(gdm->weights.data, gdm->weights.allocated * sizeof(float8), ptr, bc); + securec_check(rc, "\0", "\0"); + avail -= bc; + + // read the categories + gdm->ncategories = 0; + gdm->categories = nullptr; + if (mdata->categories_size > 0) { + ArrayType *arr = (ArrayType *)(ptr + bc); + if (avail < (int)VARSIZE(arr)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS), + errmsg("Model data corrupted reading categories"))); + + gdm->ncategories = ARR_DIMS(arr)[0]; + gdm->categories = (Datum *)palloc(gdm->ncategories * sizeof(Datum)); + + Datum dt; + bool isnull; + int cat = 0; + ArrayIterator it = array_create_iterator(arr, 0); + while (array_iterate(it, &dt, &isnull)) { + Assert(!isnull); + gdm->categories[cat++] = dt; + } + array_free_iterator(it); + avail -= VARSIZE(arr); + } + if (avail > 0) { + // extra data, copy the end + gdm->extra_data = palloc(avail); + rc = memcpy_s(gdm->extra_data, avail, (uint8_t*)model->raw_data + model->size - avail, avail); + securec_check(rc, "\0", "\0"); + } else + gdm->extra_data = nullptr; + } else + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS), + errmsg("Invalid Gradient Descent model version"))); +} + +List *gd_explain(struct AlgorithmAPI *self, const SerializedModel *model, Oid return_type) +{ + List *infos = NULL; + + // extract serialized model + SerializedModelGD gds; + gd_deserialize((GradientDescent*)self, model, return_type, &gds); + + // training metadata + TrainingInfo *info; + + // weights + ArrayBuildState *astate = NULL; + float8 *pw = gds.weights.data; + info = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + if (gds.weights.columns == 1) { + // vector of weights + int count = gds.weights.rows * gds.weights.columns; + while (count-- > 0) + astate = accumArrayResult(astate, Float8GetDatum(*pw++), false, FLOAT8OID, CurrentMemoryContext); + } else { + // matrix of weights + for (int r = 0; r < gds.weights.rows; r++) { + ArrayBuildState *astate2 = NULL; + for (int c = 0; c < gds.weights.columns; c++) + astate2 = accumArrayResult(astate2, Float8GetDatum(*pw++), false, FLOAT8OID, CurrentMemoryContext); + + astate = accumArrayResult(astate, makeArrayResult(astate2, CurrentMemoryContext), + false, FLOAT8ARRAYOID, CurrentMemoryContext); + } + } + info->value = makeArrayResult(astate, CurrentMemoryContext); + info->type = FLOAT8ARRAYOID; + info->name = "weights"; + infos = lappend(infos, info); + + // categories + if (gd_is_supervised(gds.algorithm) && !gd_dep_var_is_continuous(gds.algorithm)) { + info = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + astate = NULL; + for (int i = 0; i < gds.ncategories; i++) + astate = + accumArrayResult(astate, gds.categories[i], false, gds.return_type, CurrentMemoryContext); + + info->value = makeArrayResult(astate, CurrentMemoryContext); + info->type = get_array_type(gds.return_type); + info->name = "categories"; + infos = lappend(infos, info); + } + + // release + matrix_release(&gds.weights); + matrix_release(&gds.features); + if (gds.categories != nullptr) + pfree(gds.categories); + + return infos; +} diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/linregr.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/linregr.cpp index 2ffbc869d..96e9f1ed3 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/linregr.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/linregr.cpp @@ -23,26 +23,27 @@ #include "db4ai/gd.h" -static void linear_reg_gradients(const 
GradientDescent *gd_node, const Matrix *features, const Matrix *dep_var, - Matrix *weights, Matrix *gradients) +static void linear_reg_gradients(GradientsConfig *cfg) { - Assert(features->rows > 0); + Assert(cfg->features->rows > 0); + + GradientsConfigGD *cfg_gd = (GradientsConfigGD *)cfg; // xT * (x * w - y) Matrix loss; - matrix_init(&loss, features->rows); - matrix_mult_vector(features, weights, &loss); - matrix_subtract(&loss, dep_var); + matrix_init(&loss, cfg->features->rows); + matrix_mult_vector(cfg->features, cfg->weights, &loss); + matrix_subtract(&loss, cfg_gd->dep_var); Matrix x_t; - matrix_init_transpose(&x_t, features); - matrix_mult_vector(&x_t, &loss, gradients); + matrix_init_transpose(&x_t, cfg->features); + matrix_mult_vector(&x_t, &loss, cfg->gradients); matrix_release(&x_t); matrix_release(&loss); } -static double linear_reg_test(const GradientDescent *gd_node, const Matrix *features, const Matrix *dep_var, +static double linear_reg_test(const GradientDescentState* gd_state, const Matrix *features, const Matrix *dep_var, const Matrix *weights, Scores *scores) { Assert(features->rows > 0); @@ -53,7 +54,7 @@ static double linear_reg_test(const GradientDescent *gd_node, const Matrix *feat matrix_mult_vector(features, weights, &errors); matrix_subtract(&errors, dep_var); matrix_square(&errors); - gd_float tuple_loss = matrix_get_sum(&errors) / features->rows; + float8 tuple_loss = matrix_get_sum(&errors) / features->rows; scores->mse += tuple_loss; tuple_loss /= 2; @@ -62,19 +63,47 @@ static double linear_reg_test(const GradientDescent *gd_node, const Matrix *feat return tuple_loss; } -static gd_float linear_reg_predict(const Matrix *features, const Matrix *weights) +static Datum linear_reg_predict(const Matrix *features, const Matrix *weights, + Oid return_type, void *extra_data, bool max_binary, bool* categorize) { + Assert(!max_binary); + // p = x * w - return matrix_dot(features, weights); + float8 prediction = matrix_dot(features, weights); + *categorize = false; + return float8_get_datum(return_type, prediction); } -GradientDescentAlgorithm gd_linear_regression = { - "linear-regression", - GD_DEPENDENT_VAR_CONTINUOUS, - METRIC_MSE, // same as loss function +//////////////////////////////////////////////////////////////////////////////////////////////////// + +GradientDescent gd_linear_regression = { + { + LINEAR_REGRESSION, + "linear_regression", + ALGORITHM_ML_TARGET_CONTINUOUS | ALGORITHM_ML_RESCANS_DATA, + gd_metrics_mse, + gd_get_hyperparameters_regression, + gd_make_hyperparameters, + gd_update_hyperparameters, + gd_create, + gd_run, + gd_end, + gd_predict_prepare, + gd_predict, + gd_explain + }, + true, + 0, // default return type + 0.0, // default feature 0.0, 0.0, // not necessary + nullptr, + gd_init_optimizer, + nullptr, + nullptr, + nullptr, linear_reg_gradients, linear_reg_test, linear_reg_predict, + nullptr, }; diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/logregr.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/logregr.cpp index b9c76c885..e27db493f 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/logregr.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/logregr.cpp @@ -23,27 +23,28 @@ #include "db4ai/gd.h" -static void logreg_gradients(const GradientDescent *gd_node, const Matrix *features, const Matrix *dep_var, - Matrix *weights, Matrix *gradients) +static void logreg_gradients(GradientsConfig *cfg) { - Assert(features->rows > 0); + Assert(cfg->features->rows > 0); + + GradientsConfigGD *cfg_gd = (GradientsConfigGD *)cfg; // xT * ((1.0 / 
(1.0 + exp(-x*w))) - y) Matrix sigma; - matrix_init(&sigma, features->rows); - matrix_mult_vector(features, weights, &sigma); + matrix_init(&sigma, cfg->features->rows); + matrix_mult_vector(cfg->features, cfg->weights, &sigma); matrix_sigmoid(&sigma); - matrix_subtract(&sigma, dep_var); + matrix_subtract(&sigma, cfg_gd->dep_var); Matrix x_t; - matrix_init_transpose(&x_t, features); - matrix_mult_vector(&x_t, &sigma, gradients); + matrix_init_transpose(&x_t, cfg->features); + matrix_mult_vector(&x_t, &sigma, cfg->gradients); matrix_release(&x_t); matrix_release(&sigma); } -static double logreg_test(const GradientDescent *gd_node, const Matrix *features, const Matrix *dep_var, +static double logreg_test(const GradientDescentState* gd_state, const Matrix *features, const Matrix *dep_var, const Matrix *weights, Scores *scores) { Assert(features->rows > 0); @@ -80,7 +81,7 @@ static double logreg_test(const GradientDescent *gd_node, const Matrix *features // cost = sum(-cost1 - cost2) / N matrix_negate(&cost1); matrix_subtract(&cost1, &cost2); - gd_float tuple_loss = matrix_get_sum(&cost1) / features->rows; + float8 tuple_loss = matrix_get_sum(&cost1) / features->rows; matrix_release(&cost2); matrix_release(&cost1); @@ -88,21 +89,60 @@ static double logreg_test(const GradientDescent *gd_node, const Matrix *features return tuple_loss; } -static gd_float logreg_predict(const Matrix *features, const Matrix *weights) +static Datum logreg_predict(const Matrix *features, const Matrix *weights, + Oid return_type, void *extra_data, bool max_binary, bool* categorize) { // p = 1.0 + exp(-x*w) - gd_float r = matrix_dot(features, weights); + float8 r = matrix_dot(features, weights); r = 1.0 / (1.0 + exp(-r)); - return r < 0.5 ? 0.0 : 1.0; + if (!max_binary) + r = (r < 0.5 ? 0.0 : 1.0); + *categorize = true; + return Float4GetDatum(r); } -GradientDescentAlgorithm gd_logistic_regression = { - "logistic-regression", - GD_DEPENDENT_VAR_BINARY, - METRIC_ACCURACY | METRIC_F1 | METRIC_PRECISION | METRIC_RECALL | METRIC_LOSS, +///////////////////////////////////////////////////////////////////// + +// Used by linear regression and logistic regression +static HyperparameterDefinition regression_hyperparameter_definitions[] = { + GD_HYPERPARAMETERS_SUPERVISED +}; + +const HyperparameterDefinition* gd_get_hyperparameters_regression(AlgorithmAPI *self, int *definitions_size) +{ + Assert(definitions_size != nullptr); + *definitions_size = sizeof(regression_hyperparameter_definitions) / sizeof(HyperparameterDefinition); + return regression_hyperparameter_definitions; +} + +GradientDescent gd_logistic_regression = { + { + LOGISTIC_REGRESSION, + GD_LOGISTIC_REGRESSION_NAME, + ALGORITHM_ML_DEFAULT | ALGORITHM_ML_RESCANS_DATA, + gd_metrics_accuracy, + gd_get_hyperparameters_regression, + gd_make_hyperparameters, + gd_update_hyperparameters, + gd_create, + gd_run, + gd_end, + gd_predict_prepare, + gd_predict, + gd_explain + }, + true, + 0, // default return type + 0.0, // default feature 0.0, 1.0, + nullptr, + gd_init_optimizer, + nullptr, + nullptr, + nullptr, logreg_gradients, logreg_test, logreg_predict, + nullptr, }; diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/matrix.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/matrix.cpp deleted file mode 100644 index 2ed2cca9f..000000000 --- a/src/gausskernel/dbmind/db4ai/executor/gd/matrix.cpp +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. 
- * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - *--------------------------------------------------------------------------------------- - * - * matrix.cpp - * - * IDENTIFICATION - * src/gausskernel/dbmind/db4ai/executor/gd/matrix.cpp - * - * --------------------------------------------------------------------------------------- - */ - -#include "db4ai/matrix.h" - -#define MATRIX_LIMITED_OUTPUT 30 - -void matrix_print(const Matrix *matrix, StringInfo buf, bool full) -{ - Assert(matrix != nullptr); - Assert(!matrix->transposed); - const gd_float *pf = matrix->data; - appendStringInfoChar(buf, '['); - for (int r = 0; r < matrix->rows; r++) { - if (!full && matrix->rows > MATRIX_LIMITED_OUTPUT && r > (MATRIX_LIMITED_OUTPUT / 2) && - r < matrix->rows - (MATRIX_LIMITED_OUTPUT / 2)) { - if (matrix->columns > 1) - appendStringInfoString(buf, ",\n..."); - else - appendStringInfoString(buf, ", ..."); - - r = matrix->rows - MATRIX_LIMITED_OUTPUT / 2; - continue; - } - - if (matrix->columns > 1) { - if (r > 0) - appendStringInfoString(buf, ",\n"); - - appendStringInfoChar(buf, '['); - } else { - if (r > 0) - appendStringInfoString(buf, ", "); - } - for (int c = 0; c < matrix->columns; c++) { - if (c > 0) - appendStringInfoString(buf, ", "); - - appendStringInfo(buf, "%.16g", *pf++); - } - if (matrix->columns > 1) - appendStringInfoChar(buf, ']'); - } - appendStringInfoChar(buf, ']'); -} - -void elog_matrix(int elevel, const char *msg, const Matrix *matrix) -{ - StringInfoData buf; - initStringInfo(&buf); - matrix_print(matrix, &buf, false); - ereport(elevel, (errmodule(MOD_DB4AI), errmsg("%s = %s", msg, buf.data))); - pfree(buf.data); -} diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/multiclass.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/multiclass.cpp new file mode 100644 index 000000000..1a5d3f2ce --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/gd/multiclass.cpp @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ *--------------------------------------------------------------------------------------- + * + * multiclass.cpp + * + * IDENTIFICATION + * src/gausskernel/dbmind/db4ai/executor/gd/multiclass.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "db4ai/gd.h" + + +static bool multiclass_scan(const Matrix* features, const Matrix* dep_var) +{ + // do nothing, it is just to read all the different classes or target values + return false; // scan all rows +} + +static void multiclass_gradients(GradientsConfig *cfg) +{ + Assert(cfg->features->rows > 0); + + GradientDescent *algo = (GradientDescent *) get_algorithm_api(cfg->hyperp->classifier); + algo->compute_gradients(cfg); +} + +static double multiclass_test(const GradientDescentState* gd_state, const Matrix *features, const Matrix *dep_var, + const Matrix *weights, Scores *scores) +{ + int n_classes = weights->rows; + int n_features = weights->columns; + int n_rows = dep_var->rows; + + Matrix weights_ova; + matrix_init(&weights_ova, n_features); + + Matrix dep_var_ova; + matrix_init(&dep_var_ova, n_rows); + + OptimizerOVA *opt = (OptimizerOVA *)gd_state->optimizer; + + Scores scores_tmp; + double loss = 0; + float8 *pw = weights->data; + for (int c = 0; c < n_classes; c++) { + scores_init(&scores_tmp); + gd_multiclass_target(opt->algorithm, c, dep_var_ova.data, dep_var->data, n_rows); + float8 *curr_w = matrix_swap_data(&weights_ova, pw); + loss += opt->algorithm->test(gd_state, features, &dep_var_ova, &weights_ova, &scores_tmp); + matrix_swap_data(&weights_ova, curr_w); + pw += n_features; + } + + matrix_release(&weights_ova); + matrix_release(&dep_var_ova); + return loss; +} + +static Datum multiclass_predict(const Matrix *features, const Matrix *weights, + Oid return_type, void *extra_data, bool max_binary, bool* categorize) +{ + Assert(extra_data != nullptr); + Assert(!max_binary); + + AlgorithmML algo = *(AlgorithmML*)extra_data; + GradientDescent *palgo = (GradientDescent *) get_algorithm_api(algo); + + int n_classes = weights->rows; + int n_features = weights->columns; + + Matrix weights_ova; + matrix_init(&weights_ova, n_features); + + int cat = 0; + double dmax = 0; + float8 *pw = weights->data; + for (int c = 0; c < n_classes; c++) { + float8 *curr_w = matrix_swap_data(&weights_ova, pw); + double r = DatumGetFloat4(palgo->predict(features, &weights_ova, return_type, nullptr, true, categorize)); + if (r > dmax) { + dmax = r; + cat = c; + } + matrix_swap_data(&weights_ova, curr_w); + pw += n_features; + } + + matrix_release(&weights_ova); + + *categorize = true; + return Float4GetDatum(cat); +} + +static void *multiclass_get_extra_data(GradientDescentState *gd_state, int *size) +{ + static AlgorithmML ml; + *size = sizeof(AlgorithmML); + return &ml; +} + +#define GD_NUM_CLASSIFIERS 2 + +static const char* gd_classifiers_str[GD_NUM_CLASSIFIERS] = { + GD_SVM_CLASSIFICATION_NAME, + GD_LOGISTIC_REGRESSION_NAME +}; + +static AlgorithmML gd_classifiers[GD_NUM_CLASSIFIERS] { + SVM_CLASSIFICATION, + LOGISTIC_REGRESSION, +}; + +static const char *classifier_getter(void *classifier) +{ + AlgorithmML algo = *static_cast(classifier); + for (int c = 0; c < GD_NUM_CLASSIFIERS; c++) { + if (gd_classifiers[c] == algo) + return gd_classifiers_str[c]; + } + + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Invalid classifier %d", algo))); + return NULL; +} + +static void classifier_setter(const char *str, void *algorithm_ml) +{ + for (int c = 0; c < 
GD_NUM_CLASSIFIERS; c++) { + if (strcmp(str, gd_classifiers_str[c]) == 0) { + *static_cast(algorithm_ml) = gd_classifiers[c]; + return; + } + } + + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Invalid classifier '%s'", str))); +} + +static HyperparameterDefinition multiclass_hyperparameter_definitions[] = { + GD_HYPERPARAMETERS_SVM_CLASSIFICATION + HYPERPARAMETER_ENUM("classifier", GD_SVM_CLASSIFICATION_NAME, + gd_classifiers_str, GD_NUM_CLASSIFIERS, + classifier_getter, classifier_setter, + HyperparametersGD, classifier, + HP_AUTOML_ENUM()), +}; + +static const HyperparameterDefinition* gd_get_hyperparameters_multiclass(AlgorithmAPI *self, int *definitions_size) +{ + Assert(definitions_size != nullptr); + *definitions_size = sizeof(multiclass_hyperparameter_definitions) / sizeof(HyperparameterDefinition); + return multiclass_hyperparameter_definitions; +} + +KernelTransformer* multiclass_init_kernel(int features, HyperparametersGD *hyperp) +{ + GradientDescent *algo = (GradientDescent *) get_algorithm_api(hyperp->classifier); + if (algo->prepare_kernel != nullptr) + return algo->prepare_kernel(features, hyperp); + + return nullptr; +} + +GradientDescent gd_multiclass = { + { + MULTICLASS, + "multiclass", + ALGORITHM_ML_TARGET_MULTICLASS | ALGORITHM_ML_RESCANS_DATA, + gd_metrics_accuracy, + gd_get_hyperparameters_multiclass, + gd_make_hyperparameters, + gd_update_hyperparameters, + gd_create, + gd_run, + gd_end, + gd_predict_prepare, + gd_predict, + gd_explain + }, + true, + 0, // default return type + 0.0, // default feature + 0.0, // not needed + 0.0, // not needed + multiclass_scan, + gd_init_optimizer_ova, + multiclass_init_kernel, + nullptr, + nullptr, + multiclass_gradients, + multiclass_test, + multiclass_predict, + multiclass_get_extra_data, +}; diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_gd.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_gd.cpp index 21d39f259..d84de9989 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_gd.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_gd.cpp @@ -28,7 +28,6 @@ typedef struct OptimizerMinibatch { OptimizerGD opt; - const GradientDescentState *gd_state; double learning_rate; } OptimizerMinibatch; @@ -37,19 +36,25 @@ static void opt_gd_end_iteration(OptimizerGD *optimizer) OptimizerMinibatch *opt = (OptimizerMinibatch *)optimizer; // decay the learning rate with decay^iterations - opt->learning_rate *= gd_get_node(opt->gd_state)->decay; + opt->learning_rate *= opt->opt.hyperp->decay; } static void opt_gd_update_batch(OptimizerGD *optimizer, const Matrix *features, const Matrix *dep_var) { OptimizerMinibatch *opt = (OptimizerMinibatch *)optimizer; + GradientDescent *gd_algo = (GradientDescent*)opt->opt.gd_state->tms.algorithm; // clear gradients of the batch matrix_zeroes(&optimizer->gradients); // update gradients - opt->gd_state->algorithm->gradients_callback(gd_get_node(opt->gd_state), features, dep_var, &optimizer->weights, - &optimizer->gradients); + GradientsConfigGD cfg; + cfg.hdr.hyperp = optimizer->hyperp; + cfg.hdr.features = features; + cfg.hdr.weights = &optimizer->weights; + cfg.hdr.gradients = &optimizer->gradients; + cfg.dep_var = dep_var; + gd_algo->compute_gradients(&cfg.hdr); elog_matrix(DEBUG1, "optimizer gd: gradients", &optimizer->gradients); @@ -65,14 +70,18 @@ static void opt_gd_release(OptimizerGD *optimizer) pfree(optimizer); } -OptimizerGD *gd_init_optimizer_gd(const GradientDescentState *gd_state) +OptimizerGD 
*gd_init_optimizer_gd(const GradientDescentState *gd_state, HyperparametersGD *hyperp) { OptimizerMinibatch *opt = (OptimizerMinibatch *)palloc0(sizeof(OptimizerMinibatch)); + opt->opt.hyperp = hyperp; opt->opt.start_iteration = nullptr; opt->opt.end_iteration = opt_gd_end_iteration; opt->opt.update_batch = opt_gd_update_batch; opt->opt.release = opt_gd_release; - opt->gd_state = gd_state; - opt->learning_rate = gd_get_node(gd_state)->learning_rate; + opt->opt.finalize = nullptr; + opt->opt.gd_state = gd_state; + opt->learning_rate = hyperp->learning_rate; + matrix_init(&opt->opt.weights, gd_state->n_features); + matrix_init(&opt->opt.gradients, gd_state->n_features); return &opt->opt; } diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ngd.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ngd.cpp index 095593259..a67a2ed65 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ngd.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ngd.cpp @@ -33,7 +33,6 @@ typedef struct OptimizerNormalize { OptimizerGD opt; - const GradientDescentState *gd_state; double learning_rate; bool learn; // only first iteration double scale_rate; @@ -45,7 +44,7 @@ static void opt_ngd_end_iteration(OptimizerGD *optimizer) OptimizerNormalize *opt = (OptimizerNormalize *)optimizer; // decay the learning rate with decay^iterations - opt->learning_rate *= gd_get_node(opt->gd_state)->decay; + opt->learning_rate *= optimizer->hyperp->decay; // be sure that learns how to normalize only in the first iteration opt->learn = false; @@ -54,16 +53,17 @@ static void opt_ngd_end_iteration(OptimizerGD *optimizer) static void opt_ngd_update_batch(OptimizerGD *optimizer, const Matrix *features, const Matrix *dep_var) { OptimizerNormalize *opt = (OptimizerNormalize *)optimizer; + GradientDescent *gd_algo = (GradientDescent*)opt->opt.gd_state->tms.algorithm; if (opt->learn) { Assert(features->columns == opt->scale_gradients.rows); - gd_float *pf = features->data; + float8 *pf = features->data; for (int r = 0; r < features->rows; r++) { - gd_float *pw = optimizer->weights.data; - gd_float *ps = opt->scale_gradients.data; + float8 *pw = optimizer->weights.data; + float8 *ps = opt->scale_gradients.data; for (int c = 0; c < features->columns; c++) { - gd_float qx = *pf++; + float8 qx = *pf++; qx *= qx; if (qx > *ps) { // update weights and scaling of gradients @@ -82,16 +82,22 @@ static void opt_ngd_update_batch(OptimizerGD *optimizer, const Matrix *features, // clear gradients of the batch matrix_zeroes(&optimizer->gradients); - opt->gd_state->algorithm->gradients_callback(gd_get_node(opt->gd_state), features, dep_var, &optimizer->weights, - &optimizer->gradients); + + GradientsConfigGD cfg; + cfg.hdr.hyperp = optimizer->hyperp; + cfg.hdr.features = features; + cfg.hdr.weights = &optimizer->weights; + cfg.hdr.gradients = &optimizer->gradients; + cfg.dep_var = dep_var; + gd_algo->compute_gradients(&cfg.hdr); elog_matrix(DEBUG1, "optimizer ngd: gradients", &optimizer->gradients); // normalize gradients - gd_float *pg = optimizer->gradients.data; - gd_float *ps = opt->scale_gradients.data; + float8 *pg = optimizer->gradients.data; + float8 *ps = opt->scale_gradients.data; for (int r = 0; r < opt->scale_gradients.rows; r++) { - gd_float s = 0.0; + float8 s = 0.0; if (*ps > 0) s = (1.0 / opt->scale_rate) / *ps; @@ -114,17 +120,21 @@ static void opt_ngd_release(OptimizerGD *optimizer) pfree(optimizer); } -OptimizerGD *gd_init_optimizer_ngd(const GradientDescentState *gd_state) +OptimizerGD 
*gd_init_optimizer_ngd(const GradientDescentState *gd_state, HyperparametersGD *hyperp)
 {
     OptimizerNormalize *opt = (OptimizerNormalize *)palloc0(sizeof(OptimizerNormalize));
+    opt->opt.hyperp = hyperp;
     opt->opt.start_iteration = nullptr;
     opt->opt.end_iteration = opt_ngd_end_iteration;
     opt->opt.update_batch = opt_ngd_update_batch;
     opt->opt.release = opt_ngd_release;
-    opt->gd_state = gd_state;
-    opt->learning_rate = gd_get_node(gd_state)->learning_rate;
+    opt->opt.finalize = nullptr;
+    opt->opt.gd_state = gd_state;
+    opt->learning_rate = hyperp->learning_rate;
     opt->learn = true;
     opt->scale_rate = 0.0;
     matrix_init(&opt->scale_gradients, gd_state->n_features);
+    matrix_init(&opt->opt.weights, gd_state->n_features);
+    matrix_init(&opt->opt.gradients, gd_state->n_features);
     return &opt->opt;
 }
diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ova.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ova.cpp
new file mode 100644
index 000000000..a0334c23f
--- /dev/null
+++ b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ova.cpp
@@ -0,0 +1,102 @@
+/*
+* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+*
+* openGauss is licensed under Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+*
+* http://license.coscl.org.cn/MulanPSL2
+*
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*---------------------------------------------------------------------------------------
+*
+* optimizer_ova.cpp
+*        Optimizer implementation of one-vs-all (OVA) multiclass classification
+*
+* IDENTIFICATION
+*        src/gausskernel/dbmind/db4ai/executor/gd/optimizer_ova.cpp
+*
+* ---------------------------------------------------------------------------------------
+*/
+
+#include "db4ai/gd.h"
+
+// ova: one-vs-all multiclass optimizer
+static void opt_gd_ova_start_iteration(OptimizerGD *optimizer)
+{
+    OptimizerOVA *opt = (OptimizerOVA *)optimizer;
+    if (opt->parent->start_iteration != nullptr)
+        opt->parent->start_iteration(opt->parent);
+}
+
+static void opt_gd_ova_end_iteration(OptimizerGD *optimizer)
+{
+    OptimizerOVA *opt = (OptimizerOVA *)optimizer;
+    if (opt->parent->end_iteration != nullptr)
+        opt->parent->end_iteration(opt->parent);
+}
+
+static void opt_gd_ova_update_batch(OptimizerGD *optimizer, const Matrix *features, const Matrix *dep_var)
+{
+    OptimizerOVA *opt = (OptimizerOVA *)optimizer;
+
+    // one-vs-all computes one model for each different class, assuming all other values are false
+    int n_classes = opt->opt.gd_state->num_classes;
+    int n_rows = dep_var->rows;
+    int n_features = optimizer->weights.columns;
+
+    Matrix dep_var_ova;
+    matrix_init(&dep_var_ova, n_rows);
+
+    float8 *pw = optimizer->weights.data;
+    for (int c = 0; c < n_classes; c++) {
+        gd_multiclass_target(opt->algorithm, c, dep_var_ova.data, dep_var->data, n_rows);
+        float8 *curr_w = matrix_swap_data(&opt->parent->weights, pw);
+        opt->parent->update_batch(opt->parent, features, &dep_var_ova);
+        matrix_swap_data(&opt->parent->weights, curr_w);
+        pw += n_features;
+    }
+    matrix_release(&dep_var_ova);
+
+    elog_matrix(DEBUG1, "optimizer ova: weights", &optimizer->weights);
+}
+
+static void opt_gd_ova_finalize(OptimizerGD *optimizer)
+{
+    OptimizerOVA *opt = (OptimizerOVA *)optimizer;
+    if (opt->parent->finalize != nullptr)
+        opt->parent->finalize(opt->parent);
+}
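
`opt_gd_ova_update_batch` above is the heart of the reduction: it relabels the target column once per class and lets the wrapped binary optimizer update that class's block of weights. A self-contained sketch of the same one-vs-all idea over a caller-supplied binary trainer (all names illustrative, independent of the engine types):

```
// Sketch: one-vs-all reduction over a caller-supplied binary trainer.
#include <functional>
#include <vector>

using BinaryTrainer = std::function<std::vector<double>(
    const std::vector<std::vector<double>> &x, const std::vector<double> &y)>;

// Train k binary models; class c is relabeled to +1, everything else to -1,
// which plays the same role as gd_multiclass_target() in the patch.
std::vector<std::vector<double>> train_ova(const std::vector<std::vector<double>> &x,
                                           const std::vector<int> &labels, int k,
                                           const BinaryTrainer &train_binary)
{
    std::vector<std::vector<double>> models;
    std::vector<double> y(labels.size());
    for (int c = 0; c < k; c++) {
        for (size_t r = 0; r < labels.size(); r++)
            y[r] = (labels[r] == c) ? 1.0 : -1.0;
        models.push_back(train_binary(x, y));
    }
    return models;
}

// Predict: score the row with every binary model and return the argmax class.
int predict_ova(const std::vector<std::vector<double>> &models, const std::vector<double> &row)
{
    int best = 0;
    double best_score = -1e300;
    for (size_t c = 0; c < models.size(); c++) {
        double s = 0;
        for (size_t f = 0; f < row.size(); f++)
            s += models[c][f] * row[f];
        if (s > best_score) {
            best_score = s;
            best = (int)c;
        }
    }
    return best;
}
```

Prediction scores the row against every per-class model and keeps the argmax, which mirrors what `multiclass_predict` does with the wrapped classifier's raw responses.
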
+
+static void opt_gd_ova_release(OptimizerGD *optimizer)
+{
+    OptimizerOVA *opt = (OptimizerOVA *)optimizer;
+    pfree(opt->parent);
+    pfree(optimizer);
+}
+
+OptimizerGD *gd_init_optimizer_ova(const GradientDescentState *gd_state, HyperparametersGD *hyperp)
+{
+    if (hyperp->classifier != SVM_CLASSIFICATION && hyperp->classifier != LOGISTIC_REGRESSION)
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_OPERATION),
+                errmsg("OVA is only supported for gradient descent binary classification")));
+
+    // the OVA optimizer wraps a parent binary optimizer and reuses its state for every class
+    OptimizerOVA *opt = (OptimizerOVA *)palloc0(sizeof(OptimizerOVA));
+    opt->opt.hyperp = hyperp;
+    opt->opt.start_iteration = opt_gd_ova_start_iteration;
+    opt->opt.end_iteration = opt_gd_ova_end_iteration;
+    opt->opt.update_batch = opt_gd_ova_update_batch;
+    opt->opt.release = opt_gd_ova_release;
+    opt->opt.finalize = opt_gd_ova_finalize;
+    opt->opt.gd_state = gd_state;
+    opt->parent = gd_init_optimizer(gd_state, hyperp);
+    opt->algorithm = (GradientDescent*) get_algorithm_api(hyperp->classifier);
+    matrix_init(&opt->opt.weights, gd_state->num_classes, gd_state->n_features);
+    matrix_init(&opt->opt.gradients, 1); // this is not used
+    return &opt->opt;
+}
+
diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_pca.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_pca.cpp
new file mode 100644
index 000000000..c9f76e628
--- /dev/null
+++ b/src/gausskernel/dbmind/db4ai/executor/gd/optimizer_pca.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ *---------------------------------------------------------------------------------------
+ *
+ * optimizer_pca.cpp
+ *        Optimizer used for Principal Component Analysis
+ *
+ * IDENTIFICATION
+ *        src/gausskernel/dbmind/db4ai/executor/gd/optimizer_pca.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#include "db4ai/gd.h"
+#include "db4ai/db4ai_cpu.h"
+
+#include <random>
+#include <cstdint>
+
+typedef struct OptimizerPCA {
+    OptimizerGD opt;
+    IncrementalStatistics *eigenvalues_stats = nullptr;
+    int32_t number_eigenvectors = 0;
+    Matrix dot_products;
+} OptimizerPCA;
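
`gd_pca_update_batch` below moves the candidate eigenvectors a batch-scaled step along the gradient (ascent, since PCA maximizes explained variance) and then re-orthonormalizes them. For a single leading component the same loop is essentially Oja's rule; a toy standalone sketch (illustrative names, plain `std::vector` instead of the engine's `Matrix`):

```
// Toy sketch of the ascend-then-renormalize loop used for streaming PCA
// (Oja's rule for the leading component only; names are illustrative).
#include <cmath>
#include <vector>

void oja_update(std::vector<double> &w, const std::vector<std::vector<double>> &batch, double step)
{
    // gradient of w^T M w under the empirical covariance M: sum_x (x . w) x
    std::vector<double> grad(w.size(), 0.0);
    for (const auto &x : batch) {
        double dot = 0;
        for (size_t f = 0; f < w.size(); f++)
            dot += x[f] * w[f];
        for (size_t f = 0; f < w.size(); f++)
            grad[f] += dot * x[f];
    }
    // gradient ascent (maximization), scaled by the batch size as in the patch
    for (size_t f = 0; f < w.size(); f++)
        w[f] += (step / batch.size()) * grad[f];
    // renormalize: the single-vector analogue of the Gram-Schmidt pass
    double norm = 0;
    for (double v : w)
        norm += v * v;
    norm = std::sqrt(norm);
    for (double &v : w)
        v /= norm;
}
```
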
+
+static void gd_pca_update_batch(OptimizerGD *optimizer, Matrix const *features, Matrix const *dep_var)
+{
+    auto opt = reinterpret_cast<OptimizerPCA *>(optimizer);
+    GradientDescent *gd_algo = (GradientDescent *)opt->opt.gd_state->tms.algorithm;
+
+    int32_t const batch_size = features->rows;
+    int32_t const num_eigenvectors = opt->number_eigenvectors;
+
+    /*
+     * scaling by the size of the batch seems to be optimal in practice
+     * (it has been reported elsewhere as well)
+     */
+    float8 const step = 1. / batch_size;
+
+    // clear gradients of the batch
+    matrix_zeroes(&optimizer->gradients);
+    // clear the working matrix
+    matrix_zeroes(&opt->dot_products);
+
+    /*
+     * we compute the gradient but do not move in that direction inside that function;
+     * the current proportion of each eigenvector is also computed therein
+     */
+    GradientsConfigPCA cfg_pca;
+    cfg_pca.hdr.hyperp = optimizer->hyperp;
+    cfg_pca.hdr.features = features;
+    cfg_pca.hdr.weights = &optimizer->weights;
+    cfg_pca.hdr.gradients = &optimizer->gradients;
+    cfg_pca.dot_products = &opt->dot_products;
+    cfg_pca.eigenvalues_stats = opt->eigenvalues_stats;
+
+    /*
+     * the actual computations
+     */
+    gd_algo->compute_gradients(&cfg_pca.hdr);
+
+    /*
+     * moving in the direction of the gradient is actually done in here;
+     * for PCA we do gradient ascent (a maximization problem) directly instead
+     * of doing descent with a multiplicative factor of -1
+     */
+    matrix_mult_scalar(&optimizer->gradients, step);
+    matrix_add(&optimizer->weights, &optimizer->gradients);
+
+    /*
+     * we ortho-normalize before the next iteration;
+     * for the time being we perform Gram-Schmidt over the set of eigenvectors
+     */
+    matrix_gram_schmidt(&optimizer->weights, num_eigenvectors);
+
+    // DB4AI_API gd_pca_update_batch: TrainModel is a read-only input structure from the query plan;
+    // hyperparameters should also be read-only, use the state instead
+    HyperparametersGD *hyperp = (HyperparametersGD *)optimizer->hyperp;
+    hyperp->lambda = cfg_pca.batch_error;
+}
+
+force_inline static void gd_pca_release(OptimizerGD *optimizer)
+{
+    auto pca_opt = reinterpret_cast<OptimizerPCA *>(optimizer);
+
+    matrix_release(&pca_opt->dot_products);
+    pfree(pca_opt->eigenvalues_stats);
+    pfree(pca_opt);
+}
+
+OptimizerGD *gd_init_optimizer_pca(GradientDescentState const *gd_state, HyperparametersGD *hyperp)
+{
+    auto pca_opt = reinterpret_cast<OptimizerPCA *>(palloc0(sizeof(OptimizerPCA)));
+    uint64_t external_seed = hyperp->seed;
+    int32_t const dimension = gd_state->n_features;
+
+    pca_opt->opt.hyperp = hyperp;
+    pca_opt->opt.start_iteration = nullptr;
+    pca_opt->opt.end_iteration = nullptr;
+    pca_opt->opt.update_batch = gd_pca_update_batch;
+    pca_opt->opt.release = gd_pca_release;
+    pca_opt->opt.finalize = nullptr;
+    pca_opt->opt.gd_state = gd_state;
+    int32_t num_eigenvectors = pca_opt->number_eigenvectors = hyperp->number_dimensions;
+    int32_t batch_size = hyperp->batch_size;
+
+    /*
+     * the number of principal components can be at most the full dimension of the data
+     * (as seen from the number of features passed)
+     */
+    if (unlikely(num_eigenvectors > dimension))
+        num_eigenvectors = pca_opt->number_eigenvectors = dimension;
+
+    /*
+     * to keep track of running statistics on the eigenvalues
+     */
+    check_hyper_bounds(sizeof(IncrementalStatistics), num_eigenvectors, "number_components");
+    pca_opt->eigenvalues_stats =
+        reinterpret_cast<IncrementalStatistics *>(palloc0(sizeof(IncrementalStatistics) * num_eigenvectors));
+
+    /*
+     * eigenvectors are stored in a matrix (weights) of size (d x k) where d is the dimension
+     * of the k (column) eigenvectors
+     *
+     * observe that we allocate two extra columns in which we will keep the proportion of each
+     * eigenvalue as well as running statistics of each one of them
+     *
+     * observe that num_eigenvectors <= dimension, and thus using the last two columns
+     * of weights to store statistics and proportions of eigenvalues is possible
+     */
+    matrix_init(&pca_opt->opt.weights, dimension, num_eigenvectors + 2);
+    matrix_init(&pca_opt->opt.gradients, dimension, num_eigenvectors + 2);
+
+    /*
+     * this matrix (of dimensions n x k) represents in each column (component) a dot product;
+     * it is used internally when computing the gradients
+     */
+    matrix_init(&pca_opt->dot_products, batch_size, num_eigenvectors);
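
`matrix_gram_schmidt`, invoked after every ascent step and once more after the random initialization that follows, orthonormalizes the k column eigenvectors in place. A compact sketch of modified Gram-Schmidt over plain column vectors (assumes the columns are linearly independent; illustrative, not the engine's matrix layout):

```
// Sketch: modified Gram-Schmidt over k column vectors of dimension d.
#include <cmath>
#include <vector>

using Vec = std::vector<double>;

static double dot(const Vec &a, const Vec &b)
{
    double s = 0;
    for (size_t i = 0; i < a.size(); i++)
        s += a[i] * b[i];
    return s;
}

void gram_schmidt(std::vector<Vec> &cols)
{
    for (size_t j = 0; j < cols.size(); j++) {
        // remove the components along the already-orthonormalized vectors
        for (size_t i = 0; i < j; i++) {
            double proj = dot(cols[i], cols[j]);
            for (size_t f = 0; f < cols[j].size(); f++)
                cols[j][f] -= proj * cols[i][f];
        }
        // normalize to unit length
        double norm = std::sqrt(dot(cols[j], cols[j]));
        for (double &v : cols[j])
            v /= norm;
    }
}
```
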
+
+    /*
+     * we produce high quality random numbers
+     */
+    uint64_t const internal_seed = 0x274066DB9441E851ULL;
+    external_seed ^= internal_seed;
+    std::mt19937_64 gen(external_seed);
+    std::uniform_real_distribution<double> sampler(0., 1.);
+
+    Matrix eigenvector;
+    eigenvector.transposed = false;
+    eigenvector.rows = pca_opt->opt.weights.rows;
+    eigenvector.columns = 1;
+    eigenvector.allocated = pca_opt->opt.weights.rows;
+
+    for (int32_t e = 0; e < num_eigenvectors; ++e) {
+        eigenvector.data = pca_opt->opt.weights.data + (e * dimension);
+        for (int32_t f = 0; f < dimension; ++f)
+            eigenvector.data[f] = sampler(gen);
+    }
+
+    /*
+     * once the initial set of eigenvectors has been produced we have to orthonormalize it using
+     * (for the moment) modified Gram-Schmidt
+     */
+    matrix_gram_schmidt(&pca_opt->opt.weights, num_eigenvectors);
+
+    return &pca_opt->opt;
+}
\ No newline at end of file
diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/pca.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/pca.cpp
new file mode 100644
index 000000000..f4241344a
--- /dev/null
+++ b/src/gausskernel/dbmind/db4ai/executor/gd/pca.cpp
@@ -0,0 +1,395 @@
+/*
+* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+*
+* openGauss is licensed under Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+*
+* http://license.coscl.org.cn/MulanPSL2
+*
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*---------------------------------------------------------------------------------------
+*
+* pca.cpp
+*        SGD specialization to compute principal components
+*
+* IDENTIFICATION
+*        src/gausskernel/dbmind/db4ai/executor/gd/pca.cpp
+*
+* ---------------------------------------------------------------------------------------
+*/
+
+#include "db4ai/gd.h"
+#include "db4ai/db4ai_cpu.h"
+#include "db4ai/fp_ops.h"
+
+// we reuse this function defined in kmeans.cpp
+extern ArrayType *construct_empty_md_array(uint32_t const num_components, uint32_t const dimension);
+
+static void pca_gradients(GradientsConfig *cfg)
+{
+    Assert(cfg->features->rows > 0);
+
+    auto cfg_pca = reinterpret_cast<GradientsConfigPCA *>(cfg);
+    Matrix const *data = cfg->features;
+    Matrix *eigenvectors = cfg->weights;
+    Matrix *gradients = cfg->gradients;
+    Matrix *dot_products = cfg_pca->dot_products;
+
+    /*
+     * eigenvectors is a matrix of (d x (k + 2))
+     * d: dimension of the features
+     * k: number of components to be computed
+     * +2: proportions and standard deviations (column) vectors
+     *
+     * gradients is also a (clean) matrix of (d x (k + 2))
+     *
+     * eigenvalues is a matrix of (k x 1) and it is used as output
+     */
+    int32_t const dimension = eigenvectors->rows;
+    int32_t const num_eigenvectors = eigenvectors->columns - 2;
+    int32_t const batch_size = data->rows;
+
+    // this is the sum of all eigenvalues
+    double total_std_dev = 0.;
+    double total_std_dev_correction = 0.;
+    double local_error = 0.;
+    double proportion = 0.;
+    // this helps us update the running statistics of each eigenvalue
+    IncrementalStatistics running_stats;
+
+    /*
+     * the gradient of the loss function w.r.t. 
the current approximation matrix W + * (row vectors approximating the leading eigenvectors) is M * W, where M is a stochastic approximation to the + * empirical covariance matrix (via x^t * x - observe that data vectors are row vectors by default + * and thus x^t is a column vector). + * + * observe that we do not produce the whole (empirical) covariance matrix, but rather we compute it + * progressively (in a matrix-free manner using only vector-vector and scalar-vector operations) + */ + + /* + * these are only stubs to represent every eigenvector, the gradient it moves along, + * its proportion, and its standard deviation, each as a matrix (d x 1) + * this is a temporary solution while we implement efficient high-order BLAS + */ + Matrix eigenvector; + Matrix eigenvector_gradient; + Matrix eigenvector_proportion; + Matrix eigenvector_std_dev; + eigenvector.transposed = eigenvector_gradient.transposed = + eigenvector_proportion.transposed = eigenvector_std_dev.transposed = false; + + eigenvector.rows = eigenvector_gradient.rows = eigenvector_proportion.rows = + eigenvector_std_dev.rows = dimension; + + eigenvector.columns = eigenvector_gradient.columns = + eigenvector_proportion.columns = eigenvector_std_dev.columns = 1; + + eigenvector.allocated = eigenvector_gradient.allocated = + eigenvector_proportion.allocated = eigenvector_std_dev.allocated = dimension; + + // the proportion vector is the second-to-last column vector of the eigenvectors matrix + eigenvector_proportion.data = eigenvectors->data + (num_eigenvectors * dimension); + // the standard deviation vector is the very last column vector of the weights matrix + eigenvector_std_dev.data = eigenvector_proportion.data + dimension; + + /* + * this is also a stub for every dot product (n x 1) of the dot_products matrix + */ + Matrix dot_xr_w; + dot_xr_w.transposed = false; + dot_xr_w.rows = batch_size; + dot_xr_w.columns = 1; + dot_xr_w.allocated = batch_size; + + /* + * a (column) stub for every data vector (a row vector) of the data matrix (of dimensions n x d) + */ + Matrix x_r; + x_r.transposed = false; + x_r.rows = dimension; + x_r.columns = 1; + x_r.allocated = dimension; + + /* + * the empirical covariance matrix M = X^t * X is not produced explicitly.
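 *
 * [Editor's note: a condensed sketch of the matrix-free product this comment
 * describes and that the two loops further below implement with Matrix stubs.
 * Plain arrays and hypothetical names, not the db4ai Matrix API; it
 * accumulates G = M * W = X^t * (X * W) one column at a time and picks up the
 * eigenvalue estimate lambda_e = ||X * w_e||^2 along the way. The original
 * comment resumes after this sketch.]
 *
 *   // X is n x d row-major (one sample per row); W and G hold k contiguous
 *   // d-vectors, one per eigenvector, mirroring the column layout used here
 *   void covariance_times_weights(const double *X, int n, int d,
 *                                 const double *W, int k,
 *                                 double *G,       // d * k, zero-initialized
 *                                 double *lambda,  // k eigenvalue estimates
 *                                 double *t)       // scratch vector, length n
 *   {
 *       for (int e = 0; e < k; ++e) {
 *           double sq = 0.;
 *           for (int r = 0; r < n; ++r) {
 *               double dot = 0.;
 *               for (int f = 0; f < d; ++f)
 *                   dot += X[r * d + f] * W[e * d + f];  // t = X * w_e
 *               t[r] = dot;
 *               sq += dot * dot;
 *           }
 *           lambda[e] = sq;  // eigenvalue * batch size for unit-norm w_e
 *           for (int r = 0; r < n; ++r)     // G_e += t[r] * x_r, i.e. X^t * t
 *               for (int f = 0; f < d; ++f)
 *                   G[e * d + f] += t[r] * X[r * d + f];
 *       }
 *   }
 *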
thus, in order to produce the product + * M * W = ((X^t * X) * W), we do it as M * W = (X^t * (X * W)) to first produce a matrix of dimensions (n x k) + * this first matrix is produced with the following loop + * + * at this point we also compute the current approximation to the corresponding eigenvalues since: + * M * w_i = lambda_i * w_i -> w_i^t * M * w_i = w_i^t * lambda_i * w_i -> ||X * w_i||^2 = lambda_i * ||w_i||^2 -> + * lambda_i = ||X * w_i||^2 / ||w_i||^2 -> lambda_i = ||X * w_i||^2 since w_i is a unit vector + * + * we thus piggy-back on this computation :) + */ + for (int32_t e = 0; e < num_eigenvectors; ++e) { + eigenvector.data = eigenvectors->data + (e * dimension); + dot_xr_w.data = dot_products->data + (e * batch_size); + /* + * this is a column vector in which each entry represents the dot product <x_i, e_j>, + * where x_i is the i-th data (row) vector of the batch and e_j is the j-th (column) eigenvector + */ + matrix_mult_vector(data, &eigenvector, &dot_xr_w); + + // this is eigenvalue * batch_size + eigenvector_proportion.data[e] = matrix_dot(&dot_xr_w, &dot_xr_w); + // we aggregate all eigenvalues (in a robust manner) to be able to compute each proportion + // later on below + twoSum(total_std_dev, eigenvector_proportion.data[e], &total_std_dev, &local_error); + total_std_dev_correction += local_error; + } + total_std_dev += total_std_dev_correction; + + /* + * at this point we have produced matrix W' = (X * W) of dimensions n x k which is stored in dot_products + */ + + /* + * let us now factor in the current changes to the empirical covariance matrix w.r.t. the current batch + * we assume that the gradients (d x k) matrix has been reset + + * we will now produce the product X^t * W' to finally obtain M * W of dimension (d x k), + * whose columns correspond to the gradients of the different components + * + * observe that, technically, we have to transpose X to a matrix of dimension d x n, but since we produce + * the product component-wise, the transposition can be simulated + * + * the error of the batch is computed here as well + */ + float8 factor = 0.; + double error = 0.; + double error_correction = 0.; + for (int32_t e = 0; e < num_eigenvectors; ++e) { + dot_xr_w.data = dot_products->data + (e * batch_size); + eigenvector_gradient.data = gradients->data + (e * dimension); + for (int32_t r = 0; r < batch_size; ++r) { + x_r.data = data->data + (r * dimension); + factor = dot_xr_w.data[r]; + matrix_mult_scalar_add(&eigenvector_gradient, &x_r, factor); + } + // at this point the current proportion of each eigenvalue can be computed + twoDiv(eigenvector_proportion.data[e], total_std_dev, &proportion, &local_error); + // truncation + eigenvector_proportion.data[e] = proportion + local_error; + + // we update the current running statistics of the proportion of each eigenvalue + running_stats.setTotal(eigenvector_proportion.data[e]); + cfg_pca->eigenvalues_stats[e] += running_stats; + + // this is the current standard deviation of the proportion of each eigenvalue + eigenvector_std_dev.data[e] = cfg_pca->eigenvalues_stats[e].getEmpiricalStdDev(); + // we aggregate the standard deviations to produce the error of the batch + twoSum(error, eigenvector_std_dev.data[e], &error, &local_error); + error_correction += local_error; + } + // we take care of the error when doing arithmetic + cfg_pca->batch_error = error + error_correction; +} + +force_inline static double pca_test(const GradientDescentState* gd_state, const Matrix *features, + const Matrix *dep_var, const Matrix *weights, Scores
*scores) +{ + Assert(features->rows > 0); + + const TrainModel *gd_node = gd_state->tms.config; + + // DB4AI_API + Assert(gd_node->configurations == 1); + HyperparametersGD *hyperp = (HyperparametersGD *)gd_node->hyperparameters[0]; + return hyperp->lambda; +} + +static Datum pca_predict(const Matrix *features, const Matrix *weights, + Oid return_type, void *extra_data, bool max_binary, bool *categorize) +{ + Assert(!max_binary); + + /* + * the projection of an n-dimensional tuple onto a lower k-dimensional space is given by the + * inner product features^T * weights, assuming that features is a column vector + * of dimensions n x 1 and weights is a matrix of dimensions n x k + */ + int32_t const dimension = weights->rows; + // remember that the last two columns of the matrix contain statistics about the eigenvectors + // and thus they are not relevant for prediction + int32_t const num_eigenvectors = weights->columns - 2; + int32_t const total_bytes = (sizeof(float8) * num_eigenvectors) + ARR_OVERHEAD_NONULLS(1); + errno_t errorno = EOK; + int32_t dims[1] = {num_eigenvectors}; + int32_t lbs[1] = {1}; + int32_t coordinate_offset = 0; + auto projected_coordinates = reinterpret_cast<ArrayType *>(palloc0(total_bytes)); + + SET_VARSIZE(projected_coordinates, total_bytes); + projected_coordinates->ndim = 1; + projected_coordinates->dataoffset = 0; + projected_coordinates->elemtype = FLOAT8OID; + errorno = memcpy_s(ARR_DIMS(projected_coordinates), sizeof(int32_t), dims, sizeof(int32_t)); + securec_check(errorno, "\0", "\0"); + errorno = memcpy_s(ARR_LBOUND(projected_coordinates), sizeof(int32_t), lbs, sizeof(int32_t)); + securec_check(errorno, "\0", "\0"); + + auto projected_coordinates_array = reinterpret_cast<float8 *>(ARR_DATA_PTR(projected_coordinates)); + + Matrix eigenvector; + eigenvector.columns = 1; + eigenvector.rows = eigenvector.allocated = dimension; + eigenvector.transposed = false; + + // this loop computes the inner product between the input point and the + // generating vectors + for (int32_t e = 0; e < num_eigenvectors; ++e) { + eigenvector.data = weights->data + coordinate_offset; + projected_coordinates_array[e] = matrix_dot(features, &eigenvector); + coordinate_offset += dimension; + } + + *categorize = false; + return PointerGetDatum(projected_coordinates); +} + +////////////////////////////////////////////////////////////////////////////////// + +static HyperparameterDefinition pca_hyperparameter_definitions[] = { + HYPERPARAMETER_INT4("number_components", 1, 1, true, INT32_MAX, true, HyperparametersGD, number_dimensions, + HP_NO_AUTOML()), + HYPERPARAMETER_INT4("batch_size", 1000, 1, true, INT32_MAX, true, HyperparametersGD, batch_size, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("max_iterations", 100, 1, true, INT32_MAX, true, HyperparametersGD, max_iterations, + HP_NO_AUTOML()), + HYPERPARAMETER_INT4("max_seconds", 0, 0, true, INT32_MAX, true, HyperparametersGD, max_seconds, HP_NO_AUTOML()), + HYPERPARAMETER_FLOAT8("tolerance", 0.0005, 0.0, true, DBL_MAX, true, HyperparametersGD, tolerance, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("seed", 0, 0, true, INT32_MAX, true, HyperparametersGD, seed, HP_NO_AUTOML()), + HYPERPARAMETER_BOOL("verbose", false, HyperparametersGD, verbose, HP_NO_AUTOML()), +}; + +static const HyperparameterDefinition *gd_get_hyperparameters_pca(AlgorithmAPI *self, int *definitions_size) +{ + Assert(definitions_size != nullptr); + *definitions_size = sizeof(pca_hyperparameter_definitions) / sizeof(HyperparameterDefinition); + return pca_hyperparameter_definitions; +} + +/* + * We
specialize PCA's explain + */ +List *pca_explain(AlgorithmAPI *self, SerializedModel const *model, Oid const return_type) +{ + List *model_info = nullptr; + + // extract serialized model + SerializedModelGD gds; + gd_deserialize((GradientDescent *)self, model, return_type, &gds); + + /* + * weights is a matrix of dimensions n x k (n rows, k columns) + */ + int32_t const dimension = gds.weights.rows; + // remember that the last two columns of the matrix contain statistics about the eigenvectors + // and thus we handle them in a different manner (not as the coordinates of eigenvectors) + int32_t const num_eigenvectors = gds.weights.columns - 2; + + // output array of coordinates + ArrayType *eigenvector_coordinates_array = nullptr; + float8 *eigenvector_coordinates_array_data = nullptr; + // raw data + float8 *pw = gds.weights.data; + // the proportion vector is the second-to-last column vector of the eigenvectors matrix + float8 *proportions = pw + (num_eigenvectors * dimension); + // the standard deviations vector is the very last column vector of the weights matrix + float8 *std_devs = proportions + dimension; + errno_t errorno = EOK; + uint32_t const size_component_bytes = dimension * sizeof(float8); + // this loop produces the output of every principal component + for (int32_t e = 0; e < num_eigenvectors; ++e) { + /* + * opening and closing the eigenvector group + * (if both open_group and close_group are set, they will be ignored) + */ + TrainingInfo *eigenvector_open_group = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + eigenvector_open_group->open_group = true; + TrainingInfo *eigenvector_close_group = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + eigenvector_close_group->close_group = true; + + // the properties of an eigenvector + TrainingInfo *eigenvector_id = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *eigenvector_coordinates = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *eigenvector_proportion = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *eigenvector_standard_deviation = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + + // opening and closing a group must have exactly the same name (otherwise fire!)
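/*
 * [Editor's note: a minimal sketch of the grouping contract stated in the
 * comment above. append_group is a hypothetical helper, not part of the
 * db4ai API; it only illustrates that a group's properties must sit between
 * an open_group entry and a close_group entry carrying the very same name:
 *
 *   static List *append_group(List *info, const char *group_name, List *properties)
 *   {
 *       TrainingInfo *open_info = (TrainingInfo *)palloc0(sizeof(TrainingInfo));
 *       open_info->open_group = true;
 *       open_info->name = group_name;
 *
 *       TrainingInfo *close_info = (TrainingInfo *)palloc0(sizeof(TrainingInfo));
 *       close_info->close_group = true;
 *       close_info->name = group_name;  // must match the opening name exactly
 *
 *       info = lappend(info, open_info);
 *       info = list_concat(info, properties);
 *       return lappend(info, close_info);
 *   }
 */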
+ eigenvector_open_group->name = "Principal Component"; + eigenvector_close_group->name = "Principal Component"; + + // the name of each property + eigenvector_id->name = "ID"; + eigenvector_coordinates->name = "Coordinates"; + eigenvector_proportion->name = "Relative proportion"; + eigenvector_standard_deviation->name = "Standard deviation of proportion"; + + // the type of each property + eigenvector_id->type = INT4OID; + eigenvector_coordinates->type = FLOAT8ARRAYOID; + eigenvector_proportion->type = FLOAT8OID; + eigenvector_standard_deviation->type = FLOAT8OID; + + // the coordinates of the component + eigenvector_coordinates_array = construct_empty_md_array(1, dimension); + eigenvector_coordinates_array_data = reinterpret_cast<float8 *>(ARR_DATA_PTR(eigenvector_coordinates_array)); + + // filling in the data + eigenvector_id->value = Int32GetDatum(e + 1); + eigenvector_coordinates->value = PointerGetDatum(eigenvector_coordinates_array); + eigenvector_proportion->value = Float8GetDatumFast(proportions[e]); + eigenvector_standard_deviation->value = Float8GetDatumFast(std_devs[e]); + + // filling in the coordinates + errorno = memcpy_s(eigenvector_coordinates_array_data, size_component_bytes, pw, size_component_bytes); + securec_check(errorno, "\0", "\0"); + + /* + * appending the properties to the list of properties + * OBSERVE that open and close elements must be well positioned! (at the beginning and end of the information) + */ + model_info = lappend(model_info, eigenvector_open_group); + model_info = lappend(model_info, eigenvector_id); + model_info = lappend(model_info, eigenvector_coordinates); + model_info = lappend(model_info, eigenvector_proportion); + model_info = lappend(model_info, eigenvector_standard_deviation); + model_info = lappend(model_info, eigenvector_close_group); + + pw += dimension; + } + + return model_info; +} + +ModelHyperparameters *gd_make_hyperparameters_pca(AlgorithmAPI *self) +{ + ModelHyperparameters *hyperp = gd_make_hyperparameters(self); + ((HyperparametersGD *)hyperp)->optimizer = INVALID_OPTIMIZER; + return hyperp; +} + +GradientDescent gd_pca = { + {PCA, "pca", ALGORITHM_ML_UNSUPERVISED | ALGORITHM_ML_RESCANS_DATA, gd_metrics_loss, gd_get_hyperparameters_pca, + gd_make_hyperparameters_pca, gd_update_hyperparameters, gd_create, gd_run, gd_end, gd_predict_prepare, gd_predict, + pca_explain}, + false, + FLOAT8ARRAYOID, // default return type + 0., // default feature + 0., + 0., + nullptr, + gd_init_optimizer_pca, + nullptr, + nullptr, + nullptr, + pca_gradients, + pca_test, + pca_predict, + nullptr, +}; \ No newline at end of file diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/predict.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/predict.cpp index e0e51bb9e..4c0ebde3e 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/predict.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/predict.cpp @@ -22,104 +22,100 @@ */ #include "postgres.h" +#include "utils/bytea.h" #include "db4ai/gd.h" +#include "db4ai/db4ai_common.h" #include "db4ai/model_warehouse.h" #include "db4ai/predict_by.h" +#include "db4ai/kernel.h" -typedef struct GradientDescentPredictor { - GradientDescentAlgorithm *algorithm; - int ncategories; - Oid return_type; - Matrix weights; - Matrix features; - Datum *categories; -} GradientDescentPredictor; +extern bool verify_pgarray(ArrayType const * pg_array, int32_t n); -ModelPredictor gd_predict_prepare(const Model *model) +ModelPredictor gd_predict_prepare(AlgorithmAPI *self, const SerializedModel *model, Oid return_type) { -
GradientDescentPredictor *gdp = (GradientDescentPredictor *)palloc0(sizeof(GradientDescentPredictor)); - ModelGradientDescent *gdp_model = (ModelGradientDescent *)model; - gdp->algorithm = gd_get_algorithm(gdp_model->model.algorithm); - gdp->ncategories = gdp_model->ncategories; - gdp->return_type = gdp_model->model.return_type; - - ArrayType *arr = (ArrayType *)pg_detoast_datum((struct varlena *)DatumGetPointer(gdp_model->weights)); - Assert(arr->elemtype == FLOAT4OID); - - int coefficients = ARR_DIMS(arr)[0]; - matrix_init(&gdp->weights, coefficients); - matrix_init(&gdp->features, coefficients); - - Datum dt; - bool isnull; - gd_float *pf = gdp->weights.data; - ArrayIterator it = array_create_iterator(arr, 0); - while (array_iterate(it, &dt, &isnull)) { - Assert(!isnull); - *pf++ = DatumGetFloat4(dt); + SerializedModelGD *gdp = (SerializedModelGD *)palloc0(sizeof(SerializedModelGD)); + gd_deserialize((GradientDescent*)self, model, return_type, gdp); + if (gdp->algorithm->prepare_kernel != nullptr) { + gdp->kernel = gdp->algorithm->prepare_kernel(gdp->input, &gdp->hyperparameters); + if (gdp->kernel != nullptr) + matrix_init(&gdp->aux_input, gdp->input); } - array_free_iterator(it); - if (arr != (ArrayType *)DatumGetPointer(gdp_model->weights)) - pfree(arr); - - if (gdp->ncategories > 0) { - arr = (ArrayType *)pg_detoast_datum((struct varlena *)DatumGetPointer(gdp_model->categories)); - gdp->categories = (Datum *)palloc(ARR_DIMS(arr)[0] * sizeof(Datum)); - - int cat = 0; - it = array_create_iterator(arr, 0); - while (array_iterate(it, &dt, &isnull)) { - Assert(!isnull); - gdp->categories[cat++] = dt; - } - array_free_iterator(it); - if (arr != (ArrayType *)DatumGetPointer(gdp_model->categories)) - pfree(arr); - } else - gdp->categories = nullptr; - return (ModelPredictor *)gdp; } -Datum gd_predict(ModelPredictor pred, Datum *values, bool *isnull, Oid *types, int ncolumns) +Datum gd_predict(AlgorithmAPI *self, ModelPredictor pred, Datum *values, bool *isnull, Oid *types, int ncolumns) { // extract coefficients from model - GradientDescentPredictor *gdp = (GradientDescentPredictor *)pred; - + SerializedModelGD *gdp = (SerializedModelGD *)pred; + bool const has_bias = gdp->algorithm->add_bias; + // extract the features - if (ncolumns != (int)gdp->weights.rows - 1) - elog(ERROR, "Invalid number of features for prediction, provided %d, expected %d", ncolumns, - gdp->weights.rows - 1); + float8 *w; + if (gdp->kernel == nullptr) + w = gdp->features.data; + else + w = gdp->aux_input.data; - gd_float *w = gdp->features.data; - for (int i = 0; i < ncolumns; i++) { - double value; - if (isnull[i]) - value = 0.0; // default value for feature, it is not the target for sure - else - value = gd_datum_get_float(types[i], values[i]); - - *w++ = value; - } - *w = 1.0; // bias - - Datum result = 0; - gd_float r = gdp->algorithm->predict_callback(&gdp->features, &gdp->weights); - if (dep_var_is_binary(gdp->algorithm)) { - if (gdp->ncategories == 0) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INTERNAL_ERROR), - errmsg("For classification algorithms: %s, the number of categories should not be 0.", - gdp->algorithm->name))); - } - result = gdp->categories[0]; - if (r != gdp->algorithm->min_class) { - if (gdp->ncategories == 2 && r == gdp->algorithm->max_class) - result = gdp->categories[1]; - } + if (ncolumns == 1 && (types[0] == FLOAT8ARRAYOID || types[0] == FLOAT4ARRAYOID)) { + Assert(!isnull[0]); + + gd_copy_pg_array_data(w, values[0], gdp->input); + w += gdp->input; } else { - result = 
gd_float_get_datum(gdp->return_type, r); + } + if (gdp->kernel != nullptr) { + // now apply the kernel transformation using a fake vector + Matrix tmp; + matrix_init(&tmp, gdp->kernel->coefficients); + + // make sure the transformation points to the destination vector + float8 *p = matrix_swap_data(&tmp, gdp->features.data); + gdp->kernel->transform(gdp->kernel, &gdp->aux_input, &tmp); + matrix_swap_data(&tmp, p); + matrix_release(&tmp); + + // update pointers of data + w = gdp->features.data + gdp->kernel->coefficients; + } + + if (has_bias) + *w = 1.0; + + // and predict + bool categorize; + Datum result = + gdp->algorithm->predict(&gdp->features, &gdp->weights, gdp->return_type, gdp->extra_data, false, &categorize); + if (categorize) { + Assert(!gd_dep_var_is_continuous(gdp->algorithm)); + float8 r = DatumGetFloat4(result); + if (gd_dep_var_is_binary(gdp->algorithm)) { + result = gdp->categories[0]; + if (r != gdp->algorithm->min_class) { + Assert(r == gdp->algorithm->max_class); + result = gdp->categories[1]; + } + } else { + Assert(r < gdp->ncategories); + result = gdp->categories[(int)r]; + } + } + return result; } diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/shuffle_cache.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/shuffle_cache.cpp index 1c7839b2b..fe80a6eec 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/shuffle_cache.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/shuffle_cache.cpp @@ -25,17 +25,30 @@ /* * Shuffle using a limited cache is performed by caching and training batches - * in random order. For each new batch there are two random options: - * - append (there are free slots into the cache) - * - or train (and test) an existing batch and replace it by the new one - * At the end of the iteration, the cache is emptied randomly. At each step, - * one remaining batch is selected, trained and removed. - * At the end of the two phases, all batches have been visited only once in a - * random sequence, but the ability to shuffle batches depends on the cache size - * (the available working memory) and the batch size (a matrix of N rows - * by M features). The probability distribution is not uniform and initial - * batches have a higher probability than the last batches, but the shuffling - * is good enough and has a very small impact into the performance of GD. + * in random order. During the first iteration the batches are processed + * sequentially in the same order as provided by the source relation or query. + * The cache tries to keep a snapshot of the whole data as long as it fits + * into the available memory (working set), otherwise it discards the snapshot. + * + * For the following iterations there are two options: with a snapshot, in + * which case the relation or query is not scanned again, or with a rescan. + * In the first case, at each iteration the batches are shuffled again using + * a uniform random distribution.
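 *
 * [Editor's note: a condensed C++ sketch of the rescan path without a
 * snapshot, whose two random options are described in the remainder of this
 * comment. Batch, train and the flat cache are illustrative stand-ins, not
 * the ShuffleCache implementation. The original comment resumes after this
 * sketch.]
 *
 *   #include <vector>
 *   #include <random>
 *
 *   struct Batch { int dummy; };  // placeholder for features and labels
 *
 *   void shuffled_training(const std::vector<Batch> &incoming, size_t slots,
 *                          void (*train)(const Batch &), std::mt19937_64 &rnd)
 *   {
 *       std::vector<Batch> cache;
 *       cache.reserve(slots);
 *       for (const Batch &b : incoming) {
 *           size_t slot = rnd() % slots;
 *           if (slot >= cache.size()) {
 *               cache.push_back(b);   // option 1: cache into a free slot
 *           } else {
 *               train(cache[slot]);   // option 2: train the evicted batch
 *               cache[slot] = b;      // and replace it by the new one
 *           }
 *       }
 *       while (!cache.empty()) {      // empty the cache in random order
 *           size_t pick = rnd() % cache.size();
 *           train(cache[pick]);
 *           cache[pick] = cache.back();
 *           cache.pop_back();
 *       }
 *   }
 *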
+ * Instead, if there is no snapshot then for each batch read again from the + * source data there are two random options: + * + * - to cache the batch into a free slot + * - to replace a cached batch by the new one + * + * Batches are trained only when they are discarded from the cache. + * + * At the end of the iteration, the remaining batches in the cache are emptied + * randomly. The process guarantees that all batches have been trained only once + * in a random sequence, but the ability to shuffle batches depends on the + * cache size. In this case, the probability distribution is not uniform and + * initial batches have a higher probability than the last batches, but the + * shuffling is good enough and has a very small impact on the accuracy of GD. */ typedef struct ShuffleCache { @@ -46,13 +59,14 @@ typedef struct ShuffleCache { Matrix **cache_dep_var; int cache_allocated; int max_cache_usage; - int num_batches; int batch_size; int n_features; int iteration; int cached; int next; struct drand48_data rnd; + List *snapshot; + int snapshot_last_size; } ShuffleCache; inline int32_t rnd_next(ShuffleCache *shf) { @@ -66,16 +80,19 @@ static void update_batch(ShuffleCache *cache) Assert(cache->next < cache->cached); ereport(DEBUG1, (errmodule(MOD_DB4AI), errmsg("GD shuffle cache iteration %d train batch %d of %d", - cache->iteration, cache->cache_batch[cache->next] + 1, cache->num_batches))); + cache->iteration, cache->cache_batch[cache->next] + 1, cache->shf.num_batches))); Matrix *features = cache->cache_features[cache->next]; - Matrix *dep_var = cache->cache_dep_var[cache->next]; + Matrix *dep_var = nullptr; + if (cache->shf.supervised) + dep_var = cache->cache_dep_var[cache->next]; cache->shf.optimizer->update_batch(cache->shf.optimizer, features, dep_var); if (features->rows < cache->batch_size) { matrix_resize(features, cache->batch_size, cache->n_features); - matrix_resize(dep_var, cache->batch_size, 1); + if (dep_var != nullptr) + matrix_resize(dep_var, cache->batch_size, 1); } } @@ -85,19 +102,57 @@ static void swap_last(ShuffleCache *cache) { cache->cache_features[cache->next] = cache->cache_features[cache->cached]; cache->cache_features[cache->cached] = features; - Matrix *dep_var = cache->cache_dep_var[cache->next]; - cache->cache_dep_var[cache->next] = cache->cache_dep_var[cache->cached]; - cache->cache_dep_var[cache->cached] = dep_var; + if (cache->shf.supervised) { + Matrix *dep_var = cache->cache_dep_var[cache->next]; + cache->cache_dep_var[cache->next] = cache->cache_dep_var[cache->cached]; + cache->cache_dep_var[cache->cached] = dep_var; + } cache->cache_batch[cache->next] = cache->cache_batch[cache->cached]; } +static void cache_release_snapshot(ShuffleCache *cache) +{ + if (cache->snapshot != nullptr) { + ListCell *lc; + foreach (lc, cache->snapshot) { + Matrix **matrices = lfirst_node(Matrix *, lc); + matrix_release(matrices[0]); + pfree(matrices[0]); + if (cache->shf.supervised) { + matrix_release(matrices[1]); + pfree(matrices[1]); + } + pfree(matrices); + } + list_free(cache->snapshot); + cache->snapshot = nullptr; + } +} + static void cache_start_iteration(ShuffleGD *shuffle) { ShuffleCache *cache = (ShuffleCache *)shuffle; cache->next = -1; cache->cached = 0; - cache->num_batches = 0; + cache->shf.num_batches = 0; + if (cache->snapshot != nullptr) { + // move the snapshot to the cache + Assert(cache->cache_allocated == 0); + ListCell *lc; + Matrix **matrices = nullptr; + foreach (lc, cache->snapshot) { + Assert(cache->cached < cache->cache_size); + matrices =
lfirst_node(Matrix *, lc); + cache->cache_features[cache->cached] = matrices[0]; + if (cache->shf.supervised) + cache->cache_dep_var[cache->cached] = matrices[1]; + cache->cached++; + } + matrix_resize(matrices[0], cache->snapshot_last_size, cache->n_features); + if (cache->shf.supervised) + matrix_resize(matrices[1], cache->snapshot_last_size, 1); + } } static void cache_end_iteration(ShuffleGD *shuffle) @@ -105,8 +160,8 @@ static void cache_end_iteration(ShuffleGD *shuffle) ShuffleCache *cache = (ShuffleCache *)shuffle; if (cache->iteration == 0) { cache->max_cache_usage = cache->cache_size; - if (cache->max_cache_usage > cache->num_batches) - cache->max_cache_usage = cache->num_batches; + if (cache->max_cache_usage > cache->shf.num_batches) + cache->max_cache_usage = cache->shf.num_batches; } else { // empty the cache while (cache->cached > 0) { @@ -128,12 +183,18 @@ static void cache_release(ShuffleGD *shuffle) matrix_release(cache->cache_features[c]); pfree(cache->cache_features[c]); - matrix_release(cache->cache_dep_var[c]); - pfree(cache->cache_dep_var[c]); + if (cache->shf.supervised) { + matrix_release(cache->cache_dep_var[c]); + pfree(cache->cache_dep_var[c]); + } } + + if (cache->shf.supervised) + pfree(cache->cache_dep_var); + pfree(cache->cache_features); - pfree(cache->cache_dep_var); pfree(cache->cache_batch); + cache_release_snapshot(cache); pfree(cache); } @@ -143,7 +204,7 @@ static Matrix *cache_get(ShuffleGD *shuffle, Matrix **pdep_var) Assert(cache->next == -1); Matrix *features; - Matrix *dep_var; + Matrix *dep_var = nullptr; if (cache->iteration == 0) { // special case, do not shuffle Assert(cache->cached == 0); @@ -152,47 +213,65 @@ static Matrix *cache_get(ShuffleGD *shuffle, Matrix **pdep_var) if (cache->cache_allocated == 0) { features = (Matrix *)palloc0(sizeof(Matrix)); matrix_init(features, cache->batch_size, cache->n_features); - - dep_var = (Matrix *)palloc0(sizeof(Matrix)); - matrix_init(dep_var, cache->batch_size); - cache->cache_features[0] = features; - cache->cache_dep_var[0] = dep_var; + + if (cache->shf.supervised) { + dep_var = (Matrix *)palloc0(sizeof(Matrix)); + matrix_init(dep_var, cache->batch_size); + cache->cache_dep_var[0] = dep_var; + } + cache->cache_allocated++; } else { // reuse the batch, it has been already features = cache->cache_features[0]; - dep_var = cache->cache_dep_var[0]; + if (cache->shf.supervised) + dep_var = cache->cache_dep_var[0]; } } else { - // look for an empty slot, otherwise reuse one - cache->next = rnd_next(cache) % cache->max_cache_usage; - if (cache->next < cache->cached) { - // reuse slot - update_batch(cache); + if (cache->snapshot != nullptr) { + // check if there are more chunks to process + if (cache->cached == 0) + return nullptr; + + cache->next = rnd_next(cache) % cache->cached; features = cache->cache_features[cache->next]; - dep_var = cache->cache_dep_var[cache->next]; - } else { - // append - cache->next = cache->cached++; - if (cache->next == cache->cache_allocated) { - features = (Matrix *)palloc0(sizeof(Matrix)); - matrix_init(features, cache->batch_size, cache->n_features); - - dep_var = (Matrix *)palloc0(sizeof(Matrix)); - matrix_init(dep_var, cache->batch_size); - - cache->cache_features[cache->next] = features; - cache->cache_dep_var[cache->next] = dep_var; - cache->cache_allocated++; - } else { - features = cache->cache_features[cache->next]; + if (cache->shf.supervised) dep_var = cache->cache_dep_var[cache->next]; + } else { + // look for an empty slot, otherwise reuse one + cache->next = 
rnd_next(cache) % cache->max_cache_usage; + if (cache->next < cache->cached) { + // reuse slot + update_batch(cache); + features = cache->cache_features[cache->next]; + if (cache->shf.supervised) + dep_var = cache->cache_dep_var[cache->next]; + } else { + // append + cache->next = cache->cached++; + if (cache->next == cache->cache_allocated) { + features = (Matrix *)palloc0(sizeof(Matrix)); + matrix_init(features, cache->batch_size, cache->n_features); + cache->cache_features[cache->next] = features; + + if (cache->shf.supervised) { + dep_var = (Matrix *)palloc0(sizeof(Matrix)); + matrix_init(dep_var, cache->batch_size); + cache->cache_dep_var[cache->next] = dep_var; + } + + cache->cache_allocated++; + } else { + features = cache->cache_features[cache->next]; + if (cache->shf.supervised) + dep_var = cache->cache_dep_var[cache->next]; + } } } } - cache->cache_batch[cache->next] = cache->num_batches; - cache->num_batches++; + cache->cache_batch[cache->next] = cache->shf.num_batches; + cache->shf.num_batches++; *pdep_var = dep_var; return features; @@ -217,61 +296,123 @@ static void cache_unget(ShuffleGD *shuffle, int tuples) swap_last(cache); } } - cache->num_batches--; + + if (cache->snapshot != nullptr) { + Assert(cache->cache_allocated == 1); + + // there is a valid snapshot, we must clean the unused arrays + matrix_release(cache->cache_features[0]); + pfree(cache->cache_features[0]); + + if (cache->shf.supervised) { + matrix_release(cache->cache_dep_var[0]); + pfree(cache->cache_dep_var[0]); + } + + cache->cache_allocated--; + } } else { if (tuples < cache->batch_size) { // resize batch temporarily Matrix *features = cache->cache_features[cache->next]; - Matrix *dep_var = cache->cache_dep_var[cache->next]; matrix_resize(features, tuples, cache->n_features); - matrix_resize(dep_var, tuples, 1); + + if (cache->shf.supervised) { + Matrix *dep_var = cache->cache_dep_var[cache->next]; + matrix_resize(dep_var, tuples, 1); + } } if (cache->iteration == 0) { Assert(cache->next == 0); update_batch(cache); cache->cached--; + + if (cache->shf.num_batches < cache->cache_size) { + if (cache->shf.num_batches == 1 || cache->snapshot != nullptr) { + // accumulate into snapshot + Assert(cache->cache_allocated == 1); + int nmatrices = cache->shf.supervised ? 
2 : 1; + Matrix **matrices = (Matrix **) palloc(nmatrices * sizeof(Matrix *)); + matrices[0] = cache->cache_features[0]; + if (cache->shf.supervised) + matrices[1] = cache->cache_dep_var[0]; + cache->cache_allocated--; + cache->snapshot = lappend(cache->snapshot, matrices); + cache->snapshot_last_size = tuples; + } + } else { + if (cache->snapshot != nullptr) { + // cannot make a snapshot, there is not enough memory + cache_release_snapshot(cache); + } + } + } else { + if (cache->snapshot != nullptr) { + update_batch(cache); + cache->cached--; + if (cache->next < cache->cached) { + // it is in the middle, swap it with the last + swap_last(cache); + } + } } } cache->next = -1; } -ShuffleGD *gd_init_shuffle_cache(const GradientDescentState *gd_state) +static bool cache_has_snapshot(ShuffleGD *shuffle) { - int batch_size = gd_get_node(gd_state)->batch_size; + ShuffleCache *cache = (ShuffleCache *)shuffle; + return cache->snapshot != nullptr; +} + +ShuffleGD *gd_init_shuffle_cache(const GradientDescentState *gd_state, HyperparametersGD *hyperp) +{ + GradientDescent *gd_algo = (GradientDescent*)gd_state->tms.algorithm; + + bool supervised = gd_is_supervised(gd_algo); + int batch_size = hyperp->batch_size; // check if a batch fits into memory int64_t avail_mem = u_sess->attr.attr_memory.work_mem * 1024LL - matrix_expected_size(gd_state->n_features) * 2; // weights & gradients - int batch_mem = matrix_expected_size(batch_size, gd_state->n_features) // features - + matrix_expected_size(gd_state->n_features); // dep var + int batch_mem = matrix_expected_size(batch_size, gd_state->n_features); // features + if (supervised) + batch_mem += matrix_expected_size(gd_state->n_features); // dep var + if (batch_mem > avail_mem) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("batch size is too large for the available working memory"))); // initialize ShuffleCache *cache = (ShuffleCache *)palloc0(sizeof(ShuffleCache)); - srand48_r(gd_get_node(gd_state)->seed, &cache->rnd); + cache->snapshot = nullptr; + srand48_r(hyperp->seed, &cache->rnd); + cache->shf.supervised = supervised; cache->shf.start_iteration = cache_start_iteration; cache->shf.end_iteration = cache_end_iteration; cache->shf.release = cache_release; cache->shf.get = cache_get; cache->shf.unget = cache_unget; + cache->shf.has_snapshot = cache_has_snapshot; // cache for shuffle - cache->cache_size = avail_mem / (batch_mem + 2 * sizeof(Matrix *) + sizeof(int)); + int batch_matrices = (supervised ? 
2 : 1); + cache->cache_size = avail_mem / (batch_mem + batch_matrices * sizeof(Matrix *) + sizeof(int)); if (cache->cache_size == 0) cache->cache_size = 1; // shuffle is not possible - ereport(NOTICE, (errmodule(MOD_DB4AI), errmsg("GD shuffle cache size %d", cache->cache_size))); + ereport(DEBUG1, (errmodule(MOD_DB4AI), errmsg("GD shuffle cache size %d", cache->cache_size))); cache->batch_size = batch_size; cache->n_features = gd_state->n_features; cache->cache_batch = (int *)palloc(cache->cache_size * sizeof(int)); cache->cache_features = (Matrix **)palloc(cache->cache_size * sizeof(Matrix *)); - cache->cache_dep_var = (Matrix **)palloc(cache->cache_size * sizeof(Matrix *)); + if (supervised) + cache->cache_dep_var = (Matrix **)palloc(cache->cache_size * sizeof(Matrix *)); cache->cache_allocated = 0; cache->max_cache_usage = 0; cache->iteration = 0; diff --git a/src/gausskernel/dbmind/db4ai/executor/gd/svm.cpp b/src/gausskernel/dbmind/db4ai/executor/gd/svm.cpp index 781cf0c15..cbf7014be 100644 --- a/src/gausskernel/dbmind/db4ai/executor/gd/svm.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/gd/svm.cpp @@ -22,41 +22,43 @@ */ #include "db4ai/gd.h" +#include "db4ai/kernel.h" -static void svmc_gradients(const GradientDescent *gd_node, const Matrix *features, const Matrix *dep_var, - Matrix *weights, Matrix *gradients) +static void svmc_gradients(GradientsConfig *cfg) { - Assert(features->rows > 0); + Assert(cfg->features->rows > 0); + + GradientsConfigGD *cfg_gd = (GradientsConfigGD *)cfg; // distances = 1 - y * (x * w) Matrix distance; - matrix_init(&distance, features->rows); - matrix_mult_vector(features, weights, &distance); - matrix_mult_entrywise(&distance, dep_var); + matrix_init(&distance, cfg->features->rows); + matrix_mult_vector(cfg->features, cfg->weights, &distance); + matrix_mult_entrywise(&distance, cfg_gd->dep_var); matrix_complement(&distance); - Assert(distance.rows == dep_var->rows); + Assert(distance.rows == cfg_gd->dep_var->rows); Assert(distance.columns == 1); - const gd_float *pf = features->data; - const gd_float *py = dep_var->data; - const gd_float *pd = distance.data; - gd_float *pg = gradients->data; + const float8 *pf = cfg->features->data; + const float8 *py = cfg_gd->dep_var->data; + const float8 *pd = distance.data; + float8 *pg = cfg->gradients->data; for (int r = 0; r < distance.rows; r++) { - gd_float y = *py++; - gd_float d = *pd++; + float8 y = *py++; + float8 d = *pd++; if (d > 0) { - for (int f = 0; f < features->columns; f++) + for (int f = 0; f < cfg->features->columns; f++) pg[f] -= y * pf[f]; } - pf += features->columns; + pf += cfg->features->columns; } matrix_release(&distance); - matrix_mult_scalar(gradients, gd_node->lambda * 2.0); + matrix_mult_scalar(cfg->gradients, cfg->hyperp->lambda * 2.0); } -static double svmc_test(const GradientDescent *gd_node, const Matrix *features, const Matrix *dep_var, +static double svmc_test(const GradientDescentState* gd_state, const Matrix *features, const Matrix *dep_var, const Matrix *weights, Scores *scores) { Assert(features->rows > 0); @@ -67,7 +69,6 @@ static double svmc_test(const GradientDescent *gd_node, const Matrix *features, matrix_mult_vector(features, weights, &distances); matrix_init_clone(&predictions, &distances); - matrix_positive(&predictions); matrix_binary(&predictions, FLT_MIN, -1.0, 1.0); matrix_relevance(&predictions, dep_var, scores, 1.0); matrix_release(&predictions); @@ -77,25 +78,118 @@ static double svmc_test(const GradientDescent *gd_node, const Matrix *features, 
matrix_complement(&distances); matrix_positive(&distances); - gd_float tuple_loss = matrix_get_sum(&distances) / features->rows; + float8 tuple_loss = matrix_get_sum(&distances) / features->rows; matrix_release(&distances); return tuple_loss; } -static gd_float svmc_predict(const Matrix *features, const Matrix *weights) +static Datum svmc_predict(const Matrix *features, const Matrix *weights, Oid return_type, void *extra_data, + bool max_binary, bool *categorize) { double r = matrix_dot(features, weights); - return r < 0 ? -1.0 : 1.0; + if (!max_binary) + r = (r <= 0 ? -1.0 : 1.0); + *categorize = true; + return Float4GetDatum(r); } -GradientDescentAlgorithm gd_svm_classification = { - "svm-classification", - GD_DEPENDENT_VAR_BINARY, - METRIC_ACCURACY | METRIC_F1 | METRIC_PRECISION | METRIC_RECALL | METRIC_LOSS, +/////////////////////////////////////////////////////////////////////////////////////////////// + +const char* svm_kernel_str[SVM_NUM_KERNELS] = { + "linear", + "gaussian", + "polynomial" +}; + +const char *svm_kernel_getter(void *pkernel) +{ + KernelSVM kernel = *static_cast<KernelSVM *>(pkernel); + if (kernel < SVM_NUM_KERNELS) + return svm_kernel_str[kernel]; + + ereport(ERROR, + (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Invalid kernel %d", kernel))); + return NULL; +} + +void svm_kernel_setter(const char *str, void *kernel) +{ + for (int c = 0; c < SVM_NUM_KERNELS; c++) { + if (strcmp(str, svm_kernel_str[c]) == 0) { + *static_cast<KernelSVM *>(kernel) = (KernelSVM)c; + return; + } + } + + ereport(ERROR, + (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Invalid kernel '%s'", str))); +} + +static HyperparameterDefinition svmc_hyperparameter_definitions[] = { + GD_HYPERPARAMETERS_SVM_CLASSIFICATION +}; + +static const HyperparameterDefinition* gd_get_hyperparameters_svmc(AlgorithmAPI *self, int *definitions_size) +{ + Assert(definitions_size != nullptr); + *definitions_size = sizeof(svmc_hyperparameter_definitions) / sizeof(HyperparameterDefinition); + return svmc_hyperparameter_definitions; +} + +KernelTransformer* svmc_init_kernel(int features, HyperparametersGD *hyperp) +{ + KernelTransformer *kernel = nullptr; + + if (hyperp->kernel != SVM_KERNEL_LINEAR) { + int components = hyperp->components; + if (components == 0) + components = Max(128, 2 * features); + + if (hyperp->kernel == SVM_KERNEL_GAUSSIAN) { + KernelGaussian *kernel_g = (KernelGaussian *) palloc0(sizeof(KernelGaussian)); + kernel_init_gaussian(kernel_g, features, components, hyperp->gamma, hyperp->seed); + kernel = &kernel_g->km; + } else { + Assert(hyperp->kernel == SVM_KERNEL_POLYNOMIAL); + KernelPolynomial *kernel_p = (KernelPolynomial *) palloc0(sizeof(KernelPolynomial)); + kernel_init_polynomial(kernel_p, features, components, hyperp->degree, hyperp->coef0, hyperp->seed); + kernel = &kernel_p->km; + } + } + + return kernel; +} + +GradientDescent gd_svm_classification = { + { + SVM_CLASSIFICATION, + GD_SVM_CLASSIFICATION_NAME, + ALGORITHM_ML_DEFAULT | ALGORITHM_ML_RESCANS_DATA, + gd_metrics_accuracy, + gd_get_hyperparameters_svmc, + gd_make_hyperparameters, + gd_update_hyperparameters, + gd_create, + gd_run, + gd_end, + gd_predict_prepare, + gd_predict, + gd_explain + }, + true, + 0, // default return type + 0.0, // default feature -1.0, 1.0, + nullptr, + gd_init_optimizer, + svmc_init_kernel, + nullptr, + nullptr, svmc_gradients, svmc_test, svmc_predict, + nullptr, }; + diff --git a/src/gausskernel/dbmind/db4ai/executor/hyperparameter_validation.cpp
b/src/gausskernel/dbmind/db4ai/executor/hyperparameter_validation.cpp index 598b2724e..e91ac8990 100644 --- a/src/gausskernel/dbmind/db4ai/executor/hyperparameter_validation.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/hyperparameter_validation.cpp @@ -13,7 +13,7 @@ * See the Mulan PSL v2 for more details. * --------------------------------------------------------------------------------------- * - * + * hyperparameter_validation.cpp * * IDENTIFICATION * src/gausskernel/dbmind/db4ai/executor/hyperparameter_validation.cpp @@ -23,277 +23,89 @@ #include "db4ai/hyperparameter_validation.h" +#include "db4ai/aifuncs.h" +#include "instruments/generate_report.h" #include "nodes/plannodes.h" +#include "db4ai/db4ai_api.h" -#define ARRAY_LENGTH(x) sizeof(x) / sizeof((x)[0]) +/////////////////////////////////////////////////////////////////////////////// -static void init_hyperparameter_validation(HyperparameterValidation *v, void *min_value, bool min_inclusive, - void *max_value, bool max_inclusive, const char **valid_values, int32_t valid_values_size) +#define ARCHITECTURE_CONFIGURATION(name, hyperparameters, metrics, is_supervised) \ + { \ + name, hyperparameters, ARRAY_LENGTH(hyperparameters), metrics, ARRAY_LENGTH(metrics), \ + is_supervised \ + } + + +const HyperparameterDefinition* get_hyperparameter_definitions(AlgorithmML algorithm, int32_t *result_size) { - v->min_value = min_value; - v->min_inclusive = min_inclusive; - v->max_value = max_value; - v->max_inclusive = max_inclusive; - v->valid_values = valid_values; - v->valid_values_size = valid_values_size; + AlgorithmAPI* api = get_algorithm_api(algorithm); + return api->get_hyperparameters_definitions(api, result_size); } -// Definitions of hyperparameters -#define HYPERPARAMETER_BOOL(name, default_value, struct_name, attribute) \ - { \ - name, BoolGetDatum(default_value), PointerGetDatum(NULL), PointerGetDatum(NULL), NULL, NULL, BOOLOID, 0, \ - offsetof(struct_name, attribute), false, false \ - } - - -#define HYPERPARAMETER_ENUM(name, default_value, enum_values, enum_values_size, enum_setter, struct_name, attribute) \ - { \ - name, CStringGetDatum(default_value), PointerGetDatum(NULL), PointerGetDatum(NULL), enum_values, enum_setter, \ - ANYENUMOID, enum_values_size, offsetof(struct_name, attribute), false, false \ - } - - -#define HYPERPARAMETER_INT4(name, default_value, min, min_inclusive, max, max_inclusive, struct_name, attribute) \ - { \ - name, Int32GetDatum(default_value), Int32GetDatum(min), Int32GetDatum(max), NULL, NULL, INT4OID, 0, \ - offsetof(struct_name, attribute), min_inclusive, max_inclusive \ - } - -#define HYPERPARAMETER_FLOAT8(name, default_value, min, min_inclusive, max, max_inclusive, struct_name, attribute) \ - { \ - name, Float8GetDatum(default_value), Float8GetDatum(min), Float8GetDatum(max), NULL, NULL, FLOAT8OID, 0, \ - offsetof(struct_name, attribute), min_inclusive, max_inclusive \ - } - -const char* gd_optimizer_ml[] = {"gd", "ngd"}; -// Used by linear regression and logistic regression -HyperparameterDefinition logistic_regression_hyperparameter_definitions[] = { - HYPERPARAMETER_INT4("batch_size", 1000, 1, true, INT32_MAX, true, - GradientDescent, batch_size), - HYPERPARAMETER_FLOAT8("decay", 0.95, 0.0, false, DBL_MAX, true, - GradientDescent, decay), - HYPERPARAMETER_FLOAT8("learning_rate", 0.8, 0.0, false, DBL_MAX, true, - GradientDescent, learning_rate), - HYPERPARAMETER_INT4("max_iterations", 100, 1, true, INT32_MAX, true, - GradientDescent, max_iterations), - HYPERPARAMETER_INT4("max_seconds", 0, 0, 
true, INT32_MAX, true, - GradientDescent, max_seconds), - HYPERPARAMETER_ENUM("optimizer", "gd", gd_optimizer_ml, ARRAY_LENGTH(gd_optimizer_ml), optimizer_ml_setter, - GradientDescent, optimizer), - HYPERPARAMETER_FLOAT8("tolerance", 0.0005, 0.0, false, DBL_MAX, true, - GradientDescent, tolerance), - HYPERPARAMETER_INT4("seed", 0, 0, true, INT32_MAX, true, - GradientDescent, seed), - HYPERPARAMETER_BOOL("verbose", false, - GradientDescent, verbose), - }; - -HyperparameterDefinition svm_hyperparameter_definitions[] = { - HYPERPARAMETER_INT4("batch_size", 1000, 1, true, INT32_MAX, true, - GradientDescent, batch_size), - HYPERPARAMETER_FLOAT8("decay", 0.95, 0.0, false, DBL_MAX, true, - GradientDescent, decay), - HYPERPARAMETER_FLOAT8("lambda", 0.01, 0.0, false, DBL_MAX, true, - GradientDescent, lambda), - HYPERPARAMETER_FLOAT8("learning_rate", 0.8, 0.0, false, DBL_MAX, true, - GradientDescent, learning_rate), - HYPERPARAMETER_INT4("max_iterations", 100, 1, true, INT32_MAX, true, - GradientDescent, max_iterations), - HYPERPARAMETER_INT4("max_seconds", 0, 0, true, INT32_MAX, true, - GradientDescent, max_seconds), - HYPERPARAMETER_ENUM("optimizer", "gd", gd_optimizer_ml, ARRAY_LENGTH(gd_optimizer_ml), optimizer_ml_setter, - GradientDescent, optimizer), - HYPERPARAMETER_FLOAT8("tolerance", 0.0005, 0.0, false, DBL_MAX, true, - GradientDescent, tolerance), - HYPERPARAMETER_INT4("seed", 0, 0, true, INT32_MAX, true, - GradientDescent, seed), - HYPERPARAMETER_BOOL("verbose", false, - GradientDescent, verbose), - }; - -HyperparameterDefinition kmeans_hyperparameter_definitions[] = { - /* nothing to do now, will do when needing */ -}; - - -void get_hyperparameter_definitions(AlgorithmML algorithm, HyperparameterDefinition **result, int32_t *result_size) +AlgorithmConfiguration *get_algorithm_configuration(AlgorithmML algorithm) { switch (algorithm) { - case LOGISTIC_REGRESSION: - case LINEAR_REGRESSION: - *result = logistic_regression_hyperparameter_definitions; - *result_size = ARRAY_LENGTH(logistic_regression_hyperparameter_definitions); - break; - - case SVM_CLASSIFICATION: - *result = svm_hyperparameter_definitions; - *result_size = ARRAY_LENGTH(svm_hyperparameter_definitions); - break; - - case KMEANS: - *result = kmeans_hyperparameter_definitions; - *result_size = ARRAY_LENGTH(kmeans_hyperparameter_definitions); - break; - case INVALID_ALGORITHM_ML: default: - char *s = "logistic_regression, svm_classification, linear_regression, kmeans"; - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Architecture is not supported. 
Supported architectures: %s", s))); + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Algorithm %d has no registered configuration", algorithm))); + } + return NULL; +} + +const HyperparameterDefinition *find_hyperparameter_definition(const HyperparameterDefinition definitions[], + int32_t definitions_size, + const char *hyperparameter_name) +{ + for (int i = 0; i < definitions_size; i++) { + if (0 == strcmp(definitions[i].name, hyperparameter_name)) { + return &definitions[i]; + } + } + return NULL; +} + +// Set the value of a hyperparameter structure +static void set_hyperparameter_datum(Hyperparameter *hyperp, Oid type, Datum value) +{ + if (type == ANYENUMOID) { // Outside of hyperparameter module, treat them as strings + hyperp->type = VARCHAROID; + const char* str = DatumGetPointer(value); + hyperp->value = PointerGetDatum(cstring_to_text(str)); + } else { + hyperp->type = type; + hyperp->value = value; } } - -static void add_model_hyperparameter(Model *model, const char *name, Oid type, Datum value) +static List *add_model_hyperparameter(List *hyperparameters, MemoryContext memcxt, const char *name, Oid type, + Datum value) { + MemoryContext old_context = MemoryContextSwitchTo(memcxt); Hyperparameter *hyperp = (Hyperparameter *)palloc0(sizeof(Hyperparameter)); hyperp->name = pstrdup(name); - hyperp->type = type; - hyperp->value = value; - model->hyperparameters = lappend(model->hyperparameters, hyperp); + set_hyperparameter_datum(hyperp, type, value); + + hyperparameters = lappend(hyperparameters, hyperp); + MemoryContextSwitchTo(old_context); + + return hyperparameters; } -void update_model_hyperparameter(Model *model, const char *name, Oid type, Datum value) { +void update_model_hyperparameter(MemoryContext memcxt, List *hyperparameters, const char *name, Oid type, Datum value) +{ + MemoryContext old_context = MemoryContextSwitchTo(memcxt); ListCell *lc; - foreach(lc, model->hyperparameters) { + foreach(lc, hyperparameters) { Hyperparameter *hyperp = lfirst_node(Hyperparameter, lc); if (strcmp(hyperp->name, name) == 0) { - hyperp->type = type; - hyperp->value = value; - return; + set_hyperparameter_datum(hyperp, type, value); + break; } } - Assert(false); -} - -// Set int hyperparameter -void set_hyperparameter_value(const char *name, int *hyperparameter, Value *value, VariableSetKind kind, - int default_value, Model *model, HyperparameterValidation *validation) -{ - if (kind == VAR_SET_DEFAULT) { - *hyperparameter = default_value; - ereport(NOTICE, (errmsg("Hyperparameter %s takes value DEFAULT (%d)", name, *hyperparameter))); - } else if (kind == VAR_SET_VALUE) { - if (value == NULL) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s cannot take NULL value", name))); - } else if (value->type != T_Integer) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s must be an integer", name))); - } - *hyperparameter = intVal(value); - ereport(NOTICE, (errmsg("Hyperparameter %s takes value %d", name, *hyperparameter))); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid hyperparameter value for %s ", name))); - } - - add_model_hyperparameter(model, name, INT4OID, Int32GetDatum(*hyperparameter)); - if (validation->min_value != NULL && validation->max_value != NULL) { - bool out_of_range = - (*hyperparameter < *(int *)validation->min_value || *hyperparameter > *(int 
*)validation->max_value); - if (!validation->min_inclusive && *hyperparameter <= *(int *)validation->min_value) - out_of_range = true; - if (!validation->max_inclusive && *hyperparameter >= *(int *)validation->max_value) - out_of_range = true; - if (out_of_range) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s must be in the range %c%d,%d%c", name, validation->min_inclusive ? '[' : '(', - *(int *)validation->min_value, *(int *)validation->max_value, validation->max_inclusive ? ']' : ')'))); - } - } -} - - -// Set double hyperparameter -void set_hyperparameter_value(const char *name, double *hyperparameter, Value *value, VariableSetKind kind, - double default_value, Model *model, HyperparameterValidation *validation) -{ - if (kind == VAR_SET_DEFAULT) { - *hyperparameter = default_value; - ereport(NOTICE, (errmsg("Hyperparameter %s takes value DEFAULT (%f)", name, *hyperparameter))); - } else if (kind == VAR_SET_VALUE) { - if (value == NULL) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s cannot take NULL value", name))); - } else if (value->type == T_Float) { - *hyperparameter = floatVal(value); - } else if (value->type == T_Integer) { - *hyperparameter = (double)intVal(value); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s must be a floating point number", name))); - } - ereport(NOTICE, (errmsg("Hyperparameter %s takes value %f", name, *hyperparameter))); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid hyperparameter value for %s ", name))); - } - - add_model_hyperparameter(model, name, FLOAT8OID, Float8GetDatum(*hyperparameter)); - if (validation->min_value != NULL && validation->max_value != NULL) { - bool out_of_range = - (*hyperparameter < *(double *)validation->min_value || *hyperparameter > *(double *)validation->max_value); - if (!validation->min_inclusive && *hyperparameter <= *(double *)validation->min_value) - out_of_range = true; - if (!validation->max_inclusive && *hyperparameter >= *(double *)validation->max_value) - out_of_range = true; - if (out_of_range) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s must be in the range %c%.8g,%.8g%c", name, - validation->min_inclusive ? '[' : '(', *(double *)validation->min_value, - *(double *)validation->max_value, validation->max_inclusive ? 
']' : ')'))); } } } - - -// Set string hyperparameter (no const) -void set_hyperparameter_value(const char *name, char **hyperparameter, Value *value, VariableSetKind kind, - char *default_value, Model *model, HyperparameterValidation *validation) -{ - if (kind == VAR_SET_DEFAULT) { - *hyperparameter = (char*)palloc((strlen(default_value) + 1) * sizeof(char)); - errno_t err = strcpy_s(*hyperparameter, strlen(default_value) + 1, default_value); - securec_check(err, "\0", "\0"); - ereport(NOTICE, (errmsg("Hyperparameter %s takes value DEFAULT (%s)", name, *hyperparameter))); - } else if (kind == VAR_SET_VALUE) { - if (value == NULL) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s cannot take NULL value", name))); - } else if (value->type != T_String) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s must be a string", name))); - } - *hyperparameter = strVal(value); - ereport(NOTICE, (errmsg("Hyperparameter %s takes value %s", name, *hyperparameter))); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid hyperparameter value for %s ", name))); - } - - if (validation->valid_values != NULL) { - bool found = false; - for (int i = 0; i < validation->valid_values_size; i++) { - if (0 == strcmp(validation->valid_values[i], *hyperparameter)) { - found = true; - break; - } - } - if (!found) { - StringInfo str = makeStringInfo(); - for (int i = 0; i < validation->valid_values_size; i++) { - if (i != 0) - appendStringInfoString(str, ", "); - appendStringInfoString(str, (validation->valid_values)[i]); - } - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid hyperparameter value for %s. Valid values are: %s. (default is %s)", name, str->data, - default_value))); - } - } - add_model_hyperparameter(model, name, VARCHAROID, PointerGetDatum(cstring_to_text(*hyperparameter))); } @@ -302,100 +114,459 @@ inline const char *bool_to_str(bool value) { return value ? "TRUE" : "FALSE"; } -// Set boolean hyperparameter -void set_hyperparameter_value(const char *name, bool *hyperparameter, Value *value, VariableSetKind kind, - bool default_value, Model *model, HyperparameterValidation *validation) +static void ereport_hyperparameter(int level, const char *name, Datum value, Oid type) { - if (kind == VAR_SET_DEFAULT) { - *hyperparameter = default_value; - ereport(NOTICE, (errmsg("Hyperparameter %s takes value DEFAULT (%s)", name, bool_to_str(*hyperparameter)))); - } else if (kind == VAR_SET_VALUE) { - if (value == NULL) { + switch (type) { + case INT4OID: { + ereport(level, (errmsg("Hyperparameter %s takes value %d", name, DatumGetInt32(value)))); + break; + } + + case INT8OID: { + ereport(level, (errmsg("Hyperparameter %s takes value %ld", name, DatumGetInt64(value)))); + break; + } + + case FLOAT8OID: { + ereport(level, (errmsg("Hyperparameter %s takes value %f", name, DatumGetFloat8(value)))); + break; + } + + case BOOLOID: { + ereport(level, (errmsg("Hyperparameter %s takes value %s", name, bool_to_str(DatumGetBool(value))))); + break; + } + + case CSTRINGOID: { + ereport(level, (errmsg("Hyperparameter %s takes value %s", name, DatumGetCString(value)))); + break; + } + + case VARCHAROID: { + char *str = Datum_to_string(value, type, false); + ereport(level, (errmsg("Hyperparameter %s takes value %s", name, str))); + pfree(str); + break; + } + + case ANYENUMOID: { + ereport(level, (errmsg("Hyperparameter %s takes value %s", name, DatumGetCString(value)))); + break; + } + + default: { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Hyperparameter type %d not yet supported", type))); + } + } +} + + + +// Get the value of a hyperparameter in the hyperparameter struct as a datum. Definition is used for metadata +static Datum get_hyperparameter(const HyperparameterDefinition *definition, void *hyperparameter_struct) +{ + switch (definition->type) { + case INT4OID: { + int32_t *value_addr = (int32_t *)((char *)hyperparameter_struct + definition->offset); + return Int32GetDatum(*value_addr); + break; + } + + case INT8OID: { + int64_t *value_addr = (int64_t *)((char *)hyperparameter_struct + definition->offset); + return Int64GetDatum(*value_addr); + break; + } + + case FLOAT8OID: { + double *value_addr = (double *)((char *)hyperparameter_struct + definition->offset); + return Float8GetDatum(*value_addr); + break; + } + + case BOOLOID: { + bool *value_addr = (bool *)((char *)hyperparameter_struct + definition->offset); + return BoolGetDatum(*value_addr); + break; + } + + case CSTRINGOID: { + char **value_addr = (char **)((char *)hyperparameter_struct + definition->offset); + return CStringGetDatum(*value_addr); + break; + } + + case ANYENUMOID: { + void *value_addr = (void *)((char *)hyperparameter_struct + definition->offset); + return CStringGetDatum(definition->validation.enum_getter(value_addr)); + break; + } + + default: + break; + } + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Hyperparameter type %d not yet supported", definition->type))); + + return PointerGetDatum(NULL); +} + + +// Set hyperparameter in hyperparameter struct to the given value in the datum.
+// Set the hyperparameter in the hyperparameter struct to the given value in the Datum. Definition is used for metadata
+static void set_hyperparameter(const HyperparameterDefinition *definition, Datum value, void *hyperparameter_struct)
+{
+    switch (definition->type) {
+        case INT4OID: {
+            int32_t *value_addr = (int32_t *)((char *)hyperparameter_struct + definition->offset);
+            *value_addr = DatumGetInt32(value);
+            break;
+        }
+
+        case INT8OID: {
+            int64_t *value_addr = (int64_t *)((char *)hyperparameter_struct + definition->offset);
+            *value_addr = DatumGetInt64(value);
+            break;
+        }
+
+        case FLOAT8OID: {
+            double *value_addr = (double *)((char *)hyperparameter_struct + definition->offset);
+            *value_addr = DatumGetFloat8(value);
+            break;
+        }
+
+        case BOOLOID: {
+            bool *value_addr = (bool *)((char *)hyperparameter_struct + definition->offset);
+            *value_addr = DatumGetBool(value);
+            break;
+        }
+
+        case CSTRINGOID: {
+            char **value_addr = (char **)((char *)hyperparameter_struct + definition->offset);
+            *value_addr = DatumGetCString(value);
+            break;
+        }
+
+        case ANYENUMOID: {
+            void *value_addr = (void *)((char *)hyperparameter_struct + definition->offset);
+            definition->validation.enum_setter(DatumGetCString(value), value_addr);
+            break;
+        }
+
+        default:
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Hyperparameter type %d not yet supported", definition->type)));
+    }
+}
+
+static void validate_hyperparameter_string(const char *name, const char *value, const char *valid_values[],
+    int32_t valid_values_size)
+{
+    if (valid_values != NULL) {
+        bool found = false;
+        for (int i = 0; i < valid_values_size; i++) {
+            if (0 == strcmp(valid_values[i], value)) {
+                found = true;
+                break;
+            }
+        }
+        if (!found) {
+            StringInfo str = makeStringInfo();
+            for (int i = 0; i < valid_values_size; i++) {
+                if (i != 0) {
+                    appendStringInfoString(str, ", ");
+                }
+                appendStringInfoString(str, (valid_values)[i]);
+            }
             ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                errmsg("Hyperparameter %s cannot take NULL value", name)));
-    } else if (value->type == T_String) {
-        char *str = strVal(value);
-        if (strcmp(str, "true") == 0) {
-            *hyperparameter = true;
-        } else if (strcmp(str, "false") == 0) {
-            *hyperparameter = false;
+                errmsg("Invalid hyperparameter value for %s. Valid values are: %s.", name, str->data)));
+        }
+    }
+}
+
+static void validate_hyperparameter(Datum value, Oid type, const HyperparameterValidation *validation, const char *name)
+{
+    switch (type) {
+        case INT4OID: {
+            bool out_of_range = (DatumGetInt32(value) < DatumGetInt32(validation->min_value) ||
+                DatumGetInt32(value) > DatumGetInt32(validation->max_value));
+            if (!validation->min_inclusive && DatumGetInt32(value) <= DatumGetInt32(validation->min_value))
+                out_of_range = true;
+            if (!validation->max_inclusive && DatumGetInt32(value) >= DatumGetInt32(validation->max_value))
+                out_of_range = true;
+            if (out_of_range) {
+                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("Hyperparameter %s must be in the range %c%d,%d%c", name,
+                        validation->min_inclusive ? '[' : '(', DatumGetInt32(validation->min_value),
+                        DatumGetInt32(validation->max_value), validation->max_inclusive ? 
']' : ')'))); + } + break; + } + + case INT8OID: { + bool out_of_range = (DatumGetInt64(value) < DatumGetInt64(validation->min_value) || + DatumGetInt64(value) > DatumGetInt64(validation->max_value)); + if (!validation->min_inclusive && DatumGetInt64(value) <= DatumGetInt64(validation->min_value)) + out_of_range = true; + if (!validation->max_inclusive && DatumGetInt64(value) >= DatumGetInt64(validation->max_value)) + out_of_range = true; + if (out_of_range) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be in the range %c%ld,%ld%c", name, + validation->min_inclusive ? '[' : '(', DatumGetInt64(validation->min_value), + DatumGetInt64(validation->max_value), validation->max_inclusive ? ']' : ')'))); + } + break; + } + + case FLOAT8OID: { + bool out_of_range = (DatumGetFloat8(value) < DatumGetFloat8(validation->min_value) || + DatumGetFloat8(value) > DatumGetFloat8(validation->max_value)); + if (!validation->min_inclusive && DatumGetFloat8(value) <= DatumGetFloat8(validation->min_value)) + out_of_range = true; + if (!validation->max_inclusive && DatumGetFloat8(value) >= DatumGetFloat8(validation->max_value)) + out_of_range = true; + if (out_of_range) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be in the range %c%.8g,%.8g%c", name, + validation->min_inclusive ? '[' : '(', DatumGetFloat8(validation->min_value), + DatumGetFloat8(validation->max_value), validation->max_inclusive ? ']' : ')'))); + } + break; + } + + case BOOLOID: // No validation + break; + + case CSTRINGOID: + validate_hyperparameter_string(name, DatumGetCString(value), validation->valid_values, + validation->valid_values_size); + break; + + case ANYENUMOID: + validate_hyperparameter_string(name, DatumGetCString(value), validation->valid_values, + validation->valid_values_size); + break; + + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Hyperparameter type %d not yet supported", type))); + } +} + +static Value *extract_value_from_variable_set_stmt(VariableSetStmt *stmt) +{ + if (list_length(stmt->args) > 1) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s cannot be a list", stmt->name))); + } + + Value* value = NULL; + if (stmt->args != NULL) { + A_Const* aconst = NULL; + aconst = linitial_node(A_Const, stmt->args); + value = &aconst->val; + } + return value; +} + +static Datum value_to_datum(Value *value, Oid expected_type, const char *name) +{ + Datum result = (Datum)0; + if (value == NULL) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s cannot take NULL value", name))); + } + + switch (expected_type) { + case INT4OID: { + if (value->type != T_Integer) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be an integer", name))); + } + result = Int32GetDatum(intVal(value)); + break; + } + + case INT8OID: { + if (value->type != T_Integer) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be an integer", name))); + } + result = Int64GetDatum(intVal(value)); + break; + } + + case FLOAT8OID: { + if (value->type == T_Float) { + result = Float8GetDatum(floatVal(value)); + } else if (value->type == T_Integer) { + result = Float8GetDatum((double)intVal(value)); } else { ereport(ERROR, 
(errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s is not a valid string for boolean (i.e. 'true' or 'false')", name))); + errmsg("Hyperparameter %s must be a floating point number", name))); } - } else if (value->type == T_Integer) { - *hyperparameter = (intVal(value) != 0); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s must be a boolean or integer", name))); + break; } - ereport(NOTICE, (errmsg("Hyperparameter %s takes value %s", name, bool_to_str(*hyperparameter)))); - } else { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid hyperparameter value for %s ", name))); + + case BOOLOID: { + if (value->type == T_String) { + char *str = strVal(value); + if (strcmp(str, "true") == 0) { + result = BoolGetDatum(true); + } else if (strcmp(str, "false") == 0) { + result = BoolGetDatum(false); + } else { + ereport( + ERROR, + (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s is not a valid string for boolean (i.e. 'true' or 'false')", name))); + } + } else if (value->type == T_Integer) { + result = BoolGetDatum((intVal(value) != 0)); + } else { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be a boolean or integer", name))); + } + break; + } + + case CSTRINGOID: { + if (value->type != T_String) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be a string", name))); + } + result = CStringGetDatum(strVal(value)); + break; + } + + case ANYENUMOID: { + if (value->type != T_String) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Hyperparameter %s must be a string", name))); + } + result = CStringGetDatum(strVal(value)); + break; + } + + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Hyperparameter type %d not yet supported", expected_type))); } - add_model_hyperparameter(model, name, BOOLOID, BoolGetDatum(*hyperparameter)); + return result; } - -// Set the hyperparameters according to the definitions. 
In the process, the range and values of
-// each parameter is validated
-void configure_hyperparameters(AlgorithmML algorithm, List *hyperparameters, Model *model, void *hyperparameter_struct)
+Datum extract_datum_from_variable_set_stmt(VariableSetStmt *stmt, const HyperparameterDefinition *definition)
 {
-    HyperparameterDefinition *definitions;
-    int32_t definitions_size;
-    get_hyperparameter_definitions(algorithm, &definitions, &definitions_size);
+    Datum selected_value = (Datum)0;
+    if (stmt->kind == VAR_SET_DEFAULT) {
+        selected_value = definition->default_value;
+    } else if (stmt->kind == VAR_SET_VALUE) {
+        Value* value = extract_value_from_variable_set_stmt(stmt);
+        selected_value = value_to_datum(value, definition->type, definition->name);
+    }
 
-    HyperparameterValidation validation;
-    for (int32_t i = 0; i < definitions_size; i++) {
-        switch (definitions[i].type) {
-            case INT4OID: {
-                int *value_addr = (int *)((char *)hyperparameter_struct + definitions[i].offset);
-                int value_min = DatumGetInt32(definitions[i].min_value);
-                int value_max = DatumGetInt32(definitions[i].max_value);
-                init_hyperparameter_validation(&validation, &value_min, definitions[i].min_inclusive, &value_max,
-                    definitions[i].max_inclusive, NULL, 0);
-                set_hyperparameter(definitions[i].name, value_addr, hyperparameters,
-                    DatumGetInt32(definitions[i].default_value), model, &validation);
-                break;
-            }
+    return selected_value;
+}
 
-            case FLOAT8OID: {
-                double *value_addr = (double *)((char *)hyperparameter_struct + definitions[i].offset);
-                double value_min = DatumGetFloat8(definitions[i].min_value);
-                double value_max = DatumGetFloat8(definitions[i].max_value);
-                init_hyperparameter_validation(&validation, &value_min, definitions[i].min_inclusive, &value_max,
-                    definitions[i].max_inclusive, NULL, 0);
-                set_hyperparameter(definitions[i].name, value_addr, hyperparameters,
-                    DatumGetFloat8(definitions[i].default_value), model, &validation);
-                break;
-            }
+
+void configure_hyperparameters_vset(const HyperparameterDefinition definitions[], int32_t definitions_size,
+    List *hyperparameters, void *configuration)
+{
+    foreach_cell(it, hyperparameters) {
+        VariableSetStmt* current = lfirst_node(VariableSetStmt, it);
 
-            case BOOLOID: {
-                bool *value_addr = (bool *)((char *)hyperparameter_struct + definitions[i].offset);
-                set_hyperparameter(definitions[i].name, value_addr, hyperparameters,
-                    DatumGetBool(definitions[i].default_value), model, NULL);
-                break;
-            }
+        const HyperparameterDefinition *definition =
+            find_hyperparameter_definition(definitions, definitions_size, current->name);
+        if (definition != NULL) {
+            Datum selected_value = (Datum)0;
+            selected_value = extract_datum_from_variable_set_stmt(current, definition);
 
-            case ANYENUMOID: {
-                void *value_addr = (void *)((char *)hyperparameter_struct + definitions[i].offset);
-                char *str = NULL;
-                init_hyperparameter_validation(&validation, NULL, NULL, NULL, NULL, definitions[i].valid_values,
-                    definitions[i].valid_values_size);
-                set_hyperparameter(definitions[i].name, &str, hyperparameters,
-                    DatumGetCString(definitions[i].default_value), model, &validation);
-                definitions[i].enum_setter(str, value_addr);
-                break;
-            }
-
-
-            default: {
-                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                    errmsg("Invalid hyperparameter OID %d for hyperparameter %s", definitions[i].type,
-                        definitions[i].name)));
-            }
-        }
+            validate_hyperparameter(selected_value, definition->type, &definition->validation, definition->name);
+            set_hyperparameter(definition, selected_value, configuration);
+        } else
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("Invalid hyperparameter %s", current->name)));
     }
 }
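The range check that validate_hyperparameter() applies above, before set_hyperparameter() stores the value, reduces per numeric type to the following self-contained sketch (simplified: the original folds the exclusive-bound tests into <=/>= comparisons, which is equivalent):

```
#include <cassert>
#include <cstdint>

// Simplified mirror of the INT4 branch of validate_hyperparameter(): a value
// passes when it lies within [min,max] and does not sit on an excluded bound.
static bool int4_in_range(int32_t v, int32_t min_v, bool min_incl, int32_t max_v, bool max_incl)
{
    if (v < min_v || v > max_v)
        return false;
    if (!min_incl && v == min_v)
        return false;
    if (!max_incl && v == max_v)
        return false;
    return true;
}

int main()
{
    // an open lower bound such as tolerance's (0.0, 1.0], transposed to integers: (0, 10]
    assert(!int4_in_range(0, 0, false, 10, true));  // excluded lower bound
    assert(int4_in_range(5, 0, false, 10, true));   // interior value
    assert(int4_in_range(10, 0, false, 10, true));  // inclusive upper bound
    return 0;
}
```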
+
+void configure_hyperparameters_modelw(const HyperparameterDefinition definitions[], int32_t definitions_size,
+    List *hyperparameters, void *configuration)
+{
+    foreach_cell(it, hyperparameters) {
+        Hyperparameter* current = lfirst_node(Hyperparameter, it);
+
+        const HyperparameterDefinition *definition =
+            find_hyperparameter_definition(definitions, definitions_size, current->name);
+        if (definition != NULL) {
+            if (definition->type == ANYENUMOID) {
+                // special case
+                if (current->type != VARCHAROID)
+                    ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                        errmsg("Unexpected type for enum hyperparameter %s", current->name)));
+
+                char* str = Datum_to_string(current->value, VARCHAROID, false);
+                void *value_addr = (void*)((char *)configuration + definition->offset);
+                definition->validation.enum_setter(str, value_addr);
+                pfree(str);
+            } else {
+                if (definition->type != current->type)
+                    ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                        errmsg("Unexpected type for hyperparameter %s", current->name)));
+
+                set_hyperparameter(definition, current->value, configuration);
+            }
+        } else
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("Invalid hyperparameter %s", current->name)));
+    }
+}
+
+void configure_hyperparameters(const HyperparameterDefinition definitions[], int32_t definitions_size,
+    const Hyperparameter *hyperparameters, int nhyperp, void *configuration)
+{
+    const Hyperparameter* current = hyperparameters;
+    for (int h = 0; h < nhyperp; h++, current++) {
+        const HyperparameterDefinition *definition =
+            find_hyperparameter_definition(definitions, definitions_size, current->name);
+        if (definition != NULL) {
+            if (current->type != definition->type)
+                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("Invalid hyperparameter value for %s", current->name)));
+
+            validate_hyperparameter(current->value, definition->type, &definition->validation, definition->name);
+            set_hyperparameter(definition, current->value, configuration);
+        } else
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("Invalid hyperparameter %s", current->name)));
+    }
+}
+
+List *prepare_model_hyperparameters(const HyperparameterDefinition *definitions, int32_t definitions_size,
+    void *hyperparameter_struct, MemoryContext memcxt)
+{
+    List *hyperparameters = NULL;
+    for (int i = 0; i < definitions_size; i++) {
+        Datum current_value = get_hyperparameter(&definitions[i], hyperparameter_struct);
+        hyperparameters =
+            add_model_hyperparameter(hyperparameters, memcxt, definitions[i].name, definitions[i].type, current_value);
+    }
+    return hyperparameters;
+}
+
+void init_hyperparameters_with_defaults(const HyperparameterDefinition definitions[], int32_t definitions_size,
+    void *hyperparameter_struct)
+{
+    for (int i = 0; i < definitions_size; i++) {
+        set_hyperparameter(&definitions[i], definitions[i].default_value, hyperparameter_struct);
+    }
+}
+
+void print_hyperparameters(int level, List *hyperparameters)
+{
+    foreach_cell(it, hyperparameters) {
+        Hyperparameter* current = (Hyperparameter*) lfirst(it);
+        ereport_hyperparameter(level, current->name, current->value, current->type);
+    }
+}
\ No newline at end of file
diff --git 
a/src/gausskernel/dbmind/db4ai/executor/kernel.cpp b/src/gausskernel/dbmind/db4ai/executor/kernel.cpp new file mode 100644 index 000000000..56ef4d74f --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/kernel.cpp @@ -0,0 +1,70 @@ +/* +* Copyright (c) 2020 Huawei Technologies Co.,Ltd. +* +* openGauss is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*--------------------------------------------------------------------------------------- +* +* kernel.cpp +* Kernel for transformations to a linear space +* +* IDENTIFICATION +* src/gausskernel/dbmind/db4ai/executor/kernel.cpp +* +* --------------------------------------------------------------------------------------- +*/ + +#include "db4ai/kernel.h" + +static void kernel_gaussian_release(struct KernelTransformer *kernel) +{ + KernelGaussian *kernel_g = (KernelGaussian *)kernel; + matrix_release(&kernel_g->weights); + matrix_release(&kernel_g->offsets); +} + +static void kernel_gaussian_transform(struct KernelTransformer *kernel, const Matrix *input, Matrix *output) +{ + KernelGaussian *kernel_g = (KernelGaussian *)kernel; + matrix_transform_kernel_gaussian(input, &kernel_g->weights, &kernel_g->offsets, output); +} + +void kernel_init_gaussian(KernelGaussian *kernel, int features, int components, double gamma, int seed) +{ + kernel->km.coefficients = components; + kernel->km.release = kernel_gaussian_release; + kernel->km.transform = kernel_gaussian_transform; + matrix_init_kernel_gaussian(features, components, gamma, seed, &kernel->weights, &kernel->offsets); +} + +static void kernel_polynomial_release(struct KernelTransformer *kernel) +{ + KernelPolynomial *kernel_p = (KernelPolynomial *)kernel; + matrix_release(&kernel_p->weights); + matrix_release(&kernel_p->coefs); + pfree(kernel_p->components); +} + +static void kernel_polynomial_transform(struct KernelTransformer *kernel, const Matrix *input, Matrix *output) +{ + KernelPolynomial *kernel_p = (KernelPolynomial *)kernel; + matrix_transform_kernel_polynomial(input, kernel_p->km.coefficients, kernel_p->components, &kernel_p->weights, + &kernel_p->coefs, output); +} + +void kernel_init_polynomial(KernelPolynomial *kernel, int features, int components, int degree, double coef0, int seed) +{ + kernel->km.coefficients = components; + kernel->km.release = kernel_polynomial_release; + kernel->km.transform = kernel_polynomial_transform; + kernel->components = + matrix_init_kernel_polynomial(features, components, degree, coef0, seed, &kernel->weights, &kernel->coefs); +} diff --git a/src/gausskernel/dbmind/db4ai/executor/kmeans/kmeans.cpp b/src/gausskernel/dbmind/db4ai/executor/kmeans/kmeans.cpp index d6f6df8af..170b73c0e 100644 --- a/src/gausskernel/dbmind/db4ai/executor/kmeans/kmeans.cpp +++ b/src/gausskernel/dbmind/db4ai/executor/kmeans/kmeans.cpp @@ -29,18 +29,155 @@ IDENTIFICATION #include "nodes/pg_list.h" #include "postgres_ext.h" +#include "db4ai/kmeans.h" +#include "db4ai/aifuncs.h" #include "db4ai/fp_ops.h" #include "db4ai/distance_functions.h" #include "db4ai/model_warehouse.h" #include "db4ai/predict_by.h" #include "db4ai/db4ai_cpu.h" +#include "db4ai/db4ai_common.h" + + 
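The new kernel.cpp above dispatches per-kernel behavior through function pointers stored in the embedded KernelTransformer, with each callback casting the base pointer back to its concrete kernel (as kernel_gaussian_release() does). A self-contained sketch of that C-style polymorphism pattern, with illustrative names only:

```
#include <cstdio>

// Illustrative stand-ins for KernelTransformer and a concrete kernel: the
// base struct carries function pointers that each kernel fills in at init
// time, and callbacks downcast from the embedded base to the derived struct.
struct Transformer {
    void (*release)(Transformer *self);
    void (*transform)(Transformer *self, const double *in, double *out, int n);
};

struct ScaleTransformer {
    Transformer base; // placed first, mirroring the embedded 'km' member above
    double factor;
};

static void scale_release(Transformer *self)
{
    (void)self; // nothing heap-allocated in this toy kernel
}

static void scale_transform(Transformer *self, const double *in, double *out, int n)
{
    ScaleTransformer *s = (ScaleTransformer *)self;
    for (int i = 0; i < n; ++i)
        out[i] = in[i] * s->factor;
}

static void scale_init(ScaleTransformer *t, double factor)
{
    t->base.release = scale_release;
    t->base.transform = scale_transform;
    t->factor = factor;
}

int main()
{
    ScaleTransformer t;
    scale_init(&t, 2.0);
    double in[3] = {1., 2., 3.};
    double out[3];
    t.base.transform(&t.base, in, out, 3);
    t.base.release(&t.base);
    printf("%g %g %g\n", out[0], out[1], out[2]);
    return 0;
}
```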
+/* + * parameters that affect k-means (hyper-parameters) + */ +typedef struct HyperparametersKMeans { + ModelHyperparameters mhp; // place-holder + SeedingFunction seeding = KMEANS_RANDOM_SEED; + DistanceFunction distance = KMEANS_L2_SQUARED; + Verbosity verbosity = NO_OUTPUT; + uint32_t num_centroids = 1U; + uint32_t num_iterations = 10U; + uint32_t n_features = 0U; + uint32_t batch_size = 1000U; + uint64_t external_seed = 0ULL; + double tolerance = 0.00001; +} HyperparametersKMeans; + +/* + * optimized for prediction + */ +typedef struct PCentroid { + double objective_function = DBL_MAX; + double avg_distance = DBL_MAX; + double min_distance = DBL_MAX; + double max_distance = DBL_MAX; + double std_dev_distance = DBL_MAX; + uint64_t cluster_size = 0ULL; + uint32_t id = 0U; + double *coordinates = nullptr; +} PCentroid; + +typedef struct ModelKMeansV01 { + /* + * the following fields are put here for convenience + * (currently used for prediction, and explain) + */ + uint32_t actual_num_centroids = 0U; + uint32_t num_actual_iterations = 0U; + uint32_t dimension = 0U; + uint32_t distance_function_id = 0U; + uint32_t coordinates_offset = 0U; + uint32_t padding = 0U; + PCentroid *centroids = nullptr; +} ModelKMeansV01; + +/* + * current state of the k-means algorithm + */ +typedef struct KMeansStateDescription { + Centroid *centroids[2] = {nullptr}; + ArrayType *bbox_min = nullptr; + ArrayType *bbox_max = nullptr; + GSPoint *batch = nullptr; + + double (*distance)(double const *, double const *, uint32_t const dimension) = nullptr; + + double execution_time = 0.; + double seeding_time = 0.; + IncrementalStatistics solution_statistics[2]; + uint64_t num_good_points = 0UL; + uint64_t num_dead_points = 0UL; + uint32_t current_iteration = 0U; + uint32_t current_centroid = 0U; + uint32_t dimension = 0U; + uint32_t num_centroids = 0U; + uint32_t actual_num_iterations = 0U; + uint32_t size_centroids_bytes = 0U; + bool initialized = false; + bool late_initialized = false; +} KMeansStateDescription; + +typedef struct KMeansState { + TrainModelState tms; + KMeansStateDescription description; + bool done = false; +} KMeansState; + +/* + * kmeans|| does at most 10 iterations + */ +uint32_t constexpr NUM_ITERATIONS_KMEANSBB = 10U; + +/* + * internally, the operator works in stages, this enum identifies each one of them + */ +typedef enum : uint32_t { + KMEANS_INIT = 0, + KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE, + KMEANS_INITIAL_CENTROIDS_BB_SAMPLE, + KMEANS_INITIAL_CENTROIDS_BB_COMPUTE_COST, + KMEANS_LLOYD +} AlgoStage; + +/* + * this initializes all relevant fields once the dimension of the points (as specified + * by the user or as obtained from the very first tuple read) is known + */ +force_inline static void initialize_fields(KMeansStateDescription *kmeans_state_description, + uint32_t const num_centroids, uint32_t const dimension) +{ + auto datums_tmp = reinterpret_cast(palloc0(sizeof(Datum) * dimension)); + + for (uint32_t c = 0; c < num_centroids; ++c) { + kmeans_state_description->centroids[0][c].id = c + 1; + kmeans_state_description->centroids[1][c].id = c + 1; + /* + * this is an internal array that will eventually hold the coordinates of a centroid + * its representation as a PG Array is legacy. 
It will be better to represent it as + * an array of double directly (in a manner that can be returned to the client directly) + */ + kmeans_state_description->centroids[0][c].coordinates = + construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); + kmeans_state_description->centroids[1][c].coordinates = + construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); + } + + /* + * general running time information of the operator + */ + kmeans_state_description->bbox_max = + construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); + kmeans_state_description->bbox_min = + construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); + kmeans_state_description->num_good_points = 0; + kmeans_state_description->num_dead_points = 0; + kmeans_state_description->current_iteration = 0; + kmeans_state_description->current_centroid = 0; + kmeans_state_description->dimension = dimension; + kmeans_state_description->initialized = true; + kmeans_state_description->size_centroids_bytes = sizeof(float8) * dimension; + + pfree(datums_tmp); +} /* * to verify that the pg array we get complies with what we expect * we discard entries that do not pass the test because we cannot start * much (consistent) with them anyway */ -bool verify_pgarray(ArrayType const * pg_array, int32_t n) +static bool verify_pgarray(ArrayType const *pg_array, int32_t n) { /* * We expect the input to be an n-element array of doubles; verify that. We @@ -48,28 +185,28 @@ bool verify_pgarray(ArrayType const * pg_array, int32_t n) * going to look like a C array of n double values. */ if (unlikely((ARR_NDIM(pg_array) != 1) || (ARR_DIMS(pg_array)[0] != n) || ARR_HASNULL(pg_array) || - (ARR_ELEMTYPE(pg_array) != FLOAT8OID))) + (ARR_ELEMTYPE(pg_array) != FLOAT8OID))) return false; - + return true; } /* * this copies the coordinates found inside a slot onto an array we own */ -bool copy_slot_coordinates_to_array(GSPoint *coordinates, TupleTableSlot const * slot, uint32_t const dimension) +static bool copy_slot_coordinates_to_array(GSPoint *coordinates, ModelTuple const *slot, uint32_t const dimension) { if (unlikely((slot == nullptr) or (coordinates == nullptr))) return false; - + /* * we obtain the coordinates of the current point (function call incurs in detoasting * and thus memory is allocated and we have to free it once we don't need it */ - ArrayType *current_point_pgarray = DatumGetArrayTypePCopy(slot->tts_values[0]); + ArrayType *current_point_pgarray = DatumGetArrayTypePCopy(slot->values[0]); bool const valid_point = verify_pgarray(current_point_pgarray, dimension); - bool release_point = PointerGetDatum(current_point_pgarray) != slot->tts_values[0]; - + bool release_point = PointerGetDatum(current_point_pgarray) != slot->values[0]; + /* * if the point is not valid and it was originally toasted, then we release the copy */ @@ -77,42 +214,28 @@ bool copy_slot_coordinates_to_array(GSPoint *coordinates, TupleTableSlot const * pfree(current_point_pgarray); release_point = false; } - + coordinates->pg_coordinates = current_point_pgarray; coordinates->should_free = release_point; - + return valid_point; } -/* - * this sets the weights of a set of candidates to 1 (every point is the centroid of itself) - */ -void reset_weights(List const * centroids) -{ - ListCell const * current_centroid_cell = centroids ? 
centroids->head : nullptr; - GSPoint *centroid = nullptr; - - for (; current_centroid_cell != nullptr; current_centroid_cell = lnext(current_centroid_cell)) { - centroid = reinterpret_cast(lfirst(current_centroid_cell)); - centroid->weight = 1U; - } -} - /* * given a set of centroids (as a PG list) and a point, this function compute the distance to the closest * centroid */ -bool closest_centroid(List const * centroids, GSPoint const * point, uint32_t const dimension, double *distance) +static bool closest_centroid(List const *centroids, GSPoint const *point, uint32_t const dimension, double *distance) { - ListCell const * current_centroid_cell = centroids ? centroids->head : nullptr; + ListCell const *current_centroid_cell = centroids ? centroids->head : nullptr; GSPoint *centroid = nullptr; GSPoint *closest_centroid_ptr = nullptr; bool result = false; bool min_distance_changed = false; double local_distance = 0.; auto min_distance = DBL_MAX; - auto const * point_coordinates = reinterpret_cast(ARR_DATA_PTR(point->pg_coordinates)); - + auto const *point_coordinates = reinterpret_cast(ARR_DATA_PTR(point->pg_coordinates)); + for (; current_centroid_cell != nullptr; current_centroid_cell = lnext(current_centroid_cell)) { /* * low temporal locality for a prefetch for read @@ -120,40 +243,94 @@ bool closest_centroid(List const * centroids, GSPoint const * point, uint32_t co prefetch(lnext(current_centroid_cell), 0, 1); centroid = reinterpret_cast(lfirst(current_centroid_cell)); local_distance = l2_squared(point_coordinates, - reinterpret_cast(ARR_DATA_PTR(centroid->pg_coordinates)), dimension); + reinterpret_cast(ARR_DATA_PTR(centroid->pg_coordinates)), + dimension); min_distance_changed = local_distance < min_distance; min_distance = min_distance_changed ? local_distance : min_distance; closest_centroid_ptr = min_distance_changed ? centroid : closest_centroid_ptr; } - + if (closest_centroid_ptr) { result = true; ++closest_centroid_ptr->weight; } - + if (likely(distance != nullptr)) *distance = min_distance; - + return result; } +static bool deal_sample(bool const sample, std::mt19937_64 *prng, GSPoint *batch, uint32_t current_slot, + ModelTuple const *scan_slot, uint32_t const dimension, bool first_candidate, + List *centroid_candidates, double local_sample_probability, AlgoStage stage) +{ + double coin = 0.; + bool result = false; + bool distance_computed = false; + double distance_to_centroid = 0.; + double op_error = 0.; + std::uniform_real_distribution unit_sampler(0., 1.); + /* + * toss the coin to see if we have to + */ + if (sample) { + if (unlikely(prng == nullptr)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means exec: prng must be set (non-null)"))); + + coin = unit_sampler(*prng); + /* + * random++ and kmeans|| sample with different probabilities + * (the latter is much more complicated) + * + * when sampling we will pay the price of unpacking every single + * tuple (which has to be done for kmeans|| anyway) for code + * simplicity + */ + result = copy_slot_coordinates_to_array(batch + current_slot, scan_slot, dimension); + + if (result && (stage == KMEANS_INITIAL_CENTROIDS_BB_SAMPLE) && !first_candidate) { + distance_computed = closest_centroid(centroid_candidates, batch + current_slot, + dimension, &distance_to_centroid); + twoMult(local_sample_probability, distance_computed ? 
distance_to_centroid : 1., + &local_sample_probability, &op_error); + local_sample_probability += op_error; + } + + /* + * if the data point is not valid or did not pass the test + * we release the memory it occupies and ask for the next data point + */ + if (!result || coin >= local_sample_probability) { + if (likely(batch[current_slot].should_free)) { + pfree(batch[current_slot].pg_coordinates); + batch[current_slot].pg_coordinates = nullptr; + batch[current_slot].should_free = false; + } + return true; + } + } + return false; +} + /* * given a set of centroids (as a PG list) and a set of points, this function computes * the cost of the set of centroids as well as their weights (number of points assigned * to each centroid) * we assumed that all weights have been reset already */ -bool compute_cost_and_weights(List const * centroids, GSPoint const * points, uint32_t dimension, - uint32_t const num_slots, double *cost) +static bool compute_cost_and_weights(List const *centroids, GSPoint const *points, uint32_t dimension, + uint32_t const num_slots, double *cost) { - GSPoint const * point = nullptr; + GSPoint const *point = nullptr; double local_distance = 0.; double cost_of_batch = 0.; double local_op_error = 0.; double total_op_error = 0.; uint32_t current_slot = 0U; bool const result = current_slot < num_slots; - + // for every point we compute the closest centroid and increase the weight of the corresponding centroid while (current_slot < num_slots) { point = points + current_slot; @@ -167,14 +344,425 @@ bool compute_cost_and_weights(List const * centroids, GSPoint const * points, ui return result; } +/* + * a batch is used only once and thus the data it points to is + * released after it has been used + */ +force_inline static void release_batch(GSPoint *batch, uint32_t const num_slots) +{ + GSPoint *point = nullptr; + for (uint32_t current_slot = 0; current_slot < num_slots; ++current_slot) { + point = batch + current_slot; + if (likely(point->should_free)) { + pfree(point->pg_coordinates); + point->pg_coordinates = nullptr; + point->should_free = false; + } + } +} + +/* + * given the running mean of a centroid and a new point, this adds the new point to the aggregate + * using a sum that provides higher precision (we could provide much higher precision at the cost + * of allocating yet another array to keep correction terms for every dimension + */ +force_inline static void aggregate_point(double *centroid_aggregation, double const *new_point, + uint32_t const dimension) +{ + double local_correction = 0; + for (uint32_t d = 0; d < dimension; ++d) { + twoSum(centroid_aggregation[d], new_point[d], centroid_aggregation + d, &local_correction); + centroid_aggregation[d] += local_correction; + } +} + +/* + * we assume that all slots in the batch are non-null (guaranteed by the upper call) + * also, that the next set of centroids has been reset previous to the very first call + */ +static void update_centroids(KMeansStateDescription *description, GSPoint *slots, uint32_t const num_slots, + uint32_t const idx_current_centroids, uint32_t const idx_next_centroids) +{ + uint32_t const dimension = description->dimension; + uint32_t current_slot = 0U; + uint32_t current_centroid = 0U; + uint32_t const num_centroids = description->num_centroids; + uint32_t closest_centroid = 0U; + GSPoint const *current_point = nullptr; + double const *current_point_coordinates = nullptr; + double *current_centroid_coordinates = nullptr; + double *next_centroid_coordinates = nullptr; + double dist = 0.; + auto 
min_dist = DBL_MAX;
+        /*
+         * we obtain the coordinates of the current point
+         */
+        current_point = slots + current_slot;
+
+        /*
+         * this loop obtains the distance of the current point to all centroids and keeps the closest one
+         */
+        while (likely(current_centroid < num_centroids)) {
+            current_centroid_coordinates =
+                reinterpret_cast<double *>(ARR_DATA_PTR(current_centroids[current_centroid].coordinates));
+            current_point_coordinates = reinterpret_cast<double const *>(ARR_DATA_PTR(current_point->pg_coordinates));
+
+            dist = description->distance(current_point_coordinates, current_centroid_coordinates, dimension);
+            min_dist_change = dist < min_dist;
+            closest_centroid = min_dist_change ? current_centroid : closest_centroid;
+            min_dist = min_dist_change ? dist : min_dist;
+            ++current_centroid;
+        }
+
+        /*
+         * once the closest centroid has been detected we proceed with the aggregation and update
+         * of statistics
+         */
+        local_statistics.setTotal(min_dist);
+        closest_centroid_ptr = current_centroids + closest_centroid;
+        closest_centroid_next_ptr = next_centroids + closest_centroid;
+        closest_centroid_ptr->statistics += local_statistics;
+        /*
+         * for the next iteration (if there is any) we have to obtain a new centroid, which will be
+         * the average of the points that we aggregate here
+         */
+        next_centroid_coordinates = reinterpret_cast<double *>(ARR_DATA_PTR(closest_centroid_next_ptr->coordinates));
+        aggregate_point(next_centroid_coordinates, current_point_coordinates, dimension);
+    } while (likely(++current_slot < num_slots));
+}
+
+/*
+ * updates the minimum bounding box to contain the new given point
+ */
+force_inline static void update_bbox(double *const bbox_min, double *const bbox_max, double const *point,
+    uint32_t const dimension)
+{
+    uint32_t current_dimension = 0;
+    double min = 0.;
+    double max = 0.;
+    double p = 0.;
+
+    while (current_dimension < dimension) {
+        min = bbox_min[current_dimension];
+        max = bbox_max[current_dimension];
+        p = point[current_dimension];
+
+        /*
+         * we do cmovs instead of ifs (we could spare a comparison from time to time
+         * by using ifs, but we increase branch misprediction as well); thus we settle
+         * for the branchless option (more expensive than a hit, but cheaper than a miss)
+         */
+        bbox_min[current_dimension] = p < min ? p : min;
+        bbox_max[current_dimension] = p > max ? p : max;
+
+        ++current_dimension;
+    }
+}
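aggregate_point() above accumulates centroid coordinates with twoSum() from fp_ops.h, so the rounding error of each addition is carried along rather than silently dropped. A self-contained sketch of that error-free transformation (Knuth's TwoSum; the fp_ops.h signature is assumed to be equivalent):

```
#include <cstdio>

// Knuth's TwoSum: s + err == a + b exactly, where s = fl(a + b) and err is
// the rounding error that a plain addition would lose.
static void two_sum(double a, double b, double *s, double *err)
{
    *s = a + b;
    double b_virtual = *s - a;
    double a_virtual = *s - b_virtual;
    *err = (a - a_virtual) + (b - b_virtual);
}

int main()
{
    double sum = 1e16;
    double err = 0.;
    two_sum(sum, 1.0, &sum, &err);
    // naive addition loses the 1.0 entirely; err recovers the dropped bits
    printf("sum=%.1f recovered=%.1f\n", sum, err);
    return 0;
}
```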
+
+static bool init_kmeans(KMeansStateDescription *description, double *bbox_min, double *bbox_max, GSPoint const *batch,
+    uint32_t const num_slots)
+{
+    uint32_t const dimension = description->dimension;
+    uint32_t const size_centroid_bytes = description->size_centroids_bytes;
+    bool const first_run = description->current_iteration == 0;
+    uint32_t current_slot = 0;
+    GSPoint const *current_point = nullptr;
+    double const *current_point_coordinates = nullptr;
+
+    if (unlikely(num_slots == 0))
+        return false;
+
+    /*
+     * the very first slot of the batch is a bit special. observe that we have got here
+     * after the point has passed validity checks and thus it is safe to access its information
+     * directly
+     */
+    current_point = batch + current_slot;
+    current_point_coordinates = reinterpret_cast<double const *>(ARR_DATA_PTR(current_point->pg_coordinates));
+
+    /*
+     * in the very first run we set the coordinates of the bounding box as the ones
+     * of the very first point (we improve from there)
+     */
+    if (unlikely(first_run)) {
+        /*
+         * no need to memset to zero because in the very first run they are freshly allocated
+         * with palloc0
+         */
+        errno_t rc = memcpy_s(bbox_min, size_centroid_bytes, current_point_coordinates, size_centroid_bytes);
+        securec_check(rc, "\0", "\0");
+        rc = memcpy_s(bbox_max, size_centroid_bytes, current_point_coordinates, size_centroid_bytes);
+        securec_check(rc, "\0", "\0");
+        description->current_iteration = 1;
+    } else {
+        update_bbox(bbox_min, bbox_max, current_point_coordinates, dimension);
+    }
+
+    ++description->num_good_points;
+
+    /*
+     * let's consider the rest of the batch
+     */
+    while (likely(++current_slot < num_slots)) {
+        current_point = batch + current_slot;
+        current_point_coordinates = reinterpret_cast<double const *>(ARR_DATA_PTR(current_point->pg_coordinates));
+
+        update_bbox(bbox_min, bbox_max, current_point_coordinates, dimension);
+
+        ++description->num_good_points;
+    }
+
+    /*
+     * done with the batch
+     */
+    return true;
+}
+
+/*
+ * this is the work horse of the whole algorithm. in here we do a lot of things depending on the stage
+ * of the algorithm. this function does a complete (single) pass over the data. the upper layer
+ * calls this function multiple times depending on the stage of the algorithm
+ */
+static List *one_data_pass(TrainModelState *pstate, KMeansStateDescription *state_description,
+    uint32_t const batch_size, uint32_t const idx_current_centroids,
+    uint32_t const idx_next_centroids, bool const sample, double const sample_probability,
+    AlgoStage stage, List *centroid_candidates, double *cost_centroid_candidates,
+    std::mt19937_64 *prng)
+{
+    bool plan_exhausted = false;
+    bool result = false;
+    bool first_candidate = centroid_candidates == nullptr;
+    bool initialized = state_description->initialized;
+    uint32_t current_slot = 0;
+    uint32_t dimension = state_description->dimension;
+    uint32_t const num_centroids = state_description->num_centroids;
+    uint32_t num_elements_round = 0;
+    uint32_t slot_number = 0;
+    uint32_t valid_row = 0;
+    ModelTuple const *scan_slot = nullptr;
+    double *bbox_min = nullptr;
+    double *bbox_max = nullptr;
+    double local_sample_probability = sample_probability;
+    double cost_of_batch = 0.;
+    GSPoint *centroid_candidate = nullptr;
+    GSPoint *batch = state_description->batch;
+
+    while (!plan_exhausted) {
+        current_slot = 0;
+        /* we produce a batch of slots to be passed to the algorithm */
+        while (current_slot < batch_size) {
+            scan_slot = pstate->fetch(pstate->callback_data, &pstate->tuple)
+                ? &pstate->tuple : nullptr;
+            ++slot_number;
+            // every slot will have its own chances
+            local_sample_probability = sample_probability;
+            /*
+             * we get out of the whole thing if we have exhausted the relation or
+             * we have found our k centroids.
+             * if we were not able to sample the k centroids, the upper call
+             * will perform runs until we have done so
+             */
+            if (unlikely(!scan_slot || state_description->current_centroid == num_centroids)) {
+                plan_exhausted = true;
+                break;
+            }
+
+            /*
+             * we jump over rows with empty coordinates
+             */
+            if (unlikely(scan_slot->isnull[0])) {
+                continue;
+            }
+
+            /*
+             * if we have not initialized the node state because the user didn't provide the dimension, we set it
+             * here and initialize it with what we found. this is very optimistic because if the very first row
+             * is not "correct", then all rows not matching this dimension will be ignored.
+             *
+             * observe that this branch will be executed at most once and after it all fields are allocated
+             * and "properly" initialized
+             */
+            if (unlikely(stage == KMEANS_INIT && !initialized)) {
+                ArrayType *first_point_pgarray = DatumGetArrayTypeP(scan_slot->values[0]);
+                dimension = ARR_DIMS(first_point_pgarray)[0];
+                initialize_fields(state_description, num_centroids, dimension);
+                if (unlikely(PointerGetDatum(first_point_pgarray) != scan_slot->values[0]))
+                    pfree(first_point_pgarray);
+                batch = state_description->batch;
+                initialized = true;
+            }
+
+            if (deal_sample(sample, prng, batch, current_slot, scan_slot, dimension, first_candidate,
+                centroid_candidates, local_sample_probability, stage))
+                continue;
+
+            if ((stage == KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE) || (stage == KMEANS_INITIAL_CENTROIDS_BB_SAMPLE)) {
+                /*
+                 * we only know the expected number of centroid candidates that we will produce
+                 * but not the exact number. thus we allocate each one of them on demand
+                 * (observe that we cannot use the batch structure because the number of
+                 * candidates we generate can be much larger than the size of a batch)
+                 */
+                centroid_candidate = reinterpret_cast<GSPoint *>(palloc0(sizeof(GSPoint)));
+
+                /*
+                 * observe that the scan_slot was already copied above (when sampling) and thus
+                 * we just move the memory reference from the batch slot to the newly allocated
+                 * GSPoint
+                 */
+                *centroid_candidate = batch[current_slot];
+                centroid_candidate->distance_to_closest_centroid = DBL_MAX;
+                centroid_candidate->id = slot_number;
+                batch[current_slot].id = 0;
+                batch[current_slot].distance_to_closest_centroid = 0.;
+                batch[current_slot].weight = 0.;
+                batch[current_slot].pg_coordinates = nullptr;
+                batch[current_slot].should_free = false;
+
+                /*
+                 * this stores the reference to the current selected candidate and thus we
+                 * can forget about it
+                 */
+                centroid_candidates = lappend(centroid_candidates, centroid_candidate);
+
+                /*
+                 * memory should be allocated in the next iteration
+                 */
+                centroid_candidate = nullptr;
+
+                ++num_elements_round;
+
+                /*
+                 * for kmeans|| we produce a single candidate the very first time.
+                 * for random++ we produce a number of candidates in a single pass
+                 * (thus we consume the whole relation since we do not update current_slot)
+                 */
+                if (unlikely(first_candidate && (stage == KMEANS_INITIAL_CENTROIDS_BB_SAMPLE))) {
+                    plan_exhausted = true;
+                    break;
+                } else if (unlikely(first_candidate)) {
+                    first_candidate = false;
+                }
+            } else {
+                /*
+                 * the element's coordinates are copied to be processed
+                 */
+                result = copy_slot_coordinates_to_array(batch + current_slot, scan_slot, dimension);
+                batch[current_slot].id = slot_number;
+                valid_row = result ? 
1U : 0U; + current_slot += valid_row; + state_description->num_dead_points += 1 - valid_row; + } + } + + /* we process the batch + * each stage happens in a batch and thus branch misprediction should not be a problem + * also, except for KMEANS_LLOYD, the other two stages require exactly one data pass + */ + switch (stage) { + case KMEANS_INIT: + /* + * this run is to obtain initial statistics about the data (like the number of valid tuples) + * and the coordinates of the bounding box + */ + bbox_min = reinterpret_cast(state_description->bbox_min); + bbox_max = reinterpret_cast(state_description->bbox_max); + init_kmeans(state_description, bbox_min, bbox_max, batch, current_slot); + + /* + * we are done with the batch and thus we release the allocated memory (corresponding + * to the points of the batch) + */ + release_batch(batch, current_slot); + + break; + case KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE: + case KMEANS_INITIAL_CENTROIDS_BB_SAMPLE: + /* + * when sampling for random++ and kmeans|| we do no computations other than the sample + * the upper call will run kmeans++ after the candidates have been sampled + */ + break; + case KMEANS_INITIAL_CENTROIDS_BB_COMPUTE_COST: + /* + * when computing the cost of a solution, we do it in a batched manner as the other + * non-sampling cases + */ + compute_cost_and_weights(centroid_candidates, batch, dimension, current_slot, &cost_of_batch); + + if (unlikely(cost_centroid_candidates == nullptr)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means exec: cost variable must be non-null"))); + + *cost_centroid_candidates += cost_of_batch; + + /* + * we are done with the batch and thus we release the allocated memory (corresponding + * to the points of the batch) + */ + release_batch(batch, current_slot); + + break; + case KMEANS_LLOYD: + /* + * let's find out which centroid is the closest and aggregate the corresponding statistics + */ + update_centroids(state_description, batch, current_slot, idx_current_centroids, idx_next_centroids); + + /* + * we are done with the batch and thus we release the allocated memory (corresponding + * to the points of the batch) + */ + release_batch(batch, current_slot); + + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means exec: no known algorithm stage"))); + } + } + return centroid_candidates; +} + +/* + * this sets the weights of a set of candidates to 1 (every point is the centroid of itself) + */ +void reset_weights(List const *centroids) +{ + ListCell const *current_centroid_cell = centroids ? centroids->head : nullptr; + GSPoint *centroid = nullptr; + + for (; current_centroid_cell != nullptr; current_centroid_cell = lnext(current_centroid_cell)) { + centroid = reinterpret_cast(lfirst(current_centroid_cell)); + centroid->weight = 1U; + } +} + /* * this runs kmeans++ on a super-set of centroids to obtain the k centroids we want as seeding */ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, uint32_t const idx_current_centroids, - uint32_t const size_centroid_bytes, std::mt19937_64 *prng) + std::mt19937_64 *prng) { Centroid *centroids = description->centroids[idx_current_centroids]; uint32_t const num_centroids_needed = description->num_centroids; + uint32_t const size_centroid_bytes = description->size_centroids_bytes; uint32_t num_candidates = centroids_candidates ? 
centroids_candidates->length : 0; uint32_t const dimension = description->dimension; ListCell *current_candidate_cell = nullptr; @@ -196,16 +784,15 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, uint32_t current_centroid_idx = description->current_centroid; uint32_t tries_until_next_centroid = 0; bool no_more_candidates = false; - errno_t rc = EOK; - + /* * we expect to produce all centroids in one go and to be able to produce them because * we have enough candidates */ if ((current_centroid_idx > 0) || (num_centroids_needed == 0)) ereport(ERROR, - (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means: not able to run k-means++"))); + (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means: not able to run k-means++"))); /* * we produce the very first centroid uniformly at random (not weighted) * the rest of the centroids are found by sampling w.r.t. the (weighted) distance to their closest centroid @@ -225,10 +812,10 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, for (; current_candidate_cell != nullptr; current_candidate_cell = lnext(current_candidate_cell)) { current_candidate = reinterpret_cast(lfirst(current_candidate_cell)); current_candidate_pgarray = current_candidate->pg_coordinates; - + no_more_candidates = false; candidate_probability = unit_sampler(*prng); - + /* * the very first candidate will be sampled uniformly, the rest will be sampled * w.r.t. the distance to their closest centroid (the farther the more probable) @@ -239,7 +826,7 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, twoDiv(distance, sum_distances, &sample_probability, &sample_probability_correction); sample_probability += sample_probability_correction; } - + /* * this is weighted sampling (taking into consideration the weight of the point) */ @@ -247,38 +834,38 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, prev_candidate_cell = current_candidate_cell; continue; } - + /* * this candidate becomes a centroid */ current_centroid_pgarray = centroids[current_centroid_idx].coordinates; - + /* * we copy the coordinates of the centroid to the official set of centroids */ if (unlikely((current_centroid_pgarray != nullptr) and (current_candidate_pgarray != nullptr))) { auto point_coordinates_to = reinterpret_cast(ARR_DATA_PTR(current_centroid_pgarray)); auto point_coordinates_from = reinterpret_cast(ARR_DATA_PTR(current_candidate_pgarray)); - rc = memset_s(point_coordinates_to, size_centroid_bytes, 0, size_centroid_bytes); - securec_check(rc, "\0", "\0"); - rc = memcpy_s(point_coordinates_to, - size_centroid_bytes, point_coordinates_from, size_centroid_bytes); + memset_s(point_coordinates_to, size_centroid_bytes, 0, size_centroid_bytes); + errno_t rc = memcpy_s(point_coordinates_to, + size_centroid_bytes, point_coordinates_from, size_centroid_bytes); securec_check(rc, "\0", "\0"); } - + /* * we delete the element that just became centroid from the list of candidates (along all its information) */ centroids_candidates = list_delete_cell(centroids_candidates, current_candidate_cell, prev_candidate_cell); pfree(current_candidate->pg_coordinates); current_candidate_cell = - prev_candidate_cell ? prev_candidate_cell : centroids_candidates ? centroids_candidates->head : nullptr; - + prev_candidate_cell ? prev_candidate_cell : centroids_candidates ? 
centroids_candidates->head + : nullptr; + /* * we can reset sum_distances because it will be overwritten below anyway */ sum_distances = 0.; - + /* * we update the distance to the closest centroid depending on the presence of the new * centroid @@ -287,20 +874,20 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, for (; tmp_cell != nullptr; tmp_cell = lnext(tmp_cell)) { tmp_candidate = reinterpret_cast(lfirst(tmp_cell)); distance = l2_squared(reinterpret_cast(ARR_DATA_PTR(tmp_candidate->pg_coordinates)), - reinterpret_cast(ARR_DATA_PTR(current_centroid_pgarray)), dimension); - + reinterpret_cast(ARR_DATA_PTR(current_centroid_pgarray)), dimension); + /* * medium temporal locality for a prefetch for write */ prefetch(lnext(tmp_cell), 1, 2); - + /* * since we are dealing with weighted points, the overall sum of distances has * to consider the weight of each point */ twoMult(distance, tmp_candidate->weight, &distance, &sum_distances_correction); distance += sum_distances_correction; - + /* * we store the weighted distance at the point to save the multiplication later on * when sampling @@ -308,12 +895,12 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, */ if ((current_centroid_idx == 0) || (distance < tmp_candidate->distance_to_closest_centroid)) tmp_candidate->distance_to_closest_centroid = distance; - + /* * every distance appears as many times as the weight of the point */ twoSum(sum_distances, tmp_candidate->distance_to_closest_centroid, &sum_distances, - &sum_distances_local_correction); + &sum_distances_local_correction); sum_distances_correction += sum_distances_local_correction; } /* @@ -321,15 +908,15 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, */ if (likely(current_candidate_cell != nullptr)) prefetch(lnext(current_candidate_cell), 0, 3); - + sum_distances += sum_distances_correction; - + ++current_centroid_idx; tries_until_next_centroid = 0; break; } } - + /* * we get rid of the linked list of candidates in case some candidates are left */ @@ -353,9 +940,9 @@ List *kmeanspp(KMeansStateDescription *description, List *centroids_candidates, */ pfree(current_candidate); } - + description->current_centroid = current_centroid_idx; - + return centroids_candidates; } @@ -368,7 +955,7 @@ void compute_cost(KMeansStateDescription *description, uint32_t const idx_curren uint32_t const num_centroids = description->num_centroids; IncrementalStatistics *output_statistics = description->solution_statistics + idx_current_centroids; output_statistics->reset(); - + for (uint32_t c = 0; c < num_centroids; ++c) *output_statistics += centroids[c].statistics; } @@ -376,35 +963,19 @@ void compute_cost(KMeansStateDescription *description, uint32_t const idx_curren /* * every iteration there is a set of centroids (the next one) that is reset */ -void reset_centroids(KMeansStateDescription *description, uint32_t const idx_centroids, - uint32_t const size_centroid_bytes) +void reset_centroids(KMeansStateDescription *description, uint32_t const idx_centroids) { uint32_t const num_centroids = description->num_centroids; + uint32_t const size_centroid_bytes = description->size_centroids_bytes; Centroid *centroids = description->centroids[idx_centroids]; Centroid *centroid = nullptr; double *centroid_coordinates = nullptr; - errno_t rc = EOK; - + for (uint32_t c = 0; c < num_centroids; ++c) { centroid = centroids + c; centroid->statistics.reset(); centroid_coordinates = 
reinterpret_cast(ARR_DATA_PTR(centroid->coordinates)); - rc = memset_s(centroid_coordinates, size_centroid_bytes, 0, size_centroid_bytes); - securec_check(rc, "\0", "\0"); - } -} - -/* - * given the running mean of a centroid and a new point, this adds the new point to the aggregate - * using a sum that provides higher precision (we could provide much higher precision at the cost - * of allocating yet another array to keep correction terms for every dimension - */ -force_inline void aggregate_point(double *centroid_aggregation, double const * new_point, uint32_t const dimension) -{ - double local_correction = 0; - for (uint32_t d = 0; d < dimension; ++d) { - twoSum(centroid_aggregation[d], new_point[d], centroid_aggregation + d, &local_correction); - centroid_aggregation[d] += local_correction; + memset_s(centroid_coordinates, size_centroid_bytes, 0, size_centroid_bytes); } } @@ -421,178 +992,19 @@ force_inline void finish_centroid(double *centroid_aggregation, uint32_t const d } } -/* - * updates the minimum bounding box to contain the new given point - */ -force_inline void update_bbox(double * const bbox_min, double * const bbox_max, double const * point, - uint32_t const dimension) -{ - uint32_t current_dimension = 0; - double min = 0.; - double max = 0.; - double p = 0.; - - while (current_dimension < dimension) { - min = bbox_min[current_dimension]; - max = bbox_max[current_dimension]; - p = point[current_dimension]; - - /* - * we do cmovs instead of ifs (we could spare a comparison from time to time - * by using ifs, but we increase branch misprediction as well. thus we settle - * for the branchless option (more expensive than a hit, but cheaper than a miss) - */ - bbox_min[current_dimension] = p < min ? p : min; - bbox_max[current_dimension] = p > max ? p : max; - - ++current_dimension; - } -} - -bool init_kmeans(KMeansStateDescription *description, double *bbox_min, double *bbox_max, GSPoint const * batch, - uint32_t const num_slots, uint32_t const size_centroid_bytes) -{ - uint32_t const dimension = description->dimension; - bool const first_run = description->current_iteration == 0; - uint32_t current_slot = 0; - GSPoint const * current_point = nullptr; - double const * current_point_coordinates = nullptr; - - if (unlikely(num_slots == 0)) - return false; - - /* - * the very first slot of the batch is a bit special. 
observe that we have got here - * after the point has passed validity checks and thus it is safe to access its information - * directly - */ - current_point = batch + current_slot; - current_point_coordinates = reinterpret_cast(ARR_DATA_PTR(current_point->pg_coordinates)); - - /* - * in the very first run we set the coordinates of the bounding box as the ones - * of the very first point (we improve from there) - */ - if (unlikely(first_run)) { - /* - * no need to memset to zero because in the very first run they are freshly allocated - * with palloc0 - */ - errno_t rc = memcpy_s(bbox_min, size_centroid_bytes, current_point_coordinates, size_centroid_bytes); - securec_check(rc, "\0", "\0"); - rc = memcpy_s(bbox_max, size_centroid_bytes, current_point_coordinates, size_centroid_bytes); - securec_check(rc, "\0", "\0"); - description->current_iteration = 1; - } else { - update_bbox(bbox_min, bbox_max, current_point_coordinates, dimension); - } - - ++description->num_good_points; - - /* - * let's consider the rest of the batch - */ - while (likely(++current_slot < num_slots)) { - current_point = batch + current_slot; - current_point_coordinates = reinterpret_cast(ARR_DATA_PTR(current_point->pg_coordinates)); - - update_bbox(bbox_min, bbox_max, current_point_coordinates, dimension); - - ++description->num_good_points; - } - - /* - * done with the batch - */ - return true; -} - -/* - * we assume that all slots in the batch are non-null (guaranteed by the upper call) - * also, that the next set of centroids has been reset previous to the very first call - */ -void update_centroids(KMeansStateDescription *description, GSPoint *slots, uint32_t const num_slots, - uint32_t const idx_current_centroids, uint32_t const idx_next_centroids) -{ - uint32_t const dimension = description->dimension; - uint32_t current_slot = 0U; - uint32_t current_centroid = 0U; - uint32_t const num_centroids = description->num_centroids; - uint32_t closest_centroid = 0U; - GSPoint const * current_point = nullptr; - double const * current_point_coordinates = nullptr; - double *current_centroid_coordinates = nullptr; - double *next_centroid_coordinates = nullptr; - double dist = 0.; - auto min_dist = DBL_MAX; - Centroid *current_centroids = description->centroids[idx_current_centroids]; - Centroid *next_centroids = description->centroids[idx_next_centroids]; - Centroid *closest_centroid_ptr = nullptr; - Centroid *closest_centroid_next_ptr = nullptr; - bool min_dist_change = false; - IncrementalStatistics local_statistics; - - /* - * just in case, but this should not happen as we control the parent call - */ - if (unlikely(num_slots == 0)) - return; - - do { - current_centroid = 0U; - min_dist = DBL_MAX; - /* - * we obtain the coordinates of the current point - */ - current_point = slots + current_slot; - - /* - * this loops obtains the distance of the current point to all centroids and keeps the closest one - */ - while (likely(current_centroid < num_centroids)) { - current_centroid_coordinates = - reinterpret_cast(ARR_DATA_PTR(current_centroids[current_centroid].coordinates)); - current_point_coordinates = reinterpret_cast(ARR_DATA_PTR(current_point->pg_coordinates)); - - dist = description->distance(current_point_coordinates, current_centroid_coordinates, dimension); - min_dist_change = dist < min_dist; - closest_centroid = min_dist_change ? current_centroid : closest_centroid; - min_dist = min_dist_change ? 
dist : min_dist; - ++current_centroid; - } - - /* - * once the closest centroid has been detected we proceed with the aggregation and update - * of statistics - */ - local_statistics.setTotal(min_dist); - local_statistics.setMin(min_dist); - local_statistics.setMax(min_dist); - local_statistics.setPopulation(1ULL); - closest_centroid_ptr = current_centroids + closest_centroid; - closest_centroid_next_ptr = next_centroids + closest_centroid; - closest_centroid_ptr->statistics += local_statistics; - /* - * for the next iteration (if there is any) we have to obtain a new centroid, which will be - * the average of the points that we aggregate here - */ - next_centroid_coordinates = reinterpret_cast(ARR_DATA_PTR(closest_centroid_next_ptr->coordinates)); - aggregate_point(next_centroid_coordinates, current_point_coordinates, dimension); - } while (likely(++current_slot < num_slots)); -} - void merge_centroids(KMeansStateDescription *description, uint32_t const idx_current_centroids, - uint32_t const idx_next_centroids, uint32_t const size_centroid_bytes) + uint32_t const idx_next_centroids) { uint32_t const num_centroids = description->num_centroids; + uint32_t const size_centroid_bytes = description->size_centroids_bytes; uint32_t const dimension = description->dimension; - Centroid * const current_centroids = description->centroids[idx_current_centroids]; - Centroid * const next_centroids = description->centroids[idx_next_centroids]; + Centroid *const current_centroids = description->centroids[idx_current_centroids]; + Centroid *const next_centroids = description->centroids[idx_next_centroids]; Centroid *current_centroid = nullptr; Centroid *next_centroid = nullptr; double *current_centroid_coordinates = nullptr; double *next_centroid_coordinates = nullptr; - + for (uint32_t c = 0; c < num_centroids; ++c) { next_centroid = next_centroids + c; current_centroid = current_centroids + c; @@ -608,7 +1020,7 @@ void merge_centroids(KMeansStateDescription *description, uint32_t const idx_cur * and since no point was assigned to it, it has remained reset */ error_t rc = memcpy_s(next_centroid_coordinates, size_centroid_bytes, current_centroid_coordinates, - size_centroid_bytes); + size_centroid_bytes); securec_check(rc, "\0", "\0"); } else { finish_centroid(next_centroid_coordinates, dimension, current_centroid->statistics.getPopulation()); @@ -616,43 +1028,769 @@ void merge_centroids(KMeansStateDescription *description, uint32_t const idx_cur } } -bool finish_kmeans() +/* + * new API follows + * functions are defined in the order they are required during execution + */ +HyperparameterDefinition kmeans_hyperparameter_definitions[] = { + HYPERPARAMETER_ENUM("seeding_function", "Random++", kmeans_seeding_str, kmeans_seeding_str_size, + kmeans_seeding_getter, kmeans_seeding_setter, HyperparametersKMeans, seeding, HP_NO_AUTOML()), + HYPERPARAMETER_ENUM("distance_function", "L2_Squared", kmeans_distance_functions_str, + kmeans_distance_functions_str_size, kmeans_distance_function_getter, + kmeans_distance_function_setter, HyperparametersKMeans, distance, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("verbose", 0, 0, true, 2, true, HyperparametersKMeans, verbosity, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("num_centroids", 1, 1, true, 1000000, true, HyperparametersKMeans, num_centroids, + HP_NO_AUTOML()), + HYPERPARAMETER_INT4("max_iterations", 10, 1, true, INT32_MAX, true, HyperparametersKMeans, num_iterations, + HP_NO_AUTOML()), + HYPERPARAMETER_INT4("num_features", 0, 1, true, INT32_MAX, true, 
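+        /* default 0 means: infer the dimension from the first tuple read (late initialization) */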
HyperparametersKMeans, n_features, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("batch_size", 1000, 1, true, 1000000, true, HyperparametersKMeans, batch_size, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("seed", 0, 0, true, INT32_MAX, true, HyperparametersKMeans, external_seed, + HP_AUTOML_INT(1, INT32_MAX, 1, ProbabilityDistribution::UNIFORM_RANGE)), + HYPERPARAMETER_FLOAT8("tolerance", 0.00001, 0.0, false, 1.0, true, HyperparametersKMeans, tolerance, + HP_NO_AUTOML()), +}; + +force_inline static ModelHyperparameters *kmeans_make_hyperparameters(AlgorithmAPI *self) { - return true; + auto kmeans_hyperp = reinterpret_cast(palloc0(sizeof(HyperparametersKMeans))); + + return &kmeans_hyperp->mhp; } -ModelPredictor kmeans_predict_prepare(Model const * model) +force_inline static HyperparameterDefinition const *kmeans_get_hyperparameters(AlgorithmAPI *self, + int32_t *definitions_size) { - return reinterpret_cast(const_cast(model)); + Assert(definitions_size != nullptr); + *definitions_size = sizeof(kmeans_hyperparameter_definitions) / sizeof(HyperparameterDefinition); + return kmeans_hyperparameter_definitions; } -Datum kmeans_predict(ModelPredictor model, Datum *data, bool *nulls, Oid *types, int32_t nargs) +force_inline static void kmeans_update_hyperparameters(AlgorithmAPI *self, ModelHyperparameters *hyperp) { - auto kmeans_model = reinterpret_cast(model); + auto kmeans_hyperp = reinterpret_cast(hyperp); + + /* if the user-provided seed is 0 we take the current time but reset + * the higher order bits to be able to return this seed to the user + * as an int32_t so that the user can reproduce the run + * (observe that epoch 2^31 is around year 2038 and the shifts are + * until then useless) + */ + if (kmeans_hyperp->external_seed == 0ULL) + kmeans_hyperp->external_seed = (get_time_ms() << 33U) >> 33U; +} +/* + * this function initializes the algorithm + */ +static TrainModelState *kmeans_create(AlgorithmAPI *self, const TrainModel *pnode) +{ + if (pnode->configurations != 1) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("multiple hyper-parameter configurations for k-means are not yet supported"))); + + auto kmeans_hyperp = reinterpret_cast(pnode->hyperparameters[0]); + + uint32_t const num_centroids = kmeans_hyperp->num_centroids; + uint32_t const dimension = kmeans_hyperp->n_features; + uint32_t const batch_size = kmeans_hyperp->batch_size; + + // create state structure + auto kmeans_state = reinterpret_cast(makeNodeWithSize(TrainModelState, + sizeof(KMeansState))); + KMeansStateDescription *kmeans_state_description = &kmeans_state->description; + + kmeans_state->done = false; + + kmeans_state->description.centroids[0] = + reinterpret_cast(palloc0(sizeof(Centroid) * num_centroids)); + kmeans_state->description.centroids[1] = + reinterpret_cast(palloc0(sizeof(Centroid) * num_centroids)); + + /* + * if the dimension was provided by the user we use that one as a reference + * to initialize all fields that depend on it + * if later on the dimension of the tuple disagrees and error is reported + * if the dimension is not provided by the user then it will be set on the very + * first tuple we read (optimistically) + */ + if (dimension > 0) { + initialize_fields(kmeans_state_description, num_centroids, dimension); + kmeans_state_description->late_initialized = false; + } + + /* + * the following fields do not depend on the dimension and thus they are initialized + * here already + */ + kmeans_state_description->num_centroids = num_centroids; + + switch 
(kmeans_hyperp->distance) { + case KMEANS_L1: + kmeans_state_description->distance = l1; + break; + case KMEANS_L2: + kmeans_state_description->distance = l2; + break; + case KMEANS_LINF: + kmeans_state_description->distance = linf; + break; + case KMEANS_L2_SQUARED: + kmeans_state_description->distance = l2_squared; + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means init: no known distance function: %u", kmeans_hyperp->distance))); + } + + kmeans_state_description->batch = reinterpret_cast(palloc0(sizeof(GSPoint) * batch_size)); + + return &kmeans_state->tms; +} + +force_inline static void output_kmeans_state(Verbosity const verbosity, uint32_t const dimension, + uint64_t const num_good_points, uint64_t const num_dead_points, + double const local_elapsed_time) +{ + if (verbosity > NO_OUTPUT) { + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Initial statistics gathered:"))); + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Dimension used for computations: %u", dimension))); + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Number of valid points: %lu", num_good_points))); + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Number of dead points: %lu", num_dead_points))); + if (verbosity == VERBOSE_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Duration (s): %0.6lf", local_elapsed_time))); + } +} + +static void kmeans_deal_seeding_function(SeedingFunction seeding_function, uint32_t num_centroids, uint64_t num_points, + double sample_factor, uint32_t num_centroids_orig, Verbosity verbosity, + List **centroid_candidates_kmeans_bb, TrainModelState *pstate, + KMeansStateDescription *state_description, uint32_t const batch_size, + uint32_t idx_current_centroids, + uint32_t idx_next_centroids, std::mt19937_64 *prng) +{ + double sample_probability = 0.; + double oversampling = 0.; + struct timespec start_kmeans_round, finish_kmeans_round; + uint64_t num_candidates = 0ULL; + uint64_t prev_num_candidates = 0ULL; + double cost_kmeans_bb = 0.; + double op_error = 0.; + double local_elapsed_time = 0.; + uint32_t current_iteration_kmeans_bb = 0U; + + switch (seeding_function) { + case KMEANS_BB: + sample_probability = num_centroids >= num_points ? 
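+                /* corner case: k >= n forces every point in; otherwise the first pass samples at sample_factor / n */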
1.0 : sample_factor / num_points; + + /* + * the number of iterations of kmeans|| depends on the value of the initial solution + * if the solution is bad, we will iterate for longer time + * (one iteration consists of two data passes (one to sample candidates, and another + * to compute the cost of the (partial) solution + */ + oversampling = sample_factor * static_cast(num_centroids_orig); + if (verbosity > NO_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** k-means|| oversampling factor: %lf, " + "expected number of candidates per round: %lu", sample_factor, + static_cast(oversampling)))); + do { + clock_gettime(CLOCK_MONOTONIC, &start_kmeans_round); + /* + * this data pass will sample candidates + * (the probability of not choosing a single element is > 0 and thus we have to + * try until at least one candidate is found) + */ + + do { + prev_num_candidates = num_candidates; + pstate->rescan(pstate->callback_data); + *centroid_candidates_kmeans_bb = one_data_pass(pstate, state_description, batch_size, + idx_current_centroids, + idx_next_centroids, + true, sample_probability, + KMEANS_INITIAL_CENTROIDS_BB_SAMPLE, + *centroid_candidates_kmeans_bb, nullptr, prng); + num_candidates = *centroid_candidates_kmeans_bb ? (*centroid_candidates_kmeans_bb)->length : 0ULL; + } while (num_candidates <= prev_num_candidates); + + /* + * this data pass will compute the cost of the (partial) solution + */ + reset_weights(*centroid_candidates_kmeans_bb); + cost_kmeans_bb = 0.; + pstate->rescan(pstate->callback_data); + *centroid_candidates_kmeans_bb = one_data_pass(pstate, state_description, batch_size, + idx_current_centroids, + idx_next_centroids, + false, 0., KMEANS_INITIAL_CENTROIDS_BB_COMPUTE_COST, + *centroid_candidates_kmeans_bb, &cost_kmeans_bb, + nullptr); + + /* + * for the next iteration, sample probability changes according to the cost of the current + * solution + */ + twoDiv(oversampling, cost_kmeans_bb, &sample_probability, &op_error); + sample_probability += op_error; + + clock_gettime(CLOCK_MONOTONIC, &finish_kmeans_round); + local_elapsed_time = static_cast(finish_kmeans_round.tv_sec - start_kmeans_round.tv_sec); + local_elapsed_time += + static_cast(finish_kmeans_round.tv_nsec - start_kmeans_round.tv_nsec) / + 1000000000.0; + + if (verbosity == VERBOSE_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** k-means|| round %u stats: cost: %lf, total number of candidates: %u, " + "duration (s): %0.06lf", current_iteration_kmeans_bb + 1, + cost_kmeans_bb, (*centroid_candidates_kmeans_bb)->length, local_elapsed_time))); + } while (likely((++current_iteration_kmeans_bb < NUM_ITERATIONS_KMEANSBB) && + (num_candidates < num_points))); + + break; + case KMEANS_RANDOM_SEED: + /* + * the expected number of points to sample + */ + oversampling = sample_factor * static_cast(num_centroids_orig); + /* + * if the number of centroids is larger than the number of data points (corner case) + * each data point becomes a centroid. otherwise we over sample (all data points could + * be sampled) + */ + sample_probability = num_centroids >= num_points ? 
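+            /* k >= n: every point becomes a centroid; otherwise oversample at (sample_factor * k) / n */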
1.0 : oversampling / num_points;
+
+            if (verbosity > NO_OUTPUT)
+                ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("*** random++ oversampling factor: %lf, expected number of candidates: %lu",
+                        sample_factor, static_cast<uint64_t>(oversampling))));
+
+            do {
+                pstate->rescan(pstate->callback_data);
+                *centroid_candidates_kmeans_bb = one_data_pass(pstate, state_description, batch_size,
+                    idx_current_centroids, idx_next_centroids,
+                    true, sample_probability,
+                    KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE,
+                    *centroid_candidates_kmeans_bb, nullptr, prng);
+                num_candidates = *centroid_candidates_kmeans_bb ? (*centroid_candidates_kmeans_bb)->length : 0ULL;
+            } while ((num_candidates < num_points) && (num_candidates < oversampling));
+            reset_weights(*centroid_candidates_kmeans_bb);
+            break;
+        default:
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("k-means exec: unknown seeding function: %u", seeding_function)));
+    }
+}
+
+void kmeans_create_model(KMeansState *kmeans_state, Model *model)
+{
+    auto kmeans_hyperp =
+        reinterpret_cast<HyperparametersKMeans const *>(kmeans_state->tms.config->hyperparameters[0]);
+    uint32_t const num_centroids = kmeans_state->description.num_centroids;
+    uint32_t const num_points = kmeans_state->description.num_good_points;
+    uint32_t const actual_num_centroids = num_centroids > num_points ? num_points : num_centroids;
+    uint32_t const dimension = kmeans_state->description.dimension;
+    uint32_t idx_current_centroids = 1U - (kmeans_state->description.current_iteration & 1U);
+    bool const late_initialization = kmeans_state->description.late_initialized;
+    uint64_t cluster_size = 0ULL;
+    auto *score = reinterpret_cast<TrainingScore *>(palloc0(sizeof(TrainingScore)));
+    Centroid *current_centroid = nullptr;
+    PCentroid *current_wh_centroid = nullptr;
+    double *centroid_coordinates_data = nullptr;
+    double objective_function = 0.;
+    double local_error = 0.;
+    double total_error = 0.;
+    uint32_t const size_centroid_bytes = kmeans_state->description.size_centroids_bytes;
+    errno_t errorno = EOK;
+
+    /*
+     * this is generic information about the model. 
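+     * (for reference, the packed layout built below is
+     *  [ModelKMeansV01][PCentroid x actual_num_centroids][double x actual_num_centroids * dimension],
+     *  with coordinates_offset = sizeof(ModelKMeansV01) + sizeof(PCentroid) * actual_num_centroids)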
specific information (about k-means) is set down below + */ + model->return_type = INT4OID; + model->pre_time_secs = kmeans_state->description.seeding_time; + model->exec_time_secs = kmeans_state->description.execution_time; + model->processed_tuples = kmeans_state->description.num_good_points; + model->discarded_tuples = kmeans_state->description.num_dead_points; + + /* + * in order to serialize the model, it has to be stored in a contiguous memory chunk, + * thus we compute the required space as follows: + * sizeof(ModelKMeans) + (sizeof(WHCentroid) * actual_num_centroids) + + * (sizeof(double) * dimension * actual_num_centroids) + */ + Size const coordinates_array_offset = sizeof(ModelKMeansV01) + (sizeof(PCentroid) * actual_num_centroids); + Size const model_kmeans_size = + coordinates_array_offset + (kmeans_state->description.size_centroids_bytes * actual_num_centroids); + + auto model_kmeans = reinterpret_cast(palloc0(model_kmeans_size)); + + /* + * filling in the raw model data (to be serialized) + */ + model->data.size = model_kmeans_size; + model->data.raw_data = model_kmeans; + model->data.version = DB4AI_MODEL_V01; + + /* + * this is particular information about k-means (centroids are set down below) + */ + model_kmeans->actual_num_centroids = actual_num_centroids; + model_kmeans->dimension = dimension; + model_kmeans->distance_function_id = kmeans_hyperp->distance; + model_kmeans->coordinates_offset = coordinates_array_offset; + model_kmeans->num_actual_iterations = kmeans_state->description.actual_num_iterations; + + /* + * this fields are deprecated and will disappear in the future + */ + model->train_info = nullptr; + model->num_actual_iterations = 0U; + model->weights = 0U; + model->model_data = 0U; + + /* + * on late initialization, the dimension was set at runtime and thus the corresponding + * hyper-parameter has to be updated + */ + if (late_initialization) + update_model_hyperparameter(model->memory_context, model->hyperparameters, "num_features", INT4OID, + Int32GetDatum(dimension)); + + /* + * we fill in the information of every centroid + * the array of centroids start right after the last element of model_kmeans + * + * all coordinates are stored at the end of ModelKMeans (after the array of WHCentroid) + */ + model_kmeans->centroids = + reinterpret_cast(reinterpret_cast(model_kmeans) + sizeof(ModelKMeansV01)); + centroid_coordinates_data = + reinterpret_cast(reinterpret_cast(model_kmeans) + coordinates_array_offset); + uint32_t centroid_coordinates_offset = 0U; + for (uint32_t current_centroid_idx = 0; current_centroid_idx < actual_num_centroids; ++current_centroid_idx) { + current_centroid = kmeans_state->description.centroids[idx_current_centroids] + current_centroid_idx; + cluster_size = current_centroid->statistics.getPopulation(); + + current_wh_centroid = model_kmeans->centroids + current_centroid_idx; + + current_wh_centroid->id = current_centroid->id; + current_wh_centroid->objective_function = current_centroid->statistics.getTotal(); + current_wh_centroid->avg_distance = current_centroid->statistics.getEmpiricalMean(); + current_wh_centroid->min_distance = cluster_size > 0 ? 
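+        /* an empty cluster has no meaningful minimum distance, so 0 is reported */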
current_centroid->statistics.getMin() : 0.; + current_wh_centroid->max_distance = current_centroid->statistics.getMax(); + current_wh_centroid->std_dev_distance = current_centroid->statistics.getEmpiricalStdDev(); + current_wh_centroid->cluster_size = cluster_size; + current_wh_centroid->coordinates = centroid_coordinates_data + centroid_coordinates_offset; + + errorno = memcpy_s(current_wh_centroid->coordinates, size_centroid_bytes, + ARR_DATA_PTR(current_centroid->coordinates), size_centroid_bytes); + securec_check(errorno, "\0", "\0"); + centroid_coordinates_offset += dimension; + + twoSum(objective_function, current_wh_centroid->objective_function, &objective_function, &local_error); + total_error += local_error; + } + + /* + * finally, the loss function (objective function) + * observe that hyper-parameters are set for all models the same + */ + score->value = objective_function + total_error; + score->name = kmeans_distance_to_string(kmeans_hyperp->distance); + model->scores = lappend(model->scores, score); +} + +/* + * this executes the k-means "training" by executing multiple scans through the data + * until convergence can be declared. + * the operator works in stages: + * 1) obtain general statistics about the input data (one data pass), + * 2) execute a seeding method (random++ or kmeans||) (at least one data pass but not more than 10), + * 3) run Lloyd's algorithm (at least one data pass) + */ +static void kmeans_run(AlgorithmAPI *self, TrainModelState *pstate, Model **models) +{ + /* + * get information from the node + */ + Assert(pstate->finished == 0); + Assert(pstate->config->configurations == 1); + + auto kmeans_state = reinterpret_cast(pstate); + auto kmeans_hyperp = + const_cast( + reinterpret_cast(pstate->config->hyperparameters[0])); + KMeansStateDescription& state_description = kmeans_state->description; + uint32_t const max_num_iterations = kmeans_hyperp->num_iterations; + uint32_t const batch_size = kmeans_hyperp->batch_size; + uint32_t idx_current_centroids = 0U; + uint32_t idx_next_centroids = 0U; + uint32_t num_centroids = state_description.num_centroids; // this field is always initialized + uint32_t const num_centroids_orig = num_centroids; + Verbosity const verbosity = kmeans_hyperp->verbosity; + bool const late_initialization = !state_description.initialized; + SeedingFunction const seeding_function = kmeans_hyperp->seeding; + IncrementalStatistics *prev_solution_statistics = nullptr; + IncrementalStatistics *current_solution_statistics = nullptr; + double cost_fraction = 0.; + double cost_fraction_correction = 0.; + double local_elapsed_time = 0.; + double total_time = 0.; + double const sample_factor = num_centroids < 1000000 ? 4. : 2.; + double const tolerance = kmeans_hyperp->tolerance; + List *centroid_candidates_kmeans_bb = nullptr; + struct timespec start_time, finish_time, start_kmeans_round, finish_kmeans_round; + uint64_t num_points = 0ULL; + uint64_t seed = kmeans_hyperp->external_seed; + + Assert(seed != 0); + + uint64_t const external_seed = seed; + /* + * internal seed obtained from random.org. 
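The seed plumbing above XORs the user-visible seed with a fixed internal constant before seeding the PRNG; a compact sketch of the scheme (the constant is copied from this file, while the Bernoulli sampling helper is an assumption based on the sample_probability driver code):

```
#include <cstdint>
#include <random>

/* combine the (possibly time-derived) user seed with a fixed internal
 * constant; reporting only the user seed is enough to reproduce a run */
std::mt19937_64 make_kmeans_prng(uint64_t user_seed)
{
    uint64_t const internal_seed = 0x274066DB9441E851ULL; /* constant from this file */
    return std::mt19937_64(user_seed ^ internal_seed);
}

/* typical use during seeding: keep a point as a candidate with the given probability */
bool sample_point(std::mt19937_64 &prng, double sample_probability)
{
    std::uniform_real_distribution<double> unif(0.0, 1.0);
    return unif(prng) < sample_probability;
}
```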
do not change it because + * it will (most probably) change the overall seed used and results + * will not be reproducible + */ + uint64_t const internal_seed = 0x274066DB9441E851ULL; + seed ^= internal_seed; + + /* + * high-quality prng (based Mersenne primes) + * nothing of this sort is currently available in the system + */ + std::mt19937_64 prng(seed); + + // check if training is already finished, if so we return the next centroid + if (!kmeans_state->done) { + /* + * we have to see that the column we are passed on is of type array + */ + Oid oidtype = pstate->tuple.typid[0]; + + if (unlikely((oidtype != FLOAT8ARRAYOID))) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means exec: data is not of type float8 (double precision) array"))); + + if (pstate->tuple.ncolumns != 1) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means exec: relation should contain only a single attribute " + "(point coordinates in a double precision array)"))); + + /* + * on late initialization we propagate the dimension properly + */ + if (late_initialization) { + kmeans_hyperp->n_features = state_description.dimension; + kmeans_state->description.late_initialized = true; + } + + /* + * Before iterating we have to find an initial set of centroids (seeds) + * either random seeds or using kmeans||, this is one table scan on which we + * can also obtain the diameter of the bounding box (for normalization for example) + * from this scan we can also obtain the coordinates of the bounding box + */ + clock_gettime(CLOCK_MONOTONIC, &start_time); + one_data_pass(pstate, &state_description, batch_size, + 0, 1, false, 0., KMEANS_INIT, nullptr, nullptr, nullptr); + clock_gettime(CLOCK_MONOTONIC, &finish_time); + + local_elapsed_time = static_cast(finish_time.tv_sec - start_time.tv_sec); + local_elapsed_time += static_cast(finish_time.tv_nsec - start_time.tv_nsec) / 1000000000.0; + total_time += local_elapsed_time; + + output_kmeans_state(verbosity, state_description.dimension, state_description.num_good_points, + state_description.num_dead_points, local_elapsed_time); + + num_points = state_description.num_good_points; + /* + * if the number of centroids is larger than the number of data points we save useless computations + * by keeping track of the actual number of centroids that can be realized, in the very end + * we restore the original number of centroids + */ + state_description.num_centroids = num_centroids_orig > num_points ? num_points : num_centroids_orig; + num_centroids = state_description.num_centroids; + + if (unlikely(num_points == 0)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means exec: no valid point found (no input array seems to be a one-dimensional array, " + "or no point seems to be fully dimensional (perhaps all points have a null dimension?))"))); + + /* + * it is time to produce an initial set of centroids to start with + */ + clock_gettime(CLOCK_MONOTONIC, &start_time); + + idx_current_centroids = state_description.current_iteration & 1U; + idx_next_centroids = 1U - idx_current_centroids; + + /* + * if the number of centers is larger than the number of data points we choose + * all data points (we have no other option). this is a non-sense corner case. 
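The kmeans|| resampling rule the surrounding driver implements can be summarized as follows (a sketch; the per-point step happens inside one_data_pass, which is not part of this hunk):

```
#include <algorithm>

/* kmeans|| (Bahmani et al.): a point x is kept as a candidate with
 * probability min(1, l * d(x) / phi), where l = sample_factor * k is the
 * oversampling factor, d(x) is the distance of x to the partial solution,
 * and phi is the current total cost. The driver above recomputes l / phi
 * once per round with compensated division (twoDiv). */
double kmeans_bb_sample_probability(double oversampling, double phi, double dist)
{
    return std::min(1.0, oversampling * dist / phi);
}
```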
+ * for the time being we restrict to k's that fit in memory, later we can + * lift that restriction by spooling centroids to a file + * + * for random production we sample with probability (sample_factor * k)/n + * + * for kmeans|| we sample the very first points with probability sample_factor / n and + * later with probability (sample_factor * k * d(x))/sum(d(y)) + */ + kmeans_deal_seeding_function(seeding_function, num_centroids, num_points, sample_factor, num_centroids_orig, + verbosity, ¢roid_candidates_kmeans_bb, pstate, &state_description, + batch_size, idx_current_centroids, idx_next_centroids, &prng); + + /* + * once the set of candidates (> k) has been gathered, we produce k initial centroids using + * (weighted) kmeans++ + * observe that the output of this function are the k centroids stored in their place + * and the list of candidates is freed up inside the function, thus accessing the list + * is illegal + */ + if (verbosity > NO_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** k-means++ begin: consolidating %u candidates to %u centroid(s)", + centroid_candidates_kmeans_bb->length, num_centroids))); + + clock_gettime(CLOCK_MONOTONIC, &start_kmeans_round); + + centroid_candidates_kmeans_bb = kmeanspp(&state_description, centroid_candidates_kmeans_bb, + idx_current_centroids, &prng); + + clock_gettime(CLOCK_MONOTONIC, &finish_kmeans_round); + local_elapsed_time = static_cast(finish_kmeans_round.tv_sec - start_kmeans_round.tv_sec); + local_elapsed_time += + static_cast(finish_kmeans_round.tv_nsec - start_kmeans_round.tv_nsec) / + 1000000000.0; + if (verbosity == VERBOSE_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** k-means++ ended: duration (s): %0.06lf", local_elapsed_time))); + + Assert(!centroid_candidates_kmeans_bb); + + clock_gettime(CLOCK_MONOTONIC, &finish_time); + + local_elapsed_time = static_cast(finish_time.tv_sec - start_time.tv_sec); + local_elapsed_time += static_cast(finish_time.tv_nsec - start_time.tv_nsec) / 1000000000.0; + total_time += local_elapsed_time; + state_description.seeding_time = local_elapsed_time; + + if (verbosity == VERBOSE_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Seed centroids constructed (%lu, %u): duration (s): %0.6lf", + num_points, state_description.current_centroid, + state_description.seeding_time))); + + // we reset current_centroid so that later each centroid can be output + state_description.current_centroid = 0; + + /* + * Scan the sub-plan and feed all the tuples to kmeans (every single iteration). 
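The do-loop that follows is a batched, instrumented Lloyd iteration. Stripped of the batching, timing, and compensated arithmetic, one round reduces to this (a plain-STL sketch, not the code below):

```
#include <vector>
#include <cfloat>
#include <cstddef>

/* one Lloyd round over points with k non-empty centroids of dimension dim:
 * assign each point to its nearest centroid (L2 squared, the default
 * distance here), then replace each centroid by the mean of its points */
void lloyd_round(const std::vector<std::vector<double>> &points,
                 std::vector<std::vector<double>> &centroids)
{
    size_t const k = centroids.size();
    size_t const dim = centroids[0].size();
    std::vector<std::vector<double>> sums(k, std::vector<double>(dim, 0.0));
    std::vector<size_t> counts(k, 0);

    for (auto const &p : points) {
        size_t best = 0;
        double best_dist = DBL_MAX;
        for (size_t c = 0; c < k; ++c) {
            double d = 0.0;
            for (size_t i = 0; i < dim; ++i) {
                double diff = p[i] - centroids[c][i];
                d += diff * diff;
            }
            if (d < best_dist) {
                best_dist = d;
                best = c;
            }
        }
        for (size_t i = 0; i < dim; ++i)
            sums[best][i] += p[i];
        ++counts[best];
    }
    for (size_t c = 0; c < k; ++c)
        if (counts[c] > 0) /* empty clusters keep their old centroid */
            for (size_t i = 0; i < dim; ++i)
                centroids[c][i] = sums[c][i] / counts[c];
}
```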
+ * for these scans we will do batching + */ + do { + clock_gettime(CLOCK_MONOTONIC, &start_time); + idx_next_centroids = 1U - idx_current_centroids; + prev_solution_statistics = state_description.solution_statistics + idx_next_centroids; + + /* + * in this iteration we reset the set of next centroids start with a clean set for aggregation + */ + reset_centroids(&state_description, idx_next_centroids); + + /* + * every iteration we have to reset the sub-plan to be able to re-scan it + */ + pstate->rescan(pstate->callback_data); + one_data_pass(pstate, &state_description, batch_size, idx_current_centroids, + idx_next_centroids, false, 0., KMEANS_LLOYD, nullptr, nullptr, nullptr); + + /* + * let's produce the new set of centroids for the next iteration + * (this should be executed at the coordinator in a distributed environment + */ + merge_centroids(&state_description, idx_current_centroids, idx_next_centroids); + + compute_cost(&state_description, idx_current_centroids); + current_solution_statistics = state_description.solution_statistics + idx_current_centroids; + + if (unlikely(state_description.current_iteration == 1)) { + cost_fraction = 1.; + } else { + twoDiv(prev_solution_statistics->getTotal(), current_solution_statistics->getTotal(), + &cost_fraction, &cost_fraction_correction); + cost_fraction += cost_fraction_correction; + twoDiff(cost_fraction, 1.0, &cost_fraction, &cost_fraction_correction); + cost_fraction += cost_fraction_correction; + } + + idx_current_centroids = idx_next_centroids; + + clock_gettime(CLOCK_MONOTONIC, &finish_time); + + local_elapsed_time = static_cast(finish_time.tv_sec - start_time.tv_sec); + local_elapsed_time += static_cast(finish_time.tv_nsec - start_time.tv_nsec) / 1000000000.0; + total_time += local_elapsed_time; + + if (verbosity == VERBOSE_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** iteration %u: duration (s): %0.6lf, " + "total relevant population %lu, " + "total cost %0.6lf, " + "average distance %0.6lf, " + "min distance %0.6lf, " + "max distance %0.6lf, " + "standard deviation of distances %0.6lf, " + "cost delta: %0.6lf", state_description.current_iteration, local_elapsed_time, + current_solution_statistics->getPopulation(), + current_solution_statistics->getTotal(), current_solution_statistics->getEmpiricalMean(), + current_solution_statistics->getMin(), current_solution_statistics->getMax(), + current_solution_statistics->getEmpiricalStdDev(), cost_fraction))); + } while ((++state_description.current_iteration <= max_num_iterations) + && (cost_fraction >= tolerance) && (num_points > num_centroids)); + + state_description.execution_time = total_time; + state_description.actual_num_iterations = state_description.current_iteration - 1; + + if (verbosity > NO_OUTPUT) { + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Number of centroids constructed: %u", num_centroids))); + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Value of global objective function: %0.6lf", current_solution_statistics->getTotal()))); + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Seed: %lu", external_seed))); + if (verbosity == VERBOSE_OUTPUT) + ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("*** Total duration time (s): %0.6lf", state_description.execution_time))); + } + + /* + * finally set the finished flag to true + */ + 
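+    /* once set, re-executions of this node skip training and go straight to model packaging */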
kmeans_state->done = true; + + /* + * we restore the original number of centroids + */ + state_description.num_centroids = num_centroids_orig; + } + + // number of configurations already run + ++pstate->finished; + + Model *model = models[0]; + MemoryContext oldcxt = MemoryContextSwitchTo(model->memory_context); + + kmeans_create_model(kmeans_state, model); + + model->status = ERRCODE_SUCCESSFUL_COMPLETION; + MemoryContextSwitchTo(oldcxt); +} + +/* + * this function frees up all the storage required by the algorithm. + * at this point the upper layer has made persistent the model and + * this information is not required anymore + */ +static void kmeans_end(AlgorithmAPI *self, TrainModelState *pstate) +{ + auto kmeans_state_node = reinterpret_cast(pstate); + + if (kmeans_state_node->description.initialized) { + uint32_t const num_centroids = kmeans_state_node->description.num_centroids; + + for (uint32_t c = 0; c < num_centroids; ++c) { + pfree(kmeans_state_node->description.centroids[0][c].coordinates); + pfree(kmeans_state_node->description.centroids[1][c].coordinates); + } + + pfree(kmeans_state_node->description.centroids[0]); + pfree(kmeans_state_node->description.centroids[1]); + pfree(kmeans_state_node->description.batch); + } +} + +force_inline static MetricML *kmeans_metrics_loss(AlgorithmAPI *self, int *num_metrics) +{ + elog(ERROR, "kmeans_metrics_loss"); + return nullptr; +} + +void kmeans_set_references(ModelKMeansV01 *model_kmeans) +{ + uint32_t const actual_num_centroids = model_kmeans->actual_num_centroids; + /* + * we have to fix the internal pointers to point to the right locations (as memory has changed) + * observe that this pointers are pretty much offsets into the same memory chunk + * + * the array of centroids start right after the last element of model_kmeans (we fix the address) + */ + model_kmeans->centroids = + reinterpret_cast(reinterpret_cast(model_kmeans) + sizeof(ModelKMeansV01)); + /* + * the address of the coordinates of each centroid must be fixed as well + */ + PCentroid *current_centroid = nullptr; + double *centroid_coordinates_data = + reinterpret_cast(reinterpret_cast(model_kmeans) + model_kmeans->coordinates_offset); + uint32_t centroid_coordinates_offset = 0U; + for (uint32_t c = 0; c < actual_num_centroids; ++c) { + current_centroid = model_kmeans->centroids + c; + current_centroid->coordinates = centroid_coordinates_data + centroid_coordinates_offset; + centroid_coordinates_offset += model_kmeans->dimension; + } +} + +ModelPredictor kmeans_predict_prepare(AlgorithmAPI *self, SerializedModel const *model, Oid return_type) +{ + if (unlikely(!model)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means predict prepare: model cannot be null"))); + + if (unlikely(model->version != DB4AI_MODEL_V01)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means predict prepare: currently only model V01 is supported"))); + + auto model_kmeans = reinterpret_cast(model->raw_data); + + kmeans_set_references(model_kmeans); + + return reinterpret_cast(model_kmeans); +} + +static Datum kmeans_predict(AlgorithmAPI *, ModelPredictor model, Datum *data, bool *nulls, Oid *types, int32_t nargs) +{ + auto kmeans_model = reinterpret_cast(model); + /* * sanity checks */ if (unlikely(nargs != 1)) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means predict: only a single attribute containing the coordinates is accepted"))); - + 
errmsg("k-means predict: only a single attribute containing the coordinates is accepted"))); + if (unlikely(types[0] != FLOAT8ARRAYOID)) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means predict: only double precision array of coordinates is accepted"))); - + errmsg("k-means predict: only double precision array of coordinates is accepted"))); + if (unlikely(nulls[0])) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means predict: array of coordinates cannot be null"))); - + errmsg("k-means predict: array of coordinates cannot be null"))); + uint32_t const num_centroids = kmeans_model->actual_num_centroids; uint32_t const dimension = kmeans_model->dimension; double (*distance)(double const *, double const *, uint32_t const) = nullptr; - + if (unlikely(num_centroids == 0)) ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means predict: number of centroids must be positive"))); - + errmsg("k-means predict: number of centroids must be positive"))); + switch (kmeans_model->distance_function_id) { case KMEANS_L1: distance = l1; @@ -668,24 +1806,25 @@ Datum kmeans_predict(ModelPredictor model, Datum *data, bool *nulls, Oid *types, break; default: ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means predict: distance function id %u not recognized", kmeans_model->distance_function_id))); + errmsg("k-means predict: distance function id %u not recognized", + kmeans_model->distance_function_id))); } - - WHCentroid *current_centroid = nullptr; - WHCentroid *closest_centroid = nullptr; + + PCentroid *current_centroid = nullptr; + PCentroid *closest_centroid = nullptr; auto input_point_pg_array = DatumGetArrayTypeP(data[0]); auto min_distance = DBL_MAX; double local_distance = 0.; - double const * input_point_coordinates = nullptr; + double const *input_point_coordinates = nullptr; int32_t closest_centroid_id = -1; bool const valid_input = verify_pgarray(input_point_pg_array, dimension); bool min_distance_changed = false; - + if (unlikely(!valid_input)) return Int32GetDatum(closest_centroid_id); - + input_point_coordinates = reinterpret_cast(ARR_DATA_PTR(input_point_pg_array)); - + for (uint32_t c = 0; c < num_centroids; ++c) { current_centroid = kmeans_model->centroids + c; local_distance = distance(input_point_coordinates, current_centroid->coordinates, dimension); @@ -693,11 +1832,183 @@ Datum kmeans_predict(ModelPredictor model, Datum *data, bool *nulls, Oid *types, min_distance = min_distance_changed ? local_distance : min_distance; closest_centroid = min_distance_changed ? 
current_centroid : closest_centroid; } - + closest_centroid_id = closest_centroid->id; - + /* * for the time being there is no other way to get to the computed distance other than by a log */ return Int32GetDatum(closest_centroid_id); } + +/* + * this is a faster version of construct_md_array in which we use knowledge we have + * to speed up computations + */ +ArrayType *construct_empty_md_array(uint32_t const num_centroids, uint32_t const dimension) +{ + ArrayType *result = NULL; + int32_t ndims = 0; + uint32_t dims[2] = {0U}; + uint32_t const lbs[2] = {1U, 1U}; + + if (num_centroids == 0) { + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("construct_empty_md_array: first dimension must be larger than 0"))); + } else if (num_centroids == 1) { + ndims = 1; + dims[0] = {dimension}; + } else { + ndims = 2; + dims[0] = num_centroids; + dims[1] = dimension; + } + check_hyper_bounds(num_centroids, dimension, "num_centroids"); + check_hyper_bounds(num_centroids * dimension, sizeof(float8), "num_centroids"); + + int32_t nbytes = num_centroids * dimension * sizeof(float8); + nbytes += ARR_OVERHEAD_NONULLS(ndims); + result = reinterpret_cast(palloc0(nbytes)); + SET_VARSIZE(result, nbytes); + result->ndim = ndims; + result->dataoffset = 0; /* marker for no null bitmap */ + result->elemtype = FLOAT8OID; + errno_t errorno = EOK; + errorno = memcpy_s(ARR_DIMS(result), ndims * sizeof(int32_t), dims, ndims * sizeof(int32_t)); + securec_check(errorno, "\0", "\0"); + errorno = memcpy_s(ARR_LBOUND(result), ndims * sizeof(int32_t), lbs, ndims * sizeof(int32_t)); + securec_check(errorno, "\0", "\0"); + + return result; +} + +/* + * used in EXPLAIN MODEL + */ +List *kmeans_explain(AlgorithmAPI *self, SerializedModel const *model, Oid return_type) +{ + if (unlikely(!model)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means explain: model cannot be null"))); + + if (unlikely(model->version != DB4AI_MODEL_V01)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("k-means explain: currently only model V01 is supported"))); + + List *model_info = nullptr; + + auto model_kmeans = reinterpret_cast(model->raw_data); + kmeans_set_references(model_kmeans); + + uint32_t const actual_num_centroids = model_kmeans->actual_num_centroids; + uint32_t const dimension = model_kmeans->dimension; + + TrainingInfo *actual_num_centroids_info = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *num_iterations_info = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + + actual_num_centroids_info->name = "Actual number of centroids"; + num_iterations_info->name = "Number of iterations"; + + actual_num_centroids_info->type = INT4OID; + num_iterations_info->type = INT4OID; + + actual_num_centroids_info->value = Int32GetDatum(model_kmeans->actual_num_centroids); + num_iterations_info->value = Int32GetDatum(model_kmeans->num_actual_iterations); + + model_info = lappend(model_info, actual_num_centroids_info); + model_info = lappend(model_info, num_iterations_info); + + PCentroid *current_wh_centroid = nullptr; + ArrayType *centroid_coordinates_array = nullptr; + double *centroid_coordinates_data = nullptr; + uint32_t const size_centroid_bytes = sizeof(double) * dimension; + error_t errorno = EOK; + for (uint32_t current_centroid_idx = 0; current_centroid_idx < actual_num_centroids; ++current_centroid_idx) { + current_wh_centroid = model_kmeans->centroids + current_centroid_idx; + /* + * opening and 
closing the eigenvector group + * if both open_group and close_group are set, they will be ignored) + */ + TrainingInfo *centroid_open_group = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + centroid_open_group->open_group = true; + TrainingInfo *centroid_close_group = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + centroid_close_group->close_group = true; + + // these define the properties of a centroid + TrainingInfo *centroid_id = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_coordinates = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_loss_function = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_max_distance = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_min_distance = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_avg_distance = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_std_dev_distance = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + TrainingInfo *centroid_cluster_size = (TrainingInfo *)palloc0(sizeof(TrainingInfo)); + + // opening and closing a group must have exactly the same name (otherwise fire!) + centroid_open_group->name = "Centroid"; + centroid_close_group->name = "Centroid"; + + // the name of each property + centroid_id->name = "ID"; + centroid_coordinates->name = "Coordinates"; + centroid_loss_function->name = "Objective function"; + centroid_max_distance->name = "Maximum cluster distance"; + centroid_min_distance->name = "Minimum cluster distance"; + centroid_avg_distance->name = "Average cluster distance"; + centroid_std_dev_distance->name = "Standard deviation of cluster distances"; + centroid_cluster_size->name = "Cluster size"; + + // the type of each property + centroid_id->type = INT4OID; + centroid_coordinates->type = FLOAT8ARRAYOID; + centroid_loss_function->type = FLOAT8OID; + centroid_max_distance->type = FLOAT8OID; + centroid_min_distance->type = FLOAT8OID; + centroid_avg_distance->type = FLOAT8OID; + centroid_std_dev_distance->type = FLOAT8OID; + centroid_cluster_size->type = INT8OID; + + // the coordinates of a centroid + centroid_coordinates_array = construct_empty_md_array(1, dimension); + centroid_coordinates_data = reinterpret_cast(ARR_DATA_PTR(centroid_coordinates_array)); + + // filling in the data + centroid_id->value = Int32GetDatum(current_wh_centroid->id); + centroid_coordinates->value = PointerGetDatum(centroid_coordinates_array); + centroid_loss_function->value = Float8GetDatumFast(current_wh_centroid->objective_function); + centroid_max_distance->value = Float8GetDatumFast(current_wh_centroid->max_distance); + centroid_min_distance->value = Float8GetDatumFast(current_wh_centroid->min_distance); + centroid_avg_distance->value = Float8GetDatumFast(current_wh_centroid->avg_distance); + centroid_std_dev_distance->value = Float8GetDatumFast(current_wh_centroid->std_dev_distance); + centroid_cluster_size->value = Int64GetDatumFast(current_wh_centroid->cluster_size); + + // filling in the coordinates + errorno = memcpy_s(centroid_coordinates_data, size_centroid_bytes, current_wh_centroid->coordinates, + size_centroid_bytes); + securec_check(errorno, "\0", "\0"); + + /* + * appending the properties to the list of properties + * OBSERVE that open and close elements must be well positioned! 
(at the beginning and end of the information) + */ + model_info = lappend(model_info, centroid_open_group); + model_info = lappend(model_info, centroid_id); + model_info = lappend(model_info, centroid_cluster_size); + model_info = lappend(model_info, centroid_coordinates); + model_info = lappend(model_info, centroid_loss_function); + model_info = lappend(model_info, centroid_max_distance); + model_info = lappend(model_info, centroid_min_distance); + model_info = lappend(model_info, centroid_avg_distance); + model_info = lappend(model_info, centroid_std_dev_distance); + model_info = lappend(model_info, centroid_close_group); + } + + return model_info; +} + +KMeans kmeans = { + // AlgorithmAPI + {KMEANS, "kmeans", ALGORITHM_ML_UNSUPERVISED | ALGORITHM_ML_RESCANS_DATA, kmeans_metrics_loss, + kmeans_get_hyperparameters, kmeans_make_hyperparameters, kmeans_update_hyperparameters, kmeans_create, kmeans_run, + kmeans_end, kmeans_predict_prepare, kmeans_predict, kmeans_explain}, +}; \ No newline at end of file diff --git a/src/gausskernel/dbmind/db4ai/executor/matrix.cpp b/src/gausskernel/dbmind/db4ai/executor/matrix.cpp new file mode 100644 index 000000000..60144d1af --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/matrix.cpp @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ *--------------------------------------------------------------------------------------- + * + * matrix.cpp + * + * IDENTIFICATION + * src/gausskernel/dbmind/db4ai/executor/matrix.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "db4ai/matrix.h" + +#define MATRIX_LIMITED_OUTPUT 30 + +// using Box-Muller implementation +void matrix_init_random_gaussian(Matrix *matrix, int rows, int columns, float8 mu, float8 sigma, int seed) +{ + matrix_init(matrix, rows, columns); + + const float8 two_pi = 2.0 * M_PI; + struct drand48_data rnd; + srand48_r(seed, &rnd); + + float8 u1, u2; + float8 *pd = matrix->data; + float8 *pdx = pd + rows * columns; + while (pd < pdx) { + do { + drand48_r(&rnd, &u1); + } while (u1 <= FLT_EPSILON); + + drand48_r(&rnd, &u2); + u2 *= two_pi; + + float8 mag = sigma * sqrt(-2.0 * log(u1)); + *pd++ = mag * cos(u2) + mu; + if (pd < pdx) + *pd++ = mag * sin(u2) + mu; + } +} + +void matrix_init_kernel_gaussian(int features, int components, float8 gamma, int seed, Matrix *weights, Matrix *offsets) +{ + matrix_init_random_gaussian(weights, features, components, 0.0, sqrt(2.0 * gamma), seed); + matrix_init_random_uniform(offsets, components, 1, 0.0, 2.0 * M_PI, seed+1); +} + +void matrix_transform_kernel_gaussian(const Matrix *input, const Matrix *weights, const Matrix *offsets, Matrix *output) +{ + int components = weights->columns; + + Assert(input->rows == weights->rows); + Assert(input->columns == 1); + Assert(components == offsets->rows); + Assert(offsets->columns == 1); + Assert(output->rows == components); + Assert(output->columns == 1); + + Matrix t_in, t_out; + matrix_init_transpose(&t_in, input); + matrix_init_transpose(&t_out, output); + + matrix_mult(&t_in, weights, &t_out); + + matrix_release(&t_in); + matrix_release(&t_out); + + matrix_add(output, offsets); + matrix_cos(output); + matrix_mult_scalar(output, sqrt(2.0 / components)); +} + +void matrix_init_random_uniform(Matrix *matrix, int rows, int columns, float8 min, float8 max, int seed) +{ + Assert(min < max); + + matrix_init(matrix, rows, columns); + + float8 range = max - min; + struct drand48_data rnd; + srand48_r(seed, &rnd); + + float8 u; + float8 *pd = matrix->data; + float8 *pdx = pd + rows * columns; + while (pd < pdx) { + drand48_r(&rnd, &u); + *pd++ = min + range * u; + } +} + +void matrix_init_random_bernoulli(Matrix *matrix, int rows, int columns, float8 p, float8 min, float8 max, int seed) +{ + matrix_init(matrix, rows, columns); + + struct drand48_data rnd; + srand48_r(seed, &rnd); + + float8 r; + float8 *pdata = matrix->data; + int count = rows * columns; + while (count--) { + drand48_r(&rnd, &r); + *pdata++ = (r < p ? 
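+        /* Bernoulli draw: min with probability p, max with probability 1 - p */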
min : max); + } +} + +int *matrix_init_kernel_polynomial(int features, int components, int degree, float8 coef0, int seed, Matrix *weights, + Matrix *coefs) +{ + struct drand48_data rnd; + srand48_r(seed, &rnd); + + Matrix mcoefs; + matrix_init_maclaurin_coefs(&mcoefs, degree, coef0); + + int dims = 0; + matrix_init(coefs, components); + + int *pcomponents = (int*)palloc(components * sizeof(int)); + for (int r = 0; r < components; r++) { + int rep = 0; + do { + float8 rr = 0; + drand48_r(&rnd, &rr); + if (rr == 0) { + continue; + } + rep = (int)log2(1.0 / rr); + } while (rep == 0 || rep > degree); + dims += rep; + pcomponents[r] = rep; + coefs->data[r] = mcoefs.data[rep]; + } + matrix_release(&mcoefs); + + matrix_init_random_bernoulli(weights, dims, features, 0.5, -1, 1, seed); + + return pcomponents; +} + +void matrix_transform_kernel_polynomial(const Matrix *input, int ncomponents, int *components, const Matrix *weights, + const Matrix *coefficients, Matrix *output) +{ + Assert(output->rows == ncomponents); + Assert(output->columns == 1); + + Matrix feat_w; + matrix_init(&feat_w, weights->rows); + matrix_mult(weights, input, &feat_w); + + float8 *pfold = output->data; + const float8 *pfw = feat_w.data; + for (int r = 0; r < ncomponents; r++) { + int fold = (int)*components++; + float8 v = 1; + while (fold-- > 0) + v *= *pfw++; + *pfold++ = v; + } + matrix_release(&feat_w); + + matrix_mult_entrywise(output, coefficients); + matrix_mult_scalar(output, sqrt(1.0 / output->rows)); +} + +void matrix_mult(const Matrix *matrix1, const Matrix *matrix2, Matrix *result) +{ + Assert(matrix1 != nullptr); + Assert(!matrix1->transposed); + Assert(matrix2 != nullptr); + Assert(!matrix2->transposed); + Assert(matrix1->columns == matrix2->rows); + Assert(matrix1->rows == result->rows); + Assert(matrix2->columns == result->columns); + + float8 *pd = result->data; + float8 *ps = matrix1->data; + for (int r = 0; r < matrix1->rows; r++) { + for (int c = 0; c < matrix2->columns; c++) { + float8 *ps1 = ps; + float8 *ps2 = matrix2->data + c; + float8 sum = 0.0; + for (int cx = 0; cx < matrix1->columns; cx++) { + sum += *ps1 * *ps2; + ps1++; + ps2 += matrix2->columns; + } + *pd++ = sum; + } + ps += matrix1->columns; + } +} + +void matrix_print(const Matrix *matrix, StringInfo buf, bool full) +{ + Assert(matrix != nullptr); + Assert(!matrix->transposed); + const float8 *pf = matrix->data; + appendStringInfoChar(buf, '['); + for (int r = 0; r < matrix->rows; r++) { + if (!full && matrix->rows > MATRIX_LIMITED_OUTPUT && r > (MATRIX_LIMITED_OUTPUT / 2) && + r < matrix->rows - (MATRIX_LIMITED_OUTPUT / 2)) { + if (matrix->columns > 1) + appendStringInfoString(buf, ",\n..."); + else + appendStringInfoString(buf, ", ..."); + + r = matrix->rows - MATRIX_LIMITED_OUTPUT / 2; + pf = matrix->data + r * matrix->columns; + continue; + } + + if (matrix->columns > 1) { + if (r > 0) + appendStringInfoString(buf, ",\n"); + + appendStringInfoChar(buf, '['); + } else { + if (r > 0) + appendStringInfoString(buf, ", "); + } + for (int c = 0; c < matrix->columns; c++) { + if (c > 0) + appendStringInfoString(buf, ", "); + + appendStringInfo(buf, "%.16g", *pf++); + } + if (matrix->columns > 1) + appendStringInfoChar(buf, ']'); + } + appendStringInfoChar(buf, ']'); +} + +void elog_matrix(int elevel, const char *msg, const Matrix *matrix) +{ + if (is_errmodule_enable(elevel, MOD_DB4AI)) { + StringInfoData buf; + initStringInfo(&buf); + matrix_print(matrix, &buf, false); + ereport(elevel, (errmodule(MOD_DB4AI), errmsg("%s(%d,%d) = %s", msg, 
matrix->rows, matrix->columns, buf.data))); + pfree(buf.data); + } +} diff --git a/src/gausskernel/dbmind/db4ai/executor/xgboost/CMakeLists.txt b/src/gausskernel/dbmind/db4ai/executor/xgboost/CMakeLists.txt new file mode 100644 index 000000000..3e360a55c --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/xgboost/CMakeLists.txt @@ -0,0 +1,11 @@ +# xgboost.cmake +set(TGT_xgboost_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/xgboost.cpp + ) +set(TGT_xgboost_INC + ${XGBOOST_INCLUDE_PATH} + ) +set(xgboost_DEF_OPTIONS ${MACRO_OPTIONS}) +set(xgboost_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) +set(xgboost_LINK_OPTIONS ${BIN_LINK_OPTIONS}) +add_static_objtarget(gausskernel_db4ai_executor_xgboost TGT_xgboost_SRC TGT_xgboost_INC "${xgboost_DEF_OPTIONS}" "${xgboost_COMPILE_OPTIONS}" "${xgboost_LINK_OPTIONS}") \ No newline at end of file diff --git a/src/gausskernel/dbmind/db4ai/executor/xgboost/Makefile b/src/gausskernel/dbmind/db4ai/executor/xgboost/Makefile new file mode 100644 index 000000000..4ed36480d --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/xgboost/Makefile @@ -0,0 +1,25 @@ +#--------------------------------------------------------------------------------------- +# +# IDENTIFICATION +# src/gausskernel/dbmind/db4ai/executor/xgboost +# +# --------------------------------------------------------------------------------------- + +subdir = src/gausskernel/dbmind/db4ai/executor/xgboost +top_builddir = ../../../../../.. + +include $(top_builddir)/src/Makefile.global + +override CPPFLAGS += -I$(XGBOOST_LIB_PATH)/../include + +ifneq "$(MAKECMDGOALS)" "clean" + ifneq "$(MAKECMDGOALS)" "distclean" + ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1" + -include $(DEPEND) + endif + endif +endif + +OBJS = xgboost.o + +include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/dbmind/db4ai/executor/xgboost/xgboost.cpp b/src/gausskernel/dbmind/db4ai/executor/xgboost/xgboost.cpp new file mode 100644 index 000000000..7f044417a --- /dev/null +++ b/src/gausskernel/dbmind/db4ai/executor/xgboost/xgboost.cpp @@ -0,0 +1,854 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ *--------------------------------------------------------------------------------------- + * + * xgboost.cpp + * Main file implemented for the xgboost algorithm + * + * IDENTIFICATION + * src/gausskernel/dbmind/db4ai/executor/xgboost/xgboost.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "nodes/execnodes.h" +#include "nodes/pg_list.h" +#include "postgres_ext.h" +#include "utils/builtins.h" +#include "funcapi.h" +#include + +#include "db4ai/xgboost.h" +#include "db4ai/aifuncs.h" +#include "db4ai/model_warehouse.h" +#include "db4ai/predict_by.h" +#include "db4ai/db4ai_common.h" + +#include "xgboost/c_api.h" + +double total_exec_time = 0.0; +struct timespec exec_start_time, exec_end_time; + +#define XGBOOST_LIB_NAME "libxgboost.so" + +typedef const int (*XGBoosterSetParam_Sym)(BoosterHandle handle, const char *name, const char *value); +typedef const int (*XGDMatrixCreateFromMat_Sym)(const float *data, bst_ulong nrow, bst_ulong ncol, + float missing, DMatrixHandle *out); +typedef const int (*XGDMatrixSetFloatInfo_Sym)(DMatrixHandle handle, const char *field, const float *array, bst_ulong len); +typedef const int (*XGBoosterCreate_Sym)(const DMatrixHandle dmats[], bst_ulong len, BoosterHandle *out); +typedef const int (*XGBoosterUnserializeFromBuffer_Sym)(BoosterHandle handle, const void *buf, bst_ulong len); +typedef const int (*XGBoosterUpdateOneIter_Sym)(BoosterHandle handle, int iter, DMatrixHandle dtrain); +typedef const int (*XGBoosterEvalOneIter_Sym)(BoosterHandle handle, int iter, DMatrixHandle dmats[], + const char *evnames[], bst_ulong len, const char **out_result); +typedef const int (*XGBoosterSerializeToBuffer_Sym)(BoosterHandle handle, bst_ulong *out_len, const char **out_dptr); +typedef const int (*XGDMatrixFree_Sym)(DMatrixHandle handle); +typedef const int (*XGBoosterFree_Sym)(BoosterHandle handle); +typedef const int (*XGBoosterPredict_Sym)(BoosterHandle handle, DMatrixHandle dmat, int option_mask, unsigned ntree_limit, + int training, bst_ulong *out_len, const float **out_result); +typedef const char* (*XGBGetLastError_Sym)(void); + +typedef struct { + XGBoosterSetParam_Sym XGBoosterSetParam; + XGDMatrixCreateFromMat_Sym XGDMatrixCreateFromMat; + XGDMatrixSetFloatInfo_Sym XGDMatrixSetFloatInfo; + XGBoosterCreate_Sym XGBoosterCreate; + XGBoosterUnserializeFromBuffer_Sym XGBoosterUnserializeFromBuffer; + XGBoosterUpdateOneIter_Sym XGBoosterUpdateOneIter; + XGBoosterEvalOneIter_Sym XGBoosterEvalOneIter; + XGBoosterSerializeToBuffer_Sym XGBoosterSerializeToBuffer; + XGDMatrixFree_Sym XGDMatrixFree; + XGBoosterFree_Sym XGBoosterFree; + XGBoosterPredict_Sym XGBoosterPredict; + XGBGetLastError_Sym XGBGetLastError; +} xgboostApi; //xgboost API function + +static void *g_xgboost_handle = NULL; //dynamic library handler +static xgboostApi *g_xgboostApi = NULL; // function symbols of xgboost library +static MemoryContext g_xgboostMcxt = NULL; + +#define safe_xgboost(call) { \ + int err = (call); \ + if (err != 0) { \ + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), \ + errmsg("%s:%d: error in %s: %s\n", __FILE__, __LINE__, #call, g_xgboostApi->XGBGetLastError()))); \ + } \ + } + +double parseDoubleFromErrMetric(const char *str) +{ + // skip all the way to `:' + while (*str != ':') + ++str; + // skip `:' and parse the number + return strtod(++str, NULL); +} + +struct xg_data_t { + float* labels{nullptr}; + float* features{nullptr}; + char* raw_model{nullptr}; + int ft_rows{0}; + int 
ft_cols{0}; + int lb_rows{0}; + double validation_score{0}; + uint64_t raw_model_size{0}; + + inline void set_raw_model(char *raw_model_, uint64_t len_) + { + if (raw_model_ == 0 || len_ <= 0) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("Xgboost failed loading raw model."))); + + /* check if we need to release the old buffer */ + if (raw_model) + pfree((void *)raw_model); + + raw_model_size = len_; + raw_model = (char *)palloc0(sizeof(char) * len_); + errno_t rc = memcpy_s(raw_model, len_, raw_model_, len_); + securec_check(rc, "\0", "\0"); + } +}; + +typedef struct XgboostModelV01 { + int ft_cols; +} XgboostModelV01; + +/* + * Auxiliary method for printing tuples, useful for debugging + */ +void printXGData(const xg_data_t &chunk, const int n_tuples) +{ + StringInfoData buf; + initStringInfo(&buf); + + for (int i = 0; i < n_tuples; ++i) { + appendStringInfo(&buf, "%4.2f\t", chunk.labels[i]); + for (int j = 0; j < chunk.ft_cols; ++j) { + appendStringInfo(&buf, "%4.2f", *(chunk.features + i * chunk.ft_cols + j)); + appendStringInfoChar(&buf, '\t'); + } + appendStringInfoChar(&buf, '\n'); + elog(NOTICE, "%s", buf.data); + resetStringInfo(&buf); + } + pfree(buf.data); +} + +typedef struct HyperparamsXGBoost { + ModelHyperparameters mhp; /* place-holder */ + /* hyperparameters */ + uint32_t n_iterations; + uint32_t batch_size; + uint32_t max_depth; + uint32_t min_child_weight; + uint32_t nthread; + uint32_t seed; + uint32_t verbosity; + double eta; + double gamma; + const char* booster{nullptr}; + const char* tree_method{nullptr}; + const char* eval_metric{nullptr}; +} HyperparamsXGBoost; + +/* + * XGBoost state + */ +typedef struct XGBoostState { + TrainModelState tms; + // tuple description + Oid *oids; + bool done = false; + uint32_t processed_tuples = 0U; + double execution_time = 0.; +} XGBoostState; + +typedef struct SerializedModelXgboost { + int ft_cols; + BoosterHandle booster = nullptr; +}SerializedModelXgboost; + +extern XGBoost xg_reg_logistic; +extern XGBoost xg_bin_logistic; +extern XGBoost xg_reg_sqe; +extern XGBoost xg_reg_gamma; + +XGBoost *xgboost_get_algorithm(AlgorithmML algorithm) +{ + XGBoost *xgboost_algorithm = nullptr; + switch (algorithm) { + case XG_REG_LOGISTIC: + xgboost_algorithm = &xg_reg_logistic; + break; + case XG_BIN_LOGISTIC: + xgboost_algorithm = &xg_bin_logistic; + break; + case XG_REG_SQE: + xgboost_algorithm = &xg_reg_sqe; + break; + case XG_REG_GAMMA: + xgboost_algorithm = &xg_reg_gamma; + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Invalid algorithm %d", algorithm))); + break; + } + return xgboost_algorithm; +} + +// Hyperparameter and algorithm definitions +MetricML *xgboost_metrics_accuracy(AlgorithmAPI *self, int *num_metrics) +{ + Assert(num_metrics != nullptr); + static MetricML metrics[] = { + METRIC_ML_AUC, METRIC_ML_AUC_PR, METRIC_ML_MAP, METRIC_ML_RMSE, METRIC_ML_RMSLE, METRIC_ML_MAE + }; + *num_metrics = sizeof(metrics) / sizeof(MetricML); + return metrics; +} + +#define BOOST_GBLINEAR_IDX 1 +const char *xgboost_boost_str[] = {"gbtree", "gblinear", "dart"}; +const char *xgboost_tree_method_str[] = {"auto", "exact", "approx", "hist", "gpu_hist"}; +const char *xgboost_eval_metric_str[] = {"rmse", "rmsle", "map", "mae", "auc", "aucpr" }; +static HyperparameterDefinition xgboost_hyperparameter_definitions[] = { + HYPERPARAMETER_INT4("n_iter", 10, 1, true, INT32_MAX, true, HyperparamsXGBoost, n_iterations, HP_NO_AUTOML()), + 
HYPERPARAMETER_INT4("batch_size", 10000, 1, true, INT32_MAX, true, HyperparamsXGBoost, batch_size, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("max_depth", 5, 0, true, INT32_MAX, true, HyperparamsXGBoost, max_depth, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("min_child_weight", 1, 0, true, INT32_MAX, true, HyperparamsXGBoost, min_child_weight, + HP_NO_AUTOML()), + HYPERPARAMETER_FLOAT8("gamma", 0.0, 0.0, true, 1, true, HyperparamsXGBoost, gamma, HP_NO_AUTOML()), + HYPERPARAMETER_FLOAT8("eta", 0.3, 0.0, true, 1, true, HyperparamsXGBoost, eta, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("nthread", 1, 0, true, INT32_MAX, true, HyperparamsXGBoost, nthread, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("verbosity", 1, 0, true, 3, true, HyperparamsXGBoost, verbosity, HP_NO_AUTOML()), + HYPERPARAMETER_INT4("seed", 0, 0, true, INT32_MAX, true, HyperparamsXGBoost, seed, + HP_AUTOML_INT(1, INT32_MAX, 1, ProbabilityDistribution::UNIFORM_RANGE)), + HYPERPARAMETER_STRING("booster", "gbtree", xgboost_boost_str, ARRAY_LENGTH(xgboost_boost_str), HyperparamsXGBoost, + booster, HP_NO_AUTOML()), + HYPERPARAMETER_STRING("tree_method", "auto", xgboost_tree_method_str, ARRAY_LENGTH(xgboost_tree_method_str), + HyperparamsXGBoost, tree_method, HP_NO_AUTOML()), + HYPERPARAMETER_STRING("eval_metric", "rmse", xgboost_eval_metric_str, ARRAY_LENGTH(xgboost_eval_metric_str), + HyperparamsXGBoost, eval_metric, HP_NO_AUTOML()), +}; + +static const HyperparameterDefinition* xgboost_get_hyperparameters(AlgorithmAPI *self, int *definitions_size) +{ + Assert(definitions_size != nullptr); + *definitions_size = sizeof(xgboost_hyperparameter_definitions) / sizeof(HyperparameterDefinition); + return xgboost_hyperparameter_definitions; +} + +static ModelHyperparameters *xgboost_make_hyperparameters(AlgorithmAPI *self) +{ + auto xgboost_hyperp = reinterpret_cast(palloc0(sizeof(HyperparamsXGBoost))); + + return &xgboost_hyperp->mhp; +} + +void load_xgboost_library(void) +{ + if (g_xgboost_handle != NULL) { + return; // have load it + } + + // get the library path from GAUSSHOME/lib directory + char* gausshome = getGaussHome(); + StringInfo libPath = makeStringInfo(); + appendStringInfo(libPath, "%s/lib/%s", gausshome, XGBOOST_LIB_NAME); + + (void)LWLockAcquire(XGBoostLibLock, LW_EXCLUSIVE); + if (g_xgboost_handle != NULL) { //check again to avoid double dlopen + LWLockRelease(XGBoostLibLock); + return; // have load it + } + + g_xgboost_handle = dlopen(libPath->data, RTLD_NOW | RTLD_GLOBAL); + if (g_xgboost_handle == NULL) { + LWLockRelease(XGBoostLibLock); + ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), + errmsg("Call dlopen to load library file %s failed. 
error: %s", libPath->data, dlerror()))); + } + + g_xgboostMcxt = AllocSetContextCreate(g_instance.instance_context, + "xgboostApiMemoryContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE, + SHARED_CONTEXT); + g_xgboostApi = (xgboostApi *)MemoryContextAlloc(g_xgboostMcxt, sizeof(xgboostApi)); + + g_xgboostApi->XGBoosterSetParam = (XGBoosterSetParam_Sym)dlsym(g_xgboost_handle, "XGBoosterSetParam"); + g_xgboostApi->XGDMatrixCreateFromMat = (XGDMatrixCreateFromMat_Sym)dlsym(g_xgboost_handle, "XGDMatrixCreateFromMat"); + g_xgboostApi->XGDMatrixSetFloatInfo = (XGDMatrixSetFloatInfo_Sym)dlsym(g_xgboost_handle, "XGDMatrixSetFloatInfo"); + g_xgboostApi->XGBoosterCreate = (XGBoosterCreate_Sym)dlsym(g_xgboost_handle, "XGBoosterCreate"); + g_xgboostApi->XGBoosterUnserializeFromBuffer = (XGBoosterUnserializeFromBuffer_Sym)dlsym(g_xgboost_handle, "XGBoosterUnserializeFromBuffer"); + g_xgboostApi->XGBoosterUpdateOneIter = (XGBoosterUpdateOneIter_Sym)dlsym(g_xgboost_handle, "XGBoosterUpdateOneIter"); + g_xgboostApi->XGBoosterEvalOneIter = (XGBoosterEvalOneIter_Sym)dlsym(g_xgboost_handle, "XGBoosterEvalOneIter"); + g_xgboostApi->XGBoosterSerializeToBuffer = (XGBoosterSerializeToBuffer_Sym)dlsym(g_xgboost_handle, "XGBoosterSerializeToBuffer"); + g_xgboostApi->XGDMatrixFree = (XGDMatrixFree_Sym)dlsym(g_xgboost_handle, "XGDMatrixFree"); + g_xgboostApi->XGBoosterFree = (XGBoosterFree_Sym)dlsym(g_xgboost_handle, "XGBoosterFree"); + g_xgboostApi->XGBoosterPredict = (XGBoosterPredict_Sym)dlsym(g_xgboost_handle, "XGBoosterPredict"); + g_xgboostApi->XGBGetLastError = (XGBGetLastError_Sym)dlsym(g_xgboost_handle, "XGBGetLastError"); + + if (g_xgboostApi->XGBoosterSetParam == NULL || g_xgboostApi->XGDMatrixCreateFromMat == NULL || + g_xgboostApi->XGDMatrixSetFloatInfo == NULL || g_xgboostApi->XGBoosterCreate == NULL || + g_xgboostApi->XGBoosterUnserializeFromBuffer == NULL || g_xgboostApi->XGBoosterUpdateOneIter == NULL || + g_xgboostApi->XGBoosterEvalOneIter == NULL || g_xgboostApi->XGBoosterSerializeToBuffer == NULL || + g_xgboostApi->XGDMatrixFree == NULL || g_xgboostApi->XGBoosterFree == NULL || + g_xgboostApi->XGBoosterPredict == NULL || g_xgboostApi->XGBGetLastError == NULL) { + (void)dlclose(g_xgboost_handle); + g_xgboost_handle = NULL; + LWLockRelease(XGBoostLibLock); + ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), + errmsg("Call dlsym to the symbol of load library file %s failed. error: %s", libPath->data, dlerror()))); + } + + LWLockRelease(XGBoostLibLock); + + /* clear any existing error */ + (void)dlerror(); + + pfree(libPath->data); +} + +/* + * This method is reposnsible for setting hyperparams for the XGBoost algorithm. + * For hyperparams which are not set, XGBoost takes default values. 
+/*
+ * This method is responsible for setting hyperparameters for the XGBoost algorithm.
+ * For hyperparameters which are not set, XGBoost takes its default values.
+ */
+void set_hyperparams(AlgorithmAPI *alg, const HyperparamsXGBoost *xg_hyp, void *booster)
+{
+    StringInfoData buf;
+    initStringInfo(&buf);
+
+    switch (alg->algorithm) {
+        case XG_BIN_LOGISTIC:
+            safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "objective", "binary:logistic"));
+            break;
+        case XG_REG_LOGISTIC:
+            safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "objective", "reg:logistic"));
+            break;
+        case XG_REG_SQE:
+            safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "objective", "reg:squarederror"));
+            break;
+        case XG_REG_GAMMA:
+            safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "objective", "reg:gamma"));
+            break;
+        default:
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("Unknown XG Algorithm %d!", alg->algorithm)));
+            break;
+    }
+
+    if (xg_hyp->booster) {
+        safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "booster", xg_hyp->booster));
+    }
+    if (xg_hyp->tree_method) {
+        safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "tree_method", xg_hyp->tree_method));
+    }
+    if (strcmp(xg_hyp->eval_metric, "") != 0) {
+        safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "eval_metric", xg_hyp->eval_metric));
+    }
+    if (xg_hyp->seed != 0) {
+        appendStringInfo(&buf, "%d", xg_hyp->seed);
+        safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "seed", buf.data));
+        resetStringInfo(&buf);
+    }
+    if (xg_hyp->verbosity != 1) {
+        appendStringInfo(&buf, "%d", xg_hyp->verbosity);
+        safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "verbosity", buf.data));
+        resetStringInfo(&buf);
+    }
+
+    appendStringInfo(&buf, "%d", xg_hyp->nthread);
+    safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "nthread", buf.data));
+    resetStringInfo(&buf);
+
+    appendStringInfo(&buf, "%d", xg_hyp->max_depth);
+    safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "max_depth", buf.data));
+    resetStringInfo(&buf);
+
+    appendStringInfo(&buf, "%f", xg_hyp->gamma);
+    safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "gamma", buf.data));
+    resetStringInfo(&buf);
+
+    appendStringInfo(&buf, "%f", xg_hyp->eta);
+    safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "eta", buf.data));
+    resetStringInfo(&buf);
+
+    appendStringInfo(&buf, "%d", xg_hyp->min_child_weight);
+    safe_xgboost(g_xgboostApi->XGBoosterSetParam(booster, "min_child_weight", buf.data));
+    resetStringInfo(&buf);
+
+    pfree(buf.data);
+}
+
+template <bool alloc>
+void setup_xg_chunk(xg_data_t &xg_data)
+{
+    check_hyper_bounds(sizeof(float), xg_data.lb_rows, "batch_size");
+
+    /* allocate the labels array */
+    uint lb_size = sizeof(float) * xg_data.lb_rows;
+    if (alloc) {
+        xg_data.labels = (float*)palloc0(lb_size);
+    } else {
+        /* reset labels for the next iteration */
+        errno_t rc = memset_s(xg_data.labels, lb_size, 0, lb_size);
+        securec_check(rc, "\0", "\0");
+    }
+
+    check_hyper_bounds(xg_data.ft_rows, xg_data.ft_cols, "batch_size");
+    check_hyper_bounds(sizeof(float), xg_data.ft_rows * xg_data.ft_cols, "batch_size");
+
+    uint ft_size = sizeof(float) * xg_data.ft_rows * xg_data.ft_cols;
+    if (alloc) {
+        xg_data.features = (float*)palloc0(ft_size);
+    } else {
+        /* reset features for the next iteration */
+        errno_t rc = memset_s(xg_data.features, ft_size, 0, ft_size);
+        securec_check(rc, "\0", "\0");
+    }
+}
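+/*
+ * The boolean template parameter selects between first-time allocation and
+ * in-place reset of the chunk buffers, so the per-batch training loop can
+ * recycle a single allocation. Call pattern (as used in xgboost_run below):
+ *
+ *   setup_xg_chunk<true>(chunk);   // before the scan: palloc0 the buffers
+ *   setup_xg_chunk<false>(chunk);  // after each trained batch: memset_s reset
+ */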
+/*
+ * this function initializes the algorithm
+ */
+static TrainModelState *xgboost_create(AlgorithmAPI *self, const TrainModel *pnode)
+{
+    if (pnode->configurations != 1)
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("multiple hyper-parameter configurations for xgboost are not yet supported")));
+
+    auto xg_state = reinterpret_cast<XGBoostState *>(makeNodeWithSize(TrainModelState, sizeof(XGBoostState)));
+    xg_state->done = false;
+    return &xg_state->tms;
+}
+
+/* ----------------------------------------------------------------
+ *    XGBoost train operation.
+ *
+ *    Note that the allocated chunk size may differ from the number of
+ *    actual tuples, i.e., the number of tuples can be less in the last
+ *    chunk.
+ * ----------------------------------------------------------------
+ */
+void trainXG(AlgorithmAPI *alg, const HyperparamsXGBoost *xg_hyp, xg_data_t *chunk, const int n_tuples,
+    bool first_call = true)
+{
+    /* XGBoost DMatrix handles */
+    DMatrixHandle dtrain, dtest;
+
+    /* load the DTrain matrix */
+    safe_xgboost(g_xgboostApi->XGDMatrixCreateFromMat((float *)chunk->features, // input data
+        n_tuples,       // # rows
+        chunk->ft_cols, // # columns in the input
+        -1,             // filler for missing values
+        &dtrain));      // handle of the DMatrix
+
+    /* load the DTest matrix */
+    safe_xgboost(g_xgboostApi->XGDMatrixCreateFromMat((float *)chunk->features, n_tuples, chunk->ft_cols, -1, &dtest));
+
+    /* load the labels */
+    safe_xgboost(g_xgboostApi->XGDMatrixSetFloatInfo(dtrain, "label", chunk->labels, n_tuples));
+    safe_xgboost(g_xgboostApi->XGDMatrixSetFloatInfo(dtest, "label", chunk->labels, n_tuples));
+
+    DMatrixHandle eval_dmats[2] = {dtrain, dtest};
+
+    /* create the booster and load the desired parameters */
+    BoosterHandle booster;
+    safe_xgboost(g_xgboostApi->XGBoosterCreate(eval_dmats, 2, &booster));
+
+    if (!first_call) {
+        safe_xgboost(g_xgboostApi->XGBoosterUnserializeFromBuffer(booster, chunk->raw_model, chunk->raw_model_size));
+    } else {
+        set_hyperparams(alg, xg_hyp, booster);
+    }
+
+    /* evaluation structures */
+    const char* eval_names[2] = {"train", "test"};
+    const char* eval_result = nullptr;
+
+    for (uint32_t iter = 0; iter < xg_hyp->n_iterations; ++iter) {
+        safe_xgboost(g_xgboostApi->XGBoosterUpdateOneIter(booster, iter, dtrain));
+        safe_xgboost(g_xgboostApi->XGBoosterEvalOneIter(booster, iter, eval_dmats, eval_names, 2, &eval_result));
+    }
+
+    /* get the evaluation results */
+    chunk->validation_score = parseDoubleFromErrMetric(eval_result);
+    uint64_t raw_model_len;
+    char *raw_model;
+    safe_xgboost(g_xgboostApi->XGBoosterSerializeToBuffer(booster, &raw_model_len, (const char **)&raw_model));
+    chunk->set_raw_model(raw_model, raw_model_len);
+    /* free the xgboost structures */
+    safe_xgboost(g_xgboostApi->XGDMatrixFree(dtrain));
+    safe_xgboost(g_xgboostApi->XGDMatrixFree(dtest));
+    safe_xgboost(g_xgboostApi->XGBoosterFree(booster));
+}
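+/*
+ * Batch continuation: trainXG keeps no live booster between batches. Instead,
+ * the updated booster is serialized into chunk->raw_model after every batch
+ * and restored via XGBoosterUnserializeFromBuffer at the start of the next
+ * one, so the model state effectively flows
+ *
+ *   batch 1: create -> set_hyperparams -> update -> serialize
+ *   batch n: create -> unserialize     -> update -> serialize
+ *
+ * Hyperparameters therefore only need to be applied on the first call.
+ */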
+static void check_label(AlgorithmAPI *self, HyperparamsXGBoost *xg_hyperp, float label)
+{
+    if ((self->algorithm == XG_BIN_LOGISTIC || self->algorithm == XG_REG_LOGISTIC) ||
+        (strcmp(xg_hyperp->eval_metric, "auc") == 0 || strcmp(xg_hyperp->eval_metric, "aucpr") == 0)) {
+        if (label == 0 || label == 1) {
+            return;
+        }
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("Label must be 0 or 1 for logistic regression.")));
+    } else if (self->algorithm == XG_REG_GAMMA && label < 0) {
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("Label must be nonnegative for gamma.")));
+    }
+}
+
+static void check_data_cnt(uint32_t tuple_count, uint32_t pos_cnt, HyperparamsXGBoost *xg_hyperp)
+{
+    // the batch contains only positive samples or only negative samples
+    if ((strcmp(xg_hyperp->eval_metric, "auc") == 0 || strcmp(xg_hyperp->eval_metric, "aucpr") == 0) &&
+        (pos_cnt == 0 || pos_cnt == tuple_count)) {
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("The dataset only contains pos or neg samples for auc or aucpr")));
+    }
+}
+
+void xgboost_serialize(SerializedModel *data, xg_data_t *chunk_ptr)
+{
+    data->size = chunk_ptr->raw_model_size + sizeof(XgboostModelV01);
+    XgboostModelV01 *mdata = (XgboostModelV01 *)palloc0(data->size);
+
+    data->raw_data = mdata;
+
+    mdata->ft_cols = chunk_ptr->ft_cols;
+    int8_t *ptr = (int8_t *)(mdata + 1);
+    int avail = data->size - sizeof(XgboostModelV01);
+
+    int rc = memcpy_s(ptr, avail, chunk_ptr->raw_model, chunk_ptr->raw_model_size);
+    securec_check_ss(rc, "\0", "\0");
+}
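+/*
+ * Serialized layout produced by xgboost_serialize (version DB4AI_MODEL_V01):
+ *
+ *   +------------------------+----------------------------------+
+ *   | XgboostModelV01 header | raw booster buffer               |
+ *   | (ft_cols)              | (XGBoosterSerializeToBuffer out) |
+ *   +------------------------+----------------------------------+
+ *
+ * xgboost_deserialize below strips the header again and hands the remaining
+ * bytes to XGBoosterUnserializeFromBuffer.
+ */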
+static void xgboost_run(AlgorithmAPI *self, TrainModelState *pstate, Model **models)
+{
+    Assert(pstate->finished == 0);
+
+    clock_gettime(CLOCK_MONOTONIC, &exec_start_time);
+    auto xg_state = reinterpret_cast<XGBoostState *>(pstate);
+    auto xg_hyperp = const_cast<HyperparamsXGBoost *>(
+        reinterpret_cast<const HyperparamsXGBoost *>(pstate->config->hyperparameters[0]));
+
+    ModelTuple const *outer_tuple_slot = nullptr;
+
+    // Check the max_depth parameter
+    if (xg_hyperp->max_depth == 0 && (0 != strcmp(xgboost_boost_str[BOOST_GBLINEAR_IDX], xg_hyperp->booster))) {
+        ereport(ERROR, (errmodule(MOD_DB4AI),
+            errmsg("Max_depth must be larger than 0 when booster is not gblinear.")));
+    }
+
+    load_xgboost_library();
+
+    // data holder for in-between (chunk-wise) invocations of the XGBoost training algorithm
+    xg_data_t chunk;
+    chunk.lb_rows = chunk.ft_rows = xg_hyperp->batch_size;
+    chunk.ft_cols = pstate->tuple.ncolumns - 1; // minus 1 for the labels!
+    setup_xg_chunk<true>(chunk);
+
+    uint32_t pos_cnt = 0; // number of positive labels
+    uint32_t tuple_count = 0, batch_count = 1;
+    while (true) {
+        /* retrieve tuples from the outer plan until there are no more */
+        outer_tuple_slot = pstate->fetch(pstate->callback_data, &pstate->tuple)
+            ? &pstate->tuple : nullptr;
+        /* if there are no more tuples in the pipeline, exit the loop */
+        if (outer_tuple_slot == nullptr) {
+            break;
+        }
+
+        /* skip null rows */
+        if (outer_tuple_slot->isnull[0]) {
+            continue;
+        }
+
+        /* the first attribute `0' always corresponds to the label (i.e. target) */
+        float label =
+            datum_get_float8(outer_tuple_slot->typid[XG_TARGET_COLUMN], outer_tuple_slot->values[XG_TARGET_COLUMN]);
+        chunk.labels[tuple_count] = label;
+        check_label(self, xg_hyperp, label);
+
+        bool col_is_null = false;
+        for (int j = 1; j < pstate->tuple.ncolumns; ++j) {
+            if (outer_tuple_slot->isnull[j]) {
+                col_is_null = true;
+                break;
+            }
+            *(chunk.features + tuple_count * chunk.ft_cols + j - 1) =
+                datum_get_float8(outer_tuple_slot->typid[j], outer_tuple_slot->values[j]);
+        }
+        if (col_is_null) {
+            continue;
+        }
+        if (label > 0) {
+            pos_cnt += 1;
+        }
+        ++tuple_count;
+        ++xg_state->processed_tuples;
+
+        /* train this batch */
+        if (tuple_count == xg_hyperp->batch_size) {
+            check_data_cnt(tuple_count, pos_cnt, xg_hyperp);
+            trainXG(self, xg_hyperp, &chunk, tuple_count, (batch_count == 1));
+            setup_xg_chunk<false>(chunk);
+            tuple_count = 0;
+            pos_cnt = 0;
+            ++batch_count;
+        }
+    }
+
+    /* process any remaining tuples */
+    if (tuple_count > 0) {
+        check_data_cnt(tuple_count, pos_cnt, xg_hyperp);
+        trainXG(self, xg_hyperp, &chunk, tuple_count, (batch_count == 1));
+    }
+
+    /* record the execution time of this method */
+    clock_gettime(CLOCK_MONOTONIC, &exec_end_time);
+
+    /* processing stats */
+    xg_state->done = true;
+    xg_state->execution_time = interval_to_sec(time_diff(&exec_end_time, &exec_start_time));
+    if (xg_state->processed_tuples == 0)
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_NO_DATA_FOUND),
+            errmsg("Training data is empty, please check the input data.")));
+
+    // number of configurations already run
+    ++pstate->finished;
+    // store the model
+    Model* model = models[0];
+    MemoryContext oldcxt = MemoryContextSwitchTo(model->memory_context);
+
+    model->data.version = DB4AI_MODEL_V01;
+
+    xgboost_serialize(&model->data, &chunk);
+
+    model->exec_time_secs = xg_state->execution_time;
+    model->processed_tuples = xg_state->processed_tuples;
+    model->num_actual_iterations = xg_hyperp->n_iterations;
+    model->return_type = FLOAT8OID;
+    // store the score
+    TrainingScore* pscore = (TrainingScore*)palloc0(sizeof(TrainingScore));
+    pscore->name = xg_hyperp->eval_metric;
+    pscore->value = chunk.validation_score;
+    model->scores = lappend(model->scores, pscore);
+    model->status = ERRCODE_SUCCESSFUL_COMPLETION;
+    MemoryContextSwitchTo(oldcxt);
+}
+
+static void xgboost_end(AlgorithmAPI *self, TrainModelState *pstate)
+{
+    auto xg_state_node = reinterpret_cast<XGBoostState *>(pstate);
+    if (xg_state_node->oids != nullptr)
+        pfree(xg_state_node->oids);
+}
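+/*
+ * Illustrative usage sketch (not part of this patch): once trained, the
+ * prediction entry points below are driven from SQL roughly as
+ *
+ *   SELECT PREDICT BY m1 (FEATURES f1, f2) FROM test_tab;
+ *
+ * Model and column names are hypothetical; prediction is performed one tuple
+ * at a time through xgboost_predict.
+ */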
+// ---------------------------------------------------------------------------------------------------
+// Prediction part
+// ---------------------------------------------------------------------------------------------------
+void print1DFeature(float *arr, const int cols)
+{
+    StringInfoData buf;
+    initStringInfo(&buf);
+
+    for (int i = 0; i < cols; ++i) {
+        appendStringInfo(&buf, "%4.2f\t", arr[i]);
+    }
+
+    appendStringInfoChar(&buf, '\n');
+    elog(NOTICE, "%s", buf.data);
+
+    pfree(buf.data);
+}
+
+void xgboost_deserialize(SerializedModel *xg_model, SerializedModelXgboost *xgboostm)
+{
+    int avail = xg_model->size;
+    if (xg_model->version == DB4AI_MODEL_V01) {
+        if (avail < (int)sizeof(XgboostModelV01))
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS),
+                errmsg("Model data corrupted reading header")));
+        XgboostModelV01 *mdata = (XgboostModelV01 *)xg_model->raw_data;
+        xgboostm->ft_cols = mdata->ft_cols;
+        avail -= (int)sizeof(XgboostModelV01);
+
+        void *placeholder = palloc0(avail);
+        uint8_t *ptr = (uint8_t *)(mdata + 1);
+        int rc = memcpy_s(placeholder, avail, ptr, avail);
+        securec_check(rc, "\0", "\0");
+
+        xg_model->raw_data = placeholder;
+        xg_model->size = avail;
+    }
+}
+
+ModelPredictor xgboost_predict_prepare(AlgorithmAPI *, SerializedModel const *model, Oid return_type)
+{
+    if (unlikely(!model))
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("Xgboost predict prepare: model cannot be null")));
+
+    load_xgboost_library();
+
+    auto xg_model = const_cast<SerializedModel *>(model);
+    SerializedModelXgboost *xgboostm = (SerializedModelXgboost *)palloc0(sizeof(SerializedModelXgboost));
+    xgboost_deserialize(xg_model, xgboostm);
+    /* init the XGBoost predictor */
+    safe_xgboost(g_xgboostApi->XGBoosterCreate(nullptr, 0, &xgboostm->booster));
+    /* load the decoded model */
+    safe_xgboost(g_xgboostApi->XGBoosterUnserializeFromBuffer(xgboostm->booster, xg_model->raw_data, xg_model->size));
+
+    return reinterpret_cast<ModelPredictor>(xgboostm);
+}
+
+Datum xgboost_predict(AlgorithmAPI *, ModelPredictor model, Datum *values, bool *isnull, Oid *types, int ncolumns)
+{
+    SerializedModelXgboost *xgboostm = (SerializedModelXgboost *)model;
+    /* sanity checks */
+    Assert(xgboostm->booster != nullptr);
+    if (ncolumns != xgboostm->ft_cols)
+        ereport(ERROR, (errmodule(MOD_DB4AI),
+            errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("Invalid number of features for prediction, provided %d, expected %d",
+                ncolumns, xgboostm->ft_cols)));
+
+    load_xgboost_library();
+
+    float features[ncolumns];
+    for (int col = 0; col < ncolumns; ++col)
+        features[col] = isnull[col] ? 0.0 : datum_get_float8(types[col], values[col]);
+
+    DMatrixHandle dmat;
+    /* convert to a DMatrix */
+    safe_xgboost(g_xgboostApi->XGDMatrixCreateFromMat((float *) features, 1, ncolumns, -1, &dmat));
+
+    bst_ulong out_len;
+    const float *out_result;
+    safe_xgboost(g_xgboostApi->XGBoosterPredict(xgboostm->booster, dmat, 0, 0, 0, &out_len, &out_result));
+
+    /* currently we predict one tuple at a time */
+    double prediction = out_result[0];
+
+    /* release the memory of the xgboost dmatrix structure */
+    safe_xgboost(g_xgboostApi->XGDMatrixFree(dmat));
+
+    return Float8GetDatum(prediction);
+}
+
+/*
+ * used in EXPLAIN MODEL
+ */
+List *xgboost_explain(AlgorithmAPI *self, SerializedModel const *model, Oid return_type)
+{
+    if (unlikely(!model))
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("xgboost explain: model cannot be null")));
+
+    if (unlikely(model->version != DB4AI_MODEL_V01))
+        ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("xgboost explain: currently only model V01 is supported")));
+
+    List *model_info = nullptr;
+
+    auto xg_model = const_cast<SerializedModel *>(model);
+
+    TrainingInfo *info = (TrainingInfo *)palloc0(sizeof(TrainingInfo));
+    info->value = Int32GetDatum(xg_model->size);
+    info->type = INT4OID;
+    info->name = "model size";
+    model_info = lappend(model_info, info);
+
+    return model_info;
+}
+
+XGBoost xg_reg_logistic = {
+    // AlgorithmAPI
+    {
+        XG_REG_LOGISTIC,
+        "xgboost_regression_logistic",
+        ALGORITHM_ML_DEFAULT,
+        xgboost_metrics_accuracy,
+        xgboost_get_hyperparameters,
+        xgboost_make_hyperparameters,
+        nullptr,
+        xgboost_create,
+        xgboost_run,
+        xgboost_end,
+        xgboost_predict_prepare,
+        xgboost_predict,
+        xgboost_explain
+    },
+};
+
+XGBoost xg_bin_logistic = {
+    // AlgorithmAPI
+    {
+        XG_BIN_LOGISTIC,
+        "xgboost_binary_logistic",
+        ALGORITHM_ML_DEFAULT,
+        xgboost_metrics_accuracy,
+        xgboost_get_hyperparameters,
+        xgboost_make_hyperparameters,
+
nullptr, + xgboost_create, + xgboost_run, + xgboost_end, + xgboost_predict_prepare, + xgboost_predict, + xgboost_explain + }, +}; + +XGBoost xg_reg_sqe = { + // AlgorithmAPI + { + XG_REG_SQE, + "xgboost_regression_squarederror", + ALGORITHM_ML_DEFAULT, + xgboost_metrics_accuracy, + xgboost_get_hyperparameters, + xgboost_make_hyperparameters, + nullptr, + xgboost_create, + xgboost_run, + xgboost_end, + xgboost_predict_prepare, + xgboost_predict, + xgboost_explain + }, +}; + +XGBoost xg_reg_gamma = { + // AlgorithmAPI + { + XG_REG_GAMMA, + "xgboost_regression_gamma", + ALGORITHM_ML_DEFAULT, + xgboost_metrics_accuracy, + xgboost_get_hyperparameters, + xgboost_make_hyperparameters, + nullptr, + xgboost_create, + xgboost_run, + xgboost_end, + xgboost_predict_prepare, + xgboost_predict, + xgboost_explain + }, +}; diff --git a/src/gausskernel/dbmind/db4ai/snapshots/create.sql b/src/gausskernel/dbmind/db4ai/snapshots/create.sql index e8b3b283f..433e14f05 100644 --- a/src/gausskernel/dbmind/db4ai/snapshots/create.sql +++ b/src/gausskernel/dbmind/db4ai/snapshots/create.sql @@ -44,7 +44,7 @@ BEGIN GET STACKED DIAGNOSTICS e_stack_act = PG_EXCEPTION_CONTEXT; IF CURRENT_SCHEMA = 'db4ai' THEN - e_stack_act := replace(e_stack_act, 'ion cre', 'ion db4ai.cre'); + e_stack_act := pg_catalog.replace(e_stack_act, 'ion cre', 'ion db4ai.cre'); END IF; IF e_stack_act NOT LIKE E'referenced column: create_snapshot_internal\n' @@ -56,7 +56,7 @@ BEGIN END IF; END; - IF length(i_commands[3]) > 0 THEN + IF pg_catalog.length(i_commands[3]) > 0 THEN <> DECLARE pattern TEXT; -- current user column name @@ -69,13 +69,13 @@ BEGIN BEGIN -- extract mapping from projection list for view definition - mapping := array(SELECT unnest(ARRAY[ m[1], coalesce(m[2], replace(m[3],'""','"'))]) FROM regexp_matches( + mapping := array(SELECT pg_catalog.unnest(ARRAY[ m[1], coalesce(m[2], replace(m[3],'""','"'))]) FROM pg_catalog.regexp_matches( i_commands[5], 't[0-9]+\.(f[0-9]+) AS (?:([^\s",]+)|"((?:[^"]*"")*[^"]*)")', 'g') m); -- extract field list from DISTRIBUTE BY clause - tokens :=(regexp_matches(i_commands[3], '^\s*DISTRIBUTE\s+BY\s+HASH\s*\((.*)\)\s*$', 'i'))[1]; + tokens :=(pg_catalog.regexp_matches(i_commands[3], '^\s*DISTRIBUTE\s+BY\s+HASH\s*\((.*)\)\s*$', 'i'))[1]; IF tokens IS NULL OR tokens SIMILAR TO '\s*' THEN - tokens := (regexp_matches(i_commands[3], '^\s*DISTRIBUTE\s+BY\s+REPLICATION\s*$', 'i'))[1]; + tokens := (pg_catalog.regexp_matches(i_commands[3], '^\s*DISTRIBUTE\s+BY\s+REPLICATION\s*$', 'i'))[1]; IF tokens IS NULL OR tokens SIMILAR TO '\s*' THEN RAISE EXCEPTION 'cannot match DISTRIBUTE BY clause' USING HINT = 'currently only DISTRIBUTE BY REPLICATION and DISTRIBUTE BY HASH(column_name [, ...]) supported'; @@ -95,12 +95,12 @@ BEGIN LOOP idx := idx + 1; - cur_ch := substr(tokens, idx, 1); + cur_ch := pg_catalog.substr(tokens, idx, 1); EXIT WHEN cur_ch IS NULL OR cur_ch = ''; CASE cur_ch WHEN '"' THEN - IF quoted AND substr(tokens, idx + 1, 1) = '"' THEN + IF quoted AND pg_catalog.substr(tokens, idx + 1, 1) = '"' THEN pattern := pattern || '"'; idx := idx + 1; ELSE @@ -111,29 +111,29 @@ BEGIN END IF; WHEN ',' THEN IF quoted THEN - pattern := pattern || cur_ch; + pattern := pattern || cur_ch::TEXT; CONTINUE; - ELSIF pattern IS NULL OR length(pattern) = 0 THEN + ELSIF pattern IS NULL OR pg_catalog.length(pattern) = 0 THEN pattern := ','; ELSE idx := idx - 1; -- reset on comma for next loop END IF; WHEN ' ', E'\n', E'\t' THEN IF quoted THEN - pattern := pattern || cur_ch; + pattern := pattern || cur_ch::TEXT; CONTINUE; - 
ELSIF pattern IS NULL OR length(pattern) = 0 THEN + ELSIF pattern IS NULL OR pg_catalog.length(pattern) = 0 THEN CONTINUE; END IF; ELSE - pattern := pattern || CASE WHEN quoted THEN cur_ch ELSE lower(cur_ch) END; + pattern := pattern || CASE WHEN quoted THEN cur_ch::TEXT ELSE pg_catalog.lower(cur_ch)::TEXT END; CONTINUE; END CASE; -- END tokenizer code for testing -- attempt to map the pattern - FOR idx IN 2 .. array_length(mapping, 1) BY 2 LOOP + FOR idx IN 2 .. pg_catalog.array_length(mapping, 1) BY 2 LOOP IF pattern = mapping[idx] THEN -- apply the mapping dist_cmd := dist_cmd || mapping[idx-1] || ','; @@ -151,24 +151,24 @@ BEGIN IF quoted THEN RAISE EXCEPTION 'unterminated quoted identifier ''%'' at or near: ''%''', - substr(pattern, 1, char_length(pattern)-1), i_commands[3]; + pg_catalog.substr(pattern, 1, pg_catalog.char_length(pattern)-1), i_commands[3]; END IF; - dist_cmd := rtrim(dist_cmd, ',') || ')'; + dist_cmd := pg_catalog.rtrim(dist_cmd, ',') || ')'; END; END IF; dist_cmd := ''; -- we silently drop DISTRIBUTE_BY - EXECUTE 'CREATE TABLE db4ai.t' || s_id || ' WITH (orientation = column, compression = low)' || dist_cmd - || ' AS SELECT ' || i_commands[4] || ' FROM _db4ai_tmp_x' || s_id; - EXECUTE 'COMMENT ON TABLE db4ai.t' || s_id || ' IS ''snapshot backing table, root is ' || quote_ident(i_schema) - || '.' || quote_ident(i_name) || ''''; - EXECUTE 'CREATE VIEW db4ai.v' || s_id || ' WITH(security_barrier) AS SELECT ' || i_commands[5] || ', xc_node_id, ctid FROM db4ai.t' || s_id; - EXECUTE 'COMMENT ON VIEW db4ai.v' || s_id || ' IS ''snapshot ' || quote_ident(i_schema) || '.' || quote_ident(i_name) - || ' backed by db4ai.t' || s_id || CASE WHEN length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; - EXECUTE 'GRANT SELECT ON db4ai.v' || s_id || ' TO ' || i_owner || ' WITH GRANT OPTION'; - EXECUTE 'SELECT COUNT(*) FROM db4ai.v' || s_id INTO STRICT row_count; + EXECUTE 'CREATE TABLE db4ai.t' || s_id::TEXT || ' WITH (orientation = column, compression = low)' || dist_cmd + || ' AS SELECT ' || i_commands[4] || ' FROM _db4ai_tmp_x' || s_id::TEXT; + EXECUTE 'COMMENT ON TABLE db4ai.t' || s_id::TEXT || ' IS ''snapshot backing table, root is ' || pg_catalog.quote_ident(i_schema) + || '.' || pg_catalog.quote_ident(i_name) || ''''; + EXECUTE 'CREATE VIEW db4ai.v' || s_id::TEXT || ' WITH(security_barrier) AS SELECT ' || i_commands[5] || ', xc_node_id, ctid FROM db4ai.t' || s_id::TEXT; + EXECUTE 'COMMENT ON VIEW db4ai.v' || s_id::TEXT || ' IS ''snapshot ' || pg_catalog.quote_ident(i_schema) || '.' 
|| pg_catalog.quote_ident(i_name) + || ' backed by db4ai.t' || s_id::TEXT || CASE WHEN pg_catalog.length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; + EXECUTE 'GRANT SELECT ON db4ai.v' || s_id::TEXT || ' TO "' || i_owner || '" WITH GRANT OPTION'; + EXECUTE 'SELECT COUNT(*) FROM db4ai.v' || s_id::TEXT INTO STRICT row_count; -- store only original commands supplied by user i_commands := ARRAY[i_commands[1], i_commands[2], i_commands[3]]; @@ -206,14 +206,14 @@ BEGIN -- obtain active message level BEGIN - EXECUTE 'SET LOCAL client_min_messages TO ' || current_setting('db4ai.message_level'); - RAISE INFO 'effective client_min_messages is ''%''', upper(current_setting('db4ai.message_level')); + EXECUTE 'SET LOCAL client_min_messages TO ' || pg_catalog.current_setting('db4ai.message_level'); + RAISE INFO 'effective client_min_messages is ''%''', pg_catalog.upper(pg_catalog.current_setting('db4ai.message_level')); EXCEPTION WHEN OTHERS THEN END; -- obtain database state of separation of rights BEGIN - separation_of_powers := upper(current_setting('enableSeparationOfDuty')); + separation_of_powers := pg_catalog.upper(pg_catalog.current_setting('enableSeparationOfDuty')); EXCEPTION WHEN OTHERS THEN separation_of_powers := 'OFF'; END; @@ -226,7 +226,7 @@ BEGIN -- obtain active snapshot mode BEGIN - s_mode := upper(current_setting('db4ai_snapshot_mode')); + s_mode := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_mode')); EXCEPTION WHEN OTHERS THEN s_mode := 'MSS'; END; @@ -237,12 +237,12 @@ BEGIN -- obtain relevant configuration parameters BEGIN - s_vers_del := current_setting('db4ai_snapshot_version_delimiter'); + s_vers_del := pg_catalog.current_setting('db4ai_snapshot_version_delimiter'); EXCEPTION WHEN OTHERS THEN s_vers_del := '@'; END; BEGIN - s_vers_sep := current_setting('db4ai_snapshot_version_separator'); + s_vers_sep := pg_catalog.current_setting('db4ai_snapshot_version_separator'); EXCEPTION WHEN OTHERS THEN s_vers_sep := '.'; END; @@ -254,11 +254,11 @@ BEGIN IF i_name IS NULL OR i_name = '' THEN RAISE EXCEPTION 'i_name cannot be NULL or empty'; - ELSIF strpos(i_name, s_vers_del) > 0 THEN + ELSIF pg_catalog.strpos(i_name, s_vers_del) > 0 THEN RAISE EXCEPTION 'i_name must not contain ''%'' characters', s_vers_del; END IF; - current_compatibility_mode := current_setting('sql_compatibility'); + current_compatibility_mode := pg_catalog.current_setting('sql_compatibility'); IF current_compatibility_mode = 'ORA' OR current_compatibility_mode = 'A' THEN none_represent := 0; ELSE @@ -267,7 +267,7 @@ BEGIN -- PG BUG: array_ndims('{}') or array_dims(ARRAY[]::INT[]) returns NULL - IF i_commands IS NULL OR array_length(i_commands, 1) = none_represent OR array_length(i_commands, 2) <> none_represent THEN + IF i_commands IS NULL OR pg_catalog.array_length(i_commands, 1) = none_represent OR pg_catalog.array_length(i_commands, 2) <> none_represent THEN RAISE EXCEPTION 'i_commands array malformed' USING HINT = 'pass SQL commands as TEXT[] literal, e.g. 
''{SELECT *, FROM public.t, DISTRIBUTE BY HASH(id)'''; END IF; @@ -279,9 +279,9 @@ BEGIN END LOOP; FOREACH command_str IN ARRAY i_commands LOOP - command_str := btrim(command_str); - pattern := upper(regexp_replace(left(command_str, 30), '\s+', ' ', 'g')); - IF left(pattern, 7) = 'SELECT ' THEN + command_str := pg_catalog.btrim(command_str); + pattern := pg_catalog.upper(pg_catalog.regexp_replace(pg_catalog.left(command_str, 30), '\s+', ' ', 'g')); + IF pg_catalog.left(pattern, 7) = 'SELECT ' THEN IF proj_cmd IS NULL THEN proj_cmd := command_str; @@ -300,12 +300,12 @@ BEGIN LOOP idx := idx + 1; - cur_ch := substr(stmt, idx, 1); + cur_ch := pg_catalog.substr(stmt, idx, 1); EXIT WHEN cur_ch IS NULL OR cur_ch = ''; CASE cur_ch WHEN '"' THEN - IF quoted AND substr(stmt, idx + 1, 1) = '"' THEN + IF quoted AND pg_catalog.substr(stmt, idx + 1, 1) = '"' THEN idx := idx + 1; ELSE quoted := NOT quoted; @@ -325,7 +325,7 @@ BEGIN WHEN ' ' THEN IF quoted OR nested > 0 THEN CONTINUE; - ELSIF pattern IS NULL OR length(pattern) = 0 THEN + ELSIF pattern IS NULL OR pg_catalog.length(pattern) = 0 THEN start_pos := idx; CONTINUE; END IF; @@ -333,15 +333,15 @@ BEGIN RAISE EXCEPTION 'syntax error at or near '';'' in ''%'' at position ''%''', stmt, idx; CONTINUE; ELSE - pattern := pattern || upper(cur_ch); + pattern := pattern || pg_catalog.upper(cur_ch); CONTINUE; END CASE; -- END splitter code for testing IF pattern = 'FROM' THEN - from_cmd := substr(stmt, start_pos + 1); - proj_cmd := left(stmt, start_pos - 1); + from_cmd := pg_catalog.substr(stmt, start_pos + 1); + proj_cmd := pg_catalog.left(stmt, start_pos - 1); stmt := from_cmd; nested := 0; quoted := FALSE; @@ -352,8 +352,8 @@ BEGIN RAISE NOTICE E'SELECT SPLITTING2\n%\n%\n%', stmt, proj_cmd, from_cmd; CONTINUE; ELSIF pattern = 'DISTRIBUTEBY' THEN - dist_cmd := substr(stmt, start_pos + 1); - from_cmd := left(stmt, start_pos - 1); + dist_cmd := pg_catalog.substr(stmt, start_pos + 1); + from_cmd := pg_catalog.left(stmt, start_pos - 1); RAISE NOTICE E'SELECT SPLITTING3\n%\n%\n%\n%', stmt, proj_cmd, from_cmd, dist_cmd; EXIT; END IF; @@ -364,13 +364,13 @@ BEGIN ELSE RAISE EXCEPTION 'multiple SELECT clauses in i_commands: ''%'' ''%''', proj_cmd, command_str; END IF; - ELSIF left(pattern, 5) = 'FROM ' THEN + ELSIF pg_catalog.left(pattern, 5) = 'FROM ' THEN IF from_cmd IS NULL THEN from_cmd := command_str; ELSE RAISE EXCEPTION 'multiple FROM clauses in i_commands: ''%'' ''%''', from_cmd, command_str; END IF; - ELSIF left(pattern, 14) = 'DISTRIBUTE BY ' THEN + ELSIF pg_catalog.left(pattern, 14) = 'DISTRIBUTE BY ' THEN IF dist_cmd IS NULL THEN dist_cmd := command_str; ELSE @@ -389,7 +389,7 @@ BEGIN -- supply default projection proj_cmd := 'SELECT *'; ELSE - IF from_cmd IS NULL AND strpos(upper(proj_cmd), 'FROM ') = 0 THEN + IF from_cmd IS NULL AND pg_catalog.strpos(pg_catalog.upper(proj_cmd), 'FROM ') = 0 THEN RAISE EXCEPTION 'FROM clause is missing in i_commands'; END IF; END IF; @@ -401,27 +401,27 @@ BEGIN IF i_vers IS NULL OR i_vers = '' THEN i_vers := s_vers_del || '1' || s_vers_sep || '0' || s_vers_sep || '0'; ELSE - i_vers := replace(i_vers, chr(2), s_vers_sep); + i_vers := pg_catalog.replace(i_vers, pg_catalog.chr(2), s_vers_sep); IF LEFT(i_vers, 1) <> s_vers_del THEN i_vers := s_vers_del || i_vers; - ELSIF char_length(i_vers ) < 2 THEN + ELSIF pg_catalog.char_length(i_vers ) < 2 THEN RAISE EXCEPTION 'illegal i_vers: ''%''', s_vers_del; END IF; - IF strpos(substr(i_vers, 2), s_vers_del) > 0 THEN + IF pg_catalog.strpos(pg_catalog.substr(i_vers, 2), 
s_vers_del) > 0 THEN RAISE EXCEPTION 'i_vers may contain only one single, leading ''%'' character', s_vers_del USING HINT = 'specify snapshot version as [' || s_vers_del || ']x' || s_vers_sep || 'y' || s_vers_sep || 'z or [' || s_vers_del || ']label with optional, leading ''' || s_vers_del || ''''; END IF; END IF; - IF char_length(i_name || i_vers) > 63 THEN + IF pg_catalog.char_length(i_name || i_vers) > 63 THEN RAISE EXCEPTION 'snapshot name too long: ''%''', i_name || i_vers; ELSE i_name := i_name || i_vers; END IF; -- the final name of the snapshot - qual_name := quote_ident(i_schema) || '.' || quote_ident(i_name); + qual_name := pg_catalog.quote_ident(i_schema) || '.' || pg_catalog.quote_ident(i_name); -- check for duplicate snapshot IF 0 < (SELECT COUNT(*) FROM db4ai.snapshot WHERE schema = i_schema AND name = i_name) THEN @@ -429,34 +429,34 @@ BEGIN END IF; --SELECT nextval('db4ai.snapshot_sequence') INTO STRICT s_id; - SELECT COALESCE(MAX(id)+1,0) FROM db4ai.snapshot INTO STRICT s_id; -- openGauss BUG: cannot create sequences in initdb + SELECT COALESCE(pg_catalog.MAX(id)+1,0) FROM db4ai.snapshot INTO STRICT s_id; -- openGauss BUG: cannot create sequences in initdb -- execute using current user privileges DECLARE e_message TEXT; -- exception message BEGIN - EXECUTE 'CREATE TEMPORARY TABLE _db4ai_tmp_x' || s_id || ' AS ' || proj_cmd + EXECUTE 'CREATE TEMPORARY TABLE _db4ai_tmp_x' || s_id::TEXT || ' AS ' || proj_cmd || CASE WHEN from_cmd IS NULL THEN '' ELSE ' ' || from_cmd END; EXCEPTION WHEN undefined_table THEN GET STACKED DIAGNOSTICS e_message = MESSAGE_TEXT; -- during function invocation, search path is redirected to {pg_temp, pg_catalog, function_schema} and becomes immutable RAISE INFO 'could not resolve relation % using system-defined "search_path" setting during function invocation: ''%''', - substr(e_message, 10, 1 + strpos(substr(e_message,11), '" does not exist')), - array_to_string(current_schemas(TRUE),', ') + pg_catalog.substr(e_message, 10, 1 + pg_catalog.strpos(pg_catalog.substr(e_message,11), '" does not exist')), + pg_catalog.array_to_string(pg_catalog.current_schemas(TRUE),', ') USING HINT = 'snapshots require schema-qualified table references, e.g. 
schema_name.table_name'; RAISE; END; -- extract normalized projection list i_commands := ARRAY[proj_cmd, from_cmd, dist_cmd, '', '']; - SELECT string_agg(ident, ', '), - string_agg(ident || ' AS f' || ordinal_position, ', '), - string_agg('t' || s_id || '.f' || ordinal_position || ' AS ' || ident, ', ') - FROM ( SELECT ordinal_position, quote_ident(column_name) AS ident + SELECT pg_catalog.string_agg(ident, ', '), + pg_catalog.string_agg(ident || ' AS f' || ordinal_position::TEXT, ', '), + pg_catalog.string_agg('t' || s_id::TEXT || '.f' || ordinal_position::TEXT || ' AS ' || ident, ', ') + FROM ( SELECT ordinal_position, pg_catalog.quote_ident(column_name) AS ident FROM information_schema.columns - WHERE table_schema = (SELECT nspname FROM pg_namespace WHERE oid=pg_my_temp_schema()) - AND table_name = '_db4ai_tmp_x' || s_id + WHERE table_schema = (SELECT nspname FROM pg_namespace WHERE oid=pg_catalog.pg_my_temp_schema()) + AND table_name = '_db4ai_tmp_x' || s_id::TEXT ORDER BY ordinal_position ) INTO STRICT proj_cmd, i_commands[4], i_commands[5]; IF proj_cmd IS NULL THEN @@ -467,13 +467,13 @@ BEGIN PERFORM db4ai.create_snapshot_internal(s_id, i_schema, i_name, i_commands, i_comment, CURRENT_USER); -- drop temporary view used for privilege transfer - EXECUTE 'DROP TABLE _db4ai_tmp_x' || s_id; + EXECUTE 'DROP TABLE _db4ai_tmp_x' || s_id::TEXT; -- create custom view, owned by current user - EXECUTE 'CREATE VIEW ' || qual_name || ' WITH(security_barrier) AS SELECT ' || proj_cmd || ' FROM db4ai.v' || s_id; - EXECUTE 'COMMENT ON VIEW ' || qual_name || ' IS ''snapshot view backed by db4ai.v' || s_id - || CASE WHEN length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; - EXECUTE 'ALTER VIEW ' || qual_name || ' OWNER TO ' || CURRENT_USER; + EXECUTE 'CREATE VIEW ' || qual_name || ' WITH(security_barrier) AS SELECT ' || proj_cmd || ' FROM db4ai.v' || s_id::TEXT; + EXECUTE 'COMMENT ON VIEW ' || qual_name || ' IS ''snapshot view backed by db4ai.v' || s_id::TEXT + || CASE WHEN pg_catalog.length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; + EXECUTE 'ALTER VIEW ' || qual_name || ' OWNER TO "' || CURRENT_USER || '"'; -- return final snapshot name res := ROW(i_schema, i_name); diff --git a/src/gausskernel/dbmind/db4ai/snapshots/prepare.sql b/src/gausskernel/dbmind/db4ai/snapshots/prepare.sql index 04541aba0..5f3040cdb 100644 --- a/src/gausskernel/dbmind/db4ai/snapshots/prepare.sql +++ b/src/gausskernel/dbmind/db4ai/snapshots/prepare.sql @@ -50,16 +50,16 @@ BEGIN GET STACKED DIAGNOSTICS e_stack_act = PG_EXCEPTION_CONTEXT; IF CURRENT_SCHEMA = 'db4ai' THEN - e_stack_act := replace(e_stack_act, ' prepare_snapshot(', ' db4ai.prepare_snapshot('); - e_stack_act := replace(e_stack_act, ' prepare_snapshot_internal(', ' db4ai.prepare_snapshot_internal('); - e_stack_act := replace(e_stack_act, ' sample_snapshot(', ' db4ai.sample_snapshot('); + e_stack_act := pg_catalog.replace(e_stack_act, ' prepare_snapshot(', ' db4ai.prepare_snapshot('); + e_stack_act := pg_catalog.replace(e_stack_act, ' prepare_snapshot_internal(', ' db4ai.prepare_snapshot_internal('); + e_stack_act := pg_catalog.replace(e_stack_act, ' sample_snapshot(', ' db4ai.sample_snapshot('); END IF; IF e_stack_act LIKE E'referenced column: i_idx\n' 'SQL statement "SELECT (db4ai.prepare_snapshot_internal(s_id, p_id, m_id, r_id, i_schema, s_name, i_commands, i_comment,\n' ' CURRENT_USER, idx, exec_cmds)).i_idx"\n%' THEN - e_stack_act := substr(e_stack_act, 200); + e_stack_act := 
pg_catalog.substr(e_stack_act, 200); END IF; IF e_stack_act NOT SIMILAR TO 'PL/pgSQL function db4ai.prepare_snapshot\(name,name,text\[\],name,text\) line (184|550|616|723) at assignment%' @@ -74,27 +74,27 @@ BEGIN --generate rules from the mapping IF i_mapping IS NOT NULL THEN DECLARE - sel_view TEXT := 'CREATE OR REPLACE VIEW db4ai.v' || s_id || ' WITH(security_barrier) AS SELECT '; + sel_view TEXT := 'CREATE OR REPLACE VIEW db4ai.v' || s_id::TEXT || ' WITH(security_barrier) AS SELECT '; ins_grnt TEXT := 'GRANT INSERT ('; - ins_rule TEXT := 'CREATE OR REPLACE RULE _INSERT AS ON INSERT TO db4ai.v' || s_id || ' DO INSTEAD INSERT INTO ' - 'db4ai.t' || coalesce(m_id, s_id) || '('; + ins_rule TEXT := 'CREATE OR REPLACE RULE _INSERT AS ON INSERT TO db4ai.v' || s_id::TEXT || ' DO INSTEAD INSERT INTO ' + 'db4ai.t' || coalesce(m_id, s_id)::TEXT || '('; ins_vals TEXT := ' VALUES ('; upd_grnt TEXT; upd_rule TEXT; - dist_key NAME[] := array_agg(coalesce(m[1], replace(m[2], '""', '"'))) FROM regexp_matches( - getdistributekey('db4ai.t' || coalesce(m_id, p_id)),'([^\s",]+)|"((?:[^"]*"")*[^"]*)"', 'g') m; + dist_key NAME[] := pg_catalog.array_agg(coalesce(m[1], pg_catalog.replace(m[2], '""', '"'))) FROM pg_catalog.regexp_matches( + pg_catalog.getdistributekey('db4ai.t' || (coalesce(m_id, p_id))::TEXT),'([^\s",]+)|"((?:[^"]*"")*[^"]*)"', 'g') m; BEGIN - FOR idx IN 3 .. array_length(i_mapping, 1) BY 3 LOOP + FOR idx IN 3 .. pg_catalog.array_length(i_mapping, 1) BY 3 LOOP IF idx = 3 THEN - ins_grnt := ins_grnt || quote_ident(i_mapping[idx]); + ins_grnt := ins_grnt || pg_catalog.quote_ident(i_mapping[idx]); ins_rule := ins_rule || coalesce(i_mapping[idx-2], i_mapping[idx-1]); - ins_vals := ins_vals || 'new.' || quote_ident(i_mapping[idx]); + ins_vals := ins_vals || 'new.' || pg_catalog.quote_ident(i_mapping[idx]); ELSE sel_view := sel_view || ', '; - ins_grnt := ins_grnt || ', ' || quote_ident(i_mapping[idx]); + ins_grnt := ins_grnt || ', ' || pg_catalog.quote_ident(i_mapping[idx]); ins_rule := ins_rule || ', ' || coalesce(i_mapping[idx-2], i_mapping[idx-1]); - ins_vals := ins_vals || ', ' || 'new.' || quote_ident(i_mapping[idx]); + ins_vals := ins_vals || ', ' || 'new.' || pg_catalog.quote_ident(i_mapping[idx]); END IF; IF i_mapping[idx-2] IS NULL THEN -- handle shared columns without private (only CSS) @@ -107,32 +107,32 @@ BEGIN END IF; IF dist_key IS NULL OR NOT i_mapping[idx-2] = ANY(dist_key) THEN -- no updates on DISTRIBUTE BY columns upd_grnt := CASE WHEN upd_grnt IS NULL -- grant update only on private column - THEN 'GRANT UPDATE (' ELSE upd_grnt ||', ' END || quote_ident(i_mapping[idx]); + THEN 'GRANT UPDATE (' ELSE upd_grnt ||', ' END || pg_catalog.quote_ident(i_mapping[idx]); upd_rule := CASE WHEN upd_rule IS NULL -- update only private column - THEN 'CREATE OR REPLACE RULE _UPDATE AS ON UPDATE TO db4ai.v' || s_id || ' DO INSTEAD UPDATE db4ai.t' - || coalesce(m_id, s_id) || ' SET ' + THEN 'CREATE OR REPLACE RULE _UPDATE AS ON UPDATE TO db4ai.v' || s_id::TEXT || ' DO INSTEAD UPDATE db4ai.t' + || coalesce(m_id, s_id)::TEXT || ' SET ' ELSE upd_rule || ', ' END - || i_mapping[idx-2] || '=new.' || quote_ident(i_mapping[idx]); -- update private column + || i_mapping[idx-2] || '=new.' 
|| pg_catalog.quote_ident(i_mapping[idx]); -- update private column END IF; END IF; - sel_view := sel_view || ' AS ' || quote_ident(i_mapping[idx]); + sel_view := sel_view || ' AS ' || pg_catalog.quote_ident(i_mapping[idx]); END LOOP; i_exec_cmds := i_exec_cmds || ARRAY [ - [ 'O', sel_view || ', xc_node_id, ctid FROM db4ai.t' || coalesce(m_id, s_id) - || CASE WHEN m_id IS NULL THEN '' ELSE ' WHERE _' || s_id END ], - [ 'O', 'GRANT SELECT, DELETE ON db4ai.v' || s_id || ' TO ' || i_owner ], - [ 'O', ins_grnt || ') ON db4ai.v' || s_id || ' TO ' || i_owner ], - [ 'O', ins_rule || CASE WHEN m_id IS NULL THEN ')' ELSE ', _' || s_id || ')' END || ins_vals + [ 'O', sel_view || ', xc_node_id, ctid FROM db4ai.t' || coalesce(m_id, s_id)::TEXT + || CASE WHEN m_id IS NULL THEN '' ELSE ' WHERE _' || s_id::TEXT END ], + [ 'O', 'GRANT SELECT, DELETE ON db4ai.v' || s_id::TEXT || ' TO "' || i_owner || '"'], + [ 'O', ins_grnt || ') ON db4ai.v' || s_id::TEXT || ' TO "' || i_owner || '"'], + [ 'O', ins_rule || CASE WHEN m_id IS NULL THEN ')' ELSE ', _' || s_id::TEXT || ')' END || ins_vals || CASE WHEN m_id IS NULL THEN ')' ELSE ', TRUE)' END ], - [ 'O', 'CREATE OR REPLACE RULE _DELETE AS ON DELETE TO db4ai.v' || s_id || ' DO INSTEAD ' - || CASE WHEN m_id IS NULL THEN 'DELETE FROM db4ai.t' || s_id ELSE 'UPDATE db4ai.t' || m_id || ' SET _' || s_id || '=FALSE' END - || ' WHERE t' || coalesce(m_id, s_id) || '.xc_node_id=old.xc_node_id AND t' || coalesce(m_id, s_id) || '.ctid=old.ctid' ] ]; + [ 'O', 'CREATE OR REPLACE RULE _DELETE AS ON DELETE TO db4ai.v' || s_id::TEXT || ' DO INSTEAD ' + || CASE WHEN m_id IS NULL THEN 'DELETE FROM db4ai.t' || s_id::TEXT ELSE 'UPDATE db4ai.t' || m_id::TEXT || ' SET _' || s_id::TEXT || '=FALSE' END + || ' WHERE t' || coalesce(m_id, s_id)::TEXT || '.xc_node_id=old.xc_node_id AND t' || coalesce(m_id, s_id)::TEXT || '.ctid=old.ctid' ] ]; IF upd_rule IS NOT NULL THEN i_exec_cmds := i_exec_cmds || ARRAY [ - [ 'O', upd_grnt || ') ON db4ai.v' || s_id || ' TO ' || i_owner ], - [ 'O', upd_rule || ' WHERE t' || coalesce(m_id, s_id) || '.xc_node_id=old.xc_node_id AND t' || coalesce(m_id, s_id) || '.ctid=old.ctid' ]]; + [ 'O', upd_grnt || ') ON db4ai.v' || s_id::TEXT || ' TO "' || i_owner || '"'], + [ 'O', upd_rule || ' WHERE t' || coalesce(m_id, s_id)::TEXT || '.xc_node_id=old.xc_node_id AND t' || coalesce(m_id, s_id)::TEXT || '.ctid=old.ctid' ]]; END IF; RETURN; @@ -140,7 +140,7 @@ BEGIN END IF; -- Execute the queries - LOOP EXIT WHEN i_idx = 1 + array_length(i_exec_cmds, 1); + LOOP EXIT WHEN i_idx = 1 + pg_catalog.array_length(i_exec_cmds, 1); CASE i_exec_cmds[i_idx][1] WHEN 'O' THEN -- RAISE NOTICE 'owner executing: %', i_exec_cmds[i_idx][2]; @@ -153,18 +153,18 @@ BEGIN END CASE; END LOOP; - EXECUTE 'DROP RULE IF EXISTS _INSERT ON db4ai.v' || s_id; - EXECUTE 'DROP RULE IF EXISTS _UPDATE ON db4ai.v' || s_id; - EXECUTE 'DROP RULE IF EXISTS _DELETE ON db4ai.v' || s_id; - EXECUTE 'COMMENT ON VIEW db4ai.v' || s_id || ' IS ''snapshot ' || quote_ident(i_schema) || '.' || quote_ident(i_name) - || ' backed by db4ai.t' || coalesce(m_id, s_id) || CASE WHEN length(i_comment) > 0 THEN ' comment is "' || i_comment + EXECUTE 'DROP RULE IF EXISTS _INSERT ON db4ai.v' || s_id::TEXT; + EXECUTE 'DROP RULE IF EXISTS _UPDATE ON db4ai.v' || s_id::TEXT; + EXECUTE 'DROP RULE IF EXISTS _DELETE ON db4ai.v' || s_id::TEXT; + EXECUTE 'COMMENT ON VIEW db4ai.v' || s_id::TEXT || ' IS ''snapshot ' || pg_catalog.quote_ident(i_schema) || '.' 
|| pg_catalog.quote_ident(i_name) + || ' backed by db4ai.t' || coalesce(m_id, s_id)::TEXT || CASE WHEN pg_catalog.length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; - EXECUTE 'REVOKE ALL PRIVILEGES ON db4ai.v' || s_id || ' FROM ' || i_owner; - EXECUTE 'GRANT SELECT ON db4ai.v' || s_id || ' TO ' || i_owner || ' WITH GRANT OPTION'; - EXECUTE 'SELECT COUNT(*) FROM db4ai.v' || s_id INTO STRICT row_count; + EXECUTE 'REVOKE ALL PRIVILEGES ON db4ai.v' || s_id::TEXT || ' FROM "' || i_owner || '"'; + EXECUTE 'GRANT SELECT ON db4ai.v' || s_id::TEXT || ' TO "' || i_owner || '" WITH GRANT OPTION'; + EXECUTE 'SELECT COUNT(*) FROM db4ai.v' || s_id::TEXT INTO STRICT row_count; INSERT INTO db4ai.snapshot (id, parent_id, matrix_id, root_id, schema, name, owner, commands, comment, row_count) - VALUES (s_id, p_id, m_id, r_id, i_schema, i_name, i_owner, i_commands, i_comment, row_count); + VALUES (s_id, p_id, m_id, r_id, i_schema, i_name, '"' || i_owner || '"', i_commands, i_comment, row_count); END; $$; @@ -222,14 +222,14 @@ BEGIN -- obtain active message level BEGIN - EXECUTE 'SET LOCAL client_min_messages TO ' || current_setting('db4ai.message_level'); - RAISE INFO 'effective client_min_messages is %', upper(current_setting('db4ai.message_level')); + EXECUTE 'SET LOCAL client_min_messages TO ' || pg_catalog.current_setting('db4ai.message_level'); + RAISE INFO 'effective client_min_messages is %', pg_catalog.upper(pg_catalog.current_setting('db4ai.message_level')); EXCEPTION WHEN OTHERS THEN END; -- obtain active snapshot mode BEGIN - s_mode := upper(current_setting('db4ai_snapshot_mode')); + s_mode := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_mode')); EXCEPTION WHEN OTHERS THEN s_mode := 'MSS'; END; @@ -240,17 +240,17 @@ BEGIN -- obtain relevant configuration parameters BEGIN - s_vers_del := upper(current_setting('db4ai_snapshot_version_delimiter')); + s_vers_del := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_version_delimiter')); EXCEPTION WHEN OTHERS THEN s_vers_del := '@'; END; BEGIN - s_vers_sep := upper(current_setting('db4ai_snapshot_version_separator')); + s_vers_sep := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_version_separator')); EXCEPTION WHEN OTHERS THEN s_vers_sep := '.'; END; - current_compatibility_mode := current_setting('sql_compatibility'); + current_compatibility_mode := pg_catalog.current_setting('sql_compatibility'); IF current_compatibility_mode = 'ORA' OR current_compatibility_mode = 'A' THEN none_represent := 0; ELSE @@ -265,10 +265,10 @@ BEGIN IF i_parent IS NULL OR i_parent = '' THEN RAISE EXCEPTION 'i_parent cannot be NULL or empty'; ELSE - i_parent := replace(i_parent, chr(1), s_vers_del); - i_parent := replace(i_parent, chr(2), s_vers_sep); - p_name_vers := regexp_split_to_array(i_parent, s_vers_del); - IF array_length(p_name_vers, 1) <> 2 OR array_length(p_name_vers, 2) <> none_represent THEN + i_parent := pg_catalog.replace(i_parent, pg_catalog.chr(1), s_vers_del); + i_parent := pg_catalog.replace(i_parent, pg_catalog.chr(2), s_vers_sep); + p_name_vers := pg_catalog.regexp_split_to_array(i_parent, s_vers_del); + IF pg_catalog.array_length(p_name_vers, 1) <> 2 OR pg_catalog.array_length(p_name_vers, 2) <> none_represent THEN RAISE EXCEPTION 'i_parent must contain exactly one ''%'' character', s_vers_del USING HINT = 'reference a snapshot using the format: snapshot_name' || s_vers_del || 'version'; END IF; @@ -278,31 +278,31 @@ BEGIN BEGIN SELECT id, matrix_id, root_id FROM db4ai.snapshot WHERE 
schema = i_schema AND name = i_parent INTO STRICT p_id, m_id, r_id; EXCEPTION WHEN NO_DATA_FOUND THEN - RAISE EXCEPTION 'parent snapshot %.% does not exist' , quote_ident(i_schema), quote_ident(i_parent); + RAISE EXCEPTION 'parent snapshot %.% does not exist' , pg_catalog.quote_ident(i_schema), pg_catalog.quote_ident(i_parent); END; --SELECT nextval('db4ai.snapshot_sequence') INTO STRICT s_id; - SELECT MAX(id)+1 FROM db4ai.snapshot INTO STRICT s_id; -- openGauss BUG: cannot create sequences in initdb + SELECT pg_catalog.MAX(id)+1 FROM db4ai.snapshot INTO STRICT s_id; -- openGauss BUG: cannot create sequences in initdb -- extract highest used c_id from existing backing table or parent () -- cannot use information_schema here, because the current user has no read permission on the backing table - SELECT 1 + max(ltrim(attname, 'f')::BIGINT) FROM pg_catalog.pg_attribute INTO STRICT c_id - WHERE attrelid = ('db4ai.t' || coalesce(m_id, p_id))::regclass AND attnum > 0 AND NOT attisdropped AND attname like 'f%'; + SELECT 1 + pg_catalog.max(pg_catalog.ltrim(attname, 'f')::BIGINT) FROM pg_catalog.pg_attribute INTO STRICT c_id + WHERE attrelid = ('db4ai.t' || coalesce(m_id, p_id)::TEXT)::regclass AND attnum > 0 AND NOT attisdropped AND attname like 'f%'; IF c_id IS NULL THEN RAISE EXCEPTION 'prepare snapshot internal error3: %', coalesce(m_id, p_id); END IF; - IF i_commands IS NULL OR array_length(i_commands, 1) = none_represent OR array_length(i_commands, 2) <> none_represent THEN + IF i_commands IS NULL OR pg_catalog.array_length(i_commands, 1) = none_represent OR pg_catalog.array_length(i_commands, 2) <> none_represent THEN RAISE EXCEPTION 'i_commands array malformed' USING HINT = 'pass SQL DML and DDL operations as TEXT[] literal, e.g. ''{ALTER, ADD a int, DROP c, DELETE, ' 'WHERE b=5, INSERT, FROM t, UPDATE, FROM t, SET x=y, SET z=f(z), WHERE t.u=v}'''; END IF; -- extract normalized projection list - p_sv_proj := substring(pg_get_viewdef('db4ai.v' || p_id), '^SELECT (.*), t[0-9]+\.xc_node_id, t[0-9]+\.ctid FROM.*$'); - mapping := array(SELECT unnest(ARRAY[ m[1], m[2], coalesce(m[3], replace(m[4],'""','"'))]) - FROM regexp_matches(p_sv_proj, CASE s_mode WHEN 'CSS' + p_sv_proj := pg_catalog.substring(pg_catalog.pg_get_viewdef('db4ai.v' || p_id::TEXT), '^SELECT (.*), t[0-9]+\.xc_node_id, t[0-9]+\.ctid FROM.*$'); + mapping := array(SELECT pg_catalog.unnest(ARRAY[ m[1], m[2], coalesce(m[3], pg_catalog.replace(m[4],'""','"'))]) + FROM pg_catalog.regexp_matches(p_sv_proj, CASE s_mode WHEN 'CSS' -- inherited CSS columns are shared (private: nullable, shared: not null, user_cname: not null) THEN '(?:COALESCE\(t[0-9]+\.(f[0-9]+), )?t[0-9]+\.(f[0-9]+)(?:\))? AS (?:([^\s",]+)|"((?:[^"]*"")*[^"]*)")' -- all MSS columns are private (privte: not null, shared: nullable, user_cname: not null) @@ -329,34 +329,34 @@ BEGIN s_bt_dist TEXT; -- DISTRIBUTE BY clause for creating backing table BEGIN - FOR idx IN 3 .. array_length(mapping, 1) BY 3 LOOP - s_bt_proj := s_bt_proj || quote_ident(mapping[idx]) || ' AS ' || mapping[idx-2] || ','; + FOR idx IN 3 .. 
pg_catalog.array_length(mapping, 1) BY 3 LOOP + s_bt_proj := s_bt_proj || pg_catalog.quote_ident(mapping[idx]) || ' AS ' || mapping[idx-2] || ','; END LOOP; - s_bt_dist := getdistributekey('db4ai.t' || coalesce(m_id, p_id)); + s_bt_dist := pg_catalog.getdistributekey('db4ai.t' || coalesce(m_id, p_id)::TEXT); s_bt_dist := CASE WHEN s_bt_dist IS NULL THEN ' DISTRIBUTE BY REPLICATION' ELSE ' DISTRIBUTE BY HASH(' || s_bt_dist || ')' END; s_bt_dist := ''; -- we silently drop DISTRIBUTE_BY exec_cmds := ARRAY [ - [ 'O', 'CREATE TABLE db4ai.t' || s_id || ' WITH (orientation = column, compression = low)' + [ 'O', 'CREATE TABLE db4ai.t' || s_id::TEXT || ' WITH (orientation = column, compression = low)' -- extract and propagate DISTRIBUTE BY from parent - || s_bt_dist || ' AS SELECT ' || rtrim(s_bt_proj, ',') || ' FROM db4ai.v' || p_id ]]; + || s_bt_dist || ' AS SELECT ' || pg_catalog.rtrim(s_bt_proj, ',') || ' FROM db4ai.v' || p_id::TEXT ]]; END; ELSIF s_mode = 'CSS' THEN IF m_id IS NULL THEN exec_cmds := ARRAY [ - [ 'O', 'UPDATE db4ai.snapshot SET matrix_id = ' || p_id || ' WHERE schema = ''' || i_schema || ''' AND name = ''' + [ 'O', 'UPDATE db4ai.snapshot SET matrix_id = ' || p_id::TEXT || ' WHERE schema = ''' || i_schema || ''' AND name = ''' || i_parent || '''' ], - [ 'O', 'ALTER TABLE db4ai.t' || p_id || ' ADD _' || p_id || ' BOOLEAN NOT NULL DEFAULT TRUE' ], - [ 'O', 'ALTER TABLE db4ai.t' || p_id || ' ALTER _' || p_id || ' SET DEFAULT FALSE' ], - [ 'O', 'CREATE OR REPLACE VIEW db4ai.v' || p_id || ' WITH(security_barrier) AS SELECT ' || p_sv_proj || ', xc_node_id, ctid FROM db4ai.t' - || p_id || ' WHERE _' || p_id ]]; + [ 'O', 'ALTER TABLE db4ai.t' || p_id::TEXT || ' ADD _' || p_id::TEXT || ' BOOLEAN NOT NULL DEFAULT TRUE' ], + [ 'O', 'ALTER TABLE db4ai.t' || p_id::TEXT || ' ALTER _' || p_id::TEXT || ' SET DEFAULT FALSE' ], + [ 'O', 'CREATE OR REPLACE VIEW db4ai.v' || p_id::TEXT || ' WITH(security_barrier) AS SELECT ' || p_sv_proj || ', xc_node_id, ctid FROM db4ai.t' + || p_id::TEXT || ' WHERE _' || p_id::TEXT ]]; m_id := p_id; END IF; exec_cmds := exec_cmds || ARRAY [ - [ 'O', 'ALTER TABLE db4ai.t' || m_id || ' ADD _' || s_id || ' BOOLEAN NOT NULL DEFAULT FALSE' ], - [ 'O', 'UPDATE db4ai.t' || m_id || ' SET _' || s_id || ' = TRUE WHERE _' || p_id ]]; + [ 'O', 'ALTER TABLE db4ai.t' || m_id::TEXT || ' ADD _' || s_id::TEXT || ' BOOLEAN NOT NULL DEFAULT FALSE' ], + [ 'O', 'UPDATE db4ai.t' || m_id::TEXT || ' SET _' || s_id::TEXT || ' = TRUE WHERE _' || p_id::TEXT ]]; END IF; -- generate and append grant, create view and rewrite rules for new snapshot @@ -371,8 +371,8 @@ BEGIN -- apply SQL DML/DDL according to snapshot mode FOREACH command_str IN ARRAY (i_commands || ARRAY[NULL] ) LOOP - command_str := btrim(command_str); - pattern := upper(regexp_replace(left(command_str, 30), '\s+', ' ', 'g')); + command_str := pg_catalog.btrim(command_str); + pattern := pg_catalog.upper(pg_catalog.regexp_replace(pg_catalog.left(command_str, 30), '\s+', ' ', 'g')); IF pattern is NULL THEN next_op := NULL; ELSIF pattern = 'ALTER' THEN -- ALTER keyword is optional @@ -383,21 +383,21 @@ BEGIN next_op := INSERT_OP; ELSIF pattern = 'UPDATE' THEN next_op := UPDATE_OP; - ELSIF left(pattern, 7) = 'DELETE ' THEN + ELSIF pg_catalog.left(pattern, 7) = 'DELETE ' THEN next_op := DELETE_OP; - SELECT coalesce(m[1], m[2]), m [3] FROM regexp_matches(command_str, + SELECT coalesce(m[1], m[2]), m [3] FROM pg_catalog.regexp_matches(command_str, '^\s*DELETE\s+FROM\s*(?: snapshot |"snapshot")\s*(?:AS\s*(?: ([^\s"]+) 
|"((?:[^"]*"")*[^"]*)")\s*)?(.*)\s*$', 'i') m INTO next_clauses[AS_CLAUSE], next_clauses[FROM_CLAUSE]; - RAISE NOTICE E'XXX DELETE \n%\n%', command_str, array_to_string(next_clauses, E'\n'); - ELSIF left(pattern, 7) = 'INSERT ' THEN + RAISE NOTICE E'XXX DELETE \n%\n%', command_str, pg_catalog.array_to_string(next_clauses, E'\n'); + ELSIF pg_catalog.left(pattern, 7) = 'INSERT ' THEN next_op := INSERT_OP; - SELECT coalesce(m[1], m[2]), m [3] FROM regexp_matches(command_str, + SELECT coalesce(m[1], m[2]), m [3] FROM pg_catalog.regexp_matches(command_str, '^\s*INSERT\s+INTO\s*(?: snapshot |"snapshot")\s*(.*)\s*$', 'i') m INTO STRICT next_clauses[SET_CLAUSE]; - RAISE NOTICE E'XXX INSERT \n%\n%', command_str, array_to_string(next_clauses, E'\n'); - ELSIF left(pattern, 7) = 'UPDATE ' THEN + RAISE NOTICE E'XXX INSERT \n%\n%', command_str, pg_catalog.array_to_string(next_clauses, E'\n'); + ELSIF pg_catalog.left(pattern, 7) = 'UPDATE ' THEN next_op := UPDATE_OP; - SELECT coalesce(m[1], m[2]), m [3] FROM regexp_matches(command_str, + SELECT coalesce(m[1], m[2]), m [3] FROM pg_catalog.regexp_matches(command_str, '^\s*UPDATE\s*(?: snapshot |"snapshot")\s*(?:AS\s*(?: ([^\s"]+) |"((?:[^"]*"")*[^"]*)")\s*)?(.*)\s*$', 'i') m INTO STRICT next_clauses[AS_CLAUSE], next_clauses[SET_CLAUSE]; @@ -416,12 +416,12 @@ BEGIN LOOP idx := idx + 1; - cur_ch := substr(stmt, idx, 1); + cur_ch := pg_catalog.substr(stmt, idx, 1); EXIT WHEN cur_ch IS NULL OR cur_ch = ''; CASE cur_ch WHEN '"' THEN - IF quoted AND substr(stmt, idx + 1, 1) = '"' THEN + IF quoted AND pg_catalog.substr(stmt, idx + 1, 1) = '"' THEN idx := idx + 1; ELSE quoted := NOT quoted; @@ -441,39 +441,39 @@ BEGIN WHEN ' ' THEN IF quoted OR nested > 0 THEN CONTINUE; - ELSIF pattern IS NULL OR length(pattern) = 0 THEN + ELSIF pattern IS NULL OR pg_catalog.length(pattern) = 0 THEN start_pos := idx; CONTINUE; END IF; ELSE - pattern := pattern || upper(cur_ch); + pattern := pattern || pg_catalog.upper(cur_ch); CONTINUE; END CASE; -- END splitter code for testing IF pattern IN ('FROM', 'WHERE') THEN - next_clauses[FROM_CLAUSE] := substr(next_clauses[SET_CLAUSE], start_pos + 1); - next_clauses[SET_CLAUSE] := left(next_clauses[SET_CLAUSE], start_pos - 1); + next_clauses[FROM_CLAUSE] := pg_catalog.substr(next_clauses[SET_CLAUSE], start_pos + 1); + next_clauses[SET_CLAUSE] := pg_catalog.left(next_clauses[SET_CLAUSE], start_pos - 1); EXIT; END IF; pattern := ''; start_pos := idx; END LOOP; END; - RAISE NOTICE E'XXX UPDATE \n%\n%', command_str, array_to_string(next_clauses, E'\n'); - ELSIF left(pattern, 6) = 'ALTER ' THEN - SELECT coalesce(m[1], m[2]), m [3] FROM regexp_matches(command_str, + RAISE NOTICE E'XXX UPDATE \n%\n%', command_str, pg_catalog.array_to_string(next_clauses, E'\n'); + ELSIF pg_catalog.left(pattern, 6) = 'ALTER ' THEN + SELECT coalesce(m[1], m[2]), m [3] FROM pg_catalog.regexp_matches(command_str, '^\s*ALTER\s+TABLE\s*(?: snapshot |"snapshot")\s*(.*)\s*$', 'i') m INTO STRICT next_clauses[ALTER_CLAUSE]; - RAISE NOTICE E'XXX ALTER \n%\n%', command_str, array_to_string(next_clauses, E'\n'); + RAISE NOTICE E'XXX ALTER \n%\n%', command_str, pg_catalog.array_to_string(next_clauses, E'\n'); IF current_op IS NULL OR current_clauses[ALTER_CLAUSE] IS NULL THEN next_op := ALTER_OP; ELSE current_clauses[ALTER_CLAUSE] := current_clauses[ALTER_CLAUSE] || ', ' || next_clauses[ALTER_CLAUSE]; next_clauses[ALTER_CLAUSE] := NULL; END IF; - ELSIF left(pattern, 4) = 'ADD ' OR left(pattern, 5) = 'DROP ' THEN + ELSIF pg_catalog.left(pattern, 4) = 'ADD ' OR 
pg_catalog.left(pattern, 5) = 'DROP ' THEN --for chaining, conflicting ALTER ops must be avoided by user IF current_op IS NULL OR current_op <> ALTER_OP THEN next_op := ALTER_OP; -- ALTER keyword is optional @@ -485,7 +485,7 @@ BEGIN current_clauses[ALTER_CLAUSE] := current_clauses[ALTER_CLAUSE] || ', ' || command_str; CONTINUE; -- allow chaining of ALTER ops END IF; - ELSIF left(pattern, 6) = 'WHERE ' THEN + ELSIF pg_catalog.left(pattern, 6) = 'WHERE ' THEN IF current_op IS NULL THEN RAISE EXCEPTION 'missing INSERT / UPDATE / DELETE keyword before WHERE clause in i_commands at: ''%''', command_str; ELSIF current_op NOT IN (INSERT_OP, UPDATE_OP, DELETE_OP) THEN @@ -496,7 +496,7 @@ BEGIN RAISE EXCEPTION 'multiple WHERE clauses in % at: ''%''', ops_str[current_op], command_str; END IF; CONTINUE; - ELSIF left(pattern, 5) = 'FROM ' THEN + ELSIF pg_catalog.left(pattern, 5) = 'FROM ' THEN IF current_op IS NULL THEN RAISE EXCEPTION 'missing INSERT / UPDATE keyword before FROM clause in i_commands at: ''%''', command_str; ELSIF current_op NOT IN (INSERT_OP, UPDATE_OP) THEN @@ -507,7 +507,7 @@ BEGIN RAISE EXCEPTION 'multiple FROM clauses in % at: ''%''', ops_str[current_op], command_str; END IF; CONTINUE; - ELSIF left(pattern, 6) = 'USING ' THEN + ELSIF pg_catalog.left(pattern, 6) = 'USING ' THEN IF current_op IS NULL THEN RAISE EXCEPTION 'missing DELETE keyword before USING clause in i_commands at: ''%''', command_str; ELSIF current_op NOT IN (DELETE_OP) THEN @@ -518,7 +518,7 @@ BEGIN RAISE EXCEPTION 'multiple USING clauses in DELETE at: ''%''', command_str; END IF; CONTINUE; - ELSIF left(pattern, 4) = 'SET ' THEN + ELSIF pg_catalog.left(pattern, 4) = 'SET ' THEN IF current_op IS NULL THEN RAISE EXCEPTION 'missing UPDATE keyword before SET clause in i_commands at: ''%''', command_str; ELSIF current_op NOT IN (UPDATE_OP) THEN @@ -529,16 +529,16 @@ BEGIN THEN command_str ELSE current_clauses[SET_CLAUSE] || ' ' || command_str END; END IF; CONTINUE; - ELSIF left(pattern, 3) = 'AS ' THEN + ELSIF pg_catalog.left(pattern, 3) = 'AS ' THEN IF current_op IS NULL THEN RAISE EXCEPTION 'missing UPDATE / DELETE keyword before AS clause in i_commands at: ''%''', command_str; ELSIF current_op NOT IN (UPDATE_OP, DELETE_OP) THEN RAISE EXCEPTION 'illegal AS clause in % at: ''%''', ops_str[current_op], command_str; ELSIF current_clauses[AS_CLAUSE] IS NULL THEN DECLARE - as_pos INT := 3 + strpos(upper(command_str), 'AS '); + as_pos INT := 3 + pg_catalog.strpos(pg_catalog.upper(command_str), 'AS '); BEGIN - current_clauses[AS_CLAUSE] := ltrim(substr(command_str, as_pos)); + current_clauses[AS_CLAUSE] := pg_catalog.ltrim(pg_catalog.substr(command_str, as_pos)); END; ELSE RAISE EXCEPTION 'multiple AS clauses in % at: ''%''', ops_str[current_op], command_str; @@ -586,12 +586,12 @@ BEGIN LOOP idx := idx + 1; - cur_ch := substr(tokens, idx, 1); + cur_ch := pg_catalog.substr(tokens, idx, 1); EXIT WHEN cur_ch IS NULL OR cur_ch = ''; CASE cur_ch WHEN '"' THEN - IF quoted AND substr(tokens, idx + 1, 1) = '"' THEN + IF quoted AND pg_catalog.substr(tokens, idx + 1, 1) = '"' THEN pattern := pattern || '"'; idx := idx + 1; ELSE @@ -604,7 +604,7 @@ BEGIN IF quoted THEN pattern := pattern || cur_ch; CONTINUE; - ELSIF pattern IS NULL OR length(pattern) = 0 THEN + ELSIF pattern IS NULL OR pg_catalog.length(pattern) = 0 THEN pattern := ','; ELSE idx := idx - 1; -- reset on comma for next loop @@ -613,30 +613,30 @@ BEGIN IF quoted THEN pattern := pattern || cur_ch; CONTINUE; - ELSIF pattern IS NULL OR length(pattern) = 0 THEN + ELSIF 
pattern IS NULL OR pg_catalog.length(pattern) = 0 THEN CONTINUE; END IF; ELSE - pattern := pattern || CASE WHEN quoted THEN cur_ch ELSE lower(cur_ch) END; + pattern := pattern || CASE WHEN quoted THEN cur_ch ELSE pg_catalog.lower(cur_ch) END; CONTINUE; END CASE; -- END tokenizer code for testing - IF alt_op = 'DROP' AND upper(dropif) = 'IF' THEN + IF alt_op = 'DROP' AND pg_catalog.upper(dropif) = 'IF' THEN IF pattern = ',' THEN pattern := dropif; -- interpret 'if' as column name (not a keyword) idx := idx - 1; -- reset on comma for next loop - ELSIF upper(pattern) <> 'EXISTS' THEN + ELSIF pg_catalog.upper(pattern) <> 'EXISTS' THEN RAISE EXCEPTION 'expected EXISTS keyword in % operation after ''%'' in: ''%''', alt_op, dropif, current_clauses[ALTER_CLAUSE]; END IF; END IF; IF expect THEN - IF upper(pattern) IN ('ADD', 'DROP') THEN + IF pg_catalog.upper(pattern) IN ('ADD', 'DROP') THEN IF alt_op IS NULL THEN - alt_op := upper(pattern); + alt_op := pg_catalog.upper(pattern); expect := FALSE; ELSE RAISE EXCEPTION 'unable to extract column name in % operation: ''%''', @@ -655,16 +655,16 @@ BEGIN IF command_str IS NOT NULL THEN command_str := command_str || ' ' || pattern; END IF; - ELSIF upper(pattern) = 'COLUMN' THEN + ELSIF pg_catalog.upper(pattern) = 'COLUMN' THEN -- skip keyword COLUMN between ADD/DROP and column name - ELSIF alt_op = 'DROP' AND upper(pattern) = 'IF' AND dropif IS NULL THEN + ELSIF alt_op = 'DROP' AND pg_catalog.upper(pattern) = 'IF' AND dropif IS NULL THEN dropif := pattern; -- 'IF' is not a keyword - ELSIF alt_op = 'DROP' AND upper(pattern) = 'EXISTS' AND upper(dropif) = 'IF' THEN + ELSIF alt_op = 'DROP' AND pg_catalog.upper(pattern) = 'EXISTS' AND pg_catalog.upper(dropif) = 'IF' THEN dropif := pattern; -- 'EXISTS' is not a keyword ELSIF alt_op IN ('ADD', 'DROP') THEN -- attempt to map the pattern - FOR idx IN 3 .. array_length(mapping, 1) BY 3 LOOP + FOR idx IN 3 .. 
pg_catalog.array_length(mapping, 1) BY 3 LOOP IF pattern = mapping[idx] THEN IF alt_op = 'ADD' THEN -- check if pattern was mapped to an existing column @@ -673,10 +673,10 @@ BEGIN -- DROP a private column (MSS and CSS) IF mapping[idx-2] IS NOT NULL THEN command_str := CASE WHEN command_str IS NULL - THEN 'ALTER TABLE db4ai.t' || coalesce(m_id, s_id) - ELSE command_str || ',' END || ' DROP ' || mapping[idx-2]; + THEN 'ALTER TABLE db4ai.t' || coalesce(m_id, s_id)::TEXT + ELSE command_str || ',' END || ' DROP ' || mapping[idx-2]::TEXT; END IF; - mapping := mapping[1:(idx-3)] || mapping[idx+1:(array_length(mapping, 1))]; + mapping := mapping[1:(idx-3)] || mapping[idx+1:(pg_catalog.array_length(mapping, 1))]; newmap := TRUE; alt_op := NULL; EXIT; @@ -688,14 +688,14 @@ BEGIN IF alt_op = 'ADD' THEN -- ADD a private column (MSS and CSS) command_str := CASE WHEN command_str IS NULL - THEN 'ALTER TABLE db4ai.t' || coalesce(m_id, s_id) - ELSE command_str || ',' END || ' ADD f' || c_id; - mapping := mapping || ARRAY [ 'f' || c_id, NULL, pattern ]::NAME[]; + THEN 'ALTER TABLE db4ai.t' || coalesce(m_id, s_id)::TEXT + ELSE command_str || ',' END || ' ADD f' || c_id::TEXT; + mapping := mapping || ARRAY [ 'f' || c_id::TEXT, NULL, pattern ]::NAME[]; newmap := TRUE; c_id := c_id + 1; ELSIF alt_op = 'DROP' THEN -- check whether pattern needs mapping to an existing column - IF dropif IS NULL OR upper(dropif) <> 'EXISTS' THEN + IF dropif IS NULL OR pg_catalog.upper(dropif) <> 'EXISTS' THEN RAISE EXCEPTION 'unable to map field "%" to backing table in % operation: ''%''', pattern, alt_op, current_clauses[ALTER_CLAUSE]; END IF; @@ -712,12 +712,12 @@ BEGIN IF quoted THEN RAISE EXCEPTION 'unterminated quoted identifier ''"%'' at or near: ''%''', - substr(pattern, 1, char_length(pattern)-1), current_clauses[ALTER_CLAUSE]; + pg_catalog.substr(pattern, 1, pg_catalog.char_length(pattern)-1), current_clauses[ALTER_CLAUSE]; END IF; -- CREATE OR REPLACE: cannot drop columns from view - MUST use DROP / CREATE -- clear view dependencies for backing table columns - exec_cmds := exec_cmds || ARRAY [ 'O', 'DROP VIEW IF EXISTS db4ai.v' || s_id ]; + exec_cmds := exec_cmds || ARRAY [ 'O', 'DROP VIEW IF EXISTS db4ai.v' || s_id::TEXT ]; -- append the DDL statement for the backing table (if any) IF command_str IS NOT NULL THEN @@ -737,13 +737,13 @@ BEGIN END IF; exec_cmds := exec_cmds || ARRAY [ - 'U', 'INSERT INTO db4ai.v' || s_id + 'U', 'INSERT INTO db4ai.v' || s_id::TEXT || ' ' || current_clauses[SET_CLAUSE] -- generic SQL || CASE WHEN current_clauses[FROM_CLAUSE] IS NULL THEN '' ELSE ' ' || current_clauses[FROM_CLAUSE] END || CASE WHEN current_clauses[WHERE_CLAUSE] IS NULL THEN '' ELSE ' ' || current_clauses[WHERE_CLAUSE] END ]; ELSIF current_op = DELETE_OP THEN exec_cmds := exec_cmds || ARRAY [ - 'U', 'DELETE FROM db4ai.v' || s_id || ' AS ' || current_clauses[AS_CLAUSE] + 'U', 'DELETE FROM db4ai.v' || s_id::TEXT || ' AS ' || current_clauses[AS_CLAUSE] || CASE WHEN current_clauses[FROM_CLAUSE] IS NULL THEN '' ELSE ' ' || current_clauses[FROM_CLAUSE] END -- USING || CASE WHEN current_clauses[WHERE_CLAUSE] IS NULL THEN '' ELSE ' ' || current_clauses[WHERE_CLAUSE] END ]; ELSIF current_op = UPDATE_OP THEN @@ -755,21 +755,21 @@ BEGIN -- extract updated fields and check their mapping FOR pattern IN - SELECT coalesce(m[1], replace(m[2],'""','"')) - FROM regexp_matches(current_clauses[SET_CLAUSE], + SELECT coalesce(m[1], pg_catalog.replace(m[2],'""','"')) + FROM pg_catalog.regexp_matches(current_clauses[SET_CLAUSE], 
'([^\s"]+)\s*=|"((?:[^"]*"")*[^"]*)"\s*=','g') m LOOP - FOR idx IN 3 .. array_length(mapping, 1) BY 3 LOOP + FOR idx IN 3 .. pg_catalog.array_length(mapping, 1) BY 3 LOOP IF pattern = mapping[idx] THEN -- ADD a private column (only CSS) IF mapping[idx-2] IS NULL THEN command_str := CASE WHEN command_str IS NULL - THEN 'ALTER TABLE db4ai.t' || m_id + THEN 'ALTER TABLE db4ai.t' || m_id::TEXT ELSE command_str || ',' END - || ' ADD f' || c_id || ' ' - || format_type(atttypid, atttypmod) FROM pg_catalog.pg_attribute - WHERE attrelid = ('db4ai.t' || m_id)::regclass AND attname = mapping[idx-1]; - mapping[idx-2] := 'f' || c_id; + || ' ADD f' || c_id::TEXT || ' ' + || pg_catalog.format_type(atttypid, atttypmod) FROM pg_catalog.pg_attribute + WHERE attrelid = ('db4ai.t' || m_id::TEXT)::regclass AND attname = mapping[idx-1]; + mapping[idx-2] := 'f' || c_id::TEXT; newmap := TRUE; c_id := c_id + 1; END IF; @@ -798,7 +798,7 @@ BEGIN END IF; exec_cmds := exec_cmds || ARRAY [ - 'U', 'UPDATE db4ai.v' || s_id || ' AS ' || current_clauses[AS_CLAUSE] + 'U', 'UPDATE db4ai.v' || s_id::TEXT || ' AS ' || current_clauses[AS_CLAUSE] || ' ' || current_clauses[SET_CLAUSE] || CASE WHEN current_clauses[FROM_CLAUSE] IS NULL THEN '' ELSE ' ' || current_clauses[FROM_CLAUSE] END || CASE WHEN current_clauses[WHERE_CLAUSE] IS NULL THEN '' ELSE ' ' || current_clauses[WHERE_CLAUSE] END ]; @@ -815,9 +815,9 @@ BEGIN -- compute final version string IF i_vers IS NULL OR i_vers = '' THEN BEGIN - vers_arr := regexp_split_to_array(p_name_vers[2], CASE s_vers_sep WHEN '.' THEN '\.' ELSE s_vers_sep END); + vers_arr := pg_catalog.regexp_split_to_array(p_name_vers[2], CASE s_vers_sep WHEN '.' THEN '\.' ELSE s_vers_sep END); - IF array_length(vers_arr, 1) <> 3 OR array_length(vers_arr, 2) <> none_represent OR + IF pg_catalog.array_length(vers_arr, 1) <> 3 OR pg_catalog.array_length(vers_arr, 2) <> none_represent OR vers_arr[1] ~ '[^0-9]' OR vers_arr[2] ~ '[^0-9]' OR vers_arr[3] ~ '[^0-9]' THEN RAISE EXCEPTION 'illegal version format'; END IF; @@ -831,33 +831,33 @@ BEGIN ELSE vers_arr[3] := vers_arr[3] + 1; END IF; - i_vers := s_vers_del || array_to_string(vers_arr, s_vers_sep); + i_vers := s_vers_del || pg_catalog.array_to_string(vers_arr, s_vers_sep); EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'parent has nonstandard version %. i_vers cannot be null or empty', p_name_vers[2] USING HINT = 'provide custom version using i_vers parameter for new snapshot'; END; - ELSE - i_vers := replace(i_vers, chr(2), s_vers_sep); +ELSE + i_vers := pg_catalog.replace(i_vers, pg_catalog.chr(2), s_vers_sep); IF LEFT(i_vers, 1) <> s_vers_del THEN i_vers := s_vers_del || i_vers; - ELSIF char_length(i_vers) < 2 THEN + ELSIF pg_catalog.char_length(i_vers) < 2 THEN RAISE EXCEPTION 'illegal i_vers: ''%''', s_vers_del; END IF; - IF strpos(substr(i_vers, 2), s_vers_del) > 0 THEN + IF pg_catalog.strpos(pg_catalog.substr(i_vers, 2), s_vers_del) > 0 THEN RAISE EXCEPTION 'i_vers may contain only one single, leading ''%'' character', s_vers_del USING HINT = 'specify snapshot version as [' || s_vers_del || ']x' || s_vers_sep || 'y' || s_vers_sep || 'z or [' || s_vers_del || ']label with optional, leading ''' || s_vers_del || ''''; END IF; END IF; - IF char_length(p_name_vers[1] || i_vers) > 63 THEN + IF pg_catalog.char_length(p_name_vers[1] || i_vers) > 63 THEN RAISE EXCEPTION 'snapshot name too long: ''%''', p_name_vers[1] || i_vers; ELSE s_name := p_name_vers[1] || i_vers; END IF; -- the final name of the snapshot - qual_name := quote_ident(i_schema) || '.' 
|| quote_ident(s_name); + qual_name := pg_catalog.quote_ident(i_schema) || '.' || pg_catalog.quote_ident(s_name); -- check for duplicate snapshot IF 0 < (SELECT COUNT(0) FROM db4ai.snapshot WHERE schema = i_schema AND name = s_name) THEN @@ -866,15 +866,15 @@ BEGIN IF s_mode = 'MSS' THEN exec_cmds := exec_cmds || ARRAY [ - 'O', 'COMMENT ON TABLE db4ai.t' || s_id || ' IS ''snapshot backing table, root is ' || qual_name || '''' ]; + 'O', 'COMMENT ON TABLE db4ai.t' || s_id::TEXT || ' IS ''snapshot backing table, root is ' || qual_name || '''' ]; END IF; -- Execute the queries - RAISE NOTICE E'accumulated commands:\n%', array_to_string(exec_cmds, E'\n'); + RAISE NOTICE E'accumulated commands:\n%', pg_catalog.array_to_string(exec_cmds, E'\n'); DECLARE idx INTEGER := 1; -- loop counter, cannot use FOR .. iterator BEGIN - LOOP EXIT WHEN idx = 1 + array_length(exec_cmds, 1); + LOOP EXIT WHEN idx = 1 + pg_catalog.array_length(exec_cmds, 1); WHILE exec_cmds[idx][1] = 'U' LOOP -- RAISE NOTICE 'user executing: %', exec_cmds[idx][2]; DECLARE @@ -887,14 +887,14 @@ BEGIN -- during function invocation, search path is redirected to {pg_temp, pg_catalog, function_schema} and becomes immutable RAISE INFO 'could not resolve relation % using system-defined "search_path" setting during function invocation: ''%''', - substr(e_message, 10, 1 + strpos(substr(e_message,11), '" does not exist')), - array_to_string(current_schemas(TRUE),', ') + pg_catalog.substr(e_message, 10, 1 + pg_catalog.strpos(pg_catalog.substr(e_message,11), '" does not exist')), + pg_catalog.array_to_string(pg_catalog.current_schemas(TRUE),', ') USING HINT = 'snapshots require schema-qualified table references, e.g. schema_name.table_name'; RAISE; END; END LOOP; - IF idx < array_length(exec_cmds, 1) AND (exec_cmds[idx][1] IS NULL OR exec_cmds[idx][1] <> 'O') THEN -- this should never happen + IF idx < pg_catalog.array_length(exec_cmds, 1) AND (exec_cmds[idx][1] IS NULL OR exec_cmds[idx][1] <> 'O') THEN -- this should never happen RAISE EXCEPTION 'prepare snapshot internal error1: % %', idx, exec_cmds[idx]; END IF; @@ -904,14 +904,14 @@ BEGIN END LOOP; END; - FOR idx IN 3 .. array_length(mapping, 1) BY 3 LOOP - s_uv_proj := s_uv_proj || quote_ident(mapping[idx]) || ','; + FOR idx IN 3 .. 
pg_catalog.array_length(mapping, 1) BY 3 LOOP + s_uv_proj := s_uv_proj || pg_catalog.quote_ident(mapping[idx]) || ','; END LOOP; -- create custom view, owned by current user - EXECUTE 'CREATE VIEW ' || qual_name || ' WITH(security_barrier) AS SELECT '|| rtrim(s_uv_proj, ',') || ' FROM db4ai.v' || s_id; - EXECUTE 'COMMENT ON VIEW ' || qual_name || ' IS ''snapshot view backed by db4ai.v' || s_id - || CASE WHEN length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; - EXECUTE 'ALTER VIEW ' || qual_name || ' OWNER TO ' || CURRENT_USER; + EXECUTE 'CREATE VIEW ' || qual_name || ' WITH(security_barrier) AS SELECT '|| pg_catalog.rtrim(s_uv_proj, ',') || ' FROM db4ai.v' || s_id::TEXT; + EXECUTE 'COMMENT ON VIEW ' || qual_name || ' IS ''snapshot view backed by db4ai.v' || s_id::TEXT + || CASE WHEN pg_catalog.length(i_comment) > 0 THEN ' comment is "' || i_comment || '"' ELSE '' END || ''''; + EXECUTE 'ALTER VIEW ' || qual_name || ' OWNER TO "' || CURRENT_USER || '"'; -- return final snapshot name res := ROW(i_schema, s_name); diff --git a/src/gausskernel/dbmind/db4ai/snapshots/publish.sql b/src/gausskernel/dbmind/db4ai/snapshots/publish.sql index 21b683924..f01010348 100644 --- a/src/gausskernel/dbmind/db4ai/snapshots/publish.sql +++ b/src/gausskernel/dbmind/db4ai/snapshots/publish.sql @@ -46,8 +46,8 @@ BEGIN GET STACKED DIAGNOSTICS e_stack_act = PG_EXCEPTION_CONTEXT; IF CURRENT_SCHEMA = 'db4ai' THEN - e_stack_act := replace(e_stack_act, ' archive_snapshot(', ' db4ai.archive_snapshot('); - e_stack_act := replace(e_stack_act, ' publish_snapshot(', ' db4ai.publish_snapshot('); + e_stack_act := pg_catalog.replace(e_stack_act, ' archive_snapshot(', ' db4ai.archive_snapshot('); + e_stack_act := pg_catalog.replace(e_stack_act, ' publish_snapshot(', ' db4ai.publish_snapshot('); END IF; IF e_stack_act NOT SIMILAR TO '%PL/pgSQL function db4ai.(archive|publish)_snapshot\(name,name\) line 11 at assignment%' @@ -59,14 +59,14 @@ BEGIN -- obtain active message level BEGIN - EXECUTE 'SET LOCAL client_min_messages TO ' || current_setting('db4ai.message_level'); - RAISE INFO 'effective client_min_messages is ''%''', upper(current_setting('db4ai.message_level')); + EXECUTE 'SET LOCAL client_min_messages TO ' || pg_catalog.current_setting('db4ai.message_level'); + RAISE INFO 'effective client_min_messages is ''%''', pg_catalog.upper(pg_catalog.current_setting('db4ai.message_level')); EXCEPTION WHEN OTHERS THEN END; -- obtain relevant configuration parameters BEGIN - s_mode := upper(current_setting('db4ai_snapshot_mode')); + s_mode := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_mode')); EXCEPTION WHEN OTHERS THEN s_mode := 'MSS'; END; @@ -77,17 +77,17 @@ BEGIN -- obtain relevant configuration parameters BEGIN - s_vers_del := current_setting('db4ai_snapshot_version_delimiter'); + s_vers_del := pg_catalog.current_setting('db4ai_snapshot_version_delimiter'); EXCEPTION WHEN OTHERS THEN s_vers_del := '@'; END; BEGIN - s_vers_sep := upper(current_setting('db4ai_snapshot_version_separator')); + s_vers_sep := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_version_separator')); EXCEPTION WHEN OTHERS THEN s_vers_sep := '.'; END; - current_compatibility_mode := current_setting('sql_compatibility'); + current_compatibility_mode := pg_catalog.current_setting('sql_compatibility'); IF current_compatibility_mode = 'ORA' OR current_compatibility_mode = 'A' THEN none_represent := 0; ELSE @@ -98,10 +98,10 @@ BEGIN IF i_name IS NULL OR i_name = '' THEN RAISE EXCEPTION 'i_name cannot 
be NULL or empty'; ELSE - i_name := replace(i_name, chr(1), s_vers_del); - i_name := replace(i_name, chr(2), s_vers_sep); - s_name_vers := regexp_split_to_array(i_name, s_vers_del); - IF array_length(s_name_vers, 1) <> 2 OR array_length(s_name_vers, 2) <> none_represent THEN + i_name := pg_catalog.replace(i_name, pg_catalog.chr(1), s_vers_del); + i_name := pg_catalog.replace(i_name, pg_catalog.chr(2), s_vers_sep); + s_name_vers := pg_catalog.regexp_split_to_array(i_name, s_vers_del); + IF pg_catalog.array_length(s_name_vers, 1) <> 2 OR pg_catalog.array_length(s_name_vers, 2) <> none_represent THEN RAISE EXCEPTION 'i_name must contain exactly one ''%'' character', s_vers_del USING HINT = 'reference a snapshot using the format: snapshot_name' || s_vers_del || 'version'; END IF; @@ -109,7 +109,7 @@ BEGIN UPDATE db4ai.snapshot SET published = publish, archived = NOT publish WHERE schema = i_schema AND name = i_name; IF SQL%ROWCOUNT = 0 THEN - RAISE EXCEPTION 'snapshot %.% does not exist' , quote_ident(i_schema), quote_ident(i_name); + RAISE EXCEPTION 'snapshot %.% does not exist' , pg_catalog.quote_ident(i_schema), pg_catalog.quote_ident(i_name); END IF; res := ROW(i_schema, i_name); diff --git a/src/gausskernel/dbmind/db4ai/snapshots/purge.sql b/src/gausskernel/dbmind/db4ai/snapshots/purge.sql index c18834943..33231c8dc 100644 --- a/src/gausskernel/dbmind/db4ai/snapshots/purge.sql +++ b/src/gausskernel/dbmind/db4ai/snapshots/purge.sql @@ -46,7 +46,7 @@ BEGIN GET STACKED DIAGNOSTICS e_stack_act = PG_EXCEPTION_CONTEXT; IF CURRENT_SCHEMA = 'db4ai' THEN - e_stack_act := replace(e_stack_act, 'ion pur', 'ion db4ai.pur'); + e_stack_act := pg_catalog.replace(e_stack_act, 'ion pur', 'ion db4ai.pur'); END IF; IF e_stack_act NOT LIKE 'referenced column: purge_snapshot_internal @@ -63,7 +63,7 @@ PL/pgSQL function db4ai.purge_snapshot(name,name) line 71 at PERFORM%' SELECT commands, comment, id, parent_id, matrix_id FROM db4ai.snapshot WHERE schema = i_schema AND name = i_name INTO STRICT pushed_cmds, pushed_comment, s_id, p_id, m_id; EXCEPTION WHEN NO_DATA_FOUND THEN - RAISE EXCEPTION 'snapshot %.% does not exist' , quote_ident(i_schema), quote_ident(i_name); + RAISE EXCEPTION 'snapshot %.% does not exist' , pg_catalog.quote_ident(i_schema), pg_catalog.quote_ident(i_name); END; -- update descendants, if any @@ -75,40 +75,40 @@ PL/pgSQL function db4ai.purge_snapshot(name,name) line 71 at PERFORM%' ELSE pushed_comment || ' | ' || comment END WHERE parent_id = s_id; IF p_id IS NULL AND SQL%ROWCOUNT > 0 THEN - RAISE EXCEPTION 'cannot purge root snapshot ''%.%'' having dependent snapshots', quote_ident(i_schema), quote_ident(i_name) + RAISE EXCEPTION 'cannot purge root snapshot ''%.%'' having dependent snapshots', pg_catalog.quote_ident(i_schema), pg_catalog.quote_ident(i_name) USING HINT = 'purge all dependent snapshots first'; END IF; IF m_id IS NULL THEN - EXECUTE 'DROP VIEW db4ai.v' || s_id; - EXECUTE 'DROP TABLE db4ai.t' || s_id; + EXECUTE 'DROP VIEW db4ai.v' || s_id::TEXT; + EXECUTE 'DROP TABLE db4ai.t' || s_id::TEXT; RAISE NOTICE 'PURGE_SNAPSHOT: MSS backing table dropped'; ELSE - SELECT array_agg(id) FROM db4ai.snapshot WHERE matrix_id = m_id AND id <> s_id INTO STRICT o_id; + SELECT pg_catalog.array_agg(id) FROM db4ai.snapshot WHERE matrix_id = m_id AND id <> s_id INTO STRICT o_id; - IF o_id IS NULL OR array_length(o_id, 1) = 0 OR array_length(o_id, 1) IS NULL THEN - EXECUTE 'DROP VIEW db4ai.v' || s_id; - EXECUTE 'DROP TABLE db4ai.t' || m_id; + IF o_id IS NULL OR pg_catalog.array_length(o_id, 1) = 0 
OR pg_catalog.array_length(o_id, 1) IS NULL THEN + EXECUTE 'DROP VIEW db4ai.v' || s_id::TEXT; + EXECUTE 'DROP TABLE db4ai.t' || m_id::TEXT; RAISE NOTICE 'PURGE_SNAPSHOT: CSS backing table dropped'; ELSE - EXECUTE 'DELETE FROM db4ai.t' || m_id || ' WHERE _' || s_id || ' AND NOT (_' || array_to_string(o_id, ' OR _') || ')'; + EXECUTE 'DELETE FROM db4ai.t' || m_id::TEXT || ' WHERE _' || s_id::TEXT || ' AND NOT (_' || pg_catalog.array_to_string(o_id, ' OR _') || ')'; GET DIAGNOSTICS affected = ROW_COUNT; - SELECT array_agg(quote_ident(column_name)) + SELECT pg_catalog.array_agg(pg_catalog.quote_ident(column_name)) FROM ( SELECT column_name FROM information_schema.columns - WHERE table_schema = 'db4ai' AND table_name = ANY ( ('{v' || array_to_string(s_id || o_id, ',v') || '}')::NAME[] ) + WHERE table_schema = 'db4ai' AND table_name = ANY ( ('{v' || pg_catalog.array_to_string(s_id || o_id, ',v') || '}')::NAME[] ) GROUP BY column_name - HAVING SUM(CASE table_name WHEN 'v' || s_id THEN 0 ELSE 1 END) = 0 ) + HAVING SUM(CASE table_name WHEN 'v' || s_id::TEXT THEN 0 ELSE 1 END) = 0 ) INTO STRICT drop_cols; - EXECUTE 'DROP VIEW db4ai.v' || s_id; + EXECUTE 'DROP VIEW db4ai.v' || s_id::TEXT; IF TRUE OR drop_cols IS NULL THEN - EXECUTE 'ALTER TABLE db4ai.t' || m_id || ' DROP _' || s_id; + EXECUTE 'ALTER TABLE db4ai.t' || m_id::TEXT || ' DROP _' || s_id::TEXT; RAISE NOTICE 'PURGE_SNAPSHOT: orphaned rows dropped: %, orphaned columns dropped: none', affected; ELSE - EXECUTE 'ALTER TABLE db4ai.t' || m_id || ' DROP _' || s_id || ', DROP ' || array_to_string(drop_cols, ', DROP '); + EXECUTE 'ALTER TABLE db4ai.t' || m_id::TEXT || ' DROP _' || s_id::TEXT || ', DROP ' || pg_catalog.array_to_string(drop_cols, ', DROP '); RAISE NOTICE 'PURGE_SNAPSHOT: orphaned rows dropped: %, orphaned columns dropped: %', affected, drop_cols; END IF; END IF; @@ -117,7 +117,7 @@ PL/pgSQL function db4ai.purge_snapshot(name,name) line 71 at PERFORM%' DELETE FROM db4ai.snapshot WHERE schema = i_schema AND name = i_name; IF SQL%ROWCOUNT = 0 THEN -- checked before, this should never happen - RAISE INFO 'snapshot %.% does not exist' , quote_ident(i_schema), quote_ident(i_name); + RAISE INFO 'snapshot %.% does not exist' , pg_catalog.quote_ident(i_schema), pg_catalog.quote_ident(i_name); END IF; END; $$; @@ -140,14 +140,14 @@ BEGIN -- obtain active message level BEGIN - EXECUTE 'SET LOCAL client_min_messages TO ' || current_setting('db4ai.message_level'); - RAISE INFO 'effective client_min_messages is ''%''', upper(current_setting('db4ai.message_level')); + EXECUTE 'SET LOCAL client_min_messages TO ' || pg_catalog.current_setting('db4ai.message_level'); + RAISE INFO 'effective client_min_messages is ''%''', pg_catalog.upper(pg_catalog.current_setting('db4ai.message_level')); EXCEPTION WHEN OTHERS THEN END; -- obtain active snapshot mode BEGIN - s_mode := upper(current_setting('db4ai_snapshot_mode')); + s_mode := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_mode')); EXCEPTION WHEN OTHERS THEN s_mode := 'MSS'; END; @@ -158,12 +158,12 @@ BEGIN -- obtain relevant configuration parameters BEGIN - s_vers_del := current_setting('db4ai_snapshot_version_delimiter'); + s_vers_del := pg_catalog.current_setting('db4ai_snapshot_version_delimiter'); EXCEPTION WHEN OTHERS THEN s_vers_del := '@'; END; BEGIN - s_vers_sep := upper(current_setting('db4ai_snapshot_version_separator')); + s_vers_sep := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_version_separator')); EXCEPTION WHEN OTHERS THEN s_vers_sep := '.'; END; @@ -173,7 
+173,7 @@ BEGIN i_schema := CASE WHEN (SELECT 0=COUNT(*) FROM pg_catalog.pg_namespace WHERE nspname = CURRENT_USER) THEN 'public' ELSE CURRENT_USER END; END IF; - current_compatibility_mode := current_setting('sql_compatibility'); + current_compatibility_mode := pg_catalog.current_setting('sql_compatibility'); IF current_compatibility_mode = 'ORA' OR current_compatibility_mode = 'A' THEN none_represent := 0; ELSE @@ -183,17 +183,17 @@ BEGIN IF i_name IS NULL OR i_name = '' THEN RAISE EXCEPTION 'i_name cannot be NULL or empty'; ELSE - i_name := replace(i_name, chr(1), s_vers_del); - i_name := replace(i_name, chr(2), s_vers_sep); - s_name_vers := regexp_split_to_array(i_name, s_vers_del); - IF array_length(s_name_vers, 1) <> 2 OR array_length(s_name_vers, 2) <> none_represent THEN + i_name := pg_catalog.replace(i_name, pg_catalog.chr(1), s_vers_del); + i_name := pg_catalog.replace(i_name, pg_catalog.chr(2), s_vers_sep); + s_name_vers := pg_catalog.regexp_split_to_array(i_name, s_vers_del); + IF pg_catalog.array_length(s_name_vers, 1) <> 2 OR pg_catalog.array_length(s_name_vers, 2) <> none_represent THEN RAISE EXCEPTION 'i_name must contain exactly one ''%'' character', s_vers_del USING HINT = 'reference a snapshot using the format: snapshot_name' || s_vers_del || 'version'; END IF; END IF; BEGIN - EXECUTE 'DROP VIEW ' || quote_ident(i_schema) || '.' || quote_ident(i_name); + EXECUTE 'DROP VIEW ' || pg_catalog.quote_ident(i_schema) || '.' || pg_catalog.quote_ident(i_name); EXCEPTION WHEN OTHERS THEN END; diff --git a/src/gausskernel/dbmind/db4ai/snapshots/sample.sql b/src/gausskernel/dbmind/db4ai/snapshots/sample.sql index 6131aac79..64ee367f4 100644 --- a/src/gausskernel/dbmind/db4ai/snapshots/sample.sql +++ b/src/gausskernel/dbmind/db4ai/snapshots/sample.sql @@ -57,14 +57,14 @@ BEGIN -- obtain active message level BEGIN - EXECUTE 'SET LOCAL client_min_messages TO ' || current_setting('db4ai.message_level'); - RAISE INFO 'effective client_min_messages is %', upper(current_setting('db4ai.message_level')); + EXECUTE 'SET LOCAL client_min_messages TO ' || pg_catalog.current_setting('db4ai.message_level'); + RAISE INFO 'effective client_min_messages is %', pg_catalog.upper(pg_catalog.current_setting('db4ai.message_level')); EXCEPTION WHEN OTHERS THEN END; -- obtain active snapshot mode BEGIN - s_mode := upper(current_setting('db4ai_snapshot_mode')); + s_mode := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_mode')); EXCEPTION WHEN OTHERS THEN s_mode := 'MSS'; END; @@ -75,17 +75,17 @@ BEGIN -- obtain relevant configuration parameters BEGIN - s_vers_del := current_setting('db4ai_snapshot_version_delimiter'); + s_vers_del := pg_catalog.current_setting('db4ai_snapshot_version_delimiter'); EXCEPTION WHEN OTHERS THEN s_vers_del := '@'; END; BEGIN - s_vers_sep := upper(current_setting('db4ai_snapshot_version_separator')); + s_vers_sep := pg_catalog.upper(pg_catalog.current_setting('db4ai_snapshot_version_separator')); EXCEPTION WHEN OTHERS THEN s_vers_sep := '.'; END; - current_compatibility_mode := current_setting('sql_compatibility'); + current_compatibility_mode := pg_catalog.current_setting('sql_compatibility'); IF current_compatibility_mode = 'ORA' OR current_compatibility_mode = 'A' THEN none_represent := 0; ELSE @@ -100,10 +100,10 @@ BEGIN IF i_parent IS NULL OR i_parent = '' THEN RAISE EXCEPTION 'i_parent cannot be NULL or empty'; ELSE - i_parent := replace(i_parent, chr(1), s_vers_del); - i_parent := replace(i_parent, chr(2), s_vers_sep); - p_name_vers := 
regexp_split_to_array(i_parent, s_vers_del); - IF array_length(p_name_vers, 1) <> 2 OR array_length(p_name_vers, 2) <> none_represent THEN + i_parent := pg_catalog.replace(i_parent, pg_catalog.chr(1), s_vers_del); + i_parent := pg_catalog.replace(i_parent, pg_catalog.chr(2), s_vers_sep); + p_name_vers := pg_catalog.regexp_split_to_array(i_parent, s_vers_del); + IF pg_catalog.array_length(p_name_vers, 1) <> 2 OR pg_catalog.array_length(p_name_vers, 2) <> none_represent THEN RAISE EXCEPTION 'i_parent must contain exactly one ''%'' character', s_vers_del USING HINT = 'reference a snapshot using the format: snapshot_name' || s_vers_del || 'version'; END IF; @@ -113,75 +113,75 @@ BEGIN BEGIN SELECT id, matrix_id, root_id FROM db4ai.snapshot WHERE schema = i_schema AND name = i_parent INTO STRICT p_id, m_id, r_id; EXCEPTION WHEN NO_DATA_FOUND THEN - RAISE EXCEPTION 'parent snapshot %.% does not exist' , quote_ident(i_schema), quote_ident(i_parent); + RAISE EXCEPTION 'parent snapshot %.% does not exist' , pg_catalog.quote_ident(i_schema), pg_catalog.quote_ident(i_parent); END; - IF i_sample_infixes IS NULL OR array_length(i_sample_infixes, 1) = none_represent OR array_length(i_sample_infixes, 2) <> none_represent THEN + IF i_sample_infixes IS NULL OR pg_catalog.array_length(i_sample_infixes, 1) = none_represent OR pg_catalog.array_length(i_sample_infixes, 2) <> none_represent THEN RAISE EXCEPTION 'i_sample_infixes array malformed' USING HINT = 'pass sample infixes as NAME[] literal, e.g. ''{_train, _test}'''; END IF; - IF i_sample_ratios IS NULL OR array_length(i_sample_ratios, 1) = none_represent OR array_length(i_sample_ratios, 2) <> none_represent THEN + IF i_sample_ratios IS NULL OR pg_catalog.array_length(i_sample_ratios, 1) = none_represent OR pg_catalog.array_length(i_sample_ratios, 2) <> none_represent THEN RAISE EXCEPTION 'i_sample_ratios array malformed' USING HINT = 'pass sample percentages as NUMBER[] literal, e.g. ''{.8, .2}'''; END IF; - IF array_length(i_sample_infixes, 1) <> array_length(i_sample_ratios, 1) THEN + IF pg_catalog.array_length(i_sample_infixes, 1) <> pg_catalog.array_length(i_sample_ratios, 1) THEN RAISE EXCEPTION 'i_sample_infixes and i_sample_ratios array length mismatch'; END IF; IF i_stratify IS NOT NULL THEN - IF array_length(i_stratify, 1) = none_represent OR array_length(i_stratify, 2) <> none_represent THEN + IF pg_catalog.array_length(i_stratify, 1) = none_represent OR pg_catalog.array_length(i_stratify, 2) <> none_represent THEN RAISE EXCEPTION 'i_stratify array malformed' USING HINT = 'pass stratification field names as NAME[] literal, e.g. 
''{color, size}'''; END IF; - EXECUTE 'SELECT ARRAY[COUNT(DISTINCT ' || array_to_string(i_stratify, '), COUNT(DISTINCT ') || ')] FROM db4ai.v' || p_id + EXECUTE 'SELECT ARRAY[COUNT(DISTINCT ' || pg_catalog.array_to_string(i_stratify, '), COUNT(DISTINCT ') || ')] FROM db4ai.v' || p_id::TEXT INTO STRICT stratify_count; IF stratify_count IS NULL THEN RAISE EXCEPTION 'sample snapshot internal error2: %', p_id; END IF; - SELECT array_agg(ordered) FROM (SELECT unnest(i_stratify) ordered ORDER BY unnest(stratify_count)) INTO STRICT i_stratify; + SELECT pg_catalog.array_agg(ordered) FROM (SELECT pg_catalog.unnest(i_stratify) ordered ORDER BY pg_catalog.unnest(stratify_count)) INTO STRICT i_stratify; IF i_stratify IS NULL THEN RAISE EXCEPTION 'sample snapshot internal error3'; END IF; END IF; IF i_sample_comments IS NOT NULL THEN - IF array_length(i_sample_comments, 1) = none_represent OR array_length(i_sample_comments, 2) <> none_represent THEN + IF pg_catalog.array_length(i_sample_comments, 1) = none_represent OR pg_catalog.array_length(i_sample_comments, 2) <> none_represent THEN RAISE EXCEPTION 'i_sample_comments array malformed' USING HINT = 'pass sample comments as TEXT[] literal, e.g. ''{comment 1, comment 2}'''; - ELSIF array_length(i_sample_infixes, 1) <> array_length(i_sample_comments, 1) THEN + ELSIF pg_catalog.array_length(i_sample_infixes, 1) <> pg_catalog.array_length(i_sample_comments, 1) THEN RAISE EXCEPTION 'i_sample_infixes and i_sample_comments array length mismatch'; END IF; END IF; -- extract normalized projection list (private: nullable, shared: not null, user_cname: not null) - p_sv_proj := substring(pg_get_viewdef('db4ai.v' || p_id), '^SELECT (.*), t[0-9]+\.xc_node_id, t[0-9]+\.ctid FROM.*$'); - mapping := array(SELECT unnest(ARRAY[ m[1], m[2], coalesce(m[3], replace(m[4],'""','"'))]) FROM regexp_matches(p_sv_proj, + p_sv_proj := pg_catalog.substring(pg_catalog.pg_get_viewdef('db4ai.v' || p_id::TEXT), '^SELECT (.*), t[0-9]+\.xc_node_id, t[0-9]+\.ctid FROM.*$'); + mapping := array(SELECT pg_catalog.unnest(ARRAY[ m[1], m[2], coalesce(m[3], pg_catalog.replace(m[4],'""','"'))]) FROM pg_catalog.regexp_matches(p_sv_proj, '(?:COALESCE\(t[0-9]+\.(f[0-9]+), )?t[0-9]+\.(f[0-9]+)(?:\))? AS (?:([^\s",]+)|"((?:[^"]*"")*[^"]*)")', 'g') m); - FOR idx IN 3 .. array_length(mapping, 1) BY 3 LOOP + FOR idx IN 3 .. 
pg_catalog.array_length(mapping, 1) BY 3 LOOP IF s_mode = 'MSS' THEN - s_sv_proj := s_sv_proj || coalesce(mapping[idx-2], mapping[idx-1]) || ' AS ' || quote_ident(mapping[idx]) || ','; - s_bt_proj := s_bt_proj || quote_ident(mapping[idx]) || ' AS ' || coalesce(mapping[idx-2], mapping[idx-1]) || ','; + s_sv_proj := s_sv_proj || coalesce(mapping[idx-2], mapping[idx-1]) || ' AS ' || pg_catalog.quote_ident(mapping[idx]) || ','; + s_bt_proj := s_bt_proj || pg_catalog.quote_ident(mapping[idx]) || ' AS ' || coalesce(mapping[idx-2], mapping[idx-1]) || ','; ELSIF s_mode = 'CSS' THEN IF mapping[idx-2] IS NULL THEN - s_sv_proj := s_sv_proj || mapping[idx-1] || ' AS ' || quote_ident(mapping[idx]) || ','; + s_sv_proj := s_sv_proj || mapping[idx-1] || ' AS ' || pg_catalog.quote_ident(mapping[idx]) || ','; ELSE - s_sv_proj := s_sv_proj || 'coalesce(' || mapping[idx-2] || ',' || mapping[idx-1] || ') AS ' || quote_ident(mapping[idx]) || ','; + s_sv_proj := s_sv_proj || 'coalesce(' || mapping[idx-2] || ',' || mapping[idx-1] || ') AS ' || pg_catalog.quote_ident(mapping[idx]) || ','; END IF; END IF; - s_uv_proj := s_uv_proj || quote_ident(mapping[idx]) || ','; + s_uv_proj := s_uv_proj || pg_catalog.quote_ident(mapping[idx]) || ','; END LOOP; - s_bt_dist := getdistributekey('db4ai.t' || coalesce(m_id, p_id)); + s_bt_dist := pg_catalog.getdistributekey('db4ai.t' || coalesce(m_id, p_id)::TEXT); s_bt_dist := CASE WHEN s_bt_dist IS NULL THEN ' DISTRIBUTE BY REPLICATION' ELSE ' DISTRIBUTE BY HASH(' || s_bt_dist || ')' END; s_bt_dist = ''; - FOR i IN 1 .. array_length(i_sample_infixes, 1) LOOP + FOR i IN 1 .. pg_catalog.array_length(i_sample_infixes, 1) LOOP IF i_sample_infixes[i] IS NULL THEN RAISE EXCEPTION 'i_sample_infixes array contains NULL values'; END IF; @@ -191,18 +191,18 @@ BEGIN END IF; qual_name := p_name_vers[1] || i_sample_infixes[i] || s_vers_del || p_name_vers[2]; - IF char_length(qual_name) > 63 THEN + IF pg_catalog.char_length(qual_name) > 63 THEN RAISE EXCEPTION 'sample snapshot name too long: ''%''', qual_name; ELSE s_name := (i_schema, qual_name); - qual_name := quote_ident(s_name.schema) || '.' || quote_ident(s_name.name); + qual_name := pg_catalog.quote_ident(s_name.schema) || '.' 
|| pg_catalog.quote_ident(s_name.name); END IF; IF i_sample_ratios[i] < 0 OR i_sample_ratios[i] > 1 THEN RAISE EXCEPTION 'sample ratio must be between 0 and 1'; END IF; - -- SELECT nextval('db4ai.snapshot_sequence') INTO STRICT s_id; + -- SELECT pg_catalog.nextval('db4ai.snapshot_sequence') INTO STRICT s_id; SELECT MAX(id)+1 FROM db4ai.snapshot INTO STRICT s_id; -- openGauss BUG: cannot create sequences in initdb -- check for duplicate snapshot @@ -218,29 +218,29 @@ BEGIN IF s_mode = 'MSS' THEN exec_cmds := ARRAY [ -- extract and propagate DISTRIBUTE BY from root MSS snapshot - [ 'O','CREATE TABLE db4ai.t' || s_id || ' WITH (orientation = column, compression = low)' || s_bt_dist - || ' AS SELECT ' || rtrim(s_bt_proj, ',') || ' FROM db4ai.v' || p_id || ' WHERE random() <= ' || i_sample_ratios[i] ], + [ 'O','CREATE TABLE db4ai.t' || s_id::TEXT || ' WITH (orientation = column, compression = low)' || s_bt_dist + || ' AS SELECT ' || pg_catalog.rtrim(s_bt_proj, ',') || ' FROM db4ai.v' || p_id::TEXT || ' WHERE pg_catalog.random() <= ' || i_sample_ratios[i] ], -- || ' AS SELECT ' || rtrim(s_bt_proj, ',') || ' FROM db4ai.v' || p_id || ' WHERE dbms_random.value(0, 1) <= ' || i_sample_ratios[i], - [ 'O', 'COMMENT ON TABLE db4ai.t' || s_id || ' IS ''snapshot backing table, root is ' || qual_name || '''' ], - [ 'O', 'CREATE VIEW db4ai.v' || s_id || ' WITH(security_barrier) AS SELECT ' || s_sv_proj || ' xc_node_id, ctid FROM db4ai.t' || s_id ]]; + [ 'O', 'COMMENT ON TABLE db4ai.t' || s_id::TEXT || ' IS ''snapshot backing table, root is ' || qual_name || '''' ], + [ 'O', 'CREATE VIEW db4ai.v' || s_id::TEXT || ' WITH(security_barrier) AS SELECT ' || s_sv_proj || ' xc_node_id, ctid FROM db4ai.t' || s_id::TEXT ]]; ELSIF s_mode = 'CSS' THEN IF m_id IS NULL THEN exec_cmds := ARRAY [ - [ 'O', 'UPDATE db4ai.snapshot SET matrix_id = ' || p_id || ' WHERE schema = ''' || i_schema || ''' AND name = ''' + [ 'O', 'UPDATE db4ai.snapshot SET matrix_id = ' || p_id::TEXT || ' WHERE schema = ''' || i_schema || ''' AND name = ''' || i_parent || '''' ], - [ 'O', 'ALTER TABLE db4ai.t' || p_id || ' ADD _' || p_id || ' BOOLEAN NOT NULL DEFAULT TRUE' ], - [ 'O', 'ALTER TABLE db4ai.t' || p_id || ' ALTER _' || p_id || ' SET DEFAULT FALSE' ], - [ 'O', 'CREATE OR REPLACE VIEW db4ai.v' || p_id || ' WITH(security_barrier) AS SELECT ' || p_sv_proj || ', xc_node_id, ctid FROM db4ai.t' - || p_id || ' WHERE _' || p_id ]]; + [ 'O', 'ALTER TABLE db4ai.t' || p_id::TEXT || ' ADD _' || p_id::TEXT || ' BOOLEAN NOT NULL DEFAULT TRUE' ], + [ 'O', 'ALTER TABLE db4ai.t' || p_id::TEXT || ' ALTER _' || p_id::TEXT || ' SET DEFAULT FALSE' ], + [ 'O', 'CREATE OR REPLACE VIEW db4ai.v' || p_id::TEXT || ' WITH(security_barrier) AS SELECT ' || p_sv_proj || ', xc_node_id, ctid FROM db4ai.t' + || p_id::TEXT || ' WHERE _' || p_id::TEXT ]]; m_id := p_id; END IF; exec_cmds := exec_cmds || ARRAY [ - [ 'O', 'ALTER TABLE db4ai.t' || m_id || ' ADD _' || s_id || ' BOOLEAN NOT NULL DEFAULT FALSE' ], - [ 'O', 'UPDATE db4ai.t' || m_id || ' SET _' || s_id || ' = TRUE WHERE _' || p_id || ' AND random() <= ' + [ 'O', 'ALTER TABLE db4ai.t' || m_id::TEXT || ' ADD _' || s_id::TEXT || ' BOOLEAN NOT NULL DEFAULT FALSE' ], + [ 'O', 'UPDATE db4ai.t' || m_id::TEXT || ' SET _' || s_id::TEXT || ' = TRUE WHERE _' || p_id::TEXT || ' AND pg_catalog.random() <= ' -- [ 'O', 'UPDATE db4ai.t' || m_id || ' SET _' || s_id || ' = TRUE WHERE _' || p_id || ' AND dbms_random.value(0, 1) <= ' || i_sample_ratios[i] ], - [ 'O', 'CREATE VIEW db4ai.v' || s_id || ' WITH(security_barrier) AS SELECT ' 
|| s_sv_proj || ' xc_node_id, ctid FROM db4ai.t' || m_id
- || ' WHERE _' || s_id ]];
+ [ 'O', 'CREATE VIEW db4ai.v' || s_id::TEXT || ' WITH(security_barrier) AS SELECT ' || s_sv_proj || ' xc_node_id, ctid FROM db4ai.t' || m_id::TEXT
+ || ' WHERE _' || s_id::TEXT ]];
END IF;
-- || ' AS SELECT ' || proj_list || ' FROM '
@@ -253,8 +253,8 @@ BEGIN
--SELECT * FROM (SELECT *, count(*) over()_ cnt, row_number() OVER(ORDER BY COLOR) _row FROM t) WHERE _row % (cnt/ 10) = 0;
-- Execute the queries
- RAISE NOTICE E'accumulated commands:\n%', array_to_string(exec_cmds, E'\n');
- IF 1 + array_length(exec_cmds, 1) <> (db4ai.prepare_snapshot_internal(
+ RAISE NOTICE E'accumulated commands:\n%', pg_catalog.array_to_string(exec_cmds, E'\n');
+ IF 1 + pg_catalog.array_length(exec_cmds, 1) <> (db4ai.prepare_snapshot_internal(
s_id, p_id, m_id, r_id, s_name.schema, s_name.name,
ARRAY [ 'SAMPLE ' || i_sample_infixes[i] || ' ' || i_sample_ratios[i]
|| CASE WHEN i_stratify IS NULL THEN '' ELSE ' ' || i_stratify::TEXT END ],
@@ -263,10 +263,10 @@ BEGIN
END IF;
-- create custom view, owned by current user
- EXECUTE 'CREATE VIEW ' || qual_name || ' WITH(security_barrier) AS SELECT ' || rtrim(s_uv_proj, ',') || ' FROM db4ai.v' || s_id;
- EXECUTE 'COMMENT ON VIEW ' || qual_name || ' IS ''snapshot view backed by db4ai.v' || s_id
- || CASE WHEN length(i_sample_comments[i]) > 0 THEN ' comment is "' || i_sample_comments[i] || '"' ELSE '' END || '''';
- EXECUTE 'ALTER VIEW ' || qual_name || ' OWNER TO ' || CURRENT_USER;
+ EXECUTE 'CREATE VIEW ' || qual_name || ' WITH(security_barrier) AS SELECT ' || pg_catalog.rtrim(s_uv_proj, ',') || ' FROM db4ai.v' || s_id::TEXT;
+ EXECUTE 'COMMENT ON VIEW ' || qual_name || ' IS ''snapshot view backed by db4ai.v' || s_id::TEXT
+ || CASE WHEN pg_catalog.length(i_sample_comments[i]) > 0 THEN ' comment is "' || i_sample_comments[i] || '"' ELSE '' END || '''';
+ EXECUTE 'ALTER VIEW ' || qual_name || ' OWNER TO "' || CURRENT_USER || '"';
exec_cmds := NULL;
diff --git a/src/gausskernel/dbmind/gs_dbmind b/src/gausskernel/dbmind/gs_dbmind
new file mode 100644
index 000000000..211d123a8
--- /dev/null
+++ b/src/gausskernel/dbmind/gs_dbmind
@@ -0,0 +1,110 @@
+#!/usr/bin/env bash
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+# ------------Utils------------
+function die() {
+ echo -e $1
+ exit 1
+}
+
+function extract_libname() {
+ # Split real library name.
+ line=$1
+ tmparr=(${line//==/ })
+ libname=${tmparr[0]}
+ tmparr=(${libname//>=/ })
+ libname=${tmparr[0]}
+ tmparr=(${libname//<=/ })
+ libname=${tmparr[0]}
+ tmparr=(${libname//~=/ })
+ libname=${tmparr[0]}
+ # trim
+ libname=$(echo $libname | sed 's/ *$//g')
+ echo $libname
+}
+
+# ------------Main Process------------
+ABSPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+DBMIND_PATH=${ABSPATH}/dbmind
+
+# Check whether Python has been installed on the OS.
+PYTHON=$(which python)
+if [[ "$?" != "0" ]]; then
+ die "Python environment not found."
+fi
+
+# Check for the Python version. 
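+# Note: in Python, exit(True) terminates the interpreter with status 1, so the
+# check below yields status 1 exactly when the running interpreter is Python 3.6
+# or newer; any other status (including a crashed interpreter) fails the check.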
+$PYTHON -c 'import sys; exit(sys.version_info[:2] >= (3, 6))'
+if [[ "$?" != "1" ]]; then
+ die "Unsupported Python version (${PYTHON}): Python 3.6 or later is required."
+fi
+
+# Check for dependencies
+## Firstly, get the machine architecture so that we can determine the right requirements file.
+if [[ -f "${DBMIND_PATH}/requirements-x86.txt" ]]; then
+ REQUIREMENT_PATH=$DBMIND_PATH
+else
+ REQUIREMENT_PATH=$DBMIND_PATH/..
+fi
+
+machine=$(uname -m)
+if [[ "$machine" =~ "x86" ]]; then
+ requirements=$REQUIREMENT_PATH/"requirements-x86.txt"
+else
+ requirements=$REQUIREMENT_PATH/"requirements-aarch64.txt"
+fi
+
+## Secondly, check whether each library has been installed.
+installed_list=$($PYTHON -m pip list --disable-pip-version-check)
+while read line; do
+ libname=$(extract_libname $line)
+ # skip comment
+ if [[ $libname == "#"* ]] || [[ $libname == "" ]] ; then
+ continue
+ fi
+
+ found=$(echo $installed_list | grep -i "$libname")
+ if [[ $found == "" ]]; then
+ die "Missing dependency ${libname}. You should use pip to install it, as follows:\n
 ${PYTHON} -m pip install -r ${requirements}"
+ fi
+done < $requirements
+
+# Furthermore, if users want to use several components, they should install the optional dependencies.
+## e.g., gs_dbmind component sqldiag ...
+optional_reqs=$REQUIREMENT_PATH/"requirements-optional.txt"
+if [[ "$1" == "component" ]] && [[ "$2" == "sqldiag" || "$2" == "xtuner" ]]; then
+ while read line; do
+ libname=$(extract_libname $line)
+ # skip comment
+ if [[ $libname == "#"* ]] || [[ $libname == "" ]] ; then
+ continue
+ fi
+
+ found=$(echo $installed_list | grep -i "$libname")
+ if [[ $found == "" ]]; then
+ die "Missing dependency ${libname} required by this component. You should use pip to install it, as follows:\n
 ${PYTHON} -m pip install -r ${optional_reqs}"
+ fi
+ done < $optional_reqs
+fi
+
+set -f
+for arg in "${@}" ; do
+ arg=`echo $arg |sed 's/"""/\\\\\\"\\\\\\"\\\\\\"/g'`
+ arg=`${PYTHON} -c "import shlex;print(shlex.quote(\"\"\"$arg\"\"\"))"`
+ _args=$_args" "$arg
+done
+
+eval $PYTHON $DBMIND_PATH/ $_args
diff --git a/src/gausskernel/dbmind/kernel/hypopg_index.cpp b/src/gausskernel/dbmind/kernel/hypopg_index.cpp
index f961e8b72..fd6e61971 100644
--- a/src/gausskernel/dbmind/kernel/hypopg_index.cpp
+++ b/src/gausskernel/dbmind/kernel/hypopg_index.cpp
@@ -186,23 +186,16 @@ static bool hypo_query_walker(Node *parsetree)
if (parsetree == NULL) {
return false;
}
+ if (nodeTag(parsetree) == T_ExplainStmt) {
+ ListCell *lc;
- switch (nodeTag(parsetree)) {
- case T_ExplainStmt: {
- ListCell *lc;
+ foreach (lc, ((ExplainStmt *)parsetree)->options) {
+ DefElem *opt = (DefElem *)lfirst(lc);
- foreach (lc, ((ExplainStmt *)parsetree)->options) {
- DefElem *opt = (DefElem *)lfirst(lc);
-
- if (strcmp(opt->defname, "analyze") == 0)
- return false;
- }
- return true;
- break;
- }
- default: {
- return false;
+ if (strcmp(opt->defname, "analyze") == 0)
+ return false;
}
+ return true;
}
return false;
}
@@ -690,15 +683,21 @@ static const hypoIndex *hypo_index_store_parsetree(IndexStmt *node, const char *
}
initStringInfo(&indexRelationName);
- appendStringInfo(&indexRelationName, "%s", node->accessMethod);
- appendStringInfo(&indexRelationName, "_");
-
+ appendStringInfoString(&indexRelationName, node->accessMethod);
+ appendStringInfoString(&indexRelationName, "_");
+ if (node->isGlobal) {
+ appendStringInfoString(&indexRelationName, "global");
+ appendStringInfoString(&indexRelationName, "_");
+ } else if (node->isPartitioned) {
+ appendStringInfoString(&indexRelationName, "local");
+ appendStringInfoString(&indexRelationName, "_");
+ }
if (node->relation->schemaname != NULL && (strcmp(node->relation->schemaname, "public") != 0)) {
- appendStringInfo(&indexRelationName, "%s", node->relation->schemaname);
- appendStringInfo(&indexRelationName, "_");
+ appendStringInfoString(&indexRelationName, node->relation->schemaname);
+ appendStringInfoString(&indexRelationName, "_");
}
- appendStringInfo(&indexRelationName, "%s", node->relation->relname);
+ appendStringInfoString(&indexRelationName, node->relation->relname);
/* now create the hypothetical index entry */
entry = hypo_newIndex(relid, node->accessMethod, nkeycolumns, ninccolumns, node->options);
@@ -719,7 +718,8 @@ static const hypoIndex *hypo_index_store_parsetree(IndexStmt *node, const char *
entry->unique = node->unique;
entry->ncolumns = nkeycolumns + ninccolumns;
entry->nkeycolumns = nkeycolumns;
-
+ entry->isGlobal = node->isGlobal;
+ entry->ispartitionedindex = node->isPartitioned;
/* handle predicate if present */
hypo_handle_predicate(node, entry);
@@ -1021,7 +1021,9 @@ static void hypo_injectHypotheticalIndex(PlannerInfo *root, Oid relationObjectId
index->pages = entry->pages;
index->tuples = entry->tuples;
-
+ index->ispartitionedindex = entry->ispartitionedindex;
+ index->partitionindex = InvalidOid;
+ index->isGlobal = entry->isGlobal;
/*
* obviously, setup this tag. However, it's only checked in
* selfuncs.c/get_actual_variable_range, so we still need to add
@@ -1287,8 +1289,8 @@ static void hypo_set_indexname(hypoIndex *entry, const char *indexname)
int totalsize;
errno_t rc = EOK;
- rc = snprintf_s(oid, sizeof(oid), sizeof(oid) - 1, "<%d>", entry->oid);
- securec_check_ss_c(rc, "\0", "\0");
+ rc = snprintf_s(oid, sizeof(oid), sizeof(oid) - 1, "<%u>", entry->oid);
+ securec_check_ss(rc, "\0", "\0");
/* we'll prefix the given indexname with the oid, and reserve a final \0 */
totalsize = strlen(oid) + strlen(indexname) + 1;
@@ -1300,9 +1302,9 @@ static void hypo_set_indexname(hypoIndex *entry, const char *indexname)
/* eventually truncate the given indexname at NAMEDATALEN-1 if needed */
rc = strcpy_s(entry->indexname, NAMEDATALEN, oid);
- securec_check_c(rc, "\0", "\0");
+ securec_check(rc, "\0", "\0");
rc = strncat_s(entry->indexname, NAMEDATALEN, indexname, totalsize - strlen(oid) - 1);
- securec_check_c(rc, "\0", "\0");
+ securec_check(rc, "\0", "\0");
}
/*
diff --git a/src/gausskernel/dbmind/kernel/index_advisor.cpp b/src/gausskernel/dbmind/kernel/index_advisor.cpp
index 5ccb6bc73..125ec911e 100644
--- a/src/gausskernel/dbmind/kernel/index_advisor.cpp
+++ b/src/gausskernel/dbmind/kernel/index_advisor.cpp
@@ -30,6 +30,8 @@
#include "access/tupdesc.h"
#include "catalog/indexing.h"
#include "catalog/pg_attribute.h"
+#include "catalog/pg_class.h"
+#include "catalog/pg_partition_fn.h"
#include "commands/sqladvisor.h"
#include "funcapi.h"
#include "nodes/makefuncs.h"
@@ -64,6 +66,7 @@ typedef struct {
char schema[NAMEDATALEN];
char table[NAMEDATALEN];
StringInfoData column;
+ char index_type[NAMEDATALEN];
} SuggestedIndex;
typedef struct {
@@ -79,8 +82,17 @@ typedef struct {
List *index;
List *join_cond;
List *index_print;
+ bool ispartition = false;
+ bool issubpartition = false;
+ List *partition_key_list;
+ List *subpartition_key_list;
} TableCell;
+typedef struct {
+ char *alias_name;
+ char *column_name;
+} TargetCell;
+
typedef struct {
uint4 cardinality;
char *index_name;
@@ -88,6 +100,11 @@ typedef struct {
char *field_expr;
} IndexCell;
+typedef struct {
+ char *index_columns;
+ char *index_type;
+} IndexPrint;
IndexPrint; + namespace index_advisor { typedef struct { char *field; @@ -125,13 +142,19 @@ static uint4 calculate_field_cardinality(char *, char *, const char *); static bool is_tmp_table(const char *); static void find_table_by_column(char **, char **, char **); static void split_field_list(List *, char **, char **, char **); +static void get_partition_key_name(Relation, TableCell *, bool is_subpartition = false); static bool check_relation_type_valid(Oid); -static TableCell *find_or_create_tblcell(char *table_name, char *alias_name, char *schema_name = NULL); +static TableCell *find_or_create_tblcell(char *table_name, char *alias_name, char *schema_name = NULL, + bool is_partition = false, bool is_subpartition = false); +static TargetCell *find_or_create_clmncell(char **, List *); static void add_index_from_field(char *, char *, IndexCell *); static bool parse_group_clause(List *, List *); static bool parse_order_clause(List *, List *); static void add_index_from_group_order(TableCell *, List *, List *, bool); +static void get_partition_index_type(IndexPrint *, TableCell *); +static IndexPrint *generat_index_print(TableCell *, char *); static void generate_final_index(); +static void parse_target_list(List *); static void parse_from_clause(List *); static void add_drived_tables(RangeVar *); static void parse_join_tree(JoinExpr *); @@ -156,7 +179,7 @@ Datum gs_index_advise(PG_FUNCTION_ARGS) if (query == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("you must enter a query statement."))); } -#define COLUMN_NUM 3 +#define COLUMN_NUM 4 if (SRF_IS_FIRSTCALL()) { TupleDesc tup_desc; MemoryContext old_context; @@ -175,6 +198,7 @@ Datum gs_index_advise(PG_FUNCTION_ARGS) TupleDescInitEntry(tup_desc, (AttrNumber)1, "schema", TEXTOID, -1, 0); TupleDescInitEntry(tup_desc, (AttrNumber)2, "table", TEXTOID, -1, 0); TupleDescInitEntry(tup_desc, (AttrNumber)3, "column", TEXTOID, -1, 0); + TupleDescInitEntry(tup_desc, (AttrNumber)4, "indextype", TEXTOID, -1, 0); func_ctx->tuple_desc = BlessTupleDesc(tup_desc); func_ctx->max_calls = max_calls; @@ -206,6 +230,7 @@ Datum gs_index_advise(PG_FUNCTION_ARGS) values[0] = CStringGetTextDatum(entry->schema); values[1] = CStringGetTextDatum(entry->table); values[2] = CStringGetTextDatum(entry->column.data); + values[3] = CStringGetTextDatum(entry->index_type); tuple = heap_form_tuple(func_ctx->tuple_desc, values, nulls); SRF_RETURN_NEXT(func_ctx, HeapTupleGetDatum(tuple)); @@ -246,22 +271,26 @@ SuggestedIndex *suggest_index(const char *query_string, _out_ int *len) } Node *parsetree = (Node *)lfirst(list_head(parse_tree_list)); - Node* parsetree_copy = (Node*)copyObject(parsetree); - find_select_stmt(parsetree); - + Node *parsetree_copy = (Node *)copyObject(parsetree); + // Check for syntax errors + parse_analyze(parsetree, query_string, NULL, 0); + find_select_stmt(parsetree_copy); if (!g_stmt_list) { - ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("can not advise for the query because not found the select statement."))); + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("cannot advise for the query because no select statement was found."))); } // Parse the SelectStmt structure ListCell *item = NULL; foreach (item, g_stmt_list) { + t_thrd.index_advisor_cxt.stmt_table_list = NIL; + t_thrd.index_advisor_cxt.stmt_target_list = NIL; SelectStmt *stmt = (SelectStmt *)lfirst(item); parse_from_clause(stmt->fromClause); + parse_target_list(stmt->targetList); - if (g_table_list) { + if (t_thrd.index_advisor_cxt.stmt_table_list) {
parse_where_clause(stmt->whereClause); determine_driver_table(); if (parse_group_clause(stmt->groupClause, stmt->targetList)) { @@ -269,7 +298,7 @@ SuggestedIndex *suggest_index(const char *query_string, _out_ int *len) } else if (parse_order_clause(stmt->sortClause, stmt->targetList)) { add_index_from_group_order(g_driver_table, stmt->sortClause, stmt->targetList, false); } - if (g_table_list->length > 1 && g_driver_table) { + if (t_thrd.index_advisor_cxt.stmt_table_list->length > 1 && g_driver_table) { add_index_for_drived_tables(); } @@ -278,6 +307,8 @@ SuggestedIndex *suggest_index(const char *query_string, _out_ int *len) g_driver_table = NULL; } + list_free_ext(t_thrd.index_advisor_cxt.stmt_table_list); + list_free_ext(t_thrd.index_advisor_cxt.stmt_target_list); } if (g_table_list == NIL) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -286,12 +317,21 @@ SuggestedIndex *suggest_index(const char *query_string, _out_ int *len) // Use SQL Advisor extract_info_from_plan(parsetree_copy, query_string); - generate_final_index(); // Format the return result, e.g., 'table1, "(col1,col2),(col3)"'. - int array_len = g_table_list->length; - *len = array_len; - SuggestedIndex *array = (SuggestedIndex *)palloc0(sizeof(SuggestedIndex) * array_len); + // Get index quantity. + Size index_len = 0; + item = NULL; + foreach (item, g_table_list) { + TableCell *cur_table = (TableCell *)lfirst(item); + if (cur_table->index_print) { + index_len += cur_table->index_print->length; + } else { + index_len++; + } + } + *len = index_len; + SuggestedIndex *array = (SuggestedIndex *)palloc0(sizeof(SuggestedIndex) * index_len); errno_t rc = EOK; item = NULL; int i = 0; @@ -299,29 +339,29 @@ SuggestedIndex *suggest_index(const char *query_string, _out_ int *len) foreach (item, g_table_list) { TableCell *cur_table = (TableCell *)lfirst(item); List *index_list = cur_table->index_print; - - rc = strcpy_s((array + i)->schema, NAMEDATALEN, cur_table->schema_name); - securec_check(rc, "\0", "\0"); - rc = strcpy_s((array + i)->table, NAMEDATALEN, cur_table->table_name); - securec_check(rc, "\0", "\0"); - initStringInfo(&(array + i)->column); if (!index_list) { + rc = strcpy_s((array + i)->schema, NAMEDATALEN, cur_table->schema_name); + securec_check(rc, "\0", "\0"); + rc = strcpy_s((array + i)->table, NAMEDATALEN, cur_table->table_name); + securec_check(rc, "\0", "\0"); + initStringInfo(&(array + i)->column); appendStringInfoString(&(array + i)->column, ""); + i++; } else { ListCell *cur_index = NULL; - int j = 0; foreach (cur_index, index_list) { - if (j > 0) { - appendStringInfoString(&(array + i)->column, ",("); - } else { - appendStringInfoString(&(array + i)->column, "("); - } - appendStringInfoString(&(array + i)->column, (char *)lfirst(cur_index)); - appendStringInfoString(&(array + i)->column, ")"); - j++; + rc = strcpy_s((array + i)->schema, NAMEDATALEN, cur_table->schema_name); + securec_check(rc, "\0", "\0"); + rc = strcpy_s((array + i)->table, NAMEDATALEN, cur_table->table_name); + securec_check(rc, "\0", "\0"); + initStringInfo(&(array + i)->column); + appendStringInfoString(&(array + i)->column, (char *)((IndexPrint *)lfirst(cur_index))->index_columns); + rc = strcpy_s((array + i)->index_type, NAMEDATALEN, ((IndexPrint *)lfirst(cur_index))->index_type); + securec_check(rc, "\0", "\0"); + i++; } } - i++; + list_free_deep(index_list); } @@ -344,6 +384,7 @@ void extract_info_from_plan(Node* parse_tree, const char *query_string) List* query_tree_list = pg_analyze_and_rewrite(parse_tree, query_string, NULL, 0); 
foreach (lc, query_tree_list) { + t_thrd.index_advisor_cxt.stmt_table_list = NIL; Query* query = castNode(Query, lfirst(lc)); if (query->commandType != CMD_UTILITY) { @@ -351,6 +392,8 @@ void extract_info_from_plan(Node* parse_tree, const char *query_string) plan_tree = pg_plan_query(query, 0, NULL); extractNode((Plan*)plan_tree->planTree, NIL, plan_tree->rtable, plan_tree->subplans); } + generate_final_index(); + list_free_ext(t_thrd.index_advisor_cxt.stmt_table_list); } u_sess->adv_cxt.getPlanInfoFunc = NULL; @@ -388,8 +431,10 @@ void get_join_condition_from_plan(Node* node, List* rtable) if (rte1->relid != rte2->relid) { char *l_field_name = pstrdup(get_attname(rte1->relid, arg1->varattno)); char *r_field_name = pstrdup(get_attname(rte2->relid, arg2->varattno)); - TableCell *ltable = find_or_create_tblcell(rte1->relname, NULL); - TableCell *rtable = find_or_create_tblcell(rte2->relname, NULL); + char *l_schema_name = get_namespace_name(get_rel_namespace(rte1->relid)); + char *r_schema_name = get_namespace_name(get_rel_namespace(rte2->relid)); + TableCell *ltable = find_or_create_tblcell(rte1->relname, NULL, l_schema_name); + TableCell *rtable = find_or_create_tblcell(rte2->relname, NULL, r_schema_name); if (!ltable || !rtable) { return; @@ -472,10 +517,16 @@ void get_order_condition_from_plan(Node* node) } char *relname = get_rel_name(origtbl); char *schemaname = get_namespace_name(get_rel_namespace(origtbl)); + // get the source column of the target entry + char *column_name = pstrdup(get_attname(origtbl, targetEntry->resorigcol)); + // CTE targets are not supported: skip when the output name differs from the source column + if (strcasecmp(column_name, targetEntry->resname) != 0) { + return; + } if (relname && schemaname) { TableCell *tblcell = find_or_create_tblcell(relname, NULL, schemaname); if (tblcell != nullptr) { - add_index(tblcell, targetEntry->resname); + add_index(tblcell, column_name); } } else { ereport(WARNING, @@ -560,6 +611,9 @@ List *get_index_attname(Oid index_oid) int2 attnum; for (int i = 0; i < attnums->dim1; i++) { attnum = attnums->values[i]; + if (attnum < 1) { + break; + } attnames = lappend(attnames, search_table_attname(indrelid, attnum)); } @@ -705,6 +759,8 @@ void find_select_stmt(Node *parsetree) g_stmt_list = lappend(g_stmt_list, stmt); switch (stmt->op) { + case SETOP_INTERSECT: + case SETOP_EXCEPT: case SETOP_UNION: { // analyze the set operation: union find_select_stmt((Node *)stmt->larg); @@ -783,8 +839,126 @@ void extract_stmt_where_clause(Node *item_where) } }
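
The find_select_stmt change above routes INTERSECT and EXCEPT through the same recursion already used for UNION, so every leaf SELECT of a set-operation tree gets collected and advised. A minimal standalone sketch of that walk, assuming hypothetical simplified types (SetOpNode and collect_stmt stand in for the kernel's SelectStmt and g_stmt_list handling):

```cpp
#include <vector>

enum SetOp { SETOP_NONE, SETOP_UNION, SETOP_INTERSECT, SETOP_EXCEPT };

struct SetOpNode {
    SetOp op;          // SETOP_NONE for a plain SELECT
    SetOpNode *larg;   // left input of the set operation, if any
    SetOpNode *rarg;   // right input of the set operation, if any
};

// Collect every leaf SELECT so each one can be advised independently;
// UNION, INTERSECT and EXCEPT are all walked the same way.
static void collect_stmt(SetOpNode *stmt, std::vector<SetOpNode *> &out)
{
    if (stmt == nullptr) {
        return;
    }
    if (stmt->op == SETOP_NONE) {
        out.push_back(stmt);
        return;
    }
    collect_stmt(stmt->larg, out);
    collect_stmt(stmt->rarg, out);
}
```
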
+void get_partition_index_type(IndexPrint *suggested_index, TableCell *table) +{ + StringInfoData partition_keys; + initStringInfo(&partition_keys); + int i = 0; + ListCell *value = NULL; + foreach (value, table->partition_key_list) { + char *partition_key = (char *)lfirst(value); + if (i == 0) { + appendStringInfoString(&partition_keys, partition_key); + } else { + appendStringInfoString(&partition_keys, ","); + appendStringInfoString(&partition_keys, partition_key); + } + i++; + } + if (table->subpartition_key_list != NIL) { + StringInfoData subpartition_keys; + initStringInfo(&subpartition_keys); + int i = 0; + foreach (value, table->subpartition_key_list) { + char *subpartition_key = (char *)lfirst(value); + if (i == 0) { + appendStringInfoString(&subpartition_keys, subpartition_key); + } else { + appendStringInfoString(&subpartition_keys, ","); + appendStringInfoString(&subpartition_keys, subpartition_key); + } + i++; + } + if (!strstr(suggested_index->index_columns, subpartition_keys.data)) { + // if no subpartition is specified and the suggested columns do not contain the subpartition key, + // then recommend a global index + suggested_index->index_type = "global"; + pfree_ext(partition_keys.data); + pfree_ext(subpartition_keys.data); + return; + } + if (table->ispartition || strstr(suggested_index->index_columns, partition_keys.data) != NULL) { + // if the suggested columns contain the subpartition key and a partition is specified, or + // they contain both the subpartition key and the partition key, + // then recommend a local index + suggested_index->index_type = "local"; + } else { + suggested_index->index_type = "global"; + } + pfree_ext(subpartition_keys.data); + } else { + if (table->ispartition || strstr(suggested_index->index_columns, partition_keys.data) != NULL) { + // if a partition is specified or the suggested columns contain the partition key, recommend a local index + suggested_index->index_type = "local"; + } else { + suggested_index->index_type = "global"; + } + } + pfree_ext(partition_keys.data); +} + /* - * generate_final_index + * generat_index_print + * Generate the index type: a normal table gets '' by default, while a + * partition table is divided into local and global. + * + * The table in each subquery is preferentially selected for matching; + * otherwise, the same table repeated in different partition states across + * the whole query could yield an incorrect index type. + */ +IndexPrint *generat_index_print(TableCell *table, char *index_print) +{ + IndexPrint *suggested_index = (IndexPrint *)palloc0(sizeof(*suggested_index)); + suggested_index->index_columns = index_print; + suggested_index->index_type = NULL; + // recommend an index type for a partitioned table's index + bool stmt_table_match = false; + ListCell *stmt_table = NULL; + foreach (stmt_table, t_thrd.index_advisor_cxt.stmt_table_list) { + TableCell *cur_table = (TableCell *)lfirst(stmt_table); + char *cur_schema_name = cur_table->schema_name; + char *cur_table_name = cur_table->table_name; + if (IsSameRel(cur_schema_name, cur_table_name, table->schema_name, table->table_name)) { + stmt_table_match = true; + if (cur_table->issubpartition) { + // a query against an explicitly specified subpartition gets a local index + suggested_index->index_type = "local"; + } else if (cur_table->partition_key_list != NIL) { + get_partition_index_type(suggested_index, cur_table); + } else { + suggested_index->index_type = ""; + } + break; + } + } + if (!t_thrd.index_advisor_cxt.stmt_table_list || !stmt_table_match) { + if (table->issubpartition) { + suggested_index->index_type = "local"; + } else if (table->partition_key_list != NIL) { + get_partition_index_type(suggested_index, table); + } else { + suggested_index->index_type = ""; + } + } + return suggested_index; +} + +TableCell *find_table(TableCell *table) +{ + ListCell *item = NULL; + foreach (item, g_table_list) { + TableCell *cur_table = (TableCell *)lfirst(item); + char *cur_schema_name = cur_table->schema_name; + char *cur_table_name = cur_table->table_name; + if (IsSameRel(cur_schema_name, cur_table_name, table->schema_name, table->table_name)) { + return cur_table; + } + } + return NULL; +}
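
The local/global decision above reduces to a containment test over comma-separated key strings. A simplified model of that rule (recommend_index_type and its parameters are hypothetical; std::string stands in for the kernel's StringInfo):

```cpp
#include <string>

// index_columns: the suggested index columns, e.g. "col1,col2"
// partition_keys: the table's (sub)partition keys, e.g. "col1"
// partition_specified: whether the query already pins one (sub)partition
static const char *recommend_index_type(const std::string &index_columns,
                                        const std::string &partition_keys,
                                        bool partition_specified)
{
    // A local index is only recommended when lookups can be pruned to a
    // single partition: either the query names a partition explicitly, or
    // the suggested columns cover the partition key.
    if (partition_specified ||
        index_columns.find(partition_keys) != std::string::npos) {
        return "local";
    }
    return "global";
}
```

Note that, like the strstr() test in the patch, the sketch uses plain substring matching, so a partition key that happens to be a substring of another column name would also match.
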
+ +/* + * generate_final_index_print + * Concatenate the candidate indexes of the table into a string of * composite index. * @@ -794,7 +968,7 @@ void extract_stmt_where_clause(Node *item_where) void generate_final_index_print(TableCell *table, Oid table_oid) { Size suggested_index_len = NAMEDATALEN * table->index->length; - char *suggested_index = (char *)palloc0(suggested_index_len); + char *index_print = (char *)palloc0(suggested_index_len); ListCell *index = NULL; errno_t rc = EOK; int i = 0; @@ -802,15 +976,15 @@ void generate_final_index_print(TableCell *table, Oid table_oid) // concatenate the candidate indexes into a string foreach (index, table->index) { char *index_name = ((IndexCell *)lfirst(index))->index_name; - if ((strlen(index_name) + strlen(suggested_index) + 1) > suggested_index_len) { + if ((strlen(index_name) + strlen(index_print) + 1) > suggested_index_len) { break; } if (i == 0) { - rc = strcpy_s(suggested_index, suggested_index_len, index_name); + rc = strcpy_s(index_print, suggested_index_len, index_name); } else { - rc = strcat_s(suggested_index, suggested_index_len, ","); + rc = strcat_s(index_print, suggested_index_len, ","); securec_check(rc, "\0", "\0"); - rc = strcat_s(suggested_index, suggested_index_len, index_name); + rc = strcat_s(index_print, suggested_index_len, index_name); } securec_check(rc, "\0", "\0"); i++; @@ -818,15 +992,7 @@ void generate_final_index_print(TableCell *table, Oid table_oid) list_free_deep(table->index); table->index = NIL; - // check the previously suggested indexes - ListCell *prev_index = NULL; - foreach (prev_index, table->index_print) { - if (strncasecmp((char *)lfirst(prev_index), suggested_index, strlen(suggested_index)) == 0) { - pfree(suggested_index); - return; - } - } - + TableCell *table_index = find_table(table); // check the existed indexes List *indexes = get_table_indexes(table_oid); index = NULL; @@ -853,21 +1019,42 @@ void generate_final_index_print(TableCell *table, Oid table_oid) } list_free(attnames); // the suggested index has existed - if (strcasecmp(existed_index, suggested_index) == 0) { - pfree(suggested_index); + if (strcasecmp(existed_index, index_print) == 0) { + if (table_index == NULL) { + g_table_list = lappend(g_table_list, table); + } + pfree(index_print); pfree(existed_index); return; } pfree(existed_index); } - table->index_print = lappend(table->index_print, suggested_index); + if (table_index != NULL) { + // check the previously suggested indexes + ListCell *prev_index = NULL; + IndexPrint *suggest_index = NULL; + suggest_index = generat_index_print(table, index_print); + foreach (prev_index, table_index->index_print) { + if (strncasecmp(((IndexPrint *)lfirst(prev_index))->index_columns, index_print, strlen(index_print)) == 0) { + if (strcmp(((IndexPrint *)lfirst(prev_index))->index_type, suggest_index->index_type) != 0) { + ((IndexPrint *)lfirst(prev_index))->index_type = "global"; + } + pfree(index_print); + return; + } + } + table_index->index_print = lappend(table_index->index_print, suggest_index); + } else { + table->index_print = lappend(table->index_print, generat_index_print(table, index_print)); + g_table_list = lappend(g_table_list, table); + } return; } void generate_final_index() { ListCell *table_item = NULL; - foreach (table_item, g_table_list) { + foreach (table_item, t_thrd.index_advisor_cxt.stmt_table_list) { TableCell *table = (TableCell *)lfirst(table_item); if (table->index != NIL) { RangeVar *rtable = makeRangeVar(table->schema_name, table->table_name, -1); @@ -876,6 +1063,8 @@ void generate_final_index() continue; } generate_final_index_print(table, table_oid); + } else if (find_table(table) == NULL) { + g_table_list = lappend(g_table_list, table); } } }
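
When the same column set is suggested again for a table, the code above keeps a single entry and, if two statements disagree on the index type, degrades it to "global", the only type that can serve both. A standalone sketch of that merge rule (Suggestion and merge_suggestion are hypothetical simplified stand-ins for IndexPrint and the List handling; the patch itself compares with a strncasecmp prefix match):

```cpp
#include <cstring>
#include <strings.h>
#include <vector>

struct Suggestion {
    const char *columns;     // e.g. "col1,col2"
    const char *index_type;  // "", "local" or "global"
};

static void merge_suggestion(std::vector<Suggestion> &existing, const Suggestion &incoming)
{
    for (Suggestion &prev : existing) {
        if (strcasecmp(prev.columns, incoming.columns) == 0) {
            // Same column set suggested twice: conflicting index types can
            // only both be satisfied by a global index.
            if (strcmp(prev.index_type, incoming.index_type) != 0) {
                prev.index_type = "global";
            }
            return;
        }
    }
    existing.push_back(incoming);
}
```
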
@@ -942,34 +1131,61 @@ void parse_where_clause(Node *item_where) void parse_from_clause(List *from_list) { ListCell *from = NULL; - foreach (from, from_list) { Node *item = (Node *)lfirst(from); if (nodeTag(item) == T_RangeVar) { // single table RangeVar *table = (RangeVar *)item; if (table->alias) { - (void)find_or_create_tblcell(table->relname, table->alias->aliasname, table->schemaname); + (void)find_or_create_tblcell(table->relname, table->alias->aliasname, table->schemaname, + table->ispartition, table->issubpartition); } else { - (void)find_or_create_tblcell(table->relname, NULL, table->schemaname); + (void)find_or_create_tblcell(table->relname, NULL, table->schemaname, table->ispartition, + table->issubpartition); } } else if (nodeTag(item) == T_JoinExpr) { // multi-table join parse_join_tree((JoinExpr *)item); + } else if (nodeTag(item) == T_RangeSubselect) { + SelectStmt *stmt = (SelectStmt *)(((RangeSubselect *)item)->subquery); + if (stmt->fromClause) { + parse_from_clause(stmt->fromClause); + } } else { } } } +void parse_target_list(List *target_list) +{ + ListCell *target_cell = NULL; + foreach (target_cell, target_list) { + Node *item = (Node *)lfirst(target_cell); + if (nodeTag(item) != T_ResTarget) { + continue; + } + ResTarget *target = (ResTarget *)item; + if (nodeTag(target->val) != T_ColumnRef) { + continue; + } + // save the target only when an alias name exists. + if (target->name) { + (void)find_or_create_clmncell(&target->name, ((ColumnRef *)(target->val))->fields); + } + } +} + void add_drived_tables(RangeVar *join_node) { TableCell *join_table = NULL; if (join_node->alias) { - join_table = find_or_create_tblcell(join_node->relname, join_node->alias->aliasname, join_node->schemaname); + join_table = find_or_create_tblcell(join_node->relname, join_node->alias->aliasname, join_node->schemaname, + join_node->ispartition, join_node->issubpartition); } else { - join_table = find_or_create_tblcell(join_node->relname, NULL, join_node->schemaname); - } + join_table = find_or_create_tblcell(join_node->relname, NULL, join_node->schemaname, join_node->ispartition, + join_node->issubpartition); + } if (!join_table) { return; @@ -988,12 +1204,22 @@ void parse_join_tree(JoinExpr *join_tree) parse_join_tree((JoinExpr *)larg); } else if (nodeTag(larg) == T_RangeVar) { add_drived_tables((RangeVar *)larg); + } else if (nodeTag(larg) == T_RangeSubselect) { + SelectStmt *stmt = (SelectStmt *)(((RangeSubselect *)larg)->subquery); + if (stmt->fromClause) { + parse_from_clause(stmt->fromClause); + } } if (nodeTag(rarg) == T_JoinExpr) { parse_join_tree((JoinExpr *)rarg); } else if (nodeTag(rarg) == T_RangeVar) { add_drived_tables((RangeVar *)rarg); + } else if (nodeTag(rarg) == T_RangeSubselect) { + SelectStmt *stmt = (SelectStmt *)(((RangeSubselect *)rarg)->subquery); + if (stmt->fromClause) { + parse_from_clause(stmt->fromClause); + } } if (join_tree->isNatural == true) { @@ -1098,14 +1324,18 @@ void parse_join_expr(JoinExpr *join_tree) TableCell *ltable, *rtable = NULL; if (l_join_node->alias) { - ltable = find_or_create_tblcell(l_join_node->relname, l_join_node->alias->aliasname, l_join_node->schemaname); + ltable = find_or_create_tblcell(l_join_node->relname, l_join_node->alias->aliasname, l_join_node->schemaname, + l_join_node->ispartition, l_join_node->issubpartition); } else { - ltable = find_or_create_tblcell(l_join_node->relname, NULL, l_join_node->schemaname); + ltable = find_or_create_tblcell(l_join_node->relname, NULL, l_join_node->schemaname, l_join_node->ispartition, + l_join_node->issubpartition); } if (r_join_node->alias) { - rtable = find_or_create_tblcell(r_join_node->relname, r_join_node->alias->aliasname, r_join_node->schemaname); + rtable = find_or_create_tblcell(r_join_node->relname, r_join_node->alias->aliasname, r_join_node->schemaname, + r_join_node->ispartition, r_join_node->issubpartition); } else { - rtable = find_or_create_tblcell(r_join_node->relname, NULL, r_join_node->schemaname); + rtable = find_or_create_tblcell(r_join_node->relname, NULL, r_join_node->schemaname, r_join_node->ispartition, + r_join_node->issubpartition); } if (!ltable || !rtable) { @@ -1292,7 +1522,7 @@ void split_field_list(List *fields, char **schema_name_ptr, char **table_name_pt // if fields have table name if (*table_name_ptr != NULL) { // check existed tables - foreach (item, g_table_list) { + foreach (item, t_thrd.index_advisor_cxt.stmt_table_list) { TableCell *cur_table = (TableCell *)lfirst(item); if (*schema_name_ptr != NULL && strcasecmp(*schema_name_ptr, cur_table->schema_name) != 0) { continue; @@ -1333,8 +1563,9 @@ void find_table_by_column(char **schema_name_ptr, char **table_name_ptr, char ** Oid relid; Relation relation; TupleDesc tupdesc; - - foreach (lc, g_table_list) { + TargetCell *target = NULL; + target = find_or_create_clmncell(col_name_ptr, NULL); + foreach (lc, t_thrd.index_advisor_cxt.stmt_table_list) { char *schema_name = ((TableCell *)lfirst(lc))->schema_name; char *table_name = ((TableCell *)lfirst(lc))->table_name; @@ -1351,7 +1582,28 @@ void find_table_by_column(char **schema_name_ptr, char **table_name_ptr, char ** break; } } + if (cnt == 1) { + heap_close(relation, AccessShareLock); + break; + } + // handle the case where the column is referenced through a select-list alias + if (target == NULL) { + heap_close(relation, AccessShareLock); + continue; + } + for (int attrIdx = tupdesc->natts - 1; attrIdx >= 0; --attrIdx) { + if (strcasecmp(target->column_name, RelAttrName(tupdesc, attrIdx)) == 0) { + cnt += 1; + *schema_name_ptr = schema_name; + *table_name_ptr = table_name; + *col_name_ptr = target->column_name; + break; + } + } heap_close(relation, AccessShareLock); + if (cnt == 1) { + break; + } } if (cnt != 1) { @@ -1359,6 +1611,35 @@ void find_table_by_column(char **schema_name_ptr, char **table_name_ptr, char ** } } +// Get the partition keys of a partition table or a subpartition table +void get_partition_key_name(Relation rel, TableCell *table, bool is_subpartition) +{ + int partkey_column_n = 0; + int2vector *partkey_column = NULL; + partkey_column = GetPartitionKey(rel->partMap); + partkey_column_n = partkey_column->dim1; + for (int i = 0; i < partkey_column_n; i++) { + table->partition_key_list = + lappend(table->partition_key_list, get_attname(rel->rd_id, partkey_column->values[i])); + } + if (is_subpartition) { + List *partOidList = relationGetPartitionOidList(rel); + Assert(list_length(partOidList) != 0); + Partition subPart = partitionOpen(rel, linitial_oid(partOidList), NoLock); + Relation subPartRel = partitionGetRelation(rel, subPart); + int subpartkey_column_n = 0; + int2vector *subpartkey_column = NULL; + subpartkey_column = GetPartitionKey(subPartRel->partMap); + subpartkey_column_n = subpartkey_column->dim1; + for (int i = 0; i < subpartkey_column_n; i++) { + table->subpartition_key_list = + lappend(table->subpartition_key_list, get_attname(rel->rd_id, subpartkey_column->values[i])); + } + releaseDummyRelation(&subPartRel); + partitionClose(rel, subPart, NoLock); + } +} +
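
The alias branch added to find_table_by_column lets a bare name that matches no real column fall back to the SELECT-list alias registry, so a query such as SELECT col1 AS c ... ORDER BY c is still advised on col1. A standalone sketch of that resolution order (the std::map registry is a hypothetical stand-in for stmt_target_list):

```cpp
#include <map>
#include <set>
#include <string>

static bool resolve_column(const std::string &name,
                           const std::set<std::string> &table_columns,
                           const std::map<std::string, std::string> &alias_to_column,
                           std::string &resolved)
{
    if (table_columns.count(name) != 0) {
        resolved = name;  // a real column wins over any alias
        return true;
    }
    // e.g. "c" -> "col1" recorded from "SELECT col1 AS c"
    auto it = alias_to_column.find(name);
    if (it != alias_to_column.end() && table_columns.count(it->second) != 0) {
        resolved = it->second;  // advise on the source column, not the alias
        return true;
    }
    return false;
}
```
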
// Check whether the table type is supported. bool check_relation_type_valid(Oid relid) { @@ -1370,8 +1651,7 @@ bool check_relation_type_valid(Oid relid) heap_close(relation, AccessShareLock); return result; } - if (RelationIsRelation(relation) && RelationGetStorageType(relation) == HEAP_DISK && - RelationGetPartType(relation) == PARTTYPE_NON_PARTITIONED_RELATION && + if (RelationIsRelation(relation) && RelationGetStorageType(relation) == HEAP_DISK && RelationGetRelPersistence(relation) == RELPERSISTENCE_PERMANENT) { const char *format = ((relation->rd_options) && (((StdRdOptions *)(relation->rd_options))->orientation)) ? ((char *)(relation->rd_options) + *(int *)&(((StdRdOptions *)(relation->rd_options))->orientation)) : @@ -1403,9 +1683,10 @@ bool is_tmp_table(const char *table_name) /* * find_or_create_tblcell * Find the TableCell that has given table_name(alias_name) among the global - * variable 'g_table_list', and if not found, create a TableCell and return it. + * variable 't_thrd.index_advisor_cxt.stmt_table_list', and if not found, create a TableCell and return it. */ -TableCell *find_or_create_tblcell(char *table_name, char *alias_name, char *schema_name) +TableCell *find_or_create_tblcell(char *table_name, char *alias_name, char *schema_name, bool ispartition, + bool issubpartition) { if (!table_name) { return NULL; @@ -1419,8 +1700,8 @@ TableCell *find_or_create_tblcell(char *table_name, char *alias_name, char *sche ListCell *item = NULL; ListCell *sub_item = NULL; - if (g_table_list != NIL) { - foreach (item, g_table_list) { + if (t_thrd.index_advisor_cxt.stmt_table_list != NIL) { + foreach (item, t_thrd.index_advisor_cxt.stmt_table_list) { TableCell *cur_table = (TableCell *)lfirst(item); char *cur_schema_name = cur_table->schema_name; char *cur_table_name = cur_table->table_name; @@ -1464,15 +1745,57 @@ TableCell *find_or_create_tblcell(char *table_name, char *alias_name, char *sche new_table->alias_name = NIL; if (alias_name) { new_table->alias_name = lappend(new_table->alias_name, alias_name); - } + } new_table->index = NIL; new_table->join_cond = NIL; new_table->index_print = NIL; - g_table_list = lappend(g_table_list, new_table); - + new_table->partition_key_list = NIL; + new_table->subpartition_key_list = NIL; + new_table->ispartition = ispartition; + new_table->issubpartition = issubpartition; + // record the partition keys of a partition table, including the subpartition level + Relation rel = heap_open(table_oid, AccessShareLock); + if (RelationIsPartitioned(rel)) { + if (RelationIsSubPartitioned(rel)) { + get_partition_key_name(rel, new_table, true); + } else { + get_partition_key_name(rel, new_table); + } + } + heap_close(rel, AccessShareLock); + t_thrd.index_advisor_cxt.stmt_table_list = lappend(t_thrd.index_advisor_cxt.stmt_table_list, new_table); return new_table; }
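
The helper defined next, find_or_create_clmncell, is a lookup-or-insert registry keyed by alias name (a NIL fields list means pure lookup). A minimal sketch of the same pattern with hypothetical simplified types, where a null column plays the role of the NIL fields list:

```cpp
#include <strings.h>
#include <vector>

struct Target {
    const char *alias_name;
    const char *column_name;
};

static Target *lookup_or_insert(std::vector<Target> &targets,
                                const char *alias, const char *column)
{
    for (Target &t : targets) {
        if (strcasecmp(t.alias_name, alias) == 0) {
            return &t;  // the alias is already registered for this statement
        }
    }
    if (column == nullptr) {
        return nullptr;  // pure lookup found nothing
    }
    targets.push_back(Target{alias, column});
    return &targets.back();
}
```
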
+/* + * find_or_create_clmncell + * Find the columnCell that has the given column_name (alias_name) among the global + * variable 't_thrd.index_advisor_cxt.stmt_target_list', and if not found, create a columnCell and return it. + * We assume that a column alias name is unique within a select query. + */ +TargetCell *find_or_create_clmncell(char **alias_name, List *fields) +{ + if (fields == NIL) { + // find the columnCell according to the alias_name + ListCell *target_cell = NULL; + foreach (target_cell, t_thrd.index_advisor_cxt.stmt_target_list) { + TargetCell *target = (TargetCell *)lfirst(target_cell); + if (strcasecmp(target->alias_name, *alias_name) == 0) { + return target; + } + } + return NULL; + } + // create a columnCell and return it + TargetCell *new_target = NULL; + new_target = (TargetCell *)palloc0(sizeof(TargetCell)); + new_target->alias_name = *alias_name; + new_target->column_name = strVal(linitial(fields)); + t_thrd.index_advisor_cxt.stmt_target_list = lappend(t_thrd.index_advisor_cxt.stmt_target_list, new_target); + + return new_target; +} + /* * add_index_from_field * Add index from field for the table. @@ -1542,8 +1865,8 @@ void add_index_from_field(char *schema_name, char *table_name, IndexCell *index) // The table that has smallest result set is set as the driver table. void determine_driver_table() { - if (g_table_list->length == 1) { - g_driver_table = (TableCell *)linitial(g_table_list); + if (t_thrd.index_advisor_cxt.stmt_table_list->length == 1) { + g_driver_table = (TableCell *)linitial(t_thrd.index_advisor_cxt.stmt_table_list); } else { if (g_drived_tables != NIL) { uint4 small_result_set = UINT32_MAX; @@ -1699,7 +2022,7 @@ bool parse_group_clause(List *group_clause, List *target_list) } if (is_only_one_table && pre_schema && pre_table) { - if (g_table_list->length == 1 || (g_driver_table && + if (t_thrd.index_advisor_cxt.stmt_table_list->length == 1 || (g_driver_table && IsSameRel(pre_schema, pre_table, g_driver_table->schema_name, g_driver_table->table_name))) { return true; } @@ -1764,7 +2087,7 @@ bool parse_order_clause(List *order_clause, List *target_list) } if (is_only_one_table && pre_schema && pre_table) { - if (g_table_list->length == 1 || (g_driver_table && + if (t_thrd.index_advisor_cxt.stmt_table_list->length == 1 || (g_driver_table && IsSameRel(pre_schema, pre_table, g_driver_table->schema_name, g_driver_table->table_name))) { return true; } diff --git a/src/gausskernel/dbmind/tools/__init__.py b/src/gausskernel/dbmind/tools/__init__.py new file mode 100644 index 000000000..054f32436 --- /dev/null +++ b/src/gausskernel/dbmind/tools/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/share/common.sh b/src/gausskernel/dbmind/tools/__main__.py similarity index 57% rename from src/gausskernel/dbmind/tools/anomaly_detection/share/common.sh rename to src/gausskernel/dbmind/tools/__main__.py index 37c0d60cd..73046464d 100644 --- a/src/gausskernel/dbmind/tools/anomaly_detection/share/common.sh +++ b/src/gausskernel/dbmind/tools/__main__.py @@ -1,24 +1,24 @@ -#!/bin/bash # Copyright (c) 2020 Huawei Technologies Co.,Ltd. # # openGauss is licensed under Mulan PSL v2. 
# You can use this software according to the terms and conditions of the Mulan PSL v2. # You may obtain a copy of Mulan PSL v2 at: # -# http://license.coscl.org.cn/MulanPSL2 +# http://license.coscl.org.cn/MulanPSL2 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, # EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, # MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. -#------------------------------------------------------------------------- -# -# common.sh -# common info of A-Detection -# -#------------------------------------------------------------------------- +try: + from dbmind.cmd import main +except ImportError: + import sys + import os -CURRENT_DIR=$(cd ../$(dirname $0); pwd) -BASENAME=$(basename $CURRENT_DIR) + curr_path = os.path.dirname(os.path.abspath(__file__)) + root_path = os.path.dirname(curr_path) + sys.path.append(root_path) + from dbmind.cmd import main -PROJECT_NAME="A-Detection" +main() diff --git a/src/gausskernel/dbmind/tools/ai_manager/ai_manager.py b/src/gausskernel/dbmind/tools/ai_manager/ai_manager.py deleted file mode 100644 index 2816a5b00..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/ai_manager.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : ai_manager.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Main entrance of ai manager -############################################################################# - -import optparse -import os - -from definitions.constants import Constant -from tools.global_box import g -from module.anomaly_detection.ad_main import AnomalyDetection -from module.index_advisor.index_main import IndexAdvisor -from definitions.errors import Errors -from tools.common_tools import CommonTools -from tools import params_checkers -from config_cabin.config import EXTRACT_DIR -from config_cabin.config import PROJECT_PATH -from config_cabin.config import PYTHON_PATH -from config_cabin.config import ENV_FILE -from tools.env_handler import EnvHandler -from tools.params_checkers import LostChecker - -MODULE_MAPPING = { - 'anomaly_detection': AnomalyDetection, - 'index_advisor': IndexAdvisor -} - - -class Manager(object): - def __init__(self, **kwargs): - self.arg_dict = kwargs - self.project_path = PROJECT_PATH - self.module = self.arg_dict.get('module') - self.action = self.arg_dict.get('action') - self.package_path = self.arg_dict.get('package_path') - self.install_path = None - self.version = None - self.ai_manager_path = None - - def init_globals(self): - self.install_path, self.version = self._get_install_path_with_no_package() - self.arg_dict['install_path'] = self.install_path - self.arg_dict['version'] = self.version - self.ai_manager_path = os.path.realpath(os.path.join( - self.install_path, Constant.AI_MANAGER_PATH)) - - def check_params(self): - lost_checker = LostChecker(self.arg_dict) - lost_checker.run() - funcs = CommonTools.get_funcs(params_checkers) - for param_name, param_value in self.arg_dict.items(): - if param_name not in params_checkers.PARAMS_CHECK_MAPPING: - raise Exception(Errors.PARAMETER['gauss_0203'] % param_name) - funcs[params_checkers.PARAMS_CHECK_MAPPING[param_name]](param_value) - - @staticmethod - def check_process_exist(): - count = CommonTools.check_process('ai_manager') - if count > 1: - raise 
Exception(Errors.PERMISSION['gauss_0702'] % 'ai_manager') - else: - g.logger.debug('Check process passed.') - - @staticmethod - def check_user(): - is_root = CommonTools.check_is_root() - if is_root: - raise Exception(Errors.PERMISSION['gauss_0703']) - else: - g.logger.debug('Check user passed.') - - def _get_install_path_with_no_package(self): - """ - Get version info from version file with relative path - :return: - """ - version_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__))))), Constant.VERSION_FILE) - version = CommonTools.get_version_info_from_file(version_file) - g.logger.info('Got version info:%s' % version) - base_dir = Constant.PACK_PATH_PREFIX + version - install_path = os.path.join(self.project_path, base_dir) - g.logger.info('Got install path:%s.' % install_path) - CommonTools.check_path_valid(install_path) - g.logger.info('Successfully to get install path.') - return install_path, version - - def _get_install_path(self, pack_path): - """ - Extract version info and assembling install path. - """ - g.logger.info('Start getting install path.') - # mk temp extract dir if not exist - CommonTools.mkdir_with_mode(EXTRACT_DIR, Constant.AUTH_COMMON_DIR_STR) - # clean extract dir - CommonTools.clean_dir(EXTRACT_DIR) - # extract package file to temp dir - CommonTools.extract_file_to_dir(pack_path, EXTRACT_DIR) - g.logger.info('Success extract files to temp dir.') - # get version info from version file - version_file = os.path.realpath(os.path.join(EXTRACT_DIR, Constant.VERSION_FILE)) - version = CommonTools.get_version_info_from_file(version_file) - g.logger.info('Got version info:%s' % version) - base_dir = Constant.PACK_PATH_PREFIX + version - install_path = os.path.join(self.project_path, base_dir) - g.logger.info('Got install path:%s.' % install_path) - CommonTools.check_path_valid(install_path) - g.logger.info('Successfully to get install path.') - return install_path, version - - def _check_project_path_access(self): - """ - Check project path is full authority. - """ - CommonTools.check_dir_access(self.project_path, 'full') - - def _mk_manager_dir(self): - """ - Create install path if the path is not exist. - """ - if not os.path.isdir(self.ai_manager_path): - g.logger.info('Install path:%s is not exist, start creating.' % self.ai_manager_path) - CommonTools.mkdir_with_mode(self.ai_manager_path, Constant.AUTH_COMMON_DIR_STR) - else: - g.logger.info('Install path:%s is already exist.' % self.ai_manager_path) - - def _clean_manager_dir(self): - """ - Clean install path before unpack. 
- """ - file_list = os.listdir(self.ai_manager_path) - if file_list: - g.logger.info('Start clean install path, file list:[%s]' % file_list) - CommonTools.clean_dir(self.ai_manager_path) - - def _copy_manager_files(self): - """ - Copy manager files to manager dir - """ - from_path = os.path.join(os.path.dirname(os.path.dirname( - os.path.realpath(__file__))), Constant.AI_MANAGER_PATH) - to_path = self.ai_manager_path - CommonTools.copy_file_to_dest_path(from_path, to_path) - g.logger.info('Successfully to copy files to manager path.') - - @staticmethod - def _copy_lib(): - """ - Copy lib file to project path - """ - from_path = os.path.realpath(os.path.join(EXTRACT_DIR, Constant.AI_LIB_PATH)) - to_path = PYTHON_PATH - CommonTools.copy_file_to_dest_path(from_path, to_path) - g.logger.info('Successfully to copy lib files.') - - @staticmethod - def clean_temp_extract_dir(): - """ - Clean temp unpack dir - """ - CommonTools.clean_dir(EXTRACT_DIR) - g.logger.info('Successfully clean temp unpack dir.') - - @staticmethod - def modify_env_file(): - handler = EnvHandler(ENV_FILE) - handler.run() - - def prepare_manager_tools(self): - """ - Prepare ai manager, create path, clean path and copy files - """ - self._check_project_path_access() - self._mk_manager_dir() - self._clean_manager_dir() - self._copy_manager_files() - - def run(self): - g.logger.info(Constant.LOG_SEP_LINE) - g.logger.info('Starting Module[%s]-Action[%s]...' % (self.module, self.action)) - g.logger.info(Constant.LOG_SEP_LINE) - g.logger.info('Get input arguments:%s' % str(self.arg_dict)) - self.check_params() - self.check_user() - self.check_process_exist() - if self.action.lower() != 'uninstall': - self.init_globals() - self.modify_env_file() - self.prepare_manager_tools() - module_inst = MODULE_MAPPING[self.module](**self.arg_dict) - module_inst.run() - self.clean_temp_extract_dir() - - -def init_parser(): - """parser command""" - parser = optparse.OptionParser(conflict_handler='resolve') - parser.disable_interspersed_args() - parser.add_option('--module', dest='module', help='function module block') - parser.add_option('--action', dest='action', help='action') - parser.add_option('--param_file', dest='param_file', help='json file') - - return parser - - -if __name__ == '__main__': - ai_parser = init_parser() - opt, arg = ai_parser.parse_args() - module = opt.module - action = opt.action - param_file = opt.param_file - if not param_file: - g.logger.error('Failed to get param file.') - raise Exception(Errors.FILE_DIR_PATH['gauss_0105']) - param_dict = CommonTools.json_file_to_dict(param_file) - if action: - param_dict['action'] = action - if module: - param_dict['module'] = module - manager = Manager(**param_dict) - manager.run() - diff --git a/src/gausskernel/dbmind/tools/ai_manager/config_cabin/config.py b/src/gausskernel/dbmind/tools/ai_manager/config_cabin/config.py deleted file mode 100644 index 70790ff5e..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/config_cabin/config.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
-# FileName : config.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : config -############################################################################# - -PROJECT_PATH = '/dbs/AI-tools' -# for cron lock file -TMP_DIR = PROJECT_PATH + '/lock_file' -# for temp decompression package -EXTRACT_DIR = PROJECT_PATH + '/tmp_extract_dir' -# for record anomaly detection module installed info -VERSION_RECORD_FILE_ANOMALY_DETECTION = PROJECT_PATH + '/version_record_anomaly_detection' -# for record index advisor module installed info -VERSION_RECORD_FILE_INDEX_ADVISOR = PROJECT_PATH + '/version_record_index_advisor' -# for tep keep ca certs -TMP_CA_FILE = PROJECT_PATH + '/tmp_ca' -# for python path -PYTHON_PATH = PROJECT_PATH + '/ai_lib' -# ai env file -ENV_FILE = PROJECT_PATH + '/ai_env' - - -# log config -LOG_PATH = '/dbs/AI-tools/ai_manager.log' -# debug:1 info:2 warning:3 error:4 fatal:5 -LOG_LEVEL = 1 -LOG_MAX_BYTES = 1024 * 1024 * 128 -LOG_BACK_UP = 10 - diff --git a/src/gausskernel/dbmind/tools/ai_manager/definitions/constants.py b/src/gausskernel/dbmind/tools/ai_manager/definitions/constants.py deleted file mode 100644 index 39f67f23e..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/definitions/constants.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : constants.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : constants.py -############################################################################# -import os -import sys - -sys.path.append(sys.path[0] + "/../") -from config_cabin.config import ENV_FILE - - -class Constant(object): - # path - PACK_PATH_PREFIX = 'AI_' - VERSION_FILE = 'version.cfg' - CRON_PATH = 'ai_manager/tools/set_cron.py' - TASK_NAME_LIST = ['agent', 'server', 'monitor'] - REMOTE_DEPLOY_SCRIPT = 'bin/deploy.sh' - REMOTE_EXECUTE_SCRIPT = 'bin/execute.sh' - REMOTE_COMMANDER = 'bin/remote_commander' - AI_MANAGER_PATH = 'ai_manager' - AI_LIB_PATH = 'envs/ai_lib' - # encrypt tool path - ENCRYPT_TOOL = 'encrypt' - ANOMALY_DETECTION_INSTALL_SCRIPT_PATH = 'ai_manager/module/anomaly_detection/install.py' - ANOMALY_DETECTION_UNINSTALL_SCRIPT_PATH = 'ai_manager/module/anomaly_detection/uninstall.py' - # anomaly_detection config path - ANOMALY_DETECTION_CONFIG_PATH = 'ai_server/dbmind.conf' - CA_CONFIG = 'config/ca.conf' - PWF_PATH = 'certificate/pwf' - TEMP_ANOMALY_PARAM_FILE = 'param_file.json' - # CERTIFICATE - CA_ROOT_REQ = 'ca_root.req' - CA_REQ = 'ca.req' - CA_ROOT_VALID_DATE = '7300' - CA_VALID_DATE = '7000' - - # anomaly_detection config item - AD_CONF_SECTION_SECURITY = 'security' - AD_CONF_TLS_FLAG = 'tls' - AD_CONF_CA_PATH = 'ca' - AD_CONF_SERVER_CERT = 'server_cert' - AD_CONF_SERVER_KEY = 'server_key' - AD_CONF_AGENT_CERT = 'agent_cert' - AD_CONF_AGENT_KEY = 'agent_key' - AD_CONF_SECTION_DATABASE = 'database' - AD_CONF_DATABASE_PATH = 'database_path' - - # default values - VERSION_FILE_MAX_LINES = 100 - - # anomaly detection param name - MODULE = 'module' - AI_SERVER = 'ai_server' - MODULE_ANOMALY_DETECTION = 'anomaly_detection' - AGENT_NODES = 'agent_nodes' - NODE_IP = 'node_ip' - NODE_USER = 'username' - NODE_PWD = 'password' - PACKAGE_PATH = 'package_path' - # ca information in param file - CA_INFO = 'ca_info' - CA_CERT_PATH = 'ca_cert_path' - CA_KEY_PATH = 'ca_key_path' - CA_PASSWORD = 'ca_password' - - # index advisor param name - INSTALL_NODES = 
'install_nodes' - - # cmd prefix - CMD_PREFIX = 'source ~/.bashrc; source %s &&' % ENV_FILE - EXECUTE_REMOTE_SCRIPT = '\'%spython3 %s --param_file %s\'' - - # anormaly_detection_install_cron - ANORMALY_MAIN_SCRIPT = 'ai_server/main.py' - ANOMALY_INSTALL_CRON_LOCALLY = [ - "%s cd %s && nohup python3 %s start --role server" - ] - ANOMALY_STOP_CMD_LOCALLY = [ - "%s cd %s && nohup python3 %s stop --role server" - ] - OPENGAUSS_ANOMALY_INSTALL_CRON_LOCALLY = [ - "%s cd %s && nohup python3 %s start --role server" - ] - OPENGAUSS_ANOMALY_STOP_CMD_LOCALLY = [ - "%s cd %s && nohup python3 %s stop --role server" - ] - OPENGAUSS_ANOMALY_INSTALL_CRON_REMOTE = [ - "%s cd %s && nohup python3 %s start --role agent" - ] - OPENGAUSS_ANOMALY_STOP_CMD_REMOTE = [ - "%s cd %s && nohup python3 %s stop --role agent" - ] - - # expected content - CRON_INFO_EXPECTED = "no crontab for" - SCENE_HUAWEIYUN = "huaweiyun" - SCENE_OPENGAUSS = "opengauss" - - # execute result - SUCCESS = 'Successfully' - FAILED = 'Failed' - - # wait time - DEFAULT_WAIT_SECONDS = 70 - - # random passwd len - RANDOM_PASSWD_LEN = 12 - - # valid check - VALID_MODULE_NAME = ['anomaly_detection'] - VALID_ACTION_NAME = ['install', 'uninstall'] - VALID_BOOL_TYPE = ['True', 'False'] - PATH_CHECK_LIST = ["|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", - "{", "}", "(", ")", "[", "]", "~", "*", "?", " ", "!", "\n"] - CMD_CHECK_LIST = ["|", ";", "&", "<", ">", "`", "\\", "!", "\n"] - VALID_LOG_LEVEL = [1, 2, 3, 4, 5] - VALID_CONFIG_SECTION_OPT_MAPPING = { - 'server': ['host', 'listen_host', 'listen_port', 'pull_kafka'], - 'database': ['name', 'host', 'port', 'user', 'size', 'max_rows'], - 'agent': ['cluster_name', 'collection_item', 'source_timer_interval', - 'sink_timer_interval', 'channel_capacity', 'collection_type'], - 'security': ['tls'], - 'log': ['log_path'] - } - VALID_SCENE = ['huaweiyun', 'opengauss'] - VALID_CA_INFO = ['ca_cert_path', 'ca_key_path', 'ca_password'] - VALID_COLLECTION_TYPE = ['os', 'database', 'all'] - VALID_COLLECT_DATA_TYPE = ['dn', 'cn'] - # check items - CHECK_PORT = ['listen_port', 'port'] - CHECK_IP = ['host', 'listen_host', 'host'] - # valid database name - VALID_DATABASE_NAME = ['sqlite', 'influxdb', 'mongodb'] - # essential params check - CONFIG_INFO = 'config_info' - SERVER = 'server' - SERVER_HOST = 'host' - SERVER_LISTEN_HOST = 'listen_host' - SERVER_LISTEN_PORT = 'listen_port' - SERVER_PULL_KAFKA = 'pull_kafka' - DATABASE = 'database' - DATABASE_NAME = 'name' - DATABASE_HOST = 'host' - DATABASE_PORT = 'port' - DATABASE_USER = 'user' - DATABASE_SIZE = 'size' - DATABASE_MAX_ROWS = 'max_rows' - AGENT = 'agent' - AGENT_CLUSTER_NAME = 'cluster_name' - AGENT_COLLECTION_TYPE = 'collection_type' - AGENT_COLLECTION_ITEM = 'collection_item' - AGENT_CHANNEL_CAPACITY = 'channel_capacity' - AGENT_SOURCE_TIMER_INTERVAL = 'source_timer_interval' - AGENT_SINK_TIMER_INTERVAL = 'sink_timer_interval' - - # log - LOG_FORMATTER = '[%(asctime)s][%(levelname)s][%(pathname)s][%(funcName)s][%(lineno)d][%(message)s]' - DEFAULT_LOG_NAME = 'ai_manager_log' - CRON_LOG_NAME = 'cron_log' - LOG_SEP_LINE = '=' * 50 - - # permission code - AUTH_COMMON_DIR_STR = '750' - AUTH_COMMON_FILE_STR = '600' - AUTH_COMMON_ENCRYPT_FILES = '700' - - AUTH_COMMON_FILE = 0o600 - - # permission mapping - AUTHORITY_FULL = { - 'read': os.R_OK, - 'write': os.W_OK, - 'execute': os.X_OK - } - AUTHORITY_EXIST = { - 'exist': os.F_OK - } - AUTHORITY_RW = { - 'read': os.R_OK, - 'write': os.W_OK - } - AUTHORITY_R = { - 'read': os.R_OK - } - # cmd lib - SHELL_CMD_DICT = 
{ - "deleteFile": "(if [ -f '%s' ];then rm -f '%s';fi)", - "deleteLibFile": "cd %s && ls | grep -E '%s'|xargs rm -f", - "cleanDir": "(if [ -d '%s' ];then rm -rf '%s'/* && cd '%s' && ls -A | xargs rm -rf ; fi)", - "simpleCleanDir": "rm -rf %s/*", - "execShellFile": "sh %s", - "getFullPathForShellCmd": "which %s", - "deleteDir": "(if [ -d '%s' ];then rm -rf '%s';fi)", - "deleteLib": "(if [ -e '%s' ];then rm -rf '%s';fi)", - "createDir": "(if [ ! -d '%s' ]; then mkdir -p '%s' -m %s;fi)", - "createDirSimple": "mkdir -p '%s' -m %s", - "createFile": "touch '%s' && chmod %s '%s'", - "deleteBatchFiles": "rm -f %s*", - "compressTarFile": "cd '%s' && tar -cf '%s' %s && chmod %s '%s' ", - "decompressTarFile": "cd '%s' && tar -xf '%s' ", - "decompressFileToDir": "tar -xf '%s' -C '%s'", - "copyFile": " cp -r %s %s ", - "renameFile": "(if [ -f '%s' ];then mv '%s' '%s';fi)", - "cleanFile": "if [ -f %s ]; then echo '' > %s; fi", - "checkUserPermission": "su - %s -c \"cd '%s'\"", - "getFileTime": "echo $[`date +%%s`-`stat -c %%Y %s`]", - "findfiles": "cd %s && find . -type l -print", - "copyFile1": "(if [ -f '%s' ];then cp '%s' '%s';fi)", - "copyFile2": "(if [ -f '%s' ] && [ ! -f '%s' ];then cp '%s' '%s';fi)", - "cleanDir1": "(if [ -d '%s' ]; then cd '%s' && rm -rf '%s' && rm -rf '%s' && cd -; fi)", - "cleanDir2": "(if [ -d '%s' ]; then rm -rf '%s'/* && cd '%s' && ls -A | xargs rm -rf && cd -; fi)", - "cleanDir3": "rm -rf '%s'/* && cd '%s' && ls -A | xargs rm -rf && cd - ", - "cleanDir4": "rm -rf %s/*", - "checkNodeConnection": "ping %s -i 1 -c 3 |grep ttl |wc -l", - "overWriteFile": "echo '%s' > '%s'", - "physicMemory": "cat /proc/meminfo | grep MemTotal", - "findFile": "(if [ -d '%s' ]; then find '%s' -type f;fi)", - "unzipForce": "unzip -o '%s' -d '%s'", - "sleep": "sleep %s", - "softLink": "ln -s '%s' '%s'", - "findwithcd": "cd %s && find ./ -name %s", - "changeMode": "chmod %s %s", - "checkPassword": "export LC_ALL=C; chage -l %s | grep -i %s", - "changeModeForFiles": "chmod -R %s %s/*", - "addCronCMD": "%spython3 %s -t add -c %s \"%s\"", - "delCronCMD": "%spython3 %s -t del -c %s \"%s\"", - "killProcess": "ps ux | grep '%s' | grep -v grep | awk '{print $2}' | xargs kill -9", - "checkProcess": "unset LD_LIBRARY_PATH && ps ux | grep '%s' | grep -v grep | wc -l", - "executeShellScripCmd": "echo %s | sh %s %s %s %s %s", - "executeShellScripCmd1": "echo %s | sh %s %s %s %s", - "getUser": "echo $USER", - "ifconfig": "ifconfig", - "chmodWithExecute": "chmod +x %s", - "remoteDeploy": "echo %s | sh %s \"scp -r %s %s@%s:%s\"", - "remoteExecute": "echo %s | sh %s \"ssh %s@%s %s\"", - "showDirDocs": "ls %s | wc -l" - } - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/definitions/errors.py b/src/gausskernel/dbmind/tools/ai_manager/definitions/errors.py deleted file mode 100644 index cac7435ea..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/definitions/errors.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
-# FileName : errors.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : errors.py -############################################################################# - -class Errors(object): - - FILE_DIR_PATH = { - 'gauss_0101': '[GAUSS_0101] The directory [%s] is not exist.', - 'gauss_0102': '[GAUSS_0102] The file [%s] is not exist.', - 'gauss_0103': '[GAUSS_0103] Getting install path [%s] failed.', - 'gauss_0104': '[GAUSS_0104] The path [%s] should be abs path.', - 'gauss_0105': '[GAUSS_0105] Failed to get param file.' - } - - PARAMETER = { - 'gauss_0201': '[GAUSS_0201] The parameter [%s] is wrong.', - 'gauss_0202': '[GAUSS_0202] The config key [%s] not found in the config file or section.', - 'gauss_0203': '[GAUSS_0203] The parameter [%s] not in the valid parameter list.', - 'gauss_0204': '[GAUSS_0204] The config section [%s] not in the valid section list.', - 'gauss_0205': '[GAUSS_0205] The config option [%s] not in the valid option list.', - 'gauss_0206': '[GAUSS_0206] The scene [%s] is not in the valid scene list.', - 'gauss_0207': '[GAUSS_0207] Unsupported operating system %s.', - 'gauss_0208': '[GAUSS_0208] The parameter [%s] should be [%s].', - 'gauss_0209': '[GAUSS_0209] The parameter [%s] is essential but got None.' - } - - ATTRIBUTE = { - 'gauss_0301': '[GAUSS_0301] Missed logger object.' - } - EXECUTE_RESULT = { - 'gauss_0401': '[GAUSS_0401] Failed to execute cmd [%s] when [%s] with error [%s].', - 'gauss_0402': '[GAUSS_0402] Transfer json to dict failed with error[%s].', - 'gauss_0403': '[GAUSS_0403] Failed to unpack files to install path with error:[%s].', - 'gauss_0404': '[GAUSS_0404] Failed to get old cron information with error:[%s].', - 'gauss_0405': '[GAUSS_0405] Failed to copy file to path [%s] with error:[%s].', - 'gauss_0406': '[GAUSS_0406] Failed copy file to node[%s] with error[%s].', - 'gauss_0407': '[GAUSS_0407] Failed to install module [%s] on node[%s] with error[%s].', - 'gauss_0408': '[GAUSS_0408] Transfer dict to json file failed with error[%s].', - 'gauss_0409': '[GAUSS_0409] Remote install failed with error[%s].', - 'gauss_0410': '[GAUSS_0410] Remote uninstall failed with error[%s].', - 'gauss_0411': '[GAUSS_0411] Prepare log file failed with error[%s].', - 'gauss_0412': '[GAUSS_0412] Get rand string error with status:%s.', - 'gauss_0413': '[GAUSS_0413] Failed execute cmd [%s] when [%s].', - 'gauss_0414': '[GAUSS_0414] Failed to generate [%s].', - 'gauss_0415': '[GAUSS_0415] Failed obtain [%s].', - 'gauss_0416': '[GAUSS_0416] Failed start all process.', - 'gauss_0417': '[GAUSS_0417] Failed to encrypt random string.', - 'gauss_0418': '[GAUSS_0418] Failed to install agent on node [%s] with error [%s].', - 'gauss_0419': '[GAUSS_0419] Failed to clean dir [%s] on node [%s], remain doc number [%s].', - 'gauss_0420': '[GAUSS_0420] Failed to [%s].' - } - CONTENT_OR_VALUE = { - 'gauss_0501': '[GAUSS_0501] The version conf info [%s] is wrong.', - 'gauss_0502': '[GAUSS_0502] The cmd [%s] for setting cron is wrong.', - 'gauss_0503': '[GAUSS_0503] The value [%s] for [%s] is wrong.', - 'gauss_0504': '[GAUSS_0504] Found same file name of ssl certs in config file.' 
- } - ILLEGAL = { - 'gauss_0601': '[GAUSS_0601] There are illegal character [%s] in the [%s].', - 'gauss_0602': '[GAUSS_0602] The [%s] is illegal.', - 'gauss_0603': '[GAUSS_0603] The [%s] should be [%s].', - 'gauss_0604': '[GAUSS_0604] The param [%s] not in params check list.', - 'gauss_0605': '[GAUSS_0605] The section:[%s] option:[%s] is invalid.', - 'gauss_0606': '[GAUSS_0606] Check config data failed with error:%s' - } - - PERMISSION = { - 'gauss_0701': '[GAUSS_0701] The path [%s] can not access.', - 'gauss_0702': '[GAUSS_0702] The process of [%s] is already exist.', - 'gauss_0703': '[GAUSS_0703] User root is not permitted.' - } - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/ad_main.py b/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/ad_main.py deleted file mode 100644 index e706ce1d4..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/ad_main.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : ad_main.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Main entrance of module anomaly detection -############################################################################# -from copy import deepcopy -from module.anomaly_detection.install import Installer -from module.anomaly_detection.uninstall import Uninstaller -from module.anomaly_detection.install_remote import RemoteInstaller -from module.anomaly_detection.uninstall_remote import RemoteUninstaller -from config_cabin.config import PROJECT_PATH -from definitions.constants import Constant - - -LOCAL_TASK_MAPPING = { - 'install': Installer, - 'uninstall': Uninstaller -} -REMOTE_TASK_MAPPING = { - 'install': RemoteInstaller, - 'uninstall': RemoteUninstaller -} - - -class AnomalyDetection(object): - def __init__(self, **params_dict): - self.args_dict = params_dict - self.action = self.args_dict.get('action') - self.package_path = self.args_dict.get('package_path') - self.project_path = PROJECT_PATH - self.scene = self.args_dict.get('scene') - self.version = self.args_dict.get('version') - self.install_path = self.args_dict.get('install_path') - self.task = None - self.remote_task = None - - def init_globals(self): - - if self.scene == Constant.SCENE_HUAWEIYUN: - self.args_dict['service_list'] = Constant.ANOMALY_INSTALL_CRON_LOCALLY - self.args_dict['stopping_list'] = Constant.ANOMALY_STOP_CMD_LOCALLY - self.task = LOCAL_TASK_MAPPING[self.action](**self.args_dict) - if self.scene == Constant.SCENE_OPENGAUSS: - self.args_dict['service_list'] = Constant.OPENGAUSS_ANOMALY_INSTALL_CRON_LOCALLY - self.args_dict['stopping_list'] = Constant.OPENGAUSS_ANOMALY_STOP_CMD_LOCALLY - self.task = LOCAL_TASK_MAPPING[self.action](**self.args_dict) - remote_params = deepcopy(self.args_dict) - remote_params['service_list'] = Constant.OPENGAUSS_ANOMALY_INSTALL_CRON_REMOTE - remote_params['stopping_list'] = Constant.OPENGAUSS_ANOMALY_STOP_CMD_REMOTE - self.remote_task = REMOTE_TASK_MAPPING[self.action](**remote_params) - - def run(self): - self.init_globals() - self.task.run() - if self.remote_task: - self.remote_task.run() - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/config/ca.conf b/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/config/ca.conf deleted file mode 100644 index 3d0349890..000000000 --- 
a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/config/ca.conf +++ /dev/null @@ -1,6 +0,0 @@ -[req] -distinguished_name = req_distinguished_name -prompt = no - -[req_distinguished_name] -O = A-Detection Certificate Authority \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/install.py b/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/install.py deleted file mode 100644 index 286bdd466..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/install.py +++ /dev/null @@ -1,439 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : install.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Local install -############################################################################# - -import optparse -import os -import time -import sys -from datetime import datetime - -sys.path.append(sys.path[0] + "/../../") -from tools import params_checkers -from tools.common_tools import CommonTools -from definitions.constants import Constant -from definitions.errors import Errors -from config_cabin.config import PROJECT_PATH -from config_cabin.config import VERSION_RECORD_FILE_ANOMALY_DETECTION -from config_cabin.config import EXTRACT_DIR -from config_cabin.config import TMP_CA_FILE -from tools.global_box import g -from config_cabin.config import ENV_FILE -from tools.env_handler import EnvHandler -from tools.cert_generator import CertGenerator - - -class Installer(object): - def __init__(self, **param_dict): - self.param_dict = param_dict - self.project_path = PROJECT_PATH - self.module_name = self.param_dict.get(Constant.MODULE) - self.agent_nodes = self.param_dict.get(Constant.AGENT_NODES) - self.package_path = self.param_dict.get('package_path') - self.install_path = None - self.version = None - self.service_list = None - self.stopping_list = None - self.module_path = None - self.ca_info = None - - def check_remote_params(self): - funcs = CommonTools.get_funcs(params_checkers) - for param_name, param_value in self.param_dict.items(): - if param_name not in params_checkers.PARAMS_CHECK_MAPPING: - raise Exception(Errors.PARAMETER['gauss_0203'] % param_name) - funcs[params_checkers.PARAMS_CHECK_MAPPING[param_name]](param_value) - - def init_globals(self): - self.install_path = self.param_dict.get('install_path') - self.version = self.param_dict.get('version') - self.service_list = self.param_dict.get('service_list') - self.module_name = Constant.AI_SERVER if \ - self.module_name == Constant.MODULE_ANOMALY_DETECTION else self.module_name - self.module_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - - def check_project_path_access(self): - """ - Check project path is full authority. 
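-        Here 'full' is taken to mean read, write and execute permission for
-        the current user; check_dir_access is expected to raise a
-        'can not access' error otherwise.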
- """ - CommonTools.check_dir_access(self.project_path, 'full') - - def read_ca_cert_path(self, agent_only=False): - """ - Read ca certs path from config file - """ - ad_config_path = os.path.join(self.install_path, Constant.ANOMALY_DETECTION_CONFIG_PATH) - # read agent cert path - agent_cert_path = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_SECURITY, - Constant.AD_CONF_AGENT_CERT, self.module_path) - # read agent key path - agent_key_path = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_SECURITY, - Constant.AD_CONF_AGENT_KEY, self.module_path) - # read ca root cert path - ca_root_file_path = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_SECURITY, - Constant.AD_CONF_CA_PATH, self.module_path) - if agent_only: - return [agent_key_path, agent_cert_path, ca_root_file_path] - # get ca root key path - ca_root_key_path = ca_root_file_path + '.key' - # read server cert path - server_cert_path = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_SECURITY, - Constant.AD_CONF_SERVER_CERT, self.module_path) - # read server key path - server_key_path = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_SECURITY, - Constant.AD_CONF_SERVER_KEY, self.module_path) - # judge the basename of path is duplicate - file_names = [os.path.basename(file) for file in [ - ca_root_file_path, server_cert_path, server_key_path, agent_cert_path, agent_key_path]] - if len(file_names) != len(set(file_names)): - raise Exception(Errors.CONTENT_OR_VALUE['gauss_0504']) - return ca_root_file_path, ca_root_key_path, server_cert_path, \ - server_key_path, agent_cert_path, agent_key_path - - def backup_db_file(self): - """ - Backup data file. - """ - ad_config_path = os.path.join(self.install_path, Constant.ANOMALY_DETECTION_CONFIG_PATH) - if not os.path.isfile(ad_config_path): - g.logger.info('Config file not exist, can not backup db file') - return False, None, None - db_cabin = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_DATABASE, - Constant.AD_CONF_DATABASE_PATH, self.module_path) - if os.path.isdir(db_cabin): - g.logger.info('Start backup db file.') - back_up_path = os.path.join(EXTRACT_DIR, os.path.basename(db_cabin)) - CommonTools.copy_file_to_dest_path(db_cabin, back_up_path) - return True, db_cabin, back_up_path - else: - g.logger.info('No need backup db file.') - return False, None, None - - @staticmethod - def restore_db_file(status, db_cabin, back_up_path): - """ - Restore db file. - """ - if status: - CommonTools.mkdir_with_mode(os.path.dirname(db_cabin), Constant.AUTH_COMMON_DIR_STR) - CommonTools.copy_file_to_dest_path(back_up_path, db_cabin) - else: - g.logger.info('No need Restore db file.') - - def prepare_module_path(self): - """ - Prepare install path - """ - self._mk_module_dir() - self._clean_module_dir() - self._check_module_path_access() - - def modify_config_file(self, remote=False): - """ - Modify config file with "config_info" in param file. 
- """ - config_path = os.path.realpath(os.path.join( - self.install_path, Constant.ANOMALY_DETECTION_CONFIG_PATH)) - g.logger.info('Got config file path:[%s]' % config_path) - if not os.path.isfile(config_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % config_path) - config_info = self.param_dict.get('config_info') - if not remote: - CommonTools.modify_config_file(config_path, config_info) - else: - CommonTools.modify_agent_config_file(config_path, config_info) - g.logger.info('Successfully modify config file.') - - def try_to_kill_process_exist(self): - """ - Try to kill process, if already exist - """ - script_path = os.path.realpath( - os.path.join(self.install_path, Constant.ANORMALY_MAIN_SCRIPT)) - process_list = [(cmd % (Constant.CMD_PREFIX, os.path.dirname(script_path), script_path) - ).split(self.version)[-1] for cmd in self.service_list] - for process in process_list: - process_num = CommonTools.check_process(process) - if process_num: - CommonTools.grep_process_and_kill(process) - g.logger.info('Killed process of [%s]' % process) - - def start_agent_server_monitor(self): - """ - Add cron service for agent, server, monitor. - """ - script_path = os.path.realpath( - os.path.join(self.install_path, Constant.ANORMALY_MAIN_SCRIPT)) - for cmd in self.service_list: - cron_cmd = cmd % (Constant.CMD_PREFIX, os.path.dirname(script_path), script_path) - status, output = CommonTools.add_cron(self.install_path, cron_cmd, '1m') - if status != 0: - err = 'add cron CMD[%s]-STATUS[%s]-OUTPUT[%s]' % (cron_cmd, status, output) - raise Exception(Errors.EXECUTE_RESULT['gauss_0420'] % err) - else: - g.logger.info('Successfully add new cron:[%s]' % cron_cmd) - - @staticmethod - def modify_env_file(): - handler = EnvHandler(ENV_FILE) - handler.run() - - def waiting_for_start(self, wait_seconds): - """ - Wait cron start agent, server and monitor in 1m. - """ - script_path = os.path.realpath( - os.path.join(self.install_path, Constant.ANORMALY_MAIN_SCRIPT)) - process_list = [(cmd % (Constant.CMD_PREFIX, os.path.dirname(script_path), script_path) - ).split(self.version)[-1] for cmd in self.service_list] - ret_mapping = {} - for sec in range(wait_seconds): - for process in process_list: - process_num = CommonTools.check_process(process) - ret_mapping[process] = process_num - if all(ret_mapping.values()): - g.logger.info('Successfully start all process.') - return - time.sleep(1) - g.logger.error('Failed start all process with result:%s' % str(ret_mapping)) - raise Exception(Errors.EXECUTE_RESULT['gauss_0416']) - - def _mk_module_dir(self): - """ - Create install path if the path is not exist. - """ - if not os.path.isdir(self.module_path): - g.logger.info('Install path:%s is not exist, start creating.' % self.module_path) - CommonTools.mkdir_with_mode(self.module_path, Constant.AUTH_COMMON_DIR_STR) - else: - g.logger.info('Install path:%s is already exist.' % self.module_path) - - def _clean_module_dir(self): - """ - Clean install path before unpack. - """ - file_list = os.listdir(self.module_path) - if file_list: - g.logger.info('Start clean install path, file list:[%s]' % file_list) - CommonTools.clean_dir(self.module_path) - - def deploy_module_files(self): - """ - Copy files to module path. 
- """ - from_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__))))), self.module_name) - to_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - CommonTools.copy_file_to_dest_path(from_path, to_path) - g.logger.info('Successfully to copy files to package path.') - - def deploy_manager_files(self): - """ - Copy manager files to manager dir - """ - from_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( - os.path.realpath(__file__))))), Constant.AI_MANAGER_PATH) - to_path = os.path.realpath(os.path.join( - self.install_path, Constant.AI_MANAGER_PATH)) - CommonTools.copy_file_to_dest_path(from_path, to_path) - g.logger.info('Successfully to copy files to manager path.') - - def prepare_ca_certificates(self): - """ - Generate server ca certificates. - """ - ca_root_file_path, ca_root_key_path, server_cert_path, server_key_path, agent_cert_path, \ - agent_key_path = self.read_ca_cert_path() - self.ca_info = self.param_dict.pop(Constant.CA_INFO) - if not self.ca_info: - raise Exception(Errors.PARAMETER['gauss_0201'] % 'ca root cert information') - get_ca_root_cert_path = self.ca_info.get(Constant.CA_CERT_PATH) - get_ca_root_key_path = self.ca_info.get(Constant.CA_KEY_PATH) - get_ca_root_password = self.ca_info.get(Constant.CA_PASSWORD) - if not all([get_ca_root_cert_path, get_ca_root_key_path, get_ca_root_password]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'items info of ca root cert') - - # copy ca root cert and key files to path of configured - g.logger.info('Start deploy ca root files.') - CommonTools.remove_files([ca_root_file_path, ca_root_key_path]) - CommonTools.mkdir_with_mode(os.path.dirname(ca_root_file_path), - Constant.AUTH_COMMON_DIR_STR) - CommonTools.copy_file_to_dest_path(get_ca_root_cert_path, ca_root_file_path) - CommonTools.mkdir_with_mode(os.path.dirname(ca_root_key_path), - Constant.AUTH_COMMON_DIR_STR) - CommonTools.copy_file_to_dest_path(get_ca_root_key_path, ca_root_key_path) - - # get ssl password - ssl_password = CertGenerator.get_rand_str() - ca_config_path = os.path.join(os.path.dirname( - os.path.realpath(__file__)), Constant.CA_CONFIG) - server_ip = CommonTools.get_local_ip(ignore=True) - g.logger.info('Get server ip:[%s].' % server_ip) - - # create server cert - CertGenerator.create_ca_certificate_with_script(get_ca_root_password, ssl_password, - ca_root_file_path, ca_root_key_path, - ca_config_path, server_cert_path, - server_key_path, server_ip, - crt_type='server') - g.logger.info('Successfully generate server ca certificate.') - return get_ca_root_password, ssl_password, ca_root_file_path, ca_root_key_path, \ - ca_config_path, agent_cert_path, agent_key_path - - def _generate_key_file(self, password): - """ - Create key file - """ - key_file_path = os.path.join(self.module_path, Constant.PWF_PATH) - CommonTools.mkdir_with_mode(key_file_path, Constant.AUTH_COMMON_DIR_STR) - CommonTools.clean_dir(key_file_path) - encrypt_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))))), Constant.ENCRYPT_TOOL) - lib_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( - os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))), 'lib') - CommonTools.encrypt_with_path(password, key_file_path, encrypt_path, lib_path) - g.logger.info('Successfully generate key files with path [%s].' 
% key_file_path) - return key_file_path - - def generate_agent_ca_files(self, ca_password, ssl_password, ca_root_file_path, - ca_root_key_path, ca_config_path, agent_cert_path, agent_key_path): - """ - Generate client ca certificates. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - username = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - - CertGenerator.create_ca_certificate_with_script(ca_password, ssl_password, - ca_root_file_path, ca_root_key_path, - ca_config_path, agent_cert_path, - agent_key_path, ip, crt_type='agent') - file_list = self.read_ca_cert_path(agent_only=True) - key_file = self._generate_key_file(ssl_password) - file_list.append(key_file) - temp_ca_dir = TMP_CA_FILE - CertGenerator.deploy_ca_certs( - file_list, ip, username, pwd, temp_ca_dir, no_delete=[key_file, ca_root_file_path]) - g.logger.info('Successfully deploy certs to node:[%s]' % ip) - - def deploy_agent_certs(self): - """ - Copy file from temp dir to config path. - """ - file_list = self.read_ca_cert_path(agent_only=True) - file_list.append(os.path.join(self.module_path, Constant.PWF_PATH)) - temp_cert_list = os.listdir(TMP_CA_FILE) - for dest_path in file_list: - CommonTools.mkdir_with_mode( - os.path.dirname(dest_path), Constant.AUTH_COMMON_DIR_STR) - CommonTools.remove_files(file_list) - g.logger.info('Successfully prepare the path:%s' % str(file_list)) - for file_name in temp_cert_list: - for dest_file in file_list: - if file_name in dest_file: - from_path = os.path.join(TMP_CA_FILE, file_name) - CommonTools.copy_file_to_dest_path(from_path, dest_file) - CommonTools.clean_dir(TMP_CA_FILE) - - def _check_module_path_access(self): - """ - Check module path is full authority. - """ - CommonTools.check_dir_access(self.module_path, 'full') - - def record_version_info(self): - """ - Record install time, version, install path in record file. - """ - time_install = datetime.now().strftime("%Y-%m-%d, %H:%M:%S") - content = '|'.join([time_install, self.version, self.install_path]) + '\n' - CommonTools.add_content_to_file(VERSION_RECORD_FILE_ANOMALY_DETECTION, content) - CommonTools.delete_early_record( - VERSION_RECORD_FILE_ANOMALY_DETECTION, Constant.VERSION_FILE_MAX_LINES) - g.logger.info('Successfully record version information.') - - def unpack_file_to_temp_dir(self): - """ - Unpack file to temp dir on remote node. - """ - # mk temp extract dir if not exist - CommonTools.mkdir_with_mode(EXTRACT_DIR, Constant.AUTH_COMMON_DIR_STR) - # clean extract dir - CommonTools.clean_dir(EXTRACT_DIR) - # extract package file to temp dir - CommonTools.extract_file_to_dir(self.package_path, EXTRACT_DIR) - g.logger.info('Success unpack files to temp dir.') - - def run(self, remote=False): - if remote: - self.check_remote_params() - self.check_project_path_access() - self.init_globals() - back_status, from_path, to_path = self.backup_db_file() - if not remote: - self.prepare_module_path() - g.logger.info('Start deploy module files.') - self.deploy_module_files() - self.restore_db_file(back_status, from_path, to_path) - g.logger.info('Start modify config file.') - self.modify_config_file(remote=remote) - g.logger.info('Start parse ca information.') - ad_config_path = os.path.join(self.install_path, Constant.ANOMALY_DETECTION_CONFIG_PATH) - tls = CommonTools.read_info_from_config_file( - ad_config_path, Constant.AD_CONF_SECTION_SECURITY, Constant.AD_CONF_TLS_FLAG) - g.logger.info('Get server type is https:[%s].' 
% tls) - if (not remote) and tls.lower() == 'true': - ca_password, ssl_password, ca_root_file_path, ca_root_key_path, ca_config_path, \ - agent_cert_path, agent_key_path = self.prepare_ca_certificates() - self.generate_agent_ca_files( - ca_password, ssl_password, ca_root_file_path, ca_root_key_path, - ca_config_path, agent_cert_path, agent_key_path) - if remote and tls.lower() == 'true': - self.deploy_agent_certs() - - g.logger.info('Start add crontab.') - self.start_agent_server_monitor() - g.logger.info('Start kill process.') - self.try_to_kill_process_exist() - - g.logger.info('Start record version info.') - self.record_version_info() - g.logger.info('Waiting for start, the service will start in 1 minute...') - self.waiting_for_start(Constant.DEFAULT_WAIT_SECONDS) - - -def init_parser(): - """parser command""" - parser = optparse.OptionParser(conflict_handler='resolve') - parser.disable_interspersed_args() - parser.add_option('--param_file', dest='param_file', help='json file path') - return parser - - -if __name__ == '__main__': - try: - install_parser = init_parser() - opt, arg = install_parser.parse_args() - params_file = opt.param_file - params_dict = CommonTools.json_file_to_dict(params_file) - installer = Installer(**params_dict) - installer.run(remote=True) - except Exception as error: - g.logger.error(Errors.EXECUTE_RESULT['gauss_0409'] % error) - raise Exception(Errors.EXECUTE_RESULT['gauss_0409'] % error) - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/install_remote.py b/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/install_remote.py deleted file mode 100644 index 0729feafd..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/install_remote.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
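-# Note : Flow (see RemoteInstaller.run): copy the module, the ai_manager
-#        code and the env file to every agent node, write the parameter
-#        file, then run the install script remotely.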
-# FileName : install_remote.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Remote install -############################################################################# - -import os -import re -import sys - -sys.path.append(sys.path[0] + "/../../") -from tools.common_tools import CommonTools -from definitions.constants import Constant -from definitions.errors import Errors -from config_cabin.config import PROJECT_PATH -from tools.global_box import g -from config_cabin.config import ENV_FILE -from config_cabin.config import EXTRACT_DIR -from module.anomaly_detection.uninstall_remote import RemoteUninstaller -from copy import deepcopy - - -class RemoteInstaller(object): - def __init__(self, **param_dict): - self.param_dict = param_dict - self.project_path = PROJECT_PATH - self.agent_nodes = self.param_dict.pop(Constant.AGENT_NODES) - self.module_name = self.param_dict.get(Constant.MODULE) - self.package_path = None - self.install_path = None - self.version = None - self.service_list = None - self.module_path = None - self.manager_path = None - - def init_globals(self): - self.package_path = self.param_dict.get('package_path') - self.install_path = self.param_dict.get('install_path') - self.version = self.param_dict.get('version') - self.service_list = self.param_dict.get('service_list') - self.module_name = Constant.AI_SERVER if \ - self.module_name == Constant.MODULE_ANOMALY_DETECTION else self.module_name - self.module_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - self.manager_path = os.path.join(self.install_path, Constant.AI_MANAGER_PATH) - - def prepare_remote_file_path(self): - """ - Prepare install path - """ - self._clean_envs() - self._mk_remote_module_dir() - self._clean_remote_module_dir() - self._mk_remote_manager_dir() - self._clean_remote_manager_dir() - - def remote_copy_module(self): - """ - Copy package to remote node. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - - status, output = CommonTools.remote_copy_files( - ip, uname, pwd, self.module_path, self.module_path) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0406'] % (ip, output)) - else: - g.logger.info('Successfully copy module files to node [%s].' % ip) - - def remote_copy_manager(self): - """ - Copy package to remote node. - """ - manager_from = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - - status, output = CommonTools.remote_copy_files( - ip, uname, pwd, manager_from, self.manager_path) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0406'] % (ip, output)) - else: - g.logger.info('Successfully copy manager to node[%s].' % ip) - - def remote_copy_env_file(self): - """ - Copy env file to remote node. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - - status, output = CommonTools.remote_copy_files( - ip, uname, pwd, ENV_FILE, ENV_FILE) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0406'] % (ip, output)) - else: - g.logger.info('Successfully copy env file to node[%s].' 
% ip) - - @staticmethod - def remote_install_single_agent(params): - """ - Remote install single node agent - """ - ip, uname, pwd, cmd = params - g.logger.debug('Install node[%s], cmd[%s]' % (ip, cmd)) - g.logger.info('Starting install on node:[%s], please wait ...' % ip) - status, output = CommonTools.remote_execute_cmd(ip, uname, pwd, cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0418'] % (ip, output)) - g.logger.info('Install result on node:[%s]-output:%s\n' % (ip, output)) - - def remote_install(self): - """ - Remote install agent on all nodes - """ - install_script_path = os.path.join(self.install_path, - Constant.ANOMALY_DETECTION_INSTALL_SCRIPT_PATH) - param_file_path = os.path.join(EXTRACT_DIR, Constant.TEMP_ANOMALY_PARAM_FILE) - cmd = Constant.EXECUTE_REMOTE_SCRIPT % ( - Constant.CMD_PREFIX, install_script_path, param_file_path) - params_list = [(node.get(Constant.NODE_IP), node.get(Constant.NODE_USER), - node.get(Constant.NODE_PWD), cmd) for node in self.agent_nodes] - CommonTools.parallel_execute(self.remote_install_single_agent, params_list) - - def prepare_param_file(self): - """ - Write params into file - """ - param_file_path = os.path.join(EXTRACT_DIR, Constant.TEMP_ANOMALY_PARAM_FILE) - self.param_dict.pop(Constant.CA_INFO) - CommonTools.mkdir_with_mode(os.path.dirname(param_file_path), Constant.AUTH_COMMON_DIR_STR) - CommonTools.dict_to_json_file(self.param_dict, param_file_path) - self._copy_param_file_to_remote_node(param_file_path, param_file_path) - - def _copy_param_file_to_remote_node(self, path_from, path_to): - """ - Copy params file to remote agent node - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - CommonTools.remote_mkdir_with_mode( - os.path.dirname(path_to), Constant.AUTH_COMMON_DIR_STR, ip, uname, pwd) - status, output = CommonTools.remote_copy_files( - ip, uname, pwd, path_from, path_to) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0406'] % (ip, output)) - else: - g.logger.info('Successfully copy install param file to node[%s].' % ip) - - def _mk_remote_module_dir(self): - """ - Create install path if the path is not exist. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - CommonTools.remote_mkdir_with_mode( - self.module_path, Constant.AUTH_COMMON_DIR_STR, ip, uname, pwd) - g.logger.info('Successfully create module path dir on node:[%s]' % ip) - - def _clean_envs(self): - """ - Uninstall remote agent nodes - :return: - """ - try: - params = deepcopy(self.param_dict) - params[Constant.AGENT_NODES] = self.agent_nodes - cleaner = RemoteUninstaller(**params) - cleaner.init_globals() - cleaner.prepare_param_file() - cleaner.remote_uninstall(ignore=True) - except Exception as error: - g.logger.warning('Failed clean remote agent node with error:%s' % str(error)) - - def _clean_remote_module_dir(self): - """ - Clean install path before unpack. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - CommonTools.retry_remote_clean_dir(self.module_path, ip, uname, pwd) - g.logger.info('Successfully clean module path on node:[%s]' % ip) - - def _mk_remote_manager_dir(self): - """ - Create install path if the path is not exist. 
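-        The manager directory is created over SSH on each agent node with
-        the common directory mode (Constant.AUTH_COMMON_DIR_STR).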
- """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - CommonTools.remote_mkdir_with_mode( - self.manager_path, Constant.AUTH_COMMON_DIR_STR, ip, uname, pwd) - g.logger.info('Successfully create manager path dir on node:[%s]' % ip) - - def _clean_remote_manager_dir(self): - """ - Clean install path before unpack. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - CommonTools.retry_remote_clean_dir(self.manager_path, ip, uname, pwd) - g.logger.info('Successfully clean manager path on node:[%s]' % ip) - - def run(self): - self.init_globals() - self.prepare_remote_file_path() - self.remote_copy_module() - self.remote_copy_manager() - self.remote_copy_env_file() - self.prepare_param_file() - self.remote_install() - - - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/uninstall.py b/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/uninstall.py deleted file mode 100644 index 5b7a22166..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/uninstall.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : uninstall.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : uninstall -############################################################################# - -import optparse -import os -import sys -import subprocess - -sys.path.append(sys.path[0] + "/../../") -from config_cabin.config import PROJECT_PATH -from config_cabin.config import VERSION_RECORD_FILE_ANOMALY_DETECTION -from tools.common_tools import CommonTools -from definitions.errors import Errors -from tools.global_box import g -from definitions.constants import Constant - - -class Uninstaller(object): - def __init__(self, **param_dict): - self.param_dict = param_dict - self.project_path = PROJECT_PATH - self.module_name = self.param_dict.get(Constant.MODULE) - self.install_path = None - self.install_version = None - self.install_time = None - self.stopping_list = None - self.module_path = None - self.service_list = None - - def init_globals(self): - self.install_time, self.install_version, self.install_path = self.get_install_info() - self.stopping_list = self.param_dict.get('stopping_list') - self.service_list = self.param_dict.get('service_list') - self.module_name = Constant.AI_SERVER if \ - self.module_name == Constant.MODULE_ANOMALY_DETECTION else self.module_name - self.module_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - - def del_agent_server_monitor_cron(self): - """ - Delete cron service of agent, server and monitor. 
- """ - g.logger.info('Start uninstall package[version:%s][installed time:%s][installed path:%s]' - % (self.install_version, self.install_time, self.install_path)) - script_path = os.path.realpath( - os.path.join(self.install_path, Constant.ANORMALY_MAIN_SCRIPT)) - for cmd in self.service_list: - cron_cmd = cmd % (Constant.CMD_PREFIX, os.path.dirname(script_path), script_path) - status, output = CommonTools.del_cron(self.install_path, cron_cmd, '1m') - g.logger.info('Delete crontab CMD[%s]-STATUS[%s]-OUTPUT[%s]' % ( - cron_cmd, status, output)) - - def stop_agent_server_monitor(self): - """ - Stop process of agent, server and monitor, if failed, try to kill it. - """ - script_path = os.path.realpath( - os.path.join(self.install_path, Constant.ANORMALY_MAIN_SCRIPT)) - if not os.path.isfile(script_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % - script_path + 'Please confirm your files are integrated') - for cmd in self.stopping_list: - stop_cmd = cmd % (Constant.CMD_PREFIX, os.path.dirname(script_path), script_path) - status, output = subprocess.getstatusoutput(stop_cmd) - if status != 0: - target = stop_cmd.replace('stop', 'start').split(self.install_version)[-1] - g.logger.warning('Failed stop process,command[%s],error[%s]' % (stop_cmd, output)) - stat, out = CommonTools.grep_process_and_kill(target) - if stat != 0: - g.logger.error('Failed kill process [%s] ' - 'with error [%s], please manually stop it.' % (target, out)) - else: - g.logger.info('Successfully kill process [%s].' % target) - else: - g.logger.info('Successfully stop process with command:[%s]' % stop_cmd) - - def clean_module_path(self): - """ - Delete files in install path. - """ - status, output = CommonTools.clean_dir(self.module_path) - if status != 0: - g.logger.warning('Failed clean path:[%s]' % self.module_path) - else: - g.logger.info('Successfully clean install path:[%s]' % self.module_path) - - @staticmethod - def get_install_info(): - """ - Get installed information from record file. - install time | install version | install path - """ - install_time, install_version, install_path = '', '', '' - if not os.path.isfile(VERSION_RECORD_FILE_ANOMALY_DETECTION): - raise Exception( - Errors.FILE_DIR_PATH['gauss_0102'] % VERSION_RECORD_FILE_ANOMALY_DETECTION) - install_info = CommonTools.read_last_line_from_file( - VERSION_RECORD_FILE_ANOMALY_DETECTION).strip() - if install_info: - install_time, install_version, install_path = install_info.split('|') - # check path valid - CommonTools.check_path_valid(install_path) - if not os.path.isdir(install_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0103'] % install_path) - else: - g.logger.info('Successfully got install path[%s].' 
% install_path) - return install_time, install_version, install_path - - def run(self, remote=False): - self.init_globals() - self.del_agent_server_monitor_cron() - self.stop_agent_server_monitor() - if remote: - self.clean_module_path() - - -def init_parser(): - """parser command""" - parser = optparse.OptionParser(conflict_handler='resolve') - parser.disable_interspersed_args() - parser.add_option('--param_file', dest='param_file', help='json file path') - return parser - - -if __name__ == '__main__': - try: - install_parser = init_parser() - opt, arg = install_parser.parse_args() - params_file = opt.param_file - params_dict = CommonTools.json_file_to_dict(params_file) - uninstaller = Uninstaller(**params_dict) - uninstaller.run(remote=True) - except Exception as error: - g.logger.error(Errors.EXECUTE_RESULT['gauss_0410'] % error) - raise Exception(Errors.EXECUTE_RESULT['gauss_0410'] % error) - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/uninstall_remote.py b/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/uninstall_remote.py deleted file mode 100644 index 14435b3e9..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/uninstall_remote.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : uninstall_remote.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Remote uninstall -############################################################################# - -import os -import re -import sys - -sys.path.append(sys.path[0] + "/../../") -from config_cabin.config import PROJECT_PATH -from tools.common_tools import CommonTools -from tools.global_box import g -from definitions.constants import Constant -from module.anomaly_detection.uninstall import Uninstaller -from config_cabin.config import EXTRACT_DIR - - -class RemoteUninstaller(object): - def __init__(self, **param_dict): - self.param_dict = param_dict - self.project_path = PROJECT_PATH - self.package_path = self.param_dict.get(Constant.PACKAGE_PATH) - self.module_name = self.param_dict.get(Constant.MODULE) - self.agent_nodes = None - self.module_path = None - self.install_path = None - - def init_globals(self): - _, _, self.install_path = Uninstaller.get_install_info() - self.agent_nodes = self.param_dict.pop(Constant.AGENT_NODES) - self.module_name = Constant.AI_SERVER if \ - self.module_name == Constant.MODULE_ANOMALY_DETECTION else self.module_name - self.module_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - - def prepare_param_file(self): - """ - Record params to file for remote opt. - """ - param_file_path = os.path.join(EXTRACT_DIR, Constant.TEMP_ANOMALY_PARAM_FILE) - CommonTools.dict_to_json_file(self.param_dict, param_file_path) - self._copy_param_file_to_remote_node(param_file_path, param_file_path) - - def _copy_param_file_to_remote_node(self, path_from, path_to): - """ - Copy params file to remote node. - """ - for node in self.agent_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - status, output = CommonTools.remote_copy_files( - ip, uname, pwd, path_from, path_to) - g.logger.info('Copy uninstall param file to node[%s]-output:%s\n.' 
% (ip, output)) - - @staticmethod - def remote_uninstall_each_node(params): - """ - Remote install one agent node - """ - ip, uname, pwd, cmd, ignore = params - g.logger.info('Starting uninstall on node:[%s], please wait ...' % ip) - status, output = CommonTools.remote_execute_cmd(ip, uname, pwd, cmd) - if status != 0 and ignore: - return - g.logger.info('Uninstall result on node:[%s]-output:%s\n' % (ip, output)) - - def remote_uninstall(self, ignore=False): - """ - Remote install agent with install.py - """ - uninstall_script_path = os.path.join(self.install_path, - Constant.ANOMALY_DETECTION_UNINSTALL_SCRIPT_PATH) - param_file_path = os.path.join(EXTRACT_DIR, Constant.TEMP_ANOMALY_PARAM_FILE) - cmd = Constant.EXECUTE_REMOTE_SCRIPT % ( - Constant.CMD_PREFIX, uninstall_script_path, param_file_path) - params = [(node.get(Constant.NODE_IP), node.get(Constant.NODE_USER), - node.get(Constant.NODE_PWD), cmd, ignore) for node in self.agent_nodes] - CommonTools.parallel_execute(self.remote_uninstall_each_node, params) - - def clean_local_module_path(self): - """ - Delete files in install path. - """ - status, output = CommonTools.clean_dir(self.module_path) - if status != 0: - g.logger.warning('Failed clean path:[%s]' % self.module_path) - else: - g.logger.info('Successfully clean install path:[%s]' % self.module_path) - - def run(self): - self.init_globals() - self.prepare_param_file() - self.remote_uninstall() - self.clean_local_module_path() - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/index_main.py b/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/index_main.py deleted file mode 100644 index c50578556..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/index_main.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : ad_main.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Main entrance of module anomaly detection -############################################################################# - -from module.index_advisor.install import Installer -from module.index_advisor.uninstall import UnInstaller -from config_cabin.config import PROJECT_PATH - - -TASK_MAPPING = { - 'install': Installer, - 'uninstall': UnInstaller -} - - -class IndexAdvisor(object): - def __init__(self, **params_dict): - self.args_dict = params_dict - self.action = self.args_dict.get('action') - self.package_path = self.args_dict.get('package_path') - self.project_path = PROJECT_PATH - self.version = self.args_dict.get('version') - self.install_path = self.args_dict.get('install_path') - self.task = None - - def init_globals(self): - self.task = TASK_MAPPING[self.action](**self.args_dict) - - def run(self): - self.init_globals() - self.task.run() - - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/install.py b/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/install.py deleted file mode 100644 index c8eb087af..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/install.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
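-# Note : Unlike anomaly detection, this module only distributes files: it
-#        copies the extracted index_advisor package and the version record
-#        file to each configured node (see Installer.run), with no local
-#        service or cron to start.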
-# FileName : install.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Module index advisor install script -############################################################################# - -import os -from datetime import datetime - -from definitions.constants import Constant -from config_cabin.config import PROJECT_PATH -from tools.common_tools import CommonTools -from definitions.errors import Errors -from config_cabin.config import EXTRACT_DIR -from tools.global_box import g -from config_cabin.config import VERSION_RECORD_FILE_INDEX_ADVISOR - - -class Installer(object): - def __init__(self, **param_dict): - self.param_dict = param_dict - self.project_path = PROJECT_PATH - self.install_nodes = self.param_dict.pop(Constant.INSTALL_NODES) - self.module_name = self.param_dict.get(Constant.MODULE) - self.package_path = None - self.install_path = None - self.version = None - self.module_path = None - - def init_globals(self): - self.package_path = self.param_dict.get('package_path') - self.install_path = self.param_dict.get('install_path') - self.version = self.param_dict.get('version') - self.module_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - - def prepare_remote_package_path(self): - """ - Prepare install path - """ - self._mk_remote_module_dir() - self._clean_remote_module_dir() - - def _mk_remote_module_dir(self): - """ - Create install path if the path is not exist. - """ - for node in self.install_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - _, output = CommonTools.remote_mkdir_with_mode( - os.path.dirname(self.module_path), Constant.AUTH_COMMON_DIR_STR, ip, uname, pwd) - g.logger.info('Result of create module path dir on node:[%s], output:%s' % ( - ip, output)) - - def _clean_remote_module_dir(self): - """ - Clean install path before unpack. - """ - for node in self.install_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - _, output = CommonTools.retry_remote_clean_dir(self.module_path, ip, uname, pwd) - g.logger.info('Result of clean module path on node:[%s], output:%s' % (ip, output)) - - def remote_copy_module(self): - """ - Copy package of index advisor to remote node. - """ - for node in self.install_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - if not any([ip, uname, pwd]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'remote node info') - local_module_path = os.path.realpath(os.path.join(EXTRACT_DIR, self.module_name)) - if not os.path.exists(local_module_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0101'] % 'temp index advisor module') - _, output = CommonTools.remote_copy_files( - ip, uname, pwd, local_module_path, self.module_path) - g.logger.info('Result of copy index advisor package to node[%s], output:%s' % ( - ip, output)) - - def remote_copy_version_file(self): - """ - Copy version record file of index advisor to remote node. 
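-        Keeping the record file identical on every node lets a later
-        uninstall resolve the install path locally on each of them.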
- """ - for node in self.install_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - if not any([ip, uname, pwd]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'remote node info') - if not os.path.exists(VERSION_RECORD_FILE_INDEX_ADVISOR): - raise Exception( - Errors.FILE_DIR_PATH['gauss_0102'] % VERSION_RECORD_FILE_INDEX_ADVISOR) - _, output = CommonTools.remote_copy_files( - ip, uname, pwd, VERSION_RECORD_FILE_INDEX_ADVISOR, VERSION_RECORD_FILE_INDEX_ADVISOR) - g.logger.info('Result of copy version record file to node[%s], output:%s' % ( - ip, output)) - - def record_version_info(self): - """ - Record install time, version, install path in record file. - """ - time_install = datetime.now().strftime("%Y-%m-%d, %H:%M:%S") - content = '|'.join([time_install, self.version, self.install_path]) + '\n' - CommonTools.add_content_to_file(VERSION_RECORD_FILE_INDEX_ADVISOR, content) - CommonTools.delete_early_record( - VERSION_RECORD_FILE_INDEX_ADVISOR, Constant.VERSION_FILE_MAX_LINES) - g.logger.info('Successfully record version information.') - - def run(self): - self.init_globals() - self.prepare_remote_package_path() - self.remote_copy_module() - self.record_version_info() - self.remote_copy_version_file() - diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/uninstall.py b/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/uninstall.py deleted file mode 100644 index 067d67b52..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/uninstall.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : uninstall.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Module index advisor uninstall script -############################################################################# - -import os - -from definitions.constants import Constant -from config_cabin.config import PROJECT_PATH -from tools.common_tools import CommonTools -from definitions.errors import Errors -from tools.global_box import g -from config_cabin.config import VERSION_RECORD_FILE_INDEX_ADVISOR - - -class UnInstaller(object): - def __init__(self, **param_dict): - self.param_dict = param_dict - self.project_path = PROJECT_PATH - self.install_nodes = self.param_dict.pop(Constant.INSTALL_NODES) - self.module_name = self.param_dict.get(Constant.MODULE) - self.install_path = None - self.module_path = None - - def init_globals(self): - install_time, install_version, self.install_path = self._get_install_info() - self.module_path = os.path.realpath(os.path.join(self.install_path, self.module_name)) - - @staticmethod - def _get_install_info(): - """ - Get installed information from record file. 
- install time | install version | install path - """ - install_time, install_version, install_path = '', '', '' - if not os.path.isfile(VERSION_RECORD_FILE_INDEX_ADVISOR): - raise Exception( - Errors.FILE_DIR_PATH['gauss_0102'] % VERSION_RECORD_FILE_INDEX_ADVISOR) - install_info = CommonTools.read_last_line_from_file( - VERSION_RECORD_FILE_INDEX_ADVISOR).strip() - if install_info: - install_time, install_version, install_path = install_info.split('|') - # check path valid - CommonTools.check_path_valid(install_path) - if not os.path.isdir(install_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0103'] % install_path) - else: - g.logger.info('Successfully got index advisor install path[%s].' % install_path) - return install_time, install_version, install_path - - def clean_remote_module_dir(self): - """ - Clean install path before unpack. - """ - for node in self.install_nodes: - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - _, output = CommonTools.retry_remote_clean_dir(self.module_path, ip, uname, pwd) - g.logger.info('Result of clean module path on node:[%s], output:%s' % (ip, output)) - - def run(self): - self.init_globals() - self.clean_remote_module_dir() - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/test/conftest.py b/src/gausskernel/dbmind/tools/ai_manager/test/conftest.py deleted file mode 100644 index 9df27a41c..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/test/conftest.py +++ /dev/null @@ -1,11 +0,0 @@ -import os - -# Windows platform does not have this method -if getattr(os, 'getuid', None) is None: - os.getuid = lambda: 1 - -if getattr(os, 'sysconf', None) is None: - os.sysconf = lambda x: x - -if getattr(os, 'mknod', None) is None: - os.mknod = lambda x: x \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_manager/test/test_ai_manager/test_tools/test_common_tools.py b/src/gausskernel/dbmind/tools/ai_manager/test/test_ai_manager/test_tools/test_common_tools.py deleted file mode 100644 index 3c69a1ca3..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/test/test_ai_manager/test_tools/test_common_tools.py +++ /dev/null @@ -1,239 +0,0 @@ -import time -import pytest -import os -from unittest.mock import patch -from unittest.mock import mock_open -from ai_manager.tools.common_tools import CommonTools -VERSION_INFO_CORRECT = """GaussDB-Kernel-V500R001C20 -92.301 -cbafe281""" -VERSION_INFO_WRONG = """GaussDB-Kernel-V500R001C20 -92.301""" -JSON_INFO_CORRECT = """{"A":"B"}""" -JSON_INFO_WRONG = """{"A":'B'}""" -PATH_CHECK_LIST = ["|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", - "{", "}", "(", ")", "[", "]", "~", "*", "?", " ", "!", "\n"] - - -class TestCommonTools: - - @patch("builtins.open", new_callable=mock_open, read_data=VERSION_INFO_CORRECT) - def test_correct_get_version_info_from_file(self, mock_read_file): - version = CommonTools.get_version_info_from_file('./test') - assert version == '92301' - - @patch("builtins.open", new_callable=mock_open, read_data=VERSION_INFO_WRONG) - def test_wrong_get_version_info_from_file(self, mock_read_file): - with pytest.raises(Exception, match=r"is wrong."): - CommonTools.get_version_info_from_file('./test') - - @patch('subprocess.getstatusoutput') - def test_extract_file_to_dir(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"extract file"): - CommonTools.extract_file_to_dir('path1', 'path2') - - @patch('subprocess.getstatusoutput') - def test_copy_file_to_dest_path(self, result): - 
result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"copy files"): - CommonTools.copy_file_to_dest_path('path1', 'path2') - - def test_check_dir_access(self): - # current path check full authority - CommonTools.check_dir_access('./', 'full') - # check exist - with pytest.raises(Exception, match=r"can not access"): - CommonTools.check_dir_access('./notexitpath', 'exist') - # check rw - with pytest.raises(Exception, match=r"can not access"): - CommonTools.check_dir_access('./notexitpath', 'rw') - # check wrong params - with pytest.raises(Exception, match=r"is wrong"): - CommonTools.check_dir_access('./notexitpath', 'wrong') - - @patch('subprocess.getstatusoutput') - def test_mkdir_with_mode(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"mkdir"): - CommonTools.mkdir_with_mode('path1', '0700') - - @patch('ai_manager.tools.common_tools.CommonTools.remote_execute_cmd') - def test_remote_mkdir_with_mode(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"remote mkdir"): - CommonTools.remote_mkdir_with_mode('path', 'mode', 'ip', 'username', 'password') - - @patch('subprocess.getstatusoutput') - def test_clean_dir(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"clean dir"): - CommonTools.clean_dir('path') - - @patch('ai_manager.tools.common_tools.CommonTools.remote_execute_cmd') - def test_remote_clean_dir(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"remote mkdir"): - CommonTools.remote_clean_dir('ip', 'username', 'password', 'cmd') - - @patch('subprocess.getstatusoutput') - def test_remove_files(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"remove file"): - CommonTools.remove_files('path') - - @patch("builtins.open", new_callable=mock_open, read_data=JSON_INFO_CORRECT) - @patch("os.path.isfile") - def test_correct_json_file_to_dict(self, is_file, mock_read_file): - is_file.return_value = True - ret = CommonTools.json_file_to_dict('/test') - assert ret == {'A': 'B'} - - @patch('subprocess.getstatusoutput') - def test_add_cron(self, result): - result.return_value = (1, 'aa') - CommonTools.add_cron('install_path', 'cmd', 'frequency') - - @patch('subprocess.getstatusoutput') - def test_del_cron(self, result): - result.return_value = (1, 'aa') - CommonTools.del_cron('install_path', 'cmd', 'frequency') - - def test_delete_early_record(self): - with pytest.raises(Exception, match=r"is not exist."): - CommonTools.delete_early_record('path', 1024) - - @patch("builtins.open", new_callable=mock_open, read_data=VERSION_INFO_WRONG) - def test_read_last_line_from_file(self, mock_read_file): - ret = CommonTools.read_last_line_from_file('./') - assert ret == '92.301' - - @patch('subprocess.getstatusoutput') - def test_grep_process_and_kill(self, result): - result.return_value = (1, 'aa') - CommonTools.grep_process_and_kill('process') - - @patch('subprocess.getstatusoutput') - def test_check_is_root(self, result): - result.return_value = True - ret = CommonTools.check_is_root() - assert ret in [True, False] - - @patch('subprocess.getstatusoutput') - def test_check_process(self, result): - result.return_value = (1, 'aa') - with pytest.raises(Exception, match=r"check process"): - CommonTools.check_process('path') - result.return_value = (0, 'aa') - with pytest.raises(Exception, match=r"check process num"): - CommonTools.check_process('path') - result.return_value = (0, '5') - ret = 
CommonTools.check_process('path') - assert ret == 5 - - def test_check_path_valid(self): - for item in PATH_CHECK_LIST: - wrong_path = os.path.realpath('./') + item - with pytest.raises(Exception, match=r"There are illegal character"): - CommonTools.check_path_valid(wrong_path) - - def test_get_funcs(self): - from ai_manager.tools import params_checkers - from ai_manager.tools.params_checkers import check_ip - ret = CommonTools.get_funcs(params_checkers) - assert check_ip in ret.values() - - def test_parallel_execute(self): - para_list = [0.5] * 10 - start = time.time() - CommonTools.parallel_execute(lambda x: time.sleep(x), para_list) - end = time.time() - assert end - start < 0.5 * 10 - - @patch('ai_manager.tools.common_tools.CommonTools.get_status_output_error') - def test_remote_copy_files(self, result): - result.return_value = (1, 'aa') - CommonTools.remote_copy_files('remote_ip', 'user', 'password', 'path_from', 'path_to') - - @patch('ai_manager.tools.common_tools.CommonTools.get_status_output_error') - def test_remote_execute_cmd(self, result): - result.return_value = (1, 'aa') - CommonTools.remote_execute_cmd('remote_ip', 'user', 'password', 'cmd') - - @patch('ai_manager.tools.common_tools.CommonTools.get_status_output_error') - def test_get_local_ips(self, result): - result.return_value = (0, "eth0 : df df 10.10.72.57 test " - "string 10.10.72.57 test string 10.10.7.5") - ret = CommonTools.get_local_ips() - assert ret.sort() == ['10.10.72.57', '10.10.7.5'].sort() - - @patch("ai_manager.tools.common_tools.CommonTools.get_local_ips") - def test_modify_agent_config_file(self, ips): - ips.return_value = ['10.10.72.57', '10.10.7.5'] - content = {"security": {"tls": "False"}} - with pytest.raises(Exception, match=r"not found in the config file or section"): - CommonTools.modify_agent_config_file('config_path', content, allow_add=False) - - @patch("ai_manager.tools.common_tools.CommonTools.get_local_ips") - def test_modify_config_file(self, ips): - ips.return_value = ['10.10.72.57', '10.10.7.5'] - content = {"security": {"tls": "False"}} - with pytest.raises(Exception, match=r"not found in the config file or section"): - CommonTools.modify_config_file('config_path', content, allow_add=False) - - @patch('subprocess.getstatusoutput') - def test_get_current_usr(self, result): - result.return_value = (1, 'a') - with pytest.raises(Exception, match=r"get user"): - CommonTools.get_current_usr() - - @patch('os.mknod') - def test_create_file_if_not_exist(self, result): - ret = CommonTools.create_file_if_not_exist('path') - assert ret is True - - @patch('subprocess.getstatusoutput') - def test_read_info_from_config_file(self, result): - result.return_value = (1, 'a') - with pytest.raises(Exception, match=r"not found in the config file or section"): - CommonTools.read_info_from_config_file('file_path', 'section', 'option') - - @patch('subprocess.getstatusoutput') - def test_get_local_ip(self, result): - result.return_value = (0, 'eth0 : inet aa 10.30.30.30 bb eth1: cc dd 10.10.10.10') - ret = CommonTools.get_local_ip(ignore=True) - assert ret == '127.0.0.1' - result.return_value = {0, """eth0: flags=4163 mtu 1500 - inet 10.30.30.30 netmask 255.255.254.0"""} - ret = CommonTools.get_local_ip(ignore=True) - assert ret == '10.30.30.30' - - @patch("ai_manager.tools.common_tools.CommonTools.chmod_files_with_execute_permission") - @patch("platform.machine") - @patch('ai_manager.tools.common_tools.CommonTools.get_status_output_error') - def test_encrypt_with_path(self, result, plat, execute): - with 
pytest.raises(Exception, match=r"Unsupported operating system"): - CommonTools.encrypt_with_path('password', 'path') - plat.return_value = 'x86_64' - result.return_value = (1, 'a') - with pytest.raises(Exception, match=r"Failed to encrypt random string"): - CommonTools.encrypt_with_path('password', 'path') - - @patch('ai_manager.tools.common_tools.CommonTools.get_status_output_error') - def test_chmod_files_with_execute_permission(self, result): - result.return_value = (1, 'a') - with pytest.raises(Exception, match=r"change file authority"): - CommonTools.chmod_files_with_execute_permission(['file_list']) - - - - - - - - - - - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/test/test_ai_manager/test_tools/test_params_checkers.py b/src/gausskernel/dbmind/tools/ai_manager/test/test_ai_manager/test_tools/test_params_checkers.py deleted file mode 100644 index 92f588634..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/test/test_ai_manager/test_tools/test_params_checkers.py +++ /dev/null @@ -1,188 +0,0 @@ -import pytest -from collections import defaultdict - - -class TestParamsCheckers: - - def test_check_string(self): - from ai_manager.tools.params_checkers import check_string - valid_str = 'This is valid string' - check_string(valid_str) - invalid_str = ['invalid input because of the type is list, not string'] - with pytest.raises(Exception, match=r"should be"): - check_string(invalid_str) - - def test_check_valid_string(self): - from ai_manager.tools.params_checkers import check_valid_string - valid_str = 'This is valid string' - check_valid_string(valid_str) - invalid_str = 'This is invalid string because of "|"' - with pytest.raises(Exception, match=r"There are illegal character"): - check_valid_string(invalid_str) - - def test_check_digit(self): - from ai_manager.tools.params_checkers import check_digit - valid_str = '123' - check_digit(valid_str) - invalid_str = 'This is invalid string not digit' - with pytest.raises(Exception, match=r"digit"): - check_digit(invalid_str) - - def test_check_list(self): - from ai_manager.tools.params_checkers import check_list - valid_str = ['123'] - check_list(valid_str) - invalid_str = 'This is invalid string not list' - with pytest.raises(Exception, match=r"list"): - check_list(invalid_str) - - def test_check_dict(self): - from ai_manager.tools.params_checkers import check_dict - valid_str = {'123': 'aaa'} - check_dict(valid_str) - invalid_str = 'This is invalid string not dict' - with pytest.raises(Exception, match=r"dict"): - check_dict(invalid_str) - - def test_check_password(self): - from ai_manager.tools.params_checkers import check_password - valid_str = 'test@password' - check_password(valid_str) - invalid_str = 'test wrong password' - with pytest.raises(Exception, match=r"password character"): - check_password(invalid_str) - - def test_check_ip(self): - from ai_manager.tools.params_checkers import check_ip - valid_str = '10.13.12.111' - check_ip(valid_str) - invalid_str = '111.111.111.1111' - with pytest.raises(Exception, match=r"is illegal"): - check_ip(invalid_str) - - def test_check_port(self): - from ai_manager.tools.params_checkers import check_port - valid_str = '2378' - check_port(valid_str) - invalid_str = '66666' - with pytest.raises(Exception, match=r"is illegal"): - check_port(invalid_str) - - def test_check_scene(self): - from ai_manager.tools.params_checkers import check_scene - valid_str = 'opengauss' - check_scene(valid_str) - invalid_str = 'invalidsence' - with pytest.raises(Exception, match=r"is not in the valid 
scene list"): - check_scene(invalid_str) - - def test_check_path(self): - from ai_manager.tools.params_checkers import check_path - valid_str = '/home/Ruby/aa' - check_path(valid_str) - invalid_str = '/home/$aa/^bb' - with pytest.raises(Exception, match=r"There are illegal character"): - check_path(invalid_str) - - def test_check_module(self): - from ai_manager.tools.params_checkers import check_module - valid_str = 'anomaly_detection' - check_module(valid_str) - invalid_str = 'invalid_module' - with pytest.raises(Exception, match=r"is illegal"): - check_module(invalid_str) - - def test_check_action(self): - from ai_manager.tools.params_checkers import check_action - valid_str = 'install' - check_action(valid_str) - invalid_str = 'invalid_opt' - with pytest.raises(Exception, match=r"is illegal"): - check_action(invalid_str) - - def test_check_tls(self): - from ai_manager.tools.params_checkers import check_tls - valid_str = 'True' - check_tls(valid_str) - invalid_str = 'on' - with pytest.raises(Exception, match=r"is illegal"): - check_tls(invalid_str) - - def test_check_config_info(self): - from ai_manager.tools.params_checkers import check_config_info - valid_obj = defaultdict(dict) - valid_obj['security']['tls'] = 'True' - check_config_info(valid_obj) - - valid_obj['server']['host'] = '10.30.30.30' - check_config_info(valid_obj) - - valid_obj['database']['port'] = '1936' - check_config_info(valid_obj) - - invalid_obj = defaultdict(dict) - invalid_obj['section_error']['port'] = '1936' - with pytest.raises(Exception, match=r"not in the valid section list"): - check_config_info(invalid_obj) - - invalid_obj.pop('section_error') - invalid_obj['database']['option_error'] = 'database1' - with pytest.raises(Exception, match=r"not in the valid option list"): - check_config_info(invalid_obj) - - invalid_obj.pop('database') - invalid_obj['server']['host'] = '10.30.30.301' - with pytest.raises(Exception, match=r"is illegal"): - check_config_info(invalid_obj) - - def test_check_agent_nodes(self): - from ai_manager.tools.params_checkers import check_agent_nodes - valid_obj = [defaultdict(str)] - valid_obj[0]['node_ip'] = '10.10.10.10' - valid_obj[0]['username'] = 'Ruby' - valid_obj[0]['password'] = 'aaa@aaa' - check_agent_nodes(valid_obj) - - invalid_obj = [defaultdict(str)] - invalid_obj[0]['node_ip'] = '10.10.10.1011' - invalid_obj[0]['username'] = 'aaa@aaa' - invalid_obj[0]['password'] = 'aaa@aaa' - with pytest.raises(Exception, match=r"is illegal"): - check_agent_nodes(invalid_obj) - - invalid_obj[0]['node_ip'] = '10.10.10.101' - invalid_obj[0]['username'] = 'aaa|aaa' - with pytest.raises(Exception, match=r"There are illegal character"): - check_agent_nodes(invalid_obj) - - invalid_obj[0]['username'] = 'aaa@aaa' - invalid_obj[0]['password'] = 'aaa|aaa' - with pytest.raises(Exception, match=r"is illegal"): - check_agent_nodes(invalid_obj) - - def test_check_install_path(self): - from ai_manager.tools.params_checkers import check_install_path - valid_str = '/home/Ruby/aa' - check_install_path(valid_str) - invalid_str = '/home/$aa/^bb' - with pytest.raises(Exception, match=r"There are illegal character"): - check_install_path(invalid_str) - - def test_check_version(self): - from ai_manager.tools.params_checkers import check_version - valid_str = '92237' - check_version(valid_str) - invalid_str = '92-237' - with pytest.raises(Exception, match=r"digit"): - check_version(invalid_str) - - def test_check_service_list(self): - from ai_manager.tools.params_checkers import check_service_list - valid_str = 
['%s cd %s && nohup python3 %s start --role agent'] - check_service_list(valid_str) - invalid_str = ['test command'] - with pytest.raises(Exception, match=r"service cron params"): - check_service_list(invalid_str) - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/deploy.sh b/src/gausskernel/dbmind/tools/ai_manager/tools/bin/deploy.sh deleted file mode 100644 index 0999a96cb..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/deploy.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : deploy.sh -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Remote deploy script -############################################################################# - -function deploy_code() -{ - local host=$1 - local user=$2 - local project_from=$3 - local project_to=$4 - read -p "password: " password - local timer=60 -expect <<-EOF - set timeout ${timer} - spawn scp -r ${project_from} ${user}@${host}:${project_to} - expect { - "(yes/no)?" { - send "yes\r" - expect "*assword:" - send "${password}\r" - } - "*assword" { - send "${password}\r" - } - "*]*" { - send "\r" - } -} - expect eof -EOF - return 0 -} - -deploy_code $@ \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/execute.sh b/src/gausskernel/dbmind/tools/ai_manager/tools/bin/execute.sh deleted file mode 100644 index 035621f2f..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/execute.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : execute.sh -# Version : GaussDB Kernel V500R001 -# Date : 2021-03-01 -# Description : Remote execute script -############################################################################# - -function execute_remote_cmd() -{ - local host=$1 - local user=$2 - local cmd_str=`echo ${@:3}` - read -p "password: " password - local port=22 - local timer=120 -expect <<-EOF - set timeout ${timer} - spawn ssh ${host} -p ${port} -l ${user} - expect { - "(yes/no)?" { - send "yes\r" - expect "*assword:" - send "${password}\r" - } - "*assword:" { - send "${password}\r" - } - "Last login:" { - send "\r" - } - "*]*" { - send "\r" - } - } - send "\r" - expect "*]*" - send "${cmd_str}" - send "\r" - expect "*]*" - send "exit\r" - expect eof -EOF -} - -execute_remote_cmd $@ \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/gen_certificate.sh b/src/gausskernel/dbmind/tools/ai_manager/tools/bin/gen_certificate.sh deleted file mode 100644 index 8714b7255..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/gen_certificate.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
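The deleted helpers deploy.sh, execute.sh and remote_commander all follow the same pattern: wrap scp or ssh in expect(1) so the password collected by `read -p` can be fed to the interactive prompt. For readers more at home in Python, a rough equivalent of deploy.sh built on the third-party pexpect library might look like the sketch below; the argument order mirrors the script, while the function body, timeout and prompt patterns are illustrative rather than part of the original tooling.

```python
#!/usr/bin/env python3
# Hypothetical pexpect rendering of deploy.sh (assumes: pip install pexpect).
import getpass
import sys

import pexpect


def deploy_code(host: str, user: str, project_from: str, project_to: str) -> int:
    """Copy a directory tree to a remote host, answering the ssh prompts."""
    password = getpass.getpass("password: ")
    child = pexpect.spawn(
        f"scp -r {project_from} {user}@{host}:{project_to}", timeout=60)
    while True:
        index = child.expect(
            [r"\(yes/no\)\??", r"[Pp]assword:", pexpect.EOF, pexpect.TIMEOUT])
        if index == 0:          # first connection: accept the host key
            child.sendline("yes")
        elif index == 1:        # password prompt
            child.sendline(password)
        elif index == 2:        # EOF: scp finished
            child.close()
            return child.exitstatus or 0
        else:                   # timeout
            child.close()
            return 1


if __name__ == "__main__":
    sys.exit(deploy_code(*sys.argv[1:5]))
```

The loop handles first-time host-key confirmation and the password prompt in one place, which is exactly what the nested expect branches in the shell version do.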
-# FileName : remote_commander -# Version : GaussDB Kernel V500R001 -# Date : 2021-03-01 -# Description : Remote execute script -############################################################################# - -CA_CRT=$1 -CA_KEY=$2 - -crt=$3 -key=$4 -req=$5 -local_host=$6 -file_name=$7 - -ca_password="" -ssl_password="" - -read -s -p "please input the password of ca and ssl separated by space: " ca_password ssl_password - - - -expect <<-EOF - spawn /bin/openssl genrsa -aes256 -out ${key} 2048 - expect "Enter pass phrase for" - send "${ssl_password}\r" - expect "Verifying - Enter pass phrase for" - send "${ssl_password}\r" - expect eof -EOF - -expect <<-EOF - spawn /bin/openssl req -new -out ${req} -key ${key} -subj "/C=CN/ST=Some-State/O=${file_name}/CN=${local_host}" - expect "Enter pass phrase for" - send "${ssl_password}\r" - expect eof -EOF - -expect <<-EOF - spawn /bin/openssl x509 -req -in ${req} -out ${crt} -sha256 -CAcreateserial -days 7000 -CA ${CA_CRT} -CAkey ${CA_KEY} - expect "Enter pass phrase for" - send "${ca_password}\r" - expect eof -EOF - -rm ${req} - -chmod 600 ${key} -chmod 600 ${crt} diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/remote_commander b/src/gausskernel/dbmind/tools/ai_manager/tools/bin/remote_commander deleted file mode 100644 index 1263f7941..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/bin/remote_commander +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : remote_commander -# Version : GaussDB Kernel V500R001 -# Date : 2021-03-01 -# Description : Remote execute script -############################################################################# - -cmd=$* -read -p "password: " password -timer=120 -func_remote_execute_cmd() -{ - expect -c " - set timeout ${timer} - spawn bash -c \"$cmd\"; - expect { - \"Are you sure you want to continue connecting (yes/no)?\" - {send \"yes\n\";expect \"assword:\";send \"$password\n\"; exp_continue} - \"assword:\" {send $password\n; exp_continue} - } - catch wait result; - exit [lindex \$result 3]" -} -func_remote_execute_cmd diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/cert_generator.py b/src/gausskernel/dbmind/tools/ai_manager/tools/cert_generator.py deleted file mode 100644 index 7f782c74b..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/cert_generator.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
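gen_certificate.sh above drives three openssl invocations in sequence: generate an AES-256-protected private key, build a certificate signing request for it, then sign that request with the root CA. A minimal Python rendering of the same sequence is sketched below, passing the passphrases on stdin the way cert_generator.py (next file) does with `-passin stdin`; all paths, names and subject fields are placeholders.

```python
# Illustrative outline of the three openssl calls gen_certificate.sh drives
# via expect: key, CSR, then CA-signed certificate.
import subprocess


def run_openssl(args, passphrase):
    """Run one openssl command, feeding the passphrase on stdin."""
    proc = subprocess.run(["openssl"] + args, input=passphrase + "\n",
                          capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(proc.stderr)


def gen_certificate(ca_crt, ca_key, crt, key, req, host, name,
                    ca_password, ssl_password):
    # 1. private key, AES-256 encrypted with the ssl passphrase
    run_openssl(["genrsa", "-aes256", "-passout", "stdin",
                 "-out", key, "2048"], ssl_password)
    # 2. signing request for that key
    run_openssl(["req", "-new", "-out", req, "-key", key,
                 "-passin", "stdin",
                 "-subj", f"/C=CN/ST=Some-State/O={name}/CN={host}"],
                ssl_password)
    # 3. sign with the root CA (valid 7000 days, as in the script)
    run_openssl(["x509", "-req", "-in", req, "-out", crt, "-sha256",
                 "-CAcreateserial", "-days", "7000",
                 "-CA", ca_crt, "-CAkey", ca_key,
                 "-passin", "stdin"], ca_password)
```

As in the shell script, the request file is only an intermediate artifact and can be removed once the certificate is written.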
-# FileName : cert_generator.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : ca certificate handler -############################################################################# -import os -from definitions.errors import Errors -from definitions.constants import Constant -from tools.common_tools import CommonTools -from tools.global_box import g - - -class CertGenerator(object): - @staticmethod - def get_rand_str(): - """ - function: get random passwd - input: NA - output: passwd - """ - uppercmd = 'openssl rand -base64 12 | tr "[0-9][a-z]" "[A-Z]" | tr -d [/+=] |cut -c 1-3' - lowercmd = 'openssl rand -base64 12 | tr "[0-9][A-Z]" "[a-z]" | tr -d [/+=] |cut -c 1-4' - numcmd = 'openssl rand -base64 12 | md5sum | tr "[a-z]" "[0-9]" |cut -c 1-3' - strcmd = 'openssl rand -base64 48 | tr "[0-9][a-z][A-Z]" "[~@_#*]" | tr -d [/+=] |cut -c 1-1' - - upper_code, upper_output, upper_error = CommonTools.get_status_output_error(uppercmd) - lower_code, lower_output, lower_error = CommonTools.get_status_output_error(lowercmd) - num_code, num_output, num_error = CommonTools.get_status_output_error(numcmd) - str_code, str_output, str_error = CommonTools.get_status_output_error(strcmd) - if any([upper_code, lower_code, num_code, str_code]): - raise Exception(Errors.EXECUTE_RESULT['gauss_0412'] % str( - [upper_code, lower_code, num_code, str_code])) - rand_pwd = 'G' + upper_output.strip() + lower_output.strip() + \ - num_output.strip() + str_output.strip() - if len(rand_pwd) == Constant.RANDOM_PASSWD_LEN: - return rand_pwd - rand_pwd = "G" - cmd_tuple = (uppercmd, lowercmd, numcmd, strcmd) - out_tuple = (upper_output.strip(), lower_output.strip(), - num_output.strip(), str_output.strip()) - str_len = (3, 4, 3, 1) - for i in range(4): - if len(out_tuple[i]) != str_len[i]: - count = 0 - while True: - count += 1 - _, output, _ = CommonTools.get_status_output_error(cmd_tuple[i]) - if len(output.strip()) == str_len[i]: - rand_pwd += output.strip() - break - if count > 100: - raise Exception(Errors.EXECUTE_RESULT[ - 'gauss_0413'] % (cmd_tuple[i], 'generate rand pwd')) - else: - rand_pwd += out_tuple[i].strip() - return rand_pwd - - @staticmethod - def create_root_certificate(ca_password, ca_crt_path, ca_key_path, config_path): - """ - function : create root ca file - input : rand pass, dir path of certificates, config path - output : NA - """ - if not os.path.isfile(config_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % config_path) - CommonTools.mkdir_with_mode(os.path.dirname(ca_crt_path), Constant.AUTH_COMMON_DIR_STR) - CommonTools.mkdir_with_mode(os.path.dirname(ca_key_path), Constant.AUTH_COMMON_DIR_STR) - ca_req_path = os.path.realpath(os.path.join( - os.path.dirname(ca_crt_path), Constant.CA_ROOT_REQ)) - # create ca key file - cmd = "%s echo '%s' |openssl genrsa -aes256 -passout stdin -out %s 2048" % ( - Constant.CMD_PREFIX, ca_password, ca_key_path) - cmd += " && %s" % Constant.SHELL_CMD_DICT['changeMode'] % ( - Constant.AUTH_COMMON_FILE_STR, ca_key_path) - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca root key file') - - # 2 create ca req file - cmd = "%s echo '%s' | openssl req -new -out %s -key %s -config %s -passin stdin" % ( - Constant.CMD_PREFIX, ca_password, ca_req_path, ca_key_path, config_path) - cmd += " && %s" % Constant.SHELL_CMD_DICT['changeMode'] % ( - Constant.AUTH_COMMON_FILE_STR, ca_req_path) - status, output = CommonTools.get_status_output_error(cmd, mixed=True) 
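get_rand_str above goes to some lengths to guarantee that the generated value contains uppercase, lowercase, digit and special characters, retrying individual openssl pipelines until each fragment has the right length. A much shorter way to get the same guarantees with the standard library is sketched below; the 12-character target, leading 'G' and the `~@_#*` special set are read off the code above, while the function name is made up.

```python
# Compact alternative to CertGenerator.get_rand_str using the secrets module.
import secrets
import string


def rand_passphrase(length: int = 12) -> str:
    classes = [string.ascii_uppercase, string.ascii_lowercase,
               string.digits, "~@_#*"]
    # one guaranteed character from each class ...
    chars = [secrets.choice(cls) for cls in classes]
    # ... then fill up to length-1 from the union and shuffle
    pool = "".join(classes)
    chars += [secrets.choice(pool) for _ in range(length - 1 - len(chars))]
    secrets.SystemRandom().shuffle(chars)
    return "G" + "".join(chars)   # the code above always leads with 'G'
```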
- if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca root req file') - - # 3 create ca crt file - cmd = "%s echo '%s' | openssl x509 -req -in %s " \ - "-signkey %s -days %s -out %s -passin stdin" % ( - Constant.CMD_PREFIX, ca_password, ca_req_path, - ca_key_path, Constant.CA_ROOT_VALID_DATE, ca_crt_path) - cmd += " && %s" % Constant.SHELL_CMD_DICT['changeMode'] % ( - Constant.AUTH_COMMON_FILE_STR, ca_crt_path) - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca root crt file') - - CommonTools.remove_files([ca_req_path]) - g.logger.info('Successfully generate ca root certificate, file path[%s].' % ca_crt_path) - - @staticmethod - def create_ca_certificate_with_script(ca_password, ssl_password, ca_crt_path, ca_key_path, - config_path, out_crt_path, out_key_path, - ip, crt_type='server'): - """ - function : create server ca file or client ca file with shell script. - input : rand pass, dir path of certificates, config path - """ - if not os.path.isfile(config_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % 'config file:%s' % config_path) - if not os.path.isfile(ca_crt_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % 'ca crt file:%s' % ca_crt_path) - if not os.path.isfile(ca_key_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % 'ca key file:%s' % ca_key_path) - CommonTools.mkdir_with_mode(os.path.dirname(out_key_path), Constant.AUTH_COMMON_DIR_STR) - CommonTools.mkdir_with_mode(os.path.dirname(out_crt_path), Constant.AUTH_COMMON_DIR_STR) - ca_req_path = os.path.realpath(os.path.join(os.path.dirname(ca_crt_path), Constant.CA_REQ)) - pwd = "%s %s" % (ca_password, ssl_password) - script_path = os.path.join(os.path.dirname( - os.path.realpath(__file__)), 'bin/gen_certificate.sh') - cmd = "unset LD_LIBRARY_PATH && echo '%s' | sh %s %s %s %s %s %s %s %s" % ( - pwd, script_path, ca_crt_path, ca_key_path, - out_crt_path, out_key_path, ca_req_path, ip, crt_type) - - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca crt file' + output) - g.logger.info("Successfully generate %s ssl cert for node[%s]." % (crt_type, ip)) - - @staticmethod - def create_ca_certificate(ca_password, ssl_password, ca_crt_path, ca_key_path, - config_path, out_crt_path, out_key_path, ip, crt_type='server'): - """ - function : create server ca file or client ca file. 
- input : rand pass, dir path of certificates, config path - output : NA - """ - if not os.path.isfile(config_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % 'config file:%s' % config_path) - if not os.path.isfile(ca_crt_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % 'ca crt file:%s' % ca_crt_path) - if not os.path.isfile(ca_key_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % 'ca key file:%s' % ca_key_path) - CommonTools.mkdir_with_mode(os.path.dirname(out_key_path), Constant.AUTH_COMMON_DIR_STR) - CommonTools.mkdir_with_mode(os.path.dirname(out_crt_path), Constant.AUTH_COMMON_DIR_STR) - # create ca key file - cmd = "%s echo '%s' | openssl genrsa -aes256 -passout stdin -out %s 2048" % ( - Constant.CMD_PREFIX, ssl_password, out_key_path) - cmd += " && %s" % Constant.SHELL_CMD_DICT['changeMode'] % ( - Constant.AUTH_COMMON_FILE_STR, out_key_path) - - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca key file' + output) - - ca_req_path = os.path.realpath(os.path.join(os.path.dirname(ca_crt_path), Constant.CA_REQ)) - - # create ca req file - openssl_conf_env = "export OPENSSL_CONF=%s" % config_path - cmd = '%s %s && echo "%s" | openssl req -new -out %s -key %s ' \ - '-passin stdin -subj "/C=CN/ST=Some-State/O=%s/CN=%s"' % ( - Constant.CMD_PREFIX, openssl_conf_env, ssl_password, - ca_req_path, out_key_path, crt_type, ip) - cmd += " && %s" % Constant.SHELL_CMD_DICT['changeMode'] % ( - Constant.AUTH_COMMON_FILE_STR, ca_req_path) - - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca req file' + output) - - # create server or client ca crt file - cmd = '%s echo "%s" | openssl x509 -req -in %s -out %s -passin stdin ' \ - '-sha256 -CAcreateserial -days %s -CA %s -CAkey %s' % ( - Constant.CMD_PREFIX, ca_password, ca_req_path, out_crt_path, - Constant.CA_VALID_DATE, ca_crt_path, ca_key_path) - cmd += " && %s" % Constant.SHELL_CMD_DICT['changeMode'] % ( - Constant.AUTH_COMMON_FILE_STR, out_crt_path) - - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0414'] % 'ca crt file') - CommonTools.remove_files([ca_req_path]) - g.logger.info("Successfully generate %s ssl cert for node[%s]." 
% (crt_type, ip)) - - @staticmethod - def deploy_ca_certs(file_path_list, remote_ip, user, password, dest_dir, no_delete=None): - """ - Copy files to remote node and remove local files - """ - CommonTools.remote_mkdir_with_mode( - dest_dir, Constant.AUTH_COMMON_DIR_STR, remote_ip, user, password) - - for file_path in file_path_list: - dest_file_path = os.path.join(dest_dir, os.path.basename(file_path)) - status, output = CommonTools.remote_copy_files( - remote_ip, user, password, file_path, dest_file_path) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0406'] % (remote_ip, output)) - g.logger.debug('Successfully copy [%s] to remote node[%s]' % (file_path, remote_ip)) - if no_delete: - file_path_list = [file for file in file_path_list if file not in no_delete] - CommonTools.remove_files(file_path_list) - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/common_tools.py b/src/gausskernel/dbmind/tools/ai_manager/tools/common_tools.py deleted file mode 100644 index daef6c8a0..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/common_tools.py +++ /dev/null @@ -1,543 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : common_tools.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Common tools -############################################################################# - -import json -import re -import threading -import sys -import inspect -import pydoc -import os -import subprocess -import time - - -from subprocess import PIPE -from configparser import ConfigParser -from multiprocessing.dummy import Pool as ThreadPool - -sys.path.append(sys.path[0] + "/../") -from definitions.errors import Errors -from definitions.constants import Constant -from tools.fast_popen import FastPopen -from tools.params_checkers import ConfigChecker - - -class CommonTools: - """ - Common tools for ai manager. - """ - @staticmethod - def get_version_info_from_file(file_path): - """ - get version from version conf file in package - """ - with open(file_path, 'r') as file: - content = file.readlines() - if len(content) < 3: - raise Exception(Errors.CONTENT_OR_VALUE['gauss_0501'] % content) - if re.match(r'^\d*\.\d*$', content[1]): - version_str = ''.join(content[1].split('.')) - return version_str.strip() - else: - raise Exception(Errors.CONTENT_OR_VALUE['gauss_0501'] % content[1]) - - @staticmethod - def extract_file_to_dir(pack_path, target_path): - """ - Unpack file from pack_path to target_path. - """ - cmd = Constant.SHELL_CMD_DICT['decompressFileToDir'] % (pack_path, target_path) - status, output = subprocess.getstatusoutput(cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'extract file', output)) - else: - return status, output - - @staticmethod - def copy_file_to_dest_path(path_from, path_to): - """ - Copy file to dest path. 
- """ - cmd = Constant.SHELL_CMD_DICT['copyFile'] % (path_from, os.path.dirname(path_to)) - status, output = subprocess.getstatusoutput(cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'copy files', output)) - - @staticmethod - def check_dir_access(path, mode='rw'): - """ - Check path authority - full: read & write & execute - exist: exist - rw: read & write - r: read - """ - if mode == 'full': - authority = Constant.AUTHORITY_FULL - elif mode == 'exist': - authority = Constant.AUTHORITY_EXIST - elif mode == 'rw': - authority = Constant.AUTHORITY_RW - elif mode == 'r': - authority = Constant.AUTHORITY_R - else: - raise Exception(Errors.PARAMETER['gauss_0201'] % mode) - results = [] - for check_mode in authority.values(): - results.append(os.access(path, check_mode)) - if not all(results): - raise Exception(Errors.PERMISSION['gauss_0701'] % path) - - @staticmethod - def mkdir_with_mode(path, mode): - """ - Create directory with defined mode if not exist. - """ - cmd = Constant.SHELL_CMD_DICT['createDir'] % (path, path, mode) - status, output = subprocess.getstatusoutput(cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'mkdir', output)) - else: - return status, output - - @staticmethod - def remote_mkdir_with_mode(path, mode, ip, username, password): - """ - Create directory with defined mode if not exist. - """ - cmd = Constant.SHELL_CMD_DICT['createDirSimple'] % (path, mode) - status, output = CommonTools.remote_execute_cmd(ip, username, password, cmd) - if status != 0 and 'exist' not in output: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'remote mkdir', output)) - else: - return status, output - - @staticmethod - def clean_dir(path): - """ - Remove files in path dir. - """ - cmd = Constant.SHELL_CMD_DICT['cleanDir'] % (path, path, path) - status, output = subprocess.getstatusoutput(cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'clean dir', output)) - else: - return status, output - - @staticmethod - def retry_remote_clean_dir(path, ip, username, password, retry_times=5): - """ - Retry to clean dir on remote node if clean failed - """ - remain_count = retry_times - while remain_count > 0: - try: - status, output = CommonTools.remote_clean_dir(path, ip, username, password) - return status, output - except Exception as error: - if remain_count <= 1: - raise Exception(str(error)) - time.sleep(1) - remain_count -= 1 - - @staticmethod - def remote_clean_dir(path, ip, username, password): - """ - Remove remote nodes files in path dir - """ - cmd = Constant.SHELL_CMD_DICT['simpleCleanDir'] % path - status, output = CommonTools.remote_execute_cmd(ip, username, password, cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'remote clean dir', output)) - cmd_check = Constant.SHELL_CMD_DICT['showDirDocs'] % path - status, output = CommonTools.remote_execute_cmd(ip, username, password, cmd_check) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'check dir', output)) - num = int(output.strip().split()[-1]) - if num: - raise Exception(Errors.EXECUTE_RESULT['gauss_0419'] % (path, ip, num)) - return status, output - - @staticmethod - def remove_files(file_path_list): - """ - Remove files in file path list. 
- """ - cmd_list = [Constant.SHELL_CMD_DICT['deleteFile'] % ( - file_path, file_path) for file_path in file_path_list] - cmd = ' && '.join(cmd_list) - status, output = subprocess.getstatusoutput(cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'remove file', output)) - else: - return status, output - - @staticmethod - def json_file_to_dict(file_path): - """ - Read json file from file_path. - return: data in dict. - """ - if not os.path.isfile(file_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % file_path) - with open(file_path, 'r') as file: - content = file.read() - try: - dict_data = json.loads(content) - return dict_data - except Exception as error: - raise Exception(Errors.EXECUTE_RESULT['gauss_0402'] % error) - - @staticmethod - def dict_to_json_file(dict_data, file_path): - try: - if os.path.isfile(file_path): - os.remove(file_path) - with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0o600), "w") as fp_json: - json.dump(dict_data, fp_json) - except Exception as error: - raise ValueError(Errors.EXECUTE_RESULT['gauss_0408'] % error) - - @staticmethod - def add_cron(install_path, cmd, frequency): - """ - Add cron - install_path: install path for getting cron script - cmd: execute cmd by cron - frequency: execute frequency[1m] - """ - cron_path = os.path.realpath(os.path.join(install_path, Constant.CRON_PATH)) - cron_cmd = Constant.SHELL_CMD_DICT['addCronCMD'] % ( - Constant.CMD_PREFIX, cron_path, frequency, cmd) - status, output = subprocess.getstatusoutput(cron_cmd) - return status, output - - @staticmethod - def del_cron(install_path, cmd, frequency): - """ - Delete cron - install_path: install path for getting cron script - cmd: execute cmd by cron - frequency: execute frequency[1m] - """ - cron_path = os.path.realpath(os.path.join(install_path, Constant.CRON_PATH)) - cron_cmd = Constant.SHELL_CMD_DICT['delCronCMD'] % ( - Constant.CMD_PREFIX, cron_path, frequency, cmd) - status, output = subprocess.getstatusoutput(cron_cmd) - return status, output - - @staticmethod - def delete_early_record(file_path, max_lines_num): - """ - Resize file, if content lines reach max_lines_num, delete early record. - """ - if not os.path.exists(file_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % file_path) - if isinstance(max_lines_num, str): - if not max_lines_num.isdigit(): - raise Exception( - Errors.CONTENT_OR_VALUE['gauss_0503'] % (max_lines_num, 'max record line')) - with open(file_path, 'r') as file: - lines = file.readlines() - if len(lines) > int(max_lines_num): - new_lines = lines[-max_lines_num:] - with open(file_path, 'w') as write: - write.writelines(new_lines) - - @staticmethod - def add_content_to_file(file_path, content): - """ - add content to file, if not exist, create it. - """ - with os.fdopen(os.open( - file_path, os.O_WRONLY | os.O_CREAT, Constant.AUTH_COMMON_FILE), "a") as file: - file.write(content) - - @staticmethod - def read_last_line_from_file(file_path): - """ - Read last line of file - """ - with open(file_path, 'r') as read_file: - lines = read_file.readlines() - lines_no_empty = [line for line in lines if line] - if lines_no_empty: - return lines_no_empty[-1] - else: - return None - - @staticmethod - def grep_process_and_kill(target): - """ - Grep process key words and kill the process. 
- """ - cmd = Constant.SHELL_CMD_DICT['killProcess'] % target - status, out_put = subprocess.getstatusoutput(cmd) - return status, out_put - - @staticmethod - def check_is_root(): - if os.getuid() == 0: - return True - else: - return False - - @staticmethod - def check_process(process): - """ - Check process number. - """ - cmd = Constant.SHELL_CMD_DICT['checkProcess'] % process - status, output = subprocess.getstatusoutput(cmd) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'check process', output)) - try: - output = int(output) - except ValueError: - raise Exception(Errors.CONTENT_OR_VALUE['gauss_0503'] % (output, 'check process num')) - return output - - @staticmethod - def check_path_valid(path): - """ - function: check path valid - input : path - output: NA - """ - if path.strip() == "": - return - for rac in Constant.PATH_CHECK_LIST: - flag = path.find(rac) - if flag >= 0: - raise Exception(Errors.ILLEGAL['gauss_0601'] % (rac, path)) - - @staticmethod - def get_funcs(module): - """ - Acquire functions in python file. - :param module: python module. - :return: dict - in python module. - """ - funcs = {} - _object, _ = pydoc.resolve(module) - _all = getattr(_object, '__all__', None) - for key, value in inspect.getmembers(_object, inspect.isroutine): - if _all is not None or inspect.isbuiltin(value) or inspect.getmodule(value) is _object: - if pydoc.visiblename(key, _all, _object): - funcs[key] = value - return funcs - - @staticmethod - def parallel_execute(func, para_list, parallel_jobs=10, is_map=False): - """ - function: Execution of python functions through multiple processes - input: func, list - output: list - """ - if parallel_jobs > len(para_list): - parallel_jobs = len(para_list) - trace_id = threading.currentThread().getName() - pool = ThreadPool(parallel_jobs, initializer=lambda: threading.currentThread().setName( - threading.currentThread().getName().replace('Thread', trace_id))) - results = pool.map(func, para_list) - pool.close() - pool.join() - if is_map: - result_map = {} - for result in results: - result_map[result[0]] = result[1] - return result_map - return results - - @staticmethod - def remote_copy_files(remote_ip, user, password, path_from, path_to): - """ - Copy files to remote node. - """ - if os.path.isdir(path_from): - path_from = os.path.join(path_from, '*') - script_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), Constant.REMOTE_COMMANDER) - remote_copy_cmd = Constant.SHELL_CMD_DICT['remoteDeploy'] % ( - password, script_path, path_from, user, remote_ip, path_to) - status, output = CommonTools.get_status_output_error(remote_copy_cmd, mixed=True) - return status, output - - @staticmethod - def remote_execute_cmd(remote_ip, user, password, cmd): - """ - Execute command on remote node. 
- """ - script_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), Constant.REMOTE_COMMANDER) - remote_execute_cmd = Constant.SHELL_CMD_DICT['remoteExecute'] % ( - password, script_path, user, remote_ip, cmd) - status, output = CommonTools.get_status_output_error(remote_execute_cmd, mixed=True) - return status, output - - @staticmethod - def get_local_ips(): - """ - Get all ips in list - """ - import socket - hostname = socket.gethostname() - addrs = socket.getaddrinfo(hostname, None) - ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", str(addrs)) - ips = [ip.strip() for ip in set(ips) if ip] - if ips: - return ips - else: - raise Exception(Errors.EXECUTE_RESULT['gauss_0415'] % 'get local ips.') - - @staticmethod - def modify_agent_config_file(config_path, content, allow_add=True): - """ - Modify agent config file, parse collection item different in each node. - allow_add: allow add new section and option in config file - """ - parser = ConfigParser() - parser.read(config_path) - ips = CommonTools.get_local_ips() - for section_name, section_values in content.items(): - for option_name, option_value in section_values.items(): - is_valid = parser.get(section_name, option_name, fallback=None) - if is_valid is None: - if not allow_add: - raise Exception(Errors.PARAMETER['gauss_0202'] % option_name) - if option_name == "collection_item": - for item in option_value: - json_item = json.dumps(item) - if item[1].strip() in ips: - parser.set(section_name, option_name, json_item) - else: - parser.set(section_name, option_name, str(option_value)) - with open(config_path, 'w') as file: - parser.write(file) - - @staticmethod - def modify_config_file(config_path, content, allow_add=True): - """ - Modify config file - allow_add: allowed add new section or option - content: dict {section1:[opt1:value1,opt2:value2], section2:[...]} - """ - parser = ConfigParser() - parser.read(config_path) - for section_name, section_values in content.items(): - for option_name, option_value in section_values.items(): - is_valid = parser.get(section_name, option_name, fallback=None) - if is_valid is None: - if not allow_add: - raise Exception(Errors.PARAMETER['gauss_0202'] % option_name) - parser.set(section_name, option_name, str(option_value)) - with open(config_path, 'w') as file: - parser.write(file) - - @staticmethod - def get_status_output_error(cmd, mixed=False): - """ - Execute command and return in defined results. - """ - proc = FastPopen(cmd, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid, close_fds=True) - stdout, stderr = proc.communicate() - if mixed: - return proc.returncode, stdout.decode() + stderr.decode() - else: - return proc.returncode, stdout.decode(), stderr.decode() - - @staticmethod - def get_current_usr(): - """ - Get current user by echo $USER - """ - cmd = Constant.SHELL_CMD_DICT['getUser'] - status, output = subprocess.getstatusoutput(cmd) - if status != 0 or not output: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (cmd, 'get user', output)) - else: - return output.strip() - - @staticmethod - def create_file_if_not_exist(path, mode=Constant.AUTH_COMMON_FILE): - """ - Create file if not exist. 
- """ - if not os.path.exists(path): - os.mknod(path, mode=mode) - return True - else: - return False - - @staticmethod - def read_info_from_config_file(file_path, section, option, under_path=None): - """ - Get config info from file_path - """ - parser = ConfigParser() - parser.read(file_path) - info = parser.get(section, option, fallback=None) - if info is None: - raise Exception(Errors.PARAMETER['gauss_0202'] % option) - else: - ConfigChecker.check(section, option, info) - if under_path: - if not os.path.isabs(under_path): - raise Exception(Errors.PARAMETER['gauss_0104'] % under_path) - base_path = re.sub(r'\./', '', info) - return os.path.join(under_path, base_path) - return info - - @staticmethod - def get_local_ip(ignore=False): - cmd = Constant.SHELL_CMD_DICT['ifconfig'] - status, output = subprocess.getstatusoutput(cmd) - if status != 0 or not output: - raise Exception(Errors.EXECUTE_RESULT['gauss_0413'] % (cmd, 'get local ip')) - ret = re.search(r'eth0: .*?inet (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', output, - flags=re.DOTALL) - if ret: - return ret.group(1) - else: - if ignore: - return '127.0.0.1' - else: - raise Exception(Errors.EXECUTE_RESULT['gauss_0415'] % 'local ip') - - @staticmethod - def encrypt_with_path(password, path, encrypt_path, lib_path): - if not os.path.isfile(encrypt_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0102'] % encrypt_path) - if not os.path.isdir(lib_path): - raise Exception(Errors.FILE_DIR_PATH['gauss_0101'] % lib_path) - CommonTools.chmod_files_with_execute_permission([encrypt_path]) - cmd = 'export LD_LIBRARY_PATH=%s && ' % lib_path - path = path + '/' if not path.endswith('/') else path - cmd += '%s %s %s %s' % (Constant.CMD_PREFIX, encrypt_path, password, path) - cmd += ' && chmod -R %s %s' % (Constant.AUTH_COMMON_ENCRYPT_FILES, path) - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0417'] + output) - return status, output - - @staticmethod - def chmod_files_with_execute_permission(file_list): - for file in file_list: - cmd = Constant.SHELL_CMD_DICT["chmodWithExecute"] % os.path.abspath(file) - status, output = CommonTools.get_status_output_error(cmd, mixed=True) - if status != 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % ( - cmd, 'change file authority', output)) - - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/encrypt_decrypt_handler.py b/src/gausskernel/dbmind/tools/ai_manager/tools/encrypt_decrypt_handler.py deleted file mode 100644 index 36b9ddc3a..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/encrypt_decrypt_handler.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. 
-# FileName : aes_cbs_util.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : aes_cbs_util -############################################################################# -import sys -import hashlib - -try: - from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes - from cryptography.hazmat.backends import default_backend -except Exception as err: - sys.stdout.write(str(err)) - - -class AesCbcUtil(object): - @staticmethod - def aes_cbc_decrypt_with_path(path): - with open(path + '/server.key.cipher', 'rb') as f: - cipher_txt = f.read() - with open(path + '/server.key.rand', 'rb') as f: - rand_txt = f.read() - if cipher_txt is None or cipher_txt == "": - return None - server_vector_cipher_vector = cipher_txt[16 + 1:16 + 1 + 16] - # pre shared key rand - server_key_rand = rand_txt[:16] - # worker key - server_decrypt_key = hashlib.pbkdf2_hmac('sha256', server_key_rand, - server_vector_cipher_vector, 10000, - 16) - enc = AesCbcUtil.aes_cbc_decrypt(cipher_txt, server_decrypt_key) - return enc - - @staticmethod - def aes_cbc_decrypt(content, key): - AesCbcUtil.check_content_key(content, key) - if type(key) == str: - key = bytes(key) - iv_len = 16 - # pre shared key iv - iv = content[16 + 1 + 16 + 1:16 + 1 + 16 + 1 + 16] - # pre shared key enctryt - enc_content = content[:iv_len] - backend = default_backend() - cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) - decrypter = cipher.decryptor() - dec_content = decrypter.update(enc_content) + decrypter.finalize() - dec_content = dec_content.rstrip(b'\x00')[:-1].decode() - return dec_content - - @staticmethod - def check_content_key(content, key): - if not (type(content) == bytes): - raise Exception("content's type must be bytes.") - elif not (type(key) in (bytes, str)): - raise Exception("bytes's type must be in (bytes, str).") - iv_len = 16 - if not (len(content) >= (iv_len + 16)): - raise Exception("content's len must >= (iv_len + 16).") - - @staticmethod - def aes_cbc_decrypt_with_multi(root_path): - """ - decrypt message with multi depth - """ - num = 0 - decrypt_str = "" - while True: - path = root_path + "/key_" + str(num) - part = AesCbcUtil.aes_cbc_decrypt_with_path(path) - if part is None or part == "": - break - elif len(part) < 15: - decrypt_str = decrypt_str + AesCbcUtil.aes_cbc_decrypt_with_path(path) - break - else: - decrypt_str = decrypt_str + AesCbcUtil.aes_cbc_decrypt_with_path(path) - - num = num + 1 - if decrypt_str == "": - return None - return decrypt_str - - - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/env_handler.py b/src/gausskernel/dbmind/tools/ai_manager/tools/env_handler.py deleted file mode 100644 index 71a4c8be7..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/env_handler.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
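The byte offsets in AesCbcUtil above are easy to lose track of, so the sketch below restates the decryption scheme with the slices named: a 16-byte salt from server.key.rand and a vector embedded in server.key.cipher feed PBKDF2-HMAC-SHA256 (10000 iterations, 16-byte key), and the derived key decrypts the leading AES block in CBC mode. The layout assumptions are taken directly from the code above; the function name is illustrative.

```python
# Sketch of the AesCbcUtil decryption scheme with named slices.
import hashlib

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes


def decrypt_secret(cipher_txt: bytes, rand_txt: bytes) -> str:
    salt = rand_txt[:16]            # "server key rand"
    vector = cipher_txt[17:33]      # bytes 16+1 .. 16+1+16
    iv = cipher_txt[34:50]          # bytes 16+1+16+1 .. +16
    enc = cipher_txt[:16]           # first AES block, as in the code above
    # derive the working key exactly as aes_cbc_decrypt_with_path does
    key = hashlib.pbkdf2_hmac("sha256", salt, vector, 10000, 16)
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv),
                    backend=default_backend())
    decryptor = cipher.decryptor()
    plain = decryptor.update(enc) + decryptor.finalize()
    return plain.rstrip(b"\x00")[:-1].decode()
```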
-# FileName : env_handler.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : env file handler -############################################################################# - -import sys - -sys.path.append(sys.path[0] + "/../") -from tools.common_tools import CommonTools -from tools.global_box import g - -PYTHON_PATH = '' - -# Dict of env need defined: -ENV_MENU = { - "export PYTHONPATH": "export PYTHONPATH=%s" % PYTHON_PATH -} - - -class EnvHandler(object): - def __init__(self, env_file_path): - self.env_file_path = env_file_path - self.env_mapping = ENV_MENU - - def create_env_file_if_not_exist(self): - """ - Create env file - """ - ret = CommonTools.create_file_if_not_exist(self.env_file_path) - if ret: - g.logger.info('Successfully create env file.') - else: - g.logger.info('Env file already exist.') - - def modify_env_file(self): - """ - Modify env file - """ - if not self.env_mapping.keys(): - g.logger.info('No need write env file.') - return - with open(self.env_file_path, 'r') as file: - content_list = file.readlines() - for key in ENV_MENU.keys(): - content_list = [item.strip() for item in content_list if key not in item] - content_list += ENV_MENU.values() - with open(self.env_file_path, 'w') as file2: - file2.write('\n'.join(content_list)) - g.logger.info('Successfully modify env file.') - - def run(self): - self.create_env_file_if_not_exist() - self.modify_env_file() - diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/fast_popen.py b/src/gausskernel/dbmind/tools/ai_manager/tools/fast_popen.py deleted file mode 100644 index c7ff821f3..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/fast_popen.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
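EnvHandler.modify_env_file above keeps the env file idempotent by deleting every existing line that mentions a managed key before re-appending the current definitions, so repeated runs do not accumulate duplicates. Reduced to a stand-alone function (path and menu values are invented, and the file is assumed to exist, which create_env_file_if_not_exist guarantees in the original):

```python
# Stand-alone rendering of the env-file rewrite logic used above.
ENV_MENU = {"export PYTHONPATH": "export PYTHONPATH=/opt/ai_manager"}


def rewrite_env_file(path: str, menu: dict) -> None:
    with open(path, "r") as handle:
        lines = [line.strip() for line in handle]
    for key in menu:                     # drop stale entries, key by key
        lines = [line for line in lines if key not in line]
    lines.extend(menu.values())          # re-append the managed definitions
    with open(path, "w") as handle:
        handle.write("\n".join(lines) + "\n")
```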
-# FileName : fast_popen.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : hiden -############################################################################# - -import subprocess -import os - -SELF_FD_DIR = "/proc/self/fd" -MAXFD = os.sysconf("SC_OPEN_MAX") - - -class FastPopen(subprocess.Popen): - """ - optimization subprocess.Popen when close_fds=True, - only close the currently opend file, - reduce the execution time when ulimit is too large - """ - - def __init__(self, cmd, bufsize=0, - stdout=None, stderr=None, - preexec_fn=None, close_fds=False, - cwd=None, env=None, universal_newlines=False, - startupinfo=None, creationflags=0, logger=None): - - subprocess.Popen.logger = None - subprocess.Popen.__init__(self, ["sh", "-"], bufsize=bufsize, executable=None, - stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, - preexec_fn=preexec_fn, close_fds=close_fds, shell=None, - cwd=cwd, env=env, universal_newlines=universal_newlines, - startupinfo=startupinfo, creationflags=creationflags) - self.logger = logger - self.cmd = cmd - - def communicate(self, input_cmd=None, timeout=None): - if input_cmd: - self.cmd = input_cmd - - if not isinstance(self.cmd, str): - self.cmd = subprocess.list2cmdline(self.cmd) - self.cmd = self.cmd.encode() - - std_out, std_err = subprocess.Popen.communicate(self, self.cmd) - return std_out, std_err - - def _close_fds(self, but): - if not os.path.exists(SELF_FD_DIR): - if hasattr(os, 'closerange'): - os.closerange(3, but) - os.closerange(but + 1, MAXFD) - else: - for i in range(3, MAXFD): - if i == but: - continue - try: - os.close(i) - except BaseException as bex: - if self.logger: - self.logger.info("WARNING:%s" % str(bex)) - return - - fd_list = os.listdir(SELF_FD_DIR) - for fd_h in fd_list: - if int(fd_h) < 3 or int(fd_h) == but: - continue - try: - os.close(int(fd_h)) - except BaseException as bex: - if self.logger: - self.logger.info("WARNING:%s" % str(bex)) \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/global_box.py b/src/gausskernel/dbmind/tools/ai_manager/tools/global_box.py deleted file mode 100644 index 139439208..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/global_box.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. -# FileName : global_box.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Globals -############################################################################# - -import sys -sys.path.append(sys.path[0] + "/../") -from tools.log import MainLog -from definitions.constants import Constant - - -class GlobalBox(object): - def __init__(self): - self.logger = None - - self.init_globals() - - def init_globals(self): - self.__get_logger() - - def __get_logger(self): - self.logger = MainLog(Constant.DEFAULT_LOG_NAME).get_logger() - - -g = GlobalBox() - - diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/log.py b/src/gausskernel/dbmind/tools/ai_manager/tools/log.py deleted file mode 100644 index 99ac9be83..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/log.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
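The point of FastPopen._close_fds above is worth spelling out: with a large `ulimit -n`, closing every descriptor from 3 up to SC_OPEN_MAX can dominate process start-up, so on Linux the method lists /proc/self/fd and closes only the descriptors that actually exist. A stand-alone sketch of that optimisation (function name invented):

```python
# Close only the file descriptors that are actually open, via /proc/self/fd.
import os

SELF_FD_DIR = "/proc/self/fd"


def close_open_fds(but: int) -> None:
    for name in os.listdir(SELF_FD_DIR):
        fd = int(name)
        if fd < 3 or fd == but:      # keep stdio and the surviving fd
            continue
        try:
            os.close(fd)
        except OSError:              # the listing fd itself, races, etc.
            pass
```

The fallback branch in the original, for systems without /proc/self/fd, is the slow full-range loop this replaces.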
-# FileName : log.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : log -############################################################################# - -import logging -import os -import re -import sys -from logging import StreamHandler -from logging.handlers import RotatingFileHandler -sys.path.append(sys.path[0] + "/../") -from config_cabin.config import LOG_PATH -from config_cabin.config import LOG_LEVEL -from config_cabin.config import LOG_MAX_BYTES -from config_cabin.config import LOG_BACK_UP -from definitions.constants import Constant -from definitions.errors import Errors -from tools.common_tools import CommonTools - - -class MaskHandler(StreamHandler): - def __init__(self, *arg, **kwargs): - super().__init__(*arg, **kwargs) - - @staticmethod - def __mask_dict_data(msg): - """ - mask sensitive data in dict style - """ - regs_mapping = { - r'password[^:]*:([^,}]+)': 'password*: ******' - } - for reg, rep in regs_mapping.items(): - msg = re.sub(reg, rep, str(msg)) - return msg - - @staticmethod - def __mask_sensitive_para(msg): - """ mask gs tools Sensitive param """ - mask_items = { - "gsql": ["--with-key", "-k"], - "gs_encrypt": ["--key-base64", "--key", "-B", "-k"], - "gs_guc encrypt": ["-K"], - "gs_guc generate": ["-S"], - "gs_dump": ["--rolepassword", "--with-key"], - "gs_dumpall": ["--rolepassword", "--with-key"], - "gs_restore": ["--with-key", "--rolepassword"], - "gs_ctl": ["-P"], - "gs_redis": ["-A"], - "gs_initdb": ["--pwprompt", "--pwpasswd"], - "gs_roach": ["--obs-sk"], - "InitInstance": ["--pwpasswd"] - } - for t_key, t_value in mask_items.items(): - if t_key in msg: - pattern = re.compile("|".join([r"(?<=%s)[ =]+[^ ]*[ ]*" % i for i in t_value])) - msg = pattern.sub(lambda m: " *** ", msg) - return msg - - def mask_pwd(self, msg): - """mask pwd in msg""" - replace_reg = re.compile(r'-W[ ]*[^ ]*[ ]*') - msg = replace_reg.sub('-W *** ', str(msg)) - replace_reg = re.compile(r'-w[ ]*[^ ]*[ ]*') - msg = replace_reg.sub('-w *** ', str(msg)) - replace_reg = re.compile(r'--password[ ]*[^ ]*[ ]*') - msg = replace_reg.sub('--password *** ', str(msg)) - replace_reg = re.compile(r'--pwd[ ]*[^ ]*[ ]*') - msg = replace_reg.sub('--pwd *** ', str(msg)) - replace_reg = re.compile(r'--root-passwd[ ]*[^ ]*[ ]*') - msg = replace_reg.sub('--root-passwd *** ', str(msg)) - if msg.find("gs_guc") >= 0: - msgd = msg.split() - for idx in range(len(msgd)): - if "gs_guc" in msgd[idx] and len(msgd) > (idx + 5) and \ - msgd[idx + 1] == "encrypt" and msgd[idx + 3] in ( - "server", "client", "source"): - regula = re.compile(r"-K[ ]*[^ ]*[ ]*") - msg = regula.sub("-K *** ", str(msg)) - - msg = self.__mask_sensitive_para(msg) - msg = self.__mask_dict_data(msg) - replace_reg = re.compile(r'echo[ ]*[^ ]*[ ]*') - msg = replace_reg.sub('echo *** ', str(msg)) - return msg - - def emit(self, record): - """ - Emit a record. - - If a formatter is specified, it is used to format the record. - The record is then written to the stream with a trailing newline. If - exception information is present, it is formatted using - traceback.print_exception and appended to the stream. If the stream - has an 'encoding' attribute, it is used to determine how to do the - output to the stream. - """ - try: - record.msg = self.mask_pwd(record.msg) - msg = self.format(record) - stream = self.stream - # issue 35046: merged two stream.writes into one. 
- stream.write(msg + self.terminator) - self.flush() - except Exception: - self.handleError(record) - - -class MainLog(object): - def __init__(self, log_name): - self.log_file = None - self.expect_level = None - self.logger = None - self.log_name = log_name - self.log_formatter = None - self.__init_globals() - - def __init_globals(self): - self.log_file = LOG_PATH - self.expect_level = LOG_LEVEL - self.log_formatter = Constant.LOG_FORMATTER - self.__create_logfile() - - def get_logger(self): - log_level = self.__get_log_level(self.expect_level) - logger = logging.getLogger(self.log_name) - logger.setLevel(log_level) - if not logger.handlers: - # stream handler - handler_mask_stream = MaskHandler() - stream_formatter = logging.Formatter(self.log_formatter) - handler_mask_stream.setFormatter(stream_formatter) - logger.addHandler(handler_mask_stream) - - # file handler - file_rotating_handler = RotatingFileHandler( - self.log_file, mode="a", maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACK_UP) - file_formatter = logging.Formatter(self.log_formatter) - file_rotating_handler.setFormatter(file_formatter) - logger.addHandler(file_rotating_handler) - - return logger - - def __get_log_level(self, log_level): - if self.expect_level not in Constant.VALID_LOG_LEVEL: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'Log level') - return log_level * 10 - - def __create_logfile(self): - """ - function: create log file - input : N/A - output: N/A - """ - try: - if not os.path.isdir(os.path.dirname(self.log_file)): - CommonTools.mkdir_with_mode( - os.path.dirname(self.log_file), Constant.AUTH_COMMON_DIR_STR) - CommonTools.create_file_if_not_exist(self.log_file) - except Exception as error: - raise Exception(Errors.EXECUTE_RESULT['gauss_0411'] % error) diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/params_checkers.py b/src/gausskernel/dbmind/tools/ai_manager/tools/params_checkers.py deleted file mode 100644 index d8f91c97f..000000000 --- a/src/gausskernel/dbmind/tools/ai_manager/tools/params_checkers.py +++ /dev/null @@ -1,503 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -############################################################################# -# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd. 
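Putting the two pieces of log.py above together: MaskHandler rewrites each record's message before it is emitted, and MainLog attaches one masking stream handler plus one size-rotated file handler to a named logger. The miniature below shows the same wiring with a single masking rule; the logger name, path, limits and regex are example values, not the original configuration.

```python
# Miniature of the MainLog/MaskHandler wiring: mask-then-emit stream handler
# plus a size-rotated file handler on the same logger.
import logging
import re
from logging.handlers import RotatingFileHandler


class PasswordMaskingHandler(logging.StreamHandler):
    def emit(self, record):
        record.msg = re.sub(r"(-W|--password)\s*\S+", r"\1 *** ",
                            str(record.msg))
        super().emit(record)


logger = logging.getLogger("ai_manager")
logger.setLevel(logging.INFO)
logger.addHandler(PasswordMaskingHandler())
logger.addHandler(RotatingFileHandler(
    "/tmp/ai_manager.log", maxBytes=1024 * 1024, backupCount=5))
logger.info("gs_ctl start -W secret")   # emitted as: gs_ctl start -W ***
```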
-# FileName : params_checkers.py -# Version : V1.0.0 -# Date : 2021-03-01 -# Description : Check params -############################################################################# - -import re -import sys - - -sys.path.append(sys.path[0] + "/../") -from definitions.errors import Errors -from definitions.constants import Constant - - -# ===================================ANOMALY DETECTION PARAMS======================================= -# param name: check param func name -PARAMS_CHECK_MAPPING = { - 'module': 'check_module', - 'package_path': 'check_path', - 'action': 'check_action', - 'config_info': 'check_config_info', - 'scene': 'check_scene', - 'agent_nodes': 'check_agent_nodes', - 'install_path': 'check_install_path', - 'version': 'check_version', - 'service_list': 'check_service_list', - 'stopping_list': 'check_stopping_list', - 'install_nodes': 'check_install_nodes', - 'ca_info': 'check_ca_info' -} - - -def check_string(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % (str(obj), 'string')) - - -def check_valid_string(obj): - for rac in Constant.CMD_CHECK_LIST: - flag = obj.find(rac) - if flag >= 0: - raise Exception(Errors.ILLEGAL['gauss_0601'] % (rac, obj)) - - -def check_digit(obj): - if not str(obj).isdigit(): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % (str(obj), 'digit')) - - -def check_list(obj): - if not isinstance(obj, list): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % (str(obj), 'list')) - - -def check_dict(obj): - if not isinstance(obj, dict): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % (str(obj), 'dict')) - - -def check_password(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'password type') - res = re.search(r'^[A-Za-z0-9~!@#%^*\-_=+?,\.]+$', obj) - if not res: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'password character') - - -def check_ip(obj): - """ - function : check if the ip address is valid - input : String - output : NA - """ - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % (str(obj), 'ip string')) - if obj == '0.0.0.0': - valid = True - else: - valid = re.match("^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[" - "0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[" - "0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[" - "0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$", obj) - if not valid: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'ip') - - -def check_port(obj): - """ - Judge if the port is valid - """ - if not str(obj).isdigit(): - raise Exception(Errors.ILLEGAL['gauss_0603'] % ('port', 'digit')) - port = int(obj) - if port <=1023 or port > 65535: - raise Exception(Errors.ILLEGAL['gauss_0602'] % 'port') - - -def check_scene(obj): - check_string(obj) - if obj not in Constant.VALID_SCENE: - raise Exception(Errors.PARAMETER['gauss_0206'] % obj) - - -def check_path(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'path type') - for rac in Constant.PATH_CHECK_LIST: - flag = obj.find(rac) - if flag >= 0: - raise Exception(Errors.ILLEGAL['gauss_0601'] % (rac, obj)) - - -def check_module(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('module name', 'string')) - if obj not in Constant.VALID_MODULE_NAME: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'module name') - - -def check_action(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('action 
name', 'string')) - if obj not in Constant.VALID_ACTION_NAME: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'action name') - - -def check_tls(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('tls type', 'string')) - if obj not in Constant.VALID_BOOL_TYPE: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'tls type') - - -def check_pull_kafka(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('pull kafka param', 'string')) - if obj not in Constant.VALID_BOOL_TYPE: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'pull_kakfa param') - - -def check_database_name(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('database name', 'string')) - if obj not in Constant.VALID_DATABASE_NAME: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'database name') - - -def check_collection_type(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('collection type', 'string')) - if obj not in Constant.VALID_COLLECTION_TYPE: - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'collection type') - - -def check_timer_interval(obj): - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('timer_interval', 'string')) - if not re.match(r'^\d+[WDHMS]$', obj): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'timer_interval in config file') - - -def check_config_info(obj): - check_dict(obj) - for section_name, section_value in obj.items(): - if section_name not in Constant.VALID_CONFIG_SECTION_OPT_MAPPING: - raise Exception(Errors.PARAMETER['gauss_0204'] % section_name) - check_dict(section_value) - for opt_name, opt_value in obj[section_name].items(): - if opt_name not in Constant.VALID_CONFIG_SECTION_OPT_MAPPING[section_name]: - raise Exception(Errors.PARAMETER['gauss_0205'] % opt_name) - if opt_name != 'collection_item': - check_string(opt_value) - if opt_name in Constant.CHECK_IP: - check_ip(opt_value) - if opt_name in Constant.CHECK_PORT: - check_port(opt_value) - if opt_name == 'tls': - check_tls(opt_value) - else: - check_list(opt_value) - for item in opt_value: - check_list(item) - if len(item) != 3: - raise ValueError(Errors.PARAMETER['gauss_0208'] % ( - item, 'given 3 items')) - if str(item[0]).lower() not in Constant.VALID_COLLECT_DATA_TYPE: - raise Exception(Errors.PARAMETER['gauss_0208'] % (str(item[0]), 'cn or dn')) - check_ip(item[1]) - check_port(item[2]) - - -def check_agent_nodes(obj): - if not isinstance(obj, list): - raise Exception(Errors.ILLEGAL['gauss_0603'] % ('type of agent nodes', 'list')) - for node in obj: - if not isinstance(node, dict): - raise Exception(Errors.ILLEGAL['gauss_0603'] % ('type of agent node info', 'dict')) - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - check_ip(ip) - check_valid_string(uname) - check_password(pwd) - - -# REMOTE -def check_install_path(obj): - """ - Check install path when remote option - """ - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'path type') - for rac in Constant.PATH_CHECK_LIST: - flag = obj.find(rac) - if flag >= 0: - raise Exception(Errors.ILLEGAL['gauss_0601'] % (rac, obj)) - - -def check_version(obj): - """ - Check version info when remote option - """ - if not isinstance(obj, str): - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'path type') - if not obj.isdigit(): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('version info', 'digit')) - - -def 
check_service_list(obj): - """ - Check cron info when remote option - """ - if not isinstance(obj, list): - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'cron service type') - for cron in obj: - if cron not in Constant.ANOMALY_INSTALL_CRON_LOCALLY\ - + Constant.OPENGAUSS_ANOMALY_INSTALL_CRON_LOCALLY\ - + Constant.OPENGAUSS_ANOMALY_INSTALL_CRON_REMOTE: - raise Exception(Errors.PARAMETER['gauss_0201'] % 'service cron params') - - -def check_stopping_list(obj): - """ - Check cron info when remote option - """ - if not isinstance(obj, list): - raise ValueError(Errors.ILLEGAL['gauss_0602'] % 'cron stopping type') - for cron in obj: - if cron not in Constant.ANOMALY_STOP_CMD_LOCALLY\ - + Constant.OPENGAUSS_ANOMALY_STOP_CMD_LOCALLY\ - + Constant.OPENGAUSS_ANOMALY_STOP_CMD_REMOTE: - raise Exception(Errors.PARAMETER['gauss_0201'] % 'stopping cron params') - - -def check_ca_info(obj): - """ - Check ca cert information. - """ - if not isinstance(obj, dict): - raise ValueError(Errors.ILLEGAL['gauss_0603'] % ('ca cert info', 'type of dict')) - for key, value in obj.items(): - if key not in Constant.VALID_CA_INFO: - raise ValueError(Errors.ILLEGAL['gauss_0604'] % key) - if 'pass' in key: - check_password(obj[key]) - else: - check_path(obj[key]) - - -# index advisor -def check_install_nodes(obj): - """ - Check install_nodes info - """ - if not isinstance(obj, list): - raise Exception(Errors.ILLEGAL['gauss_0603'] % ('type of install nodes', 'list')) - for node in obj: - if not isinstance(node, dict): - raise Exception(Errors.ILLEGAL['gauss_0603'] % ('type of install node info', 'dict')) - ip = node.get(Constant.NODE_IP) - uname = node.get(Constant.NODE_USER) - pwd = node.get(Constant.NODE_PWD) - check_ip(ip) - check_valid_string(uname) - check_password(pwd) - - -class ConfigChecker(object): - """ - Check config info value - """ - check_mapping = { - "server": { - "host": check_ip, - "listen_host": check_ip, - "listen_port": check_port, - "pull_kafka": check_pull_kafka - }, - "database": { - "name": check_database_name, - "host": check_ip, - "port": check_port, - "user": check_valid_string, - "size": check_digit, - "max_rows": check_digit, - "database_path": check_path - }, - "agent": { - "cluster_name": check_valid_string, - "collection_type": check_collection_type, - "collection_item": check_valid_string, - "channel_capacity": check_digit, - "sink_timer_interval": check_timer_interval, - "source_timer_interval": check_timer_interval - }, - "security": { - "tls": check_tls, - "ca": check_path, - "server_cert": check_path, - "server_key": check_path, - "agent_cert": check_path, - "agent_key": check_path - }, - "log": { - "log_path": check_path - } - } - - @staticmethod - def check(section, option, value): - try: - func = ConfigChecker.check_mapping[section][option] - except Exception: - raise Exception(Errors.ILLEGAL['gauss_0605'] % (section, option)) - try: - func(value) - except Exception as error: - raise Exception(Errors.ILLEGAL['gauss_0606'] % str(error)) - - -class LostChecker(object): - def __init__(self, params): - self.params = params - self.scene = None - self.module = None - self.action = None - self.tls = False - - def init_globals(self): - self.scene = self.params.get('scene') - self.module = self.params.get('module') - self.action = self.params.get('action') - if not self.scene: - raise Exception(Errors.PARAMETER['gauss_0209'] % 'scene') - if not self.module: - raise Exception(Errors.PARAMETER['gauss_0209'] % 'module') - if not self.action: - raise Exception(Errors.PARAMETER['gauss_0209'] 
% 'action') - - def check_agent_nodes(self): - """ - Check agent node info for opengauss scene. - :return: - """ - agent_nodes = self.params.get(Constant.AGENT_NODES) - if not isinstance(agent_nodes, list): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'agent nodes') - for each_node in agent_nodes: - if not isinstance(each_node, dict): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'each agent node') - node_ip = each_node.get(Constant.NODE_IP) - user_name = each_node.get(Constant.NODE_USER) - password = each_node.get(Constant.NODE_PWD) - if not all([node_ip, user_name, password]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'agent node info') - - def check_ca_info(self): - """ - Check ca information in https mode - :return: - """ - ca_info = self.params.get(Constant.CA_INFO) - if not isinstance(ca_info, dict): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'ca info') - cert_path = ca_info.get(Constant.CA_CERT_PATH) - cert_key = ca_info.get(Constant.CA_KEY_PATH) - cert_pass = ca_info.get(Constant.CA_PASSWORD) - if not all([cert_path, cert_pass, cert_key]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'ca info') - - @staticmethod - def _check_server(server_info): - """ - Check server info of config - :param server_info: - :return: - """ - host = server_info.get(Constant.SERVER_HOST) - listen_host = server_info.get(Constant.SERVER_LISTEN_HOST) - listen_port = server_info.get(Constant.SERVER_LISTEN_PORT) - pull_kafka = server_info.get(Constant.SERVER_PULL_KAFKA) - if pull_kafka and (pull_kafka not in Constant.VALID_BOOL_TYPE): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'pull_kafka') - if not all([host, listen_host, listen_port]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'server config info') - - @staticmethod - def _check_database(database_info): - """ - Check database info of config - :param database_info: - :return: - """ - name = database_info.get(Constant.DATABASE_NAME) - host = database_info.get(Constant.DATABASE_HOST) - user = database_info.get(Constant.DATABASE_USER) - port = database_info.get(Constant.DATABASE_PORT) - size = database_info.get(Constant.DATABASE_SIZE) - max_rows = database_info.get(Constant.DATABASE_MAX_ROWS) - if name is None or (not isinstance(name, str)): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'database name') - if name.lower() not in Constant.VALID_DATABASE_NAME: - raise Exception(Errors.ILLEGAL['gauss_0602'] % 'database name') - if name.lower() != Constant.VALID_DATABASE_NAME[0]: - if not all([host, user, port]): - raise Exception(Errors.PARAMETER['gauss_0201'] % 'database info') - if (size is not None) and (not re.match(r'^\d+$', size)): - raise Exception( - Errors.PARAMETER['gauss_0208'] % ('size of database', 'positive integer')) - if (max_rows is not None) and (not re.match(r'^\d+$', max_rows)): - raise Exception( - Errors.PARAMETER['gauss_0208'] % ('max_rows of database', 'positive integer')) - - @staticmethod - def _check_agent(agent_info): - """ - Check agent info when opengauss scene - :param agent_info: - :return: - """ - cluster_name = agent_info.get(Constant.AGENT_CLUSTER_NAME) - collection_type = agent_info.get(Constant.AGENT_COLLECTION_TYPE) - collection_item = agent_info.get(Constant.AGENT_COLLECTION_ITEM) - channel_capacity = agent_info.get(Constant.AGENT_CHANNEL_CAPACITY) - source_timer_interval = agent_info.get(Constant.AGENT_SOURCE_TIMER_INTERVAL) - sink_timer_interval = agent_info.get(Constant.AGENT_SINK_TIMER_INTERVAL) - if not all([cluster_name, collection_type, collection_item]): - raise 
Exception(Errors.PARAMETER['gauss_0201'] % 'agent config info')
-        if collection_type not in Constant.VALID_COLLECTION_TYPE:
-            raise Exception(Errors.PARAMETER['gauss_0201'] % 'collection_type of agent info')
-        if (channel_capacity is not None) and (not re.match(r'^\d+$', channel_capacity) or int(
-                channel_capacity) == 0):
-            raise Exception(
-                Errors.PARAMETER['gauss_0208'] % (
-                    'channel_capacity of agent info', 'positive integer'))
-        if (source_timer_interval is not None) and (
-                not re.match(r'^\d+[WDHMS]$', source_timer_interval)):
-            raise Exception(Errors.PARAMETER['gauss_0201'] % 'source_timer_interval of agent info')
-        if (sink_timer_interval is not None) and (
-                not re.match(r'^\d+[WDHMS]$', sink_timer_interval)):
-            raise Exception(Errors.PARAMETER['gauss_0201'] % 'sink_timer_interval of agent info')
-
-    def check_config_info(self):
-        """
-        Check config info
-        :return:
-        """
-        config_info = self.params.get(Constant.CONFIG_INFO)
-        if not isinstance(config_info, dict):
-            raise Exception(Errors.PARAMETER['gauss_0208'] % ('config info', 'dict'))
-        server = config_info.get(Constant.SERVER)
-        if not isinstance(server, dict):
-            raise Exception(Errors.PARAMETER['gauss_0201'] % 'server info')
-        self._check_server(server)
-        database = config_info.get(Constant.DATABASE)
-        if not isinstance(database, dict):
-            raise Exception(Errors.PARAMETER['gauss_0201'] % 'database info')
-        self._check_database(database)
-        agent = config_info.get(Constant.AGENT)
-        if not isinstance(agent, dict):
-            raise Exception(Errors.PARAMETER['gauss_0201'] % 'agent info')
-        if self.scene == Constant.SCENE_OPENGAUSS:
-            self._check_agent(agent)
-        security = config_info.get(Constant.AD_CONF_SECTION_SECURITY)
-        tls = security.get(Constant.AD_CONF_TLS_FLAG) if security else None
-        if tls in ['True', True]:
-            self.tls = True
-
-    def run(self):
-        self.init_globals()
-        if self.scene == Constant.SCENE_OPENGAUSS:
-            self.check_agent_nodes()
-        if self.action == 'uninstall':
-            return
-        self.check_config_info()
-        if self.tls is True:
-            self.check_ca_info()
-
diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/set_cron.py b/src/gausskernel/dbmind/tools/ai_manager/tools/set_cron.py
deleted file mode 100644
index 8017237dd..000000000
--- a/src/gausskernel/dbmind/tools/ai_manager/tools/set_cron.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-#############################################################################
-# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd.
-# FileName : set_cron.py
-# Version : V1.0.0
-# Date : 2021-03-01
-# Description : Crontab tools
-#############################################################################
-
-import os
-import optparse
-import re
-import sys
-import stat
-import subprocess
-sys.path.append(sys.path[0] + "/../")
-from definitions.constants import Constant
-from definitions.errors import Errors
-from config_cabin.config import TMP_DIR
-from tools.common_tools import CommonTools
-from tools.log import MainLog
-
-flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
-modes = stat.S_IWUSR | stat.S_IRUSR
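
The Cron class below rewrites the calling user's crontab through a temporary
file and wraps each scheduled command in flock. Roughly what one generated
entry looks like in `crontab -l` output (the lock path and command here are
invented for illustration):

```
*/30 * * * * flock -nx /tmp/ai_monitor.lock -c 'python3 main.py --role monitor' >> /dev/null 2>&1 &
```

`flock -nx` takes a non-blocking exclusive lock on the lock file, so a run that
is still in progress keeps the next cron tick from starting a second copy of
the same task.

-
-
-class Cron:
-    def __init__(self, task, cycle, cmd):
-        self.task = task
-        self.cycle = cycle
-        self.cmd = cmd
-        self.curr_path = os.path.dirname(os.path.realpath(__file__))
-        self.tmp_file = os.path.join(self.curr_path, "cron_tmp_%s" % os.getpid())
-        self.logger = MainLog(Constant.CRON_LOG_NAME).get_logger()
-
-    def get_lock_file(self):
-        """
-        Get lock file path for cron service. 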
- """ - lock_file_dir = TMP_DIR - CommonTools.mkdir_with_mode(lock_file_dir, Constant.AUTH_COMMON_DIR_STR) - lock_file_name = '' - for name in Constant.TASK_NAME_LIST: - if name in self.cmd.split('role')[-1]: - lock_file_name = 'ai_' + name + '.lock' - if not lock_file_name: - raise Exception(Errors.CONTENT_OR_VALUE['gauss_0502'] % self.cmd) - else: - return os.path.join(lock_file_dir, lock_file_name) - - @staticmethod - def exe_cmd(command, ignore_error=False): - """ - Execute command in string or list. - """ - if isinstance(command, list): - command = ' '.join(command) - status, output = subprocess.getstatusoutput(command) - if status != 0 and not ignore_error: - raise Exception(Errors.EXECUTE_RESULT['gauss_0401'] % (command, 'prepare cron', output)) - return status, output - - def get_old_cron(self): - """ - Get old cron string by command of crontab -l - """ - get_cron_cmd = ["crontab", "-l"] - status, result = self.exe_cmd(get_cron_cmd, ignore_error=True) - if status != 0 and result.find(Constant.CRON_INFO_EXPECTED) < 0: - raise Exception(Errors.EXECUTE_RESULT['gauss_0404'] % result) - if result.find(Constant.CRON_INFO_EXPECTED) >= 0: - return "" - return result.strip() - - def del_cron(self): - """ - Delete old cron by resetting a new cron file - """ - self.logger.debug("Deleting old cron...") - crontab_str = self.get_old_cron() - crontab_list = crontab_str.split('\n') - crontab_list_new = [] - clean_cmd = self.cmd - versions = re.findall(r'\d{5}/', self.cmd) - version = versions[0] if versions else None - if version: - clean_cmd = clean_cmd.split(version)[-1] - self.logger.debug('Clean cron with [%s]' % clean_cmd) - - for old_cron in crontab_list: - if clean_cmd not in old_cron: - crontab_list_new.append(old_cron) - cron_str = "\n".join(crontab_list_new) + "\n" - with os.fdopen(os.open(self.tmp_file, flags, modes), "w") as fp: - fp.write(cron_str) - del_cron_cmd = ["crontab", self.tmp_file] - status, output = self.exe_cmd(del_cron_cmd) - if os.path.isfile(self.tmp_file): - os.remove(self.tmp_file) - self.logger.info("Delete cron result:status[%s]-output[%s]." 
% (
-            status or Constant.SUCCESS, output))
-
-    def add_new_cron(self):
-        """
-        Add new cron with lock
-        """
-        self.logger.debug("Adding new cron...")
-        old_cron = self.get_old_cron()
-        # supplement a trailing newline so the new entry starts on its own line
-        if old_cron and not old_cron.endswith("\n"):
-            old_cron += "\n"
-        # Check cycle info
-        cycle = self.cycle[:-1]
-        unit = self.cycle[-1]
-        if not cycle.isdigit():
-            raise Exception(Errors.CONTENT_OR_VALUE['gauss_0503'] % (cycle, 'cron time setting'))
-        if unit not in ["m", "h", "d", "M", "w"]:
-            raise Exception(Errors.CONTENT_OR_VALUE['gauss_0503'] % (unit, 'cron time unit'))
-
-        # Set the cycle string; the finer-grained fields are pinned so the job
-        # fires once per interval instead of on every matching minute.
-        new_cron = ""
-        if unit == "m":
-            new_cron = "*/%s * * * * " % cycle
-        elif unit == "h":
-            new_cron = "0 */%s * * * " % cycle
-        elif unit == "d":
-            new_cron = "0 0 */%s * * " % cycle
-        elif unit == "M":
-            new_cron = "0 0 1 */%s * " % cycle
-        elif unit == "w":
-            new_cron = "0 0 * * */%s " % cycle
-        lock_file = self.get_lock_file()
-        if lock_file != "":
-            new_cron += "flock -nx %s -c '%s' >> /dev/null 2>&1 &\n" \
-                        % (lock_file, self.cmd)
-        else:
-            new_cron += "%s >> /dev/null 2>&1 &\n" % self.cmd
-        self.logger.debug("new cron is : %s" % new_cron)
-
-        all_cron = old_cron + new_cron
-        # Write cron file
-        with os.fdopen(os.open(self.tmp_file, flags, modes), "w") as fp:
-            fp.write(all_cron)
-        add_cron_cmd = ["crontab", self.tmp_file]
-        status, output = self.exe_cmd(add_cron_cmd)
-        if os.path.exists(self.tmp_file):
-            os.remove(self.tmp_file)
-        self.logger.info("Add cron result:status[%s]-output[%s]." % (
-            status or Constant.SUCCESS, output))
-
-
-if __name__ == "__main__":
-    parser = optparse.OptionParser(conflict_handler='resolve')
-    parser.disable_interspersed_args()
-    parser.usage = "%prog -t [add|del] --cycle interval command"
-    parser.epilog = "Example: set_cron.py -t add --cycle 1d 'reboot'"
-    parser.add_option('-t', dest='task', help='Specify add or delete tasks')
-    parser.add_option('-c', "--cycle", dest='cycle',
-                      help='Specify the execution interval of the task, '
-                           'unit: m:minute; h:hour; d:day; M:month; w:week')
-    opts, args = parser.parse_args()
-    cron = Cron(opts.task, opts.cycle, " ".join(args))
-    try:
-        if cron.task == "add":
-            cron.del_cron()
-            cron.add_new_cron()
-        elif cron.task == "del":
-            cron.del_cron()
-        else:
-            raise Exception(Errors.PARAMETER['gauss_0201'] % '-t')
-
-    except Exception as e:
-        if os.path.isfile(cron.tmp_file):
-            os.remove(cron.tmp_file)
-        raise Exception(str(e))
diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/channel.py b/src/gausskernel/dbmind/tools/ai_server/agent/channel.py
deleted file mode 100644
index 0b82cfb74..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/agent/channel.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-#############################################################################
-# Copyright (c): 2021, Huawei Tech. Co., Ltd.
-# FileName : channel.py
-# Version :
-# Date : 2021-4-7
-# Description : Task queue
-#############################################################################
-
-from queue import Queue, Empty, Full
-
-
-class Channel:
-    """
-    This is the base class for buffer channels; it acts as a buffering
-    medium between Source and Sink.
-    """
-
-    def __init__(self):
-        pass
-
-    def put(self, event):
-        pass
-
-    def take(self):
-        pass
-
-    def size(self):
-        pass
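
Channel only fixes the put/take/size interface; MemoryChannel below implements
it on top of queue.Queue. A quick usage sketch (standalone, with invented
metric values and a plain logging.Logger standing in for the project logger):

```
import logging

logging.basicConfig()
chan = MemoryChannel(name='os_metrics', logger=logging.getLogger(), maxsize=2)
chan.put({'cpu_usage': '3.0'})
chan.put({'cpu_usage': '3.2'})
chan.put({'cpu_usage': '3.5'})  # queue full: a warning is logged, event dropped
print(chan.take())              # -> {'cpu_usage': '3.0'}
```

-
-
-class MemoryChannel(Channel):
-    """
-    This class inherits from Channel and buffers data in memory. 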
- """ - - def __init__(self, name, logger, maxsize=None): - """ - :param name: string, channel name - :param maxsize: int, maxsize of channel - """ - Channel.__init__(self) - self.name = name - self.maxsize = maxsize - self.memory = Queue(maxsize) - self.logger = logger - - def put(self, event): - if self.maxsize and self.size() >= self.maxsize: - self.logger.warn("channel {name} has reach queue maxsize".format(name=self.name)) - try: - self.memory.put(event, block=True, timeout=0.2) - except Full: - self.logger.warn("throw away {name} data when reach maxsize".format(name=self.name)) - - def take(self): - try: - return self.memory.get_nowait() - except Empty: - self.logger.warn('channel {name} is empty.'.format(name=self.name)) - return None - - def size(self): - rv = self.memory.qsize() - return 0 if rv is None else rv - - -class ChannelManager: - """ - This class is used for managing multiple MemoryChannel object. - """ - - def __init__(self, logger): - self._channels = {} - self.logger = logger - - def add_channel(self, name, maxsize): - """ - Add MemoryChannel object. - :params name: string, name of Memorychannel object - :params maxsize: int, maxsize of Memorychannel object - """ - self._channels[name] = MemoryChannel(name=name, maxsize=maxsize, logger=self.logger) - self.logger.info('channel {name} is created.'.format(name=name)) - - def get_channel(self, name): - return self._channels[name] - - def check(self, name): - if name not in self._channels: - return False - return True - - def get_channel_content(self): - contents = {} - for name, queue in self._channels.items(): - event = queue.take() - if event is not None: - contents[name] = event - return contents - - def size(self): - return len(self._channels) diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/db_source.py b/src/gausskernel/dbmind/tools/ai_server/agent/db_source.py deleted file mode 100644 index c4a35bbb6..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/agent/db_source.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : db_source.py -# Version : -# Date : 2021-4-7 -# Description : Collection Task Management -############################################################################# - -try: - import sys - import os - import threading - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) - from agent.source import Source -except ImportError as err: - sys.exit("db_source.py: Failed to import module: %s." % str(err)) - - -class TaskHandler(threading.Thread): - """ - This class inherits the threading.Thread, it is used for managing single task. - """ - - def __init__(self, interval, function, *args, **kwargs): - """ - :param interval: int, execute interval for task, unit is 'second'. 
-        :param function: function object for task
-        :param args: list parameters
-        :param kwargs: dict parameters
-        """
-        threading.Thread.__init__(self)
-        self._function = function
-        self._interval = interval
-        self._args = args
-        self._kwargs = kwargs
-        self._finished = threading.Event()
-        self._res = None
-        self._channel = None
-        self._logger = kwargs["logger"]
-
-    def set_channel(self, channel):
-        self._channel = channel
-
-    def run(self):
-        while not self._finished.is_set():
-            try:
-                metrics = self._function(*self._args, **self._kwargs)
-                self._channel.put(metrics)
-            except Exception as e:
-                self._logger.exception(e)
-            self._finished.wait(self._interval)
-
-    def cancel(self):
-        self._finished.set()
-
-
-class DBSource(Source):
-    """
-    This class inherits from Source and is used for acquiring multiple metrics
-    from the database at a specified time interval.
-    """
-
-    def __init__(self):
-        Source.__init__(self)
-        self.running = False
-        self._tasks = {}
-
-    def add_task(self, name, interval, task, maxsize, *args, **kwargs):
-        """
-        Add task in DBSource object.
-        :param name: string, task name
-        :param interval: int, execute interval for task, unit is 'second'.
-        :param task: function object of task
-        :param maxsize: int, maxsize of channel in task.
-        :param args: list parameters
-        :param kwargs: dict parameters
-        :return: NA
-        """
-        if name not in self._tasks:
-            self._tasks[name] = TaskHandler(interval, task, *args, **kwargs)
-            self._channel_manager.add_channel(name, maxsize)
-            self._tasks[name].daemon = True
-            self._tasks[name].set_channel(self._channel_manager.get_channel(name))
-
-    def start(self):
-        for _, task in self._tasks.items():
-            task.start()
-
-    def stop(self):
-        for _, task in self._tasks.items():
-            task.cancel()
diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/http_sink.py b/src/gausskernel/dbmind/tools/ai_server/agent/http_sink.py
deleted file mode 100644
index 398d7333e..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/agent/http_sink.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-#############################################################################
-# Copyright (c): 2021, Huawei Tech. Co., Ltd.
-# FileName : http_sink.py
-# Version :
-# Date : 2021-4-7
-# Description : Sends data to server.
-#############################################################################
-
-try:
-    import os
-    import sys
-    import time
-    import json
-    from urllib import request
-
-    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
-    from common.utils import Common
-    from agent.sink import Sink
-except ImportError as err:
-    sys.exit("http_sink.py: Failed to import module: %s." % str(err))
-
-header = {'Content-Type': 'application/json'}
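
HttpSink below drains every channel once per interval and POSTs the merged
payload to the server's /sink endpoint. Roughly what one request body looks
like (field values invented for illustration; the "database" field is built as
cluster_name:host:port:data_type, with dots in the host replaced by
underscores):

```
{
    "OSExporter": {"cpu_usage": "3.0", "memory_usage": "1.2", "io_wait": "0.05", "timestamp": "2021-04-07T10:00:00Z"},
    "DatabaseExporter": {"current_connection": 7, "qps": 42.0, "timestamp": "2021-04-07T10:00:00Z"},
    "database": "cluster1:127_0_0_1:5432:dn"
}
```

-
-
-class HttpSink(Sink):
-    """
-    This class inherits from Sink and sends data to the server over http/https
-    at a specified time interval.
-    """
-
-    def __init__(self, interval, url, context, logger):
-        """
-        :param interval: int, time interval when send data.
-        :param url: string, http/https url.
-        :param context: certificate context for https method. 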
- """ - Sink.__init__(self) - self._interval = interval - self.running = False - self._url = url - self.context = context - self.logger = logger - try: - self.host = Common.acquire_collection_info()["ip"] - self.port = Common.acquire_collection_info()["port"] - self.data_type = Common.acquire_collection_info()["data_type"] - self.cluster_name = Common.parser_config_file("agent", "cluster_name") - except Exception as err_msg: - logger.error(str(err_msg)) - raise Exception(str(err_msg)) - - def process(self): - self.logger.info('Begin send data to %s' % self._url) - while self.running: - time.sleep(self._interval) - contents = self._channel_manager.get_channel_content() - retry_times = 0 - if contents: - contents.update( - {"database": "%s:%s:%s:%s" % (self.cluster_name, self.host.replace(".", "_"), - str(self.port), self.data_type)}) - while True: - try: - req = request.Request(self._url, headers=header, - data=json.dumps(contents).encode('utf-8'), - method='POST') - request.urlopen(req, context=self.context) - break - except Exception as e: - retry_times += 1 - self.logger.warn(str(e) + " Retry times: %d." % retry_times, exc_info=True) - if retry_times >= 10: - raise ConnectionError("Failed to send data, \nError: %s" % str(e)) - time.sleep(1) - else: - self.logger.warn('Not found data in each channel.') diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/manage_agent.py b/src/gausskernel/dbmind/tools/ai_server/agent/manage_agent.py deleted file mode 100644 index cc7c8044a..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/agent/manage_agent.py +++ /dev/null @@ -1,171 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : manage_agent.py -# Version : -# Date : 2021-4-7 -# Description : Agent service management -############################################################################# - -try: - import sys - import os - import ssl - import signal - from configparser import ConfigParser, NoOptionError, NoSectionError - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) - from agent.task.os_exporter import OSExporter - from agent.task.database_exporter import DatabaseExporter - from common.utils import Common - from common.logger import CreateLogger - from agent.channel import ChannelManager - from agent.db_source import DBSource - from agent.http_sink import HttpSink -except ImportError as err: - sys.exit("manage_agent.py: Failed to import module: %s." % str(err)) - -# LOGGER is start log object -LOGGER = CreateLogger("debug", "agent.log").create_log() - - -class Agent: - def __init__(self, pid_file, logger): - self.pid_file = pid_file - # logger is agent service log object - self.logger = logger - - def check_agent_parameter(self, config): - """ - Check if the agent parameter is valid, if the parameter is valid, - then return parameters dict, otherwise exit process. - :param config: config handler for config file. - :return: agent parameters dict. 
- """ - agent_parameters = {} - try: - host = config.get('server', 'host') - listen_port = config.get('server', 'listen_port') - Common.check_ip_and_port(host, listen_port) - except (NoOptionError, NoSectionError) as e: - self.logger.error(e) - sys.exit(1) - else: - agent_parameters['host'] = host - agent_parameters['listen_port'] = listen_port - - default_agent_parameter_dicts = {'sink_timer_interval': '10S', - 'source_timer_interval': '10S', - 'channel_capacity': 1000} - for parameter, default_value in default_agent_parameter_dicts.items(): - try: - if parameter == 'channel_capacity': - agent_parameter_value = config.getint('agent', parameter) - agent_parameters[parameter] = agent_parameter_value - else: - agent_parameter_value = config.get('agent', parameter) - agent_parameters[parameter] = Common.transform_time_string( - agent_parameter_value, mode='to_second') - except Exception as e: - self.logger.error("error occur when acquire %s: %s, use default_value: %s" % ( - parameter, str(e), default_value)) - agent_parameters[parameter] = default_agent_parameter_dicts[parameter] - - return agent_parameters - - def start_agent(self, config_path): - """ - Start agent service. - :param config_path: string, config path. - :return: NA - """ - if not os.path.isfile(config_path): - raise Exception('Config file: %s does not exists.' % config_path) - # check agent is running or not. - if os.path.isfile(self.pid_file): - pid = Common.check_proc_exist("role agent") - if pid: - self.logger.warn("Process already exist, pid:[%s]" % pid) - raise Exception("Process already running, can't start again.") - else: - os.remove(self.pid_file) - # write process pid to file - if not os.path.isdir(os.path.dirname(self.pid_file)): - os.makedirs(os.path.dirname(self.pid_file), 0o700) - with open(self.pid_file, mode='w') as f: - f.write(str(os.getpid())) - try: - config = ConfigParser() - config.read(config_path) - - collection_type = config.get("agent", "collection_type") - - agent_parameters = self.check_agent_parameter(config) - context = Common.check_certificate_setting(self.logger, config_path, "agent") - - protocol = "http" - if context is not None: - protocol = "https" - url = "%s://" % protocol + agent_parameters["host"] + ":" + \ - agent_parameters["listen_port"] + "/sink" - - chan = ChannelManager(LOGGER) - source = DBSource() - http_sink = HttpSink(interval=agent_parameters['sink_timer_interval'], url=url, - context=context, logger=LOGGER) - source.channel_manager = chan - http_sink.channel_manager = chan - - if collection_type == "all": - tasks = [("OSExporter", OSExporter(LOGGER).__call__), - ("DatabaseExporter", DatabaseExporter(LOGGER).__call__)] - elif collection_type == "os": - tasks = [("OSExporter", OSExporter(LOGGER).__call__)] - else: - tasks = [("DatabaseExporter", DatabaseExporter(LOGGER).__call__)] - - for task_name, task_func in tasks: - source.add_task(name=task_name, - interval=agent_parameters['source_timer_interval'], - task=task_func, - maxsize=agent_parameters['channel_capacity'], - logger=LOGGER) - try: - # start to collect data - source.start() - except Exception as e: - self.logger.error("Failed to start agent task, Error: %s." % e) - raise Exception("Failed to start agent, Error: %s." % e) - # push data to server - http_sink.start() - except Exception as err_msg: - self.logger.error(str(err_msg)) - sys.stdout.write("Error: " + str(err_msg) + "\n") - except KeyboardInterrupt: - self.logger.warn("Keyboard exception is received. 
The process ends.") - finally: - if os.path.isfile(self.pid_file): - os.remove(self.pid_file) - - def stop_agent(self): - try: - if not os.path.exists(self.pid_file): - self.logger.warn("The pid file does not exists.") - std = Common.check_proc_exist("role agent") - if not std: - raise Exception("ERROR: Process not running.") - else: - kill_proc = "kill -9 %s" % std - Common.execute_cmd(kill_proc) - else: - with open(self.pid_file, mode='r') as f: - pid = int(f.read()) - os.kill(pid, signal.SIGTERM) - os.remove(self.pid_file) - self.logger.info("Successfully stopped agent.") - except Exception as e: - self.logger.error("Failed to stop agent, Error: %s" % str(e)) - sys.stdout.write("Error: " + str(e) + "\n") - if os.path.exists(self.pid_file): - os.remove(self.pid_file) diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/sink.py b/src/gausskernel/dbmind/tools/ai_server/agent/sink.py deleted file mode 100644 index d0b3911d1..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/agent/sink.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : sink.py -# Version : -# Date : 2021-4-7 -# Description : -############################################################################# - - -class Sink: - """ - This is father class which is used for getting data from ChannelManager object and - sending data at a specified time interval. - """ - - def __init__(self): - self._channel_manager = None - self.running = False - - @property - def channel_manager(self): - return self._channel_manager - - @channel_manager.setter - def channel_manager(self, channel_manager): - self._channel_manager = channel_manager - - def process(self): - pass - - def start(self): - self.running = True - self.process() - - def stop(self): - self.running = False - diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/source.py b/src/gausskernel/dbmind/tools/ai_server/agent/source.py deleted file mode 100644 index 030d4d57d..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/agent/source.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : source.py -# Version : -# Date : 2021-4-7 -# Description : -############################################################################# - - -class Source: - """ - This is father class which is used for acquiring mutiple metric data at same time. - """ - - def __init__(self): - self._channel_manager = None - - def start(self): - pass - - def stop(self): - pass - - @property - def channel_manager(self): - return self._channel_manager - - @channel_manager.setter - def channel_manager(self, channel_manager): - self._channel_manager = channel_manager diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/task/database_exporter.py b/src/gausskernel/dbmind/tools/ai_server/agent/task/database_exporter.py deleted file mode 100644 index c802d16c8..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/agent/task/database_exporter.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. 
-# FileName : database_exporter.py -# Version : -# Date : 2021-4-7 -# Description : get gaussDb database information -############################################################################# - -try: - import os - import sys - import time - import re - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) - from common.utils import Common, DBAgent -except ImportError as err: - sys.exit("database_exporter.py: Failed to import module: %s." % str(err)) - - -class DatabaseExporter: - # all the metrics can be acquired by gsql - - def __init__(self, logger): - try: - self.port = Common.acquire_collection_info()["port"] - except Exception as err_msg: - logger.error(str(err_msg)) - raise Exception(str(err_msg)) - self.logger = logger - self.cursor = DBAgent(port=self.port, database="postgres") - - def guc_parameter(self): - """ - get database guc parameter - :return: {work_mem: value, shared_buffers: value, max_connections: value} - """ - guc_params = ["work_mem", "shared_buffers", "max_connections"] - # get work_mem, shared_buffers, max_connections guc parameter - guc_values = [] - for param in guc_params: - try: - std = self.cursor.fetch_one_result("show %s;" % param, self.logger) - guc_values.append(std[0]) - except Exception as err_msg: - raise Exception("Failed go get guc parameter, Error:%s." % err_msg) - result = dict(zip(guc_params, guc_values)) - return result - - def current_connections(self): - """ - get current connections - :return: - """ - # get current_connections - sql_cmd = "select count(1) from pg_stat_activity where client_port is not null;" - try: - std = self.cursor.fetch_one_result(sql_cmd, self.logger) - except Exception as err_msg: - raise Exception("Failed go get current connections, Error:%s." % err_msg) - return std[0] - - def qps(self): - """ - Calculates QPS information. - :return: - """ - sql = "select sum(select_count+update_count+insert_count+delete_count) from gs_sql_count;" - try: - # get first execute result - t1 = time.time() - std = self.cursor.fetch_one_result(sql, self.logger) - n1 = int(std[0]) - time.sleep(1) - # get second execute result - t2 = time.time() - std = self.cursor.fetch_one_result(sql, self.logger) - n2 = int(std[0]) - except Exception as err_msg: - raise Exception("Failed to execute cmd: %s, \nError: %s." % (sql, err_msg)) - qps = (n2 - n1) / (t2 - t1) - return qps - - def __call__(self, *args, **kwargs): - self.logger.info("Start to collect database data.") - retry_times = 0 - while True: - try: - if retry_times >= 1: - self.cursor = DBAgent(port=self.port, database="postgres") - guc_parameter = self.guc_parameter() - current_connection = self.current_connections() - qps = self.qps() - break - except Exception as connect_err: - retry_times += 1 - self.logger.error("Failed to collect database data, retry times: %d." % retry_times) - if retry_times == 10: - raise Exception(str(connect_err)) - time.sleep(5) - data = {'current_connection': current_connection, - 'qps': qps, "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ")} - data.update(guc_parameter) - self.logger.info("Successfully collected database data: %s." 
% data) - return data diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/task/os_exporter.py b/src/gausskernel/dbmind/tools/ai_server/agent/task/os_exporter.py deleted file mode 100644 index 123395b6a..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/agent/task/os_exporter.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : os_exporter.py -# Version : -# Date : 2021-4-7 -# Description : get system information -############################################################################# - -try: - import os - import sys - import time - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) - from common.utils import Common -except ImportError as err: - sys.exit("os_exporter.py: Failed to import module: %s." % str(err)) - - -class OSExporter: - def __init__(self, logger): - try: - collection_info = Common.acquire_collection_info() - except Exception as err_msg: - logger.error(str(err_msg)) - raise Exception(str(err_msg)) - self.logger = logger - self.ip = collection_info["ip"] - self.port = collection_info["port"] - - def cpu_usage(self): - """ - Obtaining the CPU Usage of the GaussDB - :return: current cpu usage of the GaussDb - """ - proc_pid = Common.get_proc_pid(self.ip, self.port) - cmd = "ps -ux | awk '{if ($2==\"%s\")print}' |awk '{print $3}'" % proc_pid - std, _ = Common.execute_cmd(cmd) - if not std: - return "0.0" - return std.decode("utf-8").strip() - - def memory_usage(self): - """ - Obtaining the memory Usage of the GaussDB - :return: current memory usage of the GaussDb - """ - proc_pid = Common.get_proc_pid(self.ip, self.port) - cmd = "ps -ux | awk '{if ($2==\"%s\")print}' |awk '{print $4}'" % proc_pid - std, _ = Common.execute_cmd(cmd) - if not std: - return "0.0" - return std.decode("utf-8").strip() - - @staticmethod - def io_wait(): - """ - Obtaining the system io_wait - :return: io_wait info - """ - std, _ = Common.execute_cmd("iostat") - if not std: - return "0.0" - usage = std.decode("utf-8").split("\n")[3].split()[3] - return usage - - def io_write(self): - """ - Obtaining the io_write info of the GaussDB - :return: io_write info - """ - proc_pid = Common.get_proc_pid(self.ip, self.port) - cmd = "pidstat -d | awk '{if ($4==\"%s\")print}' | awk '{print $6}'" % proc_pid - std, _ = Common.execute_cmd(cmd) - if not std: - return "0.0" - return std.decode("utf-8").strip() - - def io_read(self): - """ - Obtaining the io_read info of the GaussDB - :return: io_read info - """ - proc_pid = Common.get_proc_pid(self.ip, self.port) - cmd = "pidstat -d | awk '{if ($4==\"%s\")print}' | awk '{print $5}'" % proc_pid - std, _ = Common.execute_cmd(cmd) - if not std: - return "0.0" - return std.decode("utf-8").strip() - - def disk_used_size(self): - """ - Obtaining the system disk used size - :return: current disk used size of the GaussDb - """ - proc_pid = Common.get_proc_pid(self.ip, self.port) - get_data_path = "ps -ux | awk '{if ($2==\"%s\")print}'" % proc_pid - std, _ = Common.execute_cmd(get_data_path) - if not std: - self.logger.warn("There is no process of: %s." % proc_pid) - return "0.0M" - std = std.decode() - data_dir = std.split()[std.split().index("-D") + 1] - if not os.path.isdir(data_dir): - self.logger.warn("The data dir does not exist: %s." 
% data_dir)
-            return "0.0M"
-        disk_info, _ = Common.execute_cmd("du -sh %s" % data_dir.strip())
-        usage = Common.unify_byte_unit(disk_info.decode("utf-8").split()[0])
-        return usage
-
-    def __call__(self, *args, **kwargs):
-        self.logger.info("Start to collect os data.")
-        cpu_usage = self.cpu_usage()
-        memory_usage = self.memory_usage()
-        disk_used_size = self.disk_used_size()
-        io_wait = self.io_wait()
-        io_read = self.io_read()
-        io_write = self.io_write()
-        os_data = {"cpu_usage": cpu_usage, "memory_usage": memory_usage,
-                   "disk_usage": disk_used_size, "io_wait": io_wait,
-                   "io_read": io_read, "io_write": io_write,
-                   "timestamp": "%s" % time.strftime("%Y-%m-%dT%H:%M:%SZ")}
-        self.logger.info("Successfully collected os data:%s." % os_data)
-        return os_data
diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/my_sr.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/my_sr.py
deleted file mode 100644
index bd1b0d202..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/my_sr.py
+++ /dev/null
@@ -1,149 +0,0 @@
-import pandas as pd
-import numpy as np
-import matplotlib.pyplot as plt
-
-from gs_aiops.detector.algorithm.anomal_detect_algorithm.utils import *
-
-
-class SpectralResidual:
-    def __init__(self, series, threshold, mag_window, score_window, batch_size):
-        self.__series__ = series
-        self.__values__ = self.__series__['value'].tolist()
-        self.__threshold__ = threshold
-        self.__mag_window = mag_window
-        self.__score_window = score_window
-        self.__anomaly_frame = None
-        self.__batch_size = batch_size
-        if self.__batch_size <= 0:
-            self.__batch_size = len(series)
-
-        self.__batch_size = max(12, self.__batch_size)
-        self.__batch_size = min(len(series), self.__batch_size)
-
-    def detect(self):
-        if self.__anomaly_frame is None:
-            self.__anomaly_frame = self.__detect()
-
-        return self.__anomaly_frame
-
-    def __detect(self):
-        # Split the data into batches and run the detection on each batch.
-        anomaly_frames = []
-        for i in range(0, len(self.__series__), self.__batch_size):
-            start = i
-            end = i + self.__batch_size
-            end = min(end, len(self.__series__))
-            if end - start >= 12:
-                anomaly_frames.append(self.__detect_core(self.__series__[start:end]))
-            else:
-                ext_start = max(0, end - self.__batch_size)
-                ext_frame = self.__detect_core(self.__series__[ext_start:end])
-                anomaly_frames.append(ext_frame[start-ext_start:])
-
-        return pd.concat(anomaly_frames, axis=0, ignore_index=True)
-
-    def __detect_core(self, series):
-        # Core spectral residual detection routine for a single batch.
-        values = series['value'].values
-        # SR predicts best when the target point sits near the window center,
-        # so each series is extended with estimated points before detection.
-        extended_series = SpectralResidual.extend_series(values)
-        # Saliency magnitudes obtained via the Fourier transform.
-        mags = self.spectral_residual_transform(extended_series)
-        anomaly_scores = self.generate_spectral_score(mags)
-        anomaly_frame = pd.DataFrame({Timestamp: series['timestamp'].values,
-                                      Value: values,
-                                      Mag: mags[:len(values)],
-                                      AnomalyScore: anomaly_scores[:len(values)]})
-        anomaly_frame[IsAnomaly] = np.where(anomaly_frame[AnomalyScore] > self.__threshold__, True, False)
-
-        return anomaly_frame
-
-    def generate_spectral_score(self, mags):
-        ave_mag = average_filter(mags, n=self.__score_window)
-        safeDivisors = np.clip(ave_mag, EPS, ave_mag.max())
-
-        raw_scores = np.abs(mags - ave_mag) / safeDivisors
-        scores = np.clip(raw_scores / 10.0, 0, 1.0)
-
-        return scores
-
-    def spectral_residual_transform(self, values):
-        trans = np.fft.fft(values)
-        A = np.abs(trans)
-        P = np.angle(trans)
-        L = np.log(A)
-        AL = average_filter(L, 
self.__mag_window) - R = np.exp(L - AL) - trans.imag = P - trans.real = R - # trans.real = [0 for i in range(len(A))] - S_ = np.exp(trans) - res = np.fft.ifft(S_) - res = np.sqrt(res.real ** 2 + res.imag ** 2) - return res - - # yy = np.fft.fft(values) - # A = yy.real - # P = yy.imag - # V = np.sqrt(A ** 2 + P ** 2) - # eps_index = np.where(V <= EPS)[0] - # V[eps_index] = EPS - # L = np.log(V) - # L[eps_index] = 0 - # residual = np.exp(L - average_filter(L, self.__mag_window)) - # yy.imag = residual * P / V - # yy.real = residual * A / V - # yy.imag[eps_index] = 0 - # yy.real[eps_index] = 0 - # result = np.fft.ifft(yy) - # S = np.sqrt(result.real ** 2 + result.imag ** 2) - # return S - - # trans = np.fft.fft(values) - # mag = np.sqrt(trans.real ** 2 + trans.imag ** 2) - # eps_index = np.where(mag <= EPS)[0] - # mag[eps_index] = EPS - # - # mag_log = np.log(mag) - # mag_log[eps_index] = 0 - # - # spectral = np.exp(mag_log - average_filter(mag_log, n=self.__mag_window)) - # - # trans.real = trans.real * spectral / mag - # trans.imag = trans.imag * spectral / mag - # trans.real[eps_index] = 0 - # trans.imag[eps_index] = 0 - # - # wave_r = np.fft.ifft(trans) - # mag = np.sqrt(wave_r.real ** 2 + wave_r.imag ** 2) - # return mag - - @staticmethod - def predict_next(values): - if len(values) <= 1: - raise ValueError(f'data should contain at least 2 numbers') - - v_last = values[-1] - n = len(values) - - slopes = [(v_last - v) / (n - 1 - i) for i, v in enumerate(values[:-1])] - - return values[1] + sum(slopes) - - @staticmethod - def extend_series(values, extend_num=5, look_ahead=5): - if look_ahead < 1: - raise ValueError('look_ahead must be at least 1') - - extension = [SpectralResidual.predict_next(values[-look_ahead - 2:-1])] * extend_num - return np.concatenate((values, extension), axis=0) - - -if __name__ == '__main__': - series = pd.read_csv('./dataset/sample.csv') - a_detector = SpectralResidual(series[:1000], THRESHOLD, MAG_WINDOW, SCORE_WINDOW, 1000) - res = a_detector.detect() - print(res) - # res['mag'].plot() - res['value'].plot() - plt.show() diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/spectral_residual.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/spectral_residual.py deleted file mode 100644 index f799614df..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/spectral_residual.py +++ /dev/null @@ -1,108 +0,0 @@ -# SR.py -import numpy as np -import scipy as sc -import pandas as pd -import matplotlib.pyplot as plt -from gs_aiops.detector.algorithm.anomal_detect_algorithm.utils import * -from scipy.fftpack import fft, ifft -from gs_aiops.tools import generate_anomal_data -from gs_aiops.detector.algorithm.anomal_detect_algorithm.utils import load_data - - -class SR: - ''' - This module realises a spectral residual method for anomaly detection. 
The accepted input types are list, np.ndarray and pd.Series.
-    '''
-
-    def __init__(self, X=np.array([]), slice_window=3, map_window=3, tresh=1):
-        self.slice_window = slice_window
-        self.X = getData(X)
-        self.map_window = map_window
-        self.thresh = tresh
-
-    def run(self):
-        Smap = self.getSalienceMap(self.X)
-        result = np.array([1 if i > self.thresh else 0 for i in Smap])
-        return result, Smap
-
-    def setdata(self, data):
-        self.X = getData(data)
-
-    def setslicewindow(self, thresh):
-        self.slice_window = thresh
-
-    def plot(self):
-        raise NotImplementedError
-
-    def getSR(self, X):
-        '''
-        FFT, residual spectrum, then inverse FFT.
-        '''
-        X = getData(X)
-
-        # spectral_residual_transform
-        yy = fft(X)
-        A = yy.real
-        P = yy.imag
-        V = np.sqrt(A ** 2 + P ** 2)
-        eps_index = np.where(V <= EPS)[0]
-        V[eps_index] = EPS
-        L = np.log(V)
-        L[eps_index] = 0
-        residual = np.exp(L - average_filter(L, self.map_window))
-        yy.imag = residual * P / V
-        yy.real = residual * A / V
-        yy.imag[eps_index] = 0
-        yy.real[eps_index] = 0
-        result = ifft(yy)
-        S = np.sqrt(result.real ** 2 + result.imag ** 2)
-        # a Gaussian filter could additionally be applied here
-        return S
-
-    def getSalienceMap(self, X):
-        Map = self.getSR(self.extendseries(X))[:len(X)]
-        ave_mag = average_filter(Map, n=self.slice_window)
-        ave_mag[np.where(ave_mag <= EPS)] = EPS
-
-        return abs(Map - ave_mag) / ave_mag
-
-    def estimate(self, X):
-        '''
-        Estimate the next point x(n+1) from the average slope:
-        x(n+1) = x(n - m + 1) + m * g, where g = sum(g(x(n), x(n - i))) / m.
-        '''
-        n = len(X)
-        gradients = [(X[-1] - v) / (n - 1 - i) for i, v in enumerate(X[:-1])]
-        return X[1] + np.sum(gradients)
-
-    def extendseries(self, X, k=5):
-        '''
-        Extend the original series with k estimated points.
-        '''
-        X = np.append(X, self.estimate(X[-k - 2:-1]).repeat(k))
-        return X
-
-
-
-if __name__ == '__main__':
-    # data = generate_anomal_data.generate_period_trend_ts()
-    data = load_data('../../../../anomal_data/art_daily_flatmiddle.csv')
-    sr = SR(data, tresh=1.5)
-    res, ma = sr.run()
-    print(res)
-    print(len(res))
-    plt.subplot(211)
-    plt.plot(data)
-    for index, value in enumerate(data):
-        if res[index] == 1:
-            plt.scatter(index, value, c='r')
-    plt.subplot(212)
-    plt.plot(ma)
-    plt.show()
-    # data = np.array([1, 2, 3])
-    # print(SR().extendseries(data))
\ No newline at end of file
diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/statistics.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/statistics.py
deleted file mode 100644
index b8227e050..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/statistics.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import numpy as np
-import matplotlib.pyplot as plt
-from statsmodels.tsa.seasonal import seasonal_decompose, STL
-
-
-def generate_period_trend_ts(period=100, repeat=10):
-    x = np.arange(period * repeat) * (2 * np.pi / period)
-    noise = np.random.rand(period * repeat) / 3
-    ts = np.sin(x) + noise
-    ts = ts + np.arange(period * repeat) / (period * repeat)
-    return ts
-
-
-def n_sigma(data):
-    from statsmodels.datasets import co2
-    # NOTE: the input series is discarded and replaced by the co2 sample dataset
-    data = co2.load(True).data
-    print(data.head())
-    data_len = len(data)
-    data = data.resample('M').mean().ffill()
-    res = STL(data).fit()
-    print(type(data))
-    print(len(data), len(res.resid), len(res.trend), len(res.seasonal))
-    res.plot()
-    plt.show()
-
-
-if __name__ == '__main__':
-    data = generate_period_trend_ts()
-    n_sigma(data)
\ No newline at end of file
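
n_sigma above stops at the STL decomposition and never applies the rule its
name promises. A minimal sketch of the missing step, assuming the residual
component res.resid from the fit (the helper name three_sigma_anomalies is
illustrative):

```
import numpy as np

def three_sigma_anomalies(resid, n=3):
    """Flag points whose residual deviates more than n sigma from the mean."""
    resid = np.asarray(resid, dtype=float)
    mu, sigma = resid.mean(), resid.std()
    return np.abs(resid - mu) > n * sigma  # boolean anomaly mask
```

diff --git 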
a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/utils.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/utils.py deleted file mode 100644 index 55d1f9abb..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/utils.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np - - -IsAnomaly = "isAnomaly" -AnomalyId = "id" -AnomalyScore = "score" -Value = "value" -Timestamp = "timestamp" -Mag = "mag" -ExpectedValue = "expectedValue" -UpperBoundary = "upperBoundary" -LowerBoundary = "lowerBoundary" - -MAX_RATIO = 0.25 -EPS = 1e-8 -THRESHOLD = 0.3 -MAG_WINDOW = 3 -SCORE_WINDOW = 40 - - -def average_filter(values, n=3): - if n >= len(values): - n = len(values) - - res = np.cumsum(values, dtype=float) - res[n:] = res[n:] - res[:-n] - res[n:] = res[n:] / n - - for i in range(1, n): - res[i] /= (i + 1) - - return res \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/decompose.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/decompose.py deleted file mode 100644 index 0dca834d6..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/decompose.py +++ /dev/null @@ -1,4 +0,0 @@ -class TSDecompose: - def __init__(self): - pass - diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/forecast/__init__.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/forecast/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/forecast/auto_arima.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/forecast/auto_arima.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/forecast/prophet.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/forecast/prophet.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/detector.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/detector.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/__init__.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/performance/__init__.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/performance/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/resource/__init__.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/resource/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/security/__init__.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/security/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/workload/__init__.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/metrics/workload/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/slow_sql/__init__.py 
b/src/gausskernel/dbmind/tools/ai_server/app/monitor/slow_sql/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/slow_sql/get_table_and_column.py b/src/gausskernel/dbmind/tools/ai_server/app/monitor/slow_sql/get_table_and_column.py
deleted file mode 100644
index 40f25c599..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/app/monitor/slow_sql/get_table_and_column.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import pglast
-from pglast import parse_sql, Node
-
-
-def get_from_select(sql):
-    root = Node(parse_sql(sql))
-    if root[0].stmt.node_tag == 'SelectStmt':
-        pass
\ No newline at end of file
diff --git a/src/gausskernel/dbmind/tools/ai_server/common/__init__.py b/src/gausskernel/dbmind/tools/ai_server/common/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/src/gausskernel/dbmind/tools/ai_server/common/encrypt_decrypt_handler.py b/src/gausskernel/dbmind/tools/ai_server/common/encrypt_decrypt_handler.py
deleted file mode 100644
index 7220f4638..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/common/encrypt_decrypt_handler.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/python3
-# -*- coding:utf-8 -*-
-#############################################################################
-# Copyright (c): 2021, Huawei Tech. Co., Ltd.
-# FileName : encrypt_decrypt_handler.py
-# Version : V1.0.0
-# Date : 2021-03-01
-# Description : aes_cbc_util
-#############################################################################
-import sys
-import hashlib
-
-try:
-    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
-    from cryptography.hazmat.backends import default_backend
-except Exception as err:
-    sys.exit("encrypt_decrypt_handler.py: Failed to import module: %s." 
% str(err))
-
-
-class AesCbcUtil(object):
-    @staticmethod
-    def aes_cbc_decrypt_with_path(path):
-        with open(path + '/server.key.cipher', 'rb') as f:
-            cipher_txt = f.read()
-        with open(path + '/server.key.rand', 'rb') as f:
-            rand_txt = f.read()
-        if not cipher_txt:
-            return None
-        server_vector_cipher_vector = cipher_txt[16 + 1:16 + 1 + 16]
-        # pre-shared key rand
-        server_key_rand = rand_txt[:16]
-        # worker key
-        server_decrypt_key = hashlib.pbkdf2_hmac('sha256', server_key_rand,
-                                                 server_vector_cipher_vector, 10000,
-                                                 16)
-        enc = AesCbcUtil.aes_cbc_decrypt(cipher_txt, server_decrypt_key)
-        return enc
-
-    @staticmethod
-    def aes_cbc_decrypt(content, key):
-        AesCbcUtil.check_content_key(content, key)
-        if type(key) == str:
-            key = key.encode()
-        iv_len = 16
-        # pre-shared key iv
-        iv = content[16 + 1 + 16 + 1:16 + 1 + 16 + 1 + 16]
-        # pre-shared key ciphertext
-        enc_content = content[:iv_len]
-        backend = default_backend()
-        cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
-        decrypter = cipher.decryptor()
-        dec_content = decrypter.update(enc_content) + decrypter.finalize()
-        dec_content = dec_content.rstrip(b'\x00')[:-1].decode()
-        return dec_content
-
-    @staticmethod
-    def check_content_key(content, key):
-        if not (type(content) == bytes):
-            raise Exception("content's type must be bytes.")
-        elif not (type(key) in (bytes, str)):
-            raise Exception("key's type must be in (bytes, str).")
-        iv_len = 16
-        if not (len(content) >= (iv_len + 16)):
-            raise Exception("content's len must >= (iv_len + 16).")
-
-    @staticmethod
-    def aes_cbc_decrypt_with_multi(root_path):
-        """
-        Decrypt a secret that was split across multiple key_<n> directories.
-        """
-        num = 0
-        decrypt_str = ""
-        while True:
-            path = root_path + "/key_" + str(num)
-            part = AesCbcUtil.aes_cbc_decrypt_with_path(path)
-            if part is None or part == "":
-                break
-            decrypt_str = decrypt_str + part
-            # a short part marks the final fragment of the secret
-            if len(part) < 15:
-                break
-            num = num + 1
-        if decrypt_str == "":
-            return None
-        return decrypt_str
-
-
-
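
Taken together, a caller is expected to do something like the following (the
key directory layout key_0, key_1, ... with server.key.cipher and
server.key.rand files is what the code above assumes; the path is invented):

```
# decrypt a password whose cipher material lives under /opt/ai/keys/key_0, key_1, ...
secret = AesCbcUtil.aes_cbc_decrypt_with_multi('/opt/ai/keys')
if secret is None:
    raise RuntimeError('no cipher material found under /opt/ai/keys')
```

diff --git a/src/gausskernel/dbmind/tools/ai_server/common/logger.py b/src/gausskernel/dbmind/tools/ai_server/common/logger.py
deleted file mode 100644
index 54c8025d5..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/common/logger.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-#############################################################################
-# Copyright (c): 2021, Huawei Tech. Co., Ltd.
-# FileName : logger.py
-# Version :
-# Date : 2021-4-7
-# Description : Logger for project
-#############################################################################
-
-try:
-    import os
-    import sys
-    from configparser import ConfigParser
-
-    sys.path.insert(0, os.path.dirname(__file__))
-    from common.utils import Common, CONFIG_PATH
-except ImportError as err:
-    sys.exit("logger.py: Failed to import module: %s." 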
% str(err)) - - -class CreateLogger: - def __init__(self, level, log_name): - self.level = level - self.log_name = log_name - - def create_log(self): - config = ConfigParser() - config.read(CONFIG_PATH) - log_path = os.path.realpath(config.get("log", "log_path")) - if not os.path.isdir(log_path): - os.makedirs(log_path) - - logger = Common.create_logger(level=self.level, - log_name=self.log_name, - log_path=os.path.join(log_path, self.log_name)) - return logger diff --git a/src/gausskernel/dbmind/tools/ai_server/common/utils.py b/src/gausskernel/dbmind/tools/ai_server/common/utils.py deleted file mode 100644 index a68349cba..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/common/utils.py +++ /dev/null @@ -1,341 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : utils.py -# Version : -# Date : 2021-4-7 -# Description : Common Methods -############################################################################# - -import sys -import os -try: - import ssl - import json - import logging - import re - import pwd - import dateutil.parser - from subprocess import Popen, PIPE - from configparser import ConfigParser - from datetime import datetime, timedelta - from logging import handlers - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) -except ImportError as err: - sys.exit("utils.py: Failed to import module, \nError: %s" % str(err)) - -CONFIG_PATH = os.path.join(os.path.dirname(__file__), "../dbmind.conf") - - -class Common: - - @staticmethod - def transform_time_string(time_string, mode='timedelta'): - """ - Transform time string to timedelta or second, only support 'weeks(W), days(D), - hours(H), minutes(M), seconds(S) - :param time_string: string, time string like '10S', '20H', '3W'. - :param mode: string, 'timedelta' or 'to_second', 'timedelta' represent transform - time_string to timedelta, 'to_second' represent transform time_string to second. - :return: 'mode' is 'timedelta', return datetime.timedelta; 'mode' is 'to_second', - return int(second). - """ - if mode not in ('timedelta', 'to_second'): - raise ValueError('wrong mode {mode} in time_transfer.'.format(mode=mode)) - - time_prefix, time_suffix = re.match(r'(\d+)?([WDHMS])', time_string).groups() - - if time_suffix is None: - raise ValueError('wrong format {time_string} for time_string in time_transfer.'.format( - time_string=time_string)) - - if time_prefix is None: - time_prefix = 1 - else: - time_prefix = int(time_prefix) - - timedelta_mapper = {'W': timedelta(weeks=1), - 'D': timedelta(days=1), - 'H': timedelta(hours=1), - 'M': timedelta(minutes=1), - 'S': timedelta(seconds=1)} - - second_mapper = {'W': 7 * 24 * 3600, 'D': 24 * 3600, 'H': 3600, 'M': 60, 'S': 1} - - if mode == 'timedelta': - return timedelta_mapper.get(time_suffix) * time_prefix - if mode == 'to_second': - return second_mapper.get(time_suffix) * time_prefix - - @staticmethod - def create_logger(log_name, log_path, level): - """ - Create logger. - :param log_name: string, log name. - :param log_path: string, log path. - :param level: string, log level such as 'INFO', 'WARN', 'ERROR'. 
- """ - logger = logging.getLogger(log_name) - agent_handler = handlers.RotatingFileHandler(filename=log_path, - maxBytes=1024 * 1024 * 100, - backupCount=5) - agent_handler.setFormatter(logging.Formatter( - "[%(asctime)s %(levelname)s]-[%(filename)s][%(lineno)d]: %(message)s")) - logger.addHandler(agent_handler) - logger.setLevel( - getattr(logging, level.upper()) if hasattr(logging, level.upper()) else logging.INFO) - return logger - - @staticmethod - def unify_byte_unit(byte_info): - """ - Transfer unit of K、M、G、T、P to M - :param byte_info: string, byte information like '100M', '2K', '30G'. - :return: int, bytes size in unit of M, like '400M' -> 400. - """ - byte_info = byte_info.upper() - bytes_prefix, bytes_suffix = re.match(r'^(\d+|\d+\.\d+)([KMGTP])', byte_info).groups() - if bytes_prefix is None or bytes_suffix is None or bytes_suffix not in 'KMGTP': - raise ValueError('can not parse format of {bytes}'.format(bytes=byte_info)) - byte_unit_mapper = {'K': 1 / 1024, 'M': 1, 'G': 1024, 'T': 1024 * 1024, - 'P': 1024 * 1024 * 1024} - return byte_unit_mapper[bytes_suffix] * int(float(bytes_prefix)) - - @staticmethod - def check_certificate(certificate_path): - """ - Check whether the certificate is expired or invalid. - :param certificate_path: path of certificate. - output: dict, check result which include 'check status' and 'check information'. - """ - check_result = {} - certificate_warn_threshold = 365 - child = Popen(['openssl', 'x509', '-in', certificate_path, '-noout', '-dates'], - shell=False, stdout=PIPE, stdin=PIPE) - sub_chan = child.communicate() - if sub_chan[1] or not sub_chan[0]: - check_result['status'] = 'fail' - else: - check_result['status'] = 'success' - not_after = sub_chan[0].decode('utf-8').split('\n')[1].split('=')[1].strip() - end_time = dateutil.parser.parse(not_after).replace(tzinfo=None) - certificate_remaining_days = (end_time - datetime.now()).days - if 0 < certificate_remaining_days < certificate_warn_threshold: - check_result['level'] = 'warn' - check_result['info'] = "the '{certificate}' has {certificate_remaining_days} " \ - "days before out of date." \ - .format(certificate=certificate_path, - certificate_remaining_days=certificate_remaining_days) - elif certificate_remaining_days >= certificate_warn_threshold: - check_result['level'] = 'info' - check_result['info'] = "the '{certificate}' has {certificate_remaining_days} " \ - "days before out of date."\ - .format(certificate=certificate_path, - certificate_remaining_days=certificate_remaining_days) - else: - check_result['level'] = 'error' - check_result['info'] = "the '{certificate}' is out of date." \ - .format(certificate=certificate_path) - return check_result - - @staticmethod - def execute_cmd(cmd, shell=True): - """ - execute cmd - :param cmd: cmd str - :param shell: execute shell mode, True or False - :return: execute result - """ - if not shell: - cmd_list = [cmd.strip() for cmd in cmd.split("|")] - proc = Popen(cmd_list[0].split(), stdout=PIPE, stderr=PIPE, shell=shell) - if len(cmd_list) > 1: - for i in range(1, len(cmd_list)): - proc = Popen(cmd_list[i].split(), stdin=proc.stdout, stdout=PIPE, stderr=PIPE, - shell=shell) - else: - proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=shell) - std, err_msg = proc.communicate() - if proc.returncode != 0: - raise Exception("Failed to execute command: %s, \nError: %s." 
% (cmd, str(err_msg))) - return std, err_msg - - @staticmethod - def check_ip_and_port(ip, port): - """ - check the ip and port valid or not - """ - if not str(port).isdigit(): - raise Exception("The value type of port is invalid, it must be an integer.") - check_ip_pattern = r"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\." \ - r"(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\." \ - r"(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\." \ - r"(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" - if not re.match(check_ip_pattern, ip): - raise Exception("The host ip is invalid.") - - @staticmethod - def acquire_collection_info(): - """ - Obtain the IP addresses, port and collect data type - :return: {data_type: xxx, ip: xxx, port:xxx} - """ - collect_info = {"data_type": "", "ip": "", "port": ""} - value = Common.parser_config_file("agent", "collection_item") - # value is: '[data_type, host, port]' - if len(json.loads(value)) != 3: - raise ValueError("can not parse format of '{value}'".format(value=value)) - collect_info["data_type"] = json.loads(value)[0] - collect_info["ip"] = json.loads(value)[1] - collect_info["port"] = json.loads(value)[2] - Common.check_ip_and_port(collect_info["ip"], collect_info["port"]) - return collect_info - - @staticmethod - def parser_config_file(section, option, - conf_path=os.path.join(os.path.dirname(__file__), "../dbmind.conf")): - """ - parser the config file - :param option: config option - :param section: config section - :param conf_path: config file path - :return: config value - """ - if not os.path.isfile(conf_path): - raise Exception("The %s does not exists." % conf_path) - config = ConfigParser() - config.read(conf_path) - try: - value = config.get(section, option) - if not value: - raise Exception("The value of %s is empty." % option) - except Exception as err_msg: - raise Exception(str(err_msg)) - return value.lower() - - @staticmethod - def get_proc_pid(ip, port): - get_process_pid_cmd = "netstat -nltp | grep '%s:%s' | awk '{print $7}'" % (ip, port) - std, _ = Common.execute_cmd(get_process_pid_cmd) - if std.decode("utf-8").strip() == "-": - raise Exception("The collected process does not exist.") - # std is: b'PID/Program name\n' - proc_pid = std.decode("utf-8").strip().split("/")[0] - return proc_pid - - @staticmethod - def check_certificate_setting(logger, config_path, mode): - """ - If use https method, it is used for checking whether CA and Agent certificate is valid, - if certificate is not valid, then exit process, otherwise return right context; - if use http method, it skip checking and just return None. - :param mode: agent|server - :param logger: logger object. - :param config_path: string, config path. - :return: if 'https', return certificate context, else return None. 
- """ - logger.info("Checking certificate setting...") - context = None - config = ConfigParser() - config.read(config_path) - if config.has_option('security', 'tls') and config.getboolean('security', 'tls'): - try: - agent_cert = os.path.realpath(config.get('security', '%s_cert' % mode)) - agent_key = os.path.realpath(config.get('security', '%s_key' % mode)) - ca = os.path.realpath(config.get('security', 'ca')) - except Exception as e: - logger.error(e) - sys.exit(1) - else: - ssl_certificate_status = Common.check_certificate(agent_cert) - ca_certificate_status = Common.check_certificate(ca) - if ssl_certificate_status['status'] == 'fail' \ - or ca_certificate_status['status'] == 'fail': - logger.warn("error occur when check 'certificate'.") - else: - if ssl_certificate_status['level'] == 'error' or \ - ca_certificate_status['level'] == 'error': - logger.error("{ssl_certificate_info}; {ca_certificate_info}".format( - ssl_certificate_info=ssl_certificate_status['info'], - ca_certificate_info=ca_certificate_status['info'])) - sys.exit(1) - else: - logger.warn("{ssl_certificate_info}; {ca_certificate_info}".format( - ssl_certificate_info=ssl_certificate_status['info'], - ca_certificate_info=ca_certificate_status['info'])) - - pw_file = os.path.join(os.path.dirname(config_path), 'certificate/pwf') - from common.encrypt_decrypt_handler import AesCbcUtil - pw = AesCbcUtil.aes_cbc_decrypt_with_multi(pw_file) - - context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca) - context.check_hostname = False - context.load_cert_chain(certfile=agent_cert, keyfile=agent_key, password=pw) - logger.info("Successfully checked certificate setting.") - return context - - @staticmethod - def check_proc_exist(proc_name): - """ - check proc exist - :param proc_name: proc name - :return: proc pid - """ - check_proc = "ps ux | grep '%s' | grep -v grep | grep -v nohup | awk \'{print $2}\'" % proc_name - std, _ = Common.execute_cmd(check_proc) - current_pid = str(os.getpid()) - pid_list = [pid for pid in std.decode("utf-8").split("\n") if pid and pid != current_pid] - if not pid_list: - return "" - return " ".join(pid_list) - - -class DBAgent: - def __init__(self, port, host=None, user=None, password=None, database=None): - self.host = host - self.port = port - self.user = user - self.database = database - self.password = password - self.conn = None - self.cursor = None - self.connect() - - def __enter__(self): - self.connect() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - - def connect(self): - import psycopg2 - self.conn = psycopg2.connect(host=self.host, - user=self.user, - passwd=self.password, - database=self.database, - port=self.port) - self.conn.set_client_encoding('latin9') - self.cursor = self.conn.cursor() - - def fetch_all_result(self, sql, logger): - try: - self.cursor.execute(sql) - result = self.cursor.fetchall() - return result - except Exception as e: - logger.error(str(e)) - - def fetch_one_result(self, sql, logger): - try: - self.cursor.execute(sql) - result = self.cursor.fetchone() - return result - except Exception as e: - logger.error(str(e)) - - def close(self): - self.cursor.close() - self.conn.close() diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/__init__.py b/src/gausskernel/dbmind/tools/ai_server/dao/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/api.py b/src/gausskernel/dbmind/tools/ai_server/dao/api.py deleted file mode 100644 index a41b21d83..000000000 
--- a/src/gausskernel/dbmind/tools/ai_server/dao/api.py +++ /dev/null @@ -1,14 +0,0 @@ -from .foreign.mongodb import MongodbHandler -from .foreign.influxdb import InfluxdbHandler -from .local.sqlite import SqliteHandler - - -def get_actuate(actuate, host, port, user): - if actuate == 'mogodb': - pass - elif actuate == 'influxdb': - pass - elif actuate == 'sqlite': - pass - else: - pass diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/foreign/__init__.py b/src/gausskernel/dbmind/tools/ai_server/dao/foreign/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/foreign/influxdb.py b/src/gausskernel/dbmind/tools/ai_server/dao/foreign/influxdb.py deleted file mode 100644 index 7d40450ef..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/dao/foreign/influxdb.py +++ /dev/null @@ -1,2 +0,0 @@ -class InfluxdbHandler: - pass \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/foreign/mongodb.py b/src/gausskernel/dbmind/tools/ai_server/dao/foreign/mongodb.py deleted file mode 100644 index dd5d77f03..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/dao/foreign/mongodb.py +++ /dev/null @@ -1,2 +0,0 @@ -class MongodbHandler: - pass diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/local/__init__.py b/src/gausskernel/dbmind/tools/ai_server/dao/local/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/local/lsm_tree.py b/src/gausskernel/dbmind/tools/ai_server/dao/local/lsm_tree.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/dao/local/sqlite.py b/src/gausskernel/dbmind/tools/ai_server/dao/local/sqlite.py deleted file mode 100644 index 45792fde4..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/dao/local/sqlite.py +++ /dev/null @@ -1,21 +0,0 @@ -class SqliteHandler: - - def __init__(self, dbpath): - self.dbpath = dbpath - self.__conn = None - self.__cur = None - - def get_conn(self): - pass - - def create_table(self, sql): - pass - - def insert(self, sql, data): - pass - - def fetchone(self, sql): - pass - - def fetchall(self, sql): - pass diff --git a/src/gausskernel/dbmind/tools/ai_server/dbmind.conf b/src/gausskernel/dbmind/tools/ai_server/dbmind.conf deleted file mode 100644 index 2f0579c70..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/dbmind.conf +++ /dev/null @@ -1,33 +0,0 @@ -[server] -host = -listen_host = -listen_port = -pull_kafka = True - -[database] -name = sqlite -host = 0.0.0.0 -port = 0000 -user = root -size = 175000000 -max_rows = 1000000 -database_path = ./sqlite_data - -[agent] -cluster_name = -collection_type = all -collection_item = [data_type,host,port] -channel_capacity = 1000 -sink_timer_interval = 5S -source_timer_interval = 5S - -[security] -tls = False -ca = ./certificate/ca/ca.crt -server_cert = ./certificate/server/server.crt -server_key = ./certificate/server/server.key -agent_cert = ./certificate/agent/agent.crt -agent_key = ./certificate/agent/agent.key - -[log] -log_path = ./log diff --git a/src/gausskernel/dbmind/tools/ai_server/main.py b/src/gausskernel/dbmind/tools/ai_server/main.py deleted file mode 100644 index 7238aaaaa..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/main.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. 
-# FileName : main.py -# Version : -# Date : 2021-4-7 -# Description : Function entry file -############################################################################# - -try: - import sys - import os - import argparse - - sys.path.insert(0, os.path.dirname(__file__)) - from common.utils import Common, CONFIG_PATH - from common.logger import CreateLogger -except ImportError as err: - sys.exit("main.py: Failed to import module: %s." % str(err)) - -LOGGER = CreateLogger("debug", "start_service.log").create_log() - -current_dirname = os.path.dirname(os.path.realpath(__file__)) - -__version__ = '1.0.0' -__description__ = 'anomaly_detection: anomaly detection tool.' -__epilog__ = """ -epilog: - the 'a-detection.conf' will be read when the program is running, - the location of them is: - dbmind.conf: {detection}. - """.format(detection=CONFIG_PATH) - - -def usage(): - usage_message = """ - # start service. - python main.py start [--role {{agent,server}}] - # stop service. - python main.py stop [--role {{agent,server}}] - """ - return usage_message - - -def parse_args(): - parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, - description=__description__, - usage=usage(), - epilog=__epilog__) - - parser.add_argument('mode', choices=['start', 'stop']) - parser.add_argument('--role', choices=['agent', 'server'], - help="Run as 'agent', 'server'. " - "notes: ensure the normal operation of the openGauss in agent.") - parser.add_argument('-v', '--version', action='version') - parser.version = __version__ - return parser.parse_args() - - -def manage_service(args): - server_pid_file = os.path.join(current_dirname, './tmp/server.pid') - agent_pid_file = os.path.join(current_dirname, './tmp/agent.pid') - if args.role == 'server': - from service.my_app import MyApp - if args.mode == 'start': - MyApp(server_pid_file, LOGGER).start_service(CONFIG_PATH) - else: - MyApp(server_pid_file, LOGGER).stop_service() - elif args.role == 'agent': - from agent.manage_agent import Agent - if args.mode == 'start': - get_data_path = "ps -ux | grep -v grep | grep gaussdb" - std, _ = Common.execute_cmd(get_data_path) - if not std: - raise Exception("The GaussDb process does not exists, please check it.") - Agent(agent_pid_file, LOGGER).start_agent(CONFIG_PATH) - else: - Agent(agent_pid_file, LOGGER).stop_agent() - else: - print('FATAL: incorrect parameter.') - print(usage()) - return -1 - - -def main(): - args = parse_args() - if args.mode in ('start', 'stop') and args.role: - try: - manage_service(args) - except Exception as err_msg: - print(err_msg) - sys.exit(1) - else: - print("FATAL: incorrect parameter.") - print(usage()) - return -1 - - -if __name__ == '__main__': - main() diff --git a/src/gausskernel/dbmind/tools/ai_server/requirements.txt b/src/gausskernel/dbmind/tools/ai_server/requirements.txt deleted file mode 100644 index 3007e3bea..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -flask -flask_restful -influxdb -pymongo -python-dateutil -cryptography -pytest -pytest-cov -setuptools_rust diff --git a/src/gausskernel/dbmind/tools/ai_server/service/__init__.py b/src/gausskernel/dbmind/tools/ai_server/service/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/service/app.py b/src/gausskernel/dbmind/tools/ai_server/service/app.py deleted file mode 100644 index 6bce2cff6..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/service/app.py +++ /dev/null @@ -1,27 
+0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : app.py -# Version : -# Date : 2021-4-7 -# Description : -############################################################################# - - -class App: - """ - This is father class for app. - """ - - def __init__(self): - pass - - def add_resources(self): - pass - - # def init_database(self): - # pass - - def start_service(self, *args, **kwargs): - pass diff --git a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/__init__.py b/src/gausskernel/dbmind/tools/ai_server/service/datafactory/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/__init__.py b/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/agent_collect.py b/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/agent_collect.py deleted file mode 100644 index 1b880bf57..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/agent_collect.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : agent_collect.py -# Version : -# Date : 2021-4-7 -# Description : Receives and stores agent data. -############################################################################# - -try: - import sys - import os - from flask import request, Response - from flask_restful import Resource - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../")) - from common.logger import CreateLogger - from service.datafactory.storage.insert_data_to_database import SaveData -except ImportError as err: - sys.exit("agent_collect.py: Failed to import module: %s." % str(err)) - -LOGGER = CreateLogger("debug", "server.log").create_log() - - -class ResponseTuple: - """ - This class is used for generating a response tuple. - """ - - @staticmethod - def success(result=None): - if result is None: - return {"status": "success"}, 200 - - return {"status": "success", "result": result} - - @staticmethod - def error(msg="", status_code=400): - return {"status": "error", "msg": msg}, status_code - - -class Source(Resource): - """ - This class is used for acquiring metric data from agent and save data - in sqlite database. - """ - - def __init__(self): - pass - - @staticmethod - def post(): - content = request.json - client_ip = request.remote_addr - LOGGER.info("Successfully received request from: %s." 
% client_ip) - try: - insert_db = SaveData(LOGGER) - insert_db.run(content) - return ResponseTuple.success() - except Exception as e: - return ResponseTuple.error(msg=str(e), status_code=Response.status_code) - - @staticmethod - def get(): - return ResponseTuple.success(result="Server service is normal.") - - @staticmethod - def delete(): - return ResponseTuple.error(status_code=400) diff --git a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/kafka_collector.py b/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/kafka_collector.py deleted file mode 100644 index 2b1cf39a2..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/kafka_collector.py +++ /dev/null @@ -1,2 +0,0 @@ -class CollectFromKafka: - pass \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/storage/__init__.py b/src/gausskernel/dbmind/tools/ai_server/service/datafactory/storage/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/storage/insert_data_to_database.py b/src/gausskernel/dbmind/tools/ai_server/service/datafactory/storage/insert_data_to_database.py deleted file mode 100644 index a45595150..000000000 --- a/src/gausskernel/dbmind/tools/ai_server/service/datafactory/storage/insert_data_to_database.py +++ /dev/null @@ -1,211 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -############################################################################# -# Copyright (c): 2021, Huawei Tech. Co., Ltd. -# FileName : insert_data_to_database.py -# Version : -# Date : 2021-4-7 -# Description : Inserting data into the database -############################################################################# - -try: - import sys - import os - import pymongo - import sqlite3 - from influxdb import InfluxDBClient - - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../")) - from common.utils import Common -except ImportError as err: - sys.exit("insert_data_database.py: Failed to import module: %s." 
% str(err)) - - -class SaveData: - def __init__(self, logger): - self.logger = logger - try: - self.host = Common.parser_config_file("database", "host") - self.port = Common.parser_config_file("database", "port") - self.user = Common.parser_config_file("database", "user") - except Exception as err_msg: - logger.error(err_msg) - raise Exception(err_msg) - self.pwd = "" - self.collection_mapping = {"os": ["OSExporter"], - "database": ["DatabaseExporter"], - "all": ["OSExporter", "DatabaseExporter"] - } - - def insert_data(self, *args): - pass - - def run(self, agent_data="", kafka_data=""): - database = Common.parser_config_file("database", "name") - if database.lower() == "mongodb": - MongoDb(self.logger).insert_data(agent_data, kafka_data) - elif database == "influxdb": - InfluxDb(self.logger).insert_data(agent_data, kafka_data) - elif database == "sqlite": - Sqlite(self.logger).insert_data(agent_data, kafka_data) - - -class MongoDb(SaveData): - def __init__(self, logger): - super(MongoDb, self).__init__(logger) - - def insert_data(self, *args): - """ - insert data to database - :param args: args[0] is agent data, args[1] is kafka data - :return: None - """ - data_list = [data for data in args if data] - client = None - max_size = Common.parser_config_file("database", "size") - max_rows = Common.parser_config_file("database", "max_rows") - try: - max_size = int(max_size) if max_size else None - max_rows = int(max_rows) if max_rows else None - self.logger.info("Start to insert data: %s." % data_list) - agent_collection_type = Common.parser_config_file("agent", "collection_type") - # connect MongoDB - client = pymongo.MongoClient(host=self.host, port=int(self.port)) - for data in data_list: - # create and use db - my_db = client[data["database"]] - if self.pwd: - my_db.authenticate(self.user, self.pwd) - coll_list = my_db.list_collection_names() - for collection_type in self.collection_mapping[agent_collection_type]: - if not data.get(collection_type, None): - self.logger.warn("The %s data does not exist." % collection_type) - continue - # create collection if collection does not exist - if collection_type not in coll_list: - my_set = my_db.create_collection(collection_type, - capped=True, size=max_size, max=max_rows) - # get collection if collection exist - else: - my_set = my_db.get_collection(collection_type) - # insert data - my_set.insert_one(data[collection_type]) - self.logger.info("Successfully insert data.\n%s" % ("-" * 90)) - except Exception as err_msg: - self.logger.error(err_msg) - raise Exception(err_msg) - finally: - if client: - client.close() - - -class InfluxDb(SaveData): - def __init__(self, logger): - super(InfluxDb, self).__init__(logger) - - def insert_data(self, *args): - """ - insert data to database - :param args: args[0] is agent data, args[1] is kafka data - :return: None - """ - data_list = [data for data in args if data] - client = None - try: - self.logger.info("Start to insert data:%s." 
-                             % data_list)
-            agent_collection_type = Common.parser_config_file("agent", "collection_type")
-            # connect influxDb
-            if self.pwd:
-                client = InfluxDBClient(host=self.host, port=int(self.port),
-                                        username=self.user, password=self.pwd)
-            else:
-                client = InfluxDBClient(host=self.host, port=int(self.port))
-            # get_list_database() returns dicts such as {'name': 'db'},
-            # so compare against the database names, not the raw dicts
-            db_names = [db.get('name') for db in client.get_list_database()]
-            for data in data_list:
-                db_name = data["database"]
-                if db_name not in db_names:
-                    # create database if database does not exist
-                    client.create_database(db_name)
-                    # Create Data Retention Policy
-                    client.create_retention_policy("three_days", "3d", "1", db_name, True)
-
-                for collection_type in self.collection_mapping[agent_collection_type]:
-                    if not data.get(collection_type, None):
-                        self.logger.warn("The %s data does not exist." % collection_type)
-                        continue
-                    json_body = [
-                        {
-                            "measurement": collection_type,
-                            "fields": data[collection_type]
-                        }
-                    ]
-                    client.write_points(json_body, database=db_name)
-            self.logger.info("Successfully insert data.\n%s" % ("-" * 90))
-        except Exception as err_msg:
-            self.logger.error(err_msg)
-            raise Exception(err_msg)
-        finally:
-            if client:
-                client.close()
-
-
-class Sqlite(SaveData):
-    def __init__(self, logger):
-        super(Sqlite, self).__init__(logger)
-
-    def insert_data(self, *args):
-        """
-        insert data to database
-        :param args: args[0] is agent data, args[1] is kafka data
-        :return: None
-        """
-        data_list = [data for data in args if data]
-        cursor = None
-        conn = None
-        try:
-            self.logger.info("Start to insert data: %s." % data_list)
-            database_path = Common.parser_config_file("database", "database_path")
-            agent_collection_type = Common.parser_config_file("agent", "collection_type")
-            if not os.path.isdir(os.path.realpath(database_path)):
-                os.makedirs(os.path.realpath(database_path), mode=0o700)
-            for data in data_list:
-                # connect or create db
-                conn = sqlite3.connect(os.path.realpath(os.path.join(database_path,
-                                                                     data["database"] + ".db")))
-                self.logger.info("Successfully connect: %s." % (data["database"] + ".db"))
-
-                # get cursor
-                cursor = conn.cursor()
-                for collection_type in self.collection_mapping[agent_collection_type]:
-                    if not data.get(collection_type, None):
-                        self.logger.warn("The %s data does not exist." % collection_type)
-                        continue
-                    columns = []
-                    values = []
-                    # get table columns and values
-                    for key, value in data[collection_type].items():
-                        columns.append("%s text" % key)
-                        values.append("'" + str(value) + "'")
-
-                    # create table
-                    create_tb_sql = "CREATE TABLE IF NOT EXISTS %s (%s);" % (collection_type,
-                                                                             ", ".join(columns))
-                    self.logger.info("Command for create table: %s" % create_tb_sql)
-                    cursor.execute(create_tb_sql)
-                    self.logger.info("Successfully create table: %s." % collection_type)
-
-                    # insert data to table
-                    insert_data_sql = "insert into %s values (%s);" % (collection_type,
-                                                                       ", ".join(values))
-                    self.logger.info("Command for insert data: %s" % insert_data_sql)
-                    cursor.execute(insert_data_sql)
-                    conn.commit()
-            self.logger.info("Successfully insert data.\n%s" % ("-" * 90))
-        except Exception as err_msg:
-            self.logger.error("Failed to insert data to sqlite, \nError: %s" % str(err_msg))
-            if conn:
-                self.logger.info("Rolling back...")
-                conn.rollback()
-                self.logger.info("Successfully rollback.\n")
-            raise Exception(str(err_msg))
-        finally:
-            # cursor/conn may be None if the connection itself failed
-            if cursor:
-                cursor.close()
-            if conn:
-                conn.close()
diff --git a/src/gausskernel/dbmind/tools/ai_server/service/my_app.py b/src/gausskernel/dbmind/tools/ai_server/service/my_app.py
deleted file mode 100644
index f2902c9d6..000000000
--- a/src/gausskernel/dbmind/tools/ai_server/service/my_app.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/python3
-# -*- coding: utf-8 -*-
-#############################################################################
-# Copyright (c): 2021, Huawei Tech. Co., Ltd.
-# FileName : my_app.py
-# Version :
-# Date : 2021-4-7
-# Description : Server Service Management
-#############################################################################
-
-try:
-    import sys
-    import os
-    import signal
-    from flask import Flask
-    from flask_restful import Api
-    from configparser import ConfigParser
-
-    sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../.."))
-    from common.utils import Common
-    from service.app import App
-    from service.datafactory.collector.agent_collect import Source
-except ImportError as err:
-    sys.exit("my_app.py: Failed to import module, \nError: %s" % str(err))
-
-
-class MyApp(App):
-
-    def __init__(self, pid_file, logger):
-        App.__init__(self)
-        self.app = None
-        self.api = None
-        self.pid_file = pid_file
-        self.logger = logger
-
-    def initialize_app(self):
-        self.app = Flask(__name__)
-        self.app.config['debug'] = False
-        self.api = Api(self.app)
-
-    def add_resource(self):
-        self.api.add_resource(Source, '/sink')
-
-    def start_service(self, config_path):
-        # check service is running or not.
-        if os.path.isfile(self.pid_file):
-            pid = Common.check_proc_exist("role server")
-            if pid:
-                raise Exception("Error: Process already running, can't start again.")
-            else:
-                os.remove(self.pid_file)
-        # check config file exists
-        if not os.path.isfile(config_path):
-            raise Exception("Config file: %s does not exist."
-                            % config_path)
-        # get listen host and port
-        config = ConfigParser()
-        config.read(config_path)
-        listen_host = config.get("server", "listen_host")
-        port = config.get("server", "listen_port")
-        # write process pid to file
-        if not os.path.isdir(os.path.dirname(self.pid_file)):
-            os.makedirs(os.path.dirname(self.pid_file), 0o700)
-        with open(self.pid_file, mode='w') as f:
-            f.write(str(os.getpid()))
-        # start service
-        self.initialize_app()
-        self.add_resource()
-        try:
-            context = Common.check_certificate_setting(self.logger, config_path, "server")
-            self.logger.info("Start service...")
-            self.app.run(host=listen_host, port=int(port), ssl_context=context)
-            self.logger.warn("Service stopped, please check main.log for more information.")
-        except (Exception, KeyboardInterrupt) as err_msg:
-            self.logger.error(str(err_msg))
-            raise Exception(err_msg)
-        finally:
-            if os.path.isfile(self.pid_file):
-                os.remove(self.pid_file)
-
-    def stop_service(self):
-        try:
-            if not os.path.isfile(self.pid_file):
-                std = Common.check_proc_exist("role server")
-                if not std:
-                    raise Exception("ERROR: Process not running.")
-                else:
-                    kill_proc = "kill -9 %s" % std
-                    Common.execute_cmd(kill_proc)
-            else:
-                with open(self.pid_file, mode='r') as f:
-                    pid = int(f.read())
-                os.kill(pid, signal.SIGTERM)
-                os.remove(self.pid_file)
-            self.logger.info("Successfully stopped server.")
-        except Exception as err_msg:
-            self.logger.error("Failed to stop service, Error: %s" % str(err_msg))
-            sys.stdout.write("Error: " + str(err_msg) + "\n")
-            if os.path.isfile(self.pid_file):
-                os.remove(self.pid_file)
diff --git a/src/gausskernel/dbmind/tools/ai_server/setup.py b/src/gausskernel/dbmind/tools/ai_server/setup.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/README.md b/src/gausskernel/dbmind/tools/anomaly_detection/README.md
deleted file mode 100644
index efc62bcc3..000000000
--- a/src/gausskernel/dbmind/tools/anomaly_detection/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-![structure](structure.png)
-
-## Introduction to anomaly_detection
-
-**anomaly_detection** is a monitoring, anomaly detection and slow SQL RCA (Root Cause Analysis) tool based on time-series forecasting algorithms, aimed at openGauss
-metrics such as IO-Read, IO-Write, CPU-Usage, Memory-Usage, IO-Wait, disk_space, QPS, some key GUC parameters (work_mem, shared_buffers, max_connections) and information about slow SQL in the database WDR report. anomaly_detection can monitor multiple metrics at the same
-time and forecast the trend of each metric in the future; if a forecast value is beyond the specified scope, it can
-notify the user in a timely manner.
-
-anomaly_detection is composed of two elements, **agent** and **detector**. The **agent** is deployed on the same machine as openGauss;
-the **detector**, which can be divided into **collector** and **monitor**, is deployed on any machine that can communicate with the agent by _http_ or _https_. The default method is _http_;
-for security reasons, we suggest using _https_.
-
-## anomaly_detection Installation
-
-we suggest using _anaconda_ to manage your python environment.
-
-**agent**
-
-    python3.6+
-    python-dateutil
-    configparse
-    prettytable
-
-**detector**
-
-    sqlparse
-    python3.6+
-    python-dateutil
-    pandas
-    flask
-    flask_restful
-    configparse
-    prettytable
-    pmdarima
-
-notes:
-
-use ```python -m pip install --upgrade pip``` to upgrade your pip.
-
-you can use ```pip install -r requirements.txt``` to configure the environment.
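To make the README's forecasting claim concrete, here is a minimal, hypothetical sketch (an illustration only, not code from the deleted tool) of the kind of trend forecast the detector performs with the `pmdarima` dependency listed above, flagging forecast values that leave a user-specified scope:

```
# Hypothetical sketch: fit auto_arima on a collected metric series and warn
# when the forecast leaves a user-specified scope. The metric values and the
# threshold below are invented for illustration.
import numpy as np
import pmdarima as pm

cpu_usage = np.array([0.31, 0.35, 0.33, 0.40, 0.38, 0.45,
                      0.52, 0.49, 0.55, 0.61, 0.58, 0.66])

model = pm.auto_arima(cpu_usage, suppress_warnings=True)
forecast = model.predict(n_periods=5)  # next 5 collection intervals

upper_bound = 0.80  # the "specified scope" the README refers to
for step, value in enumerate(forecast, start=1):
    if value > upper_bound:
        print("step %d: forecast %.2f exceeds %.2f" % (step, value, upper_bound))
```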
-
-if you want to use `fbprophet`, it is recommended to use ```conda install fbprophet```
-to install `fbprophet`.
-
-
-## Parameter explanation
-
-use ```python main.py --help``` to get help:
-
-    usage:
-    python main.py start [--role {{agent,collector,monitor}}] # start local service.
-    python main.py stop [--role {{agent,collector,monitor}}] # stop local service.
-    python main.py start [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector,monitor}}]
-    # start the remote service.
-    python main.py stop [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector,monitor}}] # stop the remote service.
-    python main.py deploy [--user USER] [--host HOST] [--project-path PROJECT_PATH] # deploy project in remote host.
-    python main.py diagnosis [--query] [--start_time] [--finish_time] # rca for slow SQL.
-    python main.py show_metrics # display all monitored metrics (can only be executed on the 'detector' machine).
-    python main.py forecast [--metric-name METRIC_NAME] [--forecast-periods FORECAST_PERIODS]
-    [--forecast-method {{auto_arima, fbprophet}}] [--save-path SAVE_PATH] # forecast the future trend of a
-    metric (can only be executed on the 'detector' machine).
-
-    Anomaly-detection: a time series forecast and anomaly detection tool.
-
-    positional arguments:
-      {start,stop,deploy,show_metrics,forecast,diagnosis}
-
-    optional arguments:
-      -h, --help            show this help message and exit
-      --user USER           User of the remote server.
-      --host HOST           IP of the remote server.
-      --project-path PROJECT_PATH
-                            Project location in remote server.
-      --role {agent,collector,monitor}
-                            Run as 'agent', 'collector', 'monitor'. Notes: ensure
-                            the normal operation of the openGauss in agent.
-      --metric-name METRIC_NAME
-                            Metric name to be predicted; if this parameter is not
-                            provided, all metrics in the database will be predicted.
-      --query QUERY         target SQL for RCA.
-      --start_time START_TIME
-                            start time of the query.
-      --finish_time FINISH_TIME
-                            finish time of the query.
-      --period PERIOD       Forecast periods of the metric; it should be an integer.
-                            notes: the specific value should be determined by the
-                            training data. if this parameter is not provided, the
-                            default value '100S' will be used.
-      --freq FREQ           forecast gap, time unit: S: Second, M: Minute, H:
-                            Hour, D: Day, W: Week.
-      --forecast-method FORECAST_METHOD
-                            Forecast method, the default method is 'auto_arima'; if you
-                            want to use 'fbprophet', you should install fbprophet
-                            first.
-      --save-path SAVE_PATH
-                            Save the results to this path using CSV format; if
-                            this parameter is not provided, the result will not be
-                            saved.
-      -v, --version         show the program's version number and exit
-
-
-## Introduction to a-detection.conf
-
-the config is divided into sections: `database`, `server`, `agent`, `forecast`, `log`, `security`.
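As a rough sketch of how a consumer reads these sections (illustrative only; the option names are assumed from the a-detection.conf deleted further below, e.g. `log_dir`, `tls`, `source_timer_interval`):

```
# Illustrative only: read the public 'log' and 'security' sections plus one
# agent option, mirroring the config.get()/getboolean() calls in the deleted code.
from configparser import ConfigParser

config = ConfigParser()
config.read('a-detection.conf')

log_dir = config.get('log', 'log_dir')                    # e.g. ./log
tls = config.has_option('security', 'tls') and \
      config.getboolean('security', 'tls')                # https enabled?
interval = config.get('agent', 'source_timer_interval')   # e.g. 25S
print(log_dir, tls, interval)
```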
- -in all sections in the config, the `log` and `security` section is public, in addition to this, ` diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/a-detection.conf b/src/gausskernel/dbmind/tools/anomaly_detection/a-detection.conf deleted file mode 100644 index 0ba2c1980..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/a-detection.conf +++ /dev/null @@ -1,45 +0,0 @@ -[database] -# data retention time -storage_duration = 12H -# database directory -database_dir = ./data - -[security] -tls = False -ca = ./certificate/ca/ca.crt -server_cert = ./certificate/server/server.crt -server_key = ./certificate/server/server.key -agent_cert = ./certificate/agent/agent.crt -agent_key = ./certificate/agent/agent.key - -[server] -# ip of server -host = 0.0.0.0 -listen_host = 0.0.0.0 -listen_port = 8080 -# white list of agent -white_host = 0.0.0.0 -# white port of agent -white_port = 8080 - -[agent] -# data collection frequency -source_timer_interval = 25S -# data transmission frequency -sink_timer_interval = 25S -# buffer queue capacity -channel_capacity = 1000 -# ip of agent node -db_host = 0.0.0.0 -# port of agent node -db_port = 8080 -# node type of agent -db_type = single - -[forecast] -# time series forecast algorithm -forecast_alg = auto_arima - -[log] -# log location -log_dir = ./log diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/agent/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/agent/__init__.py deleted file mode 100644 index ab2c74ff3..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/agent/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import logging -import os -from logging import handlers - -import config - -agent_logger = logging.getLogger('agent') -log_dir_realpath = os.path.realpath(config.get('log', 'log_dir')) -if not os.path.exists(log_dir_realpath): - os.makedirs(log_dir_realpath) - -agent_handler = handlers.RotatingFileHandler(filename=os.path.join(log_dir_realpath, 'agent.log'), - maxBytes=1024 * 1024 * 100, - backupCount=5) -agent_handler.setFormatter(logging.Formatter("[%(asctime)s %(levelname)s]-[%(name)s]: %(message)s")) -agent_logger.addHandler(agent_handler) -agent_logger.setLevel(logging.INFO) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/agent/channel.py b/src/gausskernel/dbmind/tools/anomaly_detection/agent/channel.py deleted file mode 100644 index 939f68a8f..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/agent/channel.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
-""" -import logging -from queue import Queue, Empty, Full - -agent_logger = logging.getLogger('agent') - - -class Channel: - """ - This is parent class of buffer channel, which acts as a buffer - middleware between Source and Sink. - """ - - def __init__(self): - pass - - def put(self, event): - pass - - def take(self): - pass - - def size(self): - pass - - -class MemoryChannel(Channel): - """ - This class inherits from Channel, which is used to buffer data in memory. - """ - - def __init__(self, maxsize=None): - """ - :param maxsize: int, maxsize of channel - """ - Channel.__init__(self) - self.maxsize = maxsize - self.memory = Queue(maxsize) - - def put(self, event): - if self.maxsize and self.size() > self.maxsize: - agent_logger.warning("Channel has reached max size.") - try: - self.memory.put(event, block=True, timeout=0.2) - except Full: - agent_logger.warning("Throw away new data due to the channel reaching max size.") - - def take(self): - try: - return self.memory.get_nowait() - except Empty: - return None - - def size(self): - rv = self.memory.qsize() - return 0 if rv is None else rv diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/agent/db_source.py b/src/gausskernel/dbmind/tools/anomaly_detection/agent/db_source.py deleted file mode 100644 index 34d5581e7..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/agent/db_source.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import sys -import os -import logging -import threading -import time -import signal - -from .source import Source - -agent_logger = logging.getLogger('agent') - - -class DBSource(Source, threading.Thread): - """ - This class inherits the threading.Thread, it is used for managing single task. - """ - - def __init__(self, interval): - Source.__init__(self) - threading.Thread.__init__(self) - self._finished = threading.Event() - self._tasks = {} - self._interval = interval - - def add_task(self, name, task): - if name not in self._tasks: - self._tasks[name] = task - - def run(self): - while not self._finished.is_set(): - try: - content = {'timestamp': int(time.time())} - # All tasks are executed serially. - for task_name, task_handler in self._tasks.items(): - value = task_handler.output() - content.update(**{task_name: value}) - self._channel.put(content) - except Exception as e: - agent_logger.error(e) - process_id = os.getpid() - os.kill(process_id, signal.SIGTERM) - self._finished.wait(self._interval) - - def cancel(self): - self._finished.set() diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/agent/metric_agent.py b/src/gausskernel/dbmind/tools/anomaly_detection/agent/metric_agent.py deleted file mode 100644 index e32f0f797..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/agent/metric_agent.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. 
-You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import logging -import os -import signal - -import config -import global_vars -from cert import get_agent_ssl_context -from task import database_exporter, os_exporter, wdr -from utils import TimeString -from utils import getpasswd -from deamon import handle_sigterm -from .channel import MemoryChannel -from .db_source import DBSource -from .sink import HttpSink - -agent_logger = logging.getLogger('agent') - - -def _extract_params(): - """ - Check if the agent parameter is valid, if the parameter is valid, - then return parameters dict, otherwise exit process. - :return: agent parameters dict. - """ - params = {} - - host = config.get('server', 'host') - listen_port = config.get('server', 'listen_port') - params['host'] = host - params['listen_port'] = listen_port - - default_params = {'sink_timer_interval': '10S', - 'source_timer_interval': '10S', - 'channel_capacity': 1000} - for parameter, default in default_params.items(): - try: - if parameter == 'channel_capacity': - agent_parameter_value = config.getint('agent', parameter) - params[parameter] = agent_parameter_value - else: - agent_parameter_value = config.get('agent', parameter) - params[parameter] = TimeString(agent_parameter_value).to_second() - except Exception as e: - agent_logger.error("An error ({error}) occurs when acquiring {parameter}," - " using default value: {default_value}." - .format(parameter=parameter, - error=e, - default_value=default)) - params[parameter] = default_params[parameter] - params['db_host'] = config.get('agent', 'db_host') - params['db_port'] = config.get('agent', 'db_port') - params['db_type'] = config.get('agent', 'db_type') - - # Https configures. - if config.has_option('security', 'tls') and config.getboolean('security', 'tls'): - params['agent_cert'] = os.path.realpath(config.get('security', 'agent_cert')) - params['agent_key'] = os.path.realpath(config.get('security', 'agent_key')) - params['ca'] = os.path.realpath(config.get('security', 'ca')) - pwd_path = os.path.dirname(params['agent_cert']) - params['cert_pwd'] = getpasswd(pwd_path) - - return params - - -def agent_main(): - """ - The main entrance of the agent service. 
- """ - signal.signal(signal.SIGTERM, handle_sigterm) - try: - params = _extract_params() - context = get_agent_ssl_context(params) - if context: - protocol = 'https' - else: - protocol = 'http' - - url = '%s://%s:%s/sink' % (protocol, params['host'], params['listen_port']) - chan = MemoryChannel(maxsize=params['channel_capacity']) - source = DBSource(interval=params['source_timer_interval']) - source.setDaemon(True) - http_sink = HttpSink(interval=params['sink_timer_interval'], - url=url, - context=context, - db_host=params['db_host'], - db_port=params['db_port'], - db_type=params['db_type']) - source.channel = chan - http_sink.channel = chan - - source.add_task(name=database_exporter.DatabaseExporter.__tablename__, - task=database_exporter.DatabaseExporter(db_port=params['db_port'])) - source.add_task(name=os_exporter.OSExporter.__tablename__, task=os_exporter.OSExporter(db_port=params['db_port'])) - source.add_task(name=wdr.WDR.__tablename__, task=wdr.WDR(db_port=params['db_port'], db_type=params['db_type'])) - - source.start() - http_sink.start() - except Exception as e: - agent_logger.error(e) - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/agent/sink.py b/src/gausskernel/dbmind/tools/anomaly_detection/agent/sink.py deleted file mode 100644 index 811318ced..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/agent/sink.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import json -import logging -import time -from urllib import request - -_JSON_HEADER = {'Content-Type': 'application/json'} -agent_logger = logging.getLogger('agent') - - -class Sink: - """ - This is parent class which is used for getting data from ChannelManager object and - sending data at a specified time interval. - """ - - def __init__(self): - self._channel = None - self.running = False - - @property - def channel(self): - return self._channel - - @channel.setter - def channel(self, channel): - self._channel = channel - - def process(self): - pass - - def start(self): - self.running = True - self.process() - - def stop(self): - self.running = False - - -class HttpSink(Sink): - """ - This class inherits from Sink and use to send data to server based on http/https - method at a specified time interval. - """ - - def __init__(self, interval, url, context, db_host, db_port, db_type): - """ - :param interval: int, time interval when send data. - :param url: string, http/https url. - :param context: certificate context for https method. 
- """ - Sink.__init__(self) - self._interval = interval - self.running = False - self._url = url - self.context = context - self.db_host = db_host - self.db_port = db_port - self.db_type = db_type - - def process(self): - agent_logger.info('Begin send data to {url}.'.format(url=self._url)) - while self.running: - contents = self._channel.take() - if not contents: - time.sleep(0.5) - continue - - contents.update(**{'flag': {'host': self.db_host, 'port': self.db_port, 'type': self.db_type}}) - retry_times = 5 - while retry_times: - try: - req = request.Request(self._url, headers=_JSON_HEADER, - data=json.dumps(contents).encode('utf-8'), - method='POST') - request.urlopen(req, context=self.context) - break - except Exception as e: - agent_logger.error("{error}, retry...".format(error=str(e))) - retry_times -= 1 - if not retry_times: - raise - time.sleep(1.0) - time.sleep(self._interval) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/agent/source.py b/src/gausskernel/dbmind/tools/anomaly_detection/agent/source.py deleted file mode 100644 index 7d8090088..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/agent/source.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - - -class Source: - """ - This is parent class which is used for acquiring - multiple metric data at the same time. - """ - - def __init__(self): - self._channel = None - - @property - def channel(self): - return self._channel - - @channel.setter - def channel(self, channel): - self._channel = channel diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/bin/common.sh b/src/gausskernel/dbmind/tools/anomaly_detection/bin/common.sh deleted file mode 100644 index 0011b13e7..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/bin/common.sh +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2020 Huawei Technologies Co.,Ltd. -# -# openGauss is licensed under Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. 
-#------------------------------------------------------------------------- -# -# common.sh -# common info of A-Detection -# -#------------------------------------------------------------------------- - -CURRENT_DIR=$(cd ../$(dirname $0); pwd) -PROJECT_NAME=$(basename ${CURRENT_DIR}) -SSH_PORT=22 - - -function send_ssh_command_without_pwd() -{ - local host=$1 - local user=$2 - local password=$3 - local port=$4 - local path=$5 - local cmd=$6 - -expect <<-EOF - spawn ssh -o StrictHostKeyChecking=no ${host} -p ${port} -l ${user} - expect { - "*assword:" { - send "${password}\r"; - expect { - "*denied*" {exit 2;} - eof - } - } - eof {exit 1;} - } - send "\r" - expect "*]*" - send "cd ${path}\r" - expect "*]*" - send "${cmd}\r" - expect "*]*" - send "exit $?\r" - expect eof - - catch wait result; - exit [lindex \$result 3] -EOF - - return $? -} - - -function send_scp_command_without_pwd() -{ - local host=$1 - local user=$2 - local password=$3 - local dest_path=$4 - -expect <<-EOF - spawn scp -o StrictHostKeyChecking=no -r ${CURRENT_DIR} ${user}@${host}:${dest_path} - expect { - "*assword:" { - send "${password}\r"; - expect { - "*denied*" {exit 2;} - eof - } - } - } - - catch wait result; - exit [lindex \$result 3] -EOF - return $? -} - -function send_ssh_command() { - local host=$1 - local user=$2 - local port=$3 - local path=$4 - local cmd=$5 - - ssh -o StrictHostKeyChecking=no ${host} -p ${port} -l ${user} <<-EOF - cd ${path}; - ${cmd} -EOF - - return $? -} - -function send_scp_command() { - local host=$1 - local user=$2 - local dest_path=$3 - - scp -o StrictHostKeyChecking=no -r ${CURRENT_DIR} ${user}@${host}:${dest_path} - - return $? -} diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/bin/start.sh b/src/gausskernel/dbmind/tools/anomaly_detection/bin/start.sh deleted file mode 100644 index 09dbf1d3c..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/bin/start.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -# Copyright (c) 2020 Huawei Technologies Co.,Ltd. -# -# openGauss is licensed under Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -#------------------------------------------------------------------------- -# -# start.sh -# start script of A-Detection -# -#------------------------------------------------------------------------- -source ./common.sh - - -function usage() -{ - echo "usage: $0 [option] - --help - --deploy [host] [user] [location] - --start_local_service [role {agent,server,monitor}] - --start_remote_service [host] [user] [project_path] [role {agent,server,monitor}] - " -} - - -function start_local_service() -{ - local role=$1 - - cd ${CURRENT_DIR} - python main.py start --role ${role} - return $? -} - - -function main() -{ - if [ $# -eq 0 ]; then - usage - exit 1 - fi - - case "$1" in - --help) - usage - exit 0 - ;; - --start_local_service) - start_local_service $2 - exit $? - ;; - --start_remote_service) - send_ssh_command $2 $3 $SSH_PORT $4/${PROJECT_NAME} "python main.py start --role $5" - exit $? - ;; - --deploy) - send_scp_command $2 $3 $4 - exit $? 
- ;; - *) - echo "Unknown arguments" - exit 1 - ;; - esac -} - - -main $@ diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/bin/stop.sh b/src/gausskernel/dbmind/tools/anomaly_detection/bin/stop.sh deleted file mode 100644 index 8d0a94527..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/bin/stop.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# Copyright (c) 2020 Huawei Technologies Co.,Ltd. -# -# openGauss is licensed under Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -#------------------------------------------------------------------------- -# -# stop.sh -# stop script of A-Detection -# -#------------------------------------------------------------------------- -source ./common.sh - - -function usage() -{ - echo "usage: $0 [option] - --help - --stop_local_service [role, {agent,server,monitor}] - --stop_remote_service [host] [user] [project_path] [role, {agent,server,monitor}] - " -} - - -function stop_local_service() -{ - local role=$1 - - cd ${CURRENT_DIR} - python main.py stop --role ${role} - return $? -} - - -function main() -{ - if [ $# -eq 0 ]; then - usage - exit 1 - fi - - case "$1" in - --help) - usage - exit 0 - ;; - --stop_local_service) - stop_local_service $2 - exit $? - ;; - --stop_remote_service) - send_ssh_command $2 $3 $SSH_PORT $4/${PROJECT_NAME} "python main.py stop --role $5" - exit $? - ;; - *) - echo "Unknown arguments" - exit 1 - ;; - esac -} - - -main $@ diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/cert.py b/src/gausskernel/dbmind/tools/anomaly_detection/cert.py deleted file mode 100644 index 10ce618bd..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/cert.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - -import logging -import ssl -import subprocess -from datetime import datetime - -import dateutil.parser - - -def check_certificate(certificate_path): - """ - Check whether the certificate is expired or invalid. - :param certificate_path: path of certificate. - output: dict, check result which include 'check status' and 'check information'. 
- """ - check_result = {} - certificate_warn_threshold = 365 - child = subprocess.Popen(['openssl', 'x509', '-in', certificate_path, '-noout', '-dates'], - shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE) - sub_chan = child.communicate() - if sub_chan[1] or not sub_chan[0]: - check_result['status'] = 'fail' - else: - check_result['status'] = 'success' - not_after = sub_chan[0].decode('utf-8').split('\n')[1].split('=')[1].strip() - end_time = dateutil.parser.parse(not_after).replace(tzinfo=None) - certificate_remaining_days = (end_time - datetime.now()).days - if 0 < certificate_remaining_days < certificate_warn_threshold: - check_result['level'] = 'warn' - check_result['info'] = "the '{certificate}' has {certificate_remaining_days} days before out of date." \ - .format(certificate=certificate_path, - certificate_remaining_days=certificate_remaining_days) - elif certificate_remaining_days >= certificate_warn_threshold: - check_result['level'] = 'info' - check_result['info'] = "the '{certificate}' has {certificate_remaining_days} days before out of date." \ - .format(certificate=certificate_path, - certificate_remaining_days=certificate_remaining_days) - else: - check_result['level'] = 'error' - check_result['info'] = "the '{certificate}' is out of date.".format(certificate=certificate_path) - return check_result - - -def get_agent_ssl_context(params): - context = None - if {'agent_cert', 'agent_key', 'ca', 'cert_pwd'}.issubset(set(params.keys())): - cert = params['agent_cert'] - key = params['agent_key'] - pwd = params['cert_pwd'] - ca = params['ca'] - ssl_certificate_status = check_certificate(cert) - ca_certificate_status = check_certificate(ca) - if ssl_certificate_status['status'] == 'fail' or ca_certificate_status['status'] == 'fail': - logging.error("error occur when check 'certificate'.") - return - else: - if ssl_certificate_status['level'] == 'error' or ca_certificate_status['level'] == 'error': - logging.error("{ssl_certificate_info}; {ca_certificate_info}" - .format(ssl_certificate_info=ssl_certificate_status['info'], - ca_certificate_info=ca_certificate_status['info'])) - return - else: - logging.warning("{ssl_certificate_info}; {ca_certificate_info}" - .format(ssl_certificate_info=ssl_certificate_status['info'], - ca_certificate_info=ca_certificate_status['info'])) - context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca) - context.check_hostname = False - context.load_cert_chain(certfile=cert, keyfile=key, password=pwd) - - return context - - -def get_server_ssl_context(params): - context = None - if {'server_cert', 'server_key', 'ca', 'cert_pwd'}.issubset(set(params.keys())): - cert = params['server_cert'] - key = params['server_key'] - pwd = params['cert_pwd'] - ca = params['ca'] - ssl_certificate_status = check_certificate(cert) - ca_certificate_status = check_certificate(ca) - if ssl_certificate_status['status'] == 'fail' or ca_certificate_status['status'] == 'fail': - logging.error("error occur when check 'certificate'.") - return - else: - if ssl_certificate_status['level'] == 'error' or ca_certificate_status['level'] == 'error': - logging.error("{ssl_certificate_info}; {ca_certificate_info}" - .format(ssl_certificate_info=ssl_certificate_status['info'], - ca_certificate_info=ca_certificate_status['info'])) - return - else: - logging.warning("{ssl_certificate_info}; {ca_certificate_info}" - .format(ssl_certificate_info=ssl_certificate_status['info'], - ca_certificate_info=ca_certificate_status['info'])) - context = 
ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=ca) - context.verify_mode = ssl.CERT_REQUIRED - context.load_cert_chain(certfile=cert, keyfile=key, password=pwd) - - return context diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/config.py b/src/gausskernel/dbmind/tools/anomaly_detection/config.py deleted file mode 100644 index 58a21dbf3..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/config.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - -from configparser import ConfigParser - -from global_vars import CONFIG_PATH - -_config = ConfigParser() -_config.read(CONFIG_PATH) - -read = _config.read -get = _config.get -getint = _config.getint -getfloat = _config.getfloat -getboolean = _config.getboolean - -has_option = _config.has_option -has_section = _config.has_section diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/deamon.py b/src/gausskernel/dbmind/tools/anomaly_detection/deamon.py deleted file mode 100644 index 33b7e07ec..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/deamon.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - -import atexit -import os -import signal -import sys - -from utils import abnormal_exit - - -def read_pid_file(filepath): - """ - Return the pid of the running process recorded in the file, - and return 0 if the acquisition fails. - """ - if not os.path.exists(filepath): - return 0 - - try: - with open(filepath, mode='r') as f: - pid = int(f.read()) - if os.path.exists('/proc/%d' % pid): - return pid - else: - return 0 - except PermissionError: - return 0 - except ValueError: - return 0 - - -def handle_sigterm(signo, frame): - sys.exit(0) - - -class Daemon: - """ - This class implements the function of running a process in the background.""" - - def __init__(self): - self.args = None - self.kwargs = None - self.function = None - self.stdout = None - self.stderr = None - self.pid_file = None - - def set_pid_file(self, pid_file='./tmp/anomal_detection.pid'): - self.pid_file = pid_file - return self - - def set_stdout(self, stdout='/dev/null'): - self.stdout = stdout - return self - - def set_stderr(self, stderr='/dev/null'): - self.stderr = stderr - return self - - def set_function(self, function, *args, **kwargs): - self.args = args - self.kwargs = kwargs - self.function = function - return self - - def daemon_process(self): - # Verify that the pid file is valid. 
-        read_pid = read_pid_file(self.pid_file)
-        if read_pid > 0:
-            if os.readlink('/proc/{pid}/cwd'.format(pid=read_pid)) == os.path.dirname(os.path.realpath(__file__)):
-                raise RuntimeError("The process is already running.")
-            else:
-                os.remove(self.pid_file)
-
-        try:
-            if os.fork() > 0:
-                sys.exit(0)
-        except OSError as e:
-            raise RuntimeError('Process fork failed: %s.' % e)
-
-        os.setsid()
-        os.umask(0o0077)
-        try:
-            if os.fork() > 0:
-                sys.exit(0)
-        except OSError as e:
-            raise RuntimeError('Process fork failed: %s.' % e)
-
-        sys.stdout.flush()
-        sys.stderr.flush()
-
-        for path in (self.stdout, self.stderr, self.pid_file):
-            dirname = os.path.dirname(path)
-            if not os.path.exists(dirname):
-                os.makedirs(dirname)
-        with open(self.stdout, mode='ab', buffering=0) as f:
-            os.dup2(f.fileno(), sys.stdout.fileno())
-        with open(self.stderr, mode='ab', buffering=0) as f:
-            os.dup2(f.fileno(), sys.stderr.fileno())
-        with open(self.pid_file, mode='w') as f:
-            f.write(str(os.getpid()))
-
-        atexit.register(lambda: os.remove(self.pid_file))
-        signal.signal(signal.SIGTERM, handle_sigterm)
-
-    def start(self):
-        try:
-            self.daemon_process()
-        except RuntimeError as msg:
-            abnormal_exit(msg)
-
-        self.function(*self.args, **self.kwargs)
-
-    def stop(self):
-        if not os.path.exists(self.pid_file):
-            abnormal_exit("Process not running.")
-
-        read_pid = read_pid_file(self.pid_file)
-        if read_pid > 0:
-            os.kill(read_pid, signal.SIGTERM)
-        # Clean up a stale pid file; read_pid_file() returns 0 on failure, never a negative value.
-        if read_pid_file(self.pid_file) == 0:
-            os.remove(self.pid_file)
diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/__init__.py
deleted file mode 100644
index c39054a50..000000000
--- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/__init__.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Copyright (c) 2020 Huawei Technologies Co.,Ltd.
-
-openGauss is licensed under Mulan PSL v2.
-You can use this software according to the terms and conditions of the Mulan PSL v2.
-You may obtain a copy of Mulan PSL v2 at:
-
-         http://license.coscl.org.cn/MulanPSL2
-
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-See the Mulan PSL v2 for more details.
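The `Daemon` class above daemonizes via the classic double `fork`. A minimal usage sketch; the worker function and file paths are illustrative, and the import assumes the caller sits next to `deamon.py`:

```
import time

from deamon import Daemon  # the module file is spelled 'deamon.py' in this tree


def worker():
    while True:
        # Placeholder for the real service loop.
        time.sleep(60)


daemon = Daemon()
daemon.set_pid_file('./tmp/anomal_detection.pid') \
      .set_stdout('./log/service.out') \
      .set_stderr('./log/service.err') \
      .set_function(worker)
daemon.start()  # double-forks, redirects stdio, writes the pid file, then runs worker()
```

A later `daemon.stop()`, issued from a fresh process configured with the same pid file, sends SIGTERM to the recorded pid.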
-""" - -import logging -import os -from logging import handlers - -import config - -log_dir_realpath = os.path.realpath(config.get('log', 'log_dir')) -if not os.path.exists(log_dir_realpath): - os.makedirs(log_dir_realpath) - -detector_logger = logging.getLogger('detector') -detector_handler = handlers.RotatingFileHandler(filename=os.path.join(log_dir_realpath, 'detector.log'), - maxBytes=1024 * 1024 * 100, - backupCount=5) -detector_handler.setFormatter(logging.Formatter("[%(asctime)s %(levelname)s]-[%(name)s]: %(message)s")) -detector_logger.addHandler(detector_handler) -detector_logger.setLevel(logging.INFO) - -service_logger = logging.getLogger('service') -service_handler = handlers.RotatingFileHandler(filename=os.path.join(log_dir_realpath, 'service.log'), - maxBytes=1024 * 1024 * 100, - backupCount=5) -service_handler.setFormatter(logging.Formatter("[%(asctime)s %(levelname)s]-[%(name)s]: %(message)s")) -service_logger.addHandler(service_handler) -service_logger.setLevel(logging.INFO) - -abnormal_logger = logging.getLogger('abnormal') -abnormal_handler = handlers.RotatingFileHandler(filename=os.path.join(log_dir_realpath, 'abnormal.log'), - maxBytes=1024 * 1024 * 100, - backupCount=5) -abnormal_handler.setFormatter(logging.Formatter("[%(asctime)s %(levelname)s]-[%(name)s]: %(message)s")) -abnormal_logger.addHandler(abnormal_handler) -abnormal_logger.setLevel(logging.INFO) - -sql_rca_logger = logging.getLogger('sql_rca') -sql_rca_handler = handlers.RotatingFileHandler(filename=os.path.join(log_dir_realpath, 'sql_rca.log'), - maxBytes=1024 * 1024 * 100, - backupCount=5) -sql_rca_handler.setFormatter(logging.Formatter("%(message)s")) -sql_rca_logger.addHandler(sql_rca_handler) -sql_rca_logger.setLevel(logging.INFO) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/__init__.py deleted file mode 100644 index 0c8b254d6..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - - -def get_fcst_alg(name): - if name == 'fbprophet': - from .fb_prophet import FbProphet - return FbProphet - elif name == 'auto_arima': - from .auto_arima import AutoArima - return AutoArima - else: - raise ValueError('No {name} time-series forecast algorithm.'.format(name=name)) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/auto_arima.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/auto_arima.py deleted file mode 100644 index 41dace2ad..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/auto_arima.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. 
-You may obtain a copy of Mulan PSL v2 at:
-
-         http://license.coscl.org.cn/MulanPSL2
-
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-See the Mulan PSL v2 for more details.
-"""
-
-import time
-
-import pandas as pd
-import pmdarima as pm
-
-from .model import AlgModel
-
-
-class AutoArima(AlgModel):
-    def __init__(self):
-        AlgModel.__init__(self)
-        self.model = None
-        self.end_date = None
-
-    def fit(self, timeseries):
-        timeseries = pd.DataFrame(timeseries, columns=['ds', 'y'])
-        timeseries['ds'] = timeseries['ds'].map(lambda x: time.strftime(AlgModel.DATE_FORMAT, time.localtime(x)))
-        timeseries.set_index(['ds'], inplace=True)
-        timeseries.index = pd.to_datetime(timeseries.index)
-        self.end_date = timeseries.index[-1]
-        self.model = pm.auto_arima(timeseries, seasonal=True)
-        self.model.fit(timeseries)
-
-    def forecast(self, period, freq):
-        if freq.endswith('M'):
-            freq = freq.replace('M', 'T')
-
-        forecast_date_range = pd.date_range(start=self.end_date,
-                                            periods=period + 1,
-                                            freq=freq,
-                                            closed='right')
-        forecast_date_range = forecast_date_range.map(
-            lambda x: x.strftime(AlgModel.DATE_FORMAT)
-        ).values
-        forecast_value = self.model.predict(period)
-        return forecast_date_range, forecast_value
diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/fb_prophet.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/fb_prophet.py
deleted file mode 100644
index 1c3fcb577..000000000
--- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/fb_prophet.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
-Copyright (c) 2020 Huawei Technologies Co.,Ltd.
-
-openGauss is licensed under Mulan PSL v2.
-You can use this software according to the terms and conditions of the Mulan PSL v2.
-You may obtain a copy of Mulan PSL v2 at:
-
-         http://license.coscl.org.cn/MulanPSL2
-
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-See the Mulan PSL v2 for more details.
-"""
-import pickle
-import time
-
-import pandas as pd
-from fbprophet import Prophet
-
-from .model import AlgModel
-
-
-class FbProphet(AlgModel):
-    """
-    This class inherits from the AlgModel class.
-    It is based on the Facebook prophet algorithm and is used to forecast time series.
-    """
-
-    def __init__(self):
-        AlgModel.__init__(self)
-        self.model = None
-        self.train_length = 0
-
-    def fit(self, timeseries):
-        """
-        :param timeseries: list, it should include timestamps and values like
-        [[111111111, 2222222222, ...], [4.0, 5.0, ...]].
-        :return: NA
-        """
-        timeseries = pd.DataFrame(timeseries, columns=['ds', 'y'])
-        timeseries['ds'] = timeseries['ds'].map(
-            lambda x: time.strftime(AlgModel.DATE_FORMAT, time.localtime(x)))
-        self.train_length = len(timeseries)
-        self.model = Prophet(yearly_seasonality=True,
-                             weekly_seasonality=True,
-                             daily_seasonality=True)
-        self.model.fit(timeseries)
-
-    def forecast(self, period, freq):
-        """
-        :param period: int, the number of points to forecast.
-        :param freq: string, time interval such as '100S' or '1D'.
-        :return: list, forecast result which includes dates and values.
- """ - if freq.endswith('M'): - freq = freq.replace('M', 'T') - - future = self.model.make_future_dataframe(freq=freq, - periods=period, - include_history=False) - forecast_result = self.model.predict(future)[['ds', 'yhat']] - forecast_result['ds'] = forecast_result['ds'].map(lambda x: x.strftime(AlgModel.DATE_FORMAT)) - return forecast_result.values[:, 0], forecast_result.values[:, 1] - - def save(self, model_path): - with open(model_path, mode='wb') as f: - pickle.dump(self.model, f) - - def load(self, model_path): - with open(model_path, mode='rb') as f: - self.model = pickle.load(f) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/model.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/model.py deleted file mode 100644 index 461366473..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/algorithm/model.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -from abc import abstractmethod - - -class AlgModel(object): - """ - This is the parent class for forecasting algorithms. - If we want to use our own forecast algorithm, we should follow some rules. - """ - DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - - def __init__(self): - pass - - @abstractmethod - def fit(self, timeseries): - pass - - @abstractmethod - def forecast(self, period, freq): - pass - - def save(self, model_path): - pass - - def load(self, model_path): - pass diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/metric_detector.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/metric_detector.py deleted file mode 100644 index 059c3f045..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/metric_detector.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import logging -import os -import re -import sys -from configparser import ConfigParser - -import config -import global_vars -from detector.algorithm import get_fcst_alg -from detector.service.storage.sqlite_storage import SQLiteStorage -from detector.tools.slow_sql import SQL_RCA -from detector.tools.trend.forecast import Forecaster -from utils import TimeString, RepeatTimer - -m_logger = logging.getLogger('detector') - - -class Detector: - """ - This class is used for monitoring mutiple metric. 
- """ - - def __init__(self): - self._tasks = dict() - - def apply(self, instance, *args, **kwargs): - if instance in self._tasks: - return False - interval = getattr(instance, 'interval') - try: - interval = TimeString(interval).to_second() - except ValueError as e: - m_logger.error(e, exc_info=True) - return False - timer = RepeatTimer(interval, instance.run, *args, **kwargs) - self._tasks[instance] = timer - return True - - def start(self): - for instance, timer in self._tasks.items(): - timer.start() - - -def _extract_trend_params(): - """ - Extract required parameters for tools task like forecast algorithm and - database path. - :return: parameter dict. - """ - return {'forecast_alg': config.get('forecast', 'forecast_alg'), - 'database_dir': os.path.realpath(config.get('database', 'database_dir'))} - - -def _extract_sql_rca_params(metric_config): - try: - slow_sql_service_list = metric_config.get('detector_method', 'slow_sql') - data_handler = SQLiteStorage - database_dir = config.get('database', 'database_dir') - if metric_config.has_option('common_parameter', 'interval') and metric_config.get('common_parameter', 'interval'): - interval = metric_config.get('common_parameter', 'interval') - value_prefix, value_suffix = re.match(r'^(\d+)([WDHMS])$', interval).groups() - else: - interval = '300S' - except Exception as e: - m_logger.error("parameter interval is error, use default instead({interval}): {err}".format(interval='300S', err=str(e))) - interval = '300S' - - return {'slow_sql_service_list': slow_sql_service_list, - 'data_handler': data_handler, - 'database_dir': database_dir, - 'interval': interval} - - -def _extract_trend_optional_params(metric_config): - params = {'period': 1, 'interval': '300S', 'data_period': '300S', 'freq': 'S'} - if not metric_config.has_section('common_parameter'): - m_logger.warning("Not found 'common_parameter' section in {metric_config}".format( - metric_config=global_vars.METRIC_CONFIG_PATH)) - return params - if metric_config.has_option('common_parameter', 'data_period') and metric_config.get('common_parameter', 'data_period'): - params['data_period'] = metric_config.get('common_parameter', 'data_period') - if metric_config.has_option('common_parameter', 'period') and metric_config.get('common_parameter', 'period'): - try: - params['period'] = metric_config.getint('common_parameter', 'period') - except Exception as e: - m_logger.error("parameter period is error, use default instead({default})".format(default='1')) - params['period'] = 1 - if metric_config.has_option('common_parameter', 'interval') and metric_config.get('common_parameter', 'interval'): - params['interval'] = metric_config.get('common_parameter', 'interval') - if metric_config.has_option('common_parameter', 'freq') and metric_config.get('common_parameter', 'freq'): - params['freq'] = metric_config.get('common_parameter', 'freq') - - for key in ('data_period', 'freq', 'interval'): - try: - value_prefix, value_suffix = re.match(r'^(\d+)?([WDHMS])$', params[key]).groups() - except Exception as e: - if key == 'interval': - default = '300S' - if key == 'data_period': - default = '300S' - if key == 'freq': - default = 'S' - m_logger.error("parameter {key} is error, use default instead({default}).".format(key=key, default=default)) - params[key] = default - return params - - -def _trend_service(metric_config): - kwargs = {} - trend_service_list = metric_config.get('detector_method', 'trend').split() - trend_required_params = _extract_trend_params() - trend_optional_params = 
_extract_trend_optional_params(metric_config) - kwargs.update(**trend_required_params) - kwargs.update(**trend_optional_params) - kwargs.update(**{'trend_service_list': trend_service_list}) - kwargs.update(**{'metric_config': metric_config}) - kwargs['data_handler'] = SQLiteStorage - kwargs['forecast_handler'] = get_fcst_alg(kwargs['forecast_alg'])() - trend_service = Forecaster(service_list=trend_service_list, **kwargs) - return trend_service - - -def _sql_rca_service(metric_config): - kwargs = _extract_sql_rca_params(metric_config) - sql_rca_service = SQL_RCA(**kwargs) - return sql_rca_service - - -def detector_main(): - if not os.path.exists(global_vars.METRIC_CONFIG_PATH): - m_logger.error('The {metric_config_path} is not exist.'.format( - metric_config_path=global_vars.METRIC_CONFIG_PATH)) - return - metric_config = ConfigParser() - metric_config.read(global_vars.METRIC_CONFIG_PATH) - if set(['detector_method', 'os_exporter', 'common_parameter']) != set(metric_config.sections()): - different_secs = set(metric_config.sections()).symmetric_difference(set(['detector_method', 'os_exporter', 'common_parameter'])) - m_logger.error('error_section: {error_section}'.format(error_section=str(different_secs))) - return - trend_service = _trend_service(metric_config) - slow_service = _sql_rca_service(metric_config) - detector_service = Detector() - try: - detector_service.apply(trend_service) - detector_service.apply(slow_service) - except Exception as e: - m_logger.error(e, exc_info=True) - detector_service.start() - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/__init__.py deleted file mode 100644 index 6f572d9dc..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -from .detection_app import DetectionApp - - -def service_main(): - app = DetectionApp() - app.add_resources() - app.start_service() diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/app.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/app.py deleted file mode 100644 index 7eec0a0b9..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/app.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - - -class App: - """ - This is parent class for app. 
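`Detector.apply` above schedules each task with `utils.RepeatTimer`, which is not part of this hunk. A typical implementation of such a repeating timer, assumed here rather than taken from the project, builds on `threading.Timer`:

```
import threading


class RepeatTimer(threading.Timer):
    """Assumed sketch of utils.RepeatTimer, which is not shown in this diff."""

    def run(self):
        # threading.Timer fires once; rescheduling in a loop makes it periodic.
        while not self.finished.wait(self.interval):
            self.function(*self.args, **self.kwargs)


# Usage mirroring Detector.apply: run task.run() every 300 seconds.
# RepeatTimer(300, task.run).start()
```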
- """ - - def __init__(self): - pass - - def add_resources(self): - pass - - def init_database(self): - pass - - def start_service(self, *args, **kwargs): - pass diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/detection_app.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/detection_app.py deleted file mode 100644 index 1c29416a2..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/detection_app.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import logging -import os -import re - -from flask import Flask -from flask_restful import Api - -import config -import global_vars -from cert import get_server_ssl_context -from utils import getpasswd -from .app import App -from .resource import receiver - -service_logger = logging.getLogger('service') - - -def check_params(): - if not config.has_section('database'): - service_logger.error("Not found 'database' section in config file.") - else: - if not config.has_option('database', 'database_path'): - service_logger.error("Not found 'database_path' in database section.") - - if not config.has_section('server'): - service_logger.error("Not found 'database' section in config file.") - else: - if not config.has_option('server', 'listen_host') or not config.has_option('server', 'listen_port'): - service_logger.error("Not found 'listen_host' or 'listen_port' in server section.") - - -def _extract_params(): - params = {'storage_duration': '12H'} - for name, default_value in params.items(): - if not config.has_option('database', name): - service_logger.warning("Not found '{name}' in database section, using default value: '{default_value}'." - .format(name=name, default_value=default_value)) - value = default_value - else: - value = config.get('database', name) - params[name] = value - - params['database_dir'] = config.get('database', 'database_dir') - params['listen_host'] = config.get('server', 'listen_host') - params['listen_port'] = config.getint('server', 'listen_port') - params['white_host'] = config.get('server', 'white_host') - params['white_port'] = config.get('server', 'white_port') - - # Https configures. - if config.has_option('security', 'tls') and config.getboolean('security', 'tls'): - params['server_cert'] = config.get('security', 'server_cert') - params['server_key'] = config.get('security', 'server_key') - params['ca'] = config.get('security', 'ca') - pwd_path = os.path.dirname(params['server_cert']) - params['cert_pwd'] = getpasswd(pwd_path) - return params - - -class DetectionApp(App): - """ - This class is used for starting detector service. - """ - - def __init__(self): - App.__init__(self) - self.params = _extract_params() - self.app = Flask(__name__) - self.app.config['debug'] = False - self.api = Api(self.app) - - def check_valid_address(self): - valid_dbname = [] - # Consider the case if user do not provide any host and port information. 
- if not self.params['white_host']: - white_host = '' - else: - white_host = self.params['white_host'] - if not self.params['white_port']: - white_port = '' - else: - white_port = self.params['white_port'] - white_host = re.findall(r'\d{1,4}\.\d{1,4}\.\d{1,4}\.\d{1,4}', white_host) - white_port = re.findall(r'\d{1,5}', white_port) - for i, ip in enumerate(white_host): - for j, port in enumerate(white_port): - dbname = ip + ':' + port - valid_dbname.append(dbname) - return valid_dbname - - def add_resources(self): - database_dir = os.path.realpath(self.params['database_dir']) - os.makedirs(database_dir, exist_ok=True) - valid_dbname = self.check_valid_address() - self.api.add_resource(receiver.Source, '/sink', resource_class_kwargs={'database_dir': database_dir, - 'valid_dbname': valid_dbname}) - - def start_service(self): - context = get_server_ssl_context(self.params) - try: - self.app.run(host=self.params['listen_host'], - port=self.params['listen_port'], - ssl_context=context) - except Exception as e: - service_logger.fatal(e) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/resource/receiver.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/resource/receiver.py deleted file mode 100644 index 7f93c2d53..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/resource/receiver.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import logging -import os - -from flask import request -from flask_restful import Resource - -from detector.service.storage import sqlite_storage - -service_logger = logging.getLogger('service') - - -class ResponseTuple: - """ - This class is used for generating a response tuple. - """ - - @staticmethod - def success(result=None): - if result is None: - return {"status": "success"}, 200 - - return {"status": "success", "result": result} - - @staticmethod - def error(msg="", status_code=400): - return {"status": "error", "msg": msg}, status_code - - -class Source(Resource): - """ - This class is used for acquiring metric data from agent and save data - in sqlite database. 
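`check_valid_address` above expands the configured `white_host`/`white_port` strings into every `ip:port` pair the receiver will accept. A condensed sketch of that expansion, using the same regular expressions; the input values in the comment are illustrative:

```
import re
from itertools import product


def expand_whitelist(white_host, white_port):
    # Pull IPs and ports out of the config strings, then allow every combination.
    hosts = re.findall(r'\d{1,4}\.\d{1,4}\.\d{1,4}\.\d{1,4}', white_host or '')
    ports = re.findall(r'\d{1,5}', white_port or '')
    return ['%s:%s' % (ip, port) for ip, port in product(hosts, ports)]


# expand_whitelist('10.0.0.1, 10.0.0.2', '5432 5433')
# -> ['10.0.0.1:5432', '10.0.0.1:5433', '10.0.0.2:5432', '10.0.0.2:5433']
```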
- """ - - def __init__(self, database_dir, valid_dbname): - self.database_dir = database_dir - self.valid_dbname = valid_dbname - - def post(self): - content = request.json - try: - flag = content.pop('flag') - dbname = flag['host'] + ':' + str(flag['port']) - timestamp = content.pop('timestamp') - if dbname not in self.valid_dbname: - # not in whitelist - return ResponseTuple.error(status_code=400) - os.makedirs(self.database_dir, exist_ok=True) - database_path = os.path.join(self.database_dir, dbname) - db_agent = sqlite_storage.SQLiteStorage(dbpath=database_path) - db_agent.connect() - db_agent.create_table() - for table, event in content.items(): - # if do not collect data, then continue - if not event: - continue - if table == 'wdr': - for tup in event: - tup.insert(0, timestamp) - db_agent.insert(table, *tup) - else: - event.insert(0, timestamp) - db_agent.insert(table, *event) - db_agent.close() - return ResponseTuple.success() - except Exception as e: - service_logger.error('Error when receive data from agent: ' + str(e), exc_info=True) - return ResponseTuple.error(msg=str(e)) - - def get(self): - return ResponseTuple.success() - - def delete(self): - return ResponseTuple.error(status_code=400) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/storage/sqlite_storage.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/storage/sqlite_storage.py deleted file mode 100644 index 72178792f..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/storage/sqlite_storage.py +++ /dev/null @@ -1,248 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import json -import logging -import os -import sqlite3 -import time - -import config -import global_vars -from utils import TimeString - -service_logger = logging.getLogger('service') -m_logger = logging.getLogger('detector') - - -class SQLiteStorage: - """ - This class is used for connecting to database and - acquiring data by timestamp or number. - """ - - def __init__(self, dbpath): - """ - :param table: string, name of table in database. - :param dbpath: string, name of database path. 
- """ - self._dbpath = dbpath - self._conn = None - self._cur = None - self.sql_operation = None - self.load_sql_operation() - self.storage_duration = TimeString(config.get('database', 'storage_duration')).to_second() - - def __enter__(self): - self.connect() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - - def load_sql_operation(self): - if not os.path.exists(global_vars.TABLE_INFO_PATH): - service_logger.error( - "Not found table information: {tableinfo}.".format(tableinfo=global_vars.TABLE_INFO_PATH)) - raise SystemExit(1) - with open(global_vars.TABLE_INFO_PATH, 'r') as f: - self.sql_operation = json.load(f) - - def connect(self): - self._conn = sqlite3.connect(self._dbpath, timeout=2) - self._cur = self._conn.cursor() - - def execute(self, sql): - try: - self._cur.execute(sql) - self._conn.commit() - except Exception as e: - service_logger.warning(e, exc_info=True) - self._conn.rollback() - - def fetch_all_result(self, sql): - self._cur.execute(sql) - result = self._cur.fetchall() - return result - - def execute_sql(self, sql): - try: - self._cur.execute(sql) - self._conn.commit() - except Exception as e: - service_logger.warning(e, exc_info=True) - self._conn.rollback() - - def create_table(self): - for table in self.sql_operation['sqlite']: - create_table = self.sql_operation['sqlite'][table]['create_table'] - self._cur.execute(create_table) - - def insert(self, table, *args): - insert_opt = self.sql_operation['sqlite'][table]['insert'] - parameters = tuple((item for item in args)) - sql = insert_opt % parameters - try: - self._cur.execute(sql) - except Exception as e: - service_logger.warning(e, exc_info=True) - self._conn.rollback() - return - self._conn.commit() - self.limit_capacity_by_time(table, parameters[0]) - - def limit_capacity_by_time(self, table, last_timestamp): - earliest_timestamp = self.get_earliest_timestamp(table) - margin_timestamp = last_timestamp - self.storage_duration - if earliest_timestamp < margin_timestamp: - limit_max_period_opt = self.sql_operation['sqlite'][table]['limit_max_periods'] - self._cur.execute(limit_max_period_opt % margin_timestamp) - self._conn.commit() - - def get_latest_timestamp(self, table): - operation = self.sql_operation['function']['get_latest_timestamp'] - self._cur.execute(operation.format(table=table)) - last_timestamp = self._cur.fetchall()[0][0] - return last_timestamp - - def get_earliest_timestamp(self, table): - operation = self.sql_operation['function']['get_earliest_timestamp'] - self._cur.execute(operation.format(table=table)) - earliest_timestamp = self._cur.fetchall()[0][0] - return earliest_timestamp - - def select_timeseries_by_timestamp(self, table, field, period, timestamp): - """ - Acquire all timeseries in a timedelta from the present. - :param timestamp: the timestamp of the end time. - :param field: field of database. - :param table: string, table name from database. - :param period: string, name of timedelta from now, like '100S', '1H'. - :return: list, timeseries dataset. 
- """ - timeseries = [] - times = 0 - times_limit = 5 - while times < times_limit: - try: - last_timestamp = timestamp - select_timestamp = last_timestamp - TimeString(period).to_second() - operation = self.sql_operation['function']['get_timeseries_by_timestamp'] - self._cur.execute( - operation.format(table=table, field=field, select_timestamp=select_timestamp)) - timeseries = self._cur.fetchall() - if not timeseries: - m_logger.warning("Get no time series from '{table}', retrying...".format(table=table)) - else: - return timeseries - except Exception as e: - m_logger.warning('An exception (%s) occurs when getting time series, retrying...', e, - exc_info=True) - times += 1 - time.sleep(0.2) - return timeseries - - def select_timeseries_by_number(self, table, field, number): - """ - Acquire number of timeseries from the present. - :param field: string, a field that needs be selected in the table. - :param table: string, table name in database. - :param number: int, number of timeseries from present. - :return: list, timeseries dataset. - """ - timeseries = [] - times = 0 - times_limit = 5 - while times < times_limit: - try: - operation = self.sql_operation['function']['get_timeseries_by_number'] - self._cur.execute(operation.format(table=table, field=field, number=number)) - timeseries = self._cur.fetchall() - if not timeseries: - m_logger.warning("Get no time series from '{table}', retrying...".format(table=table)) - else: - return timeseries - - except Exception as e: - m_logger.error('An exception (%s) occurs when getting time series: .', e, exc_info=True) - times += 1 - time.sleep(0.2) - return timeseries - - def get_timeseries(self, table, field, period, timestamp=None): - """ - Acquire timeseries from database by timestamp or number. - :param table: string, table name from database. - :param period: int or string, represent number or period like: 100, '100S'. - :return: list, timeseries dataset. - """ - if not self.check_table(table): - return [] - if isinstance(period, int): - timeseries = self.select_timeseries_by_number(table=table, field=field, number=period) - else: - timeseries = self.select_timeseries_by_timestamp(table=table, field=field, period=period, - timestamp=timestamp) - timeseries = list(map(lambda x: (x[0], float(x[1])), timeseries)) - return timeseries - - def get_all_tables(self): - """ - Acquire all tables in database. - :return: list, list of table name - """ - operation = self.sql_operation['function']['get_all_tables'] - self._cur.execute(operation) - tables = self._cur.fetchall() - tables = [item[0] for item in tables] - return tables - - def get_all_fields(self, table): - operation = self.sql_operation['function']['get_all_fields'] - self._cur.execute(operation.format(table=table)) - fields = self._cur.fetchall() - fields = [item[1] for item in fields[1:]] - return fields - - def get_table_rows(self, table): - """ - Acquire table rows. - :return: string, table name. - """ - if self.check_table(table): - operation = self.sql_operation['function']['get_table_rows'] - self._cur.execute(operation.format(table=table)) - table_rows = self._cur.fetchall() - table_rows = table_rows[0][0] - else: - table_rows = 0 - return table_rows - - def check_table(self, table): - """ - Check if table is in the database. - :param table: string, table name. - :return: boolean, True/False. 
- """ - operation = self.sql_operation['function']['check_table'] - self._cur.execute(operation) - tables = self._cur.fetchall() - tables = [item[0] for item in tables] - if table not in tables: - return False - return True - - def close(self): - self._cur.close() - self._conn.close() diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/__init__.py deleted file mode 100644 index 7ed6f5e5c..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/slow_sql/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/slow_sql/__init__.py deleted file mode 100644 index c627c6366..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/slow_sql/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
-""" -import logging -import sys -import os -import time - -import global_vars -from detector.tools.slow_sql import diagnosing - -sql_rca_logger = logging.getLogger('sql_rca') -detector_logger = logging.getLogger('detector') - - -class SQL_RCA: - def __init__(self, **kwargs): - self.data_handler = kwargs['data_handler'] - self.database_dir = kwargs['database_dir'] - self.interval = kwargs['interval'] - - def run(self): - if not os.path.exists(self.database_dir): - detector_logger.error("{database} is not found.".format(database=self.database_dir)) - sys.exit(0) - - try: - detector_start_time = global_vars.DETECTOR_START_TIME - detector_end_time = int(time.time()) - global_vars.DETECTOR_START_TIME = detector_end_time - sql = "select query, start_time, finish_time from {table} where timestamp between {start_time} and {end_time};" - for database in os.listdir(self.database_dir): - if 'journal' in database: - continue - - database_path = os.path.join(self.database_dir, database) - with self.data_handler(database_path) as db: - results = db.fetch_all_result( - sql.format(table='wdr', start_time=detector_start_time, end_time=detector_end_time)) - for query, start_time, finish_time in results: - index = 1 - if 'pg_stat_activity' in query: - continue - # prevent question marks from causing errors in the root cause analysis module - input_query = query.replace('?', '2') - rcas = diagnosing.diagnose_auto(db, input_query, start_time) - sql_rca_logger.info( - "START_TIME: %s, FINISH_TIME: %s.\n SQL Query: %s.", start_time, finish_time, query - ) - - if not rcas: - rca_ana = "query has no slow features." - suggestion_ana = "please check the query threshold, check the log, and analyze the reason." - sql_rca_logger.info( - "RCA: {rca}; Suggestion: {suggestion}".format(index=index, - rca=rca_ana, - suggestion=suggestion_ana)) - - else: - for rca, suggestion in rcas: - sql_rca_logger.info( - "{index}: RCA: {rca}; Suggestion: {suggestion}".format(index=index, - rca=rca, - suggestion=suggestion)) - index += 1 - sql_rca_logger.info('\n') - except Exception as e: - detector_logger.error(str(e), exc_info=True) - sys.exit(-1) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/slow_sql/diagnosing.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/slow_sql/diagnosing.py deleted file mode 100644 index 289c164dc..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/slow_sql/diagnosing.py +++ /dev/null @@ -1,309 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
-""" -import ast -import time -from collections import Counter - -import numpy as np - -import config -import global_vars -from utils.plan_parsing import Plan -from utils.sql_parsing import get_indexes -from utils.sql_parsing import sql_parse - -MAX_CPU_RATE = 30.0 -TIME_SCALE = 600 -QPS_GROWN_RATE = 30 -PLAN_CAUSE = { - 'Scan': { - 'FuzzyQuery': 'FuzzyQuery: SQL statement uses the keyword "like" causes fuzzy query', - 'IsNotNULL': 'IndexFailure: SQL statement uses the keyword "is not null" causes indexes failure', - 'UnEqual': 'IndexFailure: SQL statement uses the keyword "!=" causes indexes failure', - 'Function': 'IndexFailure: Function operations are used in the WHERE ' - 'clause causes the SQL engine to abandon ' - ' indexes and use the full table scan', - 'OR': 'NeedIndex: The SQL or condition contains columns which are not ' - 'all created indexes', - 'FullScan': 'FullScan: Select all columns from the table, which causes the full scan', - 'Update': 'FullScan: The UPDATE statement updates all columns', - 'ExprInWhere': 'IndexFailure: Expression manipulation of the WHERE ' - 'clause causes the SQL engine to abandon ' - 'the use of indexes in favor of full table scans', - 'NotIn': 'FullScan: SQL statement uses the keyword "IN" or "NOT IN" causes the full scan', - 'RangeTooLarge': 'RangeTooLarge: Condition range is too large', - - }, - 'Sort': { - 'ExternalSorting': 'ExternalSorting: The cost of query statement sorting is too high, ' - 'resulting in slow SQL', - }, - 'Join': { - 'NestLoop': 'NestLoop: The slow execution of "NestLoop" operator during JOIN ' - 'operation of a large table, resulting in slow SQL', - }, - 'Aggregate': { - 'SortAggregate': 'SortAggregate: For the aggregate operation of the large result set, ' - '"Sort Aggregate" operator has poor performance' - }, - 'Redistribute': { - 'DataSkew': 'DataSkew: Data redistribution during the query execution' - }, - 'Insert': { - 'DataRes': 'Database Resources: A large number of data or indexes are involved' - }, - 'Delete': { - 'DataRes': 'Database Resources: A large number of data or indexes are involved' - } -} - -NON_PLAN_CAUSE = { - 'LoadRequestCrowded': 'External Resources: Database request crowded', - 'RedundantIndex': 'External Resources: There are a large number of redundant ' - 'indexes in related columns, resulting in slow insert performance', - 'ResourceShortage': 'External Resources: External processes occupy a large ' - 'number of system resources, resulting in a database resource shortage', -} - -PLAN_SUGGESTION = { - 'Scan': { - 'FuzzyQuery': 'Avoid fuzzy queries or do not use full fuzzy queries', - 'IsNotNULL': 'Do not use "is not null", otherwise, the index will be invalid', - 'UnEqual': 'Change the unequal sign to "or"', - 'Function': 'Avoid using function operations in the where clause', - 'OR': 'Create indexes on all related columns', - 'FullScan': 'Please stop this operation if it is not necessary', - 'Update': 'If only one or two columns are changed, do not update all columns, ' - 'otherwise, frequent calls will cause significant performance consumption', - 'ExprInWhere': 'Change the WHERE clause, do not perform expression operations on the columns', - 'NotIn': 'For continuity values, you can use the keyword "between" instead', - 'RangeTooLarge': 'Please reduce query range', - - }, - 'Sort': { - 'ExternalSorting': 'Adjust the size of work_mem', - }, - 'Join': { - 'NestLoop': 'Turn off NestLoop by setting the GUC parameter "enable_nestloop" to off, ' - 'and let the optimizer choose other join methods', - }, 
- 'Aggregate': { - 'SortAggregate': 'By setting the GUC parameter enable_sort to off, let the optimizer ' - 'select the HashAgg operator' - }, - 'Redistribute': { - 'DataSkew': 'It is recommended to use the distribution key recommending tool ' - 'to recommend appropriate distribution keys to avoid data skew' - }, - 'Insert': { - 'DataRes': 'You can use "copy" instead of "insert"' - }, - 'Delete': { - 'DataRes': 'You may launch batch deletions or remove and recreate indexes' - } -} - -NON_PLAN_SUGGESTION = { - 'LoadRequestCrowded': 'It is recommended to change the free time to execute', - 'RedundantIndex': 'Delete the duplicate index before insert', - 'ResourceShortage': 'Stop unnecessary large processes', -} - - -def is_abnormal(opt, opts, n_sigma=3): - opts_cost = list(map(lambda o: o.exec_cost, opts)) - mean = np.mean(opts_cost) - std = np.std(opts_cost) - three_sigma = mean + n_sigma * std - if opt.exec_cost >= three_sigma: - return True - else: - return False - - -def linear_fitting(datas): - datax = np.arange(1, 1 + len(datas), 1) - datay = np.array(datas, dtype='float') - growth_rate, intercept = np.polyfit(datax, datay, deg=1) - growth_rate = round(growth_rate, 4) - intercept = round(intercept, 4) - return growth_rate, intercept - - -def do_resource_check(dbagent, sql, timestamp, rca, suggestion): - """ - Get RCA of system resource. - :param dbagent: obj, interface for sqlite3. - :param sql: str, query. - :param timestamp: int, timestamp. - :param rca: list, store of rca. - :param suggestion: store of rca's suggestion. - """ - count = Counter(get_indexes(dbagent, sql, timestamp)) - if len(count): - if (max(count.values())) > 3: - rca.append(NON_PLAN_CAUSE['RedundantIndex']) - suggestion.append(NON_PLAN_SUGGESTION['RedundantIndex']) - - cpu_res = dbagent.fetch_all_result( - 'SELECT process from database_exporter where timestamp == "{timestamp}"'.format(timestamp=timestamp))[0][0] - cpu_mem_res = ast.literal_eval(cpu_res).values() - cpu_res = [float(x.split(':')[0]) for x in list(cpu_mem_res)] - if sum(cpu_res) > MAX_CPU_RATE: - rca.append(NON_PLAN_CAUSE['ResourceShortage']) - suggestion.append(NON_PLAN_SUGGESTION['ResourceShortage']) - source_timer_interval = config.get('agent', 'source_timer_interval') # 10S -> 300S - time_value, time_unit = int(source_timer_interval[:-1]) * 12, source_timer_interval[-1] - qps_list = dbagent.select_timeseries_by_timestamp('database_exporter', - 'qps', str(time_value) + time_unit, timestamp) - qps = [int(item[1]) for item in qps_list] - growth_rate, _ = linear_fitting(qps) - if growth_rate >= QPS_GROWN_RATE: - rca.append(NON_PLAN_CAUSE['LoadRequestCrowded']) - suggestion.append(NON_PLAN_SUGGESTION['LoadRequestCrowded']) - - -def analyze_scan(dbagent, sql_stmt, timestamp, rca, suggestion): - case = sql_parse(dbagent, sql_stmt, timestamp) - if case is not None: - rca.append(PLAN_CAUSE['Scan'][case]) - suggestion.append(PLAN_SUGGESTION['Scan'][case]) - - -def analyze_sort(dbagent, start_time, finish_time, heaviest_opt, rca, suggestion): - start_time = int(time.mktime(time.strptime(start_time, global_vars.DATE_FORMAT))) - finish_time = int(time.mktime(time.strptime(finish_time, global_vars.DATE_FORMAT))) - res = dbagent.fetch_all_result('SELECT temp_file from database_exporter ' - 'where timestamp between "{start_time}" and "{finish_time}"' - .format(start_time=start_time, finish_time=finish_time)) - res = [item[0] for item in res] - if heaviest_opt.name == 'Sort' and 't' in res: - rca.append(PLAN_CAUSE['Sort']['ExternalSorting']) - 
suggestion.append(PLAN_SUGGESTION['Sort']['ExternalSorting']) - - -def analyze_join(heaviest_opt, rca, suggestion): - if heaviest_opt.name == 'Nested Loop': - rca.append(PLAN_CAUSE['Join']['NestLoop']) - suggestion.append(PLAN_SUGGESTION['Join']['NestLoop']) - - -def analyze_agg(heaviest_opt, rca, suggestion): - if heaviest_opt.name == 'SortAggregate': - rca.append(PLAN_CAUSE['Aggregate']['SortAggregate']) - suggestion.append(PLAN_SUGGESTION['Aggregate']['SortAggregate']) - - -def diagnose_auto(dbagent, query, start_time): - """ - Get RCA of system resource. - :param dbagent: obj, interface for sqlite3. - :param query: str, query. - :param start_time: int, start_time. - :return - """ - rca = [] - suggestion = [] - plan = Plan() - timestamp, start_time, finish_time, explain = dbagent.fetch_all_result('select timestamp, start_time, finish_time,' - 'explain from wdr where start_time == "{start_time}"' - .format(start_time=start_time))[0] - plan.parse(explain) - operators = plan.sorted_operators - if not len(operators): - return [] - heaviest_opt = operators[0] - - for operator in operators: - if str.startswith(operator.name, 'Vector Streaming(type: REDISTRIBUTE)'): - rca.append(PLAN_CAUSE['Redistribute']['DataSkew']) - suggestion.append(PLAN_SUGGESTION['Redistribute']['DataSkew']) - return [[x_rca, x_sug] for x_rca, x_sug in zip(rca, suggestion)] - - if heaviest_opt.type == 'Scan': - analyze_scan(dbagent, query, timestamp, rca, suggestion) - elif heaviest_opt.type == 'Sort': - analyze_sort(dbagent, start_time, finish_time, heaviest_opt, rca, suggestion) - elif heaviest_opt.type == 'Other': - if str.startswith(heaviest_opt.name, 'Update'): - analyze_scan(dbagent, query, timestamp, rca, suggestion) - elif str.startswith(heaviest_opt.name, 'Insert'): - rca.append(PLAN_CAUSE['Insert']['DataRes']) - suggestion.append(PLAN_SUGGESTION['Insert']['DataRes']) - elif str.startswith(heaviest_opt.name, 'Delete'): - rca.append(PLAN_CAUSE['Delete']['DataRes']) - suggestion.append(PLAN_SUGGESTION['Delete']['DataRes']) - analyze_join(heaviest_opt, rca, suggestion) - elif heaviest_opt.type == 'Aggregate': - analyze_agg(heaviest_opt, rca, suggestion) - do_resource_check(dbagent, query, timestamp, rca, suggestion) - - zip_rca = [[x_rca, x_sug] for x_rca, x_sug in zip(rca, suggestion)] - return zip_rca - - -def diagnose_user(dbagent, query, start_time): - """ - Get RCA of system resource. - :param dbagent: obj, interface for sqlite3. - :param query: str, query. - :param start_time: int, start_time. 
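A worked example of the trend test in `do_resource_check` above: `linear_fitting` fits `qps ≈ a·x + b` with `np.polyfit`, and a slope at or above `QPS_GROWN_RATE` (30) marks the load as crowded. The sample values are invented:

```
import numpy as np

qps = [100, 140, 175, 210, 250, 290]  # illustrative QPS samples
x = np.arange(1, 1 + len(qps))        # same 1-based axis linear_fitting uses
growth_rate, intercept = np.polyfit(x, np.array(qps, dtype='float'), deg=1)
print(round(growth_rate, 4))          # ~37.5714 -> >= 30, so LoadRequestCrowded fires
```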
- :return - """ - rca = [] - suggestion = [] - plan = Plan() - timestamp, start_time, finish_time, explain = dbagent.fetch_all_result('select timestamp, start_time, finish_time, ' - ' explain from wdr where start_time == "{start_time}"' - .format(start_time=start_time))[0] - plan.parse(explain) - operators = plan.sorted_operators - if not len(operators): - return [] - heaviest_opt = operators[0] - - for operator in operators: - if str.startswith(operator.name, 'Vector Streaming(type: REDISTRIBUTE)'): - rca.append(PLAN_CAUSE['Redistribute']['DataSkew']) - suggestion.append(PLAN_SUGGESTION['Redistribute']['DataSkew']) - zip_rca = [[x_rca, x_sug] for x_rca, x_sug in zip(rca, suggestion)] - zip_rca.insert(0, finish_time) - zip_rca.insert(0, start_time) - return zip_rca - - if heaviest_opt.type == 'Scan': - analyze_scan(dbagent, query, timestamp, rca, suggestion) - elif heaviest_opt.type == 'Sort': - analyze_sort(dbagent, start_time, finish_time, heaviest_opt, rca, suggestion) - elif heaviest_opt.type == 'Other': - if str.startswith(heaviest_opt.name, 'Update'): - analyze_scan(dbagent, query, timestamp, rca, suggestion) - elif str.startswith(heaviest_opt.name, 'Insert'): - rca.append(PLAN_CAUSE['Insert']['DataRes']) - suggestion.append(PLAN_SUGGESTION['Insert']['DataRes']) - elif str.startswith(heaviest_opt.name, 'Delete'): - rca.append(PLAN_CAUSE['Delete']['DataRes']) - suggestion.append(PLAN_SUGGESTION['Delete']['DataRes']) - analyze_join(heaviest_opt, rca, suggestion) - - elif heaviest_opt.type == 'Aggregate': - analyze_agg(heaviest_opt, rca, suggestion) - do_resource_check(dbagent, query, timestamp, rca, suggestion) - - zip_rca = [[x_rca, x_sug] for x_rca, x_sug in zip(rca, suggestion)] - zip_rca.insert(0, finish_time) - zip_rca.insert(0, start_time) - return zip_rca - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/__init__.py deleted file mode 100644 index 7ed6f5e5c..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/detect.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/detect.py deleted file mode 100644 index 1dcb29e93..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/detect.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
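The two diagnosis entry points share one return contract: parallel cause/suggestion lists zipped into pairs, with `diagnose_user` additionally prepending the matched start and finish times; this is why its caller reads `diagnose_result[0]`, `diagnose_result[1]`, and `diagnose_result[2:]`. A toy illustration of that shape (the timestamps and texts are invented):

```
# Toy illustration of the diagnose_user return shape (values are invented).
rca = ['FullScan: Select all columns from the table, which causes the full scan']
suggestion = ['Please stop this operation if it is not necessary']

zip_rca = [[cause, advice] for cause, advice in zip(rca, suggestion)]
zip_rca.insert(0, '2021-04-08 20:31:44')  # finish_time is inserted first ...
zip_rca.insert(0, '2021-04-08 20:31:41')  # ... then start_time, so it lands in front

start_time, finish_time = zip_rca[0], zip_rca[1]
for cause, advice in zip_rca[2:]:
    print(cause, '->', advice)
```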
-""" -import logging -from collections import OrderedDict -from itertools import groupby - -a_logger = logging.getLogger('abnormal') -m_logger = logging.getLogger('detector') - - -class Detector: - """ - This class is used for detecting result of forecastor, if the result from forecastor - is beyond expectation, it can provide alarm function in log file. - """ - - @staticmethod - def detect(forecast_result): - def mapper_function(val): - if val > maximum: - return val, 'higher' - elif val < minimum: - return val, 'lower' - else: - return val, 'normal' - - if forecast_result['status'] == 'fail': - return - - metric_name = forecast_result['metric_name'] - future_value = forecast_result['future_value'] - future_date = forecast_result['future_date'] - minimum = forecast_result['detect_basis']['minimum'] - maximum = forecast_result['detect_basis']['maximum'] - - if minimum is None and maximum is not None: - minimum = '-inf' - value_map_result = list(map(lambda x: (x, 'higher') if x > maximum else (x, 'normal'), future_value)) - elif maximum is None and minimum is not None: - maximum = 'inf' - value_map_result = list(map(lambda x: (x, 'lower') if x < minimum else (x, 'normal'), future_value)) - else: - value_map_result = list(map(mapper_function, future_value)) - forecast_condition = OrderedDict(zip(future_date, value_map_result)) - for key, value in groupby(list(forecast_condition.items()), key=lambda item: item[1][1]): - metric_status = key - metric_date_value_scope = [(item[0], item[1][0]) for item in value] - maximum_forecast_value = round(max([item[1] for item in metric_date_value_scope]), 3) - minimum_forecast_value = round(min([item[1] for item in metric_date_value_scope]), 3) - if metric_status == 'normal': - if len(metric_date_value_scope) == 1: - m_logger.info('The forecast value of [{metric}]({minimum}~{maximum})' - ' at {date} is ({forecast_value}) [{metric_status}].' - .format(metric=metric_name, - minimum=minimum, - maximum=maximum, - forecast_value=metric_date_value_scope[0][1], - metric_status=metric_status, - date=metric_date_value_scope[0][0])) - else: - m_logger.info('The forecast value of [{metric}]({minimum}~{maximum}) in ' - '[{start_date}~{end_date}] is between ({minimum_forecast_value}' - '~{maximum_forecast_value}) [{metric_status}].' - .format(metric=metric_name, - minimum=minimum, - maximum=maximum, - minimum_forecast_value=minimum_forecast_value, - maximum_forecast_value=maximum_forecast_value, - metric_status=metric_status, - start_date=metric_date_value_scope[0][0], - end_date=metric_date_value_scope[-1][0])) - else: - if len(metric_date_value_scope) == 1: - a_logger.warning('The forecast value of [{metric}]({minimum}~{maximum})' - ' at {date} is ({forecast_value}) [{metric_status}].' - .format(metric=metric_name, - minimum=minimum, - maximum=maximum, - forecast_value=metric_date_value_scope[0][1], - metric_status=metric_status, - date=metric_date_value_scope[0][0])) - else: - a_logger.warning('The forecast value of [{metric}]({minimum}~{maximum}) in ' - '[{start_date}~{end_date}] is between ({minimum_forecast_value}' - '~{maximum_forecast_value}) [{metric_status}].' 
- .format(metric=metric_name, - minimum=minimum, - maximum=maximum, - minimum_forecast_value=minimum_forecast_value, - maximum_forecast_value=maximum_forecast_value, - metric_status=metric_status, - start_date=metric_date_value_scope[0][0], - end_date=metric_date_value_scope[-1][0])) diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/forecast.py b/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/forecast.py deleted file mode 100644 index 0d4302d1f..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/detector/tools/trend/forecast.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import logging -import sys -import os - -from detector.tools.trend.detect import Detector - -m_logger = logging.getLogger('detector') - - -class Forecaster: - """ - This class is used for forecasting future trends for timeseries based on - timeseries forecast algorithm - """ - - def __init__(self, *args, **kwargs): - self.minimum_timeseries_length = 4 - self.database_dir = kwargs['database_dir'] - self.forecast_alg = kwargs['forecast_alg'] - self.data_period = kwargs['data_period'] - self.interval = kwargs['interval'] - self.period = kwargs['period'] - self.freq = kwargs['freq'] - self.data_handler = kwargs['data_handler'] - self.trend_service_list = kwargs['trend_service_list'] - self.forecast_handler = kwargs['forecast_handler'] - self.metric_config = kwargs['metric_config'] - - def run(self): - for database in os.listdir(self.database_dir): - with self.data_handler(os.path.join(self.database_dir, database)) as db: - if 'journal' in database: - continue - try: - tables = db.get_all_tables() - last_timestamp = None - for table in self.trend_service_list: - if table not in tables: - m_logger.warning("Table {table} is not in {database}.".format(table=table, database=database)) - continue - fields = db.get_all_fields(table) - if not last_timestamp: - last_timestamp = db.get_latest_timestamp(table) - for field in fields: - forecast_result = {} - timeseries = db.get_timeseries(table=table, field=field, period=self.data_period, - timestamp=last_timestamp) - if not timeseries: - m_logger.error("Can not get time series from {table}-{field} by period '{period}', " - "skipping forecast.".format(table=table, field=field, - period=self.data_period)) - forecast_result['status'] = 'fail' - continue - - if len(timeseries) < self.minimum_timeseries_length: - m_logger.error( - "The length of time series in {table}-{field} is too short: [{ts_length}], " - "so you can adjust 'data_period'.".format(table=table, field=field, - ts_length=len(timeseries))) - continue - self.forecast_handler.fit(timeseries) - date, value = self.forecast_handler.forecast(period=self.period, freq=self.freq) - try: - minimum = None if not self.metric_config.has_option( - table, field + '_minimum') else self.metric_config.getfloat( - table, field + '_minimum') - maximum = None if not self.metric_config.has_option( - table, field + '_maximum') else self.metric_config.getfloat( - table, field + 
'_maximum')
-                        except Exception as e:
-                            m_logger.error("{table} - {field}: {err}".format(table=table, field=field, err=str(e)))
-                            continue
-                        if minimum is None and maximum is None:
-                            m_logger.error("{table} - {field}: neither minimum nor maximum is provided; "
-                                           "you should provide at least one of them.".format(table=table, field=field))
-                            continue
-                        if minimum is not None and maximum is not None and minimum > maximum:
-                            m_logger.error("{table} - {field}: the minimum is greater than the maximum.".format(table=table, field=field))
-                            continue
-                        detect_basis = {'minimum': minimum, 'maximum': maximum}
-                        forecast_result['status'] = 'success'
-                        forecast_result['metric_name'] = table + '-->' + field
-                        forecast_result['detect_basis'] = detect_basis
-                        forecast_result['future_date'] = date
-                        forecast_result['future_value'] = value
-                        Detector.detect(forecast_result)
-            except Exception as e:
-                m_logger.error(str(e), exc_info=True)
-                sys.exit(-1)
diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/global_vars.py b/src/gausskernel/dbmind/tools/anomaly_detection/global_vars.py
deleted file mode 100644
index c72f70b4f..000000000
--- a/src/gausskernel/dbmind/tools/anomaly_detection/global_vars.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Copyright (c) 2020 Huawei Technologies Co.,Ltd.
-
-openGauss is licensed under Mulan PSL v2.
-You can use this software according to the terms and conditions of the Mulan PSL v2.
-You may obtain a copy of Mulan PSL v2 at:
-
-         http://license.coscl.org.cn/MulanPSL2
-
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-See the Mulan PSL v2 for more details.
-"""
-
-import os
-import time
-
-CURRENT_DIRNAME = os.path.dirname(os.path.abspath(__file__))
-PROJECT_NAME = os.path.basename(CURRENT_DIRNAME)
-CONFIG_PATH = os.path.join(CURRENT_DIRNAME, 'a-detection.conf')
-METRIC_CONFIG_PATH = os.path.join(CURRENT_DIRNAME, 'task/metric_task.conf')
-BIN_PATH = os.path.join(CURRENT_DIRNAME, 'bin')
-TABLE_INFO_PATH = os.path.join(CURRENT_DIRNAME, 'table.json')
-TASK_PATH = os.path.join(CURRENT_DIRNAME, 'task')
-DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-APP_START_TIME = int(time.time())
-SLOW_START_TIME = APP_START_TIME
-DETECTOR_START_TIME = APP_START_TIME
diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/main.py b/src/gausskernel/dbmind/tools/anomaly_detection/main.py
deleted file mode 100644
index 12341e276..000000000
--- a/src/gausskernel/dbmind/tools/anomaly_detection/main.py
+++ /dev/null
@@ -1,334 +0,0 @@
-"""
-Copyright (c) 2020 Huawei Technologies Co.,Ltd.
-
-openGauss is licensed under Mulan PSL v2.
-You can use this software according to the terms and conditions of the Mulan PSL v2.
-You may obtain a copy of Mulan PSL v2 at:
-
-         http://license.coscl.org.cn/MulanPSL2
-
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-See the Mulan PSL v2 for more details.
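`global_vars` is not just constants: `SLOW_START_TIME` is shared mutable state marking the left edge of the next WDR collection window, and the `wdr.py` task further down advances it on every scrape. A minimal sketch of that incremental-window pattern (the `state` class stands in for the imported module):

```
# Incremental collection window driven by module-level state, as in wdr.py below.
# 'state' stands in for the imported global_vars module.
import time


class state:
    SLOW_START_TIME = int(time.time())


def collect_window():
    start = state.SLOW_START_TIME
    end = int(time.time())
    state.SLOW_START_TIME = end  # the next scrape starts where this one ended
    return start, end


print(collect_window())
time.sleep(2)
print(collect_window())  # begins exactly at the previous window's end
```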
-""" -import argparse -import shlex -import subprocess -import sys - -import config -import global_vars -from deamon import Daemon -from detector.tools.slow_sql import diagnosing -from global_vars import * -from utils import check_time_legality, check_port_occupancy, check_collector, check_db_alive - -sys.path.append(CURRENT_DIRNAME) - -__version__ = '1.0.0' -__description__ = 'Anomaly-detection: a time series forecast and anomaly detection tool.' -__epilog__ = """ -epilog: - the 'a-detection.conf' and 'metric_task.conf' will be read when the program is running, - the location of them is: - a-detection.conf: {detection}. - metric_config: {metric_config}. - """.format(detection=CONFIG_PATH, - metric_config=METRIC_CONFIG_PATH) - - -def usage(): - return """ - python main.py start [--role {{agent,collector,monitor}}] # start local service. - python main.py stop [--role {{agent,collector,monitor}}] # stop local service. - python main.py start [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector,monitor}}] - # start the remote service. - python main.py stop [--user USER] [--host HOST] [--project-path PROJECT_PATH] [--role {{agent,collector, - monitor}}] # stop the remote service. - python main.py deploy [--user USER] [--host HOST] [--project-path PROJECT_PATH] # deploy project in remote host. - python main.py diagnosis [--query] [--start_time] [--finish_time] # rca for slow SQL. - python main.py show_metrics # display all monitored metrics(can only be executed on 'detector' machine). - python main.py forecast [--metric-name METRIC_NAME] [--period] [--freq] - [--forecast-method {{auto_arima, fbprophet}}] [--save-path SAVE_PATH] # forecast future trend of - metric(can only be executed on 'detector' machine). """ - - -def parse_args(): - parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, - description=__description__, - usage=usage(), - epilog=__epilog__) - - parser.add_argument('mode', choices=['start', 'stop', 'deploy', 'show_metrics', 'forecast', 'diagnosis']) - parser.add_argument('--user', help="User of remote server.") - parser.add_argument('--host', help="IP of remote server.") - parser.add_argument('--project-path', help="Project location in remote server.") - parser.add_argument('--role', choices=['agent', 'collector', 'monitor'], - help="Run as 'agent', 'collector', 'monitor'. " - "Notes: ensure the normal operation of the openGauss in agent.") - parser.add_argument('--metric-name', help="Metric name to be predicted, if this parameter is not provided, " - "all metric in database will be predicted.") - parser.add_argument('--query', help="target sql for RCA") - parser.add_argument('--start_time', help="start time of query") - parser.add_argument('--finish_time', help="finish time of query") - parser.add_argument('--period', default=1, - help="Forecast periods of metric, it should be integer" - "notes: the specific value should be determined to the trainnig data." - "if this parameter is not provided, the default value '100S' will be used.") - - parser.add_argument('--freq', default='S', help="forecast gap, time unit: " - "S: Second, " - "M: Minute, " - "H: Hour, " - "D: Day, " - "W: Week. 
") - parser.add_argument('--forecast-method', default='auto_arima', - help="Forecast method, default method is 'auto_arima'," - "if want to use 'fbprophet', you should install fbprophet first.") - parser.add_argument('--save-path', - help='Save the results to this path using csv format, if this parameter is not provided,' - ', the result wil not be saved.') - parser.add_argument('-v', '--version', action='version') - parser.version = __version__ - return parser.parse_args() - - -def forecast(args): - from prettytable import PrettyTable - from detector.algorithm import get_fcst_alg - from detector.service.storage.sqlite_storage import SQLiteStorage - from utils import StdStreamSuppressor - - display_table = PrettyTable() - display_table.field_names = ['Metric name', 'Date range', 'Minimum', 'Maximum', 'Average'] - - database_dir = config.get('database', 'database_dir') - - if not args.forecast_method: - forecast_alg = get_fcst_alg('auto_arima')() - else: - forecast_alg = get_fcst_alg(args.forecast_method)() - - def forecast_metric(name, train_ts, save_path=None): - with StdStreamSuppressor(): - forecast_alg.fit(timeseries=train_ts) - dates, values = forecast_alg.forecast( - period=int(args.period) + 1, freq=args.freq) - date_range = "{start_date}~{end_date}".format(start_date=dates[0], - end_date=dates[-1]) - display_table.add_row( - [name, date_range, min(values), max(values), sum(values) / len(values)] - ) - - if save_path: - if not os.path.exists(os.path.dirname(save_path)): - os.makedirs(os.path.dirname(save_path)) - with open(save_path, mode='w') as f: - for date, value in zip(dates, values): - f.write(date + ',' + str(value) + '\n') - - for database in os.listdir(database_dir): - with SQLiteStorage(os.path.join(database_dir, database)) as db: - table_rows = db.get_table_rows('os_exporter') - timeseries = db.get_timeseries(table='os_exporter', field=args.metric_name, period=table_rows) - forecast_metric(args.metric_name, timeseries, args.save_path) - - print(display_table.get_string()) - - -def slow_sql_rca(args): - from prettytable import PrettyTable - from detector.service.storage.sqlite_storage import SQLiteStorage - from utils import input_sql_processing, remove_comment - - if not args.query: - print('Error: no query input!') - return - user_query = args.query.split(';')[0] - start_time = args.start_time - finish_time = args.finish_time - if start_time and not check_time_legality(start_time): - print("error time format '{time}', using: {date_format}.".format(time=start_time, - date_format=global_vars.DATE_FORMAT)) - return - if finish_time and not check_time_legality(finish_time): - print("error time format '{time}', using: {date_format}.".format(time=finish_time, - date_format=global_vars.DATE_FORMAT)) - return - - database_dir = os.path.realpath(config.get('database', 'database_dir')) - display_table = PrettyTable() - display_table.field_names = ['database', 'start time', 'finish time', 'rca', 'suggestion'] - display_table.align = 'l' - for database in os.listdir(database_dir): - if 'journal' in database: - continue - try: - database_path = os.path.join(database_dir, database) - with SQLiteStorage(database_path) as db: - if start_time and finish_time: - results = db.fetch_all_result( - "select query, start_time, finish_time from wdr where start_time " - "between '{start_time}' and '{finish_time}';".format( - start_time=start_time, finish_time=finish_time)) - elif start_time: - results = db.fetch_all_result( - "select query, start_time, finish_time from wdr where start_time >= 
'{margin_time}';".format( - margin_time=start_time)) - elif finish_time: - results = db.fetch_all_result( - "select query, start_time, finish_time from wdr where finish_time <= '{margin_time}';".format( - margin_time=finish_time)) - else: - current_time = int(time.time()) - # If not input start_time and finish_time, then default search for 12 hours of historical data. - margin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(current_time - 43200)) - results = db.fetch_all_result( - "select query, start_time, finish_time from wdr where start_time >= '{margin_time}';".format( - margin_time=margin_time)) - if not results: - continue - for wdr_query, start_time, finish_time in results: - try: - processed_wdr_query = input_sql_processing(wdr_query).replace(' ', '') - processed_user_query = input_sql_processing(user_query).replace(' ', '') - if processed_user_query == processed_wdr_query: - user_query = remove_comment(user_query) - diagnose_result = diagnosing.diagnose_user(db, user_query, start_time) - start_time, finish_time = diagnose_result[0], diagnose_result[1] - rca_ana = "" - suggestion_ana = "" - if not diagnose_result[2:]: - rca_ana = "the query has no slow features or its syntax is incorrect." - suggestion_ana = "please check the query threshold, check the log, and analyze the reason." - else: - index = 1 - for rca, suggestion in diagnose_result[2:]: - rca_ana = rca_ana + "{index}: {rca}\n".format(index=index, rca=rca) - suggestion_ana = suggestion_ana + "{index}: {suggestion}\n".format(index=index, - suggestion=suggestion) - index += 1 - display_table.add_row([database, start_time, finish_time, rca_ana, suggestion_ana]) - except Exception as e: - # Prevent unknown accidents from causing the program to stop - continue - except Exception as e: - print(str(e)) - return - print(display_table.get_string()) - - -def deploy(args): - print('Please input the password of {user}@{host}: '.format(user=args.user, host=args.host)) - command = 'sh start.sh --deploy {host} {user} {project_path}' \ - .format(user=args.user, - host=args.host, - project_path=args.project_path) - if subprocess.call(shlex.split(command), cwd=BIN_PATH) == 0: - print("\nExecute successfully.") - else: - print("\nExecute unsuccessfully.") - - -def show_metrics(): - from prettytable import PrettyTable - from detector.service.storage.sqlite_storage import SQLiteStorage - - display_table = PrettyTable() - display_table.field_names = ['Metric name', 'Current rows'] - database_dir = config.get('database', 'database_dir') - - for database in os.listdir(database_dir): - with SQLiteStorage(os.path.join(database_dir, database)) as db: - table = 'os_exporter' - fields = db.get_all_fields(table) - rows = db.get_table_rows(table) - for field in fields: - display_table.add_row([field, rows]) - - print(display_table.get_string()) - - -def manage_local_service(args): - daemon = Daemon() - daemon.set_stdout(os.devnull).set_stderr(os.devnull) - - if args.role == 'collector': - from detector.service import service_main - - daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/collector.pid')) - daemon.set_function(service_main) - elif args.role == 'monitor': - from detector.metric_detector import detector_main - - daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/detector.pid')) - daemon.set_function(detector_main) - elif args.role == 'agent': - from agent.metric_agent import agent_main - - pre_check = check_collector() and check_db_alive(port=config.get('agent', 'db_port')) - if args.mode == 'start' and not pre_check: - 
print('FATAL: Agent process failed to start.', file=sys.stderr, flush=True) - return - - daemon.set_pid_file(os.path.join(CURRENT_DIRNAME, './tmp/agent.pid')) - daemon.set_function(agent_main) - else: - print('FATAL: incorrect parameter.') - print(usage()) - return - - if args.mode == 'start': - if args.role == 'collector': - listen_port = config.get('server', 'listen_port') - check_port_occupancy(listen_port) - daemon.start() - else: - daemon.stop() - - -def manage_remote_service(args): - print('Please input the password of {user}@{host}: '.format(user=args.user, host=args.host)) - if args.mode == 'start': - command = "sh start.sh --start_remote_service {host} {user} {project_path} {role}" \ - .format(user=args.user, - host=args.host, - role=args.role, - project_path=args.project_path) - else: - command = "sh stop.sh --stop_remote_service {host} {user} {project_path} {role}" \ - .format(user=args.user, - host=args.host, - role=args.role, - project_path=args.project_path) - if subprocess.call(shlex.split(command), cwd=BIN_PATH) == 0: - print("\nExecute successfully.") - else: - print("\nExecute unsuccessfully.") - - -def main(): - args = parse_args() - - if args.mode in ('start', 'stop') and all((args.user, args.host, args.project_path, args.role)): - manage_remote_service(args) - elif args.mode in ('start', 'stop') and args.role and not any((args.user, args.host, args.project_path)): - manage_local_service(args) - elif args.mode == 'deploy' and all((args.user, args.host, args.project_path)): - deploy(args) - elif args.mode == 'show_metrics': - show_metrics() - elif args.mode == 'forecast': - forecast(args) - elif args.mode == 'diagnosis': - slow_sql_rca(args) - else: - print("FATAL: incorrect parameter.") - print(usage()) - return -1 - - -if __name__ == '__main__': - main() - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/requirements.txt b/src/gausskernel/dbmind/tools/anomaly_detection/requirements.txt deleted file mode 100644 index 73bf8ec20..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -numpy -sqlparse -python-dateutil -pandas -flask -flask_restful -configparse -prettytable -pmdarima diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/share/gen_ca_certificate.sh b/src/gausskernel/dbmind/tools/anomaly_detection/share/gen_ca_certificate.sh deleted file mode 100644 index 514ed4471..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/share/gen_ca_certificate.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -# Copyright (c) 2020 Huawei Technologies Co.,Ltd. -# -# openGauss is licensed under Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. 
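`manage_local_service` above wires one `Daemon` object to a role-specific pid file and entry function before starting or stopping it. A compressed sketch of that dispatch pattern; this `Daemon` is a trivial stand-in rather than the project's `deamon.py`, and the entry function is a placeholder:

```
# Role -> (pid file, entry point) dispatch, as in manage_local_service.
# This Daemon is a trivial stand-in, not the project's deamon.py.
import os


class Daemon:
    def set_pid_file(self, path):
        self.pid_file = path
        return self  # chainable setters, mirroring the original API

    def set_function(self, func):
        self.function = func
        return self

    def start(self):
        print('daemonizing {} (pid file: {})'.format(self.function.__name__, self.pid_file))
        self.function()


def service_main():
    print('collector running')


ROLES = {'collector': ('tmp/collector.pid', service_main)}


def start_role(role):
    pid_file, entry = ROLES[role]
    Daemon().set_pid_file(os.path.abspath(pid_file)).set_function(entry).start()


start_role('collector')
```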
-#------------------------------------------------------------------------- -# -# gen_ca_certificate.sh -# generate certificate -# -#------------------------------------------------------------------------- -source ./initialize_certificate.sh - -ca_crt="ca.crt" -ca_key="ca.key" -ca_password="" - -read -s -p "please input the CA password: " ca_password - -cat > ca.conf <<-EOF -[req] -distinguished_name = req_distinguished_name -prompt = no - -[req_distinguished_name] -O = $PROJECT_NAME Certificate Authority -EOF - -expect <<-EOF - spawn /bin/openssl genrsa -aes256 -out ${ca_key} 2048 - expect "Enter pass phrase for" - send "${ca_password}\r" - expect "Verifying - Enter pass phrase for" - send "${ca_password}\r" - expect eof -EOF - -expect <<-EOF - spawn /bin/openssl req -new -out ca.req -key ${ca_key} -config ca.conf - expect "Enter pass phrase for" - send "${ca_password}\r" - expect eof -EOF - -expect <<-EOF - spawn /bin/openssl x509 -req -in ca.req -signkey ${ca_key} -days 7300 -out ${ca_crt} - expect "Enter pass phrase for" - send "${ca_password}\r" - expect eof -EOF - -mv ${ca_crt} ${ca_key} ${CURRENT_DIR}/${CA} -rm ca.req ca.conf -chmod 600 `find ${CURRENT_DIR}/${CA} -type f` - - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/share/gen_certificate.sh b/src/gausskernel/dbmind/tools/anomaly_detection/share/gen_certificate.sh deleted file mode 100644 index bfcb28fac..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/share/gen_certificate.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -# Copyright (c) 2020 Huawei Technologies Co.,Ltd. -# -# openGauss is licensed under Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -#------------------------------------------------------------------------- -# -# gen_certificate.sh -# generate certificate -# -#------------------------------------------------------------------------- - -source ./initialize_certificate.sh - -CA_CRT="${CURRENT_DIR}/${CA}/ca.crt" -CA_KEY="${CURRENT_DIR}/${CA}/ca.key" -local_host="" -ca_password="" -ssl_password="" -base_dir="" -file_name="" - -if [ ! -f ${CA_CRT} ]; then - echo "not found ${CA_CRT}." - exit 0 -fi - -if [ ! -f ${CA_KEY} ]; then - echo "not found ${CA_KEY}." - exit 0 -fi - -read -p "please input the basename of ssl certificate: " base_dir -read -p "please input the filename of ssl certificate: " file_name -read -p "please input the local host: " local_host -read -s -p "please input the password of ca and ssl separated by space: " ca_password ssl_password - -if [ ! 
-d ${base_dir}/ ]; then - mkdir -p ${base_dir} -fi - -key="${base_dir}/${file_name}.key" -crt="${base_dir}/${file_name}.crt" -req="${base_dir}/${file_name}.req" -pwf="${base_dir}" - -expect <<-EOF - spawn /bin/openssl genrsa -aes256 -out ${key} 2048 - expect "Enter pass phrase for" - send "${ssl_password}\r" - expect "Verifying - Enter pass phrase for" - send "${ssl_password}\r" - expect eof -EOF - -expect <<-EOF - spawn /bin/openssl req -new -out ${req} -key ${key} -subj "/C=CN/ST=Some-State/O=${file_name}/CN=${local_host}" - expect "Enter pass phrase for" - send "${ssl_password}\r" - expect eof -EOF - -expect <<-EOF - spawn /bin/openssl x509 -req -in ${req} -out ${crt} -sha256 -CAcreateserial -days 7000 -CA ${CA_CRT} -CAkey ${CA_KEY} - expect "Enter pass phrase for" - send "${ca_password}\r" - expect eof -EOF - -rm ${req} - -encrypt "${ssl_password}" ${pwf} ${pwf} -chmod 600 ${key} -chmod 600 ${crt} -chmod 700 ${pwf} diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/share/initialize_certificate.sh b/src/gausskernel/dbmind/tools/anomaly_detection/share/initialize_certificate.sh deleted file mode 100644 index db8671eb9..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/share/initialize_certificate.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# Copyright (c) 2020 Huawei Technologies Co.,Ltd. -# -# openGauss is licensed under Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -#------------------------------------------------------------------------- -# -# initialize_certificate.sh -# initialize certificate -# -#------------------------------------------------------------------------- -source ./common.sh - -SERVER="certificate/server" -AGENT="certificate/agent" -CA="certificate/ca" -PW_FILE="certificate/pwf" - -if [ ! -d ${CURRENT_DIR}/${SERVER} ]; then - mkdir -p ${CURRENT_DIR}/${SERVER} -fi - -if [ ! -d ${CURRENT_DIR}/${AGENT} ]; then - mkdir -p ${CURRENT_DIR}/${AGENT} -fi - -if [ ! 
-d ${CURRENT_DIR}/${CA} ]; then - mkdir -p ${CURRENT_DIR}/${CA} -fi - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/table.json b/src/gausskernel/dbmind/tools/anomaly_detection/table.json deleted file mode 100644 index 09c424cc6..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/table.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "sqlite": { - "database_exporter": { - "create_table": "create table if not exists database_exporter(timestamp bigint, guc_parameter text, current_connections int, qps int, process text, temp_file text);", - "insert": "insert into database_exporter values(%d, \"%s\", %d, %d, \"%s\", \"%s\");", - "count": "select count(1) from database_exporter;", - "limit_max_rows": "delete from database_exporter where timestamp in (select timestamp from database_exporter order by timestamp desc limit -1 offset %d)", - "limit_max_periods": "delete from database_exporter where timestamp < %d;" - }, - "os_exporter": { - "create_table": "create table if not exists os_exporter(timestamp bigint, cpu_usage text, io_wait text, io_read text, io_write text, memory_usage text, disk_space text);", - "insert": "insert into os_exporter values(%d, \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");", - "count": "select count(1) from os_exporter;", - "limit_max_rows": "delete from os_exporter where timestamp in (select timestamp from os_exporter order by timestamp desc limit -1 offset %d)", - "limit_max_periods": "delete from os_exporter where timestamp < %d;" - }, - "wdr": { - "create_table": "create table if not exists wdr(timestamp bigint, db_name text, table_name text, query text, explain text, start_time text, finish_time text, indexes text, wdr_features text);", - "insert": "insert into wdr values(%d, \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");", - "count": "select count(1) from wdr;", - "limit_max_rows": "delete from wdr where timestamp in (select timestamp from wdr order by timestamp desc limit -1 offset %d);", - "limit_max_periods": "delete from wdr where timestamp < %d;" - } - }, - "function": { - "check_table": "select name from sqlite_master where type = 'table'", - "get_table_rows": "select count(*) from {table}", - "get_all_fields": "PRAGMA table_info({table});", - "get_all_tables": "select name from sqlite_master where type = 'table'", - "get_timeseries_by_number": "select * from (select timestamp, {field} from {table} order by timestamp desc limit {number}) order by timestamp", - "get_timeseries_by_timestamp": "select timestamp, {field} from {table} where timestamp >= '{select_timestamp}'", - "get_earliest_timestamp": "select timestamp from {table} limit 1", - "get_latest_timestamp": "select timestamp from {table} order by timestamp desc limit 1" - } -} diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/task/database_exporter.py b/src/gausskernel/dbmind/tools/anomaly_detection/task/database_exporter.py deleted file mode 100644 index 9f91a6c16..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/task/database_exporter.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
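`table.json` above keeps every SQL string out of the Python code: per-table statements plus generic templates under `"function"` that the storage layer fills in with `str.format`. A minimal sketch of binding two of those templates to `sqlite3` directly (illustrative only; the project routes them through its `SQLiteStorage` class):

```
# Binding two of the "function" templates from table.json to sqlite3 directly.
# Illustrative only; the project routes these through SQLiteStorage.
import sqlite3

TEMPLATES = {
    'get_table_rows': 'select count(*) from {table}',
    'get_timeseries_by_number': 'select * from (select timestamp, {field} from {table} '
                                'order by timestamp desc limit {number}) order by timestamp',
}

conn = sqlite3.connect(':memory:')
conn.execute('create table os_exporter(timestamp bigint, cpu_usage text)')
conn.executemany('insert into os_exporter values (?, ?)',
                 [(1, '1.0'), (2, '2.0'), (3, '3.0')])

print(conn.execute(TEMPLATES['get_table_rows'].format(table='os_exporter')).fetchall())
# [(3,)]
print(conn.execute(TEMPLATES['get_timeseries_by_number'].format(
    table='os_exporter', field='cpu_usage', number=2)).fetchall())
# the two most recent samples, re-sorted ascending: [(2, '2.0'), (3, '3.0')]
```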
-See the Mulan PSL v2 for more details. -""" - -import os -import re -import shlex -import subprocess -import time - -from utils import DBAgent, convert_to_mb - - -class DatabaseExporter: - __tablename__ = 'database_exporter' - - def __init__(self, db_port): - self.port = db_port - - def guc_parameter(self): - """ - get database guc parameter - :return: {work_mem: value, shared_buffers: value, max_connections: value} - """ - result = [] - guc_names = ['work_mem', 'shared_buffers', 'max_connections'] - sql = "select setting, unit from pg_settings where name = '{guc_name}';" - with DBAgent(port=self.port, database='postgres') as db: - for guc_name in guc_names: - res = db.fetch_all_result(sql.format(guc_name=guc_name)) - if guc_name != 'max_connections': - res = convert_to_mb(str(res[0][0]) + res[0][1]) - result.append(res) - else: - result.append(res[0][0]) - result = ",".join(map(lambda x: str(x), result)) - return result - - def current_connections(self): - """ - Get current connections - :return: - """ - # Get current_connections: - sql = "select count(1) from pg_stat_activity;" - with DBAgent(port=self.port, database='postgres') as db: - result = db.fetch_all_result(sql)[0][0] - return result - 1 - - def qps(self): - sql = "select select_count+update_count+insert_count+delete_count from gs_sql_count;" - with DBAgent(port=self.port, database='postgres') as db: - num1 = db.fetch_all_result(sql) - time.sleep(0.1) - num2 = db.fetch_all_result(sql) - result = (num2[0][0] - num1[0][0] - 1) * 10 if num2[0][0] > num1[0][0] else 0 - return result - - def process(self): - result = {} - child1 = subprocess.Popen(shlex.split("ps -aux"), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(shlex.split("sort -k3nr"), stdin=child1.stdout, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=False) - child3 = subprocess.Popen(shlex.split("grep -v gaussdb"), stdin=child2.stdout, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=False) - child4 = subprocess.Popen(shlex.split("awk '{print $2,$3,$4,$11}'"), stdin=child3.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=False) - child5 = subprocess.Popen(shlex.split("head -4"), stdin=child4.stdout, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=False) - stream = child5.communicate() - if not stream[1]: - res = stream[0].decode('utf-8').strip() - res = res.split('\n') - for item in res: - match_res = re.match(r'(\d+) (\d+(?:\.\d+)) (\d+(?:\.\d+)) (.+)', item, re.DOTALL) - if not match_res: - continue - pid = match_res.group(1) - cpu_usage = match_res.group(2) - memory_usage = match_res.group(3) - process = match_res.group(4) - key = str(pid) + '_' + process - result[key] = str(cpu_usage) + ':' + str(memory_usage) - return str(result) - else: - return str(result) - - def temp_file(self): - sql = 'select datapath from pg_node_env;' - with DBAgent(port=self.port, database='postgres') as db: - datapath = db.fetch_all_result(sql)[0][0] - pgsql_tmp = os.path.join(datapath, 'base/pgsql_tmp') - if not os.path.exists(pgsql_tmp): - return 'f' - if len(os.listdir(pgsql_tmp)) > 0: - return 't' - else: - return 'f' - - def output(self): - result = [self.guc_parameter(), self.current_connections(), self.qps(), self.process(), self.temp_file()] - return result diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/task/metric_task.conf b/src/gausskernel/dbmind/tools/anomaly_detection/task/metric_task.conf deleted file mode 100644 index b88308d04..000000000 --- 
a/src/gausskernel/dbmind/tools/anomaly_detection/task/metric_task.conf +++ /dev/null @@ -1,23 +0,0 @@ -[detector_method] -trend = os_exporter -slow_sql = wdr - -[os_exporter] -cpu_usage_minimum = 10 -cpu_usage_maximum = 10 -memory_usage_minimum = 10 -memory_usage_maximum = 10 -io_read_minimum = 10 -io_read_maximum = 10 -io_write_minimum = 10 -io_write_maximum = 10 -io_wait_minimum = 10 -io_wait_maximum = 10 -disk_space_minimum = 10 -disk_space_maximum = 10 - -[common_parameter] -data_period = 3000S -interval = 60S -freq = 3S -period = 2 diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/task/metric_task.py b/src/gausskernel/dbmind/tools/anomaly_detection/task/metric_task.py deleted file mode 100644 index ee5e91d0f..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/task/metric_task.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import os -import subprocess - -from utils import convert_to_mb - - -def cpu_usage(): - child1 = subprocess.Popen(['ps', '-ux'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[2].decode('utf-8') - return result - - -def io_read(): - child1 = subprocess.Popen(['pidstat', '-d'], stdout=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[3].decode('utf-8') - return result - - -def io_write(): - child1 = subprocess.Popen(['pidstat', '-d'], stdout=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[4].decode('utf-8') - return result - - -def memory_usage(): - child1 = subprocess.Popen(['ps', '-ux'], stdout=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[3].decode('utf-8') - return result - - -def disk_space(): - pg_data = os.getenv('PGDATA') - if pg_data is None: - raise ValueError('not found PGDATA in environment.') - else: - pg_data = os.path.realpath(pg_data) - child = subprocess.Popen(['du', '-sh', pg_data], stdout=subprocess.PIPE, shell=False) - sub_chan = child.communicate() - if sub_chan[1] is not None: - raise ValueError('error when get disk usage of openGauss: {error}'. 
- format(error=sub_chan[1].decode('utf-8'))) - if not sub_chan[0]: - result = 0.0 - else: - result = convert_to_mb(sub_chan[0].decode('utf-8')) - return result diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/task/os_exporter.py b/src/gausskernel/dbmind/tools/anomaly_detection/task/os_exporter.py deleted file mode 100644 index fa386d098..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/task/os_exporter.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import os -import subprocess - -from utils import DBAgent, convert_to_mb - - -class OSExporter: - __tablename__ = 'os_exporter' - - def __init__(self, db_port): - self.port = db_port - - @staticmethod - def cpu_usage(): - child1 = subprocess.Popen(['ps', '-ux'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[2].decode('utf-8') - return result - - @staticmethod - def io_read(): - child1 = subprocess.Popen(['pidstat', '-d'], stdout=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[3].decode('utf-8') - return result - - @staticmethod - def io_write(): - child1 = subprocess.Popen(['pidstat', '-d'], stdout=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[4].decode('utf-8') - return result - - @staticmethod - def io_wait(): - child1 = subprocess.Popen(['iostat'], stdout=subprocess.PIPE, shell=False) - sub_chan = child1.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].decode("utf-8").split("\n")[3].split()[3] - return result - - @staticmethod - def memory_usage(): - child1 = subprocess.Popen(['ps', '-ux'], stdout=subprocess.PIPE, shell=False) - child2 = subprocess.Popen(['grep', 'gaussd[b]'], stdin=child1.stdout, stdout=subprocess.PIPE, shell=False) - sub_chan = child2.communicate() - if not sub_chan[0]: - result = 0.0 - else: - result = sub_chan[0].split()[3].decode('utf-8') - return result - - def disk_space(self): - sql = 'select datapath from pg_node_env;' - with DBAgent(port=self.port, database='postgres') as db: - datapath = db.fetch_all_result(sql)[0][0] - pg_data = os.path.realpath(datapath) - child = subprocess.Popen(['du', '-sh', pg_data], stdout=subprocess.PIPE, shell=False) - sub_chan = child.communicate() - if sub_chan[1] is not None: - raise ValueError('error when get disk usage of openGauss: {error}'. 
- format(error=sub_chan[1].decode('utf-8'))) - if not sub_chan[0]: - result = '0.0' - else: - result = str(convert_to_mb(sub_chan[0].decode('utf-8'))) - return result - - def output(self): - result = [self.cpu_usage(), self.io_wait(), self.io_read(), - self.io_write(), self.memory_usage(), self.disk_space()] - return result diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/task/wdr.py b/src/gausskernel/dbmind/tools/anomaly_detection/task/wdr.py deleted file mode 100644 index 81b645ab6..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/task/wdr.py +++ /dev/null @@ -1,69 +0,0 @@ -import logging -import re -import time - -import global_vars -from utils import DBAgent, extract_table_from_sql, input_sql_processing - - -class WDR: - __tablename__ = 'wdr' - - def __init__(self, db_port, db_type): - self.port = db_port - self.db_type = db_type - - def extract_index(self, database, table): - sql = "select indexname, indexdef from pg_indexes where tablename='{table}'".format(table=table) - with DBAgent(port=self.port, database=database) as db: - res = db.fetch_all_result(sql) - return dict(res) - - def mapper_function(self, value): - index_info = {} - database = value[0] - tables = extract_table_from_sql(value[1]) - if tables: - for table in tables: - indexes = self.extract_index(database, table) - index_info[table] = indexes - tables = ','.join(tables) - index_info = str(index_info) - wdr_features = ';'.join([str(item) for item in value[5:]]) - # Delete 'p_rows' 'p_time' in explain if value[2] is not None. - if value[2] is not None: - explain = re.sub(r"Datanode Name:.+?\n", "", re.sub(r' p-time=[\d\.]+? p-rows=[\d] ', r' ', value[2])) - explain = explain.replace('\"', '\'') - else: - explain = '' - return (value[0], tables, input_sql_processing(value[1]), explain, - value[3].strftime(global_vars.DATE_FORMAT), value[4].strftime(global_vars.DATE_FORMAT), index_info, - wdr_features) - - def wdr_features(self, start_time, end_time): - if start_time and end_time: - sql = "select db_name, query, query_plan, start_time, finish_time, n_returned_rows, n_tuples_fetched, " \ - "n_tuples_returned, n_tuples_inserted, n_tuples_updated, n_tuples_deleted, n_blocks_fetched, " \ - "n_blocks_hit, db_time, cpu_time, execution_time, parse_time, plan_time, rewrite_time, " \ - "pl_execution_time, pl_compilation_time, data_io_time, lock_count, lock_time, lock_wait_count, " \ - "lock_wait_time, lock_max_count, lwlock_count, lwlock_wait_count, lwlock_time, lwlock_wait_time from " \ - "statement_history where finish_time between '{start_time}' and '{end_time}'" \ - .format(start_time=start_time, end_time=end_time) - with DBAgent(port=self.port, database='postgres') as db: - result = db.fetch_all_result(sql) - if result: - result = list(filter(lambda x: re.match(r'UPDATE|SELECT|INSERT|DELETE', x[1].strip().upper()), result)) - result = list(map(self.mapper_function, result)) - return result - - def output(self): - # Collect wdr only on CN or single node. 
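`mapper_function` above normalizes a captured plan before it is stored: two `re.sub` passes strip the per-operator `p-time`/`p-rows` annotations and the `Datanode Name:` header, and double quotes are swapped for single quotes so the text survives the SQL insert template. The same cleanup applied to a fabricated plan fragment:

```
# The explain-plan cleanup from mapper_function, on a fabricated fragment.
import re

raw_plan = ('Datanode Name: dn_1\n'
            'Seq Scan on bmsql_item p-time=12.5 p-rows=9 (cost=0.00..2309.00 rows=100000 width=6)')

explain = re.sub(r"Datanode Name:.+?\n", "",
                 re.sub(r' p-time=[\d\.]+? p-rows=[\d] ', r' ', raw_plan))
explain = explain.replace('"', "'")
print(explain)
# Seq Scan on bmsql_item (cost=0.00..2309.00 rows=100000 width=6)
```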
- if self.db_type not in ('cn', 'single'): - return [] - start_time = global_vars.SLOW_START_TIME - end_time = int(time.time()) - global_vars.SLOW_START_TIME = end_time - start_time_string = time.strftime(global_vars.DATE_FORMAT, time.localtime(start_time)) - end_time_string = time.strftime(global_vars.DATE_FORMAT, time.localtime(end_time)) - result = self.wdr_features(start_time_string, end_time_string) - return result diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/test/test_diagnosing.py b/src/gausskernel/dbmind/tools/anomaly_detection/test/test_diagnosing.py deleted file mode 100644 index 2133c4ce5..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/test/test_diagnosing.py +++ /dev/null @@ -1,954 +0,0 @@ -import logging -import os -import sqlite3 -import sys -import time -import unittest -from datetime import timedelta - -sys.path.append("../") - -from utils import TimeString, convert_to_mb, unify_sql, extract_table_from_sql, wdr_sql_processing, input_sql_processing -from detector.tools.slow_sql.diagnosing import diagnose_auto -from detector.tools.slow_sql.diagnosing import diagnose_user -from detector.service.storage import sqlite_storage -from detector.service.storage.sqlite_storage import SQLiteStorage -from detector.algorithm.auto_arima import AutoArima - -logging.basicConfig(level=logging.INFO) - - -class TestDataHandler(unittest.TestCase): - def test_10_sqlite3(self): - database_dir = os.path.realpath("./data") - if not os.path.exists(database_dir): - os.mkdir(database_dir) - database_path = os.path.join(database_dir, '127_0_0_1_8000') - conn = SQLiteStorage(database_path) - conn.connect() - try: - conn.execute("select count(*) from wdr") - conn.execute("DROP table database_exporter") - conn.execute("DROP table OSExporter") - conn.execute("DROP table wdr") - logging.info('create tables') - conn.execute("create table database_exporter(timestamp bigint, guc_parameter text, " - "connrent_connections int, qps text, process text, temp_file text)") - conn.execute("create table os_exporter(timestamp bigint, cpu_usage text, io_wait text," - " io_read text, io_write text, memory_usage text, disk_space text)") - conn.execute("create table wdr(timestamp bigint, db_name text, table_name text, query text," - " explain text, start_time text, finish_time text, indexes text)") - except sqlite3.OperationalError: - logging.info('create tables') - conn.execute("create table database_exporter(timestamp bigint, guc_parameter text, " - "connrent_connections int, qps text, process text, temp_file text)") - conn.execute("create table os_exporter(timestamp bigint, cpu_usage text, io_wait text," - " io_read text, io_write text, memory_usage text, disk_space text)") - conn.execute("create table wdr(timestamp bigint, db_name text, table_name text, query text," - " explain text, start_time text, finish_time text, indexes text)") - - self.assertIsNotNone(conn) - - def test_11_full_scan(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select * from bmsql_item" - if conn.execute("SELECT * from wdr where timestamp == 1617885101"): - logging.info("EXIST timestamp == 1617885101 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885101') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885101, db_name='tpcc', table_name='bmsql_item', - query="select * from bmsql_item", - 
explain="Aggregate (cost=2559.00..2559.01 rows=1 width=8)\n " - "-> Seq Scan on bmsql_item (cost=0.00..2309.00 rows=100000 width=6)", - start_time="2021-04-08 20:31:41", - finish_time="2021-04-08 20:31:44", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885101, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:31:41") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:31:41", "2021-04-08 20:31:44", - ['FullScan: Select all columns from the table, which causes the full scan', - 'Please stop this operation if it is not necessary']]) - - def test_12_fuzzy_query(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id like 100" - if conn.execute("SELECT * from wdr where timestamp == 1617885105"): - logging.info("EXIST timestamp == 1617885105 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885105') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885105, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id like 100", - explain="Seq Scan on bmsql_item (cost=0.00..2809.00 rows=500 width=4)\n" - " Filter: ((i_id)::text ~~ '100'::text)", - start_time="2021-04-08 20:31:45", - finish_time="2021-04-08 20:31:49", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885105, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:31:45") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:31:45", "2021-04-08 20:31:49", - ['FuzzyQuery: SQL statement uses the keyword "like" causes fuzzy query', - 'Avoid fuzzy queries or do not use full fuzzy queries']]) - - def test_13_is_not_null(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id is not null" - if conn.execute("SELECT * from wdr where timestamp == 1617885110"): - logging.info("EXIST timestamp == 1617885110 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885110') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885110, 
db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id is not null", - explain="Seq Scan on bmsql_item (cost=0.00..2309.00 rows=100000 width=4)\n (1 row)", - start_time="2021-04-08 20:31:50", - finish_time="2021-04-08 20:31:54", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885110, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:31:50") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:31:50", "2021-04-08 20:31:54", - ['IndexFailure: SQL statement uses the keyword "is not null" causes indexes failure', - 'Do not use "is not null", otherwise, the index will be invalid']]) - - def test_14_unequal(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id != 100" - if conn.execute("SELECT * from wdr where timestamp == 1617885115"): - logging.info("EXIST timestamp == 1617885115 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885115') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885115, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id != 100", - explain="Seq Scan on bmsql_item (cost=0.00..2559.00 rows=99999" - " width=4)\n Filter: (i_id <> 100)", - start_time="2021-04-08 20:31:55", - finish_time="2021-04-08 20:31:59", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885115, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:31:55") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:31:55", "2021-04-08 20:31:59", - ['IndexFailure: SQL statement uses the keyword "!=" causes indexes failure', - 'Change the unequal sign to "or"']]) - - def test_15_function(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where substring(i_name,1,3)='abc'" - if conn.execute("SELECT * from wdr where timestamp == 1617885120"): - logging.info("EXIST timestamp == 1617885120 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885120') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", 
\"{indexes}\")" - .format(timestamp=1617885120, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where substring(i_name,1,3)='abc'", - explain="Seq Scan on bmsql_item (cost=0.00..2809.00 rows=500 width=4)\n" - " Filter: (‘substring’((i_name)::text, 1, 3) = 'abc'::text)", - start_time="2021-04-08 20:32:00", - finish_time="2021-04-08 20:32:04", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885120, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:00") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:00", "2021-04-08 20:32:04", - ['IndexFailure: Function operations are used in the WHERE ' - 'clause causes the SQL engine to abandon ' - ' indexes and use the full table scan', - 'Avoid using function operations in the where clause']]) - - def test_16_or(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id =100 or i_price =100" - if conn.execute("SELECT * from wdr where timestamp == 1617885125"): - logging.info("EXIST timestamp == {1617885125} ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885125') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885125, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id =100 or i_price =100", - explain="Seq Scan on bmsql_item (cost=0.00..2809.00 rows=28 width=4)\n" - " Filter: ((i_id = 100) OR (i_price = 100::numeric))", - start_time="2021-04-08 20:32:05", - finish_time="2021-04-08 20:32:09", - indexes="{'bmsql_item': {'index_item': 'CREATE INDEX index_item ON bmsql_item" - " USING btree (i_name) TABLESPACE pg_default', " - "'index_item2': 'CREATE INDEX index_item2 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item3': 'CREATE INDEX index_item3 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item4': 'CREATE INDEX index_item4 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item5': 'CREATE INDEX index_item5 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item6': 'CREATE INDEX index_item6 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item7': 'CREATE INDEX index_item7 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item_id': 'CREATE INDEX index_item_id ON bmsql_item USING btree " - "(i_im_id) TABLESPACE pg_default', " - "'index_comb': 'CREATE INDEX index_comb ON bmsql_item USING btree " - "(i_id, i_name) TABLESPACE pg_default', " - "'bmsql_item_pkey': 'CREATE UNIQUE INDEX bmsql_item_pkey ON bmsql_item " - "USING btree (i_id) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, 
{qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885125, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:05") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:05", "2021-04-08 20:32:09", - ['NeedIndex: The SQL or condition contains columns which are not all created indexes', - 'Create indexes on all related columns'], - ['External Resources: There are a large number of redundant indexes in related columns,' - ' resulting in slow insert performance', 'Delete the duplicate index before insert']]) - - def test_17_update(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "update bmsql_item set i_id = 1, i_name = 'name', i_price = 100, i_data = 'sadsad', i_im_id = 123" - if conn.execute("SELECT * from wdr where timestamp == 1617885130"): - logging.info("EXIST timestamp == 1617885130 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885130') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885130, db_name='tpcc', table_name='bmsql_item', - query="update bmsql_item set i_id = 1, i_name = 'name', i_price = 100, i_data = 'sadsad', i_im_id = 123", - explain=" Update on bmsql_item (cost=0.00..2309.00 rows=100000 width=6)\n" - " -> Seq Scan on bmsql_item (cost=0.00..2309.00 rows=100000 width=6)", - start_time="2021-04-08 20:32:10", - finish_time="2021-04-08 20:32:14", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885130, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:10") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:10", "2021-04-08 20:32:14", - ['FullScan: The UPDATE statement updates all columns', - 'If only one or two columns are changed, do not update all columns, otherwise, ' - 'frequent calls will cause significant performance consumption']]) - - def test_18_expr_in_where(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id/2=40" - if conn.execute("SELECT * from wdr where timestamp == 1617885135"): - logging.info("EXIST timestamp == 1617885135 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885135') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885135, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id/2=40", - explain=" Seq Scan on bmsql_item (cost=0.00..2809.00 rows=500 width=4)\n" - " 
Filter: ((i_id / 2) = 40::double precision)", - start_time="2021-04-08 20:32:15", - finish_time="2021-04-08 20:32:19", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885135, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:15") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:15", "2021-04-08 20:32:19", - ['IndexFailure: Expression manipulation of the WHERE ' - 'clause causes the SQL engine to abandon ' - 'the use of indexes in favor of full table scans', - 'Change the WHERE clause, do not perform expression operations on the columns']]) - - def test_19_not_in(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id not in(1,100)" - if conn.execute("SELECT * from wdr where timestamp == 1617885140"): - logging.info("EXIST timestamp == 1617885140 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885140') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885140, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id not in(1,100)", - explain="Seq Scan on bmsql_item (cost=0.00..2559.00 rows=99998 width=4)\n" - " Filter: (i_id <> ALL ('{1,100}'::integer[]))", - start_time="2021-04-08 20:32:20", - finish_time="2021-04-08 20:32:24", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885140, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:20") - logging.info('RCA:%s', rca) - self.assertEqual(rca, - ["2021-04-08 20:32:20", "2021-04-08 20:32:24", ['FullScan: SQL statement uses the keyword "IN"' - ' or "NOT IN" causes the full scan', - 'For continuity values, you can use the ' - 'keyword "between" instead']]) - - def test_20_range_too_large(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id < 90000 and i_id > 0" - if conn.execute("SELECT * from wdr where timestamp == 1617885145"): - logging.info("EXIST timestamp == 1617885145 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885145') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", 
\"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885145, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id < 90000 and i_id > 0", - explain="Seq Scan on bmsql_item (cost=0.00..2559.00 rows=99998 width=4)\n" - " Filter: (i_id <> ALL ('{1,100}'::integer[]))", - start_time="2021-04-08 20:32:25", - finish_time="2021-04-08 20:32:29", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885145, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:25") - logging.info('RCA:%s', rca) - self.assertEqual(rca, - ["2021-04-08 20:32:25", "2021-04-08 20:32:29", ['RangeTooLarge: Condition range is too large', - 'Please reduce query range']]) - - def test_21_sort(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select h_c_id from bmsql_history order by hist_id desc" - if conn.execute("SELECT * from database_exporter where timestamp == 1617885150"): - logging.info("[database_exporter] EXIST timestamp == 1617885150") - conn.execute('DELETE FROM database_exporter WHERE timestamp == 1617885150') - if conn.execute("SELECT * from wdr where timestamp == 1617885150"): - logging.info("[wdr] EXIST timestamp == 1617885150") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885150') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885150, db_name='tpcc', table_name='bmsql_history', - query="select h_c_id from bmsql_history order by hist_id desc", - explain=" Sort (cost=706347.33..715885.54 rows=3815283 width=8)\n" - " Sort Key: hist_id DESC\n" - " -> Seq Scan on bmsql_history (cost=0.00..80623.83 rows=3815283 width=8)", - start_time="2021-04-08 20:32:30", - finish_time="2021-04-08 20:32:34", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885150, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:30") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:30", "2021-04-08 20:32:34", - ['ExternalSorting: The cost of query statement sorting is too high, ' - 'resulting in slow SQL', 'Adjust the size of work_mem']]) - - def test_22_join(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select hist_id,s_i_id from 
bmsql_history, bmsql_stock where hist_id = s_i_id" - if conn.execute("SELECT * from wdr where timestamp == 1617885155"): - logging.info("EXIST timestamp == 1617885155 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885155') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885155, db_name='tpcc', table_name='bmsql_history', - query="select hist_id,s_i_id from bmsql_history, bmsql_stock where hist_id = s_i_id", - explain=" Nested Loop (cost=0.00..1443045369679.17 rows=10006168 width=8)\n" - " -> Seq Scan on bmsql_history (cost=0.00..168981.63 rows=7774563 width=4)\n" - " -> Index Only Scan using bmsql_stock_pkey on bmsql_stock (cost=0.00..185610.10 rows=100 width=4)\n" - " Index Cond: (s_i_id = bmsql_history.hist_id)", - start_time="2021-04-08 20:32:35", - finish_time="2021-04-08 20:32:39", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885155, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:35") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:35", "2021-04-08 20:32:39", - ['NestLoop: The slow execution of "NestLoop" operator during JOIN ' - 'operation of a large table, resulting in slow SQL', - 'Turn off NestLoop by setting the GUC parameter "enable_nestloop" to off, ' - 'and let the optimizer choose other join methods']]) - - def test_23_aggregate(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select h_data from bmsql_history group by h_data" - if conn.execute("SELECT * from wdr where timestamp == 1617885160"): - logging.info("EXIST timestamp == 1617885160 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885160') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885160, db_name='tpcc', table_name='bmsql_history', - query="select h_data from bmsql_history group by h_data", - explain=" SortAggregate (cost=873904.59..1189659.38 rows=10003079 width=33)\n" - " Group By Key: s_dist_01\n" - " -> Seq Scan on bmsql_stock (cost=0.00..585154.79 rows=10003079 width=25)", - start_time="2021-04-08 20:32:40", - finish_time="2021-04-08 20:32:44", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885160, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': 
'0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:40") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:40", "2021-04-08 20:32:44", - ['SortAggregate: For the aggregate operation of the large result set, ' - '"Sort Aggregate" operator has poor performance', - 'By setting the GUC parameter enable_sort to off, let ' - 'the optimizer select the HashAgg operator']]) - - def test_24_redistribute(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select o_custkey, l_partkey from orders, lineitem where o_custkey = l_partkey" - if conn.execute("SELECT * from wdr where timestamp == 1617885165"): - logging.info("EXIST timestamp == 1617885165 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885165') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885165, db_name='tpch', table_name='{orders, lineitem}', - query="select o_custkey, l_partkey from orders, lineitem where o_custkey = l_partkey", - explain=" Row Adapter (cost=4535992602.55..4535992602.55 rows=22266252160 width=16)\n" - " -> Vector Streaming (type: GATHER) (cost=63865790.87..4535992602.55" - " rows=22266252160 width=16)\n" - " Node/s: All datanodes\n" - " -> Vector Sonic Hash Join (cost=63865786.87..3724202159.14" - " rows=22266252160 width=16)\n" - " Hash Cond: (orders.o_custkey = lineitem.l_partkey)\n" - " -> Vector Streaming(type: REDISTRIBUTE) " - "(cost=0.00..12298155.00 rows=750000000 width=8)\n" - " Spawn on: All datanodes\n" - " -> Vector Partition Iterator " - "(cost=0.00..1985655.00 rows=750000000 width=8)\n" - " Iterations: 7\n" - " -> Partitioned CStore Scan on orders " - "(cost=0.00..1985655.00 rows=750000000 width=8)\n" - " Selected Partitions: 1..7\n" - " -> Vector Streaming(type: REDISTRIBUTE) " - "(cost=0.00..51268011.03 rows=3000028242 width=8)\n" - " Spawn on: All datanodes\n" - " -> Vector Partition Iterator (cost=0.00..10017622.71" - " rows=3000028242 width=8)\n" - " Iterations: 7\n" - " -> Partitioned CStore Scan on lineitem " - "(cost=0.00..10017622.71 rows=3000028242 width=8)\n" - " Selected Partitions: 1..7", - start_time="2021-04-08 20:32:45", - finish_time="2021-04-08 20:32:49", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885165, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '0.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='t')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:45") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:45", "2021-04-08 20:32:49", - ['DataSkew: Data redistribution during the query execution', - 'It is recommended to use the distribution key recommending tool ' - 'to recommend appropriate distribution keys to avoid data skew']]) - - def test_25_redundant_index(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = 
"select i_name from bmsql_item where i_name = 'frank'" - if conn.execute("SELECT * from wdr where timestamp == 1617885170"): - logging.info("EXIST timestamp == 1617885160 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885170') - conn.execute('DELETE FROM database_exporter WHERE timestamp == 1617885170') - - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885170, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '4.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885170, db_name='tpcc', table_name='bmsql_item', - query="select i_name from bmsql_item where i_name = 'frank'", - explain="Seq Scan on bmsql_item (cost=0.00..2559.00 rows=99999 width=4)\n" - " Filter: ((i_name)::text = 'frank'::text)", - start_time="2021-04-08 20:32:50", - finish_time="2021-04-08 20:32:54", - indexes="{'bmsql_item': {'index_item': 'CREATE INDEX index_item ON bmsql_item" - " USING btree (i_name) TABLESPACE pg_default', " - "'index_item2': 'CREATE INDEX index_item2 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item3': 'CREATE INDEX index_item3 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item4': 'CREATE INDEX index_item4 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item5': 'CREATE INDEX index_item5 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item6': 'CREATE INDEX index_item6 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item7': 'CREATE INDEX index_item7 ON bmsql_item USING btree " - "(i_name) TABLESPACE pg_default', " - "'index_item_id': 'CREATE INDEX index_item_id ON bmsql_item USING btree " - "(i_im_id) TABLESPACE pg_default', " - "'index_comb': 'CREATE INDEX index_comb ON bmsql_item USING btree " - "(i_id, i_name) TABLESPACE pg_default', " - "'bmsql_item_pkey': 'CREATE UNIQUE INDEX bmsql_item_pkey ON bmsql_item " - "USING btree (i_id) TABLESPACE pg_default'}}")) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:50") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:50", "2021-04-08 20:32:54", - ['External Resources: There are a large number of redundant ' - 'indexes in related columns, resulting in slow insert performance', - 'Delete the duplicate index before insert']]) - - def test_26_resource_shortage(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_im_id from bmsql_item where i_im_id = 123" - if conn.execute("SELECT * from database_exporter where timestamp == 1617885175"): - logging.info("[database_exporter] EXIST timestamp == 1617885175") - conn.execute('DELETE FROM database_exporter WHERE timestamp == 16178851275') - if conn.execute("SELECT * from wdr where timestamp == 1617885175"): - logging.info("[wdr] EXIST timestamp == 1617885150") - conn.execute('DELETE FROM wdr WHERE timestamp == 16178851275') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - 
def test_26_resource_shortage(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_im_id from bmsql_item where i_im_id = 123" - if conn.execute("SELECT * from database_exporter where timestamp == 1617885175"): - logging.info("[database_exporter] EXIST timestamp == 1617885175") - conn.execute('DELETE FROM database_exporter WHERE timestamp == 1617885175') - if conn.execute("SELECT * from wdr where timestamp == 1617885175"): - logging.info("[wdr] EXIST timestamp == 1617885175") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885175') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885175, db_name='tpcc', table_name='bmsql_history', - query="select i_im_id from bmsql_item where i_im_id = 123", - explain="Seq Scan on bmsql_item (cost=0.00..2559.00 rows=99999 width=4)\n" - " Filter: (i_im_id = 123)", - start_time="2021-04-08 20:32:55", - finish_time="2021-04-08 20:32:59", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885175, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '24.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:32:55") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:32:55", "2021-04-08 20:32:59", - ['External Resources: External processes occupy a large number of system resources,' - ' resulting in a database resource shortage', - 'Stop unnecessary large processes']]) - - def test_27_others(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id = 123" - if conn.execute("SELECT * from database_exporter where timestamp == 1617885180"): - logging.info("[database_exporter] EXIST timestamp == 1617885180") - conn.execute('DELETE FROM database_exporter WHERE timestamp == 1617885180') - if conn.execute("SELECT * from wdr where timestamp == 1617885180"): - logging.info("[wdr] EXIST timestamp == 1617885180") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885180') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885180, db_name='tpcc', table_name='bmsql_history', - query="select i_id from bmsql_item where i_id = 123", - explain="Seq Scan on bmsql_item (cost=0.00..2559.00 rows=99999 width=4)\n" - " Filter: (i_id = 123)", - start_time="2021-04-08 20:33:00", - finish_time="2021-04-08 20:33:04", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885180, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '4.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:33:00") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:33:00", "2021-04-08 20:33:04", - ['External Resources: Database load request crowded', - 'It is recommended to change the free time to execute']]) - - def test_28_insert(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "insert into bmsql_item values(11321321,'123',300.00,'wsqddas',12)" - if conn.execute("SELECT * from wdr where 
timestamp == 1617885200"): - logging.info("[wdr] EXIST timestamp == 1617885200") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885200') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885200, db_name='tpcc', table_name='bmsql_item', - query="insert into bmsql_item values(11321321,'123',300.00,'wsqddas',12)", - explain=" Insert on bmsql_item (cost=0.00..0.01 rows=1 width=0)\n" - " -> Result (cost=0.00..0.01 rows=1 width=0)", - start_time="2021-04-08 20:33:20", - finish_time="2021-04-08 20:33:24", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - - - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885200, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=-1, - process="{'java': '14.2:0.2', 'containerd': '4.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:33:20") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:33:20", "2021-04-08 20:33:24", - ['Database Resources: A large number of data or indexes are involved', - 'You can use "copy" instead of "insert"']]) - - def test_29_delete(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "delete from bmsql_item where i_id = 1" - if conn.execute("SELECT * from wdr where timestamp == 1617885205"): - logging.info("[wdr] EXIST timestamp == 1617885205") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885205') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885205, db_name='tpcc', table_name='bmsql_item', - query="delete from bmsql_item where i_id = 1", - explain=" Delete on bmsql_item (cost=0.00..8.28 rows=1 width=6)\n" - " -> Index Scan using bmsql_item_pkey on bmsql_item (cost=0.00..8.28 rows=1 width=6)\n" - " Index Cond: (i_id = 1)", - start_time="2021-04-08 20:33:25", - finish_time="2021-04-08 20:33:29", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - - - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885205, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=-1, - process="{'java': '14.2:0.2', 'containerd': '4.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:33:25") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:33:25", "2021-04-08 20:33:29", - ['Database Resources: A large number of data or indexes are involved', - 'You may launch batch deletions or remove and recreate indexes']]) - - def test_30_load_request_centralization(self): - conn = 
SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id =100" - if conn.execute("SELECT * from wdr where timestamp == 1617885185"): - logging.info("EXIST timestamp == 1617885185 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885185') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885185, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id =100", - explain="Seq Scan on bmsql_item (cost=0.00..2809.00 rows=28 width=4)\n" - " Filter: ((i_id = 100))", - start_time="2021-04-08 20:33:05", - finish_time="2021-04-08 20:33:09", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885185, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=6037, - process="{'java': '14.2:0.2', 'containerd': '4.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - rca = diagnose_user(conn, query, start_time="2021-04-08 20:33:05") - logging.info('RCA:%s', rca) - self.assertEqual(rca, ["2021-04-08 20:33:05", "2021-04-08 20:33:09", - ['External Resources: Database request crowded', - 'It is recommended to change the free time to execute']]) - - def test_31_auto(self): - conn = SQLiteStorage(os.path.realpath("./data/127_0_0_1_8000")) - conn.connect() - query = "select i_id from bmsql_item where i_id =100" - if conn.execute("SELECT * from wdr where timestamp == 1617885190"): - logging.info("EXIST timestamp == 1617885190 ") - conn.execute('DELETE FROM wdr WHERE timestamp == 1617885190') - - conn.execute("insert into wdr values ({timestamp}, \"{db_name}\", \"{table_name}\", \"{query}\"," - " \"{explain}\", \"{start_time}\", \"{finish_time}\", \"{indexes}\")" - .format(timestamp=1617885190, db_name='tpcc', table_name='bmsql_item', - query="select i_id from bmsql_item where i_id =100", - explain="Seq Scan on bmsql_item (cost=0.00..2809.00 rows=28 width=4)\n" - " Filter: ((i_id = 100))", - start_time="2021-04-08 20:33:10", - finish_time="2021-04-08 20:33:14", - indexes="{'bmsql_order_line': {'bmsql_order_line_pkey': 'CREATE UNIQUE INDEX " - "bmsql_order_line_pkey ON bmsql_order_line USING btree (ol_w_id, ol_d_id," - " ol_o_id, ol_number) TABLESPACE pg_default'}}")) - conn.execute("insert into database_exporter values ({timestamp}, \"{guc_parameter}\", " - "{connrent_connections}, {qps}, \"{process}\", \"{temp_file}\")" - .format(timestamp=1617885190, guc_parameter='128.0,40.0078125,200', connrent_connections=18, - qps=1, - process="{'java': '14.2:0.2', 'containerd': '4.4:0.0', 'python': '0.3:0.0', " - "'/usr/lib/systemd/systemd': '0.1:0.0', '/usr/sbin/irqbalance': '0.1:0.0'}", - temp_file='f')) - rca = diagnose_auto(conn, query, start_time="2021-04-08 20:33:10") - logging.info('RCA:%s', rca) - self.assertEqual(rca, [['External Resources: Database request crowded', - 'It is recommended to change the free time to execute']]) - - - - - def test_40_select_timeseries_by_timestamp(self, table='wdr', database_path='./data/127_0_0_1_8000'): - 
timestamp = int(time.time()) - with sqlite_storage.SQLiteStorage(database_path) as db: - timeseries_by_timestamp = db.select_timeseries_by_timestamp(table=table, field='query', period='10000W', - timestamp=timestamp) - self.assertGreater(len(timeseries_by_timestamp), 0) - - def test_41_select_timeseries_by_number(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - timeseries_by_number = db.select_timeseries_by_number(table=table, field='query', number=10) - self.assertGreater(len(timeseries_by_number), 0) - - def test_42_get_table_rows(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - rows = db.get_table_rows(table=table) - self.assertGreater(rows, 0) - - def test_43_get_all_tables(self, database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - tables = db.get_all_tables() - self.assertEqual(set(tables), set(['wdr', 'database_exporter', 'os_exporter'])) - - def test_44_check_tables(self, database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - flag = db.check_table('wdr') - self.assertTrue(flag) - - def test_45_get_all_fields(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - fields = db.get_all_fields(table=table) - self.assertEqual(set(fields), - set(['db_name', 'table_name', 'query', 'explain', 'start_time', 'finish_time', 'indexes'])) - - def test_46_get_earliest_timestamp(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - timestamp = db.get_earliest_timestamp(table=table) - self.assertEqual(timestamp, 1617885101) - - def test_47_get_lastest_timestamp(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - timestamp = db.get_latest_timestamp(table=table) - self.assertEqual(timestamp, 1617885205) - - def test_48_load_sql_operation(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - self.assertGreater(len(db.sql_operation), 0) - - def test_49_fetch_all_result(self, table='wdr', database_path='./data/127_0_0_1_8000'): - with sqlite_storage.SQLiteStorage(database_path) as db: - result = db.fetch_all_result('select * from {table}'.format(table=table)) - self.assertGreater(len(result), 0) - - def test_50_TimeString(self): - transform_result_1 = TimeString('10S').to_second() - transform_result_2 = TimeString('2M').to_second() - transform_result_3 = TimeString('3H').to_second() - transform_result_4 = TimeString('1D').to_second() - transform_result_5 = TimeString('2W').to_second() - - transform_result_6 = TimeString('10S').to_timedelta() - transform_result_7 = TimeString('2M').to_timedelta() - transform_result_8 = TimeString('3H').to_timedelta() - transform_result_9 = TimeString('1D').to_timedelta() - transform_result_10 = TimeString('2W').to_timedelta() - - self.assertEqual(transform_result_1, 10) - self.assertEqual(transform_result_2, 2 * 60) - self.assertEqual(transform_result_3, 3 * 60 * 60) - self.assertEqual(transform_result_4, 1 * 24 * 60 * 60) - self.assertEqual(transform_result_5, 2 * 7 * 24 * 60 * 60) - - self.assertEqual(transform_result_6, timedelta(seconds=10)) - self.assertEqual(transform_result_7, timedelta(minutes=2)) - self.assertEqual(transform_result_8, timedelta(hours=3)) - 
self.assertEqual(transform_result_9, timedelta(days=1)) - self.assertEqual(transform_result_10, timedelta(weeks=2)) - - def test_51_unify_byte_info(self): - unify_result_1 = convert_to_mb('10K') - unify_result_2 = convert_to_mb('10M') - unify_result_3 = convert_to_mb('10G') - unify_result_4 = convert_to_mb('10T') - unify_result_5 = convert_to_mb('10P') - - self.assertEqual(unify_result_1, 0.009765625) - self.assertEqual(unify_result_2, 10) - self.assertEqual(unify_result_3, 10 * 1024) - self.assertEqual(unify_result_4, 10 * 1024 * 1024) - self.assertEqual(unify_result_5, 10 * 1024 * 1024 * 1024) - - def test_54_unify_byte_unit(self): - res1 = extract_table_from_sql("select name from table1 where id=3;") - res2 = extract_table_from_sql("select id, age from table2 where id=3;") - self.assertEqual(res1[0], 'table1') - self.assertEqual(res2[0], 'table2') - - def test_55_input_sql_processing(self): - sql1 = input_sql_processing("select id from item where name='jack'") - sql2 = input_sql_processing("SELECT i_price, i_name, i_data FROM bmsql_item WHERE i_id = 3") - self.assertEqual(sql1, "SELECT ID FROM ITEM WHERE NAME = ?") - self.assertEqual(sql2, "SELECT I_PRICE , I_NAME , I_DATA FROM BMSQL_ITEM WHERE I_ID = ?") - - def test_56_wdr_sql_processing(self): - sql1 = wdr_sql_processing("select id from item where name=$1") - sql2 = wdr_sql_processing( - "SELECT i_price, i_name, i_data FROM bmsql_item WHERE i_id = $1 and i_i_id = $2") - self.assertEqual(sql1, "SELECT ID FROM ITEM WHERE NAME = ?") - self.assertEqual(sql2, "SELECT I_PRICE , I_NAME , I_DATA FROM BMSQL_ITEM WHERE I_ID = ? AND I_I_ID = ?") - - def test_57_unify_sql(self): - sql = unify_sql("select id from item where name = 'jack'") - self.assertEqual(sql, "SELECT ID FROM ITEM WHERE NAME = 'JACK'") - - def test_58_auto_arima(self): - timeseries = [(1606189212, 66.0), (1606189213, 55.0), (1606189214, 47.0), (1606189215, 13.0), - (1606189216, 107.0), - (1606189217, 46.0), (1606189218, 39.0), (1606189219, 10.0), (1606189220, 53.0), - (1606189221, 54.0), - (1606189222, 13.0), (1606189223, 50.0), (1606189224, 109.0), (1606189225, 49.0), - (1606189226, 46.0), - (1606189227, 29.0), (1606189228, 97.0), (1606189229, 9.0), (1606189230, 99.0), (1606189231, 26.0), - (1606189232, 49.0), (1606189233, 12.0), (1606189234, 111.0), (1606189235, 1.0), - (1606189236, 63.0), - (1606189237, 39.0), (1606189238, 114.0), (1606189239, 10.0), (1606189240, 0.0), - (1606189241, 43.0), - (1606189242, 25.0), (1606189243, 91.0), (1606189244, 92.0), (1606189245, 28.0), - (1606189246, 87.0), - (1606189247, 38.0), (1606189248, 43.0), (1606189249, 19.0), (1606189250, 26.0), - (1606189251, 118.0)] - am = AutoArima() - am.fit(timeseries) - result = am.forecast(period=10, freq='2S') - self.assertEqual(len(result[0]), 10) - - -if __name__ == '__main__': - unittest.main() diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/utils/__init__.py b/src/gausskernel/dbmind/tools/anomaly_detection/utils/__init__.py deleted file mode 100644 index 012a5ad8f..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/utils/__init__.py +++ /dev/null @@ -1,540 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. 
-You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import datetime -import logging -import os -import re -import sys -import shlex -import ctypes -import subprocess -import requests -import config -from datetime import timedelta -from threading import Thread, Event -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes -import hashlib - -import psycopg2 -import sqlparse -from sqlparse.sql import Identifier, IdentifierList -from sqlparse.tokens import Keyword, DML - -from global_vars import DATE_FORMAT - -split_flag = ('!=', '<=', '>=', '==', '<', '>', '=', ',', '*', ';', '%', '+', ',', ';', '/') - - -class AesCbcUtil(object): - """ - aes cbc tool - """ - @classmethod - def check_content_key(cls, content, key): - """ - check ase cbc content and key - """ - if not isinstance(content, bytes): - raise ValueError('incorrect parameter.') - if not isinstance(key, (bytes, str)): - raise ValueError('incorrect parameter.') - - iv_len = 16 - if not len(content) >= (iv_len + 16): - raise Exception(Errors.GAUSS_61101.build_msg("check content key. " - "content's len must >= (iv_len + 16).")) - - @classmethod - def aes_cbc_decrypt(cls, content, key): - """ - aes cbc decrypt for content and key - """ - cls.check_content_key(content, key) - if isinstance(key, str): - key = bytes(key) - iv_len = 16 - # pre shared key iv - iv = content[16 + 1 + 16 + 1:16 + 1 + 16 + 1 + 16] - - # pre shared key enctryt - enc_content = content[:iv_len] - - try: - backend = default_backend() - except Exception as imp_clib_err: - if str(imp_clib_err).find('SSLv3_method') == -1: - # not find SSLv3_method, and it's not ours - local_path = os.path.dirname(os.path.realpath(__file__)) - clib_path = os.path.realpath(os.path.join(local_path, "../clib")) - ssl_path = os.path.join(clib_path, 'libssl.so.1.1') - crypto_path = os.path.join(clib_path, 'libcrypto.so.1.1') - if os.path.isfile(crypto_path): - ctypes.CDLL(crypto_path, mode=ctypes.RTLD_GLOBAL) - if os.path.isfile(ssl_path): - ctypes.CDLL(ssl_path, mode=ctypes.RTLD_GLOBAL) - else: - ssl_path = '/usr/lib64/libssl.so.1.1' - crypto_path = '/usr/lib64/libcrypto.so.1.1' - if os.path.isfile(crypto_path): - ctypes.CDLL(crypto_path, mode=ctypes.RTLD_GLOBAL) - if os.path.isfile(ssl_path): - ctypes.CDLL(ssl_path, mode=ctypes.RTLD_GLOBAL) - backend = default_backend() - - cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) - decrypter = cipher.decryptor() - dec_content = decrypter.update(enc_content) + decrypter.finalize() - server_decipher_key = dec_content.rstrip(b'\x00')[:-1].decode() - return server_decipher_key - - @classmethod - def get_old_version_path(cls, path): - """ Compatible old version path, only 'encrypt' - old: /home/xxx/key_0 - new: /home/xxx/cipher/key_0 - """ - dirname, basename = os.path.split(path.rstrip("/")) - dirname, _ = os.path.split(dirname) - path = os.path.join(dirname, basename) - return path - - @classmethod - def aes_cbc_decrypt_with_path(cls, cipher_path, rand_path): - """ - aes cbc decrypt for one path - """ - if not os.path.isdir(cipher_path): - cipher_path = cls.get_old_version_path(cipher_path) - rand_path = cls.get_old_version_path(rand_path) - with 
open(os.path.join(cipher_path, 'server.key.cipher'), 'rb') as cipher_file: - cipher_txt = cipher_file.read() - with open(os.path.join(rand_path, 'server.key.rand'), 'rb') as rand_file: - rand_txt = rand_file.read() - - if cipher_txt is None or cipher_txt == "": - return None - - server_vector_cipher_vector = cipher_txt[16 + 1:16 + 1 + 16] - # pre shared key rand - server_key_rand = rand_txt[:16] - - # worker key - server_decrypt_key = hashlib.pbkdf2_hmac('sha256', server_key_rand, - server_vector_cipher_vector, 10000, 16) - - enc = cls.aes_cbc_decrypt(cipher_txt, server_decrypt_key) - return enc - - @classmethod - def aes_cbc_decrypt_with_multi(cls, cipher_root, rand_root): - """ - decrypt message with multi depth - """ - num = 0 - rt = "" - if not os.path.isdir(cipher_root): - cipher_root = os.path.dirname(cipher_root.rstrip("/")) - rand_root = os.path.dirname(rand_root.rstrip("/")) - while True: - cipher_path = os.path.join(cipher_root, "key_%s" % num) - rand_path = os.path.join(rand_root, "key_%s" % num) - part = cls.aes_cbc_decrypt_with_path(cipher_path, rand_path) - if part is None: - break - elif len(part) < 15: - rt += part - break - else: - rt += part - num = num + 1 - - if rt == "": - return None - return rt - - @staticmethod - def format_path(root_path): - """format decrypt_with_multi or decrypt_with_path""" - return os.path.join(root_path, "cipher"), os.path.join(root_path, "rand") - - -class RepeatTimer(Thread): - """ - This class inherits from threading.Thread, it is used for periodic execution - function at a specified time interval. - """ - - def __init__(self, interval, function, *args, **kwargs): - Thread.__init__(self) - self._interval = interval - self._function = function - self._args = args - self._kwargs = kwargs - self._finished = Event() - - def run(self): - while not self._finished.is_set(): - # Execute first, wait later. - self._function(*self._args, **self._kwargs) - self._finished.wait(self._interval) - self._finished.set() - - def cancel(self): - self._finished.set() - - -class StdStreamSuppressor: - """ - This class suppress standard stream object 'stdout' and 'stderr' in context. - """ - - def __init__(self): - self.default_stdout_fd = sys.stdout.fileno() - self.default_stderr_fd = sys.stderr.fileno() - self.null_device_fd = [os.open(os.devnull, os.O_WRONLY), os.open(os.devnull, os.O_WRONLY)] - self.standard_stream_fd = (os.dup(self.default_stdout_fd), os.dup(self.default_stderr_fd)) - - def __enter__(self): - os.dup2(self.null_device_fd[0], self.default_stdout_fd) - os.dup2(self.null_device_fd[1], self.default_stderr_fd) - - def __exit__(self, *args): - os.dup2(self.standard_stream_fd[0], self.default_stdout_fd) - os.dup2(self.standard_stream_fd[1], self.default_stderr_fd) - os.close(self.null_device_fd[0]) - os.close(self.null_device_fd[1]) - - -class TimeString: - TIMEDELTA_MAPPER = {'W': timedelta(weeks=1), - 'D': timedelta(days=1), - 'H': timedelta(hours=1), - 'M': timedelta(minutes=1), - 'S': timedelta(seconds=1)} - SECOND_MAPPER = {'W': 7 * 24 * 3600, 'D': 24 * 3600, 'H': 3600, 'M': 60, 'S': 1} - - def __init__(self, time_string): - """ - Transform time string to timedelta or second, only support 'weeks(W), days(D), - hours(H), minutes(M), seconds(S) - :param time_string: string, time string like '10S', '20H', '3W'. - """ - self._str = time_string - num, self._unit = re.match(r'(\d+)?([WDHMS])', time_string).groups() - - if self._unit is None: - raise ValueError('Incorrect format %s.' 
% time_string) - if num is None: - self._val = 1 - else: - self._val = int(num) - - def to_second(self): - return TimeString.SECOND_MAPPER.get(self._unit) * self._val - - def to_timedelta(self): - return TimeString.TIMEDELTA_MAPPER.get(self._unit) * self._val - - @property - def standard(self): - return '%dS' % self.to_second() - - -class DBAgent: - def __init__(self, port, host=None, user=None, password=None, database=None): - self.host = host - self.port = port - self.user = user - self.database = database - self.password = password - self.conn = None - self.cursor = None - self.connect() - - def __enter__(self): - self.connect() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - - def connect(self): - self.conn = psycopg2.connect(host=self.host, - user=self.user, - password=self.password, - database=self.database, - port=self.port) - self.conn.set_client_encoding('latin9') - self.cursor = self.conn.cursor() - - def fetch_all_result(self, sql): - try: - self.cursor.execute(sql) - result = list(self.cursor.fetchall()) - return result - except Exception as e: - logging.getLogger('agent').warning(str(e)) - - def close(self): - self.cursor.close() - self.conn.close() - - -def remove_comment(sql): - sql = re.sub(r'\n', r' ', sql) - sql = re.sub(r'/\s*\*[\w\W]*?\*\s*/\s*', r'', sql) - sql = re.sub(r'^--.*\s?', r'', sql) - return sql - - -def unify_sql(sql): - index = 0 - sql = remove_comment(sql) - while index < len(sql): - if sql[index] in split_flag: - if sql[index:index + 2] in split_flag: - sql = sql[:index].strip() + ' ' + sql[index:index + 2] + ' ' + sql[index + 2:].strip() - index = index + 3 - else: - sql = sql[:index].strip() + ' ' + sql[index] + ' ' + sql[index + 1:].strip() - index = index + 2 - else: - index = index + 1 - new_sql = list() - for word in sql.split(): - new_sql.append(word.upper()) - sql = ' '.join(new_sql) - return sql.strip() - - -def input_sql_processing(sql): - """ - SQL desensitization - """ - if not sql: - return '' - standard_sql = unify_sql(sql) - - if standard_sql.startswith('INSERT'): - standard_sql = re.sub(r'VALUES (\(.*\))', r'VALUES', standard_sql) - # remove digital like 12, 12.565 - standard_sql = re.sub(r'[\s]+\d+(\.\d+)?', r' ?', standard_sql) - # remove '$n' in sql - standard_sql = re.sub(r'\$\d+', r'?', standard_sql) - # remove single quotes content - standard_sql = re.sub(r'\'.*?\'', r'?', standard_sql) - # remove double quotes content - standard_sql = re.sub(r'".*?"', r'?', standard_sql) - # remove '(1' format - standard_sql = re.sub(r'\(\d+(\.\d+)?', r'(?', standard_sql) - # remove '`' in sql - standard_sql = re.sub(r'`', r'', standard_sql) - # remove ; in sql - standard_sql = re.sub(r';', r'', standard_sql) - - return standard_sql.strip() - - -def wdr_sql_processing(sql): - standard_sql = unify_sql(sql) - standard_sql = re.sub(r';', r'', standard_sql) - standard_sql = re.sub(r'VALUES (\(.*\))', r'VALUES', standard_sql) - standard_sql = re.sub(r'\$\d+?', r'?', standard_sql) - return standard_sql - - -def convert_to_mb(volume_str): - """ - Transfer unit of K、M、G、T、P to M - :param volume_str: string, byte information like '100M', '2K', '30G'. - :return: int, bytes size in unit of M, like '400M' -> 400. 
- """ - convtbl = {'K': 1 / 1024, 'M': 1, 'G': 1024, 'T': 1024 * 1024, 'P': 1024 * 1024 * 1024} - - volume_str = volume_str.upper() - num, unit = re.match(r'^(\d+|\d+\.\d+)([KMGTP])', volume_str).groups() - if (num is None) or (unit is None) or (unit not in 'KMGTP'): - raise ValueError('cannot parse format of {bytes}'.format(bytes=volume_str)) - return convtbl[unit] * int(float(num)) - - -def fatal_exit(msg=None): - if msg: - print("FATAL: %s." % msg, file=sys.stderr) - logging.getLogger('service').fatal("A fatal problem has occurred, and the process will exit.") - raise SystemExit(2) - - -def abnormal_exit(msg=None): - if msg: - print("ERROR: %s." % msg, file=sys.stderr) - logging.getLogger('service').fatal("An abnormal has occurred, and the process will exit.") - raise SystemExit(1) - - -def check_select(parsed_sql): - if not parsed_sql.is_group: - return False - for token in parsed_sql.tokens: - if token.ttype is DML and token.value.upper() == 'SELECT': - return True - return False - - -def get_table_token_list(parsed_sql, token_list): - flag = False - for token in parsed_sql.tokens: - if not flag: - if token.ttype is Keyword and token.value.upper() == 'FROM': - flag = True - else: - if check_select(token): - get_table_token_list(token, token_list) - elif token.ttype is Keyword: - return - else: - token_list.append(token) - - -def extract_table_from_select(sql): - tables = [] - table_token_list = [] - sql_parsed = sqlparse.parse(sql)[0] - get_table_token_list(sql_parsed, table_token_list) - for table_token in table_token_list: - if isinstance(table_token, Identifier): - tables.append(table_token.get_name()) - elif isinstance(table_token, IdentifierList): - for identifier in table_token.get_identifiers(): - tables.append(identifier.get_name()) - else: - if table_token.ttype is Keyword: - tables.append(table_token.value) - return tables - - -def extract_table_from_sql(sql): - """ - Function: get table name in sql - has many problems in code, especially in 'delete', 'update', 'insert into' sql - """ - if not sql.strip(): - return [] - delete_pattern_1 = re.compile(r'FROM\s+([^\s]*)[;\s ]?', re.IGNORECASE) - delete_pattern_2 = re.compile(r'FROM\s+([^\s]*)\s+WHERE', re.IGNORECASE) - update_pattern = re.compile(r'UPDATE\s+([^\s]*)\s+SET', re.IGNORECASE) - insert_pattern = re.compile(r'INSERT\s+INTO\s+([^\s]*)\s+VALUES', re.IGNORECASE) - if sql.upper().strip().startswith('SELECT'): - tables = extract_table_from_select(sql) - elif sql.upper().strip().startswith('DELETE'): - if 'WHERE' not in sql: - tables = delete_pattern_1.findall(sql) - else: - tables = delete_pattern_2.findall(sql) - elif sql.upper().strip().startswith('UPDATE'): - tables = update_pattern.findall(sql) - elif sql.upper().strip().startswith('INSERT INTO'): - sql = re.sub(r'\(.*?\)', r' ', sql) - tables = insert_pattern.findall(sql) - else: - tables = [] - return tables - - -def check_time_legality(time_string): - try: - datetime.datetime.strptime(time_string, DATE_FORMAT) - return True - except ValueError: - return False - - -def check_port_occupancy(port): - if not port.isdigit(): - raise RuntimeError("The port should be digit: '{port}'".format(port=port)) - child = subprocess.Popen(shlex.split('lsof -i:{port}'.format(port=port)), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) - stream = child.communicate() - if stream[0]: - raise RuntimeError("The port {port} is occupied.".format(port=port)) - - -def read_pid_file(filepath): - """ - Return the pid of the running process recorded in the file, - and return 0 if 
the acquisition fails. """ - if not os.path.exists(filepath): - return 0 - - try: - with open(filepath, mode='r') as f: - pid = int(f.read()) - if os.path.exists('/proc/%d' % pid): - return pid - else: - return 0 - except PermissionError: - return 0 - except ValueError: - return 0 - - -def check_db_alive(port, database='postgres'): - try: - with DBAgent(port=port, database=database) as db: - sql = "select pg_sleep(0.1)" - result = db.fetch_all_result(sql) - return True - except Exception as e: - return False - - -def check_collector(): - agent_logger = logging.getLogger('agent') - - try: - req_url = 'http://{host}:{port}/sink'.format(host=config.get('server', 'host'), port=config.get('server', 'listen_port')) - response = requests.get(req_url) - return True - except Exception as e: - agent_logger.error("{error}".format(error=str(e))) - return False - - -def check_tls_protocol(): - agent_logger = logging.getLogger('agent') - try: - context = config.getboolean('security', 'tls') - if context: - protocol = 'https' - else: - protocol = 'http' - return protocol - - except Exception as e: - agent_logger.error("[security] part must exist in configure file.") - raise - - -def getpasswd(key_path): - if os.path.isdir(key_path): - output = AesCbcUtil.aes_cbc_decrypt_with_multi(*AesCbcUtil.format_path(key_path)) - if len(str(output).strip().split()) < 1: - return '' - else: - return str(output).strip().split()[-1] - else: - return '' - diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/utils/sql_parsing.py b/src/gausskernel/dbmind/tools/anomaly_detection/utils/sql_parsing.py deleted file mode 100644 index f9d55c4b3..000000000 --- a/src/gausskernel/dbmind/tools/anomaly_detection/utils/sql_parsing.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import ast -import re - -import sqlparse -from sqlparse.sql import Where, Comparison, Operation, Function, Identifier -from sqlparse.tokens import DML, Token - - -def is_subquery(parse_tree): - if not parse_tree.is_group: - return False - for item in parse_tree.tokens: - if item.ttype is DML and item.value.upper() == 'SELECT': - return True - return False - - -def analyze_column(column, where_clause): - for tokens in where_clause.tokens: - if isinstance(tokens, Comparison) and isinstance(tokens.left, Identifier): - column.add(tokens.left.value) - - -def get_columns(sql): - column = set() - parsed_tree = sqlparse.parse(sql)[0] - for item in parsed_tree: - if isinstance(item, Where): - analyze_column(column, item) - return list(column) - - -def get_indexes(dbagent, sql, timestamp): - """ - Get indexes of SQL from dataset. - :param dbagent: obj, interface for sqlite3. - :param sql: str, query. - :return: list, the set of indexes. 
- """ - indexes = [] - indexes_dict = dbagent.fetch_all_result("SELECT indexes from wdr where timestamp ==\"{timestamp}\"" - " and query == \"{query}\"".format(timestamp=timestamp, - query=sql)) - if len(indexes_dict): - try: - indexes_dict = ast.literal_eval(indexes_dict[0][0]) - indexes_def_list = list(list(indexes_dict.values())[0].values()) - for sql_index in indexes_def_list: - value_in_bracket = re.compile(r'[(](.*?)[)]', re.S) - indexes.append(re.findall(value_in_bracket, sql_index)[0].split(',')[0]) - except Exception: - return indexes - return indexes - - -def analyze_unequal_clause(tokens): - for token in tokens: - if token.ttype is Token.Operator.Comparison and token.value.upper() == 'LIKE': - return 'FuzzyQuery' - elif token.ttype is Token.Operator.Comparison and token.value.upper() == '!=': - return 'UnEqual' - - -def analyze_where_clause(dbagent, where, timestamp): - """ - Analyze RCA of SQL from the where clause. - :param dbagent: obj, interface for sqlite3. - :param where: tokens, where clause of sqlparse. - :return: str, key value of RCA. - """ - if "OR" in where.value.upper(): - columns = get_columns(where.parent.value) - indexes = get_indexes(dbagent, where.parent.value, timestamp) - for column in columns: - if column not in indexes: - return 'OR' - - res = None - for tokens in where.tokens: - if isinstance(tokens, Comparison): - if isinstance(tokens.left, Operation): - res = 'ExprInWhere' - break - elif isinstance(tokens.left, Function): - res = 'Function' - break - elif isinstance(tokens, Comparison) and "<" in tokens.parent.value or ">" in tokens.parent.value: - res = 'RangeTooLarge' - break - else: - res = analyze_unequal_clause(tokens) - break - if res: - return res - if 'NOT IN' in where.value.upper(): - return "NotIn" - if "is not null".upper() in where.value.upper(): - return 'IsNotNULL' - - -def sql_parse(dbagent, sql, timestamp): - sql = re.sub(r'\n|\t', r' ', sql) - sql = re.sub(r'[ ]{2,}', r' ', sql) - parse_tree = sqlparse.parse(sql)[0] - - if "select count( * ) from".upper() in parse_tree.value.upper() or \ - "select * from".upper() in parse_tree.value.upper() or \ - "select count(*) from".upper() in parse_tree.value.upper() or \ - "select count( *) from".upper() in parse_tree.value.upper() or \ - "select count(* ) from".upper() in parse_tree.value.upper(): - return "FullScan" - - if "update".upper() in parse_tree.value.upper() and "set".upper() in parse_tree.value.upper(): - return 'Update' - - for item in parse_tree: - if isinstance(item, Where): - return analyze_where_clause(dbagent, item, timestamp) diff --git a/src/gausskernel/dbmind/tools/app/__init__.py b/src/gausskernel/dbmind/tools/app/__init__.py new file mode 100644 index 000000000..5ea3454fc --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +def register_timed_app(): + from . 
import timed_app + diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/__init__.py new file mode 100644 index 000000000..3381d6711 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/__init__.py @@ -0,0 +1,61 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from dbmind.common.types.alarm import Alarm, ALARM_TYPES, ALARM_LEVEL +from .cluster import diagnose_cluster +from .query import diagnose_query +from .system import diagnose_system + + +def _find_highest_level(root_causes): + highest = ALARM_LEVEL.NOTSET + for root_cause in root_causes: + if root_cause.level > highest: + highest = root_cause.level + + return highest + + +def diagnose_for_sequences(host, metric, sequences): + rca_system = list() + # input parameter format: + # ('10.90.56.172', 'os_cpu_usage_rate', [Sequence, Sequence, Sequence]) + for seq in sequences: + root_causes = diagnose_system(host, metric, seq) + if len(root_causes) == 0: + continue + alarm = Alarm( + host=host, + metric_name=metric, + alarm_content='found anomaly on %s' % metric, + alarm_type=ALARM_TYPES.SYSTEM, + alarm_subtype=None, + alarm_level=_find_highest_level(root_causes), + alarm_cause=root_causes + ) + alarm.set_timestamp(start=seq.timestamps[0], + end=seq.timestamps[-1]) + rca_system.append(alarm) + + return rca_system + + +def diagnose_for_alarm_logs(host, alarm_logs): + rca_cluster = list() + # input parameter format: + # ('10.90.56.172', [Log, Log, Log]) + for alarm_log in alarm_logs: + rca_cluster.extend( + diagnose_cluster(host, alarm_log) + ) + return rca_cluster diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/cluster/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/cluster/__init__.py new file mode 100644 index 000000000..cda231369 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/cluster/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +from .entry import diagnose_cluster + diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/cluster/entry.py b/src/gausskernel/dbmind/tools/app/diagnosis/cluster/entry.py new file mode 100644 index 000000000..bebcd5bd1 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/cluster/entry.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +def diagnose_cluster(host, alarm_log): + pass + diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/__init__.py new file mode 100644 index 000000000..666956fa1 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import logging + +from dbmind.common.types.root_cause import RootCause +from .slow_sql.analyzer import SlowSQLAnalyzer + +_analyzer = SlowSQLAnalyzer() + + +def diagnose_query(slow_query): + try: + _analyzer.run(slow_query) + except Exception as e: + slow_query.add_cause(RootCause.get('LACK_INFORMATION')) + logging.exception(e) + return slow_query diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/__init__.py new file mode 100644 index 000000000..054f32436 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/analyzer.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/analyzer.py new file mode 100644 index 000000000..dddc4509d --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/analyzer.py @@ -0,0 +1,178 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
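+
+# Scoring sketch for the matcher defined below (the numbers are illustrative,
+# not taken from the shipped feature library): for a candidate cause with
+# label k, _vector_distance sums weight_matrix[k - 1][i] over every position i
+# where the library vector and the diagnosed feature vector are both set (the
+# vectors used here are binary). With weight_matrix[0] = [0.5, 0.3, 0.2], a
+# library vector [1, 0, 1] and a diagnosed vector [1, 1, 1], positions 0 and 2
+# match, giving a score of 0.5 + 0.2 = 0.7. A larger score means a closer
+# match; _calculate_nearest_feature keeps the top-k scores and normalizes them
+# into the probabilities attached to each root cause.
+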
+import logging +import re +from collections import defaultdict +from typing import List, Dict + +import numpy as np + +from dbmind.common.types.misc import SlowQuery +from dbmind.common.types.root_cause import RootCause +from .featurelib import load_feature_lib, get_feature_mapper +from .query_feature import QueryFeature +from ..slow_sql import query_info_source + +_system_table_keywords = ('PG_', 'GS_') +_shield_keywords = ('CREATE', 'DROP', 'ALTER', 'TRUNCATE', 'GRANT', 'REVOKE', 'COMMIT', 'ROLLBACK') + +FEATURE_LIB = load_feature_lib() +FEATURES_CAUSE_MAPPER = get_feature_mapper() +FEATURES, CAUSES, WEIGHT_MATRIX = FEATURE_LIB['features'], FEATURE_LIB['labels'], FEATURE_LIB['weight_matrix'] + + +def _vector_distance(vector1: List, vector2: List, label: int, weight_matrix: List[List]) -> float: + """ + Calculate the distance between vectors based on the improved Hamming algorithm + :param vector1: input vector + :param vector2: input vector + :param label: input label value of vector2 + :param weight_matrix: weight matrix + :return: distance of two vectors + """ + if len(vector1) != len(vector2): + raise ValueError('not equal.') + distance = 0.0 + for index in range(len(vector1)): + if vector1[index] == vector2[index] and vector1[index]: + distance += weight_matrix[label - 1][index] * vector1[index] + return distance + + +def _euclid_distance(vector1: List, vector2: List) -> float: + """ + Calculate the distance between vectors based on the euclid algorithm + :param vector1: input vector + :param vector2: input vector + :return: distance of two vectors + """ + dist = np.sqrt(np.sum(np.square([item1 - item2 for item1, item2 in zip(vector1, vector2)]))) + return round(dist, 4) + + +def _calculate_nearest_feature(sql_feature: List, topk: int = 3) -> List: + """ + Return the topk feature that is most similar to the input vector + :param sql_feature: input vector + :param topk: The number of most similar features output + :return: The most similar feature and its index information + """ + indexes = [] + for feature, cause in zip(FEATURES, CAUSES): + dis = _vector_distance(feature, sql_feature, cause, WEIGHT_MATRIX) + indexes.append((dis, cause)) + indexes = sorted(indexes, key=lambda x: x[1], reverse=True) + indexes = list(filter(lambda x: x[0], indexes)) + filter_indexes = [] + _id = 0 + while _id < len(indexes): + max_dis = 0 + while _id + 1 < len(indexes) and indexes[_id][1] == indexes[_id + 1][1]: + max_dis = max(indexes[_id][0], indexes[_id + 1][0]) + _id += 1 + filter_indexes.append([max(max_dis, indexes[_id][0]), indexes[_id][1]]) + _id += 1 + probability_sum = sum([item[0] for item in filter_indexes[:topk]]) + filter_indexes = [[item[0] / probability_sum, item[1]] for item in filter_indexes[:topk]] + filter_indexes = sorted(filter_indexes, key=lambda x: x[0], reverse=True) + return filter_indexes + + +class SlowSQLAnalyzer: + """ + Classes for diagnosing slow SQL + """ + + def __init__(self, topk: int = 3, buffer_capacity: int = 500): + """ + :param topk: The number of output root causes + :param buffer_capacity: The length of slow SQL buffer queue + """ + self.topk = topk + self.sql_buffers = [] + self.buffer_capacity = buffer_capacity + + def run(self, slow_query_instance: SlowQuery) -> [SlowQuery, None]: + """ + API for slow SQL diagnostic calls + :param slow_query_instance: The instance of slow query + :return: The result of slow query, if the slow query has been diagnosed, then return None + """ + if self._sql_judge(slow_query_instance): + return + data_factory = 
query_info_source.QueryContext(slow_query_instance)
+        pg_class = data_factory.acquire_pg_class()
+        schema_info = pg_class.get(slow_query_instance.db_name, {})
+        self._analyze(slow_query_instance, data_factory, schema_info)
+        return slow_query_instance
+
+    def _sql_judge(self, slow_sql_instance: SlowQuery) -> bool:
+        """Determine whether the SQL statement has already been diagnosed; return True if so, else False"""
+        diagnosed_flag = "%s-%s-%s" % (
+            slow_sql_instance.start_at, slow_sql_instance.start_at + slow_sql_instance.duration_time,
+            slow_sql_instance.template_id)
+        if diagnosed_flag in self.sql_buffers:
+            return True
+        if len(self.sql_buffers) >= self.buffer_capacity:
+            # Evict the oldest entry so the buffer behaves as a FIFO queue.
+            self.sql_buffers.pop(0)
+        self.sql_buffers.append(diagnosed_flag)
+        return False
+
+    def _analyze(self, slow_sql_instance: SlowQuery, data_factory: query_info_source.QueryContext,
+                 schema_infos: Dict) -> [SlowQuery, None]:
+        """Slow SQL diagnosis main process"""
+        logging.debug(f"[SLOW QUERY] Diagnosing SQL: {slow_sql_instance.query}")
+        exist_tables = defaultdict(list)
+        if slow_sql_instance.query.upper() == 'COMMIT' or slow_sql_instance.query.upper().startswith('SET'):
+            title = FEATURES_CAUSE_MAPPER.get('C_UNKNOWN')
+            root_cause = RootCause.get(title)
+            slow_sql_instance.add_cause(root_cause)
+            return
+        if sum(item in slow_sql_instance.query.upper() for item in _system_table_keywords):
+            root_cause = RootCause.get(FEATURES_CAUSE_MAPPER.get('C_VIEW'))
+            slow_sql_instance.add_cause(root_cause)
+            return
+        if sum(item in slow_sql_instance.query.upper() for item in _shield_keywords):
+            root_cause = RootCause.get(FEATURES_CAUSE_MAPPER.get('C_SQL'))
+            slow_sql_instance.add_cause(root_cause)
+            return
+        if schema_infos:
+            query = slow_sql_instance.query
+            regex_result = re.findall(r"([\w\d_]+)\.([\w\d_]+)", slow_sql_instance.query)
+            if regex_result:
+                for schema, table in regex_result:
+                    exist_tables[schema].append(table)
+                    # str.replace returns a new string, so the result must be
+                    # assigned back; otherwise the call has no effect and the
+                    # qualified tables would be counted again below.
+                    query = query.replace("%s.%s" % (schema, table), ' ')
+            for table in schema_infos[slow_sql_instance.schema_name]:
+                if table in query:
+                    exist_tables[slow_sql_instance.schema_name].append(table)
+            slow_sql_instance.tables_name = exist_tables
+        feature_generator = QueryFeature(slow_sql_instance, data_factory)
+        feature_generator.initialize_metrics()
+        feature, details = feature_generator()
+        logging.debug("[SLOW QUERY] Feature vector: %s, detail: %s", feature, details)
+        topk_root_cause = _calculate_nearest_feature(feature, topk=self.topk)
+        logging.debug("[SLOW QUERY] Topk root cause: %s", topk_root_cause)
+        if topk_root_cause:
+            for probability, index in topk_root_cause:
+                cause_key = f"C{index}"
+                title = FEATURES_CAUSE_MAPPER.get(cause_key, 'C_UNKNOWN')
+                root_cause = RootCause.get(title)
+                root_cause.set_probability(probability)
+                root_cause.format(**details)
+                slow_sql_instance.add_cause(root_cause)
+        if not slow_sql_instance.root_causes:
+            title = FEATURES_CAUSE_MAPPER.get('C_UNKNOWN')
+            root_cause = RootCause.get(title)
+            slow_sql_instance.add_cause(root_cause)
diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/__init__.py
new file mode 100644
index 000000000..573abba97
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/__init__.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import os + +from . import feature_mapping +from . import features + + +def load_feature_lib(): + return features.FEATURE_LIB + + +def get_feature_mapper(): + return { + item: value + for item, value in + feature_mapping.__dict__.items() if item.startswith('C') + } diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/feature_mapping.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/feature_mapping.py new file mode 100644 index 000000000..e957e5a38 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/feature_mapping.py @@ -0,0 +1,32 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +C1 = 'LOCK_CONTENTION_SQL' +C2 = 'LARGE_DEAD_RATE' +C3 = 'LARGE_FETCHED_TUPLES' +C4 = 'LARGE_RETURNED_ROWS' +C5 = 'SMALL_SHARED_BUFFER_SQL' +C6 = 'UPDATED_REDUNDANT_INDEX' +C7 = 'LARGE_UPDATED_TUPLES' +C8 = 'INSERTED_REDUNDANT_INDEX' +C9 = 'INSERTED_INDEX_NUMBER' +C10 = 'LARGE_INSERTED_TUPLES' +C11 = 'DELETED_REDUNDANT_INDEX' +C12 = 'LARGE_DELETED_TUPLES' +C13 = 'EXTERNAL_SORT' +C14 = 'LARGE_TPS' +C15 = 'VACUUM_SQL' +C16 = 'ANALYZE_SQL' +C17 = 'SYSTEM_SQL' +C_UNKNOWN = 'UNKNOWN' +C_VIEW = 'DATABASE_VIEW' +C_SQL = 'ILLEGAL_SQL' diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/feature_model.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/feature_model.py new file mode 100644 index 000000000..4d4cc0b62 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/feature_model.py @@ -0,0 +1,78 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
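+
+# Offline rebuild sketch for the shipped feature library (the path and the row
+# count below are placeholders; feature_dimension must equal the number of
+# feature columns per CSV row, with the label in the final column):
+#
+#     build_model('/path/to/slow_sql_features.csv',
+#                 feature_number=73, feature_dimension=22,
+#                 save_path='./features_new.npz')
+#
+# The saved .npz carries the same three arrays ('features', 'labels' and
+# 'weight_matrix') that featurelib/features.py exposes as FEATURE_LIB.
+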
+import csv +from collections import defaultdict +from typing import List + +import numpy as np + +from ..analyzer import _euclid_distance as euclid_distance +from dbmind.common.utils import ExceptionCatch + + +def calculate_weight(features: np.ndarray, labels: np.ndarray) -> List: + """ + Calculate weight matrix based on feature set + :param features: feature set + :param labels: label set + :return: weight_matrix + """ + normalize_features, normalize_labels = [], [] + features_labels_dict = defaultdict(list) + for i in range(len(labels)): + features_labels_dict[labels[i]].append(features[i]) + for key, value in sorted(features_labels_dict.items(), key=lambda x: x[0]): + normalize_features.append(np.sum(value, axis=0).tolist()) + normalize_labels.append(key) + n_label = len(normalize_labels) + dist_matrix = [[0] * n_label for i in range(n_label)] + weight_matrix = [] + for i in range(n_label): + for j in range(i, n_label): + if i == j: + dist = 0 + else: + dist = euclid_distance(normalize_features[i], normalize_features[j]) + dist_matrix[i][j] = dist + dist_matrix[j][i] = dist + for i in range(n_label): + n = sum([dist_matrix[i][j] * dist_matrix[i][j] for j in range(n_label) if i != j]) + weight_vector = np.sum(np.array( + [(1 / (dist_matrix[i][j] * dist_matrix[i][j])) * np.array(normalize_features[j]) for j in range(n_label) if + i != j]), axis=0) / n + residual_vector = np.abs(weight_vector - np.array(normalize_features[i])) + feature_weight = residual_vector / np.sum(residual_vector) + weight_matrix.append(feature_weight) + return weight_matrix + + +@ExceptionCatch(strategy='exit', name='FEATURE') +def build_model(feature_path: str, feature_number: int, feature_dimension: int, + save_path: str = './features_new.npz') -> None: + """ + Build a model based on features + :param feature_path: path of feature file + :param feature_number: the number of feature + :param feature_dimension: the dimension of feature + :param save_path: path to save feature model + :return: None + """ + features, labels = np.zeros((feature_number, feature_dimension)), np.zeros(feature_number) + with open(feature_path, mode='r') as f: + csv_reader = csv.reader(f) + for line in csv_reader: + line = [int(item) for item in line] + features[csv_reader.line_num - 1] = line[:-1] + labels[csv_reader.line_num - 1] = line[-1] + weight_matrix = calculate_weight(features, labels) + np.savez(save_path, features=features, labels=labels, weight_matrix=weight_matrix) diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/features.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/features.py new file mode 100644 index 000000000..ffb8b1e2d --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/featurelib/features.py @@ -0,0 +1,194 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
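+
+# Layout of FEATURE_LIB, as consumed by analyzer.py:
+#   'features'      - binary vectors, one per known slow-query scenario; each
+#                     of the 22 positions corresponds to one QueryFeature
+#                     check, in the order produced by QueryFeature.__call__.
+#   'labels'        - the root-cause label (1..17) of each feature vector;
+#                     labels map to the titles C1..C17 in feature_mapping.py.
+#   'weight_matrix' - one row of per-position weights per label, used by the
+#                     improved Hamming distance in analyzer._vector_distance.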
+ +FEATURE_LIB = {'features': [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]], + 'labels': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, + 17], + 'weight_matrix': [ + [0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 
0.9984554256549266, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 
1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, 0.0001024917425499386, + 0.0001024917425499386, 0.0001024917425499386, 0.9984554256549266, 0.0001024917425499386, + 1.2279293994575322e-06, 1.3549565787117599e-06, 1.1432446132880474e-06, 1.2279293994575322e-06, + 1.1432446132880474e-06, 1.1009022202033048e-06], + [1.5687755404435794e-11, 1.5687755404435794e-11, 1.5687755404435794e-11, 1.5687755404435794e-11, + 1.5687755404435794e-11, 1.5687755404435794e-11, 1.5687755404435794e-11, 1.5687755404435794e-11, + 1.5687755404435794e-11, 1.5687755404435794e-11, 1.5687755404435794e-11, 1.5687755404435794e-11, + 1.5687755404435794e-11, 1.5687755404435794e-11, 
1.5687755404435794e-11, 1.5687755404435794e-11,
+            0.1705882352512993, 0.1882352940703992, 0.15882352937189934, 0.1705882352512993,
+            0.15882352937189934, 0.15294117643219937]]
+    }
diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/query_feature.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/query_feature.py
new file mode 100644
index 000000000..f7bdccc0d
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/query_feature.py
@@ -0,0 +1,466 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+from dbmind import global_vars
+from dbmind.common.parser.sql_parsing import sql_processing
+from dbmind.common.types.misc import SlowQuery
+from ..slow_sql.query_info_source import QueryContext
+from ..slow_sql import significance_detection
+
+
+def _get_threshold(name: str) -> [float, int]:
+    return global_vars.dynamic_configs.get('slow_sql_threshold', name)
+
+
+class QueryFeature:
+    """
+    Feature processing factory
+    """
+    def __init__(self, slow_sql_instance: SlowQuery, data_factory: QueryContext = None):
+        """
+        :param slow_sql_instance: The instance of slow query
+        :param data_factory: The data processing factory that supplies runtime information for the slow query
+        self.table_structure: data structure to save table structure
+        self.lock_info: data structure to save lock information of the slow query
+        self.database_info: data structure to save database information such as TPS and connections
+        self.system_info: data structure to save system information
+        self.detail: data structure to save diagnosis information
+        """
+        self.slow_sql_instance = slow_sql_instance
+        self.data_factory = data_factory
+        self.table_structure = None
+        self.lock_info = None
+        self.database_info = None
+        self.system_info = None
+        self.detail = {}
+
+    def initialize_metrics(self):
+        """Initialize the data structures such as database_info, table_structure, lock_info, etc"""
+        self.database_info = self.data_factory.acquire_database_info()
+        self.table_structure = self.data_factory.acquire_tables_structure_info()
+        self.lock_info = self.data_factory.acquire_lock_info()
+        self.system_info = self.data_factory.acquire_system_info()
+
+    @property
+    def select_type(self) -> bool:
+        """Determine whether it is a select statement"""
+        filter_query = self.slow_sql_instance.query.strip().lower()
+        if filter_query.startswith('select'):
+            return True
+        return False
+
+    @property
+    def update_type(self) -> bool:
+        """Determine whether it is an update statement"""
+        filter_query = self.slow_sql_instance.query.strip().lower()
+        if filter_query.startswith('update'):
+            return True
+        return False
+
+    @property
+    def delete_type(self) -> bool:
+        """Determine whether it is a delete statement"""
+        filter_query = self.slow_sql_instance.query.strip().lower()
+        if filter_query.startswith('delete'):
+            return True
+        return False
+
+    @property
+    def insert_type(self) -> bool:
+        """Determine whether it is an insert statement"""
+        filter_query = self.slow_sql_instance.query.strip().lower()
+        if filter_query.startswith('insert'):
+            return True
+        return False
+
+    @property
+    def other_type(self) -> bool:
+        """Determine whether it is any other type of statement: return True if the query
+        does not start with select, delete, insert, or update"""
+        if not self.select_type and not self.insert_type and not self.update_type and not self.delete_type:
+            return True
+        return False
+
+    @property
+    def query_block(self) -> bool:
+        """Determine whether the query is blocked during execution"""
+        if not any((self.slow_sql_instance.lock_wait_count, self.slow_sql_instance.lwlock_wait_count)):
+            return False
+        for index, query_info in enumerate(self.lock_info.locked_query):
+            if sql_processing(self.slow_sql_instance.query) == sql_processing(query_info):
+                self.detail['lock_info'] = "SQL was blocked by: '%s'" % self.lock_info.locker_query[index]
+                return True
+        if self.slow_sql_instance.lock_wait_count and not self.slow_sql_instance.lwlock_wait_count:
+            self.detail['lock_info'] = "lock count: %s" % self.slow_sql_instance.lock_wait_count
+        elif not self.slow_sql_instance.lock_wait_count and self.slow_sql_instance.lwlock_wait_count:
+            self.detail['lock_info'] = "lwlock count: %s" % self.slow_sql_instance.lwlock_wait_count
+        else:
+            self.detail['lock_info'] = "lock count %s, lwlock count %s" % (
+                self.slow_sql_instance.lock_wait_count, self.slow_sql_instance.lwlock_wait_count)
+        return True
+
+    @property
+    def large_table(self) -> bool:
+        """Determine whether the query related table is large"""
+        if not self.table_structure:
+            return False
+        tuples_info = {f"{item.schema_name}:{item.table_name}": [item.live_tuples, item.dead_tuples] for item in
+                       self.table_structure}
+        self.detail['large_table'] = {}
+        for table_name, tuples_number_list in tuples_info.items():
+            if sum(tuples_number_list) > _get_threshold('tuple_number_limit'):
+                self.detail['large_table'][table_name] = tuples_number_list
+        if self.detail.get('large_table'):
+            return True
+        return False
+
+    @property
+    def large_dead_tuples(self) -> bool:
+        """Determine whether the query related table has too many dead tuples"""
+        if not self.table_structure or not self.large_table or self.insert_type:
+            return False
+        dead_rate_info = {f"{item.schema_name}:{item.table_name}": item.dead_rate for item in
+                          self.table_structure}
+        self.detail['dead_rate'] = {}
+        for table_name, dead_rate in dead_rate_info.items():
+            if dead_rate > _get_threshold('dead_rate_limit'):
+                self.detail['dead_rate'][table_name] = dead_rate
+        if self.detail.get('dead_rate'):
+            return True
+        return False
+
+    @property
+    def large_fetch_tuples(self) -> bool:
+        """Determine whether the query fetches too many tuples from the related tables"""
+        fetched_tuples = self.slow_sql_instance.n_tuples_fetched
+        returned_tuples = self.slow_sql_instance.n_tuples_returned
+        if not self.table_structure or max([item.live_tuples for item in self.table_structure]) == 0:
+            if fetched_tuples + returned_tuples > _get_threshold('fetch_tuples_limit'):
+                self.detail['fetched_tuples'] = fetched_tuples + returned_tuples
+                self.detail['fetched_tuples_rate'] = 'UNKNOWN'
+                return True
+            return False
+        live_tuples_list = {f"{item.schema_name}:{item.table_name}": item.live_tuples for item in
+                            self.table_structure}
+        if (fetched_tuples + returned_tuples) / max(live_tuples_list.values()) > _get_threshold('fetch_rate_limit'):
+            self.detail['fetched_tuples'] = fetched_tuples + returned_tuples
+            self.detail['fetched_tuples_rate'] = round(
+                (fetched_tuples + returned_tuples) / max(live_tuples_list.values()), 4)
+            return True
+        else:
+            return False
+
+    @property
+    def large_returned_rows(self) -> bool:
+        """Determine whether the query returns too many rows to the client"""
+        returned_rows = self.slow_sql_instance.n_returned_rows
+        if not self.table_structure or max([item.live_tuples for item in self.table_structure]) == 0:
+            if returned_rows > _get_threshold('returned_rows_limit'):
+                self.detail['returned_rows'] = returned_rows
+                self.detail['returned_rows_rate'] = 'UNKNOWN'
+                return True
+            return False
+        live_tuples_list = {f"{item.schema_name}:{item.table_name}": item.live_tuples for item in
+                            self.table_structure}
+        if returned_rows / max(live_tuples_list.values()) > _get_threshold(
+                'returned_rate_limit'):
+            self.detail['returned_rows'] = returned_rows
+            self.detail['returned_rows_rate'] = round(returned_rows / max(live_tuples_list.values()), 4)
+            return True
+        else:
+            return False
+
+    @property
+    def lower_hit_ratio(self) -> bool:
+        """Determine whether the query has a low buffer hit ratio"""
+        self.detail['hit_rate'] = self.slow_sql_instance.hit_rate
+        if not self.large_table or self.insert_type:
+            return False
+        if self.slow_sql_instance.hit_rate < _get_threshold('hit_rate_limit'):
+            return True
+        return False
+
+    @property
+    def redundant_index(self) -> bool:
+        """Determine whether the query related table has redundant indexes"""
+        if not self.table_structure or not self.large_table:
+            return False
+        redundant_index_info = {f"{item.schema_name}:{item.table_name}": item.redundant_index for item in
+                                self.table_structure}
+        self.detail['redundant_index'] = {}
+        for table_name, redundant_index_list in redundant_index_info.items():
+            if redundant_index_list:
+                self.detail['redundant_index'][table_name] = redundant_index_list
+        if self.detail.get('redundant_index'):
+            return True
+        return False
+
+    @property
+    def update_redundant_index(self) -> bool:
+        """Determine whether the update query related table has redundant indexes"""
+        return self.update_type and self.redundant_index
+
+    @property
+    def insert_redundant_index(self) -> bool:
+        """Determine whether the insert query related table has redundant indexes"""
+        return self.insert_type and self.redundant_index
+
+    @property
+    def delete_redundant_index(self) -> bool:
+        """Determine whether the delete query related table has redundant indexes"""
+        return self.delete_type and self.redundant_index
+
+    @property
+    def large_updated_tuples(self) -> bool:
+        """Determine whether the query updates a large number of tuples"""
+        updated_tuples = self.slow_sql_instance.n_tuples_updated
+        if not self.table_structure or max([item.live_tuples for item in self.table_structure]) == 0:
+            if updated_tuples > _get_threshold('updated_tuples_limit'):
+                self.detail['updated_tuples'] = updated_tuples
+                self.detail['updated_tuples_rate'] = 'UNKNOWN'
+                return True
+            return False
+        live_tuples_list = {f"{item.schema_name}:{item.table_name}": item.live_tuples for item in
+                            self.table_structure}
+        if updated_tuples / max(live_tuples_list.values()) > _get_threshold(
+                'updated_rate_limit'):
+            self.detail['updated_tuples'] = updated_tuples
+            self.detail['updated_tuples_rate'] = round(updated_tuples / max(live_tuples_list.values()), 4)
+            return True
+        else:
+            return False
+
+    @property
+    def large_inserted_tuples(self) -> bool:
+        """Determine whether the query inserts a large number of tuples"""
+        inserted_tuples = self.slow_sql_instance.n_tuples_inserted
+        if not self.table_structure or max([item.live_tuples for item in self.table_structure]) == 0:
+            if inserted_tuples > _get_threshold('inserted_tuples_limit'):
+                self.detail['inserted_tuples'] = inserted_tuples
+                self.detail['inserted_tuples_rate'] = 'UNKNOWN'
+                return True
+            return False
+        live_tuples_list = {f"{item.schema_name}:{item.table_name}": item.live_tuples for item in
+                            self.table_structure}
+        if inserted_tuples / max(live_tuples_list.values()) > _get_threshold(
+                'inserted_rate_limit'):
+            self.detail['inserted_tuples'] = inserted_tuples
+            self.detail['inserted_tuples_rate'] = round(inserted_tuples / max(live_tuples_list.values()), 4)
+            return True
+        else:
+            return False
+
+    @property
+    def large_index_number(self) -> bool:
+        """Determine whether the query related table has too many indexes"""
+        if not self.table_structure:
+            return False
+        index_info = {f"{item.schema_name}:{item.table_name}": item.index for item in
+                      self.table_structure}
+        self.detail['index'] = {}
+        for table_name, index_list in index_info.items():
+            if len(index_list) > _get_threshold('index_number_limit'):
+                self.detail['index'][table_name] = index_list
+        if self.detail.get('index'):
+            return True
+        return False
+
+    @property
+    def index_number_insert(self) -> bool:
+        """Determine whether the insert query related table has too many indexes"""
+        return self.insert_type and self.large_index_number
+
+    @property
+    def large_deleted_tuples(self) -> bool:
+        """Determine whether the query deletes a large number of tuples"""
+        deleted_tuples = self.slow_sql_instance.n_tuples_deleted
+        if not self.table_structure or max([item.live_tuples for item in self.table_structure]) == 0:
+            if deleted_tuples > _get_threshold('deleted_tuples_limit'):
+                self.detail['deleted_tuples'] = deleted_tuples
+                self.detail['deleted_tuples_rate'] = 'UNKNOWN'
+                return True
+            return False
+        live_tuples_list = {f"{item.schema_name}:{item.table_name}": item.live_tuples for item in
+                            self.table_structure}
+        if deleted_tuples / max(live_tuples_list.values()) > _get_threshold(
+                'deleted_rate_limit'):
+            self.detail['deleted_tuples'] = deleted_tuples
+            self.detail['deleted_tuples_rate'] = round(deleted_tuples / max(live_tuples_list.values()), 4)
+            return True
+        else:
+            return False
+
+    @property
+    def external_sort(self) -> bool:
+        """Determine whether the query spilled sort or hash operations to disk"""
+        if self.slow_sql_instance.sort_count and not self.slow_sql_instance.sort_mem_used:
+            self.detail['external_sort'] = f"The probability of spilling to disk during execution is " \
+                                           f"{self.slow_sql_instance.sort_count}%"
+            return True
+        elif self.slow_sql_instance.hash_count and not self.slow_sql_instance.hash_mem_used:
+            self.detail['external_sort'] = f"The probability of spilling to disk during execution is " \
+                                           f"{self.slow_sql_instance.hash_count}%"
+            return True
+        elif self.slow_sql_instance.sort_spill_count:
+            self.detail['external_sort'] = f"The probability of spilling to disk during execution is " \
+                                           f"{self.slow_sql_instance.sort_spill_count}%"
+            return True
+        elif self.slow_sql_instance.hash_spill_count:
+            self.detail['external_sort'] = f"The probability of spilling to disk during execution is " \
+                                           f"{self.slow_sql_instance.hash_spill_count}%"
+            return True
+        else:
+            return False
+
+    @property
+    def vacuum_operation(self) -> bool:
+        """Determine whether a vacuum ran on the related tables while the query executed"""
+        if not self.table_structure or not self.large_table:
+            return False
+        auto_vacuum_info = {f"{item.schema_name}:{item.table_name}": item.last_autovacuum for item in
+                            self.table_structure}
+        user_vacuum_info = {f"{item.schema_name}:{item.table_name}": item.vacuum for item in
+                            self.table_structure}
+        self.detail['autovacuum'] = {}
+        for table_name, autovacuum_time in auto_vacuum_info.items():
+            if self.slow_sql_instance.start_at <= autovacuum_time <= self.slow_sql_instance.start_at + \
+                    self.slow_sql_instance.duration_time:
+                self.detail['autovacuum'][table_name] = autovacuum_time
+        for table_name, vacuum_time in user_vacuum_info.items():
+            if self.slow_sql_instance.start_at <= vacuum_time <= self.slow_sql_instance.start_at + \
+                    self.slow_sql_instance.duration_time:
+                self.detail['autovacuum'][table_name] = vacuum_time
+        if self.detail.get('autovacuum'):
+            return True
+        return False
+
+    @property
+    def analyze_operation(self) -> bool:
+        """Determine whether an analyze ran on the related tables while the query executed"""
+        if not self.table_structure or not self.large_table:
+            return False
+        auto_analyze_info = {f"{item.schema_name}:{item.table_name}": item.last_autoanalyze for item in
+                             self.table_structure}
+        user_analyze_info = {f"{item.schema_name}:{item.table_name}": item.analyze for item in
+                             self.table_structure}
+        self.detail['autoanalyze'] = {}
+        for table_name, autoanalyze_time in auto_analyze_info.items():
+            if self.slow_sql_instance.start_at <= autoanalyze_time <= self.slow_sql_instance.start_at + \
+                    self.slow_sql_instance.duration_time:
+                self.detail['autoanalyze'][table_name] = autoanalyze_time
+        for table_name, analyze_time in user_analyze_info.items():
+            if self.slow_sql_instance.start_at <= analyze_time <= self.slow_sql_instance.start_at + \
+                    self.slow_sql_instance.duration_time:
+                self.detail['autoanalyze'][table_name] = analyze_time
+        if self.detail.get('autoanalyze'):
+            return True
+        return False
+
+    @property
+    def tps_significant_change(self) -> bool:
+        """Determine whether the database TPS changed significantly around execution"""
+        cur_database_tps = self.database_info.current_tps
+        his_database_tps = self.database_info.history_tps
+        if not his_database_tps and not cur_database_tps:
+            return False
+        elif not his_database_tps and cur_database_tps:
+            if max(cur_database_tps) > _get_threshold('tps_limit'):
+                self.detail['tps'] = round(max(cur_database_tps), 4)
+                return True
+            else:
+                return False
+        elif his_database_tps and not cur_database_tps:
+            return False
+        else:
+            if significance_detection.detect(cur_database_tps, his_database_tps) and max(
+                    cur_database_tps) > _get_threshold('tps_limit'):
+                self.detail['tps'] = round(max(cur_database_tps), 4)
+                return True
+            return False
+
+    @property
+    def large_iops(self) -> bool:
+        """Determine whether the system IOPS is high during execution"""
+        if self.system_info.iops > _get_threshold('iops_limit'):
+            self.detail['system_cause']['iops'] = self.system_info.iops
+            return True
+        return False
+
+    @property
+    def large_iowait(self) -> bool:
+        """Determine whether the system iowait is high during execution"""
+        if self.system_info.iowait > _get_threshold('iowait_limit'):
+            self.detail['system_cause']['iowait'] = self.system_info.iowait
+            return True
+        return False
+
+    @property
+    def large_ioutils(self) -> bool:
+        """Determine whether any disk device shows high IO utilization during execution"""
+        ioutils_dict = {}
+        for device, ioutils in self.system_info.ioutils.items():
+            if ioutils > _get_threshold('ioutils_limit'):
+                ioutils_dict[device] = ioutils
+        if ioutils_dict:
+            self.detail['system_cause']['ioutils'] = ioutils_dict
+            return True
+        return False
+
+    @property
+    def large_iocapacity(self) -> bool:
+        """Determine whether the system IO capacity usage is high during execution"""
+        if self.system_info.iocapacity > _get_threshold('iocapacity_limit'):
+            self.detail['system_cause']['iocapacity'] = self.system_info.iocapacity
+            return True
+        return False
+
+    @property
+    def large_cpu_usage(self) -> bool:
+        """Determine whether the system CPU usage is high during execution"""
+        if self.system_info.cpu_usage > _get_threshold('cpu_usage_limit'):
+            self.detail['system_cause']['cpu_usage'] = self.system_info.cpu_usage
+            return True
+        return False
+
+    @property
+    def large_load_average(self) -> bool:
+        """Determine whether the system load average is high during execution"""
+        if self.system_info.load_average > _get_threshold('load_average_rate_limit'):
+            self.detail['system_cause']['load_average'] = self.system_info.load_average
+            return True
+        return False
+
+    def __call__(self):
+        self.detail['system_cause'] = {}
+        features = [self.query_block,
+                    self.large_dead_tuples,
+                    self.large_fetch_tuples,
+                    self.large_returned_rows,
+                    self.lower_hit_ratio,
+                    self.update_redundant_index,
+                    self.insert_redundant_index,
+                    self.delete_redundant_index,
+                    self.large_updated_tuples,
+                    self.large_inserted_tuples,
+                    self.index_number_insert,
+                    self.large_deleted_tuples,
+                    self.external_sort,
+                    self.vacuum_operation,
+                    self.analyze_operation,
+                    self.tps_significant_change,
+                    self.large_iowait,
+                    self.large_iops,
+                    self.large_load_average,
+                    self.large_cpu_usage,
+                    self.large_ioutils,
+                    self.large_iocapacity]
+        features = [int(item) for item in features]
+        return features, self.detail
diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/query_info_source.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/query_info_source.py
new file mode 100644
index 000000000..1c9064034
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/query_info_source.py
@@ -0,0 +1,303 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
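+
+# This module supplies QueryFeature with everything beyond the SQL text itself:
+# table layout (TableStructure), blocking information (LockInfo), database-wide
+# TPS and connection counts (DatabaseInfo) and host metrics (SystemInfo), all
+# fetched from the metric store through dbmind.service.dai. The fetch window is
+# deliberately wider than the statement's own lifetime so that low-frequency
+# metrics still yield data points; with the defaults below it is roughly
+#
+#     window = duration_time + expansion_factor * fetch_interval
+#            = duration_time + 5 * 15 seconds
+#
+# where fetch_interval is later refined from the actual sampling gap observed
+# in the collected sequences.
+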
+import logging +from datetime import datetime, timedelta +from typing import Dict, List + +from dbmind.common.parser.sql_parsing import is_num, str2int +from dbmind.common.utils import ExceptionCatch +from dbmind.service import dai + +excetpion_catcher = ExceptionCatch(strategy='exit', name='SLOW QUERY') + + +class TableStructure: + """Data structure to save table structure, contains the main information of the table structure such as + database address, database name, schema name, table name, dead tuples, etc + """ + + def __init__(self): + self.db_host = None + self.db_port = None + self.db_name = None + self.schema_name = None + self.table_name = None + self.dead_tuples = 0 + self.live_tuples = 0 + self.dead_rate = 0.0 + self.last_autovacuum = 0 + self.last_autoanalyze = 0 + self.vacuum = 0 + self.analyze = 0 + self.table_size = 0 + self.index = [] + self.redundant_index = [] + + +class LockInfo: + """Data structure to save lock information such as database information, locker_query and locked_query, etc""" + + def __init__(self): + self.db_host = None + self.db_port = None + self.locked_query = [] + self.locked_query_start = [] + self.locker_query = [] + self.locker_query_end = [] + + +class DatabaseInfo: + """Data structure to save database information such as database address and TPS, connection""" + + def __init__(self): + self.db_host = None + self.db_port = None + self.history_tps = [] + self.current_tps = [] + self.max_conn = 0 + self.used_conn = 0 + + +class SystemInfo: + """Data structure to save system information such as database address, IOWAIT, IOCAPACITY, CPU_USAGE, etc""" + + def __init__(self): + self.db_host = None + self.db_port = None + self.iops = 0.0 + self.ioutils = {} + self.iocapacity = 0.0 + self.iowait = 0.0 + self.cpu_usage = 0.0 + self.mem_usage = 0.0 + self.load_average = 0.0 + + +class QueryContext: + """The object of slow query data processing factory""" + + def __init__(self, slow_sql_instance, default_fetch_interval=15, expansion_factor=5, + retrieval_time=5): + """ + :param slow_sql_instance: The instance of slow query + :param default_fetch_interval: fetch interval of data source + :param expansion_factor: Ensure that the time expansion rate of the data can be collected + :param retrieval_time: Historical index retrieval time + """ + self.fetch_interval = default_fetch_interval + self.expansion_factor = expansion_factor + self.retrieval_time = retrieval_time + self.slow_sql_instance = slow_sql_instance + self.query_start_time = datetime.fromtimestamp(self.slow_sql_instance.start_at / 1000) + self.query_end_time = datetime.fromtimestamp( + self.slow_sql_instance.start_at / 1000 + + self.slow_sql_instance.duration_time / 1000 + + self.expansion_factor * self.acquire_fetch_interval() + ) + logging.debug('[SLOW QUERY] slow sql info: %s', slow_sql_instance.__dict__) + logging.debug('[SLOW QUERY] fetch start time: %s, fetch end time: %s', self.query_start_time, self.query_end_time) + logging.debug('[SLOW QUERY] fetch interval: %s', self.fetch_interval) + + + @excetpion_catcher + def acquire_pg_class(self) -> Dict: + """Get all object information in the database""" + pg_class = {} + sequences = dai.get_metric_sequence('pg_class_relsize', self.query_start_time, self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").fetchall() + for sequence in sequences: + pg_class['db_host'] = self.slow_sql_instance.db_host + pg_class['db_port'] = self.slow_sql_instance.db_port + db_name = sequence.labels['datname'] + 
schema_name = sequence.labels['nspname'] + table_name = sequence.labels['relname'] + if db_name not in pg_class: + pg_class[db_name] = {} + pg_class[db_name][schema_name] = [] + pg_class[db_name][schema_name].append(table_name) + elif schema_name not in pg_class[db_name]: + pg_class[db_name][schema_name] = [] + pg_class[db_name][schema_name].append(table_name) + else: + pg_class[db_name][schema_name].append(table_name) + return pg_class + + @excetpion_catcher + def acquire_fetch_interval(self) -> int: + """Get data source collection frequency""" + sequence = dai.get_latest_metric_sequence("os_disk_iops", self.retrieval_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchone() + logging.debug('[SLOW QUERY] acquire_fetch_interval: %s.', sequence) + timestamps = sequence.timestamps + if len(timestamps) >= 2: + self.fetch_interval = int(timestamps[-1]) // 1000 - int(timestamps[-2]) // 1000 + return self.fetch_interval + + @excetpion_catcher + def acquire_lock_info(self) -> LockInfo: + """Get lock information during slow SQL execution""" + blocks_info = LockInfo() + locks_sequences = dai.get_metric_sequence("pg_lock_sql_locked_times", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").fetchall() + logging.debug('[SLOW QUERY] acquire_lock_info: %s.', locks_sequences) + locked_query, locked_query_start, locker_query, locker_query_start = [], [], [], [] + for locks_sequence in locks_sequences: + logging.debug('[SLOW QUERY] acquire_lock_info: %s.', locks_sequence) + locked_query.append(locks_sequence.labels.get('locked_query', 'Unknown')) + locked_query_start.append(locks_sequence.labels.get('locked_query_start', 'Unknown')) + locker_query.append(locks_sequence.labels.get('locker_query', 'Unknown')) + locker_query_start.append(locks_sequence.labels.get('locker_query_start', 'Unknown')) + blocks_info.locked_query = locked_query + blocks_info.locked_query_start = locked_query_start + blocks_info.locker_query = locker_query + blocks_info.locker_query_start = locker_query_start + + return blocks_info + + @excetpion_catcher + def acquire_tables_structure_info(self) -> List: + """Acquire table structure information related to slow query""" + table_structure = [] + if not self.slow_sql_instance.tables_name: + return table_structure + for schema_name, tables_name in self.slow_sql_instance.tables_name.items(): + for table_name in tables_name: + table_info = TableStructure() + table_info.db_host = self.slow_sql_instance.db_host + table_info.db_port = self.slow_sql_instance.db_port + table_info.db_name = self.slow_sql_instance.db_name + table_info.schema_name = schema_name + table_info.table_name = table_name + pg_stat_user_tables_info = dai.get_metric_sequence("pg_tables_expansion_rate_dead_rate", + self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").filter( + datname=f"{self.slow_sql_instance.db_name}").filter( + schemaname=f"{schema_name}").filter(relname=f"{table_name}").fetchone() + logging.debug('[SLOW QUERY] acquire_tables_structure[pg_stat_user_tables]: %s.', pg_stat_user_tables_info) + pg_table_size_info = dai.get_metric_sequence("pg_tables_size_bytes", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").filter( + datname=f"{self.slow_sql_instance.db_name}").filter( + nspname=f"{schema_name}").filter(relname=f"{table_name}").fetchone() + 
logging.debug('[SLOW QUERY] acquire_tables_structure[pg_table_size]: %s.', pg_table_size_info) + index_number_info = dai.get_metric_sequence("pg_index_idx_scan", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").filter( + datname=f"{self.slow_sql_instance.db_name}").filter( + nspname=f"{schema_name}").filter(tablename=f"{table_name}").fetchall() + logging.debug('[SLOW QUERY] acquire_tables_structure[index_number]: %s.', index_number_info) + redundant_index_info = dai.get_metric_sequence("pg_never_used_indexes_index_size", + self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").filter( + datname=f"{self.slow_sql_instance.db_name}").filter( + schemaname=f"{schema_name}").filter(relname=f"{table_name}").fetchall() + logging.debug('[SLOW QUERY] acquire_tables_structure[redundant_index]: %s.', redundant_index_info) + if pg_stat_user_tables_info.values: + table_info.dead_tuples = int(pg_stat_user_tables_info.labels['n_dead_tup']) + table_info.live_tuples = int(pg_stat_user_tables_info.labels['n_live_tup']) + table_info.last_autovacuum = str2int(pg_stat_user_tables_info.labels['last_autovacuum']) * 1000 if \ + is_num(pg_stat_user_tables_info.labels['last_autovacuum']) else 0 + table_info.last_autoanalyze = str2int(pg_stat_user_tables_info.labels['last_autoanalyze']) * 1000 if \ + is_num(pg_stat_user_tables_info.labels['last_autoanalyze']) else 0 + table_info.vacuum = str2int(pg_stat_user_tables_info.labels['last_vacuum']) * 1000 if is_num( + pg_stat_user_tables_info.labels[ + 'last_vacuum']) else 0 + table_info.analyze = str2int(pg_stat_user_tables_info.labels['last_analyze']) * 1000 if is_num( + pg_stat_user_tables_info.labels[ + 'last_analyze']) else 0 + table_info.dead_rate = round(float(max(pg_stat_user_tables_info.values)), 4) + if pg_table_size_info.values: + table_info.table_size = round(float(max(pg_table_size_info.values)) / 1024 / 1024, 4) + if index_number_info: + table_info.index = [item.labels['relname'] for item in index_number_info if item.labels] + if redundant_index_info: + table_info.redundant_index = [item.labels['indexrelname'] for item in redundant_index_info] + table_structure.append(table_info) + + return table_structure + + @excetpion_catcher + def acquire_database_info(self) -> DatabaseInfo: + """Acquire table database information related to slow query""" + database_info = DatabaseInfo() + days_time_interval = 24 * 60 * 60 + cur_tps_sequences = dai.get_metric_sequence("gaussdb_qps_by_instance", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[cur_tps]: %s.', cur_tps_sequences) + his_tps_sequences = dai.get_metric_sequence("gaussdb_qps_by_instance", + self.query_start_time - timedelta(seconds=days_time_interval), + self.query_end_time - timedelta( + seconds=days_time_interval)).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[his_tps]: %s.', his_tps_sequences) + max_conn_sequence = dai.get_metric_sequence("pg_connections_max_conn", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[max_conn]: %s.', max_conn_sequence) + used_conn_sequence 
= dai.get_metric_sequence("pg_connections_used_conn", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}:{self.slow_sql_instance.db_port}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[used_conn]: %s.', used_conn_sequence) + if his_tps_sequences.values: + database_info.history_tps = [float(item) for item in his_tps_sequences.values] + if cur_tps_sequences.values: + database_info.current_tps = [float(item) for item in cur_tps_sequences.values] + if max_conn_sequence.values: + database_info.max_conn = int(max(max_conn_sequence.values)) + database_info.used_conn = int(max(used_conn_sequence.values)) + + return database_info + + @excetpion_catcher + def acquire_system_info(self) -> SystemInfo: + """Acquire system information on the database server """ + system_info = SystemInfo() + iops_info = dai.get_metric_sequence("os_disk_iops", self.query_start_time, self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[iops]: %s.', iops_info) + ioutils_info = dai.get_metric_sequence("os_disk_ioutils", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchall() + logging.debug('[SLOW QUERY] acquire_database_info[ioutils]: %s.', ioutils_info) + iocapacity_info = dai.get_metric_sequence("os_disk_iocapacity", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[iocapacity]: %s.', iocapacity_info) + iowait_info = dai.get_metric_sequence("os_cpu_iowait", self.query_start_time, self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[iowait]: %s.', iowait_info) + cpu_usage_info = dai.get_metric_sequence("os_cpu_usage", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[cpu_usage]: %s.', cpu_usage_info) + mem_usage_info = dai.get_metric_sequence("os_mem_usage", self.query_start_time, + self.query_end_time).from_server( + f"{self.slow_sql_instance.db_host}").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[mem_usage]: %s.', mem_usage_info) + load_average_info = dai.get_metric_sequence("node_load1", self.query_start_time, self.query_end_time).filter( + instance=f"{self.slow_sql_instance.db_host}:9100").fetchone() + logging.debug('[SLOW QUERY] acquire_database_info[load_average]: %s.', load_average_info) + system_info.iops = int(max(iops_info.values)) + ioutils_dict = {item.labels['device']: round(float(max(item.values)), 4) for item in ioutils_info} + system_info.ioutils = ioutils_dict + system_info.iocapacity = round(float(max(iocapacity_info.values)), 4) + system_info.iowait = round(float(max(iowait_info.values)), 4) + system_info.cpu_usage = round(float(max(cpu_usage_info.values)), 4) + system_info.mem_usage = round(float(max(mem_usage_info.values)), 4) + system_info.load_average = round(float(max(load_average_info.values)), 4) + + return system_info diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/__init__.py new file mode 100644 index 000000000..214efbabf --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/__init__.py @@ -0,0 
+1,28 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+
+def detect(data1, data2, method='bool', threshold=0.01, p_value=0.5):
+    if method == 'bool':
+        from .sum_base import detect as sum_detect
+        from .average_base import detect as avg_detect
+        from .ks_base import detect as ks_detect
+        vote_res = [sum_detect(data1, data2, threshold=threshold),
+                    avg_detect(data1, data2, threshold=threshold),
+                    ks_detect(data1, data2, p_value=p_value)]
+        return vote_res.count(True) > vote_res.count(False)
+    elif method == 'other':
+        from .average_base import detect as avg_detect
+        return avg_detect(data1, data2, threshold=threshold, method=method)
+    else:
+        raise ValueError('Not supported method %s.' % method)
diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/average_base.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/average_base.py
new file mode 100644
index 000000000..4f19f701c
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/average_base.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+alpha = 1e-10
+
+
+def detect(data1, data2, threshold=0.5, method='bool'):
+    """
+    Calculate whether the data has abrupt changes based on the average value
+    :param data1: input data array
+    :param data2: input data array
+    :param threshold: mutation rate
+    :param method: the way to calculate the mutation
+    :return: bool
+    """
+    if not isinstance(data1, list) or not isinstance(data2, list):
+        raise TypeError("The format of the input data is wrong.")
+    avg1 = sum(data1) / len(data1) if data1 else 0
+    avg2 = sum(data2) / len(data2) if data2 else 0
+
+    if method == 'bool':
+        return avg1 * (1 - threshold) > avg2
+    elif method == 'other':
+        if avg1 < avg2:
+            return 0
+        else:
+            return (avg1 - avg2) / (avg1 + alpha)
+    else:
+        raise ValueError('Not supported method %s.' % method)
+
diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/ks_base.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/ks_base.py
new file mode 100644
index 000000000..206fe1b2e
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/ks_base.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from scipy.stats import ks_2samp + + +def detect(data1, data2, p_value=0.05): + """ + Calculate whether the data has abrupt changes based on the KS algorithm + :param data1: input data array + :param data2: input data array + :param p_value: confidence value + :return: bool + """ + if not data1 or not data2: + return False + beta, norm = ks_2samp(data1, data2) + if norm < p_value: + return True + return False diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/sum_base.py b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/sum_base.py new file mode 100644 index 000000000..426b03006 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/query/slow_sql/significance_detection/sum_base.py @@ -0,0 +1,37 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +def detect(data1, data2, threshold=0.5, method='bool'): + """ + Calculate whether the data has abrupt changes based on the sum value + :param data1: input data array + :param data2: input data array + :param threshold: Mutation rate + :param method: The way to calculate the mutation + :return: bool + """ + alpha = 1e-10 + if not isinstance(data1, list) or not isinstance(data2, list): + raise TypeError("The format of the input data is wrong.") + sum1 = sum(data1) + sum2 = sum(data2) + if method == 'bool': + return sum1 * (1 - threshold) > sum2 + elif method == 'other': + if sum1 < sum2: + return 0 + else: + return (sum1 - sum2) / (sum1 + alpha) + else: + raise ValueError('Not supported method %s.' % method) diff --git a/src/gausskernel/dbmind/tools/app/diagnosis/system/__init__.py b/src/gausskernel/dbmind/tools/app/diagnosis/system/__init__.py new file mode 100644 index 000000000..32ed18123 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/diagnosis/system/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
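
The package entry point `detect()` above takes a majority vote across the three detectors: the sum- and average-based voters flag a change when the first series is significantly larger than the second, and the KS-based voter tests whether the two samples come from different distributions. A minimal usage sketch with invented sample values; it assumes the package is importable under the path introduced by this patch:

```
from dbmind.app.diagnosis.query.slow_sql import significance_detection

# Invented sample data: current TPS roughly 50% above the historical baseline.
history_tps = [100.0, 102.0, 98.0, 101.0, 99.0]
current_tps = [150.0, 153.0, 148.0, 151.0, 149.0]

# 'bool' mode returns True when at least two of the three detectors
# (sum-, average- and KS-based) consider the increase significant.
changed = significance_detection.detect(current_tps, history_tps,
                                        method='bool', threshold=0.1, p_value=0.05)
print(changed)  # expected: True for this data
```
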
+ + +def diagnose_system(*args): + return [] diff --git a/src/test/regress/expected/sqlldr/.gitkeep b/src/gausskernel/dbmind/tools/app/healing/.gitkeep similarity index 100% rename from src/test/regress/expected/sqlldr/.gitkeep rename to src/gausskernel/dbmind/tools/app/healing/.gitkeep diff --git a/src/gausskernel/dbmind/tools/app/monitoring/__init__.py b/src/gausskernel/dbmind/tools/app/monitoring/__init__.py new file mode 100644 index 000000000..e87dd292b --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/monitoring/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + diff --git a/src/gausskernel/dbmind/tools/app/timed_app.py b/src/gausskernel/dbmind/tools/app/timed_app.py new file mode 100644 index 000000000..8945cd969 --- /dev/null +++ b/src/gausskernel/dbmind/tools/app/timed_app.py @@ -0,0 +1,111 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import logging + +from dbmind import constants +from dbmind import global_vars +from dbmind.app.diagnosis import diagnose_query +from dbmind.common.algorithm.forecasting import quickly_forecast +from dbmind.common.dispatcher import timer +from dbmind.service import dai +from dbmind.common import utils + +metric_value_range_map = utils.read_simple_config_file(constants.METRIC_VALUE_RANGE_CONFIG) + +detection_interval = global_vars.configs.getint( + 'SELF-MONITORING', 'detection_interval' +) + +last_detection_minutes = global_vars.configs.getint( + 'SELF-MONITORING', 'last_detection_time' +) / 60 + +how_long_to_forecast_minutes = global_vars.configs.getint( + 'SELF-MONITORING', 'forecasting_future_time' +) / 60 + +"""The Four Golden Signals: +https://sre.google/sre-book/monitoring-distributed-systems/#xref_monitoring_golden-signals +""" +golden_kpi = list(map( + str.strip, + global_vars.configs.get( + 'SELF-MONITORING', 'golden_kpi' + ).split(',') +)) + + +def quickly_forecast_wrapper(sequence, forecasting_minutes): + forecast_result = quickly_forecast(sequence, forecasting_minutes) + metric_value_range = metric_value_range_map.get(sequence.name) + if metric_value_range and forecast_result: + metric_value_range = metric_value_range.split(",") + try: + metric_value_low = float(metric_value_range[0]) + metric_value_high = float(metric_value_range[1]) + except ValueError as ex: + logging.warning("quickly_forecast_wrapper value error:%s," + " so forecast_result will not be cliped." 
% ex) + return forecast_result + + f_values = list(forecast_result.values) + for i in range(len(f_values)): + if f_values[i] < metric_value_low: + f_values[i] = metric_value_low + if f_values[i] > metric_value_high: + f_values[i] = metric_value_high + forecast_result.values = tuple(f_values) + return forecast_result + + +@timer(detection_interval) +def self_monitoring(): + # diagnose for slow queries + if constants.SLOW_QUERY_DIAGNOSIS_NAME in global_vars.backend_timed_task: + slow_query_collection = dai.get_all_slow_queries(last_detection_minutes) + logging.debug('The length of slow_query_collection is %d.', len(slow_query_collection)) + dai.save_slow_queries( + global_vars.worker.parallel_execute( + diagnose_query, ((slow_query,) for slow_query in slow_query_collection) + ) + ) + + +@timer(how_long_to_forecast_minutes * 60) +def forecast_kpi(): + if constants.FORECAST_NAME not in global_vars.backend_timed_task: + return + + # The general training length is at least three times the forecasting length. + expansion_factor = 5 + enough_history_minutes = how_long_to_forecast_minutes * expansion_factor + if enough_history_minutes <= 0: + logging.error( + 'The value of enough_history_minutes less than or equal to 0 ' + 'and DBMind has ignored it.' + ) + return + + for metric in golden_kpi: + last_sequences = dai.get_latest_metric_sequence(metric, enough_history_minutes).fetchall() + future_sequences = global_vars.worker.parallel_execute( + quickly_forecast_wrapper, ((sequence, how_long_to_forecast_minutes) + for sequence in last_sequences) + ) + detect_materials = list() + for last_sequence, future_sequence in zip(last_sequences, future_sequences): + host = dai.SequenceUtils.from_server(last_sequence) + detect_materials.append((host, metric, future_sequence)) + # Save the forecast future KPIs for users browsing. + dai.save_forecast_sequence(metric, host, future_sequence) + diff --git a/src/gausskernel/dbmind/tools/cmd/__init__.py b/src/gausskernel/dbmind/tools/cmd/__init__.py new file mode 100644 index 000000000..a10bdc671 --- /dev/null +++ b/src/gausskernel/dbmind/tools/cmd/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import sys + +from .cli import DBMindRun + + +def main() -> None: + try: + DBMindRun(sys.argv[1:]) + except InterruptedError: + sys.exit(1) diff --git a/src/gausskernel/dbmind/tools/cmd/cli.py b/src/gausskernel/dbmind/tools/cmd/cli.py new file mode 100644 index 000000000..1a247c87e --- /dev/null +++ b/src/gausskernel/dbmind/tools/cmd/cli.py @@ -0,0 +1,122 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+# See the Mulan PSL v2 for more details.
+"""Command Line Interface"""
+
+import argparse
+import os
+
+from dbmind import components as components_module
+from dbmind.common.exceptions import SetupError, ConfigSettingError
+from dbmind.constants import __description__, __version__
+from . import edbmind
+from . import setup
+from . import config_utils
+from .. import constants
+from .. import global_vars
+
+
+def build_parser():
+    actions = ['setup', 'start', 'stop', 'reload']
+    # Create the top-level parser to parse the common action.
+    parser = argparse.ArgumentParser(
+        description=__description__
+    )
+    parser.add_argument('--version', action='version', version=__version__)
+
+    # Add sub-commands:
+    subparsers = parser.add_subparsers(title='available subcommands',
+                                       help="type '<subcommand> -h' for help on a specific subcommand",
+                                       dest='subcommand')
+    # Create the parser for the "service" command.
+    parser_service = subparsers.add_parser('service', help='send a command to DBMind to change the status of '
+                                                           'the service')
+    parser_service.add_argument('action', choices=actions, help='perform an action for service')
+    parser_service.add_argument('-c', '--conf', metavar='DIRECTORY', required=True, type=os.path.realpath,
+                                help='set the directory of configuration files')
+    parser_service.add_argument('--only-run', choices=constants.TIMED_TASK_NAMES,
+                                help='explicitly set a certain task running in the backend')
+    config_mode_group = parser_service.add_mutually_exclusive_group()
+    config_mode_group.add_argument('--interactive', action='store_true',
+                                   help='configure and initialize with interactive mode')
+    config_mode_group.add_argument('--initialize', action='store_true',
+                                   help='initialize and check configurations after configuring.')
+
+    # Create the parser for the "set" command.
+    parser_set = subparsers.add_parser('set', help='set a parameter')
+    parser_set.add_argument('section', help='which section (case sensitive) to set')
+    parser_set.add_argument('option', help='which option to set')
+    parser_set.add_argument('target', help='the parameter target to set')
+    parser_set.add_argument('-c', '--conf', metavar='DIRECTORY', required=True,
+                            help='set the directory of configuration files')
+
+    # Create the parser for the "component" command.
+    # This command covers the Prometheus exporter and other components that can be
+    # run independently through the command line.
+    # Components can be easily extended, similar to a plug-in.
+    # The component to be called can import DBMind packages directly.
+    components = components_module.list_components()
+    parser_component = subparsers.add_parser('component',
+                                             help='pass command line arguments to each sub-component.')
+    parser_component.add_argument('name', metavar='COMPONENT_NAME', choices=components,
+                                  help='choose a component to start. ' + str(components))
+    parser_component.add_argument('arguments', metavar='ARGS', nargs=argparse.REMAINDER,
+                                  help='arguments for the component to start')
+    return parser
+
+
+class DBMindRun:
+    """Helper class to use as main for DBMind:
+
+        DBMindRun(sys.argv[1:])
+    """
+
+    def __init__(self, argv):
+        os.umask(0o0077)
+
+        parser = build_parser()
+        args = parser.parse_args(argv)
+        try:
+            if args.subcommand == 'service':
+                if args.action == 'setup':
+                    if args.interactive:
+                        setup.setup_directory_interactive(args.conf)
+                    elif args.initialize:
+                        setup.initialize_and_check_config(args.conf, interactive=False)
+                    else:
+                        setup.setup_directory(args.conf)
+                elif args.action == 'start':
+                    # Determine which task runs in the backend.
+ if args.only_run is None: + global_vars.backend_timed_task.extend(constants.TIMED_TASK_NAMES) + else: + global_vars.backend_timed_task.append(args.only_run) + edbmind.DBMindMain(args.conf).start() + elif args.action == 'stop': + edbmind.DBMindMain(args.conf).stop(level='mid') + elif args.action == 'reload': + edbmind.DBMindMain(args.conf).reload() + else: + parser.print_usage() + elif args.subcommand == 'show': + pass + elif args.subcommand == 'set': + config_utils.set_config_parameter(args.conf, args.section, args.option, args.target) + elif args.subcommand == 'component': + components_module.call_component(args.name, args.arguments) + else: + parser.print_usage() + except SetupError as e: + parser.error(message=e.msg) + except ConfigSettingError as e: + parser.error(message=str(e)) diff --git a/src/gausskernel/dbmind/tools/cmd/config_utils.py b/src/gausskernel/dbmind/tools/cmd/config_utils.py new file mode 100644 index 000000000..6fcd06247 --- /dev/null +++ b/src/gausskernel/dbmind/tools/cmd/config_utils.py @@ -0,0 +1,196 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import os +import re +from configparser import ConfigParser +from configparser import NoSectionError, NoOptionError + +from dbmind import constants +from dbmind.common import security +from dbmind.common.exceptions import InvalidPasswordException, ConfigSettingError +from dbmind.metadatabase.dao.dynamic_config import dynamic_config_get, dynamic_config_set +from dbmind.common.utils import write_to_terminal + +NULL_TYPE = '(null)' # empty text. +ENCRYPTED_SIGNAL = 'Encrypted->' + + +def check_config_validity(section, option, value, inline_comment=None): + config_item = '%s-%s' % (section, option) + # exceptional cases: + if config_item == 'METADATABASE-port': + return True, None + # normal inspection process: + if 'port' in option: + valid_port = str.isdigit(value) and 0 < int(value) <= 65535 + if not valid_port: + return False, 'Invalid port %s' % value + if 'database' in option: + if value == NULL_TYPE or value.strip() == '': + return False, 'Unspecified database name' + if 'Options:' in inline_comment: + # determine setting option whether choose from option list. + results = re.findall(r'Options: (.*)?\.', inline_comment) + if len(results) > 0: + options = list(map(str.strip, results[0].split(','))) + if value not in options: + return False, 'Invalid choice: %s' % value + if 'dbtype' in option and value == 'opengauss': + write_to_terminal( + 'WARN: default PostgresSQL connector (psycopg2-binary) does not support openGauss.\n' + 'It would help if you compiled psycopg2 with openGauss manually or ' + 'created a connection user after setting the GUC password_encryption_type to 1.', + color='yellow' + ) + + # Add more checks here. + return True, None + + +def load_sys_configs(confile): + # Note: To facilitate the user to modify the configuration items through the + # configuration file easily, we add inline comments to the file, but we need + # to remove the inline comments while parsing. 
+    # Otherwise, it will cause the read configuration items to be wrong.
+    configs = ConfigParser(inline_comment_prefixes='#')
+    with open(file=confile, mode='r') as fp:
+        configs.read_file(fp)
+
+    class ConfigWrapper(object):
+        def __getattribute__(self, name):
+            try:
+                return object.__getattribute__(self, name)
+            except (AttributeError, KeyError):
+                return configs.__getattribute__(name)
+
+        # Self-defined converters:
+        @staticmethod
+        def get(section, option, *args, **kwargs):
+            """Faked get() for ConfigParser class."""
+            value = configs.get(section, option, *args, **kwargs)
+            if value == NULL_TYPE:
+                value = ''
+            if 'password' in option and value.startswith(ENCRYPTED_SIGNAL):
+                s1 = dynamic_config_get('dbmind_config', 'cipher_s1')
+                s2 = dynamic_config_get('dbmind_config', 'cipher_s2')
+                iv = dynamic_config_get('iv_table', '%s-%s' % (section, option))
+                try:
+                    # Slice off the fixed-length prefix; str.lstrip() would strip
+                    # a character set and could eat leading ciphertext characters.
+                    value = security.decrypt(s1, s2, iv, value[len(ENCRYPTED_SIGNAL):])
+                except Exception as e:
+                    raise InvalidPasswordException(e)
+            return value
+
+    return ConfigWrapper()
+
+
+class ConfigUpdater:
+    def __init__(self, filepath):
+        self.config = ConfigParser(inline_comment_prefixes=None)
+        self.filepath = os.path.abspath(filepath)
+        self.fp = None
+        self.readonly = True
+
+    def get(self, section, option):
+        value = self.config.get(section, option)
+        default_value, inline_comment = map(str.strip, value.rsplit('#', 1))
+        if default_value == '':
+            default_value = NULL_TYPE
+        return default_value, inline_comment
+
+    def set(self, section, option, value, inline_comment):
+        self.readonly = False
+        self.config.set(section, option, '%s # %s' % (value, inline_comment))
+
+    def sections(self, skip_list=()):
+        for section in self.config.sections():
+            if section not in skip_list:
+                comment = self.config.get('COMMENT', section, fallback='')
+                yield section, comment
+
+    def items(self, section):
+        for option in self.config.options(section):
+            default_value, inline_comment = self.get(section, option)
+            yield option, default_value, inline_comment
+
+    def __enter__(self):
+        self.fp = open(file=self.filepath, mode='r+', errors='ignore')
+        self.config.read_file(self.fp)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.readonly:
+            # output configurations
+            self.fp.truncate(0)
+            self.fp.seek(0)
+            with open(
+                    file=os.path.join(constants.MISC_PATH, constants.CONFILE_HEADER_NAME)
+            ) as header_fp:
+                self.fp.writelines(header_fp.readlines())
+            self.config.write(self.fp)
+            self.fp.flush()
+        self.fp.close()
+
+
+class DynamicConfig:
+    @staticmethod
+    def get(*args, **kwargs):
+        return dynamic_config_get(*args, **kwargs)
+
+    @staticmethod
+    def set(*args, **kwargs):
+        return dynamic_config_set(*args, **kwargs)
+
+
+def set_config_parameter(confpath, section: str, option: str, value: str):
+    if not os.path.exists(confpath):
+        raise ConfigSettingError("Invalid directory '%s', please set up first." % confpath)
+
+    # Section is case sensitive.
+    if section.isupper():
+        with ConfigUpdater(os.path.join(confpath, constants.CONFILE_NAME)) as config:
+            # If not found, raise NoSectionError or NoOptionError.
+            try:
+                old_value, comment = config.get(section, option)
+            except (NoSectionError, NoOptionError):
+                raise ConfigSettingError('Not found the parameter %s-%s.' % (section, option))
+            valid, reason = check_config_validity(section, option, value, comment)
+            if not valid:
+                raise ConfigSettingError('Incorrect value due to %s.' % reason)
+            # If user wants to change password, we should encrypt the plain-text password first.
+ if 'password' in option: + # dynamic_config_xxx searches file from current working directory. + os.chdir(confpath) + s1 = dynamic_config_get('dbmind_config', 'cipher_s1') + s2 = dynamic_config_get('dbmind_config', 'cipher_s2') + # Every time a new password is generated, update the IV. + iv = security.generate_an_iv() + dynamic_config_set('iv_table', '%s-%s' % (section, option), iv) + cipher = security.encrypt(s1, s2, iv, value) + value = ENCRYPTED_SIGNAL + cipher + config.set(section, option, value, comment) + elif section.islower(): + # dynamic_config_xxx searches file from current working directory. + os.chdir(confpath) + try: + old_value = dynamic_config_get(section, option) + except ValueError: + raise ConfigSettingError('Not found the parameter %s-%s.' % (section, option)) + if not old_value: + raise ConfigSettingError('Not found the parameter %s-%s.' % (section, option)) + dynamic_config_set(section, option, value) + else: + # If run here, it seems that the format of section string is not correct. + raise ConfigSettingError('%s is an incorrect section. ' + 'Please take note that section string is case sensitive.' % section) + + write_to_terminal('Success to modify parameter %s-%s.' % (section, option), color='green') diff --git a/src/gausskernel/dbmind/tools/cmd/edbmind.py b/src/gausskernel/dbmind/tools/cmd/edbmind.py new file mode 100644 index 000000000..95a5fbf1b --- /dev/null +++ b/src/gausskernel/dbmind/tools/cmd/edbmind.py @@ -0,0 +1,165 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""DBMind common functionality interface""" + +import logging +import os +import signal +import sys +import traceback +from logging import handlers +import time + +from dbmind import app +from dbmind import constants +from dbmind import global_vars +from dbmind.cmd.config_utils import ( + load_sys_configs, + DynamicConfig +) +from dbmind.common import platform +from dbmind.common import utils +from dbmind.common.daemon import Daemon, read_dbmind_pid_file +from dbmind.common.dispatcher import TimedTaskManager +from dbmind.common.dispatcher import get_worker_instance +from dbmind.common.exceptions import SetupError + +# Support input() function in using backspace. 
+try: + import readline +except ImportError: + pass + +SKIP_LIST = ('COMMENT', 'LOG') + +dbmind_master_should_exit = False + + +def _check_confpath(confpath): + confile_path = os.path.join(confpath, constants.CONFILE_NAME) + if os.path.exists(confile_path): + return True + return False + + +def _process_clean(force=False): + global_vars.worker.terminate(cancel_futures=force) + TimedTaskManager.stop() + logging.shutdown() + + +def signal_handler(signum, frame): + global dbmind_master_should_exit + + if signum == signal.SIGINT or signum == signal.SIGHUP: + utils.write_to_terminal('Reloading parameters.', color='green') + global_vars.configs = load_sys_configs(constants.CONFILE_NAME) + elif signum == signal.SIGUSR2: + # used for debugging + utils.write_to_terminal('Stack frames:', color='green') + traceback.print_stack(frame) + elif signum == signal.SIGTERM: + signal.signal(signal.SIGTERM, signal.SIG_IGN) + logging.info('DBMind received exit signal.') + utils.write_to_terminal('Cleaning opened resources...') + _process_clean() + dbmind_master_should_exit = True + elif signum == signal.SIGQUIT: + signal.signal(signal.SIGQUIT, signal.SIG_IGN) + logging.info('DBMind received the signal: exit immediately.') + utils.write_to_terminal('Cleaning opened resources...') + _process_clean(True) + dbmind_master_should_exit = True + + +class DBMindMain(Daemon): + def __init__(self, confpath): + if not _check_confpath(confpath): + raise SetupError("Invalid directory '%s', please set up first." % confpath) + + self.confpath = os.path.abspath(confpath) + self.worker = None + + pid_file = os.path.join(confpath, constants.PIDFILE_NAME) + super().__init__(pid_file) + + def run(self): + os.chdir(self.confpath) + os.umask(0o0077) + + utils.set_proc_title('DBMind [Master Process]') + # Set global variables. + global_vars.confpath = self.confpath + global_vars.configs = load_sys_configs(constants.CONFILE_NAME) + global_vars.dynamic_configs = DynamicConfig + global_vars.metric_map = utils.read_simple_config_file( + constants.METRIC_MAP_CONFIG + ) + global_vars.must_filter_labels = utils.read_simple_config_file( + constants.MUST_FILTER_LABEL_CONFIG + ) + + # Set logger. + os.makedirs('logs', exist_ok=True) + max_bytes = global_vars.configs.getint('LOG', 'maxbytes') + backup_count = global_vars.configs.getint('LOG', 'backupcount') + disk_handler = utils.MultiProcessingRFHandler(filename=os.path.join('logs', constants.LOGFILE_NAME), + maxBytes=max_bytes, + backupCount=backup_count) + + disk_handler.setFormatter( + logging.Formatter("[%(asctime)s %(levelname)s][%(process)d-%(thread)d][%(name)s]: %(message)s") + ) + logger = logging.getLogger() + logger.name = 'DBMind' + logger.addHandler(disk_handler) + logger.setLevel(global_vars.configs.get('LOG', 'level').upper()) + + logging.info('DBMind is starting.') + # Register signal handler. + if not platform.WIN32: + signal.signal(signal.SIGHUP, signal_handler) + signal.signal(signal.SIGUSR2, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGQUIT, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + + # Create execution pool. + global_vars.worker = self.worker = get_worker_instance('local', 0) + # Start timed tasks. + app.register_timed_app() + TimedTaskManager.start() + + # Pending until all threads exist. 
+ while not dbmind_master_should_exit: + time.sleep(1) + logging.info('DBMind will close.') + + def clean(self): + if os.path.exists(self.pid_file): + os.unlink(self.pid_file) + + def reload(self): + pid = read_dbmind_pid_file(self.pid_file) + if pid > 0: + if platform.WIN32: + os.kill(pid, signal.SIGINT) + else: + os.kill(pid, signal.SIGHUP) + else: + utils.write_to_terminal( + 'Invalid DBMind process.', + level='error', + color='red' + ) + os.remove(self.pid_file) diff --git a/src/gausskernel/dbmind/tools/cmd/setup.py b/src/gausskernel/dbmind/tools/cmd/setup.py new file mode 100644 index 000000000..eecc14df4 --- /dev/null +++ b/src/gausskernel/dbmind/tools/cmd/setup.py @@ -0,0 +1,228 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import getpass +import os +import shutil +from configparser import ConfigParser + +from dbmind import constants, global_vars +from dbmind.cmd.config_utils import ( + ConfigUpdater, check_config_validity, + DynamicConfig, load_sys_configs, + NULL_TYPE, ENCRYPTED_SIGNAL +) +from dbmind.cmd.edbmind import SKIP_LIST +from dbmind.common import utils, security +from dbmind.common.exceptions import SetupError, SQLExecutionError +from dbmind.metadatabase import ( + create_dynamic_config_schema, + create_metadatabase_schema, + destroy_metadatabase +) +from dbmind.metadatabase.dao.dynamic_config import dynamic_config_set, dynamic_config_get + + +def initialize_and_check_config(confpath, interactive=False): + if not os.path.exists(confpath): + raise SetupError('Not found the directory %s.' % confpath) + confpath = os.path.abspath(confpath) # in case of dir changed. + os.chdir(confpath) + dbmind_conf_path = os.path.join(confpath, constants.CONFILE_NAME) + dynamic_config_path = os.path.join(confpath, constants.DYNAMIC_CONFIG) + + def _create_dynamic_config_schema_and_generate_keys(): + utils.write_to_terminal('Starting to generate a dynamic config file...', color='green') + create_dynamic_config_schema() + s1_ = security.safe_random_string(16) + s2_ = security.safe_random_string(16) + dynamic_config_set('dbmind_config', 'cipher_s1', s1_) + dynamic_config_set('dbmind_config', 'cipher_s2', s2_) + return s1_, s2_ + + if not os.path.exists(dynamic_config_path): + # If dynamic config file does not exist, create a new one. + s1, s2 = _create_dynamic_config_schema_and_generate_keys() + else: + # If exists, need not create a new dynamic config file + # and directly load hash key s1 and s2 from it. + s1 = dynamic_config_get('dbmind_config', 'cipher_s1') + s2 = dynamic_config_get('dbmind_config', 'cipher_s2') + if not (s1 and s2): + # If s1 or s2 is invalid, it indicates that an broken event may occurred while generating + # the dynamic config file. Hence, the whole process of generation is unreliable and we have to + # generate a new dynamic config file. + os.unlink(dynamic_config_path) + s1, s2 = _create_dynamic_config_schema_and_generate_keys() + + # Check some configurations and encrypt passwords. 
+ with ConfigUpdater(dbmind_conf_path) as config: + if not interactive: + for section, section_comment in config.sections(SKIP_LIST): + for option, value, inline_comment in config.items(section): + valid, invalid_reason = check_config_validity( + section, option, value, inline_comment + ) + if not valid: + raise SetupError( + "Wrong %s-%s in the file dbmind.conf due to '%s'. Please revise it." % ( + section, option, invalid_reason + ) + ) + + utils.write_to_terminal('Starting to encrypt the plain-text passwords in the config file...', color='green') + for section, section_comment in config.sections(SKIP_LIST): + for option, value, inline_comment in config.items(section): + if 'password' in option and value != NULL_TYPE: + # Skip when the password has encrypted. + if value.startswith(ENCRYPTED_SIGNAL): + continue + # Every time a new password is generated, update the IV. + iv = security.generate_an_iv() + dynamic_config_set('iv_table', '%s-%s' % (section, option), iv) + cipher_text = security.encrypt(s1, s2, iv, value) + # Use a signal ENCRYPTED_SIGNAL to mark the password that has been encrypted. + decorated_cipher_text = ENCRYPTED_SIGNAL + cipher_text + config.set(section, option, decorated_cipher_text, inline_comment) + + # config and initialize meta-data database. + utils.write_to_terminal('Starting to initialize and check the essential variables...', color='green') + global_vars.dynamic_configs = DynamicConfig + global_vars.configs = load_sys_configs( + constants.CONFILE_NAME + ) + utils.write_to_terminal('Starting to connect to meta-database and create tables...', color='green') + try: + create_metadatabase_schema(check_first=False) + except SQLExecutionError: + utils.write_to_terminal('The given database has duplicate tables. ' + 'If you want to reinitialize the database, press [R]. ' + 'If you want to keep the existent tables, press [K].', color='red') + input_char = '' + while input_char not in ('R', 'K'): + input_char = input('Press [R] to reinitialize; Press [K] to keep and ignore:').upper() + if input_char == 'R': + utils.write_to_terminal('Starting to drop existent tables in meta-database...', color='green') + destroy_metadatabase() + utils.write_to_terminal('Starting to create tables for meta-database...', color='green') + create_metadatabase_schema(check_first=True) + if input_char == 'K': + utils.write_to_terminal('Ignoring...', color='green') + utils.write_to_terminal('The setup process finished successfully.', color='green') + + +def setup_directory_interactive(confpath): + # Determine whether the directory is empty. + if os.path.exists(confpath) and len(os.listdir(confpath)) > 0: + raise SetupError("Given setup directory '%s' already exists." % confpath) + + # Make the confpath directory and copy all files + # (basically all files are config files) from MISC directory. + shutil.copytree( + src=constants.MISC_PATH, + dst=confpath + ) + + utils.write_to_terminal('Starting to configure...', color='green') + # Generate an initial configuration file. + config_src = os.path.join(constants.MISC_PATH, constants.CONFILE_NAME) + config_dst = os.path.join(confpath, constants.CONFILE_NAME) + # read configurations + config = ConfigParser(inline_comment_prefixes=None) + with open(file=config_src, mode='r', errors='ignore') as fp: + config.read_file(fp) + + try: + # Modify configuration items by user's typing. 
+ for section in config.sections(): + if section in SKIP_LIST: + continue + section_comment = config.get('COMMENT', section, fallback='') + utils.write_to_terminal('[%s]' % section, color='white') + utils.write_to_terminal(section_comment, color='yellow') + # Get each configuration item. + for option, values in config.items(section): + default_value, inline_comment = map(str.strip, values.rsplit('#', 1)) + # If not set default value, the default value is null. + if default_value.strip() == '': + default_value = NULL_TYPE + # hidden password + input_value = '' + if 'password' in option: + input_func = getpass.getpass + else: + input_func = input + + while input_value.strip() == '': + # Ask for options. + input_value = input_func('%s (%s) [default: %s]:' % (option, inline_comment, default_value)) + # If user does not set the option, set default target. + if input_value.strip() == '': + input_value = default_value + + valid, invalid_reason = check_config_validity( + section, option, input_value, inline_comment + ) + if not valid: + utils.write_to_terminal( + "Please retype due to '%s'." % invalid_reason, + level='error', + color='red' + ) + input_value = '' + config.set(section, option, '%s # %s' % (input_value, inline_comment)) + except (KeyboardInterrupt, EOFError): + utils.write_to_terminal('Removing generated files due to keyboard interrupt.') + shutil.rmtree( + path=confpath + ) + return + + # output configurations + with open(file=config_dst, mode='w+') as fp: + # Add header comments (including license and notice). + with open(file=os.path.join(confpath, constants.CONFILE_HEADER_NAME)) as header_fp: + fp.writelines(header_fp.readlines()) + config.write(fp) + + initialize_and_check_config(confpath, interactive=True) + + +def setup_directory(confpath): + # Determine whether the directory is empty. + if os.path.exists(confpath) and len(os.listdir(confpath)) > 0: + raise SetupError("Given setup directory '%s' already exists." % confpath) + + utils.write_to_terminal( + "You are not in the interactive mode so you must modify configurations manually.\n" + "The file you need to modify is '%s'.\n" + "After configuring, you should continue to set up and initialize the directory with --initialize option, " + "e.g.,\n " + "'... service setup -c %s --initialize'" + % (os.path.join(confpath, constants.CONFILE_NAME), confpath), + color='yellow') + + # Make the confpath directory and copy all files + # (basically all files are config files) from MISC directory. + shutil.copytree( + src=constants.MISC_PATH, + dst=confpath + ) + # output configurations + with open(file=os.path.join(confpath, constants.CONFILE_NAME), mode='r+') as fp: + old = fp.readlines() + # Add header comments (including license and notice). + with open(file=os.path.join(constants.MISC_PATH, constants.CONFILE_HEADER_NAME)) as header_fp: + fp.seek(0) + fp.writelines(header_fp.readlines()) + fp.writelines(old) + utils.write_to_terminal("Configure directory '%s' has been created successfully." % confpath, color='green') diff --git a/src/gausskernel/dbmind/tools/common/__init__.py b/src/gausskernel/dbmind/tools/common/__init__.py new file mode 100644 index 000000000..054f32436 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/src/gausskernel/dbmind/tools/common/algorithm/__init__.py b/src/gausskernel/dbmind/tools/common/algorithm/__init__.py new file mode 100644 index 000000000..054f32436 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/algorithm/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/src/gausskernel/dbmind/tools/common/algorithm/basic.py b/src/gausskernel/dbmind/tools/common/algorithm/basic.py new file mode 100644 index 000000000..9776ee9b4 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/algorithm/basic.py @@ -0,0 +1,103 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +def binary_search(L, target): + """A binary search with left-closed and right-opened style. + + :return the index of specific target; if not found, return -1. + """ + if len(L) == 0: + return -1 + # [0, length) + lo, hi = 0, len(L) + while lo < hi: # equals to lo == hi + mid = lo + (hi - lo) // 2 + if L[mid] == target: + return mid + elif L[mid] < target: + # [mid + 1, hi) + lo = mid + 1 + elif L[mid] > target: + # [lo, mid) + hi = mid + return -1 + + +def how_many_lesser_elements(L, target): + """The function bases on finding the leftmost element with binary search. + + About Binary Search + ============= + + .. + + Rank queries can be performed with the procedure for finding the leftmost element. + The number of elements less than the target target is returned by the procedure. 
+
+       -- Wikipedia: binary search algorithm
+
+
+    The pseudocode for finding the leftmost element:
+    https://en.wikipedia.org/wiki/Binary_search_algorithm#Procedure_for_finding_the_leftmost_element
+    """
+    if len(L) == 0:
+        return -1
+    # [0, length - 1]
+    lo, hi = 0, len(L) - 1
+    while lo <= hi:  # loop exits when lo == hi + 1
+        mid = lo + (hi - lo) // 2
+        if L[mid] == target:
+            hi = mid - 1  # shrink right bound
+        elif L[mid] < target:
+            # [mid + 1, hi]
+            lo = mid + 1
+        elif L[mid] > target:
+            # [lo, mid - 1]
+            hi = mid - 1
+    return lo
+
+
+def how_many_larger_elements(L, target):
+    """Find the rightmost position whose element is <= target; the number of
+    strictly larger elements is ``len(L) - 1 - result``.
+    """
+    if len(L) == 0:
+        return -1
+    # [0, length - 1]
+    lo, hi = 0, len(L) - 1
+    while lo <= hi:  # loop exits when lo == hi + 1
+        mid = lo + (hi - lo) // 2
+        if L[mid] == target:
+            lo = mid + 1  # shrink left bound
+        elif L[mid] < target:
+            # [mid + 1, hi]
+            lo = mid + 1
+        elif L[mid] > target:
+            # [lo, mid - 1]
+            hi = mid - 1
+    return hi
+
+
+def binary_search_left(L, target):
+    """Wrap the function ``how_many_lesser_elements(L, target)`` with a check
+    on the returned position.
+
+    :return: -1 when the target is not found.
+    """
+    lo = how_many_lesser_elements(L, target)
+    return -1 if lo >= len(L) or L[lo] != target else lo
+
+
+def binary_search_right(L, target):
+    """Similar to ``binary_search_left`` but searches for the rightmost match."""
+    hi = how_many_larger_elements(L, target)
+    return -1 if hi < 0 or L[hi] != target else hi
+
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/forecasting/__init__.py b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/__init__.py
new file mode 100644
index 000000000..c708955ae
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+from .forcasting_algorithm import ForecastingFactory
+from .forcasting_algorithm import quickly_forecast
diff --git a/src/gausskernel/dbmind/tools/ai_manager/definitions/__init__.py b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/__init__.py
similarity index 100%
rename from src/gausskernel/dbmind/tools/ai_manager/definitions/__init__.py
rename to src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/__init__.py
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/arima_alg.py b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/arima_alg.py
new file mode 100644
index 000000000..48d7b5ca4
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/arima_alg.py
@@ -0,0 +1,652 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+import logging
+import time
+from types import SimpleNamespace
+
+import numpy as np
+from numpy import dot, log, zeros, pi
+from scipy import optimize
+from scipy import signal
+from scipy.signal import lfilter
+
+from .arima_common import lagmat, OLS
+
+
+def _ar_transparams(params):
+    """
+    Transform AR params to induce stationarity/invertibility.
+    :param params: type->np.array
+    :return newparams: type->np.array
+    """
+    newparams = np.tanh(params / 2)
+    tmp = np.tanh(params / 2)
+    for j in range(1, len(params)):
+        ar_param = newparams[j]
+        for kiter in range(j):
+            tmp[kiter] -= ar_param * newparams[j - kiter - 1]
+        newparams[:j] = tmp[:j]
+    return newparams
+
+
+def _ar_invtransparams(params):
+    """
+    Return the inverse of the AR params.
+    :param params: type->np.array
+    :return invarcoefs: type->np.array
+    """
+    params = params.copy()
+    tmp = params.copy()
+    for j in range(len(params) - 1, 0, -1):
+        ar_param = params[j]
+        for kiter in range(j):
+            tmp[kiter] = (params[kiter] + ar_param * params[j - kiter - 1]) / \
+                         (1 - ar_param ** 2)
+        params[:j] = tmp[:j]
+    invarcoefs = 2 * np.arctanh(params)
+    return invarcoefs
+
+
+def _ma_transparams(params):
+    """
+    Transform MA params to induce stationarity/invertibility.
+    :param params: type->np.array
+    :return newparams: type->np.array
+    """
+    newparams = ((1 - np.exp(-params)) / (1 + np.exp(-params))).copy()
+    tmp = ((1 - np.exp(-params)) / (1 + np.exp(-params))).copy()
+
+    for j in range(1, len(params)):
+        ma_param = newparams[j]
+        for kiter in range(j):
+            tmp[kiter] += ma_param * newparams[j - kiter - 1]
+        newparams[:j] = tmp[:j]
+    return newparams
+
+
+def _ma_invtransparams(macoefs):
+    """
+    Return the inverse of the MA params.
+    :param macoefs: type->np.array
+    :return invmacoefs: type->np.array
+    """
+    tmp = macoefs.copy()
+    for j in range(len(macoefs) - 1, 0, -1):
+        ma_param = macoefs[j]
+        for kiter in range(j):
+            tmp[kiter] = (macoefs[kiter] - ma_param *
+                          macoefs[j - kiter - 1]) / (1 - ma_param ** 2)
+        macoefs[:j] = tmp[:j]
+    invmacoefs = -np.log((1 - macoefs) / (1 + macoefs))
+    return invmacoefs
+
+
+class DummyArray:
+    """Support __array_interface__ and base."""
+
+    def __init__(self, interface, base=None):
+        self.__array_interface__ = interface
+        self.base = base
+
+    def wr_dummy(self):
+        """Write dummy."""
+
+    def rd_dummy(self):
+        """Read dummy."""
+
+
+def _maybe_view_as_subclass(original_array, new_array):
+    """
+    Return new_array viewed as the subclass of original_array.
+    :param original_array: type->np.array
+    :param new_array: type->np.array
+    :return new_array: type->np.array
+    """
+    if type(original_array) is not type(new_array):
+        new_array = new_array.view(type=type(original_array))
+        if new_array.__array_finalize__:
+            new_array.__array_finalize__(original_array)
+    return new_array
+
+
+def _as_strided(x_raw, shape=None, strides=None):
+    """
+    Create a view into the array with the given shape and strides.
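+    (Essentially a pared-down ``numpy.lib.stride_tricks.as_strided``.)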
+    :param x_raw: type->np.array
+    :param shape: type->tuple
+    :param strides: type->tuple
+    :return view: type->np.array
+    """
+    subok = False
+    writeable = True
+    x_raw = np.array(x_raw, copy=False, subok=subok)
+    interface = dict(x_raw.__array_interface__)
+    if shape is not None:
+        interface['shape'] = tuple(shape)
+    if strides is not None:
+        interface['strides'] = tuple(strides)
+
+    array = np.asarray(DummyArray(interface, base=x_raw))
+    array.dtype = x_raw.dtype
+
+    view = _maybe_view_as_subclass(x_raw, array)
+
+    if view.flags.writeable and not writeable:
+        view.flags.writeable = False
+
+    return view
+
+
+def _toeplitz(c_raw, r_raw=None):
+    """
+    Construct a Toeplitz matrix.
+    :param c_raw: type->np.array
+    :param r_raw: type->np.array
+    :return view: type->np.array
+    """
+    c_raw = np.asarray(c_raw).ravel()
+    if r_raw is None:
+        r_raw = c_raw.conjugate()
+    else:
+        r_raw = np.asarray(r_raw).ravel()
+
+    vals = np.concatenate((c_raw[::-1], r_raw[1:]))
+    out_shp = len(c_raw), len(r_raw)
+    num = vals.strides[0]
+    return _as_strided(vals[len(c_raw) - 1:], shape=out_shp, strides=(-num, num)).copy()
+
+
+def yule_walker(x_raw, order=1):
+    """
+    Estimate AR parameters from a sequence.
+    :param x_raw: type->np.array
+    :param order: type->int
+    :return rho: type->np.array
+    """
+    method = "adjusted"
+    df_raw = None
+    demean = True
+    x_raw = np.array(x_raw, dtype=np.float64)
+    if demean:
+        x_raw -= x_raw.mean()
+    num = df_raw or x_raw.shape[0]
+
+    adj_needed = method == "adjusted"
+
+    if x_raw.ndim > 1 and x_raw.shape[1] != 1:
+        raise ValueError("expecting a vector to estimate ar parameters")
+    r_raw = np.zeros(order + 1, np.float64)
+    r_raw[0] = (x_raw ** 2).sum() / num
+    for k in range(1, order + 1):
+        r_raw[k] = (x_raw[0:-k] * x_raw[k:]).sum() / (num - k * adj_needed)
+    r_tope = _toeplitz(r_raw[:-1])
+    rho = np.linalg.solve(r_tope, r_raw[1:])
+
+    return rho
+
+
+def _arma_impulse_response(new_ar_coeffs, new_ma_coeffs, leads=100):
+    """
+    Compute the impulse response function (MA representation) for an ARMA process.
+    :param new_ar_coeffs: type->np.array
+    :param new_ma_coeffs: type->np.array
+    :param leads: type->int
+    :return: impulse response: type->np.array
+    """
+    impulse = np.zeros(leads)
+    impulse[0] = 1.
+    return signal.lfilter(new_ma_coeffs, new_ar_coeffs, impulse)
+
+
+def _unpack_params(params, order, k_trend, reverse=False):
+    """
+    Unpack trend, exparams, arparams, maparams from params.
+    :param params: type->np.array
+    :param order: type->tuple
+    :param k_trend: type->int
+    :param reverse: type->bool
+    :return: trend: type->np.array, exparams: type->np.array,
+             arparams: type->np.array, maparams: type->np.array
+    """
+    k_ar, _ = order
+    k = k_trend
+    maparams = params[k + k_ar:]
+    arparams = params[k:k + k_ar]
+    trend = params[:k_trend]
+    exparams = params[k_trend:k]
+    if reverse:
+        return trend, exparams, arparams[::-1], maparams[::-1]
+    return trend, exparams, arparams, maparams
+
+
+def _get_predict_mu_coeffs(k_trend, trendparam, arparams, steps):
+    """
+    Compute mu, the constant term used in prediction.
+    :param k_trend: type->int
+    :param trendparam: type->np.array
+    :param arparams: type->np.array
+    :param steps: type->int
+    :return: mu_coeffs: type->np.array
+    """
+    if k_trend == 1:
+        mu_coeffs = trendparam * (1 - arparams.sum())
+        mu_coeffs = np.array([mu_coeffs] * steps)
+    else:
+        mu_coeffs = np.zeros(steps)
+
+    return mu_coeffs
+
+
+def _get_resid_out_of_sample(errors, order):
+    """
+    Return the residuals used for out-of-sample prediction.
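+    The last ``k_ma`` in-sample errors seed the MA terms of the
+    out-of-sample forecast.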
+    :param errors: type->np.array
+    :param order: type->SimpleNamespace
+    :return: resid: type->np.array
+    """
+    k_ma = order.k_ma
+    k_ar = order.k_ar
+    start = 0
+    resid = None
+    if k_ma:
+        resid = np.zeros(k_ma)
+        if start:
+            resid[:k_ma] = errors[start - k_ma - k_ar:start - k_ar]
+        else:
+            resid[:k_ma] = errors[-k_ma:]
+    return resid
+
+
+def _get_predict_out_of_sample(raw_data, order, trendparam, arparams, steps):
+    """
+    Return endog and mu_coeffs of appropriate length for out-of-sample prediction.
+    :param raw_data: type->SimpleNamespace
+    :param order: type->SimpleNamespace
+    :param trendparam: type->np.array
+    :param arparams: type->np.array
+    :param steps: type->int
+    :return: endog: type->np.array, mu_coeffs: type->np.array
+    """
+    k_ar = order.k_ar
+    start = 0
+
+    y_raw = raw_data.y
+    mu_coeffs = _get_predict_mu_coeffs(raw_data.k_trend, trendparam, arparams, steps)
+    endog = np.zeros(k_ar + steps - 1)
+
+    if k_ar and start:
+        endog[:k_ar] = y_raw[start - k_ar:start]
+    elif k_ar:
+        endog[:k_ar] = y_raw[-k_ar:]
+
+    return endog, mu_coeffs
+
+
+def _arma_predict_out_of_sample(params, steps, errors, order, raw_data):
+    """
+    Predict ``steps`` values ahead of the sample.
+    :param params: type->np.array
+    :param steps: type->int
+    :param errors: type->np.array
+    :param order: type->SimpleNamespace
+    :param raw_data: type->SimpleNamespace
+    :return: forecast: type->np.array
+    """
+    (trendparam, _,
+     arparams, maparams) = _unpack_params(params, (order.k_ar, order.k_ma),
+                                          raw_data.k_trend, reverse=True)
+    resid = _get_resid_out_of_sample(errors, order)
+    endog, mu_coeffs = _get_predict_out_of_sample(raw_data,
+                                                  order,
+                                                  trendparam,
+                                                  arparams,
+                                                  steps)
+
+    forecast = np.zeros(steps)
+    if steps == 1:
+        if order.k_ma:
+            return mu_coeffs[0] + np.dot(arparams, endog[:order.k_ar]) \
+                   + np.dot(maparams, resid[:order.k_ma])
+
+        return mu_coeffs[0] + np.dot(arparams, endog[:order.k_ar])
+
+    i = 0 if order.k_ma else -1
+    for i in range(min(order.k_ma, steps - 1)):
+        fcast = (mu_coeffs[i] + np.dot(arparams, endog[i:i + order.k_ar]) +
+                 np.dot(maparams[:order.k_ma - i], resid[i:i + order.k_ma]))
+        forecast[i] = fcast
+        endog[i + order.k_ar] = fcast
+
+    for i in range(i + 1, steps - 1):
+        fcast = mu_coeffs[i] + np.dot(arparams, endog[i:i + order.k_ar])
+        forecast[i] = fcast
+        endog[i + order.k_ar] = fcast
+
+    forecast[steps - 1] = mu_coeffs[steps - 1] + np.dot(arparams, endog[steps - 1:])
+    return forecast
+
+
+def unintegrate_levels(x_raw, k_diff):
+    """
+    Return the successive differences needed to unintegrate the series.
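+    A minimal sketch with made-up values: for ``k_diff == 1`` the result is
+    simply the first observation, e.g. ``unintegrate_levels(np.array([3., 5., 9.]), 1)``
+    yields ``array([3.])``.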
+    :param x_raw: type->np.array
+    :param k_diff: type->int
+    :return: unintegrated series: type->np.array
+    """
+    x_raw = x_raw[:k_diff]
+    return np.asarray([np.diff(x_raw, k_diff - i)[0] for i in range(k_diff, 0, -1)])
+
+
+def unintegrate(x_raw, levels):
+    """
+    After taking n differences of a series, return the original series.
+    :param x_raw: type->np.array
+    :param levels: type->np.array
+    :return: original series: type->np.array
+    """
+    x0_raw = []
+    levels = list(levels)[:]
+    if len(levels) > 1:
+        x0_raw = levels.pop(-1)
+        return unintegrate(np.cumsum(np.r_[x0_raw, x_raw]), levels)
+    if len(levels) != 0:
+        x0_raw = levels[0]
+    return np.cumsum(np.r_[x0_raw, x_raw])
+
+
+def _get_ar_order(y_raw, maxlag):
+    """
+    Compute the AR order by BIC; it is used for the OLS fit.
+    :param y_raw: type->np.array
+    :param maxlag: type->int
+    :return ar_order: type->int
+    """
+    nexog = 0
+    x_mat, y_mat = lagmat(y_raw, maxlag, original="sep")
+    _y_mat = y_mat[maxlag:]
+    _x_mat = x_mat[maxlag:]
+    base_col = x_mat.shape[1] - nexog - maxlag
+    sel = np.ones(x_mat.shape[1], dtype=bool)
+    sel[base_col: base_col + maxlag] = False
+
+    min_bic = np.inf
+    ar_order = 0
+    for i in range(maxlag + 1):
+        sel[base_col: base_col + i] = True
+        if not np.any(sel):
+            continue
+        mod = OLS(_y_mat, _x_mat[:, sel])
+        mod.fit()
+        sigma2 = 1.0 / mod.nobs * np.sum(mod.wresid ** 2, axis=0)
+        bic = np.log(sigma2) + (1 + mod.df_model) * np.log(mod.nobs) / mod.nobs
+        if bic < min_bic:
+            min_bic = bic
+            ar_order = mod.df_model
+    ar_order = int(ar_order)
+    return ar_order
+
+
+def _get_lag_data_and_resid(y_raw, p_tmp, arcoefs_tmp, order):
+    """
+    Return lag_endog and lag_resid, which are used to fit the coefficients.
+    :param y_raw: type->np.array
+    :param p_tmp: type->int
+    :param arcoefs_tmp: type->np.array
+    :param order: type->tuple
+    :return lag_endog: type->np.array, lag_resid: type->np.array
+    """
+    k_ar, k_ma = order
+    resid = y_raw[p_tmp:] - np.dot(lagmat(y_raw, p_tmp, trim='both'), arcoefs_tmp)
+    if k_ar < p_tmp + k_ma:
+        endog_start = p_tmp + k_ma - k_ar
+        resid_start = 0
+    else:
+        endog_start = 0
+        resid_start = k_ar - p_tmp - k_ma
+    lag_endog = lagmat(y_raw, k_ar, 'both')[endog_start:]
+    lag_resid = lagmat(resid, k_ma, 'both')[resid_start:]
+
+    return lag_endog, lag_resid
+
+
+def _compute_start_ar_ma_coeffs(k_ar, k_ma, y_raw):
+    """
+    Compute starting AR/MA coefficients for L-BFGS to reach optimal parameters.
+    :param k_ar: type->int
+    :param k_ma: type->int
+    :param y_raw: type->np.array
+    :return coeffs: type->np.array
+    """
+    nobs = len(y_raw)
+    maxlag = int(round(12 * (nobs / 100.) ** (1 / 4.)))
+    if maxlag >= nobs:
+        maxlag = nobs - 1
+
+    ar_order = _get_ar_order(y_raw, maxlag)
+    _x_mat, _y_mat = lagmat(y_raw, ar_order, original="sep")
+    _y_mat = _y_mat[ar_order:]
+    _x_mat = _x_mat[ar_order:]
+    ols_mod = OLS(_y_mat, _x_mat)
+    arcoefs_tmp = ols_mod.fit()
+
+    if ar_order + k_ma >= len(y_raw):
+        raise ValueError("start ar order is not valid")
+
+    lag_endog, lag_resid = _get_lag_data_and_resid(y_raw,
+                                                   ar_order,
+                                                   arcoefs_tmp,
+                                                   (k_ar, k_ma))
+    x_stack = np.column_stack((lag_endog, lag_resid))
+    coeffs = OLS(y_raw[max(ar_order + k_ma, k_ar):], x_stack).fit()
+
+    return coeffs
+
+
+def _get_errors(params, raw_data, order):
+    """
+    Return the errors between model predictions and the raw data for forecasting.
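+    These are the one-step-ahead CSS residuals obtained by filtering the
+    (detrended) series with the ARMA lag polynomials via ``scipy.signal.lfilter``.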
+    :param params: type->np.array
+    :param raw_data: type->SimpleNamespace
+    :param order: type->SimpleNamespace
+    :return errors: type->np.array
+    """
+    params = np.asarray(params)
+    y_raw = raw_data.y.copy()
+    k_ar = order.k_ar
+    k_ma = order.k_ma
+
+    if raw_data.k_trend > 0:
+        y_raw -= dot(raw_data.x, params[:raw_data.k_trend])
+
+    (_, _, arparams, maparams) = _unpack_params(params, (k_ar, k_ma),
+                                                raw_data.k_trend, reverse=False)
+    ar_c = np.r_[1, -arparams]
+    ma_c = np.r_[1, maparams]
+    zi_raw = zeros((max(k_ar, k_ma)))
+    for i in range(k_ar):
+        zi_raw[i] = sum(-ar_c[:i + 1][::-1] * y_raw[:i + 1])
+    err = lfilter(ar_c, ma_c, y_raw, zi=zi_raw)
+    errors = err[0][k_ar:]
+    return errors
+
+
+class ARIMA:
+    """The ARIMA model forecasts a series from its history."""
+
+    def __init__(self, y_raw, order):
+        """
+        :param y_raw: type->np.array
+        :param order: type->tuple
+        """
+        k_ar, k_diff, k_ma = order
+        self.order = SimpleNamespace(k_ar=k_ar, k_diff=k_diff, k_ma=k_ma)
+        y_raw = np.asarray(y_raw) if isinstance(y_raw, (list, tuple)) else y_raw
+        y_fit = np.diff(y_raw, n=k_diff)
+        x_fit = np.ones((len(y_fit), 1))
+        self.raw_data = SimpleNamespace(x=x_fit, y=y_fit, raw_y=y_raw, k_trend=1)
+        self.nobs = len(y_fit) - k_ar
+        self.is_transparams = True
+        self.resid = None
+        self.params = None
+
+    def _fit_start_coeffs(self, order):
+        """
+        Compute starting AR and MA coefficients for the optimizer.
+        :param order: type->tuple
+        :return start_params: type->np.array
+        """
+        k_ar, k_ma, k_trend = order
+        start_params = zeros((k_ar + k_ma + k_trend))
+
+        y_raw = np.array(self.raw_data.y, np.float64)
+        x_raw = self.raw_data.x
+        if k_trend != 0:
+            ols_params = OLS(y_raw, x_raw).fit()
+            start_params[:k_trend] = ols_params
+            y_raw -= np.dot(x_raw, ols_params).squeeze()
+        if k_ma != 0:
+            if k_ar != 0:
+                start_params[k_trend:k_trend + k_ar + k_ma] = \
+                    _compute_start_ar_ma_coeffs(k_ar, k_ma, y_raw)
+            else:
+                ar_coeffs = yule_walker(y_raw, order=k_ma)
+                new_ar_coeffs = np.r_[[1], -ar_coeffs.squeeze()]
+                start_params[k_trend + k_ar:k_trend + k_ar + k_ma] = \
+                    _arma_impulse_response(new_ar_coeffs, [1], leads=k_ma + 1)[1:]
+        if k_ma == 0 and k_ar != 0:
+            arcoefs = yule_walker(y_raw, order=k_ar)
+            start_params[k_trend:k_trend + k_ar] = arcoefs
+
+        if k_ar and not np.all(np.abs(np.roots(np.r_[1, -start_params[k_trend:k_trend + k_ar]]
+                                               )) < 1):
+            raise ValueError("the ar start coeffs are invalid")
+        if k_ma and not np.all(np.abs(np.roots(np.r_[1, start_params[k_trend + k_ar:]]
+                                               )) < 1):
+            raise ValueError("the ma start coeffs are invalid")
+
+        return self._invtransparams(start_params)
+
+    def loglike_css(self, params):
+        """
+        Return the log-likelihood (llf) used to compute BIC.
+        :param params: type->np.array
+        :return llf: type->float
+        """
+        nobs = self.nobs
+        if self.is_transparams:
+            newparams = self._transparams(params)
+        else:
+            newparams = params
+        errors = _get_errors(newparams, self.raw_data, self.order)
+
+        ssr = np.dot(errors, errors)
+        sigma2 = ssr / nobs
+        llf = -nobs / 2. * (log(2 * pi) + log(sigma2)) - ssr / (2 * sigma2)
+        return llf
+
+    def _transparams(self, params):
+        """
+        Return the transformed coefficients.
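+        Maps the optimizer's unconstrained parameters back to
+        stationary/invertible AR and MA coefficients.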
+        :param params: type->np.array
+        :return newparams: type->np.array
+        """
+        k_ar, k_ma = self.order.k_ar, self.order.k_ma
+        k = self.raw_data.k_trend
+        newparams = np.zeros_like(params)
+
+        if k != 0:
+            newparams[:k] = params[:k]
+
+        if k_ar != 0:
+            newparams[k:k + k_ar] = _ar_transparams(params[k:k + k_ar].copy())
+
+        if k_ma != 0:
+            newparams[k + k_ar:] = _ma_transparams(params[k + k_ar:].copy())
+        return newparams
+
+    def _invtransparams(self, start_params):
+        """
+        Return the inverse transform of the coefficients.
+        :param start_params: type->np.array
+        :return newparams: type->np.array
+        """
+        k_ar, k_ma = self.order.k_ar, self.order.k_ma
+        k = self.raw_data.k_trend
+        newparams = start_params.copy()
+        arcoefs = newparams[k:k + k_ar]
+        macoefs = newparams[k + k_ar:]
+
+        if k_ar != 0:
+            newparams[k:k + k_ar] = _ar_invtransparams(arcoefs)
+
+        if k_ma != 0:
+            newparams[k + k_ar:k + k_ar + k_ma] = _ma_invtransparams(macoefs)
+        return newparams
+
+    @property
+    def llf(self):
+        """The log-likelihood of the estimated errors, used to compute BIC."""
+        return self.loglike_css(self.params)
+
+    @property
+    def bic(self):
+        """The BIC used to select the optimal (p, d, q) parameters."""
+        nobs = self.nobs
+        df_model = self.raw_data.k_trend + self.order.k_ar + self.order.k_ma
+        return -2 * self.llf + np.log(nobs) * (df_model + 1)
+
+    def fit(self, sequence=None):
+        """
+        Fit trend, AR, and MA coefficients for the ARIMA model.
+        :return: None
+        """
+        k = self.raw_data.k_trend
+        nobs = self.raw_data.y.shape[0]
+        start_params = self._fit_start_coeffs((self.order.k_ar, self.order.k_ma, k))
+
+        def loglike(params, *args):
+            return -self.loglike_css(params) / nobs
+
+        kwargs = {'m': 12, 'pgtol': 1e-08, 'factr': 100.0, 'approx_grad': True, 'maxiter': 500}
+        retvals = optimize.fmin_l_bfgs_b(loglike, start_params, disp=-1, **kwargs)
+        xopt = retvals[0]
+        if self.is_transparams:
+            self.params = self._transparams(xopt)
+        else:
+            self.params = xopt
+        self.is_transparams = False
+
+    def forecast(self, steps):
+        """
+        Return the forecast computed from the history data with the AR
+        coefficients, MA coefficients, and differencing order.
+        :param steps: type->int
+        :return forecast: type->np.array
+        """
+        ctime = int(time.time())
+        logging.debug("[ARIMA:forecast:%s]: steps:%s, order:%s, coeffs:%s" %
+                      (ctime, steps, self.order, self.params))
+        logging.debug("[ARIMA:forecast:%s]: raw_data:%s" % (ctime, self.raw_data.y))
+        self.resid = _get_errors(self.params, self.raw_data, self.order).squeeze()
+        forecast = _arma_predict_out_of_sample(self.params, steps, self.resid,
+                                               self.order, self.raw_data)
+
+        forecast = unintegrate(
+            forecast,
+            unintegrate_levels(
+                self.raw_data.raw_y[-self.order.k_diff:],
+                self.order.k_diff
+            )
+        )[self.order.k_diff:]
+        logging.debug("[ARIMA:forecast:%s]: forecast result: %s" % (ctime, forecast))
+        return forecast
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/arima_common.py b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/arima_common.py
new file mode 100644
index 000000000..4b9e97ce0
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/arima_model/arima_common.py
@@ -0,0 +1,241 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+from types import SimpleNamespace
+
+import numpy as np
+
+
+def _right_squeeze(arr, stop_dim=0):
+    """
+    Remove trailing singleton dimensions.
+    :param arr: type->np.array
+    :param stop_dim: type->int
+    :return: arr: type->np.array
+    """
+    last = arr.ndim
+    for squeeze in reversed(arr.shape):
+        if squeeze > 1:
+            break
+        last -= 1
+    last = max(last, stop_dim)
+
+    return arr.reshape(arr.shape[:last])
+
+
+def array_like(
+        obj,
+        name,
+        ndim=1,
+        order=None,
+        optional=False,
+):
+    """
+    Convert an array-like to an ndarray and check conditions.
+    """
+    if optional and obj is None:
+        return None
+    arr = np.asarray(obj, dtype=None, order=order)
+    if ndim is not None:
+        if arr.ndim > ndim:
+            arr = _right_squeeze(arr, stop_dim=ndim)
+        elif arr.ndim < ndim:
+            arr = np.reshape(arr, arr.shape + (1,) * (ndim - arr.ndim))
+        if arr.ndim != ndim:
+            msg = "{0} is required to have ndim {1} but has ndim {2}"
+            raise ValueError(msg.format(name, ndim, arr.ndim))
+    return arr
+
+
+def lagmat(x_raw, maxlag, trim='forward', original='ex'):
+    """
+    Create a 2-D array of lags.
+    """
+
+    x_raw = array_like(x_raw, 'x', ndim=2)
+    trim = 'none' if trim is None else trim
+    trim = trim.lower()
+
+    dropidx = 0
+    nobs, nvar = x_raw.shape
+    if original in ['ex', 'sep']:
+        dropidx = nvar
+    if maxlag >= nobs:
+        raise ValueError("maxlag should be < nobs")
+    lmat = np.zeros((nobs + maxlag, nvar * (maxlag + 1)))
+    for k in range(0, int(maxlag + 1)):
+        lmat[maxlag - k:nobs + maxlag - k,
+             nvar * (maxlag - k):nvar * (maxlag - k + 1)] = x_raw
+
+    if trim in ('none', 'forward'):
+        startobs = 0
+    elif trim in ('backward', 'both'):
+        startobs = maxlag
+    else:
+        raise ValueError('trim option not valid')
+
+    if trim in ('none', 'backward'):
+        stopobs = len(lmat)
+    else:
+        stopobs = nobs
+
+    lags = lmat[startobs:stopobs, dropidx:]
+    if original == 'sep':
+        leads = lmat[startobs:stopobs, :dropidx]
+        return lags, leads
+
+    return lags
+
+
+def get_matrix_inverse_and_singular(x_raw, rcond=1e-15):
+    """
+    (1) For the linear equation 'Y = AX', 'A = np.dot(inv_X, Y)';
+        this function returns inv_X.
+    (2) Use the SVD decomposition to compute X_inv: X = U S V_t, so
+        X_inv = np.dot(V, np.dot(inv_S, U_t)).
+    (3) Also return the singular values of X from the SVD decomposition.
+    """
+    x_raw = np.asarray(x_raw)
+    u_mat, s_mat, vt_mat = np.linalg.svd(x_raw, False)
+    raw_s = np.copy(s_mat)
+    rank = min(u_mat.shape[0], vt_mat.shape[1])
+    cutoff_value = rcond * np.maximum.reduce(s_mat)
+
+    inv_s = np.asarray([1. / s_mat[i] if s_mat[i] > cutoff_value else 0 for i in range(rank)])
+
+    inv_x = np.dot(np.transpose(vt_mat),
+                   np.multiply(inv_s[:, np.newaxis], np.transpose(u_mat)))
+
+    return inv_x, raw_s
+
+
+def get_k_constant(x_raw):
+    """Return the k_constant of the x matrix."""
+    augmented_x = np.column_stack((np.ones(x_raw.shape[0]), x_raw))
+    augm_rank = np.linalg.matrix_rank(augmented_x)
+    orig_rank = np.linalg.matrix_rank(x_raw)
+    k_constant = int(orig_rank == augm_rank)
+    return k_constant
+
+
+class OLS:
+    """OLS computes the linear regression coefficients relating x and y."""
+    def __init__(self, y_raw, x_raw):
+        self._x = np.asarray(x_raw)
+        self._y = np.asarray(y_raw.flatten())
+        self.nobs = float(self._x.shape[0])
+        self.matrix_param = SimpleNamespace(df_model=None,
+                                            df_resid=None,
+                                            rank=None,
+                                            k_constant=get_k_constant(self._x))
+        self.coeffs = None
+        self.normalized_cov_params = None
+
+    @property
+    def df_model(self):
+        """
+        The model degrees of freedom.
+        """
+        return self.matrix_param.df_model
+
+    @property
+    def df_resid(self):
+        """
+        The residual degrees of freedom.
+        """
+        return self.matrix_param.df_resid
+
+    @df_resid.setter
+    def df_resid(self, value):
+        self.matrix_param.df_resid = value
+
+    @property
+    def wresid(self):
+        """
+        The residuals of the transformed/whitened regressand and regressor(s).
+        """
+        return self._y - self.predict(self.coeffs, self._x)
+
+    @property
+    def scale(self):
+        """
+        A scale factor for the covariance matrix.
+        """
+        wresid = self.wresid
+        return np.dot(wresid, wresid) / self.df_resid
+
+    @property
+    def tvalues(self):
+        """
+        Return the t-statistics for the parameter estimates.
+        """
+        return self.coeffs / np.sqrt(np.diag(self.cov_params()))
+
+    def cov_params(self):
+        """
+        Compute the variance/covariance matrix.
+        """
+        return self.normalized_cov_params * self.scale
+
+    @property
+    def llf(self):
+        """The log-likelihood of the estimated errors, used to compute BIC."""
+        return self.loglike(self.coeffs)
+
+    def loglike(self, coeffs):
+        """
+        The likelihood function for the OLS model.
+        """
+        nobs2 = self.nobs / 2.0
+        nobs = float(self.nobs)
+        resid = np.array(self._y) - np.dot(self._x, coeffs)
+        ssr = np.sum(resid**2)
+        llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
+
+        return llf
+
+    @property
+    def bic(self):
+        r"""
+        For a model with a constant :math:`-2llf + \log(n)(df\_model+1)`.
+        For a model without a constant :math:`-2llf + \log(n)(df\_model)`.
+        """
+        return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
+                                                     self.matrix_param.k_constant))
+
+    def fit(self):
+        """
+        Fully fit the model.
+        """
+        x_inverse, x_singular = get_matrix_inverse_and_singular(self._x)
+        self.normalized_cov_params = np.dot(x_inverse, np.transpose(x_inverse))
+
+        self.matrix_param.rank = np.linalg.matrix_rank(np.diag(x_singular))
+        self.coeffs = np.dot(x_inverse, self._y)
+
+        if self.matrix_param.df_model is None:
+            self.matrix_param.df_model = float(self.matrix_param.rank -
+                                               self.matrix_param.k_constant)
+        if self.matrix_param.df_resid is None:
+            self.matrix_param.df_resid = self.nobs - self.matrix_param.rank
+
+        return self.coeffs
+
+    @staticmethod
+    def predict(coeffs, x_raw):
+        """
+        Linear prediction with the coefficient matrix; return the predicted values.
+        """
+        return np.dot(x_raw, coeffs)
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/forecasting/forcasting_algorithm.py b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/forcasting_algorithm.py
new file mode 100644
index 000000000..501122c3c
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/forcasting_algorithm.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
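+
+# A minimal usage sketch (``timestamps`` and ``values`` are illustrative
+# placeholders, not names taken from calling code):
+#
+#     from dbmind.common.types import Sequence
+#     seq = Sequence(timestamps=timestamps, values=values)
+#     forecast = quickly_forecast(seq, forecasting_minutes=5)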
+
+import itertools
+import logging
+from types import SimpleNamespace
+
+import numpy as np
+
+from ...types import Sequence
+from ..statistics import sequence_interpolate, trim_head_and_tail_nan
+from .. import seasonal as seasonal_interface
+
+
+MAX_AR_ORDER = 5
+MAX_MA_ORDER = 5
+MIN_DATA_LENGTH = max(MAX_AR_ORDER, MAX_MA_ORDER)
+
+
+def estimate_order_of_model_parameters(raw_data, k_ar_min=0, k_diff_min=0,
+                                       k_ma_min=0, k_diff_max=0):
+    """Return the model type and model order."""
+    diff_data = np.diff(raw_data)
+    algorithm_name = "linear"
+    k_ar_valid, k_ma_valid = 0, 0
+    min_bic = np.inf
+    bic_result_list = []
+    for k_ar, k_diff, k_ma in \
+            itertools.product(range(k_ar_min, MAX_AR_ORDER + 1),
+                              range(k_diff_min, k_diff_max + 1),
+                              range(k_ma_min, MAX_MA_ORDER + 1)):
+        if k_ar == 0 and k_diff == 0 and k_ma == 0:
+            continue
+
+        try:
+            from .arima_model.arima_alg import ARIMA
+
+            model = ARIMA(diff_data, order=(k_ar, k_diff, k_ma))
+            model.fit()
+            bic_result = model.bic
+            bic_result_list.append(bic_result)
+            if not np.isnan(bic_result) and bic_result < min_bic:
+                algorithm_name = "arima"
+                min_bic = bic_result
+                k_ar_valid = k_ar
+                k_ma_valid = k_ma
+        except ValueError:
+            pass  # Ignore invalid (p, d, q) combinations.
+        except Exception as e:
+            logging.warning("Warning occurred when estimating the order of model parameters: %s", e)
+    order = (k_ar_valid, 1, k_ma_valid)
+    return algorithm_name, order
+
+
+class ForecastingAlgorithm:
+    """Abstract forecasting algorithm class."""
+
+    def fit(self, sequence: Sequence):
+        """The subclass should implement this to train the model parameters."""
+        pass
+
+    def forecast(self, forecast_length: int) -> Sequence:
+        """The subclass should implement this to forecast a series from its history."""
+        pass
+
+
+class ForecastingFactory:
+    """The ForecastingFactory creates forecasting models."""
+    _CACHE = {}  # Reuse an instantiated object.
+
+    @staticmethod
+    def get_instance(raw_data) -> ForecastingAlgorithm:
+        """Return a forecasting model according to algorithm_name."""
+        algorithm_name, order = estimate_order_of_model_parameters(raw_data)
+        logging.debug('Choose %s algorithm to forecast.', algorithm_name)
+        if algorithm_name == "linear":
+            from .simple_forecasting import SimpleLinearFitting
+            ForecastingFactory._CACHE[algorithm_name] = SimpleLinearFitting()
+        elif algorithm_name == "arima" or algorithm_name is None:
+            from .arima_model.arima_alg import ARIMA
+            ForecastingFactory._CACHE[algorithm_name] = ARIMA(raw_data, order)
+        else:
+            raise NotImplementedError(f'Failed to load {algorithm_name} algorithm.')
+
+        return ForecastingFactory._CACHE[algorithm_name]
+
+
+def _check_forecasting_minutes(forecasting_minutes):
+    """
+    Check whether the input parameter forecasting_minutes is valid.
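+    For example, ``forecasting_minutes=1.5`` requests a 90-second horizon,
+    while a negative or non-numeric value raises ``ValueError``.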
+    :param forecasting_minutes: type->int or float
+    :return: None
+    """
+    check_result = True
+    message = ""
+    if not isinstance(forecasting_minutes, (int, float)):
+        check_result = False
+        message = "forecasting_minutes value type must be int or float"
+    elif forecasting_minutes < 0:
+        check_result = False
+        message = "forecasting_minutes value must be >= 0"
+    elif forecasting_minutes in (np.inf, -np.inf, np.nan, None):
+        check_result = False
+        message = f"forecasting_minutes value must not be: {forecasting_minutes}"
+
+    if not check_result:
+        raise ValueError(message)
+
+
+def decompose_sequence(sequence):
+    """Split a seasonal sequence into its components and return the trend as the training sequence."""
+    seasonal_data = None
+    raw_data = np.array(list(sequence.values))
+    is_seasonal, period = seasonal_interface.is_seasonal_series(raw_data)
+    if is_seasonal:
+        decompose_results = seasonal_interface.seasonal_decompose(raw_data, period=period)
+        seasonal = decompose_results[0]
+        trend = decompose_results[1]
+        resid = decompose_results[2]
+        train_sequence = Sequence(timestamps=sequence.timestamps, values=trend)
+        train_sequence = sequence_interpolate(train_sequence)
+        seasonal_data = SimpleNamespace(is_seasonal=is_seasonal,
+                                        seasonal=seasonal,
+                                        trend=trend,
+                                        resid=resid,
+                                        period=period)
+    else:
+        train_sequence = sequence
+    return seasonal_data, train_sequence
+
+
+def compose_sequence(seasonal_data, train_sequence, forecast_length, forecast_data):
+    """Recombine the forecast trend with the seasonal and residual components."""
+    if seasonal_data and seasonal_data.is_seasonal:
+        start_index = len(train_sequence) % seasonal_data.period
+        forecast_data = seasonal_data.seasonal[start_index: start_index + forecast_length] + \
+                        forecast_data + \
+                        seasonal_data.resid[start_index: start_index + forecast_length]
+    forecast_timestamps = [train_sequence.timestamps[-1] + train_sequence.step * (i + 1)
+                           for i in range(int(forecast_length))]
+    return Sequence(timestamps=forecast_timestamps, values=forecast_data)
+
+
+def quickly_forecast(sequence, forecasting_minutes):
+    """
+    Return the forecast sequence covering the next forecasting_minutes,
+    computed from the raw sequence.
+    :param sequence: type->Sequence
+    :param forecasting_minutes: type->int or float
+    :return: forecast sequence: type->Sequence
+    """
+    # 1. Check the forecasting minutes.
+    _check_forecasting_minutes(forecasting_minutes)
+    forecasting_length = int(forecasting_minutes * 60 * 1000 // sequence.step)
+    if forecasting_length == 0 or forecasting_minutes == 0:
+        return Sequence()
+
+    # 2. Interpolate.
+    sequence = sequence_interpolate(sequence)
+
+    # 3. Decompose the sequence.
+    seasonal_data, train_sequence = decompose_sequence(sequence)
+
+    # 4. Get a model from the ForecastingFactory.
+    model = ForecastingFactory.get_instance(list(train_sequence.values))
+
+    # 5. Fit the model and forecast.
+    model.fit(train_sequence)
+    forecast_data = model.forecast(forecasting_length)
+    forecast_data = trim_head_and_tail_nan(forecast_data)
+
+    # 6. Compose the sequence.
+    forecast_sequence = compose_sequence(seasonal_data,
+                                         train_sequence,
+                                         forecasting_length,
+                                         forecast_data)
+    return forecast_sequence
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/forecasting/simple_forecasting.py b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/simple_forecasting.py
new file mode 100644
index 000000000..0629b545e
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/forecasting/simple_forecasting.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+import numpy as np
+from sklearn.linear_model import LinearRegression
+from sklearn.preprocessing import PolynomialFeatures
+
+from ...types import Sequence
+from .forcasting_algorithm import ForecastingAlgorithm
+
+
+def series_to_supervised(sequence: Sequence, test_split=.0, poly_degree=None):
+    x, y = sequence.to_2d_array()
+    length = sequence.length
+    test_length = int(length * test_split)
+    x_train, x_test = x[:length - test_length], x[length - test_length:]
+    y_train, y_test = y[:length - test_length], y[length - test_length:]
+    if poly_degree:
+        poly = PolynomialFeatures(degree=poly_degree).fit(x)
+        x_train = poly.transform(x_train)
+        x_test = poly.transform(x_test)
+    return x_train, x_test, y_train, y_test
+
+
+class SimpleLinearFitting(ForecastingAlgorithm):
+    def __init__(self):
+        self.model = LinearRegression(copy_X=False)
+        self.interval = None
+        self.last_x = None
+
+    def fit(self, sequence: Sequence):
+        if sequence.length < 2:
+            raise ValueError('Unable to fit the sequence due to short length.')
+
+        x, y = sequence.to_2d_array()
+        self.interval = x[1] - x[0]
+        self.last_x = x[-1]
+        x = np.reshape(x, newshape=(-1, 1))
+
+        self.model.fit(x, y)
+
+    def forecast(self, forecast_length):
+        future = np.arange(start=self.last_x + self.interval,
+                           stop=self.last_x + self.interval * (forecast_length + 1),
+                           step=self.interval).reshape(-1, 1)
+        result = self.model.predict(future)
+        return result.tolist()
+
+
+class SupervisedModel(ForecastingAlgorithm):
+    def __init__(self, model=None, bias=False, poly_degree=None):
+        self.bias = bias
+        self.poly_degree = poly_degree
+        # Use the passed model instance if one exists.
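+        # Note: ``LinearRegression(normalize=True)`` relies on a parameter that
+        # scikit-learn deprecated in 1.0 and removed in 1.2; on newer releases,
+        # standardize the inputs explicitly instead.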
+        if not model:
+            self.model = LinearRegression(normalize=True)
+        else:
+            self.model = model
+        self.predict_steps = None
+        self.sequence = None
+
+    def fit(self, sequence: Sequence):
+        if sequence.length < 2:
+            raise ValueError('Unable to fit the sequence due to short length.')
+
+        # Defer the real fitting to forecast(); just remember the sequence.
+        self.sequence = sequence
+
+    def forecast(self, forecast_length):
+        if not isinstance(forecast_length, int):
+            raise ValueError('forecast_length must be an integer.')
+
+        self.predict_steps = forecast_length if forecast_length > 1 else 1
+        x_train, x_test, y_train, y_test = series_to_supervised(self.sequence)
+        x_pred = np.arange(start=self.sequence.length,
+                           stop=self.sequence.length + self.predict_steps,
+                           step=1).reshape(-1, 1)
+        self.model.fit(np.array(x_train).reshape(-1, 1),
+                       np.array(y_train).reshape(-1, 1))
+        y_pred = self.model.predict(X=x_pred)
+        if self.bias:
+            bias = y_pred.flatten()[0] - self.sequence.values[-1]
+            y_pred -= bias
+        return Sequence(timestamps=x_pred.flatten().tolist(),
+                        values=y_pred.flatten().tolist())
diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/__init__.py b/src/gausskernel/dbmind/tools/common/algorithm/ml/__init__.py
similarity index 100%
rename from src/gausskernel/dbmind/tools/ai_manager/module/__init__.py
rename to src/gausskernel/dbmind/tools/common/algorithm/ml/__init__.py
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/ml/metric.py b/src/gausskernel/dbmind/tools/common/algorithm/ml/metric.py
new file mode 100644
index 000000000..00c69a64d
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/ml/metric.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+from sklearn.metrics import r2_score
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/ml/supervised.py b/src/gausskernel/dbmind/tools/common/algorithm/ml/supervised.py
new file mode 100644
index 000000000..2159de5e7
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/ml/supervised.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+from sklearn.linear_model import LinearRegression
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/seasonal.py b/src/gausskernel/dbmind/tools/common/algorithm/seasonal.py
new file mode 100644
index 000000000..655c85c5a
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/seasonal.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+import warnings
+
+import numpy as np
+from scipy import signal
+
+from .statistics import trim_head_and_tail_nan
+
+warnings.filterwarnings("ignore")
+
+
+def acf(x_raw: np.ndarray, nlags=None):
+    """Compute the autocorrelation between x[t] and x[t - k]."""
+    x_raw = np.array(x_raw)
+    x_diff = x_raw - x_raw.mean()
+    n_x = len(x_raw)
+    d_param = n_x * np.ones(2 * n_x - 1)
+    acov = np.correlate(x_diff, x_diff, "full")[n_x - 1:] / d_param[n_x - 1:]
+    return acov[: nlags + 1] / acov[0]
+
+
+def _padding_nans(x_raw, trim_head=None, trim_tail=None):
+    """Pad NaNs at the head or tail of x_raw."""
+    result = None
+    if trim_head is None and trim_tail is None:
+        result = x_raw
+    elif trim_tail is None:
+        result = np.r_[[np.nan] * trim_head, x_raw]
+    elif trim_head is None:
+        result = np.r_[x_raw, [np.nan] * trim_tail]
+    elif trim_head and trim_tail:
+        result = np.r_[[np.nan] * trim_head, x_raw, [np.nan] * trim_tail]
+    return result
+
+
+def _get_trend(x_raw, filt):
+    """Use filt to extract the trend component."""
+    trim_head = int(np.ceil(len(filt) / 2.) - 1) or None
+    trim_tail = int(np.ceil(len(filt) / 2.) - len(filt) % 2) or None
+    result = signal.convolve(x_raw, filt, mode='valid')
+    result = _padding_nans(result, trim_head, trim_tail)
+
+    return result
+
+
+def is_seasonal_series(s_values, high_ac_threshold: float = 0.7, min_seasonal_freq=3):
+    """Judge whether a series is seasonal using the ACF."""
+    result = False
+    period = None
+    s_ac = acf(s_values, nlags=len(s_values))
+    diff_ac = np.diff(s_ac)
+    high_ac_peak_pos = (1 + np.argwhere((diff_ac[:-1] > 0) & (diff_ac[1:] < 0)
+                                        & (s_ac[1: -1] > high_ac_threshold)).flatten())
+
+    for i in high_ac_peak_pos:
+        if i > min_seasonal_freq:
+            period = high_ac_peak_pos[np.argmax(s_ac[high_ac_peak_pos])]
+            result = True
+            break
+    return result, period
+
+
+def get_seasonal_period(s_values, high_ac_threshold: float = 0.5):
+    """Return the seasonal period."""
+    result = is_seasonal_series(s_values, high_ac_threshold)
+    return result[1]
+
+
+def _get_filt(period):
+    """Build the filter used to extract the trend component."""
+    if period % 2 == 0:
+        filt = np.array([.5] + [1] * (period - 1) + [.5]) / period
+    else:
+        filt = np.repeat(1. / period, period)
+    return filt
+
+
+def _get_seasonal(x_raw, detrended, period):
+    """Return the seasonal component computed from x_raw, detrended, and period."""
+    nobs = len(x_raw)
+    period_averages = np.array([np.nanmean(detrended[i::period]) for i in range(period)])
+    period_averages -= np.mean(period_averages, axis=0)
+    seasonal = np.tile(period_averages.T, nobs // period + 1).T[:nobs]
+
+    return seasonal
+
+
+def seasonal_decompose(x_raw, period=None):
+    """Decompose a seasonal series into three components: trend, seasonal, and residual."""
+    pfreq = period
+
+    if np.ndim(x_raw) > 1:
+        raise ValueError("x ndim > 1 not implemented")
+
+    if period is None:
+        raise ValueError("period must not be None")
+
+    if not np.all(np.isfinite(x_raw)):
+        raise ValueError("the x contains non-finite values")
+
+    if x_raw.shape[0] < 2 * pfreq:
+        raise ValueError(f"the x length {x_raw.shape[0]} is less than two periods ({2 * pfreq})")
+    x_raw = trim_head_and_tail_nan(x_raw)
+    filt = _get_filt(period)
+    trend = _get_trend(x_raw, filt)
+    trend = trim_head_and_tail_nan(trend)
+    detrended = x_raw - trend
+
+    seasonal = _get_seasonal(x_raw, detrended, period)
+    seasonal = trim_head_and_tail_nan(seasonal)
+    resid = detrended - seasonal
+    return seasonal, trend, resid
diff --git a/src/gausskernel/dbmind/tools/common/algorithm/statistics.py b/src/gausskernel/dbmind/tools/common/algorithm/statistics.py
new file mode 100644
index 000000000..3eb18b684
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/algorithm/statistics.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
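+
+# A minimal usage sketch (the three-point series is illustrative):
+#
+#     seq = Sequence(timestamps=[1, 2, 3], values=[1.0, float('nan'), 3.0])
+#     repaired = sequence_interpolate(seq, fit_method='linear')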
+
+from types import SimpleNamespace
+
+import numpy as np
+from scipy.interpolate import interp1d
+
+from dbmind.common.types import Sequence
+
+
+def np_quantile(values, quantile):
+    """Return the given quantile of values (expressed as a percentile in [0, 100]), ignoring NaNs."""
+    return np.nanpercentile(values, quantile)
+
+
+def np_shift(values, shift_distance=1):
+    """Shift values right by shift_distance, back-filling the head with the first shifted value."""
+    shifted_values = np.roll(values, shift_distance)
+    for i in range(shift_distance):
+        shifted_values[i] = shifted_values[shift_distance]
+    return shifted_values
+
+
+def np_moving_avg(values, window=5, mode="same"):
+    """Compute the moving average for the sequence
+    and create a new sequence as the return value."""
+    moving_avg_values = np.convolve(values, np.ones((window,)) / window, mode=mode)
+    start_idx = len(values) - window
+    moving_avg_values[start_idx:] = moving_avg_values[start_idx]  # Pad the remaining values.
+    return moving_avg_values
+
+
+def np_moving_std(values, window=10):
+    """Compute and return the moving standard deviation for the sequence."""
+    sequence_length = len(values)
+    calculation_length = sequence_length - window
+    moving_std_values = [np.std(values[i:i + window]) for i in range(calculation_length)]
+    # Padding.
+    for _ in range(window):
+        moving_std_values.append(moving_std_values[-1])
+
+    return np.array(moving_std_values)
+
+
+def np_double_rolling(values, agg="mean", window1=5, window2=1, diff_mode="diff"):
+    """Compute a double-rolling aggregate difference over the values."""
+    if agg == "mean":
+        left_rolling = np_moving_avg(np_shift(values), window=window1)
+        right_rolling = np_moving_avg(values[::-1], window=window2)[::-1]
+    elif agg == "std":
+        left_rolling = np_moving_std(np_shift(values), window=window1)
+        right_rolling = np_moving_std(values[::-1], window=window2)[::-1]
+    else:
+        return values
+    diff_mode_map = {
+        "diff": (right_rolling - left_rolling),
+        "abs_diff": np.abs(right_rolling - left_rolling),
+        "rel_diff": (right_rolling - left_rolling) / left_rolling,
+        "abs_rel_diff": np.abs(right_rolling - left_rolling) / left_rolling
+    }
+    r_data = diff_mode_map.get(diff_mode)
+    values_length = len(values)
+    window = max(window1, window2)
+    tail_length = int(window / 2)
+    for i in range(tail_length):
+        r_data[values_length - i - 1] = r_data[values_length - tail_length - 1]
+    return r_data
+
+
+def trim_head_and_tail_nan(data):
+    """
+    When there are NaN values at the head or tail of the data,
+    fill them with the nearest non-NaN value.
+    :param data: type->np.array
+    :return: data: type->np.array
+    """
+    head_start_nona_value = 0
+    head_na_index = []
+    tail_start_nona_value = 0
+    tail_na_index = []
+
+    if len(data) == 0:
+        return data
+
+    for i in range(len(data)):
+        if not np.isnan(data[0]):
+            break
+        if not np.isnan(data[i]):
+            head_start_nona_value = data[i]
+            break
+        else:
+            head_na_index.append(i)
+
+    for i in range(len(data) - 1, 1, -1):
+        if not np.isnan(data[-1]):
+            break
+        if not np.isnan(data[i]):
+            tail_start_nona_value = data[i]
+            break
+        else:
+            tail_na_index.append(i)
+
+    for i in head_na_index:
+        data[i] = head_start_nona_value
+
+    for i in tail_na_index:
+        data[i] = tail_start_nona_value
+
+    return data
+
+
+def _init_interpolate_param(sequence):
+    """Initialize the interpolation parameters for the sequence_interpolate function."""
+    x_raw = np.array(list(range(len(sequence.timestamps))))
+    y_raw = np.array(sequence.values)
+    head_na_index = []
+    head_start_nona_value = None
+    tail_na_index = []
+    tail_start_nona_value = None
+    x_new = list(x_raw)
+    y_new = list(y_raw)
+
+    # Init head_start_nona_value and head_na_index.
+    for i in range(len(y_raw)):
+        if not np.isnan(y_raw[0]):
+            break
+        if not np.isnan(y_raw[i]):
+            head_start_nona_value = y_raw[i]
+            break
+        else:
+            head_na_index.append(i)
+
+    # Init tail_start_nona_value and tail_na_index.
+    for i in range(len(y_raw) - 1, 1, -1):
+        if not np.isnan(y_raw[-1]):
+            break
+        if not np.isnan(y_raw[i]):
+            tail_start_nona_value = y_raw[i]
+            break
+        else:
+            tail_na_index.append(i)
+
+    # Pop the NaNs from the head and tail of the data.
+    for i in range(len(head_na_index)):
+        x_new.pop(0)
+        y_new.pop(0)
+
+    for i in range(len(tail_na_index)):
+        x_new.pop(-1)
+        y_new.pop(-1)
+
+    na_param = SimpleNamespace(head_na_index=head_na_index, tail_na_index=tail_na_index,
+                               head_start_nona_value=head_start_nona_value,
+                               tail_start_nona_value=tail_start_nona_value)
+    return x_new, y_new, na_param
+
+
+def sequence_interpolate(sequence: Sequence, fit_method="cubic"):
+    """Interpolate missing values with scipy's interp1d."""
+    nan_exist_result = [not np.isnan(v) for v in sequence.values]
+    if all(nan_exist_result):
+        return sequence
+    if not any(nan_exist_result):
+        raise ValueError("sequence values are all NaN")
+
+    y_raw = np.array(sequence.values)
+    y_nona = []
+    x_nona = []
+    na_index = []
+
+    x_new, y_new, na_param = _init_interpolate_param(sequence)
+
+    # Prepare x_nona and y_nona for interp1d.
+    for i in range(len(y_new)):
+        if not np.isnan(y_new[i]):
+            y_nona.append(y_new[i])
+            x_nona.append(x_new[i])
+        else:
+            na_index.append(i)
+
+    fit_func = interp1d(x_nona, y_nona, kind=fit_method)
+    y_new = fit_func(x_new)
+
+    # Replace the NaNs in the raw y with interpolated values.
+    for i in na_index:
+        raw_index = i + len(na_param.head_na_index)
+        y_raw[raw_index] = y_new[i]
+
+    y_raw[na_param.head_na_index] = na_param.head_start_nona_value
+    y_raw[na_param.tail_na_index] = na_param.tail_start_nona_value
+    return Sequence(timestamps=sequence.timestamps, values=y_raw)
diff --git a/src/gausskernel/dbmind/tools/common/daemon.py b/src/gausskernel/dbmind/tools/common/daemon.py
new file mode 100644
index 000000000..eaf6cfd41
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/daemon.py
@@ -0,0 +1,145 @@
+"""
+Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+
+openGauss is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+
+         http://license.coscl.org.cn/MulanPSL2
+
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+"""
+
+import abc
+import atexit
+import os
+import signal
+import sys
+import time
+
+import dbmind.common.process
+from .platform import WIN32
+
+write_info = sys.stdout.write
+write_error = sys.stderr.write
+
+
+def read_dbmind_pid_file(filepath):
+    """Return the running process's pid from the file.
+    If the acquisition fails, return 0.
+
+    Note
+    ~~~~~~~~
+
+    This function can only read DBMind's own pid file because it performs
+    DBMind-specific verification.
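+
+    A stale pid (no live process behind it), an unreadable file, or a
+    malformed first line all yield 0 rather than raising.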
+    """
+    try:
+        if not os.path.exists(filepath):
+            return 0
+        with open(filepath, mode='r') as fp:
+            pid = int(fp.readline().strip())
+        proc = dbmind.common.process.Process(pid)
+
+        if proc.alive:
+            return pid
+        else:
+            return 0
+    except (PermissionError, ValueError, FileNotFoundError):
+        return 0
+
+
+class Daemon:
+    """A generic daemon class for DBMind."""
+
+    class STATUS:
+        PENDING = 0
+        RUNNING = 1
+
+    def __init__(self, pid_file, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
+        self.pid_file = os.path.abspath(pid_file)
+        self.stdin = stdin
+        self.stdout = stdout
+        self.stderr = stderr
+        self.status = Daemon.STATUS.PENDING
+
+    def daemonize(self):
+        if not WIN32:
+            # UNIX-like OSes use the double-fork magic.
+            try:
+                if os.fork() > 0:
+                    sys.exit(0)  # The first parent exits.
+            except OSError as e:
+                write_error('[Daemon Process]: cannot fork the first process: %s.\n' % e.strerror)
+                sys.exit(1)
+            # Detach from the parent environment.
+            os.chdir('/')
+            os.setsid()
+            os.umask(0)
+            try:
+                if os.fork() > 0:
+                    sys.exit(0)
+            except OSError as e:
+                write_error('[Daemon Process]: cannot fork the second process: %s.\n' % e.strerror)
+                sys.exit(1)
+
+        # Redirect the standard file descriptors onto the configured files.
+        sys.stdout.flush()
+        sys.stderr.flush()
+        os.dup2(open(self.stdin, 'r').fileno(), sys.stdin.fileno())
+        os.dup2(open(self.stdout, 'a+').fileno(), sys.stdout.fileno())
+        os.dup2(open(self.stderr, 'a+').fileno(), sys.stderr.fileno())
+
+        atexit.register(self.clean)
+
+        # Write the daemon pid file.
+        with open(self.pid_file, 'w+') as fp:
+            fp.write('%d\n' % os.getpid())
+
+    def start(self):
+        """Start the daemon process."""
+        # Verify that the pid file is valid and check whether the daemon already runs.
+        pid = read_dbmind_pid_file(self.pid_file)
+        if pid > 0:
+            write_error('[Daemon Process]: process (%d) already exists.\n' % pid)
+            sys.exit(1)
+
+        self.daemonize()
+        self.status = Daemon.STATUS.RUNNING
+        write_info('The process has been started.\n')
+        self.run()
+
+    def stop(self, level='low'):
+        """Stop the daemon process."""
+        level_mapper = {'low': signal.SIGTERM, 'mid': signal.SIGQUIT, 'high': signal.SIGKILL}
+
+        pid = read_dbmind_pid_file(self.pid_file)
+        if pid <= 0:
+            write_error('[Daemon Process]: process not running.\n')
+            return
+
+        # If the pid is valid, try to kill the daemon process.
+        try:
+            while True:
+                # Retry until the process exits.
+                write_error('Closing the process...\n')
+                os.kill(pid, level_mapper[level])
+                time.sleep(1)
+        except OSError as e:
+            if 'No such process' in e.strerror and os.path.exists(self.pid_file):
+                os.remove(self.pid_file)
+
+    @abc.abstractmethod
+    def clean(self):
+        """Clean up before exiting."""
+
+    @abc.abstractmethod
+    def run(self):
+        """Subclass should override the run() method."""
+
diff --git a/src/gausskernel/dbmind/tools/common/dispatcher/__init__.py b/src/gausskernel/dbmind/tools/common/dispatcher/__init__.py
new file mode 100644
index 000000000..4074f20fd
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/dispatcher/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
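+
+# A minimal usage sketch (the 60-second interval is illustrative):
+#
+#     @timer(seconds=60)
+#     def report_status():
+#         ...
+#
+#     TimedTaskManager.start()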
+
+from .task_scheduler import TimedTaskManager
+from .task_scheduler import timer
+from .task_worker import ProcessWorker
+from .task_worker import get_worker_instance
diff --git a/src/gausskernel/dbmind/tools/common/dispatcher/task_scheduler.py b/src/gausskernel/dbmind/tools/common/dispatcher/task_scheduler.py
new file mode 100644
index 000000000..9123e3100
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/dispatcher/task_scheduler.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#          http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+import logging
+from threading import Thread, Event
+
+ONE_DAY = 86400  # 24 * 60 * 60 seconds
+
+
+class RepeatedTimer(Thread):
+    """RepeatedTimer class.
+    This class inherits from `threading.Thread` and
+    triggers a periodic function at a fixed interval.
+    """
+
+    def __init__(self, interval, function, *args, **kwargs):
+        self._interval = interval
+        self._function = function
+        self._args = args
+        self._kwargs = kwargs
+        self._finished = Event()
+        Thread.__init__(self)
+
+    def run(self):
+        while not self._finished.is_set():
+            try:
+                self._function(*self._args, **self._kwargs)
+            except Exception as e:
+                logging.error('RepeatedTimer<%s(%s), %d> encountered an error: %s.'
+                              % (self._function, self._args, self._interval, e))
+                logging.exception(e)
+            self._finished.wait(self._interval)
+        self._finished.set()
+
+    def cancel(self):
+        self._finished.set()
+
+    def __hash__(self):
+        return hash((self._interval, self._function))
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self._interval == other._interval and self._function == other._function
+        else:
+            return False
+
+    def __str__(self):
+        return '%s(%s, %s)' % (self.__class__.__name__, self._function.__name__, self._interval)
+
+    def __repr__(self):
+        return self.__str__()
+
+
+class _TimedTaskManager:
+    """Register periodic tasks and run each of them on a RepeatedTimer thread."""
+
+    def __init__(self):
+        self.task_table = dict()
+        self.timers = set()
+
+    def apply(self, func, seconds):
+        self.task_table[func] = seconds
+        self.timers.add(RepeatedTimer(seconds, func))
+
+    def start(self):
+        for t in self.timers:
+            t.start()
+
+    def stop(self):
+        for t in self.timers:
+            t.cancel()
+
+
+TimedTaskManager = _TimedTaskManager()
+
+
+# TODO: make it dummy and implement with reflection.
+def timer(seconds):
+    def inner(func):
+        TimedTaskManager.apply(func, seconds)
+        return func
+
+    return inner
diff --git a/src/gausskernel/dbmind/tools/common/dispatcher/task_worker.py b/src/gausskernel/dbmind/tools/common/dispatcher/task_worker.py
new file mode 100644
index 000000000..60797d97a
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/common/dispatcher/task_worker.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import logging +import os +import signal +from abc import ABC, abstractmethod +from concurrent.futures.process import ProcessPoolExecutor +from concurrent.futures import as_completed, wait +import concurrent +from multiprocessing import Event + +from dbmind.common import utils +from dbmind.common.platform import WIN32 + +IN_PROCESS = 'DBMind [Worker Process] [IN PROCESS]' +PENDING = 'DBMind [Worker Process] [IDLE]' + + +def _initializer(): + signal.signal(signal.SIGTERM, signal.SIG_IGN) + signal.signal(signal.SIGQUIT, signal.SIG_IGN) + utils.set_proc_title(PENDING) + + +def function_starter(func, *args, **kwargs): + utils.set_proc_title(IN_PROCESS) + try: + return func(*args, **kwargs) + finally: + utils.set_proc_title(PENDING) + + +class AbstractWorker(ABC): + CLOSED = 0 + RUNNING = 1 + + def __init__(self, worker_num): + self.worker_num = worker_num + self.status = self.RUNNING + + @abstractmethod + def _parallel_execute(self, func, iterable): + pass + + @abstractmethod + def _submit(self, func, synchronized, args): + pass + + @abstractmethod + def as_completed(self, funcs): + pass + + def apply(self, func, synchronized=True, args=()): + logging.info('Dispatch the task %s (%s) to workers.', func.__name__, args) + return self._submit(func, synchronized, args) + + def parallel_execute(self, func, iterable): + if self.status == self.CLOSED: + logging.warning('Worker already exited.') + return + logging.info('Dispatch the multiple tasks %s to workers.', func.__name__) + return self._parallel_execute(func, iterable) + + @abstractmethod + def terminate(self, cancel_futures): + self.status = self.CLOSED + + +class _ProcessPoolExecutor(ProcessPoolExecutor): + + @staticmethod + def _wait_for_notify(event): + event.wait() + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Make the process pool is a fixed process pool, which creates many idle processes and waits for the + # scheduler's task. Why not use lazy-loading mode? Because the worker process forked from the master process, + # the master process maybe have some running backend threads while forking. This action will cause unexpected + # behaviors, such as timed backend threads also being forked and run in the child process. + event = Event() + for _ in range(self._max_workers): + self.submit(self._wait_for_notify, event) + event.set() + + def shutdown(self, wait=True, *, cancel_futures=False): + # Added cancel_futures into shutdown() method in version 3.9. + # Hence, we have to force to kill all sub-processes explicitly. 
+ logging.debug('Terminate workerProcesses: cancel_futures: %s, backends: %s.', + cancel_futures, list(self._processes.keys())) + if cancel_futures and len(self._processes) > 0: + for pid in self._processes.keys(): + try: + os.kill(pid, signal.SIGKILL) + except ProcessLookupError: + logging.warning('Not found the process ID %d to kill.', pid) + os.wait() + else: + super().shutdown() + + +class ProcessWorker(AbstractWorker): + def __init__(self, worker_num): + if worker_num <= 0: + worker_num = max(os.cpu_count() // 2, 3) + logging.warning( + '[ProcessWorker] automatically set worker_num = %d due to target error.', worker_num + ) + if WIN32: + from concurrent.futures.thread import ThreadPoolExecutor + self.pool = ThreadPoolExecutor(worker_num) + else: + self.pool = _ProcessPoolExecutor(worker_num, initializer=_initializer) + + super().__init__(worker_num) + + def _parallel_execute(self, func, iterable): + futures = [] + + for params in iterable: + if isinstance(params, dict): + args = list() + kwargs = params + else: + args = list(params) + kwargs = dict() + args.insert(0, func) + futures.append(self.pool.submit(function_starter, *args, **kwargs)) + + wait(futures) + results = [] + for future in futures: + try: + results.append(future.result()) + except concurrent.futures.process.BrokenProcessPool: + # killed by parent process + results.append(None) + except Exception as e: + results.append(None) + logging.exception(e) + return results + + def _submit(self, func, synchronized, args): + args = list(args) + args.insert(0, func) + if synchronized: + return self.pool.submit(function_starter, *args).result() + else: + return self.pool.submit(function_starter, *args) + + def as_completed(self, funcs): + return as_completed(funcs) + + def terminate(self, cancel_futures): + super().terminate(cancel_futures) + self.pool.shutdown(True, cancel_futures=cancel_futures) + + +def get_worker_instance(_type, process_num, hosts=None) -> AbstractWorker: + if _type == 'local': + return ProcessWorker(process_num) + elif _type == 'dist': + raise NotImplementedError + else: + raise ValueError('Invalid configuration: [WORKER] type: %s.' % _type) + diff --git a/src/gausskernel/dbmind/tools/common/either.py b/src/gausskernel/dbmind/tools/common/either.py new file mode 100644 index 000000000..b8d16a166 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/either.py @@ -0,0 +1,45 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
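`get_worker_instance()` is the intended entry point: `'local'` yields the `ProcessWorker` above (a thread pool on Windows, the pre-forked `_ProcessPoolExecutor` elsewhere), while `'dist'` is reserved for a future distributed backend. A dispatch sketch; the `add` function and worker count are illustrative:

```python
from dbmind.common.dispatcher import get_worker_instance


def add(a, b):
    return a + b


worker = get_worker_instance('local', process_num=4)

# Synchronous single task: blocks and returns the result directly.
assert worker.apply(add, synchronized=True, args=(1, 2)) == 3

# Fan-out: each element of the iterable is an argument tuple (a dict
# would be passed as keyword arguments); a failed task yields None.
results = worker.parallel_execute(add, [(1, 2), (3, 4), (5, 6)])
print(results)  # [3, 7, 11]

worker.terminate(cancel_futures=True)  # on POSIX, SIGKILLs the forked children
```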
+"""This implementation is similar to Java-like Optional.""" +from abc import abstractmethod + + +class Maybe: + @abstractmethod + def get(self, *args, **kwargs): + pass + + +class OptionalValue(Maybe): + def __init__(self, value): + self.value = value + + def get(self, default=None): + return default if self.value is None else self.value + + +class OptionalContainer(Maybe): + def __init__(self, container): + self.container = container + + def __bool__(self): + return bool(self.container) + + def get(self, item, default=None): + try: + return self.container.__getitem__(item) + except (IndexError, KeyError): + return default + + def __getitem__(self, item): + return self.get(item) diff --git a/src/gausskernel/dbmind/tools/common/exceptions.py b/src/gausskernel/dbmind/tools/common/exceptions.py new file mode 100644 index 000000000..6c9843143 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/exceptions.py @@ -0,0 +1,35 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +class ApiClientException(Exception): + """API client exception, raises when response status code != 200.""" + pass + + +class SetupError(Exception): + def __init__(self, msg, *args, **kwargs): + self.msg = msg + + +class InvalidPasswordException(Exception): + pass + + +class SQLExecutionError(Exception): + pass + + +class ConfigSettingError(Exception): + pass + diff --git a/src/gausskernel/dbmind/tools/common/http/__init__.py b/src/gausskernel/dbmind/tools/common/http/__init__.py new file mode 100644 index 000000000..6d2de5c65 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/http/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from .http_service import HttpService +from .http_service import Request, Response +from .http_service import request_mapping + diff --git a/src/gausskernel/dbmind/tools/common/http/controller_aop.py b/src/gausskernel/dbmind/tools/common/http/controller_aop.py new file mode 100644 index 000000000..95be44f5e --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/http/controller_aop.py @@ -0,0 +1,41 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+# See the Mulan PSL v2 for more details. + + +import json +from functools import wraps + +skip_filter_paths = ["/metrics", "/favicon.ico"] + + +def do_before(): + """Nothing""" + +def do_after(rt_result): + """Nothing""" + +def do_exception(exception): + """Nothing""" + +def around(func, *args, **kw): + @wraps(func) + def wrapper(): + do_before() + try: + rt_result = func(*args, **kw) + final_rt = do_after(rt_result) + except BaseException as exception: + final_rt = do_exception(exception) + return final_rt + + return wrapper diff --git a/src/gausskernel/dbmind/tools/common/http/http_service.py b/src/gausskernel/dbmind/tools/common/http/http_service.py new file mode 100644 index 000000000..448f8e659 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/http/http_service.py @@ -0,0 +1,226 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""This HttpService supports multiple backends, including: + fastAPI, flask, and waitress.""" +import logging +import ssl +from urllib.parse import quote + + +class BACKEND_TYPES: + FAST_API = 0 + PURE_FLASK = 1 + FLASK_WITH_WAITRESS = 2 + + +try: + """FastAPI is more modern, so we regard it as the first choice.""" + from fastapi import FastAPI + from fastapi import Response as _Response + + import uvicorn + from uvicorn.config import LOGGING_CONFIG + + _BACKEND = BACKEND_TYPES.FAST_API +except ImportError: + from flask import Flask + from flask import Response as _Response + from flask import request as _request + + _BACKEND = BACKEND_TYPES.PURE_FLASK + try: + from waitress import serve + + _BACKEND = BACKEND_TYPES.FLASK_WITH_WAITRESS + except ImportError: + pass + +_RequestMappingTable = dict() + + +class Request: + """A wrapper for Request.""" + + def __init__(self, req): + self.req = req + + @property + def json(self): + if _BACKEND >= BACKEND_TYPES.PURE_FLASK: + return self.req.json + elif _BACKEND == BACKEND_TYPES.FAST_API: + return self.req.json() + + @property + def body(self): + if _BACKEND >= BACKEND_TYPES.PURE_FLASK: + return self.req.body + elif _BACKEND == BACKEND_TYPES.FAST_API: + return self.req.body() + + @property + def url(self): + return str(self.req.url) + + @property + def cookies(self): + return self.req.cookies + + @staticmethod + def parse(args): + if _BACKEND >= BACKEND_TYPES.PURE_FLASK: + return _request + elif _BACKEND == BACKEND_TYPES.FAST_API: + if len(args) == 0 or len(args[0]) == 0: + return + request = args.pop(0)[0] + return Request(request) + + +class Response(_Response): + def __init__( + self, content=None, status_code=200, headers=None, mimetype=None, **kwargs + ): + super().__init__(content, status_code, headers, mimetype, **kwargs) + + +class RequestLogger: + """Wrap a WSGI application to log requests. 
+ The format of logger refers to ``paste.TransLogger``.""" + format = ('%(REMOTE_ADDR)s - %(REMOTE_USER)s ' + '"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" ' + '%(status)s %(bytes)s "%(HTTP_REFERER)s" "%(HTTP_USER_AGENT)s"') + + def __init__(self, application): + self.application = application + + def __call__(self, environ, start_response): + req_uri = quote(environ.get('SCRIPT_NAME', '') + + environ.get('PATH_INFO', '')) + if environ.get('QUERY_STRING'): + req_uri += '?' + environ['QUERY_STRING'] + method = environ['REQUEST_METHOD'] + + def start_response_wrapper(status, headers, exc_info=None): + # headers is a list of two-tuple, we should traverse it here. + content_length = '?' + for name, value in headers: + if name.lower() == 'content-length': + content_length = value + RequestLogger.log(environ, method, req_uri, status, content_length) + return start_response(status, headers) + + return self.application(environ, start_response_wrapper) + + @staticmethod + def log(environ, method, req_uri, status, content_length): + remote_addr = '-' + if environ.get('HTTP_X_FORWARDED_FOR'): + remote_addr = environ['HTTP_X_FORWARDED_FOR'] + elif environ.get('REMOTE_ADDR'): + remote_addr = environ['REMOTE_ADDR'] + message = RequestLogger.format % { + 'REMOTE_ADDR': remote_addr, + 'REMOTE_USER': environ.get('REMOTE_USER') or '-', + 'REQUEST_METHOD': method, + 'REQUEST_URI': req_uri, + 'HTTP_VERSION': environ.get('SERVER_PROTOCOL'), + 'status': status.split(None, 1)[0], + 'bytes': content_length, + 'HTTP_REFERER': environ.get('HTTP_REFERER', '-'), + 'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'), + } + logging.log(logging.INFO, message) + + +class HttpService: + """A Http service implementation. + ~~~~~~~~~~~~~~~~~~ + + To decouple web service framework and web service interface, DBMind implements the class. + In this way, DBMind can change to another web framework (e.g., web.py, ASGI) easily.""" + + def __init__(self, name=__name__): + if _BACKEND >= BACKEND_TYPES.PURE_FLASK: + self.app = Flask(name) + elif _BACKEND == BACKEND_TYPES.FAST_API: + self.app = FastAPI(title=name) + else: + raise AssertionError('Should not run to here.') + + self.rule_num = 0 + + def attach(self, func, rule, **options): + """Attach a rule to the backend app.""" + is_api = options.pop('api', False) + if _BACKEND >= BACKEND_TYPES.PURE_FLASK: + endpoint = options.pop("endpoint", None) + rule = rule.replace('{', '<').replace('}', '>') + self.app.add_url_rule(rule, endpoint, func, **options) + elif _BACKEND == BACKEND_TYPES.FAST_API: + rule = rule.replace('<', '{').replace('>', '}') + if is_api: + self.app.add_api_route(rule, func, **options) + else: + self.app.add_route(rule, func, **options) + self.rule_num += 1 + + def route(self, rule, **options): + def decorator(f): + self.attach(f, rule, **options) + return f + + return decorator + + def register_controller_module(self, module_name): + __import__(module_name) + for rule, items in _RequestMappingTable.items(): + f, options = items + self.attach(f, rule, **options) + + def start_listen(self, host, port, + ssl_keyfile=None, ssl_certfile=None, ssl_keyfile_password=None): + if _BACKEND != BACKEND_TYPES.FAST_API and (ssl_keyfile or ssl_certfile): + raise NotImplementedError( + 'Not supported Https for flask. You should install fastapi and uvicorn.' 
+ ) + + if _BACKEND == BACKEND_TYPES.FLASK_WITH_WAITRESS: + serve(RequestLogger(self.app), _quiet=True, + listen="{host}:{port}".format(host=host, port=port)) + elif _BACKEND == BACKEND_TYPES.PURE_FLASK: + self.app.run(host=host, port=port) + elif _BACKEND == BACKEND_TYPES.FAST_API: + config = uvicorn.Config(self.app, host=host, port=port, + ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, + ssl_keyfile_password=ssl_keyfile_password, + log_config=None) + config.load() + if config.is_ssl: + config.ssl.options |= ( + ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 + ) # RFC 7540 Section 9.2: MUST be TLS >=1.2 + config.ssl.set_ciphers('DHE+AESGCM:ECDHE+AESGCM') + server = uvicorn.Server(config) + server.run() + + +def request_mapping(rule, **kwargs): + """To record to a static mapping dict.""" + + def decorator(f): + _RequestMappingTable[rule] = (f, kwargs) + return f + + return decorator + diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/__init__.py b/src/gausskernel/dbmind/tools/common/parser/__init__.py similarity index 100% rename from src/gausskernel/dbmind/tools/ai_manager/module/anomaly_detection/__init__.py rename to src/gausskernel/dbmind/tools/common/parser/__init__.py diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/utils/plan_parsing.py b/src/gausskernel/dbmind/tools/common/parser/plan_parsing.py similarity index 92% rename from src/gausskernel/dbmind/tools/anomaly_detection/utils/plan_parsing.py rename to src/gausskernel/dbmind/tools/common/parser/plan_parsing.py index 32f5e5ad7..4888a11f4 100644 --- a/src/gausskernel/dbmind/tools/anomaly_detection/utils/plan_parsing.py +++ b/src/gausskernel/dbmind/tools/common/parser/plan_parsing.py @@ -1,17 +1,16 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" import re EMPTY = '' @@ -160,7 +159,6 @@ class Plan: indent_len = count_indent(line_with_indent) if line_with_indent.strip().startswith(CHILD_NODE_FLAG): level = (indent_len - self.primal_indent_len - DEFAULT_INDENT_LEN) // HIERARCHY_IDENT_LEN + 1 - assert level >= 1 return level else: return 0 if self.primal_indent_len == indent_len else \ @@ -184,6 +182,7 @@ class Plan: def parse(self, text: str): # Remove redundant text to interference with the parsing process. 
tidy_text = text.strip('\n') + # lines = tidy_text.split(r'\n') lines = tidy_text.splitlines() if len(lines) == 0: return @@ -242,7 +241,7 @@ class Plan: self.traverse(append) opts.sort(key=lambda n: n.exec_cost, reverse=True) for idx, item in enumerate(opts): - if str.startswith(item.name, 'Sort') or str.startswith(item.name, 'SortAggregate'): + if str.startswith(item.cmdline, 'Sort') or str.startswith(item.cmdline, 'SortAggregate'): opts[0], opts[idx] = opts[idx], opts[0] return opts diff --git a/src/gausskernel/dbmind/tools/common/parser/sql_parsing.py b/src/gausskernel/dbmind/tools/common/parser/sql_parsing.py new file mode 100644 index 000000000..f41f62323 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/parser/sql_parsing.py @@ -0,0 +1,270 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import ast +import re + +import sqlparse +from sqlparse.sql import Identifier, IdentifierList +from sqlparse.sql import Where, Comparison, Operation, Function +from sqlparse.tokens import Keyword, DML +from sqlparse.tokens import Token + +SQL_SYMBOLS = ( + '!=', '<=', '>=', '==', '<', '>', '=', ',', '*', ';', '%', '+', ',', ';', '/' +) + + +def is_subquery(parse_tree): + if not parse_tree.is_group: + return False + for item in parse_tree.tokens: + if item.ttype is DML and item.value.upper() == 'SELECT': + return True + return False + + +def analyze_column(column, where_clause): + for tokens in where_clause.tokens: + if isinstance(tokens, Comparison) and isinstance(tokens.left, Identifier): + column.add(tokens.left.value) + + +def get_columns(sql): + column = set() + parsed_tree = sqlparse.parse(sql)[0] + for item in parsed_tree: + if isinstance(item, Where): + analyze_column(column, item) + return list(column) + + +def get_indexes(dbagent, sql, timestamp): + """ + Get indexes of SQL from dataset. + :param dbagent: obj, interface for sqlite3. + :param sql: str, query. + :return: list, the set of indexes. + """ + indexes = [] + indexes_dict = dbagent.fetch_all_result("SELECT indexes from wdr where timestamp ==\"{timestamp}\"" + " and query == \"{query}\"".format(timestamp=timestamp, + query=sql)) + if len(indexes_dict): + try: + indexes_dict = ast.literal_eval(indexes_dict[0][0]) + indexes_def_list = list(list(indexes_dict.values())[0].values()) + for sql_index in indexes_def_list: + value_in_bracket = re.compile(r'[(](.*?)[)]', re.S) + indexes.append(re.findall(value_in_bracket, sql_index)[0].split(',')[0]) + except Exception: + return indexes + return indexes + + +def analyze_unequal_clause(tokens): + for token in tokens: + if token.ttype is Token.Operator.Comparison and token.value.upper() == 'LIKE': + return 'FuzzyQuery' + elif token.ttype is Token.Operator.Comparison and token.value.upper() == '!=': + return 'UnEqual' + elif token.ttype is Token.Operator.Comparison and token.value.upper() == 'NOT IN': + return 'NotIn' + + +def analyze_where_clause(dbagent, where, timestamp): + """ + Analyze RCA of SQL from the where clause. + :param dbagent: obj, interface for sqlite3. 
+    :param where: tokens, where clause of sqlparse.
+    :return: str, key target of RCA.
+    """
+    if "OR" in where.value.upper():
+        columns = get_columns(where.parent.value)
+        indexes = get_indexes(dbagent, where.parent.value, timestamp)
+        for column in columns:
+            if column not in indexes:
+                return 'OR'
+
+    for tokens in where.tokens:
+        if isinstance(tokens, Comparison):
+            if isinstance(tokens.left, Operation):
+                return 'ExprInWhere'
+            elif isinstance(tokens.left, Function):
+                return 'Function'
+            # A '<' or '>' inside the comparison indicates a range predicate.
+            elif '<' in tokens.value or '>' in tokens.value:
+                return 'RangeTooLarge'
+            else:
+                return analyze_unequal_clause(tokens)
+
+    if "is not null".upper() in where.value.upper():
+        return 'IsNotNULL'
+
+
+def sql_parse(dbagent, sql, timestamp):
+    sql = re.sub(r'\n|\t', r' ', sql)
+    sql = re.sub(r'[ ]{2,}', r' ', sql)
+    parse_tree = sqlparse.parse(sql)[0]
+
+    if "select count( * ) from".upper() in parse_tree.value.upper() or \
+            "select * from".upper() in parse_tree.value.upper() or \
+            "select count(*) from".upper() in parse_tree.value.upper() or \
+            "select count( *) from".upper() in parse_tree.value.upper() or \
+            "select count(* ) from".upper() in parse_tree.value.upper():
+        return "FullScan"
+
+    if "update".upper() in parse_tree.value.upper() and "set".upper() in parse_tree.value.upper():
+        return 'Update'
+
+    for item in parse_tree:
+        if isinstance(item, Where):
+            return analyze_where_clause(dbagent, item, timestamp)
+
+
+def wdr_sql_processing(sql):
+    standard_sql = unify_sql(sql)
+    standard_sql = re.sub(r';', r'', standard_sql)
+    standard_sql = re.sub(r'VALUES (\(.*\))', r'VALUES', standard_sql)
+    standard_sql = re.sub(r'\$\d+?', r'?', standard_sql)
+    return standard_sql
+
+
+def check_select(parsed_sql):
+    if not parsed_sql.is_group:
+        return False
+    for token in parsed_sql.tokens:
+        if token.ttype is DML and token.value.upper() == 'SELECT':
+            return True
+    return False
+
+
+def get_table_token_list(parsed_sql, token_list):
+    flag = False
+    for token in parsed_sql.tokens:
+        if not flag:
+            if token.ttype is Keyword and token.value.upper() == 'FROM':
+                flag = True
+        else:
+            if check_select(token):
+                get_table_token_list(token, token_list)
+            elif token.ttype is Keyword:
+                return
+            else:
+                token_list.append(token)
+
+
+def extract_table_from_select(sql):
+    tables = []
+    table_token_list = []
+    sql_parsed = sqlparse.parse(sql)[0]
+    get_table_token_list(sql_parsed, table_token_list)
+    for table_token in table_token_list:
+        if isinstance(table_token, Identifier):
+            tables.append(table_token.get_name())
+        elif isinstance(table_token, IdentifierList):
+            for identifier in table_token.get_identifiers():
+                tables.append(identifier.get_name())
+        else:
+            if table_token.ttype is Keyword:
+                tables.append(table_token.value)
+    return tables
+
+
+def extract_table_from_sql(sql):
+    """
+    Get the table names in a SQL statement. The regex-based fallbacks below have
+    known limitations, especially for 'delete', 'update' and 'insert into' statements.
+    """
+    if not sql.strip():
+        return []
+    delete_pattern_1 = re.compile(r'FROM\s+([^\s]*)[;\s ]?', re.IGNORECASE)
+    delete_pattern_2 = re.compile(r'FROM\s+([^\s]*)\s+WHERE', re.IGNORECASE)
+    update_pattern = re.compile(r'UPDATE\s+([^\s]*)\s+SET', re.IGNORECASE)
+    insert_pattern = re.compile(r'INSERT\s+INTO\s+([^\s]*)\s+VALUES', re.IGNORECASE)
+    if sql.upper().strip().startswith('SELECT'):
+        tables = extract_table_from_select(sql)
+    elif sql.upper().strip().startswith('DELETE'):
+        if 'WHERE' not in sql:
+            tables = delete_pattern_1.findall(sql)
+        else:
+            tables = 
delete_pattern_2.findall(sql) + elif sql.upper().strip().startswith('UPDATE'): + tables = update_pattern.findall(sql) + elif sql.upper().strip().startswith('INSERT INTO'): + sql = re.sub(r'\(.*?\)', r' ', sql) + tables = insert_pattern.findall(sql) + else: + tables = [] + return tables + + +def remove_comment(sql): + sql = re.sub(r'\n', r' ', sql) + sql = re.sub(r'/\s*\*[\w\W]*?\*\s*/\s*', r'', sql) + sql = re.sub(r'^--.*\s?', r'', sql) + return sql + + +def unify_sql(sql): + index = 0 + sql = remove_comment(sql) + while index < len(sql): + if sql[index] in SQL_SYMBOLS: + if sql[index:index + 2] in SQL_SYMBOLS: + sql = sql[:index].strip() + ' ' + sql[index:index + 2] + ' ' + sql[index + 2:].strip() + index = index + 3 + else: + sql = sql[:index].strip() + ' ' + sql[index] + ' ' + sql[index + 1:].strip() + index = index + 2 + else: + index = index + 1 + new_sql = list() + for word in sql.split(): + new_sql.append(word.upper()) + sql = ' '.join(new_sql) + return sql.strip() + + +def sql_processing(sql): + if not sql: + return '' + standard_sql = unify_sql(sql) + + if standard_sql.startswith('INSERT'): + standard_sql = re.sub(r'VALUES (\(.*\))', r'VALUES', standard_sql) + # remove digital like 12, 12.565 + standard_sql = re.sub(r'[\s]+\d+(\.\d+)?', r' ?', standard_sql) + # remove '$n' in sql + standard_sql = re.sub(r'\$\d+', r'?', standard_sql) + # remove single quotes content + standard_sql = re.sub(r'\'.*?\'', r'?', standard_sql) + # remove double quotes content + standard_sql = re.sub(r'".*?"', r'?', standard_sql) + # remove '(1' format + standard_sql = re.sub(r'\(\d+(\.\d+)?', r'(?', standard_sql) + # remove '`' in sql + standard_sql = re.sub(r'`', r'', standard_sql) + # remove ; in sql + standard_sql = re.sub(r';', r'', standard_sql) + + return standard_sql.strip() + + +def is_num(input_str): + if isinstance(input_str, str) and re.match(r'^\d+\.?\d+$', input_str): + return True + return False + + +def str2int(input_str): + return int(re.match(r'^(\d+)\.?\d+$', input_str).groups()[0]) diff --git a/src/gausskernel/dbmind/tools/common/platform/__init__.py b/src/gausskernel/dbmind/tools/common/platform/__init__.py new file mode 100644 index 000000000..abbdddd62 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/platform/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +import os + +_PLATFORM = os.sys.platform +LINUX = _PLATFORM == 'linux' +WIN32 = _PLATFORM == 'win32' + +# declaration +if WIN32: + from ._win32 import win32_get_process_cwd + from ._win32 import win32_get_process_cmdline + from ._win32 import win32_get_process_path + from ._win32 import win32_is_process_running + diff --git a/src/gausskernel/dbmind/tools/common/platform/_win32.py b/src/gausskernel/dbmind/tools/common/platform/_win32.py new file mode 100644 index 000000000..8e6ffc642 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/platform/_win32.py @@ -0,0 +1,283 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. 
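Taken together, the helpers above normalize raw SQL into a template form: `unify_sql()` strips comments, pads operators with spaces, and uppercases tokens, while `sql_processing()` additionally replaces numeric and quoted literals and `$n` placeholders with `?`. A sketch of roughly what they produce (exact spacing may differ; the `dbmind.common.parser.sql_parsing` module path is assumed from the file location):

```python
from dbmind.common.parser.sql_parsing import (
    extract_table_from_sql, sql_processing)

raw = "select id, name from users where age > 21 and name = 'bob';"

# Literals and placeholders become '?', tokens are uppercased:
print(sql_processing(raw))
# roughly: SELECT ID , NAME FROM USERS WHERE AGE > ? AND NAME = ?

# SELECT statements are walked via the sqlparse tree; DELETE/UPDATE/INSERT
# fall back to the regular expressions defined above.
print(extract_table_from_sql(raw))  # expected: ['users']
```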
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import ctypes +import os + +try: + from ctypes import wintypes +except ImportError: + raise AssertionError('BUG: Should not call here.') + +ntdll = ctypes.WinDLL('ntdll') +kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) + +# WIN32API Definitions +PROCESS_VM_READ = 0x0010 +PROCESS_QUERY_INFORMATION = 0x0400 +SYNCHRONIZE = 0x100000 + +ERROR_INVALID_HANDLE = 0x0006 +ERROR_PARTIAL_COPY = 0x012B + + +to_pointer = ctypes.POINTER + +PULONG = to_pointer(wintypes.ULONG) +ULONG_PTR = wintypes.LPVOID +SIZE_T = ctypes.c_size_t + +kernel32.ReadProcessMemory.argtypes = ( + wintypes.HANDLE, + wintypes.LPCVOID, + wintypes.LPVOID, + SIZE_T, + to_pointer(SIZE_T)) + +kernel32.CloseHandle.argtypes = (wintypes.HANDLE,) + +kernel32.GetCurrentProcess.restype = wintypes.HANDLE +kernel32.GetCurrentProcess.argtypes = () + +kernel32.OpenProcess.restype = wintypes.HANDLE +kernel32.OpenProcess.argtypes = ( + wintypes.DWORD, + wintypes.BOOL, + wintypes.DWORD) + +kernel32.WaitForSingleObject.restype = wintypes.DWORD +kernel32.WaitForSingleObject.argtypes = (wintypes.HANDLE, wintypes.DWORD) + +kernel32.GetExitCodeProcess.restype = wintypes.BOOL +kernel32.GetExitCodeProcess.argtypes = (wintypes.HANDLE, wintypes.LPDWORD) + +# NTAPI Definitions +NTSTATUS = wintypes.LONG +PVOID = wintypes.LPVOID +ULONG = wintypes.ULONG +PROCESSINFOCLASS = wintypes.ULONG + +ProcessBasicInformation = 0 +ProcessDebugPort = 7 +ProcessWow64Information = 26 +ProcessImageFileName = 27 +ProcessBreakOnTermination = 29 + +STATUS_UNSUCCESSFUL = NTSTATUS(0xC0000001) +STATUS_INFO_LENGTH_MISMATCH = NTSTATUS(0xC0000004).value +STATUS_INVALID_HANDLE = NTSTATUS(0xC0000008).value +STATUS_OBJECT_TYPE_MISMATCH = NTSTATUS(0xC0000024).value + +WAIT_TIMEOUT = 0x00000102 +STILL_ACTIVE = 259 + + +class UNICODE_STRING(ctypes.Structure): + _fields_ = (('Length', wintypes.USHORT), + ('MaximumLength', wintypes.USHORT), + ('Buffer', wintypes.PWCHAR)) # to_pointer(wintypes.WCHAR) + + +class LIST_ENTRY(ctypes.Structure): + pass + + +RPLIST_ENTRY = to_pointer(LIST_ENTRY) + +LIST_ENTRY._fields_ = (('Flink', RPLIST_ENTRY), + ('Blink', RPLIST_ENTRY)) + + +class LDR_DATA_TABLE_ENTRY(ctypes.Structure): + _fields_ = (('Reserved1', PVOID * 2), + ('InMemoryOrderLinks', LIST_ENTRY), + ('Reserved2', PVOID * 2), + ('DllBase', PVOID), + ('EntryPoint', PVOID), + ('Reserved3', PVOID), + ('FullDllName', UNICODE_STRING), + ('Reserved4', wintypes.BYTE * 8), + ('Reserved5', PVOID * 3), + ('CheckSum', PVOID), + ('TimeDateStamp', wintypes.ULONG)) + + +RPLDR_DATA_TABLE_ENTRY = to_pointer(LDR_DATA_TABLE_ENTRY) + + +class PEB_LDR_DATA(ctypes.Structure): + _fields_ = (('Reserved1', wintypes.BYTE * 8), + ('Reserved2', PVOID * 3), + ('InMemoryOrderModuleList', LIST_ENTRY)) + + +RPPEB_LDR_DATA = to_pointer(PEB_LDR_DATA) + + +class RTL_USER_PROCESS_PARAMETERS(ctypes.Structure): + _fields_ = (('MaximumLength', ULONG), + ('Length', ULONG), + ('Flags', ULONG), + ('DebugFlags', ULONG), + ('ConsoleHandle', PVOID), + ('ConsoleFlags', ULONG), + ('StdInputHandle', PVOID), + ('StdOutputHandle', PVOID), + ('StdErrorHandle', PVOID), + ('CurrentDirectoryPath', 
UNICODE_STRING), + ('CurrentDirectoryHandle', PVOID), + ('DllPath', UNICODE_STRING), + ('ImagePathName', UNICODE_STRING), + ('CommandLine', UNICODE_STRING) + # ... + ) + + +RPRTL_USER_PROCESS_PARAMETERS = to_pointer(RTL_USER_PROCESS_PARAMETERS) +PPS_POST_PROCESS_INIT_ROUTINE = PVOID + + +class PEB(ctypes.Structure): + _fields_ = (('Reserved1', wintypes.BYTE * 2), + ('BeingDebugged', wintypes.BYTE), + ('Reserved2', wintypes.BYTE * 1), + ('Reserved3', PVOID * 2), + ('Ldr', RPPEB_LDR_DATA), + ('ProcessParameters', RPRTL_USER_PROCESS_PARAMETERS), + ('Reserved4', wintypes.BYTE * 104), + ('Reserved5', PVOID * 52), + ('PostProcessInitRoutine', PPS_POST_PROCESS_INIT_ROUTINE), + ('Reserved6', wintypes.BYTE * 128), + ('Reserved7', PVOID * 1), + ('SessionId', wintypes.ULONG)) + + +RPPEB = to_pointer(PEB) + + +class PROCESS_BASIC_INFORMATION(ctypes.Structure): + _fields_ = (('Reserved1', PVOID), + ('PebBaseAddress', RPPEB), + ('Reserved2', PVOID * 2), + ('UniqueProcessId', ULONG_PTR), + ('Reserved3', PVOID)) + + +ntdll.NtQueryInformationProcess.restype = NTSTATUS +ntdll.NtQueryInformationProcess.argtypes = ( + wintypes.HANDLE, + PROCESSINFOCLASS, + PVOID, + wintypes.ULONG, + PULONG) + + +def _win32_get_user_process_params(handle): + info = PROCESS_BASIC_INFORMATION() + status = ntdll.NtQueryInformationProcess(handle, + ProcessBasicInformation, + ctypes.byref(info), + ctypes.sizeof(info), + None) + if status < 0: + raise OSError + + peb = PEB() + address = PVOID.from_buffer(info.PebBaseAddress).value + kernel32.ReadProcessMemory(handle, + address, + ctypes.byref(peb), + ctypes.sizeof(peb), + None) + + params = RTL_USER_PROCESS_PARAMETERS() + n_read = SIZE_T() + address = PVOID.from_buffer(peb.ProcessParameters).value + kernel32.ReadProcessMemory(handle, + address, + ctypes.byref(params), + ctypes.sizeof(params), + ctypes.byref(n_read)) + + return params + + +def win32_get_process_cwd(pid): + """Implement the func with pure WIN32 API. 
+ + Reference: + + - https://stackoverflow.com/questions/14018280/how-to-get-a-process-working-dir-on-windows + - https://stackoverflow.com/questions/35106511/how-to-access-the-peb-of-another-process-with-python-ctypes + + :param pid: Process ID + :return: return process's current working directory + """ + handle = kernel32.OpenProcess(PROCESS_VM_READ | + PROCESS_QUERY_INFORMATION, + False, pid) + + upp = _win32_get_user_process_params(handle) + path = (wintypes.WCHAR * (upp.CurrentDirectoryPath.Length // 2 + 1))() + kernel32.ReadProcessMemory(handle, + upp.CurrentDirectoryPath.Buffer, + ctypes.byref(path), + upp.CurrentDirectoryPath.Length, + None) + kernel32.CloseHandle(handle) + return path.value + + +def win32_get_process_cmdline(pid): + handle = kernel32.OpenProcess(PROCESS_VM_READ | + PROCESS_QUERY_INFORMATION, + False, pid) + + params = _win32_get_user_process_params(handle) + path = (wintypes.WCHAR * (params.CommandLine.Length // 2 + 1))() + kernel32.ReadProcessMemory(handle, + params.CommandLine.Buffer, + ctypes.byref(path), + params.CommandLine.Length, + None) + kernel32.CloseHandle(handle) + return path.value + + +def win32_get_process_path(pid): + handle = kernel32.OpenProcess(PROCESS_VM_READ | + PROCESS_QUERY_INFORMATION, + False, pid) + + upp = _win32_get_user_process_params(handle) + path = (wintypes.WCHAR * (upp.ImagePathName.Length // 2 + 1))() + kernel32.ReadProcessMemory(handle, + upp.ImagePathName.Buffer, + ctypes.byref(path), + upp.ImagePathName.Length, + None) + kernel32.CloseHandle(handle) + return path.value + + +def win32_is_process_running(pid): + if pid in (os.getppid(), os.getpid()): + return True + + handle = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, + False, pid) + exit_code = wintypes.DWORD() + kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) + kernel32.CloseHandle(handle) + return exit_code.value == STILL_ACTIVE + diff --git a/src/gausskernel/dbmind/tools/common/process.py b/src/gausskernel/dbmind/tools/common/process.py new file mode 100644 index 000000000..abd883ba4 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/process.py @@ -0,0 +1,78 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import os + +from dbmind.common import platform + + +class Process: + """A cross-platform class that can be used to obtain process information, including: + + - current working directory + - command line + - executable file path + - alive + + """ + + def __init__(self, pid): + if pid <= 0: + raise ValueError('Invalid process ID %d' % pid) + self._pid = pid + self._cmdline = None + self._path = None + self._cwd = None + + def _alive(self): + if platform.WIN32: + if not platform.win32_is_process_running(self._pid): + return False + + self._cmdline = platform.win32_get_process_cmdline(self._pid) + self._path = platform.win32_get_process_path(self._pid) + self._cwd = platform.win32_get_process_cwd(self._pid) + return True + else: + # unix-like operation systems are ok. 
+ if not os.path.exists('/proc/%d' % self._pid): + return False + + with open('/proc/%d/cmdline' % self._pid, mode='rb') as fp: + self._cmdline = fp.readline().replace(b'\x00', b' ').decode() + self._path = os.readlink('/proc/%d/exe' % self._pid) + self._cwd = os.readlink('/proc/%d/cwd' % self._pid) + return True + + @property + def alive(self): + return self._alive() + + @property + def cmdline(self): + if self._cmdline is None: + self._alive() + return self._cmdline + + @property + def path(self): + if self._path is None: + self._alive() + return self._path + + @property + def cwd(self): + # `cwd` can change so call `self.alive()` momentarily. + if self._alive(): + return self._cwd + else: + return None \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/common/security.py b/src/gausskernel/dbmind/tools/common/security.py new file mode 100644 index 000000000..d7f47ec4b --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/security.py @@ -0,0 +1,92 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import base64 +import hmac +import random +import secrets +import string + +from Crypto.Cipher import AES +from Crypto.Util.Padding import pad, unpad + + +def check_path_valid(path): + char_black_list = (' ', '|', ';', '&', '$', '<', '>', '`', '\\', + '\'', '"', '{', '}', '(', ')', '[', ']', '~', + '*', '?', '!', '\n') + + if path.strip() == '': + return True + + for char in char_black_list: + if path.find(char) >= 0: + return False + + return True + + +def unsafe_random_string(length): + """Used to generate a fixed-length random + string which is not used in the sensitive scenarios.""" + alphabet = string.ascii_letters + string.digits + return ''.join(random.choice(alphabet) for _ in range(length)) + + +def safe_random_string(length): + """Used to generate a fixed-length random + string which is used in the security and cryptography.""" + alphabet = string.ascii_letters + string.digits + return ''.join(secrets.choice(alphabet) for _ in range(length)) + + +def generate_an_iv() -> str: + """Generate and return an initialization vector for AES.""" + return safe_random_string(16) + + +def encrypt(s1: str, s2: str, iv: str, pt: str) -> str: + """Encrypt a series of plain text with two strings. + :param s1: string #1 + :param s2: string #2 + :param iv: initialization vector, used by AES256-CBC + :param pt: plain text + :return: cipher text + """ + if pt == '': + return '' + nb = 16 # the number of block including cipher and plain text + h = hmac.new(s1.encode(), s2.encode(), digestmod='sha256') + master_key = h.hexdigest()[:32].encode() # 32 bytes means AES256 + cipher = AES.new(master_key, AES.MODE_CBC, iv.encode()) + pt = pt.encode() + ct = cipher.encrypt(pad(pt, nb)) + return base64.b64encode(ct).decode() + + +def decrypt(s1: str, s2: str, iv: str, ct: str) -> str: + """Decrypt a series of cipher text with two strings. 
+ :param s1: string #1 + :param s2: string #2 + :param iv: initialization vector, used by AES256-CBC + :param ct: cipher text + :return: plain text + """ + if ct == '': + return '' + nb = 16 # the number of block including cipher and plain text + h = hmac.new(s1.encode(), s2.encode(), digestmod='sha256') + master_key = h.hexdigest()[:32].encode() # 32 bytes means AES256 + cipher = AES.new(master_key, AES.MODE_CBC, iv.encode()) + ct = base64.b64decode(ct) + pt = unpad(cipher.decrypt(ct), nb) + return pt.decode() diff --git a/src/gausskernel/dbmind/tools/common/tsdb/__init__.py b/src/gausskernel/dbmind/tools/common/tsdb/__init__.py new file mode 100644 index 000000000..2dca1cc92 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/tsdb/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from .tsdb_client_factory import TsdbClientFactory + diff --git a/src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/__init__.py b/src/gausskernel/dbmind/tools/common/tsdb/influxdb_client.py similarity index 100% rename from src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/__init__.py rename to src/gausskernel/dbmind/tools/common/tsdb/influxdb_client.py diff --git a/src/gausskernel/dbmind/tools/ai_manager/tools/__init__.py b/src/gausskernel/dbmind/tools/common/tsdb/opentsdb_client.py similarity index 100% rename from src/gausskernel/dbmind/tools/ai_manager/tools/__init__.py rename to src/gausskernel/dbmind/tools/common/tsdb/opentsdb_client.py diff --git a/src/gausskernel/dbmind/tools/common/tsdb/prometheus_client.py b/src/gausskernel/dbmind/tools/common/tsdb/prometheus_client.py new file mode 100644 index 000000000..a1d47fe97 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/tsdb/prometheus_client.py @@ -0,0 +1,295 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import logging +from datetime import datetime, timedelta +from urllib.parse import urlparse + +import requests +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + +from ..exceptions import ApiClientException +from ..types import Sequence +from .tsdb_client import TsdbClient + +# In case of a connection failure try 2 more times +MAX_REQUEST_RETRIES = 3 +# wait 1 second before retrying in case of an error +RETRY_BACKOFF_FACTOR = 1 +# retry only on these status +RETRY_ON_STATUS = [408, 429, 500, 502, 503, 504] + + +# Standardized the format of return value. 
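`encrypt()`/`decrypt()` in `security.py` derive a one-off AES-256 key by HMAC-SHA256ing two secrets together, so decryption needs the same secret pair plus the stored IV; the module depends on PyCryptodome for `Crypto.Cipher`. A round-trip sketch with placeholder secrets:

```python
from dbmind.common.security import decrypt, encrypt, generate_an_iv

s1, s2 = 'server-secret', 'per-user-salt'  # illustrative secrets
iv = generate_an_iv()                      # 16-char IV; persist it with ct

ct = encrypt(s1, s2, iv, 'my-db-password')
assert decrypt(s1, s2, iv, ct) == 'my-db-password'
```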
+def _standardize(data): + rv = [] + for datum in data: + if 'values' not in datum: + datum['values'] = [datum.pop('value')] + datum_metric = datum.get('metric') or {} + datum_values = datum.get('values') or {} + metric_name = datum_metric.pop('__name__', None) + rv.append( + Sequence( + timestamps=tuple(int(item[0] * 1000) for item in datum_values), + values=tuple(float(item[1]) for item in datum_values), + name=metric_name, + labels=datum_metric + ) + ) + return rv + + +class PrometheusClient(TsdbClient): + """ + A Class for collection of metrics from a Prometheus Host. + :param url: (str) url for the prometheus host + :param headers: (dict) A dictionary of http headers to be used to communicate with + the host. Example: {"Authorization": "bearer my_oauth_token_to_the_host"} + :param disable_ssl: (bool) If set to True, will disable ssl certificate verification + for the http requests made to the prometheus host + :param retry: (Retry) Retry adapter to retry on HTTP errors + """ + + def __init__( + self, + url: str, + headers: dict = None, + disable_ssl: bool = False, + retry: Retry = None + ): + """Functions as a Constructor for the class PrometheusConnect.""" + if url is None: + raise TypeError("missing url") + + self.headers = headers + self.url = url + self.prometheus_host = urlparse(self.url).netloc + self._all_metrics = None + self.ssl_verification = not disable_ssl + + if retry is None: + retry = Retry( + total=MAX_REQUEST_RETRIES, + backoff_factor=RETRY_BACKOFF_FACTOR, + status_forcelist=RETRY_ON_STATUS, + ) + + self._session = requests.Session() + self._session.mount(self.url, HTTPAdapter(max_retries=retry)) + + def check_connection(self, params: dict = None) -> bool: + """ + Check Prometheus connection. + :param params: (dict) Optional dictionary containing parameters to be + sent along with the API request. + :returns: (bool) True if the endpoint can be reached, False if cannot be reached. + """ + response = self._session.get( + "{0}/".format(self.url), + verify=self.ssl_verification, + headers=self.headers, + params=params + ) + return response.ok + + def get_current_metric_value( + self, metric_name: str, label_config: dict = None, params: dict = None + ): + r""" + Get the current metric target for the specified metric and label configuration. 
+ :param metric_name: (str) The name of the metric + :param label_config: (dict) A dictionary that specifies metric labels and their + values + :param params: (dict) Optional dictionary containing GET parameters to be sent + along with the API request, such as "time" + :returns: (list) A list of current metric values for the specified metric + :raises: + (RequestException) Raises an exception in case of a connection error + (ApiClientException) Raises in case of non 200 response status code + """ + params = params or {} + data = [] + if label_config: + label_list = [str(key + "=" + "'" + label_config[key] + "'") for key in label_config] + query = metric_name + "{" + ",".join(label_list) + "}" + else: + query = metric_name + + # using the query API to get raw data + response = self._session.get( + "{0}/api/v1/query".format(self.url), + params={**{"query": query}, **params}, + verify=self.ssl_verification, + headers=self.headers, + ) + + if response.status_code == 200: + data += response.json()["data"]["result"] + else: + raise ApiClientException( + "HTTP Status Code {} ({!r})".format(response.status_code, response.content) + ) + return _standardize(data) + + def get_metric_range_data( + self, + metric_name: str, + label_config: dict = None, + start_time: datetime = (datetime.now() - timedelta(minutes=10)), + end_time: datetime = datetime.now(), + chunk_size: timedelta = None, + step: str = None, + params: dict = None + ): + r""" + Get the current metric target for the specified metric and label configuration. + :param metric_name: (str) The name of the metric. + :param label_config: (dict) A dictionary specifying metric labels and their + values. + :param start_time: (datetime) A datetime object that specifies the metric range start time. + :param end_time: (datetime) A datetime object that specifies the metric range end time. + :param chunk_size: (timedelta) Duration of metric data downloaded in one request. 
For + example, setting it to timedelta(hours=3) will download 3 hours worth of data in each + request made to the prometheus host + :param step: (str) Query resolution step width in duration format or float number of seconds + :param params: (dict) Optional dictionary containing GET parameters to be + sent along with the API request, such as "time" + :return: (list) A list of metric data for the specified metric in the given time + range + :raises: + (RequestException) Raises an exception in case of a connection error + (ApiClientException) Raises in case of non 200 response status code + """ + params = params or {} + data = [] + + if not (isinstance(start_time, datetime) and isinstance(end_time, datetime)): + raise TypeError("start_time and end_time can only be of type datetime.datetime") + + start = round(start_time.timestamp()) + end = round(end_time.timestamp()) + if start > end: + return data + + chunk_seconds = round((end_time - start_time).total_seconds()) + + if label_config: + label_list = [str(key + "=" + "'" + label_config[key] + "'") for key in label_config] + query = metric_name + "{" + ",".join(label_list) + "}" + else: + query = metric_name + + if step is None: + # using the query API to get raw data + response = self._session.get( + "{0}/api/v1/query".format(self.url), + params={ + **{ + "query": query + "[" + str(chunk_seconds) + "s" + "]", + "time": end, + }, + **params, + }, + verify=self.ssl_verification, + headers=self.headers, + ) + else: + # using the query_range API to get raw data + response = self._session.get( + "{0}/api/v1/query_range".format(self.url), + params={**{"query": query, "start": start, "end": end, "step": step}, **params}, + verify=self.ssl_verification, + headers=self.headers, + ) + + if response.status_code == 200: + data += response.json()["data"]["result"] + else: + raise ApiClientException( + "HTTP Status Code {} ({!r})".format(response.status_code, response.content) + ) + + + logging.debug('Fetched sequence (%s) from tsdb from %s to %s. The length of sequence is %s.', + metric_name, start_time, end_time, len(data)) + return _standardize(data) + + def custom_query(self, query: str, params: dict = None): + """ + Send a custom query to a Prometheus Host. + This method takes as input a string which will be sent as a query to + the specified Prometheus Host. This query is a PromQL query. + :param query: (str) This is a PromQL query, a few examples can be found + at https://prometheus.io/docs/prometheus/latest/querying/examples/ + :param params: (dict) Optional dictionary containing GET parameters to be + sent along with the API request, such as "time" + :returns: (list) A list of metric data received in response of the query sent + :raises: + (RequestException) Raises an exception in case of a connection error + (PrometheusApiClientException) Raises in case of non 200 response status code + """ + params = params or {} + query = str(query) + # using the query API to get raw data + response = self._session.get( + "{0}/api/v1/query".format(self.url), + params={**{"query": query}, **params}, + verify=self.ssl_verification, + headers=self.headers, + ) + if response.status_code == 200: + data = response.json()["data"]["result"] + else: + raise ApiClientException( + "HTTP Status Code {} ({!r})".format(response.status_code, response.content) + ) + + return _standardize(data) + + def custom_query_range(self, query: str, start_time: datetime, end_time: datetime, + step: str, params: dict = None): + """ + Send a query_range to a Prometheus Host. 
+ This method takes as input a string which will be sent as a query to + the specified Prometheus Host. This query is a PromQL query. + :param query: (str) This is a PromQL query, a few examples can be found + at https://prometheus.io/docs/prometheus/latest/querying/examples/ + :param start_time: (datetime) A datetime object that specifies the query range start time. + :param end_time: (datetime) A datetime object that specifies the query range end time. + :param step: (str) Query resolution step width in duration format or float number of seconds + :param params: (dict) Optional dictionary containing GET parameters to be + sent along with the API request, such as "timeout" + :returns: (dict) A dict of metric data received in response of the query sent + :raises: + (RequestException) Raises an exception in case of a connection error + (PrometheusApiClientException) Raises in case of non 200 response status code + """ + start = round(start_time.timestamp()) + end = round(end_time.timestamp()) + params = params or {} + query = str(query) + # using the query_range API to get raw data + response = self._session.get( + "{0}/api/v1/query_range".format(self.url), + params={**{"query": query, "start": start, "end": end, "step": step}, **params}, + verify=self.ssl_verification, + headers=self.headers, + ) + if response.status_code == 200: + data = response.json()["data"]["result"] + else: + raise ApiClientException( + "HTTP Status Code {} ({!r})".format(response.status_code, response.content) + ) + return _standardize(data) diff --git a/src/gausskernel/dbmind/tools/common/tsdb/tsdb_client.py b/src/gausskernel/dbmind/tools/common/tsdb/tsdb_client.py new file mode 100644 index 000000000..6a7c8c942 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/tsdb/tsdb_client.py @@ -0,0 +1,50 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from datetime import datetime, timedelta + + +class TsdbClient(object): + """The common baseclass of various time series database + implementation classes, which is actually an interface, + and other subclasses are implemented based on this + interface in order to keep consistent format of + return value to the upper layer's calling. + + ..Attention:: + + The format of return value should be a list of Sequence. 
+ + """ + + def check_connection(self, params: dict = None) -> bool: + """check to connect tsdb client""" + pass + + def get_current_metric_value(self, + metric_name: str, + label_config: dict = None, + params: dict = None): + """get metric target from tsdb""" + pass + + def get_metric_range_data(self, + metric_name: str, + label_config: dict = None, + start_time: datetime = (datetime.now() - timedelta(minutes=10)), + end_time: datetime = datetime.now(), + chunk_size: timedelta = None, + step: str = None, + params: dict = None): + """get metric target from tsdb""" + pass diff --git a/src/gausskernel/dbmind/tools/common/tsdb/tsdb_client_factory.py b/src/gausskernel/dbmind/tools/common/tsdb/tsdb_client_factory.py new file mode 100644 index 000000000..eac9b59a7 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/tsdb/tsdb_client_factory.py @@ -0,0 +1,48 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import threading + +from dbmind.common.exceptions import ApiClientException +from dbmind.common.tsdb.tsdb_client import TsdbClient +from dbmind.common.tsdb.prometheus_client import PrometheusClient +from dbmind import global_vars + + +class TsdbClientFactory(object): + tsdb_client = None + shared_lock = threading.Lock() + + @classmethod + def get_tsdb_client(cls) -> TsdbClient: + if cls.tsdb_client is not None: + return cls.tsdb_client + with cls.shared_lock: + if cls.tsdb_client is None: + cls._init_client() + return cls.tsdb_client + + @classmethod + def _init_client(cls): + configs = global_vars.configs + tsdb_name = configs.get('TSDB', 'name') + host = configs.get('TSDB', 'host') + port = configs.get('TSDB', 'port') + url = 'http://' + host + ':' + port + if tsdb_name == 'prometheus': + client = PrometheusClient(url=url) + if not client.check_connection(): + raise ApiClientException("Failed to connect TSDB url.") + cls.tsdb_client = client + if cls.tsdb_client is None: + raise ApiClientException("Failed to init TSDB client, please check config file") diff --git a/src/gausskernel/dbmind/tools/common/types/__init__.py b/src/gausskernel/dbmind/tools/common/types/__init__.py new file mode 100644 index 000000000..2fe9b065f --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/types/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
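`TsdbClientFactory` lazily builds a singleton `PrometheusClient` from the `[TSDB]` section of the global configuration, using double-checked locking. The client can also be driven directly; a sketch, assuming a Prometheus instance at the given address, an illustrative exporter metric name, and that `Sequence` exposes the `name`/`labels`/`values` fields it is constructed with in `_standardize()`:

```python
from datetime import datetime, timedelta

from dbmind.common.tsdb.prometheus_client import PrometheusClient

client = PrometheusClient(url='http://127.0.0.1:9090')  # address assumed
if client.check_connection():
    sequences = client.get_metric_range_data(
        'os_cpu_usage',                                  # metric name assumed
        label_config={'instance': '127.0.0.1:9100'},
        start_time=datetime.now() - timedelta(minutes=30),
        end_time=datetime.now(),
        step='15s',                                      # uses the query_range API
    )
    for seq in sequences:
        # Timestamps are in milliseconds, values are floats.
        print(seq.name, seq.labels, len(seq.values))
```

In service code, `TsdbClientFactory.get_tsdb_client()` is the preferred entry point, since it validates the connection once and caches the instance.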
+from .enumerations import ALARM_LEVEL, ALARM_TYPES, ALARM_STATUS +from .alarm import Alarm +from .root_cause import RootCause +from .sequence import Sequence +from .misc import SlowQuery diff --git a/src/gausskernel/dbmind/tools/common/types/alarm.py b/src/gausskernel/dbmind/tools/common/types/alarm.py new file mode 100644 index 000000000..3c4a8a73d --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/types/alarm.py @@ -0,0 +1,80 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from typing import Optional, Iterable, Union + +from .root_cause import RootCause +from .enumerations import ALARM_TYPES, ALARM_LEVEL + + +class Alarm: + def __init__(self, + host: Union[str], + alarm_content: str, + alarm_type: ALARM_TYPES, + alarm_subtype=None, + metric_name: str = None, + alarm_level: ALARM_LEVEL = ALARM_LEVEL.ERROR, + alarm_cause: Optional[Union[RootCause, Iterable[RootCause]]] = None, + extra=None): + self.host = host + self.alarm_content = alarm_content + self.alarm_type = alarm_type + self.alarm_subtype = alarm_subtype + self.metric_name = metric_name + self.alarm_level = alarm_level + self.start_timestamp = self.end_timestamp = None + + self.extra = extra + + if isinstance(alarm_cause, Iterable): + self.alarm_cause = list(alarm_cause) + elif isinstance(alarm_cause, RootCause): + self.alarm_cause = [alarm_cause] + else: + self.alarm_cause = list() + + def add_reason(self, root_cause): + self.alarm_cause.append(root_cause) + return self + + def set_timestamp(self, start, end=None): + self.start_timestamp = start + self.end_timestamp = end + return self + + def __repr__(self): + return '[%s](%s)' % ( + self.alarm_content, self.alarm_cause + ) + + @property + def root_causes(self): + lines = list() + index = 1 + for c in self.alarm_cause: + lines.append( + '%d. %s: (%.2f) %s' % (index, c.title, c.probability, c.detail) + ) + index += 1 + return '\n'.join(lines) + + @property + def suggestions(self): + lines = list() + index = 1 + for c in self.alarm_cause: + lines.append( + '%d. %s' % (index, c.suggestion if c.suggestion else 'No suggestions.') + ) + index += 1 + return '\n'.join(lines) diff --git a/src/gausskernel/dbmind/tools/common/types/enumerations.py b/src/gausskernel/dbmind/tools/common/types/enumerations.py new file mode 100644 index 000000000..83c4afd6d --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/types/enumerations.py @@ -0,0 +1,39 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
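+# Note: the numeric values of ALARM_LEVEL below match the severity levels
+# of Python's standard logging module (CRITICAL=50 ... NOTSET=0).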
+from enum import IntEnum + + +class ALARM_LEVEL(IntEnum): + CRITICAL = 50 + FATAL = CRITICAL + ERROR = 40 + WARNING = 30 + WARN = WARNING + INFO = 20 + NOTICE = INFO + DEBUG = 10 + NOTSET = 0 + + def __str__(self): + return self._name_ + + +class ALARM_STATUS: + RESOLVED = 'resolved' + UNRESOLVED = 'unresolved' + + +class ALARM_TYPES: + SYSTEM = 'SYSTEM' + SLOW_QUERY = 'SLOW_QUERY' + ALARM_LOG = 'ALARM_LOG' diff --git a/src/gausskernel/dbmind/tools/common/types/misc.py b/src/gausskernel/dbmind/tools/common/types/misc.py new file mode 100644 index 000000000..fa7767d27 --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/types/misc.py @@ -0,0 +1,78 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from .root_cause import RootCause + + +class SlowQuery: + def __init__(self, db_host, db_port, db_name, schema_name, query, start_timestamp, duration_time, + hit_rate=None, fetch_rate=None, cpu_time=None, data_io_time=None, template_id=None, sort_count=None, + sort_mem_used=None, sort_spill_count=None, hash_count=None, hash_mem_used=None, hash_spill_count=None, + lock_wait_count=None, lwlock_wait_count=None, n_returned_rows=None, n_tuples_returned=None, + n_tuples_fetched=None, n_tuples_inserted=None, n_tuples_updated=None, n_tuples_deleted=None, **kwargs): + self.db_host = db_host + self.db_port = db_port + self.schema_name = schema_name + self.db_name = db_name + self.tables_name = None + self.query = query + self.start_at = start_timestamp + self.duration_time = duration_time + self.hit_rate = hit_rate + self.fetch_rate = fetch_rate + self.cpu_time = cpu_time + self.data_io_time = data_io_time + self.template_id = template_id + self.sort_count = sort_count + self.sort_mem_used = sort_mem_used + self.sort_spill_count = sort_spill_count + self.hash_count = hash_count + self.hash_mem_used = hash_mem_used + self.hash_spill_count = hash_spill_count + self.lwlock_wait_count = lwlock_wait_count + self.lock_wait_count = lock_wait_count + self.n_returned_rows = n_returned_rows + self.n_tuples_returned = n_tuples_returned + self.n_tuples_fetched = n_tuples_fetched + self.n_tuples_inserted = n_tuples_inserted + self.n_tuples_updated = n_tuples_updated + self.n_tuples_deleted = n_tuples_deleted + self.alarm_cause = list() + + self.kwargs = kwargs + + def add_cause(self, root_cause: RootCause): + self.alarm_cause.append(root_cause) + return self + + @property + def root_causes(self): + lines = list() + index = 1 + for c in self.alarm_cause: + lines.append( + '%d. %s: (%.2f) %s' % (index, c.title, c.probability, c.detail) + ) + index += 1 + return '\n'.join(lines) + + @property + def suggestions(self): + lines = list() + index = 1 + for c in self.alarm_cause: + lines.append( + '%d. 
%s' % (index, c.suggestion if c.suggestion else 'No suggestions.') + ) + index += 1 + return '\n'.join(lines) + diff --git a/src/gausskernel/dbmind/tools/common/types/root_cause.py b/src/gausskernel/dbmind/tools/common/types/root_cause.py new file mode 100644 index 000000000..48987ed0a --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/types/root_cause.py @@ -0,0 +1,196 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from .enumerations import ALARM_LEVEL + + +class _Define: + def __init__(self, category, + detail=None, + suggestion='', + level=ALARM_LEVEL.ERROR): + self.category = category + self.detail = detail + self.level = level + self.suggestion = suggestion + + +class RootCause: + # Format: + # title = _Define('category', 'default_detail', default_level=INFO) + # demos: + SYSTEM_ERROR = _Define('[SYSTEM]', 'System error') + DISK_SPILL = _Define('[SYSTEM][DISK]', 'Disk already spills') + + # system level + WORKING_CPU_CONTENTION = _Define('[SYSTEM][CPU]', 'Workloads compete to use CPU resources.') + NONWORKING_CPU_CONTENTION = _Define('[SYSTEM][CPU]', 'Non-business tasks consume CPU resources.') + WORKING_IO_CONTENTION = _Define('[SYSTEM][IO]', 'Workloads compete to use IO resources.') + NONWORKING_IO_CONTENTION = _Define('[SYSTEM][IO]', 'Non-business tasks consume IO resources.') + WORKING_MEM_CONTENTION = _Define('[SYSTEM][MEMORY]', 'Workloads compete to use memory resources.') + NONWORKING_MEM_CONTENTION = _Define('[SYSTEM][MEMORY]', 'Non-business tasks consume memory resources.') + LOCK_CONTENTION = _Define('[SYSTEM][LOCK]', 'Query waits for locks.') + SMALL_SHARED_BUFFER = _Define('[SYSTEM][BUFFER]', 'shared buffer is small.') + TABLE_EXPANSION = _Define('[SYSTEM][EXPANSION]', 'too many dirty tuples exist.') + VACUUM = _Define('[SYSTEM][VACUUM]', 'doing vacuum.') + BGWRITER_CHECKPOINT = _Define('[SYSTEM][CHECKPOINT]', 'background checkpoint.') + ANALYZE = _Define('[SYSTEM][ANALYZE]', 'analyzing tables.') + WALWRITER = _Define('[SYSTEM][WAL]', 'writing WAL.') + FULL_CONNECTIONS = _Define('[SYSTEM][CONNECTIONS]', 'too many connections.') + COMPLEX_SLOW_QUERY = _Define('[SYSTEM][SLOWQUERY]', 'slow queries exist.') + LOW_NETWORK_BANDWIDTH = _Define('[SYSTEM][NETWORK]', 'network is busy.') + LOW_IO_BANDWIDTH = _Define('[SYSTEM][IO]', 'IO is busy.') + LOW_CPU_IDLE = _Define('[SYSTEM][IO]', 'CPU is busy.') + DISK_WILL_SPILL = _Define('[SYSTEM][DISK]', 'Disk will spill.') + DISK_BURST_INCREASE = _Define('[SYSTEM][DISK]', 'Disk usage suddenly increase.') + # slow query + LOCK_CONTENTION_SQL = _Define('[SLOW QUERY][LOCK]', + 'There is lock competition during statement execution, and SQL is blocked, ' + 'detail: {lock_info}.', + 'Adjust the business.') + LARGE_DEAD_RATE = _Define('[SLOW QUERY][TABLE EXPANSION]', + 'The dead tuples in the related table of the SQL are relatively large, ' + 'which affects the execution performance, ' + 'detail: {large_table}, {dead_rate}.', + 'Reasonably adjust autovacuum_related parameters to ensure that dead_rate are in ' + 'a reasonable range.') + LARGE_FETCHED_TUPLES = 
_Define('[SLOW QUERY][FETCHED TUPLES]', + 'The SQL scans a large number of tuples, ' + 'detail: fetched_tuples({fetched_tuples}), fetched_tuples_rate({fetched_tuples_rate})', + 'Check whether the field has an index;' + 'Avoid operations such as select count(*);' + 'Whether syntax problems cause the statement index to fail, the general ' + 'index failure cases include: ' + '1). The range is too large; 2). There is an implicit conversion; ' + '3). Use fuzzy query, etc;') + LARGE_RETURNED_ROWS = _Define('[SLOW QUERY][FETCHED ROWS]', + 'The SQL return a large number of tuples, ' + 'detail: returned_rows({returned_rows}), returned_rows_rate({returned_rows_rate})', + 'Optimize business statements and try to avoid similar operations.') + SMALL_SHARED_BUFFER_SQL = _Define('[SLOW SQL][SHARED BUFFER]', + 'The database shared_buffers parameter setting may be too small, ' + 'resulting in a small cache hit rate,' + 'detail: hit_rate({hit_rate})', + 'It is recommended to adjust the shared_buffers parameter reasonably') + UPDATED_REDUNDANT_INDEX = _Define('[SLOW SQL][REDUNDANT INDEX]', + 'There are redundant indexes in UPDATED related tables,' + 'detail: {redundant_index}.', + 'Delete irrelevant redundant indexes.') + LARGE_UPDATED_TUPLES = _Define('[SLOW SQL][UPDATED TUPLES]', + 'The UPDATE operation has a large number of update tuples, ' + 'resulting in slow SQL performance,' + 'detail: updated_tuples({updated_tuples}), updated_tuples_rate({updated_tuples_rate})', + 'It is recommended to plan business reasonably and perform staggered peaks.') + INSERTED_REDUNDANT_INDEX = _Define('[SLOW SQL][REDUNDANT INDEX]', + 'There are redundant indexes in INSERTED related tables,' + 'detail: {redundant_index}.', + 'Delete irrelevant redundant indexes.') + INSERTED_INDEX_NUMBER = _Define('[SLOW SQL][INDEX NUMBER]', + 'INSERT involves too many indexes in the table, ' + 'which affects insert performance, ' + 'detail: {index}', + 'The more indexes there are, the greater the maintenance cost for insert operations' + ' and the slower the speed of inserting a piece of data. 
Therefore, design business'
+                                    ' indexes reasonably.')
+    LARGE_INSERTED_TUPLES = _Define('[SLOW SQL][INSERTED TUPLES]',
+                                    'The INSERT operation has a large number of inserted tuples, '
+                                    'resulting in slow SQL performance,'
+                                    'detail: inserted_tuples({inserted_tuples}), inserted_tuples_rate({inserted_tuples_rate})',
+                                    'It is recommended to plan business reasonably and perform staggered peaks.')
+    DELETED_REDUNDANT_INDEX = _Define('[SLOW SQL][REDUNDANT INDEX]',
+                                      'There are redundant indexes in DELETED related tables,'
+                                      'detail: {redundant_index}.',
+                                      'Delete irrelevant redundant indexes.')
+    LARGE_DELETED_TUPLES = _Define('[SLOW SQL][DELETED TUPLES]',
+                                   'The DELETE operation has a large number of deleted tuples, '
+                                   'resulting in slow SQL performance,'
+                                   'detail: deleted_tuples({deleted_tuples}), deleted_tuples_rate({deleted_tuples_rate})',
+                                   'It is recommended to plan business reasonably and perform staggered peaks.')
+    EXTERNAL_SORT = _Define('[SLOW SQL][EXTERNAL SORT]',
+                            'External sort is suspected during SQL execution, '
+                            'resulting in slow SQL performance, '
+                            'detail: {external_sort}',
+                            'Reasonably increase the work_mem parameter according to business needs')
+    LARGE_TPS = _Define('[SLOW SQL][LOAD]',
+                        'During SQL execution, the database load is concentrated, resulting in poor performance,'
+                        'detail: tps({tps})',
+                        '')
+    VACUUM_SQL = _Define('[SLOW SQL][VACUUM]',
+                         'During SQL execution, related tables are executing VACUUM tasks, '
+                         'resulting in slow queries,'
+                         'detail: {autovacuum}',
+                         '')
+    ANALYZE_SQL = _Define('[SLOW SQL][ANALYZE]',
+                          'During SQL execution, related tables are executing ANALYZE tasks, '
+                          'resulting in slow queries,'
+                          'detail: {autoanalyze}',
+                          '')
+    SYSTEM_SQL = _Define('[SLOW SQL][SYSTEM]',
+                         'During SQL execution, system resources are insufficient,'
+                         'detail: {system_cause}',
+                         '')
+    DATABASE_VIEW = _Define('[SLOW SQL][VIEW]',
+                            'Poor performance of database views',
+                            'System table query service, no suggestion.')
+    ILLEGAL_SQL = _Define('[SLOW SQL][SQL]',
+                          'Only UPDATE, DELETE, INSERT and SELECT are supported.',
+                          '')
+    LACK_INFORMATION = _Define('[SLOW SQL][UNKNOWN]',
+                               'Cannot diagnose due to lack of information.',
+                               '')
+    UNKNOWN = _Define('[SLOW SQL][UNKNOWN]',
+                      'UNKNOWN',
+                      '')
+    # ...
+    # Define more root causes *above*.
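+    # Illustrative usage, based on the helpers defined below: a predefined
+    # entry is materialized via ``get`` and its detail placeholders are
+    # filled in via ``format``, e.g.
+    #   RootCause.get('lock_contention_sql').format(lock_info='...')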
+ + @staticmethod + def has(title): + return isinstance(title, str) and hasattr(RootCause, title.upper()) + + @staticmethod + def get(title): + """Generate dynamic ``RootCause`` object.""" + defined = getattr(RootCause, title.upper()) + if not isinstance(defined, _Define): + raise TypeError('Wrong ROOTCAUSE definition.') + return RootCause(title.upper(), 1., defined) + + def format(self, *args, **kwargs): + self.detail = self.detail.format(*args, **kwargs) + return self + + def __init__(self, title, probability, defined): + self.title = title + self.probability = probability + self.category = defined.category + self.detail = defined.detail + self.level = defined.level + self.suggestion = defined.suggestion + + def set_probability(self, probability): + self.probability = probability + return self + + def set_detail(self, detail): + self.detail = detail + return self + + def set_level(self, level): + self.level = level + return self + + def __repr__(self): + return 'RootCause{title=%s, category=%s, level=%s, detail=%s, prob=%f}' % ( + self.title, self.category, self.level, self.detail, self.probability + ) + diff --git a/src/gausskernel/dbmind/tools/common/types/sequence.py b/src/gausskernel/dbmind/tools/common/types/sequence.py new file mode 100644 index 000000000..76004833d --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/types/sequence.py @@ -0,0 +1,237 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from typing import Optional + +from dbmind.common.algorithm.basic import binary_search +from dbmind.common.algorithm.basic import how_many_lesser_elements, how_many_larger_elements +from ..either import OptionalContainer, OptionalValue +from ..utils import cached_property + +EMPTY_TUPLE = tuple() + + +class Sequence: + def __init__(self, timestamps=None, values=None, name=None, step=None, labels=None): + """Sequence is an **immutable** data structure, which wraps time series and + its information. + + .. attention:: + + All properties are cached property due to immutability. + Forced to modify a Sequence object could cause error. + + It is one and only representation for time series in DBMind.""" + timestamps = OptionalValue(timestamps).get(EMPTY_TUPLE) + values = OptionalValue(values).get(EMPTY_TUPLE) + self._timestamps = tuple(timestamps) + self._values = tuple(values) + success, message = Sequence._check_validity(self._timestamps, self._values) + if not success: + raise ValueError(message) + + # ``self.name`` is an optional variable. + self.name = name + self._step = step + self._labels = labels or {} + # Attach sub-sequence to parent-sequence with logical pointer. + # By this means, sub-sequence can avoid redundant records. + self._parent: Sequence = None + self._parent_start = None + self._parent_end = None + + @staticmethod + def _check_validity(timestamps, values): + # invalid scenarios + if None in (timestamps, values): + return False, 'NoneType object is not iterable.' + if len(timestamps) != len(values): + return False, 'The length between Timestamps (%d) and values (%d) must be equal.' 
% ( + len(timestamps), len(values) + ) + if len(timestamps) != len(set(timestamps)): + return False, 'The sequence prohibits duplicate timestamp.' + if not all(x < y for x, y in zip(timestamps, timestamps[1:])): + return False, 'Timestamps must be strictly increasing.' + for t in timestamps: + if type(t) is not int: + return False, 'The type of timestamp must be integer.' + + # valid + return True, None + + @staticmethod + def _create_sub_sequence(parent, ts_start, ts_end): + """This Sequence slicing method is not the same as list slicing. List in Python + slices from start index till (end - 1), specified as list elements. + But the sub-sequence includes the last element, i.e., ts_end. + """ + if parent is None: + raise ValueError('Parent should not be NoneType.') + if OptionalContainer(parent._timestamps).get(0) == ts_start and \ + OptionalContainer(parent._timestamps).get(-1) == ts_end: + return parent + + sub = Sequence() + if ts_start > ts_end: + return sub + + sub._parent = parent + sub._parent_start = ts_start + sub._parent_end = ts_end + return sub + + def _get_entity(self): + """Sub-sequence does not store data entity. Hence, the method backtracks to the + start node (aka, head node, ancestor node) and return a quadruple to caller to traverse. + + Return a quadruple: + .. + + (timestamps, values, starting timestamp, ending timestamp). + + """ + this_ts_start = OptionalContainer(self._timestamps).get(0) + this_ts_end = OptionalContainer(self._timestamps).get(-1) + # If current sequence has no parent node, the sequence must be at the start node. + if not self._parent: + return self._timestamps, self._values, this_ts_start, this_ts_end + + # Since current sequence has a parent node, we should backtrack until + # we find the start node and then return the data entity based on + # the data recorded in the start node. + start_node = self + while start_node._parent is not None: + start_node = start_node._parent + + ts_start = OptionalValue(self._parent_start).get(this_ts_start) + ts_end = OptionalValue(self._parent_end).get(this_ts_end) + return start_node._timestamps, start_node._values, ts_start, ts_end + + def get(self, timestamp) -> Optional[int]: + """Get a target by timestamp. + + :return If not found, return None.""" + timestamps, values, ts_start, ts_end = self._get_entity() + if None in (ts_start, ts_end): + return + + if ts_start <= timestamp <= ts_end: + idx = binary_search(timestamps, timestamp) + if idx < 0: + return + return values[idx] + + @cached_property + def length(self): + timestamps, _, ts_start, ts_end = self._get_entity() + if timestamps == EMPTY_TUPLE: + return 0 + # Notice: this is a TRICK for binary search: + # ``how_many_larger_elements()`` can ensure that + # the position of the searching element always stays + # at the position of the last element not greater than it in the array. 
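+        # A worked example, assuming the behavior documented above: for
+        # timestamps = (10, 20, 30, 40) with ts_start = 20 and ts_end = 35,
+        # start_position = 1 and end_position = 2, so length = 2 - 1 + 1 = 2
+        # (the points at 20 and 30).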
+ start_position = how_many_lesser_elements(timestamps, ts_start) + end_position = how_many_larger_elements(timestamps, ts_end) + return end_position - start_position + 1 + + def to_2d_array(self): + return self.timestamps, self.values + + @cached_property + def values(self): + """The property will generate a copy.""" + timestamps, values, ts_start, ts_end = self._get_entity() + return values[how_many_lesser_elements(timestamps, ts_start): + how_many_larger_elements(timestamps, ts_end) + 1] + + @cached_property + def timestamps(self): + """The property will generate a copy.""" + timestamps, values, ts_start, ts_end = self._get_entity() + return timestamps[how_many_lesser_elements(timestamps, ts_start): + how_many_larger_elements(timestamps, ts_end) + 1] + + @cached_property + def step(self): + if self._step is None: + return measure_sequence_interval(self) + return self._step + + @property + def labels(self): + return self._labels + + def copy(self): + return Sequence( + self.timestamps, self.values, self.name, self.step, self.labels + ) + + def to_dict(self): + return { + 'name': self.name, + 'timestamps': self.timestamps, + 'values': self.values, + 'labels': self.labels, + } + + def __getitem__(self, item): + """If parameter ``item`` is a two-tuple, create a sub-sequence and return it. + If ``item`` is an integer, which represents an index (timestamp) of target, search and return + the target. + + :exception raise ValueError while item does not belong to any valid types. + """ + if isinstance(item, int): + return self.get(item) + elif isinstance(item, slice): + raise NotImplementedError + elif isinstance(item, tuple) and len(item) == 2: + # To distinguish with slicing, override the tuple form. + start = OptionalContainer(item).get(0, default=OptionalContainer(self._timestamps).get(0)) + end = OptionalContainer(item).get(1, default=OptionalContainer(self._timestamps).get(-1)) + return Sequence._create_sub_sequence(self, start, end) + else: + raise ValueError('Not support %s type.' % type(item)) + + def __len__(self): + return self.length + + def __repr__(self): + return 'Sequence[%s](%d)%s' % (self.name, self.length, self._labels) + + def __iter__(self): + """Return a pairwise point (timestamp, value).""" + return zip(*self.to_2d_array()) + + def __add__(self, other): + if not isinstance(other, Sequence): + raise TypeError('The data type must be Sequence.') + return Sequence( + timestamps=(self.timestamps + other.timestamps), + values=(self.values + other.values) + ) + + +def measure_sequence_interval(sequence): + histogram = dict() + timestamps = sequence.timestamps + for i in range(0, len(timestamps) - 1): + interval = timestamps[i + 1] - timestamps[i] + histogram[interval] = histogram.get(interval, 0) + 1 + # Calculate the mode of the interval array. + most_interval = most_count = 0 + for interval, count in histogram.items(): + if count > most_count: + most_interval = interval + most_count = count + return int(most_interval) diff --git a/src/gausskernel/dbmind/tools/common/utils.py b/src/gausskernel/dbmind/tools/common/utils.py new file mode 100644 index 000000000..da5875c1f --- /dev/null +++ b/src/gausskernel/dbmind/tools/common/utils.py @@ -0,0 +1,344 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import argparse +import logging +import os +import re +import sys +import types +import time +import multiprocessing +import subprocess +import traceback +import threading +from functools import wraps +from datetime import datetime +from queue import Empty +from logging.handlers import RotatingFileHandler + +RED_FMT = "\033[31;1m{}\033[0m" +GREEN_FMT = "\033[32;1m{}\033[0m" +YELLOW_FMT = "\033[33;1m{}\033[0m" +WHITE_FMT = "\033[37;1m{}\033[0m" + + +class cached_property: + """A decorator for caching a property.""" + + def __init__(self, func): + self.func = func + + def __get__(self, instance, owner): + if instance is None: + return self + + value = self.func(instance) + setattr(instance, self.func.__name__, value) + return value + + +def memoize(func): + """The function is a generic cache, + which won't cache unhashable types (e.g., dict, list) but only the immutable types.""" + memtbl = {} + + @wraps(func) + def wrapper(*args): + if args in memtbl: + return memtbl[args] + else: + rv = func(*args) + memtbl[args] = rv + return rv + + return wrapper + + +def where_am_i(fvars): + """Return the module which current function runs on. + + :param fvars: the return value of function ``globals()``. + """ + file, name = fvars.get('__file__'), fvars.get('__name__') + if None in (file, name): + return None + return name + + +def read_simple_config_file(filepath): + """Read the content of ``key=value`` format configuration file. + The default prefix of comment is sharp (#). e.g., + :: + + # The following is a demonstration. + key1 = value1 + key2 = value2 # some comments + + + """ + conf = dict() + with open(filepath, encoding='UTF-8') as fp: + lines = fp.readlines() + configs = map( + lambda tup: (tup[0].strip(), tup[1].strip()), + filter( + lambda tup: len(tup) == 2, + map( + lambda line: line.split('='), + filter( + lambda line: not (line.startswith('#') or line == ''), + map( + lambda line: line.strip(), + lines + ) + ) + ) + ) + ) + + for name, value in configs: + conf[name] = value + + return conf + + +def write_to_terminal( + message, + level='info', + color=None +): + levels = ('info', 'error') + colors = ('white', 'red', 'green', 'yellow', None) + dbmind_assert(color in colors and level in levels) + + if not isinstance(message, str): + message = str(message) + + # coloring. + if color == 'white': + out_message = WHITE_FMT.format(message) + elif color == 'red': + out_message = RED_FMT.format(message) + elif color == 'green': + out_message = GREEN_FMT.format(message) + elif color == 'yellow': + out_message = YELLOW_FMT.format(message) + else: + out_message = message + + # choosing a streaming. 
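+    # That is: 'error' messages go to stderr, everything else to stdout.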
+ if level == 'error': + sys.stderr.write(out_message) + sys.stderr.write(os.linesep) + sys.stderr.flush() + else: + sys.stdout.write(out_message) + sys.stdout.write(os.linesep) + sys.stdout.flush() + + +class MultiProcessingRFHandler(RotatingFileHandler): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._queue = multiprocessing.Queue(-1) + self._should_exit = False + self._receiv_thr = threading.Thread(target=self._receive) + self._receiv_thr.start() + + def _receive(self): + while True: + try: + record = self._queue.get_nowait() + super().emit(record) + except Empty: + time.sleep(.1) + except (KeyboardInterrupt, SystemExit): + raise + except EOFError: + break + except: + traceback.print_exc(file=sys.stderr) + if self._should_exit and self._queue.empty(): + break + + def _send(self, s): + self._queue.put_nowait(s) + + def emit(self, record): + try: + if record.args: + record.msg = record.msg % record.args + record.args = None + if record.exc_info: + record.exc_info = None + self._send(record) + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handleError(record) + + def close(self): + super().close() + self._should_exit = True + + +class ExceptionCatch: + """Class for catching object exception""" + def __init__(self, strategy='warn', name='UNKNOWN'): + """ + :param strategy: Exception handling strategy + :param name: The object from which the exception came + """ + self.strategy = strategy + self.name = name + + def __get__(self, instance, cls): + if instance is None: + return self + return types.MethodType(self, instance) + + def __call__(self, func): + wraps(func)(self) + + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + if self.strategy == 'warn': + logging.warning(f"[{self.name}] {func.__name__} occurred exception: {str(e)}", exc_info=True) + elif self.strategy == 'exit': + raise e + else: + raise ValueError('Not support strategy %s' % self.strategy) + + return wrapper + + +def set_proc_title(name: str): + new_name = name.encode('ascii', 'replace') + + try: + import ctypes + libc = ctypes.CDLL('libc.so.6') + progname = ctypes.c_char_p.in_dll(libc, '__progname_full') + with open('/proc/self/cmdline') as fp: + old_progname_len = len(fp.readline()) + if old_progname_len > len(new_name): + # padding blank chars + new_name += b' ' * (old_progname_len - len(new_name)) + + # for `ps` command: + # Environment variables are already copied to Python app zone. + # We can get environment variables by `os.environ` module, + # so we can ignore the destroying from the following action. 
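+        # In other words, the strcpy() below overwrites the original
+        # command-line region in place; per the note above, this is safe
+        # because Python has already copied what it needs (e.g., os.environ).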
+        libc.strcpy(progname, ctypes.c_char_p(new_name))
+        # for `top` command and `/proc/self/comm`:
+        buff = ctypes.create_string_buffer(len(new_name) + 1)
+        buff.value = new_name
+        libc.prctl(15, ctypes.byref(buff), 0, 0, 0)
+    except Exception as e:
+        logging.debug('An error (%s) occurred while setting the process name.', e)
+
+
+def retry(times_limit=2):
+    """A decorator which helps to retry while an exception occurs."""
+    def decorator(func):
+        def wrap(*args, **kwargs):
+            try_times = 0
+            while True:
+                try:
+                    return func(*args, **kwargs)
+                except Exception as e:
+                    try_times += 1
+                    if try_times < times_limit:
+                        logging.warning(
+                            'Caught an exception while %s was running; trying again.', func.__name__
+                        )
+                        continue
+                    else:
+                        raise e
+        return wrap
+    return decorator
+
+
+def keep_inputting_until_correct(prompt, options):
+    input_char = ''
+    while input_char not in options:
+        input_char = input(prompt).upper()
+    return input_char
+
+
+def check_positive_integer(value):
+    if re.match(r'^\d+$', value):
+        return int(value)
+    else:
+        raise argparse.ArgumentTypeError('%s is not a valid number.' % value)
+
+
+def check_positive_float(value):
+    # The dot must be escaped; otherwise the pattern accepts inputs like '3a14'.
+    if re.match(r'^\d+(\.\d*)?$', value):
+        return float(value)
+    else:
+        raise argparse.ArgumentTypeError('%s is not a valid number.' % value)
+
+
+def check_ssl_file_permission(keyfile, certfile):
+    if not keyfile or not certfile:
+        return
+    ssl_keyfile_permission_invalid = (os.stat(keyfile).st_mode & 0o777) > 0o600
+    ssl_certfile_permission_invalid = (os.stat(certfile).st_mode & 0o777) > 0o600
+    if ssl_keyfile_permission_invalid:
+        result_msg = "WARNING: the ssl keyfile permission is greater than 600"
+        write_to_terminal(result_msg, color="yellow")
+    if ssl_certfile_permission_invalid:
+        result_msg = "WARNING: the ssl certfile permission is greater than 600"
+        write_to_terminal(result_msg, color="yellow")
+
+
+def check_ssl_certificate_remaining_days(certificate_path, certificate_warn_threshold=90):
+    """
+    Check whether the certificate is about to expire or has already expired,
+    and print a warning to the terminal if so.
+    :param certificate_path: path of the certificate.
+    :param certificate_warn_threshold: warn when fewer remaining days than this.
+    """
+    if not certificate_path:
+        return
+    gmt_format = '%b %d %H:%M:%S %Y GMT'
+    child = subprocess.Popen(['openssl', 'x509', '-in', certificate_path, '-noout', '-dates'],
+                             shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+    sub_chan = child.communicate()
+    if sub_chan[0]:
+        not_after = sub_chan[0].decode('utf-8').split('\n')[1].split('=')[1].strip()
+        end_time = datetime.strptime(not_after, gmt_format)
+        certificate_remaining_days = (end_time - datetime.now()).days
+        if 0 < certificate_remaining_days < certificate_warn_threshold:
+            result_msg = "WARNING: the '{certificate}' has remaining " \
+                         "{certificate_remaining_days} days before expiration." \
+                .format(certificate=certificate_path,
+                        certificate_remaining_days=certificate_remaining_days)
+            write_to_terminal(result_msg, color="yellow")
+        elif certificate_remaining_days <= 0:
+            result_msg = "WARNING: the '{certificate}' has expired."\
+                .format(certificate=certificate_path)
+            write_to_terminal(result_msg, color="yellow")
+
+
+def dbmind_assert(condition, comment=None):
+    if not condition:
+        if comment is None:
+            raise AssertionError("Please check the value of this variable.")
+        else:
+            raise ValueError(comment)
+
diff --git a/src/gausskernel/dbmind/tools/components/__init__.py b/src/gausskernel/dbmind/tools/components/__init__.py
new file mode 100644
index 000000000..3daf8de60
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/__init__.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+import importlib
+import os
+import pkgutil
+import sys
+
+from dbmind.common.utils import where_am_i
+
+
+def list_components():
+    """Return all components in the current directory."""
+    curr_dir = os.path.abspath(os.path.dirname(__file__))
+    components = list(
+        map(lambda tup: tup[1],
+            pkgutil.iter_modules((curr_dir,)))
+    )
+
+    return components
+
+
+def call_component(name, arguments):
+    component = importlib.import_module('.' + name, where_am_i(globals()))
+    if not hasattr(component, 'main'):
+        print('FATAL: Component %s must define function main() in the __init__.py.' % name,
+              file=sys.stderr)
+        exit(1)
+    component.main(arguments)
diff --git a/src/gausskernel/dbmind/tools/components/extract_log.py b/src/gausskernel/dbmind/tools/components/extract_log.py
new file mode 100644
index 000000000..c989aea2e
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/extract_log.py
@@ -0,0 +1,455 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
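+# extract_log extracts executable SQL statements from openGauss log files and
+# writes them out either as plain SQL or as normalized workload templates
+# (--json). A typical invocation, with arguments as defined in main() below:
+#   python extract_log.py LOG_DIRECTORY OUTPUT_FILE LOG_LINE_PREFIX \
+#       [-d DATABASE] [-U USERNAME] [--start_time TIME] [--sql_amount N] [--statement] [--json]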
+
+import re
+import os
+import sys
+import argparse
+import json
+import random
+import time
+from collections import deque
+from subprocess import Popen, PIPE
+
+SQL_TYPE = ['select ', 'delete ', 'insert ', 'update ']
+SQL_AMOUNT = 0
+PLACEHOLDER = r'@@@'
+SAMPLE_NUM = 5
+IS_ALL_LATEST_SQL = False
+FILEHANDLES = 500
+SQL_PATTERN = [r'\((\s*(\d+(\.\d+)?\s*)[,]?)+\)',  # match integer set in the IN collection
+               r'([^\\])\'((\')|(.*?([^\\])\'))',  # match all content in single quotes
+               r'(([^<>]\s*=\s*)|([^<>]\s+))(\d+)(\.\d+)?']  # match single integer
+
+
+def truncate_template(templates, update_time, avg_update):
+    global IS_ALL_LATEST_SQL
+    prune_list = []
+    # collect templates that have not been updated in this round
+    if not IS_ALL_LATEST_SQL:
+        for sql_template, sql_detail in templates.items():
+            if sql_detail['update'][-1] != update_time and len(sql_detail['update']) < avg_update:
+                prune_list.append((sql_template, len(sql_detail['update'])))
+        # sort by update frequency and prune the least frequently updated ones
+        if len(prune_list) > len(templates) / SAMPLE_NUM:
+            prune_list.sort(key=lambda elem: elem[1])
+            prune_list = prune_list[:len(templates) // SAMPLE_NUM]
+        if len(prune_list):
+            for item in prune_list:
+                del templates[item[0]]
+            return True
+        IS_ALL_LATEST_SQL = True
+    # if all templates have been updated, then randomly select one to delete
+    if random.random() < 0.5:
+        del templates[random.sample(templates.keys(), 1)[0]]
+        return True
+    return False
+
+
+def get_workload_template(templates, sqls, args):
+    update_time = time.time()
+    invalid_template = []
+    total_update = 0
+    is_record = True
+    # delete templates that have not been updated within the max_reserved_period threshold
+    for sql_template, sql_detail in templates.items():
+        if (update_time - sql_detail['update'][-1]) / 60 / 60 / 24 >= args.max_reserved_period:
+            invalid_template.append(sql_template)
+            continue
+        total_update += len(sql_detail['update'])
+    avg_update = (total_update / len(templates)) if len(templates) else 0
+    for item in invalid_template:
+        del templates[item]
+    for sql in sqls:
+        sql_template = sql
+        for pattern in SQL_PATTERN:
+            sql_template = re.sub(pattern, PLACEHOLDER, sql_template)
+        if sql_template not in templates:
+            # prune the templates if the total size is greater than the given threshold
+            if len(templates) >= args.max_template_num:
+                is_record = truncate_template(templates, update_time, avg_update)
+            if not is_record:
+                continue
+            templates[sql_template] = {}
+            templates[sql_template]['cnt'] = 0
+            templates[sql_template]['samples'] = []
+            templates[sql_template]['update'] = []
+        templates[sql_template]['cnt'] += 1
+        # drop update records that fall outside the retention period
+        for ind, item in enumerate(templates[sql_template]['update']):
+            if (update_time - item) / 60 / 60 / 24 < args.max_reserved_period:
+                templates[sql_template]['update'] = templates[sql_template]['update'][ind:]
+                break
+        # update the last update time of the sql template
+        if update_time not in templates[sql_template]['update']:
+            templates[sql_template]['update'].append(update_time)
+        # reservoir sampling
+        if len(templates[sql_template]['samples']) < SAMPLE_NUM:
+            if sql not in templates[sql_template]['samples']:
+                templates[sql_template]['samples'].append(sql)
+        else:
+            if random.randint(0, templates[sql_template]['cnt']) < SAMPLE_NUM:
+                templates[sql_template]['samples'][random.randint(0, SAMPLE_NUM - 1)] = sql
+
+
+def output_valid_sql(sql):
+    is_quotation_valid = sql.count("'") % 2
+    if re.search(r'=([\s]+)?\$', sql):
+        return ''
+    if 'from pg_' in sql.lower() or 'gs_index_advise' in 
sql.lower() or is_quotation_valid: + return '' + if any(tp in sql.lower() for tp in SQL_TYPE[1:]) or \ + (SQL_TYPE[0] in sql.lower() and 'from ' in sql.lower()): + sql = re.sub(r'for\s+update[\s;]*$', '', sql, flags=re.I) + return sql.strip('; ') + ';' + return '' + + +class SqlRecord: + def __init__(self): + self.sqllist = [] + self.in_transaction = False + + +def read_record_rest(file): + # get the rest string for a record, and start line of the next record + line = file.readline() + rest_content = '' + while re.match(r'^\t', line): + rest_content += (line.strip('\n') + ' ') + line = file.readline() + return rest_content, line + + +def get_parsed_sql(file, filter_config, log_info_position): + global SQL_AMOUNT + user = filter_config['user'] + database = filter_config['database'] + sql_amount = filter_config['sql_amount'] + statement = filter_config['statement'] + user_position = log_info_position.get('u') + database_position = log_info_position.get('d') + threadid_position = log_info_position.get('p') + line = file.readline() + sql_record = SqlRecord() + search_p = r'execute .*: (.*)' + if statement: + search_p = r'statement: (.*)|' + search_p + while line: + if sql_amount and SQL_AMOUNT == sql_amount: + break + try: + if (statement and re.search('statement: ', line.lower(), re.IGNORECASE)) \ + or re.search(r'execute .*:', line, re.IGNORECASE): + rest_content, nextline = read_record_rest(file) + recordstring = line.strip() + ' ' + rest_content.strip() + line = nextline + log_info = recordstring.split(' ') + if (user and user != log_info[user_position]) \ + or (database and database != log_info[database_position]): + continue + search_results = re.search(search_p, recordstring, re.IGNORECASE).groups() + sql = search_results[0] if search_results[0] else search_results[1] + if re.match(r'(start transaction)|(begin)|(begin transaction)', sql.lower() \ + .split(';')[0].strip()) \ + and threadid_position: + sql_record.in_transaction = True + if sql_record.sqllist: + yield ''.join(sql_record.sqllist) + SQL_AMOUNT += 1 + sql_record.sqllist = [] + sql = '' if len(sql.lower().strip(';').split(';', 1)) == 1 else \ + sql.lower().strip(';').split(';', 1)[1] + if sql.lower().strip().strip(';').strip().endswith(('commit', 'rollback')) \ + and threadid_position: + output_sql = output_valid_sql(sql.lower().strip().strip(';') \ + .replace('commit', '').replace('rollback', '')) + if output_sql: + sql_record.sqllist.append(output_sql) + sql_record.in_transaction = False + if sql_record.sqllist: + yield ''.join(sql_record.sqllist) + SQL_AMOUNT += 1 + sql_record.sqllist = [] + else: + output_sql = output_valid_sql(sql) + if output_sql: + if sql_record.in_transaction == False: + yield output_sql + SQL_AMOUNT += 1 + else: + sql_record.sqllist.append(output_sql) + continue + elif re.search(r'parameters: ', line, re.IGNORECASE): + param_list = re.search(r'parameters: (.*)', line.strip(), + re.IGNORECASE).group(1).split(', $') + param_list = list(param.split('=', 1) for param in param_list) + param_list.sort(key=lambda x: int(x[0].strip(' $')), + reverse=True) + for item in param_list: + sql = sql.replace(item[0].strip() if re.match(r'\$', item[0]) else + ('$' + item[0].strip()), item[1].strip()) + output_sql = output_valid_sql(sql) + if output_sql: + if not sql_record.in_transaction: + yield output_sql + SQL_AMOUNT += 1 + else: + sql_record.sqllist.append(output_sql) + line = file.readline() + else: + line = file.readline() + except: + line = file.readline() + + +def get_start_position(start_time, file_path): + 
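+    # Locate the byte offset of the first log line that starts with start_time
+    # by shelling out to head/grep/awk/wc. If no line matches, retry with a
+    # truncated (coarser) timestamp; return -1 when nothing matches at all.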
while start_time: + cmd = 'head -n $(cat %s | grep -m 1 -n "^%s" | awk -F : \'{print $1}\') %s | wc -c' % \ + (file_path, start_time, file_path) + proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) + std, err_msg = proc.communicate() + if proc.returncode == 0 and not err_msg: + return int(std) + elif len(start_time) > 13: + start_time = start_time[0: -3] + else: + break + return -1 + + +def make_not_existsfile(basename): + index = 0 + tempname = basename + while os.path.exists(tempname): + tempname = basename + '.' + str(index) + index += 1 + return tempname + + +def get_tempfile_name(threadid): + return make_not_existsfile('/tmp/' + 'threadid_' + threadid + '.log') + + +class threadid_info: + + def __init__(self, filename): + self.filename = filename + self.fileh = open(self.filename, 'w') + + def close(self): + self.fileh.close() + + def open(self): + self.fileh = open(self.filename, 'a+') + + def write(self, content): + self.fileh.write(content) + + +# split the log to different files groupby the threadid with file handles below FILEHANDLES +def group_log_by_threadid(f, threadid_position): + threadid = '000000' + threadid_log = dict() + closed_files = deque() + opened_files = deque() + threadid_log_files = [] + + try: + for line in f: + if not line.startswith('\t') and threadid_position: + try: + threadid = line.strip().split()[threadid_position] + except IndexError: + raise ValueError(f'wrong format for log line:{line.strip()}') + if not threadid.isdigit(): + raise ValueError(f'invalid int value {threadid} for %p') + if not threadid in threadid_log: + threadid_log_file = get_tempfile_name(threadid) + threadid_log_files.append(threadid_log_file) + threadid_log[threadid] = threadid_info(threadid_log_file) + opened_files.append(threadid) + elif threadid in closed_files: + closed_files.remove(threadid) + threadid_log[threadid].open() + opened_files.append(threadid) + threadid_log[threadid].write(line) + if len(opened_files) > FILEHANDLES: + threadid = opened_files.popleft() + threadid_log[threadid].close() + closed_files.append(threadid) + for threadid in opened_files: + threadid_log[threadid].close() + except Exception as ex: + for threadid in opened_files: + threadid_log[threadid].close() + for threadid_log_file in threadid_log_files: + os.remove(threadid_log_file) + raise ex + + return threadid_log_files + + +def merge_log(threadid_log_files, start_time): + merged_log_file = '/tmp/threadid_groupby_id' + start_time + '.log' + merged_log_file = make_not_existsfile(merged_log_file) + with open(merged_log_file, 'w') as fileh: + for threadid_log_file in threadid_log_files: + for line in open(threadid_log_file): + fileh.write(line) + os.remove(threadid_log_file) + return merged_log_file + + +def split_transaction(transactions): + for transaction in transactions: + for sql in transaction.strip().strip(';').split(';'): + yield sql + + +def generate_info_position(log_line_prefix): + log_info_position = {} + index = 0 + for _format in log_line_prefix.replace(' ', '').replace('%', ''): + log_info_position[_format] = index + if _format == 'm': + index += 1 + index += 1 + return log_info_position + + +def record_sql(valid_files, args, log_info_position, output_obj): + for ind, file in enumerate(valid_files): + if args.sql_amount and SQL_AMOUNT >= args.sql_amount: + break + file_path = os.path.join(args.l, file) + if os.path.isfile(file_path) and re.search(r'.log$', file): + start_position = 0 + if ind == 0 and args.start_time: + start_position = get_start_position(args.start_time, file_path) + 
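+                # get_start_position returns -1 when start_time cannot be
+                # located in this log file; skip such files.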
if start_position == -1:
+                    continue
+            with open(file_path) as f:
+                f.seek(start_position, 0)
+                threadid_log_files = group_log_by_threadid(f, log_info_position.get('p'))
+                try:
+                    merged_log_file = merge_log(threadid_log_files, args.start_time if args.start_time else '')
+                except Exception as ex:
+                    raise ex
+                finally:
+                    for threadid_log_file in threadid_log_files:
+                        if os.path.isfile(threadid_log_file):
+                            os.remove(threadid_log_file)
+            try:
+                with open(merged_log_file, mode='r') as f:
+                    # Both branches parse with the same filters, so build them once.
+                    filter_config = {'user': args.U, 'database': args.d,
+                                     'sql_amount': args.sql_amount, 'statement': args.statement}
+                    if isinstance(output_obj, dict):
+                        get_workload_template(output_obj, split_transaction(
+                            get_parsed_sql(f, filter_config, log_info_position)
+                        ), args)
+                    else:
+                        for sql in get_parsed_sql(f, filter_config, log_info_position):
+                            output_obj.write(sql + '\n')
+            except Exception as ex:
+                raise ex
+            finally:
+                os.remove(merged_log_file)
+
+
+def extract_sql_from_log(args):
+    files = [file for file in os.listdir(args.l) if file.endswith('.log')]
+    log_info_position = generate_info_position(args.p)
+    files = sorted(files, key=lambda x: os.path.getctime(os.path.join(args.l, x)), reverse=True)
+    valid_files = files
+    if args.start_time:
+        time_stamp = int(time.mktime(time.strptime(args.start_time, '%Y-%m-%d %H:%M:%S')))
+        valid_files = []
+        for file in files:
+            if os.path.getmtime(os.path.join(args.l, file)) < time_stamp:
+                break
+            valid_files.insert(0, file)
+    if args.json:
+        try:
+            with open(args.f, 'r') as output_file:
+                templates = json.load(output_file)
+        except (json.JSONDecodeError, FileNotFoundError):
+            templates = {}
+        record_sql(valid_files, args, log_info_position, templates)
+        with open(args.f, 'w') as output_file:
+            json.dump(templates, output_file)
+    else:
+        with open(args.f, 'w') as output_file:
+            record_sql(valid_files, args, log_info_position, output_file)
+
+
+def main(argv):
+    arg_parser = argparse.ArgumentParser()
+    arg_parser.add_argument("l", help="The path of the log file that needs to be parsed.")
+    arg_parser.add_argument("f", help="The output path of the extracted file.")
+    arg_parser.add_argument("p",
+                            type=str,
+                            help="Log line prefix")
+    arg_parser.add_argument("-d", help="Name of database")
+    arg_parser.add_argument("-U", help="Username for database log-in")
+    arg_parser.add_argument("--start_time", help="Start time of extracted log")
+    arg_parser.add_argument("--sql_amount", help="The number of SQL statements to collect", type=int)
+    arg_parser.add_argument("--statement", action='store_true', help="Extract statement log type",
+                            default=False)
+    arg_parser.add_argument("--max_reserved_period", type=int, help='Specify days to reserve templates')
+    arg_parser.add_argument("--max_template_num", type=int, help='Set the max number of templates; '
+                                                                 'a value below 5000 is advised '
+                                                                 'to limit time cost')
+    arg_parser.add_argument("--json", action='store_true',
+                            help="Whether the workload file format is json", default=False)
+
+    args = arg_parser.parse_args(argv)
+    if args.U:
+        if 'u' not in args.p:
+            raise argparse.ArgumentTypeError(f"input parameter p '{args.p}' does not contain"
+                                             " '%u' and U is not allowed.")
+    if args.d:
+        if 'd' not in args.p:
+            raise argparse.ArgumentTypeError(f"input parameter p '{args.p}' does not contain"
+                                             " '%d' and d is not allowed.")
+    if args.start_time:
+        # compatible with '2022-1-4 1:2:3'
+        args.start_time = time.strftime('%Y-%m-%d %H:%M:%S',
+                                        time.strptime(args.start_time,
+                                                      '%Y-%m-%d %H:%M:%S')
+                                        )
+        if 'm' not in args.p:
raise argparse.ArgumentTypeError(f"input parameter p '{args.p}' does not contain" + " '%m' and start_time is not allowed.") + if args.sql_amount is not None and args.sql_amount <= 0: + raise argparse.ArgumentTypeError("sql_amount %s is an invalid positive int value" % + args.sql_amount) + if args.max_reserved_period and args.max_reserved_period <= 0: + raise argparse.ArgumentTypeError("max_reserved_period %s is an invalid positive int value" % + args.max_reserved_period) + if args.max_template_num and args.max_template_num <= 0: + raise argparse.ArgumentTypeError("max_template_num %s is an invalid positive int value" % + args.max_template_num) + elif args.max_template_num and args.max_template_num > 5000: + print('max_template_num %d above 5000 is not advised for time cost' % args.max_template_num) + if not args.max_reserved_period: + args.max_reserved_period = float('inf') + if not args.max_template_num: + args.max_template_num = float('inf') + extract_sql_from_log(args) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/components/forecast.py b/src/gausskernel/dbmind/tools/components/forecast.py new file mode 100644 index 000000000..74ae12ffa --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/forecast.py @@ -0,0 +1,143 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import argparse +import os +import sys +import time +import traceback +import csv + +from prettytable import PrettyTable + +from dbmind import constants +from dbmind import global_vars +from dbmind.cmd.config_utils import load_sys_configs +from dbmind.common.utils import keep_inputting_until_correct +from dbmind.common.utils import write_to_terminal +from dbmind.metadatabase.dao import forecasting_metrics +from dbmind.common.utils import check_positive_integer, check_positive_float + + +def show(metric, host, start_time, end_time): + field_names = ( + 'rowid', 'metric_name', + 'host_ip', 'metric_time', + 'metric_value' + ) + output_table = PrettyTable() + output_table.field_names = field_names + + result = forecasting_metrics.select_forecasting_metric( + metric_name=metric, host_ip=host, + min_metric_time=start_time, max_metric_time=end_time + ).all() + for row_ in result: + row = [getattr(row_, field) for field in field_names] + output_table.add_row(row) + + nb_rows = len(result) + if nb_rows > 50: + write_to_terminal('The number of rows is greater than 50. ' + 'It seems too long to see.') + char = keep_inputting_until_correct('Do you want to dump to a file? [Y]es, [N]o.', ('Y', 'N')) + if char == 'Y': + dump_file_name = 'metric_forecast_%s.csv' % int(time.time()) + with open(dump_file_name, 'w+') as fp: + csv_writer = csv.writer(fp) + for row_ in result: + row = [str(getattr(row_, field)).strip() for field in field_names] + csv_writer.writerow(row) + write_to_terminal('Dumped file is %s.' 
% os.path.abspath(dump_file_name))
+        elif char == 'N':
+            print(output_table)
+            print('(%d rows)' % nb_rows)
+    else:
+        print(output_table)
+        print('(%d rows)' % nb_rows)
+
+
+def clean(retention_days):
+    if float(retention_days) == 0:
+        forecasting_metrics.truncate_forecasting_metrics()
+    else:
+        start_time = int((time.time() - float(retention_days) * 24 * 60 * 60) * 1000)
+        forecasting_metrics.delete_timeout_forecasting_metrics(start_time)
+    write_to_terminal('Successfully deleted redundant results.')
+
+
+def main(argv):
+    parser = argparse.ArgumentParser(description='Workload Forecasting: Forecast monitoring metrics')
+    parser.add_argument('action', choices=('show', 'clean'), help='choose a functionality to perform')
+    parser.add_argument('-c', '--conf', metavar='DIRECTORY', required=True,
+                        help='set the directory of configuration files')
+
+    parser.add_argument('--metric-name', metavar='METRIC_NAME',
+                        help='set a metric name you want to retrieve')
+    parser.add_argument('--host', metavar='HOST',
+                        help='set a host you want to retrieve')
+    parser.add_argument('--start-time',
+                        type=check_positive_float,
+                        metavar='TIMESTAMP_IN_MICROSECONDS',
+                        help='set a start time for retrieving')
+    parser.add_argument('--end-time',
+                        type=check_positive_float,
+                        metavar='TIMESTAMP_IN_MICROSECONDS',
+                        help='set an end time for retrieving')
+
+    parser.add_argument('--retention-days',
+                        type=check_positive_float,
+                        metavar='DAYS',
+                        default=0,
+                        help='clear historical diagnosis results and set '
+                             'the maximum number of days to retain data')
+
+    args = parser.parse_args(argv)
+
+    if not os.path.exists(args.conf):
+        parser.exit(1, 'Directory %s was not found.' % args.conf)
+
+    if args.action == 'show':
+        if None in (args.metric_name, args.host, args.start_time, args.end_time):
+            write_to_terminal('There may be a lot of results because you did not use all filter conditions.',
+                              color='red')
+            inputted_char = keep_inputting_until_correct('Press [A] to agree, press [Q] to quit:', ('A', 'Q'))
+            if inputted_char == 'Q':
+                parser.exit(0, "Quitting due to user's instruction.")
+    elif args.action == 'clean':
+        if args.retention_days is None:
+            write_to_terminal('You did not specify retention days, so we will delete all historical results.',
+                              color='red')
+            inputted_char = keep_inputting_until_correct('Press [A] to agree, press [Q] to quit:', ('A', 'Q'))
+            if inputted_char == 'Q':
+                parser.exit(0, "Quitting due to user's instruction.")
+
+    # Set the global_vars so that DAO can log in to the meta-database.
+    os.chdir(args.conf)
+    global_vars.configs = load_sys_configs(constants.CONFILE_NAME)
+
+    try:
+        if args.action == 'show':
+            show(args.metric_name, args.host, args.start_time, args.end_time)
+        elif args.action == 'clean':
+            clean(args.retention_days)
+    except Exception as e:
+        write_to_terminal('An error occurred probably due to database operations, '
+                          'please check database configurations. 
For details:\n' + + str(e), color='red', level='error') + traceback.print_tb(e.__traceback__) + return 2 + return args + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/index_advisor/README.md b/src/gausskernel/dbmind/tools/components/index_advisor/README.md similarity index 80% rename from src/gausskernel/dbmind/tools/index_advisor/README.md rename to src/gausskernel/dbmind/tools/components/index_advisor/README.md index 854b2a72d..e12630e2c 100644 --- a/src/gausskernel/dbmind/tools/index_advisor/README.md +++ b/src/gausskernel/dbmind/tools/components/index_advisor/README.md @@ -20,16 +20,6 @@ benefit of it for the workload. python extract_log.py [l LOG_DIRECTORY] [f OUTPUT_FILE] [-d DATABASE] [-U USERNAME] [--start_time] [--sql_amount] [--statement] [--json] -## index_server - -**Index_server** is a lightweight tool for deploying regular index recommendations, - -and setting related parameters by configuring **database.conf** file. - -## Usage - - echo passwd | python index_server.py & - ## Dependencies python3.x diff --git a/src/gausskernel/dbmind/tools/components/index_advisor/__init__.py b/src/gausskernel/dbmind/tools/components/index_advisor/__init__.py new file mode 100644 index 000000000..aeed1f09e --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/index_advisor/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from .index_advisor_workload import main diff --git a/src/gausskernel/dbmind/tools/index_advisor/DAO/driver_execute.py b/src/gausskernel/dbmind/tools/components/index_advisor/dao/driver_execute.py similarity index 66% rename from src/gausskernel/dbmind/tools/index_advisor/DAO/driver_execute.py rename to src/gausskernel/dbmind/tools/components/index_advisor/dao/driver_execute.py index 784a92723..62fe9bf9b 100644 --- a/src/gausskernel/dbmind/tools/index_advisor/DAO/driver_execute.py +++ b/src/gausskernel/dbmind/tools/components/index_advisor/dao/driver_execute.py @@ -1,5 +1,20 @@ -import psycopg2 +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
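+# dao.driver_execute talks to openGauss through psycopg2: it runs the
+# gs_index_advise function for single-query recommendations, creates
+# hypothetical indexes via hypopg, and estimates workload costs from
+# EXPLAIN output.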
+ import re + +import psycopg2 + from .execute_factory import ExecuteFactory from .execute_factory import IndexInfo @@ -31,50 +46,48 @@ class DriverExecute(ExecuteFactory): self.cur.close() self.conn.close() + def is_multi_node(self): + self.init_conn_handle() + try: + self.cur.execute("select count(*) from pgxc_node where node_type='C';") + self.conn.commit() + return self.cur.fetchall()[0][0] > 0 + finally: + self.close_conn() + @staticmethod - def parse_single_advisor_result(res, workload_table_name): - table_index_dict = {} - items = res.strip('()').split(',', 2) - if len(items) == 3: + def parse_single_advisor_result(res, table_index_dict): + multi_cols_p = re.compile('("[\w,]+")') + multi_cols_res = multi_cols_p.search(res) + items = res.strip('()').split(',', 3) + if multi_cols_res: + items[2] = multi_cols_res.group().strip('",') + items[3] = items[3].split(',')[-1] + if len(items) == 4: table = items[0] + '.' + items[1] - workload_table_name[items[0]] = workload_table_name.get(items[0], set()) - workload_table_name[items[0]].add(items[1]) - indexes = re.split('[()]', items[2].strip('\"')) - for columns in indexes: - if columns == '': - continue - if table not in table_index_dict.keys(): - table_index_dict[table] = [] - table_index_dict[table].append(columns) + columns = items[2].strip('\"') + if columns == '': + return table_index_dict + if table not in table_index_dict.keys(): + table_index_dict[table] = [] + table_index_dict[table].append((columns, items[-1])) return table_index_dict - @staticmethod - def make_single_advisor_sql(ori_sql): - sql = 'select gs_index_advise(\'' - for elem in ori_sql: - if elem == '\'': - sql += '\'' - sql += elem - sql += '\');' - - return sql - # call the single-index-advisor in the database - def query_index_advisor(self, query, workload_table_name): + def query_index_advisor(self, query): table_index_dict = {} if 'select' not in query.lower(): return table_index_dict if self.schema: sql = 'SET current_schema = %s;' % self.schema - sql += DriverExecute.make_single_advisor_sql(query) + sql += ExecuteFactory.make_single_advisor_sql(query) result = self.execute(sql=sql) if not result: return table_index_dict for res in result: - table_index_dict.update(DriverExecute.parse_single_advisor_result(res[0], - workload_table_name)) + DriverExecute.parse_single_advisor_result(res[0], table_index_dict) return table_index_dict @@ -87,14 +100,16 @@ class DriverExecute(ExecuteFactory): # create hypo-indexes if self.schema: sqls = 'SET current_schema = %s;' % self.schema - sqls += 'SET enable_hypo_index = on;' + sqls += 'SET enable_hypo_index = on;SELECT hypopg_reset_index();' if multi_node: sqls += 'SET enable_fast_query_shipping = off;SET enable_stream_operator = on;' for table in query_index_dict.keys(): - for columns in query_index_dict[table]: - if columns != '': - sqls += "SELECT hypopg_create_index('CREATE INDEX ON %s(%s)');" % \ - (table, columns) + for columns_tuple in query_index_dict[table]: + if columns_tuple != '': + content = "SELECT hypopg_create_index('CREATE INDEX ON %s(%s) %s');" % \ + (table, columns_tuple[0], columns_tuple[1]) + content = content.replace('""', '') + sqls += content sqls += 'SELECT * from hypopg_display_index();' result = self.execute(sqls) if not result: @@ -102,7 +117,8 @@ class DriverExecute(ExecuteFactory): hypoid_table_column = {} for item in result: if len(item) == 4: - table_name = re.search(r'btree_(.*%s)' % item[2], item[0]).group(1) + table_name = re.search( + r'btree(_global|_local|)_(.*%s)' % item[2], 
item[0]).group(2) match_flag, table_name = ExecuteFactory.match_table_name(table_name, query_index_dict) if not match_flag: @@ -118,7 +134,8 @@ class DriverExecute(ExecuteFactory): # parse the result of explain plan for item in result: if 'Index' in item[0] and 'Scan' in item[0] and 'btree' in item[0]: - super().get_valid_indexes(item[0], hypoid_table_column, valid_indexes) + super().get_valid_indexes( + item[0], hypoid_table_column, valid_indexes) self.execute('SELECT hypopg_reset_index()') return valid_indexes @@ -150,7 +167,8 @@ class DriverExecute(ExecuteFactory): index_size_sql = 'select * from hypopg_estimate_size(%s);' % index_id res = self.execute(index_size_sql) if res: - index_config[hypo_index_num].storage = float(res[0][0]) / 1024 / 1024 + index_config[hypo_index_num].storage = float( + res[0][0]) / 1024 / 1024 def estimate_workload_cost_file(self, workload, index_config=None, ori_indexes_name=None): total_cost = 0 @@ -163,25 +181,29 @@ class DriverExecute(ExecuteFactory): # create hypo-indexes self.execute('SET enable_hypo_index = on') for index in index_config: - res = self.execute("SELECT * from hypopg_create_index('CREATE INDEX ON %s(%s)')" % - (index.table, index.columns)) + res = self.execute("SELECT * from hypopg_create_index('CREATE INDEX ON %s(%s) %s')" % + (index.table, index.columns, index.index_type)) if self.max_index_storage and res: - self.update_index_storage(res[0][0], index_config, hypo_index_num) + self.update_index_storage( + res[0][0], index_config, hypo_index_num) hypo_index_num += 1 if self.multi_node: - self.execute('SET enable_fast_query_shipping = off;SET enable_stream_operator = on') + self.execute( + 'SET enable_fast_query_shipping = off;SET enable_stream_operator = on') self.execute("SET explain_perf_mode = 'normal'") for ind, query in enumerate(workload): # record ineffective sql and negative sql for candidate indexes if is_computed: - super().record_ineffective_negative_sql(index_config[0], query, ind) + super().record_ineffective_negative_sql( + index_config[0], query, ind) if 'select ' not in query.statement.lower(): workload[ind].cost_list.append(0) else: res = self.execute('EXPLAIN ' + query.statement) if res: - query_cost = DriverExecute.parse_explain_plan(res, index_config, ori_indexes_name) + query_cost = DriverExecute.parse_explain_plan( + res, index_config, ori_indexes_name) query_cost *= workload[ind].frequency workload[ind].cost_list.append(query_cost) if index_config and len(index_config) == 1 and query_cost < workload[ind].cost_list[0]: @@ -193,19 +215,20 @@ class DriverExecute(ExecuteFactory): self.execute('SELECT hypopg_reset_index()') return total_cost - def check_useless_index(self, tables, history_indexes, history_invalid_indexes): - schemas = [elem.lower() for elem in filter(None, self.schema.split(','))] + def check_useless_index(self, history_indexes, history_invalid_indexes): + schemas = [elem.lower() + for elem in filter(None, self.schema.split(','))] whole_indexes = list() redundant_indexes = list() - matched_table_name = set() - if not tables: - return whole_indexes, redundant_indexes for schema in schemas: - if not tables.get(schema.strip()): + table_sql = "select tablename from pg_tables where schemaname = '%s'" % schema + table_res = self.execute(table_sql) + if not table_res: continue - tables_string = ','.join(["'%s'" % table for table in tables[schema.strip()]]) + tables = [item[0] for item in table_res] + tables_string = ','.join(["'%s'" % table for table in tables]) # query all table index information and primary 
key information - sql = "SELECT c.relname AS tablename, i.relname AS indexname, " \ + sql = "set current_schema = %s; SELECT c.relname AS tablename, i.relname AS indexname, " \ "pg_get_indexdef(i.oid) AS indexdef, p.contype AS pkey from " \ "pg_index x JOIN pg_class c ON c.oid = x.indrelid JOIN " \ "pg_class i ON i.oid = x.indexrelid LEFT JOIN pg_namespace n " \ @@ -213,41 +236,31 @@ class DriverExecute(ExecuteFactory): "AND p.contype = 'p') WHERE (c.relkind = ANY (ARRAY['r'::\"char\", " \ "'m'::\"char\"])) AND (i.relkind = ANY (ARRAY['i'::\"char\", 'I'::\"char\"])) " \ "AND n.nspname = '%s' AND c.relname in (%s) order by c.relname;" % \ - (schema, tables_string) + (schema, schema, tables_string) res = self.execute(sql) if not res: continue cur_table_indexes = list() for item in res: cur_columns = re.search(r'\(([^\(\)]*)\)', item[2]).group(1) - cur_index_obj = IndexInfo(schema, item[0], item[1], cur_columns, item[2]) + cur_index_obj = IndexInfo( + schema, item[0], item[1], cur_columns, item[2]) if item[3]: cur_index_obj.primary_key = True # record all indexes whole_indexes.append(cur_index_obj) - # match the last recommendation result + # update historical indexes validity tbl_name = schema + '.' + item[0] if history_indexes.get(tbl_name): - matched_table_name.add(tbl_name) - super().match_last_result(tbl_name, cur_columns, history_indexes, - history_invalid_indexes) - # record redundant indexes + super().match_last_result(tbl_name, cur_columns, history_indexes, + history_invalid_indexes) + # after retrieving all indexes of a table, + # start recording redundant indexes of the table if cur_table_indexes and cur_table_indexes[-1].table != item[0]: super().record_redundant_indexes(cur_table_indexes, redundant_indexes) cur_table_indexes = [] cur_table_indexes.append(cur_index_obj) if cur_table_indexes: - # record redundant indexes + # record the redundant indexes of the last table super().record_redundant_indexes(cur_table_indexes, redundant_indexes) - # match history indexes to find invalid indexes - check_table_list = list(set(history_indexes.keys()).difference(matched_table_name)) - for table in check_table_list: - sql = "select indexdef from pg_indexes where schemaname='%s' and tablename='%s';" % \ - (table.split('.')[0], table.split('.')[1]) - res = self.execute(sql) - for item in res: - cur_columns = re.search(r'\(([^\(\)]*)\)', item[0]).group(1) - super().match_last_result(table, cur_columns, history_indexes, - history_invalid_indexes) return whole_indexes, redundant_indexes - diff --git a/src/gausskernel/dbmind/tools/index_advisor/DAO/execute_factory.py b/src/gausskernel/dbmind/tools/components/index_advisor/dao/execute_factory.py similarity index 78% rename from src/gausskernel/dbmind/tools/index_advisor/DAO/execute_factory.py rename to src/gausskernel/dbmind/tools/components/index_advisor/dao/execute_factory.py index 1112264c0..8517972a0 100644 --- a/src/gausskernel/dbmind/tools/index_advisor/DAO/execute_factory.py +++ b/src/gausskernel/dbmind/tools/components/index_advisor/dao/execute_factory.py @@ -1,3 +1,16 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + import re @@ -58,7 +71,14 @@ class ExecuteFactory: tokens = record.split(' ') for token in tokens: if 'btree' in token: - hypo_index_id = re.search(r'\d+', token.split('_', 1)[0]).group() + if 'btree_global_' in token: + index_type = 'global' + elif 'btree_local_' in token: + index_type = 'local' + else: + index_type = '' + hypo_index_id = re.search( + r'\d+', token.split('_', 1)[0]).group() table_columns = hypoid_table_column.get(hypo_index_id) if not table_columns: continue @@ -66,7 +86,7 @@ class ExecuteFactory: if table_name not in valid_indexes.keys(): valid_indexes[table_name] = [] if columns not in valid_indexes[table_name]: - valid_indexes[table_name].append(columns) + valid_indexes[table_name].append((columns, index_type)) @staticmethod def record_ineffective_negative_sql(candidate_index, obj, ind): @@ -108,14 +128,26 @@ class ExecuteFactory: @staticmethod def match_last_result(table_name, index_column, history_indexes, history_invalid_indexes): for column in history_indexes.get(table_name, dict()): - history_index_column = list(map(str.strip, column.split(','))) - existed_index_column = list(map(str.strip, index_column.split(','))) + # if the historical index matches an existing index successfully, + # then set the historical index to invalid + history_index_column = list(map(str.strip, column[0].split(','))) + existed_index_column = list(map(str.strip, index_column.split(','))) if len(history_index_column) > len(existed_index_column): continue if history_index_column == existed_index_column[0:len(history_index_column)]: history_indexes[table_name].remove(column) - history_invalid_indexes[table_name] = history_invalid_indexes.get(table_name, list()) + history_invalid_indexes[table_name] = history_invalid_indexes.get( + table_name, list()) history_invalid_indexes[table_name].append(column) if not history_indexes[table_name]: del history_indexes[table_name] + @staticmethod + def make_single_advisor_sql(ori_sql): + sql = 'select gs_index_advise(\'' + for elem in ori_sql: + if elem == '\'': + sql += '\'' + sql += elem + sql += '\');' + return sql diff --git a/src/gausskernel/dbmind/tools/index_advisor/DAO/gsql_execute.py b/src/gausskernel/dbmind/tools/components/index_advisor/dao/gsql_execute.py similarity index 71% rename from src/gausskernel/dbmind/tools/index_advisor/DAO/gsql_execute.py rename to src/gausskernel/dbmind/tools/components/index_advisor/dao/gsql_execute.py index d07dfc7e1..074a721ea 100644 --- a/src/gausskernel/dbmind/tools/index_advisor/DAO/gsql_execute.py +++ b/src/gausskernel/dbmind/tools/components/index_advisor/dao/gsql_execute.py @@ -1,9 +1,23 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
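`make_single_advisor_sql` is now defined once on the shared `ExecuteFactory` base instead of being duplicated in both executors. It wraps the workload query in a `gs_index_advise(...)` call and doubles every single quote, the standard SQL escape for a quote inside a string literal. A standalone copy of that logic, run against a made-up query, shows the effect:

```
# Standalone copy of ExecuteFactory.make_single_advisor_sql, for
# illustration only; the sample query is made up.
def make_single_advisor_sql(ori_sql):
    sql = "select gs_index_advise('"
    for elem in ori_sql:
        if elem == "'":
            sql += "'"  # double the quote: ' becomes ''
        sql += elem
    sql += "');"
    return sql

print(make_single_advisor_sql("select * from t where name = 'a'"))
# select gs_index_advise('select * from t where name = ''a''');
```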
+ import os import re import shlex import subprocess -import sys import time +import sys + from .execute_factory import ExecuteFactory from .execute_factory import IndexInfo @@ -31,10 +45,11 @@ class GSqlExecute(ExecuteFactory): for target_sql in target_sql_list: cmd += target_sql + ';' cmd += '\"' - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) (stdout, stderr) = proc.communicate() stdout, stderr = stdout.decode(), stderr.decode() - if 'gsql' in stderr or 'failed to connect' in stderr: + if 'gsql: FATAL:' in stderr or 'failed to connect' in stderr: raise ConnectionError("An error occurred while connecting to the database.\n" + "Details: " + stderr) return stdout @@ -43,55 +58,50 @@ class GSqlExecute(ExecuteFactory): def run_shell_sql_cmd(sql_file): cmd = BASE_CMD + ' -f ./' + sql_file try: - ret = subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT) + ret = subprocess.check_output( + shlex.split(cmd), stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: - print(e.output, file=sys.stderr) + print(e.output.decode(), file=sys.stderr) return ret.decode() + def is_multi_node(self): + cmd = BASE_CMD + " -c " + shlex.quote("select count(*) from pgxc_node where node_type='C';") + try: + ret = subprocess.check_output( + shlex.split(cmd), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + print(e.output.decode(), file=sys.stderr) + return int(ret.decode().strip().split()[2]) > 0 + @staticmethod - def parse_single_advisor_result(res, workload_table_name): - table_index_dict = {} + def parse_single_advisor_result(res, table_index_dict): if len(res) > 2 and res[0:2] == ' (': - items = res.split(',', 2) + items = res.split(',') table = items[0][2:] + '.' 
+ items[1] - workload_table_name[items[0][2:]] = workload_table_name.get(items[0][2:], set()) - workload_table_name[items[0][2:]].add(items[1]) - indexes = re.split('[()]', items[2][:-1].strip('\"')) - for columns in indexes: - if columns == '': - continue - if table not in table_index_dict.keys(): - table_index_dict[table] = [] - table_index_dict[table].append(columns) + columns = ','.join(items[2:-1]).strip('\"') + if columns == '': + return table_index_dict + if table not in table_index_dict.keys(): + table_index_dict[table] = [] + table_index_dict[table].append((columns, items[-1].strip(') '))) return table_index_dict - @staticmethod - def make_single_advisor_sql(ori_sql): - sql = 'select gs_index_advise(\'' - for elem in ori_sql: - if elem == '\'': - sql += '\'' - sql += elem - sql += '\');' - - return sql - # call the single-index-advisor in the database - def query_index_advisor(self, query, workload_table_name): + def query_index_advisor(self, query): table_index_dict = {} if 'select' not in query.lower(): return table_index_dict - sql = self.make_single_advisor_sql(query) + sql = ExecuteFactory.make_single_advisor_sql(query) # escape double quotes in query sql = sql.replace('"', '\\"') if '"' in sql else sql result = self.run_shell_cmd([sql]).split('\n') for res in result: - table_index_dict.update(self.parse_single_advisor_result(res, workload_table_name)) + self.parse_single_advisor_result(res, table_index_dict) return table_index_dict @@ -107,11 +117,12 @@ class GSqlExecute(ExecuteFactory): sql_list.append('SET enable_fast_query_shipping = off;') sql_list.append('SET enable_stream_operator = on;') for table in query_index_dict.keys(): - for columns in query_index_dict[table]: - if columns != '': - sql_list.append("SELECT hypopg_create_index('CREATE INDEX ON %s(%s)')" % - (table, columns)) + for columns_tuple in query_index_dict[table]: + if columns_tuple[0]: + sql_list.append("SELECT hypopg_create_index('CREATE INDEX ON %s(%s) %s')" % + (table, columns_tuple[0], columns_tuple[1])) sql_list.append('SELECT hypopg_display_index()') + # escape double quotes in query query = query.replace('"', '\\"') if '"' in query else query sql_list.append("SET explain_perf_mode = 'normal'; explain " + query) sql_list.append('SELECT hypopg_reset_index()') @@ -124,8 +135,8 @@ class GSqlExecute(ExecuteFactory): if hypo_display and 'btree' in line: hypo_index_info = line.split(',', 3) if len(hypo_index_info) == 4: - table_name = re.search(r'btree_(.*%s)' % hypo_index_info[2], - hypo_index_info[0]).group(1) + table_name = re.search(r'btree(_global|_local|)_(.*%s)' % hypo_index_info[2], + hypo_index_info[0]).group(2) match_flag, table_name = ExecuteFactory.match_table_name(table_name, query_index_dict) if not match_flag: @@ -145,7 +156,8 @@ class GSqlExecute(ExecuteFactory): res = self.run_shell_cmd([index_size_sql]).split('\n') for line in res: if re.match(r'\d+', line.strip()): - index_config[hypo_index_num].storage = float(line.strip()) / 1024 / 1024 + index_config[hypo_index_num].storage = float( + line.strip()) / 1024 / 1024 @staticmethod # parse the explain plan to get estimated cost by database optimizer @@ -223,8 +235,8 @@ class GSqlExecute(ExecuteFactory): # create hypo-indexes file.write('SET enable_hypo_index = on;\n') for index in index_config: - file.write("SELECT hypopg_create_index('CREATE INDEX ON %s(%s)');\n" % - (index.table, index.columns)) + file.write("SELECT hypopg_create_index('CREATE INDEX ON %s(%s) %s');\n" % + (index.table, index.columns, index.index_type)) if 
self.multi_node: file.write('set enable_fast_query_shipping = off;\n') file.write('set enable_stream_operator = on; \n') @@ -237,7 +249,8 @@ class GSqlExecute(ExecuteFactory): select_sql_pos.append(ind) # record ineffective sql and negative sql for candidate indexes if is_computed: - super().record_ineffective_negative_sql(index_config[0], query, ind) + super().record_ineffective_negative_sql( + index_config[0], query, ind) result = self.run_shell_sql_cmd(sql_file).split('\n') if os.path.exists(sql_file): @@ -251,17 +264,24 @@ class GSqlExecute(ExecuteFactory): return total_cost - def check_useless_index(self, tables, history_indexes, history_invalid_indexes): - schemas = [elem.lower() for elem in filter(None, self.schema.split(','))] + def check_useless_index(self, history_indexes, history_invalid_indexes): + schemas = [elem.lower() + for elem in filter(None, self.schema.split(','))] whole_indexes = list() redundant_indexes = list() - matched_table_name = set() - if not tables: - return whole_indexes, redundant_indexes for schema in schemas: - if not tables.get(schema.strip()): + table_sql = "select tablename from pg_tables where schemaname = '%s'" % schema + table_res = self.run_shell_cmd([table_sql]).split('\n') + if not table_res: continue - tables_string = ','.join(["'%s'" % table for table in tables[schema.strip()]]) + tables = [] + for line in table_res: + if 'tablename' in line or re.match(r'-+', line) or re.match(r'\(\d+ rows?\)', line): + continue + tables.append(line.strip()) + if not tables: + continue + tables_string = ','.join(["'%s'" % table for table in tables]) # query all table index information and primary key information sql = "SELECT c.relname AS tablename, i.relname AS indexname, " \ "pg_get_indexdef(i.oid) AS indexdef, p.contype AS pkey from " \ @@ -276,24 +296,40 @@ class GSqlExecute(ExecuteFactory): if not res: continue cur_table_indexes = list() + indexdef_list = [] for line in res: if 'tablename' in line or re.match(r'-+', line): continue elif re.match(r'\(\d+ rows?\)', line): continue elif '|' in line: - table, index, indexdef, pkey = [item.strip() for item in line.split('|')] - cur_columns = re.search(r'\(([^\(\)]*)\)', indexdef).group(1) - cur_index_obj = IndexInfo(schema, table, index, cur_columns, indexdef) + temptable, tempindex, indexdef, temppkey = [ + item.strip() for item in line.split('|')] + if temptable and tempindex: + table, index, pkey = temptable, tempindex, temppkey + if line.strip().endswith(('+| p', '+|')): + if len(indexdef_list) >= 2: + indexdef_list.append(' ' + indexdef.strip(' +')) + else: + indexdef_list.append(indexdef.strip(' +')) + continue + elif indexdef_list and indexdef.startswith(')'): + indexdef_list.append(indexdef.strip().strip('+').strip()) + indexdef = '\n'.join(indexdef_list) + indexdef_list = [] + cur_columns = re.search( + r'\(([^\(\)]*)\)', indexdef).group(1) + cur_index_obj = IndexInfo( + schema, table, index, cur_columns, indexdef) if pkey: cur_index_obj.primary_key = True # record all indexes whole_indexes.append(cur_index_obj) - # match the last recommendation result + # update historical indexes validity tbl_name = schema + '.' 
+ table if history_indexes.get(tbl_name): - matched_table_name.add(tbl_name) - super().match_last_result(tbl_name, cur_columns, history_indexes, history_invalid_indexes) + super().match_last_result(tbl_name, cur_columns, + history_indexes, history_invalid_indexes) # record redundant indexes if cur_table_indexes and cur_table_indexes[-1].table != table: super().record_redundant_indexes(cur_table_indexes, redundant_indexes) @@ -302,18 +338,4 @@ class GSqlExecute(ExecuteFactory): if cur_table_indexes: # record redundant indexes super().record_redundant_indexes(cur_table_indexes, redundant_indexes) - # match history indexes to find invalid indexes - check_table_list = list(set(history_indexes.keys()).difference(matched_table_name)) - sql_list = [] - for table in check_table_list: - sql = "select indexdef from pg_indexes where schemaname='%s' and tablename='%s'" % \ - (table.split('.')[0], table.split('.')[1]) - sql_list.append(sql) - res = self.run_shell_cmd(sql_list).split('\n') - for line in res: - if 'CREATE INDEX' in line: - cur_columns = re.search(r'\(([^\(\)]*)\)', line).group(1) - super().match_last_result(table, cur_columns, history_indexes, - history_invalid_indexes) return whole_indexes, redundant_indexes - diff --git a/src/gausskernel/dbmind/tools/components/index_advisor/index_advisor_workload.py b/src/gausskernel/dbmind/tools/components/index_advisor/index_advisor_workload.py new file mode 100644 index 000000000..b2f8912a7 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/index_advisor/index_advisor_workload.py @@ -0,0 +1,1018 @@ +""" +Copyright (c) 2020 Huawei Technologies Co.,Ltd. + +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+""" +import os +import sys +import argparse +import copy +import getpass +import random +import re +import json +import select +import logging + +try: + from .dao.gsql_execute import GSqlExecute + from .dao.execute_factory import ExecuteFactory +except ImportError: + from dao.gsql_execute import GSqlExecute + from dao.execute_factory import ExecuteFactory + +ENABLE_MULTI_NODE = False +SAMPLE_NUM = 5 +MAX_INDEX_COLUMN_NUM = 4 +MAX_INDEX_NUM = None +MAX_INDEX_STORAGE = None +FULL_ARRANGEMENT_THRESHOLD = 20 +NEGATIVE_RATIO_THRESHOLD = 0.2 +SHARP = '#' +JSON_TYPE = False +DRIVER = None +BLANK = ' ' +SQL_TYPE = ['select', 'delete', 'insert', 'update'] +SQL_PATTERN = [r'\((\s*(\d+(\.\d+)?\s*)[,]?)+\)', # match integer set in the IN collection + r'([^\\])\'((\')|(.*?([^\\])\'))', # match all content in single quotes + r'(([^<>]\s*=\s*)|([^<>]\s+))(\d+)(\.\d+)?'] # match single integer +SQL_DISPLAY_PATTERN = [r'\((\s*(\d+(\.\d+)?\s*)[,]?)+\)', # match integer set in the IN collection + r'\'((\')|(.*?\'))', # match all content in single quotes + r'([^\_\d])\d+(\.\d+)?'] # match single integer +logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') + + +class CheckValid(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + ill_character = [" ", "|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", + "{", "}", "(", ")", "[", "]", "~", "*", "?", "!", "\n"] + if not values.strip(): + return + if any(ill_char in values for ill_char in ill_character): + raise Exception( + "There are illegal characters in the %s." % self.dest) + setattr(namespace, self.dest, values) + + +def read_input_from_pipe(): + """ + Read stdin input if there is "echo 'str1 str2' | python xx.py", + return the input string + """ + input_str = "" + r_handle, _, _ = select.select([sys.stdin], [], [], 0) + if not r_handle: + return "" + + for item in r_handle: + if item == sys.stdin: + input_str = sys.stdin.read().strip() + return input_str + + +def get_password(): + password = read_input_from_pipe() + if password: + logging.warning("Read password from pipe.") + else: + password = getpass.getpass("Password for database user:") + if not password: + raise ValueError('Please input the password') + return password + + +class QueryItem: + def __init__(self, sql, freq): + self.statement = sql + self.frequency = freq + self.valid_index_list = [] + self.cost_list = [] + + +class IndexItem: + def __init__(self, tbl, cols, index_type=None): + self.table = tbl + self.columns = cols + self.atomic_pos = 0 + self.benefit = 0 + self.storage = 0 + self.positive_pos = [] + self.ineffective_pos = [] + self.negative_pos = [] + self.total_sql_num = 0 + self.insert_sql_num = 0 + self.update_sql_num = 0 + self.delete_sql_num = 0 + self.select_sql_num = 0 + self.index_type = index_type + self.is_candidate = False + + def __str__(self): + return f'{self.table} {self.columns} {self.index_type}' + + +def singleton(cls): + instances = {} + def _singleton(*args, **kwargs): + if not cls in instances: + instances[cls] = cls(*args, **kwargs) + return instances[cls] + return _singleton + + +@singleton +class IndexItemFactory: + def __init__(self): + self.indexes = {} + + def get_index(self, tbl, cols, index_type): + if not (tbl, tuple(cols), index_type) in self.indexes: + self.indexes[(tbl, tuple(cols), index_type)] = IndexItem(tbl, cols, index_type=index_type) + return self.indexes[(tbl, tuple(cols), index_type)] + + +class IndexAdvisor: + def __init__(self, db, workload_info, multi_iter_mode): + self.db = db + 
self.workload_info = workload_info + self.workload_used_index = set() + self.multi_iter_mode = multi_iter_mode + + self.determine_indexes = [] + self.integrate_indexes = {} + self.index_cost_total = [] + + self.display_detail_info = {} + + def retain_lower_cost_index(self, candidate_indexes): + remove_indexes = [] + for i in range(len(candidate_indexes)-1): + if candidate_indexes[i].table != candidate_indexes[i+1].table: + continue + if candidate_indexes[i].columns == candidate_indexes[i+1].columns: + if self.index_cost_total[candidate_indexes[i].atomic_pos] <= \ + self.index_cost_total[candidate_indexes[i+1].atomic_pos]: + remove_indexes.append(i+1) + else: + remove_indexes.append(i) + for index in remove_indexes[::-1]: + candidate_indexes.pop(index) + + def complex_index_advisor(self): + self.display_detail_info['workloadCount'] = self.workload_info[1] + print_header_boundary(" Generate candidate indexes ") + candidate_indexes = generate_candidate_indexes( + self.workload_info[0], self.db) + if DRIVER: + self.db.init_conn_handle() + if len(candidate_indexes) == 0: + print("No candidate indexes generated!") + self.db.estimate_workload_cost_file(self.workload_info[0], + ori_indexes_name=self.workload_used_index) + if DRIVER: + self.db.close_conn() + return None + + print_header_boundary(" Determine optimal indexes ") + atomic_config_total = generate_atomic_config(self.workload_info[0]) + if atomic_config_total and len(atomic_config_total[0]) != 0: + raise ValueError("The empty atomic config isn't generated!") + for atomic_config in atomic_config_total: + self.index_cost_total.append( + self.db.estimate_workload_cost_file(self.workload_info[0], atomic_config, + self.workload_used_index)) + if DRIVER: + self.db.close_conn() + + opt_config = greedy_determine_opt_config(self.workload_info[0], atomic_config_total, + candidate_indexes, self.index_cost_total[0]) + self.retain_lower_cost_index(candidate_indexes) + if len(opt_config) == 0: + print("No optimal indexes generated!") + return None + return opt_config + + def retain_high_benefit_index(self, candidate_indexes): + remove_indexes = [] + for i in range(len(candidate_indexes)-1): + candidate_indexes[i].cost_pos = i + 1 + if candidate_indexes[i].table != candidate_indexes[i+1].table: + continue + if candidate_indexes[i].columns == candidate_indexes[i+1].columns: + if candidate_indexes[i].benefit >= candidate_indexes[i+1].benefit: + remove_indexes.append(i+1) + else: + remove_indexes.append(i) + candidate_indexes[len(candidate_indexes)-1].cost_pos = len(candidate_indexes) + for index in remove_indexes[::-1]: + candidate_indexes.pop(index) + + def simple_index_advisor(self): + self.display_detail_info['workloadCount'] = self.workload_info[1] + print_header_boundary(" Generate candidate indexes ") + candidate_indexes = generate_candidate_indexes( + self.workload_info[0], self.db) + if DRIVER: + self.db.init_conn_handle() + if len(candidate_indexes) == 0: + print("No candidate indexes generated!") + self.db.estimate_workload_cost_file(self.workload_info[0], + ori_indexes_name=self.workload_used_index) + if DRIVER: + self.db.close_conn() + return None + + print_header_boundary(" Determine optimal indexes ") + ori_total_cost = \ + self.db.estimate_workload_cost_file(self.workload_info[0], + ori_indexes_name=self.workload_used_index) + self.index_cost_total.append(ori_total_cost) + for obj in candidate_indexes: + new_total_cost = self.db.estimate_workload_cost_file( + self.workload_info[0], [obj]) + obj.benefit = ori_total_cost - new_total_cost + 
self.index_cost_total.append(new_total_cost) + self.retain_high_benefit_index(candidate_indexes) + if DRIVER: + self.db.close_conn() + if len(self.index_cost_total) == 1: + print("No optimal indexes generated!") + return None + global MAX_INDEX_NUM + MAX_INDEX_NUM = MAX_INDEX_NUM or 10 + return candidate_indexes + + def filter_low_benefit_index(self, opt_indexes): + for key, index in enumerate(opt_indexes): + sql_optimized = 0 + if self.multi_iter_mode: + cost_list_pos = index.atomic_pos + else: + cost_list_pos = index.cost_pos + # calculate the average benefit of each positive SQL + for pos in index.positive_pos: + sql_optimized += 1 - self.workload_info[0][pos].cost_list[cost_list_pos] / \ + self.workload_info[0][pos].cost_list[0] + negative_sql_ratio = 0 + if index.total_sql_num: + negative_sql_ratio = (index.insert_sql_num + index.delete_sql_num + + index.update_sql_num) / index.total_sql_num + # filter the candidate indexes that do not meet the conditions of optimization + if not index.positive_pos: + continue + if sql_optimized / len(index.positive_pos) < 0.1: + continue + if sql_optimized / len(index.positive_pos) < \ + NEGATIVE_RATIO_THRESHOLD < negative_sql_ratio: + continue + self.determine_indexes.append(index) + + def record_info(self, index, sql_info, cost_list_pos, table_name, statement): + workload_optimized = (1 - self.index_cost_total[cost_list_pos] / + self.index_cost_total[0]) * 100 + sql_info['workloadOptimized'] = '%.2f' % \ + (workload_optimized if workload_optimized > 1 else 1) + sql_info['schemaName'] = index.table.split('.')[0] + sql_info['tbName'] = table_name + sql_info['columns'] = index.columns + sql_info['index_type'] = index.index_type + sql_info['statement'] = statement + sql_info['dmlCount'] = round(index.total_sql_num) + sql_info['selectRatio'] = 1 + sql_info['insertRatio'] = sql_info['deleteRatio'] = sql_info['updateRatio'] = 0 + if index.total_sql_num: + sql_info['selectRatio'] = round( + index.select_sql_num * 100 / index.total_sql_num, 2) + sql_info['insertRatio'] = round( + index.insert_sql_num * 100 / index.total_sql_num, 2) + sql_info['deleteRatio'] = round( + index.delete_sql_num * 100 / index.total_sql_num, 2) + sql_info['updateRatio'] = round(100 - sql_info['selectRatio'] - sql_info['insertRatio'] + - sql_info['deleteRatio'], 2) + self.display_detail_info['recommendIndexes'].append(sql_info) + + def computer_index_optimization_info(self, index, table_name, statement, opt_indexes): + if self.multi_iter_mode: + cost_list_pos = index.atomic_pos + else: + cost_list_pos = index.cost_pos + sql_info = {'sqlDetails': []} + benefit_types = [index.ineffective_pos, + index.positive_pos, index.negative_pos] + for category, sql_pos in enumerate(benefit_types): + sql_count = 0 + for item in sql_pos: + sql_count += self.workload_info[0][item].frequency + for pos in sql_pos: + sql_detail = {} + sql_template = self.workload_info[0][pos].statement + for pattern in SQL_DISPLAY_PATTERN: + sql_template = re.sub(pattern, '?', sql_template) + + sql_detail['sqlTemplate'] = sql_template + sql_detail['sql'] = self.workload_info[0][pos].statement + sql_detail['sqlCount'] = int(round(sql_count)) + if category == 1: + sql_optimized = (self.workload_info[0][pos].cost_list[0] - + self.workload_info[0][pos].cost_list[cost_list_pos]) / \ + self.workload_info[0][pos].cost_list[cost_list_pos] + sql_detail['optimized'] = '%.3f' % sql_optimized + sql_detail['correlationType'] = category + sql_info['sqlDetails'].append(sql_detail) + self.record_info(index, sql_info, cost_list_pos, 
table_name, statement) + + def display_advise_indexes_info(self, opt_indexes, show_detail): + index_current_storage = 0 + cnt = 0 + self.display_detail_info['recommendIndexes'] = [] + for key, index in enumerate(self.determine_indexes): + # constraints for Top-N algorithm + if MAX_INDEX_STORAGE and (index_current_storage + index.storage) > MAX_INDEX_STORAGE: + continue + if MAX_INDEX_NUM and cnt == MAX_INDEX_NUM: + break + if not self.multi_iter_mode and index.benefit <= 0: + continue + index_current_storage += index.storage + cnt += 1 + # display determine indexes + table_name = index.table.split('.')[-1] + index_name = 'idx_%s_%s%s' % (table_name, (index.index_type + + '_' if index.index_type else '') \ + ,'_'.join(index.columns.split(', '))) + statement = 'CREATE INDEX %s ON %s%s%s;' % (index_name, index.table, + '(' + index.columns + ')', + (' '+index.index_type if index.index_type else '')) + print(statement) + if show_detail: + # record detailed SQL optimization information for each index + self.computer_index_optimization_info( + index, table_name, statement, opt_indexes) + + def generate_incremental_index(self, history_advise_indexes): + self.integrate_indexes = copy.copy(history_advise_indexes) + self.integrate_indexes['currentIndexes'] = {} + for key, index in enumerate(self.determine_indexes): + self.integrate_indexes['currentIndexes'][index.table] = \ + self.integrate_indexes['currentIndexes'].get(index.table, []) + self.integrate_indexes['currentIndexes'][index.table].append( + (index.columns, index.index_type)) + + def generate_redundant_useless_indexes(self, history_invalid_indexes): + whole_indexes, redundant_indexes = get_whole_index(self.db, + self.integrate_indexes['historyIndexes'], + history_invalid_indexes, + self.display_detail_info) + display_useless_redundant_indexes(whole_indexes, redundant_indexes, + self.workload_used_index, self.display_detail_info) + + def display_incremental_index(self, history_invalid_indexes, + workload_file_path): + def rm_schema(table_name): + return table_name.split('.')[-1] + # display historical effective indexes + if self.integrate_indexes['historyIndexes']: + print_header_boundary(" Historical effective indexes ") + for table_name, index_list in self.integrate_indexes['historyIndexes'].items(): + for column in index_list: + index_name = 'idx_%s_%s%s' % (rm_schema(table_name), + (column[1] + '_' if column[1] else ''), + '_'.join(column[0].split(', '))) + statement = 'CREATE INDEX %s ON %s%s%s;' % (index_name, table_name, + '(' + column[0] + ')', (' ' + column[1] if column[1] else '')) + print(statement) + # display historical invalid indexes + if history_invalid_indexes: + print_header_boundary(" Historical invalid indexes ") + for table_name, index_list in history_invalid_indexes.items(): + for column in index_list: + index_name = 'idx_%s_%s%s' % (rm_schema(table_name), + (column[1] + '_' if column[1] else ''), + '_'.join(column[0].split(', '))) + statement = 'CREATE INDEX %s ON %s%s%s;' % (index_name, table_name, + '(' + column[0] + ')', (' ' + column[1] if column[1] else '')) + print(statement) + # save integrate indexes result + integrate_indexes_file = os.path.join(os.path.dirname(workload_file_path), + 'index_result.json') + for table, indexes in self.integrate_indexes['currentIndexes'].items(): + self.integrate_indexes['historyIndexes'][table] = \ + self.integrate_indexes['historyIndexes'].get(table, []) + self.integrate_indexes['historyIndexes'][table].extend(indexes) + self.integrate_indexes['historyIndexes'][table] = \ + 
list( + set(map(tuple, (self.integrate_indexes['historyIndexes'][table])))) + with open(integrate_indexes_file, 'w') as file: + json.dump(self.integrate_indexes['historyIndexes'], file) + + +def green(text): + return '\033[32m%s\033[0m' % text + + +def print_header_boundary(header): + # Output a header first, which looks more beautiful. + try: + term_width = os.get_terminal_size().columns + # The width of each of the two sides of the terminal. + side_width = (term_width - len(header)) // 2 + except (AttributeError, OSError): + side_width = 0 + title = SHARP * side_width + header + SHARP * side_width + print(green(title)) + + +def load_workload(file_path): + wd_dict = {} + workload = [] + global BLANK + with open(file_path, 'r') as file: + raw_text = ''.join(file.readlines()) + sqls = raw_text.split(';') + for sql in sqls: + if any(re.search(r'((\A|[\s\(,])%s[\s*\(])' % tp, sql.lower()) for tp in SQL_TYPE): + TWO_BLANKS = BLANK * 2 + while TWO_BLANKS in sql: + sql = sql.replace(TWO_BLANKS, BLANK) + if sql.strip() not in wd_dict.keys(): + wd_dict[sql.strip()] = 1 + else: + wd_dict[sql.strip()] += 1 + for sql, freq in wd_dict.items(): + workload.append(QueryItem(sql, freq)) + + return workload + + +def get_workload_template(workload): + templates = {} + placeholder = r'@@@' + + for item in workload: + sql_template = item.statement + for pattern in SQL_PATTERN: + sql_template = re.sub(pattern, placeholder, sql_template) + if sql_template not in templates: + templates[sql_template] = {} + templates[sql_template]['cnt'] = 0 + templates[sql_template]['samples'] = [] + templates[sql_template]['cnt'] += item.frequency + # reservoir sampling + if len(templates[sql_template]['samples']) < SAMPLE_NUM: + templates[sql_template]['samples'].append(item.statement) + else: + if random.randint(0, templates[sql_template]['cnt']) < SAMPLE_NUM: + templates[sql_template]['samples'][random.randint(0, SAMPLE_NUM - 1)] = \ + item.statement + + return templates + + +def workload_compression(input_path): + compressed_workload = [] + total_num = 0 + if JSON_TYPE: + with open(input_path, 'r') as file: + templates = json.load(file) + else: + workload = load_workload(input_path) + templates = get_workload_template(workload) + + for _, elem in templates.items(): + for sql in elem['samples']: + compressed_workload.append( + QueryItem(sql.strip(), elem['cnt'] / len(elem['samples']))) + total_num += elem['cnt'] + return compressed_workload, total_num + + +# enumerate the column combinations for a suggested index +def get_indexable_columns(table_index_dict): + query_indexable_columns = {} + if len(table_index_dict) == 0: + return query_indexable_columns + + for table in table_index_dict.keys(): + query_indexable_columns[table] = [] + for columns_tuple in table_index_dict[table]: + indexable_columns = columns_tuple[0].split(',') + for column in indexable_columns: + for ind, item in enumerate(query_indexable_columns[table]): + if column != item[0]: + continue + if columns_tuple[1] == item[1]: + query_indexable_columns[table].pop(ind) + break + query_indexable_columns[table].append( + (column, columns_tuple[1])) + + return query_indexable_columns + + +def add_column(valid_index_dict, table, columns_info, single_col_info): + columns, columns_index_type = columns_info + single_column, single_index_type = single_col_info + if columns_index_type.strip('"') != single_index_type.strip('"'): + add_column(valid_index_dict, table, (columns, 'local'), + (single_column, 'local')) + add_column(valid_index_dict, table, (columns, 'global'), 
+ (single_column, 'global')) + else: + current_columns_tuple = ( + columns + ',' + single_column, columns_index_type) + if current_columns_tuple in valid_index_dict[table]: + return + if single_index_type == 'local': + global_columns_tuple = (columns + ',' + single_column, 'global') + if global_columns_tuple in valid_index_dict[table]: + global_pos = valid_index_dict[table].index( + global_columns_tuple) + valid_index_dict[table][global_pos] = current_columns_tuple + current_columns_tuple = global_columns_tuple + valid_index_dict[table].append(current_columns_tuple) + + +def get_valid_index_dict(table_index_dict, query, db): + need_check = False + query_indexable_columns = get_indexable_columns(table_index_dict) + valid_index_dict = db.query_index_check(query.statement, query_indexable_columns, + ENABLE_MULTI_NODE) + + for i in range(MAX_INDEX_COLUMN_NUM): + for table in valid_index_dict.keys(): + for columns, index_type in valid_index_dict[table]: + if columns.count(',') != i: + continue + need_check = True + for single_column, single_index_type in query_indexable_columns[table]: + if single_column not in columns: + add_column(valid_index_dict, table, (columns, index_type), + (single_column, single_index_type)) + if need_check: + valid_index_dict = db.query_index_check(query.statement, valid_index_dict, + ENABLE_MULTI_NODE) + need_check = False + else: + break + return valid_index_dict + + +def print_candidate_indexes(column_sqls, table, candidate_indexes): + if column_sqls[0][1]: + print("table: ", table, "columns: ", column_sqls[0][0], + "type: ", column_sqls[0][1]) + else: + print("table: ", table, "columns: ", column_sqls[0][0]) + if (table, tuple(column_sqls[0][0]), column_sqls[0][1]) not in IndexItemFactory().indexes: + index = IndexItemFactory().get_index(table, column_sqls[0][0], 'local') + index.index_type = 'global' + else: + index = IndexItemFactory().get_index(table, column_sqls[0][0], column_sqls[0][1]) + index.is_candidate = True + candidate_indexes.append(index) + + +def filter_redundant_indexes(index_dict): + candidate_indexes = [] + for table, column_sqls in index_dict.items(): + # sorted using index_type and columns + sorted_column_sqls = sorted( + column_sqls.items(), key=lambda item: (item[0][1], item[0][0])) + merged_column_sqls = [] + # merge sqls + for i in range(len(sorted_column_sqls) - 1): + if re.match(sorted_column_sqls[i][0][0] + ',', sorted_column_sqls[i+1][0][0]) and \ + sorted_column_sqls[i][0][1] == sorted_column_sqls[i+1][0][1]: + sorted_column_sqls[i+1][1].extend(sorted_column_sqls[i][1]) + else: + merged_column_sqls.append(sorted_column_sqls[i]) + else: + merged_column_sqls.append(sorted_column_sqls[-1]) + # sort using columns + merged_column_sqls.sort(key=lambda item: item[0][0]) + for i in range(len(merged_column_sqls)-1): + # same columns + if merged_column_sqls[i][0][0] == \ + merged_column_sqls[i+1][0][0]: + print_candidate_indexes(merged_column_sqls[i], + table, + candidate_indexes) + continue + # left match for the partition table + if re.match(merged_column_sqls[i][0][0] + ',', + merged_column_sqls[i+1][0][0]): + merged_column_sqls[i+1][1].extend( + merged_column_sqls[i][1]) + merged_column_sqls[i+1] = ((merged_column_sqls[i+1][0][0], 'global'), + merged_column_sqls[i+1][1]) + continue + print_candidate_indexes(merged_column_sqls[i], table, candidate_indexes) + else: + print_candidate_indexes(merged_column_sqls[-1], table, candidate_indexes) + return candidate_indexes + + +def filter_duplicate_indexes(valid_index_dict, index_dict, workload, pos): 
+ for table in valid_index_dict.keys(): + if table not in index_dict.keys(): + index_dict[table] = {} + valid_index_dict[table].sort(key=lambda x: -len(x[0])) + for columns, index_type in valid_index_dict[table]: + if len(workload[pos].valid_index_list) >= FULL_ARRANGEMENT_THRESHOLD: + break + if (columns, index_type) in index_dict[table]: + index_dict[table][(columns, index_type)].append(pos) + else: + column_sql = {(columns, index_type): [pos]} + index_dict[table].update(column_sql) + workload[pos].valid_index_list.append( + IndexItemFactory().get_index(table, columns, index_type=index_type)) + + +def generate_candidate_indexes(workload, db): + index_dict = {} + if DRIVER: + db.init_conn_handle() + for k, query in enumerate(workload): + if not re.search(r'(\A|\s)select\s', query.statement.lower()): + continue + table_index_dict = db.query_index_advisor(query.statement) + valid_index_dict = get_valid_index_dict(table_index_dict, query, db) + # filter duplicate indexes + filter_duplicate_indexes(valid_index_dict, index_dict, workload, k) + + # filter redundant indexes + candidate_indexes = filter_redundant_indexes(index_dict) + if DRIVER: + db.close_conn() + return candidate_indexes + + +def get_atomic_config_for_query(indexes, config, ind, atomic_configs): + if ind == len(indexes): + table_count = {} + for index in config: + if index.table not in table_count.keys(): + table_count[index.table] = 1 + else: + table_count[index.table] += 1 + if len(table_count) > 2 or table_count[index.table] > 2: + return + atomic_configs.append(config) + + return + + get_atomic_config_for_query( + indexes, copy.copy(config), ind + 1, atomic_configs) + config.append(indexes[ind]) + get_atomic_config_for_query( + indexes, copy.copy(config), ind + 1, atomic_configs) + + +def is_same_config(config1, config2): + if len(config1) != len(config2): + return False + + for index1 in config1: + is_found = False + for index2 in config2: + if index1.table == index2.table and index1.columns == index2.columns \ + and index1.index_type == index2.index_type: + is_found = True + if not is_found: + return False + + return True + + +def generate_atomic_config(workload): + atomic_config_total = [] + + for query in workload: + if len(query.valid_index_list) == 0: + continue + + atomic_configs = [] + atomic_config = [] + get_atomic_config_for_query( + query.valid_index_list, atomic_config, 0, atomic_configs) + + is_found = False + for new_config in atomic_configs: + for exist_config in atomic_config_total: + if is_same_config(new_config, exist_config): + is_found = True + break + if not is_found: + atomic_config_total.append(new_config) + is_found = False + + return atomic_config_total + + +def atomic_config_is_valid(atomic_config, config): + is_exist = False + is_same = False + if len(atomic_config) == 1: + is_same = (config[-1] is atomic_config[0]) + for atomic_index in atomic_config: + is_exist = False + for index in config: + if index is atomic_index: + index.storage = atomic_index.storage + is_exist = True + break + if not is_exist: + break + return is_exist, is_same + + +# find the subsets of a given config in the atomic configs +def find_subsets_num(config, atomic_config_total): + atomic_subsets_num = [] + cur_index_atomic_pos = -1 + for i, atomic_config in enumerate(atomic_config_total): + if len(atomic_config) > len(config): + continue + is_exist, is_same = atomic_config_is_valid(atomic_config, config) + if is_same: + cur_index_atomic_pos = i + if is_exist: + atomic_subsets_num.append(i) + if cur_index_atomic_pos == -1: + 
raise ValueError("No atomic configs found for current config!") + return atomic_subsets_num, cur_index_atomic_pos + + +# infer the total cost of workload for a config according to the cost of atomic configs +def infer_workload_cost(workload, config, atomic_config_total): + total_cost = 0 + is_computed = False + atomic_subsets_num, cur_index_atomic_pos = find_subsets_num( + config, atomic_config_total) + if len(atomic_subsets_num) == 0: + raise ValueError("No atomic configs found for current config!") + if not config[-1].total_sql_num: + is_computed = True + for ind, obj in enumerate(workload): + if max(atomic_subsets_num) >= len(obj.cost_list): + raise ValueError("Wrong atomic config for current query!") + # compute the cost for selection + min_cost = obj.cost_list[0] + for num in atomic_subsets_num: + if num < len(obj.cost_list) and obj.cost_list[num] < min_cost: + min_cost = obj.cost_list[num] + total_cost += min_cost + + # record ineffective sql and negative sql for candidate indexes + if is_computed: + ExecuteFactory.record_ineffective_negative_sql( + config[-1], obj, ind) + return total_cost, cur_index_atomic_pos + + +def get_whole_index(db, history_indexes, history_invalid_index, detail_info): + if DRIVER: + db.init_conn_handle() + whole_index, redundant_indexes = \ + db.check_useless_index(history_indexes, history_invalid_index) + if DRIVER: + db.close_conn() + print_header_boundary(" Created indexes ") + detail_info['createdIndexes'] = [] + if not whole_index: + print("No created index!") + else: + for index in whole_index: + index_info = {'schemaName': index.schema, 'tbName': index.table, + 'columns': index.columns, 'statement': index.indexdef + ';'} + detail_info['createdIndexes'].append(index_info) + print("%s: %s;" % (index.schema, index.indexdef)) + return whole_index, redundant_indexes + + +def display_redundant_indexes(redundant_indexes, unused_index_columns, remove_list, detail_info): + for pos, index in enumerate(redundant_indexes): + is_redundant = False + for redundant_obj in index.redundant_obj: + # redundant objects are not in the useless index set or + # both redundant objects and redundant index in useless index must be redundant index + index_exist = redundant_obj.indexname not in unused_index_columns.keys() or \ + (unused_index_columns.get(redundant_obj.indexname) and + unused_index_columns.get(index.indexname)) + if index_exist: + is_redundant = True + if not is_redundant: + remove_list.append(pos) + for item in sorted(remove_list, reverse=True): + redundant_indexes.pop(item) + + if not redundant_indexes: + print("No redundant index!") + # redundant index + for index in redundant_indexes: + statement = "DROP INDEX %s.%s;" % \ + (index.schema, index.indexname) + print(statement) + existing_index = [item.indexname + ':' + + item.columns for item in index.redundant_obj] + redundant_index = {"schemaName": index.schema, "tbName": index.table, "type": 2, + "columns": index.columns, "statement": statement, + "existingIndex": existing_index} + detail_info['uselessIndexes'].append(redundant_index) + + +def display_useless_redundant_indexes(whole_indexes, redundant_indexes, + workload_indexes, detail_info): + indexes_name = set(index.indexname for index in whole_indexes) + unused_index = list(indexes_name.difference(workload_indexes)) + remove_list = [] + print_header_boundary(" Current workload useless indexes ") + detail_info['uselessIndexes'] = [] + # useless index not contain unique index + unused_index_columns = {} + has_unused_index = False + for cur_index in 
unused_index: + for index in whole_indexes: + if cur_index != index.indexname: + continue + # get useless index details from whole index + unused_index_columns[cur_index] = index.columns + if 'UNIQUE INDEX' not in index.indexdef: + has_unused_index = True + statement = "DROP INDEX %s;" % index.indexname + print(statement) + useless_index = {"schemaName": index.schema, "tbName": index.table, "type": 3, + "columns": index.columns, "statement": statement} + detail_info['uselessIndexes'].append(useless_index) + if not has_unused_index: + print("No useless index!") + print_header_boundary(" Redundant indexes ") + # filter redundant index + display_redundant_indexes( + redundant_indexes, unused_index_columns, remove_list, detail_info) + + +def greedy_determine_opt_config(workload, atomic_config_total, candidate_indexes, origin_sum_cost): + opt_config = [] + index_num_record = set() + min_cost = origin_sum_cost + for i in range(len(candidate_indexes)): + if i == 1 and min_cost == origin_sum_cost: + break + cur_min_cost = origin_sum_cost + cur_index = None + cur_index_num = -1 + for k, index in enumerate(candidate_indexes): + if k in index_num_record: + continue + cur_config = copy.copy(opt_config) + cur_config.append(index) + cur_estimated_cost, cur_index_atomic_pos = \ + infer_workload_cost(workload, cur_config, atomic_config_total) + if cur_estimated_cost < cur_min_cost: + cur_min_cost = cur_estimated_cost + cur_index = index + cur_index.atomic_pos = cur_index_atomic_pos + cur_index_num = k + if cur_index and cur_min_cost < min_cost: + if MAX_INDEX_STORAGE and sum([obj.storage for obj in opt_config]) + \ + cur_index.storage > MAX_INDEX_STORAGE: + candidate_indexes.remove(cur_index) + continue + if len(opt_config) == MAX_INDEX_NUM: + break + min_cost = cur_min_cost + opt_config.append(cur_index) + index_num_record.add(cur_index_num) + else: + break + + return opt_config + + +def get_last_indexes_result(input_path): + last_indexes_result_file = os.path.join( + os.path.dirname(input_path), 'index_result.json') + integrate_indexes = {'historyIndexes': {}} + if os.path.exists(last_indexes_result_file): + try: + with open(last_indexes_result_file, 'r') as file: + integrate_indexes['historyIndexes'] = json.load(file) + except json.JSONDecodeError: + return integrate_indexes + return integrate_indexes + + +def index_advisor_workload(history_advise_indexes, db, workload_file_path, + multi_iter_mode, show_detail): + workload_info = workload_compression(workload_file_path) + index_advisor = IndexAdvisor(db, workload_info, multi_iter_mode) + if multi_iter_mode: + opt_indexes = index_advisor.complex_index_advisor() + else: + opt_indexes = index_advisor.simple_index_advisor() + if opt_indexes: + index_advisor.filter_low_benefit_index(opt_indexes) + index_advisor.display_advise_indexes_info(opt_indexes, show_detail) + + index_advisor.generate_incremental_index(history_advise_indexes) + history_invalid_indexes = {} + index_advisor.generate_redundant_useless_indexes(history_invalid_indexes) + index_advisor.display_incremental_index( + history_invalid_indexes, workload_file_path) + if show_detail: + print_header_boundary(" Display detail information ") + sql_info = json.dumps( + index_advisor.display_detail_info, indent=4, separators=(',', ':')) + print(sql_info) + + +def check_parameter(args): + global MAX_INDEX_NUM, ENABLE_MULTI_NODE, MAX_INDEX_STORAGE, JSON_TYPE, DRIVER + if args.max_index_num is not None and args.max_index_num <= 0: + raise argparse.ArgumentTypeError("%s is an invalid positive int value" % + 
args.max_index_num) + if args.max_index_storage is not None and args.max_index_storage <= 0: + raise argparse.ArgumentTypeError("%s is an invalid positive int value" % + args.max_index_storage) + JSON_TYPE = args.json + MAX_INDEX_NUM = args.max_index_num + ENABLE_MULTI_NODE = args.multi_node + MAX_INDEX_STORAGE = args.max_index_storage + if args.U and args.U != getpass.getuser() and not args.W: + raise ValueError('Enter the \'-W\' parameter for user ' + + args.U + ' when executing the script.') + + +def main(argv): + arg_parser = argparse.ArgumentParser( + description='Generate index set for workload.') + arg_parser.add_argument("p", help="Port of database", type=int) + arg_parser.add_argument("d", help="Name of database", action=CheckValid) + arg_parser.add_argument( + "--h", help="Host for database", action=CheckValid) + arg_parser.add_argument( + "-U", help="Username for database log-in", action=CheckValid) + arg_parser.add_argument( + "f", help="File containing workload queries (One query per line)") + arg_parser.add_argument("--schema", help="Schema name for the current business data", + required=True, action=CheckValid) + arg_parser.add_argument( + "--max_index_num", help="Maximum number of suggested indexes", type=int) + arg_parser.add_argument("--max_index_storage", + help="Maximum storage of suggested indexes/MB", type=int) + arg_parser.add_argument("--multi_iter_mode", action='store_true', + help="Whether to use multi-iteration algorithm", default=False) + arg_parser.add_argument("--multi_node", action='store_true', + help="Whether to support distributed scenarios", default=False) + arg_parser.add_argument("--json", action='store_true', + help="Whether the workload file format is json", default=False) + arg_parser.add_argument("--driver", action='store_true', + help="Whether to employ python-driver", default=False) + arg_parser.add_argument("--show_detail", action='store_true', + help="Whether to show detailed sql information", default=False) + args = arg_parser.parse_args(argv) + + args.W = get_password() + check_parameter(args) + # Initialize the connection + global DRIVER + if args.driver: + try: + import psycopg2 + try: + from .dao.driver_execute import DriverExecute + except ImportError: + from dao.driver_execute import DriverExecute + db = DriverExecute(args.d, args.U, args.W, args.h, args.p, args.schema, + args.multi_node, args.max_index_storage) + except ImportError: + logging.warning('Python driver import failed, ' + 'the gsql mode will be selected to connect to the database.') + db = GSqlExecute(args.d, args.U, args.W, args.h, args.p, args.schema, + args.multi_node, args.max_index_storage) + db.init_conn_handle() + args.driver = None + else: + db = GSqlExecute(args.d, args.U, args.W, args.h, args.p, args.schema, + args.multi_node, args.max_index_storage) + db.init_conn_handle() + DRIVER = args.driver + if args.multi_node and not db.is_multi_node(): + raise argparse.ArgumentTypeError('--multi_node is only supported for distributed databases') + index_advisor_workload(get_last_indexes_result(args.f), db, args.f, + args.multi_iter_mode, args.show_detail) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/__init__.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/__init__.py new file mode 100644 index 000000000..0d9d90019 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+__version__ = '1.0.0'
+
+
+from .core.main import main
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/__main__.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/__main__.py
new file mode 100644
index 000000000..6894a1570
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/__main__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+import sys
+
+try:
+    from dbmind.components.opengauss_exporter import main
+except ImportError:
+    sys.path.append('..')
+    from opengauss_exporter import main
+
+main(sys.argv[1:])
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/__init__.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/__init__.py
new file mode 100644
index 000000000..6ac2204d4
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/controller.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/controller.py
new file mode 100644
index 000000000..e4736492d
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/controller.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
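+#
+# This module wires up the exporter's small HTTP surface: a root endpoint
+# that merely identifies the service, and a metrics handler that run()
+# attaches to the configurable telemetry path (the default passed in from
+# main.py is '/metrics') before starting the listener, optionally over TLS.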
+# from flask import Response, Flask
+# from waitress import serve
+
+from dbmind.common.http.http_service import HttpService
+from dbmind.common.http.http_service import Response
+from .service import query_all_metrics
+
+app = HttpService('DBMind-openGauss-exporter')
+
+
+@app.route('/', methods=['GET', 'POST'])
+def index(*args):
+    return Response('openGauss exporter (DBMind)')
+
+
+def metrics(*args):
+    return Response(query_all_metrics(), mimetype='text/plain')
+
+
+def run(host, port, telemetry_path, ssl_keyfile, ssl_certfile, ssl_keyfile_password):
+    app.attach(metrics, telemetry_path)
+    app.start_listen(host, port, ssl_keyfile, ssl_certfile, ssl_keyfile_password)
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/main.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/main.py
new file mode 100644
index 000000000..4b22b3aec
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/main.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+import argparse
+import getpass
+import logging
+import os
+import sys
+import re
+import tempfile
+from logging.handlers import TimedRotatingFileHandler
+
+import yaml
+
+from dbmind.common.daemon import Daemon
+from dbmind.common.utils import set_proc_title, check_ssl_certificate_remaining_days,\
+    check_ssl_file_permission
+from dbmind.common.utils import write_to_terminal
+from . import controller
+from . import service
+from .. import __version__
+
+ROOT_DIR_PATH = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..')
+)
+
+YAML_DIR_PATH = os.path.join(ROOT_DIR_PATH, 'yamls')
+DEFAULT_YAML = 'default.yml'
+PG_SETTINGS_YAML = 'pg_settings.yml'
+STATEMENTS_YAML = 'statements.yml'
+
+DEFAULT_LOGFILE = 'dbmind_opengauss_exporter.log'
+with tempfile.NamedTemporaryFile(suffix='.pid') as fp:
+    EXPORTER_PIDFILE_NAME = fp.name
+
+
+class PairAction(argparse.Action):
+    def __call__(self, parser, args, values, option_string=None):
+        d = dict()
+        try:
+            for pair in values.split(','):
+                name, value = pair.split('=')
+                d[name.strip()] = value.strip()
+            setattr(args, self.dest, d)
+        except ValueError:
+            parser.error('Illegal constant labels: %s.' % values)
+
+
+def wipe_off_password(dsn):
+    result = re.findall(r'[^:]*://[^:]*:(.+)@[^@]*:[^:]*/[^:]*', dsn)
+    if len(result) == 0:
+        result = re.findall(r'password=(.*)\s', dsn)
+        if len(result) == 0:
+            return "*********"
+
+    password = result[0]
+    if len(password) == 0:
+        return "*********"
+    return dsn.replace(password, '******')
+
+
+def path_type(path):
+    if os.path.exists(path):
+        return os.path.abspath(path)
+    else:
+        raise argparse.ArgumentTypeError('%s is not a valid path.' % path)
+
+
+def parse_argv(argv):
+    parser = argparse.ArgumentParser(
+        description='openGauss Exporter (DBMind): Monitoring for openGauss.'
+    )
+    parser.add_argument('--url', required=True, help='openGauss database target URL.')
+    parser.add_argument('--config', type=path_type, default=os.path.join(YAML_DIR_PATH, DEFAULT_YAML),
+                        help='path to the config file.')
+    parser.add_argument('--constant-labels', default='', action=PairAction,
+                        help='a list of label=value pairs separated by comma(,).')
+    parser.add_argument('--web.listen-address', default='127.0.0.1',
+                        help='address on which to expose metrics and web interface')
+    parser.add_argument('--web.listen-port', type=int, default=9187,
+                        help='listen port to expose metrics and web interface')
+    parser.add_argument('--web.telemetry-path', default='/metrics',
+                        help='path under which to expose metrics.')
+    parser.add_argument('--disable-cache', action='store_true',
+                        help='force not using cache.')
+    parser.add_argument('--disable-settings-metrics', action='store_true',
+                        help='do not collect pg_settings.yml metrics.')
+    parser.add_argument('--disable-statement-history-metrics', action='store_true',
+                        help='do not collect statement-history metrics (including slow queries).')
+    parser.add_argument('--disable-https', action='store_true',
+                        help='disable the HTTPS scheme')
+    parser.add_argument('--ssl-keyfile', type=path_type, help='set the path of the SSL key file')
+    parser.add_argument('--ssl-certfile', type=path_type, help='set the path of the SSL certificate file')
+    parser.add_argument('--parallel', default=5, type=int,
+                        help='the number of worker threads used to collect metrics in parallel.')
+    parser.add_argument('--log.filepath', type=os.path.abspath, default=os.path.join(os.getcwd(), DEFAULT_LOGFILE),
+                        help='the path of the log file')
+    parser.add_argument('--log.level', default='info', choices=('debug', 'info', 'warn', 'error', 'fatal'),
+                        help='only log messages with the given severity or above.'
+                             ' Valid levels: [debug, info, warn, error, fatal]')
+    parser.add_argument('--version', action='version', version=__version__)
+
+    args = parser.parse_args(argv)
+    ssl_keyfile_pwd = None
+    if args.disable_https:
+        # Clear up redundant arguments.
+        args.ssl_keyfile = None
+        args.ssl_certfile = None
+    else:
+        if not (args.ssl_keyfile and args.ssl_certfile):
+            parser.error('If you use the HTTPS protocol (default), you need to give the argument values '
+                         'of --ssl-keyfile and --ssl-certfile. '
+                         'Otherwise, use the --disable-https argument to disable the HTTPS protocol.')
+        else:
+            # Need to check whether the key file has been encrypted.
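+            # A key in the traditional (PKCS#1) PEM format announces encryption
+            # with header lines such as:
+            #     Proc-Type: 4,ENCRYPTED
+            #     DEK-Info: AES-256-CBC,...
+            # so scanning for a 'Proc-Type' line that mentions ENCRYPTED is
+            # sufficient here; note that an encrypted PKCS#8 key (the
+            # 'BEGIN ENCRYPTED PRIVATE KEY' form) carries no such header and
+            # would not be detected by this loop.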
+            with open(args.ssl_keyfile) as fp:
+                for line in fp.readlines():
+                    if line.startswith('Proc-Type') and 'ENCRYPTED' in line.upper():
+                        ssl_keyfile_pwd = ''
+                        while not ssl_keyfile_pwd:
+                            ssl_keyfile_pwd = getpass.getpass('Enter PEM pass phrase:')
+    setattr(args, 'keyfile_password', ssl_keyfile_pwd)
+    return args
+
+
+def set_logger(filepath, level):
+    level = level.upper()
+    log_path = os.path.dirname(filepath)
+    if not os.path.isdir(log_path):
+        os.makedirs(log_path, 0o700)
+
+    formatter = logging.Formatter(
+        '[%(asctime)s]'
+        '[%(filename)s:%(lineno)d]'
+        '[%(funcName)s][%(levelname)s][%(threadName)s] '
+        '- %(message)s'
+    )
+    handler = TimedRotatingFileHandler(
+        filename=filepath,
+        when='D',
+        interval=1,
+        backupCount=15,
+        encoding='UTF-8',
+        delay=False,
+        utc=True
+    )
+    handler.setFormatter(formatter)
+    handler.setLevel(level)
+    default_logger = logging.getLogger()
+    default_logger.setLevel(level)
+    default_logger.addHandler(handler)
+
+
+class ExporterMain(Daemon):
+    def clean(self):
+        if os.path.exists(self.pid_file):
+            os.unlink(self.pid_file)
+
+    def __init__(self, argv):
+        self.args = parse_argv(argv)
+        self.pid_file = EXPORTER_PIDFILE_NAME
+        super().__init__(self.pid_file)
+
+    def run(self):
+        # Wipe off the password in the url for the process title.
+        try:
+            url = self.args.url
+            wiped_url = wipe_off_password(url)
+            with open('/proc/self/cmdline') as fp:
+                cmdline = fp.readline().replace('\x00', ' ')
+            wiped_cmdline = cmdline.replace(url, wiped_url)
+            set_proc_title(wiped_cmdline)
+        except FileNotFoundError:
+            # ignore
+            pass
+
+        set_logger(self.args.__dict__['log.filepath'],
+                   self.args.__dict__['log.level'])
+        try:
+            service.config_collecting_params(
+                url=self.args.url,
+                parallel=self.args.parallel,
+                disable_cache=self.args.disable_cache,
+                constant_labels=self.args.constant_labels,
+            )
+        except ConnectionError:
+            write_to_terminal('Failed to connect to the URL, exiting...', color='red')
+            sys.exit(1)
+        if not self.args.disable_settings_metrics:
+            with open(os.path.join(YAML_DIR_PATH, PG_SETTINGS_YAML), errors='ignore') as fp:
+                service.register_metrics(yaml.load(fp, Loader=yaml.FullLoader))
+        if not self.args.disable_statement_history_metrics:
+            with open(os.path.join(YAML_DIR_PATH, STATEMENTS_YAML), errors='ignore') as fp:
+                service.register_metrics(
+                    yaml.load(fp, Loader=yaml.FullLoader),
+                    force_connection_db='postgres'
+                )
+        with open(self.args.config, errors='ignore') as fp:
+            service.register_metrics(yaml.load(fp, Loader=yaml.FullLoader))
+
+        check_ssl_file_permission(self.args.ssl_keyfile, self.args.ssl_certfile)
+        check_ssl_certificate_remaining_days(self.args.ssl_certfile)
+
+        controller.run(
+            host=self.args.__dict__['web.listen_address'],
+            port=self.args.__dict__['web.listen_port'],
+            telemetry_path=self.args.__dict__['web.telemetry_path'],
+            ssl_keyfile=self.args.ssl_keyfile,
+            ssl_certfile=self.args.ssl_certfile,
+            ssl_keyfile_password=self.args.keyfile_password
+        )
+
+
+def main(argv):
+    ExporterMain(argv).start()
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/opengauss_driver.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/opengauss_driver.py
new file mode 100644
index 000000000..642ccb7b3
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/opengauss_driver.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+import logging
+import time
+from threading import local
+
+import psycopg2
+import psycopg2.errors
+import psycopg2.extensions
+import psycopg2.extras
+
+
+class Driver:
+    def __init__(self):
+        self._url = None
+        self.parsed_dsn = None
+        self.initialized = False
+        self._conn = local()
+
+    def initialize(self, url):
+        self._url = url
+        self.parsed_dsn = psycopg2.extensions.parse_dsn(self._url)
+        self.initialized = True
+        try:
+            dsn = self._url
+            conn = psycopg2.connect(dsn)
+            conn.cursor().execute('select 1;')
+            conn.close()
+        except Exception:
+            raise ConnectionError()
+
+    @property
+    def address(self):
+        return '%s:%s' % (self.parsed_dsn['host'], self.parsed_dsn['port'])
+
+    @property
+    def dbname(self):
+        return self.parsed_dsn['dbname']
+
+    def query(self, stmt, timeout=0, force_connection_db=None):
+        if not self.initialized:
+            raise AssertionError()
+
+        try:
+            conn = self.get_conn(force_connection_db)
+            with conn.cursor(
+                    cursor_factory=psycopg2.extras.RealDictCursor
+            ) as cursor:
+                try:
+                    start = time.monotonic()
+                    if timeout > 0:
+                        cursor.execute('SET statement_timeout = %d;' % (timeout * 1000))
+                    cursor.execute(stmt)
+                    result = cursor.fetchall()
+                except psycopg2.extensions.QueryCanceledError as e:
+                    logging.error('%s: %s.' % (e.pgerror, stmt))
+                    logging.info(
+                        'Time elapsed during execution is %fs '
+                        'but threshold is %fs.' % (time.monotonic() - start, timeout)
+                    )
+                    result = []
+            conn.commit()
+        except psycopg2.InternalError as e:
+            logging.error("Cannot execute '%s' due to internal error: %s." % (stmt, e.pgerror))
+            result = []
+        except Exception as e:
+            logging.exception(e)
+            result = []
+
+        return result
+
+    def get_conn(self, force_connection_db=None):
+        """Cache the connection in the thread so that the thread can
+        reuse this connection next time, thereby avoiding repeated creation.
+        This way, database queries are thread-safe, and the cached
+        per-thread connections also act as a lightweight connection pool."""
+        # If the query has to connect to another database by force, generate and cache
+        # a new connection as follows.
+        if force_connection_db:
+            db_name = force_connection_db
+            parsed_dsn = self.parsed_dsn.copy()
+            parsed_dsn['dbname'] = force_connection_db
+            dsn = ' '.join(['{}={}'.format(k, v) for k, v in parsed_dsn.items()])
+        else:
+            db_name = self.dbname
+            dsn = self._url
+
+        if not hasattr(self._conn, db_name) or getattr(self._conn, db_name).closed:
+            setattr(self._conn, db_name, psycopg2.connect(dsn))
+        # Check whether the connection has timed out or become invalid.
+        try:
+            getattr(self._conn, db_name).cursor().execute('select 1;')
+        except (
+                psycopg2.InternalError,
+                psycopg2.InterfaceError,
+                psycopg2.errors.AdminShutdown,
+                psycopg2.OperationalError
+        ) as e:
+            logging.warning(
+                'Cached database connection to openGauss has timed out due to %s, reconnecting.' % e
+            )
+            setattr(self._conn, db_name, psycopg2.connect(dsn))
+        except Exception as e:
+            logging.error('Failed to connect to openGauss with the cached connection (%s), trying to reconnect.'
+                          % e)
+            setattr(self._conn, db_name, psycopg2.connect(dsn))
+        return getattr(self._conn, db_name)
+
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/service.py b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/service.py
new file mode 100644
index 000000000..beb4b3350
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/core/service.py
@@ -0,0 +1,306 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+import logging
+import time
+from concurrent.futures import as_completed
+from concurrent.futures.thread import ThreadPoolExecutor
+from datetime import datetime
+
+from prometheus_client import (
+    Gauge, Summary, Histogram, Info, Enum
+)
+from prometheus_client.exposition import generate_latest
+from prometheus_client.registry import CollectorRegistry
+
+from dbmind.common.utils import dbmind_assert
+from .opengauss_driver import Driver
+
+statusEnable = "enable"
+statusDisable = "disable"
+defaultVersion = ">=0.0.0"
+
+PROMETHEUS_TYPES = {
+    # Indeed, COUNTER should use the type `Counter` rather than `Gauge`,
+    # but PG-exporter and openGauss-exporter (golang version)
+    # both use ConstValue (i.e., the same action as Gauge),
+    # so we have to inherit the usage.
+    'COUNTER': Gauge, 'GAUGE': Gauge, 'SUMMARY': Summary,
+    'HISTOGRAM': Histogram, 'INFO': Info, 'ENUM': Enum
+}
+
+PROMETHEUS_LABEL = 'LABEL'
+PROMETHEUS_DISCARD = 'DISCARD'
+FROM_INSTANCE_KEY = 'from_instance'
+
+driver = Driver()
+_thread_pool_executor = None
+_registry = CollectorRegistry()
+
+_use_cache = True
+global_labels = {FROM_INSTANCE_KEY: ''}
+
+_dbrole = 'primary'
+_dbversion = '9.2.24'
+
+query_instances = list()
+
+
+def is_valid_version(version):
+    return True
+
+
+def cast_to_numeric(v):
+    if v is None:
+        return float('nan')
+    elif isinstance(v, datetime):
+        return int(v.timestamp() * 1000)
+    else:
+        return float(v)
+
+
+class Query:
+    """Maybe only a SQL statement for PG exporter."""
+
+    def __init__(self, item):
+        self.name = item.get('name')
+        self.sql = item['sql']
+        self.version = item.get('version', defaultVersion)
+        self.timeout = item.get('timeout')
+        self.ttl = item.get('ttl', 0)  # cache_seconds for PG exporter
+        self.status = item.get('status', 'enable') == 'enable'  # enable or disable
+        self.dbrole = item.get('dbRole') or 'primary'  # primary, standby, ...
+
+        self._cache = None
+        self._last_scrape_timestamp = 0
+
+    def fetch(self, alternative_timeout, force_connection_db=None):
+        current_timestamp = int(time.time() * 1000)
+        mapper = {
+            'last_scrape_timestamp': self._last_scrape_timestamp,
+            'scrape_interval': current_timestamp - self._last_scrape_timestamp
+        }
+
+        if self._cache and (current_timestamp - self._last_scrape_timestamp) < (self.ttl * 1000):
+            return self._cache
+
+        # Refresh cache:
+        # If the query gives an explicit timeout, then use it,
+        # otherwise use the passed `alternative_timeout`.
+        formatted = self.sql.format_map(mapper)  # If the sql has placeholders, render them.
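+        # For instance, a SQL template such as
+        #     SELECT * FROM some_view WHERE ts > {last_scrape_timestamp};
+        # (a made-up illustration, not one of the shipped queries) would be
+        # rendered here with the millisecond timestamps computed above.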
+        logging.debug('Query the SQL statement: %s.', formatted)
+        self._cache = driver.query(formatted,
+                                   self.timeout or alternative_timeout,
+                                   force_connection_db)
+        self._last_scrape_timestamp = current_timestamp
+        return self._cache
+
+
+class Metric:
+    """Metric family structure:
+    Only parsing the metric dict and
+    lazy loading the Prometheus metric object."""
+
+    def __init__(self, item):
+        self.name = item['name']
+        self.desc = item.get('description', '')
+        self.usage = item['usage'].upper()
+        self.value = None
+        self.prefix = ''
+        self.is_label = False
+        self.is_valid = False
+
+        if self.usage in PROMETHEUS_TYPES:
+            """Supported metric type."""
+            self.is_valid = True
+        elif self.usage == PROMETHEUS_LABEL:
+            """Use the `is_label` field to mark this metric as a label."""
+            self.is_label = True
+            self.is_valid = True
+        elif self.usage == PROMETHEUS_DISCARD:
+            """DISCARD means do nothing."""
+            self.is_valid = False
+        else:
+            raise ValueError('Unsupported usage %s.' % self.usage)
+
+    def activate(self, labels=()):
+        """Instantiate specific Prometheus metric objects."""
+        dbmind_assert(not self.is_label and self.prefix)
+
+        self.value = PROMETHEUS_TYPES[self.usage](
+            # Prefix the query instance name to the specific metric.
+            '%s_%s' % (self.prefix, self.name), self.desc, labels
+        )
+        return self.value
+
+
+class QueryInstance:
+    def __init__(self, d):
+        self.name = d['name']
+        self.desc = d.get('desc', '')
+        self.queries = list()
+        self.metrics = list()
+        self.labels = list()
+        self.status = d.get('status', 'enable') == 'enable'
+        self.ttl = d.get('ttl', 0)
+        self.timeout = d.get('timeout', 0)
+        self.public = d.get('public', True)
+
+        # Compatible with the PG-exporter format:
+        # convert the query field into a list.
+        if isinstance(d['query'], str):
+            d['query'] = [
+                {'name': self.name, 'sql': d['query'], 'ttl': self.ttl, 'timeout': self.timeout}
+            ]
+
+        dbmind_assert(isinstance(d['query'], list))
+        for q in d['query']:
+            # Compatible with PG-exporter
+            query = Query(q)
+            # TODO: check whether the query is invalid.
+            if query.status and query.dbrole == _dbrole and is_valid_version(query.version):
+                self.queries.append(query)
+            else:
+                logging.info('Skip the query %s (status: %s, dbRole: %s, version: %s).' % (
+                    query.name, query.status, query.dbrole, query.version))
+
+        for m in d['metrics']:
+            # Compatible with PG-exporter
+            if len(m) == len({'metric_name': {'usage': '?', 'description': '?'}}):
+                # Convert to the openGauss-exporter format.
+                # The following is a demo for the metric structure in the openGauss-exporter:
+                # {'name': 'metric_name', 'usage': '?', 'description': '?'}
+                name, value = next(iter(m.items()))
+                m = {'name': name}
+                m.update(value)
+
+            # Parse the dict structure to a Metric object, then we can
+            # use this object's fields directly.
+            metric = Metric(m)
+            if not metric.is_valid:
+                continue
+            if not metric.is_label:
+                metric.prefix = self.name
+                self.metrics.append(metric)
+            else:
+                self.labels.append(metric.name)
+
+        # `global_labels` is required and must always be added.
+        self.labels.extend(global_labels.keys())
+        self._forcing_db = None
+
+    def register(self, registry):
+        for metric in self.metrics:
+            registry.register(
+                metric.activate(self.labels)
+            )
+
+    def force_query_into_another_db(self, db_name):
+        self._forcing_db = db_name
+
+    def update(self):
+        # Clear the old metrics' values and their labels.
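+        # prometheus_client keeps every labelled child created through
+        # .labels() until it is removed, so clearing first prevents label
+        # sets that have disappeared from the query result from continuing
+        # to report stale values.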
+        for metric in self.metrics:
+            metric.value.clear()
+
+        for query in self.queries:
+            # Force the query into connecting to the specific database
+            # rather than the default database, if needed.
+            try:
+                rows = query.fetch(self.timeout, self._forcing_db)
+            except Exception as e:
+                logging.exception(e)
+                logging.info("The failing SQL statement is '%s'.", query.sql)
+                continue
+            else:
+                if len(rows) == 0:
+                    logging.warning("Fetched nothing for metric '%s'." % query.name)
+                    continue
+
+            # Update all metrics in the current query instance.
+            for row in rows:
+                # `global_labels` is the essential label set for each metric family.
+                labels = {label: str(row.get(label, global_labels.get(label))) for label in self.labels}
+                for metric in self.metrics:
+                    metric_family = metric.value.labels(**labels)
+                    value = row.get(metric.name)
+                    # None is equivalent to NaN instead of zero.
+                    if value is None:
+                        logging.warning(
+                            'Field %s not found in %s.', metric.name, self.name
+                        )
+
+                    value = cast_to_numeric(value)
+                    # Different usages (Prometheus data types) have different setting methods.
+                    # Thus, we have to select different if-branches according to the metric's usage.
+                    if metric.usage == 'COUNTER':
+                        metric_family.set(value)
+                    elif metric.usage == 'GAUGE':
+                        metric_family.set(value)
+                    elif metric.usage == 'SUMMARY':
+                        metric_family.observe(value)
+                    elif metric.usage == 'HISTOGRAM':
+                        metric_family.observe(value)
+                    else:
+                        logging.error(
+                            'Unsupported metric %s due to usage %s.' % (metric.name, metric.usage)
+                        )
+
+
+def config_collecting_params(url, parallel, disable_cache, constant_labels):
+    global _use_cache, _thread_pool_executor
+
+    driver.initialize(url)
+    _thread_pool_executor = ThreadPoolExecutor(max_workers=parallel)
+    _use_cache = not disable_cache
+    # Append extra labels, including essential labels (e.g., from_instance)
+    # and constant labels from the user's configuration.
+    global_labels[FROM_INSTANCE_KEY] = driver.address
+    global_labels.update(constant_labels)
+    logging.info(
+        'Monitoring %s, use cache: %s, extra labels: %s.',
+        global_labels[FROM_INSTANCE_KEY], _use_cache, global_labels
+    )
+
+
+def register_metrics(parsed_yml, force_connection_db=None):
+    """Some metrics need to be queried on a specific database
+    (e.g., tables or views under the dbe_perf schema need
+    to be queried on the `postgres` database).
+    Therefore, we cannot specify that all metrics are collected
+    through the default database,
+    and this is the purpose of the parameter `force_connection_db`.
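+
+    A typical invocation, mirroring ExporterMain.run() above:
+
+        with open('statements.yml') as fp:
+            register_metrics(yaml.load(fp, Loader=yaml.FullLoader),
+                             force_connection_db='postgres')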
+    """
+    dbmind_assert(isinstance(parsed_yml, dict))
+
+    for name, raw_query_instance in parsed_yml.items():
+        dbmind_assert(isinstance(raw_query_instance, dict))
+
+        raw_query_instance.setdefault('name', name)
+        instance = QueryInstance(raw_query_instance)
+        instance.force_query_into_another_db(force_connection_db)
+        instance.register(_registry)
+        query_instances.append(instance)
+
+
+def query_all_metrics():
+    futures = []
+    for instance in query_instances:
+        futures.append(_thread_pool_executor.submit(instance.update))
+
+    for future in as_completed(futures):
+        try:
+            future.result()
+        except Exception as e:
+            logging.exception(e)
+
+    return generate_latest(_registry)
diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/default.yml b/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/default.yml
new file mode 100644
index 000000000..6b6f908e2
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/default.yml
@@ -0,0 +1,1976 @@
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+pg_db:
+  name: pg_db
+  desc: OpenGauss database statistics
+  query:
+    - name: pg_db
+      sql: |-
+        SELECT d.datid,d.datname,numbackends,
+        xact_commit,xact_rollback,xact_rollback + xact_commit AS xact_total,
+        blks_read,blks_hit,blks_read + blks_hit AS blks_access,
+        tup_returned,tup_fetched,tup_inserted,tup_updated,tup_deleted,tup_inserted + tup_updated + tup_deleted AS tup_modified,
+        conflicts,temp_files,temp_bytes,deadlocks,
+        blk_read_time,blk_write_time, extract(epoch from stats_reset) as stats_reset,
+        confl_tablespace,confl_lock,confl_snapshot,confl_bufferpin,confl_deadlock
+        FROM pg_stat_database d,pg_stat_database_conflicts pdc
+        WHERE pdc.datname = d.datname and d.datname NOT IN ('postgres', 'template0', 'template1');
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: -1
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: datid
+      description: OID of a database
+      usage: LABEL
+    - name: datname
+      description: Name of this database
+      usage: LABEL
+    - name: numbackends
+      description: Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset.
+      usage: GAUGE
+    - name: xact_commit
+      description: Number of transactions in this database that have been committed
+      usage: COUNTER
+    - name: xact_rollback
+      description: Number of transactions in this database that have been rolled back
+      usage: COUNTER
+    - name: blks_read
+      description: Number of disk blocks read in this database
+      usage: COUNTER
+    - name: blks_hit
+      description: Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the OpenGauss buffer cache, not the operating system's file system cache)
+      usage: COUNTER
+    - name: tup_returned
+      description: Number of rows returned by queries in this database
+      usage: COUNTER
+    - name: tup_fetched
+      description: Number of rows fetched by queries in this database
+      usage: COUNTER
+    - name: tup_inserted
+      description: Number of rows inserted by queries in this database
+      usage: COUNTER
+    - name: tup_updated
+      description: Number of rows updated by queries in this database
+      usage: COUNTER
+    - name: tup_deleted
+      description: Number of rows deleted by queries in this database
+      usage: COUNTER
+    - name: conflicts
+      description: Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see pg_stat_database_conflicts for details.)
+      usage: COUNTER
+    - name: temp_files
+      description: Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the log_temp_files setting.
+      usage: COUNTER
+    - name: temp_bytes
+      description: Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.
+      usage: COUNTER
+    - name: deadlocks
+      description: Number of deadlocks detected in this database
+      usage: COUNTER
+    - name: blk_read_time
+      description: Time spent reading data file blocks by backends in this database, in milliseconds
+      usage: COUNTER
+    - name: blk_write_time
+      description: Time spent writing data file blocks by backends in this database, in milliseconds
+      usage: COUNTER
+    - name: stats_reset
+      description: Time at which these statistics were last reset
+      usage: COUNTER
+    - name: confl_tablespace
+      description: Number of queries in this database that have been canceled due to dropped tablespaces
+      usage: COUNTER
+    - name: confl_lock
+      description: Number of queries in this database that have been canceled due to lock timeouts
+      usage: COUNTER
+    - name: confl_snapshot
+      description: Number of queries in this database that have been canceled due to old snapshots
+      usage: COUNTER
+    - name: confl_bufferpin
+      description: Number of queries in this database that have been canceled due to pinned buffers
+      usage: COUNTER
+    - name: confl_deadlock
+      description: Number of queries in this database that have been canceled due to deadlocks
+      usage: COUNTER
+  status: enable
+  ttl: -1
+  timeout: 1
+  public: true
+
+pg_meta:
+  name: pg_meta
+  desc: OpenGauss database directory
+  query:
+    - name: pg_meta
+      sql: |-
+        SELECT (SELECT system_identifier FROM pg_control_system()) AS cluster_id,
+        current_setting('port') AS listen_port,
+        current_setting('wal_level') AS wal_level,
+        current_setting('server_version') AS version,
+        current_setting('server_version_num') AS ver_num,
+        'N/A' AS primary_conninfo,
+        1 AS info;
+      version: '>=0.0.0'
+      timeout: 1
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: cluster_id
+      description: cluster system identifier
+      usage: LABEL
+    - name: listen_port
+      description: listen port
+      usage: LABEL
+    - name: wal_level
+      description: wal level
+      usage: LABEL
+    - name: version
+      description: server version in human readable format
+      usage: LABEL
+    - name: ver_num
+      description: server version number in machine readable format
+      usage: LABEL
+    - name: primary_conninfo
+      description: connection string to upstream (do not set password here)
+      usage: LABEL
+    - name: info
+      description: constant 1
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+pg_connections:
+  name: pg_connections
+  desc: OpenGauss database connections
+  query:
+    - name: pg_connections
+      sql: select max_conn,used_conn,max_conn-used_conn res_for_normal from (select count(*) used_conn from pg_stat_activity) t1,(select setting::int max_conn from pg_settings where name='max_connections') t2;
+      version: '>=0.0.0'
+      timeout: 1
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: max_conn
+      description: total number of connections
+      usage: GAUGE
+    - name: used_conn
+      description: number of used connections
+      usage: GAUGE
+    - name: res_for_normal
+      description: number of reserved connections
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+pg_session_connection:
+  name: pg_session_connection
+  desc: OpenGauss backend activity group by state
+  query:
+    - name: pg_session_connection
+      sql: select client_addr,state,count(1) as count from pg_stat_activity group by client_addr,state order by 3 desc limit 20;
+      version: '>=0.0.0'
+      timeout: 1
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: client_addr
+      description: client address
+      usage: LABEL
+    - name: state
+      description: session state
+      usage: LABEL
+    - name: count
+      description: session count
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_stat_activity
+# ┃ OpenGauss backend activity group by state
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 1s
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ LABEL    datname            Name of this database
+# ┃ LABEL    state              connection state
+# ┃ GAUGE    count              number of connections in this state
+# ┃ GAUGE    max_duration       max duration since state change among (datname, state)
+# ┃ GAUGE    max_tx_duration    max duration in seconds any active transaction has been running
+# ┃ GAUGE    max_conn_duration  max backend session duration since state change among (datname, state)
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_stat_activity_count{datname,state}              GAUGE  number of connections in this state
+# ┃ pg_stat_activity_max_duration{datname,state}       GAUGE  max duration since state change among (datname, state)
+# ┃ pg_stat_activity_max_tx_duration{datname,state}    GAUGE  max duration in seconds any active transaction has been running
+# ┃ pg_stat_activity_max_conn_duration{datname,state}  GAUGE  max backend session duration since state change among (datname, state)
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_activity:
+  name: pg_activity
+  desc: OpenGauss backend activity group by state
+  query:
+    - name: pg_activity
+      sql: |-
+        SELECT datname,
+        state,
+        coalesce(count, 0) AS count,
+        coalesce(max_duration, 0) AS max_duration,
+        coalesce(max_tx_duration, 0) AS max_tx_duration,
+        coalesce(max_conn_duration, 0) AS max_conn_duration
+        FROM (SELECT d.oid AS database, d.datname, a.state
+        FROM pg_database d,
+        unnest(ARRAY ['active','idle','idle in transaction','idle in transaction (aborted)','fastpath function call','disabled']) a(state)
+        WHERE d.datname NOT IN ('template0','template1')) base
+        LEFT JOIN (
+        SELECT datname, state,
+        count(*) AS count,
+        max(extract(epoch from now() - state_change)) AS max_duration,
+        max(extract(epoch from now() - xact_start)) AS max_tx_duration,
+        max(extract(epoch from now() - backend_start)) AS max_conn_duration
+        FROM pg_stat_activity WHERE pid <> pg_backend_pid()
+        GROUP BY datname, state
+        ) a USING (datname, state);
+      version: '>=1.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: datname
+      description: Name of this database
+      usage: LABEL
+    - name: state
+      description: connection state
+      usage: LABEL
+    - name: count
+      description: number of connections in this state
+      usage: GAUGE
+    - name: max_duration
+      description: max duration since state change among (datname, state)
+      usage: GAUGE
+    - name: max_tx_duration
+      description: max duration in seconds any active transaction has been running
+      usage: GAUGE
+    - name: max_conn_duration
+      description: max backend session duration since state change among (datname, state)
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_downstream
+# ┃ openGauss replication client count group by state
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 100ms
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ LABEL    state  downstream state
+# ┃ GAUGE    count  downstream count
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_downstream_count{state}  GAUGE  downstream count
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_downstream:
+  name: pg_downstream
+  desc: openGauss replication client count group by state
+  query:
+    - name: pg_downstream
+      sql: |
+        SELECT l.state, coalesce(count, 0 ) AS count
+        FROM unnest(ARRAY ['Streaming','Startup','Catchup', 'Backup', 'Stopping']) l(state)
+        LEFT JOIN (SELECT state, count(*) AS count FROM pg_stat_replication GROUP BY state)r ON l.state = r.state
+      version: '>=0.0.0'
+      timeout: 0.5
+      ttl: 10
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: state
+      description: downstream state
+      usage: LABEL
+    - name: count
+      description: downstream count
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 0.1
+  public: true
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_stat_replication
+# ┃
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 1s
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ LABEL    pid               unique walsender pid
+# ┃ LABEL    client_addr       client address of wal receiver
+# ┃ LABEL    application_name  application name of standby
+# ┃ LABEL    state             replication state startup|catchup|streaming|backup|stopping
+# ┃ LABEL    sync_state        replication sync state async|potential|sync|quorum
+# ┃ COUNTER  lsn               current log position on this server
+# ┃ GAUGE    sent_diff         last log position sent to this standby server diff with current lsn
+# ┃ GAUGE    write_diff        last log position written to disk by this standby server diff with current lsn
+# ┃ GAUGE    flush_diff        last log position flushed to disk by this standby server diff with current lsn
+# ┃ GAUGE    replay_diff       last log position replayed into the database on this standby server diff with current lsn
+# ┃ COUNTER  sent_lsn          last log position sent to this standby server
+# ┃ COUNTER  write_lsn         last log position written to disk by this standby server
+# ┃ COUNTER  flush_lsn         last log position flushed to disk by this standby server
+# ┃ COUNTER  replay_lsn        last log position replayed into the database on this standby server
+# ┃ GAUGE    write_lag         latest ACK lsn diff with write (sync-remote-write lag)
+# ┃ GAUGE    flush_lag         latest ACK lsn diff with flush (sync-remote-flush lag)
+# ┃ GAUGE    replay_lag        latest ACK lsn diff with replay (sync-remote-apply lag)
+# ┃ GAUGE    backend_uptime    how long since standby connect to this server
+# ┃ GAUGE    backend_xmin      this standby's xmin horizon reported by hot_standby_feedback.
+# ┃ GAUGE    sync_priority     priority of being chosen as synchronous standby
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_stat_replication_lsn{pid,client_addr,application_name,state,sync_state}             COUNTER  current log position on this server
+# ┃ pg_stat_replication_sent_diff{pid,client_addr,application_name,state,sync_state}       GAUGE    last log position sent to this standby server diff with current lsn
+# ┃ pg_stat_replication_write_diff{pid,client_addr,application_name,state,sync_state}      GAUGE    last log position written to disk by this standby server diff with current lsn
+# ┃ pg_stat_replication_flush_diff{pid,client_addr,application_name,state,sync_state}      GAUGE    last log position flushed to disk by this standby server diff with current lsn
+# ┃ pg_stat_replication_replay_diff{pid,client_addr,application_name,state,sync_state}     GAUGE    last log position replayed into the database on this standby server diff with current lsn
+# ┃ pg_stat_replication_sent_lsn{pid,client_addr,application_name,state,sync_state}        COUNTER  last log position sent to this standby server
+# ┃ pg_stat_replication_write_lsn{pid,client_addr,application_name,state,sync_state}       COUNTER  last log position written to disk by this standby server
+# ┃ pg_stat_replication_flush_lsn{pid,client_addr,application_name,state,sync_state}       COUNTER  last log position flushed to disk by this standby server
+# ┃ pg_stat_replication_replay_lsn{pid,client_addr,application_name,state,sync_state}      COUNTER  last log position replayed into the database on this standby server
+# ┃ pg_stat_replication_write_lag{pid,client_addr,application_name,state,sync_state}       GAUGE    latest ACK lsn diff with write (sync-remote-write lag)
+# ┃ pg_stat_replication_flush_lag{pid,client_addr,application_name,state,sync_state}       GAUGE    latest ACK lsn diff with flush (sync-remote-flush lag)
+# ┃ pg_stat_replication_replay_lag{pid,client_addr,application_name,state,sync_state}      GAUGE    latest ACK lsn diff with replay (sync-remote-apply lag)
+# ┃ pg_stat_replication_backend_uptime{pid,client_addr,application_name,state,sync_state}  GAUGE    how long since standby connect to this server
+# ┃ pg_stat_replication_backend_xmin{pid,client_addr,application_name,state,sync_state}    GAUGE    this standby's xmin horizon reported by hot_standby_feedback.
+# ┃ pg_stat_replication_sync_priority{pid,client_addr,application_name,state,sync_state}   GAUGE    priority of being chosen as synchronous standby
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_replication:
+  name: pg_replication
+  query:
+    - name: pg_replication
+      sql: |-
+        select pid,client_addr,application_name,state,sync_state,lsn,
+        lsn - sent_location as sent_diff,lsn - write_location as write_diff,lsn - flush_location as flush_diff,lsn - replay_location as replay_diff,
+        sent_location,write_location,flush_location,replay_location,replay_lag,backend_uptime,sync_priority
+        from
+        (
+        select pr.pid,client_addr,application_name,pr.state,pr.sync_state,
+        pg_xlog_location_diff (case when pg_is_in_recovery() then pg_last_xlog_receive_location() else pg_current_xlog_location() end, '0/0') as lsn,
+        pg_xlog_location_diff(pr.sender_sent_location,'0/0') as sent_location,
+        pg_xlog_location_diff(pr.receiver_write_location,'0/0') as write_location,
+        pg_xlog_location_diff(pr.receiver_flush_location,'0/0') as flush_location,
+        pg_xlog_location_diff(pr.receiver_replay_location,'0/0') as replay_location,
+        pg_xlog_location_diff(pr.receiver_replay_location, pg_current_xlog_location()) as replay_lag,
+        extract(EPOCH from now() - backend_start) as backend_uptime,pr.sync_priority
+        from
+        pg_stat_replication pr
+        );
+      version: '>=1.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: pid
+      description: unique walsender pid
+      usage: LABEL
+    - name: client_addr
+      description: client address of wal receiver
+      usage: LABEL
+    - name: application_name
+      description: application name of standby
+      usage: LABEL
+    - name: state
+      description: replication state startup|catchup|streaming|backup|stopping
+      usage: LABEL
+    - name: sync_state
+      description: replication sync state async|potential|sync|quorum
+      usage: LABEL
+    - name: lsn
+      description: current log position on this server
+      usage: COUNTER
+    - name: sent_diff
+      description: last log position sent to this standby server diff with current lsn
+      usage: GAUGE
+    - name: write_diff
+      description: last log position written to disk by this standby server diff with current lsn
+      usage: GAUGE
+    - name: flush_diff
+      description: last log position flushed to disk by this standby server diff with current lsn
+      usage: GAUGE
+    - name: replay_diff
+      description: last log position replayed into the database on this standby server diff with current lsn
+      usage: GAUGE
+    - name: sent_location
+      description: last log position sent to this standby server
+      usage: COUNTER
+    - name: write_location
+      description: last log position written to disk by this standby server
+      usage: COUNTER
+    - name: flush_location
+      description: last log position flushed to disk by this standby server
+      usage: COUNTER
+    - name: replay_location
+      description: last log position replayed into the database on this standby server
+      usage: COUNTER
+    - name: replay_lag
+      description: latest ACK lsn diff with replay (sync-remote-apply lag)
+      usage: GAUGE
+    - name: backend_uptime
+      description: how long since standby connect to this server
+      usage: GAUGE
+    - name: sync_priority
+      description: priority of being chosen as synchronous standby
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_replication_slots
+# ┃
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 1s
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ LABEL    slot_name      Slot name
+# ┃ LABEL    plugin         Logical plugin
+# ┃ LABEL    slot_type      Slot type
+# ┃ LABEL    datoid         Database oid
+# ┃ LABEL    database       Database name
+# ┃ LABEL    active         Is active
+# ┃ LABEL    xmin           replication xid
+# ┃ LABEL    catalog_xmin   logical decode xid
+# ┃ LABEL    restart_lsn    Xlog info
+# ┃ GAUGE    delay_lsn      delay lsn from pg_current_xlog_location()
+# ┃ DISCARD  dummy_standby  Is real standby
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_replication_slots_delay_lsn{slot_name,plugin,slot_type,datoid,database,active,xmin,catalog_xmin,restart_lsn}  GAUGE  delay lsn from pg_current_xlog_location()
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_slot:
+  name: pg_replication_slots
+  query:
+    - name: pg_replication_slots
+      sql: |-
+        select slot_name,
+        coalesce(plugin,'_') as plugin,
+        slot_type,datoid,coalesce(database,'_') as database,
+        (case active when 't' then 1 else 0 end)as active,
+        coalesce(xmin,'_') as xmin,
+        dummy_standby,
+        pg_xlog_location_diff(CASE WHEN pg_is_in_recovery() THEN restart_lsn
+        ELSE pg_current_xlog_location() END , restart_lsn) AS delay_lsn
+        from pg_replication_slots;
+      version: '>=1.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: slot_name
+      description: Slot name
+      usage: LABEL
+    - name: plugin
+      description: Logical plugin
+      usage: LABEL
+    - name: slot_type
+      description: Slot type
+      usage: LABEL
+    - name: datoid
+      description: Database oid
+      usage: LABEL
+    - name: database
+      description: Database name
+      usage: LABEL
+    - name: active
+      description: Is active
+      usage: GAUGE
+    - name: xmin
+      description: replication xid
+      usage: LABEL
+    - name: delay_lsn
+      description: delay lsn from pg_current_xlog_location()
+      usage: GAUGE
+    - name: dummy_standby
+      description: Is real standby
+      usage: DISCARD
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_database
+# ┃ OpenGauss Database size
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 1s
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ LABEL    datname      Name of this database
+# ┃ GAUGE    size_bytes   Disk space used by the database
+# ┃ GAUGE    age          database age calculated by age(datfrozenxid64)
+# ┃ GAUGE    is_template  1 for template db and 0 for normal db
+# ┃ GAUGE    allow_conn   1 allow connection and 0 does not allow
+# ┃ GAUGE    conn_limit   connection limit, -1 for no limit
+# ┃ GAUGE    frozen_xid   tuple with xmin below this will always be visible (until wrap around)
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_database_size_bytes{datname}   GAUGE  Disk space used by the database
+# ┃ pg_database_age{datname}          GAUGE  database age calculated by age(datfrozenxid64)
+# ┃ pg_database_is_template{datname}  GAUGE  1 for template db and 0 for normal db
+# ┃ pg_database_allow_conn{datname}   GAUGE  1 allow connection and 0 does not allow
+# ┃ pg_database_conn_limit{datname}   GAUGE  connection limit, -1 for no limit
+# ┃ pg_database_frozen_xid{datname}   GAUGE  tuple with xmin below this will always be visible (until wrap around)
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_database:
+  name: pg_database
+  desc: OpenGauss Database size
+  query:
+    - name: pg_database
+      sql: |-
+        SELECT datname,
+        pg_database_size(pg_database.datname) as size_bytes,
+        age(datfrozenxid64) AS age,
+        datistemplate AS is_template,
+        datallowconn AS allow_conn,
+        datconnlimit AS conn_limit,
+        datfrozenxid::TEXT::BIGINT as frozen_xid
+        FROM pg_database
+        where datname NOT IN ('template0','template1');
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: "primary"
+    - name: pg_database
+      sql: |-
+        SELECT datname,
+        pg_database_size(pg_database.datname) as size_bytes,
+        age(datfrozenxid64) AS age,
+        datistemplate AS is_template,
+        datallowconn AS allow_conn,
+        datconnlimit AS conn_limit,
+        datfrozenxid::TEXT::BIGINT as frozen_xid
+        FROM pg_database
+        where datname NOT IN ('template0','template1');
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: 60
+      status: disable
+      dbRole: "standby"
+  metrics:
+    - name: datname
+      description: Name of this database
+      usage: LABEL
+    - name: size_bytes
+      description: Disk space used by the database
+      usage: GAUGE
+    - name: age
+      description: database age calculated by age(datfrozenxid64)
+      usage: GAUGE
+    - name: is_template
+      description: 1 for template db and 0 for normal db
+      usage: GAUGE
+    - name: allow_conn
+      description: 1 allow connection and 0 does not allow
+      usage: GAUGE
+    - name: conn_limit
+      description: connection limit, -1 for no limit
+      usage: GAUGE
+    - name: frozen_xid
+      description: tuple with xmin below this will always be visible (until wrap around)
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_checkpoint
+# ┃ checkpoint information from pg_control_checkpoint since 10
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 5
+# ┃ Timeout  ┆ 1s
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ COUNTER  checkpoint_lsn        lsn of checkpoint
+# ┃ COUNTER  redo_lsn              redo start LSN
+# ┃ GAUGE    tli                   current WAL timeline
+# ┃ GAUGE    prev_tli              previous WAL timeline
+# ┃ GAUGE    full_page_writes      is full page write enabled?
+# ┃ GAUGE    next_xid_epoch        next xid epoch since this checkpoint
+# ┃ GAUGE    next_xid              next xid since this checkpoint
+# ┃ GAUGE    next_oid              next object id since this checkpoint
+# ┃ GAUGE    next_multixact_id     next multixact id of this checkpoint
+# ┃ GAUGE    next_multi_offset     next multixact id offset of this checkpoint
+# ┃ GAUGE    oldest_xid            oldest existing xid of the checkpoint
+# ┃ GAUGE    oldest_xid_dbid       which db contains the oldest xid
+# ┃ GAUGE    oldest_active_xid     oldest active xid of the checkpoint
+# ┃ GAUGE    oldest_multi_xid      oldest active multi xid of the checkpoint
+# ┃ GAUGE    oldest_multi_dbid     which db contains the oldest multi xid
+# ┃ GAUGE    oldest_commit_ts_xid  xid with oldest commit ts by the checkpoint
+# ┃ GAUGE    newest_commit_ts_xid  xid with newest commit ts by the checkpoint
+# ┃ GAUGE    time                  timestamp of this checkpoint
+# ┃ GAUGE    elapse                time elapsed since this checkpoint in seconds
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_checkpoint_checkpoint_lsn{}        COUNTER  lsn of checkpoint
+# ┃ pg_checkpoint_redo_lsn{}              COUNTER  redo start LSN
+# ┃ pg_checkpoint_tli{}                   GAUGE    current WAL timeline
+# ┃ pg_checkpoint_prev_tli{}              GAUGE    previous WAL timeline
+# ┃ pg_checkpoint_full_page_writes{}      GAUGE    is full page write enabled?
+# ┃ pg_checkpoint_next_xid_epoch{}        GAUGE    next xid epoch since this checkpoint
+# ┃ pg_checkpoint_next_xid{}              GAUGE    next xid since this checkpoint
+# ┃ pg_checkpoint_next_oid{}              GAUGE    next object id since this checkpoint
+# ┃ pg_checkpoint_next_multixact_id{}     GAUGE    next multixact id of this checkpoint
+# ┃ pg_checkpoint_next_multi_offset{}     GAUGE    next multixact id offset of this checkpoint
+# ┃ pg_checkpoint_oldest_xid{}            GAUGE    oldest existing xid of the checkpoint
+# ┃ pg_checkpoint_oldest_xid_dbid{}       GAUGE    which db contains the oldest xid
+# ┃ pg_checkpoint_oldest_active_xid{}     GAUGE    oldest active xid of the checkpoint
+# ┃ pg_checkpoint_oldest_multi_xid{}      GAUGE    oldest active multi xid of the checkpoint
+# ┃ pg_checkpoint_oldest_multi_dbid{}     GAUGE    which db contains the oldest multi xid
+# ┃ pg_checkpoint_oldest_commit_ts_xid{}  GAUGE    xid with oldest commit ts by the checkpoint
+# ┃ pg_checkpoint_newest_commit_ts_xid{}  GAUGE    xid with newest commit ts by the checkpoint
+# ┃ pg_checkpoint_time{}                  GAUGE    timestamp of this checkpoint
+# ┃ pg_checkpoint_elapse{}                GAUGE    time elapsed since this checkpoint in seconds
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_checkpoint:
+  name: pg_checkpoint
+  desc: checkpoint information from pg_control_checkpoint since 10
+  query:
+    - name: pg_checkpoint
+      sql: |-
+        SELECT
+        checkpoint_lsn,
+        redo_lsn,
+        timeline_id AS tli,
+        full_page_writes,
+        next_oid::BIGINT,
+        next_multixact_id::text::BIGINT,
+        next_multi_offset::text::BIGINT,
+        oldest_xid::text::BIGINT,
+        oldest_xid_dbid::text::BIGINT,
+        oldest_active_xid::text::BIGINT,
+        checkpoint_time AS time,
+        extract(epoch from now() - checkpoint_time) AS elapse
+        FROM pg_control_checkpoint();
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: 5
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: checkpoint_lsn
+      description: lsn of checkpoint
+      usage: COUNTER
+    - name: redo_lsn
+      description: redo start LSN
+      usage: COUNTER
+    - name: tli
+      description: current WAL timeline
+      usage: GAUGE
+    - name: prev_tli
+      description: previous WAL timeline
+      usage: GAUGE
+    - name: full_page_writes
+      description: is full page write enabled?
+      usage: GAUGE
+    - name: next_xid_epoch
+      description: next xid epoch since this checkpoint
+      usage: GAUGE
+    - name: next_xid
+      description: next xid since this checkpoint
+      usage: GAUGE
+    - name: next_oid
+      description: next object id since this checkpoint
+      usage: GAUGE
+    - name: next_multixact_id
+      description: next multixact id of this checkpoint
+      usage: GAUGE
+    - name: next_multi_offset
+      description: next multixact id offset of this checkpoint
+      usage: GAUGE
+    - name: oldest_xid
+      description: oldest existing xid of the checkpoint
+      usage: GAUGE
+    - name: oldest_xid_dbid
+      description: which db contains the oldest xid
+      usage: GAUGE
+    - name: oldest_active_xid
+      description: oldest active xid of the checkpoint
+      usage: GAUGE
+    - name: oldest_multi_xid
+      description: oldest active multi xid of the checkpoint
+      usage: GAUGE
+    - name: oldest_multi_dbid
+      description: which db contains the oldest multi xid
+      usage: GAUGE
+    - name: oldest_commit_ts_xid
+      description: xid with oldest commit ts by the checkpoint
+      usage: GAUGE
+    - name: newest_commit_ts_xid
+      description: xid with newest commit ts by the checkpoint
+      usage: GAUGE
+    - name: time
+      description: timestamp of this checkpoint
+      usage: GAUGE
+    - name: elapse
+      description: time elapsed since this checkpoint in seconds
+      usage: GAUGE
+  status: enable
+  ttl: 5
+  timeout: 1
+  public: true
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg_run_times
+# ┃ OpenGauss database run times
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 1s
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ LABEL    run_name  Name of cluster
+# ┃ GAUGE    db_role   Role of database
+# ┃ GAUGE    run_time  Run times of cluster
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_run_times_db_role{run_name}   GAUGE  Role of database
+# ┃ pg_run_times_run_time{run_name}  GAUGE  Run times of cluster
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg_run_times:
+  name: pg_run_times
+  desc: OpenGauss database run times
+  query:
+    - name: pg_run_times
+      sql: select 'cluster_runtime' as run_name,(case pg_is_in_recovery() when 'f' then 1 else 0 end) as db_role,extract(epoch from(now() - pg_postmaster_start_time())) as run_time;
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: run_name
+      description: Name of cluster
+      usage: LABEL
+    - name: db_role
+      description: Role of database
+      usage: GAUGE
+    - name: run_time
+      description: Run times of cluster
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+# ┃ pg
+# ┃ openGauss basic information
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ TTL      ┆ 60
+# ┃ Timeout  ┆ 100ms
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ GAUGE    timestamp             current database timestamp in unix epoch
+# ┃ GAUGE    uptime                seconds since postmaster start
+# ┃ GAUGE    boot_time             postmaster boot timestamp in unix epoch
+# ┃ COUNTER  lsn                   log sequence number, current write location
+# ┃ COUNTER  insert_lsn            primary only, location of current wal inserting
+# ┃ COUNTER  write_lsn             primary only, location of current wal writing
+# ┃ COUNTER  flush_lsn             primary only, location of current wal syncing
+# ┃ COUNTER  receive_lsn           replica only, location of wal synced to disk
+# ┃ COUNTER  replay_lsn            replica only, location of wal applied
+# ┃ GAUGE    conf_reload_time      seconds since last configuration reload
+# ┃ GAUGE    last_replay_time      time when the last transaction was replayed
+# ┃ GAUGE    lag                   replica only, replication lag in seconds
+# ┃ GAUGE    is_in_recovery        1 if in recovery mode
+# ┃ GAUGE    is_wal_replay_paused  1 if wal play is paused
+# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈
+# ┃ pg_timestamp{}             GAUGE    current database timestamp in unix epoch
+# ┃ pg_uptime{}                GAUGE    seconds since postmaster start
+# ┃ pg_boot_time{}             GAUGE    postmaster boot timestamp in unix epoch
+# ┃ pg_lsn{}                   COUNTER  log sequence number, current write location
+# ┃ pg_insert_lsn{}            COUNTER  primary only, location of current wal inserting
+# ┃ pg_write_lsn{}             COUNTER  primary only, location of current wal writing
+# ┃ pg_flush_lsn{}             COUNTER  primary only, location of current wal syncing
+# ┃ pg_receive_lsn{}           COUNTER  replica only, location of wal synced to disk
+# ┃ pg_replay_lsn{}            COUNTER  replica only, location of wal applied
+# ┃ pg_conf_reload_time{}      GAUGE    seconds since last configuration reload
+# ┃ pg_last_replay_time{}      GAUGE    time when the last transaction was replayed
+# ┃ pg_lag{}                   GAUGE    replica only, replication lag in seconds
+# ┃ pg_is_in_recovery{}        GAUGE    1 if in recovery mode
+# ┃ pg_is_wal_replay_paused{}  GAUGE    1 if wal play is paused
+# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+pg:
+  name: pg
+  desc: openGauss basic information
+  query:
+    - name: pg
+      desc: 'primary database'
+      sql: |
+        SELECT extract(EPOCH FROM CURRENT_TIMESTAMP) AS timestamp,
+        extract(EPOCH FROM now() - pg_postmaster_start_time()) AS uptime,
+        extract(EPOCH FROM pg_postmaster_start_time()) AS boot_time,
+        pg_xlog_location_diff(pg_current_xlog_location() , '0/0') AS lsn,
+        pg_xlog_location_diff(pg_current_xlog_insert_location(),'0/0') AS insert_lsn,
+        pg_xlog_location_diff(pg_current_xlog_location() , '0/0') AS write_lsn,
+        pg_xlog_location_diff(pg_current_xlog_location() , '0/0') AS flush_lsn,
+        NULL::BIGINT AS receive_lsn,
+        NULL::BIGINT AS replay_lsn,
+        extract(EPOCH FROM now() - pg_conf_load_time()) AS conf_reload_time,
+        NULL::FLOAT AS last_replay_time,
+        0::FLOAT AS lag,
+        pg_is_in_recovery() AS is_in_recovery,
+        FALSE AS is_wal_replay_paused
+        ;
+      version: '>=0.0.0'
+      timeout: 0.1
+      ttl: 10
+      status: enable
+      dbRole: primary
+    - name: pg
+      desc: 'standby database'
+      sql: |
+        SELECT extract(EPOCH FROM CURRENT_TIMESTAMP) AS timestamp,
+        extract(EPOCH FROM now() - pg_postmaster_start_time()) AS uptime,
+        extract(EPOCH FROM pg_postmaster_start_time()) AS boot_time,
+        pg_xlog_location_diff(pg_last_xlog_receive_location() , '0/0') AS lsn,
+        NULL::BIGINT AS insert_lsn,
+        NULL::BIGINT AS write_lsn,
+        NULL::BIGINT AS flush_lsn,
+        pg_xlog_location_diff(pg_last_xlog_receive_location() , '0/0') AS receive_lsn,
+        pg_xlog_location_diff(pg_last_xlog_receive_location() , '0/0') AS replay_lsn,
+        extract(EPOCH FROM now() - pg_conf_load_time()) AS conf_reload_time,
+        extract(EPOCH FROM pg_last_xact_replay_timestamp()) AS last_replay_time,
+        pg_is_in_recovery() AS is_in_recovery
+        ;
+      version: '>=0.0.0'
+      timeout: 0.1
+      ttl: 10
+      status: enable
+      dbRole: standby
+  metrics:
+    - name: timestamp
description: current database timestamp in unix epoch
+      usage: GAUGE
+    - name: uptime
+      description: seconds since postmaster start
+      usage: GAUGE
+    - name: boot_time
+      description: postmaster boot timestamp in unix epoch
+      usage: GAUGE
+    - name: lsn
+      description: log sequence number, current write location
+      usage: COUNTER
+    - name: insert_lsn
+      description: primary only, location of current wal inserting
+      usage: COUNTER
+    - name: write_lsn
+      description: primary only, location of current wal writing
+      usage: COUNTER
+    - name: flush_lsn
+      description: primary only, location of current wal syncing
+      usage: COUNTER
+    - name: receive_lsn
+      description: replica only, location of wal synced to disk
+      usage: COUNTER
+    - name: replay_lsn
+      description: replica only, location of wal applied
+      usage: COUNTER
+    - name: conf_reload_time
+      description: seconds since last configuration reload
+      usage: GAUGE
+    - name: last_replay_time
+      description: time when the last transaction was replayed
+      usage: GAUGE
+    - name: lag
+      description: replica only, replication lag in seconds
+      usage: GAUGE
+    - name: is_in_recovery
+      description: 1 if in recovery mode
+      usage: GAUGE
+    - name: is_wal_replay_paused
+      description: 1 if wal play is paused
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 0.1
+  public: true
+
+pg_setting:
+  name: pg_setting
+  desc: Important postgres setting entries that must be kept the same across the entire cluster
+  query:
+    - name: pg_setting
+      sql: |-
+        SELECT current_setting('max_connections') AS max_connections,
+               current_setting('max_prepared_transactions') AS max_prepared_transactions,
+               current_setting('max_replication_slots') AS max_replication_slots,
+               current_setting('max_wal_senders') AS max_wal_senders,
+               current_setting('max_locks_per_transaction') AS max_locks_per_transaction,
+               current_setting('block_size') AS block_size,
+               CASE current_setting('wal_log_hints') WHEN 'on' THEN 1 ELSE 0 END AS wal_log_hints;
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: max_connections
+      description: number of concurrent connections to the database server
+      usage: GAUGE
+    - name: max_prepared_transactions
+      description: maximum number of transactions that can be in the prepared state simultaneously
+      usage: GAUGE
+    - name: max_replication_slots
+      description: maximum number of replication slots
+      usage: GAUGE
+    - name: max_wal_senders
+      description: maximum number of concurrent connections from standby servers
+      usage: GAUGE
+    - name: max_locks_per_transaction
+      description: no more than this many distinct objects can be locked at any one time
+      usage: GAUGE
+    - name: block_size
+      description: pg page block size, 8192 by default
+      usage: GAUGE
+    - name: wal_log_hints
+      description: whether wal_log_hints is enabled, 1 enabled 0 disabled
+      usage: GAUGE
+  status: enable
+  ttl: 5
+  timeout: 1
+  public: true
+
+pg_class:
+  name: pg_class
+  desc: Postgres relation catalog info, db level, normal version
+  query:
+    - name: pg_class
+      sql: |-
+        SELECT CURRENT_CATALOG AS datname,(select nspname from pg_namespace where oid=relnamespace) as nspname,relname,relkind,relpages,reltuples,
+               CASE WHEN relkind = 'i' THEN NULL ELSE age(relfrozenxid64) END AS relage,pg_relation_size(oid) AS relsize
+        FROM pg_class
+        WHERE relkind = 'r' and relname not like 'pg_%' and relname not like 'gs_%' and nspname not in ('information_schema', 'pg_catalog')
+        ORDER BY relpages DESC LIMIT 32;
+      version: '>=0.0.0'
+      timeout: 3
+      ttl: 60
+      status: enable
+      dbRole: "primary"
+    - name: pg_class
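+      # (editor) standby variant: identical SQL to the primary query above; shipped disabled by default.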
+ sql: |- + SELECT CURRENT_CATALOG AS datname,(select nspname from pg_namespace where oid=relnamespace) as nspname,relname,relkind,relpages,reltuples, + CASE WHEN relkind = 'i' THEN NULL ELSE age(relfrozenxid64) END AS relage,pg_relation_size(oid) AS relsize + FROM pg_class + WHERE relkind = 'r' and relname not like 'pg_%' and relname not like 'gs_%' and nspname not in ('information_schema', 'pg_catalog') + ORDER BY relpages DESC LIMIT 32; + version: '>=0.0.0' + timeout: 3 + ttl: 60 + status: disable + dbRole: "standby" + metrics: + - name: datname + description: database name of this relation + usage: LABEL + - name: nspname + description: schema name of this relation + usage: LABEL + - name: relname + description: relation name of this relation + usage: LABEL + - name: relkind + description: relation type r-table i-index s-sequence m-mview t-toast + usage: LABEL + - name: relpages + description: exact page count of this relation + usage: GAUGE + - name: reltuples + description: estimate relation tuples + usage: GAUGE + - name: relage + description: age of non-index relation + usage: GAUGE + - name: relsize + description: size of this relation + usage: GAUGE + status: enable + ttl: 5 + timeout: 3 + public: true + +# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# ┃ pg_lock +# ┃ OpenGauss lock distribution by mode +# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈ +# ┃ TTL ┆ 60 +# ┃ Timeout ┆ 1s +# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈ +# ┃ LABEL datname Name of this database +# ┃ LABEL mode Type of Lock +# ┃ GAUGE count Number of locks +# ┣┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈ +# ┃ pg_lock_count{datname,mode} GAUGE Number of locks +# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +pg_lock: + name: pg_lock + desc: OpenGauss lock distribution by mode + query: + - name: pg_lock + sql: |- + SELECT datname, mode, coalesce(count, 0) AS count + FROM ( + SELECT d.oid AS database, d.datname, l.mode + FROM pg_database d,unnest(ARRAY ['AccessShareLock','RowShareLock','RowExclusiveLock','ShareUpdateExclusiveLock','ShareLock','ShareRowExclusiveLock','ExclusiveLock','AccessExclusiveLock']) l(mode) + WHERE d.datname NOT IN ('template0','template1')) base + LEFT JOIN (SELECT database, mode, count(1) AS count FROM pg_locks WHERE database IS NOT NULL GROUP BY database, mode) cnt + USING (database, mode); + version: '>=0.0.0' + timeout: 1 + ttl: 60 + status: enable + dbRole: "" + metrics: + - name: datname + description: Name of this database + usage: LABEL + - name: mode + description: Type of Lock + usage: LABEL + - name: count + description: Number of locks + usage: GAUGE + status: enable + ttl: 60 + timeout: 1 + public: true + + +#pg_lock_detail: +# name: pg_lock_detail +# desc: OpenGauss lock sqls +# query: +# - name: pg_lock_detail +# sql: |- +# with tl as (select usename,granted,locktag,query_start,query +# from pg_locks l,pg_stat_activity a +# where l.pid=a.pid and locktag in(select locktag from pg_locks where granted='f')) +# select ts.usename locker_user,ts.query_start locker_query_start,ts.granted locker_granted,ts.query locker_query,tt.query locked_query,tt.query_start locked_query_start,tt.granted locked_granted,tt.usename locked_user,extract(epoch from now() - tt.query_start) as locked_times 
+# from (select * from tl where granted='t') as ts,(select * from tl where granted='f') tt +# where ts.locktag=tt.locktag order by 1; +# version: '>=0.0.0' +# timeout: 1 +# ttl: 60 +# status: enable +# dbRole: "" +# metrics: +# - name: locker_user +# description: locker user +# usage: LABEL +# - name: locked_user +# description: locked user +# usage: LABEL +# - name: locker_granted +# description: locker granted +# usage: LABEL +# - name: locked_granted +# description: locked granted +# usage: LABEL +# - name: locker_query_start +# description: locker query start +# usage: LABEL +# - name: locked_query_start +# description: locked query start +# usage: LABEL +# - name: locked_times +# description: Total wait times +# usage: GAUGE +# - name: locker_query +# description: locker query +# usage: LABEL +# - name: locked_query +# description: locked query +# usage: LABEL +# status: enable +# ttl: 60 +# timeout: 1 +# public: true + +pg_locker: + name: pg_locker + desc: OpenGauss locker count + query: + - name: pg_locker + sql: |- + with tl as (select usename,granted,locktag,query_start,query + from pg_locks l,pg_stat_activity a + where l.pid=a.pid and locktag in(select locktag from pg_locks where granted='f')) + select usename,query_start,granted,query,count(query) count + from tl where granted='t' group by usename,query_start,granted,query order by 5 desc; + version: '>=0.0.0' + timeout: 1 + ttl: 60 + status: enable + dbRole: "" + metrics: + - name: usename + description: locker user name + usage: LABEL + - name: granted + description: locker granted + usage: LABEL + - name: query_start + description: locker query start + usage: LABEL + - name: count + description: locker user count + usage: GAUGE + - name: query + description: locker query + usage: LABEL + status: enable + ttl: 60 + timeout: 1 + public: true + +pg_active_slowsql: + name: pg_active_slowsql + desc: OpenGauss active slow query + query: + - name: pg_active_slowsql + sql: select datname,usename,client_addr,pid,query_start::text,extract(epoch from (now() - query_start)) as query_runtime,xact_start::text,extract(epoch from(now() - xact_start)) as xact_runtime,state,query from pg_stat_activity where state not in('idle') and query_start is not null; + version: '>=0.0.0' + timeout: 1 + ttl: 60 + status: enable + dbRole: "" + metrics: + - name: datname + description: Name of database + usage: LABEL + - name: usename + description: Name of user + usage: LABEL + - name: client_addr + description: Client address + usage: LABEL + - name: pid + description: Client pid + usage: LABEL + - name: query_start + description: Query start time + usage: LABEL + - name: query_runtime + description: Query running time + usage: GAUGE + - name: xact_start + description: Start time of transaction + usage: LABEL + - name: xact_runtime + description: transaction running time + usage: LABEL + - name: state + description: Query state + usage: LABEL + - name: query + description: Query sql + usage: LABEL + status: enable + ttl: 60 + timeout: 1 + public: true + +pg_wait_events: + name: pg_wait_events + desc: OpenGauss wait event statements + query: + - name: pg_wait_events + sql: select nodename,type,event,wait,failed_wait,total_wait_time from dbe_perf.wait_events where wait !=0 order by total_wait_time desc; + version: '>=0.0.0' + timeout: 1 + ttl: 60 + status: enable + dbRole: "" + metrics: + - name: nodename + description: Name of node + usage: LABEL + - name: type + description: Type of wait events + usage: LABEL + - name: event + description: Event name + 
usage: LABEL + - name: wait + description: Numbers of wait + usage: COUNTER + - name: failed_wait + description: failed wait + usage: LABEL + - name: total_wait_time + description: Total wait times + usage: COUNTER + status: enable + ttl: 60 + timeout: 1 + public: true + +pg_table: + name: pg_table + desc: PostgreSQL table statistics, db level, normal version + query: + - name: pg_table + sql: |- + SELECT CURRENT_CATALOG AS datname,psut.schemaname AS nspname,psut.relname, + seq_scan,seq_tup_read,idx_scan,idx_tup_fetch,(seq_scan + idx_scan) AS tbl_scan,(seq_tup_read + idx_tup_fetch) AS tup_read, + n_tup_ins,n_tup_upd,n_tup_del,(n_tup_ins + n_tup_upd + n_tup_del) AS n_tup_mod, + n_tup_hot_upd,n_live_tup,n_dead_tup, + nvl(last_vacuum::text,'1970-01-01') as last_vacuum,nvl(last_autovacuum::text,'1970-01-01') as last_autovacuum,nvl(last_analyze::text,'1970-01-01') as last_analyze,nvl(last_autoanalyze::text,'1970-01-01') as last_autoanalyze,vacuum_count,autovacuum_count,analyze_count,autoanalyze_count, + extract(epoch from now() -(case when nvl(last_vacuum,'1970-01-01')>nvl(last_autovacuum,'1970-01-01') then nvl(last_vacuum,'1970-01-01') else nvl(last_autovacuum,'1970-01-01') end))::int vacuum_delay, + extract(epoch from now() -(case when nvl(last_analyze,'1970-01-01')>nvl(last_autoanalyze,'1970-01-01') then nvl(last_analyze,'1970-01-01') else nvl(last_autoanalyze,'1970-01-01') end))::int analyze_delay, + heap_blks_read,heap_blks_hit,idx_blks_read,idx_blks_hit, + toast_blks_read,toast_blks_hit,tidx_blks_read,tidx_blks_hit + FROM pg_stat_user_tables psut,pg_statio_user_tables psio + WHERE psio.relid = psut.relid and psio.schemaname not in ('pg_catalog', 'information_schema','snapshot') and (n_live_tup+n_dead_tup)>10000; + version: '>=0.0.0' + timeout: 10 + ttl: 60 + status: enable + dbRole: "primary" + - name: pg_table + sql: |- + SELECT CURRENT_CATALOG AS datname,psut.schemaname AS nspname,psut.relname, + seq_scan,seq_tup_read,idx_scan,idx_tup_fetch,(seq_scan + idx_scan) AS tbl_scan,(seq_tup_read + idx_tup_fetch) AS tup_read, + n_tup_ins,n_tup_upd,n_tup_del,(n_tup_ins + n_tup_upd + n_tup_del) AS n_tup_mod, + n_tup_hot_upd,n_live_tup,n_dead_tup, + nvl(last_vacuum::text,'1970-01-01') as last_vacuum,nvl(last_autovacuum::text,'1970-01-01') as last_autovacuum,nvl(last_analyze::text,'1970-01-01') as last_analyze,nvl(last_autoanalyze::text,'1970-01-01') as last_autoanalyze,vacuum_count,autovacuum_count,analyze_count,autoanalyze_count, + extract(epoch from now() -(case when nvl(last_vacuum,'1970-01-01')>nvl(last_autovacuum,'1970-01-01') then nvl(last_vacuum,'1970-01-01') else nvl(last_autovacuum,'1970-01-01') end))::int vacuum_delay, + extract(epoch from now() -(case when nvl(last_analyze,'1970-01-01')>nvl(last_autoanalyze,'1970-01-01') then nvl(last_analyze,'1970-01-01') else nvl(last_autoanalyze,'1970-01-01') end))::int analyze_delay, + heap_blks_read,heap_blks_hit,idx_blks_read,idx_blks_hit, + toast_blks_read,toast_blks_hit,tidx_blks_read,tidx_blks_hit + FROM pg_stat_user_tables psut,pg_statio_user_tables psio + WHERE psio.relid = psut.relid and psio.schemaname not in ('pg_catalog', 'information_schema','snapshot') and (n_live_tup+n_dead_tup)>10000; + version: '>=0.0.0' + timeout: 10 + ttl: 60 + status: disable + dbRole: "standby" + metrics: + - name: datname + description: database name of this relation + usage: LABEL + - name: nspname + description: schema name of this relation + usage: LABEL + - name: relname + description: relation name of this relation + usage: LABEL + - name: seq_scan + 
description: sequential scans initiated on this table + usage: COUNTER + - name: seq_tup_read + description: live rows fetched by sequential scans + usage: COUNTER + - name: idx_scan + description: index scans initiated on this table + usage: COUNTER + - name: idx_tup_fetch + description: rows fetched by index scans + usage: COUNTER + - name: tbl_scan + description: total table scan = index scan + seq scan + usage: COUNTER + - name: tup_read + description: total tuples read = index fetch + seq read + usage: COUNTER + - name: n_tup_ins + description: rows inserted + usage: COUNTER + - name: n_tup_upd + description: rows updated + usage: COUNTER + - name: n_tup_del + description: rows deleted + usage: COUNTER + - name: n_tup_mod + description: rows modified (insert + update + delete) + usage: COUNTER + - name: n_tup_hot_upd + description: rows updated in HOT mode + usage: COUNTER + - name: n_live_tup + description: estimated live rows + usage: GAUGE + - name: n_dead_tup + description: estimated dead rows + usage: GAUGE + - name: n_mod_since_analyze + description: rows changed since last analyze + usage: GAUGE + - name: last_vacuum + description: when table was manually vacuumed last time (FULL not count) + usage: LABEL + - name: last_autovacuum + description: when table was automatically vacuumed last time + usage: LABEL + - name: last_analyze + description: when table was manually analyzed last time + usage: LABEL + - name: last_autoanalyze + description: when table was automatically analyzed last time + usage: LABEL + - name: vacuum_delay + description: delay from last vacuum time + usage: GAUGE + - name: analyze_delay + description: delay from last analyze time + usage: GAUGE + - name: vacuum_count + description: manual vacuum count (FULL not count) + usage: COUNTER + - name: autovacuum_count + description: automatic vacuum count + usage: COUNTER + - name: analyze_count + description: manual analyze count + usage: COUNTER + - name: autoanalyze_count + description: automatic analyze count + usage: COUNTER + - name: heap_blks_read + description: relation heap read + usage: COUNTER + - name: heap_blks_hit + description: relation heap hit + usage: COUNTER + - name: idx_blks_read + description: index read + usage: COUNTER + - name: idx_blks_hit + description: index hit + usage: COUNTER + - name: toast_blks_read + description: toast heap read + usage: COUNTER + - name: toast_blks_hit + description: toast heap hit + usage: COUNTER + - name: tidx_blks_read + description: toast index read + usage: COUNTER + - name: tidx_blks_hit + description: toast index hit + usage: COUNTER + status: enable + ttl: 60 + timeout: 10 + +pg_index: + name: pg_index + desc: opengauss index statistics, db level, normal version + query: + - name: pg_index + sql: |- + SELECT CURRENT_CATALOG AS datname,psui.schemaname AS nspname,psui.relname AS tablename,psui.indexrelname AS relname, + idx_scan, idx_tup_read,idx_tup_fetch,idx_blks_read,idx_blks_hit + FROM pg_stat_user_indexes psui,pg_statio_user_indexes psio + WHERE psio.indexrelid = psui.indexrelid and psui.schemaname not in ('pg_catalog', 'information_schema','snapshot') + order by idx_scan desc limit 100; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "primary" + status: enable + - name: pg_index + sql: |- + SELECT CURRENT_CATALOG AS datname,psui.schemaname AS nspname,psui.relname AS tablename,psui.indexrelname AS relname, + idx_scan,idx_tup_read,idx_tup_fetch,idx_blks_read,idx_blks_hit + FROM pg_stat_user_indexes psui,pg_statio_user_indexes psio + WHERE 
psio.indexrelid = psui.indexrelid and psui.schemaname not in ('pg_catalog', 'information_schema','snapshot') + order by idx_scan desc limit 100; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "standby" + status: disable + metrics: + - name: datname + description: database name of this relation + usage: LABEL + - name: nspname + description: schema name of this relation + usage: LABEL + - name: tablename + description: relation name of this relation + usage: LABEL + - name: relname + description: index name of this relation + usage: LABEL + - name: idx_scan + description: index scans initiated on this index + usage: GAUGE + - name: idx_tup_read + description: index entries returned by scans on this index + usage: COUNTER + - name: idx_tup_fetch + description: live table rows fetched by simple index scans using this index + usage: COUNTER + - name: idx_blks_read + description: blocks been read from disk of this index + usage: COUNTER + - name: idx_blks_hit + description: blocks hit from cache of this index + usage: COUNTER + status: enable + ttl: 10 + timeout: 1 + +pg_tables_size: + name: pg_tables_size + desc: OpenGauss tables size + query: + - name: pg_tables_size + sql: |- + SELECT CURRENT_CATALOG AS datname,nsp.nspname,rel.relname, + pg_total_relation_size(rel.oid) AS bytes, + pg_relation_size(rel.oid) AS relsize, + pg_indexes_size(rel.oid) AS indexsize, + pg_total_relation_size(reltoastrelid) AS toastsize + FROM pg_namespace nsp JOIN pg_class rel ON nsp.oid = rel.relnamespace + WHERE nspname NOT IN ('pg_catalog', 'information_schema','snapshot') AND rel.relkind = 'r' + order by 4 desc limit 100; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "primary" + status: enable + - name: pg_tables_size + sql: |- + SELECT CURRENT_CATALOG AS datname,nsp.nspname,rel.relname, + pg_total_relation_size(rel.oid) AS bytes, + pg_relation_size(rel.oid) AS relsize, + pg_indexes_size(rel.oid) AS indexsize, + pg_total_relation_size(reltoastrelid) AS toastsize + FROM pg_namespace nsp JOIN pg_class rel ON nsp.oid = rel.relnamespace + WHERE nspname NOT IN ('pg_catalog', 'information_schema','snapshot') AND rel.relkind = 'r' + order by 4 desc limit 100; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "standby" + status: disable + metrics: + - name: datname + description: database name of this relation + usage: LABEL + - name: nspname + description: schema name of this relation + usage: LABEL + - name: relname + description: relation name of this relation + usage: LABEL + - name: bytes + description: total size of this table (including toast, index, toast index) + usage: GAUGE + - name: relsize + description: size of this table itself (main, vm, fsm) + usage: GAUGE + - name: indexsize + description: size of all related indexes + usage: GAUGE + - name: toastsize + description: size of corresponding toast tables + usage: GAUGE + status: enable + ttl: 3600 + timeout: 10 + +pg_indexes_size: + name: pg_indexes_size + desc: OpenGauss database indexes size + query: + - name: pg_indexes_size + sql: |- + select schemaname schema_name,relname table_name,indexrelname index_name,pg_table_size(indexrelid) as index_size + from pg_stat_user_indexes + where schemaname not in('pg_catalog', 'information_schema','snapshot') + order by 4 desc limit 100; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "primary" + status: disable + - name: pg_indexes_size + sql: |- + select schemaname schema_name,relname table_name,indexrelname index_name,pg_table_size(indexrelid) as index_size + from 
pg_stat_user_indexes + where schemaname not in('pg_catalog', 'information_schema','snapshot') + order by 4 desc limit 100; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "standby" + status: disable + metrics: + - name: schema_name + description: Schema name of index + usage: LABEL + - name: table_name + description: Table name of index + usage: LABEL + - name: index_name + description: Name of index + usage: LABEL + - name: index_size + description: Size of index + usage: GAUGE + status: enable + ttl: 3600 + timeout: 10 + +pg_need_indexes: + name: pg_need_indexes + desc: OpenGauss tables need indexes + query: + - name: pg_need_indexes + sql: |- + select schemaname||'.'||relname as tablename, pg_size_pretty(pg_table_size(relid)) as table_size, seq_scan, seq_tup_read, coalesce(idx_scan,0) idx_scan, coalesce(idx_tup_fetch,0) idx_tup_fetch,coalesce((idx_scan/(case when (seq_scan+idx_scan) >0 then (seq_scan+idx_scan) else 1 end) * 100),0) as rate + from pg_stat_user_tables + where schemaname not in('pg_catalog', 'information_schema','snapshot') and pg_table_size(relid) > 1024*1024*1024 and coalesce((idx_scan/(case when (seq_scan+idx_scan) >0 then (seq_scan+idx_scan) else 1 end) * 100),0) < 90 + order by seq_scan desc limit 10; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "primary" + status: enable + - name: pg_need_indexes + sql: |- + select schemaname||'.'||relname as tablename, pg_size_pretty(pg_table_size(relid)) as table_size, seq_scan, seq_tup_read, coalesce(idx_scan,0) idx_scan, coalesce(idx_tup_fetch,0) idx_tup_fetch,coalesce((idx_scan/(case when (seq_scan+idx_scan) >0 then (seq_scan+idx_scan) else 1 end) * 100),0) as rate + from pg_stat_user_tables + where schemaname not in('pg_catalog', 'information_schema','snapshot') and pg_table_size(relid) > 1024*1024*1024 and coalesce((idx_scan/(case when (seq_scan+idx_scan) >0 then (seq_scan+idx_scan) else 1 end) * 100),0) < 90 + order by seq_scan desc limit 10; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "standby" + status: disable + metrics: + - name: tablename + description: Name of table + usage: LABEL + - name: table_size + description: Size of table + usage: LABEL + - name: seq_scan + description: Scan numbers of seq + usage: GAUGE + - name: seq_tup_read + description: Tup read numbers of seq + usage: GAUGE + - name: idx_scan + description: Scan numbers of indexes + usage: GAUGE + - name: idx_tup_fetch + description: Tup fetch numbers of indexes + usage: GAUGE + - name: rate + description: Index used rate + usage: GAUGE + status: enable + ttl: 3600 + timeout: 10 + +pg_never_used_indexes: + name: pg_never_used_indexes + desc: OpenGauss indexes never used + query: + - name: pg_never_used_indexes + sql: |- + select CURRENT_CATALOG as datname, pi.schemaname, pi.relname, pi.indexrelname, pg_table_size(pi.indexrelid) as index_size + from pg_indexes pis + join pg_stat_user_indexes pi + on pis.schemaname = pi.schemaname and pis.tablename = pi.relname and pis.indexname = pi.indexrelname + left join pg_constraint pco + on pco.conname = pi.indexrelname and pco.conrelid = pi.relid + where pco.contype is distinct from 'p' and pco.contype is distinct from 'u' + and (idx_scan,idx_tup_read,idx_tup_fetch) = (0,0,0) + and pis.indexdef !~ ' UNIQUE INDEX ' + and pis.schemaname not in('pg_catalog', 'information_schema','snapshot') + order by pg_table_size(indexrelid) desc; + version: '>=0.0.0' + timeout: 10 + ttl: 3600 + dbRole: "primary" + status: enable + - name: pg_never_used_indexes + sql: |- + select CURRENT_CATALOG as 
datname, pi.schemaname, pi.relname, pi.indexrelname, pg_table_size(pi.indexrelid) as index_size
+        from pg_indexes pis
+        join pg_stat_user_indexes pi
+        on pis.schemaname = pi.schemaname and pis.tablename = pi.relname and pis.indexname = pi.indexrelname
+        left join pg_constraint pco
+        on pco.conname = pi.indexrelname and pco.conrelid = pi.relid
+        where pco.contype is distinct from 'p' and pco.contype is distinct from 'u'
+        and (idx_scan,idx_tup_read,idx_tup_fetch) = (0,0,0)
+        and pis.indexdef !~ ' UNIQUE INDEX '
+        and pis.schemaname not in('pg_catalog', 'information_schema','snapshot')
+        order by pg_table_size(indexrelid) desc;
+      version: '>=0.0.0'
+      timeout: 10
+      ttl: 3600
+      dbRole: "standby"
+      status: disable
+  metrics:
+    - name: datname
+      description: database of table
+      usage: LABEL
+    - name: schemaname
+      description: Schema of table
+      usage: LABEL
+    - name: relname
+      description: Name of table
+      usage: LABEL
+    - name: indexrelname
+      description: Name of index
+      usage: LABEL
+    - name: index_size
+      description: Size of index
+      usage: GAUGE
+  status: enable
+  ttl: 3600
+  timeout: 10
+
+pg_tables_expansion_rate:
+  name: pg_tables_expansion_rate
+  desc: OpenGauss database tables expansion rate
+  query:
+    - name: pg_tables_expansion_rate
+      sql: |-
+        select CURRENT_CATALOG as datname, schemaname,relname,n_live_tup,n_dead_tup,round((n_dead_tup::numeric/(n_dead_tup+n_live_tup) *100),2) as dead_rate,
+        extract(epoch from coalesce(last_vacuum,'1970-01-01')::text) as last_vacuum,
+        extract(epoch from coalesce(last_autovacuum,'1970-01-01')::text) as last_autovacuum ,
+        extract(epoch from coalesce(last_analyze,'1970-01-01')::text) as last_analyze,
+        extract(epoch from coalesce(last_autoanalyze,'1970-01-01')::text) as last_autoanalyze,
+        vacuum_count,autovacuum_count,analyze_count,autoanalyze_count
+        from pg_stat_user_tables
+        where n_live_tup > 0
+        order by 5 asc;
+      version: '>=0.0.0'
+      timeout: 1
+      status: enable
+  metrics:
+    - name: datname
+      description: database name of table
+      usage: LABEL
+    - name: schemaname
+      description: Schema name of table
+      usage: LABEL
+    - name: relname
+      description: Table name of table
+      usage: LABEL
+    - name: n_live_tup
+      description: live tup of table
+      usage: LABEL
+    - name: n_dead_tup
+      description: dead tup of table
+      usage: LABEL
+    - name: dead_rate
+      description: Dead rate of table
+      usage: GAUGE
+    - name: last_vacuum
+      description: last manual vacuum time of table (epoch)
+      usage: LABEL
+    - name: last_autovacuum
+      description: last autovacuum time of table (epoch)
+      usage: LABEL
+    - name: last_analyze
+      description: last manual analyze time of table (epoch)
+      usage: LABEL
+    - name: last_autoanalyze
+      description: last autoanalyze time of table (epoch)
+      usage: LABEL
+    - name: vacuum_count
+      description: count of vacuum
+      usage: GAUGE
+    - name: autovacuum_count
+      description: Count of autovacuum
+      usage: GAUGE
+    - name: analyze_count
+      description: Count of analyze
+      usage: GAUGE
+    - name: autoanalyze_count
+      description: Count of autoanalyze
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+
+pg_lock_sql:
+  name: pg_lock_sql
+  desc: OpenGauss blocking and blocked SQL statements
+  query:
+    - name: pg_lock_sql
+      sql: |-
+        select distinct locker.pid as locker_pid,
+        locked.pid as locked_pid,
+        coalesce(locker_act.client_addr,'127.0.0.1')::inet as locker_addr,
+        coalesce(locked_act.client_addr,'127.0.0.1')::inet as locked_addr,
+        locker_act.usename as locker_username,
+        locked_act.usename as locked_username,
+        locker.mode as locker_mode,
+        locked.mode as locked_mode,
+        locker.locktype as locker_locktype,
+        locked.locktype as locked_locktype,
+        locker_act.usename as locker_user,
+        locked_act.usename as locked_user,
+        (locker_act.xact_start)::text as locker_xact_start,
+        (locked_act.xact_start)::text as locked_xact_start,
+        (locker_act.query_start)::text as locker_query_start,
+        (locked_act.query_start)::text as locked_query_start,
+        extract(epoch from now() - locked_act.query_start) as locked_times,
+        locker_act.query as locker_query,
+        locked_act.query as locked_query
+        from pg_locks locked,
+        pg_locks locker,
+        pg_stat_activity locked_act,
+        pg_stat_activity locker_act
+        where locker.granted=true
+        and locked.granted=false
+        and locked.pid=locked_act.pid
+        and locker.pid=locker_act.pid
+        and locker_act.query not like '%select distinct locker.pid %'
+        and locker.pid <> locked.pid
+        and locker.mode not like 'AccessShareLock' and locker.mode not like 'ExclusiveLock'
+        order by 13 asc limit 10;
+      version: '>=0.0.0'
+      timeout: 1
+      status: enable
+  metrics:
+    - name: locker_pid
+      description: Pid of locker
+      usage: LABEL
+    - name: locked_pid
+      description: Pid of locked session
+      usage: LABEL
+    - name: locker_addr
+      description: Client address of locker
+      usage: LABEL
+    - name: locked_addr
+      description: Client address of locked session
+      usage: LABEL
+    - name: locker_username
+      description: User name of locker
+      usage: LABEL
+    - name: locked_username
+      description: User name of locked session
+      usage: LABEL
+    - name: locker_mode
+      description: Lock mode held by locker
+      usage: LABEL
+    - name: locked_mode
+      description: Lock mode requested by locked session
+      usage: LABEL
+    - name: locker_locktype
+      description: Lock type of locker
+      usage: LABEL
+    - name: locked_locktype
+      description: Lock type of locked session
+      usage: LABEL
+    - name: locker_user
+      description: User of locker
+      usage: LABEL
+    - name: locked_user
+      description: User of locked session
+      usage: LABEL
+    - name: locker_xact_start
+      description: Transaction start time of locker
+      usage: LABEL
+    - name: locked_xact_start
+      description: Transaction start time of locked session
+      usage: LABEL
+    - name: locker_query_start
+      description: Query start time of locker
+      usage: LABEL
+    - name: locked_query_start
+      description: Query start time of locked session
+      usage: LABEL
+    - name: locked_times
+      description: Seconds the locked query has been waiting
+      usage: GAUGE
+    - name: locker_query
+      description: Query text of locker
+      usage: LABEL
+    - name: locked_query
+      description: Query text of locked session
+      usage: LABEL
+  status: enable
+  ttl: 60
+  timeout: 1
+
+
+og_memory_info:
+  name: og_memory_info
+  desc: OpenGauss memory usage information
+  query:
+    - name: og_memory_info
+      sql: select memorytype,memorymbytes from pv_total_memory_detail();
+      version: '>=0.0.0'
+      timeout: 1
+      ttl: 60
+      status: enable
+      dbRole: ""
+  metrics:
+    - name: memorytype
+      description: Name of memorytype
+      usage: LABEL
+    - name: memorymbytes
+      description: memory usage in MB
+      usage: GAUGE
+  status: enable
+  ttl: 60
+  timeout: 1
+  public: true
+
+og_session_memory:
+  name: og_session_memory
+  desc: OpenGauss session memory usage information
+  query:
+    - name: og_session_memory
+      sql: |-
+        select sessionid,
+        coalesce(application_name,'') as application_name,
+        coalesce(client_addr::text,'') as client_addr,
+        sum(usedsize)::bigint as usedsize,
+        sum(totalsize)::bigint as totalsize,
+        query
+        from gs_session_memory_detail s,pg_stat_activity a
+        where substring_inner(sessid,position('.'
in sessid) +1)=a.sessionid + group by sessionid,query,application_name,client_addr + order by sum(totalsize) desc limit 10; + version: '>=0.0.0' + timeout: 30 + ttl: 600 + status: enable + dbRole: "" + metrics: + - name: sessionid + description: sessionid + usage: LABEL + - name: application_name + description: application name + usage: LABEL + - name: client_addr + description: client addr + usage: LABEL + - name: usedsize + description: session used memory + usage: GAUGE + - name: totalsize + description: session total memory + usage: GAUGE + - name: query + description: query + usage: LABEL + status: enable + ttl: 600 + timeout: 30 + public: true + +og_context_memory: + name: og_context_memory + desc: OpenGauss context use memory information + query: + - name: og_session_memory + sql: |- + select contextname, + sum(usedsize)::bigint as usedsize, + sum(totalsize)::bigint as totalsize + from gs_session_memory_detail + group by contextname + order by sum(totalsize) desc limit 10; + version: '>=0.0.0' + timeout: 30 + ttl: 600 + status: enable + dbRole: "" + metrics: + - name: contextname + description: contextname + usage: LABEL + - name: usedsize + description: session used memory + usage: GAUGE + - name: totalsize + description: session total memory + usage: GAUGE + status: enable + ttl: 600 + timeout: 30 + public: true + +og_state_memory: + name: og_state_memory + desc: OpenGauss session state use memory information + query: + - name: og_state_memory + sql: |- + select state,sum(totalsize)::bigint as totalsize + from gs_session_memory_detail m,pg_stat_activity a + where substring_inner(sessid,position('.' in sessid) +1)=a.sessionid + and usename<>'mondb' and pid != pg_backend_pid() + group by state order by sum(totalsize) desc; + version: '>=0.0.0' + timeout: 30 + ttl: 600 + status: enable + dbRole: "" + metrics: + - name: state + description: session state + usage: LABEL + - name: totalsize + description: session state total memory + usage: GAUGE + status: enable + ttl: 600 + timeout: 30 + public: true + +og_cpu_load: + name: og_cpu_load + desc: OpenGauss cpu load + query: + - name: og_cpu_load + sql: select 'og_total_cpu' og_total_cpu,total_cpu() total_cpu; + version: '>=0.0.0' + timeout: 1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: og_total_cpu + description: total cpu name + usage: LABEL + - name: total_cpu + description: total cpu use + usage: GAUGE + status: enable + ttl: 10 + timeout: 1 + public: true + diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/pg_settings.yml b/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/pg_settings.yml new file mode 100644 index 000000000..8a877db8b --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/pg_settings.yml @@ -0,0 +1,26 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +# This configure file uses PG-exporter format. +pg_settings: + query: "select name, case when vartype = 'bool' then (case when setting = 'on' then 1. else 0. 
end) else setting::float end as setting, vartype from pg_settings where vartype not in ('enum', 'string');" + metrics: + - name: + usage: "LABEL" + description: "Name of setting" + - setting: + usage: "GAUGE" + description: "Value of setting" + - vartype: + usage: "LABEL" + description: "Type of setting" diff --git a/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/statements.yml b/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/statements.yml new file mode 100644 index 000000000..cfece2a1a --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/opengauss_exporter/yamls/statements.yml @@ -0,0 +1,146 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +pg_sql_statement_history: + name: pg_sql_statement_history + desc: openGauss history query statement history + query: + - name: pg_sql_statement_history + sql: " + SELECT H.unique_query_id, + H.db_name AS datname, + H.schema_name AS SCHEMA, + H.query, + (extract(epoch + FROM H.start_time) * 1000)::bigint as start_time, + (extract(epoch + FROM H.finish_time) * 1000)::bigint as finish_time, + extract(epoch + FROM H.finish_time - H.start_time)* 1000 AS exc_time, + H.cpu_time, + H.data_io_time, + H.n_returned_rows, + H.n_tuples_fetched, + H.n_tuples_returned, + H.n_tuples_inserted, + H.n_tuples_updated, + H.n_tuples_deleted, + (H.n_blocks_hit / (H.n_blocks_fetched+0.01)) AS hit_rate, + (H.n_blocks_fetched / (H.n_blocks_hit+0.01)) AS fetch_rate, + H.lock_wait_count, + H.lwlock_wait_count, + S.n_calls, + S.sort_count / S.n_calls AS sort_count, + S.sort_mem_used / S.n_calls AS sort_mem_used, + S.sort_spill_count / S.n_calls AS sort_spill_count, + S.hash_count / S.n_calls AS hash_count, + S.hash_mem_used / S.n_calls AS hash_mem_used, + S.hash_spill_count / S.n_calls AS hash_spill_count + FROM dbe_perf.statement_history H inner join dbe_perf.statement S + on H.unique_query_id = S.unique_sql_id + WHERE H.query !='COMMIT' + AND H.application_name != 'gs_clean' + AND S.n_calls > 1 + AND (H.start_time > now() - (1 / 24 / 60 / 60) * ({scrape_interval} / 1000) + OR (exc_time > {scrape_interval} AND H.finish_time > now() - (1 / 24 / 60 / 60) * ({scrape_interval} / 1000)) + ) + ORDER BY H.start_time DESC + LIMIT 50;" + version: '>=0.0.0' + timeout: 10 + status: enable + metrics: + - name: datname + description: Name of database + usage: LABEL + - name: schema + description: Name of schema + usage: LABEL + - name: start_time + description: start executing time + usage: LABEL + - name: finish_time + description: finish executing time + usage: LABEL + - name: query + description: query text + usage: LABEL + - name: unique_query_id + description: unique query id + usage: LABEL + - name: debug_query_id + description: debug query id + usage: LABEL + - name: n_returned_rows + description: select returned rows + usage: LABEL + - name: n_tuples_returned + description: n tuples return + usage: LABEL + - name: n_tuples_fetched + description: select fetch rows + usage: LABEL + - name: n_tuples_inserted + description: insert tuples + usage: LABEL + - name: 
n_tuples_updated + description: update tuples + usage: LABEL + - name: n_tuples_deleted + description: delete tuples + usage: LABEL + - name: hit_rate + description: hit ratio + usage: LABEL + - name: fetch_rate + description: fetch rate + usage: LABEL + - name: lock_wait_count + description: lock wait count + usage: LABEL + - name: lwlock_wait_count + description: lwclock wait count + usage: LABEL + - name: cpu_time + description: cpu time + usage: LABEL + - name: data_io_time + description: data_io_time + usage: LABEL + - name: exc_time + description: time taken to execute + usage: GAUGE + - name: n_calls + description: the number of calls + usage: LABEL + - name: sort_count + description: sort count + usage: LABEL + - name: sort_mem_used + description: sort mem used + usage: LABEL + - name: sort_spill_count + description: sort spill count + usage: LABEL + - name: hash_count + description: hash count + usage: LABEL + - name: hash_mem_used + description: hash mem used + usage: LABEL + - name: hash_spill_count + description: hash spill count + usage: LABEL + status: enable + ttl: 0 + timeout: 1 diff --git a/src/gausskernel/dbmind/tools/predictor/install/ca_ext.txt b/src/gausskernel/dbmind/tools/components/predictor/install/ca_ext.txt similarity index 100% rename from src/gausskernel/dbmind/tools/predictor/install/ca_ext.txt rename to src/gausskernel/dbmind/tools/components/predictor/install/ca_ext.txt diff --git a/src/gausskernel/dbmind/tools/components/predictor/install/requirements-gpu.txt b/src/gausskernel/dbmind/tools/components/predictor/install/requirements-gpu.txt new file mode 100644 index 000000000..7fc4f1820 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/predictor/install/requirements-gpu.txt @@ -0,0 +1,7 @@ +configparser +Flask +Keras==2.3.1 +numpy +pandas +scikit-learn +tensorflow-gpu==1.15.5 diff --git a/src/gausskernel/dbmind/tools/components/predictor/install/requirements.txt b/src/gausskernel/dbmind/tools/components/predictor/install/requirements.txt new file mode 100644 index 000000000..7472214f5 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/predictor/install/requirements.txt @@ -0,0 +1,7 @@ +configparser +Flask +Keras==2.3.1 +numpy +pandas +scikit-learn +tensorflow==1.15.5 diff --git a/src/gausskernel/dbmind/tools/predictor/install/ssl.sh b/src/gausskernel/dbmind/tools/components/predictor/install/ssl.sh similarity index 100% rename from src/gausskernel/dbmind/tools/predictor/install/ssl.sh rename to src/gausskernel/dbmind/tools/components/predictor/install/ssl.sh diff --git a/src/gausskernel/dbmind/tools/predictor/python/certs.py b/src/gausskernel/dbmind/tools/components/predictor/python/certs.py similarity index 97% rename from src/gausskernel/dbmind/tools/predictor/python/certs.py rename to src/gausskernel/dbmind/tools/components/predictor/python/certs.py index 587b36032..3b20fcdc7 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/certs.py +++ b/src/gausskernel/dbmind/tools/components/predictor/python/certs.py @@ -1,69 +1,69 @@ -""" - openGauss is licensed under Mulan PSL v2. - You can use this software according to the terms and conditions of the Mulan PSL v2. - You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - - THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - See the Mulan PSL v2 for more details. 
- - Copyright (c) 2020 Huawei Technologies Co.,Ltd. - Description: The certificate functions for AiEngine. -""" - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes - -import os -import hashlib - -def check_content_key(content, key): - if not (type(content) == bytes): - raise Exception("content's type must be bytes.") - elif not (type(key) in (bytes, str)): - raise Exception("bytes's type must be in (bytes, str).") - - iv_len = 16 - if not (len(content) >= (iv_len + 16)): - raise Exception("content's len must >= (iv_len + 16).") - -def aes_cbc_decrypt(content, key): - check_content_key(content, key) - if type(key) == str: - key = bytes(key) - iv_len = 16 - # pre shared key iv - iv = content[16 + 1 + 16 + 1:16 + 1 + 16 + 1 + 16] - - # pre shared key enctryt - enc_content = content[:iv_len] - backend = default_backend() - cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) - decrypter = cipher.decryptor() - dec_content = decrypter.update(enc_content) + decrypter.finalize() - server_decipher_key = dec_content.decode('utf-8','ignore').rstrip(b'\x00'.decode()) - return server_decipher_key - -def aes_cbc_decrypt_with_path(path): - ciper_path = os.path.realpath(path + '/server.key.cipher') - with open(ciper_path, 'rb') as f: - cipher_txt = f.read() - rand_path = os.path.realpath(path + '/server.key.rand') - with open(rand_path, 'rb') as f: - rand_txt = f.read() - if cipher_txt is None or cipher_txt == "": - return None - - server_vector_cipher_vector = cipher_txt[16 + 1:16 + 1 + 16] - # pre shared key rand - server_key_rand = rand_txt[:16] - - # worker key - server_decrypt_key = hashlib.pbkdf2_hmac('sha256', server_key_rand, - server_vector_cipher_vector, - 10000, 16) - server_key = aes_cbc_decrypt(cipher_txt, server_decrypt_key) - return server_key +""" + openGauss is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. + + Copyright (c) 2020 Huawei Technologies Co.,Ltd. + Description: The certificate functions for AiEngine. 
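+
+    Note (editor): server.key.cipher and server.key.rand are read as fixed-offset
+    binary blobs; the worker key that unlocks the cipher file is derived as
+    hashlib.pbkdf2_hmac('sha256', rand[:16], cipher[17:33], 10000, 16).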
+""" + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + +import os +import hashlib + +def check_content_key(content, key): + if not (type(content) == bytes): + raise Exception("content's type must be bytes.") + elif not (type(key) in (bytes, str)): + raise Exception("bytes's type must be in (bytes, str).") + + iv_len = 16 + if not (len(content) >= (iv_len + 16)): + raise Exception("content's len must >= (iv_len + 16).") + +def aes_cbc_decrypt(content, key): + check_content_key(content, key) + if type(key) == str: + key = bytes(key) + iv_len = 16 + # pre shared key iv + iv = content[16 + 1 + 16 + 1:16 + 1 + 16 + 1 + 16] + + # pre shared key enctryt + enc_content = content[:iv_len] + backend = default_backend() + cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend) + decrypter = cipher.decryptor() + dec_content = decrypter.update(enc_content) + decrypter.finalize() + server_decipher_key = dec_content.decode('utf-8','ignore').rstrip(b'\x00'.decode()) + return server_decipher_key + +def aes_cbc_decrypt_with_path(path): + ciper_path = os.path.realpath(path + '/server.key.cipher') + with open(ciper_path, 'rb') as f: + cipher_txt = f.read() + rand_path = os.path.realpath(path + '/server.key.rand') + with open(rand_path, 'rb') as f: + rand_txt = f.read() + if cipher_txt is None or cipher_txt == "": + return None + + server_vector_cipher_vector = cipher_txt[16 + 1:16 + 1 + 16] + # pre shared key rand + server_key_rand = rand_txt[:16] + + # worker key + server_decrypt_key = hashlib.pbkdf2_hmac('sha256', server_key_rand, + server_vector_cipher_vector, + 10000, 16) + server_key = aes_cbc_decrypt(cipher_txt, server_decrypt_key) + return server_key diff --git a/src/gausskernel/dbmind/tools/predictor/python/e_log/.gitkeep b/src/gausskernel/dbmind/tools/components/predictor/python/e_log/.gitkeep similarity index 98% rename from src/gausskernel/dbmind/tools/predictor/python/e_log/.gitkeep rename to src/gausskernel/dbmind/tools/components/predictor/python/e_log/.gitkeep index 58c6a0063..222de580b 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/e_log/.gitkeep +++ b/src/gausskernel/dbmind/tools/components/predictor/python/e_log/.gitkeep @@ -1 +1 @@ -#the file to track this essential empty dictionary. +#the file to track this essential empty dictionary. 
diff --git a/src/gausskernel/dbmind/tools/predictor/python/log.conf b/src/gausskernel/dbmind/tools/components/predictor/python/log.conf similarity index 93% rename from src/gausskernel/dbmind/tools/predictor/python/log.conf rename to src/gausskernel/dbmind/tools/components/predictor/python/log.conf index 2751cdfff..a802a031b 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/log.conf +++ b/src/gausskernel/dbmind/tools/components/predictor/python/log.conf @@ -1,39 +1,39 @@ -[loggers] -keys=root, parse, model - -[logger_root] -level=DEBUG -handlers=streamHandler,fileHandler -qualname=root - -[logger_parse] -level=DEBUG -handlers=fileHandler -qualname=parse - -[logger_model] -level=DEBUG -handlers=streamHandler,fileHandler -qualname=model - -[handlers] -keys=streamHandler,fileHandler - -[handler_streamHandler] -class=StreamHandler -level=INFO -formatter=simpleFormatter -args=(sys.stdout,) - -[handler_fileHandler] -class=handlers.RotatingFileHandler -level=DEBUG -formatter=simpleFormatter -args=('./e_log/model_logs', 'a', 10*1024*1024, 30) - -[formatters] -keys=simpleFormatter - -[formatter_simpleFormatter] -format=%(levelname)s - %(name)s - %(asctime)s - %(module)s.%(funcName)s - %(message)s +[loggers] +keys=root, parse, model + +[logger_root] +level=DEBUG +handlers=streamHandler,fileHandler +qualname=root + +[logger_parse] +level=DEBUG +handlers=fileHandler +qualname=parse + +[logger_model] +level=DEBUG +handlers=streamHandler,fileHandler +qualname=model + +[handlers] +keys=streamHandler,fileHandler + +[handler_streamHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[handler_fileHandler] +class=handlers.RotatingFileHandler +level=DEBUG +formatter=simpleFormatter +args=('./e_log/model_logs', 'a', 10*1024*1024, 30) + +[formatters] +keys=simpleFormatter + +[formatter_simpleFormatter] +format=%(levelname)s - %(name)s - %(asctime)s - %(module)s.%(funcName)s - %(message)s datefmt= \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/predictor/python/log/.gitkeep b/src/gausskernel/dbmind/tools/components/predictor/python/log/.gitkeep similarity index 98% rename from src/gausskernel/dbmind/tools/predictor/python/log/.gitkeep rename to src/gausskernel/dbmind/tools/components/predictor/python/log/.gitkeep index 58c6a0063..222de580b 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/log/.gitkeep +++ b/src/gausskernel/dbmind/tools/components/predictor/python/log/.gitkeep @@ -1 +1 @@ -#the file to track this essential empty dictionary. +#the file to track this essential empty dictionary. diff --git a/src/gausskernel/dbmind/tools/predictor/python/model.py b/src/gausskernel/dbmind/tools/components/predictor/python/model.py similarity index 97% rename from src/gausskernel/dbmind/tools/predictor/python/model.py rename to src/gausskernel/dbmind/tools/components/predictor/python/model.py index 53e72fcd9..a3f6a741b 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/model.py +++ b/src/gausskernel/dbmind/tools/components/predictor/python/model.py @@ -1,691 +1,691 @@ -""" - openGauss is licensed under Mulan PSL v2. - You can use this software according to the terms and conditions of the Mulan PSL v2. - You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - - THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - See the Mulan PSL v2 for more details. 
- - Copyright (c) 2020 Huawei Technologies Co.,Ltd. - Description: The general utilities and APIs of machine learning models. -""" - -import os -import pickle - -import ast -from keras.backend.tensorflow_backend import set_session -import tensorflow as tf -import keras -import time -from keras import backend as K -from keras.models import load_model, Sequential -from sklearn.model_selection import train_test_split -from keras.layers import LSTM, Dense, CuDNNLSTM -import numpy as np -import pandas as pd -from sklearn.decomposition import PCA -import shutil -from keras.preprocessing import sequence -from keras.callbacks import Callback -import logging.config - -import settings - -class LossHistory(Callback): - """ - This function recods the training process to the target log file. - """ - def __init__(self, log_path, model_name, max_epoch): - self.log_path = log_path - self.model_name = model_name - self.max_epoch = max_epoch - def on_train_begin(self, logs={}): - self.losses = [] - self.val_acc = [] - def on_epoch_end(self, epoch, logs={}): - now = time.time() - local_time = time.localtime(now) - self.losses.append(logs.get('loss')) - if epoch % 100 == 0: - json_log = open(self.log_path, mode='at', buffering=1) - json_log.write(time.strftime('%Y-%m-%d %H:%M:%S', local_time) + - ' [TRAINING] [MODEL]: %s [Epoch]: %d/%d [Loss]: %.2f' % - (self.model_name, epoch, self.max_epoch, logs['loss']) + '\n') - json_log.close() - -class FeatureParser(): - """ - This is the feature_parser class for AI Engine, includes the methods to parse encoded file. - """ - - def __init__(self, model_info, filename): - """ - The file should be in the format of , , , , , - , for , it should be an list of encoded feature with fixed length, Which - must be ensured by the backend side. - 'dim_red': the 'n_components' of PCA. - 'filename': the path of target file to parse. 
- 'model_name': the model that the parser adapted to - """ - self.dim_red = round(float(model_info.dim_red), 2) - self.filename = filename - self.model_name = model_info.model_name - logging.config.fileConfig('log.conf') - self.parse_logger = logging.getLogger('parse') - - def parse(self, is_train=True): - try: - df_tmp = pd.read_csv(self.filename, header=None, - names=["query_id", "plan_node_id", "parent_node_id", "enc", - "startup_time", "total_time", "actual_rows", "peak_mem"], index_col=False) - - df = df_tmp.sort_values(by=['query_id', 'plan_node_id']) - df.reset_index(drop=True, inplace=True) - enc_arr = np.array([list(map(float, df['enc'].values[i].split())) for i in range(len(df))]) - df['enc'] = list(enc_arr) - - except FileNotFoundError: - self.parse_logger.error('The encoding file is not found.') - raise - except KeyError: - self.parse_logger.error('Missing compulsory encoding information.') - raise - except: - raise - if self.dim_red > 0: - path_pca_model = os.path.realpath( - os.path.join(settings.PATH_MODELS, self.model_name, self.model_name + '.pkl')) - if is_train: - try: - reload_pca = open(path_pca_model, 'rb') - dim_reducer = pickle.load(reload_pca) - reload_pca.close() - reduced = dim_reducer.transform(enc_arr) - df['enc'] = list(reduced) - except: - dim_reducer = PCA(self.dim_red, svd_solver='full') - dim_reducer.fit(enc_arr) - reduced = dim_reducer.transform(enc_arr) - df['enc'] = list(reduced) - self.parse_logger.debug('[reduce ratio]:{}'.format(self.dim_red)) - self.parse_logger.debug('[PCA] n_dim:{}'.format(dim_reducer.n_components_)) - self.parse_logger.debug('[PCA] explained:{}'.format(np.sum(dim_reducer.explained_variance_ratio_))) - if not os.path.exists(pca_to_save): - os.mknod(pca_to_save, 0o600) - pca_to_save = open(path_pca_model, 'wb') - pickle.dump(dim_reducer, pca_to_save) - pca_to_save.close() - else: - pred_reload_pca = open(path_pca_model, 'rb') - dim_reducer = pickle.load(pred_reload_pca) - pred_reload_pca.close() - reduced = dim_reducer.transform(enc_arr) - df['enc'] = list(reduced) - df.sort_values(inplace=True, by=["query_id", "plan_node_id"]) - feature_length = len(df.iloc[0]['enc']) - arr_enc = [] - arr_child = [] - arr_startup = [] - arr_total = [] - arr_rows = [] - arr_mem = [] - indx = np.loadtxt(self.filename, delimiter=",", usecols=(0, 1, 2), dtype=np.int) - children = [[] for _ in range(len(df))] - base = 0 - prev = 0 - for index, row in df.iterrows(): - if prev != row.query_id: - base = index - prev = row.query_id - if row.parent_node_id != 0: - (children[base + row.parent_node_id - 1]).append(row.plan_node_id) - df["children"] = children - for i in indx: - qid = i[0] - nid = i[1] - enc = [] - child = [] - serial = df[(df.query_id == qid) & (df.plan_node_id == nid)] - arr_startup.append(serial.startup_time.values[0]) - arr_total.append(serial.total_time.values[0]) - arr_rows.append(serial.actual_rows.values[0]) - arr_mem.append(serial.peak_mem.values[0]) - self.gen_data(df, qid, nid, enc, child, nid) - arr_enc.append(enc) - arr_child.append(child) - return feature_length, arr_enc, arr_child, arr_startup, arr_total, arr_rows, arr_mem - - def gen_data(self, df, qid, nid, enc, child, base_nid): - serial = df[(df.query_id == qid) & (df.plan_node_id == nid)] - try: - enc.append(serial.enc.values[0]) - except: - self.parse_logger.error('Failed to parse encoding information.') - raise - child_list_tmp = serial.children.tolist()[0] - child_list = [x - base_nid for x in child_list_tmp] - child.append(child_list) - for child_id in child_list_tmp: - 
self.gen_data(df, qid, child_id, enc, child, base_nid) - - - -class ModelInfo: - """ This is model_info class that keeps the parameters about the model configuration - - 'max_epoch': [OPTIONAL] default 500 - 'learning_rate': [OPTIONAL] default 1 - 'hidden_units': [OPTIONAL] default 50 - 'batch_size': [OPTIONAL] default 5000 - 'model_name': [COMPULSORY] model name to be saved, can be a already trained model - 'dim_red': [OPTIONAL] part of variance explained by PCA, default 0 means no PCA - 'model_targets': [COMPULSORY] target labels to predict - """ - - def __init__(self, model_name): - self.model_name = model_name - self.max_epoch = 500 - self.learning_rate = 1 - self.hidden_units = 50 - self.batch_size = 500 - self.dim_red = -1 - self.model_targets = '' - self.model_dir = os.path.realpath(os.path.join(settings.PATH_MODELS_INFO, self.model_name)) - self.conf_path = os.path.realpath(os.path.join(self.model_dir, self.model_name + '.conf')) - self.model_path = os.path.realpath(os.path.join(self.model_dir, self.model_name + '.h5')) - self.feature_length = None - self.label_length = None - self.max_startup = None - self.max_total = None - self.max_row = None - self.max_mem = None - self.last_epoch = None - logging.config.fileConfig('log.conf') - self.model_logger = logging.getLogger('model') - - def get_info(self, arg_json): - """ - get the model information from curl request and update the config parameters - :param arg_json: the json format of received curl request - :return: 0: Success - F: TypeError - I: Invalid parameter type - M: Missing compulsory argument - """ - if 'labels' in arg_json.keys(): - self.model_targets = str(arg_json['labels']) - else: - return 'M' - for key in arg_json.keys(): - if key == 'max_epoch': - try: - max_epoch = int(arg_json[key]) - if max_epoch <= 0: - return 'F' - self.max_epoch = max_epoch - except TypeError: - return 'F' - elif key == 'model_name': - self.model_name = str(arg_json['model_name']) - elif key == 'learning_rate': - try: - learning_rate = round(float(arg_json[key]), 2) - if learning_rate <= 0: - return 'F' - self.learning_rate = learning_rate - except TypeError: - return 'F' - elif key == 'hidden_units': - try: - hidden_units = int(arg_json[key]) - if hidden_units <= 0: - return 'F' - self.hidden_units = hidden_units - except TypeError: - return 'F' - elif key == 'batch_size': - try: - batch_size = int(arg_json[key]) - if batch_size <= 0: - return 'F' - self.batch_size = batch_size - except TypeError: - return 'F' - elif key == 'labels': - tmp_targets = arg_json[key] - if len(tmp_targets) != len(set(tmp_targets)): - return 'F' - checklist = ['S', 'T', 'R', 'M'] - model_targets = '' - for i in checklist: - if i in tmp_targets: - model_targets += i - self.model_targets = model_targets - self.label_length = len(model_targets) - elif key == 'dim_red': - try: - dim_red = round(float(arg_json[key]), 2) - if dim_red <= 0 and dim_red != -1: - return 'F' - self.dim_red = dim_red - except TypeError: - return 'F' - elif key == 'template_name': - if arg_json[key] != 'rlstm': - return 'F' - else: - return 'I' - if os.path.exists(self.conf_path) and os.path.getsize(self.conf_path): - self.update_info() - else: - self.dump_dict() - return '0' - - def update_info(self): - params_ = self.load_dict(self.conf_path) - self.feature_length = params_['feature_length'] - self.max_startup = params_['max_startup'] - self.max_total = params_['max_total'] - self.max_row = params_['max_total'] - self.max_mem = params_['max_mem'] - self.last_epoch = params_['last_epoch'] - if 
self.check_params(): - return self.dump_dict() - else: - return False - - def to_dict(self): - params_dict = {} - try: - params_dict['model_name'] = self.model_name - params_dict['max_epoch'] = self.max_epoch - params_dict['learning_rate'] = self.learning_rate - params_dict['hidden_units'] = self.hidden_units - params_dict['batch_size'] = self.batch_size - params_dict['dim_red'] = self.dim_red - params_dict['model_targets'] = self.model_targets - params_dict['label_length'] = self.label_length - params_dict['max_startup'] = self.max_startup - params_dict['max_total'] = self.max_total - params_dict['max_row'] = self.max_row - params_dict['max_mem'] = self.max_mem - params_dict['last_epoch'] = self.last_epoch - params_dict['model_path'] = self.model_path - params_dict['conf_path'] = self.conf_path - params_dict['model_dir'] = self.model_dir - params_dict['feature_length'] = self.feature_length - self.model_logger.info(params_dict) - except ValueError: - self.model_logger.error('Model Info ERROR: missing compulsory parameter.') - raise - except: - raise - return params_dict - - def dump_dict(self): - """ - save model information - :return: - """ - params_dict = self.to_dict() - if self.configure_check(params_dict): - if not os.path.exists(self.conf_path): - os.mknod(self.conf_path, 0o600) - with open(self.conf_path, 'w') as cf: - cf.write(str(params_dict)) - return True - else: - return False - - def load_dict(self, conf_path): - """ - load the model information - :param conf_path: path to model configurations - :return: model_infor in dictionary format - """ - with open(conf_path, 'r') as conf: - params_dict = ast.literal_eval(conf.read()) - return params_dict - - def configure_check(self, params_dict): - ''' - To determine whether the model needs to be re-initialized - :param model_name: name of the model - :return: - ''' - if not os.path.isdir(self.model_dir): - os.makedirs(self.model_dir, mode=0o700) - return True - elif not os.path.exists(self.conf_path): - if os.path.getsize(self.conf_path): - return True - else: - saved_conf = self.load_dict(self.conf_path) - checklist = ['dim_red', 'model_targets'] - for item in checklist: - if str(params_dict[item]) != str(saved_conf[item]): - return False - return True - - def load_info(self): - params_dict = self.load_dict(self.conf_path) - try: - self.model_name = params_dict['model_name'] - self.max_epoch = params_dict['max_epoch'] - self.learning_rate = params_dict['learning_rate'] - self.hidden_units = params_dict['hidden_units'] - self.batch_size = params_dict['batch_size'] - self.dim_red = params_dict['dim_red'] - self.feature_length = params_dict['feature_length'] - self.label_length = params_dict['label_length'] - self.max_startup = params_dict['max_startup'] - self.max_total = params_dict['max_total'] - self.max_row = params_dict['max_row'] - self.max_mem = params_dict['max_mem'] - self.model_targets = params_dict['model_targets'] - self.last_epoch = params_dict['last_epoch'] - self.model_path = params_dict['model_path'] - self.conf_path = params_dict['conf_path'] - self.model_dir = params_dict['model_dir'] - except KeyError: - self.model_logger.error('Some of the model parameters are missing.') - raise - except: - raise - - def check_params(self): - params_dict = self.to_dict() - for val in params_dict.values(): - if 'None' == str(val): - self.model_logger.warning( - 'The params of model is not complete, and the params are as following: {}'.format(params_dict)) - return False - return True - - - - def make_epsilon(self): - epsilon_startup 
= 1 / float(self.max_startup) - epsilon_total = 1 / float(self.max_total) - epsilon_row = 1 / float(self.max_row) - epsilon_mem = 1 / float(self.max_mem) - epsilon_arr = [] - for label in self.model_targets: - if label == 'S': - epsilon_arr.append(epsilon_startup) - elif label == 'T': - epsilon_arr.append(epsilon_total) - elif label == 'R': - epsilon_arr.append(epsilon_row) - elif label == 'M': - epsilon_arr.append(epsilon_mem) - return epsilon_arr - - -class RnnModel(): - """ - This is the rnn_model class that keeps APIs for ml functions. - """ - - def __init__(self, model_info): - config = tf.compat.v1.ConfigProto() - config.gpu_options.allow_growth = True - self.graph = tf.Graph() - self.session = tf.compat.v1.Session(config=config, graph=self.graph) - self.model = None - self.model_info = model_info - logging.config.fileConfig('log.conf') - self.model_logger = logging.getLogger('model') - - def _build_model(self, epsilon): - model = Sequential() - try: - model.add(CuDNNLSTM(units=int(self.model_info.hidden_units), return_sequences=True, - input_shape=(None, int(self.model_info.feature_length)))) - model.add(CuDNNLSTM(units=int(self.model_info.hidden_units), return_sequences=False)) - except: - model.add(LSTM(units=int(self.model_info.hidden_units), return_sequences=True, - input_shape=(None, int(self.model_info.feature_length)))) - model.add(LSTM(units=int(self.model_info.hidden_units), return_sequences=False)) - model.add(Dense(units=int(self.model_info.hidden_units), activation='relu')) - model.add(Dense(units=int(self.model_info.hidden_units), activation='relu')) - model.add(Dense(units=int(self.model_info.label_length), activation='sigmoid')) - optimizer = keras.optimizers.Adadelta(lr=float(self.model_info.learning_rate), rho=0.95) - ratio_error = ratio_error_loss_wrapper(epsilon) - ratio_acc_2 = ratio_error_acc_wrapper(epsilon, 2) - model.compile(loss=ratio_error, metrics=[ratio_acc_2], optimizer=optimizer) - return model - - def parse(self, filename): - ''' - parse the file and get the encoded features - :param filename: the path of file to parse - :return: feature: the features for training - label: the labels for training - need_init: whether the model need init - ''' - parser = FeatureParser(self.model_info, filename) - feature_length, arr_enc, arr_child, arr_startup, arr_total, arr_row, arr_mem = \ - parser.parse() - need_init = self.check_need_init(feature_length) - if need_init: - max_startup, max_total, max_row, max_mem = np.max(arr_startup), np.max(arr_total), np.max(arr_row), np.max( - arr_mem) - self.model_info.max_startup = max(max_startup, 1) - self.model_info.max_total = max(max_total, 1) - self.model_info.max_row = max(max_row, 1) - self.model_info.max_mem = max(max_mem, 1) - self.model_info.feature_length = feature_length - shutil.rmtree(self.model_info.model_path, ignore_errors=True) - shutil.rmtree(os.path.realpath( - os.path.join(settings.PATH_LOG, self.model_info.model_name)), ignore_errors=True) - arr_startup = np.array(arr_startup, dtype=float).reshape((-1, 1)) - arr_total = np.array(arr_total, dtype=float).reshape((-1, 1)) - arr_row = np.array(arr_row, dtype=float).reshape((-1, 1)) - arr_mem = np.array(arr_mem, dtype=float).reshape((-1, 1)) - arr_startup /= float(self.model_info.max_startup) - arr_total /= float(self.model_info.max_total) - arr_row /= float(self.model_info.max_row) - arr_mem /= float(self.model_info.max_mem) - label = None - for target in self.model_info.model_targets: - if label is None: - if target == 'S': - label = arr_startup - elif 
target == 'T': - label = arr_total - elif target == 'R': - label = arr_row - elif target == 'M': - label = arr_mem - else: - if target == 'S': - label = np.hstack((label, arr_startup)) - elif target == 'T': - label = np.hstack((label, arr_total)) - elif target == 'R': - label = np.hstack((label, arr_row)) - elif target == 'M': - label = np.hstack((label, arr_mem)) - max_len = 0 - for sample in arr_enc: - max_len = max(len(sample), max_len) - feature = sequence.pad_sequences(arr_enc, maxlen=max_len) - self.model_logger.debug('Sequence padding to max_len: %d', max_len) - return feature, label, need_init - - def check_need_init(self, feature_length): - ''' - To determine whether the model needs to be re-initialized - :param model_name: name of the model - :return: - ''' - if not (os.path.exists(self.model_info.model_path) and os.path.getsize(self.model_info.model_path)): - return True - conf_dict = self.model_info.load_dict(self.model_info.conf_path) - # check model's params - if not (feature_length == self.model_info.feature_length \ - and self.model_info.label_length == conf_dict['label_length'] \ - and self.model_info.hidden_units == conf_dict['hidden_units']): - return True - return False - - def fit(self, filename): - keras.backend.clear_session() - set_session(self.session) - with self.graph.as_default(): - feature, label, need_init = self.parse(filename) - os.environ['CUDA_VISIBLE_DEVICES'] = '0' - epsilon = self.model_info.make_epsilon() - if need_init: - epoch_start = 0 - self.model = self._build_model(epsilon) - else: - epoch_start = int(self.model_info.last_epoch) - ratio_error = ratio_error_loss_wrapper(epsilon) - ratio_acc_2 = ratio_error_acc_wrapper(epsilon, 2) - self.model = load_model(self.model_info.model_path, - custom_objects={'ratio_error': ratio_error, 'ratio_acc': ratio_acc_2}) - self.model_info.last_epoch = int(self.model_info.max_epoch) + epoch_start - self.model_info.dump_dict() - log_path = os.path.realpath(os.path.join(settings.PATH_LOG, self.model_info.model_name + '_log.json')) - if not os.path.exists(log_path): - os.mknod(log_path, mode=0o600) - json_logging_callback = LossHistory(log_path, self.model_info.model_name, self.model_info.last_epoch) - X_train, X_val, y_train, y_val = \ - train_test_split(feature, label, test_size=0.1) - self.model.fit(X_train, y_train, epochs=self.model_info.last_epoch, - batch_size=int(self.model_info.batch_size), validation_data=(X_val, y_val), - verbose=0, initial_epoch=epoch_start, callbacks=[json_logging_callback]) - self.model.save(self.model_info.model_path) - val_pred = self.model.predict(X_val) - val_re = get_ratio_errors_general(val_pred, y_val, epsilon) - self.model_logger.debug(val_re) - del self.model - return val_re - - def predict(self, filename): - with self.graph.as_default(): - try: - parser = FeatureParser(self.model_info, filename) - feature_length, arr_enc, _, _, _, _, _ = parser.parse(is_train=False) - debug_info = '\n' - for tree in arr_enc: - for node in tree: - for code in node: - debug_info += str(code) - debug_info += ' ' - debug_info += '\n' - debug_info += '\n' - self.model_logger.debug(debug_info) - max_len = 0 - for sample in arr_enc: - max_len = max(len(sample), max_len) - feature = sequence.pad_sequences(arr_enc, maxlen=max_len) - pred = self.model.predict(x=feature) - self.model_info.dump_dict() - return pred - except FileNotFoundError: - self.model_logger.error('The file to predict is not found.') - raise - except: - raise - - def load(self): - """ - Routine to load pre-trained model for prediction 
purpose - :param model_name: name of the checkpoint - :return: tf.Session, out_nodes - """ - keras.backend.clear_session() - set_session(self.session) - self.model_info.load_info() - with self.graph.as_default(): - epsilon = self.model_info.make_epsilon() - ratio_error = ratio_error_loss_wrapper(epsilon) - ratio_acc_2 = ratio_error_acc_wrapper(epsilon, 2) - try: - self.model = load_model(self.model_info.model_path, - custom_objects={'ratio_error': ratio_error, 'ratio_acc': ratio_acc_2}) - except FileNotFoundError: - self.model_logger.error('Failed to load model information file.') - raise - except: - raise - - -def get_ratio_errors_general(pred_arr, true_arr, epsilon_arr): - errors = [] - for i in range(len(epsilon_arr)): - pred, true, epsilon = pred_arr[:, i], true_arr[:, i], epsilon_arr[i] - ratio_1 = (pred + epsilon) / (true + epsilon) - ratio_2 = (true + epsilon) / (pred + epsilon) - ratio = np.maximum(ratio_1, ratio_2).mean() - errors.append(ratio) - return errors - - -def ratio_error_loss(y_true, y_pred, epsilon): - """ - Calculate the ratio error for the loss function. - :param y_true: - :param y_pred: - :param epsilon: - :return: - """ - ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon]) - ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon]) - ratio = K.maximum(ratio_1, ratio_2) - loss = K.mean(ratio) - return loss - - -def ratio_error_loss_wrapper(epsilon): - """ - Wrapper function which calculates ratio error for the loss function. - :param epsilon: - :return: - """ - epsilon = K.constant(epsilon) - - def ratio_error(y_true, y_pred): - return ratio_error_loss(y_true, y_pred, epsilon) - - return ratio_error - - -def ratio_error_acc(y_true, y_pred, epsilon, threshold): - """ - Calculate the ratio error accuracy with the threshold. - :param y_true: - :param y_pred: - :param epsilon: - :param threshold: - :return: - """ - ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon]) - ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon]) - ratio = K.maximum(ratio_1, ratio_2) - mask = K.cast(K.less(ratio, threshold), dtype="float32") - return K.mean(mask) - - -def ratio_error_acc_wrapper(epsilon, threshold): - """ - Wrapper function which calculates ratio error for the ratio error accuracy with the threshold. - :param epsilon: - :param threshold: - :return: - """ - epsilon = K.constant(epsilon) - threshold = K.constant(threshold) - - def ratio_acc(y_true, y_pred): - return ratio_error_acc(y_true, y_pred, epsilon, threshold) - - return ratio_acc +""" + openGauss is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. + + Copyright (c) 2020 Huawei Technologies Co.,Ltd. + Description: The general utilities and APIs of machine learning models. 
+""" + +import os +import pickle + +import ast +from keras.backend.tensorflow_backend import set_session +import tensorflow as tf +import keras +import time +from keras import backend as K +from keras.models import load_model, Sequential +from sklearn.model_selection import train_test_split +from keras.layers import LSTM, Dense, CuDNNLSTM +import numpy as np +import pandas as pd +from sklearn.decomposition import PCA +import shutil +from keras.preprocessing import sequence +from keras.callbacks import Callback +import logging.config + +import settings + +class LossHistory(Callback): + """ + This function recods the training process to the target log file. + """ + def __init__(self, log_path, model_name, max_epoch): + self.log_path = log_path + self.model_name = model_name + self.max_epoch = max_epoch + def on_train_begin(self, logs={}): + self.losses = [] + self.val_acc = [] + def on_epoch_end(self, epoch, logs={}): + now = time.time() + local_time = time.localtime(now) + self.losses.append(logs.get('loss')) + if epoch % 100 == 0: + json_log = open(self.log_path, mode='at', buffering=1) + json_log.write(time.strftime('%Y-%m-%d %H:%M:%S', local_time) + + ' [TRAINING] [MODEL]: %s [Epoch]: %d/%d [Loss]: %.2f' % + (self.model_name, epoch, self.max_epoch, logs['loss']) + '\n') + json_log.close() + +class FeatureParser(): + """ + This is the feature_parser class for AI Engine, includes the methods to parse encoded file. + """ + + def __init__(self, model_info, filename): + """ + The file should be in the format of , , , , , + , for , it should be an list of encoded feature with fixed length, Which + must be ensured by the backend side. + 'dim_red': the 'n_components' of PCA. + 'filename': the path of target file to parse. + 'model_name': the model that the parser adapted to + """ + self.dim_red = round(float(model_info.dim_red), 2) + self.filename = filename + self.model_name = model_info.model_name + logging.config.fileConfig('log.conf') + self.parse_logger = logging.getLogger('parse') + + def parse(self, is_train=True): + try: + df_tmp = pd.read_csv(self.filename, header=None, + names=["query_id", "plan_node_id", "parent_node_id", "enc", + "startup_time", "total_time", "actual_rows", "peak_mem"], index_col=False) + + df = df_tmp.sort_values(by=['query_id', 'plan_node_id']) + df.reset_index(drop=True, inplace=True) + enc_arr = np.array([list(map(float, df['enc'].values[i].split())) for i in range(len(df))]) + df['enc'] = list(enc_arr) + + except FileNotFoundError: + self.parse_logger.error('The encoding file is not found.') + raise + except KeyError: + self.parse_logger.error('Missing compulsory encoding information.') + raise + except: + raise + if self.dim_red > 0: + path_pca_model = os.path.realpath( + os.path.join(settings.PATH_MODELS, self.model_name, self.model_name + '.pkl')) + if is_train: + try: + reload_pca = open(path_pca_model, 'rb') + dim_reducer = pickle.load(reload_pca) + reload_pca.close() + reduced = dim_reducer.transform(enc_arr) + df['enc'] = list(reduced) + except: + dim_reducer = PCA(self.dim_red, svd_solver='full') + dim_reducer.fit(enc_arr) + reduced = dim_reducer.transform(enc_arr) + df['enc'] = list(reduced) + self.parse_logger.debug('[reduce ratio]:{}'.format(self.dim_red)) + self.parse_logger.debug('[PCA] n_dim:{}'.format(dim_reducer.n_components_)) + self.parse_logger.debug('[PCA] explained:{}'.format(np.sum(dim_reducer.explained_variance_ratio_))) + if not os.path.exists(path_pca_model): + os.mknod(path_pca_model, 0o600) + pca_to_save = open(path_pca_model, 'wb') + 
+    def parse(self, is_train=True):
+        try:
+            df_tmp = pd.read_csv(self.filename, header=None,
+                                 names=["query_id", "plan_node_id", "parent_node_id", "enc",
+                                        "startup_time", "total_time", "actual_rows", "peak_mem"], index_col=False)
+
+            df = df_tmp.sort_values(by=['query_id', 'plan_node_id'])
+            df.reset_index(drop=True, inplace=True)
+            enc_arr = np.array([list(map(float, df['enc'].values[i].split())) for i in range(len(df))])
+            df['enc'] = list(enc_arr)
+
+        except FileNotFoundError:
+            self.parse_logger.error('The encoding file is not found.')
+            raise
+        except KeyError:
+            self.parse_logger.error('Missing compulsory encoding information.')
+            raise
+        except:
+            raise
+        if self.dim_red > 0:
+            path_pca_model = os.path.realpath(
+                os.path.join(settings.PATH_MODELS, self.model_name, self.model_name + '.pkl'))
+            if is_train:
+                # Reuse a previously fitted PCA if one was saved; otherwise fit and persist a new one.
+                try:
+                    reload_pca = open(path_pca_model, 'rb')
+                    dim_reducer = pickle.load(reload_pca)
+                    reload_pca.close()
+                    reduced = dim_reducer.transform(enc_arr)
+                    df['enc'] = list(reduced)
+                except:
+                    dim_reducer = PCA(self.dim_red, svd_solver='full')
+                    dim_reducer.fit(enc_arr)
+                    reduced = dim_reducer.transform(enc_arr)
+                    df['enc'] = list(reduced)
+                    self.parse_logger.debug('[reduce ratio]:{}'.format(self.dim_red))
+                    self.parse_logger.debug('[PCA] n_dim:{}'.format(dim_reducer.n_components_))
+                    self.parse_logger.debug('[PCA] explained:{}'.format(np.sum(dim_reducer.explained_variance_ratio_)))
+                    if not os.path.exists(path_pca_model):
+                        os.mknod(path_pca_model, 0o600)
+                    pca_to_save = open(path_pca_model, 'wb')
+                    pickle.dump(dim_reducer, pca_to_save)
+                    pca_to_save.close()
+            else:
+                pred_reload_pca = open(path_pca_model, 'rb')
+                dim_reducer = pickle.load(pred_reload_pca)
+                pred_reload_pca.close()
+                reduced = dim_reducer.transform(enc_arr)
+                df['enc'] = list(reduced)
+        df.sort_values(inplace=True, by=["query_id", "plan_node_id"])
+        feature_length = len(df.iloc[0]['enc'])
+        arr_enc = []
+        arr_child = []
+        arr_startup = []
+        arr_total = []
+        arr_rows = []
+        arr_mem = []
+        indx = np.loadtxt(self.filename, delimiter=",", usecols=(0, 1, 2), dtype=np.int)
+        children = [[] for _ in range(len(df))]
+        base = 0
+        prev = 0
+        # 'base' is the row index of the first node of the current query, so that
+        # 'base + parent_node_id - 1' addresses the parent row of each plan node.
+        for index, row in df.iterrows():
+            if prev != row.query_id:
+                base = index
+                prev = row.query_id
+            if row.parent_node_id != 0:
+                (children[base + row.parent_node_id - 1]).append(row.plan_node_id)
+        df["children"] = children
+        for i in indx:
+            qid = i[0]
+            nid = i[1]
+            enc = []
+            child = []
+            serial = df[(df.query_id == qid) & (df.plan_node_id == nid)]
+            arr_startup.append(serial.startup_time.values[0])
+            arr_total.append(serial.total_time.values[0])
+            arr_rows.append(serial.actual_rows.values[0])
+            arr_mem.append(serial.peak_mem.values[0])
+            self.gen_data(df, qid, nid, enc, child, nid)
+            arr_enc.append(enc)
+            arr_child.append(child)
+        return feature_length, arr_enc, arr_child, arr_startup, arr_total, arr_rows, arr_mem
+
+    def gen_data(self, df, qid, nid, enc, child, base_nid):
+        serial = df[(df.query_id == qid) & (df.plan_node_id == nid)]
+        try:
+            enc.append(serial.enc.values[0])
+        except:
+            self.parse_logger.error('Failed to parse encoding information.')
+            raise
+        child_list_tmp = serial.children.tolist()[0]
+        # Child positions are stored relative to the subtree root (base_nid).
+        child_list = [x - base_nid for x in child_list_tmp]
+        child.append(child_list)
+        for child_id in child_list_tmp:
+            self.gen_data(df, qid, child_id, enc, child, base_nid)
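
To make the recursion in gen_data() concrete, here is a toy, pandas-free sketch; it assumes, as the parser does, that plan_node_ids are assigned in pre-order. A three-node plan becomes a pre-order list of encodings plus child positions relative to the subtree root.

```
# Toy plan: node 1 is the root with children 2 and 3 (encodings are made up).
plan = {1: {'enc': [0.1], 'children': [2, 3]},
        2: {'enc': [0.3], 'children': []},
        3: {'enc': [0.5], 'children': []}}

def gen(nid, base_nid, enc, child):
    # Mirrors gen_data(): pre-order traversal, child ids shifted by base_nid.
    enc.append(plan[nid]['enc'])
    child.append([c - base_nid for c in plan[nid]['children']])
    for c in plan[nid]['children']:
        gen(c, base_nid, enc, child)

enc, child = [], []
gen(1, 1, enc, child)
assert enc == [[0.1], [0.3], [0.5]]      # pre-order encodings
assert child == [[1, 2], [], []]         # offsets into the pre-order list
```
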
+
+
+class ModelInfo:
+    """ This is the model_info class that keeps the parameters about the model configuration
+
+    'max_epoch': [OPTIONAL] default 500
+    'learning_rate': [OPTIONAL] default 1
+    'hidden_units': [OPTIONAL] default 50
+    'batch_size': [OPTIONAL] default 500
+    'model_name': [COMPULSORY] model name to be saved, can be an already trained model
+    'dim_red': [OPTIONAL] part of variance explained by PCA, default -1 means no PCA
+    'model_targets': [COMPULSORY] target labels to predict
+    """
+
+    def __init__(self, model_name):
+        self.model_name = model_name
+        self.max_epoch = 500
+        self.learning_rate = 1
+        self.hidden_units = 50
+        self.batch_size = 500
+        self.dim_red = -1
+        self.model_targets = ''
+        self.model_dir = os.path.realpath(os.path.join(settings.PATH_MODELS_INFO, self.model_name))
+        self.conf_path = os.path.realpath(os.path.join(self.model_dir, self.model_name + '.conf'))
+        self.model_path = os.path.realpath(os.path.join(self.model_dir, self.model_name + '.h5'))
+        self.feature_length = None
+        self.label_length = None
+        self.max_startup = None
+        self.max_total = None
+        self.max_row = None
+        self.max_mem = None
+        self.last_epoch = None
+        logging.config.fileConfig('log.conf')
+        self.model_logger = logging.getLogger('model')
+
+    def get_info(self, arg_json):
+        """
+        get the model information from the curl request and update the config parameters
+        :param arg_json: the json format of the received curl request
+        :return: 0: Success
+                 F: invalid parameter value or type
+                 I: unrecognized parameter
+                 M: missing compulsory argument
+        """
+        if 'labels' in arg_json.keys():
+            self.model_targets = str(arg_json['labels'])
+        else:
+            return 'M'
+        for key in arg_json.keys():
+            if key == 'max_epoch':
+                try:
+                    max_epoch = int(arg_json[key])
+                    if max_epoch <= 0:
+                        return 'F'
+                    self.max_epoch = max_epoch
+                except TypeError:
+                    return 'F'
+            elif key == 'model_name':
+                self.model_name = str(arg_json['model_name'])
+            elif key == 'learning_rate':
+                try:
+                    learning_rate = round(float(arg_json[key]), 2)
+                    if learning_rate <= 0:
+                        return 'F'
+                    self.learning_rate = learning_rate
+                except TypeError:
+                    return 'F'
+            elif key == 'hidden_units':
+                try:
+                    hidden_units = int(arg_json[key])
+                    if hidden_units <= 0:
+                        return 'F'
+                    self.hidden_units = hidden_units
+                except TypeError:
+                    return 'F'
+            elif key == 'batch_size':
+                try:
+                    batch_size = int(arg_json[key])
+                    if batch_size <= 0:
+                        return 'F'
+                    self.batch_size = batch_size
+                except TypeError:
+                    return 'F'
+            elif key == 'labels':
+                tmp_targets = arg_json[key]
+                if len(tmp_targets) != len(set(tmp_targets)):
+                    return 'F'
+                checklist = ['S', 'T', 'R', 'M']
+                model_targets = ''
+                for i in checklist:
+                    if i in tmp_targets:
+                        model_targets += i
+                self.model_targets = model_targets
+                self.label_length = len(model_targets)
+            elif key == 'dim_red':
+                try:
+                    dim_red = round(float(arg_json[key]), 2)
+                    if dim_red <= 0 and dim_red != -1:
+                        return 'F'
+                    self.dim_red = dim_red
+                except TypeError:
+                    return 'F'
+            elif key == 'template_name':
+                if arg_json[key] != 'rlstm':
+                    return 'F'
+            else:
+                return 'I'
+        if os.path.exists(self.conf_path) and os.path.getsize(self.conf_path):
+            self.update_info()
+        else:
+            self.dump_dict()
+        return '0'
+
+    def update_info(self):
+        params_ = self.load_dict(self.conf_path)
+        self.feature_length = params_['feature_length']
+        self.max_startup = params_['max_startup']
+        self.max_total = params_['max_total']
+        self.max_row = params_['max_row']
+        self.max_mem = params_['max_mem']
+        self.last_epoch = params_['last_epoch']
+        if self.check_params():
+            return self.dump_dict()
+        else:
+            return False
+
+    def to_dict(self):
+        params_dict = {}
+        try:
+            params_dict['model_name'] = self.model_name
+            params_dict['max_epoch'] = self.max_epoch
+            params_dict['learning_rate'] = self.learning_rate
+            params_dict['hidden_units'] = self.hidden_units
+            params_dict['batch_size'] = self.batch_size
+            params_dict['dim_red'] = self.dim_red
+            params_dict['model_targets'] = self.model_targets
+            params_dict['label_length'] = self.label_length
+            params_dict['max_startup'] = self.max_startup
+            params_dict['max_total'] = self.max_total
+            params_dict['max_row'] = self.max_row
+            params_dict['max_mem'] = self.max_mem
+            params_dict['last_epoch'] = self.last_epoch
+            params_dict['model_path'] = self.model_path
+            params_dict['conf_path'] = self.conf_path
+            params_dict['model_dir'] = self.model_dir
+            params_dict['feature_length'] = self.feature_length
+            self.model_logger.info(params_dict)
+        except ValueError:
+            self.model_logger.error('Model Info ERROR: missing compulsory parameter.')
+            raise
+        except:
+            raise
+        return params_dict
+
+    def dump_dict(self):
+        """
+        save model information
+        :return:
+        """
+        params_dict = self.to_dict()
+        if self.configure_check(params_dict):
+            if not os.path.exists(self.conf_path):
+                os.mknod(self.conf_path, 0o600)
+            with open(self.conf_path, 'w') as cf:
+                cf.write(str(params_dict))
+            return True
+        else:
+            return False
+
+    def load_dict(self, conf_path):
+        """
+        load the model information
+        :param conf_path: path to the model configurations
+        :return: model_info in dictionary format
+        """
+        with open(conf_path, 'r') as conf:
+            params_dict = ast.literal_eval(conf.read())
+        return params_dict
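
The dump_dict()/load_dict() pair above persists the configuration as a plain Python literal, which is why ast.literal_eval is enough to read it back. A self-contained sketch of that round trip (the file name and values are illustrative):

```
import ast

params = {'model_name': 'rlstm', 'max_epoch': 500, 'learning_rate': 1,
          'dim_red': -1, 'model_targets': 'ST'}
with open('rlstm.conf', 'w') as cf:
    cf.write(str(params))                      # as in dump_dict()
with open('rlstm.conf') as cf:
    restored = ast.literal_eval(cf.read())     # as in load_dict()
assert restored == params
```
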
+    def configure_check(self, params_dict):
+        '''
+        To determine whether the model needs to be re-initialized
+        :param params_dict: the model configuration to check against the saved one
+        :return:
+        '''
+        if not os.path.isdir(self.model_dir):
+            os.makedirs(self.model_dir, mode=0o700)
+            return True
+        # A missing or empty config file can simply be (re)written.
+        elif not (os.path.exists(self.conf_path) and os.path.getsize(self.conf_path)):
+            return True
+        else:
+            saved_conf = self.load_dict(self.conf_path)
+            checklist = ['dim_red', 'model_targets']
+            for item in checklist:
+                if str(params_dict[item]) != str(saved_conf[item]):
+                    return False
+        return True
+
+    def load_info(self):
+        params_dict = self.load_dict(self.conf_path)
+        try:
+            self.model_name = params_dict['model_name']
+            self.max_epoch = params_dict['max_epoch']
+            self.learning_rate = params_dict['learning_rate']
+            self.hidden_units = params_dict['hidden_units']
+            self.batch_size = params_dict['batch_size']
+            self.dim_red = params_dict['dim_red']
+            self.feature_length = params_dict['feature_length']
+            self.label_length = params_dict['label_length']
+            self.max_startup = params_dict['max_startup']
+            self.max_total = params_dict['max_total']
+            self.max_row = params_dict['max_row']
+            self.max_mem = params_dict['max_mem']
+            self.model_targets = params_dict['model_targets']
+            self.last_epoch = params_dict['last_epoch']
+            self.model_path = params_dict['model_path']
+            self.conf_path = params_dict['conf_path']
+            self.model_dir = params_dict['model_dir']
+        except KeyError:
+            self.model_logger.error('Some of the model parameters are missing.')
+            raise
+        except:
+            raise
+
+    def check_params(self):
+        params_dict = self.to_dict()
+        for val in params_dict.values():
+            if 'None' == str(val):
+                self.model_logger.warning(
+                    'The model params are incomplete; current params are: {}'.format(params_dict))
+                return False
+        return True
+
+    def make_epsilon(self):
+        # Labels are scaled by their maxima, so 1 / max equals one raw unit in the
+        # scaled space; it smooths the ratio-error loss and metrics defined below.
+        epsilon_startup = 1 / float(self.max_startup)
+        epsilon_total = 1 / float(self.max_total)
+        epsilon_row = 1 / float(self.max_row)
+        epsilon_mem = 1 / float(self.max_mem)
+        epsilon_arr = []
+        for label in self.model_targets:
+            if label == 'S':
+                epsilon_arr.append(epsilon_startup)
+            elif label == 'T':
+                epsilon_arr.append(epsilon_total)
+            elif label == 'R':
+                epsilon_arr.append(epsilon_row)
+            elif label == 'M':
+                epsilon_arr.append(epsilon_mem)
+        return epsilon_arr
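
A short worked example (made-up numbers) of what make_epsilon() feeds into the ratio-error metrics defined at the end of this module: labels are scaled by their maxima, and epsilon = 1 / max keeps near-zero true values from blowing up the ratio.

```
import numpy as np

max_total = 200.0
epsilon = 1.0 / max_total                      # make_epsilon() for the 'T' target
true = np.array([100.0, 40.0]) / max_total     # scaled labels: 0.5, 0.2
pred = np.array([120.0, 20.0]) / max_total     # scaled predictions: 0.6, 0.1

ratio_1 = (pred + epsilon) / (true + epsilon)
ratio_2 = (true + epsilon) / (pred + epsilon)
print(np.maximum(ratio_1, ratio_2).mean())     # (1.198 + 1.952) / 2, i.e. about 1.58
```
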
+
+
+class RnnModel():
+    """
+    This is the rnn_model class that keeps the APIs for ml functions.
+    """
+
+    def __init__(self, model_info):
+        config = tf.compat.v1.ConfigProto()
+        config.gpu_options.allow_growth = True
+        self.graph = tf.Graph()
+        self.session = tf.compat.v1.Session(config=config, graph=self.graph)
+        self.model = None
+        self.model_info = model_info
+        logging.config.fileConfig('log.conf')
+        self.model_logger = logging.getLogger('model')
+
+    def _build_model(self, epsilon):
+        model = Sequential()
+        # Prefer the GPU-only CuDNNLSTM cells; fall back to plain LSTM when no GPU is available.
+        try:
+            model.add(CuDNNLSTM(units=int(self.model_info.hidden_units), return_sequences=True,
+                                input_shape=(None, int(self.model_info.feature_length))))
+            model.add(CuDNNLSTM(units=int(self.model_info.hidden_units), return_sequences=False))
+        except:
+            model.add(LSTM(units=int(self.model_info.hidden_units), return_sequences=True,
+                           input_shape=(None, int(self.model_info.feature_length))))
+            model.add(LSTM(units=int(self.model_info.hidden_units), return_sequences=False))
+        model.add(Dense(units=int(self.model_info.hidden_units), activation='relu'))
+        model.add(Dense(units=int(self.model_info.hidden_units), activation='relu'))
+        model.add(Dense(units=int(self.model_info.label_length), activation='sigmoid'))
+        optimizer = keras.optimizers.Adadelta(lr=float(self.model_info.learning_rate), rho=0.95)
+        ratio_error = ratio_error_loss_wrapper(epsilon)
+        ratio_acc_2 = ratio_error_acc_wrapper(epsilon, 2)
+        model.compile(loss=ratio_error, metrics=[ratio_acc_2], optimizer=optimizer)
+        return model
+
+    def parse(self, filename):
+        '''
+        parse the file and get the encoded features
+        :param filename: the path of the file to parse
+        :return: feature: the features for training
+                 label: the labels for training
+                 need_init: whether the model needs to be initialized
+        '''
+        parser = FeatureParser(self.model_info, filename)
+        feature_length, arr_enc, arr_child, arr_startup, arr_total, arr_row, arr_mem = \
+            parser.parse()
+        need_init = self.check_need_init(feature_length)
+        if need_init:
+            max_startup, max_total, max_row, max_mem = np.max(arr_startup), np.max(arr_total), np.max(arr_row), np.max(
+                arr_mem)
+            self.model_info.max_startup = max(max_startup, 1)
+            self.model_info.max_total = max(max_total, 1)
+            self.model_info.max_row = max(max_row, 1)
+            self.model_info.max_mem = max(max_mem, 1)
+            self.model_info.feature_length = feature_length
+            shutil.rmtree(self.model_info.model_path, ignore_errors=True)
+            shutil.rmtree(os.path.realpath(
+                os.path.join(settings.PATH_LOG, self.model_info.model_name)), ignore_errors=True)
+        # Scale each label into [0, 1] by the recorded maximum of its kind.
+        arr_startup = np.array(arr_startup, dtype=float).reshape((-1, 1))
+        arr_total = np.array(arr_total, dtype=float).reshape((-1, 1))
+        arr_row = np.array(arr_row, dtype=float).reshape((-1, 1))
+        arr_mem = np.array(arr_mem, dtype=float).reshape((-1, 1))
+        arr_startup /= float(self.model_info.max_startup)
+        arr_total /= float(self.model_info.max_total)
+        arr_row /= float(self.model_info.max_row)
+        arr_mem /= float(self.model_info.max_mem)
+        label = None
+        for target in self.model_info.model_targets:
+            if label is None:
+                if target == 'S':
+                    label = arr_startup
+                elif target == 'T':
+                    label = arr_total
+                elif target == 'R':
+                    label = arr_row
+                elif target == 'M':
+                    label = arr_mem
+            else:
+                if target == 'S':
+                    label = np.hstack((label, arr_startup))
+                elif target == 'T':
+                    label = np.hstack((label, arr_total))
+                elif target == 'R':
+                    label = np.hstack((label, arr_row))
+                elif target == 'M':
+                    label = np.hstack((label, arr_mem))
+        max_len = 0
+        for sample in arr_enc:
+            max_len = max(len(sample), max_len)
+        feature = sequence.pad_sequences(arr_enc, maxlen=max_len)
+        self.model_logger.debug('Sequence padding to 
max_len: %d', max_len) + return feature, label, need_init + + def check_need_init(self, feature_length): + ''' + To determine whether the model needs to be re-initialized + :param model_name: name of the model + :return: + ''' + if not (os.path.exists(self.model_info.model_path) and os.path.getsize(self.model_info.model_path)): + return True + conf_dict = self.model_info.load_dict(self.model_info.conf_path) + # check model's params + if not (feature_length == self.model_info.feature_length \ + and self.model_info.label_length == conf_dict['label_length'] \ + and self.model_info.hidden_units == conf_dict['hidden_units']): + return True + return False + + def fit(self, filename): + keras.backend.clear_session() + set_session(self.session) + with self.graph.as_default(): + feature, label, need_init = self.parse(filename) + os.environ['CUDA_VISIBLE_DEVICES'] = '0' + epsilon = self.model_info.make_epsilon() + if need_init: + epoch_start = 0 + self.model = self._build_model(epsilon) + else: + epoch_start = int(self.model_info.last_epoch) + ratio_error = ratio_error_loss_wrapper(epsilon) + ratio_acc_2 = ratio_error_acc_wrapper(epsilon, 2) + self.model = load_model(self.model_info.model_path, + custom_objects={'ratio_error': ratio_error, 'ratio_acc': ratio_acc_2}) + self.model_info.last_epoch = int(self.model_info.max_epoch) + epoch_start + self.model_info.dump_dict() + log_path = os.path.realpath(os.path.join(settings.PATH_LOG, self.model_info.model_name + '_log.json')) + if not os.path.exists(log_path): + os.mknod(log_path, mode=0o600) + json_logging_callback = LossHistory(log_path, self.model_info.model_name, self.model_info.last_epoch) + X_train, X_val, y_train, y_val = \ + train_test_split(feature, label, test_size=0.1) + self.model.fit(X_train, y_train, epochs=self.model_info.last_epoch, + batch_size=int(self.model_info.batch_size), validation_data=(X_val, y_val), + verbose=0, initial_epoch=epoch_start, callbacks=[json_logging_callback]) + self.model.save(self.model_info.model_path) + val_pred = self.model.predict(X_val) + val_re = get_ratio_errors_general(val_pred, y_val, epsilon) + self.model_logger.debug(val_re) + del self.model + return val_re + + def predict(self, filename): + with self.graph.as_default(): + try: + parser = FeatureParser(self.model_info, filename) + feature_length, arr_enc, _, _, _, _, _ = parser.parse(is_train=False) + debug_info = '\n' + for tree in arr_enc: + for node in tree: + for code in node: + debug_info += str(code) + debug_info += ' ' + debug_info += '\n' + debug_info += '\n' + self.model_logger.debug(debug_info) + max_len = 0 + for sample in arr_enc: + max_len = max(len(sample), max_len) + feature = sequence.pad_sequences(arr_enc, maxlen=max_len) + pred = self.model.predict(x=feature) + self.model_info.dump_dict() + return pred + except FileNotFoundError: + self.model_logger.error('The file to predict is not found.') + raise + except: + raise + + def load(self): + """ + Routine to load pre-trained model for prediction purpose + :param model_name: name of the checkpoint + :return: tf.Session, out_nodes + """ + keras.backend.clear_session() + set_session(self.session) + self.model_info.load_info() + with self.graph.as_default(): + epsilon = self.model_info.make_epsilon() + ratio_error = ratio_error_loss_wrapper(epsilon) + ratio_acc_2 = ratio_error_acc_wrapper(epsilon, 2) + try: + self.model = load_model(self.model_info.model_path, + custom_objects={'ratio_error': ratio_error, 'ratio_acc': ratio_acc_2}) + except FileNotFoundError: + 
self.model_logger.error('Failed to load model information file.') + raise + except: + raise + + +def get_ratio_errors_general(pred_arr, true_arr, epsilon_arr): + errors = [] + for i in range(len(epsilon_arr)): + pred, true, epsilon = pred_arr[:, i], true_arr[:, i], epsilon_arr[i] + ratio_1 = (pred + epsilon) / (true + epsilon) + ratio_2 = (true + epsilon) / (pred + epsilon) + ratio = np.maximum(ratio_1, ratio_2).mean() + errors.append(ratio) + return errors + + +def ratio_error_loss(y_true, y_pred, epsilon): + """ + Calculate the ratio error for the loss function. + :param y_true: + :param y_pred: + :param epsilon: + :return: + """ + ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon]) + ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon]) + ratio = K.maximum(ratio_1, ratio_2) + loss = K.mean(ratio) + return loss + + +def ratio_error_loss_wrapper(epsilon): + """ + Wrapper function which calculates ratio error for the loss function. + :param epsilon: + :return: + """ + epsilon = K.constant(epsilon) + + def ratio_error(y_true, y_pred): + return ratio_error_loss(y_true, y_pred, epsilon) + + return ratio_error + + +def ratio_error_acc(y_true, y_pred, epsilon, threshold): + """ + Calculate the ratio error accuracy with the threshold. + :param y_true: + :param y_pred: + :param epsilon: + :param threshold: + :return: + """ + ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon]) + ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon]) + ratio = K.maximum(ratio_1, ratio_2) + mask = K.cast(K.less(ratio, threshold), dtype="float32") + return K.mean(mask) + + +def ratio_error_acc_wrapper(epsilon, threshold): + """ + Wrapper function which calculates ratio error for the ratio error accuracy with the threshold. + :param epsilon: + :param threshold: + :return: + """ + epsilon = K.constant(epsilon) + threshold = K.constant(threshold) + + def ratio_acc(y_true, y_pred): + return ratio_error_acc(y_true, y_pred, epsilon, threshold) + + return ratio_acc diff --git a/src/gausskernel/dbmind/tools/predictor/python/run.py b/src/gausskernel/dbmind/tools/components/predictor/python/run.py similarity index 97% rename from src/gausskernel/dbmind/tools/predictor/python/run.py rename to src/gausskernel/dbmind/tools/components/predictor/python/run.py index 44bcb861e..e10dbb7e8 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/run.py +++ b/src/gausskernel/dbmind/tools/components/predictor/python/run.py @@ -1,349 +1,349 @@ -""" - openGauss is licensed under Mulan PSL v2. - You can use this software according to the terms and conditions of the Mulan PSL v2. - You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - - THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - See the Mulan PSL v2 for more details. - - Copyright (c) 2020 Huawei Technologies Co.,Ltd. - Description: The APIs to call ml functions and reply to the client requests. 
-""" - -import os -import ssl - -# Open source libraries -from datetime import datetime -from flask import Flask, request, jsonify -import numpy as np -from werkzeug.utils import secure_filename -import logging.config - -from model import ModelInfo, RnnModel -import settings -from certs import aes_cbc_decrypt_with_path - -# global variables -os.environ['CUDA_VISIBLE_DEVICES'] = settings.GPU_CONFIG -app = Flask(__name__) -loaded_model = None -model_config = None -req_logger = None -model_logger = None -parse_logger = None -tb_url = None - - -def __get_flask_params__(): - ''' - Returns the connection parameters of the Flask server app - :return: tuple of debug, server host and server port number - ''' - server_debug = int(settings.DEFAULT_FLASK_DEBUG) - server_host = settings.DEFAULT_FLASK_SERVER_HOST - server_port = int(settings.DEFAULT_FLASK_SERVER_PORT) - return server_debug, server_host, server_port - - -def __port_in_use__(port): - import socket - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - return s.connect_ex(('localhost', port)) == 0 - - -@app.route('/check', methods=['POST']) -def check(): - global req_logger - """ - API for check the - :return: - """ - if request.method == 'POST': - req_logger.info(request.data) - return '0' - - -@app.route('/configure', methods=['POST']) -def configure_training(): - ''' - API for configuring model, needs to be called before prediction or train - CURL format: - curl -X POST -d '{"max_epoch":"200", "learning_rate":"0.01", "hidden_units":"60", "batch_size": "1000", - "dim_red": "0.7","model_name":"rlstm"}' -H 'Content-Type: application/json' 'https://127.0.0.1:5000/configure' - JSON Parameters: - 'max_epoch': [OPTIONAL] default 500 - 'learning_rate': [OPTIONAL] default 1 - 'hidden_units': [OPTIONAL] default 50 - 'batch_size': [OPTIONAL] default 5000 - 'template_name' [OPTIONAL] network type of the target model, default rlstm - model - 'model_name': [COMPULSORY] model name to be saved, can be a already trained model - 'labels': [COMPULSORY] target labels to predict - 'dim_red': [OPTIONAL] part of variance explained by PCA, default -1 means no PCA - :return: 0: Success - F: TypeError - I: Invalid parameter type - M: Missing compulsory argument - ''' - global model_config - global req_logger - global model_logger - global tb_url - if request.method == 'POST': - req_logger.info(request.data) - arg_json = request.get_json() - if 'model_name' in arg_json: - model_name = arg_json['model_name'] - model_config = ModelInfo(model_name) - else: - return 'M' - return model_config.get_info(arg_json) - - -@app.route('/train', methods=['GET', 'POST']) -def train(): - ''' - API for training the model, should be called after configuration - CURL format: - curl -X POST -F file=@/path/to/encoded/data 'https://127.0.0.1:5000/train' - :return: a jsonified result - { - 'final_model_name': 'xxx', - 're_startup': 'xxx', - 're_total': 'xxx', - 'converged': '0/1', - 'feature_length': '???' 
- } - Errors: 'M': Missing compulsory parameter in json - Errors: 'R': session is running - ''' - global running - global model_config - global req_logger - global model_logger - if request.method == 'POST': - if running == 1: - return 'R' - running = 1 - # save file - if 'file' in request.files: - f = request.files['file'] - else: - return 'M' - base_path = os.path.dirname(__file__) - dtObj = datetime.now() - fname = str(dtObj.year) + '-' + str(dtObj.month) + '-' + str(dtObj.day) + '_' \ - + str(dtObj.hour) + '-' + str(dtObj.minute) + '-' + str(dtObj.second) + '-' \ - + secure_filename(f.filename) - file_path = os.path.realpath(os.path.join( - base_path, settings.PATH_UPLOAD, fname)) - f.save(file_path) - # trigger training - try: - model = RnnModel(model_config) - val_re = model.fit(file_path) - except: - running = 0 - raise - re_startup, re_total, re_row, re_mem = -1, -1, -1, -1 - converged = 1 - for v in val_re: - if v > 2: - converged = 0 - break - for i in range(int(model_config.label_length)): - if model_config.model_targets[i] == 'S': - re_startup = val_re[i] - elif model_config.model_targets[i] == 'T': - re_total = val_re[i] - elif model_config.model_targets[i] == 'R': - re_row = val_re[i] - elif model_config.model_targets[i] == 'M': - re_mem = val_re[i] - res = { - 're_startup': re_startup, - 're_total': re_total, - 're_row': re_row, - 're_mem': re_mem, - 'max_startup': float(model_config.max_startup), - 'max_total': float(model_config.max_total), - 'max_row': float(model_config.max_row), - 'max_mem': float(model_config.max_mem), - 'converged': converged, - 'feature_length': int(model_config.feature_length) - } - - running = 0 - model_logger.info(jsonify(res)) - return jsonify(res) - - -@app.route('/track_process', methods=['POST']) -def track_process(): - ''' - return the log file path that records the model's training process information - CURL format: - curl -X POST -d '{"modelName":"test"}' -H 'Content-Type: application/json' - 'https://127.0.0.1:5000/track_process' - - :return: log_path if the training log exists - F if the log file has not been generated or contents nothing - M if missing compulsory parameter - ''' - global tb_url - if request.method == 'POST': - req_logger.info(request.data) - arg_json = request.get_json() - if 'modelName' in arg_json: - model_name = arg_json['modelName'] - base_path = os.path.dirname(__file__) - log_path = os.path.realpath(os.path.join(base_path, settings.PATH_LOG, model_name + '_log.json')) - if not (os.path.exists(log_path) and os.path.getsize(log_path)): - return 'F' - else: - return log_path - else: - return 'M' - - -@app.route('/model_setup', methods=['POST']) -def setup(): - ''' - API for setup up the model for prediction. 
- CURL format: - curl -X POST -d '{"model_name": "rlstm"}' -H 'Content-Type: application/json' - 'https://127.0.0.1:5000/model_setup' - JSON Parameter: - 'model_name': [COMPULSORY] name of the model to be activated for predict route - :return: M: Missing compulsory parameter in json - i: Internal error when loading - N: Model not found - ''' - global loaded_model - global running - global req_logger - global model_logger - if request.method == 'POST': - req_logger.info('request for setup is {}'.format(request.data)) - if running == 1: - return 'R' - running = 1 - arg_json = request.get_json() - if 'model_name' in arg_json: - model_name = arg_json['model_name'] - model_config = ModelInfo(model_name) - try: - loaded_model = RnnModel(model_config) - loaded_model.load() - except KeyError: - running = 0 - return 'N' - except FileNotFoundError: - running = 0 - return 'N' - except: - running = 0 - return 'i' - else: - running = 0 - return 'M' - return '0' - - -@app.route('/predict', methods=['POST']) -def predict(): - ''' - Route for prediction, should be called after setup to choose the model to predict - CURL format: - curl -X POST -F file=@/path/to/encoded/data 'https://127.0.0.1:5000/predict' - :return: a jsonified result - { - 'pred_startup': 'xxx', - 'pred_total': 'xxx', - 'successful': '0/1' - } - - Failures: 'M': Missing compulsory parameter in json - 'S': Session is not loaded, setup required - ''' - global running - global loaded_model - global req_logger - global model_logger - if request.method == 'POST': - if not (loaded_model and running == 1): - model_logger.error('model is not loaded or running is %d' % running) - return 'S' - if 'file' in request.files: - f = request.files['file'] - else: - return 'M' - base_path = os.path.dirname(__file__) - dtObj = datetime.now() - fname = str(dtObj.year) + '-' + str(dtObj.month) + '-' + str(dtObj.day) + '_' \ - + str(dtObj.hour) + '-' + str(dtObj.minute) + '-' + str(dtObj.second) + '-' \ - + 'tmp.csv' - file_path = os.path.realpath(os.path.join( - base_path, settings.PATH_UPLOAD, fname)) - f.save(file_path) - # trigger prediction - try: - pred = loaded_model.predict(file_path) - except: - model_logger.error('Model prediction failed.') - running = 0 - os.remove(file_path) - raise - pred_startup, pred_total, pred_row, pred_mem = None, None, None, None - info = loaded_model.model_info - for i in range(len(info.model_targets)): - if info.model_targets[i] == 'S': - pred_startup = pred[:, i] * info.max_startup - pred_startup = str(list(pred_startup.astype(int)))[1:-1] - elif info.model_targets[i] == 'T': - pred_total = pred[:, i] * info.max_total - pred_total = str(list(pred_total.astype(int)))[1:-1] - elif info.model_targets[i] == 'R': - pred_row = np.exp(pred[:, i] * info.max_row) - 1 - pred_row = str(list(pred_row.astype(int)))[1:-1] - elif info.model_targets[i] == 'M': - pred_mem = pred[:, i] * info.max_mem - pred_mem = str(list(pred_mem.astype(int)))[1:-1] - res = { - 'pred_startup': pred_startup, - 'pred_total': pred_total, - 'pred_rows': pred_row, - 'pred_mem': pred_mem - } - model_logger.debug(jsonify(res)) - os.remove(file_path) - running = 0 - return jsonify(res) - -def run(): - global running - global req_logger - global model_logger - global parse_logger - running = 0 - logging.config.fileConfig('log.conf') - req_logger = logging.getLogger() - model_logger = logging.getLogger('model') - parse_logger = logging.getLogger('parse') - key = aes_cbc_decrypt_with_path(settings.PATH_SSL) - context = 
ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - context.load_cert_chain(certfile=settings.PATH_SERVER_CRT, keyfile=settings.PATH_SERVER_KEY, password=key) - context.load_verify_locations(settings.PATH_CA) - context.verify_mode = ssl.CERT_REQUIRED - server_debug, server_host, server_port = __get_flask_params__() - app.run(host=server_host, port=server_port, debug=server_debug, threaded=True, ssl_context=context) - exit(0) - - -if __name__ == '__main__': - run() +""" + openGauss is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. + + Copyright (c) 2020 Huawei Technologies Co.,Ltd. + Description: The APIs to call ml functions and reply to the client requests. +""" + +import os +import ssl + +# Open source libraries +from datetime import datetime +from flask import Flask, request, jsonify +import numpy as np +from werkzeug.utils import secure_filename +import logging.config + +from model import ModelInfo, RnnModel +import settings +from certs import aes_cbc_decrypt_with_path + +# global variables +os.environ['CUDA_VISIBLE_DEVICES'] = settings.GPU_CONFIG +app = Flask(__name__) +loaded_model = None +model_config = None +req_logger = None +model_logger = None +parse_logger = None +tb_url = None + + +def __get_flask_params__(): + ''' + Returns the connection parameters of the Flask server app + :return: tuple of debug, server host and server port number + ''' + server_debug = int(settings.DEFAULT_FLASK_DEBUG) + server_host = settings.DEFAULT_FLASK_SERVER_HOST + server_port = int(settings.DEFAULT_FLASK_SERVER_PORT) + return server_debug, server_host, server_port + + +def __port_in_use__(port): + import socket + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex(('localhost', port)) == 0 + + +@app.route('/check', methods=['POST']) +def check(): + global req_logger + """ + API for check the + :return: + """ + if request.method == 'POST': + req_logger.info(request.data) + return '0' + + +@app.route('/configure', methods=['POST']) +def configure_training(): + ''' + API for configuring model, needs to be called before prediction or train + CURL format: + curl -X POST -d '{"max_epoch":"200", "learning_rate":"0.01", "hidden_units":"60", "batch_size": "1000", + "dim_red": "0.7","model_name":"rlstm"}' -H 'Content-Type: application/json' 'https://127.0.0.1:5000/configure' + JSON Parameters: + 'max_epoch': [OPTIONAL] default 500 + 'learning_rate': [OPTIONAL] default 1 + 'hidden_units': [OPTIONAL] default 50 + 'batch_size': [OPTIONAL] default 5000 + 'template_name' [OPTIONAL] network type of the target model, default rlstm + model + 'model_name': [COMPULSORY] model name to be saved, can be a already trained model + 'labels': [COMPULSORY] target labels to predict + 'dim_red': [OPTIONAL] part of variance explained by PCA, default -1 means no PCA + :return: 0: Success + F: TypeError + I: Invalid parameter type + M: Missing compulsory argument + ''' + global model_config + global req_logger + global model_logger + global tb_url + if request.method == 'POST': + req_logger.info(request.data) + arg_json = request.get_json() + if 'model_name' in arg_json: + model_name = 
arg_json['model_name']
+            model_config = ModelInfo(model_name)
+        else:
+            return 'M'
+        return model_config.get_info(arg_json)
+
+
+@app.route('/train', methods=['GET', 'POST'])
+def train():
+    '''
+    API for training the model, should be called after configuration
+    CURL format:
+        curl -X POST -F file=@/path/to/encoded/data 'https://127.0.0.1:5000/train'
+    :return: a jsonified result
+        {
+            're_startup': 'xxx', 're_total': 'xxx', 're_row': 'xxx', 're_mem': 'xxx',
+            'max_startup': 'xxx', 'max_total': 'xxx', 'max_row': 'xxx', 'max_mem': 'xxx',
+            'converged': '0/1',
+            'feature_length': '???'
+        }
+    Errors: 'M': missing compulsory parameter in json
+            'R': a training session is already running
+    '''
+    global running
+    global model_config
+    global req_logger
+    global model_logger
+    if request.method == 'POST':
+        if running == 1:
+            return 'R'
+        running = 1
+        # save file
+        if 'file' in request.files:
+            f = request.files['file']
+        else:
+            return 'M'
+        base_path = os.path.dirname(__file__)
+        dtObj = datetime.now()
+        fname = str(dtObj.year) + '-' + str(dtObj.month) + '-' + str(dtObj.day) + '_' \
+                + str(dtObj.hour) + '-' + str(dtObj.minute) + '-' + str(dtObj.second) + '-' \
+                + secure_filename(f.filename)
+        file_path = os.path.realpath(os.path.join(
+            base_path, settings.PATH_UPLOAD, fname))
+        f.save(file_path)
+        # trigger training
+        try:
+            model = RnnModel(model_config)
+            val_re = model.fit(file_path)
+        except:
+            running = 0
+            raise
+        re_startup, re_total, re_row, re_mem = -1, -1, -1, -1
+        converged = 1
+        for v in val_re:
+            if v > 2:
+                converged = 0
+                break
+        for i in range(int(model_config.label_length)):
+            if model_config.model_targets[i] == 'S':
+                re_startup = val_re[i]
+            elif model_config.model_targets[i] == 'T':
+                re_total = val_re[i]
+            elif model_config.model_targets[i] == 'R':
+                re_row = val_re[i]
+            elif model_config.model_targets[i] == 'M':
+                re_mem = val_re[i]
+        res = {
+            're_startup': re_startup,
+            're_total': re_total,
+            're_row': re_row,
+            're_mem': re_mem,
+            'max_startup': float(model_config.max_startup),
+            'max_total': float(model_config.max_total),
+            'max_row': float(model_config.max_row),
+            'max_mem': float(model_config.max_mem),
+            'converged': converged,
+            'feature_length': int(model_config.feature_length)
+        }
+
+        running = 0
+        model_logger.info(jsonify(res))
+        return jsonify(res)
+
+
+@app.route('/track_process', methods=['POST'])
+def track_process():
+    '''
+    return the log file path that records the model's training process information
+    CURL format:
+        curl -X POST -d '{"modelName":"test"}' -H 'Content-Type: application/json'
+        'https://127.0.0.1:5000/track_process'
+
+    :return: log_path if the training log exists
+             F if the log file has not been generated or contains nothing
+             M if missing compulsory parameter
+    '''
+    global tb_url
+    if request.method == 'POST':
+        req_logger.info(request.data)
+        arg_json = request.get_json()
+        if 'modelName' in arg_json:
+            model_name = arg_json['modelName']
+            base_path = os.path.dirname(__file__)
+            log_path = os.path.realpath(os.path.join(base_path, settings.PATH_LOG, model_name + '_log.json'))
+            if not (os.path.exists(log_path) and os.path.getsize(log_path)):
+                return 'F'
+            else:
+                return log_path
+        else:
+            return 'M'
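
A hedged client-side sketch of the configure-then-train flow documented in the docstrings above. The host and port mirror the defaults in settings.py; the model name, data file, and certificate paths are illustrative, and the cert/verify keywords follow the standard requests API (the server enforces mutual TLS, see run() below).

```
import requests

BASE = 'https://127.0.0.1:5000'
TLS = {'cert': ('client.crt', 'client.key'), 'verify': 'cacert.pem'}

conf = {'model_name': 'rlstm', 'labels': 'ST', 'max_epoch': '200'}
print(requests.post(BASE + '/configure', json=conf, **TLS).text)    # '0' on success

with open('encoded_sample.csv', 'rb') as f:
    r = requests.post(BASE + '/train', files={'file': f}, **TLS)
print(r.json())   # re_* errors, max_* scales, converged, feature_length
```
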
+
+
+@app.route('/model_setup', methods=['POST'])
+def setup():
+    '''
+    API for setting up the model for prediction.
+    CURL format:
+        curl -X POST -d '{"model_name": "rlstm"}' -H 'Content-Type: application/json'
+        'https://127.0.0.1:5000/model_setup'
+    JSON Parameter:
+        'model_name': [COMPULSORY] name of the model to be activated for the predict route
+    :return: 0: Success
+             M: Missing compulsory parameter in json
+             R: Session is running
+             i: Internal error when loading
+             N: Model not found
+    '''
+    global loaded_model
+    global running
+    global req_logger
+    global model_logger
+    if request.method == 'POST':
+        req_logger.info('request for setup is {}'.format(request.data))
+        if running == 1:
+            return 'R'
+        running = 1
+        arg_json = request.get_json()
+        if 'model_name' in arg_json:
+            model_name = arg_json['model_name']
+            model_config = ModelInfo(model_name)
+            try:
+                loaded_model = RnnModel(model_config)
+                loaded_model.load()
+            except KeyError:
+                running = 0
+                return 'N'
+            except FileNotFoundError:
+                running = 0
+                return 'N'
+            except:
+                running = 0
+                return 'i'
+        else:
+            running = 0
+            return 'M'
+        return '0'
+
+
+@app.route('/predict', methods=['POST'])
+def predict():
+    '''
+    Route for prediction, should be called after setup to choose the model to predict
+    CURL format:
+        curl -X POST -F file=@/path/to/encoded/data 'https://127.0.0.1:5000/predict'
+    :return: a jsonified result
+        {
+            'pred_startup': 'xxx',
+            'pred_total': 'xxx',
+            'pred_rows': 'xxx',
+            'pred_mem': 'xxx'
+        }
+
+    Failures: 'M': Missing compulsory parameter in json
+              'S': Session is not loaded, setup required
+    '''
+    global running
+    global loaded_model
+    global req_logger
+    global model_logger
+    if request.method == 'POST':
+        if not (loaded_model and running == 1):
+            model_logger.error('model is not loaded or running is %d' % running)
+            return 'S'
+        if 'file' in request.files:
+            f = request.files['file']
+        else:
+            return 'M'
+        base_path = os.path.dirname(__file__)
+        dtObj = datetime.now()
+        fname = str(dtObj.year) + '-' + str(dtObj.month) + '-' + str(dtObj.day) + '_' \
+                + str(dtObj.hour) + '-' + str(dtObj.minute) + '-' + str(dtObj.second) + '-' \
+                + 'tmp.csv'
+        file_path = os.path.realpath(os.path.join(
+            base_path, settings.PATH_UPLOAD, fname))
+        f.save(file_path)
+        # trigger prediction
+        try:
+            pred = loaded_model.predict(file_path)
+        except:
+            model_logger.error('Model prediction failed.')
+            running = 0
+            os.remove(file_path)
+            raise
+        pred_startup, pred_total, pred_row, pred_mem = None, None, None, None
+        info = loaded_model.model_info
+        for i in range(len(info.model_targets)):
+            if info.model_targets[i] == 'S':
+                pred_startup = pred[:, i] * info.max_startup
+                pred_startup = str(list(pred_startup.astype(int)))[1:-1]
+            elif info.model_targets[i] == 'T':
+                pred_total = pred[:, i] * info.max_total
+                pred_total = str(list(pred_total.astype(int)))[1:-1]
+            elif info.model_targets[i] == 'R':
+                pred_row = np.exp(pred[:, i] * info.max_row) - 1
+                pred_row = str(list(pred_row.astype(int)))[1:-1]
+            elif info.model_targets[i] == 'M':
+                pred_mem = pred[:, i] * info.max_mem
+                pred_mem = str(list(pred_mem.astype(int)))[1:-1]
+        res = {
+            'pred_startup': pred_startup,
+            'pred_total': pred_total,
+            'pred_rows': pred_row,
+            'pred_mem': pred_mem
+        }
+        model_logger.debug(jsonify(res))
+        os.remove(file_path)
+        running = 0
+        return jsonify(res)
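
Continuing that sketch for the prediction path: activate a trained model via /model_setup, then post an encoded file to /predict. run() below sets verify_mode = ssl.CERT_REQUIRED, so a client certificate is mandatory; all names and paths remain illustrative.

```
import requests

BASE = 'https://127.0.0.1:5000'
TLS = {'cert': ('client.crt', 'client.key'), 'verify': 'cacert.pem'}

if requests.post(BASE + '/model_setup', json={'model_name': 'rlstm'}, **TLS).text == '0':
    with open('encoded_sample.csv', 'rb') as f:
        pred = requests.post(BASE + '/predict', files={'file': f}, **TLS).json()
    print(pred['pred_startup'], pred['pred_total'])   # comma-separated integer strings
```
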
ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + context.load_cert_chain(certfile=settings.PATH_SERVER_CRT, keyfile=settings.PATH_SERVER_KEY, password=key) + context.load_verify_locations(settings.PATH_CA) + context.verify_mode = ssl.CERT_REQUIRED + server_debug, server_host, server_port = __get_flask_params__() + app.run(host=server_host, port=server_port, debug=server_debug, threaded=True, ssl_context=context) + exit(0) + + +if __name__ == '__main__': + run() diff --git a/src/gausskernel/dbmind/tools/predictor/python/saved_models/.gitkeep b/src/gausskernel/dbmind/tools/components/predictor/python/saved_models/.gitkeep similarity index 98% rename from src/gausskernel/dbmind/tools/predictor/python/saved_models/.gitkeep rename to src/gausskernel/dbmind/tools/components/predictor/python/saved_models/.gitkeep index 58c6a0063..222de580b 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/saved_models/.gitkeep +++ b/src/gausskernel/dbmind/tools/components/predictor/python/saved_models/.gitkeep @@ -1 +1 @@ -#the file to track this essential empty dictionary. +#the file to track this essential empty dictionary. diff --git a/src/gausskernel/dbmind/tools/predictor/python/settings.py b/src/gausskernel/dbmind/tools/components/predictor/python/settings.py similarity index 96% rename from src/gausskernel/dbmind/tools/predictor/python/settings.py rename to src/gausskernel/dbmind/tools/components/predictor/python/settings.py index 512011039..33a774efe 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/settings.py +++ b/src/gausskernel/dbmind/tools/components/predictor/python/settings.py @@ -1,41 +1,41 @@ -""" - openGauss is licensed under Mulan PSL v2. - You can use this software according to the terms and conditions of the Mulan PSL v2. - You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - - THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - See the Mulan PSL v2 for more details. - - Copyright (c) 2020 Huawei Technologies Co.,Ltd. - Description: The settings for AiEngine. -""" - -import os - -# Flask settings -DEFAULT_FLASK_SERVER_HOST = '127.0.0.1' -DEFAULT_FLASK_SERVER_PORT = '5000' -DEFAULT_FLASK_DEBUG = '0' # Do not use debug mode in production - -# Path settings -PATH_UPLOAD = 'uploads/' -PATH_MODELS = 'saved_models/' -PATH_LOG = 'log/' -PATH_ENGINE_LOG = 'e_log/model_logs' - -# Path for certifications -PATH_SSL = "path_to_CA" -PATH_CA = PATH_SSL + '/demoCA/cacert.pem' -PATH_SERVER_KEY = PATH_SSL + '/server.key' -PATH_SERVER_CRT = PATH_SSL + '/server.crt' - -# GPU configuration set as '-1' if no gpu is available, default two gpus -GPU_CONFIG = '0,1' - -# Path for logs -base_path = os.path.dirname(__file__) -PATH_MODELS_INFO = os.path.realpath(os.path.join(base_path, PATH_MODELS)) +""" + openGauss is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. + + Copyright (c) 2020 Huawei Technologies Co.,Ltd. + Description: The settings for AiEngine. 
+""" + +import os + +# Flask settings +DEFAULT_FLASK_SERVER_HOST = '127.0.0.1' +DEFAULT_FLASK_SERVER_PORT = '5000' +DEFAULT_FLASK_DEBUG = '0' # Do not use debug mode in production + +# Path settings +PATH_UPLOAD = 'uploads/' +PATH_MODELS = 'saved_models/' +PATH_LOG = 'log/' +PATH_ENGINE_LOG = 'e_log/model_logs' + +# Path for certifications +PATH_SSL = "path_to_CA" +PATH_CA = PATH_SSL + '/demoCA/cacert.pem' +PATH_SERVER_KEY = PATH_SSL + '/server.key' +PATH_SERVER_CRT = PATH_SSL + '/server.crt' + +# GPU configuration set as '-1' if no gpu is available, default two gpus +GPU_CONFIG = '0,1' + +# Path for logs +base_path = os.path.dirname(__file__) +PATH_MODELS_INFO = os.path.realpath(os.path.join(base_path, PATH_MODELS)) diff --git a/src/gausskernel/dbmind/tools/predictor/python/uploads/.gitkeep b/src/gausskernel/dbmind/tools/components/predictor/python/uploads/.gitkeep similarity index 98% rename from src/gausskernel/dbmind/tools/predictor/python/uploads/.gitkeep rename to src/gausskernel/dbmind/tools/components/predictor/python/uploads/.gitkeep index 58c6a0063..222de580b 100644 --- a/src/gausskernel/dbmind/tools/predictor/python/uploads/.gitkeep +++ b/src/gausskernel/dbmind/tools/components/predictor/python/uploads/.gitkeep @@ -1 +1 @@ -#the file to track this essential empty dictionary. +#the file to track this essential empty dictionary. diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/__init__.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/__init__.py new file mode 100644 index 000000000..0d9d90019 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +__version__ = '1.0.0' + + +from .core.main import main diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/__main__.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/__main__.py new file mode 100644 index 000000000..1bb75bd2b --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/__main__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+import sys + +try: + from dbmind.components.reprocessing_exporter import main +except ImportError: + sys.path.append('..') + from reprocessing_exporter import main + +main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/__init__.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/__init__.py new file mode 100644 index 000000000..e87dd292b --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/controller.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/controller.py new file mode 100644 index 000000000..2def89367 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/controller.py @@ -0,0 +1,35 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from dbmind.common.http.http_service import HttpService +from dbmind.common.http.http_service import Response + + +from .service import query_all_metrics + +app = HttpService('Reprocessing Exporter') + + +@app.route('/', methods=['GET', 'POST']) +def index(*args): + return Response('Hello Reprocessing Exporter!') + + +@app.route('/metrics', methods=['GET']) +def metrics(*args): + return Response(query_all_metrics(), mimetype='text/plain') + + +def run(host, port, ssl_keyfile, ssl_certfile, ssl_keyfile_password): + app.start_listen(host=host, port=port, + ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, + ssl_keyfile_password=ssl_keyfile_password) diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/dao.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/dao.py new file mode 100644 index 000000000..5e9d138e1 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/dao.py @@ -0,0 +1,45 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+from collections import defaultdict +from dbmind.common.tsdb.prometheus_client import PrometheusClient + +_prometheus_client: 'PrometheusClient' = None + + +class PrometheusMetricConfig: + def __init__(self, name, promql, desc): + self.name = name + self.desc = desc + self.promql = promql + self.labels = [] + self.label_map = defaultdict(str) + + def __repr__(self): + return repr((self.name, self.promql, self.labels)) + + +def set_prometheus_client(host, port): + global _prometheus_client + + url = 'http://' + host + ':' + port + client = PrometheusClient(url) + if not client.check_connection(): + raise ConnectionRefusedError("failed to connect TSDB url: %s" % url) + + _prometheus_client = client + + +def query(promql): + return _prometheus_client.custom_query( + promql + ) diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/main.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/main.py new file mode 100644 index 000000000..fcea36139 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/main.py @@ -0,0 +1,152 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import argparse +import getpass +import os +import tempfile +import logging +from logging.handlers import TimedRotatingFileHandler + +from dbmind.common.daemon import Daemon +from dbmind.common.utils import check_ssl_certificate_remaining_days, check_ssl_file_permission +from . import controller +from . import dao +from . import service +from .. import __version__ + +CURR_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '..') +) +DEFAULT_YAML = 'reprocessing_exporter.yml' +DEFAULT_LOGFILE = 'reprocessing_exporter.log' +with tempfile.NamedTemporaryFile(suffix='.pid') as fp: + EXPORTER_PIDFILE_NAME = fp.name + + +def path_type(path): + if os.path.exists(path): + return os.path.abspath(path) + else: + raise argparse.ArgumentTypeError('%s is not a valid path.' % path) + + +def parse_argv(argv): + parser = argparse.ArgumentParser( + description='Reprocessing Exporter: A re-processing module for metrics stored in the Prometheus server.' 
+    )
+    parser.add_argument('prometheus_host', help='from which host to pull data')
+    parser.add_argument('prometheus_port', help='the port to connect to on the Prometheus host')
+    parser.add_argument('--disable-https', action='store_true',
+                        help='disable the HTTPS scheme')
+    parser.add_argument('--ssl-keyfile', type=path_type, help='set the path of the ssl key file')
+    parser.add_argument('--ssl-certfile', type=path_type, help='set the path of the ssl certificate file')
+    parser.add_argument('--web.listen-address', default='127.0.0.1',
+                        help='address on which to expose metrics and web interface')
+    parser.add_argument('--web.listen-port', type=int, default=8181,
+                        help='listen port to expose metrics and web interface')
+    parser.add_argument('--collector.config', type=path_type, default=os.path.join(CURR_DIR, DEFAULT_YAML),
+                        help='collect metrics according to the content of the specified yaml file')
+    parser.add_argument('--log.filepath', type=os.path.abspath,
+                        default=os.path.join(os.getcwd(), DEFAULT_LOGFILE),
+                        help='the path of the log file')
+    parser.add_argument('--log.level', default='info', choices=('debug', 'info', 'warn', 'error', 'fatal'),
+                        help='only log messages with the given severity or above.'
+                             ' Valid levels: [debug, info, warn, error, fatal]')
+    parser.add_argument('--version', action='version', version=__version__)
+
+    args = parser.parse_args(argv)
+    ssl_keyfile_pwd = None
+    if args.disable_https:
+        # Clear up redundant arguments.
+        args.ssl_keyfile = None
+        args.ssl_certfile = None
+    else:
+        if not (args.ssl_keyfile and args.ssl_certfile):
+            parser.error('If you use the HTTPS protocol (default), you need to give the argument values '
+                         'of --ssl-keyfile and --ssl-certfile. '
+                         'Otherwise, use the --disable-https argument to disable the HTTPS protocol.')
+        else:
+            # Need to check whether the key file has been encrypted.
+ with open(args.ssl_keyfile) as fp: + for line in fp.readlines(): + if line.startswith('Proc-Type') and 'ENCRYPTED' in line.upper(): + ssl_keyfile_pwd = '' + while not ssl_keyfile_pwd: + ssl_keyfile_pwd = getpass.getpass('Enter PEM pass phrase:') + setattr(args, 'keyfile_password', ssl_keyfile_pwd) + return args + + +def set_logger(filepath, level): + level = level.upper() + log_path = os.path.dirname(filepath) + if not os.path.isdir(log_path): + os.makedirs(log_path, 500) + + formatter = logging.Formatter( + '[%(asctime)s]' + '[%(filename)s:%(lineno)d]' + '[%(funcName)s][%(levelname)s][%(threadName)s] ' + '- %(message)s' + ) + handler = TimedRotatingFileHandler( + filename=filepath, + when='D', + interval=1, + backupCount=15, + encoding='UTF-8', + delay=False, + utc=True + ) + handler.setFormatter(formatter) + handler.setLevel(level) + default_logger = logging.getLogger() + default_logger.setLevel(level) + default_logger.addHandler(handler) + + +class ExporterMain(Daemon): + def clean(self): + if os.path.exists(self.pid_file): + os.unlink(self.pid_file) + + def __init__(self, argv): + self.args = parse_argv(argv) + self.pid_file = EXPORTER_PIDFILE_NAME + super().__init__(self.pid_file) + + def run(self): + set_logger(self.args.__dict__['log.filepath'], + self.args.__dict__['log.level']) + dao.set_prometheus_client( + host=self.args.__dict__['prometheus_host'], + port=self.args.__dict__['prometheus_port'] + ) + service.register_prometheus_metrics( + rule_filepath=self.args.__dict__['collector.config'] + ) + + check_ssl_file_permission(self.args.ssl_keyfile, self.args.ssl_certfile) + check_ssl_certificate_remaining_days(self.args.ssl_certfile) + + controller.run( + host=self.args.__dict__['web.listen_address'], + port=self.args.__dict__['web.listen_port'], + ssl_keyfile=self.args.ssl_keyfile, + ssl_certfile=self.args.ssl_certfile, + ssl_keyfile_password=self.args.keyfile_password + ) + + +def main(argv): + ExporterMain(argv).start() diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/service.py b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/service.py new file mode 100644 index 000000000..59688d984 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/core/service.py @@ -0,0 +1,91 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
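The `dao` module above keeps a deliberately small surface: point it at a Prometheus server once, then hand it raw PromQL. A usage sketch, assuming Prometheus listens on 127.0.0.1:9090 and the package is importable as `dbmind.components.reprocessing_exporter` (the layout the `__main__` module falls back from):

```python
# Minimal sketch of the dao API defined above (host and port are assumptions).
from dbmind.components.reprocessing_exporter.core import dao

# One-time wiring; raises ConnectionRefusedError if the TSDB is unreachable.
dao.set_prometheus_client(host='127.0.0.1', port='9090')

# PromQL is passed through verbatim to PrometheusClient.custom_query().
samples = dao.query("avg by (instance) (irate(node_cpu_seconds_total{mode='iowait'}[5m]))")
for s in samples:
    print(s.labels, s.values[0])  # label dict and the first sampled value
```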
+import os
+import logging
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+import yaml
+from prometheus_client import CollectorRegistry
+from prometheus_client import generate_latest
+from prometheus_client import Gauge
+
+
+from .dao import PrometheusMetricConfig
+from .dao import query
+
+_metric_cnfs = []
+_thread_pool_executor = ThreadPoolExecutor(max_workers=os.cpu_count())
+
+_registry = CollectorRegistry()
+_from_metrics = defaultdict()
+
+
+def register_prometheus_metrics(rule_filepath):
+    with open(rule_filepath) as f:
+        data = yaml.load(f, Loader=yaml.FullLoader)
+
+    for _, item in data.items():
+        cnf = PrometheusMetricConfig(
+            name=item['name'],
+            promql=item['query'][0]['promql'],
+            desc=item['desc']
+        )
+        for mtr in item['metrics']:
+            if mtr["usage"] == "LABEL":
+                cnf.labels.append(mtr["name"])
+                cnf.label_map[mtr["label"]] = mtr["name"]
+
+        gauge = Gauge(cnf.name, cnf.desc, cnf.labels, registry=_registry)
+        _from_metrics[item['name']] = (gauge, cnf)
+        _metric_cnfs.append(cnf)
+
+
+def _standardize_labels(labels_map):
+    if 'from_instance' in labels_map:
+        labels_map['from_instance'] = labels_map['from_instance'].replace('\'', '')
+
+
+def query_all_metrics():
+    queried_results = []
+    # Collect (config, result) two-tuples that pair each input with its output,
+    # because concurrent tasks may complete out of order.
+    all_tasks = [
+        _thread_pool_executor.submit(
+            lambda cnf: (cnf, query(cnf.promql)),
+            cnf
+        ) for cnf in _metric_cnfs
+    ]
+    for future in as_completed(all_tasks):
+        queried_results.append(future.result())
+
+    for metric_cnf, diff_instance_results in queried_results:
+        for result_sequence in diff_instance_results:
+            if len(result_sequence) == 0:
+                logging.warning('Fetched nothing for %s.', metric_cnf.name)
+                continue
+
+            gauge, cnf = _from_metrics[metric_cnf.name]
+            labels_map = result_sequence.labels
+            # Unify the output label names for all metrics.
+            target_labels_map = {}
+            for k, v in labels_map.items():
+                target_labels_map[cnf.label_map[k]] = v
+            _standardize_labels(target_labels_map)
+            value = result_sequence.values[0]
+            try:
+                gauge.labels(**target_labels_map).set(value)
+            except Exception as e:
+                logging.exception(e)
+
+    return generate_latest(_registry)
diff --git a/src/gausskernel/dbmind/tools/components/reprocessing_exporter/reprocessing_exporter.yml b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/reprocessing_exporter.yml
new file mode 100644
index 000000000..1435ec6ef
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/reprocessing_exporter/reprocessing_exporter.yml
@@ -0,0 +1,478 @@
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
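Before the individual rules, a distilled view of the contract between this file and `register_prometheus_metrics()` above: only `name`, `desc`, the first `query` entry's `promql`, and the `metrics` entries whose `usage` is `LABEL` are consumed; fields such as `version`, `ttl`, `timeout`, `status`, and `dbRole` are carried in the rules below but ignored by the exporter code. Schematically, with a hypothetical rule shown as the parsed dict:

```python
# What one parsed rule looks like to register_prometheus_metrics().
rule = {
    'name': 'example_metric',
    'desc': 'an example re-processed metric',
    'query': [{'promql': "avg by (job, instance) (up)"}],
    'metrics': [
        {'name': 'from_job', 'label': 'job', 'usage': 'LABEL'},
        {'name': 'from_instance', 'label': 'instance', 'usage': 'LABEL'},
    ],
}

# Gauge label names come from the LABEL entries; at scrape time the PromQL
# result labels are renamed via label -> name before gauge.labels(...).set().
labels = [m['name'] for m in rule['metrics'] if m['usage'] == 'LABEL']
label_map = {m['label']: m['name'] for m in rule['metrics'] if m['usage'] == 'LABEL'}
```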
+ +os_cpu_iowait: + name: os_cpu_iowait + desc: iowait + query: + - name: os_cpu_iowait + promql: " + label_replace( + (avg(irate(node_cpu_seconds_total{mode='iowait'}[5m])) by (job, instance)), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +io_read_total: + name: io_read_total + desc: total number of io read + query: + - name: io_read_total + promql: " + label_replace( + sum by (instance) (irate(node_disk_reads_completed_total[5m])) + , 'instance', '$1', 'instance', '(.*):.*')" + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +io_write_total: + name: io_write_total + desc: total write number + query: + - name: io_write_total + promql: " + label_replace( + sum by (instance) (irate(node_disk_writes_completed_total[5m])) + , 'instance', '$1', 'instance', '(.*):.*')" + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +os_disk_iops: + name: os_disk_iops + desc: iops + query: + - name: os_disk_iops + promql: " + label_replace( + sum by (instance) (irate(node_disk_reads_completed_total[5m])) + sum by (instance) + (irate(node_disk_writes_completed_total[5m])), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +os_disk_ioutils: + name: os_disk_ioutils + desc: ioutils + query: + - name: os_disk_ioutils + promql: " + label_replace( + irate(node_disk_io_time_seconds_total[3m]), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: device + label: device + description: device label + usage: LABEL + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +io_read_bytes: + name: io_read_bytes + desc: io read bytes + query: + - name: io_read_bytes + promql: " + label_replace( + sum by (instance) (irate(node_disk_read_bytes_total[1m])) / 1024 / 1024, + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +io_write_bytes: + name: io_write_bytes + desc: io write bytes + query: + - name: io_write_bytes + promql: " + label_replace( + sum by (instance) (irate(node_disk_written_bytes_total[1m])) / 1024 / 1024, + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +os_disk_iocapacity: + name: os_disk_iocapacity + desc: os_disk_iocapacity 
+ query: + - name: os_disk_iocapacity + promql: " + label_replace( + sum by (instance) (irate(node_disk_read_bytes_total[1m])) / 1024 / 1024 + sum by (instance) (irate(node_disk_written_bytes_total[1m])) / 1024 / 1024, + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +os_disk_usage: + name: os_disk_usage + desc: os_disk_usage + query: + - name: os disk usage + promql: " + label_replace( + 1 - ((node_filesystem_avail_bytes{fstype=~'ext.|xfs'}) / node_filesystem_size_bytes{fstype=~'ext.|xfs'}), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: job + usage: LABEL + - name: from_instance + label: instance + description: instance + usage: LABEL + - name: fstype + label: fstype + description: fstype + usage: LABEL + - name: mountpoint + label: mountpoint + description: mountpoint + usage: LABEL + - name: device + label: device + description: device + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +io_queue_number: + name: io_queue_number + desc: io queue number + query: + - name: io_queue_number + promql: " + label_replace( + rate(node_disk_io_time_weighted_seconds_total[5m]), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: job + usage: LABEL + - name: from_instance + label: instance + description: instance + usage: LABEL + - name: device + label: device + description: device + usage: LABEL + + status: enable + ttl: 60 + timeout: 0.1 + + +io_read_delay_time: + name: io_read_delay_time + desc: io read delay time + query: + - name: io_read_delay_time + promql: " + label_replace( + rate(node_disk_read_time_seconds_total[5m]) / (rate(node_disk_reads_completed_total[5m]) + 0.00001) * 1000, + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: job + usage: LABEL + - name: from_instance + label: instance + description: instance + usage: LABEL + - name: device + label: device + description: device + usage: LABEL + + status: enable + ttl: 60 + timeout: 0.1 + + +io_write_delay_time: + name: io_write_delay_time + desc: io write delay time + query: + - name: io_write_delay_time + promql: " + label_replace( + rate(node_disk_write_time_seconds_total[5m]) / (rate(node_disk_writes_completed_total[5m]) + 0.00001) * 1000, + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: job + usage: LABEL + - name: from_instance + label: instance + description: instance + usage: LABEL + - name: device + label: device + description: device + usage: LABEL + + status: enable + ttl: 60 + timeout: 0.1 + + +os_cpu_processor_number: + name: os_cpu_processor_number + desc: os_cpu_processor_number + query: + - name: os_cpu_processor_number + promql: " + label_replace( + count by (instance, job) (node_cpu_seconds_total{mode='user'}), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + 
description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + + +os_cpu_usage: + name: os_cpu_usage + desc: used for CPU usage collection + query: + - name: os_cpu_usage + promql: " + label_replace( + 1 - (avg by(job, instance) (irate(node_cpu_seconds_total{mode='idle'}[5m]))), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + +os_mem_usage: + name: os_mem_usage + desc: used for memory usage collection + query: + - name: os_mem_usage + promql: " + label_replace( + 1 - avg(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) by (job, instance), + 'instance', '$1', 'instance', '(.*):.*') + " + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + +gaussdb_qps_by_instance: + name: gaussdb_qps_by_instance + desc: qps collection + query: + - name: gaussdb_qps_by_instance + promql: | + ceil(sum(irate(pg_db_xact_commit[5m])) by (job, from_instance) + + sum(irate(pg_db_xact_rollback[5m])) by (job, from_instance)) + version: '>=0.0.0' + timeout: 0.1 + ttl: 10 + status: enable + dbRole: "" + metrics: + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: from_instance + description: from instance + usage: LABEL + status: enable + ttl: 60 + timeout: 0.1 + diff --git a/src/gausskernel/dbmind/tools/components/slow_query_diagnosis.py b/src/gausskernel/dbmind/tools/components/slow_query_diagnosis.py new file mode 100644 index 000000000..3045ba963 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/slow_query_diagnosis.py @@ -0,0 +1,128 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
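With the rules above registered, the exporter republishes every metric in the standard Prometheus text format on the `/metrics` route of `controller.py`. An illustrative scrape, assuming the exporter was started with `--disable-https` on the default listen address and port from `main.py`:

```python
# Scrape the exporter and print one family (assumes http://127.0.0.1:8181).
import requests

text = requests.get('http://127.0.0.1:8181/metrics').text
for line in text.splitlines():
    if line.startswith('os_cpu_usage'):
        # e.g. os_cpu_usage{from_instance="10.0.0.1",from_job="node"} 0.12
        print(line)
```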
+import argparse
+import sys
+import os
+import traceback
+import time
+
+from prettytable import PrettyTable
+
+from dbmind import global_vars
+from dbmind import constants
+from dbmind.common.utils import write_to_terminal
+from dbmind.cmd.config_utils import load_sys_configs
+from dbmind.common.utils import keep_inputting_until_correct
+from dbmind.common.utils import check_positive_integer, check_positive_float
+from dbmind.metadatabase.dao import slow_queries
+
+
+def show(query, start_time, end_time):
+    field_names = (
+        'slow_query_id', 'schema_name', 'db_name',
+        'query', 'start_at', 'duration_time',
+        'root_cause', 'suggestion'
+    )
+    output_table = PrettyTable()
+    output_table.field_names = field_names
+
+    result = slow_queries.select_slow_queries(field_names, query, start_time, end_time)
+    nb_rows = 0
+    for slow_query in result:
+        row = [getattr(slow_query, field) for field in field_names]
+        output_table.add_row(row)
+        nb_rows += 1
+
+    if nb_rows > 50:
+        write_to_terminal('The result set contains more than 50 rows, '
+                          'which may be too long to display.')
+        char = keep_inputting_until_correct('Do you want to dump to a file? [Y]es, [N]o.', ('Y', 'N'))
+        if char == 'Y':
+            dump_file_name = 'slow_queries_%s.txt' % int(time.time())
+            with open(dump_file_name, 'w+') as fp:
+                fp.write(str(output_table))
+            write_to_terminal('Dumped file is %s.' % os.path.abspath(dump_file_name))
+        elif char == 'N':
+            print(output_table)
+            print('(%d rows)' % nb_rows)
+    else:
+        print(output_table)
+        print('(%d rows)' % nb_rows)
+
+
+def clean(retention_days):
+    if float(retention_days) == 0:
+        slow_queries.truncate_slow_queries()
+    else:
+        start_time = int((time.time() - retention_days * 24 * 60 * 60) * 1000)
+        slow_queries.delete_slow_queries(start_time)
+    write_to_terminal('Successfully deleted outdated results.')
+
+
+def main(argv):
+    parser = argparse.ArgumentParser(description='Slow Query Diagnosis: Analyse the root cause of slow query')
+    parser.add_argument('action', choices=('show', 'clean'), help='choose a functionality to perform')
+    parser.add_argument('-c', '--conf', metavar='DIRECTORY', required=True,
+                        help='set the directory of configuration files')
+
+    parser.add_argument('--query', metavar='SLOW_QUERY',
+                        help='set a slow query you want to retrieve')
+    parser.add_argument('--start-time', type=check_positive_integer, metavar='TIMESTAMP_IN_MICROSECONDS',
+                        help='set the start time of a slow SQL diagnosis result to be retrieved')
+    parser.add_argument('--end-time', type=check_positive_integer, metavar='TIMESTAMP_IN_MICROSECONDS',
+                        help='set the end time of a slow SQL diagnosis result to be retrieved')
+
+    parser.add_argument('--retention-days', type=check_positive_float, metavar='DAYS', default=0,
+                        help='clear historical diagnosis results and set '
+                             'the maximum number of days to retain data')
+
+    args = parser.parse_args(argv)
+
+    if not os.path.exists(args.conf):
+        parser.exit(1, 'Directory %s not found.' % args.conf)
+
+    if args.action == 'show':
+        if None in (args.query, args.start_time, args.end_time):
+            write_to_terminal('There may be a lot of results because you did not use all filter conditions.',
+                              color='red')
+            inputted_char = keep_inputting_until_correct('Press [A] to agree, press [Q] to quit:', ('A', 'Q'))
+            if inputted_char == 'Q':
+                parser.exit(0, "Quitting due to user's instruction.")
+    elif args.action == 'clean':
+        if args.retention_days == 0:
+            write_to_terminal('You did not specify retention days, so we will delete all historical results.',
+                              color='red')
+            inputted_char = keep_inputting_until_correct('Press [A] to agree, press [Q] to quit:', ('A', 'Q'))
+            if inputted_char == 'Q':
+                parser.exit(0, "Quitting due to user's instruction.")
+
+    # Set the global_vars so that the DAO can log in to the meta-database.
+    os.chdir(args.conf)
+    global_vars.configs = load_sys_configs(constants.CONFILE_NAME)
+
+    try:
+        if args.action == 'show':
+            show(args.query, args.start_time, args.end_time)
+        elif args.action == 'clean':
+            clean(args.retention_days)
+    except Exception as e:
+        write_to_terminal('An error occurred probably due to database operations, '
+                          'please check database configurations. For details:\n' +
+                          str(e), color='red', level='error')
+        traceback.print_tb(e.__traceback__)
+        return 2
+    return 0
+
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
diff --git a/src/gausskernel/dbmind/tools/sqldiag/README.md b/src/gausskernel/dbmind/tools/components/sqldiag/README.md
similarity index 100%
rename from src/gausskernel/dbmind/tools/sqldiag/README.md
rename to src/gausskernel/dbmind/tools/components/sqldiag/README.md
diff --git a/src/gausskernel/dbmind/tools/components/sqldiag/__init__.py b/src/gausskernel/dbmind/tools/components/sqldiag/__init__.py
new file mode 100644
index 000000000..6b574003f
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/sqldiag/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+
+from .main import main
\ No newline at end of file
diff --git a/src/gausskernel/dbmind/tools/components/sqldiag/__main__.py b/src/gausskernel/dbmind/tools/components/sqldiag/__main__.py
new file mode 100644
index 000000000..113c50f5f
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/components/sqldiag/__main__.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2022 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
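Because `main()` in the slow-query tool above takes an argv list, it can also be driven programmatically. A sketch, assuming the tools tree is installed as the `dbmind` package (the same layout the `__main__` modules in this patch import from); the configuration directory and filter values are hypothetical:

```python
from dbmind.components import slow_query_diagnosis

# Equivalent to: python slow_query_diagnosis.py show -c /path/to/confdir \
#     --query 'SELECT ...' --start-time 1650000000000 --end-time 1650003600000
slow_query_diagnosis.main([
    'show',
    '-c', '/path/to/confdir',         # directory holding the DBMind config files
    '--query', 'SELECT * FROM t1;',   # optional filters; omitting any of them
    '--start-time', '1650000000000',  # triggers the interactive warning above
    '--end-time', '1650003600000',    # (epoch timestamps, as stored in the meta-database)
])
```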
+ +import sys + +try: + from dbmind.components.sqldiag import main +except ImportError: + sys.path.append('..') + from sqldiag import main + +main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/diag.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/diag.py similarity index 83% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/diag.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/diag.py index 011d6808d..12c9281f3 100644 --- a/src/gausskernel/dbmind/tools/sqldiag/algorithm/diag.py +++ b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/diag.py @@ -1,7 +1,20 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + import logging import sys -from preprocessing import LoadData +from ..preprocessing import LoadData from .duration_time_model.dnn import DnnModel from .duration_time_model.template import TemplateModel diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/__init__.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/__init__.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/__init__.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/__init__.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/dnn.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/dnn.py similarity index 89% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/dnn.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/dnn.py index b392a6ee2..4af924322 100644 --- a/src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/dnn.py +++ b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/dnn.py @@ -1,3 +1,16 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + import logging import os import sys @@ -8,9 +21,9 @@ from abc import ABC import numpy as np from sklearn.preprocessing import MinMaxScaler -from algorithm.word2vec import Word2Vector -from preprocessing import templatize_sql -from utils import check_illegal_sql +from ..word2vec import Word2Vector +from ...preprocessing import templatize_sql +from ...utils import check_illegal_sql from . 
import AbstractModel diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/template.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/template.py similarity index 97% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/template.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/template.py index e6fe9e114..ec20d0ede 100644 --- a/src/gausskernel/dbmind/tools/sqldiag/algorithm/duration_time_model/template.py +++ b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/duration_time_model/template.py @@ -20,9 +20,9 @@ import stat from functools import reduce from collections import defaultdict -from algorithm.sql_similarity import calc_sql_distance -from preprocessing import get_sql_template, templatize_sql -from utils import check_illegal_sql, LRUCache +from ..sql_similarity import calc_sql_distance +from ...preprocessing import get_sql_template, templatize_sql +from ...utils import check_illegal_sql, LRUCache from . import AbstractModel diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/__init__.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/__init__.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/__init__.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/__init__.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/cosine_distance.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/cosine_distance.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/cosine_distance.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/cosine_distance.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/levenshtein.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/levenshtein.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/levenshtein.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/levenshtein.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/list_distance.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/list_distance.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/list_distance.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/list_distance.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/parse_tree.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/parse_tree.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/sql_similarity/parse_tree.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/sql_similarity/parse_tree.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/algorithm/word2vec.py b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/word2vec.py similarity index 70% rename from src/gausskernel/dbmind/tools/sqldiag/algorithm/word2vec.py rename to src/gausskernel/dbmind/tools/components/sqldiag/algorithm/word2vec.py index 232d493c2..9013a63bf 100644 --- a/src/gausskernel/dbmind/tools/sqldiag/algorithm/word2vec.py +++ b/src/gausskernel/dbmind/tools/components/sqldiag/algorithm/word2vec.py @@ -1,6 +1,19 @@ +# Copyright 
(c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + from gensim.models import word2vec -from preprocessing import templatize_sql +from ..preprocessing import templatize_sql class Sentence(object): diff --git a/src/gausskernel/dbmind/tools/sqldiag/load_sql_from_wdr.py b/src/gausskernel/dbmind/tools/components/sqldiag/load_sql_from_wdr.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/load_sql_from_wdr.py rename to src/gausskernel/dbmind/tools/components/sqldiag/load_sql_from_wdr.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/main.py b/src/gausskernel/dbmind/tools/components/sqldiag/main.py similarity index 95% rename from src/gausskernel/dbmind/tools/sqldiag/main.py rename to src/gausskernel/dbmind/tools/components/sqldiag/main.py index 256e33d84..aafb1e8de 100644 --- a/src/gausskernel/dbmind/tools/sqldiag/main.py +++ b/src/gausskernel/dbmind/tools/components/sqldiag/main.py @@ -17,15 +17,15 @@ import logging import sys from configparser import ConfigParser -from algorithm.diag import SQLDiag -from utils import ResultSaver, is_valid_conf -from preprocessing import LoadData, split_sql +from .algorithm.diag import SQLDiag +from .utils import ResultSaver, is_valid_conf +from .preprocessing import LoadData, split_sql __version__ = '2.0.0' __description__ = 'SQLdiag integrated by openGauss.' 
-def parse_args(): +def parse_args(argv): parser = argparse.ArgumentParser(description=__description__) parser.add_argument('mode', choices=['train', 'predict', 'finetune'], help='The training mode is to perform feature extraction and ' @@ -47,7 +47,7 @@ def parse_args(): help='The storage path of the model file, used to read or save the model file.') parser.add_argument('--config-file', default='sqldiag.conf') parser.version = __version__ - return parser.parse_args() + return parser.parse_args(argv) def get_config(filepath): @@ -56,7 +56,8 @@ def get_config(filepath): return cp -def main(args): +def main(argv): + args = parse_args(argv) logging.basicConfig(level=logging.WARNING) if not is_valid_conf(args.config_file): logging.fatal('The [--config-file] parameter is incorrect') @@ -120,5 +121,4 @@ def main(args): if __name__ == '__main__': - main(parse_args()) - + main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/sqldiag/preprocessing.py b/src/gausskernel/dbmind/tools/components/sqldiag/preprocessing.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/preprocessing.py rename to src/gausskernel/dbmind/tools/components/sqldiag/preprocessing.py diff --git a/src/gausskernel/dbmind/tools/sqldiag/requirements.txt b/src/gausskernel/dbmind/tools/components/sqldiag/requirements.txt similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/requirements.txt rename to src/gausskernel/dbmind/tools/components/sqldiag/requirements.txt diff --git a/src/gausskernel/dbmind/tools/sqldiag/result.png b/src/gausskernel/dbmind/tools/components/sqldiag/result.png similarity index 99% rename from src/gausskernel/dbmind/tools/sqldiag/result.png rename to src/gausskernel/dbmind/tools/components/sqldiag/result.png index b616bb6e8..aaabd4bf3 100644 Binary files a/src/gausskernel/dbmind/tools/sqldiag/result.png and b/src/gausskernel/dbmind/tools/components/sqldiag/result.png differ diff --git a/src/gausskernel/dbmind/tools/sqldiag/sample_data/predict.csv b/src/gausskernel/dbmind/tools/components/sqldiag/sample_data/predict.csv similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/sample_data/predict.csv rename to src/gausskernel/dbmind/tools/components/sqldiag/sample_data/predict.csv diff --git a/src/gausskernel/dbmind/tools/sqldiag/sample_data/train.csv b/src/gausskernel/dbmind/tools/components/sqldiag/sample_data/train.csv similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/sample_data/train.csv rename to src/gausskernel/dbmind/tools/components/sqldiag/sample_data/train.csv diff --git a/src/gausskernel/dbmind/tools/sqldiag/sqldiag.conf b/src/gausskernel/dbmind/tools/components/sqldiag/sqldiag.conf similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/sqldiag.conf rename to src/gausskernel/dbmind/tools/components/sqldiag/sqldiag.conf diff --git a/src/gausskernel/dbmind/tools/sqldiag/utils.py b/src/gausskernel/dbmind/tools/components/sqldiag/utils.py similarity index 100% rename from src/gausskernel/dbmind/tools/sqldiag/utils.py rename to src/gausskernel/dbmind/tools/components/sqldiag/utils.py diff --git a/src/gausskernel/dbmind/tools/xtuner/README.md b/src/gausskernel/dbmind/tools/components/xtuner/README.md similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/README.md rename to src/gausskernel/dbmind/tools/components/xtuner/README.md diff --git a/src/gausskernel/dbmind/tools/components/xtuner/__init__.py b/src/gausskernel/dbmind/tools/components/xtuner/__init__.py new file mode 100644 index 
000000000..fdc941729 --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/xtuner/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from .tuner.main import main diff --git a/src/gausskernel/dbmind/tools/components/xtuner/__main__.py b/src/gausskernel/dbmind/tools/components/xtuner/__main__.py new file mode 100644 index 000000000..3e2c206be --- /dev/null +++ b/src/gausskernel/dbmind/tools/components/xtuner/__main__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import sys + +try: + from dbmind.components.xtuner.tuner.main import main +except ImportError: + sys.path.append('..') + from xtuner.tuner.main import main + +main(sys.argv[1:]) diff --git a/src/gausskernel/dbmind/tools/xtuner/share/knobs.json.template b/src/gausskernel/dbmind/tools/components/xtuner/share/knobs.json.template similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/share/knobs.json.template rename to src/gausskernel/dbmind/tools/components/xtuner/share/knobs.json.template diff --git a/src/gausskernel/dbmind/tools/xtuner/share/server.json.template b/src/gausskernel/dbmind/tools/components/xtuner/share/server.json.template similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/share/server.json.template rename to src/gausskernel/dbmind/tools/components/xtuner/share/server.json.template diff --git a/src/gausskernel/dbmind/tools/xtuner/share/xtuner.conf.template b/src/gausskernel/dbmind/tools/components/xtuner/share/xtuner.conf.template similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/share/xtuner.conf.template rename to src/gausskernel/dbmind/tools/components/xtuner/share/xtuner.conf.template diff --git a/src/gausskernel/dbmind/tools/xtuner/test/test_db_agent.py b/src/gausskernel/dbmind/tools/components/xtuner/test/test_db_agent.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/test/test_db_agent.py rename to src/gausskernel/dbmind/tools/components/xtuner/test/test_db_agent.py diff --git a/src/gausskernel/dbmind/tools/xtuner/test/test_pso.py b/src/gausskernel/dbmind/tools/components/xtuner/test/test_pso.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/test/test_pso.py rename to src/gausskernel/dbmind/tools/components/xtuner/test/test_pso.py diff --git a/src/gausskernel/dbmind/tools/xtuner/test/test_ssh.py b/src/gausskernel/dbmind/tools/components/xtuner/test/test_ssh.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/test/test_ssh.py rename to 
src/gausskernel/dbmind/tools/components/xtuner/test/test_ssh.py diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/resource/__init__.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/__init__.py similarity index 100% rename from src/gausskernel/dbmind/tools/anomaly_detection/detector/service/resource/__init__.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/__init__.py diff --git a/src/gausskernel/dbmind/tools/anomaly_detection/detector/service/storage/__init__.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/algorithms/__init__.py similarity index 100% rename from src/gausskernel/dbmind/tools/anomaly_detection/detector/service/storage/__init__.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/algorithms/__init__.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/pso.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/algorithms/pso.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/pso.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/algorithms/pso.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/rl_agent.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/algorithms/rl_agent.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/rl_agent.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/algorithms/rl_agent.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/README.md b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/README.md similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/README.md rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/README.md diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/__init__.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/__init__.py similarity index 97% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/__init__.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/__init__.py index 585f4f9cd..266b1b9f1 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/__init__.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/__init__.py @@ -18,8 +18,8 @@ import os import types import logging -from tuner.exceptions import ConfigureError -from tuner.executor import ExecutorFactory +from ..exceptions import ConfigureError +from ..executor import ExecutorFactory # Create a local shell with resident memory. 
# We must pass a local shell as an API to benchmark instance, diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/period.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/period.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/period.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/period.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/sysbench.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/sysbench.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/sysbench.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/sysbench.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/template.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/template.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/template.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/template.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/tpcc.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/tpcc.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/tpcc.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/tpcc.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/tpcds.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/tpcds.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/tpcds.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/tpcds.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/tpch.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/tpch.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/benchmark/tpch.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/benchmark/tpch.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/character.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/character.py similarity index 99% rename from src/gausskernel/dbmind/tools/xtuner/tuner/character.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/character.py index 770280391..6174765fa 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/character.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/character.py @@ -13,7 +13,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ -from tuner.utils import cached_property +from .utils import cached_property class WORKLOAD_TYPE: diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/db_agent.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/db_agent.py similarity index 96% rename from src/gausskernel/dbmind/tools/xtuner/tuner/db_agent.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/db_agent.py index bc00f745d..cdc6c4816 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/db_agent.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/db_agent.py @@ -15,13 +15,13 @@ See the Mulan PSL v2 for more details. 
import logging -from tuner.character import OpenGaussMetric -from tuner.exceptions import DBStatusError, SecurityError, ExecutionError, OptionError -from tuner.executor import ExecutorFactory -from tuner.knob import RecommendedKnobs, Knob -from tuner.utils import clip -from tuner.utils import construct_dividing_line -from tuner.utils import to_tuples +from .character import OpenGaussMetric +from .exceptions import DBStatusError, SecurityError, ExecutionError, OptionError +from .executor import ExecutorFactory +from .knob import RecommendedKnobs, Knob +from .utils import clip +from .utils import construct_dividing_line +from .utils import to_tuples def check_special_character(phrase): @@ -322,12 +322,12 @@ class DB_Agent: # Check whether frequent input password is required. user_desc = self.exec_command_on_host('sudo -n -l -U %s' % self.host_user, ignore_status_code=True) if (not user_desc) or user_desc.find('NOPASSWD') < 0: - logging.warning("Hint: You must add this line '%s ALL=(ALL) NOPASSWD: ALL' to the file '/etc/sudoers' " - "with administrator permission.", self.host_user) + logging.warning("Hint: You must add this line '%s ALL=(root) NOPASSWD: /usr/bin/tee /proc/sys/vm/drop_caches'" + " to the file '/etc/sudoers' with administrator permission.", self.host_user) return False self.exec_command_on_host('sync') - self.exec_command_on_host('sudo bash -c "echo 1 > /proc/sys/vm/drop_caches"') + self.exec_command_on_host('echo 1 | sudo /usr/bin/tee /proc/sys/vm/drop_caches') return True except Exception as e: logging.warning("Cannot drop cache. %s.", e) diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/db_env.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/db_env.py similarity index 98% rename from src/gausskernel/dbmind/tools/xtuner/tuner/db_env.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/db_env.py index 16ac5203a..65cef6306 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/db_env.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/db_env.py @@ -17,8 +17,8 @@ import logging import numpy as np -from tuner.env import Env, Box -from tuner.exceptions import ExecutionError +from .env import Env, Box +from .exceptions import ExecutionError class DB_Env(Env): diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/env.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/env.py similarity index 97% rename from src/gausskernel/dbmind/tools/xtuner/tuner/env.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/env.py index d779aa6eb..3d32b3e00 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/env.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/env.py @@ -22,7 +22,8 @@ class Box: def __init__(self, low, high, shape, dtype=np.float32): self.dtype = dtype - assert np.isscalar(low) and np.isscalar(high) + if not (np.isscalar(low) and np.isscalar(high)): + raise ValueError() self.low = np.full(shape, low) self.high = np.full(shape, high) self.shape = shape diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/exceptions.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/exceptions.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/exceptions.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/exceptions.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/executor.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/executor.py similarity index 98% rename from src/gausskernel/dbmind/tools/xtuner/tuner/executor.py rename to 
src/gausskernel/dbmind/tools/components/xtuner/tuner/executor.py index f2c5634ed..0e165ecf9 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/executor.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/executor.py @@ -177,13 +177,13 @@ class SSH(Executor): time.sleep(0.1) # Wait until all commands are executed. - start_ts = time.time() + start_time = time.monotonic() while not chan.exit_status_ready(): if chan.recv_ready(): stdout.append(chan.recv(buff_size)) if chan.recv_stderr_ready(): stderr.append(chan.recv_stderr(buff_size)) - if timeout and (time.time() - start_ts) > timeout: + if timeout and (time.monotonic() - start_time) > timeout: break time.sleep(0.1) diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/knob.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/knob.py similarity index 99% rename from src/gausskernel/dbmind/tools/xtuner/tuner/knob.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/knob.py index 4178da9cb..1f0ce0e70 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/knob.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/knob.py @@ -17,7 +17,7 @@ import json from prettytable import PrettyTable -from tuner.utils import construct_dividing_line +from .utils import construct_dividing_line class _KnobEncoder(json.JSONEncoder): diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/main.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/main.py similarity index 98% rename from src/gausskernel/dbmind/tools/xtuner/tuner/main.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/main.py index 77eff02f3..b24eae5a9 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/main.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/main.py @@ -25,9 +25,9 @@ import sys import logging from getpass import getpass -from tuner.exceptions import OptionError -from tuner.xtuner import procedure_main -from tuner import utils +from .exceptions import OptionError +from .xtuner import procedure_main +from . import utils __version__ = '2.1.0' __description__ = 'X-Tuner: a self-tuning tool integrated by openGauss.' @@ -237,13 +237,13 @@ def get_config(filepath): return config -def main(): +def main(argv): if not check_version(): print("FATAL: You should use at least Python 3.6 or above version.") return -1 parser = get_argv_parser() - args = parser.parse_args() + args = parser.parse_args(argv) mode = args.mode db_info = build_db_info(args) if not db_info: @@ -263,7 +263,3 @@ def main(): 'For details about the error cause, please see %s.' 
% (e, config['logfile']), file=sys.stderr, flush=True) return -1 - - -if __name__ == '__main__': - main() diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/recommend.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/recommend.py similarity index 98% rename from src/gausskernel/dbmind/tools/xtuner/tuner/recommend.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/recommend.py index 520a9544c..a6e61878b 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/recommend.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/recommend.py @@ -16,13 +16,13 @@ import inspect from prettytable import PrettyTable -from tuner.character import OpenGaussMetric -from tuner.character import WORKLOAD_TYPE -from tuner.knob import Knob -from tuner.knob import RecommendedKnobs -from tuner.utils import GREEN_FMT, YELLOW_FMT, RED_FMT -from tuner.utils import cached_property -from tuner.utils import clip +from .character import OpenGaussMetric +from .character import WORKLOAD_TYPE +from .knob import Knob +from .knob import RecommendedKnobs +from .utils import GREEN_FMT, YELLOW_FMT, RED_FMT +from .utils import cached_property +from .utils import clip # The basic storage unit is kB. SIZE_UNIT_MAP = {"kB": 1, diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/recorder.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/recorder.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/recorder.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/recorder.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/utils.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/utils.py similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/utils.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/utils.py diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/xtuner.conf b/src/gausskernel/dbmind/tools/components/xtuner/tuner/xtuner.conf similarity index 100% rename from src/gausskernel/dbmind/tools/xtuner/tuner/xtuner.conf rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/xtuner.conf diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/xtuner.py b/src/gausskernel/dbmind/tools/components/xtuner/tuner/xtuner.py similarity index 95% rename from src/gausskernel/dbmind/tools/xtuner/tuner/xtuner.py rename to src/gausskernel/dbmind/tools/components/xtuner/tuner/xtuner.py index 2f41b003e..d649473fe 100644 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/xtuner.py +++ b/src/gausskernel/dbmind/tools/components/xtuner/tuner/xtuner.py @@ -18,15 +18,15 @@ import os import signal from logging import handlers -from tuner import benchmark -from tuner.character import WORKLOAD_TYPE -from tuner.db_agent import new_db_agent -from tuner.db_env import DB_Env -from tuner.knob import load_knobs_from_json_file -from tuner.recommend import recommend_knobs -from tuner.recorder import Recorder -from tuner.exceptions import ConfigureError -from tuner.utils import YELLOW_FMT +from . 
import benchmark +from .character import WORKLOAD_TYPE +from .db_agent import new_db_agent +from .db_env import DB_Env +from .knob import load_knobs_from_json_file +from .recommend import recommend_knobs +from .recorder import Recorder +from .exceptions import ConfigureError +from .utils import YELLOW_FMT def prompt_restart_risks(): @@ -207,7 +207,8 @@ def global_search(env, config): pbound = {name: (0, 1) for name in env.db.ordered_knob_list} def performance_function(**params): - assert len(params) == env.nb_actions, 'Failed to check the input feature dimension.' + if not len(params) == env.nb_actions: + raise AssertionError('Failed to check the input feature dimension.') for name, val in params.items(): index = env.db.ordered_knob_list.index(name) diff --git a/src/gausskernel/dbmind/tools/constants.py b/src/gausskernel/dbmind/tools/constants.py new file mode 100644 index 000000000..7aaebad51 --- /dev/null +++ b/src/gausskernel/dbmind/tools/constants.py @@ -0,0 +1,40 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import os + +__version__ = '1.0.0' +__description__ = 'openGauss DBMind: An autonomous platform for openGauss' + +DBMIND_PATH = os.path.dirname(os.path.abspath(__file__)) +MISC_PATH = os.path.join(DBMIND_PATH, 'misc') + +CONFILE_NAME = 'dbmind.conf' # the name of configuration file +CONFILE_HEADER_NAME = 'dbmind.conf.header' +PIDFILE_NAME = 'dbmind.pid' +LOGFILE_NAME = 'dbmind.log' +METRIC_MAP_CONFIG = 'metric_map.conf' +MUST_FILTER_LABEL_CONFIG = 'filter_label.conf' +METRIC_VALUE_RANGE_CONFIG = "metric_value_range.conf" +DYNAMIC_CONFIG = 'dynamic_config.db' +DATE_FORMAT = '%Y-%m-%d %H:%M:%S' + +DBMIND_CORE_CONTROLLER = 'dbmind.controllers.dbmind_core' + +# The following list shows tasks that may be dispatched in the backend. +SLOW_QUERY_DIAGNOSIS_NAME = 'slow_query_diagnosis' +FORECAST_NAME = 'forecast' +ANOMALY_DETECTION_NAME = 'anomaly_detection' +ALARM_LOG_DIAGNOSIS_NAME = 'alarm_log_diagnosis' +TIMED_TASK_NAMES = ( + SLOW_QUERY_DIAGNOSIS_NAME, FORECAST_NAME +) diff --git a/src/gausskernel/dbmind/tools/global_vars.py b/src/gausskernel/dbmind/tools/global_vars.py new file mode 100644 index 000000000..51b9316b9 --- /dev/null +++ b/src/gausskernel/dbmind/tools/global_vars.py @@ -0,0 +1,26 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
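Two hunks in this patch, `Box.__init__` in env.py and `performance_function` in xtuner.py above, swap bare `assert` statements for explicit raises. The motivation: `assert` is compiled out entirely when Python runs with `-O`, so validation that lives only in asserts silently disappears under optimized deployments. A minimal, runnable demonstration (`check_dims_*` are illustrative names):

```
def check_dims_assert(params, nb_actions):
    # Compiled out under `python -O`: the check simply vanishes.
    assert len(params) == nb_actions, 'Failed to check the input feature dimension.'

def check_dims_raise(params, nb_actions):
    # Survives any optimization flag.
    if len(params) != nb_actions:
        raise AssertionError('Failed to check the input feature dimension.')

for check in (check_dims_assert, check_dims_raise):
    try:
        check({'a': 1}, 2)
        print(check.__name__, 'let the bad input through')  # happens with -O
    except AssertionError as exc:
        print(check.__name__, 'raised:', exc)
```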
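Similarly, the SSH executor hunk earlier in this patch replaces `time.time()` with `time.monotonic()` for its read-timeout bookkeeping. The wall clock can step backwards or jump forwards (NTP corrections, manual changes), which can stretch a deadline indefinitely or trip it instantly; a monotonic clock only moves forward, so elapsed-time checks stay correct. The shape of the loop, with `poll_done`/`read_chunk` as hypothetical stand-ins for the paramiko channel calls:

```
import time

def drain_with_timeout(poll_done, read_chunk, timeout=None):
    chunks = []
    start = time.monotonic()  # immune to wall-clock steps
    while not poll_done():
        chunk = read_chunk()
        if chunk:
            chunks.append(chunk)
        if timeout and (time.monotonic() - start) > timeout:
            break  # budget spent, stop waiting
        time.sleep(0.1)
    return b''.join(chunks)

# Never finishes on its own; the monotonic deadline cuts it off at ~0.3s.
print(drain_with_timeout(lambda: False, lambda: b'', timeout=0.3))
```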
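main.py's entry point also changes from `main()` to `main(argv)` with `parser.parse_args(argv)`, and the module-level `if __name__ == '__main__'` block is dropped, presumably because the tool is now invoked through the new components package rather than as a standalone script. Injecting `argv` makes the CLI drivable from tests or a dispatcher without touching `sys.argv`. A sketch of the pattern (the patched signature takes `argv` without a default; `argv=None` is a common variant, and the flag here is illustrative):

```
import argparse

def main(argv=None):
    parser = argparse.ArgumentParser(prog='xtuner')
    parser.add_argument('mode', choices=('recommend', 'train', 'tune'))
    # argv=None falls back to sys.argv[1:], so normal CLI use is unchanged.
    args = parser.parse_args(argv)
    print('running in %s mode' % args.mode)
    return 0

# A test (or a dispatcher) can now drive the entry point directly:
main(['recommend'])
```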
+"""Using this global_vars must import as the following: + + >>> from dbmind import global_vars + +""" +configs = None +dynamic_configs = None +metric_map = None +must_filter_labels = None +worker = None +confpath = None +backend_list = [] +shared_buffer = None +backend_timed_task = [] diff --git a/src/gausskernel/dbmind/tools/index_advisor/database-info.conf b/src/gausskernel/dbmind/tools/index_advisor/database-info.conf deleted file mode 100644 index ee758a561..000000000 --- a/src/gausskernel/dbmind/tools/index_advisor/database-info.conf +++ /dev/null @@ -1,25 +0,0 @@ -[server] -app_name= -database= -port= -host= -user= -workload_user= -schema= - -index_intervals= -max_index_num= -max_index_storage= -driver= - -sql_amount= -max_generate_log_size= -statement= -log_min_duration_statement= -log_statement= -output_sql_file= -datanode= -pg_log_path= - -ai_monitor_url= - diff --git a/src/gausskernel/dbmind/tools/index_advisor/extract_log.py b/src/gausskernel/dbmind/tools/index_advisor/extract_log.py deleted file mode 100644 index fe567e5b4..000000000 --- a/src/gausskernel/dbmind/tools/index_advisor/extract_log.py +++ /dev/null @@ -1,258 +0,0 @@ -import re -import os -import argparse -import json -import random -import time -from subprocess import Popen, PIPE - -SQL_TYPE = ['select ', 'delete ', 'insert ', 'update '] -SQL_AMOUNT = 0 -PLACEHOLDER = r'@@@' -SAMPLE_NUM = 5 -UPDATE_THRESHOLD = 7 -TEMPLATE_LENGTH_THRESHOLD = 5e+03 -IS_ALL_LATEST_SQL = False -SQL_PATTERN = [r'\((\s*(\d+(\.\d+)?\s*)[,]?)+\)', # match integer set in the IN collection - r'([^\\])\'((\')|(.*?([^\\])\'))', # match all content in single quotes - r'(([^<>]\s*=\s*)|([^<>]\s+))(\d+)(\.\d+)?'] # match single integer - - -def truncate_template(templates, update_time, avg_update): - global IS_ALL_LATEST_SQL - prune_list = [] - # get the currently unupdated template list - if not IS_ALL_LATEST_SQL: - for sql_template, sql_detail in templates.items(): - if sql_detail['update'][-1] != update_time and len(sql_detail['update']) < avg_update: - prune_list.append((sql_template, len(sql_detail['update']))) - # filter by update frequency - if len(prune_list) > len(templates)/SAMPLE_NUM: - sorted(prune_list, key=lambda elem: elem[1]) - prune_list = prune_list[:len(templates)//SAMPLE_NUM] - if len(prune_list): - for item in prune_list: - del templates[item] - return True - IS_ALL_LATEST_SQL = True - # if all templates have been updated, then randomly selected one to be deleted - if random.random() < 0.5: - del templates[random.sample(templates.keys(), 1)] - return True - return False - - -def get_workload_template(templates, sqls): - update_time = time.time() - invalid_template = [] - total_update = 0 - is_record = True - # delete templates that have not been updated within UPDATE_THRESHOLD threshold - for sql_template, sql_detail in templates.items(): - if (update_time - sql_detail['update'][-1])/60/60/24 >= UPDATE_THRESHOLD: - invalid_template.append(sql_template) - continue - total_update += len(sql_detail['update']) - avg_update = (total_update / len(templates)) if len(templates) else 0 - for item in invalid_template: - del templates[item] - for sql in sqls: - sql_template = sql - for pattern in SQL_PATTERN: - sql_template = re.sub(pattern, PLACEHOLDER, sql_template) - if sql_template not in templates: - # prune the templates if the total size is greater than the given threshold - if len(templates) > TEMPLATE_LENGTH_THRESHOLD: - is_record = truncate_template(templates, update_time, avg_update) - if not is_record: - continue - 
templates[sql_template] = {} - templates[sql_template]['cnt'] = 0 - templates[sql_template]['samples'] = [] - templates[sql_template]['update'] = [] - templates[sql_template]['cnt'] += 1 - # clear the update threshold outside - for ind, item in enumerate(templates[sql_template]['update']): - if (update_time - item)/60/60/24 < UPDATE_THRESHOLD: - templates[sql_template]['update'] = templates[sql_template]['update'][ind:] - break - # update the last update time of the sql template - if update_time not in templates[sql_template]['update']: - templates[sql_template]['update'].append(update_time) - # reservoir sampling - if len(templates[sql_template]['samples']) < SAMPLE_NUM: - if sql not in templates[sql_template]['samples']: - templates[sql_template]['samples'].append(sql) - else: - if random.randint(0, templates[sql_template]['cnt']) < SAMPLE_NUM: - templates[sql_template]['samples'][random.randint(0, SAMPLE_NUM - 1)] = sql - - -def output_valid_sql(sql): - is_quotation_valid = sql.count("'") % 2 - if 'from pg_' in sql.lower() or 'gs_index_advise' in sql.lower() or is_quotation_valid: - return '' - if any(tp in sql.lower() for tp in SQL_TYPE[1:]) or \ - (SQL_TYPE[0] in sql.lower() and 'from ' in sql.lower()): - sql = re.sub(r'for\s+update[\s;]*$', '', sql, flags=re.I) - return sql.strip() if sql.endswith('; ') else sql + ';' - return '' - - -def get_parsed_sql(file, user, database, sql_amount, statement): - global SQL_AMOUNT - line = file.readline() - sql = '' - statement_flag = False - execute_flag = False - - while line: - if sql_amount and SQL_AMOUNT == sql_amount: - break - try: - # Identify statement scene - if re.search('statement: ', line.lower(), re.IGNORECASE) and statement: - if output_valid_sql(sql): - SQL_AMOUNT += 1 - yield output_valid_sql(sql) - log_info = line.split(' ') - if (user and user not in log_info) or ( - database and database not in log_info): - line = file.readline() - continue - statement_flag = True - sql = re.search(r'statement: (.*)', line.strip(), re.IGNORECASE).group(1) + ' ' - line = file.readline() - - # Identify execute statement scene - elif re.search(r'execute .*:', line, re.IGNORECASE): - if output_valid_sql(sql): - SQL_AMOUNT += 1 - yield output_valid_sql(sql) - log_info = line.split(' ') - if (user and user not in log_info) or ( - database and database not in log_info): - line = file.readline() - continue - execute_flag = True - sql = re.search(r'execute .*: (.*)', line.strip(), re.IGNORECASE).group(1) - line = file.readline() - else: - if statement_flag: - if re.match(r'^\t', line): - sql += line.strip('\t\n') - else: - statement_flag = False - if output_valid_sql(sql): - SQL_AMOUNT += 1 - yield output_valid_sql(sql) - sql = '' - if execute_flag: - execute_flag = False - if re.search(r'parameters: ', line, re.IGNORECASE): - param_list = re.search(r'parameters: (.*)', line.strip(), - re.IGNORECASE).group(1).split(', $') - param_list = list(param.split('=', 1) for param in param_list) - param_list.sort(key=lambda x: int(x[0].strip(' $')), - reverse=True) - for item in param_list: - sql = sql.replace(item[0].strip() if re.match(r'\$', item[0]) else - ('$' + item[0].strip()), item[1].strip()) - if output_valid_sql(sql): - SQL_AMOUNT += 1 - yield output_valid_sql(sql) - sql = '' - line = file.readline() - except: - execute_flag = False - statement_flag = False - line = file.readline() - - -def get_start_position(start_time, file_path): - while start_time: - cmd = 'head -n $(cat %s | grep -m 1 -n "^%s" | awk -F : \'{print $1}\') %s | wc -c' % \ - 
(file_path, start_time, file_path) - proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) - std, err_msg = proc.communicate() - if proc.returncode == 0 and not err_msg: - return int(std) - elif len(start_time) > 13: - start_time = start_time[0: -3] - else: - break - return -1 - - -def record_sql(valid_files, args, output_obj): - for ind, file in enumerate(valid_files): - if args.sql_amount and SQL_AMOUNT >= args.sql_amount: - break - file_path = os.path.join(args.l, file) - if os.path.isfile(file_path) and re.search(r'.log$', file): - start_position = 0 - if ind == 0 and args.start_time: - start_position = get_start_position(args.start_time, file_path) - if start_position == -1: - continue - with open(file_path, mode='r') as f: - f.seek(start_position, 0) - if isinstance(output_obj, dict): - get_workload_template(output_obj, get_parsed_sql(f, args.U, args.d, - args.sql_amount, - args.statement)) - else: - for sql in get_parsed_sql(f, args.U, args.d, args.sql_amount, args.statement): - output_obj.write(sql + '\n') - - -def extract_sql_from_log(args): - files = os.listdir(args.l) - files = sorted(files, key=lambda x: os.path.getctime(os.path.join(args.l, x)), reverse=True) - valid_files = files - if args.start_time: - time_stamp = int(time.mktime(time.strptime(args.start_time, '%Y-%m-%d %H:%M:%S'))) - valid_files = [] - for file in files: - if os.path.getmtime(os.path.join(args.l, file)) < time_stamp: - break - valid_files.insert(0, file) - if args.json: - try: - with open(args.f, 'r') as output_file: - templates = json.load(output_file) - except (json.JSONDecodeError, FileNotFoundError) as e: - templates = {} - record_sql(valid_files, args, templates) - with open(args.f, 'w') as output_file: - json.dump(templates, output_file) - else: - with open(args.f, 'w') as output_file: - record_sql(valid_files, args, output_file) - - -def main(): - arg_parser = argparse.ArgumentParser() - arg_parser.add_argument("l", help="The path of the log file that needs to be parsed.") - arg_parser.add_argument("f", help="The output path of the extracted file.") - arg_parser.add_argument("-d", help="Name of database") - arg_parser.add_argument("-U", help="Username for database log-in") - arg_parser.add_argument("--start_time", help="Start time of extracted log") - arg_parser.add_argument("--sql_amount", help="The number of sql collected", type=int) - arg_parser.add_argument("--statement", action='store_true', help="Extract statement log type", - default=False) - arg_parser.add_argument("--json", action='store_true', - help="Whether the workload file format is json", default=False) - - args = arg_parser.parse_args() - if args.start_time: - time.strptime(args.start_time, '%Y-%m-%d %H:%M:%S') - if args.sql_amount is not None and args.sql_amount <= 0: - raise argparse.ArgumentTypeError("%s is an invalid positive int value" % args.sql_amount) - extract_sql_from_log(args) - - -if __name__ == '__main__': - main() - - diff --git a/src/gausskernel/dbmind/tools/index_advisor/index_advisor_workload.py b/src/gausskernel/dbmind/tools/index_advisor/index_advisor_workload.py deleted file mode 100644 index 76793de36..000000000 --- a/src/gausskernel/dbmind/tools/index_advisor/index_advisor_workload.py +++ /dev/null @@ -1,813 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. 
-You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" -import os -import sys -import argparse -import copy -import getpass -import random -import re -import json -import select -import logging - -from DAO.gsql_execute import GSqlExecute -from mcts import MCTS - -ENABLE_MULTI_NODE = False -SAMPLE_NUM = 5 -MAX_INDEX_COLUMN_NUM = 5 -MAX_INDEX_NUM = 10 -MAX_INDEX_STORAGE = None -FULL_ARRANGEMENT_THRESHOLD = 20 -NEGATIVE_RATIO_THRESHOLD = 0.2 -SHARP = '#' -JSON_TYPE = False -DRIVER = None -BLANK = ' ' -SQL_TYPE = ['select', 'delete', 'insert', 'update'] -SQL_PATTERN = [r'\((\s*(\d+(\.\d+)?\s*)[,]?)+\)', # match integer set in the IN collection - r'([^\\])\'((\')|(.*?([^\\])\'))', # match all content in single quotes - r'(([^<>]\s*=\s*)|([^<>]\s+))(\d+)(\.\d+)?'] # match single integer -SQL_DISPLAY_PATTERN = [r'\((\s*(\d+(\.\d+)?\s*)[,]?)+\)', # match integer set in the IN collection - r'\'((\')|(.*?\'))', # match all content in single quotes - r'([^\_\d])\d+(\.\d+)?'] # match single integer -logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') - - -class CheckValid(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - ill_character = [" ", "|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", - "{", "}", "(", ")", "[", "]", "~", "*", "?", "!", "\n"] - if not values.strip(): - return - if any(ill_char in values for ill_char in ill_character): - raise Exception("There are illegal characters in the %s." % self.dest) - setattr(namespace, self.dest, values) - - -def read_input_from_pipe(): - """ - Read stdin input if there is "echo 'str1 str2' | python xx.py", - return the input string - """ - input_str = "" - r_handle, _, _ = select.select([sys.stdin], [], [], 0) - if not r_handle: - return "" - - for item in r_handle: - if item == sys.stdin: - input_str = sys.stdin.read().strip() - return input_str - - -class PwdAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - password = read_input_from_pipe() - if password: - logging.warning("Read password from pipe.") - else: - password = getpass.getpass("Password for database user:") - setattr(namespace, self.dest, password) - - -class QueryItem: - def __init__(self, sql, freq): - self.statement = sql - self.frequency = freq - self.valid_index_list = [] - self.cost_list = [] - - -class IndexItem: - instances = {} - - @classmethod - def get_index(cls, tbl, cols): - if not (tbl, cols) in cls.instances: - cls.instances[(tbl, cols)] = cls(tbl, cols) - return cls.instances[(tbl, cols)] - - def __init__(self, tbl, cols): - self.table = tbl - self.columns = cols - self.atomic_pos = 0 - self.benefit = 0 - self.storage = 0 - self.positive_pos = [] - self.ineffective_pos = [] - self.negative_pos = [] - self.total_sql_num = 0 - self.insert_sql_num = 0 - self.update_sql_num = 0 - self.delete_sql_num = 0 - self.select_sql_num = 0 - self.is_candidate = False - - -def green(text): - return '\033[32m%s\033[0m' % text - - -def print_header_boundary(header): - # Output a header first, which looks more beautiful. - try: - term_width = os.get_terminal_size().columns - # The width of each of the two sides of the terminal. 
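For context on the deleted advisor code that follows: its `get_workload_template` compresses a workload in two steps. Literals in each SQL statement are rewritten to a placeholder so statements collapse into templates, and at most `SAMPLE_NUM` concrete statements are kept per template via reservoir sampling, bounding memory while giving every statement an equal chance of being retained. A condensed, runnable sketch (the real `SQL_PATTERN` list is more elaborate, and `add_to_templates` is an illustrative name):

```
import random
import re

PLACEHOLDER = '@@@'
SAMPLE_NUM = 5
# Simplified literal patterns (the original list is more involved):
SQL_PATTERN = [r"'[^']*'",           # string literals
               r'\b\d+(\.\d+)?\b']   # numeric literals

def add_to_templates(templates, sql):
    template = sql
    for pattern in SQL_PATTERN:
        template = re.sub(pattern, PLACEHOLDER, template)
    entry = templates.setdefault(template, {'cnt': 0, 'samples': []})
    entry['cnt'] += 1
    if len(entry['samples']) < SAMPLE_NUM:
        entry['samples'].append(sql)
    # Reservoir sampling: keep each of the n statements seen so far
    # with probability SAMPLE_NUM / n.
    elif random.randint(0, entry['cnt'] - 1) < SAMPLE_NUM:
        entry['samples'][random.randint(0, SAMPLE_NUM - 1)] = sql

templates = {}
for i in range(100):
    add_to_templates(templates, "select * from t where id = %d" % i)
print(list(templates), templates[list(templates)[0]]['cnt'])
```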
- side_width = (term_width - len(header)) // 2 - except (AttributeError, OSError): - side_width = 0 - title = SHARP * side_width + header + SHARP * side_width - print(green(title)) - - -def filter_low_benefit(candidate_indexes, multi_iter_mode, workload): - remove_list = [] - for key, index in enumerate(candidate_indexes): - sql_optimzed = 0 - if multi_iter_mode: - cost_list_pos = index.atomic_pos - else: - cost_list_pos = key + 1 - for ind, pos in enumerate(index.positive_pos): - sql_optimzed += 1 - workload[pos].cost_list[cost_list_pos] / workload[pos].cost_list[0] - negative_ratio = ((index.insert_sql_num + index.delete_sql_num + - index.update_sql_num) / index.total_sql_num) if index.total_sql_num else 0 - # filter the candidate indexes that do not meet the conditions of optimization - if not index.positive_pos: - remove_list.append(key) - elif sql_optimzed / len(index.positive_pos) < 0.1: - remove_list.append(key) - elif sql_optimzed / len(index.positive_pos) < NEGATIVE_RATIO_THRESHOLD < negative_ratio: - remove_list.append(key) - for item in sorted(remove_list, reverse=True): - candidate_indexes.pop(item) - - -def display_recommend_result(workload, candidate_indexes, index_cost_total, multi_iter_mode, - display_info, integrate_indexes, history_invalid_indexes): - cnt = 0 - index_current_storage = 0 - # filter candidate indexes with low benefit - filter_low_benefit(candidate_indexes, multi_iter_mode, workload) - # display determine result - integrate_indexes['currentIndexes'] = dict() - for key, index in enumerate(candidate_indexes): - integrate_indexes['currentIndexes'][index.table] = \ - integrate_indexes['currentIndexes'].get(index.table, list()) - integrate_indexes['currentIndexes'][index.table].append(index.columns) - # association history recommendation results - if integrate_indexes['historyIndexes']: - from DAO.execute_factory import ExecuteFactory - ExecuteFactory.match_last_result(index.table, index.columns, - integrate_indexes, history_invalid_indexes) - if MAX_INDEX_STORAGE and (index_current_storage + index.storage) > MAX_INDEX_STORAGE: - continue - if MAX_INDEX_NUM and cnt == MAX_INDEX_NUM: - break - index_current_storage += index.storage - table_name = index.table.split('.')[-1] - index_name = 'idx_' + table_name + '_' + '_'.join(index.columns.split(', ')) - statement = 'CREATE INDEX ' + index_name + ' ON ' + index.table + '(' + index.columns + ');' - print(statement) - cnt += 1 - if multi_iter_mode: - cost_list_pos = index.atomic_pos - else: - cost_list_pos = key + 1 - - sql_info = {'sqlDetails': []} - benefit_types = [index.ineffective_pos, index.positive_pos, index.negative_pos] - for category, benefit_type in enumerate(benefit_types): - sql_count = 0 - for item in benefit_type: - sql_count += workload[item].frequency - for ind, pos in enumerate(benefit_type): - sql_detail = {} - sql_template = workload[pos].statement - for pattern in SQL_DISPLAY_PATTERN: - sql_template = re.sub(pattern, '?', sql_template) - - sql_detail['sqlTemplate'] = sql_template - sql_detail['sql'] = workload[pos].statement - sql_detail['sqlCount'] = int(round(sql_count)) - if category == 1: - sql_optimzed = (workload[pos].cost_list[0] - - workload[pos].cost_list[cost_list_pos]) / \ - workload[pos].cost_list[cost_list_pos] - sql_detail['optimized'] = '%.3f' % sql_optimzed - sql_detail['correlationType'] = category - sql_info['sqlDetails'].append(sql_detail) - workload_optimized = (1 - index_cost_total[cost_list_pos] / index_cost_total[0]) * 100 - sql_info['workloadOptimized'] = '%.2f' % 
(workload_optimized if workload_optimized > 1 else 1) - sql_info['schemaName'] = index.table.split('.')[0] - sql_info['tbName'] = table_name - sql_info['columns'] = index.columns - sql_info['statement'] = statement - sql_info['dmlCount'] = round(index.total_sql_num) - sql_info['selectRatio'] = round((index.select_sql_num * 100 / - index.total_sql_num) if index.total_sql_num else 0, 2) - sql_info['insertRatio'] = round((index.insert_sql_num * 100 / - index.total_sql_num) if index.total_sql_num else 0, 2) - sql_info['deleteRatio'] = round((index.delete_sql_num * 100 / - index.total_sql_num) if index.total_sql_num else 0, 2) - sql_info['updateRatio'] = round((100 - sql_info['selectRatio'] - sql_info['insertRatio'] - - sql_info['deleteRatio']) if index.total_sql_num else 0, 2) - display_info['recommendIndexes'].append(sql_info) - - -def load_workload(file_path): - wd_dict = {} - workload = [] - global BLANK - with open(file_path, 'r') as file: - raw_text = ''.join(file.readlines()) - sqls = raw_text.split(';') - for sql in sqls: - if any(tp in sql.lower() for tp in SQL_TYPE): - TWO_BLANKS = BLANK * 2 - while TWO_BLANKS in sql: - sql = sql.replace(TWO_BLANKS, BLANK) - if sql not in wd_dict.keys(): - wd_dict[sql] = 1 - else: - wd_dict[sql] += 1 - for sql, freq in wd_dict.items(): - workload.append(QueryItem(sql, freq)) - - return workload - - -def get_workload_template(workload): - templates = {} - placeholder = r'@@@' - - for item in workload: - sql_template = item.statement - for pattern in SQL_PATTERN: - sql_template = re.sub(pattern, placeholder, sql_template) - if sql_template not in templates: - templates[sql_template] = {} - templates[sql_template]['cnt'] = 0 - templates[sql_template]['samples'] = [] - templates[sql_template]['cnt'] += item.frequency - # reservoir sampling - if len(templates[sql_template]['samples']) < SAMPLE_NUM: - templates[sql_template]['samples'].append(item.statement) - else: - if random.randint(0, templates[sql_template]['cnt']) < SAMPLE_NUM: - templates[sql_template]['samples'][random.randint(0, SAMPLE_NUM - 1)] = \ - item.statement - - return templates - - -def workload_compression(input_path): - compressed_workload = [] - total_num = 0 - if JSON_TYPE: - with open(input_path, 'r') as file: - templates = json.load(file) - else: - workload = load_workload(input_path) - templates = get_workload_template(workload) - - for _, elem in templates.items(): - for sql in elem['samples']: - compressed_workload.append(QueryItem(sql.strip(), elem['cnt'] / len(elem['samples']))) - total_num += elem['cnt'] - return compressed_workload, total_num - - -# enumerate the column combinations for a suggested index -def get_indexable_columns(table_index_dict): - query_indexable_columns = {} - if len(table_index_dict) == 0: - return query_indexable_columns - - for table in table_index_dict.keys(): - query_indexable_columns[table] = [] - for columns in table_index_dict[table]: - indexable_columns = columns.split(',') - for column in indexable_columns: - query_indexable_columns[table].append(column) - - return query_indexable_columns - - -def get_valid_index_dict(table_index_dict, query, db): - need_check = False - query_indexable_columns = get_indexable_columns(table_index_dict) - valid_index_dict = db.query_index_check(query.statement, query_indexable_columns, - ENABLE_MULTI_NODE) - - for i in range(MAX_INDEX_COLUMN_NUM): - for table in valid_index_dict.keys(): - for columns in valid_index_dict[table]: - if columns.count(',') == i: - need_check = True - for single_column in 
query_indexable_columns[table]: - if single_column not in columns: - valid_index_dict[table].append(columns + ',' + single_column) - if need_check: - valid_index_dict = db.query_index_check(query.statement, valid_index_dict, - ENABLE_MULTI_NODE) - need_check = False - else: - break - return valid_index_dict - - -def generate_candidate_indexes(workload, workload_table_name, db): - candidate_indexes = [] - index_dict = {} - if DRIVER: - db.init_conn_handle() - for k, query in enumerate(workload): - if not re.search(r'(\A|\s)select\s', query.statement.lower()): - continue - table_index_dict = db.query_index_advisor(query.statement, workload_table_name) - valid_index_dict = get_valid_index_dict(table_index_dict, query, db) - - # record valid indexes for every sql of workload and generate candidate indexes - for table in valid_index_dict.keys(): - if table not in index_dict.keys(): - index_dict[table] = {} - for columns in valid_index_dict[table]: - if len(workload[k].valid_index_list) >= FULL_ARRANGEMENT_THRESHOLD: - break - workload[k].valid_index_list.append(IndexItem.get_index(table, columns)) - if columns in index_dict[table]: - index_dict[table][columns].append(k) - else: - column_sql = {columns: [k]} - index_dict[table].update(column_sql) - # filter redundant indexes for candidate indexes - for table, column_sqls in index_dict.items(): - sorted_column_sqls = sorted(column_sqls.items(), key=lambda item: item[0]) - for i in range(len(sorted_column_sqls) - 1): - if re.match(sorted_column_sqls[i][0], sorted_column_sqls[i+1][0]): - sorted_column_sqls[i+1][1].extend(sorted_column_sqls[i][1]) - else: - print("table: ", table, "columns: ", sorted_column_sqls[i][0]) - candidate_indexes.append(IndexItem.get_index(table, sorted_column_sqls[i][0], - )) - print("table: ", table, "columns: ", sorted_column_sqls[-1][0]) - candidate_indexes.append( - IndexItem.get_index(table, sorted_column_sqls[-1][0])) - for index in candidate_indexes: - index.is_candidate = True - if DRIVER: - db.close_conn() - return candidate_indexes - - -def get_atomic_config_for_query(indexes, config, ind, atomic_configs): - if ind == len(indexes): - table_count = {} - for index in config: - if index.table not in table_count.keys(): - table_count[index.table] = 1 - else: - table_count[index.table] += 1 - if len(table_count) > 2 or table_count[index.table] > 2: - return - atomic_configs.append(config) - - return - - get_atomic_config_for_query(indexes, copy.copy(config), ind + 1, atomic_configs) - config.append(indexes[ind]) - get_atomic_config_for_query(indexes, copy.copy(config), ind + 1, atomic_configs) - - -def is_same_config(config1, config2): - if len(config1) != len(config2): - return False - - for index1 in config1: - is_found = False - for index2 in config2: - if index1.table == index2.table and index1.columns == index2.columns: - is_found = True - if not is_found: - return False - - return True - - -def generate_atomic_config(workload): - atomic_config_total = [] - - for query in workload: - if len(query.valid_index_list) == 0: - continue - - atomic_configs = [] - config = [] - get_atomic_config_for_query(query.valid_index_list, config, 0, atomic_configs) - - is_found = False - for new_config in atomic_configs: - for exist_config in atomic_config_total: - if is_same_config(new_config, exist_config): - is_found = True - break - if not is_found: - atomic_config_total.append(new_config) - is_found = False - - return atomic_config_total - - -# find the subsets of a given config in the atomic configs -def 
find_subsets_num(config, atomic_config_total): - atomic_subsets_num = [] - is_exist = False - cur_index_atomic_pos = -1 - for i, atomic_config in enumerate(atomic_config_total): - if len(atomic_config) > len(config): - continue - # Record the atomic index position of the newly added index - if len(atomic_config) == 1 and atomic_config[0].table == config[-1].table and \ - atomic_config[0].columns == config[-1].columns: - cur_index_atomic_pos = i - for atomic_index in atomic_config: - is_exist = False - for index in config: - if atomic_index.table == index.table and atomic_index.columns == index.columns: - index.storage = atomic_index.storage - is_exist = True - break - if not is_exist: - break - if is_exist: - atomic_subsets_num.append(i) - if cur_index_atomic_pos == -1: - raise ValueError("No atomic configs found for current config!") - return atomic_subsets_num, cur_index_atomic_pos - - -def get_index_num(index, atomic_config_total): - for i, atomic_config in enumerate(atomic_config_total): - if len(atomic_config) == 1 and atomic_config[0].table == index.table and \ - atomic_config[0].columns == index.columns: - return i - - return -1 - - -# infer the total cost of workload for a config according to the cost of atomic configs -def infer_workload_cost(workload, config, atomic_config_total): - total_cost = 0 - is_computed = False - atomic_subsets_num, cur_index_atomic_pos = find_subsets_num(config, atomic_config_total) - if len(atomic_subsets_num) == 0: - raise ValueError("No atomic configs found for current config!") - if not config[-1].total_sql_num: - is_computed = True - for ind, obj in enumerate(workload): - if max(atomic_subsets_num) >= len(obj.cost_list): - raise ValueError("Wrong atomic config for current query!") - # compute the cost for selection - min_cost = obj.cost_list[0] - for num in atomic_subsets_num: - if num < len(obj.cost_list) and obj.cost_list[num] < min_cost: - min_cost = obj.cost_list[num] - total_cost += min_cost - - # record ineffective sql and negative sql for candidate indexes - if is_computed: - from DAO.execute_factory import ExecuteFactory - ExecuteFactory.record_ineffective_negative_sql(config[-1], obj, ind) - return total_cost, cur_index_atomic_pos - - -def get_whole_index(tables, db, detail_info, history_indexes, history_invalid_index): - if DRIVER: - db.init_conn_handle() - whole_index, redundant_indexes = \ - db.check_useless_index(tables, history_indexes, history_invalid_index) - if DRIVER: - db.close_conn() - print_header_boundary(" Created indexes ") - detail_info['createdIndexes'] = [] - if not whole_index: - print("No created index!") - else: - for index in whole_index: - index_info = {'schemaName': index.schema, 'tbName': index.table, - 'columns': index.columns, 'statement': index.indexdef + ';'} - detail_info['createdIndexes'].append(index_info) - print("%s: %s;" % (index.schema, index.indexdef)) - return whole_index, redundant_indexes - - -def display_last_recommend_result(integrate_indexes, history_invalid_indexes, input_path): - # display historical effective indexes - if integrate_indexes['historyIndexes']: - print_header_boundary(" Historical effective indexes ") - for table_name, index_list in integrate_indexes['historyIndexes'].items(): - for column in index_list: - index_name = 'idx_' + table_name.split('.')[-1] + '_' + '_'.join(column.split(', ')) - statement = 'CREATE INDEX ' + index_name + ' ON ' + table_name + '(' + column + ');' - print(statement) - # display historical invalid indexes - if history_invalid_indexes: - 
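The `greedy_determine_opt_config` routine further below (also being deleted here) is a classic greedy search: starting from an empty set, repeatedly add whichever candidate index lowers the estimated workload cost the most, and stop once the index-count or storage budget is hit or no candidate improves anything. A toy version with a pluggable cost function (`greedy_select` and the cost numbers are illustrative):

```
def greedy_select(candidates, workload_cost, max_num):
    chosen, best_cost = [], workload_cost([])
    while len(chosen) < max_num:
        best_candidate, trial_cost = None, best_cost
        for cand in candidates:
            if cand in chosen:
                continue
            cost = workload_cost(chosen + [cand])
            if cost < trial_cost:
                best_candidate, trial_cost = cand, cost
        if best_candidate is None:  # no remaining candidate helps
            break
        chosen.append(best_candidate)
        best_cost = trial_cost
    return chosen, best_cost

# Tiny demo: each "index" shaves a fixed amount off a base cost of 100.
costs = {'idx_a': 30, 'idx_b': 20, 'idx_c': 1}
print(greedy_select(list(costs), lambda sel: 100 - sum(costs[i] for i in sel), 2))
```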
print_header_boundary(" Historical invalid indexes ") - for table_name, index_list in history_invalid_indexes.items(): - for column in index_list: - index_name = 'idx_' + table_name.split('.')[-1] + '_' + '_'.join(column.split(', ')) - statement = 'CREATE INDEX ' + index_name + ' ON ' + table_name + '(' + column + ');' - print(statement) - # save integrate indexes result - integrate_indexes_file = os.path.join(os.path.dirname(input_path), 'index_result.json') - if integrate_indexes.get('currentIndexes'): - for table, indexes in integrate_indexes['currentIndexes'].items(): - integrate_indexes['historyIndexes'][table] = \ - integrate_indexes['historyIndexes'].get(table, list()) - integrate_indexes['historyIndexes'][table].extend(indexes) - integrate_indexes['historyIndexes'][table] = \ - list(set(integrate_indexes['historyIndexes'][table])) - with open(integrate_indexes_file, 'w') as file: - json.dump(integrate_indexes['historyIndexes'], file) - - -def check_unused_index_workload(whole_indexes, redundant_indexes, workload_indexes, detail_info): - indexes_name = set(index.indexname for index in whole_indexes) - unused_index = list(indexes_name.difference(workload_indexes)) - remove_list = [] - print_header_boundary(" Current workload useless indexes ") - detail_info['uselessIndexes'] = [] - # useless index - unused_index_columns = dict() - has_unused_index = False - for cur_index in unused_index: - for index in whole_indexes: - if cur_index == index.indexname: - unused_index_columns[cur_index] = index.columns - if 'UNIQUE INDEX' not in index.indexdef: - has_unused_index = True - statement = "DROP INDEX %s;" % index.indexname - print(statement) - useless_index = {"schemaName": index.schema, "tbName": index.table, "type": 3, - "columns": index.columns, "statement": statement} - detail_info['uselessIndexes'].append(useless_index) - if not has_unused_index: - print("No useless index!") - print_header_boundary(" Redundant indexes ") - # filter redundant index - for pos, index in enumerate(redundant_indexes): - is_redundant = False - for redundant_obj in index.redundant_obj: - # redundant objects are not in the useless index set or - # equal to the column value in the useless index must be redundant index - index_exist = redundant_obj.indexname not in unused_index_columns.keys() or \ - (unused_index_columns.get(redundant_obj.indexname) and - redundant_obj.columns == unused_index_columns[redundant_obj.indexname]) - if index_exist: - is_redundant = True - if not is_redundant: - remove_list.append(pos) - for item in sorted(remove_list, reverse=True): - redundant_indexes.pop(item) - - if not redundant_indexes: - print("No redundant index!") - # redundant index - for index in redundant_indexes: - statement = "DROP INDEX %s.%s;" % \ - (index.schema, index.indexname) - print(statement) - existing_index = [item.indexname + ':' + item.columns for item in index.redundant_obj] - redundant_index = {"schemaName": index.schema, "tbName": index.table, "type": 2, - "columns": index.columns, "statement": statement, - "existingIndex": existing_index} - detail_info['uselessIndexes'].append(redundant_index) - - -def simple_index_advisor(input_path, max_index_num, integrate_indexes, db): - workload, workload_count = workload_compression(input_path) - print_header_boundary(" Generate candidate indexes ") - ori_indexes_name = set() - history_invalid_indexes = {} - workload_table_name = dict() - display_info = {'workloadCount': workload_count, 'recommendIndexes': []} - candidate_indexes = 
generate_candidate_indexes(workload, workload_table_name, db) - if DRIVER: - db.init_conn_handle() - if len(candidate_indexes) == 0: - print("No candidate indexes generated!") - db.estimate_workload_cost_file(workload, ori_indexes_name=ori_indexes_name) - if DRIVER: - db.close_conn() - return ori_indexes_name, workload_table_name, display_info, history_invalid_indexes - - print_header_boundary(" Determine optimal indexes ") - ori_total_cost = db.estimate_workload_cost_file(workload, ori_indexes_name=ori_indexes_name) - index_cost_total = [ori_total_cost] - for _, obj in enumerate(candidate_indexes): - new_total_cost = db.estimate_workload_cost_file(workload, [obj]) - obj.benefit = ori_total_cost - new_total_cost - if obj.benefit > 0: - index_cost_total.append(new_total_cost) - if DRIVER: - db.close_conn() - if len(index_cost_total) == 1: - print("No optimal indexes generated!") - return ori_indexes_name, workload_table_name, display_info, history_invalid_indexes - global MAX_INDEX_NUM - MAX_INDEX_NUM = max_index_num - # match the last recommendation result - display_recommend_result(workload, candidate_indexes, index_cost_total, False, display_info, - integrate_indexes, history_invalid_indexes) - return ori_indexes_name, workload_table_name, display_info, history_invalid_indexes - - -def greedy_determine_opt_config(workload, atomic_config_total, candidate_indexes, origin_sum_cost): - opt_config = [] - index_num_record = set() - min_cost = origin_sum_cost - for i in range(len(candidate_indexes)): - if i == 1 and min_cost == origin_sum_cost: - break - cur_min_cost = origin_sum_cost - cur_index = None - cur_index_num = -1 - for k, index in enumerate(candidate_indexes): - if k in index_num_record: - continue - cur_config = copy.copy(opt_config) - cur_config.append(index) - cur_estimated_cost, cur_index_atomic_pos = \ - infer_workload_cost(workload, cur_config, atomic_config_total) - if cur_estimated_cost < cur_min_cost: - cur_min_cost = cur_estimated_cost - cur_index = index - cur_index.atomic_pos = cur_index_atomic_pos - cur_index_num = k - if cur_index and cur_min_cost < min_cost: - if MAX_INDEX_STORAGE and sum([obj.storage for obj in opt_config]) + \ - cur_index.storage > MAX_INDEX_STORAGE: - candidate_indexes.remove(cur_index) - continue - if len(opt_config) == MAX_INDEX_NUM: - break - min_cost = cur_min_cost - opt_config.append(cur_index) - index_num_record.add(cur_index_num) - else: - break - - return opt_config - - -def complex_index_advisor(input_path, integrate_indexes, db): - workload, workload_count = workload_compression(input_path) - print_header_boundary(" Generate candidate indexes ") - history_invalid_indexes = {} - ori_indexes_name = set() - workload_table_name = dict() - display_info = {'workloadCount': workload_count, 'recommendIndexes': []} - candidate_indexes = generate_candidate_indexes(workload, workload_table_name, db) - if DRIVER: - db.init_conn_handle() - if len(candidate_indexes) == 0: - print("No candidate indexes generated!") - db.estimate_workload_cost_file(workload, ori_indexes_name=ori_indexes_name) - if DRIVER: - db.close_conn() - return ori_indexes_name, workload_table_name, display_info, history_invalid_indexes - - print_header_boundary(" Determine optimal indexes ") - atomic_config_total = generate_atomic_config(workload) - if atomic_config_total and len(atomic_config_total[0]) != 0: - raise ValueError("The empty atomic config isn't generated!") - index_cost_total = [] - for atomic_config in atomic_config_total: - 
index_cost_total.append(db.estimate_workload_cost_file(workload, atomic_config, - ori_indexes_name)) - if DRIVER: - db.close_conn() - if MAX_INDEX_STORAGE: - opt_config = MCTS(workload, atomic_config_total, candidate_indexes, - MAX_INDEX_STORAGE, MAX_INDEX_NUM) - else: - opt_config = greedy_determine_opt_config(workload, atomic_config_total, - candidate_indexes, index_cost_total[0]) - if len(opt_config) == 0: - print("No optimal indexes generated!") - return ori_indexes_name, workload_table_name, display_info, history_invalid_indexes - # match the last invalid recommendation result - display_recommend_result(workload, opt_config, index_cost_total, True, display_info, - integrate_indexes, history_invalid_indexes) - return ori_indexes_name, workload_table_name, display_info, history_invalid_indexes - - -def get_last_indexes_result(input_path): - last_indexes_result_file = os.path.join(os.path.dirname(input_path), 'index_result.json') - integrate_indexes = {'historyIndexes': dict()} - if os.path.exists(last_indexes_result_file): - try: - with open(last_indexes_result_file, 'r') as file: - integrate_indexes['historyIndexes'] = json.load(file) - except json.JSONDecodeError: - return integrate_indexes - return integrate_indexes - - -def main(): - arg_parser = argparse.ArgumentParser(description='Generate index set for workload.') - arg_parser.add_argument("p", help="Port of database", type=int) - arg_parser.add_argument("d", help="Name of database", action=CheckValid) - arg_parser.add_argument("--h", help="Host for database", action=CheckValid) - arg_parser.add_argument("-U", help="Username for database log-in", action=CheckValid) - arg_parser.add_argument("-W", help="Password for database user", nargs="?", action=PwdAction) - arg_parser.add_argument("f", help="File containing workload queries (One query per line)") - arg_parser.add_argument("--schema", help="Schema name for the current business data", - required=True, action=CheckValid) - arg_parser.add_argument("--max_index_num", help="Maximum number of suggested indexes", type=int) - arg_parser.add_argument("--max_index_storage", - help="Maximum storage of suggested indexes/MB", type=int) - arg_parser.add_argument("--multi_iter_mode", action='store_true', - help="Whether to use multi-iteration algorithm", default=False) - arg_parser.add_argument("--multi_node", action='store_true', - help="Whether to support distributed scenarios", default=False) - arg_parser.add_argument("--json", action='store_true', - help="Whether the workload file format is json", default=False) - arg_parser.add_argument("--driver", action='store_true', - help="Whether to employ python-driver", default=False) - arg_parser.add_argument("--show_detail", action='store_true', - help="Whether to show detailed sql information", default=False) - args = arg_parser.parse_args() - - global MAX_INDEX_NUM, ENABLE_MULTI_NODE, MAX_INDEX_STORAGE, JSON_TYPE, DRIVER - if args.max_index_num is not None and args.max_index_num <= 0: - raise argparse.ArgumentTypeError("%s is an invalid positive int value" % - args.max_index_num) - if args.max_index_storage is not None and args.max_index_storage <= 0: - raise argparse.ArgumentTypeError("%s is an invalid positive int value" % - args.max_index_storage) - - JSON_TYPE = args.json - MAX_INDEX_NUM = args.max_index_num or 10 - ENABLE_MULTI_NODE = args.multi_node - MAX_INDEX_STORAGE = args.max_index_storage - if args.U and args.U != getpass.getuser() and not args.W: - raise ValueError('Enter the \'-W\' parameter for user ' - + args.U + ' when 
executing the script.') - # Initialize the connection - if args.driver: - try: - from DAO.driver_execute import DriverExecute - db = DriverExecute(args.d, args.U, args.W, args.h, args.p, args.schema, - args.multi_node, args.max_index_storage) - except ImportError: - logging.warning('Python driver import failed, ' - 'the gsql mode will be selected to connect to the database.') - db = GSqlExecute(args.d, args.U, args.W, args.h, args.p, args.schema, - args.multi_node, args.max_index_storage) - db.init_conn_handle() - args.driver = None - else: - db = GSqlExecute(args.d, args.U, args.W, args.h, args.p, args.schema, - args.multi_node, args.max_index_storage) - db.init_conn_handle() - DRIVER = args.driver - integrate_indexes = get_last_indexes_result(args.f) - if args.multi_iter_mode: - workload_indexes, tables, detail_info, history_invalid_indexes = \ - complex_index_advisor(args.f, integrate_indexes, db) - else: - workload_indexes, tables, detail_info, history_invalid_indexes = \ - simple_index_advisor(args.f, args.max_index_num, integrate_indexes, db) - - whole_indexes, redundant_indexes = \ - get_whole_index(tables, db, detail_info, - integrate_indexes['historyIndexes'], history_invalid_indexes) - # check the unused indexes of the current workload based on the whole index - check_unused_index_workload(whole_indexes, redundant_indexes, workload_indexes, detail_info) - # display the results of the last index recommendation - display_last_recommend_result(integrate_indexes, history_invalid_indexes, args.f) - if args.show_detail: - print_header_boundary(" Display detail information ") - sql_info = json.dumps(detail_info, indent=4, separators=(',', ':')) - print(sql_info) - - -if __name__ == '__main__': - main() - diff --git a/src/gausskernel/dbmind/tools/index_advisor/index_server.py b/src/gausskernel/dbmind/tools/index_advisor/index_server.py deleted file mode 100644 index b8778d6ff..000000000 --- a/src/gausskernel/dbmind/tools/index_advisor/index_server.py +++ /dev/null @@ -1,372 +0,0 @@ -try: - import sys - import os - import argparse - import logging - import time - import signal - import re - import select - import urllib - import json - import datetime - from urllib.request import Request - from subprocess import Popen, PIPE - from threading import Thread, Event, Timer - from configparser import ConfigParser - from logging import handlers -except ImportError as err: - sys.exit("index_server.py: Failed to import module: %s." % str(err)) - - -current_dirname = os.path.dirname(os.path.realpath(__file__)) -__description__ = 'index advise: index server tool.' - - -class RepeatTimer(Thread): - """ - This class inherits from threading.Thread, it is used for periodic execution - function at a specified time interval. - """ - - def __init__(self, interval, function, *args, **kwargs): - Thread.__init__(self) - self._interval = interval - self._function = function - self._args = args - self._kwargs = kwargs - self._finished = Event() - - def run(self): - while not self._finished.is_set(): - # Execute first, wait later. 
- self._function(*self._args, **self._kwargs) - self._finished.wait(self._interval) - self._finished.set() - - def cancel(self): - self._finished.set() - - -class CreateLogger: - def __init__(self, level, log_name): - self.level = level - self.log_name = log_name - - def create_log(self, log_path): - logger = logging.getLogger(self.log_name) - log_path = os.path.join(os.path.dirname(log_path), 'log') - if os.path.exists(log_path): - if os.path.isfile(log_path): - os.remove(log_path) - os.mkdir(log_path) - else: - os.makedirs(log_path) - agent_handler = handlers.RotatingFileHandler(filename=os.path.join(log_path, self.log_name), - maxBytes=1024 * 1024 * 100, - backupCount=5) - agent_handler.setFormatter(logging.Formatter( - "[%(asctime)s %(levelname)s]-[%(filename)s][%(lineno)d]: %(message)s")) - logger.addHandler(agent_handler) - logger.setLevel(getattr(logging, self.level.upper()) - if hasattr(logging, self.level.upper()) else logging.INFO) - return logger - - -class IndexServer: - def __init__(self, pid_file, logger, password, **kwargs): - self.pid_file = pid_file - self.logger = logger - self.password = password - self._kwargs = kwargs - - def check_proc_exist(self, proc_name): - """ - check proc exist - :param proc_name: proc name - :return: proc pid - """ - check_proc = "ps ux | grep '%s' | grep -v grep | grep -v nohup | awk \'{print $2}\'" % proc_name - _, std = self.execute_cmd(check_proc) - current_pid = str(os.getpid()) - pid_list = [pid for pid in std.split("\n") if pid and pid != current_pid] - if not pid_list: - return "" - return " ".join(pid_list) - - def execute_cmd(self, cmd): - """ - execute cmd - :param cmd: cmd str - :param shell: execute shell mode, True or False - :return: execute result - """ - proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) - std, err_msg = proc.communicate() - if proc.returncode != 0: - self.logger.error("Failed to execute command. Error: %s." 
% str(err_msg)) - return proc.returncode, std.decode() - - def save_recommendation_infos(self, recommendation_infos): - headers = {'Content-Type': 'application/json'} - data = json.dumps(recommendation_infos, default=lambda o: o.__dict__, sort_keys=True, - indent=4).encode() - request = Request(url=self._kwargs['ai_monitor_url'], headers=headers, - data=data) - - response = None - try: - response = urllib.request.urlopen(request, timeout=600) - result = json.loads(response.read()) - finally: - if response: - response.close() - - return result - - def convert_output_to_recommendation_infos(self, sql_lines): - detail_info_pos = 0 - index_info = sql_lines.splitlines() - for pos, line in enumerate(index_info): - if 'Display detail information' in line: - detail_info_pos = pos + 1 - break - detail_info_json = json.loads('\n'.join(index_info[detail_info_pos:])) - detail_info_json['appName'] = self._kwargs.get('app_name') - detail_info_json['nodeHost'] = self._kwargs.get('host') - detail_info_json['dbName'] = self._kwargs.get('database') - return detail_info_json - - def execute_index_advisor(self): - self.logger.info('Index advisor task starting.') - try: - cmd = 'echo %s | python3 %s/index_advisor_workload.py %s %s %s -U %s -W ' \ - '--schema %s --json --multi_iter_mode --show_detail' % ( - self.password, current_dirname, self._kwargs['port'], self._kwargs['database'], - self._kwargs['output_sql_file'], self._kwargs['user'], self._kwargs['schema']) - if self._kwargs['max_index_storage']: - cmd += ' --max_index_storage %s ' % self._kwargs['max_index_storage'] - if self._kwargs['max_index_num']: - cmd += ' --max_index_num %s ' % self._kwargs['max_index_num'] - if self._kwargs['driver']: - try: - import psycopg2 - cmd += ' --driver' - except ImportError: - self.logger.warning('Driver import failed, use gsql to connect to the database.') - self.logger.info('Index advisor cmd:%s' % cmd.split('|')[-1]) - if os.path.exists(self._kwargs['output_sql_file']): - _, res = self.execute_cmd(cmd) - detail_info_json = self.convert_output_to_recommendation_infos(res) - - self.logger.info('Index advisor result: %s.' 
% detail_info_json) - result = self.save_recommendation_infos(detail_info_json) - if result['status'] is not True: - self.logger.error('Fail to upload index result, Error: %s' % result['message']) - else: - self.logger.info('Success to upload index result.') - except Exception as e: - self.logger.error(e) - - def extract_log(self, start_time): - extract_log_cmd = 'python3 %s %s %s --start_time "%s" --json ' % \ - (os.path.join(current_dirname, 'extract_log.py'), - self._kwargs['pg_log_path'], - self._kwargs['output_sql_file'], start_time) - if self._kwargs['database']: - extract_log_cmd += ' -d %s ' % self._kwargs['database'] - if self._kwargs['wl_user']: - extract_log_cmd += ' -U %s ' % self._kwargs['wl_user'] - if self._kwargs['sql_amount']: - extract_log_cmd += ' --sql_amount %s ' % self._kwargs['sql_amount'] - if self._kwargs['statement']: - extract_log_cmd += ' --statement ' - self.logger.info('Extracting log cmd: %s' % extract_log_cmd) - self.execute_cmd(extract_log_cmd) - self.logger.info('The current log extraction is complete.') - - def monitor_log_size(self, guc_reset): - self.logger.info('Open GUC params.') - # get original all file size - original_total_size = self.get_directory_size() - self.logger.info('Original total file size: %sM' % (original_total_size / 1024 / 1024)) - deviation_size = 0 - # open guc - guc_reload = 'gs_guc reload -Z datanode -D {datanode} -c "log_min_duration_statement = 0" && ' \ - 'gs_guc reload -Z datanode -D {datanode} -c "log_statement= \'all\'"' \ - .format(datanode=self._kwargs['datanode']) - self.execute_cmd(guc_reload) - start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(time.time()))) - # caculate log size - count = 0 - while deviation_size < self._kwargs['max_generate_log_size']: - time.sleep(5) - current_size = self.get_directory_size() - deviation_size = (current_size - original_total_size) / 1024 / 1024 - if current_size - original_total_size < 0: - if count >= 60: - break - count += 1 - self.logger.info('Current log size difference: %sM' % deviation_size) - self.logger.info('Start to reset GUC, cmd: %s' % guc_reset) - returncode, res = self.execute_cmd(guc_reset) - if returncode == 0: - self.logger.info('Success to reset GUC setting.') - else: - self.logger.error('Failed to reset GUC params. 
please check it.') - return start_time - - def get_directory_size(self): - files = os.listdir(self._kwargs['pg_log_path']) - total_size = 0 - for file in files: - total_size += os.path.getsize(os.path.join(self._kwargs['pg_log_path'], file)) - return total_size - - def execute_index_recommendation(self): - self.logger.info('Start checking guc.') - try: - guc_check = 'gs_guc check -Z datanode -D {datanode} -c "log_min_duration_statement" && ' \ - 'gs_guc check -Z datanode -D {datanode} -c "log_statement" '\ - .format(datanode=self._kwargs['datanode']) - returncode, res = self.execute_cmd(guc_check) - origin_min_duration = self._kwargs['log_min_duration_statement'] - origin_log_statement = self._kwargs['log_statement'] - if returncode == 0: - self.logger.info('Original GUC settings is: %s' % res) - match_res = re.findall(r'log_min_duration_statement=(\'?[a-zA-Z0-9]+\'?)', res) - if match_res: - origin_min_duration = match_res[-1] - if 'NULL' in origin_min_duration: - origin_min_duration = '30min' - match_res = re.findall(r'log_statement=(\'?[a-zA-Z]+\'?)', res) - if match_res: - origin_log_statement = match_res[-1] - if 'NULL' in origin_log_statement: - origin_log_statement = 'none' - self.logger.info('Parsed (log_min_duration_statement, log_statement) GUC params are (%s, %s)' % - (origin_min_duration, origin_log_statement)) - self.logger.info('Test reseting GUC command...') - guc_reset = 'gs_guc reload -Z datanode -D %s -c "log_min_duration_statement = %s" && ' \ - 'gs_guc reload -Z datanode -D %s -c "log_statement= %s"' % \ - (self._kwargs['datanode'], origin_min_duration, - self._kwargs['datanode'], origin_log_statement) - returncode, res = self.execute_cmd(guc_reset) - if returncode != 0: - guc_reset = 'gs_guc reload -Z datanode -D %s -c "log_min_duration_statement = %s" && ' \ - 'gs_guc reload -Z datanode -D %s -c "log_statement= %s"' % \ - (self._kwargs['datanode'], self._kwargs['log_min_duration_statement'], - self._kwargs['datanode'], self._kwargs['log_statement']) - ret, res = self.execute_cmd(guc_reset) - if ret != 0: - raise Exception('Cannot reset GUC initial value, please check it.') - self.logger.info('Test successfully') - # open guc and monitor log real-time size - start_time = self.monitor_log_size(guc_reset) - # extract log - self.extract_log(start_time) - # index advise - self.execute_index_advisor() - except Exception as e: - self.logger.error(e) - guc_reset = 'gs_guc reload -Z datanode -D %s -c "log_min_duration_statement = %s" && ' \ - 'gs_guc reload -Z datanode -D %s -c "log_statement= %s"' % \ - (self._kwargs['datanode'], self._kwargs['log_min_duration_statement'], - self._kwargs['datanode'], self._kwargs['log_statement']) - self.execute_cmd(guc_reset) - - def start_service(self): - # check service is running or not. 
- if os.path.isfile(self.pid_file): - pid = self.check_proc_exist("index_server") - if pid: - raise Exception("Error: Process already running, can't start again.") - else: - os.remove(self.pid_file) - - # get listen host and port - self.logger.info("Start service...") - # write process pid to file - if not os.path.isdir(os.path.dirname(self.pid_file)): - os.makedirs(os.path.dirname(self.pid_file), 0o700) - with open(self.pid_file, mode='w') as f: - f.write(str(os.getpid())) - - self.logger.info("Index advisor execution intervals is: %sh" % - self._kwargs['index_intervals']) - index_recommendation_thread = RepeatTimer(self._kwargs['index_intervals']*60*60, - self.execute_index_recommendation) - self.logger.info("Start timer...") - index_recommendation_thread.start() - - -def read_input_from_pipe(): - """ - Read stdin input if there is "echo 'str1 str2' | python xx.py", - return the input string - """ - input_str = "" - r_handle, _, _ = select.select([sys.stdin], [], [], 0) - if not r_handle: - return "" - - for item in r_handle: - if item == sys.stdin: - input_str = sys.stdin.read().strip() - return input_str - - -def parse_check_conf(config_path): - config = ConfigParser() - config.read(config_path) - config_dict = dict() - config_dict['app_name'] = config.get("server", "app_name") - config_dict['database'] = config.get("server", "database") - config_dict['port'] = config.get("server", "port") - config_dict['host'] = config.get("server", "host") - config_dict['user'] = config.get("server", "user") - config_dict['wl_user'] = config.get("server", "workload_user") - config_dict['schema'] = config.get("server", "schema") - config_dict['max_index_num'] = config.getint("server", "max_index_num") - config_dict['max_index_storage'] = config.get("server", "max_index_storage") - config_dict['driver'] = config.getboolean("server", "driver") - config_dict['index_intervals'] = config.getint("server", "index_intervals") - config_dict['sql_amount'] = config.getint("server", "sql_amount") - config_dict['output_sql_file'] = config.get("server", "output_sql_file") - config_dict['datanode'] = config.get("server", "datanode") - config_dict['pg_log_path'] = config.get("server", "pg_log_path") - config_dict['ai_monitor_url'] = config.get("server", "ai_monitor_url") - config_dict['max_generate_log_size'] = config.getfloat("server", "max_generate_log_size") - config_dict['statement'] = config.getboolean("server", "statement") - config_dict['log_min_duration_statement'] = config.get("server", "log_min_duration_statement") - config_dict['log_statement'] = config.get("server", "log_statement") - if not config_dict['log_min_duration_statement'] or \ - not re.match(r'[a-zA-Z0-9]+', config_dict['log_min_duration_statement']): - raise ValueError("Please enter a legal value of [log_min_duration_statement]") - legal_log_statement = ['none', 'all', 'ddl', 'mod'] - if config_dict['log_statement'] not in legal_log_statement: - raise ValueError("Please enter a legal value of [log_statement]") - return config_dict - - -def manage_service(): - config_path = os.path.join(current_dirname, 'database-info.conf') - config_dict = parse_check_conf(config_path) - LOGGER = CreateLogger("debug", "start_service.log").create_log(config_dict.get('output_sql_file')) - server_pid_file = os.path.join(current_dirname, 'index_server.pid') - password = read_input_from_pipe() - IndexServer(server_pid_file, LOGGER, password, **config_dict).start_service() - - -def main(): - try: - manage_service() - except Exception as err_msg: - print(err_msg) - 
sys.exit(1) - - -if __name__ == '__main__': - main() - - diff --git a/src/gausskernel/dbmind/tools/index_advisor/mcts.py b/src/gausskernel/dbmind/tools/index_advisor/mcts.py deleted file mode 100644 index f6776c0ba..000000000 --- a/src/gausskernel/dbmind/tools/index_advisor/mcts.py +++ /dev/null @@ -1,396 +0,0 @@ -import sys -import math -import random -import copy - - -MAX_INDEX_NUM = 0 -STORAGE_THRESHOLD = 0 -AVAILABLE_CHOICES = [] -ATOMIC_CHOICES = [] -WORKLOAD_INFO = [] - - -def is_same_index(index, compared_index): - return index.table == compared_index.table and \ - index.columns == compared_index.columns - - -def atomic_config_is_valid(atomic_config, candidate_indexes): - # if candidate indexes contains all atomic index of atomic_config, then record it - for atomic_index in atomic_config: - is_exist = False - for index in candidate_indexes: - if is_same_index(index, atomic_index): - index.storage = atomic_index.storage - is_exist = True - break - if not is_exist: - return False - return True - - -def find_subsets_num(choice): - atomic_subsets_num = [] - for pos, atomic in enumerate(ATOMIC_CHOICES): - if not atomic or len(atomic) > len(choice): - continue - # find valid atomic index - if atomic_config_is_valid(atomic, choice): - atomic_subsets_num.append(pos) - # find the same atomic index as the candidate index - if len(atomic) == 1 and (is_same_index(choice[-1], atomic[0])): - choice[-1].atomic_pos = pos - return atomic_subsets_num - - -def find_best_benefit(choice): - atomic_subsets_num = find_subsets_num(choice) - total_benefit = 0 - for ind, obj in enumerate(WORKLOAD_INFO): - # calculate the optimal benefit for each sql - max_benefit = 0 - for pos in atomic_subsets_num: - if (obj.cost_list[0] - obj.cost_list[pos]) > max_benefit: - max_benefit = obj.cost_list[0] - obj.cost_list[pos] - total_benefit += max_benefit - return total_benefit - - -def get_diff(available_choices, choices): - except_choices = copy.copy(available_choices) - for i in available_choices: - for j in choices: - if is_same_index(i, j): - except_choices.remove(i) - return except_choices - - -class State(object): - """ - The game state of the Monte Carlo tree search, - the state data recorded under a certain Node node, - including the current game score, the current number of game rounds, - and the execution record from the beginning to the current. - - It is necessary to realize whether the current state has reached the end of the game state, - and support the operation of randomly fetching from the Action collection. 
- """ - - def __init__(self): - self.current_storage = 0.0 - self.current_benefit = 0.0 - # record the sum of choices up to the current state - self.accumulation_choices = [] - # record available choices of current state - self.available_choices = [] - self.displayable_choices = [] - - def get_available_choices(self): - return self.available_choices - - def set_available_choices(self, choices): - self.available_choices = choices - - def get_current_storage(self): - return self.current_storage - - def set_current_storage(self, value): - self.current_storage = value - - def get_current_benefit(self): - return self.current_benefit - - def set_current_benefit(self, value): - self.current_benefit = value - - def get_accumulation_choices(self): - return self.accumulation_choices - - def set_accumulation_choices(self, choices): - self.accumulation_choices = choices - - def is_terminal(self): - # the current node is a leaf node - return len(self.accumulation_choices) == MAX_INDEX_NUM - - def compute_benefit(self): - return self.current_benefit - - def get_next_state_with_random_choice(self): - # ensure that the choices taken are not repeated - if not self.available_choices: - return None - random_choice = random.choice([choice for choice in self.available_choices]) - self.available_choices.remove(random_choice) - choice = copy.copy(self.accumulation_choices) - choice.append(random_choice) - benefit = find_best_benefit(choice) - # if current choice not satisfy restrictions, then continue get next choice - if benefit <= self.current_benefit or \ - self.current_storage + random_choice.storage > STORAGE_THRESHOLD: - return self.get_next_state_with_random_choice() - - next_state = State() - # initialize the properties of the new state - next_state.set_accumulation_choices(choice) - next_state.set_current_benefit(benefit) - next_state.set_current_storage(self.current_storage + random_choice.storage) - next_state.set_available_choices(get_diff(AVAILABLE_CHOICES, choice)) - return next_state - - def __repr__(self): - self.displayable_choices = ['{}: {}'.format(choice.table, choice.columns) - for choice in self.accumulation_choices] - return "reward: {}, storage :{}, choices: {}".format( - self.current_benefit, self.current_storage, self.displayable_choices) - - -class Node(object): - """ - The Node of the Monte Carlo tree search tree contains the parent node and - current point information, - which is used to calculate the traversal times and quality value of the UCB, - and the State of the Node selected by the game. 
- """ - def __init__(self): - self.visit_number = 0 - self.quality = 0.0 - - self.parent = None - self.children = [] - self.state = None - - def get_parent(self): - return self.parent - - def set_parent(self, parent): - self.parent = parent - - def get_children(self): - return self.children - - def expand_child(self, node): - node.set_parent(self) - self.children.append(node) - - def set_state(self, state): - self.state = state - - def get_state(self): - return self.state - - def get_visit_number(self): - return self.visit_number - - def set_visit_number(self, number): - self.visit_number = number - - def update_visit_number(self): - self.visit_number += 1 - - def get_quality_value(self): - return self.quality - - def set_quality_value(self, value): - self.quality = value - - def update_quality_value(self, reward): - self.quality += reward - - def is_all_expand(self): - return len(self.children) == \ - len(AVAILABLE_CHOICES) - len(self.get_state().get_accumulation_choices()) - - def __repr__(self): - return "Node: {}, Q/N: {}/{}, State: {}".format( - hash(self), self.quality, self.visit_number, self.state) - - -def tree_policy(node): - """ - In the Selection and Expansion stages of Monte Carlo tree search, - the node that needs to be searched (such as the root node) is passed in, - and the best node that needs to be expanded is returned according to the exploration/exploitation algorithm. - Note that if the node is a leaf node, it will be returned directly. - - The basic strategy is to first find the child nodes that have not been selected at present, - and select them randomly if there are more than one. If both are selected, - find the one with the largest UCB value that has weighed exploration/exploitation, - and randomly select if the UCB values are equal. - """ - - # check if the current node is leaf node - while node and not node.get_state().is_terminal(): - - if node.is_all_expand(): - node = best_child(node, True) - else: - # return the new sub node - sub_node = expand(node) - # when there is no node that satisfies the condition in the remaining nodes, - # this state is empty - if sub_node.get_state(): - return sub_node - - # return the leaf node - return node - - -def default_policy(node): - """ - In the Simulation stage of Monte Carlo tree search, input a node that needs to be expanded, - create a new node after random operation, and return the reward of the new node. - Note that the input node should not be a child node, - and there are unexecuted Actions that can be expendable. - - The basic strategy is to choose the Action at random. - """ - - # get the state of the game - current_state = copy.deepcopy(node.get_state()) - - # run until the game over - while not current_state.is_terminal(): - # pick one random action to play and get next state - next_state = current_state.get_next_state_with_random_choice() - if not next_state: - break - current_state = next_state - - final_state_reward = current_state.compute_benefit() - return final_state_reward - - -def expand(node): - """ - Enter a node, expand a new node on the node, use the random method to execute the Action, - and return the new node. 
Note that it is necessary to ensure that the newly - added nodes are different from other node Action - """ - - new_state = node.get_state().get_next_state_with_random_choice() - sub_node = Node() - sub_node.set_state(new_state) - node.expand_child(sub_node) - - return sub_node - - -def best_child(node, is_exploration): - """ - Using the UCB algorithm, - select the child node with the highest score after weighing the exploration and exploitation. - Note that if it is the prediction stage, - the current Q-value score with the highest score is directly selected. - """ - - best_score = -sys.maxsize - best_sub_node = None - - # travel all sub nodes to find the best one - for sub_node in node.get_children(): - # The children nodes of the node contains the children node whose state is empty, - # this kind of node comes from the node that does not meet the conditions. - if not sub_node.get_state(): - continue - # ignore exploration for inference - if is_exploration: - C = 1 / math.sqrt(2.0) - else: - C = 0.0 - - # UCB = quality / times + C * sqrt(2 * ln(total_times) / times) - left = sub_node.get_quality_value() / sub_node.get_visit_number() - right = 2.0 * math.log(node.get_visit_number()) / sub_node.get_visit_number() - score = left + C * math.sqrt(right) - # get the maximum score, while filtering nodes that do not meet the space constraints and - # nodes that have no revenue - if score > best_score \ - and sub_node.get_state().get_current_storage() <= STORAGE_THRESHOLD \ - and sub_node.get_state().get_current_benefit() > 0: - best_sub_node = sub_node - best_score = score - - return best_sub_node - - -def backpropagate(node, reward): - """ - In the Backpropagation stage of Monte Carlo tree search, - input the node that needs to be expended and the reward of the newly executed Action, - feed it back to the expend node and all upstream nodes, - and update the corresponding data. - """ - - # update util the root node - while node is not None: - # update the visit number - node.update_visit_number() - - # update the quality value - node.update_quality_value(reward) - - # change the node to the parent node - node = node.parent - - -def monte_carlo_tree_search(node): - """ - Implement the Monte Carlo tree search algorithm, pass in a root node, - expand new nodes and update data according to the - tree structure that has been explored before in a limited time, - and then return as long as the child node with the highest exploitation. - - When making predictions, - you only need to select the node with the largest exploitation according to the Q value, - and find the next optimal node. - """ - - computation_budget = len(AVAILABLE_CHOICES) * 3 - - # run as much as possible under the computation budget - for i in range(computation_budget): - # 1. find the best node to expand - expand_node = tree_policy(node) - if not expand_node: - # when it is None, it means that all nodes are added but no nodes meet the space limit - break - # 2. random get next action and get reward - reward = default_policy(expand_node) - - # 3. 
update all passing nodes with reward - backpropagate(expand_node, reward) - - # get the best next node - best_next_node = best_child(node, False) - - return best_next_node - - -def MCTS(workload_info, atomic_choices, available_choices, storage_threshold, max_index_num): - global ATOMIC_CHOICES, STORAGE_THRESHOLD, WORKLOAD_INFO, AVAILABLE_CHOICES, MAX_INDEX_NUM - WORKLOAD_INFO = workload_info - AVAILABLE_CHOICES = available_choices - ATOMIC_CHOICES = atomic_choices - STORAGE_THRESHOLD = storage_threshold - MAX_INDEX_NUM = max_index_num if max_index_num else len(available_choices) - - # create the initialized state and initialized node - init_state = State() - choices = copy.copy(available_choices) - init_state.set_available_choices(choices) - init_node = Node() - init_node.set_state(init_state) - current_node = init_node - - opt_config = [] - # set the rounds to play - for i in range(len(AVAILABLE_CHOICES)): - if current_node: - current_node = monte_carlo_tree_search(current_node) - if current_node: - opt_config = current_node.state.accumulation_choices - else: - break - return opt_config diff --git a/src/gausskernel/dbmind/tools/metadatabase/__init__.py b/src/gausskernel/dbmind/tools/metadatabase/__init__.py new file mode 100644 index 000000000..c582d38cc --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/__init__.py @@ -0,0 +1,69 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import sqlalchemy +from sqlalchemy.engine import create_engine +from sqlalchemy.exc import ProgrammingError + +from .base import Base, DynamicConfig +from .schema import load_all_schema_models +from ..common.exceptions import SQLExecutionError + + +def create_metadatabase_schema(check_first=True): + from .business_db import update_session_clz_from_configs + from .business_db import session_clz + + update_session_clz_from_configs() + load_all_schema_models() + try: + Base.metadata.create_all( + session_clz.get('engine'), + checkfirst=check_first + ) + except Exception as e: + raise SQLExecutionError(e) + + +def destroy_metadatabase(): + from .business_db import update_session_clz_from_configs + from .business_db import session_clz + + update_session_clz_from_configs() + load_all_schema_models() + try: + Base.metadata.drop_all( + session_clz.get('engine') + ) + except Exception as e: + raise SQLExecutionError(e) + + +def create_dynamic_config_schema(): + from sqlalchemy.orm import sessionmaker + from dbmind.constants import DYNAMIC_CONFIG + from ._utils import create_dsn + from .dao.dynamic_config import table_mapper + + load_all_schema_models() + engine = create_engine(create_dsn('sqlite', DYNAMIC_CONFIG), encoding='utf-8') + DynamicConfig.metadata.create_all(engine) + + # Batch insert default values into config tables. + with sessionmaker(engine, autocommit=True, autoflush=True)() as session: + for table_name, table in table_mapper.items(): + try: + session.bulk_save_objects(table.default_values()) + except sqlalchemy.exc.IntegrityError: + # May be duplicate, ignore it. 
+                pass
diff --git a/src/gausskernel/dbmind/tools/metadatabase/_utils.py b/src/gausskernel/dbmind/tools/metadatabase/_utils.py
new file mode 100644
index 000000000..bffb894fd
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/metadatabase/_utils.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+from urllib import parse
+
+DB_TYPES = {'sqlite', 'opengauss', 'postgresql', 'mysql'}
+
+
+def create_dsn(
+        db_type,
+        database,
+        host=None,
+        port=None,
+        username=None,
+        password=None
+):
+    """Generate a DSN (Data Source Name) from the user-given parameters.
+    Meanwhile, DBMind adapts some interfaces to SQLAlchemy here, such as for openGauss."""
+    if db_type not in DB_TYPES:
+        raise ValueError("Not supported database type '%s'." % db_type)
+    if db_type == 'opengauss':
+        db_type = 'postgresql'
+        # DBMind has to override the following method.
+        # Otherwise, SQLAlchemy will raise an exception about an unknown server version.
+        from sqlalchemy.dialects.postgresql.base import PGDialect
+        PGDialect._get_server_version_info = lambda *args: (9, 2)
+    if db_type == 'sqlite':
+        dsn = '{}:///{}?check_same_thread=False'.format(db_type, database)
+    else:
+        username = parse.quote(username)
+        password = parse.quote(password)
+        host = parse.quote(host)
+        database = parse.quote(database)
+        dsn = '{}://{}:{}@{}:{}/{}'.\
+            format(db_type, username, password, host, port, database)
+    return dsn
diff --git a/src/gausskernel/dbmind/tools/metadatabase/base.py b/src/gausskernel/dbmind/tools/metadatabase/base.py
new file mode 100644
index 000000000..992a9f854
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/metadatabase/base.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+from sqlalchemy.orm import declarative_base
+
+# Base is stored on a remote database server and mainly holds large-scale business data,
+# such as the results of time-series forecasting and slow query analysis.
+Base = declarative_base()
+# DynamicConfig records dynamic configurations, unlike the static text-based config file.
+# Dynamic configurations can be modified frequently by the user and take effect immediately.
+DynamicConfig = declarative_base(name='DynamicConfig')
diff --git a/src/gausskernel/dbmind/tools/metadatabase/business_db.py b/src/gausskernel/dbmind/tools/metadatabase/business_db.py
new file mode 100644
index 000000000..388078ffe
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/metadatabase/business_db.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import contextlib + +from sqlalchemy.engine import create_engine +from sqlalchemy.orm import sessionmaker + +from dbmind import global_vars +from ._utils import create_dsn + +session_clz = dict() + + +def update_session_clz_from_configs(): + db_type = global_vars.configs.get('METADATABASE', 'dbtype') + database = global_vars.configs.get('METADATABASE', 'database') + host = global_vars.configs.get('METADATABASE', 'host') + port = global_vars.configs.get('METADATABASE', 'port') + username = global_vars.configs.get('METADATABASE', 'username') + password = global_vars.configs.get('METADATABASE', 'password') + + dsn = create_dsn(db_type, database, host, port, username, password) + postgres_dsn = create_dsn(db_type, 'postgres', host, port, username, password) + engine = create_engine(dsn, pool_pre_ping=True, encoding='utf-8') + + session_maker = sessionmaker(bind=engine) + session_clz.update( + postgres_dsn=postgres_dsn, + dsn=dsn, + engine=engine, + session_maker=session_maker, + db_type=db_type, + db_name=database + ) + + +@contextlib.contextmanager +def get_session(): + if len(session_clz) == 0: + update_session_clz_from_configs() + + session = session_clz['session_maker']() + try: + yield session + session.commit() + except Exception as exception: + session.rollback() + raise exception + finally: + session.close() diff --git a/src/gausskernel/dbmind/tools/metadatabase/dao/__init__.py b/src/gausskernel/dbmind/tools/metadatabase/dao/__init__.py new file mode 100644 index 000000000..a24a4032b --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/dao/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from . import forecasting_metrics +from . import slow_queries +from . import dynamic_config diff --git a/src/gausskernel/dbmind/tools/metadatabase/dao/_common.py b/src/gausskernel/dbmind/tools/metadatabase/dao/_common.py new file mode 100644 index 000000000..3b4d26bbd --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/dao/_common.py @@ -0,0 +1,25 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
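All of the DAO helpers in this package funnel through the `get_session()` context manager defined in business_db.py above, which commits on success and rolls back on any exception. A minimal usage sketch, assuming the metadatabase schema has been created and the [METADATABASE] section is configured; the helper name `delete_one_slow_query` is hypothetical:

```
# Minimal sketch (not part of the patch): an exception raised inside the
# block triggers session.rollback() before propagating; otherwise the
# session is committed and then closed.
from dbmind.metadatabase.business_db import get_session
from dbmind.metadatabase.schema import SlowQueries


def delete_one_slow_query(slow_query_id):
    with get_session() as session:
        session.query(SlowQueries).filter(
            SlowQueries.slow_query_id == slow_query_id
        ).delete()
```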
+from sqlalchemy import text
+
+from ..business_db import get_session, session_clz
+
+
+def truncate_table(table_name):
+    with get_session() as session:
+        if session_clz.get('db_type') == 'sqlite':
+            sql_prefix = 'DELETE FROM '
+        else:
+            sql_prefix = 'TRUNCATE TABLE '
+        session.execute(text(sql_prefix + table_name))
+        session.commit()
diff --git a/src/gausskernel/dbmind/tools/metadatabase/dao/dynamic_config.py b/src/gausskernel/dbmind/tools/metadatabase/dao/dynamic_config.py
new file mode 100644
index 000000000..b2583b732
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/metadatabase/dao/dynamic_config.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+"""`dynamic_config_set()` and `dynamic_config_get()` work on top of get_session(),
+and get_session() can only load the SQLite database from the current working directory.
+Hence, before using `dynamic_config_set()` or `dynamic_config_get()`, change the
+working directory to the confpath (the path of the configuration files).
+
+    Examples
+    ------------
+    >>> import os
+    >>> os.chdir(confpath)
+    >>> dynamic_config_get('foo', 'bar')
+
+To add more dynamic configurations, follow the steps below:
+
+1. Create a Python file named config_xxx.py in the ```dbmind.metadatabase.schema``` module;
+2. Define an ORM class for your dynamic configurations, referring to the class ```DynamicConfig```;
+3. Link your ORM class here by adding its table name to the ```table_mapper```;
+4. That is all. You can then call the ```dynamic_config_set()``` and ```dynamic_config_get()```
+functions to modify and read them.
+"""
+
+from sqlalchemy import update, insert
+
+from ..dynamic_config import get_session
+from ..schema.config_slow_sql_threshold import SlowSQLThreshold
+from ..schema.config_dbmind_system import DBMindConfig
+from ..schema.config_iv_table import IV_Table
+
+table_mapper = {
+    SlowSQLThreshold.__tablename__: SlowSQLThreshold,
+    DBMindConfig.__tablename__: DBMindConfig,
+    IV_Table.__tablename__: IV_Table
+}
+
+
+def dynamic_config_set(table_name, name, value):
+    table = table_mapper.get(table_name)
+    if table is None:
+        raise ValueError('Invalid table name.')
+
+    with get_session() as session:
+        # If the table already has a row with the given name, update its value.
+        # Otherwise, insert a new row into the table.
+        if session.query(table).filter(table.name == name).count() > 0:
+            session.execute(
+                update(table).where(
+                    table.name == name
+                ).values(
+                    value=value
+                ).execution_options(
+                    synchronize_session="fetch"
+                )
+            )
+        else:
+            session.execute(
+                insert(table).values(name=name, value=value)
+            )
+
+
+def dynamic_config_get(table_name, name):
+    table = table_mapper.get(table_name)
+    if not table:
+        raise ValueError('Table %s not found.'
% table_name) + with get_session() as session: + result = tuple(session.query(table).filter(table.name == name)) + if len(result) == 0: + return None + return result[0].value + diff --git a/src/gausskernel/dbmind/tools/metadatabase/dao/forecasting_metrics.py b/src/gausskernel/dbmind/tools/metadatabase/dao/forecasting_metrics.py new file mode 100644 index 000000000..70bcc4ebe --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/dao/forecasting_metrics.py @@ -0,0 +1,110 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from typing import Sequence + +from ._common import truncate_table +from ..business_db import get_session +from ..schema import ForecastingMetrics + + +def delete_forecasting_metrics(metric_name, host_ip, metric_min_time, metric_max_time): + """Delete old forecast metric data.""" + with get_session() as session: + session.query(ForecastingMetrics).filter( + ForecastingMetrics.metric_name == metric_name, + ForecastingMetrics.host_ip == host_ip, + ForecastingMetrics.metric_time >= metric_min_time, + ForecastingMetrics.metric_time <= metric_max_time + ).delete() + + +def truncate_forecasting_metrics(): + truncate_table(ForecastingMetrics.__tablename__) + + +def batch_insert_forecasting_metric(metric_name, host_ip, + metric_value: Sequence, metric_time: Sequence, + metric_type=None, node_id=None): + """Batch insert node metrics into the table.""" + node_metric_lists = [] + for v, t in zip(metric_value, metric_time): + node_metric_lists.append( + ForecastingMetrics( + metric_name=metric_name, + metric_type=metric_type, + host_ip=host_ip, + node_id=node_id, + metric_value=round(v, 2), + metric_time=t + ) + ) + with get_session() as session: + session.bulk_save_objects(node_metric_lists) + + +def delete_timeout_forecasting_metrics(oldest_metric_time): + """To prevent the table from over-expanding.""" + with get_session() as session: + session.query(ForecastingMetrics).filter( + ForecastingMetrics.metric_time < oldest_metric_time + ).delete() + + +def select_forecasting_metric( + metric_name=None, host_ip=None, + min_metric_time=None, max_metric_time=None, + node_id=None +): + with get_session() as session: + result = session.query(ForecastingMetrics) + if metric_name is not None: + result = result.filter( + ForecastingMetrics.metric_name == metric_name + ) + if host_ip is not None: + result = result.filter( + ForecastingMetrics.host_ip == host_ip + ) + if min_metric_time is not None: + result = result.filter( + min_metric_time <= ForecastingMetrics.metric_time + ) + if max_metric_time is not None: + result = result.filter( + ForecastingMetrics.metric_time <= max_metric_time + ) + if node_id is not None: + result = result.filter( + ForecastingMetrics.node_id == node_id + ) + + return result + + +def count_forecasting_metric(metric_name=None, host_ip=None, node_id=None): + with get_session() as session: + result = session.query(ForecastingMetrics) + if metric_name is not None: + result = result.filter( + ForecastingMetrics.metric_name == metric_name + ) + if host_ip is not None: + result = 
result.filter( + ForecastingMetrics.host_ip == host_ip + ) + if node_id is not None: + result = result.filter( + ForecastingMetrics.node_id == node_id + ) + + return result.count() diff --git a/src/gausskernel/dbmind/tools/metadatabase/dao/slow_queries.py b/src/gausskernel/dbmind/tools/metadatabase/dao/slow_queries.py new file mode 100644 index 000000000..07633e1d1 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/dao/slow_queries.py @@ -0,0 +1,79 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from ..business_db import get_session +from ..schema import SlowQueries +from ._common import truncate_table + +from sqlalchemy.orm import load_only + + +def insert_slow_query( + schema_name, db_name, query, start_at, duration_time, + template_id=None, hit_rate=None, fetch_rate=None, + cpu_time=None, data_io_time=None, root_cause=None, suggestion=None +): + with get_session() as session: + session.add( + SlowQueries( + schema_name=schema_name, + db_name=db_name, + query=query, + template_id=template_id, + start_at=start_at, + duration_time=duration_time, + hit_rate=hit_rate, + fetch_rate=fetch_rate, + cpu_time=cpu_time, + data_io_time=data_io_time, + root_cause=root_cause, + suggestion=suggestion + ) + ) + + +def select_slow_queries(target_list=(), query=None, start_time=None, end_time=None): + with get_session() as session: + result = session.query(SlowQueries) + if len(target_list) > 0: + result = result.options(load_only(*target_list)) + if query is not None: + result = result.filter( + SlowQueries.query.like(query) + ) + if start_time is not None: + result = result.filter( + SlowQueries.start_at >= start_time + ) + if end_time is not None: + result = result.filter( + SlowQueries.start_at <= end_time + ) + + return result.order_by(SlowQueries.start_at) + + +def count_slow_queries(): + with get_session() as session: + return session.query(SlowQueries.slow_query_id).count() + + +def delete_slow_queries(start_time): + """To prevent the table from over-expanding.""" + with get_session() as session: + session.query(SlowQueries).filter( + SlowQueries.start_at <= start_time + ).delete() + + +def truncate_slow_queries(): + truncate_table(SlowQueries.__tablename__) diff --git a/src/gausskernel/dbmind/tools/metadatabase/dynamic_config.py b/src/gausskernel/dbmind/tools/metadatabase/dynamic_config.py new file mode 100644 index 000000000..a58c29ce4 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/dynamic_config.py @@ -0,0 +1,46 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
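The dynamic-config DAO above and the `get_session()` defined below both resolve the SQLite file relative to the current working directory, so callers must change into the confpath first. A minimal sketch of the intended call pattern, assuming `create_dynamic_config_schema()` has already been run; the confpath value is illustrative, while the table and key names come from the schema defaults in this patch:

```
# Minimal sketch: read and update a slow-SQL threshold through the
# dynamic-config DAO. '/path/to/confpath' is an assumption for illustration.
import os

from dbmind.metadatabase.dao.dynamic_config import (
    dynamic_config_get, dynamic_config_set
)

os.chdir('/path/to/confpath')  # get_session() loads SQLite from the CWD

# 'slow_sql_threshold' and 'cpu_usage_limit' are defined by SlowSQLThreshold.
dynamic_config_set('slow_sql_threshold', 'cpu_usage_limit', 0.6)
print(dynamic_config_get('slow_sql_threshold', 'cpu_usage_limit'))
```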
+import contextlib + +from sqlalchemy.engine import create_engine +from sqlalchemy.orm import sessionmaker + +from dbmind.constants import DYNAMIC_CONFIG +from ._utils import create_dsn + + +_session_maker = None + + +@contextlib.contextmanager +def get_session(): + global _session_maker + + if not _session_maker: + # Notice: Current working directory is the confpath, so we can + # use the relative path directly. + dsn = create_dsn('sqlite', DYNAMIC_CONFIG) + engine = create_engine(dsn) + _session_maker = sessionmaker(engine) + + session = _session_maker() + try: + yield session + session.commit() + except Exception as exception: + session.rollback() + raise exception + finally: + session.close() + + diff --git a/src/gausskernel/dbmind/tools/metadatabase/schema/__init__.py b/src/gausskernel/dbmind/tools/metadatabase/schema/__init__.py new file mode 100644 index 000000000..ddb800d0f --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/schema/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from .forecasting_metrics import ForecastingMetrics +from .slow_queries import SlowQueries + + +def load_all_schema_models(): + """Dummy function: + Loading all the table schema models can be realized by ```import *```. This + function only serves as a self-comment in the form and has no actual action. """ diff --git a/src/gausskernel/dbmind/tools/metadatabase/schema/config_dbmind_system.py b/src/gausskernel/dbmind/tools/metadatabase/schema/config_dbmind_system.py new file mode 100644 index 000000000..02c088295 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/schema/config_dbmind_system.py @@ -0,0 +1,34 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from sqlalchemy import Column, String + +from .. import DynamicConfig + +_default = { + 'cipher_s1': '', + 'cipher_s2': '' +} + + +class DBMindConfig(DynamicConfig): + __tablename__ = "dbmind_config" + + name = Column(String, primary_key=True) + value = Column(String, nullable=False) + + @staticmethod + def default_values(): + rows = [] + for name, value in _default.items(): + rows.append(DBMindConfig(name=name, value=value)) + return rows diff --git a/src/gausskernel/dbmind/tools/metadatabase/schema/config_iv_table.py b/src/gausskernel/dbmind/tools/metadatabase/schema/config_iv_table.py new file mode 100644 index 000000000..e64459b21 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/schema/config_iv_table.py @@ -0,0 +1,35 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""This IV table is just for AES encryption.""" +from sqlalchemy import Column, String + +from .. import DynamicConfig + +_default = { + # Nothing before inserting. +} + + +class IV_Table(DynamicConfig): + __tablename__ = "iv_table" + + name = Column(String, primary_key=True) + value = Column(String, nullable=False) + + @staticmethod + def default_values(): + # return empty list rather than NoneType. + # Otherwise, raise TypeError: 'NoneType' object is not iterable. + return [] + + diff --git a/src/gausskernel/dbmind/tools/metadatabase/schema/config_slow_sql_threshold.py b/src/gausskernel/dbmind/tools/metadatabase/schema/config_slow_sql_threshold.py new file mode 100644 index 000000000..8ff20dc53 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/schema/config_slow_sql_threshold.py @@ -0,0 +1,53 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from sqlalchemy import Column, String, Float + +from .. import DynamicConfig + +_default = { + 'tuple_number_limit': 5000, + 'fetch_tuples_limit': 10000, + 'fetch_rate_limit': 0.3, + 'returned_rows_limit': 1000, + 'returned_rate_limit': 0.3, + 'updated_tuples_limit': 1000, + 'updated_rate_limit': 1000, + 'deleted_tuples_limit': 1000, + 'deleted_rate_limit': 0.3, + 'inserted_tuples_limit': 1000, + 'inserted_rate_limit': 0.3, + 'hit_rate_limit': 0.95, + 'dead_rate_limit': 0.2, + 'index_number_limit': 3, + 'load_average_rate_limit': 0.6, + 'cpu_usage_limit': 0.5, + 'iops_limit': 0.5, + 'ioutils_limit': 0.5, + 'iowait_limit': 0.05, + 'tps_limit': 2000, + 'iocapacity_limit': 50 +} + + +class SlowSQLThreshold(DynamicConfig): + __tablename__ = "slow_sql_threshold" + + name = Column(String, primary_key=True) + value = Column(Float, nullable=False) + + @staticmethod + def default_values(): + rows = [] + for name, value in _default.items(): + rows.append(SlowSQLThreshold(name=name, value=value)) + return rows diff --git a/src/gausskernel/dbmind/tools/metadatabase/schema/forecasting_metrics.py b/src/gausskernel/dbmind/tools/metadatabase/schema/forecasting_metrics.py new file mode 100644 index 000000000..63e615930 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/schema/forecasting_metrics.py @@ -0,0 +1,29 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from sqlalchemy import Column, String, Integer, BigInteger, CHAR, Index, Numeric + +from .. import Base + + +class ForecastingMetrics(Base): + __tablename__ = "tb_forecasting_metrics" + + rowid = Column(Integer, primary_key=True, autoincrement=True) + metric_name = Column(String(32), nullable=False) + metric_type = Column(String(8)) + host_ip = Column(CHAR(24), nullable=False) + node_id = Column(BigInteger) + metric_value = Column(Numeric(16, 2), nullable=False) + metric_time = Column(BigInteger, nullable=False) + + idx_forecasting_metrics = Index("idx_forecasting_metrics", metric_name, host_ip, metric_time) diff --git a/src/gausskernel/dbmind/tools/metadatabase/schema/slow_queries.py b/src/gausskernel/dbmind/tools/metadatabase/schema/slow_queries.py new file mode 100644 index 000000000..f8af2dca1 --- /dev/null +++ b/src/gausskernel/dbmind/tools/metadatabase/schema/slow_queries.py @@ -0,0 +1,36 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from sqlalchemy import Column, String, Integer, BigInteger, Float, Index + +from .. import Base + + +class SlowQueries(Base): + __tablename__ = "tb_slow_queries" + + slow_query_id = Column(Integer, primary_key=True, autoincrement=True) + schema_name = Column(String(64), nullable=False) + db_name = Column(String(64), nullable=False) + query = Column(String(1024), nullable=False) + template_id = Column(BigInteger) + start_at = Column(BigInteger, nullable=False) + duration_time = Column(Float, nullable=False) + hit_rate = Column(Float) + fetch_rate = Column(Float) + cpu_time = Column(Float) + data_io_time = Column(Float) + root_cause = Column(String(1024)) + suggestion = Column(String(1024)) + + idx_slow_queries = Index("idx_slow_queries", duration_time, start_at) + diff --git a/src/gausskernel/dbmind/tools/misc/dbmind.conf b/src/gausskernel/dbmind/tools/misc/dbmind.conf new file mode 100644 index 000000000..b332bf9cf --- /dev/null +++ b/src/gausskernel/dbmind/tools/misc/dbmind.conf @@ -0,0 +1,32 @@ +[TSDB] +name = prometheus # The type of time-series database. Options: prometheus. +host = # Address of time-series database. +port = # Port to connect to time-series database. + +[METADATABASE] +dbtype = sqlite # Database type. Options: sqlite, opengauss, postgresql. +host = # Address of meta-data database. +port = # Port to connect to meta-data database. +username = # User name to connect to meta-data database. +password = (null) # Password to connect to meta-data database. +database = # Database name to connect to meta-data database. + + +[SELF-MONITORING] +detection_interval = 600 # Unit is second. The interval for performing health examination on the openGauss through monitoring metrics. +last_detection_time = 600 # Unit is second. 
The time of the last detection.
+forecasting_future_time = 3600  # Unit is second. How far into the future KPIs are forecast. This is also the forecasting period.
+# The following golden_kpi metrics of the monitoring system are vital.
+golden_kpi = os_cpu_usage, os_mem_usage, gaussdb_qps_by_instance  # DBMind only measures and detects the golden metrics during anomaly detection.
+
+
+[LOG]
+maxbytes = 10485760  # Default is 10Mb. Maximum size of a single log file. If maxbytes is zero, the file grows indefinitely.
+backupcount = 1  # Number of backups of log files.
+level = INFO  # Options: DEBUG, INFO, WARNING, ERROR.
+
+[COMMENT]
+worker = How compute-intensive tasks are executed. Tasks can be executed locally or distributed to multiple nodes.
+tsdb = Configure the data source for time-series data, which comes from monitoring the openGauss instance.
+metadatabase = Configure the database that records meta-data, i.e., stores the results of the forecasting and diagnosis processes. The database should be an openGauss instance.
+self-monitoring = Set up parameters for monitoring the openGauss instance.
diff --git a/src/gausskernel/dbmind/tools/misc/dbmind.conf.header b/src/gausskernel/dbmind/tools/misc/dbmind.conf.header
new file mode 100644
index 000000000..09a79d435
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/misc/dbmind.conf.header
@@ -0,0 +1,22 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+# Notice:
+# 1. (null) explicitly represents empty or null, while blank represents undefined.
+# 2. DBMind encrypts password parameters. Hence, there is no plain-text password after initialization.
+# 3. Users can only configure plain-text passwords in this file before initialization
+#    (that is, before using the --initialize option); to modify password-related
+#    information afterwards, users need to use the 'set' sub-command.
+# 4. If users use a relative path in this file, it is resolved against the directory where this file is located.
+
diff --git a/src/gausskernel/dbmind/tools/misc/filter_label.conf b/src/gausskernel/dbmind/tools/misc/filter_label.conf
new file mode 100644
index 000000000..938e45ec1
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/misc/filter_label.conf
@@ -0,0 +1,26 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+# In a time-series database, a value may carry multiple fields (or tags, labels),
+# and many sequence values are not what we want to collect, so we need to filter
+# on these fields by setting some conditions.
+# In this configuration file, users can define the fields (or conditions)
+# for filtering sequence values by themselves.
+
+# For example,
+# 1) to filter sequence values from Prometheus that carry the label 'app_name', configure:
+# app_name =
+# 2) to filter sequence values from Prometheus whose label 'db_name' has the value
+# 'monitor_db' or 'system_db', configure:
+# db_name = monitor_db
+# db_name = system_db
+
diff --git a/src/gausskernel/dbmind/tools/misc/metric_map.conf b/src/gausskernel/dbmind/tools/misc/metric_map.conf
new file mode 100644
index 000000000..1080fc74c
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/misc/metric_map.conf
@@ -0,0 +1,19 @@
+# Metric names may differ between metric collectors,
+# so we convert between the differing metric names here.
+
+# Format:
+# metric name in the DBMind = metric name in the collector (e.g., Prometheus-exporter, Agent)
+#
+os_disk_io_cnt = os_disk_io_cnt
+os_disk_io_load = os_disk_io_load
+os_disk_read_bytes = os_disk_read_bytes
+os_disk_read_time = os_disk_read_time
+os_disk_write_bytes = os_disk_write_bytes
+os_disk_write_time = os_disk_write_time
+os_cpu_usage = os_cpu_usage
+os_system_cpu_usage = os_system_cpu_usage
+os_user_cpu_usage = os_user_cpu_usage
+opengauss_blocks_read_time = opengauss_blocks_read_time
+opengauss_blocks_write_time = opengauss_blocks_write_time
+opengauss_sql_cpu_time_rate = opengauss_sql_cpu_time_rate
+disk_usage = disk_usage
diff --git a/src/gausskernel/dbmind/tools/misc/metric_value_range.conf b/src/gausskernel/dbmind/tools/misc/metric_value_range.conf
new file mode 100644
index 000000000..37c6b328f
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/misc/metric_value_range.conf
@@ -0,0 +1,35 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
diff --git a/src/gausskernel/dbmind/tools/misc/metric_value_range.conf b/src/gausskernel/dbmind/tools/misc/metric_value_range.conf new file mode 100644 index 000000000..37c6b328f --- /dev/null +++ b/src/gausskernel/dbmind/tools/misc/metric_value_range.conf @@ -0,0 +1,35 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +# This configuration file specifies the lower and upper bounds of the metric values; +# the numeric type supports float, integer, and infinity (inf). + +os_cpu_iowait = 0, 1 +os_cpu_usage = 0, 1 +os_mem_usage = 0, 1 +io_read_total = 0, inf +io_write_total = 0, inf +io_read_bytes = 0, inf +io_write_bytes = 0, inf +io_queue_number = 0, inf +io_read_delay_time = 0, inf +io_write_delay_time = 0, inf +os_disk_iops = 0, inf +os_disk_ioutils = 0, 1 +os_disk_iocapacity = 0, 1 +os_disk_usage = 0, 1 +os_cpu_processor_number = 0, inf +gaussdb_qps_by_instance = 0, inf +gaussdb_invalid_logins_rate = 0, 1 + +
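Because a bound may be the literal inf, a reader of this file can coerce each side with Python's float(), which accepts 'inf' natively. A minimal sketch under that assumption (parse_range is a hypothetical helper, not part of this patch):

# Hypothetical parser for one "metric = lower, upper" line.
def parse_range(line):
    name, _, bounds = line.partition('=')
    lower, upper = (float(part.strip()) for part in bounds.split(','))
    return name.strip(), (lower, upper)

# Usage: check a sampled value against the configured range.
name, (low, high) = parse_range('gaussdb_qps_by_instance = 0, inf')
assert low <= 1234.5 <= high  # any finite, non-negative QPS falls inside [0, inf)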
diff --git a/src/gausskernel/dbmind/tools/predictor/install/requirements-gpu.txt b/src/gausskernel/dbmind/tools/predictor/install/requirements-gpu.txt deleted file mode 100644 index 4cba5fa3c..000000000 --- a/src/gausskernel/dbmind/tools/predictor/install/requirements-gpu.txt +++ /dev/null @@ -1,7 +0,0 @@ -configparser==3.8.1 -Flask==0.12.2 -Keras==2.2.4 -numpy==1.16.4 -pandas==0.25.1 -scikit-learn==0.19.1 -tensorflow-gpu==1.14.0 diff --git a/src/gausskernel/dbmind/tools/predictor/install/requirements.txt b/src/gausskernel/dbmind/tools/predictor/install/requirements.txt deleted file mode 100644 index 9ba1c08c9..000000000 --- a/src/gausskernel/dbmind/tools/predictor/install/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -configparser==3.8.1 -Flask==0.12.2 -Keras==2.2.4 -numpy==1.16.4 -pandas==0.25.1 -scikit-learn==0.19.1 -tensorflow==1.14.0 diff --git a/src/gausskernel/dbmind/tools/requirements-aarch64.txt b/src/gausskernel/dbmind/tools/requirements-aarch64.txt new file mode 100644 index 000000000..caea1e5db --- /dev/null +++ b/src/gausskernel/dbmind/tools/requirements-aarch64.txt @@ -0,0 +1,20 @@ +## For DBMind-core ## +cryptography==2.5 # for paramiko on openEuler +paramiko==2.7.2 +numpy==1.16.5 # for openEuler aarch64 +scipy==1.6.0 +scikit-learn +requests +sqlparse +fastapi +uvicorn +sqlalchemy +psycopg2-binary +pycryptodome +## For components ## +## Prometheus Exporter +pyyaml +prometheus-client +## X-Tuner ## +bayesian-optimization +ptable diff --git a/src/gausskernel/dbmind/tools/requirements-optional.txt b/src/gausskernel/dbmind/tools/requirements-optional.txt new file mode 100644 index 000000000..0e3532de2 --- /dev/null +++ b/src/gausskernel/dbmind/tools/requirements-optional.txt @@ -0,0 +1,3 @@ +tensorflow >= 2.1.0 +keras-rl2~=1.0.4 +gensim==3.8.3 diff --git a/src/gausskernel/dbmind/tools/requirements-x86.txt b/src/gausskernel/dbmind/tools/requirements-x86.txt new file mode 100644 index 000000000..64b206871 --- /dev/null +++ b/src/gausskernel/dbmind/tools/requirements-x86.txt @@ -0,0 +1,18 @@ +## For DBMind-core ## +paramiko==2.7.2 +numpy +scikit-learn +requests +sqlparse +fastapi +uvicorn +sqlalchemy +psycopg2-binary +pycryptodome +## For components ## +## Prometheus Exporter +pyyaml +prometheus-client +## X-Tuner ## +bayesian-optimization +ptable diff --git a/src/gausskernel/dbmind/tools/ai_server/__init__.py b/src/gausskernel/dbmind/tools/service/__init__.py similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/__init__.py rename to src/gausskernel/dbmind/tools/service/__init__.py diff --git a/src/gausskernel/dbmind/tools/service/dai.py b/src/gausskernel/dbmind/tools/service/dai.py new file mode 100644 index 000000000..48a3617cf --- /dev/null +++ b/src/gausskernel/dbmind/tools/service/dai.py @@ -0,0 +1,208 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. + +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""Data Access Interface (DAI): + + - Wraps all data-fetching operations from different sources; + - The module is the main entry point for all data; + - The data obtained from here is guaranteed to be in a clean and uniform format; + - The data has already been preprocessed here. +""" +import logging +from abc import abstractmethod +from datetime import timedelta, datetime +from typing import List + +from dbmind import global_vars +from dbmind.common.tsdb import TsdbClientFactory +from dbmind.common.types import Sequence +from dbmind.common.types.misc import SlowQuery +from dbmind.metadatabase import dao + +# Notice: 'DISTINGUISHING_INSTANCE_LABEL' is a magic string, i.e., a name of our own choosing. +# Thus, not all collection agents (such as Prometheus's openGauss-exporter) +# distinguish different instance addresses through this label. +# Relying on it is, for now, a risky assumption on our side. +DISTINGUISHING_INSTANCE_LABEL = 'from_instance' + + +class _AbstractLazyFetcher: + def __init__(self): + # The default filter should contain some labels (or tags) + # from the user's config. Otherwise, lots of data streams that + # we do not need would be fetched from the remote time-series database. + self.labels = dict.copy(global_vars.must_filter_labels or {}) + self.rv = None + + def filter(self, **kwargs): + self.labels.update(kwargs) + return self + + def from_server(self, host): + self.labels[DISTINGUISHING_INSTANCE_LABEL] = host + return self + + def fetchall(self): + self.rv = self._real_fetching_action() + return self.rv + + def fetchone(self): + self.rv = self.rv or self._real_fetching_action() + # If the list still has un-popped elements, return the next one; + # otherwise return an empty Sequence. + try: + return self.rv.pop(0) + except IndexError: + return Sequence() + + @abstractmethod + def _real_fetching_action(self) -> List[Sequence]: + """Abstract interface that sub-classes only need + to implement. + """ + + +class SequenceUtils: + @staticmethod + def from_server(s: Sequence): + return s.labels.get(DISTINGUISHING_INSTANCE_LABEL) + + +# TODO: add reverse mapper. +def _map_metric(metric_name, to_internal_name=True): + """Use metric_map.conf to map the given metric_name + so as to adapt to the differing metric names from different collectors. + """ + if global_vars.metric_map is None: + logging.warning( + 'Cannot map the given metric since global_vars.metric_map is NoneType.' + ) + return metric_name + return global_vars.metric_map.get(metric_name, metric_name).strip() + + +def get_metric_type(metric_name): + """Dummy""" + return None
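To make the lazy-fetcher contract above concrete before the query helpers that follow, here is a hedged usage sketch; the metric name, instance address, and time window are illustrative, and it assumes a TSDB is reachable behind TsdbClientFactory:

# Illustrative only: fetch the last hour of a metric for one instance.
from datetime import datetime, timedelta

end = datetime.now()
start = end - timedelta(hours=1)
fetcher = get_metric_sequence('os_cpu_usage', start, end).from_server('127.0.0.1:9187')
for sequence in fetcher.fetchall():
    # Each Sequence carries its labels; SequenceUtils recovers the instance address.
    print(SequenceUtils.from_server(sequence), len(sequence.timestamps))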
+ + +def get_metric_sequence(metric_name, start_time, end_time): + """Get the monitoring sequence from the time-series database + between start_time and end_time.""" + + # TODO: step + # step = auto_estimate_step(...) + class _Abstract_LazyFetcherImpl(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return TsdbClientFactory.get_tsdb_client() \ + .get_metric_range_data(metric_name=_map_metric(metric_name), + label_config=self.labels, + start_time=start_time, + end_time=end_time) + + return _Abstract_LazyFetcherImpl() + + +def get_latest_metric_sequence(metric_name, minutes): + """Get the monitoring sequence from the time-series database for + the last `minutes` minutes.""" + # TODO: the datetime is not always the same as the server's, so we + # should implement a sync mechanism. + start_time = datetime.now() - timedelta(minutes=minutes) + end_time = datetime.now() + return get_metric_sequence(metric_name, start_time, end_time) + + +def get_latest_metric_value(metric_name): + class _Abstract_LazyFetcherImpl(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return TsdbClientFactory.get_tsdb_client() \ + .get_current_metric_value( + metric_name=metric_name, + label_config=self.labels + ) + + return _Abstract_LazyFetcherImpl() + + +def save_forecast_sequence(metric_name, host, sequence): + dao.forecasting_metrics.batch_insert_forecasting_metric( + metric_name, host, sequence.values, sequence.timestamps, + metric_type=get_metric_type(metric_name), + node_id=None + ) + + +def save_slow_queries(slow_queries): + for slow_query in slow_queries: + dao.slow_queries.insert_slow_query( + schema_name=slow_query.schema_name, + db_name=slow_query.db_name, + query=slow_query.query, + start_at=slow_query.start_at, + duration_time=slow_query.duration_time, + hit_rate=slow_query.hit_rate, fetch_rate=slow_query.fetch_rate, + cpu_time=slow_query.cpu_time, data_io_time=slow_query.data_io_time, + root_cause=slow_query.root_causes, suggestion=slow_query.suggestions, + template_id=slow_query.template_id + ) + + +def get_all_slow_queries(minutes): + slow_queries = [] + sequences = get_latest_metric_sequence('pg_sql_statement_history_exc_time', minutes).fetchall() + for sequence in sequences: + from_instance = SequenceUtils.from_server(sequence) + db_host, db_port = from_instance.split(':') + db_name = sequence.labels['datname'] + schema_name = sequence.labels['schema'].split(',')[-1] \ + if ',' in sequence.labels['schema'] else sequence.labels['schema'] + query = sequence.labels['query'] + start_timestamp = int(sequence.labels['start_time']) + duration_time = int(sequence.labels['finish_time']) - int(sequence.labels['start_time']) + hit_rate = round(float(sequence.labels['hit_rate']), 4) + fetch_rate = round(float(sequence.labels['fetch_rate']), 4) + cpu_time = round(float(sequence.labels['cpu_time']), 4) + data_io_time = round(float(sequence.labels['data_io_time']), 4) + sort_count = round(float(sequence.labels['sort_count']), 4) + sort_spill_count = round(float(sequence.labels['sort_spill_count']), 4) + sort_mem_used = round(float(sequence.labels['sort_mem_used']), 4) + hash_count = round(float(sequence.labels['hash_count']), 4) + hash_spill_count = round(float(sequence.labels['hash_spill_count']), 4) + hash_mem_used = round(float(sequence.labels['hash_mem_used']), 4) + template_id = sequence.labels['unique_query_id'] + lock_wait_count = int(sequence.labels['lock_wait_count']) + lwlock_wait_count = int(sequence.labels['lwlock_wait_count']) + n_returned_rows = int(sequence.labels['n_returned_rows']) + n_tuples_returned = int(sequence.labels['n_tuples_returned']) + n_tuples_fetched = int(sequence.labels['n_tuples_fetched']) + n_tuples_inserted = int(sequence.labels['n_tuples_inserted']) + 
n_tuples_updated = int(sequence.labels['n_tuples_updated']) + n_tuples_deleted = int(sequence.labels['n_tuples_deleted']) + slow_sql_info = SlowQuery( + db_host=db_host, db_port=db_port, + schema_name=schema_name, db_name=db_name, query=query, + start_timestamp=start_timestamp, duration_time=duration_time, + hit_rate=hit_rate, fetch_rate=fetch_rate, + cpu_time=cpu_time, data_io_time=data_io_time, + sort_count=sort_count, sort_spill_count=sort_spill_count, + sort_mem_used=sort_mem_used, hash_count=hash_count, + hash_spill_count=hash_spill_count, hash_mem_used=hash_mem_used, + template_id=template_id, lock_wait_count=lock_wait_count, + lwlock_wait_count=lwlock_wait_count, n_returned_rows=n_returned_rows, + n_tuples_returned=n_tuples_returned, n_tuples_fetched=n_tuples_fetched, + n_tuples_inserted=n_tuples_inserted, n_tuples_updated=n_tuples_updated, + n_tuples_deleted=n_tuples_deleted + ) + + slow_queries.append(slow_sql_info) + return slow_queries diff --git a/src/gausskernel/dbmind/tools/tests/__init__.py b/src/gausskernel/dbmind/tools/tests/__init__.py new file mode 100644 index 000000000..f9e2f2eac --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +def auto_test_all(): + pass diff --git a/src/gausskernel/dbmind/tools/tests/features b/src/gausskernel/dbmind/tools/tests/features new file mode 100644 index 000000000..b824f00b6 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/features @@ -0,0 +1,3 @@ +1,0,1,0,0,1 +0,1,1,0,0,2 +0,0,1,0,1,3 \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/tests/test_arima.py b/src/gausskernel/dbmind/tools/tests/test_arima.py new file mode 100644 index 000000000..b4453185f --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_arima.py @@ -0,0 +1,121 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +DATA = [13313.158424272488, 13325.379505621688, 13334.55192625661, 13340.650475363756, 13343.772205687826, 13344.39494047619, 13344.166964285712, 13344.142559523809, 13343.943303571428, 13343.560714285712, 13343.174553571429, 13342.948363095235, 13342.965029761901, 13343.095238095237, 13343.270982142858, 13343.376488095237, 13343.037648809524, 13342.243898809522, 13341.172321428574, 13339.748809523811, 13338.070238095237, 13336.10386904762, 13333.81964285714, 13331.286607142858, 13328.519345238095, 13325.544791666664, 13322.711607142852, 13320.207142857138, 13317.903720238095, 13315.871726190475, 13314.125744047617, 13312.851636904761, 13312.084226190475, 13311.308035714283, 13310.231547619045, 13309.215922619045, 13308.33898809524, 13307.434523809521, 13306.375744047617, 13304.958035714286, 13303.59315476191, 13302.31770833333, 13300.47857142857, 13298.399107142857, 13296.480059523805, 13294.53616071428, 13292.734375, 13291.083482142858, 13289.590476190471, 13288.41071428571, 13287.041964285718, 13285.283928571425, 13283.716220238091, 13282.55595238095, 13281.691666666664, 13280.99270833333, 13280.24151785714, 13279.468898809526, 13278.72395833333, 13278.082291666666, 13277.523214285713, 13277.015476190478, 13276.663392857141, 13276.383482142857, 13276.166666666666, 13275.988541666664, 13275.77008928571, 13275.407291666668, 13274.845982142855, 13273.955357142855, 13272.837648809527, 13271.57113095238, 13270.091071428571, 13268.563541666666, 13267.088988095235, 13265.841220238095, 13264.935565476188, 13264.527976190477, 13264.542261904764, 13264.464136904762, 13264.080803571424, 13263.694047619052, 13263.600446428567, 13263.605803571425, 13263.940178571425, 13264.902380952382, 13266.397172619047, 13268.294642857145, 13270.566220238092, 13274.152976190475, 13279.630505952378, 13286.531696428574, 13294.213988095236, 13302.325000000003, 13310.271577380947, 13317.422321428568, 13323.682291666666, 13329.11577380952, 13333.991964285715, 13337.979166666666, 13340.577232142854, 13341.788244047619, 13341.89851190476, 13341.434226190471, 13340.768154761907, 13340.294196428571, 13340.397619047617, 13341.264583333339, 13342.870684523808, 13345.116964285713, 13347.972172619046, 13351.45982142857, 13355.472023809527, 13359.7650297619, 13364.219642857139, 13368.722023809523, 13373.027827380947, 13376.583482142854, 13378.945535714287, 13380.194791666663, 13380.546279761906, 13380.124999999996, 13379.115773809524, 13377.801041666666, 13376.358482142858, 13374.684672619049, 13372.664285714287, 13370.58199404762, 13368.98095238095, 13368.156696428568, 13367.36011904762, 13366.921726190478, 13367.512351190475, 13368.308035714286, 13369.347916666664, 13371.615624999999, 13376.15, 13383.320684523813, 13392.714880952379, 13403.687648809524, 13416.508035714283, 13430.16696428571, 13443.325446428567, 13456.327678571426, 13468.703273809524, 13479.399404761902, 13487.458928571426, 13492.454166666666, 13494.663392857145, 13494.561607142854, 13492.695982142857, 13490.061309523808, 13487.750297619046, 13486.898214285715, 13488.561904761906, 13493.509821428568, 13502.421279761904, 13515.86324404762, 13533.77767857143, 13555.543898809525, 13580.324999999997, 13607.150148809518, 13635.018005952385, 13662.777827380956, 13689.603571428568, 13714.708482142854, 13737.083482142854, 13756.244940476188, 13771.951785714284, 13784.20952380952, 13793.897172619047, 13801.576041666669, 13807.534226190475, 13812.291220238096, 13816.649255952381, 13821.848511904758, 13828.61279761905, 13836.977232142857, 13847.625297619046, 
13861.190327380953, 13877.038690476193, 13894.795684523808, 13914.85505952381, 13937.397172619048, 13961.658779761903, 13986.452232142858, 14011.390178571428, 14036.525595238092, 14061.457589285716, 14085.998363095237, 14110.643898809523, 14135.863690476193, 14161.37574404762, 14186.556994047614, 14211.026636904762, 14234.549404761901, 14256.943303571425, 14277.708035714286, 14296.433928571425, 14313.012351190471, 14327.29211309524, 14339.24107142857, 14349.068154761904, 14357.257291666667, 14364.342708333332, 14370.73377976191, 14376.81889880952, 14383.2025297619, 14390.216815476191, 14397.930803571426, 14406.515922619046, 14415.952083333339, 14426.223363095238, 14437.362202380953, 14449.35952380952, 14462.113988095238, 14475.29761904762, 14488.65848214286, 14502.298958333335, 14516.313392857139, 14530.65074404762, 14545.09389880952, 14559.555357142857, 14574.23526785714, 14589.45357142857, 14605.211904761907, 14621.178720238095, 14637.254613095234, 14653.54345238095, 14669.977678571426, 14686.451636904765, 14703.189880952381, 14720.225446428572, 14737.39925595238, 14754.760416666666, 14772.59806547619, 14791.632589285713, 14812.290327380953, 14834.473214285712, 14858.038244047617, 14882.67782738095, 14908.110119047617, 14933.915922619042, 14959.50238095238, 14984.126636904759, 15007.050148809523, 15027.637648809517, 15045.44538690476, 15060.349255952378, 15072.510863095236, 15082.21949404762, 15089.699702380953, 15095.428869047619, 15099.834374999997, 15103.268154761907, 15106.38288690476, 15109.955357142855, 15114.593452380956, 15120.229613095235, 15126.716369047617, 15134.14151785714, 15142.681101190468, 15152.415029761902, 15163.333630952377, 15175.671875, 15189.438095238093, 15204.106249999997, 15218.789732142857, 15233.133630952381, 15246.67693452381, 15258.872767857138, 15270.139880952382, 15281.143303571429, 15291.784672619044, 15301.26651785714, 15309.190625, 15315.767113095235, 15321.41770833333, 15326.505803571425, 15331.566964285716, 15337.183184523808, 15342.700148809521, 15347.02648809524, 15350.548065476189, 15354.367261904761, 15358.566220238095, 15362.85119047619, 15367.223660714286, 15371.515178571432, 15375.642261904759, 15379.529315476186, 15383.201785714286, 15386.859672619046, 15390.209821428572, 15392.69985119048, 15394.486458333333, 15395.803720238093, 15396.698363095236, 15397.530952380952, 15398.492857142857, 15399.67083333333, 15400.994047619042, 15402.438839285714, 15404.061011904761, 15405.863690476188, 15407.977529761903, 15410.327678571428, 15412.723511904758, 15415.191517857138, 15417.73363095238, 15420.125744047617, 15422.445982142857, 15424.745684523808, 15426.866964285713, 15428.677976190475, 15430.05431547619, 15431.10803571428, 15432.006547619048, 15432.667261904762, 15433.298511904759, 15434.141964285709, 15435.679613095237, 15438.15535714286, 15441.207291666666, 15445.281845238093, 15450.577380952385, 15456.86369047619, 15464.26979166667, 15472.922172619048, 15482.332440476188, 15492.032589285714, 15501.342261904756, 15509.888541666665, 15518.028273809521, 15525.119791666666, 15530.78199404762, 15535.422023809524, 15539.04538690476, 15541.429315476193, 15542.781994047615, 15543.502380952381, 15543.840029761906, 15543.946428571422, 15543.887797619049, 15543.985267857144, 15544.408482142855, 15544.99449404762, 15545.732589285715, 15546.693749999999, 15547.888095238095, 15549.268452380948, 15550.61994047619, 15552.013095238095, 15553.640327380946, 15555.312053571422, 15556.983184523813, 15558.624553571426, 15560.060565476188, 15561.209672619048, 
15562.166071428568, 15563.494494047614, 15565.71532738095, 15568.062797619052, 15569.552380952377, 15570.104464285712, 15569.715624999999, 15568.467410714284, 15566.522321428572, 15564.205059523809, 15561.93720238095, 15559.513392857141, 15556.741517857141, 15554.275892857138, 15552.850297619048, 15552.659970238095, 15553.411309523814, 15554.22738095238, 15554.74107142857, 15555.260714285712, 15555.676190476193, 15555.659672619044, 15555.044345238097, 15553.837351190474, 15552.180803571428, 15550.125446428567, 15547.984375, 15546.572767857142, 15546.110416666663, 15546.041369047616, 15546.023809523806, 15546.057291666662, 15546.232440476188, 15546.613541666666, 15547.264732142854, 15548.308333333332, 15549.647916666667, 15551.319791666667, 15553.291666666662, 15555.345535714287, 15557.464732142857, 15559.720238095237, 15562.070833333333, 15564.358184523811, 15566.432440476188, 15568.188988095235, 15569.5837797619, 15570.365476190478, 15570.561011904756, 15570.442708333332, 15570.05044642857, 15569.28705357143, 15568.13288690476, 15566.762053571423, 15565.006250000002, 15562.756994047617, 15560.377976190475, 15558.168154761901, 15556.186607142854, 15554.411309523808, 15552.710863095237, 15550.91383928571, 15549.409672619046, 15548.438988095235, 15547.956845238097, 15547.936904761906, 15548.091964285712, 15548.263392857145, 15548.599107142853, 15549.326041666669, 15550.604017857138, 15552.568601190475, 15554.865178571428, 15557.122172619045, 15559.38333333333, 15561.647916666667, 15563.95833333333, 15566.378869047616, 15568.780357142854, 15570.87589285714, 15572.632440476193, 15574.178720238095, 15575.558779761906, 15576.858035714286, 15578.117410714283, 15579.331845238092, 15580.37797619047, 15581.14270833333, 15581.698660714286, 15582.25907738095, 15582.876339285713, 15583.425892857143, 15583.871130952379, 15584.277529761903, 15584.824851190475, 15585.566517857138, 15586.583482142858, 15588.034672619044, 15589.894047619046, 15591.79508928571, 15593.36949404762, 15594.59226190476, 15595.331845238092, 15595.348511904758, 15594.645833333332, 15593.445684523807, 15591.843154761906, 15589.646874999999, 15586.806547619051, 15583.589434523805, 15580.312053571426, 15577.285863095238, 15574.834970238095, 15573.048363095242, 15571.759374999996, 15570.880952380952, 15570.487202380948, 15570.707886904758, 15571.472619047614, 15572.735416666663, 15574.524851190477, 15576.551934523808, 15578.50282738095, 15580.474107142858, 15582.564434523809, 15584.61607142857, 15586.533928571425, 15588.324553571425, 15590.016815476185, 15591.560416666665, 15592.932142857142, 15594.273511904756, 15595.61994047619, 15596.79613095238, 15597.783928571429, 15598.617559523811, 15599.119791666666, 15599.328273809524, 15599.391964285707, 15599.390178571428, 15599.296875, 15598.951041666665, 15598.441220238092, 15597.880208333336, 15597.231994047619, 15596.49925595238, 15595.826041666665, 15595.117261904761, 15594.146577380956, 15592.805803571426, 15591.021130952382, 15588.772916666667, 15586.101934523813, 15583.168005952379, 15580.25431547619, 15577.53913690476, 15575.084523809519, 15573.08482142857, 15571.78125, 15571.26711309524, 15571.491369047619, 15572.359226190478, 15573.840624999999, 15575.79241071428, 15577.672023809526, 15579.37693452381, 15580.961309523811, 15582.147916666667, 15582.640178571426, 15582.377678571429, 15581.64151785714, 15580.71398809524, 15579.659375, 15578.553273809524, 15577.664285714283, 15576.913839285715, 15576.14925595238, 15575.511309523808, 15575.227976190477, 15575.299553571429, 
15575.488095238092, 15575.700744047619, 15575.863839285716, 15575.906845238094, 15575.868452380955, 15575.729017857142, 15575.503125, 15575.306398809524, 15575.28586309524, 15575.455208333333, 15575.602678571422, 15575.670238095237, 15575.700446428571, 15575.631994047617, 15575.362648809521, 15574.861607142857, 15574.125, 15572.990773809519, 15571.385416666668, 15569.36443452381, 15567.177976190475, 15564.77306547619, 15562.033928571429, 15559.391964285714, 15557.161607142858, 15555.356101190477, 15554.078125, 15553.448214285712, 15553.45773809524, 15554.032589285713, 15554.993898809522, 15556.383184523807, 15558.212946428574, 15559.99851190476, 15561.379464285712, 15562.378273809527, 15562.960863095233, 15563.126785714281, 15562.805952380948, 15562.028273809523, 15561.07157738095, 15560.047916666663, 15559.063244047617, 15558.320982142857, 15557.910267857143, 15557.792113095236, 15557.933184523808, 15558.294494047619, 15558.890178571426, 15559.713095238092, 15560.66845238095, 15561.740029761906, 15562.817857142858, 15563.510267857142, 15563.517261904764, 15563.156994047617, 15562.679910714283, 15561.994791666662, 15560.961011904761, 15559.401785714286, 15557.14806547619, 15553.876041666663, 15549.265773809524, 15543.75744047619, 15538.190922619051, 15532.786607142853, 15527.955803571425, 15524.149404761902, 15520.876190476185, 15517.493452380953, 15514.248809523811, 15511.874851190474, 15510.901339285716, 15511.18318452381, 15512.319940476187, 15513.841220238091, 15515.016071428574, 15515.572321428574, 15516.115773809524, 15517.452976190478, 15519.103720238092, 15520.038690476187, 15520.065029761907, 15519.385863095236, 15518.080952380955, 15516.403124999999, 15514.548065476189, 15512.400148809527, 15509.949553571425, 15507.25595238095, 15504.723363095238, 15502.922619047617, 15501.848660714284, 15501.344047619048, 15501.229017857144, 15501.170535714286, 15501.103124999996, 15501.019494047616, 15500.860565476194, 15500.618601190481, 15500.368750000001, 15500.132142857141, 15499.84300595238, 15499.536309523808, 15499.341666666665, 15499.208184523808, 15499.060267857141, 15499.02961309524, 15499.170684523808, 15499.417410714286, 15499.72857142857, 15500.096726190475, 15500.594047619046, 15501.159821428573, 15501.684821428567, 15502.259672619051, 15502.672321428574, 15502.942708333332, 15502.632589285715, 15501.410565476193, 15499.775744047616, 15497.518749999996, 15494.364285714284, 15490.34360119048, 15485.453720238094, 15479.96056547619, 15474.412053571428, 15468.758928571426, 15463.581696428573, 15459.400595238092, 15455.517857142857, 15452.061904761904, 15449.443154761899, 15447.637648809523, 15446.652232142857, 15446.335267857145, 15446.370386904762, 15446.619642857138, 15446.88973214286, 15447.003273809521, 15447.152083333332, 15447.401488095236, 15447.562053571426, 15447.548660714283, 15447.259970238092, 15446.642410714287, 15445.704464285718, 15444.470982142857, 15443.171279761904, 15441.876190476189, 15440.531696428574, 15439.375148809524, 15438.42529761905, 15437.629910714284, 15437.410416666664, 15437.952529761906, 15439.020089285712, 15439.964285714286, 15440.147916666667, 15440.026636904764, 15440.278869047623, 15440.953720238098, 15442.243303571426, 15444.304166666667, 15446.861458333333, 15449.544494047617, 15452.034672619044, 15454.633333333339, 15457.704315476189, 15460.870982142857, 15463.76577380952, 15466.3125, 15468.534226190475, 15470.536160714286, 15472.592708333335, 15474.842559523811, 15477.624702380952, 15481.24583333333, 15485.620833333332, 15490.476488095239, 
15495.353273809518, 15500.012648809517, 15504.309970238091, 15508.12946428571, 15511.180654761903, 15513.47157738095, 15515.148958333333, 15516.249255952387, 15516.936755952376, 15517.366517857141, 15517.619494047616, 15517.633333333335, 15517.366369047617, 15516.716220238091, 15515.779315476188, 15514.764880952385, 15513.800148809521, 15512.948660714286, 15512.25431547619, 15511.90044642857, 15512.067113095243, 15512.773958333333, 15514.171279761898, 15516.494196428563, 15519.668303571429, 15523.53095238095, 15527.914880952381, 15532.633482142857, 15537.554910714287, 15542.37991071428, 15547.106547619049, 15552.014434523811, 15557.027678571425, 15561.981398809527, 15566.695386904761, 15570.938839285713, 15574.738988095234, 15578.087053571428, 15581.511607142855, 15585.53764880952, 15589.545089285712, 15593.03467261905, 15596.098511904762, 15598.763839285715, 15600.886755952377, 15602.449404761906, 15603.56339285714, 15604.353422619046, 15604.350744047617, 15603.241369047617, 15601.665327380953, 15599.968750000005, 15597.96220238095, 15595.660416666664, 15593.387648809523, 15591.397767857141, 15589.601785714283, 15588.039880952381, 15586.794345238097, 15585.745684523808, 15584.849851190478, 15584.115773809524, 15583.603125000005, 15583.363095238097, 15583.44419642857, 15583.718303571433, 15584.218750000002, 15584.973065476188, 15585.772470238095, 15586.534226190477, 15587.232142857141, 15587.886458333336, 15588.424553571425, 15588.68601190476, 15588.514880952382, 15588.155803571432, 15587.820535714287, 15587.514880952382, 15587.565476190475, 15588.20476190476, 15589.309226190475, 15590.644642857143, 15592.210267857145, 15594.114880952384, 15596.34300595238, 15598.612053571429, 15600.92931547619, 15603.207886904762, 15605.00163690476, 15606.15892857143, 15606.980208333332, 15607.839583333336, 15608.718601190478, 15609.413988095237, 15610.091964285715, 15610.986160714283, 15611.685863095237, 15612.153422619045, 15612.72395833333, 15613.382440476193, 15613.96696428571, 15614.330803571429, 15614.50089285714, 15614.642410714283, 15614.716964285712, 15614.646875000002, 15614.54702380952, 15614.352827380953, 15614.037499999999, 15613.733928571428, 15613.334226190478, 15612.724255952382, 15612.06919642857, 15611.451190476193, 15610.811607142858, 15610.172172619052, 15609.630803571425, 15609.302529761904, 15609.150744047616, 15609.039434523807, 15608.941666666666, 15608.91532738095, 15608.891220238094, 15608.914434523811, 15609.128720238092, 15609.616220238093, 15610.376636904759, 15611.444047619045, 15612.802529761904, 15614.302083333336, 15615.946428571428, 15617.704464285714, 15619.490624999997, 15621.122916666669, 15622.545535714284, 15623.706994047614, 15624.541369047616, 15625.10119047619, 15625.36279761905, 15625.53110119048, 15625.80669642857, 15626.149107142859, 15626.517410714287, 15627.02232142857, 15627.679613095233, 15628.368303571428, 15629.088244047614, 15629.769791666666, 15630.360714285713, 15630.758333333333, 15630.933928571429, 15631.044940476188, 15631.243154761903, 15631.54107142857, 15631.861160714287, 15632.162797619048, 15632.445982142857, 15632.747023809521, 15633.17544642857, 15633.820089285715, 15634.649553571426, 15635.755505952384, 15637.148809523813, 15638.706398809523, 15640.315476190475, 15641.905654761904, 15643.589285714286, 15645.289583333328, 15646.72380952381, 15647.947619047614, 15649.11220238095, 15650.061309523811, 15650.673511904759, 15650.996279761903, 15651.290625000005, 15651.849851190475, 15652.327678571432, 15652.572767857142, 15653.036607142858, 
15653.641815476187, 15654.04211309524, 15654.285416666666, 15654.466517857145, 15654.73035714286, 15654.855357142857, 15654.610416666672, 15654.356250000003, 15654.294047619045, 15654.225297619045, 15654.19970238095, 15654.470535714287, 15654.837053571428, 15655.154910714284, 15655.36369047619, 15655.591666666669, 15655.977678571431, 15656.307291666662, 15656.541964285712, 15656.690922619046, 15656.703869047617, 15656.572023809518, 15656.451041666669, 15656.430208333339, 15656.42619047619, 15656.325892857141, 15656.086160714287, 15655.86160714286, 15655.64657738095, 15655.399107142857, 15655.17857142857, 15654.977678571428, 15654.798214285714, 15654.631547619045, 15654.51919642857, 15654.450744047614, 15654.50267857143, 15654.651785714286, 15654.82455357143, 15655.118750000005, 15655.558779761906, 15656.11160714286, 15656.78794642857, 15657.738690476193, 15659.06324404762, 15660.703273809526, 15662.441964285712, 15664.357142857145, 15666.57380952381, 15668.867857142852, 15671.070238095237, 15673.171130952382, 15675.274404761903, 15677.101041666669, 15678.37886904762, 15679.389583333335, 15680.337648809522, 15681.196577380953, 15682.014732142854, 15682.944642857141, 15684.131845238093, 15685.62083333333, 15687.165178571428, 15688.746428571429, 15690.511904761908, 15692.314434523809, 15694.015178571428, 15695.501339285714, 15696.720238095237, 15697.695684523807, 15698.321875, 15698.309821428571, 15697.474999999997, 15695.625892857144, 15692.55178571429, 15688.153720238097, 15682.597619047616, 15676.261011904762, 15669.500892857146, 15662.480208333336, 15655.21398809524, 15647.962797619048, 15641.10729166666, 15635.032291666666, 15629.843601190474, 15625.534970238092, 15622.001339285714, 15618.945535714283, 15616.04598214286, 15612.983184523806, 15609.666369047616, 15606.038839285715, 15601.91755952381, 15597.12738095238, 15591.476339285715, 15585.051636904764, 15578.203273809524, 15571.111607142855, 15563.848660714286, 15556.700595238091, 15549.76220238095, 15542.71994047619, 15535.484375, 15528.320982142857, 15521.927232142854, 15516.587351190481, 15512.025, 15508.034077380953, 15504.483779761904, 15501.227529761903, 15498.33125, 15496.090476190475, 15494.676785714284, 15493.986309523809, 15493.619940476188, 15493.30044642857, 15492.959374999999, 15492.621874999999, 15492.29523809524, 15491.997767857145, 15491.758035714283, 15491.447470238092, 15490.86443452381, 15489.899404761902, 15488.541666666662, 15486.766666666666, 15484.618898809524, 15482.147916666669, 15479.277529761906, 15476.041964285712, 15472.437202380952, 15468.265476190478, 15463.599999999997, 15458.705505952381, 15453.665476190474, 15448.541815476188, 15443.487797619051, 15438.598214285716, 15433.964434523808, 15429.52053571429, 15425.063988095235, 15420.720089285713, 15416.503571428575, 15412.290327380953, 15408.166220238096, 15404.287351190478, 15400.585714285718, 15397.124255952382, 15393.832142857142, 15390.53407738095, 15387.460416666672, 15384.644345238095, 15382.33005952381, 15380.70520833333, 15379.599107142854, 15378.76979166667, 15378.039880952385, 15377.244494047622, 15376.494791666666, 15375.950744047617, 15375.499107142858, 15375.328571428565, 15375.355803571429, 15375.399851190474, 15375.459226190478, 15375.524404761903, 15375.646279761906, 15375.79285714286, 15375.918750000003, 15376.052976190473, 15376.103422619048, 15375.931547619051, 15375.480357142857, 15374.713988095235, 15373.574255952379, 15371.979910714286, 15369.85282738095, 15367.12395833333, 15363.841815476193, 15359.927827380952, 
15355.388690476191, 15350.403869047619, 15344.997916666669, 15339.171577380952, 15333.109374999996, 15326.938839285709, 15320.768452380955, 15314.602529761903, 15308.234970238098, 15301.751785714287, 15295.226339285713, 15288.58824404762, 15282.000000000002, 15275.653124999993, 15269.845089285713, 15264.47470238095, 15259.14255952381, 15254.201934523808, 15250.13660714286, 15246.782291666666, 15244.185863095237, 15242.550297619046, 15241.622321428571, 15241.17767857143, 15240.926636904762, 15240.926041666664, 15241.533035714288, 15242.511011904764, 15243.527678571429, 15244.762946428567, 15246.181101190474, 15247.511458333338, 15248.774702380953, 15250.00238095238, 15251.109672619048, 15252.093601190472, 15252.857142857141, 15253.26279761904, 15253.32232142857, 15253.035267857142, 15252.386607142855, 15251.259375, 15249.728125000001, 15247.858928571433, 15245.638244047617, 15243.183928571429, 15240.624255952376, 15238.049999999996, 15235.470535714283, 15232.889434523808, 15230.351190476187, 15228.055357142857, 15225.993898809524, 15224.037500000002, 15222.125148809524, 15220.243303571431, 15218.443601190478, 15216.654017857145, 15214.789732142854, 15212.844196428567, 15210.82931547619, 15208.88288690476, 15207.333333333338, 15206.51681547619, 15206.364285714284, 15206.515178571428, 15206.862648809527, 15207.28065476191, 15207.713095238098, 15208.281845238094, 15208.98482142857, 15209.659077380955, 15210.005208333336, 15209.752976190475, 15209.113095238095, 15208.40595238095, 15207.612202380948, 15206.91369047619, 15206.445982142854, 15206.088541666672, 15205.788392857146, 15205.552232142858, 15205.41681547619, 15205.42306547619, 15205.479910714286, 15205.544345238097, 15205.660119047623, 15205.842261904761, 15206.024107142859, 15206.203422619046, 15206.463095238096, 15206.858630952382, 15207.305357142859, 15207.579315476194, 15207.62544642857, 15207.471726190475, 15207.220238095235, 15206.91636904762, 15206.561160714286, 15206.047470238093, 15205.22172619048, 15204.07931547619, 15202.518303571425, 15200.694940476196, 15198.77157738095, 15196.650595238098, 15194.192708333336, 15191.31324404762, 15188.111458333338, 15184.77053571428, 15181.585565476193, 15178.756398809524, 15176.324404761903, 15174.057738095236, 15171.81592261905, 15169.69776785715, 15167.766666666666, 15166.041666666666, 15164.41116071429, 15162.751785714287, 15160.901339285716, 15158.755505952382, 15156.59538690476, 15154.851041666667, 15153.743154761913, 15153.190178571425, 15153.00208333333, 15153.070089285718, 15153.341071428571, 15153.827529761904, 15154.52961309524, 15155.356696428571, 15156.203273809524, 15156.9625, 15157.4255952381, 15157.545386904765, 15157.298214285713, 15156.594494047622, 15155.364136904764, 15153.642261904759, 15151.448660714283, 15148.63898809524, 15145.285863095238, 15141.467857142852, 15137.324851190475, 15133.195535714287, 15129.28794642857, 15125.727380952381, 15122.768601190477, 15120.427827380952, 15118.727529761903, 15117.62991071429, 15116.789732142859, 15116.236011904764, 15116.031696428574, 15115.831994047618, 15115.502529761907, 15115.343154761904, 15115.422023809524, 15115.748363095234, 15115.991964285715, 15116.099851190475, 15116.313839285714, 15116.309672619049, 15115.90223214286, 15115.269642857143, 15114.493601190477, 15113.276190476194, 15111.558779761906, 15109.714136904759, 15108.231547619045, 15107.231994047617, 15106.579464285713, 15106.390624999996, 15106.66056547619, 15107.211160714287, 15108.017410714283, 15109.131994047611, 15110.492261904765, 15111.734672619046, 
15112.708035714286, 15113.576041666669, 15114.530654761904, 15115.610565476194, 15116.67202380952, 15117.722470238092, 15118.788244047622, 15119.820535714287, 15120.781696428576, 15121.634970238096, 15122.287499999997, 15122.751934523805, 15122.848809523808, 15122.452083333332, 15121.805208333333, 15121.040773809524, 15120.095089285718, 15119.071279761903, 15118.004761904758, 15116.622321428573, 15114.82395833333, 15112.588839285712, 15110.20386904762, 15107.854761904764, 15105.37068452381, 15102.70699404762, 15100.114136904762, 15097.719940476189, 15095.240178571425, 15092.936607142858, 15091.08229166667, 15089.464136904764, 15087.70357142857, 15085.58050595238, 15083.192708333332, 15080.7375, 15078.087648809522, 15075.013095238095, 15071.943154761906, 15069.01770833333, 15066.107142857145, 15063.380803571425, 15061.016964285709, 15059.157142857139, 15057.718601190472, 15056.534375000003, 15055.65461309524, 15055.362053571429, 15055.817410714284, 15057.257738095239, 15059.902232142857, 15063.747172619045, 15068.659523809521, 15074.372023809521, 15080.581845238095, 15087.121130952382, 15093.984672619048, 15100.929464285715, 15107.59449404762, 15113.487351190475, 15118.473363095234, 15122.61681547619, 15125.99107142857, 15128.872172619043, 15131.600148809524, 15134.488541666666, 15137.53645833333, 15140.673214285714, 15144.071279761907, 15147.94494047619, 15152.29136904762, 15157.106101190475, 15162.314285714288, 15167.752827380955, 15173.539732142859, 15179.769494047616, 15186.412946428567, 15193.454315476189, 15200.667559523816, 15207.746875000003, 15214.263988095237, 15219.937500000002, 15224.855952380953, 15229.269047619046, 15233.123214285712, 15236.110863095237, 15238.150744047616, 15239.438541666666, 15240.116071428576, 15240.345535714283, 15240.434970238091, 15240.585119047619, 15240.811904761904, 15240.956101190472, 15240.97157738095, 15240.928273809524, 15240.805654761903, 15240.754166666664, 15241.006845238095, 15241.626041666666, 15242.600595238095, 15243.846874999996, 15245.335863095237, 15247.069494047622, 15248.973958333336, 15251.035565476184, 15253.362202380953, 15255.861458333327, 15258.260416666672, 15260.554761904761, 15262.82648809524, 15265.12172619048, 15267.34389880952, 15269.37068452381, 15271.20431547619, 15272.777083333336, 15274.105803571425, 15275.046279761911, 15275.456994047616, 15275.30744047619, 15274.533928571429, 15273.0212797619, 15271.018898809521, 15268.960119047617, 15266.92470238095, 15265.038541666669, 15263.261160714288, 15261.428273809526, 15259.540029761903, 15257.61517857143, 15255.80476190476, 15254.409970238095, 15253.343005952376, 15252.318005952373, 15251.29732142857, 15250.285714285716, 15249.311011904765, 15248.51800595238, 15248.037499999999, 15247.809672619049, 15247.690773809521, 15247.580059523809, 15247.469345238094, 15247.372767857145, 15247.383482142854, 15247.501339285714, 15247.730952380953, 15248.04851190476, 15248.279315476191, 15248.424553571429, 15248.508184523811, 15248.60163690476, 15248.70357142857, 15248.806547619048, 15248.887351190475, 15248.940029761901, 15248.969494047617, 15248.981101190475, 15249.141666666666, 15249.468601190478, 15249.945386904765, 15250.548214285709, 15251.333184523808, 15252.327529761906, 15253.453571428574, 15254.590624999999, 15255.659375, 15256.822172619046, 15258.015029761906, 15259.028571428567, 15259.804464285715, 15260.299851190477, 15260.451934523808, 15260.262500000003, 15259.70238095238, 15258.783779761907, 15257.498065476193, 15255.675595238095, 15253.561904761906, 15251.49553571428, 
15249.531101190474, 15247.669940476191, 15245.90625, 15244.283184523809, 15242.882142857141, 15241.727529761907, 15240.86056547619, 15240.352232142854, 15240.05029761905, 15239.888988095237, 15239.790476190477, 15239.661011904762, 15239.534226190477, 15239.405059523806, 15239.22068452381, 15238.888392857145, 15238.374404761906, 15237.824255952379, 15237.315029761907, 15236.736458333333, 15236.234672619048, 15235.997470238091, 15235.96071428571, 15235.954761904759, 15235.956398809521, 15236.122172619049, 15236.537946428572, 15237.017113095237, 15237.488988095238, 15238.097767857143, 15238.711755952381, 15239.144642857147, 15239.466369047617, 15239.823958333332, 15240.341071428573, 15240.964583333332, 15241.603273809524, 15242.19181547619, 15242.610565476194, 15242.796875000002, 15242.706845238095, 15242.233779761904, 15241.333779761906, 15240.14553571429, 15238.622916666669, 15236.724702380947, 15234.52306547619, 15232.032291666666, 15229.351636904765, 15226.64285714286, 15224.071874999996, 15221.815178571429, 15219.93764880953, 15218.309672619049, 15216.896875000002, 15215.744047619048, 15214.881845238093, 15214.377529761901, 15214.08273809524, 15213.789880952381, 15213.46369047619, 15213.016220238094, 15212.364136904764, 15211.461309523813, 15210.322619047616, 15208.962797619048, 15207.320386904761, 15205.455952380953, 15203.553720238093, 15201.67261904762, 15199.830803571429, 15198.190773809523, 15196.76904761905, 15195.646726190475, 15194.838244047622, 15194.366666666669, 15194.313690476194, 15194.55327380952, 15195.102380952381, 15195.965178571425, 15197.090476190475, 15198.505952380952, 15200.26130952381, 15202.192113095238, 15204.220982142857, 15206.281547619046, 15208.173660714283, 15209.866369047617, 15211.129166666666, 15211.89002976191, 15212.189732142857, 15211.843750000002, 15210.705208333333, 15208.86875, 15206.459374999999, 15203.589583333332, 15200.313988095235, 15196.539434523807, 15192.482738095237, 15188.304017857141, 15183.966517857145, 15179.614136904758, 15175.490476190475, 15171.663095238095, 15168.110119047615, 15164.824255952377, 15161.887053571429, 15159.411904761906, 15157.319047619048, 15155.42857142857, 15153.744345238096, 15152.276339285716, 15150.947172619048, 15149.80818452381, 15148.927976190478, 15148.256994047622, 15147.73333333333, 15147.380952380952, 15147.15625, 15147.06116071428, 15147.039136904761, 15147.029910714284, 15146.96994047619, 15146.675892857142, 15145.970386904764, 15144.803422619048, 15143.18869047619, 15141.232589285715, 15139.09523809524, 15136.853273809524, 15134.501339285713, 15132.033035714283, 15129.48511904762, 15126.708779761904, 15123.652232142857, 15120.42023809524, 15117.165922619046, 15113.925148809523, 15110.585863095239, 15107.14404761905, 15103.570386904761, 15099.779017857141, 15095.85595238095, 15091.941071428573, 15088.062053571428, 15083.881547619048, 15079.159226190475, 15074.148809523807, 15069.051785714286, 15063.843303571428, 15058.61279761905, 15053.53467261905, 15048.56264880953, 15043.882291666669, 15039.652529761901, 15036.089285714283, 15033.252083333333, 15030.77782738095, 15028.405505952376, 15026.088095238092, 15023.950892857147, 15022.031994047617, 15020.277083333332, 15018.59955357143, 15016.942410714284, 15015.247023809521, 15013.511011904759, 15011.670833333332, 15009.797767857139, 15008.071726190481, 15006.236755952375, 15004.009970238092, 15001.406845238093, 14998.375000000002, 14995.001488095237, 14991.535416666671, 14988.029910714284, 14984.582589285716, 14981.110714285714, 14977.42648809524, 
14973.711309523811, 14970.175892857138, 14966.869196428574, 14963.871130952382, 14961.073065476185, 14958.234523809524, 14955.314434523809, 14952.402380952377, 14949.7962797619, 14947.787797619043, 14946.396726190475, 14945.45550595238, 14944.810565476188, 14944.371279761901, 14944.093005952376, 14944.12321428571, 14944.535416666671, 14945.180208333333, 14945.885714285712, 14946.440327380953, 14946.786011904758, 14946.985119047617, 14947.043005952382, 14946.943749999999, 14946.769196428568, 14946.355505952382, 14945.50431547619, 14944.36398809524, 14942.942708333332, 14941.275297619048, 14939.413690476193, 14937.268749999996, 14934.931994047616, 14932.515476190478, 14930.050148809523, 14927.729315476188, 14925.677529761906, 14923.832291666664, 14922.128571428568, 14920.43839285714, 14918.782589285714, 14917.3119047619, 14915.98303571429, 14914.736160714288, 14913.423809523809, 14912.013839285715, 14910.614880952378, 14909.062351190474, 14907.283333333335, 14905.418154761903, 14903.384226190474, 14901.05476190476, 14898.330654761903, 14895.283333333333, 14892.052232142854, 14888.600297619043, 14884.965476190475, 14881.309970238095, 14877.763839285712, 14874.356994047623, 14871.150148809522, 14868.30327380952, 14866.001934523809, 14864.167559523808, 14862.737500000003, 14861.73958333333, 14861.09226190476, 14860.726488095233, 14860.618749999994, 14860.719791666663, 14860.87336309524, 14860.933928571429, 14860.867857142857, 14860.590773809525, 14860.100744047622, 14859.505505952382, 14858.744642857142, 14857.75505952381, 14856.45744047619, 14854.796279761906, 14852.888392857145, 14850.881696428572, 14848.744196428568, 14846.526339285712, 14844.235267857144, 14841.654166666665, 14839.025, 14836.726488095237, 14834.96994047619, 14833.670833333332, 14832.509374999996, 14831.393005952381, 14830.255505952378, 14829.192708333332, 14828.433779761901, 14828.145833333332, 14828.300148809523, 14829.017857142857, 14829.747470238091, 14829.756547619048, 14829.414285714282, 14828.889732142858, 14828.007291666669, 14827.006845238095, 14826.022023809524, 14824.61770833333, 14822.59940476191, 14819.986309523805, 14817.309672619045, 14815.323214285712, 14813.916220238096, 14812.806547619046, 14812.185714285715, 14811.789732142854, 14811.259821428568, 14810.955208333333, 14810.922619047617, 14810.735714285718, 14810.234523809524, 14809.545684523811, 14808.814285714288, 14808.238095238092, 14807.677678571423, 14807.033928571424, 14806.520684523808, 14806.241666666667, 14806.18601190476, 14806.12276785714, 14805.919196428573, 14805.506101190478, 14804.766517857144, 14803.73720238095, 14802.691517857142, 14801.71532738095, 14800.550595238092, 14799.14270833333, 14797.503720238092, 14795.879761904758, 14794.53199404762, 14793.589136904759, 14793.13556547619, 14793.181845238096, 14793.67038690476, 14794.273214285715, 14794.987648809523, 14795.839434523808, 14796.638392857141, 14797.28363095238, 14797.611607142851, 14797.384374999996, 14796.644494047623, 14795.484374999996, 14793.835714285715, 14792.004017857138, 14790.02306547619, 14787.805357142854, 14785.594791666666, 14783.56681547619, 14781.826785714284, 14780.455059523809, 14779.33318452381, 14778.284523809521, 14777.366517857141, 14776.597470238095, 14776.017410714283, 14775.57038690476, 14775.085119047619, 14774.384672619042, 14773.460565476185, 14772.333928571425, 14771.00416666667, 14769.668898809521, 14768.388392857141, 14767.059077380953, 14765.612351190477, 14764.16383928571, 14762.708482142854, 14761.170535714286, 14759.602976190474, 
14758.120238095235, 14756.783630952381, 14755.375297619048, 14753.756845238093, 14752.09538690476, 14750.509970238096, 14749.013839285715, 14747.80773809524, 14747.060565476193, 14746.659672619046, 14746.462797619051, 14746.331845238092, 14746.167261904764, 14745.926339285712, 14745.55803571428, 14745.149404761907, 14744.757738095237, 14744.17276785714, 14743.306845238098, 14742.316220238095, 14741.309226190475, 14740.388095238091, 14739.527529761906, 14738.812946428574, 14738.39449404762, 14738.039732142857, 14737.502827380948, 14736.887053571429, 14736.354315476188, 14735.800892857138, 14735.154315476188, 14734.453124999996, 14733.842261904761, 14733.256249999997, 14732.489583333332, 14731.647172619047, 14730.900595238087, 14730.199851190471, 14729.376190476185, 14728.450744047619, 14727.586904761907, 14726.846874999996, 14726.174107142857, 14725.549107142855, 14724.922172619046, 14724.292708333332, 14723.762648809521, 14723.45684523809, 14723.333630952377, 14723.309226190471, 14723.252529761903, 14723.072172619042, 14722.79553571428, 14722.405208333332, 14722.078571428567, 14721.92633928571, 14721.854910714286, 14721.833928571425, 14721.960416666667, 14722.213392857138, 14722.540476190477, 14722.838541666662, 14723.083779761904, 14723.512797619045, 14724.118898809524, 14724.70669642857, 14725.179464285715, 14725.555803571426, 14725.880952380947, 14726.148214285715, 14726.352232142857, 14726.485714285713, 14726.449999999997, 14726.214732142855, 14725.85907738095, 14725.443452380947, 14724.887946428571, 14724.06666666666, 14723.012797619049, 14721.870833333332, 14720.778869047617, 14719.850595238093, 14719.212797619046, 14718.787053571428, 14718.441517857142, 14718.15595238095, 14718.12247023809, 14718.526339285712, 14719.288095238093, 14720.331547619046, 14721.549404761901, 14722.808630952379, 14723.915178571431, 14724.78020833333, 14725.405208333332, 14725.824107142851, 14725.994791666662, 14725.812202380946, 14725.336458333333, 14724.635416666662, 14723.838244047616, 14723.048214285709, 14722.415476190477, 14722.011309523808, 14721.715476190477, 14721.498809523808, 14721.387351190471, 14721.452232142858, 14721.73720238095, 14722.140624999996, 14722.56145833333, 14723.088244047616, 14723.715773809521, 14724.26532738095, 14724.806398809524, 14725.455654761903, 14726.122023809525, 14726.528869047619, 14726.590922619043, 14726.432738095235, 14726.232738095237, 14725.919345238091, 14725.533333333333, 14725.391666666663, 14725.34583333333, 14725.259970238098, 14725.278571428567, 14725.550446428568, 14726.046428571428, 14726.713988095235, 14727.351190476185, 14727.920535714286, 14728.49494047619, 14728.95595238095, 14729.42931547619, 14730.105505952379, 14731.092708333337, 14732.385416666666, 14733.733630952382, 14734.974702380952, 14736.184523809521, 14737.395535714284, 14738.545089285712, 14739.581249999996, 14740.5775297619, 14741.464583333336, 14741.974999999999, 14742.202083333332, 14742.624553571426, 14743.413392857145, 14744.47946428571, 14745.75431547619, 14747.201488095234, 14748.849107142858, 14750.619791666668, 14752.365773809524, 14754.081845238095, 14755.765922619048, 14757.325892857141, 14758.640178571422, 14759.653125000003, 14760.507589285713, 14761.331398809521, 14762.188839285714, 14763.029017857141, 14763.755059523808, 14764.44345238095, 14765.1462797619, 14765.794345238095, 14766.453273809519, 14767.156845238089, 14767.860863095237, 14768.487499999996, 14768.93675595238, 14769.280803571428, 14769.648660714283, 14770.04851190476, 14770.392261904759, 14770.64345238095, 
14770.805059523811, 14770.924107142855, 14770.848065476184, 14770.535416666666, 14770.100297619048, 14769.661607142854, 14769.29613095238, 14768.863392857142, 14768.420386904765, 14768.072172619048, 14767.698363095236, 14767.262797619045, 14766.803571428567, 14766.320238095232, 14765.79196428571, 14765.111607142855, 14764.363392857142, 14763.720982142857, 14763.084970238093, 14762.263095238091, 14761.42038690476, 14760.723958333336, 14760.29136904762, 14760.062499999996, 14759.833482142854, 14759.683630952377, 14759.577232142858, 14759.388541666665, 14759.130803571432, 14758.930952380953, 14758.831249999994, 14758.751339285713, 14758.552380952382, 14758.24419642857, 14757.950744047617, 14757.628422619044, 14757.244940476188, 14756.854464285712, 14756.476488095237, 14756.095684523809, 14755.651934523808, 14755.105654761903, 14754.51383928571, 14753.942261904758, 14753.472619047618, 14753.08973214286, 14752.687053571432, 14752.256101190475, 14751.808035714283, 14751.255654761906, 14750.473214285716, 14749.425297619046, 14748.073214285712, 14746.44761904762, 14744.639434523813, 14742.810565476188, 14741.103422619046, 14739.493750000001, 14737.976934523807, 14736.513690476188, 14735.179017857141, 14734.116071428567, 14733.380803571425, 14733.011904761903, 14732.814434523805, 14732.388690476188, 14731.508333333333, 14730.40773809524, 14729.494196428568, 14728.968303571432, 14728.903422619043, 14729.111607142855, 14729.39568452381, 14729.585119047619, 14729.523065476187, 14729.032886904755, 14728.661755952377, 14728.904166666665, 14729.065178571425, 14728.733779761904, 14728.137797619049, 14727.256398809524, 14726.03169642857, 14724.881994047617, 14723.985416666663, 14723.371279761901, 14722.637648809521, 14721.305357142854, 14719.745982142855, 14718.410416666666, 14717.074107142862, 14715.840922619045, 14714.987797619044, 14714.182589285716, 14713.409523809523, 14712.991964285713, 14712.809226190477, 14712.446279761903, 14711.529166666665, 14709.788541666667, 14707.490773809524, 14704.993750000001, 14702.307440476188, 14699.29970238095, 14696.015476190478, 14692.688095238093, 14689.612797619044, 14687.109672619048, 14685.368005952381, 14684.45089285714, 14684.117857142857, 14683.994494047616, 14684.025892857138, 14684.37663690476, 14685.015625, 14685.638690476191, 14685.999553571428, 14686.03467261905, 14685.742708333333, 14685.007142857141, 14683.891517857144, 14682.751636904759, 14681.554910714287, 14680.081101190472, 14678.543749999997, 14677.212946428568, 14676.028869047617, 14674.870386904762, 14673.788839285713, 14672.88139880952, 14671.891517857144, 14670.475297619048, 14668.938095238094, 14667.627827380955, 14666.334672619047, 14664.859226190474, 14663.307886904759, 14661.800892857142, 14660.447470238096, 14659.370238095236, 14658.693303571425, 14658.440476190477, 14658.408482142851, 14658.432738095236, 14658.475595238095, 14658.51681547619, 14658.46889880952, 14658.279315476191, 14657.930654761903, 14657.425148809523, 14656.806994047614, 14656.14181547619, 14655.35982142857, 14654.380952380952, 14653.37157738095, 14652.411755952382, 14651.549553571429, 14650.81101190476, 14650.174851190472, 14649.62782738095, 14649.195535714282, 14648.993898809527, 14648.979464285712, 14649.050595238092, 14649.112648809518, 14649.138392857141, 14649.057142857142, 14648.837202380953, 14648.571279761902, 14648.449702380947, 14648.541964285716, 14648.617261904761, 14648.67113095238, 14648.825892857145, 14649.04895833333, 14649.337053571431, 14649.716220238091, 14650.470684523805, 14651.810416666669, 
14653.557142857142, 14655.508184523807, 14657.414434523807, 14659.254315476188, 14661.1306547619, 14662.941220238095, 14664.559672619043, 14666.070535714283, 14667.210416666663, 14667.614880952378, 14667.308928571429, 14666.451488095234, 14665.294791666665, 14663.949107142857, 14662.290178571428, 14660.411160714286, 14658.51800595238, 14656.546726190472, 14654.413690476187, 14652.110119047617, 14649.639285714284, 14647.066964285716, 14644.499404761906, 14641.85863095238, 14639.097172619046, 14636.145535714286, 14633.103571428574, 14630.208928571423, 14627.494791666666, 14624.963541666666, 14622.595982142855, 14620.361607142855, 14618.203720238094, 14616.181398809524, 14614.489136904758, 14613.061755952376, 14611.436011904761, 14609.300297619046, 14606.709672619047, 14603.794345238091, 14600.51726190476, 14596.809077380953, 14592.509970238096, 14587.545684523808, 14582.032291666663, 14576.119940476194, 14570.161607142854, 14564.419494047619, 14558.95163690476, 14553.800744047614, 14549.071726190468, 14544.609375, 14540.511309523808, 14536.894791666664, 14533.596874999996, 14530.756845238098, 14528.404613095234, 14526.380505952378, 14524.591964285715, 14522.941517857142, 14521.34196428571, 14519.907738095237, 14518.646130952384, 14517.475148809519, 14516.343601190474, 14515.01383928571, 14513.377976190477, 14511.523065476185, 14509.42574404762, 14507.03556547619, 14504.53363095238, 14502.003869047618, 14499.471726190477, 14496.987499999996, 14494.583035714284, 14492.332291666671, 14490.216815476191, 14488.14107142857, 14486.048214285714, 14483.965773809521, 14481.729761904762, 14479.384375, 14477.068750000002, 14474.607291666665, 14471.888095238095, 14468.977678571422, 14465.941369047623, 14462.891071428567, 14459.94657738095, 14457.249702380946, 14455.42693452381, 14454.155803571426, 14452.599255952382, 14450.945982142854, 14449.394494047618, 14448.129017857138, 14447.162202380952, 14446.31294642857, 14445.517857142857, 14444.684226190475, 14443.220833333327, 14441.238244047618, 14439.502678571429, 14438.073660714286, 14436.853273809518, 14435.628720238095, 14434.384375, 14433.21696428571, 14432.133630952376, 14431.118154761903, 14430.110565476189, 14429.203422619046, 14428.363839285717, 14427.342113095237, 14425.960863095239, 14424.25863095238, 14422.481696428573, 14420.752083333335, 14419.023660714283, 14417.248363095234, 14415.447619047616, 14413.644345238092, 14411.703422619046, 14409.671726190478, 14407.85208333333, 14406.22931547619, 14404.40833333333, 14402.263244047623, 14399.963392857142, 14397.52306547619, 14394.852827380953, 14391.980803571429, 14389.093303571428, 14386.309970238091, 14383.475744047619, 14380.565922619046, 14377.736458333333, 14375.217559523806, 14373.013392857145, 14370.915327380948, 14368.836904761903, 14366.682589285716, 14364.540773809527, 14362.565922619046, 14360.841666666665, 14359.297321428565, 14357.827529761904, 14356.190625, 14354.393303571429, 14352.71964285714, 14351.29553571428, 14350.181398809518, 14349.241369047619, 14348.338095238096, 14347.421428571426, 14346.557589285712, 14345.795535714284, 14345.18318452381, 14344.580654761903, 14343.874553571432, 14343.05520833333, 14342.01800595238, 14340.684672619049, 14339.097023809523, 14337.32738095238, 14335.35863095238, 14333.229910714283, 14330.935119047619, 14328.528422619049, 14326.070684523807, 14323.609226190478, 14321.272619047617, 14319.111904761903, 14316.995386904762, 14314.887648809521, 14312.784077380953, 14310.700000000003, 14308.738244047618, 14306.985714285713, 14305.429910714287, 
14304.091369047617, 14302.87961309524, 14301.575446428573, 14300.20669642857, 14298.782589285718, 14297.349404761904, 14295.911904761906, 14294.309523809525, 14292.263095238095, 14289.686309523811, 14286.771428571428, 14283.719494047617, 14280.711904761902, 14277.870684523808, 14275.240922619047, 14272.790029761907, 14270.50282738095, 14268.419047619045, 14266.724999999997, 14265.514285714286, 14264.59821428571, 14263.777976190475, 14263.077380952382, 14262.432738095235, 14261.72247023809, 14261.007589285715, 14260.304613095237, 14259.526041666666, 14258.603720238092, 14257.534970238095, 14256.301190476193, 14254.895386904762, 14253.26339285714, 14251.44032738095, 14249.658482142857, 14247.895833333327, 14246.09002976191, 14244.359375000002, 14242.630803571432, 14240.876785714283, 14239.191517857142, 14237.523214285715, 14235.839583333336, 14234.209970238095, 14232.514434523808, 14230.669047619049, 14228.837053571428, 14227.070535714287, 14225.398660714287, 14223.79136904762, 14222.155952380954, 14220.423809523814, 14218.544940476188, 14216.522767857141, 14214.327083333332, 14212.037053571432, 14209.560119047619, 14206.696726190481, 14203.473363095238, 14200.010119047616, 14196.479761904759, 14193.14270833333, 14190.062797619048, 14187.131101190475, 14184.405803571428, 14181.922172619048, 14179.651488095236, 14177.718154761904, 14176.197172619046, 14175.019345238095, 14174.006696428576, 14173.047916666666, 14172.199255952379, 14171.49300595238, 14170.791517857144, 14170.030357142858, 14169.226488095233, 14168.281101190474, 14167.180654761903, 14165.919345238095, 14164.527380952379, 14163.041517857142, 14161.453422619052, 14159.804315476185, 14158.190773809521, 14156.653571428573, 14155.1462797619, 14153.671875, 14152.3494047619, 14151.330803571429, 14150.630505952382, 14150.218601190474, 14150.139880952382, 14150.354910714283, 14150.79330357143, 14151.459374999997, 14152.400446428574, 14153.49419642857, 14154.458928571423, 14155.255505952382, 14155.925, 14156.48645833333, 14156.89494047619, 14157.037499999999, 14156.918898809527, 14156.807142857142, 14156.656845238098, 14156.262946428576, 14155.746577380953, 14155.02470238095, 14153.988690476193, 14152.736607142857, 14151.41636904762, 14150.113095238095, 14148.802380952382, 14147.143749999996, 14145.18244047619, 14143.366071428572, 14141.745238095242, 14140.230208333342, 14138.82455357143, 14137.468005952382, 14136.001636904764, 14134.31755952381, 14132.455803571425, 14130.452678571426, 14128.35982142857, 14126.275297619051, 14124.122619047617, 14121.955357142855, 14119.884375000001, 14117.809523809525, 14115.801488095234, 14113.913690476187, 14112.082589285716, 14110.372470238091, 14108.79211309524, 14107.122767857141, 14105.247916666665, 14103.293452380953, 14101.336755952381, 14099.462648809518, 14097.720089285718, 14096.281398809524, 14095.292708333332, 14094.595386904759, 14093.79732142857, 14092.722470238097, 14091.47038690476, 14089.967113095237, 14087.98556547619, 14085.394940476188, 14082.223958333332, 14078.427976190478, 14073.548214285713, 14067.328422619048, 14060.087797619046, 14052.140624999996, 14043.874702380954, 14035.552678571428, 14027.495833333332, 14020.121726190475, 14013.429761904761, 14007.325148809527, 14002.223511904756, 13998.528720238095, 13996.197767857147, 13995.112202380953, 13995.015327380954, 13995.526785714286, 13996.228124999996, 13996.664136904761, 13996.677083333336, 13996.193898809524, 13995.165029761907, 13993.667261904762, 13991.74568452381, 13989.511011904768, 13986.970684523809, 13984.313690476185, 
13981.8912202381, 13979.909375000003, 13978.281845238098, 13977.106845238097, 13976.538839285715, 13976.466666666664, 13976.909672619046, 13977.877380952383, 13979.291815476188, 13981.00744047619, 13982.848660714286, 13984.804017857145, 13987.138541666665, 13989.85907738095, 13992.642261904759, 13995.200744047619, 13997.418601190477, 13999.195982142854, 14000.644196428573, 14001.779910714291, 14002.397321428576, 14002.378125, 14001.602678571431, 13999.966666666665, 13997.575892857147, 13994.680059523806, 13991.315029761912, 13987.606398809525, 13983.325297619049, 13978.351339285713, 13973.066220238097, 13967.602083333335, 13961.952678571426, 13956.382440476187, 13951.16755952381, 13946.424107142857, 13942.207440476188, 13938.469345238098, 13935.373065476191, 13933.125595238098, 13931.49866071429, 13930.327083333332, 13929.622916666669, 13929.195089285715, 13928.89285714286, 13928.689880952385, 13928.468601190478, 13928.132738095239, 13927.725595238095, 13927.217410714286, 13926.639434523806, 13926.052827380956, 13925.474255952378, 13925.006547619048, 13924.711904761907, 13924.573958333336, 13924.741369047617, 13925.10863095238, 13925.537946428576, 13926.172767857146, 13926.990922619047, 13927.829166666666, 13928.45163690476, 13928.617708333333, 13928.260119047616, 13927.193749999997, 13924.675595238095, 13921.054761904761, 13916.988839285716, 13912.204315476194, 13906.711011904765, 13900.525595238098, 13893.71994047619, 13886.38735119048, 13878.412499999997, 13869.963988095236, 13861.66160714286, 13853.291517857144, 13844.330654761903, 13834.90788690476, 13825.207142857142, 13815.660714285716, 13806.648363095239, 13798.418601190475, 13791.131696428576, 13784.72261904762, 13779.226041666669, 13774.678720238095, 13771.014732142858, 13768.139732142852, 13765.83854166666, 13763.881994047617, 13761.99032738095, 13759.942708333332, 13757.803720238098, 13755.72366071429, 13753.740178571428, 13751.826636904761, 13750.189880952383, 13749.052827380952, 13748.51636904762, 13748.573363095235, 13749.208482142853, 13750.460565476187, 13752.230505952382, 13754.598214285712, 13757.59360119048, 13760.957142857143, 13764.217559523811, 13767.052083333332, 13769.504315476193, 13771.633035714287, 13773.506250000004, 13775.190476190472, 13776.615624999999, 13777.594791666666, 13778.135267857138, 13778.564583333338, 13779.364880952382, 13781.065029761907, 13783.828571428568, 13787.89851190476, 13793.615178571425, 13800.988541666666, 13809.90282738095, 13820.25119047619, 13831.995833333332, 13844.960863095239, 13858.75758928571, 13872.810416666664, 13886.534374999997, 13899.224404761906, 13910.141220238089, 13918.696130952385, 13924.326785714284, 13926.398660714283, 13924.526041666662, 13918.584374999999, 13908.387797619045, 13894.130654761906, 13876.759821428574, 13857.463541666668, 13837.286011904762, 13817.214732142857, 13798.169791666665, 13780.98556547619, 13766.24300595238, 13754.311755952382, 13745.56666666667, 13740.122470238095, 13737.437797619048, 13736.83273809524, 13737.738095238097, 13739.557886904764, 13742.002529761909, 13744.885714285712, 13747.777083333338, 13750.175892857142, 13751.779613095243, 13752.341666666665, 13751.785267857142, 13749.963392857138, 13746.845833333333, 13742.460119047622, 13736.916815476194, 13730.446279761903, 13723.219196428574, 13715.61770833333, 13708.075892857145, 13700.915476190476, 13694.081696428573, 13687.617559523807, 13681.612648809518, 13676.218154761906, 13671.372916666665, 13667.044642857147, 13663.340624999999, 13660.270684523812, 13657.818154761906, 
13655.914136904761, 13654.716369047619, 13654.279017857145, 13654.591220238095, 13655.93392857143, 13658.605505952379, 13662.375744047617, 13666.72306547619, 13671.580654761903, 13677.043898809527, 13682.872172619049, 13688.84836309524, 13694.951636904761, 13701.034821428573, 13706.67276785714, 13711.707142857136, 13716.389434523811, 13721.139583333332, 13725.894494047618, 13730.461160714283, 13735.034077380953, 13739.78125, 13744.625297619043, 13749.31190476191, 13753.688392857146, 13757.665476190476, 13761.185565476193, 13764.31473214286, 13767.252232142855, 13770.04181547619, 13772.515029761906, 13774.431547619051, 13775.604761904762, 13776.179910714283, 13776.284226190475, 13775.723660714284, 13774.252083333333, 13771.889434523808, 13768.842559523808, 13765.311011904761, 13761.49717261905, 13757.430505952376, 13753.225446428567, 13749.0681547619, 13745.160714285712, 13741.726785714283, 13738.944345238093, 13736.83169642857, 13735.20386904762, 13733.913541666669, 13732.949999999999, 13732.715178571425, 13733.571279761909, 13735.677678571425, 13738.926488095234, 13742.903720238097, 13747.460714285713, 13752.573809523808, 13758.260267857147, 13764.560119047623, 13771.25386904762, 13777.949851190475, 13784.360119047617, 13790.170386904765, 13795.357291666669, 13800.28244047619, 13805.013392857141, 13809.493452380953, 13813.751041666666, 13817.649851190477, 13821.23645833333, 13824.53973214286, 13827.44464285714, 13830.050744047618, 13832.558928571429, 13834.945684523811, 13837.126339285714, 13839.28630952381, 13841.384523809527, 13843.308779761905, 13845.15148809524, 13847.01949404762, 13848.536755952382, 13849.501190476194, 13850.161607142863, 13850.561458333335, 13850.737499999997, 13850.615773809526, 13850.177083333336, 13849.52053571429, 13848.687202380956, 13847.711309523807, 13847.05297619048, 13846.825148809521, 13846.598660714284, 13846.391220238094, 13846.359375000002, 13846.495535714286, 13846.84494047619, 13847.74107142857, 13849.620386904762, 13852.943303571425, 13857.78556547619, 13863.434375000004, 13869.499553571433, 13876.122767857141, 13883.479017857142, 13891.61875, 13900.40074404762, 13909.430803571428, 13918.26651785714, 13926.52098214286, 13933.911755952384, 13941.15892857143, 13948.923511904764, 13957.037053571428, 13965.202827380954, 13973.166517857144, 13980.728571428574, 13987.71369047619, 13993.910863095243, 13999.243750000001, 14004.07648809524, 14008.698511904764, 14013.074702380956, 14017.328720238093, 14021.716666666664, 14026.394494047623, 14031.509821428575, 14037.197619047622, 14043.55178571429, 14050.475446428567, 14057.567559523808, 14064.709970238098, 14071.925744047618, 14078.911458333336, 14085.218750000002, 14090.547619047615, 14094.861904761907, 14098.318601190478, 14101.028571428573, 14103.043750000003, 14104.653422619045, 14105.745238095242, 14106.074851190475, 14105.880505952378, 14105.699107142857, 14106.365624999999, 14108.58318452381, 14112.76845238095, 14119.074702380956, 14126.995833333336, 14136.047321428574, 14146.290178571428, 14157.780505952382, 14170.445684523813, 14183.920089285717, 14197.541964285716, 14210.637946428573, 14222.63720238095, 14233.137946428573, 14242.466666666669, 14251.184970238095, 14259.39285714286, 14267.007440476193, 14273.805208333339, 14279.698660714288, 14284.557589285716, 14288.438541666666, 14291.674255952381, 14294.798809523809, 14298.413392857141, 14302.85848214286, 14308.239732142858, 14314.724107142858, 14322.599702380952, 14332.134523809524, 14343.70431547619, 14357.39211309524, 14372.792113095242, 
14389.435119047623, 14406.880357142858, 14424.566815476188, 14442.237053571425, 14459.68348214286, 14476.865773809519, 14493.796874999996, 14510.133482142859, 14525.538541666663, 14539.964583333336, 14553.297916666666, 14565.296428571428, 14575.977678571433, 14585.277976190475, 14593.183035714286, 14599.686755952376, 14604.72261904762, 14608.706696428568, 14612.173214285713, 14615.531547619046, 14619.225892857146, 14623.694494047617, 14629.263095238095, 14636.14970238095, 14644.470982142857, 14654.139136904767, 14665.140624999996, 14677.356547619045, 14690.58303571428, 14704.641964285714, 14719.384970238096, 14734.593750000002, 14750.181994047622, 14765.79851190476, 14780.851934523807, 14794.986458333338, 14808.158333333335, 14820.720833333333, 14833.119196428574, 14845.656845238094, 14858.381547619045, 14871.36101190476, 14884.579761904759, 14898.16845238095, 14912.520684523808, 14928.090476190471, 14945.01919642857, 14963.593005952385, 14983.988690476193, 15005.569196428565, 15027.592261904761, 15049.715922619045, 15071.847619047618, 15094.073214285712, 15116.419047619043, 15138.678422619045, 15160.593154761911, 15181.221726190475, 15199.53407738095, 15215.483482142863, 15229.482440476191, 15241.723511904762, 15252.185565476191, 15260.938541666668, 15268.203720238094, 15274.227827380953, 15279.354613095242, 15284.020089285717, 15288.671130952382, 15293.827232142858, 15299.945982142859, 15307.300148809527, 15315.90550595238, 15325.736904761907, 15336.881249999997, 15349.290773809522, 15362.52961309524, 15375.99181547619, 15389.399255952381, 15402.579910714287, 15415.266369047622, 15427.161160714286, 15438.204166666672, 15448.384375000001, 15457.590178571429, 15465.978273809522, 15474.216517857141, 15482.671726190478, 15491.325892857147, 15500.20520833333, 15509.227976190474, 15518.407440476194, 15527.662351190475, 15536.901785714288, 15546.036011904762, 15554.493452380955, 15561.23377976191, 15565.863988095238, 15568.235863095237, 15568.03125, 15565.309523809521, 15560.273065476187, 15553.136011904762, 15543.98645833333, 15532.98035714286, 15520.686607142854, 15507.919196428571, 15495.418601190475, 15483.941369047616, 15474.295089285712, 15467.148809523811, 15463.330654761901, 15463.813095238098, 15469.319940476187, 15480.092410714286, 15495.817559523814, 15516.017410714283, 15539.895535714286, 15566.386160714288, 15594.409375000001, 15622.860267857144, 15650.618898809524, 15676.594196428574, 15699.872619047617, 15719.793601190475, 15736.443898809524, 15750.151190476188, 15761.414732142857, 15770.933482142862, 15779.086011904761, 15786.158482142855, 15792.213392857142, 15797.414583333333, 15802.260267857138, 15807.640922619046, 15814.162351190475, 15822.005803571428, 15831.177976190475, 15841.703125, 15853.759375, 15867.51979166666, 15883.026934523808, 15900.29255952381, 15918.817857142858, 15937.50610119048, 15955.572470238092, 15972.499553571428, 15988.17202380952, 16002.436607142854, 16015.169345238091, 16026.378125000001, 16036.16383928571, 16044.275744047622, 16050.79107142857, 16056.188095238094, 16060.58794642857, 16064.204910714283, 16067.070684523807, 16069.238095238095, 16070.963095238092, 16072.497470238097, 16074.104761904762, 16075.924107142855, 16077.852976190474, 16079.643005952385, 16081.21458333333, 16082.81636904762, 16084.582589285717, 16086.574851190475, 16088.803124999999, 16091.099107142858, 16093.101636904765, 16094.63407738095, 16095.784077380953, 16096.977232142852, 16098.725297619045, 16100.881249999999, 16102.902827380947, 16104.342559523811, 16105.133630952385, 
16105.618303571431, 16106.193898809524, 16107.063244047617, 16108.279315476188, 16109.682142857142, 16111.192261904762, 16113.099255952382, 16116.057142857142, 16120.660565476188, 16126.945982142857, 16134.672023809526, 16143.62663690476, 16153.668749999997, 16164.587499999996, 16176.09613095238, 16187.694940476187, 16198.907440476187, 16209.590922619043, 16219.62738095238, 16228.862946428573, 16237.03065476191, 16243.967857142854, 16249.701636904758, 16254.409226190475, 16258.341666666664, 16261.737499999994, 16264.754017857143, 16267.366220238098, 16269.679464285715, 16271.925, 16274.256994047619, 16276.495089285716, 16278.390625, 16279.980952380953, 16281.26726190476, 16282.27544642857, 16283.170089285715, 16284.02782738095, 16284.664434523806, 16284.844791666663, 16284.461011904765, 16283.780952380952, 16283.245089285716, 16282.97172619048, 16283.001636904764, 16283.20282738095, 16283.227529761903, 16282.94494047619, 16282.609970238094, 16282.450148809527, 16282.497321428566, 16282.830654761907, 16283.47470238095, 16284.397023809519, 16285.682738095236, 16287.511011904759, 16290.022916666665, 16293.179464285715, 16296.924702380955, 16301.284375, 16306.308184523805, 16311.94345238095, 16317.922172619043, 16323.938690476187, 16329.686904761904, 16335.0369047619, 16339.994196428568, 16344.630059523804, 16348.811458333337, 16352.328273809524, 16355.153869047617, 16357.254613095236, 16358.758333333333, 16359.912053571425, 16360.938988095237, 16361.977976190474, 16363.14598214285, 16364.375297619046, 16365.634523809524, 16367.047023809524, 16368.533482142857, 16370.062351190474, 16371.548958333335, 16372.770089285716, 16373.556547619048, 16373.879761904762, 16373.772767857135, 16373.304464285713, 16372.570982142857, 16371.468601190472, 16369.93943452381, 16368.071428571426, 16366.077380952385, 16364.175148809521, 16362.58601190476, 16361.352380952381, 16360.510565476188, 16360.223065476188, 16360.39657738095, 16360.890327380952, 16361.776041666668, 16363.05833333333, 16364.578273809524, 16366.280059523808, 16368.003571428568, 16369.664136904761, 16371.190476190475, 16372.45982142857, 16373.668303571429, 16374.670684523811, 16375.18020833333, 16375.217857142854, 16374.898660714283, 16374.434672619043, 16373.95848214285, 16373.506547619043, 16373.092410714284, 16372.59761904762, 16371.771428571425, 16370.984970238093, 16370.6462797619, 16370.616815476187, 16370.680059523807, 16370.511011904762, 16370.101785714287, 16369.690029761901, 16369.283184523814, 16368.9056547619, 16368.552976190475, 16368.078273809524, 16367.297321428567, 16366.204761904759, 16364.943154761906, 16363.650446428568, 16362.356696428571, 16360.76711309524, 16358.755803571425, 16356.53645833333, 16354.239136904758, 16351.85595238095, 16349.422321428572, 16346.942708333332, 16344.434970238091, 16342.013988095237, 16339.66458333333, 16337.485714285713, 16335.561309523813, 16333.74568452381, 16331.819345238098, 16329.904315476191, 16328.315029761907, 16326.97708333333, 16325.7244047619, 16324.511607142855, 16323.441815476193, 16322.460714285715, 16321.577083333335, 16320.897619047613, 16320.573809523808, 16320.508333333333, 16320.59642857143, 16321.094345238098, 16322.061458333332, 16323.45982142857, 16325.195386904761, 16327.23154761904, 16329.413541666665, 16331.533035714283, 16333.545684523808, 16335.504017857142, 16337.277083333336, 16338.754613095243, 16340.019642857142, 16341.009374999996, 16341.726488095237, 16342.261755952384, 16342.621428571429, 16342.890476190474, 16343.090476190475, 16343.152827380953, 
16343.17782738095, 16343.227678571426, 16343.384375000001, 16343.611160714283, 16343.767410714287, 16343.767410714283, 16343.516666666668, 16343.387648809523, 16344.06369047619, 16345.812499999996, 16348.259970238098, 16350.911755952378, 16353.461755952385, 16355.99107142857, 16358.731101190471, 16361.755803571428, 16365.281994047622, 16368.99196428571, 16372.41488095238, 16375.409523809521, 16377.812351190474, 16379.701190476191, 16381.266369047622, 16381.96532738095, 16381.248214285712, 16379.60357142857, 16377.788988095237, 16376.044345238097, 16374.034821428568, 16371.683630952377, 16369.497470238095, 16367.833928571428, 16366.665476190474, 16366.39345238095, 16367.456845238095, 16369.35848214286, 16371.210267857141, 16372.715476190471, 16374.160565476188, 16375.621726190471, 16376.939285714283, 16378.068898809515, 16379.147321428567, 16380.487797619046, 16382.085863095237, 16383.820982142854, 16385.78497023809, 16387.923809523814, 16390.013244047615, 16391.909226190477, 16393.61607142857, 16395.132589285706, 16396.432589285712, 16397.35223214285, 16397.81413690476, 16397.928124999995, 16397.838541666664, 16397.678422619047, 16397.432440476186, 16397.05461309524, 16396.770982142858, 16396.723065476184, 16396.719196428567, 16396.8462797619, 16397.15952380952, 16397.586904761898, 16398.001339285714, 16398.185267857138, 16398.213244047613, 16398.28318452381, 16398.159375000003, 16397.615029761906, 16396.876190476192, 16395.792559523805, 16394.181249999998, 16392.087797619046, 16389.636755952382, 16387.095386904763, 16384.497916666667, 16381.78407738095, 16379.09241071428, 16376.554166666665, 16374.122321428567, 16371.902678571429, 16370.093898809526, 16368.675892857142, 16367.540029761903, 16366.61130952381, 16365.919047619049, 16365.423363095235, 16365.108333333332, 16365.04732142857, 16365.065625, 16364.956101190475, 16364.69970238095, 16364.319047619048, 16363.795833333332, 16363.08601190476, 16362.114434523808, 16360.829464285713, 16359.232738095234, 16357.22708333333, 16354.89970238095, 16352.459970238093, 16349.876934523814, 16347.119047619048, 16344.30491071428, 16341.58735119048, 16339.036011904758, 16336.773214285715, 16334.878125, 16333.401636904759, 16332.288244047622, 16331.534672619046, 16331.292559523808, 16331.414880952381, 16331.688541666666, 16332.13854166666, 16332.538541666669, 16332.542857142862, 16332.11145833333, 16331.3625, 16330.371577380953, 16329.035119047616, 16327.338541666666, 16325.428720238095, 16323.35386904762, 16320.96964285714, 16318.50788690477, 16316.31041666666, 16314.365624999997, 16312.554613095243, 16310.904166666665, 16309.480803571429, 16308.171279761906, 16306.97261904762, 16306.004910714284, 16305.47708333333, 16305.694047619048, 16306.682589285716, 16308.357291666665, 16310.619345238098, 16313.238392857147, 16316.224851190473, 16319.620089285707, 16323.144791666671, 16326.386607142855, 16328.964285714283, 16330.519047619051, 16331.026339285716, 16330.51800595238, 16328.976785714287, 16326.71443452381, 16324.011160714283, 16321.083184523808, 16318.274553571428, 16315.82648809524, 16313.825595238095, 16312.300148809523, 16311.285267857142, 16310.847321428573, 16311.058035714283, 16311.72113095238, 16312.469345238093, 16313.123363095234, 16313.58333333333, 16313.77574404762, 16313.734821428572, 16313.398809523813, 16312.479166666668, 16310.854017857144, 16308.64494047619, 16306.012946428573, 16303.062053571426, 16299.723363095238, 16296.079166666666, 16292.360863095237, 16288.664583333333, 16285.08556547619, 16281.89598214286, 16279.218303571426, 
16276.961904761898, 16275.077827380956, 16273.554315476187, 16272.40342261905, 16271.561309523811, 16270.913839285713, 16270.429761904765, 16270.143303571425, 16269.841517857141, 16269.456845238095, 16269.007886904761, 16268.380803571434, 16267.563690476187, 16266.601934523807, 16265.532886904755, 16264.405505952382, 16263.103720238092, 16261.521874999997, 16259.8431547619, 16258.02142857143, 16256.085863095233, 16254.199255952379, 16252.363392857143, 16250.474255952377, 16248.536011904762, 16246.617113095239, 16244.874107142854, 16243.405357142858, 16242.101339285713, 16240.84613095238, 16239.392410714283, 16237.69032738095, 16235.971726190475, 16234.285416666666, 16232.56830357143, 16230.831101190475, 16228.953124999996, 16226.747172619043, 16224.306994047616, 16221.896874999997, 16219.663839285713, 16217.649702380953, 16215.612648809521, 16213.555357142857, 16211.62470238095, 16209.768749999996, 16208.031696428567, 16206.54613095238, 16205.317410714286, 16204.331398809523, 16203.573809523808, 16202.948214285712, 16202.429613095239, 16201.90982142857, 16201.29851190476, 16200.603422619048, 16199.783333333333, 16198.804464285715, 16197.806845238094, 16196.904315476191, 16196.120238095236, 16195.280952380952, 16194.291220238094, 16193.369494047614, 16192.503273809523, 16191.62395833333, 16190.6869047619, 16189.575148809527, 16188.10863095238, 16186.023065476185, 16183.384375, 16180.463095238094, 16177.478125000001, 16174.289285714287, 16170.746279761903, 16166.869642857142, 16162.78705357143, 16158.80074404762, 16155.112648809523, 16152.047172619043, 16149.668005952377, 16147.867559523813, 16146.635416666666, 16145.953571428567, 16146.089285714283, 16147.224999999997, 16149.227232142852, 16151.85357142857, 16154.846279761901, 16157.888988095237, 16160.752678571429, 16163.401339285716, 16165.839136904757, 16168.150446428568, 16170.163392857141, 16171.693898809524, 16172.854464285709, 16173.781696428572, 16174.578571428572, 16175.23869047619, 16175.699404761901, 16175.917113095236, 16175.80193452381, 16175.294196428571, 16174.46220238095, 16173.486309523809, 16172.432291666668, 16171.242113095239, 16169.92202380952, 16168.514434523808, 16167.259821428568, 16166.312053571433, 16165.539434523811, 16164.820238095239, 16164.077380952382, 16163.257142857141, 16162.465029761906, 16161.694940476193, 16160.833928571428, 16159.95684523809, 16158.998809523817, 16157.957142857138, 16157.069494047619, 16156.492410714287, 16156.317708333332, 16156.434374999997, 16156.653869047614, 16157.083779761904, 16157.845684523814, 16158.822023809527, 16160.063541666668, 16161.464285714286, 16162.700446428571, 16163.646428571425, 16164.14583333333, 16164.123363095232, 16163.672619047611, 16162.821726190474, 16161.514583333332, 16159.868601190474, 16157.872023809521, 16155.613690476188, 16153.397023809524, 16151.36056547619, 16149.594345238094, 16148.216815476193, 16147.085863095239, 16146.027380952377, 16145.165029761907, 16144.495833333332, 16144.003125, 16143.696130952381, 16143.410267857142, 16143.090773809527, 16142.829910714283, 16142.561011904758, 16142.331845238095, 16142.024702380948, 16141.522916666663, 16140.905208333332, 16140.131696428567, 16139.310267857138, 16138.073809523808, 16135.590178571434, 16131.763392857141, 16127.249851190478, 16122.45982142857, 16117.584375000002, 16112.574404761903, 16107.2087797619, 16101.561309523802, 16095.61056547619, 16089.778869047617, 16084.787797619045, 16080.742261904765, 16077.538988095233, 16075.163244047622, 16073.633184523807, 16072.759077380953, 
16072.332738095238, 16072.133184523807, 16071.978273809524, 16072.008779761902, 16072.698809523808, 16073.92574404762, 16074.970386904759, 16075.558333333332, 16075.717857142858, 16075.706696428573, 16075.771726190478, 16076.064434523809, 16076.691666666666, 16077.505357142853, 16078.108779761906, 16078.538392857141, 16078.985267857144, 16079.348511904758, 16079.419940476191, 16079.111904761901, 16078.602232142859, 16077.933482142855, 16076.99821428571, 16075.671428571428, 16073.933184523808, 16071.715178571425, 16068.844047619043, 16065.112053571425, 16060.51800595238, 16055.055357142854, 16048.47336309524, 16040.599553571425, 16031.414880952381, 16021.002678571429, 16009.294940476188, 15996.278273809523, 15982.138244047619, 15967.184523809517, 15951.66130952381, 15935.842857142854, 15920.199702380947, 15905.141815476187, 15890.99375, 15877.985565476194, 15866.313392857146, 15856.039583333333, 15847.276190476194, 15840.171577380956, 15834.645833333332, 15830.501041666661, 15827.577976190474, 15825.662648809524, 15824.364285714284, 15823.457886904758, 15822.919345238091, 15822.82708333333, 15823.069791666669, 15823.37425595238, 15823.692559523808, 15824.032589285714, 15824.247023809521, 15824.300148809521, 15824.396875000004, 15824.583482142858, 15824.711309523807, 15824.598958333332, 15824.108928571432, 15823.107142857145, 15821.58720238095, 15819.625297619046, 15817.190625, 15814.27976190476, 15810.929613095233, 15807.198958333332, 15803.180803571428, 15799.015476190478, 15794.925446428573, 15790.963541666666, 15787.054166666672, 15783.059672619049, 15778.915029761907, 15774.63407738095, 15770.05550595238, 15765.184374999993, 15760.114880952382, 15754.81458333333, 15749.046726190474, 15742.773214285713, 15736.12886904762, 15729.418601190471, 15722.847023809521, 15716.523809523807, 15710.873511904765, 15705.793154761906, 15701.137351190475, 15697.322916666666, 15694.651190476185, 15693.199255952377, 15693.038095238093, 15693.988095238092, 15695.870089285709, 15698.384970238092, 15701.011904761903, 15703.755059523808, 15706.652232142857, 15709.307440476188, 15711.579613095235, 15713.512648809523, 15714.993749999996, 15716.001785714281, 15716.709226190475, 15717.346577380953, 15717.874553571426, 15717.790178571426, 15716.598363095234, 15714.226488095239, 15710.636160714283, 15705.861458333333, 15700.136607142857, 15693.372767857145, 15685.288690476185, 15675.981547619049, 15665.960267857141, 15655.904761904756, 15646.35431547619, 15637.286160714284, 15628.529761904761, 15619.91071428571, 15611.047172619048, 15602.060119047619, 15593.31220238095, 15584.684672619049, 15575.787797619048, 15566.3744047619, 15556.415773809524, 15545.891666666661, 15535.11681547619, 15524.658035714287, 15514.942113095238, 15506.050595238097, 15497.778869047623, 15490.134970238092, 15483.323958333336, 15477.672470238094, 15473.454761904759, 15470.903720238091, 15469.851190476185, 15469.802380952382, 15470.372619047617, 15471.341517857141, 15472.745238095236, 15474.508035714283, 15476.520238095238, 15478.577827380946, 15480.406994047617, 15481.874851190478, 15483.023958333339, 15483.868452380953, 15484.541815476185, 15485.122470238091, 15485.482142857141, 15485.458184523808, 15484.82366071428, 15483.460565476185, 15481.40773809524, 15478.860416666661, 15475.836904761903, 15472.279464285715, 15468.151488095236, 15463.406101190474, 15458.138988095234, 15452.720535714283, 15447.456696428568, 15442.41056547619, 15437.5525297619, 15432.722321428568, 15427.936160714284, 15423.275744047616, 15418.68571428571, 
15414.370833333338, 15410.505505952377, 15406.773809523802, 15402.997172619049, 15398.954910714283, 15394.281845238093, 15389.011160714283, 15383.147767857141, 15376.825892857145, 15370.403720238091, 15363.880803571423, 15357.11517857143, 15350.309523809517, 15343.55044642857, 15336.98392857142, 15331.057886904762, 15325.897470238093, 15321.509077380953, 15317.861160714281, 15314.716815476186, 15311.972321428568, 15309.6568452381, 15307.687351190474, 15306.094047619043, 15304.994791666668, 15304.29613095238, 15303.803422619043, 15303.340029761906, 15302.745386904759, 15301.81889880952, 15300.193303571425, 15297.764285714284, 15294.724702380952, 15291.115178571428, 15287.088392857138, 15282.981845238097, 15278.945982142852, 15274.955654761898, 15270.831845238097, 15266.741964285715, 15263.125446428567, 15260.07961309524, 15257.398660714282, 15254.789583333328, 15251.897470238098, 15248.336904761907, 15244.21726190476, 15240.051190476193, 15236.287202380952, 15232.975297619043, 15229.889880952376, 15226.900892857142, 15224.10014880952, 15221.633035714289, 15219.552232142858, 15217.620833333327, 15215.30446428571, 15212.095238095237, 15207.845833333333, 15202.51220238095, 15196.39970238095, 15189.93050595238, 15183.031547619048, 15175.538095238093, 15167.471279761907, 15159.295238095234, 15151.474851190473, 15144.3025297619, 15137.889583333332, 15132.299107142857, 15127.347767857138, 15122.719642857139, 15118.446726190474, 15114.81101190476, 15111.892410714281, 15109.480505952382, 15107.39032738095, 15105.236755952375, 15102.51413690476, 15099.015625, 15094.957142857142, 15090.549255952381, 15085.82976190476, 15080.805654761903, 15075.545684523802, 15070.042113095236, 15064.26711309524, 15058.520833333327, 15053.204910714283, 15048.454017857144, 15044.152678571425, 15040.101488095233, 15036.198809523808, 15032.421875, 15028.785119047618, 15025.502380952375, 15022.828422619048, 15020.667708333332, 15018.84345238095, 15017.24479166666, 15015.564583333327, 15013.692857142854, 15011.58244047619, 15009.2025297619, 15006.61383928571, 15003.64449404761, 15000.076041666665, 14996.03645833333, 14991.654315476191, 14987.08556547619, 14982.628571428568, 14978.434672619045, 14974.81220238095, 14971.986904761901, 14969.793303571425, 14968.03333333333, 14966.834821428567, 14966.131398809524, 14965.888690476191, 14966.08452380952, 14966.521279761904, 14967.091517857141, 14967.614434523808, 14967.889732142858, 14968.119642857142, 14968.631696428572, 14969.285267857138, 14969.992857142852, 14970.693452380947, 14971.265773809519, 14971.704761904759, 14972.04508928571, 14972.173511904755, 14972.065029761903, 14971.736755952375, 14971.209672619048, 14970.461904761902, 14969.445833333333, 14968.218749999995, 14966.793452380947, 14965.384226190472, 14963.996130952379, 14962.536160714284, 14961.052380952377, 14959.425297619044, 14957.559374999999, 14955.669642857138, 14953.93645833333, 14952.248660714284, 14950.528869047617, 14948.752529761903, 14946.87872023809, 14944.975892857141, 14942.996428571425, 14940.884672619042, 14938.758630952381, 14936.799553571425, 14935.224553571425, 14934.25580357142, 14933.908928571424, 14933.981398809517, 14934.266220238093, 14934.883184523802, 14936.010119047616, 14937.504910714279, 14939.154464285712, 14940.69985119047, 14941.913541666667, 14942.718005952376, 14943.172916666661, 14943.371130952377, 14943.554910714283, 14943.707142857142, 14943.786309523804, 14943.957142857142, 14944.273958333333, 14944.596875, 14944.736607142855, 14944.636458333327, 14944.322172619042, 
14943.863095238092, 14943.348809523804, 14943.015476190474, 14943.023214285713, 14943.497321428567, 14944.605357142857, 14946.651785714283, 14950.022916666667, 14954.962053571426, 14961.474553571425, 14969.501934523805, 14978.991815476185, 14989.72217261905, 15001.360863095235, 15013.675, 15026.503422619044, 15039.338541666666, 15051.609821428567, 15062.941369047614, 15073.093303571422, 15081.845386904759, 15089.130803571428, 15095.026041666666, 15099.550595238097, 15102.660863095238, 15104.318452380947, 15104.92276785714, 15104.779613095237, 15104.058184523808, 15103.194494047617, 15102.399107142848, 15101.657291666661, 15100.890029761904, 15100.11354166666, 15099.501785714283, 15099.099999999999, 15098.774851190474, 15098.559374999997, 15098.459077380947, 15098.275595238096, 15097.976190476187, 15097.571130952376, 15097.12366071428, 15096.68616071428, 15096.241369047619, 15095.64776785714, 15094.762797619043, 15093.749553571426, 15092.883928571428, 15092.28363095238, 15092.003720238092, 15092.09583333333, 15092.423363095235, 15092.889732142854, 15093.643303571425, 15094.953720238093, 15097.083184523804, 15099.905059523811, 15103.148809523807, 15106.781249999996, 15110.589732142857, 15114.457886904762, 15118.827083333326, 15123.969642857139, 15129.436607142858, 15134.841220238091, 15139.809821428571, 15143.994940476185, 15147.238988095234, 15149.274107142854, 15150.333928571425, 15150.543303571429, 15149.620684523808, 15147.441071428573, 15144.096726190475, 15139.849553571423, 15134.962202380953, 15129.715029761903, 15124.487946428568, 15119.7181547619, 15115.450148809521, 15111.682440476185, 15108.337202380953, 15105.355803571429, 15102.956994047618, 15101.138988095237, 15099.86532738095, 15099.017708333333, 15098.277529761901, 15097.448809523808, 15096.735119047617, 15096.449107142855, 15096.73333333333, 15097.610119047617, 15099.086309523802, 15101.105357142855, 15103.576041666665, 15106.674404761898, 15110.70699404762, 15115.669196428567, 15121.150892857142, 15126.750297619043, 15132.472023809521, 15138.602529761903, 15145.231547619049, 15152.438839285713, 15160.251190476189, 15168.437946428567, 15176.823214285716, 15185.542857142851, 15194.846577380944, 15204.808333333336, 15215.28095238095, 15225.778422619045, 15235.759226190474, 15244.852529761903, 15253.017261904759, 15260.461309523807, 15267.19732142857, 15273.014583333335, 15277.644345238095, 15280.874702380952, 15282.77008928571, 15283.687946428567, 15283.965476190475, 15283.896874999999, 15283.688541666663, 15283.28020833333, 15282.627380952375, 15281.80431547619, 15280.972916666664, 15280.339732142851, 15279.797172619046, 15279.14419642857, 15278.563690476189, 15278.110416666666, 15277.65639880952, 15277.291964285712, 15277.317113095238, 15278.048214285713, 15279.67098214286, 15282.175892857142, 15285.527678571423, 15289.60788690476, 15294.201636904761, 15299.301041666664, 15305.191964285712, 15311.89925595238, 15318.904017857136, 15325.772321428572, 15332.224553571425, 15338.219345238094, 15343.872023809523, 15349.362202380953, 15354.945684523806, 15360.710863095237, 15366.252976190472, 15371.295982142858, 15376.108184523808, 15380.916815476188, 15385.931547619046, 15391.37544642857, 15397.277380952379, 15403.65148809523, 15410.406250000002, 15417.230059523808, 15424.077380952376, 15430.991964285713, 15437.848660714284, 15444.599702380952, 15451.14553571428, 15457.228869047614, 15462.62023809524, 15467.19538690476, 15470.87172619047, 15473.783779761901, 15476.109374999996, 15478.013244047614, 15479.626934523809, 
15480.843303571426, 15481.581547619046, 15481.905208333332, 15482.012648809521, 15482.065476190475, 15482.160267857138, 15482.513541666665, 15483.509226190472, 15485.189880952385, 15487.441071428568, 15490.34717261905, 15493.736607142851, 15497.245982142857, 15500.683035714286, 15504.095238095235, 15507.722172619046, 15511.470535714283, 15514.864434523803, 15517.697619047616, 15520.068005952377, 15522.240476190475, 15524.771726190473, 15528.252529761903, 15532.693452380947, 15537.55744047619, 15542.36294642857, 15547.076934523811, 15551.896874999997, 15557.043005952379, 15562.348660714284, 15567.543452380953, 15572.474255952378, 15577.006845238095, 15581.523809523813, 15586.640029761898, 15592.49523809524, 15598.944642857145, 15605.539434523806, 15611.970386904759, 15618.483928571428, 15625.096279761898, 15631.759970238094, 15638.380803571428, 15644.58526785714, 15650.035863095238, 15654.679166666667, 15658.595238095231, 15662.116666666667, 15665.42901785714, 15668.414880952381, 15671.009672619053, 15673.09717261905, 15674.69032738095, 15675.909970238097, 15677.109523809524, 15678.770982142854, 15681.160863095238, 15684.243452380953, 15687.970982142857, 15692.332440476188, 15697.127976190475, 15702.112797619046, 15707.380505952382, 15713.052976190475, 15718.867857142857, 15724.42470238095, 15729.474404761904, 15734.052976190475, 15738.367113095237, 15742.63139880952, 15747.048660714283, 15751.725744047619, 15756.429166666663, 15761.013690476193, 15765.626934523809, 15770.5994047619, 15776.143154761903, 15782.000595238094, 15787.841666666665, 15793.558184523805, 15799.000595238093, 15804.162202380952, 15809.154017857138, 15813.960565476187, 15818.293452380953, 15821.75669642857, 15823.979315476188, 15825.071726190472, 15825.279910714286, 15824.48333333333, 15822.715029761903, 15820.042261904757, 15816.592708333332, 15812.405803571428, 15807.830208333335, 15803.066517857142, 15798.28214285714, 15793.801488095238, 15789.654315476188, 15785.889583333326, 15782.644940476193, 15779.844940476185, 15777.246726190471, 15774.670833333332, 15771.804464285715, 15768.576339285715, 15765.240029761904, 15761.935416666664, 15758.858928571428, 15756.189732142855, 15753.768005952375, 15751.318452380952, 15748.723958333332, 15746.142261904759, 15743.570535714283, 15741.043303571425, 15738.378273809523, 15735.165773809522, 15731.20744047619, 15726.469494047617, 15721.284375, 15716.50238095238, 15712.697470238098, 15709.712351190472, 15707.348214285712, 15705.459970238093, 15704.243898809524, 15704.029315476191, 15704.70357142857, 15705.968601190472, 15707.690476190475, 15709.574255952382, 15711.062351190478, 15711.93988095238, 15712.309970238095, 15712.136458333332, 15711.208035714286, 15709.219940476194, 15706.270982142854, 15702.651636904759, 15698.302678571425, 15693.183630952377, 15687.65595238095, 15682.16339285714, 15676.95133928571, 15672.178273809526, 15667.943749999993, 15664.406547619043, 15661.440773809523, 15658.875148809519, 15656.737351190472, 15654.957589285712, 15653.473214285712, 15652.24494047619, 15651.300297619044, 15650.607589285712, 15650.081696428568, 15649.731398809523, 15649.631398809524, 15649.718005952382, 15649.722172619044, 15649.489732142854, 15648.88214285714, 15647.811309523811, 15646.113839285712, 15643.725892857145, 15640.789136904765, 15637.288244047613, 15633.09866071428, 15628.306398809522, 15623.24345238095, 15618.134523809522, 15613.193601190473, 15608.649404761902, 15604.808035714286, 15601.849255952378, 15599.503124999994, 15597.775, 15596.82633928571, 
15596.567857142858, 15596.864136904764, 15597.609375, 15598.66041666667, 15599.661904761904, 15600.39107142857, 15600.741369047617, 15600.77425595238, 15600.53467261904, 15600.024553571428, 15599.298809523805, 15598.423660714283, 15597.418749999993, 15596.292113095236, 15595.245386904762, 15594.313541666663, 15593.496874999997, 15592.800446428568, 15592.128720238095, 15591.538541666663, 15591.154166666664, 15591.067410714284, 15591.328124999996, 15592.16011904762, 15593.748065476186, 15595.783333333333, 15598.00238095238, 15600.420833333332, 15602.944791666663, 15605.470833333333, 15608.005803571428, 15610.384970238094, 15612.469494047617, 15614.031101190472, 15614.813541666661, 15615.164732142854, 15615.415327380948, 15615.58125, 15615.917857142855, 15616.607142857136, 15617.290029761903, 15617.886755952377, 15618.75238095238, 15620.11026785714, 15622.017708333333, 15624.231101190477, 15626.527529761903, 15628.999107142854, 15631.50639880952, 15633.879315476195, 15636.25461309524, 15638.875744047617, 15641.504910714284, 15643.6619047619, 15645.322023809522, 15646.669940476188, 15647.905952380946, 15648.953720238093, 15649.750446428567, 15650.294345238095, 15650.719494047615, 15650.891517857139, 15650.753273809521, 15650.565773809518, 15650.396577380947, 15650.315625, 15650.234970238098, 15650.116220238093, 15650.003422619046, 15649.848065476188, 15649.615922619048, 15649.41845238095, 15649.21770833333, 15648.963244047616, 15648.734226190474, 15648.423511904759, 15647.954464285713, 15647.427827380947, 15646.961458333333, 15646.662797619048, 15646.466369047619, 15646.098660714286, 15645.651636904762, 15645.340476190475, 15645.018303571425, 15644.669196428571, 15644.408779761907, 15644.145089285712, 15643.854910714286, 15643.531845238089, 15643.213690476194, 15643.12157738095, 15643.269494047614, 15643.410416666664, 15643.536011904762, 15643.695089285715, 15643.799851190477, 15643.887351190475, 15644.159375, 15645.12782738095, 15646.681994047616, 15648.207291666664, 15649.621874999999, 15651.00758928571, 15652.36830357142, 15653.707291666666, 15655.069345238093, 15656.444494047619, 15657.537499999997, 15657.82857142857, 15657.53363095238, 15657.235565476185, 15656.90550595238, 15656.517708333333, 15656.095238095237, 15655.65595238095, 15655.154166666665, 15654.50342261905, 15653.677380952378, 15652.536755952378, 15650.99851190476, 15649.239583333336, 15647.494047619042, 15645.797321428565, 15643.988839285712, 15641.990178571423, 15639.980654761903, 15638.06086309524, 15636.359374999996, 15635.012648809523, 15634.046577380946, 15633.315029761907, 15632.588095238092, 15631.840624999999, 15631.263690476191, 15630.983779761906, 15630.968601190474, 15631.242857142854, 15631.78794642857, 15632.582738095238, 15633.530059523806, 15634.512053571429, 15635.620535714286, 15636.944791666663, 15638.40982142857, 15640.086309523811, 15641.980505952384, 15643.847321428573, 15645.488988095238, 15646.873214285712, 15648.11577380952, 15649.263541666665, 15650.269940476188, 15651.100892857145, 15651.789434523806, 15652.141666666663, 15652.100595238093, 15651.935267857145, 15651.837053571426, 15651.889434523808, 15652.06845238095, 15652.296130952382, 15652.540773809522, 15652.744345238098, 15652.93244047619, 15653.268452380948, 15653.64732142857, 15653.901934523814, 15654.024107142854, 15654.057142857138, 15654.054315476187, 15654.122916666669, 15654.27306547619, 15654.505059523804, 15654.871130952379, 15655.27991071428, 15655.880952380947, 15656.826339285715, 15657.982440476188, 15659.223511904762, 
15660.538988095239, 15661.969791666666, 15663.527232142857, 15665.22217261905, 15667.015922619043, 15668.881547619048, 15670.912797619052, 15673.194047619045, 15675.724255952382, 15678.45654761905, 15681.31532738095, 15684.194642857145, 15687.138839285712, 15690.30193452381, 15693.700744047619, 15697.205654761903, 15700.50729166666, 15703.48824404762, 15706.136607142857, 15708.668154761903, 15711.222916666664, 15713.582440476188, 15715.640178571426, 15717.246130952382, 15718.36145833333, 15719.181398809522, 15719.92336309524, 15720.593898809519, 15721.349255952378, 15722.055505952381, 15722.561458333332, 15723.140922619048, 15723.911160714284, 15725.005059523808, 15726.423660714288, 15728.00163690476, 15729.648958333328, 15731.41369047619, 15733.275892857144, 15735.18601190476, 15737.187053571428, 15739.28244047619, 15741.357440476193, 15743.179910714283, 15744.823660714283, 15746.55699404762, 15748.373809523813, 15750.029464285717, 15751.377232142851, 15752.40267857143, 15752.99032738095, 15753.091517857141, 15752.576041666665, 15751.340624999999, 15749.480357142857, 15747.116369047622, 15744.340327380947, 15741.356696428571, 15738.384523809527, 15735.61026785714, 15733.24910714286, 15731.322023809527, 15730.001041666666, 15729.790327380953, 15730.767708333333, 15732.602529761903, 15735.139732142858, 15738.360119047615, 15742.102083333328, 15746.299553571429, 15750.799404761901, 15755.249107142854, 15759.644345238092, 15763.832440476188, 15767.566964285716, 15770.867857142854, 15773.79895833333, 15776.331249999996, 15778.619940476188, 15780.770238095238, 15783.072172619046, 15786.153124999999, 15790.284523809523, 15795.325744047619, 15801.1931547619, 15807.802678571428, 15815.081696428575, 15823.05357142857, 15831.70178571428, 15840.95386904762, 15850.676041666664, 15860.649553571422, 15870.375892857135, 15879.773958333333, 15888.996279761901, 15898.070684523807, 15907.151190476194, 15916.204166666666, 15924.756250000002, 15932.484821428567, 15939.354464285712, 15945.474702380952, 15951.239434523804, 15956.639732142858, 15961.333184523808, 15965.452529761904, 15969.01845238095, 15971.86532738095, 15974.235714285714, 15976.494791666666, 15978.750148809526, 15980.937202380956, 15982.980952380953, 15984.981994047619, 15987.360416666666, 15990.05669642857, 15992.935267857141, 15996.274851190477, 16000.180357142857, 16004.43020833333, 16008.81324404762, 16013.184523809523, 16017.356250000003, 16021.195535714289, 16024.593898809524, 16027.654761904765, 16030.46175595238, 16032.833333333332, 16034.63244047619, 16035.91071428571, 16036.790178571428, 16037.434226190477, 16038.136755952377, 16039.10907738095, 16040.279166666664, 16041.521726190475, 16042.781696428567, 16044.237797619044, 16046.029613095234, 16048.215922619043, 16050.818601190475, 16053.757291666665, 16056.805357142857, 16059.75163690476, 16062.483333333335, 16065.013988095234, 16067.426785714286, 16069.717113095234, 16072.012351190475, 16074.185714285712, 16076.13958333333, 16077.884970238101, 16079.44970238095, 16081.071428571428, 16083.011011904764, 16085.328720238094, 16088.042559523808, 16091.337351190477, 16095.293898809517, 16099.990625000002, 16105.552232142853, 16112.012946428571, 16119.325446428571, 16127.203273809526, 16135.358184523806, 16143.671726190474, 16152.012053571429, 16160.032886904764, 16167.451785714284, 16174.242857142857, 16180.223214285717, 16185.181249999996, 16188.989136904762, 16191.577678571432, 16192.726488095232, 16192.093601190474, 16189.486755952385, 16184.947023809524, 16179.028124999999, 
16172.11354166666, 16164.429910714287, 16156.438839285713, 16148.337499999994, 16140.350744047613, 16132.968601190478, 16126.59806547619, 16121.507589285715, 16117.89151785714, 16115.356994047617, 16113.48958333333, 16112.075595238095, 16110.741071428572, 16109.56666666666, 16108.733779761906, 16108.128571428573, 16107.57232142857, 16106.815029761901, 16105.562499999996, 16104.110119047617, 16102.81220238095, 16101.399851190474, 16099.972172619046, 16098.75044642857, 16097.577827380956, 16096.36994047619, 16095.093601190474, 16093.773660714283, 16092.591369047617, 16091.299851190477, 16089.683035714283, 16088.178125000002, 16086.881101190475, 16085.476190476187, 16083.873809523808, 16082.197470238094, 16080.712202380955, 16079.63883928571, 16078.932886904759, 16078.482142857141, 16078.243898809524, 16078.10729166666, 16077.928571428572, 16077.750446428567, 16077.69345238095, 16077.744642857144, 16077.917708333336, 16078.215624999997, 16078.574702380953, 16079.08303571428, 16079.69389880952, 16080.402678571425, 16081.34375, 16082.44806547619, 16083.5994047619, 16084.809226190471, 16086.10773809524, 16087.496726190471, 16089.05818452381, 16090.669791666664, 16092.244791666668, 16093.868898809522, 16095.492410714278, 16097.077083333335, 16098.519047619046, 16099.625892857144, 16100.376190476185, 16100.815773809518, 16100.809226190471, 16100.275595238094, 16099.366071428567, 16098.077380952385, 16096.398065476185, 16094.545386904765, 16092.815625000001, 16090.942410714286, 16088.214285714286, 16084.675446428568, 16080.911755952382, 16077.246726190471, 16073.765476190472, 16070.433184523808, 16067.256249999999, 16064.159226190475, 16061.00148809524, 16058.200446428573, 16056.392559523805, 16055.515476190474, 16055.113095238092, 16054.987946428573, 16055.031845238098, 16055.192559523808, 16055.410714285716, 16055.633928571426, 16055.905952380946, 16056.30327380952, 16057.020386904758, 16058.006845238095, 16059.06741071428, 16060.314732142855, 16061.811309523813, 16063.400148809522, 16064.994791666666, 16066.682440476185, 16068.57976190476, 16070.607738095237, 16072.568601190475, 16074.464136904757, 16076.420684523806, 16078.17544642857, 16079.54345238095, 16080.775148809524, 16082.113839285712, 16083.53467261905, 16084.581547619046, 16084.903571428573, 16084.409375, 16083.478125000001, 16082.514136904756, 16081.427976190475, 16080.479613095236, 16079.556994047616, 16078.011011904764, 16076.143303571425, 16074.513988095237, 16073.147767857135, 16072.19032738095, 16071.430357142852, 16070.622470238091, 16069.912648809519, 16069.038244047617, 16068.06220238095, 16067.50431547619, 16067.031845238093, 16066.408035714283, 16065.971875, 16065.677083333332, 16065.394196428564, 16065.115624999999, 16064.973660714286, 16065.149255952376, 16065.767410714287, 16066.788244047617, 16068.17113095238, 16069.963095238092, 16072.200297619043, 16074.991815476187, 16078.179166666663, 16081.376934523809, 16084.417857142855, 16087.135714285716, 16089.19181547619, 16090.62663690476, 16091.524851190474, 16091.815178571425, 16091.519047619046, 16090.603571428574, 16089.152678571429, 16087.525297619046, 16086.01398809524, 16084.723809523808, 16083.812351190478, 16083.35982142857, 16083.178273809524, 16083.128720238092, 16083.25595238095, 16083.501190476185, 16083.751041666663, 16083.938095238098, 16084.067261904758, 16084.115327380952, 16083.988839285712, 16083.682738095235, 16083.635863095236, 16084.019642857142, 16084.564583333333, 16085.008928571428, 16085.417708333327, 16085.986607142857, 16086.571875, 
16087.187351190478, 16087.943303571425, 16088.726190476187, 16089.14226190476, 16089.14732142857, 16088.896279761904, 16088.597023809523, 16088.433482142857, 16088.257142857141, 16088.022172619043, 16087.65788690476, 16087.064434523809, 16086.30357142857, 16085.403869047617, 16084.388839285712, 16083.414136904761, 16082.559970238095, 16081.701785714284, 16080.81532738095, 16079.967559523802, 16079.329166666661, 16078.955654761903, 16078.669642857141, 16078.455059523805, 16078.451339285717, 16078.61220238095, 16078.852232142854, 16079.263244047623, 16079.846428571429, 16080.59613095238, 16081.391517857144, 16082.28482142857, 16083.43125, 16084.803571428567, 16086.344791666666, 16088.013839285712, 16089.814583333333, 16091.740178571425, 16093.810267857141, 16095.84300595238, 16097.700595238091, 16099.283928571425, 16100.556547619042, 16101.763392857141, 16102.888392857145, 16103.930654761902, 16104.918154761903, 16105.725297619043, 16106.380505952378, 16107.014285714284, 16107.737351190475, 16108.576934523806, 16109.530357142858, 16110.46875, 16111.311458333332, 16111.963244047616, 16112.396130952382, 16112.74122023809, 16112.996726190475, 16113.090624999997, 16112.935714285715, 16112.40833333333, 16111.293898809527, 16109.462648809524, 16107.143601190475, 16104.610416666663, 16102.025744047614, 16099.338095238092, 16096.513541666665, 16093.812946428572, 16091.491369047613, 16089.751636904755, 16088.89404761905, 16088.873511904758, 16089.237351190472, 16089.697767857144, 16090.068452380947, 16090.446279761898, 16090.952976190478, 16091.474851190478, 16091.962202380953, 16092.40714285714, 16092.621279761903, 16092.704761904759, 16093.080059523809, 16093.924404761903, 16095.25193452381, 16096.927380952382, 16098.80074404762, 16100.907291666663, 16103.28363095238, 16105.801339285716, 16108.591815476191, 16111.738095238095, 16114.943898809524, 16118.015476190472, 16120.901041666668, 16123.590922619045, 16126.093154761904, 16128.30401785714, 16130.174999999996, 16131.759374999994, 16132.945982142859, 16133.68586309524, 16134.213541666666, 16134.842410714284, 16135.639434523806, 16136.55744047619, 16137.632886904765, 16138.802380952378, 16139.893303571429, 16140.992559523807, 16142.307440476188, 16143.96294642857, 16146.046726190472, 16148.507589285713, 16151.438839285713, 16154.944642857145, 16159.043898809523, 16163.784226190475, 16169.211904761903, 16175.450744047617, 16182.427083333332, 16189.792857142855, 16197.123214285717, 16204.1712797619, 16210.829910714287, 16217.040327380953, 16222.811904761906, 16228.260714285709, 16233.291517857142, 16237.449404761901, 16240.673363095237, 16243.166369047616, 16244.898809523807, 16245.690922619044, 16245.462648809522, 16244.070982142854, 16241.334374999997, 16237.33095238095, 16232.408035714283, 16226.87098214285, 16220.78467261905, 16214.273363095233, 16207.677827380956, 16201.340624999999, 16195.45238095238, 16190.250000000002, 16185.991964285713, 16182.738095238095, 16180.574255952377, 16179.934077380953, 16181.18645833333, 16184.486904761901, 16189.756696428572, 16196.438690476194, 16204.154464285715, 16212.694345238093, 16221.720982142851, 16230.99598214286, 16240.214136904759, 16248.883333333339, 16256.571875, 16262.99270833333, 16268.036160714284, 16272.055208333333, 16275.420833333332, 16278.350148809524, 16281.088392857144, 16283.552976190478, 16285.591964285715, 16287.210119047619, 16288.527083333332, 16289.856696428567, 16291.439136904759, 16293.128273809521, 16294.581250000001, 16295.927380952382, 16297.127529761903, 16298.097916666664, 
[data block elided: several thousand comma-separated floating-point values, apparently a time-series sample; no file name, diff markers, or hunk headers survive for this span, so the originating file cannot be identified]
13437.200744047619, 13456.026190476185, 13474.626488095237, 13491.984672619048, 13507.252976190475, 13519.706101190477, 13528.657589285709, 13534.12425595238, 13536.745089285712, 13537.22857142857, 13536.475744047619, 13534.795535714286, 13532.448660714283, 13529.81324404762, 13527.03511904762, 13524.760863095235, 13523.90267857143, 13524.733630952382, 13527.07261904762, 13530.266517857144, 13533.352976190477, 13535.767857142857, 13537.201785714284, 13537.529761904761, 13536.779910714286, 13534.415476190477, 13529.368898809524, 13521.903720238091, 13511.226041666667, 13494.82261904762, 13471.675148809523, 13442.210416666667, 13407.105505952379, 13366.679910714282, 13321.03169642857, 13270.625297619043, 13216.637351190475, 13159.846130952383, 13102.494940476185, 13048.81622023809, 13001.781994047622, 12963.110565476189, 12934.031101190474, 12915.583184523808, 12908.457589285712, 12912.450000000003, 12926.490029761904, 12948.70803571428, 12976.91532738095, 13009.087797619048, 13043.275446428568, 13077.544791666669, 13110.282142857144, 13139.82648809524, 13164.89970238095, 13185.121130952377, 13200.872023809525, 13213.169940476193, 13223.110267857144, 13231.341369047617, 13238.240624999999, 13244.065773809521, 13249.223363095238, 13254.85044642857, 13262.073958333336, 13271.434821428571, 13283.223214285716, 13297.407291666663, 13313.822619047616, 13332.544196428567, 13353.906696428574, 13378.15818452381, 13405.081696428573, 13433.832142857143, 13463.306398809518, 13492.504017857142, 13520.541815476188, 13546.505654761906, 13569.797172619043, 13590.462648809522, 13608.503720238097, 13623.817410714286, 13636.255803571428, 13645.869940476188, 13652.899553571428, 13657.816517857149, 13661.222321428568, 13663.919047619045, 13666.642410714287, 13670.102529761903, 13675.215476190475, 13682.339136904759, 13691.450892857141, 13702.301785714286, 13714.671130952385, 13728.427827380952, 13743.168749999999, 13758.238392857143, 13773.022619047617, 13786.715922619045, 13798.330357142857, 13807.401488095235, 13814.139285714284, 13818.90476190476, 13821.905654761904, 13823.553571428572, 13824.325892857141, 13824.416071428574, 13823.983630952382, 13823.218005952382, 13822.506845238095, 13822.461607142857, 13823.554761904761, 13826.217559523806, 13830.65625, 13836.969196428568, 13845.314285714287, 13855.671726190474, 13868.113541666666, 13882.672767857144, 13898.950595238091, 13916.443005952382, 13934.76979166667, 13953.451636904761, 13972.278273809525, 13990.9037202381, 14008.775297619048, 14025.873958333335, 14042.207440476193, 14057.517559523809, 14071.65788690476, 14084.5625, 14095.939732142857, 14105.612053571434, 14113.567559523817, 14119.808482142857, 14124.65119047619, 14128.447619047622, 14131.438095238094, 14134.060416666665, 14136.738244047618, 14139.79851190476, 14143.612797619051, 14148.622619047617, 14155.003124999996, 14162.709523809526, 14171.798214285714, 14182.345238095235, 14194.135416666662, 14206.63898809524, 14219.315178571425, 14231.59464285714, 14242.954910714283, 14253.068601190475, 14262.085565476193, 14270.512499999999, 14278.520535714284, 14285.878273809527, 14292.53214285714, 14298.805654761907, 14305.439732142857, 14312.913392857145, 14321.436904761906, 14331.159523809521, 14342.552232142854, 14355.48080357143, 14368.970684523805, 14383.038095238093, 14398.05104166667, 14414.196428571428, 14431.56398809524, 14450.057291666662, 14469.502529761903, 14489.863392857143, 14510.362053571429, 14530.458333333332, 14550.78556547619, 14571.431845238096, 14592.059077380953, 
14612.129017857138, 14630.881994047619, 14647.944494047622, 14663.192559523804, 14676.411011904762, 14687.608928571428, 14697.028422619045, 14704.79107142857, 14710.940922619046, 14715.680357142857, 14719.488839285716, 14722.86324404762, 14726.440922619046, 14730.679464285718, 14735.787499999999, 14742.012946428567, 14749.55699404762, 14758.418005952379, 14768.553571428572, 14779.934970238091, 14792.21532738095, 14805.046279761898, 14817.945089285713, 14830.708779761906, 14843.29285714285, 14855.495833333332, 14867.245833333336, 14878.716071428571, 14889.865773809524, 14900.572470238092, 14910.96532738095, 14921.330803571429, 14931.384374999996, 14940.434226190471, 14948.477976190474, 14955.966517857145, 14962.997767857145, 14969.270386904758, 14974.826190476191, 14979.96875, 14984.668154761906, 14988.42708333333, 14991.530654761911, 14994.626785714283, 14997.753869047618, 15000.495238095236, 15002.65907738095, 15004.420833333335, 15005.755059523808, 15006.370386904762, 15006.355505952379, 15006.09464285714, 15005.888392857138, 15005.904761904765, 15006.150148809524, 15006.669494047623, 15007.561755952382, 15008.933184523808, 15010.768452380953, 15013.114732142858, 15016.188988095237, 15020.20729166667, 15025.007440476187, 15030.374702380956, 15036.336607142854, 15043.012648809521, 15050.168601190471, 15057.448511904764, 15064.89925595238, 15072.4875, 15079.897767857141, 15086.961458333335, 15093.461755952376, 15099.26458333333, 15104.334523809526, 15108.448511904764, 15111.680654761903, 15114.243154761902, 15116.154166666665, 15117.382291666667, 15117.925892857142, 15117.959077380947, 15118.005952380952, 15118.49553571428, 15119.555505952381, 15121.437500000002, 15124.06398809524, 15127.032291666666, 15130.139434523808, 15133.810267857147, 15138.263392857138, 15142.925148809521, 15147.473958333336, 15151.763095238095, 15155.560714285712, 15158.615327380952, 15161.02351190476, 15163.136458333332, 15165.088839285716, 15166.484375, 15167.227083333335, 15167.737500000003, 15168.149404761903, 15168.617261904761, 15169.419494047617, 15170.668154761901, 15172.271130952377, 15174.299851190475, 15177.04776785714, 15180.784970238092, 15185.571875, 15191.38705357143, 15198.298363095237, 15206.211309523811, 15214.814285714283, 15223.974404761904, 15233.794940476188, 15244.250744047617, 15255.057886904759, 15265.887946428571, 15276.488839285716, 15286.692410714286, 15296.095982142857, 15304.45550595238, 15312.045386904761, 15319.033333333333, 15325.45818452381, 15331.471428571429, 15337.320535714289, 15343.213095238088, 15349.411309523808, 15356.166666666666, 15363.80208333333, 15372.601785714289, 15382.37589285714, 15392.724107142858, 15403.271874999993, 15413.74494047619, 15423.801636904758, 15433.117857142857, 15441.552678571425, 15448.999107142858, 15455.418898809523, 15460.736160714288, 15464.954910714283, 15468.322619047614, 15471.101190476187, 15473.334672619048, 15475.20178571428, 15476.919494047619, 15478.530059523808, 15480.130357142853, 15481.89583333333, 15483.802083333332, 15485.632738095237, 15487.504017857142, 15489.6125, 15492.036755952384, 15494.746726190471, 15497.709821428572, 15500.9, 15504.275297619048, 15507.576636904756, 15510.623065476191, 15513.650297619046, 15516.671577380946, 15519.480357142857, 15521.877827380955, 15523.831845238092, 15525.226339285715, 15525.881398809526, 15525.695238095237, 15524.711160714289, 15523.199107142857, 15521.251785714287, 15518.761607142851, 15515.856696428567, 15512.674999999996, 15509.262946428567, 15505.906398809524, 15502.842113095237, 
15500.28288690476, 15498.47157738095, 15497.44866071428, 15497.167261904762, 15497.603422619048, 15498.577678571432, 15500.034970238092, 15501.908928571425, 15504.038095238098, 15506.335863095233, 15508.650297619048, 15510.819345238093, 15512.736607142855, 15514.358184523808, 15515.713095238094, 15516.901488095236, 15517.93913690476, 15518.763244047619, 15519.324999999999, 15519.621577380947, 15519.557142857142, 15519.096279761907, 15518.351339285713, 15517.395238095238, 15516.234375, 15514.868154761907, 15513.412797619045, 15511.96964285714, 15510.636755952377, 15509.531249999995, 15508.659523809521, 15508.087053571426, 15507.844345238093, 15507.616517857141, 15507.107142857145, 15506.3025297619, 15505.184821428567, 15503.753125000001, 15501.919196428573, 15499.501934523809, 15496.620386904762, 15493.368154761903, 15489.704017857139, 15485.926636904758, 15482.42336309524, 15479.332886904762, 15476.76770833333, 15474.757142857141, 15473.303720238093, 15472.587946428568, 15472.754017857142, 15473.859970238093, 15475.785267857142, 15478.368452380953, 15481.461160714287, 15484.880505952377, 15488.434672619043, 15492.067261904758, 15495.816666666664, 15499.538244047617, 15503.047470238093, 15506.21175595238, 15509.006547619045, 15511.483035714286, 15513.719345238093, 15515.719047619043, 15517.559523809527, 15519.809523809517, 15523.07470238095, 15527.544642857141, 15532.986904761903, 15539.001934523809, 15545.399553571431, 15552.045386904765, 15558.819345238095, 15565.85788690476, 15573.202380952376, 15580.209970238098, 15586.102827380953, 15590.820833333333, 15594.81071428571, 15598.335119047619, 15601.52782738095, 15604.419345238095, 15606.993601190472, 15609.175297619046, 15610.70520833333, 15611.681696428568, 15612.368452380953, 15612.683630952379, 15612.51994047619, 15612.02232142857, 15611.420684523811, 15611.105357142855, 15611.26532738095, 15611.877827380953, 15613.091666666665, 15614.81755952381, 15616.895089285716, 15619.261755952379, 15621.822619047614, 15624.442261904762, 15626.930059523811, 15629.06517857143, 15630.69181547619, 15631.875297619043, 15632.761904761906, 15633.411755952378, 15633.898363095233, 15634.379315476188, 15634.839732142855, 15635.31175595238, 15635.863839285716, 15636.432589285712, 15636.989880952378, 15637.397172619043, 15637.62470238095, 15637.876041666672, 15638.001190476194, 15637.641964285714, 15636.787499999999, 15635.430208333333, 15633.392857142855, 15630.517559523805, 15626.871874999993, 15622.540178571428, 15617.515773809526, 15611.809970238097, 15605.602380952381, 15599.101488095237, 15592.275148809522, 15585.09880952381, 15577.742410714289, 15570.695238095233, 15564.263244047619, 15558.408928571429, 15552.716964285715, 15546.978571428574, 15541.459077380956, 15536.238095238092, 15530.691517857138, 15524.654166666665, 15518.653273809527, 15512.496130952382, 15505.885267857147, 15498.844494047617, 15491.605059523808, 15484.078125, 15476.237797619044, 15468.438988095237, 15461.395982142854, 15455.41309523809, 15450.14657738095, 15445.63988095238, 15441.955654761907, 15439.14300595238, 15437.376488095237, 15436.676488095243, 15436.718898809519, 15437.14553571428, 15437.886309523808, 15438.772767857141, 15439.56413690476, 15440.11979166666, 15440.481696428573, 15440.643154761903, 15440.509970238092, 15440.287499999997, 15440.213095238092, 15440.449404761906, 15441.221577380953, 15442.780803571426, 15445.268898809523, 15448.460119047611, 15452.086160714283, 15456.316964285712, 15461.302678571428, 15467.288541666669, 15474.305357142859, 
15481.60431547619, 15488.36428571429, 15494.178720238093, 15498.697767857142, 15502.121130952382, 15504.78883928571, 15506.8744047619, 15508.173065476194, 15507.69657738095, 15505.314434523809, 15502.16294642857, 15499.106696428567, 15496.397321428572, 15494.645089285716, 15494.190922619051, 15494.602083333333, 15495.559821428573, 15497.61770833333, 15501.519494047616, 15507.201636904761, 15514.029315476186, 15521.606101190471, 15529.758035714287, 15538.010119047614, 15546.003720238095, 15553.957886904758, 15561.687797619046, 15568.522767857145, 15573.980208333332, 15577.593154761904, 15578.810863095234, 15576.965922619045, 15571.517113095235, 15562.239434523804, 15549.545684523806, 15534.148809523813, 15516.769494047616, 15498.116220238095, 15478.992708333333, 15460.195238095233, 15442.477232142859, 15426.659523809521, 15413.731696428566, 15404.398958333324, 15398.263541666665, 15394.561011904756, 15392.544196428571, 15391.421130952382, 15391.085863095237, 15391.740773809519, 15393.318154761906, 15395.599255952378, 15397.867857142857, 15399.360119047622, 15399.815327380953, 15399.232589285712, 15397.732738095237, 15395.45401785714, 15392.006398809519, 15386.762499999999, 15379.605803571423, 15370.880059523804, 15361.36101190476, 15351.740178571428, 15342.257886904756, 15332.71443452381, 15323.146428571428, 15314.134821428568, 15305.90520833333, 15298.644345238092, 15292.51770833333, 15287.454910714288, 15283.143303571425, 15279.313690476185, 15276.042261904764, 15273.726339285715, 15272.41264880952, 15271.619791666666, 15271.180803571428, 15270.917559523805, 15270.534226190477, 15269.805208333335, 15268.711011904767, 15267.321875, 15265.564285714283, 15263.225446428576, 15260.200892857145, 15256.501934523805, 15251.989434523808, 15246.62708333333, 15240.685267857138, 15234.399553571431, 15227.83095238095, 15220.930059523807, 15213.653720238095, 15206.14494047619, 15198.552380952378, 15191.070089285715, 15184.044940476188, 15177.722172619046, 15172.020386904762, 15166.88913690476, 15162.224255952378, 15157.749404761906, 15153.333779761904, 15148.92931547619, 15144.388244047619, 15139.560267857141, 15134.327678571432, 15128.666517857144, 15122.637797619049, 15116.303422619052, 15109.757886904761, 15103.217410714286, 15096.96056547619, 15091.15550595238, 15085.984374999996, 15081.487499999996, 15077.596577380948, 15074.181101190477, 15071.188095238094, 15068.598511904762, 15066.423809523809, 15064.729613095235, 15063.411309523804, 15062.345089285713, 15061.406845238093, 15060.525, 15059.600595238095, 15058.593154761904, 15057.630059523814, 15056.645684523808, 15055.524404761903, 15054.336160714289, 15053.186011904761, 15052.00074404762, 15050.610119047617, 15048.951934523808, 15047.166815476185, 15045.33095238095, 15043.34836309524, 15041.32678571428, 15039.373511904765, 15037.41860119048, 15035.34449404762, 15033.351636904761, 15031.779315476191, 15030.71726190476, 15030.175148809521, 15030.117410714287, 15030.392410714283, 15030.785267857143, 15031.097321428568, 15031.371726190475, 15031.786160714284, 15032.174255952375, 15032.366071428572, 15032.149553571428, 15031.479166666666, 15030.50461309524, 15029.150892857142, 15027.53050595238, 15025.951785714286, 15024.450297619045, 15022.929910714283, 15021.39732142857, 15019.872321428567, 15018.566964285716, 15017.45550595238, 15016.410565476188, 15015.634374999996, 15015.162499999999, 15014.833333333332, 15014.49300595238, 15014.098660714284, 15013.65, 15013.207589285712, 15012.809523809521, 15012.348958333332, 15011.748660714286, 
15010.942113095238, 15009.924851190477, 15008.74568452381, 15007.451041666665, 15006.14404761905, 15004.884970238092, 15003.617559523807, 15002.276488095235, 15000.884077380953, 14999.531249999995, 14998.325446428571, 14997.322916666666, 14996.519791666664, 14995.957142857143, 14995.646577380952, 14995.610416666666, 14995.878720238097, 14996.428869047617, 14997.320386904761, 14998.552976190475, 15000.146428571428, 15001.953720238098, 15003.71964285714, 15005.432886904762, 15007.170089285715, 15008.914434523811, 15010.471577380953, 15011.920982142854, 15013.266220238094, 15014.391220238094, 15015.241964285713, 15015.901636904762, 15016.613541666666, 15017.35357142857, 15017.912797619048, 15018.245833333336, 15018.556994047614, 15018.801785714284, 15018.91056547619, 15018.941666666664, 15018.89791666666, 15018.816964285717, 15018.668898809523, 15018.42619047619, 15018.234672619048, 15018.15669642857, 15018.049702380955, 15017.884821428568, 15017.752529761909, 15017.644047619046, 15017.574404761903, 15017.576041666665, 15017.647916666667, 15017.895982142854, 15018.174107142855, 15018.39181547619, 15018.675892857143, 15018.974107142858, 15019.238839285721, 15019.526636904764, 15019.804761904765, 15019.988690476188, 15020.159970238097, 15020.363839285712, 15020.498065476193, 15020.440773809521, 15020.094791666672, 15019.517261904759, 15018.643749999994, 15017.326339285712, 15015.577083333332, 15013.39494047619, 15010.727529761907, 15007.57336309524, 15004.114583333332, 15000.524107142854, 14996.913839285715, 14993.323958333332, 14989.918005952379, 14986.976339285713, 14984.600297619043, 14982.498809523808, 14980.253273809521, 14977.70580357143, 14974.943154761906, 14972.045684523808, 14969.117261904761, 14966.32782738095, 14963.64494047619, 14960.905357142854, 14958.044642857141, 14955.46726190476, 14953.575744047619, 14952.342261904765, 14951.382738095239, 14950.516220238098, 14949.847619047623, 14949.235416666668, 14948.53705357143, 14947.728720238098, 14946.929761904761, 14946.09821428571, 14945.110565476189, 14943.958482142858, 14942.765476190472, 14941.621577380953, 14940.303273809524, 14938.64002976191, 14936.624107142854, 14934.353422619042, 14931.90892857143, 14929.319345238095, 14926.467857142854, 14923.39404761905, 14920.268750000003, 14917.074553571429, 14913.911160714286, 14910.859672619048, 14907.911160714284, 14904.992857142852, 14901.911755952384, 14898.865476190475, 14896.227827380953, 14893.986607142855, 14892.212351190477, 14890.820684523806, 14889.557440476188, 14888.604910714283, 14888.203125, 14888.318005952377, 14888.819047619043, 14889.44508928571, 14889.997619047619, 14890.474999999999, 14890.77470238095, 14890.941369047618, 14891.11607142857, 14891.17648809524, 14891.114136904762, 14891.097767857143, 14891.169047619043, 14891.086011904765, 14890.77321428571, 14890.319345238093, 14889.591369047623, 14888.501785714283, 14886.934672619049, 14884.616071428572, 14881.195535714283, 14876.55669642857, 14870.884523809524, 14864.417857142851, 14857.317261904762, 14849.681696428568, 14841.308630952382, 14832.088095238098, 14822.61369047619, 14813.6619047619, 14805.627827380953, 14798.630803571423, 14792.667113095236, 14787.841517857147, 14783.832440476193, 14780.480357142857, 14778.568005952382, 14778.328720238098, 14779.027678571429, 14779.967410714284, 14780.908928571429, 14781.323363095242, 14780.611904761907, 14778.706845238095, 14775.882291666663, 14771.919345238091, 14766.319791666667, 14759.22142857143, 14751.017708333338, 14742.005654761906, 14732.392113095237, 
14722.728422619046, 14713.589434523805, 14705.016220238093, 14697.1875, 14690.564732142857, 14685.524702380955, 14682.377380952385, 14681.634375000001, 14683.949404761906, 14689.829315476189, 14699.389434523811, 14712.084821428576, 14727.12544642857, 14743.609970238093, 14760.626785714283, 14777.366369047619, 14793.19345238095, 14807.223958333332, 14818.471726190477, 14826.147321428567, 14829.861160714287, 14830.03273809524, 14827.241517857145, 14821.680654761907, 14813.342559523808, 14802.13645833333, 14787.849255952382, 14770.580654761903, 14750.391964285718, 14727.182440476194, 14700.915773809522, 14671.499404761904, 14638.972767857147, 14604.038095238093, 14567.618898809522, 14530.430952380953, 14493.074999999999, 14455.722470238097, 14418.425000000005, 14381.351488095233, 14344.917261904762, 14309.834375000002, 14276.655654761904, 14245.659077380955, 14217.04776785714, 14190.971726190475, 14167.44285714286, 14146.572023809527, 14128.622470238091, 14113.758184523807, 14101.722172619046, 14091.885119047625, 14083.67529761905, 14076.250595238094, 14068.569494047619, 14059.255357142854, 14046.950148809525, 14030.49122023809, 14009.00431547619, 13982.315476190477, 13950.682589285712, 13914.626190476189, 13874.894791666666, 13832.384970238098, 13787.989285714284, 13742.913541666669, 13698.352380952381, 13655.449851190471, 13615.121428571429, 13577.406994047617, 13541.903720238091, 13508.07886904762, 13475.265476190474, 13443.28586309524, 13412.35431547619, 13382.878869047623, 13354.929761904765, 13327.90119047619, 13301.124255952376, 13274.347023809518, 13247.626785714287, 13220.98020833333, 13194.330505952383, 13167.41294642857, 13139.766815476189, 13110.913541666669, 13080.940625000003, 13050.656696428574, 13021.104166666668, 12993.11294642857, 12967.104166666666, 12943.354017857144, 12922.108482142863, 12903.454613095237, 12887.495982142857, 12874.27306547619, 12863.68497023809, 12855.448511904764, 12849.01651785714, 12843.871875000003, 12839.750000000002, 12836.414285714283, 12833.67857142857, 12831.437500000002, 12829.436904761906, 12827.38556547619, 12825.143601190475, 12822.472767857143, 12819.243452380955, 12815.486904761907, 12811.014285714284, 12805.704761904764, 12799.705654761907, 12793.084077380956, 12785.841369047617, 12778.176488095238, 12770.31696428571, 12762.344642857144, 12754.204910714283, 12745.892708333333, 12737.641071428574, 12729.774553571431, 12722.237202380948, 12715.050744047629, 12708.448809523808, 12702.147321428572, 12695.819047619048, 12689.697172619048, 12684.056101190481, 12678.959672619047, 12674.303869047622, 12669.740773809524, 12665.076785714284, 12660.22470238095, 12655.233333333335, 12650.53735119048, 12646.378273809521, 12642.70744047619, 12639.472767857143, 12636.552083333332, 12633.858630952382, 12631.490178571428, 12629.663095238095, 12628.41220238095, 12627.565029761912, 12626.94032738095, 12626.441220238095, 12625.98377976191, 12625.515178571426, 12625.080208333335, 12624.81860119048, 12624.789583333335, 12624.883333333333, 12624.844345238094, 12624.51428571429, 12623.879166666671, 12622.954761904759, 12621.787946428572, 12620.50044642858, 12619.13988095238, 12617.613244047614, 12615.794642857147, 12613.612648809521, 12611.303422619043, 12609.171726190478, 12607.259077380955, 12605.498511904761, 12603.813095238094, 12602.027678571429, 12600.100446428572, 12598.030208333334, 12595.897916666669, 12593.785267857147, 12591.59598214286, 12589.109226190474, 12586.21711309524, 12582.92306547619, 12579.16830357143, 12575.091964285712, 
12570.963988095235, 12566.793898809525, 12562.37574404762, 12557.687648809526, 12552.886904761906, 12548.142559523814, 12543.740178571434, 12539.846279761909, 12536.453273809524, 12533.43303571429, 12530.558482142857, 12527.906101190478, 12525.674553571425, 12523.885863095236, 12522.443005952382, 12521.277380952382, 12520.243005952385, 12519.260267857142, 12518.457291666664, 12517.870684523814, 12517.518601190477, 12517.566815476193, 12517.95535714286, 12518.601041666669, 12519.569196428572, 12520.77976190476, 12522.09226190476, 12523.506398809526, 12524.977678571428, 12526.542113095242, 12528.108184523811, 12529.228869047616, 12529.92708333333, 12530.375148809524, 12530.607142857141, 12530.641220238094, 12530.42306547619, 12529.894642857147, 12529.180357142857, 12528.312797619046, 12527.257142857145, 12526.257142857141, 12525.421726190478, 12524.69985119048, 12524.09449404762, 12523.686309523813, 12523.834226190478, 12524.874107142854, 12526.79657738095, 12529.513690476191, 12533.035565476192, 12537.227529761907, 12541.897172619048, 12547.056398809524, 12552.566220238095, 12558.348065476192, 12564.2181547619, 12569.789136904761, 12574.84568452381, 12579.665476190477, 12584.594791666663, 12589.706845238097, 12595.032886904764, 12600.436607142854, 12605.683035714284, 12610.570982142855, 12615.07529761905, 12619.332886904764, 12623.431398809524, 12627.009226190472, 12629.822767857147, 12632.077529761904, 12634.05535714286, 12636.037946428572, 12638.47961309524, 12641.704761904764, 12645.783630952385, 12650.766369047622, 12656.582589285717, 12663.28541666667, 12670.821428571428, 12678.993749999996, 12687.737946428573, 12696.920386904761, 12706.17306547619, 12715.30848214286, 12724.404166666669, 12733.27976190476, 12741.628571428573, 12749.325892857145, 12756.558035714286, 12763.709672619052, 12770.807738095236, 12777.733928571432, 12784.629315476197, 12791.755952380952, 12799.007142857141, 12806.433035714283, 12814.432738095235, 12822.849702380956, 12831.369047619048, 12839.596875, 12847.108482142858, 12853.909242724867, 12860.170940806876, 12866.059939531366, 12872.226294406652]
+seasonal_data = [0.46366666666666667, 0.4663888888888889, 0.4695238095238095, 0.47208333333333335, 0.47500000000000003, 0.47750000000000004, 0.4833333333333333, 0.4883333333333333, 0.49366666666666664, 0.49866666666666665, 0.5035000000000001, 0.5085, 0.5126666666666668, 0.5175000000000001, 0.5216666666666667, 0.5261666666666667, 0.5303333333333333, 0.5343333333333333, 0.5386666666666667, 0.5426666666666666, 0.5463333333333333, 0.5503333333333333, 0.5543333333333333, 0.5578333333333333, 0.5618333333333333, 0.5653333333333334, 0.569, 0.5746666666666667, 0.586, 0.6028333333333333, 0.6199999999999999, 0.637, 0.654, 0.6711666666666666, 0.6888333333333333, 0.7071666666666666, 0.7254999999999999, 0.7428333333333332, 0.7545, 0.7608333333333334, 0.7636666666666667, 0.7661666666666667, 0.7673333333333333, 0.7661666666666667, 0.7645000000000001, 0.7601666666666667, 0.7550000000000001, 0.749, 0.7416666666666667, 0.734, 0.729, 0.7225, 0.7171666666666667, 0.7133333333333333, 0.7073333333333334, 0.7043333333333333, 0.6998333333333333, 0.6958333333333333, 0.6918333333333333, 0.6871666666666666, 0.6836666666666666, 0.6816666666666666, 0.6798333333333334, 0.6783333333333335, 0.6783333333333333, 0.6768333333333334, 0.6775, 0.6775, 0.6778333333333334, 0.6785000000000001, 0.6783333333333333, 0.6783333333333333, 0.6783333333333335, 0.6775, 0.6775, 0.6780000000000002, 0.678, 0.6786666666666668, 0.6796666666666666, 0.6811666666666667, 
0.6828333333333333, 0.6838333333333334, 0.6848333333333333, 0.6866666666666666, 0.688, 0.6889999999999998, 0.69, 0.6913333333333334, 0.6941666666666666, 0.697, 0.7, 0.7026666666666667, 0.7038333333333333, 0.7051666666666666, 0.7058333333333333, 0.7068333333333333, 0.7064999999999999, 0.7055, 0.7026666666666667, 0.6984999999999999, 0.6926666666666667, 0.6839999999999999, 0.6728333333333334, 0.6623333333333334, 0.6518333333333334, 0.641, 0.6323333333333333, 0.6193333333333333, 0.6060000000000001, 0.5943333333333334, 0.5841666666666667, 0.5771666666666666, 0.5741666666666667, 0.5703333333333334, 0.5655, 0.5588333333333334, 0.5491666666666667, 0.5421666666666667, 0.5268333333333334, 0.5075000000000001, 0.4885, 0.4685, 0.4485, 0.42783333333333334, 0.40049999999999997, 0.3735, 0.3481666666666666, 0.32399999999999995, 0.30366666666666664, 0.2866666666666667, 0.26949999999999996, 0.2535, 0.2375, 0.22116666666666665, 0.21150000000000002, 0.198, 0.17966666666666667, 0.16116666666666665, 0.14716666666666667, 0.13416666666666666, 0.11750000000000001, 0.09783333333333333, 0.07866666666666668, 0.06033333333333333, 0.0435, 0.0325, 0.026666666666666665, 0.0245, 0.03133333333333334, 0.042666666666666665, 0.055499999999999994, 0.0725, 0.09066666666666665, 0.10899999999999999, 0.128, 0.146, 0.16366666666666665, 0.1728333333333333, 0.17766666666666664, 0.18133333333333332, 0.1703333333333333, 0.16183333333333333, 0.15883333333333333, 0.15516666666666667, 0.1445, 0.13083333333333333, 0.11566666666666667, 0.10366666666666668, 0.08633333333333335, 0.065, 0.06066666666666667, 0.05266666666666666, 0.03716666666666667, 0.022, 0.013499999999999998, 0.008666666666666666, 0.006333333333333334, 0.005333333333333333, 0.005833333333333334, 0.006500000000000001, 0.006666666666666666, 0.006166666666666667, 0.006666666666666666, 0.007000000000000001, 0.007000000000000001, 0.007333333333333333, 0.007166666666666667, 0.0075, 0.007166666666666667, 0.006666666666666666, 0.006333333333333334, 0.006333333333333334, 0.0055000000000000005, 0.005, 0.004833333333333334, 0.004833333333333334, 0.0045000000000000005, 0.004, 0.004000000000000001, 0.0035000000000000005, 0.003, 0.002666666666666667, 0.002666666666666667, 0.002666666666666667, 0.0021666666666666666, 0.0015, 0.001666666666666667, 0.005333333333333334, 0.017233333333333333, 0.03423333333333334, 0.0519, 0.06989999999999999, 0.08656666666666665, 0.09423333333333332, 0.11239999999999999, 0.13256666666666667, 0.15323333333333333, 0.17106666666666664, 0.18116666666666664, 0.18783333333333332, 0.19516666666666665, 0.20283333333333334, 0.21266666666666664, 0.22083333333333335, 0.20400000000000001, 0.18366666666666667, 0.16266666666666668, 0.14100000000000001, 0.11866666666666666, 0.095, 0.07033333333333333, 0.04466666666666667, 0.018833333333333334, 0.0038333333333333336, 0.002833333333333333, 0.003166666666666667, 0.003166666666666667, 0.0035000000000000005, 0.003833333333333333, 0.004166666666666667, 0.0045, 0.004666666666666667, 0.004, 0.003333333333333334, 0.003, 0.0028333333333333335, 0.003, 0.0028333333333333335, 0.0023333333333333335, 0.0023333333333333335, 0.001666666666666667, 0.0016666666666666666, 0.0015, 0.0013333333333333333, 0.0015, 0.0013333333333333333, 0.0013333333333333335, 0.0011666666666666668, 0.0016666666666666666, 0.0015, 0.0015, 0.001666666666666667, 0.001666666666666667, 0.001666666666666667, 0.0015, 0.0015, 0.001, 0.0041666666666666675, 0.015500000000000003, 0.022833333333333337, 0.023000000000000003, 0.02616666666666667, 0.04083333333333334, 
0.054166666666666675, 0.07116666666666667, 0.08600000000000001, 0.0915, 0.09483333333333335, 0.09316666666666668, 0.1015, 0.10416666666666667, 0.10933333333333335, 0.10616666666666667, 0.09283333333333334, 0.07616666666666667, 0.06183333333333334, 0.05733333333333333, 0.052000000000000005, 0.04366666666666667, 0.028333333333333332, 0.02566666666666667, 0.017333333333333333, 0.005833333333333334, 0.005833333333333334, 0.005666666666666666, 0.005333333333333334, 0.004833333333333334, 0.003833333333333333, 0.002, 0.002, 0.002833333333333333, 0.003166666666666667, 0.003333333333333333, 0.003333333333333333, 0.003833333333333333, 0.004333333333333333, 0.0045, 0.005333333333333334, 0.006, 0.006500000000000001, 0.006500000000000001, 0.006333333333333334, 0.006500000000000001, 0.0068333333333333345, 0.006333333333333334, 0.006666666666666666, 0.007166666666666667, 0.008, 0.008166666666666666, 0.008166666666666666, 0.008166666666666666, 0.008666666666666666, 0.009166666666666667, 0.009333333333333334, 0.010166666666666668, 0.010499999999999999, 0.009833333333333333, 0.011833333333333335, 0.023333333333333338, 0.04016666666666667, 0.057499999999999996, 0.07549999999999998, 0.094, 0.1115, 0.1305, 0.13916666666666666, 0.14316666666666666, 0.14483333333333331, 0.14433333333333334, 0.146, 0.14883333333333332, 0.15183333333333332, 0.15433333333333332, 0.153, 0.14950000000000002, 0.1565, 0.1696666666666667, 0.18216666666666667, 0.18883333333333333, 0.17733333333333332, 0.15983333333333333, 0.1415, 0.12383333333333332, 0.11216666666666666, 0.10116666666666667, 0.09016666666666666, 0.07933333333333333, 0.0685, 0.057999999999999996, 0.058499999999999996, 0.06383333333333333, 0.06983333333333333, 0.07583333333333334, 0.08183333333333334, 0.08816666666666666, 0.0935, 0.093, 0.09083333333333334, 0.08866666666666666, 0.08666666666666667, 0.08483333333333334, 0.08316666666666667, 0.08199999999999999, 0.08066666666666666, 0.07916666666666668, 0.07883333333333334, 0.08433333333333333, 0.0915, 0.09866666666666665, 0.10566666666666666, 0.1125, 0.11916666666666667, 0.12516666666666668, 0.13116666666666668, 0.13683333333333333, 0.14216666666666666, 0.14733333333333334, 0.155, 0.17, 0.19033333333333335, 0.21083333333333334, 0.23116666666666666, 0.25183333333333335, 0.27266666666666667, 0.29416666666666663, 0.31633333333333336, 0.3388333333333334, 0.35950000000000004, 0.373, 0.3815, 0.39016666666666666, 0.3993333333333334, 0.4086666666666667, 0.4181666666666667, 0.42750000000000005, 0.43616666666666665, 0.44466666666666665, 0.45216666666666666, 0.45933333333333326, 0.466, 0.4721666666666667, 0.47800000000000004, 0.483, 0.4876666666666667, 0.4918333333333333, 0.4956666666666666, 0.4996666666666667, 0.5036666666666666, 0.508, 0.5121666666666667, 0.5168333333333333, 0.5208333333333334, 0.5256666666666667, 0.53, 0.5346666666666666, 0.5398333333333334, 0.5448333333333333, 0.5501666666666667, 0.5551666666666667, 0.5603333333333332, 0.5653333333333332, 0.5708333333333334, 0.576, 0.5814999999999999, 0.5868333333333333, 0.5921666666666667, 0.5971666666666666, 0.6021666666666666, 0.6071666666666666, 0.6123333333333334, 0.6173333333333334, 0.622, 0.627, 0.6315000000000001, 0.6363333333333334, 0.6408333333333334, 0.6458333333333334, 0.651, 0.6566666666666667, 0.663, 0.6691666666666667, 0.676, 0.6826666666666666, 0.6896666666666667, 0.6968333333333333, 0.7038333333333333, 0.7104999999999999, 0.717, 0.7223333333333333, 0.7275, 0.7329999999999999, 0.7383333333333333, 0.7436666666666667, 0.7488333333333334, 0.7538333333333334, 
0.7588333333333334, 0.7641666666666667, 0.7691666666666667, 0.775, 0.7801666666666667, 0.7855000000000001, 0.7901666666666667, 0.7951666666666668, 0.8000000000000002, 0.805, 0.8099999999999999, 0.8150000000000001, 0.8200000000000001, 0.8234999999999999, 0.8241666666666667, 0.8215, 0.819, 0.8148333333333333, 0.8098333333333333, 0.8036666666666668, 0.7941666666666667, 0.782, 0.7683333333333333, 0.755, 0.7435, 0.7338333333333333, 0.7235, 0.7136666666666667, 0.7041666666666667, 0.6931666666666667, 0.686, 0.6805000000000001, 0.6755000000000001, 0.67, 0.6636666666666666, 0.657, 0.6485000000000001, 0.6399999999999999, 0.6306666666666667, 0.6218333333333333, 0.6105, 0.5991666666666666, 0.592, 0.5941666666666666, 0.6038333333333332, 0.6158333333333333, 0.6301666666666667, 0.6426666666666667, 0.6563333333333333, 0.6713333333333333, 0.6891666666666666, 0.7071666666666666, 0.7216666666666667, 0.728, 0.7283333333333333, 0.728, 0.7261666666666666, 0.7238333333333333, 0.7219999999999999, 0.7198333333333332, 0.716, 0.7125, 0.709, 0.7058333333333333, 0.7028333333333333, 0.6993333333333334, 0.6971666666666667, 0.6975, 0.6975, 0.6976666666666668, 0.6973333333333334, 0.6968333333333334, 0.6971666666666667, 0.6961666666666667, 0.6955, 0.6946666666666668, 0.6933333333333334, 0.6923333333333334, 0.6923333333333332, 0.691, 0.6921666666666667, 0.6931666666666667, 0.6933333333333334, 0.6945, 0.6956666666666667, 0.6973333333333332, 0.7003333333333333, 0.7033333333333333, 0.7053333333333333, 0.709, 0.7121666666666666, 0.7151666666666666, 0.7186666666666666, 0.7206666666666666, 0.7229999999999999, 0.7251666666666666, 0.7274999999999999, 0.73, 0.733, 0.7361666666666666, 0.7391666666666665, 0.7431666666666665, 0.7476666666666667, 0.7548333333333334, 0.7618333333333334, 0.768, 0.774, 0.7795, 0.7848333333333333, 0.7898333333333334, 0.7931666666666667, 0.7958333333333334, 0.7978333333333333, 0.7971666666666666, 0.7935, 0.7893333333333333, 0.7811666666666667, 0.7713333333333333, 0.7605000000000001, 0.7486666666666667, 0.738, 0.7243333333333334, 0.7041666666666667, 0.6761666666666667, 0.6518333333333334, 0.6295, 0.6116666666666667, 0.5965, 0.583, 0.5713333333333332, 0.5606666666666666, 0.5538333333333333, 0.5543333333333333, 0.5648333333333333, 0.5748333333333333, 0.5848333333333333, 0.5928333333333333, 0.6008333333333333, 0.6083333333333333, 0.6156666666666667, 0.6226666666666667, 0.6285000000000001, 0.6338333333333334, 0.6395000000000001, 0.6443333333333333, 0.6483333333333333, 0.6531666666666667, 0.6563333333333333, 0.6588333333333334, 0.6601666666666667, 0.6601666666666668, 0.6591666666666667, 0.6576666666666667, 0.655, 0.6523333333333333, 0.6481666666666667, 0.644, 0.638, 0.633, 0.6293333333333333, 0.6271666666666667, 0.6251666666666666, 0.625, 0.63, 0.6393333333333333, 0.651, 0.6626666666666667, 0.6759999999999999, 0.6898333333333333, 0.7023333333333334, 0.715, 0.7289999999999999, 0.7413333333333333, 0.7476666666666667, 0.7493333333333333, 0.75, 0.7499999999999999, 0.7506666666666667, 0.7508333333333332, 0.7505, 0.7498333333333334, 0.749, 0.7489999999999999, 0.7496666666666666, 0.7506666666666666, 0.7521666666666667, 0.7538333333333334, 0.7561666666666665, 0.7576666666666666, 0.7596666666666667, 0.7609999999999999, 0.7625, 0.763, 0.7635, 0.764, 0.7631666666666665, 0.763, 0.7626666666666667, 0.7626666666666666, 0.7603333333333333, 0.7581666666666667, 0.7558333333333332, 0.7543333333333333, 0.7531666666666668, 0.7526666666666666, 0.7533333333333333, 0.7545, 0.7553333333333333, 0.7563333333333333, 0.7591666666666667, 
0.7621666666666667, 0.7655000000000001, 0.7685000000000001, 0.771, 0.7731666666666668, 0.7753333333333334, 0.7763333333333333, 0.7781666666666668, 0.7796666666666667, 0.7810000000000001, 0.7821666666666667, 0.7835000000000001, 0.7845000000000001, 0.7856666666666667, 0.7861666666666667, 0.786, 0.7858333333333334, 0.7816666666666667, 0.7771666666666668, 0.7723333333333333, 0.7671666666666668, 0.7608333333333335, 0.7548333333333334, 0.7475, 0.7398333333333333, 0.732, 0.7235, 0.7171666666666667, 0.711, 0.7064999999999999, 0.7006666666666665, 0.695, 0.6895, 0.6858333333333333, 0.6825, 0.6793333333333333, 0.6761666666666667, 0.674, 0.6713333333333333, 0.6666666666666667, 0.6633333333333333, 0.6596666666666667, 0.6543333333333333, 0.6481666666666667, 0.6416666666666666, 0.635, 0.6288333333333334, 0.6226666666666667, 0.617, 0.6131666666666666, 0.6093333333333333, 0.606, 0.6046666666666667, 0.6036666666666667, 0.6036666666666667, 0.6045, 0.6065, 0.6084999999999999, 0.6108333333333333, 0.6136666666666667, 0.6168333333333333, 0.6205, 0.6236666666666666, 0.6268333333333335, 0.6293333333333333, 0.6318333333333334, 0.6343333333333333, 0.6373333333333333, 0.6401666666666667, 0.6421666666666667, 0.6443333333333333, 0.6466666666666667, 0.6516666666666666, 0.6625000000000001, 0.6784999999999999, 0.6945, 0.7093333333333334, 0.7224999999999999, 0.7363333333333333, 0.7491666666666666, 0.7603333333333333, 0.7706666666666666, 0.7776666666666666, 0.7768333333333333, 0.7685000000000001, 0.759, 0.7483333333333333, 0.7378333333333333, 0.7261666666666666, 0.7151666666666667, 0.7056666666666667, 0.6968333333333334, 0.6873333333333334, 0.681, 0.6776666666666666, 0.6756666666666666, 0.6753333333333333, 0.6765000000000001, 0.678, 0.6806666666666666, 0.6836666666666666, 0.6868333333333334, 0.6918333333333333, 0.696, 0.7, 0.7043333333333333, 0.71, 0.7151666666666667, 0.7198333333333333, 0.7243333333333333, 0.7286666666666666, 0.733, 0.7373333333333334, 0.7415, 0.7458333333333333, 0.7493333333333333, 0.7526666666666666, 0.7566666666666667, 0.761, 0.7643333333333333, 0.7675000000000001, 0.7693333333333333, 0.7706666666666666, 0.7718333333333334, 0.773, 0.7741666666666667, 0.774, 0.7733333333333333, 0.7726666666666666, 0.7731666666666667, 0.7728333333333334, 0.774, 0.7741666666666667, 0.775, 0.7753333333333334, 0.7753333333333334, 0.7751666666666667, 0.7728333333333334, 0.7705, 0.7581666666666667, 0.7183333333333334, 0.673, 0.6296666666666667, 0.5905, 0.558, 0.5276666666666666, 0.499, 0.47333333333333333, 0.4485, 0.434, 0.44816666666666666, 0.46766666666666656, 0.48666666666666664, 0.5015000000000001, 0.5101666666666667, 0.518, 0.5250666666666668, 0.5317333333333333, 0.5385666666666666, 0.5447333333333334, 0.5512333333333335, 0.5577333333333334, 0.5637333333333333, 0.5697333333333333, 0.5757333333333333, 0.5817333333333333, 0.5873333333333333, 0.5933333333333334, 0.5985, 0.6043333333333334, 0.6096666666666667, 0.615, 0.621, 0.6263333333333334, 0.6323333333333334, 0.6373333333333334, 0.6431666666666667, 0.6483333333333333, 0.6543333333333333, 0.6598333333333334, 0.6655, 0.6711666666666667, 0.6768333333333334, 0.6838333333333333, 0.6901666666666666, 0.6971666666666667, 0.7036666666666667, 0.7105, 0.7168333333333334, 0.7233333333333334, 0.7296666666666667, 0.736, 0.742, 0.7485, 0.7561666666666667, 0.7636666666666667, 0.7715, 0.7795, 0.7876666666666666, 0.7943333333333334, 0.8005000000000001, 0.8051666666666668, 0.8095000000000001, 0.8113333333333334, 0.8106666666666668, 0.8091666666666667, 0.806, 0.8000000000000002, 
0.7935000000000001, 0.7881666666666667, 0.7836666666666667, 0.7806666666666667, 0.7773333333333333, 0.7750000000000001, 0.7726666666666667, 0.7711666666666667, 0.771, 0.772, 0.7738333333333334, 0.7741666666666667, 0.7736666666666667, 0.773, 0.7725000000000001, 0.7725000000000001, 0.773, 0.7726666666666667, 0.7718333333333334, 0.7708333333333333, 0.77, 0.7703333333333333, 0.7710000000000001, 0.7713333333333334, 0.7718333333333334, 0.7715000000000001, 0.772, 0.7715, 0.7715000000000001, 0.7728333333333335, 0.7738333333333334, 0.7745000000000001, 0.7743333333333334, 0.7741666666666667, 0.7745000000000001, 0.7751666666666667, 0.7755000000000001, 0.7776666666666668, 0.7796666666666667, 0.782, 0.784, 0.7866666666666667, 0.7896666666666666, 0.7918333333333333, 0.7941666666666667, 0.7968333333333334, 0.796, 0.7951666666666667, 0.7945, 0.785, 0.7483333333333334, 0.7123333333333334, 0.6785, 0.6475000000000001, 0.6170000000000001, 0.5875000000000001, 0.5630000000000001, 0.5391666666666668, 0.5161666666666667, 0.5025000000000001, 0.5166666666666666, 0.5308333333333333, 0.5441666666666667, 0.5563333333333333, 0.568, 0.579, 0.5896666666666667, 0.6001666666666667, 0.6103333333333334, 0.6205, 0.6306666666666667, 0.6405000000000001, 0.6503333333333333, 0.6601666666666667, 0.6696666666666666, 0.679, 0.6881666666666666, 0.6973333333333332, 0.7064999999999999, 0.7150000000000001, 0.7213333333333334, 0.7278333333333333, 0.7343333333333334, 0.7408333333333333, 0.7473333333333334, 0.7533333333333333, 0.756, 0.7573333333333334, 0.7591666666666667, 0.7615000000000001, 0.766, 0.7703333333333333, 0.7735000000000001, 0.7738333333333334, 0.7741666666666667, 0.7741666666666667, 0.7766666666666667, 0.7795, 0.7808333333333334, 0.7811666666666668, 0.7801666666666668, 0.7780000000000001, 0.7751666666666668, 0.7750000000000001, 0.7745000000000001, 0.7735000000000001, 0.7715, 0.7695000000000001, 0.7685000000000001, 0.7666666666666666, 0.765, 0.7628333333333334, 0.76, 0.7546666666666667, 0.75, 0.7453333333333333, 0.7408333333333333, 0.7358333333333333, 0.7296666666666666, 0.7245, 0.7198333333333332, 0.7164999999999999, 0.7141666666666666, 0.7135, 0.7118333333333332, 0.7103333333333334, 0.709, 0.709, 0.71, 0.7116666666666667, 0.7135, 0.716, 0.7190000000000001, 0.7216666666666667, 0.7243333333333334, 0.7271666666666666, 0.7318333333333334, 0.7361666666666667, 0.7390000000000001, 0.7405000000000002, 0.7406666666666667, 0.7401666666666668, 0.7405, 0.741, 0.7418333333333335, 0.7433333333333334, 0.7428333333333335, 0.7425, 0.7430000000000001, 0.7441666666666666, 0.746, 0.7478333333333333, 0.7495, 0.7515000000000001, 0.7531666666666668, 0.7551666666666667, 0.757547619047619, 0.759547619047619, 0.7612142857142857, 0.7622142857142857, 0.7642142857142857, 0.7663809523809524, 0.7688809523809523, 0.7713809523809524, 0.7740476190476191, 0.7752142857142857, 0.7766666666666667, 0.7776666666666667, 0.779, 0.779, 0.778, 0.7775000000000001, 0.776, 0.7743333333333334, 0.772, 0.7701666666666667, 0.7689999999999999, 0.7683333333333333, 0.7655000000000001, 0.7641666666666667, 0.7628333333333334, 0.7598333333333334, 0.7578333333333334, 0.7561666666666667, 0.7548333333333334, 0.7545, 0.7538333333333334, 0.7535000000000001, 0.7553333333333334, 0.7581666666666667, 0.7606666666666666, 0.7641666666666667, 0.7661666666666667, 0.7701666666666667, 0.775, 0.7781666666666667, 0.7793333333333333, 0.781, 0.782, 0.7823333333333333, 0.7826666666666667, 0.7815, 0.7808333333333334, 0.7773333333333333, 0.7723333333333333, 0.7671666666666667, 0.7628333333333334, 
0.7565, 0.7503333333333334, 0.7448333333333333, 0.7403333333333333, 0.7373333333333334, 0.734, 0.7321666666666667, 0.7311666666666667, 0.7311666666666667, 0.7323333333333333, 0.7343333333333334, 0.7365, 0.7373333333333334, 0.7383333333333334, 0.7373333333333334, 0.7365000000000002, 0.7355, 0.7353333333333334, 0.7378333333333333, 0.7430000000000001, 0.749, 0.7556666666666667, 0.7615000000000001, 0.7665, 0.7731666666666667, 0.7809999999999999, 0.7876666666666667, 0.7938333333333334, 0.7983333333333333, 0.8003333333333333, 0.8019999999999999, 0.8028333333333334, 0.8043333333333333, 0.8061666666666667, 0.808, 0.8088333333333333, 0.8101666666666667, 0.8111666666666666, 0.8116666666666668, 0.8113333333333334, 0.8113333333333334, 0.8115, 0.8115, 0.8116666666666668, 0.8123333333333334, 0.8128333333333334, 0.8116666666666668, 0.8091666666666667, 0.8061666666666667, 0.8019999999999999, 0.7976666666666667, 0.7936666666666667, 0.7910000000000001, 0.7881666666666668, 0.7846666666666667, 0.7818333333333334, 0.781, 0.782, 0.783, 0.7861666666666668, 0.7895000000000001, 0.7930000000000001, 0.797, 0.8015000000000001, 0.8048333333333334, 0.8076666666666668, 0.8106666666666668, 0.8135000000000001, 0.8161666666666667, 0.8181666666666668, 0.8191666666666666, 0.8200000000000001, 0.8205, 0.8196666666666668, 0.8206666666666665, 0.8223333333333332, 0.8240000000000001, 0.8258333333333333, 0.8244999999999999, 0.8191666666666666, 0.8164999999999999, 0.8161666666666667, 0.8165000000000001, 0.8101666666666667, 0.7933333333333333, 0.7675, 0.7426666666666668, 0.7196666666666667, 0.7021666666666666, 0.6905, 0.6781666666666667, 0.6686666666666666, 0.6638333333333334, 0.6679999999999999, 0.682, 0.7053333333333334, 0.7276666666666667, 0.7481666666666668, 0.7673333333333334, 0.784, 0.799, 0.8086666666666668, 0.8118333333333334, 0.8128333333333334, 0.8143333333333335, 0.8148333333333333, 0.8150000000000001, 0.8140000000000001, 0.8128333333333334, 0.8126666666666669, 0.8126666666666666, 0.8110000000000002, 0.8090000000000002, 0.8073333333333335, 0.8053333333333335, 0.8029999999999999, 0.8005000000000001, 0.799, 0.7965, 0.7951666666666667, 0.7939999999999999, 0.7953333333333334, 0.7981666666666667, 0.8011666666666667, 0.8038333333333334, 0.8063333333333335, 0.8091666666666667, 0.8113333333333334, 0.8145000000000001, 0.8155000000000001, 0.8165000000000001, 0.817, 0.817, 0.8170000000000002, 0.8173333333333334, 0.8181666666666667, 0.8178333333333334, 0.818, 0.8188333333333333, 0.8211666666666666, 0.8228333333333333, 0.8240000000000001, 0.8210000000000001, 0.8133333333333332, 0.8069999999999998, 0.7993333333333333, 0.7913333333333333, 0.7826666666666666, 0.7728333333333333, 0.7621666666666667, 0.7525000000000001, 0.7449999999999999, 0.7434999999999999, 0.7464999999999999, 0.7486666666666666, 0.7525000000000001, 0.758, 0.7648333333333334, 0.772, 0.78, 0.7868333333333333, 0.792, 0.7955, 0.7995000000000001, 0.8036666666666668, 0.8078333333333335, 0.8116666666666668, 0.8136666666666666, 0.8155000000000001, 0.8158333333333333, 0.8150000000000001, 0.8125, 0.8091666666666668, 0.8058333333333334, 0.8018333333333334, 0.7971666666666668, 0.7923333333333334, 0.7886666666666667, 0.7845000000000002, 0.7816666666666668, 0.7806666666666667, 0.7815000000000001, 0.7838333333333334, 0.7851666666666667, 0.787, 0.7891666666666668, 0.7908333333333333, 0.7931666666666667, 0.7958333333333333, 0.7976666666666666, 0.7998333333333333, 0.8023333333333333, 0.8038333333333334, 0.806, 0.8075000000000001, 0.8085000000000001, 0.8100000000000002, 0.8118333333333334, 
[data elided: several thousand comma-separated floating-point samples, all in the range 0 to 1, forming a raw numeric time-series payload carried verbatim in this hunk]
0.7996666666666666, 0.7981666666666666, 0.7951666666666666, 0.7923333333333332, 0.7899999999999999, 0.7884999999999999, 0.7871666666666666, 0.7855, 0.7841666666666667, 0.7825, 0.7795, 0.7765, 0.7761666666666667, 0.7771666666666667, 0.7776666666666667, 0.7771666666666667, 0.7761666666666667, 0.7768333333333334, 0.7775, 0.7788333333333333, 0.7823333333333333, 0.7861666666666667, 0.7889999999999999, 0.792, 0.7953333333333333, 0.7993333333333333, 0.8033333333333333, 0.8056666666666666, 0.8078333333333333, 0.8095000000000001, 0.8108333333333334, 0.812, 0.8136666666666669, 0.8153333333333335, 0.8166666666666667, 0.8183333333333334, 0.8203333333333334, 0.8223333333333332, 0.8241666666666665, 0.8258333333333333, 0.827, 0.8281666666666666, 0.8293333333333333, 0.8303333333333333, 0.8314999999999999, 0.8328333333333333, 0.8336666666666666, 0.834, 0.8344999999999999, 0.8348333333333333, 0.8348333333333333, 0.8353333333333332, 0.8343333333333334, 0.8320000000000001, 0.8305, 0.8291666666666666, 0.8281666666666666, 0.8273333333333334, 0.8268333333333333, 0.8254999999999999, 0.8253333333333334, 0.8246666666666667, 0.8254999999999999, 0.8273333333333334, 0.8283333333333331, 0.8286666666666666, 0.8285, 0.8288333333333332, 0.8288333333333332, 0.8296666666666666, 0.8298333333333332, 0.8298333333333332, 0.8295, 0.829, 0.8283333333333334, 0.8283333333333334, 0.8286666666666666, 0.8271666666666666, 0.8125, 0.7828333333333333, 0.7526666666666666, 0.7271666666666666, 0.7068333333333332, 0.6904999999999999, 0.6758333333333333, 0.6621666666666666, 0.6496666666666666, 0.6399999999999999, 0.6444999999999999, 0.6655, 0.689, 0.7089999999999999, 0.725, 0.7383333333333333, 0.7514999999999998, 0.7641666666666665, 0.7769999999999999, 0.7898333333333333, 0.8016666666666665, 0.8123333333333334, 0.821, 0.8274999999999999, 0.8328333333333333, 0.837, 0.8406666666666665, 0.8436666666666666, 0.8463333333333332, 0.8478333333333332, 0.8483333333333333, 0.8479999999999999, 0.8461666666666666, 0.8363333333333332, 0.8284999999999998, 0.8244999999999999, 0.8213333333333332, 0.8183333333333331, 0.8138333333333334, 0.8093333333333332, 0.8051666666666666, 0.8025, 0.8018333333333333, 0.8106666666666668, 0.8163333333333334, 0.8173333333333334, 0.818, 0.8188333333333333, 0.8203333333333334, 0.8211666666666666, 0.8231666666666666, 0.8243333333333333, 0.8243333333333333, 0.8234999999999999, 0.8236666666666667, 0.8228333333333333, 0.8213333333333332, 0.8203333333333334, 0.82, 0.8213333333333332, 0.8091666666666667, 0.7783333333333332, 0.7483333333333333, 0.7221666666666666, 0.7008333333333333, 0.6846666666666665, 0.6688333333333333, 0.6526666666666666, 0.6368333333333333, 0.6213333333333333, 0.6198333333333332, 0.6379999999999999, 0.6568333333333334, 0.6729999999999999, 0.6856666666666666, 0.6958333333333333, 0.706, 0.7161666666666667, 0.7258333333333333, 0.7348333333333332, 0.7434999999999999, 0.7521666666666667, 0.7605000000000001, 0.7686666666666666, 0.7763333333333333, 0.7841666666666667, 0.7921666666666667, 0.7996666666666666, 0.8071666666666667, 0.8148333333333333, 0.8225, 0.8283333333333334, 0.8331666666666667, 0.8376666666666667, 0.8413333333333333, 0.8418333333333333, 0.8411666666666667, 0.8404999999999999, 0.8391666666666666, 0.8373333333333333, 0.8353333333333332, 0.834, 0.8328333333333333, 0.8309999999999998, 0.8288333333333332, 0.8291666666666666, 0.8294999999999998, 0.829, 0.8286666666666666, 0.8284999999999998, 0.8268333333333333, 0.8263333333333331, 0.8259999999999998, 0.8258333333333333, 0.8264999999999999, 0.8275, 
0.8283333333333334, 0.8301666666666666, 0.8318333333333332, 0.834, 0.837, 0.8390000000000001, 0.8398333333333333, 0.8408333333333333, 0.8428333333333333, 0.8448333333333332, 0.8474999999999999, 0.8494999999999999, 0.8515, 0.8524999999999998, 0.8535, 0.8546666666666667, 0.857, 0.8593333333333334, 0.86, 0.8608333333333332, 0.8611666666666666, 0.8619999999999999, 0.8636666666666667, 0.8656666666666666, 0.8676666666666666, 0.8695, 0.8713333333333333, 0.8728333333333333, 0.875, 0.8764999999999998, 0.8781666666666667, 0.8793333333333333, 0.8796666666666667, 0.8798333333333332, 0.8806666666666667, 0.8815, 0.876, 0.8498333333333333, 0.8234999999999999, 0.8025, 0.7836666666666666, 0.7658333333333334, 0.7488333333333332, 0.7326666666666666, 0.7166666666666666, 0.7014999999999999, 0.6936666666666665, 0.7074999999999999, 0.7221666666666666, 0.7326666666666666, 0.7416666666666666, 0.7505, 0.7593333333333334, 0.7676666666666666, 0.7758333333333334, 0.784, 0.7916666666666667, 0.7993333333333333, 0.8063333333333332, 0.8123333333333334, 0.8173333333333334, 0.8223333333333332, 0.8261666666666667, 0.8298333333333334, 0.8324999999999999, 0.8343333333333331, 0.8363333333333334, 0.8385, 0.8399999999999999, 0.8393333333333333, 0.8391666666666666, 0.8383333333333333, 0.8383333333333333, 0.8376666666666666, 0.8343333333333334, 0.8321666666666665, 0.8306666666666664, 0.8283333333333331, 0.8261666666666667, 0.8248333333333333, 0.8240000000000001, 0.8226666666666667, 0.8210000000000001, 0.8198333333333332, 0.8210000000000001, 0.8206666666666667, 0.8188333333333334, 0.8166666666666668, 0.8136666666666666, 0.8121666666666666, 0.8105, 0.8088333333333333, 0.8073333333333332, 0.8066666666666666, 0.807, 0.8089999999999999, 0.8041666666666666, 0.7816666666666666, 0.7608333333333334, 0.7461666666666666, 0.7338333333333333, 0.7231666666666665, 0.7135, 0.7041666666666666, 0.6956666666666667, 0.6871666666666666, 0.687, 0.7061666666666666, 0.7258333333333333, 0.7390000000000001, 0.7488333333333335, 0.757, 0.7638333333333334, 0.7696666666666667, 0.7753333333333334, 0.78, 0.7835000000000001, 0.7858333333333334, 0.7873333333333334, 0.7885, 0.79, 0.7923333333333333, 0.7938333333333333, 0.7946666666666666, 0.7936666666666665, 0.7929999999999999, 0.7925, 0.7918333333333333, 0.7901666666666667, 0.7885, 0.787, 0.785, 0.784, 0.784, 0.785, 0.7853333333333334, 0.7853333333333334, 0.7861666666666668, 0.7878333333333334, 0.7898333333333334, 0.7923333333333333, 0.7953333333333333, 0.7975, 0.7978333333333334, 0.7988333333333333, 0.8, 0.8019999999999999, 0.8031666666666666, 0.805, 0.807, 0.8085000000000001, 0.8095000000000001, 0.8116666666666668, 0.8151666666666667, 0.8171666666666667, 0.818, 0.8171666666666667, 0.8163333333333332, 0.8141666666666666, 0.8089999999999999, 0.8038333333333332, 0.7988333333333333, 0.7933333333333332, 0.7881666666666666, 0.7801666666666666, 0.7736666666666666, 0.7685, 0.7638333333333333, 0.7601666666666665, 0.7588333333333332, 0.7568333333333334, 0.7548333333333332, 0.7528333333333334, 0.7503333333333333, 0.7513333333333333, 0.7526666666666666, 0.7536666666666665, 0.7516666666666667, 0.7488333333333332, 0.7473333333333333, 0.7464999999999999, 0.7456666666666666, 0.7456666666666666, 0.7453333333333333, 0.7441666666666666, 0.7421666666666666, 0.7391666666666666, 0.7396666666666667, 0.7374999999999999, 0.7338333333333333, 0.7304999999999999, 0.7263333333333333, 0.7216666666666666, 0.716, 0.7111666666666666, 0.7063333333333334, 0.7033333333333334, 0.6993333333333333, 0.6983333333333333, 0.6978333333333333, 
0.6971666666666667, 0.6971666666666667, 0.696, 0.6955, 0.6935, 0.6915, 0.6891666666666667, 0.6871666666666666, 0.6851666666666667, 0.683, 0.679, 0.6759999999999999, 0.6738333333333333, 0.6726666666666666, 0.6738333333333333, 0.6766666666666667, 0.6806666666666665, 0.6843333333333332, 0.6858333333333333, 0.6861666666666666, 0.6869999999999999, 0.6869999999999999, 0.6865, 0.6861666666666666, 0.6849999999999999, 0.6811666666666667, 0.6744999999999999, 0.668, 0.6645, 0.6625, 0.6616666666666666, 0.6609999999999999, 0.6615, 0.6615, 0.6615, 0.6623333333333333, 0.6556666666666666, 0.6475, 0.6385, 0.6288333333333334, 0.6198333333333332, 0.6104999999999999, 0.5963333333333333, 0.5820000000000001, 0.569, 0.5569999999999999, 0.5541666666666666, 0.5533333333333333, 0.5533333333333333, 0.5548333333333333, 0.5559999999999999, 0.558, 0.5646666666666667, 0.572, 0.579, 0.5855, 0.592, 0.5983333333333333, 0.6049999999999999, 0.6111666666666666, 0.617, 0.621, 0.6224999999999999, 0.6234999999999999, 0.6234999999999999, 0.6231666666666666, 0.6218333333333332, 0.6184999999999999, 0.6138333333333332, 0.6051666666666666, 0.591, 0.577, 0.5646666666666667, 0.5516666666666665, 0.5386666666666666, 0.5269999999999999, 0.5163333333333333, 0.5071666666666667, 0.49866666666666665, 0.49433333333333324, 0.49549999999999994, 0.49483333333333324, 0.49083333333333334, 0.48683333333333323, 0.48316666666666663, 0.47816666666666663, 0.4715, 0.45983333333333337, 0.4493333333333333, 0.43866666666666665, 0.41950000000000004, 0.4021666666666667, 0.38916666666666666, 0.37216666666666665, 0.3495, 0.32116666666666666, 0.2808333333333333, 0.2428333333333333, 0.2036666666666666, 0.16366666666666665, 0.13216666666666663, 0.10216666666666663, 0.07249999999999997, 0.05649999999999996, 0.05283333333333329, 0.053666666666666626, 0.06783333333333329, 0.08549999999999995, 0.10316666666666663, 0.12066666666666662, 0.13833333333333328, 0.1558333333333333, 0.17183333333333328, 0.17343333333333327, 0.1584333333333333, 0.14259999999999992, 0.12743333333333326, 0.11059999999999995, 0.09343333333333328, 0.07626666666666662, 0.058433333333333295, 0.0409333333333333, 0.024099999999999965, 0.013499999999999967, 0.012499999999999966, 0.013166666666666636, 0.011499999999999967, 0.0108333333333333, 0.0098333333333333, 0.009499999999999969, 0.009666666666666632, 0.008666666666666633, 0.007666666666666632, 0.005666666666666634, 0.006666666666666633, 0.015999999999999966, 0.026333333333333302, 0.026833333333333303, 0.0293333333333333, 0.04266666666666664, 0.05399999999999997, 0.06066666666666663, 0.061166666666666626, 0.061166666666666626, 0.058333333333333307, 0.0473333333333333, 0.036833333333333294, 0.037333333333333295, 0.04283333333333329, 0.0418333333333333, 0.030833333333333303, 0.02449999999999997, 0.02399999999999997, 0.024166666666666635, 0.02399999999999997, 0.02399999999999997, 0.0243333333333333, 0.022833333333333303, 0.014666666666666633, 0.0019999999999999697, 0.0013333333333333027, 0.0011666666666666362, 0.0016666666666666364, 0.011333333333333303, 0.025499999999999967, 0.026166666666666633, 0.025833333333333302, 0.030499999999999965, 0.04249999999999997, 0.05666666666666664, 0.06749999999999998, 0.07699999999999997, 0.07633333333333331, 0.06666666666666664, 0.0533333333333333, 0.05299999999999997, 0.053666666666666626, 0.049166666666666636, 0.0373333333333333, 0.02349999999999997, 0.012999999999999966, 0.0038333333333333006, 0.0038333333333333006, 0.0038333333333333006, 0.003499999999999967, 0.0031666666666666328, 0.0023333333333333, 
0.0024999999999999667, 0.0021666666666666336, 0.0019999999999999666, 0.0016666666666666336, 0.0013333333333333003, 0.0013333333333333003, 0.0011666666666666336, 0.003666666666666633, 0.014499999999999968, 0.030333333333333302, 0.046499999999999965, 0.06066666666666663, 0.07766666666666663, 0.09399999999999997, 0.10649999999999997, 0.10766666666666665, 0.10816666666666663, 0.10549999999999997, 0.09483333333333331, 0.07899999999999999, 0.06366666666666664, 0.057833333333333306, 0.05199999999999998, 0.03599999999999997, 0.023666666666666634, 0.022833333333333303, 0.022333333333333302, 0.02199999999999997, 0.022166666666666633, 0.022166666666666633, 0.02099999999999997, 0.013666666666666636, 0.0024999999999999667, 0.0023333333333333, 0.0019999999999999666, 0.0028333333333333, 0.012999999999999966, 0.030833333333333303, 0.0493333333333333, 0.06916666666666663, 0.08999999999999997, 0.1108333333333333, 0.1333333333333333, 0.15099999999999997, 0.1628333333333333, 0.16699999999999998, 0.1608333333333333, 0.14366666666666664, 0.12483333333333331, 0.10566666666666664, 0.08549999999999998, 0.06416666666666664, 0.04216666666666664, 0.0243333333333333, 0.012833333333333299, 0.007999999999999967, 0.004833333333333301, 0.005166666666666634, 0.006999999999999967, 0.006999999999999967, 0.007166666666666634, 0.008666666666666635, 0.009999999999999967, 0.012999999999999966, 0.0148333333333333, 0.0148333333333333, 0.015166666666666634, 0.0173333333333333, 0.026499999999999968, 0.04266666666666664, 0.059833333333333294, 0.07599999999999996, 0.09249999999999996, 0.10633333333333331, 0.12033333333333329, 0.13699999999999996, 0.15316666666666662, 0.16716666666666663, 0.17416666666666664, 0.17533333333333329, 0.1763333333333333, 0.18099999999999997, 0.1858333333333333, 0.19116666666666665, 0.19599999999999995, 0.19983333333333328, 0.2043333333333333, 0.20916666666666664, 0.21399999999999997, 0.21899999999999994, 0.223, 0.22383333333333327, 0.22449999999999998, 0.22633333333333328, 0.22966666666666663, 0.23366666666666663, 0.23966666666666664, 0.24666666666666667, 0.25283333333333335, 0.2588333333333333, 0.2658333333333333, 0.2726666666666666, 0.2796666666666666, 0.2866666666666666, 0.294, 0.301, 0.3051666666666667, 0.309, 0.313, 0.317, 0.32083333333333336, 0.3243333333333333, 0.32733333333333337, 0.33099999999999996, 0.3343333333333333, 0.3373333333333333, 0.3413333333333333, 0.3446666666666666, 0.3486666666666666, 0.3526666666666667, 0.35583333333333333, 0.35983333333333334, 0.36383333333333334, 0.36716666666666664, 0.371, 0.37499999999999994, 0.3783333333333333, 0.38216666666666665, 0.3858333333333333, 0.38983333333333337, 0.3948333333333333, 0.3988333333333333, 0.4033333333333333, 0.4083333333333333, 0.4125, 0.4175, 0.4221666666666666, 0.42699999999999994, 0.43133333333333335, 0.43549999999999994, 0.43949999999999995, 0.44383333333333336, 0.44833333333333336, 0.45233333333333325, 0.4573333333333333, 0.46133333333333326, 0.4648333333333333, 0.46699999999999997, 0.4691666666666666, 0.47199999999999986, 0.4741666666666665, 0.4768333333333333, 0.47916666666666663, 0.4821666666666665, 0.4843333333333333, 0.48733333333333323, 0.4918333333333333, 0.49866666666666665, 0.5048333333333332, 0.5098333333333332, 0.5156666666666666, 0.5213333333333333, 0.5275, 0.5335, 0.5396666666666667, 0.5456666666666666, 0.5511666666666667, 0.5555000000000001, 0.5611666666666666, 0.5671666666666667, 0.5731666666666667, 0.579, 0.5844999999999999, 0.5894999999999999, 0.5951666666666666, 0.6004999999999999, 0.6060000000000001, 
0.6108333333333333, 0.6158333333333333, 0.6208333333333333, 0.6256666666666666, 0.6301666666666667, 0.6345, 0.6395000000000001, 0.6438333333333334, 0.6485, 0.6528333333333334, 0.6578333333333333, 0.6626666666666667, 0.6685, 0.674, 0.6799999999999999, 0.6863333333333334, 0.6923333333333332, 0.6986666666666667, 0.7048333333333333, 0.7114999999999999, 0.7175, 0.7234999999999999, 0.7286666666666666, 0.7338333333333332, 0.7393333333333333, 0.7446666666666666, 0.7503333333333334, 0.7556666666666667, 0.7606666666666667, 0.7656666666666667, 0.7708333333333334, 0.776, 0.7809999999999999, 0.7858333333333333, 0.7903333333333333, 0.7945, 0.7985, 0.7995, 0.7985, 0.7901666666666667, 0.7763333333333333, 0.7625, 0.7496666666666667, 0.7403333333333333, 0.7295, 0.7156666666666667, 0.7018333333333333, 0.6910000000000001, 0.6823333333333333, 0.6806666666666666, 0.6841666666666666, 0.6876666666666666, 0.6896666666666667, 0.6871666666666666, 0.6848333333333333, 0.6843333333333333, 0.6833333333333333, 0.6824999999999999, 0.682, 0.6813333333333333, 0.6815, 0.6815, 0.682, 0.683, 0.686, 0.6908333333333333, 0.6958333333333333, 0.701, 0.7061666666666666, 0.7114999999999999, 0.7164999999999999, 0.7216666666666667, 0.7273333333333334, 0.7338333333333333, 0.738, 0.742, 0.7463333333333333, 0.7498333333333334, 0.7533333333333333, 0.7565, 0.7595, 0.7606666666666666, 0.7613333333333332, 0.7611666666666667, 0.7605, 0.7578333333333334, 0.7541666666666667, 0.7494523809523809, 0.7447857142857142, 0.7402857142857142, 0.7361190476190476, 0.7331190476190476, 0.7304523809523809, 0.7286190476190476, 0.7282857142857143, 0.7292857142857143, 0.7301190476190476, 0.7328333333333333, 0.7358333333333332, 0.7396666666666667, 0.7438333333333333, 0.7475, 0.7515000000000001, 0.7538333333333334, 0.7558333333333334, 0.7578333333333334, 0.7615, 0.7645, 0.7671666666666666, 0.7689999999999999, 0.77, 0.7713333333333334, 0.772, 0.7736666666666666, 0.7748333333333334, 0.7758333333333334, 0.7766666666666667, 0.7778333333333334, 0.7786666666666668, 0.7795000000000001, 0.7805000000000001, 0.781, 0.7815000000000001, 0.7826666666666667, 0.7851666666666667, 0.7868333333333334, 0.7863333333333333, 0.7826666666666667, 0.7781666666666667, 0.7743333333333334, 0.7713333333333334, 0.7676666666666667, 0.7676666666666667, 0.7683333333333333, 0.7688333333333334, 0.7696666666666667, 0.7708333333333334, 0.7753333333333334, 0.7803333333333333, 0.7845000000000001, 0.7876666666666667, 0.791, 0.7915, 0.7916666666666667, 0.7898333333333334, 0.7878333333333334, 0.7871666666666667, 0.7861666666666667, 0.7851666666666667, 0.7845, 0.7835000000000001, 0.7843333333333333, 0.7843333333333333, 0.7823333333333334, 0.7825000000000001, 0.7835000000000001, 0.7846666666666667, 0.7851666666666667, 0.7853333333333333, 0.7845, 0.7825, 0.7805000000000001, 0.7773333333333333, 0.7756666666666667, 0.7735, 0.7703333333333333, 0.7671666666666666, 0.7648333333333333, 0.7636666666666667, 0.7638333333333333, 0.7648333333333334, 0.7648333333333334, 0.7666666666666667, 0.7683333333333333, 0.77, 0.7726666666666667, 0.7743333333333334, 0.776, 0.7765000000000001, 0.7765000000000001, 0.7773333333333334, 0.778, 0.7773333333333334, 0.7768333333333335, 0.7771666666666668, 0.7781666666666667, 0.78, 0.7821666666666667, 0.7846666666666666, 0.7868333333333333, 0.788, 0.7886666666666666, 0.7901666666666667, 0.7913333333333333, 0.7918333333333333, 0.7913333333333333, 0.79, 0.788, 0.787, 0.786, 0.7866666666666667, 0.7878333333333334, 0.7893333333333334, 0.7911666666666667, 0.7931666666666668, 0.7936666666666667, 
0.7946666666666667, 0.7953333333333333, 0.7951666666666667, 0.7951666666666667, 0.7943333333333334, 0.7939999999999999, 0.7933333333333333, 0.7926666666666666, 0.7911666666666667, 0.7915, 0.7921666666666667, 0.7929999999999999, 0.7939999999999999, 0.792, 0.7818333333333334, 0.7701666666666667, 0.7593333333333334, 0.7515, 0.7466666666666667, 0.743, 0.7405, 0.738, 0.734, 0.734, 0.7408333333333333, 0.7501666666666665, 0.7596666666666666, 0.7666666666666666, 0.7705, 0.7733333333333332, 0.7748333333333333, 0.776, 0.7778333333333334, 0.7788333333333334, 0.7813333333333334, 0.7835000000000001, 0.7846666666666667, 0.7855000000000001, 0.7871666666666667, 0.7886666666666666, 0.79, 0.7905, 0.7905, 0.7913333333333334, 0.792, 0.7935, 0.794, 0.7933333333333333, 0.7921666666666667, 0.788, 0.7836666666666667, 0.7805000000000001, 0.7781666666666667, 0.7746666666666666, 0.7708333333333334, 0.7648333333333334, 0.76, 0.756, 0.7533333333333333, 0.7538333333333334, 0.7541666666666667, 0.7533333333333333, 0.753, 0.7541666666666667, 0.7566666666666666, 0.7606666666666666, 0.7645, 0.7676666666666667, 0.7695000000000001, 0.7711666666666667, 0.7728333333333335, 0.7751666666666667, 0.7776666666666667, 0.7783333333333334, 0.7778333333333334, 0.7751666666666667, 0.7718333333333333, 0.7678333333333334, 0.7628333333333333, 0.7576666666666666, 0.7529999999999999, 0.75, 0.7464999999999999, 0.7435, 0.741, 0.7401666666666668, 0.7406666666666666, 0.7423333333333333, 0.7443333333333333, 0.7474999999999999, 0.7498333333333334, 0.7511666666666665, 0.7521666666666667, 0.7538333333333334, 0.7553333333333333, 0.7558333333333332, 0.756, 0.7566666666666666, 0.7566666666666666, 0.7565, 0.7571666666666667, 0.7581666666666667, 0.7596666666666667, 0.7598333333333332, 0.7618333333333334, 0.7651666666666667, 0.7673333333333334, 0.769, 0.7715, 0.7716666666666666, 0.7716666666666667, 0.7707619047619046, 0.7692619047619047, 0.7692619047619047, 0.7674285714285715, 0.7649285714285714, 0.7634285714285713, 0.7619285714285714, 0.7599285714285714, 0.757595238095238, 0.7549285714285714, 0.7539999999999999, 0.7539999999999999, 0.7529999999999999, 0.7508333333333332, 0.7478333333333333, 0.7443333333333333, 0.741, 0.7386666666666667, 0.739, 0.7403333333333333, 0.7398333333333333, 0.7396666666666667, 0.7411666666666666, 0.7446666666666667, 0.7493333333333334, 0.7551666666666667, 0.7605000000000001, 0.7643333333333334, 0.767, 0.7686666666666666, 0.7716666666666667, 0.7738333333333334, 0.7735000000000001, 0.7721666666666667, 0.7711666666666666, 0.7693333333333333, 0.7685000000000001, 0.7695000000000001, 0.7708333333333334, 0.773, 0.7741666666666667, 0.7758333333333335, 0.7765000000000001, 0.7780000000000001, 0.7790000000000001, 0.78, 0.7808333333333334, 0.781, 0.782, 0.7826666666666666, 0.7835000000000001, 0.7841666666666667, 0.7863333333333334, 0.7876666666666667, 0.7899166666666667, 0.7927500000000001, 0.7949166666666667, 0.7975833333333334, 0.7977500000000001, 0.79825, 0.79925, 0.8002499999999999, 0.8015833333333333, 0.8035833333333333, 0.8038333333333334, 0.8038333333333334, 0.8045, 0.8043333333333333, 0.8065, 0.8088333333333333, 0.8101666666666667, 0.8106666666666668, 0.8111666666666666, 0.8109999999999999, 0.8094999999999999, 0.8081666666666667, 0.8065, 0.8058333333333334, 0.8046666666666666, 0.8028333333333333, 0.8011666666666667, 0.8003333333333332, 0.7978333333333334, 0.7948333333333333, 0.7938333333333334, 0.7913333333333334, 0.7878333333333333, 0.7846666666666666, 0.7815, 0.7791666666666666, 0.7783333333333333, 0.7783333333333333, 0.78, 
0.7818333333333334, 0.7828333333333333, 0.784, 0.7865, 0.7885, 0.7891666666666668, 0.7901666666666667, 0.7913333333333333, 0.7911666666666667, 0.7908333333333334, 0.7906666666666667, 0.7903333333333333, 0.7903333333333334, 0.7895000000000001, 0.7895000000000001, 0.7876666666666667, 0.785, 0.7811666666666668, 0.7776666666666666, 0.7743333333333333, 0.7723333333333333, 0.7735, 0.775, 0.7765, 0.7765, 0.7745, 0.7628333333333333, 0.7525, 0.7430000000000001, 0.7348333333333333, 0.7264999999999999, 0.7168333333333333, 0.708, 0.7003333333333333, 0.6956666666666667, 0.6986666666666667, 0.7121666666666667, 0.7244999999999999, 0.7366666666666666, 0.7476666666666667, 0.7575000000000001, 0.7666666666666666, 0.7748333333333333, 0.782, 0.7869999999999999, 0.7891666666666667, 0.7901666666666667, 0.7911666666666666, 0.7921666666666666, 0.7925, 0.7925, 0.7923333333333333, 0.7925, 0.7915, 0.7885, 0.7845000000000001, 0.7815000000000001, 0.7783333333333333, 0.7751666666666666, 0.7726666666666666, 0.7716666666666667, 0.7705, 0.7685000000000001, 0.7681666666666667, 0.7691666666666667, 0.7711666666666667, 0.7725000000000001, 0.7738333333333334, 0.775, 0.7746666666666668, 0.7738333333333334, 0.7731666666666668, 0.7726666666666666, 0.7708333333333333, 0.77, 0.7688333333333333, 0.7675, 0.7671666666666666, 0.7666666666666666, 0.7663333333333332, 0.7676666666666666, 0.7691666666666667, 0.7713333333333333, 0.7726666666666666, 0.774, 0.7753333333333333, 0.7755833333333333, 0.7755833333333334, 0.7750833333333333, 0.7757500000000002, 0.7742500000000001, 0.7719166666666666, 0.76925, 0.76875, 0.7674166666666666, 0.7655833333333334, 0.7643333333333333, 0.7631666666666665, 0.7618333333333334, 0.7608333333333333, 0.759, 0.7585, 0.759, 0.7589999999999999, 0.7596666666666666, 0.7613333333333332, 0.7628333333333333, 0.7639999999999999, 0.7653333333333333, 0.7671666666666666, 0.768, 0.768, 0.7678333333333333, 0.7665, 0.7658333333333334, 0.7658333333333334, 0.7671666666666667, 0.7681666666666667, 0.7696666666666666, 0.7711666666666667, 0.7751666666666667, 0.779, 0.7826666666666667, 0.788, 0.7895000000000001, 0.7905, 0.7911666666666667, 0.7921666666666667, 0.7935, 0.7946666666666667, 0.7956666666666667, 0.7968333333333334, 0.7975000000000001, 0.7976666666666667, 0.8010000000000002, 0.8038333333333334, 0.8063333333333335, 0.8086666666666668, 0.8106666666666668, 0.8116666666666668, 0.8125000000000002, 0.8135, 0.8145, 0.8155000000000001, 0.8156666666666667, 0.8135, 0.8113333333333334, 0.8081666666666667, 0.8053333333333332, 0.8036666666666668, 0.8016666666666667, 0.799, 0.796, 0.7931666666666667, 0.7901666666666667, 0.7915, 0.7926666666666666, 0.7949999999999999, 0.7966666666666666, 0.7969999999999999, 0.7981666666666667, 0.7986666666666666, 0.7996666666666666, 0.8001666666666667, 0.8015000000000001, 0.8015000000000001, 0.8016666666666667, 0.8006666666666667, 0.8, 0.7979999999999999, 0.7939999999999999, 0.7911666666666666, 0.788, 0.7815, 0.772, 0.7643333333333333, 0.7578333333333334, 0.7531666666666667, 0.7493333333333333, 0.7481666666666668, 0.7478333333333333, 0.7468333333333333, 0.7461666666666666, 0.7495, 0.7566666666666666, 0.7615000000000001, 0.7655000000000001, 0.7685000000000001, 0.7703333333333334, 0.7713333333333333, 0.7716666666666667, 0.7735000000000001, 0.7751666666666667, 0.7751666666666668, 0.7753333333333334, 0.7768333333333335, 0.7778333333333334, 0.7795, 0.7808333333333334, 0.7828333333333333, 0.785, 0.7875, 0.7899999999999999, 0.7936666666666666, 0.7953333333333333, 0.7935, 0.7908333333333333, 0.7869999999999999, 
0.7831666666666666, 0.7791666666666666, 0.776, 0.7708666666666667, 0.7655333333333333, 0.7602, 0.7573666666666666, 0.7588666666666667, 0.7608666666666666, 0.7633666666666667, 0.7658666666666667, 0.7680333333333333, 0.7700333333333333, 0.7731666666666668, 0.7768333333333335, 0.7805000000000002, 0.7831666666666667, 0.7846666666666667, 0.7866666666666667, 0.7885, 0.7893333333333333, 0.7896666666666667, 0.7893333333333333, 0.7893333333333333, 0.7893333333333333, 0.788, 0.7849999999999999, 0.7815000000000001, 0.7775000000000001, 0.7741666666666667, 0.7726666666666666, 0.7721666666666667, 0.7725000000000001, 0.7713333333333333, 0.7706666666666667, 0.7721666666666667, 0.7748333333333334, 0.777, 0.7790000000000001, 0.78, 0.78, 0.7793333333333334, 0.7785, 0.7783333333333334, 0.7783333333333334, 0.776, 0.7743333333333334, 0.7723333333333333, 0.771, 0.7703333333333334, 0.77, 0.7696666666666667, 0.7706666666666666, 0.7723333333333333, 0.7736666666666666, 0.7763333333333333, 0.7783333333333333, 0.7785, 0.7786666666666666, 0.7786666666666666, 0.7788333333333333, 0.7791666666666666, 0.7785, 0.7775000000000001, 0.7753333333333334, 0.7743333333333332, 0.7723333333333333, 0.7723333333333333, 0.7718333333333334, 0.7711666666666666, 0.771, 0.7715, 0.7716666666666667, 0.7701666666666667, 0.7699999999999999, 0.7696666666666666, 0.7708333333333334, 0.7726666666666666, 0.7753333333333334, 0.7780000000000001, 0.7798333333333334, 0.7806666666666666, 0.7818333333333334, 0.7848333333333335, 0.7868333333333334, 0.7876666666666667, 0.7878333333333334, 0.7876666666666667, 0.7876666666666667, 0.7885, 0.7896666666666666, 0.792, 0.7941666666666667, 0.7955, 0.7971666666666667, 0.799, 0.8016666666666667, 0.8041666666666668, 0.8061666666666667, 0.8076666666666666, 0.8091666666666667, 0.8095000000000001, 0.8096666666666668, 0.8105, 0.8113333333333335, 0.8130000000000001, 0.8136666666666666, 0.8148333333333333, 0.8160000000000001, 0.817, 0.8178333333333333, 0.8188333333333333, 0.8196666666666668, 0.8208333333333334, 0.8228333333333333, 0.8241666666666667, 0.825, 0.8230000000000001, 0.8208333333333332, 0.8166666666666667, 0.8133333333333332, 0.8109999999999999, 0.8098333333333333, 0.8083333333333333, 0.8063333333333332, 0.8038333333333332, 0.8008333333333333, 0.8014999999999999, 0.8025, 0.8061666666666667, 0.8093333333333333, 0.8123333333333334, 0.8145, 0.8164999999999999, 0.818, 0.8201666666666666, 0.8240000000000001, 0.827, 0.8295, 0.8306666666666664, 0.8316666666666667, 0.8299999999999998, 0.827, 0.8234999999999999, 0.8198333333333332, 0.8153333333333335, 0.8086666666666666, 0.8009999999999999, 0.7918333333333333, 0.7843333333333333, 0.7778333333333333, 0.7745, 0.7718333333333333, 0.7698333333333334, 0.768, 0.767, 0.7681666666666667, 0.7705, 0.7755, 0.7795, 0.7825, 0.7835, 0.7851666666666668, 0.7863333333333333, 0.7871666666666667, 0.7876666666666667, 0.7878333333333333, 0.787, 0.7845000000000001, 0.7818333333333334, 0.7791666666666667, 0.7766666666666666, 0.776, 0.7788333333333333, 0.7825, 0.7865, 0.7899999999999999, 0.7831666666666666, 0.7701666666666667, 0.7588333333333334, 0.7488333333333334, 0.7406666666666666, 0.732, 0.7211666666666666, 0.711, 0.7023333333333334, 0.6973333333333332, 0.706, 0.7218333333333333, 0.7363333333333333, 0.7493333333333333, 0.7611666666666667, 0.7716666666666667, 0.7809999999999999, 0.7888333333333334, 0.7946666666666666, 0.7971666666666666, 0.7968333333333333, 0.7966666666666666, 0.7966666666666666, 0.7965, 0.7965, 0.7973333333333332, 0.7988333333333333, 0.8011666666666667, 0.8030000000000002, 
0.8036666666666665, 0.8046666666666666, 0.8056666666666666, 0.8066666666666666, 0.8078333333333333, 0.8086666666666668, 0.8086666666666666, 0.8078333333333333, 0.8066666666666666, 0.8066666666666666, 0.8080000000000002, 0.8090000000000002, 0.8098333333333334, 0.8108333333333334, 0.8118333333333334, 0.8128333333333334, 0.8145000000000001, 0.8166666666666668, 0.8185, 0.8203333333333334, 0.8223333333333332, 0.8240000000000001, 0.826, 0.8278333333333332, 0.8298333333333334, 0.8318333333333333, 0.8326666666666667, 0.8328333333333333, 0.8331666666666667, 0.8326666666666667, 0.8323333333333333, 0.8300000000000001, 0.8271666666666666, 0.8234999999999999, 0.8195, 0.8163333333333332, 0.8145, 0.8138333333333332, 0.8128333333333334, 0.8135, 0.8111666666666666, 0.8048333333333334, 0.7993333333333333, 0.7949999999999999, 0.7918333333333333, 0.7886666666666666, 0.7849999999999999, 0.7806666666666666, 0.7763333333333333, 0.7711666666666667, 0.7685000000000001, 0.7725000000000001, 0.7765000000000001, 0.7801666666666668, 0.7831666666666667, 0.7881666666666666, 0.7921666666666667, 0.7961666666666667, 0.8009999999999999, 0.8051666666666666, 0.8084999999999999, 0.8113333333333334, 0.8133333333333332, 0.8148333333333333, 0.8161666666666667, 0.8150000000000001, 0.8151666666666667, 0.8151666666666667, 0.8146666666666667, 0.8146666666666667, 0.8163333333333334, 0.8185, 0.8205, 0.8233333333333333, 0.8261666666666667, 0.8286666666666666, 0.8305, 0.8324999999999999, 0.8343333333333331, 0.836, 0.8381666666666666, 0.8399999999999999, 0.8414999999999999, 0.8426666666666666, 0.844, 0.8456666666666667, 0.8465, 0.8471666666666666, 0.8483333333333333, 0.8501666666666667, 0.8508333333333333, 0.8501666666666665, 0.8501666666666667, 0.85, 0.8503333333333334, 0.851, 0.8531666666666666, 0.8556666666666667, 0.8585238095238095, 0.8596904761904762, 0.8598571428571429, 0.8590238095238096, 0.8576904761904762, 0.8561904761904762, 0.8541904761904762, 0.8513571428571428, 0.8483571428571428, 0.8450238095238095, 0.8411666666666667, 0.8378333333333332, 0.836, 0.8366666666666666, 0.8363333333333334, 0.8364999999999998, 0.8368333333333332, 0.8371666666666666, 0.8368333333333332, 0.837, 0.8356666666666666, 0.8361666666666666, 0.8358333333333332, 0.8344999999999999, 0.8343333333333331, 0.8338333333333333, 0.833, 0.833, 0.8338333333333333, 0.8336666666666666, 0.8348333333333333, 0.8343333333333334, 0.8338333333333333, 0.8341666666666665, 0.834, 0.8331666666666667, 0.8318333333333333, 0.8291666666666666, 0.8243333333333334, 0.8191666666666666, 0.8141666666666667, 0.8085000000000001, 0.8009999999999999, 0.7941666666666667, 0.7885, 0.7841666666666667, 0.7803333333333333, 0.7778333333333334, 0.7771666666666668, 0.7783333333333333, 0.7805000000000001, 0.7828333333333333, 0.7875, 0.7886666666666666, 0.7888333333333334, 0.7881666666666667, 0.7868333333333334, 0.7851666666666667, 0.7835, 0.7803333333333333, 0.7763333333333333, 0.7726666666666666, 0.7698333333333334, 0.771, 0.772, 0.7723333333333333, 0.7726666666666666, 0.7731666666666668, 0.7738333333333334, 0.7755000000000001, 0.777, 0.7796666666666667, 0.7803333333333333, 0.7796666666666666, 0.7788333333333333, 0.7783333333333333, 0.7788333333333334, 0.7769333333333333, 0.7739333333333333, 0.7704333333333333, 0.7669333333333334, 0.7636, 0.7615999999999999, 0.7577666666666666, 0.7545999999999999, 0.7517666666666667, 0.7492666666666666, 0.7486666666666666, 0.7493333333333332, 0.7505, 0.7516666666666666, 0.7529999999999999, 0.7539999999999999, 0.7554999999999998, 0.7575, 0.7594999999999998, 0.7615, 
0.764, 0.7663333333333332, 0.7686666666666666, 0.7715, 0.7735000000000001, 0.7755, 0.7783333333333333, 0.7803333333333333, 0.7811666666666667, 0.7781666666666667, 0.7755, 0.7725, 0.7676666666666667, 0.7606666666666666, 0.7546666666666667, 0.7493333333333333, 0.745, 0.741, 0.7376666666666667, 0.7376666666666667, 0.7366666666666666, 0.7338333333333333, 0.7313333333333334, 0.7306666666666667, 0.7281666666666666, 0.7255, 0.7224999999999999, 0.7203333333333333, 0.7186666666666666, 0.7166666666666666, 0.7104999999999999, 0.7063333333333333, 0.7033333333333334, 0.7008333333333333, 0.6993333333333333, 0.6976666666666665, 0.6975, 0.696, 0.693, 0.6900000000000001, 0.6923333333333334, 0.6935, 0.6941666666666666, 0.6938333333333333, 0.6928333333333334, 0.6914999999999999, 0.6846666666666666, 0.6783333333333333, 0.6733333333333333, 0.6683333333333333, 0.6631666666666667, 0.659, 0.6553333333333333, 0.6525, 0.6505, 0.6478333333333333, 0.6493333333333333, 0.6513333333333333, 0.6533333333333333, 0.6555, 0.6576666666666667, 0.6596666666666667, 0.6616666666666667, 0.6623333333333333, 0.6613333333333333, 0.6611666666666667, 0.6616666666666667, 0.6623333333333334, 0.6653333333333333, 0.6683333333333332, 0.6713333333333333, 0.6743333333333333, 0.6761666666666667, 0.6785, 0.6823333333333333, 0.6851666666666667, 0.6871666666666667, 0.6885, 0.6880000000000001, 0.6878333333333333, 0.6848333333333333, 0.6821666666666667, 0.6788333333333333, 0.6761666666666667, 0.6733333333333333, 0.6713333333333333, 0.6693333333333333, 0.6681666666666667, 0.6666666666666666, 0.6646666666666666, 0.6646666666666667, 0.6643333333333333, 0.6663333333333333, 0.6665000000000001, 0.6655, 0.6646666666666667, 0.6648333333333334, 0.6645000000000001, 0.6575, 0.6515, 0.6463333333333334, 0.6421666666666667, 0.6381666666666667, 0.6353333333333333, 0.6306666666666667, 0.6265, 0.6221666666666666, 0.618, 0.621, 0.6236666666666666, 0.6256666666666667, 0.6265000000000001, 0.6271666666666667, 0.6288333333333334, 0.6335, 0.6358333333333333, 0.6375, 0.6355000000000001, 0.6276666666666667, 0.6168333333333333, 0.5943333333333334, 0.5723333333333334, 0.5468333333333334, 0.5198333333333334, 0.49283333333333335, 0.4676666666666667, 0.42333333333333334, 0.3848333333333333, 0.35816666666666663, 0.33099999999999996, 0.3053333333333333, 0.2756666666666666, 0.25449999999999995, 0.2345, 0.21183333333333332, 0.18016666666666664, 0.16416666666666663, 0.14249999999999996, 0.11449999999999998, 0.08983333333333329, 0.07549999999999997, 0.0728333333333333, 0.07116666666666664, 0.054833333333333324, 0.04099999999999999, 0.035833333333333314, 0.03299999999999999, 0.03316666666666665, 0.03316666666666664, 0.03283333333333332, 0.03199999999999999, 0.02416666666666665, 0.00983333333333331, 0.008999999999999979, 0.007833333333333312, 0.007166666666666645, 0.017833333333333312, 0.03049999999999997, 0.03683333333333331, 0.03666666666666664, 0.041666666666666644, 0.05399999999999997, 0.05983333333333331, 0.05999999999999998, 0.059833333333333315, 0.059166666666666645, 0.04833333333333331, 0.035333333333333314, 0.028999999999999977, 0.02949999999999997, 0.02433333333333331, 0.010499999999999976, 0.0033333333333333084, 0.002499999999999975, 0.0023333333333333084, 0.0023333333333333084, 0.002166666666666642, 0.002166666666666642, 0.0018333333333333084, 0.0011666666666666418, 0.0011666666666666418, 0.0016666666666666416, 0.0018333333333333084, 0.0018333333333333084, 0.0026666666666666414, 0.002999999999999975, 0.002999999999999975, 0.0061666666666666415, 0.017999999999999978, 
0.020999999999999977, 0.020999999999999974, 0.023999999999999976, 0.03749999999999998, 0.04716666666666665, 0.04633333333333331, 0.04616666666666665, 0.04616666666666665, 0.043333333333333314, 0.03166666666666664, 0.028333333333333304, 0.029333333333333312, 0.034999999999999976, 0.036666666666666646, 0.043666666666666645, 0.06066666666666665, 0.07683333333333331, 0.08399999999999999, 0.09, 0.0915, 0.0915, 0.09049999999999998, 0.08133333333333331, 0.06616666666666665, 0.04933333333333332, 0.03233333333333331, 0.016666666666666646, 0.02049999999999998, 0.025499999999999978, 0.02366666666666665, 0.02366666666666665, 0.028333333333333315, 0.04233333333333331, 0.05633333333333331, 0.06116666666666665, 0.06116666666666665, 0.06049999999999998, 0.04966666666666665, 0.038333333333333316, 0.038499999999999986, 0.03933333333333332, 0.03483333333333331, 0.021833333333333312, 0.007666666666666645, 0.0028333333333333114, 0.0028333333333333114, 0.003499999999999978, 0.0036666666666666453, 0.0036666666666666453, 0.0036666666666666445, 0.003166666666666645, 0.002999999999999978, 0.002999999999999978, 0.0038333333333333114, 0.0038333333333333114, 0.004166666666666645, 0.0036666666666666445, 0.0043333333333333115, 0.007666666666666644, 0.019166666666666648, 0.03233333333333331, 0.03933333333333332, 0.04183333333333331, 0.054333333333333324, 0.05766666666666666, 0.059166666666666645, 0.05899999999999998, 0.05799999999999998, 0.054666666666666655, 0.04299999999999998, 0.02949999999999998, 0.023333333333333317, 0.028666666666666653, 0.026499999999999985, 0.026999999999999986, 0.02516666666666665, 0.02516666666666665, 0.02566666666666665, 0.02566666666666665, 0.02566666666666665, 0.02566666666666665, 0.02449999999999998, 0.01649999999999998, 0.0051666666666666475, 0.0013333333333333138, 0.001833333333333314, 0.0028333333333333136, 0.01416666666666665, 0.03166666666666664, 0.050333333333333306, 0.06999999999999998, 0.09066666666666665, 0.11133333333333331, 0.13366666666666666, 0.15566666666666665, 0.17616666666666664, 0.18916666666666665, 0.18866666666666665, 0.18149999999999997, 0.16749999999999995, 0.14816666666666664, 0.12783333333333333, 0.10733333333333332, 0.08549999999999998, 0.06349999999999997, 0.042666666666666644, 0.028999999999999977, 0.018666666666666644, 0.009999999999999978, 0.007166666666666645, 0.008833333333333313, 0.011499999999999979, 0.013333333333333312, 0.014833333333333313, 0.017833333333333312, 0.020833333333333308, 0.024499999999999977, 0.02749999999999998, 0.03266666666666664, 0.04616666666666665, 0.05949999999999998, 0.07149999999999998, 0.08499999999999999, 0.1008333333333333, 0.11483333333333332, 0.12766666666666665, 0.1363333333333333, 0.1343333333333333, 0.1283333333333333, 0.11399999999999996, 0.09949999999999998, 0.08666666666666664, 0.08016666666666664, 0.07899999999999999, 0.07999999999999999, 0.0823333333333333, 0.08849999999999997, 0.10499999999999995, 0.12316666666666662, 0.14149999999999996, 0.16066666666666662, 0.17916666666666664, 0.1903333333333333, 0.19566666666666663, 0.19999999999999998, 0.20466666666666664, 0.20916666666666664, 0.21516666666666664, 0.22116666666666665, 0.22783333333333333, 0.23383333333333328, 0.24016666666666664, 0.24616666666666664, 0.25166666666666665, 0.2573333333333333, 0.2623333333333333, 0.26799999999999996, 0.27249999999999996, 0.27699999999999997, 0.281, 0.285, 0.28883333333333333, 0.2928333333333333, 0.29683333333333334, 0.30083333333333334, 0.3048333333333333, 0.3088333333333333, 0.3128333333333333, 0.3173333333333333, 0.3218333333333333, 
0.3268333333333333, 0.331, 0.33599999999999997, 0.34049999999999997, 0.3446666666666667, 0.3496666666666667, 0.35366666666666663, 0.3583333333333333, 0.3623333333333333, 0.36649999999999994, 0.3711666666666666, 0.37616666666666665, 0.38049999999999995, 0.38549999999999995, 0.39049999999999996, 0.39549999999999996, 0.40049999999999997, 0.4058333333333334, 0.41083333333333333, 0.41566666666666663, 0.41999999999999993, 0.42416666666666664, 0.4288333333333333, 0.4333333333333333, 0.4378333333333333, 0.4408333333333333, 0.44433333333333336, 0.44733333333333336, 0.4506666666666666, 0.4541666666666667, 0.4573333333333333, 0.4611666666666666, 0.46416666666666667, 0.46733333333333327, 0.4706666666666667, 0.475, 0.4796666666666667, 0.48516666666666663, 0.49083333333333323, 0.4963333333333333, 0.5021666666666667, 0.5078333333333334, 0.5138333333333334, 0.52, 0.526, 0.5323333333333333, 0.5381666666666667, 0.543, 0.5479999999999999, 0.5531666666666667, 0.5581666666666667, 0.5631666666666666, 0.5681666666666666, 0.573, 0.5780000000000001, 0.5831666666666666, 0.5883333333333333, 0.594, 0.5994999999999999, 0.6048333333333333, 0.6103333333333333, 0.6156666666666666, 0.6209999999999999, 0.6263333333333333, 0.6313333333333333, 0.6365000000000001, 0.6415, 0.6471666666666667, 0.653, 0.6593333333333333, 0.6658333333333334, 0.6726666666666666, 0.6796666666666666, 0.6866666666666668, 0.6938333333333333, 0.7008333333333333, 0.7083333333333334, 0.7146666666666667, 0.721, 0.7266666666666667, 0.7321666666666666, 0.7373333333333333, 0.7424999999999999, 0.7474999999999999, 0.7529999999999999, 0.758, 0.763, 0.768, 0.7731666666666668, 0.7781666666666667, 0.7835, 0.7885000000000001, 0.7938333333333334, 0.7993333333333335, 0.8046666666666666, 0.8099999999999999, 0.8154999999999999, 0.8220000000000001, 0.8271666666666666, 0.8275, 0.8231666666666666, 0.8136666666666666, 0.7963333333333333, 0.7743333333333333, 0.7528333333333334, 0.7321666666666666, 0.7118333333333333, 0.6913333333333334, 0.6723333333333333, 0.6591666666666667, 0.651, 0.649, 0.6546666666666666, 0.6653333333333333, 0.6758333333333333, 0.6858333333333333, 0.6955, 0.704, 0.7116666666666667, 0.7184999999999999, 0.7248333333333333, 0.7298333333333333, 0.7351666666666665, 0.7404999999999999, 0.7448333333333333, 0.7481666666666666, 0.7515, 0.7553333333333333, 0.7596666666666666, 0.7616666666666666, 0.7616666666666665, 0.762, 0.762, 0.7601666666666667, 0.7581666666666667, 0.7571666666666668, 0.7561666666666667, 0.7551666666666667, 0.7541666666666667, 0.7556666666666667, 0.7586666666666666, 0.7616666666666666, 0.7616666666666667, 0.7605, 0.76, 0.758, 0.7558333333333334, 0.7528333333333334, 0.7496666666666666, 0.7451666666666668, 0.7406666666666666, 0.7364999999999999, 0.7343333333333333, 0.7338333333333333, 0.7329999999999999, 0.7333333333333333, 0.7333333333333333, 0.7338333333333333, 0.7331666666666666, 0.7341666666666666, 0.7355, 0.7368333333333333, 0.7373333333333333, 0.7373333333333333, 0.7378333333333333, 0.7374999999999999, 0.7373333333333333, 0.7375, 0.7391666666666666, 0.7401666666666666, 0.7415, 0.7424999999999999, 0.7446666666666666, 0.7484999999999999, 0.7501666666666666, 0.7521666666666667, 0.7545, 0.7565, 0.7583333333333333, 0.7596666666666666, 0.7598333333333331, 0.7598333333333332, 0.7594999999999998, 0.7586666666666665, 0.7583333333333333, 0.7583333333333333, 0.7578333333333332, 0.7578333333333332, 0.7581666666666667, 0.7598333333333332, 0.7618333333333333, 0.7638333333333333, 0.7661666666666667, 0.761, 0.7381666666666666, 0.7151666666666666, 
0.6926666666666667, 0.67, 0.647, 0.6238333333333334, 0.6011666666666666, 0.5793333333333334, 0.561, 0.5548333333333334, 0.5688333333333333, 0.5835, 0.5976666666666667, 0.6121666666666666, 0.6263333333333333, 0.6405, 0.6541666666666666, 0.6675, 0.6779999999999999, 0.6843333333333332, 0.6896666666666667, 0.6950000000000001, 0.7008333333333332, 0.7066666666666667, 0.7118333333333333, 0.7163333333333333, 0.7211666666666666, 0.7244999999999999, 0.7263333333333333, 0.7273333333333333, 0.7271666666666665, 0.7271666666666665, 0.7268333333333332, 0.7261666666666666, 0.7254999999999999, 0.7244999999999999, 0.7233333333333333, 0.7234999999999999, 0.7249999999999999, 0.7268333333333332, 0.7296666666666666, 0.7326666666666666, 0.7351666666666666, 0.7375, 0.7408333333333333, 0.7443333333333333, 0.7474999999999999, 0.7496666666666666, 0.7513333333333334, 0.7535, 0.7555, 0.7575, 0.7596666666666666, 0.7623333333333332, 0.7645, 0.7651666666666667, 0.7653333333333332, 0.7668333333333333, 0.7688333333333333, 0.7695, 0.7691666666666667, 0.7676666666666666, 0.7665, 0.7644999999999998, 0.7623333333333332, 0.7615, 0.7613333333333332, 0.7596666666666666, 0.7569999999999999, 0.7553333333333333, 0.7548333333333332, 0.7541666666666667, 0.7531666666666667, 0.753, 0.7531666666666667, 0.7543333333333333, 0.7558333333333332, 0.7585, 0.762, 0.766, 0.7703333333333334, 0.7755, 0.7805, 0.7833333333333334, 0.7851666666666667, 0.7856666666666666, 0.7856666666666666, 0.785, 0.7815000000000001, 0.7771666666666667, 0.7725000000000001, 0.7683333333333333, 0.7643333333333333, 0.762, 0.7605, 0.759, 0.757, 0.7553333333333333, 0.7555, 0.7535000000000001, 0.7514999999999998, 0.7478333333333332, 0.7448333333333332, 0.7401666666666666, 0.735, 0.7278333333333332, 0.7198333333333333, 0.7110000000000001, 0.7028333333333333, 0.6963333333333332, 0.6891666666666667, 0.6828333333333333, 0.6755000000000001, 0.67, 0.6666666666666667, 0.6706666666666667, 0.6808333333333334, 0.6928333333333334, 0.7046666666666667, 0.7026666666666668, 0.6896666666666668, 0.6778333333333334, 0.6673333333333333, 0.6573333333333333, 0.6465, 0.632, 0.6138333333333333, 0.5961666666666667, 0.5813333333333333, 0.5826666666666667, 0.5966666666666667, 0.6103333333333334, 0.6238333333333334, 0.6373333333333333, 0.6508333333333333, 0.6641666666666667, 0.6775, 0.6901666666666666, 0.7009999999999998, 0.7106666666666666, 0.72, 0.7293333333333333, 0.7364999999999999, 0.7409999999999999, 0.7433333333333333, 0.7454999999999999, 0.7468333333333332, 0.7473333333333333, 0.7459999999999999, 0.744, 0.7423333333333334, 0.7401666666666666, 0.7393333333333334, 0.741, 0.744, 0.7463333333333333, 0.7483333333333333, 0.7511666666666665, 0.7554999999999998, 0.7603333333333333, 0.7641666666666667, 0.7668333333333333, 0.7693333333333333, 0.7715, 0.774, 0.7768333333333334, 0.7798333333333334, 0.7818333333333334, 0.7835, 0.7848333333333334, 0.7861666666666667, 0.7876666666666667, 0.7898333333333334, 0.7918333333333333, 0.7926666666666666, 0.7925, 0.791, 0.7903333333333334, 0.7893333333333333, 0.7863333333333333, 0.7828333333333333, 0.7798333333333333, 0.7768333333333333, 0.7731666666666666, 0.7705, 0.7686666666666666, 0.7678333333333333, 0.7665, 0.7648333333333333, 0.7634999999999998, 0.7626666666666666, 0.7606666666666666, 0.7573333333333333, 0.7536666666666666, 0.7489999999999999, 0.7434999999999999, 0.738, 0.7311666666666665, 0.7248333333333332, 0.7193333333333333, 0.7131666666666666, 0.7064999999999999, 0.7013333333333333, 0.702, 0.7076666666666667, 0.7145, 0.7221666666666666, 
[Large block of raw numeric data omitted: several thousand comma-separated floating-point values (ranging roughly 0.0–0.92), apparently time-series metric samples from a data file added in this patch. The values carry no prose content and are elided here; see the original diff for the full data.]
0.7958333333333333, 0.796, 0.796, 0.7958333333333333, 0.7953333333333333, 0.7938333333333334, 0.7928333333333334, 0.7918333333333333, 0.7901666666666667, 0.7888333333333334, 0.789, 0.789, 0.7886666666666666, 0.788, 0.7876666666666667, 0.7881666666666668, 0.79, 0.7913333333333333, 0.7935000000000001, 0.795, 0.7969999999999999, 0.7986666666666666, 0.7999999999999999, 0.8008333333333333, 0.8003333333333332, 0.7991666666666666, 0.7981666666666667, 0.798, 0.7971666666666667, 0.7961666666666667, 0.7958333333333333, 0.7946666666666667, 0.7943333333333332, 0.7938333333333334, 0.7945, 0.7948333333333333, 0.7949999999999999, 0.7953333333333333, 0.7965, 0.7976666666666665, 0.7979999999999999, 0.7988333333333333, 0.7981666666666666, 0.7986666666666666, 0.7998333333333333, 0.8016666666666667, 0.8026666666666668, 0.8036666666666668, 0.8041666666666668, 0.8058333333333334, 0.8058333333333334, 0.8066666666666666, 0.8086666666666666, 0.8108333333333334, 0.8123333333333334, 0.8135, 0.8154999999999999, 0.8171666666666667, 0.819, 0.82, 0.8225, 0.8248333333333333, 0.8273333333333334, 0.8295, 0.8308333333333332, 0.8326666666666667, 0.8336666666666666, 0.8346666666666666, 0.8346666666666666, 0.8336666666666666, 0.833, 0.8321666666666665, 0.8305, 0.8263333333333334, 0.8221666666666666, 0.8181666666666667, 0.8141666666666667, 0.8101666666666667, 0.8071666666666667, 0.8051666666666666, 0.8025, 0.8003333333333332, 0.7994999999999999, 0.8001666666666667, 0.8009999999999999, 0.8015000000000001, 0.8008333333333333, 0.7998333333333333, 0.7978333333333333, 0.7951666666666666, 0.7921666666666667, 0.7876666666666667, 0.7816666666666667, 0.7755, 0.7685000000000001, 0.76, 0.752, 0.744, 0.7371666666666667, 0.7328333333333333, 0.7351666666666666, 0.743, 0.752, 0.7608333333333334, 0.7673333333333333, 0.7736666666666666, 0.7808333333333334, 0.7888333333333333, 0.7963333333333333, 0.8023333333333333, 0.8019999999999999, 0.7975000000000001, 0.7918333333333333, 0.7851666666666668, 0.7828333333333334, 0.7825, 0.7825000000000001, 0.7825, 0.781, 0.78, 0.78, 0.7801666666666667, 0.7809999999999999, 0.7845, 0.7873333333333333, 0.7895, 0.792, 0.7931666666666667, 0.7956666666666667, 0.7984166666666667, 0.7989166666666667, 0.7999166666666666, 0.8005833333333333, 0.7985833333333333, 0.7955833333333333, 0.7922499999999999, 0.78825, 0.7849166666666666, 0.7819166666666666, 0.7778333333333333, 0.7739999999999999, 0.7696666666666666, 0.7665, 0.7661666666666667, 0.7671666666666666, 0.7683333333333333, 0.7701666666666667, 0.773, 0.7766666666666666, 0.7808333333333334, 0.7866666666666667, 0.7916666666666667, 0.795, 0.7971666666666668, 0.7988333333333333, 0.8, 0.8005000000000001, 0.8004999999999999, 0.7985, 0.7966666666666666, 0.7951666666666666, 0.7931666666666667, 0.7925, 0.7929999999999999, 0.7925, 0.7913333333333333, 0.7898333333333334, 0.7876666666666667, 0.787, 0.785, 0.7825, 0.78, 0.7766666666666666, 0.7723333333333333, 0.7688333333333334, 0.7651666666666667, 0.7591666666666667, 0.7523333333333333, 0.7433333333333334, 0.7313333333333333, 0.7191666666666666, 0.709, 0.6998333333333333, 0.6911666666666667, 0.6835, 0.6765000000000001, 0.6716666666666666, 0.6688333333333334, 0.6735, 0.6836666666666666, 0.6948333333333333, 0.705, 0.7138333333333333, 0.72, 0.7246666666666667, 0.73, 0.7361666666666666, 0.7423333333333333, 0.7434999999999999, 0.7428333333333332, 0.7418333333333333, 0.7408333333333333, 0.7408333333333333, 0.7431666666666665, 0.7461666666666666, 0.7494999999999999, 0.7523333333333333, 0.7539999999999999, 0.7551666666666665, 
0.7566666666666666, 0.7576666666666665, 0.7589999999999999, 0.7601666666666665, 0.7613333333333333, 0.7633333333333333, 0.7643333333333333, 0.7658333333333334, 0.7681666666666667, 0.7711666666666667, 0.7745, 0.7778333333333334, 0.7803333333333333, 0.7826666666666667, 0.7845, 0.7833333333333334, 0.7831666666666666, 0.7821666666666667, 0.7809999999999999, 0.7793333333333333, 0.7771666666666667, 0.7753333333333334, 0.7743333333333332, 0.7726666666666666, 0.7708333333333333, 0.7718333333333333, 0.7729999999999999, 0.7751666666666667, 0.7773333333333334, 0.7793333333333334, 0.7811666666666668, 0.783, 0.7845714285714285, 0.786904761904762, 0.7889047619047619, 0.7914047619047619, 0.7937380952380952, 0.7960714285714285, 0.7972380952380952, 0.7982380952380952, 0.7987380952380952, 0.7992380952380953, 0.7976666666666666, 0.7955, 0.7955, 0.7947, 0.7935333333333333, 0.7915333333333334, 0.7900333333333334, 0.7890333333333334, 0.7895333333333333, 0.7895333333333333, 0.7912, 0.7935333333333333, 0.7955333333333334, 0.7973333333333333, 0.7995, 0.8025, 0.8063333333333332, 0.8103333333333333, 0.8133333333333332, 0.8164999999999999, 0.82, 0.8240000000000001, 0.8273333333333334, 0.8303333333333333, 0.8321666666666667, 0.834, 0.8354999999999999, 0.8375, 0.8386666666666667, 0.8391666666666666, 0.8393333333333333, 0.8393333333333333, 0.8383333333333333, 0.8351666666666666, 0.8323333333333333, 0.8295, 0.8265, 0.8225000000000001, 0.8200000000000001, 0.8183333333333334, 0.8161666666666667, 0.8141666666666667, 0.8128333333333334, 0.8140000000000001, 0.8156666666666667, 0.817, 0.8186666666666668, 0.8201666666666666, 0.8215, 0.8226666666666667, 0.8244999999999999, 0.8258333333333333, 0.827, 0.8281666666666666, 0.8285, 0.8266666666666665, 0.8246666666666667, 0.8208333333333334, 0.8025, 0.772, 0.7426666666666667, 0.7148333333333333, 0.6891666666666667, 0.665, 0.643, 0.6238333333333334, 0.6056666666666667, 0.5906666666666667, 0.5906666666666667, 0.6041666666666667, 0.6176428571428572, 0.6304761904761904, 0.6424761904761904, 0.6539761904761905, 0.6649761904761904, 0.676142857142857, 0.6869761904761905, 0.6976428571428571, 0.7083095238095238, 0.7184761904761905, 0.7283333333333333, 0.7378333333333333, 0.7471666666666666, 0.7563333333333333, 0.7656666666666667, 0.7746666666666666, 0.7833333333333333, 0.7906666666666666, 0.7958333333333334, 0.7988333333333333, 0.8018333333333333, 0.8036666666666665, 0.8045, 0.8039999999999999, 0.8033333333333333, 0.8015000000000001, 0.7985, 0.7953333333333333, 0.7938333333333333, 0.791, 0.7863333333333332, 0.7826666666666666, 0.7798333333333333, 0.7779999999999999, 0.776, 0.775, 0.7751666666666666, 0.7761666666666667, 0.7763333333333333, 0.7791666666666667, 0.7838333333333334, 0.7878333333333333, 0.7903333333333333, 0.7928333333333334, 0.7938333333333334, 0.7946666666666666, 0.7946666666666666, 0.7953333333333332, 0.7961666666666666, 0.7973333333333332, 0.7983333333333333, 0.799, 0.8005000000000001, 0.8006666666666666, 0.8, 0.7995, 0.7998333333333333, 0.7986666666666667, 0.7983333333333335, 0.798, 0.7969999999999999, 0.7963333333333333, 0.7954999999999999, 0.7954999999999999, 0.7971666666666666, 0.7981666666666666, 0.7986666666666666, 0.7979999999999999, 0.7973333333333332, 0.7963333333333333, 0.7963333333333333, 0.7958333333333333, 0.7948333333333333, 0.7931666666666668, 0.7905000000000001, 0.7871666666666667, 0.7825, 0.779, 0.7738333333333334, 0.7705, 0.7670000000000001, 0.7646666666666667, 0.7636666666666667, 0.7628333333333333, 0.7635, 0.7643333333333333, 0.7670000000000001, 
0.7701666666666667, 0.7743333333333333, 0.7763333333333334, 0.7778333333333334, 0.7786666666666667, 0.7785000000000001, 0.7790000000000001, 0.7793333333333334, 0.7798333333333334, 0.7801666666666668, 0.7801666666666668, 0.7801666666666668, 0.7806666666666667, 0.7786666666666667, 0.7776666666666667, 0.7771666666666667, 0.7771666666666667, 0.7775000000000001, 0.7783333333333333, 0.7783333333333334, 0.7785, 0.7783333333333334, 0.7778333333333334, 0.7788333333333334, 0.7796666666666667, 0.779, 0.778, 0.7761666666666667, 0.7743333333333333, 0.7731666666666667, 0.7711666666666667, 0.7703333333333333, 0.7698333333333334, 0.7708333333333334, 0.7696666666666667, 0.7691666666666667, 0.7691666666666667, 0.7695000000000001, 0.7705, 0.7716666666666667, 0.7745, 0.7768333333333333, 0.7796666666666667, 0.7816666666666666, 0.7855, 0.7903333333333333, 0.7943333333333333, 0.7978333333333334, 0.8008333333333335, 0.804, 0.8068333333333333, 0.8096666666666668, 0.8128333333333334, 0.8168333333333333, 0.8205, 0.8225, 0.8231666666666666, 0.8234999999999999, 0.8241666666666667, 0.8240000000000001, 0.8231666666666667, 0.8216666666666667, 0.8188333333333334, 0.8158333333333333, 0.8126666666666666, 0.8106666666666668, 0.8098333333333334, 0.8088333333333335, 0.8081666666666667, 0.8075000000000001, 0.8083333333333333, 0.8083333333333333, 0.8090000000000002, 0.8098333333333334, 0.8105, 0.8108333333333334, 0.8110000000000002, 0.8130000000000001, 0.8148333333333333, 0.8171666666666667, 0.8181666666666667, 0.8205, 0.8234999999999999, 0.8266666666666668, 0.8285, 0.8283333333333334, 0.8275, 0.8256666666666665, 0.8225, 0.8188333333333333, 0.8148333333333333, 0.8116666666666668, 0.8061666666666667, 0.7985000000000001, 0.7903333333333333, 0.7823333333333333, 0.7735, 0.7645833333333334, 0.75675, 0.7490833333333333, 0.7420833333333333, 0.7349166666666667, 0.7285833333333332, 0.72325, 0.7195833333333332, 0.7175833333333332, 0.7172499999999998, 0.7169999999999999, 0.7158333333333333, 0.7141666666666666, 0.7118333333333332, 0.7101666666666666, 0.7144999999999999, 0.7221666666666666, 0.7304999999999999, 0.74, 0.748, 0.7555, 0.7636666666666667, 0.7723333333333333, 0.7811666666666668, 0.7878333333333334, 0.7891666666666667, 0.7875000000000001, 0.7855000000000001, 0.7825, 0.7801666666666667, 0.7786666666666668, 0.7778333333333334, 0.7778333333333334, 0.7771666666666667, 0.7768333333333335, 0.777, 0.7774285714285715, 0.7782619047619048, 0.7777619047619048, 0.7790952380952382, 0.7805952380952381, 0.7817619047619048, 0.7832619047619048, 0.7877619047619048, 0.7927619047619048, 0.7974285714285714, 0.8015000000000001, 0.8039999999999999, 0.8063333333333332, 0.8073333333333335, 0.8078333333333333, 0.8078333333333333, 0.8071666666666666, 0.8045, 0.8016666666666665, 0.7983333333333332, 0.7953333333333333, 0.793, 0.7928333333333334, 0.7936666666666667, 0.795, 0.7961666666666667, 0.7969999999999999, 0.7985, 0.8005000000000001, 0.8031666666666666, 0.805, 0.808, 0.8105, 0.8121666666666666, 0.8140000000000001, 0.8163809523809524, 0.817047619047619, 0.8178809523809523, 0.8192142857142857, 0.8210476190476191, 0.8245714285714285, 0.8275714285714285, 0.8302380952380952, 0.8310714285714287, 0.8307380952380953, 0.8301904761904761, 0.8321904761904761, 0.8333571428571428, 0.8333571428571428, 0.8325238095238096, 0.8311666666666666, 0.8299999999999998, 0.8288333333333332, 0.8305, 0.8326666666666667, 0.8331666666666665, 0.8333333333333334, 0.8343333333333334, 0.8363333333333334, 0.8378333333333334, 0.8393333333333333, 0.8399999999999999, 0.8416666666666666, 
0.8433333333333334, 0.8446666666666666, 0.8474999999999999, 0.8493333333333333, 0.8504999999999999, 0.8506666666666666, 0.8503333333333334, 0.8494999999999999, 0.8493333333333333, 0.8476666666666667, 0.8443333333333334, 0.8401666666666667, 0.8356666666666666, 0.8308333333333333, 0.8256666666666665, 0.8200000000000001, 0.8140000000000001, 0.807, 0.8003333333333333, 0.794, 0.788, 0.7838333333333334, 0.7798333333333334, 0.7765, 0.7738333333333334, 0.7725000000000001, 0.7708333333333333, 0.77, 0.7695, 0.7705, 0.7726666666666666, 0.7746666666666667, 0.7775, 0.7808333333333334, 0.7845000000000001, 0.789, 0.7983333333333335, 0.8103333333333333, 0.8213333333333332, 0.8318333333333333, 0.8396666666666667, 0.8460000000000001, 0.8508333333333333, 0.8560000000000001, 0.8608333333333332, 0.8634999999999999, 0.8619999999999999, 0.8581666666666667, 0.8546666666666667, 0.8508333333333333, 0.8488333333333333, 0.8469999999999999, 0.8465, 0.8461666666666666, 0.8458333333333332, 0.8461666666666666, 0.8474999999999999, 0.8483333333333333, 0.8496666666666666, 0.851, 0.852, 0.8545, 0.8573333333333333, 0.8603333333333334, 0.8641666666666665, 0.8666666666666666, 0.869, 0.8716666666666667, 0.8719999999999999, 0.8698333333333332, 0.8674999999999999, 0.8629999999999999, 0.8586666666666666, 0.8531666666666666, 0.8463333333333333, 0.8394999999999999, 0.8333333333333334, 0.8281666666666666, 0.8255000000000001, 0.8263333333333334, 0.8285, 0.8311666666666667, 0.833, 0.8345, 0.836, 0.837, 0.8376666666666666, 0.8373333333333333, 0.8358333333333332, 0.833, 0.8298333333333334, 0.8276666666666668, 0.826, 0.8251666666666667, 0.8251666666666667, 0.827, 0.8293333333333333, 0.8333333333333333, 0.8376666666666667, 0.8411666666666665, 0.8414999999999999, 0.8418333333333333, 0.841, 0.8398333333333333, 0.8371666666666666, 0.8343333333333334, 0.8300000000000001, 0.8226666666666667, 0.8136666666666666, 0.7956666666666667, 0.78, 0.7661666666666667, 0.7538333333333334, 0.7428333333333333, 0.7336666666666667, 0.7258333333333333, 0.7198333333333333, 0.7161666666666666, 0.7146666666666667, 0.7231666666666666, 0.7324999999999999, 0.74, 0.7464999999999999, 0.7525, 0.7603333333333333, 0.7559999999999999, 0.7481666666666666, 0.7405, 0.7341666666666666, 0.7306666666666667, 0.7285, 0.7275, 0.7271904761904762, 0.7270238095238095, 0.7248571428571429, 0.7346904761904762, 0.7475238095238096, 0.7598571428571429, 0.7711904761904762, 0.7808571428571429, 0.7896904761904762, 0.7980238095238096, 0.8063333333333335, 0.8148333333333333, 0.8230000000000001, 0.8308333333333333, 0.8385, 0.8463333333333333, 0.8526666666666667, 0.858, 0.8626666666666667, 0.8666666666666666, 0.8696666666666667, 0.8718333333333333, 0.8738333333333334, 0.8744999999999999, 0.874, 0.8726666666666667, 0.8714999999999999, 0.8708333333333333, 0.8683333333333334, 0.866, 0.8641666666666667, 0.8626666666666667, 0.8606666666666667, 0.8593333333333334, 0.8585, 0.8576666666666666, 0.857, 0.8556666666666667, 0.8568333333333333, 0.8578333333333333, 0.8588333333333333, 0.8585, 0.8588333333333333, 0.8593333333333334, 0.8601666666666666, 0.8613333333333333, 0.8628333333333333, 0.8646666666666667, 0.865, 0.866, 0.8671666666666666, 0.8693333333333333, 0.8734999999999999, 0.8788333333333334, 0.8841666666666667, 0.8896666666666666, 0.8944999999999999, 0.8976666666666666, 0.9011666666666667, 0.9041666666666668, 0.9068333333333334, 0.9091666666666667, 0.9083333333333334, 0.9061666666666666, 0.9033333333333333, 0.9008333333333335, 0.8988333333333335, 0.8983333333333334, 0.8985, 0.8993333333333332, 
0.8996666666666668, 0.9003333333333334, 0.9016666666666667, 0.9030000000000001, 0.9048333333333334, 0.9055, 0.9068333333333334, 0.9086666666666666, 0.9106666666666667, 0.9118333333333334, 0.9133333333333333, 0.9145, 0.9151666666666667, 0.9153333333333332, 0.9111666666666667, 0.8995, 0.8881666666666665, 0.8785000000000001, 0.8720000000000001, 0.8658333333333333, 0.8591666666666666, 0.8528333333333332, 0.8463333333333333, 0.8408333333333333, 0.8400000000000001, 0.8469999999999999, 0.8531666666666666, 0.8571666666666667, 0.8576666666666666, 0.8575000000000002, 0.8581666666666667, 0.8585, 0.8593333333333334, 0.8593333333333334, 0.8588333333333333, 0.859, 0.8591666666666666, 0.8588333333333333, 0.8588333333333333, 0.86, 0.8601666666666666, 0.8605, 0.8606666666666667, 0.861, 0.8621666666666666, 0.8605, 0.8601666666666666, 0.8603333333333334, 0.86, 0.8598333333333332, 0.8606666666666667, 0.8613333333333333, 0.8618333333333335, 0.8621666666666666, 0.8623333333333333, 0.8640000000000001, 0.8644999999999999, 0.8625, 0.8465, 0.8156666666666667, 0.7851666666666667, 0.7561666666666667, 0.7289999999999999, 0.7033333333333334, 0.6786666666666666, 0.656, 0.6343333333333334, 0.6161666666666666, 0.6136666666666667, 0.626, 0.6386666666666667, 0.651, 0.6628333333333333, 0.6759166666666666, 0.69025, 0.7054166666666666, 0.7204166666666667, 0.7355833333333333, 0.7497499999999999, 0.7625833333333334, 0.7744166666666666, 0.7829166666666667, 0.79125, 0.7981666666666667, 0.8021666666666667, 0.8045, 0.8053333333333332, 0.8030000000000002, 0.7988333333333334, 0.795, 0.7895, 0.7846666666666666, 0.7779999999999999, 0.7699999999999999, 0.7628333333333334, 0.755, 0.7474999999999999, 0.744, 0.7428333333333332, 0.7421666666666666, 0.7436666666666667, 0.7461666666666666, 0.7499999999999999, 0.7543333333333333, 0.7581666666666667, 0.762, 0.7665, 0.7693333333333333, 0.7713333333333333, 0.7726666666666666, 0.7738333333333334, 0.7763333333333333, 0.7785000000000001, 0.7805000000000001, 0.7823333333333334, 0.7843333333333333, 0.7858333333333334, 0.786, 0.7859999999999999, 0.7861666666666667, 0.7868333333333334, 0.786, 0.7851666666666668, 0.7851666666666668, 0.7845000000000001, 0.7845000000000001, 0.7845000000000001, 0.7861666666666667, 0.7881666666666667, 0.7901666666666667, 0.7913333333333333, 0.7935000000000001, 0.7963333333333333, 0.7979999999999999, 0.7991666666666666, 0.7991666666666666, 0.7991666666666666, 0.7988333333333333, 0.7953333333333333, 0.7915, 0.7885000000000001, 0.7845, 0.7805000000000001, 0.7768333333333334, 0.7748333333333334, 0.7731666666666668, 0.7721666666666668, 0.7708333333333334, 0.7723333333333334, 0.7741666666666667, 0.7756666666666667, 0.7778333333333334, 0.7788333333333334, 0.7798333333333335, 0.7808333333333335, 0.7825000000000001, 0.7835000000000001, 0.7828333333333334, 0.7801666666666667, 0.7771666666666667, 0.7736666666666667, 0.7701666666666667, 0.7678333333333334, 0.7648333333333334, 0.7621666666666667, 0.7586666666666667, 0.7551666666666667, 0.7535000000000001, 0.7491666666666668, 0.7455, 0.7424999999999999, 0.7394999999999999, 0.7363333333333333, 0.7343333333333333, 0.7314999999999999, 0.7289999999999999, 0.7274999999999999, 0.7254999999999999, 0.7274999999999999, 0.7298333333333333, 0.7318333333333333, 0.7336666666666667, 0.7341666666666666, 0.7341666666666666, 0.7341666666666666, 0.7345, 0.7325, 0.7308333333333332, 0.7298333333333333, 0.7284999999999999, 0.7274999999999999, 0.7258333333333333, 0.7248333333333332, 0.7244999999999999, 0.7241666666666665, 0.7218333333333332, 0.7208333333333332, 
0.72, 0.7188333333333332, 0.7174999999999999, 0.7143333333333333, 0.712, 0.709, 0.7063333333333333, 0.7036666666666667, 0.7026666666666667, 0.7016666666666667, 0.7005, 0.7005, 0.7005, 0.7010000000000001, 0.7011666666666667, 0.7023333333333334, 0.6996666666666667, 0.6973333333333334, 0.6933333333333334, 0.6903333333333335, 0.6873333333333334, 0.6841666666666668, 0.6811666666666667, 0.6788333333333334, 0.6768333333333334, 0.675, 0.6766666666666666, 0.6779999999999999, 0.6809999999999999, 0.683, 0.6849999999999999, 0.6871666666666666, 0.6891666666666667, 0.6911666666666666, 0.6859999999999999, 0.6703333333333333, 0.6393333333333333, 0.6083333333333333, 0.5743333333333334, 0.5348333333333333, 0.504, 0.47450000000000003, 0.4456666666666667, 0.4181666666666667, 0.39849999999999997, 0.3898333333333333, 0.39666666666666667, 0.4038333333333333, 0.41483333333333333, 0.43216666666666664, 0.4413333333333333, 0.449, 0.4565, 0.46333333333333326, 0.4701666666666666, 0.47683333333333333, 0.483, 0.4888333333333333, 0.49483333333333335, 0.5, 0.5046666666666667, 0.5086666666666666, 0.512, 0.509, 0.503, 0.4835, 0.4568333333333333, 0.41550000000000004, 0.3683333333333333, 0.3198333333333333, 0.26883333333333337, 0.2173333333333333, 0.16633333333333333, 0.12083333333333332, 0.07683333333333334, 0.046666666666666655, 0.023666666666666655, 0.015499999999999991, 0.010999999999999989, 0.008666666666666658, 0.01649999999999999, 0.03149999999999999, 0.04466666666666666, 0.057166666666666664, 0.059166666666666666, 0.058499999999999996, 0.057666666666666665, 0.05616666666666666, 0.05583333333333333, 0.05516666666666666, 0.047166666666666655, 0.03183333333333332, 0.017999999999999988, 0.004999999999999989, 0.0031666666666666557, 0.0031666666666666557, 0.0036666666666666558, 0.003999999999999989, 0.004499999999999989, 0.006333333333333321, 0.01849999999999999, 0.02433333333333332, 0.024166666666666656, 0.023833333333333324, 0.02383333333333332, 0.02383333333333332, 0.022999999999999986, 0.022666666666666654, 0.022499999999999985, 0.02033333333333332, 0.007666666666666655, 0.0019999999999999888, 0.0018333333333333222, 0.0018333333333333222, 0.0018333333333333222, 0.0016666666666666555, 0.004666666666666656, 0.016333333333333325, 0.02316666666666666, 0.02316666666666666, 0.026666666666666655, 0.036333333333333315, 0.036333333333333315, 0.03666666666666665, 0.03649999999999999, 0.03683333333333332, 0.03449999999999999, 0.022999999999999986, 0.016666666666666656, 0.016499999999999987, 0.012999999999999987, 0.002999999999999986, 0.0036666666666666527, 0.0033333333333333192, 0.0033333333333333192, 0.0033333333333333192, 0.002499999999999986, 0.0031666666666666523, 0.0023333333333333196, 0.0034999999999999858, 0.011666666666666653, 0.02633333333333332, 0.02566666666666666, 0.025833333333333326, 0.025833333333333326, 0.025499999999999988, 0.025666666666666654, 0.024833333333333325, 0.024999999999999988, 0.02383333333333332, 0.01533333333333332, 0.0006666666666666529, 0.0006666666666666529, 0.0006666666666666529, 0.0008333333333333194, 0.0011666666666666529, 0.0011666666666666529, 0.0014999999999999862, 0.0013333333333333194, 0.0033333333333333192, 0.016499999999999987, 0.029666666666666654, 0.030166666666666654, 0.030166666666666658, 0.02999999999999999, 0.030166666666666654, 0.030166666666666654, 0.029833333333333323, 0.029833333333333323, 0.02783333333333332, 0.014666666666666656, 0.0016666666666666527, 0.0013333333333333196, 0.001166666666666653, 0.001166666666666653, 0.0006666666666666529, 0.0038333333333333192, 
0.015499999999999986, 0.026166666666666654, 0.026166666666666654, 0.029666666666666654, 0.040166666666666656, 0.03999999999999999, 0.04033333333333332, 0.04049999999999999, 0.04116666666666665, 0.03883333333333332, 0.027499999999999986, 0.01733333333333332, 0.01833333333333332, 0.014999999999999986, 0.004499999999999986, 0.004999999999999986, 0.004666666666666653, 0.005166666666666654, 0.005333333333333319, 0.005166666666666654, 0.00483333333333332, 0.004499999999999986, 0.004666666666666654, 0.012999999999999987, 0.02783333333333332, 0.03133333333333332, 0.031499999999999986, 0.030833333333333324, 0.030166666666666654, 0.029666666666666657, 0.029999999999999992, 0.02999999999999999, 0.028833333333333322, 0.021333333333333322, 0.007333333333333319, 0.004166666666666654, 0.003999999999999987, 0.004499999999999987, 0.00483333333333332, 0.005166666666666654, 0.005666666666666653, 0.005666666666666652, 0.007999999999999986, 0.020833333333333322, 0.03699999999999999, 0.054333333333333324, 0.07366666666666666, 0.09349999999999999, 0.11399999999999999, 0.1345, 0.15433333333333332, 0.17416666666666664, 0.1918333333333333, 0.19899999999999998, 0.1998333333333333, 0.19516666666666665, 0.18216666666666664, 0.16749999999999998, 0.1515, 0.1335, 0.12449999999999997, 0.11566666666666665, 0.10366666666666664, 0.09349999999999999, 0.09, 0.07866666666666666, 0.0735, 0.06983333333333332, 0.06599999999999999, 0.06349999999999999, 0.05316666666666666, 0.04399999999999999, 0.03899999999999999, 0.030833333333333324, 0.01933333333333332, 0.017966666666666655, 0.017466666666666658, 0.016966666666666654, 0.017633333333333324, 0.01779999999999999, 0.01779999999999999, 0.01779999999999999, 0.018133333333333324, 0.02629999999999999, 0.04146666666666665, 0.059666666666666646, 0.07899999999999999, 0.09633333333333331, 0.11366666666666665, 0.1328333333333333, 0.15199999999999997, 0.17099999999999999, 0.18949999999999997, 0.20066666666666663, 0.2053333333333333, 0.20933333333333332, 0.21333333333333332, 0.21866666666666665, 0.22433333333333333, 0.22966666666666669, 0.2341666666666667, 0.23900000000000002, 0.2435, 0.24916666666666668, 0.2543333333333333, 0.2598333333333333, 0.26483333333333337, 0.2698333333333333, 0.2748333333333334, 0.27949999999999997, 0.28500000000000003, 0.2901666666666667, 0.29516666666666663, 0.2995, 0.3035, 0.308, 0.312, 0.31649999999999995, 0.3205, 0.325, 0.32966666666666666, 0.33466666666666667, 0.3393333333333333, 0.344, 0.34900000000000003, 0.354, 0.359, 0.364, 0.369, 0.374, 0.3788333333333333, 0.3828333333333333, 0.3878333333333333, 0.39216666666666666, 0.39683333333333326, 0.40083333333333326, 0.4051666666666667, 0.4095, 0.4136666666666667, 0.4181666666666667, 0.42299999999999993, 0.42800000000000005, 0.4328333333333333, 0.4381666666666666, 0.44366666666666665, 0.4496666666666666, 0.45533333333333326, 0.4613333333333333, 0.4671666666666667, 0.4728333333333333, 0.4779999999999999, 0.4838333333333334, 0.4893333333333333, 0.495, 0.5001666666666666, 0.5051666666666667, 0.5103333333333333, 0.5153333333333333, 0.5203333333333333, 0.5253333333333334, 0.5303333333333333, 0.5353571428571429, 0.5401904761904762, 0.5451904761904762, 0.5506904761904762, 0.5556904761904762, 0.5613571428571429, 0.5663571428571428, 0.5718571428571428, 0.5771904761904761, 0.5825238095238096, 0.5876666666666667, 0.593, 0.598, 0.6026666666666667, 0.6076666666666666, 0.6125, 0.6175, 0.6226666666666667, 0.6278333333333334, 0.6335, 0.6395, 0.6449999999999999, 0.6508333333333334, 0.6568333333333334, 0.6628333333333334, 
0.6688333333333334, 0.6748333333333333, 0.6811666666666667, 0.6873333333333334, 0.6933333333333334, 0.6986666666666667, 0.7046666666666667, 0.7103333333333333, 0.7158333333333333, 0.7213333333333333, 0.7266666666666667, 0.7325, 0.7375, 0.7428333333333332, 0.7478333333333332, 0.7533333333333333, 0.7585, 0.764, 0.77, 0.7761666666666667, 0.7821666666666667, 0.7878333333333333, 0.7938333333333334, 0.7998333333333333, 0.8058333333333334, 0.8118333333333332, 0.8176666666666665, 0.8236666666666667, 0.8286666666666667, 0.8335000000000001, 0.8385, 0.8436666666666668, 0.849, 0.8543333333333333, 0.8596666666666668, 0.8648333333333333, 0.8703333333333333, 0.8768333333333335, 0.8843333333333334, 0.8893333333333333, 0.889, 0.8831666666666667, 0.8734999999999999, 0.8601666666666666, 0.8378333333333334, 0.8131666666666666, 0.7891666666666667, 0.7638333333333334, 0.7385, 0.7155, 0.6978333333333333, 0.6841666666666667, 0.6733333333333335, 0.6671666666666667, 0.6758333333333334, 0.6906666666666667, 0.7056666666666668, 0.7201666666666667, 0.728, 0.7336666666666667, 0.7398333333333333, 0.7478333333333333, 0.7569999999999999, 0.7656666666666666, 0.7689999999999999, 0.7675000000000001, 0.764, 0.76, 0.7615000000000001, 0.7625, 0.7636666666666666, 0.7646666666666666, 0.7658333333333334, 0.7668333333333334, 0.7671666666666667, 0.7675, 0.7685000000000001, 0.7685000000000001, 0.7678333333333333, 0.7698333333333334, 0.7715000000000001, 0.7726666666666666, 0.7706666666666667, 0.7685000000000001, 0.7666666666666667, 0.7656666666666666, 0.7655000000000001, 0.7668333333333333, 0.7700000000000001, 0.7728333333333334, 0.7758333333333334, 0.7786666666666667, 0.7845, 0.79, 0.7948333333333334, 0.7998333333333333, 0.804, 0.8061666666666667, 0.8066666666666666, 0.807, 0.8051666666666668, 0.804, 0.8025, 0.8009999999999999, 0.7996666666666666, 0.7981666666666667, 0.7971666666666667, 0.7966666666666666, 0.7961666666666667, 0.7963333333333333, 0.7981666666666667, 0.8, 0.8015000000000001, 0.8035, 0.8039999999999999, 0.8041666666666666, 0.8041666666666666, 0.8043333333333333, 0.8035, 0.8025, 0.8006666666666666, 0.7988333333333334, 0.7971666666666667, 0.7951666666666667, 0.7946666666666666, 0.7948333333333333, 0.7948333333333334, 0.7951666666666666, 0.7976666666666666, 0.8001666666666667, 0.8031666666666666, 0.8055, 0.8081666666666667, 0.8101666666666667, 0.8103333333333333, 0.8098333333333334, 0.8099999999999999, 0.8110000000000002, 0.8106666666666668, 0.8096666666666668, 0.8076666666666666, 0.8061666666666667, 0.8041666666666666, 0.8021666666666667, 0.8019999999999999, 0.8003333333333333, 0.7988333333333333, 0.7976666666666666, 0.798, 0.7985, 0.7993333333333333, 0.8003333333333332, 0.8004999999999999, 0.8, 0.7981666666666667, 0.7958333333333333, 0.7931666666666667, 0.7881666666666666, 0.7581666666666667, 0.7263333333333333, 0.6958333333333333, 0.6651666666666667, 0.6359999999999999, 0.6083333333333333, 0.5828333333333333, 0.5599999999999999, 0.5371666666666667, 0.5171666666666667, 0.5221666666666667, 0.5298333333333334, 0.5373333333333333, 0.5446666666666666, 0.5516666666666666, 0.5591666666666667, 0.5671666666666667, 0.5756666666666667, 0.5841666666666666, 0.5925, 0.601, 0.6096666666666667, 0.6183333333333334, 0.6268333333333334, 0.6355000000000001, 0.6431666666666667, 0.6503333333333334, 0.6565, 0.6628333333333333, 0.6685000000000001, 0.6741666666666667, 0.6793333333333333, 0.6846666666666666, 0.6898333333333333, 0.6951666666666666, 0.7003333333333333, 0.7054999999999999, 0.7103333333333333, 0.7153333333333334, 0.7211666666666666, 
0.7271666666666666, 0.7333333333333333, 0.7393333333333334, 0.7455, 0.7515000000000001, 0.7575000000000001, 0.7635000000000001, 0.7695000000000001, 0.7755, 0.7811666666666668, 0.7863333333333333, 0.7915000000000001, 0.7965, 0.8016666666666667, 0.8066666666666666, 0.8121666666666666, 0.8171666666666667, 0.8226666666666667, 0.8275, 0.8298333333333332, 0.8306666666666667, 0.8305, 0.8305, 0.8300000000000001, 0.8293333333333333, 0.8273333333333334, 0.826, 0.8251666666666667, 0.8243333333333333, 0.8254999999999999, 0.8285, 0.833, 0.8373333333333333, 0.8418333333333333, 0.8455, 0.8493333333333333, 0.8508333333333333, 0.8486666666666667, 0.8456666666666667, 0.8438333333333334, 0.8406666666666667, 0.8355, 0.8291666666666666, 0.8218333333333334, 0.8153333333333335, 0.8083333333333333, 0.8029999999999999, 0.8005000000000001, 0.7966666666666666, 0.7891666666666668, 0.7795, 0.7691666666666667, 0.7571666666666667, 0.7450000000000001, 0.7296666666666668, 0.7161666666666666, 0.7023333333333334, 0.688, 0.6755, 0.6698333333333333, 0.6715, 0.676, 0.6835, 0.6921666666666666, 0.7045, 0.7168333333333334, 0.7293333333333333, 0.7416666666666667, 0.7536666666666666, 0.7605, 0.7638333333333333, 0.7655, 0.7641666666666667, 0.7635, 0.7623333333333333, 0.7609999999999999, 0.76, 0.7598333333333332, 0.7601666666666667, 0.762, 0.7623333333333333, 0.7631666666666667, 0.7655000000000001, 0.7675, 0.7698333333333334, 0.7713333333333334, 0.7728333333333334, 0.7740000000000001, 0.774, 0.7743333333333334, 0.7750000000000001, 0.7756666666666667, 0.7776666666666667, 0.7791666666666668, 0.7801666666666668, 0.7805000000000002, 0.7793333333333334, 0.7775000000000001, 0.7775000000000001, 0.7770000000000001, 0.7745000000000001, 0.7715000000000001, 0.7686666666666666, 0.7655000000000001, 0.7628333333333333, 0.7616666666666666, 0.7611666666666665, 0.7618333333333334, 0.7616666666666666, 0.7609999999999999, 0.7625, 0.7631666666666665, 0.7625, 0.7631666666666665, 0.7635, 0.7638333333333333, 0.7653333333333333, 0.7665, 0.7676666666666667, 0.768, 0.7685000000000001, 0.7708333333333334, 0.7738333333333334, 0.7766666666666667, 0.7795, 0.7821666666666667, 0.784, 0.786, 0.786, 0.7875, 0.789, 0.791, 0.7931666666666668, 0.7948333333333333, 0.7966666666666666, 0.7978333333333334, 0.799, 0.8, 0.804, 0.8076666666666666, 0.8109999999999999, 0.8140000000000001, 0.8164999999999999, 0.819, 0.8211666666666666, 0.8239999999999998, 0.8261666666666667, 0.8281666666666666, 0.8291666666666666, 0.8303333333333333, 0.8319999999999999, 0.8328333333333333, 0.8341666666666667, 0.8351666666666666, 0.8353333333333334, 0.835, 0.8348333333333333, 0.8333333333333333, 0.8320000000000001, 0.8293333333333333, 0.8236666666666667, 0.8165000000000001, 0.8085000000000001, 0.7996666666666667, 0.7906666666666666, 0.7818333333333334, 0.773, 0.7646666666666666, 0.7556666666666667, 0.747, 0.7401666666666666, 0.7333333333333334, 0.7278333333333333, 0.7236666666666667, 0.7193333333333334, 0.7148333333333333, 0.7113333333333334, 0.7091666666666666, 0.708, 0.7108333333333333, 0.7176666666666666, 0.7273333333333333, 0.7346666666666667, 0.7416666666666667, 0.7506666666666667, 0.7598333333333334, 0.7693333333333333, 0.7781666666666667, 0.7865, 0.7885, 0.7881666666666667, 0.7868333333333334, 0.788, 0.7895000000000001, 0.7885, 0.7883333333333333, 0.7876666666666667, 0.7878333333333334, 0.7885, 0.791, 0.792, 0.7926666666666666, 0.7918095238095237, 0.7906428571428572, 0.7913095238095238, 0.7919761904761905, 0.7931428571428571, 0.7943095238095239, 0.7948095238095239, 0.7971428571428572, 
0.8004761904761905, 0.8046428571428572, 0.8098333333333334, 0.8150000000000001, 0.8196666666666668, 0.8226666666666667, 0.8215, 0.8088333333333333, 0.7971666666666667, 0.786, 0.7763333333333333, 0.7673333333333334, 0.7591666666666665, 0.7518333333333332, 0.7455, 0.7408333333333335, 0.7401666666666666, 0.7503333333333334, 0.76, 0.7689999999999999, 0.7763333333333333, 0.7823333333333333, 0.7863333333333333, 0.7891666666666667, 0.7908333333333333, 0.7915, 0.7911666666666667, 0.7906666666666666, 0.7901666666666667, 0.7891666666666667, 0.7876666666666667, 0.7866666666666667, 0.7865000000000001, 0.7858333333333334, 0.7861666666666667, 0.7873333333333333, 0.7893333333333333, 0.7921666666666668, 0.7955000000000001, 0.7988333333333333, 0.8031666666666666, 0.8071666666666667, 0.8098333333333333, 0.8135, 0.817, 0.8195, 0.8211666666666666, 0.8221666666666666, 0.8218333333333334, 0.8196666666666665, 0.8158333333333333, 0.8116666666666668, 0.808, 0.8026666666666668, 0.7961666666666667, 0.7898333333333334, 0.782, 0.7716666666666666, 0.7621666666666667, 0.7543333333333333, 0.7473333333333333, 0.7393333333333333, 0.7324999999999999, 0.7278333333333332, 0.7219999999999999, 0.718, 0.7178333333333332, 0.7228333333333332, 0.7278333333333332, 0.7326666666666666, 0.7364999999999999, 0.7416666666666666, 0.7464999999999999, 0.7508333333333332, 0.7576666666666666, 0.7611666666666665, 0.7631666666666665, 0.7628333333333333, 0.7628333333333333, 0.7596666666666667, 0.7569999999999999, 0.755, 0.7536666666666667, 0.7523333333333333, 0.7511666666666666, 0.7523333333333333, 0.7533333333333333, 0.7546666666666667, 0.7548333333333332, 0.7575000000000001, 0.7603333333333333, 0.7623333333333333, 0.7633333333333333, 0.7626666666666666, 0.7618333333333334, 0.7606666666666666, 0.7596666666666666, 0.7588333333333332, 0.7576666666666666, 0.7573333333333332, 0.7578333333333334, 0.7588333333333332, 0.7611666666666667, 0.7646666666666666, 0.7676666666666667, 0.7706666666666667, 0.7728333333333334, 0.774, 0.7766666666666667, 0.7796666666666667, 0.7821666666666667, 0.785, 0.786, 0.7871666666666667, 0.7891666666666667, 0.7911666666666667, 0.794, 0.7975000000000001, 0.8008333333333333, 0.8046666666666666, 0.8086666666666666, 0.8113333333333334, 0.8151666666666667, 0.8191666666666666, 0.8221666666666667, 0.8240000000000001, 0.8246666666666667, 0.8238333333333333, 0.8218333333333332, 0.8188333333333333, 0.8143333333333332, 0.8105, 0.806, 0.8020000000000002, 0.7985, 0.7961666666666667, 0.7943333333333333, 0.7926666666666666, 0.7926666666666666, 0.7928333333333334, 0.7946666666666666, 0.7969999999999999, 0.7996666666666667, 0.8005000000000001, 0.8009999999999999, 0.8020000000000002, 0.8033333333333333, 0.8055, 0.8066666666666669, 0.8071666666666667, 0.8061666666666667, 0.804, 0.8020000000000002, 0.8011666666666667, 0.8001666666666667, 0.7985000000000001, 0.7966666666666666, 0.7948333333333334, 0.7926666666666667, 0.7911666666666667, 0.791, 0.7911666666666666, 0.7913333333333333, 0.7913333333333333, 0.7921666666666666, 0.7925, 0.7931666666666667, 0.7936666666666665, 0.7939999999999999, 0.7941666666666667, 0.793, 0.792, 0.7908333333333333, 0.7893333333333333, 0.7853333333333333, 0.7801666666666668, 0.7728333333333334, 0.764, 0.7548333333333334, 0.7461666666666666, 0.7386666666666667, 0.7321666666666666, 0.7266666666666666, 0.722, 0.7188333333333332, 0.7171666666666666, 0.7201666666666666, 0.7271666666666666, 0.7348333333333333, 0.7431666666666666, 0.75, 0.7541666666666667, 0.7583333333333334, 0.7618333333333334, 0.7661666666666667, 0.77, 
0.7703333333333333, 0.7683333333333333, 0.7668333333333333, 0.7649999999999999, 0.7649999999999999, 0.7663333333333332, 0.7668333333333333, 0.7675, 0.7671666666666666, 0.7675, 0.7676666666666667, 0.7681666666666667, 0.7685, 0.7696666666666667, 0.771, 0.772, 0.7741666666666667, 0.7773333333333333, 0.7816666666666666, 0.7856666666666667, 0.7896666666666666, 0.7931666666666668, 0.7965000000000001, 0.7985, 0.8006666666666667, 0.8035, 0.8049999999999999, 0.8056666666666666, 0.8056666666666666, 0.8056666666666666, 0.8063333333333332, 0.8073333333333335, 0.8083333333333333, 0.8091666666666667, 0.8093333333333333, 0.8081666666666669, 0.8063333333333335, 0.8053333333333335, 0.805, 0.8051666666666668, 0.8061666666666667, 0.8071666666666667, 0.8078333333333333, 0.8078333333333333, 0.8085000000000001, 0.8096666666666668, 0.812, 0.8140000000000001, 0.8146666666666667, 0.8153333333333335, 0.8153333333333335, 0.8153333333333332, 0.8156666666666667, 0.8160000000000001, 0.8138333333333334, 0.8118333333333332, 0.8095000000000001, 0.806, 0.8029999999999999, 0.8001666666666667, 0.7983333333333333, 0.7951666666666667, 0.7921666666666667, 0.7908333333333333, 0.7913333333333333, 0.7925, 0.7935000000000001, 0.7958333333333334, 0.7976666666666666, 0.7989999999999999, 0.7988333333333333, 0.800142857142857, 0.8021428571428573, 0.8036428571428571, 0.8031428571428572, 0.8013095238095238, 0.7994761904761905, 0.7961428571428572, 0.7929761904761905, 0.7891428571428573, 0.7853095238095238, 0.7818333333333334, 0.7775000000000001, 0.7729999999999999, 0.7705, 0.7693333333333333, 0.7686666666666666, 0.7693333333333333, 0.7711666666666667, 0.7736666666666666, 0.7768333333333334, 0.7796666666666667, 0.7826666666666666, 0.7878333333333334, 0.7928333333333333, 0.7988333333333333, 0.8053333333333332, 0.8113333333333334, 0.8164999999999999, 0.8215, 0.8254999999999999, 0.8293333333333333, 0.8336666666666666, 0.8358333333333332, 0.8379999999999999, 0.8391666666666666, 0.8391666666666666, 0.8396666666666667, 0.8404999999999999, 0.8406666666666667, 0.8413333333333333, 0.8414999999999999, 0.8406666666666667, 0.8403333333333333, 0.8401666666666665, 0.8399999999999999, 0.8398333333333333, 0.8393333333333333, 0.8385, 0.8383333333333333, 0.8378333333333334, 0.8381666666666666, 0.8396666666666667, 0.8396666666666667, 0.8406666666666667, 0.8416666666666666, 0.8416666666666666, 0.8424999999999999, 0.8431666666666666, 0.8434999999999999, 0.8448333333333332, 0.8454999999999998, 0.8453333333333333, 0.8438333333333332, 0.8408333333333333, 0.8379999999999999, 0.8371666666666666, 0.8353333333333334, 0.8338333333333333, 0.8324999999999999, 0.8308333333333332, 0.8293333333333333, 0.8276333333333333, 0.8271333333333333, 0.8281333333333333, 0.8287999999999999, 0.8286333333333331, 0.8281333333333333, 0.8269666666666666, 0.8257999999999999, 0.8248, 0.8236333333333332, 0.8230000000000001, 0.8243333333333333, 0.825, 0.8263333333333334, 0.8281666666666666, 0.8314999999999999, 0.8358333333333332, 0.8399999999999999, 0.8436666666666668, 0.8478333333333333, 0.8516666666666666, 0.8539999999999999, 0.8554999999999999, 0.857, 0.8576666666666666, 0.8571666666666667, 0.8568333333333333, 0.8576666666666666, 0.8586666666666666, 0.8584999999999999, 0.8585, 0.858, 0.8541666666666666, 0.8288333333333332, 0.8004999999999999, 0.7736666666666665, 0.7473333333333333, 0.7218333333333333, 0.6978333333333333, 0.6759999999999999, 0.6553333333333333, 0.6366666666666666, 0.623, 0.6318333333333334, 0.6453333333333333, 0.6585, 0.6716666666666666, 0.6841666666666667, 
0.6963333333333332, 0.708, 0.7196666666666666, 0.7343333333333334, 0.7499999999999999, 0.7648333333333333, 0.7779999999999999, 0.7895, 0.7998333333333333, 0.8089999999999999, 0.8171666666666667, 0.825, 0.8318333333333333, 0.8344999999999999, 0.8348333333333333, 0.8341666666666667, 0.8334999999999999, 0.8333333333333334, 0.8326666666666667, 0.8318333333333333, 0.8311666666666666, 0.8296666666666667, 0.8278333333333332, 0.826, 0.8243333333333334, 0.8234999999999999, 0.8231666666666666, 0.8230000000000001, 0.8231666666666667, 0.8239999999999998, 0.8246666666666667, 0.8256666666666665, 0.827, 0.8286666666666666, 0.8301666666666666, 0.8305, 0.8308333333333333, 0.8314999999999999, 0.8326666666666667, 0.8333333333333334, 0.833, 0.8328333333333333, 0.8321666666666667, 0.8306666666666667, 0.8285, 0.8266666666666668, 0.8233333333333333, 0.8205, 0.8184999999999999, 0.8175000000000001, 0.8178333333333333, 0.8183333333333334, 0.819, 0.8206666666666667, 0.8233333333333333, 0.8263333333333334, 0.8309999999999998, 0.8343333333333334, 0.8375, 0.8403333333333333, 0.8433333333333334, 0.8466666666666667, 0.8504999999999999, 0.8521666666666666, 0.8474999999999999, 0.8353333333333334, 0.8240000000000001, 0.8150000000000001, 0.8059999999999998, 0.7973333333333332, 0.7889999999999999, 0.781, 0.7735833333333333, 0.76825, 0.7700833333333333, 0.7804166666666668, 0.79075, 0.7999166666666666, 0.8089166666666667, 0.8177500000000002, 0.8262500000000002, 0.8345833333333333, 0.843, 0.8525, 0.86, 0.8663333333333334, 0.8706666666666665, 0.8736666666666666, 0.8755, 0.8758333333333332, 0.875, 0.8723333333333333, 0.8676666666666666, 0.8619999999999999, 0.8568333333333333, 0.8523333333333334, 0.8481666666666665, 0.845, 0.8418333333333333, 0.8404999999999999, 0.8414999999999999, 0.8456666666666667, 0.8508333333333333, 0.8541666666666666, 0.8563333333333333, 0.8571666666666667, 0.8593333333333332, 0.8619999999999999, 0.8653333333333333, 0.8675, 0.8681666666666666, 0.8666666666666666, 0.8648333333333333, 0.8638333333333332, 0.8651666666666665, 0.8671666666666666, 0.8684999999999998, 0.8696666666666667, 0.8705, 0.8713333333333333, 0.8718333333333333, 0.8728333333333333, 0.8748333333333334, 0.8761666666666666, 0.8763333333333334, 0.8765000000000001, 0.8765000000000001, 0.8756666666666666, 0.8746666666666666, 0.8738333333333334, 0.8723333333333333, 0.8713333333333333, 0.8686666666666667, 0.8673333333333332, 0.8666666666666666, 0.8661666666666668, 0.8658333333333333, 0.8658333333333333, 0.8661666666666668, 0.8666666666666666, 0.8684999999999998, 0.8695, 0.8713333333333333, 0.874, 0.8776666666666667, 0.8816666666666666, 0.8846666666666667, 0.8866666666666667, 0.8883333333333333, 0.8891666666666668, 0.8893333333333334, 0.8893333333333333, 0.8893333333333334, 0.8876666666666667, 0.8853333333333333, 0.8821666666666668, 0.8795, 0.8773333333333333, 0.8746666666666666, 0.8726666666666667, 0.8691666666666666, 0.8643333333333334, 0.8593333333333334, 0.8545, 0.8411666666666667, 0.827, 0.8141666666666667, 0.8026666666666665, 0.7928333333333333, 0.784, 0.7778333333333333, 0.7741666666666667, 0.7718333333333333, 0.771, 0.7781666666666667, 0.787, 0.7946666666666667, 0.7996666666666666, 0.8043333333333333, 0.807, 0.807, 0.8046666666666666, 0.8001666666666667, 0.7956666666666667, 0.7946666666666667, 0.7938333333333334, 0.793, 0.7931666666666668, 0.7906666666666667, 0.79, 0.7911666666666668, 0.793, 0.7956666666666667, 0.7971666666666668, 0.7950000000000002, 0.7915000000000001, 0.7883333333333333, 0.7858333333333334, 0.7863333333333334, 0.786, 
0.7855000000000001, 0.7858333333333334, 0.7868333333333334, 0.7873333333333334, 0.7873333333333334, 0.7886666666666667, 0.7906666666666667, 0.7926666666666666, 0.7936666666666666, 0.7946666666666667, 0.7956666666666667, 0.796, 0.7951666666666666, 0.7958333333333333, 0.7969999999999999, 0.7975, 0.7969999999999999, 0.7965, 0.7954999999999999, 0.7943333333333333, 0.7913333333333333, 0.7878333333333333, 0.7858333333333334, 0.7828333333333333, 0.7786666666666667, 0.7746666666666667, 0.7708333333333334, 0.7663333333333334, 0.7631666666666665, 0.7603333333333333, 0.7593333333333334, 0.7591666666666667, 0.7581666666666667, 0.7573333333333332, 0.7585, 0.7599999999999999, 0.7606666666666666, 0.7616666666666665, 0.7628333333333333, 0.7638333333333331, 0.7621666666666667, 0.7611666666666667, 0.7601666666666667, 0.7596666666666667, 0.757, 0.7541666666666667, 0.751, 0.7470000000000001, 0.742, 0.7378333333333333, 0.7364999999999999, 0.7344999999999999, 0.7329999999999999, 0.7314999999999999, 0.7314999999999999, 0.732, 0.734, 0.7369999999999999, 0.74, 0.7421666666666666, 0.7445, 0.7468333333333333, 0.7476666666666667, 0.7466666666666667, 0.7446666666666666, 0.743, 0.741, 0.739, 0.7375, 0.7355, 0.7341666666666666, 0.7328333333333333, 0.7325, 0.7335, 0.735, 0.734, 0.7336666666666667, 0.7283333333333333, 0.7198333333333333, 0.7123333333333333, 0.7051666666666666, 0.6991666666666667, 0.6931666666666667, 0.6871666666666666, 0.6818333333333333, 0.6788333333333333, 0.6751666666666667, 0.6765, 0.6808333333333334, 0.6853333333333333, 0.6885000000000001, 0.6885, 0.6885, 0.6883333333333332, 0.6871666666666666, 0.6855, 0.6845, 0.6835000000000001, 0.6823333333333335, 0.6813333333333333, 0.6803333333333332, 0.6813333333333333, 0.6823333333333332, 0.6843333333333333, 0.6873333333333334, 0.69, 0.692, 0.6941666666666666, 0.6944999999999999, 0.6935, 0.6931666666666667, 0.6921666666666667, 0.6861666666666666, 0.6755833333333333, 0.6629166666666666, 0.6489166666666667, 0.6349166666666667, 0.6214166666666666, 0.6090833333333333, 0.5970833333333333, 0.5844166666666666, 0.5724166666666667, 0.5664166666666667, 0.5695, 0.5761666666666667, 0.5848333333333333, 0.594, 0.6033333333333333, 0.6136666666666667, 0.6246666666666666, 0.6365000000000001, 0.6486666666666666, 0.6606666666666667, 0.6675, 0.6735, 0.6786666666666668, 0.6798333333333333, 0.6798333333333334, 0.6793333333333333, 0.6788333333333333, 0.6778333333333333, 0.6756666666666666, 0.6726666666666666, 0.6691666666666667, 0.6631666666666666, 0.6573333333333333, 0.655, 0.6529999999999999, 0.651, 0.6475000000000001, 0.6416666666666666, 0.6361666666666667, 0.6271666666666667, 0.6091666666666666, 0.5873333333333334, 0.5658333333333333, 0.5436666666666666, 0.5123333333333334, 0.4798333333333333, 0.4456666666666667, 0.41133333333333333, 0.37583333333333335, 0.34516666666666673, 0.32799999999999996, 0.32033333333333336, 0.31283333333333335, 0.3035, 0.289, 0.27149999999999996, 0.25433333333333336, 0.23633333333333334, 0.21183333333333332, 0.18633333333333332, 0.15583333333333332, 0.12166666666666666, 0.08666666666666666, 0.054166666666666655, 0.03633333333333332, 0.022499999999999985, 0.011666666666666655, 0.004166666666666655, 0.004666666666666656, 0.006499999999999989, 0.02066666666666666, 0.03666666666666666, 0.05233333333333333, 0.06766666666666667, 0.07733333333333334, 0.07783333333333334, 0.07733333333333334, 0.0765, 0.0765, 0.07383333333333333, 0.06016666666666666, 0.04433333333333333, 0.02883333333333333, 0.01349999999999999, 0.004166666666666656, 0.0038333333333333227, 
0.006666666666666657, 0.01799999999999999, 0.026666666666666655, 0.031666666666666655, 0.03616666666666666, 0.05116666666666666, 0.05833333333333333, 0.05899999999999998, 0.05933333333333333, 0.0595, 0.056333333333333326, 0.04466666666666665, 0.03499999999999999, 0.03033333333333333, 0.02566666666666666, 0.010166666666666655, 0.0028333333333333223, 0.002333333333333322, 0.0016666666666666555, 0.0019999999999999888, 0.0021666666666666557, 0.0021666666666666557, 0.002499999999999989, 0.0036666666666666558, 0.012166666666666655, 0.02799999999999999, 0.029833333333333323, 0.02999999999999999, 0.029666666666666654, 0.028833333333333322, 0.028833333333333322, 0.028999999999999988, 0.028666666666666656, 0.027166666666666655, 0.01833333333333332, 0.002999999999999986, 0.0011666666666666529, 0.0008333333333333194, 0.0008333333333333194, 0.0016666666666666527, 0.0018333333333333194, 0.001999999999999986, 0.0026666666666666527, 0.004666666666666653, 0.015833333333333317, 0.015999999999999983, 0.016166666666666652, 0.016166666666666652, 0.016166666666666652, 0.015999999999999983, 0.01566666666666665, 0.01566666666666665, 0.015333333333333319, 0.013499999999999984, 0.0023333333333333183, 0.0016666666666666514, 0.0014999999999999849, 0.0019999999999999853, 0.002999999999999985, 0.0028333333333333184, 0.005999999999999985, 0.01733333333333332, 0.033333333333333326, 0.05016666666666666, 0.06716666666666667, 0.08516666666666665, 0.09983333333333333, 0.10049999999999999, 0.09949999999999999, 0.09899999999999999, 0.09583333333333333, 0.08416666666666665, 0.06816666666666667, 0.05183333333333332, 0.035833333333333314, 0.01849999999999999, 0.004333333333333319, 0.0034999999999999866, 0.0038333333333333197, 0.004166666666666654, 0.00483333333333332, 0.00483333333333332, 0.00483333333333332, 0.00533333333333332, 0.013166666666666655, 0.02783333333333332, 0.03366666666666666, 0.033333333333333326, 0.03316666666666666, 0.03383333333333332, 0.03416666666666666, 0.03499999999999999, 0.03466666666666666, 0.03399999999999999, 0.025833333333333326, 0.011333333333333322, 0.004999999999999986, 0.005499999999999986, 0.005666666666666653, 0.005499999999999986, 0.005999999999999986, 0.00533333333333332, 0.005666666666666653, 0.007666666666666653, 0.021666666666666657, 0.03899999999999999, 0.05833333333333333, 0.07816666666666665, 0.09916666666666665, 0.12033333333333332, 0.1385, 0.146, 0.15066666666666664, 0.15399999999999997, 0.14533333333333331, 0.12866666666666665, 0.10999999999999999, 0.09033333333333331, 0.06999999999999998, 0.04983333333333332, 0.03499999999999999, 0.04083333333333332, 0.05449999999999999, 0.06783333333333333, 0.08183333333333331, 0.10033333333333332, 0.12066666666666666, 0.141, 0.16133333333333333, 0.1815, 0.19883333333333333, 0.20833333333333331, 0.2088333333333333, 0.2053333333333333, 0.20066666666666663, 0.1895, 0.17683333333333334, 0.16116666666666665, 0.14583333333333331, 0.13016666666666668, 0.11499999999999999, 0.09649999999999999, 0.08116666666666665, 0.07066666666666666, 0.06816666666666665, 0.07949999999999999, 0.09216666666666665, 0.10883333333333331, 0.12533333333333332, 0.142, 0.15866666666666665, 0.17816666666666664, 0.199, 0.2188333333333333, 0.23149999999999996, 0.2373333333333333, 0.24333333333333332, 0.24833333333333335, 0.2533333333333333, 0.25833333333333336, 0.25966666666666666, 0.26166666666666666, 0.2636666666666666, 0.2658333333333333, 0.2683333333333333, 0.27116666666666667, 0.27316666666666667, 0.27616666666666667, 0.2786666666666666, 0.281, 0.28700000000000003, 0.292, 
0.29733333333333334, 0.30216666666666664, 0.30666666666666664, 0.31083333333333335, 0.315, 0.31899999999999995, 0.32316666666666666, 0.3278333333333333, 0.3318333333333333, 0.3368333333333333, 0.3411666666666667, 0.3461666666666666, 0.35100000000000003, 0.356, 0.3611666666666667, 0.36616666666666664, 0.37116666666666664, 0.37616666666666665, 0.3811666666666666, 0.38549999999999995, 0.39016666666666666, 0.39416666666666667, 0.3983333333333333, 0.4013333333333334, 0.40483333333333327, 0.4078333333333333, 0.4111666666666666, 0.4141666666666667, 0.4176666666666666, 0.4213333333333333, 0.4248333333333333, 0.4288333333333333, 0.4331666666666666, 0.43866666666666665, 0.44383333333333336, 0.4493333333333333, 0.45533333333333337, 0.4613333333333333, 0.4673333333333334, 0.4734999999999999, 0.47999999999999987, 0.48599999999999993, 0.4918333333333333, 0.4973333333333333, 0.5023333333333333, 0.5078333333333334, 0.5125, 0.5175, 0.522, 0.5268333333333334, 0.5308333333333333, 0.5355, 0.5405, 0.5456666666666667, 0.5516666666666666, 0.5569999999999999, 0.5628333333333333, 0.5678333333333333, 0.5736666666666667, 0.5786666666666667, 0.5845, 0.5898333333333332, 0.5946666666666667, 0.5996666666666666, 0.6046666666666667, 0.6093333333333334, 0.6140000000000001, 0.619, 0.6238333333333334, 0.6293333333333334, 0.6348333333333334, 0.6408333333333334, 0.6468333333333334, 0.6526666666666667, 0.6581666666666667, 0.6641666666666667, 0.6696666666666666, 0.6756666666666666, 0.6816666666666668, 0.6873333333333334, 0.693, 0.6981666666666666, 0.7036666666666667, 0.7088333333333333, 0.7143333333333333, 0.7193333333333334, 0.725, 0.7301666666666666, 0.7354999999999999, 0.7403333333333333, 0.7453333333333333, 0.7503333333333333, 0.7555, 0.7613333333333333, 0.7671666666666667, 0.7731666666666667, 0.7786666666666666, 0.7845, 0.7896666666666666, 0.7956666666666667, 0.8006666666666666, 0.8061666666666667, 0.8109999999999999, 0.8153333333333335, 0.8195, 0.8238333333333333, 0.8285, 0.8333333333333334, 0.8383333333333333, 0.8433333333333334, 0.8488333333333333, 0.8545, 0.8611666666666666, 0.8688333333333332, 0.8728333333333333, 0.8728333333333333, 0.8690000000000001, 0.8605, 0.8480000000000001, 0.8316666666666667, 0.8125, 0.7903333333333334, 0.766, 0.74, 0.718, 0.6998333333333334, 0.6855, 0.6738333333333333, 0.6666666666666666, 0.6678333333333334, 0.6759999999999999, 0.6866666666666666, 0.6991666666666666, 0.7103333333333334, 0.7193333333333334, 0.728, 0.7356666666666667, 0.7453333333333333, 0.7548333333333332, 0.7593333333333333, 0.7598333333333334, 0.759, 0.7563333333333333, 0.7548333333333332, 0.754, 0.7526666666666666, 0.7516666666666667, 0.75, 0.7486666666666666, 0.748, 0.7468333333333332, 0.7466666666666667, 0.7456666666666666, 0.744, 0.7428333333333332, 0.7425, 0.7424999999999999, 0.7431666666666666, 0.7435, 0.7436666666666667, 0.7438333333333333, 0.7443333333333333, 0.7471666666666665, 0.7508333333333332, 0.7545, 0.7583333333333333, 0.7626666666666666, 0.7663333333333333, 0.7703333333333333, 0.7735000000000001, 0.777, 0.7806666666666666, 0.7816666666666667, 0.7821666666666667, 0.7831666666666667, 0.7831666666666667, 0.7821666666666667, 0.7803333333333333, 0.7783333333333333, 0.7763333333333333, 0.7743333333333334, 0.7723333333333333, 0.7726666666666666, 0.7745000000000001, 0.7745, 0.7755000000000001, 0.7776666666666667, 0.7808333333333334, 0.7835000000000001, 0.7853333333333333, 0.7846666666666666, 0.784, 0.782, 0.779, 0.7775000000000001, 0.7765, 0.775, 0.7733333333333333, 0.7716666666666667, 0.7718333333333333, 
0.7733333333333334, 0.7748333333333334, 0.7775000000000001, 0.7801666666666668, 0.7830000000000001, 0.7845000000000001, 0.7856666666666667, 0.7866666666666667, 0.7876666666666667, 0.7873333333333334, 0.7871666666666667, 0.7875, 0.7873333333333334, 0.7866666666666667, 0.7856666666666667, 0.7856666666666667, 0.785, 0.7831666666666667, 0.78, 0.7756666666666667, 0.7705, 0.7648333333333334, 0.7583333333333334, 0.7505, 0.7424999999999999, 0.7331666666666666, 0.7241666666666666, 0.7166666666666667, 0.7106666666666667, 0.7056666666666667, 0.702, 0.70075, 0.7090833333333333, 0.7219166666666667, 0.7345833333333334, 0.74825, 0.76225, 0.7757499999999999, 0.7889166666666666, 0.8025833333333333, 0.8164166666666667, 0.8281666666666666, 0.8314999999999999, 0.8324999999999999, 0.8348333333333333, 0.8368333333333332, 0.8388333333333332, 0.841, 0.8426666666666666, 0.8431666666666666, 0.843, 0.8419999999999999, 0.8406666666666667, 0.8386666666666667, 0.8348333333333333, 0.8309999999999998, 0.8273333333333334, 0.8243333333333334, 0.8223333333333332, 0.8210000000000001, 0.8203333333333334, 0.8193333333333334, 0.8186666666666665, 0.8181666666666667, 0.8183333333333334, 0.8193333333333334, 0.8198333333333332, 0.819, 0.8183333333333334, 0.8173333333333334, 0.8166666666666668, 0.8163333333333334, 0.8163333333333334, 0.8168333333333335, 0.8175000000000001, 0.8175000000000001, 0.8176666666666665, 0.8186666666666668, 0.8186666666666668, 0.8196666666666668, 0.8195, 0.8198333333333332, 0.8203333333333334, 0.82, 0.82, 0.8201666666666666, 0.8201666666666666, 0.8203333333333334, 0.8210000000000001, 0.8218333333333332, 0.8236666666666667, 0.8256666666666665, 0.8276666666666666, 0.8309999999999998, 0.8348333333333333, 0.8385, 0.8423333333333332, 0.8455, 0.8481666666666665, 0.8503333333333332, 0.852, 0.8526666666666667, 0.8526666666666666, 0.8524999999999998, 0.8506666666666666, 0.8489999999999999, 0.8481666666666665, 0.8461666666666666, 0.8446666666666666, 0.8433333333333332, 0.8411666666666665, 0.8393333333333333, 0.8368333333333332, 0.8308333333333333, 0.817, 0.7838333333333333, 0.7471666666666666, 0.7133333333333333, 0.6806666666666665, 0.6491666666666667, 0.6196666666666666, 0.5913333333333333, 0.5648333333333333, 0.5426666666666666, 0.5306666666666666, 0.5389999999999999, 0.5511666666666667, 0.563, 0.5743333333333333, 0.5854999999999999, 0.5968333333333333, 0.6083333333333333, 0.6251666666666666, 0.6473333333333333, 0.6698333333333333, 0.6921666666666667, 0.7135, 0.7333333333333333, 0.7518333333333334, 0.7691666666666667, 0.785, 0.799, 0.8061666666666667, 0.8076666666666666, 0.8076666666666666, 0.807, 0.8063333333333335, 0.8063333333333335, 0.8056666666666666, 0.8056666666666666, 0.8063333333333332, 0.8081333333333334, 0.8086333333333334, 0.8079666666666666, 0.8073, 0.8063, 0.8051333333333333, 0.8036333333333332, 0.8034666666666667, 0.8024666666666667, 0.8013, 0.8, 0.8003333333333332, 0.8008333333333333, 0.8018333333333333, 0.8029999999999999, 0.8043333333333333, 0.8065, 0.8085000000000001, 0.8103333333333333, 0.8101666666666667, 0.8096666666666668, 0.8096666666666668, 0.808, 0.8048333333333334, 0.8016666666666665, 0.799, 0.795, 0.7918333333333333, 0.7891666666666667, 0.7883333333333333, 0.7875, 0.7865, 0.7871666666666667, 0.789, 0.7903333333333333, 0.7916666666666666, 0.7926666666666666, 0.7936666666666666, 0.7946666666666667, 0.7956666666666666, 0.7966666666666666, 0.7973333333333333, 0.7981666666666667, 0.7986666666666666, 0.8003333333333333, 0.8013333333333332, 0.8033333333333333, 0.8045, 0.8058333333333334, 
0.8078333333333333, 0.8095000000000001, 0.8111666666666668, 0.8133333333333335, 0.8161666666666667, 0.819, 0.8223333333333335, 0.826, 0.8296666666666667, 0.8333333333333334, 0.8363333333333334, 0.8388333333333332, 0.8398333333333333, 0.8396666666666667, 0.8388333333333332, 0.837, 0.8356666666666668, 0.8326666666666667, 0.8298333333333334, 0.8276666666666668, 0.826, 0.8244999999999999, 0.8244999999999999, 0.8255000000000001, 0.8263333333333331, 0.8273333333333331, 0.8266666666666665, 0.828, 0.8285, 0.827, 0.8243333333333333, 0.8225, 0.8205, 0.8166666666666668, 0.8111666666666666, 0.805, 0.7993333333333335, 0.7926666666666666, 0.7871666666666667, 0.7831666666666667, 0.7796666666666667, 0.7763333333333333, 0.7736666666666666, 0.7725000000000001, 0.773, 0.7747380952380952, 0.7762380952380953, 0.777904761904762, 0.7797380952380953, 0.7810714285714286, 0.7825714285714287, 0.7839047619047619, 0.7849047619047619, 0.7877380952380953, 0.7912380952380953, 0.7925, 0.7946666666666667, 0.7976666666666666, 0.8006666666666667, 0.8031666666666666, 0.8055, 0.808, 0.8111666666666666, 0.8133333333333332, 0.8148333333333333, 0.8185, 0.8221666666666666, 0.8248333333333333, 0.827, 0.8288333333333334, 0.8316666666666667, 0.8334999999999999, 0.8330833333333333, 0.8310833333333333, 0.8300833333333333, 0.8285833333333332, 0.8270833333333332, 0.8264166666666666, 0.8252499999999999, 0.8254166666666667, 0.8252499999999999, 0.8265833333333333, 0.8296666666666666, 0.8328333333333333, 0.8351666666666666, 0.8365, 0.8375, 0.8363333333333334, 0.836, 0.8346666666666666, 0.8331666666666667, 0.8313333333333333, 0.8286666666666667, 0.8243333333333333, 0.8088333333333333, 0.7909999999999999, 0.7741666666666667, 0.7601666666666667, 0.7470000000000001, 0.7355, 0.7248333333333333, 0.7144999999999999, 0.7060000000000001, 0.701, 0.7083333333333333, 0.7198333333333332, 0.7311666666666666, 0.7423333333333333, 0.7526666666666666, 0.7621666666666667, 0.7703333333333334, 0.7778333333333334, 0.7843333333333333, 0.7858333333333334, 0.7846666666666666, 0.7826666666666666, 0.7796666666666666, 0.7756666666666666, 0.7708333333333333, 0.766, 0.7618333333333334, 0.7568333333333334, 0.7518333333333332, 0.7508333333333332, 0.7509999999999999, 0.751, 0.752, 0.7525000000000001, 0.7531666666666667, 0.7523333333333333, 0.7518333333333332, 0.7528333333333332, 0.7588333333333332, 0.7641666666666667, 0.7686666666666667, 0.7728333333333334, 0.7758333333333333, 0.7793333333333333, 0.7828333333333333, 0.7871666666666667, 0.7911666666666667, 0.7941666666666667, 0.792, 0.7903333333333333, 0.7896666666666666, 0.7896666666666667, 0.7896666666666667, 0.7896666666666667, 0.7885000000000001, 0.7865, 0.7866666666666667, 0.7871666666666667, 0.7875, 0.7868333333333334, 0.7861666666666667, 0.786, 0.7858333333333334, 0.7858333333333334, 0.787, 0.7895000000000001, 0.7896666666666666, 0.7881666666666667, 0.7873333333333333, 0.7881666666666667, 0.7898333333333334, 0.7908333333333333, 0.7918333333333334, 0.7926666666666666, 0.7938333333333334, 0.7953333333333333, 0.7969999999999999, 0.7998333333333333, 0.8006666666666666, 0.8008333333333333, 0.8005000000000001, 0.7998333333333334, 0.8, 0.7996666666666667, 0.7995, 0.7994999999999999, 0.7975, 0.7961666666666666, 0.7968333333333334, 0.7983333333333332, 0.7996666666666666, 0.8013333333333333, 0.8023333333333333, 0.8033333333333333, 0.8043333333333335, 0.8051666666666668, 0.8081666666666667, 0.8113333333333334, 0.8137333333333334, 0.8149000000000001, 0.8160666666666667, 0.8167333333333333, 0.8169000000000001, 0.8183999999999999, 
0.8200666666666667, 0.8207333333333334, 0.8207333333333333, 0.8207333333333334, 0.8213333333333332, 0.8223333333333332, 0.8231666666666666, 0.8244999999999999, 0.8263333333333331, 0.827, 0.8268333333333333, 0.8266666666666665, 0.8256666666666665, 0.8236666666666667, 0.8215, 0.8188333333333333, 0.8160000000000001, 0.8140000000000001, 0.8119999999999999, 0.8108333333333334, 0.8101666666666667, 0.8105, 0.8119999999999999, 0.8130000000000001, 0.8138333333333334, 0.8156666666666667, 0.8185, 0.82, 0.8216666666666667, 0.8213333333333332, 0.8215, 0.8215, 0.8216666666666667, 0.8223333333333332, 0.8218333333333334, 0.8186666666666668, 0.8136666666666666, 0.808, 0.8018333333333334, 0.7965, 0.7901666666666667, 0.7838333333333334, 0.7768333333333334, 0.7695000000000001, 0.7635, 0.7605000000000001, 0.7583333333333333, 0.7569999999999999, 0.7565, 0.7565, 0.7566666666666666, 0.7561666666666665, 0.7565, 0.7583333333333332, 0.7638333333333333, 0.7693333333333332, 0.7738333333333334, 0.7786666666666666, 0.7836666666666667, 0.7895, 0.7956666666666667, 0.8023333333333333, 0.8078333333333333, 0.8116666666666668, 0.8121666666666666, 0.8118333333333334, 0.8125, 0.8131666666666668, 0.8125, 0.8116666666666668, 0.8105, 0.8106666666666668, 0.8125952380952383, 0.8152619047619047, 0.8164285714285715, 0.8155952380952382, 0.814595238095238, 0.8132619047619046, 0.8129285714285714, 0.812095238095238, 0.8114285714285714, 0.8090952380952381, 0.8056666666666665, 0.8019999999999999, 0.7991666666666666, 0.7979999999999998, 0.7968333333333333, 0.7961666666666667, 0.7951666666666667, 0.7946666666666667, 0.7945, 0.7946666666666666, 0.7951666666666666, 0.7969999999999999, 0.8004999999999999, 0.8045, 0.8094999999999999, 0.8138333333333334, 0.8183333333333334, 0.8218333333333334, 0.8246666666666667, 0.8273333333333334, 0.8291666666666666, 0.8306666666666667, 0.8293333333333333, 0.8285, 0.8276666666666668, 0.8273333333333334, 0.8263333333333331, 0.8263333333333331, 0.8271666666666666, 0.8286666666666667, 0.8305, 0.8324999999999999, 0.8350833333333334, 0.8357499999999998, 0.8365833333333332, 0.83675, 0.83825, 0.8384166666666666, 0.8387499999999999, 0.8382499999999998, 0.8380833333333333, 0.8370833333333332, 0.8366666666666667, 0.8371666666666666, 0.8364999999999998, 0.8358333333333332, 0.834, 0.8326666666666667, 0.8295, 0.8288333333333332, 0.828, 0.827, 0.8261666666666667, 0.8234999999999999, 0.8208333333333332, 0.8193333333333334, 0.8173333333333334, 0.8161666666666667, 0.8164999999999999, 0.8153333333333332, 0.8135, 0.8123333333333334, 0.8115, 0.8128333333333334, 0.8151666666666667, 0.8161666666666667, 0.8186666666666665, 0.82, 0.8213333333333332, 0.8225, 0.8241666666666667, 0.8253333333333334, 0.8261666666666665, 0.8271666666666666, 0.8268333333333333, 0.8273333333333334, 0.827, 0.827, 0.8271666666666666, 0.8271666666666666, 0.827, 0.827, 0.8276666666666666, 0.8285, 0.8303333333333333, 0.8328333333333333, 0.836, 0.8399999999999999, 0.8438333333333332, 0.8473333333333333, 0.8504999999999999, 0.8541666666666666, 0.8496666666666666, 0.8443333333333334, 0.8394999999999999, 0.8346666666666668, 0.8301666666666666, 0.8258333333333334, 0.8216666666666667, 0.8181666666666667, 0.8161666666666667, 0.8140000000000001, 0.8201666666666666, 0.8275, 0.8343333333333334, 0.8406666666666667, 0.8441666666666666, 0.8476666666666667, 0.8516666666666666, 0.8536666666666667, 0.8529523809523809, 0.8524523809523809, 0.8516190476190475, 0.8504523809523808, 0.8481190476190476, 0.8466190476190475, 0.8467857142857141, 0.8456190476190475, 0.8439523809523809, 
0.8436190476190475, 0.8443333333333334, 0.8451666666666666, 0.8456666666666666, 0.8458333333333334, 0.8468333333333333, 0.8466666666666665, 0.8471666666666666, 0.8491666666666665, 0.8504999999999999, 0.8518333333333332, 0.8535, 0.8546666666666665, 0.8543333333333333, 0.8533333333333333, 0.8523333333333334, 0.852, 0.8513333333333334, 0.8494999999999999, 0.8484999999999999, 0.8481666666666665, 0.8484999999999999, 0.8474999999999999, 0.8484999999999999, 0.8498333333333333, 0.851, 0.8515, 0.8521666666666666, 0.8533333333333333, 0.8528333333333332, 0.8483333333333333, 0.8443333333333334, 0.8426666666666666, 0.8396666666666667, 0.8173333333333334, 0.7851666666666667, 0.7547666666666667, 0.7261, 0.6982666666666667, 0.6731, 0.6524333333333333, 0.6321, 0.6117666666666667, 0.5934333333333333, 0.5959333333333333, 0.6096, 0.6228333333333333, 0.6356666666666666, 0.6486666666666666, 0.6621666666666667, 0.6791666666666666, 0.6991666666666667, 0.7190000000000001, 0.7384999999999999, 0.7578333333333334, 0.776, 0.7931666666666667, 0.8086666666666666, 0.8216666666666667, 0.8318333333333333, 0.8373333333333332, 0.8386666666666664, 0.8378333333333332, 0.8361666666666666, 0.8333333333333333, 0.8311666666666667, 0.8291666666666666, 0.8278333333333332, 0.8283333333333331, 0.8288333333333334, 0.8296666666666667, 0.8305, 0.8324999999999999, 0.8351666666666666, 0.8378333333333332, 0.8396666666666667, 0.8408333333333333, 0.8413333333333333, 0.8408333333333333, 0.8413333333333333, 0.8416666666666666, 0.8423333333333332, 0.8426666666666666, 0.8421666666666665, 0.8418333333333333, 0.841, 0.8396666666666667, 0.8381666666666666, 0.8371666666666666, 0.8351666666666666, 0.833, 0.8305, 0.828, 0.8251666666666667, 0.8011666666666667, 0.7711666666666667, 0.7435, 0.7175, 0.6926666666666665, 0.6693333333333332, 0.6471666666666667, 0.626, 0.606, 0.5871666666666667, 0.5903333333333334, 0.6013333333333333, 0.6118333333333333, 0.6218333333333333, 0.6313333333333333, 0.6418333333333333, 0.6556666666666666, 0.6719999999999999, 0.6881666666666667, 0.7043333333333333, 0.7205, 0.7361666666666666, 0.7515, 0.7668333333333333, 0.7816666666666666, 0.7946666666666666, 0.8033333333333333, 0.8089999999999999, 0.8140000000000001, 0.8188333333333333, 0.8233333333333333, 0.828, 0.8320000000000001, 0.8361666666666666, 0.8401666666666665, 0.8444999999999998, 0.8483333333333334, 0.8521666666666666, 0.8559999999999999, 0.8581666666666667, 0.8586666666666666, 0.8586666666666666, 0.859, 0.8583333333333332, 0.8581666666666667, 0.8576666666666666, 0.8571666666666665, 0.8563333333333333, 0.8546666666666667, 0.8548333333333332, 0.8556666666666667, 0.8566666666666667, 0.8576666666666666, 0.8593333333333334, 0.8605, 0.8608333333333332, 0.8621666666666666, 0.8633333333333333, 0.865, 0.8653333333333333, 0.8658333333333333, 0.8653333333333333, 0.8644999999999999, 0.8629999999999999, 0.861, 0.8594999999999999, 0.8564999999999999, 0.8539999999999999, 0.8521666666666665, 0.8503333333333332, 0.8491666666666665, 0.8481666666666665, 0.8471666666666666, 0.8466666666666665, 0.8466666666666665, 0.8469999999999999, 0.8489999999999999, 0.8514999999999999, 0.853, 0.8550000000000001, 0.8545, 0.8533333333333333, 0.8526666666666667, 0.8521666666666666, 0.8513333333333334, 0.8511666666666666, 0.8501666666666667, 0.8483333333333333, 0.8473333333333333, 0.8465, 0.8481666666666665, 0.8513333333333334, 0.8531666666666666, 0.8541666666666666, 0.8563333333333333, 0.8583333333333332, 0.8605, 0.8623333333333333, 0.8633333333333333, 0.8613333333333333, 0.8584999999999999, 
0.8551666666666666, 0.8524999999999998, 0.8508333333333333, 0.8478333333333333, 0.8448333333333332, 0.841, 0.8371666666666666, 0.8336666666666666, 0.834, 0.8348333333333333, 0.8346666666666666, 0.8336666666666666, 0.8324999999999999, 0.8313333333333333, 0.8293333333333333, 0.8273333333333334, 0.8251666666666667, 0.8234999999999999, 0.8253333333333334, 0.829, 0.8336666666666666, 0.8386666666666667, 0.8436666666666668, 0.8486666666666667, 0.8536666666666667, 0.8591666666666666, 0.8653333333333333, 0.8713333333333333, 0.8724999999999999, 0.8718333333333333, 0.8709999999999999, 0.8711666666666666, 0.8706666666666667, 0.8705, 0.8698333333333332, 0.8693333333333333, 0.8671666666666666, 0.8648333333333333, 0.8621666666666666, 0.8571666666666665, 0.8523333333333334, 0.8481666666666665, 0.8436666666666666, 0.8401666666666665, 0.8373333333333333, 0.834, 0.8306666666666667, 0.8275, 0.8261666666666667, 0.8258333333333333, 0.8261666666666667, 0.8246666666666667, 0.8240000000000001, 0.8218333333333334, 0.8196666666666668, 0.8176666666666665, 0.8156666666666667, 0.8136666666666666, 0.8111666666666666, 0.812, 0.8126666666666666, 0.8140000000000001, 0.8150000000000001, 0.8155000000000001, 0.8161666666666667, 0.817, 0.8186666666666665, 0.8191666666666666, 0.8196666666666665, 0.819074074074074, 0.8177083333333333, 0.816190476190476, 0.8138888888888888] + +import time +import numpy as np +from dbmind.common.types.sequence import Sequence +from dbmind.common.algorithm.forecasting.forcasting_algorithm import quickly_forecast, \ + sequence_interpolate +from dbmind.common.algorithm.statistics import trim_head_and_tail_nan + + +def do_arima_forecast_show(origin_data, forecast_and_history_data, p=0, d=0, q=0): + try: + import matplotlib.pyplot as plt + fig = plt.figure(figsize=(16, 9)) + ax1 = fig.add_subplot(311) + + # forecast_data/data + ax1.plot(forecast_and_history_data, color="red", linestyle='--', + label="forecast_data:(p:%s, d:%s, q:%s)" + % (p, d, q)) + ax1.plot(origin_data, label="raw_data") + ax1.legend() + plt.show() + except Exception: + pass + + +def test_arima_for_linear(): + data = DATA[2000:5000] + data = list(range(3000)) + train_length = 2100 + forecast_length = 900 + train_data = data[:train_length] + s = Sequence(timestamps=list(range(len(data[:train_length]))), values=train_data, step=1000) + forecasting_minutes = forecast_length * s.step // (60 * 1000) + forecast_sequence = quickly_forecast(sequence=s, forecasting_minutes=forecasting_minutes) + do_arima_forecast_show(origin_data=data[:forecast_length + train_length], + forecast_and_history_data=list(data[:train_length]) + list(forecast_sequence.values)) + assert True + + +def test_arima_for_nolinear(): + data = DATA[2000:5000] + train_length = 2100 + forecast_length = 900 + train_data = data[:train_length] + s = Sequence(timestamps=list(range(len(train_data))), values=train_data, step=1000) + forecasting_minutes = forecast_length * s.step // (60 * 1000) + forecast_sequence = quickly_forecast(sequence=s, forecasting_minutes=forecasting_minutes) + do_arima_forecast_show(origin_data=data[:forecast_length + train_length], + forecast_and_history_data=list(data[:train_length]) + list(forecast_sequence.values)) + assert True + + +def test_arima_for_seasonal(): + forecast_length = 1000 + train_length = 8000 + train_data = seasonal_data[:train_length] + s = Sequence(timestamps=list(range(len(train_data))), values=train_data, step=600) + forecasting_minutes = forecast_length * s.step // (60 * 1000) + forecast_sequence = quickly_forecast(sequence=s, 
forecasting_minutes=forecasting_minutes)
+    do_arima_forecast_show(origin_data=seasonal_data[:forecast_length + train_length],
+                           forecast_and_history_data=list(seasonal_data[:train_length]) + list(forecast_sequence.values))
+    assert True
+
+
+def test_sequence_interpolate():
+    start_time_array = time.strptime("2021-12-20 00:00:00", "%Y-%m-%d %H:%M:%S")
+    start_timestamp = int(time.mktime(start_time_array)) * 1000
+    interval = 60 * 1000
+    head_nan_data = [np.nan, np.nan, np.nan, np.nan] + list(range(1, 100))
+    tail_nan_data = list(range(1, 100)) + [np.nan, np.nan, np.nan, np.nan]
+    head_tail_nan_data = head_nan_data + tail_nan_data
+    all_nan_data = [np.nan] * 100
+    internal_nan_data = head_nan_data + [np.nan] * 10 + tail_nan_data
+    invalid_data = [all_nan_data, ]
+    test_data = [head_nan_data, tail_nan_data, head_tail_nan_data, all_nan_data, internal_nan_data]
+    except_count = 0
+    for t_data in test_data:
+        try:
+            timestamps = [start_timestamp + interval * (i + 1) for i in range(len(t_data))]
+            s = Sequence(timestamps=timestamps, values=t_data)
+            s_new = sequence_interpolate(s)
+            not_exist_nan = not any(np.isnan(i) for i in s_new.values)
+            assert not_exist_nan
+
+        except Exception:
+            except_count += 1
+            continue
+    assert except_count == len(invalid_data)
+
+
+def test_trim_head_and_tail_nan():
+    test_data = [np.nan, 1, 2, 3, np.nan]
+    trim_data = trim_head_and_tail_nan(test_data)
+    assert not np.isnan(trim_data[0])
+    assert not np.isnan(trim_data[-1])
+    test_data = []
+    trim_data = trim_head_and_tail_nan(test_data)
+    assert len(trim_data) == 0
+
+    test_data = [np.nan, np.nan, np.nan]
+    trim_data = trim_head_and_tail_nan(test_data)
+    assert len(trim_data) == len(test_data)
+    assert len(trim_data) != 0
+    assert trim_data[0] == 0
+    assert trim_data[-1] == 0
diff --git a/src/gausskernel/dbmind/tools/tests/test_basic_algorithms.py b/src/gausskernel/dbmind/tools/tests/test_basic_algorithms.py
new file mode 100644
index 000000000..c6bc05176
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/tests/test_basic_algorithms.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
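The binary-search tests in test_basic_algorithms.py below rely on the same semantics as Python's standard `bisect` module: `how_many_lesser_elements(s, v)` equals the leftmost insertion point for `v`, that is, the count of elements strictly less than `v`. A minimal reference model using only the standard library (illustrative; dbmind's `basic` module may be implemented differently, and its `binary_search_left` returns -1 for absent values, unlike `bisect_left`):

```
# Reference model for the semantics asserted below; not dbmind's implementation.
import bisect

s1 = (1, 3, 3, 4, 10, 11, 15, 17, 20, 20, 20, 21, 21, 22)

# Count of elements strictly less than v == leftmost insertion point.
assert bisect.bisect_left(s1, 3) == 1      # only 1 is less than 3
assert bisect.bisect_left(s1, 4) == 3      # 1, 3, 3
assert bisect.bisect_left(s1, 100) == len(s1)

# For a present value, the insertion point is its leftmost matching index.
idx = bisect.bisect_left(s1, 20)
assert s1[idx] == 20 and idx == 8
```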
+from dbmind.common.algorithm import basic + +s1 = (1, 3, 3, 4, 10, 11, 15, 17, 20, 20, 20, 21, 21, 22) +s2 = (1, 2, 2, 2, 3) + + +def test_binary_search(): + for s in (s1, s2): + for v in s: + assert s[basic.binary_search(s, v)] == v + + +def test_binary_search_left(): + assert basic.binary_search_left(s1, 1) == 0 + assert basic.binary_search_left(s1, 20) == 8 + assert basic.binary_search_left(s1, 22) == len(s1) - 1 + assert basic.binary_search_left(s1, 0) == -1 + assert basic.binary_search_left(s1, 2222) == -1 + + assert basic.how_many_lesser_elements(s1, 0) == 0 + assert basic.how_many_lesser_elements(s1, 1) == 0 + assert basic.how_many_lesser_elements(s1, 3) == 1 + assert basic.how_many_lesser_elements(s1, 4) == 3 + assert basic.how_many_lesser_elements(s1, 10) == 4 + assert basic.how_many_lesser_elements(s1, 100) == len(s1) + + start = basic.how_many_lesser_elements(s1, 17) + end = basic.how_many_lesser_elements(s1, 22) + length = end - start + 1 + assert length == 7 + diff --git a/src/gausskernel/dbmind/tools/tests/test_daemon.py b/src/gausskernel/dbmind/tools/tests/test_daemon.py new file mode 100644 index 000000000..21005f2ce --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_daemon.py @@ -0,0 +1,104 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import multiprocessing +import os +import shutil +import time + +import dbmind.common.process +from dbmind.common.daemon import Daemon +from dbmind.common.platform import WIN32 + +BASEPATH = os.path.abspath(os.path.dirname(__file__)) +PID_NAME = 'tester.pid' + + +def test_process(): + proc = dbmind.common.process.Process(os.getpid()) + assert proc.alive is True + assert 'python' in proc.path + assert os.path.samefile(proc.cwd, os.getcwd()) + + +class DaemonTester(Daemon): + def __init__(self, q, pid_file): + self.q = q + super().__init__(pid_file) + + def clean(self): + self.q.put(3) + print('I am cleaning garbage and closing resources.') + + def run(self): + working_dir = os.path.dirname(self.pid_file) + os.chdir(working_dir) + self.q.put(2) + while True: + # blocking + print('I am running.') + time.sleep(1) + + def start(self): + self.q.put(1) + super().start() + + +def test_daemon(): + old_path = os.getcwd() + working_dir = os.path.join(BASEPATH, 'tmp') + PID_PATH = os.path.join(working_dir, PID_NAME) + os.makedirs(working_dir, exist_ok=True) + os.chdir(working_dir) + q = multiprocessing.Queue() + + start_one = DaemonTester(q, PID_PATH) + stop_one = DaemonTester(None, PID_PATH) + + process = multiprocessing.Process(target=start_one.start) + process.start() + start_success = q.get() + assert start_success == 1 + # run_success = q.get() + # assert run_success == 2 + assert process.pid != os.getpid() + + while not os.path.exists(PID_PATH): + time.sleep(0.1) + + file_pid = 0 + with open(PID_PATH) as fp: + file_pid = int(fp.readline().strip()) + + running_proc = dbmind.common.process.Process(file_pid) + assert running_proc.alive + assert os.path.samefile(running_proc.cwd, os.getcwd()) + + if WIN32: + # There is a bug on Windows, + # which raises 
a segmentation fault when calling the WIN32 API.
+        # So we have to bypass the stop() test case.
+        process.terminate()
+        process.join()
+        assert not process.is_alive()
+    else:
+        stop_one.stop()
+        assert not running_proc.alive
+
+    q.close()
+
+    try:
+        shutil.rmtree(working_dir)
+    except OSError:
+        pass
+    os.chdir(old_path)
diff --git a/src/gausskernel/dbmind/tools/tests/test_dai.py b/src/gausskernel/dbmind/tools/tests/test_dai.py
new file mode 100644
index 000000000..4ed641402
--- /dev/null
+++ b/src/gausskernel/dbmind/tools/tests/test_dai.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+#
+# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+import configparser
+
+from dbmind import global_vars
+from dbmind.common.tsdb import TsdbClientFactory
+from dbmind.common.types import RootCause
+from dbmind.common.types import Sequence
+from dbmind.common.types import SlowQuery
+from dbmind.metadatabase import create_metadatabase_schema
+from dbmind.service import dai
+from dbmind.service.dai import SequenceUtils, DISTINGUISHING_INSTANCE_LABEL
+
+configs = configparser.ConfigParser()
+configs.add_section('TSDB')
+configs.set('TSDB', 'name', 'prometheus')
+configs.set('TSDB', 'host', '10.90.56.172')  # TODO: CHANGE or IGNORE
+configs.set('TSDB', 'port', '9090')
+configs.add_section('METADATABASE')
+configs.set('METADATABASE', 'dbtype', 'sqlite')
+configs.set('METADATABASE', 'host', '')
+configs.set('METADATABASE', 'port', '')
+configs.set('METADATABASE', 'username', '')
+configs.set('METADATABASE', 'password', '')
+configs.set('METADATABASE', 'database', 'test_metadatabase.db')
+global_vars.configs = configs
+global_vars.must_filter_labels = {}
+golden_kpi = ('os_cpu_usage', 'os_mem_usage',
+              'gaussdb_qps_by_instance', 'gaussdb_dynamic_used_memory')
+
+create_metadatabase_schema()
+
+
+def test_range_metrics():
+    minutes = 10
+    dai.get_latest_metric_sequence('pg_boot_time', minutes).fetchall()
+    for metric in golden_kpi:
+        results = dai.get_latest_metric_sequence(metric, minutes).fetchall()
+        for sequence in results:
+            assert sequence.name in golden_kpi
+            assert sequence.length > 0
+            host = SequenceUtils.from_server(sequence)
+            assert host is not None and host != ''
+
+
+def test_tsdb():
+    for metric in golden_kpi:
+        results = TsdbClientFactory.get_tsdb_client().get_current_metric_value(
+            metric_name=metric
+        )
+
+        for sequence in results:
+            assert isinstance(sequence, Sequence)
+
+            from_instance = SequenceUtils.from_server(sequence)
+
+            inner_results = TsdbClientFactory.get_tsdb_client().get_metric_range_data(
+                metric_name=metric,
+                label_config={DISTINGUISHING_INSTANCE_LABEL: from_instance},
+                params={'step': '30s'}
+            )
+            for inner_result in inner_results:
+                assert inner_result.name == metric
+                assert len(inner_result) > 0
+
+
+def test_save_xxx():
+    host = '127.0.0.1'
+    metric_name = 'test_metric'
+
+    sequence = Sequence(tuple(range(0, 100)), tuple(range(100, 200)))
+    dai.save_forecast_sequence(host, metric_name, sequence)
+
+    slow_query = SlowQuery(
+        db_host='10.90.5.172',
+        db_port=1234,
+        schema_name='test_schema',
+        db_name='test_db',
+        query='select sleep(100);',
start_timestamp=1000, + duration_time=2, + hit_rate=0.90, + fetch_rate=1000, + cpu_time=100, + data_io_time=100 + ) + slow_query.add_cause(RootCause.get('LOCK_CONTENTION')) + dai.save_slow_queries([slow_query, slow_query, slow_query]) + diff --git a/src/gausskernel/dbmind/tools/tests/test_detection.py b/src/gausskernel/dbmind/tools/tests/test_detection.py new file mode 100644 index 000000000..cc7d9775c --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_detection.py @@ -0,0 +1,63 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from sklearn.svm import SVR + +from dbmind.common.algorithm.forecasting.simple_forecasting import SupervisedModel +from dbmind.common.algorithm.forecasting import ForecastingFactory +from dbmind.common.types import Sequence + + +linear_seq = Sequence(tuple(range(1, 10)), tuple(range(1, 10))) + + +def roughly_compare(list1, list2, threshold=1): + if len(list1) != len(list2): + return False + for v1, v2 in zip(list1, list2): + if abs(v1 - v2) > threshold: + return False + return True + + +def test_linear_regression(): + linear = ForecastingFactory.get_instance('linear') + linear.fit(linear_seq) + result = linear.forecast(10) + assert result.length == 10 + timestamps, values = result.to_2d_array() + assert tuple(timestamps) == tuple(range(10, 20)) + assert roughly_compare(values, range(10, 20)) + + assert ForecastingFactory.get_instance('linear') is linear + + +def test_supervised_linear_regression(): + linear = SupervisedModel() + linear.fit(linear_seq) + result = linear.forecast(10) + assert result.length == 10 + timestamps, values = result.to_2d_array() + assert tuple(timestamps) == tuple(range(9, 19)) # different from SimpleLinearFitting. + assert roughly_compare(values, range(9, 19)) + + +def test_supervised_svr(): + # WARNING: the SVR model with nonlinear kernel does not work. + svr = SupervisedModel(SVR(kernel='linear', verbose=True, max_iter=100)) + svr.fit(linear_seq) + result = svr.forecast(10) + assert result.length == 10 + timestamps, values = result.to_2d_array() + assert tuple(timestamps) == tuple(range(9, 19)) # different from SimpleLinearFitting. + assert roughly_compare(values, range(9, 19)) + diff --git a/src/gausskernel/dbmind/tools/tests/test_edbmind.py b/src/gausskernel/dbmind/tools/tests/test_edbmind.py new file mode 100644 index 000000000..658d48948 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_edbmind.py @@ -0,0 +1,40 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
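The fit/forecast contract asserted in test_detection.py above is simple: `fit` consumes a series with a regular timestamp step, and `forecast(n)` returns `n` points whose timestamps continue that step while the values follow the fitted trend. A minimal sketch of that contract using ordinary least squares (`TinyLinearForecaster` is a hypothetical stand-in, not dbmind's `SimpleLinearFitting`):

```
# Illustrative sketch of the fit/forecast contract the detection tests assert.
import numpy as np


class TinyLinearForecaster:
    def fit(self, timestamps, values):
        # Ordinary least squares on (timestamp, value) pairs.
        self.slope, self.intercept = np.polyfit(timestamps, values, deg=1)
        self.last_ts = timestamps[-1]
        self.step = timestamps[-1] - timestamps[-2]

    def forecast(self, n):
        # Timestamps continue the input's regular step.
        ts = [self.last_ts + self.step * (i + 1) for i in range(n)]
        return ts, [self.slope * t + self.intercept for t in ts]


model = TinyLinearForecaster()
model.fit(list(range(1, 10)), list(range(1, 10)))  # mirrors linear_seq above
timestamps, values = model.forecast(10)
assert timestamps == list(range(10, 20))
assert all(abs(v - t) <= 1 for v, t in zip(values, timestamps))  # roughly equal
```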
+import os +import shutil +from unittest import mock + +from dbmind import constants +from dbmind.cmd import edbmind +from dbmind.common.dispatcher import task_scheduler + + +def mock_setup_directory(confpath): + os.makedirs(confpath, exist_ok=True) + src_confile = os.path.join(constants.MISC_PATH, constants.CONFILE_NAME) + dst_confile = os.path.join(confpath, constants.CONFILE_NAME) + shutil.copyfile(src_confile, dst_confile) + + +def test_startup(): + confpath = 'tmp' + mock_setup_directory(confpath) + task_scheduler.TimedTaskManager.start = mock.Mock() + assert len(task_scheduler.TimedTaskManager.timers) == 0 + _dbmind = edbmind.DBMindMain(confpath) + _dbmind.daemonize = mock.Mock() + _dbmind.run = mock.Mock() + _dbmind.start() + _dbmind.daemonize.assert_called_once_with() + + # TODO: check whether DBMind bound timed app. diff --git a/src/gausskernel/dbmind/tools/tests/test_exporters.py b/src/gausskernel/dbmind/tools/tests/test_exporters.py new file mode 100644 index 000000000..96a9ae1ef --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_exporters.py @@ -0,0 +1,54 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +from unittest import mock + +from psycopg2.extras import RealDictRow + +from dbmind.common.tsdb.prometheus_client import PrometheusClient +from dbmind.common.types.sequence import Sequence +from dbmind.components.opengauss_exporter.core import controller as oe_controller +from dbmind.components.opengauss_exporter.core.main import main as oe_main +from dbmind.components.reprocessing_exporter.core import controller as re_controller +from dbmind.components.reprocessing_exporter.core.main import main as re_main + + +@mock.patch('psycopg2.connect') +def test_opengauss_exporter(mock_connect): + expected = RealDictRow([('fake1', 1), ('fake2', 2)]) + + mock_con = mock_connect.return_value + mock_cur = mock_con.cursor.return_value + mock_cur_cm = mock_cur.__enter__.return_value + mock_cur_cm.fetchall.return_value = expected + + oe_controller.run = mock.MagicMock() + oe_main(['--url', 'postgres://a:b@127.0.0.1:1234/testdb', '--disable-https']) + oe_controller.run.assert_called_once() + + assert oe_controller.query_all_metrics().startswith(b'# HELP') + + +def test_reprocessing_exporter(): + PrometheusClient.custom_query = mock.MagicMock( + return_value=[Sequence((1, 2, 3), (100, 200, 300), + name='os_cpu_usage', + labels={'from_instance': '127.0.0.1'})] + ) + PrometheusClient.check_connection = mock.Mock(return_value=True) + + re_controller.run = mock.MagicMock() + re_main(['127.0.0.1', '1234', '--disable-https']) + re_controller.run.assert_called_once() + + assert re_controller.query_all_metrics().startswith(b'# HELP') diff --git a/src/gausskernel/dbmind/tools/tests/test_index_advisor_workload.py b/src/gausskernel/dbmind/tools/tests/test_index_advisor_workload.py new file mode 100644 index 000000000..43fe0860c --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_index_advisor_workload.py @@ -0,0 +1,462 @@ +""" +Copyright (c) 2021 Huawei Technologies Co.,Ltd. 
+ +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. + +This file contains test cases for workload index advisor. + +""" + +import os +import io +import shlex +import sys +import json +import unittest +from unittest.mock import patch, mock_open +from collections.abc import Iterable +from collections import defaultdict + +import index_advisor_workload as iaw + + +def hash_any(obj): + try: + return hash(obj) + except: + h = 0 + for item in obj: + h = 31 * h + (hash_any(item) & 255) + return h + + +def list_equal(list1, list2): + def is_iterable(L): + return isinstance(L, Iterable) and not isinstance(L, str) + + def is_nested_list(L): + if not is_iterable(L): + return False + return len(L) > 0 and is_iterable(L[0]) + + assert is_iterable(list1) + + list1_copy = sorted(list1, key=hash_any) + list2_copy = sorted(list2, key=hash_any) + for a, b in zip(list1_copy, list2_copy): + if is_nested_list(a): + return list_equal(a, b) + if a != b: + print("False is ", a, b) + return False + + return True + + +class Case: + expected_sql_frequency = {"select * from student_range_part where credit=1 and stu_id='a_1' and stu_name='b__1'": 1, + "select * from student_range_part1 where student_range_part1.stu_id = '12' " + "and student_range_part1.stu_name='b__1' and credit=1": 3, + "select * from student_range_part where stu_id='w_1'": 1, + "select * from student_range_part where stu_name='q__1' and stu_id='w_1'": 1, + 'select * from student_range_part1 where credit=1': 1} + sql_content = """select * from student_range_part where credit=1 and stu_id='a_1' and stu_name='b__1'; +select * from student_range_part1 where student_range_part1.stu_id = '12' and student_range_part1.stu_name='b__1' and credit=1; +select * from student_range_part1 where student_range_part1.stu_id = '12' and student_range_part1.stu_name='b__1' and credit=1; +select * from student_range_part1 where student_range_part1.stu_id = '12' and student_range_part1.stu_name='b__1' and credit=1; +select * from student_range_part where stu_id='w_1'; +select * from student_range_part where stu_name='q__1' and stu_id='w_1'; +select * from student_range_part1 where credit=1; + """ + config1 = [iaw.IndexItem(tbl='test1', cols='a,b', index_type='global'), + iaw.IndexItem(tbl='test1', cols='c', index_type='global'), + iaw.IndexItem(tbl='test1', cols='d', index_type='local'), ] + + config2 = [iaw.IndexItem(tbl='test1', cols='b', index_type='global'), + iaw.IndexItem(tbl='test1', cols='c', index_type='global'), + iaw.IndexItem(tbl='test1', cols='d', index_type='local'), ] + + config3 = [iaw.IndexItem(tbl='test1', cols='a,b', index_type='global'), + iaw.IndexItem(tbl='test1', cols='c', index_type='global'), + iaw.IndexItem(tbl='test1', cols='d', index_type='local'), + iaw.IndexItem(tbl='test1', cols='e', index_type='local')] + + config4 = [iaw.IndexItem(tbl='test1', cols='a,b', index_type='local'), + iaw.IndexItem(tbl='test1', cols='c', index_type='global'), + iaw.IndexItem(tbl='test1', cols='d', index_type='local'), ] + + class QueryItem: + pass + + class IndexItem: + pass + + indexlist = [{'atomic_pos': 0, + 'benefit': 0, + 'columns': 'stu_name, stu_id, 
credit', + 'delete_sql_num': 0, + 'index_type': 'local', + 'ineffective_pos': [], + 'insert_sql_num': 0, + 'negative_pos': [], + 'positive_pos': None, + 'select_sql_num': 0, + 'storage': 0, + 'table': 'public.student_range_part', + 'total_sql_num': 0, + 'update_sql_num': 0}, + {'atomic_pos': 0, + 'benefit': 0, + 'columns': 'stu_name, credit, stu_id', + 'delete_sql_num': 0, + 'index_type': 'local', + 'ineffective_pos': [], + 'insert_sql_num': 0, + 'negative_pos': [], + 'positive_pos': None, + 'select_sql_num': 0, + 'storage': 0, + 'table': 'public.student_range_part1', + 'total_sql_num': 0, + 'update_sql_num': 0}, + {'atomic_pos': 0, + 'benefit': 0, + 'columns': 'stu_id', + 'delete_sql_num': 0, + 'index_type': 'global', + 'ineffective_pos': [], + 'insert_sql_num': 0, + 'negative_pos': [], + 'positive_pos': None, + 'select_sql_num': 0, + 'storage': 0, + 'table': 'public.student_range_part', + 'total_sql_num': 0, + 'update_sql_num': 0}, + {'atomic_pos': 0, + 'benefit': 0, + 'columns': 'stu_id, stu_name', + 'delete_sql_num': 0, + 'index_type': 'global', + 'ineffective_pos': [], + 'insert_sql_num': 0, + 'negative_pos': [], + 'positive_pos': None, + 'select_sql_num': 0, + 'storage': 0, + 'table': 'public.student_range_part', + 'total_sql_num': 0, + 'update_sql_num': 0}, + {'atomic_pos': 3, + 'benefit': 0, + 'columns': 'stu_id', + 'delete_sql_num': 0, + 'index_type': 'global', + 'ineffective_pos': [0, 3], + 'insert_sql_num': 0, + 'negative_pos': [], + 'positive_pos': [2], + 'select_sql_num': 3.0, + 'storage': 0, + 'table': 'public.student_range_part', + 'total_sql_num': 3.0, + 'update_sql_num': 0}] + queryitemlist = [{'cost_list': [36.0, 8.27, 36.0, 8.27, 8.27], + 'frequency': 1.0, + 'statement': 'select * from student_range_part where credit=1 and ' + "stu_id='a_1' and stu_name='b__1'"}, + {'cost_list': [24.81, 24.81, 24.81, 24.81, 24.81], + 'frequency': 3.0, + 'statement': 'select * from student_range_part1 where ' + "student_range_part1.stu_id = '12' and " + "student_range_part1.stu_name='b__1' and credit=1", }, + {'cost_list': [41.0, 41.0, 41.0, 8.27, 8.27], + 'frequency': 1.0, + 'statement': "select * from student_range_part where stu_id='w_1'"}, + {'cost_list': [46.0, 8.27, 46.0, 8.27, 8.27], + 'frequency': 1.0, + 'statement': "select * from student_range_part where stu_name='q__1' and " + "stu_id='w_1'", }, + {'cost_list': [8.27, 8.27, 8.27, 8.27, 8.27], + 'frequency': 1.0, + 'statement': 'select * from student_range_part1 where credit=1'}] + costlist = [156.08, 90.61999999999999, 156.08, + 57.889999999999986, 57.889999999999986] + display_info = {'recommendIndexes': [], 'workloadCount': 7} + workload = [] + candidate_indexes = [] + candidate_index_list = [{'atomic_pos': 3, + 'benefit': 0, + 'columns': 'stu_id', + 'delete_sql_num': 0, + 'index_type': 'global', + 'ineffective_pos': [0, 3], + 'insert_sql_num': 0, + 'negative_pos': [], + 'positive_pos': [2], + 'select_sql_num': 3.0, + 'storage': 0, + 'table': 'public.student_range_part', + 'total_sql_num': 3.0, + 'update_sql_num': 0}] + for index in candidate_index_list: + candidate_index = IndexItem() + for attr, value in index.items(): + setattr(candidate_index, attr, value) + candidate_indexes.append(candidate_index) + + for query, index in zip(queryitemlist, indexlist): + queryitem = QueryItem() + indexitem = IndexItem() + for attr, value in query.items(): + setattr(queryitem, attr, value) + for attr, value in index.items(): + setattr(indexitem, attr, value) + queryitem.index_list = [indexitem] + workload.append(queryitem) + + +class 
IndexAdvisorTester(unittest.TestCase): + + def test_get_indexable_columns(self): + tables = 'table1 table2 table2 table3 table3 table3'.split() + columns = 'col1,col2 col2 col3 col1,col2 col2,col3 col2,col5'.split() + index_types = 'global local global global local local'.split() + table_index_dict = defaultdict(list) + for table, column, index_type in zip(tables, columns, index_types): + table_index_dict[table].append((column, index_type)) + expected_query_indexable_columns = {'table1': [('col1', 'global'), ('col2', 'global')], + 'table2': [('col2', 'local'), ('col3', 'global')], + 'table3': [('col1', 'global'), ('col2', 'global'), ('col2', 'local'), + ('col3', 'local'), ('col5', 'local')] + } + query_indexable_columns = iaw.get_indexable_columns(table_index_dict) + self.maxDiff = None + for table in expected_query_indexable_columns: + self.assertTrue(list_equal( + expected_query_indexable_columns[table], query_indexable_columns[table])) + + def test_generate_atomic_config(self): + queryitem1 = iaw.QueryItem('test', 0) + queryitem2 = iaw.QueryItem('test', 0) + queryitem3 = iaw.QueryItem('test', 0) + queryitem4 = iaw.QueryItem('test', 0) + queryitem1.valid_index_list = [iaw.IndexItem('table1', 'col1,col2', index_type='local'), + iaw.IndexItem( + 'table2', 'col1,col3', index_type='global'), + iaw.IndexItem( + 'table3', 'col1,col3', index_type='global') + ] + queryitem2.valid_index_list = [iaw.IndexItem('table1', 'col1,col2', index_type='local'), + iaw.IndexItem( + 'table1', 'col2,col3', index_type='global'), + ] + queryitem3.valid_index_list = [iaw.IndexItem('table4', 'col1,col2', index_type=''), + iaw.IndexItem( + 'table4', 'col3', index_type=''), + ] + queryitem4.valid_index_list = [] + atomic_config_total = iaw.generate_atomic_config( + [queryitem1, queryitem2, queryitem3, queryitem4]) + table_combinations_list = [] + cols_combinations_list = [] + index_type_combinations_list = [] + for combinations in atomic_config_total: + table_combination = [] + cols_combination = [] + index_type_combination = [] + for indexitem in combinations: + table_combination.append(indexitem.table) + cols_combination.append(indexitem.columns) + index_type_combination.append(indexitem.index_type) + table_combinations_list.append(table_combination) + cols_combinations_list.append(cols_combination) + index_type_combinations_list.append(index_type_combination) + expected_table_comination_list = [[], ['table1'], ['table2'], ['table3'], ['table1', 'table2'], + ['table1', 'table3'], ['table2', 'table3'], [ + 'table1', 'table2', 'table3'], + ['table1'], ['table1', 'table1'], [ + 'table4'], ['table4'], ['table4', 'table4'] + ] + expected_cols_combinations_list = [[], ['col1,col2'], ['col1,col3'], ['col1,col3'], ['col1,col2', 'col1,col3'], + ['col1,col2', 'col1,col3'], [ + 'col1,col3', 'col1,col3'], + ['col1,col2', 'col1,col3', 'col1,col3'], [ + 'col2,col3'], ['col1,col2', 'col2,col3'], + ['col1,col2'], ['col3'], [ + 'col1,col2', 'col3'] + ] + expected_index_type_combinations_list = [[], ['local'], ['global'], ['global'], ['local', 'global'], ['local', 'global'], + ['global', 'global'], [ + 'local', 'global', 'global'], ['global'], + ['local', 'global'], [ + ''], [''], ['', ''] + ] + self.maxDiff = None + self.assertTrue(list_equal(table_combinations_list, + expected_table_comination_list)) + self.assertTrue(list_equal(cols_combinations_list, + expected_cols_combinations_list)) + self.assertTrue(list_equal(index_type_combinations_list, + expected_index_type_combinations_list)) + + def test_find_subsets_num(self): + # 
not contain new index config + with self.assertRaises(ValueError): + iaw.find_subsets_num(Case.config1, [[], Case.config1]) + atomic_subsets_num, cur_index_atomic_pos = iaw.find_subsets_num([Case.config1[0]], + [[Case.config1[0]], Case.config1, Case.config2, Case.config4]) + expected_atomic_subsets_num = [0] + expected_cur_index_atomic_pos = 0 + self.assertEqual((expected_atomic_subsets_num, expected_cur_index_atomic_pos), + (atomic_subsets_num, cur_index_atomic_pos)) + atomic_subsets_num, cur_index_atomic_pos = iaw.find_subsets_num(Case.config3, + [[Case.config3[-1]], Case.config1, Case.config3]) + expected_atomic_subsets_num = [0, 2] + expected_cur_index_atomic_pos = 0 + self.assertEqual((expected_atomic_subsets_num, expected_cur_index_atomic_pos), + (atomic_subsets_num, cur_index_atomic_pos)) + # Case.config3 and Case.config4 containing same index with different index_type + atomic_subsets_num, cur_index_atomic_pos = \ + iaw.find_subsets_num( + Case.config3, [[Case.config3[-1]], Case.config1, Case.config3, Case.config4]) + expected_atomic_subsets_num = [0, 2] + expected_cur_index_atomic_pos = 0 + self.assertEqual((expected_atomic_subsets_num, expected_cur_index_atomic_pos), + (atomic_subsets_num, cur_index_atomic_pos)) + + def test_is_same_config(self): + self.assertTrue(iaw.is_same_config(Case.config1, Case.config1)) + self.assertFalse(iaw.is_same_config(Case.config1, Case.config2)) + self.assertFalse(iaw.is_same_config(Case.config1, Case.config3)) + self.assertFalse(iaw.is_same_config(Case.config1, Case.config4)) + + def test_load_workload(self): + with patch('index_advisor_workload.open', mock_open(read_data=Case.sql_content)) as m: + workload = iaw.load_workload('testfile') + sql_frequency = [] + for item in workload: + sql_frequency.append((item.statement, item.frequency)) + self.maxDiff = True + self.assertDictEqual(dict(sql_frequency), Case.expected_sql_frequency) + + def test_get_workload_template(self): + workload = [] + for sql, frequency in Case.expected_sql_frequency.items(): + workload.append(iaw.QueryItem(sql, frequency)) + expected_templates = {'select * from student_range_part where credi@@@ and stu_id@@@ and stu_name@@@': + {'cnt': 1, 'samples': ["select * from student_range_part where credit=1 " + "and stu_id='a_1' and stu_name='b__1'"]}, + + 'select * from student_range_part1 where student_range_part1.stu_id =@@@ ' + 'and student_range_part1.stu_name@@@ and credi@@@': + {'cnt': 3, 'samples': ["select * from student_range_part1 where " + "student_range_part1.stu_id = '12' and student_range_part1.stu_name='b__1' and credit=1"]}, + 'select * from student_range_part where stu_id@@@': + {'cnt': 1, 'samples': [ + "select * from student_range_part where stu_id='w_1'"]}, + 'select * from student_range_part where stu_name@@@ and stu_id@@@': + {'cnt': 1, 'samples': ["select * from student_range_part where stu_name='q__1' " + "and stu_id='w_1'"]}, + 'select * from student_range_part1 where credi@@@': + {'cnt': 1, 'samples': ['select * from student_range_part1 where credit=1']}} + templates = iaw.get_workload_template(workload) + self.assertDictEqual(templates, expected_templates) + + def test_workload_compression(self): + with patch('index_advisor_workload.open', mock_open(read_data=Case.sql_content)) as m: + compressed_workload, total_num = iaw.workload_compression('test') + expected_total_num = 7 + self.assertEqual(total_num, expected_total_num) + + def test_parse_plan_cost(self): + planlist = ['Partition Iterator (cost=0.00..36.00 rows=1 width=19)', + 'Index Scan using 
student_range_part1_credit_tableoid_idx on student_range_part1 ' + ' (cost=0.00..8.27 rows=1 width=19)', + 'Index Scan using <142374>btree_global_student_range_part_stu_id on student_range_part' + ' (cost=0.00..8.27 rows=1 width=19)', + 'Partition Iterator (cost=0.00..36.00 rows=1 width=19)', + 'Partition Iterator (cost=0.00..8.27 rows=1 width=19)'] + expected_costlist = [36, 8.27, 8.27, 36.00, 8.27] + for subplan, expected_cost in zip(planlist, expected_costlist): + self.assertEqual( + expected_cost, iaw.GSqlExecute.parse_plan_cost(subplan)) + + def test_record_info(self): + expected_recommend = {'columns': 'stu_id', + 'deleteRatio': 0.0, + 'dmlCount': 3, + 'index_type': 'global', + 'insertRatio': 0.0, + 'schemaName': 'public', + 'selectRatio': 100.0, + 'sqlDetails': [{'correlationType': 0, + 'sql': 'select * from student_range_part where credit=1 and ' + "stu_id='a_1' and stu_name='b__1'", + 'sqlCount': 2, + 'sqlTemplate': 'select * from student_range_part where ' + 'credit? and stu_id=? and stu_name=?'}, + {'correlationType': 0, + 'sql': 'select * from student_range_part where ' + "stu_name='q__1' and stu_id='w_1'", + 'sqlCount': 2, + 'sqlTemplate': 'select * from student_range_part where ' + 'stu_name=? and stu_id=?'}, + {'correlationType': 1, + 'optimized': '3.958', + 'sql': "select * from student_range_part where stu_id='w_1'", + 'sqlCount': 1, + 'sqlTemplate': 'select * from student_range_part where ' + 'stu_id=?'}], + 'statement': 'CREATE INDEX idx_student_range_part_global_stu_id ON ' + 'public.student_range_part(stu_id) global;', + 'tbName': 'student_range_part', + 'updateRatio': 0.0, + 'workloadOptimized': '62.91'} + sql_info = dict() + advisor = iaw.IndexAdvisor('db', Case.workload, False) + advisor.display_detail_info = dict() + advisor.display_detail_info['recommendIndexes'] = [] + advisor.index_cost_total = Case.costlist + advisor.record_info(Case.candidate_indexes[0], + sql_info, 3, 'student_range_part', + 'CREATE INDEX idx_student_range_part_global_stu_id ON ' + 'public.student_range_part(stu_id) global;') + + for key, value in advisor.display_detail_info['recommendIndexes'][0].items(): + self.assertEqual(expected_recommend[key], value) + + def test_remote(self): + if not os.path.exists('remote.json'): + print("Not found remote.json file so not tested for remote.") + return + with open('remote.json') as f: + config = json.load(f) + cmd = config['cmd'] + pwd = config['pwd'] + sys.stdin = io.IOBase() + mock_r, mock_w = os.pipe() + os.write(mock_w, 'mock text'.encode()) + sys.stdin.fileno = lambda: mock_r + sys.stdin.readable = lambda: True + sys.stdin.read = lambda: pwd + sys.argv[1:] = shlex.split(cmd) + ret = iaw.main() + if '--driver' in cmd: + sys.argv[1:] = shlex.split(cmd.replace('--driver', '')) + else: + sys.argv[1:] = shlex.split(cmd + '--driver') + ret = iaw.main() + + +if __name__ == '__main__': + unittest.main() diff --git a/src/gausskernel/dbmind/tools/tests/test_metadatabase.py b/src/gausskernel/dbmind/tools/tests/test_metadatabase.py new file mode 100644 index 000000000..7324b06a3 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_metadatabase.py @@ -0,0 +1,106 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import os +import time + +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + +from dbmind.constants import DYNAMIC_CONFIG +from dbmind.metadatabase import business_db, Base +from dbmind.metadatabase import create_dynamic_config_schema +from dbmind.metadatabase.dao.dynamic_config import dynamic_config_get, dynamic_config_set +from dbmind.metadatabase.dao.forecasting_metrics import * +from dbmind.metadatabase.dao.slow_queries import * + +# Clear the last testing db. +os.path.exists('test_metadatabase.db') and os.remove('test_metadatabase.db') +os.path.exists(DYNAMIC_CONFIG) and os.remove(DYNAMIC_CONFIG) + +engine = create_engine('sqlite:///test_metadatabase.db') +session_maker = sessionmaker(autocommit=False, autoflush=False, bind=engine) + +business_db.session_clz.update( + engine=engine, + session_maker=session_maker, + db_type='sqlite' +) + +Base.metadata.create_all(engine) + + +def test_slow_queries(): + insert_slow_query('schema', 'db0', 'query0', int(time.time() * 1000), 10) + insert_slow_query('schema', 'db0', 'query1', int(time.time() * 1000), 11) + insert_slow_query('schema', 'db0', 'query1', int(time.time() * 1000), 11) + insert_slow_query('schema', 'db0', 'query1', int(time.time() * 1000), 11) + count = 0 + for query in select_slow_queries(): + count += 1 + assert query.schema_name == 'schema' + assert query.db_name == 'db0' + assert query.start_at <= int(time.time() * 1000) + assert count == count_slow_queries() + truncate_slow_queries() + assert count_slow_queries() == 0 + + +def test_forecasting_metrics(): + truncate_forecasting_metrics() # clear + + batch_insert_forecasting_metric( + metric_name='metric0', + host_ip='127.0.0.1', + metric_value=tuple(range(0, 1000)), + metric_time=tuple(range(0, 1000)) + ) + batch_insert_forecasting_metric( + metric_name='metric1', + host_ip='127.0.0.1', + metric_value=tuple(range(0, 1000)), + metric_time=tuple(range(0, 1000)) + ) + batch_insert_forecasting_metric( + metric_name='metric2', + host_ip='127.0.0.1', + metric_value=tuple(range(0, 1000)), + metric_time=tuple(range(0, 1000)) + ) + assert count_forecasting_metric(metric_name='metric0') == 1000 + assert count_forecasting_metric() == 1000 * 3 + for i, metric in enumerate(select_forecasting_metric( + metric_name='metric1', host_ip='127.0.0.1', + min_metric_time=500, max_metric_time=800 + )): + assert metric.metric_value == 500 + i + delete_timeout_forecasting_metrics(oldest_metric_time=500) + assert count_forecasting_metric() == 1000 * 3 // 2 + truncate_forecasting_metrics() + assert count_forecasting_metric() == 0 + + +def test_dynamic_config_db(): + create_dynamic_config_schema() + assert dynamic_config_get('slow_sql_threshold', 'cpu_usage_limit') == 0.5 + dynamic_config_set('slow_sql_threshold', 'cpu_usage_limit', 1) + assert dynamic_config_get('slow_sql_threshold', 'cpu_usage_limit') == 1 + + dynamic_config_set('slow_sql_threshold', 'no_this_name', 1) + assert dynamic_config_get('slow_sql_threshold', 'no_this_name') == 1 + + try: + dynamic_config_set('no_this_table', 'no_this_name', 1) + except AssertionError: + pass + else: + assert False diff --git a/src/gausskernel/dbmind/tools/tests/test_query_source.py 
b/src/gausskernel/dbmind/tools/tests/test_query_source.py new file mode 100644 index 000000000..b52a218eb --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_query_source.py @@ -0,0 +1,286 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from typing import List +from unittest import mock + +from dbmind.app.diagnosis.query.slow_sql import query_info_source +from dbmind.app.diagnosis.query.slow_sql.query_info_source import TableStructure, DatabaseInfo, LockInfo, SystemInfo +from dbmind.common.types import Sequence +from dbmind.common.types.misc import SlowQuery +from dbmind.service import dai +from dbmind.service.dai import _AbstractLazyFetcher + + +def test_table_structure(): + table_info = TableStructure() + table_info.db_host = '127.0.0.1' + table_info.db_port = '8080' + table_info.db_name = 'user' + table_info.schema_name = 'public' + table_info.table_name = 'table_1' + table_info.dead_tuples = 1000 + table_info.live_tuples = 100000 + table_info.dead_rate = 0.9 + table_info.last_autovacuum = None + table_info.last_autoanalyze = None + table_info.vacuum = None + table_info.analyze = None + table_info.table_size = 10000 + table_info.index = ['index1', 'index2'] + table_info.redundant_index = ['redundant_index'] + assert table_info.db_host == '127.0.0.1' + assert table_info.db_port == '8080' + assert table_info.db_name == 'user' + assert table_info.schema_name == 'public' + assert table_info.table_name == 'table_1' + assert table_info.dead_tuples == 1000 + assert table_info.live_tuples == 100000 + assert table_info.dead_rate == 0.9 + assert table_info.last_autovacuum is None + assert table_info.last_autoanalyze is None + assert table_info.vacuum is None + assert table_info.analyze is None + assert table_info.table_size == 10000 + assert table_info.index == ['index1', 'index2'] + assert table_info.redundant_index == ['redundant_index'] + + +def test_database_info(): + database_info = DatabaseInfo() + assert database_info.db_host is None + assert database_info.db_port is None + assert database_info.history_tps == [] + assert database_info.current_tps == [] + assert database_info.max_conn == 0 + assert database_info.used_conn == 0 + + +def test_lock_info(): + lock_info = LockInfo() + assert lock_info.db_host is None + assert lock_info.db_port is None + assert lock_info.locked_query == [] + assert lock_info.locked_query_start == [] + assert lock_info.locker_query == [] + assert lock_info.locker_query_end == [] + + +def test_system_info(): + system_info = SystemInfo() + system_info.db_host = '127.0.0.1' + system_info.db_port = '8080' + system_info.iops = 100 + system_info.ioutils = {} + system_info.iocapacity = 1.0 + system_info.iowait = 2.0 + system_info.cpu_usage = 3.0 + system_info.mem_usage = 4.0 + system_info.load_average = 5.0 + assert system_info.db_host == '127.0.0.1' + assert system_info.db_port == '8080' + assert system_info.iops == 100 + assert system_info.ioutils == {} + assert system_info.iocapacity == 1.0 + assert system_info.iowait == 2.0 + assert system_info.cpu_usage == 3.0 + assert 
system_info.mem_usage == 4.0 + assert system_info.load_average == 5.0 + + +pg_class_relsize_dict = {'datname': 'database1', 'nspname': 'schema1', 'relname': 'table1', 'relkind': 'r'} +pg_lock_sql_locked_times_dict = {'locked_query': 'update table2 set age=20 where id=3', + 'locked_query_start': 1640139695, + 'locker_query': 'delete from table2 where id=3', + 'locker_query_start': 1640139690} +pg_tables_expansion_rate_dead_rate_dict = {'datname': 'database1', 'schemaname': 'schema1', 'relname': 'table1', + 'n_live_tup': 10000, + 'n_dead_tup': 100, 'dead_rate': 0.01, 'last_vacuum': None, + 'last_autovacuum': None, + 'last_analyze': None, 'last_autoanalyze': None} +pg_tables_size_bytes_dict = {'datname': 'database1', 'nspname': 'schema1', 'relname': 'tables'} +pg_index_idx_scan_dict = {'datname': 'database1', 'nspname': 'schema1', 'tablename': 'table1', 'relname': 'index1'} +pg_never_used_indexes_index_size_dict = {'datname': 'database1', 'schemaname': 'schema1', 'relname': 'table1', + 'indexrelname': 'table_index1'} +gaussdb_qps_by_instance_dict = {'instance': '127.0.0.1:5432'} +pg_connections_max_conn_dict = {'instance': '127.0.0.1:5432'} +pg_connections_used_conn_dict = {'instance': '127.0.0.1:5432'} +os_disk_iops_dict = {'instance': '127.0.0.1:5432'} +os_disk_ioutils_dict = {'instance': '127.0.0.1:5432', 'device': 'sdm-0'} +os_cpu_iowait_dict = {'instance': '127.0.0.1:5432'} +os_disk_iocapacity_dict = {'instance': '127.0.0.1:5432'} +os_cpu_usage_rate_dict = {'instance': '127.0.0.1:5432'} +os_mem_usage_dict = {'instance': '127.0.0.1:5432'} +node_load1_dict = {'instance': '127.0.0.1:5432'} + +pg_class_relsize_seq = Sequence(timestamps=(1640139695000,), + values=(1000,), + name='pg_class_relsize', + step=5, + labels=pg_class_relsize_dict) + +pg_lock_sql_locked_times_seq = Sequence(timestamps=(1640139695000,), + values=(1000,), + name='pg_lock_sql_locked_times', + step=5, + labels=pg_lock_sql_locked_times_dict) + +pg_tables_expansion_rate_dead_rate_seq = Sequence(timestamps=(1640139695000, 1640139700000, 1640139705000), + values=(0.1, 0.2, 0.3), + name='pg_tables_expansion_rate_dead_rate', + step=5, + labels=pg_tables_expansion_rate_dead_rate_dict) + +pg_tables_size_bytes_seq = Sequence(timestamps=(1640139695000,), + values=(10,), + name='pg_tables_size_bytes', + step=5, + labels=pg_tables_size_bytes_dict) + +pg_index_idx_scan_seq = Sequence(timestamps=(1640139695000,), + values=(10000,), + name='pg_index_idx_scan', + step=5, + labels=pg_index_idx_scan_dict) + +pg_never_used_indexes_index_size_seq = Sequence(timestamps=(1640139695000,), + values=(0,), + name='pg_never_used_indexes_index_size', + step=5, + labels=pg_never_used_indexes_index_size_dict) + +gaussdb_qps_by_instance_seq = Sequence(timestamps=(1640139695000,), + values=(1000,), + name='gaussdb_qps_by_instance', + step=5, + labels=gaussdb_qps_by_instance_dict) + +pg_connections_max_conn_seq = Sequence(timestamps=(1640139695000,), + values=(100,), + name='pg_connections_max_conn', + step=5, + labels=pg_connections_max_conn_dict) + +pg_connections_used_conn_seq = Sequence(timestamps=(1640139695000,), + values=(10,), + name='pg_connections_used_conn', + step=5, + labels=pg_connections_used_conn_dict) + +os_disk_iops_seq = Sequence(timestamps=(1640139695000, 1640139700000, 1640139705000), + values=(1000, 1000, 1000), + name='os_disk_iops', + step=5, + labels=os_disk_iops_dict) + +os_disk_ioutils_seq = Sequence(timestamps=(1640139695000, 1640139700000, 1640139705000), + values=(0.5, 0.3, 0.2), + name='os_disk_ioutils', + step=5, + 
labels=os_disk_ioutils_dict) + +os_cpu_iowait_seq = Sequence(timestamps=(1640139695000,), + values=(0.15,), + name='os_cpu_iowait', + step=5, + labels=os_cpu_iowait_dict) + +os_disk_iocapacity_seq = Sequence(timestamps=(1640139695000,), + values=(200,), + name='os_disk_iocapacity', + step=5, + labels=os_disk_iocapacity_dict) + +os_cpu_usage_rate_seq = Sequence(timestamps=(1640139695000,), + values=(0.2,), + name='os_cpu_usage', + step=5, + labels=os_cpu_usage_rate_dict) + +os_mem_usage_seq = Sequence(timestamps=(1640139695000,), + values=(0.2,), + name='os_mem_usage', + step=5, + labels=os_mem_usage_dict) + +node_load1_seq = Sequence(timestamps=(1640139695000,), + values=(0.3,), + name='node_load1', + step=5, + labels=node_load1_dict) + + +class MockedFetcher(_AbstractLazyFetcher): + def __init__(self, metric, start_time=None, end_time=None): + super().__init__() + self.metric = metric + self.start_time = start_time + self.end_time = end_time + + def _real_fetching_action(self) -> List[Sequence]: + self.metric = f"{self.metric}_seq" + return [globals().get(self.metric, None)] + + +dai.get_latest_metric_sequence = mock.Mock(side_effect=lambda x, y: MockedFetcher(metric=x)) +dai.get_metric_sequence = mock.Mock(side_effect=lambda x, y, z: MockedFetcher(metric=x)) + + +def test_query_source(): + # lock of tables_name + slow_sql_instance = SlowQuery(db_host='127.0.0.1', db_port='8080', db_name='database1', schema_name='public', + query='select count(*) from schema1.table1', start_timestamp=1640139690, + duration_time=1000, + hit_rate=0.99, fetch_rate=0.98, cpu_time=14200, data_io_time=1231243, + template_id=12432453234, + sort_count=13, sort_mem_used=12.43, sort_spill_count=3, hash_count=0, hash_mem_used=0, + hash_spill_count=0, lock_wait_count=0, lwlock_wait_count=0, n_returned_rows=1, + n_tuples_returned=100000, n_tuples_fetched=0, n_tuples_deleted=0, n_tuples_inserted=0, + n_tuples_updated=0) + slow_sql_instance.tables_name = {'schema1': ['table1']} + query_source = query_info_source.QueryContext(slow_sql_instance) + pg_class = query_source.acquire_pg_class() + pg_lock_sql = query_source.acquire_lock_info() + pg_tables_structure = query_source.acquire_tables_structure_info() + database_info = query_source.acquire_database_info() + fetch_interval = query_source.acquire_fetch_interval() + system_info = query_source.acquire_system_info() + assert pg_class.get('db_host') == '127.0.0.1' + assert pg_class.get('db_port') == '8080' + assert pg_class.get('database1') == {'schema1': ['table1']} + assert pg_lock_sql.locked_query[0] == 'update table2 set age=20 where id=3' + assert pg_lock_sql.locked_query_start[0] == 1640139695 + assert pg_lock_sql.locker_query[0] == 'delete from table2 where id=3' + assert pg_lock_sql.locker_query_start[0] == 1640139690 + assert len(pg_tables_structure) == 1 + assert pg_tables_structure[0].db_name == 'database1' + assert pg_tables_structure[0].schema_name == 'schema1' + assert pg_tables_structure[0].table_name == 'table1' + assert pg_tables_structure[0].dead_rate == 0.3 + assert pg_tables_structure[0].dead_tuples == 100 + assert pg_tables_structure[0].live_tuples == 10000 + assert pg_tables_structure[0].last_autovacuum == 0 + assert pg_tables_structure[0].last_autoanalyze == 0 + assert pg_tables_structure[0].analyze == 0 + assert pg_tables_structure[0].vacuum == 0 + assert database_info.history_tps[0] == 1000.0 + assert database_info.current_tps[0] == 1000.0 + assert database_info.max_conn == 100 + assert database_info.used_conn == 10 + assert system_info.iops == 1000 
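+ # Expected values below mirror the mocked sequences defined above; note that ioutils is keyed by the 'device' label carried on os_disk_ioutils_seq.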
+ assert system_info.ioutils == {'sdm-0': 0.5} + assert system_info.iocapacity == 200 + assert system_info.iowait == 0.15 + assert system_info.cpu_usage == 0.2 + assert system_info.mem_usage == 0.2 + assert system_info.load_average == 0.3 + assert fetch_interval == 5 diff --git a/src/gausskernel/dbmind/tools/tests/test_security.py b/src/gausskernel/dbmind/tools/tests/test_security.py new file mode 100644 index 000000000..c0c1a8e46 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_security.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import random + +from dbmind.common import security + + +def test_encryption_and_decryption(): + for _ in range(100): + s1 = security.safe_random_string(16) + s2 = security.safe_random_string(16) + # Verify that the round trip copes with plaintext of varying, not fixed, length. + plain = security.unsafe_random_string(random.randint(0, 64)) + iv = security.generate_an_iv() + cipher = security.encrypt(s1, s2, iv, plain) + decrypted_text = security.decrypt(s1, s2, iv, cipher) + assert plain == decrypted_text diff --git a/src/gausskernel/dbmind/tools/tests/test_signifinance_detction.py b/src/gausskernel/dbmind/tools/tests/test_signifinance_detction.py new file mode 100644 index 000000000..58dc2df50 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_signifinance_detction.py @@ -0,0 +1,40 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details.
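+# Every strategy below gets the same two fixtures: an obvious tenfold jump (big_*) that must be reported as significant, and a mild 1.2x drift (small_*) that must not.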
+from dbmind.app.diagnosis.query.slow_sql.significance_detection import average_base, ks_base, sum_base + +big_current_data = [10, 10, 10, 10, 10] +big_history_data = [1, 1, 1, 1, 1] + +small_current_data = [1.2, 1.2, 1.2, 1.2, 1.2] +small_history_data = [1, 1, 1, 1, 1] + + +def test_average_base(): + check_res_1 = average_base.detect(big_current_data, big_history_data) + check_res_2 = average_base.detect(small_current_data, small_history_data) + assert check_res_1 + assert not check_res_2 + + +def test_ks_base(): + check_res_1 = ks_base.detect(big_current_data, big_history_data) + check_res_2 = ks_base.detect(small_current_data, small_history_data) + assert check_res_1 + assert not check_res_2 + + +def test_sum_base(): + check_res_1 = sum_base.detect(big_current_data, big_history_data) + check_res_2 = sum_base.detect(small_current_data, small_history_data) + assert check_res_1 + assert not check_res_2 diff --git a/src/gausskernel/dbmind/tools/tests/test_slow_sql_analyzer.py b/src/gausskernel/dbmind/tools/tests/test_slow_sql_analyzer.py new file mode 100644 index 000000000..532c74851 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_slow_sql_analyzer.py @@ -0,0 +1,476 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import configparser +from typing import List +from unittest import mock + +import numpy as np + +from dbmind.app.diagnosis.query.slow_sql import analyzer +from dbmind.app.diagnosis.query.slow_sql import query_feature +from dbmind.app.diagnosis.query.slow_sql import query_info_source +from dbmind.app.diagnosis.query.slow_sql.analyzer import SlowSQLAnalyzer +from dbmind.app.diagnosis.query.slow_sql.featurelib import load_feature_lib, get_feature_mapper +from dbmind.app.diagnosis.query.slow_sql.query_info_source import TableStructure, DatabaseInfo, LockInfo, SystemInfo +from dbmind.common.types import Sequence +from dbmind.service import dai +from dbmind.app.diagnosis.query.slow_sql.significance_detection import average_base, ks_base, sum_base +from dbmind.service.dai import _AbstractLazyFetcher + + +big_current_data = [10, 10, 10, 10, 10] +big_history_data = [1, 1, 1, 1, 1] + +small_current_data = [2, 1, 3, 1, 0] +small_history_data = [1, 1, 1, 1, 1] + +ilegal_current_data = [] +ilegal_history_data = [1, 1, 1, 1, 1] + + +configs = configparser.ConfigParser() +configs.add_section('slow_sql_threshold') +configs.set('slow_sql_threshold', 'tuple_number_limit', '1000') +configs.set('slow_sql_threshold', 'dead_rate_limit', '0.2') +configs.set('slow_sql_threshold', 'fetch_tuples_limit', '1000') +configs.set('slow_sql_threshold', 'fetch_rate_limit', '0.6') +configs.set('slow_sql_threshold', 'returned_rows_limit', '1000') +configs.set('slow_sql_threshold', 'returned_rate_limit', '0.6') +configs.set('slow_sql_threshold', 'hit_rate_limit', '0.9') +configs.set('slow_sql_threshold', 'updated_tuples_limit', '1000') +configs.set('slow_sql_threshold', 'updated_rate_limit', '0.6') +configs.set('slow_sql_threshold', 'inserted_tuples_limit', '1000') +configs.set('slow_sql_threshold', 'inserted_rate_limit', 
'0.6') +configs.set('slow_sql_threshold', 'index_number_limit', '3') +configs.set('slow_sql_threshold', 'deleted_tuples_limit', '1000') +configs.set('slow_sql_threshold', 'deleted_rate_limit', '0.6') +configs.set('slow_sql_threshold', 'tps_limit', '100') +configs.set('slow_sql_threshold', 'iops_limit', '100') +configs.set('slow_sql_threshold', 'iowait_limit', '0.2') +configs.set('slow_sql_threshold', 'ioutils_limit', '0.8') +configs.set('slow_sql_threshold', 'iocapacity_limit', '100') +configs.set('slow_sql_threshold', 'cpu_usage_limit', '0.5') +configs.set('slow_sql_threshold', 'load_average_rate_limit', '0.5') + +query_feature._get_threshold = mock.Mock(side_effect=lambda x: configs.getfloat('slow_sql_threshold', x)) + + +simple_slow_sql_dict = {'from_instance': '127.0.0.1:5432', 'datname': 'database1', 'schema': 'public', + 'query': 'select count(*) from schema1.table1', 'start_time': '1640139690000', + 'finish_time': '1640139700000', 'hit_rate': '0.988', 'fetch_rate': '0.99', 'cpu_time': '14200', + 'data_io_time': '1231243', 'unique_query_id': '12432453234', 'sort_count': '13', + 'sort_mem_used': '12.43', 'sort_spill_count': '3', 'hash_count': '0', 'hash_mem_used': '0', + 'hash_spill_count': '0', 'lock_wait_count': '0', 'lwlock_wait_count': '0', + 'n_returned_rows': '1', 'n_tuples_returned': '100000', 'n_tuples_fetched': '0', + 'n_tuples_inserted': '0', 'n_tuples_updated': '0', 'n_tuples_deleted': 0} +simple_slow_sql_seq = Sequence(timestamps=[1640139695000], + values=[101], + name='pg_sql_statement_history_exec_time', + step=5, + labels=simple_slow_sql_dict) + +complex_slow_sql_dict = {'from_instance': '127.0.0.1:5432', 'datname': 'database1', 'schema': 'public', + 'query': 'update schema1.table1 set age=30 where id=3', 'start_time': '1640139690000', + 'finish_time': '1640139700000', 'hit_rate': '0.899', 'fetch_rate': '0.99', 'cpu_time': '14200', + 'data_io_time': '1231243', 'unique_query_id': '12432453234', 'sort_count': '0', + 'sort_mem_used': '0', 'sort_spill_count': '0', 'hash_count': '0', 'hash_mem_used': '0', + 'hash_spill_count': '0', 'lock_wait_count': '2', 'lwlock_wait_count': '3', + 'n_returned_rows': '100000', 'n_tuples_returned': '100000', 'n_tuples_fetched': '100000', + 'n_tuples_inserted': '0', 'n_tuples_updated': '100000', 'n_tuples_deleted': 0} +complex_slow_sql_seq = Sequence(timestamps=[1640139695000], + values=[101], + name='pg_sql_statement_history_exec_time', + step=5, + labels=complex_slow_sql_dict) + +commit_slow_sql_dict = {'from_instance': '127.0.0.1:5432', 'datname': 'database1', 'schema': 'public', + 'query': 'COMMIT', 'start_time': '1640139790000', + 'finish_time': '1640139700000', 'hit_rate': '0.988', 'fetch_rate': '0.99', 'cpu_time': '14200', + 'data_io_time': '1231243', 'unique_query_id': '12432453234', 'sort_count': '13', + 'sort_mem_used': '12.43', 'sort_spill_count': '3', 'hash_count': '0', 'hash_mem_used': '0', + 'hash_spill_count': '0', 'lock_wait_count': '0', 'lwlock_wait_count': '0', + 'n_returned_rows': '1', 'n_tuples_returned': '100000', 'n_tuples_fetched': '0', + 'n_tuples_inserted': '0', 'n_tuples_updated': '0', 'n_tuples_deleted': 0} +commit_slow_sql_seq = Sequence(timestamps=[1640139695000], + values=[101], + name='pg_sql_statement_history_exec_time', + step=5, + labels=commit_slow_sql_dict) + +system_slow_sql_dict = {'from_instance': '127.0.0.1:5432', 'datname': 'database1', 'schema': 'public', + 'query': 'select * from PG_SETTINGS', 'start_time': '1640139890000', + 'finish_time': '1640139700000', 'hit_rate': '0.988', 'fetch_rate': '0.99', 
'cpu_time': '14200', + 'data_io_time': '1231243', 'unique_query_id': '12432453234', 'sort_count': '13', + 'sort_mem_used': '12.43', 'sort_spill_count': '3', 'hash_count': '0', 'hash_mem_used': '0', + 'hash_spill_count': '0', 'lock_wait_count': '0', 'lwlock_wait_count': '0', + 'n_returned_rows': '1', 'n_tuples_returned': '100000', 'n_tuples_fetched': '0', + 'n_tuples_inserted': '0', 'n_tuples_updated': '0', 'n_tuples_deleted': 0} +system_slow_sql_seq = Sequence(timestamps=[1640139695000], + values=[101], + name='pg_sql_statement_history_exec_time', + step=5, + labels=system_slow_sql_dict) + +shield_slow_sql_dict = {'from_instance': '127.0.0.1:5432', 'datname': 'database1', 'schema': 'public', + 'query': 'CREATE TABLE table1(id int, name varchar(10))', 'start_time': '1640149690000', + 'finish_time': '1640139700000', 'hit_rate': '0.988', 'fetch_rate': '0.99', 'cpu_time': '14200', + 'data_io_time': '1231243', 'unique_query_id': '12432453234', 'sort_count': '13', + 'sort_mem_used': '12.43', 'sort_spill_count': '3', 'hash_count': '0', 'hash_mem_used': '0', + 'hash_spill_count': '0', 'lock_wait_count': '0', 'lwlock_wait_count': '0', + 'n_returned_rows': '1', 'n_tuples_returned': '100000', 'n_tuples_fetched': '0', + 'n_tuples_inserted': '0', 'n_tuples_updated': '0', 'n_tuples_deleted': 0} +shield_slow_sql_seq = Sequence(timestamps=[1640139695000], + values=[101], + name='pg_sql_statement_history_exec_time', + step=5, + labels=shield_slow_sql_dict) + +locked_slow_sql_dict = {'from_instance': '127.0.0.1:5432', 'datname': 'database1', 'schema': 'public', + 'query': 'CREATE TABLE table1(id int, name varchar(10))', 'start_time': '1640149690000', + 'finish_time': '1640139700000', 'hit_rate': '0.988', 'fetch_rate': '0.99', 'cpu_time': '14200', + 'data_io_time': '1231243', 'unique_query_id': '12432453234', 'sort_count': '13', + 'sort_mem_used': '12.43', 'sort_spill_count': '3', 'hash_count': '0', 'hash_mem_used': '0', + 'hash_spill_count': '0', 'lock_wait_count': '1', 'lwlock_wait_count': '1', + 'n_returned_rows': '1', 'n_tuples_returned': '100000', 'n_tuples_fetched': '0', + 'n_tuples_inserted': '0', 'n_tuples_updated': '0', 'n_tuples_deleted': 0} +locked_slow_sql_seq = Sequence(timestamps=[1640139695000], + values=[101], + name='pg_sql_statement_history_exec_time', + step=5, + labels=locked_slow_sql_dict) + + +class SimpleQueryMockedFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return [simple_slow_sql_seq] + + +class ComplexQueryMockedFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return [complex_slow_sql_seq] + + +class CommitQueryMockedFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return [commit_slow_sql_seq] + + +class SystemQueryMockedFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return [system_slow_sql_seq] + + +class ShieldQueryMockedFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return [shield_slow_sql_seq] + + +class LockedQueryMockedFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self) -> List[Sequence]: + return [locked_slow_sql_seq] + + +class MockedSimpleQueryContext: + @staticmethod + def acquire_pg_class(): + return {'database1': {'schema1': ['table1', 'table2'], 'schema2': ['table3'], 'public': []}} + + @staticmethod + def acquire_fetch_interval(): + return 5 + + @staticmethod + def acquire_lock_info(): + return LockInfo() + + @staticmethod + def acquire_tables_structure_info(): + 
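+ # Deliberately default-constructed: with no dead tuples, size or index data, none of the table-related features should fire for the simple-query scenario.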
return [TableStructure()] + + @staticmethod + def acquire_database_info(): + return DatabaseInfo() + + @staticmethod + def acquire_system_info(): + return SystemInfo() + + +class MockedComplexQueryContext: + @staticmethod + def acquire_pg_class(): + return {'database1': {'schema1': ['table1', 'table2'], 'schema2': ['table3'], 'public': []}} + + @staticmethod + def acquire_fetch_interval(): + return 5 + + @staticmethod + def acquire_lock_info(): + lock_info = LockInfo() + lock_info.db_host = '127.0.0.1' + lock_info.locked_query = ['update schema1.table1 set age=30 where id=3'] + lock_info.locked_query_start = [1640139690000] + lock_info.locker_query = ['vacuum full'] + lock_info.locker_query_start = [1640139900000] + return lock_info + + @staticmethod + def acquire_tables_structure_info(): + table_info = TableStructure() + table_info.db_host = '127.0.0.1' + table_info.db_port = '5432' + table_info.db_name = 'database1' + table_info.schema_name = 'schema1' + table_info.table_name = 'table1' + table_info.dead_tuples = 8000 + table_info.live_tuples = 10000 + table_info.dead_rate = 0.7 + table_info.last_autovacuum = 1640139690000 + table_info.last_autoanalyze = 1640139690000 + table_info.analyze = 1640139690 + table_info.vacuum = 1640139690 + table_info.table_size = 10000 + table_info.index = ['index1', 'index2', 'index3', 'index4'] + table_info.redundant_index = ['redundant_index1', 'redundant_index2', 'redundant_index3', 'redundant_index4'] + return [table_info] + + @staticmethod + def acquire_database_info(): + db_info = DatabaseInfo() + db_info.db_host = '127.0.0.1' + db_info.db_port = '8080' + db_info.history_tps = [100] + db_info.current_tps = [100000] + db_info.max_conn = 100 + db_info.used_conn = 99 + return db_info + + @staticmethod + def acquire_system_info(): + system_info = SystemInfo() + system_info.db_host = '127.0.0.1' + system_info.db_port = '8080' + system_info.iops = 10000 + system_info.ioutils = {'sdm-0': 0.9} + system_info.iocapacity = 10000 + system_info.iowait = 0.7 + system_info.cpu_usage = 0.9 + system_info.mem_usage = 0.9 + system_info.load_average = 0.9 + return system_info + + +dai.get_latest_metric_sequence = mock.Mock(side_effect=[SimpleQueryMockedFetcher(), + ComplexQueryMockedFetcher(), + LockedQueryMockedFetcher(), + SimpleQueryMockedFetcher(), + CommitQueryMockedFetcher(), + SystemQueryMockedFetcher(), + SimpleQueryMockedFetcher(), + ShieldQueryMockedFetcher()]) +query_info_source.QueryContext = mock.Mock(side_effect=[MockedSimpleQueryContext, + MockedComplexQueryContext, + MockedSimpleQueryContext, + MockedSimpleQueryContext, + MockedSimpleQueryContext, + MockedSimpleQueryContext, + MockedSimpleQueryContext, + MockedSimpleQueryContext]) + + +def test_average_base(): + check_res_1 = average_base.detect(big_current_data, big_history_data, method='bool') + check_res_2 = average_base.detect(small_current_data, small_history_data, method='bool') + check_res_3 = average_base.detect(ilegal_current_data, ilegal_history_data, method='bool') + check_res_4 = average_base.detect(big_current_data, big_history_data, method='other') + check_res_5 = average_base.detect(big_history_data, big_current_data, method='other') + try: + _ = average_base.detect(100, 200) + except TypeError as execinfo: + assert 'The format of the input data is wrong' in str(execinfo) + try: + _ = average_base.detect(big_current_data, big_history_data, method='inner') + except ValueError as execinfo: + assert 'Not supported method' in str(execinfo) + assert check_res_1 + assert not check_res_2 + assert 
not check_res_3 + assert round(check_res_4, 4) == 0.9000 + assert check_res_5 == 0 + + +def test_ks_base(): + check_res_1 = ks_base.detect(big_current_data, big_history_data) + check_res_2 = ks_base.detect(small_current_data, small_history_data) + check_res_3 = ks_base.detect(ilegal_current_data, ilegal_history_data) + assert check_res_1 + assert not check_res_2 + assert not check_res_3 + + +def test_sum_base(): + check_res_1 = sum_base.detect(big_current_data, big_history_data, method='bool') + check_res_2 = sum_base.detect(small_current_data, small_history_data, method='bool') + check_res_3 = sum_base.detect(ilegal_current_data, ilegal_history_data, method='bool') + check_res_4 = sum_base.detect(big_current_data, big_history_data, method='other') + check_res_5 = sum_base.detect(big_history_data, big_current_data, method='other') + try: + _ = sum_base.detect(100, 200) + except TypeError as execinfo: + assert 'The format of the input data is wrong' in str(execinfo) + try: + _ = sum_base.detect(big_current_data, big_history_data, method='inner') + except ValueError as execinfo: + assert 'Not supported method' in str(execinfo) + assert check_res_1 + assert not check_res_2 + assert not check_res_3 + assert round(check_res_4, 4) == 0.9000 + assert check_res_5 == 0 + + +def test_load_feature_lib(): + feature_lib = load_feature_lib() + assert len(feature_lib) == 3 + assert len(feature_lib['features']) > 0 + assert len(feature_lib['labels']) > 0 + assert len(feature_lib['weight_matrix']) > 0 + + +def test_get_feature_mapper(): + feature_mapping = get_feature_mapper() + assert len(feature_mapping) == 20 + + +def test_vector_distance(): + feature_lib = load_feature_lib() + features, causes, weight_matrix = feature_lib['features'], feature_lib['labels'], feature_lib['weight_matrix'] + feature_instance1 = np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + feature_instance2 = np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + distance = analyzer._vector_distance(feature_instance1, features[0], 1, weight_matrix) + assert round(distance, 4) == 0.9985 + try: + _ = analyzer._vector_distance(feature_instance2, features[0], 1, weight_matrix) + except ValueError as execinfo: + assert 'not equal' in str(execinfo) + + +def test_euclid_distance(): + feature1 = np.array([1, 1, 0, 0, 0]) + feature2 = np.array([0, 1, 0, 0, 0]) + distance = analyzer._euclid_distance(feature1, feature2) + assert distance == 1.0 + + +def test_calculate_nearest_feature(): + feature = np.array([1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]) + nearest_feature = analyzer._calculate_nearest_feature(feature, topk=3) + assert len(nearest_feature) == 3 + assert nearest_feature[0][1] == 11 + assert nearest_feature[1][1] == 6 + assert nearest_feature[2][1] == 17 + + +def test_simple_query_feature(): + slow_sql_instance = dai.get_all_slow_queries(minutes=5)[0] + data_factory = query_info_source.QueryContext(slow_sql_instance) + query_f = query_feature.QueryFeature(slow_sql_instance, data_factory) + query_f.initialize_metrics() + features, detail = query_f() + assert query_f.select_type + assert not query_f.update_type + assert not query_f.delete_type + assert not query_f.insert_type + assert not query_f.other_type + assert not query_f.query_block + assert not query_f.large_table + assert not query_f.large_dead_tuples + assert query_f.large_fetch_tuples + assert not query_f.large_returned_rows + assert not query_f.lower_hit_ratio + assert not query_f.update_redundant_index 
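+ # Apart from the external sort and large fetch that this SELECT genuinely performs, every remaining feature is expected to stay off.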
+ assert not query_f.insert_redundant_index + assert not query_f.delete_redundant_index + assert not query_f.large_updated_tuples + assert not query_f.large_inserted_tuples + assert not query_f.index_number_insert + assert not query_f.large_deleted_tuples + assert query_f.external_sort + assert not query_f.vacuum_operation + assert not query_f.analyze_operation + assert not query_f.tps_significant_change + assert not query_f.large_iowait + assert not query_f.large_iops + assert not query_f.large_load_average + assert not query_f.large_cpu_usage + assert not query_f.large_ioutils + assert not query_f.large_iocapacity + assert features == [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +def test_complex_query_feature(): + slow_sql_instance = dai.get_all_slow_queries(minutes=5)[0] + data_factory = query_info_source.QueryContext(slow_sql_instance) + query_f = query_feature.QueryFeature(slow_sql_instance, data_factory) + query_f.initialize_metrics() + _, _ = query_f() + assert not query_f.select_type + assert query_f.update_type + assert query_f.query_block + assert query_f.large_table + assert not query_f.external_sort + assert query_f.large_dead_tuples + assert query_f.large_updated_tuples + assert query_f.update_redundant_index + assert query_f.lower_hit_ratio + assert query_f.redundant_index + assert query_f.large_index_number + assert query_f.vacuum_operation + assert query_f.analyze_operation + assert query_f.tps_significant_change + assert query_f.large_iops + assert query_f.large_iowait + assert query_f.large_ioutils + assert query_f.large_iocapacity + assert query_f.large_cpu_usage + assert query_f.large_load_average + + +def test_locked_query_feature(): + slow_sql_instance = dai.get_all_slow_queries(minutes=5)[0] + data_factory = query_info_source.QueryContext(slow_sql_instance) + query_f = query_feature.QueryFeature(slow_sql_instance, data_factory) + query_f.initialize_metrics() + _, _ = query_f() + assert query_f.query_block + assert query_f.other_type + + +def test_slow_analyzer(): + _analyzer = SlowSQLAnalyzer(topk=3) + simple_slow_sql_instances = dai.get_all_slow_queries(minutes=5) + commit_slow_sql_instances = dai.get_all_slow_queries(minutes=5) + system_slow_sql_instances = dai.get_all_slow_queries(minutes=5) + repeat_slow_sql_instances = dai.get_all_slow_queries(minutes=5) + shield_slow_sql_instances = dai.get_all_slow_queries(minutes=5) + _analyzer.run(simple_slow_sql_instances[-1]) + assert sum(item in simple_slow_sql_instances[-1].root_causes for item in ('EXTERNAL_SORT', 'LARGE_FETCHED_TUPLES')) + _analyzer.run(commit_slow_sql_instances[-1]) + assert sum(item in commit_slow_sql_instances[-1].root_causes for item in ('UNKNOWN',)) + _analyzer.run(system_slow_sql_instances[-1]) + assert sum(item in system_slow_sql_instances[-1].root_causes for item in ('DATABASE_VIEW',)) + _analyzer.run(repeat_slow_sql_instances[-1]) + assert not repeat_slow_sql_instances[-1].root_causes + _analyzer.run(shield_slow_sql_instances[-1]) + assert sum(item in shield_slow_sql_instances[-1].root_causes for item in ('ILLEGAL_SQL',)) diff --git a/src/gausskernel/dbmind/tools/tests/test_slow_sql_feature.py b/src/gausskernel/dbmind/tools/tests/test_slow_sql_feature.py new file mode 100644 index 000000000..46d8144cd --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_slow_sql_feature.py @@ -0,0 +1,55 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +import csv +import os + +import numpy as np + +from dbmind.app.diagnosis.query.slow_sql.featurelib.feature_model import calculate_weight, build_model + +FEATURES_DIMENSION = 5 +LABEL_DIMENSION = 1 +FEATURES_NUMBER = 3 +FEATURE_PATH = os.path.join(os.path.dirname(__file__), 'features') + + +def remove_file(file_path): + if os.path.exists(file_path) and os.path.isfile(file_path): + os.remove(file_path) + + +def check_file(file_path): + return os.path.exists(file_path) + + +def test_calculate_weight(): + features, labels = np.zeros((FEATURES_NUMBER, FEATURES_DIMENSION)), np.zeros(FEATURES_NUMBER) + print(FEATURE_PATH) + with open(FEATURE_PATH, mode='r') as f: + csv_reader = csv.reader(f) + for line in csv_reader: + line = [int(item) for item in line] + features[csv_reader.line_num - 1] = line[:-1] + labels[csv_reader.line_num - 1] = line[-1] + weight_matrix = calculate_weight(features, labels) + assert len(weight_matrix) == 3 + assert len(weight_matrix[0]) == 5 + + +def test_build_model(): + feature_lib_path = 'test_feature_lib.npz' + remove_file(feature_lib_path) + build_model(feature_path=FEATURE_PATH, feature_number=FEATURES_NUMBER, feature_dimension=FEATURES_DIMENSION, + save_path=feature_lib_path) + assert check_file(feature_lib_path) + remove_file(feature_lib_path) diff --git a/src/gausskernel/dbmind/tools/tests/test_slow_sql_rca.py b/src/gausskernel/dbmind/tools/tests/test_slow_sql_rca.py new file mode 100644 index 000000000..06cca8e22 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_slow_sql_rca.py @@ -0,0 +1,52 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
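+# The RCA pipeline reads its TSDB and metadatabase settings from global_vars, so the configuration below must be installed before dai and diagnose_query are imported; the hard-coded TSDB host is a local placeholder (see the in-line TODO).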
+ +import configparser + +from dbmind import global_vars +from dbmind.cmd.config_utils import DynamicConfig + +configs = configparser.ConfigParser() +configs.add_section('TSDB') +configs.set('TSDB', 'name', 'prometheus') +configs.set('TSDB', 'host', '10.90.56.172') # TODO: CHANGE or IGNORE +configs.set('TSDB', 'port', '9090') +configs.add_section('METADATABASE') +configs.set('METADATABASE', 'dbtype', 'sqlite') +configs.set('METADATABASE', 'host', '') +configs.set('METADATABASE', 'port', '') +configs.set('METADATABASE', 'username', '') +configs.set('METADATABASE', 'password', '') +configs.set('METADATABASE', 'database', 'test_metadatabase.db') +global_vars.configs = configs +global_vars.must_filter_labels = {} +global_vars.dynamic_configs = DynamicConfig + +from dbmind.service.dai import get_all_slow_queries +from dbmind.app.diagnosis.query import diagnose_query + + +def test_rca_service(): + slow_query_instances = get_all_slow_queries(0.4) + print(len(slow_query_instances)) + for slow_query_instance in slow_query_instances: + diagnose_query(slow_query_instance) + print('*' * 100) + print(slow_query_instance.query) + print(slow_query_instance.root_causes) + print(slow_query_instance.suggestions) + print('*' * 100) + + +if __name__ == '__main__': + test_rca_service() diff --git a/src/gausskernel/dbmind/tools/tests/test_stats_algorithms.py b/src/gausskernel/dbmind/tools/tests/test_stats_algorithms.py new file mode 100644 index 000000000..38290d1eb --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_stats_algorithms.py @@ -0,0 +1,44 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from dbmind.common.algorithm.statistics import * + +test_values1 = list(range(0, 10)) +# [0, 0, ..., 1, 1, ..., ..., 0, 0] +test_values2 = [0] * 10 + [1] * 10 + [0] * 10 +test_values3 = [1, 2, 3.5, 0.4, 5] + + +def test_quantile(): + assert np_quantile(test_values1, 1) == 0.09 + assert np_quantile(test_values2, 2) == 0 + assert np_quantile(test_values3, 1) > 0.42 + + +def test_shift(): + assert np_shift(test_values1).tolist() == [0, 0, 1, 2, 3, 4, 5, 6, 7, 8] + assert np_shift(test_values1, 9).tolist() == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +def test_moving_avg(): + assert np_moving_avg(test_values1, window=1).tolist() == test_values1 + assert np_moving_avg([1, 2], 2).tolist() == [1.5, 1.5] + assert np_moving_avg([1, 2, 3], 2).tolist() == [0.5, 1.5, 1.5] + + +def test_moving_std(): + assert np_moving_std([1, 2], 1).tolist() == [0, 0] + assert np_moving_std([1, 2, 3], 2).tolist() == [0.5, 0.5, 0.5] + + +def test_double_rolling(): + assert np_double_rolling([1, 2, 3], window1=1, window2=2).tolist() == [1.5, 1.5, 1.5] diff --git a/src/gausskernel/dbmind/tools/tests/test_task_scheduler.py b/src/gausskernel/dbmind/tools/tests/test_task_scheduler.py new file mode 100644 index 000000000..ead903ab8 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_task_scheduler.py @@ -0,0 +1,70 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import time + +from dbmind.common.dispatcher import task_scheduler + + +def test_repeated_timer(): + count = 0 + + def increase(): + nonlocal count + count += 1 + + timer = task_scheduler.RepeatedTimer(0.1, increase) + timer.start() + time.sleep(0.5) + assert count >= 5 + # After cancel(), the counter must stop advancing (at most one in-flight tick is tolerated). + timer.cancel() + time.sleep(0.5) + assert 5 <= count <= 6 + + # RepeatedTimer instances with the same interval and callback should compare equal, so a set deduplicates them. + _set = set() + timer2 = task_scheduler.RepeatedTimer(0.1, increase) + _set.add(timer) + _set.add(timer2) + assert len(_set) == 1 + timer3 = task_scheduler.RepeatedTimer(0.2, increase) + _set.add(timer3) + assert len(_set) == 2 + + +VAR1 = VAR2 = 0 + + +@task_scheduler.timer(0.1) +def outer_increase1(): + global VAR1 + VAR1 += 1 + + +@task_scheduler.timer(0.2) +def outer_increase2(): + global VAR2 + VAR2 += 2 + + +def test_timer_task_mgr(): + tasks = ('RepeatedTimer(outer_increase1, 0.1)', 'RepeatedTimer(outer_increase2, 0.2)') + for t in tasks: + assert t in str(task_scheduler.TimedTaskManager.timers) + assert len(task_scheduler.TimedTaskManager.timers) >= 2 + task_scheduler.TimedTaskManager.start() + time.sleep(1) + assert VAR1 <= VAR2 + assert VAR1 >= 10 + task_scheduler.TimedTaskManager.stop() diff --git a/src/gausskernel/dbmind/tools/tests/test_timed_app.py b/src/gausskernel/dbmind/tools/tests/test_timed_app.py new file mode 100644 index 000000000..399bdc3cd --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_timed_app.py @@ -0,0 +1,118 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details.
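+# End-to-end checks for the timed apps: when PROMETHEUS_ADDR (host:port) is exported they query that Prometheus instance, otherwise the dai accessors are monkey-patched with the faked sequence generators defined below.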
+import configparser +import random +import os +from datetime import datetime, timedelta +from unittest.mock import MagicMock + +from dbmind import global_vars +from dbmind.service.dai import _AbstractLazyFetcher +from dbmind.cmd.edbmind import get_worker_instance +from dbmind.cmd.config_utils import DynamicConfig + + +prom_addr = os.environ.get('PROMETHEUS_ADDR', 'hostname:9090') +prom_host, prom_port = prom_addr.split(':') +configs = configparser.ConfigParser() +configs.add_section('TSDB') +configs.set('TSDB', 'name', 'prometheus') +configs.set('TSDB', 'host', prom_host) +configs.set('TSDB', 'port', prom_port) +configs.add_section('SELF-MONITORING') +configs.set( + 'SELF-MONITORING', 'detection_interval', '600' +) +configs.set( + 'SELF-MONITORING', 'last_detection_time', '600' +) +configs.set( + 'SELF-MONITORING', 'forecasting_future_time', '86400' +) +configs.set( + 'SELF-MONITORING', 'golden_kpi', 'os_cpu_usage, os_mem_usage' +) +configs.set( + 'SELF-MONITORING', 'result_storage_retention', '600' +) +global_vars.configs = configs +global_vars.dynamic_configs = DynamicConfig +global_vars.worker = get_worker_instance('local', 2) + +from dbmind.common.types import Sequence +from dbmind.service import dai +from dbmind.app.timed_app import self_monitoring, forecast_kpi + + +def faked_get_metric_sequence(metric_name, start_time, end_time): + step = 5 + random_scope = 0.5 + timestamps = list(range(int(start_time.timestamp()), int(end_time.timestamp()) + step, step)) + values = timestamps.copy() + # Randomly perturb a handful of points so the series is not strictly linear. + for _ in range(0, min(len(timestamps) // 5, 5)): + idx = random.randint(0, len(timestamps) - 1) + current_value = values[idx] + values[idx] = random.randint( + int(current_value * (1 - random_scope)), + int(current_value * (1 + random_scope)) + ) + + class MockFetcher(_AbstractLazyFetcher): + def _real_fetching_action(self): + hosts = ('192.168.1.100:1234', '192.168.1.101:5678', '192.168.1.102:1111') + rv = list() + for host in hosts: + rv.append( + Sequence(timestamps=timestamps, values=values, name=metric_name, labels={'from_instance': host}) + ) + return rv + + return MockFetcher() + + +def faked_get_latest_metric_sequence(metric_name, minutes): + end_time = datetime.now() + start_time = end_time - timedelta(minutes=minutes) + return faked_get_metric_sequence(metric_name, start_time, end_time) + + +def test_self_monitoring(): + if prom_host == 'hostname': + # Fall back to the faked data source because no PROMETHEUS_ADDR environment variable was set. + dai.get_metric_sequence = faked_get_metric_sequence + dai.get_latest_metric_sequence = faked_get_latest_metric_sequence + dai.get_all_slow_queries = MagicMock( + return_value=() + ) + + dai.get_all_last_monitoring_alarm_logs = MagicMock( + return_value=() + ) + + dai.save_history_alarm = print + dai.save_slow_queries = print + self_monitoring() + + +def test_forecast_kpi(): + dai.get_metric_sequence = faked_get_metric_sequence + dai.save_forecast_sequence = print + dai.save_future_alarm = print + forecast_kpi() + + +if __name__ == '__main__': + test_self_monitoring() + test_forecast_kpi() + diff --git a/src/gausskernel/dbmind/tools/tests/test_types.py b/src/gausskernel/dbmind/tools/tests/test_types.py new file mode 100644 index 000000000..3dcd9533e --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_types.py @@ -0,0 +1,60 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +from dbmind.common.types import Sequence + + +def test_sequence(): + no_one_seq = Sequence(range(0, 0), range(0, 0)) + assert str(no_one_seq) == 'Sequence[None](0){}' + assert len(no_one_seq) == 0 + + s1_tms = (10, 20, 30, 40, 50) + s1_vls = (1, 2, 3, 4, 5) + s1 = Sequence(s1_tms, s1_vls) + assert s1.timestamps == (10, 20, 30, 40, 50) + assert s1.values == (1, 2, 3, 4, 5) + assert len(s1) == 5 + assert s1[30] == 3 + assert s1[1] is None + + sub1 = s1[20, 40] # (20, 30, 40), (2, 3, 4) + assert sub1.timestamps == (20, 30, 40) + assert sub1.values == (2, 3, 4) + + assert len(sub1) == 3 + + sub_non = sub1[100, 111] + assert len(sub_non) == 0 + assert sub_non.timestamps == tuple() + assert sub_non.values == tuple() + sub2 = s1[40, 80] + assert len(sub2) == 2 + sub3 = sub2[40, 40] + assert len(sub3) == 1 + sub4 = sub2[80, 80] + assert len(sub4) == 0 + assert sub2.values == (4, 5) + + assert sub2[40] == 4 and sub2[50] == 5 + + e = None + try: + Sequence((1, 2, 3, 4, 4, 5), (10, 20, 30, 40, 30, 20)) + except ValueError as _: + e = _ + assert isinstance(e, ValueError) + + # Iterating a Sequence should yield (timestamp, value) pairs in order. + for i, (t, v) in enumerate(s1): + assert t == s1_tms[i] and v == s1_vls[i] + diff --git a/src/gausskernel/dbmind/tools/tests/test_utils.py b/src/gausskernel/dbmind/tools/tests/test_utils.py new file mode 100644 index 000000000..7cae1e569 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_utils.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import os + +from dbmind.constants import METRIC_MAP_CONFIG, MISC_PATH +from dbmind.common import utils + +CURR_DIR = os.path.abspath(os.path.dirname(__file__)) + + +def test_read_simple_conf_file(): + conf = utils.read_simple_config_file( + os.path.join(MISC_PATH, METRIC_MAP_CONFIG) + ) + + assert len(conf) > 0 + for name, value in conf.items(): + assert not name.startswith('#') + + +def test_write_to_terminal(): + utils.write_to_terminal(1111) + utils.write_to_terminal(111, level='error', color='red') + utils.write_to_terminal('hello world', color='yellow') diff --git a/src/gausskernel/dbmind/tools/tests/test_worker.py b/src/gausskernel/dbmind/tools/tests/test_worker.py new file mode 100644 index 000000000..0711941c2 --- /dev/null +++ b/src/gausskernel/dbmind/tools/tests/test_worker.py @@ -0,0 +1,48 @@ +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import time + +from dbmind.common.dispatcher.task_worker import ProcessWorker + + +def square(v): + return v * v + + +def sleepy_square(v): + time.sleep(0.2) + return v * v + + +process_worker = ProcessWorker(10) + + +def test_process_worker(): + assert process_worker.apply(square, True, (2,)) == 4 + assert process_worker.parallel_execute(square, ((0,), (1,), (2,), (3,), (4,))) == [0, 1, 4, 9, 16] + + # Should execute in parallel, only waiting for the slowest one. + start_time = time.time() + process_worker.parallel_execute(sleepy_square, [(v,) for v in range(10)]) + end_time = time.time() + assert 0.2 < (end_time - start_time) < 0.3 + + +def test_blocking_task(): + start_time = time.time() + process_worker._parallel_execute(sleepy_square, [[1], [1], [1], [1], [1]]) + end_time = time.time() + + interval = end_time - start_time + assert 0.2 < interval < 0.3 diff --git a/src/gausskernel/dbmind/tools/xtuner/requirements-aarch64.txt b/src/gausskernel/dbmind/tools/xtuner/requirements-aarch64.txt deleted file mode 100644 index 8823951df..000000000 --- a/src/gausskernel/dbmind/tools/xtuner/requirements-aarch64.txt +++ /dev/null @@ -1,6 +0,0 @@ -cryptography==2.5 -paramiko==2.7.2 -numpy==1.16.5 -scipy==1.6.0 -bayesian-optimization -ptable diff --git a/src/gausskernel/dbmind/tools/xtuner/requirements-x86.txt b/src/gausskernel/dbmind/tools/xtuner/requirements-x86.txt deleted file mode 100644 index 7f5dbf350..000000000 --- a/src/gausskernel/dbmind/tools/xtuner/requirements-x86.txt +++ /dev/null @@ -1,5 +0,0 @@ -tensorflow >= 2.1.0 # optional -keras-rl2~=1.0.4 # optional -paramiko==2.7.2 -bayesian-optimization -ptable diff --git a/src/gausskernel/dbmind/tools/xtuner/setup.py b/src/gausskernel/dbmind/tools/xtuner/setup.py deleted file mode 100644 index 3d2df8dfe..000000000 --- a/src/gausskernel/dbmind/tools/xtuner/setup.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" - -import os -import sys -import platform - -from setuptools import setup, find_packages - - -def read_requirements(): - """Parse requirements.txt.""" - if 'aarch64' in platform.uname().machine: - filepath = os.path.join('.', 'requirements-aarch64.txt') - else: - filepath = os.path.join('.', 'requirements-x86.txt') - with open(filepath, 'r') as f: - requirements = [_line.rstrip() for _line in f] - requirements.reverse() - return requirements - -def check_version(): - version_info = sys.version_info - major, minor = version_info.major, version_info.minor - # At least, the Python version is (3, 6) - if major < 3 or minor <= 5: - return False - return True - -if not check_version(): - print("Requires Python >= 3.6") - exit(-1) - -# Read the package information from the main.py. 
-pkginfo = dict() -with open(os.path.join('tuner', 'main.py')) as pkginfo_fp: - for line in pkginfo_fp.readlines(): - if line.startswith(('__version__', '__description__')): - exec(line, pkginfo) - -setup( - name="openGauss-xtuner", - version=pkginfo['__version__'], - description=pkginfo['__description__'], - author="Huawei Technologies Co.,Ltd.", - url='https://gitee.com/opengauss/openGauss-server', - license='Mulan PSL v2', - install_requires=read_requirements(), - packages=find_packages(exclude='test'), - package_data={'': ['*']}, - entry_points={ - 'console_scripts': [ - 'gs_xtuner = tuner.main: main', - ], - }, -) diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/__init__.py b/src/gausskernel/dbmind/tools/xtuner/tuner/__init__.py deleted file mode 100644 index 7ed6f5e5c..000000000 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" diff --git a/src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/__init__.py b/src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/__init__.py deleted file mode 100644 index 7ed6f5e5c..000000000 --- a/src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Copyright (c) 2020 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -""" diff --git a/src/gausskernel/optimizer/commands/CMakeLists.txt b/src/gausskernel/optimizer/commands/CMakeLists.txt index 49bdd8d6e..4f9360c7f 100755 --- a/src/gausskernel/optimizer/commands/CMakeLists.txt +++ b/src/gausskernel/optimizer/commands/CMakeLists.txt @@ -1,6 +1,10 @@ #This is the main CMAKE for build all components. 
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_commands_SRC) +if("${ENABLE_LITE_MODE}" STREQUAL "ON") + list(REMOVE_ITEM TGT_commands_SRC ${CMAKE_CURRENT_SOURCE_DIR}/obs_stream.cpp) +endif() + set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/sequence ) diff --git a/src/gausskernel/optimizer/commands/Makefile b/src/gausskernel/optimizer/commands/Makefile index 0d57a9f1c..eaa8f21de 100644 --- a/src/gausskernel/optimizer/commands/Makefile +++ b/src/gausskernel/optimizer/commands/Makefile @@ -27,7 +27,11 @@ OBJS = aggregatecmds.o alter.o analyze.o async.o cluster.o comment.o \ portalcmds.o prepare.o proclang.o packagecmds.o publicationcmds.o\ schemacmds.o seclabel.o sec_rls_cmds.o subscriptioncmds.o tablecmds.o tablespace.o trigger.o \ tsearchcmds.o typecmds.o user.o vacuum.o vacuumlazy.o \ - variable.o verify.o view.o gds_stream.o obs_stream.o formatter.o datasourcecmds.o \ + variable.o verifyrepair.o verify.o view.o gds_stream.o formatter.o datasourcecmds.o \ directory.o auto_explain.o shutdown.o +ifeq ($(enable_lite_mode), no) +OBJS += obs_stream.o +endif + include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/optimizer/commands/aggregatecmds.cpp b/src/gausskernel/optimizer/commands/aggregatecmds.cpp index 052812cd6..68db543f5 100644 --- a/src/gausskernel/optimizer/commands/aggregatecmds.cpp +++ b/src/gausskernel/optimizer/commands/aggregatecmds.cpp @@ -264,12 +264,14 @@ void RenameAggregate(List* name, List* args, const char* newname) #ifndef ENABLE_MULTIPLE_NODES Datum allargtypes = ProcedureGetAllArgTypes(tup, &isNull); + Datum argmodes = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_proargmodes, &isNull); /* make sure the new name doesn't exist */ - if (SearchSysCache4(PROCALLARGS, + if (SearchSysCacheForProcAllArgs( CStringGetDatum(newname), allargtypes, ObjectIdGetDatum(namespaceOid), - ObjectIdGetDatum(packageoid))) + ObjectIdGetDatum(packageoid), + argmodes)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), errmsg("function %s already exists in schema \"%s\"", diff --git a/src/gausskernel/optimizer/commands/alter.cpp b/src/gausskernel/optimizer/commands/alter.cpp index b33c04616..004ccd046 100644 --- a/src/gausskernel/optimizer/commands/alter.cpp +++ b/src/gausskernel/optimizer/commands/alter.cpp @@ -533,6 +533,9 @@ void ExecAlterOwnerStmt(AlterOwnerStmt* stmt) AlterFunctionOwner(stmt->object, stmt->objarg, newowner); break; + case OBJECT_PACKAGE: + AlterPackageOwner(stmt->object, newowner); + break; case OBJECT_LANGUAGE: AlterLanguageOwner(strVal(linitial(stmt->object)), newowner); break; diff --git a/src/gausskernel/optimizer/commands/analyze.cpp b/src/gausskernel/optimizer/commands/analyze.cpp index b3344e3eb..3b694e1d3 100755 --- a/src/gausskernel/optimizer/commands/analyze.cpp +++ b/src/gausskernel/optimizer/commands/analyze.cpp @@ -1527,6 +1527,7 @@ static inline void cleanup_indexes(int nindexes, Relation* Irel, const Relation ivinfo.analyze_only = true; ivinfo.estimated_count = true; ivinfo.message_level = elevel; + ivinfo.invisibleParts = NULL; /* * if not handle the return value of index_vacuum_cleanup(), @@ -1579,6 +1580,65 @@ static inline void cleanup_indexes(int nindexes, Relation* Irel, const Relation } } +static void do_analyze_rel_start_log(bool inh, int elevel, Relation onerel) +{ + char* namespace_name = get_namespace_name(RelationGetNamespace(onerel)); + if (inh) + ereport(elevel, + (errmodule(MOD_AUTOVAC), + errmsg("analyzing \"%s.%s\" inheritance tree", + namespace_name, + RelationGetRelationName(onerel)))); + else + 
ereport(elevel, + (errmodule(MOD_AUTOVAC), + errmsg("analyzing \"%s.%s\"", + namespace_name, + RelationGetRelationName(onerel)))); + pfree_ext(namespace_name); +} + +static int64 do_analyze_calculate_sample_target_rows(int attr_cnt, VacAttrStats** vacattrstats, int nindexes, + AnlIndexData* indexdata) +{ + int64 targrows = 100; + for (int i = 0; i < attr_cnt; i++) { + if (targrows < vacattrstats[i]->minrows) + targrows = vacattrstats[i]->minrows; + } + + for (int ind = 0; ind < nindexes; ind++) { + AnlIndexData* thisdata = &indexdata[ind]; + + for (int i = 0; i < thisdata->attr_cnt; i++) { + if (targrows < thisdata->vacattrstats[i]->minrows) + targrows = thisdata->vacattrstats[i]->minrows; + } + } + return targrows; +} + +static void do_analyze_rel_end_log(TimestampTz starttime, Relation onerel, PGRUsage* ru0) +{ + /* Log the action if appropriate */ + if (IsAutoVacuumWorkerProcess() && u_sess->attr.attr_storage.Log_autovacuum_min_duration >= 0) { + if (u_sess->attr.attr_storage.Log_autovacuum_min_duration == 0 || + TimestampDifferenceExceeds( + starttime, GetCurrentTimestamp(), u_sess->attr.attr_storage.Log_autovacuum_min_duration)) { + char* dbname = get_and_check_db_name(u_sess->proc_cxt.MyDatabaseId); + char* namespace_name = get_namespace_name(RelationGetNamespace(onerel)); + ereport(LOG, + (errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s", + dbname, + namespace_name, + RelationGetRelationName(onerel), + pg_rusage_show(ru0)))); + pfree_ext(dbname); + pfree_ext(namespace_name); + } + } +} + /* * do_analyze_rel() -- analyze one relation, recursively or not * @@ -1623,16 +1683,7 @@ static void do_analyze_rel(Relation onerel, VacuumStmt* vacstmt, BlockNumber rel bool replicate_needs_extstats = false; es_check_availability_for_table(vacstmt, onerel, inh, &replicate_needs_extstats); - if (inh) - ereport(elevel, - (errmsg("analyzing \"%s.%s\" inheritance tree", - get_namespace_name(RelationGetNamespace(onerel)), - RelationGetRelationName(onerel)))); - else - ereport(elevel, - (errmsg("analyzing \"%s.%s\"", - get_namespace_name(RelationGetNamespace(onerel)), - RelationGetRelationName(onerel)))); + do_analyze_rel_start_log(inh, elevel, onerel); caller_context = do_analyze_preprocess(onerel->rd_rel->relowner, &ru0, @@ -1670,20 +1721,7 @@ static void do_analyze_rel(Relation onerel, VacuumStmt* vacstmt, BlockNumber rel * possible overflow in Vitter's algorithm. (Note: that will also be the * target in the corner case where there are no analyzable columns.) */ - targrows = 100; - for (i = 0; i < attr_cnt; i++) { - if (targrows < vacattrstats[i]->minrows) - targrows = vacattrstats[i]->minrows; - } - - for (int ind = 0; ind < nindexes; ind++) { - AnlIndexData* thisdata = &indexdata[ind]; - - for (i = 0; i < thisdata->attr_cnt; i++) { - if (targrows < thisdata->vacattrstats[i]->minrows) - targrows = thisdata->vacattrstats[i]->minrows; - } - } + targrows = do_analyze_calculate_sample_target_rows(attr_cnt, vacattrstats, nindexes, indexdata); /* * If get sample rows on datanode or coordinator or not. there are two cases: @@ -1882,21 +1920,6 @@ static void do_analyze_rel(Relation onerel, VacuumStmt* vacstmt, BlockNumber rel } if (!inh) { - if (RelationIsUstoreFormat(onerel)) { - PgStat_StatDBEntry* dbentry = NULL; - PgStat_StatTabEntry* tabentry = NULL; - dbentry = pgstat_fetch_stat_dbentry(u_sess->proc_cxt.MyDatabaseId); - if (dbentry != NULL) { - PgStat_StatTabKey tabkey; - tabkey.statFlag = RelationIsPartition(onerel) ? 
onerel->parentId : InvalidOid; - tabkey.tableid = RelationGetRelid(onerel); - tabentry = (PgStat_StatTabEntry*)hash_search(dbentry->tables, (void*)(&tabkey), HASH_FIND, NULL); - if (tabentry && tabentry->n_live_tuples > 0) { - totalrows = tabentry->n_live_tuples; - totaldeadrows = tabentry->n_dead_tuples; - } - } - } /* Update the pg_class for relation and index */ update_pages_and_tuples_pgclass(onerel, vacstmt, @@ -1928,18 +1951,7 @@ static void do_analyze_rel(Relation onerel, VacuumStmt* vacstmt, BlockNumber rel /* Done with indexes */ vac_close_indexes(nindexes, Irel, NoLock); - /* Log the action if appropriate */ - if (IsAutoVacuumWorkerProcess() && u_sess->attr.attr_storage.Log_autovacuum_min_duration >= 0) { - if (u_sess->attr.attr_storage.Log_autovacuum_min_duration == 0 || - TimestampDifferenceExceeds( - starttime, GetCurrentTimestamp(), u_sess->attr.attr_storage.Log_autovacuum_min_duration)) - ereport(LOG, - (errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s", - get_and_check_db_name(u_sess->proc_cxt.MyDatabaseId), - get_namespace_name(RelationGetNamespace(onerel)), - RelationGetRelationName(onerel), - pg_rusage_show(&ru0)))); - } + do_analyze_rel_end_log(starttime, onerel, &ru0); do_analyze_finalize(caller_context, save_userid, save_sec_context, save_nestlevel, analyzemode); @@ -3212,6 +3224,7 @@ void CstoreAnalyzePrefetch( } if (t_thrd.cstore_cxt.InProgressAioCUDispatchCount > 0) { +#ifndef ENABLE_LITE_MODE int tmp_count = t_thrd.cstore_cxt.InProgressAioCUDispatchCount; HOLD_INTERRUPTS(); FileAsyncCURead(dList, t_thrd.cstore_cxt.InProgressAioCUDispatchCount); @@ -3219,6 +3232,7 @@ void CstoreAnalyzePrefetch( RESUME_INTERRUPTS(); FileAsyncCUClose(vfdList, tmp_count); +#endif } pfree_ext(dList); diff --git a/src/gausskernel/optimizer/commands/cluster.cpp b/src/gausskernel/optimizer/commands/cluster.cpp index 39d3eedf3..7d66f8509 100755 --- a/src/gausskernel/optimizer/commands/cluster.cpp +++ b/src/gausskernel/optimizer/commands/cluster.cpp @@ -519,7 +519,7 @@ void cluster_rel(Oid tableOid, Oid partitionOid, Oid indexOid, bool recheck, boo } if (OldHeap->storage_type == SEGMENT_PAGE) { - ereport(LOG, (errmsg("skipping segment table \"%s\" --- please use gs_space_shrink " + ereport(INFO, (errmsg("skipping segment table \"%s\" --- please use gs_space_shrink " "to recycle segment space.", RelationGetRelationName(OldHeap)))); relation_close(OldHeap, lockMode); return; @@ -1521,7 +1521,7 @@ Oid make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, int lockMode) * Output : oid of new heap */ Oid makePartitionNewHeap(Relation partitionedTableRel, TupleDesc partTabHeapDesc, Datum partTabRelOptions, - Oid oldPartOid, Oid partToastOid, Oid NewTableSpace, bool isCStore) + Oid oldPartOid, Oid partToastOid, Oid NewTableSpace, bool isCStore, Oid subpartFilenode) { char NewHeapName[NAMEDATALEN]; Oid OIDNewHeap = InvalidOid; @@ -1550,7 +1550,7 @@ Oid makePartitionNewHeap(Relation partitionedTableRel, TupleDesc partTabHeapDesc partitionedTableRel->rd_rel->relkind, partitionedTableRel->rd_rel->relpersistence, false, - RelationIsMapped(partitionedTableRel), + subpartFilenode == InvalidOid ? 
RelationIsMapped(partitionedTableRel) : false, true, 0, ONCOMMIT_NOOP, @@ -3672,7 +3672,7 @@ void CBIVacuumFullMainPartiton(Oid parentOid) /* If vacuum full partitioned segment table, give hint here */ if (parentHeap->storage_type == SEGMENT_PAGE) { - ereport(LOG, (errmsg("skipping segment table \"%s\" --- please use gs_space_shrink " + ereport(INFO, (errmsg("skipping segment table \"%s\" --- please use gs_space_shrink " "to recycle segment space.", RelationGetRelationName(parentHeap)))); } @@ -3877,7 +3877,7 @@ static void VacFullCompaction(Relation oldHeap, Oid partOid) static void rebuildPartVacFull(Relation oldHeap, Oid partOid, int freezeMinAge, int freezeTableAge, VacuumStmt* vacstmt) { Oid tableOid = RelationGetRelid(oldHeap); - uint32 statFlag = tableOid; + uint32 statFlag = RelationIsSubPartitioned(oldHeap) ? partid_get_parentid(partOid) : tableOid; Oid OIDNewHeap = InvalidOid; bool swapToastByContent = false; TransactionId frozenXid = InvalidTransactionId; diff --git a/src/gausskernel/optimizer/commands/copy.cpp b/src/gausskernel/optimizer/commands/copy.cpp index 74e3eb6de..ad6e43840 100644 --- a/src/gausskernel/optimizer/commands/copy.cpp +++ b/src/gausskernel/optimizer/commands/copy.cpp @@ -983,7 +983,7 @@ uint64 DoCopy(CopyStmt* stmt, const char* queryString) } /* @Online expansion: check if the table is in redistribution read only mode */ - if (rel != NULL && is_from && RelationInClusterResizingReadOnly(rel)) + if (rel != NULL && is_from && RelationInClusterResizingWriteErrorMode(rel)) ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("%s is redistributing, please retry later.", rel->rd_rel->relname.data))); @@ -2076,6 +2076,7 @@ static void ProcessCopyNotAllowedOptions(CopyState cstate) (errcode(ERRCODE_SYNTAX_ERROR), errmsg("out_fix_alignment is only allowed in write-only foreign tables"))); } +#ifdef ENABLE_MULTIPLE_NODES /* * We could not make CopyGetDataDefault to respond to cancel signal. As a result * subtransactions that include COPY statement would be able to rollback or trigger exception @@ -2084,8 +2085,8 @@ static void ProcessCopyNotAllowedOptions(CopyState cstate) if (IsInLiveSubtransaction()) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("COPY does not support subtransactions or exceptions."))); } +#endif } - /* * ProcessCopyErrorLogSetUps is used to set up necessary structures used for copy from error logging. */ @@ -2262,7 +2263,6 @@ static CopyState BeginCopy(bool is_from, Relation rel, Node* raw_query, const ch ProcessCopyOptions(cstate, is_from, options); if (is_copy) ProcessCopyNotAllowedOptions(cstate); - /* Process the source/target relation or query */ if (rel) { Assert(!raw_query); @@ -2560,6 +2560,43 @@ static char* FindFileName(const char* path) return (name_start == NULL) ? (char*)path : (name_start + 1); } +static void CopyToCheck(Relation rel) +{ + if (rel->rd_rel->relkind == RELKIND_VIEW) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from view \"%s\"", RelationGetRelationName(rel)), + errhint("Try the COPY (SELECT ...) TO variant."))); + else if (rel->rd_rel->relkind == RELKIND_CONTQUERY) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from contview \"%s\"", RelationGetRelationName(rel)), + errhint("Try the COPY (SELECT ...) TO variant."))); + else if (rel->rd_rel->relkind == RELKIND_MATVIEW) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from materialized view \"%s\"", RelationGetRelationName(rel)), + errhint("Try the COPY (SELECT ...) 
TO variant."))); + else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) { + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from foreign table \"%s\"", RelationGetRelationName(rel)), + errhint("Try the COPY (SELECT ...) TO variant."))); + } else if (rel->rd_rel->relkind == RELKIND_STREAM) { + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from stream \"%s\"", RelationGetRelationName(rel)), + errhint("Try the COPY (SELECT ...) TO variant."))); + } else if (RELKIND_IS_SEQUENCE(rel->rd_rel->relkind)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from (large) sequence \"%s\"", RelationGetRelationName(rel)))); + else + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot copy from non-table relation \"%s\"", RelationGetRelationName(rel)))); + +} /* * Setup CopyState to read tuples from a table or a query for COPY TO. */ @@ -2572,39 +2609,7 @@ CopyState BeginCopyTo( bool flag = rel != NULL && rel->rd_rel->relkind != RELKIND_RELATION; if (flag) { - if (rel->rd_rel->relkind == RELKIND_VIEW) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from view \"%s\"", RelationGetRelationName(rel)), - errhint("Try the COPY (SELECT ...) TO variant."))); - else if (rel->rd_rel->relkind == RELKIND_CONTQUERY) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from contview \"%s\"", RelationGetRelationName(rel)), - errhint("Try the COPY (SELECT ...) TO variant."))); - else if (rel->rd_rel->relkind == RELKIND_MATVIEW) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from materialized view \"%s\"", RelationGetRelationName(rel)), - errhint("Try the COPY (SELECT ...) TO variant."))); - else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from foreign table \"%s\"", RelationGetRelationName(rel)), - errhint("Try the COPY (SELECT ...) TO variant."))); - } else if (rel->rd_rel->relkind == RELKIND_STREAM) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from stream \"%s\"", RelationGetRelationName(rel)), - errhint("Try the COPY (SELECT ...) TO variant."))); - } else if (RELKIND_IS_SEQUENCE(rel->rd_rel->relkind)) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from (large) sequence \"%s\"", RelationGetRelationName(rel)))); - else - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy from non-table relation \"%s\"", RelationGetRelationName(rel)))); + CopyToCheck(rel); } cstate = BeginCopy(false, rel, query, queryString, attnamelist, options); @@ -4209,6 +4214,7 @@ static uint64 CopyFrom(CopyState cstate) (void)MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); if (IS_PGXC_COORDINATOR) { +#ifdef ENABLE_MULTIPLE_NODES PG_TRY(); { is_EOF = !NextCopyFrom(cstate, econtext, values, nulls, &loaded_oid); @@ -4227,7 +4233,7 @@ static uint64 CopyFrom(CopyState cstate) } PG_END_TRY(); - +#endif if (unlikely(is_EOF)) break; } else { @@ -6446,66 +6452,21 @@ static void append_defvals(Datum* values, CopyState cstate) if (cstate->num_defaults <= 0) { return; } - /* if sql has default column, do with the line_buf and */ - /* make the col name and value one to one. 
*/ - ListCell* cur = NULL; - char *tmpLineBuf = pg_strdup(cstate->line_buf.data); - char *originHead = tmpLineBuf; - resetStringInfo(&(cstate->line_buf)); - char *token = NULL; - int attNums=0, tokenNum=0; - bool first = true; - - foreach (cur, cstate->attnumlist) { - attNums++; - } - - /* - * When copy from text or csv that one col is self-incremental col, - * and there are more columns in the text than the columns to be imported, - * you must set ignore_extra_data 'on'. In this case, we choose the number - * of columns we want from front to back in the text, discarding the last - * unwanted values. - */ - for (tokenNum=1, token = strsep(&tmpLineBuf, cstate->delim); - token != NULL && tokenNum <= attNums; - token = strsep(&tmpLineBuf, cstate->delim), ++tokenNum) { - if (first) { - appendBinaryStringInfo(&cstate->line_buf, token, strlen(token)); - first = false; - } else { - appendBinaryStringInfo(&cstate->line_buf, cstate->delim, strlen(cstate->delim)); - appendBinaryStringInfo(&cstate->line_buf, token, strlen(token)); - } - } - - /* - * If the text contains fewer columns of data than the columns to - * be imported and fill_missing_field is 'on', populate the data - * with delimiters. - */ - if (attNums - tokenNum >= 0) { - for (; tokenNum <= attNums; ++tokenNum) { - appendBinaryStringInfo(&cstate->line_buf, cstate->delim, strlen(cstate->delim)); - } - } - free(originHead); - originHead = NULL; - tmpLineBuf = NULL; + /* In binary format, the first two bytes indicate the number of columns */ + int binaryColBytesSize = 2; CopyStateData new_cstate = *cstate; int i; new_cstate.fe_msgbuf = makeStringInfo(); + if(IS_BINARY(cstate)) { + appendBinaryStringInfo(new_cstate.fe_msgbuf, cstate->line_buf.data, binaryColBytesSize); + } for (i = 0; i < cstate->num_defaults; i++) { int attindex = cstate->defmap[i]; Datum defvalue = values[attindex]; - if (!IS_BINARY(cstate)) { - CopySendString(&new_cstate, new_cstate.delim); - } - /* * For using the values in their output form, it is not sufficient * to just call its output function. 
The format should match @@ -6537,11 +6498,19 @@ static void append_defvals(Datum* values, CopyState cstate) false /* there's at least one user-supplied attribute */); else CopyAttributeOutText(&new_cstate, string); + CopySendString(&new_cstate, new_cstate.delim); } } - /* Append the generated default values to the user-supplied data-row */ - appendBinaryStringInfo(&cstate->line_buf, new_cstate.fe_msgbuf->data, new_cstate.fe_msgbuf->len); + if(IS_BINARY(cstate)) { + appendBinaryStringInfo(new_cstate.fe_msgbuf, cstate->line_buf.data + binaryColBytesSize, new_cstate.line_buf.len - binaryColBytesSize); + } else { + appendBinaryStringInfo(new_cstate.fe_msgbuf, cstate->line_buf.data, new_cstate.line_buf.len); + } + /* reset */ + resetStringInfo(&cstate->line_buf); + /* append all to line_buf */ + appendBinaryStringInfo(&cstate->line_buf, new_cstate.fe_msgbuf->data, new_cstate.fe_msgbuf->len); } #endif @@ -6596,10 +6565,14 @@ retry: if (cstate->mode == MODE_NORMAL) { if (cstate->filename && is_obs_protocol(cstate->filename)) { +#ifndef ENABLE_LITE_MODE if (getNextOBS(cstate)) { cstate->eol_type = EOL_UNKNOWN; goto retry; } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { if (getNextGDS(cstate)) { if (cstate->eol_type != EOL_UD) @@ -9265,12 +9238,16 @@ void bulkloadFuncFactory(CopyState cstate) switch (mode) { case MODE_NORMAL: /* for GDS oriented dist import */ if (is_obs_protocol(cstate->filename)) { +#ifndef ENABLE_LITE_MODE /* Attache working house routines for OBS oriented dist import */ func.initBulkLoad = initOBSModeState; func.endBulkLoad = endOBSModeBulkLoad; copyGetDataFunc = NULL; readlineFunc = CopyGetNextLineFromOBS; getNextCopyFunc = getNextOBS; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { /* Attache working house routines for GDS oriented dist import */ func.initBulkLoad = initNormalModeState; @@ -9336,6 +9313,7 @@ CopyState beginExport( cstate->writelineFunc = RemoteExportWriteOut; if (is_obs_protocol(filename)) { +#ifndef ENABLE_LITE_MODE /* Fetch OBS write only table related attribtues */ getOBSOptions(&cstate->obs_copy_options, options); @@ -9352,6 +9330,9 @@ CopyState beginExport( } initOBSModeState(cstate, object_path, tasklist); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { initNormalModeState(cstate, filename, tasklist); } @@ -9506,6 +9487,7 @@ void endExport(CopyState cstate) endRoachBulkLoad(cstate); } } else if (cstate->copy_dest == COPY_OBS) { +#ifndef ENABLE_LITE_MODE if (IS_PGXC_DATANODE) { if (cstate->outBuffer->len > 0) RemoteExportFlushData(cstate); @@ -9513,6 +9495,9 @@ void endExport(CopyState cstate) cstate->io_stream->Flush(); endOBSModeBulkLoad(cstate); } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else exportDeinitOutBuffer(cstate); MemoryContextDelete(cstate->rowcontext); @@ -10056,18 +10041,15 @@ static bool IfCopyLineMatchWhenPositionExpr(CopyState cstate, LoadWhenExpr *when return (whenpostion_strcmp(rawfield, rawfieldlen, when->val, strlen(when->val)) == 0); else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHEN oper error"))); - return false; } } else if (strlen(when->oper) == 2) { if (strncmp(when->oper, "<>", 2) == 0) return (whenpostion_strcmp(rawfield, rawfieldlen, when->val, strlen(when->val)) != 0); else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHEN oper error"))); - return false; } } else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHEN oper error"))); - return false; } return false; } @@ -10080,13 +10062,11 @@ static void 
CopyGetWhenExprAttFieldno(CopyState cstate, LoadWhenExpr *when, List } if (when->attname == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("WHEN no field name"))); - return; } if (attnamelist == NULL) { if (cstate->rel == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("WHEN no relation"))); - return; } tupDesc = RelationGetDescr(cstate->rel); for (int i = 0; i < tupDesc->natts; i++) { @@ -10111,7 +10091,6 @@ static void CopyGetWhenExprAttFieldno(CopyState cstate, LoadWhenExpr *when, List } ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("WHEN field name not find"))); } - return; } static bool IfCopyLineMatchWhenFieldExpr(CopyState cstate, LoadWhenExpr *when) @@ -10137,18 +10116,15 @@ static bool IfCopyLineMatchWhenFieldExpr(CopyState cstate, LoadWhenExpr *when) return (strcmp(cstate->raw_fields[attnum], when->val) == 0); else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHEN field oper error"))); - return false; } } else if (strlen(when->oper) == 2){ if (strncmp(when->oper, "<>", 2) == 0) return (strcmp(cstate->raw_fields[attnum], when->val) != 0); else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHEN field oper error"))); - return false; } } else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WHEN field oper error"))); - return false; } return false; } @@ -10200,13 +10176,11 @@ static int CopyGetColumnListIndex(CopyState cstate, List *attnamelist, const cha int index = 0; if (colname == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("Column name is NULL"))); - return InvalidAttrNumber; } if (attnamelist == NULL) { if (cstate->rel == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("Column list no relation"))); - return InvalidAttrNumber; } tupDesc = RelationGetDescr(cstate->rel); for (int i = 0; i < tupDesc->natts; i++) { diff --git a/src/gausskernel/optimizer/commands/dbcommands.cpp b/src/gausskernel/optimizer/commands/dbcommands.cpp index 99df84575..71d7c48ce 100644 --- a/src/gausskernel/optimizer/commands/dbcommands.cpp +++ b/src/gausskernel/optimizer/commands/dbcommands.cpp @@ -41,9 +41,10 @@ #include "catalog/pg_database.h" #include "catalog/pg_db_role_setting.h" #include "catalog/pg_job.h" +#include "catalog/pg_subscription.h" #include "catalog/pg_proc.h" #include "catalog/pg_tablespace.h" -#include "catalog/pg_subscription.h" +#include "catalog/pg_uid_fn.h" #include "catalog/pgxc_slice.h" #include "catalog/storage_xlog.h" #include "commands/comment.h" @@ -1102,6 +1103,9 @@ void dropdb(const char* dbname, bool missing_ok) if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for database %u", db_id))); + if (EnableGlobalSysCache()) { + g_instance.global_sysdbcache.DropDB(db_id, true); + } simple_heap_delete(pgdbrel, &tup->t_self); ReleaseSysCache(tup); @@ -1121,6 +1125,7 @@ void dropdb(const char* dbname, bool missing_ok) * Remove shared dependency references for the database. 
 */
     dropDatabaseDependencies(db_id);
+    DeleteDatabaseUidEntry(db_id);
 
     /*
      * Request an immediate checkpoint to flush all the dirty pages in share buffer
@@ -1287,6 +1292,16 @@ void RenameDatabase(const char* oldname, const char* newname)
     if (!HeapTupleIsValid(newtup))
         ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for database %u", db_id)));
     (void)namestrcpy(&(((Form_pg_database)GETSTRUCT(newtup))->datname), newname);
+
+    /*
+     * We have to do a GSC DropDB to invalidate the GSC content of that database even though we only
+     * rename it: renaming a database does not change the GSC content, but a name swap between two
+     * databases may cause a fake LSC cache hit, which brings about inconsistent data.
+     */
+    if (EnableGlobalSysCache()) {
+        g_instance.global_sysdbcache.DropDB(db_id, false);
+    }
+
     simple_heap_update(rel, &newtup->t_self, newtup);
     CatalogUpdateIndexes(rel, newtup);
@@ -1555,6 +1570,9 @@ static void movedb(const char* dbname, const char* tblspcname)
     newtuple = (HeapTuple) tableam_tops_modify_tuple(oldtuple, RelationGetDescr(pgdbrel), new_record,
         new_record_nulls, new_record_repl);
+    if (EnableGlobalSysCache()) {
+        g_instance.global_sysdbcache.DropDB(db_id, false);
+    }
     simple_heap_update(pgdbrel, &oldtuple->t_self, newtuple);
 
     /* Update indexes */
@@ -1803,6 +1821,9 @@ void AlterDatabase(AlterDatabaseStmt* stmt, bool isTopLevel)
     }
     newtuple = (HeapTuple) tableam_tops_modify_tuple(tuple, RelationGetDescr(rel), new_record,
         new_record_nulls, new_record_repl);
+    if (EnableGlobalSysCache() && privateobject != NULL) {
+        g_instance.global_sysdbcache.DropDB(HeapTupleGetOid(tuple), false);
+    }
     simple_heap_update(rel, &tuple->t_self, newtuple);
 
     /* Update indexes */
@@ -2377,7 +2398,7 @@ void xlog_db_create(Oid dstDbId, Oid dstTbSpcId, Oid srcDbId, Oid srcTbSpcId)
     }
 }
 
-void xlog_db_drop(Oid dbId, Oid tbSpcId)
+void do_db_drop(Oid dbId, Oid tbSpcId)
 {
     char* dst_path = GetDatabasePath(dbId, tbSpcId);
 
@@ -2397,7 +2418,7 @@
     /* Also, clean out any fsync requests that might be pending in md.c */
     ForgetDatabaseSyncRequests(dbId);
-
+
     /* Clean out the xlog relcache too */
     XLogDropDatabase(dbId);
 
@@ -2446,6 +2467,13 @@ void xlogRemoveRemainSegsByDropDB(Oid dbId, Oid tablespaceId)
     remainSegsLock.unLock();
 }
 
+void xlog_db_drop(XLogRecPtr lsn, Oid dbId, Oid tbSpcId)
+{
+    UpdateMinRecoveryPoint(lsn, false);
+    do_db_drop(dbId, tbSpcId);
+    xlogRemoveRemainSegsByDropDB(dbId, tbSpcId);
+}
+
 void dbase_redo(XLogReaderState* record)
 {
     uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
@@ -2455,8 +2483,7 @@
         xlog_db_create(xlrec->db_id, xlrec->tablespace_id, xlrec->src_db_id, xlrec->src_tablespace_id);
     } else if (info == XLOG_DBASE_DROP) {
         xl_dbase_drop_rec* xlrec = (xl_dbase_drop_rec*)XLogRecGetData(record);
-        xlog_db_drop(xlrec->db_id, xlrec->tablespace_id);
-        xlogRemoveRemainSegsByDropDB(xlrec->db_id, xlrec->tablespace_id);
+        xlog_db_drop(record->EndRecPtr, xlrec->db_id, xlrec->tablespace_id);
     } else
         ereport(PANIC, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("dbase_redo: unknown op code %hhu", info)));
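/*
 * The reworked drop-database redo path above now reads, in call order only
 * (all signatures as introduced in dbcommands.cpp by this patch):
 *
 *   dbase_redo(record)
 *     -> xlog_db_drop(record->EndRecPtr, xlrec->db_id, xlrec->tablespace_id)
 *          UpdateMinRecoveryPoint(lsn, false);          // advance the minimum recovery point first
 *          do_db_drop(dbId, tbSpcId);                   // formerly named xlog_db_drop
 *          xlogRemoveRemainSegsByDropDB(dbId, tbSpcId); // previously invoked by dbase_redo directly
 */
diff --git a/src/gausskernel/optimizer/commands/directory.cpp b/src/gausskernel/optimizer/commands/directory.cpp
index 0b2d60299..7e78109e6 100644
--- a/src/gausskernel/optimizer/commands/directory.cpp
+++ b/src/gausskernel/optimizer/commands/directory.cpp
@@ -200,7 +200,6 @@ void CreatePgDirectory(CreateDirectoryStmt* stmt)
     }
     tup = (HeapTuple) tableam_tops_modify_tuple(oldtup, tupDesc, values, nulls, replaces);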
simple_heap_update(rel, &tup->t_self, tup); - CatalogUpdateIndexes(rel, tup); ReleaseSysCache(oldtup); tableam_tops_free_tuple(tup); diff --git a/src/gausskernel/optimizer/commands/dropcmds.cpp b/src/gausskernel/optimizer/commands/dropcmds.cpp index aa094be72..b755ee061 100644 --- a/src/gausskernel/optimizer/commands/dropcmds.cpp +++ b/src/gausskernel/optimizer/commands/dropcmds.cpp @@ -45,6 +45,9 @@ static bool CheckObjectDropPrivilege(ObjectType removeType, Oid objectId) case OBJECT_FUNCTION: aclresult = pg_proc_aclcheck(objectId, GetUserId(), ACL_DROP); break; + case OBJECT_PACKAGE: + aclresult = pg_package_aclcheck(objectId, GetUserId(), ACL_DROP); + break; case OBJECT_SCHEMA: aclresult = pg_namespace_aclcheck(objectId, GetUserId(), ACL_DROP); break; @@ -63,6 +66,7 @@ static bool CheckObjectDropPrivilege(ObjectType removeType, Oid objectId) static void DropExtensionInListIsSupported(List* objname) { static const char *supportList[] = { + "drop", "postgis", "packages", #ifndef ENABLE_MULTIPLE_NODES diff --git a/src/gausskernel/optimizer/commands/explain.cpp b/src/gausskernel/optimizer/commands/explain.cpp index f675a88c9..f9686f447 100755 --- a/src/gausskernel/optimizer/commands/explain.cpp +++ b/src/gausskernel/optimizer/commands/explain.cpp @@ -24,6 +24,7 @@ #include "catalog/pg_obsscaninfo.h" #include "catalog/pg_type.h" #include "db4ai/create_model.h" +#include "db4ai/hyperparameter_validation.h" #include "commands/createas.h" #include "commands/defrem.h" #include "commands/prepare.h" @@ -35,6 +36,7 @@ #include "executor/node/nodeSetOp.h" #include "foreign/dummyserver.h" #include "foreign/fdwapi.h" +#include "instruments/generate_report.h" #include "nodes/print.h" #include "opfusion/opfusion_util.h" #include "opfusion/opfusion.h" @@ -75,6 +77,7 @@ #include "catalog/pgxc_node.h" #include "pgxc/pgxc.h" #endif +#include "db4ai/aifuncs.h" /* Thread local variables for plan_table. 
*/ THR_LOCAL bool OnlySelectFromPlanTable = false; @@ -139,7 +142,8 @@ static void show_expression( Node* node, const char* qlabel, PlanState* planstate, List* ancestors, bool useprefix, ExplainState* es); static void show_qual( List* qual, const char* qlabel, PlanState* planstate, List* ancestors, bool useprefix, ExplainState* es); -static void show_scan_qual(List* qual, const char* qlabel, PlanState* planstate, List* ancestors, ExplainState* es); +static void show_scan_qual(List *qual, const char *qlabel, PlanState *planstate, List *ancestors, ExplainState *es, + bool show_prefix = false); static void show_skew_optimization(const PlanState* planstate, ExplainState* es); template static void show_bloomfilter(Plan* plan, PlanState* planstate, List* ancestors, ExplainState* es); @@ -166,6 +170,7 @@ static void show_vechash_info(VecHashJoinState* hashstate, ExplainState* es); static void show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es); static void show_instrumentation_count(const char* qlabel, int which, const PlanState* planstate, ExplainState* es); static void show_removed_rows(int which, const PlanState* planstate, int idx, int smpIdx, int* removeRows); +static int check_integer_overflow(double var); static void show_foreignscan_info(ForeignScanState* fsstate, ExplainState* es); static void show_dfs_block_info(PlanState* planstate, ExplainState* es); static void show_detail_storage_info_text(Instrumentation* instr, StringInfo instr_info); @@ -174,12 +179,13 @@ static void show_storage_filter_info(PlanState* planstate, ExplainState* es); static void show_llvm_info(const PlanState* planstate, ExplainState* es); static void show_modifytable_merge_info(const PlanState* planstate, ExplainState* es); static void show_recursive_info(RecursiveUnionState* rustate, ExplainState* es); +static void show_startwith_dfx(StartWithOpState* rustate, ExplainState* es); static const char* explain_get_index_name(Oid indexId); static void ExplainIndexScanDetails(Oid indexid, ScanDirection indexorderdir, ExplainState* es); static void ExplainScanTarget(Scan* plan, ExplainState* es); static void ExplainModifyTarget(ModifyTable* plan, ExplainState* es); static void ExplainTargetRel(Plan* plan, Index rti, ExplainState* es); -static void show_on_duplicate_info(ModifyTableState* mtstate, ExplainState* es); +static void show_on_duplicate_info(ModifyTableState* mtstate, ExplainState* es, List* ancestors); #ifndef PGXC static void show_modifytable_info(ModifyTableState* mtstate, ExplainState* es); #endif /* PGXC */ @@ -663,9 +669,12 @@ static void ExplainOneQuery( */ DestReceiverTrainModel* dest_train_model = NULL; dest_train_model = (DestReceiverTrainModel*) CreateDestReceiver(DestTrainModel); - configure_dest_receiver_train_model(dest_train_model, (AlgorithmML) cm->algorithm, cm->model, queryString); + configure_dest_receiver_train_model(dest_train_model, CurrentMemoryContext, (AlgorithmML)cm->algorithm, + cm->model, queryString, true); - PlannedStmt* plan = plan_create_model(cm, queryString, params, (DestReceiver*)dest_train_model); + PlannedStmt *plan = + plan_create_model(cm, queryString, params, (DestReceiver *)dest_train_model, CurrentMemoryContext); + print_hyperparameters(DEBUG1, dest_train_model->hyperparameters); ExplainOnePlan(plan, into, es, queryString, dest, params); return; @@ -1119,6 +1128,13 @@ void ExplainOnePlan( /* Create textual dump of plan tree */ ExplainPrintPlan(es, queryDesc); + /* Print post-plan-tree explanation info, if there is any. 
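+     * es->post_str is filled via ExplainPropertyListPostPlanTree(), added later in this file.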
*/ + if (es->post_str != NULL) { + appendStringInfo(es->str, "%s\n", es->post_str->data); + DestroyStringInfo(es->post_str); + es->post_str = NULL; + } + /* for explain plan: after explained all nodes */ if (es->plan && es->planinfo != NULL) { es->planinfo->m_planTableData->set_plan_table_ids(queryDesc->plannedstmt->queryId, es); @@ -1408,7 +1424,7 @@ void ExplainOneQueryForStatistics(QueryDesc* queryDesc) u_sess->exec_cxt.under_auto_explain = false; u_sess->instr_cxt.global_instr = oldInstr; - AutoContextSwitch memSwitch(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_CBB)); + AutoContextSwitch memSwitch(g_instance.wlm_cxt->query_resource_track_mcxt); t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->query_plan = (char*)palloc0(es.str->len + 1 + 1); errno_t rc = memcpy_s(t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->query_plan, es.str->len, es.str->data, es.str->len); @@ -1529,13 +1545,172 @@ static void show_bucket_info(PlanState* planstate, ExplainState* es, bool is_pre } } +typedef struct PrintFlags { + bool print_number; + bool print_twodots; + bool print_comma; + bool print_prev; +} PrintFlags; + +static void set_print_flags(int i, bool showPrev, bool isLast, PrintFlags* flags) +{ + if (i == 0) { + flags->print_prev = false; + flags->print_number = true; + flags->print_twodots = false; + flags->print_comma = false; + } else if (showPrev) { + /* the last partition */ + if (isLast) { + flags->print_prev = false; + flags->print_number = true; + flags->print_twodots = true; + flags->print_comma = false; + } else { + flags->print_prev = false; + flags->print_number = false; + flags->print_twodots = false; + flags->print_comma = false; + } + } else { + /* the previous has not been printed */ + if (!flags->print_number) + flags->print_prev = true; + else + flags->print_prev = false; + + flags->print_number = true; + flags->print_twodots = false; + flags->print_comma = true; + } +} + +static void add_pruning_info_nums(StringInfo dest, PrintFlags flags, int partID, int partID_prev) +{ + /* first check whether print the previous partition number */ + if (flags.print_prev) { + appendStringInfoString(dest, ".."); + appendStringInfo(dest, "%d", partID_prev + 1); + } + + /* then print the current partition number */ + if (flags.print_twodots) { + appendStringInfoString(dest, ".."); + } else if (flags.print_comma) { + appendStringInfoString(dest, ","); + } + + if (flags.print_number) { + appendStringInfo(dest, "%d", partID + 1); + } +} + +/* + * Show number of subpartitions selected for each partition. + * If subpartitions is not pruned at all, show "ALL". + * The caller should call DestroyStringInfo to free allocated memory. 
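+ * For example, "1:1, 3:2" means that partition 1 keeps one subpartition and
+ * partition 3 keeps two after pruning; when every subpartition of every
+ * selected partition survives, the text collapses to "ALL".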
+ */
+static StringInfo get_subpartition_pruning_info(Scan* scanplan, List* rtable)
+{
+    PruningResult* pr = scanplan->pruningInfo;
+    StringInfo strif = makeStringInfo();
+    ListCell* lc = NULL;
+    bool all = true;
+    RangeTblEntry* rte = rt_fetch(scanplan->scanrelid, rtable);
+    Relation rel = heap_open(rte->relid, AccessShareLock);
+    List* subpartList = RelationGetSubPartitionOidListList(rel);
+    int idx = 0;
+    foreach (lc, pr->ls_selectedSubPartitions) {
+        idx++;
+        if (idx > list_length(subpartList)) {
+            break;
+        }
+        SubPartitionPruningResult* spr = (SubPartitionPruningResult*)lfirst(lc);
+        /* check if all subpartitions are selected */
+        int selected = list_length(spr->ls_selectedSubPartitions);
+        int count = list_length((List*)list_nth(subpartList, spr->partSeq));
+        all &= (selected == count);
+        /* save pruning map in strif temporarily */
+        appendStringInfo(strif, "%d:%d", spr->partSeq + 1, selected);
+        if (lc != list_tail(pr->ls_selectedSubPartitions)) {
+            appendStringInfo(strif, ", ");
+        }
+    }
+
+    if (all) {
+        resetStringInfo(strif);
+        appendStringInfo(strif, "ALL");
+    }
+
+    /* clean-ups */
+    ReleaseSubPartitionOidList(&subpartList);
+    heap_close(rel, AccessShareLock);
+    return strif;
+}
+/*
+ * Show subpartition pruning information in the form of:
+ *     Selected Partitions: 1,3
+ *     Selected Subpartitions: 1:1,3:1
+ * which means that the first subpartition of partitions 1 and 3 is selected.
+ * Note that pretty mode is not supported in this version, since subpartitions can only
+ * be created in a centralized deployment for now and pretty is only for stream plans.
+ */
+static void add_subpartition_pruning_info_text(PlanState* planstate, ExplainState* es)
+{
+    Scan* scanplan = (Scan*)planstate->plan;
+    PruningResult* pr = scanplan->pruningInfo;
+    if (pr->ls_selectedSubPartitions == NIL) {
+        return;
+    }
+
+    StringInfo dest = es->str; /* Consider perf_mode = normal only */
+
+    /* Apply indent properly */
+    if (es->wlm_statistics_plan_max_digit) {
+        appendStringInfoSpaces(dest, *es->wlm_statistics_plan_max_digit);
+        appendStringInfoString(dest, " | ");
+        appendStringInfoSpaces(dest, es->indent);
+    } else {
+        appendStringInfoSpaces(dest, es->indent * 2); /* 2 is the coefficient for text format indent */
+    }
+    appendStringInfo(dest, "Selected Subpartitions:  ");
+
+    /* Show the content */
+    if (scanplan->itrs <= 0) {
+        appendStringInfo(dest, "NONE");
+    } else if (scanplan->pruningInfo->expr != NULL) {
+        appendStringInfo(dest, "PART");
+    } else {
+        StringInfo strif = get_subpartition_pruning_info(scanplan, es->rtable);
+        appendStringInfoString(dest, strif->data);
+        DestroyStringInfo(strif);
+    }
+    appendStringInfoChar(dest, '\n');
+}
+
+static void add_subpartition_pruning_info_others(PlanState* planstate, ExplainState* es)
+{
+    Scan* scanplan = (Scan*)planstate->plan;
+    PruningResult* pr = scanplan->pruningInfo;
+    if (pr->ls_selectedSubPartitions == NIL) {
+        return;
+    }
+
+    if (scanplan->itrs <= 0) {
+        ExplainPropertyText("Selected Subpartitions", "NONE", es);
+    } else if (scanplan->pruningInfo->expr != NULL) {
+        ExplainPropertyText("Selected Subpartitions", "PART", es);
+    } else {
+        StringInfo strif = get_subpartition_pruning_info(scanplan, es->rtable);
+        ExplainPropertyText("Selected Subpartitions", strif->data, es);
+        DestroyStringInfo(strif);
+    }
+}
+
 static void show_pruning_info(PlanState* planstate, ExplainState* es, bool is_pretty)
 {
     Scan* scanplan = (Scan*)planstate->plan;
-    bool print_number = false;
-    bool print_twodots = false;
-    bool print_comma = false;
-    bool print_prev = false;
+    PrintFlags flags = {false,
false, false, false}; int partID = 0; int partID_prev = 0; @@ -1581,66 +1756,12 @@ static void show_pruning_info(PlanState* planstate, ExplainState* es, bool is_pr break; // set print flags for the current and the previouse partition number - if (i == 0) { - print_prev = false; - print_number = true; - print_twodots = false; - print_comma = false; - } else if (partID - partID_prev == 1) { - // the last partition - if (i == scanplan->itrs - 1) { - print_prev = false; - print_number = true; - print_twodots = true; - print_comma = false; - } else { - print_prev = false; - print_number = false; - print_twodots = false; - print_comma = false; - } - } else { - // the previous has not been printed - if (!print_number) - print_prev = true; - else - print_prev = false; - - print_number = true; - print_twodots = false; - print_comma = true; - } + set_print_flags(i, partID - partID_prev == 1, i == scanplan->itrs - 1, &flags); if (is_pretty) { - // first check whether print the previous partition number - if (print_prev) { - appendStringInfoString(es->planinfo->m_detailInfo->info_str, ".."); - appendStringInfo(es->planinfo->m_detailInfo->info_str, "%d", partID_prev + 1); - } - - // then print the current partition number - if (print_twodots) - appendStringInfoString(es->planinfo->m_detailInfo->info_str, ".."); - else if (print_comma) - appendStringInfoString(es->planinfo->m_detailInfo->info_str, ","); - - if (print_number) - appendStringInfo(es->planinfo->m_detailInfo->info_str, "%d", partID + 1); + add_pruning_info_nums(es->planinfo->m_detailInfo->info_str, flags, partID, partID_prev); } else { - // first check whether print the previous partition number - if (print_prev) { - appendStringInfoString(es->str, ".."); - appendStringInfo(es->str, "%d", partID_prev + 1); - } - - // then print the current partition number - if (print_twodots) - appendStringInfoString(es->str, ".."); - else if (print_comma) - appendStringInfoString(es->str, ","); - - if (print_number) - appendStringInfo(es->str, "%d", partID + 1); + add_pruning_info_nums(es->str, flags, partID, partID_prev); } i++; } @@ -1649,6 +1770,10 @@ static void show_pruning_info(PlanState* planstate, ExplainState* es, bool is_pr appendStringInfoChar(es->planinfo->m_detailInfo->info_str, '\n'); else appendStringInfoChar(es->str, '\n'); + + if (scanplan->itrs > 0 && scanplan->pruningInfo->expr == NULL) { + add_subpartition_pruning_info_text(planstate, es); + } } else { if (scanplan->itrs <= 0) { ExplainPropertyText("Selected Partitions", "NONE", es); @@ -1677,57 +1802,17 @@ static void show_pruning_info(PlanState* planstate, ExplainState* es, bool is_pr break; // set print flags for the current and the previouse partition number - if (i == 0) { - print_prev = false; - print_number = true; - print_twodots = false; - print_comma = false; - } else if (partID - partID_prev == 1) { - // the last partition - if (i == scanplan->itrs - 1) { - print_prev = false; - print_number = true; - print_twodots = true; - print_comma = false; - } else { - print_prev = false; - print_number = false; - print_twodots = false; - print_comma = false; - } - } else { - // the previous has not been printed - if (!print_number) { - print_prev = true; - } else { - print_prev = false; - } + set_print_flags(i, partID - partID_prev == 1, i == scanplan->itrs - 1, &flags); - print_number = true; - print_twodots = false; - print_comma = true; - } + add_pruning_info_nums(strif, flags, partID, partID_prev); - // first check whether print the previous partition number - if (print_prev) { - 
appendStringInfoString(strif, ".."); - appendStringInfo(strif, "%d", partID_prev + 1); - } - - // then print the current partition number - if (print_twodots) - appendStringInfoString(strif, ".."); - else if (print_comma) - appendStringInfoString(strif, ","); - - if (print_number) - appendStringInfo(strif, "%d", partID + 1); i++; } /*print out the partition numbers string*/ ExplainPropertyText("Selected Partitions", strif->data, es); pfree_ext(strif->data); pfree_ext(strif); + add_subpartition_pruning_info_others(planstate, es); } } } @@ -1788,7 +1873,45 @@ static void ExplainNodePartition(const Plan* plan, ExplainState* es) if (flag == 0) { appendStringInfo(es->str, "Iterations: %d", ((PartIterator*)plan)->itrs); } - appendStringInfoChar(es->str, '\n'); +} + +static bool GetSubPartitionIterations(const Plan* plan, const ExplainState* es, int* cnt) +{ + *cnt = 0; + const Plan* curPlan = plan; + switch (nodeTag(curPlan->lefttree)) { + case T_RowToVec: { + RowToVec* rowToVecPlan = (RowToVec*)curPlan->lefttree; + Plan* scanPlan = (Plan*)rowToVecPlan->plan.lefttree; + if (!(IsA(scanPlan, Scan) || IsA(scanPlan, SeqScan) || IsA(scanPlan, IndexOnlyScan) || + IsA(scanPlan, IndexScan) || IsA(scanPlan, BitmapHeapScan) || IsA(scanPlan, TidScan))) { + break; + } + curPlan = &rowToVecPlan->plan; + /* fallthrough */ + } + case T_SeqScan: + case T_IndexScan: + case T_IndexOnlyScan: + case T_BitmapIndexScan: + case T_BitmapHeapScan: + case T_CStoreScan: + case T_TidScan: { + PruningResult* pr = ((Scan*)curPlan->lefttree)->pruningInfo; + if (pr == NULL || pr->ls_selectedSubPartitions == NIL || pr->expr != NULL) { + return false; + } + ListCell* lc = NULL; + foreach (lc, pr->ls_selectedSubPartitions) { + SubPartitionPruningResult* spr = (SubPartitionPruningResult*)lfirst(lc); + *cnt += list_length(spr->ls_selectedSubPartitions); + } + return true; + } + default: + return false; + } + return false; /* Syntactic sugar */ } #ifndef ENABLE_MULTIPLE_NODES @@ -1960,9 +2083,11 @@ static void ExplainNode( case T_WorkTableScan: case T_ForeignScan: case T_VecForeignScan: - case T_GradientDescentState: ExplainScanTarget((Scan*)plan, es); break; + case T_TrainModel: + appendStringInfo(es->str, " - %s", sname); + break; case T_ExtensiblePlan: if (((Scan*)plan)->scanrelid > 0) ExplainScanTarget((Scan*)plan, es); @@ -2462,7 +2587,7 @@ static void ExplainNode( } es->indent--; } else - show_scan_qual((List*)action->qual, "Update Cond", planstate, ancestors, es); + show_scan_qual((List*)action->qual, "Update Cond", planstate, ancestors, es, true); } else if (!action->matched) { if (mt->remote_insert_plans) { appendStringInfoSpaces(es->str, es->indent * 2); @@ -2475,7 +2600,7 @@ static void ExplainNode( } es->indent--; } else - show_scan_qual((List*)action->qual, "Insert Cond", planstate, ancestors, es); + show_scan_qual((List*)action->qual, "Insert Cond", planstate, ancestors, es, true); } } @@ -2484,7 +2609,7 @@ static void ExplainNode( ModifyTableState* mtstate = (ModifyTableState*)planstate; if (mtstate->mt_upsert != NULL && mtstate->mt_upsert->us_action != UPSERT_NONE && mtstate->resultRelInfo->ri_NumIndices > 0) { - show_on_duplicate_info(mtstate, es); + show_on_duplicate_info(mtstate, es, ancestors); } /* non-merge cases */ foreach (elt, mt->remote_plans) { @@ -2522,8 +2647,18 @@ static void ExplainNode( break; case T_StartWithOp: show_startwith_pseudo_entries(planstate, ancestors, es); + show_startwith_dfx((StartWithOpState*)planstate, es); break; case T_SeqScan: + show_tablesample(plan, planstate, ancestors, es); + 
if (!((SeqScan*)plan)->scanBatchMode) { + show_scan_qual(plan->qual, "Filter", planstate, ancestors, es); + if (plan->qual) { + show_instrumentation_count("Rows Removed by Filter", 1, planstate, es); + } + } + break; + case T_CStoreScan: #ifdef ENABLE_MULTIPLE_NODES case T_TsStoreScan: @@ -2747,6 +2882,14 @@ static void ExplainNode( case T_RecursiveUnion: show_recursive_info((RecursiveUnionState*)planstate, es); break; + case T_RowToVec: + if (IsA(plan->lefttree, SeqScan) && ((SeqScan*)plan->lefttree)->scanBatchMode) { + show_scan_qual(plan->lefttree->qual, "Filter", planstate, ancestors, es); + if (plan->lefttree->qual) { + show_instrumentation_count("Rows Removed by Filter", 1, planstate->lefttree, es); + } + } + break; default: break; @@ -2805,7 +2948,7 @@ static void ExplainNode( Instrumentation* instr = NULL; if (es->detail) { ExplainOpenGroup("Cpus In Detail", "Cpus In Detail", false, es); - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1; for (int i = 0; i < u_sess->instr_cxt.global_instr->getInstruNodeNum(); i++) { #ifdef ENABLE_MULTIPLE_NODES char* node_name = PGXCNodeGetNodeNameFromId(i, PGXC_NODE_DATANODE); @@ -2913,6 +3056,12 @@ static void ExplainNode( appendStringInfoSpaces(es->str, es->indent * 2); } ExplainNodePartition(plan, es); + int subPartCnt = 0; + if (GetSubPartitionIterations(plan, es, &subPartCnt)) { + appendStringInfo(es->str, ", Sub Iterations: %d", subPartCnt); + } + appendStringInfoChar(es->str, '\n'); + } else { es->planinfo->m_detailInfo->set_plan_name(); @@ -2922,6 +3071,10 @@ static void ExplainNode( } } else { ExplainPropertyInteger("Iterations", ((PartIterator*)plan)->itrs, es); + int subPartCnt = 0; + if (GetSubPartitionIterations(plan, es, &subPartCnt)) { + ExplainPropertyInteger("Sub Iterations", subPartCnt, es); + } } break; @@ -3069,6 +3222,9 @@ runnext: if (strcmp(pt_operation, "INDEX") == 0 && pt_index_name != NULL && pt_index_owner != NULL) es->planinfo->m_planTableData->set_plan_table_objs( planstate->plan->plan_node_id, pt_index_name, pt_operation, pt_index_owner); + + /* 4. set cost and cardinality */ + es->planinfo->m_planTableData->set_plan_table_cost_card(plan->plan_node_id, plan->total_cost, plan->plan_rows); } } @@ -3094,6 +3250,11 @@ static void CalculateProcessedRows( Plan* plan = planstate->plan; switch (nodeTag(plan)) { case T_SeqScan: + if (!((SeqScan*)plan)->scanBatchMode) { + show_removed_rows(1, planstate, idx, smpIdx, &removed_rows); + *processed_rows += removed_rows; + } + break; case T_CStoreScan: #ifdef ENABLE_MULTIPLE_NODES case T_TsStoreScan: @@ -3330,11 +3491,13 @@ static void show_qual( /* * Show a qualifier expression for a scan plan node */ -static void show_scan_qual(List* qual, const char* qlabel, PlanState* planstate, List* ancestors, ExplainState* es) +static void show_scan_qual(List *qual, const char *qlabel, PlanState *planstate, List *ancestors, ExplainState *es, + bool show_prefix) { bool useprefix = false; - useprefix = (IsA(planstate->plan, SubqueryScan) || IsA(planstate->plan, VecSubqueryScan) || es->verbose); + useprefix = + (show_prefix || IsA(planstate->plan, SubqueryScan) || IsA(planstate->plan, VecSubqueryScan) || es->verbose); show_qual(qual, qlabel, planstate, ancestors, useprefix, es); } @@ -4312,7 +4475,7 @@ static void show_detail_filenum_info(const PlanState* planstate, ExplainState* e int datanode_size = 0; int i = 0; int j = 0; - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? 
planstate->plan->dop : 1; int count_dn_writefile = 0; if (u_sess->instr_cxt.global_instr) @@ -4469,7 +4632,7 @@ static void show_detail_execute_info(const PlanState* planstate, ExplainState* e int datanode_size = 0; int i = 0; int j = 0; - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1; if (u_sess->instr_cxt.global_instr) datanode_size = u_sess->instr_cxt.global_instr->getInstruNodeNum(); @@ -5127,6 +5290,39 @@ static void show_vechash_info(VecHashJoinState* hashstate, ExplainState* es) } } +static void show_startwith_dfx(StartWithOpState* swstate, ExplainState* es) +{ + List* result = NIL; + IterationStats* iters = &(swstate->iterStats); + + /* return if not verbose or not actually executed */ + if (!es->verbose || iters->currentStartTime.tv_sec == 0) { + return; + } + + const char *qlabel = "Start With Iteration Statistics"; + bool rotated = (iters->totalIters > SW_LOG_ROWS_FULL) ? true : false; + int offset = rotated ? iters->totalIters % SW_LOG_ROWS_HALF : 0; + StringInfo si = makeStringInfo(); + + for (int i = 0; i < SW_LOG_ROWS_FULL && i < iters->totalIters; i++) { + int ri = (i >= SW_LOG_ROWS_HALF && rotated) ? + SW_LOG_ROWS_HALF + (i + offset) % SW_LOG_ROWS_HALF : i; + double total_time = (iters->endTimeBuf[ri].tv_sec - iters->startTimeBuf[ri].tv_sec) / 0.001 + + (iters->endTimeBuf[ri].tv_usec - iters->startTimeBuf[ri].tv_usec) * 0.001; + appendStringInfo( + si, "\nIteration: %d, Row(s): %ld, Time Cost: %.3lf ms", + iters->levelBuf[ri], iters->rowCountBuf[ri], total_time); + result = lappend(result, pstrdup(si->data)); + resetStringInfo(si); + } + DestroyStringInfo(si); + + ExplainPropertyListPostPlanTree(qlabel, result, es); + + list_free_deep(result); +} + static void show_recursive_info(RecursiveUnionState* rustate, ExplainState* es) { PlanState* planstate = (PlanState*)rustate; @@ -5221,7 +5417,7 @@ static void show_datanode_buffers(ExplainState* es, PlanState* planstate) { Instrumentation* instr = NULL; int nodeNum = u_sess->instr_cxt.global_instr->getInstruNodeNum(); - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1; int i = 0; int j = 0; @@ -5288,7 +5484,7 @@ static void show_analyze_buffers(ExplainState* es, const PlanState* planstate, S instr_time blk_write_time_max, blk_write_time_min; bool is_execute = false; Instrumentation* instr = NULL; - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1; INSTR_TIME_SET_ZERO(blk_read_time_max); INSTR_TIME_SET_ZERO(blk_write_time_max); @@ -5874,7 +6070,7 @@ static void show_detail_cpu(ExplainState* es, PlanState* planstate) bool is_null = false; int i = 0; int j = 0; - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1; if (u_sess->instr_cxt.global_instr->getInstruNodeNum() > 0) { for (i = 0; i < u_sess->instr_cxt.global_instr->getInstruNodeNum(); i++) { @@ -6807,7 +7003,7 @@ static void get_oper_time(ExplainState* es, PlanState* planstate, const Instrume static void show_stream_send_time(ExplainState* es, const PlanState* planstate) { bool isSend = false; - int dop = planstate->plan->dop; + int dop = planstate->plan->parallel_enabled ? 
planstate->plan->dop : 1;
 
     /* stream send time will print only es->detail is true and t_thrd.explain_cxt.explain_perf_mode is not normal */
     if (t_thrd.explain_cxt.explain_perf_mode == EXPLAIN_NORMAL || es->detail == false)
         return;
@@ -6880,7 +7076,7 @@ static void show_datanode_time(ExplainState* es, PlanState* planstate)
     bool executed = true;
     int i = 0;
     int j = 0;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1;
 
     if (es->detail) {
         if (es->format == EXPLAIN_FORMAT_TEXT)
@@ -7189,7 +7385,7 @@ static void show_instrumentation_count(const char* qlabel, int which, const Plan
 {
     double nfiltered = 0.0;
     Instrumentation* instr = NULL;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1;
 
     if (!es->analyze || !planstate->instrument)
         return;
@@ -7246,19 +7442,38 @@ static void show_removed_rows(int which, const PlanState* planstate, int idx, in
         u_sess->instr_cxt.global_instr->isFromDataNode(planstate->plan->plan_node_id)) {
         instr = u_sess->instr_cxt.global_instr->getInstrSlot(idx, planstate->plan->plan_node_id, smpIdx);
         if (instr != NULL && instr->nloops > 0) {
-            if (which == 1)
-                *removeRows = instr->nfiltered1;
-            else if (which == 2)
-                *removeRows = instr->nfiltered2;
+            if (which == 1) {
+                *removeRows = check_integer_overflow(instr->nfiltered1);
+            } else if (which == 2) {
+                *removeRows = check_integer_overflow(instr->nfiltered2);
+            }
         }
     } else {
-        if (which == 1)
-            *removeRows = planstate->instrument->nfiltered1;
-        else if (which == 2)
-            *removeRows = planstate->instrument->nfiltered2;
+        if (which == 1) {
+            *removeRows = check_integer_overflow(planstate->instrument->nfiltered1);
+        } else if (which == 2) {
+            *removeRows = check_integer_overflow(planstate->instrument->nfiltered2);
+        }
     }
 }
 
+/*
+ * Check for possible integer overflow when assigning a double variable to an int variable.
+ */
+static int check_integer_overflow(double var)
+{
+    if (var > (double)PG_INT32_MAX || var < (double)PG_INT32_MIN) {
+        ereport(ERROR,
+            (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmodule(MOD_OPT),
+                errmsg("Integer overflow."),
+                errdetail("Integer overflow occurred when assigning a double variable to an int variable."),
+                errcause("Tried to assign a double variable to an int variable."),
+                erraction("Please check whether the double variable exceeds the representation range of int.")));
+    }
+    return (int)var;
+}
+
 /*
  * Show extra information for a ForeignScan node.
  */
@@ -7387,7 +7602,7 @@ static void show_analyze_dfs_info(const PlanState* planstate, ExplainState* es)
     Instrumentation* instr = NULL;
     int i = 0;
     int j = 0;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1;
     double total_local_block = 0.0;
     double total_remote_block = 0.0;
     double total_datacache_block_count = 0.0;
@@ -7455,7 +7670,7 @@ static void show_dfs_block_info(PlanState* planstate, ExplainState* es)
     int i = 0;
     int j = 0;
     bool has_info = false;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1;
 
     if (es->detail) {
         for (i = 0; i < u_sess->instr_cxt.global_instr->getInstruNodeNum(); i++) {
@@ -7718,7 +7933,7 @@ static void show_analyze_storage_info_of_dfs(const PlanState* planstate, Explain
     uint64 total_dynamicfiles = 0;
     uint64 total_staticFiles = 0;
     Instrumentation* instr = NULL;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ?
planstate->plan->dop : 1;
     for (i = 0; i < u_sess->instr_cxt.global_instr->getInstruNodeNum(); i++) {
         for (j = 0; j < dop; j++) {
             instr = u_sess->instr_cxt.global_instr->getInstrSlot(i, planstate->plan->plan_node_id, j);
@@ -7763,7 +7978,7 @@ static void show_analyze_storage_info_of_logft(const PlanState* planstate, Expla
     uint64 total_filename = 0;
     uint64 total_refuted_by_filename = 0;
     uint64 total_incompleted = 0;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1;
     Instrumentation* instr = NULL;
     int node_idx = 0;
@@ -7842,7 +8057,7 @@ static void show_storage_filter_info(PlanState* planstate, ExplainState* es)
     int i = 0;
     int j = 0;
     bool has_info = false;
-    int dop = planstate->plan->dop;
+    int dop = planstate->plan->parallel_enabled ? planstate->plan->dop : 1;
 
     if (es->detail) {
         for (i = 0; i < u_sess->instr_cxt.global_instr->getInstruNodeNum(); i++) {
@@ -8237,7 +8452,7 @@ static void ExplainTargetRel(Plan* plan, Index rti, ExplainState* es)
 /*
  * Show extra information for upsert info
  */
-static void show_on_duplicate_info(ModifyTableState* mtstate, ExplainState* es)
+static void show_on_duplicate_info(ModifyTableState* mtstate, ExplainState* es, List* ancestors)
 {
     ResultRelInfo* resultRelInfo = mtstate->resultRelInfo;
     IndexInfo* indexInfo = NULL;
@@ -8265,6 +8480,21 @@
     if (idxNames != NIL) {
         ExplainPropertyList("Conflict Arbiter Indexes", idxNames, es);
     }
+
+    /* Show ON DUPLICATE KEY UPDATE WHERE quals info if specified */
+    if (mtstate->mt_upsert->us_updateWhere != NIL) {
+        ModifyTable *node = (ModifyTable*)mtstate->ps.plan;
+        Node* expr = NULL;
+        if (IsA(node->upsertWhere, List)) {
+            expr = (Node*)make_ands_explicit((List*)node->upsertWhere);
+        } else {
+            expr = node->upsertWhere;
+        }
+        List* clauseList = list_make1(expr);
+        show_upper_qual(clauseList, "Conflict Filter", &mtstate->ps, ancestors, es);
+        list_free(clauseList);
+        show_instrumentation_count("Rows Removed by Conflict Filter", 1, &mtstate->ps, es);
+    }
 }
 #ifndef PGXC
 /*
@@ -8387,6 +8617,27 @@ static void ExplainPrettyList(List* data, ExplainState* es)
     appendStringInfoChar(es->planinfo->m_verboseInfo->info_str, '\n');
 }
 
+/*
+ * Explain a property just as ExplainPropertyList does, but append it to
+ * the end of the plan tree.
+ * We reuse ExplainPropertyList here by temporarily setting and recovering
+ * es->str such that the explanation is printed into es->post_str instead.
+ * "data" is a list of C strings.
+ */
+void ExplainPropertyListPostPlanTree(const char* qlabel, List* data, ExplainState* es)
+{
+    if (es->post_str == NULL) {
+        es->post_str = makeStringInfo();
+    }
+    StringInfo str_backup = es->str;
+    int indent_backup = es->indent;
+    es->str = es->post_str;
+    es->indent = 0;
+    ExplainPropertyList(qlabel, data, es);
+    es->str = str_backup;
+    es->indent = indent_backup;
+}
+
 /*
  * Explain a property, such as sort keys or targets, that takes the form of
  * a list of unlabeled items. "data" is a list of C strings.
@@ -9910,6 +10161,7 @@ void PlanTable::set_plan_name()
         appendStringInfoSpaces(info_str, 8);
     }
 }
+template void PlanTable::set_plan_name();
 
 /* --------------------------------function for explain plan--------------------- */
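/*
 * A minimal usage sketch for ExplainPropertyListPostPlanTree() above,
 * mirroring its caller show_startwith_dfx() (all names from this patch; the
 * literal strings are illustrative):
 *
 *     List* lines = NIL;
 *     lines = lappend(lines, pstrdup("Iteration: 1, Row(s): 42, Time Cost: 0.310 ms"));
 *     ExplainPropertyListPostPlanTree("Start With Iteration Statistics", lines, es);
 *     list_free_deep(lines);  // the buffered text is flushed later by ExplainOnePlan()
 */
/*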
+ * Parameters: + * @in plan_cost: plan cost. + * @in plan_cardinality: rows accessed by current operation. + * Return: void + */ +void PlanTable::set_plan_table_cost_card(int plan_node_id, double plan_cost, double plan_cardinality) +{ + m_plan_table[plan_node_id - 1]->m_datum->cost = plan_cost; + m_plan_table[plan_node_id - 1]->m_datum->cardinality = plan_cardinality; + m_plan_table[plan_node_id - 1]->m_isnull[PT_COST] = false; + m_plan_table[plan_node_id - 1]->m_isnull[PT_CARDINALITY] = false; +} + /* * Description: Call heap_insert to insert all nodes tuples of the plan into table. * Parameters: @@ -10321,7 +10588,11 @@ void PlanTable::insert_plan_table_tuple() if (m_plan_table[i]->m_datum->projection != NULL) new_record[PT_PROJECTION] = CStringGetTextDatum(m_plan_table[i]->m_datum->projection->data); + new_record[PT_COST] = Float8GetDatum(m_plan_table[i]->m_datum->cost); + new_record[PT_CARDINALITY] = Float8GetDatum(m_plan_table[i]->m_datum->cardinality); + tuple = (HeapTuple)heap_form_tuple(plan_table_des, new_record, m_plan_table[i]->m_isnull); + /* * Insert new record into plan_table table */ @@ -10818,3 +11089,220 @@ static void show_unique_check_info(PlanState *planstate, ExplainState *es) } } } + +void ExplainDatumProperty(char const *name, Datum const value, Oid const type, ExplainState* es) +{ + Datum output_datum = 0; + char const *output = nullptr; + + if (type_is_array_domain(type)) { + output_datum = OidFunctionCall2(F_ARRAY_OUT, value, type); + output = DatumGetCString(output_datum); + ExplainPropertyText(name, output, es); + } else { + switch (type) { + case INT1OID: + ExplainPropertyInteger(name, DatumGetInt8(value), es); + break; + case INT2OID: + ExplainPropertyInteger(name, DatumGetInt16(value), es); + break; + case INT4OID: + ExplainPropertyInteger(name, DatumGetInt32(value), es); + break; + case INT8OID: + ExplainPropertyLong(name, DatumGetInt64(value), es); + break; + case FLOAT4OID: + ExplainPropertyFloat(name, DatumGetFloat4(value), 6, es); + break; + case FLOAT8OID: + ExplainPropertyFloat(name, DatumGetFloat8(value), 10, es); + break; + default: + output = Datum_to_string(value, type, type == InvalidOid); + ExplainPropertyText(name, output, es); + break; + } + } +} +/* + * this function explains a group of model information (recursively if necessary) + * as provided by the list + */ +ListCell* ExplainTrainInfoGroup(List const* group, ExplainState *es, bool const group_opened) +{ + ListCell *lc = nullptr; + TrainingInfo *train_info = nullptr; + + /* + * in a single traversal of the group there will (potentially) be + * recursive calls that will print nested sub-groups (by properly + * opening and closing them) + */ + foreach(lc, group) { + train_info = lfirst_node(TrainingInfo, lc); + if (train_info->open_group) { + /* + * if the cell for some reason has both opening and closing booleans set we ignore it + */ + if (train_info->close_group) + continue; + + if (!train_info->name) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_SYNTAX_ERROR), + errmsg("the name of a group cannot be empty (when opening)"))); + + // open the group at the output + ExplainOpenGroup(train_info->name, train_info->name, true, es); + // here comes a recursive call to print the group + List next_group; + next_group.length = 0; // not needed for us, not properly set (you are warned) + next_group.type = group->type; + next_group.head = lnext(lc); // next group continues with the first element of the group + next_group.tail = group->tail; + + /* + * once we have printed a whole group we 
need to fast-forward to the first element after the group.
+             * the returned ListCell is the closing element of the group we just printed (foreach will do the rest)
+             */
+            lc = ExplainTrainInfoGroup(&next_group, es, true);
+            /*
+             * if there are more opening groups than closing ones, at some point the list will be
+             * exhausted and a recursive call will be unable to finish cleanly
+             */
+            if (!lc)
+                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("group of name \"%s\" was not closed", train_info->name)));
+
+            auto current_train_info = lfirst_node(TrainingInfo, lc);
+
+            /*
+             * the names of the opening and closing groups have to match, otherwise error.
+             * observe that both strings are non-empty (otherwise an error was raised before this)
+             */
+            if (strcmp(train_info->name, current_train_info->name) != 0)
+                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("group of name \"%s\" was not properly closed, closing group name was \"%s\"",
+                        train_info->name, current_train_info->name)));
+            continue;
+        } else if (train_info->close_group) {
+            /*
+             * this is in charge of validating that closing a group is correct (the group
+             * must have been opened by the parent call)
+             */
+            if (!group_opened)
+                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("badly formed group of name \"%s\" of training information (opening group not found)",
+                        train_info->name)));
+
+            if (!train_info->name)
+                ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("the name of a group cannot be empty (when closing)")));
+
+            ExplainCloseGroup(train_info->name, train_info->name, true, es);
+            // we return the closing ListCell
+            break;
+        } else {
+            /*
+             * output the next object of the group
+             */
+            ExplainDatumProperty(train_info->name, train_info->value, train_info->type, es);
+        }
+    }
+    return lc;
+}
+void do_model_explain(ExplainState *es, const Model *model)
+{
+    ListCell *lc = nullptr;
+    ExplainBeginOutput(es);
+
+    /*
+     * there is a common part of each model that can be formatted now
+     */
+    ExplainOpenGroup("General information", "General information", true, es);
+    ExplainPropertyText("Name", model->model_name, es);
+    ExplainPropertyText("Algorithm", algorithm_ml_to_string(model->algorithm), es);
+    if (model->sql != NULL)
+        ExplainPropertyText("Query", model->sql, es);
+
+    char const *return_type_str = nullptr;
+    switch (model->return_type) {
+        case BOOLOID:
+            return_type_str = prediction_type_to_string(TYPE_BOOL);
+            break;
+        case BYTEAOID:
+            return_type_str = prediction_type_to_string(TYPE_BYTEA);
+            break;
+        case INT4OID:
+            return_type_str = prediction_type_to_string(TYPE_INT32);
+            break;
+        case INT8OID:
+            return_type_str = prediction_type_to_string(TYPE_INT64);
+            break;
+        case FLOAT4OID:
+            return_type_str = prediction_type_to_string(TYPE_FLOAT32);
+            break;
+        case FLOAT8OID:
+            return_type_str = prediction_type_to_string(TYPE_FLOAT64);
+            break;
+        case FLOAT8ARRAYOID:
+            return_type_str = prediction_type_to_string(TYPE_FLOAT64ARRAY);
+            break;
+        case NUMERICOID:
+            return_type_str = prediction_type_to_string(TYPE_NUMERIC);
+            break;
+        case TEXTOID:
+            return_type_str = prediction_type_to_string(TYPE_TEXT);
+            break;
+        case VARCHAROID:
+            return_type_str = prediction_type_to_string(TYPE_VARCHAR);
+            break;
+        default:
+            ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    errmsg("invalid return type (%d) for model of name \"%s\"", model->return_type,
+                    model->model_name)));
+    }
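+    /* report the prediction return type resolved by the switch above */
+    ExplainPropertyText("Return type", 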
return_type_str, es); + ExplainPropertyFloat("Pre-processing time", model->pre_time_secs, 6, es); + ExplainPropertyFloat("Execution time", model->exec_time_secs, 6, es); + ExplainPropertyInteger("Processed tuples", model->processed_tuples, es); + ExplainPropertyInteger("Discarded tuples", model->discarded_tuples, es); + ExplainCloseGroup("General information", "General information", true, es); + + /* + * hyper-parameters are now output + */ + ExplainOpenGroup("Hyper-parameters", "Hyper-parameters", true, es); + foreach(lc, model->hyperparameters) { + auto hyperparameter = lfirst_node(Hyperparameter, lc); + ExplainDatumProperty(hyperparameter->name, hyperparameter->value, hyperparameter->type, es); + } + ExplainCloseGroup("Hyper-parameters", "Hyper-parameters", true, es); + + /* + * scores are now output + */ + ExplainOpenGroup("Scores", "Scores", true, es); + foreach(lc, model->scores) { + auto training_score = lfirst_node(TrainingScore, lc); + ExplainPropertyFloat(training_score->name, training_score->value, 10, es); + } + ExplainCloseGroup("Scores", "Scores", true, es); + + /* + * Model-dependent information + */ + AlgorithmAPI *algo_api = get_algorithm_api(model->algorithm); + if (algo_api->explain && model->data.version > DB4AI_MODEL_V00) { + List *train_data = algo_api->explain(algo_api, &model->data, model->return_type); + if (train_data) { + ExplainOpenGroup("Train data", "Train data", true, es); + ExplainTrainInfoGroup(train_data, es, false); + ExplainCloseGroup("Train data", "Train data", true, es); + } + } + + /* emit closing boilerplate */ + ExplainEndOutput(es); +} diff --git a/src/gausskernel/optimizer/commands/extension.cpp b/src/gausskernel/optimizer/commands/extension.cpp index fa322af85..0caa0cf92 100644 --- a/src/gausskernel/optimizer/commands/extension.cpp +++ b/src/gausskernel/optimizer/commands/extension.cpp @@ -870,7 +870,7 @@ static void execute_extension_script(Oid extensionOid, ExtensionControlFile* con t_sql, CStringGetTextDatum("^\\\\echo.*$"), CStringGetTextDatum(""), - CStringGetTextDatum("ng")); + CStringGetTextDatum("mg")); /* * If it's not relocatable, substitute the target schema name for diff --git a/src/gausskernel/optimizer/commands/foreigncmds.cpp b/src/gausskernel/optimizer/commands/foreigncmds.cpp index 7fa2a410a..3a06f520e 100644 --- a/src/gausskernel/optimizer/commands/foreigncmds.cpp +++ b/src/gausskernel/optimizer/commands/foreigncmds.cpp @@ -1040,7 +1040,11 @@ void AlterForeignServer(AlterForeignServerStmt* stmt) } else if (0 == pg_strcasecmp(typeName, HDFS)) { FEATURE_NOT_PUBLIC_ERROR("HDFS is not yet supported."); }else if (0 == pg_strcasecmp(typeName, OBS)) { +#ifndef ENABLE_LITE_MODE (void)dfs::InvalidOBSConnectorCache(srvId); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else if (0 == pg_strcasecmp(typeName, DUMMY_SERVER)) { InvalidDummyServerCache(srvId); } @@ -1081,7 +1085,11 @@ void RemoveForeignServerById(Oid srvId) } else if (0 == pg_strcasecmp(typeName, HDFS)) { FEATURE_NOT_PUBLIC_ERROR("HDFS is not yet supported."); } else if (0 == pg_strcasecmp(typeName, OBS)) { +#ifndef ENABLE_LITE_MODE (void)dfs::InvalidOBSConnectorCache(srvId); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else if (0 == pg_strcasecmp(typeName, DUMMY_SERVER)) { InvalidDummyServerCache(srvId); } diff --git a/src/gausskernel/optimizer/commands/functioncmds.cpp b/src/gausskernel/optimizer/commands/functioncmds.cpp index 3eef01061..37b69a6d7 100644 --- a/src/gausskernel/optimizer/commands/functioncmds.cpp +++ 
b/src/gausskernel/optimizer/commands/functioncmds.cpp
@@ -53,8 +53,11 @@
 #include "catalog/pg_proc_fn.h"
 #include "catalog/pg_type.h"
 #include "catalog/pg_type_fn.h"
+#include "catalog/gs_db_privilege.h"
+#include "catalog/namespace.h"
 #include "commands/defrem.h"
 #include "commands/proclang.h"
+#include "commands/typecmds.h"
 #include "executor/executor.h"
 #include "gs_policy/gs_policy_masking.h"
 #include "miscadmin.h"
@@ -153,6 +156,17 @@ static void compute_return_type(
             ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
                 errmsg("table of type %s typelem does not exist", TypeNameToString(returnType))));
         }
+
+        if (((Form_pg_type)GETSTRUCT(typtup))->typcategory == TYPCATEGORY_TABLEOF_VARCHAR ||
+            ((Form_pg_type)GETSTRUCT(typtup))->typcategory == TYPCATEGORY_TABLEOF_INTEGER) {
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmodule(MOD_PLSQL),
+                    errmsg("table of index type is not supported as function return type."),
+                    errdetail("N/A"),
+                    errcause("feature not supported"),
+                    erraction("check the definition of the function")));
+        }
     } else {
         rettype = typeTypeId(typtup);
     }
@@ -256,10 +270,15 @@ static void examine_parameter_list(List* parameters, Oid languageOid, const char
         ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("parameterCount is invalid %d", parameterCount)));
     }
-    inTypes = (Oid*)palloc(parameterCount * sizeof(Oid));
-    allTypes = (Datum*)palloc(parameterCount * sizeof(Datum));
-    paramModes = (Datum*)palloc(parameterCount * sizeof(Datum));
-    paramNames = (Datum*)palloc0(parameterCount * sizeof(Datum));
+
+    /* parameterCount will be zero when someone creates a function without parameters. */
+    if (parameterCount != 0) {
+        inTypes = (Oid*)palloc(parameterCount * sizeof(Oid));
+        allTypes = (Datum*)palloc(parameterCount * sizeof(Datum));
+        paramModes = (Datum*)palloc(parameterCount * sizeof(Datum));
+        paramNames = (Datum*)palloc0(parameterCount * sizeof(Datum));
+    }
+
     *parameterDefaults = NIL;

     /* may need a pstate for parse analysis of default exprs */
@@ -308,18 +327,7 @@ static void examine_parameter_list(List* parameters, Oid languageOid, const char
                     errmsg("argument type %s is only a shell", TypeNameToString(t))));
             }

-            /* if table of type, find its array type */
-            if (((Form_pg_type)GETSTRUCT(typtup))->typtype == TYPTYPE_TABLEOF) {
-                toid = ((Form_pg_type)GETSTRUCT(typtup))->typelem;
-                if (!OidIsValid(toid)) {
-                    ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
-                        errmsg("table of type %s typelem does not exist", TypeNameToString(t))));
-                    toid = InvalidOid; /* keep compiler quiet */
-                }
-            } else {
-                toid = typeTypeId(typtup);
-            }
-
+            toid = typeTypeId(typtup);
             ReleaseSysCache(typtup);
         } else if (!OidIsValid(toid)) {
             int rc = 0;
@@ -482,6 +490,8 @@ static void examine_parameter_list(List* parameters, Oid languageOid, const char
     free_parsestate(pstate);

     /* Now construct the proper outputs as needed */
+
+    /* if there are no parameters (parameterCount is 0), we make an InvalidOidVector */
     *parameterTypes = buildoidvector(inTypes, inCount);

     if (outCount > 0 || varCount > 0) {
@@ -903,7 +913,25 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o
     bool shippable = false;
     bool package = false;
     bool proIsProcedure = stmt->isProcedure;
-    if (PLSQL_SECURITY_DEFINER) {
+    if (!OidIsValid(pkg_oid)) {
+        u_sess->plsql_cxt.debug_query_string = pstrdup(queryString);
+    }
+    if (PLSQL_SECURITY_DEFINER && u_sess->attr.attr_common.upgrade_mode == 0 && OidIsValid(pkg_oid)) {
+        bool isnull = false;
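+        /*
+         * functions created inside a package follow the package's
+         * SECURITY DEFINER property recorded in gs_package.pkgsecdef
+         */
+        HeapTuple pkgTuple = SearchSysCache1(PACKAGEOID, 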
ObjectIdGetDatum(pkg_oid)); + Datum pkgSecDefDatum = SysCacheGetAttr(PACKAGEOID, pkgTuple, Anum_gs_package_pkgsecdef, &isnull); + if (isnull) { + security = false; + } else { + bool pkgSecDef = DatumGetBool(pkgSecDefDatum); + if (!pkgSecDef) { + security = false; + } else { + security = true; + } + } + ReleaseSysCache(pkgTuple); + } else if (PLSQL_SECURITY_DEFINER && u_sess->attr.attr_common.upgrade_mode == 0) { security = true; } probin_str = NULL; @@ -913,8 +941,6 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o if (rc == PLPGSQL_COMPILE_PACKAGE) { u_sess->plsql_cxt.procedure_start_line = stmt->startLineNumber; u_sess->plsql_cxt.procedure_first_line = stmt->firstLineNumber; - } else { - u_sess->plsql_cxt.sourceText = pstrdup(queryString); } u_sess->plsql_cxt.isCreateFunction = true; /* @@ -946,12 +972,7 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o errcause("The schema in the package does not support object creation.."), erraction("Please create an object in another schema."))); } - - /* Check we have creation rights in target namespace */ - aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); - + bool anyResult = CheckCreatePrivilegeInNamespace(namespaceId, GetUserId(), CREATE_ANY_FUNCTION); if (namespaceId == PG_PUBLIC_NAMESPACE && !isRelSuperuser()) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), @@ -965,7 +986,7 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o pgxc_lock_for_utility_stmt((Node*)stmt, namespaceId == u_sess->catalog_cxt.myTempNamespace); if (u_sess->attr.attr_sql.enforce_a_behavior) { - proowner = GetUserIdFromNspId(namespaceId); + proowner = GetUserIdFromNspId(namespaceId, false, anyResult); if (!OidIsValid(proowner)) proowner = GetUserId(); @@ -973,9 +994,7 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o isalter = true; if (isalter) { - aclresult = pg_namespace_aclcheck(namespaceId, proowner, ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + (void)CheckCreatePrivilegeInNamespace(namespaceId, proowner, CREATE_ANY_FUNCTION); } } else { proowner = GetUserId(); @@ -998,7 +1017,7 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o : 0))); languageOid = HeapTupleGetOid(languageTuple); - + #ifdef ENABLE_MULTIPLE_NODES if (languageOid == JavalanguageId) { /* @@ -1143,6 +1162,9 @@ void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oid pkg_o stmt->isPrivate); u_sess->plsql_cxt.procedure_start_line = 0; u_sess->plsql_cxt.procedure_first_line = 0; + if (u_sess->plsql_cxt.debug_query_string != NULL && !OidIsValid(pkg_oid)) { + pfree_ext(u_sess->plsql_cxt.debug_query_string); + } } /* @@ -1400,7 +1422,7 @@ void RemovePackageById(Oid pkgOid, bool isBody) } } -void dropFunctionByPackageOid(Oid package_oid) +void DeleteFunctionByPackageOid(Oid package_oid) { if (!OidIsValid(package_oid)) { return; @@ -1510,19 +1532,16 @@ void RenameFunction(List* name, List* argtypes, const char* newname) } } else { Datum packageOidDatum; - Oid packageOid = InvalidOid; bool isNull = false; packageOidDatum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_packageid, &isNull); - packageOid = ObjectIdGetDatum(packageOidDatum); - if (!OidIsValid(packageOid)) { - packageOidDatum = 
ObjectIdGetDatum(InvalidOid); - } - Datum allargtypes = ProcedureGetAllArgTypes(tup, &isNull); - if (SearchSysCacheExists4(PROCALLARGS, + Datum argmodes = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_proargmodes, &isNull); + if (SearchSysCacheExistsForProcAllArgs( CStringGetDatum(newname), allargtypes, - ObjectIdGetDatum(namespaceOid), packageOidDatum)) { + ObjectIdGetDatum(namespaceOid), + packageOidDatum, + argmodes)) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), errmsg("function %s already exists in schema \"%s\"", @@ -1608,8 +1627,9 @@ void AlterFunctionOwner(List* name, List* argtypes, Oid newOwnerId) /* * Change function owner by Oid + * in byPackage: means whether called by Alter Package Owner */ -void AlterFunctionOwner_oid(Oid procOid, Oid newOwnerId) +void AlterFunctionOwner_oid(Oid procOid, Oid newOwnerId, bool byPackage) { Relation rel; HeapTuple tup; @@ -1635,7 +1655,9 @@ void AlterFunctionOwner_oid(Oid procOid, Oid newOwnerId) if (!HeapTupleIsValid(tup)) /* should not happen */ ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for function %u", procOid))); - checkAllowAlter(tup); + if (!byPackage) { + checkAllowAlter(tup); + } AlterFunctionOwner_internal(rel, tup, newOwnerId); /* Recode time of change the funciton owner. */ @@ -1644,6 +1666,43 @@ void AlterFunctionOwner_oid(Oid procOid, Oid newOwnerId) heap_close(rel, NoLock); } +/* + * Change function owner by package Oid, called by Alter Package Owner + */ +void AlterFunctionOwnerByPkg(Oid packageOid, Oid newOwnerId) +{ + if (!OidIsValid(packageOid)) { + return; + } + + HeapTuple oldtup; + ScanKeyData entry; + ScanKeyInit(&entry, Anum_pg_proc_packageid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(packageOid)); + Relation pg_proc_rel = heap_open(ProcedureRelationId, RowExclusiveLock); + SysScanDesc scan = systable_beginscan(pg_proc_rel, InvalidOid, false, NULL, 1, &entry); + while ((oldtup = systable_getnext(scan)) != NULL) { + HeapTuple proctup = heap_copytuple(oldtup); + Oid funcOid = InvalidOid; + if (HeapTupleIsValid(proctup)) { + funcOid = HeapTupleGetOid(proctup); + if (!OidIsValid(funcOid)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmodule(MOD_PLSQL), + errmsg("cache lookup failed for function id %u", funcOid))); + } + } else { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmodule(MOD_PLSQL), + errmsg("cache lookup failed for function %u", funcOid))); + } + AlterFunctionOwner_oid(funcOid, newOwnerId, true); + } + systable_endscan(scan); + heap_close(pg_proc_rel, NoLock); +} + /* * @Description: Alter function owner. * @in rel: pg_proc relation. 
@@ -1725,6 +1784,8 @@ static void AlterFunctionOwner_internal(Relation rel, HeapTuple tup, Oid newOwne /* Update owner dependency reference */ changeDependencyOnOwner(ProcedureRelationId, procOid, newOwnerId); + /* Update owner of function type build in pg_type */ + AlterTypeOwnerByFunc(procOid, newOwnerId); } ReleaseSysCache(tup); @@ -2502,10 +2563,13 @@ Oid AlterFunctionNamespace_oid(Oid procOid, Oid nspOid) } Datum allargtypes = ProcedureGetAllArgTypes(tup, &isNull); - if (SearchSysCacheExists4(PROCALLARGS, + Datum argmodes = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_proargmodes, &isNull); + if (SearchSysCacheExistsForProcAllArgs( CStringGetDatum(NameStr(proc->proname)), allargtypes, - ObjectIdGetDatum(nspOid), packageOidDatum)) { + ObjectIdGetDatum(nspOid), + packageOidDatum, + argmodes)) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_FUNCTION), errmsg("function \"%s\" already exists in schema \"%s\"", diff --git a/src/gausskernel/optimizer/commands/gds_stream.cpp b/src/gausskernel/optimizer/commands/gds_stream.cpp index cd55888af..15258b860 100644 --- a/src/gausskernel/optimizer/commands/gds_stream.cpp +++ b/src/gausskernel/optimizer/commands/gds_stream.cpp @@ -315,7 +315,8 @@ retry: return 0; #endif ereport(ERROR, - (errcode_for_socket_access(), errmsg("Unexpected EOF on GDS connection \"%s\": %m", m_uri->ToString()))); + (errcode(ERRCODE_CONNECTION_RESET_BY_PEER), + errmsg("Unexpected EOF on GDS connection \"%s\": %m", m_uri->ToString()))); return -1; } else if (nread == 0) diff --git a/src/gausskernel/optimizer/commands/indexcmds.cpp b/src/gausskernel/optimizer/commands/indexcmds.cpp index c76387f3a..004858d90 100644 --- a/src/gausskernel/optimizer/commands/indexcmds.cpp +++ b/src/gausskernel/optimizer/commands/indexcmds.cpp @@ -32,6 +32,9 @@ #include "catalog/pg_partition_fn.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" +#include "catalog/gs_db_privilege.h" +#include "catalog/namespace.h" +#include "catalog/pg_namespace.h" #include "commands/comment.h" #include "commands/dbcommands.h" #include "commands/defrem.h" @@ -99,6 +102,7 @@ static void AddIndexColumnForGpi(IndexStmt* stmt); static void AddIndexColumnForCbi(IndexStmt* stmt); static void CheckIndexParamsNumber(IndexStmt* stmt); static bool CheckIdxParamsOwnPartKey(Relation rel, const List* indexParams); +static bool CheckWhetherForbiddenFunctionalIdx(Oid relationId, Oid namespaceId, List* indexParams); /* * CheckIndexCompatible @@ -302,7 +306,7 @@ static void CheckPartitionUniqueKey(Relation rel, int2vector *partKey, IndexStmt if (partKey->dim1 > numberOfAttributes) { ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("unique index columns must contain the partition key"))); + errmsg("unique local index columns must contain all the partition keys"))); } for (j = 0; j < partKey->dim1; j++) { @@ -310,13 +314,102 @@ static void CheckPartitionUniqueKey(Relation rel, int2vector *partKey, IndexStmt Form_pg_attribute att_tup = rel->rd_att->attrs[attNum - 1]; if (!columnIsExist(rel, att_tup, stmt->indexParams)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("unique index columns must contain the partition key and collation must be default " - "collation"))); + ereport( + ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("unique local index columns must contain all the partition keys and collation must be default " + "collation"))); } } } +static void CheckPartitionIndexDef(IndexStmt* stmt, List *partitionTableList) +{ + List *partitionIndexdef = 
(List*)stmt->partClause;
+
+    int partitionLens = list_length(partitionTableList);
+    int idfLens = list_length(partitionIndexdef);
+
+    if (partitionLens > idfLens) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                errmsg("Not enough index partitions defined")));
+    } else if (partitionLens < idfLens) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                errmsg("number of partitions of LOCAL index must equal that of the "
+                       "underlying table")));
+    }
+
+    return;
+}
+
+/*
+ * Extract the subpartition index definitions when CREATE INDEX specifies subpartitions.
+ */
+static List *ExtractSubPartitionIdf(IndexStmt* stmt, List *partitionList,
+    List *subPartitionList, List *partitionIndexdef)
+{
+    ListCell *lc1 = NULL;
+    ListCell *lc2 = NULL;
+    int subpartitionLens = 0;
+    int expectedSubLens = 0;
+    List *subPartitionIdf = NIL;
+
+    partitionIndexdef = (List*)stmt->partClause;
+    List *backupIdxdef = (List *)copyObject(partitionIndexdef);
+    int partitionLen = list_length(partitionIndexdef);
+
+    /* First, a fast check of the partition count */
+    if (partitionLen != partitionList->length) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                errmsg("Wrong number of partitions when creating an index that specifies subpartitions.")));
+    }
+
+    /* Next, check the specified subpartitions against the metadata in pg_partition */
+    foreach(lc1, subPartitionList) {
+        List *subPartitions = (List *)lfirst(lc1);
+        int subLens = list_length(subPartitions);
+
+        foreach(lc2, partitionIndexdef) {
+            RangePartitionindexDefState *idxDef = (RangePartitionindexDefState*)lfirst(lc2);
+            int idfLens = list_length(idxDef->sublist);
+
+            if (subLens == idfLens) {
+                subPartitionIdf = lappend(subPartitionIdf, copyObject(idxDef->sublist));
+                partitionIndexdef = list_delete(partitionIndexdef, lfirst(lc2));
+                break;
+            }
+        }
+
+        expectedSubLens += subPartitions->length;
+    }
+
+    /* Fail if some entries of partitionIndexdef were left unmatched */
+    if (partitionIndexdef != NULL) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                errmsg("Cannot match subpartitions when creating subpartition indexes.")));
+    }
+
+    /* Count the total number of subpartitions in the definition */
+    foreach(lc1, backupIdxdef) {
+        RangePartitionindexDefState *def = (RangePartitionindexDefState*)lfirst(lc1);
+        subpartitionLens += list_length(def->sublist);
+    }
+
+    /* Check the total subpartition number */
+    if (subpartitionLens != expectedSubLens) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                errmsg("Wrong number of subpartitions when creating an index that specifies subpartitions.")));
+    }
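+
+    /* backupIdxdef was only needed to count subpartitions; release it before returning */
+    list_free_ext(backupIdxdef);
+    return subPartitionIdf;
+}
+
 /*
  * DefineIndex
  *		Creates a new index.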
@@ -429,6 +522,15 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al if (strcmp(stmt->accessMethod, "ubtree") != 0) { elog(ERROR, "%s index is not supported for ustore", (stmt->accessMethod)); } + if (stmt->deferrable == true) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_EXECUTOR), + errmsg("Ustore table does not support to set deferrable."), + errdetail("N/A"), + errcause("feature not supported"), + erraction("check constraints of columns"))); + } } if (strcmp(stmt->accessMethod, "ubtree") == 0 && @@ -448,6 +550,12 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al } } + if (CheckWhetherForbiddenFunctionalIdx(relationId, namespaceId, stmt->indexParams)) { + ereport(ERROR, + (errmodule(MOD_EXECUTOR), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("not supported to create a functional index on this table."))); + } if (RELATION_IS_PARTITIONED(rel)) { if (stmt->unique && (!stmt->isPartitioned || is_alter_table)) { @@ -542,9 +650,10 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al } } - if (list_length(stmt->indexIncludingParams) > 0 && !skip_build) { + if (list_length(stmt->indexIncludingParams) > 0 && strcmp(stmt->accessMethod, "ubtree") != 0) { ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("create index does not support have include parameter"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("create a index with include columns is only supported in ubtree"))); } /* @@ -619,11 +728,7 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al * bootstrapping, since permissions machinery may not be working yet. */ if (check_rights && !IsBootstrapProcessingMode()) { - AclResult aclresult; - - aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + (void)CheckCreatePrivilegeInNamespace(namespaceId, GetUserId(), CREATE_ANY_INDEX); } /* @@ -663,17 +768,39 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al } if (PointerIsValid(stmt->partClause)) { - partitionIndexdef = (List*)stmt->partClause; + if (RelationIsSubPartitioned(rel)) { + ListCell* lc1 = NULL; + ListCell* lc2 = NULL; + List* subPartitions = NIL; - /* index partition's number must no less than table partition's number */ - if (partitionTableList->length > ((List*)stmt->partClause)->length) { - ereport( - ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("Not enough index partition defined"))); - } else if (partitionTableList->length < ((List*)stmt->partClause)->length) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("number of partitions of LOCAL index must equal that of the " - "underlying table"))); + partitionIndexdef = (List*)stmt->partClause; + subPartitionIndexDef = ExtractSubPartitionIdf(stmt, + partitionTableList, + subPartitionTupleList, + partitionIndexdef); + + /* Fill partitionOidList */ + foreach (lc1, partitionTableList) { + HeapTuple tuple = (HeapTuple)lfirst(lc1); + partitionOidList = lappend_oid(partitionOidList, HeapTupleGetOid(tuple)); + } + + /* Fill subPartitionOidList */ + foreach (lc1, subPartitionTupleList) { + subPartitions = (List*)lfirst(lc1); + + List* subPartitionOids = NIL; + foreach (lc2, subPartitions) { + HeapTuple tuple = (HeapTuple)lfirst(lc2); + subPartitionOids = lappend_oid(subPartitionOids, HeapTupleGetOid(tuple)); 
+ } + subPartitionOidList = lappend(subPartitionOidList, subPartitionOids); + } + } else { + partitionIndexdef = (List*)stmt->partClause; + + /* index partition's number must no less than table partition's number */ + CheckPartitionIndexDef(stmt, partitionTableList); } } else { if (!RelationIsSubPartitioned(rel)) { @@ -927,14 +1054,6 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al } } - TableCreateSupport indexCreateSupport{false,false,false,false,false,false}; - ListCell* cell = NULL; - foreach (cell, stmt->options) { - DefElem* defElem = (DefElem*)lfirst(cell); - SetOneOfCompressOption(defElem->defname, &indexCreateSupport); - } - - CheckCompressOption(&indexCreateSupport); /* * Parse AM-specific options, convert to text array form, validate. */ @@ -1699,6 +1818,15 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al (void)VirtualXactLock(old_snapshots[i], true); } + if (IS_PGXC_COORDINATOR) { + /* + * Last thing to do is release the session-level lock on the parent table. + */ + UnlockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock); + + return indexRelationId; + } + /* * Index can now be marked valid -- update its pg_index entry */ @@ -3668,6 +3796,8 @@ static void CheckIndexParamsNumber(IndexStmt* stmt) { errmsg("cannot use more than %d columns in a cross-bucket index", INDEX_MAX_KEYS - 1))); } } + + static bool CheckIdxParamsOwnPartKey(Relation rel, const List* indexParams) { int2vector* partKey = ((RangePartitionMap*)rel->partMap)->partitionKey; @@ -3680,3 +3810,131 @@ static bool CheckIdxParamsOwnPartKey(Relation rel, const List* indexParams) } return true; } + + +static bool +CheckWhetherForbiddenFunctionalIdx(Oid relationId, Oid namespaceId, List* indexParams) +{ + ListCell* lc = NULL; + bool isFunctionalIdx = false; + + foreach (lc, indexParams) { + IndexElem* elem = (IndexElem*)lfirst(lc); + if (PointerIsValid(elem) && PointerIsValid(elem->expr) + && nodeTag(elem->expr) == T_FuncExpr) { + isFunctionalIdx = true; + break; + } + } + /* + * If the index is not a functional index, the function will return false directly. + * */ + if (likely((!isFunctionalIdx))) { + return false; + } + + /* Currently, there is only one element in the forbidden list. + * Hence we can determine it using the following method briefly. + * */ + if (unlikely(namespaceId == PG_DB4AI_NAMESPACE)) { + return true; + } + + return false; +} + + +#ifdef ENABLE_MULTIPLE_NODES +/* + * @Description : Mark index indisvalid. 
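+ *               (inner function used by CREATE INDEX CONCURRENTLY (CIC); executed on all CNs)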
+ * @in : schemaname, idxname
+ * @out : None
+ */
+Datum gs_mark_indisvalid(PG_FUNCTION_ARGS)
+{
+    if ((IS_PGXC_COORDINATOR && !IsConnFromCoord()) || IS_PGXC_DATANODE) {
+        ereport(ERROR, (errmodule(MOD_FUNCTION), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+            errmsg("Unsupported function for users."),
+            errdetail("This is an inner function used for CIC."),
+            errcause("This function is not supported for users to execute directly."),
+            erraction("Please do not execute this function.")));
+    } else {
+        char* schname = PG_GETARG_CSTRING(0);
+        char* idxname = PG_GETARG_CSTRING(1);
+        if (idxname == NULL || strlen(idxname) == 0) {
+            ereport(ERROR, (errmodule(MOD_INDEX), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("Invalid input index name."),
+                errdetail("The input index name is null."),
+                errcause("Empty or missing parameters were input."),
+                erraction("Please input the correct index name.")));
+            PG_RETURN_VOID();
+        }
+        mark_indisvalid_all_cns(schname, idxname);
+    }
+    PG_RETURN_VOID();
+}
+
+/* Mark the given index indisvalid, used for create index concurrently */
+void mark_indisvalid_local(char* schname, char* idxname)
+{
+    Oid idx_oid = InvalidOid;
+    if (schname == NULL || strlen(schname) == 0) {
+        idx_oid = RangeVarGetRelid(makeRangeVar(NULL, idxname, -1), NoLock, false);
+    } else {
+        idx_oid = RangeVarGetRelid(makeRangeVar(schname, idxname, -1), NoLock, false);
+    }
+
+    if (!OidIsValid(idx_oid)) {
+        ereport(ERROR, (errmodule(MOD_INDEX), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("The given schema or index name cannot be found."),
+            errdetail("Cannot find a valid oid for the given index name."),
+            errcause("An incorrect schema or index name was input."),
+            erraction("Check the input schema and index name.")));
+    }
+
+    if (IS_PGXC_COORDINATOR && !IsConnFromCoord() && GetTopTransactionIdIfAny() != InvalidTransactionId) {
+        CommitTransactionCommand();
+        StartTransactionCommand();
+    }
+
+    Relation rel = heap_open(IndexGetRelation(idx_oid, false), ShareUpdateExclusiveLock);
+    LockRelId heaprelid = rel->rd_lockInfo.lockRelId;
+    heap_close(rel, NoLock);
+
+    LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
+    index_set_state_flags(idx_oid, INDEX_CREATE_SET_VALID);
+    /*
+     * The pg_index update will cause backends (including this one) to update
+     * relcache entries for the index itself, but we should also send a
+     * relcache inval on the parent table to force replanning of cached plans.
+     * Otherwise existing sessions might fail to use the new index where it
+     * would be useful. (Note that our earlier commits did not create reasons
+     * to replan; so relcache flush on the index itself was sufficient.) 
+     */
+    CacheInvalidateRelcacheByRelid(heaprelid.relId);
+
+    UnlockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
+}
+
+void mark_indisvalid_all_cns(char* schname, char* idxname)
+{
+    if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) {
+        ParallelFunctionState* state = NULL;
+        StringInfoData buf;
+        initStringInfo(&buf);
+        appendStringInfo(&buf, "select gs_mark_indisvalid(");
+        if (schname == NULL || strlen(schname) == 0) {
+            appendStringInfo(&buf, "'', %s)", quote_literal_cstr(idxname));
+        } else {
+            appendStringInfo(&buf, "%s, %s)", quote_literal_cstr(schname), quote_literal_cstr(idxname));
+        }
+
+        state = RemoteFunctionResultHandler(buf.data, NULL, NULL, true, EXEC_ON_COORDS, true);
+        FreeParallelFunctionState(state);
+        pfree_ext(buf.data);
+    }
+
+    mark_indisvalid_local(schname, idxname);
+}
+
+#endif
diff --git a/src/gausskernel/optimizer/commands/lockcmds.cpp b/src/gausskernel/optimizer/commands/lockcmds.cpp
index 750fd0f8e..19402c370 100644
--- a/src/gausskernel/optimizer/commands/lockcmds.cpp
+++ b/src/gausskernel/optimizer/commands/lockcmds.cpp
@@ -65,6 +65,11 @@ void LockTableCommand(LockStmt* lockstmt)
                 errmsg("permission denied: \"%s\" is a system catalog", rv->relname),
                 errhint("use xc_maintenance_mode to lock this system catalog")));
     }
+    /* During redistribution, support automatically sending term to the lock holder. */
+    if (lockstmt->cancelable && !u_sess->attr.attr_sql.enable_cluster_resize) {
+        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Only supported for the gs_redis application")));
+    }
+    t_thrd.xact_cxt.enable_lock_cancel = lockstmt->cancelable;

     reloid = RangeVarGetRelidExtended(rv,
         lockstmt->mode,
@@ -80,6 +85,7 @@ void LockTableCommand(LockStmt* lockstmt)
         if (recurse)
             LockTableRecurse(reloid, lockstmt->mode, lockstmt->nowait);
     }
+    t_thrd.xact_cxt.enable_lock_cancel = false;
 }

 /*
diff --git a/src/gausskernel/optimizer/commands/matview.cpp b/src/gausskernel/optimizer/commands/matview.cpp
index bb37d0591..2114585af 100755
--- a/src/gausskernel/optimizer/commands/matview.cpp
+++ b/src/gausskernel/optimizer/commands/matview.cpp
@@ -1193,7 +1193,7 @@ static void ExecCreateMatInc(QueryDesc*queryDesc, Query *query, Relation matview
         if (rel->rd_tam_type == TAM_USTORE) {
             tmpTuple = UHeapToHeap(rel->rd_att, (UHeapTuple)tuple);
             tmpTuple->t_xid_base = ((UHeapTuple)tuple)->t_xid_base;
-            HeapTupleSetXmin(tmpTuple, ((UHeapTuple)tuple)->disk_tuple->xid);
+            tmpTuple->t_data->t_choice.t_heap.t_xmin = ((UHeapTuple)tuple)->disk_tuple->xid;
             tuple = tmpTuple;
         }
         HeapTuple copyTuple = heap_copytuple(tuple);
@@ -2078,6 +2078,9 @@ void insert_into_mlog_table(Relation rel, Oid mlogid, HeapTuple tuple, ItemPoint

     for (i = 0, j = 0; i < relAttnumAll; i++) {
         if (relDesc->attrs[i]->attisdropped) {
+            ereport(DEBUG5,
+                (errmodule(MOD_OPT), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("Skip dropped column %d on base table when inserting into mlog table", i)));
             continue;
         }
         values[MlogAttributeNum + j] = rel_values[i];
@@ -2446,6 +2449,12 @@ void check_basetable(Query *query, bool isCreateMatview, bool isIncremental)
         }
 #endif

+        if (rte->is_ustore) {
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("materialized view is not supported in ustore yet")));
+        }
+
         Relation rel = heap_open(rte->relid, AccessShareLock);
         if (RelationisEncryptEnable(rel)) {
             ereport(ERROR, (errmodule(MOD_SEC_TDE), errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
diff --git a/src/gausskernel/optimizer/commands/packagecmds.cpp b/src/gausskernel/optimizer/commands/packagecmds.cpp
index a4986b694..c6e6d8486 100644
--- 
a/src/gausskernel/optimizer/commands/packagecmds.cpp +++ b/src/gausskernel/optimizer/commands/packagecmds.cpp @@ -35,6 +35,7 @@ #include "knl/knl_variable.h" #include "access/heapam.h" +#include "access/tableam.h" #include "access/transam.h" #include "catalog/dependency.h" #include "catalog/indexing.h" @@ -42,10 +43,14 @@ #include "catalog/objectaccess.h" #include "catalog/pg_control.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_object.h" #include "catalog/gs_package.h" #include "catalog/gs_package_fn.h" +#include "catalog/gs_db_privilege.h" #include "commands/defrem.h" +#include "commands/typecmds.h" #include "pgxc/pgxc.h" +#include "storage/tcap.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/rel.h" @@ -61,12 +66,12 @@ void CreatePackageCommand(CreatePackageStmt* stmt, const char* queryString) ereport(ERROR, (errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), errmsg("not support create package in distributed database"))); #endif + u_sess->plsql_cxt.debug_query_string = pstrdup(queryString); Oid packageId; Oid namespaceId; char* pkgname = NULL; char* pkgspecsrc = NULL; Oid pkgOwner; - AclResult aclresult; /* Convert list of names to a name and namespace */ namespaceId = QualifiedNameGetCreationNamespace(stmt->pkgname, &pkgname); @@ -84,11 +89,7 @@ void CreatePackageCommand(CreatePackageStmt* stmt, const char* queryString) * namespace, if the owner of the namespce has the same name as the namescpe */ bool isAlter = false; - u_sess->plsql_cxt.sourceText = pstrdup(queryString); - /* Check we have creation rights in target namespace */ - aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + bool anyResult = CheckCreatePrivilegeInNamespace(namespaceId, GetUserId(), CREATE_ANY_PACKAGE); //@Temp Table. 
Lock Cluster after determine whether is a temp object, // so we can decide if locking other coordinator @@ -97,7 +98,7 @@ void CreatePackageCommand(CreatePackageStmt* stmt, const char* queryString) pkgspecsrc = stmt->pkgspec; if (u_sess->attr.attr_sql.enforce_a_behavior) { - pkgOwner = GetUserIdFromNspId(namespaceId); + pkgOwner = GetUserIdFromNspId(namespaceId, false, anyResult); if (!OidIsValid(pkgOwner)) pkgOwner = GetUserId(); @@ -106,9 +107,7 @@ void CreatePackageCommand(CreatePackageStmt* stmt, const char* queryString) isAlter = true; if (isAlter) { - aclresult = pg_namespace_aclcheck(namespaceId, pkgOwner, ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + (void)CheckCreatePrivilegeInNamespace(namespaceId, pkgOwner, CREATE_ANY_PACKAGE); } } else { pkgOwner = GetUserId(); @@ -136,6 +135,9 @@ void CreatePackageCommand(CreatePackageStmt* stmt, const char* queryString) errcause("debug mode"), erraction("check package spec"))); } + if (u_sess->plsql_cxt.debug_query_string) { + pfree_ext(u_sess->plsql_cxt.debug_query_string); + } } @@ -145,12 +147,12 @@ void CreatePackageBodyCommand(CreatePackageBodyStmt* stmt, const char* queryStri ereport(ERROR, (errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), errmsg("not support create package in distributed database"))); #endif + u_sess->plsql_cxt.debug_query_string = pstrdup(queryString); //Oid packageId; Oid namespaceId; char* pkgname = NULL; char* pkgBodySrc = NULL; Oid pkgOwner; - AclResult aclresult; /* Convert list of names to a name and namespace */ namespaceId = QualifiedNameGetCreationNamespace(stmt->pkgname, &pkgname); @@ -160,11 +162,7 @@ void CreatePackageBodyCommand(CreatePackageBodyStmt* stmt, const char* queryStri * namespace, if the owner of the namespce has the same name as the namescpe */ bool isAlter = false; - u_sess->plsql_cxt.sourceText = pstrdup(queryString); - /* Check we have creation rights in target namespace */ - aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + bool anyResult = CheckCreatePrivilegeInNamespace(namespaceId, GetUserId(), CREATE_ANY_PACKAGE); //@Temp Table. 
Lock Cluster after determine whether is a temp object, // so we can decide if locking other coordinator @@ -173,7 +171,7 @@ void CreatePackageBodyCommand(CreatePackageBodyStmt* stmt, const char* queryStri pkgBodySrc = stmt->pkgbody; if (u_sess->attr.attr_sql.enforce_a_behavior) { - pkgOwner = GetUserIdFromNspId(namespaceId); + pkgOwner = GetUserIdFromNspId(namespaceId, false, anyResult); if (!OidIsValid(pkgOwner)) pkgOwner = GetUserId(); @@ -181,9 +179,7 @@ void CreatePackageBodyCommand(CreatePackageBodyStmt* stmt, const char* queryStri isAlter = true; if (isAlter) { - aclresult = pg_namespace_aclcheck(namespaceId, pkgOwner, ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); + (void)CheckCreatePrivilegeInNamespace(namespaceId, pkgOwner, CREATE_ANY_PACKAGE); } } else { pkgOwner = GetUserId(); @@ -204,4 +200,113 @@ void CreatePackageBodyCommand(CreatePackageBodyStmt* stmt, const char* queryStri errcause("package body is null"), erraction("check package body"))); } + if (u_sess->plsql_cxt.debug_query_string) { + pfree_ext(u_sess->plsql_cxt.debug_query_string); + } +} + +/* + * Change package owner by name + */ +void AlterPackageOwner(List* name, Oid newOwnerId) +{ +#ifdef ENABLE_MULTIPLE_NODES + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("package not supported in distributed database"))); +#endif + Oid pkgOid = PackageNameListGetOid(name, false); + Relation rel; + HeapTuple tup; + if (IsSystemObjOid(pkgOid)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), + errmsg("ownerId change failed for package %u, because it is a builtin package.", pkgOid))); + } + rel = heap_open(PackageRelationId, RowExclusiveLock); + tup = SearchSysCache1(PACKAGEOID, ObjectIdGetDatum(pkgOid)); + /* should not happen */ + if (!HeapTupleIsValid(tup)) { + ereport( + ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for package %u", pkgOid))); + } + + TrForbidAccessRbObject(PACKAGEOID, pkgOid); + + Form_gs_package gs_package_tuple = (Form_gs_package)GETSTRUCT(tup); + /* + * If the new owner is the same as the existing owner, consider the + * command to have succeeded. This is for dump restoration purposes. + */ + if (gs_package_tuple->pkgowner == newOwnerId) { + ReleaseSysCache(tup); + /* Recode time of change the funciton owner. 
*/
+        UpdatePgObjectMtime(pkgOid, OBJECT_TYPE_PKGSPEC);
+        heap_close(rel, NoLock);
+        return;
+    }
+
+    Datum repl_val[Natts_gs_package];
+    bool repl_null[Natts_gs_package];
+    bool repl_repl[Natts_gs_package];
+    Acl* newAcl = NULL;
+    Datum aclDatum;
+    bool isNull = false;
+    HeapTuple newtuple;
+    AclResult aclresult;
+
+    /* Superusers can always do it */
+    if (!superuser()) {
+        /* Otherwise, must be owner of the existing object */
+        if (!pg_package_ownercheck(pkgOid, GetUserId()))
+            aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PACKAGE, NameStr(gs_package_tuple->pkgname));
+
+        /* Must be able to become new owner */
+        check_is_member_of_role(GetUserId(), newOwnerId);
+
+        /* New owner must have CREATE privilege on namespace */
+        aclresult = pg_namespace_aclcheck(gs_package_tuple->pkgnamespace, newOwnerId, ACL_CREATE);
+        if (aclresult != ACLCHECK_OK)
+            aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(gs_package_tuple->pkgnamespace));
+    }
+
+    /* alter owner of procedures in the package */
+    AlterFunctionOwnerByPkg(pkgOid, newOwnerId);
+    /* alter package type owner */
+    AlterTypeOwnerByPkg(pkgOid, newOwnerId);
+
+    errno_t errorno = EOK;
+    errorno = memset_s(repl_null, sizeof(repl_null), false, sizeof(repl_null));
+    securec_check(errorno, "\0", "\0");
+    errorno = memset_s(repl_repl, sizeof(repl_repl), false, sizeof(repl_repl));
+    securec_check(errorno, "\0", "\0");
+
+    repl_repl[Anum_gs_package_pkgowner - 1] = true;
+    repl_val[Anum_gs_package_pkgowner - 1] = ObjectIdGetDatum(newOwnerId);
+
+    /*
+     * Determine the modified ACL for the new owner. This is only
+     * necessary when the ACL is non-null.
+     */
+    aclDatum = SysCacheGetAttr(PACKAGEOID, tup, Anum_gs_package_pkgacl, &isNull);
+    if (!isNull) {
+        newAcl = aclnewowner(DatumGetAclP(aclDatum), gs_package_tuple->pkgowner, newOwnerId);
+        repl_repl[Anum_gs_package_pkgacl - 1] = true;
+        repl_val[Anum_gs_package_pkgacl - 1] = PointerGetDatum(newAcl);
+    }
+
+    newtuple = (HeapTuple) tableam_tops_modify_tuple(tup, RelationGetDescr(rel), repl_val, repl_null, repl_repl);
+
+    simple_heap_update(rel, &newtuple->t_self, newtuple);
+    CatalogUpdateIndexes(rel, newtuple);
+
+    tableam_tops_free_tuple(newtuple);
+
+    /* Update owner dependency reference */
+    changeDependencyOnOwner(PackageRelationId, pkgOid, newOwnerId);
+
+    ReleaseSysCache(tup);
+    /* Record time of changing the function owner. 
*/ + UpdatePgObjectMtime(pkgOid, OBJECT_TYPE_PKGSPEC); + heap_close(rel, NoLock); + return; } diff --git a/src/gausskernel/optimizer/commands/prepare.cpp b/src/gausskernel/optimizer/commands/prepare.cpp index 4946921c1..34cead744 100755 --- a/src/gausskernel/optimizer/commands/prepare.cpp +++ b/src/gausskernel/optimizer/commands/prepare.cpp @@ -47,6 +47,9 @@ #include "pgxc/execRemote.h" #include "catalog/pgxc_node.h" #endif +#include "replication/walreceiver.h" + +#define CLUSTER_EXPANSION_BASE 2 void InitQueryHashTable(void); static ParamListInfo EvaluateParams(CachedPlanSource* psrc, List* params, const char* queryString, EState* estate); @@ -460,9 +463,7 @@ static ParamListInfo EvaluateParams(CachedPlanSource* psrc, List* params, const prm->ptype = param_types[i]; prm->pflags = PARAM_FLAG_CONST; prm->value = ExecEvalExprSwitchContext(n, GetPerTupleExprContext(estate), &prm->isnull, NULL); - prm->tableOfIndexType = InvalidOid; - prm->tableOfIndex = NULL; - prm->isnestedtable = false; + prm->tabInfo = NULL; i++; } @@ -1323,7 +1324,11 @@ Datum pg_prepared_statement(PG_FUNCTION_ARGS) securec_check(rc, "\0", "\0"); values[0] = CStringGetTextDatum(prep_stmt->stmt_name); - values[1] = CStringGetTextDatum(prep_stmt->plansource->query_string); + char* maskquery = maskPassword(prep_stmt->plansource->query_string); + const char* query = (maskquery == NULL) ? prep_stmt->plansource->query_string : maskquery; + values[1] = CStringGetTextDatum(query); + if (query != maskquery) + pfree_ext(maskquery); values[2] = TimestampTzGetDatum(prep_stmt->prepare_time); values[3] = build_regtype_array(prep_stmt->plansource->param_types, prep_stmt->plansource->num_params); values[4] = BoolGetDatum(prep_stmt->from_sql); @@ -1398,8 +1403,9 @@ void DropDatanodeStatement(const char* stmt_name) List* nodelist = NIL; /* make a List of integers from node numbers */ - for (i = 0; i < entry->current_nodes_number; i++) + for (i = 0; i < entry->current_nodes_number; i++) { nodelist = lappend_int(nodelist, entry->dns_node_indices[i]); + } CN_GPC_LOG("drop datanode statment", NULL, entry->stmt_name); @@ -1435,7 +1441,7 @@ void DeActiveAllDataNodeStatements(void) tmp_num = entry->current_nodes_number; entry->current_nodes_number = 0; if (tmp_num > 0) { - Assert(tmp_num <= u_sess->pgxc_cxt.NumDataNodes); + Assert(tmp_num <= Max(u_sess->pgxc_cxt.NumTotalDataNodes, u_sess->pgxc_cxt.NumDataNodes)); errorno = memset_s(entry->dns_node_indices, tmp_num * sizeof(int), 0, tmp_num * sizeof(int)); securec_check_c(errorno, "\0", "\0"); } @@ -1474,7 +1480,7 @@ bool HaveActiveDatanodeStatements(void) * Returns false if statement has not been active on the node and should be * prepared on the node */ -bool ActivateDatanodeStatementOnNode(const char* stmt_name, int noid) +bool ActivateDatanodeStatementOnNode(const char* stmt_name, int nodeIdx) { DatanodeStatement* entry = NULL; int i; @@ -1484,23 +1490,24 @@ bool ActivateDatanodeStatementOnNode(const char* stmt_name, int noid) /* see if statement already active on the node */ for (i = 0; i < entry->current_nodes_number; i++) { - if (entry->dns_node_indices[i] == noid) + if (entry->dns_node_indices[i] == nodeIdx) { return true; + } } /* After cluster expansion, must expand entry->dns_node_indices array too */ if (entry->current_nodes_number == entry->max_nodes_number) { int* new_dns_node_indices = (int*)MemoryContextAllocZero( - u_sess->pcache_cxt.datanode_queries->hcxt, entry->max_nodes_number * 2 * sizeof(int)); + u_sess->pcache_cxt.datanode_queries->hcxt, entry->max_nodes_number * 
CLUSTER_EXPANSION_BASE * sizeof(int)); errno_t errorno = EOK; errorno = memcpy_s(new_dns_node_indices, - entry->max_nodes_number * 2 * sizeof(int), + entry->max_nodes_number * CLUSTER_EXPANSION_BASE * sizeof(int), entry->dns_node_indices, entry->max_nodes_number * sizeof(int)); securec_check(errorno, "\0", "\0"); pfree_ext(entry->dns_node_indices); entry->dns_node_indices = new_dns_node_indices; - entry->max_nodes_number = entry->max_nodes_number * 2; + entry->max_nodes_number = entry->max_nodes_number * CLUSTER_EXPANSION_BASE; elog(LOG, "expand node ids array for active datanode statements " "after cluster expansion, now array size is %d", @@ -1508,8 +1515,7 @@ bool ActivateDatanodeStatementOnNode(const char* stmt_name, int noid) } /* statement is not active on the specified node append item to the list */ - entry->dns_node_indices[entry->current_nodes_number++] = noid; - + entry->dns_node_indices[entry->current_nodes_number++] = nodeIdx; return false; } diff --git a/src/gausskernel/optimizer/commands/publicationcmds.cpp b/src/gausskernel/optimizer/commands/publicationcmds.cpp index b069a7e3e..1378357c5 100644 --- a/src/gausskernel/optimizer/commands/publicationcmds.cpp +++ b/src/gausskernel/optimizer/commands/publicationcmds.cpp @@ -21,6 +21,7 @@ #include "access/heapam.h" #include "access/htup.h" #include "access/xact.h" +#include "access/sysattr.h" #include "catalog/catalog.h" #include "catalog/indexing.h" @@ -411,11 +412,15 @@ void RemovePublicationRelById(Oid proid) Relation rel; HeapTuple tup; Form_pg_publication_rel pubrel; + ScanKeyData scanKey[1]; + ScanKeyInit(&scanKey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(proid)); rel = heap_open(PublicationRelRelationId, RowExclusiveLock); - tup = SearchSysCache1(PUBLICATIONREL, ObjectIdGetDatum(proid)); + SysScanDesc scanDesc = systable_beginscan(rel, PublicationRelObjectIndexId, true, NULL, 1, scanKey); + tup = systable_getnext(scanDesc); if (!HeapTupleIsValid(tup)) { - elog(ERROR, "cache lookup failed for publication table %u", proid); + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("could not find tuple for publication %u", proid))); } pubrel = (Form_pg_publication_rel)GETSTRUCT(tup); @@ -424,8 +429,7 @@ void RemovePublicationRelById(Oid proid) simple_heap_delete(rel, &tup->t_self); - ReleaseSysCache(tup); - + systable_endscan(scanDesc); heap_close(rel, RowExclusiveLock); } diff --git a/src/gausskernel/optimizer/commands/schemacmds.cpp b/src/gausskernel/optimizer/commands/schemacmds.cpp index a182a77a0..0f04c3ff5 100644 --- a/src/gausskernel/optimizer/commands/schemacmds.cpp +++ b/src/gausskernel/optimizer/commands/schemacmds.cpp @@ -609,10 +609,16 @@ static void AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerI bool isNull = false; HeapTuple newtuple; AclResult aclresult; + Oid nspid = HeapTupleGetOid(tup); /* Otherwise, must be owner of the existing object */ - if (!pg_namespace_ownercheck(HeapTupleGetOid(tup), GetUserId())) + if (IsSystemNamespace(nspid)) { + if (!initialuser()) { + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, NameStr(nspForm->nspname)); + } + } else if (!pg_namespace_ownercheck(nspid, GetUserId())) { aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, NameStr(nspForm->nspname)); + } /* Must be able to become new owner */ check_is_member_of_role(GetUserId(), newOwnerId); diff --git a/src/gausskernel/optimizer/commands/sec_rls_cmds.cpp b/src/gausskernel/optimizer/commands/sec_rls_cmds.cpp index b1b2d26f5..73c55ca9d 100644 --- 
a/src/gausskernel/optimizer/commands/sec_rls_cmds.cpp +++ b/src/gausskernel/optimizer/commands/sec_rls_cmds.cpp @@ -52,6 +52,7 @@ #include "utils/sec_rls_utils.h" #include "utils/syscache.h" #include "utils/snapmgr.h" +#include "utils/knl_relcache.h" /* * The row level security policies for one relation should be @@ -795,7 +796,7 @@ void RelationBuildRlsPolicies(Relation relation) * Set up memory context, always set up some kind of policy here. * If no explicit policies are found then an implicit default-deny policy is created. */ - MemoryContext rlscxt = AllocSetContextCreate(u_sess->cache_mem_cxt, + MemoryContext rlscxt = AllocSetContextCreate(LocalMyDBCacheMemCxt(), "Row-level-security policy descriptor", ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, diff --git a/src/gausskernel/optimizer/commands/sequence/sequence.cpp b/src/gausskernel/optimizer/commands/sequence/sequence.cpp index 65dcc0578..ee97de1c7 100644 --- a/src/gausskernel/optimizer/commands/sequence/sequence.cpp +++ b/src/gausskernel/optimizer/commands/sequence/sequence.cpp @@ -373,7 +373,7 @@ void gen_uuid_for_CreateSchemaStmt(List* stmts, List* uuids) void InitGlobalSeq() { - for (int i = 0; i < GS_NUM_OF_BUCKETS; i++) { + for (int i = 0; i < NUM_GS_PARTITIONS; i++) { g_instance.global_seq[i].shb_list = NULL; g_instance.global_seq[i].lock_id = FirstGlobalSeqLock + i; } @@ -645,7 +645,7 @@ static int128 GetNextvalLocal(SeqTable elm, Relation seqrel) XLogRegisterData((char*)&xlrec, sizeof(xl_seq_rec)); XLogRegisterData((char*)seqtuple.t_data, seqtuple.t_len); - recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, false, seqrel->rd_node.bucketNode); + recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, seqrel->rd_node.bucketNode); PageSetLSN(page, recptr); } @@ -1571,7 +1571,7 @@ static void do_setval(Oid relid, int128 next, bool iscalled) XLogRegisterData((char*)&xlrec, sizeof(xl_seq_rec)); XLogRegisterData((char*)seqtuple.t_data, seqtuple.t_len); - recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, false, seqrel->rd_node.bucketNode); + recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, seqrel->rd_node.bucketNode); PageSetLSN(page, recptr); } @@ -2567,6 +2567,34 @@ static void CheckUpdateSequenceMsgStatus(PGXCNodeHandle* exec_handle, const char } #endif // ENABLE_MULTIPLE_NODES +static void SaveNextValForSequence(char* dbname, char* schemaname, char *seqname, int64* res) +{ + bool issame = false; + if (u_sess->xact_cxt.sendSeqName != NULL) { + ListCell *db_name_cell = NULL; + ListCell *schema_name_cell = NULL; + ListCell *seq_name_cell = NULL; + ListCell *res_cell = NULL; + forfour(db_name_cell, u_sess->xact_cxt.sendSeqDbName, schema_name_cell, u_sess->xact_cxt.sendSeqSchmaName, + seq_name_cell, u_sess->xact_cxt.sendSeqName, res_cell, u_sess->xact_cxt.send_result) { + if (strcmp(seqname, (char*)lfirst(seq_name_cell)) == 0 && strcmp(dbname, (char*)lfirst(db_name_cell)) == 0 + && strcmp(schemaname, (char*)lfirst(schema_name_cell)) == 0) { + int64* nu = (int64*)lfirst(res_cell); + pfree_ext(nu); + lfirst(res_cell) = (void*)res; + issame = true; + break; + } + } + } + if (!issame) { + u_sess->xact_cxt.sendSeqDbName = lappend(u_sess->xact_cxt.sendSeqDbName, pstrdup(dbname)); + u_sess->xact_cxt.sendSeqSchmaName = lappend(u_sess->xact_cxt.sendSeqSchmaName, pstrdup(schemaname)); + u_sess->xact_cxt.sendSeqName = lappend(u_sess->xact_cxt.sendSeqName, pstrdup(seqname)); + u_sess->xact_cxt.send_result = lappend(u_sess->xact_cxt.send_result, res); + } +} + static void updateNextValForSequence(Buffer buf, Form_pg_sequence seq, HeapTupleData seqtuple, Relation 
seqrel, int64 result) { @@ -2598,7 +2626,7 @@ static void updateNextValForSequence(Buffer buf, Form_pg_sequence seq, HeapTuple XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT); XLogRegisterData((char*)&xlrec, sizeof(xl_seq_rec)); XLogRegisterData((char*)seqtuple.t_data, seqtuple.t_len); - recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, false, seqrel->rd_node.bucketNode); + recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, seqrel->rd_node.bucketNode); PageSetLSN(page, recptr); } /* @@ -2631,10 +2659,7 @@ static void updateNextValForSequence(Buffer buf, Form_pg_sequence seq, HeapTuple curr = MemoryContextSwitchTo(u_sess->top_transaction_mem_cxt); int64* res = (int64*)palloc(sizeof(int64)); *res = result; - u_sess->xact_cxt.sendSeqDbName = lappend(u_sess->xact_cxt.sendSeqDbName, pstrdup(dbname)); - u_sess->xact_cxt.sendSeqSchmaName = lappend(u_sess->xact_cxt.sendSeqSchmaName, pstrdup(schemaname)); - u_sess->xact_cxt.sendSeqName = lappend(u_sess->xact_cxt.sendSeqName, pstrdup(seqname)); - u_sess->xact_cxt.send_result = lappend(u_sess->xact_cxt.send_result, res); + SaveNextValForSequence(dbname, schemaname, seqname, res); MemoryContextSwitchTo(curr); } else { /* nexval execute direct on cn will not notify dn */ diff --git a/src/gausskernel/optimizer/commands/sequence/sequence_util.cpp b/src/gausskernel/optimizer/commands/sequence/sequence_util.cpp index bc99a7d2e..f5b9ced40 100644 --- a/src/gausskernel/optimizer/commands/sequence/sequence_util.cpp +++ b/src/gausskernel/optimizer/commands/sequence/sequence_util.cpp @@ -22,10 +22,13 @@ #include "access/gtm.h" #include "access/multixact.h" #include "access/xlogproc.h" +#include "catalog/pg_proc.h" #include "commands/dbcommands.h" #include "commands/sequence.h" #include "gtm/gtm_client.h" +#include "parser/parse_coerce.h" #include "storage/lmgr.h" +#include "utils/builtins.h" #include "utils/lsyscache.h" /* @@ -115,7 +118,7 @@ void fill_seq_with_data(Relation rel, HeapTuple tuple) XLogRegisterData((char*)&xlrec, sizeof(xl_seq_rec)); XLogRegisterData((char*)tuple->t_data, tuple->t_len); - recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, false, rel->rd_node.bucketNode); + recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG, rel->rd_node.bucketNode); PageSetLSN(page, recptr); } @@ -566,3 +569,104 @@ SeqTable GetGlobalSeqElm(Oid relid, GlobalSeqInfoHashBucket* bucket) return currseq; } + +/* + * Add coercion for (numeric)func() to get (int8)func(). + * There's no need to concern numeric overflow since large sequence is not supported before upgrade. + */ +static Node* update_seq_expr(FuncExpr* func) +{ + func->funcresulttype = NUMERICOID; + Node* newnode = coerce_to_target_type( + NULL, (Node*)func, NUMERICOID, INT8OID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); + return newnode; +} + +/* + * Add coercion for (int8)func() to get (numeric)func(). 
+ */ +static Node* rollback_seq_expr(FuncExpr* func) +{ + func->funcresulttype = INT8OID; + Node* newnode = coerce_to_target_type( + NULL, (Node*)func, INT8OID, NUMERICOID, -1, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); + return newnode; +} + +typedef struct MutateSeqExprCxt { + Node* (*worker)(FuncExpr* func); + Oid expextedResType; +} MutateSeqExprCxt; + +static bool RevertUpgradedFunc(FuncExpr* func, Node** ret) +{ + Assert(list_length(func->args) == 1); + Node* arg = (Node*)linitial(func->args); + if (!IsA(arg, FuncExpr)) { + return false; + } + FuncExpr* innerFunc = (FuncExpr*)arg; + if (innerFunc->funcid == NEXTVALFUNCOID || innerFunc->funcid == CURRVALFUNCOID || + innerFunc->funcid == LASTVALFUNCOID) { + innerFunc->funcresulttype = INT8OID; + *ret = (Node*)innerFunc; + return true; + } + return false; +} + +static Node* large_sequence_modify_node_tree_mutator(Node* node, void* cxt) +{ + /* Traverse through the expression tree and convert nextval() calls with proper coercion */ + if (node == NULL) { + return NULL; + } + if (IsA(node, Query)) { + return (Node*)query_tree_mutator( + (Query*) node, (Node* (*)(Node*, void*))large_sequence_modify_node_tree_mutator, cxt, 0); + } + if (IsA(node, FuncExpr)) { + FuncExpr* func = (FuncExpr*) node; + MutateSeqExprCxt* context = (MutateSeqExprCxt*)cxt; + if (func->funcid == NEXTVALFUNCOID || func->funcid == CURRVALFUNCOID || func->funcid == LASTVALFUNCOID) { + if (func->funcresulttype == context->expextedResType) { + /* Check to allow reentrancy of rollback/upgrade procedure */ + return (Node*)func; + } + return ((MutateSeqExprCxt*)cxt)->worker(func); + } else if (context->expextedResType == INT8OID && func->funcid == 1779) { /* Only for rollback func()::int8 */ + Node* ret = NULL; + if (RevertUpgradedFunc(func, &ret)) { + return ret; + } + } + } + return expression_tree_mutator( + node, (Node* (*)(Node*, void*))large_sequence_modify_node_tree_mutator, cxt); +} + +Datum large_sequence_upgrade_node_tree(PG_FUNCTION_ARGS) +{ + char* res = NULL; + char* orig = text_to_cstring(PG_GETARG_TEXT_P(0)); + Node* expr = (Node*)stringToNode_skip_extern_fields(orig); + MutateSeqExprCxt cxt = {update_seq_expr, NUMERICOID}; + expr = query_or_expression_tree_mutator( + expr, (Node* (*)(Node*, void*))large_sequence_modify_node_tree_mutator, &cxt, 0); + res = nodeToString(expr); + + PG_RETURN_TEXT_P(cstring_to_text(res)); +} + +Datum large_sequence_rollback_node_tree(PG_FUNCTION_ARGS) +{ + char* res = NULL; + char* orig = text_to_cstring(PG_GETARG_TEXT_P(0)); + Node* expr = (Node*)stringToNode_skip_extern_fields(orig); + MutateSeqExprCxt cxt = {rollback_seq_expr, INT8OID}; + expr = query_or_expression_tree_mutator( + expr, (Node* (*)(Node*, void*))large_sequence_modify_node_tree_mutator, &cxt, 0); + res = nodeToString(expr); + + PG_RETURN_TEXT_P(cstring_to_text(res)); +} diff --git a/src/gausskernel/optimizer/commands/subscriptioncmds.cpp b/src/gausskernel/optimizer/commands/subscriptioncmds.cpp index 3697e4dc5..b40214c01 100644 --- a/src/gausskernel/optimizer/commands/subscriptioncmds.cpp +++ b/src/gausskernel/optimizer/commands/subscriptioncmds.cpp @@ -44,6 +44,10 @@ #include "utils/array.h" #include "utils/acl.h" +static void ConnectPublisher(char *conninfo, char* slotname); +static void CreateSlotInPublisher(char *slotname); +static void ValidateReplicationSlot(char *slotname, List *publications); + /* * Common option parsing function for CREATE and ALTER SUBSCRIPTION commands. 
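 * (As the parse_subscription_options() call sites later in this patch show, it fills
 * in conninfo, publications, enabled/enabled_given and slot_name from the statement's
 * option list.)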
* @@ -155,6 +159,11 @@ static Datum publicationListToArray(List *publist) MemoryContext memcxt; MemoryContext oldcxt; + if (list_length(publist) <= 0) { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("LogicDecode[Publication]: null publication name list"))); + } + /* Create memory context for temporary allocations. */ memcxt = AllocSetContextCreate(CurrentMemoryContext, "publicationListToArray to array", ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(memcxt); @@ -188,20 +197,34 @@ static Datum publicationListToArray(List *publist) } /* - * connect publisher and create slot + * connect publisher and create slot. + * the input conninfo should be encrypted; we will decrypt the password inside */ -static void ConnectAndCreateSlot(char *conninfo, char *slotname) +static void ConnectPublisher(char *conninfo, char *slotname) { /* Try to connect to the publisher. */ volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; SpinLockAcquire(&walrcv->mutex); walrcv->conn_target = REPCONNTARGET_PUBLICATION; SpinLockRelease(&walrcv->mutex); - bool connectSuccess = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_connect(conninfo, NULL, slotname, -1); + + char *decryptConninfo = DecryptConninfo(conninfo); + bool connectSuccess = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_connect(decryptConninfo, NULL, slotname, -1); + int rc = memset_s(decryptConninfo, strlen(decryptConninfo), 0, strlen(decryptConninfo)); + securec_check(rc, "", ""); + pfree_ext(decryptConninfo); + if (!connectSuccess) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("could not connect to the publisher"))); } +} +/* + * Create a replication slot on the publisher side. + * Please make sure you have already connected to the publisher before calling this function. + */ +static void CreateSlotInPublisher(char *slotname) +{ LibpqrcvConnectParam options; int rc = memset_s(&options, sizeof(LibpqrcvConnectParam), 0, sizeof(LibpqrcvConnectParam)); securec_check(rc, "", ""); @@ -219,9 +242,35 @@ static void ConnectAndCreateSlot(char *conninfo, char *slotname) PG_RE_THROW(); } PG_END_TRY(); +} - /* And we are done with the remote side. */ - (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); +/* Validate the replication slot by starting streaming */ +static void ValidateReplicationSlot(char *slotname, List *publications) +{ + /* + * We just want to validate the replication slot, so the start point is not important. + * We use InvalidXLogRecPtr as the start point, so the replication slot won't advance + * and we won't decode any data here. + */ + LibpqrcvConnectParam options; + int rc = memset_s(&options, sizeof(LibpqrcvConnectParam), 0, sizeof(LibpqrcvConnectParam)); + securec_check(rc, "", ""); + options.logical = true; + options.startpoint = InvalidXLogRecPtr; + options.slotname = slotname; + options.protoVersion = 1; + options.publicationNames = publications; + PG_TRY(); + { + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_startstreaming(&options); + } + PG_CATCH(); + { + /* Close the connection in case of failure. 
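+ * Otherwise a failed validation would leak the publisher connection while the
+ * error propagates up.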
*/ + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); + PG_RE_THROW(); + } + PG_END_TRY(); } /* @@ -312,7 +361,6 @@ ObjectAddress CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) values[Anum_pg_subscription_subconninfo - 1] = CStringGetTextDatum(encryptConninfo); pfree_ext(conninfoList); - pfree_ext(encryptConninfo); if (enabled) { if (!slotname_given) { slotname = stmt->subname; @@ -348,9 +396,15 @@ ObjectAddress CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) */ if (enabled) { Assert(slotname); - ConnectAndCreateSlot(conninfo, slotname); + ConnectPublisher(encryptConninfo, slotname); + CreateSlotInPublisher(slotname); + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); } + + pfree_ext(encryptConninfo); heap_close(rel, RowExclusiveLock); + rc = memset_s(stmt->conninfo, strlen(stmt->conninfo), 0, strlen(stmt->conninfo)); + securec_check(rc, "", ""); /* Don't wake up logical replication launcher unnecessarily */ if (enabled) { @@ -392,6 +446,12 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt) List *publications; Subscription *sub; int rc; + bool checkConn = false; + bool validateSlot = false; + bool createSlot = false; + bool needFreeConninfo = false; + char *finalSlotName = NULL; + char *encryptConninfo = NULL; rel = heap_open(SubscriptionRelationId, RowExclusiveLock); @@ -408,6 +468,8 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt) subid = HeapTupleGetOid(tup); sub = GetSubscription(subid, false); enabled = sub->enabled; + finalSlotName = sub->name; + encryptConninfo = sub->conninfo; /* Parse options. */ parse_subscription_options(stmt->options, &conninfo, &publications, &enabled_given, &enabled, &slotname_given, @@ -435,28 +497,17 @@ ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt) const char* sensitiveOptionsArray[] = {"password"}; const int sensitiveArrayLength = lengthof(sensitiveOptionsArray); EncryptGenericOptions(conninfoList, sensitiveOptionsArray, sensitiveArrayLength, SUBSCRIPTION_MODE); - char *encryptConninfo = DefListToString(conninfoList); + encryptConninfo = DefListToString(conninfoList); + needFreeConninfo = true; values[Anum_pg_subscription_subconninfo - 1] = CStringGetTextDatum(encryptConninfo); replaces[Anum_pg_subscription_subconninfo - 1] = true; pfree_ext(conninfoList); - pfree_ext(encryptConninfo); - /* check whether new conninfo can be used to connect to new publisher */ if (sub->enabled || (enabled_given && enabled)) { - /* Try to connect to the publisher. */ - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - SpinLockAcquire(&walrcv->mutex); - walrcv->conn_target = REPCONNTARGET_PUBLICATION; - SpinLockRelease(&walrcv->mutex); - bool connectSuccess = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_connect(conninfo, NULL, sub->slotname, -1); - if (!connectSuccess) { - ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("The new conninfo cannot connect to new publisher."))); - } - - /* And we are done with the remote side. 
*/ - (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); + /* we need to check whether the new conninfo can be used to connect to the new publisher */ + checkConn = true; } } if (slotname_given) { @@ -469,6 +520,9 @@ if (slot_name) { if (sub->enabled || (enabled_given && enabled)) { values[Anum_pg_subscription_subslotname - 1] = DirectFunctionCall1(namein, CStringGetDatum(slot_name)); + /* if the old slotname is null or differs from the new slot name, we need to validate the new slot */ + validateSlot = sub->slotname == NULL || strcmp(slot_name, sub->slotname) != 0; + finalSlotName = slot_name; } else { ereport(ERROR, (errmsg("Currently enabled=false, cannot change slot_name to a non-null value."))); } @@ -518,36 +572,37 @@ } /* enable subscription */ if (!sub->enabled && enabled) { - /* - * If slot_name is not specified or is empty, the default value is used; - * otherwise, the user-specified slot_name is used. - */ - char *temp_slotname = sub->name; - if (slotname_given && slot_name && *slot_name) { - temp_slotname = slot_name; - } - /* if slot hasn't been created, then create it */ if (!sub->slotname || !*(sub->slotname)) { - if (conninfo) { - ConnectAndCreateSlot(conninfo, temp_slotname); - } else { - /* Sensitive options for subscription, will be encrypted when saved to catalog. */ - const char* sensitiveOptionsArray[] = {"password"}; - const int sensitiveArrayLength = lengthof(sensitiveOptionsArray); - List *defList = ConninfoToDefList(sub->conninfo); - DecryptOptions(defList, sensitiveOptionsArray, sensitiveArrayLength, SUBSCRIPTION_MODE); - char *decryptConnInfo = DefListToString(defList); - - ConnectAndCreateSlot(decryptConnInfo, temp_slotname); - - pfree_ext(defList); - pfree_ext(decryptConnInfo); - } + createSlot = true; + } + } + + if (checkConn || createSlot || validateSlot) { + ConnectPublisher(encryptConninfo, finalSlotName); + + if (createSlot) { + CreateSlotInPublisher(finalSlotName); + } + + /* no need to validate the replication slot if we have just created it ourselves */ + if (!createSlot && validateSlot) { + ValidateReplicationSlot(finalSlotName, sub->publications); + } + + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); ApplyLauncherWakeupAtCommit(); } + if (needFreeConninfo) { + pfree_ext(encryptConninfo); + } + + if (conninfo) { + rc = memset_s(conninfo, strlen(conninfo), 0, strlen(conninfo)); + securec_check(rc, "", ""); + } + return myself; } @@ -698,34 +753,16 @@ void DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) initStringInfo(&cmd); appendStringInfo(&cmd, "DROP_REPLICATION_SLOT %s", quote_identifier(slotname)); - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - SpinLockAcquire(&walrcv->mutex); - walrcv->conn_target = REPCONNTARGET_PUBLICATION; - SpinLockRelease(&walrcv->mutex); - - /* Sensitive options for subscription, will be encrypted when saved to catalog. 
*/ - const char* sensitiveOptionsArray[] = {"password"}; - const int sensitiveArrayLength = lengthof(sensitiveOptionsArray); - List *defList = ConninfoToDefList(conninfo); - DecryptOptions(defList, sensitiveOptionsArray, sensitiveArrayLength, SUBSCRIPTION_MODE); - conninfo = DefListToString(defList); - - bool connectSuccess = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_connect(conninfo, NULL, subname, -1); - pfree_ext(defList); - pfree_ext(conninfo); - if (!connectSuccess) { - ereport(ERROR, - (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("could not connect to publisher when attempting to drop " - "the replication slot \"%s\". Use ALTER SUBSCRIPTION " - "... SET (slot_name = NONE) to disassociate the " - "subscription from the slot.", - slotname))); - } - + ConnectPublisher(conninfo, slotname); PG_TRY(); { - if (!(WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_command(cmd.data, &err)) { - (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_disconnect(); + int sqlstate = 0; + bool res = WalReceiverFuncTable[GET_FUNC_IDX].walrcv_command(cmd.data, &err, &sqlstate); + if (!res && sqlstate == ERRCODE_UNDEFINED_OBJECT) { + /* drop replication slot failed cause it doesn't exist on publisher, give a warning and continue */ + ereport(WARNING, (errmsg("could not drop the replication slot \"%s\" on publisher", slotname), + errdetail("The error was: %s", err))); + } else if (!res) { ereport(ERROR, (errmsg("could not drop the replication slot \"%s\" on publisher", slotname), errdetail("The error was: %s", err))); } else { diff --git a/src/gausskernel/optimizer/commands/tablecmds.cpp b/src/gausskernel/optimizer/commands/tablecmds.cpp index f34aa7d2f..3f44a4d21 100644 --- a/src/gausskernel/optimizer/commands/tablecmds.cpp +++ b/src/gausskernel/optimizer/commands/tablecmds.cpp @@ -58,12 +58,14 @@ #include "catalog/pg_opclass.h" #include "catalog/pg_partition.h" #include "catalog/pg_partition_fn.h" +#include "catalog/pg_rewrite.h" #include "catalog/pg_hashbucket.h" #include "catalog/pg_hashbucket_fn.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "catalog/pg_type_fn.h" +#include "catalog/pg_uid_fn.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "catalog/toasting.h" @@ -72,6 +74,8 @@ #include "catalog/gs_encrypted_columns.h" #include "catalog/gs_global_config.h" #include "catalog/gs_matview.h" +#include "catalog/gs_db_privilege.h" +#include "catalog/namespace.h" #include "commands/cluster.h" #include "commands/comment.h" #include "commands/dbcommands.h" @@ -84,6 +88,7 @@ #include "commands/trigger.h" #include "commands/typecmds.h" #include "commands/vacuum.h" +#include "commands/verify.h" #include "commands/matview.h" #include "executor/executor.h" #include "executor/node/nodeModifyTable.h" @@ -120,7 +125,6 @@ #include "storage/freespace.h" #include "storage/lmgr.h" #include "storage/lock/lock.h" -#include "storage/page_compression.h" #include "storage/predicate.h" #include "storage/remote_read.h" #include "storage/smgr/segment.h" @@ -525,7 +529,7 @@ static void ATPrepAddOids(List** wqueue, Relation rel, bool recurse, AlterTableC static void ATExecDropNotNull(Relation rel, const char* colName, LOCKMODE lockmode); static void ATExecSetNotNull(AlteredTableInfo* tab, Relation rel, const char* colName, LOCKMODE lockmode); static void ATExecColumnDefault(Relation rel, const char* colName, Node* newDefault, LOCKMODE lockmode); -static void ATPrepSetStatistics(Relation rel, const char* colName, Node* newValue, LOCKMODE lockmode); +static 
void ATPrepSetStatistics(Relation rel); static void ATExecSetStatistics( Relation rel, const char* colName, Node* newValue, AlterTableStatProperty additional_property, LOCKMODE lockmode); static void ATExecAddStatistics(Relation rel, Node* def, LOCKMODE lockmode); @@ -628,17 +632,27 @@ static void CheckIntervalValue( const Form_pg_attribute* attrs, const List* pos, const IntervalPartitionDefState* intervalPartDef); static void CheckPartitionTablespace(const char* spcname, Oid owner); static Const* GetListPartitionValue(Form_pg_attribute attrs, List* value); -static void CompareListValue(const List* pos, Form_pg_attribute* attrs, PartitionState* partdef); static bool ConfirmTypeInfo(Oid* target_oid, int* target_mod, Const* src, Form_pg_attribute attrs, bool isinterval); static void ATPrepAddPartition(Relation rel); +static void ATPrepAddSubPartition(Relation rel); static void ATPrepDropPartition(Relation rel); +static void ATPrepDropSubPartition(Relation rel); static void ATPrepUnusableIndexPartition(Relation rel); static void ATPrepUnusableAllIndexOnPartition(Relation rel); -static void ATExecAddPartition(Relation rel, AddPartitionState* partState); -static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState); -static void ATExecAddListPartition(Relation rel, AddPartitionState* partState); -static void ATExecDropPartition(Relation rel, AlterTableCmd* cmd); +static void ATExecAddPartition(Relation rel, AddPartitionState *partState); +static void ATExecAddRangePartition(Relation rel, AddPartitionState *partState); +static void ATExecAddListPartition(Relation rel, AddPartitionState *partState); +static void ATExecAddSubPartition(Relation rel, AddSubPartitionState *subpartState); +static void CheckForAddPartition(Relation rel, List *partDefStateList); +static void CheckTablespaceForAddPartition(Relation rel, List *partDefStateList); +static void CheckPartitionNameConflictForAddPartition(List *newPartitionNameList, List *existingPartitionNameList); +static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partDefState); +static void CheckSubpartitionForAddPartition(Relation rel, Node *partDefState); +static void ATExecDropPartition(Relation rel, AlterTableCmd *cmd); +static void ATExecDropSubPartition(Relation rel, AlterTableCmd *cmd); +static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *command); +static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid, const char *command); static void ATExecUnusableIndexPartition(Relation rel, const char* partition_name); static void ATExecUnusableIndex(Relation rel); @@ -647,6 +661,7 @@ static void ATExecUnusableAllIndexOnPartition(Relation rel, const char* partitio static void ATExecModifyRowMovement(Relation rel, bool rowMovement); static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd); static void ATExecTruncateSubPartition(Relation rel, AlterTableCmd* cmd); +static void checkStorageTypeForExchange(Relation partTableRel, Relation ordTableRel); static void checkColStoreForExchange(Relation partTableRel, Relation ordTableRel); static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd); static void UpdatePrevIntervalPartToRange( @@ -693,6 +708,8 @@ static void AlterPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel static void AlterSubPartitionedSetWaitCleanGPI(bool alterGPI, Relation partTableRel, Oid partOid, Oid subPartOid); static Oid AddTemporaryRangePartitionForAlterPartitions(const AlterTableCmd* cmd, Relation 
partTableRel, int sequence, bool* renameTargetPart); +static Oid AddTemporaryPartitionForAlterPartitions(const AlterTableCmd* cmd, Relation rel, + Oid srcPartOid, bool* renameTargetPart); static void ExchangePartitionWithGPI(const AlterTableCmd* cmd, Relation partTableRel, Oid srcPartOid, TransactionId frozenXid, MultiXactId multiXid); static void fastAddPartition(Relation partTableRel, List* destPartDefList, List** newPartOidList); @@ -736,6 +753,7 @@ static void ATCheckDuplicateColumn(const AlterTableCmd* cmd, const List* tabCmds static void ATCheckNotNullConstr(const AlterTableCmd* cmd, const AlteredTableInfo* tab); static void DelDependencONDataType(Relation rel, Relation depRel, const Form_pg_attribute attTup); static void ATExecEncryptionKeyRotation(Relation rel, LOCKMODE lockmode); +static bool IsViewAndRuleDependReltion(Oid relId); inline static bool CStoreSupportATCmd(AlterTableType cmdtype) { @@ -1072,10 +1090,10 @@ static bool isOrientationSet(List* options, bool* isCUFormat, bool isDfsTbl) * @Param [IN] relkind: table's kind(ordinary table or other database object). * @return: option with defalut options. */ -static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateStmt* stmt, Oid relnamespace) +static List* AddDefaultOptionsIfNeed(List* options, const char relkind, int8 relcmprs, Oid relnamespace) { List* res = options; - int8 relcmprs = stmt->row_compress; + ListCell* cell = NULL; bool isCStore = false; bool isTsStore = false; @@ -1084,7 +1102,6 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt bool isUstore = false; bool assignedStorageType = false; - TableCreateSupport tableCreateSupport{false,false,false,false,false,false}; (void)isOrientationSet(options, NULL, false); foreach (cell, options) { DefElem* def = (DefElem*)lfirst(cell); @@ -1114,8 +1131,6 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), errmsg("It is not allowed to assign version option for non-dfs table."))); - } else { - SetOneOfCompressOption(def->defname, &tableCreateSupport); } if (pg_strcasecmp(def->defname, "orientation") == 0 && pg_strcasecmp(defGetString(def), ORIENTATION_ORC) == 0) { @@ -1141,15 +1156,6 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt res = lappend(options, def); } - bool noSupportTable = isCStore || isTsStore || relkind != RELKIND_RELATION || - stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED || - stmt->relation->relpersistence == RELPERSISTENCE_TEMP || - stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP; - if (noSupportTable && tableCreateSupport.compressType) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), errmsg("only row orientation table support compresstype."))); - } - CheckCompressOption(&tableCreateSupport); - if (isUstore && !isCStore && !hasCompression) { DefElem* def = makeDefElem("compression", (Node *)makeString(COMPRESSION_NO)); res = lappend(options, def); @@ -1171,8 +1177,8 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt DefElem* def1 = makeDefElem("orientation", (Node*)makeString(ORIENTATION_ROW)); DefElem* def2 = makeDefElem("compression", (Node*)rowCmprOpt); res = list_make2(def1, def2); - if (u_sess->attr.attr_sql.enable_default_ustore_table && !IsSystemNamespace(relnamespace) && - !assignedStorageType) { + if (g_instance.attr.attr_storage.enable_ustore && u_sess->attr.attr_sql.enable_default_ustore_table && + 
!IsSystemNamespace(relnamespace) && !assignedStorageType) { DefElem* def3 = makeDefElem("storage_type", (Node*)makeString(TABLE_ACCESS_METHOD_USTORE)); res = lappend(res, def3); } @@ -1185,12 +1191,12 @@ DefElem *def1 = makeDefElem("orientation", (Node *)makeString(ORIENTATION_ROW)); res = lcons(def1, options); } - if (!hasCompression && !tableCreateSupport.compressType) { + if (!hasCompression) { DefElem *def2 = makeDefElem("compression", (Node *)rowCmprOpt); res = lappend(options, def2); } - if (u_sess->attr.attr_sql.enable_default_ustore_table && !IsSystemNamespace(relnamespace) && - !assignedStorageType) { + if (g_instance.attr.attr_storage.enable_ustore && u_sess->attr.attr_sql.enable_default_ustore_table && + !IsSystemNamespace(relnamespace) && !assignedStorageType) { DefElem *def2 = makeDefElem("storage_type", (Node *)makeString(TABLE_ACCESS_METHOD_USTORE)); res = lappend(options, def2); } @@ -1830,7 +1836,7 @@ static void CheckPartitionKeyForCreateTable(PartitionState *partTableState, List partTableState->partitionStrategy != PART_STRATEGY_LIST) ComparePartitionValue(pos, descriptor->attrs, partTableState->partitionList); else if (partTableState->partitionStrategy == PART_STRATEGY_LIST) - CompareListValue(pos, descriptor->attrs, partTableState); + CompareListValue(pos, descriptor->attrs, partTableState->partitionList); list_free_ext(pos); } @@ -1922,6 +1928,7 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) bool isInLedgerNsp = false; HashBucketInfo* bucketinfo = NULL; DistributionType distType; + bool relhasuids = false; /* * isalter is true, change the owner of the objects as the owner of the @@ -1940,7 +1947,14 @@ */ rc = strncpy_s(relname, NAMEDATALEN, stmt->relation->relname, NAMEDATALEN - 1); securec_check(rc, "", ""); - + + if (ISMATMAP(relname) || ISMLOG(relname)) { + ereport(WARNING, (errcode(ERRCODE_INVALID_NAME), + errmsg("\"%s\" is not an appropriate name for a relation", relname), + errdetail("The kernel may treat it as a %s table of a materialized view", + ISMATMAP(relname) ? "map" : "mlog"))); + } + if (stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED && STMT_RETRY_ENABLED) stmt->relation->relpersistence = RELPERSISTENCE_PERMANENT; @@ -2004,7 +2018,7 @@ * drop, and mark stmt->relation as RELPERSISTENCE_TEMP if a temporary * namespace is selected. 
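 * (Assumption worth noting: the extra relkind argument passed to
 * RangeVarGetAndCheckCreationNamespace() below is new in this patch and lets the
 * creation-namespace check take the relation kind into account.)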
*/ - namespaceId = RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, NULL); + namespaceId = RangeVarGetAndCheckCreationNamespace(stmt->relation, NoLock, NULL, relkind); if (u_sess->attr.attr_sql.enforce_a_behavior) { /* Identify user ID that will own the table @@ -2016,8 +2030,10 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) */ if (!OidIsValid(ownerId) && (relkind == RELKIND_RELATION || RELKIND_IS_SEQUENCE(relkind) || relkind == RELKIND_VIEW || relkind == RELKIND_COMPOSITE_TYPE - || relkind == RELKIND_CONTQUERY)) - ownerId = GetUserIdFromNspId(namespaceId); + || relkind == RELKIND_CONTQUERY)) { + bool anyResult = CheckRelationCreateAnyPrivilege(GetUserId(), relkind); + ownerId = GetUserIdFromNspId(namespaceId, false, anyResult); + } if (!OidIsValid(ownerId)) ownerId = GetUserId(); @@ -2029,7 +2045,11 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) AclResult aclresult; aclresult = pg_namespace_aclcheck(namespaceId, ownerId, ACL_CREATE); - if (aclresult != ACLCHECK_OK) + bool anyResult = false; + if (aclresult != ACLCHECK_OK && !IsSysSchema(namespaceId)) { + anyResult = CheckRelationCreateAnyPrivilege(ownerId, relkind); + } + if (aclresult != ACLCHECK_OK && !anyResult) aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(namespaceId)); } } @@ -2104,7 +2124,7 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) /* Add default options for relation if need. */ if (!dfsTablespace) { if (!u_sess->attr.attr_common.IsInplaceUpgrade) { - stmt->options = AddDefaultOptionsIfNeed(stmt->options, relkind, stmt, namespaceId); + stmt->options = AddDefaultOptionsIfNeed(stmt->options, relkind, stmt->row_compress, namespaceId); } } else { checkObjectCreatedinHDFSTblspc(stmt, relkind); @@ -2164,6 +2184,11 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) reloptions = transformRelOptions((Datum)0, stmt->options, NULL, validnsps, true, false); StdRdOptions* std_opt = (StdRdOptions*)heap_reloptions(relkind, reloptions, true); + relhasuids = StdRdOptionsHasUids(std_opt, relkind); + if (relhasuids && t_thrd.proc->workingVersionNum < HASUID_VERSION_NUM) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("hasuids is not supported in current version!"))); + } if (std_opt != NULL) { RowTblCheckHashBucketOption(stmt->options, std_opt); if ((std_opt->segment)) { @@ -2230,9 +2255,9 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) ) { ereport(ERROR, (errmsg("Ustore table creation is not supported."))); } - if (g_instance.attr.attr_storage.undo_zone_count == 0) { - ereport(ERROR, (errmsg("Ustore table creation is not supported due to undo zone count is 0. 
Set it to" - "non-zero value to enable ustore."))); + if (!g_instance.attr.attr_storage.enable_ustore) { + ereport(ERROR, (errmsg("Ustore is disabled, please set enable_ustore=on."))); + } orientedFrom = (Node *)makeString(TABLE_ACCESS_METHOD_USTORE); storeChar = TABLE_ACCESS_METHOD_USTORE; @@ -2339,13 +2364,10 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) ereport(LOG, (errmodule(MOD_TIMESERIES), errmsg("use implicit distribution column method."))); } } else if (pg_strcasecmp(storeChar, TABLE_ACCESS_METHOD_USTORE) == 0) { - auto compression = StdRdOptionsGetStringData(std_opt, compression, COMPRESSION_NO); - auto orientation = StdRdOptionsGetStringData(std_opt, orientation, ORIENTATION_ROW); - if ((pg_strcasecmp(COMPRESSION_NO, compression) != 0 && - pg_strcasecmp(ORIENTATION_COLUMN, orientation) == 0) || + if (pg_strcasecmp(COMPRESSION_NO, StdRdOptionsGetStringData(std_opt, compression, COMPRESSION_NO)) != 0 || IsCompressedByCmprsInPgclass((RelCompressType)stmt->row_compress)) { ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UStore tables do not support compression."))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("UStore tables do not support compression."))); } ForbidToSetOptionsForRowTbl(stmt->options); ForbidToSetOptionsForUstoreTbl(stmt->options); @@ -2386,7 +2408,10 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) } if (pg_strcasecmp(storeChar, ORIENTATION_ROW) == 0) { - RowTblCheckCompressionOption(stmt->options); + RowTblCheckCompressionOption(stmt->options, stmt->row_compress); + } else if (relhasuids == true) { + ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("hasuids is only supported for row table"))); } if (stmt->ofTypename) { @@ -2460,14 +2485,6 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) } stmt->partTableState->subPartitionState->partitionList = NIL; - if (storage_type == SEGMENT_PAGE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("The subpartition table do not support segment-page storage."), - errcause("The function is not implemented."), - erraction("Do not set up segment-page storage.")))); - } - if (hashbucket) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errmsg("Un-support feature"), @@ -2475,25 +2492,18 @@ Oid DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) errcause("The function is not implemented."), erraction("Do not set up hashbucket.")))); } - - if (pg_strcasecmp(storeChar, TABLE_ACCESS_METHOD_USTORE) == 0) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("The subpartition table does not support ustore."), - errcause("The function is not implemented."), - erraction("Do not set up ustore.")))); - } } } localHasOids = interpretOidsOption(stmt->options); descriptor->tdhasoid = (localHasOids || parentOidCount > 0); - if ((pg_strcasecmp(storeChar, ORIENTATION_COLUMN) == 0 || pg_strcasecmp(storeChar, ORIENTATION_TIMESERIES) == 0) && - localHasOids) { + if ((pg_strcasecmp(storeChar, ORIENTATION_COLUMN) == 0 || + pg_strcasecmp(storeChar, ORIENTATION_TIMESERIES) == 0 || + relhasuids) && localHasOids) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Local OID column not supported in column/timeseries store tables."))); + errmsg("Local OID column not supported in column/timeseries/hasuids store tables."))); } bool is_gc_fdw = false; @@ -2722,6 +2732,10 @@ Oid 
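 * (Unlike the recycle-bin branch above, where TrDrop() merely moves the dropped
 * objects into the recycle bin rather than physically deleting them.)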
DefineRelation(CreateStmt* stmt, char relkind, Oid ownerId, bool isCTAS) /* Store inheritance information for new rel. */ StoreCatalogInheritance(relationId, inheritOids); + if (relhasuids) { + InsertUidEntry(relationId); + } + /* * We must bump the command counter to make the newly-created relation * tuple visible for opening. @@ -3090,7 +3104,8 @@ ObjectAddresses* PreCheckforRemoveRelation(DropStmt* drop, StringInfo tmp_queryS if (!OidIsValid(relOid)) { bool missing_ok = drop->missing_ok; - if (!u_sess->attr.attr_common.xc_maintenance_mode) { + /* for the inconsistent index on nodes cause by creating index concurrently, missing is ok on local node */ + if (!u_sess->attr.attr_common.xc_maintenance_mode && relkind != RELKIND_INDEX) { if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) cn_miss_relation = true; else @@ -3113,7 +3128,7 @@ ObjectAddresses* PreCheckforRemoveRelation(DropStmt* drop, StringInfo tmp_queryS */ if (delrel != NULL && !u_sess->attr.attr_sql.enable_cluster_resize && (RelationInClusterResizingReadOnly(delrel) || - (RelationInClusterResizing(delrel) && drop->removeType != OBJECT_TABLE))) { + (RelationInClusterResizing(delrel) && drop->removeType != OBJECT_TABLE))) { ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), errmsg("%s is redistributing, please retry later.", delrel->rd_rel->relname.data))); @@ -3391,7 +3406,8 @@ void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExec if (!OidIsValid(relOid)) { bool missing_ok = drop->missing_ok; - if (!u_sess->attr.attr_common.xc_maintenance_mode && !IS_SINGLE_NODE) { + /* for the inconsistent index on nodes cause by creating index concurrently, missing is ok on local node */ + if (!u_sess->attr.attr_common.xc_maintenance_mode && !IS_SINGLE_NODE && relkind != RELKIND_INDEX) { if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) cn_miss_relation = true; else if (!ENABLE_ROUTER_DN) // in router, drop if exists should enable on dn. @@ -3409,6 +3425,9 @@ void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExec RemoveJobsWhenRemoveRelation(relOid); } #endif /* ENABLE_MULTIPLE_NODES */ + if (relkind == RELKIND_RELATION) { + DeleteUidEntry(relOid); + } delrel = try_relation_open(relOid, NoLock); /* @@ -3419,7 +3438,7 @@ void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExec */ if (delrel != NULL && !u_sess->attr.attr_sql.enable_cluster_resize && (RelationInClusterResizingReadOnly(delrel) || - (RelationInClusterResizing(delrel) && drop->removeType != OBJECT_TABLE))) { + (RelationInClusterResizing(delrel) && drop->removeType != OBJECT_TABLE))) { ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION), errmsg("%s is redistributing, please retry later.", delrel->rd_rel->relname.data))); @@ -3476,7 +3495,7 @@ void RemoveRelations(DropStmt* drop, StringInfo tmp_queryString, RemoteQueryExec if (TrCheckRecyclebinDrop(drop, objects)) { /* Here we use Recyclebin-based-Drop. */ - TrDrop(objects, drop->behavior); + TrDrop(drop, objects, drop->behavior); } else { /* Here we really delete them. 
*/ performMultipleDeletions(objects, drop->behavior, flags); @@ -6841,6 +6860,10 @@ void AlterTable(Oid relid, LOCKMODE lockmode, AlterTableStmt* stmt) errmsg("ALTER MATERIALIZED VIEW is not yet supported."))); } + if (RelationIsSubPartitioned(rel) && cmd->subtype == AT_ClusterOn) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster a subpartition table"))); + } + if (RelationIsCUFormat(rel) && !CStoreSupportATCmd(cmd->subtype)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -6949,7 +6972,9 @@ static LOCKMODE GetPartitionLockLevel(AlterTableType subType) LOCKMODE cmdLockMode; switch (subType) { case AT_AddPartition: + case AT_AddSubPartition: case AT_DropPartition: + case AT_DropSubPartition: case AT_ExchangePartition: case AT_TruncatePartition: cmdLockMode = RowExclusiveLock; @@ -7009,197 +7034,6 @@ LOCKMODE AlterTableGetLockLevel(List* cmds) * 3. Catcache access isn't coordinated at all so refreshes can occur at * any time. */ -#ifdef REDUCED_ALTER_TABLE_LOCK_LEVELS - ListCell* lcmd = NULL; - LOCKMODE lockmode = ShareUpdateExclusiveLock; - - foreach (lcmd, cmds) { - AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); - LOCKMODE cmd_lockmode = AccessExclusiveLock; /* default for compiler */ - - switch (cmd->subtype) { - case AT_UnusableIndex: - cmd_lockmode = AccessExclusiveLock; - break; - case AT_AddPartition: - case AT_DropPartition: - case AT_ExchangePartition: - case AT_MergePartition: - case AT_UnusableIndexPartition: - case AT_UnusableAllIndexOnPartition: - case AT_SplitPartition: - cmd_lockmode = AccessExclusiveLock; - break; - case AT_TruncatePartition: - cmd_lockmode = AccessExclusiveLock; - break; - /* - * Need AccessExclusiveLock for these subcommands because they - * affect or potentially affect both read and write - * operations. - * - * New subcommand types should be added here by default. 
- */ - case AT_AddColumn: /* may rewrite heap, in some cases and visible - * to SELECT */ - case AT_DropColumn: /* change visible to SELECT */ - case AT_AddColumnToView: /* CREATE VIEW */ - case AT_AlterColumnType: /* must rewrite heap */ - case AT_DropConstraint: /* as DROP INDEX */ - case AT_AddOids: /* must rewrite heap */ - case AT_DropOids: /* calls AT_DropColumn */ - case AT_EnableAlwaysRule: /* may change SELECT rules */ - case AT_EnableReplicaRule: /* may change SELECT rules */ - case AT_EnableRule: /* may change SELECT rules */ - case AT_DisableRule: /* may change SELECT rules */ - case AT_EnableRls: /* may change SELECT|UPDATE|DELETE policies */ - case AT_DisableRls: /* may change SELECT|UPDATE|DELETE policies */ - case AT_ForceRls: /* may change SELECT|UPDATE|DELETE policies */ - case AT_NoForceRls: /* may change SELECT|UPDATE|DELETE policies */ - case AT_EncryptionKeyRotation: /* TDE Encryption Key Rotation */ - case AT_ChangeOwner: /* change visible to SELECT */ - case AT_SetTableSpace: /* must rewrite heap */ - case AT_DropNotNull: /* may change some SQL plans */ - case AT_SetNotNull: - case AT_GenericOptions: - case AT_SET_COMPRESS: - case AT_AlterColumnGenericOptions: - case AT_EnableRowMoveMent: - case AT_DisableRowMoveMent: - cmd_lockmode = AccessExclusiveLock; - break; - - case AT_SetPartitionTableSpace: - /* partitioned table lock: AccessShareLock - * partition lock: AccessExclusiveLock - */ - cmd_lockmode = AccessExclusiveLock; - break; - -#ifdef PGXC - case AT_DistributeBy: /* Changes table distribution type */ - case AT_SubCluster: /* Changes node list of distribution */ - case AT_AddNodeList: /* Adds nodes in distribution */ - case AT_DeleteNodeList: /* Deletes nodes in distribution */ - case AT_UpdateSliceLike: /* Update slice like tmptable in distribution */ - cmd_lockmode = ExclusiveLock; - break; -#endif - - /* - * These subcommands affect write operations only. - */ - case AT_ColumnDefault: - case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ - case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */ - case AT_ReAddConstraint: /* becomes AT_AddConstraint */ - case AT_EnableTrig: - case AT_EnableAlwaysTrig: - case AT_EnableReplicaTrig: - case AT_EnableTrigAll: - case AT_EnableTrigUser: - case AT_DisableTrig: - case AT_DisableTrigAll: - case AT_DisableTrigUser: - case AT_AddIndex: /* from ADD CONSTRAINT */ - case AT_AddIndexConstraint: - case AT_ReplicaIdentity: - cmd_lockmode = ShareRowExclusiveLock; - break; - - case AT_AddConstraint: - if (IsA(cmd->def, Constraint)) { - Constraint* con = (Constraint*)cmd->def; - - switch (con->contype) { - case CONSTR_EXCLUSION: - case CONSTR_PRIMARY: - case CONSTR_UNIQUE: - - /* - * Cases essentially the same as CREATE INDEX. We - * could reduce the lock strength to ShareLock if - * we can work out how to allow concurrent catalog - * updates. - */ - cmd_lockmode = ShareRowExclusiveLock; - break; - case CONSTR_FOREIGN: - - /* - * We add triggers to both tables when we add a - * Foreign Key, so the lock level must be at least - * as strong as CREATE TRIGGER. - */ - cmd_lockmode = ShareRowExclusiveLock; - break; - - default: - cmd_lockmode = ShareRowExclusiveLock; - } - } - break; - - /* - * These subcommands affect inheritance behaviour. Queries - * started before us will continue to see the old inheritance - * behaviour, while queries started after we commit will see - * new behaviour. No need to prevent reads or writes to the - * subtable while we hook it up though. 
- */ - case AT_AddInherit: - case AT_DropInherit: - cmd_lockmode = ShareUpdateExclusiveLock; - break; - - /* - * These subcommands affect implicit row type conversion. They - * have affects similar to CREATE/DROP CAST on queries. We - * don't provide for invalidating parse trees as a result of - * such changes. Do avoid concurrent pg_class updates, - * though. - */ - case AT_AddOf: - case AT_DropOf: - cmd_lockmode = ShareUpdateExclusiveLock; - - /* - * These subcommands affect general strategies for performance - * and maintenance, though don't change the semantic results - * from normal data reads and writes. Delaying an ALTER TABLE - * behind currently active writes only delays the point where - * the new strategy begins to take effect, so there is no - * benefit in waiting. In this case the minimum restriction - * applies: we don't currently allow concurrent catalog - * updates. - */ - case AT_SetStatistics: - case AT_ClusterOn: - case AT_DropCluster: - case AT_SetRelOptions: - case AT_ResetRelOptions: - case AT_ReplaceRelOptions: - case AT_SetOptions: - case AT_ResetOptions: - case AT_SetStorage: - case AT_ValidateConstraint: - cmd_lockmode = ShareUpdateExclusiveLock; - break; - - default: /* oops */ - { - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized alter table type: %d", (int)cmd->subtype))); - } break; - } - - /* - * Take the greatest lockmode from any subcommand - */ - lockmode = set_lockmode(lockmode, cmd_lockmode); - } -#else ListCell* lcmd = NULL; /* default lock mode of DDL is the highest mode 8, even if commands list is empty */ @@ -7208,19 +7042,17 @@ LOCKMODE AlterTableGetLockLevel(List* cmds) if (cmds && list_length(cmds) > 0) { /* clear the default lock mode, so it's safe to compare with other lock modes. */ lockmode = NoLock; - foreach (lcmd, cmds) { - LOCKMODE cmd_lockmode = AccessExclusiveLock; - -#ifndef ENABLE_MULTIPLE_NODES AlterTableCmd* cmd = (AlterTableCmd*)lfirst(lcmd); + LOCKMODE cmd_lockmode = u_sess->attr.attr_sql.enable_cluster_resize && cmd->subtype == AT_SetRelOptions ? + ExclusiveLock : AccessExclusiveLock; +#ifndef ENABLE_MULTIPLE_NODES cmd_lockmode = GetPartitionLockLevel(cmd->subtype); #endif /* update with the higher lock mode */ lockmode = set_lockmode(lockmode, cmd_lockmode); } } -#endif return lockmode; } @@ -7315,6 +7147,39 @@ static void ATController(Relation rel, List* cmds, bool recurse, LOCKMODE lockmo ATRewriteTables(&wqueue, lockmode); } +static void CheckIsViewAndRuleForAlterPartition(AlterTableCmd *cmd, Relation rel) +{ + bool result = false; + bool hasView = IsViewAndRuleDependReltion(RelationGetRelid(rel)); + switch (cmd->subtype) { + case AT_ExchangePartition: + if (cmd->alterGPI && hasView) { + result = true; + } + break; + case AT_DropPartition: + case AT_DropSubPartition: + case AT_MergePartition: + case AT_SplitPartition: + case AT_SplitSubPartition: + if (hasView) { + result = true; + } + break; + default: /* other options will pass */ + break; + } + if (result) { + ereport(ERROR, + (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST), + (errmsg("Cannot perform this operation because there are views or rules that depend on table %s.", + RelationGetRelationName(rel)), + errdetail("N/A"), + errhint("Drop the views or rules first. Use pg_rules to find rules. Use pg_class, pg_rewrite, " + "pg_depend, pg_namespace to find views.")))); + } +} + /* * ATPrepCmd * @@ -7340,6 +7205,8 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu * numbers in different children). 
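 * The CheckIsViewAndRuleForAlterPartition() call added below then rejects
 * partition-level DDL (DROP/MERGE/SPLIT, and EXCHANGE when updating a GPI) while
 * views or rules still depend on the table; e.g. (illustrative) "ALTER TABLE t DROP
 * PARTITION p0" now fails with ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST if a view
 * selects from t.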
*/ cmd = (AlterTableCmd*)copyObject(cmd); + + CheckIsViewAndRuleForAlterPartition(cmd, rel); /* * Do permissions checking, recursion to child tables if needed, and any * additional phase-1 processing needed. @@ -7357,6 +7224,12 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu /* Recursion occurs during execution phase */ pass = AT_PASS_ADD_PARTITION; break; + case AT_AddSubPartition: /* ADD SUBPARTITION */ + ATSimplePermissions(rel, ATT_TABLE); + ATPrepAddSubPartition(rel); + /* ADD SUBPARTITION obeys the same recursion order with ADD PARTITION */ + pass = AT_PASS_ADD_PARTITION; + break; case AT_AddColumnToView: /* add column via CREATE OR REPLACE * VIEW */ ATSimplePermissions(rel, ATT_VIEW); @@ -7395,7 +7268,7 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu case AT_SetStatistics: /* ALTER COLUMN SET STATISTICS */ ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); /* Performs own permission checks */ - ATPrepSetStatistics(rel, cmd->name, cmd->def, lockmode); + ATPrepSetStatistics(rel); pass = AT_PASS_MISC; break; case AT_AddStatistics: /* ADD STATISTICS */ @@ -7403,7 +7276,7 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); /* Performs own permission checks */ - ATPrepSetStatistics(rel, cmd->name, cmd->def, lockmode); + ATPrepSetStatistics(rel); es_check_alter_table_statistics(rel, cmd); pass = AT_PASS_MISC; break; @@ -7433,6 +7306,12 @@ static void ATPrepCmd(List** wqueue, Relation rel, AlterTableCmd* cmd, bool recu /* Recursion occurs during execution phase */ pass = AT_PASS_DROP; break; + case AT_DropSubPartition: /* DROP SUBPARTITION */ + ATSimplePermissions(rel, ATT_TABLE); + ATPrepDropSubPartition(rel); + /* Recursion occurs during execution phase */ + pass = AT_PASS_DROP; + break; case AT_UnusableIndexPartition: /* UNUSEABLE INDEX PARTITION */ ATSimplePermissions(rel, ATT_INDEX); ATPrepUnusableIndexPartition(rel); @@ -7691,6 +7570,7 @@ static bool ATCheckLedgerTableCmd(Relation rel, AlterTableCmd* cmd) case AT_AlterColumnType: /* ALTER COLUMN TYPE */ case AT_ExchangePartition: /* EXCHANGE PARTITION */ case AT_DropPartition: /* DROP PARTITION */ + case AT_DropSubPartition: /* DROP PARTITION */ case AT_TruncatePartition: /* TRUNCATE PARTITION */ case AT_TruncateSubPartition: /* TRUNCATE PARTITION */ /* Blockchain related tables can't ALTER */ @@ -7788,8 +7668,9 @@ static void ATRewriteCatalogs(List** wqueue, LOCKMODE lockmode) if (get_rel_persistence(tab->relid) == RELPERSISTENCE_GLOBAL_TEMP) { gtt_create_storage_files(tab->relid); } - - if (tab->relkind == RELKIND_RELATION || tab->relkind == RELKIND_MATVIEW) { + /* u_sess->attr.attr_sql.enable_cluster_resize = true, alter operation don't handle toast */ + if ((tab->relkind == RELKIND_RELATION || tab->relkind == RELKIND_MATVIEW) && + !u_sess->attr.attr_sql.enable_cluster_resize) { Relation rel = relation_open(tab->relid, NoLock); Datum toast_reloptions = (Datum)0; @@ -7824,6 +7705,9 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT case AT_AddPartition: /* add partition */ ATExecAddPartition(rel, (AddPartitionState*)cmd->def); break; + case AT_AddSubPartition: /* add subpartition */ + ATExecAddSubPartition(rel, (AddSubPartitionState*)cmd->def); + break; case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */ ATExecColumnDefault(rel, cmd->name, cmd->def, lockmode); break; @@ -7860,6 
+7744,9 @@ static void ATExecCmd(List** wqueue, AlteredTableInfo* tab, Relation rel, AlterT case AT_DropPartition: /* drop partition */ ATExecDropPartition(rel, cmd); break; + case AT_DropSubPartition: /* drop subpartition */ + ATExecDropSubPartition(rel, cmd); + break; case AT_UnusableIndexPartition: /* unusable index partition */ ATExecUnusableIndexPartition(rel, cmd->name); break; @@ -9249,7 +9136,7 @@ static void ATPrepAddColumn( ReleaseSysCache(typeTuple); // check the supported data type and error report if needed. - if (RelationIsCUFormat(rel) && !IsTypeSupportedByCStore(typeOid, typmod)) { + if (RelationIsCUFormat(rel) && !IsTypeSupportedByCStore(typeOid)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type \"%s\" is not supported in column store", format_type_with_typemod(typeOid, typmod)))); @@ -10112,7 +9999,7 @@ static void ATExecColumnDefault(Relation rel, const char* colName, Node* newDefa /* * ALTER TABLE ALTER COLUMN SET STATISTICS */ -static void ATPrepSetStatistics(Relation rel, const char* colName, Node* newValue, LOCKMODE lockmode) +static void ATPrepSetStatistics(Relation rel) { /* * We do our own permission checking because (a) we want to allow SET @@ -10934,6 +10821,17 @@ static void ATExecAddConstraint(List** wqueue, AlteredTableInfo* tab, Relation r ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("column store unsupport constraint \"%s\"", GetConstraintType(newConstraint->contype)))); + + if (rel->rd_tam_type == TAM_USTORE && newConstraint->deferrable == true) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmodule(MOD_COMMAND), + errmsg("Ustore table does not support to set deferrable."), + errdetail("N/A"), + errcause("feature not supported"), + erraction("check constraints of columns"))); + } + /* * Currently, we only expect to see CONSTR_CHECK and CONSTR_FOREIGN nodes * arriving here (see the preprocessing done in parse_utilcmd.c). 
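 * (Just above, this patch also adds an early rejection: tables whose rd_tam_type is
 * TAM_USTORE cannot take DEFERRABLE constraints.)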
Use a @@ -11688,6 +11586,36 @@ static void ATExecValidateConstraint(Relation rel, char* constrName, bool recurs } else { validateCheckConstraint(rel, tuple); } + } else if (RelationIsSubPartitioned(rel)) { + List* partitions = NIL; + ListCell* cell = NULL; + Partition partition = NULL; + Relation partRel = NULL; + + partitions = relationGetPartitionList(rel, lockmode); + foreach (cell, partitions) { + partition = (Partition)lfirst(cell); + partRel = partitionGetRelation(rel, partition); + + List *subpartitions = relationGetPartitionList(partRel, lockmode); + ListCell *subcell = NULL; + foreach (subcell, subpartitions) { + Partition subpartition = (Partition)lfirst(subcell); + if (RELATION_OWN_BUCKETKEY(rel)) { + /* validate constraint for every buckets */ + validateCheckConstraintForBucket(partRel, subpartition, tuple); + } else { + Relation subpartRel = partitionGetRelation(partRel, subpartition); + + validateCheckConstraint(subpartRel, tuple); + + releaseDummyRelation(&subpartRel); + } + } + releasePartitionList(partRel, &subpartitions, lockmode); + releaseDummyRelation(&partRel); + } + releasePartitionList(rel, &partitions, lockmode); } else { List* partitions = NIL; ListCell* cell = NULL; @@ -12569,6 +12497,46 @@ static void ATExecDropConstraint(Relation rel, const char* constrName, DropBehav heap_close(conrel, RowExclusiveLock); } +static void CheckHugeToastInternal(TupleDesc reldesc, Relation rel, AttrNumber attnum) +{ + HeapTuple tuple; + Datum values[reldesc->natts]; + bool isnull[reldesc->natts]; + TableScanDesc scan = tableam_scan_begin(rel, SnapshotNow, 0, NULL); + while ((tuple = (HeapTuple)tableam_scan_getnexttuple(scan, ForwardScanDirection)) != NULL) { + tableam_tops_deform_tuple(tuple, reldesc, values, isnull); + if (!isnull[attnum - 1] && VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(values[attnum - 1]))) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Un-support alter clob/blob to text type when more than 1GB"))); + } + } + tableam_scan_end(scan); +} + +void CheckHugeToast(AlteredTableInfo *tab, Relation rel, AttrNumber attnum) +{ + TupleDesc reldesc = tab->oldDesc; + Form_pg_attribute attr = reldesc->attrs[attnum - 1]; + + if (attr->atttypid != CLOBOID && attr->atttypid != BLOBOID) { + return; + } + + if (RelationIsPartitioned(rel)) { + ListCell *partCell = NULL; + List *partList = relationGetPartitionList(rel, NoLock); + foreach(partCell, partList) { + Partition part = (Partition)lfirst(partCell); + Relation partRel = partitionGetRelation(rel, part); + CheckHugeToastInternal(reldesc, partRel, attnum); + releaseDummyRelation(&partRel); + } + releasePartitionList(rel, &partList, NoLock); + } else { + CheckHugeToastInternal(reldesc, rel, attnum); + } +} + /* * ALTER COLUMN TYPE */ @@ -12628,7 +12596,7 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation typenameTypeIdAndMod(NULL, typname, &targettype, &targettypmod); // check the unsupported datatype. 
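// (Note that IsTypeSupportedByCStore() drops its typmod argument in this patch, both
// here and in ATPrepAddColumn() above, so column-store support is now decided by the
// type OID alone.)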
- if (RelationIsColStore(rel) && !IsTypeSupportedByCStore(targettype, targettypmod)) { + if (RelationIsColStore(rel) && !IsTypeSupportedByCStore(targettype)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type \"%s\" is not supported in column store", @@ -12748,6 +12716,9 @@ static void ATPrepAlterColumnType(List** wqueue, AlteredTableInfo* tab, Relation tab->newvals = lappend(tab->newvals, newval); if (ATColumnChangeRequiresRewrite(transform, attnum)) tab->rewrite = true; + if (targettype != CLOBOID && targettype != BLOBOID) { + CheckHugeToast(tab, rel, attnum); + } } else if (transform != NULL) ereport( ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table", RelationGetRelationName(rel)))); @@ -14457,67 +14428,6 @@ static void ATExecSetRelOptionsToast(Oid toastid, List* defList, AlterTableType heap_close(pgclass, RowExclusiveLock); } -/* - * Do not modify compression parameters. - */ -void static CheckSupportModifyCompression(Relation rel, bytea* relOoption, List* defList) -{ - if (!relOoption) { - return; - } - if (!REL_SUPPORT_COMPRESSED(rel) || rel->rd_node.opt == 0) { - ForbidUserToSetCompressedOptions(defList); - return; - } - PageCompressOpts* newCompressOpt = &(((StdRdOptions*)relOoption)->compress); - RelFileCompressOption current; - TransCompressOptions(rel->rd_node, ¤t); - if (newCompressOpt) { - if (newCompressOpt->compressType != (int)current.compressAlgorithm) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("change compresstype OPTION is not supported"))); - } - if ((int)current.compressAlgorithm != COMPRESS_TYPE_NONE && - newCompressOpt->compressChunkSize != CHUNK_SIZE_LIST[current.compressChunkSize]) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("change compress_chunk_size OPTION is not supported"))); - } - if (!newCompressOpt->compressByteConvert && newCompressOpt->compressDiffConvert) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_diff_convert should be used with compress_byte_convert."))); - } - if (current.compressAlgorithm == COMPRESS_TYPE_PGLZ) { - ListCell *opt = NULL; - foreach (opt, defList) { - DefElem *def = (DefElem *)lfirst(opt); - if (pg_strcasecmp(def->defname, "compress_level") == 0) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_level should be used with ZSTD algorithm."))); - } - } - } - } else { - if ((int)current.compressAlgorithm != COMPRESS_TYPE_NONE) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("change compresstype OPTION is not supported"))); - } - } - - /* - * forbid modify partition CompressOption - */ - if (HEAP_IS_PARTITIONED(rel)) { - if ((int)current.compressLevel != newCompressOpt->compressLevel) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("change partition compressLevel OPTION is not supported"))); - } - if ((int)current.compressPreallocChunks != newCompressOpt->compressPreallocChunks) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("change partition compress_prealloc_chunks OPTION is not supported"))); - } - } -} - /* * Set, reset, or replace reloptions. 
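 * This patch also makes ATExecSetRelOptions() track the hasuids reloption:
 * oldRelHasUids and newRelHasUids below are compared after the catalog update, and
 * AddOrDropUidsAttr() then adds or drops the uid attribute accordingly.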
*/ @@ -14542,6 +14452,8 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper Oid rel_cn_oid = InvalidOid; RedisHtlAction redis_action = REDIS_REL_INVALID; char* merge_list = NULL; + bool oldRelHasUids = RELATION_HAS_UIDS(rel); + bool newRelHasUids = false; if (defList == NIL && operation != AT_ReplaceRelOptions) return; /* nothing to do */ @@ -14577,7 +14489,7 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper // we have to handle psort tuple's options if this is an index relation using PSORT method. // it's identifyed by access method whose oid is PSORT_AM_OID. // and the psort tuple id is saved in index relation's relcudescrelid field. - // + bool needSetPsortOpt = false; Oid psortTid = InvalidOid; Oid indexAmId = DatumGetObjectId(SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relam, &isnull)); @@ -14655,7 +14567,6 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper } /* Validate */ - bytea* relOpt = NULL; switch (rel->rd_rel->relkind) { case RELKIND_RELATION: { /* this options only can be used when define a new relation. @@ -14664,8 +14575,12 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper ForbidUserToSetDefinedOptions(defList); bytea* heapRelOpt = heap_reloptions(rel->rd_rel->relkind, newOptions, true); - relOpt = heapRelOpt; const char* algo = RelationGetAlgo(rel); + newRelHasUids = StdRdOptionsHasUids(heapRelOpt, RELKIND_RELATION); + if (rel->rd_rel->relhasoids && newRelHasUids) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), + errmsg("table with oids cannot add or modify hasuids by ALTER TABLE command."))); + } if (RelationIsColStore(rel)) { /* un-supported options. dont care its values */ ForbidToSetOptionsForColTbl(defList); @@ -14682,6 +14597,9 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper if (algo == NULL || *algo == '\0') { ForbidToSetTdeOptionsForNonTdeTbl(defList); } + if (RelationIsUstoreFormat(rel)) { + ForbidToSetOptionsForUstoreTbl(defList); + } } /* validate the values of ttl and period for partition manager */ @@ -14694,23 +14612,22 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper case RELKIND_MATVIEW: case RELKIND_CONTQUERY: case RELKIND_VIEW:{ + Assert(oldRelHasUids == false); (void)heap_reloptions(rel->rd_rel->relkind, newOptions, true); break; } case RELKIND_INDEX: - case RELKIND_GLOBAL_INDEX: { + case RELKIND_GLOBAL_INDEX: ForbidUserToSetDefinedIndexOptions(defList); - relOpt = index_reloptions(rel->rd_am->amoptions, newOptions, true); + Assert(oldRelHasUids == false); + (void)index_reloptions(rel->rd_am->amoptions, newOptions, true); break; - } default: ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table, view, materialized view, index, or TOAST table", RelationGetRelationName(rel)))); break; } - - CheckSupportModifyCompression(rel, relOpt, defList); /* * All we need do here is update the pg_class row; the new options will be @@ -14740,6 +14657,8 @@ static void ATExecSetRelOptions(Relation rel, List* defList, AlterTableType oper ReleaseSysCache(tuple); + AddOrDropUidsAttr(relid, oldRelHasUids, newRelHasUids); + /* repeat the whole exercise for the toast table, if there's one */ if (RELATION_IS_PARTITIONED(rel)) { partCacheList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relid); @@ -15315,6 +15234,7 @@ static void copy_relation_data(Relation rel, SMgrRelation* dstptr, ForkNumber fo if (rdStatus == 
SMGR_RD_CRC_ERROR) { addBadBlockStat(&src->smgr_rnode.node, forkNum); + addGlobalRepairBadBlockStat(src->smgr_rnode, forkNum, blkno); if (RelationNeedsWAL(rel) && CanRemoteRead() && !IsSegmentFileNode(src->smgr_rnode.node)) { ereport(WARNING, @@ -15324,18 +15244,20 @@ static void copy_relation_data(Relation rel, SMgrRelation* dstptr, ForkNumber fo relpath(src->smgr_rnode, forkNum)), handle_in_client(true))); - RemoteReadBlock(src->smgr_rnode, forkNum, blkno, buf); + RemoteReadBlock(src->smgr_rnode, forkNum, blkno, buf, NULL); /* * segment-page storage may fail here, because it use logic blocknumber while CRC * use physical block number */ - if (PageIsVerified(page, blkno)) + if (PageIsVerified(page, blkno)) { smgrwrite(src, forkNum, blkno, buf, false); - else + UpdateRepairTime(src->smgr_rnode.node, forkNum, blkno); + } else { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), (errmsg("fail to remote read page, data corrupted in network")))); + } } else { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -15463,6 +15385,7 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char if (rdStatus == SMGR_RD_CRC_ERROR) { addBadBlockStat(&src->rd_node, forkNum); + addGlobalRepairBadBlockStat(src->rd_smgr->smgr_rnode, forkNum, src_blkno); if (RelationNeedsWAL(src) && CanRemoteRead() && !IsSegmentFileNode(src->rd_node)) { ereport(WARNING, @@ -15472,14 +15395,16 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char relpath(src->rd_smgr->smgr_rnode, forkNum)), handle_in_client(true))); - RemoteReadBlock(src->rd_smgr->smgr_rnode, forkNum, src_blkno, buf); + RemoteReadBlock(src->rd_smgr->smgr_rnode, forkNum, src_blkno, buf, NULL); - if (PageIsVerified(page, src_blkno)) + if (PageIsVerified(page, src_blkno)) { smgrwrite(src->rd_smgr, forkNum, src_blkno, buf, false); - else + UpdateRepairTime(src->rd_node, forkNum, src_blkno); + } else { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), (errmsg("fail to remote read page, data corrupted in network")))); + } } else { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), @@ -15499,6 +15424,8 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char if (!tableam_tops_page_get_item(src, &tuple, page, tupleNo, destBlocks)) { continue; } + ChunkIdHashKey hashkey; + OldToNewChunkIdMapping mapping = NULL; /* If toast storage, modify va_toastrelid and va_valueid. 
*/ if (OidIsValid(destToastOid)) { @@ -15506,8 +15433,6 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char Datum values[numAttrs]; bool isNull[numAttrs]; int i = 0; - ChunkIdHashKey hashkey; - OldToNewChunkIdMapping mapping = NULL; /* Ustore not support compress yet */ if (RelationIsUstoreFormat(src) || !HEAP_TUPLE_IS_COMPRESSED(tuple.t_data)) { @@ -15521,7 +15446,6 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char struct varlena* value = NULL; value = (struct varlena*)DatumGetPointer(values[i]); - if (srcTupleDesc->attrs[i]->attlen == -1 && !isNull[i] && VARATT_IS_EXTERNAL(value)) { struct varatt_external* toastPointer = NULL; @@ -15540,6 +15464,30 @@ static void mergeHeapBlock(Relation src, Relation dest, ForkNumber forkNum, char } } } + } else if (RelationIsToast(dest)) { + /* for a toast table, a CLOB/BLOB larger than 1GB keeps a toast pointer in the first chunk's chunk_data, so remap it */ + Datum values[3]; + bool isNull[3]; + tableam_tops_deform_tuple(&tuple, src->rd_att, values, isNull); + struct varlena* value = (struct varlena*)DatumGetPointer(values[2]); + if (!isNull[2] && VARATT_IS_EXTERNAL_ONDISK_B(value)) { + struct varatt_external* toastPointer = NULL; + + toastPointer = (varatt_external*)(VARDATA_EXTERNAL((varattrib_1b_e*)(value))); + Assert(toastPointer->va_toastrelid == src->rd_id); + toastPointer->va_toastrelid = dest->rd_id; + + rc = memset_s(&hashkey, sizeof(hashkey), 0, sizeof(hashkey)); + securec_check(rc, "\0", "\0"); + hashkey.toastTableOid = src->rd_id; + hashkey.oldChunkId = toastPointer->va_valueid; + + mapping = (OldToNewChunkIdMapping)hash_search(chunkIdHashTable, &hashkey, HASH_FIND, NULL); + + if (PointerIsValid(mapping)) { + toastPointer->va_valueid = mapping->newChunkId; + } + } } } @@ -15754,6 +15702,7 @@ static void ATExecEncryptionKeyRotation(Relation rel, LOCKMODE lockmode) /* encryption key rotation */ tde_reloption = list_make1(makeDefElem("dek_cipher", (Node*)makeString(dek_cipher))); ATExecSetRelOptions(rel, tde_reloption, AT_SetRelOptions, lockmode, true); + ereport(LOG, (errmsg("TDE key rotation succeeded"), errdetail("the dek_cipher in the TDE table info has been changed"))); /* sync TDE storage hash table */ if (IS_PGXC_DATANODE) { tde_key_manager->save_key(tde_data); @@ -16986,7 +16935,7 @@ static void AtExecCopySlice(CatCList* sliceList, Oid tabOid, Relation pgxcSliceR bool replaces[Natts_pgxc_slice] = {false}; for (int i = 0; i < sliceList->n_members; i++) { - oldTup = &sliceList->members[i]->tuple; + oldTup = t_thrd.lsc_cxt.FetchTupleFromCatCList(sliceList, i); bool isnull = false; Datum val = fastgetattr(oldTup, Anum_pgxc_slice_type, RelationGetDescr(pgxcSliceRel), &isnull); if (DatumGetChar(val) == PGXC_SLICE_TYPE_TABLE) { @@ -17043,7 +16992,7 @@ static void AtExecUpdateSliceLike(Relation rel, const RangeVar* refTableName) } /* drop tabOid1 slice tuples except table entry tuple */ for (i = 0; i < sliceList1->n_members; i++) { - oldTup = &sliceList1->members[i]->tuple; + oldTup = t_thrd.lsc_cxt.FetchTupleFromCatCList(sliceList1, i); simple_heap_delete(pgxcSliceRel, &oldTup->t_self); } ReleaseSysCacheList(sliceList1); @@ -17633,7 +17582,7 @@ void AlterTableNamespace(AlterObjectSchemaStmt* stmt) /* Get and lock schema OID and check its permissions. 
*/ newrv = makeRangeVar(stmt->newschema, RelationGetRelationName(rel), -1); - nspOid = RangeVarGetAndCheckCreationNamespace(newrv, NoLock, NULL); + nspOid = RangeVarGetAndCheckCreationNamespace(newrv, NoLock, NULL, '\0'); /* common checks on switching namespaces */ CheckSetNamespace(oldNspOid, nspOid, RelationRelationId, relid); @@ -18098,7 +18047,8 @@ void RangeVarCallbackOwnsRelation( } AclResult aclresult = pg_class_aclcheck(relId, GetUserId(), ACL_INDEX); - if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(relId, GetUserId())) { + if (aclresult != ACLCHECK_OK && !pg_class_ownercheck(relId, GetUserId()) && + !HasSpecAnyPriv(GetUserId(), CREATE_ANY_INDEX, false)) { aclcheck_error(aclresult, ACL_KIND_CLASS, relation->relname); } if (!g_instance.attr.attr_common.allowSystemTableMods && !u_sess->attr.attr_common.IsInplaceUpgrade && @@ -18373,15 +18323,14 @@ List* GetPartitionkeyPos(List* partitionkeys, List* schema) } /* duplicate partitionkey name */ - if (is_exist != NULL && is_exist[column_count]) { + if (is_exist[column_count]) { pfree_ext(is_exist); ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("duplicate partition key: %s", partitonkey_name))); } /* recoed attribute info when the partitionkey is unique */ - if (is_exist != NULL) - is_exist[column_count] = true; + is_exist[column_count] = true; break; } @@ -18955,12 +18904,11 @@ static void ReportListPartitionIntersect(const List* partitionList, Const* value } } -static void CompareListValue(const List* pos, Form_pg_attribute* attrs, PartitionState* partdef) +void CompareListValue(const List* pos, Form_pg_attribute* attrs, List *partitionList) { - if (pos == NULL || attrs == NULL || partdef == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid range partiiton table definition"))); + if (pos == NULL || attrs == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("invalid list partition table definition"))); } - List* partitionList = partdef->partitionList; Oid typeOid = InvalidOid; ListCell* lhead = pos->head; @@ -19121,21 +19069,31 @@ static void ATPrepAddPartition(Relation rel) ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not add partition against NON-PARTITIONED table"))); } - if (RelationIsSubPartitioned(rel)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, add partition is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); - } - if (rel->partMap->type == PART_TYPE_INTERVAL) { ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), errmsg("can not add partition against interval partitioned table"))); } } +static void ATPrepAddSubPartition(Relation rel) +{ + if (!RelationIsSubPartitioned(rel)) { + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("Wrong object type"), + errdetail("Can not add subpartition against NON-SUBPARTITIONED table"), + errcause("ADD SUBPARTITION works on a NON-SUBPARTITIONED table"), + erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); + } + + if (rel->partMap->type == PART_TYPE_INTERVAL) { + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("Un-support feature"), + errdetail("Can not add subpartition against interval partitioned table"), + errcause("ADD SUBPARTITION works on an interval partitioned table"), + erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); + } +} + /* * @@GaussDB@@ * Target : data partition * Brief : * Description : * Notes : */ @@ -19146,17 +19104,22 @@ static 
void ATPrepAddPartition(Relation rel) static void ATPrepDropPartition(Relation rel) { if (!RELATION_IS_PARTITIONED(rel)) { - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("can not drop partition against NON-PARTITIONED table"))); + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("Un-support feature"), + errdetail("Can not drop partition against NON-PARTITIONED table"), + errcause("DROP PARTITION works on a NON-PARTITIONED table"), + erraction("Please check DDL syntax for \"DROP PARTITION\""))); } +} - if (RelationIsSubPartitioned(rel)) { - ereport( - ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), - errdetail("For subpartition table, drop partition is not yet supported."), - errcause("The function is not implemented."), erraction("Use other actions instead.")))); +static void ATPrepDropSubPartition(Relation rel) +{ + if (!RelationIsSubPartitioned(rel)) { + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("Un-support feature"), + errdetail("Can not drop subpartition against NON-SUBPARTITIONED table"), + errcause("DROP SUBPARTITION works on a NON-SUBPARTITIONED table"), + erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); } } @@ -19326,8 +19289,10 @@ static void ATPrepSplitSubPartition(Relation rel) } } -static void ATExecAddPartition(Relation rel, AddPartitionState* partState) +static void ATExecAddPartition(Relation rel, AddPartitionState *partState) { + Assert(RELATION_IS_PARTITIONED(rel)); + if (rel->partMap->type == PART_TYPE_LIST) { if (IsA(linitial(partState->partitionList), ListPartitionDefState)) { ATExecAddListPartition(rel, partState); @@ -19349,184 +19314,221 @@ static void ATExecAddPartition(Relation rel, AddPartitionState* partState) } } -static void ATExecAddListPartition(Relation rel, AddPartitionState* partState) +/* check tablespace permission for add partition/subpartition */ +static void CheckTablespaceForAddPartition(Relation rel, List *partDefStateList) { - Relation pgPartRel = NULL; - Oid existingPartOid = InvalidOid; - Oid newListPartOid = InvalidOid; - List* partKeyValueList = NULL; - Datum new_reloptions; - Datum rel_reloptions; - HeapTuple tuple; - bool isnull = false; - List* old_reloptions = NIL; - ListCell* cell = NULL; - int i = 0; - Oid bucketOid; - - ListPartitionDefState* listPartDef = NULL; - - /* check tablespace privileges */ - foreach (cell, partState->partitionList) { - listPartDef = (ListPartitionDefState*)lfirst(cell); - if (PointerIsValid(listPartDef->tablespacename)) - CheckPartitionTablespace(listPartDef->tablespacename, rel->rd_rel->relowner); - } - /* check 2: can not add more partition, because more enough */ - if ((getNumberOfPartitions(rel) + partState->partitionList->length) > MAX_PARTITION_NUM) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("too many partitions for partitioned table"), - errhint("Number of partitions can not be more than %d", MAX_PARTITION_NUM))); - } - - /* check 3: name conflict check */ - foreach (cell, partState->partitionList) { - listPartDef = (ListPartitionDefState*)lfirst(cell); - existingPartOid = partitionNameGetPartitionOid(rel->rd_id, - listPartDef->partitionName, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - true, - false, - NULL, - NULL, - NoLock); - if (OidIsValid(existingPartOid)) { - ereport(ERROR, - ((errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("adding partition name conflict with existing partitions: \"%s\".", - listPartDef->partitionName)))); + ListCell *cell = NULL; + foreach 
(cell, partDefStateList) { + switch (nodeTag(lfirst(cell))) { + case T_RangePartitionDefState: + { + RangePartitionDefState *partDef = (RangePartitionDefState*)lfirst(cell); + if (PointerIsValid(partDef->tablespacename)) { + CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner); + } + CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState); + break; + } + case T_ListPartitionDefState: + { + ListPartitionDefState *partDef = (ListPartitionDefState*)lfirst(cell); + if (PointerIsValid(partDef->tablespacename)) { + CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner); + } + CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState); + break; + } + case T_HashPartitionDefState: + { + HashPartitionDefState *partDef = (HashPartitionDefState*)lfirst(cell); + if (PointerIsValid(partDef->tablespacename)) { + CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner); + } + CheckTablespaceForAddPartition(rel, partDef->subPartitionDefState); + break; + } + default: + ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Unknown PartitionDefState for ADD PARTITION"), + errdetail("N/A"), errcause("The partition type is incorrect."), + erraction("Use the correct partition type."))); } } +} - /* check 4: new adding partitions behind the last partition */ - listPartDef = (ListPartitionDefState*)linitial(partState->partitionList); +static void CheckPartitionNameConflictForAddPartition(List *newPartitionNameList, List *existingPartitionNameList) +{ + Assert(PointerIsValid(newPartitionNameList)); + Assert(PointerIsValid(existingPartitionNameList)); - int partNum = getNumberOfPartitions(rel); - for (i = 0; i < partNum; i++) { - ListCell* cell = NULL; - int j = 0; - for (j = 0; j < ((ListPartitionMap*)rel->partMap)->listElements[i].len; j++) { - Const* curBound = ((ListPartitionMap*)rel->partMap)->listElements[i].boundary[j]; - foreach(cell, listPartDef->boundary) { - Const* val = (Const*)lfirst(cell); - if (partitonKeyCompare(&curBound, &val, 1) == 0) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("list boundary of adding partition MUST overlap existing partition"))); - } + ListCell *cell1 = NULL; + ListCell *cell2 = NULL; + ListCell *lc = NULL; + foreach (cell1, newPartitionNameList) { + char *newPartitionName1 = (char *)lfirst(cell1); + + /* 1. the newPartitionNameList should not contain duplicate names within itself */ + lc = cell1; + while ((lc = lnext(lc)) != NULL) { + char *newPartitionName2 = (char *)lfirst(lc); + if (strcmp(newPartitionName1, newPartitionName2) == 0) { + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("The partition name \"%s\" is duplicated with another new partition name", + newPartitionName1), + errdetail("N/A"), + errcause("When ADD PARTITION/SUBPARTITION, one partition name is duplicated with another one"), + erraction("Check the syntax, and change the duplicated partition name"))); + } + } + + /* 2. 
the newPartitionNameList should not duplicate any name in existingPartitionNameList */ + foreach (cell2, existingPartitionNameList) { + char *existingPartitionName = (char *)lfirst(cell2); + if (strcmp(newPartitionName1, existingPartitionName) == 0) { + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("The partition name \"%s\" is duplicated with existing partition name", newPartitionName1), + errdetail("N/A"), + errcause( + "When ADD PARTITION/SUBPARTITION, one partition name is duplicated with the existing name"), + erraction("Check the syntax, and change the duplicated partition name"))); } } } - partKeyValueList = transformIntoTargetType(rel->rd_att->attrs, - ((ListPartitionMap*)rel->partMap)->partitionKey->values[0], - listPartDef->boundary); - existingPartOid = partitionValuesGetPartitionOid(rel, partKeyValueList, AccessExclusiveLock, false, true, false); - if (OidIsValid(existingPartOid)) { - list_free_deep(partKeyValueList); - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("list boundary of adding partition MUST overlap existing partition"))); - } - - /* check 5: whether has the unusable local index */ - if (!checkRelationLocalIndexesUsable(rel)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("can't add partition bacause the relation %s has unusable local index", - NameStr(rel->rd_rel->relname)), - errhint("please reindex the unusable index first."))); - } - - bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(rel); - - pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); - - /* step 2: add new partition entry in pg_partition */ - /* TRANSFORM into target first */ - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(rel->rd_id)); - rel_reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); - - old_reloptions = untransformRelOptions(rel_reloptions); - RemoveRedisRelOptionsFromList(&old_reloptions); - new_reloptions = transformRelOptions((Datum)0, old_reloptions, NULL, NULL, false, false); - ReleaseSysCache(tuple); - - if (old_reloptions != NIL) - list_free_ext(old_reloptions); - - bucketOid = RelationGetBucketOid(rel); - foreach (cell, partState->partitionList) { - listPartDef = (ListPartitionDefState*)lfirst(cell); - newListPartOid = HeapAddListPartition(pgPartRel, - rel->rd_id, - rel->rd_rel->reltablespace, - bucketOid, - listPartDef, - rel->rd_rel->relowner, - (Datum)new_reloptions, - isTimestamptz, - RelationGetStorageType(rel)); - - /* step 3: no need to update number of partitions in pg_partition */ - /* - * We must bump the command counter to make the newly-created partition - * tuple visible for opening. 
- */ - CommandCounterIncrement(); - - if (RelationIsColStore(rel)) { - addCudescTableForNewPartition(rel, newListPartOid); - addDeltaTableForNewPartition(rel, newListPartOid); - } - - addIndexForPartition(rel, newListPartOid); - - addToastTableForNewPartition(rel, newListPartOid); - /* step 4: invalidate relation */ - CacheInvalidateRelcache(rel); - } - - /* close relation, done */ - relation_close(pgPartRel, NoLock); - list_free_deep(partKeyValueList); - pfree_ext(isTimestamptz); } - -/* - * @@GaussDB@@ - * Target : data partition - * Brief : - * Description : - * Notes : - */ -static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState) +static void CheckPartitionValueConflictForAddPartition(Relation rel, Node *partDefState) { - Relation pgPartRel = NULL; + Assert(IsA(partDefState, RangePartitionDefState) || IsA(partDefState, ListPartitionDefState)); + + int i; + int j; + ListCell *cell = NULL; + Const *curBound = NULL; + Const *val = NULL; + List *partKeyValueList = NIL; Oid existingPartOid = InvalidOid; - Oid newPartOid = InvalidOid; - List* partKeyValueList = NULL; - Datum new_reloptions; - Datum rel_reloptions; - HeapTuple tuple; - bool isnull = false; - List* old_reloptions = NIL; - ListCell* cell = NULL; - Oid bucketOid; - RangePartitionDefState* partDef = NULL; - - /* check tablespace privileges */ - foreach (cell, partState->partitionList) { - partDef = (RangePartitionDefState*)lfirst(cell); - if (PointerIsValid(partDef->tablespacename)) - CheckPartitionTablespace(partDef->tablespacename, rel->rd_rel->relowner); + int partNum = getNumberOfPartitions(rel); + if (nodeTag(partDefState) == T_RangePartitionDefState) { + RangePartitionDefState *partDef = (RangePartitionDefState *)partDefState; + RangePartitionMap *partMap = (RangePartitionMap *)rel->partMap; + curBound = (Const *)copyObject(partMap->rangeElements[partNum - 1].boundary[0]); + val = partDef->curStartVal; + if (!curBound->ismaxvalue && val != NULL && partitonKeyCompare(&val, &curBound, 1) != 0) { + ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.", + partDef->partitionInitName ? 
partDef->partitionInitName : partDef->partitionName))); + } + partKeyValueList = transformConstIntoTargetType(rel->rd_att->attrs, partMap->partitionKey, partDef->boundary); + pfree_ext(curBound); + } else { + ListPartitionDefState *partDef = (ListPartitionDefState *)partDefState; + ListPartitionMap *partMap = (ListPartitionMap *)rel->partMap; + for (i = 0; i < partNum; i++) { + for (j = 0; j < partMap->listElements[i].len; j++) { + curBound = partMap->listElements[i].boundary[j]; + foreach (cell, partDef->boundary) { + val = (Const *)lfirst(cell); + if (partitonKeyCompare(&curBound, &val, 1) == 0) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("list boundary of adding partition MUST NOT overlap with existing partition"))); + } + } + } + } + partKeyValueList = + transformIntoTargetType(rel->rd_att->attrs, partMap->partitionKey->values[0], partDef->boundary); + } + + existingPartOid = partitionValuesGetPartitionOid(rel, partKeyValueList, AccessShareLock, false, true, false); + if (OidIsValid(existingPartOid)) { + if (rel->partMap->type == PART_TYPE_RANGE) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("upper boundary of adding partition MUST overtop last existing partition"))); + } else { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("list boundary of adding partition MUST NOT overlap with existing partition"))); + } + } +} + +static void CheckSubpartitionForAddPartition(Relation rel, Node *partDefState) +{ + Assert(IsA(partDefState, RangePartitionDefState) || IsA(partDefState, ListPartitionDefState)); + + List *subPartitionDefStateList = NIL; + if (nodeTag(partDefState) == T_RangePartitionDefState) { + RangePartitionDefState *partDef= (RangePartitionDefState*)partDefState; + subPartitionDefStateList = partDef->subPartitionDefState; + } else { + ListPartitionDefState *partDef= (ListPartitionDefState*)partDefState; + subPartitionDefStateList = partDef->subPartitionDefState; + } + + if (subPartitionDefStateList == NIL) { + return; + } + + /* a. only a subpartition table is allowed */ + if (!RelationIsSubPartitioned(rel)) { + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("Un-support feature"), + errdetail("Can not add subpartition against NON-SUBPARTITIONED table"), + errcause("ADD SUBPARTITION works on a NON-SUBPARTITIONED table"), + erraction("Please check DDL syntax for \"ADD SUBPARTITION\""))); + } + + /* get the necessary subpartition info */ + char subparttype = PART_STRATEGY_INVALID; + List *subpartKeyPosList = NIL; + RelationGetSubpartitionInfo(rel, &subparttype, &subpartKeyPosList, NULL); + + /* b. check the subpartition type is the same as the relation itself */ + NodeTag subparttypeTag = GetPartitionStateType(subparttype); + ListCell* subcell = NULL; + foreach (subcell, subPartitionDefStateList) { + Node *subPartitionDefState = (Node *)lfirst(subcell); + if (nodeTag(subPartitionDefState) != subparttypeTag) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), + errmsg("The syntax format of subpartition is incorrect, the declaration and " + "definition of the subpartition do not match."), + errdetail("The syntax format of subpartition %s is incorrect.", + GetPartitionDefStateName(subPartitionDefState)), + errcause("The declaration and definition of the subpartition do not match."), + erraction("Consistent declaration and definition of subpartition."))); + } + } + + /* c. 
subpartition values constraint */ + switch (subparttype) { + case PART_STRATEGY_RANGE: + ComparePartitionValue(subpartKeyPosList, (RelationGetDescr(rel))->attrs, subPartitionDefStateList); + break; + case PART_STRATEGY_LIST: + CompareListValue(subpartKeyPosList, (RelationGetDescr(rel))->attrs, subPartitionDefStateList); + break; + case PART_STRATEGY_HASH: + break; + default: + ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Only support RANGE/LIST/HASH for subpartition table"), + errdetail("N/A"), errcause("The partition type is incorrect."), + erraction("Use the correct partition type."))); + } + + list_free_ext(subpartKeyPosList); +} + +static void CheckForAddPartition(Relation rel, List *partDefStateList) +{ + Assert(RELATION_IS_PARTITIONED(rel)); + + /* check 1: tablespace privileges */ + CheckTablespaceForAddPartition(rel, partDefStateList); + /* check 2: can not add more partitions once MAX_PARTITION_NUM is reached */ - if ((getNumberOfPartitions(rel) + partState->partitionList->length) > MAX_PARTITION_NUM) { + if ((getNumberOfPartitions(rel) + partDefStateList->length) > MAX_PARTITION_NUM) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("too many partitions for partitioned table"), @@ -19534,56 +19536,36 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState) } /* check 3: name conflict check */ - foreach (cell, partState->partitionList) { - partDef = (RangePartitionDefState*)lfirst(cell); - existingPartOid = partitionNameGetPartitionOid(rel->rd_id, - partDef->partitionName, - PART_OBJ_TYPE_TABLE_PARTITION, - AccessExclusiveLock, - true, - false, - NULL, - NULL, - NoLock); - if (OidIsValid(existingPartOid)) { - ereport(ERROR, - ((errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg( - "adding partition name conflict with existing partitions: \"%s\".", partDef->partitionName)))); - } + List* newPartitionNameList = GetPartitionNameList(partDefStateList); + List* existingPartitionNameList = RelationGetPartitionNameList(rel); + CheckPartitionNameConflictForAddPartition(newPartitionNameList, existingPartitionNameList); + list_free_deep(existingPartitionNameList); + /* don't free the cell of newPartitionNameList */ + list_free_ext(newPartitionNameList); + + /* check 4: partition values constraint */ + int2vector *partitionKey = GetPartitionKey(rel->partMap); + List *partKeyPosList = NIL; + for (int i = 0; i < partitionKey->dim1; i++) { + partKeyPosList = lappend_int(partKeyPosList, partitionKey->values[i] - 1); + } + if (rel->partMap->type == PART_TYPE_RANGE) { + ComparePartitionValue(partKeyPosList, (RelationGetDescr(rel))->attrs, partDefStateList); + } else if (rel->partMap->type == PART_TYPE_LIST) { + CompareListValue(partKeyPosList, (RelationGetDescr(rel))->attrs, partDefStateList); + } + list_free_ext(partKeyPosList); + + ListCell *cell = NULL; + foreach (cell, partDefStateList) { + /* check 5: new partitions must be added behind the last partition */ + CheckPartitionValueConflictForAddPartition(rel, (Node*)lfirst(cell)); + + /* check 6: constraint for subpartition */ + CheckSubpartitionForAddPartition(rel, (Node*)lfirst(cell)); } - /* check 4: new adding partitions behind the last partition */ - partDef = (RangePartitionDefState*)linitial(partState->partitionList); - - int partNum = getNumberOfPartitions(rel); - Const* curBound = (Const*)copyObject(((RangePartitionMap*)rel->partMap)->rangeElements[partNum - 1].boundary[0]); - Const* curStartVal = partDef->curStartVal; - if (!curBound->ismaxvalue && curStartVal != NULL && 
partitonKeyCompare(&curStartVal, &curBound, 1) != 0) { - if (partDef->partitionInitName != NULL) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.", - partDef->partitionInitName))); - } else { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("start value of partition \"%s\" NOT EQUAL up-boundary of last partition.", - partDef->partitionName))); - } - } - - partKeyValueList = transformConstIntoTargetType( - rel->rd_att->attrs, ((RangePartitionMap*)rel->partMap)->partitionKey, partDef->boundary); - existingPartOid = partitionValuesGetPartitionOid(rel, partKeyValueList, AccessExclusiveLock, false, true, false); - if (OidIsValid(existingPartOid)) { - list_free_deep(partKeyValueList); - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("upper boundary of adding partition MUST overtop last existing partition"))); - } - - /* check 5: whether has the unusable local index */ + /* check 7: whether has the unusable local index */ if (!checkRelationLocalIndexesUsable(rel)) { ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -19591,14 +19573,73 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState) NameStr(rel->rd_rel->relname)), errhint("please reindex the unusable index first."))); } +} + +static void ATExecAddListPartition(Relation rel, AddPartitionState *partState) +{ + Relation pgPartRel = NULL; + Oid newPartOid = InvalidOid; + List *newSubpartOidList = NIL; + Datum new_reloptions; + Datum rel_reloptions; + HeapTuple tuple; + bool isnull = false; + List* old_reloptions = NIL; + ListCell* cell = NULL; + Oid bucketOid; + Relation parentrel = NULL; + char subparttype = PART_STRATEGY_INVALID; + List *subpartKeyPosList = NIL; + int2vector *subpartitionKey = NULL; + + /* if the relation is a partrel of a subpartition, here we get the relation first */ + if (RelationIsPartitionOfSubPartitionTable(rel)) { + /* the lock of parentrel has been obtained already, seen in ATExecAddSubPartition */ + parentrel = heap_open(rel->parentId, NoLock); + if (!RelationIsValid(parentrel)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("missing relation for partition \"%s\"", rel->rd_rel->relname.data), + errdetail("N/A"), + errcause("Maybe the partition table is dropped"), + erraction("Check system table 'pg_class' for more information"))); + } + } + + ListPartitionDefState* partDef = NULL; + + /* step 1: Check before the actual work */ + CheckForAddPartition(rel, partState->partitionList); bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(rel); + bool *isTimestamptzForSubPartKey = NULL; + if (RelationIsSubPartitioned(rel)) { + RelationGetSubpartitionInfo(rel, &subparttype, &subpartKeyPosList, &subpartitionKey); + + int subPartKeyNum = list_length(subpartKeyPosList); + isTimestamptzForSubPartKey = (bool *)palloc0(sizeof(bool) * subPartKeyNum); + ListCell *subpartKeyCell = NULL; + int partKeyIdx = 0; + foreach (subpartKeyCell, subpartKeyPosList) { + int pos = lfirst_int(subpartKeyCell); + if ((RelationGetDescr(rel))->attrs[pos]->atttypid == TIMESTAMPTZOID) { + isTimestamptzForSubPartKey[partKeyIdx] = true; + } + partKeyIdx++; + } + list_free_ext(subpartKeyPosList); + } pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); /* step 2: add new partition entry in pg_partition */ /* TRANSFORM into target first */ - tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(rel->rd_id)); + Oid relOid = + 
RelationIsPartitionOfSubPartitionTable(rel) ? ObjectIdGetDatum(rel->parentId) : ObjectIdGetDatum(rel->rd_id); + tuple = SearchSysCache1(RELOID, relOid); + if (!HeapTupleIsValid(tuple)) + ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for relation %u", relOid), errdetail("N/A"), + errcause("System error."), erraction("Contact engineer to support."))); rel_reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); old_reloptions = untransformRelOptions(rel_reloptions); @@ -19610,9 +19651,26 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState) list_free_ext(old_reloptions); bucketOid = RelationGetBucketOid(rel); + + List *partitionNameList = + list_concat(GetPartitionNameList(partState->partitionList), RelationGetPartitionNameList(rel)); foreach (cell, partState->partitionList) { - partDef = (RangePartitionDefState*)lfirst(cell); - newPartOid = heapAddRangePartition(pgPartRel, + partDef = (ListPartitionDefState*)lfirst(cell); + + PartitionState *partitionState = makeNode(PartitionState); + partitionState->partitionStrategy = PART_STRATEGY_LIST; + partitionState->partitionNameList = partitionNameList; + if (RelationIsSubPartitioned(rel)) { + partitionState->subPartitionState = makeNode(PartitionState); + partitionState->subPartitionState->partitionStrategy = subparttype; + if (partDef->subPartitionDefState == NIL) { + Node *subPartitionDefState = MakeDefaultSubpartition(partitionState, (Node *)partDef); + partDef->subPartitionDefState = + lappend(partDef->subPartitionDefState, subPartitionDefState); + } + } + + newPartOid = HeapAddListPartition(pgPartRel, rel->rd_id, rel->rd_rel->reltablespace, bucketOid, @@ -19621,6 +19679,21 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState) (Datum)new_reloptions, isTimestamptz, RelationGetStorageType(rel), + subpartitionKey, + RelationIsPartitionOfSubPartitionTable(rel)); + + Oid partTablespaceOid = + GetPartTablespaceOidForSubpartition(rel->rd_rel->reltablespace, partDef->tablespacename); + newSubpartOidList = addNewSubPartitionTuplesForPartition(pgPartRel, + newPartOid, + partTablespaceOid, + bucketOid, + rel->rd_rel->relowner, + (Datum)new_reloptions, + isTimestamptzForSubPartKey, + RelationGetStorageType(rel), + partitionState, + (Node *)partDef, AccessExclusiveLock); /* step 3: no need to update number of partitions in pg_partition */ @@ -19635,17 +19708,254 @@ static void ATExecAddRangePartition(Relation rel, AddPartitionState* partState) addDeltaTableForNewPartition(rel, newPartOid); } - addIndexForPartition(rel, newPartOid); + if (RelationIsPartitionOfSubPartitionTable(rel)) { + addIndexForPartition(parentrel, newPartOid); + addToastTableForNewPartition(rel, newPartOid, true); + } else if (RelationIsSubPartitioned(rel)) { + Assert(newSubpartOidList != NIL); + Partition part = partitionOpen(rel, newPartOid, AccessExclusiveLock); + Relation partrel = partitionGetRelation(rel, part); + ListCell* lc = NULL; + foreach (lc, newSubpartOidList) { + Oid subpartOid = lfirst_oid(lc); + addIndexForPartition(rel, subpartOid); + addToastTableForNewPartition(partrel, subpartOid, true); + } + releaseDummyRelation(&partrel); + partitionClose(rel, part, NoLock); + } else { + addIndexForPartition(rel, newPartOid); + addToastTableForNewPartition(rel, newPartOid); + } - addToastTableForNewPartition(rel, newPartOid); /* step 4: invalidate relation */ - CacheInvalidateRelcache(rel); + if 
(RelationIsPartitionOfSubPartitionTable(rel)) { + CacheInvalidateRelcache(parentrel); + CacheInvalidatePartcacheByPartid(rel->rd_id); + } else { + CacheInvalidateRelcache(rel); + } + pfree_ext(partitionState->subPartitionState); + pfree_ext(partitionState); } /* close relation, done */ relation_close(pgPartRel, NoLock); - list_free_deep(partKeyValueList); pfree_ext(isTimestamptz); + pfree_ext(isTimestamptzForSubPartKey); + list_free_ext(partitionNameList); + + if (RelationIsPartitionOfSubPartitionTable(rel)) { + heap_close(parentrel, NoLock); + } +} + +/* + * @@GaussDB@@ + * Target : data partition + * Brief : + * Description : + * Notes : + */ +static void ATExecAddRangePartition(Relation rel, AddPartitionState *partState) +{ + Relation pgPartRel = NULL; + Oid newPartOid = InvalidOid; + List *newSubpartOidList = NIL; + Datum new_reloptions; + Datum rel_reloptions; + HeapTuple tuple; + bool isnull = false; + List* old_reloptions = NIL; + ListCell* cell = NULL; + Oid bucketOid; + Relation parentrel = NULL; + char subparttype = PART_STRATEGY_INVALID; + List *subpartKeyPosList = NIL; + int2vector *subpartitionKey = NULL; + + /* if the relation is a partrel of a subpartition, here we get the relation first */ + if (RelationIsPartitionOfSubPartitionTable(rel)) { + /* the lock of parentrel has been obtained already, seen in ATExecAddSubPartition */ + parentrel = heap_open(rel->parentId, NoLock); + if (!RelationIsValid(parentrel)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("missing relation for partition \"%s\"", rel->rd_rel->relname.data), + errdetail("N/A"), + errcause("Maybe the partition table is dropped"), + erraction("Check system table 'pg_class' for more information"))); + } + } + + RangePartitionDefState* partDef = NULL; + + /* step 1: Check before the actual work */ + CheckForAddPartition(rel, partState->partitionList); + + bool* isTimestamptz = CheckPartkeyHasTimestampwithzone(rel); + bool *isTimestamptzForSubPartKey = NULL; + if (RelationIsSubPartitioned(rel)) { + RelationGetSubpartitionInfo(rel, &subparttype, &subpartKeyPosList, &subpartitionKey); + + int subPartKeyNum = list_length(subpartKeyPosList); + isTimestamptzForSubPartKey = (bool *)palloc0(sizeof(bool) * subPartKeyNum); + ListCell *subpartKeyCell = NULL; + int partKeyIdx = 0; + foreach (subpartKeyCell, subpartKeyPosList) { + int pos = lfirst_int(subpartKeyCell); + if ((RelationGetDescr(rel))->attrs[pos]->atttypid == TIMESTAMPTZOID) { + isTimestamptzForSubPartKey[partKeyIdx] = true; + } + partKeyIdx++; + } + list_free_ext(subpartKeyPosList); + } + + pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); + + /* step 2: add new partition entry in pg_partition */ + /* TRANSFORM into target first */ + Oid relOid = + RelationIsPartitionOfSubPartitionTable(rel) ? 
ObjectIdGetDatum(rel->parentId) : ObjectIdGetDatum(rel->rd_id); + tuple = SearchSysCache1(RELOID, relOid); + if (!HeapTupleIsValid(tuple)) + ereport(ERROR, (errmodule(MOD_COMMAND), errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for relation %u", relOid), errdetail("N/A"), + errcause("System error."), erraction("Contact engineer to support."))); + rel_reloptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isnull); + + old_reloptions = untransformRelOptions(rel_reloptions); + RemoveRedisRelOptionsFromList(&old_reloptions); + new_reloptions = transformRelOptions((Datum)0, old_reloptions, NULL, NULL, false, false); + ReleaseSysCache(tuple); + + if (old_reloptions != NIL) + list_free_ext(old_reloptions); + + bucketOid = RelationGetBucketOid(rel); + + List *partitionNameList = + list_concat(GetPartitionNameList(partState->partitionList), RelationGetPartitionNameList(rel)); + foreach (cell, partState->partitionList) { + partDef = (RangePartitionDefState*)lfirst(cell); + + PartitionState *partitionState = makeNode(PartitionState); + partitionState->partitionStrategy = PART_STRATEGY_RANGE; + partitionState->partitionNameList = partitionNameList; + if (RelationIsSubPartitioned(rel)) { + partitionState->subPartitionState = makeNode(PartitionState); + partitionState->subPartitionState->partitionStrategy = subparttype; + if (partDef->subPartitionDefState == NIL) { + Node *subPartitionDefState = MakeDefaultSubpartition(partitionState, (Node *)partDef); + partDef->subPartitionDefState = lappend(partDef->subPartitionDefState, subPartitionDefState); + } + } + + newPartOid = heapAddRangePartition(pgPartRel, + rel->rd_id, + rel->rd_rel->reltablespace, + bucketOid, + partDef, + rel->rd_rel->relowner, + (Datum)new_reloptions, + isTimestamptz, + RelationGetStorageType(rel), + AccessExclusiveLock, + subpartitionKey, + RelationIsPartitionOfSubPartitionTable(rel)); + + Oid partTablespaceOid = + GetPartTablespaceOidForSubpartition(rel->rd_rel->reltablespace, partDef->tablespacename); + newSubpartOidList = addNewSubPartitionTuplesForPartition(pgPartRel, + newPartOid, + partTablespaceOid, + bucketOid, + rel->rd_rel->relowner, + (Datum)new_reloptions, + isTimestamptzForSubPartKey, + RelationGetStorageType(rel), + partitionState, + (Node *)partDef, + AccessExclusiveLock); + + /* step 3: no need to update number of partitions in pg_partition */ + /* + * We must bump the command counter to make the newly-created partition + * tuple visible for opening. 
+ */ + CommandCounterIncrement(); + + if (RelationIsColStore(rel)) { + addCudescTableForNewPartition(rel, newPartOid); + addDeltaTableForNewPartition(rel, newPartOid); + } + + if (RelationIsPartitionOfSubPartitionTable(rel)) { + addIndexForPartition(parentrel, newPartOid); + addToastTableForNewPartition(rel, newPartOid, true); + } else if (RelationIsSubPartitioned(rel)) { + Assert(newSubpartOidList != NIL); + Partition part = partitionOpen(rel, newPartOid, AccessExclusiveLock); + Relation partrel = partitionGetRelation(rel, part); + ListCell* lc = NULL; + foreach (lc, newSubpartOidList) { + Oid subpartOid = lfirst_oid(lc); + addIndexForPartition(rel, subpartOid); + addToastTableForNewPartition(partrel, subpartOid, true); + } + releaseDummyRelation(&partrel); + partitionClose(rel, part, NoLock); + } else { + addIndexForPartition(rel, newPartOid); + addToastTableForNewPartition(rel, newPartOid); + } + + /* step 4: invalidate relation */ + if (RelationIsPartitionOfSubPartitionTable(rel)) { + CacheInvalidateRelcache(parentrel); + CacheInvalidatePartcacheByPartid(rel->rd_id); + } else { + CacheInvalidateRelcache(rel); + } + pfree_ext(partitionState->subPartitionState); + pfree_ext(partitionState); + } + + /* close relation, done */ + relation_close(pgPartRel, NoLock); + pfree_ext(isTimestamptz); + pfree_ext(isTimestamptzForSubPartKey); + list_free_ext(partitionNameList); + + if (RelationIsPartitionOfSubPartitionTable(rel)) { + heap_close(parentrel, NoLock); + } +} + +static void ATExecAddSubPartition(Relation rel, AddSubPartitionState *subpartState) +{ + Assert(PointerIsValid(subpartState->partitionName)); + Assert(RelationIsSubPartitioned(rel)); + + Oid partOid = partitionNameGetPartitionOid(rel->rd_id, + subpartState->partitionName, + PART_OBJ_TYPE_TABLE_PARTITION, + AccessExclusiveLock, + false, + false, + NULL, + NULL, + NoLock); + Partition part = partitionOpen(rel, partOid, AccessExclusiveLock); + Relation partrel = partitionGetRelation(rel, part); + + AddPartitionState* partState = makeNode(AddPartitionState); + partState->partitionList = subpartState->subPartitionList; + ATExecAddPartition(partrel, partState); + + releaseDummyRelation(&partrel); + partitionClose(rel, part, NoLock); + pfree_ext(partState); } /* Assume the caller has already hold RowExclusiveLock on the pg_partition. */ @@ -19686,7 +19996,7 @@ static void UpdateIntervalPartToRange(Relation relPartition, Oid partOid, const * if the intervalPartOid is not InvalidOid, the interval partition which is specificed by it * need to be changed to normal range partition. 
*/ -void fastDropPartition(Relation rel, Oid partOid, const char* stmt, Oid intervalPartOid) +void fastDropPartition(Relation rel, Oid partOid, const char* stmt, Oid intervalPartOid, bool sendInvalid) { Partition part = NULL; Relation pg_partition = NULL; @@ -19719,7 +20029,16 @@ void fastDropPartition(Relation rel, Oid partOid, const char* stmt, Oid interval /* step 3: no need to update number of partitions in pg_partition */ /* step 4: invalidate relation */ - CacheInvalidateRelcache(rel); + /* if the relation is a partrel, we send invalid message to the partition itself, + * else we send invalid message to the relation */ + if (sendInvalid) { + if (RelationIsPartitionOfSubPartitionTable(rel)) { + CacheInvalidatePartcacheByPartid(rel->rd_id); + } else { + CacheInvalidateRelcache(rel); + } + } + /* make the dropping partition invisible, fresh partition map for the new partition */ relation_close(pg_partition, RowExclusiveLock); @@ -19738,15 +20057,142 @@ void fastDropPartition(Relation rel, Oid partOid, const char* stmt, Oid interval * Description : * Notes : */ -static void ATExecDropPartition(Relation rel, AlterTableCmd* cmd) +static void ATExecDropPartition(Relation rel, AlterTableCmd *cmd) { + Assert(RELATION_IS_PARTITIONED(rel)); + Oid partOid = InvalidOid; - RangePartitionDefState* rangePartDef = NULL; + List *subpartOidList = NIL; + Oid subpartOid = InvalidOid; + ListCell* cell = NULL; + Partition part = NULL; + Relation partrel = NULL; + if (rel->partMap->type == PART_TYPE_HASH) { ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Droping hash partition is unsupported."))); } /* getting the dropping partition's oid */ - /* FIRST IS the DROP PARTITION PART_NAME branch */ + partOid = GetPartOidByATcmd(rel, cmd, "DROP PARTITION"); + + /* check 1: check validity of partition oid */ + if (!OidIsValid(partOid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range"))); + } + + /* get subpartOidList if is subpartition */ + if (RelationIsSubPartitioned(rel)) { + part = partitionOpen(rel, partOid, AccessExclusiveLock); + partrel = partitionGetRelation(rel, part); + subpartOidList = relationGetPartitionOidList(partrel); + } + + /* check 2: can not drop the last existing partition */ + if (getNumberOfPartitions(rel) == 1) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OPERATION), errmsg("Cannot drop the only partition of a partitioned table"))); + } + if (RelationIsSubPartitioned(rel)) { + foreach (cell, subpartOidList) { + subpartOid = DatumGetObjectId(lfirst(cell)); + AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subpartOid); + } + } else { + AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid); + } + + if (!cmd->alterGPI) { + // Unusable Global Index + ATUnusableGlobalIndex(rel); + } + + Oid changeToRangePartOid = InvalidOid; + if (rel->partMap->type != PART_TYPE_LIST && rel->partMap->type != PART_TYPE_HASH) { +#ifdef ENABLE_MULTIPLE_NODES + if (unlikely(RelationIsTsStore(rel) && OidIsValid(RelationGetDeltaRelId(rel))) && IS_PGXC_DATANODE) { + Tsdb::DeleteDeltaByPartition(GetActiveSnapshot(), rel, partOid); + } +#endif /* ENABLE_MULTIPLE_NODES */ + changeToRangePartOid = GetNeedDegradToRangePartOid(rel, partOid); + } + + if (RelationIsSubPartitioned(rel)) { + foreach (cell, subpartOidList) { + subpartOid = DatumGetObjectId(lfirst(cell)); + /* we don't send invalid message to the partition here, as the partition will be dropped soon */ + fastDropPartition(partrel, subpartOid, "DROP SUBPARTITION", 
InvalidOid, false); + } + releasePartitionOidList(&subpartOidList); + releaseDummyRelation(&partrel); + partitionClose(rel, part, NoLock); + fastDropPartition(rel, partOid, "DROP PARTITION"); + } else { + fastDropPartition(rel, partOid, "DROP PARTITION", changeToRangePartOid); + } +} + +static void ATExecDropSubPartition(Relation rel, AlterTableCmd *cmd) +{ + Assert(RelationIsSubPartitioned(rel)); + + Oid partOid = InvalidOid; + Oid subpartOid = InvalidOid; + + char subparttype = PART_STRATEGY_INVALID; + RelationGetSubpartitionInfo(rel, &subparttype, NULL, NULL); + if (subparttype == PART_STRATEGY_HASH) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Un-support feature"), + errdetail("The syntax is unsupported for hash subpartition"), + errcause("Try DROP SUBPARTITION on a hash-subpartitioned table"), + erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); + } + + /* getting the dropping subpartition's oid */ + subpartOid = GetSubpartOidByATcmd(rel, cmd, &partOid, "DROP SUBPARTITION"); + + /* check 1: check validity of partition oid */ + if (!OidIsValid(partOid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The partition which owns the subpartition is missing"), + errdetail("N/A"), + errcause("Maybe the subpartition table is dropped"), + erraction("Check system table 'pg_partition' for more information"))); + } + if (!OidIsValid(subpartOid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The subpartition number is invalid or out-of-range"), + errdetail("N/A"), + errcause("Wrong or invalid value for DROP SUBPARTITION"), + erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); + } + + Partition part = partitionOpen(rel, partOid, AccessExclusiveLock); + Relation partrel = partitionGetRelation(rel, part); + + /* check 2: can not drop the last existing subpartition */ + if (getNumberOfPartitions(partrel) == 1) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), + errmsg("Cannot drop the only subpartition of a partitioned table"), + errdetail("N/A"), + errcause("DROP SUBPARTITION works on the partition which has only one subpartition"), + erraction("Please check DDL syntax for \"DROP SUBPARTITION\""))); + } + AlterSubPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid, subpartOid); + + if (!cmd->alterGPI) { + // Unusable Global Index + ATUnusableGlobalIndex(rel); + } + + fastDropPartition(partrel, subpartOid, "DROP SUBPARTITION"); + releaseDummyRelation(&partrel); + partitionClose(rel, part, NoLock); +} + +static Oid GetPartOidByATcmd(Relation rel, AlterTableCmd *cmd, const char *command) +{ + Oid partOid = InvalidOid; + + /* FIRST IS the PARTITION (partname) branch */ if (PointerIsValid(cmd->name)) { partOid = partitionNameGetPartitionOid(rel->rd_id, cmd->name, @@ -19757,60 +20203,121 @@ static void ATExecDropPartition(Relation rel, AlterTableCmd* cmd) NULL, NULL, NoLock); - } else { - switch (rel->partMap->type) { - case PART_TYPE_LIST: - case PART_TYPE_HASH: - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), errmsg("The syntax is unsupported for list/hash partition "))); - break; - default: - /* next IS the DROP PARTITION FOR (MAXVALUELIST) branch */ - rangePartDef = (RangePartitionDefState*)cmd->def; - rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, - ((RangePartitionMap*)rel->partMap)->partitionKey, - rangePartDef->boundary); - partOid = partitionValuesGetPartitionOid(rel, - rangePartDef->boundary, - AccessExclusiveLock, - true, - true, /* will check validity of partition oid 
next step */ - false); - - } + return partOid; } - /* check 1: check validity of partition oid */ - if (!OidIsValid(partOid)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("The partition number is invalid or out-of-range"))); + /* next IS the PARTITION FOR (partvalue) branch */ + RangePartitionDefState *rangePartDef = (RangePartitionDefState*)cmd->def; + switch (rel->partMap->type) { + case PART_TYPE_RANGE: + case PART_AREA_INTERVAL: + rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, + ((RangePartitionMap*)rel->partMap)->partitionKey, + rangePartDef->boundary); + break; + case PART_TYPE_LIST: + rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, + ((ListPartitionMap*)rel->partMap)->partitionKey, + rangePartDef->boundary); + break; + case PART_TYPE_HASH: + rangePartDef->boundary = transformConstIntoTargetType(rel->rd_att->attrs, + ((HashPartitionMap*)rel->partMap)->partitionKey, + rangePartDef->boundary); + break; + default: + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Unknown partitioned type"), + errdetail("This parttype is not supported for %s", command), + errcause("Only range/list/hash/interval partitioned table is supported for %s", command), + erraction("Please check DDL syntax for \"%s\"", command))); + } + partOid = partitionValuesGetPartitionOid(rel, + rangePartDef->boundary, + AccessExclusiveLock, + true, + true, /* will check validity of partition oid next step */ + false); + return partOid; +} + +static Oid GetSubpartOidByATcmd(Relation rel, AlterTableCmd *cmd, Oid *partOid, const char *command) +{ + Oid subpartOid = InvalidOid; + + /* FIRST IS the SUBPARTITION (subpartname) branch */ + if (PointerIsValid(cmd->name)) { + subpartOid = partitionNameGetPartitionOid(rel->rd_id, + cmd->name, + PART_OBJ_TYPE_TABLE_SUB_PARTITION, + AccessExclusiveLock, + false, + false, + NULL, + NULL, + NoLock, + partOid); + return subpartOid; } - /* check 2: can not drop the last existing partition */ - if (getNumberOfPartitions(rel) == 1) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), errmsg("Cannot drop the only partition of a partitioned table"))); - } - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid); + /* next IS the SUBPARTITION FOR (subpartvalue) branch */ + int2vector *subpartitionKey = NULL; + RelationGetSubpartitionInfo(rel, NULL, NULL, &subpartitionKey); - if (!cmd->alterGPI) { - // Unusable Global Index - ATUnusableGlobalIndex(rel); - } else { - /* Delete partition tuples in GPI */ - DeleteGPITuplesForPartition(RelationGetRelid(rel), partOid); + RangePartitionDefState *rangePartDef = (RangePartitionDefState*)cmd->def; + int2vector *partitionKey = NULL; + switch (rel->partMap->type) { + case PART_TYPE_RANGE: + partitionKey = ((RangePartitionMap*)rel->partMap)->partitionKey; + break; + case PART_TYPE_LIST: + partitionKey = ((ListPartitionMap*)rel->partMap)->partitionKey; + break; + case PART_TYPE_HASH: + partitionKey = ((HashPartitionMap*)rel->partMap)->partitionKey; + break; + default: + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("Unknown partitioned type"), + errdetail("This parttype is not supported for %s", command), + errcause("Only range/list/hash partitioned table is supported for %s", command), + erraction("Please check DDL syntax for \"%s\"", command))); } - - if (rel->partMap->type != PART_TYPE_LIST && rel->partMap->type != PART_TYPE_HASH) { -#ifdef ENABLE_MULTIPLE_NODES - if (unlikely(RelationIsTsStore(rel) && OidIsValid(RelationGetDeltaRelId(rel))) && 
IS_PGXC_DATANODE) { - Tsdb::DeleteDeltaByPartition(GetActiveSnapshot(), rel, partOid); + if (list_length(rangePartDef->boundary) != (partitionKey->dim1 + subpartitionKey->dim1)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Number of boundary items NOT EQUAL to number of partition keys"), + errdetail("There must be %d boundary items for %s in a subpartitioned table", + partitionKey->dim1 + subpartitionKey->dim1, command), + errcause("N/A"), erraction("Check whether the SQL statement is correct.")))); + } + List *partBoundary = NIL; + List *subpartBoundary = NIL; + ListCell *cell = list_head(rangePartDef->boundary); + for (int i = 0; i < list_length(rangePartDef->boundary); i++) { + if (i < partitionKey->dim1) { + partBoundary = lappend(partBoundary, lfirst(cell)); + } else { + subpartBoundary = lappend(subpartBoundary, lfirst(cell)); } -#endif /* ENABLE_MULTIPLE_NODES */ - Oid changeToRangePartOid = GetNeedDegradToRangePartOid(rel, partOid); - fastDropPartition(rel, partOid, "DROP PARTITION", changeToRangePartOid); - } else { - fastDropPartition(rel, partOid, "DROP PARTITION"); + cell = lnext(cell); } + partBoundary = transformConstIntoTargetType(rel->rd_att->attrs, + partitionKey, + partBoundary); + subpartBoundary = transformConstIntoTargetType(rel->rd_att->attrs, + subpartitionKey, + subpartBoundary); + + subpartOid = subpartitionValuesGetSubpartitionOid(rel, + partBoundary, + subpartBoundary, + AccessExclusiveLock, + true, + true, /* will check validity of partition oid next step */ + false, + partOid); + list_free_ext(partBoundary); + list_free_ext(subpartBoundary); + + return subpartOid; } /* @@ -20170,6 +20677,93 @@ List* GetPartitionBoundary(Relation partTableRel, Node *PartDef) return boundary; } +static char** getPartitionIndexesName(Oid partitionOid, List* indexList) { + if (indexList == NIL) { + return NULL; + } + int loc = 0; + ListCell* cell = NULL; + char** partitionIndexNames = (char**)palloc(sizeof(char*)*indexList->length); + foreach (cell, indexList) { + Oid indexOid = lfirst_oid(cell); + char* name = getPartitionIndexName(indexOid, partitionOid); + if (name == NULL) { + ereport(ERROR, + (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("Invalid name of local index %u on the partition %u", indexOid, partitionOid))); + } + partitionIndexNames[loc++] = name; + } + return partitionIndexNames; +} + +/* + * @@GaussDB@@ + * Target : data partition + * Brief : Change the names of the local indexes on the target partition table in pg_partition. + * Description : + * Notes : + */ +static void renamePartitionIndexes(Oid partitionedTableOid, Oid partitionOid, char** partitionIndexNames, + List* indexList) +{ + if (partitionIndexNames == NULL) { + return; + } + int loc = 0; + ListCell* cell = NULL; + foreach (cell, indexList) { + Oid indexOid = lfirst_oid(cell); + + Relation indexRel = relation_open(indexOid, AccessShareLock); + + Oid indexDestPartOid = getPartitionIndexOid(indexRel->rd_id, partitionOid); + Partition indexPart = partitionOpen(indexRel, indexDestPartOid, AccessExclusiveLock); + + renamePartitionInternal(partitionedTableOid, indexDestPartOid, partitionIndexNames[loc++]); + + partitionClose(indexRel, indexPart, NoLock); + relation_close(indexRel, AccessShareLock); + } +} + +/* + * @@GaussDB@@ + * Target : data partition + * Brief : Truncate operation, create a new partition to replace the original partition. 
+ * Description : + * Notes : + */ +static void heap_truncate_one_part_new(const AlterTableCmd* cmd, Relation rel, Oid srcPartOid) { + Partition srcPart = NULL; + bool renameTargetPart = false; + char* destPartitionName = NULL; + + Oid destPartOid = AddTemporaryPartitionForAlterPartitions(cmd, rel, srcPartOid, &renameTargetPart); + + List* indexList = RelationGetSpecificKindIndexList(rel, false); + char** partitionIndexNames = getPartitionIndexesName(srcPartOid, indexList); + + srcPart = partitionOpen(rel, srcPartOid, AccessExclusiveLock); + destPartitionName = pstrdup(PartitionGetPartitionName(srcPart)); + partitionClose(rel, srcPart, NoLock); + + CommandCounterIncrement(); + fastDropPartition(rel, srcPartOid, "TRUNCATE PARTITION"); + + CommandCounterIncrement(); + renamePartitionIndexes(rel->rd_id, destPartOid, partitionIndexNames, indexList); + + if (renameTargetPart) { + CommandCounterIncrement(); + renamePartitionInternal(rel->rd_id, destPartOid, destPartitionName); + } + + list_free_ext(indexList); + pfree_ext(partitionIndexNames); + pfree_ext(destPartitionName); +} + static void ATExecTruncatePartitionForSubpartitionTable(Relation rel, Oid partOid, AlterTableCmd* cmd) { /* truncate subpartitioned table */ @@ -20213,6 +20807,7 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd) Oid partOid = InvalidOid; Oid newPartOid = InvalidOid; Relation newTableRel = NULL; + bool all_ubtree = true; oidList = heap_truncate_find_FKs(relid); if (PointerIsValid(oidList)) { @@ -20302,17 +20897,22 @@ static void ATExecTruncatePartition(Relation rel, AlterTableCmd* cmd) return; } - AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid); - if (!cmd->alterGPI) { // Unusable Global Index ATUnusableGlobalIndex(rel); } else { /* Delete partition tuples in GPI and add parent to pending vacuum list */ - DeleteGPITuplesForPartition(RelationGetRelid(rel), partOid); + all_ubtree = DeleteGPITuplesForPartition(RelationGetRelid(rel), partOid); + AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, rel, partOid); + } + + if (all_ubtree) { + /* If no nbtree global index exists */ + heap_truncate_one_part(rel, partOid); + } else { + heap_truncate_one_part_new(cmd, rel, partOid); } - heap_truncate_one_part(rel, partOid); pgstat_report_truncate(partOid, rel->rd_id, rel->rd_rel->relisshared); /* If newTableRel is not NULL, the parent rel must be in redistribution */ @@ -20844,7 +21444,7 @@ static void mergePartitionHeapData(Relation partTableRel, Relation tempTableRel, NULL, InvalidOid, InvalidOid, - NULL, + chunkIdHashTable, false); mergeToastBlocks += srcPartToastBlocks; @@ -21279,11 +21879,6 @@ static void ATExecMergePartition(Relation partTableRel, AlterTableCmd* cmd) srcPartOid = lfirst_oid(cell); if (destPartOid != srcPartOid) { AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, srcPartOid); - - if (cmd->alterGPI) { - DeleteGPITuplesForPartition(RelationGetRelid(partTableRel), srcPartOid); - } - fastDropPartition(partTableRel, srcPartOid, "MERGE PARTITIONS"); } } @@ -21540,23 +22135,8 @@ static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd) ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ALTER TABLE EXCHANGE requires an ordinary table"))); } - if (RELATION_HAS_BUCKET(ordTableRel) != RELATION_HAS_BUCKET(partTableRel)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table " - "to have the same hashbucket option(on or off)"))); - } - - if 
(RELATION_CREATE_BUCKET(ordTableRel)) {
-        oidvector *bucketList1 = searchHashBucketByOid(ordTableRel->rd_bucketoid);
-        oidvector *bucketList2 = searchHashBucketByOid(partTableRel->rd_bucketoid);
-        if (!hashbucket_eq(bucketList1, bucketList2)) {
-            ereport(ERROR,
-                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                    errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table "
-                           "to have the same buckets list")));
-        }
-    }
+    // Check storage parameters for two tables
+    checkStorageTypeForExchange(partTableRel, ordTableRel);

     // Check row level security policy
     if (RelationHasRlspolicy(partTableRel->rd_id) || RelationHasRlspolicy(ordTableRel->rd_id)) {
@@ -21634,6 +22214,33 @@ static void ATExecExchangePartition(Relation partTableRel, AlterTableCmd* cmd)
     }
 }

+static void checkStorageTypeForExchange(Relation partTableRel, Relation ordTableRel)
+{
+    if (RELATION_HAS_BUCKET(ordTableRel) != RELATION_HAS_BUCKET(partTableRel)) {
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table "
+                       "to have the same hashbucket option(on or off)")));
+    }
+
+    if (RELATION_CREATE_BUCKET(ordTableRel)) {
+        oidvector *bucketList1 = searchHashBucketByOid(ordTableRel->rd_bucketoid);
+        oidvector *bucketList2 = searchHashBucketByOid(partTableRel->rd_bucketoid);
+        if (!hashbucket_eq(bucketList1, bucketList2)) {
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                    errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table "
+                           "to have the same buckets list")));
+        }
+    }
+    if (ordTableRel->storage_type != partTableRel->storage_type) {
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("ALTER TABLE EXCHANGE requires both ordinary table and partitioned table "
+                       "to have the same storage type")));
+    }
+}
+
 static void checkColStoreForExchange(Relation partTableRel, Relation ordTableRel)
 {
     if ((RelationIsColStore(partTableRel) && !RelationIsColStore(ordTableRel)) ||
@@ -21650,11 +22257,6 @@ static void checkCompressForExchange(Relation partTableRel, Relation ordTableRel
             (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same type of compress")));
     }
-    if (partTableRel->rd_node.opt != ordTableRel->rd_node.opt) {
-        ereport(ERROR,
-            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                errmsg("tables in ALTER TABLE EXCHANGE PARTITION must have the same type of compress")));
-    }
 }

 // Description : Check number, type of column
@@ -21697,11 +22299,11 @@ static void checkColumnForExchange(Relation partTableRel, Relation ordTableRel)
     SysScanDesc ordAttrdefScan = NULL;
     HeapTuple ordAttrdefTuple = NULL;

-    partHeapTuple = &partAttList->members[i]->tuple;
+    partHeapTuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(partAttList, i);
     heap_deform_tuple(partHeapTuple, attDesc, partVals, partNulls);
     for (j = 0; j < ordAttList->n_members; j++) {
-        ordHeapTuple = &ordAttList->members[j]->tuple;
+        ordHeapTuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(ordAttList, j);
         heap_deform_tuple(ordHeapTuple, attDesc, ordVals, ordNulls);

         if (DatumGetInt8(ordVals[Anum_pg_attribute_attnum - 1]) ==
@@ -21823,8 +22425,8 @@ static void checkColumnForExchange(Relation partTableRel, Relation ordTableRel)

     heap_close(attrdefRel, RowExclusiveLock);

-    ReleaseCatCacheList(partAttList);
-    ReleaseCatCacheList(ordAttList);
+    ReleaseSysCacheList(partAttList);
+    ReleaseSysCacheList(ordAttList);
     pfree_ext(ordVals);
     pfree_ext(partVals);
     pfree_ext(ordNulls);
@@ -21885,8 +22487,9 @@ bool checkRelationLocalIndexesUsable(Relation relation)
     HeapTuple htup;

     /* Prepare to scan pg_index for entries having indrelid = this rel. */
+    Oid relid = RelationIsPartitionOfSubPartitionTable(relation) ? relation->parentId : RelationGetRelid(relation);
     ScanKeyInit(
-        &skey, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(relation)));
+        &skey, Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid));

     indrel = heap_open(IndexRelationId, AccessShareLock);
     indscan = systable_beginscan(indrel, IndexIndrelidIndexId, true, NULL, 1, &skey);
@@ -23096,11 +23699,6 @@ static void ATExecSplitPartition(Relation partTableRel, AlterTableCmd* cmd)
 #endif

         AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, srcPartOid);
-
-        if (cmd->alterGPI) {
-            DeleteGPITuplesForPartition(partTableOid, srcPartOid);
-        }
-
         // drop src partition
         fastDropPartition(partTableRel, srcPartOid, "SPLIT PARTITION");
         currentPartNum = getNumberOfPartitions(partTableRel);
@@ -23509,7 +24107,7 @@ static void ATExecSplitSubPartition(Relation partTableRel, AlterTableCmd* cmd)
         }

         // drop src partition
-        fastDropPartition(partRel, srcSubPartOid, "SPLIT SUBPARTITION");
+        fastDropPartition(partRel, srcSubPartOid, "SPLIT SUBPARTITION", InvalidOid, false);

         CacheInvalidatePartcache(part);
         releaseDummyRelation(&partRel);
@@ -23572,9 +24170,16 @@ static void checkSplitPointForSplit(SplitPartitionState* splitPart, Relation par

     // check split point length
     if (partKeyNum != list_length(splitPart->split_point)) {
-        ereport(ERROR,
-            (errcode(ERRCODE_INVALID_OPERATION),
-                errmsg("number of boundary items NOT EQUAL to number of partition keys")));
+        if (RelationIsPartitionOfSubPartitionTable(partTableRel)) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+                (errmsg("Number of boundary items NOT EQUAL to number of partition keys"),
+                    errdetail("There can only be one boundary value for split range subpartitions"),
+                    errcause("N/A"), erraction("Check whether the SQL statement is correct."))));
+        } else {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION),
+                (errmsg("number of boundary items NOT EQUAL to number of partition keys"), errdetail("N/A"),
+                    errcause("N/A"), erraction("Check whether the SQL statement is correct."))));
+        }
     }

     pstate = make_parsestate(NULL);
@@ -23724,7 +24329,7 @@ static Oid GetNewPartitionOid(Relation pgPartRel, Relation partTableRel, Node *p
                 bucketOid,
                 (RangePartitionDefState *)partDef,
                 partTableRel->rd_rel->relowner,
-                new_reloptions,
+                (Datum)new_reloptions,
                 isTimestamptz,
                 stype,
                 AccessExclusiveLock);
@@ -23736,7 +24341,7 @@ static Oid GetNewPartitionOid(Relation pgPartRel, Relation partTableRel, Node *p
                 bucketOid,
                 (ListPartitionDefState *)partDef,
                 partTableRel->rd_rel->relowner,
-                new_reloptions,
+                (Datum)new_reloptions,
                 isTimestamptz,
                 stype);
             break;
@@ -23747,7 +24352,7 @@ static Oid GetNewPartitionOid(Relation pgPartRel, Relation partTableRel, Node *p
                 bucketOid,
                 (HashPartitionDefState *)partDef,
                 partTableRel->rd_rel->relowner,
-                new_reloptions,
+                (Datum)new_reloptions,
                 isTimestamptz,
                 stype);
             break;
@@ -23808,7 +24413,7 @@ static Oid AddTemporaryPartition(Relation partTableRel, Node* partDef)

     /* Temporary tables do not use segment-page */
     newPartOid = GetNewPartitionOid(pgPartRel, partTableRel, partDef, bucketOid,
-        isTimestamptz, RelationGetStorageType(partTableRel), new_reloptions);
+        isTimestamptz, RelationGetStorageType(partTableRel), (Datum)new_reloptions);

     // We must bump the command counter to make the newly-created
     // partition tuple visible for opening.
@@ -24015,6 +24620,42 @@ static Oid AddTemporaryHashPartitionForAlterPartitions(const AlterTableCmd* cmd,
     return newPartOid;
 }

+/*
+ * Description: Add a temporary partition during exchange/truncate partition.
+ *
+ * Parameters:
+ * @in cmd: subcommand of an ALTER TABLE.
+ * @in partTableRel: partition table relation.
+ * @in srcPartOid: current partition oid.
+ */
+static Oid AddTemporaryPartitionForAlterPartitions(const AlterTableCmd* cmd, Relation partTableRel,
+    Oid srcPartOid, bool* renameTargetPart)
+{
+    Oid destPartOid = InvalidOid;
+    int partSeq = getPartitionElementsIndexByOid(partTableRel, srcPartOid);
+
+    switch (partTableRel->partMap->type) {
+        case PART_TYPE_LIST: {
+            destPartOid = AddTemporaryListPartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart);
+            break;
+        }
+        case PART_TYPE_HASH: {
+            destPartOid = AddTemporaryHashPartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart);
+            break;
+        }
+        case PART_TYPE_RANGE: {
+            destPartOid = AddTemporaryRangePartitionForAlterPartitions(cmd, partTableRel, partSeq, renameTargetPart);
+            break;
+        }
+        default:
+            ereport(ERROR,
+                (errcode(ERRCODE_INVALID_OPERATION),
+                    errmsg("Only List/Hash/Range partitioned tables are supported for updating the global index.")));
+            break;
+    }
+    return destPartOid;
+}
+
 /*
  * Description: Add a temporary partition during exchange partition.
* @@ -24029,30 +24670,10 @@ static void ExchangePartitionWithGPI(const AlterTableCmd* cmd, Relation partTabl List* indexList = NIL; ListCell* cell = NULL; Partition srcPart = NULL; - Oid destPartOid = InvalidOid; bool renameTargetPart = false; char* destPartitionName = NULL; - int partSeq = getPartitionElementsIndexByOid(partTableRel, srcPartOid); - switch (partTableRel->partMap->type) { - case PART_TYPE_LIST: { - destPartOid = AddTemporaryListPartitionForAlterPartitions(cmd, partTableRel, partSeq, &renameTargetPart); - break; - } - case PART_TYPE_HASH: { - destPartOid = AddTemporaryHashPartitionForAlterPartitions(cmd, partTableRel, partSeq, &renameTargetPart); - break; - } - case PART_TYPE_RANGE: { - destPartOid = AddTemporaryRangePartitionForAlterPartitions(cmd, partTableRel, partSeq, &renameTargetPart); - break; - } - default: - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("Only the List/Hash/Range partitioned table is supported for data exchange."))); - break; - } + Oid destPartOid = AddTemporaryPartitionForAlterPartitions(cmd, partTableRel, srcPartOid, &renameTargetPart); srcPart = partitionOpen(partTableRel, srcPartOid, AccessExclusiveLock); destPartitionName = pstrdup(PartitionGetPartitionName(srcPart)); @@ -24089,7 +24710,6 @@ static void ExchangePartitionWithGPI(const AlterTableCmd* cmd, Relation partTabl CommandCounterIncrement(); AlterPartitionedSetWaitCleanGPI(cmd->alterGPI, partTableRel, srcPartOid); - DeleteGPITuplesForPartition(RelationGetRelid(partTableRel), srcPartOid); fastDropPartition(partTableRel, srcPartOid, "EXCHANGE PARTITIONS"); if (renameTargetPart) { @@ -24116,7 +24736,6 @@ static void fastAddPartition(Relation partTableRel, List* destPartDefList, List* pgPartRel = relation_open(PartitionRelationId, RowExclusiveLock); -#ifndef ENABLE_MULTIPLE_NODES bool isNull = false; HeapTuple tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(partTableRel->rd_id)); Datum relOptions = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_reloptions, &isNull); @@ -24124,8 +24743,7 @@ static void fastAddPartition(Relation partTableRel, List* destPartDefList, List* Datum newRelOptions = transformRelOptions((Datum)0, oldRelOptions, NULL, NULL, false, false); ReleaseSysCache(tuple); list_free_ext(oldRelOptions); -#endif - + foreach (cell, destPartDefList) { RangePartitionDefState* partDef = (RangePartitionDefState*)lfirst(cell); @@ -24135,11 +24753,7 @@ static void fastAddPartition(Relation partTableRel, List* destPartDefList, List* bucketOid, partDef, partTableRel->rd_rel->relowner, -#ifndef ENABLE_MULTIPLE_NODES (Datum)newRelOptions, -#else - (Datum)0, -#endif isTimestamptz, RelationGetStorageType(partTableRel), AccessExclusiveLock); @@ -25397,32 +26011,73 @@ static void ExecRewriteRowPartitionedTable(AlteredTableInfo* tab, Oid NewTableSp partTabRelOptions = (Datum)0; } - partitions = relationGetPartitionList(partitionedTableRel, AccessExclusiveLock); - foreach (cell, partitions) { - Partition partition = (Partition)lfirst(cell); - Relation oldRel = partitionGetRelation(partitionedTableRel, partition); + if (RelationIsSubPartitioned(partitionedTableRel)) { + partitions = relationGetPartitionList(partitionedTableRel, AccessExclusiveLock); + foreach (cell, partitions) { + Partition partition = (Partition)lfirst(cell); + Relation partrel = partitionGetRelation(partitionedTableRel, partition); - /* make a temp table for swapping partition */ - Oid OIDNewHeap = makePartitionNewHeap(partitionedTableRel, - partTabHeapDesc, - partTabRelOptions, - oldRel->rd_id, - 
oldRel->rd_rel->reltoastrelid, - oldRel->rd_rel->reltablespace); + List *subpartitions = relationGetPartitionList(partrel, AccessExclusiveLock); + ListCell* subcell = NULL; + foreach (subcell, subpartitions) { + Partition subpartition = (Partition)lfirst(subcell); + Relation oldRel = partitionGetRelation(partrel, subpartition); - Relation newRel = heap_open(OIDNewHeap, lockmode); - /* rewrite the temp table by partition */ - ATRewriteTable(tab, oldRel, newRel); - heap_close(newRel, NoLock); + /* make a temp table for swapping partition */ + Oid OIDNewHeap = makePartitionNewHeap(partrel, + RelationGetDescr(partrel), + partTabRelOptions, + oldRel->rd_id, + oldRel->rd_rel->reltoastrelid, + oldRel->rd_rel->reltablespace, + false, + partitionedTableRel->rd_rel->relfilenode); - /* swap the temp table and partition */ - finishPartitionHeapSwap(oldRel->rd_id, OIDNewHeap, false, u_sess->utils_cxt.RecentXmin, - GetOldestMultiXactId()); + Relation newRel = heap_open(OIDNewHeap, lockmode); + /* rewrite the temp table by partition */ + ATRewriteTable(tab, oldRel, newRel); + heap_close(newRel, NoLock); - /* record the temp table oid for dropping */ - tempTableOidList = lappend_oid(tempTableOidList, OIDNewHeap); + /* swap the temp table and partition */ + finishPartitionHeapSwap(oldRel->rd_id, OIDNewHeap, false, u_sess->utils_cxt.RecentXmin, + GetOldestMultiXactId()); - releaseDummyRelation(&oldRel); + /* record the temp table oid for dropping */ + tempTableOidList = lappend_oid(tempTableOidList, OIDNewHeap); + + releaseDummyRelation(&oldRel); + } + releasePartitionList(partrel, &subpartitions, AccessExclusiveLock); + releaseDummyRelation(&partrel); + } + } else { + partitions = relationGetPartitionList(partitionedTableRel, AccessExclusiveLock); + foreach (cell, partitions) { + Partition partition = (Partition)lfirst(cell); + Relation oldRel = partitionGetRelation(partitionedTableRel, partition); + + /* make a temp table for swapping partition */ + Oid OIDNewHeap = makePartitionNewHeap(partitionedTableRel, + partTabHeapDesc, + partTabRelOptions, + oldRel->rd_id, + oldRel->rd_rel->reltoastrelid, + oldRel->rd_rel->reltablespace); + + Relation newRel = heap_open(OIDNewHeap, lockmode); + /* rewrite the temp table by partition */ + ATRewriteTable(tab, oldRel, newRel); + heap_close(newRel, NoLock); + + /* swap the temp table and partition */ + finishPartitionHeapSwap(oldRel->rd_id, OIDNewHeap, false, u_sess->utils_cxt.RecentXmin, + GetOldestMultiXactId()); + + /* record the temp table oid for dropping */ + tempTableOidList = lappend_oid(tempTableOidList, OIDNewHeap); + + releaseDummyRelation(&oldRel); + } } ReleaseSysCache(tuple); @@ -26627,3 +27282,29 @@ void CheckDropViewValidity(ObjectType stmtType, char relKind, const char* relnam } DropErrorMsgWrongType(relname, relKind, expectedRelKind); } + +static bool IsViewAndRuleDependReltion(Oid relId) +{ + bool result = false; + Relation depRel = NULL; + ScanKeyData key[2]; + SysScanDesc depScan = NULL; + HeapTuple depTup = NULL; + + depRel = heap_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RewriteRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relId)); + + depScan = systable_beginscan(depRel, InvalidOid, false, NULL, 2, key); + + if (HeapTupleIsValid(depTup = systable_getnext(depScan))) { + result = true; + } + + systable_endscan(depScan); + relation_close(depRel, AccessShareLock); + + return result; +} + 
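Both the truncate path (heap_truncate_one_part_new) and the exchange path (ExchangePartitionWithGPI) above now go through AddTemporaryPartitionForAlterPartitions and share one swap pattern: create a temporary partition with the same boundary, drop the source partition, then move the identity (the partition name and the names of its local index partitions) onto the new partition. The following is a minimal standalone sketch of that sequence, modelled with plain C++ containers rather than the openGauss catalog APIs; every name in the sketch is illustrative and not part of this patch:

```
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for pg_partition: partition name -> oid, plus the local index
// names attached to each partition oid. Illustrative only.
struct Catalog {
    std::map<std::string, int> partitions;
    std::map<int, std::vector<std::string>> localIndexes;
    int nextOid = 100;
};

// Mirrors the heap_truncate_one_part_new sequence: add a temporary partition,
// remember the source names, drop the source, then rename the temporary
// partition (and its local indexes) back to the source names.
int truncatePartitionKeepingGpi(Catalog& cat, const std::string& srcName) {
    int srcOid = cat.partitions.at(srcName);
    std::vector<std::string> savedIndexNames = cat.localIndexes[srcOid];

    // AddTemporaryPartitionForAlterPartitions: same boundary, temporary names.
    int destOid = cat.nextOid++;
    std::string tmpName = "pg_tmp_" + srcName;
    cat.partitions[tmpName] = destOid;
    for (std::size_t i = 0; i < savedIndexNames.size(); ++i) {
        cat.localIndexes[destOid].push_back(tmpName + "_idx" + std::to_string(i));
    }

    // fastDropPartition: the source partition (and its rows) go away.
    cat.partitions.erase(srcName);
    cat.localIndexes.erase(srcOid);

    // renamePartitionIndexes + renamePartitionInternal: restore the identity.
    cat.localIndexes[destOid] = savedIndexNames;
    cat.partitions.erase(tmpName);
    cat.partitions[srcName] = destOid;
    return destOid;
}

int main() {
    Catalog cat;
    cat.partitions["p1"] = 1;
    cat.localIndexes[1] = {"p1_idx_a", "p1_idx_b"};
    int newOid = truncatePartitionKeepingGpi(cat, "p1");
    std::cout << "p1 kept its name, new oid " << newOid << ", "
              << cat.localIndexes[newOid].size() << " local indexes restored\n";
    return 0;
}
```

The partition keeps its names but ends up with a new oid; global-partition-index entries that still carry the old oid are left behind on purpose, and the AlterPartitionedSetWaitCleanGPI calls above flag the table so that those stale entries can be cleaned up later as belonging to an invisible partition.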
diff --git a/src/gausskernel/optimizer/commands/tablespace.cpp b/src/gausskernel/optimizer/commands/tablespace.cpp
index d979780f4..64f751851 100644
--- a/src/gausskernel/optimizer/commands/tablespace.cpp
+++ b/src/gausskernel/optimizer/commands/tablespace.cpp
@@ -3065,5 +3065,6 @@ Oid ConvertToPgclassRelTablespaceOid(Oid tblspc)
  */
 Oid ConvertToRelfilenodeTblspcOid(Oid tblspc)
 {
-    return (InvalidOid == tblspc) ? u_sess->proc_cxt.MyDatabaseTableSpace : tblspc;
+    Assert(CheckMyDatabaseMatch());
+    return (InvalidOid == tblspc) ? GetMyDatabaseTableSpace() : tblspc;
 }
diff --git a/src/gausskernel/optimizer/commands/trigger.cpp b/src/gausskernel/optimizer/commands/trigger.cpp
index 1239f772c..a16f211ca 100644
--- a/src/gausskernel/optimizer/commands/trigger.cpp
+++ b/src/gausskernel/optimizer/commands/trigger.cpp
@@ -72,6 +72,7 @@
 #include "pgxc/pgxc.h"
 #include "optimizer/pgxcship.h"
 #endif
+#include "utils/knl_relcache.h"

 /*
  * Note that similar macros also exist in executor/execMain.c. There does not
@@ -1495,7 +1496,7 @@ void RelationBuildTriggers(Relation relation)
         SetTriggerFlags(trigdesc, &(triggers[i]));

     /* Copy completed trigdesc into cache storage */
-    oldContext = MemoryContextSwitchTo(u_sess->cache_mem_cxt);
+    oldContext = MemoryContextSwitchTo(LocalMyDBCacheMemCxt());
     relation->trigdesc = CopyTriggerDesc(trigdesc);
     (void)MemoryContextSwitchTo(oldContext);
diff --git a/src/gausskernel/optimizer/commands/typecmds.cpp b/src/gausskernel/optimizer/commands/typecmds.cpp
index 6cd3cba2b..732f76921 100644
--- a/src/gausskernel/optimizer/commands/typecmds.cpp
+++ b/src/gausskernel/optimizer/commands/typecmds.cpp
@@ -40,6 +40,7 @@
 #include "access/xact.h"
 #include "catalog/catalog.h"
 #include "catalog/dependency.h"
+#include "catalog/gs_package.h"
 #include "catalog/heap.h"
 #include "catalog/indexing.h"
 #include "catalog/pg_authid.h"
@@ -52,8 +53,10 @@
 #include "catalog/pg_proc.h"
 #include "catalog/pg_proc_fn.h"
 #include "catalog/pg_range.h"
+#include "catalog/pg_synonym.h"
 #include "catalog/pg_type.h"
 #include "catalog/pg_type_fn.h"
+#include "catalog/gs_db_privilege.h"
 #include "commands/defrem.h"
 #include "commands/tablecmds.h"
 #include "commands/typecmds.h"
@@ -163,7 +166,6 @@ void DefineType(List* names, List* parameters)
     Oid resulttype;
     ListCell* pl = NULL;
     Oid typowner = InvalidOid;
-    AclResult aclresult;

     /*
      * isalter is true, change the owner of the objects as the owner of the
@@ -184,8 +186,13 @@ void DefineType(List* names, List* parameters)
      */
     /* Convert list of names to a name and namespace */
     typeNamespace = QualifiedNameGetCreationNamespace(names, &typname);
+    /*
+     * If anyResult is true, the current user has been granted the CREATE ANY TYPE privilege.
+     */
+    bool anyResult = CheckCreatePrivilegeInNamespace(typeNamespace, GetUserId(), CREATE_ANY_TYPE);
+
     if (u_sess->attr.attr_sql.enforce_a_behavior) {
-        typowner = GetUserIdFromNspId(typeNamespace);
+        typowner = GetUserIdFromNspId(typeNamespace, false, anyResult);

         if (!OidIsValid(typowner))
             typowner = GetUserId();
@@ -195,15 +202,9 @@ void DefineType(List* names, List* parameters)
     } else {
         typowner = GetUserId();
     }
-    /* XXX this is unnecessary given the superuser check above */
-    /* Check we have creation rights in target namespace */
-    aclresult = pg_namespace_aclcheck(typeNamespace, GetUserId(), ACL_CREATE);
-    if (aclresult != ACLCHECK_OK)
-        aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(typeNamespace));
+
     if (isalter) {
-        aclresult = pg_namespace_aclcheck(typeNamespace, typowner, ACL_CREATE);
-        if (aclresult != ACLCHECK_OK)
-            aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(typeNamespace));
+        (void)CheckCreatePrivilegeInNamespace(typeNamespace, typowner, CREATE_ANY_TYPE);
     }

     /*
@@ -1039,9 +1040,15 @@ void DefineEnum(CreateEnumStmt* stmt)

     /* Convert list of names to a name and namespace */
     enumNamespace = QualifiedNameGetCreationNamespace(stmt->typname, &enumName);
-
+    /*
+     * If anyResult is true, the current user has been granted the CREATE ANY TYPE privilege.
+     */
+    bool anyResult = false;
+    if (!IsSysSchema(enumNamespace)) {
+        anyResult = HasSpecAnyPriv(GetUserId(), CREATE_ANY_TYPE, false);
+    }
     if (u_sess->attr.attr_sql.enforce_a_behavior) {
-        typowner = GetUserIdFromNspId(enumNamespace);
+        typowner = GetUserIdFromNspId(enumNamespace, false, anyResult);

         if (!OidIsValid(typowner))
             typowner = GetUserId();
@@ -1052,12 +1059,10 @@ void DefineEnum(CreateEnumStmt* stmt)
     }
     /* Check we have creation rights in target namespace */
     aclresult = pg_namespace_aclcheck(enumNamespace, GetUserId(), ACL_CREATE);
-    if (aclresult != ACLCHECK_OK)
+    if (aclresult != ACLCHECK_OK && !anyResult)
         aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(enumNamespace));
     if (isalter) {
-        aclresult = pg_namespace_aclcheck(enumNamespace, typowner, ACL_CREATE);
-        if (aclresult != ACLCHECK_OK)
-            aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(enumNamespace));
+        (void)CheckCreatePrivilegeInNamespace(enumNamespace, typowner, CREATE_ANY_TYPE);
     }

     /*
@@ -1239,8 +1244,15 @@ void DefineRange(CreateRangeStmt* stmt)

     /* Convert list of names to a name and namespace */
     typeNamespace = QualifiedNameGetCreationNamespace(stmt->typname, &typname);
+    /*
+     * If anyResult is true, the current user has been granted the CREATE ANY TYPE privilege.
+     */
+    bool anyResult = false;
+    if (!IsSysSchema(typeNamespace)) {
+        anyResult = HasSpecAnyPriv(GetUserId(), CREATE_ANY_TYPE, false);
+    }
     if (u_sess->attr.attr_sql.enforce_a_behavior) {
-        typowner = GetUserIdFromNspId(typeNamespace);
+        typowner = GetUserIdFromNspId(typeNamespace, false, anyResult);

         if (!OidIsValid(typowner)) {
             typowner = GetUserId();
@@ -1253,12 +1265,10 @@ void DefineRange(CreateRangeStmt* stmt)
     }
     /* Check we have creation rights in target namespace */
     aclresult = pg_namespace_aclcheck(typeNamespace, GetUserId(), ACL_CREATE);
-    if (aclresult != ACLCHECK_OK)
+    if (aclresult != ACLCHECK_OK && !anyResult)
         aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(typeNamespace));
     if (isalter) {
-        aclresult = pg_namespace_aclcheck(typeNamespace, typowner, ACL_CREATE);
-        if (aclresult != ACLCHECK_OK)
-            aclcheck_error(aclresult, ACL_KIND_NAMESPACE, get_namespace_name(typeNamespace));
+        (void)CheckCreatePrivilegeInNamespace(typeNamespace, typowner, CREATE_ANY_TYPE);
     }

     /*
@@ -2076,7 +2086,7 @@ Oid DefineCompositeType(RangeVar* typevar, List* coldeflist)
      * check is here mainly to get a better error message about a "type"
      * instead of below about a "relation".
*/ - typeNamespace = RangeVarGetAndCheckCreationNamespace(createStmt->relation, NoLock, NULL); + typeNamespace = RangeVarGetAndCheckCreationNamespace(createStmt->relation, NoLock, NULL, RELKIND_COMPOSITE_TYPE); RangeVarAdjustRelationPersistence(createStmt->relation, typeNamespace); old_type_oid = GetSysCacheOid2(TYPENAMENSP, CStringGetDatum(createStmt->relation->relname), ObjectIdGetDatum(typeNamespace)); @@ -3307,6 +3317,99 @@ void AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId, bool hasDependEntry) heap_close(rel, RowExclusiveLock); } +/* + * AlterTypeOwnerByPkg - change package type owner + * + * This is currently only used to propagate ALTER PACKAGE OWNER to a + * package. Package will build types, and add to pg_type. + * It assumes the caller has done all needed checks. + */ +void AlterTypeOwnerByPkg(Oid pkgOid, Oid newOwnerId) +{ + if (!OidIsValid(pkgOid)) { + return; + } + + Relation depRel; + ScanKeyData key[2]; + SysScanDesc scan; + HeapTuple tup; + const int keyNumber = 2; + bool isPkgDepTyp = false; + Form_pg_depend depTuple = NULL; + + depRel = heap_open(DependRelationId, RowExclusiveLock); + ScanKeyInit(&key[0], Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(PackageRelationId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(pkgOid)); + scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, keyNumber, key); + + while (HeapTupleIsValid(tup = systable_getnext(scan))) { + depTuple = (Form_pg_depend)GETSTRUCT(tup); + isPkgDepTyp = (depTuple->deptype == DEPENDENCY_AUTO) && + (depTuple->classid == TypeRelationId || depTuple->classid == PgSynonymRelationId); + if (!isPkgDepTyp) { + continue; + } + if (depTuple->classid == TypeRelationId) { + AlterTypeOwnerInternal(depTuple->objid, newOwnerId, false); + } else { + AlterSynonymOwnerByOid(depTuple->objid, newOwnerId); + } + } + + systable_endscan(scan); + heap_close(depRel, RowExclusiveLock); + return; +} + +/* + * AlterTypeOwnerByFunc - change func type owner + * + * This is currently only used to propagate ALTER FUNCTION OWNER + * Procedure will build types when the type is nested, and add to pg_type. + * It assumes the caller has done all needed checks. 
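+ * Only pg_depend entries with deptype DEPENDENCY_AUTO that reference the
+ * function and point at pg_type rows are updated here.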
+ */
+void AlterTypeOwnerByFunc(Oid funcOid, Oid newOwnerId)
+{
+#ifdef ENABLE_MULTIPLE_NODES
+    /* procedure type will only be built in centralized mode */
+    return;
+#endif
+    if (!OidIsValid(funcOid)) {
+        return;
+    }
+
+    Relation depRel;
+    ScanKeyData key[2];
+    SysScanDesc scan;
+    HeapTuple tup;
+    const int keyNumber = 2;
+    bool isFuncDepTyp = false;
+    Form_pg_depend depTuple = NULL;
+
+    depRel = heap_open(DependRelationId, RowExclusiveLock);
+    ScanKeyInit(&key[0], Anum_pg_depend_refclassid,
+        BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ProcedureRelationId));
+    ScanKeyInit(&key[1], Anum_pg_depend_refobjid,
+        BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(funcOid));
+    scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, keyNumber, key);
+
+    while (HeapTupleIsValid(tup = systable_getnext(scan))) {
+        depTuple = (Form_pg_depend)GETSTRUCT(tup);
+        isFuncDepTyp = (depTuple->deptype == DEPENDENCY_AUTO) && depTuple->classid == TypeRelationId;
+        if (!isFuncDepTyp) {
+            continue;
+        }
+        AlterTypeOwnerInternal(depTuple->objid, newOwnerId, false);
+    }
+
+    systable_endscan(scan);
+    heap_close(depRel, RowExclusiveLock);
+    return;
+}
+
 /*
  * Execute ALTER TYPE SET SCHEMA
  */
diff --git a/src/gausskernel/optimizer/commands/user.cpp b/src/gausskernel/optimizer/commands/user.cpp
index 0862b8ddb..28ca1f82b 100755
--- a/src/gausskernel/optimizer/commands/user.cpp
+++ b/src/gausskernel/optimizer/commands/user.cpp
@@ -3395,6 +3395,9 @@ void RenameRole(const char* oldname, const char* newname)
     int i;
     Oid roleid;
     bool is_opradmin = false;
+    Relation pg_job_tbl = NULL;
+    TableScanDesc scan = NULL;
+    HeapTuple tuple = NULL;

     Relation rel = heap_open(AuthIdRelationId, RowExclusiveLock);
     TupleDesc dsc = RelationGetDescr(rel);
@@ -3508,6 +3511,23 @@ void RenameRole(const char* oldname, const char* newname)
      * Close pg_authid, but keep lock till commit.
      */
     heap_close(rel, NoLock);
+
+    /*
+     * change the user name in pg_job.
+ */ + pg_job_tbl = heap_open(PgJobRelationId, ExclusiveLock); + scan = heap_beginscan(pg_job_tbl, SnapshotNow, 0, NULL); + + while (HeapTupleIsValid(tuple = heap_getnext(scan, ForwardScanDirection))) { + Form_pg_job pg_job = (Form_pg_job)GETSTRUCT(tuple); + if (strcmp(NameStr(pg_job->log_user), oldname) == 0) { + update_pg_job_username(pg_job->job_id, newname); + } + } + + heap_endscan(scan); + heap_close(pg_job_tbl, ExclusiveLock); + } /* @@ -3982,6 +4002,9 @@ static bool IsLockOnRelation(const LockInstanceData* instance) case LOCKTAG_TUPLE: on_relation = true; break; + case LOCKTAG_UID: + on_relation = true; + break; case LOCKTAG_CSTORE_FREESPACE: on_relation = true; break; diff --git a/src/gausskernel/optimizer/commands/vacuum.cpp b/src/gausskernel/optimizer/commands/vacuum.cpp index 2266a2609..6c78fbdda 100644 --- a/src/gausskernel/optimizer/commands/vacuum.cpp +++ b/src/gausskernel/optimizer/commands/vacuum.cpp @@ -66,6 +66,7 @@ #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/rbtree.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "access/heapam.h" @@ -129,6 +130,8 @@ static void DropEmptyPartitionDirectories(Oid relid); static THR_LOCAL BufferAccessStrategy vac_strategy; static THR_LOCAL int elevel = -1; +static void UstoreVacuumGPIPartition(Oid relid, Relation rel); + static void vac_truncate_clog(TransactionId frozenXID, MultiXactId frozenMulti); static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast); static void GPIVacuumMainPartition( @@ -567,6 +570,69 @@ static List *GetVacuumObjectOfSubpartitionTable(const Oid relId) return result; } +static vacuum_object *GetVacuumObjectOfSubpartition(VacuumStmt* vacstmt, Oid relationid) +{ + Assert(PointerIsValid(vacstmt->relation->subpartitionname)); + + /* + * for dfs table, there is no partition table now so just return + * for dfs special vacuum. 
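+     * Otherwise the subpartition is resolved by name and returned as a single
+     * vacuum_object flagged VACFLG_SUB_PARTITION; it is allocated in
+     * vac_context so that it survives until the vacuum object list has been
+     * processed.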
+ */ + if (hdfsVcuumAction(vacstmt->options)) { + return NULL; + } + + Oid partitionid = InvalidOid; + Oid subpartitionid = InvalidOid; + Form_pg_partition subpartitionForm; + HeapTuple subpartitionTup; + MemoryContext oldcontext = NULL; + vacuum_object* vacObj = NULL; + + subpartitionid = partitionNameGetPartitionOid(relationid, + vacstmt->relation->subpartitionname, + PART_OBJ_TYPE_TABLE_SUB_PARTITION, + AccessShareLock, + true, + false, + NULL, + NULL, + NoLock, + &partitionid); + if (!OidIsValid(subpartitionid)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("subpartition \"%s\" of relation \"%s\" does not exist", + vacstmt->relation->subpartitionname, + vacstmt->relation->relname))); + } + + subpartitionTup = SearchSysCache1WithLogLevel(PARTRELID, ObjectIdGetDatum(subpartitionid), LOG); + if (!HeapTupleIsValid(subpartitionTup)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for subpartition %u", subpartitionid))); + } + subpartitionForm = (Form_pg_partition)GETSTRUCT(subpartitionTup); + Assert(subpartitionForm->parttype == PART_OBJ_TYPE_TABLE_SUB_PARTITION); + + if (t_thrd.vacuum_cxt.vac_context) { + oldcontext = MemoryContextSwitchTo(t_thrd.vacuum_cxt.vac_context); + } + + vacObj = (vacuum_object *)palloc0(sizeof(vacuum_object)); + vacObj->tab_oid = subpartitionid; + vacObj->parent_oid = partitionid; + vacObj->flags = VACFLG_SUB_PARTITION; + + if (t_thrd.vacuum_cxt.vac_context) { + (void)MemoryContextSwitchTo(oldcontext); + } + + ReleaseSysCache(subpartitionTup); + return vacObj; +} + /* * Build a list of Oids for each relation to be processed * @@ -758,8 +824,14 @@ List* get_rel_oids(Oid relid, VacuumStmt* vacstmt) } ReleaseSysCache(partitionTup); + } else if (PointerIsValid(vacstmt->relation->subpartitionname)) { + /* 2. a subpartition */ + vacObj = GetVacuumObjectOfSubpartition(vacstmt, relationid); + if (PointerIsValid(vacObj)) { + oid_list = lappend(oid_list, vacObj); + } } else { - /* 2.a relation */ + /* 3.a relation */ classTup = SearchSysCache1WithLogLevel(RELOID, ObjectIdGetDatum(relationid), LOG); if (HeapTupleIsValid(classTup)) { @@ -1034,6 +1106,12 @@ void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_tabl * always an independent transaction. */ *oldestXmin = GetOldestXmin(rel); + if (IsCatalogRelation(rel) || RelationIsAccessibleInLogicalDecoding(rel)) { + TransactionId CatalogXmin = GetReplicationSlotCatalogXmin(); + if (TransactionIdIsNormal(CatalogXmin) && TransactionIdPrecedes(CatalogXmin, *oldestXmin)) { + *oldestXmin = CatalogXmin; + } + } Assert(TransactionIdIsNormal(*oldestXmin)); @@ -1053,8 +1131,8 @@ void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_tabl * Compute the cutoff XID, being careful not to generate a "permanent" XID */ limit = *oldestXmin; - if (limit > FirstNormalTransactionId + freezemin) - limit -= freezemin; + if (limit > FirstNormalTransactionId + (uint64)freezemin) + limit -= (uint64)freezemin; else limit = FirstNormalTransactionId; @@ -1064,8 +1142,8 @@ void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_tabl * freeze age of zero. 
*/ nextXid = ReadNewTransactionId(); - if (nextXid > FirstNormalTransactionId + g_instance.attr.attr_storage.autovacuum_freeze_max_age) - safeLimit = nextXid - g_instance.attr.attr_storage.autovacuum_freeze_max_age; + if (nextXid > FirstNormalTransactionId + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age) + safeLimit = nextXid - (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age; else safeLimit = FirstNormalTransactionId; @@ -1101,8 +1179,8 @@ void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_tabl * XID. */ limit = ReadNewTransactionId(); - if (limit > FirstNormalTransactionId + freezetable) - limit -= freezetable; + if (limit > FirstNormalTransactionId + (uint64)freezetable) + limit -= (uint64)freezetable; else limit = FirstNormalTransactionId; @@ -1118,8 +1196,8 @@ void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_tabl * for Xids */ mxLimit = GetOldestMultiXactId(); - if (mxLimit > FirstMultiXactId + freezemin) - mxLimit -= freezemin; + if (mxLimit > FirstMultiXactId + (uint64)freezemin) + mxLimit -= (uint64)freezemin; else mxLimit = FirstMultiXactId; @@ -2207,6 +2285,12 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) } if (!onerel) { + if (onesubpart != NULL) { + partitionClose(onesubpartrel, onesubpart, lmode); + } + if (onesubpartrel != NULL) { + releaseDummyRelation(&onesubpartrel); + } if (onepart != NULL) partitionClose(onepartrel, onepart, lmode); if (onepartrel != NULL) @@ -2351,12 +2435,14 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) * get_rel_oids() but seems safer to check after we've locked the * relation. */ + + bool isUstoreGPI = RelationIsUstoreIndex(onerel) && RelationIsGlobalIndex(onerel); if (onerel->rd_rel->relkind != RELKIND_RELATION && #ifdef ENABLE_MOT !(RelationIsForeignTable(onerel) && isMOTFromTblOid(onerel->rd_id)) && #endif onerel->rd_rel->relkind != RELKIND_MATVIEW && - onerel->rd_rel->relkind != RELKIND_TOASTVALUE) { + onerel->rd_rel->relkind != RELKIND_TOASTVALUE && !isUstoreGPI) { if (vacstmt->options & VACOPT_VERBOSE) messageLevel = VERBOSEMESSAGE; @@ -2674,7 +2760,11 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) pgstat_report_vacuum(relid, InvalidOid, false, 0); } else { pgstat_report_waitstatus_relname(STATE_VACUUM, get_nsp_relname(relid)); - TableRelationVacuum(onerel, vacstmt, vac_strategy); + if (isUstoreGPI) { + UstoreVacuumGPIPartition(relid, onerel); + } else { + TableRelationVacuum(onerel, vacstmt, vac_strategy); + } } } (void)pgstat_report_waitstatus(oldStatus); @@ -2754,10 +2844,10 @@ static bool vacuum_rel(Oid relid, VacuumStmt* vacstmt, bool do_toast) if (isFakeRelation) { Assert(onerelid.relId == relid && OidIsValid(relationid)); if (OidIsValid(subparentid)) { - UnLockPartitionVacuumForSession(&partIdtf, relationid, subparentid, lmode); - UnLockPartitionVacuumForSession(&subpartIdtf, subparentid, relid, lmode); + UnLockPartitionVacuumForSession(&partIdtf, relationid, subparentid, lmode); + UnlockRelationIdForSession(&partLockRelId, lmodePartTable); } else { UnLockPartitionVacuumForSession(&partIdtf, relationid, relid, lmode); @@ -3737,8 +3827,9 @@ static List* get_tables_to_merge() appendStringInfo(info->schemaname, "%s", schema_name); pfree_ext(schema_name); - make_real_queries(info); - + if (info->is_hdfs) { + make_real_queries(info); + } infos = lappend(infos, info); ereport(DEBUG1, @@ -4254,6 +4345,7 @@ static void GPICleanInvisibleIndex(Relation indrel, IndexBulkDeleteResult 
**stat ivinfo.message_level = elevel; ivinfo.num_heap_tuples = indrel->rd_rel->reltuples; ivinfo.strategy = vac_strategy; + ivinfo.invisibleParts = NULL; VacStates* pvacStates = (VacStates*)palloc0(sizeof(VacStates)); pvacStates->invisiblePartOids = invisibleParts; @@ -4313,6 +4405,102 @@ static void GPIOpenGlobalIndexes(Relation onerel, LOCKMODE lockmode, int* nindex list_free(globIndOidList); } + +static void UstoreVacuumGPIPartition(Oid relid, Relation rel) +{ + ScanKeyData skey[2]; + /* Open pg_index to find out the relation owning current GPI. */ + Relation pgIndex = heap_open(IndexRelationId, AccessShareLock); + ScanKeyInit(&skey[0], Anum_pg_index_indexrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); + SysScanDesc scanIndexes = systable_beginscan(pgIndex, IndexRelidIndexId, true, NULL, 1, skey); + HeapTuple indexTuple = systable_getnext(scanIndexes); + if (!HeapTupleIsValid(indexTuple)) { + systable_endscan(scanIndexes); + heap_close(pgIndex, NoLock); + return; + } + /* Get the Oid of relation. */ + Form_pg_index indexTupleForm = (Form_pg_index)GETSTRUCT(indexTuple); + Oid indexOfRelOid = indexTupleForm->indrelid; + systable_endscan(scanIndexes); + heap_close(pgIndex, NoLock); + /* Here we first get the main partition of the relation, and then check its + * wait_cleanup_gpi flag is 'y' or 'n': + * 1. If wait_cleanup_gpi=y, which means we need to vacuum the GPI. + * 2. If wait_cleanup_gpi=n, which means we do not need to vacuum the GPI. + */ + Relation pgPartition = heap_open(PartitionRelationId, RowExclusiveLock); + ScanKeyInit(&skey[0], Anum_pg_partition_parttype, BTEqualStrategyNumber, + F_CHAREQ, CharGetDatum(PART_OBJ_TYPE_PARTED_TABLE)); + ScanKeyInit(&skey[1], Anum_pg_partition_parentid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(indexOfRelOid)); + SysScanDesc scanParts = systable_beginscan(pgPartition, PartitionParentOidIndexId, + true, NULL, 2, skey); + HeapTuple partTuple = systable_getnext(scanParts); + if (!HeapTupleIsValid(partTuple)) { + systable_endscan(scanParts); + heap_close(pgPartition, NoLock); + return; + } + Form_pg_partition partTupleForm = (Form_pg_partition)GETSTRUCT(partTuple); + partTuple = SearchSysCache3(PARTPARTOID, PointerGetDatum(partTupleForm->relname.data), + CharGetDatum(PART_OBJ_TYPE_PARTED_TABLE), ObjectIdGetDatum(indexOfRelOid)); + systable_endscan(scanParts); + if (!HeapTupleIsValid(partTuple)) { + ereport(ERROR, + (errcode(ERRCODE_CACHE_LOOKUP_FAILED), + errmsg("cache lookup failed for partition %u", indexOfRelOid))); + } + /* Use PartitionInvisibleMetadataKeep to judge the wait_cleanup_gpi flag. */ + bool isNull = false; + Datum partOptions = fastgetattr(partTuple, Anum_pg_partition_reloptions, + RelationGetDescr(pgPartition), &isNull); + if (isNull || !PartitionInvisibleMetadataKeep(partOptions)) { + ReleaseSysCache(partTuple); + heap_close(pgPartition, NoLock); + return; + } + /* Find out the invisible parts of the relation. */ + OidRBTree *invisibleParts = CreateOidRBTree(); + if (ConditionalLockPartition(indexOfRelOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK)) { + PartitionGetAllInvisibleParts(indexOfRelOid, &invisibleParts); + UnlockPartition(indexOfRelOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK); + } + /* In rbtree, rb_leftmost will return NULL if rbtree is empty. 
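+     * An empty tree means no invisible partition is left to clean, so release
+     * everything and return without running the index cleanup.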
*/ + if (rb_leftmost(invisibleParts) == NULL) { + DestroyOidRBTree(&invisibleParts); + ReleaseSysCache(partTuple); + heap_close(pgPartition, NoLock); + return; + } + /* Start the cleanup process and collect the info needed */ + IndexVacuumInfo ivinfo; + ivinfo.index = rel; + ivinfo.analyze_only = false; + ivinfo.estimated_count = false; + ivinfo.message_level = elevel; + ivinfo.num_heap_tuples = -1; + ivinfo.strategy = vac_strategy; + ivinfo.invisibleParts = invisibleParts; + /* Cleanup process of index */ + index_vacuum_cleanup(&ivinfo, NULL); + OidRBTree *cleanedParts = CreateOidRBTree(); + OidRBTreeUnionOids(cleanedParts, invisibleParts); + if (ConditionalLockPartition(indexOfRelOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK)) { + PartitionSetEnabledClean(indexOfRelOid, cleanedParts, invisibleParts, true); + UnlockPartition(indexOfRelOid, ADD_PARTITION_ACTION, AccessShareLock, PARTITION_SEQUENCE_LOCK); + } else { + /* Updates reloptions of cleanedParts in pg_partition after GPI vacuum is executed */ + PartitionSetEnabledClean(indexOfRelOid, cleanedParts, invisibleParts, false); + } + DestroyOidRBTree(&invisibleParts); + DestroyOidRBTree(&cleanedParts); + /* Set wait_cleanup_gpi=n after we finish the vacuum cleanup. */ + UpdateWaitCleanGpiRelOptions(pgPartition, partTuple, false, true); + ReleaseSysCache(partTuple); + heap_close(pgPartition, NoLock); +} + // vacuum main partition table to delete invisible tuple in global partition index static void GPIVacuumMainPartition( Relation onerel, const VacuumStmt* vacstmt, LOCKMODE lockmode, BufferAccessStrategy bstrategy) diff --git a/src/gausskernel/optimizer/commands/vacuumlazy.cpp b/src/gausskernel/optimizer/commands/vacuumlazy.cpp index d0204fd42..ebef225da 100644 --- a/src/gausskernel/optimizer/commands/vacuumlazy.cpp +++ b/src/gausskernel/optimizer/commands/vacuumlazy.cpp @@ -152,8 +152,9 @@ static bool cbi_lazy_tid_reaped(ItemPointer itemptr, void* state, Oid partOid = static bool lazy_tid_reaped(ItemPointer itemptr, void* state, Oid partOid = InvalidOid, int2 bktId = InvalidBktId); static int cbi_vac_cmp_itemptr(const void* left, const void* right); static int vac_cmp_itemptr(const void* left, const void* right); -extern bool ShouldAttemptTruncation(const LVRelStats *vacrelstats); extern void vacuum_log_cleanup_info(Relation rel, LVRelStats* vacrelstats); +static bool HeapPageCheckForUsedLinePointer(Page page); +static bool UHeapPageCheckForUsedLinePointer(Page page, Relation relation); /* * lazy_vacuum_rel() -- perform LAZY VACUUM for one heap relation @@ -227,7 +228,9 @@ void lazy_vacuum_rel(Relation onerel, VacuumStmt* vacstmt, BufferAccessStrategy ResultRelInfo *resultRelInfo = NULL; if (onerel->rd_rel->relhasindex) { resultRelInfo = makeNode(ResultRelInfo); - if (vacstmt->onepartrel != NULL) { + if (vacstmt->issubpartition) { + InitResultRelInfo(resultRelInfo, vacstmt->parentpartrel, 1, 0); + } else if (vacstmt->onepartrel != NULL) { InitResultRelInfo(resultRelInfo, vacstmt->onepartrel, 1, 0); } else { InitResultRelInfo(resultRelInfo, onerel, 1, 0); @@ -485,7 +488,11 @@ void lazy_vacuum_rel(Relation onerel, VacuumStmt* vacstmt, BufferAccessStrategy new_rel_pages = vacrelstats->old_rel_pages; new_rel_tuples = vacrelstats->old_rel_tuples; } - if (RelationIsPartition(onerel)) { + if (vacstmt->issubpartition) { + Assert(vacstmt->parentpartrel != NULL); + Assert(vacstmt->parentpart != NULL); + new_rel_allvisible = visibilitymap_count(vacstmt->parentpartrel, vacstmt->parentpart); + } else if 
(RelationIsPartition(onerel)) { Assert(vacstmt->onepartrel != NULL); Assert(vacstmt->onepart != NULL); new_rel_allvisible = visibilitymap_count(vacstmt->onepartrel, vacstmt->onepart); @@ -510,12 +517,12 @@ void lazy_vacuum_rel(Relation onerel, VacuumStmt* vacstmt, BufferAccessStrategy * will lead to misbehave when update other index usable partitions ---the horrible * misdguge as hot update even if update indexes columns. */ - if (!vacstmt->parentpartrel) { - vac_update_pgclass_partitioned_table( - vacstmt->onepartrel, vacstmt->onepartrel->rd_rel->relhasindex, new_frozen_xid, new_min_multi); - } else { + if (vacstmt->issubpartition) { vac_update_pgclass_partitioned_table( vacstmt->parentpartrel, vacstmt->parentpartrel->rd_rel->relhasindex, new_frozen_xid, new_min_multi); + } else { + vac_update_pgclass_partitioned_table( + vacstmt->onepartrel, vacstmt->onepartrel->rd_rel->relhasindex, new_frozen_xid, new_min_multi); } // update stats of local partition indexes @@ -634,6 +641,7 @@ void lazy_vacuum_rel(Relation onerel, VacuumStmt* vacstmt, BufferAccessStrategy pg_rusage_show(&ru0)))); } } + pfree_ext(vacrelstats); gstrace_exit(GS_TRC_ID_lazy_vacuum_rel); } @@ -1464,8 +1472,7 @@ static IndexBulkDeleteResult** lazy_scan_heap( PageSetLSN(page, recptr); } END_CRIT_SECTION(); - if (PageIs8BXidHeapVersion(page) && - TransactionIdPrecedes(((HeapPageHeader)page)->pd_xid_base, u_sess->utils_cxt.RecentXmin)) { + if (TransactionIdPrecedes(((HeapPageHeader)page)->pd_xid_base, u_sess->utils_cxt.RecentXmin)) { if (u_sess->utils_cxt.RecentXmin - ((HeapPageHeader)page)->pd_xid_base > CHANGE_XID_BASE) (void)heap_change_xidbase_after_freeze(onerel, buf); } @@ -1835,6 +1842,7 @@ void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats, const LVR ivinfo.message_level = elevel; ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples; ivinfo.strategy = vacStrategy; + ivinfo.invisibleParts = NULL; /* Do bulk deletion */ if (RelationIsCrossBucketIndex(indrel)) { @@ -1870,6 +1878,7 @@ extern IndexBulkDeleteResult *lazy_cleanup_index(Relation indrel, IndexBulkDelet ivinfo.message_level = elevel; ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples; ivinfo.strategy = vac_strategy; + ivinfo.invisibleParts = NULL; stats = index_vacuum_cleanup(&ivinfo, stats); if (stats != NULL) { ereport(elevel, @@ -2067,8 +2076,6 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) while (blkno > vacrelstats->nonempty_pages) { Buffer buf; Page page; - OffsetNumber offnum, - maxoff; bool hastup = NULL; /* @@ -2084,13 +2091,24 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) elapsed = currenttime; INSTR_TIME_SUBTRACT(elapsed, starttime); if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000) >= AUTOVACUUM_TRUNCATE_LOCK_CHECK_INTERVAL) { - if (LockHasWaitersRelation(onerel, AccessExclusiveLock)) { - ereport(elevel, - (errmsg("\"%s\": suspending truncate due to conflicting lock request", - RelationGetRelationName(onerel)))); + if (RelationIsPartition(onerel)) { + if (LockHasWaitersPartition(onerel, AccessExclusiveLock)) { + ereport(elevel, + (errmsg("\"%s\": suspending truncate due to conflicting lock request", + RelationGetRelationName(onerel)))); - vacrelstats->lock_waiter_detected = true; - return blkno; + vacrelstats->lock_waiter_detected = true; + return blkno; + } + } else { + if (LockHasWaitersRelation(onerel, AccessExclusiveLock)) { + ereport(elevel, + (errmsg("\"%s\": suspending truncate due to conflicting lock request", + RelationGetRelationName(onerel)))); + + 
vacrelstats->lock_waiter_detected = true; + return blkno; + } } starttime = currenttime; } @@ -2112,28 +2130,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) page = BufferGetPage(buf); - if (PageIsNew(page) || PageIsEmpty(page)) { - /* PageIsNew probably shouldn't happen... */ - UnlockReleaseBuffer(buf); - continue; - } - - hastup = false; - maxoff = PageGetMaxOffsetNumber(page); - for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId itemid = PageGetItemId(page, offnum); - - /* - * Note: any non-unused item should be taken as a reason to keep - * this page. We formerly thought that DEAD tuples could be - * thrown away, but that's not so, because we'd not have cleaned - * out their index entries. - */ - if (ItemIdIsUsed(itemid)) { - hastup = true; - break; /* can stop scanning */ - } - } /* scan along page */ + hastup = RelationIsUstoreFormat(onerel) ? UHeapPageCheckForUsedLinePointer(page, onerel) : + HeapPageCheckForUsedLinePointer(page); UnlockReleaseBuffer(buf); @@ -2351,45 +2349,13 @@ void elogVacuumInfo(Relation rel, HeapTuple tuple, char* funcName, TransactionId } } -/* - * ShouldAttemptTruncation - should we attempt to truncate the heap? - * - * Don't even think about it unless we have a shot at releasing a goodly - * number of pages. Otherwise, the time taken isn't worth it. - * - * Also don't attempt it if we are doing early pruning/vacuuming, because a - * scan which cannot find a truncated heap page cannot determine that the - * snapshot is too old to read that page. We might be able to get away with - * truncating all except one of the pages, setting its LSN to (at least) the - * maximum of the truncated range if we also treated an index leaf tuple - * pointing to a missing heap page as something to trigger the "snapshot too - * old" error, but that seems fragile and seems like it deserves its own patch - * if we consider it. - * - * This is split out so that we can test whether truncation is going to be - * called for before we actually do it. If you change the logic here, be - * careful to depend only on fields that lazy_scan_heap updates on-the-fly. - */ - -bool ShouldAttemptTruncation(const LVRelStats *vacrelstats) -{ - BlockNumber possiblyFreeable; - - possiblyFreeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages; - if (possiblyFreeable > 0 && (possiblyFreeable >= REL_TRUNCATE_MINIMUM || - possiblyFreeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)) - return true; - else - return false; -} - /* * Return true if there is any used line pointer on the page. * Assume the caller has atleast a shared lock */ static bool UHeapPageCheckForUsedLinePointer(Page page, Relation relation) { - if (PageIsNew(page) || UPageIsEmpty((UHeapPageHeaderData *)page, RelationGetInitTd(relation))) { + if (PageIsNew(page) || UPageIsEmpty((UHeapPageHeaderData *)page)) { return false; } @@ -2423,91 +2389,3 @@ static bool HeapPageCheckForUsedLinePointer(Page page) return false; } - -/* - * Rescan end pages to verify that they are (still) empty of tuples. - * - * Returns number of nondeletable pages (last nonempty page + 1). 
- */ -// static BlockNumber -BlockNumber CountNondeletablePages(Relation onerel, LVRelStats *vacrelstats) -{ - BlockNumber blkno; - instr_time starttime; - instr_time currenttime; - instr_time elapsed; - - /* Initialize the starttime if we check for conflicting lock requests */ - INSTR_TIME_SET_CURRENT(starttime); - - /* Strange coding of loop control is needed because blkno is unsigned */ - blkno = vacrelstats->rel_pages; - while (blkno > vacrelstats->nonempty_pages) { - Buffer buf; - Page page; - bool hastup = NULL; - - /* - * Check if another process requests a lock on our relation. We are - * holding an AccessExclusiveLock here, so they will be waiting. We - * only do this in autovacuum_truncate_lock_check millisecond - * intervals, and we only check if that interval has elapsed once - * every 32 blocks to keep the number of system calls and actual - * shared lock table lookups to a minimum. - */ - if ((blkno % 32) == 0) { - INSTR_TIME_SET_CURRENT(currenttime); - elapsed = currenttime; - INSTR_TIME_SUBTRACT(elapsed, starttime); - if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000) >= AUTOVACUUM_TRUNCATE_LOCK_CHECK_INTERVAL) { - if (LockHasWaitersRelation(onerel, AccessExclusiveLock)) { - ereport(elevel, (errmsg("\"%s\": suspending truncate " - "due to conflicting lock request", - RelationGetRelationName(onerel)))); - - vacrelstats->lock_waiter_detected = true; - return blkno; - } - starttime = currenttime; - } - } - - /* - * We don't insert a vacuum delay point here, because we have an - * exclusive lock on the table which we want to hold for as short a - * time as possible. We still need to check for interrupts however. - */ - CHECK_FOR_INTERRUPTS(); - - blkno--; - - buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL, vac_strategy); - - /* In this phase we only need shared access to the buffer */ - LockBuffer(buf, BUFFER_LOCK_SHARE); - - page = BufferGetPage(buf); - - /* - * Note: any non-unused item should be taken as a reason to keep - * this page. We formerly thought that DEAD tuples could be - * thrown away, but that's not so, because we'd not have cleaned - * out their index entries. - */ - hastup = RelationIsUstoreFormat(onerel) ? UHeapPageCheckForUsedLinePointer(page, onerel) : - HeapPageCheckForUsedLinePointer(page); - - UnlockReleaseBuffer(buf); - - /* Done scanning if we found a tuple here */ - if (hastup) - return blkno + 1; - } - - /* - * If we fall out of the loop, all the previously-thought-to-be-empty - * pages still are; we need not bother to look at the last known-nonempty - * page. - */ - return vacrelstats->nonempty_pages; -} diff --git a/src/gausskernel/optimizer/commands/verify.cpp b/src/gausskernel/optimizer/commands/verify.cpp index 9853c1392..748ab8e91 100644 --- a/src/gausskernel/optimizer/commands/verify.cpp +++ b/src/gausskernel/optimizer/commands/verify.cpp @@ -1281,6 +1281,16 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc) SMGR_READ_STATUS rdStatus = smgrread(src, forkNum, blkno, buf); /* check the page & crc */ if (rdStatus == SMGR_RD_CRC_ERROR) { + // Retry 5 times to increase program reliability. + for (int retryTimes = 1; retryTimes < FAIL_RETRY_MAX_NUM && rdStatus == SMGR_RD_CRC_ERROR; ++retryTimes) { + /* If we got a cancel signal during the copy of the data, quit */ + CHECK_FOR_INTERRUPTS(); + rdStatus = smgrread(src, forkNum, blkno, buf); + } + if (rdStatus != SMGR_RD_CRC_ERROR) { + continue; + } + isValidRelationPage = false; /* * check the cudesc table|cudesc-toast| cudesc_index. 
If one of them is damaged, we will have to
@@ -1300,6 +1310,8 @@ static bool VerifyRowRelFast(Relation rel, VerifyDesc* checkCudesc)
                         RelationGetRelationName(rel),
                         relpathbackend(src->smgr_rnode.node, src->smgr_rnode.backend, forkNum)),
                     handle_in_client(true)));
+                /* Add the bad page to the global variable and try to fix it. */
+                addGlobalRepairBadBlockStat(src->smgr_rnode, forkNum, blkno);
             }
         }
diff --git a/src/gausskernel/optimizer/commands/verifyrepair.cpp b/src/gausskernel/optimizer/commands/verifyrepair.cpp
new file mode 100644
index 000000000..434099ece
--- /dev/null
+++ b/src/gausskernel/optimizer/commands/verifyrepair.cpp
@@ -0,0 +1,1641 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * verifyrepair.cpp
+ *    verify and repair bad pages and files.
+ *
+ * IDENTIFICATION
+ *    src/gausskernel/optimizer/commands/verifyrepair.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+
+/*
+ * @Description: add statistics of a bad block when a bad page/cu is read
+ * @IN RelFileNodeBackend: RelFileNodeBackend for page/cu
+ * @IN forknum: forknum for page/cu
+ * @IN blocknum: blocknum for page/cu
+ */
+
+#include "commands/verify.h"
+#include "commands/copy.h"
+#include "access/tableam.h"
+#include "commands/tablespace.h"
+#include "storage/smgr/fd.h"
+
+const int TIMEOUT_MIN = 60;
+const int TIMEOUT_MAX = 3600;
+static void checkUserPermission();
+static void checkInstanceType();
+static void checkSupUserOrOperaMode();
+
+/*
+ * Record statistics of a bad block:
+ *     read a bad page/cu --> addGlobalRepairBadBlockStat() record in global_repair_bad_block_stat
+ *
+ * Query statistics of bad blocks:
+ *     local_bad_block_info() reads global_repair_bad_block_stat and outputs it
+ *
+ * @Description: init process statistics of bad block hash table
+ */
+void initRepairBadBlockStat()
+{
+    HASHCTL info;
+    if (g_instance.repair_cxt.global_repair_bad_block_stat == NULL) {
+        /* hash accessed by database file id */
+        errno_t rc = memset_s(&info, sizeof(info), 0, sizeof(info));
+        securec_check(rc, "", "");
+        info.keysize = sizeof(BadBlockKey);
+        info.entrysize = sizeof(BadBlockEntry);
+        info.hash = tag_hash;
+        info.hcxt = INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE);
+        g_instance.repair_cxt.global_repair_bad_block_stat = hash_create("Page Repair Hash Table",
+            MAX_REPAIR_PAGE_NUM, &info,
+            HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+        if (!g_instance.repair_cxt.global_repair_bad_block_stat) {
+            ereport(FATAL, (errcode(ERRCODE_INITIALIZE_FAILED),
+                (errmsg("could not initialize page repair Hash table"))));
+        }
+    }
+}
+
+
+/* BatchClearBadBlock
+ * clear global_repair_bad_block_stat hashtable entries when the relation is dropped or truncated.
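+ * Relations stored in segment files are skipped (IsSegmentFileNode), so only
+ * entries of ordinary file-per-relation storage are removed here.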
+ */
+void BatchClearBadBlock(const RelFileNode rnode, ForkNumber forknum, BlockNumber startblkno)
+{
+    HASH_SEQ_STATUS status;
+    BadBlockEntry *entry = NULL;
+    bool found = false;
+    HTAB* bad_block_hash = g_instance.repair_cxt.global_repair_bad_block_stat;
+
+    if (IsSegmentFileNode(rnode)) {
+        return;
+    }
+
+    LWLockAcquire(RepairBadBlockStatHashLock, LW_EXCLUSIVE);
+    if (g_instance.repair_cxt.global_repair_bad_block_stat == NULL) {
+        LWLockRelease(RepairBadBlockStatHashLock);
+        return;
+    }
+
+    hash_seq_init(&status, bad_block_hash);
+    while ((entry = (BadBlockEntry *)hash_seq_search(&status)) != NULL) {
+        if (RelFileNodeEquals(rnode, entry->key.relfilenode) && entry->key.forknum == forknum &&
+            entry->key.blocknum >= startblkno) {
+            (void)hash_search(bad_block_hash, &(entry->key), HASH_REMOVE, &found);
+        }
+    }
+
+    LWLockRelease(RepairBadBlockStatHashLock);
+}
+
+bool BadBlockMatch(BadBlockEntry *entry, RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
+{
+    if (IsSegmentFileNode(rnode)) {
+        Oid relNode = 0;
+        BlockNumber blknum = 0;
+
+        if (entry->pblk.relNode != EXTENT_INVALID) {
+            relNode = entry->pblk.relNode;
+            blknum = entry->pblk.block;
+        } else {
+            SegPageLocation loc =
+                seg_get_physical_location(entry->key.relfilenode, entry->key.forknum, entry->key.blocknum);
+            entry->pblk.relNode = relNode = (uint8) EXTENT_SIZE_TO_TYPE(loc.extent_size);
+            entry->pblk.block = blknum = loc.blocknum;
+        }
+
+        if (relNode == rnode.relNode && entry->key.relfilenode.spcNode == rnode.spcNode &&
+            entry->key.relfilenode.dbNode == rnode.dbNode && entry->key.forknum == forknum &&
+            blknum >= segno * RELSEG_SIZE && blknum < (segno + 1) * RELSEG_SIZE) {
+            return true;
+        }
+    } else {
+        if (RelFileNodeEquals(rnode, entry->key.relfilenode) && entry->key.forknum == forknum &&
+            entry->key.blocknum >= segno * RELSEG_SIZE && entry->key.blocknum < (segno + 1) * RELSEG_SIZE) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/* BatchUpdateRepairTime
+ * update the repair time of global_repair_bad_block_stat hashtable entries when the file repair finishes.
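+ * Every entry whose physical location falls inside the repaired segment file,
+ * i.e. in the block range [segno * RELSEG_SIZE, (segno + 1) * RELSEG_SIZE),
+ * gets its repair_time set to the current timestamp (see BadBlockMatch).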
+ */
+void BatchUpdateRepairTime(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
+{
+    HASH_SEQ_STATUS status;
+    BadBlockEntry *entry = NULL;
+    HTAB* bad_block_hash = g_instance.repair_cxt.global_repair_bad_block_stat;
+
+    LWLockAcquire(RepairBadBlockStatHashLock, LW_EXCLUSIVE);
+    if (g_instance.repair_cxt.global_repair_bad_block_stat == NULL) {
+        LWLockRelease(RepairBadBlockStatHashLock);
+        return;
+    }
+
+    hash_seq_init(&status, bad_block_hash);
+    while ((entry = (BadBlockEntry *)hash_seq_search(&status)) != NULL) {
+        if (BadBlockMatch(entry, rnode, forknum, segno)) {
+            entry->repair_time = GetCurrentTimestamp();
+        }
+    }
+
+    LWLockRelease(RepairBadBlockStatHashLock);
+}
+
+void UpdateRepairTime(const RelFileNode &rnode, ForkNumber forknum, BlockNumber blocknum)
+{
+    bool found = false;
+    BadBlockKey key;
+
+    key.relfilenode.spcNode = rnode.spcNode;
+    key.relfilenode.dbNode = rnode.dbNode;
+    key.relfilenode.relNode = rnode.relNode;
+    key.relfilenode.bucketNode = rnode.bucketNode;
+    key.forknum = forknum;
+    key.blocknum = blocknum;
+
+    Assert(g_instance.repair_cxt.global_repair_bad_block_stat != NULL);
+    LWLockAcquire(RepairBadBlockStatHashLock, LW_EXCLUSIVE);
+    if (g_instance.repair_cxt.global_repair_bad_block_stat == NULL) {
+        LWLockRelease(RepairBadBlockStatHashLock);
+        return;
+    }
+    /* insert if not found; hash_search returns NULL on out-of-memory */
+    BadBlockEntry* entry =
+        (BadBlockEntry*)hash_search(g_instance.repair_cxt.global_repair_bad_block_stat, &key, HASH_ENTER, &found);
+
+    if (entry != NULL) {
+        if (!found) {
+            // Set the check time when the entry is inserted for the first time.
+            char* path = relpathperm(key.relfilenode, key.forknum);
+            errno_t rc = snprintf_s(entry->path, MAX_PATH, MAX_PATH - 1, "%s", path);
+            securec_check_ss(rc, "\0", "\0");
+            pfree(path);
+            entry->check_time = GetCurrentTimestamp();
+            entry->repair_time = GetCurrentTimestamp();
+            entry->key = key;
+        } else {
+            entry->repair_time = GetCurrentTimestamp();
+        }
+    }
+    LWLockRelease(RepairBadBlockStatHashLock);
+}
+
+void addGlobalRepairBadBlockStat(const RelFileNodeBackend &rnode, ForkNumber forknum, BlockNumber blocknum)
+{
+    TimestampTz check_time = GetCurrentTimestamp();
+    BadBlockKey key;
+    errno_t rc = 0;
+    bool found = false;
+
+    key.relfilenode.spcNode = rnode.node.spcNode;
+    key.relfilenode.dbNode = rnode.node.dbNode;
+    key.relfilenode.relNode = rnode.node.relNode;
+    key.relfilenode.bucketNode = rnode.node.bucketNode;
+    key.forknum = forknum;
+    key.blocknum = blocknum;
+
+    Assert(g_instance.repair_cxt.global_repair_bad_block_stat != NULL);
+    LWLockAcquire(RepairBadBlockStatHashLock, LW_EXCLUSIVE);
+    if (g_instance.repair_cxt.global_repair_bad_block_stat == NULL) {
+        LWLockRelease(RepairBadBlockStatHashLock);
+        return;
+    }
+
+    /* insert if not found; hash_search returns NULL on out-of-memory */
+    BadBlockEntry* entry =
+        (BadBlockEntry*)hash_search(g_instance.repair_cxt.global_repair_bad_block_stat, &key, HASH_ENTER, &found);
+
+    if (entry != NULL) {
+        if (!found) {
+            // Set the check time when the entry is inserted for the first time.
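+            // First sighting of this bad block: remember the relation file path
+            // (displayed by local_bad_block_info()) and leave repair_time as -1,
+            // which local_bad_block_info() reports as NULL until a repair succeeds.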
+            char* path = relpathperm(key.relfilenode, key.forknum);
+            rc = snprintf_s(entry->path, MAX_PATH, MAX_PATH - 1, "%s", path);
+            securec_check_ss(rc, "\0", "\0");
+            pfree(path);
+            entry->check_time = check_time;
+            entry->repair_time = -1;
+            entry->key = key;
+            entry->pblk.relNode = EXTENT_INVALID;
+            entry->pblk.block = InvalidBlockNumber;
+        }
+    }
+    LWLockRelease(RepairBadBlockStatHashLock);
+}
+
+bool CheckSum(const PageHeader page, BlockNumber blockNum)
+{
+    bool checksum_matched = false;
+    if (CheckPageZeroCases(page)) {
+        uint16 checksum = pg_checksum_page((char*)page, (BlockNumber)blockNum);
+        checksum_matched = (checksum == page->pd_checksum);
+    }
+    return checksum_matched;
+}
+
+// Only check whether the page is already in the buffer pool; do not read it from disk.
+Buffer PageIsInMemory(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
+{
+    Buffer buf = InvalidBuffer;
+    int buf_id = 0;
+    BufferDesc *bufDesc = NULL;
+    BufferTag new_tag;
+
+    INIT_BUFFERTAG(new_tag, smgr->smgr_rnode.node, forkNum, blockNum);
+    uint32 new_hash = BufTableHashCode(&new_tag);
+    LWLock *new_partition_lock = BufMappingPartitionLock(new_hash);
+    /* see if the block is in the buffer pool already */
+    (void)LWLockAcquire(new_partition_lock, LW_SHARED);
+    buf_id = BufTableLookup(&new_tag, new_hash);
+    if (buf_id != -1) {
+        ResourceOwnerEnlargeBuffers(t_thrd.utils_cxt.CurrentResourceOwner);
+        bufDesc = GetBufferDescriptor(buf_id);
+        buf = BufferDescriptorGetBuffer(bufDesc);
+        if (!PinBuffer(bufDesc, NULL)) {
+            buf = InvalidBuffer;
+        }
+    }
+    LWLockRelease(new_partition_lock);
+
+    return buf;
+}
+
+// Initialize the RelFileNode from the input file path.
+void PrepForRead(char* path, int64 blocknum, bool is_segment, RelFileNode *relnode)
+{
+    char* pathFirstpart = (char*)palloc0(MAXFNAMELEN);
+    errno_t rc = 0;
+    bool flag = false;
+
+    RelFileNodeForkNum relfilenode;
+    if (strlen(pathFirstpart) == 0) {
+        relfilenode = relpath_to_filenode(path);
+    } else {
+        relfilenode = relpath_to_filenode(pathFirstpart);
+    }
+    if (relfilenode.forknumber != MAIN_FORKNUM) {
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            (errmsg("Error forknum is: %d", relfilenode.forknumber))));
+    }
+    if (is_segment) {
+        relfilenode.rnode.node.bucketNode = SegmentBktId;
+        // e.g. base/16604/4161_b10426
+        char* bucketNodestr = strstr(path, "_b");
+        if (NULL != bucketNodestr) {
+            bucketNodestr += 2; /* skip the leading "_b" */
+            flag = StrToInt32(bucketNodestr, &(relfilenode.rnode.node.bucketNode));
+            if (!flag) {
+                ereport(ERROR, (errmsg("Cannot convert %s to int32 type.
\n", bucketNodestr))); + } + rc = strncpy_s(pathFirstpart, MAXFNAMELEN, path, strlen(path) - strlen(bucketNodestr)); + securec_check(rc, "\0", "\0"); + } + if (!IsSegmentPhysicalRelNode(relfilenode.rnode.node)) { + SMgrRelation reln = smgropen(relfilenode.rnode.node, InvalidBackendId); + bool exist = seg_exists(reln, MAIN_FORKNUM, blocknum); + bool found = false; + if (!(exist && reln->seg_desc[MAIN_FORKNUM] != NULL)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Error parameter to get smgr relation."))); + } + (void)SegBufferAlloc(reln->seg_space, relfilenode.rnode.node, MAIN_FORKNUM, blocknum, &found); + if (!found) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Error parameter to find buffer"))); + } + } + } else { + relfilenode.rnode.node.bucketNode = InvalidBktId; + } + RelFileNodeCopy(*relnode, relfilenode.rnode.node, relfilenode.rnode.node.bucketNode); + pfree(pathFirstpart); +} + +bool tryRepairPage(int blocknum, bool is_segment, RelFileNode *relnode, int timeout) +{ + char *buf = (char*)palloc0(BLCKSZ); + XLogPhyBlock pblk; + ForkNumber forknum = MAIN_FORKNUM; + RelFileNode logicalRelNode = {0}; + int logicalBlocknum = 0; + + SMgrRelation smgr = smgropen(*relnode, InvalidBackendId, GetColumnNum(forknum)); + + RelFileNodeBackend relnodeBack; + relnodeBack.node = *relnode; + relnodeBack.backend = InvalidBackendId; + bool isSegmentPhysical = is_segment && IsSegmentPhysicalRelNode(*relnode); + + if (is_segment && !isSegmentPhysical) { + SegPageLocation loc = + seg_get_physical_location(*relnode, forknum, blocknum); + pblk = { + .relNode = (uint8) EXTENT_SIZE_TO_TYPE(loc.extent_size), + .block = loc.blocknum, + .lsn = InvalidXLogRecPtr + }; + logicalRelNode.relNode = (uint8) EXTENT_SIZE_TO_TYPE(loc.extent_size); + logicalRelNode.spcNode = relnode->spcNode; + logicalRelNode.dbNode = relnode->dbNode; + logicalRelNode.bucketNode = SegmentBktId; + logicalBlocknum = loc.blocknum; + } + + // to repair page + if (is_segment && !isSegmentPhysical) { + RemoteReadBlock(relnodeBack, forknum, blocknum, buf, &pblk, timeout); + } else { + RemoteReadBlock(relnodeBack, forknum, blocknum, buf, NULL, timeout); + } + + if (PageIsVerified((Page)buf, blocknum)) { + if (is_segment) { + SegSpace* spc = spc_open(relnode->spcNode, relnode->dbNode, false, false); + if (!spc) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Spc open failed. 
spcNode is: %u, dbNode is %u", + relnode->spcNode, relnode->dbNode)))); + } + if (isSegmentPhysical) { + seg_physical_write(spc, *relnode, forknum, blocknum, buf, true); + } else { + seg_physical_write(spc, logicalRelNode, forknum, logicalBlocknum, buf, true); + } + } else { + smgrwrite(smgr, forknum, blocknum, buf, true); + } + BadBlockKey key; + + key.relfilenode.spcNode = relnode->spcNode; + key.relfilenode.dbNode = relnode->dbNode; + key.relfilenode.relNode = relnode->relNode; + key.relfilenode.bucketNode = relnode->bucketNode; + key.forknum = forknum; + key.blocknum = blocknum; + + Assert(g_instance.repair_cxt.global_repair_bad_block_stat != NULL); + + LWLockAcquire(RepairBadBlockStatHashLock, LW_EXCLUSIVE); + bool found = false; + BadBlockEntry* entry = + (BadBlockEntry*)hash_search(g_instance.repair_cxt.global_repair_bad_block_stat, &key, + HASH_ENTER, &found); + + TimestampTz currentTime = GetCurrentTimestamp(); + entry->repair_time = currentTime; + if (!found) { + char* path = relpathperm(key.relfilenode, key.forknum); + errno_t rc = snprintf_s(entry->path, MAX_PATH, MAX_PATH - 1, "%s", path); + securec_check_ss(rc, "\0", "\0"); + pfree(path); + entry->check_time = currentTime; + entry->pblk.relNode = EXTENT_INVALID; + entry->pblk.block = InvalidBlockNumber; + } + LWLockRelease(RepairBadBlockStatHashLock); + return true; + } else { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("remote get page check error"))); + return false; + } +} + +bool repairPage(char* path, uint blocknum, bool is_segment, int timeout) +{ + // check parameters + if (blocknum > MaxBlockNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Blocknum should be between 0 and %u. \n", MaxBlockNumber)))); + + RelFileNode relnode = {0}; + + if (timeout < TIMEOUT_MIN || timeout > TIMEOUT_MAX) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The timeout(%d) is an incorrect input. Value range: [60, 3600]. 
\n", timeout)))); + return false; + } + t_thrd.storage_cxt.timeoutRemoteOpera = timeout; + + PrepForRead((char*)path, blocknum, is_segment, &relnode); + + return tryRepairPage(blocknum, is_segment, &relnode, timeout); +} + +Datum local_bad_block_info(PG_FUNCTION_ARGS) +{ + checkUserPermission(); +#define BAD_BLOCK_STAT_NATTS 10 + FuncCallContext* funcctx = NULL; + HASH_SEQ_STATUS* hash_seq = NULL; + + LWLockAcquire(RepairBadBlockStatHashLock, LW_SHARED); + + if (SRF_IS_FIRSTCALL()) { + TupleDesc tupdesc = NULL; + MemoryContext oldcontext = NULL; + int i = 1; + + /* create a function context for cross-call persistence */ + funcctx = SRF_FIRSTCALL_INIT(); + /* + * switch to memory context appropriate for multiple function calls + */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + /* build tupdesc for result tuples */ + tupdesc = CreateTemplateTupleDesc(BAD_BLOCK_STAT_NATTS, false); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "node_name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "spc_node", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "db_node", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "rel_node", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "bucket_node", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "fork_num", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "block_num", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "file_path", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "check_time", TIMESTAMPTZOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "repair_time", TIMESTAMPTZOID, -1, 0); + + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + if (g_instance.repair_cxt.global_repair_bad_block_stat) { + hash_seq = (HASH_SEQ_STATUS*)palloc0(sizeof(HASH_SEQ_STATUS)); + hash_seq_init(hash_seq, g_instance.repair_cxt.global_repair_bad_block_stat); + } else { + (void)MemoryContextSwitchTo(oldcontext); + LWLockRelease(RepairBadBlockStatHashLock); + SRF_RETURN_DONE(funcctx); + } + + funcctx->user_fctx = (void*)hash_seq; + + (void)MemoryContextSwitchTo(oldcontext); + } + /* stuff done on every call of the function */ + funcctx = SRF_PERCALL_SETUP(); + if (funcctx->user_fctx != NULL) { + hash_seq = (HASH_SEQ_STATUS*)funcctx->user_fctx; + BadBlockEntry* badblock_entry = (BadBlockEntry*)hash_seq_search(hash_seq); + + if (badblock_entry != NULL) { + Datum values[BAD_BLOCK_STAT_NATTS]; + bool nulls[BAD_BLOCK_STAT_NATTS]; + HeapTuple tuple = NULL; + + errno_t rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + int i = 0; + + values[i++] = CStringGetTextDatum(g_instance.attr.attr_common.PGXCNodeName); + values[i++] = UInt32GetDatum(badblock_entry->key.relfilenode.spcNode); + values[i++] = UInt32GetDatum(badblock_entry->key.relfilenode.dbNode); + values[i++] = UInt32GetDatum(badblock_entry->key.relfilenode.relNode); + values[i++] = Int32GetDatum(badblock_entry->key.relfilenode.bucketNode); + values[i++] = Int32GetDatum(badblock_entry->key.forknum); + values[i++] = UInt32GetDatum(badblock_entry->key.blocknum); + values[i++] = CStringGetTextDatum(badblock_entry->path); + values[i++] = TimestampTzGetDatum(badblock_entry->check_time); + if (badblock_entry->repair_time == -1) { + nulls[i++] = true; + } else { + values[i++] = TimestampTzGetDatum(badblock_entry->repair_time); + } + + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + + LWLockRelease(RepairBadBlockStatHashLock); + 
SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } else { + LWLockRelease(RepairBadBlockStatHashLock); + SRF_RETURN_DONE(funcctx); + } + } else { + LWLockRelease(RepairBadBlockStatHashLock); + SRF_RETURN_DONE(funcctx); + } +} + +Datum local_clear_bad_block_info(PG_FUNCTION_ARGS) +{ + checkUserPermission(); + HASH_SEQ_STATUS hash_seq; + BadBlockEntry* tempEntry = NULL; + bool found = false; + uint32 no_repair_num = 0; + + LWLockAcquire(RepairBadBlockStatHashLock, LW_EXCLUSIVE); + + if (g_instance.repair_cxt.global_repair_bad_block_stat) { + hash_seq_init(&hash_seq, g_instance.repair_cxt.global_repair_bad_block_stat); + } else { + LWLockRelease(RepairBadBlockStatHashLock); + PG_RETURN_BOOL(false); + } + + while ((tempEntry = (BadBlockEntry*)hash_seq_search(&hash_seq)) != NULL) { + if (tempEntry->repair_time != -1) { + hash_search(g_instance.repair_cxt.global_repair_bad_block_stat, tempEntry, HASH_REMOVE, &found); + } else { + no_repair_num++; + } + } + + LWLockRelease(RepairBadBlockStatHashLock); + + PG_RETURN_BOOL(true); +} + +Datum gs_repair_page(PG_FUNCTION_ARGS) +{ + checkInstanceType(); + checkSupUserOrOperaMode(); + // read in parameters + char* path = text_to_cstring(PG_GETARG_TEXT_P(0)); + uint32 blockNum = PG_GETARG_UINT32(1); + bool is_segment = PG_GETARG_BOOL(2); + int32 timeout = PG_GETARG_INT32(3); + + bool result = repairPage(path, blockNum, is_segment, timeout); + PG_RETURN_BOOL(result); +} + +/* check whether the input path is a legal path */ +bool CheckRelDataFilePath(const char* path) +{ + const char *danger_character_list[] = {"|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", "{", + "}", "(", ")", "[", "]", "~", "*", "?", "!", "\n", " ", NULL}; + for (int i = 0; danger_character_list[i] != NULL; i++) { + if (strstr(path, danger_character_list[i]) != NULL) { + return false; + } + } + + return true; +} + +Datum gs_repair_file(PG_FUNCTION_ARGS) +{ + checkInstanceType(); + checkSupUserOrOperaMode(); + Oid tableOid = PG_GETARG_UINT32(0); + char* path = text_to_cstring(PG_GETARG_TEXT_P(1)); + int32 timeout = PG_GETARG_INT32(2); + + if (!CheckRelDataFilePath(path)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The input path(%s) is an incorrect relation file path input. \n", path)))); + return false; + } + + if (timeout < TIMEOUT_MIN || timeout > TIMEOUT_MAX) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The timeout(%d) is an incorrect input. Value range: [60, 3600]. 
\n", timeout)))); + return false; + } + t_thrd.storage_cxt.timeoutRemoteOpera = timeout; + + bool result = gsRepairFile(tableOid, path, timeout); + PG_RETURN_BOOL(result); +} + +void gs_verify_page_by_disk(SMgrRelation smgr, ForkNumber forkNum, int blockNum, char* disk_page_res) +{ + char* buffer = (char*)palloc0(BLCKSZ); + errno_t rc = 0; + SMGR_READ_STATUS rdStatus = smgrread(smgr, forkNum, blockNum, buffer); + if (rdStatus == SMGR_RD_CRC_ERROR) { + uint16 checksum = pg_checksum_page((char*)buffer, blockNum); + PageHeader pghr = (PageHeader)buffer; + rc = snprintf_s(disk_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "page verification failed, calculated checksum %hu but expected %hu.", + checksum, pghr->pd_checksum); + securec_check_ss(rc, "\0", "\0"); + addGlobalRepairBadBlockStat(smgr->smgr_rnode, forkNum, blockNum); + } else if (rdStatus == SMGR_RD_NO_BLOCK) { + rc = snprintf_s(disk_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "The page does not exist."); + securec_check_ss(rc, "\0", "\0"); + } else if (rdStatus == SMGR_RD_OK) { + rc = snprintf_s(disk_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "page verification succeeded."); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(disk_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "Unrecognized Error."); + securec_check_ss(rc, "\0", "\0"); + } + pfree(buffer); +} + +void splicMemPageMsg(bool isPageValid, bool isDirty, char* mem_page_res) +{ + errno_t rc = 0; + if (!isPageValid) { + rc = snprintf_s(mem_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "page in memory, page verification failed, calculated checksum is error."); + securec_check_ss(rc, "\0", "\0"); + } else if (isDirty) { + rc = snprintf_s(mem_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "page is dirty, page verification succeeded."); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(mem_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "page is not dirty, page verification succeeded."); + securec_check_ss(rc, "\0", "\0"); + } +} + +bool isNeedRepairPageByMem(char* disk_page_res, int blockNum, char* mem_page_res, + XLogPhyBlock *pblk, RelFileNode relnode) +{ + bool found = true; + bool need_repair = false; + bool isDirty = false; + bool isPageValid = false; + BufferDesc* buf_desc = NULL; + char* buffer = (char*)palloc0(BLCKSZ); + bool is_repair = false; + errno_t rc = 0; + SMGR_READ_STATUS rdStatus = SMGR_RD_CRC_ERROR; + SegSpace *spc = NULL; + + SMgrRelation smgr = smgropen(relnode, InvalidBackendId, GetColumnNum(MAIN_FORKNUM)); + /* check memory buffer */ + Buffer buf = PageIsInMemory(smgr, MAIN_FORKNUM, blockNum); + if (BufferIsInvalid(buf)) { + found = false; + rc = snprintf_s(mem_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, "page not in memory"); + securec_check_ss(rc, "\0", "\0"); + } else { + buf_desc = GetBufferDescriptor(buf - 1); + uint32 old_buf_state = LockBufHdr(buf_desc); + isDirty = old_buf_state & BM_DIRTY; + UnlockBufHdr(buf_desc, old_buf_state); + Page page = BufferGetPage(buf); + + isPageValid = (PageGetPageLayoutVersion(page) == PG_UHEAP_PAGE_LAYOUT_VERSION) ? + UPageHeaderIsValid((UHeapPageHeaderData *) page) : + PageHeaderIsValid((PageHeader)page); + splicMemPageMsg(isPageValid, isDirty, mem_page_res); + } + + if (IsSegmentPhysicalRelNode(relnode)) { + spc = spc_open(relnode.spcNode, relnode.dbNode, false, false); + if (!spc) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Spc open failed. 
spcNode is: %u, dbNode is %u", + relnode.spcNode, relnode.dbNode)))); + } + seg_physical_read(spc, relnode, MAIN_FORKNUM, blockNum, buffer); + if (PageIsVerified(buffer, blockNum)) { + rdStatus = SMGR_RD_OK; + } else { + rdStatus = SMGR_RD_CRC_ERROR; + } + } else { + rdStatus = smgrread(smgr, MAIN_FORKNUM, blockNum, buffer); + } + + if (rdStatus == SMGR_RD_OK) { + rc = snprintf_s(disk_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, "page verification succeeded."); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(disk_page_res, ERR_MSG_LEN, ERR_MSG_LEN - 1, + "page verification failed, calculated checksum is error."); + securec_check_ss(rc, "\0", "\0"); + need_repair = true; + } + + if (!found && need_repair) { + if (IsSegmentPhysicalRelNode(relnode)) { + const int TIMEOUT = 1200; + is_repair = tryRepairPage(blockNum, true, &relnode, TIMEOUT); + } else { + buf = ReadBufferWithoutRelcache(relnode, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL, pblk); + is_repair = true; + UpdateRepairTime(relnode, MAIN_FORKNUM, blockNum); + } + } else if (found && need_repair && isPageValid) { + buf_desc = GetBufferDescriptor(buf - 1); + if (!isDirty) { + LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); + MarkBufferDirty(buf); + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + } + LockBuffer(buf, BUFFER_LOCK_SHARE); + FlushBuffer(buf_desc, NULL, WITH_NORMAL_CACHE, true); + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + is_repair = true; + UpdateRepairTime(relnode, MAIN_FORKNUM, blockNum); + } + + if (!BufferIsInvalid(buf)) { + buf_desc = GetBufferDescriptor(buf - 1); + UnpinBuffer(buf_desc, true); + } + pfree(buffer); + return is_repair; +} + +Datum gs_verify_and_tryrepair_page(PG_FUNCTION_ARGS) +{ +#define REPAIR_BLOCK_STAT_NATTS 6 + checkInstanceType(); + checkSupUserOrOperaMode(); + /* read in parameters */ + char* path = text_to_cstring(PG_GETARG_TEXT_P(0)); + uint32 blockNum = PG_GETARG_UINT32(1); + bool verify_mem = PG_GETARG_BOOL(2); + bool is_segment = PG_GETARG_BOOL(3); + errno_t rc = 0; + TupleDesc tupdesc = NULL; + Datum values[6]; + bool nulls[6] = {false}; + HeapTuple tuple = NULL; + int i = 0; + char* disk_page_res = (char*)palloc0(ERR_MSG_LEN); + char* mem_page_res = (char*)palloc0(ERR_MSG_LEN); + bool is_repair = false; + int j = 1; + XLogPhyBlock pblk = {0, 0, 0}; + + /* build tupdesc for result tuples */ + tupdesc = CreateTemplateTupleDesc(REPAIR_BLOCK_STAT_NATTS, false); + TupleDescInitEntry(tupdesc, (AttrNumber)j++, "node_name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)j++, "path", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)j++, "blocknum", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)j++, "disk_page_res", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)j++, "mem_page_res", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)j++, "is_repair", BOOLOID, -1, 0); + tupdesc = BlessTupleDesc(tupdesc); + + rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + /* check parameters */ + if (blockNum > MaxBlockNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Blocknum should be between 0 and %u. 
\n", MaxBlockNumber)))); + + RelFileNode relnode = {0, 0, 0, -1}; + + PrepForRead((char*)path, blockNum, is_segment, &relnode); + bool isSegmentPhysical = is_segment && IsSegmentPhysicalRelNode(relnode); + + if (is_segment && !isSegmentPhysical) { + SegPageLocation loc = seg_get_physical_location(relnode, MAIN_FORKNUM, blockNum); + pblk.relNode = (uint8) EXTENT_SIZE_TO_TYPE(loc.extent_size); + pblk.block = loc.blocknum; + } + + SMgrRelation smgr = smgropen(relnode, InvalidBackendId, GetColumnNum(MAIN_FORKNUM)); + + if (!verify_mem && !IsSegmentPhysicalRelNode(relnode)) { // only check disk + gs_verify_page_by_disk(smgr, MAIN_FORKNUM, blockNum, disk_page_res); + } else { + if (is_segment && !isSegmentPhysical) { + is_repair = isNeedRepairPageByMem(disk_page_res, blockNum, mem_page_res, &pblk, relnode); + } else { + is_repair = isNeedRepairPageByMem(disk_page_res, blockNum, mem_page_res, NULL, relnode); + } + } + + values[i++] = CStringGetTextDatum(g_instance.attr.attr_common.PGXCNodeName); + values[i++] = CStringGetTextDatum(path); + values[i++] = UInt32GetDatum(blockNum); + values[i++] = CStringGetTextDatum(disk_page_res); + if (verify_mem) { + values[i++] = CStringGetTextDatum(mem_page_res); + } else { + nulls[i++] = true; /* memory res is null */ + } + values[i++] = BoolGetDatum(is_repair); + + tuple = heap_form_tuple(tupdesc, values, nulls); + pfree(disk_page_res); + pfree(mem_page_res); + PG_RETURN_DATUM(HeapTupleGetDatum(tuple)); +} + +/* + * Read block from buffer from primary, returning it as bytea + */ +Datum gs_read_segment_block_from_remote(PG_FUNCTION_ARGS) +{ + if (GetUserId() != BOOTSTRAP_SUPERUSERID) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be initial account to read files")))); + } + bytea* result = NULL; + + /* handle optional arguments */ + uint32 spcNode = PG_GETARG_UINT32(0); + uint32 dbNode = PG_GETARG_UINT32(1); + uint32 relNode = PG_GETARG_UINT32(2); + int16 bucketNode = PG_GETARG_INT16(3); + int32 forkNum = PG_GETARG_INT32(4); + uint64 blockNum = (uint64)PG_GETARG_TRANSACTIONID(5); + uint32 blockSize = PG_GETARG_UINT32(6); + uint64 lsn = (uint64)PG_GETARG_TRANSACTIONID(7); + uint32 seg_relNode = PG_GETARG_UINT32(8); + uint32 seg_block = PG_GETARG_UINT32(9); + int32 timeout = PG_GETARG_INT32(10); + + XLogPhyBlock pblk = { + .relNode = seg_relNode, + .block = seg_block, + .lsn = InvalidXLogRecPtr + }; + + RepairBlockKey key; + key.relfilenode.spcNode = spcNode; + key.relfilenode.dbNode = dbNode; + key.relfilenode.relNode = relNode; + key.relfilenode.bucketNode = bucketNode; + key.forknum = forkNum; + key.blocknum = blockNum; + + (void)StandbyReadPageforPrimary(key, blockSize, lsn, &result, timeout, &pblk); + + if (NULL != result) { + PG_RETURN_BYTEA_P(result); + } else { + PG_RETURN_NULL(); + } +} + +Datum gs_verify_data_file(PG_FUNCTION_ARGS) +{ + checkSupUserOrOperaMode(); +#define VERIFY_DATA_FILE_NATTS 4 + // read in parameters + FuncCallContext* funcctx = NULL; + bool is_segment = PG_GETARG_BOOL(0); + List *badFileItems = NIL; + bool isNull = false; + + if (SRF_IS_FIRSTCALL()) { + TupleDesc tupdesc = NULL; + HeapTuple classTup; + MemoryContext oldcontext = NULL; + int i = 1; + + /* create a function context for cross-call persistence */ + funcctx = SRF_FIRSTCALL_INIT(); + /* + * switch to memory context appropriate for multiple function calls + */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + /* build tupdesc for result tuples */ + tupdesc = CreateTemplateTupleDesc(VERIFY_DATA_FILE_NATTS, false); + 
TupleDescInitEntry(tupdesc, (AttrNumber)i++, "node_name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "rel_oid", OIDOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "rel_name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)i++, "miss_file_path", TEXTOID, -1, 0); + + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + + Relation relation = heap_open(RelationRelationId, AccessShareLock); + SysScanDesc scan = systable_beginscan(relation, InvalidOid, false, NULL, 0, NULL); + List *spcList = NIL; + while ((classTup = systable_getnext(scan)) != NULL) { + Form_pg_class classForm = (Form_pg_class)GETSTRUCT(classTup); + Datum bucketdatum = tableam_tops_tuple_getattr(classTup, Anum_pg_class_relbucket, + RelationGetDescr(relation), &isNull); + Oid bucketOid = ObjectIdGetDatum(bucketdatum); + if ((classForm->relkind != RELKIND_RELATION && + classForm->relkind != RELKIND_TOASTVALUE) || + (classForm->relpersistence != RELPERSISTENCE_PERMANENT && + classForm->relpersistence != RELPERSISTENCE_UNLOGGED) || + ((bucketOid <= 0) && is_segment) || + ((bucketOid > 0) && !is_segment)) { + continue; + } + + Oid relOid = 0; + Datum oiddatum = tableam_tops_tuple_getattr(classTup, ObjectIdAttributeNumber, + RelationGetDescr(relation), &isNull); + relOid = ObjectIdGetDatum(oiddatum); + Relation tableRel = heap_open(relOid, AccessShareLock); + + if (is_segment) { + spcList = appendIfNot(spcList, ConvertToRelfilenodeTblspcOid(classForm->reltablespace)); + } else { + badFileItems = getNonSegmentBadFiles(badFileItems, relOid, classForm, tableRel); + } + heap_close(tableRel, AccessShareLock); + } + systable_endscan(scan); + heap_close(relation, AccessShareLock); + if (spcList != NIL) { + badFileItems = getSegmentBadFiles(spcList, badFileItems); + } + funcctx->user_fctx = (void*)badFileItems; + (void)MemoryContextSwitchTo(oldcontext); + } + /* stuff done on every call of the function */ + funcctx = SRF_PERCALL_SETUP(); + if (funcctx->user_fctx != NULL) { + badFileItems = (List*)funcctx->user_fctx; + ListCell* badFileItemCell = list_head(badFileItems); + BadFileItem* badFileItem = (BadFileItem*)lfirst(badFileItemCell); + Datum values[VERIFY_DATA_FILE_NATTS]; + bool nulls[VERIFY_DATA_FILE_NATTS] = {false}; + HeapTuple tuple = NULL; + int i = 0; + errno_t rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + values[i++] = CStringGetTextDatum(g_instance.attr.attr_common.PGXCNodeName); + values[i++] = UInt32GetDatum(badFileItem->reloid); + values[i++] = CStringGetTextDatum(badFileItem->relname.data); + values[i++] = CStringGetTextDatum(badFileItem->relfilepath); + + badFileItems = list_delete_first(badFileItems); + funcctx->user_fctx = (void*)badFileItems; + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + SRF_RETURN_DONE(funcctx); +} + +List* getNonSegmentBadFiles(List* badFileItems, Oid relOid, Form_pg_class classForm, Relation tableRel) +{ + if (classForm->parttype == PARTTYPE_PARTITIONED_RELATION || + classForm->parttype == PARTTYPE_SUBPARTITIONED_RELATION) { + badFileItems = getPartitionBadFiles(tableRel, badFileItems, relOid); + } else { + badFileItems = getTableBadFiles(badFileItems, relOid, classForm, tableRel); + } + return badFileItems; +} + +List* getPartitionBadFiles(Relation tableRel, List* badFileItems, Oid relOid) +{ + HeapTuple partitionTup; + Relation prelation = heap_open(PartitionRelationId, AccessShareLock); + SysScanDesc pscan = systable_beginscan(prelation, 
InvalidOid, false, NULL, 0, NULL); + while ((partitionTup = systable_getnext(pscan)) != NULL) { + Form_pg_partition partitionForm = (Form_pg_partition)GETSTRUCT(partitionTup); + RelFileNode prnode = {0}; + Oid prelOid = 0; + bool isNull = false; + int maxSegno = 0; + + Datum poiddatum = tableam_tops_tuple_getattr(partitionTup, ObjectIdAttributeNumber, + RelationGetDescr(prelation), &isNull); + prelOid = ObjectIdGetDatum(poiddatum); + + if (partitionForm->relfilenode == 0 && partitionForm->parentid == relOid && + partitionForm->parttype == PARTTYPE_PARTITIONED_RELATION) { + badFileItems = getPartitionBadFiles(tableRel, badFileItems, prelOid); + continue; + } else if (partitionForm->relfilenode == 0 || partitionForm->parentid != relOid) { + continue; + } + + Partition ptmpRel = partitionOpen(tableRel, prelOid, AccessShareLock, InvalidBktId); + prnode = ptmpRel->pd_node; + partitionClose(tableRel, ptmpRel, AccessShareLock); + + char* path = relpathperm(prnode, MAIN_FORKNUM); + + struct stat statBuf; + if (stat(path, &statBuf) < 0) { + badFileItems = appendBadFileItems(badFileItems, prelOid, partitionForm->relname.data, path); + } + + maxSegno = getMaxSegno(&prnode); + if (maxSegno != 0) { + badFileItems = getSegnoBadFiles(path, maxSegno, prelOid, partitionForm->relname.data, badFileItems); + } + } + systable_endscan(pscan); + heap_close(prelation, AccessShareLock); + return badFileItems; +} + +int getMaxSegno(RelFileNode* prnode) +{ + const int POINT_LEN = 2; + int maxSegno = 0; + errno_t rc = 0; + char* oidStr; + char* dirPath = relSegmentDir(*prnode, MAIN_FORKNUM); + DIR* pdir = NULL; + struct dirent* ent = NULL; + + oidStr = (char*)palloc0(getIntLength(prnode->relNode) + POINT_LEN); + rc = snprintf_s(oidStr, getIntLength(prnode->relNode) + POINT_LEN, + getIntLength(prnode->relNode) + POINT_LEN - 1, "%u.", prnode->relNode); + securec_check_ss(rc, "\0", "\0"); + + pdir = opendir(dirPath); + if (NULL != pdir) { + while (NULL != (ent = readdir(pdir))) { + /* skip . and .. 
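+             * entries, plus any name that does not start with "<relNode>.",
+             * so only this relation's numbered segment files are scanned.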
*/ + if (0 == strcmp(ent->d_name, ".") || 0 == strcmp(ent->d_name, "..") || + 0 != strncmp(ent->d_name, oidStr, getIntLength(prnode->relNode) + 1)) { + continue; + } + int segno = 0; + Oid relNode = 0; + int nRet = 0; + nRet = sscanf_s(ent->d_name, "%u.%d", &relNode, &segno); + securec_check_ss_c(nRet, "", ""); + + if (segno > maxSegno) { + maxSegno = segno; + } + } + (void)closedir(pdir); + pdir = NULL; + } + pfree(oidStr); + return maxSegno; +} + +List* getTableBadFiles(List* badFileItems, Oid relOid, Form_pg_class classForm, Relation tableRel) +{ + int maxSegno = 0; + RelFileNode rnode = tableRel->rd_node; + char* path = relpathperm(rnode, MAIN_FORKNUM); + struct stat statBuf; + if (stat(path, &statBuf) < 0) { + badFileItems = appendBadFileItems(badFileItems, relOid, classForm->relname.data, path); + } + + if (classForm->relpersistence == RELPERSISTENCE_UNLOGGED) { + char* initPath = relpathperm(rnode, INIT_FORKNUM); + if (stat(initPath, &statBuf) < 0) { + badFileItems = appendBadFileItems(badFileItems, relOid, classForm->relname.data, initPath); + } + pfree(initPath); + } + + maxSegno = getMaxSegno(&rnode); + + if (maxSegno != 0) { + badFileItems = getSegnoBadFiles(path, maxSegno, relOid, classForm->relname.data, badFileItems); + } + pfree(path); + return badFileItems; +} + +List* getSegmentBadFiles(List* spcList, List* badFileItems) +{ + ListCell *currentCell = NULL; + struct stat statBuf; + foreach (currentCell, spcList) { + RelFileNode relFileNode = { + .spcNode = lfirst_oid(currentCell), + .dbNode = u_sess->proc_cxt.MyDatabaseId, + .relNode = 1, + .bucketNode = SegmentBktId + }; + char* segmentDir = relSegmentDir(relFileNode, MAIN_FORKNUM); + List* segmentFiles = getSegmentMainFilesPath(segmentDir, '/', 5); + ListCell *currentCell = NULL; + foreach(currentCell, segmentFiles) { + if (stat((char*)lfirst(currentCell), &statBuf) < 0) { + badFileItems = appendBadFileItems(badFileItems, 0, "none", (char*)lfirst(currentCell)); + } else { + uint32 highWater = getSegmentFileHighWater((char*)lfirst(currentCell)); + int fileNum = highWater / (REGR_MCR_SIZE_1GB / BLCKSZ) + 1; + badFileItems = getSegnoBadFiles((char*)lfirst(currentCell), fileNum - 1, 0, "none", badFileItems); + } + } + } + return badFileItems; +} + +List* getSegnoBadFiles(char* path, int maxSegno, Oid relOid, char* tabName, List* badFileItems) +{ + if (maxSegno < 1) { + return badFileItems; + } + struct stat statBuf; + List* segmentFiles = getSegmentMainFilesPath(path, '.', maxSegno); + ListCell *currentCell = NULL; + foreach(currentCell, segmentFiles) { + if (stat((char*)lfirst(currentCell), &statBuf) < 0) { + badFileItems = appendBadFileItems(badFileItems, relOid, tabName, (char*)lfirst(currentCell)); + } + } + return badFileItems; +} + +List* appendBadFileItems(List* badFileItems, Oid relOid, char* tabName, char* path) +{ + errno_t rc = 0; + BadFileItem *badFileItem = (BadFileItem*)palloc0(sizeof(BadFileItem)); + + badFileItem->reloid = relOid; + rc = snprintf_s(badFileItem->relname.data, NAMEDATALEN, + NAMEDATALEN - 1, "%s", tabName); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(badFileItem->relfilepath, MAX_PATH, + MAX_PATH - 1, "%s", path); + securec_check_ss(rc, "\0", "\0"); + badFileItems = lappend(badFileItems, badFileItem); + + return badFileItems; +} + +static int containsNums(const char *str, const char chr) +{ + int count = 0; + int i = 0; + while (*(str + i)) { + if (str[i] == chr) { + ++count; + } + ++i; + } + return count; +} + +char* relSegmentDir(RelFileNode rnode, ForkNumber forknum) +{ + if (forknum 
!= MAIN_FORKNUM) { + return NULL; + } + char* path = NULL; + char* pathDir = (char*)palloc0(MAX_PATH); + int times = 0; + char *token = NULL; + char *tmptoken = NULL; + + path = relpathperm(rnode, forknum); + times = containsNums(path, '/'); + token = strtok_r(path, "/", &tmptoken); + int index = 0; + while (token != NULL) { + errno_t rc = 0; + if (index == 0) { + rc = snprintf_s(pathDir, MAX_PATH, MAX_PATH - 1, "%s", token); + } else { + rc = snprintf_s(pathDir, MAX_PATH, MAX_PATH - 1, "%s/%s", pathDir, token); + } + securec_check_ss(rc, "\0", "\0"); + token = strtok_r(NULL, "/", &tmptoken); + if (times == ++index) { + break; + } + } + pfree(path); + return pathDir; +} + +List* getSegmentMainFilesPath(char* segmentDir, char split, int num) +{ + if (segmentDir == NULL) { + return NULL; + } + + List* segmentMainFilesPath = NIL; + + for (int i = 1; i <= num; i++) { + char* path = NULL; + int pathlen = strlen(segmentDir) + getIntLength(num) + 2; + path = (char*)palloc0(pathlen); + int rc = snprintf_s(path, pathlen, pathlen - 1, "%s%c%d", segmentDir, split, i); + securec_check_ss(rc, "\0", "\0"); + segmentMainFilesPath = lappend(segmentMainFilesPath, path); + } + return segmentMainFilesPath; +} + +List* appendIfNot(List* targetList, Oid datum) +{ + bool found = false; + ListCell *currentcell = NULL; + + foreach (currentcell, targetList) { + if (lfirst_oid(currentcell) == datum) { + found = true; + break; + } + } + + if (!found) { + targetList = lappend_oid(targetList, datum); + } + + return targetList; +} + +uint32 getSegmentFileHighWater(char* path) +{ + uint32 flags = O_RDWR | PG_BINARY; + int fd = -1; + char *buffer = (char*)palloc0(BLCKSZ); + uint32 result = 0; + st_df_map_head* head; + fd = BasicOpenFile(path, flags, S_IWUSR | S_IRUSR); + if (fd < 0) { + pfree(buffer); + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); + } + off_t offset = ((off_t)DF_MAP_HEAD_PAGE) * BLCKSZ; + pgstat_report_waitevent(WAIT_EVENT_DATA_FILE_READ); + int nbytes = pread(fd, buffer, BLCKSZ, offset); + pgstat_report_waitevent(WAIT_EVENT_END); + + if (close(fd)) { + pfree(buffer); + ereport(ERROR, (errcode_for_file_access(), errmsg("could not close file \"%s\": %m", path))); + } + + if (nbytes != BLCKSZ) { + pfree(buffer); + ereport(ERROR, + (errcode(MOD_SEGMENT_PAGE), + errcode_for_file_access(), + errmsg("could not read segment block %d in file %s", DF_MAP_HEAD_PAGE, path), + errdetail("errno: %d", errno))); + result = 0; + } else { + head = (st_df_map_head*)PageGetContents(buffer); + result = head->high_water_mark; + } + pfree(buffer); + return result; +} + +int getIntLength(uint32 intValue) +{ + int length = 1; + const int BITSMAX = 9; + const int TEN = 10; + + while (intValue > BITSMAX) { + length++; + intValue /= TEN; + } + return length; +} + +static bool PrimaryRepairSegFile(RemoteReadFileKey *repairFileKey, char* path, int32 seg_no, int32 maxSegno, + int timeout, int64 size) +{ + struct stat statBuf; + errno_t rc; + char* buf = NULL; + char *segpath = (char *)palloc0(strlen(path) + SEGLEN); + uint32 seg_size = (seg_no < maxSegno ? (RELSEG_SIZE * BLCKSZ) : (size % (RELSEG_SIZE * BLCKSZ))); + + if (seg_no == 0) { + rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s", path); + } else { + rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s.%d", path, seg_no); + } + securec_check_ss(rc, "", ""); + + if (stat(segpath, &statBuf) < 0) { + /* ENOENT is expected after the last segment... 
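+         * a missing earlier segment is exactly what this routine recreates: the
+         * remote copy is pulled over in MAX_BATCH_READ_BLOCKNUM-block batches
+         * into a "<segpath>.repair" temp file, which is durably renamed into
+         * place only after the whole write succeeds.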
*/ + if (errno != ENOENT) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not stat file \"%s\" before repair: %m", segpath))); + pfree(segpath); + return false; + } + + RelFileNodeBackend rnode; + rnode.node = repairFileKey->relfilenode; + rnode.backend = InvalidBackendId; + RelFileNodeForkNum fileNode; + fileNode.rnode = rnode; + fileNode.forknumber = repairFileKey->forknum; + fileNode.segno = seg_no; + fileNode.storage = ROW_STORE; + if (!repair_deleted_file_check(fileNode, -1)) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not repair file \"%s\" before deleted not closed: %m", segpath))); + pfree(segpath); + return false; + } + CacheInvalidateSmgr(rnode); + + int fd = CreateRepairFile(segpath); + if (fd < 0) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not create repair file \"%s\", segno is %d", + relpathperm(repairFileKey->relfilenode, repairFileKey->forknum), seg_no))); + pfree(segpath); + return false; + } + + buf = (char*)palloc0(MAX_BATCH_READ_BLOCKNUM * BLCKSZ); + int batch_size = MAX_BATCH_READ_BLOCKNUM * BLCKSZ; + int max_times = seg_size % batch_size == 0 ? seg_size / batch_size : (seg_size / batch_size + 1); + + for (int j = 0; j < max_times; j++) { + int read_size = 0; + uint32 remote_size = 0; + repairFileKey->blockstart = seg_no * RELSEG_SIZE + j * MAX_BATCH_READ_BLOCKNUM; + if (seg_size % batch_size != 0) { + read_size = (j == max_times - 1 ? seg_size % batch_size : batch_size); + } else { + read_size = batch_size; + } + + RemoteReadFile(repairFileKey, buf, read_size, timeout, &remote_size); + + rc = WriteRepairFile(fd, segpath, buf, j * batch_size, read_size); + if (rc != 0) { + (void)close(fd); + pfree(buf); + pfree(segpath); + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not write repair file \"%s\", segno is %d", + relpathperm(repairFileKey->relfilenode, repairFileKey->forknum), seg_no))); + return false; + } + } + + if (!repair_deleted_file_check(fileNode, fd)) { + (void)close(fd); + } + pfree(buf); + rc = CheckAndRenameFile(segpath); + if (rc != 0) { + pfree(segpath); + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not rename file \"%s\", segno is %d", + relpathperm(repairFileKey->relfilenode, repairFileKey->forknum), seg_no))); + return false; + } + BatchUpdateRepairTime(repairFileKey->relfilenode, repairFileKey->forknum, seg_no); + } + pfree(segpath); + return true; +} + +void CreateZeroFile(char* path) +{ + int fd = -1; + int retry_times = 0; + const int MAX_RETRY_TIME = 2; + + while (fd < 0) { + retry_times++; + fd = BasicOpenFile((char*)path, O_CREAT | O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); + + if (fd < 0) { + if (retry_times < MAX_RETRY_TIME) { + continue; + } + if (errno != ENOENT) { + ereport(WARNING, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", path))); + return; + } + ereport(ERROR, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", path))); + } + (void)close(fd); + return; + } +} + +static void checkFileNeedCreate(char* firstPath, RemoteReadFileKey key) +{ + struct stat statBuf; + if (stat(firstPath, &statBuf) < 0 && errno == ENOENT) { + CreateZeroFile(firstPath); + ereport(WARNING, (errmsg("standby size is zero, only create file, file path is %s", firstPath))); + } + if (key.forknum == INIT_FORKNUM) { + char* unlogPath = relpathperm(key.relfilenode, MAIN_FORKNUM); + if (stat(unlogPath, &statBuf) < 0) { + CreateZeroFile(unlogPath); + ereport(WARNING, (errmsg("standby size is zero, only create file, file path is %s", 
unlogPath))); + } + pfree(unlogPath); + } + return; +} + +bool gsRepairFile(Oid tableOid, char* path, int timeout) +{ + Relation relation = NULL; + bool isSegment = false; + RelFileNodeForkNum relFileNodeForkNum; + RemoteReadFileKey repairFileKey; + bool isCsnOrCLog = isCLogOrCsnLogPath(path); + if (isCsnOrCLog) { + return gsRepairCsnOrCLog(path, timeout); + } + + relFileNodeForkNum = relpath_to_filenode(path); + isSegment = relFileNodeForkNum.rnode.node.relNode <= 5 && relFileNodeForkNum.rnode.node.relNode > 0; + if (isSegment) { + relFileNodeForkNum.rnode.node.bucketNode = SegmentBktId; + DirectFunctionCall2(pg_advisory_xact_lock_int4, t_thrd.postmaster_cxt.xc_lockForBackupKey1, + t_thrd.postmaster_cxt.xc_lockForBackupKey2); + } else { + relation = heap_open(tableOid, AccessExclusiveLock); + } + + repairFileKey.relfilenode = relFileNodeForkNum.rnode.node; + repairFileKey.forknum = relFileNodeForkNum.forknumber; + char* firstPath = relpathperm(repairFileKey.relfilenode, repairFileKey.forknum); + + int64 size = RemoteReadFileSize(&repairFileKey, timeout); + if (size == -1) { + if (!isSegment) { + heap_close(relation, AccessExclusiveLock); + } + ereport(WARNING, + (errmsg("The file does not exist on the standby DN, don't need repair, file path is %s", firstPath))); + pfree(firstPath); + BatchClearBadBlock(repairFileKey.relfilenode, repairFileKey.forknum, 0); + return false; + } + if (size == 0) { + ereport(WARNING, (errmsg("standby size is zero, file path is %s", firstPath))); + checkFileNeedCreate(firstPath, repairFileKey); + pfree(firstPath); + BatchClearBadBlock(repairFileKey.relfilenode, repairFileKey.forknum, 0); + return true; + } + + int maxSegno = size / (RELSEG_SIZE * BLCKSZ); + + for (int i = 0; i <= maxSegno; i++) { + bool repair = PrimaryRepairSegFile(&repairFileKey, firstPath, i, maxSegno, timeout, size); + if (!repair) { + ereport(WARNING, (errmsg("repair file %s seg_no is %d, failed", path, i))); + pfree(firstPath); + return false; + } + } + + if (isSegment) { + RepairFileKey key; + key.relfilenode = repairFileKey.relfilenode; + key.forknum = repairFileKey.forknum; + key.segno = maxSegno; + df_close_all_file(key, maxSegno); + df_open_all_file(key, maxSegno); + } else { + heap_close(relation, AccessExclusiveLock); + } + pfree(firstPath); + return true; +} + +int CheckAndRenameFile(char* path) +{ + char *tempPath = (char *)palloc0(strlen(path) + SEGLEN); + errno_t rc; + + rc = sprintf_s(tempPath, strlen(path) + SEGLEN, "%s.repair", path); + securec_check_ss(rc, "", "") + rc = durable_rename(tempPath, path, ERROR); + if (rc != 0) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not stat file \"%s\":%m", path))); + pfree(tempPath); + return -1; + } else { + ereport(LOG, (errmodule(MOD_REDO), + errmsg("file rename from %s to %s finish", tempPath, path))); + } + pfree(tempPath); + return 0; +} + +bool isCLogOrCsnLogPath(char* path) +{ + if ((strstr((char *)path, "pg_clog/")) != NULL || (strstr((char *)path, "pg_csnlog/")) != NULL) { + return true; + } + return false; +} + +bool gsRepairCsnOrCLog(char* path, int timeout) +{ + struct stat statBuf; + if (stat(path, &statBuf) >= 0) { + ereport(ERROR, + (errmsg("file %s exists.", path))); + return false; + } + int nmatch = 0; + char* logType = (char*)palloc0(MAX_PATH_LEN); + Oid logName = 0; + int transType = 0; + char* tmptoken = NULL; + errno_t rc = strcpy_s(logType, strlen(path) + 1, path); + securec_check(rc, "\0", "\0"); + strtok_s(logType, "/", &tmptoken); + nmatch = sscanf_s(tmptoken, "%u", &logName); + if (nmatch 
!= 1) {
+        pfree(logType);
+        ereport(ERROR, (errcode_for_file_access(),
+            errmsg("path does not contain a valid logName")));
+        return false;
+    }
+
+    if (strcmp(logType, "pg_clog") == 0) {
+        transType = 1;
+    } else if (strcmp(logType, "pg_csnlog") == 0) {
+        transType = 2;
+    } else {
+        pfree(logType);
+        ereport(ERROR, (errcode_for_file_access(),
+            errmsg("path does not match clog or csnlog")));
+        return false;
+    }
+    pfree(logType);
+
+    RelFileNode relFileNode = {
+        .spcNode = (uint32)transType,
+        .dbNode = 0,
+        .relNode = logName,
+        .bucketNode = InvalidBktId
+    };
+
+    RemoteReadFileKey repairFileKey = {
+        .relfilenode = relFileNode,
+        .forknum = MAIN_FORKNUM,
+        .blockstart = 0
+    };
+
+    uint32 log_size = 16 * REGR_MCR_SIZE_1MB;
+
+    int fd = CreateRepairFile(path);
+    if (fd < 0) {
+        ereport(WARNING, (errcode_for_file_access(),
+            errmsg("could not create repair file \"%s\"", path)));
+        return false;
+    }
+
+    char* buf = (char*)palloc0(log_size);
+    uint32 remote_size = 0;
+    RemoteReadFile(&repairFileKey, buf, log_size, timeout, &remote_size);
+
+    rc = WriteRepairFile(fd, path, buf, 0, remote_size);
+    if (rc != 0) {
+        pfree(buf);
+        (void)close(fd);
+        return false;
+    }
+    pfree(buf);
+    (void)close(fd);
+    CheckAndRenameFile(path);
+    return true;
+}
+
+static void checkSupUserOrOperaMode()
+{
+    if (!CheckVerionSupportRepair()) {
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("Verifying and repairing pages and files is not supported yet")));
+    }
+    if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) {
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            (errmsg("Must be system admin or operator admin in operation mode to call this function."))));
+    }
+}
+
+static void checkUserPermission()
+{
+    if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)
+        && !isMonitoradmin(GetUserId())) {
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            (errmsg("Must be system admin, operator admin in operation mode or monitor admin "
+                "to call this function."))));
+    }
+}
+
+static void checkInstanceType()
+{
+    load_server_mode();
+
+    if (t_thrd.xlog_cxt.server_mode != PRIMARY_MODE) {
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+            (errmsg("Must be run on the primary DN."))));
+    }
+}
\ No newline at end of file
diff --git a/src/gausskernel/optimizer/commands/view.cpp b/src/gausskernel/optimizer/commands/view.cpp
index d9fa1f9af..c38cd816d 100644
--- a/src/gausskernel/optimizer/commands/view.cpp
+++ b/src/gausskernel/optimizer/commands/view.cpp
@@ -146,7 +146,7 @@ static Oid DefineVirtualRelation(RangeVar* relation, List* tlist, bool replace,
      * namespace is temporary.
      */
     lockmode = replace ? AccessExclusiveLock : NoLock;
-    (void)RangeVarGetAndCheckCreationNamespace(relation, lockmode, &viewOid);
+    (void)RangeVarGetAndCheckCreationNamespace(relation, lockmode, &viewOid, RELKIND_VIEW);
     bool flag = OidIsValid(viewOid) && replace;
     if (flag) {
@@ -557,7 +557,7 @@ Oid DefineView(ViewStmt* stmt, const char* queryString, bool send_remote, bool i
     if (stmt->relkind == OBJECT_MATVIEW) {
         /* Relation Already Created */
-        (void)RangeVarGetAndCheckCreationNamespace(view, NoLock, &viewOid);
+        (void)RangeVarGetAndCheckCreationNamespace(view, NoLock, &viewOid, RELKIND_MATVIEW);
 #ifdef ENABLE_MULTIPLE_NODES
         /* try to send CREATE MATERIALIZED VIEW to DNs, Only consider PGXC now.
*/ diff --git a/src/gausskernel/optimizer/path/allpaths.cpp b/src/gausskernel/optimizer/path/allpaths.cpp index b8d1e2c75..51dc7d2ab 100755 --- a/src/gausskernel/optimizer/path/allpaths.cpp +++ b/src/gausskernel/optimizer/path/allpaths.cpp @@ -388,6 +388,63 @@ static void set_correlated_rel_pathlist(PlannerInfo* root, RelOptInfo* rel) return; } +#ifdef ENABLE_MULTIPLE_NODES +/* + * Check we could reduce broadcast above scan of predpush subquery or not. + */ +static bool reduce_predpush_broadcast(PlannerInfo* root, Path *path) +{ + bool reduce = false; + ItstDisKey dis_keys = root->dis_keys; + + if (list_length(path->distribute_keys) != 1) { + return false; + } + + if (!IsA((Node*)linitial(path->distribute_keys), Var)) { + return false; + } + + Var *dist_key = (Var *)linitial(path->distribute_keys); + + if (dis_keys.superset_keys != NULL && list_length(dis_keys.superset_keys) == 1) { + List *matching = (List *)linitial(dis_keys.superset_keys); + + if (list_length(matching) == 1) { + Var *var = (Var *)linitial(matching); + if (equal(var, dist_key)) { + reduce = true; + } + } + } + + return reduce; +} + +static Path *make_predpush_subpath(PlannerInfo* root, RelOptInfo* rel, Path *path) +{ + List* quals = NULL; + Path *subpath = NULL; + ListCell* lc = NULL; + Bitmapset* upper_params = NULL; + + foreach (lc, rel->subplanrestrictinfo) { + RestrictInfo *res_info = (RestrictInfo*)lfirst(lc); + quals = lappend(quals, res_info->clause); + } + + /* get the upper param IDs */ + if (SUBQUERY_PREDPUSH(root)) { + upper_params = collect_param_clause((Node*)quals); + } + + subpath = (Path*)create_result_path(root, rel, quals, path, upper_params); + + return subpath; +} + +#endif + /* * set_base_rel_pathlists * Finds all paths available for scanning each base-relation entry. @@ -461,6 +518,26 @@ static void set_base_rel_pathlists(PlannerInfo* root) break; } + /* Here we want to reduce broadcast in predpush */ + if (SUBQUERY_PREDPUSH(root) && + rel->subplanrestrictinfo != NIL && + reduce_predpush_broadcast(root, path)) { + subpath = make_predpush_subpath(root, rel, path); + + /* Do not free old path, maybe it's used in other places. 
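+             * The result path built by make_predpush_subpath() still references
+             * it as its input subpath, so replacing the list cell in place is
+             * the safe way to swap in the new path.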
*/ + lfirst(lc) = subpath; + if (lc2 != NULL) { + lfirst(lc2) = subpath; + } + + /* Set cheapest startup path as result + predpush-index path */ + if (is_cheapest_startup) { + rel->cheapest_startup_path = subpath; + } + + continue; + } + Distribution* distribution = ng_get_dest_distribution(path); Distribution* target_distribution = ng_get_correlated_subplan_group_distribution(); @@ -3653,9 +3730,14 @@ static void make_partiterator_pathkey( int2vector* partitionKey = NULL; ListCell* pk_cell = NULL; ListCell* rt_ec_cell = NULL; + IndexesUsableType usable_type; Oid indexOid = ((IndexPath*)itrpath->subPath)->indexinfo->indexoid; - IndexesUsableType usable_type = eliminate_partition_index_unusable(indexOid, rel->pruning_result, NULL, NULL); - + if (u_sess->attr.attr_sql.enable_hypo_index && ((IndexPath *)itrpath->subPath)->indexinfo->hypothetical) { + /* hypothetical index does not support partition index unusable */ + usable_type = INDEXES_FULL_USABLE; + } else { + usable_type = eliminate_partition_index_unusable(indexOid, rel->pruning_result, NULL, NULL); + } if (INDEXES_FULL_USABLE != usable_type) { /* some index partition is unusable */ OPT_LOG(DEBUG2, "fail to inherit pathkeys since some index partition is unusable"); @@ -3958,71 +4040,6 @@ static Path* create_partiterator_path(PlannerInfo* root, RelOptInfo* rel, Path* return result; } -/* - * Compute the number of parallel workers that should be used to scan a - * relation. We compute the parallel workers based on the size of the heap to - * be scanned and the size of the index to be scanned, then choose a minimum - * of those. - * - * "heap_pages" is the number of pages from the table that we expect to scan, or - * -1 if we don't expect to scan any. - * - */ -int compute_parallel_worker(const RelOptInfo *rel, double heap_pages, int rel_maxworker) -{ - int parallel_workers = 0; - int max_workers = max_parallel_maintenance_workers; - - if (rel_maxworker != -1) { - max_workers = Min(max_workers, rel_maxworker); - } - - /* - * If the number of pages being scanned is insufficient to justify a - * parallel scan, just return zero ... unless it's an inheritance - * child. In that case, we want to generate a parallel path here - * anyway. It might not be worthwhile just for this relation, but - * when combined with all of its inheritance siblings it may well pay - * off. - */ - if ((rel->reloptkind == RELOPT_BASEREL && - heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) || - max_workers == 0) { - return 0; - } - - /* Return what user tell us */ - if (rel_maxworker != -1) { - return max_workers; - } - - if (heap_pages >= 0) { - int heap_parallel_threshold; - int heap_parallel_workers = 1; - /* - * Select the number of workers based on the log of the size of - * the relation. This probably needs to be a good deal more - * sophisticated, but we need something here for now. Note that - * the upper limit of the min_parallel_table_scan_size GUC is - * chosen to prevent overflow here. 
- */ - heap_parallel_threshold = Max(min_parallel_table_scan_size, 1); - while (heap_pages >= (BlockNumber)heap_parallel_threshold) { - heap_parallel_workers++; - heap_parallel_threshold *= 4; - if (heap_parallel_threshold > INT_MAX / 4) { - break; /* avoid overflow */ - } - } - parallel_workers = heap_parallel_workers; - } - - /* In no case use more than caller supplied maximum number of workers */ - parallel_workers = Min(parallel_workers, max_workers); - - return parallel_workers; -} - /* * try to add control operator PartIterator over scan operator, so we can * scan all selected partitions diff --git a/src/gausskernel/optimizer/path/clausesel.cpp b/src/gausskernel/optimizer/path/clausesel.cpp index 3714599af..e26a57713 100755 --- a/src/gausskernel/optimizer/path/clausesel.cpp +++ b/src/gausskernel/optimizer/path/clausesel.cpp @@ -43,6 +43,7 @@ */ typedef struct RangeQueryClause { struct RangeQueryClause* next; /* next in linked list */ + Expr* clause; /* the second clause for range-query */ Node* var; /* The common variable of the clauses */ bool have_lobound; /* found a low-bound clause yet? */ bool have_hibound; /* found a high-bound clause yet? */ @@ -184,6 +185,7 @@ Selectivity clauselist_selectivity( rinfo = (RestrictInfo*)clause; if (rinfo->pseudoconstant) { s1 = s1 * s2; + rinfo->clause->selec = s2; continue; } clause = (Node*)rinfo->clause; @@ -211,20 +213,26 @@ Selectivity clauselist_selectivity( break; default: /* Just merge the selectivity in generically */ - if ((uint32)u_sess->attr.attr_sql.cost_param & COST_ALTERNATIVE_CONJUNCT) + if ((uint32)u_sess->attr.attr_sql.cost_param & COST_ALTERNATIVE_CONJUNCT) { s1 = MIN(s1, s2); - else + expr->xpr.selec = s1; + } else { s1 = s1 * s2; + expr->xpr.selec = s2; + } break; } continue; } /* Not the right form, so treat it generically. 
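+             * Under COST_ALTERNATIVE_CONJUNCT the running selectivity s1 takes
+             * the minimum of the two estimates; otherwise they are multiplied.
+             * Either way this patch also caches the per-clause value in
+             * xpr.selec, presumably for reuse by later costing.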
*/ - if ((uint32)u_sess->attr.attr_sql.cost_param & COST_ALTERNATIVE_CONJUNCT) + if ((uint32)u_sess->attr.attr_sql.cost_param & COST_ALTERNATIVE_CONJUNCT) { s1 = MIN(s1, s2); - else + expr->xpr.selec = s1; + } else { s1 = s1 * s2; + expr->xpr.selec = s2; + } } /* @@ -276,12 +284,16 @@ Selectivity clauselist_selectivity( } /* Merge in the selectivity of the pair of clauses */ s1 *= s2; + rqlist->clause->selec = s2; } else { /* Only found one of a pair, merge it in generically */ - if (rqlist->have_lobound) + if (rqlist->have_lobound) { s1 *= rqlist->lobound; - else + rqlist->clause->selec = rqlist->lobound; + } else { s1 *= rqlist->hibound; + rqlist->clause->selec = rqlist->hibound; + } } varlist = lappend(varlist, rqlist->var); /* release storage and advance */ @@ -365,6 +377,7 @@ static void addRangeClause(RangeQueryClause** rqlist, Node* clause, bool varonle rqelem->hibound = s2; } } + rqelem->clause = (Expr*)clause; return; } @@ -380,6 +393,8 @@ static void addRangeClause(RangeQueryClause** rqlist, Node* clause, bool varonle rqelem->have_hibound = true; rqelem->hibound = s2; } + rqelem->clause = (Expr*)clause; + rqelem->clause->selec = s2; rqelem->next = *rqlist; *rqlist = rqelem; } diff --git a/src/gausskernel/optimizer/path/costsize.cpp b/src/gausskernel/optimizer/path/costsize.cpp index 9abcc12b0..14765833e 100755 --- a/src/gausskernel/optimizer/path/costsize.cpp +++ b/src/gausskernel/optimizer/path/costsize.cpp @@ -1038,6 +1038,11 @@ static bool enable_parametrized_path(PlannerInfo* root, RelOptInfo* baserel, Pat { Assert(path != NULL); + if (!ENABLE_SQL_BETA_FEATURE(PREDPUSH_SAME_LEVEL)) { + /* sql beta feature is necessary */ + return false; + } + if (ENABLE_PRED_PUSH_FORCE(root) && is_predpush_dest(root, baserel->relids)) { if (path->param_info) { return !bms_is_subset(path->param_info->ppi_req_outer, predpush_candidates_same_level(root)); @@ -1049,6 +1054,9 @@ static bool enable_parametrized_path(PlannerInfo* root, RelOptInfo* baserel, Pat return false; } +#define HEAP_PAGES_FETCHED(isUstore, pages_fetched, allvisfrac) \ + (isUstore) ? 0.0 : ceil((pages_fetched) * (1.0 - (allvisfrac))) + /* * cost_index * Determines and returns the cost of scanning a relation using an index. @@ -1071,6 +1079,7 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count) { IndexOptInfo* index = path->indexinfo; RelOptInfo* baserel = index->rel; + bool isUstore = baserel->is_ustore; bool indexonly = (path->path.pathtype == T_IndexOnlyScan); List* allclauses = NIL; Cost startup_cost = 0; @@ -1178,6 +1187,8 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count) * We use the measured fraction of the entire heap that is all-visible, * which might not be particularly relevant to the subset of the heap * that this query will fetch; but it's not clear how to do better. + * For ustore, there is visibility info in index so we will not need to + * fetch any heap pages for index-only scan. 
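+	 * The new HEAP_PAGES_FETCHED macro encodes exactly this: for a ustore
+	 * relation an index-only scan fetches 0.0 heap pages, otherwise the
+	 * usual ceil(pages_fetched * (1.0 - allvisfrac)) estimate is kept.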
* ---------- */ @@ -1198,7 +1209,7 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count) tuples_fetched * loop_count, (BlockNumber)baserel->pages, (double)index->pages, root, ispartitionedindex); if (indexonly) - pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac)); + pages_fetched = HEAP_PAGES_FETCHED(isUstore, pages_fetched, baserel->allvisfrac); /* Apply cost mod */ spc_random_page_cost = RANDOM_PAGE_COST(use_modded_cost, old_random_page_cost, \ @@ -1227,7 +1238,7 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count) pages_fetched * loop_count, (BlockNumber)baserel->pages, (double)index->pages, root, ispartitionedindex); if (indexonly) - pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac)); + pages_fetched = HEAP_PAGES_FETCHED(isUstore, pages_fetched, baserel->allvisfrac); /* Apply cost mod after new pages fetched */ spc_random_page_cost = RANDOM_PAGE_COST(use_modded_cost, old_random_page_cost, \ @@ -1248,7 +1259,7 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count) tuples_fetched, (BlockNumber)baserel->pages, (double)index->pages, root, ispartitionedindex); if (indexonly) - pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac)); + pages_fetched = HEAP_PAGES_FETCHED(isUstore, pages_fetched, baserel->allvisfrac); /* Apply cost mod */ spc_random_page_cost = RANDOM_PAGE_COST(use_modded_cost, old_random_page_cost, \ @@ -1266,7 +1277,7 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count) pages_fetched = ceil(indexSelectivity * (double)baserel->pages); if (indexonly) - pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac)); + pages_fetched = HEAP_PAGES_FETCHED(isUstore, pages_fetched, baserel->allvisfrac); if (pages_fetched > 0) { /* Apply cost mod after new pages fetched */ @@ -1617,6 +1628,13 @@ void cost_bitmap_heap_scan( */ pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched); } + + ereport(DEBUG2, + (errmodule(MOD_OPT), + errmsg("Computing IndexScanCost: pagesFetched: %lf, tuples_fetched: %lf, indexSelectivity: %lf," + " indexTotalCost: %lf, loopCount: %lf, T: %lf", + pages_fetched, tuples_fetched, indexSelectivity, indexTotalCost, loop_count, T))); + if (pages_fetched >= T) { pages_fetched = T; } else { @@ -1652,6 +1670,10 @@ void cost_bitmap_heap_scan( cpu_per_tuple = u_sess->attr.attr_sql.cpu_tuple_cost + qpqual_cost.per_tuple; run_cost += cpu_per_tuple * tuples_fetched; + ereport(DEBUG2, + (errmodule(MOD_OPT), + errmsg("Computing IndexScanCost: startupCost: %lf, runCost: %lf", + startup_cost, run_cost))); path->startup_cost = startup_cost; path->total_cost = startup_cost + run_cost; @@ -4296,6 +4318,7 @@ void final_cost_hashjoin(PlannerInfo* root, HashPath* path, JoinCostWorkspace* w path->jpath.path.startup_cost = startup_cost; path->jpath.path.total_cost = startup_cost + run_cost; path->jpath.path.stream_cost = inner_path->stream_cost; + path->joinRows = hashjointuples; if (!u_sess->attr.attr_sql.enable_hashjoin && hasalternative) path->jpath.path.total_cost *= g_instance.cost_cxt.disable_cost_enlarge_factor; diff --git a/src/gausskernel/optimizer/path/es_selectivity.cpp b/src/gausskernel/optimizer/path/es_selectivity.cpp index b35764c0b..73e6ef109 100644 --- a/src/gausskernel/optimizer/path/es_selectivity.cpp +++ b/src/gausskernel/optimizer/path/es_selectivity.cpp @@ -1,2551 +1,2589 @@ -/* ------------------------------------------------------------------------- - * - * es_selectivity.cpp - * Routines to compute multi-column 
selectivities - * - * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/gausskernel/optimizer/path/es_selectivity.cpp - * - * ------------------------------------------------------------------------- - */ -#include "postgres.h" - -#include "catalog/pg_collation.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_statistic.h" -#include "optimizer/cost.h" -#include "optimizer/pathnode.h" -#include "utils/guc.h" -#include "utils/lsyscache.h" -#include "nodes/print.h" -#include "parser/parsetree.h" -#include "utils/extended_statistics.h" - -const int TOW_MEMBERS = 2; - -ES_SELECTIVITY::ES_SELECTIVITY() - : es_candidate_list(NULL), - es_candidate_saved(NULL), - unmatched_clause_group(NULL), - root(NULL), - sjinfo(NULL), - origin_clauses(NULL), - path(NULL), - bucketsize_list(NULL) -{} - -ES_SELECTIVITY::~ES_SELECTIVITY() -{} - -bool ES_SELECTIVITY::ContainIndexCols(const es_candidate* es, const IndexOptInfo* index) const -{ - for (int pos = 0; pos < index->ncolumns; pos++) { - int indexAttNum = index->indexkeys[pos]; - /* - * Notice: indexAttNum can be negative. Some indexAttNums of junk column may be negative - * since they are located before the first visible column. for example, the indexAttNum - * of 'oid' column in system table 'pg_class' is -2. - */ - if (indexAttNum >= 0 && !bms_is_member(indexAttNum, es->left_attnums)) - return false; - } - - return true; -} - -bool ES_SELECTIVITY::MatchUniqueIndex(const es_candidate* es) const -{ - ListCell* lci = NULL; - foreach (lci, es->left_rel->indexlist) { - IndexOptInfo* indexToMatch = (IndexOptInfo*)lfirst(lci); - if (indexToMatch->relam == BTREE_AM_OID && indexToMatch->unique - && ContainIndexCols(es, indexToMatch)) { - return true; - } - } - - return false; -} - -/* - * check whether the equality constraints match an unique index. - * We know the result only has one row if finding a matched unique index. - */ -void ES_SELECTIVITY::CalSelWithUniqueIndex(Selectivity &result) -{ - List* es_candidate_used = NULL; - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - if (temp->tag == ES_EQSEL && MatchUniqueIndex(temp) && - temp->left_rel && temp->left_rel->tuples >= 1.0) { - result *= 1.0 / temp->left_rel->tuples; - es_candidate_used = lappend(es_candidate_used, temp); - } - } - - /* - * Finally, we need to delete es_candidates which have already used. The rests es_candidates - * will calculate with statistic info. 
- */ - es_candidate_saved = es_candidate_list; - es_candidate_list = list_difference_ptr(es_candidate_list, es_candidate_used); - - list_free(es_candidate_used); -} - -/* - * @brief Main entry for using extended statistic to calculate selectivity - * root_input can only be NULL when processing group by clauses - */ -Selectivity ES_SELECTIVITY::calculate_selectivity(PlannerInfo* root_input, List* clauses_input, - SpecialJoinInfo* sjinfo_input, JoinType jointype, JoinPath* path_input, es_type action, STATS_EST_TYPE eType) -{ - Selectivity result = 1.0; - root = root_input; - sjinfo = sjinfo_input; - origin_clauses = clauses_input; - path = path_input; - - /* group clauselist */ - if (action == ES_GROUPBY) { - /* group clauselist for group by clauses */ - group_clauselist_groupby(origin_clauses); - } else { - group_clauselist(origin_clauses); - } - - /* - * Before reading statistic, We check whether the equality constraints match an - * unique index. We know the result only has one row if finding a matched unique index. - */ - CalSelWithUniqueIndex(result); - - /* read statistic */ - read_statistic(); - - /* calculate selectivity */ - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - switch (temp->tag) { - case ES_EQSEL: - result *= cal_eqsel(temp); - break; - case ES_EQJOINSEL: - /* compute hash bucket size */ - if (action == ES_COMPUTEBUCKETSIZE) { - es_bucketsize* bucket = (es_bucketsize*)palloc(sizeof(es_bucketsize)); - cal_bucket_size(temp, bucket); - bucketsize_list = lappend(bucketsize_list, bucket); - } else { - result *= cal_eqjoinsel(temp, jointype); - } - break; - case ES_GROUPBY: - build_pseudo_varinfo(temp, eType); - break; - default: - break; - } - } - - es_candidate_list = es_candidate_saved; - - /* free memory, but unmatched_clause_group need to be free manually */ - clear(); - - return result; -} - -/* - * @brief group clause by clause type and involving rels, for now, only support eqsel and eqjoinsel - */ -void ES_SELECTIVITY::group_clauselist(List* clauses) -{ - ListCell* l = NULL; - foreach(l, clauses) { - Node* clause = (Node*)lfirst(l); - - if (!IsA(clause, RestrictInfo)) { - unmatched_clause_group = lappend(unmatched_clause_group, clause); - continue; - } - - RestrictInfo* rinfo = (RestrictInfo*)clause; - if (rinfo->pseudoconstant || rinfo->norm_selec > 1 || rinfo->orclause) { - unmatched_clause_group = lappend(unmatched_clause_group, clause); - continue; - } - - if (is_opclause(rinfo->clause)) { - OpExpr* opclause = (OpExpr*)rinfo->clause; - Oid opno = opclause->opno; - - /* only handle "=" operator */ - if (get_oprrest(opno) == EQSELRETURNOID) { - int relid_num = bms_num_members(rinfo->clause_relids); - if (relid_num == 1) { - /* only process clause like t1.a = 1, so only one relid */ - load_eqsel_clause(rinfo); - continue; - } else if (relid_num == TOW_MEMBERS) { - /* only process clause like t1.a = t2.b, so only two relids */ - load_eqjoinsel_clause(rinfo); - continue; - } else { - unmatched_clause_group = lappend(unmatched_clause_group, rinfo); - continue; - } - } - } else if ((rinfo->clause) != NULL && IsA(rinfo->clause, NullTest)) { - NullTest* nullclause = (NullTest*)rinfo->clause; - int relid_num = bms_num_members(rinfo->clause_relids); - if (relid_num == 1 && nullclause->nulltesttype == IS_NULL) { - load_eqsel_clause(rinfo); - continue; - } - } - - unmatched_clause_group = lappend(unmatched_clause_group, clause); - } - - recheck_candidate_list(); - debug_print(); - - return; -} - -/* - * @brief group 
groupby-clause by clause type and involving rels, for - */ -void ES_SELECTIVITY::group_clauselist_groupby(List* varinfos) -{ - ListCell* l = NULL; - foreach(l, varinfos) { - GroupVarInfo* varinfo = (GroupVarInfo*)lfirst(l); - if (!is_var_node(varinfo->var)) { - unmatched_clause_group = lappend(unmatched_clause_group, varinfo); - continue; - } - - Var* var = NULL; - if (IsA(varinfo->var, RelabelType)) - var = (Var*)((RelabelType*)varinfo->var)->arg; - else - var = (Var*)varinfo->var; - - ListCell* l2 = NULL; - bool found_match = false; - foreach(l2, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l2); - - if (temp->tag != ES_GROUPBY) - continue; - - if (varinfo->rel == temp->left_rel) { - /* only use left attnums for group by clauses */ - temp->left_attnums = bms_add_member(temp->left_attnums, var->varattno); - temp->clause_group = lappend(temp->clause_group, varinfo); - add_clause_map(temp, var->varattno, 0, (Node*)var, NULL); - found_match = true; - break; - } - } - - /* if not matched, build a new cell in es_candidate_list */ - if (!found_match) { - es_candidate* es = (es_candidate*)palloc(sizeof(es_candidate)); - RelOptInfo* temp_rel = NULL; - - init_candidate(es); - - es->tag = ES_GROUPBY; - es->left_rel = varinfo->rel; - es->left_relids = bms_copy(varinfo->rel->relids); - es->left_attnums = bms_add_member(es->left_attnums, var->varattno); - add_clause_map(es, var->varattno, 0, (Node*)var, NULL); - read_rel_rte(varinfo->var, &temp_rel, &es->left_rte); - Assert(es->left_rel == temp_rel); - es->clause_group = lappend(es->clause_group, varinfo); - es_candidate_list = lappend(es_candidate_list, es); - } - } - - recheck_candidate_list(); - debug_print(); - - return; -} - -/* - * @brief initial es_candidate, set all elements to default value or NULL - */ -void ES_SELECTIVITY::init_candidate(es_candidate* es) const -{ - es->tag = ES_EMPTY; - es->relids = NULL; - es->left_relids = NULL; - es->right_relids = NULL; - es->left_attnums = NULL; - es->right_attnums = NULL; - es->left_stadistinct = 0.0; - es->right_stadistinct = 0.0; - es->left_first_mcvfreq = 0.0; - es->right_first_mcvfreq = 0.0; - es->left_rel = NULL; - es->right_rel = NULL; - es->left_rte = NULL; - es->right_rte = NULL; - es->clause_group = NIL; - es->clause_map = NIL; - es->left_extended_stats = NULL; - es->right_extended_stats = NULL; - es->pseudo_clause_list = NIL; - es->has_null_clause = false; - return; -} - -/* - * @brief free memory used in calculate_selectivity except unmatched_clause_group - */ -void ES_SELECTIVITY::clear() -{ - /* delete es_candidate_list */ - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - bms_free_ext(temp->relids); - bms_free_ext(temp->left_relids); - bms_free_ext(temp->right_relids); - bms_free_ext(temp->left_attnums); - bms_free_ext(temp->right_attnums); - temp->left_rel = NULL; - temp->right_rel = NULL; - temp->left_rte = NULL; - temp->right_rte = NULL; - list_free_ext(temp->clause_group); - list_free_deep(temp->clause_map); - clear_extended_stats(temp->left_extended_stats); - clear_extended_stats(temp->right_extended_stats); - list_free_ext(temp->pseudo_clause_list); - } - list_free_deep(es_candidate_list); - - /* - * unmatched_clause_group need to be free manually after - * it is used in clause_selectivity(). 
- */ - root = NULL; - sjinfo = NULL; - origin_clauses = NULL; - return; -} - -/* - * @brief free memory used by saving extended_stats after calculation - */ -void ES_SELECTIVITY::clear_extended_stats(ExtendedStats* extended_stats) const -{ - if (extended_stats) { - bms_free_ext(extended_stats->bms_attnum); - if (extended_stats->mcv_numbers) - pfree_ext(extended_stats->mcv_numbers); - if (extended_stats->mcv_values) - pfree_ext(extended_stats->mcv_values); - if (extended_stats->mcv_nulls) - pfree_ext(extended_stats->mcv_nulls); - if (extended_stats->other_mcv_numbers) - pfree_ext(extended_stats->other_mcv_numbers); - pfree_ext(extended_stats); - extended_stats = NULL; - } - return; -} - -/* - * @brief free memory of extended_stats_list by calling clear_extended_stats - */ -void ES_SELECTIVITY::clear_extended_stats_list(List* stats_list) const -{ - if (stats_list) { - ListCell* lc = NULL; - foreach(lc, stats_list) { - ExtendedStats* extended_stats = (ExtendedStats*)lfirst(lc); - clear_extended_stats(extended_stats); - } - list_free_ext(stats_list); - } - return; -} - -/* - * @brief copy the original pointer, repoint it to something else - * in order to avoid failure when using list_free - * @param ListCell* l - * @return - * @exception None - */ -ExtendedStats* ES_SELECTIVITY::copy_stats_ptr(ListCell* l) const -{ - ExtendedStats* result = (ExtendedStats*)lfirst(l); - lfirst(l) = NULL; - return result; -} - -/* - * @brief add an eqjsel clause to es_candidate_list and group by relid - * we should have bms_num_members(clause->clause_relids) == 1 - */ -void ES_SELECTIVITY::load_eqsel_clause(RestrictInfo* clause) -{ - /* group clause by rels, add to es_candidate_list */ - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - - if (temp->tag != ES_EQSEL) - continue; - - if (bms_equal(clause->clause_relids, temp->relids)) { - if (add_attnum(clause, temp)) { - temp->clause_group = lappend(temp->clause_group, clause); - if (IsA(clause->clause, NullTest)) - temp->has_null_clause = true; - return; - } - } - } - - /* if not matched, build a new cell in es_candidate_list */ - if (!build_es_candidate(clause, ES_EQSEL)) - unmatched_clause_group = lappend(unmatched_clause_group, clause); - - return; -} - -/* - * @brief add an eqjoinsel clause to es_candidate_list and group by relid - */ -void ES_SELECTIVITY::load_eqjoinsel_clause(RestrictInfo* clause) -{ - /* - * the relids in the clause should be as same as sjinfo, so we can avoid parameterized conditon. 
- */ - if (sjinfo) { - if (!bms_overlap(sjinfo->min_lefthand, clause->clause_relids) || - !bms_overlap(sjinfo->min_righthand, clause->clause_relids)) { - unmatched_clause_group = lappend(unmatched_clause_group, clause); - return; - } - } - - /* group clause by rels, add to es_candidate_list */ - if (bms_num_members(clause->left_relids) == 1 && bms_num_members(clause->right_relids) == 1) { - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - - if (temp->tag != ES_EQJOINSEL) - continue; - - if (bms_equal(clause->clause_relids, temp->relids)) { - if (add_attnum(clause, temp)) { - temp->clause_group = lappend(temp->clause_group, clause); - return; - } - } - } - - /* if not matched, build a new cell in es_candidate_list */ - if (!build_es_candidate(clause, ES_EQJOINSEL)) - unmatched_clause_group = lappend(unmatched_clause_group, clause); - - return; - } - - unmatched_clause_group = lappend(unmatched_clause_group, clause); - - return; -} - -/* - * @brief make a combination of es->right_attnums or es->left_attnums with input attnum by clause map - * @param left: true: add to es->right_attnums; false: add to es->left_attnums - * @return combination of Bitmapset - * @exception None - */ -Bitmapset* ES_SELECTIVITY::make_attnums_by_clause_map(es_candidate* es, Bitmapset* attnums, bool left) const -{ - ListCell* lc_clause_map = NULL; - Bitmapset* result = NULL; - foreach(lc_clause_map, es->clause_map) { - es_clause_map* clause_map = (es_clause_map*)lfirst(lc_clause_map); - if (left && bms_is_member(clause_map->left_attnum, attnums)) - result = bms_add_member(result, clause_map->right_attnum); - else if (!left && bms_is_member(clause_map->right_attnum, attnums)) - result = bms_add_member(result, clause_map->left_attnum); - } - return result; -} - -/* - * @brief find the matched extended stats in stats_list - * @param es :proving mathing conditions including relids , attnums - * @param stats_list : the extended statistic list - * @param left : true : match with the left_rel; false: match with the right_rel - * @return None - * @exception None - */ -void ES_SELECTIVITY::match_extended_stats(es_candidate* es, List* stats_list, bool left) -{ - int max_matched = 0; - int num_members = bms_num_members(es->left_attnums); - char other_side_starelkind; - RangeTblEntry* other_side_rte = NULL; - Bitmapset* this_side_attnums = NULL; - if (left) { - /* this side is left and the other side is right */ - other_side_starelkind = OidIsValid(es->right_rte->partitionOid) ? STARELKIND_PARTITION : STARELKIND_CLASS; - other_side_rte = es->right_rte; - this_side_attnums = es->left_attnums; - } else { - /* this side is right and other side is left */ - other_side_starelkind = OidIsValid(es->left_rte->partitionOid) ? 
STARELKIND_PARTITION : STARELKIND_CLASS; - other_side_rte = es->left_rte; - this_side_attnums = es->right_attnums; - } - - /* best_matched_listcell use to save the best match from stats list */ - ListCell* best_matched_listcell = NULL; - /* best_matched_stats use to save the best match from es_get_multi_column_stats */ - ListCell* best_matched_stats = (ListCell*)palloc(sizeof(ListCell)); - lfirst(best_matched_stats) = NULL; - ListCell* lc = NULL; - foreach(lc, stats_list) { - ExtendedStats* extended_stats = (ExtendedStats*)lfirst(lc); - ExtendedStats* other_side_extended_stats = NULL; - if (bms_is_subset(extended_stats->bms_attnum, this_side_attnums)) { - int matched = bms_num_members(extended_stats->bms_attnum); - - Bitmapset* other_side_attnums = make_attnums_by_clause_map(es, extended_stats->bms_attnum, left); - other_side_extended_stats = es_get_multi_column_stats( - other_side_rte->relid, other_side_starelkind, other_side_rte->inh, other_side_attnums); - if (other_side_extended_stats != NULL && matched == num_members) { - /* all attnums have extended stats, leave */ - if (left) { - es->left_extended_stats = copy_stats_ptr(lc); - es->right_extended_stats = other_side_extended_stats; - } else { - es->right_extended_stats = copy_stats_ptr(lc); - es->left_extended_stats = other_side_extended_stats; - } - clear_extended_stats((ExtendedStats*)lfirst(best_matched_stats)); - break; - } else if (other_side_extended_stats != NULL && matched > max_matched) { - /* not all attnums have extended stats, find the first maximum match */ - best_matched_listcell = lc; - clear_extended_stats((ExtendedStats*)lfirst(best_matched_stats)); - lfirst(best_matched_stats) = other_side_extended_stats; - max_matched = matched; - } else - clear_extended_stats(other_side_extended_stats); - } - } - - if (best_matched_listcell && lfirst(best_matched_stats)) { - if (left) { - es->left_extended_stats = copy_stats_ptr(best_matched_listcell); - es->right_extended_stats = (ExtendedStats*)lfirst(best_matched_stats); - } else { - es->right_extended_stats = copy_stats_ptr(best_matched_listcell); - es->left_extended_stats = (ExtendedStats*)lfirst(best_matched_stats); - } - lfirst(best_matched_stats) = NULL; - /* remove members not in the multi-column stats */ - if (max_matched != num_members) { - Bitmapset* tmpset = bms_difference(es->left_attnums, es->left_extended_stats->bms_attnum); - int dump_attnum; - while ((dump_attnum = bms_first_member(tmpset)) > 0) { - es->left_attnums = bms_del_member(es->left_attnums, dump_attnum); - remove_attnum(es, dump_attnum); - } - bms_free_ext(tmpset); - } - } - pfree_ext(best_matched_stats); - return; -} - -/* - * @brief modify distinct value using possion model - * @param es : proving the distinct value to modify - * @param left :true : modify the left distinct value; false: modify the right one - * @param sjinfo :join infos from inputs of calculate_selecitvity, can be NULL for eqsel - * @return None - * @exception None - */ -void ES_SELECTIVITY::modify_distinct_by_possion_model(es_candidate* es, bool left, SpecialJoinInfo* spjinfo) const -{ - bool enablePossion = false; - double varratio = 1.0; - ListCell* lc = NULL; - VariableStatData vardata; - float4 distinct = 0.0; - double tuples = 0.0; - - /* build vardata */ - vardata.enablePossion = true; - if (left && es->left_rel->tuples > 0) { - vardata.rel = es->left_rel; - distinct = es->left_stadistinct; - tuples = es->left_rel->tuples; - foreach(lc, es->clause_map) { - es_clause_map* map = (es_clause_map*)lfirst(lc); - vardata.var = 
(Node*)map->left_var; - enablePossion = can_use_possion(&vardata, spjinfo, &varratio); - if (!enablePossion) - break; - } - } else if (!left && es->right_rel->tuples > 0) { - vardata.rel = es->right_rel; - distinct = es->right_stadistinct; - tuples = es->right_rel->tuples; - foreach(lc, es->clause_map) { - es_clause_map* map = (es_clause_map*)lfirst(lc); - vardata.var = (Node*)map->right_var; - enablePossion = can_use_possion(&vardata, spjinfo, &varratio); - if (!enablePossion) - break; - } - } - - if (enablePossion) { - double tmp = distinct; - distinct = NUM_DISTINCT_SELECTIVITY_FOR_POISSON(distinct, tuples, varratio); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), - (errmsg("[ES]The origin distinct value is %f. After using possion model with ntuples=%f and ration=%e \ - The new distinct value is %f", - tmp, - tuples, - varratio, - distinct)))); - } - - if (left && enablePossion) { - es->left_stadistinct = distinct; - } else if ((!left) && enablePossion) { - es->right_stadistinct = distinct; - } - return; -} - -static bool ClauseIsLegal(es_type type, const Node* left, const Node* right, int leftAttnum, int rightAttnum) -{ - if (leftAttnum < 0 || rightAttnum < 0) { - return false; - } - - /* check clause type */ - switch (type) { - case ES_EQSEL: - if (!IsA(left, Const) && !IsA(right, Const) && !IsA(left, Param) && !IsA(right, Param)) - return false; - else if (IsA(left, Const) && ((Const*)left)->constisnull) - return false; - else if (IsA(right, Const) && ((Const*)right)->constisnull) - return false; - break; - case ES_EQJOINSEL: - default: - break; - } - return true; -} - -static inline bool RteIsValid(const RangeTblEntry* rte) -{ - return (rte != NULL && rte->rtekind == RTE_RELATION); -} - -void ES_SELECTIVITY::setup_es(es_candidate* es, es_type type, RestrictInfo* clause) -{ - es->tag = type; - es->relids = bms_copy(clause->clause_relids); - es->clause_group = lappend(es->clause_group, clause); - es->has_null_clause = IsA(clause->clause, NullTest); - es->left_first_mcvfreq = 0.0; - es->right_first_mcvfreq = 0.0; -} - -bool ES_SELECTIVITY::build_es_candidate_for_eqsel(es_candidate* es, Node* var, int attnum, bool left, - RestrictInfo* clause) -{ - read_rel_rte(var, &es->left_rel, &es->left_rte); - if (!RteIsValid(es->left_rte)) { - return false; - } - es->left_attnums = bms_add_member(es->left_attnums, attnum); - if (left) { - es->left_relids = - clause->left_relids != NULL ? bms_copy(clause->left_relids) : bms_copy(clause->clause_relids); - } else { - es->left_relids = - clause->right_relids != NULL ? 
bms_copy(clause->right_relids) : bms_copy(clause->clause_relids); - } - add_clause_map(es, attnum, 0, var, NULL); - return true; -} - -/* - * @brief build a new es_candidate and add to es_candidate_list - */ -bool ES_SELECTIVITY::build_es_candidate(RestrictInfo* clause, es_type type) -{ - Node* left = NULL; - Node* right = NULL; - int left_attnum = 0; - int right_attnum = 0; - bool success = false; - - if (IsA(clause->clause, OpExpr)) { - OpExpr* opclause = (OpExpr*)clause->clause; - - Assert(list_length(opclause->args) == TOW_MEMBERS); - - left = (Node*)linitial(opclause->args); - right = (Node*)lsecond(opclause->args); - left_attnum = read_attnum(left); - right_attnum = read_attnum(right); - if (!ClauseIsLegal(type, left, right, left_attnum, right_attnum)) - return false; - } else { - Assert(IsA(clause->clause, NullTest)); - NullTest* nullclause = (NullTest*)clause->clause; - left = (Node*)nullclause->arg; - left_attnum = read_attnum(left); - if (left_attnum < 0) - return false; - } - - es_candidate* es = (es_candidate*)palloc(sizeof(es_candidate)); - init_candidate(es); - - switch (type) { - case ES_EQSEL: - /* only use left side */ - if (left_attnum > 0 && right_attnum == 0) { - success = build_es_candidate_for_eqsel(es, left, left_attnum, true, clause); - } else if (right_attnum > 0 && left_attnum == 0) { - Assert(clause->right_relids != NULL); - success = build_es_candidate_for_eqsel(es, right, right_attnum, false, clause); - } else { - pfree_ext(es); - return false; - } - break; - case ES_EQJOINSEL: - if (left_attnum > 0 && right_attnum > 0) { - read_rel_rte(left, &es->left_rel, &es->left_rte); - read_rel_rte(right, &es->right_rel, &es->right_rte); - if (!RteIsValid(es->left_rte) || !RteIsValid(es->right_rte)) { - break; - } - es->left_relids = bms_copy(clause->left_relids); - es->right_relids = bms_copy(clause->right_relids); - es->left_attnums = bms_add_member(es->left_attnums, left_attnum); - es->right_attnums = bms_add_member(es->right_attnums, right_attnum); - add_clause_map(es, left_attnum, right_attnum, left, right); - success = true; - } else { - pfree_ext(es); - return false; - } - break; - default: - /* for future development, should not reach here now */ - pfree_ext(es); - return false; - } - - /* double check */ - if (!success) { - es->left_rel = NULL; - es->right_rel = NULL; - es->left_rte = NULL; - es->right_rte = NULL; - pfree_ext(es); - return false; - } - - setup_es(es, type, clause); - - es_candidate_list = lappend(es_candidate_list, es); - - return true; -} - -/* - * @brief remove useless member in es_candidate_list to unmatched_clause_group - */ -void ES_SELECTIVITY::recheck_candidate_list() -{ - if (!es_candidate_list) - return; - ListCell* l = NULL; - bool validate = true; - - /* try to use equivalence_class to re-combinate clauses first */ - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - if (temp->tag == ES_EQJOINSEL && list_length(temp->clause_group) == 1 && list_length(es_candidate_list) > 1) - (void)try_equivalence_class(temp); - } - - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - switch (temp->tag) { - case ES_EQSEL: - if (list_length(temp->clause_group) <= 1) - validate = false; - else if (temp->left_rte && bms_num_members(temp->left_attnums) <= 1) - validate = false; - break; - case ES_EQJOINSEL: - if (list_length(temp->clause_group) <= 1) - validate = false; - else if (bms_num_members(temp->left_attnums) <= 1 || bms_num_members(temp->right_attnums) <= 1) - validate = false; - 
break; - case ES_GROUPBY: - if (bms_num_members(temp->left_attnums) <= 1) - validate = false; - break; - default: - break; - } - if (!validate) { - unmatched_clause_group = list_concat(unmatched_clause_group, temp->clause_group); - temp->tag = ES_EMPTY; - temp->clause_group = NULL; - } - } - return; -} - -static inline bool IsUnsupportedCases(const EquivalenceClass* ec) -{ - /* only consider var = var situation */ - if (ec->ec_has_const) - return true; - - /* ignore broken ecs */ - if (ec->ec_broken) - return true; - - /* if members of ECs are less than two, won't generate any substitute */ - if (list_length(ec->ec_members) <= TOW_MEMBERS) - return true; - - return false; -} - -/* - * @brief try to find a substitude clause building from equivalence classes - * @return true when find a substitude clause; false when find nothing - */ -bool ES_SELECTIVITY::try_equivalence_class(es_candidate* es) -{ - if (path && path->path.pathtype == T_MergeJoin) { - /* for mergejoin, do not adjust clause using equivalence class */ - return false; - } - - ListCell* lc = NULL; - bool result = false; - - foreach(lc, root->eq_classes) { - EquivalenceClass* ec = (EquivalenceClass*)lfirst(lc); - - if (IsUnsupportedCases(ec)) - continue; - - /* We can quickly ignore any that don't cover the join, too */ - if (!bms_is_subset(es->relids, ec->ec_relids)) - continue; - - Bitmapset* tmpset = bms_copy(ec->ec_relids); - int ec_relid = 0; - while ((ec_relid = bms_first_member(tmpset)) >= 0) { - if (bms_is_member(ec_relid, es->relids)) - continue; - ListCell* lc2 = NULL; - foreach(lc2, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(lc2); - if (temp->tag != ES_EQJOINSEL) - continue; - if (bms_equal(temp->relids, es->relids)) - continue; - if (bms_is_member(ec_relid, temp->relids) && bms_overlap(temp->relids, es->relids)) { - Bitmapset* interset_relids = bms_intersect(temp->relids, es->relids); - Bitmapset* join_relids = bms_copy(interset_relids); - join_relids = bms_add_member(join_relids, ec_relid); - Assert(bms_equal(join_relids, temp->relids)); - Bitmapset* outer_relids = bms_make_singleton(ec_relid); - List* pseudo_clauselist = - generate_join_implied_equalities_normal(root, ec, join_relids, outer_relids, interset_relids); - if (pseudo_clauselist != NULL) { - result = match_pseudo_clauselist(pseudo_clauselist, temp, es->clause_group); - if (log_min_messages <= ES_DEBUG_LEVEL) { - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT_JOIN), errmsg("[ES]Build new clause using equivalence class:)"))); - print_clauses(pseudo_clauselist); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT_JOIN), - errmsg("[ES]The old clause will be abandoned? 
%d)", (int)result))); - print_clauses(es->clause_group); - } - } - - bms_free_ext(interset_relids); - bms_free_ext(join_relids); - bms_free_ext(outer_relids); - if (result) { - /* replace the removed clause in clauselist with the new built one */ - ListCell* lc3 = NULL; - foreach(lc3, origin_clauses) { - void* clause = (void*)lfirst(lc3); - if (clause == linitial(es->clause_group)) { - /* maybe cause memory problem as the old clause is not released */ - lfirst(lc3) = linitial(pseudo_clauselist); - } - } - /* For hashclause, we have to process joinrestrictinfo in path as well */ - if (path) { - foreach(lc3, path->joinrestrictinfo) { - void* clause = (void*)lfirst(lc3); - if (clause == linitial(es->clause_group)) { - /* maybe cause memory problem as the old clause is not released */ - lfirst(lc3) = linitial(pseudo_clauselist); - } - } - } - break; - } - } - } - if (result) { - /* - * If sucess, the clause has been tranformed and saved in another es_candidate. - * So no need to keep this es_candidate anymore. - */ - es->tag = ES_EMPTY; - es->clause_group = NULL; - break; - } - } - bms_free_ext(tmpset); - if (result) - break; - } - return result; -} - -/* - * @brief try to match the newborn clause building by try_equivalence_class() with the existed clause group - * like what we do in group_clauselist(), but more simple. - */ -bool ES_SELECTIVITY::match_pseudo_clauselist(List* clauses, es_candidate* es, List* origin_clause) -{ - bool result = false; - ListCell* lc = NULL; - foreach(lc, clauses) { - Node* clause = (Node*)lfirst(lc); - - if (!IsA(clause, RestrictInfo)) { - continue; - } - - RestrictInfo* rinfo = (RestrictInfo*)clause; - if (rinfo->pseudoconstant || rinfo->norm_selec > 1 || rinfo->orclause) { - continue; - } - - if (is_opclause(rinfo->clause)) { - OpExpr* opclause = (OpExpr*)rinfo->clause; - Oid opno = opclause->opno; - - /* only handle "=" operator */ - if (get_oprrest(opno) == EQSELRETURNOID) { - Assert(bms_num_members(rinfo->clause_relids) == TOW_MEMBERS); - if (add_attnum(rinfo, es)) { - es->clause_group = lappend(es->clause_group, clause); - es->pseudo_clause_list = lappend(es->pseudo_clause_list, clause); - es->pseudo_clause_list = list_concat(es->pseudo_clause_list, origin_clause); - result = true; - } - } - } - } - return result; -} - -/* - * @brief relpace the original clause in the input clause list with the new clause build by equivalence class, - * the memory used by old clause will be release by optimizer context or something esle - */ -void ES_SELECTIVITY::replace_clause(Datum* old_clause, Datum* new_clause) const -{ - ListCell* lc = NULL; - foreach(lc, origin_clauses) { - if (lfirst(lc) == old_clause) { - lfirst(lc) = new_clause; - break; - } - } -} - -/* - * @brief main entry to read statistic which will be used to calculate selectivity, called by - * calculate_selectivity - */ -void ES_SELECTIVITY::read_statistic() -{ - if (!es_candidate_list) - return; - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - switch (temp->tag) { - case ES_EQSEL: - case ES_GROUPBY: - read_statistic_eqsel(temp); - break; - case ES_EQJOINSEL: - read_statistic_eqjoinsel(temp); - break; - default: - /* empty */ - break; - } - } - return; -} - -bool ES_SELECTIVITY::cal_stadistinct_eqjoinsel(es_candidate* es) -{ - /* - * Since we can not tell how many or which columns are actaully null when nullfrac == 1.0, - * so we will not use multi-column when nullfrac == 1.0. 
-    if (es->left_extended_stats &&
-        (es->left_extended_stats->nullfrac != 0.0 || es->left_extended_stats->distinct != 0.0 ||
-        es->left_extended_stats->mcv_values) &&
-        es->right_extended_stats &&
-        (es->right_extended_stats->nullfrac != 0.0 || es->right_extended_stats->distinct != 0.0 ||
-        es->right_extended_stats->mcv_values)) {
-        /* should not receive empty extended stats here unless all columns are empty */
-        if (es->left_extended_stats->distinct < 0)
-            es->left_stadistinct = clamp_row_est(-1 * es->left_extended_stats->distinct * es->left_rel->tuples *
-                (1.0 - es->left_extended_stats->nullfrac));
-        else
-            es->left_stadistinct = clamp_row_est(es->left_extended_stats->distinct);
-
-        if (es->right_extended_stats->distinct < 0)
-            es->right_stadistinct = clamp_row_est(-1 * es->right_extended_stats->distinct * es->right_rel->tuples *
-                (1.0 - es->right_extended_stats->nullfrac));
-        else
-            es->right_stadistinct = clamp_row_est(es->right_extended_stats->distinct);
-
-        /*
-         * Use the Poisson model if the conditions are satisfied.
-         */
-        modify_distinct_by_possion_model(es, true, sjinfo);
-        modify_distinct_by_possion_model(es, false, sjinfo);
-
-        /* replace the old clause with the new one */
-        if (es->pseudo_clause_list) {
-            ListCell* lc2 = NULL;
-            foreach(lc2, es->pseudo_clause_list) {
-                Datum* new_clause = (Datum*)lfirst(lc2);
-                lc2 = lnext(lc2);
-                Datum* old_clause = (Datum*)lfirst(lc2);
-                /* make sure the new clause is still in the clause group */
-                ListCell* lc3 = NULL;
-                foreach(lc3, es->clause_group) {
-                    if ((Datum*)lfirst(lc3) == new_clause) {
-                        replace_clause(old_clause, new_clause);
-                        break;
-                    }
-                }
-            }
-        }
-        return true;
-    }
-    return false;
-}
-
-#define CLEAN_UP_TEMP_OBJECTS(tmp_left, tmp_right, left_stats_list, right_stats_list) \
-    do {                                                                              \
-        bms_free_ext(tmp_left);                                                       \
-        bms_free_ext(tmp_right);                                                      \
-        clear_extended_stats_list(left_stats_list);                                   \
-        clear_extended_stats_list(right_stats_list);                                  \
-    } while (0)
-
-/*
- * @brief read extended statistics for eqjoinsel
- * There are three possible situations:
- * (1) No statistic data in pg_statistic; num_stats will then be 0.
- * (2) There are over 100 records in pg_statistic when searching extended statistics for the
- *     target table; the returned stats_list will then be empty and we have to search manually.
- *     In that case there are too many combinations to try, so we compromise and only try a
- *     limited number of possibilities.
- * (3) There are fewer than 100 records, and the returned stats_list contains all of them.
- *     We then search the list for the best match.
- */
-void ES_SELECTIVITY::read_statistic_eqjoinsel(es_candidate* es)
-{
-    int left_num_stats = 0;
-    int right_num_stats = 0;
-    char left_starelkind = OidIsValid(es->left_rte->partitionOid) ? STARELKIND_PARTITION : STARELKIND_CLASS;
-    char right_starelkind = OidIsValid(es->right_rte->partitionOid) ?
STARELKIND_PARTITION : STARELKIND_CLASS; - - /* read all multi-column statistic from pg_statistic if possible */ - List* left_stats_list = - es_get_multi_column_stats(es->left_rte->relid, left_starelkind, es->left_rte->inh, &left_num_stats); - List* right_stats_list = - es_get_multi_column_stats(es->right_rte->relid, right_starelkind, es->right_rte->inh, &right_num_stats); - - /* no multi-column statistic */ - if (left_num_stats == 0 || right_num_stats == 0) { - report_no_stats(es->left_rte->relid, es->left_attnums); - report_no_stats(es->right_rte->relid, es->right_attnums); - remove_candidate(es); - return; - } - - /* save attnums for no analyze list */ - Bitmapset* tmp_left = bms_copy(es->left_attnums); - Bitmapset* tmp_right = bms_copy(es->right_attnums); - - /* when at least one side return with multi-column statistic list */ - if (left_num_stats <= ES_MAX_FETCH_NUM_OF_INSTANCE && left_num_stats <= right_num_stats) { - match_extended_stats(es, left_stats_list, true); - } else if (right_num_stats <= ES_MAX_FETCH_NUM_OF_INSTANCE) { - match_extended_stats(es, right_stats_list, false); - } else { - /* - * There are too many multi-column statistic, so return null list. - * We have to search pg_statistic manually with limited combinations. - * So could lose some matches. - */ - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT_JOIN), errmsg("[ES] Too many multi-column statistic, could lose matches."))); - - while (bms_num_members(es->left_attnums) >= TOW_MEMBERS) { - ExtendedStats* left_extended_stats = - es_get_multi_column_stats(es->left_rte->relid, left_starelkind, es->left_rte->inh, es->left_attnums); - if (left_extended_stats != NULL) { - ExtendedStats* right_extended_stats = es_get_multi_column_stats( - es->right_rte->relid, right_starelkind, es->right_rte->inh, es->right_attnums); - if (right_extended_stats != NULL) { - es->left_extended_stats = left_extended_stats; - es->right_extended_stats = right_extended_stats; - break; - } else - clear_extended_stats(left_extended_stats); - } - - /* - * if not found, delete the first member of attnums and use the rest atts to match. - * delete clause and clause_map in es. - */ - remove_attnum(es, bms_first_member(es->left_attnums)); - } - } - - if (!cal_stadistinct_eqjoinsel(es)) { - /* no multi-column statistic matched */ - report_no_stats(es->left_rte->relid, tmp_left); - report_no_stats(es->right_rte->relid, tmp_right); - remove_candidate(es); - } - - CLEAN_UP_TEMP_OBJECTS(tmp_left, tmp_right, left_stats_list, right_stats_list); - return; -} - -void ES_SELECTIVITY::remove_members_without_es_stats(int max_matched, int num_members, es_candidate* es) -{ - if (max_matched != num_members) { - Bitmapset* tmpset = bms_difference(es->left_attnums, es->left_extended_stats->bms_attnum); - int dump_attnum; - while ((dump_attnum = bms_first_member(tmpset)) > 0) { - es->left_attnums = bms_del_member(es->left_attnums, dump_attnum); - remove_attnum(es, dump_attnum); - } - bms_free_ext(tmpset); - } -} - -void ES_SELECTIVITY::cal_stadistinct_eqsel(es_candidate* es) -{ - /* - * Since we can not tell how many or which columns are actaully null when nullfrac == 1.0, - * so we will not use multi-column when nullfrac == 1.0. 
- */ - if (es->left_extended_stats && - (es->left_extended_stats->nullfrac != 0.0 || es->left_extended_stats->distinct != 0.0 || - es->left_extended_stats->mcv_values)) { - /* should not recieve empty extended stas here, except all columns are empty */ - if (es->left_extended_stats->distinct < 0) - es->left_stadistinct = clamp_row_est(-1 * es->left_extended_stats->distinct * es->left_rel->tuples); - else - es->left_stadistinct = clamp_row_est(es->left_extended_stats->distinct); - - /* - * Use possion model if satisify condition. - * we don't need possion to estimate distinct because - * we can't estimate accurate for multiple exprs. - */ - if (es->tag != ES_GROUPBY) - modify_distinct_by_possion_model(es, true, NULL); - } else { - /* no multi-column statistic matched */ - remove_candidate(es); - } -} - -/* - * @brief read extended statistic for eqsel or groupby, details are as same as read_statistic_eqjoinsel - */ -void ES_SELECTIVITY::read_statistic_eqsel(es_candidate* es) -{ - int num_stats = 0; - char starelkind = OidIsValid(es->left_rte->partitionOid) ? STARELKIND_PARTITION : STARELKIND_CLASS; - /* read all multi-column statistic from pg_statistic if possible */ - List* stats_list = - es_get_multi_column_stats(es->left_rte->relid, starelkind, es->left_rte->inh, &num_stats, es->has_null_clause); - /* no multi-column statistic */ - if (num_stats == 0) { - report_no_stats(es->left_rte->relid, es->left_attnums); - remove_candidate(es); - return; - } - - /* return with multi-column statistic list */ - if (num_stats <= ES_MAX_FETCH_NUM_OF_INSTANCE) { - ListCell* l = NULL; - int max_matched = 0; - int num_members = bms_num_members(es->left_attnums); - ListCell* best_matched_stat_ptr = NULL; - foreach(l, stats_list) { - ExtendedStats* extended_stats = (ExtendedStats*)lfirst(l); - if (bms_is_subset(extended_stats->bms_attnum, es->left_attnums)) { - int matched = bms_num_members(extended_stats->bms_attnum); - if (matched == num_members) { - es->left_extended_stats = copy_stats_ptr(l); - break; - } else if (matched > max_matched) { - best_matched_stat_ptr = l; - max_matched = matched; - } - } - } - - if (best_matched_stat_ptr != NULL) { - es->left_extended_stats = copy_stats_ptr(best_matched_stat_ptr); - /* remove members not in the multi-column stats */ - remove_members_without_es_stats(max_matched, num_members, es); - } - } else { - /* - * There are too many multi-column statistic, so return null list. - * We have to search pg_statistic manually with limited combinations. - * So could lose some matches. - */ - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT_JOIN), errmsg("[ES] Too many multi-column statistic, could lose matches."))); - - while (bms_num_members(es->left_attnums) >= TOW_MEMBERS) { - ExtendedStats* extended_stats = es_get_multi_column_stats( - es->left_rte->relid, starelkind, es->left_rte->inh, es->left_attnums, es->has_null_clause); - if (extended_stats != NULL) { - es->left_extended_stats = extended_stats; - break; - } - - /* - * if not found, delete one attnum and use the rest to try again. - * delete clause and clause_map. 
- */ - remove_attnum(es, bms_first_member(es->left_attnums)); - } - } - - cal_stadistinct_eqsel(es); - - clear_extended_stats_list(stats_list); - - return; -} - -/* - * @brief read attnum from input, - * @param node: should be a Var* - * @return return -1 if varattno <= 0, return 0 means node is not a var - * @exception None - */ -int ES_SELECTIVITY::read_attnum(Node* node) const -{ - Node* basenode = NULL; - int attnum = -1; - if (IsA(node, RelabelType)) - basenode = (Node*)((RelabelType*)node)->arg; - else - basenode = node; - - if (IsA(basenode, Var)) { - Var* var = (Var*)basenode; - attnum = var->varattno; - if (attnum <= 0) - attnum = -1; - } else if (IsA(node, Const) || IsA(node, Param)) - attnum = 0; - - return attnum; -} - -static List* BuildNewRelList(Bitmapset* attnumsTmp, Oid relidOid) -{ - List* record = NIL; - List* relidList = NIL; - List* attidList = NIL; - int attnum = bms_first_member(attnumsTmp); - while (attnum != -1) { - attidList = lappend_int(attidList, attnum); - attnum = bms_first_member(attnumsTmp); - } - bms_free_ext(attnumsTmp); - relidList = lappend_oid(relidList, relidOid); - record = lappend(record, relidList); - record = lappend(record, attidList); - return record; -} - -/* - * @brief save non-analyze multi-column to g_NoAnalyzeRelNameList - * @param relid_oid, relation oid - * @param attnums, multi-column attribute number - * @return - * @exception None - */ -void ES_SELECTIVITY::report_no_stats(Oid relid_oid, Bitmapset* attnums) const -{ - /* We don't save non-analyze multi-column to g_NoAnalyzeRelNameList when resource_track_log=summary. */ - if (u_sess->attr.attr_storage.resource_track_log == SUMMARY || relid_oid == 0) - return; - - /* - * We should not save the relation to non-analyze list if is under analyzing, - * because it will create temp table and execute some query, the temp table - * don't be analyzed when 2% analyzing. - */ - if (u_sess->analyze_cxt.is_under_analyze) - return; - - Assert(bms_num_members(attnums) >= TOW_MEMBERS); - MemoryContext oldcontext = MemoryContextSwitchTo(t_thrd.mem_cxt.msg_mem_cxt); - Bitmapset* attnums_tmp = bms_copy(attnums); - - ListCell* lc = NULL; - bool found = false; - if (t_thrd.postgres_cxt.g_NoAnalyzeRelNameList != NIL) { - foreach(lc, t_thrd.postgres_cxt.g_NoAnalyzeRelNameList) { - List* record = (List*)lfirst(lc); - if (relid_oid == linitial_oid((List*)linitial(record))) { - ListCell* sublist = NULL; - for (sublist = lnext(list_head(record)); sublist != NULL; sublist = lnext(sublist)) { - List* attid_list = (List*)lfirst(sublist); - if (list_length(attid_list) == bms_num_members(attnums_tmp)) { - Bitmapset* attnums_from_list = NULL; - ListCell* cell = attid_list->head; - while (cell != NULL) { - attnums_from_list = bms_add_member(attnums_from_list, (int)lfirst_int(cell)); - cell = cell->next; - } - if (bms_equal(attnums_from_list, attnums_tmp)) - found = true; - bms_free_ext(attnums_from_list); - } - } - if (!found) { - List* attid_list = NIL; - int attnum = bms_first_member(attnums_tmp); - while (attnum != -1) { - attid_list = lappend_int(attid_list, attnum); - attnum = bms_first_member(attnums_tmp); - } - bms_free_ext(attnums_tmp); - record = lappend(record, attid_list); - found = true; - } - } - } - } - - if (!found) { - /* add a new rel list */ - List* record = BuildNewRelList(attnums_tmp, relid_oid); - - /* Add a new rel list into g_NoAnalyzeRelNameList. 
*/ - t_thrd.postgres_cxt.g_NoAnalyzeRelNameList = lappend(t_thrd.postgres_cxt.g_NoAnalyzeRelNameList, record); - } - - (void)MemoryContextSwitchTo(oldcontext); - return; -} - -static bool MatchOnSameSide(RestrictInfo* clause, es_candidate* temp, int leftAttnum, int rightAttnum) -{ - return bms_equal(clause->left_relids, temp->left_relids) && - bms_equal(clause->right_relids, temp->right_relids) && - !bms_is_member(leftAttnum, temp->left_attnums) && !bms_is_member(rightAttnum, temp->right_attnums); -} - -static bool MatchOnOtherSide(RestrictInfo* clause, es_candidate* temp, int leftAttnum, int rightAttnum) -{ - return bms_equal(clause->right_relids, temp->left_relids) && - bms_equal(clause->left_relids, temp->right_relids) && - !bms_is_member(rightAttnum, temp->left_attnums) && - !bms_is_member(leftAttnum, temp->right_attnums); -} - -static inline bool AttnumIsInvalid(int leftAttnum, int rightAttnum) -{ - return (leftAttnum < 0 || rightAttnum < 0 || (leftAttnum == 0 && rightAttnum == 0)); -} - -void ES_SELECTIVITY::add_attnum_for_eqsel(es_candidate* temp, int attnum, Node* arg) const -{ - temp->left_attnums = bms_add_member(temp->left_attnums, attnum); - add_clause_map(temp, attnum, 0, arg, NULL); -} - -/* - * @brief read and add attnums to es_candidate - * @return true if success - */ -bool ES_SELECTIVITY::add_attnum(RestrictInfo* clause, es_candidate* temp) const -{ - Node* left = NULL; - Node* right = NULL; - int left_attnum = 0; - int right_attnum = 0; - - if (IsA(clause->clause, OpExpr)) { - OpExpr* opclause = (OpExpr*)clause->clause; - Assert(list_length(opclause->args) == TOW_MEMBERS); - left = (Node*)linitial(opclause->args); - right = (Node*)lsecond(opclause->args); - left_attnum = read_attnum(left); - right_attnum = read_attnum(right); - } else { - Assert(IsA(clause->clause, NullTest)); - left = (Node*)((NullTest*)clause->clause)->arg; - left_attnum = read_attnum(left); - } - - if (AttnumIsInvalid(left_attnum, right_attnum)) - return false; - - switch (temp->tag) { - case ES_EQSEL: - /* - * We wouldn't have clauses like: t1.a = 1 and t1.a = 2 here - * because optimizor will find the conflict first. - */ - if (left_attnum > 0 && right_attnum > 0) - return false; - - if (bms_equal(clause->left_relids, temp->left_relids) || - (IsA(clause->clause, NullTest) && bms_equal(clause->clause_relids, temp->left_relids))) { - add_attnum_for_eqsel(temp, left_attnum, left); - } else if (bms_equal(clause->right_relids, temp->left_relids)) { - add_attnum_for_eqsel(temp, right_attnum, right); - } else - return false; - - break; - case ES_EQJOINSEL: - /* - * Normally, we shouldn't have clauses like: t1.a = t2.a and t1.b = t2.a here - * because clause is generated from equivalence class, in this case, - * t1.a, t2.a and t1.b is in one equivalence class and will only generate - * one clause. - * However, if we try to build an es_candidate using equivalence class such as tpch Q9, - * there could be some scenarios we have not forseen now. 
- * So, for safety, we still check whether the clauses is something - * like: t1.a = t2.a and t1.b = t2.a - */ - if (left_attnum == 0 || right_attnum == 0) - return false; - - if (MatchOnSameSide(clause, temp, left_attnum, right_attnum)) { - temp->left_attnums = bms_add_member(temp->left_attnums, left_attnum); - temp->right_attnums = bms_add_member(temp->right_attnums, right_attnum); - add_clause_map(temp, left_attnum, right_attnum, left, right); - } else if (MatchOnOtherSide(clause, temp, left_attnum, right_attnum)) { - temp->left_attnums = bms_add_member(temp->left_attnums, right_attnum); - temp->right_attnums = bms_add_member(temp->right_attnums, left_attnum); - add_clause_map(temp, right_attnum, left_attnum, right, left); - } else - return false; - - break; - default: - /* should not reach here */ - return false; - } - - return true; -} - -/* - * @brief add clause_map to es_candidate, clause_map is a map which can be used to find the right arg - * using an attnum of left arg in a clause. So we don't have to parse the clause again. - */ -void ES_SELECTIVITY::add_clause_map( - es_candidate* es, int left_attnum, int right_attnum, Node* left_arg, Node* right_arg) const -{ - es_clause_map* clause_map = (es_clause_map*)palloc(sizeof(es_clause_map)); - clause_map->left_attnum = left_attnum; - clause_map->right_attnum = right_attnum; - if (left_arg) { - if (IsA(left_arg, RelabelType)) - clause_map->left_var = (Var*)((RelabelType*)left_arg)->arg; - else - clause_map->left_var = (Var*)left_arg; - } - - if (right_arg) { - if (IsA(right_arg, RelabelType)) - clause_map->right_var = (Var*)((RelabelType*)right_arg)->arg; - else - clause_map->right_var = (Var*)right_arg; - } - es->clause_map = lappend(es->clause_map, clause_map); - return; -} - -/* - * @brief read RelOptInfo and RangeTblEntry, save to es_candidate - * @param node: should be a Var* - */ -void ES_SELECTIVITY::read_rel_rte(Node* node, RelOptInfo** rel, RangeTblEntry** rte) -{ - Node* basenode = NULL; - - if (IsA(node, RelabelType)) - basenode = (Node*)((RelabelType*)node)->arg; - else - basenode = node; - - if (IsA(basenode, Var)) { - Assert(root != NULL); - Assert(root->parse != NULL); - - Var* var = (Var*)basenode; - *rel = find_base_rel(root, var->varno); - *rte = rt_fetch((*rel)->relid, root->parse->rtable); - } - - return; -} - -void ES_SELECTIVITY::save_selectivity( - es_candidate* es, double left_join_ratio, double right_join_ratio, bool save_semi_join) -{ - VariableStatData vardata; - RatioType type; - if (es->tag == ES_EQSEL) - type = RatioType_Filter; - else if (es->tag == ES_EQJOINSEL) - type = RatioType_Join; - else - return; - - if (es->left_rel) { - vardata.rel = es->left_rel; - ListCell* lc = NULL; - foreach(lc, es->clause_map) { - es_clause_map* map = (es_clause_map*)lfirst(lc); - vardata.var = (Node*)map->left_var; - if (!save_semi_join) - set_varratio_after_calc_selectivity(&vardata, type, left_join_ratio, sjinfo); - /* Bloom filter can be used only when var = var. */ - if (es->tag == ES_EQJOINSEL) { - VariableStatData vardata2; - vardata2.rel = es->right_rel; - vardata2.var = (Node*)map->right_var; - /* Set var's ratio which will be used by bloom filter set. 
*/
-                set_equal_varratio(&vardata, vardata2.rel->relids, left_join_ratio, sjinfo);
-                if (!save_semi_join) {
-                    set_equal_varratio(&vardata2, vardata.rel->relids, right_join_ratio, sjinfo);
-                    set_varratio_after_calc_selectivity(&vardata2, type, right_join_ratio, sjinfo);
-                }
-            }
-        }
-    }
-    return;
-}
-
-/*
- * @brief calculate the bucket size; actually this only saves results for estimate_hash_bucketsize to use
- */
-void ES_SELECTIVITY::cal_bucket_size(es_candidate* es, es_bucketsize* bucket) const
-{
-    double tuples;
-    RelOptInfo* rel = NULL;
-    ListCell* lc = NULL;
-
-    bucket->left_relids = bms_copy(es->left_relids);
-    bucket->right_relids = bms_copy(es->right_relids);
-    bucket->left_rel = es->left_rel;
-    bucket->right_rel = es->right_rel;
-    bucket->left_distinct = es->left_stadistinct;
-    bucket->right_distinct = es->right_stadistinct;
-    bucket->left_mcvfreq = es->left_first_mcvfreq;
-    bucket->right_mcvfreq = es->right_first_mcvfreq;
-
-    if (es->left_extended_stats->dndistinct > 0)
-        bucket->left_dndistinct = es->left_extended_stats->dndistinct;
-    else {
-        rel = es->left_rel;
-        tuples = get_local_rows(
-            rel->tuples, rel->multiple, IsLocatorReplicated(rel->locator_type), ng_get_dest_num_data_nodes(rel));
-        bucket->left_dndistinct = clamp_row_est(-1 * es->left_extended_stats->dndistinct * tuples);
-    }
-
-    if (es->right_extended_stats->dndistinct > 0)
-        bucket->right_dndistinct = es->right_extended_stats->dndistinct;
-    else {
-        rel = es->right_rel;
-        tuples = get_local_rows(
-            rel->tuples, rel->multiple, IsLocatorReplicated(rel->locator_type), ng_get_dest_num_data_nodes(rel));
-        bucket->right_dndistinct = clamp_row_est(-1 * es->right_extended_stats->dndistinct * tuples);
-    }
-
-    bucket->left_hashkeys = NIL;
-    bucket->right_hashkeys = NIL;
-    foreach(lc, es->clause_map) {
-        es_clause_map* map = (es_clause_map*)lfirst(lc);
-        bucket->left_hashkeys = lappend(bucket->left_hashkeys, map->left_var);
-        bucket->right_hashkeys = lappend(bucket->right_hashkeys, map->right_var);
-    }
-
-    return;
-}
-
-Selectivity ES_SELECTIVITY::estimate_hash_bucketsize(
-    es_bucketsize* es_bucket, double* distinctnum, bool left, Path* inner_path, double nbuckets)
-{
-    double estfract, ndistinct, mcvfreq, avgfreq;
-    RelOptInfo* rel = left ? es_bucket->left_rel : es_bucket->right_rel;
-    List* hashkeys = left ? es_bucket->left_hashkeys : es_bucket->right_hashkeys;
-
-    ndistinct = estimate_local_numdistinct(es_bucket, left, inner_path);
-    *distinctnum = ndistinct;
-
-    /* Compute avg freq of all distinct data values in raw relation */
-    avgfreq = 1.0 / ndistinct;
-
-    /*
-     * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
-     * number of buckets is less than the expected number of distinct values;
-     * otherwise it is 1/ndistinct.
-     */
-    if (ndistinct > nbuckets)
-        estfract = 1.0 / nbuckets;
-    else {
-        if (ndistinct < 1.0)
-            ndistinct = 1.0;
-        estfract = 1.0 / ndistinct;
-    }
-
-    /*
-     * Look up the frequency of the most common value, if available.
-     */
-    mcvfreq = left ? es_bucket->left_mcvfreq : es_bucket->right_mcvfreq;
-
-    /* We should adjust mcvfreq by the selectivity because mcvfreq changes after the relation has been filtered or joined. */
-    mcvfreq /= (rel->rows / rel->tuples);
-    ereport(DEBUG1,
-        (errmodule(MOD_OPT_JOIN),
-            errmsg("[ES]rows=%.lf, tuples=%.lf, multiple=%.lf", rel->rows, rel->tuples, rel->multiple)));
-
-    /*
-     * Adjust estimated bucketsize upward to account for skewed distribution.
-     */
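For reference, the overall estimate, the baseline above plus the skew adjustment that follows, can be condensed into a stand-alone sketch. Illustrative only: sketch_bucket_fraction is an invented name, and the real code additionally multiplies the mcvfreq cap by the data-node count when the hash keys cover the distribute keys.

/* Sketch: baseline bucket fraction is 1/min(nbuckets, ndistinct); skew is
 * handled by scaling with mcvfreq/avgfreq and capping at mcvfreq, then the
 * result is clamped to the sane range [1.0e-7, 1.0]. */
static double sketch_bucket_fraction(double ndistinct, double nbuckets, double mcvfreq)
{
    ndistinct = Max(ndistinct, 1.0);
    double avgfreq = 1.0 / ndistinct;
    double estfract = (ndistinct > nbuckets) ? (1.0 / nbuckets) : avgfreq;

    if (mcvfreq > avgfreq) {
        estfract *= mcvfreq / avgfreq; /* the most common value dominates its bucket */
        estfract = Min(estfract, mcvfreq);
    }
    return Min(Max(estfract, 1.0e-7), 1.0);
}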
-    if (avgfreq > 0.0 && mcvfreq > avgfreq) {
-        /* if the hashkey contains the distribute key, the mcv freq should be multiplied by the dn number */
-        double multiple = 1.0;
-        /* for now, only consider the single-distribute-key situation */
-        if (list_length(hashkeys) >= list_length(rel->distribute_keys) &&
-            list_is_subset(rel->distribute_keys, hashkeys))
-            multiple = u_sess->pgxc_cxt.NumDataNodes;
-
-        estfract *= mcvfreq / avgfreq;
-        /* if the adjusted selectivity is larger than mcvfreq, the estimate is
-           too far off; take the mcvfreq instead. */
-        if (estfract > mcvfreq * multiple)
-            estfract = mcvfreq * multiple;
-    }
-
-    ereport(DEBUG1,
-        (errmodule(MOD_OPT_JOIN),
-            errmsg("[ES]ndistinct=%.lf, avgfreq=%.10f, mcvfreq=%.10f, estfract=%.10f",
-                ndistinct,
-                avgfreq,
-                mcvfreq,
-                estfract)));
-
-    /*
-     * Clamp bucketsize to sane range (the above adjustment could easily
-     * produce an out-of-range result). We set the lower bound a little above
-     * zero, since zero isn't a very sane result.
-     * We lower the bound to 1.0e-7 because the distinct value may exceed
-     * 1,000,000 as work_mem increases.
-     */
-    if (estfract < 1.0e-7)
-        estfract = 1.0e-7;
-    else if (estfract > 1.0) {
-        if (mcvfreq > 0.0)
-            estfract = mcvfreq;
-        else
-            estfract = 1.0;
-    }
-
-    return (Selectivity)estfract;
-}
-
-/*
- * @brief mostly the same as the standalone estimate_local_numdistinct()
- */
-double ES_SELECTIVITY::estimate_local_numdistinct(es_bucketsize* bucket, bool left, Path* pathnode)
-{
-    VariableStatData vardata;
-    bool usesinglestats = true;
-    double ndistinct = left ? bucket->left_dndistinct : bucket->right_dndistinct;
-    double global_distinct = left ? bucket->left_distinct : bucket->right_distinct;
-    unsigned int num_datanodes = ng_get_dest_num_data_nodes(pathnode);
-    List* hashkeys = left ? bucket->left_hashkeys : bucket->right_hashkeys;
-
-    vardata.rel = left ? bucket->left_rel : bucket->right_rel;
-
-    /* we should adjust the local distinct value if there are no tuples in dn1 for global stats. */
-    if ((ndistinct * num_datanodes) < global_distinct)
-        ndistinct = get_local_rows(global_distinct, vardata.rel->multiple, false, num_datanodes);
-
-    /* Adjust global distinct values for STREAM_BROADCAST and STREAM_REDISTRIBUTE. */
-    if (IsA(pathnode, StreamPath) || IsA(pathnode, HashPath) || IsLocatorReplicated(pathnode->locator_type)) {
-        ndistinct =
-            estimate_hash_num_distinct(root, hashkeys, pathnode, &vardata, ndistinct, global_distinct, &usesinglestats);
-    }
-
-    /*
-     * Adjust ndistinct to account for restriction clauses. Observe we are
-     * assuming that the data distribution is affected uniformly by the
-     * restriction clauses!
-     *
-     * XXX Possibly better way, but much more expensive: multiply by
-     * selectivity of rel's restriction clauses that mention the target Var.
-     *
-     * Only single-column stats need to be multiplied by the rows/tuples
-     * ratio, because the Poisson model is applied for global stats.
-     * If the Poisson model has already been applied, the ratio must not
-     * be applied again.
-     */
-    return ndistinct;
-}
-
-Selectivity ES_SELECTIVITY::cal_eqjoinsel(es_candidate* es, JoinType jointype)
-{
-    Selectivity result = 1.0;
-    RelOptInfo* inner_rel = NULL;
-    bool inner_on_left = false;
-    switch (jointype) {
-        case JOIN_INNER:
-        case JOIN_LEFT:
-        case JOIN_FULL:
-            result *= cal_eqjoinsel_inner(es);
-            break;
-        case JOIN_SEMI:
-        case JOIN_ANTI:
-        case JOIN_LEFT_ANTI_FULL:
-            /*
-             * Look up the join's inner relation.
min_righthand is sufficient - * information because neither SEMI nor ANTI joins permit any - * reassociation into or out of their RHS, so the righthand will - * always be exactly that set of rels. - */ - inner_rel = find_join_input_rel(root, sjinfo->min_righthand); - - /* inner_rel could be a join rel */ - inner_on_left = (bms_is_subset(es->left_rel->relids, inner_rel->relids)); - if (!inner_on_left) { - Assert(bms_is_subset(es->right_rel->relids, inner_rel->relids)); - } - result *= cal_eqjoinsel_semi(es, inner_rel, inner_on_left); - break; - default: - /* other values not expected here */ - break; - } - return result; -} - -/* - * @brief calculate selectivity for eqsel using multi-column statistics - */ -Selectivity ES_SELECTIVITY::cal_eqsel(es_candidate* es) -{ - Selectivity result = 1.0; - ListCell* lc = NULL; - int i = 0; - int j = 0; - int column_count = 0; - bool match = false; - - Assert(es->left_extended_stats); - - /* if all clauses are null, just use nullfrac */ - if (es->has_null_clause) { - foreach(lc, es->clause_group) { - RestrictInfo* rinfo = (RestrictInfo*)lfirst(lc); - if (!IsA(rinfo->clause, NullTest)) - break; - } - if (lc == NULL) { - result = es->left_extended_stats->nullfrac; - CLAMP_PROBABILITY(result); - save_selectivity(es, result, 0.0); - return result; - } - } - - /* if there is no MCV, just use distinct */ - if (!es->left_extended_stats->mcv_values) { - result = (result - es->left_extended_stats->nullfrac) / es->left_stadistinct; - CLAMP_PROBABILITY(result); - save_selectivity(es, result, 0.0); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), - (errmsg("[ES]extended statistic is used to calculate eqsel selectivity as %e", result)))); - return result; - } - - /* try to use MCV */ - column_count = es->clause_group->length; - - Assert(column_count == bms_num_members(es->left_extended_stats->bms_attnum)); - - /* set up attnum order */ - int* attnum_order = (int*)palloc(column_count * sizeof(int)); - set_up_attnum_order(es, attnum_order, true); - - Assert(es->left_extended_stats->mcv_nvalues / column_count == es->left_extended_stats->mcv_nnumbers); - - /* match MCV with const value from clauses */ - double sum_mcv_numbers = 0.0; - for (i = 0; i < es->left_extended_stats->mcv_nnumbers; i++) { - match = false; - j = 0; - /* process clause one by one */ - foreach(lc, es->clause_group) { - FmgrInfo eqproc; - Datum const_value; - bool var_on_left = false; - - /* set up eqproc */ - RestrictInfo* clause = (RestrictInfo*)lfirst(lc); - int mcv_position = attnum_order[j] * es->left_extended_stats->mcv_nnumbers + i; - - if (IsA(clause->clause, OpExpr)) { - OpExpr* opclause = (OpExpr*)clause->clause; - Oid opno = opclause->opno; - fmgr_info(get_opcode(opno), &eqproc); - - /* set up const value */ - Node* left = (Node*)linitial(opclause->args); - Node* right = (Node*)lsecond(opclause->args); - if (IsA(left, Const)) { - const_value = ((Const*)left)->constvalue; - var_on_left = false; - } else if (IsA(right, Const)) { - const_value = ((Const*)right)->constvalue; - var_on_left = true; - } - - Datum mcv_value = es->left_extended_stats->mcv_values[mcv_position]; - if (var_on_left) - match = DatumGetBool(FunctionCall2Coll(&eqproc, DEFAULT_COLLATION_OID, mcv_value, const_value)); - else - match = DatumGetBool(FunctionCall2Coll(&eqproc, DEFAULT_COLLATION_OID, const_value, mcv_value)); - } else { - Assert(IsA(clause->clause, NullTest)); - match = es->left_extended_stats->mcv_nulls[mcv_position]; - } - - if (!match) - break; - j++; - } - - if (match) { - result = 
es->left_extended_stats->mcv_numbers[i]; - break; - } else - sum_mcv_numbers += es->left_extended_stats->mcv_numbers[i]; - } - - if (!match) { - double sum_other_mcv_numbers = 0.0; - for (int index = 0; index < es->left_extended_stats->other_mcv_nnumbers; index++) - sum_other_mcv_numbers += es->left_extended_stats->other_mcv_numbers[index]; - result = 1.0 - sum_mcv_numbers - sum_other_mcv_numbers - es->left_extended_stats->nullfrac; - CLAMP_PROBABILITY(result); - float4 other_distinct = clamp_row_est(es->left_stadistinct - es->left_extended_stats->mcv_nnumbers); - result /= other_distinct; - - /* - * Another cross-check: selectivity shouldn't be estimated as more - * than the least common "most common value". - */ - int last_mcv_member = es->left_extended_stats->mcv_nnumbers - 1; - float4 least_common_value = es->left_extended_stats->mcv_numbers[last_mcv_member]; - if (result > least_common_value) - result = least_common_value; - } - - pfree_ext(attnum_order); - - CLAMP_PROBABILITY(result); - save_selectivity(es, result, 0.0); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), (errmsg("[ES]extended statistic is used to calculate eqsel selectivity as %e", result)))); - return result; -} - -/* - * @brief calculate selectivity for join using multi-column statistics - */ -Selectivity ES_SELECTIVITY::cal_eqjoinsel_inner(es_candidate* es) -{ - Assert(es->left_extended_stats); - Selectivity result = 1.0; - int i; - - /* update nullfrac to contain null mcv fraction */ - for (i = 0; i < es->left_extended_stats->other_mcv_nnumbers; i++) - es->left_extended_stats->nullfrac += es->left_extended_stats->other_mcv_numbers[i]; - for (i = 0; i < es->right_extended_stats->other_mcv_nnumbers; i++) - es->right_extended_stats->nullfrac += es->right_extended_stats->other_mcv_numbers[i]; - - /* if there is no MCV, just use distinct */ - if (!es->left_extended_stats->mcv_values || !es->right_extended_stats->mcv_values) { - result *= (1.0 - es->left_extended_stats->nullfrac) * (1.0 - es->right_extended_stats->nullfrac); - result /= es->left_stadistinct > es->right_stadistinct ? 
es->left_stadistinct : es->right_stadistinct; - CLAMP_PROBABILITY(result); - double left_ratio = es->right_stadistinct / es->left_stadistinct * (1.0 - es->left_extended_stats->nullfrac); - double right_ratio = es->left_stadistinct / es->right_stadistinct * (1.0 - es->right_extended_stats->nullfrac); - save_selectivity(es, left_ratio, right_ratio); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), - (errmsg("[ES]extended statistic is used to calculate eqjoinsel_inner selectivity as %e", result)))); - return result; - } - - /* try to use MCV */ - int column_count = es->clause_group->length; - Assert(column_count == bms_num_members(es->left_extended_stats->bms_attnum)); - - /* set up attnum order */ - int* left_attnum_order = (int*)palloc(column_count * sizeof(int)); - int* right_attnum_order = (int*)palloc(column_count * sizeof(int)); - set_up_attnum_order(es, left_attnum_order, true); - set_up_attnum_order(es, right_attnum_order, false); - - ListCell* lc = NULL; - FmgrInfo* eqproc = (FmgrInfo*)palloc(column_count * sizeof(FmgrInfo)); - bool* left_var_on_clause_leftside = (bool*)palloc(column_count * sizeof(bool)); - i = 0; - foreach(lc, es->clause_group) { - /* set up eqproc */ - RestrictInfo* clause = (RestrictInfo*)lfirst(lc); - OpExpr* opclause = (OpExpr*)clause->clause; - Oid opno = opclause->opno; - fmgr_info(get_opcode(opno), &(eqproc[i])); - - /* set up left_var_on_clause_leftside */ - if (bms_equal(clause->left_relids, es->left_relids)) - left_var_on_clause_leftside[i] = true; - else - left_var_on_clause_leftside[i] = false; - - i++; - } - - /* prepare to match MCVs */ - double left_relfrac = es->left_rel->rows / es->left_rel->tuples; - double right_relfrac = es->right_rel->rows / es->right_rel->tuples; - CLAMP_PROBABILITY(left_relfrac); - CLAMP_PROBABILITY(right_relfrac); - double relfrac = left_relfrac * right_relfrac; - - int left_mcv_nums = es->left_extended_stats->mcv_nnumbers; - int right_mcv_nums = es->right_extended_stats->mcv_nnumbers; - bool* left_match = (bool*)palloc0(left_mcv_nums * sizeof(bool)); - bool* right_match = (bool*)palloc0(right_mcv_nums * sizeof(bool)); - - /* - * The calculation logic here is as same as that in eqjoinsel_inner: - * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could - * be multiple matches --- but we don't look for them, both for speed - * and because the math wouldn't add up... 
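+     * (A guarded sketch of the matching loop follows.)
+     */
+#ifdef ES_SELECTIVITY_EXAMPLES
+    /*
+     * Minimal sketch of the MCV-vs-MCV matching below, with plain integer keys
+     * standing in for Datum values compared through the operator's equality
+     * function. All names are illustrative; compiled only when
+     * ES_SELECTIVITY_EXAMPLES is defined.
+     */
+    auto match_mcv_lists_sketch = [](const int* left_vals, const double* left_freq, int n_left,
+                                     const int* right_vals, const double* right_freq, int n_right,
+                                     bool* left_match, bool* right_match, int* nmatches_out) {
+        double matchprodfreq = 0.0;
+        int nmatches = 0;
+        for (int i = 0; i < n_left; i++) {
+            for (int j = 0; j < n_right; j++) {
+                if (right_match[j])
+                    continue; /* each MCV pairs with at most one member of the other list */
+                if (left_vals[i] == right_vals[j]) {
+                    left_match[i] = right_match[j] = true;
+                    matchprodfreq += left_freq[i] * right_freq[j];
+                    nmatches++;
+                    break;
+                }
+            }
+        }
+        *nmatches_out = nmatches;
+        return matchprodfreq; /* joint frequency contributed by the matched pairs */
+    };
+#endif /* ES_SELECTIVITY_EXAMPLES */
+    /*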
- */ - double matchprodfreq = 0.0; - int nmatches = 0; - for (i = 0; i < left_mcv_nums; i++) { - int j; - - for (j = 0; j < right_mcv_nums; j++) { - if (right_match[j]) - continue; - bool all_match = false; - int k = 0; - /* process clause one by one */ - foreach(lc, es->clause_group) { - int left_mcv_position = left_attnum_order[k] * left_mcv_nums + i; - int right_mcv_position = right_attnum_order[k] * right_mcv_nums + j; - Datum left_mcv_value = es->left_extended_stats->mcv_values[left_mcv_position]; - Datum right_mcv_value = es->right_extended_stats->mcv_values[right_mcv_position]; - if (left_var_on_clause_leftside[k]) - all_match = DatumGetBool( - FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, left_mcv_value, right_mcv_value)); - else - all_match = DatumGetBool( - FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, right_mcv_value, left_mcv_value)); - if (!all_match) - break; - k++; - } - - if (all_match) { - left_match[i] = right_match[j] = true; - matchprodfreq += es->left_extended_stats->mcv_numbers[i] * es->right_extended_stats->mcv_numbers[j]; - nmatches++; - break; - } - } - } - - /* adjust match freq according to relation's filter fraction */ - double left_nullfrac = es->left_extended_stats->nullfrac; - double right_nullfrac = es->right_extended_stats->nullfrac; - double left_matchfreq, right_matchfreq, left_unmatchfreq, right_unmatchfreq, left_otherfreq, right_otherfreq, - left_totalsel, right_totalsel; - int tmp_nmatches = (int)ceil((double)nmatches * relfrac); - if (nmatches != 0) - matchprodfreq *= (double)tmp_nmatches / nmatches; - CLAMP_PROBABILITY(matchprodfreq); - /* Sum up frequencies of matched and unmatched MCVs */ - left_matchfreq = left_unmatchfreq = 0.0; - for (i = 0; i < left_mcv_nums; i++) { - if (left_match[i]) - left_matchfreq += es->left_extended_stats->mcv_numbers[i]; - else - left_unmatchfreq += es->left_extended_stats->mcv_numbers[i]; - } - CLAMP_PROBABILITY(left_matchfreq); - CLAMP_PROBABILITY(left_unmatchfreq); - right_matchfreq = right_unmatchfreq = 0.0; - for (i = 0; i < right_mcv_nums; i++) { - if (right_match[i]) - right_matchfreq += es->right_extended_stats->mcv_numbers[i]; - else - right_unmatchfreq += es->right_extended_stats->mcv_numbers[i]; - } - CLAMP_PROBABILITY(right_matchfreq); - CLAMP_PROBABILITY(right_unmatchfreq); - pfree_ext(left_match); - pfree_ext(right_match); - pfree_ext(left_attnum_order); - pfree_ext(right_attnum_order); - - /* - * Compute total frequency of non-null values that are not in the MCV - * lists. - */ - left_otherfreq = 1.0 - left_nullfrac - left_matchfreq - left_unmatchfreq; - right_otherfreq = 1.0 - right_nullfrac - right_matchfreq - right_unmatchfreq; - CLAMP_PROBABILITY(left_otherfreq); - CLAMP_PROBABILITY(right_otherfreq); - - /* - * We can estimate the total selectivity from the point of view of - * relation 1 as: the known selectivity for matched MCVs, plus - * unmatched MCVs that are assumed to match against random members of - * relation 2's non-MCV population, plus non-MCV values that are - * assumed to match against random members of relation 2's unmatched - * MCVs plus non-MCV values. 
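+     * (A guarded sketch of this formula follows.)
+     */
+#ifdef ES_SELECTIVITY_EXAMPLES
+    /*
+     * Minimal sketch of the total-selectivity estimate below, seen from the
+     * point of view of relation 1; nd2 is relation 2's distinct count and
+     * relfrac the product of both restriction fractions. Names are
+     * illustrative; compiled only when ES_SELECTIVITY_EXAMPLES is defined.
+     */
+    auto totalsel_side1_sketch = [](double matchprodfreq, double unmatchfreq1, double otherfreq1,
+                                    double unmatchfreq2, double otherfreq2, double nd2,
+                                    double mcvs_used2, double mcvs_matched, double relfrac) {
+        double totalsel = matchprodfreq;
+        /* unmatched MCVs of side 1 against random members of side 2's non-MCV population */
+        if (nd2 > mcvs_used2)
+            totalsel += unmatchfreq1 * otherfreq2 / (nd2 - mcvs_used2) * relfrac;
+        /* non-MCV values of side 1 against side 2's unmatched MCVs and non-MCV values */
+        if (nd2 > mcvs_matched)
+            totalsel += otherfreq1 * (otherfreq2 + unmatchfreq2) / (nd2 - mcvs_matched) * relfrac;
+        return totalsel;
+    };
+#endif /* ES_SELECTIVITY_EXAMPLES */
+    /*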
- */ - int left_nvalues_frac = (int)ceil((double)left_mcv_nums * left_relfrac); - int right_nvalues_frac = (int)ceil((double)right_mcv_nums * right_relfrac); - left_totalsel = matchprodfreq; - if (es->right_extended_stats->distinct > right_nvalues_frac) - left_totalsel += - left_unmatchfreq * right_otherfreq / (es->right_extended_stats->distinct - right_nvalues_frac) * relfrac; - if (es->right_extended_stats->distinct > tmp_nmatches) - left_totalsel += left_otherfreq * (right_otherfreq + right_unmatchfreq) / - (es->right_extended_stats->distinct - tmp_nmatches) * relfrac; - /* Same estimate from the point of view of relation 2. */ - right_totalsel = matchprodfreq; - if (es->left_extended_stats->distinct > left_nvalues_frac) - right_totalsel += - right_unmatchfreq * left_otherfreq / (es->left_extended_stats->distinct - left_nvalues_frac) * relfrac; - if (es->left_extended_stats->distinct > tmp_nmatches) - right_totalsel += right_otherfreq * (left_otherfreq + left_unmatchfreq) / - (es->left_extended_stats->distinct - tmp_nmatches) * relfrac; - - /* - * Use the smaller of the two estimates. This can be justified in - * essentially the same terms as given below for the no-stats case: to - * a first approximation, we are estimating from the point of view of - * the relation with smaller nd. - */ - if (relfrac == 0) - result = 0; - else - result = (left_totalsel < right_totalsel) ? left_totalsel / relfrac : right_totalsel / relfrac; - - /* - * calculate join ratio for both two tables, admitting that smaller distinct - * values will be all joined out - */ - double left_join_ratio = 0.0; - double right_join_ratio = 0.0; - - if (nmatches != 0 && left_relfrac != 0 && right_relfrac != 0) { - left_join_ratio = right_matchfreq * tmp_nmatches / (nmatches * left_relfrac); - right_join_ratio = right_matchfreq * tmp_nmatches / (nmatches * right_relfrac); - } - - if (es->left_extended_stats->distinct > es->right_extended_stats->distinct) { - if (es->left_extended_stats->distinct != tmp_nmatches) { - left_join_ratio += left_otherfreq * (es->right_extended_stats->distinct - tmp_nmatches) / - (es->left_extended_stats->distinct - tmp_nmatches); - } - right_join_ratio += right_otherfreq; - } else if (es->left_extended_stats->distinct < es->right_extended_stats->distinct) { - if (es->right_extended_stats->distinct != tmp_nmatches) { - right_join_ratio += right_otherfreq * (es->left_extended_stats->distinct - tmp_nmatches) / - (es->right_extended_stats->distinct - tmp_nmatches); - } - left_join_ratio += left_otherfreq; - } - CLAMP_PROBABILITY(left_join_ratio); - CLAMP_PROBABILITY(right_join_ratio); - CLAMP_PROBABILITY(result); - - save_selectivity(es, left_join_ratio, right_join_ratio); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), - (errmsg("[ES]extended statistic is used to calculate eqjoinsel selectivity as %e", result)))); - - /* save mcv freq to calculate skew or hash bucket size */ - es->left_first_mcvfreq = es->left_extended_stats->mcv_numbers[0]; - es->right_first_mcvfreq = es->right_extended_stats->mcv_numbers[0]; - - return result; -} - -Selectivity ES_SELECTIVITY::cal_eqjoinsel_semi(es_candidate* es, RelOptInfo* inner_rel, bool inner_on_left) -{ - Assert(es->left_extended_stats); - Selectivity result = 1.0; - double nullfrac = inner_on_left ? es->right_extended_stats->nullfrac : es->left_extended_stats->nullfrac; - double inner_distinct = inner_on_left ? es->left_stadistinct : es->right_stadistinct; - double outer_distinct = inner_on_left ? 
es->right_stadistinct : es->left_stadistinct; - - /* - * Clamp inner_distinct to be not more than what we estimate the inner relation's - * size to be, especially when inner_rel can be a joined rel. - */ - inner_distinct = Min(inner_distinct, inner_rel->rows); - - /* if there is no MCV, just use distinct */ - if (!es->left_extended_stats->mcv_values || !es->right_extended_stats->mcv_values) { - result *= (1.0 - nullfrac); - if (inner_distinct < outer_distinct) - result *= inner_distinct / outer_distinct; - } else { - /* try to use MCV */ - int column_count = es->clause_group->length; - Assert(column_count == bms_num_members(es->left_extended_stats->bms_attnum)); - - /* set up attnum order */ - int* left_attnum_order = (int*)palloc(column_count * sizeof(int)); - int* right_attnum_order = (int*)palloc(column_count * sizeof(int)); - set_up_attnum_order(es, left_attnum_order, true); - set_up_attnum_order(es, right_attnum_order, false); - - ListCell* lc = NULL; - FmgrInfo* eqproc = (FmgrInfo*)palloc(column_count * sizeof(FmgrInfo)); - bool* left_var_on_clause_leftside = (bool*)palloc(column_count * sizeof(bool)); - int i = 0; - foreach(lc, es->clause_group) { - /* set up eqproc */ - RestrictInfo* clause = (RestrictInfo*)lfirst(lc); - OpExpr* opclause = (OpExpr*)clause->clause; - Oid opno = opclause->opno; - fmgr_info(get_opcode(opno), &(eqproc[i])); - - /* set up left_var_on_clause_leftside */ - if (bms_equal(clause->left_relids, es->left_relids)) - left_var_on_clause_leftside[i] = true; - else - left_var_on_clause_leftside[i] = false; - - i++; - } - - /* prepare to match MCVs */ - int left_mcv_nums = es->left_extended_stats->mcv_nnumbers; - int right_mcv_nums = es->right_extended_stats->mcv_nnumbers; - bool* left_match = (bool*)palloc0(left_mcv_nums * sizeof(bool)); - bool* right_match = (bool*)palloc0(right_mcv_nums * sizeof(bool)); - - /* - * The calculation logic here is as same as that in eqjoinsel_inner: - * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could - * be multiple matches --- but we don't look for them, both for speed - * and because the math wouldn't add up... - */ - double matchprodfreq = 0.0; - int nmatches = 0; - for (i = 0; i < left_mcv_nums; i++) { - int j; - - for (j = 0; j < right_mcv_nums; j++) { - if (right_match[j]) - continue; - bool all_match = false; - int k = 0; - /* process clause one by one */ - foreach(lc, es->clause_group) { - int left_mcv_position = left_attnum_order[k] * left_mcv_nums + i; - int right_mcv_position = right_attnum_order[k] * right_mcv_nums + j; - Datum left_mcv_value = es->left_extended_stats->mcv_values[left_mcv_position]; - Datum right_mcv_value = es->right_extended_stats->mcv_values[right_mcv_position]; - if (left_var_on_clause_leftside[k]) - all_match = DatumGetBool( - FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, left_mcv_value, right_mcv_value)); - else - all_match = DatumGetBool( - FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, right_mcv_value, left_mcv_value)); - if (!all_match) - break; - k++; - } - - if (all_match) { - left_match[i] = right_match[j] = true; - nmatches++; - break; - } - } - } - - pfree_ext(left_attnum_order); - pfree_ext(right_attnum_order); - matchprodfreq = 1.0; - int mcv_num = inner_on_left ? 
left_mcv_nums : right_mcv_nums; - for (i = 0; i < mcv_num; i++) { - if (inner_on_left && right_match[i]) - matchprodfreq += es->right_extended_stats->mcv_numbers[i]; - else if (!inner_on_left && left_match[i]) - matchprodfreq += es->left_extended_stats->mcv_numbers[i]; - } - CLAMP_PROBABILITY(matchprodfreq); - - /* - * Now we need to estimate the fraction of relation 1 that has at - * least one join partner. We know for certain that the matched MCVs - * do, so that gives us a lower bound, but we're really in the dark - * about everything else. Our crude approach is: if nd1 <= nd2 then - * assume all non-null rel1 rows have join partners, else assume for - * the uncertain rows that a fraction nd2/nd1 have join partners. We - * can discount the known-matched MCVs from the distinct-values counts - * before doing the division. - * - * Crude as the above is, it's completely useless if we don't have - * reliable ndistinct values for both sides. Hence, if either nd1 or - * nd2 is default, punt and assume half of the uncertain rows have - * join partners. - */ - inner_distinct -= nmatches; - outer_distinct -= nmatches; - double uncertainfrac, uncertain; - if (inner_distinct >= outer_distinct || inner_distinct < 0) - uncertainfrac = 1.0; - else - uncertainfrac = inner_distinct / outer_distinct; - uncertain = 1.0 - matchprodfreq - nullfrac; - CLAMP_PROBABILITY(uncertain); - result = matchprodfreq + uncertainfrac * uncertain; - } - - CLAMP_PROBABILITY(result); - save_selectivity(es, result, 0.0, true); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), - (errmsg("[ES]extended statistic is used to calculate eqjoinsel_semi selectivity as %e", result)))); - return result; -} - -/* - * @brief calculate distinct for groupby using multi-column statistics, - * in order to use the result in estimate_num_groups(), - * build a varinfo and save it in unmatched_clause_group. - */ -void ES_SELECTIVITY::build_pseudo_varinfo(es_candidate* es, STATS_EST_TYPE eType) -{ - /* build pseudo varinfo here with multi-column distinct */ - GroupVarInfo* varinfo = (GroupVarInfo*)palloc(sizeof(GroupVarInfo)); - varinfo->var = NULL; - varinfo->rel = es->left_rel; - if (eType == STATS_TYPE_GLOBAL) - varinfo->ndistinct = es->left_stadistinct; - else { - /* get local distinct */ - if (es->left_extended_stats->dndistinct < 0) { - double ntuples = get_local_rows(es->left_rel->tuples, - es->left_rel->multiple, - IsLocatorReplicated(es->left_rel->locator_type), - ng_get_dest_num_data_nodes(es->left_rel)); - varinfo->ndistinct = clamp_row_est( - -1 * es->left_extended_stats->dndistinct * ntuples * (1.0 - es->left_extended_stats->nullfrac)); - } else - varinfo->ndistinct = clamp_row_est(es->left_extended_stats->dndistinct); - } - varinfo->isdefault = false; - varinfo->es_is_used = true; - varinfo->es_attnums = bms_copy(es->left_attnums); - unmatched_clause_group = lappend(unmatched_clause_group, varinfo); - ereport(ES_DEBUG_LEVEL, - (errmodule(MOD_OPT), - (errmsg("[ES]extended statistic is used to calculate groupby distinct as %f", varinfo->ndistinct)))); - - return; -} - -/* - * @brief delete an invalid es_candidate, the memory will be free in clear(). 
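+ * (A guarded sketch of the semi-join math above follows this comment.)
+ */
+#ifdef ES_SELECTIVITY_EXAMPLES
+/*
+ * Minimal numeric sketch of the estimate in cal_eqjoinsel_semi above: the
+ * matched MCV frequency is a lower bound, and an inner/outer distinct-ratio
+ * share of the remaining non-null rows is assumed to find a join partner.
+ * The function name is illustrative; compiled only when
+ * ES_SELECTIVITY_EXAMPLES is defined.
+ */
+static double eqjoinsel_semi_sketch(double matchfreq, double nullfrac,
+                                    double inner_distinct, double outer_distinct, int nmatches)
+{
+    /* discount the known-matched MCVs from both distinct counts */
+    inner_distinct -= nmatches;
+    outer_distinct -= nmatches;
+
+    double uncertainfrac;
+    if (inner_distinct >= outer_distinct || inner_distinct < 0)
+        uncertainfrac = 1.0;
+    else
+        uncertainfrac = inner_distinct / outer_distinct;
+
+    double uncertain = 1.0 - matchfreq - nullfrac;
+    CLAMP_PROBABILITY(uncertain);
+    return matchfreq + uncertainfrac * uncertain;
+}
+#endif /* ES_SELECTIVITY_EXAMPLES */
+/*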
- */ -void ES_SELECTIVITY::remove_candidate(es_candidate* es) -{ - switch (es->tag) { - case ES_EQSEL: - case ES_EQJOINSEL: - case ES_GROUPBY: - unmatched_clause_group = list_concat(unmatched_clause_group, es->clause_group); - break; - default: - break; - } - es->tag = ES_EMPTY; - es->clause_group = NULL; - - return; -} - -/* - * @brief remove corresponding stuff from the left_attnums of es_candidate according dump attnum, - * dump attnum should be delete outside this function. - */ -void ES_SELECTIVITY::remove_attnum(es_candidate* es, int dump_attnum) -{ - /* delete clause and clause_map according to dump */ - ListCell* lc_clause = list_head(es->clause_group); - ListCell* lc_clause_map = NULL; - ListCell* prev_clause = NULL; - ListCell* prev_clause_map = NULL; - foreach(lc_clause_map, es->clause_map) { - es_clause_map* clause_map = (es_clause_map*)lfirst(lc_clause_map); - - if (clause_map->left_attnum == dump_attnum) { - if (es->tag == ES_EQJOINSEL) { - Assert(bms_is_member(clause_map->right_attnum, es->right_attnums)); - es->right_attnums = bms_del_member(es->right_attnums, clause_map->right_attnum); - } - - /* if found, delete clause map from the list */ - pfree_ext(clause_map); - lfirst(lc_clause_map) = NULL; - es->clause_map = list_delete_cell(es->clause_map, lc_clause_map, prev_clause_map); - - /* delete clause from list and add to unmatch list */ - unmatched_clause_group = lappend(unmatched_clause_group, lfirst(lc_clause)); - lfirst(lc_clause) = NULL; - es->clause_group = list_delete_cell(es->clause_group, lc_clause, prev_clause); - - /* no need to continue, just try to find next matched stats */ - break; - } else { - prev_clause = lc_clause; - prev_clause_map = lc_clause_map; - lc_clause = lc_clause->next; - } - } - - return; -} - -/* - * @brief Set up attnum order according to clause map, so that we can use this order - * to locate the corresponding clause when we go through the bitmap of attnums. - * We need this order because the clause is ordered by its position in clauselist and mcv - * in extended stats are ordered by attnum. - * left == true : set up left attnum order ; left == false : set up right attnum order . - */ -void ES_SELECTIVITY::set_up_attnum_order(es_candidate* es, int* attnum_order, bool left) const -{ - int i; - int j = 0; - ListCell* lc = NULL; - foreach(lc, es->clause_map) { - i = 0; - int attnum = 0; - es_clause_map* clause_map = (es_clause_map*)lfirst(lc); - Bitmapset* tmpset = left ? 
bms_copy(es->left_attnums) : bms_copy(es->right_attnums); - while ((attnum = bms_first_member(tmpset)) >= 0) { - if ((left && attnum == clause_map->left_attnum) || (!left && attnum == clause_map->right_attnum)) { - attnum_order[j] = i; - break; - } - i++; - } - pfree_ext(tmpset); - j++; - } - return; -} - -/* - * @brief print debug info including ES type, involving rels and clauses - */ -void ES_SELECTIVITY::debug_print() -{ - if (log_min_messages > ES_DEBUG_LEVEL) - return; - ListCell* l = NULL; - foreach(l, es_candidate_list) { - es_candidate* temp = (es_candidate*)lfirst(l); - ereport(DEBUG1, - (errmodule(MOD_OPT_JOIN), - errmsg("[ES]ES_TYPE = %d (0:empty; 1:eqsel; 2:eqjoinsel; 3:group by)", (int)temp->tag))); - if (temp->tag == ES_EMPTY) - continue; - - print_relids(temp->relids, "All rels:"); - - if (temp->left_rte) { - print_relids(temp->left_relids, "Left rels:"); - print_relids(temp->left_attnums, "Left attnums:"); - print_rel(temp->left_rte); - } - - if (temp->right_rte) { - print_relids(temp->right_relids, "Right rels:"); - print_relids(temp->right_attnums, "Right attnums:"); - print_rel(temp->right_rte); - } - - switch (temp->tag) { - case ES_EQSEL: - case ES_EQJOINSEL: - print_clauses(temp->clause_group); - break; - default: - break; - } - } - return; -} - -void ES_SELECTIVITY::print_rel(RangeTblEntry* rel) const -{ - StringInfoData buf; - initStringInfo(&buf); - appendStringInfo(&buf, "%s, relkind: %c, inheritance or not:%d", rel->relname, rel->relkind, (int)rel->inh); - ereport(DEBUG1, (errmodule(MOD_OPT_JOIN), (errmsg("[ES]%s", buf.data)))); - pfree_ext(buf.data); - return; -} - -void ES_SELECTIVITY::print_relids(Bitmapset* relids, const char* str) const -{ - StringInfoData buf; - initStringInfo(&buf); - Relids tmprelids = bms_copy(relids); - int x; - appendStringInfoString(&buf, str); - while ((x = bms_first_member(tmprelids)) >= 0) { - appendStringInfo(&buf, "%d, ", x); - } - bms_free_ext(tmprelids); - ereport(DEBUG1, (errmodule(MOD_OPT_JOIN), (errmsg("[ES]%s", buf.data)))); - pfree_ext(buf.data); - return; -} - -void ES_SELECTIVITY::print_clauses(List* clauses) const -{ - if (root == NULL || list_length(clauses) == 0) - return; - - ListCell* l = NULL; - StringInfoData buf; - initStringInfo(&buf); - - appendStringInfo(&buf, "Clause length:%d, clause list:", list_length(clauses)); - - foreach(l, clauses) { - RestrictInfo* c = (RestrictInfo*)lfirst(l); - char* expr = print_expr((Node*)c->clause, root->parse->rtable); - appendStringInfoString(&buf, expr); - pfree_ext(expr); - if (lnext(l)) - appendStringInfoString(&buf, ", "); - } - - ereport(DEBUG1, (errmodule(MOD_OPT_JOIN), errmsg("[ES]%s", buf.data))); - - pfree_ext(buf.data); - - return; -} - -char* ES_SELECTIVITY::print_expr(const Node* expr, const List* rtable) const -{ - return ExprToString(expr, rtable); -} +/* ------------------------------------------------------------------------- + * + * es_selectivity.cpp + * Routines to compute multi-column selectivities + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/gausskernel/optimizer/path/es_selectivity.cpp + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "catalog/pg_collation.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_statistic.h" +#include "optimizer/cost.h" +#include "optimizer/pathnode.h" +#include "optimizer/var.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" +#include "nodes/print.h" +#include "parser/parsetree.h" +#include "utils/extended_statistics.h" + +const int TOW_MEMBERS = 2; + +ES_SELECTIVITY::ES_SELECTIVITY() + : es_candidate_list(NULL), + es_candidate_saved(NULL), + unmatched_clause_group(NULL), + root(NULL), + sjinfo(NULL), + origin_clauses(NULL), + path(NULL), + bucketsize_list(NULL) +{} + +ES_SELECTIVITY::~ES_SELECTIVITY() +{} + +bool ES_SELECTIVITY::ContainIndexCols(const es_candidate* es, const IndexOptInfo* index) const +{ + for (int pos = 0; pos < index->ncolumns; pos++) { + int indexAttNum = index->indexkeys[pos]; + /* + * Notice: indexAttNum can be negative. Some indexAttNums of junk column may be negative + * since they are located before the first visible column. for example, the indexAttNum + * of 'oid' column in system table 'pg_class' is -2. + */ + if (indexAttNum >= 0 && !bms_is_member(indexAttNum, es->left_attnums)) + return false; + } + + return true; +} + +bool ES_SELECTIVITY::MatchUniqueIndex(const es_candidate* es) const +{ + ListCell* lci = NULL; + foreach (lci, es->left_rel->indexlist) { + IndexOptInfo* indexToMatch = (IndexOptInfo*)lfirst(lci); + if (indexToMatch->relam == BTREE_AM_OID && indexToMatch->unique + && ContainIndexCols(es, indexToMatch)) { + return true; + } + } + + return false; +} + +/* + * check whether the equality constraints match an unique index. + * We know the result only has one row if finding a matched unique index. + */ +void ES_SELECTIVITY::CalSelWithUniqueIndex(Selectivity &result) +{ + List* es_candidate_used = NULL; + ListCell* l = NULL; + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + if (temp->tag == ES_EQSEL && MatchUniqueIndex(temp) && + temp->left_rel && temp->left_rel->tuples >= 1.0) { + result *= 1.0 / temp->left_rel->tuples; + es_candidate_used = lappend(es_candidate_used, temp); + } + } + + /* + * Finally, we need to delete es_candidates which have already used. The rests es_candidates + * will calculate with statistic info. 
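+     * (A guarded sketch of the shortcut follows.)
+     */
+#ifdef ES_SELECTIVITY_EXAMPLES
+    /*
+     * Minimal sketch of the unique-index shortcut applied above: once the
+     * equality clauses are known to cover every column of a unique btree
+     * index, at most one row can qualify, so the candidate contributes
+     * 1/tuples. Names are illustrative; compiled only when
+     * ES_SELECTIVITY_EXAMPLES is defined.
+     */
+    auto unique_index_eqsel_sketch = [](bool covers_unique_index, double rel_tuples) {
+        if (covers_unique_index && rel_tuples >= 1.0)
+            return 1.0 / rel_tuples;
+        return 1.0; /* otherwise leave the running selectivity unchanged */
+    };
+#endif /* ES_SELECTIVITY_EXAMPLES */
+    /*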
+ */ + es_candidate_saved = es_candidate_list; + es_candidate_list = list_difference_ptr(es_candidate_list, es_candidate_used); + + list_free(es_candidate_used); +} + +/* + * @brief Main entry for using extended statistic to calculate selectivity + * root_input can only be NULL when processing group by clauses + */ +Selectivity ES_SELECTIVITY::calculate_selectivity(PlannerInfo* root_input, List* clauses_input, + SpecialJoinInfo* sjinfo_input, JoinType jointype, JoinPath* path_input, es_type action, STATS_EST_TYPE eType) +{ + Selectivity result = 1.0; + root = root_input; + sjinfo = sjinfo_input; + origin_clauses = clauses_input; + path = path_input; + + /* group clauselist */ + if (action == ES_GROUPBY) { + /* group clauselist for group by clauses */ + group_clauselist_groupby(origin_clauses); + } else { + group_clauselist(origin_clauses); + } + + /* + * Before reading statistic, We check whether the equality constraints match an + * unique index. We know the result only has one row if finding a matched unique index. + */ + CalSelWithUniqueIndex(result); + + /* read statistic */ + read_statistic(); + + /* calculate selectivity */ + ListCell* l = NULL; + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + switch (temp->tag) { + case ES_EQSEL: + result *= cal_eqsel(temp); + break; + case ES_EQJOINSEL: + /* compute hash bucket size */ + if (action == ES_COMPUTEBUCKETSIZE) { + es_bucketsize* bucket = (es_bucketsize*)palloc(sizeof(es_bucketsize)); + cal_bucket_size(temp, bucket); + bucketsize_list = lappend(bucketsize_list, bucket); + } else { + result *= cal_eqjoinsel(temp, jointype); + } + break; + case ES_GROUPBY: + build_pseudo_varinfo(temp, eType); + break; + default: + break; + } + } + + es_candidate_list = es_candidate_saved; + + /* free memory, but unmatched_clause_group need to be free manually */ + clear(); + + return result; +} + +/* + * @brief group clause by clause type and involving rels, for now, only support eqsel and eqjoinsel + */ +void ES_SELECTIVITY::group_clauselist(List* clauses) +{ + ListCell* l = NULL; + foreach(l, clauses) { + Node* clause = (Node*)lfirst(l); + + if (!IsA(clause, RestrictInfo)) { + unmatched_clause_group = lappend(unmatched_clause_group, clause); + continue; + } + + RestrictInfo* rinfo = (RestrictInfo*)clause; + if (rinfo->pseudoconstant || rinfo->norm_selec > 1 || rinfo->orclause) { + unmatched_clause_group = lappend(unmatched_clause_group, clause); + continue; + } + + if (is_opclause(rinfo->clause)) { + OpExpr* opclause = (OpExpr*)rinfo->clause; + Oid opno = opclause->opno; + + /* only handle "=" operator */ + if (get_oprrest(opno) == EQSELRETURNOID) { + int relid_num = bms_num_members(rinfo->clause_relids); + if (relid_num == 1) { + /* only process clause like t1.a = 1, so only one relid */ + load_eqsel_clause(rinfo); + continue; + } else if (relid_num == TOW_MEMBERS) { + /* only process clause like t1.a = t2.b, so only two relids */ + load_eqjoinsel_clause(rinfo); + continue; + } else { + unmatched_clause_group = lappend(unmatched_clause_group, rinfo); + continue; + } + } + } else if ((rinfo->clause) != NULL && IsA(rinfo->clause, NullTest)) { + NullTest* nullclause = (NullTest*)rinfo->clause; + int relid_num = bms_num_members(rinfo->clause_relids); + if (relid_num == 1 && nullclause->nulltesttype == IS_NULL) { + load_eqsel_clause(rinfo); + continue; + } + } + + unmatched_clause_group = lappend(unmatched_clause_group, clause); + } + + recheck_candidate_list(); + debug_print(); + + return; +} + +/* + * @brief group 
groupby-clause by clause type and involving rels, for + */ +void ES_SELECTIVITY::group_clauselist_groupby(List* varinfos) +{ + ListCell* l = NULL; + foreach(l, varinfos) { + GroupVarInfo* varinfo = (GroupVarInfo*)lfirst(l); + if (!is_var_node(varinfo->var)) { + unmatched_clause_group = lappend(unmatched_clause_group, varinfo); + continue; + } + + Var* var = NULL; + if (IsA(varinfo->var, RelabelType)) + var = (Var*)((RelabelType*)varinfo->var)->arg; + else + var = (Var*)varinfo->var; + + ListCell* l2 = NULL; + bool found_match = false; + foreach(l2, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l2); + + if (temp->tag != ES_GROUPBY) + continue; + + if (varinfo->rel == temp->left_rel) { + /* only use left attnums for group by clauses */ + temp->left_attnums = bms_add_member(temp->left_attnums, var->varattno); + temp->clause_group = lappend(temp->clause_group, varinfo); + add_clause_map(temp, var->varattno, 0, (Node*)var, NULL); + found_match = true; + break; + } + } + + /* if not matched, build a new cell in es_candidate_list */ + if (!found_match) { + es_candidate* es = (es_candidate*)palloc(sizeof(es_candidate)); + RelOptInfo* temp_rel = NULL; + + init_candidate(es); + + es->tag = ES_GROUPBY; + es->left_rel = varinfo->rel; + es->left_relids = bms_copy(varinfo->rel->relids); + es->left_attnums = bms_add_member(es->left_attnums, var->varattno); + add_clause_map(es, var->varattno, 0, (Node*)var, NULL); + read_rel_rte(varinfo->var, &temp_rel, &es->left_rte); + Assert(es->left_rel == temp_rel); + es->clause_group = lappend(es->clause_group, varinfo); + es_candidate_list = lappend(es_candidate_list, es); + } + } + + recheck_candidate_list(); + debug_print(); + + return; +} + +/* + * @brief initial es_candidate, set all elements to default value or NULL + */ +void ES_SELECTIVITY::init_candidate(es_candidate* es) const +{ + es->tag = ES_EMPTY; + es->relids = NULL; + es->left_relids = NULL; + es->right_relids = NULL; + es->left_attnums = NULL; + es->right_attnums = NULL; + es->left_stadistinct = 0.0; + es->right_stadistinct = 0.0; + es->left_first_mcvfreq = 0.0; + es->right_first_mcvfreq = 0.0; + es->left_rel = NULL; + es->right_rel = NULL; + es->left_rte = NULL; + es->right_rte = NULL; + es->clause_group = NIL; + es->clause_map = NIL; + es->left_extended_stats = NULL; + es->right_extended_stats = NULL; + es->pseudo_clause_list = NIL; + es->has_null_clause = false; + return; +} + +/* + * @brief free memory used in calculate_selectivity except unmatched_clause_group + */ +void ES_SELECTIVITY::clear() +{ + /* delete es_candidate_list */ + ListCell* l = NULL; + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + bms_free_ext(temp->relids); + bms_free_ext(temp->left_relids); + bms_free_ext(temp->right_relids); + bms_free_ext(temp->left_attnums); + bms_free_ext(temp->right_attnums); + temp->left_rel = NULL; + temp->right_rel = NULL; + temp->left_rte = NULL; + temp->right_rte = NULL; + list_free_ext(temp->clause_group); + list_free_deep(temp->clause_map); + clear_extended_stats(temp->left_extended_stats); + clear_extended_stats(temp->right_extended_stats); + list_free_ext(temp->pseudo_clause_list); + } + list_free_deep(es_candidate_list); + + /* + * unmatched_clause_group need to be free manually after + * it is used in clause_selectivity(). 
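+     *
+     * A typical call sequence, sketched with hypothetical call-site variables
+     * (root, clauses, sjinfo and lc are assumed to be in scope):
+     *
+     *     ES_SELECTIVITY es;
+     *     Selectivity sel = es.calculate_selectivity(root, clauses, sjinfo,
+     *         JOIN_INNER, NULL, ES_EQJOINSEL, STATS_TYPE_GLOBAL);
+     *     foreach(lc, es.unmatched_clause_group)
+     *         sel *= clause_selectivity(root, (Node*)lfirst(lc), 0, JOIN_INNER, sjinfo);
+     *     list_free_ext(es.unmatched_clause_group);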
+ */ + root = NULL; + sjinfo = NULL; + origin_clauses = NULL; + return; +} + +/* + * @brief free memory used by saving extended_stats after calculation + */ +void ES_SELECTIVITY::clear_extended_stats(ExtendedStats* extended_stats) const +{ + if (extended_stats) { + bms_free_ext(extended_stats->bms_attnum); + if (extended_stats->mcv_numbers) + pfree_ext(extended_stats->mcv_numbers); + if (extended_stats->mcv_values) + pfree_ext(extended_stats->mcv_values); + if (extended_stats->mcv_nulls) + pfree_ext(extended_stats->mcv_nulls); + if (extended_stats->other_mcv_numbers) + pfree_ext(extended_stats->other_mcv_numbers); + pfree_ext(extended_stats); + extended_stats = NULL; + } + return; +} + +/* + * @brief free memory of extended_stats_list by calling clear_extended_stats + */ +void ES_SELECTIVITY::clear_extended_stats_list(List* stats_list) const +{ + if (stats_list) { + ListCell* lc = NULL; + foreach(lc, stats_list) { + ExtendedStats* extended_stats = (ExtendedStats*)lfirst(lc); + clear_extended_stats(extended_stats); + } + list_free_ext(stats_list); + } + return; +} + +/* + * @brief copy the original pointer, repoint it to something else + * in order to avoid failure when using list_free + * @param ListCell* l + * @return + * @exception None + */ +ExtendedStats* ES_SELECTIVITY::copy_stats_ptr(ListCell* l) const +{ + ExtendedStats* result = (ExtendedStats*)lfirst(l); + lfirst(l) = NULL; + return result; +} + +/* + * @brief add an eqjsel clause to es_candidate_list and group by relid + * we should have bms_num_members(clause->clause_relids) == 1 + */ +void ES_SELECTIVITY::load_eqsel_clause(RestrictInfo* clause) +{ + /* group clause by rels, add to es_candidate_list */ + ListCell* l = NULL; + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + + if (temp->tag != ES_EQSEL) + continue; + + if (bms_equal(clause->clause_relids, temp->relids)) { + if (add_attnum(clause, temp)) { + temp->clause_group = lappend(temp->clause_group, clause); + if (IsA(clause->clause, NullTest)) + temp->has_null_clause = true; + return; + } + } + } + + /* if not matched, build a new cell in es_candidate_list */ + if (!build_es_candidate(clause, ES_EQSEL)) + unmatched_clause_group = lappend(unmatched_clause_group, clause); + + return; +} + +/* + * @brief add an eqjoinsel clause to es_candidate_list and group by relid + */ +void ES_SELECTIVITY::load_eqjoinsel_clause(RestrictInfo* clause) +{ + /* + * the relids in the clause should be as same as sjinfo, so we can avoid parameterized conditon. 
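+     * (A guarded sketch of the check follows.)
+     */
+#ifdef ES_SELECTIVITY_EXAMPLES
+    /*
+     * Minimal sketch of the guard below: a join clause is only kept when it
+     * touches both sides of the special-join info; anything else is treated
+     * as a parameterized condition and left to the ordinary per-clause path.
+     * The lambda name is illustrative; compiled only when
+     * ES_SELECTIVITY_EXAMPLES is defined.
+     */
+    auto clause_covers_sjinfo_sketch = [](const Bitmapset* clause_relids, const Bitmapset* min_lefthand,
+                                          const Bitmapset* min_righthand) {
+        return bms_overlap(min_lefthand, clause_relids) && bms_overlap(min_righthand, clause_relids);
+    };
+#endif /* ES_SELECTIVITY_EXAMPLES */
+    /*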
+ */ + if (sjinfo) { + if (!bms_overlap(sjinfo->min_lefthand, clause->clause_relids) || + !bms_overlap(sjinfo->min_righthand, clause->clause_relids)) { + unmatched_clause_group = lappend(unmatched_clause_group, clause); + return; + } + } + + /* group clause by rels, add to es_candidate_list */ + if (bms_num_members(clause->left_relids) == 1 && bms_num_members(clause->right_relids) == 1) { + ListCell* l = NULL; + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + + if (temp->tag != ES_EQJOINSEL) + continue; + + if (bms_equal(clause->clause_relids, temp->relids)) { + if (add_attnum(clause, temp)) { + temp->clause_group = lappend(temp->clause_group, clause); + return; + } + } + } + + /* if not matched, build a new cell in es_candidate_list */ + if (!build_es_candidate(clause, ES_EQJOINSEL)) + unmatched_clause_group = lappend(unmatched_clause_group, clause); + + return; + } + + unmatched_clause_group = lappend(unmatched_clause_group, clause); + + return; +} + +/* + * @brief make a combination of es->right_attnums or es->left_attnums with input attnum by clause map + * @param left: true: add to es->right_attnums; false: add to es->left_attnums + * @return combination of Bitmapset + * @exception None + */ +Bitmapset* ES_SELECTIVITY::make_attnums_by_clause_map(es_candidate* es, Bitmapset* attnums, bool left) const +{ + ListCell* lc_clause_map = NULL; + Bitmapset* result = NULL; + foreach(lc_clause_map, es->clause_map) { + es_clause_map* clause_map = (es_clause_map*)lfirst(lc_clause_map); + if (left && bms_is_member(clause_map->left_attnum, attnums)) + result = bms_add_member(result, clause_map->right_attnum); + else if (!left && bms_is_member(clause_map->right_attnum, attnums)) + result = bms_add_member(result, clause_map->left_attnum); + } + return result; +} + +/* + * @brief find the matched extended stats in stats_list + * @param es :proving mathing conditions including relids , attnums + * @param stats_list : the extended statistic list + * @param left : true : match with the left_rel; false: match with the right_rel + * @return None + * @exception None + */ +void ES_SELECTIVITY::match_extended_stats(es_candidate* es, List* stats_list, bool left) +{ + int max_matched = 0; + int num_members = bms_num_members(es->left_attnums); + char other_side_starelkind; + RangeTblEntry* other_side_rte = NULL; + Bitmapset* this_side_attnums = NULL; + if (left) { + /* this side is left and the other side is right */ + other_side_starelkind = OidIsValid(es->right_rte->partitionOid) ? STARELKIND_PARTITION : STARELKIND_CLASS; + other_side_rte = es->right_rte; + this_side_attnums = es->left_attnums; + } else { + /* this side is right and other side is left */ + other_side_starelkind = OidIsValid(es->left_rte->partitionOid) ? 
STARELKIND_PARTITION : STARELKIND_CLASS; + other_side_rte = es->left_rte; + this_side_attnums = es->right_attnums; + } + + /* best_matched_listcell use to save the best match from stats list */ + ListCell* best_matched_listcell = NULL; + /* best_matched_stats use to save the best match from es_get_multi_column_stats */ + ListCell* best_matched_stats = (ListCell*)palloc(sizeof(ListCell)); + lfirst(best_matched_stats) = NULL; + ListCell* lc = NULL; + foreach(lc, stats_list) { + ExtendedStats* extended_stats = (ExtendedStats*)lfirst(lc); + ExtendedStats* other_side_extended_stats = NULL; + if (bms_is_subset(extended_stats->bms_attnum, this_side_attnums)) { + int matched = bms_num_members(extended_stats->bms_attnum); + + Bitmapset* other_side_attnums = make_attnums_by_clause_map(es, extended_stats->bms_attnum, left); + other_side_extended_stats = es_get_multi_column_stats( + other_side_rte->relid, other_side_starelkind, other_side_rte->inh, other_side_attnums); + if (other_side_extended_stats != NULL && matched == num_members) { + /* all attnums have extended stats, leave */ + if (left) { + es->left_extended_stats = copy_stats_ptr(lc); + es->right_extended_stats = other_side_extended_stats; + } else { + es->right_extended_stats = copy_stats_ptr(lc); + es->left_extended_stats = other_side_extended_stats; + } + clear_extended_stats((ExtendedStats*)lfirst(best_matched_stats)); + break; + } else if (other_side_extended_stats != NULL && matched > max_matched) { + /* not all attnums have extended stats, find the first maximum match */ + best_matched_listcell = lc; + clear_extended_stats((ExtendedStats*)lfirst(best_matched_stats)); + lfirst(best_matched_stats) = other_side_extended_stats; + max_matched = matched; + } else + clear_extended_stats(other_side_extended_stats); + } + } + + if (best_matched_listcell && lfirst(best_matched_stats)) { + if (left) { + es->left_extended_stats = copy_stats_ptr(best_matched_listcell); + es->right_extended_stats = (ExtendedStats*)lfirst(best_matched_stats); + } else { + es->right_extended_stats = copy_stats_ptr(best_matched_listcell); + es->left_extended_stats = (ExtendedStats*)lfirst(best_matched_stats); + } + lfirst(best_matched_stats) = NULL; + /* remove members not in the multi-column stats */ + if (max_matched != num_members) { + Bitmapset* tmpset = bms_difference(es->left_attnums, es->left_extended_stats->bms_attnum); + int dump_attnum; + while ((dump_attnum = bms_first_member(tmpset)) > 0) { + es->left_attnums = bms_del_member(es->left_attnums, dump_attnum); + remove_attnum(es, dump_attnum); + } + bms_free_ext(tmpset); + } + } + pfree_ext(best_matched_stats); + return; +} + +/* + * @brief modify distinct value using possion model + * @param es : proving the distinct value to modify + * @param left :true : modify the left distinct value; false: modify the right one + * @param sjinfo :join infos from inputs of calculate_selecitvity, can be NULL for eqsel + * @return None + * @exception None + */ +void ES_SELECTIVITY::modify_distinct_by_possion_model(es_candidate* es, bool left, SpecialJoinInfo* spjinfo) const +{ + bool enablePossion = false; + double varratio = 1.0; + ListCell* lc = NULL; + VariableStatData vardata; + float4 distinct = 0.0; + double tuples = 0.0; + + /* build vardata */ + vardata.enablePossion = true; + if (left && es->left_rel->tuples > 0) { + vardata.rel = es->left_rel; + distinct = es->left_stadistinct; + tuples = es->left_rel->tuples; + foreach(lc, es->clause_map) { + es_clause_map* map = (es_clause_map*)lfirst(lc); + vardata.var = 
(Node*)map->left_var; + enablePossion = can_use_possion(&vardata, spjinfo, &varratio); + if (!enablePossion) + break; + } + } else if (!left && es->right_rel->tuples > 0) { + vardata.rel = es->right_rel; + distinct = es->right_stadistinct; + tuples = es->right_rel->tuples; + foreach(lc, es->clause_map) { + es_clause_map* map = (es_clause_map*)lfirst(lc); + vardata.var = (Node*)map->right_var; + enablePossion = can_use_possion(&vardata, spjinfo, &varratio); + if (!enablePossion) + break; + } + } + + if (enablePossion) { + double tmp = distinct; + distinct = NUM_DISTINCT_SELECTIVITY_FOR_POISSON(distinct, tuples, varratio); + ereport(ES_DEBUG_LEVEL, + (errmodule(MOD_OPT), + (errmsg("[ES]The origin distinct value is %f. After using possion model with ntuples=%f and ration=%e \ + The new distinct value is %f", + tmp, + tuples, + varratio, + distinct)))); + } + + if (left && enablePossion) { + es->left_stadistinct = distinct; + } else if ((!left) && enablePossion) { + es->right_stadistinct = distinct; + } + return; +} + +static bool ClauseIsLegal(es_type type, const Node* left, const Node* right, int leftAttnum, int rightAttnum) +{ + if (leftAttnum < 0 || rightAttnum < 0) { + return false; + } + + /* check clause type */ + switch (type) { + case ES_EQSEL: + if (!IsA(left, Const) && !IsA(right, Const) && !IsA(left, Param) && !IsA(right, Param)) + return false; + else if (IsA(left, Const) && ((Const*)left)->constisnull) + return false; + else if (IsA(right, Const) && ((Const*)right)->constisnull) + return false; + break; + case ES_EQJOINSEL: + default: + break; + } + return true; +} + +static inline bool RteIsValid(const RangeTblEntry* rte) +{ + return (rte != NULL && rte->rtekind == RTE_RELATION); +} + +void ES_SELECTIVITY::setup_es(es_candidate* es, es_type type, RestrictInfo* clause) +{ + es->tag = type; + es->relids = bms_copy(clause->clause_relids); + es->clause_group = lappend(es->clause_group, clause); + es->has_null_clause = IsA(clause->clause, NullTest); + es->left_first_mcvfreq = 0.0; + es->right_first_mcvfreq = 0.0; +} + +bool ES_SELECTIVITY::build_es_candidate_for_eqsel(es_candidate* es, Node* var, int attnum, bool left, + RestrictInfo* clause) +{ + read_rel_rte(var, &es->left_rel, &es->left_rte); + if (!RteIsValid(es->left_rte)) { + return false; + } + es->left_attnums = bms_add_member(es->left_attnums, attnum); + if (left) { + es->left_relids = + clause->left_relids != NULL ? bms_copy(clause->left_relids) : bms_copy(clause->clause_relids); + } else { + es->left_relids = + clause->right_relids != NULL ? 
bms_copy(clause->right_relids) : bms_copy(clause->clause_relids); + } + add_clause_map(es, attnum, 0, var, NULL); + return true; +} + +/* + * @brief build a new es_candidate and add to es_candidate_list + */ +bool ES_SELECTIVITY::build_es_candidate(RestrictInfo* clause, es_type type) +{ + Node* left = NULL; + Node* right = NULL; + int left_attnum = 0; + int right_attnum = 0; + bool success = false; + + if (IsA(clause->clause, OpExpr)) { + OpExpr* opclause = (OpExpr*)clause->clause; + + Assert(list_length(opclause->args) == TOW_MEMBERS); + + left = (Node*)linitial(opclause->args); + right = (Node*)lsecond(opclause->args); + left_attnum = read_attnum(left); + right_attnum = read_attnum(right); + if (!ClauseIsLegal(type, left, right, left_attnum, right_attnum)) + return false; + } else { + Assert(IsA(clause->clause, NullTest)); + NullTest* nullclause = (NullTest*)clause->clause; + left = (Node*)nullclause->arg; + left_attnum = read_attnum(left); + if (left_attnum < 0) + return false; + } + + es_candidate* es = (es_candidate*)palloc(sizeof(es_candidate)); + init_candidate(es); + + switch (type) { + case ES_EQSEL: + /* only use left side */ + if (left_attnum > 0 && right_attnum == 0) { + success = build_es_candidate_for_eqsel(es, left, left_attnum, true, clause); + } else if (right_attnum > 0 && left_attnum == 0) { + Assert(clause->right_relids != NULL); + success = build_es_candidate_for_eqsel(es, right, right_attnum, false, clause); + } else { + pfree_ext(es); + return false; + } + break; + case ES_EQJOINSEL: + if (left_attnum > 0 && right_attnum > 0) { + read_rel_rte(left, &es->left_rel, &es->left_rte); + read_rel_rte(right, &es->right_rel, &es->right_rte); + if (!RteIsValid(es->left_rte) || !RteIsValid(es->right_rte)) { + break; + } + es->left_relids = bms_copy(clause->left_relids); + es->right_relids = bms_copy(clause->right_relids); + es->left_attnums = bms_add_member(es->left_attnums, left_attnum); + es->right_attnums = bms_add_member(es->right_attnums, right_attnum); + add_clause_map(es, left_attnum, right_attnum, left, right); + success = true; + } else { + pfree_ext(es); + return false; + } + break; + default: + /* for future development, should not reach here now */ + pfree_ext(es); + return false; + } + + /* double check */ + if (!success) { + es->left_rel = NULL; + es->right_rel = NULL; + es->left_rte = NULL; + es->right_rte = NULL; + pfree_ext(es); + return false; + } + + setup_es(es, type, clause); + + es_candidate_list = lappend(es_candidate_list, es); + + return true; +} + +/* + * @brief remove useless member in es_candidate_list to unmatched_clause_group + */ +void ES_SELECTIVITY::recheck_candidate_list() +{ + if (!es_candidate_list) + return; + ListCell* l = NULL; + bool validate = true; + + /* try to use equivalence_class to re-combinate clauses first */ + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + if (temp->tag == ES_EQJOINSEL && list_length(temp->clause_group) == 1 && list_length(es_candidate_list) > 1) + (void)try_equivalence_class(temp); + } + + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + switch (temp->tag) { + case ES_EQSEL: + if (list_length(temp->clause_group) <= 1) + validate = false; + else if (temp->left_rte && bms_num_members(temp->left_attnums) <= 1) + validate = false; + break; + case ES_EQJOINSEL: + if (list_length(temp->clause_group) <= 1) + validate = false; + else if (bms_num_members(temp->left_attnums) <= 1 || bms_num_members(temp->right_attnums) <= 1) + validate = false; + 
break; + case ES_GROUPBY: + if (bms_num_members(temp->left_attnums) <= 1) + validate = false; + break; + default: + break; + } + if (!validate) { + unmatched_clause_group = list_concat(unmatched_clause_group, temp->clause_group); + temp->tag = ES_EMPTY; + temp->clause_group = NULL; + } + } + return; +} + +static inline bool IsUnsupportedCases(const EquivalenceClass* ec) +{ + /* only consider var = var situation */ + if (ec->ec_has_const) + return true; + + /* ignore broken ecs */ + if (ec->ec_broken) + return true; + + /* if members of ECs are less than two, won't generate any substitute */ + if (list_length(ec->ec_members) <= TOW_MEMBERS) + return true; + + return false; +} + +/* + * @brief pre-check the es candidate item is or not in current equivalence class. + * @return bool, true or false. + */ +bool ES_SELECTIVITY::IsEsCandidateInEqClass(es_candidate *es, EquivalenceClass *ec) +{ + if (es == NULL || ec == NULL) { + return false; + } + + /* Quickly ignore any that don't cover the join */ + if (!bms_is_subset(es->relids, ec->ec_relids)) { + return false; + } + + foreach_cell (lc, ec->ec_members) { + EquivalenceMember *em = (EquivalenceMember *)lfirst(lc); + Var *emVar = (Var *)LocateOpExprLeafVar((Node *)em->em_expr); + + if (emVar == NULL) { + continue; + } + + if (emVar->varattno <= 0) { + continue; + } + + /* left or right branch of join es occurs in the current equivalencen member, so the ec is valid. */ + if ((bms_equal(es->left_relids, em->em_relids) && bms_is_member(emVar->varattno, es->left_attnums)) || + (bms_equal(es->right_relids, em->em_relids) && bms_is_member(emVar->varattno, es->right_attnums))) { + return true; + } + } + + return false; +} + +/* + * @brief try to find a substitude clause building from equivalence classes + * @return true when find a substitude clause; false when find nothing + */ +bool ES_SELECTIVITY::try_equivalence_class(es_candidate* es) +{ + if (path && path->path.pathtype == T_MergeJoin) { + /* for mergejoin, do not adjust clause using equivalence class */ + return false; + } + + ListCell* lc = NULL; + bool result = false; + + foreach(lc, root->eq_classes) { + EquivalenceClass* ec = (EquivalenceClass*)lfirst(lc); + + if (IsUnsupportedCases(ec)) + continue; + + /* ignore ec which does not contain the es info. 
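+         * Worked example with hypothetical tables: for an es_candidate holding
+         * the single clause "t1.a = t2.b" and an equivalence class
+         * {t1.a, t2.b, t3.c}, the code below can generate the implied equality
+         * "t1.a = t3.c" and move the clause group onto another es_candidate
+         * that joins against t3, so multi-column statistics still apply.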
*/ + if (!IsEsCandidateInEqClass(es, ec)) + continue; + + Bitmapset* tmpset = bms_copy(ec->ec_relids); + int ec_relid = 0; + while ((ec_relid = bms_first_member(tmpset)) >= 0) { + if (bms_is_member(ec_relid, es->relids)) + continue; + ListCell* lc2 = NULL; + foreach(lc2, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(lc2); + if (temp->tag != ES_EQJOINSEL) + continue; + if (bms_equal(temp->relids, es->relids)) + continue; + if (bms_is_member(ec_relid, temp->relids) && bms_overlap(temp->relids, es->relids)) { + Bitmapset* interset_relids = bms_intersect(temp->relids, es->relids); + Bitmapset* join_relids = bms_copy(interset_relids); + join_relids = bms_add_member(join_relids, ec_relid); + Assert(bms_equal(join_relids, temp->relids)); + Bitmapset* outer_relids = bms_make_singleton(ec_relid); + List* pseudo_clauselist = + generate_join_implied_equalities_normal(root, ec, join_relids, outer_relids, interset_relids); + if (pseudo_clauselist != NULL) { + result = match_pseudo_clauselist(pseudo_clauselist, temp, es->clause_group); + if (log_min_messages <= ES_DEBUG_LEVEL) { + ereport(ES_DEBUG_LEVEL, + (errmodule(MOD_OPT_JOIN), errmsg("[ES]Build new clause using equivalence class:)"))); + print_clauses(pseudo_clauselist); + ereport(ES_DEBUG_LEVEL, + (errmodule(MOD_OPT_JOIN), + errmsg("[ES]The old clause will be abandoned? %d)", (int)result))); + print_clauses(es->clause_group); + } + } + + bms_free_ext(interset_relids); + bms_free_ext(join_relids); + bms_free_ext(outer_relids); + if (result) { + /* replace the removed clause in clauselist with the new built one */ + ListCell* lc3 = NULL; + foreach(lc3, origin_clauses) { + void* clause = (void*)lfirst(lc3); + if (clause == linitial(es->clause_group)) { + /* maybe cause memory problem as the old clause is not released */ + lfirst(lc3) = linitial(pseudo_clauselist); + } + } + /* For hashclause, we have to process joinrestrictinfo in path as well */ + if (path) { + foreach(lc3, path->joinrestrictinfo) { + void* clause = (void*)lfirst(lc3); + if (clause == linitial(es->clause_group)) { + /* maybe cause memory problem as the old clause is not released */ + lfirst(lc3) = linitial(pseudo_clauselist); + } + } + } + break; + } + } + } + if (result) { + /* + * If sucess, the clause has been tranformed and saved in another es_candidate. + * So no need to keep this es_candidate anymore. + */ + es->tag = ES_EMPTY; + es->clause_group = NULL; + break; + } + } + bms_free_ext(tmpset); + if (result) + break; + } + return result; +} + +/* + * @brief try to match the newborn clause building by try_equivalence_class() with the existed clause group + * like what we do in group_clauselist(), but more simple. 
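+ * (A guarded sketch of the shared clause filter follows this comment.)
+ */
+#ifdef ES_SELECTIVITY_EXAMPLES
+/*
+ * Minimal sketch of the filter that match_pseudo_clauselist below shares with
+ * group_clauselist: only plain, non-pseudoconstant "=" operator clauses are
+ * eligible for extended-statistics matching. The function name is
+ * illustrative; compiled only when ES_SELECTIVITY_EXAMPLES is defined.
+ */
+static bool is_simple_equality_clause_sketch(RestrictInfo* rinfo)
+{
+    if (rinfo->pseudoconstant || rinfo->norm_selec > 1 || rinfo->orclause)
+        return false;
+    if (!is_opclause(rinfo->clause))
+        return false;
+    /* "=" operators are recognized by their eqsel-based restriction estimator */
+    return get_oprrest(((OpExpr*)rinfo->clause)->opno) == EQSELRETURNOID;
+}
+#endif /* ES_SELECTIVITY_EXAMPLES */
+/*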
+ */
+bool ES_SELECTIVITY::match_pseudo_clauselist(List* clauses, es_candidate* es, List* origin_clause)
+{
+    bool result = false;
+    ListCell* lc = NULL;
+    foreach(lc, clauses) {
+        Node* clause = (Node*)lfirst(lc);
+
+        if (!IsA(clause, RestrictInfo)) {
+            continue;
+        }
+
+        RestrictInfo* rinfo = (RestrictInfo*)clause;
+        if (rinfo->pseudoconstant || rinfo->norm_selec > 1 || rinfo->orclause) {
+            continue;
+        }
+
+        if (is_opclause(rinfo->clause)) {
+            OpExpr* opclause = (OpExpr*)rinfo->clause;
+            Oid opno = opclause->opno;
+
+            /* only handle the "=" operator */
+            if (get_oprrest(opno) == EQSELRETURNOID) {
+                Assert(bms_num_members(rinfo->clause_relids) == TOW_MEMBERS);
+                if (add_attnum(rinfo, es)) {
+                    es->clause_group = lappend(es->clause_group, clause);
+                    es->pseudo_clause_list = lappend(es->pseudo_clause_list, clause);
+                    es->pseudo_clause_list = list_concat(es->pseudo_clause_list, origin_clause);
+                    result = true;
+                }
+            }
+        }
+    }
+    return result;
+}
+
+/*
+ * @brief replace the original clause in the input clause list with the new clause built from
+ * an equivalence class; the memory used by the old clause will be released by the optimizer
+ * context or something else
+ */
+void ES_SELECTIVITY::replace_clause(Datum* old_clause, Datum* new_clause) const
+{
+    ListCell* lc = NULL;
+    foreach(lc, origin_clauses) {
+        if (lfirst(lc) == old_clause) {
+            lfirst(lc) = new_clause;
+            break;
+        }
+    }
+}
+
+/*
+ * @brief main entry for reading the statistics used to calculate selectivity; called by
+ * calculate_selectivity
+ */
+void ES_SELECTIVITY::read_statistic()
+{
+    if (!es_candidate_list)
+        return;
+    ListCell* l = NULL;
+    foreach(l, es_candidate_list) {
+        es_candidate* temp = (es_candidate*)lfirst(l);
+        switch (temp->tag) {
+            case ES_EQSEL:
+            case ES_GROUPBY:
+                read_statistic_eqsel(temp);
+                break;
+            case ES_EQJOINSEL:
+                read_statistic_eqjoinsel(temp);
+                break;
+            default:
+                /* empty */
+                break;
+        }
+    }
+    return;
+}
+
+bool ES_SELECTIVITY::cal_stadistinct_eqjoinsel(es_candidate* es)
+{
+    /*
+     * Since we cannot tell how many or which columns are actually null when
+     * nullfrac == 1.0, we do not use multi-column statistics in that case.
+     */
+    if (es->left_extended_stats &&
+        (es->left_extended_stats->nullfrac != 0.0 || es->left_extended_stats->distinct != 0.0 ||
+        es->left_extended_stats->mcv_values) &&
+        es->right_extended_stats &&
+        (es->right_extended_stats->nullfrac != 0.0 || es->right_extended_stats->distinct != 0.0 ||
+        es->right_extended_stats->mcv_values)) {
+        /* should not receive empty extended stats here, unless all columns are empty */
+        if (es->left_extended_stats->distinct < 0)
+            es->left_stadistinct = clamp_row_est(-1 * es->left_extended_stats->distinct * es->left_rel->tuples *
+                (1.0 - es->left_extended_stats->nullfrac));
+        else
+            es->left_stadistinct = clamp_row_est(es->left_extended_stats->distinct);
+
+        if (es->right_extended_stats->distinct < 0)
+            es->right_stadistinct = clamp_row_est(-1 * es->right_extended_stats->distinct * es->right_rel->tuples *
+                (1.0 - es->right_extended_stats->nullfrac));
+        else
+            es->right_stadistinct = clamp_row_est(es->right_extended_stats->distinct);
+
+        /*
+         * Use the Poisson model when its preconditions are satisfied.
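+         * (A guarded sketch of one common Poisson form follows.)
+         */
+#ifdef ES_SELECTIVITY_EXAMPLES
+        /*
+         * Illustrative only: a classic Poisson "thinning" estimate of how many
+         * distinct values survive a filter that keeps a fraction `ratio` of
+         * `tuples` rows. The macro actually used below,
+         * NUM_DISTINCT_SELECTIVITY_FOR_POISSON, may differ in detail.
+         */
+        auto poisson_distinct_sketch = [](double distinct, double tuples, double ratio) {
+            double kept = tuples * ratio;                    /* expected surviving rows */
+            return distinct * (1.0 - exp(-kept / distinct)); /* expected surviving distinct values */
+        };
+#endif /* ES_SELECTIVITY_EXAMPLES */
+        /*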
+ */
+ modify_distinct_by_possion_model(es, true, sjinfo);
+ modify_distinct_by_possion_model(es, false, sjinfo);
+
+ /* replace the old clause with the new one */
+ if (es->pseudo_clause_list) {
+ ListCell* lc2 = NULL;
+ foreach(lc2, es->pseudo_clause_list) {
+ Datum* new_clause = (Datum*)lfirst(lc2);
+ lc2 = lnext(lc2);
+ Datum* old_clause = (Datum*)lfirst(lc2);
+ /* make sure the new clause is still in the clause group */
+ ListCell* lc3 = NULL;
+ foreach(lc3, es->clause_group) {
+ if ((Datum*)lfirst(lc3) == new_clause) {
+ replace_clause(old_clause, new_clause);
+ break;
+ }
+ }
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+#define CLEAN_UP_TEMP_OBJECTS(tmp_left, tmp_right, left_stats_list, right_stats_list) \
+ do { \
+ bms_free_ext(tmp_left); \
+ bms_free_ext(tmp_right); \
+ clear_extended_stats_list(left_stats_list); \
+ clear_extended_stats_list(right_stats_list); \
+ } while (0)
+
+/*
+ * @brief read extended statistics for eqjoinsel
+ * There are three possible situations:
+ * (1) No statistics data in pg_statistic; then num_stats will be 0.
+ * (2) There are over 100 records in pg_statistic when searching extended statistics for the
+ * target table; then the returned stats_list will be empty and we have to search manually.
+ * In this case, there are too many combinations to try, so we make some compromises and
+ * only try a limited number of possibilities.
+ * (3) There are fewer than 100 records; then the returned stats_list will contain all of them,
+ * and we will search the list for the best answer.
+ */
+void ES_SELECTIVITY::read_statistic_eqjoinsel(es_candidate* es)
+{
+ int left_num_stats = 0;
+ int right_num_stats = 0;
+ char left_starelkind = OidIsValid(es->left_rte->partitionOid) ? STARELKIND_PARTITION : STARELKIND_CLASS;
+ char right_starelkind = OidIsValid(es->right_rte->partitionOid) ? STARELKIND_PARTITION : STARELKIND_CLASS;
+
+ /* read all multi-column statistics from pg_statistic if possible */
+ List* left_stats_list =
+ es_get_multi_column_stats(es->left_rte->relid, left_starelkind, es->left_rte->inh, &left_num_stats);
+ List* right_stats_list =
+ es_get_multi_column_stats(es->right_rte->relid, right_starelkind, es->right_rte->inh, &right_num_stats);
+
+ /* no multi-column statistics */
+ if (left_num_stats == 0 || right_num_stats == 0) {
+ report_no_stats(es->left_rte->relid, es->left_attnums);
+ report_no_stats(es->right_rte->relid, es->right_attnums);
+ remove_candidate(es);
+ return;
+ }
+
+ /* save attnums for the no-analyze list */
+ Bitmapset* tmp_left = bms_copy(es->left_attnums);
+ Bitmapset* tmp_right = bms_copy(es->right_attnums);
+
+ /* when at least one side returns a multi-column statistics list */
+ if (left_num_stats <= ES_MAX_FETCH_NUM_OF_INSTANCE && left_num_stats <= right_num_stats) {
+ match_extended_stats(es, left_stats_list, true);
+ } else if (right_num_stats <= ES_MAX_FETCH_NUM_OF_INSTANCE) {
+ match_extended_stats(es, right_stats_list, false);
+ } else {
+ /*
+ * There are too many multi-column statistics, so a null list is returned.
+ * We have to search pg_statistic manually with limited combinations,
+ * so we could lose some matches.
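+ * Illustrative example: with left_attnums {2, 3, 5}, the loop below first
+ * probes pg_statistic for the full set {2, 3, 5}; if no match is found on
+ * both sides, bms_first_member() drops attnum 2 and we retry with {3, 5},
+ * continuing while at least TOW_MEMBERS attnums remain.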
+ */
+ ereport(ES_DEBUG_LEVEL,
+ (errmodule(MOD_OPT_JOIN), errmsg("[ES] Too many multi-column statistics, could lose matches.")));
+
+ while (bms_num_members(es->left_attnums) >= TOW_MEMBERS) {
+ ExtendedStats* left_extended_stats =
+ es_get_multi_column_stats(es->left_rte->relid, left_starelkind, es->left_rte->inh, es->left_attnums);
+ if (left_extended_stats != NULL) {
+ ExtendedStats* right_extended_stats = es_get_multi_column_stats(
+ es->right_rte->relid, right_starelkind, es->right_rte->inh, es->right_attnums);
+ if (right_extended_stats != NULL) {
+ es->left_extended_stats = left_extended_stats;
+ es->right_extended_stats = right_extended_stats;
+ break;
+ } else
+ clear_extended_stats(left_extended_stats);
+ }
+
+ /*
+ * if not found, delete the first member of the attnums and use the remaining attnums to match;
+ * also delete the corresponding clause and clause_map in es.
+ */
+ remove_attnum(es, bms_first_member(es->left_attnums));
+ }
+ }
+
+ if (!cal_stadistinct_eqjoinsel(es)) {
+ /* no multi-column statistics matched */
+ report_no_stats(es->left_rte->relid, tmp_left);
+ report_no_stats(es->right_rte->relid, tmp_right);
+ remove_candidate(es);
+ }
+
+ CLEAN_UP_TEMP_OBJECTS(tmp_left, tmp_right, left_stats_list, right_stats_list);
+ return;
+}
+
+void ES_SELECTIVITY::remove_members_without_es_stats(int max_matched, int num_members, es_candidate* es)
+{
+ if (max_matched != num_members) {
+ Bitmapset* tmpset = bms_difference(es->left_attnums, es->left_extended_stats->bms_attnum);
+ int dump_attnum;
+ while ((dump_attnum = bms_first_member(tmpset)) > 0) {
+ es->left_attnums = bms_del_member(es->left_attnums, dump_attnum);
+ remove_attnum(es, dump_attnum);
+ }
+ bms_free_ext(tmpset);
+ }
+}
+
+void ES_SELECTIVITY::cal_stadistinct_eqsel(es_candidate* es)
+{
+ /*
+ * Since we cannot tell how many or which columns are actually null when nullfrac == 1.0,
+ * we will not use multi-column statistics when nullfrac == 1.0.
+ */
+ if (es->left_extended_stats &&
+ (es->left_extended_stats->nullfrac != 0.0 || es->left_extended_stats->distinct != 0.0 ||
+ es->left_extended_stats->mcv_values)) {
+ /* we should not receive empty extended stats here, unless all columns are empty */
+ if (es->left_extended_stats->distinct < 0)
+ es->left_stadistinct = clamp_row_est(-1 * es->left_extended_stats->distinct * es->left_rel->tuples);
+ else
+ es->left_stadistinct = clamp_row_est(es->left_extended_stats->distinct);
+
+ /*
+ * Use the Poisson model if the condition is satisfied.
+ * For group-by we don't use the Poisson model to estimate distinct because
+ * we can't estimate accurately for multiple expressions.
+ */
+ if (es->tag != ES_GROUPBY)
+ modify_distinct_by_possion_model(es, true, NULL);
+ } else {
+ /* no multi-column statistics matched */
+ remove_candidate(es);
+ }
+}
+
+/*
+ * @brief read extended statistics for eqsel or groupby; details are the same as read_statistic_eqjoinsel
+ */
+void ES_SELECTIVITY::read_statistic_eqsel(es_candidate* es)
+{
+ int num_stats = 0;
+ char starelkind = OidIsValid(es->left_rte->partitionOid) ?
STARELKIND_PARTITION : STARELKIND_CLASS;
+ /* read all multi-column statistics from pg_statistic if possible */
+ List* stats_list =
+ es_get_multi_column_stats(es->left_rte->relid, starelkind, es->left_rte->inh, &num_stats, es->has_null_clause);
+ /* no multi-column statistics */
+ if (num_stats == 0) {
+ report_no_stats(es->left_rte->relid, es->left_attnums);
+ remove_candidate(es);
+ return;
+ }
+
+ /* returned a multi-column statistics list */
+ if (num_stats <= ES_MAX_FETCH_NUM_OF_INSTANCE) {
+ ListCell* l = NULL;
+ int max_matched = 0;
+ int num_members = bms_num_members(es->left_attnums);
+ ListCell* best_matched_stat_ptr = NULL;
+ foreach(l, stats_list) {
+ ExtendedStats* extended_stats = (ExtendedStats*)lfirst(l);
+ if (bms_is_subset(extended_stats->bms_attnum, es->left_attnums)) {
+ int matched = bms_num_members(extended_stats->bms_attnum);
+ if (matched == num_members) {
+ es->left_extended_stats = copy_stats_ptr(l);
+ break;
+ } else if (matched > max_matched) {
+ best_matched_stat_ptr = l;
+ max_matched = matched;
+ }
+ }
+ }
+
+ if (best_matched_stat_ptr != NULL) {
+ es->left_extended_stats = copy_stats_ptr(best_matched_stat_ptr);
+ /* remove members not in the multi-column stats */
+ remove_members_without_es_stats(max_matched, num_members, es);
+ }
+ } else {
+ /*
+ * There are too many multi-column statistics, so a null list is returned.
+ * We have to search pg_statistic manually with limited combinations,
+ * so we could lose some matches.
+ */
+ ereport(ES_DEBUG_LEVEL,
+ (errmodule(MOD_OPT_JOIN), errmsg("[ES] Too many multi-column statistics, could lose matches.")));
+
+ while (bms_num_members(es->left_attnums) >= TOW_MEMBERS) {
+ ExtendedStats* extended_stats = es_get_multi_column_stats(
+ es->left_rte->relid, starelkind, es->left_rte->inh, es->left_attnums, es->has_null_clause);
+ if (extended_stats != NULL) {
+ es->left_extended_stats = extended_stats;
+ break;
+ }
+
+ /*
+ * if not found, delete one attnum and use the rest to try again;
+ * also delete the corresponding clause and clause_map.
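+ * Note: each attnum dropped below hands its clause back to
+ * unmatched_clause_group via remove_attnum(), so the dropped column can
+ * still be estimated without extended statistics later.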
+ */
+ remove_attnum(es, bms_first_member(es->left_attnums));
+ }
+ }
+
+ cal_stadistinct_eqsel(es);
+
+ clear_extended_stats_list(stats_list);
+
+ return;
+}
+
+/*
+ * @brief read the attnum from the input node
+ * @param node: should be a Var*
+ * @return -1 if varattno <= 0; 0 if the node is not a Var
+ * @exception None
+ */
+int ES_SELECTIVITY::read_attnum(Node* node) const
+{
+ Node* basenode = NULL;
+ int attnum = -1;
+ if (IsA(node, RelabelType))
+ basenode = (Node*)((RelabelType*)node)->arg;
+ else
+ basenode = node;
+
+ if (IsA(basenode, Var)) {
+ Var* var = (Var*)basenode;
+ attnum = var->varattno;
+ if (attnum <= 0)
+ attnum = -1;
+ } else if (IsA(node, Const) || IsA(node, Param))
+ attnum = 0;
+
+ return attnum;
+}
+
+static List* BuildNewRelList(Bitmapset* attnumsTmp, Oid relidOid)
+{
+ List* record = NIL;
+ List* relidList = NIL;
+ List* attidList = NIL;
+ int attnum = bms_first_member(attnumsTmp);
+ while (attnum != -1) {
+ attidList = lappend_int(attidList, attnum);
+ attnum = bms_first_member(attnumsTmp);
+ }
+ bms_free_ext(attnumsTmp);
+ relidList = lappend_oid(relidList, relidOid);
+ record = lappend(record, relidList);
+ record = lappend(record, attidList);
+ return record;
+}
+
+/*
+ * @brief save non-analyzed multi-column attributes to g_NoAnalyzeRelNameList
+ * @param relid_oid, relation oid
+ * @param attnums, multi-column attribute numbers
+ * @return
+ * @exception None
+ */
+void ES_SELECTIVITY::report_no_stats(Oid relid_oid, Bitmapset* attnums) const
+{
+ /* We don't save non-analyzed multi-columns to g_NoAnalyzeRelNameList when resource_track_log=summary. */
+ if (u_sess->attr.attr_storage.resource_track_log == SUMMARY || relid_oid == 0)
+ return;
+
+ /*
+ * We should not save the relation to the non-analyze list if it is under analyzing,
+ * because analyzing creates temp tables and executes some queries, and those temp
+ * tables are not analyzed during the 2% sampling analyze.
+ */
+ if (u_sess->analyze_cxt.is_under_analyze)
+ return;
+
+ Assert(bms_num_members(attnums) >= TOW_MEMBERS);
+ MemoryContext oldcontext = MemoryContextSwitchTo(t_thrd.mem_cxt.msg_mem_cxt);
+ Bitmapset* attnums_tmp = bms_copy(attnums);
+
+ ListCell* lc = NULL;
+ bool found = false;
+ if (t_thrd.postgres_cxt.g_NoAnalyzeRelNameList != NIL) {
+ foreach(lc, t_thrd.postgres_cxt.g_NoAnalyzeRelNameList) {
+ List* record = (List*)lfirst(lc);
+ if (relid_oid == linitial_oid((List*)linitial(record))) {
+ ListCell* sublist = NULL;
+ for (sublist = lnext(list_head(record)); sublist != NULL; sublist = lnext(sublist)) {
+ List* attid_list = (List*)lfirst(sublist);
+ if (list_length(attid_list) == bms_num_members(attnums_tmp)) {
+ Bitmapset* attnums_from_list = NULL;
+ ListCell* cell = attid_list->head;
+ while (cell != NULL) {
+ attnums_from_list = bms_add_member(attnums_from_list, (int)lfirst_int(cell));
+ cell = cell->next;
+ }
+ if (bms_equal(attnums_from_list, attnums_tmp))
+ found = true;
+ bms_free_ext(attnums_from_list);
+ }
+ }
+ if (!found) {
+ List* attid_list = NIL;
+ int attnum = bms_first_member(attnums_tmp);
+ while (attnum != -1) {
+ attid_list = lappend_int(attid_list, attnum);
+ attnum = bms_first_member(attnums_tmp);
+ }
+ bms_free_ext(attnums_tmp);
+ record = lappend(record, attid_list);
+ found = true;
+ }
+ }
+ }
+ }
+
+ if (!found) {
+ /* add a new rel list */
+ List* record = BuildNewRelList(attnums_tmp, relid_oid);
+
+ /* Add a new rel list into g_NoAnalyzeRelNameList.
*/
+ t_thrd.postgres_cxt.g_NoAnalyzeRelNameList = lappend(t_thrd.postgres_cxt.g_NoAnalyzeRelNameList, record);
+ }
+
+ (void)MemoryContextSwitchTo(oldcontext);
+ return;
+}
+
+static bool MatchOnSameSide(RestrictInfo* clause, es_candidate* temp, int leftAttnum, int rightAttnum)
+{
+ return bms_equal(clause->left_relids, temp->left_relids) &&
+ bms_equal(clause->right_relids, temp->right_relids) &&
+ !bms_is_member(leftAttnum, temp->left_attnums) && !bms_is_member(rightAttnum, temp->right_attnums);
+}
+
+static bool MatchOnOtherSide(RestrictInfo* clause, es_candidate* temp, int leftAttnum, int rightAttnum)
+{
+ return bms_equal(clause->right_relids, temp->left_relids) &&
+ bms_equal(clause->left_relids, temp->right_relids) &&
+ !bms_is_member(rightAttnum, temp->left_attnums) &&
+ !bms_is_member(leftAttnum, temp->right_attnums);
+}
+
+static inline bool AttnumIsInvalid(int leftAttnum, int rightAttnum)
+{
+ return (leftAttnum < 0 || rightAttnum < 0 || (leftAttnum == 0 && rightAttnum == 0));
+}
+
+void ES_SELECTIVITY::add_attnum_for_eqsel(es_candidate* temp, int attnum, Node* arg) const
+{
+ temp->left_attnums = bms_add_member(temp->left_attnums, attnum);
+ add_clause_map(temp, attnum, 0, arg, NULL);
+}
+
+/*
+ * @brief read and add attnums to an es_candidate
+ * @return true if successful
+ */
+bool ES_SELECTIVITY::add_attnum(RestrictInfo* clause, es_candidate* temp) const
+{
+ Node* left = NULL;
+ Node* right = NULL;
+ int left_attnum = 0;
+ int right_attnum = 0;
+
+ if (IsA(clause->clause, OpExpr)) {
+ OpExpr* opclause = (OpExpr*)clause->clause;
+ Assert(list_length(opclause->args) == TOW_MEMBERS);
+ left = (Node*)linitial(opclause->args);
+ right = (Node*)lsecond(opclause->args);
+ left_attnum = read_attnum(left);
+ right_attnum = read_attnum(right);
+ } else {
+ Assert(IsA(clause->clause, NullTest));
+ left = (Node*)((NullTest*)clause->clause)->arg;
+ left_attnum = read_attnum(left);
+ }
+
+ if (AttnumIsInvalid(left_attnum, right_attnum))
+ return false;
+
+ switch (temp->tag) {
+ case ES_EQSEL:
+ /*
+ * We wouldn't have clauses like: t1.a = 1 and t1.a = 2 here
+ * because the optimizer will find the conflict first.
+ */
+ if (left_attnum > 0 && right_attnum > 0)
+ return false;
+
+ if (bms_equal(clause->left_relids, temp->left_relids) ||
+ (IsA(clause->clause, NullTest) && bms_equal(clause->clause_relids, temp->left_relids))) {
+ add_attnum_for_eqsel(temp, left_attnum, left);
+ } else if (bms_equal(clause->right_relids, temp->left_relids)) {
+ add_attnum_for_eqsel(temp, right_attnum, right);
+ } else
+ return false;
+
+ break;
+ case ES_EQJOINSEL:
+ /*
+ * Normally, we shouldn't have clauses like: t1.a = t2.a and t1.b = t2.a here
+ * because clauses are generated from equivalence classes; in this case,
+ * t1.a, t2.a and t1.b are in one equivalence class and will only generate
+ * one clause.
+ * However, if we try to build an es_candidate using an equivalence class, as in
+ * TPC-H Q9, there could be some scenarios we have not foreseen yet.
+ * So, for safety, we still check whether the clause is something
+ * like: t1.a = t2.a and t1.b = t2.a
+ */
+ if (left_attnum == 0 || right_attnum == 0)
+ return false;
+
+ if (MatchOnSameSide(clause, temp, left_attnum, right_attnum)) {
+ temp->left_attnums = bms_add_member(temp->left_attnums, left_attnum);
+ temp->right_attnums = bms_add_member(temp->right_attnums, right_attnum);
+ add_clause_map(temp, left_attnum, right_attnum, left, right);
+ } else if (MatchOnOtherSide(clause, temp, left_attnum, right_attnum)) {
+ temp->left_attnums = bms_add_member(temp->left_attnums, right_attnum);
+ temp->right_attnums = bms_add_member(temp->right_attnums, left_attnum);
+ add_clause_map(temp, right_attnum, left_attnum, right, left);
+ } else
+ return false;
+
+ break;
+ default:
+ /* should not reach here */
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * @brief add a clause_map to an es_candidate; a clause_map is a map that can be used to find
+ * the right-hand arg from the attnum of the left-hand arg in a clause, so we don't have to
+ * parse the clause again.
+ */
+void ES_SELECTIVITY::add_clause_map(
+ es_candidate* es, int left_attnum, int right_attnum, Node* left_arg, Node* right_arg) const
+{
+ es_clause_map* clause_map = (es_clause_map*)palloc(sizeof(es_clause_map));
+ clause_map->left_attnum = left_attnum;
+ clause_map->right_attnum = right_attnum;
+ if (left_arg) {
+ if (IsA(left_arg, RelabelType))
+ clause_map->left_var = (Var*)((RelabelType*)left_arg)->arg;
+ else
+ clause_map->left_var = (Var*)left_arg;
+ }
+
+ if (right_arg) {
+ if (IsA(right_arg, RelabelType))
+ clause_map->right_var = (Var*)((RelabelType*)right_arg)->arg;
+ else
+ clause_map->right_var = (Var*)right_arg;
+ }
+ es->clause_map = lappend(es->clause_map, clause_map);
+ return;
+}
+
+/*
+ * @brief read the RelOptInfo and RangeTblEntry, and save them to the es_candidate
+ * @param node: should be a Var*
+ */
+void ES_SELECTIVITY::read_rel_rte(Node* node, RelOptInfo** rel, RangeTblEntry** rte)
+{
+ Node* basenode = NULL;
+
+ if (IsA(node, RelabelType))
+ basenode = (Node*)((RelabelType*)node)->arg;
+ else
+ basenode = node;
+
+ if (IsA(basenode, Var)) {
+ Assert(root != NULL);
+ Assert(root->parse != NULL);
+
+ Var* var = (Var*)basenode;
+ *rel = find_base_rel(root, var->varno);
+ *rte = rt_fetch((*rel)->relid, root->parse->rtable);
+ }
+
+ return;
+}
+
+void ES_SELECTIVITY::save_selectivity(
+ es_candidate* es, double left_join_ratio, double right_join_ratio, bool save_semi_join)
+{
+ VariableStatData vardata;
+ RatioType type;
+ if (es->tag == ES_EQSEL)
+ type = RatioType_Filter;
+ else if (es->tag == ES_EQJOINSEL)
+ type = RatioType_Join;
+ else
+ return;
+
+ if (es->left_rel) {
+ vardata.rel = es->left_rel;
+ ListCell* lc = NULL;
+ foreach(lc, es->clause_map) {
+ es_clause_map* map = (es_clause_map*)lfirst(lc);
+ vardata.var = (Node*)map->left_var;
+ if (!save_semi_join)
+ set_varratio_after_calc_selectivity(&vardata, type, left_join_ratio, sjinfo);
+ /* The bloom filter can be used only when var = var. */
+ if (es->tag == ES_EQJOINSEL) {
+ VariableStatData vardata2;
+ vardata2.rel = es->right_rel;
+ vardata2.var = (Node*)map->right_var;
+ /* Set the var's ratio, which will be used by the bloom filter.
*/
+ set_equal_varratio(&vardata, vardata2.rel->relids, left_join_ratio, sjinfo);
+ if (!save_semi_join) {
+ set_equal_varratio(&vardata2, vardata.rel->relids, right_join_ratio, sjinfo);
+ set_varratio_after_calc_selectivity(&vardata2, type, right_join_ratio, sjinfo);
+ }
+ }
+ }
+ }
+ return;
+}
+
+/*
+ * @brief calculate the bucket size; actually this only saves results for estimate_hash_bucketsize to use
+ */
+void ES_SELECTIVITY::cal_bucket_size(es_candidate* es, es_bucketsize* bucket) const
+{
+ double tuples;
+ RelOptInfo* rel = NULL;
+ ListCell* lc = NULL;
+
+ bucket->left_relids = bms_copy(es->left_relids);
+ bucket->right_relids = bms_copy(es->right_relids);
+ bucket->left_rel = es->left_rel;
+ bucket->right_rel = es->right_rel;
+ bucket->left_distinct = es->left_stadistinct;
+ bucket->right_distinct = es->right_stadistinct;
+ bucket->left_mcvfreq = es->left_first_mcvfreq;
+ bucket->right_mcvfreq = es->right_first_mcvfreq;
+
+ if (es->left_extended_stats->dndistinct > 0)
+ bucket->left_dndistinct = es->left_extended_stats->dndistinct;
+ else {
+ rel = es->left_rel;
+ tuples = get_local_rows(
+ rel->tuples, rel->multiple, IsLocatorReplicated(rel->locator_type), ng_get_dest_num_data_nodes(rel));
+ bucket->left_dndistinct = clamp_row_est(-1 * es->left_extended_stats->dndistinct * tuples);
+ }
+
+ if (es->right_extended_stats->dndistinct > 0)
+ bucket->right_dndistinct = es->right_extended_stats->dndistinct;
+ else {
+ rel = es->right_rel;
+ tuples = get_local_rows(
+ rel->tuples, rel->multiple, IsLocatorReplicated(rel->locator_type), ng_get_dest_num_data_nodes(rel));
+ bucket->right_dndistinct = clamp_row_est(-1 * es->right_extended_stats->dndistinct * tuples);
+ }
+
+ bucket->left_hashkeys = NIL;
+ bucket->right_hashkeys = NIL;
+ foreach(lc, es->clause_map) {
+ es_clause_map* map = (es_clause_map*)lfirst(lc);
+ bucket->left_hashkeys = lappend(bucket->left_hashkeys, map->left_var);
+ bucket->right_hashkeys = lappend(bucket->right_hashkeys, map->right_var);
+ }
+
+ return;
+}
+
+Selectivity ES_SELECTIVITY::estimate_hash_bucketsize(
+ es_bucketsize* es_bucket, double* distinctnum, bool left, Path* inner_path, double nbuckets)
+{
+ double estfract, ndistinct, mcvfreq, avgfreq;
+ RelOptInfo* rel = left ? es_bucket->left_rel : es_bucket->right_rel;
+ List* hashkeys = left ? es_bucket->left_hashkeys : es_bucket->right_hashkeys;
+
+ ndistinct = estimate_local_numdistinct(es_bucket, left, inner_path);
+ *distinctnum = ndistinct;
+
+ /* Compute avg freq of all distinct data values in raw relation */
+ avgfreq = 1.0 / ndistinct;
+
+ /*
+ * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
+ * number of buckets is less than the expected number of distinct values;
+ * otherwise it is 1/ndistinct.
+ */
+ if (ndistinct > nbuckets)
+ estfract = 1.0 / nbuckets;
+ else {
+ if (ndistinct < 1.0)
+ ndistinct = 1.0;
+ estfract = 1.0 / ndistinct;
+ }
+
+ /*
+ * Look up the frequency of the most common value, if available.
+ */
+ mcvfreq = left ? es_bucket->left_mcvfreq : es_bucket->right_mcvfreq;
+
+ /* We should adjust mcvfreq with the selectivity because mcvfreq changes after filtering or joining. */
+ mcvfreq /= (rel->rows / rel->tuples);
+ ereport(DEBUG1,
+ (errmodule(MOD_OPT_JOIN),
+ errmsg("[ES]rows=%.lf, tuples=%.lf, multiple=%.lf", rel->rows, rel->tuples, rel->multiple)));
+
+ /*
+ * Adjust estimated bucketsize upward to account for skewed distribution.
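+ * Illustrative example with assumed numbers: for avgfreq = 0.001 and
+ * mcvfreq = 0.05, estfract is scaled by a factor of 50, and the result is
+ * then capped at mcvfreq * multiple if it overshoots that bound.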
+ */
+ if (avgfreq > 0.0 && mcvfreq > avgfreq) {
+ /* if the hashkey contains the distribute key, the mcv freq should be multiplied by the dn number */
+ double multiple = 1.0;
+ /* for now, only consider the single distribute key situation */
+ if (list_length(hashkeys) >= list_length(rel->distribute_keys) &&
+ list_is_subset(rel->distribute_keys, hashkeys))
+ multiple = u_sess->pgxc_cxt.NumDataNodes;
+
+ estfract *= mcvfreq / avgfreq;
+ /* if the adjusted selectivity is larger than mcvfreq, the estimate is too far off;
+ take mcvfreq instead. */
+ if (estfract > mcvfreq * multiple)
+ estfract = mcvfreq * multiple;
+ }
+
+ ereport(DEBUG1,
+ (errmodule(MOD_OPT_JOIN),
+ errmsg("[ES]ndistinct=%.lf, avgfreq=%.10f, mcvfreq=%.10f, estfract=%.10f",
+ ndistinct,
+ avgfreq,
+ mcvfreq,
+ estfract)));
+
+ /*
+ * Clamp bucketsize to a sane range (the above adjustment could easily
+ * produce an out-of-range result). We set the lower bound a little above
+ * zero, since zero isn't a very sane result.
+ * We adjust the lower bound to 1.0e-7 because the distinct value
+ * may be larger than 1000000 as work_mem increases.
+ */
+ if (estfract < 1.0e-7)
+ estfract = 1.0e-7;
+ else if (estfract > 1.0) {
+ if (mcvfreq > 0.0)
+ estfract = mcvfreq;
+ else
+ estfract = 1.0;
+ }
+
+ return (Selectivity)estfract;
+}
+
+/*
+ * @brief mostly the same as the standalone estimate_local_numdistinct()
+ */
+double ES_SELECTIVITY::estimate_local_numdistinct(es_bucketsize* bucket, bool left, Path* pathnode)
+{
+ VariableStatData vardata;
+ bool usesinglestats = true;
+ double ndistinct = left ? bucket->left_dndistinct : bucket->right_dndistinct;
+ double global_distinct = left ? bucket->left_distinct : bucket->right_distinct;
+ unsigned int num_datanodes = ng_get_dest_num_data_nodes(pathnode);
+ List* hashkeys = left ? bucket->left_hashkeys : bucket->right_hashkeys;
+
+ vardata.rel = left ? bucket->left_rel : bucket->right_rel;
+
+ /* we should adjust the local distinct if there are no tuples in dn1 for global stats. */
+ if ((ndistinct * num_datanodes) < global_distinct)
+ ndistinct = get_local_rows(global_distinct, vardata.rel->multiple, false, num_datanodes);
+
+ /* Adjust global distinct values for STREAM_BROADCAST and STREAM_REDISTRIBUTE. */
+ if (IsA(pathnode, StreamPath) || IsA(pathnode, HashPath) || IsLocatorReplicated(pathnode->locator_type)) {
+ ndistinct =
+ estimate_hash_num_distinct(root, hashkeys, pathnode, &vardata, ndistinct, global_distinct, &usesinglestats);
+ }
+
+ /*
+ * Adjust ndistinct to account for restriction clauses. Observe we are
+ * assuming that the data distribution is affected uniformly by the
+ * restriction clauses!
+ *
+ * XXX Possibly better way, but much more expensive: multiply by
+ * selectivity of rel's restriction clauses that mention the target Var.
+ *
+ * Only single-column stats need to multiply the ratio rows/tuples, because
+ * the Poisson model is used for global stats. If the Poisson model has
+ * already been used, we don't need to multiply the ratio.
+ */
+ return ndistinct;
+}
+
+Selectivity ES_SELECTIVITY::cal_eqjoinsel(es_candidate* es, JoinType jointype)
+{
+ Selectivity result = 1.0;
+ RelOptInfo* inner_rel = NULL;
+ bool inner_on_left = false;
+ switch (jointype) {
+ case JOIN_INNER:
+ case JOIN_LEFT:
+ case JOIN_FULL:
+ result *= cal_eqjoinsel_inner(es);
+ break;
+ case JOIN_SEMI:
+ case JOIN_ANTI:
+ case JOIN_LEFT_ANTI_FULL:
+ /*
+ * Look up the join's inner relation.
min_righthand is sufficient + * information because neither SEMI nor ANTI joins permit any + * reassociation into or out of their RHS, so the righthand will + * always be exactly that set of rels. + */ + inner_rel = find_join_input_rel(root, sjinfo->min_righthand); + + /* inner_rel could be a join rel */ + inner_on_left = (bms_is_subset(es->left_rel->relids, inner_rel->relids)); + if (!inner_on_left) { + Assert(bms_is_subset(es->right_rel->relids, inner_rel->relids)); + } + result *= cal_eqjoinsel_semi(es, inner_rel, inner_on_left); + break; + default: + /* other values not expected here */ + break; + } + return result; +} + +/* + * @brief calculate selectivity for eqsel using multi-column statistics + */ +Selectivity ES_SELECTIVITY::cal_eqsel(es_candidate* es) +{ + Selectivity result = 1.0; + ListCell* lc = NULL; + int i = 0; + int j = 0; + int column_count = 0; + bool match = false; + + Assert(es->left_extended_stats); + + /* if all clauses are null, just use nullfrac */ + if (es->has_null_clause) { + foreach(lc, es->clause_group) { + RestrictInfo* rinfo = (RestrictInfo*)lfirst(lc); + if (!IsA(rinfo->clause, NullTest)) + break; + } + if (lc == NULL) { + result = es->left_extended_stats->nullfrac; + CLAMP_PROBABILITY(result); + save_selectivity(es, result, 0.0); + return result; + } + } + + /* if there is no MCV, just use distinct */ + if (!es->left_extended_stats->mcv_values) { + result = (result - es->left_extended_stats->nullfrac) / es->left_stadistinct; + CLAMP_PROBABILITY(result); + save_selectivity(es, result, 0.0); + ereport(ES_DEBUG_LEVEL, + (errmodule(MOD_OPT), + (errmsg("[ES]extended statistic is used to calculate eqsel selectivity as %e", result)))); + return result; + } + + /* try to use MCV */ + column_count = es->clause_group->length; + + Assert(column_count == bms_num_members(es->left_extended_stats->bms_attnum)); + + /* set up attnum order */ + int* attnum_order = (int*)palloc(column_count * sizeof(int)); + set_up_attnum_order(es, attnum_order, true); + + Assert(es->left_extended_stats->mcv_nvalues / column_count == es->left_extended_stats->mcv_nnumbers); + + /* match MCV with const value from clauses */ + double sum_mcv_numbers = 0.0; + for (i = 0; i < es->left_extended_stats->mcv_nnumbers; i++) { + match = false; + j = 0; + /* process clause one by one */ + foreach(lc, es->clause_group) { + FmgrInfo eqproc; + Datum const_value; + bool var_on_left = false; + + /* set up eqproc */ + RestrictInfo* clause = (RestrictInfo*)lfirst(lc); + int mcv_position = attnum_order[j] * es->left_extended_stats->mcv_nnumbers + i; + + if (IsA(clause->clause, OpExpr)) { + OpExpr* opclause = (OpExpr*)clause->clause; + Oid opno = opclause->opno; + fmgr_info(get_opcode(opno), &eqproc); + + /* set up const value */ + Node* left = (Node*)linitial(opclause->args); + Node* right = (Node*)lsecond(opclause->args); + if (IsA(left, Const)) { + const_value = ((Const*)left)->constvalue; + var_on_left = false; + } else if (IsA(right, Const)) { + const_value = ((Const*)right)->constvalue; + var_on_left = true; + } + + Datum mcv_value = es->left_extended_stats->mcv_values[mcv_position]; + if (var_on_left) + match = DatumGetBool(FunctionCall2Coll(&eqproc, DEFAULT_COLLATION_OID, mcv_value, const_value)); + else + match = DatumGetBool(FunctionCall2Coll(&eqproc, DEFAULT_COLLATION_OID, const_value, mcv_value)); + } else { + Assert(IsA(clause->clause, NullTest)); + match = es->left_extended_stats->mcv_nulls[mcv_position]; + } + + if (!match) + break; + j++; + } + + if (match) { + result = 
es->left_extended_stats->mcv_numbers[i]; + break; + } else + sum_mcv_numbers += es->left_extended_stats->mcv_numbers[i]; + } + + if (!match) { + double sum_other_mcv_numbers = 0.0; + for (int index = 0; index < es->left_extended_stats->other_mcv_nnumbers; index++) + sum_other_mcv_numbers += es->left_extended_stats->other_mcv_numbers[index]; + result = 1.0 - sum_mcv_numbers - sum_other_mcv_numbers - es->left_extended_stats->nullfrac; + CLAMP_PROBABILITY(result); + float4 other_distinct = clamp_row_est(es->left_stadistinct - es->left_extended_stats->mcv_nnumbers); + result /= other_distinct; + + /* + * Another cross-check: selectivity shouldn't be estimated as more + * than the least common "most common value". + */ + int last_mcv_member = es->left_extended_stats->mcv_nnumbers - 1; + float4 least_common_value = es->left_extended_stats->mcv_numbers[last_mcv_member]; + if (result > least_common_value) + result = least_common_value; + } + + pfree_ext(attnum_order); + + CLAMP_PROBABILITY(result); + save_selectivity(es, result, 0.0); + ereport(ES_DEBUG_LEVEL, + (errmodule(MOD_OPT), (errmsg("[ES]extended statistic is used to calculate eqsel selectivity as %e", result)))); + return result; +} + +/* + * @brief calculate selectivity for join using multi-column statistics + */ +Selectivity ES_SELECTIVITY::cal_eqjoinsel_inner(es_candidate* es) +{ + Assert(es->left_extended_stats); + Selectivity result = 1.0; + int i; + + /* update nullfrac to contain null mcv fraction */ + for (i = 0; i < es->left_extended_stats->other_mcv_nnumbers; i++) + es->left_extended_stats->nullfrac += es->left_extended_stats->other_mcv_numbers[i]; + for (i = 0; i < es->right_extended_stats->other_mcv_nnumbers; i++) + es->right_extended_stats->nullfrac += es->right_extended_stats->other_mcv_numbers[i]; + + /* if there is no MCV, just use distinct */ + if (!es->left_extended_stats->mcv_values || !es->right_extended_stats->mcv_values) { + result *= (1.0 - es->left_extended_stats->nullfrac) * (1.0 - es->right_extended_stats->nullfrac); + result /= es->left_stadistinct > es->right_stadistinct ? 
es->left_stadistinct : es->right_stadistinct;
+ CLAMP_PROBABILITY(result);
+ double left_ratio = es->right_stadistinct / es->left_stadistinct * (1.0 - es->left_extended_stats->nullfrac);
+ double right_ratio = es->left_stadistinct / es->right_stadistinct * (1.0 - es->right_extended_stats->nullfrac);
+ save_selectivity(es, left_ratio, right_ratio);
+ ereport(ES_DEBUG_LEVEL,
+ (errmodule(MOD_OPT),
+ (errmsg("[ES]extended statistic is used to calculate eqjoinsel_inner selectivity as %e", result))));
+ return result;
+ }
+
+ /* try to use MCV */
+ int column_count = es->clause_group->length;
+ Assert(column_count == bms_num_members(es->left_extended_stats->bms_attnum));
+
+ /* set up attnum order */
+ int* left_attnum_order = (int*)palloc(column_count * sizeof(int));
+ int* right_attnum_order = (int*)palloc(column_count * sizeof(int));
+ set_up_attnum_order(es, left_attnum_order, true);
+ set_up_attnum_order(es, right_attnum_order, false);
+
+ ListCell* lc = NULL;
+ FmgrInfo* eqproc = (FmgrInfo*)palloc(column_count * sizeof(FmgrInfo));
+ bool* left_var_on_clause_leftside = (bool*)palloc(column_count * sizeof(bool));
+ i = 0;
+ foreach(lc, es->clause_group) {
+ /* set up eqproc */
+ RestrictInfo* clause = (RestrictInfo*)lfirst(lc);
+ OpExpr* opclause = (OpExpr*)clause->clause;
+ Oid opno = opclause->opno;
+ fmgr_info(get_opcode(opno), &(eqproc[i]));
+
+ /* set up left_var_on_clause_leftside */
+ if (bms_equal(clause->left_relids, es->left_relids))
+ left_var_on_clause_leftside[i] = true;
+ else
+ left_var_on_clause_leftside[i] = false;
+
+ i++;
+ }
+
+ /* prepare to match MCVs */
+ double left_relfrac = es->left_rel->rows / es->left_rel->tuples;
+ double right_relfrac = es->right_rel->rows / es->right_rel->tuples;
+ CLAMP_PROBABILITY(left_relfrac);
+ CLAMP_PROBABILITY(right_relfrac);
+ double relfrac = left_relfrac * right_relfrac;
+
+ int left_mcv_nums = es->left_extended_stats->mcv_nnumbers;
+ int right_mcv_nums = es->right_extended_stats->mcv_nnumbers;
+ bool* left_match = (bool*)palloc0(left_mcv_nums * sizeof(bool));
+ bool* right_match = (bool*)palloc0(right_mcv_nums * sizeof(bool));
+
+ /*
+ * The calculation logic here is the same as that in eqjoinsel_inner:
+ * Note we assume that each MCV will match at most one member of the
+ * other MCV list. If the operator isn't really equality, there could
+ * be multiple matches --- but we don't look for them, both for speed
+ * and because the math wouldn't add up...
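+ * Illustrative example with assumed numbers: if a left MCV with frequency 0.20
+ * matches a right MCV with frequency 0.15, the pair contributes
+ * 0.20 * 0.15 = 0.03 to matchprodfreq and increments nmatches by one.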
+ */ + double matchprodfreq = 0.0; + int nmatches = 0; + for (i = 0; i < left_mcv_nums; i++) { + int j; + + for (j = 0; j < right_mcv_nums; j++) { + if (right_match[j]) + continue; + bool all_match = false; + int k = 0; + /* process clause one by one */ + foreach(lc, es->clause_group) { + int left_mcv_position = left_attnum_order[k] * left_mcv_nums + i; + int right_mcv_position = right_attnum_order[k] * right_mcv_nums + j; + Datum left_mcv_value = es->left_extended_stats->mcv_values[left_mcv_position]; + Datum right_mcv_value = es->right_extended_stats->mcv_values[right_mcv_position]; + if (left_var_on_clause_leftside[k]) + all_match = DatumGetBool( + FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, left_mcv_value, right_mcv_value)); + else + all_match = DatumGetBool( + FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, right_mcv_value, left_mcv_value)); + if (!all_match) + break; + k++; + } + + if (all_match) { + left_match[i] = right_match[j] = true; + matchprodfreq += es->left_extended_stats->mcv_numbers[i] * es->right_extended_stats->mcv_numbers[j]; + nmatches++; + break; + } + } + } + + /* adjust match freq according to relation's filter fraction */ + double left_nullfrac = es->left_extended_stats->nullfrac; + double right_nullfrac = es->right_extended_stats->nullfrac; + double left_matchfreq, right_matchfreq, left_unmatchfreq, right_unmatchfreq, left_otherfreq, right_otherfreq, + left_totalsel, right_totalsel; + int tmp_nmatches = (int)ceil((double)nmatches * relfrac); + if (nmatches != 0) + matchprodfreq *= (double)tmp_nmatches / nmatches; + CLAMP_PROBABILITY(matchprodfreq); + /* Sum up frequencies of matched and unmatched MCVs */ + left_matchfreq = left_unmatchfreq = 0.0; + for (i = 0; i < left_mcv_nums; i++) { + if (left_match[i]) + left_matchfreq += es->left_extended_stats->mcv_numbers[i]; + else + left_unmatchfreq += es->left_extended_stats->mcv_numbers[i]; + } + CLAMP_PROBABILITY(left_matchfreq); + CLAMP_PROBABILITY(left_unmatchfreq); + right_matchfreq = right_unmatchfreq = 0.0; + for (i = 0; i < right_mcv_nums; i++) { + if (right_match[i]) + right_matchfreq += es->right_extended_stats->mcv_numbers[i]; + else + right_unmatchfreq += es->right_extended_stats->mcv_numbers[i]; + } + CLAMP_PROBABILITY(right_matchfreq); + CLAMP_PROBABILITY(right_unmatchfreq); + pfree_ext(left_match); + pfree_ext(right_match); + pfree_ext(left_attnum_order); + pfree_ext(right_attnum_order); + + /* + * Compute total frequency of non-null values that are not in the MCV + * lists. + */ + left_otherfreq = 1.0 - left_nullfrac - left_matchfreq - left_unmatchfreq; + right_otherfreq = 1.0 - right_nullfrac - right_matchfreq - right_unmatchfreq; + CLAMP_PROBABILITY(left_otherfreq); + CLAMP_PROBABILITY(right_otherfreq); + + /* + * We can estimate the total selectivity from the point of view of + * relation 1 as: the known selectivity for matched MCVs, plus + * unmatched MCVs that are assumed to match against random members of + * relation 2's non-MCV population, plus non-MCV values that are + * assumed to match against random members of relation 2's unmatched + * MCVs plus non-MCV values. 
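+ * Written out (all quantities are computed below; each term is applied only
+ * when its denominator is positive):
+ * left_totalsel = matchprodfreq
+ * + left_unmatchfreq * right_otherfreq / (right_distinct - right_nvalues_frac) * relfrac
+ * + left_otherfreq * (right_otherfreq + right_unmatchfreq) / (right_distinct - tmp_nmatches) * relfrac
+ * where right_distinct is es->right_extended_stats->distinct.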
+ */
+ int left_nvalues_frac = (int)ceil((double)left_mcv_nums * left_relfrac);
+ int right_nvalues_frac = (int)ceil((double)right_mcv_nums * right_relfrac);
+ left_totalsel = matchprodfreq;
+ if (es->right_extended_stats->distinct > right_nvalues_frac)
+ left_totalsel +=
+ left_unmatchfreq * right_otherfreq / (es->right_extended_stats->distinct - right_nvalues_frac) * relfrac;
+ if (es->right_extended_stats->distinct > tmp_nmatches)
+ left_totalsel += left_otherfreq * (right_otherfreq + right_unmatchfreq) /
+ (es->right_extended_stats->distinct - tmp_nmatches) * relfrac;
+ /* Same estimate from the point of view of relation 2. */
+ right_totalsel = matchprodfreq;
+ if (es->left_extended_stats->distinct > left_nvalues_frac)
+ right_totalsel +=
+ right_unmatchfreq * left_otherfreq / (es->left_extended_stats->distinct - left_nvalues_frac) * relfrac;
+ if (es->left_extended_stats->distinct > tmp_nmatches)
+ right_totalsel += right_otherfreq * (left_otherfreq + left_unmatchfreq) /
+ (es->left_extended_stats->distinct - tmp_nmatches) * relfrac;
+
+ /*
+ * Use the smaller of the two estimates. This can be justified in
+ * essentially the same terms as given below for the no-stats case: to
+ * a first approximation, we are estimating from the point of view of
+ * the relation with smaller nd.
+ */
+ if (relfrac == 0)
+ result = 0;
+ else
+ result = (left_totalsel < right_totalsel) ? left_totalsel / relfrac : right_totalsel / relfrac;
+
+ /*
+ * calculate the join ratio for both tables, assuming that the smaller set of
+ * distinct values will be entirely joined out
+ */
+ double left_join_ratio = 0.0;
+ double right_join_ratio = 0.0;
+
+ if (nmatches != 0 && left_relfrac != 0 && right_relfrac != 0) {
+ left_join_ratio = right_matchfreq * tmp_nmatches / (nmatches * left_relfrac);
+ right_join_ratio = right_matchfreq * tmp_nmatches / (nmatches * right_relfrac);
+ }
+
+ if (es->left_extended_stats->distinct > es->right_extended_stats->distinct) {
+ if (es->left_extended_stats->distinct != tmp_nmatches) {
+ left_join_ratio += left_otherfreq * (es->right_extended_stats->distinct - tmp_nmatches) /
+ (es->left_extended_stats->distinct - tmp_nmatches);
+ }
+ right_join_ratio += right_otherfreq;
+ } else if (es->left_extended_stats->distinct < es->right_extended_stats->distinct) {
+ if (es->right_extended_stats->distinct != tmp_nmatches) {
+ right_join_ratio += right_otherfreq * (es->left_extended_stats->distinct - tmp_nmatches) /
+ (es->right_extended_stats->distinct - tmp_nmatches);
+ }
+ left_join_ratio += left_otherfreq;
+ }
+ CLAMP_PROBABILITY(left_join_ratio);
+ CLAMP_PROBABILITY(right_join_ratio);
+ CLAMP_PROBABILITY(result);
+
+ save_selectivity(es, left_join_ratio, right_join_ratio);
+ ereport(ES_DEBUG_LEVEL,
+ (errmodule(MOD_OPT),
+ (errmsg("[ES]extended statistic is used to calculate eqjoinsel selectivity as %e", result))));
+
+ /* save the mcv freq to calculate skew or hash bucket size */
+ es->left_first_mcvfreq = es->left_extended_stats->mcv_numbers[0];
+ es->right_first_mcvfreq = es->right_extended_stats->mcv_numbers[0];
+
+ return result;
+}
+
+Selectivity ES_SELECTIVITY::cal_eqjoinsel_semi(es_candidate* es, RelOptInfo* inner_rel, bool inner_on_left)
+{
+ Assert(es->left_extended_stats);
+ Selectivity result = 1.0;
+ double nullfrac = inner_on_left ? es->right_extended_stats->nullfrac : es->left_extended_stats->nullfrac;
+ double inner_distinct = inner_on_left ? es->left_stadistinct : es->right_stadistinct;
+ double outer_distinct = inner_on_left ?
es->right_stadistinct : es->left_stadistinct;
+
+ /*
+ * Clamp inner_distinct to be not more than what we estimate the inner relation's
+ * size to be, especially when inner_rel can be a joined rel.
+ */
+ inner_distinct = Min(inner_distinct, inner_rel->rows);
+
+ /* if there is no MCV, just use distinct */
+ if (!es->left_extended_stats->mcv_values || !es->right_extended_stats->mcv_values) {
+ result *= (1.0 - nullfrac);
+ if (inner_distinct < outer_distinct)
+ result *= inner_distinct / outer_distinct;
+ } else {
+ /* try to use MCV */
+ int column_count = es->clause_group->length;
+ Assert(column_count == bms_num_members(es->left_extended_stats->bms_attnum));
+
+ /* set up attnum order */
+ int* left_attnum_order = (int*)palloc(column_count * sizeof(int));
+ int* right_attnum_order = (int*)palloc(column_count * sizeof(int));
+ set_up_attnum_order(es, left_attnum_order, true);
+ set_up_attnum_order(es, right_attnum_order, false);
+
+ ListCell* lc = NULL;
+ FmgrInfo* eqproc = (FmgrInfo*)palloc(column_count * sizeof(FmgrInfo));
+ bool* left_var_on_clause_leftside = (bool*)palloc(column_count * sizeof(bool));
+ int i = 0;
+ foreach(lc, es->clause_group) {
+ /* set up eqproc */
+ RestrictInfo* clause = (RestrictInfo*)lfirst(lc);
+ OpExpr* opclause = (OpExpr*)clause->clause;
+ Oid opno = opclause->opno;
+ fmgr_info(get_opcode(opno), &(eqproc[i]));
+
+ /* set up left_var_on_clause_leftside */
+ if (bms_equal(clause->left_relids, es->left_relids))
+ left_var_on_clause_leftside[i] = true;
+ else
+ left_var_on_clause_leftside[i] = false;
+
+ i++;
+ }
+
+ /* prepare to match MCVs */
+ int left_mcv_nums = es->left_extended_stats->mcv_nnumbers;
+ int right_mcv_nums = es->right_extended_stats->mcv_nnumbers;
+ bool* left_match = (bool*)palloc0(left_mcv_nums * sizeof(bool));
+ bool* right_match = (bool*)palloc0(right_mcv_nums * sizeof(bool));
+
+ /*
+ * The calculation logic here is the same as that in eqjoinsel_inner:
+ * Note we assume that each MCV will match at most one member of the
+ * other MCV list. If the operator isn't really equality, there could
+ * be multiple matches --- but we don't look for them, both for speed
+ * and because the math wouldn't add up...
+ */
+ double matchprodfreq = 0.0;
+ int nmatches = 0;
+ for (i = 0; i < left_mcv_nums; i++) {
+ int j;
+
+ for (j = 0; j < right_mcv_nums; j++) {
+ if (right_match[j])
+ continue;
+ bool all_match = false;
+ int k = 0;
+ /* process clause one by one */
+ foreach(lc, es->clause_group) {
+ int left_mcv_position = left_attnum_order[k] * left_mcv_nums + i;
+ int right_mcv_position = right_attnum_order[k] * right_mcv_nums + j;
+ Datum left_mcv_value = es->left_extended_stats->mcv_values[left_mcv_position];
+ Datum right_mcv_value = es->right_extended_stats->mcv_values[right_mcv_position];
+ if (left_var_on_clause_leftside[k])
+ all_match = DatumGetBool(
+ FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, left_mcv_value, right_mcv_value));
+ else
+ all_match = DatumGetBool(
+ FunctionCall2Coll(&(eqproc[k]), DEFAULT_COLLATION_OID, right_mcv_value, left_mcv_value));
+ if (!all_match)
+ break;
+ k++;
+ }
+
+ if (all_match) {
+ left_match[i] = right_match[j] = true;
+ nmatches++;
+ break;
+ }
+ }
+ }
+
+ pfree_ext(left_attnum_order);
+ pfree_ext(right_attnum_order);
+ matchprodfreq = 0.0;
+ int mcv_num = inner_on_left ?
right_mcv_nums : left_mcv_nums;
+ for (i = 0; i < mcv_num; i++) {
+ if (inner_on_left && right_match[i])
+ matchprodfreq += es->right_extended_stats->mcv_numbers[i];
+ else if (!inner_on_left && left_match[i])
+ matchprodfreq += es->left_extended_stats->mcv_numbers[i];
+ }
+ CLAMP_PROBABILITY(matchprodfreq);
+
+ /*
+ * Now we need to estimate the fraction of relation 1 that has at
+ * least one join partner. We know for certain that the matched MCVs
+ * do, so that gives us a lower bound, but we're really in the dark
+ * about everything else. Our crude approach is: if nd1 <= nd2 then
+ * assume all non-null rel1 rows have join partners, else assume for
+ * the uncertain rows that a fraction nd2/nd1 have join partners. We
+ * can discount the known-matched MCVs from the distinct-values counts
+ * before doing the division.
+ *
+ * Crude as the above is, it's completely useless if we don't have
+ * reliable ndistinct values for both sides. Hence, if either nd1 or
+ * nd2 is default, punt and assume half of the uncertain rows have
+ * join partners.
+ */
+ inner_distinct -= nmatches;
+ outer_distinct -= nmatches;
+ double uncertainfrac, uncertain;
+ if (inner_distinct >= outer_distinct || inner_distinct < 0)
+ uncertainfrac = 1.0;
+ else
+ uncertainfrac = inner_distinct / outer_distinct;
+ uncertain = 1.0 - matchprodfreq - nullfrac;
+ CLAMP_PROBABILITY(uncertain);
+ result = matchprodfreq + uncertainfrac * uncertain;
+ }
+
+ CLAMP_PROBABILITY(result);
+ save_selectivity(es, result, 0.0, true);
+ ereport(ES_DEBUG_LEVEL,
+ (errmodule(MOD_OPT),
+ (errmsg("[ES]extended statistic is used to calculate eqjoinsel_semi selectivity as %e", result))));
+ return result;
+}
+
+/*
+ * @brief calculate distinct for groupby using multi-column statistics;
+ * in order to use the result in estimate_num_groups(),
+ * build a varinfo and save it in unmatched_clause_group.
+ */
+void ES_SELECTIVITY::build_pseudo_varinfo(es_candidate* es, STATS_EST_TYPE eType)
+{
+ /* build a pseudo varinfo here with the multi-column distinct */
+ GroupVarInfo* varinfo = (GroupVarInfo*)palloc(sizeof(GroupVarInfo));
+ varinfo->var = NULL;
+ varinfo->rel = es->left_rel;
+ if (eType == STATS_TYPE_GLOBAL)
+ varinfo->ndistinct = es->left_stadistinct;
+ else {
+ /* get the local distinct */
+ if (es->left_extended_stats->dndistinct < 0) {
+ double ntuples = get_local_rows(es->left_rel->tuples,
+ es->left_rel->multiple,
+ IsLocatorReplicated(es->left_rel->locator_type),
+ ng_get_dest_num_data_nodes(es->left_rel));
+ varinfo->ndistinct = clamp_row_est(
+ -1 * es->left_extended_stats->dndistinct * ntuples * (1.0 - es->left_extended_stats->nullfrac));
+ } else
+ varinfo->ndistinct = clamp_row_est(es->left_extended_stats->dndistinct);
+ }
+ varinfo->isdefault = false;
+ varinfo->es_is_used = true;
+ varinfo->es_attnums = bms_copy(es->left_attnums);
+ unmatched_clause_group = lappend(unmatched_clause_group, varinfo);
+ ereport(ES_DEBUG_LEVEL,
+ (errmodule(MOD_OPT),
+ (errmsg("[ES]extended statistic is used to calculate groupby distinct as %f", varinfo->ndistinct))));
+
+ return;
+}
+
+/*
+ * @brief delete an invalid es_candidate; the memory will be freed in clear().
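+ * The clauses of the removed candidate are appended to unmatched_clause_group,
+ * so they can still be estimated without extended statistics.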
+ */
+void ES_SELECTIVITY::remove_candidate(es_candidate* es)
+{
+ switch (es->tag) {
+ case ES_EQSEL:
+ case ES_EQJOINSEL:
+ case ES_GROUPBY:
+ unmatched_clause_group = list_concat(unmatched_clause_group, es->clause_group);
+ break;
+ default:
+ break;
+ }
+ es->tag = ES_EMPTY;
+ es->clause_group = NULL;
+
+ return;
+}
+
+/*
+ * @brief remove the corresponding entries from the es_candidate according to the dumped attnum;
+ * the dumped attnum itself should be deleted from left_attnums outside this function.
+ */
+void ES_SELECTIVITY::remove_attnum(es_candidate* es, int dump_attnum)
+{
+ /* delete the clause and clause_map according to the dumped attnum */
+ ListCell* lc_clause = list_head(es->clause_group);
+ ListCell* lc_clause_map = NULL;
+ ListCell* prev_clause = NULL;
+ ListCell* prev_clause_map = NULL;
+ foreach(lc_clause_map, es->clause_map) {
+ es_clause_map* clause_map = (es_clause_map*)lfirst(lc_clause_map);
+
+ if (clause_map->left_attnum == dump_attnum) {
+ if (es->tag == ES_EQJOINSEL) {
+ Assert(bms_is_member(clause_map->right_attnum, es->right_attnums));
+ es->right_attnums = bms_del_member(es->right_attnums, clause_map->right_attnum);
+ }
+
+ /* if found, delete the clause map from the list */
+ pfree_ext(clause_map);
+ lfirst(lc_clause_map) = NULL;
+ es->clause_map = list_delete_cell(es->clause_map, lc_clause_map, prev_clause_map);
+
+ /* delete the clause from the list and add it to the unmatched list */
+ unmatched_clause_group = lappend(unmatched_clause_group, lfirst(lc_clause));
+ lfirst(lc_clause) = NULL;
+ es->clause_group = list_delete_cell(es->clause_group, lc_clause, prev_clause);
+
+ /* no need to continue, just try to find the next matched stats */
+ break;
+ } else {
+ prev_clause = lc_clause;
+ prev_clause_map = lc_clause_map;
+ lc_clause = lc_clause->next;
+ }
+ }
+
+ return;
+}
+
+/*
+ * @brief Set up the attnum order according to the clause map, so that we can use this order
+ * to locate the corresponding clause when we go through the bitmap of attnums.
+ * We need this order because clauses are ordered by their position in the clause list while
+ * MCVs in extended stats are ordered by attnum.
+ * left == true : set up the left attnum order ; left == false : set up the right attnum order .
+ */
+void ES_SELECTIVITY::set_up_attnum_order(es_candidate* es, int* attnum_order, bool left) const
+{
+ int i;
+ int j = 0;
+ ListCell* lc = NULL;
+ foreach(lc, es->clause_map) {
+ i = 0;
+ int attnum = 0;
+ es_clause_map* clause_map = (es_clause_map*)lfirst(lc);
+ Bitmapset* tmpset = left ?
bms_copy(es->left_attnums) : bms_copy(es->right_attnums); + while ((attnum = bms_first_member(tmpset)) >= 0) { + if ((left && attnum == clause_map->left_attnum) || (!left && attnum == clause_map->right_attnum)) { + attnum_order[j] = i; + break; + } + i++; + } + pfree_ext(tmpset); + j++; + } + return; +} + +/* + * @brief print debug info including ES type, involving rels and clauses + */ +void ES_SELECTIVITY::debug_print() +{ + if (log_min_messages > ES_DEBUG_LEVEL) + return; + ListCell* l = NULL; + foreach(l, es_candidate_list) { + es_candidate* temp = (es_candidate*)lfirst(l); + ereport(DEBUG1, + (errmodule(MOD_OPT_JOIN), + errmsg("[ES]ES_TYPE = %d (0:empty; 1:eqsel; 2:eqjoinsel; 3:group by)", (int)temp->tag))); + if (temp->tag == ES_EMPTY) + continue; + + print_relids(temp->relids, "All rels:"); + + if (temp->left_rte) { + print_relids(temp->left_relids, "Left rels:"); + print_relids(temp->left_attnums, "Left attnums:"); + print_rel(temp->left_rte); + } + + if (temp->right_rte) { + print_relids(temp->right_relids, "Right rels:"); + print_relids(temp->right_attnums, "Right attnums:"); + print_rel(temp->right_rte); + } + + switch (temp->tag) { + case ES_EQSEL: + case ES_EQJOINSEL: + print_clauses(temp->clause_group); + break; + default: + break; + } + } + return; +} + +void ES_SELECTIVITY::print_rel(RangeTblEntry* rel) const +{ + StringInfoData buf; + initStringInfo(&buf); + appendStringInfo(&buf, "%s, relkind: %c, inheritance or not:%d", rel->relname, rel->relkind, (int)rel->inh); + ereport(DEBUG1, (errmodule(MOD_OPT_JOIN), (errmsg("[ES]%s", buf.data)))); + pfree_ext(buf.data); + return; +} + +void ES_SELECTIVITY::print_relids(Bitmapset* relids, const char* str) const +{ + StringInfoData buf; + initStringInfo(&buf); + Relids tmprelids = bms_copy(relids); + int x; + appendStringInfoString(&buf, str); + while ((x = bms_first_member(tmprelids)) >= 0) { + appendStringInfo(&buf, "%d, ", x); + } + bms_free_ext(tmprelids); + ereport(DEBUG1, (errmodule(MOD_OPT_JOIN), (errmsg("[ES]%s", buf.data)))); + pfree_ext(buf.data); + return; +} + +void ES_SELECTIVITY::print_clauses(List* clauses) const +{ + if (root == NULL || list_length(clauses) == 0) + return; + + ListCell* l = NULL; + StringInfoData buf; + initStringInfo(&buf); + + appendStringInfo(&buf, "Clause length:%d, clause list:", list_length(clauses)); + + foreach(l, clauses) { + RestrictInfo* c = (RestrictInfo*)lfirst(l); + char* expr = print_expr((Node*)c->clause, root->parse->rtable); + appendStringInfoString(&buf, expr); + pfree_ext(expr); + if (lnext(l)) + appendStringInfoString(&buf, ", "); + } + + ereport(DEBUG1, (errmodule(MOD_OPT_JOIN), errmsg("[ES]%s", buf.data))); + + pfree_ext(buf.data); + + return; +} + +char* ES_SELECTIVITY::print_expr(const Node* expr, const List* rtable) const +{ + return ExprToString(expr, rtable); +} diff --git a/src/gausskernel/optimizer/path/indxpath.cpp b/src/gausskernel/optimizer/path/indxpath.cpp index 31346a6ae..2afe50bdf 100755 --- a/src/gausskernel/optimizer/path/indxpath.cpp +++ b/src/gausskernel/optimizer/path/indxpath.cpp @@ -38,14 +38,12 @@ #include "optimizer/restrictinfo.h" #include "optimizer/var.h" #include "parser/parse_hint.h" -#include "parser/parse_oper.h" #include "parser/parsetree.h" #include "utils/builtins.h" #include "utils/bytea.h" #include "utils/lsyscache.h" #include "utils/pg_locale.h" #include "utils/selfuncs.h" -#include "rusagestub.h" #define IsBooleanOpfamily(opfamily) ((opfamily) == BOOL_BTREE_FAM_OID || \ (opfamily) == BOOL_HASH_FAM_OID || (opfamily) == BOOL_UBTREE_FAM_OID) @@ 
-824,8 +822,9 @@ static bool PathkeysIsUnusefulForPartition(const IndexOptInfo* index)
 /*
 * Only the local index's pathkeys is unuseful.
 * It can only ensure that the current partition data is in order.
+ * Hypothetical index is useful by default.
 */
- if (index->ispartitionedindex && index->isGlobal) {
+ if (index->isGlobal || (u_sess->attr.attr_sql.enable_hypo_index && index->hypothetical)) {
 return false;
 }
 bool result = false;
@@ -2216,100 +2215,6 @@ static void match_clauses_to_index(IndexOptInfo* index, List* clauses, IndexClau
 }
 }
 
-static Oid typeCastForIdxMatch[] = {RTRIM1FUNCOID /* bpchar to text func */
- /* Supported in the future */};
-
-inline bool SupportedConvertForIdxMatch(Oid FuncOid)
-{
- for (int i = 0; i < (int)lengthof(typeCastForIdxMatch); ++i) {
- if (typeCastForIdxMatch[i] == FuncOid) {
- return true;
- }
- }
- /* Didn't find. */
- return false;
-}
-
-static bool ConvertFuncEqConst(Expr* clause, Node* leftop, Node* rightop,
- RestrictInfo* rinfo, RestrictInfo* &rinfo_converted)
-{
- if ((IsA(leftop, FuncExpr) && IsA(rightop, Const) && SupportedConvertForIdxMatch(((FuncExpr*)leftop)->funcid)) ||
- (IsA(leftop, Const) && IsA(rightop, FuncExpr) && SupportedConvertForIdxMatch(((FuncExpr*)rightop)->funcid))) {
- Node* funcop = IsA(leftop, FuncExpr) ? leftop : rightop;
- Node* constop = IsA(leftop, FuncExpr) ? rightop : leftop;
- if (list_length(((FuncExpr*)funcop)->args) == 1 && IsA(lfirst(list_head(((FuncExpr*)funcop)->args)), Var)) {
- Var* var = (Var*)copyObject((Var*)linitial(((FuncExpr*)funcop)->args));
- Const* c = (Const*)copyObject(constop);
- c->consttype = var->vartype;
-
- rinfo_converted = (RestrictInfo*)copyObject(rinfo);
- rinfo_converted->clause = make_op(NULL,
- get_operator_name(((OpExpr*)clause)->opno, exprType((Node*)var), exprType((Node*)c)),
- (Node*)var, (Node*)c, ((OpExpr*)clause)->location);
-
- exprSetInputCollation((Node*)rinfo_converted->clause, ((OpExpr*)clause)->inputcollid);
- rinfo_converted->converted = true; /* Mark this rinfo is built by type conversion */
- rinfo->converted = true; /* Mark this rinfo has been converted */
-
- return true;
- }
- }
-
- return false;
-}
-
-static bool ConvertFuncEqVar(Expr* clause, Node* leftop, Node* rightop,
- RestrictInfo* rinfo, RestrictInfo* &rinfo_converted)
-{
- if ((IsA(leftop, FuncExpr) && IsA(rightop, Var) && SupportedConvertForIdxMatch(((FuncExpr*)leftop)->funcid)) ||
- (IsA(leftop, Var) && IsA(rightop, FuncExpr) && SupportedConvertForIdxMatch(((FuncExpr*)rightop)->funcid))) {
- Node* funcop = IsA(leftop, FuncExpr) ? leftop : rightop;
- Node* varop = IsA(leftop, FuncExpr) ?
rightop : leftop; - if (list_length(((FuncExpr*)funcop)->args) == 1 && IsA(lfirst(list_head(((FuncExpr*)funcop)->args)), Var)) { - Var* var1 = (Var*)copyObject((Var*)linitial(((FuncExpr*)funcop)->args)); - Var* var2 = (Var*)copyObject(varop); - var2->vartype = var1->vartype; - - rinfo_converted = (RestrictInfo*)copyObject(rinfo); - rinfo_converted->clause = make_op(NULL, - get_operator_name(((OpExpr*)clause)->opno, exprType((Node*)var1), exprType((Node*)var2)), - (Node*)var1, (Node*)var2, ((OpExpr*)clause)->location); - - exprSetInputCollation((Node*)rinfo_converted->clause, ((OpExpr*)clause)->inputcollid); - rinfo_converted->converted = true; /* Mark this rinfo is built by type conversion */ - rinfo->converted = true; /* Mark this rinfo has been converted */ - - return true; - } - } - - return false; -} - -static bool ConvertTypeForIdxMatch(RestrictInfo* rinfo, RestrictInfo* &rinfo_converted) -{ - bool converted = false; - Expr* clause = rinfo->clause; - if (is_opclause(clause)) { - Node* leftop = NULL; - Node* rightop = NULL; - - leftop = get_leftop(clause); - rightop = get_rightop(clause); - if (leftop == NULL || rightop == NULL) { - return false; - } - - /* 1. consider FuncExpr = Const, like: a = 1, b = 'zzy'... */ - converted = converted ? true : ConvertFuncEqConst(clause, leftop, rightop, rinfo, rinfo_converted); - - /* 2. consider FuncExpr = Var, like: join XXX on t.a = t.b */ - converted = converted ? true : ConvertFuncEqVar(clause, leftop, rightop, rinfo, rinfo_converted); - } - - return converted; -} - /* * match_clause_to_index * Test whether a qual clause can be used with an index. @@ -2354,19 +2259,10 @@ static void match_clause_to_index(IndexOptInfo* index, RestrictInfo* rinfo, Inde return; } - /* - * Before matching, We try to convert const type for adapting to the - * index aiming to match. - */ - RestrictInfo* rinfo_converted = NULL; - bool converted = ConvertTypeForIdxMatch(rinfo, rinfo_converted); - Assert((converted && rinfo_converted) || (!converted && !rinfo_converted)); - /* OK, check each index column for a match */ for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++) { - if (match_clause_to_indexcol(index, indexcol, converted ? rinfo_converted : rinfo)) { - clauseset->indexclauses[indexcol] = list_append_unique_ptr(clauseset->indexclauses[indexcol], - converted ? rinfo_converted : rinfo); + if (match_clause_to_indexcol(index, indexcol, rinfo)) { + clauseset->indexclauses[indexcol] = list_append_unique_ptr(clauseset->indexclauses[indexcol], rinfo); clauseset->nonempty = true; return; } @@ -2956,6 +2852,17 @@ bool eclass_member_matches_indexcol(EquivalenceClass* ec, EquivalenceMember* em, return match_index_to_operand((Node*)em->em_expr, indexcol, index); } +static bool relation_has_unique_index_for_no_index(PlannerInfo* root, RelOptInfo* rel) +{ + /* Each row in the delete-delta table is unique even if there is no index exists */ + if (u_sess->attr.attr_sql.enable_cluster_resize && + strncmp(root->simple_rte_array[rel->relid]->relname, + REDIS_DELETE_DELTA_TABLE_PREFIX, strlen(REDIS_DELETE_DELTA_TABLE_PREFIX)) == 0) { + return true; + } + return false; +} + /* * relation_has_unique_index_for * Determine whether the relation provably has at most one row satisfying @@ -2987,9 +2894,9 @@ bool relation_has_unique_index_for( list_length(exprlist) == list_length(oprlist), MOD_OPT, "Exprlist and oprlist are not equal in length"); /* Short-circuit if no indexes... 
*/ - if (rel->indexlist == NIL) - return false; - + if (rel->indexlist == NIL) { + return relation_has_unique_index_for_no_index(root, rel); + } /* * Examine the rel's restriction clauses for usable var = const clauses * that we can add to the restrictlist. diff --git a/src/gausskernel/optimizer/path/joinpath.cpp b/src/gausskernel/optimizer/path/joinpath.cpp index f645fef03..1dad81610 100755 --- a/src/gausskernel/optimizer/path/joinpath.cpp +++ b/src/gausskernel/optimizer/path/joinpath.cpp @@ -2497,6 +2497,7 @@ static void getBoundaryFromBaseRel(PlannerInfo* root, PartIteratorPath* itrpath) tuple = SearchSysCache2((int)ATTNUM, ObjectIdGetDatum(partitionedtableid), Int16GetDatum(map->partitionKey->values[0])); if (!HeapTupleIsValid(tuple)) { + decre_partmap_refcount(relation->partMap); ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for attribute %d of relation %u", diff --git a/src/gausskernel/optimizer/plan/createplan.cpp b/src/gausskernel/optimizer/plan/createplan.cpp index d60796651..a55d76cd5 100755 --- a/src/gausskernel/optimizer/plan/createplan.cpp +++ b/src/gausskernel/optimizer/plan/createplan.cpp @@ -1950,6 +1950,19 @@ static SeqScan* create_seqscan_plan(PlannerInfo* root, Path* best_path, List* tl copy_path_costsize(&scan_plan->plan, best_path); + /* + * When u_sess->attr.attr_sql.vectorEngineStrategy is OPT_VECTOR_ENGINE, we use the + * relation's tuple count to compute the cost that decides whether to use the vector engine. + * A partition table may have been pruned, so for a partition table we use the path rows instead of tuples. + */ + if (u_sess->attr.attr_sql.vectorEngineStrategy == OPT_VECTOR_ENGINE && + (best_path->parent->pruning_result == NULL || + best_path->parent->pruning_result->state == PRUNING_RESULT_FULL)) { + scan_plan->tableRows = best_path->parent->tuples; + } else { + scan_plan->tableRows = best_path->rows; + } + return scan_plan; } @@ -2563,17 +2576,7 @@ static Scan* create_indexscan_plan( continue; /* implied by index predicate */ } } - - /* - * Add index scan filter condition only when the rinfo is not built by type conversion. - * Our system supports some type convertions to match the index type. When this conversion - * happened, the rinfo will be deep copied from another rinfo parsed from user's input. To - * distinguish them, we mark rinfo->converted = true. Obviously, the rinfo(s) built by conversion - * should not be added into qpqual again. (refer to function: ConvertTypeForIdxMatch for conversion process) - */ - if (rinfo->converted == false) { - qpqual = lappend(qpqual, rinfo); - } + qpqual = lappend(qpqual, rinfo); } /* Sort clauses into best execution order */ @@ -4887,6 +4890,7 @@ static HashJoin* create_hashjoin_plan(PlannerInfo* root, HashPath* best_path, Pl /* Set dop from path. */ join_plan->join.plan.dop = best_path->jpath.path.dop; hash_plan->plan.dop = best_path->jpath.path.dop; + join_plan->joinRows = best_path->joinRows; join_plan->isSonicHash = u_sess->attr.attr_sql.enable_sonic_hashjoin && isSonicHashJoinEnable(join_plan); @@ -5583,6 +5587,7 @@ static SeqScan* make_seqscan(List* qptlist, List* qpqual, Index scanrelid) plan->righttree = NULL; plan->isDeltaTable = false; node->scanrelid = scanrelid; + node->scanBatchMode = false; return node; } @@ -5985,6 +5990,7 @@ SubqueryScan* make_subqueryscan(List* qptlist, List* qpqual, Index scanrelid, Pl /* * Support partition index unusable. + * A hypothetical index does not support the partition-index-unusable path. 
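+ * Illustrative sketch (it mirrors the hypothetical-index branch below): when a
+ * hypothetical index is chosen, every pruned-in partition is treated as usable, e.g.
+ *   partItrs_for_index_usable = bms_num_members(pruning_result->bm_rangeSelectedPartitions);
+ *   partItrs_for_index_unusable = 0;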
* */ Plan* create_globalpartInterator_plan(PlannerInfo* root, PartIteratorPath* pIterpath) @@ -5992,7 +5998,7 @@ Plan* create_globalpartInterator_plan(PlannerInfo* root, PartIteratorPath* pIter Plan* plan = NULL; /* The subpath is index path. */ - if (is_partitionIndex_Subpath(pIterpath->subPath)) { + if (u_sess->attr.attr_sql.enable_hypo_index == false && is_partitionIndex_Subpath(pIterpath->subPath)) { Path* index_path = pIterpath->subPath; /* Get the usable type of the index subpath. */ @@ -6072,6 +6078,12 @@ Plan* create_globalpartInterator_plan(PlannerInfo* root, PartIteratorPath* pIter } else if (is_pwj_path((Path*)pIterpath)) { plan = (Plan*)create_partIterator_plan(root, pIterpath, NULL); } else { /* Other pathes. */ + if (u_sess->attr.attr_sql.enable_hypo_index && is_partitionIndex_Subpath(pIterpath->subPath) && + ((IndexPath *)pIterpath->subPath)->indexinfo->hypothetical) { + pIterpath->subPath->parent->partItrs_for_index_usable = + bms_num_members(pIterpath->subPath->parent->pruning_result->bm_rangeSelectedPartitions); + pIterpath->subPath->parent->partItrs_for_index_unusable = 0; + } GlobalPartIterator* gpIter = (GlobalPartIterator*)palloc(sizeof(GlobalPartIterator)); gpIter->curItrs = pIterpath->subPath->parent->partItrs; gpIter->pruningResult = pIterpath->subPath->parent->pruning_result; @@ -8871,10 +8883,12 @@ ModifyTable* make_modifytable(CmdType operation, bool canSetTag, List* resultRel node->updateTlist = upsertClause->updateTlist; node->exclRelTlist = upsertClause->exclRelTlist; node->exclRelRTIndex = upsertClause->exclRelIndex; + node->upsertWhere = upsertClause->upsertWhere; } else { node->upsertAction = UPSERT_NONE; node->updateTlist = NIL; node->exclRelTlist = NIL; + node->upsertWhere = NULL; } #ifdef STREAMPLAN diff --git a/src/gausskernel/optimizer/plan/initsplan.cpp b/src/gausskernel/optimizer/plan/initsplan.cpp index 973d92b9b..d3153df76 100644 --- a/src/gausskernel/optimizer/plan/initsplan.cpp +++ b/src/gausskernel/optimizer/plan/initsplan.cpp @@ -906,8 +906,12 @@ static SpecialJoinInfo* make_outerjoininfo( ereport(ERROR, (errmodule(MOD_OPT), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), +#ifndef ENABLE_MULTIPLE_NODES errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE cannot be applied to the nullable side " "of an outer join"))); +#else + errmsg("SELECT FOR UPDATE/SHARE cannot be applied to the nullable side of an outer join"))); +#endif } } diff --git a/src/gausskernel/optimizer/plan/planner.cpp b/src/gausskernel/optimizer/plan/planner.cpp index 4efc95fa7..59b701227 100755 --- a/src/gausskernel/optimizer/plan/planner.cpp +++ b/src/gausskernel/optimizer/plan/planner.cpp @@ -123,7 +123,8 @@ const static Oid VectorEngineUnsupportType[] = { LINEOID, CIRCLEOID, POLYGONOID, - PATHOID + PATHOID, + HASH32OID }; extern PGXCNodeAllHandles* connect_compute_pool(int srvtype); @@ -226,8 +227,7 @@ typedef struct { * With multinode hint: * allow the query to continue, report no warnings or errors. 
*/ -typedef struct -{ +typedef struct { int remote_query_count; bool has_modify_table; @@ -238,6 +238,11 @@ typedef struct List *nodeList; } FindNodesContext; +typedef struct { + bool has_redis_stream; + int broadcast_stream_cnt; +} FindStreamNodesForLoopContext; + static bool needs_two_level_groupagg(PlannerInfo* root, Plan* plan, Node* distinct_node, List* distributed_key, bool* need_redistribute, bool* need_local_redistribute); static Plan* mark_agg_stream(PlannerInfo* root, List* tlist, Plan* plan, List* group_or_distinct_cls, @@ -248,10 +253,11 @@ static Plan* mark_group_stream(PlannerInfo* root, List* tlist, Plan* result_plan static Plan* mark_distinct_stream( PlannerInfo* root, List* tlist, Plan* plan, List* groupcls, Index query_level, List* current_pathkeys); static List* get_optimal_distribute_key(PlannerInfo* root, List* groupClause, Plan* plan, double* multiple); +static bool vector_engine_walker_internal(Plan* result_plan, bool check_rescan, VectorPlanContext* planContext); static bool vector_engine_expression_walker(Node* node, DenseRank_context* context); static bool vector_engine_walker(Plan* result_plan, bool check_rescan); static Plan* fallback_plan(Plan* result_plan); -static Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery); +static Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery, bool forceVectorEngine); static Plan* build_vector_plan(Plan* plan); static Plan* mark_windowagg_stream( PlannerInfo* root, Plan* plan, List* tlist, WindowClause* wc, List* pathkeys, WindowLists* wflists); @@ -674,12 +680,10 @@ PlannedStmt* standard_planner(Query* parse, int cursorOptions, ParamListInfo bou MOD_OPT, "resultRelations is not empty when finish creating a plan for a scrollable cursor"); -#ifdef STREAMPLAN if ((IS_STREAM_PLAN || (IS_PGXC_DATANODE && (!IS_STREAM || IS_STREAM_DATANODE))) && root->query_level == 1) { /* remote query and windowagg do not support vectorize rescan, so fallback to row plan */ top_plan = try_vectorize_plan(top_plan, parse, cursorOptions & CURSOR_OPT_HOLD); } -#endif top_plan = set_plan_references(root, top_plan); delete_redundant_streams_of_remotequery((RemoteQuery *)top_plan); @@ -1579,6 +1583,8 @@ Plan* subquery_planner(PlannerGlobal* glob, Query* parse, PlannerInfo* parent_ro if (parse->upsertClause) { parse->upsertClause->updateTlist = (List*) preprocess_expression(root, (Node*)parse->upsertClause->updateTlist, EXPRKIND_TARGET); + parse->upsertClause->upsertWhere = (Node*) + preprocess_expression(root, (Node*)parse->upsertClause->upsertWhere, EXPRKIND_QUAL); } root->append_rel_list = (List*)preprocess_expression(root, (Node*)root->append_rel_list, EXPRKIND_APPINFO); @@ -1859,6 +1865,7 @@ Plan* subquery_planner(PlannerGlobal* glob, Query* parse, PlannerInfo* parent_ro /* Fix var's if we have changed var */ if (root->var_mappings != NIL) { fix_vars_plannode(root, plan); + root->parse->is_from_inlist2join_rewrite = true; } return plan; @@ -2662,8 +2669,12 @@ static Plan* grouping_planner(PlannerInfo* root, double tuple_fraction) if (parse->rowMarks) ereport(ERROR, (errmodule(MOD_OPT), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), +#ifndef ENABLE_MULTIPLE_NODES errmsg("SELECT FOR UPDATE/SHARE/NO KEY UPDATE/KEY SHARE is not allowed " "with UNION/INTERSECT/EXCEPT"), +#else + errmsg("SELECT FOR UPDATE/SHARE is not allowed with UNION/INTERSECT/EXCEPT"), +#endif errdetail("N/A"), errcause("SQL uses unsupported feature."), erraction("Modify SQL statement according to the manual."))); @@ -4911,9 +4922,9 @@ static void 
preprocess_rowmarks(PlannerInfo* root) /* The strength of lc is not set at old version and distribution. Set it according to forUpdate. */ if (t_thrd.proc->workingVersionNum < ENHANCED_TUPLE_LOCK_VERSION_NUM #ifdef ENABLE_MULTIPLE_NODES - || true + || true #endif - ) { + ) { rc->strength = rc->forUpdate ? LCS_FORUPDATE : LCS_FORSHARE; } switch (rc->strength) { @@ -4934,6 +4945,7 @@ static void preprocess_rowmarks(PlannerInfo* root) break; } newrc->noWait = rc->noWait; + newrc->waitSec = rc->waitSec; newrc->isParent = false; newrc->bms_nodeids = ng_get_baserel_data_nodeids(rte->relid, rte->relkind); @@ -4961,6 +4973,7 @@ static void preprocess_rowmarks(PlannerInfo* root) else newrc->markType = ROW_MARK_COPY; newrc->noWait = false; /* doesn't matter */ + newrc->waitSec = 0; newrc->isParent = false; newrc->bms_nodeids = (RTE_RELATION == rte->rtekind && RELKIND_FOREIGN_TABLE != rte->relkind && RELKIND_STREAM != rte->relkind) @@ -7428,100 +7441,6 @@ bool plan_cluster_use_sort(Oid tableOid, Oid indexOid) return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost); } -/* - * plan_create_index_workers - * Use the planner to decide how many parallel worker processes - * CREATE INDEX should request for use - * - * tableOid is the table on which the index is to be built. indexOid is the - * OID of an index to be created or reindexed (which must be a btree index). - * - * Return value is the number of parallel worker processes to request. It - * may be unsafe to proceed if this is 0. Note that this does not include the - * leader participating as a worker (value is always a number of parallel - * worker processes). - * - * Note: caller had better already hold some type of lock on the table and - * index. - */ -int plan_create_index_workers(Oid tableOid) -{ - PlannerInfo *root; - Query *query; - PlannerGlobal *glob; - RangeTblEntry *rte; - Relation heap; - RelOptInfo *rel; - int parallel_workers; - RelPageType heap_blocks; - double reltuples; - double allvisfrac; - - /* Set up largely-dummy planner state */ - query = makeNode(Query); - query->commandType = CMD_SELECT; - - glob = makeNode(PlannerGlobal); - - root = makeNode(PlannerInfo); - root->parse = query; - root->glob = glob; - root->query_level = 1; - root->planner_cxt = CurrentMemoryContext; - root->wt_param_id = -1; - - /* - * Build a minimal RTE. - * - * Set the target's table to be an inheritance parent. This is a kludge - * that prevents problems within get_relation_info(), which does not - * expect that any IndexOptInfo is currently undergoing REINDEX. - */ - rte = makeNode(RangeTblEntry); - rte->rtekind = RTE_RELATION; - rte->relid = tableOid; - rte->relkind = RELKIND_RELATION; /* Don't be too picky. */ - rte->lateral = false; - rte->inh = true; - rte->inFromCl = true; - query->rtable = list_make1(rte); - - /* Set up RTE/RelOptInfo arrays */ - setup_simple_rel_arrays(root); - - /* Build RelOptInfo */ - rel = build_simple_rel(root, 1, RELOPT_BASEREL); - - heap = heap_open(tableOid, NoLock); - - /* - * Determine if it's safe to proceed. - * - * Currently, parallel workers can't access the leader's temporary tables, - * or the leader's relmapper.c state, which is needed for builds on mapped - * relations. 
- */ - if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP || RelationIsMapped(heap)) { - parallel_workers = 0; - goto done; - } - /* - * Estimate heap relation size ourselves, since rel->pages cannot be - * trusted (heap RTE was marked as inheritance parent) - */ - estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac, NULL); - - /* - * Determine number of workers to scan the heap relation using generic - * model - */ - parallel_workers = compute_parallel_worker(rel, heap_blocks, RelationGetParallelWorkers(heap, -1)); -done: - heap_close(heap, NoLock); - - return parallel_workers; -} - /* * @@GaussDB@@ * Target : data partition @@ -8521,11 +8440,15 @@ static bool IsTypeUnSupportedByVectorEngine(Oid typeOid) { /* we don't support user defined type. */ if (typeOid >= FirstNormalObjectId) { + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("Vectorize plan failed due to unsupported type: %u", typeOid))); return true; } for (uint32 i = 0; i < sizeof(VectorEngineUnsupportType) / sizeof(Oid); ++i) { if (VectorEngineUnsupportType[i] == typeOid) { + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("Vectorize plan failed due to unsupported type: %u", typeOid))); return true; } } @@ -8537,7 +8460,7 @@ static bool IsTypeUnSupportedByVectorEngine(Oid typeOid) * @param[IN] node: current expr node * @return: bool, true if it has */ -bool vector_engine_unsupport_expression_walker(Node* node) +bool vector_engine_unsupport_expression_walker(Node* node, VectorPlanContext* planContext) { if (node == NULL) { return false; } @@ -8553,14 +8476,19 @@ bool vector_engine_unsupport_expression_walker(Node* node) case T_ConvertRowtypeExpr: case T_ArrayExpr: case T_RowExpr: + case T_Rownum: case T_XmlExpr: case T_CoerceToDomain: case T_CoerceToDomainValue: case T_CurrentOfExpr: + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("Vectorize plan failed due to unsupported expression: %d", nodeTag(node)))); return true; case T_Var: { Var *var = (Var *)node; if (var->varattno == InvalidAttrNumber) { + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("Vectorize plan failed due to system column"))); return true; } else { return IsTypeUnSupportedByVectorEngine(var->vartype); @@ -8568,22 +8496,52 @@ bool vector_engine_unsupport_expression_walker(Node* node) break; } case T_Const: { - Const* c = (Const *)node; - return IsTypeUnSupportedByVectorEngine(c->consttype); - } + Const* c = (Const *)node; + return IsTypeUnSupportedByVectorEngine(c->consttype); + } case T_Param: { Param *par = (Param *)node; return IsTypeUnSupportedByVectorEngine(par->paramtype); } + case T_SubPlan: { + SubPlan* subplan = (SubPlan*)node; + /* make sure that the subplan return type is supported by the vector engine */ + if (!IsTypeSupportedByCStore(subplan->firstColType)) { + return true; + } + break; + } + /* count the number of complex expressions, for vectorized plans of row tables */ + case T_CoerceViaIO: + case T_GroupingFunc: + case T_WindowFunc: + case T_FuncExpr: { + /* + * make sure that the expr return type is supported by the vector engine. + * When the expression is a filter, the type is not checked because + * the result value is not passed up for calculation. 
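+ * Illustrative example: in "SELECT c1 FROM t WHERE f(c2) > 0" the FuncExpr
+ * f(c2) feeds only the filter, so its return type is not checked here; the
+ * same FuncExpr in the targetlist would be checked with IsTypeSupportedByCStore().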
+ */ + if (planContext && !planContext->currentExprIsFilter + && !IsTypeSupportedByCStore(exprType(node))) { + return true; + } + break; + } default: break; } - return expression_tree_walker(node, (bool (*)())vector_engine_unsupport_expression_walker, (void*)NULL); + + return expression_tree_walker(node, (bool (*)())vector_engine_unsupport_expression_walker, (void*)planContext); } /* * @Description: Try to generate vectorized plan * + * When the GUC 'try_vector_engine_strategy' is off, the function only processes plans that + * contain a column store relation. Otherwise, the function tries to force-vectorize the plan, + * handling these 8 scans: SeqScan, IndexScan, IndexOnlyScan, BitmapHeapScan, TidScan, FunctionScan, + * ValuesScan, ForeignScan. + * * @param[IN] top_plan: current plan node * @param[IN] parse: query tree * @param[IN] from_subplan: if node from subplan @@ -8592,16 +8550,22 @@ bool vector_engine_unsupport_expression_walker(Node* node) */ Plan* try_vectorize_plan(Plan* top_plan, Query* parse, bool from_subplan, PlannerInfo* subroot) { - /* If has no column store relation, just leave unchanged */ - if (!has_column_store_relation(top_plan)) + /* + * Leave the plan unchanged if the GUC 'try_vector_engine_strategy' is off + * and the plan has no column store relation. + */ + if (u_sess->attr.attr_sql.vectorEngineStrategy == OFF_VECTOR_ENGINE && + !has_column_store_relation(top_plan)) { return top_plan; + } /* * Fallback to original non-vectorized plan, if either the GUC 'enable_vector_engine' * is turned off or the plan cannot go through vector_engine_walker. */ - if (!u_sess->attr.attr_sql.enable_vector_engine || vector_engine_walker(top_plan, from_subplan) || + if (!u_sess->attr.attr_sql.enable_vector_engine || (subroot != NULL && subroot->is_under_recursive_tree) || + vector_engine_walker(top_plan, from_subplan) || (ENABLE_PRED_PUSH_ALL(NULL) || (subroot != NULL && SUBQUERY_PREDPUSH(subroot)))) { /* * Distributed Recursive CTE Support @@ -8619,7 +8583,8 @@ Plan* try_vectorize_plan(Plan* top_plan, Query* parse, bool from_subplan, Planne */ top_plan = fallback_plan(top_plan); } else { - top_plan = vectorize_plan(top_plan, from_subplan); + bool forceVectorEngine = (u_sess->attr.attr_sql.vectorEngineStrategy != OFF_VECTOR_ENGINE); + top_plan = vectorize_plan(top_plan, from_subplan, forceVectorEngine); if (from_subplan && !IsVecOutput(top_plan)) top_plan = fallback_plan(top_plan); @@ -8679,10 +8644,14 @@ static bool vector_engine_expression_walker(Node* node, DenseRank_context* conte /* * Only ROW_NUMBER, RANK, AVG, COUNT, MAX, MIN and SUM are supported now - * and their func oid must be found in hash table g_instance.vec_func_hash. + * and their func oid must be found in hash table g_instance.vec_func_hash. + * + * For a system-internal thread, VecFuncHash is not initialized; + * such scenarios do not need a vectorized plan. 
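+ *
+ * A minimal sketch of the guarded lookup below (illustrative):
+ *   bool found = false;
+ *   if (g_instance.vec_func_hash != NULL)
+ *       (void)hash_search(g_instance.vec_func_hash, &funcOid, HASH_FIND, &found);
+ *   found then stays false, so such plans simply fall back to the row engine.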
*/ - (void)hash_search(g_instance.vec_func_hash, &funcOid, HASH_FIND, &found); - + if (g_instance.vec_func_hash != NULL) { + (void)hash_search(g_instance.vec_func_hash, &funcOid, HASH_FIND, &found); + } /* If not found means that the Agg function is not yet implemented */ if (!found) return true; @@ -8709,6 +8678,8 @@ static bool vector_engine_setfunc_walker(Node* node, DenseRank_context* context) FuncExpr* expr = (FuncExpr*)node; if (expr->funcretset == true) { + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("Vectorize plan failed due to has set function: %u", expr->funcid))); return true; } } @@ -8716,6 +8687,391 @@ static bool vector_engine_setfunc_walker(Node* node, DenseRank_context* context) return expression_tree_walker(node, (bool (*)())vector_engine_setfunc_walker, context); } +/* + * Check if there is any data type unsupported by cstore. If so, stop rowtovec + */ +bool CheckTypeSupportRowToVec(List* targetlist, int errLevel) +{ + ListCell* cell = NULL; + TargetEntry* entry = NULL; + Var* var = NULL; + foreach(cell, targetlist) { + entry = (TargetEntry*)lfirst(cell); + if (IsA(entry->expr, Var)) { + var = (Var*)entry->expr; + if (var->varattno > 0 && var->varoattno > 0 + && var->vartype != TIDOID // cstore support for hidden column CTID + && !IsTypeSupportedByCStore(var->vartype)) { + ereport(errLevel, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("type \"%s\" is not supported in column store", + format_type_with_typemod(var->vartype, var->vartypmod)))); + return false; + } + } + } + return true; +} + +/* + * @Description: Check if the plan node is supported by VecMarkPos + * + * @param[IN] node: current plan node + * @return: bool, true means support + */ +static bool VecMarkPosSupport(Plan *plan) +{ + if (IsA(plan, Sort) || IsA(plan, Material)) { + return true; + } + + return false; +} + +static inline bool CheckVectorEngineUnsupportedFeature(Plan* plan, VectorPlanContext* planContext) +{ + /* if have set-returning function, not support. 
*/ + if (vector_engine_setfunc_walker((Node*)(plan->targetlist), NULL)) + return true; + + /* check whether there are unsupported expressions in the vector engine */ + if (vector_engine_unsupport_expression_walker((Node*)plan->targetlist, planContext)) + return true; + + planContext->currentExprIsFilter = true; + if (vector_engine_unsupport_expression_walker((Node*)plan->qual, planContext)) + return true; + planContext->currentExprIsFilter = false; + + return false; +} + +inline void ComputeExprMapCost(Oid typeId, VectorExprContext* context) +{ + if (!COL_IS_ENCODE(typeId)) { + Cost rowMapExprCost = 0.3; + Cost vecMapExprCost = 0.2; + context->planContext->rowCost += rowMapExprCost * context->rows; + context->planContext->vecCost += vecMapExprCost * context->rows; + } else { + Cost rowMapExprCost = 0.6; + Cost vecMapExprCost = 0.3; + context->planContext->rowCost += rowMapExprCost * context->rows; + context->planContext->vecCost += vecMapExprCost * context->rows; + } + + return; +} + +inline void ComputeExprEvalCost(Oid typeId, VectorExprContext* context) +{ + if (!COL_IS_ENCODE(typeId)) { + Cost rowExprEvalCost = 0.3; + Cost vecExprEvalCost = 0.3; + context->planContext->rowCost += rowExprEvalCost * context->rows; + context->planContext->vecCost += vecExprEvalCost * context->rows; + } else { + Cost rowExprEvalCost = 2.5; + Cost vecExprEvalCost = 0.8; + context->planContext->rowCost += rowExprEvalCost * context->rows; + context->planContext->vecCost += vecExprEvalCost * context->rows; + } + + return; +} + +/* + * @Description: Check if it has unsupported expressions and collect the expression costs + * + * @param[IN] node: current expr node + * @return: bool, true if it has + */ +bool VectorEngineCheckExpressionInternal(Node* node, VectorExprContext* context) +{ + if (node == NULL) { + return false; + } + + /* get costs of using row engine and vector engine */ + if (context) { + Cost rowArrayExprCost = 0.2; + Cost vecArrayExprCost = 1.3; + + switch (nodeTag(node)) { + case T_Var: { + Var* var = (Var*)node; + int varattno = (int)var->varattno; + if (!list_member_int(context->varList, varattno)) { + context->varList = lappend_int(context->varList, varattno); + } + ComputeExprMapCost(var->vartype, context); + return false; + } + case T_Const: { + Const* con = (Const*)node; + ComputeExprMapCost(con->consttype, context); + return false; + } + case T_Param: { + Param* param = (Param*)node; + ComputeExprMapCost(param->paramtype, context); + return false; + } + case T_FuncExpr: { + FuncExpr* funcExpr = (FuncExpr*)node; + ComputeExprEvalCost(funcExpr->funcresulttype, context); + break; + } + case T_OpExpr: { + OpExpr* opExpr = (OpExpr*)node; + ComputeExprEvalCost(opExpr->opresulttype, context); + break; + } + case T_Aggref: { + Aggref* aggref = (Aggref*)node; + /* An Aggref's evaluation count is its lefttree's output row count */ + int savedRows = context->rows; + context->rows = context->lefttreeRows; + ComputeExprEvalCost(aggref->aggtrantype, context); + expression_tree_walker(node, (bool (*)())VectorEngineCheckExpressionInternal, (void*)context); + context->rows = savedRows; + return false; + } + case T_ScalarArrayOpExpr: { + context->planContext->rowCost += rowArrayExprCost * context->rows; + context->planContext->vecCost += vecArrayExprCost * context->rows; + break; + } + default: + break; + } + } + + return expression_tree_walker(node, (bool (*)())VectorEngineCheckExpressionInternal, (void*)context); } + +static inline void ComputeQualCost(List* qual, VectorExprContext* context) +{ + ListCell* l = NULL; + foreach 
(l, qual) { + Expr* node = (Expr*)lfirst(l); + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("[ROWTOVEC OPTIMAL] compute qual. current rows: %f, select: %f", + context->rows, node->selec))); + VectorEngineCheckExpressionInternal((Node*)node, context); + context->rows = context->rows * node->selec; + } +} + +/* returning true means the plan is not supported by the vector engine */ +template <bool isSeqscan> +bool CostVectorScan(Scan* scanPlan, VectorPlanContext* planContext) +{ + if (!planContext->forceVectorEngine || !CheckTypeSupportRowToVec(scanPlan->plan.targetlist, DEBUG2)) { + return true; + } + + /* only the optimal mode needs to collect costs. */ + if (u_sess->attr.attr_sql.vectorEngineStrategy != OPT_VECTOR_ENGINE) { + return false; + } + + Cost rowScanCost = 1.0; + Cost vecBatchScanCost = 0.9; + Cost rowToVecTransCost = 0.7; + Cost origRowCost = planContext->rowCost; + Cost origVecCost = planContext->vecCost; + VectorExprContext exprContext; + exprContext.planContext = planContext; + exprContext.varList = NIL; + int qualColCount = 0; + int dop = SET_DOP(scanPlan->plan.dop); + /* a seqscan scans all rows in the table; an index scan does not. */ + if (isSeqscan) { + exprContext.rows = scanPlan->tableRows / dop; + planContext->rowCost += rowScanCost * exprContext.rows; + planContext->vecCost += vecBatchScanCost * exprContext.rows; + } else { + exprContext.rows = scanPlan->plan.plan_rows / dop; + planContext->rowCost += rowScanCost * exprContext.rows; + } + + ComputeQualCost(scanPlan->plan.qual, &exprContext); + if (isSeqscan) { + /* batch mode converts the qual columns to vector form first */ + qualColCount = list_length(exprContext.varList); + planContext->vecCost += rowToVecTransCost * exprContext.rows * qualColCount; + } + exprContext.rows = scanPlan->plan.plan_rows / dop; + + VectorEngineCheckExpressionInternal((Node*)scanPlan->plan.targetlist, &exprContext); + if (isSeqscan) { + int lateTransColCount = list_length(exprContext.varList) - qualColCount; + planContext->vecCost += rowToVecTransCost * exprContext.rows * lateTransColCount; + } else { + /* an index scan adds a rowtovec above the scan, so the cost is (row scan cost + rowtovec cost) */ + planContext->vecCost = origVecCost + planContext->rowCost - origRowCost; + planContext->vecCost += rowToVecTransCost * exprContext.rows * list_length(scanPlan->plan.targetlist); + } + planContext->containRowTable = true; + + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("[ROWTOVEC OPTIMAL] scan cost: row: %f, vec: %f", + planContext->rowCost - origRowCost, planContext->vecCost - origVecCost))); + + return false; +} + +static bool CheckWindowsAggExpr(Plan* resultPlan, bool check_rescan, VectorPlanContext* planContext) +{ + /* Only default window clause is supported now */ + if (((WindowAgg*)resultPlan)->frameOptions != + (FRAMEOPTION_RANGE | FRAMEOPTION_START_UNBOUNDED_PRECEDING | FRAMEOPTION_END_CURRENT_ROW)) + return true; + + /* Check if targetlist contains unsupported feature */ + DenseRank_context context; + context.has_agg = false; + context.has_denserank = false; + if (vector_engine_expression_walker((Node*)(resultPlan->targetlist), &context)) + return true; + + /* Only single denserank is supported now */ + if (context.has_agg && context.has_denserank) + return true; + + /* + * WindowAgg nodes never have quals, since they can only occur at the + * logical top level of a query (ie, after any WHERE or HAVING filters) + */ + WindowAgg* wa = (WindowAgg*)resultPlan; + if (vector_engine_unsupport_expression_walker((Node*)wa->startOffset, planContext)) + return true; + if 
(vector_engine_unsupport_expression_walker((Node*)wa->endOffset, planContext)) + return true; + + if (vector_engine_walker_internal(resultPlan->lefttree, check_rescan, planContext)) + return true; + + return false; +} + +static bool CheckForeignScanExpr(Plan* resultPlan, VectorPlanContext* planContext) +{ + ForeignScan* fscan = (ForeignScan*)resultPlan; + if (IsSpecifiedFDWFromRelid(fscan->scan_relid, GC_FDW) || + IsSpecifiedFDWFromRelid(fscan->scan_relid, LOG_FDW)) { + resultPlan->vec_output = false; + } +#ifdef ENABLE_MOT + /* row-to-vector is not supported for MOT tables */ + if (IsSpecifiedFDWFromRelid(fscan->scan_relid, MOT_FDW)) { + resultPlan->vec_output = false; + return true; + } +#endif + if (!CheckTypeSupportRowToVec(resultPlan->targetlist, DEBUG2)) { + resultPlan->vec_output = false; + return true; + } + CostVectorScan<false>((Scan*)resultPlan, planContext); + + return false; +} + +void CostVectorAgg(Plan* plan, VectorPlanContext* planContext) +{ + /* only the optimal mode needs to collect costs. */ + if (u_sess->attr.attr_sql.vectorEngineStrategy != OPT_VECTOR_ENGINE) { + return; + } + + int dop = SET_DOP(plan->dop); + VectorExprContext exprContext; + exprContext.planContext = planContext; + exprContext.varList = NIL; + exprContext.rows = plan->plan_rows / dop; + if (likely(plan->lefttree != NULL)) { + exprContext.lefttreeRows = plan->lefttree->plan_rows / SET_DOP(plan->lefttree->dop); + } else { + exprContext.lefttreeRows = plan->plan_rows / dop; + } + Cost origRowCost = planContext->rowCost; + Cost origVecCost = planContext->vecCost; + + VectorEngineCheckExpressionInternal((Node*)plan->targetlist, &exprContext); + ComputeQualCost(plan->qual, &exprContext); + + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("[ROWTOVEC OPTIMAL] agg cost: row: %f, vec: %f", + planContext->rowCost - origRowCost, planContext->vecCost - origVecCost))); +} + +void CostVectorNestJoin(Join* join, VectorPlanContext* planContext) +{ + /* only the optimal mode needs to collect costs. */ + if (u_sess->attr.attr_sql.vectorEngineStrategy != OPT_VECTOR_ENGINE) { + return; + } + + int dop = SET_DOP(join->plan.dop); + VectorExprContext exprContext; + exprContext.planContext = planContext; + exprContext.varList = NIL; + exprContext.lefttreeRows = join->plan.righttree->plan_rows / dop; + Cost origRowCost = planContext->rowCost; + Cost origVecCost = planContext->vecCost; + + /* the joinqual execution count is determined by the lefttree */ + exprContext.rows = (join->plan.righttree->plan_rows * join->plan.lefttree->plan_rows) / dop; + ComputeQualCost(join->joinqual, &exprContext); + ComputeQualCost(join->nulleqqual, &exprContext); + + exprContext.rows = join->plan.plan_rows; + VectorEngineCheckExpressionInternal((Node*)join->plan.targetlist, &exprContext); + + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("[ROWTOVEC OPTIMAL] nestloop cost: row: %f, vec: %f", + planContext->rowCost - origRowCost, planContext->vecCost - origVecCost))); +} + + +void CostVectorHashJoin(Join* join, VectorPlanContext* planContext) +{ + /* only the optimal mode needs to collect costs. 
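+ * (under OPT_VECTOR_ENGINE, the rowCost/vecCost accumulated here are compared
+ * once at the top level in vector_engine_walker to pick the cheaper engine)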
*/ + if (u_sess->attr.attr_sql.vectorEngineStrategy != OPT_VECTOR_ENGINE) { + return; + } + + HashJoin* hashjoin = (HashJoin*)join; + int dop = SET_DOP(join->plan.dop); + VectorExprContext exprContext; + exprContext.planContext = planContext; + exprContext.varList = NIL; + exprContext.lefttreeRows = join->plan.righttree->plan_rows / dop; + Cost origRowCost = planContext->rowCost; + Cost origVecCost = planContext->vecCost; + + exprContext.rows = hashjoin->joinRows; + VectorEngineCheckExpressionInternal((Node*)hashjoin->hashclauses, &exprContext); + ComputeQualCost(join->joinqual, &exprContext); + ComputeQualCost(join->nulleqqual, &exprContext); + + Cost rowJoinCost = 0.8; + Cost rowHashCost = 1.5; + Cost vecJoinCost = 0.4; + exprContext.rows = hashjoin->joinRows; + exprContext.planContext->rowCost += rowHashCost * exprContext.lefttreeRows; + exprContext.planContext->rowCost += rowJoinCost * exprContext.rows; + exprContext.planContext->vecCost += vecJoinCost * exprContext.rows; + + exprContext.rows = join->plan.plan_rows; + VectorEngineCheckExpressionInternal((Node*)join->plan.targetlist, &exprContext); + + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("[ROWTOVEC OPTIMAL] hashjoin cost: row: %f, vec: %f", + planContext->rowCost - origRowCost, planContext->vecCost - origVecCost))); +} + /* * @Description: Walk through the plan tree to see if it's supported in Vector Engine * @@ -8723,32 +9079,46 @@ static bool vector_engine_setfunc_walker(Node* node, DenseRank_context* context) * @param[IN] check_rescan: if need check rescan * @return: bool, true means unsupported, false means supported */ -static bool vector_engine_walker(Plan* result_plan, bool check_rescan) +static bool vector_engine_walker_internal(Plan* result_plan, bool check_rescan, VectorPlanContext* planContext) { if (result_plan == NULL) return false; - /* if have set-returning function, not support. */ - if (vector_engine_setfunc_walker((Node*)(result_plan->targetlist), NULL)) - return true; - - /* check whether there is unsupport expression in vector engine */ - if (vector_engine_unsupport_expression_walker((Node*)result_plan->targetlist)) - return true; - if (vector_engine_unsupport_expression_walker((Node*)result_plan->qual)) + if (CheckVectorEngineUnsupportedFeature(result_plan, planContext)) { return true; + } switch (nodeTag(result_plan)) { - /* Operators below cannot be vectorized */ - case T_SeqScan: + /* + * If the GUC 'try_vector_engine_strategy' is off, the operators below cannot be vectorized. + * If the GUC 'try_vector_engine_strategy' is force/optimal, 8 scans (SeqScan, IndexScan, IndexOnlyScan, + * BitmapHeapScan, TidScan, FunctionScan, ValuesScan, Foreignscan) can be forced to the vector engine, + * unless the targetlist has a datatype unsupported by the column store. 
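+ * Illustrative example: with try_vector_engine_strategy = force, a plain
+ * row-store "SELECT * FROM t" SeqScan is accepted here and later wrapped in a
+ * RowToVec adapter by vectorize_plan, unless its targetlist contains a type
+ * with no column-store representation.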
+ */ + case T_SeqScan: { if (result_plan->isDeltaTable) { return false; } + + return CostVectorScan<true>((Scan*)result_plan, planContext); + } case T_IndexScan: case T_IndexOnlyScan: case T_BitmapHeapScan: case T_TidScan: - case T_FunctionScan: + case T_FunctionScan: { + if (!planContext->forceVectorEngine || !CheckTypeSupportRowToVec(result_plan->targetlist, DEBUG2)) { + return true; + } + + return CostVectorScan<false>((Scan*)result_plan, planContext); + } + case T_ValuesScan: { + if (!CheckTypeSupportRowToVec(result_plan->targetlist, DEBUG2)) { + return true; + } + break; + } case T_CteScan: case T_LockRows: case T_MergeAppend: @@ -8761,33 +9131,36 @@ static bool vector_engine_walker(Plan* result_plan, bool check_rescan) if (check_rescan) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; break; case T_Stream: { check_rescan = false; Stream* sj = (Stream*)result_plan; - if (vector_engine_unsupport_expression_walker((Node*)sj->distribute_keys)) + if (vector_engine_unsupport_expression_walker((Node*)sj->distribute_keys, planContext)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; } break; case T_Limit: { Limit* lm = (Limit*)result_plan; - if (vector_engine_unsupport_expression_walker((Node*)lm->limitCount)) + if (vector_engine_unsupport_expression_walker((Node*)lm->limitCount, planContext)) return true; - if (vector_engine_unsupport_expression_walker((Node*)lm->limitOffset)) + if (vector_engine_unsupport_expression_walker((Node*)lm->limitOffset, planContext)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; } break; case T_BaseResult: { BaseResult* br = (BaseResult*)result_plan; - if (vector_engine_unsupport_expression_walker((Node*)br->resconstantqual)) + if (vector_engine_unsupport_expression_walker((Node*)br->resconstantqual, planContext)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; + if (!CheckTypeSupportRowToVec(result_plan->targetlist, DEBUG2)) { + return true; + } } break; case T_PartIterator: case T_SetOp: @@ -8799,7 +9172,7 @@ static bool vector_engine_walker(Plan* result_plan, bool check_rescan) case T_Material: case T_Hash: case T_Sort: - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; break; @@ -8812,78 +9185,62 @@ static bool vector_engine_walker(Plan* result_plan, bool check_rescan) if (vector_engine_expression_walker((Node*)(result_plan->qual), NULL)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; /* Check if contains array operator, not support distrtribute on ARRAY type now */ if (has_array_operator(result_plan)) return true; + CostVectorAgg(result_plan, planContext); } break; case T_WindowAgg: { - - /* Only default window clause is supported now */ - if (((WindowAgg*)result_plan)->frameOptions != - (FRAMEOPTION_RANGE | FRAMEOPTION_START_UNBOUNDED_PRECEDING | FRAMEOPTION_END_CURRENT_ROW)) - return true; - - /* Check if targetlist 
contains unsupported feature */ - DenseRank_context context; - context.has_agg = false; - context.has_denserank = false; - if (vector_engine_expression_walker((Node*)(result_plan->targetlist), &context)) - return true; - - /* Only single denserank is supported now */ - if (context.has_agg && context.has_denserank) - return true; - - /* - * WindowAgg nodes never have quals, since they can only occur at the - * logical top level of a query (ie, after any WHERE or HAVING filters) - */ - WindowAgg* wa = (WindowAgg*)result_plan; - if (vector_engine_unsupport_expression_walker((Node*)wa->startOffset)) - return true; - if (vector_engine_unsupport_expression_walker((Node*)wa->endOffset)) - return true; - - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (CheckWindowsAggExpr(result_plan, check_rescan, planContext)) { return true; + } } break; case T_MergeJoin: { MergeJoin* mj = (MergeJoin*)result_plan; - if (vector_engine_unsupport_expression_walker((Node*)mj->mergeclauses)) + if (vector_engine_unsupport_expression_walker((Node*)mj->mergeclauses, planContext)) return true; /* Find unsupport expr *Join* clause */ - if (vector_engine_unsupport_expression_walker((Node*)mj->join.joinqual)) + if (vector_engine_unsupport_expression_walker((Node*)mj->join.joinqual, planContext)) return true; - if (vector_engine_unsupport_expression_walker((Node*)mj->join.nulleqqual)) + if (vector_engine_unsupport_expression_walker((Node*)mj->join.nulleqqual, planContext)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; - if (vector_engine_walker(result_plan->righttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->righttree, check_rescan, planContext)) return true; + + /* + * If the top plan node of the mergejoin's righttree is not supported by VecMarkPos, + * we cannot generate a vectorized plan. 
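+ * (VecMarkPosSupport above accepts only Sort and Material nodes, so e.g. a
+ * bare vectorized scan as the inner side disqualifies the whole plan)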
+ */ + if (!VecMarkPosSupport(result_plan->righttree)) { + return true; + } } break; case T_NestLoop: { NestLoop* nl = (NestLoop*)result_plan; /* Find unsupport expr in *Join* clause */ - if (vector_engine_unsupport_expression_walker((Node*)nl->join.joinqual)) + if (vector_engine_unsupport_expression_walker((Node*)nl->join.joinqual, planContext)) return true; - if (vector_engine_unsupport_expression_walker((Node*)nl->join.nulleqqual)) + if (vector_engine_unsupport_expression_walker((Node*)nl->join.nulleqqual, planContext)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; if (IsA(result_plan->righttree, Material) && result_plan->righttree->allParam == NULL) check_rescan = false; else check_rescan = true; - if (vector_engine_walker(result_plan->righttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->righttree, check_rescan, planContext)) return true; + CostVectorNestJoin((Join*)result_plan, planContext); } break; case T_HashJoin: { @@ -8894,27 +9251,26 @@ static bool vector_engine_walker(Plan* result_plan, bool check_rescan) HashJoin* hj = (HashJoin*)result_plan; /* Find unsupport expr in *Hash* clause */ - if (vector_engine_unsupport_expression_walker((Node*)hj->hashclauses)) + if (vector_engine_unsupport_expression_walker((Node*)hj->hashclauses, planContext)) return true; /* Find unsupport expr in *Join* clause */ - if (vector_engine_unsupport_expression_walker((Node*)hj->join.joinqual)) + if (vector_engine_unsupport_expression_walker((Node*)hj->join.joinqual, planContext)) return true; - if (vector_engine_unsupport_expression_walker((Node*)hj->join.nulleqqual)) + if (vector_engine_unsupport_expression_walker((Node*)hj->join.nulleqqual, planContext)) return true; - if (vector_engine_walker(result_plan->lefttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->lefttree, check_rescan, planContext)) return true; - if (vector_engine_walker(result_plan->righttree, check_rescan)) + if (vector_engine_walker_internal(result_plan->righttree, check_rescan, planContext)) return true; + CostVectorHashJoin((Join*)result_plan, planContext); } break; case T_Append: { Append* append = (Append*)result_plan; ListCell* lc = NULL; foreach (lc, append->appendplans) { - Plan* plan = (Plan*)lfirst(lc); - - if (vector_engine_walker(plan, check_rescan)) + if (vector_engine_walker_internal((Plan*)lfirst(lc), check_rescan, planContext)) return true; } } break; @@ -8923,34 +9279,26 @@ static bool vector_engine_walker(Plan* result_plan, bool check_rescan) ModifyTable* mt = (ModifyTable*)result_plan; ListCell* lc = NULL; foreach (lc, mt->plans) { - Plan* plan = (Plan*)lfirst(lc); - if (vector_engine_walker(plan, check_rescan)) + if (vector_engine_walker_internal((Plan*)lfirst(lc), check_rescan, planContext)) return true; } } break; case T_SubqueryScan: { SubqueryScan* ss = (SubqueryScan*)result_plan; - - if (ss->subplan && vector_engine_walker(ss->subplan, check_rescan)) + if (ss->subplan && vector_engine_walker_internal(ss->subplan, check_rescan, planContext)) return true; } break; case T_ForeignScan: { - ForeignScan* fscan = (ForeignScan*)result_plan; - if (IsSpecifiedFDWFromRelid(fscan->scan_relid, GC_FDW) || - IsSpecifiedFDWFromRelid(fscan->scan_relid, LOG_FDW)) { - result_plan->vec_output = false; - return true; - } + return CheckForeignScanExpr(result_plan, planContext); } break; case T_ExtensiblePlan: { ExtensiblePlan* ext_plan = 
(ExtensiblePlan*)result_plan; ListCell* lc = NULL; foreach (lc, ext_plan->extensible_plans) { - Plan* plan = (Plan*)lfirst(lc); - if (vector_engine_walker(plan, check_rescan)) + if (vector_engine_walker_internal((Plan*)lfirst(lc), check_rescan, planContext)) return true; } } break; @@ -8962,6 +9310,52 @@ static bool vector_engine_walker(Plan* result_plan, bool check_rescan) return false; } +/* + * @Description: Walk through the plan tree to see if it's supported in Vector Engine + * + * @param[IN] result_plan: current plan node + * @param[IN] check_rescan: if need check rescan + * @return: bool, true means unsupported, false means supported + */ +static bool vector_engine_walker(Plan* result_plan, bool check_rescan) +{ + VectorPlanContext planContext; + planContext.containRowTable = false; + planContext.currentExprIsFilter = false; + planContext.rowCost = 0.0; + planContext.vecCost = 0.0; + + /* For OPT_VECTOR_ENGINE we assume the plan can be transformed to a vectorized plan; + * if the plan does not satisfy the vectorization rules, it is rejected later. + */ + if (u_sess->attr.attr_sql.vectorEngineStrategy == OFF_VECTOR_ENGINE) { + planContext.forceVectorEngine = false; + } else { + planContext.forceVectorEngine = true; + } + + bool res = vector_engine_walker_internal(result_plan, check_rescan, &planContext); + + if (!res && u_sess->attr.attr_sql.vectorEngineStrategy == OPT_VECTOR_ENGINE && planContext.containRowTable) { + /* add the vector-to-row conversion cost for using the vector engine */ + Cost vecToRowCost = 0.2; + Cost vecToRowCosts = vecToRowCost * result_plan->plan_rows; + planContext.vecCost += vecToRowCosts; + /* multiply the vector cost by 1.2 to ensure that performance does not degrade */ + planContext.vecCost *= 1.2; + ereport(DEBUG2, (errmodule(MOD_OPT_PLANNER), + errmsg("[ROWTOVEC OPTIMAL] total cost: row: %f, vector: %f, choose %s", + planContext.rowCost, planContext.vecCost, + (planContext.vecCost >= planContext.rowCost ? 
"row" : "vector")))); + /* while using vector engine cost is larger than using row engine, do not transform to vector plan */ + if (planContext.vecCost >= planContext.rowCost) { + res = true; + } + } + + return res; +} + /* * @Description: Fallback plan, generate hybrid row-column plan * @@ -9091,6 +9485,12 @@ static Plan* fallback_plan(Plan* result_plan) return result_plan; } +static inline Plan* make_rowtove_plan(Plan* plan) +{ + make_dummy_targetlist(plan); + return (Plan *)make_rowtovec(plan); +} + /* * @Description: Generate vectorized plan * @@ -9098,7 +9498,7 @@ static Plan* fallback_plan(Plan* result_plan) * @param[IN] ignore_remotequery: if ignore RemoteQuery node * @return: Plan*, vectorized plan */ -Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) +Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery, bool forceVectorEngine) { if (result_plan == NULL) return NULL; @@ -9119,38 +9519,50 @@ Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) result_plan = build_vector_plan(result_plan); break; case T_ForeignScan: - if (IsVecOutput(result_plan)) + if (IsVecOutput(result_plan)) { return build_vector_plan(result_plan); - break; - case T_ExtensiblePlan: - { - ExtensiblePlan* ext_plans = (ExtensiblePlan*)result_plan; - ListCell* lc = NULL; - List* newPlans = NIL; - - foreach (lc, ext_plans->extensible_plans) { - Plan* plan = (Plan*)lfirst(lc); - lfirst(lc) = vectorize_plan(plan, ignore_remotequery); - if (IsVecOutput(result_plan) && - !IsVecOutput(plan)) { - if (IsA(plan, ForeignScan)) { - build_vector_plan(plan); - } else { - plan = (Plan*)make_rowtovec(plan); - } - } else if (!IsVecOutput(result_plan) && IsVecOutput(plan)) - plan = (Plan*)make_vectorow(plan); - newPlans = lappend(newPlans, plan); - } - ext_plans->extensible_plans = newPlans; - if (IsVecOutput(result_plan)) { - build_vector_plan(result_plan); - } - break; + } else if (forceVectorEngine) { + result_plan = make_rowtove_plan(result_plan); } + break; + case T_ExtensiblePlan: { + ExtensiblePlan* ext_plans = (ExtensiblePlan*)result_plan; + ListCell* lc = NULL; + List* newPlans = NIL; + + foreach (lc, ext_plans->extensible_plans) { + Plan* plan = (Plan*)lfirst(lc); + lfirst(lc) = vectorize_plan(plan, ignore_remotequery, forceVectorEngine); + if (IsVecOutput(result_plan) && !IsVecOutput(plan)) { + if (IsA(plan, ForeignScan)) { + build_vector_plan(plan); + } else { + plan = (Plan*)make_rowtovec(plan); + } + } else if (!IsVecOutput(result_plan) && IsVecOutput(plan)) { + plan = (Plan*)make_vectorow(plan); + } + newPlans = lappend(newPlans, plan); + } + ext_plans->extensible_plans = newPlans; + if (IsVecOutput(result_plan)) { + build_vector_plan(result_plan); + } + break; + } case T_SeqScan: { - if (result_plan->isDeltaTable) { - result_plan = (Plan*)make_rowtovec(result_plan); + if (result_plan->isDeltaTable || forceVectorEngine) { + result_plan = make_rowtove_plan(result_plan); + } + break; + } + case T_IndexScan: + case T_IndexOnlyScan: + case T_BitmapHeapScan: + case T_TidScan: + case T_FunctionScan: { + if (forceVectorEngine) { + result_plan = make_rowtove_plan(result_plan); } break; } @@ -9175,24 +9587,22 @@ Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) case T_Stream: case T_Material: case T_WindowAgg: - result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery); - if (result_plan->lefttree && IsVecOutput(result_plan->lefttree)) + result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery, forceVectorEngine); + if 
(result_plan->lefttree && IsVecOutput(result_plan->lefttree)) { return build_vector_plan(result_plan); - else if ((result_plan->lefttree && !IsVecOutput(result_plan->lefttree)) && + } else if ((result_plan->lefttree && !IsVecOutput(result_plan->lefttree)) && u_sess->attr.attr_sql.enable_force_vector_engine) { result_plan->lefttree = (Plan*)make_rowtovec(result_plan->lefttree); return build_vector_plan(result_plan); } else if (IsA(result_plan, BaseResult) && result_plan->lefttree == NULL) { - make_dummy_targetlist(result_plan); - result_plan = (Plan*)make_rowtovec(result_plan); - return result_plan; + return make_rowtove_plan(result_plan); } break; case T_MergeJoin: case T_NestLoop: - result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery); - result_plan->righttree = vectorize_plan(result_plan->righttree, ignore_remotequery); + result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery, forceVectorEngine); + result_plan->righttree = vectorize_plan(result_plan->righttree, ignore_remotequery, forceVectorEngine); if (IsVecOutput(result_plan->lefttree) && IsVecOutput(result_plan->righttree)) { return build_vector_plan(result_plan); @@ -9218,7 +9628,7 @@ Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) case T_Hash: break; case T_Agg: { - result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery); + result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery, forceVectorEngine); if (IsVecOutput(result_plan->lefttree)) return build_vector_plan(result_plan); } break; @@ -9227,8 +9637,9 @@ Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) */ case T_HashJoin: { /* HashJoin supports vector right now */ - result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery); - result_plan->righttree->lefttree = vectorize_plan(result_plan->righttree->lefttree, ignore_remotequery); + result_plan->lefttree = vectorize_plan(result_plan->lefttree, ignore_remotequery, forceVectorEngine); + result_plan->righttree->lefttree = + vectorize_plan(result_plan->righttree->lefttree, ignore_remotequery, forceVectorEngine); if (IsVecOutput(result_plan->lefttree) && IsVecOutput(result_plan->righttree->lefttree)) { /* Remove hash node */ @@ -9249,7 +9660,7 @@ Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) bool isVec = true; foreach (lc, append->appendplans) { Plan* plan = (Plan*)lfirst(lc); - plan = vectorize_plan(plan, ignore_remotequery); + plan = vectorize_plan(plan, ignore_remotequery, forceVectorEngine); lfirst(lc) = plan; if (!IsVecOutput(plan)) { if (u_sess->attr.attr_sql.enable_force_vector_engine) @@ -9270,45 +9681,44 @@ Plan* vectorize_plan(Plan* result_plan, bool ignore_remotequery) } } break; - case T_ModifyTable: + case T_ModifyTable: { /* ModifyTable doesn't support vector right now */ - { - ModifyTable* mt = (ModifyTable*)result_plan; - ListCell* lc = NULL; - List* newPlans = NIL; + ModifyTable* mt = (ModifyTable*)result_plan; + ListCell* lc = NULL; + List* newPlans = NIL; - foreach (lc, mt->plans) { - Plan* plan = (Plan*)lfirst(lc); - lfirst(lc) = vectorize_plan(plan, ignore_remotequery); - if (IsVecOutput(result_plan) && - !IsVecOutput(plan)) { // If we support vectorize ModifyTable, please remove it - if (IsA(plan, ForeignScan)) { - build_vector_plan(plan); - } else { - plan = (Plan*)make_rowtovec(plan); - } - } else if (!IsVecOutput(result_plan) && IsVecOutput(plan)) - plan = (Plan*)make_vectorow(plan); - newPlans = lappend(newPlans, plan); + foreach 
(lc, mt->plans) { + Plan* plan = (Plan*)lfirst(lc); + lfirst(lc) = vectorize_plan(plan, ignore_remotequery, forceVectorEngine); + /* If we support vectorize ModifyTable, please remove it */ + if (IsVecOutput(result_plan) && !IsVecOutput(plan)) { + if (IsA(plan, ForeignScan)) { + build_vector_plan(plan); + } else { + plan = (Plan*)make_rowtovec(plan); + } + } else if (!IsVecOutput(result_plan) && IsVecOutput(plan)) { + plan = (Plan*)make_vectorow(plan); } - mt->plans = newPlans; - if (IsVecOutput(result_plan)) { - build_vector_plan(result_plan); - } - break; + newPlans = lappend(newPlans, plan); } - - case T_SubqueryScan: - /* SubqueryScan supports vector right now */ - { - SubqueryScan* ss = (SubqueryScan*)result_plan; - if (ss->subplan) - ss->subplan = vectorize_plan(ss->subplan, ignore_remotequery); - if (IsVecOutput(ss->subplan)) { // If we support vectorize ModifyTable, please remove it - build_vector_plan(result_plan); - } + mt->plans = newPlans; + if (IsVecOutput(result_plan)) { + build_vector_plan(result_plan); } break; + } + + case T_SubqueryScan: { + /* SubqueryScan supports vector right now */ + SubqueryScan* ss = (SubqueryScan*)result_plan; + if (ss->subplan) + ss->subplan = vectorize_plan(ss->subplan, ignore_remotequery, forceVectorEngine); + if (IsVecOutput(ss->subplan)) { // If we support vectorize ModifyTable, please remove it + build_vector_plan(result_plan); + } + break; + } default: break; @@ -9411,6 +9821,44 @@ static Plan* build_vector_plan(Plan* plan) return plan; } +bool CheckColumnsSuportedByBatchMode(List *targetList, List *qual) +{ + List *vars = NIL; + ListCell *l = NULL; + + /* Consider the targetList */ + foreach (l, targetList) { + ListCell *vl = NULL; + GenericExprState *gstate = (GenericExprState *)lfirst(l); + TargetEntry *tle = (TargetEntry *)gstate->xprstate.expr; + + /* if have set-returning function, not support. */ + if (vector_engine_setfunc_walker((Node*)tle, NULL)) { + return false; + } + + /* Pull vars from the targetlist. 
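+ * (illustrative: a Var with varattno < 0 is a system column such as ctid or
+ * xmin; either a system column or a type unsupported by cstore disqualifies
+ * batch mode, as checked below)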
*/ + vars = pull_var_clause((Node *)tle, PVC_RECURSE_AGGREGATES, PVC_RECURSE_PLACEHOLDERS); + + foreach (vl, vars) { + Var *var = (Var *)lfirst(vl); + if (var->varattno < 0 || !IsTypeSupportedByCStore(var->vartype)) { + return false; + } + } + } + + /* Now consider the quals */ + vars = pull_var_clause((Node *)qual, PVC_RECURSE_AGGREGATES, PVC_RECURSE_PLACEHOLDERS); + foreach (l, vars) { + Var *var = (Var *)lfirst(l); + if (var->varattno < 0 || !IsTypeSupportedByCStore(var->vartype)) { + return false; + } + } + return true; +} + /* * cost_agg_convert_to_path * convert subplan to path before we calculate agg cost of each hash agg method @@ -14154,3 +14602,48 @@ Plan* get_foreign_scan(Plan* plan) return plan; } +static void check_redistribute_stream_walker(Plan* plan, void* context, const char* query_string) +{ + if (plan == NULL) { + return; + } + FindStreamNodesForLoopContext *ctx = (FindStreamNodesForLoopContext*)context; + if (IsA(plan, Stream) && ((Stream*)plan)->type == STREAM_REDISTRIBUTE) { + ctx->has_redis_stream = true; + return; + } + if (IsA(plan, Stream) && ((Stream*)plan)->type == STREAM_BROADCAST) { + ctx->broadcast_stream_cnt++; + return; + } + return PlanTreeWalker(plan, check_redistribute_stream_walker, context, NULL); +} + +bool check_stream_for_loop_fetch(Portal portal) +{ + if (unlikely(portal == NULL || portal->cplan == NULL)) { + return false; + } + bool has_stream = false; + ListCell* lc = NULL; + foreach(lc, portal->cplan->stmt_list) { + if (has_stream) + break; + PlannedStmt* plannedstmt = (PlannedStmt*)lfirst(lc); + FindStreamNodesForLoopContext context; + /* only redistribute stream or more than one broadcast stream may cause hang in loop sql */ + if (IsA(plannedstmt, PlannedStmt)) { + errno_t rc = 0; + rc = memset_s(&context, sizeof(FindStreamNodesForLoopContext), 0, sizeof(FindStreamNodesForLoopContext)); + securec_check(rc, "\0", "\0"); + context.has_redis_stream = false; + context.broadcast_stream_cnt = 0; + check_redistribute_stream_walker(plannedstmt->planTree, &context, NULL); + has_stream |= context.has_redis_stream; + has_stream |= (context.broadcast_stream_cnt > 1); + } + } + portal->hasStreamForPlpgsql = has_stream; + return has_stream; +} + diff --git a/src/gausskernel/optimizer/plan/planrewrite.cpp b/src/gausskernel/optimizer/plan/planrewrite.cpp index e6aac6652..f940a683a 100644 --- a/src/gausskernel/optimizer/plan/planrewrite.cpp +++ b/src/gausskernel/optimizer/plan/planrewrite.cpp @@ -1103,6 +1103,8 @@ static char* get_attr_name(int attrnum) return (char*)"xc_node_id"; case BucketIdAttributeNumber: return (char*)"tablebucketid"; + case UidAttributeNumber: + return (char*)"gs_tuple_uid"; #endif default: ereport(ERROR, diff --git a/src/gausskernel/optimizer/plan/planstartwith.cpp b/src/gausskernel/optimizer/plan/planstartwith.cpp index 7afa8403a..9a2adfb9a 100644 --- a/src/gausskernel/optimizer/plan/planstartwith.cpp +++ b/src/gausskernel/optimizer/plan/planstartwith.cpp @@ -1407,6 +1407,10 @@ static Sort *CreateSortPlanUnderRU(PlannerInfo* root, Plan* lefttree, List *sibl foreach (lc1, lefttree->targetlist) { TargetEntry *tle = (TargetEntry *)lfirst(lc1); + if (tle->resname == NULL) { + continue; + } + /* one more fix name */ char *label = strrchr(tle->resname, '@'); label += 1; diff --git a/src/gausskernel/optimizer/plan/setrefs.cpp b/src/gausskernel/optimizer/plan/setrefs.cpp index f200df737..9a0b05b4b 100644 --- a/src/gausskernel/optimizer/plan/setrefs.cpp +++ b/src/gausskernel/optimizer/plan/setrefs.cpp @@ -845,6 +845,10 @@ static Plan* 
set_plan_refs(PlannerInfo* root, Plan* plan, int rtoffset) itlist = build_tlist_index(splan->exclRelTlist); splan->updateTlist = fix_join_expr(root, splan->updateTlist, NULL, itlist, linitial_int(splan->resultRelations), rtoffset); + splan->upsertWhere = (Node*)fix_join_expr(root, (List*)splan->upsertWhere, NULL, + itlist, linitial_int(splan->resultRelations), rtoffset); + splan->exclRelTlist = + fix_scan_list(root, splan->exclRelTlist, rtoffset); } splan->exclRelRTIndex += rtoffset; diff --git a/src/gausskernel/optimizer/plan/streamplan.cpp b/src/gausskernel/optimizer/plan/streamplan.cpp index 25599f31d..c2d0de453 100644 --- a/src/gausskernel/optimizer/plan/streamplan.cpp +++ b/src/gausskernel/optimizer/plan/streamplan.cpp @@ -27,9 +27,11 @@ #include "pgxc/groupmgr.h" #include "pgxc/poolmgr.h" #include "pgxc/poolutils.h" +#include "pgxc/nodemgr.h" #include "utils/syscache.h" #include "instruments/instr_statement.h" - +#include "replication/walreceiver.h" + /* only operator with qual supporting can use hashfilter */ static int g_support_hashfilter_types[] = { T_SeqScan, @@ -1056,6 +1058,57 @@ bool IsModifyTableForDfsTable(Plan* AppendNode) return false; } +void disaster_read_array_init() +{ + Snapshot snapshot = GetActiveSnapshot(); + if (snapshot == NULL) { + snapshot = GetTransactionSnapshot(); + } + + LWLockAcquire(MaxCSNArrayLock, LW_SHARED); + CommitSeqNo *maxcsn = t_thrd.xact_cxt.ShmemVariableCache->max_csn_array; + bool *mainstandby = t_thrd.xact_cxt.ShmemVariableCache->main_standby_array; + if (maxcsn == NULL) { + ereport(ERROR, (errmsg("max_csn_array is NULL"))); + } + if (mainstandby == NULL) { + ereport(ERROR, (errmsg("main_standby_array is NULL"))); + } + + int slice_num = u_sess->pgxc_cxt.NumDataNodes; + int slice_internal_num = u_sess->pgxc_cxt.standby_num + 1; + + for (int i = 0; i < slice_num; i++) { + int j = 0; + for (; j < slice_internal_num; j++) { + int nodeIdx = i + j * slice_num; + bool set = false; + if (snapshot->snapshotcsn <= maxcsn[nodeIdx] + 1) { + u_sess->pgxc_cxt.disasterReadArray[i] = nodeIdx; + set = true; + ereport(LOG, (errmsg("select [%d, %d] node index %d, nodeid, %d, csn %lu, %s", + i, j, + nodeIdx, + u_sess->pgxc_cxt.poolHandle->dn_conn_oids[nodeIdx], + maxcsn[nodeIdx], + mainstandby[nodeIdx] ? 
"Main Standby" : "Cascade Standby"))); + } else { + ereport(LOG, (errmsg("nodeid %d, snapshotcsn = %lu, max_csn_array = %lu", + u_sess->pgxc_cxt.poolHandle->dn_conn_oids[nodeIdx], + snapshot->snapshotcsn, + maxcsn[nodeIdx]))); + } + if (set && !mainstandby[nodeIdx]) { + break; + } + } + if (j == (u_sess->pgxc_cxt.standby_num + 1)) + ereport(LOG, (errmsg("current slice datanode is all invalid"))); + } + LWLockRelease(MaxCSNArrayLock); + u_sess->pgxc_cxt.DisasterReadArrayInit = true; +} + NodeDefinition* get_all_datanodes_def() { Oid* dn_node_arr = NULL; @@ -1064,27 +1117,45 @@ NodeDefinition* get_all_datanodes_def() NodeDefinition* nodeDefArray = NULL; int rc = 0; - PgxcNodeGetOids(NULL, &dn_node_arr, NULL, &dn_node_num, false); + if (IS_DISASTER_RECOVER_MODE) { + PgxcNodeGetOidsForInit(NULL, &dn_node_arr, NULL, &dn_node_num, NULL, false); + } else { + PgxcNodeGetOids(NULL, &dn_node_arr, NULL, &dn_node_num, false); + } - if (u_sess->pgxc_cxt.NumDataNodes != dn_node_num) { + int dnNum = Max(u_sess->pgxc_cxt.NumTotalDataNodes, u_sess->pgxc_cxt.NumDataNodes); + if (dnNum != dn_node_num) { ResetSessionExecutorInfo(true); if (dn_node_arr != NULL) { pfree_ext(dn_node_arr); dn_node_arr = NULL; } - PgxcNodeGetOids(NULL, &dn_node_arr, NULL, &dn_node_num, false); - - if (u_sess->pgxc_cxt.NumDataNodes != dn_node_num) + if (IS_DISASTER_RECOVER_MODE) { + PgxcNodeGetOidsForInit(NULL, &dn_node_arr, NULL, &dn_node_num, NULL, false); + } else { + PgxcNodeGetOids(NULL, &dn_node_arr, NULL, &dn_node_num, false); + } + if (dnNum != dn_node_num) ereport(ERROR, (errmodule(MOD_OPT), errcode(ERRCODE_OPTIMIZER_INCONSISTENT_STATE), errmsg("total datanodes maybe be changed"))); } + if (IS_CN_DISASTER_RECOVER_MODE) { + disaster_read_array_init(); + } + nodeDefArray = (NodeDefinition*)palloc(sizeof(NodeDefinition) * u_sess->pgxc_cxt.NumDataNodes); - for (i = 0; i < dn_node_num; i++) { - Oid current_primary_oid = PgxcNodeGetPrimaryDNFromMatric(dn_node_arr[i]); - NodeDefinition* res = PgxcNodeGetDefinition(current_primary_oid); + NodeDefinition* res = NULL; + for (i = 0; i < u_sess->pgxc_cxt.NumDataNodes; i++) { + if (!IS_DISASTER_RECOVER_MODE) { + Oid current_primary_oid = PgxcNodeGetPrimaryDNFromMatric(dn_node_arr[i]); + res = PgxcNodeGetDefinition(current_primary_oid); + } else { + int index = u_sess->pgxc_cxt.disasterReadArray[i]; + res = PgxcNodeGetDefinition(dn_node_arr[index == -1 ? i : index]); + } rc = memcpy_s(&nodeDefArray[i], sizeof(NodeDefinition), res, sizeof(NodeDefinition)); securec_check(rc, "\0", "\0"); diff --git a/src/gausskernel/optimizer/plan/streamplan_single.cpp b/src/gausskernel/optimizer/plan/streamplan_single.cpp index 9d78fefe3..4504df93e 100644 --- a/src/gausskernel/optimizer/plan/streamplan_single.cpp +++ b/src/gausskernel/optimizer/plan/streamplan_single.cpp @@ -612,8 +612,11 @@ static void mark_distribute_setop_distribution(PlannerInfo* root, Node* node, Pl redistributeDistribution); } } - Stream *streamNode = (Stream *)newplan; - streamNode->is_sorted = IsA(node, MergeAppend) ? true : false; + + if (IsA(newplan, Stream)) { + Stream *streamNode = (Stream *)newplan; + streamNode->is_sorted = IsA(node, MergeAppend) ? 
true : false;
+    }

     if (PointerIsValid(mergeAppend)) {
         newSubPlans = lappend(newSubPlans,
diff --git a/src/gausskernel/optimizer/plan/streamwalker.cpp b/src/gausskernel/optimizer/plan/streamwalker.cpp
index 5108beb65..e28b35271 100644
--- a/src/gausskernel/optimizer/plan/streamwalker.cpp
+++ b/src/gausskernel/optimizer/plan/streamwalker.cpp
@@ -137,17 +137,27 @@ static void stream_walker_query_insertinto_rep(Query* query, shipping_context *c
             continue;
         }
         RangeTblEntry *rte = rt_fetch(index, query->rtable);
-        if (rte->rtekind != RTE_SUBQUERY || rte->subquery == NULL || !rte->subquery->hasWindowFuncs) {
+        if (rte->rtekind != RTE_SUBQUERY || rte->subquery == NULL) {
             continue;
         }
-        if (!containReplicatedTable(rte->subquery->rtable)) {
-            continue;
+
+        if (rte->subquery->hasWindowFuncs && containReplicatedTable(rte->subquery->rtable)) {
+            cxt->current_shippable = false;
+            break;
         }
+
+        /* Cannot ship if there are junk tlists in replicated subquery */
+        if (check_replicated_junktlist(rte->subquery)) {
+            cxt->current_shippable = false;
+            break;
+        }
+    }
+
+    if (!cxt->current_shippable) {
         errno_t sprintf_rc = sprintf_s(u_sess->opt_cxt.not_shipping_info->not_shipping_reason,
             NOTPLANSHIPPING_LENGTH,
             "\"insert into replicated table with select rep table with winfunc\" can not be shipped");
         securec_check_ss_c(sprintf_rc, "\0", "\0");
-        cxt->current_shippable = false;
     }
 }

@@ -589,66 +599,86 @@ static bool contains_unsupport_tables(List* rtable, Query* query, shipping_conte
         RangeTblEntry* rte = (RangeTblEntry*)lfirst(item);
         rIdx++;
-        if (rte->rtekind == RTE_RELATION) {
-            if (table_contain_unsupport_feature(rte->relid, query) && !u_sess->attr.attr_sql.enable_cluster_resize) {
-                context->current_shippable = false;
-                return true;
+        switch (rte->rtekind) {
+            case RTE_RELATION: {
+                if (table_contain_unsupport_feature(rte->relid, query) &&
+                    !u_sess->attr.attr_sql.enable_cluster_resize) {
+                    context->current_shippable = false;
+                    return true;
+                }
+
+                rte->locator_type = GetLocatorType(rte->relid);
+                /* SQLONHADOOP has to support RROBIN MODULO distribution mode */
+                if (((rte->locator_type == LOCATOR_TYPE_RROBIN || rte->locator_type == LOCATOR_TYPE_MODULO) &&
+                    rte->relkind != RELKIND_FOREIGN_TABLE && rte->relkind != RELKIND_STREAM)) {
+                    sprintf_rc = sprintf_s(u_sess->opt_cxt.not_shipping_info->not_shipping_reason,
+                        NOTPLANSHIPPING_LENGTH,
+                        "Table %s can not be shipped",
+                        get_rel_name(rte->relid));
+                    securec_check_ss_c(sprintf_rc, "\0", "\0");
+                    context->current_shippable = false;
+                    return true;
+                }
+                if (rte->inh && has_subclass(rte->relid)) {
+                    sprintf_rc = sprintf_s(u_sess->opt_cxt.not_shipping_info->not_shipping_reason,
+                        NOTPLANSHIPPING_LENGTH,
+                        "Table %s inherited can not be shipped",
+                        get_rel_name(rte->relid));
+                    securec_check_ss_c(sprintf_rc, "\0", "\0");
+                    context->current_shippable = false;
+                    return true;
+                }
+
+                if (query->commandType == CMD_INSERT && list_length(rtable) == 2 && rIdx == 1)
+                    target_table_loctype = rte->locator_type;
+
+                break;
+            }
+            case RTE_SUBQUERY: {
+                /*
+                 * We allow pushing the nextval and uuid_generate_v1 to DN for the following query:
+                 *     insert into t1 select nextval('seq1'),* from t2;
+                 *     insert into t1 select uuid_generate_v1, * from t2;
+                 * It fulfills the following conditions:
+                 * 1. Top level query is Insert.
+                 * 2. There are two RTEs in rtable, the first one is the target table,
+                 *    which should be hash/range/list distributed.
+                 *    The second one is a subquery.
+                 * We allow the nextval and uuid_generate_v1 in the target list of the subquery.
+ */ + bool supportLoctype = (target_table_loctype == LOCATOR_TYPE_HASH || + IsLocatorDistributedBySlice(target_table_loctype) || + target_table_loctype == LOCATOR_TYPE_NONE); + if (query->commandType == CMD_INSERT && list_length(rtable) == 2 && rIdx == 2 && + supportLoctype) { + scontext.allow_func_in_targetlist = true; + } - rte->locator_type = GetLocatorType(rte->relid); - /* SQLONHADOOP has to support RROBIN MODULO distribution mode */ - if (((rte->locator_type == LOCATOR_TYPE_RROBIN || rte->locator_type == LOCATOR_TYPE_MODULO) && - rte->relkind != RELKIND_FOREIGN_TABLE && rte->relkind != RELKIND_STREAM)) { - sprintf_rc = sprintf_s(u_sess->opt_cxt.not_shipping_info->not_shipping_reason, - NOTPLANSHIPPING_LENGTH, - "Table %s can not be shipped", - get_rel_name(rte->relid)); - securec_check_ss_c(sprintf_rc, "\0", "\0"); - context->current_shippable = false; - return true; + (void)stream_walker((Node*)rte->subquery, (void*)(&scontext)); + + inh_shipping_context(context, &scontext); + + scontext.allow_func_in_targetlist = false; + + break; } - if (rte->inh && has_subclass(rte->relid)) { - sprintf_rc = sprintf_s(u_sess->opt_cxt.not_shipping_info->not_shipping_reason, - NOTPLANSHIPPING_LENGTH, - "Table %s inherited can not be shipped", - get_rel_name(rte->relid)); - securec_check_ss_c(sprintf_rc, "\0", "\0"); - context->current_shippable = false; - return true; + case RTE_FUNCTION: { + (void)stream_walker((Node*)rte->funcexpr, (void*)(&scontext)); + + inh_shipping_context(context, &scontext); + + break; } + case RTE_VALUES: { + (void)stream_walker((Node*)rte->values_lists, (void*)(&scontext)); - if (query->commandType == CMD_INSERT && list_length(rtable) == 2 && rIdx == 1) - target_table_loctype = rte->locator_type; - } else if (rte->rtekind == RTE_SUBQUERY) { - /* - * We allow to push the nextval and uuid_generate_v1 to DN for the following query: - * insert into t1 select nextval('seq1'),* from t2; - * insert into t1 select uuid_generate_v1, * from t2; - * It fullfill the following conditions: - * 1. Top level query is Insert. - * 2. There are two RTE in rtable, the first one is the target table, - * which should be hash/range/list distributed. - * The second one is a subquery - * We allow the the nextval and uuid_generate_v1 in the target list of the subquery. - */ - if (query->commandType == CMD_INSERT && list_length(rtable) == 2 && rIdx == 2 && - (target_table_loctype == LOCATOR_TYPE_HASH || IsLocatorDistributedBySlice(target_table_loctype))) { - scontext.allow_func_in_targetlist = true; + inh_shipping_context(context, &scontext); + + break; + } + default: { + break; } - - (void)stream_walker((Node*)rte->subquery, (void*)(&scontext)); - - inh_shipping_context(context, &scontext); - - scontext.allow_func_in_targetlist = false; - } else if (rte->rtekind == RTE_FUNCTION) { - (void)stream_walker((Node*)rte->funcexpr, (void*)(&scontext)); - - inh_shipping_context(context, &scontext); - } else if (rte->rtekind == RTE_VALUES) { - (void)stream_walker((Node*)rte->values_lists, (void*)(&scontext)); - - inh_shipping_context(context, &scontext); } } @@ -680,7 +710,8 @@ static bool rel_contain_unshippable_feature(RangeTblEntry* rte, shipping_context * if 1. the target table is hash/range/list distributed table * 2. 
the nextval and uuid_generate_v1 function existed in the target list of the result table */ - if (rte->locator_type == LOCATOR_TYPE_HASH || IsLocatorDistributedBySlice(rte->locator_type)) { + if (rte->locator_type == LOCATOR_TYPE_HASH || IsLocatorDistributedBySlice(rte->locator_type) || + rte->locator_type == LOCATOR_TYPE_NONE) { context->allow_func_in_targetlist = true; } } diff --git a/src/gausskernel/optimizer/plan/subselect.cpp b/src/gausskernel/optimizer/plan/subselect.cpp index c19712802..f96e938fe 100644 --- a/src/gausskernel/optimizer/plan/subselect.cpp +++ b/src/gausskernel/optimizer/plan/subselect.cpp @@ -3081,6 +3081,7 @@ static Bitmapset* finalize_plan(PlannerInfo* root, Plan* plan, Bitmapset* valid_ scan_params = bms_add_member(bms_copy(scan_params), locally_added_param); (void)finalize_primnode((Node*)mtplan->returningLists, &context); (void)finalize_primnode((Node*)mtplan->updateTlist, &context); + (void)finalize_primnode((Node*)mtplan->upsertWhere, &context); finalize_plans(root, &context, mtplan->plans, valid_params, scan_params); } break; #ifdef PGXC diff --git a/src/gausskernel/optimizer/prep/prepjointree.cpp b/src/gausskernel/optimizer/prep/prepjointree.cpp index 85e93c5a2..01993edef 100755 --- a/src/gausskernel/optimizer/prep/prepjointree.cpp +++ b/src/gausskernel/optimizer/prep/prepjointree.cpp @@ -1297,6 +1297,11 @@ static Node* pull_up_simple_subquery(PlannerInfo* root, Node* jtnode, RangeTblEn } } + foreach(lc, subquery->rtable) { + RangeTblEntry *rte = (RangeTblEntry *)lfirst(lc); + rte->pulled_from_subquery = true; + } + /* * Now append the adjusted rtable entries to upper query. (We hold off * until after fixing the upper rtable entries; no point in running that diff --git a/src/gausskernel/optimizer/prep/preprownum.cpp b/src/gausskernel/optimizer/prep/preprownum.cpp index 85de5bbd5..1af3bc7e8 100755 --- a/src/gausskernel/optimizer/prep/preprownum.cpp +++ b/src/gausskernel/optimizer/prep/preprownum.cpp @@ -16,11 +16,11 @@ * preprownum.cpp * Planner preprocessing for ROWNUM * The main function is to rewrite ROWNUM to LIMIT in parse tree if possible. 
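The rewrite sketched in this header comment can be summarized as a partial mapping from {ROWNUM op constant} to a LIMIT count. The helper below is an illustrative simplification for the AND-context cases that reduce cleanly (rownumToLimit is not a kernel function); the remaining operators are folded to boolean constants plus LIMIT 0 by the process_rownum_* helpers that follow:

```
#include <cstdint>
#include <optional>

enum class RowNumOp { LT, LE }; /* the cases that map directly to LIMIT */

static std::optional<int64_t> rownumToLimit(RowNumOp op, int64_t v)
{
    switch (op) {
        case RowNumOp::LT:
            return v <= 1 ? 0 : v - 1; /* rownum < 5  => LIMIT 4 */
        case RowNumOp::LE:
            return v < 1 ? 0 : v;      /* rownum <= 5 => LIMIT 5 */
    }
    return std::nullopt; /* other operators need constant folding instead */
}
```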
- * For example, + * For example, * {select * from table_name where rownum < 5;} * can be rewrited to * {select * from table_name limit 4;} - * + * * IDENTIFICATION * src/gausskernel/optimizer/prep/preprownum.cpp * @@ -29,17 +29,19 @@ #include "optimizer/prep.h" #include "nodes/makefuncs.h" +#include "utils/int8.h" #ifndef ENABLE_MULTIPLE_NODES static Node* preprocess_rownum_opexpr(PlannerInfo* root, Query* parse, OpExpr* expr, bool isOrExpr); -static Node* process_rownum_boolexpr(PlannerInfo *root, Query* parse, BoolExpr* quals); +static Node* process_rownum_boolexpr(PlannerInfo* root, Query* parse, BoolExpr* quals); static Node* process_rownum_lt(Query *parse, OpExpr* qual, bool isOrExpr); static Node* process_rownum_le(Query* parse, OpExpr* qual, bool isOrExpr); static Node* process_rownum_eq(Query* parse, OpExpr* qual, bool isOrExpr); static Node* process_rownum_gt(Query* parse, OpExpr* qual, bool isOrExpr); static Node* process_rownum_ge(Query* parse, OpExpr* qual, bool isOrExpr); static Node* process_rownum_ne(Query* parse, OpExpr* qual, bool isOrExpr); -static int64 extract_rownum_limit(OpExpr *expr); +static bool try_extract_rownum_limit(OpExpr* expr, int64* retValue); +static bool try_extract_numeric_rownum(Oid type, Datum value, int64* retValue); /* * preprocess_rownum @@ -62,7 +64,7 @@ void preprocess_rownum(PlannerInfo *root, Query *parse) return; } } - + quals = (Node*)canonicalize_qual((Expr*)quals, false); switch (nodeTag(quals)) { case T_OpExpr: { @@ -154,8 +156,9 @@ static bool is_optimizable_rownum_opexpr(PlannerInfo* root, OpExpr* expr) /* now, only constant integer types are supported to rewrite */ Oid consttype = ((Const*)rightArg)->consttype; - if (consttype == INT8OID || consttype == INT4OID || - consttype == INT2OID || consttype == INT1OID) { + if (consttype == INT8OID || consttype == INT4OID || + consttype == INT2OID || consttype == INT1OID || + consttype == NUMERICOID) { return true; } @@ -173,12 +176,14 @@ static Node* preprocess_rownum_opexpr(PlannerInfo* root, Query* parse, OpExpr* e case INT8LTOID: case INT84LTOID: case INT82LTOID: + case NUMERICLTOID: /* operator '<' */ return process_rownum_lt(parse, expr, isOrExpr); case INT8LEOID: case INT84LEOID: case INT82LEOID: + case NUMERICLEOID: /* operator '<=' */ return process_rownum_le(parse, expr, isOrExpr); @@ -191,12 +196,14 @@ static Node* preprocess_rownum_opexpr(PlannerInfo* root, Query* parse, OpExpr* e case INT8GTOID: case INT84GTOID: case INT82GTOID: + case NUMERICGTOID: /* operator '>' */ return process_rownum_gt(parse, expr, isOrExpr); case INT8GEOID: case INT84GEOID: case INT82GEOID: + case NUMERICGEOID: /* operator '>=' */ return process_rownum_ge(parse, expr, isOrExpr); @@ -211,43 +218,99 @@ static Node* preprocess_rownum_opexpr(PlannerInfo* root, Query* parse, OpExpr* e } } -/* extract const value from OpExpr like {rownum op Const} */ -static int64 extract_rownum_limit(OpExpr *expr) +/* + * adapt process rownum to limit with different ops when rownum is numeric + * Note: if someone change the behavior of process_rownum_ops, please adapt + * this function, too! + */ +static bool try_extract_numeric_rownum(Oid type, Datum value, int64* retValue) { + Numeric ceilValue = DatumGetNumeric(DirectFunctionCall1(numeric_ceil, value)); + Numeric floorValue = DatumGetNumeric(DirectFunctionCall1(numeric_floor, value)); + NumericVar x; + + /* + * '<' takes ceilValue and '>' takes floorValue because we need to keep + * consistent to the process_rownum_ops functions. 
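The ceil/floor pairing follows from ROWNUM being a positive integer: a bound against a fractional constant can be tightened to the nearest integer without changing which rows qualify. A self-contained check of all four equivalences, using double in place of Numeric purely for illustration:

```
#include <cassert>
#include <cmath>
#include <cstdint>

int main()
{
    const double c = 4.2;
    const int64_t ceilC = (int64_t)std::ceil(c);   /* 5: used for '<' and '>=' */
    const int64_t floorC = (int64_t)std::floor(c); /* 4: used for '<=' and '>' */

    for (int64_t rownum = 1; rownum <= 10; ++rownum) {
        assert((rownum < c) == (rownum < ceilC));    /* rownum < 4.2  <=> rownum < 5  */
        assert((rownum <= c) == (rownum <= floorC)); /* rownum <= 4.2 <=> rownum <= 4 */
        assert((rownum > c) == (rownum > floorC));   /* rownum > 4.2  <=> rownum > 4  */
        assert((rownum >= c) == (rownum >= ceilC));  /* rownum >= 4.2 <=> rownum >= 5 */
    }
    return 0;
}
```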
+ */ + switch (type) { + case NUMERICLTOID: + case NUMERICGEOID: + { + uint16 numFlags = NUMERIC_NB_FLAGBITS(ceilValue); + if (NUMERIC_FLAG_IS_NANORBI(numFlags) && !NUMERIC_FLAG_IS_BI(numFlags)) { + return false; + } + init_var_from_num(ceilValue, &x); + break; + } + case NUMERICLEOID: + case NUMERICGTOID: + { + uint16 numFlags = NUMERIC_NB_FLAGBITS(floorValue); + if (NUMERIC_FLAG_IS_NANORBI(numFlags) && !NUMERIC_FLAG_IS_BI(numFlags)) { + return false; + } + init_var_from_num(floorValue, &x); + break; + } + default: + ereport(ERROR, ((errmodule(MOD_OPT), + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported data type %u for ROWNUM limit", NUMERICOID)))); + } + + return numericvar_to_int64(&x, retValue); +} + +/* extract const value from OpExpr like {rownum op Const} */ +static bool try_extract_rownum_limit(OpExpr *expr, int64* retValue) +{ + bool canExtract = true; + Const* con = (Const *)llast(expr->args); Oid type = con->consttype; Datum value = con->constvalue; - if (type == INT8OID) { - return DatumGetInt64(value); + if (type == NUMERICOID) { + /* + * convert numeric to int64 + * if value is larger than the range of int64, rownum will not be converted to limit. + */ + canExtract = try_extract_numeric_rownum(expr->opno, value, retValue); + } else if (type == INT8OID) { + *retValue = DatumGetInt64(value); } else if (type == INT4OID) { - return (int64)DatumGetInt32(value); + *retValue = (int64)DatumGetInt32(value); } else if (type == INT2OID) { - return (int64)DatumGetInt16(value); + *retValue = (int64)DatumGetInt16(value); } else if (type == INT1OID) { - return (int64)DatumGetInt8(value); + *retValue = (int64)DatumGetInt8(value); } else { ereport(ERROR, ((errmodule(MOD_OPT), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported data type %u for ROWNUM limit", type)))); - return -1; } + + return canExtract; } /* process operator '<' in rownum expr like {rownum < 5} * if the OpExpr is rewritten, the original OpExpr can be * substituted by a bool constant expr. */ -static Node* process_rownum_lt(Query *parse, OpExpr *rnxpr, bool isOrExpr) +static Node* process_rownum_lt(Query *parse, OpExpr *qual, bool isOrExpr) { - int64 limitValue = extract_rownum_limit(rnxpr); + int64 limitValue = -1; + if (!try_extract_rownum_limit(qual, &limitValue)) + return (Node*)qual; /* ROWNUM OpExpr in OrExpr */ if (isOrExpr) { if (limitValue <= 1) { return makeBoolConst(false, false); } - return (Node*)rnxpr; + return (Node*)qual; } /* ROWNUM OpExpr in AndExpr */ @@ -265,13 +328,14 @@ static Node* process_rownum_lt(Query *parse, OpExpr *rnxpr, bool isOrExpr) * substituted by a bool constant expr. 
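The isOrExpr branching in process_rownum_lt() above is the crux: inside an AND, the ROWNUM bound caps the whole result set and may become a LIMIT, but inside an OR it must be left in place, because rows past the bound can still qualify through another disjunct; only vacuous bounds fold to constants. A toy counterexample (plain C++, not kernel code) showing why an OR-context LIMIT rewrite would be wrong:

```
#include <cstdio>

int main()
{
    /* WHERE rownum < 3 OR flag: row 5 still qualifies through flag. */
    bool flag[] = {false, false, false, false, true};
    for (int rownum = 1; rownum <= 5; ++rownum) {
        if (rownum < 3 || flag[rownum - 1]) {
            printf("row %d qualifies\n", rownum); /* prints rows 1, 2 and 5 */
        }
    }
    return 0; /* a LIMIT 2 rewrite would wrongly drop row 5 */
}
```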
*/ static Node* process_rownum_le(Query* parse, OpExpr* qual, bool isOrExpr) { - int64 limitValue = extract_rownum_limit(qual); + int64 limitValue = -1; + if (!try_extract_rownum_limit(qual, &limitValue)) + return (Node*)qual; /* ROWNUM OpExpr in OrExpr */ if (isOrExpr) { - if (limitValue < 1) { + if (limitValue < 1) return makeBoolConst(false, false); - } return (Node*)qual; } @@ -288,13 +352,14 @@ static Node* process_rownum_le(Query* parse, OpExpr* qual, bool isOrExpr) /* process operator '=' in rownum expr like {rownum = 5} */ static Node* process_rownum_eq(Query* parse, OpExpr* qual, bool isOrExpr) { - int64 limitValue = extract_rownum_limit(qual); + int64 limitValue = -1; + if (!try_extract_rownum_limit(qual, &limitValue)) + return (Node*)qual; /* ROWNUM OpExpr in OrExpr */ if (isOrExpr) { - if (limitValue < 1) { + if (limitValue < 1) return makeBoolConst(false, false); - } return (Node*)qual; } @@ -311,18 +376,18 @@ static Node* process_rownum_eq(Query* parse, OpExpr* qual, bool isOrExpr) /* process operator '>' in rownum expr like {rcoerceownum > 5} */ static Node* process_rownum_gt(Query* parse, OpExpr* qual, bool isOrExpr) { - int64 limitValue = extract_rownum_limit(qual); + int64 limitValue = -1; + if (!try_extract_rownum_limit(qual, &limitValue)) + return (Node*)qual; - if (limitValue < 1) { + if (limitValue < 1) return makeBoolConst(true, false); - } /* ROWNUM OpExpr in OrExpr */ - if (isOrExpr) { + if (isOrExpr) return (Node*)qual; - } - /* ROWNUM OpExpr in AndExpr, + /* ROWNUM OpExpr in AndExpr, * here limitValue >= 1, so ROWNUM > limitValue is always false. */ rewrite_rownum_to_limit(parse, 0); return makeBoolConst(false, false); @@ -333,16 +398,16 @@ static Node* process_rownum_gt(Query* parse, OpExpr* qual, bool isOrExpr) * substituted by a bool constant expr. 
*/ static Node* process_rownum_ge(Query* parse, OpExpr* qual, bool isOrExpr) { - int64 limitValue = extract_rownum_limit(qual); + int64 limitValue = -1; + if (!try_extract_rownum_limit(qual, &limitValue)) + return (Node*)qual; - if (limitValue <= 1) { + if (limitValue <= 1) return makeBoolConst(true, false); - } /* ROWNUM OpExpr in OrExpr */ - if (isOrExpr) { + if (isOrExpr) return (Node*)qual; - } /* ROWNUM OpExpr in AndExpr */ rewrite_rownum_to_limit(parse, 0); @@ -354,16 +419,16 @@ static Node* process_rownum_ge(Query* parse, OpExpr* qual, bool isOrExpr) */ static Node* process_rownum_ne(Query* parse, OpExpr* qual, bool isOrExpr) { - int64 limitValue = extract_rownum_limit(qual); + int64 limitValue = -1; + if (!try_extract_rownum_limit(qual, &limitValue)) + return (Node*)qual; - if (limitValue < 1) { + if (limitValue < 1) return makeBoolConst(true, false); - } /* ROWNUM OpExpr in OrExpr */ - if (isOrExpr) { + if (isOrExpr) return (Node*)qual; - } /* ROWNUM OpExpr in AndExpr */ if (limitValue == 1) { diff --git a/src/gausskernel/optimizer/prep/prepunion.cpp b/src/gausskernel/optimizer/prep/prepunion.cpp index 6322bdf19..77028b0f8 100644 --- a/src/gausskernel/optimizer/prep/prepunion.cpp +++ b/src/gausskernel/optimizer/prep/prepunion.cpp @@ -1603,6 +1603,7 @@ static void expand_inherited_rtentry(PlannerInfo* root, RangeTblEntry* rte, Inde newrc->rowmarkId = oldrc->rowmarkId; newrc->markType = oldrc->markType; newrc->noWait = oldrc->noWait; + newrc->waitSec = oldrc->waitSec; newrc->isParent = false; root->rowMarks = lappend(root->rowMarks, newrc); @@ -2367,6 +2368,7 @@ void expand_internal_rtentry(PlannerInfo* root, RangeTblEntry* rte, Index rti) newrc->rowmarkId = oldrc->rowmarkId; newrc->markType = oldrc->markType; newrc->noWait = oldrc->noWait; + newrc->waitSec = oldrc->waitSec; newrc->isParent = false; root->rowMarks = lappend(root->rowMarks, newrc); diff --git a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp index ee77557e4..c5cabcf01 100644 --- a/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp +++ b/src/gausskernel/optimizer/rewrite/rewriteHandler.cpp @@ -74,7 +74,8 @@ static TargetEntry* process_matched_tle(TargetEntry* src_tle, TargetEntry* prior static Node* get_assignment_input(Node* node); static void rewriteValuesRTE(RangeTblEntry* rte, Relation target_relation, List* attrnos); static void rewriteTargetListUD(Query* parsetree, RangeTblEntry* target_rte, Relation target_relation); -static void markQueryForLocking(Query* qry, Node* jtnode, LockClauseStrength strength, bool noWait, bool pushedDown); +static void markQueryForLocking(Query* qry, Node* jtnode, LockClauseStrength strength, bool noWait, bool pushedDown, + int waitSec); static List* matchLocks(CmdType event, RuleLock* rulelocks, int varno, Query* parsetree); static Query* fireRIRrules(Query* parsetree, List* activeRIRs, bool forUpdatePushedDown); @@ -98,8 +99,8 @@ static bool pull_qual_vars_walker(Node* node, pull_qual_vars_context* context); * to the current subquery, requiring all rels to be opened with RowShareLock. * This should always be false at the start of the recursion. * - * A secondary purpose of this routine is to fix up JOIN RTE references to - * dropped columns (see details below). Because the RTEs are modified in + * Caution: A secondary purpose of this routine is to fix up JOIN RTE references + * to dropped columns (see details below). 
Because the RTEs are modified in * place, it is generally appropriate for the caller of this routine to have * first done a copyObject() to make a writable copy of the querytree in the * current memory context. @@ -1570,8 +1571,26 @@ static Query* ApplyRetrieveRule(Query* parsetree, RewriteRule* rule, int rt_inde AcquireRewriteLocks(rule_action, forUpdatePushedDown); + /* + * If FOR [KEY] UPDATE/SHARE of view, mark all the contained tables as implicit + * FOR [KEY] UPDATE/SHARE, the same as the parser would have done if the view's + * subquery had been written out explicitly. + * + * Note: we don't consider forUpdatePushedDown here; such marks will be + * made by recursing from the upper level in markQueryForLocking. + */ + if (rc != NULL) + markQueryForLocking(rule_action, (Node*)rule_action->jointree, rc->strength, rc->noWait, true, + rc->waitSec); + /* * Recursively expand any view references inside the view. + * + * Note: this must happen after markQueryForLocking. That way, any UPDATE + * permission bits needed for sub-views are initially applied to their + * RTE_RELATION RTEs by markQueryForLocking, and then transferred to their + * OLD rangetable entries by the action below (in a recursive call of this + * routine). */ rule_action = fireRIRrules(rule_action, activeRIRs, forUpdatePushedDown); @@ -1607,17 +1626,6 @@ static Query* ApplyRetrieveRule(Query* parsetree, RewriteRule* rule, int rt_inde rte->updatedCols = NULL; rte->extraUpdatedCols = NULL; - /* - * If FOR [KEY] UPDATE/SHARE of view, mark all the contained tables as implicit - * FOR [KEY] UPDATE/SHARE, the same as the parser would have done if the view's - * subquery had been written out explicitly. - * - * Note: we don't consider forUpdatePushedDown here; such marks will be - * made by recursing from the upper level in markQueryForLocking. - */ - if (rc != NULL) - markQueryForLocking(rule_action, (Node*)rule_action->jointree, rc->strength, rc->noWait, true); - return parsetree; } @@ -1632,7 +1640,8 @@ static Query* ApplyRetrieveRule(Query* parsetree, RewriteRule* rule, int rt_inde * OLD and NEW rels for updating. The best way to handle that seems to be * to scan the jointree to determine which rels are used. 
*/ -static void markQueryForLocking(Query* qry, Node* jtnode, LockClauseStrength strength, bool noWait, bool pushedDown) +static void markQueryForLocking(Query* qry, Node* jtnode, LockClauseStrength strength, bool noWait, bool pushedDown, + int waitSec) { if (jtnode == NULL) return; @@ -1641,12 +1650,13 @@ static void markQueryForLocking(Query* qry, Node* jtnode, LockClauseStrength str RangeTblEntry* rte = rt_fetch(rti, qry->rtable); if (rte->rtekind == RTE_RELATION) { - applyLockingClause(qry, rti, strength, noWait, pushedDown); + applyLockingClause(qry, rti, strength, noWait, pushedDown, waitSec); rte->requiredPerms |= ACL_SELECT_FOR_UPDATE; } else if (rte->rtekind == RTE_SUBQUERY) { - applyLockingClause(qry, rti, strength, noWait, pushedDown); - /* FOR [KYE] UPDATE/SHARE of subquery is propagated to subquery's rels */ - markQueryForLocking(rte->subquery, (Node*)rte->subquery->jointree, strength, noWait, true); + applyLockingClause(qry, rti, strength, noWait, pushedDown, waitSec); + /* FOR UPDATE/SHARE of subquery is propagated to subquery's rels */ + markQueryForLocking(rte->subquery, (Node*)rte->subquery->jointree, strength, noWait, true, + waitSec); } /* other RTE types are unaffected by FOR UPDATE */ } else if (IsA(jtnode, FromExpr)) { @@ -1654,12 +1664,12 @@ static void markQueryForLocking(Query* qry, Node* jtnode, LockClauseStrength str ListCell* l = NULL; foreach (l, f->fromlist) - markQueryForLocking(qry, (Node*)lfirst(l), strength, noWait, pushedDown); + markQueryForLocking(qry, (Node*)lfirst(l), strength, noWait, pushedDown, waitSec); } else if (IsA(jtnode, JoinExpr)) { JoinExpr* j = (JoinExpr*)jtnode; - markQueryForLocking(qry, j->larg, strength, noWait, pushedDown); - markQueryForLocking(qry, j->rarg, strength, noWait, pushedDown); + markQueryForLocking(qry, j->larg, strength, noWait, pushedDown, waitSec); + markQueryForLocking(qry, j->rarg, strength, noWait, pushedDown, waitSec); } else ereport(ERROR, (errmodule(MOD_OPT_REWRITE), @@ -3007,7 +3017,7 @@ char* GetInsertIntoStmt(CreateTableAsStmt* stmt) */ RangeVar *relation = stmt->into->rel; if (relation->schemaname == NULL && relation->relpersistence != RELPERSISTENCE_TEMP) { - Oid namespaceid = RangeVarGetAndCheckCreationNamespace(relation, NoLock, NULL); + Oid namespaceid = RangeVarGetAndCheckCreationNamespace(relation, NoLock, NULL, RELKIND_RELATION); relation->schemaname = get_namespace_name(namespaceid); } @@ -3071,7 +3081,7 @@ List *QueryRewriteRefresh(Query *parse_tree) RangeVar *relation = stmt->relation; if (relation->schemaname == NULL && relation->relpersistence != RELPERSISTENCE_TEMP) { - Oid namespaceid = RangeVarGetAndCheckCreationNamespace(relation, NoLock, NULL); + Oid namespaceid = RangeVarGetAndCheckCreationNamespace(relation, NoLock, NULL, RELKIND_MATVIEW); relation->schemaname = get_namespace_name(namespaceid); } @@ -3134,8 +3144,10 @@ List *QueryRewriteRefresh(Query *parse_tree) dataQuery = (Query *) linitial(actions); - deparse_query(dataQuery, cquery, NIL, false, false, NULL); + Query* parsetree = (Query*)copyObject(dataQuery); + deparse_query(parsetree, cquery, NIL, false, false, NULL); char* selectstr = pstrdup(cquery->data); + pfree_ext(parsetree); initStringInfo(cquery); appendStringInfo(cquery, "INSERT "); diff --git a/src/gausskernel/optimizer/sqladvisor/sqladvisor.cpp b/src/gausskernel/optimizer/sqladvisor/sqladvisor.cpp index d9cf20fe9..f88171d8f 100644 --- a/src/gausskernel/optimizer/sqladvisor/sqladvisor.cpp +++ b/src/gausskernel/optimizer/sqladvisor/sqladvisor.cpp @@ -421,7 +421,7 @@ Datum 
analyze_query(PG_FUNCTION_ARGS) Cost cost = 0.0; analyzeQuery(adviseQuery, true, &cost); - + pfree_ext(queryString); PG_RETURN_BOOL(true); } diff --git a/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp b/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp index 53a380fb0..22c705791 100644 --- a/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp +++ b/src/gausskernel/optimizer/sqladvisor/sqladvisor_online.cpp @@ -179,9 +179,14 @@ ParamListInfo copyDynParam(ParamListInfo srcParamLI) get_typlenbyval(srcPrm->ptype, &typLen, &typByVal); destPrm->value = datumCopy(srcPrm->value, typByVal, typLen); } - destPrm->tableOfIndexType = srcPrm->tableOfIndexType; - destPrm->tableOfIndex = copyTableOfIndex(srcPrm->tableOfIndex); - destPrm->isnestedtable = srcPrm->isnestedtable; + destPrm->tabInfo = NULL; + if (srcPrm->tabInfo != NULL) { + destPrm->tabInfo = (TableOfInfo*)palloc0(sizeof(TableOfInfo)); + destPrm->tabInfo->tableOfIndexType = srcPrm->tabInfo->tableOfIndexType; + destPrm->tabInfo->tableOfIndex = copyTableOfIndex(srcPrm->tabInfo->tableOfIndex); + destPrm->tabInfo->isnestedtable = srcPrm->tabInfo->isnestedtable; + destPrm->tabInfo->tableOfLayers = srcPrm->tabInfo->tableOfLayers; + } CopyCursorInfoData(&destPrm->cursor_data, &srcPrm->cursor_data); } @@ -270,7 +275,7 @@ static PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr) if (srcExpr == NULL) { return NULL; } - PLpgSQL_expr* destExpr = (PLpgSQL_expr*)palloc(sizeof(PLpgSQL_expr)); + PLpgSQL_expr* destExpr = (PLpgSQL_expr*)palloc0(sizeof(PLpgSQL_expr)); destExpr->dtype = srcExpr->dtype; destExpr->dno = srcExpr->dno; @@ -279,6 +284,7 @@ static PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr) destExpr->plan = NULL; destExpr->func = copyPLpgsqlFunc(srcExpr->func); destExpr->paramnos = bms_copy(srcExpr->paramnos); + destExpr->out_param_dno = srcExpr->out_param_dno; destExpr->ns = copyPLpgNsitem(srcExpr->ns); destExpr->expr_simple_expr = NULL; @@ -289,6 +295,10 @@ static PLpgSQL_expr* copyPLpgsqlExpr(PLpgSQL_expr* srcExpr) destExpr->expr_simple_in_use = false; destExpr->expr_simple_lxid = 0; destExpr->isouttype = srcExpr->isouttype; + destExpr->is_have_tableof_index_var = srcExpr->is_have_tableof_index_var; + destExpr->tableof_var_dno = srcExpr->tableof_var_dno; + destExpr->is_have_tableof_index_func = srcExpr->is_have_tableof_index_func; + destExpr->tableof_func_dno = srcExpr->tableof_func_dno; return destExpr; } @@ -381,6 +391,7 @@ static PLpgSQL_function* copyPLpgsqlFunc(PLpgSQL_function* srcFunc) destFunc->use_count = srcFunc->use_count; destFunc->pre_parse_trig = srcFunc->pre_parse_trig; destFunc->tg_relation = NULL; + destFunc->is_plpgsql_func_with_outparam = srcFunc->is_plpgsql_func_with_outparam; return destFunc; } @@ -430,14 +441,18 @@ static PLpgSQL_execstate* copyPLpgEstate(PLpgSQL_execstate* srcEstate) PLpgSQL_execstate* destEstate = (PLpgSQL_execstate*)palloc(sizeof(PLpgSQL_execstate)); destEstate->func = NULL; - destEstate->retval = (Datum)0;; + destEstate->retval = (Datum)0; destEstate->retisnull = srcEstate->retisnull; destEstate->rettype = srcEstate->rettype; + destEstate->paramval = (Datum)0; + destEstate->paramisnull = srcEstate->paramisnull; + destEstate->paramtype = srcEstate->paramtype; destEstate->fn_rettype = srcEstate->fn_rettype; destEstate->retistuple = srcEstate->retistuple; destEstate->retisset = srcEstate->retisset; destEstate->readonly_func = srcEstate->readonly_func; destEstate->rettupdesc = NULL; + destEstate->paramtupdesc = NULL; destEstate->exitlabel = NULL; destEstate->cur_error = 
NULL; destEstate->tuple_store = NULL; @@ -472,6 +487,7 @@ static PLpgSQL_execstate* copyPLpgEstate(PLpgSQL_execstate* srcEstate) destEstate->cursor_return_data = NULL; destEstate->stack_entry_start = srcEstate->stack_entry_start; destEstate->curr_nested_table_type = 0; + destEstate->is_exception = false; return destEstate; } diff --git a/src/gausskernel/optimizer/util/bucketinfo.cpp b/src/gausskernel/optimizer/util/bucketinfo.cpp index 321b2edcc..e0f37ef43 100644 --- a/src/gausskernel/optimizer/util/bucketinfo.cpp +++ b/src/gausskernel/optimizer/util/bucketinfo.cpp @@ -44,7 +44,8 @@ bool hasValidBuckets(RangeVar* r, int bucketmapsize) foreach (lc, r->buckets) { uint2 v = (uint2)intVal(lfirst(lc)); if (v >= bucketmapsize) { - ereport(ERROR, + int eleval = (bucketmapsize == 0 ? PANIC : ERROR); + ereport(eleval, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("buckets id %d of table \"%s\" is outsize range [%d,%d]", v, diff --git a/src/gausskernel/optimizer/util/clauses.cpp b/src/gausskernel/optimizer/util/clauses.cpp index 77acedd84..a9a155d74 100644 --- a/src/gausskernel/optimizer/util/clauses.cpp +++ b/src/gausskernel/optimizer/util/clauses.cpp @@ -2178,6 +2178,109 @@ static bool rowtype_field_matches( return true; } +static Node *GetNullExprFromRowExpr(RowExpr *rarg, NullTest *ntest) +{ + List *newargs = NIL; + ListCell *l = NULL; + NullTest* newntest = NULL; + + foreach (l, rarg->args) { + Node *relem = (Node *)lfirst(l); + + /* + * A constant field refutes the whole NullTest if it's + * of the wrong nullness; else we can discard it. + */ + if (relem && IsA(relem, Const)) { + Const *carg = (Const *)relem; + + if (carg->constisnull ? (ntest->nulltesttype == IS_NOT_NULL) : (ntest->nulltesttype == IS_NULL)) { + return makeBoolConst(false, false); + } + continue; + } + /* + * Else, make a scalar (argisrow == false) NullTest + * for this field. Scalar semantics are required + * because IS [NOT] NULL doesn't recurse; see comments + * in ExecEvalNullTest(). + */ + newntest = makeNode(NullTest); + newntest->arg = (Expr *)relem; + newntest->nulltesttype = ntest->nulltesttype; + newntest->argisrow = false; + newargs = lappend(newargs, newntest); + } + /* If all the inputs were constants, result is TRUE */ + if (newargs == NIL) { + return makeBoolConst(true, false); + } + /* If only one nonconst input, it's the result */ + if (list_length(newargs) == 1) { + return (Node *)linitial(newargs); + } + /* Else we need an AND node */ + return (Node *)make_andclause(newargs); +} + +static Node *GetNullExprFromRowExprForAFormat(RowExpr *rarg, NullTest *ntest) +{ + List *newargs = NIL; + ListCell *l = NULL; + NullTest* newntest = NULL; + + foreach (l, rarg->args) { + Node *relem = (Node *)lfirst(l); + + /* + * A constant field refutes the whole NullTest if it's + * of the wrong nullness; else we can discard it. + */ + if (relem && IsA(relem, Const)) { + Const *carg = (Const *)relem; + + if (!(carg->constisnull)) { + if (ntest->nulltesttype == IS_NOT_NULL) { + return makeBoolConst(true, false); + } + if (ntest->nulltesttype == IS_NULL) { + return makeBoolConst(false, false); + } + } + continue; + } + /* + * Else, make a scalar (argisrow == false) NullTest + * for this field. Scalar semantics are required + * because IS [NOT] NULL doesn't recurse; see comments + * in ExecEvalNullTest(). 
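The A-format variant being added here changes what IS NOT NULL means for a row value: the default expansion treats ROW(...) IS NOT NULL as "every field is non-null", while under AFORMAT_NULL_TEST_MODE it is the plain negation of IS NULL, i.e. "not all fields are null". A self-contained comparison of the two readings (std::optional stands in for SQL NULL; names are illustrative):

```
#include <cassert>
#include <optional>
#include <vector>

using Field = std::optional<int>; /* std::nullopt models SQL NULL */

static bool allNull(const std::vector<Field>& row)
{
    for (const auto& f : row) {
        if (f.has_value()) {
            return false;
        }
    }
    return true;
}

static bool allNonNull(const std::vector<Field>& row)
{
    for (const auto& f : row) {
        if (!f.has_value()) {
            return false;
        }
    }
    return true;
}

int main()
{
    std::vector<Field> row = {1, std::nullopt}; /* ROW(1, NULL) */
    assert(!allNull(row));                 /* IS NULL is false in both modes */
    bool defaultNotNull = allNonNull(row); /* default IS NOT NULL: false */
    bool aformatNotNull = !allNull(row);   /* A-format IS NOT NULL: true */
    assert(!defaultNotNull && aformatNotNull);
    return 0;
}
```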
+ */ + newntest = makeNode(NullTest); + newntest->arg = (Expr *)relem; + newntest->nulltesttype = IS_NULL; + newntest->argisrow = false; + newargs = lappend(newargs, newntest); + } + /* If all the inputs were constants, result is TRUE */ + if (newargs == NIL) { + if (ntest->nulltesttype == IS_NOT_NULL) { + return makeBoolConst(false, false); + } + if (ntest->nulltesttype == IS_NULL) { + return makeBoolConst(true, false); + } + } + /* If only one nonconst input, it's the result */ + if (list_length(newargs) == 1) + return (Node *)linitial(newargs); + if (ntest->nulltesttype == IS_NULL) { + /* Else we need an AND node */ + return (Node *)make_andclause(newargs); + } else { + return (Node *)make_notclause((Expr *)make_andclause(newargs)); + } +} + /* -------------------- * eval_const_expressions * @@ -3081,44 +3184,11 @@ Node* eval_const_expressions_mutator(Node* node, eval_const_expressions_context* * efficient to evaluate, as well as being more amenable * to optimization. */ - RowExpr* rarg = (RowExpr*)arg; - List* newargs = NIL; - ListCell* l = NULL; - - foreach (l, rarg->args) { - Node* relem = (Node*)lfirst(l); - - /* - * A constant field refutes the whole NullTest if it's - * of the wrong nullness; else we can discard it. - */ - if (relem && IsA(relem, Const)) { - Const* carg = (Const*)relem; - - if (carg->constisnull ? (ntest->nulltesttype == IS_NOT_NULL) : (ntest->nulltesttype == IS_NULL)) - return makeBoolConst(false, false); - continue; - } - /* - * Else, make a scalar (argisrow == false) NullTest - * for this field. Scalar semantics are required - * because IS [NOT] NULL doesn't recurse; see comments - * in ExecEvalNullTest(). - */ - newntest = makeNode(NullTest); - newntest->arg = (Expr*)relem; - newntest->nulltesttype = ntest->nulltesttype; - newntest->argisrow = false; - newargs = lappend(newargs, newntest); + if (AFORMAT_NULL_TEST_MODE) { + return GetNullExprFromRowExprForAFormat((RowExpr *)arg, ntest); + } else { + return GetNullExprFromRowExpr((RowExpr *)arg, ntest); } - /* If all the inputs were constants, result is TRUE */ - if (newargs == NIL) - return makeBoolConst(true, false); - /* If only one nonconst input, it's the result */ - if (list_length(newargs) == 1) - return (Node*)linitial(newargs); - /* Else we need an AND node */ - return (Node*)make_andclause(newargs); } if (!ntest->argisrow && arg && IsA(arg, Const)) { Const* carg = (Const*)arg; @@ -3948,6 +4018,14 @@ static void recheck_cast_function_args(List* args, Oid result_type, HeapTuple fu } } + /* if argtype is table of, change its element type */ + for (int i = 0; i < nargs; i++) { + Oid baseOid = InvalidOid; + if (isTableofType(proargtypes[i], &baseOid, NULL)) { + proargtypes[i] = baseOid; + } + } + errno_t errorno; errorno = memcpy_s(declared_arg_types, FUNC_MAX_ARGS * sizeof(Oid), proargtypes, proc_arg * sizeof(Oid)); securec_check(errorno, "", ""); diff --git a/src/gausskernel/optimizer/util/learn/plan_tree_model.cpp b/src/gausskernel/optimizer/util/learn/plan_tree_model.cpp index 3ee869b27..49e2ace60 100644 --- a/src/gausskernel/optimizer/util/learn/plan_tree_model.cpp +++ b/src/gausskernel/optimizer/util/learn/plan_tree_model.cpp @@ -1,286 +1,286 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. 
- * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * plan_tree_model.cpp - * - * IDENTIFICATION - * src/gausskernel/optimizer/util/learn/plan_tree_model.cpp - * - * ------------------------------------------------------------------------- - */ - -#include "cjson/cJSON.h" -#include "funcapi.h" -#include "optimizer/comm.h" -#include "optimizer/encoding.h" -#include "optimizer/learn.h" -#include "optimizer/plan_tree_model.h" -#include "utils/timestamp.h" - -static char* FormConfigureJson(const Form_gs_opt_model modelinfo, const char* labels); -static char* FormSetupJson(const char* modelName); -static void ConfigureModel(Form_gs_opt_model modelinfo, const char* labels, char** filename); -static char* TrainModel(const Form_gs_opt_model modelinfo, char* filename); -static void Unlinkfile(char* filename); - -/** - * @Description: registered function for model plan tree model training procedure - * @in maxEpoch - max number of epoch to train - * @in learningRate - learning rate for neural network back propagation - * @in hiddenUnits - number of hidden units in the fully-connect layer - * @out catalog gs_opt_model's related columns will be updated accordingly - * @return sucessful or other error/warning messages - */ -char* TreeModelTrain(Form_gs_opt_model modelinfo, char* labels) -{ - char* filename = (char*)palloc0(sizeof(char) * MAX_LEN_TEXT); - char* buf = NULL; - /* 1. configure the remote server for training to see if the server is ready */ - ConfigureModel(modelinfo, labels, &filename); - - /* 2. save encoded data to file */ - SaveDataToFile(filename); - - /* 3. send saved file to server to trigger training */ - buf = TrainModel(modelinfo, filename); - return buf; -} - -/** - * @Description: function for plan tree model predicting procedure - * @in data_id - id of dataset to be predicted - * @return sucessful or other error/warning messages - */ -char* TreeModelPredict(const char* modelName, char* filepath, const char* ip, int port) -{ - /* 1. 
setup the model for prediction */ - char* buf = (char*)palloc0(sizeof(char) * CURL_BUF_SIZE); - AiEngineConnInfo* conninfo = (AiEngineConnInfo*)palloc0(sizeof(AiEngineConnInfo)); - char portStr[PORT_LEN] = {'\0'}; - errno_t ret = sprintf_s(portStr, PORT_LEN, "%d", port); - securec_check_ss(ret, "\0", "\0"); - conninfo->host = pstrdup(ip); - conninfo->port = pstrdup(portStr); - conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_PREPREDICT); - conninfo->header = pstrdup(PYTHON_SERVER_HEADER_JSON); - conninfo->json_string = FormSetupJson(modelName); - if (!TryConnectRemoteServer(conninfo, &buf)) { - DestroyConnInfo(conninfo); - ParseResBuf(buf, filepath, "AI engine connection failed."); - return buf; - } - - switch (buf[0]) { - case '0': { - ereport(NOTICE, (errmodule(MOD_OPT_AI), errmsg("Model setup successfully."))); - break; - } - case 'M': { - ParseResBuf(buf, filepath, "Internal error: missing compulsory key."); - break; - } - case 'i': { - ParseResBuf(buf, filepath, "Internal error: failed to load model, please retrain."); - break; - } - case 'N': { - ParseResBuf(buf, filepath, "Internal error: model not found, please make sure the model is trained."); - break; - } - case 'R': { - ParseResBuf(buf, filepath, "Another session is running on AIEngine. If not, please restart AIEngine"); - break; - } - default: { - ParseResBuf(buf, filepath, "Internal error: unknown error."); - break; - } - } - /* 2. send saved file to server to trigger prediction */ - conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_PREDICT); - conninfo->url = NULL; - conninfo->header = NULL; - conninfo->json_string = NULL; - conninfo->file_tag = pstrdup("file"); - conninfo->file_path = pstrdup(filepath); - if (!TryConnectRemoteServer(conninfo, &buf)) { - ParseResBuf(buf, filepath, "AI engine connection failed."); - return buf; - } - switch (buf[0]) { - case 'M': { - ParseResBuf(buf, filepath, "Internal error: fail to load the file to predict."); - break; - } - case 'S': { - ParseResBuf(buf, filepath, "Internal error: session is not loaded, model setup required."); - break; - } - default: { - break; - } - } - return buf; -} - -static char* FormConfigureJson(const Form_gs_opt_model modelinfo, const char* labels) -{ - char* result = (char*)palloc0(sizeof(char) * MAX_LEN_JSON); - cJSON* jsonObj = cJSON_CreateObject(); - cJSON_AddStringToObject(jsonObj, "template_name", modelinfo->template_name.data); - cJSON_AddStringToObject(jsonObj, "model_name", modelinfo->model_name.data); - cJSON_AddNumberToObject(jsonObj, "max_epoch", modelinfo->max_epoch); - cJSON_AddNumberToObject(jsonObj, "learning_rate", modelinfo->learning_rate); - cJSON_AddNumberToObject(jsonObj, "dim_red", modelinfo->dim_red); - cJSON_AddNumberToObject(jsonObj, "hidden_units", modelinfo->hidden_units); - cJSON_AddNumberToObject(jsonObj, "batch_size", modelinfo->batch_size); - cJSON_AddStringToObject(jsonObj, "labels", labels); - char* buf = cJSON_Print(jsonObj); - if (buf != NULL) { - result = pstrdup(buf); - cJSON_free(buf); - } - cJSON_Delete(jsonObj); - return result; -} - -static char* FormSetupJson(const char* modelName) -{ - char* result = (char*)palloc0(sizeof(char) * MAX_LEN_JSON); - cJSON* jsonObj = cJSON_CreateObject(); - cJSON_AddStringToObject(jsonObj, "model_name", modelName); - char* buf = cJSON_Print(jsonObj); - if (buf != NULL) { - result = pstrdup(buf); - cJSON_free(buf); - } - cJSON_Delete(jsonObj); - return result; -} - -/* configure the remote server for training to see if the server is ready */ -static void ConfigureModel(Form_gs_opt_model 
modelinfo, const char* labels, char** filename) -{ - char* buf = NULL; - errno_t ret = sprintf_s( - *filename, MAX_LEN_TEXT - 1, "%s-%s", TRAIN_DATASET_FILEPATH, timestamptz_to_str(GetCurrentTimestamp())); - securec_check_ss(ret, "\0", "\0"); - AiEngineConnInfo* conninfo = (AiEngineConnInfo*)palloc0(sizeof(AiEngineConnInfo)); - char portStr[PORT_LEN] = {'\0'}; - ret = sprintf_s(portStr, PORT_LEN, "%d", modelinfo->port); - securec_check_ss(ret, "\0", "\0"); - conninfo->host = pstrdup(modelinfo->ip.data); - conninfo->port = pstrdup(portStr); - conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_PRETRAIN); - conninfo->header = pstrdup(PYTHON_SERVER_HEADER_JSON); - conninfo->json_string = FormConfigureJson(modelinfo, labels); - if (!TryConnectRemoteServer(conninfo, &buf)) { - ParseConfigBuf(buf, conninfo, "AI engine connection failed."); - } - - switch (buf[0]) { - case '0': { - pfree_ext(buf); - DestroyConnInfo(conninfo); - ereport(NOTICE, - (errmodule(MOD_OPT_AI), - errcode(ERRCODE_UNEXPECTED_NODE_STATE), - errmsg("Model configured successfully."))); - break; - } - case 'F': { - ParseConfigBuf(buf, conninfo, "AIEngine internal error: configuration contains type error."); - break; - } - case 'I': { - ParseConfigBuf(buf, conninfo, "AIEngine internal error: key not unrecognized."); - break; - } - case 'M': { - ParseConfigBuf(buf, conninfo, "AIEngine internal error: missing compulsory key."); - break; - } - default: { - ParseConfigBuf(buf, conninfo, "AIEngine internal error: unknown error."); - break; - } - } -} - -static char* TrainModel(const Form_gs_opt_model modelinfo, char* filename) -{ - char* buf = NULL; - char portStr[PORT_LEN] = {'\0'}; - errno_t ret = sprintf_s(portStr, PORT_LEN, "%d", modelinfo->port); - securec_check_ss(ret, "\0", "\0"); - AiEngineConnInfo* conninfo = (AiEngineConnInfo*)palloc0(sizeof(AiEngineConnInfo)); - conninfo->host = pstrdup(modelinfo->ip.data); - conninfo->port = pstrdup(portStr); - conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_TRAIN); - conninfo->url = NULL; - conninfo->header = NULL; - conninfo->json_string = NULL; - conninfo->file_tag = pstrdup("file"); - conninfo->file_path = pstrdup(filename); - if (!TryConnectRemoteServer(conninfo, &buf)) { - DestroyConnInfo(conninfo); - pfree_ext(buf); - ereport(ERROR, - (errmodule(MOD_OPT_AI), errcode(ERRCODE_UNEXPECTED_NODE_STATE), errmsg("AI engine connection failed."))); - return buf; - } - - DestroyConnInfo(conninfo); - Unlinkfile(filename); - switch (buf[0]) { - case 'M': { - pfree_ext(buf); - ereport(ERROR, - (errmodule(MOD_OPT_AI), - errcode(ERRCODE_DATA_CORRUPTED), - errmsg("AIEngine internal error: missing compulsory key."))); - break; - } - case 'R': { - pfree_ext(buf); - ereport(ERROR, - (errmodule(MOD_OPT_AI), - errcode(ERRCODE_UNEXPECTED_NODE_STATE), - errmsg("Another session is running on AIEngine. If not, please restart AIEngine"))); - break; - } - default: { - break; - } - } - return buf; -} - -static void Unlinkfile(char* filename) -{ - if (access(filename, F_OK) == 0) { - if (unlink(filename) < 0) { - ereport(ERROR, - (errmodule(MOD_OPT_AI), - errcode_for_file_access(), - errmsg("could not unlink file \"%s\": %m", filename))); - } else { - ereport(LOG, (errmsg("Unlinked file: \"%s\"", filename))); - } - } else { - ereport(ERROR, - (errmodule(MOD_OPT_AI), errcode_for_file_access(), errmsg("could not access file \"%s\": %m", filename))); - } -} +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * plan_tree_model.cpp
+ *
+ * IDENTIFICATION
+ *        src/gausskernel/optimizer/util/learn/plan_tree_model.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "cjson/cJSON.h"
+#include "funcapi.h"
+#include "optimizer/comm.h"
+#include "optimizer/encoding.h"
+#include "optimizer/learn.h"
+#include "optimizer/plan_tree_model.h"
+#include "utils/timestamp.h"
+
+static char* FormConfigureJson(const Form_gs_opt_model modelinfo, const char* labels);
+static char* FormSetupJson(const char* modelName);
+static void ConfigureModel(Form_gs_opt_model modelinfo, const char* labels, char** filename);
+static char* train_model(const Form_gs_opt_model modelinfo, char* filename);
+static void Unlinkfile(char* filename);
+
+/**
+ * @Description: registered function for the plan tree model training procedure
+ * @in maxEpoch - max number of epochs to train
+ * @in learningRate - learning rate for neural network back propagation
+ * @in hiddenUnits - number of hidden units in the fully-connected layer
+ * @out catalog gs_opt_model's related columns will be updated accordingly
+ * @return successful or other error/warning messages
+ */
+char* TreeModelTrain(Form_gs_opt_model modelinfo, char* labels)
+{
+    char* filename = (char*)palloc0(sizeof(char) * MAX_LEN_TEXT);
+    char* buf = NULL;
+    /* 1. configure the remote server for training to see if the server is ready */
+    ConfigureModel(modelinfo, labels, &filename);
+
+    /* 2. save encoded data to file */
+    SaveDataToFile(filename);
+
+    /* 3. send saved file to server to trigger training */
+    buf = train_model(modelinfo, filename);
+    return buf;
+}
+
+/**
+ * @Description: function for plan tree model predicting procedure
+ * @in data_id - id of dataset to be predicted
+ * @return successful or other error/warning messages
+ */
+char* TreeModelPredict(const char* modelName, char* filepath, const char* ip, int port)
+{
+    /* 1.
setup the model for prediction */ + char* buf = (char*)palloc0(sizeof(char) * CURL_BUF_SIZE); + AiEngineConnInfo* conninfo = (AiEngineConnInfo*)palloc0(sizeof(AiEngineConnInfo)); + char portStr[PORT_LEN] = {'\0'}; + errno_t ret = sprintf_s(portStr, PORT_LEN, "%d", port); + securec_check_ss(ret, "\0", "\0"); + conninfo->host = pstrdup(ip); + conninfo->port = pstrdup(portStr); + conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_PREPREDICT); + conninfo->header = pstrdup(PYTHON_SERVER_HEADER_JSON); + conninfo->json_string = FormSetupJson(modelName); + if (!TryConnectRemoteServer(conninfo, &buf)) { + DestroyConnInfo(conninfo); + ParseResBuf(buf, filepath, "AI engine connection failed."); + return buf; + } + + switch (buf[0]) { + case '0': { + ereport(NOTICE, (errmodule(MOD_OPT_AI), errmsg("Model setup successfully."))); + break; + } + case 'M': { + ParseResBuf(buf, filepath, "Internal error: missing compulsory key."); + break; + } + case 'i': { + ParseResBuf(buf, filepath, "Internal error: failed to load model, please retrain."); + break; + } + case 'N': { + ParseResBuf(buf, filepath, "Internal error: model not found, please make sure the model is trained."); + break; + } + case 'R': { + ParseResBuf(buf, filepath, "Another session is running on AIEngine. If not, please restart AIEngine"); + break; + } + default: { + ParseResBuf(buf, filepath, "Internal error: unknown error."); + break; + } + } + /* 2. send saved file to server to trigger prediction */ + conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_PREDICT); + conninfo->url = NULL; + conninfo->header = NULL; + conninfo->json_string = NULL; + conninfo->file_tag = pstrdup("file"); + conninfo->file_path = pstrdup(filepath); + if (!TryConnectRemoteServer(conninfo, &buf)) { + ParseResBuf(buf, filepath, "AI engine connection failed."); + return buf; + } + switch (buf[0]) { + case 'M': { + ParseResBuf(buf, filepath, "Internal error: fail to load the file to predict."); + break; + } + case 'S': { + ParseResBuf(buf, filepath, "Internal error: session is not loaded, model setup required."); + break; + } + default: { + break; + } + } + return buf; +} + +static char* FormConfigureJson(const Form_gs_opt_model modelinfo, const char* labels) +{ + char* result = (char*)palloc0(sizeof(char) * MAX_LEN_JSON); + cJSON* jsonObj = cJSON_CreateObject(); + cJSON_AddStringToObject(jsonObj, "template_name", modelinfo->template_name.data); + cJSON_AddStringToObject(jsonObj, "model_name", modelinfo->model_name.data); + cJSON_AddNumberToObject(jsonObj, "max_epoch", modelinfo->max_epoch); + cJSON_AddNumberToObject(jsonObj, "learning_rate", modelinfo->learning_rate); + cJSON_AddNumberToObject(jsonObj, "dim_red", modelinfo->dim_red); + cJSON_AddNumberToObject(jsonObj, "hidden_units", modelinfo->hidden_units); + cJSON_AddNumberToObject(jsonObj, "batch_size", modelinfo->batch_size); + cJSON_AddStringToObject(jsonObj, "labels", labels); + char* buf = cJSON_Print(jsonObj); + if (buf != NULL) { + result = pstrdup(buf); + cJSON_free(buf); + } + cJSON_Delete(jsonObj); + return result; +} + +static char* FormSetupJson(const char* modelName) +{ + char* result = (char*)palloc0(sizeof(char) * MAX_LEN_JSON); + cJSON* jsonObj = cJSON_CreateObject(); + cJSON_AddStringToObject(jsonObj, "model_name", modelName); + char* buf = cJSON_Print(jsonObj); + if (buf != NULL) { + result = pstrdup(buf); + cJSON_free(buf); + } + cJSON_Delete(jsonObj); + return result; +} + +/* configure the remote server for training to see if the server is ready */ +static void ConfigureModel(Form_gs_opt_model 
+{
+    char* buf = NULL;
+    errno_t ret = sprintf_s(
+        *filename, MAX_LEN_TEXT - 1, "%s-%s", TRAIN_DATASET_FILEPATH, timestamptz_to_str(GetCurrentTimestamp()));
+    securec_check_ss(ret, "\0", "\0");
+    AiEngineConnInfo* conninfo = (AiEngineConnInfo*)palloc0(sizeof(AiEngineConnInfo));
+    char portStr[PORT_LEN] = {'\0'};
+    ret = sprintf_s(portStr, PORT_LEN, "%d", modelinfo->port);
+    securec_check_ss(ret, "\0", "\0");
+    conninfo->host = pstrdup(modelinfo->ip.data);
+    conninfo->port = pstrdup(portStr);
+    conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_PRETRAIN);
+    conninfo->header = pstrdup(PYTHON_SERVER_HEADER_JSON);
+    conninfo->json_string = FormConfigureJson(modelinfo, labels);
+    if (!TryConnectRemoteServer(conninfo, &buf)) {
+        ParseConfigBuf(buf, conninfo, "AI engine connection failed.");
+    }
+
+    switch (buf[0]) {
+        case '0': {
+            pfree_ext(buf);
+            DestroyConnInfo(conninfo);
+            ereport(NOTICE,
+                (errmodule(MOD_OPT_AI),
+                    errcode(ERRCODE_UNEXPECTED_NODE_STATE),
+                    errmsg("Model configured successfully.")));
+            break;
+        }
+        case 'F': {
+            ParseConfigBuf(buf, conninfo, "AIEngine internal error: configuration contains type error.");
+            break;
+        }
+        case 'I': {
+            ParseConfigBuf(buf, conninfo, "AIEngine internal error: key not recognized.");
+            break;
+        }
+        case 'M': {
+            ParseConfigBuf(buf, conninfo, "AIEngine internal error: missing compulsory key.");
+            break;
+        }
+        default: {
+            ParseConfigBuf(buf, conninfo, "AIEngine internal error: unknown error.");
+            break;
+        }
+    }
+}
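ConfigureModel above and train_model below both decode the AIEngine reply the same way: the first byte of the response body is a status code, '0' on success, with other letters selecting an error class. A table-driven equivalent of those switch statements, as a standalone sketch (the status table and the LookupAiEngineError helper are hypothetical, not part of this patch):

```
#include <cstddef>

/* Sketch of the status-byte convention used by the AIEngine endpoints;
 * the table entries mirror the ConfigureModel cases above. */
struct AiEngineStatus {
    char code;
    const char* message;
};

static const AiEngineStatus kConfigureErrors[] = {
    {'F', "AIEngine internal error: configuration contains type error."},
    {'I', "AIEngine internal error: key not recognized."},
    {'M', "AIEngine internal error: missing compulsory key."},
};

/* Returns NULL on success ('0'), otherwise the error text to report. */
static const char* LookupAiEngineError(const char* buf)
{
    if (buf == NULL) {
        return "AI engine connection failed.";
    }
    if (buf[0] == '0') {
        return NULL;
    }
    for (size_t i = 0; i < sizeof(kConfigureErrors) / sizeof(kConfigureErrors[0]); i++) {
        if (kConfigureErrors[i].code == buf[0]) {
            return kConfigureErrors[i].message;
        }
    }
    return "AIEngine internal error: unknown error.";
}
```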
+
+static char* train_model(const Form_gs_opt_model modelinfo, char* filename)
+{
+    char* buf = NULL;
+    char portStr[PORT_LEN] = {'\0'};
+    errno_t ret = sprintf_s(portStr, PORT_LEN, "%d", modelinfo->port);
+    securec_check_ss(ret, "\0", "\0");
+    AiEngineConnInfo* conninfo = (AiEngineConnInfo*)palloc0(sizeof(AiEngineConnInfo));
+    conninfo->host = pstrdup(modelinfo->ip.data);
+    conninfo->port = pstrdup(portStr);
+    conninfo->request_api = pstrdup(PYTHON_SERVER_ROUTE_TRAIN);
+    conninfo->url = NULL;
+    conninfo->header = NULL;
+    conninfo->json_string = NULL;
+    conninfo->file_tag = pstrdup("file");
+    conninfo->file_path = pstrdup(filename);
+    if (!TryConnectRemoteServer(conninfo, &buf)) {
+        DestroyConnInfo(conninfo);
+        pfree_ext(buf);
+        ereport(ERROR,
+            (errmodule(MOD_OPT_AI), errcode(ERRCODE_UNEXPECTED_NODE_STATE), errmsg("AI engine connection failed.")));
+        return buf;
+    }
+
+    DestroyConnInfo(conninfo);
+    Unlinkfile(filename);
+    switch (buf[0]) {
+        case 'M': {
+            pfree_ext(buf);
+            ereport(ERROR,
+                (errmodule(MOD_OPT_AI),
+                    errcode(ERRCODE_DATA_CORRUPTED),
+                    errmsg("AIEngine internal error: missing compulsory key.")));
+            break;
+        }
+        case 'R': {
+            pfree_ext(buf);
+            ereport(ERROR,
+                (errmodule(MOD_OPT_AI),
+                    errcode(ERRCODE_UNEXPECTED_NODE_STATE),
+                    errmsg("Another session is running on AIEngine. If not, please restart AIEngine.")));
+            break;
+        }
+        default: {
+            break;
+        }
+    }
+    return buf;
+}
+
+static void Unlinkfile(char* filename)
+{
+    if (access(filename, F_OK) == 0) {
+        if (unlink(filename) < 0) {
+            ereport(ERROR,
+                (errmodule(MOD_OPT_AI),
+                    errcode_for_file_access(),
+                    errmsg("could not unlink file \"%s\": %m", filename)));
+        } else {
+            ereport(LOG, (errmsg("Unlinked file: \"%s\"", filename)));
+        }
+    } else {
+        ereport(ERROR,
+            (errmodule(MOD_OPT_AI), errcode_for_file_access(), errmsg("could not access file \"%s\": %m", filename)));
+    }
+}
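The nodegroups.cpp hunks that follow introduce a second, coarser lock around the node-group hash table: lookups, inserts, and single-entry deletes take NgroupDestoryLock in shared mode before the per-partition lock, while ngroup_info_hash_destory holds it exclusively, so a full teardown can never interleave with a concurrent reader or writer. A minimal standalone model of that ordering (std::shared_mutex stands in for the LWLock; every name below is illustrative, not the kernel API):

```
#include <mutex>
#include <shared_mutex>
#include <unordered_map>

static std::shared_mutex g_destroy_gate; /* models NgroupDestoryLock */
static std::mutex g_partition_lock;      /* models one hash-partition LWLock */
static std::unordered_map<unsigned int, int> g_table;

/* Reader path: take the gate in shared mode first, then the partition
 * lock, mirroring ngroup_info_hash_search below. */
static int Lookup(unsigned int oid)
{
    std::shared_lock<std::shared_mutex> gate(g_destroy_gate);
    std::lock_guard<std::mutex> part(g_partition_lock);
    auto it = g_table.find(oid);
    return (it == g_table.end()) ? -1 : it->second;
}

/* Destroy path: the gate is held exclusively, so no Lookup can be in
 * flight while the whole table is dropped. */
static void DestroyAll()
{
    std::unique_lock<std::shared_mutex> gate(g_destroy_gate);
    g_table.clear();
}
```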
diff --git a/src/gausskernel/optimizer/util/nodegroups.cpp b/src/gausskernel/optimizer/util/nodegroups.cpp
index 9d1243293..c0de157f4 100644
--- a/src/gausskernel/optimizer/util/nodegroups.cpp
+++ b/src/gausskernel/optimizer/util/nodegroups.cpp
@@ -2773,7 +2773,7 @@ Bitmapset *ngroup_info_hash_search(Oid ngroup_oid)
     uint32 hashcode = oid_hash((const void *)&ngroup_oid, sizeof(ngroup_oid));
     LWLock *new_partition_lock = ngroup_mapping_partitionlock(hashcode);
-
+    (void)LWLockAcquire(NgroupDestoryLock, LW_SHARED);
     (void)LWLockAcquire(new_partition_lock, LW_SHARED);
     NGroupInfo *ngroup_info = (NGroupInfo *)hash_search(g_instance.ngroup_hash_table, &ngroup_oid, HASH_FIND, &found);
@@ -2782,6 +2782,7 @@ Bitmapset *ngroup_info_hash_search(Oid ngroup_oid)
         bms_nodeids = bms_copy(ngroup_info->bms_nodeids);
     }
     LWLockRelease(new_partition_lock);
+    LWLockRelease(NgroupDestoryLock);
     return bms_nodeids;
 }
@@ -2796,7 +2797,7 @@ void ngroup_info_hash_insert(Oid ngroup_oid, Bitmapset *bms_node_ids)
     MemoryContext old_mem_context = MemoryContextSwitchTo(g_instance.ngroup_hash_table->hcxt);
     bms_node_ids_copy = bms_copy(bms_node_ids);
     MemoryContextSwitchTo(old_mem_context);
-
+    (void)LWLockAcquire(NgroupDestoryLock, LW_SHARED);
     (void)LWLockAcquire(new_partition_lock, LW_EXCLUSIVE);
     NGroupInfo *ngroup_info = (NGroupInfo *)hash_search(g_instance.ngroup_hash_table, &ngroup_oid, HASH_ENTER, &found);
@@ -2805,14 +2806,16 @@ void ngroup_info_hash_insert(Oid ngroup_oid, Bitmapset *bms_node_ids)
         ngroup_info->bms_nodeids = bms_node_ids_copy;
     } else {
         LWLockRelease(new_partition_lock);
+        LWLockRelease(NgroupDestoryLock);
         pfree(bms_node_ids_copy);
         ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("failed to insert node group hash table")));
     }
     LWLockRelease(new_partition_lock);
+    LWLockRelease(NgroupDestoryLock);
 }
 
-void ngroup_info_hash_delete(Oid ngroup_oid)
+void ngroup_info_hash_delete(Oid ngroup_oid, bool is_destory)
 {
     if (InvalidOid == ngroup_oid) {
         ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NODE_STATE),
@@ -2822,8 +2825,10 @@ void ngroup_info_hash_delete(Oid ngroup_oid)
     Bitmapset *bms_ptr = NULL;
     uint32 hashcode = oid_hash((const void *)&ngroup_oid, sizeof(ngroup_oid));
     LWLock *new_partition_lock = ngroup_mapping_partitionlock(hashcode);
-
-    (void)LWLockAcquire(new_partition_lock, LW_EXCLUSIVE);
+    if (!is_destory) {
+        (void)LWLockAcquire(NgroupDestoryLock, LW_SHARED);
+    }
+    (void)LWLockAcquire(new_partition_lock, LW_EXCLUSIVE);
     NGroupInfo *ngroup_info = (NGroupInfo *)hash_search(g_instance.ngroup_hash_table, &ngroup_oid, HASH_FIND, &found);
     if (ngroup_info)
@@ -2831,7 +2836,13 @@ void ngroup_info_hash_delete(Oid ngroup_oid)
         hash_search(g_instance.ngroup_hash_table, &ngroup_oid, HASH_REMOVE, &found);
     LWLockRelease(new_partition_lock);
+    if (!is_destory) {
+        LWLockRelease(NgroupDestoryLock);
+    }
     if (!found && ngroup_info) {
+        if (is_destory) {
+            LWLockRelease(NgroupDestoryLock);
+        }
         ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NODE_STATE),
             errmsg("delete failed from nodegroup hash table, Oid is %u.", ngroup_oid)));
     }
@@ -2844,11 +2855,14 @@ void ngroup_info_hash_destory(void)
     HASH_SEQ_STATUS hash_seq;
     NGroupInfo* entry = NULL;
 
+    (void)LWLockAcquire(NgroupDestoryLock, LW_EXCLUSIVE);
+
     hash_seq_init(&hash_seq, g_instance.ngroup_hash_table);
     while ((entry = (NGroupInfo*)hash_seq_search(&hash_seq)) != NULL) {
         ereport(LOG, (errmsg(" ngroup_info_hash_print ngroup_info_hash__delete_all entry->oid: %d ", entry->oid)));
-        ngroup_info_hash_delete(entry->oid);
+        ngroup_info_hash_delete(entry->oid, true);
     }
+    LWLockRelease(NgroupDestoryLock);
 }
diff --git a/src/gausskernel/optimizer/util/optcommon.cpp b/src/gausskernel/optimizer/util/optcommon.cpp
index 2c66ab32d..f36977e78 100755
--- a/src/gausskernel/optimizer/util/optcommon.cpp
+++ b/src/gausskernel/optimizer/util/optcommon.cpp
@@ -18,6 +18,7 @@
 #include "knl/knl_variable.h"
 
 #include "executor/exec/execStream.h"
+#include "db4ai/db4ai_api.h"
 
 /*
  * Optimizer common function that return a plan node's plain text, we wrapper it from
@@ -448,7 +449,11 @@ void GetPlanNodePlainText(
             *pname = *sname = *pt_operation = "Row Adapter";
             break;
         case T_RowToVec:
-            *pname = *sname = *pt_operation = "Vector Adapter";
+            if (IsA(plan->lefttree, SeqScan) && ((SeqScan*)plan->lefttree)->scanBatchMode) {
+                *pname = *sname = *pt_operation = "Vector Adapter(type: BATCH MODE)";
+            } else {
+                *pname = *sname = *pt_operation = "Vector Adapter";
+            }
             break;
         case T_VecAppend:
             *pname = *sname = *pt_operation = "Vector Append";
@@ -481,11 +486,12 @@ void GetPlanNodePlainText(
             *pname = "Vector Merge";
             *sname = *pt_operation = "Vector Merge Join";
             break;
-        case T_GradientDescent:
-            *pname = *sname = *pt_options = "Gradient Descent";
-            break;
-        case T_KMeans:
-            *pname = *sname = *pt_options = "K-Means";
+        case T_TrainModel: {
+            TrainModel *ptrain = (TrainModel*)plan;
+            AlgorithmAPI *api = get_algorithm_api(ptrain->algorithm);
+            *pname = "Train Model";
+            *sname = *pt_operation = (char*) api->name;
+        }
             break;
         default:
             *pname = *sname = *pt_operation = "?\?\?";
diff --git a/src/gausskernel/optimizer/util/orclauses.cpp b/src/gausskernel/optimizer/util/orclauses.cpp
deleted file mode 100644
index 1b024b7f7..000000000
--- a/src/gausskernel/optimizer/util/orclauses.cpp
+++ /dev/null
@@ -1,332 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * orclauses.cpp
- *      Routines to extract restriction OR clauses from join OR clauses
- *
- * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd.
- *
- *
- * IDENTIFICATION
- *      src/gausskernel/optimizer/util/orclauses.cpp
- *
- * -------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include "optimizer/clauses.h"
-#include "optimizer/cost.h"
-#include "optimizer/orclauses.h"
-#include "optimizer/restrictinfo.h"
-
-static bool is_safe_restriction_clause_for(RestrictInfo* rinfo, RelOptInfo* rel);
-static Expr* extract_or_clause(RestrictInfo* or_rinfo, RelOptInfo* rel);
-static void consider_new_or_clause(PlannerInfo* root, RelOptInfo* rel, Expr* orclause, RestrictInfo* join_or_rinfo);
-
-/*
- * extract_restriction_or_clauses
- *      Examine join OR-of-AND clauses to see if any useful restriction OR
- *      clauses can be extracted. If so, add them to the query.
- * - * Although a join clause must reference multiple relations overall, - * an OR of ANDs clause might contain sub-clauses that reference just one - * relation and can be used to build a restriction clause for that rel. - * For example consider - * WHERE ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45)); - * We can transform this into - * WHERE ((a.x = 42 AND b.y = 43) OR (a.x = 44 AND b.z = 45)) - * AND (a.x = 42 OR a.x = 44) - * AND (b.y = 43 OR b.z = 45); - * which allows the latter clauses to be applied during the scans of a and b, - * perhaps as index qualifications, and in any case reducing the number of - * rows arriving at the join. In essence this is a partial transformation to - * CNF (AND of ORs format). It is not complete, however, because we do not - * unravel the original OR --- doing so would usually bloat the qualification - * expression to little gain. - * - * The added quals are partially redundant with the original OR, and therefore - * would cause the size of the joinrel to be underestimated when it is finally - * formed. (This would be true of a full transformation to CNF as well; the - * fault is not really in the transformation, but in clauselist_selectivity's - * inability to recognize redundant conditions.) We can compensate for this - * redundancy by changing the cached selectivity of the original OR clause, - * cancelling out the (valid) reduction in the estimated sizes of the base - * relations so that the estimated joinrel size remains the same. This is - * a MAJOR HACK: it depends on the fact that clause selectivities are cached - * and on the fact that the same RestrictInfo node will appear in every - * joininfo list that might be used when the joinrel is formed. - * And it doesn't work in cases where the size estimation is nonlinear - * (i.e., outer and IN joins). But it beats not doing anything. - * - * We examine each base relation to see if join clauses associated with it - * contain extractable restriction conditions. If so, add those conditions - * to the rel's baserestrictinfo and update the cached selectivities of the - * join clauses. Note that the same join clause will be examined afresh - * from the point of view of each baserel that participates in it, so its - * cached selectivity may get updated multiple times. - */ -void extract_restriction_or_clauses(PlannerInfo* root) -{ - /* Examine each baserel for potential join OR clauses */ - for (int rti = 1; rti < root->simple_rel_array_size; rti++) { - RelOptInfo* rel = root->simple_rel_array[rti]; - ListCell* lc = NULL; - - /* there may be empty slots corresponding to non-baserel RTEs */ - if (rel == NULL) { - continue; - } - - Assert(rel->relid == (uint)rti); /* sanity check on array */ - - /* ignore RTEs that are "other rels" */ - if (rel->reloptkind != RELOPT_BASEREL) { - continue; - } - - /* - * Find potentially interesting OR joinclauses. We can use any - * joinclause that is considered safe to move to this rel by the - * parameterized-path machinery, even though what we are going to do - * with it is not exactly a parameterized path. - * - * However, it seems best to ignore clauses that have been marked - * redundant (by setting norm_selec > 1). That likely can't happen - * for OR clauses, but let's be safe. 
- */ - foreach (lc, rel->joininfo) { - RestrictInfo* rinfo = (RestrictInfo*)lfirst(lc); - - if (restriction_is_or_clause(rinfo) && join_clause_is_movable_to(rinfo, rel->relid) && - rinfo->norm_selec <= 1) { - /* Try to extract a qual for this rel only */ - Expr* orclause = extract_or_clause(rinfo, rel); - - /* - * If successful, decide whether we want to use the clause, - * and insert it into the rel's restrictinfo list if so. - */ - if (orclause) { - consider_new_or_clause(root, rel, orclause, rinfo); - } - } - } - } -} - -/* - * Is the given primitive (non-OR) RestrictInfo safe to move to the rel? - */ -static bool is_safe_restriction_clause_for(RestrictInfo* rinfo, RelOptInfo* rel) -{ - /* - * We want clauses that mention the rel, and only the rel. So in - * particular pseudoconstant clauses can be rejected quickly. Then check - * the clause's Var membership. - */ - if (rinfo->pseudoconstant) { - return false; - } - if (!bms_equal(rinfo->clause_relids, rel->relids)) { - return false; - } - - /* We don't want extra evaluations of any volatile functions */ - if (contain_volatile_functions((Node*)rinfo->clause)) { - return false; - } - - return true; -} - -/* - * Try to extract a restriction clause mentioning only "rel" from the given - * join OR-clause. - * - * We must be able to extract at least one qual for this rel from each of - * the arms of the OR, else we can't use it. - * - * Returns an OR clause (not a RestrictInfo!) pertaining to rel, or NULL - * if no OR clause could be extracted. - */ -static Expr* extract_or_clause(RestrictInfo* or_rinfo, RelOptInfo* rel) -{ - List* clauselist = NIL; - ListCell* lc = NULL; - - /* - * Scan each arm of the input OR clause. Notice we descend into - * or_rinfo->orclause, which has RestrictInfo nodes embedded below the - * toplevel OR/AND structure. This is useful because we can use the info - * in those nodes to make is_safe_restriction_clause_for()'s checks - * cheaper. We'll strip those nodes from the returned tree, though, - * meaning that fresh ones will be built if the clause is accepted as a - * restriction clause. This might seem wasteful --- couldn't we re-use - * the existing RestrictInfos? But that'd require assuming that - * selectivity and other cached data is computed exactly the same way for - * a restriction clause as for a join clause, which seems undesirable. - */ - Assert(or_clause((Node*)or_rinfo->orclause)); - foreach (lc, ((BoolExpr*)or_rinfo->orclause)->args) { - Node* orarg = (Node*)lfirst(lc); - List* subclauses = NIL; - Node* subclause = NULL; - - /* OR arguments should be ANDs or sub-RestrictInfos */ - if (and_clause(orarg)) { - List* andargs = ((BoolExpr*)orarg)->args; - ListCell* lc2 = NULL; - - foreach (lc2, andargs) { - RestrictInfo* rinfo = (RestrictInfo*)lfirst(lc2); - - Assert(IsA(rinfo, RestrictInfo)); - if (restriction_is_or_clause(rinfo)) { - /* - * Recurse to deal with nested OR. Note we *must* recurse - * here, this isn't just overly-tense optimization: we - * have to descend far enough to find and strip all - * RestrictInfos in the expression. 
- */ - Expr* suborclause = NULL; - - suborclause = extract_or_clause(rinfo, rel); - if (suborclause) { - subclauses = lappend(subclauses, suborclause); - } - } else if (is_safe_restriction_clause_for(rinfo, rel)) { - subclauses = lappend(subclauses, rinfo->clause); - } - } - } else { - Assert(IsA(orarg, RestrictInfo)); - Assert(!restriction_is_or_clause((RestrictInfo*)orarg)); - if (is_safe_restriction_clause_for((RestrictInfo*)orarg, rel)) { - subclauses = lappend(subclauses, ((RestrictInfo*)orarg)->clause); - } - } - - /* - * If nothing could be extracted from this arm, we can't do anything - * with this OR clause. - */ - if (subclauses == NIL) { - return NULL; - } - - /* - * OK, add subclause(s) to the result OR. If we found more than one, - * we need an AND node. But if we found only one, and it is itself an - * OR node, add its subclauses to the result instead; this is needed - * to preserve AND/OR flatness (ie, no OR directly underneath OR). - */ - clauselist = lappend(clauselist, make_ands_explicit(subclauses)); - subclause = (Node*)make_ands_explicit(subclauses); - if (or_clause(subclause)) { - clauselist = list_concat(clauselist, list_copy(((BoolExpr*)subclause)->args)); - } else { - clauselist = lappend(clauselist, subclause); - } - } - - /* - * If we got a restriction clause from every arm, wrap them up in an OR - * node. (In theory the OR node might be unnecessary, if there was only - * one arm --- but then the input OR node was also redundant.) - */ - if (clauselist != NIL) { - return make_orclause(clauselist); - } - return NULL; -} - -/* - * Consider whether a successfully-extracted restriction OR clause is - * actually worth using. If so, add it to the planner's data structures, - * and adjust the original join clause (join_or_rinfo) to compensate. - */ -static void consider_new_or_clause(PlannerInfo* root, RelOptInfo* rel, Expr* orclause, RestrictInfo* join_or_rinfo) -{ - RestrictInfo* or_rinfo = NULL; - Selectivity or_selec, orig_selec; - - /* - * Build a RestrictInfo from the new OR clause. We can assume it's valid - * as a base restriction clause. - */ - or_rinfo = make_restrictinfo(orclause, true, false, false, join_or_rinfo->security_level, NULL, NULL, NULL); - - /* - * Estimate its selectivity. (We could have done this earlier, but doing - * it on the RestrictInfo representation allows the result to get cached, - * saving work later.) - */ - or_selec = clause_selectivity(root, (Node*)or_rinfo, 0, JOIN_INNER, NULL); - - /* - * The clause is only worth adding to the query if it rejects a useful - * fraction of the base relation's rows; otherwise, it's just going to - * cause duplicate computation (since we will still have to check the - * original OR clause when the join is formed). Somewhat arbitrarily, we - * set the selectivity threshold at 0.9. - */ - if (or_selec > 0.9) { - return; /* forget it */ - } - /* - * OK, add it to the rel's restriction-clause list. - */ - rel->baserestrictinfo = lappend(rel->baserestrictinfo, or_rinfo); - - /* - * Adjust the original join OR clause's cached selectivity to compensate - * for the selectivity of the added (but redundant) lower-level qual. This - * should result in the join rel getting approximately the same rows - * estimate as it would have gotten without all these shenanigans. - * - * XXX major hack alert: this depends on the assumption that the - * selectivity will stay cached. 
- * - * XXX another major hack: we adjust only norm_selec, the cached - * selectivity for JOIN_INNER semantics, even though the join clause - * might've been an outer-join clause. This is partly because we can't - * easily identify the relevant SpecialJoinInfo here, and partly because - * the linearity assumption we're making would fail anyway. (If it is an - * outer-join clause, "rel" must be on the nullable side, else we'd not - * have gotten here. So the computation of the join size is going to be - * quite nonlinear with respect to the size of "rel", so it's not clear - * how we ought to adjust outer_selec even if we could compute its - * original value correctly.) - */ - if (or_selec > 0) { - SpecialJoinInfo sjinfo; - - /* - * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare - * approx_tuple_count() in costsize.c.) - */ - sjinfo.type = T_SpecialJoinInfo; - sjinfo.min_lefthand = bms_difference(join_or_rinfo->clause_relids, rel->relids); - sjinfo.min_righthand = rel->relids; - sjinfo.syn_lefthand = sjinfo.min_lefthand; - sjinfo.syn_righthand = sjinfo.min_righthand; - sjinfo.jointype = JOIN_INNER; - /* we don't bother trying to make the remaining fields valid */ - sjinfo.lhs_strict = false; - sjinfo.delay_upper_joins = false; - sjinfo.join_quals = NIL; - - /* Compute inner-join size */ - orig_selec = clause_selectivity(root, (Node*)join_or_rinfo, 0, JOIN_INNER, &sjinfo); - - /* And hack cached selectivity so join size remains the same */ - join_or_rinfo->norm_selec = orig_selec / or_selec; - /* ensure result stays in sane range, in particular not "redundant" */ - if (join_or_rinfo->norm_selec > 1) { - join_or_rinfo->norm_selec = 1; - } - /* as explained above, we don't touch outer_selec */ - } -} diff --git a/src/gausskernel/optimizer/util/pathnode.cpp b/src/gausskernel/optimizer/util/pathnode.cpp index 7d732bd34..7f26d6bc9 100755 --- a/src/gausskernel/optimizer/util/pathnode.cpp +++ b/src/gausskernel/optimizer/util/pathnode.cpp @@ -1296,6 +1296,35 @@ static void inherit_child_hintvalue(Path* new_path, Path* outer_path, Path* inne } } +/* + * @brief set_predpush_same_level_hint + * Set predpush same level hint state. If given hint is valid for the new path, increase the hint value. + */ +static void set_predpush_same_level_hint(HintState* hstate, RelOptInfo* rel, Path* path) +{ + /* + * Guarding conditions. + */ + Assert(path != NULL); + if (path->param_info == NULL || rel->reloptkind != RELOPT_BASEREL) { + return; + } + + if (hstate == NULL || hstate->predpush_same_level_hint == NIL) { + return; + } + + ListCell *lc = NULL; + foreach (lc, hstate->predpush_same_level_hint) { + PredpushSameLevelHint *predpushSameLevelHint = (PredpushSameLevelHint*)lfirst(lc); + if (is_predpush_same_level_matched(predpushSameLevelHint, rel->relids, path->param_info)) { + predpushSameLevelHint->base.state = HINT_STATE_USED; + path->hint_value++; + break; + } + } +} + /* * @Description: Set hint values to this new path. * @in join_rel: Join relition. @@ -1327,6 +1356,11 @@ void set_hint_value(RelOptInfo* join_rel, Path* new_path, HintState* hstate) inherit_child_hintvalue(new_path, outer_path, inner_path); } + + /* Use bit-wise and instead, since root is not accessible and permit_predpush is not supported. 
*/ + if ((PRED_PUSH_FORCE & (uint)u_sess->attr.attr_sql.rewrite_rule)) { + set_predpush_same_level_hint(hstate, join_rel, new_path); + } } static void AddGatherJoinrel(PlannerInfo* root, RelOptInfo* parentRel, @@ -2139,6 +2173,7 @@ bool CheckBitmapHeapPathIsCrossbucket(Path* bitmapqual) /* * Support partiton index unusable. * Check if the index in bitmap heap path is unusable. Contains at least one, return false. + * Hypothetical index does not support partition index unusable. */ bool check_bitmap_heap_path_index_unusable(Path* bitmapqual, RelOptInfo* baserel) { @@ -2164,6 +2199,9 @@ bool check_bitmap_heap_path_index_unusable(Path* bitmapqual, RelOptInfo* baserel } else if (IsA(bitmapqual, IndexPath)) { IndexPath* ipath = (IndexPath*)bitmapqual; Oid index_oid = ipath->indexinfo->indexoid; + if (u_sess->attr.attr_sql.enable_hypo_index && ipath->indexinfo->hypothetical) { + return indexUnusable; + } indexUnusable = checkPartitionIndexUnusable(index_oid, baserel->partItrs, baserel->pruning_result); if (!indexUnusable) { return indexUnusable; diff --git a/src/gausskernel/optimizer/util/pgxcship.cpp b/src/gausskernel/optimizer/util/pgxcship.cpp index b9a59c968..478b71ad1 100644 --- a/src/gausskernel/optimizer/util/pgxcship.cpp +++ b/src/gausskernel/optimizer/util/pgxcship.cpp @@ -4397,6 +4397,26 @@ bool check_insert_subquery_on_singlenode( return false; } +bool check_replicated_junktlist(Query* subquery) +{ + ListCell *lc = NULL; + + foreach (lc, subquery->targetList) { + TargetEntry *en = (TargetEntry *)lfirst(lc); + + if (!IsA(en->expr, Var)) { + continue; + } + + Var *v = (Var *)en->expr; + if (v->varattno < 0) { + return true; + } + } + + return false; +} + /* * check_insert_subquery_shippability: * Check if an INSERT SELECT query is shippable. @@ -4470,6 +4490,12 @@ static void check_insert_subquery_shippability(Query* query, Query* subquery, Sh return; } + if (IsExecNodesReplicated(exec_nodes_rte) && + IsExecNodesReplicated(exec_nodes_qry) && + check_replicated_junktlist(subquery)) { + return; + } + /* Support HASH/REPLICATION/LIST/RANGE for now */ bool isDistBySlice = IsLocatorDistributedBySlice(exec_nodes_rte->baselocatortype); if (exec_nodes_rte->baselocatortype != LOCATOR_TYPE_HASH && !isDistBySlice) { diff --git a/src/gausskernel/optimizer/util/plananalyzer.cpp b/src/gausskernel/optimizer/util/plananalyzer.cpp index de89774f7..0dfac796a 100644 --- a/src/gausskernel/optimizer/util/plananalyzer.cpp +++ b/src/gausskernel/optimizer/util/plananalyzer.cpp @@ -1067,7 +1067,7 @@ void RecordQueryPlanIssues(const List* results) } /* Hold the planer issue info in memory context of workload manager */ - AutoContextSwitch memSwitch(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_CBB)); + AutoContextSwitch memSwitch(g_instance.wlm_cxt->query_resource_track_mcxt); t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->query_plan_issue = pstrdup(max_issue_desc); return; diff --git a/src/gausskernel/optimizer/util/plancat.cpp b/src/gausskernel/optimizer/util/plancat.cpp index f9ad97f38..f04251166 100755 --- a/src/gausskernel/optimizer/util/plancat.cpp +++ b/src/gausskernel/optimizer/util/plancat.cpp @@ -67,6 +67,8 @@ #define DEFAULT_PAGES_NUM (u_sess->attr.attr_sql.enable_global_stats ? 
10 * u_sess->pgxc_cxt.NumDataNodes : 10) #define DEFAULT_TUPLES_NUM DEFAULT_PAGES_NUM +#define ESTIMATE_SUBPARTITION_NUMBER 100 + /* Hook for plugins to get control in get_relation_info() */ THR_LOCAL get_relation_info_hook_type get_relation_info_hook = NULL; @@ -112,12 +114,7 @@ static void acquireSamplesForPartitionedRelationModify( return; } - List* partoidlist; - if (RelationIsSubPartitioned(relation)) { - partoidlist = RelationGetSubPartitionOidList(relation, lmode); - } else { - partoidlist = relationGetPartitionOidList(relation); - } + List* partoidlist = relationGetPartitionOidList(relation); int totalPartitionNumber = list_length(partoidlist); int nonzeroPartitionNumber = 0; BlockNumber partPages = 0; @@ -141,47 +138,22 @@ static void acquireSamplesForPartitionedRelationModify( continue; } - if (RelationIsSubPartitioned(relation)) { - Oid subparentid = partid_get_parentid(partitionOid); - if (!OidIsValid(subparentid) || - !ConditionalLockPartition(relation->rd_id, subparentid, lmode, PARTITION_LOCK) || - !ConditionalLockPartition(subparentid, partitionOid, lmode, PARTITION_LOCK)) { - continue; - } - Partition part = partitionOpen(relation, subparentid, lmode); - Relation partrel = partitionGetRelation(relation, part); - Partition subpart = partitionOpen(partrel, partitionOid, lmode); - currentPartPages = PartitionGetNumberOfBlocksInFork(partrel, subpart, MAIN_FORKNUM, true); - /* for empty heap, PartitionGetNumberOfBlocks() return 0 */ - if (currentPartPages > 0) { - if (sampledPartitionOids != NULL) - *sampledPartitionOids = lappend_oid(*sampledPartitionOids, partitionOid); - - partPages += currentPartPages; - relpartPages += subpart->pd_part->relpages; - nonzeroPartitionNumber++; - } - partitionClose(partrel, subpart, lmode); - releaseDummyRelation(&partrel); - partitionClose(relation, part, lmode); - } else { - if (!ConditionalLockPartition(relation->rd_id, partitionOid, lmode, PARTITION_LOCK)) { - continue; - } - - Partition part = partitionOpen(relation, partitionOid, lmode); - currentPartPages = PartitionGetNumberOfBlocksInFork(relation, part, MAIN_FORKNUM, true); - /* for empty heap, PartitionGetNumberOfBlocks() return 0 */ - if (currentPartPages > 0) { - if (sampledPartitionOids != NULL) - *sampledPartitionOids = lappend_oid(*sampledPartitionOids, partitionOid); - - partPages += currentPartPages; - relpartPages += part->pd_part->relpages; - nonzeroPartitionNumber++; - } - partitionClose(relation, part, lmode); + if (!ConditionalLockPartition(relation->rd_id, partitionOid, lmode, PARTITION_LOCK)) { + continue; } + + Partition part = partitionOpen(relation, partitionOid, lmode); + currentPartPages = PartitionGetNumberOfBlocksInFork(relation, part, MAIN_FORKNUM, true); + /* for empty heap, PartitionGetNumberOfBlocks() return 0 */ + if (currentPartPages > 0) { + if (sampledPartitionOids != NULL) + *sampledPartitionOids = lappend_oid(*sampledPartitionOids, partitionOid); + + partPages += currentPartPages; + relpartPages += part->pd_part->relpages; + nonzeroPartitionNumber++; + } + partitionClose(relation, part, lmode); } if (relpartPages > 0 && nonzeroPartitionNumber > ESTIMATE_PARTITION_NUMBER_THRESHOLD) { @@ -204,9 +176,97 @@ int GetTotalPartitionNumber(Relation relation) return totalPartitionNumber; } +static void acquireSamplesForSubPartitionedRelation(Relation relation, LOCKMODE lmode, RelPageType *samplePages, + List **sampledPartitionOids) +{ + Assert(RelationIsSubPartitioned(relation)); + + /* we set no lock here to avoid wait on lock when another session still holds 
the lock */ + List *subpartidlist = RelationGetSubPartitionOidList(relation, NoLock); + int totalSubPartitionNumber = list_length(subpartidlist); + int nonzeroSubPartitionNumber = 0; + int notAvailSubPartitionNumber = 0; + BlockNumber partPages = 0; + BlockNumber currentPartPages = 0; + Oid relid = RelationGetRelid(relation); + + int iterations = 0; + ListCell *cell = NULL; + + int stepNum = totalSubPartitionNumber > ESTIMATE_SUBPARTITION_NUMBER ? + totalSubPartitionNumber / ESTIMATE_PARTITION_NUMBER :1; + foreach (cell, subpartidlist) { + iterations++; + if (iterations % stepNum != 0) { + continue; + } + + Oid subpartid = DatumGetObjectId(lfirst(cell)); + if (!OidIsValid(subpartid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The subpartition of relation %u is invalid", RelationGetRelid(relation)), + errdetail("N/A"), + errcause("Maybe the partition table is dropped"), + erraction("Check system table 'pg_partition' for more information"))); + } + Oid partid = partid_get_parentid(subpartid); + if (!OidIsValid(partid)) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("The partition which owns the subpartition %u is missing", subpartid), + errdetail("N/A"), + errcause("Maybe the subpartition table is dropped"), + erraction("Check system table 'pg_partition' for more information"))); + } + + if (!ConditionalLockPartition(relid, partid, lmode, PARTITION_LOCK) || + !ConditionalLockPartition(partid, subpartid, lmode, PARTITION_LOCK)) { + notAvailSubPartitionNumber++; + continue; + } + + /* the partition may be dropped */ + Partition part = tryPartitionOpen(relation, partid, lmode); + if (part == NULL) { + totalSubPartitionNumber--; + continue; + } + Relation partrel = partitionGetRelation(relation, part); + /* the subpartition may be dropped */ + Partition subpart = tryPartitionOpen(partrel, subpartid, lmode); + if (subpart == NULL) { + totalSubPartitionNumber--; + continue; + } + currentPartPages = PartitionGetNumberOfBlocksInFork(partrel, subpart, MAIN_FORKNUM, true); + /* for empty heap, PartitionGetNumberOfBlocks() return 0 */ + if (currentPartPages > 0) { + if (sampledPartitionOids != NULL) + *sampledPartitionOids = lappend_oid(*sampledPartitionOids, subpartid); + + partPages += currentPartPages; + nonzeroSubPartitionNumber++; + } + partitionClose(partrel, subpart, lmode); + releaseDummyRelation(&partrel); + partitionClose(relation, part, lmode); + + if (nonzeroSubPartitionNumber == ESTIMATE_SUBPARTITION_NUMBER) { + break; + } + } + + *samplePages = ComputeTheTotalPages(relation, nonzeroSubPartitionNumber, notAvailSubPartitionNumber, + totalSubPartitionNumber, partPages); +} + static void acquireSamplesForPartitionedRelation( Relation relation, LOCKMODE lmode, RelPageType* samplePages, List** sampledPartitionOids) { + if (RelationIsSubPartitioned(relation)) { + acquireSamplesForSubPartitionedRelation(relation, lmode, samplePages, sampledPartitionOids); + return; + } + if (RelationIsPartitioned(relation)) { if (relation->rd_rel->relkind == RELKIND_RELATION) { int totalRangePartitionNumber = GetTotalPartitionNumber(relation); @@ -217,7 +277,6 @@ static void acquireSamplesForPartitionedRelation( BlockNumber currentPartPages = 0; Partition part = NULL; int notAvailPartitionCnt = 0; - RelPageType subPartitionPages = 0; for (partitionNumber = 0; partitionNumber < totalPartitionNumber; partitionNumber++) { Oid partitionOid = InvalidOid; @@ -248,48 +307,36 @@ static void acquireSamplesForPartitionedRelation( } #endif } - if (RelationIsSubPartitioned(relation)) { 
- Partition partition = partitionOpen(relation, partitionOid, lmode); - Relation subPartitionRelation = partitionGetRelation(relation, partition); - RelPageType curpages = 0; - acquireSamplesForPartitionedRelation(subPartitionRelation, lmode, &curpages, sampledPartitionOids); - releaseDummyRelation(&subPartitionRelation); - partitionClose(relation, partition, lmode); - subPartitionPages += curpages; - } else { - if (!OidIsValid(partitionOid)) - continue; - if (!ConditionalLockPartition(relation->rd_id, partitionOid, lmode, PARTITION_LOCK)) { - notAvailPartitionCnt++; - continue; - } + if (!OidIsValid(partitionOid)) + continue; - part = partitionOpen(relation, partitionOid, lmode); - currentPartPages = PartitionGetNumberOfBlocksInFork(relation, part, MAIN_FORKNUM, true); - partitionClose(relation, part, lmode); + if (!ConditionalLockPartition(relation->rd_id, partitionOid, lmode, PARTITION_LOCK)) { + notAvailPartitionCnt++; + continue; + } - /* for empty heap, PartitionGetNumberOfBlocks() return 0 */ - if (currentPartPages > 0) { - if (sampledPartitionOids != NULL) - *sampledPartitionOids = lappend_oid(*sampledPartitionOids, partitionOid); + part = partitionOpen(relation, partitionOid, lmode); + currentPartPages = PartitionGetNumberOfBlocksInFork(relation, part, MAIN_FORKNUM, true); + partitionClose(relation, part, lmode); - partPages += currentPartPages; - if (++nonzeroPartitionNumber == ESTIMATE_PARTITION_NUMBER) { - break; - } + /* for empty heap, PartitionGetNumberOfBlocks() return 0 */ + if (currentPartPages > 0) { + if (sampledPartitionOids != NULL) + *sampledPartitionOids = lappend_oid(*sampledPartitionOids, partitionOid); + + partPages += currentPartPages; + if (++nonzeroPartitionNumber == ESTIMATE_PARTITION_NUMBER) { + break; } } } - if (subPartitionPages != 0) { - *samplePages = subPartitionPages; - } else { - *samplePages = ComputeTheTotalPages(relation, nonzeroPartitionNumber, notAvailPartitionCnt, - totalPartitionNumber, partPages); - } + *samplePages = ComputeTheTotalPages(relation, nonzeroPartitionNumber, notAvailPartitionCnt, + totalPartitionNumber, partPages); - if (*samplePages < relation->rd_rel->relpages / ESTIMATE_PARTPAGES_THRESHOLD) { + if (nonzeroPartitionNumber == ESTIMATE_PARTITION_NUMBER && + *samplePages < relation->rd_rel->relpages / ESTIMATE_PARTPAGES_THRESHOLD) { if (sampledPartitionOids != NULL) list_free_ext(*sampledPartitionOids); acquireSamplesForPartitionedRelationModify(relation, lmode, samplePages, sampledPartitionOids); @@ -298,6 +345,43 @@ static void acquireSamplesForPartitionedRelation( } } +static RelPageType EstimatePartitionIndexPages(Relation relation, Relation indexRelation, List *sampledPartitionIds) +{ + /* normal partitioned index, including crossbucket index */ + if (sampledPartitionIds == NIL) { + return indexRelation->rd_rel->relpages; + } + + ListCell *cell = NULL; + BlockNumber indexPages = 0; + BlockNumber partIndexPages = 0; + BlockNumber indexrelPartPages = 0; /* analyzed pages, stored in pg_partition->relpages */ + int partitionNum = getNumberOfPartitions(relation); + foreach (cell, sampledPartitionIds) { + Oid partOid = lfirst_oid(cell); + Oid partIndexOid = getPartitionIndexOid(indexRelation->rd_id, partOid); + Partition partIndex = partitionOpen(indexRelation, partIndexOid, AccessShareLock); + partIndexPages += PartitionGetNumberOfBlocks(indexRelation, partIndex); + indexrelPartPages += partIndex->pd_part->relpages; + partitionClose(indexRelation, partIndex, AccessShareLock); + } + indexPages = partIndexPages * (partitionNum / 
sampledPartitionIds->length); + + if (!RelationIsSubPartitioned(relation)) { + indexPages = partIndexPages * (partitionNum / sampledPartitionIds->length); + if (indexrelPartPages > 0 && partitionNum > ESTIMATE_PARTITION_NUMBER && + partIndexPages < indexRelation->rd_rel->relpages / ESTIMATE_PARTPAGES_THRESHOLD) { + if (sampledPartitionIds->length > ESTIMATE_PARTITION_NUMBER_THRESHOLD) { + indexPages = partIndexPages * (indexRelation->rd_rel->relpages / (double)indexrelPartPages); + } else { + indexPages = indexRelation->rd_rel->relpages; + } + } + } + + return indexPages; +} + /* * get_relation_info - * Retrieves catalog information for a given relation. @@ -599,38 +683,7 @@ void get_relation_info(PlannerInfo* root, Oid relationObjectId, bool inhparent, /* global partitioned index */ info->pages = RelationGetNumberOfBlocks(indexRelation); } else { - /* normal partitioned index, including crossbucket index */ - ListCell* cell = NULL; - BlockNumber indexPages = 0; - BlockNumber partIndexPages = 0; - BlockNumber indexrelPartPages = 0; /* analyzed pages, stored in pg_partition->relpages */ - int partitionNum = getNumberOfPartitions(relation); - foreach (cell, sampledPartitionIds) { - Oid partOid = lfirst_oid(cell); - Oid partIndexOid = getPartitionIndexOid(indexRelation->rd_id, partOid); - Partition partIndex = partitionOpen(indexRelation, partIndexOid, AccessShareLock); - partIndexPages += PartitionGetNumberOfBlocks(indexRelation, partIndex); - indexrelPartPages += partIndex->pd_part->relpages; - partitionClose(indexRelation, partIndex, AccessShareLock); - } - // if sampled ESTIMATE_PARTITION_NUMBER, infer the pages of index, - // else partIndexPages is the actrual pages of index. - indexPages = partIndexPages; - if (sampledPartitionIds != NIL) { - if (sampledPartitionIds->length == ESTIMATE_PARTITION_NUMBER) - indexPages = partIndexPages * (partitionNum / ESTIMATE_PARTITION_NUMBER); - } - if (indexrelPartPages > 0 && - partitionNum > ESTIMATE_PARTITION_NUMBER && - partIndexPages < indexRelation->rd_rel->relpages / ESTIMATE_PARTPAGES_THRESHOLD) { - if (sampledPartitionIds->length > ESTIMATE_PARTITION_NUMBER_THRESHOLD) { - indexPages = - partIndexPages * (indexRelation->rd_rel->relpages / (double)indexrelPartPages); - } else { - indexPages = indexRelation->rd_rel->relpages; - } - } - info->pages = indexPages; + info->pages = EstimatePartitionIndexPages(relation, indexRelation, sampledPartitionIds); } } #ifdef PGXC @@ -1470,7 +1523,7 @@ List* build_index_tlist(PlannerInfo* root, IndexOptInfo* index, Relation heapRel if (indexkey < 0) { att_tup = SystemAttributeDefinition(indexkey, heapRelation->rd_rel->relhasoids, - RELATION_HAS_BUCKET(heapRelation)); + RELATION_HAS_BUCKET(heapRelation), RELATION_HAS_UIDS(heapRelation)); } else { att_tup = heapRelation->rd_att->attrs[indexkey - 1]; } diff --git a/src/gausskernel/optimizer/util/predtest.cpp b/src/gausskernel/optimizer/util/predtest.cpp index 851fe9c56..97f8f9887 100644 --- a/src/gausskernel/optimizer/util/predtest.cpp +++ b/src/gausskernel/optimizer/util/predtest.cpp @@ -97,7 +97,6 @@ static bool list_member_strip(const List* list, Expr* datum); static bool btree_predicate_proof(const Expr* predicate, const Node* clause, bool refute_it); static Oid get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it); static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue); - /* * predicate_implied_by * Recursively checks whether the clauses in restrictinfo_list imply @@ -1455,7 +1454,7 @@ static Oid get_btree_test_op(Oid 
pred_op, Oid clause_op, bool refute_it) hash_create("Btree proof lookup cache", 256, &ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); /* Arrange to flush cache on pg_amop changes */ - CacheRegisterSyscacheCallback(AMOPOPID, InvalidateOprProofCacheCallBack, (Datum)0); + CacheRegisterSessionSyscacheCallback(AMOPOPID, InvalidateOprProofCacheCallBack, (Datum)0); } key.pred_op = pred_op; @@ -1584,7 +1583,7 @@ static Oid get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it) /* * Callback for pg_amop inval events */ -static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) +void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) { HASH_SEQ_STATUS status; OprProofCacheEntry* hentry = NULL; diff --git a/src/gausskernel/optimizer/util/pruning.cpp b/src/gausskernel/optimizer/util/pruning.cpp index 95fd82dcd..2b1f51165 100644 --- a/src/gausskernel/optimizer/util/pruning.cpp +++ b/src/gausskernel/optimizer/util/pruning.cpp @@ -84,6 +84,28 @@ static PartitionMap* GetRelPartitionMap(Relation relation) return relation->partMap; } +static void CollectSubpartitionPruningResults(PruningResult* resPartition, Relation current_relation) +{ + if (!RelationIsSubPartitioned(current_relation)) { + return; + } + + int partSeq; + ListCell *cell = NULL; + Oid partitionOid = InvalidOid; + foreach (cell, resPartition->ls_rangeSelectedPartitions) { + partSeq = lfirst_int(cell); + partitionOid = getPartitionOidFromSequence(current_relation, partSeq); + SubPartitionPruningResult *subPartPruningRes = + PreGetSubPartitionFullPruningResult(current_relation, partitionOid); + if (subPartPruningRes == NULL) { + continue; + } + subPartPruningRes->partSeq = partSeq; + resPartition->ls_selectedSubPartitions = lappend(resPartition->ls_selectedSubPartitions, subPartPruningRes); + } +} + /* * @@GaussDB@@ * Brief @@ -112,26 +134,14 @@ PruningResult* GetPartitionInfo(PruningResult* result, EState* estate, Relation (resPartition->bm_rangeSelectedPartitions == NULL && PruningResultIsSubset(resPartition))) { destroyPruningResult(resPartition); resPartition = getFullPruningResult(current_relation); + CollectSubpartitionPruningResults(resPartition, current_relation); return resPartition; } if (PointerIsValid(resPartition) && !PruningResultIsFull(resPartition)) generateListFromPruningBM(resPartition); - if (RelationIsSubPartitioned(current_relation)) { - int partSeq = 0; - ListCell *cell = NULL; - Oid partitionOid = InvalidOid; - foreach (cell, resPartition->ls_rangeSelectedPartitions) { - partSeq = lfirst_int(cell); - partitionOid = getPartitionOidFromSequence(current_relation, partSeq); - SubPartitionPruningResult *subPartPruningRes = - PreGetSubPartitionFullPruningResult(current_relation, partitionOid); - if (subPartPruningRes == NULL) { - continue; - } - subPartPruningRes->partSeq = partSeq; - resPartition->ls_selectedSubPartitions = lappend(resPartition->ls_selectedSubPartitions, subPartPruningRes); - } - } + + CollectSubpartitionPruningResults(resPartition, current_relation); + return resPartition; } diff --git a/src/gausskernel/optimizer/util/restrictinfo.cpp b/src/gausskernel/optimizer/util/restrictinfo.cpp index 2493c94f8..bb30bfd8d 100644 --- a/src/gausskernel/optimizer/util/restrictinfo.cpp +++ b/src/gausskernel/optimizer/util/restrictinfo.cpp @@ -359,12 +359,6 @@ static RestrictInfo* make_restrictinfo_internal(Expr* clause, Expr* orclause, bo rc = memset_s(&restrictinfo->right_bucketsize, sizeof(BucketSize), 0, sizeof(BucketSize)); securec_check(rc, "\0", "\0"); - /* - * 
Mark whether this rinfo converted. - * default false, become true when type conversion happened during index matching process - */ - restrictinfo->converted = false; - return restrictinfo; } @@ -534,8 +528,9 @@ List* extract_actual_clauses(List* restrictinfo_list, bool pseudoconstant) AssertEreport(IsA(rinfo, RestrictInfo), MOD_OPT, ""); /* we consider the qual is real if pseudoconstant is true and clause_relids is non-null. */ - if ((rinfo->pseudoconstant == pseudoconstant) && (!pseudoconstant || bms_is_empty(rinfo->clause_relids))) + if ((rinfo->pseudoconstant == pseudoconstant) && (!pseudoconstant || bms_is_empty(rinfo->clause_relids))) { result = lappend(result, rinfo->clause); + } } return result; } diff --git a/src/gausskernel/optimizer/util/subpartitionpruning.cpp b/src/gausskernel/optimizer/util/subpartitionpruning.cpp index e27fefe3b..3e230a036 100644 --- a/src/gausskernel/optimizer/util/subpartitionpruning.cpp +++ b/src/gausskernel/optimizer/util/subpartitionpruning.cpp @@ -117,55 +117,22 @@ PruningResult* getFullPruningResult(Relation relation) for (i = 0; i < rangePartitionMap->rangeElementsNum; i++) { pruningRes->bm_rangeSelectedPartitions = bms_add_member(pruningRes->bm_rangeSelectedPartitions, i); pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, i); - if (RelationIsSubPartitioned(relation)) { - Oid partitionid = rangePartitionMap->rangeElements[i].partitionOid; - SubPartitionPruningResult *subPartPruningRes = - PreGetSubPartitionFullPruningResult(relation, partitionid); - if (subPartPruningRes == NULL) { - continue; - } - subPartPruningRes->partSeq = i; - pruningRes->ls_selectedSubPartitions = lappend(pruningRes->ls_selectedSubPartitions, subPartPruningRes); - } } - if (relation->partMap->type != PART_TYPE_INTERVAL) { pruningRes->intervalOffset = 0; pruningRes->intervalSelectedPartitions = NULL; } } else if (relation->partMap->type == PART_TYPE_LIST) { listPartitionMap = (ListPartitionMap*)relation->partMap; - for (i = 0; i < listPartitionMap->listElementsNum; i++) { pruningRes->bm_rangeSelectedPartitions = bms_add_member(pruningRes->bm_rangeSelectedPartitions, i); pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, i); - if (RelationIsSubPartitioned(relation)) { - Oid partitionid = listPartitionMap->listElements[i].partitionOid; - SubPartitionPruningResult *subPartPruningRes = - PreGetSubPartitionFullPruningResult(relation, partitionid); - if (subPartPruningRes == NULL) { - continue; - } - subPartPruningRes->partSeq = i; - pruningRes->ls_selectedSubPartitions = lappend(pruningRes->ls_selectedSubPartitions, subPartPruningRes); - } } } else if (relation->partMap->type == PART_TYPE_HASH) { hashPartitionMap = (HashPartitionMap*)relation->partMap; - for (i = 0; i < hashPartitionMap->hashElementsNum; i++) { pruningRes->bm_rangeSelectedPartitions = bms_add_member(pruningRes->bm_rangeSelectedPartitions, i); pruningRes->ls_rangeSelectedPartitions = lappend_int(pruningRes->ls_rangeSelectedPartitions, i); - if (RelationIsSubPartitioned(relation)) { - Oid partitionid = hashPartitionMap->hashElements[i].partitionOid; - SubPartitionPruningResult *subPartPruningRes = - PreGetSubPartitionFullPruningResult(relation, partitionid); - if (subPartPruningRes == NULL) { - continue; - } - subPartPruningRes->partSeq = i; - pruningRes->ls_selectedSubPartitions = lappend(pruningRes->ls_selectedSubPartitions, subPartPruningRes); - } } } @@ -323,7 +290,6 @@ static IndexesUsableType 
eliminate_subpartition_index_unusable(Relation heapRel, List* part_seqs = inputPruningResult->ls_rangeSelectedPartitions; ListCell* cell = NULL; - int idx = 0; bool unusable = false; // first copy out 2 copies @@ -354,7 +320,7 @@ static IndexesUsableType eliminate_subpartition_index_unusable(Relation heapRel, /* get index partition and add it to a list for following scan */ ListCell *lc = NULL; SubPartitionPruningResult *subPartPruning = - (SubPartitionPruningResult *)list_nth(inputPruningResult->ls_selectedSubPartitions, idx++); + GetSubPartitionPruningResult(inputPruningResult->ls_selectedSubPartitions, partSeq); List *subPartList = subPartPruning->ls_selectedSubPartitions; foreach (lc, subPartList) diff --git a/src/gausskernel/optimizer/util/var.cpp b/src/gausskernel/optimizer/util/var.cpp index b423a8720..2ad24aff0 100644 --- a/src/gausskernel/optimizer/util/var.cpp +++ b/src/gausskernel/optimizer/util/var.cpp @@ -1468,3 +1468,15 @@ List* check_vartype(Node* node) return rs; } +Node* LocateOpExprLeafVar(Node* node) +{ + if (node == NULL) { + return NULL; + } + + if (IsA(node, Var)) { + return node; + } + + return expression_tree_mutator(node, (Node* (*)(Node*, void*)) LocateOpExprLeafVar, NULL); +} \ No newline at end of file diff --git a/src/gausskernel/process/globalplancache/globalplancache.cpp b/src/gausskernel/process/globalplancache/globalplancache.cpp index 8674aa730..96f6ecf75 100644 --- a/src/gausskernel/process/globalplancache/globalplancache.cpp +++ b/src/gausskernel/process/globalplancache/globalplancache.cpp @@ -51,6 +51,21 @@ template void GlobalPlanCache::RemovePlanSource(CachedPlanSource* template void GlobalPlanCache::RemovePlanSource(CachedPlanSource* plansource, const char* stmt_name); +static bool has_diff_schema(const List *list1, const List *list2) +{ + const ListCell *cell = NULL; + + if (list2 == NIL) { + return list1 != NULL; + } + foreach (cell, list1) { + if (!list_member_oid(list2, lfirst_oid(cell))) { + return true; + } + } + return false; +} + static bool CompareSearchPath(struct OverrideSearchPath* path1, struct OverrideSearchPath* path2) { @@ -70,10 +85,10 @@ CompareSearchPath(struct OverrideSearchPath* path1, struct OverrideSearchPath* p if (path1->addCatalog != path2->addCatalog) { return false; } - if (list_difference_oid(path1->schemas, path2->schemas) != NULL) { + if (has_diff_schema(path1->schemas, path2->schemas)) { return false; } - if (list_difference_oid(path2->schemas, path1->schemas) != NULL) { + if (has_diff_schema(path2->schemas, path1->schemas)) { return false; } return true; @@ -380,6 +395,7 @@ CachedPlanSource* GlobalPlanCache::Fetch(const char *query_string, uint32 query_ CachedPlanSource* psrc = entry->val.plansource; psrc->gpc.status.AddRefcount(); if (!psrc->gpc.status.IsValid()) { + MemoryContextSwitchTo(oldcontext); LWLockRelease(GetMainLWLockByIndex(lock_id)); MoveIntoInvalidPlanList(psrc); psrc->gpc.status.SubRefCount(); diff --git a/src/gausskernel/process/globalplancache/globalplancache_util.cpp b/src/gausskernel/process/globalplancache/globalplancache_util.cpp index b5cea6396..b3c0927cf 100644 --- a/src/gausskernel/process/globalplancache/globalplancache_util.cpp +++ b/src/gausskernel/process/globalplancache/globalplancache_util.cpp @@ -300,6 +300,46 @@ void CNGPCCleanUpSession() } } +void GPCCleanUpSessionSavedPlan() +{ + if (!ENABLE_GPC) { + return; + } + if (u_sess->pcache_cxt.first_saved_plan == NULL && + u_sess->pcache_cxt.unnamed_stmt_psrc == NULL && + u_sess->pcache_cxt.ungpc_saved_plan == NULL) { + return; + } + /* 
unnamed_stmt_psrc only stores a shared GPC plan or a private plan,
+     * so we only need to drop the refcount for a shared plan. */
+    if (u_sess->pcache_cxt.unnamed_stmt_psrc && u_sess->pcache_cxt.unnamed_stmt_psrc->gpc.status.InShareTable()) {
+        u_sess->pcache_cxt.unnamed_stmt_psrc->gpc.status.SubRefCount();
+        u_sess->pcache_cxt.unnamed_stmt_psrc = NULL;
+    }
+    /* if in shared memory, delete context. */
+    /* For DN and CN */
+    CachedPlanSource* psrc = u_sess->pcache_cxt.first_saved_plan;
+    CachedPlanSource* next = NULL;
+    u_sess->pcache_cxt.first_saved_plan = NULL;
+    while (psrc != NULL) {
+        next = psrc->next_saved;
+        Assert (!psrc->gpc.status.InShareTable());
+        if (!psrc->gpc.status.IsPrivatePlan())
+            DropCachedPlan(psrc);
+        psrc = next;
+    }
+    /* For CN */
+    psrc = u_sess->pcache_cxt.ungpc_saved_plan;
+    next = NULL;
+    while (psrc != NULL) {
+        next = psrc->next_saved;
+        Assert (!psrc->gpc.status.InShareTable());
+        if (!psrc->gpc.status.IsPrivatePlan())
+            DropCachedPlan(psrc);
+        psrc = next;
+    }
+}
+
 /* incase change shared plan in execute stage, copy stmt into sess */
 List* CopyLocalStmt(const List* stmt_list, const MemoryContext parent_cxt, MemoryContext* plan_context)
 {
diff --git a/src/gausskernel/process/job/gs_job_calendar.cpp b/src/gausskernel/process/job/gs_job_calendar.cpp
index c5cae66ba..a9a8f85a0 100644
--- a/src/gausskernel/process/job/gs_job_calendar.cpp
+++ b/src/gausskernel/process/job/gs_job_calendar.cpp
@@ -1284,7 +1284,7 @@ static bool recheck_calendar_period(Calendar calendar, TimestampTz start_date, T
     start_date = truncate_calendar_date(CStringGetTextDatum(field_str), start_date);
     next_date = truncate_calendar_date(CStringGetTextDatum(field_str), next_date);
 
-    int64 elapsed = (int)timestamp_diff_internal(cstring_to_text(field_str), start_date, next_date, true);
+    int64 elapsed = timestamp_diff_internal(cstring_to_text(field_str), start_date, next_date, true);
     if (elapsed % (calendar->interval * multiplier) == 0) {
         return true;
     }
@@ -1521,4 +1521,4 @@ Datum evaluate_calendar_string_internal(PG_FUNCTION_ARGS)
     Datum date_after = PG_GETARG_DATUM(2); /* return date after */
     Datum new_next_date = evaluate_repeat_interval(string, start_date, date_after);
     PG_RETURN_DATUM(new_next_date);
-}
+}
\ No newline at end of file
diff --git a/src/gausskernel/process/job/gs_job_manager.cpp b/src/gausskernel/process/job/gs_job_manager.cpp
index 521f65b8d..feca70a7d 100644
--- a/src/gausskernel/process/job/gs_job_manager.cpp
+++ b/src/gausskernel/process/job/gs_job_manager.cpp
@@ -69,31 +69,6 @@ static bool run_sql_job(Datum job_name, StringInfoData *buf);
 static bool run_procedure_job(Datum job_name, StringInfoData *buf);
 static char *run_external_job(Datum job_name);
 
-/*
- * @brief delete by sysscan
- *      Perform a simple heap delete.
- * @param rel    Target relation
- * @param object_name    Delete by key
- * @param attribute_number    Attribute number
- */
-static void delete_by_sysscan(Relation rel, const Datum object_name, AttrNumber attribute_number)
-{
-    ScanKeyInfo scan_key_info;
-    scan_key_info.attribute_value = object_name;
-    scan_key_info.attribute_number = attribute_number;
-    scan_key_info.procedure = F_TEXTEQ;
-    List *tuples = search_by_sysscan_1(rel, &scan_key_info);
-    if (tuples == NULL) {
-        return;
-    }
-    ListCell *lc = NULL;
-    foreach (lc, tuples) {
-        HeapTuple tuple = (HeapTuple)lfirst(lc);
-        simple_heap_delete(rel, &tuple->t_self);
-    }
-    list_free_deep(tuples);
-}
-
 /*
  * @brief delete_by_syscache
  *      Perform a simple heap delete by searching syscache.
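In the hunk below, delete_by_syscache stops reaching into the catclist's member array (&(tuples->members[i]->tuple)) and fetches tuples through the t_thrd.lsc_cxt.FetchTupleFromCatCList accessor instead. A standalone illustration of why the accessor matters; all types here are simplified stand-ins, not the kernel's catcache structures:

```
#include <utility>
#include <vector>

struct HeapTupleData {
    int t_self; /* stand-in for the real ItemPointerData */
};

class CatCList {
public:
    explicit CatCList(std::vector<HeapTupleData> members) : m_members(std::move(members)) {}
    int n_members() const { return static_cast<int>(m_members.size()); }
    /* The accessor keeps the storage layout private, so it can change
     * (e.g. for a thread-local system cache) without touching callers. */
    HeapTupleData* FetchTuple(int i) { return &m_members[i]; }

private:
    std::vector<HeapTupleData> m_members;
};

static void DeleteAll(CatCList* tuples)
{
    for (int i = 0; i < tuples->n_members(); i++) {
        HeapTupleData* tuple = tuples->FetchTuple(i); /* was &(tuples->members[i]->tuple) */
        (void)tuple; /* the patch calls simple_heap_delete(rel, &tuple->t_self) here */
    }
}
```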
@@ -108,7 +83,7 @@
         return;
     }
     for (int i = 0; i < tuples->n_members; i++) {
-        HeapTuple tuple = &(tuples->members[i]->tuple);
+        HeapTuple tuple = t_thrd.lsc_cxt.FetchTupleFromCatCList(tuples, i);
         simple_heap_delete(rel, &tuple->t_self);
     }
     ReleaseSysCacheList(tuples);
@@ -146,7 +121,10 @@ void delete_from_job(const Datum job_name)
 {
     Relation rel = heap_open(PgJobRelationId, RowExclusiveLock);
-    delete_by_sysscan(rel, job_name, Anum_pg_job_job_name);
+    HeapTuple tuple = search_from_pg_job(rel, job_name);
+    if (tuple != NULL) {
+        simple_heap_delete(rel, &tuple->t_self);
+    }
     heap_close(rel, NoLock);
 }
 
@@ -155,42 +133,41 @@
  * Delete from pg_job_proc.
  * @param job_name
  */
-void delete_from_job_proc(const Datum object_name)
+void delete_from_job_proc(const Datum job_name)
 {
     Relation rel = heap_open(PgJobProcRelationId, RowExclusiveLock);
-    delete_by_sysscan(rel, object_name, Anum_pg_job_proc_job_name);
+    HeapTuple tuple = search_from_pg_job_proc_no_exception(rel, job_name);
+    if (tuple != NULL) {
+        simple_heap_delete(rel, &tuple->t_self);
+    }
     heap_close(rel, NoLock);
 }
 
-/*
- * @brief search_from_pg_job
- *      Search from pg_job.
- * @param pg_job_rel    pg_job relation
- * @param job_name
- * @return HeapTuple    result
- */
 HeapTuple search_from_pg_job(Relation pg_job_rel, Datum job_name)
 {
-    ScanKeyInfo scan_key_info;
-    scan_key_info.attribute_value = job_name;
-    scan_key_info.attribute_number = Anum_pg_job_job_name;
-    scan_key_info.procedure = F_TEXTEQ;
-    List *tuples = search_by_sysscan_1(pg_job_rel, &scan_key_info);
+    ScanKeyInfo scan_key_info1;
+    scan_key_info1.attribute_value = job_name;
+    scan_key_info1.attribute_number = Anum_pg_job_job_name;
+    scan_key_info1.procedure = F_TEXTEQ;
+    ScanKeyInfo scan_key_info2;
+    scan_key_info2.attribute_value = PointerGetDatum(u_sess->proc_cxt.MyProcPort->database_name);
+    scan_key_info2.attribute_number = Anum_pg_job_dbname;
+    scan_key_info2.procedure = F_NAMEEQ;
+
+    List *tuples = search_by_sysscan_2(pg_job_rel, &scan_key_info1, &scan_key_info2);
     if (tuples == NIL) {
         return NULL;
     }
 
     Assert(list_length(tuples) == 1);
-    HeapTuple oldtuple = NULL;
-    if (list_length(tuples) == 1) {
-        oldtuple = (HeapTuple)linitial(tuples);
-    } else {
+    if (list_length(tuples) != 1) {
         ereport(ERROR,
             (errmodule(MOD_JOB), errcode(ERRCODE_UNDEFINED_OBJECT),
-                errmsg("find %d tuples match job_name %s in system table pg_job.", list_length(tuples),
-                    TextDatumGetCString(job_name)),
-                errdetail("N/A"), errcause("job name is not exist"), erraction("Please check job_name")));
+                errmsg("found %d tuples matching job_name %s in system table pg_job.", list_length(tuples),
+                    TextDatumGetCString(job_name)),
+                errdetail("N/A"), errcause("job name does not exist"), erraction("Please check job_name")));
     }
-    list_free(tuples);
-    return oldtuple;
+    HeapTuple tuple = (HeapTuple)linitial(tuples);
+    list_free_ext(tuples);
+    return tuple;
 }
 
 /*
@@ -1178,6 +1155,9 @@ static bool run_procedure_job(Datum job_name, StringInfoData *buf)
     appendStringInfoString(buf, job_target->job_proc_value->action_str);
     appendStringInfoString(buf, "(");
     for (int i = 0; i < job_target->job_attribute_value->number_of_arguments; i++) {
+        if (i > 0) {
+            appendStringInfoString(buf, ", ");
+        }
         appendStringInfoString(buf, "'");
         appendStringInfoString(buf, job_target->arguments[i].argument_value);
         appendStringInfoString(buf, "'::");
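The run_procedure_job hunk above also fixes argument rendering: without the i > 0 guard, consecutive quoted arguments were emitted with no separator between them. The same idiom in isolation, as a runnable sketch (std::string stands in for StringInfoData):

```
#include <iostream>
#include <string>
#include <vector>

/* Emit the delimiter before every element except the first, mirroring
 * the run_procedure_job fix. */
static std::string JoinArguments(const std::vector<std::string>& args)
{
    std::string buf = "(";
    for (size_t i = 0; i < args.size(); i++) {
        if (i > 0) {
            buf += ", "; /* the separator the patch adds */
        }
        buf += "'" + args[i] + "'";
    }
    return buf + ")";
}

int main()
{
    std::cout << JoinArguments({"1", "two", "3"}) << std::endl; /* ('1', 'two', '3') */
    return 0;
}
```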
remove_scheduler_objects_from_owner(const char *user_str) } list_free_deep(drop_object_names); list_free_deep(drop_object_types); -} +} \ No newline at end of file diff --git a/src/gausskernel/process/job/job_scheduler.cpp b/src/gausskernel/process/job/job_scheduler.cpp index 063236886..1c5b2fb1b 100755 --- a/src/gausskernel/process/job/job_scheduler.cpp +++ b/src/gausskernel/process/job/job_scheduler.cpp @@ -274,6 +274,9 @@ NON_EXEC_STATIC void JobScheduleMain() /* Abort the current transaction in order to recover */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + elog(LOG, "Job scheduler encounter abnormal, detail error msg: %s.", edata->message); /* @@ -690,17 +693,7 @@ static bool SkipSchedulerJob(Datum *values, bool *nulls, Timestamp curtime) return true; /* skip here to avoid further overhead */ } - /* immediate job (expired after one successful run) */ - if (nulls[Anum_pg_job_interval - 1]) { - return false; - } else { - char* interval_str = TextDatumGetCString(values[Anum_pg_job_interval - 1]); - if (pg_strcasecmp(interval_str, "null") == 0) { - pfree_ext(interval_str); - return false; - } - } - return true; + return false; } /* diff --git a/src/gausskernel/process/job/job_worker.cpp b/src/gausskernel/process/job/job_worker.cpp index 8d739f0ec..28330be1f 100755 --- a/src/gausskernel/process/job/job_worker.cpp +++ b/src/gausskernel/process/job/job_worker.cpp @@ -231,6 +231,9 @@ void JobExecuteWorkerMain() /* Flush any leaked data in the top-level context */ MemoryContextResetAndDeleteChildren(t_thrd.mem_cxt.msg_mem_cxt); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + LWLockReleaseAll(); if (t_thrd.utils_cxt.CurrentResourceOwner) { ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); diff --git a/src/gausskernel/process/main/main.cpp b/src/gausskernel/process/main/main.cpp index 02eb5f1b8..6a6ca1027 100755 --- a/src/gausskernel/process/main/main.cpp +++ b/src/gausskernel/process/main/main.cpp @@ -127,7 +127,6 @@ int main(int argc, char* argv[]) PmTopMemoryContext = t_thrd.top_mem_cxt; knl_thread_init(MASTER_THREAD); - t_thrd.bn = NULL; t_thrd.fake_session = create_session_context(t_thrd.top_mem_cxt, 0); t_thrd.fake_session->status = KNL_SESS_FAKE; diff --git a/src/gausskernel/process/postmaster/CMakeLists.txt b/src/gausskernel/process/postmaster/CMakeLists.txt index 70d4b2bdc..35215f3c1 100755 --- a/src/gausskernel/process/postmaster/CMakeLists.txt +++ b/src/gausskernel/process/postmaster/CMakeLists.txt @@ -1,6 +1,14 @@ #This is the main CMAKE for build bin. 
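A pattern repeated throughout this patch: every auxiliary thread's sigsetjmp error-recovery block gains an AtEOXact_SysDBCache(false) call, so the thread-local system-catalog cache (LSC) is released after a caught error alongside the existing lock and error-state cleanup. A compilable sketch of that recovery skeleton, with stub functions standing in for the kernel APIs (EmitErrorReport, AtEOXact_SysDBCache, LWLockReleaseAll):

```
#include <csetjmp>
#include <cstdio>

// Stub functions; in the kernel these are EmitErrorReport(),
// AtEOXact_SysDBCache(false), LWLockReleaseAll(), FlushErrorState(), ...
static void emit_error_report()      { std::puts("error reported"); }
static void release_local_syscache() { std::puts("lsc released"); }   // the call this patch adds
static void release_lwlocks()        { std::puts("lwlocks released"); }

static sigjmp_buf local_sigjmp_buf;
static bool failed_once = false;

static void do_one_iteration()
{
    if (!failed_once) {               // simulate a single ereport(ERROR)
        failed_once = true;
        siglongjmp(local_sigjmp_buf, 1);
    }
}

int main()
{
    if (sigsetjmp(local_sigjmp_buf, 1) != 0) {
        // An ERROR longjmp'd back here: release everything the thread may
        // still be holding before resuming the main loop.
        emit_error_report();
        release_local_syscache();     // new in this patch, for every aux thread
        release_lwlocks();
    }
    for (int i = 0; i < 3; ++i)
        do_one_iteration();
    std::puts("loop finished normally");
    return 0;
}
```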
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_postmaster_SRC) +if("${ENABLE_LITE_MODE}" STREQUAL "ON") + list(REMOVE_ITEM TGT_postmaster_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/barrier_creator.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/barrier_arch.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/aiocompleter.cpp + ) +endif() + set(TGT_postmaster_INC ${PROJECT_SRC_DIR}/gausskernel/cbb/communication ${PROJECT_SRC_DIR}/include/libcomm diff --git a/src/gausskernel/process/postmaster/Makefile b/src/gausskernel/process/postmaster/Makefile index c600e3c54..789461cae 100644 --- a/src/gausskernel/process/postmaster/Makefile +++ b/src/gausskernel/process/postmaster/Makefile @@ -33,8 +33,12 @@ ifneq "$(MAKECMDGOALS)" "clean" endif OBJS = autovacuum.o bgwriter.o fork_process.o pgarch.o pgstat.o postmaster.o gaussdb_version.o\ startup.o syslogger.o walwriter.o walwriterauxiliary.o checkpointer.o pgaudit.o alarmchecker.o \ - twophasecleaner.o aiocompleter.o fencedudf.o lwlockmonitor.o cbmwriter.o pagewriter.o snapcapturer.o rbcleaner.o globalstats.o \ - barrier_creator.o bgworker.o barrier_arch.o $(top_builddir)/src/lib/config/libconfig.a + twophasecleaner.o fencedudf.o lwlockmonitor.o cbmwriter.o pagewriter.o pagerepair.o snapcapturer.o rbcleaner.o globalstats.o \ + bgworker.o $(top_builddir)/src/lib/config/libconfig.a + +ifeq ($(enable_lite_mode), no) +OBJS += barrier_creator.o barrier_preparse.o barrier_arch.o aiocompleter.o +endif include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/process/postmaster/autovacuum.cpp b/src/gausskernel/process/postmaster/autovacuum.cpp index 3a3df7f30..3a3273ae9 100755 --- a/src/gausskernel/process/postmaster/autovacuum.cpp +++ b/src/gausskernel/process/postmaster/autovacuum.cpp @@ -317,6 +317,9 @@ NON_EXEC_STATIC void AutoVacLauncherMain() /* Abort the current transaction in order to recover */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * Now return to normal top-level context and clear ErrorContext for * next time. @@ -910,8 +913,9 @@ static Oid do_start_worker(void) */ t_thrd.autovacuum_cxt.recentXid = ReadNewTransactionId(); if (t_thrd.autovacuum_cxt.recentXid > - FirstNormalTransactionId + g_instance.attr.attr_storage.autovacuum_freeze_max_age) - xidForceLimit = t_thrd.autovacuum_cxt.recentXid - g_instance.attr.attr_storage.autovacuum_freeze_max_age; + FirstNormalTransactionId + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age) + xidForceLimit = t_thrd.autovacuum_cxt.recentXid - + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age; else xidForceLimit = FirstNormalTransactionId; @@ -919,8 +923,9 @@ static Oid do_start_worker(void) /* Also determine the oldest datminmxid we will consider. */ t_thrd.autovacuum_cxt.recentMulti = ReadNextMultiXactId(); if (t_thrd.autovacuum_cxt.recentMulti > - FirstMultiXactId + g_instance.attr.attr_storage.autovacuum_freeze_max_age) - multiForceLimit = t_thrd.autovacuum_cxt.recentMulti - g_instance.attr.attr_storage.autovacuum_freeze_max_age; + FirstMultiXactId + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age) + multiForceLimit = t_thrd.autovacuum_cxt.recentMulti - + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age; else multiForceLimit = FirstMultiXactId; #endif @@ -1248,6 +1253,9 @@ NON_EXEC_STATIC void AutoVacWorkerMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * We can now go away. 
Note that because we called InitProcess, a * callback was registered to do ProcKill, which will clean up @@ -2370,9 +2378,50 @@ static void do_autovacuum(void) } tableam_scan_end(relScan); - heap_close(classRel, AccessShareLock); DEBUG_MOD_STOP_TIMER(MOD_AUTOVAC, "AUTOVAC TIMER: Scan pg_class to determine which toast tables to vacuum"); + /* On the fourth pass: check USTORE GPI tables */ + ScanKeyInit(&key[0], Anum_pg_class_relkind, BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(RELKIND_GLOBAL_INDEX)); + relScan = tableam_scan_begin(classRel, SnapshotNow, 1, &key[0]); + while ((tuple = (HeapTuple)tableam_scan_getnexttuple(relScan, ForwardScanDirection)) != NULL) { + Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + Oid relid = HeapTupleGetOid(tuple); + AutoVacOpts *relopts = NULL; + /* + * We cannot safely process other backends' temp tables, so skip them. + */ + if (classForm->relpersistence == RELPERSISTENCE_TEMP || + classForm->relpersistence == RELPERSISTENCE_GLOBAL_TEMP) + continue; + /* only UBTree supports vacuum independently */ + if (classForm->relam != UBTREE_AM_OID) { + continue; + } + /* fetch reloptions */ + relopts = extract_autovac_opts(tuple, pg_class_desc); + /* only UBTree supports vacuum, and enabled will be set true */ + if (relopts == NULL || !relopts->enabled) { + continue; + } + /* we skipped relation_support_autoavac() and relation_needs_vacanalyze() checks here */ + vacObj = (vacuum_object*)palloc(sizeof(vacuum_object)); + vacObj->tab_oid = relid; + vacObj->parent_oid = InvalidOid; + vacObj->dovacuum = true; + vacObj->dovacuum_toast = false; + vacObj->doanalyze = false; + vacObj->need_freeze = false; + vacObj->is_internal_relation = false; + /* VACFLG_MAIN_PARTITION makes no sense when vacuuming UBTree */ + vacObj->flags = VACFLG_SIMPLE_HEAP; /* ignore this flag as we will not use it, + * and to be safe we can use VACFLG_SIMPLE_HEAP. + */ + table_oids = lappend(table_oids, vacObj); + } + tableam_scan_end(relScan); + heap_close(classRel, AccessShareLock); + DEBUG_MOD_STOP_TIMER(MOD_AUTOVAC, "AUTOVAC TIMER: Scan pg_class to determine which UBTree tables to vacuum"); + /* * Create one buffer access strategy object per buffer pool for VACUUM to use. 
* We want to use the same one across all the vacuum operations we perform, @@ -2705,7 +2754,8 @@ AutoVacOpts* extract_autovac_opts(HeapTuple tup, TupleDesc pg_class_desc) Assert(((Form_pg_class)GETSTRUCT(tup))->relkind == RELKIND_RELATION || ((Form_pg_class) GETSTRUCT(tup))->relkind == RELKIND_MATVIEW || - ((Form_pg_class)GETSTRUCT(tup))->relkind == RELKIND_TOASTVALUE); + ((Form_pg_class)GETSTRUCT(tup))->relkind == RELKIND_TOASTVALUE || + ((Form_pg_class)GETSTRUCT(tup))->relkind == RELKIND_GLOBAL_INDEX); relopts = extractRelOptions(tup, pg_class_desc, InvalidOid); if (relopts == NULL) @@ -2720,6 +2770,12 @@ AutoVacOpts* extract_autovac_opts(HeapTuple tup, TupleDesc pg_class_desc) av->enabled = false; } + /* force set av->enabled in ustore's GPI */ + if ((((Form_pg_class)GETSTRUCT(tup))->relkind == RELKIND_GLOBAL_INDEX) && + RelationIsTableAccessMethodUStoreType(relopts)) { + av->enabled = true; + } + pfree_ext(relopts); return av; @@ -2842,6 +2898,17 @@ static autovac_table* table_recheck_autovac( if (!HeapTupleIsValid(classTup)) return NULL; classForm = (Form_pg_class)GETSTRUCT(classTup); + /* this is UBTree, use another bypass */ + if (classForm->relam == UBTREE_AM_OID) { + avopts = extract_autovac_opts(classTup, pg_class_desc); + tab = calculate_vacuum_cost_and_freezeages(avopts, false, false); + if (tab != NULL) { + tab->at_relid = relid; + tab->at_sharedrel = classForm->relisshared; + tab->at_dovacuum = true; + } + return tab; + } /* * Get the applicable reloptions. If it is a TOAST table, try to get the @@ -2938,9 +3005,9 @@ static void determine_vacuum_params(float4& vac_scale_factor, int& vac_base_thre #ifndef ENABLE_MULTIPLE_NODES if (t_thrd.autovacuum_cxt.recentMulti > - FirstMultiXactId + g_instance.attr.attr_storage.autovacuum_freeze_max_age) + FirstMultiXactId + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age) multiForceLimit = t_thrd.autovacuum_cxt.recentMulti - - g_instance.attr.attr_storage.autovacuum_freeze_max_age; + (uint64)g_instance.attr.attr_storage.autovacuum_freeze_max_age; else multiForceLimit = FirstMultiXactId; #endif diff --git a/src/gausskernel/process/postmaster/barrier_arch.cpp b/src/gausskernel/process/postmaster/barrier_arch.cpp index 1c961e874..51bfbaa7d 100755 --- a/src/gausskernel/process/postmaster/barrier_arch.cpp +++ b/src/gausskernel/process/postmaster/barrier_arch.cpp @@ -85,6 +85,7 @@ static void WaitBarrierArch(XLogRecPtr barrierLsn, const char *slotName) (errmsg("WaitBarrierArch start: 0x%lx", barrierLsn))); int cnt = 0; + const int interval = 100; do { ArchiveTaskStatus *archive_task_status = NULL; archive_task_status = find_archive_task_status(slotName); @@ -104,8 +105,17 @@ static void WaitBarrierArch(XLogRecPtr barrierLsn, const char *slotName) " due to administrator command"))); } pg_usleep(100000L); - cnt++; - + if (t_thrd.barrier_arch.lastArchiveLoc == pg_atomic_read_u64(&archive_task_status->archived_lsn)) { + cnt++; + if ((cnt % interval) == 0) { + ereport(WARNING, (errmsg("[WaitBarrierArch] arch thread now archived" + " lsn: %08X/%08X, timeout count: %d", (uint32)(t_thrd.barrier_arch.lastArchiveLoc >> 32), + (uint32)t_thrd.barrier_arch.lastArchiveLoc, cnt))); + } + } else { + cnt = 0; + } + t_thrd.barrier_arch.lastArchiveLoc = pg_atomic_read_u64(&archive_task_status->archived_lsn); if (cnt > WAIT_ARCHIVE_TIMEOUT) { ereport(ERROR, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), errmsg("Wait archived timeout."))); } @@ -159,7 +169,6 @@ void ProcessBarrierQueryArchive(char* id) static void BarrierArchWakenStop(SIGNAL_ARGS) { 
t_thrd.barrier_arch.ready_to_stop = true; - ereport(LOG, (errmsg("[BarrierArch] barrier_arch thread shut down."))); } static void BarrierArchSighupHandler(SIGNAL_ARGS) @@ -200,6 +209,9 @@ static PGXCNodeAllHandles* GetAllNodesHandles() conn_handles = get_handles(barrierDataNodeList, barrierCoordList, false); + list_free(barrierCoordList); + list_free(barrierDataNodeList); + return conn_handles; } @@ -211,6 +223,8 @@ static void SendBarrierArchRequest(const PGXCNodeAllHandles* handles, int count, errno_t rc; char barrierInfo[BARRIER_ARCH_INFO_LEN]; + ereport(LOG, (errmsg("Start to send barrier arch request to all nodes."))); + for (conn = 0; conn < count; conn++) { PGXCNodeHandle* handle = NULL; @@ -297,15 +311,17 @@ static void CheckBarrierArchCommandStatus(const PGXCNodeAllHandles* conn_handles static void QueryBarrierArch(PGXCNodeAllHandles* handles, ArchiveConfig *archive_obs) { int connCnt = handles->co_conn_count + handles->dn_conn_count; - if (connCnt >= g_instance.archive_obs_cxt.max_node_cnt) { - ereport(WARNING, (errmsg("current cn get connCnt: <%d> max than cluster connCnt: <%d>", - connCnt, g_instance.archive_obs_cxt.max_node_cnt))); + SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); + int archivMaxNodeCnt = g_instance.archive_obs_cxt.max_node_cnt; + if (connCnt >= archivMaxNodeCnt) { + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); + ereport(DEBUG2, (errmsg("current cn get connCnt: <%d> max than cluster connCnt: <%d>", + connCnt, archivMaxNodeCnt))); return; } ArchiveBarrierLsnInfo barrierLsnInfo[g_instance.archive_obs_cxt.max_node_cnt]; - SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); if (strncmp(t_thrd.barrier_arch.barrierName, g_instance.archive_obs_cxt.barrierName, strlen(g_instance.archive_obs_cxt.barrierName)) == 0) { SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); @@ -326,9 +342,8 @@ static void QueryBarrierArch(PGXCNodeAllHandles* handles, ArchiveConfig *archive errorno = memcpy_s(&barrierLsnInfo, sizeof(ArchiveBarrierLsnInfo) * g_instance.archive_obs_cxt.max_node_cnt, g_instance.archive_obs_cxt.barrier_lsn_info, sizeof(ArchiveBarrierLsnInfo) * g_instance.archive_obs_cxt.max_node_cnt); - securec_check(errorno, "\0", "\0"); - SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); + securec_check(errorno, "\0", "\0"); SendBarrierArchRequest(handles, connCnt, barrierLsnInfo); @@ -383,6 +398,8 @@ NON_EXEC_STATIC void BarrierArchMain(knl_thread_arg* arg) ereport(LOG, (errmsg("[BarrierArch] barrier arch thread starts. 
slot name: %s", t_thrd.barrier_arch.slot_name))); + on_shmem_exit(PGXCNodeCleanAndRelease, 0); + BarrierArchSetupSignalHook(); BaseInit(); @@ -424,6 +441,9 @@ NON_EXEC_STATIC void BarrierArchMain(knl_thread_arg* arg) /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* release resource */ LWLockReleaseAll(); @@ -457,7 +477,6 @@ NON_EXEC_STATIC void BarrierArchMain(knl_thread_arg* arg) (void)gs_signal_unblock_sigusr2(); SetProcessingMode(NormalProcessing); - on_shmem_exit(PGXCNodeCleanAndRelease, 0); pg_usleep_retry(1000000L, 0); @@ -476,18 +495,22 @@ NON_EXEC_STATIC void BarrierArchMain(knl_thread_arg* arg) ereport(DEBUG1, (errmsg("[BarrierArch] Current node is not first node: %s", g_instance.attr.attr_common.PGXCNodeName))); - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } CHECK_FOR_INTERRUPTS(); pg_usleep(10000000L); } while (1); - if (g_instance.archive_obs_cxt.barrier_lsn_info == NULL) { + SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); + if (g_instance.archive_obs_cxt.barrier_lsn_info == NULL || + g_instance.archive_obs_cxt.max_node_cnt == 0) { + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); ereport(WARNING, (errmsg("[BarrierArch] barrier_lsn_info not alloc."))); return; } + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); #endif ereport(DEBUG1, (errmsg("[BarrierArch] Init connections with CN/DN, dn count : %d, cn count : %d", @@ -502,14 +525,15 @@ NON_EXEC_STATIC void BarrierArchMain(knl_thread_arg* arg) } #ifdef ENABLE_MULTIPLE_NODES - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); if (!IsFirstCn()) break; } + PGXCNodeAllHandles* handles = GetAllNodesHandles(); - + QueryBarrierArch(handles, &obsArchiveSlot->archive_config); pfree_pgxc_all_handles(handles); diff --git a/src/gausskernel/process/postmaster/barrier_creator.cpp b/src/gausskernel/process/postmaster/barrier_creator.cpp index cd95b3fb6..7c800cf4e 100755 --- a/src/gausskernel/process/postmaster/barrier_creator.cpp +++ b/src/gausskernel/process/postmaster/barrier_creator.cpp @@ -45,15 +45,71 @@ #include "pgxc/pgxcnode.h" #include "tcop/utility.h" #include "pgxc/poolutils.h" +#include "access/gtm.h" -const int BARRIER_NAME_LEN = 40; #define TIME_GET_MILLISEC(t) (((long)(t).tv_sec * 1000) + ((long)(t).tv_usec) / 1000) +const int BARRIER_NAME_LEN = 40; +const char* CSN_BARRIER_PATTREN_STR = "csn_%021lu_%013ld"; +const char* CSN_SWITCHOVER_BARRIER_PATTREN_STR = "csn_%021lu_dr_switchover"; + +void GetCsnBarrierName(char* barrierRet, bool isSwitchoverBarrier) +{ + struct timeval tv; + int rc; + CommitSeqNo csn; + + gettimeofday(&tv, NULL); + if (GTM_MODE) + csn = GetCSNGTM(); + else + csn = CommitCSNGTM(false); + + if (isSwitchoverBarrier) { + rc = snprintf_s(barrierRet, BARRIER_NAME_LEN, BARRIER_NAME_LEN - 1, CSN_SWITCHOVER_BARRIER_PATTREN_STR, csn); + } else { + rc = snprintf_s(barrierRet, BARRIER_NAME_LEN, BARRIER_NAME_LEN - 1, CSN_BARRIER_PATTREN_STR, csn, + TIME_GET_MILLISEC(tv)); + } + securec_check_ss_c(rc, "\0", "\0"); + elog(DEBUG1, "GetCsnBarrierName csn = %lu, barrier_name = %s", csn, barrierRet); +} + +CommitSeqNo CsnBarrierNameGetCsn(const char *csnBarrier) +{ + CommitSeqNo csn; + long ts = 0; + if ((strstr(csnBarrier, "_dr_switchover") != NULL && + sscanf_s(csnBarrier, 
CSN_SWITCHOVER_BARRIER_PATTREN_STR, &csn) == 1) || + sscanf_s(csnBarrier, CSN_BARRIER_PATTREN_STR, &csn, &ts) == 2) { + return csn; + } + return 0; +} + +int64 CsnBarrierNameGetTimeStamp(const char *csnBarrier) +{ + CommitSeqNo csn; + int64 ts = 0; + if (sscanf_s(csnBarrier, CSN_BARRIER_PATTREN_STR, &csn, &ts) == 2) { + return ts; + } + return 0; +} + +bool IsSwitchoverBarrier(const char *csnBarrier) +{ + if (!IS_CSN_BARRIER(csnBarrier) || (strstr(csnBarrier, "_dr_switchover") == NULL)) { + return false; + } + return true; +} bool IsFirstCn() { - char* firstExecNode = find_first_exec_cn(); + char *firstExecNode = find_first_exec_cn(); return (strcmp(firstExecNode, g_instance.attr.attr_common.PGXCNodeName) == 0); } + void barrier_creator_thread_shutdown(void) { g_instance.barrier_creator_cxt.stop = true; @@ -89,12 +145,11 @@ static void barrier_creator_setup_signal_hook(void) (void)sigdelset(&t_thrd.libpq_cxt.BlockSig, SIGQUIT); } -static uint64_t read_barrier_id_from_obs(const char *slotName) +static uint64_t read_barrier_id_from_obs(const char *slotName, long *currBarrierTime) { char barrier_name[BARRIER_NAME_LEN]; int ret; uint64_t barrier_id; - long ts = 0; if (ArchiveReplicationReadFile(BARRIER_FILE, (char *)barrier_name, MAX_BARRIER_ID_LENGTH, slotName)) { barrier_name[BARRIER_NAME_LEN - 1] = '\0'; @@ -104,7 +159,12 @@ static uint64_t read_barrier_id_from_obs(const char *slotName) return 0; } - ret = sscanf_s(barrier_name, "hadr_%020" PRIu64 "_%013ld", &barrier_id, &ts); +#ifdef ENABLE_MULTIPLE_NODES + ret = sscanf_s(barrier_name, "csn_%021" PRIu64 "_%013ld", &barrier_id, currBarrierTime); +#else + ret = sscanf_s(barrier_name, "hadr_%020" PRIu64 "_%013ld", &barrier_id, currBarrierTime); +#endif + if (ret == 2) { barrier_id++; return barrier_id; @@ -112,67 +172,107 @@ static uint64_t read_barrier_id_from_obs(const char *slotName) return 0; } -uint64_t GetObsBarrierIndex(const List *archiveSlotNames) +uint64_t GetObsBarrierIndex(const List *archiveSlotNames, long *last_barrier_time) { uint64_t maxIndex = 0; - + long maxBarrierTime = 0; + foreach_cell(cell, archiveSlotNames) { + long currBarrierTime = 0; + char* slotName = (char*)lfirst(cell); + if (slotName == NULL || strlen(slotName) == 0) { + continue; + } + uint64_t readIndex = read_barrier_id_from_obs(slotName, &currBarrierTime); + maxIndex = (readIndex > maxIndex) ? readIndex : maxIndex; + maxBarrierTime = (currBarrierTime > maxBarrierTime) ? currBarrierTime : maxBarrierTime; + } + *last_barrier_time = maxBarrierTime; + + return maxIndex; +} + +uint64 GetObsFirstCNBarrierTimeline(const List *archiveSlotNames) +{ + uint64 timeline = 0; + foreach_cell(cell, archiveSlotNames) { char* slotName = (char*)lfirst(cell); if (slotName == NULL || strlen(slotName) == 0) { continue; } - uint64_t readIndex = read_barrier_id_from_obs(slotName); - maxIndex = (readIndex > maxIndex) ? 
readIndex : maxIndex; + timeline = ReadBarrierTimelineRecordFromObs(slotName); + break; } - - return maxIndex; + return timeline; } -static void AllocBarrierLsnInfo() +static void AllocBarrierLsnInfo(int nodeSize) { int rc; g_instance.archive_obs_cxt.barrier_lsn_info = (ArchiveBarrierLsnInfo *)palloc0( - sizeof(ArchiveBarrierLsnInfo) * g_instance.archive_obs_cxt.max_node_cnt); + sizeof(ArchiveBarrierLsnInfo) * nodeSize); rc = memset_s(g_instance.archive_obs_cxt.barrier_lsn_info, - sizeof(ArchiveBarrierLsnInfo) * g_instance.archive_obs_cxt.max_node_cnt, 0, - sizeof(ArchiveBarrierLsnInfo) * g_instance.archive_obs_cxt.max_node_cnt); + sizeof(ArchiveBarrierLsnInfo) * nodeSize, 0, + sizeof(ArchiveBarrierLsnInfo) * nodeSize); securec_check(rc, "", ""); } #ifdef ENABLE_MULTIPLE_NODES static void BarrierCreatorPoolerReload(void) { + destroy_handles(); processPoolerReload(); ereport(LOG, (errmsg("[BarrierCreatorPoolerReload] Reload connections with CN/DN, dn count : %d, cn count : %d", u_sess->pgxc_cxt.NumDataNodes, u_sess->pgxc_cxt.NumCoords))); - + if (g_instance.archive_obs_cxt.archive_slot_num == 0) { + return; + } + int maxNodeCnt = *t_thrd.pgxc_cxt.shmemNumCoords + *t_thrd.pgxc_cxt.shmemNumDataNodes; - if (maxNodeCnt > g_instance.archive_obs_cxt.max_node_cnt) { + SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); + g_instance.archive_obs_cxt.max_node_cnt = 0; + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); + int nodeSize = maxNodeCnt; if (g_instance.archive_obs_cxt.barrier_lsn_info != NULL) { pfree_ext(g_instance.archive_obs_cxt.barrier_lsn_info); } - g_instance.archive_obs_cxt.max_node_cnt = maxNodeCnt; - AllocBarrierLsnInfo(); + AllocBarrierLsnInfo(nodeSize); + SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); + g_instance.archive_obs_cxt.max_node_cnt = nodeSize; + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); } } #endif +static void FreeBarrierLsnInfo() +{ + SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); + g_instance.archive_obs_cxt.max_node_cnt = 0; + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); + pfree_ext(g_instance.archive_obs_cxt.barrier_lsn_info); +} + void barrier_creator_main(void) { uint64_t index = 0; + long last_barrier_time = 0; + struct timeval tv; + int rc; char barrier_name[BARRIER_NAME_LEN]; + List* archiveSlotNames; MemoryContext barrier_creator_context; sigjmp_buf local_sigjmp_buf; - int rc; - struct timeval tv; + t_thrd.barrier_creator_cxt.is_first_barrier = true; char username[NAMEDATALEN]; char *dbname = (char *)pstrdup(DEFAULT_DATABASE); - - ereport(LOG, (errmsg("[BarrierCreator] barrier creator thread starts."))); - + bool startCsnBarrier = g_instance.attr.attr_storage.auto_csn_barrier; + // use InnerMaintenanceTools mode to avoid deadlock with thread pool + u_sess->proc_cxt.IsInnerMaintenanceTools = true; + ereport(LOG, (errmsg("[BarrierCreator] barrier creator started"))); + g_instance.archive_obs_cxt.max_node_cnt = 0; SetProcessingMode(InitProcessing); t_thrd.role = BARRIER_CREATOR; @@ -211,8 +311,9 @@ void barrier_creator_main(void) * See notes in postgres.c about the design of this coding. 
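For reference, the CSN barrier naming introduced above (CSN_BARRIER_PATTREN_STR, i.e. "csn_%021lu_%013ld") round-trips cleanly between GetCsnBarrierName and the CsnBarrierNameGet* parsers. A self-contained sketch of that round trip; it substitutes standard snprintf/sscanf for the kernel's secure-library snprintf_s/sscanf_s, and the CSN and timestamp values are made up:

```
#include <cstdio>

// Format and then parse a CSN barrier name, mirroring CSN_BARRIER_PATTREN_STR.
// snprintf/sscanf replace snprintf_s/sscanf_s so this compiles without the
// securec library; the values are illustrative only.
int main()
{
    const unsigned long csn = 2000042UL;  // stand-in CommitSeqNo
    const long ms = 1650000000123L;       // stand-in epoch milliseconds

    char name[64];
    std::snprintf(name, sizeof(name), "csn_%021lu_%013ld", csn, ms);
    std::printf("barrier name: %s\n", name);

    unsigned long parsed_csn = 0;
    long parsed_ms = 0;
    if (std::sscanf(name, "csn_%021lu_%013ld", &parsed_csn, &parsed_ms) == 2) {
        // CsnBarrierNameGetCsn / CsnBarrierNameGetTimeStamp do the same split
        std::printf("csn=%lu ts=%ld\n", parsed_csn, parsed_ms);
    }
    return 0;
}
```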
*/ int curTryCounter; - int* oldTryCounter = NULL; + int *oldTryCounter = NULL; if (sigsetjmp(local_sigjmp_buf, 1) != 0) { + destroy_handles(); gstrace_tryblock_exit(true, oldTryCounter); /* Since not using PG_TRY, must reset error stack by hand */ @@ -226,9 +327,13 @@ void barrier_creator_main(void) /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* release resource */ LWLockReleaseAll(); + FreeBarrierLsnInfo(); /* * Now return to normal top-level context and clear ErrorContext for * next time. @@ -241,27 +346,15 @@ void barrier_creator_main(void) RESUME_INTERRUPTS(); return; } - destroy_handles(); oldTryCounter = gstrace_tryblock_entry(&curTryCounter); /* We can now handle ereport(ERROR) */ t_thrd.log_cxt.PG_exception_stack = &local_sigjmp_buf; - /* * Unblock signals (they were blocked when the postmaster forked us) */ gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); (void)gs_signal_unblock_sigusr2(); - - int count = 0; - while (count++ < 10) { - if (g_instance.barrier_creator_cxt.stop) { - return; - } - pg_usleep(USECS_PER_SEC); - } - SetProcessingMode(NormalProcessing); - exec_init_poolhandles(); #ifdef ENABLE_MULTIPLE_NODES @@ -274,65 +367,150 @@ void barrier_creator_main(void) ereport(DEBUG1, (errmsg("[BarrierCreator] Current node is not first node: %s", g_instance.attr.attr_common.PGXCNodeName))); - if (u_sess->sig_cxt.got_PoolReload) { + if (IsGotPoolReload()) { BarrierCreatorPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } CHECK_FOR_INTERRUPTS(); - pg_usleep(10000000L); + pg_usleep(1000000L); } while (1); #endif ereport(DEBUG1, (errmsg("[BarrierCreator] Init connections with CN/DN, dn count : %d, cn count : %d", u_sess->pgxc_cxt.NumDataNodes, u_sess->pgxc_cxt.NumCoords))); + ereport(LOG, (errmsg("[BarrierCreator] %s is barrier creator", g_instance.attr.attr_common.PGXCNodeName))); g_instance.barrier_creator_cxt.stop = false; - List *archiveSlotNames = GetAllArchiveSlotsName(); - if (archiveSlotNames == NIL || archiveSlotNames->length == 0) { - return; - } - index = GetObsBarrierIndex(archiveSlotNames); - list_free_deep(archiveSlotNames); - - if (g_instance.archive_obs_cxt.barrier_lsn_info == NULL) { - g_instance.archive_obs_cxt.max_node_cnt = *t_thrd.pgxc_cxt.shmemNumCoords + *t_thrd.pgxc_cxt.shmemNumDataNodes; - AllocBarrierLsnInfo(); - } - - while (!g_instance.barrier_creator_cxt.stop) { - /* in hadr switchover, barrier creator thread stop creating new barriers during service truncate.*/ - if (g_instance.archive_obs_cxt.in_service_truncate == true) { - continue; + if (g_instance.archive_obs_cxt.archive_slot_num != 0) { + t_thrd.barrier_creator_cxt.archive_slot_names = GetAllArchiveSlotsName(); + if (t_thrd.barrier_creator_cxt.archive_slot_names == NIL || + t_thrd.barrier_creator_cxt.archive_slot_names->length == 0) { + return; } - - pg_usleep_retry(1000000L, 0); - if (g_instance.archive_obs_cxt.archive_slot_num == 0) { - g_instance.barrier_creator_cxt.stop = true; - break; + index = GetObsBarrierIndex(t_thrd.barrier_creator_cxt.archive_slot_names, &last_barrier_time); + t_thrd.barrier_creator_cxt.first_cn_timeline = + GetObsFirstCNBarrierTimeline(t_thrd.barrier_creator_cxt.archive_slot_names); + /* + * If the barrier creator node has changed, the time of the current node may be earlier than the barrier time, + * wait for a while to prevent barrier time rollback. 
+ */ + do { + gettimeofday(&tv, NULL); + long current_time = TIME_GET_MILLISEC(tv); + if (last_barrier_time < current_time) { + break; + } + long time_diff = last_barrier_time - current_time; + ereport(LOG, (errmsg("[BarrierCreator] current time %ld is smaller than barrier time %ld, and sleep %ld ms", + current_time, last_barrier_time, time_diff))); + pg_usleep(time_diff * 1000L); + } while (1); + if (t_thrd.barrier_creator_cxt.is_first_barrier) { + gettimeofday(&tv, NULL); + WriteGlobalBarrierListStartTimeOnMedia(TIME_GET_MILLISEC(tv)); + } +#ifdef ENABLE_MULTIPLE_NODES + while (!START_AUTO_CSN_BARRIER) { + pg_usleep(1000000L); + } +#endif + } #ifdef ENABLE_MULTIPLE_NODES - if (u_sess->sig_cxt.got_PoolReload) { + CleanupBarrierLock(); +#endif + + while (!g_instance.barrier_creator_cxt.stop) { + if (t_thrd.barrier_creator_cxt.got_SIGHUP) { + t_thrd.barrier_creator_cxt.got_SIGHUP = false; + ProcessConfigFile(PGC_SIGHUP); + startCsnBarrier = g_instance.attr.attr_storage.auto_csn_barrier; + } + + /* in hadr switchover, the barrier creator thread stops creating new barriers during service truncate. */ + if (g_instance.archive_obs_cxt.archive_slot_num != 0 && + g_instance.archive_obs_cxt.in_service_truncate == true) { + continue; + } + if (g_instance.archive_obs_cxt.archive_slot_num != 0) { + if (t_thrd.barrier_creator_cxt.barrier_update_last_time_info == NULL) { + t_thrd.barrier_creator_cxt.barrier_update_last_time_info = (BarrierUpdateLastTimeInfo*)palloc0( + sizeof(BarrierUpdateLastTimeInfo) * g_instance.attr.attr_storage.max_replication_slots); + } + if (g_instance.archive_obs_cxt.barrier_lsn_info == NULL) { + int nodeSize = *t_thrd.pgxc_cxt.shmemNumCoords + *t_thrd.pgxc_cxt.shmemNumDataNodes; + AllocBarrierLsnInfo(nodeSize); + SpinLockAcquire(&g_instance.archive_obs_cxt.barrier_lock); + g_instance.archive_obs_cxt.max_node_cnt = nodeSize; + SpinLockRelease(&g_instance.archive_obs_cxt.barrier_lock); + } + archiveSlotNames = GetAllArchiveSlotsName(); + if (archiveSlotNames == NIL || archiveSlotNames->length == 0) { + ereport(WARNING, (errmsg("[BarrierCreator] could not get archive slot name when barrier start"))); + return; + } + if (t_thrd.barrier_creator_cxt.archive_slot_names == NULL) { + t_thrd.barrier_creator_cxt.archive_slot_names = archiveSlotNames; + t_thrd.barrier_creator_cxt.first_cn_timeline = + GetObsFirstCNBarrierTimeline(t_thrd.barrier_creator_cxt.archive_slot_names); + } + if (archiveSlotNames->length > t_thrd.barrier_creator_cxt.archive_slot_names->length) { + t_thrd.barrier_creator_cxt.archive_slot_names = archiveSlotNames; + t_thrd.barrier_creator_cxt.is_first_barrier = true; + gettimeofday(&tv, NULL); + WriteGlobalBarrierListStartTimeOnMedia(TIME_GET_MILLISEC(tv)); + } else if (archiveSlotNames->length < t_thrd.barrier_creator_cxt.archive_slot_names->length) { + t_thrd.barrier_creator_cxt.archive_slot_names = archiveSlotNames; + } + } + pg_usleep_retry(500000L, 0); + if (!startCsnBarrier && g_instance.archive_obs_cxt.archive_slot_num == 0) { + g_instance.barrier_creator_cxt.stop = true; + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + if (g_instance.archive_thread_info.obsBarrierArchPID[i] != 0) { + signal_child(g_instance.archive_thread_info.obsBarrierArchPID[i], SIGUSR2, -1); + } + } + break; + } + gettimeofday(&tv, NULL); + /* create barrier with increasing index */ + +#ifdef ENABLE_MULTIPLE_NODES + if (IsGotPoolReload()) { + BarrierCreatorPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); + if (!IsFirstCn())
break; } -#endif - gettimeofday(&tv, NULL); + ereport(DEBUG1, (errmsg("[BarrierCreator] auto_csn_barrier: %d", startCsnBarrier))); + if (startCsnBarrier) { + rc = snprintf_s(barrier_name, BARRIER_NAME_LEN, BARRIER_NAME_LEN - 1, CSN_BARRIER_NAME); + securec_check_ss_c(rc, "\0", "\0"); - /* create barrier with increasing index */ - ereport(DEBUG1, (errmsg("[BarrierCreator] %s is barrier creator", g_instance.attr.attr_common.PGXCNodeName))); - rc = snprintf_s(barrier_name, BARRIER_NAME_LEN, BARRIER_NAME_LEN - 1, "hadr_%020" PRIu64 "_%013ld", index, - TIME_GET_MILLISEC(tv)); - securec_check_ss_c(rc, "\0", "\0"); - ereport(DEBUG1, (errmsg("[BarrierCreator] creating barrier %s", barrier_name))); -#ifdef ENABLE_MULTIPLE_NODES - RequestBarrier(barrier_name, NULL); + RequestBarrier(barrier_name, NULL); + ereport(LOG, (errmsg("[BarrierCreator]barrier %s created", barrier_name))); + } #else + rc = snprintf_s(barrier_name, BARRIER_NAME_LEN, BARRIER_NAME_LEN - 1, "hadr_%020" PRIu64 "_%013ld", index, + TIME_GET_MILLISEC(tv)); + securec_check_ss_c(rc, "\0", "\0"); + DisasterRecoveryRequestBarrier(barrier_name); + ereport(LOG, (errmsg("[BarrierCreator] barrier %s created", barrier_name))); #endif index++; } ereport(LOG, (errmsg("[BarrierCreator] barrier creator thread exits."))); + if (t_thrd.barrier_creator_cxt.barrier_update_last_time_info != 0) { + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + if (t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName != NULL) { + pfree_ext(t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName); + } + } + pfree_ext(t_thrd.barrier_creator_cxt.barrier_update_last_time_info); + } + destroy_handles(); + FreeBarrierLsnInfo(); + proc_exit(0); } diff --git a/src/gausskernel/process/postmaster/barrier_preparse.cpp b/src/gausskernel/process/postmaster/barrier_preparse.cpp new file mode 100644 index 000000000..b06e024fe --- /dev/null +++ b/src/gausskernel/process/postmaster/barrier_preparse.cpp @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2021 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ------------------------------------------------------------------------- + * + * barrier_preparse.cpp + * + * IDENTIFICATION + * src/gausskernel/process/postmaster/barrier_preparse.cpp + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" +#include "gs_thread.h" +#include +#include +#include +#include + +#include "access/xlog.h" +#include "access/xlog_internal.h" + +#include "libpq/pqsignal.h" +#include "miscadmin.h" +#include "pgstat.h" +#include "storage/ipc.h" +#include "replication/walreceiver.h" +#include "pgxc/barrier.h" +#include "postmaster/barrier_preparse.h" + +typedef struct XLogPageReadPrivate { + const char *datadir; + TimeLineID tli; +} XLogPageReadPrivate; + +#define NEED_INSERT_INTO_HASH \ + ((record->xl_rmid == RM_BARRIER_ID) && ((info == XLOG_BARRIER_SWITCHOVER) || \ + (IS_PGXC_COORDINATOR && info == XLOG_BARRIER_COMMIT) || (IS_PGXC_DATANODE && info == XLOG_BARRIER_CREATE))) + +static void InitBarrierHash() +{ + if (g_instance.csn_barrier_cxt.barrier_context == NULL) { + g_instance.csn_barrier_cxt.barrier_context = AllocSetContextCreate(g_instance.instance_context, + "CsnBarrierContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE, + SHARED_CONTEXT); + } + + HASHCTL ctl; + errno_t rc = 0; + + /* Init hash table */ + rc = memset_s(&ctl, sizeof(HASHCTL), 0, sizeof(HASHCTL)); + securec_check(rc, "", ""); + ctl.keysize = MAX_BARRIER_ID_LENGTH * sizeof(char); + ctl.entrysize = MAX_BARRIER_ID_LENGTH * sizeof(char); + ctl.hash = string_hash; + ctl.hcxt = g_instance.csn_barrier_cxt.barrier_context; + g_instance.csn_barrier_cxt.barrier_hash_table = hash_create("Barrier Id Storage Table", INIBARRIERCACHESIZE, + &ctl, HASH_ELEM | HASH_FUNCTION | HASH_SHRCTX); + g_instance.csn_barrier_cxt.barrier_hashtbl_lock = LWLockAssign(LWTRANCHE_BARRIER_TBL); +} + +static void SetBarrieID(const char *barrierId, XLogRecPtr lsn) +{ + errno_t rc = EOK; + const uint32 shiftSize = 32; + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + + SpinLockAcquire(&walrcv->mutex); + rc = strncpy_s((char *)walrcv->lastReceivedBarrierId, MAX_BARRIER_ID_LENGTH, barrierId, MAX_BARRIER_ID_LENGTH - 1); + securec_check(rc, "\0", "\0"); + + walrcv->lastReceivedBarrierId[MAX_BARRIER_ID_LENGTH - 1] = '\0'; + walrcv->lastReceivedBarrierLSN = lsn; + SpinLockRelease(&walrcv->mutex); + + ereport(LOG, (errmsg("SetBarrieID set the barrier ID is %s, the barrier LSN is %08X/%08X", barrierId, + (uint32)(lsn >> shiftSize), (uint32)lsn))); +} + +static void BarrierPreParseSigHupHandler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.barrier_preparse_cxt.got_SIGHUP = true; + if (t_thrd.proc) { + SetLatch(&t_thrd.proc->procLatch); + } + errno = save_errno; +} + +static void BarrierPreParseShutdownHandler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.barrier_preparse_cxt.shutdown_requested = true; + + if (t_thrd.proc) + SetLatch(&t_thrd.proc->procLatch); + + errno = save_errno; +} + +static void BarrierPreParseQuickDie(SIGNAL_ARGS) +{ + gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); + + /* + * We DO NOT want to run proc_exit() callbacks -- we're here because + * shared memory may be corrupted, so we don't want to try to clean up our + * transaction. Just nail the windows shut and get out of town. 
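The preparse thread's SIGHUP/SIGTERM handlers above follow the classic async-signal-safe idiom: save errno, set a flag, wake the thread's latch, restore errno. A minimal compilable sketch of that idiom; volatile sig_atomic_t stands in for the thread-local flag, and the latch wake is reduced to a comment:

```
#include <csignal>
#include <cerrno>
#include <cstdio>

// Do nothing in the handler except record the event and (in the kernel)
// wake the main loop via SetLatch(&t_thrd.proc->procLatch).
static volatile sig_atomic_t got_sighup = 0;

static void sighup_handler(int)
{
    int save_errno = errno;   // handlers must not clobber errno
    got_sighup = 1;
    // SetLatch(&t_thrd.proc->procLatch) would go here
    errno = save_errno;
}

int main()
{
    std::signal(SIGHUP, sighup_handler);
    std::raise(SIGHUP);                         // simulate a reload request
    if (got_sighup) {
        got_sighup = 0;
        std::puts("reloading configuration");   // ProcessConfigFile(PGC_SIGHUP) in the kernel
    }
    return 0;
}
```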
Now that + * there's an atexit callback to prevent third-party code from breaking + * things by calling exit() directly, we have to reset the callbacks + * explicitly to make this work as intended. + */ + on_exit_reset(); + + /* + * Note we do exit(2) not exit(0). This is to force the postmaster into a + * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * backend. This is necessary precisely because we don't clean up our + * shared memory state. (The "dead man switch" mechanism in pmsignal.c + * should ensure the postmaster sees this as a crash, too, but no harm in + * being doubly sure.) + */ + exit(2); +} + +static void BarrierPreParseSigUsr1Handler(SIGNAL_ARGS) +{ + int saveErrno = errno; + + latch_sigusr1_handler(); + + errno = saveErrno; +} + +/* + * Called when the BarrierPreParseMain is ending. + */ +static void ShutdownBarrierPreParse(int code, Datum arg) +{ + g_instance.proc_base->BarrierPreParseLatch = NULL; +} + +void SetBarrierPreParseLsn(XLogRecPtr startptr) +{ + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + SpinLockAcquire(&walrcv->mutex); + walrcv->lastReceivedBarrierLSN = startptr; + SpinLockRelease(&walrcv->mutex); +} + +void BarrierPreParseMain(void) +{ + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + MemoryContext preParseContext; + XLogRecord *record = NULL; + XLogReaderState *xlogreader = NULL; + char *errormsg = NULL; + XLogPageReadPrivate readprivate; + XLogRecPtr startLSN; + XLogRecPtr preStartLSN; + bool found = false; + XLogRecPtr barrierLSN; + char *xLogBarrierId = NULL; + char barrierId[MAX_BARRIER_ID_LENGTH] = {0}; + const uint32 shiftSize = 32; + int rc; + + ereport(LOG, (errmsg("[BarrierPreParse] barrier preparse thread started"))); + + /* + * Reset some signals that are accepted by postmaster but not here + */ + (void)gspqsignal(SIGHUP, BarrierPreParseSigHupHandler); + (void)gspqsignal(SIGINT, SIG_IGN); + (void)gspqsignal(SIGTERM, BarrierPreParseShutdownHandler); + (void)gspqsignal(SIGQUIT, BarrierPreParseQuickDie); /* hard crash time */ + (void)gspqsignal(SIGALRM, SIG_IGN); + (void)gspqsignal(SIGPIPE, SIG_IGN); + (void)gspqsignal(SIGUSR1, BarrierPreParseSigUsr1Handler); + (void)gspqsignal(SIGUSR2, SIG_IGN); + + /* + * Reset some signals that are accepted by postmaster but not here + */ + (void)gspqsignal(SIGCHLD, SIG_DFL); + (void)gspqsignal(SIGTTIN, SIG_DFL); + (void)gspqsignal(SIGTTOU, SIG_DFL); + (void)gspqsignal(SIGCONT, SIG_DFL); + (void)gspqsignal(SIGWINCH, SIG_DFL); + + /* We allow SIGQUIT (quickdie) at all times */ + (void)sigdelset(&t_thrd.libpq_cxt.BlockSig, SIGQUIT); + + on_shmem_exit(ShutdownBarrierPreParse, 0); + + preParseContext = AllocSetContextCreate(t_thrd.top_mem_cxt, "Barrier PreParse", ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + (void)MemoryContextSwitchTo(preParseContext); + + /* + * Unblock signals (they were blocked when the postmaster forked us) + */ + gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); + (void)gs_signal_unblock_sigusr2(); + + g_instance.proc_base->BarrierPreParseLatch = &t_thrd.proc->procLatch; + + startLSN = walrcv->lastReceivedBarrierLSN; + ereport(LOG, (errmsg("[BarrierPreParse] preparse thread start at %08X/%08X", (uint32)(startLSN >> shiftSize), + (uint32)startLSN))); + + if (g_instance.csn_barrier_cxt.barrier_hash_table == NULL) { + InitBarrierHash(); + } + + readprivate.datadir = t_thrd.proc_cxt.DataDir; + readprivate.tli = GetRecoveryTargetTLI(); + + xlogreader = 
XLogReaderAllocate(&SimpleXLogPageRead, &readprivate); + if (xlogreader == NULL) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("memory is temporarily unavailable while allocate xlog reader"))); + + /* + * Loop forever + */ + for (;;) { + /* Clear any already-pending wakeups */ + ResetLatch(&t_thrd.proc->procLatch); + + if (t_thrd.barrier_preparse_cxt.got_SIGHUP) { + t_thrd.barrier_preparse_cxt.got_SIGHUP = false; + ProcessConfigFile(PGC_SIGHUP); + } + + if (t_thrd.barrier_preparse_cxt.shutdown_requested) { + ereport(LOG, (errmsg("[BarrierPreParse] preparse thread shut down"))); + XLogReaderFree(xlogreader); + proc_exit(0); /* done */ + } + + found = false; + preStartLSN = startLSN; + startLSN = XLogFindNextRecord(xlogreader, startLSN); + if (XLogRecPtrIsInvalid(startLSN)) { + startLSN = preStartLSN; + if (!XLByteEQ(walrcv->receiver_flush_location, startLSN)) { + /* reset startLSN */ + startLSN = walrcv->lastRecoveredBarrierLSN; + ereport(LOG, (errmsg("[BarrierPreParse] reset startLSN with lastRecoveredBarrierLSN: %08X/%08X", + (uint32)(startLSN >> shiftSize), (uint32)startLSN))); + } + continue; + } + + do { + record = XLogReadRecord(xlogreader, startLSN, &errormsg); + if (record == NULL) { + break; + } + uint8 info = XLogRecGetInfo(xlogreader) & ~XLR_INFO_MASK; + if (NEED_INSERT_INTO_HASH) { + xLogBarrierId = XLogRecGetData(xlogreader); + if (!IS_CSN_BARRIER(xLogBarrierId)) { + ereport(WARNING, (errmsg("[BarrierPreParse] %s is not for standby cluster", xLogBarrierId))); + } else { + // insert into hash table + found = true; + barrierLSN = xlogreader->EndRecPtr; + rc = strncpy_s((char *)barrierId, MAX_BARRIER_ID_LENGTH, xLogBarrierId, MAX_BARRIER_ID_LENGTH - 1); + securec_check(rc, "\0", "\0"); + barrierId[MAX_BARRIER_ID_LENGTH - 1] = '\0'; + LWLockAcquire(g_instance.csn_barrier_cxt.barrier_hashtbl_lock, LW_EXCLUSIVE); + BarrierCacheInsertBarrierId(barrierId); + LWLockRelease(g_instance.csn_barrier_cxt.barrier_hashtbl_lock); + ereport(LOG, (errmsg("[BarrierPreParse] insert barrierID %s to the hash table, rmid: %d, crc: %d.", + barrierId, record->xl_rmid, record->xl_crc))); + } + } + startLSN = InvalidXLogRecPtr; + } while (!t_thrd.barrier_preparse_cxt.shutdown_requested); + + /* close xlogreadfd after circulation */ + CloseXlogFile(); + + if (found) { + SetBarrieID(barrierId, barrierLSN); + } + + startLSN = XLogRecPtrIsInvalid(xlogreader->ReadRecPtr) ? 
preStartLSN : xlogreader->ReadRecPtr; + + if (XLogRecPtrIsInvalid(xlogreader->ReadRecPtr) && errormsg) { + ereport(LOG, (errmsg("[BarrierPreParse] preparse thread get an error info %s", errormsg))); + } + const long sleepTime = 1000; + rc = WaitLatch(&t_thrd.proc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, sleepTime); + if (((unsigned int)rc) & WL_POSTMASTER_DEATH) { + XLogReaderFree(xlogreader); + ereport(LOG, (errmsg("[BarrierPreParse] preparse thread shut down with code 1"))); + gs_thread_exit(1); + } + } +} + +void WakeUpBarrierPreParseBackend() +{ + if (g_instance.pid_cxt.BarrierPreParsePID != 0) { + if (g_instance.proc_base->BarrierPreParseLatch != NULL) { + SetLatch(g_instance.proc_base->BarrierPreParseLatch); + } + } +} diff --git a/src/gausskernel/process/postmaster/bgworker.cpp b/src/gausskernel/process/postmaster/bgworker.cpp index 10be9aba7..f30e94618 100644 --- a/src/gausskernel/process/postmaster/bgworker.cpp +++ b/src/gausskernel/process/postmaster/bgworker.cpp @@ -24,6 +24,7 @@ #include "utils/postinit.h" #include "utils/snapmgr.h" #include "commands/dbcommands.h" +#include "pgstat.h" extern void StreamSaveTxnContext(StreamTxnContext* stc); extern void StreamRestoreTxnContext(StreamTxnContext* stc); @@ -259,6 +260,9 @@ void BackgroundWorkerMain(void) /* Report the error to the parallel leader and the server log */ EmitErrorReport(); + + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * These operations are really just a minimal subset of * AbortTransaction(). We don't have very many resources to worry @@ -366,13 +370,19 @@ bool RegisterBackgroundWorker(BgWorkerContext *bwc) bgw->disable_count = 0; /* Construct bgworker thread args */ - bwa = (BackgroundWorkerArgs*)palloc(sizeof(BackgroundWorkerArgs)); + bwa = (BackgroundWorkerArgs*)MemoryContextAllocZero( + INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(BackgroundWorkerArgs)); bwa->bgwcontext = bwc; bwa->bgworker = bgw; bwa->bgworkerId = bgw->bgw_id; /* Fork a new worker thread */ bgw->bgw_notify_pid = initialize_util_thread(BGWORKER, bwa); + /* failed to fork a new thread */ + if (bgw->bgw_notify_pid == 0) { + pfree_ext(bwa); + return false; + } /* Copy the registration data into the registered workers list. */ slist_push_head(&t_thrd.bgworker_cxt.bgwlist, &bgw->rw_lnode); @@ -464,7 +474,7 @@ void BgworkerListWaitFinish(int *nparticipants) int nunstarts = 0; Assert(nparticipants != NULL); - + WaitState oldStatus = pgstat_report_waitstatus(STATE_WAIT_SYNC_BGWORKERS); while (!alldone) { nfinished = 0; slist_foreach(iter, &t_thrd.bgworker_cxt.bgwlist) { @@ -499,6 +509,7 @@ void BgworkerListWaitFinish(int *nparticipants) usleep(BGWORKER_LOOP_SLEEP_TIME); } } + pgstat_report_waitstatus(oldStatus); } int LaunchBackgroundWorkers(int nworkers, void *bgshared, bgworker_main bgmain, bgworker_exit bgexit) diff --git a/src/gausskernel/process/postmaster/bgwriter.cpp b/src/gausskernel/process/postmaster/bgwriter.cpp index a87be3d69..e5b67dacf 100755 --- a/src/gausskernel/process/postmaster/bgwriter.cpp +++ b/src/gausskernel/process/postmaster/bgwriter.cpp @@ -68,12 +68,6 @@ */ #define HIBERNATE_FACTOR 50 -/* - * Interval in which standby snapshots are logged into the WAL stream, in - * milliseconds. 
- */ -#define LOG_SNAPSHOT_INTERVAL_MS 15000 - /* * LSN and timestamp at which we last issued a LogStandbySnapshot(), to avoid * doing so too often or repeatedly if there has been no other write activity @@ -143,6 +137,9 @@ static void bgwriter_handle_exceptions(WritebackContext wb_context, MemoryContex /* abort async io, must before LWlock release */ AbortAsyncListIO(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * These operations are really just a minimal subset of * AbortTransaction(). We don't have very many resources to worry @@ -276,6 +273,15 @@ void BackgroundWriterMain(void) bool can_hibernate = false; int rc; + /* + * when double write is disabled, pg_dw_meta will be created with dw_file_num = 0, so + * here is for upgrading process. bgwriter will run when enable_incremetal_checkpoint = off. + */ + if (pg_atomic_read_u32(&g_instance.dw_batch_cxt.dw_version) < DW_SUPPORT_REABLE_DOUBLE_WRITE + && t_thrd.proc->workingVersionNum >= DW_SUPPORT_REABLE_DOUBLE_WRITE) { + dw_upgrade_renable_double_write(); + } + /* Clear any already-pending wakeups */ ResetLatch(&t_thrd.proc->procLatch); @@ -350,13 +356,6 @@ void BackgroundWriterMain(void) if (now >= timeout) { LogCheckSlot(); } - - if (now >= timeout) { - uint32 term_cur = Max(g_instance.comm_cxt.localinfo_cxt.term_from_file, - g_instance.comm_cxt.localinfo_cxt.term_from_xlog); - write_term_log(term_cur); - g_instance.comm_cxt.localinfo_cxt.set_term = true; - } } /* @@ -691,7 +690,7 @@ static void drop_rel_all_forks_buffers() } } LWLockRelease(g_instance.bgwriter_cxt.rel_hashtbl_lock); - + if (rel_num > 0) { DropRelFileNodeAllBuffersUsingHash(rel_bak); diff --git a/src/gausskernel/process/postmaster/cbmwriter.cpp b/src/gausskernel/process/postmaster/cbmwriter.cpp index 380108e9c..c82192c3b 100755 --- a/src/gausskernel/process/postmaster/cbmwriter.cpp +++ b/src/gausskernel/process/postmaster/cbmwriter.cpp @@ -140,6 +140,8 @@ void CBMWriterMain(void) /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * These operations are really just a minimal subset of * AbortTransaction(). We don't have very many resources to worry diff --git a/src/gausskernel/process/postmaster/checkpointer.cpp b/src/gausskernel/process/postmaster/checkpointer.cpp index 52a0e9142..c45b702ee 100755 --- a/src/gausskernel/process/postmaster/checkpointer.cpp +++ b/src/gausskernel/process/postmaster/checkpointer.cpp @@ -44,6 +44,7 @@ #include "miscadmin.h" #include "pgstat.h" #include "postmaster/bgwriter.h" +#include "postmaster/pagewriter.h" #include "replication/syncrep.h" #include "storage/buf/bufmgr.h" #include "storage/ipc.h" @@ -113,10 +114,6 @@ * is truncated solely by checkpointer through its smgrsync. * ---------- */ -typedef struct { - SyncRequestType type; /* request type */ - FileTag ftag; /* file identifier */ -} CheckpointerRequest; typedef struct CheckpointerShmemStruct { ThreadId checkpointer_pid; /* PID (0 if not started) */ @@ -276,6 +273,8 @@ void CheckpointerMain(void) CallCheckpointCallback(EVENT_CHECKPOINT_ABORT, 0); #endif + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * These operations are really just a minimal subset of * AbortTransaction(). 
We don't have very many resources to worry @@ -364,7 +363,7 @@ void CheckpointerMain(void) */ for (;;) { bool do_checkpoint = false; - bool do_filesync = false; + bool do_dirty_flush = false; int flags = 0; pg_time_t now; int elapsed_secs; @@ -379,7 +378,7 @@ void CheckpointerMain(void) /* * Process any requests or signals received recently. */ - AbsorbFsyncRequests(); + CkptAbsorbFsyncRequests(); if (t_thrd.checkpoint_cxt.got_SIGHUP) { t_thrd.checkpoint_cxt.got_SIGHUP = false; @@ -481,11 +480,6 @@ void CheckpointerMain(void) do_restartpoint = false; } - /* timed checkpoint request and force checkpoint have higher priority than file-sync-only request */ - if ((flags & CHECKPOINT_FILE_SYNC) && !(flags & CHECKPOINT_CAUSE_TIME) && !(flags & CHECKPOINT_FORCE)) { - do_filesync = true; - } - /* * We will warn if (a) too soon since last checkpoint (whatever * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag @@ -514,14 +508,26 @@ void CheckpointerMain(void) t_thrd.checkpoint_cxt.ckpt_start_time = now; t_thrd.checkpoint_cxt.ckpt_cached_elapsed = 0; + if (flags & CHECKPOINT_FLUSH_DIRTY) { + do_dirty_flush = true; + } /* - * Do a normal checkpoint/restartpoint or just do file sync. + * Do a normal checkpoint/restartpoint. */ - if (do_filesync) { - if (u_sess->attr.attr_common.log_checkpoints) { - ereport(LOG, (errmsg("file sync checkpoint is trigger."))); + if (do_dirty_flush) { + ereport(LOG, (errmsg("[file repair] request checkpoint, flush all dirty page."))); + Assert(RecoveryInProgress()); + if (ENABLE_INCRE_CKPT) { + g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc = get_dirty_page_queue_tail(); + pg_memory_barrier(); + if (get_dirty_page_num() > 0) { + g_instance.ckpt_cxt_ctl->flush_all_dirty_page = true; + ereport(LOG, (errmsg("[file repair] need flush %ld pages.", get_dirty_page_num()))); + CheckPointBuffers(flags, true); + } + } else { + CheckPointBuffers(flags, true); } - CheckPointSyncWithAbsorption(); } else if (!do_restartpoint) { CreateCheckPoint(flags); ckpt_performed = true; @@ -552,7 +558,7 @@ void CheckpointerMain(void) * checkpoints happen at a predictable spacing. */ t_thrd.checkpoint_cxt.last_checkpoint_time = now; - } else if (!do_filesync) { + } else if (!do_dirty_flush) { /* * We were not able to perform the restartpoint (checkpoints * throw an ERROR in case of error). Most likely because we @@ -718,7 +724,7 @@ void CheckpointWriteDelay(int flags, double progress) UpdateSharedMemoryConfig(); } - AbsorbFsyncRequests(); + CkptAbsorbFsyncRequests(); t_thrd.checkpoint_cxt.absorbCounter = WRITES_PER_ABSORB; CheckArchiveTimeout(); @@ -741,7 +747,7 @@ void CheckpointWriteDelay(int flags, double progress) * operations even when we don't sleep, to prevent overflow of the * fsync request queue. */ - AbsorbFsyncRequests(); + CkptAbsorbFsyncRequests(); t_thrd.checkpoint_cxt.absorbCounter = WRITES_PER_ABSORB; } } @@ -906,6 +912,7 @@ static void ReqShutdownHandler(SIGNAL_ARGS) * CheckpointerShmemSize * Compute space needed for checkpointer-related shared memory */ +const uint DDL_REQUEST_MAX = 100000; Size CheckpointerShmemSize(void) { Size size; @@ -915,7 +922,12 @@ Size CheckpointerShmemSize(void) * NBuffers. This may prove too large or small ... 
*/ size = offsetof(CheckpointerShmemStruct, requests); - size = add_size(size, mul_size(TOTAL_BUFFER_NUM, sizeof(CheckpointerRequest))); + if (ENABLE_INCRE_CKPT) { + /* incremental checkpoint, the checkpoint thread only handle the drop table request and drop db request */ + size = add_size(size, mul_size(DDL_REQUEST_MAX, sizeof(CheckpointerRequest))); + } else { + size = add_size(size, mul_size(TOTAL_BUFFER_NUM, sizeof(CheckpointerRequest))); + } return size; } @@ -941,16 +953,7 @@ void CheckpointerShmemInit(void) /* The memory of the memset sometimes exceeds 2 GB. so, memset_s cannot be used. */ MemSet((char*)t_thrd.checkpoint_cxt.CheckpointerShmem, 0, size); SpinLockInit(&t_thrd.checkpoint_cxt.CheckpointerShmem->ckpt_lck); - t_thrd.checkpoint_cxt.CheckpointerShmem->max_requests = TOTAL_BUFFER_NUM; - } -} - -void set_flag_checkpoint_file_sync(int flags, volatile CheckpointerShmemStruct* cps) -{ - if (flags & CHECKPOINT_FILE_SYNC) { - } else { - /* normal checkpoint request also includes file sync, so unset this bit */ - cps->ckpt_flags &= ~CHECKPOINT_FILE_SYNC; + t_thrd.checkpoint_cxt.CheckpointerShmem->max_requests = ENABLE_INCRE_CKPT ? DDL_REQUEST_MAX : TOTAL_BUFFER_NUM; } } @@ -1021,7 +1024,6 @@ void RequestCheckpoint(int flags) old_failed = cps->ckpt_failed; old_started = cps->ckpt_started; cps->ckpt_flags |= flags; - set_flag_checkpoint_file_sync(flags, cps); SpinLockRelease(&cps->ckpt_lck); /* @@ -1133,7 +1135,7 @@ void RequestCheckpoint(int flags) * the queue is full and contains no duplicate entries. In that case, we * let the backend know by returning false. */ -bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type) +bool CkptForwardSyncRequest(const FileTag *ftag, SyncRequestType type) { CheckpointerRequest* request = NULL; bool too_full = false; @@ -1196,6 +1198,78 @@ bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type) return true; } +int getDuplicateRequest(CheckpointerRequest *requests, int num_requests, bool *skip_slot) +{ + struct CheckpointerSlotMapping { + CheckpointerRequest request; + int slot; + }; + + int n; + int num_skipped = 0; + HASHCTL ctl; + HTAB* htab = NULL; + + /* Initialize temporary hash table */ + errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); + securec_check(rc, "\0", "\0"); + ctl.keysize = sizeof(CheckpointerRequest); + ctl.entrysize = sizeof(struct CheckpointerSlotMapping); + ctl.hash = tag_hash; + ctl.hcxt = CurrentMemoryContext; + + htab = hash_create("CompactRequestQueue", num_requests, &ctl, + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + + /* + * The basic idea here is that a request can be skipped if it's followed + * by a later, identical request. It might seem more sensible to work + * backwards from the end of the queue and check whether a request is + * *preceded* by an earlier, identical request, in the hopes of doing less + * copying. But that might change the semantics, if there's an + * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so + * we do it this way. It would be possible to be even smarter if we made + * the code below understand the specific semantics of such requests (it + * could blow away preceding entries that would end up being canceled + * anyhow), but it's not clear that the extra complexity would buy us + * anything. + */ + + for (n = 0; n < num_requests; n++) { + CheckpointerRequest* request = NULL; + struct CheckpointerSlotMapping* slotmap; + bool found = false; + + /* + * We use the request struct directly as a hashtable key. 
This + * assumes that any padding bytes in the structs are consistently the + * same, which should be okay because we zeroed them in + * CheckpointerShmemInit. Note also that RelFileNode had better + * contain no pad bytes. + */ + request = &requests[n]; + slotmap = (CheckpointerSlotMapping*)hash_search(htab, request, HASH_ENTER, &found); + + if (found) { + /* Duplicate, so mark the previous occurrence as skippable */ + skip_slot[slotmap->slot] = true; + num_skipped++; + } + + /* Remember slot containing latest occurrence of this request value */ + slotmap->slot = n; + } + + /* Done with the hash table. */ + hash_destroy(htab); + + /* If no duplicates, we're out of luck. */ + if (!num_skipped) { + return 0; + } + + return num_skipped; +} /* * CompactCheckpointerRequestQueue @@ -1215,16 +1289,9 @@ bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type) */ static bool CompactCheckpointerRequestQueue(void) { - struct CheckpointerSlotMapping { - CheckpointerRequest request; - int slot; - }; - - int n, preserve_count; - int num_skipped = 0; - HASHCTL ctl; - HTAB* htab = NULL; + int preserve_count; bool* skip_slot = NULL; + int num_skipped = 0; /* must hold CheckpointerCommLock in exclusive mode */ Assert(LWLockHeldByMe(CheckpointerCommLock)); @@ -1232,62 +1299,11 @@ static bool CompactCheckpointerRequestQueue(void) /* Initialize skip_slot array */ skip_slot = (bool*)palloc0(sizeof(bool) * t_thrd.checkpoint_cxt.CheckpointerShmem->num_requests); - /* Initialize temporary hash table */ - errno_t ret = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); - securec_check(ret, "\0", "\0"); - ctl.keysize = sizeof(CheckpointerRequest); - ctl.entrysize = sizeof(struct CheckpointerSlotMapping); - ctl.hash = tag_hash; - ctl.hcxt = CurrentMemoryContext; - - htab = hash_create("CompactCheckpointerRequestQueue", - t_thrd.checkpoint_cxt.CheckpointerShmem->num_requests, - &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - - /* - * The basic idea here is that a request can be skipped if it's followed - * by a later, identical request. It might seem more sensible to work - * backwards from the end of the queue and check whether a request is - * *preceded* by an earlier, identical request, in the hopes of doing less - * copying. But that might change the semantics, if there's an - * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so - * we do it this way. It would be possible to be even smarter if we made - * the code below understand the specific semantics of such requests (it - * could blow away preceding entries that would end up being canceled - * anyhow), but it's not clear that the extra complexity would buy us - * anything. - */ - for (n = 0; n < t_thrd.checkpoint_cxt.CheckpointerShmem->num_requests; n++) { - CheckpointerRequest* request = NULL; - struct CheckpointerSlotMapping* slotmap; - bool found = false; - - /* - * We use the request struct directly as a hashtable key. This - * assumes that any padding bytes in the structs are consistently the - * same, which should be okay because we zeroed them in - * CheckpointerShmemInit. Note also that RelFileNode had better - * contain no pad bytes. 
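getDuplicateRequest() factors the duplicate-detection loop out of CompactCheckpointerRequestQueue(): scan the queue front to back, remember the latest slot for each distinct request in a hash table, and mark every earlier occurrence skippable, which (as the comment explains) preserves ordering relative to any intervening forget-requests. A self-contained sketch of the same algorithm, with the request struct reduced to two fields:

```
#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

// Simplified stand-in for CheckpointerRequest (type + file tag).
struct Request {
    uint32_t type;
    uint32_t file_id;
    bool operator==(const Request& o) const { return type == o.type && file_id == o.file_id; }
};
struct RequestHash {
    size_t operator()(const Request& r) const {
        return std::hash<uint64_t>()((uint64_t(r.type) << 32) | r.file_id);
    }
};

// Mark every request that is followed by a later identical request.
static int mark_duplicates(const std::vector<Request>& q, std::vector<bool>& skip)
{
    std::unordered_map<Request, size_t, RequestHash> latest;
    int skipped = 0;
    for (size_t n = 0; n < q.size(); ++n) {
        auto it = latest.find(q[n]);
        if (it != latest.end()) {   // seen before: the older copy is redundant
            skip[it->second] = true;
            ++skipped;
        }
        latest[q[n]] = n;           // remember the latest occurrence
    }
    return skipped;
}

int main()
{
    std::vector<Request> q = {{1, 7}, {2, 9}, {1, 7}, {2, 9}, {3, 1}};
    std::vector<bool> skip(q.size(), false);
    std::printf("skipped %d of %zu requests\n", mark_duplicates(q, skip), q.size());
    return 0;  // slots 0 and 1 are marked skippable; 2, 3 and 4 survive
}
```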
- */ - request = &t_thrd.checkpoint_cxt.CheckpointerShmem->requests[n]; - slotmap = (CheckpointerSlotMapping*)hash_search(htab, request, HASH_ENTER, &found); - - if (found) { - /* Duplicate, so mark the previous occurrence as skippable */ - skip_slot[slotmap->slot] = true; - num_skipped++; - } - - /* Remember slot containing latest occurrence of this request value */ - slotmap->slot = n; - } - - /* Done with the hash table. */ - hash_destroy(htab); + num_skipped = getDuplicateRequest(t_thrd.checkpoint_cxt.CheckpointerShmem->requests, + t_thrd.checkpoint_cxt.CheckpointerShmem->num_requests, skip_slot); /* If no duplicates, we're out of luck. */ - if (!num_skipped) { + if (num_skipped == 0) { pfree(skip_slot); return false; } @@ -1295,7 +1311,7 @@ static bool CompactCheckpointerRequestQueue(void) /* We found some duplicates; remove them. */ preserve_count = 0; - for (n = 0; n < t_thrd.checkpoint_cxt.CheckpointerShmem->num_requests; n++) { + for (int n = 0; n < t_thrd.checkpoint_cxt.CheckpointerShmem->num_requests; n++) { if (skip_slot[n]) continue; @@ -1323,7 +1339,7 @@ static bool CompactCheckpointerRequestQueue(void) * we start fsync'ing. Since CreateCheckPoint sometimes runs in * non-checkpointer processes, do nothing if not checkpointer. */ -void AbsorbFsyncRequests(void) +void CkptAbsorbFsyncRequests(void) { CheckpointerRequest* requests = NULL; CheckpointerRequest* request = NULL; @@ -1442,90 +1458,3 @@ void CallCheckpointCallback(CheckpointEvent checkpointEvent, XLogRecPtr lsn) } } #endif - -/* - * CheckPointSyncForDw() -- File sync before dw file can be truncated or recycled. - * Normally, file sync operation is solely handled by checkpointer process. When pagewriter finds short of - * dw file space, it simply requests a 'fake' file-sync checkpoint. - * For standalone backends, as well as for startup process performing dw init, they can handle fsync request - * themselves since no other concurrent pagewriter process should be present. - */ -void CheckPointSyncForDw(void) -{ - if (u_sess->storage_cxt.pendingOps) { - Assert(!IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess()); - ProcessSyncRequests(); - } else { - int64 old_fsync_start = 0; - int64 new_fsync_start = 0; - int64 new_fsync_done = 0; - volatile CheckpointerShmemStruct* cps = t_thrd.checkpoint_cxt.CheckpointerShmem; - SpinLockAcquire(&cps->ckpt_lck); - old_fsync_start = cps->fsync_start; - cps->fsync_request++; - SpinLockRelease(&cps->ckpt_lck); - - RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FILE_SYNC); - - /* Wait for a new checkpoint to start. */ - for (;;) { - SpinLockAcquire(&cps->ckpt_lck); - new_fsync_start = cps->fsync_start; - SpinLockRelease(&cps->ckpt_lck); - - if (new_fsync_start != old_fsync_start) { - break; - } - - CHECK_FOR_INTERRUPTS(); - pg_usleep(100000L); - } - - /* - * We are waiting for ckpt_done >= new_started, in a modulo sense. - */ - for (;;) { - SpinLockAcquire(&cps->ckpt_lck); - new_fsync_done = cps->fsync_done; - SpinLockRelease(&cps->ckpt_lck); - - if (new_fsync_done - new_fsync_start >= 0) { - break; - } - - CHECK_FOR_INTERRUPTS(); - pg_usleep(100000L); - } - } - - return; -} - -/* - * CheckPointSyncWithAbsorption() -- Sync files to disk and reset fsync flags. 
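For readers following the refactor, here is the same keep-the-latest-occurrence deduplication as a self-contained sketch, with std::unordered_map standing in for dynahash and a placeholder Request type instead of CheckpointerRequest:

```
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Request { uint64_t key; };

/* Mark every slot whose request value reappears later; return the count. */
static int MarkDuplicates(const std::vector<Request> &reqs, std::vector<bool> &skip)
{
    std::unordered_map<uint64_t, size_t> last_slot; /* value -> latest slot */
    int num_skipped = 0;
    skip.assign(reqs.size(), false);
    for (size_t n = 0; n < reqs.size(); n++) {
        auto it = last_slot.find(reqs[n].key);
        if (it != last_slot.end()) {
            skip[it->second] = true; /* earlier occurrence becomes skippable */
            num_skipped++;
        }
        last_slot[reqs[n].key] = n;  /* remember the latest occurrence */
    }
    return num_skipped;
}
```

Scanning forward and discarding the earlier occurrence, rather than scanning backward, is what preserves the semantics of an intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, exactly as the comment in the kernel code explains.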
- */ -void CheckPointSyncWithAbsorption(void) -{ - volatile CheckpointerShmemStruct* cps = t_thrd.checkpoint_cxt.CheckpointerShmem; - - SpinLockAcquire(&cps->ckpt_lck); - cps->fsync_start++; - SpinLockRelease(&cps->ckpt_lck); - - ProcessSyncRequests(); - - SpinLockAcquire(&cps->ckpt_lck); - cps->fsync_done = cps->fsync_start; - SpinLockRelease(&cps->ckpt_lck); -} - -int64 CheckPointGetFsyncRequset() -{ - volatile CheckpointerShmemStruct* cps = t_thrd.checkpoint_cxt.CheckpointerShmem; - SpinLockAcquire(&cps->ckpt_lck); - int64 request = cps->fsync_request; - SpinLockRelease(&cps->ckpt_lck); - - return request; -} - diff --git a/src/gausskernel/process/postmaster/fencedudf.cpp b/src/gausskernel/process/postmaster/fencedudf.cpp index 3e53048c0..cc95b5c00 100644 --- a/src/gausskernel/process/postmaster/fencedudf.cpp +++ b/src/gausskernel/process/postmaster/fencedudf.cpp @@ -1721,6 +1721,7 @@ static void FindOrInsertUDFHashTab(FunctionCallInfoData* fcinfo) /* Look up the function within the library */ flinfo->fn_addr = (PGFunction)pg_dlsym(libHandle, flinfo->fnName); } else if (flinfo->fn_languageId == JavalanguageId){ +#ifndef ENABLE_LITE_MODE /* Load libpljava.so to support Java UDF */ char pathbuf[MAXPGPATH]; get_lib_path(my_exec_path, pathbuf); @@ -1742,6 +1743,9 @@ static void FindOrInsertUDFHashTab(FunctionCallInfoData* fcinfo) errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("load java_call_handler failed."))); flinfo->fn_addr = pljava_call_handler; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { char pathbuf[MAXPGPATH]; get_lib_path(my_exec_path, pathbuf); diff --git a/src/gausskernel/process/postmaster/globalstats.cpp b/src/gausskernel/process/postmaster/globalstats.cpp index 82cef5701..921c87dc7 100644 --- a/src/gausskernel/process/postmaster/globalstats.cpp +++ b/src/gausskernel/process/postmaster/globalstats.cpp @@ -241,6 +241,9 @@ NON_EXEC_STATIC void GlobalStatsTrackerMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + FlushErrorState(); /* Now we can allow interrupts again */ @@ -283,6 +286,13 @@ NON_EXEC_STATIC void GlobalStatsTrackerMain() } shutdown: + /* + * Before the thread exits, set global_stats_map to NULL to prevent core dump when the + * backend thread accesses the released memory during the prune operation. + */ + PrepareStatsHashForSwitch(); + g_instance.stat_cxt.tableStat->global_stats_map = NULL; + CompleteStatsHashSwitch(); ereport(LOG, (errmsg("global stats shutting down"))); proc_exit(0); } diff --git a/src/gausskernel/process/postmaster/lwlockmonitor.cpp b/src/gausskernel/process/postmaster/lwlockmonitor.cpp index 04d6eeb11..a7a7005bc 100644 --- a/src/gausskernel/process/postmaster/lwlockmonitor.cpp +++ b/src/gausskernel/process/postmaster/lwlockmonitor.cpp @@ -651,6 +651,8 @@ NON_EXEC_STATIC void FaultMonitorMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * Now return to normal top-level context and clear ErrorContext for diff --git a/src/gausskernel/process/postmaster/pagerepair.cpp b/src/gausskernel/process/postmaster/pagerepair.cpp new file mode 100644 index 000000000..9c68bcf12 --- /dev/null +++ b/src/gausskernel/process/postmaster/pagerepair.cpp @@ -0,0 +1,1722 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * pagerepair.cpp + * Working mode of pagerepair thread, copy the data page from the primary. + * + * IDENTIFICATION + * src/gausskernel/process/postmaster/pagerepair.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "access/xlog_basic.h" +#include "access/xlog_internal.h" +#include "access/multi_redo_api.h" +#include "access/extreme_rto/page_redo.h" +#include "access/parallel_recovery/page_redo.h" +#include "access/parallel_recovery/dispatcher.h" +#include "catalog/catalog.h" +#include "gssignal/gs_signal.h" +#include "knl/knl_instance.h" +#include "service/remote_read_client.h" +#include "storage/ipc.h" +#include "storage/copydir.h" +#include "storage/lmgr.h" +#include "storage/remote_read.h" +#include "storage/smgr/fd.h" +#include "pgstat.h" +#include "postmaster/pagerepair.h" +#include "utils/plog.h" +#include "utils/plog.h" +#include "utils/inval.h" + +const int MAX_THREAD_NAME_LEN = 64; +const int XLOG_LSN_SWAP = 32; +const int TEN_MILLISECOND = 10; +#define MAX(A, B) ((B) > (A) ? (B) : (A)) +#define FILE_REPAIR_LOCK g_instance.repair_cxt.file_repair_hashtbl_lock + +typedef struct XLogPageReadPrivate { + int emode; + bool fetching_ckpt; /* are we fetching a checkpoint record? 
*/ + bool randAccess; +} XLogPageReadPrivate; + +/* -------------------------------- + * signal handler routines + * -------------------------------- + */ +static void SetupPageRepairSignalHook(void); +static void PageRepairSigHupHandler(SIGNAL_ARGS); +static void PageRepairSigUsr1Handler(SIGNAL_ARGS); +static void PageRepairSigUsr2Handler(SIGNAL_ARGS); +static void PageRepairShutDownHandler(SIGNAL_ARGS); +static void PageRepairQuickDie(SIGNAL_ARGS); +static void PageRepairHandleInterrupts(void); + +static void SeqRemoteReadPage(); +static void SeqRemoteReadFile(); +static void checkOtherFile(RepairFileKey key, uint32 max_segno, uint64 size); +static void PushBadFileToRemoteHashTbl(RepairFileKey key); + +#define COMPARE_REPAIR_PAGE_KEY(key1, key2) \ + ((key1).relfilenode.relNode == (key2).relfilenode.relNode && \ + (key1).relfilenode.dbNode == (key2).relfilenode.dbNode && \ + (key1).relfilenode.spcNode == (key2).relfilenode.spcNode && \ + (key1).relfilenode.bucketNode == (key2).relfilenode.bucketNode && \ + (key1).forknum == (key2).forknum && \ + (key1).blocknum == (key2).blocknum) + +#define NOT_SUPPORT_PAGE_REPAIR \ + (g_instance.attr.attr_common.cluster_run_mode == RUN_MODE_STANDBY || \ + g_instance.attr.attr_common.stream_cluster_run_mode == RUN_MODE_STANDBY || \ + t_thrd.xlog_cxt.is_hadr_main_standby || t_thrd.xlog_cxt.is_cascade_standby) + +int CheckBlockLsn(XLogReaderState *xlogreader, RepairBlockKey key, XLogRecPtr page_old_lsn, XLogRecPtr *last_lsn) +{ + RepairBlockKey temp_key = {0}; + bool page_found = false; + bool getlsn = false; + + for (int block_id = 0; block_id <= xlogreader->max_block_id; block_id++) { + XLogRecGetBlockTag(xlogreader, block_id, &temp_key.relfilenode, &temp_key.forknum, &temp_key.blocknum); + if (COMPARE_REPAIR_PAGE_KEY(key, temp_key)) { + page_found = true; + getlsn = XLogRecGetBlockLastLsn(xlogreader, block_id, last_lsn); + Assert(getlsn); + if (XLogRecPtrIsInvalid(*last_lsn)) { + ereport(LOG, + (errmsg("check the repair page successfully, last_lsn is 0," + "the page %u/%u/%u bucketnode %d, forknum is %u, blocknum is %u", + key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode, key.forknum, key.blocknum))); + return CHECK_SUCCESS; + } + /* if the xlog record last_lsn equal the current standby page lsn, means found a complete xlog chain */ + if (*last_lsn == page_old_lsn) { + ereport(LOG, + (errmsg("check the repair page successfully, the page %u/%u/%u bucketnode %d, " + "forknum is %u, blocknum is %u", + key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode, key.forknum, key.blocknum))); + return CHECK_SUCCESS; + } + /* if the xlog record last lsn */ + if (*last_lsn < page_old_lsn) { + ereport(WARNING, + (errmsg("check the repair page, lsn not match, page_old_lsn is %X/%X, last_lsn is %X/%X, " + "could not repair the page %u/%u/%u bucketnode %d, forknum is %u, blocknum is %u", + (uint32)(page_old_lsn >> XLOG_LSN_SWAP), (uint32)page_old_lsn, + (uint32)(*last_lsn >> XLOG_LSN_SWAP), (uint32)*last_lsn, + key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode, key.forknum, key.blocknum))); + return CHECK_FAIL; + } + } + } + + if (!page_found) { + ereport(WARNING, + (errmsg("check the repair page, not get page info, page_old_lsn is %X/%X, last_lsn is %X/%X, " + "could not repair the page %u/%u/%u bucketnode %d, forknum is %u, blocknum is %u", + (uint32)(page_old_lsn >> XLOG_LSN_SWAP), (uint32)page_old_lsn, + 
(uint32)(*last_lsn >> XLOG_LSN_SWAP), (uint32)*last_lsn, + key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode, key.forknum, key.blocknum))); + return CHECK_FAIL; + } + return NEED_CONTINUE_CHECK; +} + +/* CheckPrimaryPageLSN + * Check whether the data page of the primary DN forms a complete xlog chain with the page of the standby DN. + */ +bool CheckPrimaryPageLSN(XLogRecPtr page_old_lsn, XLogRecPtr page_new_lsn, RepairBlockKey key) +{ + XLogRecPtr prev_lsn = InvalidXLogRecPtr; + XLogRecPtr last_lsn = page_new_lsn; + XLogRecord *record = NULL; + char *errormsg = NULL; + XLogReaderState *xlogreader = NULL; + XLogPageReadPrivate readprivate; + errno_t rc; + int ret_code; + + rc = memset_s(&readprivate, sizeof(XLogPageReadPrivate), 0, sizeof(XLogPageReadPrivate)); + securec_check(rc, "", ""); + + xlogreader = XLogReaderAllocate(&XLogPageRead, &readprivate); + if (xlogreader == NULL) { + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), + errdetail("Failed while allocating an XLog reading processor for pagerepair thread"))); + } + + xlogreader->system_identifier = t_thrd.shemem_ptr_cxt.ControlFile->system_identifier; + t_thrd.xlog_cxt.recoveryTargetTLI = t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.ThisTimeLineID; + t_thrd.xlog_cxt.expectedTLIs = readTimeLineHistory(t_thrd.xlog_cxt.recoveryTargetTLI); + + /* page lsn is the xlog EndRecPtr, so need parse the next record, get the prev lsn */ + while (true) { + if (0 == last_lsn % XLogSegSize) { + XLByteAdvance(last_lsn, SizeOfXLogLongPHD); + } else if (0 == last_lsn % XLOG_BLCKSZ) { + XLByteAdvance(last_lsn, SizeOfXLogShortPHD); + } + record = XLogReadRecord(xlogreader, last_lsn, &errormsg); + if (record == NULL) { + ereport(WARNING, + (errmsg("check the repair page, page_old_lsn is %X/%X, could not get the xlog %X/%X " + "could not repair the page %u/%u/%u bucketnode %d, forknum is %u, blocknum is %u", + (uint32)(page_old_lsn >> XLOG_LSN_SWAP), (uint32)page_old_lsn, + (uint32)(last_lsn >> XLOG_LSN_SWAP), (uint32)last_lsn, + key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode, key.forknum, key.blocknum))); + return false; + } + + prev_lsn = record->xl_prev; + record = XLogReadRecord(xlogreader, prev_lsn, &errormsg); + + ret_code = CheckBlockLsn(xlogreader, key, page_old_lsn, &last_lsn); + if (ret_code == CHECK_SUCCESS) { + return true; + } else if (ret_code == CHECK_FAIL) { + return false; + } + } + + ereport(WARNING, + (errmsg("check the repair page, could not found the page info from the xlog " + "could not repair the page, page old lsn is %X/%X, last lsn is %X/%X, page new lsn is %X/%X" + "page info is %u/%u/%u bucketnode %d, forknum is %u, blocknum is %u", + (uint32)(page_old_lsn >> XLOG_LSN_SWAP), (uint32)page_old_lsn, + (uint32)(last_lsn >> XLOG_LSN_SWAP), (uint32)last_lsn, + (uint32)(page_new_lsn >> XLOG_LSN_SWAP), (uint32)page_new_lsn, + key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode, key.forknum, key.blocknum))); + + return false; +} + +void PageRepairHashTblInit(void) +{ + HASHCTL ctl; + + if (g_instance.repair_cxt.page_repair_hashtbl_lock == NULL) { + g_instance.repair_cxt.page_repair_hashtbl_lock = LWLockAssign(LWTRANCHE_PAGE_REPAIR); + } + + if (g_instance.repair_cxt.page_repair_hashtbl == NULL) { + /* hash accessed by database file id */ + errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); + securec_check(rc, "", ""); + ctl.keysize = 
sizeof(RepairBlockKey);
+        ctl.entrysize = sizeof(RepairBlockEntry);
+        ctl.hash = tag_hash;
+        ctl.hcxt = INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE);
+        g_instance.repair_cxt.page_repair_hashtbl = hash_create("Page Repair Hash Table", MAX_REPAIR_PAGE_NUM, &ctl,
+            HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+
+        if (!g_instance.repair_cxt.page_repair_hashtbl)
+            ereport(FATAL, (errmsg("could not initialize page repair Hash table")));
+    }
+
+    return;
+}
+
+void ClearPageRepairTheadMem(void)
+{
+    if (g_instance.repair_cxt.page_repair_hashtbl != NULL) {
+        hash_destroy(g_instance.repair_cxt.page_repair_hashtbl);
+        g_instance.repair_cxt.page_repair_hashtbl = NULL;
+    }
+
+    if (g_instance.repair_cxt.file_repair_hashtbl != NULL) {
+        hash_destroy(g_instance.repair_cxt.file_repair_hashtbl);
+        g_instance.repair_cxt.file_repair_hashtbl = NULL;
+    }
+
+    return;
+}
+
+/* CopyPageToRepairHashTbl
+ *           After remote read, copy the page into the hash table and update the page_new_lsn
+ * of the page repair hash table, which means that the page is correct.
+ */
+void CopyPageToRepairHashTbl(RepairBlockEntry *entry, char *page_content)
+{
+    XLogRecPtr page_lsn = PageGetLSN(page_content);
+    errno_t rc = 0;
+
+    rc = memcpy_s(entry->page_content, BLCKSZ, page_content, BLCKSZ);
+    securec_check(rc, "", "");
+
+    if (entry->error_type == CRC_CHECK_FAIL) {
+        entry->page_new_lsn = page_lsn;
+        entry->page_state = WAIT_REPAIR;
+    } else {
+        entry->page_state = WAIT_LSN_CHECK;
+    }
+    return;
+}
+
+void CheckPageLSN(RepairBlockKey key)
+{
+    HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl;
+    XLogRecPtr standby_flush_lsn;
+    RepairBlockEntry *entry = NULL;
+    bool found = false;
+    XLogRecPtr page_lsn;
+    XLogRecPtr page_old_lsn;
+
+    LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE);
+
+    entry = (RepairBlockEntry*)hash_search(repair_hash, &(key), HASH_FIND, &found);
+    if (found) {
+        page_old_lsn = entry->page_old_lsn;
+        page_lsn = PageGetLSN(entry->page_content);
+
+        LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock);
+        standby_flush_lsn = GetStandbyFlushRecPtr(NULL);
+        if (XLByteLE(standby_flush_lsn, page_lsn)) {
+            return;
+        }
+        /* after releasing the lock, check the page lsn where the error_type is LSN_CHECK_FAIL */
+        bool check = CheckPrimaryPageLSN(page_old_lsn, page_lsn, key);
+        if (check) {
+            LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE);
+            entry = (RepairBlockEntry*)hash_search(repair_hash, &(key), HASH_FIND, &found);
+            if (found) {
+                entry->page_new_lsn = page_lsn;
+                entry->page_state = WAIT_REPAIR;
+                (void)gs_signal_send(entry->recovery_tid, SIGUSR1);
+            }
+            LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock);
+        } else {
+            ereport(PANIC,
+                (errmsg("check the repair page lsn failed, could not repair the page, "
+                    "page old lsn is %X/%X, primary lsn is %X/%X, "
+                    "page info is %u/%u/%u bucketnode %d, forknum is %u, blocknum is %u",
+                    (uint32)(page_old_lsn >> XLOG_LSN_SWAP), (uint32)page_old_lsn,
+                    (uint32)(page_lsn >> XLOG_LSN_SWAP), (uint32)page_lsn,
+                    key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode,
+                    key.relfilenode.bucketNode, key.forknum, key.blocknum)));
+        }
+    } else {
+        LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock);
+    }
+    return;
+}
+
+int RemoteReadFileSizeNoError(RepairFileKey *key, int64 *size)
+{
+    /* get remote address */
+    char remote_address1[MAXPGPATH] = {0}; /* remote_address1[0] = '\0'; */
+    char remote_address2[MAXPGPATH] = {0}; /* remote_address2[0] = '\0'; */
+    int timeout = 120;
+    
GetRemoteReadAddress(remote_address1, remote_address2, MAXPGPATH); + char *remote_address = remote_address1; + if (remote_address[0] == '\0' || remote_address[0] == ':') { + ereport(DEBUG1, (errmodule(MOD_REMOTE), errmsg("remote not available"))); + return REMOTE_READ_IP_NOT_EXIST; + } + ereport(LOG, (errmodule(MOD_REMOTE), errmsg("remote read file size, file %s from %s", + relpathperm(key->relfilenode, key->forknum), + remote_address))); + + RemoteReadFileKey read_key; + read_key.relfilenode = key->relfilenode; + read_key.forknum = key->forknum; + read_key.blockstart = 0; + + PROFILING_REMOTE_START(); + int retCode = RemoteGetFileSize(remote_address, &read_key, InvalidXLogRecPtr, size, timeout); + /* return file size + primary lsn */ + PROFILING_REMOTE_END_READ(sizeof(uint64) + sizeof(uint64), (retCode == REMOTE_READ_OK)); + return retCode; +} + +/* RemoteReadFile + * standby dn use this function repair file. + */ +int RemoteReadFileNoError(RemoteReadFileKey *key, char *buf, XLogRecPtr lsn, uint32 size, + XLogRecPtr *remote_lsn, uint32 *remote_size) +{ + /* get remote address */ + char remote_address1[MAXPGPATH] = {0}; /* remote_address1[0] = '\0'; */ + char remote_address2[MAXPGPATH] = {0}; /* remote_address2[0] = '\0'; */ + char *remote_address = NULL; + + GetRemoteReadAddress(remote_address1, remote_address2, MAXPGPATH); + remote_address = remote_address1; + int timeout = 0; + + if (remote_address[0] == '\0' || remote_address[0] == ':') { + ereport(WARNING, (errcode(ERRCODE_IO_ERROR), errmodule(MOD_REMOTE), errmsg("remote not available"))); + return REMOTE_READ_IP_NOT_EXIST; + } + ereport(LOG, (errmodule(MOD_REMOTE), + errmsg("remote read file, file %s from %s, block start is %u", + relpathperm(key->relfilenode, key->forknum), remote_address, key->blockstart))); + + PROFILING_REMOTE_START(); + int retCode = RemoteGetFile(remote_address, key, lsn, size, buf, remote_lsn, remote_size, timeout); + PROFILING_REMOTE_END_READ(size, (retCode == REMOTE_READ_OK)); + return retCode; +} + +int RemoteReadBlockNoError(RepairBlockKey *key, char *buf, XLogRecPtr lsn, const XLogPhyBlock *pblk) +{ + /* get remote address */ + char remote_address1[MAXPGPATH] = {0}; /* remote_address1[0] = '\0'; */ + char remote_address2[MAXPGPATH] = {0}; /* remote_address2[0] = '\0'; */ + + GetRemoteReadAddress(remote_address1, remote_address2, MAXPGPATH); + char *remote_address = remote_address1; + if (remote_address[0] == '\0' || remote_address[0] == ':') { + ereport(DEBUG1, (errmodule(MOD_REMOTE), errmsg("remote not available"))); + return REMOTE_READ_IP_NOT_EXIST; + } + if (pblk != NULL) { + ereport(LOG, (errmodule(MOD_REMOTE), errmsg("remote read page, file %s block %u (pblk %u/%d) from %s", + relpathperm(key->relfilenode, key->forknum), key->blocknum, pblk->relNode, pblk->block, remote_address))); + } else { + ereport(LOG, (errmodule(MOD_REMOTE), errmsg("remote read page, file %s block %u from %s", + relpathperm(key->relfilenode, key->forknum), key->blocknum, remote_address))); + } + + const int TIMEOUT = 60; + PROFILING_REMOTE_START(); + int retCode = RemoteGetPage(remote_address, key, BLCKSZ, lsn, buf, pblk, TIMEOUT); + PROFILING_REMOTE_END_READ(BLCKSZ, (retCode == REMOTE_READ_OK)); + return retCode; +} + +static void RepairPage(RepairBlockEntry *entry, char *page) +{ + int retCode = 0; + + if (entry->pblk.relNode != InvalidOid) { + retCode = RemoteReadBlockNoError(&entry->key, page, entry->page_old_lsn, &entry->pblk); + } else { + retCode = RemoteReadBlockNoError(&entry->key, page, entry->page_old_lsn, NULL); + 
} + if (retCode == REMOTE_READ_OK) { + CopyPageToRepairHashTbl(entry, page); + } + + return; +} + +const int MAX_CHECK_LSN_NUM = 100; +static void SeqRemoteReadPage() +{ + HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl; + RepairBlockEntry *entry = NULL; + HASH_SEQ_STATUS status; + RepairBlockKey lsncheck[MAX_CHECK_LSN_NUM]; + int check_lsn_num = 0; + int need_repair_num = 0; + int repair_num = 0; + char page[BLCKSZ] = {0}; + + LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE); + + hash_seq_init(&status, repair_hash); + while ((entry = (RepairBlockEntry *)hash_seq_search(&status)) != NULL) { + need_repair_num++; + switch (entry->page_state) { + case WAIT_REMOTE_READ: + RepairPage(entry, page); + if (entry->error_type == LSN_CHECK_FAIL && entry->page_state == WAIT_LSN_CHECK && + check_lsn_num < MAX_CHECK_LSN_NUM) { + lsncheck[check_lsn_num] = entry->key; + check_lsn_num++; + } + if (entry->page_state == WAIT_REPAIR) { + (void)gs_signal_send(entry->recovery_tid, SIGUSR1); + repair_num++; + } + break; + case WAIT_LSN_CHECK: + if (check_lsn_num < MAX_CHECK_LSN_NUM) { + lsncheck[check_lsn_num] = entry->key; + check_lsn_num++; + } + break; + case WAIT_REPAIR: + repair_num++; + (void)gs_signal_send(entry->recovery_tid, SIGUSR1); + break; + default: + ereport(ERROR, (errmsg("error page state during remote read"))); + } + } + + LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock); + + for (int i = 0; i < check_lsn_num; i++) { + RepairBlockKey temp = lsncheck[i]; + CheckPageLSN(temp); + } + if (need_repair_num == repair_num) { + t_thrd.pagerepair_cxt.page_repair_requested = false; + } + return; +} + +static void PageRepairHandleInterrupts(void) +{ + if (t_thrd.pagerepair_cxt.got_SIGHUP) { + t_thrd.pagerepair_cxt.got_SIGHUP = false; + ProcessConfigFile(PGC_SIGHUP); + } + + if (t_thrd.pagerepair_cxt.shutdown_requested && g_instance.pid_cxt.StartupPID == 0) { + ereport(LOG, (errmodule(MOD_REDO), errmsg("pagerepair thread shut down"))); + + u_sess->attr.attr_common.ExitOnAnyError = true; + proc_exit(0); + } +} + +void PageRepairMain(void) +{ + MemoryContext pagerepair_context; + char name[MAX_THREAD_NAME_LEN] = {0}; + uint32 rc = 0; + + t_thrd.role = PAGEREPAIR_THREAD; + + SetupPageRepairSignalHook(); + + /* We allow SIGQUIT (quickdie) at all times */ + (void)sigdelset(&t_thrd.libpq_cxt.BlockSig, SIGQUIT); + + ereport(LOG, (errmodule(MOD_REDO), errmsg("pagerepair started"))); + + /* + * Create a resource owner to keep track of our resources (currently only + * buffer pins). + */ + errno_t err_rc = snprintf_s( + name, MAX_THREAD_NAME_LEN, MAX_THREAD_NAME_LEN - 1, "%s", "PageRepair"); + securec_check_ss(err_rc, "", ""); + + t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, name, + THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); + + /* + * Create a memory context that we will do all our work in. We do this so + * that we can reset the context during error recovery and thereby avoid + * possible memory leaks. Formerly this code just ran in + * TopMemoryContext, but resetting that would be a really bad idea. 
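The reset-instead-of-leak rationale in this comment generalizes beyond the engine. A toy arena in plain C++ shows the shape of the pattern; it is illustrative only, the kernel's real mechanism is AllocSetContextCreate plus MemoryContextReset:

```
#include <cstdlib>
#include <vector>

/* A toy arena: everything allocated from it is freed in one shot,
 * analogous to resetting a per-thread memory context. */
class Arena {
public:
    void *Alloc(size_t n)
    {
        void *p = malloc(n);
        blocks_.push_back(p);
        return p;
    }
    void Reset()
    {
        for (void *b : blocks_) {
            free(b);
        }
        blocks_.clear();
    }
    ~Arena() { Reset(); }

private:
    std::vector<void *> blocks_;
};

void WorkerCycle(Arena &arena)
{
    /* ... allocate freely from arena while doing one unit of work ... */
    arena.Reset(); /* on error recovery or cycle end, drop it all at once */
}
```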
+ */ + pagerepair_context = AllocSetContextCreate( + TopMemoryContext, name, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + (void)MemoryContextSwitchTo(pagerepair_context); + + /* + * Unblock signals (they were blocked when the postmaster forked us) + */ + gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); + (void)gs_signal_unblock_sigusr2(); + + pgstat_report_appname("PageRepair"); + pgstat_report_activity(STATE_IDLE, NULL); + + /* + * Loop forever + */ + for (;;) { + PageRepairHandleInterrupts(); + pgstat_report_activity(STATE_IDLE, NULL); + rc = WaitLatch(&t_thrd.proc->procLatch, WL_TIMEOUT | WL_POSTMASTER_DEATH, (long)TEN_MILLISECOND); + if (rc & WL_POSTMASTER_DEATH) { + gs_thread_exit(1); + } + + ResetLatch(&t_thrd.proc->procLatch); + pgstat_report_activity(STATE_RUNNING, NULL); + if (!t_thrd.pagerepair_cxt.shutdown_requested) { + SeqRemoteReadPage(); + SeqRemoteReadFile(); + } + } +} + +static void SetupPageRepairSignalHook(void) +{ + /* + * Reset some signals that are accepted by postmaster but not here + */ + (void)gspqsignal(SIGHUP, PageRepairSigHupHandler); + (void)gspqsignal(SIGINT, SIG_IGN); + (void)gspqsignal(SIGTERM, PageRepairShutDownHandler); + (void)gspqsignal(SIGQUIT, PageRepairQuickDie); /* hard crash time */ + (void)gspqsignal(SIGALRM, SIG_IGN); + (void)gspqsignal(SIGPIPE, SIG_IGN); + (void)gspqsignal(SIGUSR1, PageRepairSigUsr1Handler); + (void)gspqsignal(SIGUSR2, PageRepairSigUsr2Handler); + + /* + * Reset some signals that are accepted by postmaster but not here + */ + (void)gspqsignal(SIGCHLD, SIG_DFL); + (void)gspqsignal(SIGTTIN, SIG_DFL); + (void)gspqsignal(SIGTTOU, SIG_DFL); + (void)gspqsignal(SIGCONT, SIG_DFL); + (void)gspqsignal(SIGWINCH, SIG_DFL); +} + +static void PageRepairSigUsr1Handler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.pagerepair_cxt.page_repair_requested = true; + if (t_thrd.proc) { + SetLatch(&t_thrd.proc->procLatch); + } + + errno = save_errno; +} + +static void PageRepairSigUsr2Handler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.pagerepair_cxt.file_repair_requested = true; + if (t_thrd.proc) { + SetLatch(&t_thrd.proc->procLatch); + } + + errno = save_errno; +} + +static void PageRepairSigHupHandler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.pagerepair_cxt.got_SIGHUP = true; + if (t_thrd.proc) { + SetLatch(&t_thrd.proc->procLatch); + } + + errno = save_errno; +} + +static void PageRepairShutDownHandler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.pagerepair_cxt.shutdown_requested = true; + if (t_thrd.proc) { + SetLatch(&t_thrd.proc->procLatch); + } + + errno = save_errno; +} + +static void PageRepairQuickDie(SIGNAL_ARGS) +{ + gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); + + /* + * We DO NOT want to run proc_exit() callbacks -- we're here because + * shared memory may be corrupted, so we don't want to try to clean up our + * transaction. Just nail the windows shut and get out of town. Now that + * there's an atexit callback to prevent third-party code from breaking + * things by calling exit() directly, we have to reset the callbacks + * explicitly to make this work as intended. + */ + on_exit_reset(); + + /* + * Note we do exit(2) not exit(0). This is to force the postmaster into a + * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * backend. This is necessary precisely because we don't clean up our + * shared memory state. 
(The "dead man switch" mechanism in pmsignal.c + * should ensure the postmaster sees this as a crash, too, but no harm in + * being doubly sure.) + */ + gs_thread_exit(2); +} + +/* recovery thread function */ + +bool PushBadPageToRemoteHashTbl(RepairBlockKey key, PageErrorType error_type, XLogRecPtr old_lsn, + XLogPhyBlock pblk, ThreadId tid) +{ + HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl; + bool found = false; + + Assert(repair_hash != NULL); + + LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE); + RepairBlockEntry *entry = (RepairBlockEntry*)hash_search(repair_hash, &(key), HASH_ENTER, &found); + if (!found) { + entry->key = key; + entry->recovery_tid = tid; + entry->error_type = error_type; + entry->page_state = WAIT_REMOTE_READ; + entry->page_old_lsn = old_lsn; + entry->page_new_lsn = InvalidXLogRecPtr; + entry->pblk = pblk; + } + LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock); + + if (!found) { + /* need add array and wakeup the page repair thread */ + ThreadId PageRepairPID = g_instance.pid_cxt.PageRepairPID; + if (PageRepairPID != 0) { + (void)gs_signal_send(PageRepairPID, SIGUSR1); + } + if (g_instance.repair_cxt.repair_proc_latch != NULL) { + SetLatch(g_instance.repair_cxt.repair_proc_latch); + } + } + + return found; +} + +bool BlockNodeMatch(RepairBlockKey key, XLogPhyBlock pblk, RelFileNode node, + ForkNumber forknum, BlockNumber minblkno, bool segment_shrink) +{ + if (segment_shrink) { + RelFileNode rnode = key.relfilenode; + rnode.relNode = pblk.relNode; + bool node_equal = RelFileNodeRelEquals(node, rnode); + return node_equal && key.forknum == forknum && pblk.block >= minblkno; + } else { + bool node_equal = IsBucketFileNode(node) ? RelFileNodeEquals(node, key.relfilenode) + : RelFileNodeRelEquals(node, key.relfilenode); + return node_equal && key.forknum == forknum && key.blocknum >= minblkno; + } +} + +bool dbNodeandSpcNodeMatch(RelFileNode *rnode, Oid spcNode, Oid dbNode) +{ + if (OidIsValid(spcNode) && rnode->spcNode != spcNode) { + return false; + } + if (OidIsValid(dbNode) && rnode->dbNode != dbNode) { + return false; + } + return true; +} + +/* BatchClearPageRepairHashTbl + * drop database, or drop segmentspace, need clear the page repair hashTbl, + * if the repair page key dbNode match and spcNode match, need remove. + */ +void BatchClearPageRepairHashTbl(Oid spcNode, Oid dbNode) +{ + HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl; + bool found = false; + RepairBlockEntry *entry = NULL; + HASH_SEQ_STATUS status; + + LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE); + + hash_seq_init(&status, repair_hash); + while ((entry = (RepairBlockEntry *)hash_seq_search(&status)) != NULL) { + if (dbNodeandSpcNodeMatch(&(entry->key.relfilenode), spcNode, dbNode)) { + if (hash_search(repair_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("page repair hash table corrupted"))); + } + } + } + + LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock); + + return; +} + +/* ClearPageRepairHashTbl + * drop table, or truncate table, need clear the page repair hashTbl, if the + * repair page Filenode match need remove. 
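PushBadPageToRemoteHashTbl above follows a common handshake: insert under the lock, and only the caller that created a new entry wakes the repair thread. A compact sketch of that handshake, with a condition variable standing in for gs_signal_send plus SetLatch and hypothetical types throughout:

```
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <unordered_set>

static std::mutex tbl_lock;
static std::condition_variable repair_cv;           /* repair thread waits here */
static std::unordered_set<uint64_t> pending_pages;  /* stands in for the hashtable */

/* Returns true if the key was already queued (mirrors the 'found' flag). */
bool QueueForRepair(uint64_t key)
{
    bool already_queued;
    {
        std::lock_guard<std::mutex> g(tbl_lock);
        already_queued = !pending_pages.insert(key).second;
    }
    if (!already_queued) {
        repair_cv.notify_one(); /* wake the repair thread for new work only */
    }
    return already_queued;
}
```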
+ */
+void ClearPageRepairHashTbl(const RelFileNode &node, ForkNumber forknum, BlockNumber minblkno,
+    bool segment_shrink)
+{
+    HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl;
+    bool found = false;
+    RepairBlockEntry *entry = NULL;
+    HASH_SEQ_STATUS status;
+
+    LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE);
+
+    hash_seq_init(&status, repair_hash);
+    while ((entry = (RepairBlockEntry *)hash_seq_search(&status)) != NULL) {
+        if (BlockNodeMatch(entry->key, entry->pblk, node, forknum, minblkno, segment_shrink)) {
+            if (hash_search(repair_hash, &(entry->key), HASH_REMOVE, &found) == NULL) {
+                ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("page repair hash table corrupted")));
+            }
+        }
+    }
+
+    LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock);
+
+    return;
+}
+
+/* ClearSpecificsPageRepairHashTbl
+ *         If the page repair finishes, the entry needs to be cleared from the page repair hashTbl.
+ */
+void ClearSpecificsPageRepairHashTbl(RepairBlockKey key)
+{
+    bool found = false;
+    HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl;
+
+    LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE);
+
+    if ((RepairBlockEntry*)hash_search(repair_hash, &(key), HASH_REMOVE, &found) == NULL) {
+        ereport(WARNING,
+            (errmsg("the %u/%u/%u bucketnode %d forknum %u, blknum %u, remove from repair hashtbl, not found",
+                key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, key.relfilenode.bucketNode,
+                key.forknum, key.blocknum)));
+    }
+
+    LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock);
+    return;
+}
+
+/* CheckRepairPage
+ *         The recovery thread checks that the primary page lsn (page_new_lsn) is in the range
+ * from record_min_lsn to record_max_lsn.
+ */
+bool CheckRepairPage(RepairBlockKey key, XLogRecPtr min_lsn, XLogRecPtr max_lsn, char *page)
+{
+    bool found = false;
+    bool can_recovery = false;
+    RepairBlockEntry *entry = NULL;
+    HTAB *repair_hash = g_instance.repair_cxt.page_repair_hashtbl;
+
+    LWLockAcquire(g_instance.repair_cxt.page_repair_hashtbl_lock, LW_EXCLUSIVE);
+
+    entry = (RepairBlockEntry*)hash_search(repair_hash, &(key), HASH_FIND, &found);
+    if (entry == NULL) {
+        ereport(ERROR, (errmsg("the page repair hash table corrupted")));
+    }
+
+    /* the page_new_lsn is in the range from record_min_lsn to record_max_lsn */
+    if (entry->page_state == WAIT_REPAIR && entry->page_new_lsn <= max_lsn && entry->page_new_lsn >= min_lsn) {
+        errno_t rc;
+        can_recovery = true;
+
+        rc = memcpy_s(page, BLCKSZ, entry->page_content, BLCKSZ);
+        securec_check(rc, "", "");
+    }
+    LWLockRelease(g_instance.repair_cxt.page_repair_hashtbl_lock);
+
+    return can_recovery;
+}
+
+void WaitRepalyFinish()
+{
+    /* wait until replay workers are idle so the invalid pages can be cleaned */
+    if (IsExtremeRedo()) {
+        extreme_rto::WaitAllReplayWorkerIdle();
+    } else if (IsParallelRedo()) {
+        parallel_recovery::WaitAllPageWorkersQueueEmpty();
+    } else {
+        XLogRecPtr standby_replay_lsn = GetXLogReplayRecPtr(NULL, NULL);
+        XLogRecPtr suspend_lsn = pg_atomic_read_u64(&g_instance.startup_cxt.suspend_lsn);
+        /* if suspend_lsn > standby_replay_lsn then need wait */
+        while (!XLByteLE(suspend_lsn, standby_replay_lsn)) {
+            /* sleep 1s */
+            PageRepairHandleInterrupts();
+            pg_usleep(1000000L);
+            /* get current replay lsn again */
+            (void)GetXLogReplayRecPtr(NULL, &standby_replay_lsn);
+        }
+    }
+}
+
+const int MAX_REPAIR_FILE_NUM = 20;
+void FileRepairHashTblInit(void)
+{
+    HASHCTL ctl;
+
+    if (g_instance.repair_cxt.file_repair_hashtbl_lock == NULL) {
+        
g_instance.repair_cxt.file_repair_hashtbl_lock = LWLockAssign(LWTRANCHE_FILE_REPAIR); + } + + if (g_instance.repair_cxt.file_repair_hashtbl == NULL) { + /* hash accessed by database file id */ + errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); + securec_check(rc, "", ""); + ctl.keysize = sizeof(RepairFileKey); + ctl.entrysize = sizeof(RepairFileEntry); + ctl.hash = tag_hash; + ctl.hcxt = INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE); + g_instance.repair_cxt.file_repair_hashtbl = hash_create("File Repair Hash Table", MAX_REPAIR_FILE_NUM, &ctl, + HASH_ELEM | HASH_FUNCTION |HASH_CONTEXT); + + if (!g_instance.repair_cxt.file_repair_hashtbl) + ereport(FATAL, (errmsg("could not initialize file repair Hash table"))); + } + + return; +} + +bool CheckFileRepairHashTbl(RelFileNode rnode, ForkNumber forknum, uint32 segno) +{ + HTAB* file_hashtbl = g_instance.repair_cxt.file_repair_hashtbl; + RepairFileKey key; + RepairFileEntry *entry = NULL; + bool found = false; + + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = segno; + + if (file_hashtbl == NULL) { + return found; + } + LWLockAcquire(FILE_REPAIR_LOCK, LW_SHARED); + entry = (RepairFileEntry*)hash_search(file_hashtbl, &(key), HASH_FIND, &found); + if (found) { + if (entry->file_state == WAIT_FILE_REPAIR || entry->file_state == WAIT_RENAME) { + found = true; + } else { + found = false; + } + } + LWLockRelease(FILE_REPAIR_LOCK); + + return found; +} + +void CheckNeedRecordBadFile(RepairFileKey key, uint32 nblock, uint32 blocknum, const XLogPhyBlock *pblk) +{ + if (CheckVerionSupportRepair() && (nblock == 0 || blocknum / RELSEG_SIZE > nblock / RELSEG_SIZE) && + IsPrimaryClusterStandbyDN() && g_instance.repair_cxt.support_repair) { + if (pblk != NULL) { + key.relfilenode.relNode = pblk->relNode; + key.segno = pblk->block / RELSEG_SIZE; + } + if (IsSegmentFileNode(key.relfilenode)) { + key.relfilenode.bucketNode = SegmentBktId; + } + PushBadFileToRemoteHashTbl(key); + } +} + +static void PushBadFileToRemoteHashTbl(RepairFileKey key) +{ + HTAB *file_hash = g_instance.repair_cxt.file_repair_hashtbl; + RepairFileEntry *entry = NULL; + bool found = false; + XLogRecPtr min_recovery_point; + + LWLockAcquire(ControlFileLock, LW_SHARED); + min_recovery_point = t_thrd.shemem_ptr_cxt.ControlFile->minRecoveryPoint; + LWLockRelease(ControlFileLock); + + LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE); + entry = (RepairFileEntry*)hash_search(file_hash, &(key), HASH_ENTER, &found); + if (!found) { + entry->key.relfilenode.relNode = key.relfilenode.relNode; + entry->key.relfilenode.dbNode = key.relfilenode.dbNode; + entry->key.relfilenode.spcNode = key.relfilenode.spcNode; + entry->key.relfilenode.bucketNode = key.relfilenode.bucketNode; + entry->key.forknum = key.forknum; + entry->key.segno = key.segno; + entry->min_recovery_point = min_recovery_point; + entry->file_state = WAIT_FILE_CHECK_REPAIR; + entry->primary_file_lsn = InvalidXLogRecPtr; + + ereport(LOG, (errmodule(MOD_REDO), + errmsg("[file repair] push to file repair hashtbl, path is %s segno is %u", + relpathperm(entry->key.relfilenode, entry->key.forknum), entry->key.segno))); + } + LWLockRelease(FILE_REPAIR_LOCK); + return; +} + +bool FileNodeMatch(RepairFileKey key, RelFileNode node, ForkNumber forknum, uint32 segno) +{ + bool node_equal = RelFileNodeRelEquals(node, key.relfilenode); + + return node_equal && key.forknum == forknum && key.segno >= segno; +} + +void ClearBadFileHashTbl(const RelFileNode &node, ForkNumber forknum, uint32 segno) +{ + HTAB *file_hash = 
g_instance.repair_cxt.file_repair_hashtbl; + RepairFileEntry *entry = NULL; + bool found = false; + HASH_SEQ_STATUS status; + + LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE); + + hash_seq_init(&status, file_hash); + while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) { + if (FileNodeMatch(entry->key, node, forknum, segno)) { + if (hash_search(file_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + LWLockRelease(FILE_REPAIR_LOCK); + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("file repair hash table corrupted"))); + } else { + ereport(LOG, (errmodule(MOD_REDO), + errmsg("[file repair] file %s segno is %u entry remove when drop table or truncate table", + relpathperm(entry->key.relfilenode, entry->key.forknum), entry->key.segno))); + } + } + } + + LWLockRelease(FILE_REPAIR_LOCK); + return; +} + + +void BatchClearBadFileHashTbl(Oid spcNode, Oid dbNode) +{ + HTAB *file_hash = g_instance.repair_cxt.file_repair_hashtbl; + RepairFileEntry *entry = NULL; + bool found = false; + HASH_SEQ_STATUS status; + + LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE); + + hash_seq_init(&status, file_hash); + while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) { + if (dbNodeandSpcNodeMatch(&(entry->key.relfilenode), spcNode, dbNode)) { + if (hash_search(file_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + LWLockRelease(FILE_REPAIR_LOCK); + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("file repair hash table corrupted"))); + } else { + ereport(LOG, (errmodule(MOD_REDO), + errmsg("[file repair] file %s segno is %u entry remove when drop database or segment space", + relpathperm(entry->key.relfilenode, entry->key.forknum), entry->key.segno))); + } + } + } + + LWLockRelease(FILE_REPAIR_LOCK); + + return; +} + +void RenameRepairFile(RepairFileKey *key, bool clear_entry) +{ + errno_t rc; + bool found = false; + HTAB *file_hash = g_instance.repair_cxt.file_repair_hashtbl; + char *path = relpathperm(key->relfilenode, key->forknum); + char *tempsegpath = (char *)palloc(strlen(path) + SEGLEN); + char *segpath = (char *)palloc(strlen(path) + SEGLEN); + + /* wait all dirty page flush */ + RequestCheckpoint(CHECKPOINT_FLUSH_DIRTY|CHECKPOINT_WAIT); + + if (key->segno == 0) { + rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s", path); + securec_check_ss(rc, "", "") + rc = sprintf_s(tempsegpath, strlen(path) + SEGLEN, "%s.repair", path); + securec_check_ss(rc, "", "") + } else { + rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s.%u", path, key->segno); + securec_check_ss(rc, "", "") + rc = sprintf_s(tempsegpath, strlen(path) + SEGLEN, "%s.%u.repair", path, key->segno); + securec_check_ss(rc, "", "") + } + + rc = durable_rename(tempsegpath, segpath, WARNING); + if (rc == 0) { + ereport(LOG, (errmodule(MOD_REDO), + errmsg("[file repair] file rename from %s to %s finish", tempsegpath, segpath))); + + /* file repair finish, need clean the invalid page */ + if (IsExtremeRedo()) { + extreme_rto::DispatchCleanInvalidPageMarkToAllRedoWorker(*key); + extreme_rto::DispatchClosefdMarkToAllRedoWorker(); + extreme_rto::WaitAllReplayWorkerIdle(); + } else if (IsParallelRedo()) { + if (AmStartupProcess()) { + ProcTxnWorkLoad(true); + } + parallel_recovery::SendCleanInvalidPageMarkToAllWorkers(*key); + parallel_recovery::SendClosefdMarkToAllWorkers(); + parallel_recovery::WaitAllPageWorkersQueueEmpty(); + } else { + } + forget_range_invalid_pages((void*)key); + smgrcloseall(); + + LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE); + + if (clear_entry) { + if 
(hash_search(file_hash, key, HASH_REMOVE, &found) == NULL) { + pfree(path); + pfree(segpath); + pfree(tempsegpath); + LWLockRelease(FILE_REPAIR_LOCK); + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("file repair hash table corrupted"))); + } else { + ereport(LOG, (errmodule(MOD_REDO), + errmsg("[file repair] file %s repair finish, remove the entry", segpath))); + } + } + LWLockRelease(FILE_REPAIR_LOCK); + } + pfree(path); + pfree(segpath); + pfree(tempsegpath); + + return; +} + +void CheckNeedRenameFile() +{ + HASH_SEQ_STATUS status; + RepairFileEntry *entry = NULL; + HTAB *file_hash = g_instance.repair_cxt.file_repair_hashtbl; + uint32 need_repair_num = 0; + uint32 need_rename_num = 0; + RepairFileKey *rename_key = NULL; + errno_t rc = 0; + uint32 i = 0; + + LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE); + hash_seq_init(&status, file_hash); + while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) { + if (entry->file_state == WAIT_RENAME) { + need_rename_num++; + } + } + if (need_rename_num > 0) { + rename_key = (RepairFileKey*)palloc0(sizeof(RepairFileKey) * need_rename_num); + } + + hash_seq_init(&status, file_hash); + + while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) { + switch (entry->file_state) { + case WAIT_RENAME: + Assert(XLByteLE(entry->primary_file_lsn, GetXLogReplayRecPtr(NULL, NULL))); + Assert(!IsSegmentFileNode(entry->key.relfilenode)); + rc = memcpy_s(&rename_key[i], sizeof(RepairFileKey), &(entry->key), sizeof(RepairFileKey)); + securec_check(rc, "", ""); + i++; + break; + case WAIT_FILE_REMOTE_READ: + case WAIT_FILE_REPAIR_SEGMENT: + need_repair_num++; + break; + case WAIT_FOREGT_INVALID_PAGE: + { + forget_range_invalid_pages((void*)&entry->key); + bool found = false; + if (hash_search(file_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + LWLockRelease(FILE_REPAIR_LOCK); + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("file repair hash table corrupted"))); + } else { + ereport(LOG, (errmodule(MOD_REDO), + errmsg("[file repair] file %s seg is %d, repair finish, remove the entry", + relpathperm(entry->key.relfilenode, entry->key.forknum), entry->key.segno))); + } + } + break; + default: + break; + } + } + LWLockRelease(FILE_REPAIR_LOCK); + + for (i = 0; i < need_rename_num; i++) { + RepairFileKey *key = &rename_key[i]; + RenameRepairFile(key, true); + } + if (need_rename_num > 0) { + pfree(rename_key); + rename_key = NULL; + } + if (need_repair_num == 0) { + SetRecoverySuspend(false); + ereport(LOG, (errmodule(MOD_REDO), + errmsg("set recovery suspend to false, the need repair num is zero"))); + } +} + +void CheckIsStopRecovery(void) +{ + uint32 need_repair_num = 0; + uint32 need_rename_num = 0; + HASH_SEQ_STATUS status; + RepairFileEntry *entry = NULL; + HTAB *file_hash = g_instance.repair_cxt.file_repair_hashtbl; + XLogRecPtr repaly = GetXLogReplayRecPtr(NULL, NULL); + XLogRecPtr flush = GetStandbyFlushRecPtr(NULL); + + if (file_hash == NULL) { + return; + } + + if (LWLockConditionalAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE)) { + hash_seq_init(&status, file_hash); + while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) { + flush = GetStandbyFlushRecPtr(NULL); + if (!XLogRecPtrIsInvalid(entry->min_recovery_point) && XLByteLT(entry->min_recovery_point, repaly) + && entry->file_state == WAIT_FILE_CHECK_REPAIR) { + entry->file_state = WAIT_FILE_REMOTE_READ; + } + + if (entry->file_state == WAIT_FILE_REMOTE_READ || entry->file_state == WAIT_FILE_REPAIR_SEGMENT) { + need_repair_num++; + ereport(LOG, 
(errmodule(MOD_REDO),
+                    errmsg("[file repair] need remote read or segment file waiting for rename, file path %s segno is %u",
+                        relpathperm(entry->key.relfilenode, entry->key.forknum), entry->key.segno)));
+            }
+
+            if ((entry->file_state == WAIT_FILE_REPAIR && !IsSegmentFileNode(entry->key.relfilenode) &&
+                XLByteLT(entry->primary_file_lsn, repaly)) || entry->file_state == WAIT_RENAME) {
+                entry->file_state = WAIT_RENAME;
+                need_rename_num++;
+                ereport(LOG, (errmodule(MOD_REDO),
+                    errmsg("[file repair] need rename, file path %s segno is %u",
+                        relpathperm(entry->key.relfilenode, entry->key.forknum), entry->key.segno)));
+            }
+        }
+
+        LWLockRelease(FILE_REPAIR_LOCK);
+
+        if (need_repair_num > 0 || need_rename_num > 0) {
+            load_server_mode();
+            if (NOT_SUPPORT_PAGE_REPAIR) {
+                return;
+            }
+            if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE && g_instance.repair_cxt.support_repair) {
+                SetRecoverySuspend(true);
+                ereport(LOG, (errmodule(MOD_REDO),
+                    errmsg("set recovery suspend to true, the need repair num is %u, need rename num is %u",
+                        need_repair_num, need_rename_num)));
+            }
+        }
+    }
+
+    return;
+}
+
+const int REPAIR_LEN = 8;
+int CreateRepairFile(char *path)
+{
+    int fd = -1;
+    int retry_times = 0;
+    const int MAX_RETRY_TIME = 2;
+    errno_t rc;
+    char *repairpath = (char *)palloc(strlen(path) + REPAIR_LEN);
+
+    rc = sprintf_s(repairpath, strlen(path) + REPAIR_LEN, "%s.repair", path);
+    securec_check_ss(rc, "", "");
+
+RETRY:
+    fd = BasicOpenFile((char*)repairpath, O_CREAT | O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
+    retry_times++;
+    if (fd < 0) {
+        if (retry_times < MAX_RETRY_TIME) {
+            goto RETRY;
+        }
+        if (errno != ENOENT) {
+            ereport(WARNING, (errcode_for_file_access(),
+                errmsg("[file repair] could not open file \"%s\": %m", repairpath)));
+            pfree(repairpath);
+            return -1;
+        }
+    }
+
+    pfree(repairpath);
+    return fd;
+}
+
+int WriteRepairFile(int fd, char* path, char *buf, uint32 offset, uint32 size)
+{
+    errno_t rc = 0;
+    char *repairpath = (char *)palloc(strlen(path) + REPAIR_LEN);
+
+    rc = sprintf_s(repairpath, strlen(path) + REPAIR_LEN, "%s.repair", path);
+    securec_check_ss(rc, "", "");
+
+    if (lseek(fd, offset, SEEK_SET) < 0) {
+        ereport(WARNING, (errcode_for_file_access(), errmsg("[file repair] could not seek repair file %s: %m",
+            repairpath)));
+        pfree(repairpath);
+        return -1;
+    }
+
+    if (write(fd, buf, size) != size) {
+        /* if write didn't set errno, assume problem is no disk space */
+        if (errno == 0) {
+            errno = ENOSPC;
+        }
+        ereport(WARNING, (errcode_for_file_access(), errmsg("[file repair] could not write to temp file %s: %m",
+            repairpath)));
+        pfree(repairpath);
+        return -1;
+    }
+    if (fsync(fd) != 0) {
+        ereport(WARNING, (errcode_for_file_access(), errmsg("[file repair] could not fsync temp file %s: %m",
+            repairpath)));
+        pfree(repairpath);
+        return -1;
+    }
+
+    pfree(repairpath);
+    return 0;
+}
+
+void UnlinkOldBadFile(char *path, RepairFileKey key)
+{
+    /* wait for the xlog replay to finish */
+    if (IsExtremeRedo()) {
+        extreme_rto::DispatchClosefdMarkToAllRedoWorker();
+        extreme_rto::WaitAllReplayWorkerIdle();
+    } else if (IsParallelRedo()) {
+        parallel_recovery::SendClosefdMarkToAllWorkers();
+        parallel_recovery::WaitAllPageWorkersQueueEmpty();
+    } else {
+        XLogRecPtr standby_replay_lsn = GetXLogReplayRecPtr(NULL, NULL);
+        XLogRecPtr suspend_lsn = pg_atomic_read_u64(&g_instance.startup_cxt.suspend_lsn);
+        /* if suspend_lsn > standby_replay_lsn then need wait */
+        while (!XLByteLE(suspend_lsn, standby_replay_lsn)) {
+            /* sleep 1s */
+            PageRepairHandleInterrupts();
+            
pg_usleep(1000000L); + /* get current replay lsn again */ + (void)GetXLogReplayRecPtr(NULL, &standby_replay_lsn); + } + } + /* wait all dirty page flush */ + RequestCheckpoint(CHECKPOINT_FLUSH_DIRTY|CHECKPOINT_WAIT); + + /* handle the backend thread */ + RelFileNodeBackend rnode; + rnode.node = key.relfilenode; + rnode.backend = InvalidBackendId; + CacheInvalidateSmgr(rnode); + int ret = unlink(path); + if (ret < 0 && errno != ENOENT) { + ereport(WARNING, (errcode_for_file_access(), errmsg("[file repair] could not remove file \"%s\": %m", path))); + } + if (ret >= 0) { + ereport(LOG, (errcode_for_file_access(), errmsg("[file repair] remove file \"%s\": %m", path))); + } + CacheInvalidateSmgr(rnode); + /* invalidate shared buffer about this seg file */ + LockRelFileNode(key.relfilenode, AccessExclusiveLock); + RangeForgetBuffer(key.relfilenode, key.forknum, key.segno * RELSEG_SIZE, (key.segno + 1) * RELSEG_SIZE); + UnlockRelFileNode(key.relfilenode, AccessExclusiveLock); + return; +} + +static void RepairSegFile(RepairFileKey key, char *segpath, uint32 seg_no, uint32 max_segno, uint64 size) +{ + char *buf = 0; + int ret_code = REMOTE_READ_NEED_WAIT; + int fd = -1; + errno_t rc = 0; + struct stat statBuf; + uint32 seg_size = 0; + uint32 remote_size = 0; + RemoteReadFileKey read_key; + XLogRecPtr remote_lsn = InvalidXLogRecPtr; + XLogRecPtr standby_flush_lsn = InvalidXLogRecPtr; + bool found = false; + + fd = CreateRepairFile(segpath); + if (fd < 0) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("[file repair] could not create repair file \"%s\", segno is %d", + relpathperm(key.relfilenode, key.forknum), seg_no))); + return; + } + read_key.relfilenode = key.relfilenode; + read_key.forknum = key.forknum; + read_key.blockstart = seg_no * RELSEG_SIZE; + + buf = (char*)palloc(MAX_BATCH_READ_BLOCKNUM * BLCKSZ); + seg_size = (seg_no < max_segno ? (RELSEG_SIZE * BLCKSZ) : (size % (RELSEG_SIZE * BLCKSZ))); + int batch_size = MAX_BATCH_READ_BLOCKNUM * BLCKSZ; + int max_times = seg_size % batch_size == 0 ? seg_size / batch_size : (seg_size / batch_size + 1); + + for (int j = 0; j < max_times; j++) { + int read_size = 0; + if (seg_size % batch_size != 0) { + read_size = (j == max_times - 1 ? 
seg_size % batch_size : batch_size);
+        } else {
+            read_size = batch_size;
+        }
+
+        read_key.blockstart = seg_no * RELSEG_SIZE + j * MAX_BATCH_READ_BLOCKNUM;
+        ret_code = RemoteReadFileNoError(&read_key, buf, InvalidXLogRecPtr, read_size, &remote_lsn, &remote_size);
+        if (ret_code == REMOTE_READ_OK) {
+            rc = WriteRepairFile(fd, segpath, buf, j * batch_size, read_size);
+            if (rc != 0) {
+                ereport(WARNING, (errcode_for_file_access(),
+                    errmsg("[file repair] could not write repair file \"%s\", segno is %u",
+                        relpathperm(key.relfilenode, key.forknum), seg_no)));
+                pfree(buf);
+                (void)close(fd);
+                return;
+            }
+        } else {
+            ereport(WARNING, (errcode_for_file_access(),
+                errmsg("[file repair] remote read file failed \"%s\", segno is %u, block start %u",
+                    relpathperm(key.relfilenode, key.forknum), seg_no, read_key.blockstart)));
+            pfree(buf);
+            (void)close(fd);
+            return;
+        }
+    }
+    pfree(buf);
+    (void)close(fd);
+
+    if (ret_code == REMOTE_READ_OK) {
+        standby_flush_lsn = GetStandbyFlushRecPtr(NULL);
+        while (!XLByteLT(remote_lsn, standby_flush_lsn)) {
+            PageRepairHandleInterrupts();
+            /* sleep 10ms */
+            pg_usleep(10000L);
+            /* get current flush lsn again */
+            standby_flush_lsn = GetStandbyFlushRecPtr(NULL);
+        }
+        ereport(LOG, (errmsg("[file repair] wait lsn flush, remote lsn is %X/%X",
+            (uint32)(remote_lsn >> XLOG_LSN_SWAP), (uint32)remote_lsn)));
+    } else {
+        return;
+    }
+
+    /* wait for xlog replay */
+    if (!IsSegmentFileNode(key.relfilenode)) {
+        if (stat(segpath, &statBuf) < 0) {
+            if (errno != ENOENT) {
+                ereport(WARNING, (errcode_for_file_access(),
+                    errmsg("[file repair] could not stat file \"%s\" before repair: %m", segpath)));
+                UnlinkOldBadFile(segpath, key);
+            }
+        } else {
+            UnlinkOldBadFile(segpath, key);
+        }
+    }
+
+    /* wait for xlog replay to finish; need to take the lock */
+    LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+    RepairFileEntry *temp_entry = (RepairFileEntry*)hash_search(g_instance.repair_cxt.file_repair_hashtbl, &(key),
+        HASH_FIND, &found);
+    if (found) {
+        temp_entry->file_state = IsSegmentFileNode(key.relfilenode) ? WAIT_FILE_REPAIR_SEGMENT : WAIT_FILE_REPAIR;
+        temp_entry->primary_file_lsn = remote_lsn;
+    }
+    LWLockRelease(FILE_REPAIR_LOCK);
+    return;
+}
+
+bool CheckAllSegmentFileRepair(RepairFileKey key, uint32 max_segno)
+{
+    uint32 repair_num = 0;
+
+    /* check that remote read has finished for every segno file */
+    LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+    for (uint i = 0; i <= max_segno; i++) {
+        RepairFileKey temp_key;
+        bool found = false;
+        temp_key.relfilenode.relNode = key.relfilenode.relNode;
+        temp_key.relfilenode.dbNode = key.relfilenode.dbNode;
+        temp_key.relfilenode.spcNode = key.relfilenode.spcNode;
+        temp_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
+        temp_key.forknum = key.forknum;
+        temp_key.segno = i;
+
+        RepairFileEntry *entry = (RepairFileEntry*)hash_search(g_instance.repair_cxt.file_repair_hashtbl,
+            &(temp_key), HASH_FIND, &found);
+        Assert(found);
+        if (found && entry->file_state == WAIT_FILE_REPAIR_SEGMENT) {
+            repair_num++;
+        }
+    }
+    LWLockRelease(FILE_REPAIR_LOCK);
+
+    if (repair_num == max_segno + 1) {
+        /* 1. rename all files */
+        RepairFileKey rename_key = {0};
+        for (uint i = 0; i <= max_segno; i++) {
+            bool found = false;
+
+            rename_key.relfilenode.relNode = key.relfilenode.relNode;
+            rename_key.relfilenode.dbNode = key.relfilenode.dbNode;
+            rename_key.relfilenode.spcNode = key.relfilenode.spcNode;
+            rename_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
+            rename_key.forknum = key.forknum;
+            rename_key.segno = i;
+
+            LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+            (void)hash_search(g_instance.repair_cxt.file_repair_hashtbl, &(rename_key), HASH_FIND, &found);
+            Assert(found);
+            LWLockRelease(FILE_REPAIR_LOCK);
+            if (found) {
+                RenameRepairFile(&rename_key, false);
+            }
+        }
+
+        /* 2. open all files */
+        df_open_all_file(rename_key, max_segno);
+
+        /* 3. change the file state */
+        LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+        RepairFileKey change_key;
+        for (uint i = 0; i <= max_segno; i++) {
+            bool found = false;
+            change_key.relfilenode.relNode = key.relfilenode.relNode;
+            change_key.relfilenode.dbNode = key.relfilenode.dbNode;
+            change_key.relfilenode.spcNode = key.relfilenode.spcNode;
+            change_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
+            change_key.forknum = key.forknum;
+            change_key.segno = i;
+
+            RepairFileEntry *entry = (RepairFileEntry*)hash_search(g_instance.repair_cxt.file_repair_hashtbl,
+                &(change_key), HASH_FIND, &found);
+            Assert(found);
+            if (found) {
+                entry->file_state = WAIT_FOREGT_INVALID_PAGE;
+            }
+        }
+        LWLockRelease(FILE_REPAIR_LOCK);
+        return true;
+    }
+
+    return false;
+}
+
+void StandbyRemoteReadFile(RepairFileKey key)
+{
+    int ret_code;
+    int64 size = 0;
+    errno_t rc;
+    bool found = false;
+    char *path = relpathperm(key.relfilenode, key.forknum);
+    char *segpath = (char *)palloc(strlen(path) + SEGLEN);
+
+RETRY:
+    ret_code = RemoteReadFileSizeNoError(&key, &size);
+    if (ret_code == REMOTE_READ_OK) {
+        uint32 max_segno = 0;
+
+        if (size <= 0) {
+            pfree(path);
+            pfree(segpath);
+            LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+            RepairFileEntry *temp_entry = (RepairFileEntry*)hash_search(g_instance.repair_cxt.file_repair_hashtbl,
+                &(key), HASH_FIND, &found);
+            if (found) {
+                temp_entry->file_state = WAIT_CLEAN;
+            }
+            LWLockRelease(FILE_REPAIR_LOCK);
+            return;
+        }
+
+        max_segno = size / (RELSEG_SIZE * BLCKSZ); /* max_segno starts from 0 */
+        if (key.segno > max_segno) {
+            ereport(WARNING, (errcode_for_file_access(),
+                errmsg("[file repair] file %s, segno %u does not exist on the primary either, cannot repair, wait for clean",
+                    relpathperm(key.relfilenode, key.forknum), key.segno)));
+
+            LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+            RepairFileEntry *temp_entry = (RepairFileEntry*)hash_search(g_instance.repair_cxt.file_repair_hashtbl,
+                &(key), HASH_FIND, &found);
+            if (found) {
+                temp_entry->file_state = WAIT_CLEAN;
+            }
+            LWLockRelease(FILE_REPAIR_LOCK);
+
+            pfree(path);
+            pfree(segpath);
+            return;
+        }
+
+        if (IsSegmentFileNode(key.relfilenode)) {
+            /* wait for the xlog replay to finish */
+            WaitRepalyFinish();
+            /* wait for all dirty pages to be flushed */
+            RequestCheckpoint(CHECKPOINT_FLUSH_DIRTY|CHECKPOINT_WAIT);
+            df_clear_and_close_all_file(key, max_segno);
+        }
+
+        if (key.segno == 0) {
+            rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s", path);
+        } else {
+            rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s.%u", path, key.segno);
+        }
+        securec_check_ss(rc, "", "");
+        RepairSegFile(key, segpath, key.segno, max_segno, size);
+        checkOtherFile(key, max_segno, size);
+        if (IsSegmentFileNode(key.relfilenode) && !CheckAllSegmentFileRepair(key, max_segno)) {
+            goto RETRY;
+        }
+    }
+    pfree(path);
+    pfree(segpath);
+    return;
+}
+
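The segment arithmetic in StandbyRemoteReadFile rewards a worked example. Assuming RELSEG_SIZE = 131072 blocks and BLCKSZ = 8192 bytes (1 GB per segment; both values are configuration-dependent), a file size maps to max_segno and per-segment byte counts like this:

```
#include <cstdint>
#include <cstdio>

static const uint64_t kRelSegSize = 131072; /* blocks per segment (assumed) */
static const uint64_t kBlckSz = 8192;       /* bytes per block (assumed) */

void SegnoExample(uint64_t file_size)
{
    uint64_t seg_bytes = kRelSegSize * kBlckSz; /* 1 GB per segment */
    uint64_t max_segno = file_size / seg_bytes; /* segment numbering starts at 0 */
    for (uint64_t seg = 0; seg <= max_segno; seg++) {
        uint64_t bytes = (seg < max_segno) ? seg_bytes : file_size % seg_bytes;
        printf("segment %llu: %llu bytes\n",
            (unsigned long long)seg, (unsigned long long)bytes);
    }
}

/* SegnoExample(3ULL * 1024 * 1024 * 1024 + 5 * 8192)
 * -> segments 0..2 hold 1 GB each, segment 3 holds 40960 bytes. */
```

Note the same edge case the kernel expression `size % (RELSEG_SIZE * BLCKSZ)` has: a file that is an exact multiple of the segment size yields a zero-byte final segment.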
+    }
+    pfree(path);
+    pfree(segpath);
+    return;
+}
+
+static void checkOtherFile(RepairFileKey key, uint32 max_segno, uint64 size)
+{
+    errno_t rc;
+    bool found = false;
+    struct stat statBuf;
+    RepairFileKey temp_key;
+    RepairFileEntry *temp_entry = NULL;
+    char *path = relpathperm(key.relfilenode, key.forknum);
+    char *segpath = (char *)palloc(strlen(path) + SEGLEN);
+    HTAB *file_hash = g_instance.repair_cxt.file_repair_hashtbl;
+
+    for (uint i = 0; i <= max_segno; i++) {
+        if (i == 0) {
+            rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s", path);
+        } else {
+            rc = sprintf_s(segpath, strlen(path) + SEGLEN, "%s.%u", path, i);
+        }
+        securec_check_ss(rc, "", "");
+
+        if (i == key.segno) {
+            continue;
+        }
+
+        /* Check whether other segment files exist in the hashtable. */
+        temp_key.relfilenode.relNode = key.relfilenode.relNode;
+        temp_key.relfilenode.dbNode = key.relfilenode.dbNode;
+        temp_key.relfilenode.spcNode = key.relfilenode.spcNode;
+        temp_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
+        temp_key.forknum = key.forknum;
+        temp_key.segno = i;
+
+        LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+        temp_entry = (RepairFileEntry*)hash_search(file_hash, &(temp_key), HASH_FIND, &found);
+        if (found && temp_entry->file_state == WAIT_FILE_REMOTE_READ) {
+            LWLockRelease(FILE_REPAIR_LOCK);
+            RepairSegFile(temp_key, segpath, i, max_segno, size);
+            continue;
+        }
+
+        LWLockRelease(FILE_REPAIR_LOCK);
+
+        /* Check whether the segment file exists on disk; skip it unless stat()
+         * reports a clean ENOENT. */
+        if (stat(segpath, &statBuf) == 0 || errno != ENOENT) {
+            continue;
+        }
+
+        LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+
+        temp_entry = (RepairFileEntry*)hash_search(file_hash, &(temp_key), HASH_ENTER, &found);
+        if (!found) {
+            LWLockAcquire(ControlFileLock, LW_SHARED);
+            XLogRecPtr min_recovery_point = t_thrd.shemem_ptr_cxt.ControlFile->minRecoveryPoint;
+            LWLockRelease(ControlFileLock);
+            temp_entry->key = temp_key;
+            temp_entry->min_recovery_point = min_recovery_point;
+            temp_entry->file_state = WAIT_FILE_REMOTE_READ;
+            temp_entry->primary_file_lsn = InvalidXLogRecPtr;
+
+            ereport(LOG, (errmodule(MOD_REDO),
+                errmsg("[file repair] check other seg file, push to file repair hashtbl, path is %s segno is %u",
+                    relpathperm(key.relfilenode, key.forknum), i)));
+            LWLockRelease(FILE_REPAIR_LOCK);
+            RepairSegFile(temp_key, segpath, i, max_segno, size);
+        } else {
+            LWLockRelease(FILE_REPAIR_LOCK);
+        }
+    }
+    pfree(path);
+    pfree(segpath);
+    return;
+}
+
+const int MAX_FILE_REPAIR_NUM = 10;
+static void SeqRemoteReadFile()
+{
+    HTAB *repair_hash = g_instance.repair_cxt.file_repair_hashtbl;
+    RepairFileEntry *entry = NULL;
+    HASH_SEQ_STATUS status;
+    uint32 need_repair_num = 0;
+    errno_t rc = 0;
+    RepairFileKey remote_read[MAX_FILE_REPAIR_NUM] = {0};
+
+    pg_memory_barrier();
+    if (!RecoveryIsSuspend() || XLogRecPtrIsInvalid(g_instance.startup_cxt.suspend_lsn)) {
+        return;
+    }
+
+    rc = memset_s(remote_read, sizeof(RepairFileKey) * MAX_FILE_REPAIR_NUM, 0,
+        sizeof(RepairFileKey) * MAX_FILE_REPAIR_NUM);
+    securec_check(rc, "", "");
+    /* wait for xlog replay to finish */
+    WaitRepalyFinish();
+
+    LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+
+    hash_seq_init(&status, repair_hash);
+    while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) {
+        switch (entry->file_state) {
+            /* the page repair thread only handles files that need a remote read */
+            case WAIT_FILE_CHECK_REPAIR:
+            case WAIT_FILE_REPAIR:
+            case WAIT_FOREGT_INVALID_PAGE:
+            case WAIT_CLEAN:
+            case WAIT_RENAME:
+                break;
+            case WAIT_FILE_REPAIR_SEGMENT:
+            case WAIT_FILE_REMOTE_READ:
+                entry->file_state = WAIT_FILE_REMOTE_READ;
+                if (need_repair_num >= MAX_FILE_REPAIR_NUM) {
+                    break;
+                } else {
+                    rc = memcpy_s(&remote_read[need_repair_num], sizeof(RepairFileKey),
+                        &entry->key, sizeof(RepairFileKey));
+                    securec_check(rc, "", "");
+                    need_repair_num++;
+                }
+                break;
+            default:
+                LWLockRelease(FILE_REPAIR_LOCK);
+                ereport(ERROR, (errmsg("[file repair] unexpected file state during remote read")));
+                break;
+        }
+
+    }
+    LWLockRelease(FILE_REPAIR_LOCK);
+    for (uint32 i = 0; i < need_repair_num; i++) {
+        RepairFileKey temp = remote_read[i];
+        StandbyRemoteReadFile(temp);
+    }
+
+    /* recount the entries that still need repair */
+    need_repair_num = 0;
+    LWLockAcquire(FILE_REPAIR_LOCK, LW_EXCLUSIVE);
+    hash_seq_init(&status, repair_hash);
+    while ((entry = (RepairFileEntry *)hash_seq_search(&status)) != NULL) {
+        if (entry->file_state == WAIT_FILE_REPAIR_SEGMENT || entry->file_state == WAIT_FILE_REMOTE_READ ||
+            entry->file_state == WAIT_RENAME) {
+            need_repair_num++;
+        }
+    }
+    LWLockRelease(FILE_REPAIR_LOCK);
+    if (need_repair_num == 0) {
+        SetRecoverySuspend(false);
+        ereport(LOG, (errmodule(MOD_REDO),
+            errmsg("pagerepair thread set recovery suspend to false, the need repair num is zero")));
+    }
+    return;
+}
diff --git a/src/gausskernel/process/postmaster/pagewriter.cpp b/src/gausskernel/process/postmaster/pagewriter.cpp
index fedf1c3d7..3b3289c64 100755
--- a/src/gausskernel/process/postmaster/pagewriter.cpp
+++ b/src/gausskernel/process/postmaster/pagewriter.cpp
@@ -34,6 +34,7 @@
 #include "storage/ipc.h"
 #include "storage/smgr/smgr.h"
 #include "storage/pmsignal.h"
+#include "storage/standby.h"
 #include "access/double_write.h"
 #include "access/xlog.h"
 #include "utils/guc.h"
@@ -64,13 +65,22 @@ const int MAX_THREAD_NAME_LEN = 128;
 */
 const int PAGE_QUEUE_SLOT_MIN_RESERVE_NUM = 2;

+static TimestampTz g_last_snapshot_ts = 0;
+static XLogRecPtr g_last_snapshot_lsn = InvalidXLogRecPtr;
+
 /* Signal handlers */
 static void ckpt_pagewriter_sighup_handler(SIGNAL_ARGS);
+static void ckpt_pagewriter_sigint_handler(SIGNAL_ARGS);
 static void ckpt_pagewriter_quick_die(SIGNAL_ARGS);
 static void ckpt_pagewriter_request_shutdown_handler(SIGNAL_ARGS);
 static void ckpt_pagewriter_sigusr1_handler(SIGNAL_ARGS);
+static void HandlePageWriterMainInterrupts();
+
+/* dirty queue handling functions */
 static void ckpt_try_skip_invalid_elem_in_queue_head();
 static void ckpt_try_prune_dirty_page_queue();
+
+/* candidate buffer list handling functions */
 static uint32 calculate_pagewriter_flush_num();
 static void candidate_buf_push(int buf_id, int thread_id);
 static void seg_candidate_buf_push(int buf_id, int thread_id);
@@ -120,12 +130,8 @@ Datum ckpt_view_get_min_rec_lsn()
     ret = memset_s(queue_rec_lsn_s, LSN_LENGTH, 0, LSN_LENGTH);
     securec_check(ret, "", "");
-    ret = snprintf_s(queue_rec_lsn_s,
-        LSN_LENGTH,
-        LSN_LENGTH - 1,
-        "%X/%X",
-        (uint32)(queue_rec_lsn >> XLOG_LSN_SWAP),
-        (uint32)queue_rec_lsn);
+    ret = snprintf_s(queue_rec_lsn_s, LSN_LENGTH, LSN_LENGTH - 1, "%X/%X",
+        (uint32)(queue_rec_lsn >> XLOG_LSN_SWAP), (uint32)queue_rec_lsn);
     securec_check_ss(ret, "", "");
     return CStringGetTextDatum(queue_rec_lsn_s);
 }
@@ -139,12 +145,8 @@ Datum ckpt_view_get_queue_rec_lsn()
     ret = memset_s(queue_rec_lsn_s, LSN_LENGTH, 0, LSN_LENGTH);
     securec_check(ret, "", "");
-    ret = snprintf_s(queue_rec_lsn_s,
-        LSN_LENGTH,
-        LSN_LENGTH - 1,
-        "%X/%X",
-        (uint32)(queue_rec_lsn >> XLOG_LSN_SWAP),
-        (uint32)queue_rec_lsn);
+    ret = snprintf_s(queue_rec_lsn_s, LSN_LENGTH, LSN_LENGTH - 1, "%X/%X",
+        (uint32)(queue_rec_lsn >> XLOG_LSN_SWAP), (uint32)queue_rec_lsn);
securec_check_ss(ret, "", ""); return CStringGetTextDatum(queue_rec_lsn_s); } @@ -158,12 +160,8 @@ Datum ckpt_view_get_current_xlog_insert_lsn() ret = memset_s(current_lsn_s, LSN_LENGTH, 0, LSN_LENGTH); securec_check(ret, "", ""); - ret = snprintf_s(current_lsn_s, - LSN_LENGTH, - LSN_LENGTH - 1, - "%X/%X", - (uint32)(current_xlog_insert >> XLOG_LSN_SWAP), - (uint32)current_xlog_insert); + ret = snprintf_s(current_lsn_s, LSN_LENGTH, LSN_LENGTH - 1, "%X/%X", + (uint32)(current_xlog_insert >> XLOG_LSN_SWAP), (uint32)current_xlog_insert); securec_check_ss(ret, "", ""); return CStringGetTextDatum(current_lsn_s); } @@ -296,6 +294,11 @@ void incre_ckpt_pagewriter_cxt_init() g_instance.ckpt_cxt_ctl->pgwr_procs.sub_num = g_instance.attr.attr_storage.pagewriter_thread_num; g_instance.ckpt_cxt_ctl->pgwr_procs.running_num = 0; + g_instance.ckpt_cxt_ctl->prepared = 0; + g_instance.ckpt_cxt_ctl->CkptBufferIdsTail = 0; + g_instance.ckpt_cxt_ctl->CkptBufferIdsFlushPages = 0; + g_instance.ckpt_cxt_ctl->CkptBufferIdsCompletedPages = 0; + uint32 dirty_list_size = MAX_DIRTY_LIST_FLUSH_NUM / thread_num; /* init thread dw cxt and dirty list */ @@ -322,9 +325,9 @@ void candidate_buf_init(void) if (!ENABLE_INCRE_CKPT) { return; } - /* - * Each thread manages a part of the buffer. Several slots are reserved to - * prevent the thread first and last slots equals. + /* + * Each thread manages a part of the buffer. Several slots are reserved to + * prevent the thread first and last slots equals. */ g_instance.ckpt_cxt_ctl->candidate_buffers = (Buffer *) ShmemInitStruct("CandidateBuffers", buffer_num * sizeof(Buffer), &found_candidate_buf); @@ -429,7 +432,7 @@ bool atomic_push_pending_flush_queue(Buffer buffer, XLogRecPtr* queue_head_lsn, loop: exchange.u64[0] = compare.u64[0]; exchange.u64[1] = compare.u64[1] + 1; - *new_tail_loc = exchange.u64[1]; + *new_tail_loc = exchange.u64[1]; if ((uint64)(get_dirty_page_num() + PAGE_QUEUE_SLOT_MIN_RESERVE_NUM) >= g_instance.ckpt_cxt_ctl->dirty_page_queue_size) { @@ -623,7 +626,7 @@ static uint32 ckpt_qsort_dirty_page_for_flush(bool *is_new_relfilenode, uint32 f item->bucketNode = buf_desc->tag.rnode.bucketNode; item->forkNum = buf_desc->tag.forkNum; item->blockNum = buf_desc->tag.blockNum; - if(IsSegmentFileNode(buf_desc->tag.rnode) || buf_desc->tag.rnode.opt != 0) { + if(IsSegmentFileNode(buf_desc->tag.rnode)) { *is_new_relfilenode = true; } } @@ -657,50 +660,25 @@ static void wakeup_sub_thread() } } -/** - * @Description: Distribute the batch dirty pages to multiple pagewriter threads to flush - * @in: num of this batch dirty page - */ -static void divide_dirty_page_to_thread(uint32 requested_flush_num, bool is_new_relfilenode) +static void prepare_dirty_page_applied_state(uint32 requested_flush_num, bool is_new_relfilenode) { - uint32 avg_num; - uint32 remain_need_flush; int thread_loc; - int sub_thread_num = g_instance.ckpt_cxt_ctl->pgwr_procs.sub_num; + int thread_num; - avg_num = requested_flush_num / sub_thread_num; - remain_need_flush = requested_flush_num % sub_thread_num; + pg_atomic_init_u32(&g_instance.ckpt_cxt_ctl->CkptBufferIdsCompletedPages, 0); + pg_atomic_init_u32(&g_instance.ckpt_cxt_ctl->CkptBufferIdsTail, 0); + g_instance.ckpt_cxt_ctl->CkptBufferIdsFlushPages = requested_flush_num; + thread_num = g_instance.ckpt_cxt_ctl->pgwr_procs.num; + PageWriterProc* pgwr; - PageWriterProc *pgwr = NULL; - for (thread_loc = 1; thread_loc < g_instance.ckpt_cxt_ctl->pgwr_procs.num; thread_loc++) { + g_instance.dw_batch_cxt.is_new_relfilenode = is_new_relfilenode; + for 
(thread_loc = 1; thread_loc < thread_num; thread_loc++) {
         pgwr = &g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_loc];
-        pgwr->thrd_dw_cxt.is_new_relfilenode = is_new_relfilenode;
-        if (thread_loc == 1) {
-            pgwr->start_loc = 0;
-            pgwr->need_flush_num = avg_num + remain_need_flush;
-        } else {
-            uint32 prev_start = g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_loc - 1].start_loc;
-            uint32 prev_flush_num = g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_loc - 1].need_flush_num;
-            pgwr->start_loc = prev_start + prev_flush_num;
-            pgwr->need_flush_num = avg_num;
-        }
-
-        if (pgwr->proc != NULL) {
-            (void)pg_atomic_add_fetch_u32(&g_instance.ckpt_cxt_ctl->pgwr_procs.running_num, 1);
-            pg_write_barrier();
-            g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_loc].need_flush = true;
-            pg_write_barrier();
-            SetLatch(&(pgwr->proc->procLatch));
-        }
-
-        if (u_sess->attr.attr_storage.log_pagewriter) {
-            int next_flush = g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_loc].need_flush_num;
-            ereport(LOG,
-                (errmodule(MOD_INCRE_CKPT),
-                    errmsg("needWritten is %u, thread num is %d, need flush page num is %d",
-                        requested_flush_num, thread_loc, next_flush)));
-        }
     }
+
+    pg_write_barrier();
+    pg_atomic_init_u32(&g_instance.ckpt_cxt_ctl->prepared, 1);
+    pg_write_barrier();
 }

 /**
@@ -746,6 +724,7 @@ static void ckpt_move_queue_head_after_flush()
         if (rc & WL_POSTMASTER_DEATH) {
             gs_thread_exit(1);
         }
+        HandlePageWriterMainInterrupts();
     }

     if (u_sess->attr.attr_storage.log_pagewriter) {
@@ -766,7 +745,6 @@ static void ckpt_pagewriter_main_thread_flush_dirty_page()
     uint32 requested_flush_num;
     int32 expected_flush_num;
     bool is_new_relfilenode = false;
-    PageWriterProc* main_pgwr = &g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[0];

     Assert(t_thrd.pagewriter_cxt.pagewriter_id == 0);
     WritebackContextInit(&wb_context, &t_thrd.pagewriter_cxt.page_writer_after);
@@ -784,21 +762,11 @@ static void ckpt_pagewriter_main_thread_flush_dirty_page()

     requested_flush_num = ckpt_qsort_dirty_page_for_flush(&is_new_relfilenode, expected_flush_num);

-    /* If the requested_flush_num is less than the num of dw batch, main thread flush these dirty page. */
-    if (requested_flush_num <= GET_DW_DIRTY_PAGE_MAX(is_new_relfilenode)) {
-        main_pgwr->thrd_dw_cxt.is_new_relfilenode = is_new_relfilenode;
-        main_pgwr->thrd_dw_cxt.dw_page_idx = -1;
-        wakeup_sub_thread();
-        if (requested_flush_num > 0) {
-            dw_perform_batch_flush(requested_flush_num,
-                g_instance.ckpt_cxt_ctl->CkptBufferIds, &main_pgwr->thrd_dw_cxt);
-            incre_ckpt_pgwr_flush_dirty_page(wb_context, g_instance.ckpt_cxt_ctl->CkptBufferIds, 0,
-                requested_flush_num);
-            main_pgwr->thrd_dw_cxt.dw_page_idx = -1;
-        }
-    } else {
-        divide_dirty_page_to_thread(requested_flush_num, is_new_relfilenode);
-    }
+    /* Step 1: set up the atomic state for the dirty pages to be applied. */
+    prepare_dirty_page_applied_state(requested_flush_num, is_new_relfilenode);
+
+    /* Step 2: wake up all the subthreads; the main thread then sleeps.
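+     * Each subthread then claims batches of CkptBufferIds via an atomic
+     * fetch-add on CkptBufferIdsTail until CkptBufferIdsCompletedPages reaches
+     * CkptBufferIdsFlushPages (see apply_batch_flush_pages), instead of being
+     * handed a precomputed slice as before.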
+     */
+    wakeup_sub_thread();

     ckpt_move_queue_head_after_flush();
     smgrcloseall();
@@ -907,7 +875,6 @@ static uint32 get_page_num_for_lsn(XLogRecPtr target_lsn, uint32 max_num)
 }

 const float HIGH_WATER = 0.75;
-const float HALF = 0.5;
 const int BYTE_PER_KB = 1024;
 static void calculate_max_flush_num()
 {
@@ -1012,17 +979,17 @@ static uint32 calculate_pagewriter_flush_num()
     now = get_time_ms();
     time_diff = now - prev_time;
-    /*
-     * We update our variables every AVG_CALCULATE_NUM times to smooth
-     * pagewriter flush page nums;
+    /*
+     * We update our variables every AVG_CALCULATE_NUM times to smooth
+     * pagewriter flush page nums;
      */
-    if (++counter > AVG_CALCULATE_NUM ||
+    if (++counter > AVG_CALCULATE_NUM ||
         time_diff > AVG_CALCULATE_NUM * u_sess->attr.attr_storage.pageWriterSleep) {
         time_diff = MAX(1, time_diff);
         avg_flush_num = (uint32)((((double)total_flush_num) / time_diff * u_sess->attr.attr_storage.pageWriterSleep +
             avg_flush_num) / 2);
-        avg_lsn_rate = ((double)(cur_lsn - prev_lsn) / time_diff * u_sess->attr.attr_storage.pageWriterSleep
+        avg_lsn_rate = ((double)(cur_lsn - prev_lsn) / time_diff * u_sess->attr.attr_storage.pageWriterSleep
             + avg_lsn_rate) / 2;

         /* reset our variables */
@@ -1090,17 +1057,115 @@ DEFAULT:
     return flush_num;
 }

-void dw_upgrade()
+void dw_upgrade_batch()
 {
+    if (!dw_enabled()) {
+        return;
+    }
+
+    ereport(LOG, (errmodule(MOD_DW), errmsg("dw batch upgrade start")));
+
+    uint64 dw_file_size;
+    knl_g_dw_context* dw_batch_cxt = &g_instance.dw_batch_cxt;
+    dw_batch_file_context* dw_file_cxt = &dw_batch_cxt->batch_file_cxts[0];
+
+    (void)LWLockConditionalAcquire(dw_batch_cxt->flush_lock, LW_EXCLUSIVE);
+    (void)LWLockConditionalAcquire(dw_file_cxt->flush_lock, LW_EXCLUSIVE);
+
+    wait_all_dw_page_finish_flush();
+
+    PageWriterSync();
+
+    /* restore the GUC parameters and working state */
+    g_instance.dw_batch_cxt.old_batch_version = false;
+    g_instance.attr.attr_storage.dw_file_num = g_instance.dw_batch_cxt.recovery_dw_file_num;
+    g_instance.attr.attr_storage.dw_file_size = g_instance.dw_batch_cxt.recovery_dw_file_size;
+
+    MemoryContext oldcxt = MemoryContextSwitchTo(g_instance.dw_batch_cxt.mem_cxt);
+
+    /*
+     * DW_BATCH_UPGRADE_META_FILE_NAME is used to judge whether gaussdb quit at this
+     * step; stopping here means the meta file may be incomplete, so it can't be used.
+     */
+    int fd = open(DW_BATCH_UPGRADE_META_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM);
+    if (fd == -1) {
+        ereport(PANIC,
+            (errcode_for_file_access(), errmodule(MOD_DW),
+                errmsg("Could not create file \"%s\"", DW_BATCH_UPGRADE_META_FILE_NAME)));
+    }
+
+    /* create the new version meta file and batch files */
+    dw_generate_meta_file(&dw_batch_cxt->batch_meta_file);
+
+    if (close(fd) != 0 || unlink(DW_BATCH_UPGRADE_META_FILE_NAME) != 0) {
+        ereport(PANIC,
+            (errcode_for_file_access(), errmodule(MOD_DW),
+                errmsg("Could not close or remove the DW batch meta upgrade file")));
+    }
+
+    /*
+     * DW_BATCH_UPGRADE_BATCH_FILE_NAME is used to judge whether gaussdb quit at this
+     * step; stopping here means the batch files may be incomplete, so they can't be
+     * used.
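+     * (Finding this marker on a later startup therefore indicates the upgrade was
+     * interrupted at this point, and the partially generated batch files have to be
+     * regenerated rather than used.)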
+ */ + fd = open(DW_BATCH_UPGRADE_BATCH_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); + if (fd == -1) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not create file \"%s\"", DW_BATCH_UPGRADE_BATCH_FILE_NAME))); + } + + dw_file_size = DW_FILE_SIZE_UNIT * dw_batch_cxt->batch_meta_file.dw_file_size; + dw_generate_batch_files(dw_batch_cxt->batch_meta_file.dw_file_num, dw_file_size); + + + if (close(fd) != 0 || unlink(DW_BATCH_UPGRADE_BATCH_FILE_NAME) != 0) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not close or remove the DW batch upgrade file"))); + } + + dw_cxt_init_batch(); + + MemoryContextSwitchTo(oldcxt); + + /* close and remove old version dw batch file */ + if (close(dw_file_cxt->fd) != 0 || unlink(OLD_DW_FILE_NAME) != 0) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not close or remove the DW batch old version file"))); + } + + + LWLockRelease(dw_file_cxt->flush_lock); + + pfree(dw_file_cxt); + + (void)LWLockRelease(dw_batch_cxt->flush_lock); + + ereport(LOG, (errmodule(MOD_DW), errmsg("dw batch upgrade end"))); + + return; +} + +void dw_upgrade_single() +{ + if (!dw_enabled()) { + return; + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("dw single upgrade start"))); + knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; (void)LWLockAcquire(dw_single_cxt->second_flush_lock, LW_EXCLUSIVE); wait_all_single_dw_finish_flush_old(); - CheckPointSyncForDw(); + PageWriterSync(); + /* create dw batch flush file */ int fd = open(DW_UPGRADE_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); if (fd == -1) { ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", DW_FILE_NAME))); + (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not create file \"%s\"", DW_UPGRADE_FILE_NAME))); } /* close old version file */ if (close(dw_single_cxt->fd) != 0 || unlink(SINGLE_DW_FILE_NAME) != 0) { @@ -1139,30 +1204,25 @@ void dw_upgrade() } LWLockRelease(dw_single_cxt->second_flush_lock); + ereport(LOG, (errmodule(MOD_DW), errmsg("dw single upgrade end"))); + return; } -static void ckpt_pagewriter_main_thread_loop(void) +static void HandlePageWriterMainInterrupts() { - uint32 rc = 0; - uint64 now; - int64 sleep_time; - uint32 candidate_num = 0; - if (t_thrd.pagewriter_cxt.got_SIGHUP) { t_thrd.pagewriter_cxt.got_SIGHUP = false; ProcessConfigFile(PGC_SIGHUP); } - /* need generate new version dw file */ - if (pg_atomic_read_u32(&g_instance.dw_single_cxt.dw_version) < DW_SUPPORT_NEW_SINGLE_FLUSH && - t_thrd.proc->workingVersionNum >= DW_SUPPORT_NEW_SINGLE_FLUSH) { - dw_upgrade(); + if (t_thrd.pagewriter_cxt.sync_requested) { + t_thrd.pagewriter_cxt.sync_requested = false; + PageWriterSyncWithAbsorption(); } /* main thread should finally exit. 
*/ while (t_thrd.pagewriter_cxt.shutdown_requested && g_instance.ckpt_cxt_ctl->page_writer_can_exit) { - int i; if (pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) == 1) { ereport(LOG, (errmodule(MOD_INCRE_CKPT), @@ -1175,23 +1235,30 @@ static void ckpt_pagewriter_main_thread_loop(void) u_sess->attr.attr_common.ExitOnAnyError = true; /* Normal exit from the pagewriter is here */ proc_exit(0); /* done */ - } else { - for (i = 1; i < g_instance.ckpt_cxt_ctl->pgwr_procs.num; i++) { - if (g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[i].proc != NULL) { - SetLatch(&(g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[i].proc->procLatch)); - } - } - pg_usleep(MILLISECOND_TO_MICROSECOND); - continue; } } + return; +} +static void ckpt_pagewriter_main_thread_loop(void) +{ + uint32 rc = 0; + uint64 now; + int64 sleep_time; + uint32 candidate_num = 0; + + HandlePageWriterMainInterrupts(); + + candidate_num = get_curr_candidate_nums(false) + get_curr_candidate_nums(true); while (get_dirty_page_num() == 0 && candidate_num == (uint32)TOTAL_BUFFER_NUM && !t_thrd.pagewriter_cxt.shutdown_requested) { rc = WaitLatch(&t_thrd.proc->procLatch, WL_TIMEOUT | WL_POSTMASTER_DEATH, (long)TEN_MILLISECOND); if (rc & WL_POSTMASTER_DEATH) { gs_thread_exit(1); } + + HandlePageWriterMainInterrupts(); + candidate_num = get_curr_candidate_nums(false) + get_curr_candidate_nums(true); if (candidate_num == 0) { /* wakeup sub thread scan the buffer pool, init the candidate list */ @@ -1204,10 +1271,11 @@ static void ckpt_pagewriter_main_thread_loop(void) (uint32)g_instance.ckpt_cxt_ctl->pgwr_procs.num) { ckpt_try_skip_invalid_elem_in_queue_head(); ckpt_try_prune_dirty_page_queue(); - + PgwrAbsorbFsyncRequests(); /* Full checkpoint, don't sleep */ sleep_time = get_pagewriter_sleep_time(); while (sleep_time > 0 && !t_thrd.pagewriter_cxt.shutdown_requested && !FULL_CKPT) { + HandlePageWriterMainInterrupts(); /* sleep 1ms check whether a full checkpoint is triggered */ pg_usleep(MILLISECOND_TO_MICROSECOND); sleep_time -= 1; @@ -1228,6 +1296,7 @@ static void ckpt_pagewriter_main_thread_loop(void) static void wakeup_pagewriter_main_thread() { PageWriterProc *pgwr = &g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[0]; + /* The current candidate list is empty, wake up the buffer writer. 
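+     * Concretely this sets the latch of writer_proc[0] (the pagewriter main
+     * thread); the last sub thread to finish its share, i.e. the one that takes
+     * running_num to zero, calls it so the main thread can move the dirty page
+     * queue head forward.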
*/ if (pgwr->proc != NULL) { SetLatch(&pgwr->proc->procLatch); @@ -1235,18 +1304,39 @@ static void wakeup_pagewriter_main_thread() return; } +static bool apply_batch_flush_pages(PageWriterProc* pgwr) +{ + uint32 start_loc; + int need_flush_num; + bool is_new_relfilenode = pgwr->thrd_dw_cxt.is_new_relfilenode; + int dw_batch_page_max = GET_DW_DIRTY_PAGE_MAX(is_new_relfilenode); + uint32 total_flush_pages = g_instance.ckpt_cxt_ctl->CkptBufferIdsFlushPages; + start_loc = pg_atomic_fetch_add_u32(&g_instance.ckpt_cxt_ctl->CkptBufferIdsTail, dw_batch_page_max); + + if (start_loc >= total_flush_pages) { + return false; + } + + need_flush_num = dw_batch_page_max; + if (start_loc + need_flush_num > total_flush_pages) { + need_flush_num = total_flush_pages - start_loc; + } + + pgwr->start_loc = start_loc; + pgwr->need_flush_num = need_flush_num; + return true; +} + static void ckpt_pagewriter_sub_thread_loop() { uint32 rc; + uint64 now; + uint32 total_flush_pages; + uint32 old_running_num; + uint32 completed_pages; WritebackContext wb_context; int thread_id = t_thrd.pagewriter_cxt.pagewriter_id; WritebackContextInit(&wb_context, &t_thrd.pagewriter_cxt.page_writer_after); - uint64 now; - - if (t_thrd.pagewriter_cxt.got_SIGHUP) { - t_thrd.pagewriter_cxt.got_SIGHUP = false; - ProcessConfigFile(PGC_SIGHUP); - } if (t_thrd.pagewriter_cxt.shutdown_requested && g_instance.ckpt_cxt_ctl->page_writer_can_exit) { ereport(LOG, @@ -1274,10 +1364,6 @@ static void ckpt_pagewriter_sub_thread_loop() PageWriterProc* pgwr = &g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_id]; if (pgwr->need_flush) { - uint32 old_running_num = 0; - ResourceOwnerEnlargeBuffers(t_thrd.utils_cxt.CurrentResourceOwner); - incre_ckpt_pgwr_flush_dirty_queue(wb_context); - /* scan buffer pool, get flush list and candidate list */ now = get_time_ms(); if (t_thrd.pagewriter_cxt.next_scan_time <= now) { @@ -1291,11 +1377,39 @@ static void ckpt_pagewriter_sub_thread_loop() t_thrd.pagewriter_cxt.next_scan_time = now; } + pg_read_barrier(); + total_flush_pages = g_instance.ckpt_cxt_ctl->CkptBufferIdsFlushPages; + + while (pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->prepared) == 1 + && pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->CkptBufferIdsCompletedPages) < total_flush_pages) { + /* apply one batch dirty pages */ + if(!apply_batch_flush_pages(pgwr)) { + break; + } + + /* flush one batch dirty pages */ + ResourceOwnerEnlargeBuffers(t_thrd.utils_cxt.CurrentResourceOwner); + incre_ckpt_pgwr_flush_dirty_queue(wb_context); + + /* add up completed pages */ + completed_pages = pg_atomic_add_fetch_u32( + &g_instance.ckpt_cxt_ctl->CkptBufferIdsCompletedPages, pgwr->need_flush_num); + + /* if flush finished, set prepared to 0 */ + if (completed_pages == total_flush_pages) { + pg_atomic_write_u32(&g_instance.ckpt_cxt_ctl->prepared, 0); + pg_write_barrier(); + } + + pg_read_barrier(); + } + pgwr->need_flush = false; old_running_num = pg_atomic_fetch_sub_u32(&g_instance.ckpt_cxt_ctl->pgwr_procs.running_num, 1); if (old_running_num == 1) { wakeup_pagewriter_main_thread(); } + smgrcloseall(); } @@ -1308,7 +1422,7 @@ static void ckpt_pagewriter_handle_exception(MemoryContext pagewriter_context) * Close all open files after any error. This is helpful on Windows, * where holding deleted files open causes various strange errors. * It's not clear we need it elsewhere, but shouldn't hurt. 
- */ + */ int id = t_thrd.pagewriter_cxt.pagewriter_id; /* Since not using PG_TRY, must reset error stack by hand */ @@ -1329,6 +1443,8 @@ static void ckpt_pagewriter_handle_exception(MemoryContext pagewriter_context) /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * These operations are really just a minimal subset of * AbortTransaction(). We don't have very many resources to worry @@ -1372,6 +1488,10 @@ static void pagewriter_kill(int code, Datum arg) int id = t_thrd.pagewriter_cxt.pagewriter_id; Assert(id >= 0 && id < g_instance.ckpt_cxt_ctl->pgwr_procs.num); + if (id == 0) { + hash_destroy(u_sess->storage_cxt.pendingOps); + u_sess->storage_cxt.pendingOps = NULL; + } /* Making sure that we mark our exit status (as sub threads) so that main pagewriter thread would not be waiting for us in vain */ if (g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[id].need_flush) { g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[id].need_flush = false; @@ -1422,7 +1542,7 @@ static void SetupPageWriterSignalHook(void) * Reset some signals that are accepted by postmaster but not here */ (void)gspqsignal(SIGHUP, ckpt_pagewriter_sighup_handler); - (void)gspqsignal(SIGINT, SIG_IGN); + (void)gspqsignal(SIGINT, ckpt_pagewriter_sigint_handler); (void)gspqsignal(SIGTERM, ckpt_pagewriter_request_shutdown_handler); (void)gspqsignal(SIGQUIT, ckpt_pagewriter_quick_die); /* hard crash time */ (void)gspqsignal(SIGALRM, SIG_IGN); @@ -1440,6 +1560,21 @@ static void SetupPageWriterSignalHook(void) (void)gspqsignal(SIGWINCH, SIG_DFL); } +static void logSnapshotForLogicalDecoding() +{ + if (XLogLogicalInfoActive() && !RecoveryInProgress()) { + TimestampTz timeout = 0; + TimestampTz currentTime = GetCurrentTimestamp(); + timeout = TimestampTzPlusMilliseconds(g_last_snapshot_ts, LOG_SNAPSHOT_INTERVAL_MS); + + /* Log a new xl_running_xacts every 15 seconds for logical replication */ + if (currentTime >= timeout && !XLByteEQ(g_last_snapshot_lsn, GetXLogInsertRecPtr())) { + g_last_snapshot_lsn = LogStandbySnapshot(); + g_last_snapshot_ts = currentTime; + } + } +} + void ckpt_pagewriter_main(void) { sigjmp_buf localSigjmpBuf; @@ -1456,6 +1591,7 @@ void ckpt_pagewriter_main(void) ereport(LOG, (errmodule(MOD_INCRE_CKPT), errmsg("pagewriter started, thread id is %d", t_thrd.pagewriter_cxt.pagewriter_id))); + g_last_snapshot_ts = GetCurrentTimestamp(); /* * Create a resource owner to keep track of our resources (currently only @@ -1509,6 +1645,12 @@ void ckpt_pagewriter_main(void) pgstat_report_appname("PageWriter"); pgstat_report_activity(STATE_IDLE, NULL); + if (t_thrd.pagewriter_cxt.pagewriter_id == 0) { + g_instance.proc_base->pgwrMainThreadLatch = &t_thrd.proc->procLatch; + g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem->pagewritermain_pid = t_thrd.proc_cxt.MyProcPid; + InitSync(); + } + pg_time_t now = (pg_time_t) time(NULL); t_thrd.pagewriter_cxt.next_flush_time = now + u_sess->attr.attr_storage.pageWriterSleep; t_thrd.pagewriter_cxt.next_scan_time = now + @@ -1522,7 +1664,31 @@ void ckpt_pagewriter_main(void) * main pagewriter thread need choose a batch page flush to double write file, * than divide to other sub thread. 
*/ + if (t_thrd.pagewriter_cxt.pagewriter_id == 0) { + if (!t_thrd.pagewriter_cxt.shutdown_requested) { + logSnapshotForLogicalDecoding(); + } + /* need generate new version single flush dw file */ + if (pg_atomic_read_u32(&g_instance.dw_single_cxt.dw_version) < DW_SUPPORT_NEW_SINGLE_FLUSH && + t_thrd.proc->workingVersionNum >= DW_SUPPORT_NEW_SINGLE_FLUSH) { + dw_upgrade_single(); + } + + if (pg_atomic_read_u32(&g_instance.dw_batch_cxt.dw_version) < DW_SUPPORT_MULTIFILE_FLUSH && + t_thrd.proc->workingVersionNum >= DW_SUPPORT_MULTIFILE_FLUSH) { + dw_upgrade_batch(); + } + + /* + * when double write is disabled, pg_dw_meta will be created with dw_file_num = 0, so + * here is for upgrading process. pagewrite will run when enable_incremetal_checkpoint = on. + */ + if (pg_atomic_read_u32(&g_instance.dw_batch_cxt.dw_version) < DW_SUPPORT_REABLE_DOUBLE_WRITE + && t_thrd.proc->workingVersionNum >= DW_SUPPORT_REABLE_DOUBLE_WRITE) { + dw_upgrade_renable_double_write(); + } + ckpt_pagewriter_main_thread_loop(); } else { ckpt_pagewriter_sub_thread_loop(); @@ -1766,7 +1932,7 @@ static void incre_ckpt_pgwr_flush_dirty_queue(WritebackContext wb_context) { int thread_id = t_thrd.pagewriter_cxt.pagewriter_id; PageWriterProc* pgwr = &g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_id]; - bool is_new_relfilenode = pgwr->thrd_dw_cxt.is_new_relfilenode; + bool is_new_relfilenode = g_instance.dw_batch_cxt.is_new_relfilenode; uint32 start_loc = pgwr->start_loc; int need_flush_num = pgwr->need_flush_num; int dw_batch_page_max = GET_DW_DIRTY_PAGE_MAX(is_new_relfilenode); @@ -1789,7 +1955,7 @@ static void incre_ckpt_pgwr_flush_dirty_queue(WritebackContext wb_context) pgwr->thrd_dw_cxt.is_new_relfilenode = is_new_relfilenode; pgwr->thrd_dw_cxt.dw_page_idx = -1; - dw_perform_batch_flush(batch_num, dirty_buf_list + offset, &pgwr->thrd_dw_cxt); + dw_perform_batch_flush(batch_num, dirty_buf_list + offset, thread_id, &pgwr->thrd_dw_cxt); flush_num = incre_ckpt_pgwr_flush_dirty_page(wb_context, dirty_buf_list, offset, batch_num); pgwr->thrd_dw_cxt.dw_page_idx = -1; num_actual_flush += flush_num; @@ -1825,7 +1991,7 @@ static void incre_ckpt_pgwr_flush_dirty_list(WritebackContext wb_context, uint32 pgwr->thrd_dw_cxt.is_new_relfilenode = is_new_relfilenode; pgwr->thrd_dw_cxt.dw_page_idx = -1; - dw_perform_batch_flush(batch_num, dirty_buf_list + offset, &pgwr->thrd_dw_cxt); + dw_perform_batch_flush(batch_num, dirty_buf_list + offset, thread_id, &pgwr->thrd_dw_cxt); flush_num = incre_ckpt_pgwr_flush_dirty_page(wb_context, dirty_buf_list, offset, batch_num); pgwr->thrd_dw_cxt.dw_page_idx = -1; num_actual_flush += flush_num; @@ -1833,7 +1999,7 @@ static void incre_ckpt_pgwr_flush_dirty_list(WritebackContext wb_context, uint32 (void)pg_atomic_fetch_add_u64(&g_instance.ckpt_cxt_ctl->page_writer_actual_flush, num_actual_flush); (void)pg_atomic_fetch_add_u32(&g_instance.ckpt_cxt_ctl->page_writer_last_flush, num_actual_flush); - for (int i = 0; i < num_actual_flush; i++) { + for (uint32 i = 0; i < need_flush_num; i++) { buf_id = dirty_buf_list[i].buf_id; if (buf_id == DW_INVALID_BUFFER_ID) { continue; @@ -1970,7 +2136,7 @@ static void incre_ckpt_pgwr_scan_buf_pool(WritebackContext wb_context) batch_scan_num = MIN(pgwr->cand_list_size, MAX_SCAN_BATCH_NUM); end = MIN(start + batch_scan_num, end); max_flush_num = get_list_flush_num(false); - + need_flush_num = get_candidate_buf_and_flush_list(start, end, max_flush_num, &is_new_relfilenode); if (end >= pgwr->buf_id_start + pgwr->cand_list_size) { pgwr->next_scan_normal_loc = 
pgwr->buf_id_start;
@@ -2004,7 +2170,7 @@
 }

 /**
- * @Description: Scan n buffers in the BufferPool from start to end, put the unreferenced and not dirty
+ * @Description: Scan n buffers in the BufferPool from start to end, put the unreferenced and not dirty
  * page into the candidate list, the unreferenced and dirty page into the dirty list.
  * @in: start, can the buffer pool start loc
  * @in: end, scan the buffer pool end loc
@@ -2254,7 +2420,6 @@ static int64 get_thread_seg_candidate_nums(int thread_id)
     return curr_cand_num;
 }

-
 /**
  * @Description: Return a rough estimate of the current number of buffers in the candidate list.
  */
@@ -2294,6 +2459,19 @@ static void ckpt_pagewriter_sighup_handler(SIGNAL_ARGS)
     errno = save_errno;
 }

+/* SIGINT: set flag to run a normal checkpoint right away */
+static void ckpt_pagewriter_sigint_handler(SIGNAL_ARGS)
+{
+    int save_errno = errno;
+
+    t_thrd.pagewriter_cxt.sync_requested = true;
+
+    if (t_thrd.proc)
+        SetLatch(&t_thrd.proc->procLatch);
+
+    errno = save_errno;
+}
+
 static void ckpt_pagewriter_quick_die(SIGNAL_ARGS)
 {
     gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL);
@@ -2351,3 +2529,38 @@ void ckpt_shutdown_pagewriter()
         pg_usleep(MILLISECOND_TO_MICROSECOND);
     }
 }
+
+
+/* The following functions are used by the pagewriter thread to process file sync requests. */
+
+Size PageWriterShmemSize(void)
+{
+    Size size;
+
+    /* Currently, the size of the requests[] array is arbitrarily set equal to TOTAL_BUFFER_NUM */
+    size = offsetof(IncreCkptSyncShmemStruct, requests);
+    size = add_size(size, mul_size(TOTAL_BUFFER_NUM, sizeof(CheckpointerRequest)));
+
+    return size;
+}
+
+/*
+ * PageWriterSyncShmemInit
+ *      Allocate and initialize the shared memory used by the pagewriter to handle sync requests.
+ */
+void PageWriterSyncShmemInit(void)
+{
+    Size size = PageWriterShmemSize();
+    bool found = false;
+
+    g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem =
+        (IncreCkptSyncShmemStruct*)ShmemInitStruct("Incre Ckpt Sync Data", size, &found);
+
+    if (!found) {
+        /* The region being zeroed here can exceed 2 GB, so memset_s cannot be used.
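+         * (memset_s in the secure C library is typically capped at
+         * SECUREC_MEM_MAX_LEN, commonly 2 GB, hence the plain MemSet.)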
*/ + MemSet((char*)g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem, 0, size); + SpinLockInit(&g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem->sync_lock); + g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem->max_requests = TOTAL_BUFFER_NUM; + g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem->sync_queue_lwlock = LWLockAssign(LWTRANCHE_PGWR_SYNC_QUEUE); + } +} diff --git a/src/gausskernel/process/postmaster/pgarch.cpp b/src/gausskernel/process/postmaster/pgarch.cpp index 1e436511b..285c81e19 100755 --- a/src/gausskernel/process/postmaster/pgarch.cpp +++ b/src/gausskernel/process/postmaster/pgarch.cpp @@ -107,19 +107,16 @@ static void pgarch_ArchiverCopyLoop(void); static bool pgarch_archiveXlog(char* xlog); static bool pgarch_readyXlog(char* xlog, int xlog_length); static void pgarch_archiveDone(const char* xlog); +static void archKill(int code, Datum arg); +#ifndef ENABLE_LITE_MODE static void pgarch_archiveRoachForPitrStandby(); static bool pgarch_archiveRoachForPitrMaster(XLogRecPtr targetLsn); static bool pgarch_archiveRoachForCoordinator(XLogRecPtr targetLsn); static WalSnd* pgarch_chooseWalsnd(XLogRecPtr targetLsn); typedef bool(*doArchive)(XLogRecPtr); static void pgarch_ArchiverObsCopyLoop(XLogRecPtr flushPtr, doArchive fun); -static void archKill(int code, Datum arg); -static void InitArchiverLastTaskLsn(); -static void InitPitrTaskLastLsn(ArchiveSlotConfig* obs_archive_slot); -static long get_current_barrier_time(); -static void write_start_end_arch_timeto_obs(long cur_time, bool is_start); -static long get_curr_last_xlog_time(XLogRecPtr lastRemovedSegno); -static XLogRecPtr get_oldest_xlog_seg(); +static void InitArchiverLastTaskLsn(ArchiveSlotConfig* obs_archive_slot); +#endif AlarmCheckResult DataInstArchChecker(Alarm* alarm, AlarmAdditionalParam* additionalParam) { @@ -243,7 +240,9 @@ NON_EXEC_STATIC void PgArchiverMain(knl_thread_arg* arg) init_ps_display("archiver process", "", "", ""); setObsArchLatch(&t_thrd.arch.mainloop_latch); - InitArchiverLastTaskLsn(); +#ifndef ENABLE_LITE_MODE + InitArchiverLastTaskLsn(NULL); +#endif pgarch_MainLoop(); gs_thread_exit(0); @@ -336,7 +335,10 @@ static void pgarch_MainLoop(void) struct timeval last_copy_time; gettimeofday(&last_copy_time, NULL); bool time_to_stop = false; +#ifndef ENABLE_LITE_MODE doArchive fun = NULL; + const int millitosec = 1000; +#endif /* * We run the copy loop immediately upon entry, in case there are @@ -388,22 +390,31 @@ static void pgarch_MainLoop(void) else if ((unsigned int)(icurtime - t_thrd.arch.last_sigterm_time) >= (unsigned int)60) break; } +#ifndef ENABLE_LITE_MODE load_server_mode(); if (IsServerModeStandby()) { ArchiveTaskStatus *archive_task_status = NULL; archive_task_status = find_archive_task_status(&t_thrd.arch.archive_task_idx); + if (archive_task_status == NULL) { + ereport(ERROR, + (errmsg("pgarch_Archive main loop failed because could not get an archive task status."))); + } /* if we should do pitr archive, for standby */ volatile unsigned int *pitr_task_status = &archive_task_status->pitr_task_status; if (unlikely(pg_atomic_read_u32(pitr_task_status) == PITR_TASK_GET)) { pgarch_archiveRoachForPitrStandby(); pg_atomic_write_u32(pitr_task_status, PITR_TASK_DONE); + update_archive_start_end_location_file(archive_task_status->archive_task.targetLsn, + TIME_GET_MILLISEC(last_copy_time)); } } +#endif /* Do what we're here for */ if (t_thrd.arch.wakened || time_to_stop) { t_thrd.arch.wakened = false; +#ifndef ENABLE_LITE_MODE obs_archive_slot = getArchiveReplicationSlot(); if (obs_archive_slot != NULL && 
!IsServerModeStandby()) { gettimeofday(&curtime, NULL); @@ -424,16 +435,20 @@ static void pgarch_MainLoop(void) ereport(ERROR, (errmsg("Invalid pitr task last lsn on DCF mode."))); } else { + if (t_thrd.arch.pitr_task_last_lsn == InvalidXLogRecPtr) { + InitArchiverLastTaskLsn(obs_archive_slot); + ereport(LOG, (errmsg("update arch thread last lsn because current node is not standby, " + "and init last lsn is %08X%08X", (uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), + (uint32)t_thrd.arch.pitr_task_last_lsn))); + } got_recptr = SyncRepGetSyncRecPtr(&receivePtr, &writePtr, &flushPtr, &replayPtr, &amSync, false); if (got_recptr != true) { ereport(ERROR, (errmsg("pgarch_ArchiverObsCopyLoop failed when call SyncRepGetSyncRecPtr"))); } } - if (t_thrd.arch.pitr_task_last_lsn == InvalidXLogRecPtr) { - InitPitrTaskLastLsn(obs_archive_slot); - } - if (time_diff >= t_thrd.arch.task_wait_interval + + if (time_diff >= (u_sess->attr.attr_storage.archive_interval * millitosec) || XLByteDifference(flushPtr, t_thrd.arch.pitr_task_last_lsn) >= OBS_XLOG_SLICE_BLOCK_SIZE) { if (IS_PGXC_COORDINATOR) { fun = &pgarch_archiveRoachForCoordinator; @@ -445,12 +460,14 @@ static void pgarch_MainLoop(void) #endif } pgarch_ArchiverObsCopyLoop(flushPtr, fun); - AdvanceArchiveSlot(flushPtr); } } else { +#endif pgarch_ArchiverCopyLoop(); gettimeofday(&last_copy_time, NULL); +#ifndef ENABLE_LITE_MODE } +#endif } /* @@ -561,7 +578,7 @@ static void pgarch_ArchiverCopyLoop(void) } else { if (++failures >= NUM_ARCHIVE_RETRIES) { ereport(WARNING, - (errmsg("transaction log file \"%s\" could not be archived: too many failures", xlog))); + (errmsg("xlog file \"%s\" could not be archived: too many failures", xlog))); return; /* give up archiving for now */ } pg_usleep(1000000L); /* wait a bit before retrying */ @@ -658,6 +675,7 @@ static inline void UpdateArchivedLsn(XLogRecPtr targetLsn) archive_task_status->archived_lsn = targetLsn; } +#ifndef ENABLE_LITE_MODE /* * pgarch_ArchiverObsCopyLoop * @@ -668,6 +686,8 @@ static void pgarch_ArchiverObsCopyLoop(XLogRecPtr flushPtr, doArchive fun) ereport(LOG, (errmsg("pgarch_ArchiverObsCopyLoop"))); struct timeval tv; + long currTimestamp = 0; + const int millitosec = 1000; bool time_to_stop = false; /* @@ -715,13 +735,18 @@ static void pgarch_ArchiverObsCopyLoop(XLogRecPtr flushPtr, doArchive fun) if (fun(targetLsn) == false) { ereport(WARNING, - (errmsg("transaction log file \"%X/%X\" could not be archived: try again", + (errmsg("xlog file \"%X/%X\" could not be archived: try again", (uint32)(targetLsn >> 32), (uint32)(targetLsn)))); pg_usleep(1000000L); /* wait a bit before retrying */ } else { gettimeofday(&tv, NULL); - t_thrd.arch.last_arch_time = TIME_GET_MILLISEC(tv); + currTimestamp = TIME_GET_MILLISEC(tv); t_thrd.arch.pitr_task_last_lsn = targetLsn; + if (currTimestamp - t_thrd.arch.last_arch_time > + (u_sess->attr.attr_storage.archive_interval * millitosec)) { + AdvanceArchiveSlot(targetLsn); + } + t_thrd.arch.last_arch_time = currTimestamp; UpdateArchivedLsn(targetLsn); ResetLatch(&t_thrd.arch.mainloop_latch); ereport(LOG, @@ -729,6 +754,7 @@ static void pgarch_ArchiverObsCopyLoop(XLogRecPtr flushPtr, doArchive fun) } } while (XLByteLT(t_thrd.arch.pitr_task_last_lsn, flushPtr)); } +#endif /* * pgarch_archiveXlog @@ -962,11 +988,25 @@ static void pgarch_archiveDone(const char* xlog) char rlogready[MAXPGPATH]; char rlogdone[MAXPGPATH]; - StatusFilePath(rlogready, xlog, ".ready"); - StatusFilePath(rlogdone, xlog, ".done"); + StatusFilePath(rlogready, MAXPGPATH, xlog, ".ready"); + 
StatusFilePath(rlogdone, MAXPGPATH, xlog, ".done"); (void)durable_rename(rlogready, rlogdone, WARNING); } +static void archKill(int code, Datum arg) +{ + setObsArchLatch(NULL); + ereport(LOG, (errmsg("arch thread shut down, slotName: %s", t_thrd.arch.slot_name))); + pfree_ext(t_thrd.arch.slot_name); + if (t_thrd.arch.archive_config != NULL && t_thrd.arch.archive_config->archive_config.conn_config != NULL) { + pfree_ext(t_thrd.arch.archive_config->archive_config.conn_config->obs_address); + pfree_ext(t_thrd.arch.archive_config->archive_config.conn_config->obs_bucket); + pfree_ext(t_thrd.arch.archive_config->archive_config.conn_config->obs_ak); + pfree_ext(t_thrd.arch.archive_config->archive_config.conn_config->obs_sk); + } +} + +#ifndef ENABLE_LITE_MODE /* * pgarch_archiveRoachForPitrStandby * get signal from walreceiver, fork a roach process to archive xlog @@ -974,9 +1014,11 @@ static void pgarch_archiveDone(const char* xlog) static void pgarch_archiveRoachForPitrStandby() { ArchiveTaskStatus *archive_task_status = NULL; - struct timeval tv; - gettimeofday(&tv, NULL); archive_task_status = find_archive_task_status(&t_thrd.arch.archive_task_idx); + if (archive_task_status == NULL) { + ereport(ERROR, + (errmsg("pgarch_archiveRoachForPitrStandby failed because could not get an archive task status."))); + } ereport(LOG, (errmsg("pgarch_archiveRoachForPitrStandby %s : %X/%X, term:%d, subterm:%d", archive_task_status->slotname, @@ -986,7 +1028,6 @@ static void pgarch_archiveRoachForPitrStandby() archive_task_status->archive_task.sub_term))); if (ArchiveReplicationAchiver(&archive_task_status->archive_task) == 0) { archive_task_status->pitr_finish_result = true; - update_archive_start_end_location_file(archive_task_status->archive_task.targetLsn, TIME_GET_MILLISEC(tv)); } else { ereport(WARNING, (errmsg("error when pgarch_archiveRoachForPitrStandby %s : %X/%X, term:%d, subterm:%d", @@ -1019,7 +1060,6 @@ static bool pgarch_archiveRoachForPitrMaster(XLogRecPtr targetLsn) /* subterm update when walsender changed */ int rc = strcpy_s(archive_task_status->archive_task.slot_name, NAMEDATALEN, t_thrd.arch.slot_name); securec_check(rc, "\0", "\0"); - ResetLatch(&t_thrd.arch.mainloop_latch); ereport(LOG, (errmsg("%s : pgarch_archiveRoachForPitrMaster %X/%X", t_thrd.arch.slot_name, (uint32)(targetLsn >> 32), @@ -1034,9 +1074,9 @@ static bool pgarch_archiveRoachForPitrMaster(XLogRecPtr targetLsn) archive_task_status->archiver_latch = &t_thrd.arch.mainloop_latch; add_archive_task_to_list(t_thrd.arch.archive_task_idx, walsnd); SetLatch(&walsnd->latch); + ResetLatch(&t_thrd.arch.mainloop_latch); rc = WaitLatch(&t_thrd.arch.mainloop_latch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - (long)t_thrd.arch.task_wait_interval); + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, 3000L); if (rc & WL_POSTMASTER_DEATH) { gs_thread_exit(1); @@ -1062,9 +1102,14 @@ static bool pgarch_archiveRoachForPitrMaster(XLogRecPtr targetLsn) */ static bool pgarch_archiveRoachForCoordinator(XLogRecPtr targetLsn) { - + struct timeval tv; + gettimeofday(&tv, NULL); ArchiveTaskStatus *archive_task_status = NULL; archive_task_status = find_archive_task_status(&t_thrd.arch.archive_task_idx); + if (archive_task_status == NULL) { + ereport(ERROR, + (errmsg("pgarch_archiveRoachForCoordinator failed because could not get an archive task status."))); + } ArchiveXlogMessage archive_xlog_info; archive_xlog_info.targetLsn = targetLsn; archive_xlog_info.term = 0; @@ -1089,6 +1134,8 @@ static bool pgarch_archiveRoachForCoordinator(XLogRecPtr 
targetLsn) } else { archive_task_status->pitr_finish_result = true; archive_task_status->archive_task.targetLsn = targetLsn; + update_archive_start_end_location_file(archive_task_status->archive_task.targetLsn, + TIME_GET_MILLISEC(tv)); } ereport(LOG, (errmsg("pgarch_archiveRoachForCoordinator %X/%X", (uint32)(targetLsn >> 32), (uint32)(targetLsn)))); @@ -1105,21 +1152,22 @@ static WalSnd* pgarch_chooseWalsnd(XLogRecPtr targetLsn) walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[t_thrd.arch.sync_walsender_idx]; SpinLockAcquire(&walsnd->mutex); if (walsnd->pid != 0 && ((walsnd->sendRole & SNDROLE_PRIMARY_STANDBY) == walsnd->sendRole) - && !XLogRecPtrIsInvalid(walsnd->flush) && XLByteLE(targetLsn, walsnd->flush)) { + && !XLogRecPtrIsInvalid(walsnd->flush) && XLByteLE(targetLsn, walsnd->flush) && + walsnd->is_cross_cluster == false) { SpinLockRelease(&walsnd->mutex); return (WalSnd*)walsnd; } SpinLockRelease(&walsnd->mutex); } - for (i = 0; i < g_instance.attr.attr_storage.max_wal_senders; i++) { /* use volatile pointer to prevent code rearrangement */ walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[i]; SpinLockAcquire(&walsnd->mutex); - if (walsnd->pid != 0 && ((walsnd->sendRole & SNDROLE_PRIMARY_STANDBY) == walsnd->sendRole)) { + if (walsnd->pid != 0 && ((walsnd->sendRole & SNDROLE_PRIMARY_STANDBY) == walsnd->sendRole) && + walsnd->is_cross_cluster == false) { if (XLByteLE(targetLsn, walsnd->flush)) { SpinLockRelease(&walsnd->mutex); ArchiveTaskStatus *archive_status = NULL; @@ -1130,6 +1178,7 @@ static WalSnd* pgarch_chooseWalsnd(XLogRecPtr targetLsn) t_thrd.arch.sync_walsender_idx, i))); } t_thrd.arch.sync_walsender_idx = i; + g_instance.archive_obs_cxt.chosen_walsender_index = i; archive_status->sync_walsender_term++; ereport(LOG, (errmsg("pgarch_chooseWalsnd has change from %d to %d , sub_term:%d", @@ -1142,229 +1191,38 @@ static WalSnd* pgarch_chooseWalsnd(XLogRecPtr targetLsn) return NULL; } - -static void archKill(int code, Datum arg) -{ - setObsArchLatch(NULL); - ereport(LOG, (errmsg("arch thread shut down, slotName: %s", t_thrd.arch.slot_name))); - pfree_ext(t_thrd.arch.slot_name); -} - -static void InitArchiverLastTaskLsn() +static void InitArchiverLastTaskLsn(ArchiveSlotConfig* obs_archive_slot) { struct timeval tv; load_server_mode(); gettimeofday(&tv,NULL); - t_thrd.arch.last_arch_time = TIME_GET_MILLISEC(tv); - ArchiveSlotConfig* obs_archive_slot = getArchiveReplicationSlot(); - if (obs_archive_slot != NULL && !IsServerModeStandby()) { - InitPitrTaskLastLsn(obs_archive_slot); - } -} -static void InitPitrTaskLastLsn(ArchiveSlotConfig* obs_archive_slot) -{ - ArchiveXlogMessage obs_archive_info; - struct timeval tv; - - gettimeofday(&tv,NULL); - if (archive_replication_get_last_xlog(&obs_archive_info, &obs_archive_slot->archive_config) == 0) { - t_thrd.arch.pitr_task_last_lsn = obs_archive_info.targetLsn; - ereport(LOG, - (errmsg("initLastTaskLsn update lsn to %X/%X from obs", (uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), - (uint32)(t_thrd.arch.pitr_task_last_lsn)))); - XLogRecPtr LastRemovedSegno = XLogGetLastRemovedSegno(); - if (LastRemovedSegno == 0) { - LastRemovedSegno = get_oldest_xlog_seg() - 1; - } - XLogRecPtr OldestKeepRecvPtr = (LastRemovedSegno + 1) * XLOG_SEG_SIZE; - if (XLByteLT(t_thrd.arch.pitr_task_last_lsn, OldestKeepRecvPtr)) { - ereport(LOG, - (errmsg("initLastTaskLsn for xlog deleted, %X/%X from obs, %X/%X from local, local will overwrite obs", - (uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), (uint32)(t_thrd.arch.pitr_task_last_lsn), - 
(uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), (uint32)(t_thrd.arch.pitr_task_last_lsn)))); - long barrier_time = get_current_barrier_time(); - if (barrier_time == 0) { - write_start_end_arch_timeto_obs(TIME_GET_MILLISEC(tv), false); - } else { - write_start_end_arch_timeto_obs(barrier_time, false); - } - write_start_end_arch_timeto_obs(get_curr_last_xlog_time(LastRemovedSegno), true); - t_thrd.arch.pitr_task_last_lsn = OldestKeepRecvPtr - (OldestKeepRecvPtr % XLogSegSize); - } - AdvanceArchiveSlot(t_thrd.arch.pitr_task_last_lsn); - } else { - XLogRecPtr targetLsn = GetFlushRecPtr(); - t_thrd.arch.pitr_task_last_lsn = targetLsn - (targetLsn % XLogSegSize); - AdvanceArchiveSlot(t_thrd.arch.pitr_task_last_lsn); - ereport(LOG, - (errmsg("initLastTaskLsn update lsn to %X/%X from local", (uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), - (uint32)(t_thrd.arch.pitr_task_last_lsn)))); - write_start_end_arch_timeto_obs(TIME_GET_MILLISEC(tv), true); - } -} - -static void write_start_end_arch_timeto_obs(long cur_time, bool is_start) -{ - if (IS_PGXC_COORDINATOR) { - return ; - } - errno_t rc = 0; - ArchiveConfig obsConfig; - const int messageLen = 21; - ArchiveSlotConfig* obs_archive_slot = getArchiveReplicationSlot(); if (obs_archive_slot == NULL) { - return; + t_thrd.arch.last_arch_time = TIME_GET_MILLISEC(tv); + obs_archive_slot = getArchiveReplicationSlot(); } - char pathPrefix[MAXPGPATH] = {0}; - char obsfile_name[MAXPGPATH] = {0}; - ereport(LOG, (errmsg("write_start_end_arch_timeto_obs start, curtime %ld, is_start %s", - cur_time, is_start ? "true" : "false"))); - - /* copy OBS configs to temporary variable for customising file path */ - rc = memcpy_s(&obsConfig, sizeof(ArchiveConfig), &obs_archive_slot->archive_config, sizeof(ArchiveConfig)); - securec_check(rc, "", ""); - - if (!IS_PGXC_COORDINATOR) { - rc = strcpy_s(pathPrefix, MAXPGPATH, obsConfig.archive_prefix); - securec_check(rc, "\0", "\0"); - - char *p = strrchr(pathPrefix, '/'); - if (p == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Obs path prefix is invalid"))); + volatile int *slot_idx = &t_thrd.arch.slot_idx; + if (obs_archive_slot != NULL && !IsServerModeStandby() && !RecoveryInProgress()) { + if (likely(*slot_idx != -1) && *slot_idx < g_instance.attr.attr_storage.max_replication_slots) { + ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[*slot_idx]; + SpinLockAcquire(&slot->mutex); + if (slot->in_use == true && slot->archive_config != NULL) { + t_thrd.arch.pitr_task_last_lsn = slot->data.restart_lsn; + SpinLockRelease(&slot->mutex); + } else { + SpinLockRelease(&slot->mutex); + ereport(ERROR, (errcode_for_file_access(), errmsg("slot idx not valid, obs slot %X/%X not advance ", + (uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), (uint32)(t_thrd.arch.pitr_task_last_lsn)))); + } + ereport(LOG, (errmsg("successful init archive slot %X/%X ", + (uint32)(t_thrd.arch.pitr_task_last_lsn >> 32), (uint32)(t_thrd.arch.pitr_task_last_lsn)))); + } else { + ereport(WARNING, (errmsg("could not init archive slot cause slot index is invalid: %d", *slot_idx))); } - *p = '\0'; - obsConfig.archive_prefix = pathPrefix; - } - rc = snprintf_s(obsfile_name, MAXPGPATH, MAXPGPATH - 1, "%s/%s_%013ld", - ARCH_TIME_FOLDER, is_start ? 
"s" : "e", cur_time); - securec_check_ss_c(rc, "\0", "\0"); - ArchiveWrite(obsfile_name, "not important content", messageLen, &obsConfig); -} - - -static long get_current_barrier_time() -{ - const int BARRIER_NAME_LEN = 40; - char barrier_name[BARRIER_NAME_LEN]; - int ret; - uint64_t barrier_id; - long ts = 0; - - if (ArchiveReplicationReadFile(BARRIER_FILE, (char *)barrier_name, MAX_BARRIER_ID_LENGTH, t_thrd.arch.slot_name)) { - barrier_name[BARRIER_NAME_LEN - 1] = '\0'; - ereport(LOG, (errmsg("[initLastTaskLsn] read barrier id from obs %s", barrier_name))); } else { - ereport(LOG, (errmsg("[initLastTaskLsn] failed to read barrier id from obs, start barrier from 0"))); - return 0; + ereport(WARNING, (errmsg("could not init archive slot cause current server mode is %d", + t_thrd.xlog_cxt.server_mode))); } - - ret = sscanf_s(barrier_name, "hadr_%020" PRIu64 "_%013ld", &barrier_id, &ts); - if (ret == 2) { - return 0; - } - return ts; -} - -static long get_oldest_xlog_time() -{ - DIR *xldir = NULL; - struct dirent *xlde = NULL; - long oldest_time = 0; - xldir = AllocateDir(XLOGDIR); - if (xldir == NULL) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not open transaction log directory \"%s\": %m", XLOGDIR))); - } - struct stat buf; - - while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { - /* Ignore files that are not XLOG segments */ - if (strlen(xlde->d_name) != 24 || strspn(xlde->d_name, "0123456789ABCDEF") != 24) { - continue; - } - if (stat(xlde->d_name, &buf) != 0) { - continue; - } - if (oldest_time == 0 || oldest_time > (long)buf.st_mtime) { - oldest_time = (long)buf.st_mtime; - } - } - FreeDir(xldir); - return oldest_time; -} - -static long get_curr_last_xlog_time(XLogRecPtr lastRemovedSegno) -{ - /* can not deal with build scene and pre create scene*/ - int retry_time = 100; - struct timeval tv; -retry: - if (lastRemovedSegno == 0) { - return get_oldest_xlog_time(); - } - XLogRecPtr oldestXlogSegno = lastRemovedSegno + 1; - - char xlogname[MAXFNAMELEN] = {0}; - errno_t errorno = EOK; - errorno = snprintf_s(xlogname, MAXFNAMELEN, MAXFNAMELEN - 1, "%s/%08X%08X%08X", XLOGDIR, - t_thrd.xlog_cxt.ThisTimeLineID, (uint32)((oldestXlogSegno) / XLogSegmentsPerXLogId), - (uint32)((oldestXlogSegno) % XLogSegmentsPerXLogId)); - securec_check_ss(errorno, "", ""); - - struct stat buf; - if (stat(xlogname, &buf) != 0) { - if (retry_time > 0) { - gettimeofday(&tv, NULL); - return TIME_GET_MILLISEC(tv); - } - pg_usleep_retry(1000 * 1000, 0); - retry_time--; - ereport(LOG, (errmsg("[get_curr_last_xlog_time] xlog name not exist %s, try again", xlogname))); - goto retry; - } - return (long)buf.st_mtime; -} - -static XLogRecPtr get_oldest_xlog_seg() -{ - int ret; - TimeLineID timeline; - int segno; - int segoff; - DIR *xldir = NULL; - struct dirent *xlde = NULL; - XLogRecPtr oldest_seg = 0; - xldir = AllocateDir(XLOGDIR); - if (xldir == NULL) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not open transaction log directory \"%s\": %m", XLOGDIR))); - } - struct stat buf; - char xlogname[MAXFNAMELEN] = {0}; - errno_t errorno = EOK; - while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { - /* Ignore files that are not XLOG segments */ - if (strlen(xlde->d_name) != 24 || strspn(xlde->d_name, "0123456789ABCDEF") != 24) { - continue; - } - errorno = snprintf_s(xlogname, MAXFNAMELEN, MAXFNAMELEN - 1, "%s/%s", XLOGDIR, xlde->d_name); - securec_check_ss(errorno, "", ""); - if (stat(xlogname, &buf) != 0) { - continue; - } - ret = sscanf_s(xlde->d_name, "%8X%8X%8X", &timeline, &segno, 
&segoff); - if (ret != 3) { - continue; - } - XLogRecPtr seg = segno * XLogSegmentsPerXLogId + segoff; - if (oldest_seg == 0 || oldest_seg > seg) { - oldest_seg = seg; - } - } - FreeDir(xldir); - return oldest_seg; - } +#endif diff --git a/src/gausskernel/process/postmaster/pgaudit.cpp b/src/gausskernel/process/postmaster/pgaudit.cpp index 75e4962e9..16f62980b 100755 --- a/src/gausskernel/process/postmaster/pgaudit.cpp +++ b/src/gausskernel/process/postmaster/pgaudit.cpp @@ -49,6 +49,7 @@ #include "utils/timestamp.h" #include "utils/builtins.h" #include "utils/acl.h" +#include "auditfuncs.h" #include "gssignal/gs_signal.h" #include "gs_policy/policy_common.h" @@ -85,6 +86,8 @@ #define MAX_QUEUE_SIZE 100000 #define MAX_CONNECTION_INFO_SIZE (MAXNUMLEN * 4 + NI_MAXHOST) +const static uint32 uint64_max_len = 20; +const uint32 AUDIT_INDEX_TABLE_VERSION_NUM = 92601; typedef struct { char nuls[2]; /* always \0\0 */ @@ -102,10 +105,11 @@ typedef union { #define PIPE_HEADER_SIZE offsetof(PipeProtoHeader, data) #define PIPE_MAX_PAYLOAD ((int)(PIPE_CHUNK_SIZE - PIPE_HEADER_SIZE)) -/* The indextbl->count should meet a relationship with curidx and begidx. */ -#define AUDIT_COUNT(indextbl) \ - (((indextbl)->curidx >= (indextbl)->begidx) ? ((indextbl)->curidx - (indextbl)->begidx + 1) \ - : ((indextbl)->curidx + (indextbl)->maxnum + 1 - (indextbl)->begidx)) +#define PIPE_READ_INDEX(index) (index * 2) +#define PIPE_WRITE_INDEX(index) (index * 2 + 1) + +#define AUDIT_THREADNUM_ENLARGE \ + ((uint32)g_instance.audit_cxt.thread_num > g_instance.audit_cxt.audit_indextbl->thread_num) /* * We really want line-buffered mode for auditfile output, but Windows does @@ -172,18 +176,6 @@ typedef struct { #define NBUFFER_LISTS 256 -/* These must be exported for EXEC_BACKEND case ... annoying */ -#ifndef WIN32 -int sysauditPipe[2] = {-1, -1}; -#else -HANDLE sysauditPipe[2] = {0, 0}; -#endif - -#ifdef WIN32 -static HANDLE threadHandle = 0; -static CRITICAL_SECTION sysauditorSection; -#endif - /* * Flags set by interrupt handlers for later service in the main loop. 
*/ @@ -212,16 +204,33 @@ typedef struct AuditIndexItem { * Description : */ typedef struct AuditIndexTable { + uint32 maxnum; /* max count of the audit index item */ + uint32 begidx; /* the position of the first audit index item */ + uint32 thread_num; /* the running audit thread num */ + volatile uint32 latest_idx; /* the latest next position of all audit threads index items */ + uint32 curidx[MAX_AUDIT_NUM]; /* the position of the current audit thread index item */ + uint32 count; /* the count of the audit index item */ + pg_time_t last_audit_time; /* the audit time of the latest audit record */ + AuditIndexItem data[1]; +} AuditIndexTable; + +/* + * Brief : old audit index table + * Description : + */ +typedef struct AuditIndexTableOld { uint32 maxnum; /* max count of the audit index item */ uint32 begidx; /* the position of the first audit index item */ uint32 curidx; /* the position of the current audit index item */ uint32 count; /* the count of the audit index item */ pg_time_t last_audit_time; /* the audit time of the latest audit record */ AuditIndexItem data[1]; -} AuditIndexTable; +} AuditIndexTableOld; -static const char audit_indextbl_file[] = "index_table"; +static const char audit_indextbl_file[] = "index_table_new"; +static const char audit_indextbl_old_file[] = "index_table"; static const int indextbl_header_size = offsetof(AuditIndexTable, data); +static const int old_indextbl_header_size = offsetof(AuditIndexTableOld, data); static const char* AuditTypeDescs[] = {"unknown", "login_success", @@ -266,7 +275,12 @@ static const char* AuditTypeDescs[] = {"unknown", "masking_policy", "security_policy", "ddl_sequence", - "ddl_key"}; + "ddl_key", + "ddl_package", + "ddl_model", + "ddl_globalconfig", + "ddl_publication_subscription", + "ddl_foreign_data_wrapper"}; static const int AuditTypeNum = sizeof(AuditTypeDescs) / sizeof(char*); @@ -316,23 +330,6 @@ typedef struct AuditData { #define FILED_NULLABLE(field) (field ? 
field : _("null")) -/* - * Brief : the string field number in audit record - * Description : - */ -typedef enum { - AUDIT_USER_ID = 0, - AUDIT_USER_NAME, - AUDIT_DATABASE_NAME, - AUDIT_CLIENT_CONNINFO, - AUDIT_OBJECT_NAME, - AUDIT_DETAIL_INFO, - AUDIT_NODENAME_INFO, - AUDIT_THREADID_INFO, - AUDIT_LOCALPORT_INFO, - AUDIT_REMOTEPORT_INFO -} AuditStringFieldNum; - #define PGAUDIT_RESTART_INTERVAL 60 #define PGAUDIT_QUERY_COLS 13 @@ -370,91 +367,71 @@ typedef enum { POLICYAUDITFILE_TYPE, UNKNOWNFILE_TYPE } AuditFileType; + /* Local subroutines */ static void process_pipe_input(char* auditbuffer, int* bytes_in_auditbuffer); static void flush_pipe_input(char* auditbuffer, int* bytes_in_auditbuffer); static void pgaudit_write_file(char* buffer, int count); -static void pgaudit_write_policy_audit_file(const char* buffer, int count); static void auditfile_init(bool allow_errors = false); static FILE *auditfile_open(pg_time_t timestamp, const char *mode, bool allow_errors, const char *filename = pgaudit_filename, bool ignore_num = false); static void auditfile_close(AuditFileType flag); -#ifdef WIN32 -static unsigned int __stdcall pipeThread(void* arg); -#endif -// policy auditing -static void policy_auditfile_rotate(); -static void set_next_policy_rotation_time(void); - +/****** audit logs management ******/ static void auditfile_rotate(bool time_based_rotation, bool size_based_rotation); static void set_next_rotation_time(void); static void pgaudit_cleanup(void); +/****** signale handler *****/ static void pgaudit_exit(SIGNAL_ARGS); static void sigHupHandler(SIGNAL_ARGS); static void sigUsr1Handler(SIGNAL_ARGS); +static void sig_thread_quit_handler(); +static void sig_thread_config_handler(int ¤tAuditRotationAge, int ¤tAuditRemainThreshold); +static void pgauditor_kill(int code, Datum arg); -static void write_pipe_chunks(char* data, int len); +static void write_pipe_chunks(char* data, int len, AuditClassType type = STD_AUDIT_TYPE); static void appendStringField(StringInfo str, const char* s); static void pgaudit_close_file(FILE* fp, const char* file); static void pgaudit_read_indexfile(const char* audit_directory); static void pgaudit_update_indexfile(const char* mode, bool allow_errors); -static void pgaudit_indextbl_init(void); +static bool pgaudit_find_indexfile(void); +static void pgaudit_indexfile_upgrade(void); +static void pgaudit_indexfile_sync(const char* mode, bool allow_errors); +static void pgaudit_rewrite_indexfile(void); +static void pgaudit_indextbl_init_new(void); +void extracted836(errno_t &errorno, int thread_num); +static void pgaudit_reset_indexfile(); static const char* pgaudit_string_field(AuditData* adata, int num); static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS], AuditData *adata, const AuditMsgHdr &header); static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 fnum, TimestampTz begtime, - TimestampTz endtime, const char *audit_directory); + TimestampTz endtime, const char *audit_directory); +static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallInfoData *fcinfo, TupleDesc &tupdesc); +static uint32 pgaudit_get_auditfile_num(); +static void pgaudit_update_auditfile_time(pg_time_t timestamp, bool exist); +static void pgaudit_switch_next_auditfile(); + +/********** unified audit ******/ static void pgaudit_send_data_to_elastic(); static void pgaudit_query_file_for_elastic(); static void elasic_search_connection_test(); +static void policy_auditfile_rotate(); +static void 
set_next_policy_rotation_time(void); +static void pgaudit_write_policy_audit_file(const char* buffer, int count); + +/********** toughness *********/ static void CheckAuditFile(void); +static bool pgaudit_valid_header(const AuditMsgHdr* header); +static void pgaudit_mark_corrupt_info(uint32 fnum); +static void audit_append_xid_info(const char *detail_info, char *detail_info_xid, uint32 len); +static bool audit_status_check_ok(); -/* - * Main entry point for auditor process - * argc/argv parameters are valid only in EXEC_BACKEND case. - */ -NON_EXEC_STATIC void PgAuditorMain() +static void init_audit_signal_handlers() { -#ifndef WIN32 - char auditbuffer[READ_BUF_SIZE + 1] = {0}; - int bytes_in_auditbuffer = 0; -#endif - int currentAuditRotationAge; - int currentAuditRemainThreshold; - pg_time_t now; - - IsUnderPostmaster = true; /* we are a postmaster subprocess now */ - - t_thrd.proc_cxt.MyProcPid = gs_thread_self(); /* reset t_thrd.proc_cxt.MyProcPid */ - - t_thrd.proc_cxt.MyStartTime = time(NULL); /* set our start time in case we call elog */ - now = t_thrd.proc_cxt.MyStartTime; - - t_thrd.role = AUDITOR; - - init_ps_display("auditor process", "", "", ""); - - /* - * Also close our copy of the write end of the pipe. This is needed to - * ensure we can detect pipe EOF correctly. (But note that in the restart - * case, the postmaster already did this.) - */ - InitializeLatchSupport(); /* needed for latch waits */ - /* Initialize private latch for use by signal handlers */ - InitLatch(&t_thrd.audit.sysAuditorLatch); - - /* - * Properly accept or ignore signals the postmaster might send us - * - * Note: we ignore all termination signals, and instead exit only when all - * upstream processes are gone, to ensure we don't miss any dying gasps of - * broken backends... - */ (void)gspqsignal(SIGHUP, sigHupHandler); /* set flag to read config file */ (void)gspqsignal(SIGINT, SIG_IGN); (void)gspqsignal(SIGTERM, SIG_IGN); @@ -472,8 +449,143 @@ NON_EXEC_STATIC void PgAuditorMain() (void)gspqsignal(SIGWINCH, SIG_DFL); gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); - m_curlUtils.initialize(false, "", "", ""); (void)gs_signal_unblock_sigusr2(); +} + +static void pgaudit_handle_exception() +{ + /* Since not using PG_TRY, must reset error stack by hand */ + t_thrd.log_cxt.error_context_stack = NULL; + t_thrd.log_cxt.call_stack = NULL; + + /* Prevents interrupts while cleaning up */ + HOLD_INTERRUPTS(); + + /* Report the error to the server log */ + EmitErrorReport(); + + ereport(LOG, (errmsg("auditor thread with thread index : %d shutdown abnormally", t_thrd.audit.cur_thread_idx))); + + (void)MemoryContextSwitchTo(t_thrd.mem_cxt.pgAuditLocalContext); + FlushErrorState(); + + /* Flush any leaked data in the top-level context */ + MemoryContextResetAndDeleteChildren(t_thrd.mem_cxt.pgAuditLocalContext); + + LWLockReleaseAll(); + if (t_thrd.utils_cxt.CurrentResourceOwner) { + ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); + } + + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + + /* + * Sleep at least 1 second after any error. A write error is likely + * to be repeated, and we don't want to be filling the error logs as + * fast as we can. + */ + pg_usleep(1000000L); + return; +} + +static void sig_thread_quit_handler() +{ + /* + * audit master thread should try its best to do the flush job when exiting: + * 1. wait for other audit threads to exit, so that index file will not change + * 2. 
flush index audit file + */ + if (t_thrd.audit.cur_thread_idx == 0) { + while (t_thrd.audit.need_exit) { + if (pg_atomic_read_u32(&g_instance.audit_cxt.current_audit_index) == 1) { + break; + } + } + pgaudit_cleanup(); + pgaudit_update_indexfile(PG_BINARY_W, true); + } + + /* audit master thread should exit last as expected */ + ereport(LOG, (errmsg("auditor thread exit, id : %d", t_thrd.audit.cur_thread_idx))); + + /* + * From here on, elog(ERROR) should end with exit(1), not send + * control back to the sigsetjmp block above. + */ + u_sess->attr.attr_common.ExitOnAnyError = true; +} + +static void sig_thread_config_handler(int &currentAuditRotationAge, int &currentAuditRemainThreshold) +{ + ProcessConfigFile(PGC_SIGHUP); + + /* + * If rotation time parameter changed, reset next rotation time, + * but don't immediately force a rotation. + */ + if (currentAuditRotationAge != u_sess->attr.attr_security.Audit_RotationAge) { + currentAuditRotationAge = u_sess->attr.attr_security.Audit_RotationAge; + set_next_rotation_time(); + } + + /* If file remain threshold parameter changed, reset audit index table */ + if (t_thrd.audit.cur_thread_idx == 0 && + currentAuditRemainThreshold != u_sess->attr.attr_security.Audit_RemainThreshold) { + currentAuditRemainThreshold = u_sess->attr.attr_security.Audit_RemainThreshold; + + /* the audit index table may be dirty, so update index table first */ + pgaudit_update_indexfile(PG_BINARY_W, true); + + /* reset the audit index table */ + pgaudit_reset_indexfile(); + } + + /* + * If we had a rotation-disabling failure, re-enable rotation + * attempts after SIGHUP, and force one immediately. + */ + if (t_thrd.audit.rotation_disabled) { + t_thrd.audit.rotation_disabled = false; + t_thrd.audit.rotation_requested = true; + } +} +/* + * Main entry point for auditor process + * argc/argv parameters are valid only in EXEC_BACKEND case. + */ +NON_EXEC_STATIC void PgAuditorMain() +{ + char auditbuffer[READ_BUF_SIZE + 1] = {0}; + int bytes_in_auditbuffer = 0; + int currentAuditRotationAge; + int currentAuditRemainThreshold; + pg_time_t now; + + IsUnderPostmaster = true; /* we are a postmaster subprocess now */ + + t_thrd.proc_cxt.MyProcPid = gs_thread_self(); /* reset t_thrd.proc_cxt.MyProcPid */ + + t_thrd.proc_cxt.MyStartTime = time(NULL); /* set our start time in case we call elog */ + now = t_thrd.proc_cxt.MyStartTime; + + t_thrd.role = AUDITOR; + + /* get thread index here, index 0 is audit master thread as default */ + (void)audit_load_thread_index(); + init_ps_display("auditor process", "", "", ""); + + /* Initialize private latch for use by signal handlers */ + InitLatch(&t_thrd.audit.sysAuditorLatch); + + /* + * Properly accept or ignore signals the postmaster might send us + * + * Note: we ignore all termination signals, and instead exit only when all + * upstream processes are gone, to ensure we don't miss any dying gasps of + * broken backends... + */ + init_audit_signal_handlers(); if (t_thrd.mem_cxt.pgAuditLocalContext == NULL) t_thrd.mem_cxt.pgAuditLocalContext = AllocSetContextCreate(t_thrd.top_mem_cxt, @@ -482,24 +594,35 @@ NON_EXEC_STATIC void PgAuditorMain() ALLOCSET_DEFAULT_INITSIZE * 3, ALLOCSET_DEFAULT_MAXSIZE * 3); - pgaudit_indextbl_init(); + on_shmem_exit(pgauditor_kill, (Datum)0); + (void)MemoryContextSwitchTo(t_thrd.mem_cxt.pgAuditLocalContext); -#ifdef WIN32 - /* Fire up separate data transfer thread */ - InitializeCriticalSection(&sysauditorSection); - EnterCriticalSection(&sysauditorSection); + /* If an exception is encountered, processing resumes here. 
*/ + sigjmp_buf local_sigjmp_buf; + int curTryCounter; + int* oldTryCounter = NULL; + if (sigsetjmp(local_sigjmp_buf, 1) != 0) { + gstrace_tryblock_exit(true, oldTryCounter); + pgaudit_handle_exception(); + } + oldTryCounter = gstrace_tryblock_entry(&curTryCounter); + t_thrd.log_cxt.PG_exception_stack = &local_sigjmp_buf; /* We can now handle ereport(ERROR) */ - threadHandle = (HANDLE)_beginthreadex(NULL, 0, pipeThread, NULL, 0, NULL); - if (threadHandle == 0) - ereport(FATAL, (errmsg("could not create sysauditor data transfer thread: %m"))); -#endif /* WIN32 */ /* remember active auditfile parameters */ currentAuditRotationAge = u_sess->attr.attr_security.Audit_RotationAge; currentAuditRemainThreshold = u_sess->attr.attr_security.Audit_RemainThreshold; + + /* audit master thread */ + if (t_thrd.audit.cur_thread_idx == 0) { + m_curlUtils.initialize(false, "", "", ""); + elasic_search_connection_test(); + } + /* set next planned rotation time */ set_next_rotation_time(); - elasic_search_connection_test(); + + auditfile_init(); /* main worker loop */ for (;;) { @@ -507,10 +630,7 @@ NON_EXEC_STATIC void PgAuditorMain() bool size_based_rotation = false; long cur_timeout; unsigned int cur_flags; - -#ifndef WIN32 unsigned int rc = 0; -#endif /* Clear any already-pending wakeups */ ResetLatch(&t_thrd.audit.sysAuditorLatch); @@ -518,47 +638,22 @@ NON_EXEC_STATIC void PgAuditorMain() /* * Quit if we get SIGQUIT from the postmaster. */ - if (t_thrd.audit.need_exit) + if (t_thrd.audit.need_exit) { + sig_thread_quit_handler(); break; + } - policy_auditfile_rotate(); - pgaudit_send_data_to_elastic(); + if (t_thrd.audit.cur_thread_idx == 0) { + policy_auditfile_rotate(); + pgaudit_send_data_to_elastic(); + } /* * Process any requests or signals received recently. */ if (t_thrd.audit.got_SIGHUP) { t_thrd.audit.got_SIGHUP = false; - ProcessConfigFile(PGC_SIGHUP); - - /* - * If rotation time parameter changed, reset next rotation time, - * but don't immediately force a rotation. - */ - if (currentAuditRotationAge != u_sess->attr.attr_security.Audit_RotationAge) { - currentAuditRotationAge = u_sess->attr.attr_security.Audit_RotationAge; - set_next_rotation_time(); - } - - /* If file remain threshold parameter changed, reset audit index table */ - if (currentAuditRemainThreshold != u_sess->attr.attr_security.Audit_RemainThreshold) { - currentAuditRemainThreshold = u_sess->attr.attr_security.Audit_RemainThreshold; - - /* the audit index table may be dirty, so update index table first */ - pgaudit_update_indexfile(PG_BINARY_W, true); - - /* reset the audit index table */ - pgaudit_indextbl_init(); - } - - /* - * If we had a rotation-disabling failure, re-enable rotation - * attempts after SIGHUP, and force one immediately. 
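The sigsetjmp block above is what keeps an auditor thread alive across ereport(ERROR): the error path longjmps back, pgaudit_handle_exception cleans up, and the main loop resumes. A minimal standalone sketch of the same idiom, with purely illustrative names rather than the openGauss thread-local API:

```
#include <setjmp.h>
#include <stdio.h>

/* Minimal sketch of the recovery idiom used in PgAuditorMain: an error path
 * siglongjmps back to the sigsetjmp point, a handler cleans up, and the
 * worker loop resumes instead of exiting. All names are illustrative. */
static sigjmp_buf recovery_point;

static void handle_exception(void)
{
    /* the real pgaudit_handle_exception resets error stacks, calls
     * EmitErrorReport(), flushes the thread-local memory context,
     * releases LWLocks, then sleeps one second */
    fprintf(stderr, "recovered from error, resuming main loop\n");
}

static void auditor_loop(void)
{
    if (sigsetjmp(recovery_point, 1) != 0) {
        handle_exception();
    }
    /* equivalent of t_thrd.log_cxt.PG_exception_stack = &local_sigjmp_buf */
    for (;;) {
        /* one unit of work; an error would call siglongjmp(recovery_point, 1) */
        break; /* illustration only */
    }
}
```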
- */ - if (t_thrd.audit.rotation_disabled) { - t_thrd.audit.rotation_disabled = false; - t_thrd.audit.rotation_requested = true; - } + sig_thread_config_handler(currentAuditRotationAge, currentAuditRemainThreshold); } if (u_sess->attr.attr_security.Audit_RotationAge > 0 && !t_thrd.audit.rotation_disabled) { @@ -589,7 +684,9 @@ NON_EXEC_STATIC void PgAuditorMain() auditfile_rotate(time_based_rotation, size_based_rotation); } - pgaudit_cleanup(); + if (t_thrd.audit.cur_thread_idx == 0) { + pgaudit_cleanup(); + } /* * Calculate time till next time-based rotation, so that we don't @@ -613,16 +710,14 @@ NON_EXEC_STATIC void PgAuditorMain() /* * Sleep until there's something to do */ -#ifndef WIN32 - rc = WaitLatchOrSocket( - &t_thrd.audit.sysAuditorLatch, WL_LATCH_SET | WL_SOCKET_READABLE | cur_flags, sysauditPipe[0], cur_timeout); + rc = WaitLatchOrSocket(&t_thrd.audit.sysAuditorLatch, WL_LATCH_SET | WL_SOCKET_READABLE | cur_flags, + g_instance.audit_cxt.sys_audit_pipes[PIPE_READ_INDEX(t_thrd.audit.cur_thread_idx)], + cur_timeout); if (rc & WL_SOCKET_READABLE) { - int bytesRead = read( - sysauditPipe[0], auditbuffer + bytes_in_auditbuffer, sizeof(auditbuffer) - bytes_in_auditbuffer - 1); + int bytesRead = read(g_instance.audit_cxt.sys_audit_pipes[PIPE_READ_INDEX(t_thrd.audit.cur_thread_idx)], + auditbuffer + bytes_in_auditbuffer, sizeof(auditbuffer) - bytes_in_auditbuffer - 1); if (bytesRead > 0) { - /* - * Check the current audit file. - */ + /* Check the current audit file */ CheckAuditFile(); } if (bytesRead < 0) { @@ -645,23 +740,6 @@ NON_EXEC_STATIC void PgAuditorMain() flush_pipe_input(auditbuffer, &bytes_in_auditbuffer); } } -#else /* WIN32 */ - - /* - * On Windows we leave it to a separate thread to transfer data and - * detect pipe EOF. The main thread just wakes up to handle SIGHUP - * and rotation conditions. - * - * Server code isn't generally thread-safe, so we ensure that only one - * of the threads is active at a time by entering the critical section - * whenever we're not sleeping. 
- */ - LeaveCriticalSection(&sysauditorSection); - - (void)WaitLatch(&t_thrd.audit.sysAuditorLatch, WL_LATCH_SET | cur_flags, cur_timeout); - - EnterCriticalSection(&sysauditorSection); -#endif /* WIN32 */ if (t_thrd.audit.pipe_eof_seen) { break; @@ -675,14 +753,12 @@ NON_EXEC_STATIC void PgAuditorMain() */ ereport(DEBUG1, (errmsg("auditor shutting down"))); - pgaudit_cleanup(); - pgaudit_update_indexfile(PG_BINARY_W, true); if (t_thrd.audit.sysauditFile) { fclose(t_thrd.audit.sysauditFile); t_thrd.audit.sysauditFile = NULL; } - - // policy auditing + + /* policy auditing */ if (t_thrd.audit.policyauditFile) { fclose(t_thrd.audit.policyauditFile); t_thrd.audit.policyauditFile = NULL; @@ -694,12 +770,9 @@ NON_EXEC_STATIC void PgAuditorMain() t_thrd.mem_cxt.pgAuditLocalContext = NULL; } - if (sysauditPipe[0] > 0) { - close(sysauditPipe[0]); - sysauditPipe[0] = -1; + if (t_thrd.audit.cur_thread_idx == 0) { + m_curlUtils.~CurlUtils(); } - - m_curlUtils.~CurlUtils(); proc_exit(0); } @@ -710,95 +783,50 @@ void pgaudit_send_data_to_elastic() } } +/* + * Start all audit subprocesses + */ +void pgaudit_start_all(void) +{ + if (g_instance.pid_cxt.PgAuditPID == NULL) { + return; + } + + audit_process_cxt_init(); + for (int i = 0; i < g_instance.audit_cxt.thread_num; ++i) { + if (g_instance.pid_cxt.PgAuditPID[i] == 0) { + g_instance.pid_cxt.PgAuditPID[i] = pgaudit_start(); + ereport(LOG, (errmsg("auditor process %d started, pid=%lu", i, g_instance.pid_cxt.PgAuditPID[i]))); + } + } +} + +/* + * Stop all audit subprocesses + */ +void pgaudit_stop_all(void) +{ + for (int i = 0; i < g_instance.audit_cxt.thread_num; ++i) { + if (g_instance.pid_cxt.PgAuditPID[i] != 0) { + signal_child(g_instance.pid_cxt.PgAuditPID[i], SIGQUIT, -1); + } + } + audit_process_cxt_exit(); + ereport(LOG, (errmsg("parameter audit_enabled is set to false, terminate auditor process."))); +} + /* * Postmaster subroutine to start a sysauditor subprocess. */ ThreadId pgaudit_start(void) { - pg_time_t curtime; ThreadId sysauditorPid; - if (!u_sess->attr.attr_security.Audit_enabled) return 0; - /* - * Do nothing if too soon since last collector start. This is a safety - * valve to protect against continuous respawn attempts if the collector - * is dying immediately at launch. Note that since we will be re-called - * from the postmaster main loop, we will get another chance later. - */ - curtime = time(NULL); - if ((unsigned int)(curtime - t_thrd.audit.last_pgaudit_start_time) < (unsigned int)PGAUDIT_RESTART_INTERVAL) - return 0; - t_thrd.audit.last_pgaudit_start_time = curtime; - - /* - * If first time through, create the pipe which will receive audit - * output. - * - * If the sysauditor crashes and needs to be restarted, we continue to use - * the same pipe (indeed must do so, since extant backends will be writing - * into that pipe). - * - * This means the postmaster must continue to hold the read end of the - * pipe open, so we can pass it down to the reincarnated sysauditor. This - * is a bit klugy but we have little choice. 
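pgaudit_start_all and pgaudit_stop_all above manage one auditor thread per slot, each paired with its own pipe through the PIPE_READ_INDEX/PIPE_WRITE_INDEX macros. A small sketch of the flat fd-array layout those macros assume (hypothetical standalone helper; the real array lives in g_instance.audit_cxt.sys_audit_pipes):

```
#include <unistd.h>
#include <vector>

/* Sketch only: N pipe pairs packed back to back in one array, read end of
 * thread i at fds[2*i], write end at fds[2*i + 1], matching pipe()'s
 * convention of fds[0] = read, fds[1] = write. */
static bool create_audit_pipes(std::vector<int> &fds, int thread_num)
{
    fds.assign(thread_num * 2, -1);
    for (int i = 0; i < thread_num; ++i) {
        if (pipe(&fds[i * 2]) < 0) {
            return false; /* caller would report the error and bail out */
        }
    }
    return true;
}
```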
- */ - char Audit_directory_Done[MAXPGPATH] = {0}; -#ifndef WIN32 - if (sysauditPipe[0] < 0) { - if (pipe(sysauditPipe) < 0) - ereport(FATAL, (errcode_for_socket_access(), (errmsg("could not create pipe for sysaudit: %m")))); - } - int rc = snprintf_s(Audit_directory_Done, sizeof(Audit_directory_Done), sizeof(Audit_directory_Done) - 1, "%s/done", - g_instance.attr.attr_security.Audit_directory); - securec_check_ss(rc, "\0", "\0"); -#else - if (!sysauditPipe[0]) { - SECURITY_ATTRIBUTES sa; - errno_t errorno = EOK; - - errorno = memset_s(&sa, sizeof(SECURITY_ATTRIBUTES), 0, sizeof(SECURITY_ATTRIBUTES)); - securec_check(errorno, "\0", "\0"); - sa.nLength = sizeof(SECURITY_ATTRIBUTES); - sa.bInheritHandle = TRUE; - - if (!CreatePipe(&sysauditPipe[0], &sysauditPipe[1], &sa, 32768)) - ereport(FATAL, (errcode_for_file_access(), (errmsg("could not create pipe for sysaudit: %m")))); - } - int ret = snprintf_s(Audit_directory_Done, sizeof(Audit_directory_Done), sizeof(Audit_directory_Done) - 1, - "%s\\done", g_instance.attr.attr_security.Audit_directory); - securec_check_ss(ret, "\0", "\0"); -#endif - - /* - * Create audit directory if not present; ignore errors - */ - (void)pg_mkdir_p(g_instance.attr.attr_security.Audit_directory, S_IRWXU); - (void)pg_mkdir_p(Audit_directory_Done, S_IRWXU); - /* - * The initial auditfile is created right in the postmaster, to verify that - * the Audit_directory is writable. - */ - pgaudit_update_indexfile(PG_BINARY_A, false); - sysauditorPid = initialize_util_thread(AUDITOR); if (sysauditorPid != 0) { /* success, in postmaster */ - if (!auditpipe_done) { -#ifdef WIN32 - int fd; - /* - * open the pipe in binary mode and make sure write pipe is binary, - * to avoid disturbing the pipe chunking protocol. - */ - fd = _open_osfhandle((intptr_t)sysauditPipe[1], _O_APPEND | _O_BINARY); - - _setmode(fd, _O_BINARY); - /* close() must not be called because the close() would close the underlying handle. */ -#endif - auditpipe_done = true; - } return (ThreadId)sysauditorPid; } @@ -1030,21 +1058,6 @@ static void pgaudit_write_file(char* buffer, int count) buffer + offsetof(AuditMsgHdr, size), READ_BUF_SIZE - offsetof(AuditMsgHdr, size), &count, sizeof(uint32)); securec_check(errorno, "\0", "\0"); - if (t_thrd.audit.audit_indextbl) { - /* check to see whether system time changed. */ - if (t_thrd.audit.audit_indextbl->last_audit_time > curtime) { - AuditIndexItem* item = NULL; - item = t_thrd.audit.audit_indextbl->data + t_thrd.audit.audit_indextbl->curidx; - if (item->ctime > 0) - item->ctime *= -1; - t_thrd.audit.audit_indextbl->last_audit_time = curtime; - pgaudit_update_indexfile(PG_BINARY_W, true); - - audit_report(AUDIT_INTERNAL_EVENT, AUDIT_OK, "time", "system time changed."); - } - t_thrd.audit.audit_indextbl->last_audit_time = curtime; - } - errno = 0; retry1: rc = fwrite(buffer, 1, count, t_thrd.audit.sysauditFile); @@ -1097,65 +1110,6 @@ retry: (void)fflush(t_thrd.audit.policyauditFile); } -#ifdef WIN32 - -/* - * Worker thread to transfer data from the pipe to the current auditfile. - * - * We need this because on Windows, WaitforMultipleObjects does not work on - * unnamed pipes: it always reports "signaled", so the blocking ReadFile won't - * allow for SIGHUP; and select is for sockets only. 
- */ -static unsigned int __stdcall pipeThread(void* arg) -{ - char auditbuffer[READ_BUF_SIZE]; - int bytes_in_auditbuffer = 0; - - for (;;) { - DWORD bytesRead; - BOOL result = false; - - result = ReadFile(sysauditPipe[0], - auditbuffer + bytes_in_auditbuffer, - sizeof(auditbuffer) - bytes_in_auditbuffer, - &bytesRead, - 0); - - /* - * Enter critical section before doing anything that might touch - * global state shared by the main thread. Anything that uses - * palloc()/pfree() in particular are not safe outside the critical - * section. - */ - EnterCriticalSection(&sysauditorSection); - if (!result) { - DWORD error = GetLastError(); - if (error == ERROR_HANDLE_EOF || error == ERROR_BROKEN_PIPE) - break; - _dosmaperr(error); - ereport(LOG, (errcode_for_file_access(), errmsg("could not read from auditor pipe: %m"))); - } else if (bytesRead > 0) { - bytes_in_auditbuffer += bytesRead; - process_pipe_input(auditbuffer, &bytes_in_auditbuffer); - } - LeaveCriticalSection(&sysauditorSection); - } - - /* We exit the above loop only upon detecting pipe EOF */ - t_thrd.audit.pipe_eof_seen = true; - - /* if there's any data left then force it out now */ - flush_pipe_input(auditbuffer, &bytes_in_auditbuffer); - - /* set the latch to waken the main thread, which will quit */ - SetLatch(&t_thrd.audit.sysAuditorLatch); - - LeaveCriticalSection(&sysauditorSection); - _endthread(); - return 0; -} -#endif /* WIN32 */ - /* * Brief : initialize the audit file. * Description : set parameter allow_erros as error level, do not allow error as default @@ -1173,7 +1127,7 @@ static void auditfile_init(bool allow_errors) audit_report(AUDIT_INTERNAL_EVENT, AUDIT_OK, "file", "create a new audit file"); } } - if (!t_thrd.audit.policyauditFile && IS_PGXC_COORDINATOR) { + if (!t_thrd.audit.policyauditFile && IS_PGXC_COORDINATOR && (t_thrd.audit.cur_thread_idx == 0)) { t_thrd.audit.policyauditFile = auditfile_open(time(NULL), "a", allow_errors, policy_audit_filename, true); } } @@ -1193,13 +1147,15 @@ static FILE *auditfile_open(pg_time_t timestamp, const char *mode, bool allow_er FILE *fh = NULL; char* filename = NULL; uint32 fnum = 0; - AuditIndexItem *item = NULL; + int thread_idx = t_thrd.audit.cur_thread_idx; + struct stat st; bool exist = false; - if (!ignore_num && t_thrd.audit.audit_indextbl) { - item = t_thrd.audit.audit_indextbl->data + t_thrd.audit.audit_indextbl->curidx; - fnum = item->filenum; + if (!ignore_num && g_instance.audit_cxt.audit_indextbl) { + fnum = pgaudit_get_auditfile_num(); } + ereport(DEBUG1, (errmsg("audit thread idx: %d auditfile_open ok fnum : %d", thread_idx, fnum))); + filename = (char*)palloc(MAXPGPATH); int rc = snprintf_s( filename, MAXPGPATH, MAXPGPATH - 1, _filename, g_instance.attr.attr_security.Audit_directory, fnum); @@ -1214,16 +1170,8 @@ static FILE *auditfile_open(pg_time_t timestamp, const char *mode, bool allow_er fh = fopen(filename, mode); if (fh != NULL) { setvbuf(fh, NULL, LBF_MODE, 0); -#ifdef WIN32 - /* use CRLF line endings on Windows */ - _setmode(_fileno(fh), _O_BINARY); -#endif - if (!ignore_num && t_thrd.audit.audit_indextbl) { - if (!exist) { - item = t_thrd.audit.audit_indextbl->data + t_thrd.audit.audit_indextbl->curidx; - item->ctime = timestamp; - } - t_thrd.audit.audit_indextbl->count = AUDIT_COUNT(t_thrd.audit.audit_indextbl); + if (!ignore_num && g_instance.audit_cxt.audit_indextbl) { + pgaudit_update_auditfile_time(timestamp, exist); pgaudit_update_indexfile(PG_BINARY_W, true); } } else { @@ -1254,24 +1202,13 @@ static FILE *auditfile_open(pg_time_t 
timestamp, const char *mode, bool allow_er */ static void auditfile_close(AuditFileType flag) { - AuditIndexItem *item = NULL; - uint32 fnum = 0; - if (t_thrd.audit.sysauditFile == NULL) return; - if ((flag == SYSAUDITFILE_TYPE) && t_thrd.audit.audit_indextbl != NULL) { - item = t_thrd.audit.audit_indextbl->data + t_thrd.audit.audit_indextbl->curidx; - item->filesize = ftell(t_thrd.audit.sysauditFile); - fnum = item->filenum + 1; - - t_thrd.audit.pgaudit_totalspace += item->filesize; - + if ((flag == SYSAUDITFILE_TYPE) && g_instance.audit_cxt.audit_indextbl != NULL) { /* switch to next audit file */ - t_thrd.audit.audit_indextbl->curidx = - (t_thrd.audit.audit_indextbl->curidx + 1) % t_thrd.audit.audit_indextbl->maxnum; - item = t_thrd.audit.audit_indextbl->data + t_thrd.audit.audit_indextbl->curidx; - item->filenum = fnum; + pgaudit_switch_next_auditfile(); + pgaudit_update_indexfile(PG_BINARY_W, true); } if (flag == SYSAUDITFILE_TYPE) { fclose(t_thrd.audit.sysauditFile); @@ -1412,6 +1349,74 @@ static void set_next_rotation_time(void) t_thrd.audit.next_rotation_time = now; } +void pgaudit_gen_auditfile_warning(pg_time_t remain_time, uint4 filesize) +{ + if ((u_sess->attr.attr_security.Audit_CleanupPolicy || remain_time == 0) && + (g_instance.audit_cxt.pgaudit_totalspace + filesize >= + (uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L)) +#ifdef HAVE_LONG_LONG_INT + ereport(WARNING, (errmsg("audit file total space(%lld B) exceed guc parameter(audit_space_limit: %d KB)", + (long long int)(g_instance.audit_cxt.pgaudit_totalspace + filesize), + u_sess->attr.attr_security.Audit_SpaceLimit))); +#else + ereport(WARNING, + (errmsg("audit file total space(%ld B) exceed guc parameter(audit_space_limit: %d KB)", + g_instance.audit_cxt.pgaudit_totalspace + filesize, + u_sess->attr.attr_security.Audit_SpaceLimit))); +#endif + else if (u_sess->attr.attr_security.Audit_CleanupPolicy == 0 && remain_time && + (g_instance.audit_cxt.pgaudit_totalspace + filesize >= + (uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L)) +#ifdef HAVE_LONG_LONG_INT + ereport(WARNING, (errmsg("Based on time-priority policy, the oldest audit file is beyond %d days or " + "audit file total space(%lld B) exceed guc parameter(audit_space_limit: %d KB)", + u_sess->attr.attr_security.Audit_RemainAge, + (long long int)(g_instance.audit_cxt.pgaudit_totalspace + filesize), + u_sess->attr.attr_security.Audit_SpaceLimit))); +#else + ereport(WARNING, (errmsg("Based on time-priority policy, the oldest audit file is beyond %d days or " + "audit file total space(%ld B) exceed guc parameter(audit_space_limit: %d KB)", + u_sess->attr.attr_security.Audit_RemainAge, + (g_instance.audit_cxt.pgaudit_totalspace + filesize), + u_sess->attr.attr_security.Audit_SpaceLimit))); +#endif + if (g_instance.audit_cxt.audit_indextbl->count > (uint32)u_sess->attr.attr_security.Audit_RemainThreshold) + ereport(WARNING, + (errmsg("audit file total count(%u) exceed guc parameter(audit_file_remain_threshold: %d)", + g_instance.audit_cxt.audit_indextbl->count, u_sess->attr.attr_security.Audit_RemainThreshold))); + ereport(WARNING, (errmsg("%s", t_thrd.audit.pgaudit_filepath))); +} + +bool should_keep_basedon_timepolicy(pg_time_t remain_time, uint4 filesize, uint32 index, const AuditIndexItem *item) +{ + if (g_instance.audit_cxt.audit_indextbl->count <= (uint32)u_sess->attr.attr_security.Audit_RemainThreshold && + remain_time && (g_instance.audit_cxt.pgaudit_totalspace + filesize <= SPACE_MAXIMUM_SIZE)) { + + /* As current audit log rotation 
policy is based on time, just give the warning here for total space */ + if ((uint64)(g_instance.audit_cxt.pgaudit_totalspace + filesize - + (uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L) >= t_thrd.audit.space_beyond_size) { + ereport(WARNING, + (errmsg("audit file total space(%lld B) exceed guc parameter(audit_space_limit: %d KB) about %d MB", + (long long int)(g_instance.audit_cxt.pgaudit_totalspace + filesize), + u_sess->attr.attr_security.Audit_SpaceLimit, + (int)(t_thrd.audit.space_beyond_size / (1024 * 1024))))); + + t_thrd.audit.space_beyond_size += SPACE_INTERVAL_SIZE; + } + + /* get the current && next item to estimate time-based policy */ + AuditIndexItem *next = + g_instance.audit_cxt.audit_indextbl->data + ((index + 1) % g_instance.audit_cxt.audit_indextbl->maxnum); + if (remain_time >= (g_instance.audit_cxt.audit_indextbl->last_audit_time - item->ctime) || + (next && (remain_time > (g_instance.audit_cxt.audit_indextbl->last_audit_time - next->ctime)))) { + ereport(WARNING, (errmsg("should_keep_basedon_timepolicy: file kept, still within remain time"))); + return true; + } + } + + return false; +} + /* * pgaudit_cleanup * @@ -1421,103 +1426,74 @@ static void pgaudit_cleanup(void) { uint32 index = 0; AuditIndexItem* item = NULL; - uint64 filesize = 0; - pg_time_t remain_time = (int64)u_sess->attr.attr_security.Audit_RemainAge * SECS_PER_DAY; // how many seconds - if (t_thrd.audit.audit_indextbl == NULL) + bool truncated = false; + if (g_instance.audit_cxt.audit_indextbl == NULL) return; - if (t_thrd.audit.sysauditFile != NULL) - filesize = ftell(t_thrd.audit.sysauditFile); - index = t_thrd.audit.audit_indextbl->begidx; - while ( - t_thrd.audit.pgaudit_totalspace + filesize >= ((uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L) || - t_thrd.audit.audit_indextbl->count > (uint32)u_sess->attr.attr_security.Audit_RemainThreshold) { + + pg_time_t remain_time = (int64)u_sess->attr.attr_security.Audit_RemainAge * SECS_PER_DAY; // how many seconds + uint64 filesize = u_sess->attr.attr_security.Audit_RotationSize * g_instance.audit_cxt.thread_num * + 1024L; // filesize for currently written files + + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + index = g_instance.audit_cxt.audit_indextbl->begidx; + while (g_instance.audit_cxt.pgaudit_totalspace + filesize >= + ((uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L) || + g_instance.audit_cxt.audit_indextbl->count > (uint32)u_sess->attr.attr_security.Audit_RemainThreshold) { errno_t errorno = EOK; - struct stat statbuf; - item = t_thrd.audit.audit_indextbl->data + index; + item = g_instance.audit_cxt.audit_indextbl->data + index; + uint32 fnum = item->filenum; /* to check how long the audit file is remained: * a. it must be time-based policy and the specified value is valid; * b. the remained time of oldest audit file is beyond the specified value; * c. the total size is not beyond the maximum space size. 
*/ - if (t_thrd.audit.audit_indextbl->count <= (uint32)u_sess->attr.attr_security.Audit_RemainThreshold && - u_sess->attr.attr_security.Audit_CleanupPolicy == 0 && remain_time && - (t_thrd.audit.pgaudit_totalspace + filesize <= SPACE_MAXIMUM_SIZE)) { - if ((uint64)(t_thrd.audit.pgaudit_totalspace + filesize - - (uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L) >= t_thrd.audit.space_beyond_size) { - ereport(WARNING, - (errmsg("audit file total space(%lld B) exceed guc parameter(audit_space_limit: %d KB) about %d MB", - (long long int)(t_thrd.audit.pgaudit_totalspace + filesize), - u_sess->attr.attr_security.Audit_SpaceLimit, - (int)(t_thrd.audit.space_beyond_size / (1024 * 1024))))); - - t_thrd.audit.space_beyond_size += SPACE_INTERVAL_SIZE; - } - /* get the next item */ - AuditIndexItem* next = - t_thrd.audit.audit_indextbl->data + (index + 1) % t_thrd.audit.audit_indextbl->maxnum; - if (remain_time >= (t_thrd.audit.audit_indextbl->last_audit_time - item->ctime) || - (next && (remain_time > (t_thrd.audit.audit_indextbl->last_audit_time - next->ctime)))) - break; + if (u_sess->attr.attr_security.Audit_CleanupPolicy == 0 && + should_keep_basedon_timepolicy(remain_time, filesize, index, item)) { + break; } + + /* truncate audit file */ + int rc = snprintf_s(t_thrd.audit.pgaudit_filepath, MAXPGPATH, MAXPGPATH - 1, pgaudit_filename, g_instance.attr.attr_security.Audit_directory, item->filenum); - securec_check_intval(rc,,); + securec_check_intval(rc, , ); + struct stat statbuf; if (stat(t_thrd.audit.pgaudit_filepath, &statbuf) == 0 && unlink(t_thrd.audit.pgaudit_filepath) < 0) { ereport(WARNING, (errmsg("could not remove audit file: %m"))); break; } - rc = snprintf_truncated_s( - t_thrd.audit.pgaudit_filepath, MAXPGPATH, "remove an audit file(number: %u)", item->filenum); - securec_check_ss(rc, "\0", "\0"); - if ((u_sess->attr.attr_security.Audit_CleanupPolicy || remain_time == 0) && - (t_thrd.audit.pgaudit_totalspace + filesize >= (uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L)) -#ifdef HAVE_LONG_LONG_INT - ereport(WARNING, - (errmsg("audit file total space(%lld B) exceed guc parameter(audit_space_limit: %d KB)", - (long long int)(t_thrd.audit.pgaudit_totalspace + filesize), - u_sess->attr.attr_security.Audit_SpaceLimit))); -#else - ereport(WARNING, - (errmsg("audit file total space(%ld B) exceed guc parameter(audit_space_limit: %d KB)", - (t_thrd.audit.pgaudit_totalspace + filesize), - u_sess->attr.attr_security.Audit_SpaceLimit))); -#endif - else if (u_sess->attr.attr_security.Audit_CleanupPolicy == 0 && remain_time && - (t_thrd.audit.pgaudit_totalspace + filesize >= - (uint64)u_sess->attr.attr_security.Audit_SpaceLimit * 1024L)) -#ifdef HAVE_LONG_LONG_INT - ereport(WARNING, - (errmsg("Based on time-priority policy, the oldest audit file is beyond %d days or " - "audit file total space(%lld B) exceed guc parameter(audit_space_limit: %d KB)", - u_sess->attr.attr_security.Audit_RemainAge, - (long long int)(t_thrd.audit.pgaudit_totalspace + filesize), - u_sess->attr.attr_security.Audit_SpaceLimit))); -#else - ereport(WARNING, - (errmsg("Based on time-priority policy, the oldest audit file is beyond %d days or " - "audit file total space(%ld B) exceed guc parameter(audit_space_limit: %d KB)", - u_sess->attr.attr_security.Audit_RemainAge, - (t_thrd.audit.pgaudit_totalspace + filesize), - u_sess->attr.attr_security.Audit_SpaceLimit))); -#endif - if (t_thrd.audit.audit_indextbl->count > (uint32)u_sess->attr.attr_security.Audit_RemainThreshold) - ereport(WARNING, - 
(errmsg("audit file total count(%u) exceed guc parameter(audit_file_remain_threshold: %d)", - t_thrd.audit.audit_indextbl->count, - u_sess->attr.attr_security.Audit_RemainThreshold))); - ereport(WARNING, (errmsg("%s", t_thrd.audit.pgaudit_filepath))); + truncated = true; + pgaudit_gen_auditfile_warning(remain_time, filesize); - t_thrd.audit.pgaudit_totalspace -= item->filesize; - if (t_thrd.audit.audit_indextbl->count > 0) - t_thrd.audit.audit_indextbl->count--; - t_thrd.audit.audit_indextbl->begidx = (index + 1) % t_thrd.audit.audit_indextbl->maxnum; + /* update index file object As curretn audit file is removed */ + if (g_instance.audit_cxt.pgaudit_totalspace >= item->filesize) { + g_instance.audit_cxt.pgaudit_totalspace -= item->filesize; + } + if (g_instance.audit_cxt.audit_indextbl->count > 0) { + --g_instance.audit_cxt.audit_indextbl->count; + } + g_instance.audit_cxt.audit_indextbl->begidx = (index + 1) % g_instance.audit_cxt.audit_indextbl->maxnum; errorno = memset_s(item, sizeof(AuditIndexItem), 0, sizeof(AuditIndexItem)); securec_check(errorno, "\0", "\0"); - pgaudit_update_indexfile(PG_BINARY_W, true); - audit_report(AUDIT_INTERNAL_EVENT, AUDIT_OK, "file", t_thrd.audit.pgaudit_filepath); - if (index == t_thrd.audit.audit_indextbl->curidx) + + /* generate audit info for removing an audit file, we only do this thing under auditor thread */ + if (t_thrd.role != AUDITOR) { + rc = snprintf_truncated_s(t_thrd.audit.pgaudit_filepath, MAXPGPATH, "remove an audit file(number: %u)", + fnum); + securec_check_ss(rc, "\0", "\0"); + audit_report(AUDIT_INTERNAL_EVENT, AUDIT_OK, "file", t_thrd.audit.pgaudit_filepath); + } + /* stop till the current writting index */ + uint32 earliest_idx = g_instance.audit_cxt.audit_indextbl->latest_idx - g_instance.audit_cxt.thread_num; + if (index == earliest_idx) { break; - index = t_thrd.audit.audit_indextbl->begidx; + } + /* udpate audit index for next loop */ + index = g_instance.audit_cxt.audit_indextbl->begidx; + } + LWLockRelease(g_instance.audit_cxt.index_file_lock); + + if (truncated) { + pgaudit_update_indexfile(PG_BINARY_W, true); } } @@ -1574,16 +1550,23 @@ static void sigUsr1Handler(SIGNAL_ARGS) * warning from ignoring write()'s result, so do a little dance with casting * rc to void to shut up the compiler. */ -static void write_pipe_chunks(char* data, int len) +static void write_pipe_chunks(char* data, int len, AuditClassType type) { + static volatile uint32 pipe_count = 0; + int thread_num = g_instance.audit_cxt.thread_num; + + /* + * for audit process, postgres will send messages to pipes with round-robin + * for unified audit process, just first pipe will used + */ + int last_pipe_count = pg_atomic_fetch_add_u32(&pipe_count, 1); + int cur_pipe_idx = (type == STD_AUDIT_TYPE) ? 
(((last_pipe_count + thread_num) % thread_num) * 2 + 1) : 1; + + ereport(DEBUG1, (errmsg("write_pipe_chunks for pipe : %d", cur_pipe_idx))); + PipeProtoChunk p; errno_t errorno = EOK; -#ifndef WIN32 int rc; -#else - DWORD bytesWritten; - BOOL result = false; -#endif Assert(len > 0); @@ -1596,12 +1579,9 @@ static void write_pipe_chunks(char* data, int len) p.proto.len = PIPE_MAX_PAYLOAD; errorno = memcpy_s(p.proto.data, PIPE_MAX_PAYLOAD, data, PIPE_MAX_PAYLOAD); securec_check(errorno, "\0", "\0"); -#ifndef WIN32 - rc = write(sysauditPipe[1], &p, PIPE_HEADER_SIZE + PIPE_MAX_PAYLOAD); + rc = write(g_instance.audit_cxt.sys_audit_pipes[cur_pipe_idx], &p, PIPE_HEADER_SIZE + PIPE_MAX_PAYLOAD); (void)rc; -#else - result = WriteFile(sysauditPipe[1], &p, PIPE_HEADER_SIZE + PIPE_MAX_PAYLOAD, &bytesWritten, 0); -#endif + data += PIPE_MAX_PAYLOAD; len -= PIPE_MAX_PAYLOAD; } @@ -1611,12 +1591,12 @@ static void write_pipe_chunks(char* data, int len) p.proto.len = len; errorno = memcpy_s(p.proto.data, PIPE_MAX_PAYLOAD, data, len); securec_check(errorno, "\0", "\0"); -#ifndef WIN32 - rc = write(sysauditPipe[1], &p, PIPE_HEADER_SIZE + len); + + rc = write(g_instance.audit_cxt.sys_audit_pipes[cur_pipe_idx], &p, PIPE_HEADER_SIZE + len); + if (rc == -1) { + ereport(ERROR, (errmsg("write into pipe error"))); + } (void)rc; -#else - result = WriteFile(sysauditPipe[1], &p, PIPE_HEADER_SIZE + len, &bytesWritten, 0); -#endif } /* @@ -1647,6 +1627,24 @@ static pg_time_t current_timestamp() return milliseconds; } +static bool audit_status_check_ok() +{ +#ifdef ENABLE_MULTIPLE_NODES + /* check whether POSTMASTER is running in standby mode */ + if (!u_sess->attr.attr_security.Audit_enabled || (PGSharedMemoryAttached() && t_thrd.postmaster_cxt.HaShmData && + (STANDBY_MODE == t_thrd.postmaster_cxt.HaShmData->current_mode || + PENDING_MODE == t_thrd.postmaster_cxt.HaShmData->current_mode))) + return false; +#else + /* After the standby read function is added, the standby node needs to be audited. 
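write_pipe_chunks above fans standard audit records out across the worker pipes round-robin, while unified audit records always go through the first pipe. The selection logic reduces to the following sketch (standalone, with an assumed std::atomic counter standing in for pg_atomic_fetch_add_u32):

```
#include <atomic>
#include <cstdint>

static std::atomic<uint32_t> pipe_count{0};

/* Returns the index of the write fd to use: write ends sit at odd offsets
 * 2*i + 1 (see PIPE_WRITE_INDEX); unified audit always uses pipe 0's write
 * end, i.e. index 1. Sketch only. */
static int choose_write_fd_index(bool std_audit, uint32_t thread_num)
{
    uint32_t n = pipe_count.fetch_add(1);
    return std_audit ? static_cast<int>((n % thread_num) * 2 + 1) : 1;
}
```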
*/ + if (!u_sess->attr.attr_security.Audit_enabled || (PGSharedMemoryAttached() && t_thrd.postmaster_cxt.HaShmData && + PENDING_MODE == t_thrd.postmaster_cxt.HaShmData->current_mode)) + return false; +#endif + + return true; +} + /* * check the valid for specific audit type */ @@ -1710,9 +1708,15 @@ static bool audit_type_validcheck(AuditType type) case AUDIT_DDL_FUNCTION: type_status = CHECK_AUDIT_DDL(DDL_FUNCTION); break; + case AUDIT_DDL_PACKAGE: + type_status = CHECK_AUDIT_DDL(DDL_PACKAGE); + break; case AUDIT_DDL_RESOURCEPOOL: type_status = CHECK_AUDIT_DDL(DDL_RESOURCEPOOL); break; + case AUDIT_DDL_GLOBALCONFIG: + type_status = CHECK_AUDIT_DDL(DDL_GLOBALCONFIG); + break; case AUDIT_DDL_WORKLOAD: type_status = CHECK_AUDIT_DDL(DDL_WORKLOAD); break; @@ -1743,6 +1747,15 @@ static bool audit_type_validcheck(AuditType type) case AUDIT_DDL_KEY: type_status = CHECK_AUDIT_DDL(DDL_KEY); break; + case AUDIT_DDL_MODEL: + type_status = CHECK_AUDIT_DDL(DDL_MODEL); + break; + case AUDIT_DDL_PUBLICATION_SUBSCRIPTION: + type_status = CHECK_AUDIT_DDL(DDL_PUBLICATION_SUBSCRIPTION); + break; + case AUDIT_DDL_FOREIGN_DATA_WRAPPER: + type_status = CHECK_AUDIT_DDL(DDL_FOREIGN_DATA_WRAPPER); + break; case AUDIT_DML_ACTION: type_status = (unsigned int)u_sess->attr.attr_security.Audit_DML; break; @@ -1804,7 +1817,7 @@ static bool audit_get_clientinfo(AuditType type, const char* object_name, AuditE *username = u_sess->proc_cxt.MyProcPort->user_name; } - /* + /* * append user id information, get user id from table as invalid in session * not safe when access table when run logout process as not in normal transaction */ @@ -1898,22 +1911,13 @@ static bool audit_get_clientinfo(AuditType type, const char* object_name, AuditE * the fileds are arraged as below sequence, Note it's not liable to modify them as to keep compatibility of version * header|userid|username|dbname|client_info|object_name|detail_info|nodename|threadid|localport|remoteport */ -void audit_report(AuditType type, AuditResult result, const char* object_name, const char* detail_info, AuditClassType ctype) +void audit_report(AuditType type, AuditResult result, const char *object_name, const char *detail_info, + AuditClassType ctype) { -#ifdef ENABLE_MULTIPLE_NODES - /* check whether POSTMASTER is running in standby mode */ - if (!u_sess->attr.attr_security.Audit_enabled || - (PGSharedMemoryAttached() && t_thrd.postmaster_cxt.HaShmData && - (STANDBY_MODE == t_thrd.postmaster_cxt.HaShmData->current_mode || - PENDING_MODE == t_thrd.postmaster_cxt.HaShmData->current_mode))) + /* check the process status to decide whether to report it */ + if (!audit_status_check_ok() || (detail_info == NULL)) { return; -#else - /* After the standby read function is added, the standby node needs to be audited. 
*/ - if (!u_sess->attr.attr_security.Audit_enabled || - (PGSharedMemoryAttached() && t_thrd.postmaster_cxt.HaShmData && - PENDING_MODE == t_thrd.postmaster_cxt.HaShmData->current_mode)) - return; -#endif + } /* check the audit type to decide whether to report it */ if (!audit_type_validcheck(type)) { @@ -1935,6 +1939,15 @@ void audit_report(AuditType type, AuditResult result, const char* object_name, c char* localport = event_info.localport; char* remoteport = event_info.remoteport; + /* append xid info when audit_xid_info = 1 */ + char *detail_info_xid = NULL; + bool audit_xid_info = (u_sess->attr.attr_security.audit_xid_info == 1); + if (audit_xid_info) { + uint32 len = uint64_max_len + strlen("xid=, ") + strlen(detail_info) + 1; + detail_info_xid = (char *)palloc0(len); + audit_append_xid_info(detail_info, detail_info_xid, len); + } + /* append data header */ adata.header.signature[0] = 'A'; adata.header.signature[1] = 'U'; @@ -1954,7 +1967,7 @@ void audit_report(AuditType type, AuditResult result, const char* object_name, c appendStringField(&buf, dbname); appendStringField(&buf, (client_info[0] != '\0') ? client_info : NULL); appendStringField(&buf, object_name); - appendStringField(&buf, detail_info); + appendStringField(&buf, (!audit_xid_info) ? detail_info : detail_info_xid); appendStringField(&buf, g_instance.attr.attr_common.PGXCNodeName); appendStringField(&buf, (threadid[0] != '\0') ? threadid : NULL); appendStringField(&buf, (localport[0] != '\0') ? localport : NULL); @@ -1966,15 +1979,18 @@ void audit_report(AuditType type, AuditResult result, const char* object_name, c * Otherwise, just do a vanilla write to stderr. */ if (WRITE_TO_AUDITPIPE) { - write_pipe_chunks(buf.data, buf.len); + write_pipe_chunks(buf.data, buf.len, ctype); } else if (WRITE_TO_STDAUDITFILE(ctype)) { pgaudit_write_file(buf.data, buf.len); } else if (WRITE_TO_UNIAUDITFILE(ctype)) { pgaudit_write_policy_audit_file(buf.data, buf.len); } else if (detail_info != NULL) { - ereport(LOG, (errmsg("discard audit data: %s", detail_info))); + ereport(LOG, (errmsg("discard audit data: %s", (!audit_xid_info) ? detail_info : detail_info_xid))); } + if (detail_info_xid != NULL) { + pfree(detail_info_xid); + } pfree(buf.data); } @@ -2004,50 +2020,61 @@ static void pgaudit_read_indexfile(const char* audit_directory) size_t nread = 0; AuditIndexTable indextbl; - if (t_thrd.audit.audit_indextbl != NULL) { - pfree(t_thrd.audit.audit_indextbl); - t_thrd.audit.audit_indextbl = NULL; - } - int rc = snprintf_s(tblfile_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", audit_directory, audit_indextbl_file); securec_check_intval(rc,,); - - /* Check whether the map file is exist. */ - if (stat(tblfile_path, &statbuf) == 0) { - /* Open the audit index table file to write out the current values. 
- fp = AllocateFile(tblfile_path, PG_BINARY_R); - if (NULL == fp) { - ereport(LOG, - (errcode_for_file_access(), errmsg("could not open audit index table file \"%s\": %m", tblfile_path))); - return; - } - /* read the audit index table header first */ - nread = fread(&indextbl, indextbl_header_size, 1, fp); - if (1 == nread) { - errno_t errorno = EOK; - /* maxnum should be restricted with guc parameter audit_file_remain_threshold */ - if (indextbl.maxnum == 0 || indextbl.maxnum > (1024 * 1024 + 1)) { - ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("fail to read indextbl maxnum"))); - } - - /* read the whole audit index table */ - t_thrd.audit.audit_indextbl = - (AuditIndexTable*)palloc0(indextbl.maxnum * sizeof(AuditIndexItem) + indextbl_header_size); - errorno = memcpy_s(t_thrd.audit.audit_indextbl, - indextbl.maxnum * sizeof(AuditIndexItem) + indextbl_header_size, - &indextbl, - indextbl_header_size); - securec_check(errorno, "\0", "\0"); - - nread = fread(t_thrd.audit.audit_indextbl->data, sizeof(AuditIndexItem), indextbl.maxnum, fp); - if (nread != indextbl.maxnum) { - ereport(WARNING, - (errcode_for_file_access(), errmsg("could not read audit index file \"%s\": %m", tblfile_path))); - } - } - - pgaudit_close_file(fp, tblfile_path); + /* upgrade processing and sync old index table for old version */ + if (pgaudit_find_indexfile()) { + pgaudit_rewrite_indexfile(); } + /* + * Check whether the map file exists + * there will be no audit index table file when the audit process initializes for the first time + * return directly and keep the index audit table values NULL, pgaudit_update_indexfile will flush a new one + */ + if (stat(tblfile_path, &statbuf) != 0) { + return; + } + + /* Open the audit index table file to write out the current values. */ + fp = AllocateFile(tblfile_path, PG_BINARY_R); + if (NULL == fp) { + ereport(LOG, + (errcode_for_file_access(), errmsg("could not open audit index table file \"%s\": %m", tblfile_path))); + return; + } + /* read the audit index table header first */ + nread = fread(&indextbl, indextbl_header_size, 1, fp); + if (1 == nread) { + errno_t errorno = EOK; + /* maxnum should be restricted with guc parameter audit_file_remain_threshold */ + if (indextbl.maxnum == 0 || indextbl.maxnum > (1024 * 1024 + 1)) { + ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("fail to read indextbl maxnum"))); + } + + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + + /* free the current audit index table */ + pfree_ext(g_instance.audit_cxt.audit_indextbl); + + /* read the whole audit index table */ + g_instance.audit_cxt.audit_indextbl = (AuditIndexTable *)MemoryContextAllocZero( + g_instance.audit_cxt.global_audit_context, + (indextbl.maxnum * sizeof(AuditIndexItem) + indextbl_header_size)); + errorno = + memcpy_s(g_instance.audit_cxt.audit_indextbl, + indextbl.maxnum * sizeof(AuditIndexItem) + indextbl_header_size, &indextbl, indextbl_header_size); + securec_check(errorno, "\0", "\0"); + + nread = fread(g_instance.audit_cxt.audit_indextbl->data, sizeof(AuditIndexItem), indextbl.maxnum, fp); + if (nread != indextbl.maxnum) { + ereport(WARNING, + (errcode_for_file_access(), errmsg("could not read audit index file \"%s\": %m", tblfile_path))); + } + + LWLockRelease(g_instance.audit_cxt.index_file_lock); + } + + pgaudit_close_file(fp, tblfile_path); } /* @@ -2076,112 +2103,395 @@ static void pgaudit_update_indexfile(const char* mode, bool allow_errors) (errcode_for_file_access(), errmsg("could not open audit index table file \"%s\": %m", tblfile_path))); 
return; } - - if (t_thrd.audit.audit_indextbl != NULL) { - count = t_thrd.audit.audit_indextbl->maxnum * sizeof(AuditIndexItem) + indextbl_header_size; - nwritten = fwrite(t_thrd.audit.audit_indextbl, 1, count, fp); + /* check upgrade version to do audit upgrade processing */ + pgaudit_indexfile_upgrade(); + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + if (g_instance.audit_cxt.audit_indextbl != NULL) { + count = g_instance.audit_cxt.audit_indextbl->maxnum * sizeof(AuditIndexItem) + indextbl_header_size; + nwritten = fwrite(g_instance.audit_cxt.audit_indextbl, 1, count, fp); if (nwritten != count) ereport(allow_errors ? LOG : FATAL, (errcode_for_file_access(), errmsg("could not write to audit index file: %m"))); } + LWLockRelease(g_instance.audit_cxt.index_file_lock); + ereport(DEBUG1, (errmsg("pgaudit_update_indexfile index size: %ld", (long)ftell(fp)))); pgaudit_close_file(fp, tblfile_path); } -/* ---------- - * pgaudit_indextbl_init() - - * - * Initialize audit index table. - * ---------- - */ -static void pgaudit_indextbl_init(void) +static uint32 pgaudit_get_max_fnum(uint32 old_thread_num) { - uint32 index = 0; - uint32 old_maxnum = 0; - AuditIndexItem* item = NULL; + uint32 current_max_fnum = 0; + for (uint32 i = 0; i < old_thread_num; ++i) { + AuditIndexItem *cur_item = + g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[i]; + if (current_max_fnum < cur_item->filenum) { + current_max_fnum = cur_item->filenum; + } + } + return current_max_fnum; +} - pgaudit_read_indexfile(g_instance.attr.attr_security.Audit_directory); +/* + * Brief : pgaudit_find_indexfile + * Description : find pgaudit old index file function + */ +static bool pgaudit_find_indexfile(void) +{ + struct stat statbuf; + char tblfile_path[MAXPGPATH] = {0}; + int rc = snprintf_s(tblfile_path, + MAXPGPATH, + MAXPGPATH - 1, + "%s/%s", + g_instance.attr.attr_security.Audit_directory, + audit_indextbl_old_file); + securec_check_ss(rc, "\0", "\0"); + if (stat(tblfile_path, &statbuf) == 0) { + return true; + } + return false; +} - if (t_thrd.audit.audit_indextbl == NULL) { - t_thrd.audit.audit_indextbl = (AuditIndexTable*)palloc0( - (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem) + indextbl_header_size); - t_thrd.audit.audit_indextbl->maxnum = u_sess->attr.attr_security.Audit_RemainThreshold + 1; - auditfile_init(); +/* + * Brief : pgaudit_indexfile_upgrade + * Description : index table file upgrade function + */ +static void pgaudit_indexfile_upgrade(void) +{ + struct stat statbuf; + char tblfile_path[MAXPGPATH] = {0}; + if (t_thrd.proc == NULL) { return; } + int rc = snprintf_s(tblfile_path, + MAXPGPATH, + MAXPGPATH - 1, + "%s/%s", + g_instance.attr.attr_security.Audit_directory, + audit_indextbl_old_file); + securec_check_intval(rc,,); + ereport(DEBUG1, (errmsg("audit upgrade processing index file upgrade enter"))); - auditfile_init(); + if (t_thrd.proc->workingVersionNum >= AUDIT_INDEX_TABLE_VERSION_NUM) { + /* version is at least AUDIT_INDEX_TABLE_VERSION_NUM, upgrade succeeded */ + if (stat(tblfile_path, &statbuf) == 0) { + /* find old index table and delete it */ + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + if (unlink(tblfile_path) < 0) { + ereport(WARNING, (errmsg("could not remove audit old index table file \"%s\": %m", tblfile_path))); + } + LWLockRelease(g_instance.audit_cxt.index_file_lock); + } + } else { + /* upgrade processing and sync old index table for old version */ + if (stat(tblfile_path, 
&statbuf) != 0) { + /* old index table file is not exited */ + ereport(WARNING, (errmsg("could not find audit old index table file \"%s\": %m", tblfile_path))); + } + /* sys index file from new to old */ + pgaudit_indexfile_sync(PG_BINARY_W, true); + } +} - /* caculate the total space of the audit data */ - t_thrd.audit.pgaudit_totalspace = 0; +/* + * Brief : pgaudit_indexfile_sync + * Description : sync old and new index table file function + */ +static void pgaudit_indexfile_sync(const char* mode, bool allow_errors) +{ + FILE* fp = NULL; + struct stat statbuf; + char tblfile_path[MAXPGPATH] = {0}; + size_t nwritten = 0; + size_t count = 0; - index = t_thrd.audit.audit_indextbl->begidx; + int rc = snprintf_s(tblfile_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", g_instance.attr.attr_security.Audit_directory, + audit_indextbl_old_file); + securec_check_intval(rc, ,); + ereport(DEBUG1, (errmsg("audit upgrade processing index file sync enter"))); + if (stat(tblfile_path, &statbuf) == 0) { + /* old index table file is exist and sync file */ + fp = AllocateFile(tblfile_path, mode); + if (NULL == fp) { + ereport(allow_errors ? LOG : FATAL, (errcode_for_file_access(), + errmsg("could not open audit index table file \"%s\": %m", tblfile_path))); + return; + } + /* copy audit indextbl from new to old in memory */ + if (g_instance.audit_cxt.audit_indextbl != NULL) { + ereport(LOG, (errmsg("audit upgrade processing audit_indextbl != NULL"))); + g_instance.audit_cxt.audit_indextbl_old->maxnum = g_instance.audit_cxt.audit_indextbl->maxnum; + g_instance.audit_cxt.audit_indextbl_old->begidx = g_instance.audit_cxt.audit_indextbl->begidx; + g_instance.audit_cxt.audit_indextbl_old->curidx = g_instance.audit_cxt.audit_indextbl->curidx[0]; + g_instance.audit_cxt.audit_indextbl_old->count = g_instance.audit_cxt.audit_indextbl->count; + g_instance.audit_cxt.audit_indextbl_old->last_audit_time = + g_instance.audit_cxt.audit_indextbl->last_audit_time; + errno_t errorno = EOK; + errorno = memcpy_s(g_instance.audit_cxt.audit_indextbl_old->data, + g_instance.audit_cxt.audit_indextbl->maxnum * sizeof(AuditIndexItem), + g_instance.audit_cxt.audit_indextbl->data, + g_instance.audit_cxt.audit_indextbl->maxnum * sizeof(AuditIndexItem)); + securec_check(errorno, "\0", "\0"); + } + /* write down the current values from audit_indextbl to audit_indextbl_old */ + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + if (g_instance.audit_cxt.audit_indextbl_old != NULL) { + count = g_instance.audit_cxt.audit_indextbl_old->maxnum * sizeof(AuditIndexItem) + old_indextbl_header_size; + nwritten = fwrite(g_instance.audit_cxt.audit_indextbl_old, 1, count, fp); + if (nwritten != count) + ereport(allow_errors ? 
LOG : FATAL, + (errcode_for_file_access(), errmsg("could not write to audit old index file: %m"))); + } + LWLockRelease(g_instance.audit_cxt.index_file_lock); + ereport(LOG, (errmsg("pgaudit_indexfile_sync index size: %ld", (long)ftell(fp)))); + pgaudit_close_file(fp, tblfile_path); + } +} + +/* + * Brief : pgaudit_rewrite_indexfile + * Description : read old index file and rewrite to new index file function + */ +static void pgaudit_rewrite_indexfile(void) +{ + FILE* fp = NULL; + char tblfile_path[MAXPGPATH] = {0}; + size_t nread = 0; + AuditIndexTableOld old_index_tbl; + + int rc = snprintf_s(tblfile_path, + MAXPGPATH, + MAXPGPATH - 1, + "%s/%s", + g_instance.attr.attr_security.Audit_directory, + audit_indextbl_old_file); + securec_check_intval(rc,,); + /* open old index file and read audit index table */ + fp = AllocateFile(tblfile_path, PG_BINARY_R); + if (NULL == fp) { + ereport(LOG, + (errcode_for_file_access(), errmsg("could not open audit old index table file \"%s\": %m", tblfile_path))); + return; + } + ereport(LOG, (errmsg("audit upgrade processing rewrite enter"))); + /* read the audit old index table header first */ + nread = fread(&old_index_tbl, old_indextbl_header_size, 1, fp); + if (1 == nread) { + errno_t errorno = EOK; + /* maxnum should be restricted with guc parameter audit_file_remain_threshold */ + if (old_index_tbl.maxnum == 0 || old_index_tbl.maxnum > (1024 * 1024 + 1)) { + ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("failed to read indextbl maxnum"))); + } + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + /* free the current audit index table */ + pfree_ext(g_instance.audit_cxt.audit_indextbl); + pfree_ext(g_instance.audit_cxt.audit_indextbl_old); + /* read the whole audit index table */ + g_instance.audit_cxt.audit_indextbl_old = (AuditIndexTableOld *)MemoryContextAllocZero( + g_instance.audit_cxt.global_audit_context, + (old_index_tbl.maxnum * sizeof(AuditIndexItem) + old_indextbl_header_size)); + errorno = memcpy_s(g_instance.audit_cxt.audit_indextbl_old, + old_index_tbl.maxnum * sizeof(AuditIndexItem) + old_indextbl_header_size, + &old_index_tbl, old_indextbl_header_size); + securec_check(errorno, "\0", "\0"); + /* rewrite old index table to new index table */ + g_instance.audit_cxt.audit_indextbl = (AuditIndexTable *)MemoryContextAllocZero( + g_instance.audit_cxt.global_audit_context, + (old_index_tbl.maxnum * sizeof(AuditIndexItem) + indextbl_header_size)); + g_instance.audit_cxt.audit_indextbl->maxnum = old_index_tbl.maxnum; + g_instance.audit_cxt.audit_indextbl->count = old_index_tbl.count; + g_instance.audit_cxt.audit_indextbl->begidx = old_index_tbl.begidx; + g_instance.audit_cxt.audit_indextbl->last_audit_time = old_index_tbl.last_audit_time; + g_instance.audit_cxt.audit_indextbl->thread_num = 1; + g_instance.audit_cxt.audit_indextbl->curidx[0] = old_index_tbl.curidx; + g_instance.audit_cxt.audit_indextbl->latest_idx = old_index_tbl.curidx + 1; + /* read index item data */ + nread = fread(g_instance.audit_cxt.audit_indextbl->data, sizeof(AuditIndexItem), old_index_tbl.maxnum, fp); + if (nread != old_index_tbl.maxnum) { + ereport(WARNING, + (errcode_for_file_access(), errmsg("could not read audit index file \"%s\": %m", tblfile_path))); + } + + LWLockRelease(g_instance.audit_cxt.index_file_lock); + } + pgaudit_close_file(fp, tblfile_path); +} + +/* + * Brief : init index table in memory + * Description : + * 1. init the index file table based on thread num if the index table file does not exist + * 2. 
calculate pgaudit_totalspace based on current audit files + * it's safe to take no lock here, as only the PM or the audit master thread can invoke this init function + */ +static void pgaudit_indextbl_init_new(void) +{ + /* load from the index file or create new one */ + pgaudit_read_indexfile(g_instance.attr.attr_security.Audit_directory); + + /* init a new one (not loaded from the index file) when the database is initialized for the first time */ + if (g_instance.audit_cxt.audit_indextbl == NULL) { + ereport(LOG, (errmsg("pgaudit_indextbl_init_new first init"))); + g_instance.audit_cxt.audit_indextbl = + (AuditIndexTable *)MemoryContextAllocZero(g_instance.audit_cxt.global_audit_context, + (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem) + indextbl_header_size); + g_instance.audit_cxt.audit_indextbl->maxnum = u_sess->attr.attr_security.Audit_RemainThreshold + 1; + g_instance.audit_cxt.audit_indextbl->count = 0; /* audit files count will be updated by auditfile_open */ + g_instance.audit_cxt.audit_indextbl->begidx = 0; + g_instance.audit_cxt.audit_indextbl->thread_num = g_instance.audit_cxt.thread_num; + + for (int i = 0; i < g_instance.audit_cxt.thread_num; ++i) { + g_instance.audit_cxt.audit_indextbl->curidx[i] = i; + AuditIndexItem *item = + g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[i]; + item->filenum = i; + } + + g_instance.audit_cxt.audit_indextbl->latest_idx = g_instance.audit_cxt.thread_num; + } + + uint32 index = 0; + AuditIndexItem *item = NULL; + + /* + * thread num changed routine + * when thread num is enlarged: update latest_idx, curidxes, items and thread_num; if the new audit file does not exist, + * the audit thread will reinit a new one + * when thread num shrinks: dismiss the old ones and update curidxes and thread_num, but do not update latest_idx + */ + if (g_instance.audit_cxt.audit_indextbl->thread_num != (uint32)g_instance.audit_cxt.thread_num) { + uint32 old_thread_num = g_instance.audit_cxt.audit_indextbl->thread_num; + uint32 new_thread_num = (uint32)g_instance.audit_cxt.thread_num; + uint32 latest_idx = g_instance.audit_cxt.audit_indextbl->latest_idx; + + if (AUDIT_THREADNUM_ENLARGE) { + /* + * before arranging fnums for new audit threads, get the last max fnum + * then increase the fnum based on the max fnum + */ + uint32 old_max_fnum = pgaudit_get_max_fnum(old_thread_num); + + /* update curidxes and corresponding items */ + uint32 step = new_thread_num - old_thread_num; + for (uint32 i = 0; i < step; ++i) { + uint32 new_idx = old_thread_num + i; + g_instance.audit_cxt.audit_indextbl->curidx[new_idx] = latest_idx + i; + AuditIndexItem *new_item = + g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[new_idx]; + new_item->filenum = (old_max_fnum + i + 1); + } + g_instance.audit_cxt.audit_indextbl->latest_idx += step; + } else { + uint32 step = old_thread_num - new_thread_num; + uint32 *curidxes = g_instance.audit_cxt.audit_indextbl->curidx; + errno_t errorno = + memmove_s(curidxes, MAX_AUDIT_NUM * sizeof(uint32), curidxes + step, new_thread_num * sizeof(uint32)); + securec_check(errorno, "\0", "\0"); + } + + g_instance.audit_cxt.audit_indextbl->thread_num = (uint32)g_instance.audit_cxt.thread_num; + } + + /* audit threads are writing files in range [earliest_idx, latest_idx) */ + uint32 earliest_idx = g_instance.audit_cxt.audit_indextbl->latest_idx - g_instance.audit_cxt.thread_num; + + /* calculate total space of all audit files */ + g_instance.audit_cxt.pgaudit_totalspace = 0; + index = g_instance.audit_cxt.audit_indextbl->begidx; 
do { - item = t_thrd.audit.audit_indextbl->data + index; - - t_thrd.audit.pgaudit_totalspace += item->filesize; - - if (index == t_thrd.audit.audit_indextbl->curidx) + item = g_instance.audit_cxt.audit_indextbl->data + index; + g_instance.audit_cxt.pgaudit_totalspace += item->filesize; + /* stop at the current writing index */ + if (index == earliest_idx) { break; - - index = (index + 1) % t_thrd.audit.audit_indextbl->maxnum; + } + index = (index + 1) % g_instance.audit_cxt.audit_indextbl->maxnum; } while (true); - + /* used to give a space warning in logs when the audit log rotation policy is based on time */ t_thrd.audit.space_beyond_size = - (t_thrd.audit.pgaudit_totalspace / SPACE_INTERVAL_SIZE) * SPACE_INTERVAL_SIZE + SPACE_INTERVAL_SIZE; + (g_instance.audit_cxt.pgaudit_totalspace / SPACE_INTERVAL_SIZE) * SPACE_INTERVAL_SIZE + SPACE_INTERVAL_SIZE; - old_maxnum = t_thrd.audit.audit_indextbl->maxnum; + ereport(LOG, (errmsg("pgaudit_indextbl_init_new success"))); + return; +} + +static void pgaudit_update_maxnum() +{ + errno_t errorno = EOK; + int thread_num = g_instance.attr.attr_security.audit_thread_num; + + int new_indextbl_data_length = (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem); + AuditIndexTable *new_indextbl = (AuditIndexTable *)MemoryContextAllocZero( + g_instance.audit_cxt.global_audit_context, new_indextbl_data_length + indextbl_header_size); + + /* curidx and latest_idx should be updated later from old index table file */ + new_indextbl->begidx = 0; + new_indextbl->maxnum = u_sess->attr.attr_security.Audit_RemainThreshold + 1; + + if (g_instance.audit_cxt.audit_indextbl->count > 0) { + AuditIndexItem *item = NULL; + uint32 latest_idx = g_instance.audit_cxt.audit_indextbl->latest_idx; + uint32 index = g_instance.audit_cxt.audit_indextbl->begidx; + uint32 pos = new_indextbl->begidx; + do { + item = g_instance.audit_cxt.audit_indextbl->data + index; + errorno = memcpy_s(new_indextbl->data + pos, (new_indextbl_data_length - pos * sizeof(AuditIndexItem)), item, sizeof(AuditIndexItem)); + securec_check(errorno, "\0", "\0"); + new_indextbl->count++; + + /* + * finished copying the old index table entries in range [begidx, latest_idx), + * then update the new index table curidxes + */ + if (index == (latest_idx - 1)) { + for (int i = 0; i < thread_num; ++i) { + new_indextbl->curidx[i] = pos - thread_num + i; + } + new_indextbl->latest_idx = pos; + break; + } + + pos++; + index = (index + 1) % g_instance.audit_cxt.audit_indextbl->maxnum; + } while (true); + } + pfree(g_instance.audit_cxt.audit_indextbl); + g_instance.audit_cxt.audit_indextbl = new_indextbl; +} + +/* + * Brief : reset the index file based on new parameter + * Description : + * 1. clean up the current auditfiles + * 2. alloc new index table using new parameter & swap old one + * 3. 
flush the index table from memory + */ +static void pgaudit_reset_indexfile() +{ /* If file remain threshold parameter changed more little, than need to cleanup the audit data first */ + uint32 old_maxnum = g_instance.audit_cxt.audit_indextbl->maxnum; if (old_maxnum > (uint32)u_sess->attr.attr_security.Audit_RemainThreshold + 1) { - int rc = snprintf_s(t_thrd.audit.pgaudit_filepath, - MAXPGPATH, - MAXPGPATH - 1, - "%s/%s", - g_instance.attr.attr_security.Audit_directory, - audit_indextbl_file); + int rc = snprintf_s(t_thrd.audit.pgaudit_filepath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + g_instance.attr.attr_security.Audit_directory, audit_indextbl_file); securec_check_intval(rc,,); if (unlink(t_thrd.audit.pgaudit_filepath) < 0) ereport(WARNING, (errmsg("could not remove audit index table file: %m"))); pgaudit_cleanup(); + ereport(LOG, (errmsg("pgaudit_reset_indexfile: cleaned up audit files triggered by parameter change"))); } /* If file remain threshold parameter changed, than copy the old audit index table to the new table */ if (old_maxnum != (uint32)u_sess->attr.attr_security.Audit_RemainThreshold + 1) { - AuditIndexTable* new_indextbl = NULL; - new_indextbl = (AuditIndexTable*)palloc0( - (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem) + indextbl_header_size); - new_indextbl->maxnum = u_sess->attr.attr_security.Audit_RemainThreshold + 1; - - if (t_thrd.audit.audit_indextbl->count > 0) { - uint32 pos = 0; - errno_t errorno = EOK; - index = t_thrd.audit.audit_indextbl->begidx; - pos = new_indextbl->begidx; - do { - item = t_thrd.audit.audit_indextbl->data + index; - errorno = memcpy_s(new_indextbl->data + pos, - (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem) - pos, - item, - sizeof(AuditIndexItem)); - securec_check(errorno, "\0", "\0"); - new_indextbl->count++; - - if (index == t_thrd.audit.audit_indextbl->curidx) - break; - - pos++; - index = (index + 1) % t_thrd.audit.audit_indextbl->maxnum; - new_indextbl->curidx = (new_indextbl->curidx + 1) % new_indextbl->maxnum; - } while (true); - } - pfree(t_thrd.audit.audit_indextbl); - t_thrd.audit.audit_indextbl = new_indextbl; - - pgaudit_update_indexfile(PG_BINARY_W, true); + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + pgaudit_update_maxnum(); + LWLockRelease(g_instance.audit_cxt.index_file_lock); } + + pgaudit_update_indexfile(PG_BINARY_W, true); } /* @@ -2225,22 +2535,6 @@ static const char* pgaudit_string_field(AuditData* adata, int num) return field; } -struct AuditElasticEvent { - const char* aDataType; - const char* aDataResult; - const char* auditUserId; - const char* auditUserName; - const char* auditDatabaseName; - const char* clientConnInfo; - const char* objectName; - const char* detailInfo; - const char* nodeNameInfo; - const char* threadIdInfo; - const char* localPortInfo; - const char* remotePortInfo; - long long eventTime; -}; - static char* serialize_event_to_json(AuditData *adata, long long eventTime) { AuditElasticEvent event; @@ -2461,15 +2755,15 @@ static void deserialization_to_tuple(Datum (&values)[PGAUDIT_QUERY_COLS], Assert(i == PGAUDIT_QUERY_COLS); } + static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 fnum, TimestampTz begtime, - TimestampTz endtime, const char *audit_directory) + TimestampTz endtime, const char *audit_directory) { FILE* fp = NULL; size_t nread = 0; TimestampTz datetime; AuditMsgHdr header; AuditData* adata = NULL; - int fd = -1; if (state == NULL || tdesc == NULL) return; @@ 
-2489,34 +2783,20 @@ static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 f Datum values[PGAUDIT_QUERY_COLS] = {0}; bool nulls[PGAUDIT_QUERY_COLS] = {0}; errno_t errorno = EOK; - - /* read the audit message header first */ - if (!fread(&header, sizeof(AuditMsgHdr), 1, fp)) { + /* + * two scenarios indicate that the audit file is corrupt: + * 1. failure to parse the header length + * 2. the header encoding is not valid + */ + if (fgetc(fp) == EOF) { break; } - if (header.signature[0] != 'A' || - header.signature[1] != 'U' || - header.version != 0 || - !(header.fields == (PGAUDIT_QUERY_COLS - 1) || - header.fields == PGAUDIT_QUERY_COLS) || - (header.size <= sizeof(AuditMsgHdr))) { - /* To compatible with the old audit files, especially for the database upgraded from old - * versions, we allow the num of fields equal to PGAUDIT_QUERY_COLS or PGAUDIT_QUERY_COLS -1. - */ + (void)fseek(fp, -1, SEEK_CUR); + size_t header_available = fread(&header, sizeof(AuditMsgHdr), 1, fp); + if (header_available != 1 || !pgaudit_valid_header(&header)) { ereport(LOG, (errmsg("invalid data in audit file \"%s\"", t_thrd.audit.pgaudit_filepath))); - - // truncate the current file - pgaudit_close_file(fp, t_thrd.audit.pgaudit_filepath); - fp = NULL; - fd = open(t_thrd.audit.pgaudit_filepath, O_RDWR | O_TRUNC, pgaudit_filemode); - if (fd < 0) { - ereport(LOG, (errcode_for_file_access(), errmsg("could not open audit file \"%s\": %m", - t_thrd.audit.pgaudit_filepath))); - } else { - close(fd); - fd = -1; - } - + /* label the corrupt file num; it may be reinitialized in the audit thread, not here. */ + pgaudit_mark_corrupt_info(fnum); break; } @@ -2526,9 +2806,11 @@ static void pgaudit_query_file(Tuplestorestate *state, TupleDesc tdesc, uint32 f securec_check(errorno, "\0", "\0"); nread = fread((char*)adata + sizeof(AuditMsgHdr), header.size - sizeof(AuditMsgHdr), 1, fp); if (nread != 1) { - ereport(WARNING, + ereport(LOG, (errcode_for_file_access(), errmsg("could not read audit file \"%s\": %m", t_thrd.audit.pgaudit_filepath))); + /* label the corrupt file num; it may be reinitialized in the audit thread, not here. */ + pgaudit_mark_corrupt_info(fnum); pfree(adata); break; } @@ -2618,11 +2900,12 @@ static bool pgaudit_check_system(TimestampTz begtime, TimestampTz endtime, uint3 TimestampTz curr_filetime = 0; TimestampTz next_filetime = 0; AuditIndexItem* item = t_thrd.audit.audit_indextbl->data + index; + uint32 earliest_idx = t_thrd.audit.audit_indextbl->latest_idx - g_instance.audit_cxt.thread_num; if (item->ctime > 0) { curr_filetime = time_t_to_timestamptz(item->ctime); /* check whether the item is the last item */ - if (index == t_thrd.audit.audit_indextbl->curidx) { + if ((index >= earliest_idx && index < t_thrd.audit.audit_indextbl->latest_idx)) { if (curr_filetime <= begtime || curr_filetime <= endtime) { satisfied = true; } @@ -2650,6 +2933,39 @@ static bool pgaudit_check_system(TimestampTz begtime, TimestampTz endtime, uint3 return satisfied; } +/* + * Brief : check whether the invoker is allowed to query audit. 
+ * Description : + */ +static void pgaudit_query_valid_check(const ReturnSetInfo *rsinfo, FunctionCallInfoData *fcinfo, TupleDesc &tupdesc) +{ + Oid roleid = InvalidOid; + /* Check some permissions first */ + roleid = GetUserId(); + if (!has_auditadmin_privilege(roleid)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to query audit"))); + } + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + } + if (!((unsigned int)rsinfo->allowedModes & SFRM_Materialize)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + } + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { + ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("return type must be a row type"))); + } + + if (tupdesc->natts != PGAUDIT_QUERY_COLS) { + ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("attribute count of the return row type not matched"))); + } +} + /* * Brief : query audit information between begin time and end time. * Description : @@ -2663,48 +2979,34 @@ Datum pg_query_audit(PG_FUNCTION_ARGS) MemoryContext oldcontext = NULL; TimestampTz begtime = PG_GETARG_TIMESTAMPTZ(0); TimestampTz endtime = PG_GETARG_TIMESTAMPTZ(1); - Oid roleid = InvalidOid; char* audit_dir = NULL; - /* Check some permissions first */ - roleid = GetUserId(); - if (!has_auditadmin_privilege(roleid)) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to query audit"))); - } + pgaudit_query_valid_check(rsinfo, fcinfo, tupdesc); + /* + * When g_instance.audit_cxt.audit_indextbl is not NULL + * but its originating memory context is NULL, freeing it will cause a core dump + */ if (PG_NARGS() == PG_QUERY_AUDIT_ARGS_MAX) { audit_dir = text_to_cstring(PG_GETARG_TEXT_PP(PG_QUERY_AUDIT_ARGS_MAX - 1)); } - - /* check to see if caller supports us returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - } - if (!((unsigned int)rsinfo->allowedModes & SFRM_Materialize)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("materialize mode required, but it is not allowed in this context"))); - } - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { - ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("return type must be a row type"))); - } - - if (tupdesc->natts != PGAUDIT_QUERY_COLS) { - ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("attribute count of the return row type not matched"))); - } + audit_dir = (audit_dir == NULL) ? g_instance.attr.attr_security.Audit_directory : audit_dir; /* - * When t_thrd.audit.audit_indextbl is not NULL, - * but its origin memory context is NULL, free it will generate core + * load the index audit table from global index audit table instance + * then use the local thread one when iterating all audit files */ + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_SHARED); t_thrd.audit.audit_indextbl = NULL; - audit_dir = (audit_dir == NULL) ? 
g_instance.attr.attr_security.Audit_directory : audit_dir; - pgaudit_read_indexfile(audit_dir); + int indextbl_len = + (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem) + indextbl_header_size; + t_thrd.audit.audit_indextbl = (AuditIndexTable *)palloc0(indextbl_len); + errno_t errorno = + memcpy_s(t_thrd.audit.audit_indextbl, indextbl_len, g_instance.audit_cxt.audit_indextbl, indextbl_len); + securec_check(errorno, "\0", "\0"); + LWLockRelease(g_instance.audit_cxt.index_file_lock); + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); @@ -2715,6 +3017,8 @@ Datum pg_query_audit(PG_FUNCTION_ARGS) MemoryContextSwitchTo(oldcontext); + ereport(DEBUG1, + (errmsg("pg_query_audit count: %d indextbl_len: %d", t_thrd.audit.audit_indextbl->count, indextbl_len))); if (begtime < endtime && t_thrd.audit.audit_indextbl != NULL && t_thrd.audit.audit_indextbl->count > 0) { bool satisfied = false; uint32 index = 0; @@ -2732,8 +3036,9 @@ Datum pg_query_audit(PG_FUNCTION_ARGS) pgaudit_query_file(tupstore, tupdesc, fnum, begtime, endtime, audit_dir); satisfied = false; } + ereport(LOG, (errmsg("pg_query_audit current fnum: %d", fnum))); - if (index == t_thrd.audit.audit_indextbl->curidx) { + if (index == (t_thrd.audit.audit_indextbl->latest_idx - 1)) { break; } @@ -2741,13 +3046,9 @@ Datum pg_query_audit(PG_FUNCTION_ARGS) } while (true); } - if (t_thrd.audit.audit_indextbl != NULL) { - pfree(t_thrd.audit.audit_indextbl); - t_thrd.audit.audit_indextbl = NULL; - } /* clean up and return the tuplestore */ + pfree_ext(t_thrd.audit.audit_indextbl); tuplestore_donestoring(tupstore); - return (Datum)0; } @@ -2768,13 +3069,21 @@ Datum pg_delete_audit(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to delete audit"))); } - /* - * When t_thrd.audit.audit_indextbl is not NULL, - * but its origin memory context is NULL, free it will generate core + /* + * load the index audit table from global index audit table instance + * then use the local thread one when iterating all audit files */ - t_thrd.audit.audit_indextbl = NULL; - pgaudit_read_indexfile(g_instance.attr.attr_security.Audit_directory); + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_SHARED); + pfree_ext(t_thrd.audit.audit_indextbl); + int indextbl_len = + (u_sess->attr.attr_security.Audit_RemainThreshold + 1) * sizeof(AuditIndexItem) + indextbl_header_size; + t_thrd.audit.audit_indextbl = (AuditIndexTable *)palloc0(indextbl_len); + errno_t errorno = + memcpy_s(t_thrd.audit.audit_indextbl, indextbl_len, g_instance.audit_cxt.audit_indextbl, indextbl_len); + securec_check(errorno, "\0", "\0"); + LWLockRelease(g_instance.audit_cxt.index_file_lock); + int thread_num = g_instance.audit_cxt.thread_num; if (begtime < endtime && (t_thrd.audit.audit_indextbl != NULL) && t_thrd.audit.audit_indextbl->count > 0) { bool satisfied = false; uint32 index; @@ -2793,7 +3102,7 @@ Datum pg_delete_audit(PG_FUNCTION_ARGS) satisfied = false; } - if (index == t_thrd.audit.audit_indextbl->curidx) { + if (index == t_thrd.audit.audit_indextbl->curidx[thread_num - 1]) { break; } @@ -2801,11 +3110,7 @@ Datum pg_delete_audit(PG_FUNCTION_ARGS) } while (true); } - if (t_thrd.audit.audit_indextbl) { - pfree(t_thrd.audit.audit_indextbl); - t_thrd.audit.audit_indextbl = NULL; - } - + pfree_ext(t_thrd.audit.audit_indextbl); PG_RETURN_VOID(); } @@ -2823,14 +3128,25 @@ static void elasic_search_connection_test() (void)m_curlUtils.http_post_file_request(url, 
"", true); } +/* + * check and reinit the audit files + * 1. whether the audit file is exist + * 2. recognize the corrupt file + * 3. try to reinit audit file if sysauditFile is NULL + */ static void CheckAuditFile(void) { uint32 fnum = 0; AuditIndexItem *item = NULL; struct stat statBuf; errno_t rc; - item = t_thrd.audit.audit_indextbl->data + t_thrd.audit.audit_indextbl->curidx; + int thread_idx = t_thrd.audit.cur_thread_idx; + + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_SHARED); + item = g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[thread_idx]; fnum = item->filenum; + LWLockRelease(g_instance.audit_cxt.index_file_lock); + rc = snprintf_s(t_thrd.audit.pgaudit_filepath, MAXPGPATH, MAXPGPATH - 1, pgaudit_filename, g_instance.attr.attr_security.Audit_directory, fnum); securec_check_ss(rc, "\0", "\0"); @@ -2848,7 +3164,323 @@ static void CheckAuditFile(void) t_thrd.audit.sysauditFile = NULL; } } - /* make sure init audit file if pgaudit_filepath accessable */ + + /* reinit audit file if corrupted */ + uint32 corrupt_audit_fnum = pg_atomic_read_u32(&g_instance.audit_cxt.audit_coru_fnum[thread_idx]); + if (corrupt_audit_fnum != UINT32_MAX && corrupt_audit_fnum == fnum) { + ereport(WARNING, (errmsg("invalid data in audit file fnum %d", fnum))); + + /* truncate the current file */ + int fd = open(t_thrd.audit.pgaudit_filepath, O_RDWR | O_TRUNC, pgaudit_filemode); + if (fd < 0) { + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not truncate audit file \"%s\": %m", t_thrd.audit.pgaudit_filepath))); + } else { + close(fd); + } + + /* audit file will init after make sysauditFile NULL generating the new file audit log the same time */ + if (t_thrd.audit.sysauditFile != NULL) { + fclose(t_thrd.audit.sysauditFile); + t_thrd.audit.sysauditFile = NULL; + } + + pg_atomic_write_u32(&g_instance.audit_cxt.audit_coru_fnum[thread_idx], UINT32_MAX); + } + + /* + * make sure init audit file if pgaudit_filepath accessable + * directly return when sysauditFile exist + */ auditfile_init(true); } +static bool pgaudit_valid_header(const AuditMsgHdr* header) +{ + return !((header->signature[0]) != 'A' || header->signature[1] != 'U' || header->version != 0 || + !(header->fields == (PGAUDIT_QUERY_COLS - 1) || header->fields == PGAUDIT_QUERY_COLS) || + (header->size <= sizeof(AuditMsgHdr))); +} + +/* + * mark corrupt fnum by postgres thread + * used for reinit audit files in audit thread + */ +static void pgaudit_mark_corrupt_info(uint32 fnum) +{ + /* + * only the writing audit files could be mark corrupt info + * ignore the old audit files here + */ + int thread_num = g_instance.audit_cxt.thread_num; + + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_SHARED); + + /* + * iterate the all writing index looking for the thread idx of fnum + */ + int thread_idx = -1; + for (int i = 0; i < thread_num; ++i) { + AuditIndexItem *item = + g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[i]; + if (fnum == item->filenum) { + thread_idx = i; + break; + } + } + + LWLockRelease(g_instance.audit_cxt.index_file_lock); + + /* old audit files */ + if (thread_idx == -1) { + return; + } + + ereport(WARNING, (errmsg("audit file num %d is corrupted.", fnum))); + + /* + * if any other thread have updated the audit_fnum or fnum is older one, do nothing but break here + */ + uint32 audit_corrupt_fnum = pg_atomic_read_u32(&g_instance.audit_cxt.audit_coru_fnum[thread_idx]); + if (audit_corrupt_fnum < fnum || audit_corrupt_fnum == 
UINT32_MAX) { + while (!pg_atomic_compare_exchange_u32(&g_instance.audit_cxt.audit_coru_fnum[thread_idx], &audit_corrupt_fnum, + fnum)) { + audit_corrupt_fnum = pg_atomic_read_u32(&g_instance.audit_cxt.audit_coru_fnum[thread_idx]); + if (audit_corrupt_fnum >= fnum && audit_corrupt_fnum != UINT32_MAX) { + break; + } + } + } +} + +static void audit_append_xid_info(const char *detail_info, char *detail_info_xid, uint32 len) +{ + Assert(u_sess->attr.attr_security.audit_xid_info == 1); + int rc = 0; + TransactionId xid = InvalidTransactionId; + if (IsTransactionState()) { + xid = GetCurrentTransactionId(); + rc = snprintf_s(detail_info_xid, len, len - 1, "xid=%llu, %s", xid, detail_info); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(detail_info_xid, len, len - 1, "xid=NA, %s", detail_info); + securec_check_ss(rc, "\0", "\0"); + } +} + +/* + * Brief : audit process exit + * Description : when the audit threads exit, the PM thread releases the related pipes; + * the audit master thread will do the index audit file flush job, so it is not done here + */ +void audit_process_cxt_exit() +{ + Assert(t_thrd.role != AUDITOR); + auditpipe_done = false; + + /* close unused reading and writing ends */ + int thread_num = g_instance.attr.attr_security.audit_thread_num; + int *sys_audit_pipe = g_instance.audit_cxt.sys_audit_pipes; + if (sys_audit_pipe == NULL) { + return; + } + + /* to close all pipes safely, wait for all audit threads to exit here */ + while (true) { + if (pg_atomic_read_u32(&g_instance.audit_cxt.current_audit_index) == 0) { + break; + } + } + + for (int i = 0; i < thread_num; ++i) { + if (sys_audit_pipe[PIPE_READ_INDEX(i)] > 0) { + close(sys_audit_pipe[PIPE_READ_INDEX(i)]); + sys_audit_pipe[PIPE_READ_INDEX(i)] = -1; + } + } + pfree(g_instance.audit_cxt.sys_audit_pipes); + g_instance.audit_cxt.sys_audit_pipes = NULL; +} + +/* + * Brief : audit process init for multi-thread management + * Description : init audit global env for audit threads including + * 1. index file lock + * 2. pipes for audit + * 3. audit logs & path + * 4. 
audit index file + */ +void audit_process_cxt_init() +{ + Assert(t_thrd.role != AUDITOR); + + /* return directly when audit process init has been done */ + if (auditpipe_done) { + return; + } + + ereport(LOG, (errmsg("audit_process_cxt_init enter"))); + errno_t errorno = 0; + int thread_num = g_instance.attr.attr_security.audit_thread_num; + + MemoryContext oldcontext = MemoryContextSwitchTo(g_instance.audit_cxt.global_audit_context); + g_instance.audit_cxt.thread_num = thread_num; + + if (g_instance.audit_cxt.index_file_lock == NULL) { + g_instance.audit_cxt.index_file_lock = LWLockAssign(LWTRANCHE_AUDIT_INDEX_WAIT); + ereport(LOG, (errmsg("audit_process_cxt_init index file lock init ok"))); + } + + /* init all pipes for all audit threads */ + if (g_instance.audit_cxt.sys_audit_pipes == NULL) { + g_instance.audit_cxt.sys_audit_pipes = + (int *)palloc0(sizeof(int) * 2 * thread_num); /* 2 descriptors for one pipe */ + int *&pipes = g_instance.audit_cxt.sys_audit_pipes; + errorno = memset_s(pipes, sizeof(int) * 2 * thread_num, -1, sizeof(int) * 2 * thread_num); + securec_check(errorno, "\0", "\0"); + for (int i = 0; i < thread_num; ++i) { + if (pipe(&pipes[PIPE_READ_INDEX(i)]) < 0) { + ereport(FATAL, (errcode_for_socket_access(), (errmsg("could not create pipe for sysaudit: %m")))); + } + ereport(LOG, (errmsg("audit_process_cxt_init pipe init successfully for pipe : %d file descriptor: %d", i, + g_instance.audit_cxt.sys_audit_pipes[i * 2]))); + } + } + + /* init audit path */ + char Audit_directory_Done[MAXPGPATH] = {0}; + int rc = snprintf_s(Audit_directory_Done, sizeof(Audit_directory_Done), sizeof(Audit_directory_Done) - 1, "%s/done", + g_instance.attr.attr_security.Audit_directory); + securec_check_ss(rc, "\0", "\0"); + (void)pg_mkdir_p(g_instance.attr.attr_security.Audit_directory, S_IRWXU); + (void)pg_mkdir_p(Audit_directory_Done, S_IRWXU); + + /* init index file & hold the content into g_instance.audit_cxt.audit_indextbl */ + g_instance.audit_cxt.audit_indextbl = NULL; + pgaudit_indextbl_init_new(); + pgaudit_update_indexfile(PG_BINARY_A, false); + + if (!auditpipe_done) { + auditpipe_done = true; + } + + (void)MemoryContextSwitchTo(oldcontext); +} + +/* + * Brief : load audit thread index + * Description : + */ +int audit_load_thread_index() +{ + if (t_thrd.audit.cur_thread_idx != -1) { + return t_thrd.audit.cur_thread_idx; + } + + int idx = pg_atomic_fetch_add_u32(&g_instance.audit_cxt.current_audit_index, 1); + t_thrd.audit.cur_thread_idx = idx; + + Assert(t_thrd.audit.cur_thread_idx >= 0); + Assert(t_thrd.audit.cur_thread_idx < g_instance.audit_cxt.thread_num); + + return t_thrd.audit.cur_thread_idx; +} + +/* + * Brief : get audit file num for current thread index + * Description : + */ +uint32 pgaudit_get_auditfile_num() +{ + Assert(t_thrd.role == AUDITOR); + + uint32 fnum = 0; + int thread_idx = t_thrd.audit.cur_thread_idx; + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_SHARED); + AuditIndexItem *item = + g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[thread_idx]; + fnum = item->filenum; + LWLockRelease(g_instance.audit_cxt.index_file_lock); + return fnum; +} + +/* + * Brief : update the index table file when a new file is opened + * Description : the flush of the index table file will be invoked later + */ +void pgaudit_update_auditfile_time(pg_time_t timestamp, bool exist) +{ + int thread_idx = t_thrd.audit.cur_thread_idx; + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + if (!exist) { + AuditIndexItem *item = + g_instance.audit_cxt.audit_indextbl->data + 
g_instance.audit_cxt.audit_indextbl->curidx[thread_idx]; + item->ctime = timestamp; + ++g_instance.audit_cxt.audit_indextbl->count; + } + LWLockRelease(g_instance.audit_cxt.index_file_lock); +} + +/* + * Brief : switch to the next audit file & update the index table info + * Description : recalculate the total space here when rotating one audit file + */ +void pgaudit_switch_next_auditfile() +{ + AuditIndexItem *item = NULL; + uint32 new_fnum = 0; + int thread_idx = t_thrd.audit.cur_thread_idx; + + LWLockAcquire(g_instance.audit_cxt.index_file_lock, LW_EXCLUSIVE); + + /* update the current item filesize */ + item = g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[thread_idx]; + item->filesize = ftell(t_thrd.audit.sysauditFile); + + /* update the last_audit_time for audit_resource_policy 0 policy */ + pg_time_t curtime = time(NULL); + g_instance.audit_cxt.audit_indextbl->last_audit_time = curtime; + + /* update total space */ + g_instance.audit_cxt.pgaudit_totalspace += item->filesize; + + /* get the next writing audit file index */ + uint32 current_max_fnum = pgaudit_get_max_fnum(g_instance.audit_cxt.thread_num); + uint32 new_idx = pg_atomic_fetch_add_u32(&g_instance.audit_cxt.audit_indextbl->latest_idx, 1); + g_instance.audit_cxt.audit_indextbl->latest_idx = + (g_instance.audit_cxt.audit_indextbl->latest_idx) % g_instance.audit_cxt.audit_indextbl->maxnum; + g_instance.audit_cxt.audit_indextbl->curidx[thread_idx] = (new_idx) % g_instance.audit_cxt.audit_indextbl->maxnum; + + item = g_instance.audit_cxt.audit_indextbl->data + g_instance.audit_cxt.audit_indextbl->curidx[thread_idx]; + new_fnum = item->filenum = ++current_max_fnum; + + LWLockRelease(g_instance.audit_cxt.index_file_lock); + ereport(DEBUG1, (errmsg("pgaudit_switch_next_auditfile new fnum :%d cur pgaudit_totalspace: %ld MB", new_fnum, + g_instance.audit_cxt.pgaudit_totalspace))); +} + +/* + * Brief : decrease the thread index when the thread exits + * Description : + */ +static void pgauditor_kill(int code, Datum arg) +{ + pg_atomic_fetch_sub_u32(&g_instance.audit_cxt.current_audit_index, 1); +} + +/* + * Brief : check auditor thread + * Description : + */ +bool pg_auditor_thread(ThreadId pid) +{ + if (g_instance.pid_cxt.PgAuditPID == NULL) { + return false; + } + for (int i = 0; i < g_instance.audit_cxt.thread_num; ++i) { + if (pid == g_instance.pid_cxt.PgAuditPID[i]) { + return true; + } + } + return false; +} diff --git a/src/gausskernel/process/postmaster/pgstat.cpp b/src/gausskernel/process/postmaster/pgstat.cpp index 4e4821a39..961574d87 100644 --- a/src/gausskernel/process/postmaster/pgstat.cpp +++ b/src/gausskernel/process/postmaster/pgstat.cpp @@ -97,12 -6 @@ #define static #endif -/* ---------- - * Paths for the statistics files (relative to installation's $PGDATA). - * ---------- - */ -#define PGSTAT_STAT_PERMANENT_FILENAME "global/pgstat.stat" -#define PGSTAT_STAT_PERMANENT_TMPFILE "global/pgstat.tmp" #define pg_stat_relation(flag) (InvalidOid == (flag)) /* ---------- @@ -2873,9 +2867,9 @@ void CreateSharedBackendStatus(void) (char*)ShmemInitStruct("Backend Activity Buffer", t_thrd.shemem_ptr_cxt.BackendActivityBufferSize, &found); if (!found) { - rc = memset_s( - t_thrd.shemem_ptr_cxt.BackendActivityBuffer, t_thrd.shemem_ptr_cxt.BackendActivityBufferSize, 0, size); - securec_check(rc, "\0", "\0"); + /* call MemsetHugeSize instead because the size may be larger than INT_MAX. 
*/ + MemsetHugeSize( + t_thrd.shemem_ptr_cxt.BackendActivityBuffer, t_thrd.shemem_ptr_cxt.BackendActivityBufferSize, 0); /* Initialize st_activity pointers. */ buffer = t_thrd.shemem_ptr_cxt.BackendActivityBuffer; @@ -3126,6 +3120,9 @@ void pgstat_bestart(void) beentry->st_activity[g_instance.attr.attr_common.pgstat_track_activity_query_size - 1] = '\0'; beentry->st_queryid = 0; + beentry->st_unique_sql_key.unique_sql_id = 0; + beentry->st_unique_sql_key.user_id = 0; + beentry->st_unique_sql_key.cn_id = 0; beentry->st_tid = gettid(); beentry->st_parent_sessionid = 0; beentry->st_thread_level = 0; @@ -3135,6 +3132,7 @@ void pgstat_bestart(void) beentry->st_waitnode_count = 0; beentry->st_plannodeid = -1; beentry->st_numnodes = -1; + beentry->trace_cxt.trace_id[0] = '\0'; /* Initialize wait event information. */ beentry->st_waitevent = WAIT_EVENT_END; beentry->st_xid = 0; @@ -3334,7 +3332,16 @@ static void pgstat_beshutdown_hook(int code, Datum arg) beentry->globalSessionId.nodeId = 0; beentry->globalSessionId.seq = 0; - pgstat_increment_changecount_after(beentry); + /* + * make sure st_changecount is even before releasing it. + * + * In case some thread was interrupted by SIGTERM at any time, leaving an inconsistent st_changecount + * in PgBackendStatus, PgstatCollectorMain may hang while waiting for it to become even and + * cannot exit after receiving the SIGTERM signal + */ + do { + pgstat_increment_changecount_after(beentry); + } while ((beentry->st_changecount & 1) != 0); /* * handle below cases: @@ -3396,7 +3403,17 @@ void pgstat_beshutdown_session(int ctrl_index) beentry->globalSessionId.sessionId = 0; beentry->globalSessionId.nodeId = 0; beentry->globalSessionId.seq = 0; - pgstat_increment_changecount_after(beentry); + + /* + * make sure st_changecount is even before releasing it. + * + * In case some thread was interrupted by SIGTERM at any time, leaving an inconsistent st_changecount + * in PgBackendStatus, PgstatCollectorMain may hang while waiting for it to become even and + * cannot exit after receiving the SIGTERM signal + */ + do { + pgstat_increment_changecount_after(beentry); + } while ((beentry->st_changecount & 1) != 0); /* * pgstat_beshutdown_session will be called in thread pool mode: @@ -3632,6 +3649,23 @@ void pgstat_report_global_session_id(GlobalSessionId globalSessionId) pgstat_increment_changecount_after(beentry); } +void pgstat_report_unique_sql_id(bool resetUniqueSql) +{ + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + + if (IS_PGSTATE_TRACK_UNDEFINE) + return; + if (resetUniqueSql) { + beentry->st_unique_sql_key.unique_sql_id = 0; + beentry->st_unique_sql_key.cn_id = 0; + beentry->st_unique_sql_key.user_id = 0; + } else { + beentry->st_unique_sql_key.unique_sql_id = u_sess->unique_sql_cxt.unique_sql_id; + beentry->st_unique_sql_key.cn_id = u_sess->unique_sql_cxt.unique_sql_cn_id; + beentry->st_unique_sql_key.user_id = u_sess->unique_sql_cxt.unique_sql_user_id; + } +} + void pgstat_report_queryid(uint64 queryid) { volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; @@ -3645,14 +3679,18 @@ void pgstat_report_queryid(uint64 queryid) * protocol. The update must appear atomic in any case. 
*/ beentry->st_queryid = queryid; - if (queryid == 0) { - beentry->st_unique_sql_key.unique_sql_id = 0; - beentry->st_unique_sql_key.cn_id = 0; - beentry->st_unique_sql_key.user_id = 0; - } else { - beentry->st_unique_sql_key.unique_sql_id = u_sess->unique_sql_cxt.unique_sql_id; - beentry->st_unique_sql_key.cn_id = u_sess->unique_sql_cxt.unique_sql_cn_id; - beentry->st_unique_sql_key.user_id = u_sess->unique_sql_cxt.unique_sql_user_id; +} + +void pgstat_report_trace_id(knl_u_trace_context *trace_cxt, bool is_report_trace_id) +{ + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + if (IS_PGSTATE_TRACK_UNDEFINE) + return; + if (is_report_trace_id) { + errno_t rc = + memcpy_s((void*)beentry->trace_cxt.trace_id, MAX_TRACE_ID_SIZE, trace_cxt->trace_id, + strlen(trace_cxt->trace_id) + 1); + securec_check(rc, "\0", "\0"); + } } @@ -4483,12 +4521,6 @@ const char* pgstat_get_wait_io(WaitEventIO w) case WAIT_EVENT_LOGCTRL_SLEEP: event_name = "LOGCTRL_SLEEP"; break; - case WAIT_EVENT_COMPRESS_ADDRESS_FILE_FLUSH: - event_name = "PCA_FLUSH"; - break; - case WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC: - event_name = "PCA_SYNC"; - break; /* no default case, so that compiler will warn */ case IO_EVENT_NUM: break; @@ -4501,15 +4533,21 @@ const char* pgstat_get_wait_io(WaitEventIO w) case WAIT_EVENT_UNDO_FILE_WRITE: event_name = "UndoFileWrite"; break; - case WAIT_EVENT_UNDO_FILE_FLUSH: - event_name = "UndoFileFlush"; - break; case WAIT_EVENT_UNDO_FILE_SYNC: event_name = "UndoFileSync"; break; case WAIT_EVENT_UNDO_FILE_EXTEND: event_name = "UndoFileExtend"; break; + case WAIT_EVENT_UNDO_FILE_UNLINK: + event_name = "UndoFileUnlink"; + break; + case WAIT_EVENT_UNDO_META_SYNC: + event_name = "UndoMetaSync"; + break; + default: + event_name = "unknown wait event"; + break; + } return event_name; } @@ -4749,6 +4787,92 @@ ThreadId* pgstat_get_stmttag_write_entry(int* num) return threads; } +/* + * @Description: get pid from status entries by application name. + * @IN appName: application name + * @OUT resultEntryNum: entries count + * @Return: all node status entries whose names are 'application name' + */ +PgBackendStatusNode* pgstat_get_backend_status_by_appname(const char* appName, int* resultEntryNum) +{ + int idx = 0; + + /* Initialize result number to 0, in case of return NULL directly */ + if (resultEntryNum != NULL) { + *resultEntryNum = 0; + } + + /* If BackendStatusArray is NULL, we will get it from other thread */ + if (t_thrd.shemem_ptr_cxt.BackendStatusArray == NULL) { + if (PgBackendStatusArray != NULL) { + t_thrd.shemem_ptr_cxt.BackendStatusArray = PgBackendStatusArray; + } else { + return NULL; + } + } + + /* Get all status entries whose procpid or sessionid is valid */ + uint32 numBackends = 0; + PgBackendStatusNode* node = gs_stat_read_current_status(&numBackends); + + /* If all entries' procpid or sessionid are invalid, numBackends is 0 and we should return directly */ + if (numBackends == 0) { + FreeBackendStatusNodeMemory(node); + return NULL; + } + + /* This function is not under PgstatCollectorMain, so this pointer is NULL */ + Assert(!u_sess->stat_cxt.pgStatRunningInCollector); + + /* Initialize head pointer; the nodes form a linked list. 
This list holds the nodes that do not match appName */ + PgBackendStatusNode* otherNodeList = (PgBackendStatusNode*)palloc(sizeof(PgBackendStatusNode)); + PgBackendStatusNode* otherNodeListHead = otherNodeList; + otherNodeList->data = NULL; + otherNodeList->next = NULL; + /* Initialize head pointer; the nodes form a linked list. This list holds the nodes that match appName */ + PgBackendStatusNode* resultNodeList = (PgBackendStatusNode*)palloc(sizeof(PgBackendStatusNode)); + PgBackendStatusNode* resultNodeListHead = resultNodeList; + resultNodeList->data = NULL; + resultNodeList->next = NULL; + + while (node != NULL) { + PgBackendStatus* beentry = node->data; + + /* If the backend thread is valid and the application name of beentry equals appName, record this thread pid */ + if (beentry != NULL) { + if (beentry->st_appname == NULL || beentry->st_tid < 0 || strcmp(beentry->st_appname, appName) != 0) { + /* Node name does not match appName, link this pointer to otherNodeList */ + otherNodeList->next = node; + otherNodeList = otherNodeList->next; + node = node->next; + continue; + } + + /* Node name matches appName, link this pointer to resultNodeList */ + resultNodeList->next = node; + resultNodeList = resultNodeList->next; + idx++; + } else { + /* If beentry is NULL, link this pointer to otherNodeList so its memory can be freed */ + otherNodeList->next = node; + otherNodeList = otherNodeList->next; + } + node = node->next; + } + /* Must set tail pointer's next to NULL to separate the node list into otherNodeList and resultNodeList */ + otherNodeList->next = NULL; + resultNodeList->next = NULL; + + if (resultEntryNum != NULL) { + *resultEntryNum = idx; + } + + /* Because we have separated the node list into two lists, just free the first list here. */ + FreeBackendStatusNodeMemory(otherNodeListHead); + + return resultNodeListHead; +} + /* ---------- * pgstat_get_backend_current_activity() - * @@ -5640,6 +5764,22 @@ static void pgstat_write_statsfile(bool permanent) fputc('d', fpout); } + if (g_instance.repair_cxt.global_repair_bad_block_stat != NULL) { + LWLockAcquire(RepairBadBlockStatHashLock, LW_SHARED); + HASH_SEQ_STATUS repairStat; + BadBlockEntry* repairEntry; + // write the bad page information recorded in global_repair_bad_block_stat. + hash_seq_init(&repairStat, g_instance.repair_cxt.global_repair_bad_block_stat); + while ((repairEntry = (BadBlockEntry*)hash_seq_search(&repairStat)) != NULL) { + fputc('R', fpout); + rc = fwrite(repairEntry, sizeof(BadBlockEntry), 1, fpout); + (void)rc; /* we'll check for error with ferror */ + } + LWLockRelease(RepairBadBlockStatHashLock); + // Use 'r' as the terminator + fputc('r', fpout); + } + /* * No more output to be done. Close the temp file and replace the old * pgstat.stat with it. 
The ferror() check replaces testing for error @@ -5707,6 +5847,8 @@ static HTAB* pgstat_read_statsfile(Oid onlydb, bool permanent) PgStat_StatTabEntry tabbuf; PgStat_StatFuncEntry funcbuf; PgStat_StatFuncEntry* funcentry = NULL; + BadBlockEntry* repairEntry = NULL; + BadBlockEntry repairBuf; HASHCTL hash_ctl; HTAB* dbhash = NULL; HTAB* tabhash = NULL; @@ -5914,6 +6056,34 @@ static HTAB* pgstat_read_statsfile(Oid onlydb, bool permanent) rc = memcpy_s(funcentry, sizeof(PgStat_StatFuncEntry), &funcbuf, sizeof(funcbuf)); securec_check(rc, "", ""); break; + case 'R': + + if (g_instance.repair_cxt.global_repair_bad_block_stat == NULL) { + break; + } + + if (fread(&repairBuf, 1, sizeof(BadBlockEntry), fpin) != sizeof(BadBlockEntry)) { + ereport(u_sess->stat_cxt.pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", statfile))); + goto done; + } + /* + * Add to the bad block hash + */ + repairEntry = (BadBlockEntry*)hash_search(g_instance.repair_cxt.global_repair_bad_block_stat, + (void*)&(repairBuf.key), HASH_ENTER, &found); + if (found) { + ereport(u_sess->stat_cxt.pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", statfile))); + } + + rc = memcpy_s(repairEntry, sizeof(BadBlockEntry), &repairBuf, sizeof(BadBlockEntry)); + securec_check(rc, "", ""); + + break; + + case 'r': + break; /* * 'E' The EOF marker of a complete stats file. @@ -7145,15 +7315,15 @@ static void prepare_calculate(SqlRTInfoArray* sql_rt_info, int* counter) else sql_rt_info_count = sql_rt_info->sqlRTIndex; + *counter = 0; if (sql_rt_info_count == 0) { sql_rt_info->isFull = false; - *counter = sql_rt_info_count; LWLockRelease(PercentileLock); return; } if (u_sess->percentile_cxt.LocalsqlRT != NULL) - pfree(u_sess->percentile_cxt.LocalsqlRT); + pfree_ext(u_sess->percentile_cxt.LocalsqlRT); u_sess->percentile_cxt.LocalsqlRT = (SqlRTInfo*)MemoryContextAlloc( SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DFX), sql_rt_info_count * sizeof(SqlRTInfo)); @@ -7184,13 +7354,13 @@ static void prepare_calculate_single(const SqlRTInfoArray* sql_rt_info, int* cou sql_rt_info_count = MAX_SQL_RT_INFO_COUNT; } + *counter = 0; if (sql_rt_info_count == 0) { - *counter = sql_rt_info_count; return; } if (u_sess->percentile_cxt.LocalsqlRT != NULL) - pfree(u_sess->percentile_cxt.LocalsqlRT); + pfree_ext(u_sess->percentile_cxt.LocalsqlRT); u_sess->percentile_cxt.LocalsqlRT = (SqlRTInfo*)MemoryContextAllocZero( SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DFX), sql_rt_info_count * sizeof(SqlRTInfo)); @@ -8302,6 +8472,10 @@ void initMySessionMemoryEntry(void) static void endMySessionMemoryEntry(int code, Datum arg) { + + /* release the memory on mySessionMemoryEntry */ + pgstat_release_session_memory_entry(); + /* mark my entry not active. 
*/ t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->isValid = false; @@ -9196,7 +9370,7 @@ TableDistributionInfo* get_remote_stat_double_write(TupleDesc tuple_desc) appendStringInfo(&buf, "SELECT node_name, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, " "total_writes, low_threshold_writes, high_threshold_writes, " - "total_pages, low_threshold_pages, high_threshold_pages " + "total_pages, low_threshold_pages, high_threshold_pages, file_id " "FROM local_double_write_stat();"); /* send sql and parallel fetch distribution info from all data nodes */ @@ -9288,8 +9462,8 @@ TableDistributionInfo* streaming_hadr_get_recovery_stat(TupleDesc tuple_desc) appendStringInfo(&buf, "SELECT hadr_sender_node_name, hadr_receiver_node_name, " "source_ip, source_port, dest_ip, dest_port, current_rto, target_rto, current_rpo, target_rpo, " - "current_sleep_time FROM " - "hadr_local_rto_and_rpo_stat();" + "rto_sleep_time, rpo_sleep_time FROM " + "gs_hadr_local_rto_and_rpo_stat();"); /* send sql and parallel fetch distribution info from all data nodes */ distribuion_info->state = RemoteFunctionResultHandler(buf.data, NULL, NULL, true, EXEC_ON_ALL_NODES, true); @@ -9317,6 +9491,34 @@ TableDistributionInfo* get_remote_node_xid_csn(TupleDesc tuple_desc) return distribuion_info; } +#ifdef ENABLE_MULTIPLE_NODES +TableDistributionInfo* get_remote_index_status(TupleDesc tuple_desc, const char *schname, const char *idxname) +{ + StringInfoData buf; + TableDistributionInfo* distribuion_info = NULL; + + /* the memory palloc'ed here should be freed by the caller. */ + distribuion_info = (TableDistributionInfo*)palloc0(sizeof(TableDistributionInfo)); + + initStringInfo(&buf); + + appendStringInfo(&buf, "select a.node_name::text, b.indisready, b.indisvalid from pg_index b " + "left join pgxc_node a on b.xc_node_id = a.node_id " + "where indexrelid = (select oid from pg_class where relnamespace = " + "(select oid from pg_namespace where nspname = %s) " + "and relname = %s);", quote_literal_cstr(schname), quote_literal_cstr(idxname)); + + /* send sql and parallel fetch distribution info from all nodes */ + distribuion_info->state = RemoteFunctionResultHandler(buf.data, NULL, NULL, true, EXEC_ON_ALL_NODES, true); + distribuion_info->slot = MakeSingleTupleTableSlot(tuple_desc); + + pfree_ext(buf.data); + + return distribuion_info; +} + +#endif + /* * the whole process statistics of bad block * used for query statistics of bad block @@ -9710,3 +9912,13 @@ void pgstat_reply_percentile_record() g_instance.stat_cxt.calculate_on_other_cn = false; pq_flush(); } + +void pgstat_release_session_memory_entry() +{ + if (t_thrd.shemem_ptr_cxt.mySessionMemoryEntry != NULL) { + pfree_ext(t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->query_plan); + t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->plan_size = 0; + pfree_ext(t_thrd.shemem_ptr_cxt.mySessionMemoryEntry->query_plan_issue); + } +} + diff --git a/src/gausskernel/process/postmaster/postmaster.cpp b/src/gausskernel/process/postmaster/postmaster.cpp index f875e5a34..8d0e09654 100644 --- a/src/gausskernel/process/postmaster/postmaster.cpp +++ b/src/gausskernel/process/postmaster/postmaster.cpp @@ -87,6 +87,7 @@ #include "access/xact.h" #include "bootstrap/bootstrap.h" #include "commands/matview.h" +#include "commands/verify.h" #include "catalog/pg_control.h" #include "dbmind/hypopg_index.h" #include "instruments/instr_unique_sql.h" @@ -154,8 +155,10 @@ #include "postmaster/walwriter.h" #include "postmaster/walwriterauxiliary.h" #include "postmaster/lwlockmonitor.h" 
+#include "postmaster/barrier_preparse.h" #include "replication/walreceiver.h" #include "replication/datareceiver.h" +#include "replication/logical.h" #include "replication/slot.h" #include "storage/smgr/fd.h" #include "storage/ipc.h" @@ -214,7 +217,9 @@ #include "distributelayer/streamMain.h" #include "distributelayer/streamProducer.h" +#ifndef ENABLE_LITE_MODE #include "eSDKOBS.h" +#endif #include "cjson/cJSON.h" #include "tcop/stmt_retry.h" @@ -254,6 +259,7 @@ #define static #endif +extern void InitGlobalSeq(); extern void auto_explain_init(void); extern int S3_init(); static const int RECOVERY_PARALLELISM_DEFAULT = 1; @@ -295,6 +301,7 @@ volatile int Shutdown = NoShutdown; extern void gs_set_hs_shm_data(HaShmemData* ha_shm_data); extern void ReaperBackendMain(); +extern void AdjustThreadAffinity(); #define EXTERN_SLOTS_NUM 17 volatile PMState pmState = PM_INIT; @@ -398,10 +405,12 @@ static void RemoteHostInitilize(Port* port); static int StartupPacketInitialize(Port* port); static void PsDisplayInitialize(Port* port); static void SetListenSocket(ReplConnInfo **replConnArray, bool *listen_addr_saved); +static void UpdateArchiveSlotStatus(); static ServerMode get_cur_mode(void); static int get_cur_repl_num(void); +static void PMInitDBStateFile(); static void PMReadDBStateFile(GaussState* state); static void PMSetDBStateFile(GaussState* state); static void PMUpdateDBState(DbState db_state, ServerMode mode, int conn_num); @@ -436,7 +445,7 @@ bool PMstateIsRun(void); #define GTM_LITE_CN (GTM_LITE_MODE && IS_PGXC_COORDINATOR) #ifdef ENABLE_MULTIPLE_NODES -#define START_BARRIER_CREATOR IS_PGXC_COORDINATOR +#define START_BARRIER_CREATOR (IS_PGXC_COORDINATOR && !IS_DISASTER_RECOVER_MODE) #else #define START_BARRIER_CREATOR IS_PGXC_DATANODE #endif @@ -454,6 +463,10 @@ static void InitPostmasterDeathWatchHandle(void); static void NotifyShutdown(void); static void NotifyProcessActive(void); static int init_stream_comm(void); +#ifndef ENABLE_MULTIPLE_NODES +static void handle_change_role_signal(char *role); +static bool CheckSignalByFile(const char *filename, void *infoPtr, size_t infoSize); +#endif int GaussDbThreadMain(knl_thread_arg* arg); const char* GetThreadName(knl_thread_role role); @@ -865,6 +878,7 @@ bool SetDBStateFileState(DbState state, bool optional) if (strlen(gaussdb_state_file) > 0) { char temppath[MAXPGPATH] = {0}; GaussState s; + int len = 0; /* zero it in case gaussdb.state doesn't exist. */ int rc = memset_s(&s, sizeof(GaussState), 0, sizeof(GaussState)); @@ -886,8 +900,9 @@ bool SetDBStateFileState(DbState state, bool optional) } /* Read old content from file. 
*/ - int len = read(fd, &s, sizeof(GaussState)); - if (len != sizeof(GaussState)) { + len = read(fd, &s, sizeof(GaussState)); + /* sizeof(int) is for current_connect_idx of GaussState */ + if ((len != sizeof(GaussState)) && (len != sizeof(GaussState) - sizeof(int))) { write_stderr("Failed to read gaussdb.state: %d", errno); (void)close(fd); return false; @@ -948,10 +963,12 @@ void signal_sysloger_flush(void) void SetShmemCxt(void) { int thread_pool_worker_num = 0; + int thread_pool_stream_proc_num = 0; if (g_threadPoolControler != NULL) { thread_pool_worker_num = g_threadPoolControler->GetThreadNum(); g_instance.shmem_cxt.ThreadPoolGroupNum = g_threadPoolControler->GetGroupNum(); + thread_pool_stream_proc_num = GetThreadPoolStreamProcNum(); } else { g_instance.shmem_cxt.ThreadPoolGroupNum = 0; } @@ -966,7 +983,10 @@ void SetShmemCxt(void) g_instance.attr.attr_storage.max_undo_workers + 1 + AUXILIARY_BACKENDS + AV_LAUNCHER_PROCS + - g_max_worker_processes; + g_max_worker_processes + + thread_pool_stream_proc_num; + +#ifndef ENABLE_LITE_MODE g_instance.shmem_cxt.MaxReserveBackendId = g_instance.attr.attr_sql.job_queue_processes + g_instance.attr.attr_storage.autovacuum_max_workers + g_instance.attr.attr_storage.max_undo_workers + 1 + @@ -974,7 +994,15 @@ void SetShmemCxt(void) AUXILIARY_BACKENDS + AV_LAUNCHER_PROCS + g_max_worker_processes; - +#else + g_instance.shmem_cxt.MaxReserveBackendId = g_instance.attr.attr_sql.job_queue_processes + + g_instance.attr.attr_storage.autovacuum_max_workers + + g_instance.attr.attr_storage.max_undo_workers + 1 + + thread_pool_worker_num + + AUXILIARY_BACKENDS + + AV_LAUNCHER_PROCS + + g_max_worker_processes; +#endif Assert(g_instance.shmem_cxt.MaxBackends <= MAX_BACKENDS); } @@ -1071,6 +1099,15 @@ bool isNotWildcard(void* val1, void* val2) return (strcmp(curhost, nodename) == 0) ? false : true; } +void initKnlRTOContext(void) +{ + if (g_instance.rto_cxt.rto_standby_data == NULL) { + g_instance.rto_cxt.rto_standby_data = (RTOStandbyData*)MemoryContextAllocZero( + INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(RTOStandbyData) * + g_instance.attr.attr_storage.max_wal_senders); + } +} + /* * Postmaster main entry point */ @@ -1083,7 +1120,6 @@ int PostmasterMain(int argc, char* argv[]) bool listen_addr_saved = false; int use_pooler_port = -1; int i; - GaussState state; OptParseContext optCtxt; errno_t rc = 0; Port port; @@ -1123,6 +1159,7 @@ int PostmasterMain(int argc, char* argv[]) */ initialize_feature_flags(); +#ifndef ENABLE_LITE_MODE /* * @OBS * Create a global OBS CA object shared among threads @@ -1130,6 +1167,7 @@ int PostmasterMain(int argc, char* argv[]) initOBSCacheObject(); S3_init(); +#endif /* set memory manager for minizip libs */ pm_set_unzip_memfuncs(); @@ -1138,10 +1176,12 @@ int PostmasterMain(int argc, char* argv[]) cJSON_Hooks hooks = {cJSON_internal_malloc, cJSON_internal_free}; cJSON_InitHooks(&hooks); +#ifdef ENABLE_LLVM_COMPILE /* * Prepare codegen enviroment. 
*/ CodeGenProcessInitialize(); +#endif /* Initialize paths to installation files */ getInstallationPaths(argv[0]); @@ -1352,11 +1392,14 @@ int PostmasterMain(int argc, char* argv[]) case 'X': /* stop barrier */ if ((optCtxt.optarg != NULL) && (strlen(optCtxt.optarg) > 0)) { - rc = strncpy_s(g_instance.stopBarrierId, MAX_BARRIER_ID_LENGTH, (char *)optCtxt.optarg, - strlen(optCtxt.optarg)); - securec_check(rc, "\0", "\0"); - ereport(LOG, (errmsg("Set stop barrierID %s", g_instance.stopBarrierId))); - } + if (strlen(optCtxt.optarg) > MAX_BARRIER_ID_LENGTH) { + ereport(FATAL, (errmsg("the options of -X is too long"))); + } + rc = strncpy_s(g_instance.csn_barrier_cxt.stopBarrierId, MAX_BARRIER_ID_LENGTH, + (char *)optCtxt.optarg, strlen(optCtxt.optarg)); + securec_check(rc, "\0", "\0"); + ereport(LOG, (errmsg("Set stop barrierID %s", g_instance.csn_barrier_cxt.stopBarrierId))); + } break; case 'c': case '-': { @@ -1521,6 +1564,8 @@ int PostmasterMain(int argc, char* argv[]) gs_signal_monitor_startup(); g_instance.attr.attr_common.Logging_collector = true; + g_instance.global_sysdbcache.Init(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); + CreateLocalSysDBCache(); g_instance.pid_cxt.SysLoggerPID = SysLogger_Start(); FencedUDFMasterMain(0, NULL); return 0; @@ -1538,6 +1583,7 @@ int PostmasterMain(int argc, char* argv[]) /* Set parallel recovery config */ ConfigRecoveryParallelism(); + ProcessRedoCpuBindInfo(); /* * Other one-time internal sanity checks can go here, if they are fast. * (Put any slow processing further down, after postmaster.pid creation.) @@ -1547,6 +1593,8 @@ int PostmasterMain(int argc, char* argv[]) ExitPostmaster(1); } + initKnlRTOContext(); + /* * Now that we are done processing the postmaster arguments, reset * getopt(3) library so that it will work correctly in subprocesses. 
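/*
 * A minimal illustrative sketch of the validate-then-copy pattern that the -X
 * stop-barrier handling above relies on: reject an overlong argument before the
 * bounded copy, so strncpy_s can never truncate the value silently. The helper
 * name is hypothetical and not part of this patch; it assumes the securec
 * primitives (errno_t, strncpy_s, securec_check) used throughout this file.
 */
static int CopyFixedSizeOption(char *dst, size_t dstSize, const char *src)
{
    size_t n = strlen(src);
    if (n >= dstSize) {
        return -1; /* caller reports the option as too long and bails out */
    }
    errno_t rc = strncpy_s(dst, dstSize, src, n);
    securec_check(rc, "\0", "\0");
    return 0;
}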
@@ -1915,6 +1963,7 @@ int PostmasterMain(int argc, char* argv[]) } else { g_instance.attr.attr_common.enable_thread_pool = false; g_threadPoolControler = NULL; + AdjustThreadAffinity(); } } @@ -1949,19 +1998,8 @@ int PostmasterMain(int argc, char* argv[]) */ SetHaShmemData(); - rc = memset_s(&state, sizeof(state), 0, sizeof(state)); - securec_check(rc, "", ""); - state.conn_num = t_thrd.postmaster_cxt.HaShmData->repl_list_num; - state.mode = t_thrd.postmaster_cxt.HaShmData->current_mode; - state.state = STARTING_STATE; - state.lsn = 0; - state.term = 0; - state.sync_stat = false; - state.ha_rebuild_reason = NONE_REBUILD; - PMSetDBStateFile(&state); - ereport(LOG, - (errmsg("create gaussdb state file success: db state(STARTING_STATE), server mode(%s)", - wal_get_role_string(t_thrd.postmaster_cxt.HaShmData->current_mode)))); + PMInitDBStateFile(); + set_max_safe_fds(); /* @@ -2004,6 +2042,7 @@ int PostmasterMain(int argc, char* argv[]) write_nondefault_variables(PGC_POSTMASTER); #endif +#ifndef ENABLE_LITE_MODE #if defined (ENABLE_MULTIPLE_NODES) || defined (ENABLE_PRIVATEGAUSS) /* init hotpatch */ if (hotpatch_remove_signal_file(t_thrd.proc_cxt.DataDir) == HP_OK) { @@ -2013,6 +2052,7 @@ int PostmasterMain(int argc, char* argv[]) write_stderr("hotpatch init failed ret is %d!\n", ret); } } +#endif #endif /* * Write the external PID file if requested @@ -2087,6 +2127,9 @@ int PostmasterMain(int argc, char* argv[]) /* initialize workload manager */ InitializeWorkloadManager(); + g_instance.global_sysdbcache.Init(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); + CreateLocalSysDBCache(); + /* Init proc's subxid cache context, parent is g_instance.instance_context */ ProcSubXidCacheContext = AllocSetContextCreate(g_instance.instance_context, "ProcSubXidCacheContext", @@ -2113,8 +2156,6 @@ int PostmasterMain(int argc, char* argv[]) ngroup_info_hash_create(); /*init Role id hash table*/ InitRoleIdHashTable(); - /* pcmap */ - RealInitialMMapLockArray(); /* init unique sql */ InitUniqueSQL(); /* init hypo index */ @@ -2130,6 +2171,8 @@ int PostmasterMain(int argc, char* argv[]) InitPercentile(); /* init dynamic statement track control */ InitTrackStmtControl(); + /* init global sequence */ + InitGlobalSeq(); #ifdef ENABLE_MULTIPLE_NODES /* init compaction */ CompactionProcess::init_instance(); @@ -2190,6 +2233,7 @@ int PostmasterMain(int argc, char* argv[]) if (g_instance.attr.attr_security.enable_tde) { /* init cloud KMS message instance */ TDE::CKMSMessage::get_instance().init(); + TDE::CKMSMessage::get_instance().load_user_info(); /* init TDE storage hash table */ if (IS_PGXC_DATANODE) { TDE::TDEKeyStorage::get_instance().init(); @@ -2197,8 +2241,11 @@ int PostmasterMain(int argc, char* argv[]) } } +#ifndef ENABLE_LITE_MODE if (g_instance.attr.attr_storage.enable_adio_function) AioResourceInitialize(); +#endif + /* start alarm checker thread. */ if (!dummyStandbyMode) g_instance.pid_cxt.AlarmCheckerPID = startAlarmChecker(); @@ -2472,17 +2519,6 @@ static void CheckExtremeRtoGUCConflicts(void) errhint("recommend config \"wal_receiver_buffer_size=64MB\""))); } - if ((g_instance.attr.attr_storage.recovery_parse_workers > 1) && (u_sess->attr.attr_storage.target_rto > 0 || - u_sess->attr.attr_storage.hadr_recovery_time_target > 0 || - u_sess->attr.attr_storage.hadr_recovery_point_target > 0)) { - ereport(WARNING, (errmsg("Extreme RTO and flow control cannot be enabled at the same time."), - errhint("We have reset the flow control to the off state. 
" - "Please modify the postgresql.conf file."))); - - u_sess->attr.attr_storage.target_rto = 0; - u_sess->attr.attr_storage.hadr_recovery_time_target = 0; - } - #ifndef ENABLE_MULTIPLE_NODES if ((g_instance.attr.attr_storage.recovery_parse_workers > 1) && g_instance.attr.attr_storage.EnableHotStandby) { ereport(ERROR, @@ -2651,6 +2687,7 @@ static void ArchObsThreadStart(int threadIndex) errno_t rc = EOK; rc = memcpy_s(g_instance.archive_thread_info.slotName[threadIndex], NAMEDATALEN, slotName, strlen(slotName)); securec_check(rc, "\0", "\0"); + g_instance.archive_thread_info.obsArchPID[threadIndex] = initialize_util_thread(ARCH, g_instance.archive_thread_info.slotName[threadIndex]); if (START_BARRIER_CREATOR && pmState == PM_RUN) { @@ -2921,6 +2958,7 @@ static int ServerLoop(void) } ereport(DEBUG4, (errmsg("postmaster poll event process end."))); + /* * If the AioCompleters have not been started start them. * These should remain run indefinitely. @@ -2957,22 +2995,29 @@ static int ServerLoop(void) /* If we have lost the audit collector, try to start a new one */ #ifndef ENABLE_MULTIPLE_NODES - if (g_instance.pid_cxt.PgAuditPID == 0 && u_sess->attr.attr_security.Audit_enabled && + if (g_instance.pid_cxt.PgAuditPID != NULL && u_sess->attr.attr_security.Audit_enabled && (pmState == PM_RUN || pmState == PM_HOT_STANDBY) && !dummyStandbyMode) { - g_instance.pid_cxt.PgAuditPID = pgaudit_start(); - ereport(LOG, (errmsg("auditor process started, pid=%lu", g_instance.pid_cxt.PgAuditPID))); + pgaudit_start_all(); + } + + if (g_instance.comm_cxt.isNeedChangeRole) { + char role[MAXPGPATH] = {0}; + if (!CheckSignalByFile(ChangeRoleFile, role, MAXPGPATH)) { + ereport(WARNING, (errmsg("Could not read changerole file"))); + g_instance.comm_cxt.isNeedChangeRole = false; + } + handle_change_role_signal(role); + g_instance.comm_cxt.isNeedChangeRole = false; } #else - if (g_instance.pid_cxt.PgAuditPID == 0 && u_sess->attr.attr_security.Audit_enabled && pmState == PM_RUN && + if (g_instance.pid_cxt.PgAuditPID != NULL && u_sess->attr.attr_security.Audit_enabled && pmState == PM_RUN && !dummyStandbyMode) { - g_instance.pid_cxt.PgAuditPID = pgaudit_start(); - ereport(LOG, (errmsg("auditor process started, pid=%lu", g_instance.pid_cxt.PgAuditPID))); + pgaudit_start_all(); } -#endif +#endif /* If u_sess->attr.attr_security.Audit_enabled is set to false, terminate auditor process. */ - if (g_instance.pid_cxt.PgAuditPID != 0 && !u_sess->attr.attr_security.Audit_enabled) { - signal_child(g_instance.pid_cxt.PgAuditPID, SIGQUIT); - ereport(LOG, (errmsg("parameter audit_enabled is set to false, terminate auditor process."))); + if (g_instance.pid_cxt.PgAuditPID != NULL && !u_sess->attr.attr_security.Audit_enabled) { + pgaudit_stop_all(); } if (g_instance.pid_cxt.AlarmCheckerPID == 0 && !dummyStandbyMode) @@ -2988,6 +3033,21 @@ static int ServerLoop(void) g_instance.pid_cxt.sharedStorageXlogCopyThreadPID = initialize_util_thread(SHARE_STORAGE_XLOG_COPYER); } +#ifdef ENABLE_MULTIPLE_NODES + /* when execuating xlog redo in standby cluster, + * pmState is PM_HOT_STANDBY, neither PM_RECOVERY nor PM_RUN + */ + if (pmState == PM_HOT_STANDBY && g_instance.pid_cxt.BarrierPreParsePID == 0 && + !dummyStandbyMode && IS_DISASTER_RECOVER_MODE) { + g_instance.pid_cxt.BarrierPreParsePID = initialize_util_thread(BARRIER_PREPARSE); + } +#endif + /* + * If the startup thread is running, need check the page repair thread. 
+ */ + if (g_instance.pid_cxt.PageRepairPID == 0 && (pmState == PM_RECOVERY || pmState == PM_HOT_STANDBY)) { + g_instance.pid_cxt.PageRepairPID = initialize_util_thread(PAGEREPAIR_THREAD); + } /* * If no background writer process is running, and we are not in a * state that prevents it, start one. It doesn't matter if this @@ -3066,15 +3126,18 @@ static int ServerLoop(void) /* * Start the Undo launcher thread if we need to. */ - if (g_instance.pid_cxt.UndoLauncherPID == 0 && pmState == PM_RUN && !dummyStandbyMode) { + if (g_instance.attr.attr_storage.enable_ustore && + g_instance.pid_cxt.UndoLauncherPID == 0 && + pmState == PM_RUN && !dummyStandbyMode) { g_instance.pid_cxt.UndoLauncherPID = initialize_util_thread(UNDO_LAUNCHER); } - if (g_instance.pid_cxt.GlobalStatsPID == 0 && pmState == PM_RUN && !dummyStandbyMode) { + if (g_instance.attr.attr_storage.enable_ustore && + g_instance.pid_cxt.GlobalStatsPID == 0 && + pmState == PM_RUN && !dummyStandbyMode) { g_instance.pid_cxt.GlobalStatsPID = initialize_util_thread(GLOBALSTATS_THREAD); } - -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) if (u_sess->attr.attr_common.upgrade_mode == 0 && g_instance.pid_cxt.ApplyLauncerPID == 0 && pmState == PM_RUN && !dummyStandbyMode) { g_instance.pid_cxt.ApplyLauncerPID = initialize_util_thread(APPLY_LAUNCHER); @@ -3108,11 +3171,13 @@ static int ServerLoop(void) signal_child(g_instance.pid_cxt.PgJobSchdPID, SIGTERM); } +#ifndef ENABLE_LITE_MODE /* If we have lost the barrier creator thread, try to start a new one */ - if (START_BARRIER_CREATOR && g_instance.pid_cxt.BarrierCreatorPID == 0 && - pmState == PM_RUN && g_instance.archive_obs_cxt.archive_slot_num != 0) { + if (START_BARRIER_CREATOR && g_instance.pid_cxt.BarrierCreatorPID == 0 && pmState == PM_RUN && + (g_instance.archive_obs_cxt.archive_slot_num != 0 || START_AUTO_CSN_BARRIER)) { g_instance.pid_cxt.BarrierCreatorPID = initialize_util_thread(BARRIER_CREATOR); } +#endif /* If we have lost the archiver, try to start a new one */ if (!dummyStandbyMode) { @@ -3139,7 +3204,7 @@ static int ServerLoop(void) /* If we have lost the stats collector, try to start a new one */ if ((IS_PGXC_COORDINATOR || (g_instance.role == VSINGLENODE)) && g_instance.pid_cxt.SnapshotPID == 0 && - pmState == PM_RUN) + u_sess->attr.attr_common.enable_wdr_snapshot && pmState == PM_RUN) g_instance.pid_cxt.SnapshotPID = snapshot_start(); if (ENABLE_ASP && g_instance.pid_cxt.AshPID == 0 && pmState == PM_RUN && !dummyStandbyMode) @@ -3149,7 +3214,8 @@ static int ServerLoop(void) if (ENABLE_STATEMENT_TRACK && g_instance.pid_cxt.StatementPID == 0 && pmState == PM_RUN) g_instance.pid_cxt.StatementPID = initialize_util_thread(TRACK_STMT_WORKER); - if ((IS_PGXC_COORDINATOR || IS_SINGLE_NODE) && g_instance.pid_cxt.PercentilePID == 0 && + if ((IS_PGXC_COORDINATOR || IS_SINGLE_NODE) && u_sess->attr.attr_common.enable_instr_rt_percentile && + g_instance.pid_cxt.PercentilePID == 0 && pmState == PM_RUN) g_instance.pid_cxt.PercentilePID = initialize_util_thread(PERCENTILE_WORKER); @@ -3170,6 +3236,7 @@ static int ServerLoop(void) (g_instance.pid_cxt.CPMonitorPID == 0) && (pmState == PM_RUN) && !dummyStandbyMode) g_instance.pid_cxt.CPMonitorPID = initialize_util_thread(WLM_CPMONITOR); +#ifndef ENABLE_LITE_MODE /* If we have lost the twophase cleaner, try to start a new one */ if ( #ifdef ENABLE_MULTIPLE_NODES @@ -3185,22 +3252,28 @@ static int ServerLoop(void) /* If we have lost the LWLock monitor, try to start a new one */ if 
(g_instance.pid_cxt.FaultMonitorPID == 0 && pmState == PM_RUN) g_instance.pid_cxt.FaultMonitorPID = initialize_util_thread(FAULTMONITOR); - +#endif /* If we have lost the heartbeat service, try to start a new one */ if (NeedHeartbeat()) g_instance.pid_cxt.HeartbeatPID = initialize_util_thread(HEARTBEAT); /* If we have lost the csnmin sync thread, try to start a new one */ - if (GTM_LITE_CN && g_instance.csnminsync_cxt.is_fcn_ccn && - g_instance.pid_cxt.CsnminSyncPID == 0 && pmState == PM_RUN) { + if (GTM_LITE_CN && g_instance.pid_cxt.CsnminSyncPID == 0 && pmState == PM_RUN) { + g_instance.pid_cxt.CsnminSyncPID = initialize_util_thread(CSNMIN_SYNC); + } + if (IS_CN_DISASTER_RECOVER_MODE && g_instance.pid_cxt.CsnminSyncPID == 0 && pmState == PM_HOT_STANDBY) { g_instance.pid_cxt.CsnminSyncPID = initialize_util_thread(CSNMIN_SYNC); } - if (g_instance.pid_cxt.UndoRecyclerPID == 0 && pmState == PM_RUN) { + if (g_instance.attr.attr_storage.enable_ustore && + g_instance.pid_cxt.UndoRecyclerPID == 0 && + pmState == PM_RUN) { g_instance.pid_cxt.UndoRecyclerPID = initialize_util_thread(UNDO_RECYCLER); } - if (g_instance.pid_cxt.GlobalStatsPID == 0 && pmState == PM_RUN) { + if (g_instance.attr.attr_storage.enable_ustore && + g_instance.pid_cxt.GlobalStatsPID == 0 && + pmState == PM_RUN) { g_instance.pid_cxt.GlobalStatsPID = initialize_util_thread(GLOBALSTATS_THREAD); } @@ -3367,6 +3440,7 @@ int ProcessStartupPacket(Port* port, bool SSLdone) socklen_t oldTvLen = sizeof(oldTv); bool isTvSeted = false; bool clientIsAutonomousTransaction = false; + bool clientIsLocalHadrWalrcv = false; CHECK_FOR_PROCDIEPENDING(); /* Set recv timeout on coordinator in case of connected from external application */ @@ -3619,6 +3693,8 @@ int ProcessStartupPacket(Port* port, bool SSLdone) t_thrd.role = WAL_DB_SENDER; } else if (strcmp(valptr, "hadr_main_standby") == 0) { t_thrd.role = WAL_HADR_SENDER; + } else if (strcmp(valptr, "hadr_standby_cn") == 0) { + t_thrd.role = WAL_HADR_CN_SENDER; } else if (strcmp(valptr, "standby_cluster") == 0) { t_thrd.role = WAL_SHARE_STORE_SENDER; } else { @@ -3750,6 +3826,9 @@ int ProcessStartupPacket(Port* port, bool SSLdone) } else if (strcmp(valptr, "autonomoustransaction") == 0) { clientIsAutonomousTransaction = true; ereport(DEBUG5, (errmsg("autonomoustransaction connected"))); + } else if (strcmp(valptr, "local_hadr_walrcv") == 0) { + clientIsLocalHadrWalrcv = true; + ereport(DEBUG5, (errmsg("local hadr walreceiver connected"))); } else if (strcmp(valptr, "subscription") == 0) { u_sess->proc_cxt.clientIsSubscription = true; ereport(DEBUG5, (errmsg("subscription connected"))); @@ -3808,11 +3887,11 @@ int ProcessStartupPacket(Port* port, bool SSLdone) /* Inner tool with local sha256 will not be authenicated. 
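The streaming-DR local walreceiver (local_hadr_walrcv) is added to this list, and to the workload-manager whitelist just below, alongside cm_agent and gs_clean.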
*/ if (clientIsCmAgent || clientIsGsClean || clientIsOM || u_sess->proc_cxt.clientIsGsroach || clientIsWDRXdb || clientIsRemoteRead || u_sess->proc_cxt.clientIsGsCtl || u_sess->proc_cxt.clientIsGsrewind || - u_sess->proc_cxt.clientIsGsredis || clientIsAutonomousTransaction) { + u_sess->proc_cxt.clientIsGsredis || clientIsAutonomousTransaction || clientIsLocalHadrWalrcv) { u_sess->proc_cxt.IsInnerMaintenanceTools = true; } /* cm_agent and gs_clean should not be controlled by workload manager */ - if (clientIsCmAgent || clientIsGsClean) { + if (clientIsCmAgent || clientIsGsClean || clientIsLocalHadrWalrcv) { u_sess->proc_cxt.IsWLMWhiteList = true; } #ifdef ENABLE_MULTIPLE_NODES @@ -3856,7 +3935,7 @@ int ProcessStartupPacket(Port* port, bool SSLdone) * can make sense to first make a basebackup and then stream changes * starting from that. */ - if (AM_WAL_SENDER && !AM_WAL_DB_SENDER && !AM_WAL_HADR_SENDER) { + if (AM_WAL_SENDER && !AM_WAL_DB_SENDER && !AM_WAL_HADR_SENDER && !AM_WAL_HADR_CN_SENDER) { port->database_name[0] = '\0'; } /* set special tcp keepalive parameters for build senders */ @@ -3931,7 +4010,7 @@ int ProcessStartupPacket(Port* port, bool SSLdone) * to connect, we do not check. */ if ((!AM_WAL_SENDER && !(isMaintenanceConnection && (clientIsGsql || clientIsCmAgent || - t_thrd.postmaster_cxt.senderToBuildStandby)) && !clientIsRemoteRead) || + t_thrd.postmaster_cxt.senderToBuildStandby || clientIsLocalHadrWalrcv)) && !clientIsRemoteRead) || t_thrd.postmaster_cxt.senderToDummyStandby) { if (PENDING_MODE == hashmdata->current_mode && !IS_PGXC_COORDINATOR) { @@ -3939,8 +4018,8 @@ int ProcessStartupPacket(Port* port, bool SSLdone) errmsg("can not accept connection in pending mode."))); } else { #ifdef ENABLE_MULTIPLE_NODES - if (STANDBY_MODE == hashmdata->current_mode) { - ereport(elevel, (errcode(ERRCODE_CANNOT_CONNECT_NOW), + if (STANDBY_MODE == hashmdata->current_mode && (!IS_DISASTER_RECOVER_MODE || GTM_FREE_MODE)) { + ereport(ERROR, (errcode(ERRCODE_CANNOT_CONNECT_NOW), errmsg("can not accept connection in standby mode."))); } #else @@ -4402,20 +4481,31 @@ void socket_close_on_exec(void) } } -static bool IsNotCrossRegionMode(void) +static void UpdateCrossRegionMode() { - int num = 0; - int totalNum = 0; - for (int i = 1; i < MAX_REPLNODE_NUM; i++) { + volatile HaShmemData* hashmdata = t_thrd.postmaster_cxt.HaShmData; + int i = 0, repl_list_num = 0; + bool is_cross_region = false; + + for (i = 1; i < MAX_REPLNODE_NUM; i++) { if (t_thrd.postmaster_cxt.ReplConnArray[i] != NULL) { - totalNum++; - if (!t_thrd.postmaster_cxt.ReplConnArray[i]->isCrossRegion && - !t_thrd.postmaster_cxt.ReplConnArray[i]->isCascade) { - num++; + repl_list_num++; + if (t_thrd.postmaster_cxt.ReplConnArray[i]->isCrossRegion && + !IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { + is_cross_region = true; } } + if (t_thrd.postmaster_cxt.CrossClusterReplConnArray[i] != NULL) { + repl_list_num++; + } + } + if (is_cross_region != hashmdata->is_cross_region) { + SpinLockAcquire(&hashmdata->mutex); + hashmdata->is_cross_region = is_cross_region; + hashmdata->repl_list_num = repl_list_num; + SpinLockRelease(&hashmdata->mutex); + ereport(LOG, (errmsg("SIGHUP_handler update is_cross_region: %d", hashmdata->is_cross_region))); } - return (totalNum == num); } /* @@ -4492,6 +4582,10 @@ static void SIGHUP_handler(SIGNAL_ARGS) if (g_instance.pid_cxt.StartupPID != 0) signal_child(g_instance.pid_cxt.StartupPID, SIGHUP); + if (g_instance.pid_cxt.PageRepairPID != 0) { + signal_child(g_instance.pid_cxt.PageRepairPID, SIGHUP); 
+ } + #ifdef PGXC /* PGXC_COORD */ if ( #ifdef ENABLE_MULTIPLE_NODES @@ -4572,9 +4666,13 @@ static void SIGHUP_handler(SIGNAL_ARGS) if (g_instance.pid_cxt.SysLoggerPID != 0) signal_child(g_instance.pid_cxt.SysLoggerPID, SIGHUP); /* signal the auditor process */ - if (g_instance.pid_cxt.PgAuditPID != 0) { + if (g_instance.pid_cxt.PgAuditPID != NULL) { Assert(!dummyStandbyMode); - signal_child(g_instance.pid_cxt.PgAuditPID, SIGHUP); + for (int i = 0; i < g_instance.audit_cxt.thread_num; ++i) { + if (g_instance.pid_cxt.PgAuditPID[i] != 0) { + signal_child(g_instance.pid_cxt.PgAuditPID[i], SIGHUP); + } + } } if (g_instance.pid_cxt.PgStatPID != 0) signal_child(g_instance.pid_cxt.PgStatPID, SIGHUP); @@ -4645,7 +4743,7 @@ static void SIGHUP_handler(SIGNAL_ARGS) if (g_instance.pid_cxt.UndoLauncherPID != 0) { signal_child(g_instance.pid_cxt.UndoLauncherPID, SIGHUP); } -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) if (g_instance.pid_cxt.ApplyLauncerPID != 0) { signal_child(g_instance.pid_cxt.ApplyLauncerPID, SIGHUP); } @@ -4672,6 +4770,10 @@ static void SIGHUP_handler(SIGNAL_ARGS) } #ifdef ENABLE_MULTIPLE_NODES + if (g_instance.pid_cxt.BarrierPreParsePID != 0) { + signal_child(g_instance.pid_cxt.BarrierPreParsePID, SIGHUP); + } + if (g_instance.pid_cxt.TsCompactionPID != 0) { signal_child(g_instance.pid_cxt.TsCompactionPID, SIGHUP); } @@ -4681,11 +4783,9 @@ static void SIGHUP_handler(SIGNAL_ARGS) (void)streaming_backend_manager(STREAMING_BACKEND_SIGHUP); #endif /* ENABLE_MULTIPLE_NODES */ - if (t_thrd.postmaster_cxt.HaShmData->is_cross_region && IsNotCrossRegionMode()) { - SpinLockAcquire(&t_thrd.postmaster_cxt.HaShmData->mutex); - t_thrd.postmaster_cxt.HaShmData->is_cross_region = false; - SpinLockRelease(&t_thrd.postmaster_cxt.HaShmData->mutex); - } + /* Update is_cross_region for streaming dr */ + UpdateCrossRegionMode(); + /* Reload authentication config files too */ int loadhbaCount = 0; while (!load_hba()) { @@ -4805,6 +4905,16 @@ static void pmdie(SIGNAL_ARGS) signal_child(g_instance.pid_cxt.StartupPID, SIGTERM); } +#ifdef ENABLE_MULTIPLE_NODES + if (g_instance.pid_cxt.BarrierPreParsePID != 0) { + signal_child(g_instance.pid_cxt.BarrierPreParsePID, SIGTERM); + } +#endif + + if (g_instance.pid_cxt.PageRepairPID != 0) { + signal_child(g_instance.pid_cxt.PageRepairPID, SIGTERM); + } + if (g_instance.pid_cxt.BgWriterPID != 0) { Assert(!dummyStandbyMode); signal_child(g_instance.pid_cxt.BgWriterPID, SIGTERM); @@ -4898,7 +5008,7 @@ static void pmdie(SIGNAL_ARGS) if (g_instance.pid_cxt.UndoLauncherPID != 0) { signal_child(g_instance.pid_cxt.UndoLauncherPID, SIGTERM); } -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) if (g_instance.pid_cxt.ApplyLauncerPID != 0) { signal_child(g_instance.pid_cxt.ApplyLauncerPID, SIGTERM); } @@ -4916,10 +5026,20 @@ static void pmdie(SIGNAL_ARGS) } } +#ifndef ENABLE_LITE_MODE if (g_instance.pid_cxt.BarrierCreatorPID != 0) { barrier_creator_thread_shutdown(); signal_child(g_instance.pid_cxt.BarrierCreatorPID, SIGTERM); } + if (g_instance.archive_thread_info.obsBarrierArchPID != NULL) { + int arch_loop = 0; + for (arch_loop = 0; arch_loop < g_instance.attr.attr_storage.max_replication_slots; arch_loop++) { + if (g_instance.archive_thread_info.obsBarrierArchPID[arch_loop] != 0) { + signal_child(g_instance.archive_thread_info.obsBarrierArchPID[arch_loop], SIGTERM); + } + } + } +#endif #ifdef ENABLE_MULTIPLE_NODES if (g_instance.pid_cxt.CsnminSyncPID != 0) { @@ -5054,6 
+5174,21 @@ static void SetWalsndsNodeState(ClusterNodeState requester, ClusterNodeState oth
 }
 }

+static void UpdateArchiveSlotStatus()
+{
+    for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) {
+        ReplicationSlot *slot = NULL;
+        slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i];
+        SpinLockAcquire(&slot->mutex);
+        if (!slot->active && slot->in_use && slot->extra_content != NULL &&
+            GET_SLOT_PERSISTENCY(slot->data) != RS_BACKUP) {
+            slot->active = true;
+        }
+        SpinLockRelease(&slot->mutex);
+        ereport(LOG, (errmsg("Archive slot changed to active when DB status is Promoting")));
+    }
+}
+
 /* prepare to response to standby for switchover */
 static void PrepareDemoteResponse(void)
 {
@@ -5075,6 +5210,7 @@ static void PrepareDemoteResponse(void)
     hashmdata->is_cascade_standby = true;
     SpinLockRelease(&hashmdata->mutex);
+    g_instance.global_sysdbcache.RefreshHotStandby();
     load_server_mode();
 }
@@ -5166,7 +5302,7 @@ static void ProcessDemoteRequest(void)
     if (g_instance.pid_cxt.UndoLauncherPID != 0)
         signal_child(g_instance.pid_cxt.UndoLauncherPID, SIGTERM);
-#ifndef ENABLE_MULTIPLE_NODES
+#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE)
     if (g_instance.pid_cxt.ApplyLauncerPID != 0)
         signal_child(g_instance.pid_cxt.ApplyLauncerPID, SIGTERM);
 #endif
@@ -5209,6 +5345,10 @@ static void ProcessDemoteRequest(void)
     if (g_instance.pid_cxt.StartupPID != 0)
         signal_child(g_instance.pid_cxt.StartupPID, SIGTERM);
+    if (g_instance.pid_cxt.PageRepairPID != 0) {
+        signal_child(g_instance.pid_cxt.PageRepairPID, SIGTERM);
+    }
+
     if (g_instance.pid_cxt.BgWriterPID != 0) {
         Assert(!dummyStandbyMode);
         signal_child(g_instance.pid_cxt.BgWriterPID, SIGTERM);
@@ -5292,10 +5432,12 @@ static void ProcessDemoteRequest(void)
         signal_child(g_instance.pid_cxt.PercentilePID, SIGTERM);
     }
+#ifndef ENABLE_LITE_MODE
     if (g_instance.pid_cxt.BarrierCreatorPID != 0) {
         barrier_creator_thread_shutdown();
         signal_child(g_instance.pid_cxt.BarrierCreatorPID, SIGTERM);
     }
+#endif
 #ifdef ENABLE_MULTIPLE_NODES
     if (g_instance.pid_cxt.CsnminSyncPID != 0) {
@@ -5342,7 +5484,7 @@ static void ProcessDemoteRequest(void)
     if (g_instance.pid_cxt.UndoLauncherPID != 0)
         signal_child(g_instance.pid_cxt.UndoLauncherPID, SIGTERM);
-#ifndef ENABLE_MULTIPLE_NODES
+#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE)
     if (g_instance.pid_cxt.ApplyLauncerPID != 0)
         signal_child(g_instance.pid_cxt.ApplyLauncerPID, SIGTERM);
 #endif
@@ -5387,6 +5529,48 @@ static void ProcessDemoteRequest(void)
     PostmasterStateMachine();
 }

+/*
+ * Reaper -- get current time.
+ */
+static void GetTimeNowForReaperLog(char* nowTime, int timeLen)
+{
+    time_t formatTime;
+    struct timeval current = {0};
+    const int tmpBufSize = 32;
+    char tmpBuf[tmpBufSize] = {0};
+
+    if (nowTime == NULL || timeLen == 0) {
+        return;
+    }
+
+    (void)gettimeofday(&current, NULL);
+    formatTime = current.tv_sec;
+    struct tm* pTime = localtime(&formatTime);
+    strftime(tmpBuf, sizeof(tmpBuf), "%Y-%m-%d %H:%M:%S", pTime);

+    errno_t rc = sprintf_s(nowTime, timeLen - 1, "%s.%ld ", tmpBuf, current.tv_usec / 1000);
+    securec_check_ss(rc, "\0", "\0");
+}
+
+/*
+ * Reaper -- build the log line prefix for the reaper's write_stderr messages.
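+ * The reaper runs in signal-handler context, where ereport is not safe to call, so its messages go through write_stderr with this handcrafted prefix instead.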
+ */ +static char* GetReaperLogPrefix(char* buf, int bufLen) +{ + const int bufSize = 256; + char timeBuf[bufSize] = {0}; + errno_t rc; + + GetTimeNowForReaperLog(timeBuf, bufSize); + + rc = memset_s(buf, bufLen, 0, bufLen); + securec_check(rc, "\0", "\0"); + rc = sprintf_s(buf, bufLen - 1, "%s [postmaster][reaper][%lu]", timeBuf, PostmasterPid); + securec_check_ss(rc, "\0", "\0"); + + return buf; +} + /* * Reaper -- signal handler to cleanup after a child process dies. */ @@ -5397,12 +5581,12 @@ static void reaper(SIGNAL_ARGS) long exitstatus; /* its exit status */ int* status = NULL; ThreadId oldpid = 0; + char logBuf[ReaperLogBufSize] = {0}; #define LOOPTEST() (pid = gs_thread_id(t_thrd.postmaster_cxt.CurExitThread)) #define LOOPHEADER() (exitstatus = (long)(intptr_t)status) gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); - ereport(DEBUG4, (errmsg_internal("reaping dead processes"))); for (;;) { LOOPTEST(); @@ -5421,14 +5605,14 @@ static void reaper(SIGNAL_ARGS) */ if (ESRCH == pthread_kill(pid, 0)) { exitstatus = 0; - ereport(LOG, (errmsg("failed to join thread %lu, no such process", pid))); + write_stderr("%s LOG: failed to join thread %lu, no such process\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize), pid); } else { exitstatus = 1; HandleChildCrash(pid, exitstatus, _(GetProcName(pid))); } } else { LOOPHEADER(); - ereport(DEBUG1, (errmsg("have joined thread %lu, exitstatus=%ld.", pid, exitstatus))); } /* @@ -5436,7 +5620,7 @@ static void reaper(SIGNAL_ARGS) */ if (pid == g_instance.pid_cxt.StartupPID) { g_instance.pid_cxt.StartupPID = 0; - ereport(LOG, (errmsg("startupprocess exit"))); + write_stderr("%s LOG: startupprocess exit\n", GetReaperLogPrefix(logBuf, ReaperLogBufSize)); /* * Startup process exited in response to a shutdown request (or it @@ -5454,6 +5638,8 @@ static void reaper(SIGNAL_ARGS) if (g_instance.demotion > NoDemote && t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && (EXIT_STATUS_0(exitstatus) || EXIT_STATUS_1(exitstatus))) { + pmState = PM_WAIT_BACKENDS; + /* PostmasterStateMachine logic does the rest */ continue; } @@ -5464,7 +5650,8 @@ static void reaper(SIGNAL_ARGS) */ if (pmState == PM_STARTUP && !EXIT_STATUS_0(exitstatus)) { LogChildExit(LOG, _("startup process"), pid, exitstatus); - ereport(LOG, (errmsg("aborting startup due to startup process failure"))); + write_stderr("%s LOG: aborting startup due to startup process failure\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize)); if (get_real_recovery_parallelism() > 1) { HandleChildCrash(pid, exitstatus, _("startup process")); } else { @@ -5495,6 +5682,11 @@ static void reaper(SIGNAL_ARGS) continue; } + /* startup process exit, standby pagerepair thread can exit */ + if (g_instance.pid_cxt.PageRepairPID != 0) { + signal_child(g_instance.pid_cxt.PageRepairPID, SIGTERM); + } + /* * Startup succeeded, commence normal operations */ @@ -5506,6 +5698,8 @@ static void reaper(SIGNAL_ARGS) if (t_thrd.postmaster_cxt.HaShmData && (t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE || t_thrd.postmaster_cxt.HaShmData->current_mode == PENDING_MODE)) { t_thrd.postmaster_cxt.HaShmData->current_mode = PRIMARY_MODE; + UpdateArchiveSlotStatus(); + g_instance.global_sysdbcache.RefreshHotStandby(); if (g_instance.pid_cxt.HeartbeatPID != 0) signal_child(g_instance.pid_cxt.HeartbeatPID, SIGTERM); UpdateOptsFile(); @@ -5527,9 +5721,9 @@ static void reaper(SIGNAL_ARGS) * RecoveryInProgress() */ if (g_instance.attr.attr_storage.max_wal_senders > 0 && CountChildren(BACKEND_TYPE_WALSND) > 0) { - 
ereport(LOG, - (errmsg("terminating all walsender processes to force cascaded " - "standby(s) to update timeline and reconnect"))); + write_stderr("%s LOG: terminating all walsender processes to force cascaded " + "standby(s) to update timeline and reconnect\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize)); (void)SignalSomeChildren(SIGUSR2, BACKEND_TYPE_WALSND); } @@ -5588,14 +5782,14 @@ static void reaper(SIGNAL_ARGS) g_instance.attr.attr_storage.xlog_file_path != NULL) { g_instance.pid_cxt.sharedStorageXlogCopyThreadPID = initialize_util_thread(SHARE_STORAGE_XLOG_COPYER); } - - if (g_instance.pid_cxt.UndoLauncherPID == 0 && !dummyStandbyMode) + + if (g_instance.attr.attr_storage.enable_ustore && g_instance.pid_cxt.UndoLauncherPID == 0 && !dummyStandbyMode) g_instance.pid_cxt.UndoLauncherPID = initialize_util_thread(UNDO_LAUNCHER); - if (g_instance.pid_cxt.GlobalStatsPID == 0 && !dummyStandbyMode) + if (g_instance.attr.attr_storage.enable_ustore && g_instance.pid_cxt.GlobalStatsPID == 0 && !dummyStandbyMode) g_instance.pid_cxt.GlobalStatsPID = initialize_util_thread(GLOBALSTATS_THREAD); -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) if (u_sess->attr.attr_common.upgrade_mode == 0 && g_instance.pid_cxt.ApplyLauncerPID == 0 && !dummyStandbyMode) { g_instance.pid_cxt.ApplyLauncerPID = initialize_util_thread(APPLY_LAUNCHER); @@ -5620,9 +5814,11 @@ static void reaper(SIGNAL_ARGS) if (ENABLE_TCAP_RECYCLEBIN && (g_instance.role == VSINGLENODE) && pmState == PM_RUN && g_instance.pid_cxt.RbCleanrPID== 0 && !dummyStandbyMode) g_instance.pid_cxt.RbCleanrPID = StartRbCleaner(); - if ((IS_PGXC_COORDINATOR || (g_instance.role == VSINGLENODE)) && g_instance.pid_cxt.SnapshotPID == 0 && !dummyStandbyMode) + if ((IS_PGXC_COORDINATOR || (g_instance.role == VSINGLENODE)) && u_sess->attr.attr_common.enable_wdr_snapshot && + g_instance.pid_cxt.SnapshotPID == 0 && !dummyStandbyMode) g_instance.pid_cxt.SnapshotPID = snapshot_start(); - if ((IS_PGXC_COORDINATOR || IS_SINGLE_NODE) && g_instance.pid_cxt.PercentilePID == 0 && !dummyStandbyMode) + if ((IS_PGXC_COORDINATOR || IS_SINGLE_NODE) && u_sess->attr.attr_common.enable_instr_rt_percentile && + g_instance.pid_cxt.PercentilePID == 0 && !dummyStandbyMode) g_instance.pid_cxt.PercentilePID = initialize_util_thread(PERCENTILE_WORKER); if (ENABLE_ASP && g_instance.pid_cxt.AshPID == 0 && !dummyStandbyMode) @@ -5634,8 +5830,9 @@ static void reaper(SIGNAL_ARGS) /* Database Security: Support database audit */ /* start auditor process */ /* start the audit collector as needed. 
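With multi-thread auditing, pgaudit_start_all() launches one collector per configured audit thread (g_instance.audit_cxt.thread_num) instead of the single collector that pgaudit_start() used to create.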
*/
-        if (g_instance.pid_cxt.PgAuditPID == 0 && u_sess->attr.attr_security.Audit_enabled && !dummyStandbyMode)
-            g_instance.pid_cxt.PgAuditPID = pgaudit_start();
+        if (u_sess->attr.attr_security.Audit_enabled && !dummyStandbyMode) {
+            pgaudit_start_all();
+        }
         if (t_thrd.postmaster_cxt.audit_primary_start && !t_thrd.postmaster_cxt.audit_primary_failover &&
             !t_thrd.postmaster_cxt.audit_standby_switchover) {
@@ -5643,6 +5840,7 @@ static void reaper(SIGNAL_ARGS)
             t_thrd.postmaster_cxt.audit_primary_start = false;
         }

+#ifndef ENABLE_LITE_MODE
         if (
#ifdef ENABLE_MULTIPLE_NODES
             IS_PGXC_COORDINATOR &&
@@ -5656,6 +5854,7 @@ static void reaper(SIGNAL_ARGS)
         if (g_instance.pid_cxt.FaultMonitorPID == 0)
             g_instance.pid_cxt.FaultMonitorPID = initialize_util_thread(FAULTMONITOR);
+#endif

         /* if workload manager is off, we still use this thread to build user hash table */
         if ((ENABLE_WORKLOAD_CONTROL || !WLMIsInfoInit()) && g_instance.pid_cxt.WLMCollectPID == 0 &&
@@ -5679,22 +5878,27 @@ static void reaper(SIGNAL_ARGS)
         if (NeedHeartbeat())
             g_instance.pid_cxt.HeartbeatPID = initialize_util_thread(HEARTBEAT);

+#ifndef ENABLE_LITE_MODE
         if (START_BARRIER_CREATOR && g_instance.pid_cxt.BarrierCreatorPID == 0 &&
-            g_instance.archive_obs_cxt.archive_slot_num != 0) {
+            (g_instance.archive_obs_cxt.archive_slot_num != 0 || START_AUTO_CSN_BARRIER)) {
             g_instance.pid_cxt.BarrierCreatorPID = initialize_util_thread(BARRIER_CREATOR);
         }
+#endif

         if (GTM_LITE_CN && g_instance.pid_cxt.CsnminSyncPID == 0) {
             g_instance.pid_cxt.CsnminSyncPID = initialize_util_thread(CSNMIN_SYNC);
         }
+        if (IS_CN_DISASTER_RECOVER_MODE && g_instance.pid_cxt.CsnminSyncPID == 0) {
+            g_instance.pid_cxt.CsnminSyncPID = initialize_util_thread(CSNMIN_SYNC);
+        }

         PMUpdateDBState(NORMAL_STATE, get_cur_mode(), get_cur_repl_num());
-        ereport(LOG,
-            (errmsg("update gaussdb state file: db state(NORMAL_STATE), server mode(%s)",
-                wal_get_role_string(get_cur_mode()))));
+        write_stderr("%s LOG: update gaussdb state file: db state(NORMAL_STATE), server mode(%s)\n",
+            GetReaperLogPrefix(logBuf, ReaperLogBufSize), wal_get_role_string(get_cur_mode()));

         /* at this point we are really open for business */
-        ereport(LOG, (errmsg("database system is ready to accept connections")));
+        write_stderr("%s LOG: database system is ready to accept connections\n",
+            GetReaperLogPrefix(logBuf, ReaperLogBufSize));

         continue;
     }

@@ -5706,6 +5910,16 @@ static void reaper(SIGNAL_ARGS)
             g_threadPoolControler->GetScheduler()->SetShutDown(false);
             continue;
         }
+
+        if (pid == g_instance.pid_cxt.PageRepairPID) {
+            g_instance.pid_cxt.PageRepairPID = 0;
+
+            if (!EXIT_STATUS_0(exitstatus))
+                HandleChildCrash(pid, exitstatus, _("page repair process"));
+
+            continue;
+        }
+
        /*
        * Was it the bgwriter?
Normal exit can be ignored; we'll start a new * one at the next iteration of the postmaster's main loop, if @@ -5780,15 +5994,18 @@ static void reaper(SIGNAL_ARGS) if ((g_instance.attr.attr_storage.xlog_file_path != NULL) && (g_instance.pid_cxt.sharedStorageXlogCopyThreadPID != 0)) { signal_child(g_instance.pid_cxt.sharedStorageXlogCopyThreadPID, SIGTERM); - ereport(LOG, (errmsg("checkpoint thread exit and wait for sharestorage"))); + write_stderr("%s LOG: checkpoint thread exit and wait for sharestorage\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize)); continue; } - ereport(LOG, (errmsg("checkpoint thread exit and nowait for sharestorage"))); + write_stderr("%s LOG: checkpoint thread exit and nowait for sharestorage\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize)); } if (pid == g_instance.pid_cxt.sharedStorageXlogCopyThreadPID) { g_instance.pid_cxt.sharedStorageXlogCopyThreadPID = 0; - ereport(LOG, (errmsg("sharestorage thread exit"))); + write_stderr("%s LOG: sharestorage thread exit\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize)); } /* Waken archiver for the last time */ @@ -5853,9 +6070,9 @@ static void reaper(SIGNAL_ARGS) * We can also shut down the audit collector now; there's * nothing left for it to do. */ - if (g_instance.pid_cxt.PgAuditPID != 0) { + if (g_instance.pid_cxt.PgAuditPID != NULL) { Assert(!dummyStandbyMode); - signal_child(g_instance.pid_cxt.PgAuditPID, SIGQUIT); + pgaudit_stop_all(); } } else { /* @@ -6132,14 +6349,23 @@ static void reaper(SIGNAL_ARGS) /* * Was it the system auditor? If so, try to start a new one. */ - if (pid == g_instance.pid_cxt.PgAuditPID) { - Assert(!dummyStandbyMode); - g_instance.pid_cxt.PgAuditPID = 0; - if (!EXIT_STATUS_0(exitstatus)) - LogChildExit(LOG, _("system auditor process"), pid, exitstatus); - if (pmState == PM_RUN) - g_instance.pid_cxt.PgAuditPID = pgaudit_start(); - continue; + if (g_instance.pid_cxt.PgAuditPID != NULL) { + bool is_audit_thread = false; + for (int i = 0; i < g_instance.audit_cxt.thread_num; ++i) { + Assert(!dummyStandbyMode); + if (pid == g_instance.pid_cxt.PgAuditPID[i]) { + g_instance.pid_cxt.PgAuditPID[i] = 0; + is_audit_thread = true; + if (!EXIT_STATUS_0(exitstatus)) + LogChildExit(LOG, _("system auditor process"), pid, exitstatus); + if (pmState == PM_RUN) + g_instance.pid_cxt.PgAuditPID[i] = pgaudit_start(); + } + } + + if (is_audit_thread) { + continue; + } } /* Was it the system logger? If so, try to start a new one */ @@ -6225,6 +6451,7 @@ static void reaper(SIGNAL_ARGS) continue; } +#ifndef ENABLE_LITE_MODE /* Was it the barrier creator? 
If so, try to start a new one */ if (START_BARRIER_CREATOR && pid == g_instance.pid_cxt.BarrierCreatorPID) { g_instance.pid_cxt.BarrierCreatorPID = 0; @@ -6232,6 +6459,7 @@ static void reaper(SIGNAL_ARGS) LogChildExit(LOG, _("barrier creator process"), pid, exitstatus); continue; } +#endif if (pid == g_instance.pid_cxt.FaultMonitorPID) { g_instance.pid_cxt.FaultMonitorPID = 0; @@ -6269,7 +6497,8 @@ static void reaper(SIGNAL_ARGS) if (pageredoStatus == PAGE_REDO_THREAD_EXIT_NORMAL) { continue; } else if (pageredoStatus == PAGE_REDO_THREAD_EXIT_ABNORMAL) { - ereport(LOG, (errmsg("aborting due to page redo process failure"))); + write_stderr("%s LOG: aborting due to page redo process failure\n", + GetReaperLogPrefix(logBuf, ReaperLogBufSize)); HandleChildCrash(pid, exitstatus, _("page redo process")); continue; } @@ -6292,6 +6521,14 @@ static void reaper(SIGNAL_ARGS) } #ifdef ENABLE_MULTIPLE_NODES + if (pid == g_instance.pid_cxt.BarrierPreParsePID) { + g_instance.pid_cxt.BarrierPreParsePID = 0; + write_stderr("%s LOG: barrier pre parse thread exit\n", GetReaperLogPrefix(logBuf, ReaperLogBufSize)); + if (!EXIT_STATUS_0(exitstatus)) + HandleChildCrash(pid, exitstatus, _("barrier pre parse process")); + continue; + } + if (pid == g_instance.pid_cxt.TsCompactionPID) { g_instance.pid_cxt.TsCompactionPID = 0; if (!EXIT_STATUS_0(exitstatus)) @@ -6320,7 +6557,7 @@ static void reaper(SIGNAL_ARGS) LogChildExit(LOG, _("undo launcher process"), pid, exitstatus); continue; } -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) if (pid == g_instance.pid_cxt.ApplyLauncerPID) { g_instance.pid_cxt.ApplyLauncerPID = 0; @@ -6402,8 +6639,9 @@ static const char* GetProcName(ThreadId pid) return "full SQL statement flush process"; else if (pid == g_instance.pid_cxt.PercentilePID) return "percentile collector process"; - else if (pid == g_instance.pid_cxt.PgAuditPID) + else if (pg_auditor_thread(pid)) { return "system auditor process"; + } else if (pid == g_instance.pid_cxt.SysLoggerPID) return "system logger process"; else if (pid == g_instance.pid_cxt.WLMCollectPID) @@ -6446,6 +6684,8 @@ static const char* GetProcName(ThreadId pid) return "pool cleaner process"; else if (g_instance.pid_cxt.sharedStorageXlogCopyThreadPID == pid) return "share xlog copy process"; + else if (g_instance.pid_cxt.BarrierPreParsePID == pid) + return "barrier preparse process"; else if (g_instance.pid_cxt.CommReceiverPIDS != NULL) { int recv_loop = 0; for (recv_loop = 0; recv_loop < g_instance.attr.attr_network.comm_max_receiver; recv_loop++) { @@ -6467,8 +6707,7 @@ static const char* GetProcName(ThreadId pid) static void CleanupBackend(ThreadId pid, int exitstatus) /* child's exit status. */ { Dlelem* curr = NULL; - - LogChildExit(DEBUG2, _("server process"), pid, exitstatus); + char logBuf[ReaperLogBufSize] = {0}; /* * If a backend dies in an ugly way then we must signal all other backends @@ -6507,11 +6746,13 @@ static void CleanupBackend(ThreadId pid, int exitstatus) /* child's exit status. */ bool found = false; int cnt = 0; + knl_thread_role role = (knl_thread_role)0; for (curr = DLGetTail(g_instance.backend_list); curr; curr = DLGetPred(curr)) { Backend* bp = (Backend*)DLE_VAL(curr); if (bp->pid == pid) { cnt++; + role = bp->role; if (bp->dead_end) { { if (!ReleasePostmasterChildSlot(bp->child_slot)) { @@ -6533,8 +6774,9 @@ static void CleanupBackend(ThreadId pid, int exitstatus) /* child's exit status. 
 }
 }

 if (!found) {
-        elog(WARNING, "Did not found reaper thread id %lu in backend list, with %d thread still alive, "\
-            "may has leak occurs", pid, cnt);
+        write_stderr("%s WARNING: Did not find reaper thread id %lu in backend list, with %d threads still alive, "
+            "last thread role %d, a leak may have occurred\n",
+            GetReaperLogPrefix(logBuf, ReaperLogBufSize), pid, cnt, (int)role);
     }
 }

@@ -6547,16 +6789,20 @@ static void CleanupBackend(ThreadId pid, int exitstatus) /* child's exit status.
 */
 void HandleChildCrash(ThreadId pid, int exitstatus, const char* procname)
 {
+    char logBuf[ReaperLogBufSize] = {0};
+
     /*
      * Make log entry unless there was a previous crash (if so, nonzero exit
      * status is to be expected in SIGQUIT response; don't clutter log)
      */
     if (!g_instance.fatal_error) {
         LogChildExit(LOG, procname, pid, exitstatus);
-        ereport(LOG, (errmsg("terminating any other active server processes")));
+        write_stderr("%s LOG: terminating any other active server processes\n",
+            GetReaperLogPrefix(logBuf, ReaperLogBufSize));
     }

-    ereport(LOG, (errmsg("%s (ThreadId %lu) exited with exit code %d", procname, pid, WEXITSTATUS(exitstatus))));
+    write_stderr("%s LOG: %s (ThreadId %lu) exited with exit code %d\n",
+        GetReaperLogPrefix(logBuf, ReaperLogBufSize), procname, pid, WEXITSTATUS(exitstatus));

     // Threading: do not handle child crash,
     // &g_instance.proc_aux_base or autovacuum elog(FATAL) could reach here,
@@ -6564,7 +6810,8 @@ void HandleChildCrash(ThreadId pid, int exitstatus, const char* procname)
     // then backend may handle the signal when doing malloc, cause memory exception.
     // So exit directly.
     //
-    ereport(LOG, (errmsg("the server process exits")));
+
+    write_stderr("%s LOG: the server process exits\n", GetReaperLogPrefix(logBuf, ReaperLogBufSize));

     cancelIpcMemoryDetach();

@@ -6584,18 +6831,25 @@ static void LogChildExit(int lev, const char* procname, ThreadId pid, int exitst
     */
     char activity_buffer[1024];
     const char* activity = NULL;
+    char logBuf[ReaperLogBufSize] = {0};

     if (!EXIT_STATUS_0(exitstatus)) {
         activity = pgstat_get_crashed_backend_activity(pid, activity_buffer, sizeof(activity_buffer));
     }

     if (WIFEXITED(exitstatus)) {
-        ereport(lev,
-
-            /* ------
-              translator: %s is a noun phrase describing a child process, such as
-              "server process" */
-            (errmsg("%s (ThreadId %lu) exited with exit code %d", procname, pid, WEXITSTATUS(exitstatus)),
-                activity ? errdetail("Failed process was running: %s", activity) : 0));
+        if (lev == LOG) {
+            write_stderr("%s LOG: %s (ThreadId %lu) exited with exit code %d. "
+                "Failed process was running: %s\n",
+                GetReaperLogPrefix(logBuf, ReaperLogBufSize), procname, pid, WEXITSTATUS(exitstatus),
+                activity ? activity : "");
+        } else {
+            ereport(lev,
+                /* ------
+                  translator: %s is a noun phrase describing a child process, such as
+                  "server process" */
+                (errmsg("%s (ThreadId %lu) exited with exit code %d", procname, pid, WEXITSTATUS(exitstatus)),
+                    activity ? errdetail("Failed process was running: %s", activity) : 0));
+        }
     } else if (WIFSIGNALED(exitstatus)) {
#if defined(WIN32)
         ereport(lev,
@@ -6620,22 +6874,34 @@ static void LogChildExit(int lev, const char* procname, ThreadId pid, int exitst
             WTERMSIG(exitstatus) < NSIG ? sys_siglist[WTERMSIG(exitstatus)] : "(unknown)"),
             activity ? errdetail("Failed process was running: %s", activity) : 0));
#else
-        ereport(lev,
-
-            /* ------
-              translator: %s is a noun phrase describing a child process, such as
-              "server process" */
-            (errmsg("%s (ThreadId %lu) was terminated by signal %d", procname, pid, WTERMSIG(exitstatus)),
-                activity ? errdetail("Failed process was running: %s", activity) : 0));
+        if (lev == LOG) {
+            write_stderr("%s LOG: %s (ThreadId %lu) was terminated by signal %d. "
+                "Failed process was running: %s\n",
+                GetReaperLogPrefix(logBuf, ReaperLogBufSize), procname, pid, WTERMSIG(exitstatus),
+                activity ? activity : "");
+        } else {
+            ereport(lev,
+                /* ------
+                  translator: %s is a noun phrase describing a child process, such as
+                  "server process" */
+                (errmsg("%s (ThreadId %lu) was terminated by signal %d", procname, pid, WTERMSIG(exitstatus)),
+                    activity ? errdetail("Failed process was running: %s", activity) : 0));
+        }
#endif
     } else {
-        ereport(lev,
-
-            /* ------
-              translator: %s is a noun phrase describing a child process, such as
-              "server process" */
-            (errmsg("%s (ThreadId %lu) exited with unrecognized status %d", procname, pid, exitstatus),
-                activity ? errdetail("Failed process was running: %s", activity) : 0));
+        if (lev == LOG) {
+            write_stderr("%s LOG: %s (ThreadId %lu) exited with unrecognized status %d. "
+                "Failed process was running: %s\n",
+                GetReaperLogPrefix(logBuf, ReaperLogBufSize), procname, pid, exitstatus,
+                activity ? activity : "");
+        } else {
+            ereport(lev,
+                /* ------
+                  translator: %s is a noun phrase describing a child process, such as
+                  "server process" */
+                (errmsg("%s (ThreadId %lu) exited with unrecognized status %d", procname, pid, exitstatus),
+                    activity ? errdetail("Failed process was running: %s", activity) : 0));
+        }
     }
 }

@@ -6663,6 +6929,10 @@ static void PostmasterStateMachineReadOnly(void)
     if (g_instance.pid_cxt.StartupPID != 0)
         signal_child(g_instance.pid_cxt.StartupPID, SIGTERM);

+    if (g_instance.pid_cxt.PageRepairPID != 0) {
+        signal_child(g_instance.pid_cxt.PageRepairPID, SIGTERM);
+    }
+
     if (g_instance.pid_cxt.WalReceiverPID != 0)
         signal_child(g_instance.pid_cxt.WalReceiverPID, SIGTERM);

@@ -6692,20 +6962,25 @@ static void PostmasterStateMachineReadOnly(void)
     if (g_instance.pid_cxt.HeartbeatPID != 0)
         signal_child(g_instance.pid_cxt.HeartbeatPID, SIGTERM);

+#ifndef ENABLE_LITE_MODE
     if (g_instance.pid_cxt.BarrierCreatorPID != 0) {
         barrier_creator_thread_shutdown();
         signal_child(g_instance.pid_cxt.BarrierCreatorPID, SIGTERM);
     }
+#endif
#ifdef ENABLE_MULTIPLE_NODES
     if (g_instance.pid_cxt.CsnminSyncPID != 0) {
         csnminsync_thread_shutdown();
         signal_child(g_instance.pid_cxt.CsnminSyncPID, SIGTERM);
     }
+
+    if (g_instance.pid_cxt.BarrierPreParsePID != 0)
+        signal_child(g_instance.pid_cxt.BarrierPreParsePID, SIGTERM);
#endif /* ENABLE_MULTIPLE_NODES */

     if (g_instance.pid_cxt.UndoLauncherPID != 0)
         signal_child(g_instance.pid_cxt.UndoLauncherPID, SIGTERM);
-#ifndef ENABLE_MULTIPLE_NODES
+#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE)
     if (g_instance.pid_cxt.ApplyLauncerPID != 0)
         signal_child(g_instance.pid_cxt.ApplyLauncerPID, SIGTERM);
#endif
@@ -6740,12 +7015,26 @@ static bool ObsArchAllShutDown()
     return true;
 }

+static bool AuditAllShutDown()
+{
+    if (g_instance.pid_cxt.PgAuditPID != NULL) {
+        for (int i = 0; i < g_instance.audit_cxt.thread_num; i++) {
+            if (g_instance.pid_cxt.PgAuditPID[i] != 0) {
+                return false;
+            }
+        }
+    }
+    ereport(LOG, (errmsg("All audit threads have exited.")));
+    return true;
+}
+
 static void AsssertAllChildThreadExit()
 {
    /* These other guys
should be dead already */ Assert(g_instance.pid_cxt.TwoPhaseCleanerPID == 0); Assert(g_instance.pid_cxt.FaultMonitorPID == 0); Assert(g_instance.pid_cxt.StartupPID == 0); + Assert(g_instance.pid_cxt.PageRepairPID == 0); Assert(g_instance.pid_cxt.WalReceiverPID == 0); Assert(g_instance.pid_cxt.WalRcvWriterPID == 0); Assert(g_instance.pid_cxt.DataReceiverPID == 0); @@ -6766,13 +7055,14 @@ static void AsssertAllChildThreadExit() Assert(g_instance.pid_cxt.HeartbeatPID == 0); Assert(g_instance.pid_cxt.CsnminSyncPID == 0); Assert(g_instance.pid_cxt.BarrierCreatorPID == 0); + Assert(g_instance.pid_cxt.BarrierPreParsePID == 0); #ifdef ENABLE_MULTIPLE_NODES Assert(g_instance.pid_cxt.TsCompactionPID == 0); #endif /* ENABLE_MULTIPLE_NODES */ Assert(g_instance.pid_cxt.CommPoolerCleanPID == 0); Assert(g_instance.pid_cxt.UndoLauncherPID == 0); Assert(g_instance.pid_cxt.UndoRecyclerPID == 0); -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) Assert(g_instance.pid_cxt.ApplyLauncerPID == 0); #endif Assert(g_instance.pid_cxt.GlobalStatsPID == 0); @@ -6829,8 +7119,9 @@ static void PostmasterStateMachine(void) g_instance.pid_cxt.RbCleanrPID == 0 && g_instance.pid_cxt.SnapshotPID == 0 && g_instance.pid_cxt.PercentilePID == 0 && g_instance.pid_cxt.AshPID == 0 && g_instance.pid_cxt.CsnminSyncPID == 0 && - g_instance.pid_cxt.BarrierCreatorPID == 0 && + g_instance.pid_cxt.BarrierCreatorPID == 0 && g_instance.pid_cxt.PageRepairPID == 0 && #ifdef ENABLE_MULTIPLE_NODES + g_instance.pid_cxt.BarrierPreParsePID == 0 && g_instance.pid_cxt.CommPoolerCleanPID == 0 && streaming_backend_manager(STREAMING_BACKEND_SHUTDOWN) && g_instance.pid_cxt.TsCompactionPID == 0 && g_instance.pid_cxt.TsCompactionAuxiliaryPID == 0 && g_instance.pid_cxt.CommPoolerCleanPID == 0 && @@ -6838,7 +7129,7 @@ static void PostmasterStateMachine(void) g_instance.pid_cxt.UndoLauncherPID == 0 && g_instance.pid_cxt.UndoRecyclerPID == 0 && g_instance.pid_cxt.GlobalStatsPID == 0 && -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) g_instance.pid_cxt.ApplyLauncerPID == 0 && #endif IsAllPageWorkerExit() && IsAllBuildSenderExit()) { @@ -6917,9 +7208,9 @@ static void PostmasterStateMachine(void) signal_child(g_instance.pid_cxt.PgStatPID, SIGQUIT); /* signal the auditor process */ - if (g_instance.pid_cxt.PgAuditPID != 0) { + if (g_instance.pid_cxt.PgAuditPID != NULL) { Assert(!dummyStandbyMode); - signal_child(g_instance.pid_cxt.PgAuditPID, SIGQUIT); + pgaudit_stop_all(); } if (g_instance.pid_cxt.sharedStorageXlogCopyThreadPID != 0) { @@ -6966,7 +7257,7 @@ static void PostmasterStateMachine(void) * g_instance.fatal_error processing. 
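* PgAuditPID is now an array of audit thread ids, so AuditAllShutDown() below must also confirm that every audit thread has exited before the dead-end children are cleaned up.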
*/ if (DLGetHead(g_instance.backend_list) == NULL && g_instance.pid_cxt.PgArchPID == 0 && - g_instance.pid_cxt.PgStatPID == 0 && g_instance.pid_cxt.PgAuditPID == 0 && + g_instance.pid_cxt.PgStatPID == 0 && AuditAllShutDown() && ckpt_all_flush_buffer_thread_exit() && ObsArchAllShutDown()) { AsssertAllChildThreadExit(); @@ -7048,6 +7339,7 @@ static void PostmasterStateMachine(void) hashmdata = t_thrd.postmaster_cxt.HaShmData; hashmdata->current_mode = cur_mode; + g_instance.global_sysdbcache.RefreshHotStandby(); g_instance.pid_cxt.StartupPID = initialize_util_thread(STARTUP); Assert(g_instance.pid_cxt.StartupPID != 0); pmState = PM_STARTUP; @@ -7080,6 +7372,7 @@ static void PostmasterStateMachine(void) { volatile HaShmemData* hashmdata = t_thrd.postmaster_cxt.HaShmData; hashmdata->current_mode = STANDBY_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); UpdateOptsFile(); ereport(LOG, (errmsg("archive recovery started"))); } @@ -7303,6 +7596,7 @@ static int BackendStartup(Port* port, bool isConnectHaPort) * of backends. */ bn->pid = pid; + bn->role = WORKER; bn->is_autovacuum = false; bn->cancel_key = t_thrd.proc_cxt.MyCancelKey; DLInitElem(&bn->elem, bn); @@ -7551,6 +7845,21 @@ static int StartupPacketInitialize(Port* port) static void PsDisplayInitialize(Port* port) { +#ifdef ENABLE_LITE_MODE + char thr_name[16]; + int rcs = 0; + + if (t_thrd.role == WORKER) { + rcs = snprintf_truncated_s(thr_name, sizeof(thr_name), "w:%s", port->user_name); + securec_check_ss(rcs, "\0", "\0"); + (void)pthread_setname_np(gs_thread_self(), thr_name); + } else if (t_thrd.role == THREADPOOL_WORKER) { + rcs = snprintf_truncated_s(thr_name, sizeof(thr_name), "tw:%s", port->user_name); + securec_check_ss(rcs, "\0", "\0"); + (void)pthread_setname_np(gs_thread_self(), thr_name); + } + +#else char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; errno_t rc; @@ -7587,6 +7896,7 @@ static void PsDisplayInitialize(Port* port) port->database_name, remote_ps_data, u_sess->attr.attr_common.update_process_title ? 
"authentication" : ""); +#endif } void PortInitialize(Port* port, knl_thread_arg* arg) @@ -7759,7 +8069,9 @@ void ExitPostmaster(int status) CloseGaussPidDir(); +#ifndef ENABLE_LITE_MODE obs_deinitialize(); +#endif /* when exiting the postmaster process, destroy the hash table */ if (g_instance.comm_cxt.usedDnSpace != NULL) { @@ -7778,7 +8090,9 @@ void ExitPostmaster(int status) ProcessCommLogicTearDown(); } +#ifdef ENABLE_LLVM_COMPILE CodeGenProcessTearDown(); +#endif /* Save llt data to disk before postmaster exit */ #ifdef ENABLE_LLT @@ -7814,6 +8128,9 @@ static void handle_recovery_started() Assert(g_instance.pid_cxt.SpBgWriterPID == 0); g_instance.pid_cxt.SpBgWriterPID = initialize_util_thread(SPBGWRITER); + Assert(g_instance.pid_cxt.PageRepairPID == 0); + g_instance.pid_cxt.PageRepairPID = initialize_util_thread(PAGEREPAIR_THREAD); + if (ENABLE_INCRE_CKPT) { for (int i = 0; i < g_instance.ckpt_cxt_ctl->pgwr_procs.num; i++) { Assert(g_instance.pid_cxt.PageWriterPID[i] == 0); @@ -7843,7 +8160,11 @@ static void handle_begin_hot_standby() wal_get_role_string(get_cur_mode())))); ereport(LOG, (errmsg("database system is ready to accept read only connections"))); - +#ifdef ENABLE_MULTIPLE_NODES + if (IS_DISASTER_RECOVER_MODE && g_instance.pid_cxt.BarrierPreParsePID == 0) { + g_instance.pid_cxt.BarrierPreParsePID = initialize_util_thread(BARRIER_PREPARSE); + } +#endif pmState = PM_HOT_STANDBY; } } @@ -7858,8 +8179,10 @@ static void handle_promote_signal() gs_lock_test_and_set_64(&g_instance.stat_cxt.NodeStatResetTime, GetCurrentTimestamp()); if (GetHaShmemMode() != STANDBY_MODE) { ereport(LOG, (errmsg("Instance can't be promoted in none standby mode."))); - } else if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + } else if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { ereport(LOG, (errmsg("Instance can't be promoted in standby cluster"))); + } else if (t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby) { + ereport(LOG, (errmsg("Instance can't be promoted in hadr_main_standby mode"))); } else { /* Database Security: Support database audit */ if (t_thrd.walreceiverfuncs_cxt.WalRcv && @@ -7900,6 +8223,7 @@ static void handle_primary_signal(volatile HaShmemData* hashmdata) and max_wal_senders requires at least 2."))); else { hashmdata->current_mode = PRIMARY_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); UpdateOptsFile(); } } @@ -7918,6 +8242,7 @@ static void handle_standby_signal(volatile HaShmemData* hashmdata) pmState == PM_HOT_STANDBY || pmState == PM_WAIT_READONLY)) { hashmdata->current_mode = STANDBY_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); PMUpdateDBState(NEEDREPAIR_STATE, get_cur_mode(), get_cur_repl_num()); ereport(LOG, (errmsg("update gaussdb state file: db state(NEEDREPAIR_STATE), server mode(%s)", @@ -7931,7 +8256,40 @@ static void handle_standby_signal(volatile HaShmemData* hashmdata) } } +static void handle_cascade_standby_signal(volatile HaShmemData* hashmdata) +{ + if (g_instance.pid_cxt.StartupPID != 0 && + (pmState == PM_STARTUP || + pmState == PM_RECOVERY || + pmState == PM_HOT_STANDBY || + pmState == PM_WAIT_READONLY)) { + hashmdata->current_mode = STANDBY_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); + hashmdata->is_cascade_standby = true; + PMUpdateDBState(NEEDREPAIR_STATE, get_cur_mode(), get_cur_repl_num()); + ereport(LOG, + (errmsg("update gaussdb state file: db state(NEEDREPAIR_STATE), server mode(%s)", + wal_get_role_string(get_cur_mode())))); + /* + * wakeup startup process from sleep by signal, cause we are + * in standby 
mode, the signal has no specific affect. + */ + SendNotifySignal(NOTIFY_CASCADE_STANDBY, g_instance.pid_cxt.StartupPID); + UpdateOptsFile(); + } +} + #ifndef ENABLE_MULTIPLE_NODES +static bool CheckChangeRoleFileExist(const char *filename) +{ + struct stat stat_buf; + if (stat(filename, &stat_buf) != 0 || !S_ISREG(stat_buf.st_mode)) { + return false; + } else { + return true; + } +} + static bool CheckSignalByFile(const char *filename, void *infoPtr, size_t infoSize) { FILE* sofile = nullptr; @@ -8003,9 +8361,9 @@ static void handle_remove_member_signal(uint32 nodeID) } } -static bool CheckChangeRoleSignal(char *role, int len) +static bool CheckChangeRoleSignal() { - return CheckSignalByFile(ChangeRoleFile, role, len); + return CheckChangeRoleFileExist(ChangeRoleFile); } static int changeRole(const char *role) @@ -8013,6 +8371,11 @@ static int changeRole(const char *role) FILE *sofile = nullptr; int timeout = 60; /* seconds */ int ret = -1; + char changeStr[MAXPGPATH] = {0}; + int roleStatus = -1; + int groupStatus = -1; + int priorityStatus = -1; + errno_t rc; sofile = fopen(TimeoutFile, "rb"); if (sofile == nullptr) { ereport(WARNING, (errmsg("Open timeout file %s failed!", TimeoutFile))); @@ -8032,12 +8395,79 @@ static int changeRole(const char *role) const uint32 unit = 1000; uint32 mTimeout = (uint32)timeout * unit; ereport(LOG, (errmsg("The timeout of changing role is %d ms!", mTimeout))); - if (strcmp(role, "fo") == 0) { /* fo denotes follower role */ - ret = dcf_change_member_role(1, g_instance.attr.attr_storage.dcf_attr.dcf_node_id, - static_cast(2), mTimeout); - } else if (strcmp(role, "pa") == 0) { /* pa denotes passive role */ - ret = dcf_change_member_role(1, g_instance.attr.attr_storage.dcf_attr.dcf_node_id, - static_cast(4), mTimeout); + if (sscanf_s(role, "%d_%d_%d", &roleStatus, &groupStatus, &priorityStatus) != 3) { + ereport(WARNING, (errmsg("Content could not get all param for change role %s", role))); + return -1; + } + if (roleStatus == 0) { /* fo denotes follower role */ + if (groupStatus == -1 && priorityStatus == -1) { + ret = dcf_change_member_role(1, g_instance.attr.attr_storage.dcf_attr.dcf_node_id, + static_cast(2), mTimeout); + } else if (groupStatus == -1 && priorityStatus != -1) { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"priority\":%d,\"role\":\"FOLLOWER\"}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, priorityStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } else if (groupStatus != -1 && priorityStatus == -1) { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"group\":%d,\"role\":\"FOLLOWER\"}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, groupStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } else { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"group\":%d,\"priority\":%d,\"role\":\"FOLLOWER\"}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, groupStatus, priorityStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } + } else if (roleStatus == 1) { /* pa denotes passive role */ + if (groupStatus == -1 && priorityStatus == -1) { + ret = dcf_change_member_role(1, g_instance.attr.attr_storage.dcf_attr.dcf_node_id, + static_cast(4), mTimeout); + } else if (groupStatus == -1 && priorityStatus != -1) { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + 
"[{\"stream_id\":1,\"node_id\":%d, \"priority\":%d,\"role\":\"PASSIVE\"}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, priorityStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } else if (groupStatus != -1 && priorityStatus == -1) { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"group\":%d,\"role\":\"PASSIVE\"}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, groupStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } else { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"group\":%d,\"priority\":%d,\"role\":\"PASSIVE\"}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, groupStatus, priorityStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } + } else if (roleStatus == -1) { + if (groupStatus == -1 && priorityStatus == -1) { + ereport(WARNING, (errmsg("Nothing changed when change role been called %s", role))); + return 0; + } else if (groupStatus == -1 && priorityStatus != -1) { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"priority\":%d}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, priorityStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } else if (groupStatus != -1 && priorityStatus == -1) { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"group\":%d}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, groupStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } else { + rc = snprintf_s(changeStr, MAXPGPATH, MAXPGPATH - 1, + "[{\"stream_id\":1,\"node_id\":%d, \"group\":%d,\"priority\":%d}]", + g_instance.attr.attr_storage.dcf_attr.dcf_node_id, groupStatus, priorityStatus); + securec_check_ss_c(rc, "\0", "\0"); + ret = dcf_change_member(changeStr, 60000); + } } else { ereport(WARNING, (errmsg("Content prefix read from role file can not be recognized %s", role))); return -1; @@ -8354,7 +8784,7 @@ static void sigusr1_handler(SIGNAL_ARGS) } if ((mode = CheckSwitchoverSignal()) != 0 && WalRcvIsOnline() && DataRcvIsOnline() && (pmState == PM_STARTUP || pmState == PM_RECOVERY || pmState == PM_HOT_STANDBY || pmState == PM_WAIT_READONLY)) { - if (!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { ereport(LOG, (errmsg("to do switchover"))); /* Label the standby to do switchover */ t_thrd.walreceiverfuncs_cxt.WalRcv->node_state = (ClusterNodeState)mode; @@ -8389,6 +8819,11 @@ static void sigusr1_handler(SIGNAL_ARGS) handle_standby_signal(hashmdata); } + /* If it is cascade standby signal, then set HaShmData and send sigusr2 to startup process */ + if (CheckCascadeStandbySignal()) { + handle_cascade_standby_signal(hashmdata); + } + if (CheckPostmasterSignal(PMSIGNAL_UPDATE_NORMAL)) { PMUpdateDBState(NORMAL_STATE, get_cur_mode(), get_cur_repl_num()); ereport(LOG, @@ -8414,8 +8849,6 @@ static void sigusr1_handler(SIGNAL_ARGS) #ifndef ENABLE_MULTIPLE_NODES uint32 nodeID = 0; NewNodeInfo nodeinfo; - char role[3] = {0}; - role[2] = '\0'; RunModeParam param; if (g_instance.attr.attr_storage.dcf_attr.enable_dcf && CheckAddMemberSignal(&nodeinfo) && t_thrd.dcf_cxt.dcfCtxInfo->isDcfStarted) { @@ -8427,8 +8860,9 @@ static void sigusr1_handler(SIGNAL_ARGS) } if (g_instance.attr.attr_storage.dcf_attr.enable_dcf && - 
CheckChangeRoleSignal(role, 2) && t_thrd.dcf_cxt.dcfCtxInfo->isDcfStarted) { - handle_change_role_signal(role); + CheckChangeRoleSignal() && t_thrd.dcf_cxt.dcfCtxInfo->isDcfStarted && + !g_instance.comm_cxt.isNeedChangeRole) { + g_instance.comm_cxt.isNeedChangeRole = true; } if (g_instance.attr.attr_storage.dcf_attr.enable_dcf && @@ -8437,8 +8871,10 @@ static void sigusr1_handler(SIGNAL_ARGS) } #endif +#ifndef ENABLE_LITE_MODE #if defined (ENABLE_MULTIPLE_NODES) || defined (ENABLE_PRIVATEGAUSS) check_and_process_hotpatch(); +#endif #endif gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); @@ -8613,6 +9049,7 @@ static void StartRbWorker(void) /* Autovac workers are not dead_end and need a child slot */ bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = RBWORKER; bn->pid = initialize_util_thread(RBWORKER, bn); t_thrd.proc_cxt.MyPMChildSlot = 0; if (bn->pid > 0) { @@ -8629,6 +9066,7 @@ static void StartRbWorker(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; } else { ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } @@ -8676,6 +9114,7 @@ static void StartTxnSnapWorker(void) /* Autovac workers are not dead_end and need a child slot */ bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = TXNSNAP_WORKER; bn->pid = initialize_util_thread(TXNSNAP_WORKER, bn); t_thrd.proc_cxt.MyPMChildSlot = 0; if (bn->pid > 0) { @@ -8692,6 +9131,7 @@ static void StartTxnSnapWorker(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; } else { ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } @@ -8739,6 +9179,7 @@ static void StartAutovacuumWorker(void) /* Autovac workers are not dead_end and need a child slot */ bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = AUTOVACUUM_WORKER; bn->pid = initialize_util_thread(AUTOVACUUM_WORKER, bn); t_thrd.proc_cxt.MyPMChildSlot = 0; if (bn->pid > 0) { @@ -8755,6 +9196,7 @@ static void StartAutovacuumWorker(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; } else ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } @@ -8861,6 +9303,7 @@ static void StartUndoWorker(void) /* UndoWorkers need a child slot */ bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = UNDO_WORKER; bn->pid = initialize_util_thread(UNDO_WORKER, bn); t_thrd.proc_cxt.MyPMChildSlot = 0; @@ -8878,6 +9321,7 @@ static void StartUndoWorker(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; } else ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } @@ -9063,6 +9507,7 @@ static void StartPgjobWorker(void) GenerateCancelKey(false); bn->cancel_key = t_thrd.proc_cxt.MyCancelKey; bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = JOB_WORKER; bn->pid = initialize_util_thread(JOB_WORKER); t_thrd.proc_cxt.MyPMChildSlot = 0; if (bn->pid > 0) { @@ -9079,6 +9524,7 @@ static void StartPgjobWorker(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; bn = NULL; } else ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); @@ -9127,6 +9573,7 @@ static void StartPoolCleaner(void) GenerateCancelKey(false); bn->cancel_key = t_thrd.proc_cxt.MyCancelKey; bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = COMM_POOLER_CLEAN; bn->pid = 
initialize_util_thread(COMM_POOLER_CLEAN); g_instance.pid_cxt.CommPoolerCleanPID = bn->pid; t_thrd.proc_cxt.MyPMChildSlot = 0; @@ -9144,6 +9591,7 @@ static void StartPoolCleaner(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; bn = NULL; } else ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); @@ -9176,6 +9624,7 @@ static void StartCleanStatement(void) /* Autovac workers are not dead_end and need a child slot */ bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = TRACK_STMT_CLEANER; bn->pid = initialize_util_thread(TRACK_STMT_CLEANER, bn); t_thrd.proc_cxt.MyPMChildSlot = 0; if (bn->pid > 0) { @@ -9188,6 +9637,7 @@ static void StartCleanStatement(void) (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; } else { ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } @@ -9336,6 +9786,7 @@ static ThreadId StartCatchupWorker(void) /* Data catch-up are not dead_end and need a child slot */ bn->child_slot = t_thrd.proc_cxt.MyPMChildSlot = slot; + bn->role = CATCHUP; bn->pid = initialize_util_thread(CATCHUP); t_thrd.proc_cxt.MyPMChildSlot = 0; if (bn->pid > 0) { @@ -9352,6 +9803,7 @@ static ThreadId StartCatchupWorker(void) */ (void)ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; bn = NULL; } else ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); @@ -9773,11 +10225,17 @@ void HaShmemInit(void) int i = 0; t_thrd.postmaster_cxt.HaShmData->current_mode = NORMAL_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); for (i = 1; i < DOUBLE_MAX_REPLNODE_NUM; i++) { t_thrd.postmaster_cxt.HaShmData->disconnect_count[i] = 0; t_thrd.postmaster_cxt.HaShmData->repl_reason[i] = NONE_REBUILD; } SpinLockInit(&t_thrd.postmaster_cxt.HaShmData->mutex); + t_thrd.postmaster_cxt.HaShmData->current_repl = 1; + t_thrd.postmaster_cxt.HaShmData->prev_repl = 0; + t_thrd.postmaster_cxt.HaShmData->is_cascade_standby = false; + t_thrd.postmaster_cxt.HaShmData->is_cross_region = false; + t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby = false; } if (!IsUnderPostmaster) { gs_set_hs_shm_data(t_thrd.postmaster_cxt.HaShmData); @@ -9792,11 +10250,15 @@ static bool IsChannelAdapt(Port* port, ReplConnInfo* repl) char local_ip[IP_LEN] = {0}; char remote_ip[IP_LEN] = {0}; char* result = NULL; + char* local_ipNoZone = NULL; + char* remote_ipNoZone = NULL; + char local_ipNoZoneData[IP_LEN] = {0}; + char remote_ipNoZoneData[IP_LEN] = {0}; Assert(repl != NULL); if (AF_INET6 == laddr->sa_family) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in*)laddr)->sin_addr, 128, local_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6*)laddr)->sin6_addr, 128, local_ip, IP_LEN); if (NULL == result) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } @@ -9808,7 +10270,7 @@ static bool IsChannelAdapt(Port* port, ReplConnInfo* repl) } if (AF_INET6 == raddr->sa_family) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in*)raddr)->sin_addr, 128, remote_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6*)raddr)->sin6_addr, 128, remote_ip, IP_LEN); if (NULL == result) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } @@ -9819,7 +10281,11 @@ static bool IsChannelAdapt(Port* port, ReplConnInfo* repl) } } - if (0 == strcmp(local_ip, repl->localhost) && 0 == strcmp(remote_ip, repl->remotehost)) { + /* remove any '%zone' 
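
Several hunks here fix a latent IPv6 bug: an `AF_INET6` address was being decoded through `struct sockaddr_in`, so `sin_addr` was read where `sin6_addr` actually lives. The sketch below shows the family-aware pattern in isolation, using the portable `inet_ntop(3)` in place of the backend's `inet_net_ntop`.

```
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Decode either address family from a generic sockaddr.
 * Returns NULL on failure, as inet_ntop(3) does. */
static const char *SockaddrToIp(const struct sockaddr *sa, char *buf, socklen_t len)
{
    if (sa->sa_family == AF_INET) {
        const struct sockaddr_in *v4 = (const struct sockaddr_in *)sa;
        return inet_ntop(AF_INET, &v4->sin_addr, buf, len);
    }
    if (sa->sa_family == AF_INET6) {
        /* Must go through sockaddr_in6: sin6_addr sits at a different
         * offset than sockaddr_in's sin_addr, which was the bug fixed here. */
        const struct sockaddr_in6 *v6 = (const struct sockaddr_in6 *)sa;
        return inet_ntop(AF_INET6, &v6->sin6_addr, buf, len);
    }
    return NULL;
}
```
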
part from an IPv6 address string */ + local_ipNoZone = remove_ipv6_zone(repl->localhost, local_ipNoZoneData, IP_LEN); + remote_ipNoZone = remove_ipv6_zone(repl->remotehost, remote_ipNoZoneData, IP_LEN); + + if (0 == strcmp(local_ip, local_ipNoZone) && 0 == strcmp(remote_ip, remote_ipNoZone)) { return true; } else { ereport(DEBUG1, (errmsg("connect local ip %s, connect remote ip %s", local_ip, remote_ip))); @@ -9840,7 +10306,7 @@ bool IsFromLocalAddr(Port* port) /* parse the local ip address */ if (AF_INET6 == local_addr->sa_family) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in*)local_addr)->sin_addr, 128, local_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6*)local_addr)->sin6_addr, 128, local_ip, IP_LEN); } else if (AF_INET == local_addr->sa_family) { result = inet_net_ntop(AF_INET, &((struct sockaddr_in*)local_addr)->sin_addr, 32, local_ip, IP_LEN); } @@ -9851,7 +10317,7 @@ bool IsFromLocalAddr(Port* port) /* parse the remote ip address */ if (AF_INET6 == remote_addr->sa_family) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in*)remote_addr)->sin_addr, 128, remote_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6*)remote_addr)->sin6_addr, 128, remote_ip, IP_LEN); } else if (AF_INET == remote_addr->sa_family) { result = inet_net_ntop(AF_INET, &((struct sockaddr_in*)remote_addr)->sin_addr, 32, remote_ip, IP_LEN); } @@ -9879,7 +10345,7 @@ bool IsLocalAddr(Port* port) char* result = NULL; if (AF_INET6 == laddr->sa_family) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in*)laddr)->sin_addr, 128, local_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6*)laddr)->sin6_addr, 128, local_ip, IP_LEN); } else if (AF_INET == laddr->sa_family) { result = inet_net_ntop(AF_INET, &((struct sockaddr_in*)laddr)->sin_addr, 32, local_ip, IP_LEN); } @@ -10075,8 +10541,10 @@ static bool IsLocalPort(Port* port) if (AF_UNIX == laddr->sa_family) { return true; - } else { + } else if (AF_INET == laddr->sa_family) { sockport = ntohs(((struct sockaddr_in*)laddr)->sin_port); + } else if (AF_INET6 == laddr->sa_family) { + sockport = ntohs(((struct sockaddr_in6*)laddr)->sin6_port); } if (sockport == g_instance.attr.attr_network.PostPortNumber) { @@ -10108,11 +10576,13 @@ static void SetHaShmemData() default: break; } + g_instance.global_sysdbcache.RefreshHotStandby(); for (i = 1; i < MAX_REPLNODE_NUM; i++) { if (t_thrd.postmaster_cxt.ReplConnArray[i] != NULL) { repl_list_num++; - if (t_thrd.postmaster_cxt.ReplConnArray[i]->isCrossRegion && !IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if (t_thrd.postmaster_cxt.ReplConnArray[i]->isCrossRegion && + !IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { hashmdata->is_cross_region = true; } } @@ -10138,8 +10608,8 @@ static bool IsAlreadyListen(const char* ip, int port) for (listen_index = 0; listen_index != MAXLISTEN; ++listen_index) { if (t_thrd.postmaster_cxt.ListenSocket[listen_index] != PGINVALID_SOCKET) { - struct sockaddr_in saddr; - socklen_t slen; + struct sockaddr_storage saddr; + socklen_t slen = 0; char* result = NULL; rc = memset_s(&saddr, sizeof(saddr), 0, sizeof(saddr)); securec_check(rc, "\0", "\0"); @@ -10152,36 +10622,48 @@ static bool IsAlreadyListen(const char* ip, int port) continue; } - if (AF_INET6 == saddr.sin_family) { - result = inet_net_ntop(AF_INET6, &saddr.sin_addr, 128, sock_ip, IP_LEN); + if (AF_INET6 == ((struct sockaddr *) &saddr)->sa_family) { + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *) &saddr)->sin6_addr, 128, sock_ip, IP_LEN); if (NULL 
== result) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } - } else if (AF_INET == saddr.sin_family) { - result = inet_net_ntop(AF_INET, &saddr.sin_addr, 32, sock_ip, IP_LEN); + } else if (AF_INET == ((struct sockaddr *) &saddr)->sa_family) { + result = inet_net_ntop(AF_INET, &((struct sockaddr_in *) &saddr)->sin_addr, 32, sock_ip, IP_LEN); if (NULL == result) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } - } else if (AF_UNIX == saddr.sin_family) { + } else if (AF_UNIX == ((struct sockaddr *) &saddr)->sa_family) { continue; } - if ((0 == strcmp(ip, sock_ip)) && (port == ntohs(saddr.sin_port))) { - return true; - } + /* Check if all IP addresses of local host had been listened already, * which was set using '*' in postgresql.conf. * :: represents all IPs for IPv6, 0.0.0.0 represents all IPs for IPv4. */ - if (saddr.sin_family == AF_INET6) { - if ((strcmp(sock_ip, "::") == 0) && (port == ntohs(saddr.sin_port))) { + if (((struct sockaddr *) &saddr)->sa_family == AF_INET6) { + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone((char *)ip, ipNoZoneData, IP_LEN); + if ((0 == strcmp(ipNoZone, sock_ip)) && (port == ntohs(((struct sockaddr_in6 *) &saddr)->sin6_port))) { return true; } - } else if (saddr.sin_family == AF_INET) { - if ((strcmp(sock_ip, "0.0.0.0") == 0) && (port == ntohs(saddr.sin_port))) { + + if ((strcmp(sock_ip, "::") == 0) && (port == ntohs(((struct sockaddr_in6 *) &saddr)->sin6_port))) { + return true; + } + } else if (((struct sockaddr *) &saddr)->sa_family == AF_INET) { + if ((0 == strcmp(ip, sock_ip)) && (port == ntohs(((struct sockaddr_in *) &saddr)->sin_port))) { + return true; + } + + if ((strcmp(sock_ip, "0.0.0.0") == 0) && (port == ntohs(((struct sockaddr_in *) &saddr)->sin_port))) { return true; } } else { - ereport(WARNING, (errmsg("Unknown network protocal family type: %d", saddr.sin_family))); + ereport(WARNING, + (errmsg("Unknown network protocal family type: %d", ((struct sockaddr *) &saddr)->sa_family))); } } } @@ -10204,6 +10686,8 @@ bool CheckSockAddr(struct sockaddr* sock_addr, const char* szIP, int port) int portNumber = 0; bool cmpResult = false; char* result = NULL; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; if ((NULL == sock_addr) || (NULL == szIP) || (0 == port)) { ereport(LOG, (errmsg("invalid socket information or IP string or port"))); @@ -10239,15 +10723,18 @@ bool CheckSockAddr(struct sockaddr* sock_addr, const char* szIP, int port) break; case AF_INET6: + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone((char *)szIP, ipNoZoneData, IP_LEN); + ipv6addr = (struct sockaddr_in6*)sock_addr; result = inet_net_ntop(AF_INET6, &(ipv6addr->sin6_addr), 128, IPstr, MAX_IP_STR_LEN - 1); if (NULL == result) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } portNumber = ntohs(ipv6addr->sin6_port); - if ((((0 == strncmp(szIP, LOCAL_HOST, MAX_IP_STR_LEN)) && + if ((((0 == strncmp(ipNoZone, LOCAL_HOST, MAX_IP_STR_LEN)) && (0 == strncmp(IPstr, LOOP_IPV6_IP, MAX_IP_STR_LEN))) || - (0 == strncmp(IPstr, szIP, MAX_IP_STR_LEN))) && + (0 == strncmp(IPstr, ipNoZone, MAX_IP_STR_LEN))) && (port == portNumber)) { cmpResult = true; } @@ -10481,7 +10968,7 @@ static void ListenSocketRegulation(void) static DbState get_share_storage_node_dbstate_sub() { - if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if 
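
`IsAlreadyListen()` now reads socket names into `struct sockaddr_storage` rather than `sockaddr_in`, and `IsLocalPort()` learns to pull the port out of `sockaddr_in6`. A condensed, standalone version of that pattern, assuming nothing beyond POSIX:

```
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Return the bound port of a socket, or -1 on error.
 * sockaddr_storage is large and aligned enough for every family,
 * so an AF_INET6 name cannot be truncated the way it could be
 * when read into a sockaddr_in. */
static int GetBoundPort(int fd)
{
    struct sockaddr_storage ss;
    socklen_t slen = sizeof(ss);
    memset(&ss, 0, sizeof(ss));
    if (getsockname(fd, (struct sockaddr *)&ss, &slen) < 0)
        return -1;
    switch (((struct sockaddr *)&ss)->sa_family) {
        case AF_INET:
            return ntohs(((struct sockaddr_in *)&ss)->sin_port);
        case AF_INET6:
            return ntohs(((struct sockaddr_in6 *)&ss)->sin6_port);
        default:
            return -1; /* e.g. AF_UNIX: no port */
    }
}
```
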
(IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { return NORMAL_STATE; } else if (WalRcvIsOnline()) { return NORMAL_STATE; @@ -10497,9 +10984,9 @@ DbState get_local_dbstate_sub(WalRcvData* walrcv, ServerMode mode) bool has_build_reason = true; bool share_storage_has_no_build_reason = (IS_SHARED_STORAGE_MODE && t_thrd.postmaster_cxt.HaShmData->repl_reason[t_thrd.postmaster_cxt.HaShmData->current_repl] == NONE_REBUILD) || - (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE && + (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE && t_thrd.postmaster_cxt.HaShmData->repl_reason[t_thrd.postmaster_cxt.HaShmData->current_repl] == CONNECT_REBUILD); - bool disater_recovery_has_no_build_reason = (IS_DISASTER_RECOVER_MODE && + bool disater_recovery_has_no_build_reason = (IS_OBS_DISASTER_RECOVER_MODE && t_thrd.postmaster_cxt.HaShmData->repl_reason[t_thrd.postmaster_cxt.HaShmData->current_repl] == NONE_REBUILD); if ((t_thrd.postmaster_cxt.HaShmData->repl_reason[t_thrd.postmaster_cxt.HaShmData->current_repl] == NONE_REBUILD && walrcv != NULL && walrcv->isRuning && @@ -10522,7 +11009,7 @@ DbState get_local_dbstate_sub(WalRcvData* walrcv, ServerMode mode) return CATCHUP_STATE; } else if (IS_SHARED_STORAGE_MODE) { return get_share_storage_node_dbstate_sub(); - } else if (!WalRcvIsOnline() && !IS_DISASTER_RECOVER_MODE) { + } else if (!WalRcvIsOnline() && !IS_OBS_DISASTER_RECOVER_MODE) { return STARTING_STATE; } else { return NORMAL_STATE; @@ -10601,7 +11088,8 @@ static void PMReadDBStateFile(GaussState* state) ereport(LOG, (errmsg("%s: parameter state is null in PMReadDBStateFile()", progname))); return; } - + int rc = memset_s(state, sizeof(GaussState), 0, sizeof(GaussState)); + securec_check(rc, "", ""); statef = fopen(gaussdb_state_file, "r"); if (NULL == statef) { ereport(LOG, @@ -10669,6 +11157,39 @@ static void PMSetDBStateFile(GaussState* state) ereport(LOG, (errmsg("can't rename \"%s\" to \"%s\": %m", temppath, gaussdb_state_file))); } +static bool IsDBStateFileExist() +{ + return access(gaussdb_state_file, F_OK) != -1; +} + +static void PMInitDBStateFile() +{ + GaussState state; + int rc = memset_s(&state, sizeof(state), 0, sizeof(state)); + securec_check(rc, "", ""); + state.conn_num = t_thrd.postmaster_cxt.HaShmData->repl_list_num; + state.mode = t_thrd.postmaster_cxt.HaShmData->current_mode; + state.state = STARTING_STATE; + state.lsn = 0; + state.term = 0; + state.sync_stat = false; + state.ha_rebuild_reason = NONE_REBUILD; + state.current_connect_idx = 1; + if (IS_CN_DISASTER_RECOVER_MODE && IsDBStateFileExist()) { + GaussState temp_state; + PMReadDBStateFile(&temp_state); + t_thrd.postmaster_cxt.HaShmData->current_repl = temp_state.current_connect_idx; + t_thrd.postmaster_cxt.HaShmData->prev_repl = temp_state.current_connect_idx; + state.current_connect_idx = temp_state.current_connect_idx; + } + + PMSetDBStateFile(&state); + ereport(LOG, + (errmsg("create gaussdb state file success: db state(STARTING_STATE), server mode(%s), connection index(%d)", + wal_get_role_string(t_thrd.postmaster_cxt.HaShmData->current_mode), state.current_connect_idx))); +} + + /* * according to input parameters, update the gaussdb state file */ @@ -10720,14 +11241,16 @@ static void PMUpdateDBStateHaRebuildReason(void) state.state = NEEDREPAIR_STATE; } else { state.state = NORMAL_STATE; + state.current_connect_idx = hashmdata->current_repl; } PMSetDBStateFile(&state); ereport(LOG, (errmsg("update gaussdb state file: build reason(%s), " - "db state(%s), server mode(%s)", + "db state(%s), server mode(%s), current connect index(%d)", 
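
`PMReadDBStateFile()` now zeroes the caller's `GaussState` before reading, so a short or missing file cannot leave stale fields behind, and the writer keeps the write-temp-then-rename shape (visible in the rename error message) that `PMInitDBStateFile()` relies on at startup. A minimal sketch of both halves, with a stand-in struct in place of `GaussState`:

```
#include <stdio.h>
#include <string.h>

/* Stand-in for the patch's GaussState; fields are illustrative. */
typedef struct DemoState { int mode; int state; int current_connect_idx; } DemoState;

/* Zero the caller's struct first so a short or missing file can never
 * leave uninitialized fields (the PMReadDBStateFile fix). */
static int ReadStateFile(const char *path, DemoState *st)
{
    memset(st, 0, sizeof(*st));
    FILE *f = fopen(path, "rb");
    if (f == NULL)
        return -1;
    size_t n = fread(st, sizeof(*st), 1, f);
    fclose(f);
    return (n == 1) ? 0 : -1;
}

/* Write to a temp file, then rename(2): readers always see a complete file. */
static int WriteStateFile(const char *path, const DemoState *st)
{
    char tmp[1024];
    snprintf(tmp, sizeof(tmp), "%s.temp", path);
    FILE *f = fopen(tmp, "wb");
    if (f == NULL || fwrite(st, sizeof(*st), 1, f) != 1) {
        if (f != NULL)
            fclose(f);
        return -1;
    }
    fclose(f);
    return rename(tmp, path);
}
```
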
wal_get_rebuild_reason_string(reason), wal_get_db_state_string(state.state), - wal_get_role_string(get_cur_mode())))); + wal_get_role_string(get_cur_mode()), + state.current_connect_idx))); } static int init_stream_comm() @@ -10941,7 +11464,7 @@ void CleanSystemCaches(bool is_in_read_command) (is_in_read_command ? 1 : 0)))); if (IsTransactionOrTransactionBlock() || !is_in_read_command) { - InvalidateSystemCaches(); + InvalidateSessionSystemCaches(); } } } @@ -10949,34 +11472,53 @@ void CleanSystemCaches(bool is_in_read_command) static void check_and_reset_ha_listen_port(void) { int j; + int repl_list_num = 0; + bool refreshReplList = false; + bool needToRestart = false; + bool refreshListenSocket = false; /* * when Ha replconninfo have changed and current_mode is not NORMAL, * dynamically modify the ha socket. */ for (j = 1; j < MAX_REPLNODE_NUM; j++) { - if (t_thrd.postmaster_cxt.ReplConnChanged[j] || t_thrd.postmaster_cxt.CrossClusterReplConnChanged[j]) - break; - } - if (j < MAX_REPLNODE_NUM) { - int i, repl_list_num = 0; - - CreateHaListenSocket(); - - repl_list_num = 0; - for (i = 1; i < MAX_REPLNODE_NUM; i++) { - t_thrd.postmaster_cxt.ReplConnChanged[i] = false; - t_thrd.postmaster_cxt.CrossClusterReplConnChanged[i] = false; - if (t_thrd.postmaster_cxt.ReplConnArray[i] != NULL) - repl_list_num++; - if (t_thrd.postmaster_cxt.CrossClusterReplConnArray[i] != NULL) - repl_list_num++; + if (t_thrd.postmaster_cxt.ReplConnChangeType[j] == ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT || + t_thrd.postmaster_cxt.ReplConnChangeType[j] == ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT || + t_thrd.postmaster_cxt.CrossClusterReplConnChanged[j]) { + refreshReplList = true; } - /* send SIGTERM to end process senders and receiver */ - t_thrd.postmaster_cxt.HaShmData->repl_list_num = repl_list_num; + if (t_thrd.postmaster_cxt.ReplConnChangeType[j] == OLD_REPL_CHANGE_IP_OR_PORT || + t_thrd.postmaster_cxt.CrossClusterReplConnChanged[j]) { + needToRestart = true; + refreshListenSocket = true; + } + if (t_thrd.postmaster_cxt.ReplConnChangeType[j] == ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT) { + refreshListenSocket = true; + } + + t_thrd.postmaster_cxt.ReplConnChangeType[j] = NO_CHANGE; + t_thrd.postmaster_cxt.CrossClusterReplConnChanged[j] = false; + + if (t_thrd.postmaster_cxt.ReplConnArray[j] != NULL) + repl_list_num++; + if (t_thrd.postmaster_cxt.CrossClusterReplConnArray[j] != NULL) + repl_list_num++; + } + + if (refreshReplList) { + t_thrd.postmaster_cxt.HaShmData->repl_list_num = repl_list_num; + } + + if (refreshListenSocket) { + CreateHaListenSocket(); + ListenSocketRegulation(); + } + + if (needToRestart) { + /* send SIGTERM to end process senders and receiver */ (void)SignalSomeChildren(SIGTERM, BACKEND_TYPE_WALSND); (void)SignalSomeChildren(SIGTERM, BACKEND_TYPE_DATASND); if (g_instance.pid_cxt.WalRcvWriterPID != 0) @@ -10987,17 +11529,16 @@ static void check_and_reset_ha_listen_port(void) signal_child(g_instance.pid_cxt.DataRcvWriterPID, SIGTERM); if (g_instance.pid_cxt.DataReceiverPID != 0) signal_child(g_instance.pid_cxt.DataReceiverPID, SIGTERM); - - ListenSocketRegulation(); } #ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.postmaster_cxt.HaShmData != NULL && - t_thrd.postmaster_cxt.HaShmData->repl_list_num == 0 && - t_thrd.postmaster_cxt.HaShmData->current_mode == PRIMARY_MODE) { - t_thrd.postmaster_cxt.HaShmData->current_mode = NORMAL_MODE; - SetServerMode(NORMAL_MODE); - } + if (t_thrd.postmaster_cxt.HaShmData != NULL && + t_thrd.postmaster_cxt.HaShmData->repl_list_num == 0 && + 
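
`check_and_reset_ha_listen_port()` no longer tears down senders and receivers on every replconninfo edit. It classifies each slot's change and derives three independent actions: refresh the repl list count, rebuild the listen sockets, or restart the WAL/data threads. The sketch below condenses that accumulation; the enum values come from the hunk, the surrounding thread-local types are simplified away, and the cross-cluster flag is folded out for brevity.

```
#include <stdbool.h>

typedef enum {
    NO_CHANGE,
    ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT, /* slot added, local endpoint unchanged */
    ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT, /* slot added with a new local endpoint */
    OLD_REPL_CHANGE_IP_OR_PORT                 /* an existing slot was rewritten       */
} ReplChangeType;

typedef struct {
    bool refreshReplList;     /* update HaShmData->repl_list_num               */
    bool refreshListenSocket; /* CreateHaListenSocket + ListenSocketRegulation */
    bool needToRestart;       /* SIGTERM the WAL/data senders and receivers    */
} HaActions;

static HaActions ClassifyReplChanges(const ReplChangeType *chg, int n)
{
    HaActions a = { false, false, false };
    for (int i = 0; i < n; i++) {
        if (chg[i] == ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT ||
            chg[i] == ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT)
            a.refreshReplList = true;
        if (chg[i] == OLD_REPL_CHANGE_IP_OR_PORT) {
            a.needToRestart = true;
            a.refreshListenSocket = true;
        }
        if (chg[i] == ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT)
            a.refreshListenSocket = true;
    }
    return a;
}
```
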
t_thrd.postmaster_cxt.HaShmData->current_mode == PRIMARY_MODE) { + t_thrd.postmaster_cxt.HaShmData->current_mode = NORMAL_MODE; + g_instance.global_sysdbcache.RefreshHotStandby(); + SetServerMode(NORMAL_MODE); + } #endif return; @@ -11045,6 +11586,12 @@ template static void SetAuxType() { switch (thread_role) { + case PARALLEL_DECODE: + t_thrd.bootstrap_cxt.MyAuxProcType = ParallelDecodeProcess; + break; + case LOGICAL_READ_RECORD: + t_thrd.bootstrap_cxt.MyAuxProcType = LogicalReadRecord; + break; case PAGEREDO: t_thrd.bootstrap_cxt.MyAuxProcType = PageRedoProcess; break; @@ -11090,6 +11637,9 @@ static void SetAuxType() case PAGEWRITER_THREAD: t_thrd.bootstrap_cxt.MyAuxProcType = PageWriterProcess; break; + case PAGEREPAIR_THREAD: + t_thrd.bootstrap_cxt.MyAuxProcType = PageRepairProcess; + break; case THREADPOOL_LISTENER: t_thrd.bootstrap_cxt.MyAuxProcType = TpoolListenerProcess; break; @@ -11106,6 +11656,9 @@ static void SetAuxType() t_thrd.bootstrap_cxt.MyAuxProcType = XlogCopyBackendProcess; break; #ifdef ENABLE_MULTIPLE_NODES + case BARRIER_PREPARSE: + t_thrd.bootstrap_cxt.MyAuxProcType = BarrierPreParseBackendProcess; + break; case TS_COMPACTION: t_thrd.bootstrap_cxt.MyAuxProcType = TsCompactionProcess; break; @@ -11167,6 +11720,7 @@ void SetExtraThreadInfo(knl_thread_arg* arg) t_thrd.bgworker_cxt.bgwcontext = ((BackgroundWorkerArgs *)arg->payload)->bgwcontext; t_thrd.bgworker_cxt.bgworker = ((BackgroundWorkerArgs *)arg->payload)->bgworker; t_thrd.bgworker_cxt.bgworkerId = ((BackgroundWorkerArgs *)arg->payload)->bgworkerId; + pfree_ext(arg->payload); } default: break; @@ -11231,42 +11785,43 @@ int GaussDbAuxiliaryThreadMain(knl_thread_arg* arg) SetProcessingMode(BootstrapProcessing); u_sess->attr.attr_common.IgnoreSystemIndexes = true; BaseInit(); - + BindRedoThreadToSpecifiedCpu(thread_role); /* * When we are an auxiliary process, we aren't going to do the full * InitPostgres pushups, but there are a couple of things that need to get * lit up even in an auxiliary process. */ - if (IsUnderPostmaster) { - /* - * Create a PGPROC so we can use LWLocks. In the EXEC_BACKEND case, - * this was already done by SubPostmasterMain(). - */ + if (thread_role != LOGICAL_READ_RECORD && thread_role != PARALLEL_DECODE) { + if (IsUnderPostmaster) { + /* + * Create a PGPROC so we can use LWLocks. In the EXEC_BACKEND case, + * this was already done by SubPostmasterMain(). + */ #ifndef EXEC_BACKEND - InitAuxiliaryProcess(); + InitAuxiliaryProcess(); #endif - /* - * Assign the ProcSignalSlot for an auxiliary process. Since it - * doesn't have a BackendId, the slot is statically allocated based on - * the auxiliary process type (MyAuxProcType). Backends use slots - * indexed in the range from 1 to g_instance.shmem_cxt.MaxBackends (inclusive), so we use - * g_instance.shmem_cxt.MaxBackends + 1 as the base index of the slot for an - * auxiliary process. - */ - int index = GetAuxProcEntryIndex(g_instance.shmem_cxt.MaxBackends + 1); + /* + * Assign the ProcSignalSlot for an auxiliary process. Since it + * doesn't have a BackendId, the slot is statically allocated based on + * the auxiliary process type (MyAuxProcType). Backends use slots + * indexed in the range from 1 to g_instance.shmem_cxt.MaxBackends (inclusive), so we use + * g_instance.shmem_cxt.MaxBackends + 1 as the base index of the slot for an + * auxiliary process. 
+ */ + int index = GetAuxProcEntryIndex(g_instance.shmem_cxt.MaxBackends + 1); - ProcSignalInit(index); + ProcSignalInit(index); - /* finish setting up bufmgr.c */ - InitBufferPoolBackend(); + /* finish setting up bufmgr.c */ + InitBufferPoolBackend(); - /* register a shutdown callback for LWLock cleanup */ - on_shmem_exit(ShutdownAuxiliaryProcess, 0); + /* register a shutdown callback for LWLock cleanup */ + on_shmem_exit(ShutdownAuxiliaryProcess, 0); + } + pgstat_initialize(); + pgstat_bestart(); } - - pgstat_initialize(); - pgstat_bestart(); /* * XLOG operations */ @@ -11306,6 +11861,16 @@ int GaussDbAuxiliaryThreadMain(knl_thread_arg* arg) proc_exit(1); /* should never return */ break; + case LOGICAL_READ_RECORD: { + LogicalReadWorkerMain(arg->payload); + proc_exit(1); /* should never return */ + } break; + + case PARALLEL_DECODE: { + ParallelDecodeWorkerMain(arg->payload); + proc_exit(1); /* should never return */ + } break; + case CHECKPOINT_THREAD: /* don't set signals, checkpointer has its own agenda */ CheckpointerMain(); @@ -11360,6 +11925,11 @@ int GaussDbAuxiliaryThreadMain(knl_thread_arg* arg) proc_exit(1); break; + case PAGEREPAIR_THREAD: + PageRepairMain(); + proc_exit(1); + break; + case THREADPOOL_LISTENER: TpoolListenerMain(t_thrd.threadpool_cxt.listener); proc_exit(1); @@ -11385,6 +11955,10 @@ int GaussDbAuxiliaryThreadMain(knl_thread_arg* arg) proc_exit(1); break; #ifdef ENABLE_MULTIPLE_NODES + case BARRIER_PREPARSE: + BarrierPreParseMain(); + proc_exit(1); + break; case TS_COMPACTION: CompactionProcess::compaction_main(); proc_exit(1); @@ -11441,16 +12015,8 @@ int GaussDbThreadMain(knl_thread_arg* arg) /* Do this sooner rather than later... */ IsUnderPostmaster = true; /* we are a postmaster subprocess now */ Assert(thread_role == arg->role); - /* - * We should set bn at the beginning of this function, cause if we meet some error before set bn, then we - * can't set t_thrd.bn->dead_end to true(check gs_thread_exit), which will lead CleanupBackend failed. - * The logic of get child slot refer to PortInitialize -> read_backend_variables -> restore_backend_variables - */ - int childSlot = 0; - if (arg != NULL) { - childSlot = ((BackendParameters*)arg->save_para)->MyPMChildSlot; - } - t_thrd.bn = GetBackend(childSlot); + /* get child slot from backend_variables */ + t_thrd.child_slot = (arg != NULL) ? ((BackendParameters*)arg->save_para)->MyPMChildSlot : -1; /* Check this thread will use reserved memory or not */ is_memory_backend_reserved(arg); /* Initialize the Memory Protection at the thread level */ @@ -11511,6 +12077,10 @@ int GaussDbThreadMain(knl_thread_arg* arg) ereport(DEBUG2, (errmsg("start a new [%d] thread with fd:[%d]", thread_role, port.sock))); } + t_thrd.bn = GetBackend(t_thrd.proc_cxt.MyPMChildSlot); + /* thread get backend pointer, backend list can be tracked by current thread's t_thrd.bn */ + t_thrd.is_inited = true; + /* We don't need read GUC variables */ if (!FencedUDFMasterMode && !PythonFencedMasterModel) { /* Read in remaining GUC variables */ @@ -11539,6 +12109,7 @@ int GaussDbThreadMain(knl_thread_arg* arg) * non-EXEC_BACKEND behavior. 
*/ process_shared_preload_libraries(); + CreateLocalSysDBCache(); switch (thread_role) { case STREAM_WORKER: @@ -11617,15 +12188,19 @@ int GaussDbThreadMain(knl_thread_arg* arg) case CBMWRITER: case STARTUP: case PAGEWRITER_THREAD: + case PAGEREPAIR_THREAD: case HEARTBEAT: case SHARE_STORAGE_XLOG_COPYER: #ifdef ENABLE_MULTIPLE_NODES + case BARRIER_PREPARSE: case TS_COMPACTION: case TS_COMPACTION_CONSUMER: case TS_COMPACTION_AUXILIAY: #endif /* ENABLE_MULTIPLE_NODES */ case THREADPOOL_LISTENER: case THREADPOOL_SCHEDULER: + case LOGICAL_READ_RECORD: + case PARALLEL_DECODE: case UNDO_RECYCLER: { SetAuxType(); /* Restore basic shared memory pointers */ @@ -11819,8 +12394,15 @@ int GaussDbThreadMain(knl_thread_arg* arg) } break; case AUDITOR: { - t_thrd.role = AUDITOR; - /* Do not want to attach to shared memory */ + /* Restore basic shared memory pointers */ + InitShmemAccess(UsedShmemSegAddr); + + /* Need a PGPROC to run CreateSharedMemoryAndSemaphores */ + InitAuxiliaryProcess(); + + /* Attach process to shared data structures */ + CreateSharedMemoryAndSemaphores(false, 0); + PgAuditorMain(); proc_exit(0); } break; @@ -11982,6 +12564,7 @@ int GaussDbThreadMain(knl_thread_arg* arg) proc_exit(0); } +#ifndef ENABLE_LITE_MODE case BARRIER_CREATOR: { if (START_BARRIER_CREATOR) { t_thrd.proc_cxt.MyPMChildSlot = AssignPostmasterChildSlot(); @@ -12005,6 +12588,7 @@ int GaussDbThreadMain(knl_thread_arg* arg) proc_exit(0); } } break; +#endif #ifdef ENABLE_MULTIPLE_NODES case COMM_POOLER_CLEAN: { @@ -12093,12 +12677,15 @@ static ThreadMetaData GaussdbThreadGate[] = { { GaussDbThreadMain, DATARECWRITER, "datarecwriter", "data receive writer" }, { GaussDbThreadMain, CBMWRITER, "CBMwriter", "CBM writer" }, { GaussDbThreadMain, PAGEWRITER_THREAD, "pagewriter", "page writer" }, + { GaussDbThreadMain, PAGEREPAIR_THREAD, "pagerepair", "page repair" }, { GaussDbThreadMain, HEARTBEAT, "heartbeat", "heart beat" }, { GaussDbThreadMain, COMM_SENDERFLOWER, "COMMsendflow", "communicator sender flower" }, { GaussDbThreadMain, COMM_RECEIVERFLOWER, "COMMrecvflow", "communicator receiver flower" }, { GaussDbThreadMain, COMM_RECEIVER, "COMMrecloop", "communicator receiver loop" }, { GaussDbThreadMain, COMM_AUXILIARY, "COMMaux", "communicator auxiliary" }, { GaussDbThreadMain, COMM_POOLER_CLEAN, "COMMpoolcleaner", "communicator pooler auto cleaner" }, + { GaussDbThreadMain, LOGICAL_READ_RECORD, "LogicalRead", "LogicalRead pooler auto cleaner" }, + { GaussDbThreadMain, PARALLEL_DECODE, "COMMpoolcleaner", "communicator pooler auto cleaner" }, { GaussDbThreadMain, UNDO_RECYCLER, "undorecycler", "undo recycler" }, { GaussDbThreadMain, UNDO_LAUNCHER, "asyncundolaunch", "async undo launcher" }, { GaussDbThreadMain, UNDO_WORKER, "asyncundoworker", "async undo worker" }, @@ -12113,6 +12700,7 @@ static ThreadMetaData GaussdbThreadGate[] = { /* Keep the block in the end if it may be absent !!! 
*/ #ifdef ENABLE_MULTIPLE_NODES + { GaussDbThreadMain, BARRIER_PREPARSE, "barrierpreparse", "barrier preparse backend" }, { GaussDbThreadMain, TS_COMPACTION, "TScompaction", "timeseries compaction" }, { GaussDbThreadMain, TS_COMPACTION_CONSUMER, "TScompconsumer", @@ -12243,7 +12831,28 @@ ThreadId initialize_worker_thread(knl_thread_role role, Port* port, void* payloa return initialize_thread(thr_argv); } -bool isVaildIp(const char* ip) + +static bool isVaildIpv6(const char* ip) +{ + struct sockaddr_storage addr; + errno_t rc = 0; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone((char *)ip, ipNoZoneData, IP_LEN); + + rc = memset_s(&addr, sizeof(addr), 0, sizeof(addr)); + securec_check(rc, "", ""); + addr.ss_family = AF_INET6; + rc = inet_pton(AF_INET6, ipNoZone, &(((struct sockaddr_in6 *) &addr)->sin6_addr)); + if (rc <= 0) { + return false; + } + return true; +} + +bool isVaildIpv4(const char* ip) { int dots = 0; int setions = 0; @@ -12278,6 +12887,22 @@ bool isVaildIp(const char* ip) return false; } +static bool isVaildIp(const char* ip) +{ + bool ret = false; + if (NULL == ip) { + return false; + } + + if (strchr(ip, ':') != NULL) { + return isVaildIpv6(ip); + } else { + return isVaildIpv4(ip); + } + + return ret; +} + /* * set disable_conn_primary, deny connection to this node. */ @@ -12365,6 +12990,7 @@ Datum disable_conn(PG_FUNCTION_ARGS) if (!isVaildIp(host)) { ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("host is invalid"))); } + clean_ipv6_addr(AF_INET6, host); errno_t rc = memcpy_s(disconn_node.disable_conn_node_host, NAMEDATALEN, host, strlen(host) + 1); securec_check(rc, "\0", "\0"); } @@ -12402,7 +13028,8 @@ Datum disable_conn(PG_FUNCTION_ARGS) SpinLockAcquire(&g_instance.comm_cxt.localinfo_cxt.disable_conn_node.info_lck); g_instance.comm_cxt.localinfo_cxt.disable_conn_node.disable_conn_node_data = disconn_node; SpinLockRelease(&g_instance.comm_cxt.localinfo_cxt.disable_conn_node.info_lck); - ereport(LOG, (errcode(ERRCODE_LOG), errmsg("disable_conn set mode to %s", disconn_mode))); + ereport(LOG, (errcode(ERRCODE_LOG), errmsg("disable_conn set mode to %s and host is %s port is %d", disconn_mode, + host, disconn_node.disable_conn_node_port))); PG_RETURN_VOID(); } @@ -12422,19 +13049,24 @@ Datum read_disable_conn_file(PG_FUNCTION_ARGS) char* key_position = NULL; char local_host[NAMEDATALEN]; char local_port[NAMEDATALEN]; - char local_info[NAMEDATALEN]; - const int MAX_LOCAL_ADDRESS_LENGTH = 50; - rc = memset_s(local_info, NAMEDATALEN, 0, NAMEDATALEN); + char local_info[DOUBLE_NAMEDATALEN]; + int max_local_address_length = DOUBLE_NAMEDATALEN - 1; + rc = memset_s(local_info, DOUBLE_NAMEDATALEN, 0, DOUBLE_NAMEDATALEN); securec_check(rc, "\0", "\0"); - if (t_thrd.postmaster_cxt.ReplConnChanged[1] == false || u_sess->attr.attr_storage.ReplConnInfoArr[1] == NULL) { + if (u_sess->attr.attr_storage.ReplConnInfoArr[1] == NULL) { ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("Can't get local connection address."))); } - rc = memcpy_s(local_info, NAMEDATALEN - 1, u_sess->attr.attr_storage.ReplConnInfoArr[1], MAX_LOCAL_ADDRESS_LENGTH); + max_local_address_length = pg_mbcliplen(u_sess->attr.attr_storage.ReplConnInfoArr[1], + strlen(u_sess->attr.attr_storage.ReplConnInfoArr[1]), + max_local_address_length); + rc = memcpy_s(local_info, DOUBLE_NAMEDATALEN - 1, + u_sess->attr.attr_storage.ReplConnInfoArr[1], max_local_address_length); securec_check(rc, "\0", "\0"); 
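
`isVaildIp()` (the spelling follows the existing symbol) now dispatches on a `':'` to the new `isVaildIpv6()`, which strips any `%zone` suffix and delegates parsing to `inet_pton(3)` instead of hand-counting dots and digits. The same approach for both families, standalone:

```
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <string.h>

/* Validate an IP literal with inet_pton(3); handles "fe80::1%eth0"
 * by cutting the zone id first, as the patch's remove_ipv6_zone does. */
static bool IsValidIpLiteral(const char *ip)
{
    if (ip == NULL)
        return false;
    if (strchr(ip, ':') != NULL) {              /* IPv6 */
        char noZone[INET6_ADDRSTRLEN] = {0};
        strncpy(noZone, ip, sizeof(noZone) - 1);
        char *pct = strchr(noZone, '%');
        if (pct != NULL)
            *pct = '\0';                        /* drop '%zone'; it is discarded anyway */
        struct in6_addr a6;
        return inet_pton(AF_INET6, noZone, &a6) == 1;
    }
    struct in_addr a4;                          /* IPv4 */
    return inet_pton(AF_INET, ip, &a4) == 1;
}
```
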
key_position = strtok_s(local_info, " ", &next_key); if (key_position == NULL || sscanf_s(key_position, "localhost=%s", local_host, NAMEDATALEN - 1) != 1) { ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("get local host failed!"))); } + clean_ipv6_addr(AF_INET6, local_host); key_position = strtok_s(NULL, " ", &next_key); if (key_position == NULL || sscanf_s(key_position, "localport=%s", local_port, NAMEDATALEN - 1) != 1) { diff --git a/src/gausskernel/process/postmaster/rbcleaner.cpp b/src/gausskernel/process/postmaster/rbcleaner.cpp index 982050729..729dd7396 100644 --- a/src/gausskernel/process/postmaster/rbcleaner.cpp +++ b/src/gausskernel/process/postmaster/rbcleaner.cpp @@ -893,6 +893,9 @@ NON_EXEC_STATIC void RbCleanerMain() */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + LWLockReleaseAll(); /* @@ -1130,6 +1133,9 @@ NON_EXEC_STATIC void RbWorkerMain() */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + LWLockReleaseAll(); /* diff --git a/src/gausskernel/process/postmaster/snapcapturer.cpp b/src/gausskernel/process/postmaster/snapcapturer.cpp index 0177d8138..9f46cbf60 100644 --- a/src/gausskernel/process/postmaster/snapcapturer.cpp +++ b/src/gausskernel/process/postmaster/snapcapturer.cpp @@ -289,13 +289,15 @@ static void TxnSnapInsert(void) */ static void TxnSnapDelete(void) { +#define TXNSNAP_EXTRA_RETRNTION_TIME 900 Relation rel; ScanKeyData skey[2]; SysScanDesc sd; HeapTuple tup; - /* Retent snapshots for up to 3 days. */ - const int64 snapRetentionMs = 86400000L * 3; + /* Retent snapshots for up to undo_retention_time + 15min. */ + const int64 snapRetentionMs = 1000L * (u_sess->attr.attr_storage.undo_retention_time + + TXNSNAP_EXTRA_RETRNTION_TIME); TimestampTz ft = TimestampTzPlusMilliseconds(GetCurrentTimestamp(), snapRetentionMs * -1); rel = heap_open(SnapshotRelationId, RowExclusiveLock); @@ -318,12 +320,22 @@ static void TxnSnapDelete(void) */ static void TxnSnapWorkerImpl(void) { + int retentionTime = u_sess->attr.attr_storage.undo_retention_time; + TimestampTz result; + StartTransactionCommand(); TxnSnapInsert(); TxnSnapDelete(); +#ifdef HAVE_INT64_TIMESTAMP + result = GetCurrentTimestamp() - retentionTime * (INT64CONST(1000000)); +#else + result = GetCurrentTimestamp() - retentionTime; +#endif + g_instance.flashback_cxt.oldestXminInFlashback = TvFetchSnpxminRecycle(result); + CommitTransactionCommand(); } @@ -457,6 +469,9 @@ NON_EXEC_STATIC void TxnSnapWorkerMain() */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* Notice: at the most time it isn't necessary to call because * all the LWLocks are released in AbortCurrentTransaction(). 
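
In snapcapturer.cpp the snapshot retention window changes from a fixed 3 days to `undo_retention_time` seconds plus a 15-minute margin, and the flashback horizon is the same retention converted to microseconds. The two conversions, worked through on int64 microsecond timestamps (the `HAVE_INT64_TIMESTAMP` build), with names simplified:

```
#include <stdint.h>

typedef int64_t TimestampTz;            /* microseconds, as in the int64 build */

#define MSECS_PER_SEC  INT64_C(1000)
#define USECS_PER_MSEC INT64_C(1000)
#define USECS_PER_SEC  INT64_C(1000000)
#define EXTRA_RETENTION_SECS 900        /* the patch's 15-minute safety margin */

/* Cutoff for deleting old snapshots: now - (retention + margin). */
static TimestampTz SnapshotCutoff(TimestampTz now, int64_t undoRetentionSecs)
{
    int64_t retentionMs = (undoRetentionSecs + EXTRA_RETENTION_SECS) * MSECS_PER_SEC;
    /* equivalent to TimestampTzPlusMilliseconds(now, -retentionMs) */
    return now - retentionMs * USECS_PER_MSEC;
}

/* Oldest xmin horizon used for flashback: now - retention. */
static TimestampTz FlashbackHorizon(TimestampTz now, int64_t undoRetentionSecs)
{
    return now - undoRetentionSecs * USECS_PER_SEC;
}
```
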
* but in some rare exception not in one transaction (for @@ -757,6 +772,9 @@ NON_EXEC_STATIC void TxnSnapCapturerMain() */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + LWLockReleaseAll(); /* diff --git a/src/gausskernel/process/postmaster/startup.cpp b/src/gausskernel/process/postmaster/startup.cpp index d6415b20a..a6fa6b8e8 100755 --- a/src/gausskernel/process/postmaster/startup.cpp +++ b/src/gausskernel/process/postmaster/startup.cpp @@ -23,6 +23,7 @@ #include #include "access/xlog.h" +#include "access/multi_redo_api.h" #include "libpq/pqsignal.h" #include "miscadmin.h" #include "postmaster/startup.h" @@ -141,6 +142,8 @@ static void StartupProcSigusr2Handler(SIGNAL_ARGS) if (t_thrd.startup_cxt.failover_triggered && t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby) { t_thrd.startup_cxt.failover_triggered = false; } + } else if (CheckNotifySignal(NOTIFY_CASCADE_STANDBY)) { + t_thrd.startup_cxt.standby_triggered = true; } else if (CheckNotifySignal(NOTIFY_FAILOVER)) { t_thrd.startup_cxt.failover_triggered = true; #ifndef ENABLE_MULTIPLE_NODES @@ -169,6 +172,17 @@ static void StartupProcSigHupHandler(SIGNAL_ARGS) errno = save_errno; } +/* SIGINT: set flag to check repair page */ +static void StartupProcSigIntHandler(SIGNAL_ARGS) +{ + int save_errno = errno; + + t_thrd.startup_cxt.check_repair = true; + + errno = save_errno; +} + + /* SIGTERM: set flag to abort redo and exit */ static void StartupProcShutdownHandler(SIGNAL_ARGS) { @@ -184,6 +198,14 @@ static void StartupProcShutdownHandler(SIGNAL_ARGS) errno = save_errno; } +void HandleStartupPageRepair(RepairBlockKey key, XLogPhyBlock pblk) +{ + XLogReaderState *record = g_instance.startup_cxt.current_record; + parallel_recovery::RecordBadBlockAndPushToRemote(record, key, CRC_CHECK_FAIL, + InvalidXLogRecPtr, pblk); + return; +} + /* Handle SIGHUP and SIGTERM signals of startup process */ void HandleStartupProcInterrupts(void) { @@ -195,6 +217,13 @@ void HandleStartupProcInterrupts(void) ProcessConfigFile(PGC_SIGHUP); } + if (t_thrd.startup_cxt.check_repair) { + if (!IsExtremeRedo() && !IsParallelRedo()) { + parallel_recovery::SeqCheckRemoteReadAndRepairPage(); + } + t_thrd.startup_cxt.check_repair = false; + } + /* * Check if we were requested to exit without finishing recovery. 
*/ @@ -214,6 +243,11 @@ static void StartupReleaseAllLocks(int code, Datum arg) { Assert(t_thrd.proc != NULL); + if (g_instance.startup_cxt.badPageHashTbl != NULL) { + hash_destroy(g_instance.startup_cxt.badPageHashTbl); + g_instance.startup_cxt.badPageHashTbl = NULL; + } + /* Do nothing if we're not in hot standby mode */ if (t_thrd.xlog_cxt.standbyState == STANDBY_DISABLED) return; @@ -268,7 +302,7 @@ void StartupProcessMain(void) * Reset some signals that are accepted by postmaster but not here */ (void)gspqsignal(SIGHUP, StartupProcSigHupHandler); /* reload config file */ - (void)gspqsignal(SIGINT, SIG_IGN); /* ignore query cancel */ + (void)gspqsignal(SIGINT, StartupProcSigIntHandler); /* check repair page and file */ (void)gspqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */ (void)gspqsignal(SIGQUIT, startupproc_quickdie); /* hard crash time */ @@ -293,6 +327,7 @@ void StartupProcessMain(void) (void)gspqsignal(SIGWINCH, SIG_DFL); (void)RegisterRedoInterruptCallBack(HandleStartupProcInterrupts); + (void)RegisterRedoPageRepairCallBack(HandleStartupPageRepair); /* * Unblock signals (they were blocked when the postmaster forked us) */ @@ -322,6 +357,11 @@ void StartupProcessMain(void) */ #endif DeleteDisConnFileInClusterStandby(); + if (!dummyStandbyMode) { + Assert(g_instance.startup_cxt.badPageHashTbl == NULL); + g_instance.startup_cxt.badPageHashTbl = parallel_recovery::BadBlockHashTblCreate(); + } + StartupXLOG(); } diff --git a/src/gausskernel/process/postmaster/twophasecleaner.cpp b/src/gausskernel/process/postmaster/twophasecleaner.cpp index 2f171c833..4b2d357b4 100644 --- a/src/gausskernel/process/postmaster/twophasecleaner.cpp +++ b/src/gausskernel/process/postmaster/twophasecleaner.cpp @@ -159,6 +159,8 @@ NON_EXEC_STATIC void TwoPhaseCleanerMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * Now return to normal top-level context and clear ErrorContext for diff --git a/src/gausskernel/process/postmaster/walwriter.cpp b/src/gausskernel/process/postmaster/walwriter.cpp index 6788db4d7..f24da1b3a 100755 --- a/src/gausskernel/process/postmaster/walwriter.cpp +++ b/src/gausskernel/process/postmaster/walwriter.cpp @@ -202,6 +202,8 @@ void WalWriterMain(void) /* abort async io, must before LWlock release */ AbortAsyncListIO(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* * These operations are really just a minimal subset of * AbortTransaction(). We don't have very many resources to worry diff --git a/src/gausskernel/process/postmaster/walwriterauxiliary.cpp b/src/gausskernel/process/postmaster/walwriterauxiliary.cpp index 38e427743..10e7eb9b0 100755 --- a/src/gausskernel/process/postmaster/walwriterauxiliary.cpp +++ b/src/gausskernel/process/postmaster/walwriterauxiliary.cpp @@ -128,6 +128,9 @@ void WalWriterAuxiliaryMain(void) /* abort async io, must before LWlock release */ AbortAsyncListIO(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * These operations are really just a minimal subset of * AbortTransaction(). 
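
The startup process previously ignored SIGINT; it now uses it to request a page-repair check. The handler only sets a flag, and `HandleStartupProcInterrupts()` does the real work later, which is the canonical async-signal-safe split. A minimal sketch with simplified names:

```
#include <errno.h>
#include <signal.h>

/* Set in the handler, consumed in the main loop: the only work that is
 * safe to do inside a signal handler. */
static volatile sig_atomic_t check_repair = 0;

static void SigintHandler(int signo)
{
    int save_errno = errno;   /* preserve errno, as the patch's handlers do */
    (void)signo;
    check_repair = 1;
    errno = save_errno;
}

static void HandleInterrupts(void)
{
    if (check_repair) {
        check_repair = 0;
        /* run SeqCheckRemoteReadAndRepairPage() or equivalent here, in a
         * normal execution context where ereport and I/O are allowed */
    }
}
```
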
We don't have very many resources to worry diff --git a/src/gausskernel/process/stream/execStream.cpp b/src/gausskernel/process/stream/execStream.cpp index 92160e431..b054c2a1a 100755 --- a/src/gausskernel/process/stream/execStream.cpp +++ b/src/gausskernel/process/stream/execStream.cpp @@ -275,6 +275,7 @@ void StreamSaveTxnContext(StreamTxnContext* stc) StreamTxnContextSaveComboCid(stc); StreamTxnContextSaveXact(stc); StreamTxnContextSaveSnapmgr(stc); + StreamTxnContextSaveInvalidMsg(stc); } void StreamRestoreTxnContext(StreamTxnContext* stc) @@ -282,6 +283,7 @@ void StreamRestoreTxnContext(StreamTxnContext* stc) StreamTxnContextRestoreComboCid(stc); StreamTxnContextRestoreXact(stc); StreamTxnContextRestoreSnapmgr(stc); + StreamTxnContextRestoreInvalidMsg(stc); } /* @@ -720,6 +722,7 @@ static void InitStream(StreamFlowCtl* ctl, StreamTransType transType) producer->setSharedContext(sharedContext); producer->setUniqueSQLKey(u_sess->unique_sql_cxt.unique_sql_id, u_sess->unique_sql_cxt.unique_sql_user_id, u_sess->unique_sql_cxt.unique_sql_cn_id); + producer->setGlobalSessionId(&u_sess->globalSessionId); producerSMPList = lappend(producerSMPList, producer); /* Add all producer to node group to avoid possible consumer-not-deinit */ @@ -738,6 +741,7 @@ static void InitStream(StreamFlowCtl* ctl, StreamTransType transType) StreamProducer(key, pstmt, streamNode, u_sess->stream_cxt.stream_runtime_mem_cxt, consumerNum, transType); producer->setUniqueSQLKey(u_sess->unique_sql_cxt.unique_sql_id, u_sess->unique_sql_cxt.unique_sql_user_id, u_sess->unique_sql_cxt.unique_sql_cn_id); + producer->setGlobalSessionId(&u_sess->globalSessionId); producerSMPList = lappend(producerSMPList, producer); } diff --git a/src/gausskernel/process/stream/streamMain.cpp b/src/gausskernel/process/stream/streamMain.cpp index 6dd144937..ef4421a5e 100755 --- a/src/gausskernel/process/stream/streamMain.cpp +++ b/src/gausskernel/process/stream/streamMain.cpp @@ -106,7 +106,9 @@ int StreamMain() MemoryContext oldMemory = MemoryContextSwitchTo( THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)); +#ifdef ENABLE_LLVM_COMPILE CodeGenThreadInitialize(); +#endif (void)MemoryContextSwitchTo(oldMemory); /* We can now handle ereport(ERROR) */ @@ -121,10 +123,13 @@ int StreamMain() pgstat_report_activity(STATE_IDLE, NULL); pgstat_report_waitstatus(STATE_WAIT_COMM); t_thrd.threadpool_cxt.stream->WaitMission(); + Assert(CheckMyDatabaseMatch()); pgstat_report_waitstatus(STATE_WAIT_UNDEFINED); } pgstat_report_queryid(u_sess->debug_query_id); + pgstat_report_unique_sql_id(false); + pgstat_report_global_session_id(u_sess->globalSessionId); pgstat_report_smpid(u_sess->stream_cxt.smp_id); timeInfoRecordStart(); @@ -258,6 +263,7 @@ void ExtractProduerInfo() u_sess->exec_cxt.need_track_resource = u_sess->stream_cxt.producer_obj->getExplainTrack(); u_sess->stream_cxt.producer_obj->getUniqueSQLKey(&u_sess->unique_sql_cxt.unique_sql_id, &u_sess->unique_sql_cxt.unique_sql_user_id, &u_sess->unique_sql_cxt.unique_sql_cn_id); + u_sess->stream_cxt.producer_obj->getGlobalSessionId(&u_sess->globalSessionId); WLMGeneralParam *g_wlm_params = &u_sess->wlm_cxt->wlm_params; errno_t ret = sprintf_s(u_sess->wlm_cxt->control_group, @@ -360,6 +366,9 @@ static void HandleStreamSigjmp() AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + LWLockReleaseAll(); if (u_sess->stream_cxt.producer_obj != NULL) { @@ -394,14 +403,13 @@ static void execute_stream_plan(StreamProducer* producer) PlannedStmt* planstmt = producer->getPlan(); 
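
A number of background workers (rbcleaner, snapcapturer, twophasecleaner, walwriter, stream workers) gain the same line in their `sigsetjmp` recovery path: release whatever the thread-local system-catalog cache (lsc) still holds, after the transaction abort and before blanket lock release. A schematic of that ordering; the declarations are repeated so the sketch stands alone, and the surrounding retry loop is omitted.

```
#include <stdbool.h>

/* Declarations as they would come from the backend headers. */
extern void EmitErrorReport(void);
extern void AbortCurrentTransaction(void);
extern void AtEOXact_SysDBCache(bool isCommit);
extern void LWLockReleaseAll(void);
extern void FlushErrorState(void);

/* Schematic sigsetjmp recovery path for a background worker,
 * in the order the hunks establish. */
static void WorkerErrorRecovery(void)
{
    EmitErrorReport();           /* log what went wrong                     */
    AbortCurrentTransaction();   /* release transaction-level resources     */
    AtEOXact_SysDBCache(false);  /* new: drop references still held by the
                                  * local sysdb cache, which outlives the
                                  * transaction                             */
    LWLockReleaseAll();          /* then release any remaining LWLocks      */
    FlushErrorState();           /* ready for the next iteration            */
}
```
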
CommandDest dest = producer->getDest(); bool save_log_statement_stats = u_sess->attr.attr_common.log_statement_stats; - bool was_logged = false; bool isTopLevel = false; const char* commandTag = NULL; char completionTag[COMPLETION_TAG_BUFSIZE]; Portal portal = NULL; DestReceiver* receiver = NULL; int16 format; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; t_thrd.postgres_cxt.debug_query_string = planstmt->query_string; pgstat_report_activity(STATE_RUNNING, t_thrd.postgres_cxt.debug_query_string); @@ -491,9 +499,9 @@ static void execute_stream_plan(StreamProducer* producer) /* * Emit duration logging if appropriate. */ - switch (check_log_duration(msec_str, was_logged)) { + switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms", msec_str), errhidestmt(true))); + Assert(false); break; case 2: ereport(LOG, @@ -742,28 +750,29 @@ void StreamExit() AtProcExit_Buffers(0, 0); ShutdownPostgres(0, 0); - AtProcExit_Files(0, 0); + if(!EnableLocalSysCache()) { + AtProcExit_Files(0, 0); + } StreamQuitAndClean(0, 0); RestoreStream(); - /* release memory context and reset flags. */ - MemoryContextReset(u_sess->syscache_cxt.SysCacheMemCxt); - - errno_t rc = EOK; - rc = memset_s(u_sess->syscache_cxt.SysCache, sizeof(CatCache*) * SysCacheSize, - 0, sizeof(CatCache*) * SysCacheSize); - securec_check(rc, "\0", "\0"); - rc = memset_s(u_sess->syscache_cxt.SysCacheRelationOid, sizeof(Oid) * SysCacheSize, - 0, sizeof(Oid) * SysCacheSize); - securec_check(rc, "\0", "\0"); + if (!EnableLocalSysCache()) { + /* release memory context and reset flags. */ + MemoryContextReset(u_sess->syscache_cxt.SysCacheMemCxt); + errno_t rc = EOK; + rc = memset_s(u_sess->syscache_cxt.SysCache, sizeof(CatCache*) * SysCacheSize, + 0, sizeof(CatCache*) * SysCacheSize); + securec_check(rc, "\0", "\0"); + rc = memset_s(u_sess->syscache_cxt.SysCacheRelationOid, sizeof(Oid) * SysCacheSize, + 0, sizeof(Oid) * SysCacheSize); + securec_check(rc, "\0", "\0"); + } /* release statement_cxt */ if (t_thrd.proc_cxt.MyBackendId != InvalidBackendId) { release_statement_context(t_thrd.shemem_ptr_cxt.MyBEEntry, __FUNCTION__, __LINE__); } - /* When the stream session ends, the user count decreases by one. */ - DecreaseUserCount(u_sess->proc_cxt.MyRoleId); free_session_context(u_sess); } diff --git a/src/gausskernel/process/stream/streamProducer.cpp b/src/gausskernel/process/stream/streamProducer.cpp index 6da2f1143..fa2f0c6e5 100644 --- a/src/gausskernel/process/stream/streamProducer.cpp +++ b/src/gausskernel/process/stream/streamProducer.cpp @@ -102,6 +102,9 @@ StreamProducer::StreamProducer( m_uniqueSQLId = 0; m_uniqueSQLUserId = 0; m_uniqueSQLCNId = 0; + m_globalSessionId.sessionId = 0; + m_globalSessionId.nodeId = 0; + m_globalSessionId.seq = 0; /* Initialize the origin nodelsit */ m_originConsumerNodeList = NIL; m_originProducerExecNodeList = NIL; @@ -522,6 +525,16 @@ void StreamProducer::getUniqueSQLKey(uint64* unique_id, Oid* user_id, uint32* cn *cn_id = m_uniqueSQLCNId; } +void StreamProducer::setGlobalSessionId(GlobalSessionId* globalSessionId) +{ + m_globalSessionId = *globalSessionId; +} + +void StreamProducer::getGlobalSessionId(GlobalSessionId* globalSessionId) +{ + *globalSessionId = m_globalSessionId; +} + /* * @Description: Get send dest for local stream error info. 
* diff --git a/src/gausskernel/process/tcop/auditfuncs.cpp b/src/gausskernel/process/tcop/auditfuncs.cpp index d4f5f4e8a..fcba9cdeb 100644 --- a/src/gausskernel/process/tcop/auditfuncs.cpp +++ b/src/gausskernel/process/tcop/auditfuncs.cpp @@ -32,9 +32,16 @@ #include "catalog/namespace.h" #include "auditfuncs.h" #include "utils/elog.h" +#include "libpq/libpq-be.h" #define AUDIT_BUFFERSIZ 512 +typedef void (*AuditFunc)(const char* objectname, const char* cmdtext); +typedef struct AuditFuncMap { + ObjectType objType; + AuditFunc auditFunc; +}AuditFuncMap; + static THR_LOCAL ExecutorEnd_hook_type prev_ExecutorEnd = NULL; static THR_LOCAL ProcessUtility_hook_type prev_ProcessUtility = NULL; @@ -62,9 +69,13 @@ static void pgaudit_ddl_user(const char* objectname, const char* cmdtext); static void pgaudit_ddl_view(const char* objectname, const char* cmdtext); static void pgaudit_ddl_matview(const char* objectname, const char* cmdtext); static void pgaudit_ddl_function(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_package(const char* objectname, const char* cmdtext); static void pgaudit_ddl_resourcepool(const char* objectname, const char* cmdtext); +static void pgaudit_alter_globalconfig(const AlterGlobalConfigStmt* stmt, const char* cmdtext); +static void pgaudit_drop_globalconfig(const DropGlobalConfigStmt* stmt, const char* cmdtext); static void pgaudit_ddl_workload(const char* objectname, const char* cmdtext); static void pgaudit_ddl_serverforhardoop(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_model(const char* objectname, const char* cmdtext); static void pgaudit_process_alter_object(Node* node, const char* querystring); static void pgaudit_process_alter_owner(Node* node, const char* querystring); static void pgaudit_process_drop_objects(Node* node, const char* querystring); @@ -74,6 +85,45 @@ static void pgaudit_process_grant_or_revoke_roles(List* grantee_name_list, bool static void pgaudit_delete_files(const char* objectname, const char* cmdtext); static void pgaudit_ddl_weak_password(const char* cmdtext); static void pgaudit_ddl_full_encryption_key(const char* cmdtext); +static void pgaudit_ddl_type(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_datasource(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_rowlevelsecurity(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_synonym(const char* objectName, const char* cmdText); +static void pgaudit_ddl_textsearch(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_publication_subscription(const char* objectname, const char* cmdtext); +static void pgaudit_ddl_fdw(const char* objectname, const char* cmdtext); + +static const AuditFuncMap g_auditFuncMap[] = { + {OBJECT_SCHEMA, pgaudit_ddl_schema}, + {OBJECT_TABLE, pgaudit_ddl_table}, + {OBJECT_FOREIGN_TABLE, pgaudit_ddl_table}, + {OBJECT_STREAM, pgaudit_ddl_table}, + {OBJECT_INTERNAL, pgaudit_ddl_table}, + {OBJECT_TABLESPACE, pgaudit_ddl_tablespace}, + {OBJECT_ROLE, pgaudit_ddl_user}, + {OBJECT_USER, pgaudit_ddl_user}, + {OBJECT_TRIGGER, pgaudit_ddl_trigger}, + {OBJECT_CONTQUERY, pgaudit_ddl_view}, + {OBJECT_VIEW, pgaudit_ddl_view}, + {OBJECT_MATVIEW, pgaudit_ddl_matview}, + {OBJECT_INDEX, pgaudit_ddl_index}, + {OBJECT_TYPE, pgaudit_ddl_type}, + {OBJECT_DATABASE, pgaudit_ddl_database}, + {OBJECT_FUNCTION, pgaudit_ddl_function}, + {OBJECT_PACKAGE, pgaudit_ddl_package}, + {OBJECT_FOREIGN_SERVER, pgaudit_ddl_serverforhardoop}, + {OBJECT_DATA_SOURCE, 
pgaudit_ddl_datasource}, + {OBJECT_DIRECTORY, pgaudit_ddl_directory}, + {OBJECT_RLSPOLICY, pgaudit_ddl_rowlevelsecurity}, + {OBJECT_SYNONYM, pgaudit_ddl_synonym}, + {OBJECT_TSDICTIONARY, pgaudit_ddl_textsearch}, + {OBJECT_TSCONFIGURATION, pgaudit_ddl_textsearch}, + {OBJECT_PUBLICATION, pgaudit_ddl_publication_subscription}, + {OBJECT_SUBSCRIPTION, pgaudit_ddl_publication_subscription}, + {OBJECT_FDW, pgaudit_ddl_fdw} +}; +static const int g_auditFuncMapNum = sizeof(g_auditFuncMap) / sizeof(AuditFuncMap); + /* * Brief : perfstat_agent_init() * Description : Module load callback. @@ -143,11 +193,11 @@ void pgaudit_system_start_ok(int port) * Brief : void pgaudit_user_login(bool login_ok, char* object_name,const char* detaisinfo) * Description : audit the user login */ -void pgaudit_user_login(bool login_ok, const char* object_name, const char* detaisinfo) +void pgaudit_user_login(bool login_ok, const char* object_name, const char* detailinfo) { AuditType audit_type; AuditResult audit_result; - Assert(detaisinfo); + Assert(detailinfo); if (login_ok) { audit_type = AUDIT_LOGIN_SUCCESS; audit_result = AUDIT_OK; @@ -155,7 +205,15 @@ void pgaudit_user_login(bool login_ok, const char* object_name, const char* deta audit_type = AUDIT_LOGIN_FAILED; audit_result = AUDIT_FAILED; } - audit_report(audit_type, audit_result, object_name, detaisinfo); + + char new_login_info[PGAUDIT_MAXLENGTH] = {0}; + Port *port = u_sess->proc_cxt.MyProcPort; + if (port != NULL) { + int rc = snprintf_s(new_login_info, PGAUDIT_MAXLENGTH, PGAUDIT_MAXLENGTH - 1, "%s, SSL=%s", detailinfo, + port->ssl != NULL ? "on" : "off"); + securec_check_ss(rc, "", ""); + } + audit_report(audit_type, audit_result, object_name, port != NULL ? new_login_info : detailinfo); } /* @@ -296,12 +354,14 @@ static void pgaudit_ddl_database_object( case AUDIT_DDL_INDEX: case AUDIT_DDL_SCHEMA: case AUDIT_DDL_FUNCTION: + case AUDIT_DDL_PACKAGE: case AUDIT_DDL_TABLE: case AUDIT_DDL_TABLESPACE: case AUDIT_DDL_TRIGGER: case AUDIT_DDL_USER: case AUDIT_DDL_VIEW: case AUDIT_DDL_RESOURCEPOOL: + case AUDIT_DDL_GLOBALCONFIG: case AUDIT_DDL_WORKLOAD: case AUDIT_DDL_SERVERFORHADOOP: case AUDIT_DDL_DATASOURCE: @@ -312,6 +372,9 @@ static void pgaudit_ddl_database_object( case AUDIT_DDL_TEXTSEARCH: case AUDIT_DDL_SEQUENCE: case AUDIT_DDL_KEY: + case AUDIT_DDL_MODEL: + case AUDIT_DDL_PUBLICATION_SUBSCRIPTION: + case AUDIT_DDL_FOREIGN_DATA_WRAPPER: pgaudit_store_auditstat(audit_type, audit_result, objectname, mask_string); break; default: @@ -436,6 +499,20 @@ static void pgaudit_ddl_user(const char* objectname, const char* cmdtext) return; } +static void pgaudit_ddl_model(const char* objectname, const char* cmdtext) +{ + AuditType audit_type = AUDIT_DDL_MODEL; + AuditResult audit_result = AUDIT_OK; + + Assert(cmdtext != NULL); + if (!CHECK_AUDIT_DDL(DDL_MODEL)) { + return; + } + + pgaudit_ddl_database_object(audit_type, audit_result, objectname, cmdtext); + return; +} + /* * Brief : pgaudit_ddl_view(char* objectname, const char* cmdtext) * Description : Audit the operations of view @@ -642,6 +719,23 @@ static void pgaudit_ddl_function(const char* objectname, const char* cmdtext) return; } +/* + * Brief : pgaudit_ddl_package(const char* objectname, const char* cmdtext) + * Description : Audit the operations of package + */ +static void pgaudit_ddl_package(const char* objectname, const char* cmdtext) +{ + AuditType audit_type = AUDIT_DDL_PACKAGE; + AuditResult audit_result = AUDIT_OK; + + Assert(cmdtext != NULL); + if (!CHECK_AUDIT_DDL(DDL_PACKAGE)) { + return; + } + 
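
`pgaudit_user_login()` (with the `detaisinfo` parameter typo fixed to `detailinfo` in the hunk) now appends the connection's SSL state to the audit detail whenever a `Port` is available. A reduced sketch of the message assembly, with plain `snprintf` standing in for `snprintf_s`:

```
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Compose "<detail>, SSL=on|off" when connection info is available and
 * fall back to the bare detail otherwise, mirroring the hunk's logic. */
static const char *LoginDetailWithSsl(const char *detail, bool havePort,
                                      bool sslActive, char *buf, size_t len)
{
    if (!havePort)
        return detail;
    snprintf(buf, len, "%s, SSL=%s", detail, sslActive ? "on" : "off");
    return buf;
}
```
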
pgaudit_ddl_database_object(audit_type, audit_result, objectname, cmdtext); + return; +} + /* * Brief : pgaudit_ddl_resourcepool(const char* objectname, const char* cmdtext) * Description : Audit the operations of resource pool @@ -660,6 +754,42 @@ static void pgaudit_ddl_resourcepool(const char* objectname, const char* cmdtext return; } +/* + * Brief : pgaudit_alter_globalconfig(const AlterGlobalConfigStmt* stmt, const char* cmdtext) + * Description : Audit ALTER GLOBAL CONFIGURATION operations on gs_global_config + */ +static void pgaudit_alter_globalconfig(const AlterGlobalConfigStmt* stmt, const char* cmdtext) +{ + Assert(cmdtext != NULL); + if (!CHECK_AUDIT_DDL(DDL_GLOBALCONFIG)) { + return; + } + ListCell* option = NULL; + foreach (option, stmt->options) { + DefElem *defel = (DefElem *)lfirst(option); + pgaudit_ddl_database_object(AUDIT_DDL_GLOBALCONFIG, AUDIT_OK, defel->defname, cmdtext); + } + return; +} + +/* + * Brief : pgaudit_drop_globalconfig(const DropGlobalConfigStmt* stmt, const char* cmdtext) + * Description : Audit DROP GLOBAL CONFIGURATION operations on gs_global_config + */ +static void pgaudit_drop_globalconfig(const DropGlobalConfigStmt* stmt, const char* cmdtext) +{ + Assert(cmdtext != NULL); + if (!CHECK_AUDIT_DDL(DDL_GLOBALCONFIG)) { + return; + } + ListCell* option = NULL; + foreach (option, stmt->options) { + const char *global_name = strVal(lfirst(option)); + pgaudit_ddl_database_object(AUDIT_DDL_GLOBALCONFIG, AUDIT_OK, global_name, cmdtext); + } + return; +} + /* * Brief : pgaudit_ddl_workload(const char* objectname, const char* cmdtext) * Description : Audit the operations of workload @@ -845,6 +975,42 @@ static void pgaudit_ddl_textsearch(const char* objectname, const char* cmdtext) return; } +/* + * pgaudit_ddl_publication_subscription: + * Audit the operations of publication and subscription + * + * @IN objectname: publication name or subscription name + * @IN cmdtext: cmd string + * @RETURN: void + */ +static void pgaudit_ddl_publication_subscription(const char* objectname, const char* cmdtext) +{ + if (!CHECK_AUDIT_DDL(DDL_PUBLICATION_SUBSCRIPTION)) { + return; + } + + pgaudit_ddl_database_object(AUDIT_DDL_PUBLICATION_SUBSCRIPTION, AUDIT_OK, objectname, cmdtext); + return; +} + +/* + * pgaudit_ddl_fdw: + * Audit the operations of foreign data wrapper + * + * @IN objectname: foreign data wrapper name + * @IN cmdtext: cmd string + * @RETURN: void + */ +static void pgaudit_ddl_fdw(const char* objectname, const char* cmdtext) +{ + if (!CHECK_AUDIT_DDL(DDL_FOREIGN_DATA_WRAPPER)) { + return; + } + + pgaudit_ddl_database_object(AUDIT_DDL_FOREIGN_DATA_WRAPPER, AUDIT_OK, objectname, cmdtext); + return; +} + /* * @Description: audit the operation of set parameter. * @in objectname : the object name need audited. 
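Both global-config helpers above walk `stmt->options` and emit one audit record per option rather than one per statement, so a multi-option command stays traceable per key. A standalone approximation of that loop; `std::vector` stands in for the kernel's `List`/`foreach` machinery, and the option names and SQL text are invented for illustration.

```cpp
#include <cstdio>
#include <string>
#include <vector>

/* Stand-in for one option node carried by an ALTER/DROP GLOBAL CONFIGURATION statement. */
struct OptionLike {
    std::string defname;
};

static void audit_record(const char* objectname, const char* cmdtext)
{
    printf("AUDIT_DDL_GLOBALCONFIG object=%s cmd=%s\n", objectname, cmdtext);
}

/* Mirrors pgaudit_alter_globalconfig: each option name becomes its own audit entry. */
static void audit_alter_globalconfig(const std::vector<OptionLike>& options, const char* cmdtext)
{
    for (const auto& opt : options) {
        audit_record(opt.defname.c_str(), cmdtext);
    }
}

int main()
{
    audit_alter_globalconfig({{"param_a"}, {"param_b"}},
                             "ALTER GLOBAL CONFIGURATION with(param_a=1, param_b=2);");
    return 0;
}
```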
@@ -923,6 +1089,11 @@ static void pgaudit_process_drop_objects(Node* node, const char* querystring) objectname = NameListToString(names); pgaudit_ddl_function(objectname, querystring); } break; + case OBJECT_PACKAGE: + case OBJECT_PACKAGE_BODY: { + objectname = NameListToString(names); + pgaudit_ddl_package(objectname, querystring); + } break; case OBJECT_FOREIGN_SERVER: { objectname = NameListToString(names); pgaudit_ddl_serverforhardoop(objectname, querystring); @@ -953,6 +1124,19 @@ static void pgaudit_process_drop_objects(Node* node, const char* querystring) case OBJECT_COLUMN_SETTING: { pgaudit_ddl_full_encryption_key(querystring); } break; + case OBJECT_DB4AI_MODEL: { + rel = makeRangeVarFromNameList(names); + objectname = rel->relname; + pgaudit_ddl_model(objectname, querystring); + } break; + case OBJECT_PUBLICATION: + objectname = NameListToString(names); + pgaudit_ddl_publication_subscription(objectname, querystring); + break; + case OBJECT_FDW: + objectname = NameListToString(names); + pgaudit_ddl_fdw(objectname, querystring); + break; default: break; } @@ -966,66 +1150,11 @@ static void pgaudit_process_drop_objects(Node* node, const char* querystring) static void pgaudit_audit_object(const char* objname, int ObjectType, const char* cmdtext) { Assert(cmdtext); - switch (ObjectType) { - case OBJECT_SCHEMA: - pgaudit_ddl_schema(objname, cmdtext); - break; - case OBJECT_TABLE: - case OBJECT_FOREIGN_TABLE: /* Execute ALTER FOREIGN TABLE RENAME */ - case OBJECT_STREAM: - case OBJECT_INTERNAL: - pgaudit_ddl_table(objname, cmdtext); - break; - case OBJECT_TABLESPACE: - pgaudit_ddl_tablespace(objname, cmdtext); - break; - case OBJECT_ROLE: - case OBJECT_USER: - pgaudit_ddl_user(objname, cmdtext); - break; - case OBJECT_TRIGGER: - pgaudit_ddl_trigger(objname, cmdtext); - break; - case OBJECT_CONTQUERY: - case OBJECT_VIEW: - pgaudit_ddl_view(objname, cmdtext); - break; - case OBJECT_MATVIEW: - pgaudit_ddl_matview(objname, cmdtext); - break; - case OBJECT_INDEX: - pgaudit_ddl_index(objname, cmdtext); - break; - case OBJECT_TYPE: - pgaudit_ddl_type(objname, cmdtext); - break; - case OBJECT_DATABASE: - pgaudit_ddl_database(objname, cmdtext); - break; - case OBJECT_FUNCTION: - pgaudit_ddl_function(objname, cmdtext); - break; - case OBJECT_FOREIGN_SERVER: - pgaudit_ddl_serverforhardoop(objname, cmdtext); - break; - case OBJECT_DATA_SOURCE: - pgaudit_ddl_datasource(objname, cmdtext); - break; - case OBJECT_DIRECTORY: - pgaudit_ddl_directory(objname, cmdtext); - break; - case OBJECT_RLSPOLICY: - pgaudit_ddl_rowlevelsecurity(objname, cmdtext); - break; - case OBJECT_SYNONYM: - pgaudit_ddl_synonym(objname, cmdtext); - break; - case OBJECT_TSDICTIONARY: - case OBJECT_TSCONFIGURATION: - pgaudit_ddl_textsearch(objname, cmdtext); - break; - default: - break; + for (int i = 0; i < g_auditFuncMapNum; i++) { + if (g_auditFuncMap[i].objType == ObjectType) { + g_auditFuncMap[i].auditFunc(objname, cmdtext); + return; + } } } @@ -1061,6 +1190,9 @@ static void pgaudit_process_alter_owner(Node* node, const char* querystring) case OBJECT_TSDICTIONARY: case OBJECT_TSCONFIGURATION: case OBJECT_TYPE: + case OBJECT_PACKAGE: + case OBJECT_PUBLICATION: + case OBJECT_SUBSCRIPTION: objectname = NameListToString(alterownerstmt->object); break; default: @@ -1106,6 +1238,7 @@ static void pgaudit_process_rename_object(Node* node, const char* querystring) case OBJECT_SCHEMA: case OBJECT_TABLESPACE: case OBJECT_TRIGGER: + case OBJECT_FDW: case OBJECT_FOREIGN_SERVER: case OBJECT_RLSPOLICY: case OBJECT_DATA_SOURCE: @@ -1115,6 
+1248,8 @@ static void pgaudit_process_rename_object(Node* node, const char* querystring) case OBJECT_TYPE: case OBJECT_TSDICTIONARY: case OBJECT_TSCONFIGURATION: + case OBJECT_PUBLICATION: + case OBJECT_SUBSCRIPTION: objectname = NameListToString(stmt->object); break; case OBJECT_TABLE: @@ -1254,6 +1389,18 @@ static void pgaudit_ProcessUtility(Node* parsetree, const char* queryString, Par CreateStmt* createforeignstmt = (CreateStmt*)(parsetree); pgaudit_ddl_table(createforeignstmt->relation->relname, queryString); } break; + case T_CreateUserMappingStmt: { + CreateUserMappingStmt *createUserMappingStmt = (CreateUserMappingStmt*)parsetree; + pgaudit_ddl_user(createUserMappingStmt->username, queryString); + } break; + case T_AlterUserMappingStmt: { + AlterUserMappingStmt *alterUserMappingStmt = (AlterUserMappingStmt*)parsetree; + pgaudit_ddl_user(alterUserMappingStmt->username, queryString); + } break; + case T_DropUserMappingStmt: { + DropUserMappingStmt *dropUserMappingStmt = (DropUserMappingStmt*)parsetree; + pgaudit_ddl_user(dropUserMappingStmt->username, queryString); + } break; case T_CreateRoleStmt: { /* Audit create user */ CreateRoleStmt* createrolestmt = (CreateRoleStmt*)(parsetree); pgaudit_ddl_user(createrolestmt->role, queryString); @@ -1338,6 +1485,10 @@ static void pgaudit_ProcessUtility(Node* parsetree, const char* queryString, Par object_name_pointer = pgaudit_get_function_name(createfunctionstmt->funcname); pgaudit_ddl_function(object_name_pointer, queryString); } break; + case T_CreatePackageStmt: + case T_CreatePackageBodyStmt: { + pgaudit_ddl_package(object_name_pointer, queryString); + } break; case T_AlterFunctionStmt: { /* Audit procedure */ AlterFunctionStmt* alterfunctionstmt = (AlterFunctionStmt*)(parsetree); @@ -1376,6 +1527,10 @@ static void pgaudit_ProcessUtility(Node* parsetree, const char* queryString, Par GrantRoleStmt* grantrolestmt = (GrantRoleStmt*)(parsetree); pgaudit_process_grant_or_revoke_roles(grantrolestmt->grantee_roles, grantrolestmt->is_grant, queryString); } break; + case T_GrantDbStmt: { /* Audit grant or revoke any privilege */ + GrantDbStmt* grantdbstmt = (GrantDbStmt*)(parsetree); + pgaudit_process_grant_or_revoke_roles(grantdbstmt->grantees, grantdbstmt->is_grant, queryString); + } break; case T_DropStmt: /* Audit drop objct */ pgaudit_process_drop_objects(parsetree, queryString); break; @@ -1403,6 +1558,14 @@ static void pgaudit_ProcessUtility(Node* parsetree, const char* queryString, Par DropResourcePoolStmt* dropresourcepoolStmt = (DropResourcePoolStmt*)(parsetree); pgaudit_ddl_resourcepool(dropresourcepoolStmt->pool_name, queryString); } break; + case T_AlterGlobalConfigStmt: { + AlterGlobalConfigStmt* alterglobalconfigStmt = (AlterGlobalConfigStmt*)(parsetree); + pgaudit_alter_globalconfig(alterglobalconfigStmt, queryString); + } break; + case T_DropGlobalConfigStmt: { + DropGlobalConfigStmt* dropglobalconfigStmt = (DropGlobalConfigStmt*)(parsetree); + pgaudit_drop_globalconfig(dropglobalconfigStmt, queryString); + } break; case T_CreateWorkloadGroupStmt: { CreateWorkloadGroupStmt* createworkloadgroupstmt = (CreateWorkloadGroupStmt*)(parsetree); pgaudit_ddl_workload(createworkloadgroupstmt->group_name, queryString); @@ -1558,7 +1721,38 @@ static void pgaudit_ProcessUtility(Node* parsetree, const char* queryString, Par TimeCapsuleStmt *stmt = (TimeCapsuleStmt *)parsetree; pgaudit_ddl_table(stmt->relation->relname, queryString); } break; - + case T_CreateModelStmt: { + CreateModelStmt* createModelStmt = (CreateModelStmt*)(parsetree); + 
pgaudit_ddl_model(createModelStmt->model, queryString); + } break; + case T_CreatePublicationStmt: { + CreatePublicationStmt *stmt = (CreatePublicationStmt*)parsetree; + pgaudit_ddl_publication_subscription(stmt->pubname, queryString); + } break; + case T_AlterPublicationStmt: { + AlterPublicationStmt *stmt = (AlterPublicationStmt*)parsetree; + pgaudit_ddl_publication_subscription(stmt->pubname, queryString); + } break; + case T_CreateSubscriptionStmt: { + CreateSubscriptionStmt *stmt = (CreateSubscriptionStmt*)parsetree; + pgaudit_ddl_publication_subscription(stmt->subname, queryString); + } break; + case T_AlterSubscriptionStmt: { + AlterSubscriptionStmt *stmt = (AlterSubscriptionStmt*)parsetree; + pgaudit_ddl_publication_subscription(stmt->subname, queryString); + } break; + case T_DropSubscriptionStmt: { + DropSubscriptionStmt *stmt = (DropSubscriptionStmt*)parsetree; + pgaudit_ddl_publication_subscription(stmt->subname, queryString); + } break; + case T_CreateFdwStmt: { + CreateFdwStmt *stmt = (CreateFdwStmt*)parsetree; + pgaudit_ddl_fdw(stmt->fdwname, queryString); + } break; + case T_AlterFdwStmt: { + AlterFdwStmt *stmt = (AlterFdwStmt*)parsetree; + pgaudit_ddl_fdw(stmt->fdwname, queryString); + } break; default: break; } diff --git a/src/gausskernel/process/tcop/autonomoustransaction.cpp b/src/gausskernel/process/tcop/autonomoustransaction.cpp index 55fb68974..270ea8b74 100644 --- a/src/gausskernel/process/tcop/autonomoustransaction.cpp +++ b/src/gausskernel/process/tcop/autonomoustransaction.cpp @@ -147,14 +147,37 @@ void AutonomousSession::DetachSession(void) ReduceSessionCount(); } +/* + * SetDeadLockTimeOut() + * Set current session DeadlockTimeout to INT_MAX. + * So the deadlock can be reported by autonomous session. + */ +void AutonomousSession::SetDeadLockTimeOut(void) +{ + saved_deadlock_timeout = u_sess->attr.attr_storage.DeadlockTimeout; + u_sess->attr.attr_storage.DeadlockTimeout = INT_MAX; +} + +/* + * ReSetDeadLockTimeOut() + * Restore current session DeadlockTimeout. + */ +void AutonomousSession::ReSetDeadLockTimeOut(void) +{ + if (saved_deadlock_timeout != 0) { + u_sess->attr.attr_storage.DeadlockTimeout = saved_deadlock_timeout; + saved_deadlock_timeout = 0; + } +} + /* * ExecSimpleQuery * Entry for executing concatenation statements. * If the execution is successful, the execution result is returned. * An error message is displayed. 
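The `SetDeadLockTimeOut`/`ReSetDeadLockTimeOut` pair above is a plain save/raise/restore of the session's `DeadlockTimeout` around `XactLockTableWait` (see the `PQexecAutonm` hunk below), so that a deadlock involving the autonomous session is detected and reported there rather than in the blocked caller. A sketch of the same idea, packaged here as an RAII guard instead of the patch's explicit calls (the patch also restores from `DestoryAutonomousSession` as a safety net).

```cpp
#include <climits>
#include <cstdio>

/* Stand-in for u_sess->attr.attr_storage.DeadlockTimeout (milliseconds). */
static int g_deadlockTimeoutMs = 1000;

class ScopedDeadlockTimeoutRaise {
public:
    ScopedDeadlockTimeoutRaise() : m_saved(g_deadlockTimeoutMs)
    {
        g_deadlockTimeoutMs = INT_MAX; /* the waiting side never fires its own check first */
    }
    ~ScopedDeadlockTimeoutRaise()
    {
        g_deadlockTimeoutMs = m_saved; /* restored on every exit path */
    }
private:
    int m_saved;
};

int main()
{
    printf("before wait: %d ms\n", g_deadlockTimeoutMs);
    {
        ScopedDeadlockTimeoutRaise guard;
        /* ... XactLockTableWait(automnXid) would block here ... */
        printf("during wait: %d ms\n", g_deadlockTimeoutMs);
    }
    printf("after wait: %d ms\n", g_deadlockTimeoutMs);
    return 0;
}
```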
*/ -Datum AutonomousSession::ExecSimpleQuery(const char* query, TupleDesc resultTupleDesc, - int64 currentXid, bool isLockWait) +ATResult AutonomousSession::ExecSimpleQuery(const char* query, TupleDesc resultTupleDesc, + int64 currentXid, bool isLockWait, bool is_plpgsql_func_with_outparam) { if (unlikely(query == NULL)) { ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), @@ -167,11 +190,11 @@ Datum AutonomousSession::ExecSimpleQuery(const char* query, TupleDesc resultTupl m_res = PQexecAutonm(m_conn, query, currentXid, isLockWait); t_thrd.int_cxt.ImmediateInterruptOK = old; - ATResult result = HandlePGResult(m_conn, m_res, resultTupleDesc); + ATResult result = HandlePGResult(m_conn, m_res, resultTupleDesc, is_plpgsql_func_with_outparam); PQclear(m_res); m_res = NULL; - return result.ResTup; + return result; } /* @@ -210,8 +233,8 @@ void CreateAutonomousSession(void) u_sess->SPI_cxt.autonomous_session->Init(); u_sess->SPI_cxt.autonomous_session->AttachSession(); u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery("set session_timeout = 0;", NULL, 0); - u_sess->SPI_cxt.autonomous_session->current_attach_sessionid = - u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery("select pg_current_sessid();", NULL, 0); + ATResult res = u_sess->SPI_cxt.autonomous_session->ExecSimpleQuery("select pg_current_sessid();", NULL, 0); + u_sess->SPI_cxt.autonomous_session->current_attach_sessionid = res.ResTup; } else { if (!u_sess->SPI_cxt.autonomous_session->GetConnStatus() && !u_sess->SPI_cxt.autonomous_session->ReConnSession()) { @@ -228,6 +251,7 @@ void CreateAutonomousSession(void) void DestoryAutonomousSession(bool force) { if (u_sess->SPI_cxt.autonomous_session && (force || u_sess->SPI_cxt._connected == 0)) { + u_sess->SPI_cxt.autonomous_session->ReSetDeadLockTimeOut(); u_sess->SPI_cxt.autonomous_session->DetachSession(); AutonomousSession* autonomousSession = u_sess->SPI_cxt.autonomous_session; pfree(autonomousSession); @@ -240,14 +264,16 @@ void DestoryAutonomousSession(bool force) * HandleResInfo * Process the result returned by the 'select function'. 
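Widening `ExecSimpleQuery` to return the whole `ATResult` rather than a bare `Datum` exposes the new `resisnull` flag to callers: a zero `Datum` by itself cannot distinguish SQL NULL from the integer 0. A reduced sketch of why the wider type matters; the struct fields follow the patch, everything else is a stand-in.

```cpp
#include <cstdint>
#include <cstdio>

typedef uintptr_t Datum; /* opaque value slot, as in the kernel */

enum ATResultCode { RES_COMMAND_OK, RES_TUPLES_OK };
struct ATResult {
    ATResultCode result;
    bool withtuple;
    bool resisnull;
    Datum ResTup;
};

static ATResult exec_simple_query_stub(bool nullResult)
{
    /* Both a NULL result and integer 0 encode ResTup as (Datum)0;
     * only the resisnull flag separates them. */
    ATResult res = {RES_TUPLES_OK, true, nullResult, (Datum)0};
    return res;
}

int main()
{
    ATResult res = exec_simple_query_stub(true);
    printf("ResTup=%lu -> %s\n", (unsigned long)res.ResTup,
           res.resisnull ? "SQL NULL" : "integer 0");
    return 0;
}
```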
*/ -Datum HandleResInfo(PGconn* conn, const PGresult* result, TupleDesc resultTupleDesc) +Datum HandleResInfo(PGconn *conn, const PGresult *result, TupleDesc resultTupleDesc, bool is_plpgsql_func_with_outparam, + bool *resisnull) { Oid typeInput; Oid typeParam; int nColumns = PQnfields(result); int nRows = PQntuples(result); Datum res; - if (nColumns > 1 || nRows > 1) { + *resisnull = false; + if ((nColumns > 1 || nRows > 1) && !is_plpgsql_func_with_outparam) { ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), errmsg("Invalid autonomous transaction return datatypes"), errdetail("nColumns = %d, nRows = %d", nColumns, nRows), @@ -255,22 +281,48 @@ Datum HandleResInfo(PGconn* conn, const PGresult* result, TupleDesc resultTupleD erraction("Contact Huawei Engineer."))); } - Oid ftype = PQftype(result, 0); - char* valueStr = PQgetvalue(result, 0, 0); + if (!is_plpgsql_func_with_outparam) { + Oid ftype = PQftype(result, 0); + char* valueStr = PQgetvalue(result, 0, 0); - if (resultTupleDesc != NULL) { - res = RecordCstringGetDatum(resultTupleDesc, valueStr); + if (resultTupleDesc != NULL) { + res = RecordCstringGetDatum(resultTupleDesc, valueStr); + } else { + /* translate data string to Datum */ + getTypeInputInfo(ftype, &typeInput, &typeParam); + if (PQgetisnull(result,0,0)) { + res = (Datum)0; + *resisnull = true; + } else { + res = OidInputFunctionCall(typeInput, valueStr, typeParam, -1); + } + } } else { - /* translate data string to Datum */ - getTypeInputInfo(ftype, &typeInput, &typeParam); - res = OidInputFunctionCall(typeInput, valueStr, typeParam, -1); + Datum *values = (Datum*)palloc(sizeof(Datum) * nColumns); + bool *nulls = (bool*)palloc(sizeof(bool) * nColumns); + + for (int i = 0; i < nColumns; i++) { + Oid ftype = PQftype(result, i); + char* valueStr = PQgetvalue(result, 0, i); + getTypeInputInfo(ftype, &typeInput, &typeParam); + nulls[i] = PQgetisnull(result,0,i); + if (nulls[i]) { + values[i] = (Datum)0; + } else { + values[i] = OidInputFunctionCall(typeInput, valueStr, typeParam, -1); + } + } + HeapTuple rettup = heap_form_tuple(resultTupleDesc, values, nulls); + res = PointerGetDatum(SPI_returntuple(rettup, resultTupleDesc)); + pfree(values); + pfree(nulls); } return res; } -ATResult HandlePGResult(PGconn* conn, PGresult* pgresult, TupleDesc resultTupleDesc) +ATResult HandlePGResult(PGconn* conn, PGresult* pgresult, TupleDesc resultTupleDesc, bool is_plpgsql_func_with_outparam) { if (unlikely(conn == NULL || pgresult == NULL)) { ereport(ERROR, (errcode(ERRCODE_PLPGSQL_ERROR), errmsg("invalid data \" %s \" in autonomous transactions", @@ -286,6 +338,7 @@ ATResult HandlePGResult(PGconn* conn, PGresult* pgresult, TupleDesc resultTupleD res.result = RES_COMMAND_OK; res.withtuple = true; res.ResTup = (Datum)0; + res.resisnull = true; break; /* * contains a single result tuple from the current command. 
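The new OUT-parameter branch of `HandleResInfo` converts each column of the single result row independently, tracking NULLs per column, and only then reassembles a tuple. A self-contained approximation of that loop, with `strtol` standing in for `OidInputFunctionCall` and the final print standing in for `heap_form_tuple`/`SPI_returntuple`.

```cpp
#include <cstdio>
#include <cstdlib>
#include <vector>

int main()
{
    /* One row, three columns; a null pointer models PQgetisnull() == true. */
    const char* row[] = { "7", nullptr, "42" };
    const int nColumns = 3;

    std::vector<long> values(nColumns);
    std::vector<bool> nulls(nColumns);

    for (int i = 0; i < nColumns; i++) {
        nulls[i] = (row[i] == nullptr);
        /* per-column "input function": text -> typed value, 0 as NULL placeholder */
        values[i] = nulls[i] ? 0 : strtol(row[i], nullptr, 10);
    }

    /* stand-in for forming the result tuple out of values[]/nulls[] */
    for (int i = 0; i < nColumns; i++) {
        if (nulls[i]) {
            printf("col %d: NULL\n", i);
        } else {
            printf("col %d: %ld\n", i, values[i]);
        }
    }
    return 0;
}
```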
@@ -296,12 +349,13 @@ ATResult HandlePGResult(PGconn* conn, PGresult* pgresult, TupleDesc resultTupleD res.result = RES_SINGLE_TUPLE; res.withtuple = true; res.ResTup = (Datum)0; + res.resisnull = true; break; case PGRES_TUPLES_OK: res.result = RES_TUPLES_OK; res.withtuple = true; - res.ResTup = HandleResInfo(conn, pgresult, resultTupleDesc); + res.ResTup = HandleResInfo(conn, pgresult, resultTupleDesc, is_plpgsql_func_with_outparam, &res.resisnull); break; /* the string sent to the server was empty */ @@ -497,7 +551,9 @@ PGresult* PQexecAutonm(PGconn* conn, const char* query, int64 automnXid, bool is return NULL; } if (isLockWait) { + u_sess->SPI_cxt.autonomous_session->SetDeadLockTimeOut(); XactLockTableWait(automnXid); + u_sess->SPI_cxt.autonomous_session->ReSetDeadLockTimeOut(); CHECK_FOR_INTERRUPTS(); } return PQexecAutonmFinish(conn); diff --git a/src/gausskernel/process/tcop/dest.cpp b/src/gausskernel/process/tcop/dest.cpp index d5f974ef5..66a57cd25 100644 --- a/src/gausskernel/process/tcop/dest.cpp +++ b/src/gausskernel/process/tcop/dest.cpp @@ -69,13 +69,6 @@ static DestReceiver debugtupDR = {debugtup, debugStartup, donothingCleanup, dono static DestReceiver spi_printtupDR = {spi_printtup, spi_dest_startup, donothingCleanup, donothingCleanup, DestSPI}; -void init_sess_dest(DestReceiver* initdonothingDR, DestReceiver* initdebugtupDR, DestReceiver* initspi_printtupDR) -{ - *initdonothingDR = donothingDR; - *initdebugtupDR = debugtupDR; - *initspi_printtupDR = spi_printtupDR; -} - /* Globally available receiver for DestNone */ DestReceiver* None_Receiver = &donothingDR; @@ -119,15 +112,13 @@ DestReceiver* CreateDestReceiver(CommandDest dest) return printtup_create_DR(dest); case DestNone: - return u_sess->utils_cxt.donothingDR; + return &donothingDR; case DestDebug: - u_sess->utils_cxt.debugtupDR->mydest = DestDebug; - return u_sess->utils_cxt.debugtupDR; + return &debugtupDR; case DestSPI: - u_sess->utils_cxt.spi_printtupDR->mydest = DestSPI; - return u_sess->utils_cxt.spi_printtupDR; + return &spi_printtupDR; case DestSPITupleAnalyze: return createAnalyzeSPIDestReceiver(dest); diff --git a/src/gausskernel/process/tcop/fastpath.cpp b/src/gausskernel/process/tcop/fastpath.cpp index 43cafd663..08d3dd5b9 100644 --- a/src/gausskernel/process/tcop/fastpath.cpp +++ b/src/gausskernel/process/tcop/fastpath.cpp @@ -267,7 +267,7 @@ int HandleFunctionRequest(StringInfo msgBuf) struct fp_info* fip = NULL; bool callit = false; bool was_logged = false; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; errno_t errorno = EOK; errorno = memset_s(&my_fp, sizeof(struct fp_info), 0, sizeof(struct fp_info)); securec_check(errorno, "\0", "\0"); diff --git a/src/gausskernel/process/tcop/postgres.cpp b/src/gausskernel/process/tcop/postgres.cpp index c92700b9c..22d14f8b4 100755 --- a/src/gausskernel/process/tcop/postgres.cpp +++ b/src/gausskernel/process/tcop/postgres.cpp @@ -44,9 +44,10 @@ #include "access/ustore/undo/knl_uundoapi.h" #include "access/double_write.h" #include "catalog/namespace.h" -#include "catalog/pg_type.h" #include "catalog/pg_authid.h" +#include "catalog/pg_database.h" #include "catalog/pg_proc.h" +#include "catalog/pg_type.h" #include "commands/async.h" #include "commands/matview.h" #include "commands/prepare.h" @@ -210,7 +211,6 @@ typedef struct AttachInfoContext { } AttachInfoContext; #define PARAMS_LEN 4096 -#define PRINTF_DST_MAX 32 #define PRINFT_DST_MAX_DOUBLE 64 #define MEMCPY_DST_NUM 4 @@ -588,6 +588,7 @@ static int SocketBackend(StringInfo inBuf) case 'F': /* fastpath 
function call */ case 'I': /* Push, Pop schema name */ case 'L': /* Link gc_fdw */ + case 'J': /* Trace ID */ break; case 'X': /* terminate */ @@ -653,6 +654,7 @@ static int SocketBackend(StringInfo inBuf) case 'k': /* Global session ID */ case 'z': /* PBE for DDL */ case 'y': /* sequence from cn 2 dn */ + case 'T': /* consistency point */ break; #endif @@ -1379,9 +1381,8 @@ void exec_simple_plan(PlannedStmt* plan) CommandDest dest = (CommandDest)t_thrd.postgres_cxt.whereToSendOutput; MemoryContext oldcontext; bool save_log_statement_stats = u_sess->attr.attr_common.log_statement_stats; - bool was_logged = false; bool isTopLevel = false; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; if (plan == NULL) { ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("Invaild parameter."))); @@ -1668,9 +1669,9 @@ void exec_simple_plan(PlannedStmt* plan) /* * Emit duration logging if appropriate. */ - switch (check_log_duration(msec_str, was_logged)) { + switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, debugid %ld, unique id %lu", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + Assert(false); break; case 2: ereport(LOG, @@ -2133,7 +2134,7 @@ static void exec_simple_query(const char* query_string, MessageType messageType, bool save_log_statement_stats = u_sess->attr.attr_common.log_statement_stats; bool was_logged = false; bool isTopLevel = false; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; List* query_string_locationlist = NIL; int stmt_num = 0; size_t query_string_len = 0; @@ -2785,16 +2786,16 @@ static void exec_simple_query(const char* query_string, MessageType messageType, */ switch (check_log_duration(msec_str, was_logged)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, queryid %ld, unique id %lu", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + ereport(LOG, (errmsg("duration: %s ms, queryid %ld, unique id %lu", msec_str, + u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); break; case 2: { char* mask_string = NULL; MASK_PASSWORD_START(mask_string, query_string); - ereport(LOG, - (errmsg("duration: %s ms queryid %ld unique id %ld statement: %s", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id,mask_string), - errhidestmt(true), - errdetail_execute(parsetree_list))); + ereport(LOG, (errmsg("duration: %s ms queryid %ld unique id %ld statement: %s", msec_str, + u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id, mask_string), + errhidestmt(true), errdetail_execute(parsetree_list))); MASK_PASSWORD_END(mask_string, query_string); break; } @@ -3042,9 +3043,7 @@ static void exec_plan_with_params(StringInfo input_message) */ params->params[paramno].pflags = PARAM_FLAG_CONST; params->params[paramno].ptype = ptype; - params->params[paramno].tableOfIndexType = InvalidOid; - params->params[paramno].tableOfIndex = NULL; - params->params[paramno].isnestedtable = false; + params->params[paramno].tabInfo = NULL; } } else params = NULL; @@ -3205,7 +3204,7 @@ static void exec_parse_message(const char* query_string, /* string to execute */ CachedPlanSource* psrc = NULL; bool is_named = false; bool save_log_statement_stats = u_sess->attr.attr_common.log_statement_stats; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; char* mask_string = NULL; #ifdef ENABLE_MULTIPLE_NODES bool runOnSingleNode = false; @@ -3625,7 +3624,7 @@ 
pass_parsing: */ switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, queryid %ld unique id %ld", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + Assert(false); break; case 2: { char* cur_mask_string = NULL; @@ -3849,8 +3848,7 @@ static int getSingleNodeIdx(StringInfo input_message, CachedPlanSource* psrc, co */ params->params[paramno].pflags = PARAM_FLAG_CONST; params->params[paramno].ptype = ptype; - params->params[paramno].tableOfIndexType = InvalidOid; - params->params[paramno].tableOfIndex = NULL; + params->params[paramno].tabInfo = NULL; /* Reset the compatible illegal chars import flag */ u_sess->mb_cxt.insertValuesBind_compatible_illegal_chars = false; @@ -4121,9 +4119,7 @@ void exec_get_ddl_params(StringInfo input_message) */ params->params[paramno].pflags = PARAM_FLAG_CONST; params->params[paramno].ptype = ptype; - params->params[paramno].tableOfIndexType = InvalidOid; - params->params[paramno].tableOfIndex = NULL; - params->params[paramno].isnestedtable = false; + params->params[paramno].tabInfo = NULL; } } else { params = NULL; @@ -4174,7 +4170,7 @@ static void exec_bind_message(StringInfo input_message) MemoryContext oldContext; bool save_log_statement_stats = u_sess->attr.attr_common.log_statement_stats; bool snapshot_set = false; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; u_sess->parser_cxt.param_info = NULL; u_sess->parser_cxt.param_message = NULL; @@ -4672,9 +4668,7 @@ static void exec_bind_message(StringInfo input_message) */ params->params[paramno].pflags = PARAM_FLAG_CONST; params->params[paramno].ptype = ptype; - params->params[paramno].isnestedtable = false; - params->params[paramno].tableOfIndexType = InvalidOid; - params->params[paramno].tableOfIndex = NULL; + params->params[paramno].tabInfo = NULL; /* Reset the compatible illegal chars import flag */ u_sess->mb_cxt.insertValuesBind_compatible_illegal_chars = false; @@ -4788,7 +4782,7 @@ static void exec_bind_message(StringInfo input_message) */ switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, queryid %ld, unique id %ld", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + Assert(false); break; case 2: { char* mask_string = NULL; @@ -4838,7 +4832,7 @@ static void exec_execute_message(const char* portal_name, long max_rows) bool is_xact_command = false; bool execute_is_fetch = false; bool was_logged = false; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; gstrace_entry(GS_TRC_ID_exec_execute_message); /* Adjust destination to tell printtup.c what to do */ @@ -5053,7 +5047,8 @@ static void exec_execute_message(const char* portal_name, long max_rows) */ switch (check_log_duration(msec_str, was_logged)) { case 1: - ereport(LOG, (errmsg("duration: %s ms queryid %ld unique id %ld", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + ereport(LOG, (errmsg("duration: %s ms queryid %ld unique id %ld", msec_str, + u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); break; case 2: { char* mask_string = NULL; @@ -5172,6 +5167,7 @@ int check_log_duration(char* msec_str, bool was_logged) } } + msec_str[0] = '\0'; return 0; } @@ -5871,7 +5867,7 @@ void HandlePoolerReload(void) if (IS_PGXC_DATANODE) return; - u_sess->sig_cxt.got_PoolReload = true; + ResetGotPoolReload(true); u_sess->sig_cxt.cp_PoolReload = true; } @@ 
-6311,6 +6307,12 @@ void ProcessInterrupts(void) errmsg("canceling statement due to %s request", is_datanode ? "coordinator" : "user"))); } } + } else if (pool_validate_cancel_pending && IS_PGXC_COORDINATOR) { + InterruptPending = true; + t_thrd.int_cxt.QueryCancelPending = true; + t_thrd.int_cxt.PoolValidateCancelPending = true; + ereport(WARNING, (errmsg("thread received SIGUSR2 signal but cannot interrupt while in DoingCommandRead. " + "Set the validate and interrupt flags to be checked next time."))); } if (IsJobSnapshotProcess()) { ereport(ERROR, (errcode(ERRCODE_QUERY_CANCELED), errmsg("canceling snapshot task"))); @@ -6957,10 +6959,10 @@ void reload_configfile(void) /* reload pooler for online business in expansion. */ void reload_online_pooler() { - if (u_sess->sig_cxt.got_PoolReload && !IsConnFromGTMTool()) { + if (IsGotPoolReload() && !IsConnFromGTMTool()) { if (!IsTransactionBlock()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } else { ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), @@ -7043,14 +7045,18 @@ void RemoveTempNamespace() if ((IS_PGXC_COORDINATOR || isSingleMode || u_sess->attr.attr_common.xc_maintenance_mode || IS_SINGLE_NODE) && u_sess->catalog_cxt.deleteTempOnQuiting) { MemoryContext current_context = CurrentMemoryContext; + ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner; + bool need_rebuild_lsc = true; PG_TRY(); { + t_thrd.proc_cxt.PostInit->InitLoadLocalSysCache(u_sess->proc_cxt.MyDatabaseId, + u_sess->proc_cxt.MyDatabaseId == TemplateDbOid ? NULL : u_sess->proc_cxt.MyProcPort->database_name); + need_rebuild_lsc = false; + StringInfoData str; initStringInfo(&str); - if (u_sess->catalog_cxt.myTempNamespace) { - ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner; t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "ForTempTableDrop", THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); @@ -7080,6 +7086,10 @@ void RemoveTempNamespace() PG_CATCH(); { EmitErrorReport(); + t_thrd.utils_cxt.CurrentResourceOwner = currentOwner; + if (need_rebuild_lsc) { + ReBuildLSC(); + } MemoryContextSwitchTo(current_context); FlushErrorState(); ereport(WARNING, (errmsg("Drop temp schema failed. 
The temp schema will be drop by TwoPhaseCleanner."))); @@ -7108,16 +7118,12 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam #ifdef ENABLE_MULTIPLE_NODES /* PGXC_DATANODE */ /* Snapshot info */ - TransactionId xmin; - TransactionId xmax; uint64 csn; - TransactionId recent_global_xmin; bool cn_xc_maintain_mode = false; bool remote_gtm_mode = false; /* Timestamp info */ TimestampTz gtmstart_timestamp; TimestampTz stmtsys_timestamp; - GTM_Timeline timeline; int ss_need_sync_wait_all = 0; errno_t rc = EOK; CsnType csn_type; @@ -7286,6 +7292,12 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); /* block everything except SIGQUIT */ + if (IsInitdb) { + Assert(!IsUnderPostmaster); + g_instance.global_sysdbcache.Init(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); + CreateLocalSysDBCache(); + } + if (!IsUnderPostmaster) { /* * Validate we have been given a reasonable-looking t_thrd.proc_cxt.DataDir (if under @@ -7373,11 +7385,15 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam #endif #ifndef ENABLE_MULTIPLE_NODES - if (!ENABLE_THREAD_POOL) { + if (!IS_THREAD_POOL_WORKER) { on_shmem_exit(PlDebugerCleanUp, 0); } #endif + if (ENABLE_GPC) { + on_shmem_exit(cleanGPCPlanProcExit, 0); + } + /* * Create a per-backend PGPROC struct in shared memory, except in the * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do @@ -7609,6 +7625,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam */ /* Since not using PG_TRY, must reset error stack by hand */ + u_sess->plsql_cxt.cur_exception_cxt = NULL; t_thrd.log_cxt.error_context_stack = NULL; t_thrd.log_cxt.call_stack = NULL; /* reset buffer strategy flag */ @@ -7698,6 +7715,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam pfree_ext(t_thrd.wlm_cxt.collect_info->sdetail.msg); } } + u_sess->plsql_cxt.pragma_autonomous = false; u_sess->plsql_cxt.curr_compile_context = NULL; u_sess->pcache_cxt.gpc_in_batch = false; u_sess->pcache_cxt.gpc_in_try_store = false; @@ -7711,6 +7729,9 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam u_sess->statement_cxt.executer_run_level = 0; + /* reset stream for-loop flag when long jump */ + u_sess->SPI_cxt.has_stream_in_cursor_or_forloop_sql = false; + /* release operator-level hash table in memory */ releaseExplainTable(); @@ -7731,6 +7752,8 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam } else { AbortCurrentTransaction(); } + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); /* Notice: at the most time it isn't necessary to call because * all the LWLocks are released in AbortCurrentTransaction(). 
* but in some rare exception not in one transaction (for @@ -7757,6 +7780,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam /* reset query_id after sync quit */ pgstat_report_queryid(0); + pgstat_report_unique_sql_id(true); /* * Make sure debug_query_string gets reset before we possibly clobber @@ -7805,10 +7829,14 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam /* We don't have a transaction command open anymore */ t_thrd.postgres_cxt.xact_started = false; + t_thrd.xact_cxt.isSelectInto = false; + u_sess->pcache_cxt.cur_stmt_name = NULL; /* Now we can allow interrupts again */ RESUME_INTERRUPTS(); + /* Now we do not allow pool_validate interrupts again */ + PREVENT_POOL_VALIDATE_SIGUSR2(); } oldTryCounter = gstrace_tryblock_entry(&curTryCounter); @@ -7883,11 +7911,14 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam MemoryContextResetAndDeleteChildren(u_sess->temp_mem_cxt); MemoryContextResetAndDeleteChildren(u_sess->stat_cxt.hotkeySessContext); u_sess->stat_cxt.hotkeyCandidates = NIL; + u_sess->plsql_cxt.pass_func_tupdesc = NULL; /* reset plpgsql compile flag */ u_sess->plsql_cxt.compile_context_list = NULL; u_sess->plsql_cxt.curr_compile_context = NULL; u_sess->plsql_cxt.compile_status = NONE_STATUS; + u_sess->plsql_cxt.func_tableof_index = NULL; + u_sess->plsql_cxt.portal_depth = 0; u_sess->statement_cxt.executer_run_level = 0; @@ -7898,6 +7929,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam u_sess->exec_cxt.isLockRows = false; t_thrd.postgres_cxt.mark_explain_analyze = false; t_thrd.postgres_cxt.mark_explain_only = false; + u_sess->SPI_cxt.has_stream_in_cursor_or_forloop_sql = false; if (unlikely(t_thrd.log_cxt.msgbuf->data != NULL)) { pfree_ext(t_thrd.log_cxt.msgbuf->data); } @@ -7960,7 +7992,9 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam /* We're ready for a new query, reset wait status and u_sess->debug_query_id */ pgstat_report_waitstatus(STATE_WAIT_UNDEFINED); pgstat_report_queryid(0); + pgstat_report_unique_sql_id(true); + u_sess->trace_cxt.trace_id[0] = '\0'; /* * If connection to client is lost, we do not need to send message to client. */ @@ -8019,9 +8053,13 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam if (IS_THREAD_POOL_WORKER) { t_thrd.threadpool_cxt.worker->WaitMission(); + Assert(CheckMyDatabaseMatch()); if (!g_instance.archive_obs_cxt.in_switchover && !g_instance.streaming_dr_cxt.isInSwitchover) { Assert(u_sess->status != KNL_SESS_FAKE); } + } else { + /* if we do alter db, reinit syscache */ + ReLoadLSCWhenWaitMission(); } if (isRestoreMode && !IsAbortedTransactionBlockState()) { ResourceOwner currentOwner = t_thrd.utils_cxt.CurrentResourceOwner; @@ -8050,11 +8088,20 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam (void)MemoryContextSwitchTo(old); } + /* + * Record the client connection establish time, which starts when an incoming request arrives (e.g. poll() + * invoking accept()) and ends when a message is returned on the server-side clientfd. + * One session records this only once. + */ + u_sess->clientConnTime_cxt.checkOnlyInConnProcess = false; + /* * Check cache size to see if we need to AcceptInvalidationMessages. */ CleanSystemCaches(true); + CHECK_FOR_INTERRUPTS(); + /* * (2) Allow asynchronous signals to be executed immediately if they * come in while we are waiting for client input. 
(This must be @@ -8097,6 +8144,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam */ t_thrd.postgres_cxt.DoingCommandRead = false; + CHECK_FOR_INTERRUPTS(); /* * (5) check for any other interesting events that happened while we * slept. @@ -8105,9 +8153,10 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam // (6) process pooler reload before the next transaction begin. // - if (u_sess->sig_cxt.got_PoolReload && !IsTransactionOrTransactionBlock() && !IsConnFromGTMTool()) { + if (IsGotPoolReload() && + !IsTransactionOrTransactionBlock() && !IsConnFromGTMTool()) { processPoolerReload(); - u_sess->sig_cxt.got_PoolReload = false; + ResetGotPoolReload(false); } /* @@ -8135,10 +8184,12 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam u_sess->attr.attr_sql.explain_allow_multinode = false; /* Reset store procedure's session variables. */ - stp_reset_opt_values(); + stp_reset_stmt(); MemoryContext oldMemory = MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)); +#ifdef ENABLE_LLVM_COMPILE CodeGenThreadInitialize(); +#endif (void)MemoryContextSwitchTo(oldMemory); u_sess->exec_cxt.single_shard_stmt = false; /* Set statement_timestamp */ @@ -8303,6 +8354,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam { const char* query_string = NULL; + pgstat_report_trace_id(&u_sess->trace_cxt, true); query_string = pq_getmsgstring(&input_message); if (query_string == NULL) { ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), @@ -8545,7 +8597,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam u_sess->unique_sql_cxt.unique_sql_user_id = (Oid)pq_getmsgint(&input_message, sizeof(uint32)); u_sess->unique_sql_cxt.unique_sql_id = (uint64)pq_getmsgint64(&input_message); u_sess->slow_query_cxt.slow_query.unique_sql_id = u_sess->unique_sql_cxt.unique_sql_id; - pgstat_report_queryid(u_sess->debug_query_id); + pgstat_report_unique_sql_id(false); Oid procId = 0; uint64 queryId = 0; int64 stamp = 0; @@ -8704,6 +8756,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam pq_getmsgend(&input_message); statement_init_metric_context(); + instr_stmt_report_trace_id(u_sess->trace_cxt.trace_id); exec_parse_message(query_string, stmt_name, paramTypes, paramTypeNames, paramModes, numParams); statement_commit_metirc_context(); @@ -8730,6 +8783,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam * this message is complex enough that it seems best to put * the field extraction out-of-line */ + instr_stmt_report_trace_id(u_sess->trace_cxt.trace_id); exec_bind_message(&input_message); break; @@ -8738,6 +8792,7 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam const char* portal_name = NULL; int max_rows; + pgstat_report_trace_id(&u_sess->trace_cxt, true); if ((unsigned int)input_message.len > SECUREC_MEM_MAX_LEN) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid execute message"))); @@ -9143,23 +9198,9 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam rc = memcpy_s(&is_check_xid, sizeof(bool), pq_getmsgbytes(&input_message, sizeof(bool)), sizeof(bool)); securec_check(rc, "\0", "\0"); - if (!is_check_xid) { - ereport(DEBUG1, (errmsg("Received new gxid %lu", gxid))); - /* CN function may use gxid from CN to create tmp lib name */ - if (!GTM_MODE) - t_thrd.xact_cxt.cn_xid = gxid; - else - 
SetNextTransactionId(gxid, false); - } else { - /* set the check nextxid */ - if (module_logging_is_on(MOD_TRANS_XACT)) - ereport(LOG, - (errmodule(MOD_TRANS_XACT), - errmsg( - "update check xid from %lu to %lu", t_thrd.xact_cxt.reserved_nextxid_check, gxid))); - - t_thrd.xact_cxt.reserved_nextxid_check = gxid; - } + ereport(DEBUG1, (errmsg("Received new gxid %lu", gxid))); + /* CN function may use gxid from CN to create tmp lib name */ + t_thrd.xact_cxt.cn_xid = gxid; pq_getmsgend(&input_message); } break; @@ -9196,56 +9237,6 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam /* quickly set my recent global xmin */ u_sess->utils_cxt.RecentGlobalXmin = GetOldestXmin(NULL, true); } - }else { /* gtm mode */ - /* Set the snapshot we were passed down */ - rc = memcpy_s( - &ss_need_sync_wait_all, sizeof(int), pq_getmsgbytes(&input_message, sizeof(bool)), sizeof(bool)); - securec_check(rc, "", ""); - - rc = memcpy_s(&xmin, - sizeof(TransactionId), - pq_getmsgbytes(&input_message, sizeof(TransactionId)), - sizeof(TransactionId)); - securec_check(rc, "", ""); - rc = memcpy_s(&xmax, - sizeof(TransactionId), - pq_getmsgbytes(&input_message, sizeof(TransactionId)), - sizeof(TransactionId)); - securec_check(rc, "", ""); - rc = memcpy_s(&recent_global_xmin, - sizeof(TransactionId), - pq_getmsgbytes(&input_message, sizeof(TransactionId)), - sizeof(TransactionId)); - securec_check(rc, "", ""); - rc = memcpy_s(&csn, sizeof(uint64), pq_getmsgbytes(&input_message, sizeof(uint64)), sizeof(uint64)); - securec_check(rc, "", ""); - rc = memcpy_s(&timeline, - sizeof(GTM_Timeline), - pq_getmsgbytes(&input_message, sizeof(GTM_Timeline)), - sizeof(GTM_Timeline)); - securec_check(rc, "", ""); - rc = memcpy_s(&cn_xc_maintain_mode, - sizeof(bool), - pq_getmsgbytes(&input_message, sizeof(bool)), - sizeof(bool)); - securec_check(rc, "", ""); - if (t_thrd.proc->workingVersionNum >= 92012) { - remote_gtm_mode = pq_getmsgbyte(&input_message); - } - pq_getmsgend(&input_message); - /* if message length is correct, set u_sess variables */ - u_sess->utils_cxt.RecentGlobalXmin = recent_global_xmin; - u_sess->utils_cxt.cn_xc_maintain_mode = cn_xc_maintain_mode; - - SetGlobalSnapshotData(xmin, xmax, csn, timeline, ss_need_sync_wait_all); - /* - * Fix u_sess->utils_cxt.RecentGlobalXmin using GetOldestXmin, consider local xmins. - * As we might prune or vacuum dead tuples deleted by xid older than u_sess->utils_cxt.RecentGlobalXmin. - * We should keep u_sess->utils_cxt.RecentGlobalXmin is the minnimum xmin. - * If u_sess->utils_cxt.RecentGlobalXmin is larger than local xmins, tuples being accessed might be - * cleaned. 
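The handlers in this region, both the surviving ones and the deleted GTM-mode branch, decode fixed-width fields by copying them off the message buffer in order (`memcpy_s` over `pq_getmsgbytes` in the kernel). A minimal standalone version of that idiom with an explicit bounds check; the reader type and field layout are invented for illustration.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

struct MsgReader {
    const unsigned char* data;
    size_t len;
    size_t off;
};

/* Bounds-checked sequential field extraction, the role memcpy_s plays above. */
static bool msg_read(MsgReader* r, void* dst, size_t n)
{
    if (r->off + n > r->len) {
        return false; /* truncated message */
    }
    memcpy(dst, r->data + r->off, n);
    r->off += n;
    return true;
}

int main()
{
    unsigned char buf[sizeof(uint64_t) + 1];
    uint64_t csnIn = 987654321ULL;
    memcpy(buf, &csnIn, sizeof(csnIn));
    buf[sizeof(csnIn)] = 1; /* a trailing one-byte flag */

    MsgReader r = {buf, sizeof(buf), 0};
    uint64_t csn = 0;
    unsigned char flag = 0;
    if (msg_read(&r, &csn, sizeof(csn)) && msg_read(&r, &flag, sizeof(flag))) {
        printf("csn=%llu flag=%u\n", (unsigned long long)csn, (unsigned)flag);
    }
    return 0;
}
```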
- */ - u_sess->utils_cxt.RecentGlobalXmin = GetOldestXmin(NULL, true); } /* check gtm mode, remote should be false, local cannot be true */ if (remote_gtm_mode != g_instance.attr.attr_storage.enable_gtm_free && @@ -9287,20 +9278,27 @@ { int command; char* id = NULL; - command = pq_getmsgbyte(&input_message); id = (char*)pq_getmsgstring(&input_message); pq_getmsgend(&input_message); switch (command) { case CREATE_BARRIER_PREPARE: - ProcessCreateBarrierPrepare(id); + ProcessCreateBarrierPrepare(id, false); + break; + + case CREATE_SWITCHOVER_BARRIER_PREPARE: + ProcessCreateBarrierPrepare(id, true); break; case CREATE_BARRIER_END: ProcessCreateBarrierEnd(id); break; + case CREATE_BARRIER_COMMIT: + ProcessCreateBarrierCommit(id); + break; + case CREATE_BARRIER_EXECUTE: ProcessCreateBarrierExecute(id); break; @@ -9436,19 +9434,26 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam } break; #endif case 'n': /* commiting */ + { /* Get the csn passed down */ rc = memcpy_s(&csn, sizeof(uint64), pq_getmsgbytes(&input_message, sizeof(uint64)), sizeof(uint64)); securec_check(rc, "", ""); pq_getmsgend(&input_message); + int nchildren; + TransactionId *children = NULL; + TransactionId xid = GetTopTransactionIdIfAny(); + Assert(TransactionIdIsValid(xid)); + nchildren = xactGetCommittedChildren(&children); /* Set the commit csn to commit_in_progress */ SetXact2CommitInProgress(InvalidTransactionId, csn); + XLogInsertStandbyCSNCommitting(xid, csn, children, nchildren); /* Send back response */ pq_putemptymessage('m'); pq_flush(); break; - + } case 'N': /* commit csn */ /* Set the commit csn passed down */ rc = memcpy_s(&csn, sizeof(uint64), pq_getmsgbytes(&input_message, sizeof(uint64)), sizeof(uint64)); @@ -9521,6 +9526,12 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam (errcode(ERRCODE_SYSTEM_ERROR), errmsg("Need to set support_batch_bind=true if executing batch"))); + /* + * reset unique sql start time, otherwise fusionExecute will repeatedly report + extra elapsed time. 
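The reworked `'n'` (committing) handler above now collects the top transaction's committed subtransactions and writes a standby CSN-committing record before acknowledging with `'m'`, so a standby can observe the CSN while the commit is still in progress. A toy model of that sequence; every name below is a stand-in for the kernel's.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

using TransactionId = uint32_t;
using CommitSeqNo = uint64_t;

static void set_commit_in_progress(CommitSeqNo csn)
{
    printf("csn %llu marked commit-in-progress\n", (unsigned long long)csn);
}

static void xlog_standby_csn_committing(TransactionId xid, CommitSeqNo csn,
                                        const std::vector<TransactionId>& children)
{
    printf("xlog: xid %u (+%zu subxids) committing at csn %llu\n",
           (unsigned)xid, children.size(), (unsigned long long)csn);
}

/* Mirrors the handler: mark, log for the standby, then reply. */
static char handle_committing(TransactionId topXid, CommitSeqNo csn,
                              const std::vector<TransactionId>& children)
{
    set_commit_in_progress(csn);
    xlog_standby_csn_committing(topXid, csn, children);
    return 'm'; /* response byte sent back over the wire */
}

int main()
{
    char resp = handle_committing(1001, 987654321ULL, {1002, 1003});
    printf("reply: %c\n", resp);
    return 0;
}
```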
+ */ + u_sess->unique_sql_cxt.unique_sql_start_time = 0; + pgstatCountSQL4SessionLevel(); statement_init_metric_context(); #ifdef USE_RETRY_STUB @@ -9609,7 +9620,37 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam flushSequenceMsg(); } break; + case 'J': + { + char* trace_id = NULL; + trace_id = (char*)pq_getmsgstring(&input_message); + pq_getmsgend(&input_message); + if (strlen(trace_id) > MAX_TRACE_ID_SIZE -1) { + trace_id[MAX_TRACE_ID_SIZE - 1] = '\0'; + ereport(WARNING, (errmsg("trace_id length cannot exceed %d", MAX_TRACE_ID_SIZE - 1))); + } + errno_t rc = + memcpy_s(u_sess->trace_cxt.trace_id, MAX_TRACE_ID_SIZE, trace_id, strlen(trace_id) + 1); + securec_check(rc, "\0", "\0"); + elog(DEBUG1, "trace_id:%s start", u_sess->trace_cxt.trace_id); + } + break; +#ifdef ENABLE_MULTIPLE_NODES + case 'T': + { + LWLockAcquire(XLogMaxCSNLock, LW_SHARED); + CommitSeqNo maxCSN = t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN; + LWLockRelease(XLogMaxCSNLock); + StringInfoData buf; + pq_beginmessage(&buf, 'J'); + pq_sendint64(&buf, maxCSN); + pq_sendint8(&buf, t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby); + pq_endmessage(&buf); + pq_flush(); + } + break; +#endif default: ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid frontend message type %c", firstchar))); @@ -9801,6 +9842,11 @@ void log_disconnections(int code, Datum arg) } +void cleanGPCPlanProcExit(int code, Datum arg) +{ + GPCCleanUpSessionSavedPlan(); +} + /* Aduit user logout */ /* * Brief : audit_processlogout @@ -10847,7 +10893,7 @@ static void exec_batch_bind_execute(StringInfo input_message) MemoryContext oldContext; bool save_log_statement_stats = u_sess->attr.attr_common.log_statement_stats; bool snapshot_set = false; - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; int msg_type; /* D message */ @@ -11221,9 +11267,7 @@ static void exec_batch_bind_execute(StringInfo input_message) */ params->params[paramno].pflags = PARAM_FLAG_CONST; params->params[paramno].ptype = ptype; - params->params[paramno].tableOfIndexType = InvalidOid; - params->params[paramno].tableOfIndex = NULL; - params->params[paramno].isnestedtable = false; + params->params[paramno].tabInfo = NULL; /* Reset the compatible illegal chars import flag */ u_sess->mb_cxt.insertValuesBind_compatible_illegal_chars = false; @@ -11529,7 +11573,7 @@ static void exec_batch_bind_execute(StringInfo input_message) */ switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, queryid %ld unique id %ld", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + Assert(false); break; case 2: { char* mask_string = NULL; diff --git a/src/gausskernel/process/tcop/pquery.cpp b/src/gausskernel/process/tcop/pquery.cpp index f4e374829..685d941f4 100644 --- a/src/gausskernel/process/tcop/pquery.cpp +++ b/src/gausskernel/process/tcop/pquery.cpp @@ -1118,11 +1118,19 @@ bool PortalRun( PLpgSQL_compile_context* save_compile_context = u_sess->plsql_cxt.curr_compile_context; int save_compile_list_length = list_length(u_sess->plsql_cxt.compile_context_list); int save_compile_status = u_sess->plsql_cxt.compile_status; + int savePortalDepth = u_sess->plsql_cxt.portal_depth; + bool savedisAllowCommitRollback = false; + bool needResetErrMsg = false; PG_TRY(); { ActivePortal = portal; t_thrd.utils_cxt.CurrentResourceOwner = portal->resowner; + u_sess->plsql_cxt.portal_depth++; + if (u_sess->plsql_cxt.portal_depth > 1) { + /* commit rollback procedure not 
support in multi-layer portal called */ + needResetErrMsg = stp_disable_xact_and_set_err_msg(&savedisAllowCommitRollback, STP_XACT_TOO_MANY_PORTAL); + } t_thrd.mem_cxt.portal_mem_cxt = PortalGetHeapMemory(portal); MemoryContextSwitchTo(t_thrd.mem_cxt.portal_mem_cxt); @@ -1201,6 +1209,8 @@ bool PortalRun( { /* Uncaught error while executing portal: mark it dead */ MarkPortalFailed(portal); + u_sess->plsql_cxt.portal_depth = savePortalDepth; + stp_reset_xact_state_and_err_msg(savedisAllowCommitRollback, needResetErrMsg); /* Restore global vars and propagate error */ if (saveMemoryContext == saveTopTransactionContext || @@ -1218,23 +1228,6 @@ bool PortalRun( } t_thrd.mem_cxt.portal_mem_cxt = savePortalContext; - if (ENABLE_WORKLOAD_CONTROL) { - /* save error to history info */ - save_error_message(); - if (g_instance.wlm_cxt->dynamic_workload_inited) { - t_thrd.wlm_cxt.parctl_state.errjmp = 1; - if (t_thrd.wlm_cxt.parctl_state.simple == 0) - dywlm_client_release(&t_thrd.wlm_cxt.parctl_state); - else - WLMReleaseGroupActiveStatement(); - dywlm_client_max_release(&t_thrd.wlm_cxt.parctl_state); - } else - WLMParctlRelease(&t_thrd.wlm_cxt.parctl_state); - - if (IS_PGXC_COORDINATOR && t_thrd.wlm_cxt.collect_info->sdetail.msg) { - pfree_ext(t_thrd.wlm_cxt.collect_info->sdetail.msg); - } - } ereport(DEBUG3, (errmodule(MOD_NEST_COMPILE), errcode(ERRCODE_LOG), errmsg("%s clear curr_compile_context because of error.", __func__))); /* reset nest plpgsql compile */ @@ -1246,6 +1239,9 @@ bool PortalRun( } PG_END_TRY(); + u_sess->plsql_cxt.portal_depth = savePortalDepth; + stp_reset_xact_state_and_err_msg(savedisAllowCommitRollback, needResetErrMsg); + if (ENABLE_WORKLOAD_CONTROL) { t_thrd.wlm_cxt.parctl_state.except = 0; diff --git a/src/gausskernel/process/tcop/stmt_retry.cpp b/src/gausskernel/process/tcop/stmt_retry.cpp index 3e0279a61..25c3557b6 100644 --- a/src/gausskernel/process/tcop/stmt_retry.cpp +++ b/src/gausskernel/process/tcop/stmt_retry.cpp @@ -260,7 +260,7 @@ bool IsStmtRetryCapable(StatementRetryController* controller, bool is_extend_que } if (!bret) { - char* ecode_str = plpgsql_get_sqlstate(sqlerrcode); + const char* ecode_str = plpgsql_get_sqlstate(sqlerrcode); bool valid_ecode = IsStmtNeedRetryByErrCode(ecode_str, elevel); bool is_transblock = IsInTransactionChain(true); bool is_unsupported_query = controller->is_unsupported_query_type; @@ -792,7 +792,7 @@ void StatementRetryStub::ECodeStubTest(void) on_stub_test = true; int ecode = ecode_marker[ecode_pos++]; - char* ecode_str = plpgsql_get_sqlstate(ecode); + const char* ecode_str = plpgsql_get_sqlstate(ecode); ereport(ERROR, (errmodule(MOD_CN_RETRY), errcode(ecode), @@ -811,7 +811,7 @@ void StatementRetryStub::ECodeValidate(void) int sqlerrcode; getElevelAndSqlstate(&elevel, &sqlerrcode); - char* ecode_str = plpgsql_get_sqlstate(sqlerrcode); + const char* ecode_str = plpgsql_get_sqlstate(sqlerrcode); if (IsStmtNeedRetryByErrCode(sqlerrcode, elevel)) { ereport(LOG, (errmodule(MOD_CN_RETRY), errmsg("%s catch error pass %s", STUB_PRINT_PREFIX, ecode_str))); } else { diff --git a/src/gausskernel/process/tcop/utility.cpp b/src/gausskernel/process/tcop/utility.cpp index c312f8c36..328dca910 100755 --- a/src/gausskernel/process/tcop/utility.cpp +++ b/src/gausskernel/process/tcop/utility.cpp @@ -36,6 +36,7 @@ #include "catalog/pg_streaming_fn.h" #include "catalog/toasting.h" #include "catalog/cstore_ctlg.h" +#include "catalog/gs_db_privilege.h" #include "catalog/gs_global_config.h" #include "catalog/gs_matview_dependency.h" #include 
"catalog/gs_matview.h" @@ -186,7 +187,6 @@ static void attatch_global_info(char** query_string_with_info, VacuumStmt* stmt, AnalyzeMode e_analyze_mode, Oid rel_id, char* foreign_tbl_schedul_message = NULL); static char* get_hybrid_message(ForeignTableDesc* table_desc, VacuumStmt* stmt, char* foreign_tbl_schedul_message); static bool need_full_dn_execution(const char* group_name); -int SetGTMVacuumFlag(GTM_TransactionKey txn_key, bool is_vacuum); extern void check_log_ft_definition(CreateForeignTableStmt* stmt); extern void ts_check_feature_disable(); @@ -245,6 +245,12 @@ bool IsSchemaInDistribution(const Oid namespaceOid) return result; } +static bool foundPgstatPartititonOperations(AlterTableType subtype) +{ + return subtype == AT_TruncatePartition || subtype == AT_ExchangePartition || subtype == AT_DropPartition || + subtype == AT_DropSubPartition; +} + /* ---------------------------------------------------------------- * report_utility_time * @@ -281,10 +287,7 @@ static void report_utility_time(void* parse_tree) AlterTableCmd* cmd = NULL; foreach (lc, ats->cmds) { cmd = (AlterTableCmd*)lfirst(lc); - if (cmd->subtype == AT_TruncatePartition || cmd->subtype == AT_ExchangePartition || - cmd->subtype == AT_DropPartition) { - found = true; - } + found = foundPgstatPartititonOperations(cmd->subtype); } if (found == false) { @@ -449,6 +452,7 @@ static void check_xact_readonly(Node* parse_tree) case T_DropRoleStmt: case T_GrantStmt: case T_GrantRoleStmt: + case T_GrantDbStmt: case T_AlterDefaultPrivilegesStmt: case T_TruncateStmt: case T_DropOwnedStmt: @@ -471,6 +475,8 @@ static void check_xact_readonly(Node* parse_tree) case T_CreateResourcePoolStmt: case T_AlterResourcePoolStmt: case T_DropResourcePoolStmt: + case T_AlterGlobalConfigStmt: + case T_DropGlobalConfigStmt: case T_CreatePolicyLabelStmt: case T_AlterPolicyLabelStmt: case T_DropPolicyLabelStmt: @@ -2479,7 +2485,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi Oid node_oid = get_pgxc_nodeoid(g_instance.attr.attr_common.PGXCNodeName); bool nodeis_active = true; nodeis_active = is_pgxc_nodeactive(node_oid); - if (OidIsValid(node_oid) && nodeis_active == false && !IS_CNDISASTER_RECOVER_MODE) + if (OidIsValid(node_oid) && nodeis_active == false && !IS_CN_OBS_DISASTER_RECOVER_MODE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Current Node is not active"))); } } @@ -2519,20 +2525,6 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi u_sess->debug_query_id = generate_unique_id64(>_queryId); pgstat_report_queryid(u_sess->debug_query_id); } - - /* check the commit cmd */ - if (GTM_MODE && (IS_PGXC_DATANODE || IsConnFromCoord())) { - TransactionId CurrentTopXid = GetTopTransactionIdIfAny(); - if (TransactionIdIsValid(CurrentTopXid) && - TransactionIdIsValid(t_thrd.xact_cxt.reserved_nextxid_check) && - t_thrd.xact_cxt.reserved_nextxid_check != CurrentTopXid) { - ereport(PANIC, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("commit xid %lu is not equal to the excute one %lu.", - CurrentTopXid, - t_thrd.xact_cxt.reserved_nextxid_check)))); - } - } /* only check write nodes csn valid */ if (TransactionIdIsValid(GetTopTransactionIdIfAny())) { CheckProcCsnValid(); @@ -2649,22 +2641,6 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi break; case TRANS_STMT_ROLLBACK: - /* check the abort cmd */ - if (GTM_MODE && (IS_PGXC_DATANODE || IsConnFromCoord())) { - TransactionId CurrentTopXid = GetTopTransactionIdIfAny(); - - if 
(TransactionIdIsValid(CurrentTopXid) && - TransactionIdIsValid(t_thrd.xact_cxt.reserved_nextxid_check) && - t_thrd.xact_cxt.reserved_nextxid_check != CurrentTopXid) { - /* level ERROR will cause ERRDATA stack overflow */ - ereport(PANIC, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("abort xid %lu is not equal to the former one %lu.", - CurrentTopXid, - t_thrd.xact_cxt.reserved_nextxid_check)))); - } - } - UserAbortTransactionBlock(); FreeSavepointList(); break; @@ -4333,6 +4309,30 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi #endif break; + case T_GrantDbStmt: +#ifdef ENABLE_MULTIPLE_NODES + if (IS_PGXC_COORDINATOR) { + char* first_exec_node = find_first_exec_cn(); + bool is_first_node = (strcmp(first_exec_node, g_instance.attr.attr_common.PGXCNodeName) == 0); + + if (u_sess->attr.attr_sql.enable_parallel_ddl && !is_first_node) { + ExecUtilityStmtOnNodes_ParallelDDLMode( + query_string, NULL, sent_to_remote, false, EXEC_ON_COORDS, false, first_exec_node); + ExecuteGrantDbStmt((GrantDbStmt*)parse_tree); + ExecUtilityStmtOnNodes_ParallelDDLMode( + query_string, NULL, sent_to_remote, false, EXEC_ON_DATANODES, false, first_exec_node); + } else { + ExecuteGrantDbStmt((GrantDbStmt*)parse_tree); + ExecUtilityStmtOnNodes(query_string, NULL, sent_to_remote, false, EXEC_ON_ALL_NODES, false); + } + } else { + ExecuteGrantDbStmt((GrantDbStmt*)parse_tree); + } +#else + ExecuteGrantDbStmt((GrantDbStmt*)parse_tree); +#endif + break; + case T_AlterDefaultPrivilegesStmt: #ifdef PGXC if (IS_PGXC_COORDINATOR) { @@ -4650,7 +4650,18 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi case T_CreateFunctionStmt: /* CREATE FUNCTION */ { - CreateFunction((CreateFunctionStmt*)parse_tree, query_string, InvalidOid); + PG_TRY(); + { + CreateFunction((CreateFunctionStmt*)parse_tree, query_string, InvalidOid); + } + PG_CATCH(); + { + if (u_sess->plsql_cxt.debug_query_string) { + pfree_ext(u_sess->plsql_cxt.debug_query_string); + } + PG_RE_THROW(); + } + PG_END_TRY(); #ifdef PGXC Oid group_oid; bool multi_group = false; @@ -4688,7 +4699,18 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi ereport(ERROR, (errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), errmsg("not support create package in distributed database"))); #endif - CreatePackageCommand((CreatePackageStmt*)parse_tree, query_string); + PG_TRY(); + { + CreatePackageCommand((CreatePackageStmt*)parse_tree, query_string); + } + PG_CATCH(); + { + if (u_sess->plsql_cxt.debug_query_string) { + pfree_ext(u_sess->plsql_cxt.debug_query_string); + } + PG_RE_THROW(); + } + PG_END_TRY(); } break; case T_CreatePackageBodyStmt: /* CREATE PACKAGE SPECIFICATION*/ { @@ -4696,7 +4718,18 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi ereport(ERROR, (errcode(ERRCODE_INVALID_PACKAGE_DEFINITION), errmsg("not support create package in distributed database"))); #endif - CreatePackageBodyCommand((CreatePackageBodyStmt*)parse_tree, query_string); + PG_TRY(); + { + CreatePackageBodyCommand((CreatePackageBodyStmt*)parse_tree, query_string); + } + PG_CATCH(); + { + if (u_sess->plsql_cxt.debug_query_string) { + pfree_ext(u_sess->plsql_cxt.debug_query_string); + } + PG_RE_THROW(); + } + PG_END_TRY(); } break; @@ -4772,14 +4805,13 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi } #ifdef ENABLE_MULTIPLE_NODES - if (stmt->concurrent) { + if (stmt->concurrent && t_thrd.proc->workingVersionNum < 
CREATE_INDEX_CONCURRENTLY_DIST_VERSION_NUM) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PGXC does not support concurrent INDEX yet"), errdetail("The feature is not currently supported"))); } #endif - /* INDEX on a temporary table cannot use 2PC at commit */ rel_id = RangeVarGetRelidExtended(stmt->relation, AccessShareLock, true, false, false, true, NULL, NULL); @@ -4858,15 +4890,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi g_instance.attr.attr_common.allowSystemTableMods = true; } #endif -#ifdef ENABLE_MULTIPLE_NODES - DefineIndex(rel_id, - stmt, - InvalidOid, /* no predefined OID */ - false, /* is_alter_table */ - true, /* check_rights */ - !u_sess->upg_cxt.new_catalog_need_storage, /* skip_build */ - false); /* quiet */ -#else + Oid indexRelOid = DefineIndex(rel_id, stmt, InvalidOid, /* no predefined OID */ @@ -4874,6 +4898,8 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi true, /* check_rights */ !u_sess->upg_cxt.new_catalog_need_storage, /* skip_build */ false); /* quiet */ + +#ifndef ENABLE_MULTIPLE_NODES if (RelationIsCUFormatByOid(rel_id) && (stmt->primary || stmt->unique)) { DefineDeltaUniqueIndex(rel_id, stmt, indexRelOid); } @@ -4889,6 +4915,15 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi } else { ExecUtilityStmtOnNodes(query_string, exec_nodes, sent_to_remote, stmt->concurrent, exec_type, is_temp); } +#ifdef ENABLE_MULTIPLE_NODES + /* Force non-concurrent build on temporary relations, even if CONCURRENTLY was requested */ + char relPersistence = get_rel_persistence(rel_id); + if (stmt->concurrent && + !(relPersistence == RELPERSISTENCE_TEMP || relPersistence == RELPERSISTENCE_GLOBAL_TEMP)) { + /* if the caller didn't specify an index name, look it up by OID. */ + mark_indisvalid_all_cns(stmt->relation->schemaname, get_rel_name(indexRelOid)); + } +#endif } FreeExecNodes(&exec_nodes); #endif @@ -4916,9 +4951,15 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi break; case T_CreateSeqStmt: + { #ifdef PGXC + CreateSeqStmt* stmt = (CreateSeqStmt*)parse_tree; + /* + * We must not scribble on the passed-in CreateSeqStmt, so copy it. (This is + * overkill, but easy.)
+ */ + stmt = (CreateSeqStmt*)copyObject(stmt); if (IS_PGXC_COORDINATOR) { - CreateSeqStmt* stmt = (CreateSeqStmt*)parse_tree; ExecNodes* exec_nodes = NULL; char* query_stringWithUUID = gen_hybirdmsg_for_CreateSeqStmt(stmt, query_string); @@ -4942,7 +4983,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi } } - DefineSequenceWrapper((CreateSeqStmt*)parse_tree); + DefineSequenceWrapper(stmt); if (u_sess->attr.attr_sql.enable_parallel_ddl && !is_first_node) { /* In case this query is related to a SERIAL execution, just bypass */ @@ -4972,7 +5013,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi if (IS_MAIN_COORDINATOR && exec_nodes != NULL && exec_nodes->nodeList->length < u_sess->pgxc_cxt.NumDataNodes) { /* NodeGroup: Create sequence in other datanodes without owned by */ - char* msg = deparse_create_sequence((Node*)parse_tree, true); + char* msg = deparse_create_sequence((Node*)stmt, true); exec_remote_query_4_seq(exec_nodes, msg, stmt->uuid); pfree_ext(msg); } @@ -4980,15 +5021,15 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi pfree_ext(query_stringWithUUID); FreeExecNodes(&exec_nodes); } else { - DefineSequenceWrapper((CreateSeqStmt*)parse_tree); + DefineSequenceWrapper(stmt); } #else - DefineSequenceWrapper((CreateSeqStmt*)parse_tree); + DefineSequenceWrapper(stmt); #endif - ClearCreateSeqStmtUUID((CreateSeqStmt*)parse_tree); + ClearCreateSeqStmtUUID(stmt); break; - + } case T_AlterSeqStmt: #ifdef PGXC if (IS_MAIN_COORDINATOR || IS_SINGLE_NODE) { @@ -5916,7 +5957,8 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi case T_CheckPointStmt: if (!(superuser() || isOperatoradmin(GetUserId()))) ereport( - ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be system admin to do CHECKPOINT"))); + ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be system admin or operator admin to do CHECKPOINT"))); /* * You might think we should have a PreventCommandDuringRecovery() * here, but we interpret a CHECKPOINT command during recovery as @@ -6484,6 +6526,52 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi } break; + case T_AlterGlobalConfigStmt: +#ifdef ENABLE_MULTIPLE_NODES + if (IS_PGXC_COORDINATOR) { + char* first_exec_node = find_first_exec_cn(); + bool is_first_node = (strcmp(first_exec_node, g_instance.attr.attr_common.PGXCNodeName) == 0); + if (u_sess->attr.attr_sql.enable_parallel_ddl && !is_first_node) { + ExecUtilityStmtOnNodes_ParallelDDLMode( + query_string, NULL, sent_to_remote, false, EXEC_ON_COORDS, false, first_exec_node); + AlterGlobalConfig((AlterGlobalConfigStmt*)parse_tree); + ExecUtilityStmtOnNodes_ParallelDDLMode( + query_string, NULL, sent_to_remote, false, EXEC_ON_DATANODES, false, first_exec_node); + } else { + AlterGlobalConfig((AlterGlobalConfigStmt*)parse_tree); + ExecUtilityStmtOnNodes(query_string, NULL, sent_to_remote, false, EXEC_ON_ALL_NODES, false); + } + } else { + AlterGlobalConfig((AlterGlobalConfigStmt*)parse_tree); + } +#else + AlterGlobalConfig((AlterGlobalConfigStmt*)parse_tree); +#endif + break; + + case T_DropGlobalConfigStmt: +#ifdef ENABLE_MULTIPLE_NODES + if (IS_PGXC_COORDINATOR) { + char* first_exec_node = find_first_exec_cn(); + bool is_first_node = (strcmp(first_exec_node, g_instance.attr.attr_common.PGXCNodeName) == 0); + if (u_sess->attr.attr_sql.enable_parallel_ddl && !is_first_node) { + ExecUtilityStmtOnNodes_ParallelDDLMode( + query_string, NULL, 
sent_to_remote, false, EXEC_ON_COORDS, false, first_exec_node); + DropGlobalConfig((DropGlobalConfigStmt*)parse_tree); + ExecUtilityStmtOnNodes_ParallelDDLMode( + query_string, NULL, sent_to_remote, false, EXEC_ON_DATANODES, false, first_exec_node); + } else { + DropGlobalConfig((DropGlobalConfigStmt*)parse_tree); + ExecUtilityStmtOnNodes(query_string, NULL, sent_to_remote, false, EXEC_ON_ALL_NODES, false); + } + } else { + DropGlobalConfig((DropGlobalConfigStmt*)parse_tree); + } +#else + DropGlobalConfig((DropGlobalConfigStmt*)parse_tree); +#endif + break; + case T_CreateWorkloadGroupStmt: #ifndef ENABLE_MULTIPLE_NODES DISTRIBUTED_FEATURE_NOT_SUPPORTED(); @@ -6896,7 +6984,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi } case T_CreatePublicationStmt: -#ifdef ENABLE_MULTIPLE_NODES +#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_LITE_MODE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("openGauss does not support PUBLICATION yet"), @@ -6905,7 +6993,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi CreatePublication((CreatePublicationStmt *) parse_tree); break; case T_AlterPublicationStmt: -#ifdef ENABLE_MULTIPLE_NODES +#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_LITE_MODE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("openGauss does not support PUBLICATION yet"), @@ -6914,7 +7002,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi AlterPublication((AlterPublicationStmt *) parse_tree); break; case T_CreateSubscriptionStmt: -#ifdef ENABLE_MULTIPLE_NODES +#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_LITE_MODE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("openGauss does not support SUBSCRIPTION yet"), @@ -6923,7 +7011,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi CreateSubscription((CreateSubscriptionStmt *) parse_tree, is_top_level); break; case T_AlterSubscriptionStmt: -#ifdef ENABLE_MULTIPLE_NODES +#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_LITE_MODE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("openGauss does not support SUBSCRIPTION yet"), @@ -6932,7 +7020,7 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi AlterSubscription((AlterSubscriptionStmt *) parse_tree); break; case T_DropSubscriptionStmt: -#ifdef ENABLE_MULTIPLE_NODES +#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_LITE_MODE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("openGauss does not support SUBSCRIPTION yet"), @@ -7662,6 +7750,9 @@ static const char* AlterObjectTypeCommandTag(ObjectType obj_type) case OBJECT_FUNCTION: tag = "ALTER FUNCTION"; break; + case OBJECT_PACKAGE: + tag = "ALTER PACKAGE"; + break; case OBJECT_INDEX: tag = "ALTER INDEX"; break; @@ -8212,6 +8303,12 @@ const char* CreateCommandTag(Node* parse_tree) tag = (stmt->is_grant) ? "GRANT ROLE" : "REVOKE ROLE"; } break; + case T_GrantDbStmt: { + GrantDbStmt* stmt = (GrantDbStmt*)parse_tree; + + tag = (stmt->is_grant) ? 
"GRANT" : "REVOKE"; + } break; + case T_AlterDefaultPrivilegesStmt: tag = "ALTER DEFAULT PRIVILEGES"; break; @@ -8571,6 +8668,14 @@ const char* CreateCommandTag(Node* parse_tree) tag = "DROP RESOURCE POOL"; break; + case T_AlterGlobalConfigStmt: + tag = "ALTER GLOBAL CONFIGURATION"; + break; + + case T_DropGlobalConfigStmt: + tag = "Drop GLOBAL CONFIGURATION"; + break; + case T_CreateWorkloadGroupStmt: tag = "CREATE WORKLOAD GROUP"; break; @@ -8837,6 +8942,9 @@ const char* CreateAlterTableCommandTag(const AlterTableType subtype) case AT_AddPartition: tag = "ADD PARTITION"; break; + case AT_AddSubPartition: + tag = "MODIFY PARTITION ADD SUBPARTITION"; + break; case AT_ColumnDefault: tag = "COLUMN DEFAULT"; break; @@ -8867,6 +8975,9 @@ const char* CreateAlterTableCommandTag(const AlterTableType subtype) case AT_DropPartition: tag = "DROP PARTITION"; break; + case AT_DropSubPartition: + tag = "DROP SUBPARTITION"; + break; case AT_AddIndex: tag = "ADD INDEX"; break; @@ -9235,6 +9346,10 @@ LogStmtLevel GetCommandLogLevel(Node* parse_tree) lev = LOGSTMT_DDL; break; + case T_GrantDbStmt: + lev = LOGSTMT_DDL; + break; + case T_AlterDefaultPrivilegesStmt: lev = LOGSTMT_DDL; break; @@ -9536,6 +9651,8 @@ LogStmtLevel GetCommandLogLevel(Node* parse_tree) case T_CreateResourcePoolStmt: case T_AlterResourcePoolStmt: case T_DropResourcePoolStmt: + case T_AlterGlobalConfigStmt: + case T_DropGlobalConfigStmt: case T_CreateWorkloadGroupStmt: case T_AlterWorkloadGroupStmt: case T_DropWorkloadGroupStmt: @@ -10753,10 +10870,6 @@ static void cn_do_vacuum_mpp_table(VacuumStmt* stmt, const char* query_string, b // Step 0: Notify gtm if it is a vacuum is_vacuum = (stmt->options & VACOPT_VACUUM) && (!(stmt->options & VACOPT_FULL)); - if (is_vacuum && GTM_MODE) { - if (SetGTMVacuumFlag(GetCurrentTransactionKeyIfAny(), is_vacuum)) - ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("GTM error, could not set vacuum flag"))); - } // Step 1: Execute query_string on all datanode if (stmt->relation == NULL || (!ISMATMAP(stmt->relation->relname) && !ISMLOG(stmt->relation->relname))) { @@ -11929,7 +12042,7 @@ void DoVacuumMppTable(VacuumStmt* stmt, const char* query_string, bool is_top_le bool isSegmentTable = targRel->storage_type == SEGMENT_PAGE; heap_close(targRel, AccessShareLock); if (isSegmentTable) { - ereport(LOG, (errmsg("skipping segment table \"%s\" --- please use gs_space_shrink " + ereport(INFO, (errmsg("skipping segment table \"%s\" --- please use gs_space_shrink " "to recycle segment space.", stmt->relation->relname))); return; } diff --git a/src/gausskernel/process/threadpool/knl_instance.cpp b/src/gausskernel/process/threadpool/knl_instance.cpp index 3abe4e584..89a6995d3 100755 --- a/src/gausskernel/process/threadpool/knl_instance.cpp +++ b/src/gausskernel/process/threadpool/knl_instance.cpp @@ -55,9 +55,8 @@ const int SIZE_OF_TWO_UINT64 = 16; knl_instance_context g_instance; -#define ALLOCSET_UNDO_MAXSIZE (300 * g_instance.attr.attr_storage.undo_zone_count) +const int ALLOCSET_UNDO_MAXSIZE = 300 * UNDO_ZONE_COUNT; -extern void InitGlobalSeq(); extern void InitGlobalVecFuncMap(); static void knl_g_cost_init(knl_g_cost_context* cost_cxt) @@ -141,6 +140,9 @@ static void knl_g_wal_init(knl_g_wal_context *const wal_cxt) wal_cxt->lastLRCFlushed = WAL_SCANNED_LRC_INIT; wal_cxt->num_locks_in_group = 0; wal_cxt->upgradeSwitchMode = NoDemote; + wal_cxt->totalXlogIterBytes = 0; + wal_cxt->totalXlogIterTimes = 0; + wal_cxt->xlogFlushStats = NULL; } static void knl_g_bgwriter_init(knl_g_bgwriter_context 
*bgwriter_cxt) @@ -153,6 +155,23 @@ static void knl_g_bgwriter_init(knl_g_bgwriter_context *bgwriter_cxt) bgwriter_cxt->rel_one_fork_hashtbl_lock = NULL; } +static void knl_g_repair_init(knl_g_repair_context *repair_cxt) +{ + Assert(repair_cxt != NULL); + repair_cxt->page_repair_hashtbl = NULL; + repair_cxt->page_repair_hashtbl_lock = NULL; + repair_cxt->repair_proc_latch = NULL; +} + +static void knl_g_startup_init(knl_g_startup_context *startup_cxt) +{ + Assert(startup_cxt != NULL); + startup_cxt->remoteReadPageNum = 0; + startup_cxt->badPageHashTbl = NULL; + startup_cxt->current_record = NULL; +} + + static void knl_g_tests_init(knl_g_tests_context* tests_cxt) { Assert(tests_cxt != NULL); @@ -230,7 +249,7 @@ static void knl_g_parallel_redo_init(knl_g_parallel_redo_context* predo_cxt) predo_cxt->redoPf.recovery_done_ptr = 0; predo_cxt->redoPf.speed_according_seg = 0; predo_cxt->redoPf.local_max_lsn = 0; - predo_cxt->redoPf.oldest_segment = 0; + predo_cxt->redoPf.oldest_segment = 1; knl_g_set_redo_finish_status(0); predo_cxt->redoType = DEFAULT_REDO; predo_cxt->pre_enable_switch = 0; @@ -239,6 +258,7 @@ static void knl_g_parallel_redo_init(knl_g_parallel_redo_context* predo_cxt) pg_atomic_write_u64(&(predo_cxt->max_page_flush_lsn[i]), 0); } predo_cxt->permitFinishRedo = 0; + predo_cxt->last_replayed_conflict_csn = 0; predo_cxt->hotStdby = 0; predo_cxt->newestCheckpointLoc = InvalidXLogRecPtr; errno_t rc = memset_s(const_cast(&predo_cxt->newestCheckpoint), sizeof(CheckPoint), 0, @@ -247,6 +267,25 @@ static void knl_g_parallel_redo_init(knl_g_parallel_redo_context* predo_cxt) predo_cxt->unali_buf = (char*)MemoryContextAllocZero(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), NUM_MAX_PAGE_FLUSH_LSN_PARTITIONS * BLCKSZ + BLCKSZ); predo_cxt->ali_buf = (char*)TYPEALIGN(BLCKSZ, predo_cxt->unali_buf); + + rc = memset_s(&predo_cxt->redoCpuBindcontrl, sizeof(RedoCpuBindControl), 0, sizeof(RedoCpuBindControl)); + securec_check(rc, "", ""); +} + +static void knl_g_parallel_decode_init(knl_g_parallel_decode_context* pdecode_cxt) +{ + Assert(pdecode_cxt != NULL); + pdecode_cxt->state = DECODE_INIT; + pdecode_cxt->parallelDecodeCtx = NULL; + pdecode_cxt->ParallelReaderWorkerStatus.threadId = 0; + pdecode_cxt->ParallelReaderWorkerStatus.threadState = PARALLEL_DECODE_WORKER_INVALID; + for (int i = 0; i < MAX_PARALLEL_DECODE_NUM; ++i) { + pdecode_cxt->ParallelDecodeWorkerStatusList[i].threadId = 0; + pdecode_cxt->ParallelDecodeWorkerStatusList[i].threadState = PARALLEL_DECODE_WORKER_INVALID; + } + pdecode_cxt->totalNum = 0; + SpinLockInit(&(pdecode_cxt->rwlock)); + SpinLockInit(&(pdecode_cxt->destroy_lock)); } static void knl_g_cache_init(knl_g_cache_context* cache_cxt) @@ -277,9 +316,18 @@ void knl_g_cachemem_create() DEFAULT_MEMORY_CONTEXT_MAX_SIZE, false); } + for (int i = 0; i < MAX_GLOBAL_PRC_NUM; ++i) { + g_instance.cache_cxt.global_prc_mem[i] = AllocSetContextCreate(g_instance.instance_context, + "GlobalPackageRuntimeCacheMemory", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE, + SHARED_CONTEXT, + DEFAULT_MEMORY_CONTEXT_MAX_SIZE, + false); + } g_instance.plan_cache = New(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)) GlobalPlanCache(); - g_instance.global_session_pkg = - New(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR)) PLGlobalPackageRuntimeCache(); + g_instance.global_session_pkg = PLGlobalPackageRuntimeCache::Instance(); } static void knl_g_comm_init(knl_g_comm_context* comm_cxt) { @@ -300,7 +348,10 @@ static void
knl_g_comm_init(knl_g_comm_context* comm_cxt) comm_cxt->force_cal_space_info = false; comm_cxt->cal_all_space_info_in_progress = false; comm_cxt->current_gsrewind_count = 0; + comm_cxt->isNeedChangeRole = false; comm_cxt->usedDnSpace = NULL; + comm_cxt->request_disaster_cluster = true; + comm_cxt->lastArchiveRcvTime = 0; #ifdef USE_SSL comm_cxt->libcomm_data_port_list = NULL; @@ -316,12 +367,16 @@ static void knl_g_comm_init(knl_g_comm_context* comm_cxt) knl_g_mctcp_init(&g_instance.comm_cxt.mctcp_cxt); knl_g_commutil_init(&g_instance.comm_cxt.commutil_cxt); knl_g_parallel_redo_init(&g_instance.comm_cxt.predo_cxt); + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; ++i) { + knl_g_parallel_decode_init(&g_instance.comm_cxt.pdecode_cxt[i]); + } } static void knl_g_conn_init(knl_g_conn_context* conn_cxt) { conn_cxt->CurConnCount = 0; conn_cxt->CurCMAConnCount = 0; + conn_cxt->CurCMAProcCount = 0; SpinLockInit(&conn_cxt->ConnCountLock); } @@ -334,6 +389,11 @@ static void knl_g_executor_init(knl_g_executor_context* exec_cxt) #endif } +static void knl_g_rto_init(knl_g_rto_context *rto_cxt) +{ + rto_cxt->rto_standby_data = NULL; +} + static void knl_g_xlog_init(knl_g_xlog_context *xlog_cxt) { xlog_cxt->num_locks_in_group = 0; @@ -367,12 +427,18 @@ static void KnlGUndoInit(knl_g_undo_context *undoCxt) -1, (g_instance.undo_cxt.uZoneBitmap[i])->nwords * sizeof(bitmapword)); } MemoryContextSwitchTo(oldContext); - for (auto i = 0; i < UNDO_ZONE_COUNT; ++i) { - undoCxt->uZones[i] = NULL; - } undoCxt->undoTotalSize = 0; undoCxt->undoMetaSize = 0; undoCxt->uZoneCount = 0; + undoCxt->maxChainSize = 0; + undoCxt->undoChainTotalSize = 0; + undoCxt->oldestFrozenXid = InvalidTransactionId; + undoCxt->oldestXidInUndo = InvalidTransactionId; +} + +static void knl_g_flashback_init(knl_g_flashback_context *flashbackCxt) +{ + flashbackCxt->oldestXminInFlashback = InvalidTransactionId; } static void knl_g_libpq_init(knl_g_libpq_context* libpq_cxt) @@ -470,6 +536,7 @@ static void knl_g_pid_init(knl_g_pid_context* pid_cxt) rc = memset_s(pid_cxt, sizeof(knl_g_pid_context), 0, sizeof(knl_g_pid_context)); pid_cxt->PageWriterPID = NULL; pid_cxt->CommReceiverPIDS = NULL; + pid_cxt->PgAuditPID = NULL; securec_check(rc, "\0", "\0"); } @@ -528,6 +595,10 @@ static void knl_g_dw_init(knl_g_dw_context *dw_cxt) errno_t rc = memset_s(dw_cxt, sizeof(knl_g_dw_context), 0, sizeof(knl_g_dw_context)); securec_check(rc, "\0", "\0"); dw_cxt->closed = 1; + + dw_cxt->old_batch_version = false; + dw_cxt->recovery_dw_file_num = 0; + dw_cxt->recovery_dw_file_size = 0; } static void knl_g_numa_init(knl_g_numa_context* numa_cxt) @@ -550,6 +621,7 @@ static void knl_g_archive_obs_init(knl_g_archive_context *archive_cxt) archive_cxt->in_switchover = false; archive_cxt->in_service_truncate = false; archive_cxt->slot_tline = 0; + archive_cxt->chosen_walsender_index = -1; SpinLockInit(&archive_cxt->barrier_lock); } @@ -610,6 +682,11 @@ static void knl_g_spi_plan_init(knl_g_spi_plan_context* spi_plan_cxt) static void knl_g_roach_init(knl_g_roach_context* roach_cxt) { roach_cxt->isRoachRestore = false; + roach_cxt->targetTimeInPITR = NULL; + roach_cxt->globalBarrierRecordForPITR = NULL; + roach_cxt->isXLogForceRecycled = false; + roach_cxt->isGtmFreeCsn = false; + roach_cxt->targetRestoreTimeFromMedia = NULL; } static void knl_g_streaming_dr_init(knl_g_streaming_dr_context* streaming_dr_cxt) @@ -618,6 +695,48 @@ static void knl_g_streaming_dr_init(knl_g_streaming_dr_context* streaming_dr_cxt streaming_dr_cxt->isInSwitchover = 
false; streaming_dr_cxt->isInteractionCompleted = false; streaming_dr_cxt->switchoverBarrierLsn = InvalidXLogRecPtr; + streaming_dr_cxt->rpoSleepTime = 0; + streaming_dr_cxt->rpoBalanceSleepTime = 0; + errno_t rc = memset_s(streaming_dr_cxt->currentBarrierId, MAX_BARRIER_ID_LENGTH, 0, + sizeof(streaming_dr_cxt->currentBarrierId)); + securec_check(rc, "\0", "\0"); + rc = memset_s(streaming_dr_cxt->targetBarrierId, MAX_BARRIER_ID_LENGTH, 0, + sizeof(streaming_dr_cxt->targetBarrierId)); + securec_check(rc, "\0", "\0"); + SpinLockInit(&streaming_dr_cxt->mutex); +} + +static void knl_g_csn_barrier_init(knl_g_csn_barrier_context* csn_barrier_cxt) +{ + csn_barrier_cxt->barrier_hash_table = NULL; + csn_barrier_cxt->barrier_hashtbl_lock = NULL; + csn_barrier_cxt->barrier_context = NULL; + errno_t rc = memset_s(csn_barrier_cxt->stopBarrierId, MAX_BARRIER_ID_LENGTH, 0, + sizeof(csn_barrier_cxt->stopBarrierId)); + securec_check(rc, "\0", "\0"); +} + +static void knl_g_audit_init(knl_g_audit_context *audit_cxt) +{ + g_instance.audit_cxt.global_audit_context = AllocSetContextCreate(g_instance.instance_context, + "GlobalCacheMemory", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE, + SHARED_CONTEXT, + DEFAULT_MEMORY_CONTEXT_MAX_SIZE, + false); + + g_instance.audit_cxt.sys_audit_pipes = NULL; + g_instance.audit_cxt.index_file_lock = NULL; + g_instance.audit_cxt.audit_indextbl = NULL; + g_instance.audit_cxt.audit_indextbl_old = NULL; + g_instance.audit_cxt.current_audit_index = 0; + g_instance.audit_cxt.thread_num = 1; + + for (int i = 0; i < MAX_AUDIT_NUM; ++i) { + g_instance.audit_cxt.audit_coru_fnum[i] = UINT32_MAX; + } } void knl_instance_init() @@ -656,7 +775,6 @@ void knl_instance_init() MemoryContextSeal(g_instance.instance_context); MemoryContextSwitchTo(INSTANCE_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); - InitGlobalSeq(); InitGlobalVecFuncMap(); pthread_mutex_init(&g_instance.gpc_reset_lock, NULL); g_instance.global_session_seq = 0; @@ -675,6 +793,8 @@ void knl_instance_init() knl_g_wlm_init(g_instance.wlm_cxt); knl_g_ckpt_init(&g_instance.ckpt_cxt); knl_g_bgwriter_init(&g_instance.bgwriter_cxt); + knl_g_repair_init(&g_instance.repair_cxt); + knl_g_startup_init(&g_instance.startup_cxt); knl_g_shmem_init(&g_instance.shmem_cxt); g_instance.ckpt_cxt_ctl = &g_instance.ckpt_cxt; g_instance.ckpt_cxt_ctl = (knl_g_ckpt_context*)TYPEALIGN(SIZE_OF_TWO_UINT64, g_instance.ckpt_cxt_ctl); @@ -682,11 +802,13 @@ void knl_instance_init() knl_g_csnminsync_init(&g_instance.csnminsync_cxt); knl_g_dw_init(&g_instance.dw_batch_cxt); knl_g_dw_init(&g_instance.dw_single_cxt); + knl_g_rto_init(&g_instance.rto_cxt); knl_g_xlog_init(&g_instance.xlog_cxt); knl_g_compaction_init(&g_instance.ts_compaction_cxt); KnlGUndoInit(&g_instance.undo_cxt); knl_g_numa_init(&g_instance.numa_cxt); knl_g_adv_init(&g_instance.adv_cxt); + knl_g_flashback_init(&g_instance.flashback_cxt); #ifdef ENABLE_MOT knl_g_mot_init(&g_instance.mot_cxt); @@ -706,9 +828,8 @@ void knl_instance_init() knl_g_spi_plan_init(&g_instance.spi_plan_cxt); knl_g_roach_init(&g_instance.roach_cxt); knl_g_streaming_dr_init(&g_instance.streaming_dr_cxt); - - errno_t rc = memset_s(g_instance.stopBarrierId, MAX_BARRIER_ID_LENGTH, 0, sizeof(g_instance.stopBarrierId)); - securec_check(rc, "\0", "\0"); + knl_g_csn_barrier_init(&g_instance.csn_barrier_cxt); + knl_g_audit_init(&g_instance.audit_cxt); } void add_numa_alloc_info(void* numaAddr, size_t length) diff --git a/src/gausskernel/process/threadpool/knl_session.cpp 
b/src/gausskernel/process/threadpool/knl_session.cpp index b6d1601e7..e21ba8497 100755 --- a/src/gausskernel/process/threadpool/knl_session.cpp +++ b/src/gausskernel/process/threadpool/knl_session.cpp @@ -122,8 +122,6 @@ static void knl_u_attr_init(knl_session_attr* attr) attr->attr_sql.under_explain = false; attr->attr_resource.enable_auto_explain = false; attr->attr_sql.enable_upsert_to_merge = false; - attr->attr_sql.for_print_tuple = false; - attr->attr_sql.numeric_out_for_format = false; attr->attr_common.extension_session_vars_array_size = 0; attr->attr_common.extension_session_vars_array = NULL; } @@ -333,7 +331,7 @@ static void knl_u_stream_init(knl_u_stream_context* stream_cxt) static void knl_u_sig_init(knl_u_sig_context* sig_cxt) { sig_cxt->got_SIGHUP = 0; - sig_cxt->got_PoolReload = 0; + sig_cxt->got_pool_reload = 0; sig_cxt->cp_PoolReload = 0; } @@ -353,6 +351,7 @@ static void knl_u_SPI_init(knl_u_SPI_context* spi) spi->autonomous_session = NULL; spi->spi_exec_cplan_stack = NULL; spi->cur_tableof_index = NULL; + spi->has_stream_in_cursor_or_forloop_sql = false; } static void knl_u_trigger_init(knl_u_trigger_context* tri_cxt) @@ -439,6 +438,7 @@ static void knl_u_utils_init(knl_u_utils_context* utils_cxt) utils_cxt->TransactionXmin = FirstNormalTransactionId; utils_cxt->RecentXmin = FirstNormalTransactionId; + utils_cxt->RecentDataXmin = FirstNormalTransactionId; utils_cxt->RecentGlobalXmin = InvalidTransactionId; utils_cxt->RecentGlobalDataXmin = InvalidTransactionId; @@ -464,11 +464,6 @@ static void knl_u_utils_init(knl_u_utils_context* utils_cxt) utils_cxt->memory_context_limited_white_list = NULL; utils_cxt->enable_memory_context_control = false; (void)syscalllockInit(&utils_cxt->deleMemContextMutex); - - utils_cxt->donothingDR = (DestReceiver*)palloc0(sizeof(DestReceiver)); - utils_cxt->debugtupDR = (DestReceiver*)palloc0(sizeof(DestReceiver)); - utils_cxt->spi_printtupDR = (DestReceiver*)palloc0(sizeof(DestReceiver)); - init_sess_dest(utils_cxt->donothingDR, utils_cxt->debugtupDR, utils_cxt->spi_printtupDR); } static void knl_u_security_init(knl_u_security_context* sec_cxt) { @@ -803,6 +798,7 @@ static void knl_u_plpgsql_init(knl_u_plpgsql_context* plsql_cxt) plsql_cxt->curr_compile_context = NULL; plsql_cxt->compile_context_list = NIL; plsql_cxt->plpgsql_IndexErrorVariable = 0; + plsql_cxt->shared_simple_eval_resowner = NULL; plsql_cxt->simple_eval_estate = NULL; plsql_cxt->simple_econtext_stack = NULL; plsql_cxt->context_array = NIL; @@ -830,8 +826,21 @@ static void knl_u_plpgsql_init(knl_u_plpgsql_context* plsql_cxt) plsql_cxt->isCreateFunction = false; plsql_cxt->need_pkg_dependencies = false; plsql_cxt->pkg_dependencies = NIL; + plsql_cxt->func_tableof_index = NIL; + plsql_cxt->pass_func_tupdesc = NULL; + plsql_cxt->portal_depth = 0; plsql_cxt->auto_parent_session_pkgs = NULL; plsql_cxt->not_found_parent_session_pkgs = false; + plsql_cxt->storedPortals = NIL; + plsql_cxt->portalContext = NIL; + plsql_cxt->call_after_auto = false; + plsql_cxt->parent_session_id = 0; + plsql_cxt->parent_thread_id = 0; + plsql_cxt->parent_context = NULL; + plsql_cxt->is_package_instantiation = false; + plsql_cxt->cur_exception_cxt = NULL; + plsql_cxt->pragma_autonomous = false; + plsql_cxt->ActiveLobToastOid = InvalidOid; } static void knl_u_stat_init(knl_u_stat_context* stat_cxt) @@ -1002,6 +1011,7 @@ static void knl_u_unique_sql_init(knl_u_unique_sql_context* unique_sql_cxt) unique_sql_cxt->multi_sql_offset = 0; unique_sql_cxt->is_top_unique_sql = false; 
unique_sql_cxt->need_update_calls = true; + unique_sql_cxt->skipUniqueSQLCount = 0; unique_sql_cxt->unique_sql_sort_instr = (unique_sql_sorthash_instr*)palloc0(sizeof(unique_sql_sorthash_instr)); unique_sql_cxt->unique_sql_hash_instr = (unique_sql_sorthash_instr*)palloc0(sizeof(unique_sql_sorthash_instr)); unique_sql_cxt->unique_sql_sort_instr->has_sorthash = false; @@ -1028,6 +1038,12 @@ static void knl_u_slow_query_init(knl_u_slow_query_context* slow_query_cxt) slow_query_cxt->slow_query.unique_sql_id = 0; } +static void knl_u_trace_context_init(knl_u_trace_context* trace_cxt) +{ + Assert(trace_cxt != NULL); + trace_cxt->trace_id[0] = '\0'; +} + static void knl_u_ledger_init(knl_u_ledger_context *ledger_context) { ledger_context->resp_tag = NULL; @@ -1062,7 +1078,7 @@ static void knl_u_statement_init(knl_u_statement_context* statement_cxt) statement_cxt->stmt_stat_cxt = NULL; } -static void knl_u_relmap_init(knl_u_relmap_context* relmap_cxt) +void knl_u_relmap_init(knl_u_relmap_context* relmap_cxt) { relmap_cxt->shared_map = (RelMapFile*)palloc0(sizeof(RelMapFile)); relmap_cxt->local_map = (RelMapFile*)palloc0(sizeof(RelMapFile)); @@ -1074,9 +1090,9 @@ static void knl_u_relmap_init(knl_u_relmap_context* relmap_cxt) relmap_cxt->UHeapRelfilenodeMapHash = NULL; } -static void knl_u_inval_init(knl_u_inval_context* inval_cxt) +void knl_u_inval_init(knl_u_inval_context* inval_cxt) { - inval_cxt->deepthInAcceptInvalidationMessage = 0; + inval_cxt->DeepthInAcceptInvalidationMessage = 0; inval_cxt->transInvalInfo = NULL; inval_cxt->SharedInvalidMessagesArray = NULL; inval_cxt->numSharedInvalidMessagesArray = 0; @@ -1088,7 +1104,7 @@ static void knl_u_inval_init(knl_u_inval_context* inval_cxt) inval_cxt->partcache_callback_list = (PARTCACHECALLBACK*)palloc0(sizeof(PARTCACHECALLBACK) * MAX_PARTCACHE_CALLBACKS); inval_cxt->partcache_callback_count = 0; - inval_cxt->SharedInvalidMessageCounter = 0; + inval_cxt->SIMCounter = 0; inval_cxt->catchupInterruptPending = 0; inval_cxt->messages = (SharedInvalidationMessage*)palloc0(MAXINVALMSGS * sizeof(SharedInvalidationMessage)); inval_cxt->nextmsg = 0; @@ -1170,6 +1186,7 @@ static void knl_u_pgxc_init(knl_u_pgxc_context* pgxc_cxt) { #ifdef ENABLE_MULTIPLE_NODES pgxc_cxt->NumDataNodes = 0; + pgxc_cxt->NumTotalDataNodes = 0; #else pgxc_cxt->NumDataNodes = 1; #endif @@ -1196,6 +1213,9 @@ static void knl_u_pgxc_init(knl_u_pgxc_context* pgxc_cxt) pgxc_cxt->PoolerResendParams = false; pgxc_cxt->PoolerConnectionInfo = (PGXCNodeConnectionInfo*)palloc0(sizeof(PGXCNodeConnectionInfo)); pgxc_cxt->poolHandle = NULL; + pgxc_cxt->ConsistencyPointUpdating = false; + pgxc_cxt->disasterReadArray = NULL; + pgxc_cxt->DisasterReadArrayInit = false; pgxc_cxt->connection_cache = NIL; pgxc_cxt->connection_cache_handle = NIL; @@ -1249,6 +1269,7 @@ static void knl_u_xact_init(knl_u_xact_context* xact_cxt) xact_cxt->sendSeqSchmaName = NULL; xact_cxt->sendSeqName = NULL; xact_cxt->send_result = NULL; + xact_cxt->ActiveLobRelid = InvalidOid; } static void knl_u_ps_init(knl_u_ps_context* ps_cxt) @@ -1281,16 +1302,28 @@ static void knl_u_mot_init(knl_u_mot_context* mot_cxt) } #endif +static void knl_u_clientConnTime_init(knl_u_clientConnTime_context* clientConnTime_cxt) +{ + Assert(clientConnTime_cxt != NULL); + + /* Record start time while initializing session for client connection */ + INSTR_TIME_SET_CURRENT(clientConnTime_cxt->connStartTime); + + /* Set flag to "true", to indicate this session in initial process */ + clientConnTime_cxt->checkOnlyInConnProcess = true; +} + void 
knl_session_init(knl_session_context* sess_cxt) { sess_cxt->status = KNL_SESS_UNINIT; DLInitElem(&sess_cxt->elem, sess_cxt); + DLInitElem(&sess_cxt->elem2, sess_cxt); sess_cxt->attachPid = InvalidTid; sess_cxt->top_transaction_mem_cxt = NULL; sess_cxt->self_mem_cxt = NULL; sess_cxt->temp_mem_cxt = NULL; - + sess_cxt->dbesql_mem_cxt = NULL; sess_cxt->guc_variables = NULL; sess_cxt->num_guc_variables = 0; sess_cxt->session_id = 0; @@ -1339,6 +1372,7 @@ void knl_session_init(knl_session_context* sess_cxt) knl_u_proc_init(&sess_cxt->proc_cxt); knl_u_ps_init(&sess_cxt->ps_cxt); knl_u_regex_init(&sess_cxt->regex_cxt); + knl_u_relcache_init(&sess_cxt->relcache_cxt); knl_u_relmap_init(&sess_cxt->relmap_cxt); knl_u_sig_init(&sess_cxt->sig_cxt); @@ -1359,6 +1393,7 @@ void knl_session_init(knl_session_context* sess_cxt) knl_u_user_login_init(&sess_cxt->user_login_cxt); knl_u_percentile_init(&sess_cxt->percentile_cxt); knl_u_slow_query_init(&sess_cxt->slow_query_cxt); + knl_u_trace_context_init(&sess_cxt->trace_cxt); knl_u_statement_init(&sess_cxt->statement_cxt); knl_u_streaming_init(&sess_cxt->streaming_cxt); knl_u_ledger_init(&sess_cxt->ledger_cxt); @@ -1368,6 +1403,8 @@ void knl_session_init(knl_session_context* sess_cxt) KnlUUstoreInit(&sess_cxt->ustore_cxt); KnlURepOriginInit(&sess_cxt->reporigin_cxt); + knl_u_clientConnTime_init(&sess_cxt->clientConnTime_cxt); + MemoryContextSeal(sess_cxt->top_mem_cxt); } @@ -1530,12 +1567,20 @@ bool stp_set_commit_rollback_err_msg(stp_xact_err_type type) break; case STP_XACT_PACKAGE_INSTANTIATION: rt = snprintf_s(u_sess->SPI_cxt.forbidden_commit_rollback_err_msg, maxMsgLen, maxMsgLen - 1, "%s", - "can not use commit rollback in package instantiation"); + "can not use commit/rollback/savepoint in package instantiation"); break; case STP_XACT_COMPL_SQL: rt = snprintf_s(u_sess->SPI_cxt.forbidden_commit_rollback_err_msg, maxMsgLen, maxMsgLen - 1, "%s", "can not use commit rollback in Complex SQL"); break; + case STP_XACT_IMMUTABLE: + rt = snprintf_s(u_sess->SPI_cxt.forbidden_commit_rollback_err_msg, maxMsgLen, maxMsgLen - 1, "%s", + "commit/rollback/savepoint is not allowed in a non-volatile function"); + break; + case STP_XACT_TOO_MANY_PORTAL: + rt = snprintf_s(u_sess->SPI_cxt.forbidden_commit_rollback_err_msg, maxMsgLen, maxMsgLen - 1, "%s", + "transaction statement in store procedure is not supported in multi-layer portal"); + break; default: rt = snprintf_s(u_sess->SPI_cxt.forbidden_commit_rollback_err_msg, maxMsgLen, maxMsgLen - 1, "%s", "invalid transaction in store procedure"); diff --git a/src/gausskernel/process/threadpool/knl_thread.cpp b/src/gausskernel/process/threadpool/knl_thread.cpp index cf122ebf0..673d25104 100644 --- a/src/gausskernel/process/threadpool/knl_thread.cpp +++ b/src/gausskernel/process/threadpool/knl_thread.cpp @@ -335,12 +335,9 @@ static void knl_t_xact_init(knl_t_xact_context* xact_cxt) xact_cxt->XactLocalNodeCanAbort = true; xact_cxt->XactPrepareSent = false; xact_cxt->AlterCoordinatorStmt = false; - xact_cxt->XactXidStoreForCheck = InvalidTransactionId; - xact_cxt->reserved_nextxid_check = InvalidTransactionId; xact_cxt->forceSyncCommit = false; /* alloc in TopMemory Context, initialization is NULL when create new thread */ xact_cxt->TransactionAbortContext = NULL; - xact_cxt->GTM_callbacks = NULL; xact_cxt->Seq_callbacks = NULL; xact_cxt->lxid = InvalidTransactionId; xact_cxt->stablexid = InvalidTransactionId; @@ -368,6 +365,10 @@ static void knl_t_xact_init(knl_t_xact_context* xact_cxt) xact_cxt->PGXCNodeId = -1;
xact_cxt->inheritFileNode = false; xact_cxt->applying_subxact_undo = false; + xact_cxt->XactXidStoreForCheck = InvalidTransactionId; + xact_cxt->enable_lock_cancel = false; + xact_cxt->ActiveLobRelid = InvalidOid; + xact_cxt->isSelectInto = false; } static void knl_t_mem_init(knl_t_mem_context* mem_cxt) @@ -440,7 +441,6 @@ static void knl_t_xlog_init(knl_t_xlog_context* xlog_cxt) xlog_cxt->is_hadr_main_standby = false; xlog_cxt->startup_processing = false; xlog_cxt->openLogFile = -1; - xlog_cxt->readfrombuffer = false; xlog_cxt->openLogSegNo = 0; xlog_cxt->openLogOff = 0; xlog_cxt->readFile = -1; @@ -500,7 +500,6 @@ static void knl_t_xlog_init(knl_t_xlog_context* xlog_cxt) xlog_cxt->gin_opCtx = NULL; xlog_cxt->gist_opCtx = NULL; xlog_cxt->spg_opCtx = NULL; - xlog_cxt->redo_oldversion_xlog = false; xlog_cxt->CheckpointStats = (CheckpointStatsData*)palloc0(sizeof(CheckpointStatsData)); xlog_cxt->LogwrtResult = (XLogwrtResult*)palloc0(sizeof(XLogwrtResult)); xlog_cxt->LogwrtPaxos = (XLogwrtPaxos*)palloc0(sizeof(XLogwrtPaxos)); @@ -511,6 +510,7 @@ static void knl_t_xlog_init(knl_t_xlog_context* xlog_cxt) #endif xlog_cxt->max_page_flush_lsn = MAX_XLOG_REC_PTR; xlog_cxt->redoInterruptCallBackFunc = NULL; + xlog_cxt->redoPageRepairCallBackFunc = NULL; xlog_cxt->xlog_atomic_op = NULL; xlog_cxt->currentRetryTimes = 0; } @@ -600,6 +600,7 @@ static void knl_t_audit_init(knl_t_audit_context* audit) audit->rotation_requested = false; audit->space_beyond_size = (10 * 1024 * 1024); audit->pgaudit_filepath[0] = '\0'; + audit->cur_thread_idx = -1; } static void knl_t_async_init(knl_t_async_context* asy_cxt) @@ -642,6 +643,7 @@ static void knl_t_arch_init(knl_t_arch_context* arch) arch->task_wait_interval = 1000; arch->last_arch_time = 0; arch->arch_start_timestamp = 0; + arch->arch_start_lsn = InvalidXLogRecPtr; arch->sync_walsender_idx = -1; #ifndef ENABLE_MULTIPLE_NODES arch->sync_follower_id = -1; @@ -655,6 +657,7 @@ static void knl_t_barrier_arch_init(knl_t_barrier_arch_context* barrier_arch) barrier_arch->wakened = false; barrier_arch->ready_to_stop = false; barrier_arch->slot_name = NULL; + barrier_arch->lastArchiveLoc = InvalidXLogRecPtr; } @@ -724,6 +727,7 @@ static void knl_t_libwalreceiver_init(knl_t_libwalreceiver_context* libwalreceiv libwalreceiver_cxt->recvBuf = NULL; libwalreceiver_cxt->shared_storage_buf = NULL; libwalreceiver_cxt->shared_storage_read_buf = NULL; + libwalreceiver_cxt->decompressBuf = NULL; libwalreceiver_cxt->xlogreader = NULL; } @@ -772,7 +776,7 @@ static void knl_t_datasender_init(knl_t_datasender_context* datasender_cxt) static void knl_t_walreceiverfuncs_init(knl_t_walreceiverfuncs_context* walreceiverfuncs_cxt) { walreceiverfuncs_cxt->WalRcv = NULL; - walreceiverfuncs_cxt->WalReplIndex = 0; + walreceiverfuncs_cxt->WalReplIndex = 1; } static void knl_t_replgram_init(knl_t_replgram_context* replgram_cxt) @@ -896,7 +900,6 @@ static void knl_t_utils_init(knl_t_utils_context* utils_cxt) utils_cxt->STPSavedResourceOwner = NULL; utils_cxt->CurTransactionResourceOwner = NULL; utils_cxt->TopTransactionResourceOwner = NULL; - utils_cxt->ResourceRelease_callbacks = NULL; utils_cxt->SortColumnOptimize = false; utils_cxt->pRelatedRel = NULL; utils_cxt->sigTimerId = NULL; @@ -951,6 +954,21 @@ static void knl_t_page_redo_init(knl_t_page_redo_context* page_redo_cxt) page_redo_cxt->shutdown_requested = false; page_redo_cxt->got_SIGHUP = false; page_redo_cxt->sleep_long = false; + page_redo_cxt->check_repair = false; +} + +static void 
knl_t_parallel_decode_init(knl_t_parallel_decode_worker_context* parallel_decode_cxt) +{ + parallel_decode_cxt->shutdown_requested = false; + parallel_decode_cxt->got_SIGHUP = false; + parallel_decode_cxt->sleep_long = false; +} + +static void knl_t_parallel_decode_reader_init(knl_t_logical_read_worker_context* parallel_decode_reader_cxt) +{ + parallel_decode_reader_cxt->shutdown_requested = false; + parallel_decode_reader_cxt->got_SIGHUP = false; + parallel_decode_reader_cxt->sleep_long = false; } static void knl_t_startup_init(knl_t_startup_context* startup_cxt) @@ -1138,6 +1156,16 @@ static void knl_t_pagewriter_init(knl_t_pagewriter_context* pagewriter_cxt) pagewriter_cxt->pagewriter_id = -1; } +static void knl_t_barrier_creator_init(knl_t_barrier_creator_context* barrier_creator_cxt) +{ + barrier_creator_cxt->archive_slot_names = NULL; + barrier_creator_cxt->got_SIGHUP = false; + barrier_creator_cxt->is_first_barrier = false; + barrier_creator_cxt->barrier_update_last_time_info = NULL; + barrier_creator_cxt->shutdown_requested = false; + barrier_creator_cxt->first_cn_timeline = 0; +} + static void knl_t_xlogcopybackend_init(knl_t_sharestoragexlogcopyer_context* cxt) { cxt->got_SIGHUP = false; @@ -1340,6 +1368,7 @@ static void knl_t_storage_init(knl_t_storage_context* storage_cxt) storage_cxt->statement_fin_time2 = 0; storage_cxt->pageCopy = NULL; + storage_cxt->isSwitchoverLockHolder = false; storage_cxt->num_held_lwlocks = 0; storage_cxt->held_lwlocks = (LWLockHandle*)palloc0(MAX_SIMUL_LWLOCKS * sizeof(LWLockHandle)); storage_cxt->lock_addin_request = 0; @@ -1363,9 +1392,12 @@ static void knl_t_storage_init(knl_t_storage_context* storage_cxt) storage_cxt->cmprMetaInfo = (CmprMetaUnion*)palloc0(sizeof(CmprMetaUnion)); storage_cxt->DataFileIdCache = NULL; storage_cxt->SegSpcCache = NULL; + storage_cxt->uidHashCache = NULL; + storage_cxt->DisasterCache = NULL; storage_cxt->max_safe_fds = 32; storage_cxt->max_userdatafiles = 8192 - 1000; + storage_cxt->timeoutRemoteOpera = 0; } static void knl_t_port_init(knl_t_port_context* port_cxt) @@ -1410,6 +1442,8 @@ static void knl_t_walsender_init(knl_t_walsender_context* walsender_cxt) securec_check(rc, "\0", "\0"); rc = memset_s(walsender_cxt->gucconf_lock_file, MAXPGPATH, 0, MAXPGPATH); securec_check(rc, "\0", "\0"); + rc = memset_s(walsender_cxt->slotname, NAMEDATALEN, 0, NAMEDATALEN); + securec_check(rc, "\0", "\0"); walsender_cxt->ws_dummy_data_read_file_fd = NULL; walsender_cxt->ws_dummy_data_read_file_num = 1; walsender_cxt->CheckCUArray = NULL; @@ -1422,13 +1456,17 @@ static void knl_t_walsender_init(knl_t_walsender_context* walsender_cxt) walsender_cxt->tmpbuf = (StringInfoData*)palloc0(sizeof(StringInfoData)); walsender_cxt->remotePort = 0; walsender_cxt->walSndCaughtUp = false; + walsender_cxt->LogicalSlot = -1; walsender_cxt->last_check_timeout_timestamp = 0; walsender_cxt->isWalSndSendTimeoutMessage = false; walsender_cxt->advancePrimaryConn = NULL; - + walsender_cxt->xlogReadBuf = NULL; + walsender_cxt->compressBuf = NULL; walsender_cxt->ep_fd = -1; walsender_cxt->datafd = -1; walsender_cxt->is_obsmode = false; + walsender_cxt->standbyConnection = false; + walsender_cxt->restoreLogicalLogHead = NULL; } static void knl_t_tsearch_init(knl_t_tsearch_context* tsearch_cxt) @@ -1450,7 +1488,8 @@ static void knl_t_postmaster_init(knl_t_postmaster_context* postmaster_cxt) 0, MAX_REPLNODE_NUM * sizeof(replconninfo*)); securec_check(rc, "\0", "\0"); - rc = memset_s(postmaster_cxt->ReplConnChanged, MAX_REPLNODE_NUM * sizeof(bool), 0, 
MAX_REPLNODE_NUM * sizeof(bool)); + rc = memset_s(postmaster_cxt->ReplConnChangeType, MAX_REPLNODE_NUM * sizeof(int), + NO_CHANGE, MAX_REPLNODE_NUM * sizeof(int)); securec_check(rc, "\0", "\0"); rc = memset_s(postmaster_cxt->CrossClusterReplConnArray, MAX_REPLNODE_NUM * sizeof(replconninfo*), 0, @@ -1598,6 +1637,12 @@ static void knl_t_bgworker_init(knl_t_bgworker_context* bgworker_cxt) bgworker_cxt->bgworker = NULL; } +static void knl_index_advisor_init(knl_t_index_advisor_context* index_advisor_cxt) +{ + index_advisor_cxt->stmt_table_list = NULL; + index_advisor_cxt->stmt_target_list = NULL; +} + #ifdef ENABLE_MOT static void knl_t_mot_init(knl_t_mot_context* mot_cxt) { @@ -1640,13 +1685,21 @@ void KnlDcfContextInit(knl_t_dcf_context* dcfContext) dcfContext->dcfCtxInfo = NULL; } +void KnlLscContextInit(knl_t_lsc_context *lsc_cxt) +{ + lsc_cxt->lsc = NULL; + lsc_cxt->enable_lsc = false; + lsc_cxt->FetchTupleFromCatCList = NULL; +} + void knl_thread_init(knl_thread_role role) { - /* doesn't init t_thrd.bn, check GaussDbThreadMain for detail */ t_thrd.role = role; t_thrd.subrole = NO_SUBROLE; t_thrd.proc = NULL; t_thrd.pgxact = NULL; + t_thrd.bn = NULL; + t_thrd.is_inited = false; t_thrd.myLogicTid = 10000; t_thrd.fake_session = NULL; t_thrd.threadpool_cxt.reaper_dead_session = false; @@ -1656,7 +1709,8 @@ void knl_thread_init(knl_thread_role role) t_thrd.mcxt_group->Init(t_thrd.top_mem_cxt); MemoryContextSeal(t_thrd.top_mem_cxt); MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); - + + KnlLscContextInit(&t_thrd.lsc_cxt); /* CommProxy Support */ t_thrd.comm_sock_option = g_default_invalid_sock_opt; t_thrd.comm_epoll_option = g_default_invalid_epoll_opt; @@ -1675,6 +1729,7 @@ void knl_thread_init(knl_thread_role role) knl_t_bgwriter_init(&t_thrd.bgwriter_cxt); knl_t_bootstrap_init(&t_thrd.bootstrap_cxt); knl_t_pagewriter_init(&t_thrd.pagewriter_cxt); + knl_t_barrier_creator_init(&t_thrd.barrier_creator_cxt); knl_t_bulkload_init(&t_thrd.bulk_cxt); knl_t_cbm_init(&t_thrd.cbm_cxt); knl_t_checkpoint_init(&t_thrd.checkpoint_cxt); @@ -1741,6 +1796,8 @@ void knl_thread_init(knl_thread_role role) knl_t_pencentile_init(&t_thrd.percentile_cxt); knl_t_perf_snap_init(&t_thrd.perf_snap_cxt); knl_t_page_redo_init(&t_thrd.page_redo_cxt); + knl_t_parallel_decode_init(&t_thrd.parallel_decode_cxt); + knl_t_parallel_decode_reader_init(&t_thrd.logicalreadworker_cxt); knl_t_heartbeat_init(&t_thrd.heartbeat_cxt); knl_t_streaming_init(&t_thrd.streaming_cxt); knl_t_poolcleaner_init(&t_thrd.poolcleaner_cxt); @@ -1753,6 +1810,7 @@ void knl_thread_init(knl_thread_role role) knl_t_security_policy_init(&t_thrd.security_policy_cxt); knl_t_security_ledger_init(&t_thrd.security_ledger_cxt); knl_t_bgworker_init(&t_thrd.bgworker_cxt); + knl_index_advisor_init(&t_thrd.index_advisor_cxt); KnlTApplyLauncherInit(&t_thrd.applylauncher_cxt); KnlTApplyWorkerInit(&t_thrd.applyworker_cxt); KnlTPublicationInit(&t_thrd.publication_cxt); @@ -1781,6 +1839,14 @@ RedoInterruptCallBackFunc RegisterRedoInterruptCallBack(RedoInterruptCallBackFun return oldFunc; } +RedoPageRepairCallBackFunc RegisterRedoPageRepairCallBack(RedoPageRepairCallBackFunc func) +{ + RedoPageRepairCallBackFunc oldFunc = t_thrd.xlog_cxt.redoPageRepairCallBackFunc; + t_thrd.xlog_cxt.redoPageRepairCallBackFunc = func; + return oldFunc; +} + + void RedoInterruptCallBack() { if (t_thrd.xlog_cxt.redoInterruptCallBackFunc != NULL) { @@ -1792,3 +1858,10 @@ void RedoInterruptCallBack() Assert(!AmPageRedoWorker()); } +void 
RedoPageRepairCallBack(RepairBlockKey key, XLogPhyBlock pblk) +{ + if (t_thrd.xlog_cxt.redoPageRepairCallBackFunc != NULL) { + t_thrd.xlog_cxt.redoPageRepairCallBackFunc(key, pblk); + } +} + diff --git a/src/gausskernel/process/threadpool/threadpool_controler.cpp b/src/gausskernel/process/threadpool/threadpool_controler.cpp index 27759bb0b..63c13d4a9 100644 --- a/src/gausskernel/process/threadpool/threadpool_controler.cpp +++ b/src/gausskernel/process/threadpool/threadpool_controler.cpp @@ -80,6 +80,8 @@ ThreadPoolControler::ThreadPoolControler() m_groupNum = 1; m_threadNum = 0; m_maxPoolSize = 0; + m_maxStreamPoolSize = 0; + m_streamProcRatio = 0; } ThreadPoolControler::~ThreadPoolControler() @@ -108,6 +110,7 @@ void ThreadPoolControler::Init(bool enableNumaDistribute) m_sessCtrl = New(CurrentMemoryContext) ThreadPoolSessControl(CurrentMemoryContext); bool bindCpu = CheckCpuBind(); + bool bindCpuNuma = CheckCpuNumaBind(); int maxThreadNum = 0; int expectThreadNum = 0; int maxStreamNum = 0; @@ -149,7 +152,7 @@ void ThreadPoolControler::Init(bool enableNumaDistribute) } m_groups[i] = New(CurrentMemoryContext)ThreadPoolGroup(maxThreadNum, expectThreadNum, - maxStreamNum, i, numaId, cpuNum, cpuArr); + maxStreamNum, i, numaId, cpuNum, cpuArr, bindCpuNuma); m_groups[i]->Init(enableNumaDistribute); } @@ -172,25 +175,56 @@ void ThreadPoolControler::SetThreadPoolInfo() { InitCpuInfo(); - GetInstanceBind(); + GetInstanceBind(&m_cpuset); - GetCpuAndNumaNum(); + GetCpuAndNumaNum(&m_cpuInfo.totalCpuNum, &m_cpuInfo.totalNumaNum); ParseAttr(); + ParseStreamAttr(); + GetSysCpuInfo(); SetGroupAndThreadNum(); + + SetStreamInfo(); } -void ThreadPoolControler::GetInstanceBind() -{ +void AdjustThreadAffinity(void) { + cpu_set_t m_cpuset; + CPU_ZERO(&m_cpuset); + /* Check if the instance has been attch to some specific CPUs. */ int ret = pthread_getaffinity_np(PostmasterPid, sizeof(cpu_set_t), &m_cpuset); + if (ret == 0) { + if ((CPU_ISSET(0, &m_cpuset)) && (!CPU_ISSET(1, &m_cpuset))) { + int num_processors = sysconf(_SC_NPROCESSORS_CONF); + CPU_ZERO(&m_cpuset); + for (int j = 0; j < num_processors; j++) { + CPU_SET(j, &m_cpuset); + } + + // set CPU affinity of a thread + int s = pthread_setaffinity_np(PostmasterPid, sizeof(cpu_set_t), &m_cpuset); + if (s != 0) { + ereport(WARNING, (errmsg("AdjustThreadAffinity failed to bind thread %lu, errno: %d", PostmasterPid, s))); + } + } + } +} + + +void ThreadPoolControler::GetInstanceBind(cpu_set_t *cpuset) +{ + /* this function is used to avoid the libgomp bug on some specific OSes */ + AdjustThreadAffinity(); + + /* Check if the instance has been attached to some specific CPUs.
*/ + int ret = pthread_getaffinity_np(PostmasterPid, sizeof(cpu_set_t), cpuset); if (ret == 0) { return; } else { - errno_t rc = memset_s(&m_cpuset, sizeof(m_cpuset), 0, sizeof(m_cpuset)); + errno_t rc = memset_s(cpuset, sizeof(cpu_set_t), 0, sizeof(cpu_set_t)); securec_check(rc, "\0", "\0"); } } @@ -214,11 +248,13 @@ void ThreadPoolControler::ParseAttr() ptoken = TrimStr(strtok_r(attr, pdelimiter, &psave)); if (!IS_NULL_STR(ptoken)) m_attr.threadNum = pg_strtoint32(ptoken); + pfree_ext(ptoken); /* Ger group num */ ptoken = TrimStr(strtok_r(NULL, pdelimiter, &psave)); if (!IS_NULL_STR(ptoken)) m_attr.groupNum = pg_strtoint32(ptoken); + pfree_ext(ptoken); if (m_attr.threadNum < 0 || m_attr.threadNum > MAX_THREAD_POOL_SIZE) INVALID_ATTR_ERROR( @@ -230,6 +266,56 @@ void ThreadPoolControler::ParseAttr() /* Get attach cpu */ m_attr.bindCpu = TrimStr(psave); ParseBindCpu(); + + pfree_ext(attr); +} + +void ThreadPoolControler::ParseStreamAttr() +{ + m_stream_attr.threadNum = DEFAULT_THREAD_POOL_SIZE; + m_stream_attr.procRatio = DEFAULT_THREAD_POOL_STREAM_PROC_RATIO; + m_stream_attr.groupNum = DEFAULT_THREAD_POOL_GROUPS; + m_stream_attr.bindCpu = NULL; + + char* attr = TrimStr(g_instance.attr.attr_common.thread_pool_stream_attr); + if (IS_NULL_STR(attr)) { + return; + } + + char* ptoken = NULL; + char* psave = NULL; + const char* pdelimiter = ","; + + /* Get thread_pool_stream_attr max thread num */ + ptoken = TrimStr(strtok_r(attr, pdelimiter, &psave)); + if (IS_NULL_STR(ptoken) || !isdigit((unsigned char)*ptoken)) { + INVALID_ATTR_ERROR( + errdetail("Current thread_pool_stream_attr format is invalid, stream_thread_num must be numeric.")); + } + m_stream_attr.threadNum = pg_strtoint32(ptoken); + pfree_ext(ptoken); + if (m_stream_attr.threadNum < 0 || m_stream_attr.threadNum > MAX_THREAD_POOL_SIZE) { + INVALID_ATTR_ERROR( + errdetail("Current stream_thread_num %d is out of range [%d, %d].", + m_stream_attr.threadNum, 0, MAX_THREAD_POOL_SIZE)); + } + + /* Get proc ratio of stream threads */ + ptoken = TrimStr(strtok_r(NULL, pdelimiter, &psave)); + if (IS_NULL_STR(ptoken) || !isdigit((unsigned char)*ptoken)) { + INVALID_ATTR_ERROR( + errdetail("Current thread_pool_stream_attr format is invalid, stream_proc_ratio must be numeric.")); + } + m_stream_attr.procRatio = atof(ptoken); + pfree_ext(ptoken); + if (m_stream_attr.procRatio <= 0 || m_stream_attr.procRatio > MAX_THREAD_POOL_STREAM_PROC_RATIO) { + INVALID_ATTR_ERROR( + errdetail("Current stream_proc_ratio %f is out of range (%d, %d].", + m_stream_attr.procRatio, 0, MAX_THREAD_POOL_STREAM_PROC_RATIO)); + } + pfree_ext(attr); + + return; } void ThreadPoolControler::ParseBindCpu() @@ -239,7 +325,8 @@ { if (IS_NULL_STR(m_attr.bindCpu)) { return; } - char* scpu = pstrdup(m_attr.bindCpu); + char* pattr = pstrdup(m_attr.bindCpu); + char* scpu = pattr; char* ptoken = NULL; char* psave = NULL; const char* pdelimiter = ":"; @@ -268,8 +355,13 @@ void ThreadPoolControler::ParseBindCpu() m_cpuInfo.bindType = NODE_BIND; m_cpuInfo.isBindNumaArr = (bool*)palloc0(sizeof(bool) * m_cpuInfo.totalNumaNum); bindNum = ParseRangeStr(psave, m_cpuInfo.isBindNumaArr, m_cpuInfo.totalNumaNum, "nodebind"); + } else if (strncmp("numabind", ptoken, strlen("numabind")) == 0) { + m_cpuInfo.bindType = NUMA_BIND; + m_cpuInfo.isBindCpuNumaArr = (bool*)palloc0(sizeof(bool) * m_cpuInfo.totalCpuNum); + bindNum = ParseRangeStr(psave, m_cpuInfo.isBindCpuNumaArr, m_cpuInfo.totalCpuNum, "numabind"); } else { - INVALID_ATTR_ERROR(errdetail("Only 'nobind', 'allbind', 'cpubind', and
'nodebind' are valid attribute.")); + INVALID_ATTR_ERROR(errdetail("Only 'nobind', 'allbind', 'cpubind', 'nodebind' and 'numabind' " + "are valid attribute.")); } if (bindNum == 0) @@ -278,6 +370,8 @@ void ThreadPoolControler::ParseBindCpu() "1. These CPUs are not active, use lscpu to check On-line CPU(s) list.\n" "2. The process has been bind to other CPUs and there is no intersection," "use taskset -pc to check process CPU bind info.\n")); + pfree_ext(ptoken); + pfree_ext(pattr); } int ThreadPoolControler::ParseRangeStr(char* attr, bool* arr, int totalNum, char* bindtype) @@ -322,23 +416,26 @@ int ThreadPoolControler::ParseRangeStr(char* attr, bool* arr, int totalNum, char } for (int i = startid; i <= endid; i++) { - retNum += arr[startid] ? 0 : 1; + retNum += arr[i] ? 0 : 1; arr[i] = true; } } + /* Don't need to free when error ocurrs, errors here are FATAL level! */ + pfree_ext(pt); + pfree_ext(ptoken); ptoken = TrimStr(strtok_r(NULL, pdelimiter, &psave)); } return retNum; } -void ThreadPoolControler::GetMcsCpuInfo() +bool* ThreadPoolControler::GetMcsCpuInfo(int totalCpuNum) { FILE* fp = NULL; char buf[BUFSIZE]; - m_cpuInfo.isMcsCpuArr = (bool*)palloc0(sizeof(bool) * m_cpuInfo.totalCpuNum); + bool* isMcsCpuArr = (bool*)palloc0(sizeof(bool) * totalCpuNum); /* * When the database is deplyed on MCS, we need to read cpuset.cpus to find @@ -349,51 +446,36 @@ void ThreadPoolControler::GetMcsCpuInfo() if (fp == NULL) { ereport(WARNING, (errcode(ERRCODE_OPERATE_INVALID_PARAM), errmsg("Failed to open file /sys/fs/cgroup/cpuset/cpuset.cpus"))); - errno_t rc = memset_s(m_cpuInfo.isMcsCpuArr, m_cpuInfo.totalCpuNum, 1, m_cpuInfo.totalCpuNum); + errno_t rc = memset_s(isMcsCpuArr, totalCpuNum, 1, totalCpuNum); securec_check(rc, "", ""); - return; + return isMcsCpuArr; } if (fgets(buf, BUFSIZE, fp) == NULL) { ereport(WARNING, (errcode(ERRCODE_OPERATE_INVALID_PARAM), errmsg("Failed to read file /sys/fs/cgroup/cpuset/cpuset.cpus"))); - errno_t rc = memset_s(m_cpuInfo.isMcsCpuArr, m_cpuInfo.totalCpuNum, 1, m_cpuInfo.totalCpuNum); + errno_t rc = memset_s(isMcsCpuArr, totalCpuNum, 1, totalCpuNum); securec_check(rc, "", ""); fclose(fp); - return; + return isMcsCpuArr; } - int mcsNum = ParseRangeStr(buf, m_cpuInfo.isMcsCpuArr, m_cpuInfo.totalCpuNum, "Mcs Cpu set"); + int mcsNum = ParseRangeStr(buf, isMcsCpuArr, totalCpuNum, "Mcs Cpu set"); if (mcsNum == 0) { ereport(WARNING, (errcode(ERRCODE_OPERATE_INVALID_PARAM), errmsg("No available CPUs in /sys/fs/cgroup/cpuset/cpuset.cpus"))); - errno_t rc = memset_s(m_cpuInfo.isMcsCpuArr, m_cpuInfo.totalCpuNum, 1, m_cpuInfo.totalCpuNum); + errno_t rc = memset_s(isMcsCpuArr, totalCpuNum, 1, totalCpuNum); securec_check(rc, "", ""); } fclose(fp); + return isMcsCpuArr; } -void ThreadPoolControler::GetSysCpuInfo() +void ThreadPoolControler::GetActiveCpu(NumaCpuId *numaCpuIdList, int *num) { - FILE* fp = NULL; + *num = 0; char buf[BUFSIZE]; - - if (m_cpuInfo.totalNumaNum == 0 || m_cpuInfo.totalCpuNum == 0) { - ereport(WARNING, (errmsg("Fail to read cpu num or numa num."))); - return; - } - - GetMcsCpuInfo(); - - m_cpuInfo.cpuArr = (int**)palloc0(sizeof(int*) * m_cpuInfo.totalNumaNum); - m_cpuInfo.cpuArrSize = (int*)palloc0(sizeof(int) * m_cpuInfo.totalNumaNum); - int cpu_per_numa = m_cpuInfo.totalCpuNum / m_cpuInfo.totalNumaNum; - for (int i = 0; i < m_cpuInfo.totalNumaNum; i++) - m_cpuInfo.cpuArr[i] = (int*)palloc0(sizeof(int) * cpu_per_numa); - - /* use lscpu to get active cpu info */ - fp = popen("lscpu -b -e=cpu,node", "r"); - + FILE* fp = popen("lscpu -b -e=cpu,node", 
"r"); if (fp == NULL) { ereport(WARNING, (errmsg("Unable to use 'lscpu' to read CPU info."))); return; @@ -406,30 +488,65 @@ void ThreadPoolControler::GetSysCpuInfo() int numaid = 0; /* try to read the header. */ if (fgets(buf, sizeof(buf), fp) != NULL) { - m_cpuInfo.activeCpuNum = 0; while (fgets(buf, sizeof(buf), fp) != NULL) { ptoken = strtok_r(buf, pdelimiter, &psave); - if (!IS_NULL_STR(ptoken)) + if (!IS_NULL_STR(ptoken)) { cpuid = pg_strtoint32(ptoken); - - ptoken = strtok_r(NULL, pdelimiter, &psave); - if (!IS_NULL_STR(ptoken)) - numaid = pg_strtoint32(ptoken); - - if (IsActiveCpu(cpuid, numaid)) { - m_cpuInfo.cpuArr[numaid][m_cpuInfo.cpuArrSize[numaid]] = cpuid; - m_cpuInfo.cpuArrSize[numaid]++; - m_cpuInfo.activeCpuNum++; } + ptoken = strtok_r(NULL, pdelimiter, &psave); + if (!IS_NULL_STR(ptoken)) { + numaid = pg_strtoint32(ptoken); + } + numaCpuIdList[*num].cpuId = cpuid; + numaCpuIdList[*num].numaId = numaid; + (*num)++; } } + pclose(fp); +} + +void ThreadPoolControler::GetSysCpuInfo() +{ + if (m_cpuInfo.totalNumaNum == 0 || m_cpuInfo.totalCpuNum == 0) { + ereport(WARNING, (errmsg("Fail to read cpu num or numa num."))); + return; + } + + m_cpuInfo.isMcsCpuArr = GetMcsCpuInfo(m_cpuInfo.totalCpuNum); + + m_cpuInfo.cpuArr = (int**)palloc0(sizeof(int*) * m_cpuInfo.totalNumaNum); + m_cpuInfo.cpuArrSize = (int*)palloc0(sizeof(int) * m_cpuInfo.totalNumaNum); + int cpu_per_numa = m_cpuInfo.totalCpuNum / m_cpuInfo.totalNumaNum; + for (int i = 0; i < m_cpuInfo.totalNumaNum; i++) { + m_cpuInfo.cpuArr[i] = (int*)palloc0(sizeof(int) * cpu_per_numa); + } + m_cpuInfo.activeCpuNum = 0; + NumaCpuId *sysNumaCpuIdList = (NumaCpuId*)palloc0(sizeof(NumaCpuId) * m_cpuInfo.totalCpuNum); + int sysNumaCpuIdNum = 0; + GetActiveCpu(sysNumaCpuIdList, &sysNumaCpuIdNum); + + if (sysNumaCpuIdNum == 0) { + return; + } + + for (int i = 0; i < sysNumaCpuIdNum; ++i) { + int cpuid = sysNumaCpuIdList[i].cpuId; + int numaid = sysNumaCpuIdList[i].numaId; + if (IsActiveCpu(cpuid, numaid)) { + m_cpuInfo.cpuArr[numaid][m_cpuInfo.cpuArrSize[numaid]] = cpuid; + m_cpuInfo.cpuArrSize[numaid]++; + m_cpuInfo.activeCpuNum++; + } + + } + + pfree_ext(sysNumaCpuIdList); + for (int i = 0; i < m_cpuInfo.totalNumaNum; i++) { if (m_cpuInfo.cpuArrSize[i] > 0) m_cpuInfo.activeNumaNum++; } - - pclose(fp); } void ThreadPoolControler::InitCpuInfo() @@ -447,7 +564,7 @@ void ThreadPoolControler::InitCpuInfo() m_cpuInfo.isMcsCpuArr = NULL; } -void ThreadPoolControler::GetCpuAndNumaNum() +void ThreadPoolControler::GetCpuAndNumaNum(int32 *totalCpuNum, int32 *totalNumaNum) { char buf[BUFSIZE]; @@ -459,10 +576,10 @@ void ThreadPoolControler::GetCpuAndNumaNum() strncmp("On-line CPU(s) list", buf, strlen("On-line CPU(s) list")) != 0 && strncmp("NUMA node", buf, strlen("NUMA node")) != 0) { char* loc = strchr(buf, ':'); - m_cpuInfo.totalCpuNum = pg_strtoint32(loc + 1); + *totalCpuNum = pg_strtoint32(loc + 1); } else if (strncmp("NUMA node(s)", buf, strlen("NUMA node(s)")) == 0) { char* loc = strchr(buf, ':'); - m_cpuInfo.totalNumaNum = pg_strtoint32(loc + 1); + *totalNumaNum = pg_strtoint32(loc + 1); } } pclose(fp); @@ -479,6 +596,8 @@ bool ThreadPoolControler::IsActiveCpu(int cpuid, int numaid) return (m_cpuInfo.isBindNumaArr[numaid] && m_cpuInfo.isMcsCpuArr[cpuid] && CPU_ISSET(cpuid, &m_cpuset)); case CPU_BIND: return (m_cpuInfo.isBindCpuArr[cpuid] && m_cpuInfo.isMcsCpuArr[cpuid] && CPU_ISSET(cpuid, &m_cpuset)); + case NUMA_BIND: + return (m_cpuInfo.isBindCpuNumaArr[cpuid] && m_cpuInfo.isMcsCpuArr[cpuid] && CPU_ISSET(cpuid, &m_cpuset)); } return 
false; } @@ -501,6 +620,11 @@ bool ThreadPoolControler::CheckCpuBind() const return true; } +bool ThreadPoolControler::CheckCpuNumaBind() const +{ + return m_cpuInfo.bindType == NUMA_BIND; +} + bool ThreadPoolControler::CheckNumaDistribute(int numaNodeNum) const { if (m_cpuInfo.bindType == NO_CPU_BIND) { @@ -535,6 +659,12 @@ CPUBindType ThreadPoolControler::GetCpuBindType() const return m_cpuInfo.bindType; } +void ThreadPoolControler::SetStreamInfo() +{ + m_streamProcRatio = m_stream_attr.procRatio; + m_maxStreamPoolSize = Min(m_stream_attr.threadNum, m_threadNum); +} + void ThreadPoolControler::SetGroupAndThreadNum() { if (m_attr.groupNum == 0) { diff --git a/src/gausskernel/process/threadpool/threadpool_group.cpp b/src/gausskernel/process/threadpool/threadpool_group.cpp index aa12affe3..59d0203fd 100644 --- a/src/gausskernel/process/threadpool/threadpool_group.cpp +++ b/src/gausskernel/process/threadpool/threadpool_group.cpp @@ -57,7 +57,7 @@ status == STATE_WAIT_XACTSYNC) ThreadPoolGroup::ThreadPoolGroup(int maxWorkerNum, int expectWorkerNum, int maxStreamNum, - int groupId, int numaId, int cpuNum, int* cpuArr) + int groupId, int numaId, int cpuNum, int* cpuArr, bool enableBindCpuNuma) : m_listener(NULL), m_maxWorkerNum(maxWorkerNum), m_maxStreamNum(maxStreamNum), @@ -78,11 +78,13 @@ ThreadPoolGroup::ThreadPoolGroup(int maxWorkerNum, int expectWorkerNum, int maxS m_groupCpuNum(cpuNum), m_groupCpuArr(cpuArr), m_enableNumaDistribute(false), + m_enableBindCpuNuma(enableBindCpuNuma), m_workers(NULL), m_context(NULL) { pthread_mutex_init(&m_mutex, NULL); CPU_ZERO(&m_nodeCpuSet); + CPU_ZERO(&m_CpuNumaSet); m_streams = NULL; m_freeStreamList = NULL; @@ -114,6 +116,12 @@ void ThreadPoolGroup::Init(bool enableNumaDistribute) m_listener = New(CurrentMemoryContext) ThreadPoolListener(this); m_listener->StartUp(); + if (m_enableBindCpuNuma) { + for (int i = 0; i < m_groupCpuNum; i++) { + CPU_SET(m_groupCpuArr[i], &m_CpuNumaSet); + } + } + InitWorkerSentry(); InitStreamSentry(); @@ -158,6 +166,8 @@ void ThreadPoolGroup::AddWorker(int i) if (m_groupCpuArr) { if (m_enableNumaDistribute) { AttachThreadToNodeLevel(m_workers[i].worker->GetThreadId()); + } else if (m_enableBindCpuNuma) { + AttachThreadToCpuNuma(m_workers[i].worker->GetThreadId()); } else { AttachThreadToCPU(m_workers[i].worker->GetThreadId(), m_groupCpuArr[i % m_groupCpuNum]); } @@ -402,6 +412,14 @@ void ThreadPoolGroup::AttachThreadToNodeLevel(ThreadId thread) const ereport(WARNING, (errmsg("Fail to attach thread %lu to numa node %d", thread, m_numaId))); } +void ThreadPoolGroup::AttachThreadToCpuNuma(ThreadId thread) +{ + int ret = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &m_CpuNumaSet); + if (ret != 0) { + ereport(WARNING, (errmsg("Fail to attach thread %lu to CPU NUMA", thread))); + } +} + void ThreadPoolGroup::InitStreamSentry() { m_streams = (ThreadStreamSentry*)palloc0_noexcept(sizeof(ThreadStreamSentry) * m_maxStreamNum); diff --git a/src/gausskernel/process/threadpool/threadpool_listener.cpp b/src/gausskernel/process/threadpool/threadpool_listener.cpp index 9e526ca8d..e3caab1a0 100644 --- a/src/gausskernel/process/threadpool/threadpool_listener.cpp +++ b/src/gausskernel/process/threadpool/threadpool_listener.cpp @@ -49,9 +49,12 @@ #include #include "communication/commproxy_interface.h" +#include "executor/executor.h" +#include "utils/knl_catcache.h" #define INVALID_FD (-1) + static void TpoolListenerLoop(ThreadPoolListener* listener); static void ListenerSIGUSR1Handler(SIGNAL_ARGS) @@ -61,7 +64,7 @@ static void 
ListenerSIGUSR1Handler(SIGNAL_ARGS) static void ListenerSIGKILLHandler(SIGNAL_ARGS) { - proc_exit(0); + t_thrd.threadpool_cxt.listener->m_getKilled = true; } void TpoolListenerMain(ThreadPoolListener* listener) @@ -109,9 +112,31 @@ ThreadPoolListener::ThreadPoolListener(ThreadPoolGroup* group) m_epollFd = INVALID_FD; m_epollEvents = NULL; m_reaperAllSession = false; + m_getKilled = false; m_freeWorkerList = New(CurrentMemoryContext) DllistWithLock(); m_readySessionList = New(CurrentMemoryContext) DllistWithLock(); m_idleSessionList = New(CurrentMemoryContext) DllistWithLock(); + + if (EnableLocalSysCache()) { + /* see HASH_INDEX, Since the hash table must contain a power-of-2 number of elements */ +#ifdef ENABLE_LITE_MODE + m_session_nbucket = 128; +#else + m_session_nbucket = MAX_THREAD_POOL_SIZE; +#endif + m_session_bucket = (Dllist*)palloc0(m_session_nbucket * sizeof(Dllist)); + m_session_rw_locks = (pthread_rwlock_t *)palloc0(m_session_nbucket * sizeof(pthread_rwlock_t)); + for (int i = 0; i < m_session_nbucket; i++) { + PthreadRwLockInit(&m_session_rw_locks[i], NULL); + } + m_match_search = 0; + + } else { + m_session_nbucket = 0; + m_session_bucket = NULL; + m_session_rw_locks = NULL; + m_match_search = 0; + } } ThreadPoolListener::~ThreadPoolListener() @@ -126,6 +151,13 @@ ThreadPoolListener::~ThreadPoolListener() m_freeWorkerList = NULL; m_readySessionList = NULL; m_idleSessionList = NULL; + + if (EnableLocalSysCache()) { + pfree_ext(m_session_bucket); + pfree_ext(m_session_rw_locks); + } + m_session_nbucket = 0; + m_session_bucket = NULL; } int ThreadPoolListener::StartUp() @@ -211,14 +243,27 @@ void ThreadPoolListener::AddEpoll(knl_session_context* session) bool ThreadPoolListener::TryFeedWorker(ThreadPoolWorker* worker) { - Dlelem* sc = m_readySessionList->RemoveHead(); + Dlelem* sc = GetReadySession(worker); if (sc != NULL) { worker->SetSession((knl_session_context*)sc->dle_val); pg_atomic_fetch_sub_u32((volatile uint32*)&m_group->m_waitServeSessionCount, 1); pg_atomic_fetch_add_u32((volatile uint32*)&m_group->m_processTaskCount, 1); return true; } else { - m_freeWorkerList->AddTail(&worker->m_elem); + if (EnableLocalSysCache()) { +#ifdef ENABLE_LITE_MODE + LocalSysDBCache *lsc = worker->GetThreadContextPtr()->lsc_cxt.lsc; + if (lsc == NULL || lsc->my_database_id == InvalidOid) { + m_freeWorkerList->AddTail(&worker->m_elem); + } else { + m_freeWorkerList->AddHead(&worker->m_elem); + } +#else + m_freeWorkerList->AddHead(&worker->m_elem); +#endif + } else { + m_freeWorkerList->AddTail(&worker->m_elem); + } pg_atomic_fetch_add_u32((volatile uint32*)&m_group->m_idleWorkerNum, 1); return false; } @@ -301,7 +346,11 @@ void ThreadPoolListener::WaitTask() int nevents = 0; while (true) { - if (m_reaperAllSession) { + if (unlikely(m_getKilled)) { + m_getKilled = false; + proc_exit(0); + } + if (unlikely(m_reaperAllSession)) { ReaperAllSession(); } @@ -378,7 +427,7 @@ void ThreadPoolListener::DispatchSession(knl_session_context* session) return; } while (true) { - Dlelem* sc = m_freeWorkerList->RemoveHead(); + Dlelem* sc = GetFreeWorker(session); if (sc != NULL) { ereport(DEBUG2, (errmodule(MOD_THREAD_POOL), @@ -396,11 +445,10 @@ void ThreadPoolListener::DispatchSession(knl_session_context* session) /* Add new session to the head so the connection request can be quickly processed. 
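Both the listener here and the scheduler hunk later in this patch stop calling proc_exit(0) directly from the SIGKILL-role handler; the handler merely sets m_getKilled, and the main loop polls the flag and performs the real exit, since proc_exit is not async-signal-safe. A minimal standalone sketch of that deferred-exit pattern, with illustrative names (g_got_killed and KillHandler are not kernel symbols):

```
#include <csignal>
#include <cstdio>
#include <unistd.h>

/* Stand-in for the listener's m_getKilled flag: the signal handler only sets
 * the flag; the event loop notices it and performs the non-signal-safe exit. */
static volatile sig_atomic_t g_got_killed = 0;

static void KillHandler(int) { g_got_killed = 1; /* async-signal-safe */ }

int main()
{
    struct sigaction sa = {};
    sa.sa_handler = KillHandler;
    sigaction(SIGTERM, &sa, nullptr);

    for (;;) {
        if (g_got_killed) {   /* polled once per loop turn, like WaitTask() */
            g_got_killed = 0;
            std::puts("cleanup, then exit");  /* the kernel calls proc_exit(0) */
            break;
        }
        pause();              /* stands in for epoll_wait() in the listener */
    }
    return 0;
}
```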
*/ if (session->status == KNL_SESS_UNINIT) { - m_readySessionList->AddHead(&session->elem); + AddIdleSessionToHead(session); } else { - m_readySessionList->AddTail(&session->elem); + AddIdleSessionToTail(session); } - pg_atomic_fetch_add_u32((volatile uint32*)&m_group->m_waitServeSessionCount, 1); break; } @@ -452,3 +500,150 @@ bool ThreadPoolListener::GetSessIshang(instr_time* current_time, uint64* session return ishang; } +Dlelem *ThreadPoolListener::GetFreeWorker(knl_session_context* session) +{ + /* only lite mode needs to find the right thread worker; + * otherwise there are so many requests that free workers are rare, so the optimization is unnecessary */ +#ifdef ENABLE_LITE_MODE + if (!EnableLocalSysCache()) { + return m_freeWorkerList->RemoveHead(); + } + + /* session is not initialized yet, so we cannot tell which syscache it would hit */ + if (session->status != KNL_SESS_ATTACH && session->status != KNL_SESS_DETACH) { + return m_freeWorkerList->RemoveTail(); + } + + if (unlikely(session->proc_cxt.MyDatabaseId == InvalidOid)) { + return m_freeWorkerList->RemoveTail(); + } + + /* in lite mode there are only a few thread workers, so traversing the list is quick */ + m_freeWorkerList->GetLock(); + for (Dlelem *elt = m_freeWorkerList->GetHead(); elt != NULL; elt = DLGetSucc(elt)) { + ThreadPoolWorker *worker = (ThreadPoolWorker *)DLE_VAL(elt); + LocalSysDBCache *lsc = worker->GetThreadContextPtr()->lsc_cxt.lsc; + /* uninitialized lsc entries are appended to the tail, so once we see one, all that follow are uninitialized; just break */ + if (unlikely(lsc == NULL || lsc->my_database_id == InvalidOid)) { + break; + } + /* cache hit */ + if (likely(lsc->my_database_id == session->proc_cxt.MyDatabaseId)) { + m_freeWorkerList->Remove(elt); + m_freeWorkerList->ReleaseLock(); + return elt; + } + } + m_freeWorkerList->ReleaseLock(); + /* no match found; take the tail instead of the head, because the head holds another database's syscache */ + return m_freeWorkerList->RemoveTail(); +#else + return m_freeWorkerList->RemoveHead(); +#endif +} + +static Dlelem *GetHeadUnInitSession(DllistWithLock* m_readySessionList) +{ + /* uninitialized sessions need to be served first */ + m_readySessionList->GetLock(); + Dlelem *head = m_readySessionList->GetHead(); + if (likely(head != NULL)) { + if (((knl_session_context *)DLE_VAL(head))->status != KNL_SESS_UNINIT) { + /* let the caller take the cache-hit branch; set it to NULL */ + head = NULL; + } else { + head = m_readySessionList->RemoveHeadNoLock(); + } + } + m_readySessionList->ReleaseLock(); + Assert(head == NULL || ((knl_session_context *)DLE_VAL(head))->status == KNL_SESS_UNINIT); + return head; +} + +Dlelem *ThreadPoolListener::GetSessFromReadySessionList(ThreadPoolWorker *worker) +{ + Assert(EnableLocalSysCache()); + Dlelem *elt = GetHeadUnInitSession(m_readySessionList); + if (elt != NULL) { + return elt; + } + do { + m_match_search++; + if (unlikely(m_match_search > MATCH_SEARCH_THRESHOLD)) { + m_match_search = 0; + break; + } + LocalSysDBCache *lsc = worker->GetThreadContextPtr()->lsc_cxt.lsc; + // worker not initialized yet, so any session matches + if (unlikely(lsc == NULL || lsc->my_database_id == InvalidOid)) { + break; + } + // now try to reuse the worker's syscache + Index hash_index = HASH_INDEX(lsc->my_database_id, (uint32)m_session_nbucket); + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + PthreadRWlockRdlock(owner, &m_session_rw_locks[hash_index]); + elt = DLGetHead(&m_session_bucket[hash_index]); + if (elt == NULL || ((knl_session_context *)DLE_VAL(elt))->proc_cxt.MyDatabaseId != lsc->my_database_id) { + PthreadRWlockUnlock(owner,
&m_session_rw_locks[hash_index]); + break; + } + if (!m_readySessionList->RemoveConfirm(&((knl_session_context *)DLE_VAL(elt))->elem)) { + // someone remove it already + PthreadRWlockUnlock(owner, &m_session_rw_locks[hash_index]); + break; + } + PthreadRWlockUnlock(owner, &m_session_rw_locks[hash_index]); + + return &((knl_session_context *)DLE_VAL(elt))->elem; + } while (0); + + elt = m_readySessionList->RemoveHead(); + return elt; +} + +Dlelem *ThreadPoolListener::GetReadySession(ThreadPoolWorker *worker) +{ + if (!EnableLocalSysCache()) { + return m_readySessionList->RemoveHead(); + } + Dlelem *elt = GetSessFromReadySessionList(worker); + if (elt == NULL) { + return NULL; + } + knl_session_context *session = (knl_session_context *)DLE_VAL(elt); + Oid cur_dbid = session->proc_cxt.MyDatabaseId; + Index hash_index = HASH_INDEX(cur_dbid, (uint32)m_session_nbucket); + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + PthreadRWlockWrlock(owner, &m_session_rw_locks[hash_index]); + DLRemove(&session->elem2); + PthreadRWlockUnlock(owner, &m_session_rw_locks[hash_index]); + return elt; +} + +void ThreadPoolListener::AddIdleSessionToTail(knl_session_context* session) +{ + if (!EnableLocalSysCache()) { + m_readySessionList->AddTail(&session->elem); + return; + } + Index hash_index = HASH_INDEX(session->proc_cxt.MyDatabaseId, (uint32)m_session_nbucket); + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + PthreadRWlockWrlock(owner, &m_session_rw_locks[hash_index]); + DLAddTail(&m_session_bucket[hash_index], &session->elem2); + PthreadRWlockUnlock(owner, &m_session_rw_locks[hash_index]); + m_readySessionList->AddTail(&session->elem); +} + +void ThreadPoolListener::AddIdleSessionToHead(knl_session_context* session) +{ + if (!EnableLocalSysCache()) { + m_readySessionList->AddHead(&session->elem); + return; + } + Index hash_index = HASH_INDEX(session->proc_cxt.MyDatabaseId, (uint32)m_session_nbucket); + ResourceOwner owner = LOCAL_SYSDB_RESOWNER; + PthreadRWlockWrlock(owner, &m_session_rw_locks[hash_index]); + DLAddHead(&m_session_bucket[hash_index], &session->elem2); + PthreadRWlockUnlock(owner, &m_session_rw_locks[hash_index]); + m_readySessionList->AddHead(&session->elem); +} diff --git a/src/gausskernel/process/threadpool/threadpool_scheduler.cpp b/src/gausskernel/process/threadpool/threadpool_scheduler.cpp index 84ed7196e..e44aa11ca 100644 --- a/src/gausskernel/process/threadpool/threadpool_scheduler.cpp +++ b/src/gausskernel/process/threadpool/threadpool_scheduler.cpp @@ -51,7 +51,7 @@ static void SchedulerSIGKILLHandler(SIGNAL_ARGS) { - proc_exit(0); + t_thrd.threadpool_cxt.scheduler->m_getKilled = true; } void ThreadPoolScheduler::SigHupHandler() @@ -61,7 +61,7 @@ void ThreadPoolScheduler::SigHupHandler() static void reloadConfigFileIfNecessary() { - if (t_thrd.threadpool_cxt.scheduler->m_getSIGHUP) { + if (unlikely(t_thrd.threadpool_cxt.scheduler->m_getSIGHUP)) { t_thrd.threadpool_cxt.scheduler->m_getSIGHUP = false; ProcessConfigFile(PGC_SIGHUP); /* Update most_available_sync if it's modified dynamically. 
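The GetReadySession/AddIdleSession* functions above keep each ready session in two places: the ordered ready list, and a per-database hash bucket protected by its own rwlock, so a worker can first probe for a session of the database whose local syscache it already holds. A self-contained sketch of that bucket layout under the stated power-of-two assumption (Session and SessionBuckets are illustrative stand-ins, not kernel types; a destructor releasing the rwlocks is omitted for brevity):

```
#include <pthread.h>
#include <cstdint>
#include <list>
#include <vector>

struct Session { uint32_t dbOid; };

class SessionBuckets {
public:
    explicit SessionBuckets(size_t nbucket)   /* nbucket must be a power of two */
        : m_buckets(nbucket), m_locks(nbucket)
    {
        for (auto &l : m_locks) pthread_rwlock_init(&l, nullptr);
    }

    /* power-of-two bucket count lets the modulo reduce to a bit-mask,
     * mirroring the HASH_INDEX convention referenced above */
    size_t BucketOf(uint32_t dbOid) const { return dbOid & (m_buckets.size() - 1); }

    void Add(Session *s)
    {
        size_t i = BucketOf(s->dbOid);
        pthread_rwlock_wrlock(&m_locks[i]);
        m_buckets[i].push_back(s);
        pthread_rwlock_unlock(&m_locks[i]);
    }

    /* Return a ready session for dbOid, if any, so the worker that picks it
     * up can reuse the syscache it already holds for that database. */
    Session *FindByDb(uint32_t dbOid)
    {
        size_t i = BucketOf(dbOid);
        pthread_rwlock_rdlock(&m_locks[i]);
        Session *hit = nullptr;
        for (Session *s : m_buckets[i]) {
            if (s->dbOid == dbOid) { hit = s; break; }
        }
        pthread_rwlock_unlock(&m_locks[i]);
        return hit;
    }

private:
    std::vector<std::list<Session *>> m_buckets;
    std::vector<pthread_rwlock_t> m_locks;
};
```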
*/ @@ -86,6 +86,10 @@ void TpoolSchedulerMain(ThreadPoolScheduler *scheduler) } while (true) { + if (unlikely(scheduler->m_getKilled)) { + scheduler->m_getKilled = false; + proc_exit(0); + } pg_usleep(SCHEDULER_TIME_UNIT); reloadConfigFileIfNecessary(); scheduler->DynamicAdjustThreadPool(); diff --git a/src/gausskernel/process/threadpool/threadpool_sessctl.cpp b/src/gausskernel/process/threadpool/threadpool_sessctl.cpp index d01e3af26..e7e5090b8 100755 --- a/src/gausskernel/process/threadpool/threadpool_sessctl.cpp +++ b/src/gausskernel/process/threadpool/threadpool_sessctl.cpp @@ -54,6 +54,10 @@ #include "executor/executor.h" #include "communication/commproxy_interface.h" +#ifdef MEMORY_CONTEXT_TRACK +#include "memory_func.h" +#endif + ThreadPoolSessControl::ThreadPoolSessControl(MemoryContext context) { @@ -466,7 +470,8 @@ void ThreadPoolSessControl::HandlePoolerReload() Dlelem* elem = DLGetHead(&m_activelist); while (elem != NULL) { ctrl= (knl_sess_control*)DLE_VAL(elem); - ctrl->sess->sig_cxt.got_PoolReload = true; + /* we have already send got_pool_reload to threads */ + ctrl->sess->sig_cxt.got_pool_reload = true; ctrl->sess->sig_cxt.cp_PoolReload = true; elem = DLGetSucc(elem); } @@ -563,6 +568,49 @@ void ThreadPoolSessControl::getSessionMemoryDetail(Tuplestorestate* tupStore, PG_END_TRY(); } +void ThreadPoolSessControl::getSessionMemoryContextInfo(const char* ctx_name, + StringInfoData* buf, knl_sess_control** sess) +{ +#ifdef MEMORY_CONTEXT_TRACK + AutoMutexLock alock(&m_sessCtrlock); + knl_sess_control* ctrl = NULL; + Dlelem* elem = NULL; + + PG_TRY(); + { + HOLD_INTERRUPTS(); + alock.lock(); + + /* collect all the Memory Context status, put in data */ + elem = DLGetHead(&m_activelist); + + while (elem != NULL) { + ctrl = (knl_sess_control*)DLE_VAL(elem); + *sess = ctrl; + if (ctrl->sess) { + (void)syscalllockAcquire(&ctrl->sess->utils_cxt.deleMemContextMutex); + gs_recursive_unshared_memory_context(ctrl->sess->top_mem_cxt, ctx_name, buf); + (void)syscalllockRelease(&ctrl->sess->utils_cxt.deleMemContextMutex); + } + elem = DLGetSucc(elem); + } + alock.unLock(); + + RESUME_INTERRUPTS(); + } + PG_CATCH(); + { + if (*sess != NULL) { + ctrl = *sess; + (void)syscalllockRelease(&ctrl->sess->utils_cxt.deleMemContextMutex); + } + alock.unLock(); + PG_RE_THROW(); + } + PG_END_TRY(); +#endif +} + knl_session_context* ThreadPoolSessControl::GetSessionByIdx(int idx) { if (IsValidCtrlIndex(idx)) { diff --git a/src/gausskernel/process/threadpool/threadpool_stream.cpp b/src/gausskernel/process/threadpool/threadpool_stream.cpp index c760644f8..d87c00fc2 100644 --- a/src/gausskernel/process/threadpool/threadpool_stream.cpp +++ b/src/gausskernel/process/threadpool/threadpool_stream.cpp @@ -137,7 +137,9 @@ void ThreadPoolStream::InitStream() InitializeGUCOptions(); /* Read in remaining GUC variables */ read_nondefault_variables(); + /* Do local initialization of file, storage and buffer managers */ + ReBuildLSC(); InitFileAccess(); smgrinit(); @@ -212,6 +214,7 @@ static void ResetStreamStatus() if (!IS_PGSTATE_TRACK_UNDEFINE) { volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; beentry->st_queryid = 0; + pgstat_report_unique_sql_id(true); beentry->st_sessionid = 0; beentry->st_parent_sessionid = 0; beentry->st_thread_level = 0; diff --git a/src/gausskernel/process/threadpool/threadpool_worker.cpp b/src/gausskernel/process/threadpool/threadpool_worker.cpp index bf81c4cff..bb7ab3aa9 100644 --- a/src/gausskernel/process/threadpool/threadpool_worker.cpp +++ 
b/src/gausskernel/process/threadpool/threadpool_worker.cpp @@ -86,6 +86,7 @@ ThreadPoolWorker::ThreadPoolWorker(uint idx, ThreadPoolGroup* group, pthread_mut m_cond = cond; m_waitState = STATE_WAIT_UNDEFINED; DLInitElem(&m_elem, this); + m_thrd = &t_thrd; } ThreadPoolWorker::~ThreadPoolWorker() @@ -135,10 +136,12 @@ int ThreadPoolWorker::StartUp() if (m_tid == InvalidTid) { ReleasePostmasterChildSlot(bn->child_slot); bn->pid = 0; + bn->role = (knl_thread_role)0; return STATUS_ERROR; } bn->pid = m_tid; + bn->role = THREADPOOL_WORKER; Assert(bn->child_slot != 0); AddBackend(bn); @@ -213,6 +216,8 @@ void ThreadPoolWorker::WaitMission() if (AmIProxyModeSockfd(m_currentSession->proc_cxt.MyProcPort->sock)) { g_comm_controller->SetCommSockActive(m_currentSession->proc_cxt.MyProcPort->sock, m_idx); } + + Assert(CheckMyDatabaseMatch()); break; } @@ -367,6 +372,9 @@ void ThreadPoolWorker::SetSessionInfo() void ThreadPoolWorker::WaitNextSession() { + if (EnableLocalSysCache()) { + g_instance.global_sysdbcache.GSCMemThresholdCheck(); + } /* Return worker to pool unless we can get a task right now. */ ThreadPoolListener* lsn = m_group->GetListener(); Assert(lsn != NULL); @@ -510,12 +518,13 @@ void ThreadPoolWorker::DetachSessionFromThread() /* If some error occur at session initialization, we need to close it. */ if (m_currentSession->status == KNL_SESS_UNINIT) { m_currentSession->status = KNL_SESS_CLOSERAW; + /* cache may be in wrong stat, rebuild is ok */ + ReBuildLSC(); CleanUpSession(false); m_currentSession = NULL; u_sess = NULL; return; } - m_currentSession->status = KNL_SESS_DETACH; if (t_thrd.pgxact != NULL && m_currentSession->proc_cxt.Isredisworker) { LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); @@ -579,6 +588,7 @@ bool ThreadPoolWorker::AttachSessionToThread() register_backend_version(t_thrd.proc->workingVersionNum); } m_currentSession->status = KNL_SESS_ATTACH; + Assert(CheckMyDatabaseMatch()); } else { m_currentSession->status = KNL_SESS_CLOSE; /* clean up mess. */ @@ -591,8 +601,27 @@ bool ThreadPoolWorker::AttachSessionToThread() } break; case KNL_SESS_DETACH: { +#ifdef ENABLE_LITE_MODE + char thr_name[16]; + int rcs = 0; + Port *port = m_currentSession->proc_cxt.MyProcPort; + + if (t_thrd.role == WORKER) { + rcs = snprintf_truncated_s(thr_name, sizeof(thr_name), "w:%s", port->user_name); + securec_check_ss(rcs, "\0", "\0"); + (void)pthread_setname_np(gs_thread_self(), thr_name); + } else if (t_thrd.role == THREADPOOL_WORKER) { + rcs = snprintf_truncated_s(thr_name, sizeof(thr_name), "tw:%s", port->user_name); + securec_check_ss(rcs, "\0", "\0"); + (void)pthread_setname_np(gs_thread_self(), thr_name); + } +#endif pgstat_initialize_session(); pgstat_couple_decouple_session(true); + /* Postgres init thread syscache. 
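The lite-mode hunk above renames worker threads after the attached user with pthread_setname_np, building the name in a 16-byte buffer because Linux caps a thread name at 15 characters plus the terminator. A sketch of the same idea using plain snprintf in place of the kernel's securec snprintf_truncated_s (NameWorkerThread is an illustrative name):

```
#include <pthread.h>
#include <cstdio>

/* The "w:"/"tw:" prefixes match the hunk above; anything past 15 visible
 * characters is silently truncated so pthread_setname_np cannot fail with ERANGE. */
static void NameWorkerThread(const char *userName, bool threadPoolWorker)
{
    char thrName[16];
    (void)std::snprintf(thrName, sizeof(thrName), "%s:%s",
                        threadPoolWorker ? "tw" : "w", userName);
    (void)pthread_setname_np(pthread_self(), thrName);
}
```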
*/ + t_thrd.proc_cxt.PostInit->InitLoadLocalSysCache(u_sess->proc_cxt.MyDatabaseId, + u_sess->proc_cxt.MyProcPort->database_name); + Assert(CheckMyDatabaseMatch()); m_currentSession->status = KNL_SESS_ATTACH; } break; @@ -608,6 +637,11 @@ bool ThreadPoolWorker::AttachSessionToThread() CleanUpSession(false); m_currentSession = NULL; u_sess = NULL; +#ifdef ENABLE_LITE_MODE + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->LocalSysDBCacheReSet(); + } +#endif } break; default: @@ -684,18 +718,14 @@ void ThreadPoolWorker::CleanUpSession(bool threadexit) } /* clear pgstat slot */ + pgstat_release_session_memory_entry(); pgstat_deinitialize_session(); pgstat_beshutdown_session(m_currentSession->session_ctr_index); localeconv_deinitialize_session(); /* clean gpc refcount and plancache in shared memory */ - if (!t_thrd.proc_cxt.proc_exit_inprogress) { - if (ENABLE_DN_GPC) - CleanSessGPCPtr(m_currentSession); - if (u_sess->pcache_cxt.unnamed_stmt_psrc && u_sess->pcache_cxt.unnamed_stmt_psrc->gpc.status.InShareTable()) - u_sess->pcache_cxt.unnamed_stmt_psrc->gpc.status.SubRefCount(); - CNGPCCleanUpSession(); - } + if (ENABLE_DN_GPC) + CleanSessGPCPtr(m_currentSession); /* * clear invalid msg slot @@ -810,6 +840,7 @@ static bool InitSession(knl_session_context* session) char* username = session->proc_cxt.MyProcPort->user_name; t_thrd.proc_cxt.PostInit->SetDatabaseAndUser(dbname, InvalidOid, username); t_thrd.proc_cxt.PostInit->InitSession(); + Assert(CheckMyDatabaseMatch()); SetProcessingMode(NormalProcessing); diff --git a/src/gausskernel/runtime/CMakeLists.txt b/src/gausskernel/runtime/CMakeLists.txt index 81d5e812e..c2159d6df 100755 --- a/src/gausskernel/runtime/CMakeLists.txt +++ b/src/gausskernel/runtime/CMakeLists.txt @@ -7,13 +7,18 @@ set(CMAKE_SKIP_RPATH TRUE) INCLUDE_DIRECTORIES(${LIBTHRIFT_INCLUDE_PATH} ${SNAPPY_INCLUDE_PATH} ${CJSON_INCLUDE_PATH} ${BOOST_INCLUDE_PATH}) set(CMAKE_MODULE_PATH - ${CMAKE_CURRENT_SOURCE_DIR}/codegen ${CMAKE_CURRENT_SOURCE_DIR}/executor ${CMAKE_CURRENT_SOURCE_DIR}/opfusion ${CMAKE_CURRENT_SOURCE_DIR}/vecexecutor ) -add_subdirectory(codegen) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND CMAKE_MODULE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/codegen + ) + add_subdirectory(codegen) +endif() + add_subdirectory(executor) add_subdirectory(opfusion) add_subdirectory(vecexecutor) diff --git a/src/gausskernel/runtime/Makefile b/src/gausskernel/runtime/Makefile index 056b17e17..20138113e 100644 --- a/src/gausskernel/runtime/Makefile +++ b/src/gausskernel/runtime/Makefile @@ -8,9 +8,9 @@ subdir = src/gausskernel/runtime top_builddir = ../../.. 
include $(top_builddir)/src/Makefile.global -SUBDIRS = executor opfusion vecexecutor +SUBDIRS = executor opfusion vecexecutor -ifeq ($(enable_llvm), yes) +ifneq ($(enable_lite_mode), yes) SUBDIRS += codegen endif diff --git a/src/gausskernel/runtime/codegen/CMakeLists.txt b/src/gausskernel/runtime/codegen/CMakeLists.txt index a4b3f3a1b..bdba85708 100755 --- a/src/gausskernel/runtime/codegen/CMakeLists.txt +++ b/src/gausskernel/runtime/codegen/CMakeLists.txt @@ -3,7 +3,9 @@ set(TGT_codegen_SRC ${CMAKE_CURRENT_SOURCE_DIR}/gscodegen.cpp) set(TGT_codegen_INC ${PROJECT_SRC_DIR}/include +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") ${LIBLLVM_INCLUDE_PATH} +endif() ) set(codegen_DEF_OPTIONS ${MACRO_OPTIONS} -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS) diff --git a/src/gausskernel/runtime/codegen/codegenutil/numericcodegen.cpp b/src/gausskernel/runtime/codegen/codegenutil/numericcodegen.cpp index 8b8db3b54..254d0fae5 100644 --- a/src/gausskernel/runtime/codegen/codegenutil/numericcodegen.cpp +++ b/src/gausskernel/runtime/codegen/codegenutil/numericcodegen.cpp @@ -2073,19 +2073,21 @@ llvm::Value* WrapmakeNumeric64CodeGen(GsCodeGen::LlvmBuilder* ptrbuilder, llvm:: DEFINE_CG_TYPE(int8Type, CHAROID); DEFINE_CG_TYPE(int64Type, INT8OID); + DEFINE_CGVAR_INT64(int64_0, 0); llvm::Function* jitted_make64num = llvmCodeGen->module()->getFunction("LLVMWrapMakeNumeric64"); if (jitted_make64num == NULL) { GsCodeGen::FnPrototype fn_prototype(llvmCodeGen, "LLVMWrapMakeNumeric64", int64Type); fn_prototype.addArgument(GsCodeGen::NamedVariable("value", int64Type)); fn_prototype.addArgument(GsCodeGen::NamedVariable("scale", int8Type)); + fn_prototype.addArgument(GsCodeGen::NamedVariable("arr", int64Type)); jitted_make64num = fn_prototype.generatePrototype(NULL, NULL); llvm::sys::DynamicLibrary::AddSymbol("LLVMWrapMakeNumeric64", (void*)makeNumeric64); } llvmCodeGen->FinalizeFunction(jitted_make64num); - result = ptrbuilder->CreateCall(jitted_make64num, {data, scale}); + result = ptrbuilder->CreateCall(jitted_make64num, {data, scale, int64_0}); return result; } @@ -2103,19 +2105,21 @@ llvm::Value* WrapmakeNumeric128CodeGen(GsCodeGen::LlvmBuilder* ptrbuilder, llvm: DEFINE_CG_TYPE(int8Type, CHAROID); DEFINE_CG_TYPE(int64Type, INT8OID); DEFINE_CG_NINTTYP(int128Type, 128); + DEFINE_CGVAR_INT64(int64_0, 0); llvm::Function* jitted_make128num = llvmCodeGen->module()->getFunction("LLVMWrapMakeNumeric128"); if (jitted_make128num == NULL) { GsCodeGen::FnPrototype fn_prototype(llvmCodeGen, "LLVMWrapMakeNumeric128", int64Type); fn_prototype.addArgument(GsCodeGen::NamedVariable("value", int128Type)); fn_prototype.addArgument(GsCodeGen::NamedVariable("scale", int8Type)); + fn_prototype.addArgument(GsCodeGen::NamedVariable("arr", int64Type)); jitted_make128num = fn_prototype.generatePrototype(NULL, NULL); llvm::sys::DynamicLibrary::AddSymbol("LLVMWrapMakeNumeric128", (void*)makeNumeric128); } llvmCodeGen->FinalizeFunction(jitted_make128num); - result = ptrbuilder->CreateCall(jitted_make128num, {data, scale}); + result = ptrbuilder->CreateCall(jitted_make128num, {data, scale, int64_0}); return result; } diff --git a/src/gausskernel/runtime/codegen/gscodegen.cpp b/src/gausskernel/runtime/codegen/gscodegen.cpp index 146acd3a9..7e7b250fe 100644 --- a/src/gausskernel/runtime/codegen/gscodegen.cpp +++ b/src/gausskernel/runtime/codegen/gscodegen.cpp @@ -24,6 +24,7 @@ #include "codegen/gscodegen.h" #include +#ifdef ENABLE_LLVM_COMPILE #include "llvm/ADT/Triple.h" #include "llvm/ADT/ArrayRef.h" #include 
"llvm/Analysis/InstructionSimplify.h" @@ -50,6 +51,7 @@ #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm-c/Core.h" +#endif #include "pgxc/pgxc.h" #include "utils/memutils.h" @@ -552,7 +554,11 @@ PointerType* GsCodeGen::getPtrType(Oid TypeID) Type* GsCodeGen::getType(const char* name) { Assert(NULL != m_currentModule && NULL != name); +#if LLVM_MAJOR_VERSION == 12 + return StructType::getTypeByName(context(), StringRef(name, strlen(name))); +#else return m_currentModule->getTypeByName(StringRef(name, strlen(name))); +#endif } PointerType* GsCodeGen::getPtrType(const char* name) @@ -983,7 +989,6 @@ void CodeGenProcessInitialize() } } - /** * @Description : Clean up LLVM enviroment resource * before exit postmaster. diff --git a/src/gausskernel/runtime/executor/Makefile b/src/gausskernel/runtime/executor/Makefile index 0cc592cf1..1c240fbcc 100644 --- a/src/gausskernel/runtime/executor/Makefile +++ b/src/gausskernel/runtime/executor/Makefile @@ -47,7 +47,7 @@ OBJS = execAmi.o execCurrent.o execGrouping.o execJunk.o execMain.o \ nodeGroup.o nodeSubplan.o nodeSubqueryscan.o nodeTidscan.o \ nodeForeignscan.o nodeWindowAgg.o tstoreReceiver.o spi.o \ nodePartIterator.o nodeStub.o execClusterResize.o lightProxy.o execMerge.o \ - nodeExtensible.o route.o nodeGD.o nodeKMeans.o spiDbesql.o + nodeExtensible.o route.o nodeTrainModel.o db4ai_common.o spiDbesql.o override CPPFLAGS += -D__STDC_FORMAT_MACROS diff --git a/src/gausskernel/runtime/executor/db4ai_common.cpp b/src/gausskernel/runtime/executor/db4ai_common.cpp new file mode 100644 index 000000000..4b1baca96 --- /dev/null +++ b/src/gausskernel/runtime/executor/db4ai_common.cpp @@ -0,0 +1,184 @@ +/* +* Copyright (c) 2020 Huawei Technologies Co.,Ltd. +* +* openGauss is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*--------------------------------------------------------------------------------------- +* +* db4ai_common.cpp +* Implementation of Public Methods of DB4AI +* +* IDENTIFICATION +* src/gausskernel/runtime/executor/db4ai_common.cpp +* +* --------------------------------------------------------------------------------------- +*/ + +#include "db4ai/db4ai_common.h" + +FORCE_INLINE +uint64_t time_diff(struct timespec *time_p1, struct timespec *time_p2) +{ + return ((time_p1->tv_sec * 1000000000) + time_p1->tv_nsec) - ((time_p2->tv_sec * 1000000000) + time_p2->tv_nsec); +} + +FORCE_INLINE +double interval_to_sec(double time_interval) +{ + return time_interval / 1000000000.0; +} + +FORCE_INLINE +double interval_to_msec(double time_interval) +{ + return time_interval / 1000000.0; +} + + +Datum float8_get_datum(Oid type, float8 value) +{ + Datum datum = 0; + switch (type) { + case BOOLOID: + datum = BoolGetDatum(value != 0.0); + break; + case INT1OID: + datum = Int8GetDatum(value); + break; + case INT2OID: + datum = Int16GetDatum(value); + break; + case INT4OID: + datum = Int32GetDatum(value); + break; + case INT8OID: + datum = Int64GetDatum(value); + break; + case FLOAT4OID: + datum = Float4GetDatum(value); + break; + case FLOAT8OID: + datum = Float8GetDatum(value); + break; + case NUMERICOID: + datum = DirectFunctionCall1(float8_numeric, Float8GetDatum(value)); + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Oid type %u not yet supported", type))); + break; + } + return datum; +} + +float8 datum_get_float8(Oid type, Datum datum) +{ + float8 value = 0; + switch (type) { + case BOOLOID: + value = DatumGetBool(datum) ? 1.0 : 0.0; + break; + case INT1OID: + value = DatumGetInt8(datum); + break; + case INT2OID: + value = DatumGetInt16(datum); + break; + case INT4OID: + value = DatumGetInt32(datum); + break; + case INT8OID: + value = DatumGetInt64(datum); + break; + case FLOAT4OID: + value = DatumGetFloat4(datum); + break; + case FLOAT8OID: + value = DatumGetFloat8(datum); + break; + case NUMERICOID: + value = DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow, datum)); + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Oid type %u not yet supported", type))); + break; + } + return value; +} + +int32 datum_get_int(Oid type, Datum datum) +{ + int32 value = 0; + switch (type) { + case BOOLOID: + value = DatumGetBool(datum) ? 
1 : 0; + break; + case INT1OID: + value = DatumGetInt8(datum); + break; + case INT2OID: + value = DatumGetInt16(datum); + break; + case INT4OID: + value = DatumGetInt32(datum); + break; + case INT8OID: + value = DatumGetInt64(datum); + break; + case FLOAT4OID: + value = DatumGetFloat4(datum); + break; + case FLOAT8OID: + value = DatumGetFloat8(datum); + break; + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Oid type %u not yet supported", type))); + break; + } + return value; +} + +Datum string_to_datum(const char *str, Oid datatype) +{ + switch (datatype) { + case BOOLOID: + return DirectFunctionCall1(boolin, CStringGetDatum(str)); + case INT1OID: + case INT2OID: + case INT4OID: + return Int32GetDatum(atoi(str)); + case INT8OID: + return Int64GetDatum(atoi(str)); + case VARCHAROID: + case BPCHAROID: + case CHAROID: + case TEXTOID: + return CStringGetTextDatum(str); + case FLOAT4OID: + case FLOAT8OID: + return DirectFunctionCall1(float8in, CStringGetDatum(str)); + case CSTRINGOID: + return CStringGetDatum(str); + default: + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("The type is not supported: %u", datatype))); + return CStringGetTextDatum(str); + } +} + +void check_hyper_bounds(unsigned int num_x, unsigned int num_y, const char *hyper) +{ + if (unlikely(UINT32_MAX / num_x < num_y)) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Number multiplication is out of bounds. Hyperparemeter: %s.", hyper))); +} diff --git a/src/gausskernel/runtime/executor/execClusterResize.cpp b/src/gausskernel/runtime/executor/execClusterResize.cpp index d5688ab53..0dc5b3f2b 100644 --- a/src/gausskernel/runtime/executor/execClusterResize.cpp +++ b/src/gausskernel/runtime/executor/execClusterResize.cpp @@ -156,15 +156,6 @@ void RecordDeletedTuple(Oid relid, int2 bucketid, const ItemPointer tupleid, con tableam_tops_free_tuple(tup); } -/* - * - Brief: get and open delete_delta rel - * - Parameter: - * @rel: target relation of UPDATE/DELETE/TRUNCATE operation - * @lockmode: lock mode - * @isMultiCatchup: multi catchup delta or not - * - Return: - * delete_delta rel - */ /* * - Brief: Determine if the relation is under cluster resizing operation * - Parameter: @@ -203,6 +194,25 @@ bool RelationInClusterResizingReadOnly(const Relation rel) return false; } +/* + * - Brief: Determine if the relation is under cluster resizing read only operation + * - Parameter: + * @rel: relation that needs to check + * - Return: + * @TRUE: relation is under cluster resizing endcatchup(write error) + * @FALSE: relation is not under cluster resizing endcatchup(write error) + */ +bool RelationInClusterResizingEndCatchup(const Relation rel) +{ + Assert(rel != NULL); + + /* Check relation's append_mode status */ + if (!IsInitdb && RelationInRedistributeEndCatchup(rel)) + return true; + + return false; +} + /* * @Description: check whether relation is in redistribution though range variable. * @in range_var: range variable which stored relation info. 
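check_hyper_bounds above rejects a hyperparameter pair whose product would overflow by testing UINT32_MAX / num_x < num_y before multiplying. A standalone version of that guard; the num_x != 0 test is an addition in this sketch, since dividing by zero would otherwise be undefined:

```
#include <cstdint>
#include <stdexcept>

/* The division cannot overflow, so the bounds test itself is always safe. */
uint32_t checked_mul_u32(uint32_t num_x, uint32_t num_y)
{
    if (num_x != 0 && UINT32_MAX / num_x < num_y) {
        throw std::overflow_error("hyperparameter product out of bounds");
    }
    return num_x * num_y;
}
```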
@@ -370,6 +380,15 @@ static inline void RelationGetDeleteDeltaTableName(Relation rel, char* delete_de return; } +/* + * - Brief: get and open delete_delta rel + * - Parameter: + * @rel: target relation of UPDATE/DELETE/TRUNCATE operation + * @lockmode: lock mode + * @isMultiCatchup: multi catchup delta or not + * - Return: + * delete_delta rel + */ Relation GetAndOpenDeleteDeltaRel(const Relation rel, LOCKMODE lockmode, bool isMultiCatchup) { Relation deldelta_rel; @@ -1201,3 +1220,18 @@ void RelationGetNewTableName(Relation rel, char* newtable_name) } return; } + +/* + * - Brief: Determine if the relation is under cluster resizing write error mode + * - Parameter: + * @rel: relation that needs to check + * - Return: + * @TRUE: relation is under cluster resizing write error mode + * @FALSE: relation is not under cluster resizing write error mode + */ +bool RelationInClusterResizingWriteErrorMode(const Relation rel) +{ + return RelationInClusterResizingReadOnly(rel) || + (RelationInClusterResizingEndCatchup(rel) && !pg_try_advisory_lock_for_redis(rel)); +} + diff --git a/src/gausskernel/runtime/executor/execMain.cpp b/src/gausskernel/runtime/executor/execMain.cpp index 1b51ae30c..74c9d4ce1 100755 --- a/src/gausskernel/runtime/executor/execMain.cpp +++ b/src/gausskernel/runtime/executor/execMain.cpp @@ -37,6 +37,7 @@ * * ------------------------------------------------------------------------- */ + #include "codegen/gscodegen.h" #include "postgres.h" @@ -297,9 +298,10 @@ void standard_ExecutorStart(QueryDesc *queryDesc, int eflags) } old_context = MemoryContextSwitchTo(estate->es_query_cxt); - +#ifdef ENABLE_LLVM_COMPILE /* Initialize the actual CodeGenObj */ CodeGenThreadRuntimeSetup(); +#endif /* * Fill in external parameters, if any, from queryDesc; and allocate @@ -543,6 +545,7 @@ void standard_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long co */ old_context = MemoryContextSwitchTo(estate->es_query_cxt); +#ifdef ENABLE_LLVM_COMPILE /* * Generate machine code for this query. */ @@ -555,6 +558,7 @@ void standard_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long co CodeGenThreadRuntimeCodeGenerate(); } } +#endif /* Allow instrumentation of Executor overall runtime */ if (queryDesc->totaltime) { @@ -767,9 +771,11 @@ void standard_ExecutorEnd(QueryDesc *queryDesc) UnregisterSnapshot(estate->es_snapshot); UnregisterSnapshot(estate->es_crosscheck_snapshot); +#ifdef ENABLE_LLVM_COMPILE if (!t_thrd.codegen_cxt.g_runningInFmgr) { CodeGenThreadTearDown(); } +#endif /* * Must switch out of context before destroying it @@ -1305,6 +1311,7 @@ void InitPlan(QueryDesc *queryDesc, int eflags) erm->rowmarkId = rc->rowmarkId; erm->markType = rc->markType; erm->noWait = rc->noWait; + erm->waitSec = rc->waitSec; erm->numAttrs = rc->numAttrs; ItemPointerSetInvalid(&(erm->curCtid)); estate->es_rowMarks = lappend(estate->es_rowMarks, erm); @@ -1459,6 +1466,10 @@ void InitPlan(QueryDesc *queryDesc, int eflags) estate->pruningResult = NULL; } + if (planstate->ps_ProjInfo) { + planstate->ps_ProjInfo->pi_topPlan = true; + } + /* * Get the tuple descriptor describing the type of tuples to return. */ @@ -2119,7 +2130,6 @@ static void ExecutePlan(EState *estate, PlanState *planstate, CmdType operation, // planstate->plan will be release if rollback excuted bool is_saved_recursive_union_plan_nodeid = EXEC_IN_RECURSIVE_MODE(planstate->plan); - /* * Loop until we've processed the proper number of tuples from the plan. 
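RelationInClusterResizingWriteErrorMode above composes two conditions: the relation is read-only under redistribution, or it is in end-catchup and the advisory lock cannot be taken without blocking. A toy restatement of that predicate with stand-in types (Rel and TryAdvisoryLockForRedis are illustrative, not kernel API):

```
/* Writes fail either when the relation is read-only under redistribution, or
 * when it is in end-catchup and the redis advisory lock is not immediately free. */
struct Rel {
    bool readOnlyResizing;
    bool endCatchup;
};

static bool TryAdvisoryLockForRedis(const Rel &) { return false; /* stub */ }

bool InWriteErrorMode(const Rel &rel)
{
    return rel.readOnlyResizing ||
           (rel.endCatchup && !TryAdvisoryLockForRedis(rel));
}
```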
*/ @@ -2206,20 +2216,27 @@ static void ExecutePlan(EState *estate, PlanState *planstate, CmdType operation, slot = ExecFilterJunk(estate->es_junkFilter, slot); } +#ifdef ENABLE_MULTIPLE_NDOES if (stream_instrument) { t_thrd.pgxc_cxt.GlobalNetInstr = planstate->instrument; } - +#endif /* * If we are supposed to send the tuple somewhere, do so. (In * practice, this is probably always the case at this point.) */ - if (sendTuples && !u_sess->exec_cxt.executorStopFlag) { +#ifdef ENABLE_MULTIPLE_NDOES + if (sendTuples && !u_sess->exec_cxt.executorStopFlag) +#else + if (sendTuples) +#endif + { (*dest->receiveSlot)(slot, dest); } +#ifdef ENABLE_MULTIPLE_NDOES t_thrd.pgxc_cxt.GlobalNetInstr = NULL; - +#endif /* * Count tuples processed, if this is a SELECT. (For other operation * types, the ModifyTable plan node must count the appropriate @@ -2944,7 +2961,7 @@ HeapTuple heap_lock_updated(CommandId cid, Relation relation, int lockmode, Item * invalid if it is already committed. */ if (TransactionIdDidCommit(SnapshotDirty.xmin)) { - elog(WARNING, + elog(DEBUG2, "t_xmin %lu is committed in clog, but still" " in procarray, so set it back to invalid.", SnapshotDirty.xmin); diff --git a/src/gausskernel/runtime/executor/execProcnode.cpp b/src/gausskernel/runtime/executor/execProcnode.cpp index 3a246e2c1..fd1cf1372 100755 --- a/src/gausskernel/runtime/executor/execProcnode.cpp +++ b/src/gausskernel/runtime/executor/execProcnode.cpp @@ -163,8 +163,7 @@ #include "securec.h" #include "gstrace/gstrace_infra.h" #include "gstrace/executer_gstrace.h" -#include "executor/node/nodeGD.h" -#include "executor/node/nodeKMeans.h" +#include "executor/node/nodeTrainModel.h" #define NODENAMELEN 64 @@ -396,10 +395,8 @@ PlanState* ExecInitNodeByType(Plan* node, EState* estate, int eflags) return (PlanState*)ExecInitVecMergeJoin((VecMergeJoin*)node, estate, eflags); case T_VecWindowAgg: return (PlanState*)ExecInitVecWindowAgg((VecWindowAgg*)node, estate, eflags); - case T_GradientDescent: - return (PlanState*)ExecInitGradientDescent((GradientDescent*)node, estate, eflags); - case T_KMeans: - return (PlanState*)ExecInitKMeans((KMeans*)node, estate, eflags); + case T_TrainModel: + return (PlanState*)ExecInitTrainModel((TrainModel*)node, estate, eflags); default: ereport(ERROR, (errmodule(MOD_EXECUTOR), @@ -703,10 +700,8 @@ TupleTableSlot* ExecProcNodeByType(PlanState* node) result = ExecStream((StreamState*)node); t_thrd.pgxc_cxt.GlobalNetInstr = NULL; return result; - case T_GradientDescentState: - return ExecGradientDescent((GradientDescentState*)node); - case T_KMeansState: - return ExecKMeans((KMeansState*)node); + case T_TrainModelState: + return ExecTrainModel((TrainModelState*)node); default: ereport(ERROR, (errmodule(MOD_EXECUTOR), @@ -744,6 +739,16 @@ void ExecProcNodeInstr(PlanState* node, TupleTableSlot* result) node->state->es_last_processed = node->state->es_processed; node->instrument->firsttuple = INSTR_TIME_GET_DOUBLE(first_tuple); break; + case T_SeqScanState: + if (((SeqScanState*) node)->scanBatchMode) { + if (!TupIsNull(result)) { + /* Batch mode does not collect memory info as it takes too much CPU resources. */ + InstrStopNode(node->instrument, ((SeqScanState*)node)->scanBatchState->scanBatch.rows, false); + } else { + InstrStopNode(node->instrument, 0.0); + } + break; + } default: InstrStopNode(node->instrument, TupIsNull(result) ? 
0.0 : 1.0); break; @@ -770,6 +775,11 @@ static inline TupleTableSlot *ExecResultWrap(PlanState *node) return ExecResult((ResultState*)node); }; +static inline TupleTableSlot *ExecVecToRowWrap(PlanState *node) +{ + return ExecVecToRow((VecToRowState*)node); +} + static inline TupleTableSlot *ExecModifyTableWrap(PlanState *node) { return ExecModifyTable((ModifyTableState*)node); @@ -930,6 +940,11 @@ static inline TupleTableSlot *ExecRemoteQueryWrap(PlanState *node) return ExecRemoteQuery((RemoteQueryState *)node); }; +static inline TupleTableSlot *ExecTrainModelWrap(PlanState *node) +{ + return ExecTrainModel((TrainModelState*)node); +} + static inline TupleTableSlot *ExecStreamWrap(PlanState *node) { return ExecStream((StreamState *)node); @@ -937,6 +952,7 @@ static inline TupleTableSlot *ExecStreamWrap(PlanState *node) ExecProcFuncType g_execProcFuncTable[] = { ExecResultWrap, + ExecVecToRowWrap, DefaultExecProc, ExecModifyTableWrap, ExecModifyTableWrap, @@ -976,6 +992,7 @@ ExecProcFuncType g_execProcFuncTable[] = { ExecLockRowsWrap, ExecLimitWrap, ExecRemoteQueryWrap, + ExecTrainModelWrap, ExecStreamWrap }; @@ -993,9 +1010,11 @@ TupleTableSlot* ExecProcNode(PlanState* node) MemoryContext old_context; /* Response to stop or cancel signal. */ +#ifdef ENABLE_MULTIPLE_NODES if (unlikely(executorEarlyStop())) { return NULL; } +#endif /* Switch to Node Level Memory Context */ old_context = MemoryContextSwitchTo(node->nodeContext); @@ -1008,15 +1027,15 @@ TupleTableSlot* ExecProcNode(PlanState* node) InstrStartNode(node->instrument); } +#ifdef ENABLE_MULTIPLE_NODES if (unlikely(planstate_need_stub(node))) { result = ExecProcNodeStub(node); - } else { + } else +#endif + { int index = (int)(nodeTag(node))-T_ResultState; - if (likely(index <= 0 && index <= T_StreamState - T_ResultState)) { - result = g_execProcFuncTable[index](node); - } else { - result = ExecProcNodeByType(node); - } + Assert(index >= 0 && index <= T_StreamState - T_ResultState); + result = g_execProcFuncTable[index](node); } if (node->instrument != NULL) { @@ -1599,12 +1618,8 @@ static void ExecEndNodeByType(PlanState* node) ExecEndVecWindowAgg((VecWindowAggState*)node); break; - case T_GradientDescentState: - ExecEndGradientDescent((GradientDescentState*)node); - break; - - case T_KMeansState: - ExecEndKMeans((KMeansState*)node); + case T_TrainModelState: + ExecEndTrainModel((TrainModelState*)node); break; default: diff --git a/src/gausskernel/runtime/executor/execQual.cpp b/src/gausskernel/runtime/executor/execQual.cpp index d577d6ea3..ccd880bda 100644 --- a/src/gausskernel/runtime/executor/execQual.cpp +++ b/src/gausskernel/runtime/executor/execQual.cpp @@ -72,6 +72,7 @@ #include "commands/trigger.h" #include "db4ai/gd.h" #include "catalog/pg_proc_fn.h" +#include "access/tuptoaster.h" /* static function decls */ static Datum ExecEvalArrayRef(ArrayRefExprState* astate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); @@ -89,6 +90,7 @@ static Datum ExecEvalWholeRowSlow( static Datum ExecEvalConst(ExprState* exprstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalParamExec(ExprState* exprstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static Datum ExecEvalParamExtern(ExprState* exprstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); +static bool isVectorEngineSupportSetFunc(Oid funcid); template static void init_fcache( Oid foid, Oid input_collation, FuncExprState* fcache, MemoryContext fcacheCxt, bool needDescForSets); @@ -149,8 +151,7 @@ static 
Datum ExecEvalGroupingFuncExpr( static Datum ExecEvalGroupingIdExpr( GroupingIdExprState* gstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); static bool func_has_refcursor_args(Oid Funcid, FunctionCallInfoData* fcinfo); - -extern Datum ExecEvalGradientDescent(GradientDescentExprState* mlstate, ExprContext* econtext, bool* isNull, ExprDoneCond* isDone); +extern struct varlena *heap_tuple_fetch_and_copy(Relation rel, struct varlena *attr, bool needcheck); THR_LOCAL PLpgSQL_execstate* plpgsql_estate = NULL; @@ -253,13 +254,13 @@ static Datum ExecEvalArrayRef(ArrayRefExprState* astate, ExprContext* econtext, if (!isAssignment) return (Datum)NULL; } - - Oid tableOfIndexType; - bool isnestedtable = false; - HTAB* tableOfIndex = ExecEvalParamExternTableOfIndex(astate->refexpr, econtext, &tableOfIndexType, &isnestedtable); + ExecTableOfIndexInfo execTableOfIndexInfo; + initExecTableOfIndexInfo(&execTableOfIndexInfo, econtext); + ExecEvalParamExternTableOfIndex((Node*)astate->refexpr->expr, &execTableOfIndexInfo); if (u_sess->SPI_cxt.cur_tableof_index != NULL) { - u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = tableOfIndexType; - u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = tableOfIndex; + u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = execTableOfIndexInfo.tableOfIndexType; + u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = execTableOfIndexInfo.tableOfIndex; + u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer = list_length(astate->refupperindexpr); } foreach (l, astate->refupperindexpr) { @@ -269,23 +270,33 @@ static Datum ExecEvalArrayRef(ArrayRefExprState* astate, ExprContext* econtext, ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)", i + 1, MAXDIM))); - if (OidIsValid(tableOfIndexType) || isnestedtable) { + if (OidIsValid(execTableOfIndexInfo.tableOfIndexType) || execTableOfIndexInfo.isnestedtable) { + bool isTran = false; + PLpgSQL_execstate* old_estate = plpgsql_estate; Datum exprValue = ExecEvalExpr(eltstate, econtext, &eisnull, NULL); + plpgsql_estate = old_estate; + if (execTableOfIndexInfo.tableOfIndexType == VARCHAROID && !eisnull && VARATT_IS_1B(exprValue)) { + exprValue = transVaratt1BTo4B(exprValue); + isTran = true; + } TableOfIndexKey key; PLpgSQL_var* node = NULL; - key.exprtypeid = tableOfIndexType; + key.exprtypeid = execTableOfIndexInfo.tableOfIndexType; key.exprdatum = exprValue; - int index = getTableOfIndexByDatumValue(key, tableOfIndex, &node); - if (isnestedtable) { + int index = getTableOfIndexByDatumValue(key, execTableOfIndexInfo.tableOfIndex, &node); + if (isTran) { + pfree(DatumGetPointer(exprValue)); + } + if (execTableOfIndexInfo.isnestedtable) { /* for nested table, we should take inner table's array and skip current indx */ if (node == NULL || index == -1) { eisnull = true; } else { PLpgSQL_var* var = node; - isnestedtable = (var->nest_table != NULL); + execTableOfIndexInfo.isnestedtable = (var->nest_table != NULL); array_source = (ArrayType*)DatumGetPointer(var->value); - tableOfIndexType = var->datatype->tableOfIndexType; - tableOfIndex = var->tableOfIndex; + execTableOfIndexInfo.tableOfIndexType = var->datatype->tableOfIndexType; + execTableOfIndexInfo.tableOfIndex = var->tableOfIndex; eisnull = var->isnull; if (plpgsql_estate) plpgsql_estate->curr_nested_table_type = var->datatype->typoid; @@ -321,7 +332,7 @@ static Datum ExecEvalArrayRef(ArrayRefExprState* astate, ExprContext* econtext, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), 
errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)", j + 1, MAXDIM))); - if (tableOfIndexType == VARCHAROID || isnestedtable) { + if (execTableOfIndexInfo.tableOfIndexType == VARCHAROID || execTableOfIndexInfo.isnestedtable) { ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("index by varchar or nested table don't support two subscripts"))); @@ -409,10 +420,6 @@ static Datum ExecEvalArrayRef(ArrayRefExprState* astate, ExprContext* econtext, econtext->caseValue_datum = save_datum; econtext->caseValue_isNull = save_isNull; - if (u_sess->SPI_cxt.cur_tableof_index != NULL) { - u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = InvalidOid; - u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = NULL; - } /* * For an assignment to a fixed-length array type, both the original @@ -467,10 +474,7 @@ static Datum ExecEvalArrayRef(ArrayRefExprState* astate, ExprContext* econtext, &astate->refelemalign); } } - if (u_sess->SPI_cxt.cur_tableof_index != NULL) { - u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = InvalidOid; - u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = NULL; - } + if (lIndex == NULL) { if (unlikely(i == 0)) { /* get nested table's inner table */ @@ -1057,7 +1061,11 @@ static Datum ExecEvalRownum(RownumState* exprstate, ExprContext* econtext, bool* *isDone = ExprSingleResult; *isNull = false; - return DirectFunctionCall1(int8_numeric, Int64GetDatum(exprstate->ps->ps_rownum + 1)); + if (ROWNUM_TYPE_COMPAT) { + return DirectFunctionCall1(int8_numeric, Int64GetDatum(exprstate->ps->ps_rownum + 1)); + } else { + return Int64GetDatum(exprstate->ps->ps_rownum + 1); + } } /* ---------------------------------------------------------------- @@ -1143,28 +1151,37 @@ static Datum ExecEvalParamExtern(ExprState* exprstate, ExprContext* econtext, bo return (Datum)0; /* keep compiler quiet */ } -static bool get_tableofindex_param_walker(Node* node, Param* expression) +void initExecTableOfIndexInfo(ExecTableOfIndexInfo* execTableOfIndexInfo, ExprContext* econtext) +{ + execTableOfIndexInfo->econtext = econtext; + execTableOfIndexInfo->tableOfIndex = NULL; + execTableOfIndexInfo->tableOfIndexType = InvalidOid; + execTableOfIndexInfo->isnestedtable = false; + execTableOfIndexInfo->tableOfLayers = 0; + execTableOfIndexInfo->paramid = -1; + execTableOfIndexInfo->paramtype = InvalidOid; +} + +/* this function is only used for getting table of index inout param */ +static bool get_tableofindex_param(Node* node, ExecTableOfIndexInfo* execTableOfIndexInfo) { if (node == NULL) return false; if (IsA(node, Param)) { - expression->paramid = ((Param*)node)->paramid; - expression->paramtype = ((Param*)node)->paramtype; + execTableOfIndexInfo->paramid = ((Param*)node)->paramid; + execTableOfIndexInfo->paramtype = ((Param*)node)->paramtype; return true; - } else if (IsA(node, FuncExpr)) { - FuncExpr* funcExpr = (FuncExpr*)(node); - const Oid array_function_start_oid = 7881; - const Oid array_function_end_oid = 7892; - - bool isArrayFunction = funcExpr->funcid >= array_function_start_oid && - funcExpr->funcid <= array_function_end_oid; - if (isArrayFunction) { - expression->paramid = ((Param*)linitial(funcExpr->args))->paramid; - expression->paramtype = ((Param*)linitial(funcExpr->args))->paramtype; - return true; - } } - return expression_tree_walker(node, (bool (*)())get_tableofindex_param_walker, expression); + return false; +} + +static bool IsTableOfFunc(Oid funcOid) +{ + const Oid array_function_start_oid = 7881; + const Oid array_function_end_oid = 7892; + const Oid 
array_indexby_delete_oid = 7896; + return (funcOid >= array_function_start_oid && funcOid <= array_function_end_oid) || + funcOid == array_indexby_delete_oid; } /* ---------------------------------------------------------------- @@ -1173,21 +1190,21 @@ static bool get_tableofindex_param_walker(Node* node, Param* expression) * Returns the value of a PARAM_EXTERN table of index and type parameter . * ---------------------------------------------------------------- */ -HTAB* ExecEvalParamExternTableOfIndex(ExprState* exprstate, ExprContext* econtext, - Oid* tableOfIndexType, bool *isnestedtable) +void ExecEvalParamExternTableOfIndex(Node* node, ExecTableOfIndexInfo* execTableOfIndexInfo) { - Param expression; - expression.paramid = -1; - get_tableofindex_param_walker((Node*)exprstate->expr, &expression); + if (get_tableofindex_param(node, execTableOfIndexInfo)) { + ExecEvalParamExternTableOfIndexById(execTableOfIndexInfo); + } +} - if (expression.paramid == -1) { - *tableOfIndexType = InvalidOid; - *isnestedtable = false; - return NULL; +bool ExecEvalParamExternTableOfIndexById(ExecTableOfIndexInfo* execTableOfIndexInfo) +{ + if (execTableOfIndexInfo->paramid == -1) { + return false; } - int thisParamId = expression.paramid; - ParamListInfo paramInfo = econtext->ecxt_param_list_info; + int thisParamId = execTableOfIndexInfo->paramid; + ParamListInfo paramInfo = execTableOfIndexInfo->econtext->ecxt_param_list_info; /* * PARAM_EXTERN parameters must be sought in ecxt_param_list_info. @@ -1199,24 +1216,24 @@ HTAB* ExecEvalParamExternTableOfIndex(ExprState* exprstate, ExprContext* econtex if (!OidIsValid(prm->ptype) && paramInfo->paramFetch != NULL) (*paramInfo->paramFetch)(paramInfo, thisParamId); - if (OidIsValid(prm->ptype)) { + if (OidIsValid(prm->ptype) && prm->tabInfo != NULL && + prm->tabInfo->tableOfIndex != NULL && OidIsValid(prm->tabInfo->tableOfIndexType)) { /* safety check in case hook did something unexpected */ - if (prm->ptype != expression.paramtype) + if (prm->ptype != execTableOfIndexInfo->paramtype) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("type of parameter %d (%s) does not match that when preparing the plan (%s)", thisParamId, format_type_be(prm->ptype), - format_type_be(expression.paramtype)))); - - *tableOfIndexType = prm->tableOfIndexType; - *isnestedtable = prm->isnestedtable; - return prm->tableOfIndex; + format_type_be(execTableOfIndexInfo->paramtype)))); + execTableOfIndexInfo->tableOfIndexType = prm->tabInfo->tableOfIndexType; + execTableOfIndexInfo->isnestedtable = prm->tabInfo->isnestedtable; + execTableOfIndexInfo->tableOfLayers = prm->tabInfo->tableOfLayers; + execTableOfIndexInfo->tableOfIndex = prm->tabInfo->tableOfIndex; + return true; } } - *isnestedtable = false; - *tableOfIndexType = InvalidOid; - return NULL; + return false; } @@ -1383,6 +1400,23 @@ static Oid getRealFuncRetype(int arg_num, Oid* actual_arg_types, FuncExprState* return rettype; } +/* + * Check whether the function is a set function supported by the vector engine. 
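The get_tableofindex_param refactor earlier in this file drops the expression_tree_walker and accepts only a direct Param node, since only a plain in/out parameter can carry a table-of index. A standalone sketch of that shape check, with simplified stand-ins for the kernel's Node and Param:

```
#include <cstdint>

/* Simplified stand-ins: the check accepts a bare Param node only, instead of
 * walking the whole expression tree as the removed walker did. */
enum NodeTag { T_Invalid = 0, T_Param };
struct Node  { NodeTag tag; };
struct Param { NodeTag tag; int paramid; uint32_t paramtype; };

struct TableOfIndexInfo { int paramid = -1; uint32_t paramtype = 0; };

bool GetTableOfIndexParam(const Node *node, TableOfIndexInfo *info)
{
    if (node == nullptr || node->tag != T_Param) {
        return false;  /* no recursion: only a direct Param can carry an index */
    }
    const Param *p = reinterpret_cast<const Param *>(node);
    info->paramid = p->paramid;
    info->paramtype = p->paramtype;
    return true;
}
```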
+ */ +static bool isVectorEngineSupportSetFunc(Oid funcid) +{ + switch (funcid) { + case OID_REGEXP_SPLIT_TO_TABLE: // regexp_split_to_table + case OID_REGEXP_SPLIT_TO_TABLE_NO_FLAG: // regexp_split_to_table + case OID_ARRAY_UNNEST: // unnest + return true; + break; + default: + return false; + break; + } +} + /* * init_fcache - initialize a FuncExprState node during first use */ @@ -1515,8 +1549,7 @@ static void init_fcache( if (vectorized) { if (fcache->func.fn_retset == true) { - if (fcache->func.fn_oid != OID_REGEXP_SPLIT_TO_TABLE && - fcache->func.fn_oid != OID_REGEXP_SPLIT_TO_TABLE_NO_FLAG) { + if (!isVectorEngineSupportSetFunc(fcache->func.fn_oid)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_EXECUTOR), @@ -1658,6 +1691,7 @@ static ExprDoneCond ExecEvalFuncArgs( i = 0; econtext->is_cursor = false; + u_sess->plsql_cxt.func_tableof_index = NIL; foreach (arg, argList) { ExprState* argstate = (ExprState*)lfirst(arg); ExprDoneCond thisArgIsDone; @@ -1665,6 +1699,20 @@ static ExprDoneCond ExecEvalFuncArgs( if (has_refcursor && argstate->resultType == REFCURSOROID) econtext->is_cursor = true; fcinfo->arg[i] = ExecEvalExpr(argstate, econtext, &fcinfo->argnull[i], &thisArgIsDone); + ExecTableOfIndexInfo execTableOfIndexInfo; + initExecTableOfIndexInfo(&execTableOfIndexInfo, econtext); + ExecEvalParamExternTableOfIndex((Node*)argstate->expr, &execTableOfIndexInfo); + if (execTableOfIndexInfo.tableOfIndex != NULL) { + MemoryContext oldCxt = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); + PLpgSQL_func_tableof_index* func_tableof = + (PLpgSQL_func_tableof_index*)palloc0(sizeof(PLpgSQL_func_tableof_index)); + func_tableof->varno = i; + func_tableof->tableOfIndexType = execTableOfIndexInfo.tableOfIndexType; + func_tableof->tableOfIndex = copyTableOfIndex(execTableOfIndexInfo.tableOfIndex); + u_sess->plsql_cxt.func_tableof_index = lappend(u_sess->plsql_cxt.func_tableof_index, func_tableof); + MemoryContextSwitchTo(oldCxt); + } + if (has_refcursor && econtext->is_cursor && plpgsql_var_dno != NULL) { plpgsql_var_dno[i] = econtext->dno; CopyCursorInfoData(&fcinfo->refcursor_data.argCursor[i], &econtext->cursor_data); @@ -1803,6 +1851,29 @@ static void tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc) } } +static void set_result_for_plpgsql_language_function_with_outparam(FuncExprState *fcache, Datum *result, bool *isNull) +{ + if (!IsA(fcache->xprstate.expr, FuncExpr)) { + return; + } + FuncExpr *func = (FuncExpr *)fcache->xprstate.expr; + if (!is_function_with_plpgsql_language_and_outparam(func->funcid)) { + return; + } + HeapTupleHeader td = DatumGetHeapTupleHeader(*result); + TupleDesc tupdesc = lookup_rowtype_tupdesc_copy(HeapTupleHeaderGetTypeId(td), HeapTupleHeaderGetTypMod(td)); + HeapTupleData tup; + tup.t_len = HeapTupleHeaderGetDatumLength(td); + tup.t_data = td; + Datum *values = (Datum *)palloc(sizeof(Datum) * tupdesc->natts); + bool *nulls = (bool *)palloc(sizeof(bool) * tupdesc->natts); + heap_deform_tuple(&tup, tupdesc, values, nulls); + *result = values[0]; + *isNull = nulls[0]; + pfree(values); + pfree(nulls); +} + /* * ExecMakeFunctionResult * @@ -2167,6 +2238,8 @@ restart: pfree_ext(var_dno); } + set_result_for_plpgsql_language_function_with_outparam(fcache, &result, isNull); + return result; } @@ -2201,6 +2274,7 @@ static Datum ExecMakeFunctionResultNoSets( bool savedProConfigIsSet = u_sess->SPI_cxt.is_proconfig_set; bool proIsProcedure = false; bool supportTranaction = false; + bool is_have_huge_clob = false; #ifdef 
ENABLE_MULTIPLE_NODES
    if (IS_PGXC_COORDINATOR && (t_thrd.proc->workingVersionNum >= STP_SUPPORT_COMMIT_ROLLBACK)) {
@@ -2242,14 +2316,15 @@ static Datum ExecMakeFunctionResultNoSets(
                 node->atomic = true;
                 stp_set_commit_rollback_err_msg(STP_XACT_GUC_IN_OPT_CLAUSE);
             }
-            Datum datum = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_prokind, &isNullSTP);
-            if (isNullSTP) {
-                ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT),
-                        errmsg("cache lookup failed for Anum_pg_proc_prokind"),
-                        errdetail("N/A"),
-                        errcause("System error."),
-                        erraction("Contact Huawei Engineer.")));
+            /* an immutable or stable function should not support commit/rollback */
+            bool isNullVolatile = false;
+            Datum provolatile = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_provolatile, &isNullVolatile);
+            if (!isNullVolatile && DatumGetChar(provolatile) != PROVOLATILE_VOLATILE) {
+                node->atomic = true;
+                stp_set_commit_rollback_err_msg(STP_XACT_IMMUTABLE);
             }
+
+            Datum datum = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_prokind, &isNullSTP);
             proIsProcedure = PROC_IS_PRO(CharGetDatum(datum));
             if (proIsProcedure) {
                 fcache->prokind = 'p';
@@ -2318,6 +2393,7 @@ static Datum ExecMakeFunctionResultNoSets(
 
     i = 0;
     econtext->is_cursor = false;
+    u_sess->plsql_cxt.func_tableof_index = NIL;
     foreach (arg, fcache->args) {
         ExprState* argstate = (ExprState*)lfirst(arg);
 
@@ -2325,6 +2401,34 @@ static Datum ExecMakeFunctionResultNoSets(
         if (has_refcursor && fcinfo->argTypes[i] == REFCURSOROID)
             econtext->is_cursor = true;
         fcinfo->arg[i] = ExecEvalExpr(argstate, econtext, &fcinfo->argnull[i], NULL);
+        if (is_external_clob(fcinfo->argTypes[i], fcinfo->argnull[i], fcinfo->arg[i])) {
+            bool is_null = false;
+            struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(fcinfo->arg[i]));
+            fcinfo->arg[i] = fetch_lob_value_from_tuple(lob_pointer, InvalidOid, &is_null, &is_have_huge_clob);
+        }
+        ExecTableOfIndexInfo execTableOfIndexInfo;
+        initExecTableOfIndexInfo(&execTableOfIndexInfo, econtext);
+        ExecEvalParamExternTableOfIndex((Node*)argstate->expr, &execTableOfIndexInfo);
+        if (execTableOfIndexInfo.tableOfIndex != NULL) {
+            if (!IsTableOfFunc(fcache->func.fn_oid)) {
+                MemoryContext oldCxt = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER));
+                PLpgSQL_func_tableof_index* func_tableof =
+                    (PLpgSQL_func_tableof_index*)palloc0(sizeof(PLpgSQL_func_tableof_index));
+                func_tableof->varno = i;
+                func_tableof->tableOfIndexType = execTableOfIndexInfo.tableOfIndexType;
+                func_tableof->tableOfIndex = copyTableOfIndex(execTableOfIndexInfo.tableOfIndex);
+                u_sess->plsql_cxt.func_tableof_index = lappend(u_sess->plsql_cxt.func_tableof_index, func_tableof);
+                MemoryContextSwitchTo(oldCxt);
+            }
+
+            u_sess->SPI_cxt.cur_tableof_index->tableOfIndexType = execTableOfIndexInfo.tableOfIndexType;
+            u_sess->SPI_cxt.cur_tableof_index->tableOfIndex = execTableOfIndexInfo.tableOfIndex;
+            u_sess->SPI_cxt.cur_tableof_index->tableOfNestLayer = execTableOfIndexInfo.tableOfLayers;
+            /* For nested-table output, ExecEvalArrayRef saves this var's layer in
+             * tableOfGetNestLayer, or sets it to zero to fetch the whole nested table.
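+             * It is initialized to -1 here, meaning the layer is not resolved yet.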
+             */
+            u_sess->SPI_cxt.cur_tableof_index->tableOfGetNestLayer = -1;
+        }
+
         if (has_refcursor && econtext->is_cursor) {
             var_dno[i] = econtext->dno;
             CopyCursorInfoData(&fcinfo->refcursor_data.argCursor[i], &econtext->cursor_data);
@@ -2353,14 +2457,18 @@ static Datum ExecMakeFunctionResultNoSets(
 
     pgstat_init_function_usage(fcinfo, &fcusage);
 
+    if (fcinfo->flinfo->fn_addr != textcat && is_have_huge_clob) {
+        ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("huge clob is not supported as a function input parameter")));
+    }
     fcinfo->isnull = false;
     if (u_sess->instr_cxt.global_instr != NULL && fcinfo->flinfo->fn_addr == plpgsql_call_handler) {
         StreamInstrumentation* save_global_instr = u_sess->instr_cxt.global_instr;
         u_sess->instr_cxt.global_instr = NULL;
-        result = FunctionCallInvoke(fcinfo);
+        result = FunctionCallInvoke(fcinfo); // node may be freed inside this call or later; do not free it again
         u_sess->instr_cxt.global_instr = save_global_instr;
-    }
-    else {
+    } else {
         result = FunctionCallInvoke(fcinfo);
     }
     *isNull = fcinfo->isnull;
@@ -2408,13 +2516,14 @@
         pfree_ext(var_dno);
     }
 
-    pfree_ext(node);
-
     u_sess->SPI_cxt.is_stp = savedIsSTP;
     u_sess->SPI_cxt.is_proconfig_set = savedProConfigIsSet;
     if (needResetErrMsg) {
         stp_reset_commit_rolback_err_msg();
     }
+
+    set_result_for_plpgsql_language_function_with_outparam(fcache, &result, isNull);
+
     return result;
 }
 
@@ -2496,6 +2605,7 @@ Tuplestorestate* ExecMakeTableFunctionResult(
     bool first_time = true;
     int* var_dno = NULL;
     bool has_refcursor = false;
+    bool has_out_param = false;
 
     FuncExpr *fexpr = NULL;
     bool savedIsSTP = u_sess->SPI_cxt.is_stp;
@@ -2534,7 +2644,15 @@ Tuplestorestate* ExecMakeTableFunctionResult(
         if (!HeapTupleIsValid(tp)) {
             elog(ERROR, "cache lookup failed for function %u", fexpr->funcid);
         }
-
+
+        /* immutable or stable functions do not support commit/rollback */
+        bool isNullVolatile = false;
+        Datum provolatile = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_provolatile, &isNullVolatile);
+        if (!isNullVolatile && DatumGetChar(provolatile) != PROVOLATILE_VOLATILE) {
+            node->atomic = true;
+            stp_set_commit_rollback_err_msg(STP_XACT_IMMUTABLE);
+        }
+
         Datum datum = SysCacheGetAttr(PROCOID, tp, Anum_pg_proc_prokind, &isNull);
         proIsProcedure = PROC_IS_PRO(CharGetDatum(datum));
         if (proIsProcedure) {
@@ -2622,6 +2740,11 @@
 
     has_refcursor = func_has_refcursor_args(fcinfo.flinfo->fn_oid, &fcinfo);
 
+    has_out_param = (is_function_with_plpgsql_language_and_outparam(fcinfo.flinfo->fn_oid) != InvalidOid);
+    if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && has_out_param) {
+        returnsTuple = type_is_rowtype(RECORDOID);
+    }
+
     int cursor_return_number = fcinfo.refcursor_data.return_number;
     if (cursor_return_number > 0) {
         /* init returnCursor to store out-args cursor info on FunctionScan context*/
@@ -2761,7 +2884,7 @@ Tuplestorestate* ExecMakeTableFunctionResult(
      * set, we fall out of the loop; we'll cons up an all-nulls result
      * row below.
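+     * (The !has_out_param condition added below keeps functions with OUT
+     * parameters from taking this early-exit path.)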
      */
-        if (returnsTuple && fcinfo.isnull) {
+        if (returnsTuple && fcinfo.isnull && !has_out_param) {
             if (!returnsSet) {
                 break;
             }
@@ -2972,8 +3095,8 @@ static Datum ExecEvalFunc(FuncExprState* fcache, ExprContext* econtext, bool* is
 
         if (HeapTupleIsValid(cast_tuple)) {
             Relation cast_rel = heap_open(CastRelationId, AccessShareLock);
-            uint32 castowner_Anum = Anum_pg_cast_castowner;
-            if (castowner_Anum <= HeapTupleHeaderGetNatts(cast_tuple->t_data, cast_rel->rd_att)) {
+            int castowner_Anum = Anum_pg_cast_castowner;
+            if (castowner_Anum <= (int)HeapTupleHeaderGetNatts(cast_tuple->t_data, cast_rel->rd_att)) {
                 bool isnull = true;
                 Datum datum = fastgetattr(cast_tuple, Anum_pg_cast_castowner, cast_rel->rd_att, &isnull);
                 if (!isnull) {
@@ -4317,6 +4440,54 @@ static Datum ExecEvalNullIf(FuncExprState* nullIfExpr, ExprContext* econtext, bo
     return fcinfo->arg[0];
 }
 
+static Datum CheckRowTypeIsNull(TupleDesc tupDesc, HeapTupleData tmptup, NullTest *ntest)
+{
+    int att;
+
+    for (att = 1; att <= tupDesc->natts; att++) {
+        /* ignore dropped columns */
+        if (tupDesc->attrs[att - 1]->attisdropped)
+            continue;
+        if (tableam_tops_tuple_attisnull(&tmptup, att, tupDesc)) {
+            /* null field disproves IS NOT NULL */
+            if (ntest->nulltesttype == IS_NOT_NULL)
+                return BoolGetDatum(false);
+        } else {
+            /* non-null field disproves IS NULL */
+            if (ntest->nulltesttype == IS_NULL)
+                return BoolGetDatum(false);
+        }
+    }
+
+    return BoolGetDatum(true);
+}
+
+static Datum CheckRowTypeIsNullForAFormat(TupleDesc tupDesc, HeapTupleData tmptup, NullTest *ntest)
+{
+    int att;
+
+    for (att = 1; att <= tupDesc->natts; att++) {
+        /* ignore dropped columns */
+        if (tupDesc->attrs[att - 1]->attisdropped)
+            continue;
+        if (!tableam_tops_tuple_attisnull(&tmptup, att, tupDesc)) {
+            /* a non-null field settles the test: IS NULL fails, IS NOT NULL succeeds */
+            if (ntest->nulltesttype == IS_NULL) {
+                return BoolGetDatum(false);
+            } else {
+                return BoolGetDatum(true);
+            }
+        }
+    }
+
+    /* every field is null: IS NULL succeeds, IS NOT NULL fails */
+    if (ntest->nulltesttype == IS_NULL) {
+        return BoolGetDatum(true);
+    } else {
+        return BoolGetDatum(false);
+    }
+}
+
 /* ----------------------------------------------------------------
  *		ExecEvalNullTest
  *
@@ -4365,7 +4536,6 @@ static Datum ExecEvalNullTest(NullTestState* nstate, ExprContext* econtext, bool
         int32 tupTypmod;
         TupleDesc tupDesc;
         HeapTupleData tmptup;
-        int att;
 
         tuple = DatumGetHeapTupleHeader(result);
 
@@ -4381,22 +4551,11 @@ static Datum ExecEvalNullTest(NullTestState* nstate, ExprContext* econtext, bool
         tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple);
         tmptup.t_data = tuple;
 
-        for (att = 1; att <= tupDesc->natts; att++) {
-            /* ignore dropped columns */
-            if (tupDesc->attrs[att - 1]->attisdropped)
-                continue;
-            if (tableam_tops_tuple_attisnull(&tmptup, att, tupDesc)) {
-                /* null field disproves IS NOT NULL */
-                if (ntest->nulltesttype == IS_NOT_NULL)
-                    return BoolGetDatum(false);
-            } else {
-                /* non-null field disproves IS NULL */
-                if (ntest->nulltesttype == IS_NULL)
-                    return BoolGetDatum(false);
-            }
+        if (AFORMAT_NULL_TEST_MODE) {
+            return CheckRowTypeIsNullForAFormat(tupDesc, tmptup, ntest);
+        } else {
+            return CheckRowTypeIsNull(tupDesc, tmptup, ntest);
         }
-
-        return BoolGetDatum(true);
     } else {
         /* Simple scalar-argument case, or a null rowtype datum */
         switch (ntest->nulltesttype) {
@@ -5647,20 +5806,6 @@ ExprState* ExecInitExpr(Expr* node, PlanState* parent)
             state = (ExprState*)rnstate;
             state->evalfunc = (ExprStateEvalFunc)ExecEvalRownum;
         } break;
-        case T_GradientDescentExpr: {
-            GradientDescentExprState* ml_state =
-                (GradientDescentExprState*)makeNode(GradientDescentExprState);
-            ml_state->ps = parent;
-            ml_state->xpr = (GradientDescentExpr*)node;
-            state = (ExprState*)ml_state;
-            if (IsA(parent, GradientDescentState)) {
-                state->evalfunc = (ExprStateEvalFunc)ExecEvalGradientDescent;
-            } else {
-                ereport(ERROR,
-                    (errmodule(MOD_DB4AI),
-                        errcode(ERRCODE_INVALID_OPERATION),
-                        errmsg("unrecognized state %d for GradientDescentExpr", parent->type)));
-            }
-        } break;
         default:
             ereport(ERROR,
                 (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE),
@@ -5822,6 +5967,95 @@ int ExecCleanTargetListLength(List* targetlist)
     return len;
 }
 
+HeapTuple get_tuple(Relation relation, ItemPointer tid)
+{
+    Buffer user_buf = InvalidBuffer;
+    HeapTuple tuple = NULL;
+    HeapTuple new_tuple = NULL;
+    TM_Result result;
+
+    /* alloc mem for old tuple and set tuple id */
+    tuple = (HeapTupleData *)heaptup_alloc(BLCKSZ);
+    tuple->t_data = (HeapTupleHeader)((char *)tuple + HEAPTUPLESIZE);
+    Assert(tid != NULL);
+    tuple->t_self = *tid;
+
+    if (heap_fetch(relation, SnapshotAny, tuple, &user_buf, false, NULL)) {
+        result = HeapTupleSatisfiesUpdate(tuple, GetCurrentCommandId(true), user_buf, false);
+        if (result != TM_Ok) {
+            ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("Failed to get tuple")));
+        }
+
+        new_tuple = heapCopyTuple((HeapTuple)tuple, relation->rd_att, NULL);
+        ReleaseBuffer(user_buf);
+    } else {
+        ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("The tuple was not found"),
+            errdetail("Another session may be modifying the tuple, or the datum is NULL")));
+    }
+
+    heap_freetuple(tuple);
+    return new_tuple;
+}
+
+bool is_external_clob(Oid type_oid, bool is_null, Datum value)
+{
+    if (type_oid == CLOBOID && !is_null && VARATT_IS_EXTERNAL_LOB(value)) {
+        return true;
+    }
+    return false;
+}
+
+Datum fetch_lob_value_from_tuple(varatt_lob_pointer* lob_pointer, Oid update_oid, bool* is_null, bool* is_huge_clob)
+{
+    /* get relation by relid */
+    ItemPointerData tuple_ctid;
+    tuple_ctid.ip_blkid.bi_hi = lob_pointer->bi_hi;
+    tuple_ctid.ip_blkid.bi_lo = lob_pointer->bi_lo;
+    tuple_ctid.ip_posid = lob_pointer->ip_posid;
+    Relation relation = heap_open(lob_pointer->relid, RowExclusiveLock);
+    HeapTuple origin_tuple = get_tuple(relation, &tuple_ctid);
+    if (!HeapTupleIsValid(origin_tuple)) {
+        ereport(ERROR,
+            (errcode(ERRCODE_CACHE_LOOKUP_FAILED),
+                errmsg("cache lookup failed for tuple from relation %u", lob_pointer->relid)));
+    }
+
+    Datum attr = fastgetattr(origin_tuple, lob_pointer->columid, relation->rd_att, is_null);
+    if (!*is_null && VARATT_IS_HUGE_TOAST_POINTER(attr) && is_huge_clob != NULL) {
+        *is_huge_clob = true;
+    }
+
+    if (!OidIsValid(update_oid)) {
+        heap_close(relation, NoLock);
+        return attr;
+    }
+    Datum new_attr = (Datum)0;
+    if (*is_null) {
+        new_attr = (Datum)0;
+    } else {
+        if (VARATT_IS_SHORT(attr) || VARATT_IS_EXTERNAL(attr)) {
+            new_attr = PointerGetDatum(attr);
+        } else if (VARATT_IS_HUGE_TOAST_POINTER(attr)) {
+            if (unlikely(origin_tuple->tupTableType == UHEAP_TUPLE)) {
+                ereport(ERROR,
+                    (errcode(ERRCODE_INVALID_NAME),
+                        errmsg("UStore cannot update a clob column that is larger than 1GB")));
+            }
+            Relation update_rel = heap_open(update_oid, RowExclusiveLock);
+            struct varlena *old_value = (struct varlena *)DatumGetPointer(attr);
+            struct varlena *new_value = heap_tuple_fetch_and_copy(update_rel, old_value, false);
+            new_attr = PointerGetDatum(new_value);
+            heap_close(update_rel, NoLock);
+        } else {
+            ereport(ERROR, (errcode(ERRCODE_SYSTEM_ERROR),
+                errmsg("lob value fetched from the tuple has an unrecognized type."),
errdetail("lob type is not one of the existing types"))); + } + } + heap_close(relation, NoLock); + return new_attr; +} + /* * ExecTargetList * Evaluates a targetlist with respect to the given @@ -5870,6 +6104,33 @@ static bool ExecTargetList(List* targetlist, ExprContext* econtext, Datum* value } } + + bool isClobAndNotNull = false; + isClobAndNotNull = (IsA(tle->expr, Param)) && (!isnull[resind]) && (((Param*)tle->expr)->paramtype == CLOBOID + || ((Param*)tle->expr)->paramtype == BLOBOID); + if (isClobAndNotNull) { + /* if type is lob pointer, we should fetch real value from tuple. */ + if (VARATT_IS_EXTERNAL_LOB(values[resind])) { + struct varatt_lob_pointer* lob_pointer = (varatt_lob_pointer*)(VARDATA_EXTERNAL(values[resind])); + bool is_null = false; + if (econtext->ecxt_scantuple != NULL) { + Oid update_oid = ((HeapTuple)(econtext->ecxt_scantuple->tts_tuple))->t_tableOid; + values[resind] = fetch_lob_value_from_tuple(lob_pointer, update_oid, &is_null, NULL); + } else { + ItemPointerData tuple_ctid; + tuple_ctid.ip_blkid.bi_hi = lob_pointer->bi_hi; + tuple_ctid.ip_blkid.bi_lo = lob_pointer->bi_lo; + tuple_ctid.ip_posid = lob_pointer->ip_posid; + Relation relation = heap_open(lob_pointer->relid, RowExclusiveLock); + HeapTuple origin_tuple = get_tuple(relation, &tuple_ctid); + Datum attr = fastgetattr(origin_tuple, lob_pointer->columid, relation->rd_att, &is_null); + values[resind] = attr; + heap_close(relation, NoLock); + } + isnull[resind] = is_null; + } + } + ELOG_FIELD_NAME_END; if (itemIsDone[resind] != ExprSingleResult) { @@ -6016,6 +6277,9 @@ TupleTableSlot* ExecProject(ProjectionInfo* projInfo, ExprDoneCond* isDone) if (numSimpleVars > 0) { Datum* values = slot->tts_values; bool* isnull = slot->tts_isnull; +#ifndef ENABLE_MULTIPLE_NODES + Datum* lobPointers = slot->tts_lobPointers; +#endif int* varSlotOffsets = projInfo->pi_varSlotOffsets; int* varNumbers = projInfo->pi_varNumbers; int i; @@ -6029,7 +6293,26 @@ TupleTableSlot* ExecProject(ProjectionInfo* projInfo, ExprDoneCond* isDone) Assert (varNumber < varSlot->tts_tupleDescriptor->natts); Assert (i < slot->tts_tupleDescriptor->natts); +#ifndef ENABLE_MULTIPLE_NODES + Form_pg_attribute attr = varSlot->tts_tupleDescriptor->attrs[varNumber]; + if (t_thrd.xact_cxt.isSelectInto && (attr->atttypid == CLOBOID || attr->atttypid == BLOBOID)) { + struct varlena *toast_pointer_lob = NULL; + toast_pointer_lob = toast_pointer_fetch_data(varSlot, attr, varNumber); + Assert(toast_pointer_lob != NULL); + if (!projInfo->pi_topPlan) { + values[i] = varSlot->tts_values[varNumber]; + lobPointers[i] = PointerGetDatum(toast_pointer_lob); + } else { + values[i] = PointerGetDatum(toast_pointer_lob); + lobPointers[i] = (Datum)0; + } + } else { + values[i] = varSlot->tts_values[varNumber]; + lobPointers[i] = (Datum)0; + } +#else values[i] = varSlot->tts_values[varNumber]; +#endif isnull[i] = varSlot->tts_isnull[varNumber]; } } else { diff --git a/src/gausskernel/runtime/executor/execReplication.cpp b/src/gausskernel/runtime/executor/execReplication.cpp index 5b0025de3..695266eae 100644 --- a/src/gausskernel/runtime/executor/execReplication.cpp +++ b/src/gausskernel/runtime/executor/execReplication.cpp @@ -22,6 +22,7 @@ #include "commands/trigger.h" #include "commands/cluster.h" #include "catalog/pg_partition_fn.h" +#include "catalog/pg_publication.h" #include "executor/executor.h" #include "executor/node/nodeModifyTable.h" #include "nodes/nodeFuncs.h" @@ -73,6 +74,11 @@ static bool build_replindex_scan_key(ScanKey skey, Relation rel, Relation idxrel 
int pkattno = attoff + 1; int mainattno = indkey->values[attoff]; Oid optype = get_opclass_input_type(opclass->values[attoff]); + if (mainattno > searchslot->tts_tupleDescriptor->natts) { + ereport(ERROR, (errcode(ERRCODE_INVALID_ATTRIBUTE), + errmsg("index key attribute number %d exceeds number of columns %d", + mainattno, searchslot->tts_tupleDescriptor->natts))); + } /* * Load the operator info. We need this to get the equality operator @@ -140,18 +146,22 @@ static void inline CheckTupleModifyRes(TM_Result res) } } +static inline List* GetPartitionList(Relation rel, LOCKMODE lockmode) +{ + if (RelationIsSubPartitioned(rel)) { + return RelationGetSubPartitionList(rel, lockmode); + } else { + return relationGetPartitionList(rel, lockmode); + } +} + static bool PartitionFindReplTupleByIndex(EState *estate, Relation rel, Relation idxrel, LockTupleMode lockmode, TupleTableSlot *searchslot, TupleTableSlot *outslot, FakeRelationPartition *fakeRelInfo) { /* must be non-GPI index */ Assert(!RelationIsGlobalIndex(idxrel)); - if (RelationIsSubPartitioned(rel)) { - fakeRelInfo->partList = RelationGetSubPartitionList(rel, RowExclusiveLock); - } else { - fakeRelInfo->partList = relationGetPartitionList(rel, RowExclusiveLock); - } - + fakeRelInfo->partList = GetPartitionList(rel, RowExclusiveLock); /* search the tuple in partition list one by one */ ListCell *cell = NULL; foreach (cell, fakeRelInfo->partList) { @@ -191,12 +201,7 @@ static bool PartitionFindReplTupleByIndex(EState *estate, Relation rel, Relation static bool PartitionFindReplTupleSeq(Relation rel, LockTupleMode lockmode, TupleTableSlot *searchslot, TupleTableSlot *outslot, FakeRelationPartition *fakeRelInfo) { - if (RelationIsSubPartitioned(rel)) { - fakeRelInfo->partList = RelationGetSubPartitionList(rel, RowExclusiveLock); - } else { - fakeRelInfo->partList = relationGetPartitionList(rel, RowExclusiveLock); - } - + fakeRelInfo->partList = GetPartitionList(rel, RowExclusiveLock); ListCell *cell = NULL; foreach (cell, fakeRelInfo->partList) { Partition heapPart = (Partition)lfirst(cell); @@ -302,77 +307,79 @@ static bool RelationFindReplTupleByIndex(EState *estate, Relation rel, Relation /* Build scan key. */ build_replindex_scan_key(skey, targetRel, idxrel, searchslot); -retry: - found = false; - - scan_handler_idx_rescan(scan, skey, IndexRelationGetNumberOfKeyAttributes(idxrel), NULL, 0); - - /* Try to find the tuple */ - if (RelationIsUstoreFormat(targetRel)) { - found = IndexGetnextSlot(scan, ForwardScanDirection, outslot); - } else { - if ((scantuple = scan_handler_idx_getnext(scan, ForwardScanDirection)) != NULL) { - found = true; - ExecStoreTuple(scantuple, outslot, InvalidBuffer, false); - } - } - - /* Found tuple, try to lock it in the lockmode. */ - if (found) { - outslot->tts_tuple = ExecMaterializeSlot(outslot); - xwait = TransactionIdIsValid(snap.xmin) ? snap.xmin : snap.xmax; - /* - * If the tuple is locked, wait for locking transaction to finish - * and retry. 
- */ - if (TransactionIdIsValid(xwait)) { - XactLockTableWait(xwait); - goto retry; - } - - Buffer buf; - TM_FailureData hufd; - TM_Result res; - Tuple locktup; - HeapTupleData heaplocktup; - UHeapTupleData UHeaplocktup; - struct { - UHeapDiskTupleData hdr; - char data[MaxPossibleUHeapTupleSize]; - } tbuf; - ItemPointer tid = tableam_tops_get_t_self(targetRel, outslot->tts_tuple); + while (true) { + found = false; + scan_handler_idx_rescan(scan, skey, IndexRelationGetNumberOfKeyAttributes(idxrel), NULL, 0); + /* Try to find the tuple */ if (RelationIsUstoreFormat(targetRel)) { - ItemPointerCopy(tid, &UHeaplocktup.ctid); - rc = memset_s(&tbuf, sizeof(tbuf), 0, sizeof(tbuf)); - securec_check(rc, "\0", "\0"); - UHeaplocktup.disk_tuple = &tbuf.hdr; - locktup = &UHeaplocktup; + found = IndexGetnextSlot(scan, ForwardScanDirection, outslot); } else { - ItemPointerCopy(tid, &heaplocktup.t_self); - locktup = &heaplocktup; + if ((scantuple = scan_handler_idx_getnext(scan, ForwardScanDirection)) != NULL) { + found = true; + ExecStoreTuple(scantuple, outslot, InvalidBuffer, false); + } } + if (found) { + /* Found tuple, try to lock it in the lockmode. */ + outslot->tts_tuple = ExecMaterializeSlot(outslot); + xwait = TransactionIdIsValid(snap.xmin) ? snap.xmin : snap.xmax; + /* + * If the tuple is locked, wait for locking transaction to finish + * and retry. + */ + if (TransactionIdIsValid(xwait)) { + XactLockTableWait(xwait); + continue; + } - /* Get the target tuple's partition for GPI */ - if (isGpi) { - GetFakeRelAndPart(estate, rel, outslot, fakeRelPart); - targetRel = fakeRelPart->partRel; - } - - PushActiveSnapshot(GetLatestSnapshot()); - res = tableam_tuple_lock(targetRel, - locktup, &buf, GetCurrentCommandId(false), lockmode, false, &hufd, - false, false, /* don't follow updates */ - false, /* eval */ - GetLatestSnapshot(), tid, /* ItemPointer */ - false); /* is select for update */ - /* the tuple slot already has the buffer pinned */ - ReleaseBuffer(buf); - PopActiveSnapshot(); - - if (CheckTupleLockRes(res)) { - goto retry; + Buffer buf; + TM_FailureData hufd; + TM_Result res; + Tuple locktup; + HeapTupleData heaplocktup; + UHeapTupleData UHeaplocktup; + struct { + UHeapDiskTupleData hdr; + char data[MaxPossibleUHeapTupleSize]; + } tbuf; + ItemPointer tid = tableam_tops_get_t_self(targetRel, outslot->tts_tuple); + + if (RelationIsUstoreFormat(targetRel)) { + ItemPointerCopy(tid, &UHeaplocktup.ctid); + rc = memset_s(&tbuf, sizeof(tbuf), 0, sizeof(tbuf)); + securec_check(rc, "\0", "\0"); + UHeaplocktup.disk_tuple = &tbuf.hdr; + locktup = &UHeaplocktup; + } else { + ItemPointerCopy(tid, &heaplocktup.t_self); + locktup = &heaplocktup; + } + + /* Get the target tuple's partition for GPI */ + if (isGpi) { + GetFakeRelAndPart(estate, rel, outslot, fakeRelPart); + targetRel = fakeRelPart->partRel; + } + + PushActiveSnapshot(GetLatestSnapshot()); + res = tableam_tuple_lock(targetRel, + locktup, &buf, GetCurrentCommandId(false), lockmode, false, &hufd, + false, false, /* don't follow updates */ + false, /* eval */ + GetLatestSnapshot(), tid, /* ItemPointer */ + false); /* is select for update */ + /* the tuple slot already has the buffer pinned */ + ReleaseBuffer(buf); + PopActiveSnapshot(); + + if (CheckTupleLockRes(res)) { + /* lock tuple failed, try again */ + continue; + } } + /* we are done */ + break; } scan_handler_idx_endscan(scan); @@ -449,6 +456,7 @@ static bool RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, Tuple int rc; Relation targetRel = fakeRelPart->partRel == NULL ? 
rel : fakeRelPart->partRel; TupleDesc desc = RelationGetDescr(rel); + bool retry = false; Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor)); eq = (TypeCacheEntry **)palloc0(sizeof(*eq) * outslot->tts_tupleDescriptor->natts); @@ -457,75 +465,83 @@ static bool RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, Tuple InitDirtySnapshot(snap); scan = scan_handler_tbl_beginscan(targetRel, &snap, 0, NULL, NULL); -retry: - found = false; + while (true) { + retry = false; + found = false; + scan_handler_tbl_rescan(scan, NULL, targetRel); - scan_handler_tbl_rescan(scan, NULL, targetRel); + /* Try to find the tuple */ + while ((scantuple = scan_handler_tbl_getnext(scan, ForwardScanDirection, targetRel)) != NULL) { + if (!tuple_equals_slot(desc, scantuple, searchslot, eq)) { + continue; + } - /* Try to find the tuple */ - while ((scantuple = scan_handler_tbl_getnext(scan, ForwardScanDirection, targetRel)) != NULL) { - if (!tuple_equals_slot(desc, scantuple, searchslot, eq)) + found = true; + ExecStoreTuple(scantuple, outslot, InvalidBuffer, false); + outslot->tts_tuple = ExecMaterializeSlot(outslot); + + xwait = TransactionIdIsValid(snap.xmin) ? snap.xmin : snap.xmax; + /* + * If the tuple is locked, wait for locking transaction to finish + * and retry. + */ + if (TransactionIdIsValid(xwait)) { + /* retry */ + retry = true; + XactLockTableWait(xwait); + } + break; + } + + if (retry) { continue; - - found = true; - ExecStoreTuple(scantuple, outslot, InvalidBuffer, false); - outslot->tts_tuple = ExecMaterializeSlot(outslot); - - xwait = TransactionIdIsValid(snap.xmin) ? snap.xmin : snap.xmax; - /* - * If the tuple is locked, wait for locking transaction to finish - * and retry. - */ - if (TransactionIdIsValid(xwait)) { - XactLockTableWait(xwait); - goto retry; } + if (found) { + /* Found tuple, try to lock it in the lockmode. */ + Buffer buf; + TM_FailureData hufd; + TM_Result res; + Tuple locktup = NULL; + HeapTupleData heaplocktup; + UHeapTupleData UHeaplocktup; + struct { + UHeapDiskTupleData hdr; + char data[MaxPossibleUHeapTupleSize]; + } tbuf; + ItemPointer tid = tableam_tops_get_t_self(rel, outslot->tts_tuple); - /* Found our tuple and it's not locked */ + if (RelationIsUstoreFormat(targetRel)) { + ItemPointerCopy(tid, &UHeaplocktup.ctid); + rc = memset_s(&tbuf, sizeof(tbuf), 0, sizeof(tbuf)); + securec_check(rc, "\0", "\0"); + UHeaplocktup.disk_tuple = &tbuf.hdr; + locktup = &UHeaplocktup; + } else { + ItemPointerCopy(tid, &heaplocktup.t_self); + locktup = &heaplocktup; + } + + PushActiveSnapshot(GetLatestSnapshot()); + res = tableam_tuple_lock(targetRel, locktup, &buf, GetCurrentCommandId(false), + lockmode, false, &hufd, false, + false, /* don't follow updates */ + false, /* eval */ + GetLatestSnapshot(), tid, /* ItemPointer */ + false); /* is select for update */ + + /* the tuple slot already has the buffer pinned */ + ReleaseBuffer(buf); + PopActiveSnapshot(); + + if (CheckTupleLockRes(res)) { + /* lock tuple failed, try again */ + continue; + } + } + /* we are done */ break; } - /* Found tuple, try to lock it in the lockmode. 
*/ - if (found) { - Buffer buf; - TM_FailureData hufd; - TM_Result res; - Tuple locktup = NULL; - HeapTupleData heaplocktup; - UHeapTupleData UHeaplocktup; - struct { - UHeapDiskTupleData hdr; - char data[MaxPossibleUHeapTupleSize]; - } tbuf; - ItemPointer tid = tableam_tops_get_t_self(rel, outslot->tts_tuple); - - if (RelationIsUstoreFormat(targetRel)) { - ItemPointerCopy(tid, &UHeaplocktup.ctid); - rc = memset_s(&tbuf, sizeof(tbuf), 0, sizeof(tbuf)); - securec_check(rc, "\0", "\0"); - UHeaplocktup.disk_tuple = &tbuf.hdr; - locktup = &UHeaplocktup; - } else { - ItemPointerCopy(tid, &heaplocktup.t_self); - locktup = &heaplocktup; - } - - PushActiveSnapshot(GetLatestSnapshot()); - res = tableam_tuple_lock(targetRel, locktup, &buf, GetCurrentCommandId(false), lockmode, false, &hufd, false, - false, /* don't follow updates */ - false, /* eval */ - GetLatestSnapshot(), tid, /* ItemPointer */ - false); /* is select for update */ - - /* the tuple slot already has the buffer pinned */ - ReleaseBuffer(buf); - PopActiveSnapshot(); - - if (CheckTupleLockRes(res)) { - goto retry; - } - } - scan_handler_tbl_endscan(scan); return found; @@ -793,6 +809,7 @@ void CheckCmdReplicaIdentity(Relation rel, CmdType cmd) RelationGetRelationName(rel)), errhint("To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE."))); } + pfree(pubactions); } void GetFakeRelAndPart(EState *estate, Relation rel, TupleTableSlot *slot, FakeRelationPartition *relAndPart) diff --git a/src/gausskernel/runtime/executor/execTuples.cpp b/src/gausskernel/runtime/executor/execTuples.cpp index e1436ae7d..d2c0517c7 100644 --- a/src/gausskernel/runtime/executor/execTuples.cpp +++ b/src/gausskernel/runtime/executor/execTuples.cpp @@ -134,12 +134,13 @@ TupleTableSlot* MakeTupleTableSlot(bool has_tuple_mcxt, TableAmType tupslotTable slot->tts_values = NULL; slot->tts_isnull = NULL; slot->tts_mintuple = NULL; +#ifdef ENABLE_MULTIPLE_NODES slot->tts_per_tuple_mcxt = has_tuple_mcxt ? AllocSetContextCreate(slot->tts_mcxt, "SlotPerTupleMcxt", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE) : NULL; - +#endif slot->tts_tupslotTableAm = tupslotTableAm; return slot; @@ -194,6 +195,7 @@ void ExecResetTupleTable(List* tuple_table, /* tuple table */ pfree_ext(slot->tts_values); if (slot->tts_isnull) pfree_ext(slot->tts_isnull); + pfree_ext(slot->tts_lobPointers); if (slot->tts_per_tuple_mcxt) MemoryContextDelete(slot->tts_per_tuple_mcxt); pfree_ext(slot); @@ -263,6 +265,8 @@ void ExecDropSingleTupleTableSlot(TupleTableSlot* slot) pfree_ext(slot->tts_isnull); } + pfree_ext(slot->tts_lobPointers); + if (slot->tts_per_tuple_mcxt != NULL) { MemoryContextDelete(slot->tts_per_tuple_mcxt); } @@ -309,6 +313,7 @@ void ExecSetSlotDescriptor(TupleTableSlot* slot, /* slot to change */ if (slot->tts_isnull != NULL) { pfree_ext(slot->tts_isnull); } + pfree_ext(slot->tts_lobPointers); /* * Install the new descriptor; if it's refcounted, bump its refcount. 
*/ @@ -321,6 +326,7 @@ void ExecSetSlotDescriptor(TupleTableSlot* slot, /* slot to change */ */ slot->tts_values = (Datum*)MemoryContextAlloc(slot->tts_mcxt, tup_desc->natts * sizeof(Datum)); slot->tts_isnull = (bool*)MemoryContextAlloc(slot->tts_mcxt, tup_desc->natts * sizeof(bool)); + slot->tts_lobPointers = (Datum*)MemoryContextAlloc(slot->tts_mcxt, tup_desc->natts * sizeof(Datum)); } /* -------------------------------- @@ -377,7 +383,7 @@ TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, tuple = (Tuple)UHeapToHeap(slot->tts_tupleDescriptor, (UHeapTuple)tuple); } - tableam_tslot_store_tuple(tuple, slot, buffer, should_free); + tableam_tslot_store_tuple(tuple, slot, buffer, should_free, false); return slot; } @@ -438,7 +444,7 @@ TupleTableSlot* ExecClearTuple(TupleTableSlot* slot) /* return: slot passed slot slot->tts_shouldFree = false; slot->tts_shouldFreeMin = false; -#ifdef PGXC +#ifdef ENABLE_MULTIPLE_NODES if (slot->tts_shouldFreeRow) { pfree_ext(slot->tts_dataRow); } diff --git a/src/gausskernel/runtime/executor/execUtils.cpp b/src/gausskernel/runtime/executor/execUtils.cpp index 3ac236766..84fd23666 100644 --- a/src/gausskernel/runtime/executor/execUtils.cpp +++ b/src/gausskernel/runtime/executor/execUtils.cpp @@ -89,7 +89,7 @@ static bool check_violation(Relation heap, Relation index, IndexInfo *indexInfo, * CurrentMemoryContext. * ---------------- */ -EState* CreateExecutorState(void) +EState* CreateExecutorState(MemoryContext saveCxt) { EState* estate = NULL; MemoryContext qcontext; @@ -98,11 +98,15 @@ EState* CreateExecutorState(void) /* * Create the per-query context for this Executor run. */ - qcontext = AllocSetContextCreate(CurrentMemoryContext, - "ExecutorState", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + if (saveCxt != NULL) { + qcontext = saveCxt; + } else { + qcontext = AllocSetContextCreate(CurrentMemoryContext, + "ExecutorState", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + } /* * Make the EState node within the per-query context. 
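     * (With the saveCxt parameter added above, a caller-supplied context can be
     * reused as the per-query context instead of always creating a new one.)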
This way, we don't @@ -1494,6 +1498,7 @@ bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate, Relation ta bool isnull[INDEX_MAX_KEYS]; ItemPointerData invalidItemPtr; bool isPartitioned = false; + bool containGPI; List* partitionIndexOidList = NIL; Oid partoid; int2 bktid; @@ -1510,8 +1515,9 @@ bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate, Relation ta relationDescs = resultRelInfo->ri_IndexRelationDescs; indexInfoArray = resultRelInfo->ri_IndexRelationInfo; heapRelationDesc = resultRelInfo->ri_RelationDesc; + containGPI = resultRelInfo->ri_ContainGPI; actualHeap = targetRel; - + rc = memset_s(isnull, sizeof(isnull), 0, sizeof(isnull)); securec_check(rc, "", ""); @@ -1519,7 +1525,7 @@ bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate, Relation ta Assert(p != NULL && p->pd_part != NULL); isPartitioned = true; - if (!p->pd_part->indisusable) { + if (!p->pd_part->indisusable && !containGPI) { return true; } } @@ -1819,7 +1825,7 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e if (!indexRelation->rd_index->indisunique) { checkUnique = UNIQUE_CHECK_NO; } else if (conflict != NULL) { - checkUnique = UNIQUE_CHECK_PARTIAL; + checkUnique = UNIQUE_CHECK_UPSERT; } else if (indexRelation->rd_index->indimmediate) { checkUnique = UNIQUE_CHECK_YES; } else { @@ -1851,7 +1857,7 @@ List* ExecInsertIndexTuples(TupleTableSlot* slot, ItemPointer tupleid, EState* e actualheap, actualindex, indexInfo, tupleid, values, isnull, estate, false, errorOK); } - if ((checkUnique == UNIQUE_CHECK_PARTIAL || indexInfo->ii_ExclusionOps != NULL) && !satisfiesConstraint) { + if ((IndexUniqueCheckNoError(checkUnique) || indexInfo->ii_ExclusionOps != NULL) && !satisfiesConstraint) { /* * The tuple potentially violates the uniqueness or exclusion * constraint, so make a note of the index so that we can re-check @@ -2035,7 +2041,7 @@ retry: /* If lossy indexscan, must recheck the condition */ if (is_scan) { /* tuple doesn't actually match, so no conflict */ - continue; + continue; } /* @@ -2062,8 +2068,8 @@ retry: goto retry; } - /* Determine whether the index column of the scanned tuple is the same - * as that of the tuple to be inserted. If not, the tuple pointed to by + /* Determine whether the index column of the scanned tuple is the same + * as that of the tuple to be inserted. If not, the tuple pointed to by * the item has been modified by other transactions. Check again for any conflicts. */ for (int i=0; i < indnkeyatts; i++) { @@ -2072,8 +2078,8 @@ retry: scan_handler_idx_endscan(index_scan); goto retry; } - if (!existing_isnull[i] && - !DatumGetBool(FunctionCall2Coll(&scankeys[i].sk_func, scankeys[i].sk_collation, + if (!existing_isnull[i] && + !DatumGetBool(FunctionCall2Coll(&scankeys[i].sk_func, scankeys[i].sk_collation, existing_values[i], values[i]))) { conflict = false; scan_handler_idx_endscan(index_scan); @@ -2103,7 +2109,7 @@ retry: */ error_new = BuildIndexValueDescription(index, values, isnull); error_existing = BuildIndexValueDescription(index, existing_values, existing_isnull); - newIndex ? + newIndex ? 
ereport(ERROR,
                (errcode(ERRCODE_EXCLUSION_VIOLATION),
                    errmsg("could not create exclusion constraint \"%s\" when trying to build a new index",
@@ -2282,6 +2288,7 @@ static void ShutdownExprContext(ExprContext* econtext, bool isCommit)
     MemoryContextSwitchTo(oldcontext);
 }
 
+
 int PthreadMutexLock(ResourceOwner owner, pthread_mutex_t* mutex, bool trace)
 {
     HOLD_INTERRUPTS();
@@ -2320,3 +2327,106 @@ int PthreadMutexUnlock(ResourceOwner owner, pthread_mutex_t* mutex, bool trace)
 
     return ret;
 }
+
+int PthreadRWlockTryRdlock(ResourceOwner owner, pthread_rwlock_t* rwlock)
+{
+    if (owner) {
+        ResourceOwnerEnlargePthreadRWlock(owner);
+    }
+    int ret;
+    HOLD_INTERRUPTS();
+    ret = pthread_rwlock_tryrdlock(rwlock);
+    if (ret == 0) {
+        if (owner) {
+            ResourceOwnerRememberPthreadRWlock(owner, rwlock);
+        } else {
+            START_CRIT_SECTION();
+        }
+    }
+    RESUME_INTERRUPTS();
+    return ret;
+}
+
+void PthreadRWlockRdlock(ResourceOwner owner, pthread_rwlock_t* rwlock)
+{
+    if (owner) {
+        ResourceOwnerEnlargePthreadRWlock(owner);
+    }
+    HOLD_INTERRUPTS();
+    int ret = pthread_rwlock_rdlock(rwlock);
+    Assert(ret == 0);
+    if (ret != 0) {
+        ereport(ERROR,
+            (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("acquire rdlock failed")));
+    }
+    if (owner) {
+        ResourceOwnerRememberPthreadRWlock(owner, rwlock);
+    } else {
+        START_CRIT_SECTION();
+    }
+    RESUME_INTERRUPTS();
+}
+
+int PthreadRWlockTryWrlock(ResourceOwner owner, pthread_rwlock_t* rwlock)
+{
+    if (owner) {
+        ResourceOwnerEnlargePthreadRWlock(owner);
+    }
+    HOLD_INTERRUPTS();
+    int ret = pthread_rwlock_trywrlock(rwlock);
+    if (ret == 0) {
+        if (owner) {
+            ResourceOwnerRememberPthreadRWlock(owner, rwlock);
+        } else {
+            START_CRIT_SECTION();
+        }
+    }
+    RESUME_INTERRUPTS();
+    return ret;
+}
+
+void PthreadRWlockWrlock(ResourceOwner owner, pthread_rwlock_t* rwlock)
+{
+    if (owner) {
+        ResourceOwnerEnlargePthreadRWlock(owner);
+    }
+    HOLD_INTERRUPTS();
+    int ret = pthread_rwlock_wrlock(rwlock);
+    Assert(ret == 0);
+    if (ret != 0) {
+        ereport(ERROR,
+            (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("acquire wrlock failed")));
+    }
+    if (owner) {
+        ResourceOwnerRememberPthreadRWlock(owner, rwlock);
+    } else {
+        START_CRIT_SECTION();
+    }
+    RESUME_INTERRUPTS();
+}
+void PthreadRWlockUnlock(ResourceOwner owner, pthread_rwlock_t* rwlock)
+{
+    HOLD_INTERRUPTS();
+    int ret = pthread_rwlock_unlock(rwlock);
+    Assert(ret == 0);
+    if (ret != 0) {
+        ereport(ERROR,
+            (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("release rwlock failed")));
+    }
+    if (owner) {
+        ResourceOwnerForgetPthreadRWlock(owner, rwlock);
+    } else {
+        END_CRIT_SECTION();
+    }
+    RESUME_INTERRUPTS();
+}
+
+void PthreadRwLockInit(pthread_rwlock_t* rwlock, pthread_rwlockattr_t *attr)
+{
+    int ret = pthread_rwlock_init(rwlock, attr);
+    Assert(ret == 0);
+    if (ret != 0) {
+        ereport(ERROR,
+            (errcode(ERRCODE_INITIALIZE_FAILED), errmsg("init rwlock failed")));
+    }
+}
\ No newline at end of file
diff --git a/src/gausskernel/runtime/executor/functions.cpp b/src/gausskernel/runtime/executor/functions.cpp
index c6deba972..5eedd4bc5 100644
--- a/src/gausskernel/runtime/executor/functions.cpp
+++ b/src/gausskernel/runtime/executor/functions.cpp
@@ -239,9 +239,9 @@ SQLFunctionParseInfoPtr prepare_sql_fn_parse_info(HeapTuple procedure_tuple, Nod
 
     p_info->argtypes = arg_oid_vect;
 
-/*
- * Collect names of arguments, too, if any
- */
+    /*
+     * Collect names of arguments, too, if any
+     */
     Datum pro_arg_names;
     Datum pro_arg_modes;
     int n_arg_names;
@@ -730,7 +730,7 @@ static void init_sql_fcache(FmgrInfo* finfo, Oid collation, bool lazy_eval_ok)
      * coerce the returned rowtype to the desired form (unless the result type
      * is VOID, in which case there's nothing to coerce to).
      */
-    fcache->returnsTuple = check_sql_fn_retval(f_oid, ret_type, flat_query_list, NULL, &fcache->junkFilter);
+    fcache->returnsTuple = check_sql_fn_retval(f_oid, ret_type, flat_query_list, NULL, &fcache->junkFilter, false);
 
     if (fcache->returnsTuple) {
         /* Make sure output rowtype is properly blessed */
@@ -962,9 +962,7 @@ static void postquel_sub_params(SQLFunctionCachePtr fcache, FunctionCallInfo fci
             prm->isnull = fcinfo->argnull[i];
             prm->pflags = 0;
             prm->ptype = fcache->pinfo->argtypes[i];
-            prm->tableOfIndexType = InvalidOid;
-            prm->tableOfIndex = NULL;
-            prm->isnestedtable = false;
+            prm->tabInfo = NULL;
         }
     } else {
         fcache->paramLI = NULL;
@@ -1577,7 +1575,7 @@ bool check_sql_fn_retval(Oid func_id, Oid ret_type, List* query_tree_list, bool*
     ListCell* lc = NULL;
     bool gs_encrypted_proc_was_created = false;
     AssertArg(!IsPolymorphicType(ret_type));
-
+    CommandCounterIncrement();
     if (modify_target_list != NULL)
         *modify_target_list = false; /* initialize for no change */
     if (junk_filter != NULL)
@@ -1752,7 +1750,7 @@ bool check_sql_fn_retval(Oid func_id, Oid ret_type, List* query_tree_list, bool*
     col_index = 0;
     new_tlist = NIL; /* these are only used if modifyTargetList */
     junk_attrs = NIL;
-
+    Oid gsrelid = InvalidOid;
     foreach (lc, tlist) {
         TargetEntry* tle = (TargetEntry*)lfirst(lc);
         Form_pg_attribute attr;
@@ -1812,6 +1810,17 @@ bool check_sql_fn_retval(Oid func_id, Oid ret_type, List* query_tree_list, bool*
             if (!gs_encrypted_proc_was_created) {
                 all_types_orig = (Datum*)palloc(tup_natts * sizeof(Datum));
                 all_types = (Datum*)palloc(tup_natts * sizeof(Datum));
+                /* if the column result type is different from the function table result type */
+                if (attr->attrelid != tle->resorigtbl &&
+                    /* the column's relation is not temporary */
+                    attr->attrelid != 0 &&
+                    /* the column number matches its origin column */
+                    attr->attnum == tle->resorigcol) {
+                    /* if all the above conditions hold, then we might be returning a real table
+                       with the same structure but without client-logic columns - replace the data type */
+                    gsrelid = ObjectIdGetDatum(tle->resorigtbl);
+                }
+
                 for (int j = 0; j < col_index - 1; j++) {
                     all_types_orig[j] = -1;
                     all_types[j] = ObjectIdGetDatum(tup_desc->attrs[j]->atttypid);
@@ -1847,7 +1856,7 @@ bool check_sql_fn_retval(Oid func_id, Oid ret_type, List* query_tree_list, bool*
         }
     }
     if (gs_encrypted_proc_was_created) {
-        add_allargtypes_orig(func_id, all_types_orig, all_types, tup_natts);
+        add_allargtypes_orig(func_id, all_types_orig, all_types, tup_natts, gsrelid);
    }
 
     /* remaining columns in tupdesc had better all be dropped */
@@ -1874,7 +1883,6 @@ bool check_sql_fn_retval(Oid func_id, Oid ret_type, List* query_tree_list, bool*
             *modify_target_list = true;
         }
     }
-
     if (modify_target_list != NULL) {
         /* ensure resjunk columns are numbered correctly */
         foreach (lc, junk_attrs) {
@@ -2033,21 +2041,23 @@ void update_gs_encrypted_proc(const Oid func_id, SQLFunctionParseInfoPtr p_info,
     pfree_ext(allargs_orig);
 }
 
+static inline bool is_proargmode_any_input(char arg)
+{
+    return (arg == PROARGMODE_IN || arg == PROARGMODE_INOUT || arg == PROARGMODE_VARIADIC);
+}
+
 /*
  * Replace the parameters data types requested by the client with the encrypted "bytea" data types
  * add column setting oid info to gs_encrypted_proc data for stored procedure
 */
 bool sql_fn_cl_rewrite_params(const Oid func_id, SQLFunctionParseInfoPtr p_info, bool is_replace)
 {
-    bool nulls[Natts_pg_proc] = {0};
-    Datum values[Natts_pg_proc] = {0};
-    bool replaces[Natts_pg_proc] = {0};
-    Datum proallargtypes;
-    bool isNull = false;
-    Datum* allargs_orig = NULL;
-    int allnumargs = 0;
-    Relation rel = NULL;
-    Relation gs_rel = NULL;
+    bool is_supported_outparams_override = false;
+#ifndef ENABLE_MULTIPLE_NODES
+    is_supported_outparams_override = (t_thrd.proc->workingVersionNum >= 92470);
+#endif // ENABLE_MULTIPLE_NODES
+
+    CommandCounterIncrement(); // precaution: make earlier catalog changes visible
 
     HeapTuple tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_id));
     if (!HeapTupleIsValid(tuple)) {
@@ -2055,126 +2065,154 @@
             errmsg("cache lookup failed for function %u when initialize function cache.", func_id)));
     }
 
+    /* get tuple from pg_proc */
     Form_pg_proc oldproc = (Form_pg_proc)GETSTRUCT(tuple);
-    bool is_replaced = false;
+
+    /* get argmodes from tuple */
+    bool isNull = false;
+    Datum proargmodes = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proargmodes, &isNull);
+    char *argmodes = NULL;
+    ArrayType *argmodes_arr = NULL;
+    int n_modes = 0;
+    if (!isNull) {
+        argmodes_arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
+        n_modes = ARR_DIMS(argmodes_arr)[0];
+        bool is_char_oid_array =
+            ARR_NDIM(argmodes_arr) != 1 || ARR_HASNULL(argmodes_arr) || ARR_ELEMTYPE(argmodes_arr) != CHAROID;
+        if (is_char_oid_array) {
+            ereport(ERROR, (errcode(ERRCODE_ARRAY_ELEMENT_ERROR), errmsg("proargmodes is not a 1-D char array")));
+        }
+
+        argmodes = (char *)ARR_DATA_PTR(argmodes_arr);
+    }
+
+    /* get allargs from tuple if available */
+    oidvector *tup_allargs = NULL;
+    if (is_supported_outparams_override) {
+        tup_allargs = (oidvector *)DatumGetPointer(ProcedureGetAllArgTypes(tuple, &isNull));
+    }
+
+    /* replace argtypes and allargs data types from original to real data types */
+    bool is_any_replacement = false;
+    int out_count = 0;
     for (int i = 0; i < p_info->nargs; i++) {
-        if (p_info->replaced_argtypes[i] != 0 && oldproc->proargtypes.values[i] != p_info->replaced_argtypes[i]) {
-            is_replaced = true;
-            oldproc->proargtypes.values[i] = p_info->replaced_argtypes[i];
+        if (p_info->replaced_argtypes[i] == 0 || oldproc->proargtypes.values[i] == p_info->replaced_argtypes[i]) {
+            continue;
+        }
+
+        is_any_replacement = true;
+        oldproc->proargtypes.values[i] = p_info->replaced_argtypes[i];
+
+        if (argmodes != NULL) { /* skip the out params here.
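+                                   out_count counts the OUT entries in argmodes that precede
+                                   input argument i, so its slot in allargs is i + out_count.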
Allargs will have nargs input params */ + while (out_count <= (n_modes - p_info->nargs) && !is_proargmode_any_input(argmodes[i + out_count])) { + out_count++; + } + } + + if (tup_allargs != NULL) { + tup_allargs->values[i + out_count] = p_info->replaced_argtypes[i]; } } - if (!is_replaced) { + if (!is_any_replacement) { ReleaseSysCache(tuple); return false; } - /* support CREATE OR REPLACE with the same parameter types */ -#ifdef ENABLE_MULTIPLE_NODES - if (SearchSysCacheExists3(PROCNAMEARGSNSP, CStringGetDatum(NameStr(oldproc->proname)), - PointerGetDatum(&oldproc->proargtypes), ObjectIdGetDatum(oldproc->pronamespace))) { -#else - Datum packageidDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_packageid, &isNull); - Oid packageOid = ObjectIdGetDatum(packageidDatum); - if (!OidIsValid(packageOid)) { - packageidDatum = ObjectIdGetDatum(InvalidOid); - } - if (SearchSysCacheExists4(PROCALLARGS, CStringGetDatum(NameStr(oldproc->proname)), - PointerGetDatum(&oldproc->proargtypes), ObjectIdGetDatum(oldproc->pronamespace), ObjectIdGetDatum(packageidDatum))) { -#endif - rel = heap_open(ProcedureRelationId, RowExclusiveLock); - HeapTuple oldtup = NULL; -#ifndef ENABLE_MULTIPLE_NODES - if (t_thrd.proc->workingVersionNum < 92470) { - oldtup = SearchSysCache3(PROCNAMEARGSNSP, CStringGetDatum(NameStr(oldproc->proname)), - PointerGetDatum(&oldproc->proargtypes), ObjectIdGetDatum(oldproc->pronamespace)); - } else { - Datum allargtypes = ProcedureGetAllArgTypes(tuple, &isNull); - oldtup = SearchSysCache4(PROCALLARGS, CStringGetDatum(NameStr(oldproc->proname)), - allargtypes, ObjectIdGetDatum(oldproc->pronamespace), - ObjectIdGetDatum(packageOid)); - } -#else + /* + check if tuple already exists. + if is_replace == true then remove old tuple + otherwise return error to client + */ + HeapTuple oldtup = NULL; +#ifndef ENABLE_MULTIPLE_NODES + /* support CREATE OR REPLACE with the same parameter types */ + Datum packageidDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_packageid, &isNull); + if (!is_supported_outparams_override) { oldtup = SearchSysCache3(PROCNAMEARGSNSP, CStringGetDatum(NameStr(oldproc->proname)), - PointerGetDatum(&oldproc->proargtypes), ObjectIdGetDatum(oldproc->pronamespace)); -#endif - - if (is_replace) { - if (HeapTupleIsValid(oldtup)) { - Assert(oldtup != tuple); - HeapTuple old_gs_tup = SearchSysCache1(GSCLPROCID, ObjectIdGetDatum(HeapTupleGetOid(oldtup))); - deleteDependencyRecordsFor(ProcedureRelationId, HeapTupleGetOid(oldtup), true); - simple_heap_delete(rel, &oldtup->t_self); - ReleaseSysCache(oldtup); - if (HeapTupleIsValid(old_gs_tup)) { - gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock); - deleteDependencyRecordsFor(ClientLogicProcId, HeapTupleGetOid(old_gs_tup), true); - simple_heap_delete(gs_rel, &old_gs_tup->t_self); - heap_close(gs_rel, RowExclusiveLock); - ReleaseSysCache(old_gs_tup); - } - } - } else { - /* caller should handle this case - function already exists and it is not replaced */ - return true; - } + PointerGetDatum(&oldproc->proargtypes), ObjectIdGetDatum(oldproc->pronamespace)); + } else { + oldtup = SearchSysCacheForProcAllArgs(CStringGetDatum(NameStr(oldproc->proname)), PointerGetDatum(tup_allargs), + ObjectIdGetDatum(oldproc->pronamespace), packageidDatum, proargmodes); } +#else + oldtup = SearchSysCache3(PROCNAMEARGSNSP, CStringGetDatum(NameStr(oldproc->proname)), + PointerGetDatum(&oldproc->proargtypes), ObjectIdGetDatum(oldproc->pronamespace)); +#endif // ENABLE_MULTIPLE_NODES + + Relation rel = NULL; + Relation gs_rel = NULL; + if 
(HeapTupleIsValid(oldtup) && is_replace == true) {
+        Assert(oldtup != tuple);
+
+        /* remove dependent record from gs_encrypted_proc */
+        HeapTuple old_gs_tup = SearchSysCache1(GSCLPROCID, ObjectIdGetDatum(HeapTupleGetOid(oldtup)));
+        if (HeapTupleIsValid(old_gs_tup)) {
+            gs_rel = heap_open(ClientLogicProcId, RowExclusiveLock);
+            deleteDependencyRecordsFor(ClientLogicProcId, HeapTupleGetOid(old_gs_tup), true);
+            simple_heap_delete(gs_rel, &old_gs_tup->t_self);
+            heap_close(gs_rel, RowExclusiveLock);
+            ReleaseSysCache(old_gs_tup);
+        }
+
+        /* remove record from pg_proc */
+        rel = heap_open(ProcedureRelationId, RowExclusiveLock);
+        deleteDependencyRecordsFor(ProcedureRelationId, HeapTupleGetOid(oldtup), true);
+        simple_heap_delete(rel, &oldtup->t_self);
+        ReleaseSysCache(oldtup);
+    } else if (HeapTupleIsValid(oldtup) && is_replace == false) {
+        ReleaseSysCache(oldtup);
+        ReleaseSysCache(tuple);
+        /* caller should handle this case - function already exists and it is not replaced */
+        return true;
+    }
+
+    bool nulls[Natts_pg_proc] = {0};
+    Datum values[Natts_pg_proc] = {0};
+    bool replaces[Natts_pg_proc] = {0};
 
     values[Anum_pg_proc_proargtypes - 1] = PointerGetDatum(&oldproc->proargtypes);
     replaces[Anum_pg_proc_proargtypes - 1] = true;
 
     /* verify replace for allargtypes */
-    proallargtypes = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proallargtypes, &isNull);
+    int proallargtypes_size = 0;
+    Datum *proallargtypes_oids_orig = NULL;
+    Datum proallargtypes = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proallargtypes, &isNull);
     if (!isNull) {
-        char* argmodes = NULL;
-        Oid* allargtypes;
-        ArrayType* arr_all_types = DatumGetArrayTypeP(proallargtypes); /* ensure not toasted */
-        allnumargs = ARR_DIMS(arr_all_types)[0];
-        bool is_char_oid_array = ARR_NDIM(arr_all_types) != 1 || allnumargs < 0 || ARR_HASNULL(arr_all_types) ||
-            ARR_ELEMTYPE(arr_all_types) != OIDOID;
+        ArrayType* proallargtypes_arr = DatumGetArrayTypeP(proallargtypes); /* ensure not toasted */
+        proallargtypes_size = ARR_DIMS(proallargtypes_arr)[0];
+        Assert(proallargtypes_size >= p_info->nargs);
+
+        /* check that proallargtypes is a 1-D Oid array */
+        bool is_char_oid_array = ARR_NDIM(proallargtypes_arr) != 1 || proallargtypes_size < 0 ||
+            ARR_HASNULL(proallargtypes_arr) || ARR_ELEMTYPE(proallargtypes_arr) != OIDOID;
         if (is_char_oid_array) {
             ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("proallargtypes is not a 1-D Oid array")));
         }
-        Assert(allnumargs >= p_info->nargs);
-        allargtypes = (Oid*)ARR_DATA_PTR(arr_all_types);
-        Datum proargmodes;
-#ifndef ENABLE_MULTIPLE_NODES
-        if (t_thrd.proc->workingVersionNum < 92470) {
-            proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, tuple, Anum_pg_proc_proargmodes, &isNull);
-        } else {
-            proargmodes = SysCacheGetAttr(PROCALLARGS, tuple, Anum_pg_proc_proargmodes, &isNull);
+        if (argmodes_arr != NULL && proallargtypes_size != ARR_DIMS(argmodes_arr)[0]) {
+            ereport(ERROR, (errcode(ERRCODE_ARRAY_ELEMENT_ERROR),
+                errmsg("proallargtypes does not match proargmodes in length")));
         }
-#else
-        proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, tuple, Anum_pg_proc_proargmodes, &isNull);
-#endif
-        if (!isNull) {
-            ArrayType* arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
-            bool is_char_oid_array = ARR_NDIM(arr) != 1 || allnumargs != ARR_DIMS(arr)[0] || ARR_HASNULL(arr) ||
-                ARR_ELEMTYPE(arr) != CHAROID;
-            if (is_char_oid_array) {
-                ereport(ERROR, (errcode(ERRCODE_ARRAY_ELEMENT_ERROR), errmsg("proallargtypes is not a 1-D Oid array")));
-            }
-            argmodes = (char*)ARR_DATA_PTR(arr);
-        }
-        allargs_orig =
(Datum*)palloc(allnumargs * sizeof(Datum)); - int in_n = 0; - for (int i = 0; i < allnumargs; i++) { - allargs_orig[i] = -1; - if (in_n >= p_info->nargs) { + Oid *proallargtypes_oids = (Oid *)ARR_DATA_PTR(proallargtypes_arr); + proallargtypes_oids_orig = (Datum *)palloc(proallargtypes_size * sizeof(Datum)); + errno_t rc = memset_s(proallargtypes_oids_orig, proallargtypes_size * sizeof(Datum), -1, + proallargtypes_size * sizeof(Datum)); + securec_check_c(rc, "\0", "\0"); + int input_args_idx = 0; + for (int i = 0; i < proallargtypes_size && input_args_idx < p_info->nargs; i++) { + /* check if input argument */ + if (argmodes == NULL || !is_proargmode_any_input(argmodes[i])) { continue; } - bool is_input_params = argmodes == NULL || argmodes[i] == PROARGMODE_IN || - argmodes[i] == PROARGMODE_INOUT || argmodes[i] == PROARGMODE_VARIADIC; - if (is_input_params) { - if (p_info->replaced_argtypes[in_n] != 0) { - /* type has been replaced for input params */ - allargs_orig[i] = Int32GetDatum(allargtypes[i]); - allargtypes[i] = p_info->replaced_argtypes[in_n]; - } - in_n++; + + /* check if data type needs to be replaced */ + if (p_info->replaced_argtypes[input_args_idx] != 0) { + /* type has been replaced for input params */ + proallargtypes_oids_orig[i] = Int32GetDatum(proallargtypes_oids[i]); + proallargtypes_oids[i] = p_info->replaced_argtypes[input_args_idx]; } + + input_args_idx++; } - values[Anum_pg_proc_proallargtypes - 1] = PointerGetDatum(arr_all_types); + values[Anum_pg_proc_proallargtypes - 1] = PointerGetDatum(proallargtypes_arr); replaces[Anum_pg_proc_proallargtypes - 1] = true; } if (values[Anum_pg_proc_proallargtypes - 1] != 0) { @@ -2183,6 +2221,8 @@ bool sql_fn_cl_rewrite_params(const Oid func_id, SQLFunctionParseInfoPtr p_info, values[Anum_pg_proc_allargtypes - 1] = values[Anum_pg_proc_proargtypes - 1]; } replaces[Anum_pg_proc_allargtypes - 1] = true; + + /* update catalog tables pg_proc and gs_encrypted_proc */ if (!rel) { rel = heap_open(ProcedureRelationId, RowExclusiveLock); } @@ -2190,9 +2230,10 @@ bool sql_fn_cl_rewrite_params(const Oid func_id, SQLFunctionParseInfoPtr p_info, HeapTuple newtup = heap_modify_tuple(tuple, tupDesc, values, nulls, replaces); simple_heap_update(rel, &tuple->t_self, newtup); CatalogUpdateIndexes(rel, newtup); + heap_freetuple_ext(newtup); heap_close(rel, RowExclusiveLock); ReleaseSysCache(tuple); - update_gs_encrypted_proc(func_id, p_info, allargs_orig, allnumargs); + update_gs_encrypted_proc(func_id, p_info, proallargtypes_oids_orig, proallargtypes_size); return false; } diff --git a/src/gausskernel/runtime/executor/instrument.cpp b/src/gausskernel/runtime/executor/instrument.cpp index 85b03fcc6..59331452a 100644 --- a/src/gausskernel/runtime/executor/instrument.cpp +++ b/src/gausskernel/runtime/executor/instrument.cpp @@ -525,13 +525,10 @@ void AddControlMemoryContext(Instrumentation* instr, MemoryContext context) } /* Exit from a plan node */ -void InstrStopNode(Instrumentation* instr, double n_tuples) +void InstrStopNode(Instrumentation* instr, double n_tuples, bool containMemory) { instr_time end_time; - CPUUsage cpu_usage; - int64 memory_size = 0; - int64 control_memory_size = 0; CPUUsageGetCurrent(&cpu_usage); @@ -552,8 +549,9 @@ void InstrStopNode(Instrumentation* instr, double n_tuples) } /* Add delta of buffer usage since entry to node's totals */ - if (instr->need_bufusage) + if (instr->need_bufusage) { BufferUsageAccumDiff(&instr->bufusage, u_sess->instr_cxt.pg_buffer_usage, &instr->bufusage_start); + } 
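+    /* CPU usage is always accumulated; only the memory-context accounting
+     * below is skipped when containMemory is false. */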
CPUUsageAccumDiff(&instr->cpuusage, &cpu_usage, &instr->cpuusage_start); @@ -563,22 +561,26 @@ void InstrStopNode(Instrumentation* instr, double n_tuples) instr->firsttuple = INSTR_TIME_GET_DOUBLE(instr->counter); } - /* calculate the memory context size of this Node */ - CalculateContextSize(instr->memoryinfo.nodeContext, &memory_size); - if (instr->memoryinfo.peakOpMemory < memory_size) - instr->memoryinfo.peakOpMemory = memory_size; + if (containMemory) { + int64 memory_size = 0; + int64 control_memory_size = 0; + /* calculate the memory context size of this Node */ + CalculateContextSize(instr->memoryinfo.nodeContext, &memory_size); + if (instr->memoryinfo.peakOpMemory < memory_size) + instr->memoryinfo.peakOpMemory = memory_size; - List* control_list = instr->memoryinfo.controlContextList; - ListCell* context_cell = NULL; + List* control_list = instr->memoryinfo.controlContextList; + ListCell* context_cell = NULL; - /* calculate all control memory */ - foreach (context_cell, control_list) { - MemoryContext context = (MemoryContext)lfirst(context_cell); - CalculateContextSize(context, &control_memory_size); + /* calculate all control memory */ + foreach (context_cell, control_list) { + MemoryContext context = (MemoryContext)lfirst(context_cell); + CalculateContextSize(context, &control_memory_size); + } + + if (instr->memoryinfo.peakControlMemory < control_memory_size) + instr->memoryinfo.peakControlMemory = control_memory_size; } - - if (instr->memoryinfo.peakControlMemory < control_memory_size) - instr->memoryinfo.peakControlMemory = control_memory_size; } /* Finish a run cycle for a plan node */ diff --git a/src/gausskernel/runtime/executor/lightProxy.cpp b/src/gausskernel/runtime/executor/lightProxy.cpp index f6f40f7a7..d39d1786c 100644 --- a/src/gausskernel/runtime/executor/lightProxy.cpp +++ b/src/gausskernel/runtime/executor/lightProxy.cpp @@ -45,6 +45,7 @@ #include "optimizer/streamplan.h" #include "gs_ledger/blockchain.h" #include "parser/parse_hint.h" +#include "replication/walreceiver.h" const int MAX_COMMAND = 51; typedef struct commandType { @@ -108,7 +109,6 @@ extern void pgxc_node_init(PGXCNodeHandle* handle, int sock); extern void pgxc_handle_unsupported_stmts(Query* query); extern Oid exprType(const Node* expr); -extern bool light_xactnodes_member(bool write, const void* datum); extern int light_node_send_begin(PGXCNodeHandle* handle, bool check_gtm_mode); extern int light_handle_response(PGXCNodeHandle* conn, lightProxyMsgCtl* msgctl, lightProxy* lp); extern void light_node_report_error(lightProxyErrData* combiner); @@ -431,14 +431,25 @@ void lightProxy::connect() { List* dn_allocate = NULL; errno_t ss_rc = 0; + int dnNum = u_sess->pgxc_cxt.NumDataNodes; + if (IS_CN_DISASTER_RECOVER_MODE) { + dnNum = u_sess->pgxc_cxt.NumTotalDataNodes; + if (!u_sess->pgxc_cxt.DisasterReadArrayInit) { + disaster_read_array_init(); + } + Assert(m_nodeIdx < u_sess->pgxc_cxt.NumDataNodes); + if (u_sess->pgxc_cxt.disasterReadArray[m_nodeIdx] != -1) { + m_nodeIdx = u_sess->pgxc_cxt.disasterReadArray[m_nodeIdx]; + } + } m_handle = &u_sess->pgxc_cxt.dn_handles[m_nodeIdx]; if (!IS_VALID_CONNECTION(m_handle)) { - Assert(m_nodeIdx < u_sess->pgxc_cxt.NumDataNodes); - if (m_nodeIdx >= u_sess->pgxc_cxt.NumDataNodes) { + Assert(m_nodeIdx < dnNum); + if (m_nodeIdx >= dnNum) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("[LIGHT PROXY] m_nodeIdx error, m_nodeIdx:%d, numDataNodes:%d", - m_nodeIdx, u_sess->pgxc_cxt.NumDataNodes))); + m_nodeIdx, dnNum))); } dn_allocate = 
lappend_int(dn_allocate, m_nodeIdx); @@ -566,31 +577,13 @@ void lightProxy::proxyNodeBegin(bool is_read_only) if (is_read_only) { gxid = GetCurrentTransactionIdIfAny(); - } else if (GTM_MODE) { - gxid = GetCurrentTransactionId(); - if (!GlobalTransactionIdIsValid(gxid)) { - ereport(ERROR, - (errcode(ERRCODE_CONNECTION_EXCEPTION), errmsg("[LIGHT PROXY] Failed to get new transaction id"))); - } - } - - /* - * Send GXID and check for errors - */ - if (GTM_MODE && GlobalTransactionIdIsValid(gxid) && - pgxc_node_send_gxid(m_handle, gxid, false)) { - ereport(ERROR, - (errcode(ERRCODE_CONNECTION_EXCEPTION), - errmsg("[LIGHT PROXY] Failed to send gxid %lu to %s[%u]", - gxid, - m_handle->remoteNodeName, - m_handle->nodeoid))); } /* * If the node is already a participant in the transaction, skip it */ - if (light_xactnodes_member(false, m_handle) || light_xactnodes_member(true, m_handle)) { + if (list_member(u_sess->pgxc_cxt.XactReadNodes, m_handle) || + list_member(u_sess->pgxc_cxt.XactWriteNodes, m_handle)) { if (!is_read_only) { RegisterTransactionNodes(1, (void**)&m_handle, true); } @@ -885,15 +878,14 @@ bool lightProxy::processMsg(int msgType, StringInfo msg) /* * Emit duration logging if appropriate. */ - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, queryid %ld, unique id %lu", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); + Assert(false); break; case 2: { - ereport(LOG, - (errmsg("duration: %s ms queryid %ld unique id %ld", msec_str, u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), - errhidestmt(true))); + ereport(LOG, (errmsg("duration: %s ms queryid %ld unique id %ld", msec_str, + u_sess->debug_query_id, u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); break; } default: @@ -1368,7 +1360,6 @@ bool exec_query_through_light_proxy(List* querytree_list, Node* parsetree, bool list_length(single_exec_node->primarynodelist) == 1) { /* GTMLite: need to mark that this is single shard statement */ u_sess->exec_cxt.single_shard_stmt = true; - if (CmdtypeSupportsHotkey(query->commandType)) SendHotkeyToPgstat(); diff --git a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp index bd4365a44..6168090fe 100644 --- a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp +++ b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp @@ -497,15 +497,13 @@ bool heapam_scan_bitmap_next_block(TableScanDesc scan, TBMIterateResult* tbmres) hscan->rs_base.rs_cbuf = ReleaseAndReadBuffer(hscan->rs_base.rs_cbuf, hscan->rs_base.rs_rd, page); - /* In single mode and hot standby, we may get a null buffer if index + /* In hot standby, we may get a null buffer if index * replayed before the tid replayed. This is acceptable, so we return * directly without reporting error. 
*/ -#ifndef ENABLE_MULTIPLE_NODES if (!BufferIsValid(hscan->rs_base.rs_cbuf)) { return false; } -#endif hscan->rs_base.rs_cblock = page; buffer = hscan->rs_base.rs_cbuf; diff --git a/src/gausskernel/runtime/executor/nodeForeignscan.cpp b/src/gausskernel/runtime/executor/nodeForeignscan.cpp index cbdfd07bf..a07ae9deb 100644 --- a/src/gausskernel/runtime/executor/nodeForeignscan.cpp +++ b/src/gausskernel/runtime/executor/nodeForeignscan.cpp @@ -39,6 +39,8 @@ #include "pgxc/pgxc.h" #endif +#include "utils/knl_relcache.h" + static TupleTableSlot* ForeignNext(ForeignScanState* node); static bool ForeignRecheck(ForeignScanState* node, TupleTableSlot* slot); @@ -201,8 +203,8 @@ ForeignScanState* ExecInitForeignScan(ForeignScan* node, EState* estate, int efl fdwroutine = GetFdwRoutine(fdw->fdwhandler); - /* Save the data for later reuse in u_sess->cache_mem_cxt */ - FdwRoutine* cfdwroutine = (FdwRoutine*)MemoryContextAlloc(u_sess->cache_mem_cxt, sizeof(FdwRoutine)); + /* Save the data for later reuse in LocalMyDBCacheMemCxt */ + FdwRoutine* cfdwroutine = (FdwRoutine*)MemoryContextAlloc(LocalMyDBCacheMemCxt(), sizeof(FdwRoutine)); rc = memcpy_s(cfdwroutine, sizeof(FdwRoutine), fdwroutine, sizeof(FdwRoutine)); securec_check(rc, "\0", "\0"); currentRelation->rd_fdwroutine = cfdwroutine; diff --git a/src/gausskernel/runtime/executor/nodeGD.cpp b/src/gausskernel/runtime/executor/nodeGD.cpp deleted file mode 100755 index c1bbe8fdd..000000000 --- a/src/gausskernel/runtime/executor/nodeGD.cpp +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - *--------------------------------------------------------------------------------------- - * - * nodeGD.cpp - * - * IDENTIFICATION - * src/gausskernel/runtime/executor/nodeGD.cpp - * - * --------------------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "executor/executor.h" -#include "executor/node/nodeGD.h" -#include "db4ai/gd.h" - -////////////////////////////////////////////////////////////////////////// - -GradientDescentHook_iteration gdhook_iteration = nullptr; - -static bool transfer_slot(GradientDescentState* gd_state, TupleTableSlot* slot, - int ith_tuple, Matrix* features, Matrix* dep_var) -{ - const GradientDescent* gd_node = gd_get_node(gd_state); - Assert(ith_tuple < (int)features->rows); - - if (!slot->tts_isnull[gd_node->targetcol]) { - int feature = 0; - gd_float* w = features->data + ith_tuple * features->columns; - for (int i = 0; i < get_natts(gd_state); i++) { - if (i == gd_node->targetcol && !dep_var_is_continuous(gd_state->algorithm)) { - if (!dep_var_is_binary(gd_state->algorithm)) { - ereport(ERROR, - (errmodule(MOD_DB4AI), - errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("categorical dependent variable not implemented"))); - } - - float dt = 0; - if (get_atttypid(gd_state, gd_node->targetcol) == BOOLOID) - dt = DatumGetBool(slot->tts_values[gd_node->targetcol]) - ? 
gd_state->algorithm->max_class : gd_state->algorithm->min_class; - else { - bool found = false; - for (int v = 0; v < gd_state->num_classes && !found; v++) { - found = datumIsEqual(slot->tts_values[gd_node->targetcol], gd_state->binary_classes[v], - get_attbyval(gd_state, gd_node->targetcol), - get_attlen(gd_state, gd_node->targetcol)); - if (found) - dt = (v == 1 ? gd_state->algorithm->max_class : gd_state->algorithm->min_class); - } - if (!found) { - if (gd_state->num_classes == 2) - ereport(ERROR, - (errmodule(MOD_DB4AI), - errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many target values for binary operator"))); - - gd_state->binary_classes[gd_state->num_classes++] = - datumCopy(slot->tts_values[gd_node->targetcol], - get_attbyval(gd_state, gd_node->targetcol), - get_attlen(gd_state, gd_node->targetcol)); - } - } - dep_var->data[ith_tuple] = dt; - } else { - gd_float value; - if (slot->tts_isnull[i]) { - Assert(i != gd_node->targetcol); - value = 0.0; // default value for feature, it is not the target for sure - } - else - value = gd_datum_get_float(get_atttypid(gd_state, i), slot->tts_values[i]); - - if (i == gd_node->targetcol) - dep_var->data[ith_tuple] = value; - else { - *w++ = value; - feature++; - } - } - } - Assert(feature == gd_state->n_features-1); - *w = 1.0; // bias - - return true; - } - - return false; -} - -void exec_gd_batch(GradientDescentState* gd_state, int iter) -{ - // get information from the node - const GradientDescent* gd_node = gd_get_node(gd_state); - PlanState* outer_plan = outerPlanState(gd_state); - Matrix* features; - Matrix* dep_var; - bool more = true; - TupleTableSlot* slot = NULL; - do { - // read next batch - features = gd_state->shuffle->get(gd_state->shuffle, &dep_var); - - int ith_tuple = 0; - while (more && ith_tuple < gd_node->batch_size) { - slot = ExecProcNode(outer_plan); - if (TupIsNull(slot)) { - more = false; - } else { - if (transfer_slot(gd_state, slot, ith_tuple, features, dep_var)) { - if (iter == 0) - gd_state->processed++; - - ith_tuple++; - } else { - if (iter == 0) - gd_state->discarded++; - } - } - } - - // use the batch to test now in case the shuffle algorithm - // releases it during unget - if (iter > 0 && ith_tuple > 0) { - if (ith_tuple < gd_node->batch_size) { - matrix_resize(features, ith_tuple, gd_state->n_features); - matrix_resize(dep_var, ith_tuple, 1); - } - - double loss = gd_state->algorithm->test_callback(gd_node, features, dep_var, &gd_state->weights, &gd_state->scores); - gd_state->loss += loss; - ereport(DEBUG1, - (errmodule(MOD_DB4AI), - errmsg("iteration %d loss = %.6f (total %.6g)", iter, loss, gd_state->loss))); - - if (ith_tuple < gd_node->batch_size) { - matrix_resize(features, gd_node->batch_size, gd_state->n_features); - matrix_resize(dep_var, gd_node->batch_size, 1); - } - } - - // give back the batch to the shuffle algorithm - gd_state->shuffle->unget(gd_state->shuffle, ith_tuple); - } while (more); -} - -void exec_gd_start_iteration(GradientDescentState* gd_state) -{ - if (gd_state->optimizer->start_iteration != nullptr) - gd_state->optimizer->start_iteration(gd_state->optimizer); - - if (gd_state->shuffle->start_iteration != nullptr) - gd_state->shuffle->start_iteration(gd_state->shuffle); -} - -void exec_gd_end_iteration(GradientDescentState* gd_state) -{ - if (gd_state->shuffle->end_iteration != nullptr) - gd_state->shuffle->end_iteration(gd_state->shuffle); - - if (gd_state->optimizer->end_iteration != nullptr) - gd_state->optimizer->end_iteration(gd_state->optimizer); -} - -/* 
---------------------------------------------------------------- - * ExecGradientDescent - * ---------------------------------------------------------------- - * - * Training and test are interleaved to avoid a double scan over the data - * for training and test. Iteration 0 only computes the initial weights, and - * at each following iteration the model is tested with the current weights - * and new weights are updated with the gradients. The optimization is clear: - * for N iterations, the basic algorithm requires N*2 data scans, while the - * interleaved train&test requires only N+1 data scans. When N=1 the number - * of scans is the same (N*2 = N+1) - */ -TupleTableSlot* ExecGradientDescent(GradientDescentState* gd_state) -{ - // check if training is already finished - if (gd_state->done) - return NULL; - - // get information from the node - const GradientDescent* gd_node = gd_get_node(gd_state); - ScanDirection direction = gd_state->ss.ps.state->es_direction; - PlanState* outer_plan = outerPlanState(gd_state); - - // If backwards scan, just return NULL without changing state. - if (!ScanDirectionIsForward(direction)) - return NULL; - - // for counting execution time - uint64_t start, finish, step; - uint64_t iter_start, iter_finish; - - // iterations - double prev_loss = 0; - TupleTableSlot* slot = NULL; - - gd_state->processed = 0; - gd_state->discarded = 0; - - uint64_t max_usecs = ULLONG_MAX; - if (gd_node->max_seconds > 0) - max_usecs = gd_node->max_seconds * 1000000ULL; - - bool stop = false; - start = gd_get_clock_usecs(); - step = start; - for (int iter = 0; !stop && iter <= gd_node->max_iterations; iter++) { - iter_start = gd_get_clock_usecs(); - - // init loss & scores - scores_init(&gd_state->scores); - gd_state->loss = 0; - - exec_gd_start_iteration(gd_state); - exec_gd_batch(gd_state, iter); - exec_gd_end_iteration(gd_state); - - iter_finish = gd_get_clock_usecs(); - - // delta loss < loss tolerance? 
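For reference, the stopping rule applied in the removed lines that follow can be summarized in a small self-contained helper (`gd_should_stop` is a hypothetical name, not kernel code): training stops once the loss delta falls below the tolerance, the wall-clock budget is spent, or the iteration cap is hit.

```c
#include <math.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical summary of the checks below, not part of the kernel. */
static bool gd_should_stop(int iter, int max_iterations, double prev_loss, double loss,
                           double tolerance, uint64_t elapsed_usecs, uint64_t max_usecs)
{
    if (iter > 0 && fabs(prev_loss - loss) < tolerance)
        return true;                       /* converged: delta loss under tolerance */
    return elapsed_usecs >= max_usecs ||   /* wall-clock budget exhausted */
           iter >= max_iterations;         /* iteration cap reached */
}
```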
-        if (iter > 0)
-            stop = (fabs(prev_loss - gd_state->loss) < gd_node->tolerance);
-
-        if (!stop) {
-            // continue with another iteration with the new weights
-            int bytes = sizeof(gd_float) * gd_state->n_features;
-            int rc = memcpy_s(gd_state->weights.data, gd_state->weights.allocated * sizeof(gd_float),
-                gd_state->optimizer->weights.data, bytes);
-            securec_check(rc, "", "");
-
-            if (iter > 0) {
-                gd_state->n_iterations++;
-                if (gdhook_iteration != nullptr)
-                    gdhook_iteration(gd_state);
-            }
-
-            // timeout || max_iterations
-            stop = (gd_get_clock_usecs()-start >= max_usecs)
-                    || (iter == gd_node->max_iterations);
-        }
-
-        // trace at end or no more than once per second
-        bool trace_iteration = gd_node->verbose || stop;
-        if (!trace_iteration) {
-            uint64_t now = gd_get_clock_usecs();
-            uint64_t nusecs = now - step;
-            if (nusecs > 1000000) {
-                // more than one second
-                trace_iteration = true;
-                step = now;
-            }
-        }
-
-        if (iter>0 && trace_iteration) {
-            gd_float* w = gd_state->weights.data;
-            StringInfoData buf;
-            initStringInfo(&buf);
-            for (int i=0 ; i<gd_state->n_features ; i++)
-                appendStringInfo(&buf, "%.3f,", w[i]);
-
-            ereport(DEBUG1,
-                (errmodule(MOD_DB4AI),
-                    errmsg("ITERATION %d: test_loss=%.6f delta_loss=%.6f tolerance=%.3f accuracy=%.3f tuples=%d coef=%s",
-                        iter, gd_state->loss,
-                        fabs(prev_loss - gd_state->loss), gd_node->tolerance,
-                        get_accuracy(&gd_state->scores), gd_state->processed,
-                        buf.data)));
-            pfree(buf.data);
-        }
-
-        prev_loss = gd_state->loss;
-
-        if (!stop)
-            ExecReScan(outer_plan); // for the next iteration
-    }
-
-    finish = gd_get_clock_usecs();
-
-    gd_state->done = true;
-    gd_state->usecs = finish - start;
-
-    // return trained model
-    ExprDoneCond isDone;
-    slot = ExecProject(gd_state->ss.ps.ps_ProjInfo, &isDone);
-
-    return slot;
-}
-
-/* ----------------------------------------------------------------
- *      ExecInitGradientDescent
- *
- *      This initializes the GradientDescent node state structures and
- *      the node's subplan.
- * ----------------------------------------------------------------
- */
-GradientDescentState* ExecInitGradientDescent(GradientDescent* gd_node, EState* estate, int eflags)
-{
-    GradientDescentState* gd_state = NULL;
-    Plan* outer_plan = outerPlan(gd_node);
-
-    // check for unsupported flags
-    Assert(!(eflags & (EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
-
-    // create state structure
-    gd_state = makeNode(GradientDescentState);
-    gd_state->ss.ps.plan = (Plan*)gd_node;
-    gd_state->ss.ps.state = estate;
-
-    // Tuple table initialization
-    ExecInitScanTupleSlot(estate, &gd_state->ss);
-    ExecInitResultTupleSlot(estate, &gd_state->ss.ps);
-
-    // initialize child expressions
-    ExecAssignExprContext(estate, &gd_state->ss.ps);
-    gd_state->ss.ps.targetlist = (List*)ExecInitExpr((Expr*)gd_node->plan.targetlist, (PlanState*)gd_state);
-
-    // initialize outer plan
-    outerPlanState(gd_state) = ExecInitNode(outer_plan, estate, eflags);
-
-    // Initialize result tuple type and projection info. 
- ExecAssignScanTypeFromOuterPlan(&gd_state->ss); // input tuples - ExecAssignResultTypeFromTL(&gd_state->ss.ps); // result tuple - ExecAssignProjectionInfo(&gd_state->ss.ps, NULL); - gd_state->ss.ps.ps_TupFromTlist = false; - - // select algorithm - gd_state->algorithm = gd_get_algorithm(gd_node->algorithm); - - // Input tuple initialization - gd_state->tupdesc = ExecGetResultType(outerPlanState(gd_state)); - - int natts = gd_state->tupdesc->natts; - gd_state->n_features = natts; // -1 dep_var, +1 bias (fixed as 1) - - for (int i = 0; i < natts; i++) { - Oid oidtype = gd_state->tupdesc->attrs[i]->atttypid; - if (i == gd_node->targetcol) { - switch (oidtype) { - case BITOID: - case VARBITOID: - case BYTEAOID: - case CHAROID: - case RAWOID: - case NAMEOID: - case TEXTOID: - case BPCHAROID: - case VARCHAROID: - case NVARCHAR2OID: - case CSTRINGOID: - case INT1OID: - case INT2OID: - case INT4OID: - case INT8OID: - case FLOAT4OID: - case FLOAT8OID: - case NUMERICOID: - case ABSTIMEOID: - case DATEOID: - case TIMEOID: - case TIMESTAMPOID: - case TIMESTAMPTZOID: - case TIMETZOID: - case SMALLDATETIMEOID: - // detect the different values while reading the data - gd_state->num_classes = 0; - break; - - case BOOLOID: - // values are known in advance - gd_state->binary_classes[0] = BoolGetDatum(false); - gd_state->binary_classes[1] = BoolGetDatum(true); - gd_state->num_classes = 2; - break; - - default: - // unsupported datatypes - ereport(ERROR, - (errmodule(MOD_DB4AI), - errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Datatype of target not supported"))); - break; - } - } - } - - // optimizer - switch (gd_node->optimizer) { - case OPTIMIZER_GD: - gd_state->optimizer = gd_init_optimizer_gd(gd_state); - break; - case OPTIMIZER_NGD: - gd_state->optimizer = gd_init_optimizer_ngd(gd_state); - break; - default: - ereport(ERROR, - (errmodule(MOD_DB4AI), - errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Optimizer %d not supported", gd_node->optimizer))); - break; - } - matrix_init(&gd_state->optimizer->weights, gd_state->n_features); - matrix_init(&gd_state->optimizer->gradients, gd_state->n_features); - - // shuffle - gd_state->shuffle = gd_init_shuffle_cache(gd_state); - gd_state->shuffle->optimizer = gd_state->optimizer; - - // training state initialization - gd_state->done = false; - gd_state->learning_rate = gd_node->learning_rate; - gd_state->n_iterations = 0; - gd_state->loss = 0; - matrix_init(&gd_state->weights, gd_state->n_features); - - return gd_state; -} - -/* ---------------------------------------------------------------- - * ExecEndGradientDescent - * - * This shuts down the subplan and frees resources allocated - * to this node. 
- * ---------------------------------------------------------------- - */ -void ExecEndGradientDescent(GradientDescentState* gd_state) -{ - // release state - matrix_release(&gd_state->weights); - - gd_state->shuffle->release(gd_state->shuffle); - - matrix_release(&gd_state->optimizer->gradients); - matrix_release(&gd_state->optimizer->weights); - gd_state->optimizer->release(gd_state->optimizer); - - ExecFreeExprContext(&gd_state->ss.ps); - ExecEndNode(outerPlanState(gd_state)); - pfree(gd_state); -} - diff --git a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp index ab3225f0c..79fefa14b 100644 --- a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp +++ b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp @@ -116,6 +116,7 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) ScanDirection direction; IndexScanDesc scandesc; TupleTableSlot* slot = NULL; + TupleTableSlot* tmpslot = NULL; ItemPointer tid; bool isVersionScan = TvIsVersionScan(&node->ss); bool isUHeap = false; @@ -135,8 +136,9 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) scandesc = node->ioss_ScanDesc; econtext = node->ss.ps.ps_ExprContext; slot = node->ss.ss_ScanTupleSlot; - isUHeap = RelationIsUstoreFormat(node->ss.ss_currentRelation); + tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scandesc->heapRelation), + false, scandesc->indexRelation->rd_tam_type); /* * OK, now that we have what we need, fetch the next tuple. @@ -174,17 +176,16 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) /* ustore with multi-version ubtree only recheck IndexTuple when xs_recheck_itup is set */ if (indexScan->xs_recheck_itup) { node->ioss_HeapFetches++; - if (!IndexFetchUHeap(indexScan, slot)) { + if (!IndexFetchUHeap(indexScan, tmpslot)) { continue; /* this TID indicate no visible tuple */ } - if (!RecheckIndexTuple(indexScan, slot)) { + if (!RecheckIndexTuple(indexScan, tmpslot)) { continue; /* the visible version not match the IndexTuple */ } } } else if (isVersionScan || !visibilitymap_test(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), &node->ioss_VMBuffer)) { /* IMPORTANT: We ALWAYS visit the heap to check visibility in VERSION SCAN. */ - /* * Rats, we have to visit the heap to check visibility. */ @@ -258,7 +259,7 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) */ if (tuple == NULL) PredicateLockPage(indexScan->heapRelation, ItemPointerGetBlockNumber(tid), estate->es_snapshot); - + ExecDropSingleTupleTableSlot(tmpslot); return slot; } @@ -266,6 +267,7 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node) * if we get here it means the index scan failed so we are at the end of * the scan.. */ + ExecDropSingleTupleTableSlot(tmpslot); return ExecClearTuple(slot); } diff --git a/src/gausskernel/runtime/executor/nodeKMeans.cpp b/src/gausskernel/runtime/executor/nodeKMeans.cpp deleted file mode 100644 index b6d945dd5..000000000 --- a/src/gausskernel/runtime/executor/nodeKMeans.cpp +++ /dev/null @@ -1,1106 +0,0 @@ -/** -Copyright (c) 2021 Huawei Technologies Co.,Ltd. - -openGauss is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - - http://license.coscl.org.cn/MulanPSL2 - -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
-See the Mulan PSL v2 for more details.
----------------------------------------------------------------------------------------
-
-nodeKMeans.cpp
-        Functions related to the k-means operator
-
-IDENTIFICATION
-        src/gausskernel/runtime/executor/nodeKMeans.cpp
-
----------------------------------------------------------------------------------------
-**/
-
-#include <random>
-
-#include "executor/node/nodeKMeans.h"
-#include "funcapi.h"
-#include "utils/array.h"
-
-#include "db4ai/fp_ops.h"
-#include "db4ai/distance_functions.h"
-#include "db4ai/db4ai_cpu.h"
-
-/*
- * these functions are defined in kmeans.cpp and are not publicly exposed (under the public include directories)
- */
-extern void compute_cost_and_weights(List const* centroids, GSPoint const* points, uint32_t dimension,
-                                     uint32_t num_slots, double* cost);
-
-extern List* kmeanspp(KMeansStateDescription* description, List* centroids_candidates,
-                      uint32_t idx_current_centroids, uint32_t size_centroid_bytes, std::mt19937_64* prng);
-
-extern bool init_kmeans(KMeansStateDescription* description, double* bbox_min, double* bbox_max,
-                        GSPoint const* batch, uint32_t num_slots, uint32_t size_centroid_bytes);
-
-extern bool copy_slot_coordinates_to_array(GSPoint* coordinates, TupleTableSlot const* slot,
-                                           uint32_t dimension);
-
-extern bool closest_centroid(List const* centroids, GSPoint const* point, uint32_t dimension, double* distance);
-
-extern void update_centroids(KMeansStateDescription* description, GSPoint* slots, uint32_t num_slots,
-                             uint32_t idx_current_centroids, uint32_t idx_next_centroids);
-
-extern void reset_weights(List const* centroids);
-
-extern void reset_centroids(KMeansStateDescription* description, uint32_t idx_centroids,
-                            uint32_t size_centroid_bytes);
-
-extern void merge_centroids(KMeansStateDescription* description, uint32_t idx_current_centroids,
-                            uint32_t idx_next_centroids, uint32_t size_centroid_bytes);
-
-extern void compute_cost(KMeansStateDescription* description, uint32_t idx_current_centroids);
-
-uint32_t constexpr MAX_BATCH_SLOTS = 100000U;
-uint32_t constexpr NUM_ITERATIONS_KMEANSBB = 10U;
-
-/*
- * internally, the operator works in stages, this enum identifies each one of them
- */
-enum AlgoStage : uint32_t {
-    KMEANS_INIT = 0,
-    KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE,
-    KMEANS_INITIAL_CENTROIDS_BB_SAMPLE,
-    KMEANS_INITIAL_CENTROIDS_BB_COMPUTE_COST,
-    KMEANS_LLOYD
-};
-
-/*
- * this is a faster version of construct_md_array in which we use knowledge we have
- * on the centroids to speed up computations
- */
-force_inline ArrayType* construct_empty_centroids_array(uint32_t const num_centroids, uint32_t const dimension) {
-    ArrayType* result = NULL;
-    int32_t const ndims = 2;
-    uint32_t const dims[2] = {num_centroids, dimension};
-    uint32_t const lbs[2] = {1U, 1U};
-    int32_t nbytes = num_centroids * dimension * sizeof(float8);
-
-    nbytes += ARR_OVERHEAD_NONULLS(ndims);
-    result = reinterpret_cast<ArrayType*>(palloc0(nbytes));
-    SET_VARSIZE(result, nbytes);
-    result->ndim = ndims;
-    result->dataoffset = 0; /* marker for no null bitmap */
-    result->elemtype = FLOAT8OID;
-    errno_t errorno = EOK;
-    errorno = memcpy_s(ARR_DIMS(result), ndims * sizeof(int32_t), dims, ndims * sizeof(int32_t));
-    securec_check(errorno, "\0", "\0");
-    errorno = memcpy_s(ARR_LBOUND(result), ndims * sizeof(int32_t), lbs, ndims * sizeof(int32_t));
-    securec_check(errorno, "\0", "\0");
-
-    return result;
-}
-
-/*
- * a batch is used only once and thus the data it points to is
- * released after it has been used
- */
-force_inline void 
release_batch(GSPoint* batch, uint32_t const num_slots) { - GSPoint* point = nullptr; - for (uint32_t current_slot = 0; current_slot < num_slots; ++current_slot) { - point = batch + current_slot; - if (likely(point->should_free)) { - pfree(point->pg_coordinates); - point->pg_coordinates = nullptr; - point->should_free = false; - } - } -} - -bool deal_sample(bool const sample, std::mt19937_64* prng, GSPoint* batch, uint32_t current_slot, - TupleTableSlot* scan_slot, uint32_t const dimension, bool first_candidate, - List* centroid_candidates, double local_sample_probability, AlgoStage stage) -{ - double coin = 0.; - bool result = false; - bool distance_computed = false; - double distance_to_centroid = 0.; - double op_error = 0.; - std::uniform_real_distribution unit_sampler(0., 1.); - /* - * toss the coin to see if we have to - */ - if (sample) { - if (unlikely(prng == nullptr)) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: prng must be set (non-null)"))); - - coin = unit_sampler(*prng); - /* - * random++ and kmeans|| sample with different probabilities - * (the latter is much more complicated) - * - * when sampling we will pay the price of unpacking every single - * tuple (which has to be done for kmeans|| anyway) for code - * simplicity - */ - result = copy_slot_coordinates_to_array(batch + current_slot, scan_slot, dimension); - - if (result && (stage == KMEANS_INITIAL_CENTROIDS_BB_SAMPLE) && !first_candidate) { - distance_computed = closest_centroid(centroid_candidates, batch + current_slot, - dimension, &distance_to_centroid); - twoMult(local_sample_probability, distance_computed ? distance_to_centroid : 1., - &local_sample_probability, &op_error); - local_sample_probability += op_error; - } - - /* - * if the data point is not valid or did not pass the test - * we release the memory it occupies and ask for the next data point - */ - if (!result || coin >= local_sample_probability) { - if (likely(batch[current_slot].should_free)) { - pfree(batch[current_slot].pg_coordinates); - batch[current_slot].pg_coordinates = nullptr; - batch[current_slot].should_free = false; - } - return true; - } - } - return false; -} -/* - * this is the work horse of the whole algorithm. in here we do a lot of things depending on the stage - * of the algorithm. this function does a complete (single) pass over the data. 
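The `deal_sample` routine above implements the sampling coin for both seeding methods. Stripped of the kernel's types, the kmeans|| acceptance test looks roughly like the sketch below, under the assumption that `base_probability` already carries the oversampling factor divided by the current solution cost:

```cpp
#include <random>

// Sketch of the kmeans|| coin: a point survives with probability
// proportional to its distance to the closest centroid chosen so far
// (the very first round has no centroids yet and uses distance = 1).
static bool sample_point(std::mt19937_64& prng, double base_probability,
                         double distance_to_closest_centroid)
{
    std::uniform_real_distribution<double> unit(0., 1.);
    return unit(prng) < base_probability * distance_to_closest_centroid;
}
```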
the upper layer - * calls this function multiple times depending the stage of the algorithm - */ -List* one_data_pass(PlanState* outer_plan, KMeansStateDescription* state_description, uint32_t const batch_size, - GSPoint* batch, uint32_t const idx_current_centroids, uint32_t const idx_next_centroids, - uint32_t const size_centroid_bytes, bool const sample, double const sample_probability, - AlgoStage stage, List* centroid_candidates, double* cost_centroid_candidates, - std::mt19937_64* prng) -{ - bool plan_exhausted = false; - bool result = false; - bool first_candidate = centroid_candidates == nullptr; - uint32_t current_slot = 0; - uint32_t const dimension = state_description->dimension; - uint32_t const num_centroids = state_description->num_centroids; - uint32_t num_elements_round = 0; - uint32_t slot_number = 0; - uint32_t valid_row = 0; - TupleTableSlot* scan_slot = nullptr; - auto bbox_min = reinterpret_cast(state_description->bbox_min); - auto bbox_max = reinterpret_cast(state_description->bbox_max); - double local_sample_probability = sample_probability; - double cost_of_batch = 0.; - GSPoint* centroid_candidate = nullptr; - - while (!plan_exhausted) { - current_slot = 0; - /* we produce a batch of slots to be passed to the algorithm */ - while (current_slot < batch_size) { - scan_slot = ExecProcNode(outer_plan); - ++slot_number; - // every slot will have its own chances - local_sample_probability = sample_probability; - /* - * we get out of the whole thing if we have exhausted the relation or - * we have found our k centroids. - * if we were not able to sample the k centroids, the upper call - * will perform runs until we have done so - */ - if (unlikely(TupIsNull(scan_slot) || state_description->current_centroid == num_centroids)) { - plan_exhausted = true; - break; - } - - /* - * we jump over rows with empty coordinates - */ - if (unlikely(scan_slot->tts_isnull[0])) - continue; - - if (deal_sample(sample, prng, batch, current_slot, scan_slot, dimension, first_candidate, - centroid_candidates, local_sample_probability, stage) == true) - continue; - - if ((stage == KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE) || (stage == KMEANS_INITIAL_CENTROIDS_BB_SAMPLE)) { - /* - * we only know the expected number of centroid candidates that we will produce - * but not the exact number. thus we allocate each one of them on demand - * (observe that we cannot use the batch structure because the number of - * candidates we generate can be much larger than the size of a batch) - */ - centroid_candidate = reinterpret_cast(palloc0(sizeof(GSPoint))); - - /* - * observe that the scan_slot was already copied above (when sampling) and thus - * we just move the memory reference from the batch slot to the newly allocated - * GSPoint - */ - *centroid_candidate = batch[current_slot]; - centroid_candidate->distance_to_closest_centroid = DBL_MAX; - centroid_candidate->id = slot_number; - batch[current_slot].id = 0; - batch[current_slot].distance_to_closest_centroid = 0.; - batch[current_slot].weight = 0.; - batch[current_slot].pg_coordinates = nullptr; - batch[current_slot].should_free = false; - - /* - * this stores the reference to the current selected candidate and thus we - * can forget about it - */ - centroid_candidates = lappend(centroid_candidates, centroid_candidate); - - /* - * memory should be allocated in the next iteration - */ - centroid_candidate = nullptr; - - ++num_elements_round; - - /* - * for kmeans|| we produce a single candidate the very first time. 
- * for random++ we produce a number of candidates in a single pass - * (thus we consume the whole relation since we do not update current_slot) - */ - if (unlikely(first_candidate && (stage == KMEANS_INITIAL_CENTROIDS_BB_SAMPLE))) { - plan_exhausted = true; - break; - } else if (unlikely(first_candidate)) { - first_candidate = false; - } - } else { - /* - * the element's coordinates are copied to be processed - */ - result = copy_slot_coordinates_to_array(batch + current_slot, scan_slot, dimension); - batch[current_slot].id = slot_number; - valid_row = result ? 1U : 0U; - current_slot += valid_row; - state_description->num_dead_points += 1 - valid_row; - - heap_slot_clear(scan_slot); - } - } - - /* we process the batch - * each stage happens in a batch and thus branch misprediction should not be a problem - * also, except for KMEANS_LLOYD, the other two stages require exactly one data pass - */ - switch (stage) { - case KMEANS_INIT: - /* - * this run is to obtain initial statistics about the data (like the number of valid tuples) - * and the coordinates of the bounding box - */ - init_kmeans(state_description, bbox_min, bbox_max, batch, current_slot, size_centroid_bytes); - - /* - * we are done with the batch and thus we release the allocated memory (corresponding - * to the points of the batch) - */ - release_batch(batch, current_slot); - - break; - case KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE: - case KMEANS_INITIAL_CENTROIDS_BB_SAMPLE: - /* - * when sampling for random++ and kmeans|| we do no computations other than the sample - * the upper call will run kmeans++ after the candidates have been sampled - */ - break; - case KMEANS_INITIAL_CENTROIDS_BB_COMPUTE_COST: - /* - * when computing the cost of a solution, we do it in a batched manner as the other - * non-sampling cases - */ - compute_cost_and_weights(centroid_candidates, batch, dimension, current_slot, &cost_of_batch); - - if (unlikely(cost_centroid_candidates == nullptr)) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: cost variable must be non-null"))); - - *cost_centroid_candidates += cost_of_batch; - - /* - * we are done with the batch and thus we release the allocated memory (corresponding - * to the points of the batch) - */ - release_batch(batch, current_slot); - - break; - case KMEANS_LLOYD: - /* - * let's find out which centroid is the closest and aggregate the corresponding statistics - */ - update_centroids(state_description, batch, current_slot, idx_current_centroids, idx_next_centroids); - - /* - * we are done with the batch and thus we release the allocated memory (corresponding - * to the points of the batch) - */ - release_batch(batch, current_slot); - - break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: no known algorithm stage"))); - } - } - return centroid_candidates; -} - -/* - * this function encodes the internal representation of the model in a form that the upper layer - * can easily process (virtual tuple) - */ -bool kmeans_get_tupleslot(KMeansState* kmeans_state, TupleTableSlot* slot, TupleDesc tuple_desc) { - auto kmeans_node = reinterpret_cast(kmeans_state->sst.ps.plan); - uint32_t const num_centroids = kmeans_node->parameters.num_centroids; - // this precision for num_points is enough for practical purposes - uint32_t const num_points = kmeans_state->description.num_good_points; - uint32_t const actual_num_centroids = num_centroids > num_points ? 
num_points : num_centroids;
-    uint32_t current_position = 0;
-    uint32_t const dimension = kmeans_state->description.dimension;
-    uint32_t idx_current_centroids = 1U - (kmeans_state->description.current_iteration & 1U);
-    uint32_t const size_centroid_bytes = sizeof(float8) * dimension;
-    uint32_t centroid_coordinates_offset = 0U;
-    Centroid* current_centroid = nullptr;
-    /* these are the outer-facing arrays */
-    ArrayType* centroid_ids = nullptr;
-    ArrayType* centroid_coordinates = nullptr;
-    ArrayType* objective_functions = nullptr;
-    ArrayType* avg_distances = nullptr;
-    ArrayType* min_distances = nullptr;
-    ArrayType* max_distances = nullptr;
-    ArrayType* std_dev_distances = nullptr;
-    ArrayType* cluster_sizes = nullptr;
-    /* these are the inner-facing arrays */
-    int32_t* centroid_ids_data = nullptr;
-    double* centroid_coordinates_data = nullptr;
-    double* objective_functions_data = nullptr;
-    double* avg_distances_data = nullptr;
-    double* min_distances_data = nullptr;
-    double* max_distances_data = nullptr;
-    double* std_dev_distances_data = nullptr;
-    int64_t* cluster_sizes_data = nullptr;
-    uint64_t cluster_size = 0ULL;
-    Datum* values = slot->tts_values;
-    bool* nulls = slot->tts_isnull;
-    errno_t errorno = EOK;
-
-    /*
-     * the descriptor is not reset
-     */
-    (void)ExecClearTuple(slot);
-
-    Assert(tuple_desc->natts == NUM_ATTR_OUTPUT);
-
-    /* there is nothing to output any more */
-    if (kmeans_state->description.current_centroid >= actual_num_centroids)
-        return false;
-
-    auto datums_placeholder = reinterpret_cast<Datum*>(palloc0(sizeof(Datum) * actual_num_centroids));
-
-    /*
-     * we allocate all arrays in one shot and fill them as we process valid centroids
-     */
-    centroid_ids = construct_array(datums_placeholder, actual_num_centroids, INT4OID, sizeof(int4), true, 'i');
-    objective_functions =
-        construct_array(datums_placeholder, actual_num_centroids, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd');
-    avg_distances =
-        construct_array(datums_placeholder, actual_num_centroids, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd');
-    min_distances =
-        construct_array(datums_placeholder, actual_num_centroids, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd');
-    max_distances =
-        construct_array(datums_placeholder, actual_num_centroids, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd');
-    std_dev_distances =
-        construct_array(datums_placeholder, actual_num_centroids, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd');
-    cluster_sizes =
-        construct_array(datums_placeholder, actual_num_centroids, INT8OID, sizeof(int64_t), FLOAT8PASSBYVAL, 'i');
-    /*
-     * this one is the 2-dimensional array that will hold the coordinates of the centroids
-     * at this point the data of this array is zeroed
-     */
-    centroid_coordinates = construct_empty_centroids_array(actual_num_centroids, dimension);
-
-    /*
-     * we now obtain pointers to the actual raw arrays
-     */
-    centroid_ids_data = reinterpret_cast<int32_t*>(ARR_DATA_PTR(centroid_ids));
-    centroid_coordinates_data = reinterpret_cast<double*>(ARR_DATA_PTR(centroid_coordinates));
-    objective_functions_data = reinterpret_cast<double*>(ARR_DATA_PTR(objective_functions));
-    avg_distances_data = reinterpret_cast<double*>(ARR_DATA_PTR(avg_distances));
-    min_distances_data = reinterpret_cast<double*>(ARR_DATA_PTR(min_distances));
-    max_distances_data = reinterpret_cast<double*>(ARR_DATA_PTR(max_distances));
-    std_dev_distances_data = reinterpret_cast<double*>(ARR_DATA_PTR(std_dev_distances));
-    cluster_sizes_data = reinterpret_cast<int64_t*>(ARR_DATA_PTR(cluster_sizes));
-
-    /*
-     * we go through the centroids until we have exhausted all (valid) 
centroids - */ - current_position = 0; - while (current_position < actual_num_centroids) { - current_centroid = - kmeans_state->description.centroids[idx_current_centroids] + current_position; - - cluster_size = current_centroid->statistics.getPopulation(); - - centroid_ids_data[current_position] = current_centroid->id; - objective_functions_data[current_position] = current_centroid->statistics.getTotal(); - avg_distances_data[current_position] = current_centroid->statistics.getEmpiricalMean(); - min_distances_data[current_position] = - cluster_size > 0 ? current_centroid->statistics.getMin() : 0.; - max_distances_data[current_position] = current_centroid->statistics.getMax(); - std_dev_distances_data[current_position] = current_centroid->statistics.getEmpiricalStdDev(); - cluster_sizes_data[current_position] = cluster_size; - errorno = memcpy_s(centroid_coordinates_data + centroid_coordinates_offset, size_centroid_bytes, - ARR_DATA_PTR(current_centroid->coordinates), size_centroid_bytes); - securec_check(errorno, "\0", "\0"); - centroid_coordinates_offset += dimension; - ++current_position; - } - - kmeans_state->description.current_centroid = actual_num_centroids; - - /* - * Processing has finished and we can set the values accordingly - */ - values[0] = PointerGetDatum(centroid_ids); - values[1] = PointerGetDatum(centroid_coordinates); - values[2] = PointerGetDatum(objective_functions); - values[3] = PointerGetDatum(avg_distances); - values[4] = PointerGetDatum(min_distances); - values[5] = PointerGetDatum(max_distances); - values[6] = PointerGetDatum(std_dev_distances); - values[7] = PointerGetDatum(cluster_sizes); - values[8] = UInt64GetDatum(kmeans_state->description.num_good_points); - values[9] = UInt64GetDatum(kmeans_state->description.num_dead_points); - values[10] = Float8GetDatumFast(kmeans_state->description.seeding_time); - values[11] = Float8GetDatumFast(kmeans_state->description.execution_time); - values[12] = UInt32GetDatum(kmeans_state->description.actual_num_iterations); - values[13] = UInt32GetDatum(current_position); - values[14] = UInt64GetDatum(kmeans_node->parameters.external_seed); - /* no null attribute */ - errorno = memset_s(nulls, sizeof(bool) * NUM_ATTR_OUTPUT, 0, sizeof(bool) * NUM_ATTR_OUTPUT); - securec_check(errorno, "\0", "\0"); - - /* - * this saves one round of copying - */ - ExecStoreVirtualTuple(slot); - - pfree(datums_placeholder); - - return true; -} - -/* - * this function initializes the operator - */ -KMeansState* ExecInitKMeans(KMeans* kmeans_node, EState* estate, int eflags) { - KMeansState* kmeans_state = nullptr; - Plan* outer_plan = outerPlan(kmeans_node); - TupleDesc tup_desc_out; - uint32_t const num_centroids = kmeans_node->parameters.num_centroids; - uint32_t const dimension = kmeans_node->description.n_features; - uint16_t current_attr = 0; - - /* check for unsupported flags */ - Assert(!(eflags & (EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); - - /* - * create state structure - */ - kmeans_state = makeNode(KMeansState); - kmeans_state->sst.ps.plan = reinterpret_cast(kmeans_node); - kmeans_state->sst.ps.state = estate; - - /* - * initialize child nodes - */ - outerPlanState(kmeans_state) = ExecInitNode(outer_plan, estate, eflags); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &kmeans_state->sst.ps); - ExecInitScanTupleSlot(estate, &kmeans_state->sst); - - /* - * initialize tuple type. no need to initialize projection info because - * this node doesn't do projections. 
- */ - ExecAssignScanTypeFromOuterPlan(&kmeans_state->sst); - - /* - * we have to produce the record to be returned - */ - tup_desc_out = CreateTemplateTupleDesc(NUM_ATTR_OUTPUT, false); - - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "centroid_ids", INT4ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "centroid_coordinates", FLOAT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "objective_functions", FLOAT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "avg_distances", FLOAT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "min_distances", FLOAT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "max_distances", FLOAT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "std_dev_distances", FLOAT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "cluster_sizes", INT8ARRAYOID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "good_points", INT8OID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "bad_points", INT8OID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "seeding_time", FLOAT8OID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "execution_time", FLOAT8OID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "actual_number_iterations", INT4OID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "actual_number_centroids", INT4OID, -1, 0); - TupleDescInitEntry(tup_desc_out, (AttrNumber)++current_attr, "seed", INT8OID, -1, 0); - - BlessTupleDesc(tup_desc_out); - - ExecAssignResultType(&kmeans_state->sst.ps, tup_desc_out); - - /* - * Initialize result tuple type and projection info. - */ - ExecAssignProjectionInfo(&kmeans_state->sst.ps, nullptr); - kmeans_state->sst.ps.ps_TupFromTlist = false; - kmeans_state->sst.ps.ps_ProjInfo = nullptr; - - kmeans_state->done = false; - - kmeans_state->description.centroids[0] = - reinterpret_cast(palloc0(sizeof(Centroid) * num_centroids)); - kmeans_state->description.centroids[1] = - reinterpret_cast(palloc0(sizeof(Centroid) * num_centroids)); - - auto datums_tmp = reinterpret_cast(palloc0(sizeof(Datum) * dimension)); - - for (uint32_t c = 0; c < num_centroids; ++c) { - kmeans_state->description.centroids[0][c].id = c + 1; - kmeans_state->description.centroids[1][c].id = c + 1; - /* - * this is an internal array that will eventually hold the coordinates of a centroid - * its representation as a PG Array is legacy. 
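The two `centroids` arrays allocated above form a double buffer: each Lloyd iteration reads the set selected by the iteration's parity and aggregates the updated centroids into the other set. A tiny sketch of that indexing, with hypothetical helper names:

```cpp
#include <cstdint>

// Parity of the iteration picks the "current" centroid set; the other
// buffer is the one being rebuilt for the next iteration.
static inline uint32_t current_set(uint32_t iteration) { return iteration & 1U; }
static inline uint32_t next_set(uint32_t iteration) { return 1U - current_set(iteration); }
```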
It will be better to represent it as - * an array of double directly (in a manner that can be returned to the client directly) - */ - kmeans_state->description.centroids[0][c].coordinates = - construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); - kmeans_state->description.centroids[1][c].coordinates = - construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); - } - - /* - * general running time information of the operator - */ - kmeans_state->description.bbox_max = - construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); - kmeans_state->description.bbox_min = - construct_array(datums_tmp, dimension, FLOAT8OID, sizeof(float8), FLOAT8PASSBYVAL, 'd'); - kmeans_state->description.num_good_points = 0; - kmeans_state->description.num_dead_points = 0; - kmeans_state->description.current_iteration = 0; - kmeans_state->description.current_centroid = 0; - kmeans_state->description.dimension = dimension; - kmeans_state->description.num_centroids = num_centroids; - - switch (kmeans_node->description.distance) { - case KMEANS_L1: - kmeans_state->description.distance = l1; - break; - case KMEANS_L2: - kmeans_state->description.distance = l2; - break; - case KMEANS_LINF: - kmeans_state->description.distance = linf; - break; - case KMEANS_L2_SQUARED: - kmeans_state->description.distance = l2_squared; - break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means init: no known distance function: %u", kmeans_node->description.distance))); - } - - pfree(datums_tmp); - - return kmeans_state; -} - -void kmeans_deal_seeding_function(SeedingFunction seeding_function, uint32_t num_centroids, uint64_t num_points, - double sample_factor, uint32_t num_centroids_orig, Verbosity verbosity, - List** centroid_candidates_kmeans_bb, PlanState* outer_plan, - KMeansStateDescription* state_description, uint32_t const batch_size, - GSPoint* batch, uint32_t idx_current_centroids, - uint32_t idx_next_centroids, uint32_t const size_centroid_bytes, - std::mt19937_64 *prng) -{ - double sample_probability = 0.; - double oversampling = 0.; - struct timespec start_kmeans_round, finish_kmeans_round; - uint64_t num_candidates = 0ULL; - uint64_t prev_num_candidates = 0ULL; - double cost_kmeans_bb = 0.; - double op_error = 0.; - double local_elapsed_time = 0.; - uint32_t current_iteration_kmeans_bb = 0U; - - switch (seeding_function) { - case KMEANS_BB: - sample_probability = num_centroids >= num_points ? 
1.0 : sample_factor / num_points; - - /* - * the number of iterations of kmeans|| depends on the value of the initial solution - * if the solution is bad, we will iterate for longer time - * (one iteration consists of two data passes (one to sample candidates, and another - * to compute the cost of the (partial) solution - */ - oversampling = sample_factor * static_cast(num_centroids_orig); - if (verbosity > NO_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** k-means|| oversampling factor: %lf, " - "expected number of candidates per round: %lu", sample_factor, - static_cast(oversampling)))); - do { - clock_gettime(CLOCK_MONOTONIC, &start_kmeans_round); - /* - * this data pass will sample candidates - * (the probability of not choosing a single element is > 0 and thus we have to - * try until at least one candidate is found) - */ - - do { - prev_num_candidates = num_candidates; - ExecReScan(outer_plan); - *centroid_candidates_kmeans_bb = one_data_pass(outer_plan, state_description, batch_size, - batch, idx_current_centroids, - idx_next_centroids, size_centroid_bytes, - true, sample_probability, - KMEANS_INITIAL_CENTROIDS_BB_SAMPLE, - *centroid_candidates_kmeans_bb, nullptr, prng); - num_candidates = *centroid_candidates_kmeans_bb ? (*centroid_candidates_kmeans_bb)->length : 0ULL; - } while (num_candidates <= prev_num_candidates); - - /* - * this data pass will compute the cost of the (partial) solution - */ - reset_weights(*centroid_candidates_kmeans_bb); - cost_kmeans_bb = 0.; - ExecReScan(outer_plan); - *centroid_candidates_kmeans_bb = one_data_pass(outer_plan, state_description, batch_size, - batch, idx_current_centroids, - idx_next_centroids, size_centroid_bytes, - false, 0., KMEANS_INITIAL_CENTROIDS_BB_COMPUTE_COST, - *centroid_candidates_kmeans_bb, &cost_kmeans_bb, - nullptr); - - /* - * for the next iteration, sample probability changes according to the cost of the current - * solution - */ - twoDiv(oversampling, cost_kmeans_bb, &sample_probability, &op_error); - sample_probability += op_error; - - clock_gettime(CLOCK_MONOTONIC, &finish_kmeans_round); - local_elapsed_time = static_cast(finish_kmeans_round.tv_sec - start_kmeans_round.tv_sec); - local_elapsed_time += - static_cast(finish_kmeans_round.tv_nsec - start_kmeans_round.tv_nsec) / - 1000000000.0; - - if (verbosity == VERBOSE_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** k-means|| round %u stats: cost: %lf, total number of candidates: %u, " - "duration (s): %0.06lf", current_iteration_kmeans_bb + 1, - cost_kmeans_bb, (*centroid_candidates_kmeans_bb)->length, local_elapsed_time))); - - } while (likely((++current_iteration_kmeans_bb < NUM_ITERATIONS_KMEANSBB) && - (num_candidates < num_points))); - - break; - case KMEANS_RANDOM_SEED: - /* - * the expected number of points to sample - */ - oversampling = sample_factor * static_cast(num_centroids_orig); - /* - * if the number of centroids is larger than the number of data points (corner case) - * each data point becomes a centroid. otherwise we over sample (all data points could - * be sampled) - */ - sample_probability = num_centroids >= num_points ? 
1.0 : oversampling / num_points; - - if (verbosity > NO_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** random++ oversampling factor: %lf, expected number of candidates: %lu", - sample_factor, static_cast(oversampling)))); - - do { - ExecReScan(outer_plan); - *centroid_candidates_kmeans_bb = one_data_pass(outer_plan, state_description, batch_size, batch, - idx_current_centroids, idx_next_centroids, - size_centroid_bytes, true, sample_probability, - KMEANS_INITIAL_CENTROIDS_RANDOM_SAMPLE, - *centroid_candidates_kmeans_bb, nullptr, prng); - num_candidates = *centroid_candidates_kmeans_bb ? (*centroid_candidates_kmeans_bb)->length : 0ULL; - } while ((num_candidates < num_points) && (num_candidates < oversampling)); - reset_weights(*centroid_candidates_kmeans_bb); - break; - default: - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: not known seeding function: %u", seeding_function))); - } -} - -static void output_kmeans_state(Verbosity const verbosity, uint64_t num_good_points, uint64_t num_dead_points, - double local_elapsed_time) -{ - if (verbosity > NO_OUTPUT) { - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Initial statistics gathered:"))); - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Number of valid points: %lu", num_good_points))); - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Number of dead points: %lu", num_dead_points))); - if (verbosity == VERBOSE_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Duration (s): %0.6lf", local_elapsed_time))); - } -} - -/* - * this executes the k-means "training" by executing multiple scans through the data - * until convergence can be declared. - * the operator works in stages: - * 1) obtain general statistics about the input data (one data pass), - * 2) execute a seeding method (random++ or kmeans||) (at least one data pass but not more than 10), - * 3) run Lloyd's algorithm (at least one data pass) - */ -TupleTableSlot* ExecKMeans(KMeansState* kmeans_state_node) -{ - /* - * get information from the node - */ - auto kmeans_node = reinterpret_cast(kmeans_state_node->sst.ps.plan); - KMeansStateDescription& state_description = kmeans_state_node->description; - EState* estate = kmeans_state_node->sst.ps.state; - ScanDirection direction = estate->es_direction; - PlanState* outer_plan = outerPlanState(kmeans_state_node); - TupleTableSlot* kmeans_slot = nullptr; - TupleDesc tup_desc_scan = nullptr; - TupleDesc tup_desc_kmeans = ExecGetResultType(&kmeans_state_node->sst.ps); - GSPoint* batch = nullptr; - uint32_t const max_num_iterations = kmeans_node->parameters.num_iterations; - uint32_t idx_current_centroids = 0U; - uint32_t idx_next_centroids = 0U; - uint32_t num_centroids = state_description.num_centroids; - uint32_t const num_centroids_orig = num_centroids; - uint32_t const dimension = state_description.dimension; - uint32_t const size_centroid_bytes = sizeof(double) * dimension; - uint32_t const one_gb = 1ULL << 30; - uint32_t const max_batch_size = (one_gb - (sizeof(void*) * MAX_BATCH_SLOTS)) / size_centroid_bytes; - uint32_t const batch_size = kmeans_node->description.batch_size < max_batch_size ? 
- kmeans_node->description.batch_size : max_batch_size; - Verbosity const verbosity = kmeans_node->description.verbosity; - SeedingFunction const seeding_function = kmeans_node->description.seeding; - IncrementalStatistics* prev_solution_statistics = nullptr; - IncrementalStatistics* current_solution_statistics = nullptr; - double cost_fraction = 0.; - double cost_fraction_correction = 0.; - double local_elapsed_time = 0.; - double total_time = 0.; - double const sample_factor = num_centroids < 1000000 ? 4. : 2.; - double const tolerance = kmeans_node->parameters.tolerance; - List* centroid_candidates_kmeans_bb = nullptr; - struct timespec start_time, finish_time, start_kmeans_round, finish_kmeans_round; - uint64_t num_points = 0ULL; - uint64_t seed = kmeans_node->parameters.external_seed; - /* if the user-provided seed is 0 we take the current time but reset - * the higher order bits to be able to return this seed to the user - * as an int32_t so that the user can reproduce the run - * (observe that epoch 2^31 is around year 2038 and the shifts are - * mostly useless) - */ - if (seed == 0ULL) - kmeans_node->parameters.external_seed = seed = (get_time_ms() << 33U) >> 33U; - - Assert(seed != 0); - - uint64_t const external_seed = seed; - /* - * internal seed obtained from random.org. do not change it because - * it will (most probably) change the overall seed used and results - * will not be reproducible - */ - uint64_t const internal_seed = 0x274066DB9441E851ULL; - seed ^= internal_seed; - - /* - * high-quality prng (based Mersenne primes) - * nothing of this sort is currently available in the system - */ - std::mt19937_64 prng(seed); - - // check if training is already finished, if so we return the next centroid - if (!kmeans_state_node->done) { - /* - * Want to scan subplan in the forward direction while feeding rows to - * the algorithm - */ - estate->es_direction = ForwardScanDirection; - - tup_desc_scan = ExecGetResultType(outer_plan); - - /* - * we have to see that the column we are passed on is of type array - */ - Oid oidtype = tup_desc_scan->attrs[0]->atttypid; - - if (unlikely((oidtype != FLOAT8ARRAYOID))) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: data is not of type float8 (double precision) array"))); - - if (tup_desc_scan->natts != 1) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: relation should contain only a single attribute " - "(point coordinates in a double precision array)"))); - /* - * this is the array that will contain points in memory to be processed in a batch - */ - batch = reinterpret_cast(palloc0(sizeof(GSPoint) * batch_size)); - - /* - * Before iterating we have to find an initial set of centroids (seeds) - * either random seeds or using kmeans||, this is one table scan on which we - * can also obtain the diameter of the bounding box (for normalization for example) - * from this scan we can also obtain the coordinates of the bounding box - */ - clock_gettime(CLOCK_MONOTONIC, &start_time); - one_data_pass(outer_plan, &state_description, batch_size, batch, - 0, 1, size_centroid_bytes, false, 0., KMEANS_INIT, nullptr, nullptr, nullptr); - clock_gettime(CLOCK_MONOTONIC, &finish_time); - - local_elapsed_time = static_cast(finish_time.tv_sec - start_time.tv_sec); - local_elapsed_time += static_cast(finish_time.tv_nsec - start_time.tv_nsec) / 1000000000.0; - total_time += local_elapsed_time; - - output_kmeans_state(verbosity, 
state_description.num_good_points, state_description.num_dead_points, - local_elapsed_time); - - num_points = state_description.num_good_points; - /* - * if the number of centroids is larger than the number of data points we save useless computations - * by keeping track of the actual number of centroids that can be realized, in the very end - * we restore the original number of centroids - */ - state_description.num_centroids = num_centroids_orig > num_points ? num_points : num_centroids_orig; - num_centroids = state_description.num_centroids; - - if (unlikely(num_points == 0)) - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("k-means exec: no valid point found (no input array seems to be a one-dimensional array, " - "or no point seems to be fully dimensional (perhaps all points have a null dimension?))"))); - - /* - * it is time to produce an initial set of centroids to start with - */ - clock_gettime(CLOCK_MONOTONIC, &start_time); - - idx_current_centroids = state_description.current_iteration & 1U; - idx_next_centroids = 1U - idx_current_centroids; - - /* - * if the number of centers is larger than the number of data points we choose - * all data points (we have no other option). this is a non-sense corner case. - * for the time being we restrict to k's that fit in memory, later we can - * lift that restriction by spooling centroids to a file - * - * for random production we sample with probability (sample_factor * k)/n - * - * for kmeans|| we sample the very first points with probability sample_factor / n and - * later with probability (sample_factor * k * d(x))/sum(d(y)) - */ - kmeans_deal_seeding_function(seeding_function, num_centroids, num_points, sample_factor, num_centroids_orig, - verbosity, ¢roid_candidates_kmeans_bb, outer_plan, &state_description, - batch_size, batch, idx_current_centroids, idx_next_centroids, - size_centroid_bytes, &prng); - - /* - * once the set of candidates (> k) has been gathered, we produce k initial centroids using - * (weighted) kmeans++ - * observe that the output of this function are the k centroids stored in their place - * and the list of candidates is freed up inside the function, thus accessing the list - * is illegal - */ - if (verbosity > NO_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** k-means++ begin: consolidating %u candidates to %u centroid(s)", - centroid_candidates_kmeans_bb->length, num_centroids))); - - clock_gettime(CLOCK_MONOTONIC, &start_kmeans_round); - - centroid_candidates_kmeans_bb = kmeanspp(&state_description, centroid_candidates_kmeans_bb, - idx_current_centroids, size_centroid_bytes, &prng); - - clock_gettime(CLOCK_MONOTONIC, &finish_kmeans_round); - local_elapsed_time = static_cast(finish_kmeans_round.tv_sec - start_kmeans_round.tv_sec); - local_elapsed_time += - static_cast(finish_kmeans_round.tv_nsec - start_kmeans_round.tv_nsec) / - 1000000000.0; - if (verbosity == VERBOSE_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** k-means++ ended: duration (s): %0.06lf", local_elapsed_time))); - - Assert(!centroid_candidates_kmeans_bb); - - clock_gettime(CLOCK_MONOTONIC, &finish_time); - - local_elapsed_time = static_cast(finish_time.tv_sec - start_time.tv_sec); - local_elapsed_time += static_cast(finish_time.tv_nsec - start_time.tv_nsec) / 1000000000.0; - total_time += local_elapsed_time; - state_description.seeding_time = local_elapsed_time; - - if (verbosity == 
- - if (verbosity == VERBOSE_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Seed centroids constructed (%lu, %u): duration (s): %0.6lf", - num_points, state_description.current_centroid, - state_description.seeding_time))); - - - // we reset current_centroid so that later each centroid can be output - state_description.current_centroid = 0; - - /* - * Scan the sub-plan and feed all the tuples to kmeans (every single iteration). - * for these scans we will do batching - */ - do { - clock_gettime(CLOCK_MONOTONIC, &start_time); - idx_next_centroids = 1U - idx_current_centroids; - prev_solution_statistics = state_description.solution_statistics + idx_next_centroids; - - /* - * in this iteration we reset the set of next centroids to start with a clean set for aggregation - */ - reset_centroids(&state_description, idx_next_centroids, size_centroid_bytes); - - /* - * every iteration we have to reset the sub-plan to be able to re-scan it - */ - ExecReScan(outer_plan); - one_data_pass(outer_plan, &state_description, batch_size, batch, idx_current_centroids, - idx_next_centroids, size_centroid_bytes, false, 0., KMEANS_LLOYD, nullptr, nullptr, nullptr); - - /* - * let's produce the new set of centroids for the next iteration - * (this should be executed at the coordinator in a distributed environment) - */ - merge_centroids(&state_description, idx_current_centroids, idx_next_centroids, size_centroid_bytes); - - compute_cost(&state_description, idx_current_centroids); - current_solution_statistics = state_description.solution_statistics + idx_current_centroids; - - if (unlikely(state_description.current_iteration == 1)) { - cost_fraction = 1.; - } else { - twoDiv(prev_solution_statistics->getTotal(), current_solution_statistics->getTotal(), - &cost_fraction, &cost_fraction_correction); - cost_fraction += cost_fraction_correction; - twoDiff(cost_fraction, 1.0, &cost_fraction, &cost_fraction_correction); - cost_fraction += cost_fraction_correction; - } - - idx_current_centroids = idx_next_centroids; - - clock_gettime(CLOCK_MONOTONIC, &finish_time); - - local_elapsed_time = static_cast<double>(finish_time.tv_sec - start_time.tv_sec); - local_elapsed_time += static_cast<double>(finish_time.tv_nsec - start_time.tv_nsec) / 1000000000.0; - total_time += local_elapsed_time; - - if (verbosity == VERBOSE_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** iteration %u: duration (s): %0.6lf, " - "total relevant population %lu, " - "total cost %0.6lf, " - "average distance %0.6lf, " - "min distance %0.6lf, " - "max distance %0.6lf, " - "standard deviation of distances %0.6lf, " - "cost delta: %0.6lf", state_description.current_iteration, local_elapsed_time, - current_solution_statistics->getPopulation(), - current_solution_statistics->getTotal(), current_solution_statistics->getEmpiricalMean(), - current_solution_statistics->getMin(), current_solution_statistics->getMax(), - current_solution_statistics->getEmpiricalStdDev(), cost_fraction))); - } while ((++state_description.current_iteration <= max_num_iterations) - && (cost_fraction >= tolerance) && (num_points > num_centroids)); - - state_description.execution_time = total_time; - state_description.actual_num_iterations = state_description.current_iteration - 1;
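In plain double arithmetic the stopping rule of the loop above reduces to cost_fraction = prev_cost / curr_cost - 1 compared against the tolerance; the kernel computes the same quantity with the compensated primitives twoDiv/twoDiff so the tiny relative improvement is not lost to rounding. A simplified sketch of the rule:

    #include <cstdio>

    /* Plain-double version of the loop's stopping rule; the kernel derives the
     * same quantity with error-free transformations (twoDiv/twoDiff). */
    static bool keep_iterating(double prev_cost, double curr_cost, double tolerance,
                               unsigned iteration, unsigned max_iterations)
    {
        double cost_fraction = (iteration == 1) ? 1.0 : prev_cost / curr_cost - 1.0;
        return iteration <= max_iterations && cost_fraction >= tolerance;
    }

    int main()
    {
        /* cost improves from 100.0 to 99.99: relative delta ~1e-4 < 1e-3, so stop */
        printf("continue: %d\n", keep_iterating(100.0, 99.99, 1e-3, 2, 10));
        return 0;
    }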
- - if (verbosity > NO_OUTPUT) { - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Number of centroids constructed: %u", num_centroids))); - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Value of global objective function: %0.6lf", current_solution_statistics->getTotal()))); - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Seed: %lu", external_seed))); - if (verbosity == VERBOSE_OUTPUT) - ereport(NOTICE, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("*** Total duration time (s): %0.6lf", state_description.execution_time))); - } - - /* Finish scanning the subplan, it's safe to early free the memory of lefttree */ - ExecEarlyFree(outer_plan); - - /* - * restore to user specified direction - */ - estate->es_direction = direction; - - /* - * finally set the finished flag to true - */ - kmeans_state_node->done = true; - - /* - * we restore the original number of centroids - */ - state_description.num_centroids = num_centroids_orig; - - pfree(batch); - } - - kmeans_slot = kmeans_state_node->sst.ps.ps_ResultTupleSlot; - (void)kmeans_get_tupleslot(kmeans_state_node, kmeans_slot, tup_desc_kmeans); - - return kmeans_slot; -} - -/* - * this function frees up all the storage required by the algorithm; - * at this point the upper layer has made the model persistent and - * this information is not required anymore - */ -void ExecEndKMeans(KMeansState* kmeans_state_node) { - /* - * clean out the tuple table - */ - (void)ExecClearTuple(kmeans_state_node->sst.ps.ps_ResultTupleSlot); - - /* - * clean up subtrees - */ - auto kmeans_node = reinterpret_cast<KMeans*>(kmeans_state_node->sst.ps.plan); - - ExecEndNode(outerPlanState(kmeans_state_node)); - for (uint32_t c = 0; c < kmeans_node->parameters.num_centroids; ++c) { - pfree(kmeans_state_node->description.centroids[0][c].coordinates); - pfree(kmeans_state_node->description.centroids[1][c].coordinates); - } - - pfree(kmeans_state_node->description.centroids[0]); - pfree(kmeans_state_node->description.centroids[1]); - pfree(kmeans_state_node); -} diff --git a/src/gausskernel/runtime/executor/nodeLockRows.cpp b/src/gausskernel/runtime/executor/nodeLockRows.cpp index 75487e2f4..ad6a76e42 100755 --- a/src/gausskernel/runtime/executor/nodeLockRows.cpp +++ b/src/gausskernel/runtime/executor/nodeLockRows.cpp @@ -202,13 +202,14 @@ lnext: /* Need to merge the ustore logic with AM logic */ test = tableam_tuple_lock(bucket_rel, &tuple, &buffer, - estate->es_output_cid, lock_mode, erm->noWait, &tmfd, + estate->es_output_cid, lock_mode, erm->noWait, &tmfd, #ifdef ENABLE_MULTIPLE_NODES - false, false, false, estate->es_snapshot, NULL, true); + false, false, false, estate->es_snapshot, NULL, true, #else - false, true, false, estate->es_snapshot, NULL, true); + false, true, false, estate->es_snapshot, NULL, true, #endif - + false, InvalidTransactionId, + erm->waitSec); ReleaseBuffer(buffer); switch (test) { diff --git a/src/gausskernel/runtime/executor/nodeModifyTable.cpp b/src/gausskernel/runtime/executor/nodeModifyTable.cpp index d581bba15..31817a46b 100644 --- a/src/gausskernel/runtime/executor/nodeModifyTable.cpp +++ b/src/gausskernel/runtime/executor/nodeModifyTable.cpp @@ -141,12 +141,21 @@ static void CheckPlanOutput(Plan* subPlan, Relation resultRel) /* * Compared to pgxc, we have increased the stream plan, * this destroy the logic of the function ExecCheckPlanOutput. - * Modify to use targetlist of stream(VecToRow/RowToVec)->subplan as + * Modify to use targetlist of stream(VecToRow/RowToVec/PartIterator)->subplan as * parameter of ExecCheckPlanOutput.
*/ - if (IsA(subPlan, Stream) || IsA(subPlan, VecStream) || IsA(subPlan, VecToRow) || IsA(subPlan, RowToVec)) { - - if (IS_PGXC_COORDINATOR) { + switch (nodeTag(subPlan)) { + case T_Stream: + case T_VecStream: + case T_VecToRow: + case T_RowToVec: + case T_PartIterator: + case T_VecPartIterator: { +#ifdef ENABLE_MULTIPLE_NODES + if (!IS_PGXC_COORDINATOR) { + break; + } +#endif /* * dummy target list cannot pass ExecCheckPlanOutput, * so we desend until we found a non-dummy plan @@ -158,11 +167,14 @@ static void CheckPlanOutput(Plan* subPlan, Relation resultRel) subPlan = subPlan->lefttree; } while (has_dummy_targetlist(subPlan)); - /* now the plan is not dummy */ - ExecCheckPlanOutput(resultRel, subPlan->targetlist); + CheckPlanOutput(subPlan, resultRel); + + break; + } + default: { + ExecCheckPlanOutput(resultRel, subPlan->targetlist); + break; } - } else { - ExecCheckPlanOutput(resultRel, subPlan->targetlist); } } @@ -513,10 +525,14 @@ checktest: econtext->ecxt_outertuple = NULL; ExecProject(resultRelInfo->ri_updateProj, NULL); - - *returning = ExecUpdate(conflictTid, oldPartitionOid, bucketid, NULL, - upsertState->us_updateproj, planSlot, &mtstate->mt_epqstate, - mtstate, canSetTag, ((ModifyTable*)mtstate->ps.plan)->partKeyUpdated); + /* Evaluate where qual if exists, add to count if filtered */ + if (ExecQual(upsertState->us_updateWhere, econtext, false)) { + *returning = ExecUpdate(conflictTid, oldPartitionOid, bucketid, NULL, + upsertState->us_updateproj, planSlot, &mtstate->mt_epqstate, + mtstate, canSetTag, ((ModifyTable*)mtstate->ps.plan)->partKeyUpdated); + } else { + InstrCountFiltered1(&mtstate->ps, 1); + } tableam_tops_destroy_tuple(relation, tuple); ReleaseBuffer(buffer); return true; @@ -535,8 +551,85 @@ static inline void ReleaseResourcesForUpsertGPI(bool isgpi, Relation parentRel, } } +void CheckPartitionOidForSpecifiedPartition(RangeTblEntry *rte, Oid partitionid) +{ + if (rte->isContainPartition && rte->partitionOid != partitionid) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("inserted partition key does not map to the table partition"), errdetail("N/A."), + errcause("The value is incorrect."), erraction("Use the correct value.")))); + } +} + +void CheckSubpartitionOidForSpecifiedSubpartition(RangeTblEntry *rte, Oid partitionid, Oid subPartitionId) +{ + if (rte->isContainSubPartition && (rte->partitionOid != partitionid || rte->subpartitionOid != subPartitionId)) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("inserted subpartition key does not map to the table subpartition"), errdetail("N/A."), + errcause("The value is incorrect."), erraction("Use the correct value.")))); + } +} + +static void ReportErrorForSpecifiedPartitionOfUpsert(char *partition, char *table) +{ + ereport(ERROR, + (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The update target %s of upsert is inconsistent with the specified %s in " + "%s table.", + partition, partition, table), + errdetail("N/A."), + errcause("Specifies that the %s syntax does not allow updating inconsistent %s.", partition, partition), + erraction("Modify the SQL statement.")))); +} + +static void CheckPartitionOidForUpsertSpecifiedPartition(RangeTblEntry *rte, Relation resultRelationDesc, + Oid targetPartOid) +{ + if (RelationIsSubPartitioned(resultRelationDesc)) { + Oid parentOid = partid_get_parentid(targetPartOid); + if (rte->isContainPartition && rte->partitionOid != parentOid) { + char *partition = "partition"; + char *table = "subpartition"; + ReportErrorForSpecifiedPartitionOfUpsert(partition, 
table); + } + if (rte->isContainSubPartition && (rte->partitionOid != parentOid || rte->subpartitionOid != targetPartOid)) { + char *partition = "subpartition"; + char *table = "subpartition"; + ReportErrorForSpecifiedPartitionOfUpsert(partition, table); + } + } else { + if (rte->isContainPartition && rte->partitionOid != targetPartOid) { + char *partition = "partition"; + char *table = "partition"; + ReportErrorForSpecifiedPartitionOfUpsert(partition, table); + } + } +} + +static void ConstraintsForExecUpsert(Relation resultRelationDesc) +{ + if (resultRelationDesc == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The result relation is null"), errdetail("N/A."), errcause("System error."), + erraction("Contact engineer to support.")))); + } + if (unlikely(RelationIsCUFormat(resultRelationDesc))) { + ereport(ERROR, + (errmodule(MOD_EXECUTOR), + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ON DUPLICATE KEY UPDATE is not supported on column orientated table"), errdetail("N/A."), + errcause("The function is not supported."), erraction("Contact engineer to support.")))); + } + + if (unlikely(RelationIsPAXFormat(resultRelationDesc))) { + ereport(ERROR, (errmodule(MOD_EXECUTOR), + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ON DUPLICATE KEY UPDATE is not supported on DFS table"), errdetail("N/A."), + errcause("The function is not supported."), erraction("Contact engineer to support.")))); + } +} + static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableSlot* planSlot, EState* estate, - bool canSetTag, Tuple tuple, TupleTableSlot** returning, bool* updated) + bool canSetTag, Tuple tuple, TupleTableSlot** returning, bool* updated, Oid* targetPartOid) { Oid newid = InvalidOid; bool specConflict = false; @@ -545,8 +638,11 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS Relation resultRelationDesc = NULL; Relation heaprel = NULL; /* actual relation to upsert index */ Relation targetrel = NULL; /* actual relation to upsert tuple */ - Oid partitionid = InvalidOid; /* bucket id for bucket hash table */ + Oid partitionid = InvalidOid; Partition partition = NULL; /* partition info for partition table */ + Oid subPartitionId = InvalidOid; + Relation subPartRel = NULL; + Partition subPart = NULL; int2 bucketid = InvalidBktId; ConflictInfoData conflictInfo; UpsertState* upsertState = state->mt_upsert; @@ -558,23 +654,10 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; heaprel = resultRelationDesc; - if (heaprel == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("The result relation is null"))); - } - if (unlikely(RelationIsCUFormat(resultRelationDesc))) { - ereport(ERROR, - (errmodule(MOD_EXECUTOR), - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ON DUPLICATE KEY UPDATE is not supported on column orientated table")))); - } - if (unlikely(RelationIsPAXFormat(resultRelationDesc))) { - ereport(ERROR, - (errmodule(MOD_EXECUTOR), - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ON DUPLICATE KEY UPDATE is not supported on DFS table")))); - } + ConstraintsForExecUpsert(resultRelationDesc); + RangeTblEntry *rte = exec_rt_fetch(resultRelInfo->ri_RangeTableIndex, estate); if (RelationIsPartitioned(resultRelationDesc)) { partitionid = heapTupleGetPartitionId(resultRelationDesc, tuple); searchFakeReationForPartitionOid(estate->esfRelations, @@ 
-584,8 +667,28 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS heaprel, partition, RowExclusiveLock); + CheckPartitionOidForSpecifiedPartition(rte, partitionid); + + if (RelationIsSubPartitioned(resultRelationDesc)) { + subPartitionId = heapTupleGetPartitionId(heaprel, tuple); + searchFakeReationForPartitionOid(estate->esfRelations, + estate->es_query_cxt, + heaprel, + subPartitionId, + subPartRel, + subPart, + RowExclusiveLock); + CheckSubpartitionOidForSpecifiedSubpartition(rte, partitionid, subPartitionId); + + partitionid = subPartitionId; + heaprel = subPartRel; + partition = subPart; + } + + *targetPartOid = partitionid; } + vlock: targetrel = heaprel; if (RELATION_OWN_BUCKET(resultRelationDesc)) { bucketid = computeTupleBucketId(resultRelationDesc, (HeapTuple)tuple); @@ -594,8 +697,6 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS } } - - vlock: specConflict = false; bool isgpi = false; Oid conflictPartOid = InvalidOid; @@ -614,11 +715,12 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS partition_relation = partitionGetRelation(resultRelationDesc, part); targetrel = partition_relation; } - partitionid = conflictPartOid; + *targetPartOid = conflictPartOid; bucketid = conflictBucketid; } /* committed conflict tuple found */ if (upsertState->us_action == UPSERT_UPDATE) { + CheckPartitionOidForUpsertSpecifiedPartition(rte, resultRelationDesc, *targetPartOid); /* * In case of DUPLICATE KEY UPDATE, execute the UPDATE part. * Be prepared to retry if the UPDATE fails because @@ -627,7 +729,7 @@ static Oid ExecUpsert(ModifyTableState* state, TupleTableSlot* slot, TupleTableS *returning = NULL; if (ExecConflictUpdate(state, resultRelInfo, &conflictInfo, planSlot, slot, - estate, targetrel, partitionid, bucketid, canSetTag, returning)) { + estate, targetrel, *targetPartOid, bucketid, canSetTag, returning)) { InstrCountFiltered2(&state->ps, 1); *updated = true; ReleaseResourcesForUpsertGPI(isgpi, resultRelationDesc, bucketRel, &partition_relation, part); @@ -938,13 +1040,12 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("The tuple to be inserted into the table cannot be NULL"))); } - new_id = ExecUpsert(state, slot, planSlot, estate, canSetTag, tuple, &returning, &updated); + new_id = + ExecUpsert(state, slot, planSlot, estate, canSetTag, tuple, &returning, &updated, &partition_id); if (updated) { return returning; } - if (result_relation_desc->rd_rel->parttype == PARTTYPE_PARTITIONED_RELATION) { - partition_id = heapTupleGetPartitionId(result_relation_desc, tuple); - } + if (rel_isblockchain) { is_record = hist_table_record_insert(result_relation_desc, (HeapTuple)tuple, &res_hash); } @@ -956,6 +1057,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple * the t_self field. 
*/ new_id = InvalidOid; + RangeTblEntry *rte = exec_rt_fetch(result_rel_info->ri_RangeTableIndex, estate); switch (result_relation_desc->rd_rel->parttype) { case PARTTYPE_NON_PARTITIONED_RELATION: case PARTTYPE_VALUE_PARTITIONED_RELATION: { @@ -990,6 +1092,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple case PARTTYPE_PARTITIONED_RELATION: { /* get partititon oid for insert the record */ partition_id = heapTupleGetPartitionId(result_relation_desc, tuple); + CheckPartitionOidForSpecifiedPartition(rte, partition_id); searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, result_relation_desc, partition_id, heap_rel, partition, RowExclusiveLock); @@ -1026,6 +1129,7 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple /* get partititon oid for insert the record */ partitionId = heapTupleGetPartitionId(result_relation_desc, tuple); + CheckPartitionOidForSpecifiedPartition(rte, partitionId); searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, result_relation_desc, partitionId, partRel, part, @@ -1033,11 +1137,11 @@ TupleTableSlot* ExecInsertT(ModifyTableState* state, TupleTableSlot* slot, Tuple /* get subpartititon oid for insert the record */ subPartitionId = heapTupleGetPartitionId(partRel, tuple); + CheckSubpartitionOidForSpecifiedSubpartition(rte, partitionId, subPartitionId); searchFakeReationForPartitionOid(estate->esfRelations, estate->es_query_cxt, partRel, subPartitionId, subPartRel, subPart, RowExclusiveLock); - partition_id = subPartitionId; heap_rel = subPartRel; partition = subPart; @@ -2757,9 +2861,9 @@ TupleTableSlot* ExecModifyTable(ModifyTableState* node) * to rethink this later. */ ResetPerTupleExprContext(estate); - + t_thrd.xact_cxt.ActiveLobRelid = result_rel_info->ri_RelationDesc->rd_id; plan_slot = ExecProcNode(subPlanState); - + t_thrd.xact_cxt.ActiveLobRelid = InvalidOid; if (TupIsNull(plan_slot)) { record_first_time(); // Flush error recored if need @@ -2997,7 +3101,7 @@ TupleTableSlot* ExecModifyTable(ModifyTableState* node) node->mt_done = true; ResetTrigShipFlag(); - + return NULL; } @@ -3085,6 +3189,7 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl upsertState->us_existing = NULL; upsertState->us_excludedtlist = NIL; upsertState->us_updateproj = NULL; + upsertState->us_updateWhere = NIL; mt_state->mt_upsert = upsertState; /* set up epqstate with dummy subplan data for the moment */ @@ -3314,6 +3419,11 @@ ModifyTableState* ExecInitModifyTable(ModifyTable* node, EState* estate, int efl result_rel_info->ri_updateProj = ExecBuildProjectionInfo((List*)setexpr, econtext, upsertState->us_updateproj, result_rel_info->ri_RelationDesc->rd_att); + + /* initialize expression state to evaluate update where clause if exists */ + if (node->upsertWhere) { + upsertState->us_updateWhere = (List*)ExecInitExpr((Expr*)node->upsertWhere, &mt_state->ps); + } } /* diff --git a/src/gausskernel/runtime/executor/nodePartIterator.cpp b/src/gausskernel/runtime/executor/nodePartIterator.cpp index 75f3e2c15..5bcac7821 100755 --- a/src/gausskernel/runtime/executor/nodePartIterator.cpp +++ b/src/gausskernel/runtime/executor/nodePartIterator.cpp @@ -83,15 +83,37 @@ static int GetScanPartitionNum(PartIteratorState* node) return partitionScan; } +void SetPartitionIteratorParamter(PartIteratorState* node, List* subPartLengthList) +{ + if (subPartLengthList != NIL) { + if (node->currentItr == -1) { + node->currentItr++; + } + + int subPartLength = 
(int)list_nth_int(subPartLengthList, node->currentItr); + if (node->subPartCurrentItr + 1 >= subPartLength) { + node->currentItr++; + node->subPartCurrentItr = -1; + } + node->subPartCurrentItr++; + unsigned int subitr_idx = node->subPartCurrentItr; + PartIterator* pi_node = (PartIterator*)node->ps.plan; + int subPartParamno = pi_node->param->subPartParamno; + ParamExecData* subPartParam = &(node->ps.state->es_param_exec_vals[subPartParamno]); + subPartParam->isnull = false; + subPartParam->value = (Datum)subitr_idx; + node->ps.lefttree->chgParam = bms_add_member(node->ps.lefttree->chgParam, subPartParamno); + } else { + node->currentItr++; + } +} + static void InitScanPartition(PartIteratorState* node, int partitionScan) { int paramno = 0; - int subPartParamno = 0; unsigned int itr_idx = 0; - unsigned int subitr_idx = 0; PartIterator* pi_node = (PartIterator*)node->ps.plan; ParamExecData* param = NULL; - ParamExecData* subPartParam = NULL; PlanState* noden = (PlanState*)node->ps.lefttree; List *subPartLengthList = NIL; if (IsA(noden, VecToRowState)) { @@ -104,25 +126,7 @@ static void InitScanPartition(PartIteratorState* node, int partitionScan) Assert(ForwardScanDirection == pi_node->direction || BackwardScanDirection == pi_node->direction); /* set iterator parameter */ - if (subPartLengthList != NIL) { - if (node->currentItr == -1) { - node->currentItr++; - } - int subPartLength = (int)list_nth_int(subPartLengthList, node->currentItr); - if (node->subPartCurrentItr + 1 >= subPartLength) { - node->currentItr++; - node->subPartCurrentItr = -1; - } - node->subPartCurrentItr++; - subitr_idx = node->subPartCurrentItr; - subPartParamno = pi_node->param->subPartParamno; - subPartParam = &(node->ps.state->es_param_exec_vals[subPartParamno]); - subPartParam->isnull = false; - subPartParam->value = (Datum)subitr_idx; - node->ps.lefttree->chgParam = bms_add_member(node->ps.lefttree->chgParam, subPartParamno); - } else { - node->currentItr++; - } + SetPartitionIteratorParamter(node, subPartLengthList); itr_idx = node->currentItr; if (BackwardScanDirection == pi_node->direction) diff --git a/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp b/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp index 36708502b..ca571f4c8 100644 --- a/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp +++ b/src/gausskernel/runtime/executor/nodeRecursiveunion.cpp @@ -136,6 +136,17 @@ static inline void RecursiveUnionWaitCondNegtive(const bool* true_cond, const bo return; } +static void markIterationStats(RecursiveUnionState* node, bool isSW) +{ + if (node->ps.instrument == NULL) { + return; + } + if (isSW) { + markSWLevelEnd(node->swstate, node->swstate->sw_numtuples); + markSWLevelBegin(node->swstate); + } +} + /* ---------------------------------------------------------------- * ExecRecursiveUnion(node) * @@ -160,7 +171,9 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) PlanState* inner_plan = innerPlanState(node); RecursiveUnion* plan = (RecursiveUnion*)node->ps.plan; TupleTableSlot* slot = NULL; + TupleTableSlot* swSlot = NULL; bool is_new = false; + bool isSW = IsUnderStartWith((RecursiveUnion *)node->ps.plan); /* 0. 
build hash table if it is NULL */ if (plan->numCols > 0) { @@ -173,8 +186,10 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) if (!node->recursing) { for (;;) { slot = ExecProcNode(outer_plan); - if (TupIsNull(slot)) + if (TupIsNull(slot)) { + markIterationStats(node, isSW); break; + } if (plan->numCols > 0) { /* Find or build hashtable entry for this tuple's group */ LookupTupleHashEntry(node->hashtable, slot, &is_new); @@ -187,11 +202,17 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) } /* - * Add RUITR and nessary internal pseudo columns before store into worktable, - * only processed in start-with case + * For START WITH CONNECT BY, create converted tuple with pseudo columns. */ - if (IsUnderStartWith((RecursiveUnion *)node->ps.plan)) { - slot = ConvertRuScanOutputSlot(node, slot, false); + slot = isSW ? ConvertRuScanOutputSlot(node, slot, false) : slot; + swSlot = isSW ? GetStartWithSlot(node, slot) : NULL; + if (isSW && swSlot == NULL) { + /* + * SWCB terminal condition met. Time to stop. + * Discarding the last tuple. + */ + markSWLevelEnd(node->swstate, node->swstate->sw_numtuples - 1); + break; } /* Each non-duplicate tuple goes to the working table ... */ @@ -201,7 +222,7 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) node->step_tuple_produced++; /* ... and to the caller */ - return slot; + return (isSW ? swSlot : slot); } /* Mark none-recursive part is down */ @@ -237,12 +258,15 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) slot = ExecProcNode(inner_plan); if (TupIsNull(slot)) { /* debug information for SWCBcase */ - if (IsUnderStartWith((RecursiveUnion *)node->ps.plan)) { + if (IsUnderStartWith((RecursiveUnion *)node->ps.plan) && + !node->intermediate_empty) { ereport(DEBUG1, (errmodule(MOD_EXECUTOR), errmsg("[SWCB DEBUG] current iteration is done: level:%d rownum_current:%d rownum_total:%lu", node->iteration + 1, node->swstate->sw_numtuples, node->swstate->sw_rownum))); + markSWLevelEnd(node->swstate, node->swstate->sw_numtuples); + markSWLevelBegin(node->swstate); } #ifdef ENABLE_MULTIPLE_NODES /* @@ -348,7 +372,8 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) /* For start-with, reason ditto */ - if (IsUnderStartWith((RecursiveUnion *)node->ps.plan)) { + bool isSW = IsUnderStartWith((RecursiveUnion*)node->ps.plan); + if (isSW) { int max_times = u_sess->attr.attr_sql.max_recursive_times; StartWithOp *swplan = (StartWithOp *)node->swstate->ps.plan; @@ -365,6 +390,15 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) } slot = ConvertRuScanOutputSlot(node, slot, true); + swSlot = GetStartWithSlot(node, slot); + if (isSW && swSlot == NULL) { + /* + * SWCB terminal condition met. Time to stop. + * Discarding the last tuple. + */ + markSWLevelEnd(node->swstate, node->swstate->sw_numtuples - 1); + break; + } /* * In ORDER SIBLINGS case, as we add SORT-Operator(material) on top of @@ -373,7 +407,7 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) if (swplan->swoptions->siblings_orderby_clause) { StartWithOpState *swstate = (StartWithOpState *)node->swstate; if (swstate->sw_nocycleStopOrderSiblings) { - return (TupleTableSlot *)NULL; + return (TupleTableSlot*)NULL; } if (CheckCycleExeception(swstate, slot)) { @@ -391,6 +425,8 @@ TupleTableSlot* ExecRecursiveUnion(RecursiveUnionState* node) tuplestore_puttupleslot(node->intermediate_table, slot); /* ... and return it */ + /* it is okay to point slot to swSlot and return now, if necessary */ + slot = isSW ? 
swSlot : slot; inner_plan->state->es_skip_early_free = orig_early_free; #ifdef ENABLE_MULTIPLE_NODES diff --git a/src/gausskernel/runtime/executor/nodeSeqscan.cpp b/src/gausskernel/runtime/executor/nodeSeqscan.cpp index 0c62be10f..aa3a1b6b6 100644 --- a/src/gausskernel/runtime/executor/nodeSeqscan.cpp +++ b/src/gausskernel/runtime/executor/nodeSeqscan.cpp @@ -48,7 +48,6 @@ #include "parser/parsetree.h" #include "access/ustore/knl_uheap.h" #include "access/ustore/knl_uscan.h" - #include "optimizer/var.h" #include "optimizer/tlist.h" @@ -190,6 +189,37 @@ static TupleTableSlot* SeqNext(SeqScanState* node); static void ExecInitNextPartitionForSeqScan(SeqScanState* node); +template <TableAmType type, bool hashBucket> +FORCE_INLINE +void seq_scan_getnext_template(TableScanDesc scan, TupleTableSlot* slot, ScanDirection direction) +{ + Tuple tuple; + if(hashBucket) { + /* fall back to the original slow function. */ + tuple = scan_handler_tbl_getnext(scan, direction, NULL); + } else if(type == TAM_HEAP) { + tuple = (Tuple)heap_getnext(scan, direction); + } else { + tuple = (Tuple)UHeapGetNext(scan, direction); + } + if (hashBucket) { + scan = ((HBktTblScanDesc)scan)->currBktScan; + } + if (tuple != NULL) { + Assert(slot != NULL); + Assert(slot->tts_tupleDescriptor != NULL); + slot->tts_tupslotTableAm = type; + if (type == TAM_USTORE) { + UHeapSlotStoreUHeapTuple((UHeapTuple)tuple, slot, false, false); + } else { + HeapTuple htup = (HeapTuple)tuple; + heap_slot_store_heap_tuple(htup, slot, scan->rs_cbuf, false, false); + } + } else { + ExecClearTuple(slot); + } +} + /* ---------------------------------------------------------------- * Scan Support * ---------------------------------------------------------------- @@ -202,10 +232,8 @@ static void ExecInitNextPartitionForSeqScan(SeqScanState* node); */ static TupleTableSlot* SeqNext(SeqScanState* node) { - Tuple tuple; TableScanDesc scanDesc; EState* estate = NULL; - ScanDirection direction; TupleTableSlot* slot = NULL; /* @@ -213,7 +241,6 @@ static TupleTableSlot* SeqNext(SeqScanState* node) */ scanDesc = node->ss_currentScanDesc; estate = node->ps.state; - direction = estate->es_direction; slot = node->ss_ScanTupleSlot; GetTableScanDesc(scanDesc, node->ss_currentRelation)->rs_ss_accessor = node->ss_scanaccessor; @@ -221,23 +248,9 @@ static TupleTableSlot* SeqNext(SeqScanState* node) /* * get the next tuple from the table for seqscan. */ - tuple = scan_handler_tbl_getnext(scanDesc, direction, node->ss_currentRelation); + node->fillNextSlotFunc(scanDesc, slot, estate->es_direction); - ADIO_RUN() - { - Start_Prefetch(GetTableScanDesc(scanDesc, node->ss_currentRelation), node->ss_scanaccessor, direction); - } - ADIO_END(); - - /* - * save the tuple and the buffer returned to us by the access methods in - * our scan tuple slot and return the slot. Note: we pass 'false' because - * tuples returned by heap_getnext() are pointers onto disk pages and were - * not created with palloc() and so should not be pfree_ext()'d. Note also - * that ExecStoreTuple will increment the refcount of the buffer; the - * refcount will not be dropped until the tuple table slot is cleared. - */ - return ExecMakeTupleSlot(tuple, GetTableScanDesc(scanDesc, node->ss_currentRelation), slot, node->ss_currentRelation->rd_tam_type); + return slot; }
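The templated getnext above moves the table-AM and hash-bucket branches to compile time; InitSeqNextMtd (further down in this patch) picks one concrete instantiation and stores it in fillNextSlotFunc, so SeqNext pays no per-tuple dispatch cost beyond one indirect call. A minimal sketch of the pattern with stand-in types, not the kernel's definitions:

    #include <cstdio>

    enum TamType { TAM_HEAP, TAM_USTORE }; /* stand-in for the kernel's table-AM enum */

    template <TamType type, bool hashBucket>
    void getnext_template()
    {
        /* branches on template parameters are resolved at compile time */
        printf("%s scan, bucketed: %d\n",
               type == TAM_HEAP ? "heap" : "ustore", (int)hashBucket);
    }

    using FillNextSlotFunc = void (*)();

    int main()
    {
        /* mirrors InitSeqNextMtd: choose one instantiation once, call it per tuple */
        FillNextSlotFunc fn = getnext_template<TAM_HEAP, false>;
        fn();
        return 0;
    }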
/* @@ -252,6 +265,65 @@ static bool SeqRecheck(SeqScanState* node, TupleTableSlot* slot) return true; } +template <TableAmType tableType> +void ExecStoreTupleBatchMode(TableScanDesc scanDesc, TupleTableSlot** slot) +{ + /* sanity checks */ + Assert(scanDesc != NULL); + + /* + * save the tuple and the buffer returned to us by the access methods in + * our scan tuple slots and return the slots in array mode. Note + * that ExecStoreTupleBatch will increment the refcount of the buffer; the + * refcount will not be dropped until the tuple table slot is cleared. + */ + for (int i = 0; i < scanDesc->rs_ctupRows; i++) { + if (tableType == TAM_USTORE) { + UHeapSlotStoreUHeapTuple(((UHeapScanDesc)scanDesc)->rs_ctupBatch[i], + slot[i], false, i != 0); + } else { + heap_slot_store_heap_tuple(&((HeapScanDesc)scanDesc)->rs_ctupBatch[i], + slot[i], scanDesc->rs_cbuf, false, i != 0); + } + } +} + +static ScanBatchResult *SeqNextBatchMode(SeqScanState *node) +{ + TableScanDesc scanDesc; + EState *estate = NULL; + ScanDirection direction; + TupleTableSlot **slot = NULL; + + /* get information from the estate and scan state */ + scanDesc = node->ss_currentScanDesc; + estate = node->ps.state; + direction = estate->es_direction; + slot = &(node->scanBatchState->scanBatch.scanTupleSlotInBatch[0]); + + /* get tuples from the table. */ + scanDesc->rs_maxScanRows = node->scanBatchState->scanTupleSlotMaxNum; + node->scanBatchState->scanfinished = tableam_scan_gettuplebatchmode(scanDesc, direction); + + if (slot[0]->tts_tupslotTableAm == TAM_USTORE) { + ExecStoreTupleBatchMode<TAM_USTORE>(scanDesc, slot); + } else { + ExecStoreTupleBatchMode<TAM_HEAP>(scanDesc, slot); + } + + /* + * As all tuples are from the same page, we only pin the buffer for slot[0] in ExecStoreTupleBatch. + * Here we reset slot[0] when scandesc->rs_ctupRows equals to zero. + */ + if (scanDesc->rs_ctupRows == 0) { + ExecClearTuple(slot[0]); + } + node->scanBatchState->scanBatch.rows = scanDesc->rs_ctupRows; + Assert(scanDesc->rs_ctupRows <= BatchMaxSize); + return &node->scanBatchState->scanBatch; +} + +
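Note the i != 0 argument in the batch store above: all tuples of a batch come from one page, so only slot[0] takes the shared-buffer pin and it is released when slot[0] is cleared. A toy sketch of this pin-once-per-batch idea (Buffer and pin are stand-ins, not the kernel's buffer-manager API):

    #include <cstdio>

    struct Buffer { int pins = 0; }; /* stand-in, not the kernel's buffer manager */

    static void pin(Buffer* b) { b->pins++; }

    static void store_batch(Buffer* page, int nrows)
    {
        for (int i = 0; i < nrows; i++) {
            bool alreadyPinned = (i != 0); /* same convention as the loop above */
            if (!alreadyPinned) {
                pin(page); /* only the first slot takes the page pin */
            }
            /* ... copy row i's datums into slot i ... */
        }
        printf("rows: %d, pins taken: %d\n", nrows, page->pins);
    }

    int main()
    {
        Buffer page;
        store_batch(&page, 5); /* prints "rows: 5, pins taken: 1" */
        return 0;
    }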
/* ---------------------------------------------------------------- * ExecSeqScan(node) * * @@ -263,7 +335,11 @@ static bool SeqRecheck(SeqScanState* node, TupleTableSlot* slot) */ TupleTableSlot* ExecSeqScan(SeqScanState* node) { - return ExecScan((ScanState*)node, node->ScanNextMtd, (ExecScanRecheckMtd)SeqRecheck); + if (node->scanBatchMode) { + return (TupleTableSlot *)SeqNextBatchMode(node); + } else { + return ExecScan((ScanState *) node, node->ScanNextMtd, (ExecScanRecheckMtd) SeqRecheck); + } } /* ---------------------------------------------------------------- @@ -313,10 +389,18 @@ RangeScanInRedis reset_scan_qual(Relation curr_heap_rel, ScanState* node, bool i } if (u_sess->attr.attr_sql.enable_cluster_resize && RelationInRedistribute(curr_heap_rel)) { List* new_qual = eval_ctid_funcs(curr_heap_rel, node->ps.plan->qual, &node->rangeScanInRedis); - node->ps.qual = (List*)ExecInitExpr((Expr*)new_qual, (PlanState*)&node->ps); + if (!node->scanBatchMode) { + node->ps.qual = (List*)ExecInitExpr((Expr*)new_qual, (PlanState*)&node->ps); + } else { + node->ps.qual = (List*)ExecInitVecExpr((Expr*)new_qual, (PlanState*)&node->ps); + } node->ps.qual_is_inited = true; } else if (!node->ps.qual_is_inited) { - node->ps.qual = (List*)ExecInitExpr((Expr*)node->ps.plan->qual, (PlanState*)&node->ps); + if (!node->scanBatchMode) { + node->ps.qual = (List*)ExecInitExpr((Expr*)node->ps.plan->qual, (PlanState*)&node->ps); + } else { + node->ps.qual = (List*)ExecInitVecExpr((Expr*)node->ps.plan->qual, (PlanState*)&node->ps); + } node->ps.qual_is_inited = true; node->rangeScanInRedis = {false,0,0}; } @@ -536,10 +620,150 @@ void InitScanRelation(SeqScanState* node, EState* estate, int eflags) ExecAssignScanType(node, RelationGetDescr(current_relation)); } + +static void InitRelationBatchScanEnv(SeqScanState *state) +{ + /* so we use a MemContext which is not freed at all until the end. */ + ProjectionInfo *proj = state->ps.ps_ProjInfo; + ScanBatchState* batchstate = state->scanBatchState; + batchstate->maxcolId = 0; + + if (proj->pi_acessedVarNumbers == NIL) { + return; + } + + List *pColList = proj->pi_acessedVarNumbers; + + batchstate->colNum = list_length(pColList); + batchstate->lateRead = (bool *)palloc0(sizeof(bool) * batchstate->colNum); + batchstate->colId = (int *)palloc(sizeof(int) * batchstate->colNum); + + int i = 0; + ListCell *cell = NULL; + + /* Initialize which columns should be accessed */ + foreach (cell, pColList) { + Assert(lfirst_int(cell) > 0); + batchstate->colId[i] = lfirst_int(cell) - 1; + batchstate->lateRead[i] = false; + i++; + } + + /* Initialize which columns will be late read */ + foreach (cell, proj->pi_lateAceessVarNumbers) { + int colId = lfirst_int(cell) - 1; + for (i = 0; i < batchstate->colNum; ++i) { + if (batchstate->colId[i] == colId) { + batchstate->lateRead[i] = true; + break; + } + } + } +} + +static SeqScanState *ExecInitSeqScanBatchMode(SeqScan *node, SeqScanState* scanstate, EState* estate) +{ + /* + * scanBatchMode is forcibly set to true if its parent node is RowToVec. + * Here we check again to see whether batch mode scan is supported. + */ + if (node->scanBatchMode) { + Relation currentRelation = scanstate->ss_currentRelation; + /* + * batch mode only supports: + * 1. the relation scan method is not sample. + * 2.
the columns in target list and qual are supported by column store. + * 3. the relation is not a system relation. + * 4. the relation is not a hash bucket relation. + */ + if (node->tablesample || + !CheckColumnsSuportedByBatchMode(scanstate->ps.targetlist, node->plan.qual) || + currentRelation->rd_id < FirstNormalObjectId || + RELATION_OWN_BUCKET(currentRelation)) { + node->scanBatchMode = false; + } + } + scanstate->scanBatchMode = node->scanBatchMode; + + /* + * init variables for batch scan mode. + * a NULL scanstate->ss_currentScanDesc means that the scan will not execute. + */ + if (node->scanBatchMode && scanstate->ss_currentScanDesc) { + int i = 0; + ScanBatchState *scanBatchState = (ScanBatchState*)palloc0(sizeof(ScanBatchState)); + scanstate->scanBatchState = scanBatchState; + scanstate->ps.subPlan = NULL; + + if (scanstate->ss_currentRelation->rd_tam_type == TAM_HEAP) { + HeapScanDesc heapDesc = (HeapScanDesc)(scanstate->ss_currentScanDesc); + heapDesc->rs_ctupBatch = (HeapTupleData*)palloc(sizeof(HeapTupleData) * BatchMaxSize); + } else { + UHeapScanDesc uheapDesc = (UHeapScanDesc)(scanstate->ss_currentScanDesc); + uheapDesc->rs_ctupBatch = (UHeapTuple*)palloc(sizeof(UHeapTuple) * BatchMaxSize); + } + + scanBatchState->scanBatch.scanTupleSlotInBatch = + (TupleTableSlot**)palloc(sizeof(TupleTableSlot*) * BatchMaxSize); + for (i = 0; i < BatchMaxSize; i++) { + TupleTableSlot* slot = ExecAllocTableSlot(&estate->es_tupleTable, + scanstate->ss_currentRelation->rd_tam_type); + ExecSetSlotDescriptor(slot, scanstate->ss_ScanTupleSlot->tts_tupleDescriptor); + scanBatchState->scanBatch.scanTupleSlotInBatch[i] = slot; + } + + /* prepare variables for batch mode scan, also can be found in ExecInitCStoreScan */ + ExecAssignVectorForExprEval(scanstate->ps.ps_ExprContext); + + scanstate->ps.targetlist = (List *)ExecInitVecExpr((Expr *)node->plan.targetlist, (PlanState *)scanstate); + scanBatchState->pCurrentBatch = New(CurrentMemoryContext) VectorBatch(CurrentMemoryContext, scanstate->ps.ps_ResultTupleSlot->tts_tupleDescriptor); + scanBatchState->pScanBatch = New(CurrentMemoryContext)VectorBatch(CurrentMemoryContext, scanstate->ss_currentRelation->rd_att); + + scanBatchState->nullflag = (bool*)palloc0(sizeof(bool) * scanBatchState->pScanBatch->m_cols); + + scanstate->ps.ps_ProjInfo = + ExecBuildVecProjectionInfo(scanstate->ps.targetlist, node->plan.qual, scanstate->ps.ps_ExprContext, + scanstate->ps.ps_ResultTupleSlot, scanstate->ss_ScanTupleSlot->tts_tupleDescriptor); + + scanstate->ps.qual = (List *)ExecInitVecExpr((Expr *)scanstate->ps.plan->qual, (PlanState *)&scanstate->ps); + + InitRelationBatchScanEnv(scanstate); + for (i = 0; i < scanBatchState->colNum; i++) { + scanBatchState->maxcolId = Max(scanBatchState->maxcolId, scanBatchState->colId[i]); + } + scanBatchState->maxcolId++; + + /* the following code implements OptimizeProjectionAndFilter(scanstate); */ + ProjectionInfo *proj = NULL; + bool fSimpleMap = false; + proj = scanstate->ps.ps_ProjInfo; + + /* Check if it is simple without need to invoke projection code */ + fSimpleMap = proj->pi_directMap && (scanBatchState->pCurrentBatch->m_cols == proj->pi_numSimpleVars); + scanstate->ps.ps_ProjInfo->pi_directMap = fSimpleMap; + scanBatchState->scanfinished = false; + } + + return scanstate; +} + static inline void InitSeqNextMtd(SeqScan* node, SeqScanState* scanstate) { if (!node->tablesample) { scanstate->ScanNextMtd = SeqNext; + if(RELATION_OWN_BUCKET(scanstate->ss_currentRelation)) { + if(scanstate->ss_currentRelation->rd_tam_type ==
TAM_HEAP) + scanstate->fillNextSlotFunc = seq_scan_getnext_template<TAM_HEAP, true>; + else + scanstate->fillNextSlotFunc = seq_scan_getnext_template<TAM_USTORE, true>; + } else { + if(scanstate->ss_currentRelation->rd_tam_type == TAM_HEAP) + scanstate->fillNextSlotFunc = seq_scan_getnext_template<TAM_HEAP, false>; + else + scanstate->fillNextSlotFunc = seq_scan_getnext_template<TAM_USTORE, false>; + } } else { if (RELATION_OWN_BUCKET(scanstate->ss_currentRelation)) { scanstate->ScanNextMtd = HbktSeqSampleNext; @@ -684,6 +908,7 @@ SeqScanState* ExecInitSeqScan(SeqScan* node, EState* estate, int eflags) } else { scanstate->isSampleScan = true; tsc = node->tablesample; + node->scanBatchMode = false; } /* @@ -708,8 +933,6 @@ SeqScanState* ExecInitSeqScan(SeqScan* node, EState* estate, int eflags) /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &scanstate->ps); - ExecInitScanTupleSlot(estate, scanstate); InitScanRelation(scanstate, estate, eflags); @@ -743,6 +966,8 @@ SeqScanState* ExecInitSeqScan(SeqScan* node, EState* estate, int eflags) ExecAssignScanProjectionInfo(scanstate); + ExecInitSeqScanBatchMode(node, scanstate, estate); + AttrNumber natts = scanstate->ss_ScanTupleSlot->tts_tupleDescriptor->natts; AttrNumber lastVar = -1; bool *isNullProj = NULL; @@ -1068,4 +1293,14 @@ static void ExecInitNextPartitionForSeqScan(SeqScanState* node) } ADIO_END(); } + + if (node->scanBatchMode) { + if (node->ss_currentRelation->rd_tam_type == TAM_HEAP) { + HeapScanDesc heapDesc = (HeapScanDesc)node->ss_currentScanDesc; + heapDesc->rs_ctupBatch = (HeapTupleData*)palloc(sizeof(HeapTupleData) * BatchMaxSize); + } else { + UHeapScanDesc uheapDesc = (UHeapScanDesc)node->ss_currentScanDesc; + uheapDesc->rs_ctupBatch = (UHeapTuple*)palloc(sizeof(UHeapTuple) * BatchMaxSize); + } + } } diff --git a/src/gausskernel/runtime/executor/nodeSort.cpp b/src/gausskernel/runtime/executor/nodeSort.cpp index 06f25aef3..e2cb8b8d2 100644 --- a/src/gausskernel/runtime/executor/nodeSort.cpp +++ b/src/gausskernel/runtime/executor/nodeSort.cpp @@ -54,7 +54,6 @@ TupleTableSlot* ExecSort(SortState* node) * get state info from node */ SO1_printf("ExecSort: %s\n", "entering routine"); - WaitState old_status = pgstat_report_waitstatus(STATE_EXEC_SORT); EState* estate = node->ss.ps.state; ScanDirection dir = estate->es_direction; @@ -120,6 +119,7 @@ TupleTableSlot* ExecSort(SortState* node) } node->tuplesortstate = (void*)tuple_sortstate; + WaitState old_status = pgstat_report_waitstatus(STATE_EXEC_SORT_FETCH_TUPLE); /* * Scan the subplan and feed all the tuples to tuplesort. @@ -135,6 +135,9 @@ TupleTableSlot* ExecSort(SortState* node) #endif /* PGXC */ tuplesort_puttupleslot(tuple_sortstate, slot); } + + pgstat_report_waitstatus(STATE_EXEC_SORT); + sort_count(tuple_sortstate); /* @@ -160,6 +163,7 @@ TupleTableSlot* ExecSort(SortState* node) * Complete the sort.
*/ tuplesort_performsort(tuple_sortstate); + (void)pgstat_report_waitstatus(old_status); /* * restore to user specified direction @@ -192,7 +196,6 @@ TupleTableSlot* ExecSort(SortState* node) &(plan_state->instrument->sorthashinfo.spaceUsed)); } SO1_printf("ExecSort: %s\n", "sorting done"); - (void)pgstat_report_waitstatus(old_status); } SO1_printf("ExecSort: %s\n", "retrieving tuple from tuplesort"); diff --git a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp index f256f5752..8d0ce77e1 100644 --- a/src/gausskernel/runtime/executor/nodeStartWithOp.cpp +++ b/src/gausskernel/runtime/executor/nodeStartWithOp.cpp @@ -32,7 +32,8 @@ typedef enum StartWithOpExecStatus { SWOP_UNKNOWN = 0, SWOP_BUILD = 1, SWOP_EXECUTE, - SWOP_FINISH + SWOP_FINISH, + SWOP_ESCAPE } StartWithOpExecStatus; #define KEY_START_TAG "{" @@ -193,6 +194,27 @@ extern int SibglingsKeyCmp(Datum x, Datum y, SortSupport ssup) return cmp; } +static bool unsupported_filter_walker(Node *node, Node *context_node) +{ + if (node == NULL) { + return false; + } + + if (!IsA(node, SubPlan)) { + return expression_tree_walker(node, (bool (*)()) unsupported_filter_walker, node); + } + + /* + * Currently we do not support subqueries + * pushed down to connect-by clauses. + */ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("Unsupported subquery found in connect by clause."))); + + return true; +} + /* * -------------------------------------------------------------------------------------- * - EXPORT FUNCTIONS.. @@ -211,6 +233,13 @@ extern int SibglingsKeyCmp(Datum x, Datum y, SortSupport ssup) */ StartWithOpState* ExecInitStartWithOp(StartWithOp* node, EState* estate, int eflags) { + /* + * Error-out unsupported cases before they + * actually cause any harm. 
+ */ + expression_tree_walker((Node*)node->plan.qual, + (bool (*)())unsupported_filter_walker, NULL); + /* * create state structure */ @@ -401,10 +430,10 @@ bool CheckCycleExeception(StartWithOpState *node, TupleTableSlot *slot) static List* peekNextLevel(TupleTableSlot* startSlot, PlanState* outerNode, int level) { List* queue = NULL; - + RecursiveUnionState* rus = (RecursiveUnionState*) outerNode; + StartWithOpState *swnode = rus->swstate; /* clean up RU's old working table */ ExecReScan(outerNode); - RecursiveUnionState* rus = (RecursiveUnionState*) outerNode; /* pushing the depth-first tuple into RU's working table */ rus->recursing = true; tuplestore_puttupleslot(rus->working_table, startSlot); @@ -412,14 +441,17 @@ static List* peekNextLevel(TupleTableSlot* startSlot, PlanState* outerNode, int /* fetch the depth-first tuple's children exactly one level below */ rus->iteration = level; int begin_iteration = rus->iteration; + int begin_rowCount = swnode->sw_rownum; TupleTableSlot* srcSlot = NULL; for (;;) { + swnode->sw_rownum = begin_rowCount; srcSlot = ExecProcNode(outerNode); if (TupIsNull(srcSlot) || (rus->iteration != begin_iteration)) break; TupleTableSlot* newSlot = MakeSingleTupleTableSlot(srcSlot->tts_tupleDescriptor); newSlot = ExecCopySlot(newSlot, srcSlot); queue = lappend(queue, newSlot); } + return queue; } @@ -441,7 +473,7 @@ static bool depth_first_connect(int currentLevel, StartWithOpState *node, List* /* loop until all siblings' DFS are done */ for (;;) { - if (queue->head == NULL) { + if (queue->head == NULL || node->swop_status == SWOP_ESCAPE) { return isCycle; } TupleTableSlot* leader = (TupleTableSlot*) lfirst(queue->head); @@ -450,8 +482,7 @@ static bool depth_first_connect(int currentLevel, StartWithOpState *node, List* } /* DFS: output the depth-first tuple to result table */ - TupleTableSlot *dstSlot = node->ps.ps_ResultTupleSlot; - dstSlot = ConvertStartWithOpOutputSlot(node, leader, dstSlot); + TupleTableSlot* dstSlot = leader; queue->head = queue->head->next; @@ -467,6 +498,7 @@ static bool depth_first_connect(int currentLevel, StartWithOpState *node, List* /* Go into the depth NOW: sibling tuples won't get processed * until all children are done */ + node->sw_rownum = rowCountBefore; bool expectCycle = depth_first_connect(currentLevel + 1, node, peekNextLevel(leader, outerNode, currentLevel), dfsRowCount); @@ -482,25 +514,125 @@ static List* makeStartTuples(StartWithOpState *node) PlanState* outerNode = outerPlanState(node); List* startWithQueue = NULL; RecursiveUnionState* rus = (RecursiveUnionState*) outerNode; - TupleTableSlot *dstSlot = node->ps.ps_ResultTupleSlot; - TupleTableSlot* srcSlot = ExecProcNode(outerNode); + TupleTableSlot* dstSlot = ExecProcNode(outerNode); int begin_iteration = rus->iteration; for (;;) { - if (TupIsNull(srcSlot) || rus->iteration != begin_iteration) { + if (TupIsNull(dstSlot) || rus->iteration != begin_iteration) { break; } - dstSlot = ConvertStartWithOpOutputSlot(node, srcSlot, dstSlot); - TupleTableSlot* newSlot = MakeSingleTupleTableSlot(srcSlot->tts_tupleDescriptor); - newSlot = ExecCopySlot(newSlot, srcSlot); + TupleTableSlot* newSlot = MakeSingleTupleTableSlot(dstSlot->tts_tupleDescriptor); + newSlot = ExecCopySlot(newSlot, dstSlot); startWithQueue = lappend(startWithQueue, newSlot); - srcSlot = ExecProcNode(outerNode); + dstSlot = ExecProcNode(outerNode); } return startWithQueue; } +void markSWLevelBegin(StartWithOpState *node) +{ + if (node->ps.instrument == NULL) { + return; + } + 
gettimeofday(&(node->iterStats.currentStartTime), NULL); +} + +void markSWLevelEnd(StartWithOpState *node, int64 rowCount) +{ + if (node->ps.instrument == NULL) { + return; + } + node->iterStats.totalIters = node->iterStats.totalIters + 1; + int bufIndex = node->iterStats.totalIters - 1; + + if (bufIndex >= SW_LOG_ROWS_FULL) { + bufIndex = SW_LOG_ROWS_HALF + (bufIndex % SW_LOG_ROWS_HALF); + } + + node->iterStats.levelBuf[bufIndex] = node->iterStats.totalIters; + node->iterStats.rowCountBuf[bufIndex] = rowCount; + node->iterStats.startTimeBuf[bufIndex] = node->iterStats.currentStartTime; + gettimeofday(&(node->iterStats.endTimeBuf[bufIndex]), NULL); +} + +/* + * @Function: ExecStartWithRowLevelQual() + * + * @Brief: + * Check if LEVEL/ROWNUM conditions still hold for the recursion to + * continue. Level and rownum should have been made available + * in dstSlot by ConvertStartWithOpOutputSlot() already. + * + * @Input node: The current recursive union node. + * @Input slot: The next slot to be scanned into start with operator. + * + * @Return: Boolean flag to tell if the iteration should continue. + */ +bool ExecStartWithRowLevelQual(RecursiveUnionState* node, TupleTableSlot* dstSlot) +{ + StartWithOp* swplan = (StartWithOp*)node->swstate->ps.plan; + if (!IsConnectByLevelStartWithPlan(swplan)) { + return true; + } + + ExprContext* expr = node->swstate->ps.ps_ExprContext; + + /* + * Level and rownum pseudo attributes are extracted from StartWithOpPlan + * node so we set filtering tuple as ecxt_scantuple + */ + expr->ecxt_scantuple = dstSlot; + if (!ExecQual(node->swstate->ps.qual, expr, false)) { + return false; + } + return true; +} + +static bool isStoppedByRowNum(RecursiveUnionState* node, TupleTableSlot* slot) +{ + TupleTableSlot* dstSlot = node->swstate->ps.ps_ResultTupleSlot; + bool ret = false; + + /* 1. roll back to the converted result row */ + node->swstate->sw_rownum--; + /* 2. roll back to one row before execution to check rownum stop condition */ + node->swstate->sw_rownum--; + dstSlot = ConvertStartWithOpOutputSlot(node->swstate, slot, dstSlot); + if (ExecStartWithRowLevelQual(node, dstSlot)) { + ret = true; + } + /* undo the rollback after conversion (yes, just one line) */ + node->swstate->sw_rownum++; + return ret; +} + +TupleTableSlot* GetStartWithSlot(RecursiveUnionState* node, TupleTableSlot* slot) +{ + TupleTableSlot* dstSlot = node->swstate->ps.ps_ResultTupleSlot; + dstSlot = ConvertStartWithOpOutputSlot(node->swstate, slot, dstSlot); + + if (!ExecStartWithRowLevelQual(node, dstSlot)) { + StartWithOpState *swnode = node->swstate; + StartWithOp *swplan = (StartWithOp *)swnode->ps.plan; + PlanState *outerNode = outerPlanState(swnode); + bool isDfsEnabled = swplan->swoptions->nocycle && !IsA(outerNode, SortState); + /* + * ROWNUM/LEVEL limit reached: + * Tell ExecRecursiveUnion to terminate the recursion by returning NULL + * + * Specifically for ROWNUM limit reached: + * Tell DFS routine to stop immediately by setting SW status to ESCAPE. + */ + node->swstate->swop_status = isDfsEnabled && isStoppedByRowNum(node, slot) ? 
SWOP_ESCAPE : + node->swstate->swop_status; + return NULL; + } + + return dstSlot; +} + TupleTableSlot* ExecStartWithOp(StartWithOpState *node) { - TupleTableSlot *srcSlot = NULL; TupleTableSlot *dstSlot = node->ps.ps_ResultTupleSlot; PlanState *outerNode = outerPlanState(node); StartWithOp *swplan = (StartWithOp *)node->ps.plan; @@ -512,6 +644,7 @@ TupleTableSlot* ExecStartWithOp(StartWithOpState *node) switch (node->swop_status) { case SWOP_BUILD: { Assert (node->sw_workingTable != NULL); + markSWLevelBegin(node); bool isDfsEnabled = swplan->swoptions->nocycle && !IsA(outerNode, SortState); if (isDfsEnabled) { /* For nocycle and non-order-siblings cases we use @@ -530,32 +663,15 @@ * We check for interrupts here because infinite loop might happen. */ CHECK_FOR_INTERRUPTS(); - srcSlot = ExecProcNode(outerNode); - - if (TupIsNull(srcSlot)) { - break; - } - - /* convert & store tuple */ - dstSlot = ConvertStartWithOpOutputSlot(node, srcSlot, dstSlot); /* - * In case of connect by level/rownum, loop will end up with LEVEL/ROWNUM - * condition is not satisfied - * - * level/rownum is set in ConvertStartWithOpOutputSlot() + * The actual executions and conversions are done + * in the underlying recursive union node. */ - if (IsConnectByLevelStartWithPlan(swplan)) { - ExprContext *expr = node->ps.ps_ExprContext; + dstSlot = ExecProcNode(outerNode); - /* - * level and rownum pseudo attributes is extract from StartWithPlan - * node so we set filtering tuple as ecxt_scantuple - */ - expr->ecxt_scantuple = dstSlot; - if (!ExecQual(node->ps.qual, expr, false)) { - break; - } + if (TupIsNull(dstSlot)) { + break; } tuplestore_puttupleslot(node->sw_workingTable, dstSlot); diff --git a/src/gausskernel/runtime/executor/nodeTrainModel.cpp b/src/gausskernel/runtime/executor/nodeTrainModel.cpp new file mode 100644 index 000000000..9c9e1af4a --- /dev/null +++ b/src/gausskernel/runtime/executor/nodeTrainModel.cpp @@ -0,0 +1,198 @@ +/* +* Copyright (c) 2020 Huawei Technologies Co.,Ltd. +* +* openGauss is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details.
+*--------------------------------------------------------------------------------------- +* +* nodeTrainModel.cpp +* Implementation of Model Training Operators +* +* IDENTIFICATION +* src/gausskernel/runtime/executor/nodeTrainModel.cpp +* +* --------------------------------------------------------------------------------------- +*/ + +#include "postgres.h" +#include "funcapi.h" + +#include "executor/executor.h" +#include "executor/node/nodeTrainModel.h" +#include "db4ai/db4ai_api.h" + +static bool ExecFetchTrainModel(void *callback_data, ModelTuple * tuple) +{ + TrainModelState *pstate = (TrainModelState*)callback_data; + PlanState *outer_plan = outerPlanState(pstate); + TupleTableSlot *slot = ExecProcNode(outer_plan); + if (TupIsNull(slot)) + return false; + + if (tuple != &pstate->tuple) { + // make sure the output tuple has all information + tuple->ncolumns = pstate->tuple.ncolumns; + tuple->typid = pstate->tuple.typid; + tuple->typlen = pstate->tuple.typlen; + tuple->typbyval = pstate->tuple.typbyval; + } + + // support of tuples that are (physical) - i.e., not virtual + if (slot->tts_tuple != nullptr) { + if (!pstate->row_allocated) { + tuple->values = (Datum *)palloc(sizeof(Datum) * tuple->ncolumns); + tuple->isnull = (bool *)palloc(sizeof(bool) *tuple->ncolumns); + pstate->row_allocated = true; + } + /* + * When all or most of a tuple's fields need to be extracted, + * this routine will be significantly quicker than a loop around + * heap_getattr; the loop will become O(N^2) as soon as any + * noncacheable attribute offsets are involved. + */ + heap_deform_tuple((HeapTuple)slot->tts_tuple, slot->tts_tupleDescriptor, + tuple->values, tuple->isnull); + } else { + Assert(!pstate->row_allocated); + tuple->values = slot->tts_values; + tuple->isnull = slot->tts_isnull; + } + return true; +} + +static void ExecReScanTrainModel(void *callback_data) +{ + TrainModelState *pstate = (TrainModelState*)callback_data; + PlanState *outer_plan = outerPlanState(pstate); + ExecReScan(outer_plan); +} + +TrainModelState* ExecInitTrainModel(TrainModel* pnode, EState* estate, int eflags) +{ + TrainModelState *pstate = NULL; + Plan *outer_plan = outerPlan(pnode); + + // check for unsupported flags + Assert(!(eflags & (EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); + + // create state structure + AlgorithmAPI *palgo = get_algorithm_api(pnode->algorithm); + Assert(palgo->create != nullptr); + pstate = palgo->create(palgo, pnode); + pstate->ss.ps.plan = (Plan *)pnode; + pstate->ss.ps.state = estate; + pstate->config = pnode; + pstate->algorithm = palgo; + pstate->finished = 0; + + // Tuple table initialization + ExecInitScanTupleSlot(estate, &pstate->ss); + ExecInitResultTupleSlot(estate, &pstate->ss.ps); + + // initialize child expressions + ExecAssignExprContext(estate, &pstate->ss.ps); + pstate->ss.ps.targetlist = (List *)ExecInitExpr((Expr *)pnode->plan.targetlist, (PlanState *)pstate); + + // initialize outer plan + PlanState *outer_plan_state = ExecInitNode(outer_plan, estate, eflags); + outerPlanState(pstate) = outer_plan_state; + + // Initialize result tuple type and projection info. 
+ ExecAssignScanTypeFromOuterPlan(&pstate->ss); // input tuples + ExecAssignResultTypeFromTL(&pstate->ss.ps); // result tuple + ExecAssignProjectionInfo(&pstate->ss.ps, NULL); + pstate->ss.ps.ps_TupFromTlist = false; + + // Input tuple initialization + TupleDesc tupdesc = ExecGetResultType(outer_plan_state); + pstate->tuple.ncolumns = tupdesc->natts; + pstate->tuple.typid = (Oid *)palloc(sizeof(Oid) * pstate->tuple.ncolumns); + pstate->tuple.typbyval = (bool *)palloc(sizeof(bool) * pstate->tuple.ncolumns); + pstate->tuple.typlen = (int16 *)palloc(sizeof(int16) * pstate->tuple.ncolumns); + for (int c = 0; c < pstate->tuple.ncolumns; c++) { + pstate->tuple.typid[c] = tupdesc->attrs[c]->atttypid; + pstate->tuple.typbyval[c] = tupdesc->attrs[c]->attbyval; + pstate->tuple.typlen[c] = tupdesc->attrs[c]->attlen; + } + + pstate->row_allocated = false; + pstate->fetch = ExecFetchTrainModel; + pstate->rescan = ExecReScanTrainModel; + pstate->callback_data = pstate; + + // Output tuple + TupleDesc tup_desc_out = CreateTemplateTupleDesc(1, false); + TupleDescInitEntry(tup_desc_out, (AttrNumber)1, "model", BYTEARRAYOID, -1, 0); + BlessTupleDesc(tup_desc_out); + ExecAssignResultType(&pstate->ss.ps, tup_desc_out); + ExecAssignProjectionInfo(&pstate->ss.ps, nullptr); + pstate->ss.ps.ps_TupFromTlist = false; + pstate->ss.ps.ps_ProjInfo = nullptr; + + return pstate; +} + +TupleTableSlot* ExecTrainModel(TrainModelState* pstate) +{ + // check if already finished + if (pstate->finished == pstate->config->configurations) + return NULL; + + // If backwards scan, just return NULL without changing state + if (!ScanDirectionIsForward(pstate->ss.ps.state->es_direction)) + return NULL; + + MemoryContext oldcxt = MemoryContextSwitchTo(pstate->config->cxt); + Model *model = nullptr; + model = (Model *)palloc0(sizeof(Model)); + model->status = ERRCODE_INVALID_STATUS; + model->memory_context = pstate->config->cxt; + MemoryContextSwitchTo(oldcxt); + + Assert(pstate->algorithm->run != nullptr); + pstate->algorithm->run(pstate->algorithm, pstate, &model); + if (model->status != ERRCODE_SUCCESSFUL_COMPLETION) { + MemoryContextSwitchTo(pstate->config->cxt); + pfree(model); + MemoryContextSwitchTo(oldcxt); + return NULL; + } + + TupleTableSlot *slot = pstate->ss.ps.ps_ResultTupleSlot; + Datum *values = slot->tts_values; + values[0] = PointerGetDatum(model); + ExecClearTuple(slot); + ExecStoreVirtualTuple(slot); + + return slot; +} + +void ExecEndTrainModel(TrainModelState* pstate) +{ + AlgorithmAPI *palgo = get_algorithm_api(pstate->config->algorithm); + Assert(palgo->end != nullptr); + palgo->end(palgo, pstate); + + if (pstate->row_allocated) { + pfree(pstate->tuple.values); + pfree(pstate->tuple.isnull); + } + pfree(pstate->tuple.typid); + pfree(pstate->tuple.typbyval); + pfree(pstate->tuple.typlen); + + ExecClearTuple(pstate->ss.ps.ps_ResultTupleSlot); + + ExecFreeExprContext(&pstate->ss.ps); + ExecEndNode(outerPlanState(pstate)); + pfree(pstate); +} + diff --git a/src/gausskernel/runtime/executor/spi.cpp b/src/gausskernel/runtime/executor/spi.cpp index 523121a7c..d1cb3ebea 100644 --- a/src/gausskernel/runtime/executor/spi.cpp +++ b/src/gausskernel/runtime/executor/spi.cpp @@ -236,11 +236,34 @@ void SPI_restore_current_stp_transaction_state() } /* This function will be called by commit/rollback inside STP to start a new transaction */ -void SPI_start_transaction(void) +void SPI_start_transaction(List* transactionHead) { + Oid savedCurrentUser = InvalidOid; + int saveSecContext = 0; + MemoryContext savedContext = 
MemoryContextSwitchTo(t_thrd.mem_cxt.portal_mem_cxt); + GetUserIdAndSecContext(&savedCurrentUser, &saveSecContext); + if (transactionHead != NULL) { + ListCell* cell; + foreach(cell, transactionHead) { + transactionNode* node = (transactionNode*)lfirst(cell); + SetUserIdAndSecContext(node->userId, node->secContext); + break; + } + } + MemoryContextSwitchTo(savedContext); MemoryContext oldcontext = CurrentMemoryContext; - StartTransactionCommand(true); + PG_TRY(); + { + StartTransactionCommand(true); + } + PG_CATCH(); + { + SetUserIdAndSecContext(savedCurrentUser, saveSecContext); + PG_RE_THROW(); + } + PG_END_TRY(); MemoryContextSwitchTo(oldcontext); + SetUserIdAndSecContext(savedCurrentUser, saveSecContext); } /* @@ -454,9 +477,6 @@ void SPI_savepoint_rollback(const char* spName) * interrupt previous cursor fetch. */ CommitTransactionCommand(true); - - /* move old subtransaction's remain resource into new subtransaction. */ - XactResumeSPIContext(false); } /* @@ -489,9 +509,6 @@ void SPI_savepoint_release(const char* spName) * interrupt previous cursor fetch. */ CommitTransactionCommand(true); - - /* move old subtransaction's remain resource into new subtransaction. */ - XactResumeSPIContext(false); } /* @@ -1280,7 +1297,7 @@ char *SPI_fname(TupleDesc tupdesc, int fnumber) if (fnumber > 0) { attr = tupdesc->attrs[fnumber - 1]; } else { - attr = SystemAttributeDefinition(fnumber, true, false); + attr = SystemAttributeDefinition(fnumber, true, false, false); } return pstrdup(NameStr(attr->attname)); @@ -1310,7 +1327,7 @@ char *SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber) if (fnumber > 0) { typoid = tupdesc->attrs[fnumber - 1]->atttypid; } else { - typoid = (SystemAttributeDefinition(fnumber, true, false))->atttypid; + typoid = (SystemAttributeDefinition(fnumber, true, false, false))->atttypid; } getTypeOutputInfo(typoid, &foutoid, &typo_is_varlen); @@ -1363,7 +1380,7 @@ char *SPI_gettype(TupleDesc tupdesc, int fnumber) if (fnumber > 0) { typoid = tupdesc->attrs[fnumber - 1]->atttypid; } else { - typoid = (SystemAttributeDefinition(fnumber, true, false))->atttypid; + typoid = (SystemAttributeDefinition(fnumber, true, false, false))->atttypid; } type_tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typoid)); @@ -1389,7 +1406,7 @@ Oid SPI_gettypeid(TupleDesc tupdesc, int fnumber) if (fnumber > 0) { return tupdesc->attrs[fnumber - 1]->atttypid; } else { - return (SystemAttributeDefinition(fnumber, true, false))->atttypid; + return (SystemAttributeDefinition(fnumber, true, false, false))->atttypid; } } @@ -1566,6 +1583,19 @@ Portal SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan, ParamLi return SPI_cursor_open_internal(name, plan, params, read_only, isCollectParam); } +#ifdef ENABLE_MULTIPLE_NODES +/* check plan's stream node and set flag */ +static void check_portal_stream(Portal portal) +{ + if (IS_PGXC_COORDINATOR && check_stream_for_loop_fetch(portal)) { + if (!ENABLE_SQL_BETA_FEATURE(PLPGSQL_STREAM_FETCHALL)) { + /* save flag for warning */ + u_sess->SPI_cxt.has_stream_in_cursor_or_forloop_sql = portal->hasStreamForPlpgsql; + } + } +} +#endif + /* * SPI_cursor_open_internal * @@ -1733,11 +1763,15 @@ static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, ParamL CommandCounterIncrement(); snapshot = GetTransactionSnapshot(); } + #ifdef ENABLE_MULTIPLE_NODES if (isCollectParam && checkCommandTag(portal->commandTag) && checkPlan(portal->stmts)) { collectDynWithArgs(query_string, paramLI, portal->cursorOptions); } + /* check plan if has 
a stream node */
+    check_portal_stream(portal);
 #endif
+
     /*
      * If the plan has parameters, copy them into the portal. Note that this
      * must be done after revalidating the plan, because in dynamic parameter
@@ -1758,6 +1792,10 @@ static Portal SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, ParamL
     /* Pop the SPI stack */
     SPI_STACK_LOG("end", NULL, plan);
+
+    /* reset the stream flag */
+    u_sess->SPI_cxt.has_stream_in_cursor_or_forloop_sql = false;
+
     _SPI_end_call(true);

     /* Return the created portal */
@@ -2728,7 +2766,7 @@ static int _SPI_execute_plan0(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot s
          * return a special result code if the statement was spelled
          * SELECT INTO.
          */
-        if (IsA(stmt, CreateTableAsStmt)) {
+        if (IsA(stmt, CreateTableAsStmt) && ((CreateTableAsStmt *)stmt)->relkind != OBJECT_MATVIEW) {
             Assert(strncmp(completionTag, "SELECT ", 7) == 0);
             u_sess->SPI_cxt._current->processed = strtoul(completionTag + 7, NULL, 10);
             if (((CreateTableAsStmt *)stmt)->is_select_into) {
@@ -2841,7 +2879,7 @@ fail:
     return my_res;
 }

-static bool IsNeedSubTxnForSPIPlan(SPIPlanPtr plan, ParamListInfo paramLI)
+static bool IsNeedSubTxnForSPIPlan(SPIPlanPtr plan)
 {
     /* not required to act as the same as O */
     if (!PLSTMT_IMPLICIT_SAVEPOINT) {
@@ -2873,6 +2911,17 @@
             if (qry->commandType != CMD_SELECT || qry->rowMarks != NULL || qry->hasModifyingCTE) {
                 return true;
             }
+
+#ifdef ENABLE_MULTIPLE_NODES
+            /*
+             * The DN aborts its subtransaction automatically once an error occurs. For now,
+             * start a new implicit savepoint to isolate runtime errors even if the statement
+             * is read only.
+             *
+             * If the statement touches only system tables, it runs on the CN without a
+             * savepoint. Is it worth checking for that case?
+             */
+            return list_length(plansource->relationOids) != 0;
+#endif
         }
     }
@@ -2889,9 +2938,10 @@ extern int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot sn
 {
     int my_res = 0;

-    if (IsNeedSubTxnForSPIPlan(plan, paramLI)) {
+    if (IsNeedSubTxnForSPIPlan(plan)) {
         volatile int curExceptionCounter;
         MemoryContext oldcontext = CurrentMemoryContext;
+        int64 stackId = u_sess->plsql_cxt.nextStackEntryId;

        /* start an implicit savepoint for this stmt.
*/ SPI_savepoint_create(NULL); @@ -2911,7 +2961,7 @@ extern int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot sn if (curExceptionCounter == u_sess->SPI_cxt.portal_stp_exception_counter && GetCurrentTransactionName() == NULL) { SPI_savepoint_rollbackAndRelease(NULL, InvalidTransactionId); - XactResumeSPIContext(true); + stp_cleanup_subxact_resowner(stackId); } PG_RE_THROW(); } @@ -2924,7 +2974,7 @@ extern int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot sn if (curExceptionCounter == u_sess->SPI_cxt.portal_stp_exception_counter && GetCurrentTransactionName() == NULL) { SPI_savepoint_release(NULL); - XactResumeSPIContext(true); + stp_cleanup_subxact_resowner(stackId); } } else { my_res = _SPI_execute_plan0(plan, paramLI, snapshot, @@ -3021,9 +3071,7 @@ ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes, Datum *Values, const if (cursor_data != NULL) { CopyCursorInfoData(&prm->cursor_data, &cursor_data[i]); } - prm->tableOfIndexType = InvalidOid; - prm->tableOfIndex = NULL; - prm->isnestedtable = false; + prm->tabInfo = NULL; } } else { param_list_info = NULL; diff --git a/src/gausskernel/runtime/executor/spiDbesql.cpp b/src/gausskernel/runtime/executor/spiDbesql.cpp index 3ca4cbfa6..359291ce8 100644 --- a/src/gausskernel/runtime/executor/spiDbesql.cpp +++ b/src/gausskernel/runtime/executor/spiDbesql.cpp @@ -123,6 +123,9 @@ void GetColumnDescribe(SPIPlanPtr plan, ArrayType** resDescribe, MemoryContext m SPIDescColumn->resno = descColumns->resno; SPIDescColumn->resorigtbl = descColumns->resorigtbl; SPIDescColumn->resname = (char *)palloc(NAMEDATALEN); + if (!descColumns->resname) { + continue; + } rc = strcpy_s(SPIDescColumn->resname, NAMEDATALEN, (char*)descColumns->resname); securec_check(rc, "", ""); @@ -133,10 +136,7 @@ void GetColumnDescribe(SPIPlanPtr plan, ArrayType** resDescribe, MemoryContext m attTuple = SearchSysCache2(ATTNAME, ObjectIdGetDatum(SPIDescColumn->resorigtbl), PointerGetDatum(SPIDescColumn->resname)); if (!HeapTupleIsValid(attTuple)) { - ereport(ERROR, (errmodule(MOD_OPT), errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" does not exist", SPIDescColumn->resname), - errdetail("N/A"), errcause("column does not exist"), - erraction("check column type"))); + continue; } attForm = ((Form_pg_attribute)GETSTRUCT(attTuple)); attNum = attForm->attnum; @@ -160,7 +160,8 @@ void GetColumnDescribe(SPIPlanPtr plan, ArrayType** resDescribe, MemoryContext m } } } -void SpiGetColumnFromPlan(const char *src, ArrayType** resDescribe, MemoryContext memctx) +void SpiGetColumnFromPlan(const char *src, ArrayType** resDescribe, MemoryContext memctx, + ParserSetupHook parserSetup, void *parserSetupArg) { _SPI_plan plan; if (src == NULL) { @@ -179,13 +180,15 @@ void SpiGetColumnFromPlan(const char *src, ArrayType** resDescribe, MemoryContex plan.stmt_list = NIL; plan.spi_key = INVALID_SPI_KEY; plan.id = (uint32)-1; - + plan.parserSetup = parserSetup; + plan.parserSetupArg = parserSetupArg; _SPI_prepare_plan(src, &plan); GetColumnDescribe(&plan, resDescribe, memctx); _SPI_end_call(true); } -void SpiDescribeColumnsCallback(CommandDest dest, const char *src, ArrayType** resDescribe, MemoryContext memctx) +void SpiDescribeColumnsCallback(CommandDest dest, const char *src, ArrayType** resDescribe, + MemoryContext memctx, ParserSetupHook parserSetup, void *parserSetupArg) { bool connected = false; @@ -199,7 +202,7 @@ void SpiDescribeColumnsCallback(CommandDest dest, const char *src, ArrayType** r } connected = true; /* Do the query. 
*/ - SpiGetColumnFromPlan(src, resDescribe, memctx); + SpiGetColumnFromPlan(src, resDescribe, memctx,parserSetup, parserSetupArg); connected = false; (void)SPI_finish(); diff --git a/src/gausskernel/runtime/opfusion/opfusion.cpp b/src/gausskernel/runtime/opfusion/opfusion.cpp index f248efdda..2bb5333fc 100644 --- a/src/gausskernel/runtime/opfusion/opfusion.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion.cpp @@ -210,7 +210,8 @@ FusionType OpFusion::getFusionType(CachedPlan *plan, ParamListInfo params, List } else if (plantree_list && plan == NULL) { plist = plantree_list; } else { - Assert(0); + /* sql has no plan, do nothing */ + return NONE_FUSION; } /* check stmt num */ @@ -471,7 +472,7 @@ void OpFusion::fusionExecute(StringInfo msg, char *completionTag, bool isTopLeve PG_RE_THROW(); } PG_END_TRY(); - + UpdateSingleNodeByPassUniqueSQLStat(isTopLevel); } @@ -517,17 +518,14 @@ bool OpFusion::process(int op, StringInfo msg, char *completionTag, bool isTopLe void OpFusion::CheckLogDuration() { - char msec_str[32]; + char msec_str[PRINTF_DST_MAX]; switch (check_log_duration(msec_str, false)) { case 1: - ereport(LOG, (errmsg("duration: %s ms, queryid %lu, unique id %lu", msec_str, u_sess->debug_query_id, - u_sess->slow_query_cxt.slow_query.unique_sql_id), - errhidestmt(true))); + Assert(false); break; case 2: { ereport(LOG, (errmsg("duration: %s ms queryid %lu unique id %lu", msec_str, u_sess->debug_query_id, - u_sess->slow_query_cxt.slow_query.unique_sql_id), - errhidestmt(true))); + u_sess->slow_query_cxt.slow_query.unique_sql_id), errhidestmt(true))); break; } default: diff --git a/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp b/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp index af408e611..53cf1a61c 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_indexonlyscan.cpp @@ -220,6 +220,8 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal() Relation rel = m_index; bool isUStore = RelationIsUstoreFormat(m_rel); bool bucket_changed = false; + TupleTableSlot* tmpreslot = NULL; + tmpreslot = MakeSingleTupleTableSlot(RelationGetDescr(m_scandesc->heapRelation), false, rel->rd_tam_type); while ((tid = scan_handler_idx_getnext_tid(m_scandesc, *m_direction, &bucket_changed)) != NULL) { HeapTuple tuple = NULL; @@ -245,10 +247,10 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal() if (isUStore) { /* ustore with multi-version ubtree only recheck IndexTuple when xs_recheck_itup is set */ if (indexdesc->xs_recheck_itup) { - if (!IndexFetchUHeap(indexdesc, m_reslot)) { + if (!IndexFetchUHeap(indexdesc, tmpreslot)) { continue; /* this TID indicate no visible tuple */ } - if (!RecheckIndexTuple(indexdesc, m_reslot)) { + if (!RecheckIndexTuple(indexdesc, tmpreslot)) { continue; /* the visible version not match the IndexTuple */ } } @@ -302,8 +304,10 @@ TupleTableSlot *IndexOnlyScanFusion::getTupleSlotInternal() StoreIndexTuple(m_reslot, tmptup, m_tupDesc); tableam_tslot_getsomeattrs(m_reslot, m_tupDesc->natts); + ExecDropSingleTupleTableSlot(tmpreslot); return m_reslot; } + ExecDropSingleTupleTableSlot(tmpreslot); return NULL; } diff --git a/src/gausskernel/runtime/opfusion/opfusion_select.cpp b/src/gausskernel/runtime/opfusion/opfusion_select.cpp index 0590da0f6..00b392186 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_select.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_select.cpp @@ -99,7 +99,14 @@ bool SelectFusion::execute(long max_rows, char* completionTag) * step 1: 
prepare * *******************/ start_row = m_c_global->m_limitOffset >= 0 ? m_c_global->m_limitOffset : start_row; - get_rows = m_c_global->m_limitCount >= 0 ? (m_c_global->m_limitCount + start_row) : max_rows; + int64 alreadyfetch = (m_local.m_position > start_row) ? (m_local.m_position - start_row) : 0; + /* no limit get fetch size rows */ + get_rows = max_rows; + if (m_c_global->m_limitCount >= 0) { + /* fetch size, limit */ + int64 limit_row = (m_c_global->m_limitCount - alreadyfetch > 0) ? m_c_global->m_limitCount - alreadyfetch : 0; + get_rows = (limit_row > max_rows) ? max_rows : limit_row; + } /********************** * step 2: begin scan * @@ -113,21 +120,27 @@ bool SelectFusion::execute(long max_rows, char* completionTag) unsigned long nprocessed = 0; /* put selected tuple into receiver */ TupleTableSlot* offset_reslot = NULL; - while (nprocessed < (unsigned long)start_row && (offset_reslot = m_local.m_scan->getTupleSlot()) != NULL) { + while (m_local.m_position < (long)start_row && (offset_reslot = m_local.m_scan->getTupleSlot()) != NULL) { tpslot_free_heaptuple(offset_reslot); - nprocessed++; + m_local.m_position++; + } + if (m_local.m_position < (long)start_row) { + Assert(offset_reslot == NULL); + get_rows = 0; + m_local.m_isCompleted = true; } while (nprocessed < (unsigned long)get_rows && (m_local.m_reslot = m_local.m_scan->getTupleSlot()) != NULL) { CHECK_FOR_INTERRUPTS(); + m_local.m_position++; nprocessed++; (*m_local.m_receiver->receiveSlot)(m_local.m_reslot, m_local.m_receiver); tpslot_free_heaptuple(m_local.m_reslot); } if (!ScanDirectionIsNoMovement(*(m_local.m_scan->m_direction))) { - if (max_rows == 0 || nprocessed < (unsigned long)max_rows) { + bool has_complete = (max_rows == 0 || nprocessed < (unsigned long)max_rows); + if (has_complete) { m_local.m_isCompleted = true; } - m_local.m_position += nprocessed; } else { m_local.m_isCompleted = true; } diff --git a/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp b/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp index b41295838..eef56618c 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_selectforupdate.cpp @@ -130,9 +130,18 @@ bool SelectForUpdateFusion::execute(long max_rows, char *completionTag) /* ****************** * step 1: prepare * * ***************** */ - m_local.m_scan->start_row = m_c_global->m_limitOffset >= 0 ? m_c_global->m_limitOffset : 0; - m_local.m_scan->get_rows = - m_c_global->m_limitCount >= 0 ? (m_c_global->m_limitCount + m_local.m_scan->start_row) : max_rows; + int64 start_row = m_c_global->m_limitOffset >= 0 ? m_c_global->m_limitOffset : 0; + int64 alreadyfetch = (m_local.m_position > start_row) ? (m_local.m_position - start_row) : 0; + /* no limit get fetch size rows */ + int64 get_rows = max_rows; + if (m_c_global->m_limitCount >= 0) { + /* fetch size, limit */ + int64 limit_row = (m_c_global->m_limitCount - alreadyfetch > 0) ? m_c_global->m_limitCount - alreadyfetch : 0; + get_rows = (limit_row > max_rows) ? max_rows : limit_row; + } + m_local.m_scan->start_row = start_row; + m_local.m_scan->get_rows = get_rows; + if (m_local.m_position == 0) { m_local.m_scan->refreshParameter(m_local.m_outParams == NULL ? 
m_local.m_params : m_local.m_outParams); m_local.m_scan->Init(max_rows); @@ -159,7 +168,6 @@ bool SelectForUpdateFusion::execute(long max_rows, char *completionTag) if (max_rows == 0 || nprocessed < (unsigned long)max_rows) { m_local.m_isCompleted = true; } - m_local.m_position += nprocessed; } else { m_local.m_isCompleted = true; } @@ -220,8 +228,13 @@ unsigned long SelectForUpdateFusion::ExecSelectForUpdate(Relation rel, ResultRel securec_check(errorno, "\0", "\0"); newtuple.t_data = &(tbuf.hdr); - while (nprocessed < (unsigned long)start_row && (tuple = m_local.m_scan->getTuple()) != NULL) { - nprocessed++; + while (m_local.m_position < (long)start_row && (tuple = m_local.m_scan->getTuple()) != NULL) { + m_local.m_position++; + } + if (m_local.m_position < (long)start_row) { + Assert(tuple == NULL); + get_rows = 0; + m_local.m_isCompleted = true; } while (nprocessed < (unsigned long)get_rows && (tuple = m_local.m_scan->getTuple()) != NULL) { @@ -262,7 +275,11 @@ unsigned long SelectForUpdateFusion::ExecSelectForUpdate(Relation rel, ResultRel result = tableam_tuple_lock(bucket_rel == NULL ? destRel : bucket_rel, &newtuple, &buffer, GetCurrentCommandId(true), LockTupleExclusive, LockWaitBlock, &tmfd, false, // allow_lock_self (heap implementation) +#ifdef ENABLE_MULTIPLE_NODES + false, +#else true, // follow_updates +#endif false, // eval GetActiveSnapshot(), &(((HeapTuple)tuple)->t_self), true); @@ -302,6 +319,7 @@ unsigned long SelectForUpdateFusion::ExecSelectForUpdate(Relation rel, ResultRel case TM_Ok: /* done successfully */ nprocessed++; + m_local.m_position++; (*m_local.m_receiver->receiveSlot)(m_local.m_reslot, m_local.m_receiver); ExecClearTuple(m_local.m_reslot); break; @@ -352,6 +370,7 @@ unsigned long SelectForUpdateFusion::ExecSelectForUpdate(Relation rel, ResultRel tableam_tslot_getsomeattrs(m_local.m_reslot, m_global->m_tupDesc->natts); nprocessed++; + m_local.m_position++; (*m_local.m_receiver->receiveSlot)(m_local.m_reslot, m_local.m_receiver); ((HeapTuple)tuple)->t_self = tmfd.ctid; tuple = copyTuple; diff --git a/src/gausskernel/runtime/opfusion/opfusion_update.cpp b/src/gausskernel/runtime/opfusion/opfusion_update.cpp index 3dcce6dcb..4ab14eb98 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_update.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_update.cpp @@ -310,7 +310,9 @@ unsigned long UpdateFusion::ExecUpdate(Relation rel, ResultRelInfo* result_rel_i bool modifyHist = false; lreplace: + t_thrd.xact_cxt.ActiveLobRelid = rel->rd_id; tup = tableam_tops_opfusion_modify_tuple(oldtup, m_global->m_tupDesc, m_local.m_values, m_local.m_isnull, this); + t_thrd.xact_cxt.ActiveLobRelid = InvalidOid; Relation destRel = RELATION_IS_PARTITIONED(rel) ? partRel : rel; Relation parentRel = RELATION_IS_PARTITIONED(rel) ? 
rel : NULL; diff --git a/src/gausskernel/runtime/opfusion/opfusion_util.cpp b/src/gausskernel/runtime/opfusion/opfusion_util.cpp index 66b1b6267..a8ff66805 100644 --- a/src/gausskernel/runtime/opfusion/opfusion_util.cpp +++ b/src/gausskernel/runtime/opfusion/opfusion_util.cpp @@ -750,7 +750,8 @@ FusionType getSelectFusionType(List *stmt_list, ParamListInfo params) bool is_select_for_update = (list_length(lockrows->rowMarks) == 1 && IsA(linitial(lockrows->rowMarks), PlanRowMark) && ((PlanRowMark *)linitial(lockrows->rowMarks))->markType == ROW_MARK_EXCLUSIVE && - ((PlanRowMark *)linitial(lockrows->rowMarks))->noWait == false); + ((PlanRowMark *)linitial(lockrows->rowMarks))->noWait == false && + ((PlanRowMark *)linitial(lockrows->rowMarks))->waitSec == 0); if (is_select_for_update) { top_plan = top_plan->lefttree; ftype = SELECT_FOR_UPDATE_FUSION; @@ -1223,6 +1224,8 @@ Relation InitPartitionIndexInFusion(Oid parentIndexOid, Oid partOid, Partition * if (!IsBootstrapProcessingMode()) { ResourceOwnerForgetFakerelRef(t_thrd.utils_cxt.CurrentResourceOwner, (*partIndex)->partrel); } + } else { + UpdatePartrelPointer((*partIndex)->partrel, *parentIndex, *partIndex); } index = (*partIndex)->partrel; } else { @@ -1245,6 +1248,8 @@ void InitPartitionRelationInFusion(Oid partOid, Relation parentRel, Partition* p if (!IsBootstrapProcessingMode()) { ResourceOwnerForgetFakerelRef(t_thrd.utils_cxt.CurrentResourceOwner, (*partRel)->partrel); } + } else { + UpdatePartrelPointer((*partRel)->partrel, parentRel, *partRel); } *rel = (*partRel)->partrel; } else { diff --git a/src/gausskernel/runtime/vecexecutor/vecexpression.cpp b/src/gausskernel/runtime/vecexecutor/vecexpression.cpp index 6e541b55e..8c536458e 100644 --- a/src/gausskernel/runtime/vecexecutor/vecexpression.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecexpression.cpp @@ -103,8 +103,7 @@ static ScalarVector* ExecEvalVecVar( break; } - // Sys column branch - // + /* Sys column branch */ Assert(batch != NULL); if (attnum < 0) { ScalarVector* pVec = batch->GetSysVector(attnum); @@ -229,6 +228,22 @@ static ScalarVector* ExecEvalVecNot( return pVector; } +static ScalarVector* ExecEvalVecRownum( + RownumState* exprstate, ExprContext* econtext, bool* pSelection, ScalarVector* pVector, ExprDoneCond* isDone) +{ + Assert(pSelection != NULL); + ScalarValue* pDest = pVector->m_vals; + int64 ps_rownum = exprstate->ps->ps_rownum; + + for (int i = 0; i < econtext->align_rows; i++) { + SET_NOTNULL(pVector->m_flag[i]); + pDest[i] = ++ps_rownum ; + } + + pVector->m_rows = econtext->align_rows; + + return pVector; +} // TRUE means we deal with or // false means we deal with and template @@ -2334,7 +2349,7 @@ static ScalarVector* ExecEvalVecCoerceViaIO( pVector->m_rows = econtext->align_rows; pVector->m_desc.typeId = iostate->infunc.fn_rettype; - pVector->m_desc.encoded = COL_IS_ENCODE_T(); + pVector->m_desc.encoded = COL_IS_ENCODE(retType); return pVector; } @@ -3165,6 +3180,12 @@ ExprState* ExecInitVecExpr(Expr* node, PlanState* parent) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_VEC_EXECUTOR), errmsg("Unsupported array coerce expression in vector engine"))); + case T_Rownum: { + RownumState* rnstate = (RownumState*)makeNode(RownumState); + rnstate->ps = parent; + state = (ExprState*)rnstate; + state->vecExprFun = (VectorExprFun)ExecEvalVecRownum; + } break; default: ereport(ERROR, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), @@ -3537,44 +3558,40 @@ static inline void SetAlignRowsForProject(ExprContext* econtext, VectorBatch* ba * it in the previously 
specified VectorBatch. * @in projInfo: ProjectionInfo node information * @in selReSet: Sign projInfo->pi_batch's m_sel if need reset. + * @in batchReset: True if pProjBatch is used for multi-entry. * @return: Return project result. */ VectorBatch* ExecVecProject(ProjectionInfo* projInfo, bool selReSet, ExprDoneCond* isDone) { - VectorBatch* pProjBatch = NULL; - VectorBatch* srcBatch = NULL; - ExprContext* econtext = NULL; - int numSimpleVars; - Assert(projInfo != NULL); - // get the projection info we want - // - pProjBatch = projInfo->pi_batch; - econtext = projInfo->pi_exprContext; + VectorBatch* pProjBatch = projInfo->pi_batch; + VectorBatch* srcBatch = NULL; + ExprContext* econtext = projInfo->pi_exprContext; + int numSimpleVars; /* Assume single result row until proven otherwise */ if (isDone != NULL) *isDone = ExprSingleResult; - // Clear any former contents of the result pProjBatch - // + /* Clear any former contents of the result pProjBatch */ pProjBatch->Reset(); if (selReSet) { pProjBatch->ResetSelection(true); } - // align rows + /* align rows */ econtext->align_rows = 0; SetAlignRowsForProject(econtext, econtext->ecxt_outerbatch); SetAlignRowsForProject(econtext, econtext->ecxt_innerbatch); SetAlignRowsForProject(econtext, econtext->ecxt_aggbatch); SetAlignRowsForProject(econtext, econtext->ecxt_scanbatch); Assert(econtext->align_rows != 0); - // Assign simple Vars to result by direct extraction of fields from source - // slots ... a mite ugly, but fast ... - // + /* + * Assign simple Vars to result by direct extraction of fields from source + * slots ... a mite ugly, but fast ... + */ numSimpleVars = projInfo->pi_numSimpleVars; if (numSimpleVars > 0) { ScalarVector* values = pProjBatch->m_arr; @@ -3608,16 +3625,14 @@ VectorBatch* ExecVecProject(ProjectionInfo* projInfo, bool selReSet, ExprDoneCon } } - // Set the number of rows on which batch is used - // + /* Set the number of rows on which batch is used */ pProjBatch->m_rows = srcBatch->m_rows; for (i = 0; i < pProjBatch->m_cols; i++) { pProjBatch->m_arr[i].m_rows = srcBatch->m_rows; } } - // If there are any generic expressions, evaluate them. - // + /* If there are any generic expressions, evaluate them. */ if (projInfo->pi_targetlist) { if (projInfo->jitted_vectarget) { projInfo->jitted_vectarget(econtext, pProjBatch); @@ -3642,20 +3657,14 @@ VectorBatch* ExecVecProject(ProjectionInfo* projInfo, bool selReSet, ExprDoneCon } } - // Kludge: this is to fix some cases only const evaluation in target list, thus - // we may get a over sized batch. Adjust it back here. - // + /* + * Kludge: this is to fix some cases only const evaluation in target list, thus + * we may get a over sized batch. Adjust it back here. + */ if (srcBatch != NULL) pProjBatch->m_rows = Min(pProjBatch->m_rows, srcBatch->m_rows); - // Successfully formed a result batch - // - if (econtext->have_vec_set_fun) { - return projInfo->pi_setFuncBatch; - } else { - Assert(pProjBatch->IsValid()); - return pProjBatch; - } + return (econtext->have_vec_set_fun) ? 
projInfo->pi_setFuncBatch : pProjBatch; } static ScalarVector* ExecEvalVecGroupingFuncExpr(GroupingFuncExprState* gstate, ExprContext* econtext, diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecagg.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecagg.cpp index 3bb4fec8c..967cb2939 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vecagg.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecagg.cpp @@ -727,6 +727,7 @@ VecAggState* ExecInitVecAggregation(VecAgg* node, EState* estate, int eflags) aggstate->numaggs = aggno + 1; aggstate->aggRun = NULL; +#ifdef ENABLE_LLVM_COMPILE /* * Generate IR function for HashAggRunner::BuildAggTbl function, which * contains hashing part, allocate hashcell and agg part @@ -741,6 +742,7 @@ VecAggState* ExecInitVecAggregation(VecAgg* node, EState* estate, int eflags) dorado::VecHashAggCodeGen::HashAggCodeGen(aggstate); } } +#endif return aggstate; } @@ -1484,7 +1486,7 @@ void BaseAggRunner::BatchSortAggregation(int curr_set, int work_mem, int max_mem FmgrInfo* equalfns = &peragg_stat->equalfns[0]; /* This vector's values all is NULL. */ - if (!FilterRepeatValue(equalfns, curr_set, aggno, first)) { + if (equalfns && !FilterRepeatValue(equalfns, curr_set, aggno, first)) { break; } diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp index 0b9431b39..8ae9add88 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/veccstore.cpp @@ -681,6 +681,7 @@ CStoreScanState* ExecInitCStoreScan( InitCStoreRelation(scan_stat, estate, idx_flag, parent_heap_rel); scan_stat->ps.ps_TupFromTlist = false; +#ifdef ENABLE_LLVM_COMPILE /* * First, not only consider the LLVM native object, but also consider the cost of * the LLVM compilation time. We will not use LLVM optimization if there is @@ -706,6 +707,7 @@ CStoreScanState* ExecInitCStoreScan( llvm_code_gen->addFunctionToMCJit(jitted_vecqual, reinterpret_cast(&(scan_stat->jitted_vecqual))); } } +#endif /* * Initialize result tuple type and projection info. @@ -762,6 +764,7 @@ CStoreScanState* ExecInitCStoreScan( scan_stat->m_pScanBatch->CreateSysColContainer(CurrentMemoryContext, plan_stat->ps_ProjInfo->pi_sysAttrList); } +#ifdef ENABLE_LLVM_COMPILE /** * Since we separate the target list elements into simple var references and * generic expression, we only need to deal the generic expression with LLVM @@ -780,6 +783,7 @@ CStoreScanState* ExecInitCStoreScan( llvm_code_gen->addFunctionToMCJit( jitted_vectarget, reinterpret_cast(&(plan_stat->ps_ProjInfo->jitted_vectarget))); } +#endif scan_stat->m_pScanRunTimeKeys = NULL; scan_stat->m_ScanRunTimeKeysNum = 0; diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/veccstoreindexscan.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/veccstoreindexscan.cpp index 3ece2a8a6..1b83d099b 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/veccstoreindexscan.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/veccstoreindexscan.cpp @@ -295,6 +295,7 @@ CStoreIndexScanState* ExecInitCstoreIndexScan(CStoreIndexScan* node, EState* est rc = memcpy_s(indexstate, sizeof(CStoreScanState), scanstate, sizeof(CStoreScanState)); securec_check(rc, "\0", "\0"); +#ifdef ENABLE_LLVM_COMPILE /* * First, not only consider the LLVM native object, but also consider the cost of * the LLVM compilation time. 
We will not use LLVM optimization if there is @@ -315,6 +316,7 @@ CStoreIndexScanState* ExecInitCstoreIndexScan(CStoreIndexScan* node, EState* est if (jitted_vecqual != NULL) llvmCodeGen->addFunctionToMCJit(jitted_vecqual, reinterpret_cast(&(indexstate->jitted_vecqual))); } +#endif indexstate->ps.plan = (Plan*)node; indexstate->ps.state = estate; diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecgroup.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecgroup.cpp index 237e18327..6b889d9da 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vecgroup.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecgroup.cpp @@ -170,6 +170,7 @@ VecGroupState* ExecInitVecGroup(VecGroup* node, EState* estate, int eflags) grp_state->ss.ps.targetlist = (List*)ExecInitVecExpr((Expr*)node->plan.targetlist, (PlanState*)grp_state); grp_state->ss.ps.qual = (List*)ExecInitVecExpr((Expr*)node->plan.qual, (PlanState*)grp_state); +#ifdef ENABLE_LLVM_COMPILE /* * Check if nlstate->js.joinqual and nlstate->js.ps.qual expr list could be * codegened or not. @@ -184,6 +185,7 @@ VecGroupState* ExecInitVecGroup(VecGroup* node, EState* estate, int eflags) if (grp_vecqual != NULL) llvm_code_gen->addFunctionToMCJit(grp_vecqual, reinterpret_cast(&(grp_state->jitted_vecqual))); } +#endif // initialize child nodes outerPlanState(grp_state) = ExecInitNode(outerPlan(node), estate, eflags); diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp index 673175773..3aeed2eb5 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vechashjoin.cpp @@ -128,10 +128,12 @@ VecHashJoinState* ExecInitVecHashJoin(VecHashJoin* node, EState* estate, int efl * Since most of the expression information will be used * later, we still need to initialize these expression. */ +#ifdef ENABLE_LLVM_COMPILE dorado::GsCodeGen* llvmCodeGen = (dorado::GsCodeGen*)t_thrd.codegen_cxt.thr_codegen_obj; bool consider_codegen = CodeGenThreadObjectReady() && CodeGenPassThreshold(((Plan*)outer_node)->plan_rows, estate->es_plannedstmt->num_nodes, ((Plan*)outer_node)->dop); +#endif if (hash_state->js.ps.targetlist) { hash_state->js.ps.ps_ProjInfo = ExecBuildVecProjectionInfo(hash_state->js.ps.targetlist, @@ -140,6 +142,7 @@ VecHashJoinState* ExecInitVecHashJoin(VecHashJoin* node, EState* estate, int efl hash_state->js.ps.ps_ResultTupleSlot, NULL); +#ifdef ENABLE_LLVM_COMPILE bool saved_codegen = consider_codegen; if (isIntergratedMachine) { consider_codegen = @@ -169,6 +172,7 @@ VecHashJoinState* ExecInitVecHashJoin(VecHashJoin* node, EState* estate, int efl } consider_codegen = saved_codegen; +#endif ExecAssignVectorForExprEval(hash_state->js.ps.ps_ProjInfo->pi_exprContext); } else { @@ -215,12 +219,14 @@ VecHashJoinState* ExecInitVecHashJoin(VecHashJoin* node, EState* estate, int efl hash_state->bf_runtime.bf_filter_index = hash_state->js.ps.plan->filterIndexList; hash_state->bf_runtime.bf_array = estate->es_bloom_filter.bfarray; +#ifdef ENABLE_LLVM_COMPILE /* consider codegeneration for hashjoin node with respect to innerjoin, * buildhashtable and probehashtable function. 
 */
    if (consider_codegen && !node->isSonicHash) {
        dorado::VecHashJoinCodeGen::HashJoinCodeGen(hash_state);
    }
+#endif

    return hash_state;
}
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecmergejoin.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecmergejoin.cpp
index 75c8c3952..30b5bab4b 100644
--- a/src/gausskernel/runtime/vecexecutor/vecnode/vecmergejoin.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecmergejoin.cpp
@@ -1899,6 +1899,7 @@ VecMergeJoinState* ExecInitVecMergeJoin(VecMergeJoin* node, EState* estate, int
         node->mergeNullsFirst,
         (PlanState*)mergestate);

+#ifdef ENABLE_LLVM_COMPILE
     /*
      * After all of the expressions have been decided, check if the following
      * exprs can be codegened or not.
@@ -1922,6 +1923,7 @@ VecMergeJoinState* ExecInitVecMergeJoin(VecMergeJoin* node, EState* estate, int
                 jitted_vectarget, reinterpret_cast<void**>(&(mergestate->js.ps.ps_ProjInfo->jitted_vectarget)));
         }
     }
+#endif

     for (i = 0; i < mergestate->mj_NumClauses; i++) {
         VecMergeJoinClause clause = &mergestate->mj_Clauses[i];
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecnestloop.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecnestloop.cpp
index 2fc7e68d8..20e642977 100644
--- a/src/gausskernel/runtime/vecexecutor/vecnode/vecnestloop.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecnestloop.cpp
@@ -473,6 +473,7 @@ VecNestLoopState* ExecInitVecNestLoop(VecNestLoop* node, EState* estate, int efl
     nlstate->js.joinqual = (List*)ExecInitVecExpr((Expr*)node->join.joinqual, (PlanState*)nlstate);
     Assert(node->join.nulleqqual == NIL);

+#ifdef ENABLE_LLVM_COMPILE
     /*
      * Check if nlstate->js.joinqual and nlstate->js.ps.qual expr list could be
      * codegened or not.
@@ -492,6 +493,7 @@ VecNestLoopState* ExecInitVecNestLoop(VecNestLoop* node, EState* estate, int efl
         if (nl_joinqual != NULL)
             llvm_code_gen->addFunctionToMCJit(nl_joinqual, reinterpret_cast<void**>(&(nlstate->jitted_joinqual)));
     }
+#endif

     /*
      * initialize child nodes
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecpartiterator.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecpartiterator.cpp
index b9f0d057f..e9cf0f54a 100644
--- a/src/gausskernel/runtime/vecexecutor/vecnode/vecpartiterator.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecpartiterator.cpp
@@ -26,6 +26,7 @@
 #include "executor/exec/execdebug.h"
 #include "vecexecutor/vecpartiterator.h"
+#include "executor/node/nodePartIterator.h"
 #include "executor/tuptable.h"
 #include "utils/memutils.h"
 #include "nodes/execnodes.h"
@@ -82,6 +83,21 @@ static int GetVecscanPartitionNum(const PartIteratorState* node)
         case T_CStoreIndexHeapScanState:
             partitionScan = ((CStoreIndexHeapScanState*)noden)->part_id;
             break;
+        case T_RowToVecState: {
+            ScanState* scanState = (ScanState*)noden->lefttree;
+            switch (nodeTag(scanState)) {
+                case T_SeqScanState:
+                case T_IndexScanState:
+                case T_IndexOnlyScanState:
+                case T_BitmapHeapScanState:
+                    partitionScan = scanState->part_id;
+                    break;
+                default:
+                    partitionScan = pi_node->itrs;
+                    break;
+            }
+            break;
+        }
         default:
             partitionScan = pi_node->itrs;
             break;
@@ -89,17 +105,43 @@
     return partitionScan;
 }

-static void InitVecscanPartition(VecPartIteratorState* node, int partitionScan)
+/* return: false means all partitions are finished */
+static bool InitVecscanPartition(VecPartIteratorState* node, int partitionScan)
 {
     int paramno = 0;
     unsigned int itr_idx = 0;
     VecPartIterator* pi_node = (VecPartIterator*)node->ps.plan;
     ParamExecData* param = NULL;
+    List* subPartLengthList = NULL;
+    PlanState* noden = NULL;
+
+    /* check sub partitions */
+    if (IsA(node->ps.lefttree, RowToVecState)) {
+        RowToVecState* rowToVecNode = (RowToVecState*)node->ps.lefttree;
+        noden = (PlanState*)rowToVecNode->ps.lefttree;
+        if (IsA(noden, ScanState) || IsA(noden, SeqScanState) || IsA(noden, IndexOnlyScanState) ||
+            IsA(noden, IndexScanState) || IsA(noden, BitmapHeapScanState) || IsA(noden, TidScanState)) {
+            subPartLengthList = ((ScanState *)noden)->subPartLengthList;
+        }
+    }
+
+    /* if there is no partition left to scan, return false */
+    if (node->currentItr + 1 >= partitionScan) {
+        if (subPartLengthList != NIL) {
+            int subPartLength = (int)list_nth_int(subPartLengthList, node->currentItr);
+            if (node->subPartCurrentItr + 1 >= subPartLength) {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }

     Assert(ForwardScanDirection == pi_node->direction || BackwardScanDirection == pi_node->direction);

     /* set iterator parameter */
-    node->currentItr++;
+    SetPartitionIteratorParamter(node, subPartLengthList);
+    itr_idx = node->currentItr;
     if (BackwardScanDirection == pi_node->direction)
         itr_idx = partitionScan - itr_idx - 1;
@@ -112,6 +154,8 @@
     /* reset the plan node so that next partition can be scanned */
     VecExecReScan(node->ps.lefttree);
+
+    return true;
 }

 VectorBatch* ExecVecPartIterator(VecPartIteratorState* node)
@@ -138,11 +182,9 @@ VectorBatch* ExecVecPartIterator(VecPartIteratorState* node)
         return result;

     for (;;) {
-        /* if there is no partition to scan, return null */
-        if (node->currentItr >= partitionScan - 1)
+        if (!InitVecscanPartition(node, partitionScan)) {
             return NULL;
-
-        InitVecscanPartition(node, partitionScan);
+        }

         /* For partition wise join, can not early free left tree's caching memory */
         orig_early_free = state->es_skip_early_free;
@@ -169,6 +211,8 @@ void ExecReScanVecPartIterator(VecPartIteratorState* node)
     VecPartIterator* pi_node = NULL;
     int paramno = -1;
     ParamExecData* param = NULL;
+    int subPartParamno = -1;
+    ParamExecData* subPartParam = NULL;

     /* do nothing if there is no partition to scan */
     int partitionScan = GetVecscanPartitionNum(node);
@@ -185,6 +229,14 @@
     param->value = (Datum)0;
     node->ps.lefttree->chgParam = bms_add_member(node->ps.lefttree->chgParam, paramno);

+    node->subPartCurrentItr = -1;
+
+    subPartParamno = pi_node->param->subPartParamno;
+    subPartParam = &(node->ps.state->es_param_exec_vals[subPartParamno]);
+    subPartParam->isnull = false;
+    subPartParam->value = (Datum)0;
+    node->ps.lefttree->chgParam = bms_add_member(node->ps.lefttree->chgParam, subPartParamno);
+
     /*
      * if the pruning result is not null, reset the subplan node so
      * that its output can be re-scanned.
      */
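The vecpartiterator.cpp changes above turn InitVecscanPartition from a void setup routine into a boolean advance step, so ExecVecPartIterator simply loops until it reports completion. Below is a minimal standalone sketch of that control flow; the `Iter` struct and `AdvancePartition` function are hypothetical stand-ins for VecPartIteratorState and InitVecscanPartition (the real code also tracks sub-partitions, signals the child plan through chgParam, and rescans it on every advance):

```cpp
#include <cstdio>

// Hypothetical stand-in for VecPartIteratorState.
struct Iter {
    int currentItr;   // -1 before the first partition is selected
    int partitions;   // number of pruned partitions to scan
};

// Mirrors the new InitVecscanPartition contract: false means all finished.
static bool AdvancePartition(Iter *it)
{
    if (it->currentItr + 1 >= it->partitions) {
        return false;             // nothing left to scan
    }
    it->currentItr++;             // select the next partition; the caller rescans the child here
    return true;
}

int main()
{
    Iter it = { -1, 3 };
    // Same shape as the for (;;) loop in ExecVecPartIterator.
    while (AdvancePartition(&it)) {
        printf("scanning partition %d\n", it.currentItr);
    }
    return 0;
}
```

Folding the bounds check into the advance routine keeps the partition and sub-partition bookkeeping in one place instead of duplicating it in the caller.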
diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecrowtovector.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecrowtovector.cpp
index 1a89ead1a..38e0e46a9 100644
--- a/src/gausskernel/runtime/vecexecutor/vecnode/vecrowtovector.cpp
+++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecrowtovector.cpp
@@ -26,6 +26,7 @@
 #include "knl/knl_variable.h"

 #include "access/tableam.h"
+#include "access/tuptoaster.h"
 #include "executor/executor.h"
 #include "vecexecutor/vecnoderowtovector.h"
 #include "utils/memutils.h"
@@ -35,8 +36,10 @@
 #include "utils/numeric.h"
 #include "utils/numeric_gs.h"
 #include "storage/item/itemptr.h"
+#include "vecexecutor/vecexecutor.h"
+#include "vecexecutor/vectorbatch.h"

-static void CheckTypeSupportRowToVec(List* targetlist);
+#define MAX_LOOPS_FOR_RESET 50

 /*
  * @Description: Pack one tuple into vectorbatch.
  *
@@ -66,8 +69,6 @@ bool VectorizeOneTuple(_in_ VectorBatch* pBatch, _in_ TupleTableSlot* slot, _in_
         int type_len;
         Form_pg_attribute attr = slot->tts_tupleDescriptor->attrs[i];

-        pBatch->m_arr[i].m_desc.typeId = attr->atttypid;
-
         if (slot->tts_isnull[i] == false) {
             type_len = attr->attlen;
             switch (type_len) {
@@ -127,13 +128,138 @@ bool VectorizeOneTuple(_in_ VectorBatch* pBatch, _in_ TupleTableSlot* slot, _in_
     return may_more;
 }

+inline struct varlena* DetoastDatumBatch(struct varlena* datum, ScalarVector* arr)
+{
+    if (VARATT_IS_EXTENDED(datum)) {
+        return heap_tuple_untoast_attr(datum, arr);
+    } else {
+        return datum;
+    }
+}
+
+template <bool hasNull>
+static void FillVector(ScalarVector* pVector, int rows)
+{
+    for (int i = 0; i < rows; i++) {
+        if (hasNull && unlikely(IS_NULL(pVector->m_flag[i]))) {
+            continue;
+        }
+
+        pVector->AddVar(pVector->m_vals[i], i);
+    }
+}
+
+template <bool hasNull>
+static void FillTidVector(ScalarVector* pVector, int rows)
+{
+    for (int i = 0; i < rows; i++) {
+        if (hasNull && unlikely(IS_NULL(pVector->m_flag[i]))) {
+            continue;
+        }
+
+        ItemPointer srcTid = (ItemPointer)DatumGetPointer(pVector->m_vals[i]);
+        ItemPointer destTid = (ItemPointer)(&pVector->m_vals[i]);
+        *destTid = *srcTid;
+    }
+}
+
+template <bool hasNull>
+static void TransformScalarVector(Form_pg_attribute attr, ScalarVector* pVector, int rows)
+{
+    int i = 0;
+    int typeLen = attr->attlen;
+    Datum v, v0;
+    switch (typeLen) {
+        case sizeof(char):
+        case sizeof(int16):
+        case sizeof(int32):
+        case sizeof(Datum):
+            /* nothing to do */
+            break;
+        /* See ScalarVector::DatumToScalar to get the define */
+        case 12: /* TIMETZOID, TINTERVALOID */
+        case 16: /* INTERVALOID, UUIDOID */
+        case 64: /* NAMEOID */
+        case -2:
+            FillVector<hasNull>(pVector, rows);
+            break;
+        case -1:
+            for (i = 0; i < rows; i++) {
+                if (hasNull && unlikely(IS_NULL(pVector->m_flag[i]))) {
+                    continue;
+                }
+
+                v0 = pVector->m_vals[i];
+                v = PointerGetDatum(DetoastDatumBatch((struct varlena *)DatumGetPointer(v0), pVector));
+                /* for a numeric column, try to convert the numeric to a big integer */
+                if (attr->atttypid == NUMERICOID) {
+                    v = try_convert_numeric_normal_to_fast(v, pVector);
+                }
+
+                if (v == v0) {
+                    pVector->AddVar(v0, i);
+                } else {
+                    pVector->m_vals[i] = v;
+                }
+            }
+            break;
+        case 6:
+            if (attr->atttypid == TIDOID && !attr->attbyval) {
+                FillTidVector<hasNull>(pVector, rows);
+            } else {
+                FillVector<hasNull>(pVector, rows);
+            }
+            break;
+        default:
+            ereport(ERROR,
+                (errcode(ERRCODE_INDETERMINATE_DATATYPE), errmsg("unsupported datatype branch")));
+    }
+}
+
+template <bool lateRead>
+void VectorizeTupleBatchMode(VectorBatch *pBatch, TupleTableSlot **slots,
+    ExprContext *econtext, ScanBatchState *scanstate, int rows)
+{
+    int i, j, colidx = 0;
+    MemoryContext transformContext = econtext->ecxt_per_tuple_memory;
+
+    /* Extract all the values of the old tuple */
+    MemoryContext oldContext = MemoryContextSwitchTo(transformContext);
+
+    /* when not late-reading, deform all the columns into the batch */
+    if (!lateRead) {
+        for (j = 0; j < rows; j++) {
+            tableam_tslot_formbatch(slots[j], pBatch, j, scanstate->maxcolId);
+        }
+
+        for (i = 0; i < scanstate->maxcolId; i++) {
+            scanstate->nullflag[i] = pBatch->m_arr[i].m_const;
+            pBatch->m_arr[i].m_const = false;
+        }
+    }
+
+    for (i = 0; i < scanstate->colNum; i++) {
+        if ((lateRead && scanstate->lateRead[i]) || (!lateRead && !scanstate->lateRead[i])) {
+            colidx = scanstate->colId[i];
+            Form_pg_attribute attr = slots[0]->tts_tupleDescriptor->attrs[colidx];
+            if (scanstate->nullflag[colidx]) {
+                TransformScalarVector<true>(attr, &pBatch->m_arr[colidx], rows);
+            } else {
+                TransformScalarVector<false>(attr, &pBatch->m_arr[colidx], rows);
+            }
+        }
+    }
+
+    MemoryContextSwitchTo(oldContext);
+}
+
 /*
  * @Description: Vectorized Operator--Convert row data to vector batch.
  *
  * @IN state: Row To Vector State.
  * @return: Return the batch of row table data, return NULL otherwise.
  */
-VectorBatch* ExecRowToVec(RowToVecState* state)
+static VectorBatch* ExecRowToVecTupleMode(RowToVecState* state)
 {
     int i;
     PlanState* outer_plan = NULL;
@@ -185,9 +311,187 @@ done:
     return batch;
 }

+static VectorBatch *ApplyProjectionAndFilterBatch(VectorBatch *pScanBatch,
+    SeqScanState *node, TupleTableSlot **outerslot)
+{
+    ExprContext *econtext = NULL;
+    ProjectionInfo *proj = node->ps.ps_ProjInfo;
+    VectorBatch *pOutBatch = NULL;
+    bool fSimpleMap = false;
+    uint64 inputRows = pScanBatch->m_rows;
+    List* qual = node->ps.qual;
+
+    econtext = node->ps.ps_ExprContext;
+    pOutBatch = node->scanBatchState->pCurrentBatch;
+    fSimpleMap = node->ps.ps_ProjInfo->pi_directMap;
+    if (pScanBatch->m_rows != 0) {
+        initEcontextBatch(pScanBatch, NULL, NULL, NULL);
+        /* Evaluate the qualification clause if any. */
+        if (qual != NULL) {
+            ScalarVector *pVector = NULL;
+            pVector = ExecVecQual(qual, econtext, false);
+            /* If no matched rows, fetch again. */
+            if (pVector == NULL) {
+                pOutBatch->m_rows = 0;
+                /* collect information of removed rows */
+                InstrCountFiltered1(node, inputRows - pOutBatch->m_rows);
+                return pOutBatch;
+            }
+
+            /* Call optimized PackT function when batch mode is turned on. */
+            if (econtext->ecxt_scanbatch->m_sel) {
+                pScanBatch->Pack(econtext->ecxt_scanbatch->m_sel);
+            }
+        }
+
+        /*
+         * Late read these columns;
+         * reset m_rows to the value before VecQual.
+         */
+        VectorizeTupleBatchMode<true>(pScanBatch, outerslot, econtext, node->scanBatchState, pScanBatch->m_rows);
+
+        /* Project the final result */
+        if (!fSimpleMap) {
+            pOutBatch = ExecVecProject(proj, true, NULL);
+        } else {
+            /*
+             * Copy the result to the output batch. Note the output batch has a different
+             * column set than the scan batch, so we have to remap them. Projection handles
+             * all of this in the non-simpleMap case, so nothing more is needed there.
+             */
+            pOutBatch->m_rows += pScanBatch->m_rows;
+            for (int i = 0; i < pOutBatch->m_cols; i++) {
+                AttrNumber att = proj->pi_varNumbers[i];
+                Assert(att > 0 && att <= pScanBatch->m_cols);
+
+                errno_t rc = memcpy_s(&pOutBatch->m_arr[i], sizeof(ScalarVector), &pScanBatch->m_arr[att - 1],
+                    sizeof(ScalarVector));
+                securec_check(rc, "\0", "\0");
+            }
+        }
+    }
+
+    if (!proj->pi_exprContext->have_vec_set_fun) {
+        pOutBatch->m_rows = Min(pOutBatch->m_rows, pScanBatch->m_rows);
+        pOutBatch->FixRowCount();
+    }
+
+    /* collect information of removed rows */
+    InstrCountFiltered1(node, inputRows - pOutBatch->m_rows);
+
+    /* check fullness of the return batch; refill if it does not contain enough rows */
+    return pOutBatch;
+}
+
+static VectorBatch *ExecRowToVecBatchMode(RowToVecState *state)
+{
+    VectorBatch *pFinalBatch = state->m_pCurrentBatch;
+    SeqScanState *seqScanState = (SeqScanState *)outerPlanState(state);
+    ScanBatchState *scanBatchState = seqScanState->scanBatchState;
+    ExprContext *econtext = state->ps.ps_ExprContext;
+    VectorBatch *pBatch = scanBatchState->pScanBatch;
+    seqScanState->ps.ps_ProjInfo->pi_exprContext->ecxt_scanbatch = pBatch;
+    VectorBatch *pOutBatch = scanBatchState->pCurrentBatch;
+    const int BatchModeMaxTuples = 900;
+    const int MaxLoopsForReset = 50;
+
+    pFinalBatch->Reset();
+    ResetExprContext(seqScanState->ps.ps_ExprContext);
+    ResetExprContext(econtext);
+
+    /* the last call returned rows, but the last partition was read out */
+    if (scanBatchState->scanfinished || state->m_fNoMoreRows) {
+        scanBatchState->scanfinished = false;
+        /* scan the next partition for the partition iterator */
+        return pFinalBatch;
+    }
+
+    pBatch->Reset();
+    pOutBatch->Reset();
+    scanBatchState->scanTupleSlotMaxNum = BatchMaxSize;
+
+    int loops = 0;
+    while (true) {
+        loops++;
+
+        /* Reset the MemoryContext to avoid using too much memory if we scan more than MAX_LOOPS_FOR_RESET times */
+        if (loops == MaxLoopsForReset) {
+            if (pFinalBatch->m_rows != 0) {
+                return pFinalBatch;
+            }
+            loops = 0;
+            ResetExprContext(seqScanState->ps.ps_ExprContext);
+            ResetExprContext(econtext);
+        }
+        ScanBatchResult *scanSlotBatch = (ScanBatchResult *)ExecProcNode((PlanState*)seqScanState);
+
+        /* a NULL scanSlotBatch means early free */
+        if (scanSlotBatch == NULL || scanBatchState->scanfinished) {
+            /* scan the next partition for the partition iterator */
+            scanBatchState->scanfinished = false;
+            state->m_fNoMoreRows = true;
+            return pFinalBatch;
+        }
+
+        /* Vectorize tuples for filter columns. */
+        VectorizeTupleBatchMode<false>(pBatch, scanSlotBatch->scanTupleSlotInBatch,
+            econtext, scanBatchState, scanSlotBatch->rows);
+
+        pBatch->FixRowCount(scanSlotBatch->rows);
+
+        /* apply filter conditions and vectorize tuples for late-read columns */
+        pOutBatch = ApplyProjectionAndFilterBatch(pBatch, seqScanState, scanSlotBatch->scanTupleSlotInBatch);
+
+        /* prepare pBatch for the next read */
+        for (int i = 0; i < pBatch->m_cols; i++) {
+            scanBatchState->nullflag[i] = false;
+        }
+        pBatch->FixRowCount(0);
+
+        if (BatchIsNull(pOutBatch)) {
+            if (!scanBatchState->scanfinished) {
+                continue;
+            }
+            scanBatchState->scanfinished = false;
+            state->m_fNoMoreRows = true;
+            return pFinalBatch;
+        }
+
+        for (int i = 0; i < pOutBatch->m_cols; i++) {
+            pFinalBatch->m_arr[i].copyDeep(&(pOutBatch->m_arr[i]), 0, pOutBatch->m_rows);
+        }
+
+        pFinalBatch->m_rows += pOutBatch->m_rows;
+        scanBatchState->scanTupleSlotMaxNum = BatchMaxSize - pFinalBatch->m_rows;
+
+        /*
+         * Use BatchModeMaxTuples to keep pFinalBatch->m_rows away from BatchMaxSize - 1,
+         * which would set scanBatchState->scanTupleSlotMaxNum to 1 and make each
+         * SeqNextBatchMode call read only one tuple.
+         */
+        if (scanBatchState->scanfinished || pFinalBatch->m_rows >= BatchModeMaxTuples) {
+            /* this call may return rows even though the scan finished; the next call must return an empty batch */
+            return pFinalBatch;
+        }
+    }
+
+    return pFinalBatch;
+}
+
+
+VectorBatch *ExecRowToVec(RowToVecState *state)
+{
+    if (state->m_batchMode) {
+        return ExecRowToVecBatchMode(state);
+    } else {
+        return ExecRowToVecTupleMode(state);
+    }
+}
+
 RowToVecState* ExecInitRowToVec(RowToVec* node, EState* estate, int eflags)
 {
     RowToVecState* state = NULL;
+    ScanState* scanstate = NULL;

     /*
      * create state structure
@@ -197,7 +501,9 @@
     state->ps.state = estate;
     state->ps.vectorized = true;

-    CheckTypeSupportRowToVec(node->plan.targetlist);
+    if (!CheckTypeSupportRowToVec(node->plan.targetlist, ERROR)) {
+        return NULL;
+    }

     /*
      * tuple table initialization
@@ -215,6 +521,9 @@
      * We shield the child node from the need to support REWIND, BACKWARD, or
      * MARK/RESTORE.
      */
+    if (IsA(((Plan *)node)->lefttree, SeqScan)) {
+        ((SeqScan*)((Plan *)node)->lefttree)->scanBatchMode = true;
+    }
     outerPlanState(state) = ExecInitNode(outerPlan(node), estate, eflags);

     /*
@@ -224,6 +533,13 @@
      */
     ExecAssignExprContext(estate, &state->ps);

+    scanstate = (ScanState *)outerPlanState(state);
+    if (IsA(scanstate, SeqScanState) && ((SeqScan *)scanstate->ps.plan)->scanBatchMode) {
+        state->m_batchMode = true;
+    } else {
+        state->m_batchMode = false;
+    }
+
     /*
      * initialize tuple type. no need to initialize projection info because
      * this node doesn't do projections.
@@ -268,25 +584,3 @@ void ExecReScanRowToVec(RowToVecState* node)
     ExecReScan(node->ps.lefttree);
 }

-/*
- * Check if there is any data type unsupported by cstore.
If so, stop rowtovec - */ -static void CheckTypeSupportRowToVec(List* targetlist) -{ - ListCell* cell = NULL; - TargetEntry* entry = NULL; - Var* var = NULL; - foreach(cell, targetlist) { - entry = (TargetEntry*)lfirst(cell); - if (IsA(entry->expr, Var)) { - var = (Var*)entry->expr; - if (var->varattno > 0 && var->varoattno > 0 - && var->vartype != TIDOID // cstore support for hidden column CTID - && !IsTypeSupportedByCStore(var->vartype, var->vartypmod)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("type \"%s\" is not supported in column store", - format_type_with_typemod(var->vartype, var->vartypmod)))); - } - } - } -} diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vecsort.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vecsort.cpp index 22b0f4534..650218b13 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vecsort.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vecsort.cpp @@ -331,6 +331,7 @@ VecSortState* ExecInitVecSort(Sort* node, EState* estate, int eflags) SO1_printf("ExecInitVecSort: %s\n", "sort node initialized"); +#ifdef ENABLE_LLVM_COMPILE /* * Consider codegeneration for sort node. In fact, CompareMultiColumn is the * hotest function in sort node. @@ -363,6 +364,7 @@ VecSortState* ExecInitVecSort(Sort* node, EState* estate, int eflags) } } } +#endif return sort_stat; } diff --git a/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp b/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp index d8f438a00..6463aca4b 100644 --- a/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp +++ b/src/gausskernel/runtime/vecexecutor/vecnode/vectortorow.cpp @@ -158,6 +158,7 @@ TupleTableSlot* ExecVecToRow(VecToRowState* state) /* return: a tuple or NULL */ state->m_currentRow = 0; // Convert the batch into row based tuple DevectorizeOneBatch(state); + outer_plan->ps_rownum += current_batch->m_rows; } // retrieve rows from current batch diff --git a/src/gausskernel/security/gs_ledger/ledger_check.cpp b/src/gausskernel/security/gs_ledger/ledger_check.cpp index 6e6d910c7..c64a351b4 100644 --- a/src/gausskernel/security/gs_ledger/ledger_check.cpp +++ b/src/gausskernel/security/gs_ledger/ledger_check.cpp @@ -579,4 +579,4 @@ Datum ledger_gchain_repair(PG_FUNCTION_ARGS) } return UInt64GetDatum(rel_hash); -} +} \ No newline at end of file diff --git a/src/gausskernel/security/gs_ledger/userchain.cpp b/src/gausskernel/security/gs_ledger/userchain.cpp index 0fb163dab..68dc66dcd 100644 --- a/src/gausskernel/security/gs_ledger/userchain.cpp +++ b/src/gausskernel/security/gs_ledger/userchain.cpp @@ -405,10 +405,6 @@ uint64 get_user_tupleid_hash(Relation relation, ItemPointer tupleid) LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - if (PageIs4BXidVersion(page)) { - heap_page_upgrade(relation, buffer); - } - lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tupleid)); tp.t_tableOid = RelationGetRelid(relation); tp.t_data = (HeapTupleHeader) PageGetItem(page, lp); diff --git a/src/gausskernel/security/gs_policy/gs_policy_audit.cpp b/src/gausskernel/security/gs_policy/gs_policy_audit.cpp index 9f3b2d30a..6209b6bd4 100644 --- a/src/gausskernel/security/gs_policy/gs_policy_audit.cpp +++ b/src/gausskernel/security/gs_policy/gs_policy_audit.cpp @@ -595,7 +595,7 @@ void create_audit_policy(CreateAuditPolicyStmt *stmt) } else { send_manage_message(AUDIT_FAILED); ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), + (errcode(ERRCODE_DUPLICATE_POLICY), errmsg("%s policy already exists, create failed", policy_name))); } return; diff --git 
a/src/gausskernel/security/gs_policy/policy_common.cpp b/src/gausskernel/security/gs_policy/policy_common.cpp index bdba81d15..e3d1ce968 100644 --- a/src/gausskernel/security/gs_policy/policy_common.cpp +++ b/src/gausskernel/security/gs_policy/policy_common.cpp @@ -215,7 +215,7 @@ static bool verify_function_name(Oid namespaceId, const char *funcname) #endif if (catlist != NULL) { for (int i = 0; i < catlist->n_members && !is_found; ++i) { - HeapTuple proctup = &catlist->members[i]->tuple; + HeapTuple proctup = t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i); Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup); if (procform != NULL && procform->pronamespace == namespaceId) { Oid funcid = HeapTupleGetOid(proctup); @@ -617,7 +617,7 @@ void create_policy_label(CreatePolicyLabelStmt *stmt) ereport(NOTICE, (errmsg("%s label already defined, skipping", label_name))); } else { send_manage_message(AUDIT_FAILED); - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("%s label already defined", label_name))); + ereport(ERROR, (errcode(ERRCODE_DUPLICATE_LABEL), errmsg("%s label already defined", label_name))); } return; } diff --git a/src/gausskernel/security/iprange/iprange.cpp b/src/gausskernel/security/iprange/iprange.cpp index 6ae9c0b90..ca7b66ef9 100644 --- a/src/gausskernel/security/iprange/iprange.cpp +++ b/src/gausskernel/security/iprange/iprange.cpp @@ -605,21 +605,6 @@ std::unordered_set IPRange::get_ranges_set() return rslt; } -void IPRange::print_range(const Range *range) -{ - cout << "range from: " << ip_to_str(&range->from) << " to " << ip_to_str(&range->to) << endl; -} - -void IPRange::print_ranges() -{ - cout << endl << "print ranges" << endl; - cout << "-------------" << endl; - std::unordered_set ranges = get_ranges_set(); - for (const std::string range : ranges) { - cout << "range: " << range << endl; - } -} - void IPRange::copy_without_spaces(char buf[], size_t buf_len, const char *original, size_t original_len) const { if (original_len == 0 || original_len > buf_len) { diff --git a/src/gausskernel/security/tde_key_management/ckms_message.cpp b/src/gausskernel/security/tde_key_management/ckms_message.cpp index 5b1904e63..2d8b6429b 100755 --- a/src/gausskernel/security/tde_key_management/ckms_message.cpp +++ b/src/gausskernel/security/tde_key_management/ckms_message.cpp @@ -54,7 +54,6 @@ void CKMSMessage::init() tde_message_mem = AllocSetContextCreate(g_instance.cache_cxt.global_cache_mem, "TDE_MESSAGE_CONTEXT", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, SHARED_CONTEXT); } - load_user_info(); } void CKMSMessage::clear() @@ -107,6 +106,7 @@ char* CKMSMessage::read_kms_info_from_file() char* buffer = NULL; int path_len = 0; int json_len = 0; + int max_json_len = 1024; errno_t rc = EOK; data_directory = g_instance.attr.attr_common.data_directory; @@ -129,6 +129,14 @@ char* CKMSMessage::read_kms_info_from_file() json_file.seekg(0, ios::end); json_len = json_file.tellg(); + if (json_len > max_json_len) { + json_file.close(); + ereport(ERROR, + (errmodule(MOD_SEC_TDE), errcode(ERRCODE_FILE_READ_FAILED), + errmsg("kms_iam_info.json file length is bigger than max_len"), + errdetail("file path: $TDE_PATH/tde_config/kms_iam_info.json"), errcause("file context is wrong"), + erraction("check the kms_iam_info.json file"))); + } json_file.seekg(0, ios::beg); buffer = (char*)palloc0(json_len + 1); json_file.read(buffer, json_len + 1); diff --git a/src/gausskernel/storage/CMakeLists.txt b/src/gausskernel/storage/CMakeLists.txt index 
ec27546a4..a7d93a8d5 100755 --- a/src/gausskernel/storage/CMakeLists.txt +++ b/src/gausskernel/storage/CMakeLists.txt @@ -30,6 +30,10 @@ if("${ENABLE_MOT}" STREQUAL "ON") list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/mot) endif() +if("${ENABLE_UT}" STREQUAL "ON") + list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/test) +endif() + add_subdirectory(access) add_subdirectory(bulkload) add_subdirectory(buffer) @@ -54,3 +58,7 @@ if("${ENABLE_MOT}" STREQUAL "ON") add_subdirectory(mot) endif() +if("${ENABLE_UT}" STREQUAL "ON") + add_subdirectory(test) +endif() + diff --git a/src/gausskernel/storage/access/CMakeLists.txt b/src/gausskernel/storage/access/CMakeLists.txt index d4f2d2b75..ff46e936f 100755 --- a/src/gausskernel/storage/access/CMakeLists.txt +++ b/src/gausskernel/storage/access/CMakeLists.txt @@ -16,7 +16,6 @@ set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/index ${CMAKE_CURRENT_SOURCE_DIR}/nbtree ${CMAKE_CURRENT_SOURCE_DIR}/obs - ${CMAKE_CURRENT_SOURCE_DIR}/archive ${CMAKE_CURRENT_SOURCE_DIR}/psort ${CMAKE_CURRENT_SOURCE_DIR}/redo ${CMAKE_CURRENT_SOURCE_DIR}/rmgrdesc @@ -27,6 +26,13 @@ set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/ustore ) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND CMAKE_MODULE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/archive + ) + add_subdirectory(archive) +endif() + add_subdirectory(cbtree) add_subdirectory(common) add_subdirectory(dfs) @@ -38,7 +44,6 @@ add_subdirectory(heap) add_subdirectory(index) add_subdirectory(nbtree) add_subdirectory(obs) -add_subdirectory(archive) add_subdirectory(psort) add_subdirectory(redo) add_subdirectory(rmgrdesc) diff --git a/src/gausskernel/storage/access/Makefile b/src/gausskernel/storage/access/Makefile index 5c4357723..b8b996955 100644 --- a/src/gausskernel/storage/access/Makefile +++ b/src/gausskernel/storage/access/Makefile @@ -2,6 +2,9 @@ subdir = src/gausskernel/storage/access top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global -SUBDIRS = cbtree common dfs heap index nbtree ubtree psort rmgrdesc transam obs hash spgist gist gin hbstore redo table ustore archive +SUBDIRS = cbtree common dfs heap index nbtree ubtree psort rmgrdesc transam obs hash spgist gist gin hbstore redo table ustore +ifeq ($(enable_lite_mode), no) +SUBDIRS += archive +endif include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/access/archive/CMakeLists.txt b/src/gausskernel/storage/access/archive/CMakeLists.txt index 9365f6603..582c55be4 100644 --- a/src/gausskernel/storage/access/archive/CMakeLists.txt +++ b/src/gausskernel/storage/access/archive/CMakeLists.txt @@ -1,6 +1,10 @@ #This is the main CMAKE for build bin. 
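# The hunk below drops nas_am.cpp from the archive target when lite mode is
# enabled. A minimal sketch of the same source-filtering pattern, assuming a
# cache variable ENABLE_LITE_MODE provided by the configure step (sketch only,
# not part of this patch):
#
#   AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_archive_SRC)
#   if("${ENABLE_LITE_MODE}" STREQUAL "ON")
#       list(REMOVE_ITEM TGT_archive_SRC ${CMAKE_CURRENT_SOURCE_DIR}/nas_am.cpp)
#   endif()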
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_archive_SRC) +if("${ENABLE_LITE_MODE}" STREQUAL "ON") + list(REMOVE_ITEM TGT_archive_SRC ${CMAKE_CURRENT_SOURCE_DIR}/nas_am.cpp) +endif() + set(TGT_archive_INC ${PROJECT_SRC_DIR}/gausskernel/cbb/communication ${PROJECT_SRC_DIR}/include/iprange diff --git a/src/gausskernel/storage/access/archive/Makefile b/src/gausskernel/storage/access/archive/Makefile index 4930f96f1..156260547 100644 --- a/src/gausskernel/storage/access/archive/Makefile +++ b/src/gausskernel/storage/access/archive/Makefile @@ -33,6 +33,9 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif -OBJS = nas_am.o archive_am.o +OBJS = archive_am.o +ifeq ($(enable_lite_mode), no) +OBJS += nas_am.o +endif include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/access/archive/archive_am.cpp b/src/gausskernel/storage/access/archive/archive_am.cpp index dd50dfa3c..9d6280ef0 100644 --- a/src/gausskernel/storage/access/archive/archive_am.cpp +++ b/src/gausskernel/storage/access/archive/archive_am.cpp @@ -86,3 +86,20 @@ List* ArchiveList(const char* prefix, ArchiveConfig *archive_config, bool report return fileNameList; } + +bool ArchiveFileExist(const char* file_path, ArchiveConfig *archive_config) +{ + bool ret = false; + if (archive_config == NULL) { + ereport(WARNING, (errmsg("when check file exist, the archive config is null"))); + return ret; + } + + if (archive_config->media_type == ARCHIVE_OBS) { + ret = checkOBSFileExist(file_path, archive_config); + } else if (archive_config->media_type == ARCHIVE_NAS) { + ret = checkNASFileExist(file_path, archive_config); + } + + return ret; +} diff --git a/src/gausskernel/storage/access/archive/nas_am.cpp b/src/gausskernel/storage/access/archive/nas_am.cpp index 37a08634e..9c823f265 100644 --- a/src/gausskernel/storage/access/archive/nas_am.cpp +++ b/src/gausskernel/storage/access/archive/nas_am.cpp @@ -47,6 +47,7 @@ #include "replication/walreceiver.h" #define MAX_PATH_LEN 1024 +static int headerLen = 22; size_t NasRead(const char* fileName, const int offset, char *buffer, const int length, ArchiveConfig *nas_config) { @@ -73,8 +74,23 @@ size_t NasRead(const char* fileName, const int offset, char *buffer, const int l errmsg("Cannot get archive config from replication slots"))); } - ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_nas->archive_prefix, fileName); - securec_check_ss(ret, "\0", "\0"); + if (strncmp(fileName, "global_barrier_records", headerLen) != 0) { + ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_nas->archive_prefix, fileName); + securec_check_ss(ret, "\0", "\0"); + } else { + char pathPrefix[MAXPGPATH] = {0}; + ret = strcpy_s(pathPrefix, MAXPGPATH, archive_nas->archive_prefix); + securec_check_ss(ret, "\0", "\0"); + if (!IS_PGXC_COORDINATOR) { + char *p = strrchr(pathPrefix, '/'); + if (p == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid"))); + } + *p = '\0'; + } + ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, fileName); + securec_check_ss(ret, "\0", "\0"); + } if (stat(file_path, &statbuf)) { if (errno != ENOENT) { @@ -107,6 +123,7 @@ int NasWrite(const char* fileName, const char *buffer, const int bufferLength, A int ret = 0; ArchiveConfig *archive_nas = NULL; char file_path[MAXPGPATH] = {0}; + char file_path_bak[MAXPGPATH] = {0}; char *origin_file_path = NULL; char *base_path = NULL; FILE *fp = NULL; @@ -127,8 +144,23 @@ int NasWrite(const char* fileName, const char 
*buffer, const int bufferLength, A errmsg("Cannot get archive config from replication slots"))); } - ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_nas->archive_prefix, fileName); - securec_check_ss(ret, "\0", "\0"); + if (strncmp(fileName, "global_barrier_records", headerLen) != 0) { + ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_nas->archive_prefix, fileName); + securec_check_ss(ret, "\0", "\0"); + } else { + char pathPrefix[MAXPGPATH] = {0}; + ret = strcpy_s(pathPrefix, MAXPGPATH, archive_nas->archive_prefix); + securec_check_ss(ret, "\0", "\0"); + if (!IS_PGXC_COORDINATOR) { + char *p = strrchr(pathPrefix, '/'); + if (p == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid"))); + } + *p = '\0'; + } + ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, fileName); + securec_check_ss(ret, "\0", "\0"); + } canonicalize_path(file_path); @@ -142,7 +174,9 @@ int NasWrite(const char* fileName, const char *buffer, const int bufferLength, A } } - fp = fopen(file_path, "wb"); + ret = snprintf_s(file_path_bak, MAXPGPATH, MAXPGPATH - 1, "%s.bak", file_path); + securec_check_ss(ret, "\0", "\0"); + fp = fopen(file_path_bak, "wb"); if (fp == NULL) { pfree_ext(origin_file_path); ereport(LOG, (errmsg("could not create file \"%s\": %m", fileName))); @@ -150,10 +184,23 @@ int NasWrite(const char* fileName, const char *buffer, const int bufferLength, A } if (fwrite(buffer, bufferLength, 1, fp) != 1) { + ereport(LOG, (errmsg("could not write file \"%s\": %m", fileName))); pfree_ext(origin_file_path); fclose(fp); return -1; } + if (fflush(fp) != 0) { + ereport(LOG, (errmsg("could not fflush file \"%s\": %m", fileName))); + (void)fclose(fp); + pfree_ext(origin_file_path); + return -1; + } + if (rename(file_path_bak, file_path) < 0) { + ereport(LOG, (errmsg("could not rename file \"%s\": %m", fileName))); + (void)fclose(fp); + pfree_ext(origin_file_path); + return -1; + } pfree_ext(origin_file_path); fclose(fp); @@ -177,11 +224,26 @@ int NasDelete(const char* fileName, ArchiveConfig *nas_config) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Cannot get obs bucket config from replication slots"))); } - - ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_nas->archive_prefix, fileName); - securec_check_ss(ret, "\0", "\0"); + if (strncmp(fileName, "global_barrier_records", headerLen) != 0) { + ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_nas->archive_prefix, fileName); + securec_check_ss(ret, "\0", "\0"); + } else { + char pathPrefix[MAXPGPATH] = {0}; + ret = strcpy_s(pathPrefix, MAXPGPATH, archive_nas->archive_prefix); + securec_check_ss(ret, "\0", "\0"); + if (!IS_PGXC_COORDINATOR) { + char *p = strrchr(pathPrefix, '/'); + if (p == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid"))); + } + *p = '\0'; + } + ret = snprintf_s(file_path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, fileName); + securec_check_ss(ret, "\0", "\0"); + } if (lstat(file_path, &statbuf) < 0) { + ereport(LOG, (errmsg("could not stat file \"%s\": %m", fileName))); return -1; } if (S_ISDIR(statbuf.st_mode)) { @@ -321,6 +383,41 @@ List* NasList(const char* prefix, ArchiveConfig *nas_config) fileNameList = SortFileList(fileNameListTmp); - list_free_ext(fileNameListTmp); + list_free_deep(fileNameListTmp); + fileNameListTmp = NIL; return fileNameList; } + +bool checkNASFileExist(const char* file_path, 
ArchiveConfig *nas_config) +{ + struct stat buf; + char realPath[MAXPGPATH] = {0}; + int ret = 0; + + if (strncmp(file_path, "global_barrier_records", headerLen) != 0) { + ret = snprintf_s(realPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", nas_config->archive_prefix, file_path); + securec_check_ss_c(ret, "\0", "\0"); + } else { + char pathPrefix[MAXPGPATH] = {0}; + ret = strcpy_s(pathPrefix, MAXPGPATH, nas_config->archive_prefix); + securec_check_ss_c(ret, "\0", "\0"); + if (!IS_PGXC_COORDINATOR) { + char *p = strrchr(pathPrefix, '/'); + if (p == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid"))); + } + *p = '\0'; + } + ret = snprintf_s(realPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, file_path); + securec_check_ss_c(ret, "\0", "\0"); + } + if (stat(realPath, &buf) == -1 && errno == ENOENT) { + return false; + } + + if (!S_ISREG(buf.st_mode)) { + return false; + } + + return true; +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/common/heaptuple.cpp b/src/gausskernel/storage/access/common/heaptuple.cpp index 2ac2ce997..ba232aa0d 100644 --- a/src/gausskernel/storage/access/common/heaptuple.cpp +++ b/src/gausskernel/storage/access/common/heaptuple.cpp @@ -73,6 +73,7 @@ #include "utils/memutils.h" #include "utils/elog.h" #include "access/ustore/knl_utuple.h" +#include "vecexecutor/vectorbatch.h" #ifdef ENABLE_UT #define static @@ -278,6 +279,7 @@ bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupDesc) #ifdef PGXC case XC_NodeIdAttributeNumber: case BucketIdAttributeNumber: + case UidAttributeNumber: #endif /* these are never null */ break; @@ -585,6 +587,9 @@ Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnu case XC_NodeIdAttributeNumber: result = UInt32GetDatum(tup->t_xc_node_id); break; + case UidAttributeNumber: + result = UInt64GetDatum(HeapTupleGetUid(tup)); + break; #endif default: ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("invalid attnum: %d", attnum))); @@ -622,7 +627,8 @@ HeapTuple heap_copytuple(HeapTuple tuple) Assert(!HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)); - newTuple = (HeapTuple)heaptup_alloc(HEAPTUPLESIZE + tuple->t_len); + newTuple = (HeapTuple)palloc(HEAPTUPLESIZE + tuple->t_len); + newTuple->tupTableType = HEAP_TUPLE; newTuple->t_len = tuple->t_len; newTuple->t_self = tuple->t_self; newTuple->t_tableOid = tuple->t_tableOid; @@ -718,6 +724,11 @@ HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull } else if (att[i]->attlen == -1 && att[i]->attalign == 'd' && att[i]->attndims == 0 && !VARATT_IS_EXTENDED(DatumGetPointer(values[i]))) { values[i] = toast_flatten_tuple_attribute(values[i], att[i]->atttypid, att[i]->atttypmod); + } else if (att[i]->attlen == -1 && att[i]->attalign == 'i' && + VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(values[i])) && + !(att[i]->atttypid == CLOBOID || att[i]->atttypid == BLOBOID)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("only support type(clob/blob) for more than 1G toast"))); + } } @@ -744,7 +755,11 @@ HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull * Allocate and zero the space needed. Note that the tuple body and * HeapTupleData management structure are allocated in one chunk.
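*
* A sketch of the uid-aware sizing added below: when the descriptor carries
* uids (tdhasuids), the allocation reserves MAXALIGN'd room for a uint64
* between the header and the data area,
*   example code: Size allocSize = HEAPTUPLESIZE + len;
*   example code: if (tupleDescriptor->tdhasuids)
*   example code:     allocSize = HEAPTUPLESIZE + MAXALIGN(hoff + sizeof(uint64)) + data_len;
* so that HeapTupleSetUid() can later memmove the payload into the reserved
* slack without reallocating the tuple.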
*/ - tuple = (HeapTuple)heaptup_alloc(HEAPTUPLESIZE + len); + Size allocSize = HEAPTUPLESIZE + len; + if (tupleDescriptor->tdhasuids) { /* prealloc 8 bytes */ + allocSize = HEAPTUPLESIZE + MAXALIGN(hoff + sizeof(uint64)) + data_len; + } + tuple = (HeapTuple)heaptup_alloc(allocSize); tuple->t_data = td = (HeapTupleHeader)((char *)tuple + HEAPTUPLESIZE); /* @@ -772,6 +787,8 @@ HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull td->t_infomask = HEAP_HASOID; } + td->t_infomask &= ~HEAP_UID_MASK; + heap_fill_tuple(tupleDescriptor, values, isnull, (char *)td + hoff, data_len, &td->t_infomask, (hasnull ? td->t_bits : NULL)); @@ -1071,6 +1088,33 @@ void heap_deformtuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, char static void slot_deform_cmprs_tuple(TupleTableSlot *slot, uint32 natts); +static void deform_next_attribute(bool& slow, long& off, Form_pg_attribute thisatt, char* tp) +{ + if (!slow && thisatt->attcacheoff >= 0) { + off = thisatt->attcacheoff; + } else if (thisatt->attlen == -1) { + /* + * We can only cache the offset for a varlena attribute if the + * offset is already suitably aligned, so that there would be no + * pad bytes in any case: then the offset will be valid for either + * an aligned or unaligned value. + */ + if (!slow && (uintptr_t)(off) == att_align_nominal(off, thisatt->attalign)) { + thisatt->attcacheoff = off; + } else { + off = att_align_pointer(off, thisatt->attalign, -1, tp + off); + slow = true; + } + } else { + /* not varlena, so safe to use att_align_nominal */ + off = att_align_nominal(off, thisatt->attalign); + + if (!slow) { + thisatt->attcacheoff = off; + } + } +} + /* * slot_deform_tuple * Given a TupleTableSlot, extract data from the slot's physical tuple @@ -1134,29 +1178,7 @@ static void slot_deform_tuple(TupleTableSlot *slot, uint32 natts) isnull[attnum] = false; - if (!slow && thisatt->attcacheoff >= 0) { - off = thisatt->attcacheoff; - } else if (thisatt->attlen == -1) { - /* - * We can only cache the offset for a varlena attribute if the - * offset is already suitably aligned, so that there would be no - * pad bytes in any case: then the offset will be valid for either - * an aligned or unaligned value. - */ - if (!slow && (uintptr_t)(off) == att_align_nominal(off, thisatt->attalign)) { - thisatt->attcacheoff = off; - } else { - off = att_align_pointer(off, thisatt->attalign, -1, tp + off); - slow = true; - } - } else { - /* not varlena, so safe to use att_align_nominal */ - off = att_align_nominal(off, thisatt->attalign); - - if (!slow) { - thisatt->attcacheoff = off; - } - } + deform_next_attribute(slow, off, thisatt, tp); values[attnum] = fetchatt(thisatt, tp + off); @@ -1175,6 +1197,66 @@ static void slot_deform_tuple(TupleTableSlot *slot, uint32 natts) slot->tts_slow = slow; } + +static void slot_deform_batch(TupleTableSlot *slot, VectorBatch* batch, int cur_rows, uint32 natts) +{ + HeapTuple tuple = (HeapTuple)slot->tts_tuple; + Assert(tuple->tupTableType == HEAP_TUPLE); + TupleDesc tupleDesc = slot->tts_tupleDescriptor; + HeapTupleHeader tup = tuple->t_data; + bool hasnulls = HeapTupleHasNulls(tuple); + Form_pg_attribute *att = tupleDesc->attrs; + uint32 attnum; + char *tp = NULL; /* ptr to tuple data */ + long off; /* offset in tuple data */ + bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ + bool slow = false; /* can we use/set attcacheoff? 
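* (the attcacheoff bookkeeping formerly inlined here is factored into the new
* deform_next_attribute() helper above; per attribute the loop now does
*   example code: deform_next_attribute(slow, off, thisatt, tp);
*   example code: pVector->m_vals[cur_rows] = fetchatt(thisatt, tp + off);
* exactly as in the scalar slot_deform_tuple() path.)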
*/ + bool heapToUHeap = tupleDesc->tdTableAmType == TAM_USTORE; + + /* + * Check whether the first call for this tuple, and initialize or restore + * loop state. + */ + attnum = 0; + off = 0; + slow = false; + + /* + * Ustore has different alignment rules so we force slow = true here. + * See the comments in heap_deform_tuple() for more information. + */ + slow = heapToUHeap ? true : slow; + + tp = (char *)tup + tup->t_hoff; + + for (; attnum < natts; attnum++) { + Form_pg_attribute thisatt = att[attnum]; + ScalarVector* pVector = &batch->m_arr[attnum]; + + if (hasnulls && att_isnull(attnum, bp)) { + pVector->m_vals[cur_rows] = (Datum)0; + SET_NULL(pVector->m_flag[cur_rows]); + slow = true; /* can't use attcacheoff anymore */ + + /* stole the flag for perf */ + pVector->m_const = true; + continue; + } + + SET_NOTNULL(pVector->m_flag[cur_rows]); + + deform_next_attribute(slow, off, thisatt, tp); + + pVector->m_vals[cur_rows] = fetchatt(thisatt, tp + off); + + off = att_addlength_pointer(off, thisatt->attlen, tp + off); + + if (thisatt->attlen <= 0) { + slow = true; /* can't use attcacheoff anymore */ + } + } +} + #ifdef PGXC /* @@ -1533,6 +1615,50 @@ void heap_slot_getallattrs(TupleTableSlot *slot, bool need_transform_anyarray) slot->tts_nvalid = tdesc_natts; } +static inline int GetAttrNumber(TupleTableSlot* slot, int attnum) +{ + /* Check for caller error */ + if (attnum <= 0 || attnum > slot->tts_tupleDescriptor->natts) { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("invalid attribute number %d", attnum))); + } + + /* internal error */ + if (slot->tts_tuple == NULL) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot extract attribute from empty tuple slot"))); + } + + HeapTuple tuple = (HeapTuple)slot->tts_tuple; + int attno = HeapTupleHeaderGetNatts(tuple->t_data, slot->tts_tupleDescriptor); + attno = Min(attno, attnum); + + return attno; +} + +void heap_slot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int attnum) +{ + int attno = GetAttrNumber(slot, attnum); + + slot_deform_batch(slot, batch, cur_rows, attno); + + /* If tuple doesn't have all the atts indicated by tupleDesc, read the rest as null */ + for (; attno < attnum; attno++) { + /* get init default value from tupleDesc. 
+ * The original Code is: + * example code: slot->tts_values[attno] = (Datum) 0; + * example code: slot->tts_isnull[attno] = true; + */ + ScalarVector* pVector = &batch->m_arr[attno]; + pVector->m_vals[cur_rows] = heapGetInitDefVal(attno + 1, slot->tts_tupleDescriptor, &slot->tts_isnull[attno]); + if (slot->tts_isnull[attno]) { + pVector->m_const = true; + SET_NULL(pVector->m_flag[cur_rows]); + } else { + SET_NOTNULL(pVector->m_flag[cur_rows]); + } + } +} + /* * heap_slot_getsomeattrs * This function forces the entries of the slot's Datum/isnull @@ -1545,9 +1671,6 @@ void heap_slot_getsomeattrs(TupleTableSlot *slot, int attnum) { Assert(slot->tts_tupslotTableAm == TAM_HEAP); - HeapTuple tuple; - int attno; - /* Quick out if we have 'em all already */ if (slot->tts_nvalid >= attnum) { return; @@ -1561,38 +1684,11 @@ void heap_slot_getsomeattrs(TupleTableSlot *slot, int attnum) } #endif - /* Check for caller error */ - if (attnum <= 0 || attnum > slot->tts_tupleDescriptor->natts) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("invalid attribute number %d", attnum))); - } + int attno = GetAttrNumber(slot, attnum); - /* - * otherwise we had better have a physical tuple (tts_nvalid should equal - * natts in all virtual-tuple cases) - */ - tuple = (HeapTuple)slot->tts_tuple; - /* internal error */ - if (tuple == NULL) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot extract attribute from empty tuple slot"))); - } + slot_deform_tuple(slot, attno); - /* - * load up any slots available from physical tuple - */ - attno = HeapTupleHeaderGetNatts(tuple->t_data, slot->tts_tupleDescriptor); - attno = Min(attno, attnum); - - if (HEAP_TUPLE_IS_COMPRESSED(((HeapTuple)((HeapTuple)slot->tts_tuple))->t_data)) { - slot_deform_cmprs_tuple(slot, attno); - } else { - slot_deform_tuple(slot, attno); - } - - /* - * If tuple doesn't have all the atts indicated by tupleDesc, read the - * rest as null - */ + /* If tuple doesn't have all the atts indicated by tupleDesc, read the rest as null */ for (; attno < attnum; attno++) { /* get init default value from tupleDesc. * The original Code is: @@ -1690,6 +1786,14 @@ void heap_freetuple(HeapTuple htup) pfree(htup); } +void check_column_num(int column_num) +{ + if (column_num > MaxTupleAttributeNumber) { + ereport(ERROR, (errcode(ERRCODE_TOO_MANY_COLUMNS), + errmsg("number of columns (%d) exceeds limit (%d)", column_num, MaxTupleAttributeNumber))); + } +} + /* * heap_form_minimal_tuple * construct a MinimalTuple from the given values[] and isnull[] arrays, @@ -1711,11 +1815,7 @@ MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor, Datum *values, c int numberOfAttributes = tupleDescriptor->natts; int i; - if (numberOfAttributes > MaxTupleAttributeNumber) { - ereport(ERROR, (errcode(ERRCODE_TOO_MANY_COLUMNS), errmsg("number of columns (%d) exceeds limit (%d)", - numberOfAttributes, MaxTupleAttributeNumber))); - } - + check_column_num(numberOfAttributes); /* * Check for nulls and embedded tuples; expand any toasted attributes in * embedded tuples. 
This preserves the invariant that toasting can only @@ -1733,6 +1833,10 @@ MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor, Datum *values, c } else if (att[i]->attlen == -1 && att[i]->attalign == 'd' && att[i]->attndims == 0 && !VARATT_IS_EXTENDED(values[i])) { values[i] = toast_flatten_tuple_attribute(values[i], att[i]->atttypid, att[i]->atttypmod); + } else if (att[i]->attlen == -1 && VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(values[i])) && + !(att[i]->atttypid == CLOBOID || att[i]->atttypid == BLOBOID)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("only support type(clob/blob) for more than 1G toast"))); + } } @@ -1755,16 +1859,21 @@ MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor, Datum *values, c len += data_len; + Size allocSize = len; + if (tupleDescriptor->tdhasuids) { /* prealloc 8 bytes */ + allocSize = MAXALIGN(hoff + sizeof(uint64)) + data_len; + } + /* * Allocate and zero the space needed. */ if (inTuple == NULL) { - tuple = (MinimalTuple)palloc0(len); + tuple = (MinimalTuple)palloc0(allocSize); } else { - if (inTuple->t_len < len) { + if (inTuple->t_len < allocSize) { pfree(inTuple); inTuple = NULL; - tuple = (MinimalTuple)palloc0(len); + tuple = (MinimalTuple)palloc0(allocSize); } else { errno_t rc = memset_s(inTuple, inTuple->t_len, 0, inTuple->t_len); securec_check(rc, "\0", "\0"); @@ -1782,6 +1891,8 @@ MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor, Datum *values, c tuple->t_infomask = HEAP_HASOID; } + tuple->t_infomask &= ~HEAP_UID_MASK; + heap_fill_tuple(tupleDescriptor, values, isnull, (char *)tuple + hoff, data_len, &tuple->t_infomask, (hasnull ? tuple->t_bits : NULL)); @@ -2479,7 +2590,11 @@ HeapTuple heap_form_cmprs_tuple(TupleDesc tupleDescriptor, FormCmprTupleData *cm * Allocate and zero the space needed. Note that the tuple body and * HeapTupleData management structure are allocated in one chunk.
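*
* Both tuple-forming paths above now reject huge (>1GB) toast pointers for
* anything other than clob/blob; a condensed sketch of the guard as it
* appears in the hunks above:
*   example code: if (att[i]->attlen == -1 && VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(values[i])) &&
*   example code:     !(att[i]->atttypid == CLOBOID || att[i]->atttypid == BLOBOID))
*   example code:     ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), ...));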
*/ - tuple = (HeapTuple)heaptup_alloc(HEAPTUPLESIZE + len); + Size allocSize = HEAPTUPLESIZE + len; + if (tupleDescriptor->tdhasuids) { /* prealloc 8 bytes */ + allocSize = HEAPTUPLESIZE + MAXALIGN(hoff + sizeof(uint64)) + data_len; + } + tuple = (HeapTuple)heaptup_alloc(allocSize); tuple->t_data = td = (HeapTupleHeader)((char*)tuple + HEAPTUPLESIZE); /* @@ -2502,6 +2617,8 @@ HeapTuple heap_form_cmprs_tuple(TupleDesc tupleDescriptor, FormCmprTupleData *cm HeapTupleHeaderSetNatts(td, numberOfAttributes); td->t_hoff = hoff; + td->t_infomask &= ~HEAP_UID_MASK; + /* else leave infomask = 0 */ if (tupleDescriptor->tdhasoid) { td->t_infomask = HEAP_HASOID; @@ -2712,7 +2829,7 @@ static HeapTuple HeapCopyInitdefvalTup(HeapTuple tuple, TupleDesc tupDesc) * * --------------------------------------------------------------------- */ -HeapTuple heapCopyTuple(HeapTuple tuple, TupleDesc tupDesc, Page page) +FORCE_INLINE HeapTuple heapCopyTuple(HeapTuple tuple, TupleDesc tupDesc, Page page) { if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL) { ereport(WARNING, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -2724,7 +2841,7 @@ HeapTuple heapCopyTuple(HeapTuple tuple, TupleDesc tupDesc, Page page) return heapCopyCompressedTuple(tuple, tupDesc, page); } - if (tupDesc->initdefvals && tupDesc->natts > (int)HeapTupleHeaderGetNatts(tuple->t_data, tupDesc)) { + if (unlikely(tupDesc->initdefvals && tupDesc->natts > (int)HeapTupleHeaderGetNatts(tuple->t_data, tupDesc))) { return HeapCopyInitdefvalTup(tuple, tupDesc); } @@ -2930,7 +3047,6 @@ void heap_slot_materialize(TupleTableSlot *slot) if (BufferIsValid(slot->tts_buffer)) { ReleaseBuffer(slot->tts_buffer); } - slot->tts_buffer = InvalidBuffer; /* @@ -3176,7 +3292,7 @@ HeapTuple heap_slot_copy_heap_tuple(TupleTableSlot *slot) if (TTS_HAS_PHYSICAL_TUPLE(slot)) { return heapCopyTuple((HeapTuple)slot->tts_tuple, slot->tts_tupleDescriptor, - (BufferIsValid(slot->tts_buffer) ? BufferGetPage(slot->tts_buffer) : NULL)); + NULL); } if (slot->tts_mintuple != NULL) { return heap_tuple_from_minimal_tuple(slot->tts_mintuple); } @@ -3203,7 +3319,7 @@ * @param slot: slot to store tuple. * @param: should_free true if clear the slot's tuple contents by pfree_ext() during ExecClearTuple. */ -void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer buffer, bool should_free) +void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer buffer, bool should_free, bool batchMode) { /* * sanity checks @@ -3225,6 +3341,7 @@ void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer bu if (slot->tts_shouldFreeMin) { heap_free_minimal_tuple(slot->tts_mintuple); } +#ifdef ENABLE_MULTIPLE_NODES #ifdef PGXC if (slot->tts_shouldFreeRow) { pfree_ext(slot->tts_dataRow); @@ -3233,12 +3350,16 @@ slot->tts_dataRow = NULL; slot->tts_dataLen = -1; - // Row uncompression use slot->tts_per_tuple_mcxt in some case, So we need - // reset memory context. this memory context is introduced by PGXC and it only used - // in function 'slot_deform_datarow'. PGXC also do reset in function 'FetchTuple'. - // So it is safe - // - ResetSlotPerTupleContext(slot); + /* In batch mode, only the first tuple needs to reset the context */ + if (!batchMode) { + /* + * Row uncompression uses slot->tts_per_tuple_mcxt in some cases, so we need to reset the memory context.
+ * This memory context is introduced by PGXC and is only used in function 'slot_deform_datarow'. + * PGXC also does a reset in function 'FetchTuple', so it is safe. + */ + ResetSlotPerTupleContext(slot); + } +#endif #endif /* @@ -3261,8 +3382,10 @@ void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer bu * tuple on the same disk page: in that case releasing and re-acquiring * the pin is a waste of cycles. This is a common situation during * seqscans, so it's worth troubling over. + * + * In batch mode, only the first tuple needs to take a buffer reference. */ - if (slot->tts_buffer != buffer) { + if (!batchMode && slot->tts_buffer != buffer) { if (BufferIsValid(slot->tts_buffer)) { ReleaseBuffer(slot->tts_buffer); } @@ -3330,3 +3453,31 @@ void HeapCopyTupleNoAlloc(HeapTuple dest, HeapTuple src) errno_t errorNo = memcpy_s((char *) dest->t_data, src->t_len, (char *) src->t_data, src->t_len); securec_check(errorNo, "\0", "\0"); } + +uint64 HeapTupleGetUid(HeapTuple tup) +{ + HeapTupleHeader tupHeader = tup->t_data; + if (!HeapTupleHeaderHasUid(tupHeader)) { + return 0; + } + return *((uint64*)((char*)(tupHeader) + tupHeader->t_hoff - sizeof(uint64))); +} +void HeapTupleSetUid(HeapTuple tup, uint64 uid, int nattrs) +{ + /* catalog tables do not support uids */ + Assert(!(tup->t_data->t_infomask & HEAP_HASOID)); + int uidLen = GetUidByteLen(uid); + errno_t rc = 0; + Size len = offsetof(HeapTupleHeaderData, t_bits); + Size data_len = tup->t_len - tup->t_data->t_hoff; + len += HeapTupleHasNulls(tup) ? BITMAPLEN(nattrs) : 0; + int hoff = MAXALIGN(len + uidLen); + rc = memmove_s((char*)tup->t_data + hoff, data_len, (char*)tup->t_data + tup->t_data->t_hoff, data_len); + securec_check(rc, "", ""); + tup->t_data->t_hoff = hoff; + tup->t_data->t_infomask |= GetUidByteLenInfomask(uid); + tup->t_len = hoff + data_len; + HeapTupleHeaderSetDatumLength(tup->t_data, hoff + data_len); + HeapTupleHeaderSetUid(tup->t_data, uid, uidLen); +} + diff --git a/src/gausskernel/storage/access/common/indextuple.cpp b/src/gausskernel/storage/access/common/indextuple.cpp index 146fd33fc..e3f4cf93d 100644 --- a/src/gausskernel/storage/access/common/indextuple.cpp +++ b/src/gausskernel/storage/access/common/indextuple.cpp @@ -127,6 +127,7 @@ IndexTuple index_form_tuple(TupleDesc tuple_descriptor, Datum* values, const boo * on outside storage. This should be improved someday.
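*
* The call added below routes every varlena datum through
* checkHugeToastPointer() before any detoast attempt, mirroring the guards
* in heaptuple.cpp; the helper is assumed to be declared in a header this
* file already includes and to ereport on huge (>1GB) toast pointers:
*   example code: Pointer val = DatumGetPointer(values[i]);
*   example code: checkHugeToastPointer((varlena *)val);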
*/ Pointer val = DatumGetPointer(values[i]); + checkHugeToastPointer((varlena *)val); if (VARATT_IS_EXTERNAL(val)) { untoasted_values[i] = PointerGetDatum(heap_tuple_fetch_attr((struct varlena*)DatumGetPointer(values[i]))); untoasted_free[i] = true; diff --git a/src/gausskernel/storage/access/common/printtup.cpp b/src/gausskernel/storage/access/common/printtup.cpp index 8e83edc70..1c8e61406 100644 --- a/src/gausskernel/storage/access/common/printtup.cpp +++ b/src/gausskernel/storage/access/common/printtup.cpp @@ -44,6 +44,10 @@ static void printtup_destroy(DestReceiver *self); static void SendRowDescriptionCols_2(StringInfo buf, TupleDesc typeinfo, List *targetlist, int16 *formats); static void SendRowDescriptionCols_3(StringInfo buf, TupleDesc typeinfo, List *targetlist, int16 *formats); +static void writeString(StringInfo buf, const char *name, bool isWrite); +#ifndef ENABLE_MULTIPLE_NODES +static bool checkNeedUpperAndToUpper(char *dest, const char *source); +#endif /* for stream send function */ static void printBroadCastTuple(TupleTableSlot *tuple, DestReceiver *self); @@ -547,7 +551,8 @@ static void SendRowDescriptionCols_3(StringInfo buf, TupleDesc typeinfo, List *t if (IsClientLogicType(atttypid) && atttypmod == -1) { elog(DEBUG1, "client logic without original type is sent to client"); } - pq_writestring(buf, NameStr(attrs[i]->attname)); + + writeString(buf, NameStr(attrs[i]->attname), true); #ifdef PGXC /* @@ -618,7 +623,7 @@ static void SendRowDescriptionCols_2(StringInfo buf, TupleDesc typeinfo, List *t Oid atttypid = attrs[i]->atttypid; int32 atttypmod = attrs[i]->atttypmod; - pq_sendstring(buf, NameStr(attrs[i]->attname)); + writeString(buf, NameStr(attrs[i]->attname), false); #ifdef PGXC /* @@ -650,6 +655,65 @@ static void SendRowDescriptionCols_2(StringInfo buf, TupleDesc typeinfo, List *t } } +/* + * Using pq_writestring in SendRowDescriptionCols_3 and pq_sendstring in SendRowDescriptionCols_2. + */ +static void writeString(StringInfo buf, const char *name, bool isWrite) +{ + char *res = (char *)name; + +#ifndef ENABLE_MULTIPLE_NODES + /* + * Uppercasing attribute names only works in ORA compatibility mode and a centralized environment. + * If the letters are all lowercase, return the result after converting to uppercase. + */ + char objectNameUppercase[NAMEDATALEN] = {'\0'}; + if (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && u_sess->attr.attr_sql.uppercase_attribute_name && + checkNeedUpperAndToUpper(objectNameUppercase, name)) { + res = objectNameUppercase; + } +#endif + + if (likely(isWrite)) { + pq_writestring(buf, res); + } else { + pq_sendstring(buf, res); + } +} + +#ifndef ENABLE_MULTIPLE_NODES +/* + * Check whether the letters are all lowercase. If so, needUpper is true. + * Use dest to save the result after converting to uppercase. + */ +static bool checkNeedUpperAndToUpper(char *dest, const char *source) +{ + size_t i = 0; + bool needUpper = true; + while (*source != '\0') { + int mblen = pg_mblen(source); + /* + * If mblen == 1, we need to further determine whether this single-byte character is an uppercase letter. + * Otherwise, copy directly from source to dest. + */ + if (mblen == 1) { + /* this single-byte character is an uppercase letter, so no uppercasing is needed.
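* example usage: checkNeedUpperAndToUpper(dest, "col1") fills dest with "COL1" and returns true;
* example usage: checkNeedUpperAndToUpper(dest, "Col1") returns false, so the name is sent as-is;
* multibyte characters are copied through verbatim and never veto the conversion.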
*/ + if (unlikely(isupper(*source))) { + needUpper = false; + break; + } + dest[i++] = toupper(*source++); + } else { + for (int j = 0; j < mblen; j++) { + dest[i++] = *source++; + } + } + } + dest[i] = '\0'; + return needUpper; +} +#endif + /* * Get the lookup info that printtup() needs */ @@ -1038,19 +1102,8 @@ void printtup(TupleTableSlot *slot, DestReceiver *self) if (thisState->format == 0) { /* Text output */ char *outputstr = NULL; - u_sess->attr.attr_sql.for_print_tuple = true; - PG_TRY(); - { - outputstr = OutputFunctionCall(&thisState->finfo, attr); - } - PG_CATCH(); - { - u_sess->attr.attr_sql.for_print_tuple = false; - PG_RE_THROW(); - } - PG_END_TRY(); - u_sess->attr.attr_sql.for_print_tuple = false; + outputstr = OutputFunctionCall(&thisState->finfo, attr); if (thisState->typisvarlena && self->forAnalyzeSampleTuple && (typeinfo->attrs[i]->atttypid == BYTEAOID || typeinfo->attrs[i]->atttypid == CHAROID || typeinfo->attrs[i]->atttypid == TEXTOID || typeinfo->attrs[i]->atttypid == BLOBOID || diff --git a/src/gausskernel/storage/access/common/reloptions.cpp b/src/gausskernel/storage/access/common/reloptions.cpp index 0c827561e..0de331552 100644 --- a/src/gausskernel/storage/access/common/reloptions.cpp +++ b/src/gausskernel/storage/access/common/reloptions.cpp @@ -86,20 +86,17 @@ static relopt_bool boolRelOpts[] = { {{ "autovacuum_enabled", "Enables autovacuum in this relation", RELOPT_KIND_HEAP | RELOPT_KIND_TOAST }, true }, {{ "user_catalog_table", "Declare a table as an additional catalog table, e.g. for the purpose of logical replication", - RELOPT_KIND_HEAP }, - false }, + RELOPT_KIND_HEAP }, false }, {{ "fastupdate", "Enables \"fast update\" feature for this GIN index", RELOPT_KIND_GIN }, true }, {{ "security_barrier", "View acts as a row security barrier", RELOPT_KIND_VIEW }, false }, {{ "enable_rowsecurity", "Enable row level security or not", RELOPT_KIND_HEAP }, false }, {{ "force_rowsecurity", "Row security forced for owners or not", RELOPT_KIND_HEAP }, false }, {{"enable_tsdb_delta", "Enables delta table for this timeseries relation", RELOPT_KIND_HEAP}, false}, {{ "punctuation_ignore", "Ignore punctuation in zhparser/N-gram text search praser", - RELOPT_KIND_ZHPARSER | RELOPT_KIND_NPARSER }, - true }, + RELOPT_KIND_ZHPARSER | RELOPT_KIND_NPARSER }, true }, {{ "grapsymbol_ignore", "ignore grapsymbol in N-gram text search praser", RELOPT_KIND_NPARSER }, false }, {{ "seg_with_duality", "segmente interfacing idle words with duality in zhparser text search praser", - RELOPT_KIND_ZHPARSER }, - false }, + RELOPT_KIND_ZHPARSER }, false }, {{ "multi_short", "segmente long words to short words in zhparser text search praser", RELOPT_KIND_ZHPARSER }, true }, {{ "multi_duality", "segmente long words with duality in zhparser text search praser", RELOPT_KIND_ZHPARSER }, @@ -115,10 +112,7 @@ static relopt_bool boolRelOpts[] = { {{ "on_commit_delete_rows", "global temp table on commit options", RELOPT_KIND_HEAP}, true}, {{ "crossbucket", "Enables cross bucket index creation in this index relation", RELOPT_KIND_BTREE}, false }, {{ "enable_tde", "enable table's level transparent data encryption", RELOPT_KIND_HEAP }, false }, - {{ "compress_byte_convert", "Whether do byte convert in compression", RELOPT_KIND_HEAP | RELOPT_KIND_BTREE}, - false }, - {{ "compress_diff_convert", "Whether do diiffer convert in compression", RELOPT_KIND_HEAP | RELOPT_KIND_BTREE}, - false }, + {{ "hasuids", "Enables uids in this relation", RELOPT_KIND_HEAP }, false }, /* list terminator */ {{NULL}} }; @@ -228,7 
+222,7 @@ static relopt_int intRelOpts[] = { }, {{ "rel_cn_oid", "rel oid on coordinator", RELOPT_KIND_HEAP }, 0, 0, 2000000000 }, - + {{ "exec_step", "redis exec step", RELOPT_KIND_HEAP }, 0, 1, 4 }, {{ "init_td", "number of td slots", RELOPT_KIND_HEAP }, UHEAP_DEFAULT_TD, UHEAP_MIN_TD, UHEAP_MAX_TD }, {{ "bucketcnt", "number of bucket map counts", RELOPT_KIND_HEAP }, 0, 32, 16384 }, { @@ -237,18 +231,8 @@ static relopt_int intRelOpts[] = { "Number of parallel processes that can be used per executor node for this relation.", RELOPT_KIND_HEAP, }, - -1, 0, 32 + 0, 1, 32 }, - {{ "compress_level", "Level of page compression.", RELOPT_KIND_HEAP | RELOPT_KIND_BTREE}, 0, -31, 31}, - {{ "compresstype", "compress type (none, pglz or zstd).", RELOPT_KIND_HEAP | RELOPT_KIND_BTREE}, 0, 0, 2}, - {{ "compress_chunk_size", "Size of chunk to store compressed page.", RELOPT_KIND_HEAP | RELOPT_KIND_BTREE}, - BLCKSZ / 2, - BLCKSZ / 16, - BLCKSZ / 2}, - {{ "compress_prealloc_chunks", "Number of prealloced chunks for each block.", RELOPT_KIND_HEAP | RELOPT_KIND_BTREE}, - 0, - 0, - 7}, /* list terminator */ {{NULL}} }; @@ -269,6 +253,11 @@ static relopt_int64 int64RelOpts[] = { INT64CONST(-1), INT64CONST(0), INT64CONST(2000000000) }, + {{ "create_time", "redis tmp table create time", + RELOPT_KIND_HEAP }, + INT64CONST(0), + INT64CONST(1), + INT64CONST(INT64_MAX) }, /* list terminator */ {{NULL}} }; @@ -1597,8 +1586,13 @@ void fillTdeRelOptions(List *options, char relkind) * @Param[IN] options: input user options. * @See also: */ -void RowTblCheckCompressionOption(List *options) +void RowTblCheckCompressionOption(List *options, int8 rowCompress) { + if (IsCompressedByCmprsInPgclass((RelCompressType) rowCompress)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("row-oriented table does not support compression"))); + } + ListCell *opt = NULL; if (options == NULL) { @@ -1761,7 +1755,8 @@ void ForbidToSetOptionsForColTbl(List *options) "enable_tde", "encrypt_algo", "dek_cipher", - "cmk_id" + "cmk_id", + "hasuids" }; ForbidUserToSetUnsupportedOptions(options, unsupported, lengthof(unsupported), "column relation"); @@ -1806,7 +1801,8 @@ void ForbidToSetOptionsForUstoreTbl(List *options) "enable_tde", "dek_cipher", "cmk_id", - "encrypt_algo" + "encrypt_algo", + "hasuids" }; ForbidUserToSetUnsupportedOptions(options, unsupported, lengthof(unsupported), "ustore relation"); @@ -1832,7 +1828,8 @@ void forbid_to_set_options_for_timeseries_tbl(List *options) "max_batchrow", "deltarow_threshold", "partial_cluster_rows", - "compresslevel" + "compresslevel", + "hasuids" }; ForbidUserToSetUnsupportedOptions(options, unsupported, lengthof(unsupported), "timeseries relation"); @@ -1865,7 +1862,8 @@ void ForbidToSetOptionsForPSort(List *options) "enable_tsdb_delta", "tsdb_deltamerge_interval", "tsdb_deltamerge_threshold", - "tsdb_deltainsert_threshold" + "tsdb_deltainsert_threshold", + "hasuids" }; ForbidUserToSetUnsupportedOptions(options, unsupported, lengthof(unsupported), "psort index"); @@ -1930,6 +1928,8 @@ bytea *default_reloptions(Datum reloptions, bool validate, relopt_kind kind) { "append_mode", RELOPT_TYPE_STRING, offsetof(StdRdOptions, append_mode) }, { "merge_list", RELOPT_TYPE_STRING, offsetof(StdRdOptions, merge_list) }, { "rel_cn_oid", RELOPT_TYPE_INT, offsetof(StdRdOptions, rel_cn_oid) }, + { "exec_step", RELOPT_TYPE_INT, offsetof(StdRdOptions, exec_step) }, + { "create_time", RELOPT_TYPE_INT64, offsetof(StdRdOptions, create_time) }, { "init_td", RELOPT_TYPE_INT, offsetof(StdRdOptions, 
initTd) }, { "append_mode_internal", RELOPT_TYPE_INT, offsetof(StdRdOptions, append_mode_internal) }, { "start_ctid_internal", RELOPT_TYPE_STRING, offsetof(StdRdOptions, start_ctid_internal) }, @@ -1948,18 +1948,7 @@ bytea *default_reloptions(Datum reloptions, bool validate, relopt_kind kind) { "cmk_id", RELOPT_TYPE_STRING, offsetof(StdRdOptions, cmk_id)}, { "encrypt_algo", RELOPT_TYPE_STRING, offsetof(StdRdOptions, encrypt_algo)}, { "enable_tde", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, enable_tde)}, - { "compresstype", RELOPT_TYPE_INT, - offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressType)}, - { "compress_level", RELOPT_TYPE_INT, - offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressLevel)}, - { "compress_chunk_size", RELOPT_TYPE_INT, - offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressChunkSize)}, - {"compress_prealloc_chunks", RELOPT_TYPE_INT, - offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressPreallocChunks)}, - { "compress_byte_convert", RELOPT_TYPE_BOOL, - offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressByteConvert)}, - { "compress_diff_convert", RELOPT_TYPE_BOOL, - offsetof(StdRdOptions, compress) + offsetof(PageCompressOpts, compressDiffConvert)}, + { "hasuids", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, hasuids) } }; options = parseRelOptions(reloptions, validate, kind, &numoptions); @@ -2594,7 +2583,8 @@ int8 heaprel_get_compresslevel_from_modes(int16 modes) void ForbidUserToSetDefinedOptions(List *options) { /* the following option must be in tab[] of default_reloptions(). */ - static const char *unchangedOpt[] = {"orientation", "hashbucket", "bucketcnt", "segment", "encrypt_algo"}; + static const char *unchangedOpt[] = {"orientation", "hashbucket", "bucketcnt", "segment", "encrypt_algo", + "storage_type"}; int firstInvalidOpt = -1; if (FindInvalidOption(options, unchangedOpt, lengthof(unchangedOpt), &firstInvalidOpt)) { @@ -2604,25 +2594,6 @@ void ForbidUserToSetDefinedOptions(List *options) } } -/* - * @Description: compressed parameter cannot be changed by ALTER TABLE statement if table is uncompressed table. - * this function do the checking work. - * @Param[IN] options: input user options - * @See also: - */ -void ForbidUserToSetCompressedOptions(List *options) -{ - static const char *unSupportOptions[] = {"compresstype", "compress_chunk_size", "compress_prealloc_chunks", - "compress_level", "compress_byte_convert", "compress_diff_convert"}; - int firstInvalidOpt = -1; - if (FindInvalidOption(options, unSupportOptions, lengthof(unSupportOptions), &firstInvalidOpt)) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - (errmsg("Un-support feature"), errdetail("Option \"%s\" doesn't allow ALTER on uncompressed table", - unSupportOptions[firstInvalidOpt])))); - } -} - /* * @Description: forbid to change inner option * inner options only can be used by system itself. @@ -2648,7 +2619,7 @@ void ForbidOutUsersToSetInnerOptions(List *userOptions) void ForbidUserToSetDefinedIndexOptions(List *options) { /* the following option must be in tab[] of default_reloptions(). */ - static const char *unchangedOpt[] = {"crossbucket"}; + static const char *unchangedOpt[] = {"crossbucket", "storage_type"}; int firstInvalidOpt = -1; if (FindInvalidOption(options, unchangedOpt, lengthof(unchangedOpt), &firstInvalidOpt)) { @@ -2895,6 +2866,7 @@ bool get_crossbucket_option(List **options_ptr, bool stmtoptgpi, char *accessmet return ((res <= 0) ? 
false : true); } + bool is_contain_crossbucket(List *defList) { ListCell *lc = NULL; @@ -2916,33 +2888,3 @@ bool is_cstore_option(char relkind, Datum reloptions) pfree_ext(std_opt); return result; } - -void SetOneOfCompressOption(const char* defname, TableCreateSupport* tableCreateSupport) -{ - if (pg_strcasecmp(defname, "compresstype") == 0) { - tableCreateSupport->compressType = true; - } else if (pg_strcasecmp(defname, "compress_chunk_size") == 0) { - tableCreateSupport->compressChunkSize = true; - } else if (pg_strcasecmp(defname, "compress_prealloc_chunks") == 0) { - tableCreateSupport->compressPreAllocChunks = true; - } else if (pg_strcasecmp(defname, "compress_level") == 0) { - tableCreateSupport->compressLevel = true; - } else if (pg_strcasecmp(defname, "compress_byte_convert") == 0) { - tableCreateSupport->compressByteConvert = true; - } else if (pg_strcasecmp(defname, "compress_diff_convert") == 0) { - tableCreateSupport->compressDiffConvert = true; - } -} - -void CheckCompressOption(TableCreateSupport *tableCreateSupport) -{ - if (!tableCreateSupport->compressType && HasCompressOption(tableCreateSupport)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/" - "compress_diff_convert should be used with compresstype."))); - } - if (!tableCreateSupport->compressByteConvert && tableCreateSupport->compressDiffConvert) { - ereport(ERROR, (errcode(ERRCODE_INVALID_OPTION), - errmsg("compress_diff_convert should be used with compress_byte_convert."))); - } -} \ No newline at end of file diff --git a/src/gausskernel/storage/access/common/tupconvert.cpp b/src/gausskernel/storage/access/common/tupconvert.cpp index 1837114e2..f451a3fcc 100644 --- a/src/gausskernel/storage/access/common/tupconvert.cpp +++ b/src/gausskernel/storage/access/common/tupconvert.cpp @@ -74,9 +74,6 @@ TupleConversionMap *convert_tuples_by_position(TupleDesc indesc, TupleDesc outde int i; int j; bool same = false; - Datum *all_types_orig = NULL; /* original data types for gs_encrypted_proc */ - Datum *all_types = NULL; /* will be used for replace data types in pg_proc */ - bool gs_encrypted_proc_was_created = false; /* Verify compatibility and prepare attribute-number map */ n = outdesc->natts; attrMap = (AttrNumber *)palloc0(n * sizeof(AttrNumber)); @@ -98,30 +95,13 @@ TupleConversionMap *convert_tuples_by_position(TupleDesc indesc, TupleDesc outde if (att->attisdropped) continue; nincols++; - if (gs_encrypted_proc_was_created && !IsClientLogicType(att->atttypid)) { - all_types_orig[j] = -1; - all_types[j] = ObjectIdGetDatum(atttypid); - } - if (IsClientLogicType(att->atttypid)) { - if (!IsClientLogicType(atttypid)) { - /* if we found on call phase that the in desc is client logic but out desc not */ - if (!gs_encrypted_proc_was_created) { - all_types_orig = (Datum*)palloc(n * sizeof(Datum)); - all_types = (Datum*)palloc(n * sizeof(Datum)); - for (int k = 0; k < j; k++) { - all_types_orig[k] = -1; - all_types[k] = ObjectIdGetDatum(outdesc->attrs[k]->atttypid); - } - gs_encrypted_proc_was_created = true; - } - all_types_orig[j] = ObjectIdGetDatum(atttypid); - all_types[j] = ObjectIdGetDatum(att->atttypid); - } - } else if (atttypid != att->atttypid || (atttypmod != att->atttypmod && atttypmod >= 0)) + if (!IsClientLogicType(att->atttypid) && + (atttypid != att->atttypid || (atttypmod != att->atttypmod && atttypmod >= 0))) { ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg_internal("%s", _(msg)), errdetail("Returned 
type %s does not match expected type %s in column %d.", format_type_with_typemod(att->atttypid, att->atttypmod), format_type_with_typemod(atttypid, atttypmod), noutcols))); + } attrMap[i] = (AttrNumber)(j + 1); j++; break; @@ -171,12 +151,6 @@ TupleConversionMap *convert_tuples_by_position(TupleDesc indesc, TupleDesc outde } else same = false; - if (gs_encrypted_proc_was_created) { - add_allargtypes_orig(func_id, all_types_orig, all_types, n); - ereport(WARNING, (errcode(ERRCODE_CL_FUNCTION_UPDATE), errmsg_internal("%s", _(msg)), - errdetail("it happend during to client logic updating catalog table. if the results looks wrong please try " - "once again"))); - } if (same) { /* Runtime conversion is not needed */ pfree(attrMap); diff --git a/src/gausskernel/storage/access/common/tupdesc.cpp b/src/gausskernel/storage/access/common/tupdesc.cpp index edd705d6e..2a61c5c70 100644 --- a/src/gausskernel/storage/access/common/tupdesc.cpp +++ b/src/gausskernel/storage/access/common/tupdesc.cpp @@ -96,6 +96,7 @@ TupleDesc CreateTemplateTupleDesc(int natts, bool hasoid, TableAmType tam) desc->tdhasoid = hasoid; desc->tdrefcount = -1; /* assume not reference-counted */ desc->initdefvals = NULL; /* initialize the attrinitdefvals */ + desc->tdhasuids = false; desc->tdisredistable = false; desc->tdTableAmType = tam; @@ -132,6 +133,7 @@ TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute* attrs, Tabl desc->tdrefcount = -1; /* assume not reference-counted */ desc->initdefvals = NULL; /* initialize the attrinitdefvals */ desc->tdisredistable = false; + desc->tdhasuids = false; desc->tdTableAmType = tam; return desc; @@ -142,7 +144,7 @@ TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute* attrs, Tabl * This function creates a new TupInitDefVal by copying from an existing * TupInitDefVal. */ -static TupInitDefVal *tupInitDefValCopy(TupInitDefVal *pInitDefVal, int nAttr) +TupInitDefVal *tupInitDefValCopy(TupInitDefVal *pInitDefVal, int nAttr) { TupInitDefVal *dvals = (TupInitDefVal *)palloc(nAttr * sizeof(TupInitDefVal)); for (int i = 0; i < nAttr; ++i) { @@ -184,6 +186,7 @@ TupleDesc CreateTupleDescCopy(TupleDesc tupdesc) desc->tdtypeid = tupdesc->tdtypeid; desc->tdtypmod = tupdesc->tdtypmod; desc->tdisredistable = tupdesc->tdisredistable; + desc->tdhasuids = tupdesc->tdhasuids; /* copy the attinitdefval */ if (tupdesc->initdefvals) { @@ -197,7 +200,7 @@ TupleDesc CreateTupleDescCopy(TupleDesc tupdesc) * TupleConstrCopy * This function creates a new TupleConstr by copying from an existing TupleConstr. 
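*
* tupInitDefValCopy() above and TupleConstrCopy() below lose their static
* qualifiers in this hunk, making them linkable from other translation
* units; plausible extern declarations for a shared header (assumed, not
* shown in this diff):
*   example code: extern TupInitDefVal *tupInitDefValCopy(TupInitDefVal *pInitDefVal, int nAttr);
*   example code: extern TupleConstr *TupleConstrCopy(const TupleDesc tupdesc);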
*/ -static TupleConstr *TupleConstrCopy(const TupleDesc tupdesc) +TupleConstr *TupleConstrCopy(const TupleDesc tupdesc) { TupleConstr *constr = tupdesc->constr; TupleConstr *cpy = (TupleConstr *)palloc0(sizeof(TupleConstr)); @@ -318,6 +321,7 @@ void FreeTupleDesc(TupleDesc tupdesc) } pfree(check); } + pfree_ext(tupdesc->constr->clusterKeys); pfree(tupdesc->constr); tupdesc->constr = NULL; } @@ -914,7 +918,7 @@ static void BlockColumnRelOption(const char *tableFormat, const Oid atttypid, co { if (((pg_strcasecmp(ORIENTATION_COLUMN, tableFormat)) == 0 || (pg_strcasecmp(ORIENTATION_TIMESERIES, tableFormat)) == 0) && - !IsTypeSupportedByCStore(atttypid, atttypmod)) { + !IsTypeSupportedByCStore(atttypid)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("type \"%s\" is not supported in column store", format_type_with_typemod(atttypid, atttypmod)))); } diff --git a/src/gausskernel/storage/access/dfs/CMakeLists.txt b/src/gausskernel/storage/access/dfs/CMakeLists.txt index 2d0f6653f..e02daa249 100755 --- a/src/gausskernel/storage/access/dfs/CMakeLists.txt +++ b/src/gausskernel/storage/access/dfs/CMakeLists.txt @@ -39,17 +39,22 @@ add_static_objtarget(gausskernel_storage_access_dfs TGT_dfs_SRC TGT_dfs_INC "${d set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/carbondata ${CMAKE_CURRENT_SOURCE_DIR}/csv - ${CMAKE_CURRENT_SOURCE_DIR}/orc - ${CMAKE_CURRENT_SOURCE_DIR}/parquet ${CMAKE_CURRENT_SOURCE_DIR}/text ) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND CMAKE_MODULE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/orc + ${CMAKE_CURRENT_SOURCE_DIR}/parquet + ) + add_subdirectory(orc) + add_subdirectory(parquet) +endif() + if("${ENABLE_MULTIPLE_NODES}" STREQUAL "ON") add_subdirectory(carbondata) endif() add_subdirectory(csv) -add_subdirectory(orc) -add_subdirectory(parquet) add_subdirectory(text) install(PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/region_map diff --git a/src/gausskernel/storage/access/dfs/Makefile b/src/gausskernel/storage/access/dfs/Makefile index dbe91a85a..f007ad851 100644 --- a/src/gausskernel/storage/access/dfs/Makefile +++ b/src/gausskernel/storage/access/dfs/Makefile @@ -25,7 +25,11 @@ subdir = src/gausskernel/storage/access/dfs top_builddir = ../../../../.. 
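# In lite mode the orc and parquet DFS readers (and their liborc/libparquet
# dependencies) are dropped from the build; the hunk below applies the usual
# GNU make conditional on the configure flag:
#   ifeq ($(enable_lite_mode), no)
#   SUBDIRS = orc text csv parquet
#   else
#   SUBDIRS = text csv
#   endif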
include $(top_builddir)/src/Makefile.global +ifeq ($(enable_lite_mode), no) SUBDIRS = orc text csv parquet +else +SUBDIRS = text csv +endif ifeq ($(enable_multiple_nodes), yes) SUBDIRS += carbondata diff --git a/src/gausskernel/storage/access/dfs/carbondata/carbondata_column_reader.cpp b/src/gausskernel/storage/access/dfs/carbondata/carbondata_column_reader.cpp index ae5256541..3de3a0082 100644 --- a/src/gausskernel/storage/access/dfs/carbondata/carbondata_column_reader.cpp +++ b/src/gausskernel/storage/access/dfs/carbondata/carbondata_column_reader.cpp @@ -1456,7 +1456,11 @@ void CarbondataColumnReaderImpl::PredicateFilterByte(uint64_t numVal isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { int64_t tmpValue = (int64_t)pData[rowId]; +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, m_predicate); +#endif } } } @@ -1475,7 +1479,11 @@ void CarbondataColumnReaderImpl::PredicateFilterBoolean(uint64_t num if (isNullValue(rowId)) { isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpData[rowId], m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpData[rowId], m_predicate); +#endif } } } @@ -1495,7 +1503,11 @@ void CarbondataColumnReaderImpl::PredicateFilterShort(uint64_t numVa isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { int64_t intValue = (int64_t)tmpData[rowId]; +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(intValue, m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(intValue, m_predicate); +#endif } } } @@ -1515,7 +1527,11 @@ void CarbondataColumnReaderImpl::PredicateFilterInt(uint64_t numValu isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { int64_t valueInt = (int64_t)pData[rowId]; +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(valueInt, m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(valueInt, m_predicate); +#endif } } } @@ -1534,7 +1550,11 @@ void CarbondataColumnReaderImpl::PredicateFilterLong(uint64_t numVal if (isNullValue(rowId)) { isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(dataPtr[rowId], m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(dataPtr[rowId], m_predicate); +#endif } } } @@ -1554,7 +1574,11 @@ void CarbondataColumnReaderImpl::PredicateFilterFloat(uint64_t numVa isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { double tmpValue = static_cast(tmpData[rowId]); +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueDoubleForLlvm(tmpValue, m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, m_predicate); +#endif } } } @@ -1574,7 +1598,11 @@ void CarbondataColumnReaderImpl::PredicateFilterDouble(uint64_t numV isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { double doubleValue = tmpData[rowId]; +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueDoubleForLlvm(doubleValue, m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(doubleValue, m_predicate); +#endif } } } @@ -1657,7 +1685,12 @@ void CarbondataColumnReaderImpl::PredicateFilterDecimal(uint64_t num if 
(isNullValue(rowId)) { isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpData[rowId], + m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpData[rowId], m_predicate); +#endif } } } @@ -1672,7 +1705,12 @@ void CarbondataColumnReaderImpl::PredicateFilterDecimal(uint64_t num if (isNullValue(rowId)) { isSelected[i] = HdfsPredicateCheckNull(m_predicate); } else if (m_checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpData[rowId], + m_predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpData[rowId], m_predicate); +#endif } } } diff --git a/src/gausskernel/storage/access/dfs/dfs_am.cpp b/src/gausskernel/storage/access/dfs/dfs_am.cpp index 28feba7e8..f449a0381 100644 --- a/src/gausskernel/storage/access/dfs/dfs_am.cpp +++ b/src/gausskernel/storage/access/dfs/dfs_am.cpp @@ -550,6 +550,7 @@ void ReaderImpl::begin(dfs::DFSConnector *conn, FileType type) /* notice: readerState should init ready before create file reader */ switch (type) { +#ifndef ENABLE_LITE_MODE case ORC: { reader = New(readerState->persistCtx) OrcReaderImpl(readerState, conn); reader->begin(); @@ -560,6 +561,7 @@ void ReaderImpl::begin(dfs::DFSConnector *conn, FileType type) reader->begin(); break; } +#endif case TEXT: { reader = New(readerState->persistCtx) CommonReader(readerState, conn, DFS_TEXT); reader->begin(); @@ -583,7 +585,11 @@ void ReaderImpl::begin(dfs::DFSConnector *conn, FileType type) #ifdef ENABLE_MULTIPLE_NODES "Only ORC/PARQUET/CARBONDATA/CSV/TEXT is supported for now." #else +#ifndef ENABLE_LITE_MODE "Only ORC/PARQUET/CSV/TEXT is supported for now." +#else + "Only CSV/TEXT is supported for now." +#endif #endif ))); } @@ -1919,6 +1925,7 @@ WriterImpl::~WriterImpl() void WriterImpl::init(IndexInsertInfo *indexInsertInfo) { +#ifndef ENABLE_LITE_MODE /* Initialize the connector info. */ Oid tbsOid = m_relation->rd_node.spcNode; DFSConnector *conn = createConnector(m_ctx, GetDfsSrvOptions(tbsOid), tbsOid); @@ -1940,6 +1947,9 @@ void WriterImpl::init(IndexInsertInfo *indexInsertInfo) m_transformScalarFunc = (transformScalarVector *)palloc0(sizeof(transformScalarVector) * m_relation->rd_att->natts); m_isNull = (bool *)palloc0(sizeof(bool) * BatchMaxSize); bindTransformFunc(); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } const char *WriterImpl::getRelName() diff --git a/src/gausskernel/storage/access/dfs/dfs_query_reader.cpp b/src/gausskernel/storage/access/dfs/dfs_query_reader.cpp index 4ef509f5f..9d325a9b6 100644 --- a/src/gausskernel/storage/access/dfs/dfs_query_reader.cpp +++ b/src/gausskernel/storage/access/dfs/dfs_query_reader.cpp @@ -141,10 +141,12 @@ bool HdfsScanPredicate::BuildHdfsScanPredicateFromClause(Expr *expr return runningTimeSet; } - /* Build IR according to expr node. */ +#ifdef ENABLE_LLVM_COMPILE + /* Build IR according to expr node. 
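* IR generation is attempted only when the per-thread codegen object is
* ready and the build was compiled with ENABLE_LLVM_COMPILE; the call below
* caches the jitted predicate in m_jittedFunc, while non-LLVM builds fall
* back to the generic HdfsPredicateCheckValue() path used in the reader
* hunks elsewhere in this patch.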
*/ if (CodeGenThreadObjectReady()) { (void)ForeignScanExprCodeGen(expr, NULL, &m_jittedFunc); } +#endif if (IsA(rightop, Const)) { datumValue = ((Const *)rightop)->constvalue; diff --git a/src/gausskernel/storage/access/dfs/dfs_stream_factory.cpp b/src/gausskernel/storage/access/dfs/dfs_stream_factory.cpp index bbd2e90f7..598da4890 100644 --- a/src/gausskernel/storage/access/dfs/dfs_stream_factory.cpp +++ b/src/gausskernel/storage/access/dfs/dfs_stream_factory.cpp @@ -46,6 +46,7 @@ DFS_UNIQUE_PTR InputStreamFactory(dfs::DFSConnector *conn, c int32_t connect_type = conn->getType(); if (OBS_CONNECTOR == connect_type) { +#ifndef ENABLE_LITE_MODE /* * when we read file through foreign table, * readerState->currentFileID value is -1. @@ -65,6 +66,9 @@ DFS_UNIQUE_PTR InputStreamFactory(dfs::DFSConnector *conn, c } else { return dfs::readObsFile(reader_handler, path, readerState); } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { ereport(ERROR, (errcode(ERRCODE_FDW_UNABLE_TO_ESTABLISH_CONNECTION), errmodule(MOD_ORC), errmsg("unsupport connector type %d", connect_type))); diff --git a/src/gausskernel/storage/access/dfs/orc/CMakeLists.txt b/src/gausskernel/storage/access/dfs/orc/CMakeLists.txt index 5579a4ef0..15f4a3b69 100755 --- a/src/gausskernel/storage/access/dfs/orc/CMakeLists.txt +++ b/src/gausskernel/storage/access/dfs/orc/CMakeLists.txt @@ -9,13 +9,13 @@ set(TGT_orc_INC ${PROJECT_SRC_DIR}/lib/gstrace ${LIBCGROUP_INCLUDE_PATH} ${PROJECT_SRC_DIR}/include/libcomm +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") ${LIBORC_INCLUDE_PATH} - ${PROTOBUF_INCLUDE_PATH} ${LIBPARQUET_INCLUDE_PATH} +endif() + ${PROTOBUF_INCLUDE_PATH} ${ZLIB_INCLUDE_PATH} ${LIBCURL_INCLUDE_PATH} - ${LIBORC_INCLUDE_PATH} - ${PROTOBUF_INCLUDE_PATH} ) set(orc_DEF_OPTIONS ${MACRO_OPTIONS}) diff --git a/src/gausskernel/storage/access/dfs/orc/OrcObsFile.cpp b/src/gausskernel/storage/access/dfs/orc/OrcObsFile.cpp index 2f412cb38..be3d1d033 100644 --- a/src/gausskernel/storage/access/dfs/orc/OrcObsFile.cpp +++ b/src/gausskernel/storage/access/dfs/orc/OrcObsFile.cpp @@ -26,8 +26,10 @@ /* Becareful: liborc header file must before openGauss header file */ #include "orc_rw.h" +#ifndef ENABLE_LITE_MODE #include "orc/Adaptor.hh" #include "orc/Exceptions.hh" +#endif #include "OrcObsFile.h" #include "access/obs/obs_am.h" #include "storage/cucache_mgr.h" diff --git a/src/gausskernel/storage/access/dfs/orc/orc_reader.cpp b/src/gausskernel/storage/access/dfs/orc/orc_reader.cpp index b111b9d12..c483cdf7e 100644 --- a/src/gausskernel/storage/access/dfs/orc/orc_reader.cpp +++ b/src/gausskernel/storage/access/dfs/orc/orc_reader.cpp @@ -21,11 +21,15 @@ * ------------------------------------------------------------------------- */ #include "orc_rw.h" +#ifndef ENABLE_LITE_MODE #include "orc/Adaptor.hh" #include "orc/Exceptions.hh" #include "orc/OrcFile.hh" +#endif #include "OrcObsFile.h" +#ifndef ENABLE_LITE_MODE #include "orc/Reader.hh" +#endif #include "storage/dfs/dfscache_mgr.h" #include "storage/cucache_mgr.h" #include "access/dfs/dfs_common.h" @@ -1909,7 +1913,11 @@ void OrcColumnReaderImpl::predicateFilter(uint64_t numValues, bool *i if (!nullFilter(hasNull, notNull, isSelected, i)) { int64_t tmpValue = data[i]; if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } if (checkBloomFilterOnRow && isSelected[i]) { @@ -1930,8 +1938,12 @@ void 
OrcColumnReaderImpl::predicateFilter(uint64_t numValues, bool *i if (!nullFilter(hasNull, notNull, isSelected, i)) { double tmpValue = data[i]; if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueDoubleForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } if (checkBloomFilterOnRow && isSelected[i]) { diff --git a/src/gausskernel/storage/access/dfs/orc/orc_rw.h b/src/gausskernel/storage/access/dfs/orc/orc_rw.h index 940f9c37e..e8df6f86e 100644 --- a/src/gausskernel/storage/access/dfs/orc/orc_rw.h +++ b/src/gausskernel/storage/access/dfs/orc/orc_rw.h @@ -24,13 +24,17 @@ #ifndef ORC_RW_H #define ORC_RW_H +#include "pg_config.h" + #include #include "sstream" +#ifndef ENABLE_LITE_MODE #include "orc/orc-config.hh" #include "orc_proto.pb.h" #include "orc/OrcFile.hh" #include "orc/Writer.hh" +#endif #include "../dfs_reader.h" #include "../dfs_writer.h" #include "access/dfs/dfs_stream.h" @@ -65,6 +69,7 @@ namespace dfs { const int64_t PG_EPOCH_OFFSET_DEFAULT = ORC_EPOCH_IN_SECONDS - POSTGRESQL_EPOCH_IN_SECONDS; +#ifndef ENABLE_LITE_MODE typedef orc::TypeKind FieldKind; typedef void (*appendDatum)(orc::ColumnVectorBatch *colBatch, Datum *val, bool *isNull, uint32 length, @@ -1011,6 +1016,7 @@ private: bool m_indexInfoReady; }; } // namespace writer +#endif extern uint64 EstimateBufferRows(TupleDesc desc); } // namespace dfs diff --git a/src/gausskernel/storage/access/dfs/orc/orc_stream_adapter.h b/src/gausskernel/storage/access/dfs/orc/orc_stream_adapter.h index 992d7186b..0242ab40b 100644 --- a/src/gausskernel/storage/access/dfs/orc/orc_stream_adapter.h +++ b/src/gausskernel/storage/access/dfs/orc/orc_stream_adapter.h @@ -25,10 +25,12 @@ #define SRC_BACKEND_ACCESS_DFS_ORC_ORC_STREAM_ADAPTER_H #include +#ifndef ENABLE_LITE_MODE /* Becareful: liborc header file must before openGauss header file */ #include "orc/Adaptor.hh" #include "orc/Exceptions.hh" #include "orc/OrcFile.hh" +#endif namespace orc { class OrcInputStreamAdapter : public InputStream { diff --git a/src/gausskernel/storage/access/dfs/parquet/CMakeLists.txt b/src/gausskernel/storage/access/dfs/parquet/CMakeLists.txt index 35c2a9c94..6bb2b5d7b 100755 --- a/src/gausskernel/storage/access/dfs/parquet/CMakeLists.txt +++ b/src/gausskernel/storage/access/dfs/parquet/CMakeLists.txt @@ -9,9 +9,11 @@ set(TGT_parquet_INC ${PROJECT_SRC_DIR}/lib/gstrace ${LIBCGROUP_INCLUDE_PATH} ${PROJECT_SRC_DIR}/include/libcomm - ${LIBORC_INCLUDE_PATH} ${PROTOBUF_INCLUDE_PATH} +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + ${LIBORC_INCLUDE_PATH} ${LIBPARQUET_INCLUDE_PATH} +endif() ${ZLIB_INCLUDE_PATH} ${LIBCURL_INCLUDE_PATH} ) diff --git a/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.cpp b/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.cpp index 327d28161..40ae3c018 100644 --- a/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.cpp +++ b/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.cpp @@ -21,9 +21,11 @@ * * ------------------------------------------------------------------------- */ +#ifndef ENABLE_LITE_MODE #include "parquet/platform.h" #include "parquet/statistics.h" #include "parquet/types.h" +#endif #include "parquet_column_reader.h" #include "parquet_input_stream_adapter.h" @@ -33,7 +35,9 @@ #include "access/dfs/dfs_query.h" #include "access/dfs/dfs_query_check.h" #include "access/dfs/dfs_wrapper.h" +#ifndef ENABLE_LITE_MODE #include "arrow/util/bit-util.h" +#endif namespace dfs { 
namespace reader { @@ -1348,7 +1352,11 @@ void ParquetColumnReaderImpl::predicateFilter(uint64_t numValues, bo if (!nullFilter(isSelected, i)) { bool tmpValue = data[i]; if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } increaseBloomFilterRows(&isSelected[i], tmpValue); @@ -1375,8 +1383,12 @@ void ParquetColumnReaderImpl::predicateFilter(uint64_t numValues, bo if (!nullFilter(isSelected, i)) { int64_t tmpValue = static_cast<int64_t>(data[i]); if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } increaseBloomFilterRows(&isSelected[i], tmpValue); @@ -1393,7 +1405,11 @@ void ParquetColumnReaderImpl::predicateFilter(uint64_t numValues, bo if (!nullFilter(isSelected, i)) { int64_t tmpValue = static_cast<int64_t>(data[i]); if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } increaseBloomFilterRows(&isSelected[i], tmpValue); @@ -1432,7 +1448,11 @@ void ParquetColumnReaderImpl::predicateFilter(uint64_t numValues, bo if (!nullFilter(isSelected, i)) { double tmpValue = static_cast<double>(data[i]); if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } increaseBloomFilterRows(&isSelected[i], tmpValue); @@ -1446,7 +1466,11 @@ void ParquetColumnReaderImpl::predicateFilter(uint64_t numValues, bo if (!nullFilter(isSelected, i)) { double tmpValue = static_cast<double>(data[i]); if (checkPredicateOnRow) { +#ifdef ENABLE_LLVM_COMPILE isSelected[i] = HdfsPredicateCheckValueIntForLlvm(tmpValue, predicate); +#else + isSelected[i] = HdfsPredicateCheckValue(tmpValue, predicate); +#endif } increaseBloomFilterRows(&isSelected[i], tmpValue); diff --git a/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.h b/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.h index 37ff4c64d..4b6744451 100644 --- a/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.h +++ b/src/gausskernel/storage/access/dfs/parquet/parquet_column_reader.h @@ -24,7 +24,9 @@ #ifndef PQRQUET_COLUMN_READER_H #define PQRQUET_COLUMN_READER_H +#ifndef ENABLE_LITE_MODE #include "parquet/api/reader.h" +#endif #include "utils/date.h" #include "utils/timestamp.h" diff --git a/src/gausskernel/storage/access/dfs/parquet/parquet_file_reader.h b/src/gausskernel/storage/access/dfs/parquet/parquet_file_reader.h index 90ee1b663..8e71427d5 100644 --- a/src/gausskernel/storage/access/dfs/parquet/parquet_file_reader.h +++ b/src/gausskernel/storage/access/dfs/parquet/parquet_file_reader.h @@ -24,9 +24,11 @@ #ifndef PQRQUET_FILE_READER_H #define PQRQUET_FILE_READER_H +#ifndef ENABLE_LITE_MODE #include "parquet/api/reader.h" #include "parquet/types.h" #include "parquet/deprecated_io.h" +#endif #include "utils/dfs_vector.h" #include "utils/bloom_filter.h" #include "vecexecutor/vectorbatch.h" diff --git a/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.cpp b/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.cpp index 00487434d..728291b3c 100644 ---
a/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.cpp +++ b/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.cpp @@ -21,8 +21,10 @@ * * ------------------------------------------------------------------------- */ +#ifndef ENABLE_LITE_MODE #include "parquet/platform.h" #include "parquet/types.h" +#endif #include "parquet_input_stream_adapter.h" #include "postgres.h" #include "knl/knl_variable.h" diff --git a/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.h b/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.h index e90411cb9..bba4bf680 100644 --- a/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.h +++ b/src/gausskernel/storage/access/dfs/parquet/parquet_input_stream_adapter.h @@ -26,7 +26,9 @@ #include "access/dfs/dfs_stream.h" #include "access/dfs/dfs_am.h" +#ifndef ENABLE_LITE_MODE #include "parquet/api/reader.h" +#endif #include "utils/memutils.h" namespace dfs { diff --git a/src/gausskernel/storage/access/dfs/parquet/parquet_reader.cpp b/src/gausskernel/storage/access/dfs/parquet/parquet_reader.cpp index 273a09323..a18509a27 100644 --- a/src/gausskernel/storage/access/dfs/parquet/parquet_reader.cpp +++ b/src/gausskernel/storage/access/dfs/parquet/parquet_reader.cpp @@ -22,8 +22,10 @@ * ------------------------------------------------------------------------- */ +#ifndef ENABLE_LITE_MODE #include "parquet/platform.h" #include "parquet/types.h" +#endif #include "parquet_reader.h" #include "parquet_file_reader.h" diff --git a/src/gausskernel/storage/access/gist/gist.cpp b/src/gausskernel/storage/access/gist/gist.cpp index f335de06d..4ab541080 100644 --- a/src/gausskernel/storage/access/gist/gist.cpp +++ b/src/gausskernel/storage/access/gist/gist.cpp @@ -251,7 +251,7 @@ bool gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, Buffer dist->buffer = buffer; dist->block.blkno = BufferGetBlockNumber(buffer); - dist->page = PageGetTempPageCopySpecial(BufferGetPage(buffer), false); + dist->page = PageGetTempPageCopySpecial(BufferGetPage(buffer)); /* clean all flags except F_LEAF */ GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0; @@ -285,7 +285,7 @@ bool gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, Buffer int ndownlinks = 0; rootpg.buffer = buffer; - rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer), false); + rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer)); GistPageGetOpaque(rootpg.page)->flags = 0; /* Prepare a vector of all the downlinks */ diff --git a/src/gausskernel/storage/access/hash/Makefile b/src/gausskernel/storage/access/hash/Makefile index c9b9e8c83..0ca2fdd60 100644 --- a/src/gausskernel/storage/access/hash/Makefile +++ b/src/gausskernel/storage/access/hash/Makefile @@ -9,7 +9,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif endif -OBJS = hash.o hashfunc.o hashinsert.o hashovfl.o hashpage.o hashsearch.o\ - hashsort.o hashutil.o hash_xlog.o +OBJS = hash.o hashfunc.o hashinsert.o hashovfl.o hashpage.o hashscan.o \ + hashsearch.o hashsort.o hashutil.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/access/hash/README b/src/gausskernel/storage/access/hash/README index be5491be4..da68545e2 100644 --- a/src/gausskernel/storage/access/hash/README +++ b/src/gausskernel/storage/access/hash/README @@ -58,51 +58,35 @@ rules to support a variable number of overflow pages while not having to move primary bucket pages around after they are created. 
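The DFS hunks earlier in this patch all follow two compile-time gates: `ENABLE_LLVM_COMPILE` selects between a JIT-compiled predicate check and a plain fallback, and `ENABLE_LITE_MODE` compiles ORC/Parquet/OBS support out entirely (`FEATURE_ON_LITE_MODE_NOT_SUPPORTED()` errors out at runtime). The following is a minimal, self-contained sketch of both patterns; `Predicate`, `PredicateCheckValue*`, and `ReadOrcFile` are illustrative stand-ins, not the kernel's actual symbols.

```
// Toy model of the two build-time gates used in the predicate-filter hunks.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Predicate { int64_t threshold; };   /* stand-in for the HDFS predicate */

static bool PredicateCheckValue(int64_t v, const Predicate &p)
{
    return v > p.threshold;                /* interpreted evaluation path */
}

#ifdef ENABLE_LLVM_COMPILE
static bool PredicateCheckValueForLlvm(int64_t v, const Predicate &p)
{
    /* stands in for a call into a JIT-compiled IR function */
    return v > p.threshold;
}
#endif

static bool CheckOneValue(int64_t v, const Predicate &p)
{
#ifdef ENABLE_LLVM_COMPILE
    return PredicateCheckValueForLlvm(v, p);   /* JIT path, as in the +#ifdef hunks */
#else
    return PredicateCheckValue(v, p);          /* fallback added by the +#else hunks */
#endif
}

static void ReadOrcFile(const char *path)
{
#ifndef ENABLE_LITE_MODE
    std::printf("reading ORC file %s\n", path);  /* full build: liborc linked in */
#else
    /* lite build: feature compiled out; mirrors FEATURE_ON_LITE_MODE_NOT_SUPPORTED() */
    std::fprintf(stderr, "ORC access is not supported in lite mode: %s\n", path);
    std::abort();
#endif
}

int main()
{
    Predicate p{10};
    std::printf("42 selected: %d\n", CheckOneValue(42, p));
    ReadOrcFile("demo.orc");
    return 0;
}
```

The point of the `+#else` branches in the patch is exactly the second arm here: builds without LLVM still evaluate the predicate, only without code generation.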
Primary bucket pages (henceforth just "bucket pages") are allocated in -power-of-2 groups, called "split points" in the code. That means at every new -splitpoint we double the existing number of buckets. Allocating huge chunks -of bucket pages all at once isn't optimal and we will take ages to consume -those. To avoid this exponential growth of index size, we did use a trick to -break up allocation of buckets at the splitpoint into 4 equal phases. If -(2 ^ x) are the total buckets need to be allocated at a splitpoint (from now on -we shall call this as a splitpoint group), then we allocate 1/4th (2 ^ (x - 2)) -of total buckets at each phase of splitpoint group. Next quarter of allocation -will only happen if buckets of the previous phase have been already consumed. -For the initial splitpoint groups < 10 we will allocate all of their buckets in -single phase only, as number of buckets allocated at initial groups are small -in numbers. And for the groups >= 10 the allocation process is distributed -among four equal phases. At group 10 we allocate (2 ^ 9) buckets in 4 -different phases {2 ^ 7, 2 ^ 7, 2 ^ 7, 2 ^ 7}, the numbers in curly braces -indicate the number of buckets allocated within each phase of splitpoint group -10. And, for splitpoint group 11 and 12 allocation phases will be -{2 ^ 8, 2 ^ 8, 2 ^ 8, 2 ^ 8} and {2 ^ 9, 2 ^ 9, 2 ^ 9, 2 ^ 9} respectively. We -can see that at each splitpoint group we double the total number of buckets -from the previous group but in an incremental phase. The bucket pages -allocated within one phase of a splitpoint group will appear consecutively in -the index. This addressing scheme allows the physical location of a bucket -page to be computed from the bucket number relatively easily, using only a -small amount of control information. If we look at the function -_hash_spareindex for a given bucket number we first compute the -splitpoint group it belongs to and then the phase to which the bucket belongs -to. Adding them we get the global splitpoint phase number S to which the -bucket belongs and then simply add "hashm_spares[S] + 1" (where hashm_spares[] -is an array stored in the metapage) with given bucket number to compute its -physical address. The hashm_spares[S] can be interpreted as the total number -of overflow pages that have been allocated before the bucket pages of -splitpoint phase S. The hashm_spares[0] is always 0, so that buckets 0 and 1 -always appear at block numbers 1 and 2, just after the meta page. We always -have hashm_spares[N] <= hashm_spares[N+1], since the latter count includes the -former. The difference between the two represents the number of overflow pages -appearing between the bucket page groups of splitpoints phase N and N+1. +power-of-2 groups, called "split points" in the code. Buckets 0 and 1 +are created when the index is initialized. At the first split, buckets 2 +and 3 are allocated; when bucket 4 is needed, buckets 4-7 are allocated; +when bucket 8 is needed, buckets 8-15 are allocated; etc. All the bucket +pages of a power-of-2 group appear consecutively in the index. This +addressing scheme allows the physical location of a bucket page to be +computed from the bucket number relatively easily, using only a small +amount of control information. We take the log2() of the bucket number +to determine which split point S the bucket belongs to, and then simply +add "hashm_spares[S] + 1" (where hashm_spares[] is an array stored in the +metapage) to compute the physical address. 
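The addressing rule just stated is small enough to show directly. Below is a sketch that follows the README's formula (block = bucket + hashm_spares[S] + 1, with S the bucket's splitpoint); the spares values are made up for illustration, and this is a reading of the text rather than the kernel's actual BUCKET_TO_BLKNO macro.

```
// Bucket-number -> block-number mapping as described in the README text.
#include <cassert>
#include <cstdint>
#include <cstdio>

/* floor(log2(bucket)): buckets 0-1 -> splitpoint 0, 2-3 -> 1, 4-7 -> 2, ... */
static uint32_t SplitPointOf(uint32_t bucket)
{
    uint32_t s = 0;
    while (bucket > 1) {
        bucket >>= 1;
        s++;
    }
    return s;
}

/* hashm_spares[S] = overflow pages allocated before splitpoint S's buckets;
 * block 0 is the metapage, so bucket pages start at block 1 */
static uint32_t BucketToBlkno(uint32_t bucket, const uint32_t *spares)
{
    return bucket + spares[SplitPointOf(bucket)] + 1;
}

int main()
{
    uint32_t spares[16] = {0};

    /* no overflow pages yet: buckets occupy consecutive blocks after the metapage */
    assert(BucketToBlkno(0, spares) == 1);
    assert(BucketToBlkno(1, spares) == 2);
    assert(BucketToBlkno(3, spares) == 4);

    /* pretend 3 overflow pages sit below splitpoint 2's bucket pages */
    spares[2] = 3;
    std::printf("bucket 4 -> block %u\n", BucketToBlkno(4, spares));  /* prints 8 */
    return 0;
}
```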
hashm_spares[S] can be +interpreted as the total number of overflow pages that have been allocated +before the bucket pages of splitpoint S. hashm_spares[0] is always 0, +so that buckets 0 and 1 (which belong to splitpoint 0) always appear at +block numbers 1 and 2, just after the meta page. We always have +hashm_spares[N] <= hashm_spares[N+1], since the latter count includes the +former. The difference between the two represents the number of overflow +pages appearing between the bucket page groups of splitpoints N and N+1. + (Note: the above describes what happens when filling an initially minimally -sized hash index. In practice, we try to estimate the required index size and -allocate a suitable number of splitpoints phases immediately, to avoid +sized hash index. In practice, we try to estimate the required index size +and allocate a suitable number of splitpoints immediately, to avoid expensive re-splitting during initial index build.) When S splitpoints exist altogether, the array entries hashm_spares[0] through hashm_spares[S] are valid; hashm_spares[S] records the current total number of overflow pages. New overflow pages are created as needed at the end of the index, and recorded by incrementing hashm_spares[S]. -When it is time to create a new splitpoint phase's worth of bucket pages, we +When it is time to create a new splitpoint's worth of bucket pages, we copy hashm_spares[S] into hashm_spares[S+1] and increment S (which is stored in the hashm_ovflpoint field of the meta page). This has the effect of reserving the correct number of bucket pages at the end of the @@ -117,7 +101,7 @@ We have to allow the case "greater than" because it's possible that during an index extension we crash after allocating filesystem space and before updating the metapage. Note that on filesystems that allow "holes" in files, it's entirely likely that pages before the logical EOF are not yet -allocated: when we allocate a new splitpoint phase's worth of bucket pages, we +allocated: when we allocate a new splitpoint's worth of bucket pages, we physically zero the last such page to force the EOF up, and the first such page will be used immediately, but the intervening pages are not written until needed. @@ -142,98 +126,61 @@ the initially created buckets. Lock Definitions ---------------- -Concurrency control for hash indexes is provided using buffer content -locks, buffer pins, and cleanup locks. Here as elsewhere in PostgreSQL, -cleanup lock means that we hold an exclusive lock on the buffer and have -observed at some point after acquiring the lock that we hold the only pin -on that buffer. For hash indexes, a cleanup lock on a primary bucket page -represents the right to perform an arbitrary reorganization of the entire -bucket. Therefore, scans retain a pin on the primary bucket page for the -bucket they are currently scanning. Splitting a bucket requires a cleanup -lock on both the old and new primary bucket pages. VACUUM therefore takes -a cleanup lock on every bucket page in order to remove tuples. It can also -remove tuples copied to a new bucket by any previous split operation, because -the cleanup lock taken on the primary bucket page guarantees that no scans -which started prior to the most recent split can still be in progress. After -cleaning each page individually, it attempts to take a cleanup lock on the -primary bucket page in order to "squeeze" the bucket down to the minimum -possible number of pages. 
+We use both lmgr locks ("heavyweight" locks) and buffer context locks +(LWLocks) to control access to a hash index. lmgr locks are needed for +long-term locking since there is a (small) risk of deadlock, which we must +be able to detect. Buffer context locks are used for short-term access +control to individual pages of the index. -To avoid deadlocks, we must be consistent about the lock order in which we -lock the buckets for operations that requires locks on two different buckets. -We choose to always lock the lower-numbered bucket first. The metapage is -only ever locked after all bucket locks have been taken. +We define the following lmgr locks for a hash index: +LockPage(rel, 0) represents the right to modify the hash-code-to-bucket +mapping. A process attempting to enlarge the hash table by splitting a +bucket must exclusive-lock this lock before modifying the metapage data +representing the mapping. Processes intending to access a particular +bucket must share-lock this lock until they have acquired lock on the +correct target bucket. -Metapage Caching ----------------- +LockPage(rel, page), where page is the page number of a hash bucket page, +represents the right to split or compact an individual bucket. A process +splitting a bucket must exclusive-lock both old and new halves of the +bucket until it is done. A process doing VACUUM must exclusive-lock the +bucket it is currently purging tuples from. Processes doing scans or +insertions must share-lock the bucket they are scanning or inserting into. +(It is okay to allow concurrent scans and insertions.) -Both scanning the index and inserting tuples require locating the bucket -where a given tuple ought to be located. To do this, we need the bucket -count, highmask, and lowmask from the metapage; however, it's undesirable -for performance reasons to have to have to lock and pin the metapage for -every such operation. Instead, we retain a cached copy of the metapage -in each each backend's relcache entry. This will produce the correct -bucket mapping as long as the target bucket hasn't been split since the -last cache refresh. +The lmgr lock IDs corresponding to overflow pages are currently unused. +These are available for possible future refinements. -To guard against the possibility that such a split has occurred, the -primary page of each bucket chain stores the number of buckets that -existed as of the time the bucket was last split, or if never split as -of the time it was created, in the space normally used for the -previous block number (that is, hasho_prevblkno). This doesn't cost -anything because the primary bucket page is always the first page in -the chain, and the previous block number is therefore always, in -reality, InvalidBlockNumber. +Note that these lock definitions are conceptually distinct from any sort +of lock on the pages whose numbers they share. A process must also obtain +read or write buffer lock on the metapage or bucket page before accessing +said page. -After computing the ostensibly-correct bucket number based on our cached -copy of the metapage, we lock the corresponding primary bucket page and -check whether the bucket count stored in hasho_prevblkno is greater than -our the number of buckets stored in our cached copy of the metapage. If -so, the bucket has certainly been split, because the must originally -have been less than the number of buckets that existed at that time and -can't have increased except due to a split. 
If not, the bucket can't have -been split, because a split would have created a new bucket with a higher -bucket number than any we'd seen previously. In the latter case, we've -locked the correct bucket and can proceed; in the former case, we must -release the lock on this bucket, lock the metapage, update our cache, -unlock the metapage, and retry. +Processes performing hash index scans must hold share lock on the bucket +they are scanning throughout the scan. This seems to be essential, since +there is no reasonable way for a scan to cope with its bucket being split +underneath it. This creates a possibility of deadlock external to the +hash index code, since a process holding one of these locks could block +waiting for an unrelated lock held by another process. If that process +then does something that requires exclusive lock on the bucket, we have +deadlock. Therefore the bucket locks must be lmgr locks so that deadlock +can be detected and recovered from. This also forces the page-zero lock +to be an lmgr lock, because as we'll see below it is held while attempting +to acquire a bucket lock, and so it could also participate in a deadlock. -Needing to retry occasionally might seem expensive, but the number of times -any given bucket can be split is limited to a few dozen no matter how -many times the hash index is accessed, because the total number of -buckets is limited to less than 2^32. On the other hand, the number of -times we access a bucket is unbounded and will be several orders of -magnitude larger even in unsympathetic cases. +Processes must obtain read (share) buffer context lock on any hash index +page while reading it, and write (exclusive) lock while modifying it. +To prevent deadlock we enforce these coding rules: no buffer lock may be +held long term (across index AM calls), nor may any buffer lock be held +while waiting for an lmgr lock, nor may more than one buffer lock +be held at a time by any one process. (The third restriction is probably +stronger than necessary, but it makes the proof of no deadlock obvious.) -(The metapage cache is new in v10. Older hash indexes had the primary -bucket page's hasho_prevblkno initialized to InvalidBuffer.) Pseudocode Algorithms --------------------- -Various flags that are used in hash index operations are described as below: - -The bucket-being-split and bucket-being-populated flags indicate that split -the operation is in progress for a bucket. During split operation, a -bucket-being-split flag is set on the old bucket and bucket-being-populated -flag is set on new bucket. These flags are cleared once the split operation -is finished. - -The split-cleanup flag indicates that a bucket which has been recently split -still contains tuples that were also copied to the new bucket; it essentially -marks the split as incomplete. Once we're certain that no scans which -started before the new bucket was fully populated are still in progress, we -can remove the copies from the old bucket and clear the flag. We insist that -this flag must be clear before splitting a bucket; thus, a bucket can't be -split again until the previous split is totally complete. - -The moved-by-split flag on a tuple indicates that tuple is moved from old to -new bucket. Concurrent scans will skip such tuples until the split operation -is finished. Once the tuple is marked as moved-by-split, it will remain so -forever but that does no harm. We have intentionally not cleared it as that -can generate an additional I/O which is not necessary. 
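The flag protocol that the removed paragraphs describe reduces to a few bit tests. A compact sketch follows; the flag names mirror the README's terminology and are not necessarily the kernel's actual macros.

```
// Toy model of the bucket/tuple flags used during a split.
#include <cstdint>
#include <cstdio>

enum BucketFlags : uint16_t {
    BUCKET_BEING_POPULATED     = 1 << 0,  /* set on the new bucket during a split */
    BUCKET_BEING_SPLIT         = 1 << 1,  /* set on the old bucket during a split */
    BUCKET_NEEDS_SPLIT_CLEANUP = 1 << 2   /* old bucket still holds copied tuples */
};

struct BucketPage { uint16_t flags; };
struct IndexTupleStub { bool movedBySplit; };

/* a bucket may not be split again until the previous split fully completed */
static bool CanStartSplit(const BucketPage &old_bucket)
{
    return (old_bucket.flags &
            (BUCKET_BEING_SPLIT | BUCKET_NEEDS_SPLIT_CLEANUP)) == 0;
}

/* concurrent scans skip moved-by-split tuples while the split is in flight */
static bool ScanShouldSkip(const BucketPage &bucket, const IndexTupleStub &tup)
{
    return (bucket.flags & BUCKET_BEING_POPULATED) != 0 && tup.movedBySplit;
}

int main()
{
    BucketPage oldb{BUCKET_BEING_SPLIT};
    IndexTupleStub moved{true};
    std::printf("can split again: %d\n", CanStartSplit(oldb));          /* 0 */
    BucketPage newb{BUCKET_BEING_POPULATED};
    std::printf("scan skips tuple: %d\n", ScanShouldSkip(newb, moved)); /* 1 */
    return 0;
}
```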
- The operations we need to support are: readers scanning the index for entries of a particular hash code (which by definition are all in the same bucket); insertion of a new tuple into the correct bucket; enlarging the @@ -248,75 +195,57 @@ track of available overflow pages. The reader algorithm is: - lock the primary bucket page of the target bucket - if the target bucket is still being populated by a split: - release the buffer content lock on current bucket page - pin and acquire the buffer content lock on old bucket in shared mode - release the buffer content lock on old bucket, but not pin - retake the buffer content lock on new bucket - arrange to scan the old bucket normally and the new bucket for - tuples which are not moved-by-split + share-lock page 0 (to prevent active split) + read/sharelock meta page + compute bucket number for target hash key + release meta page + share-lock bucket page (to prevent split/compact of this bucket) + release page 0 share-lock -- then, per read request: - reacquire content lock on current page - step to next page if necessary (no chaining of content locks, but keep - the pin on the primary bucket throughout the scan; we also maintain - a pin on the page currently being scanned) + read/sharelock current page of bucket + step to next page if necessary (no chaining of locks) get tuple - release content lock + release current page -- at scan shutdown: - release all pins still held + release bucket share-lock -Holding the buffer pin on the primary bucket page for the whole scan prevents -the reader's current-tuple pointer from being invalidated by splits or -compactions. (Of course, other buckets can still be split or compacted.) +By holding the page-zero lock until lock on the target bucket is obtained, +the reader ensures that the target bucket calculation is valid (otherwise +the bucket might be split before the reader arrives at it, and the target +entries might go into the new bucket). Holding the bucket sharelock for +the remainder of the scan prevents the reader's current-tuple pointer from +being invalidated by splits or compactions. Notice that the reader's lock +does not prevent other buckets from being split or compacted. To keep concurrency reasonably good, we require readers to cope with concurrent insertions, which means that they have to be able to re-find -their current scan position after re-acquiring the buffer content lock on -page. Since deletion is not possible while a reader holds the pin on bucket, -and we assume that heap tuple TIDs are unique, this can be implemented by +their current scan position after re-acquiring the page sharelock. Since +deletion is not possible while a reader holds the bucket sharelock, and +we assume that heap tuple TIDs are unique, this can be implemented by searching for the same heap tuple TID previously returned. Insertion does not move index entries across pages, so the previously-returned index entry should always be on the same page, at the same or higher offset number, as it was before. -To allow for scans during a bucket split, if at the start of the scan, the -bucket is marked as bucket-being-populated, it scan all the tuples in that -bucket except for those that are marked as moved-by-split. Once it finishes -the scan of all the tuples in the current bucket, it scans the old bucket from -which this bucket is formed by split. 
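Both versions of the reader algorithm rely on the same re-find step: after relocking the page, locate the previously returned heap TID at the same or a higher offset. Roughly, treating a page as a plain array of TIDs (a simplification of the real item layout):

```
// Re-find the scan position by heap TID, as both README versions require.
#include <cstdint>
#include <cstdio>
#include <vector>

struct HeapTid { uint32_t block; uint16_t offset; };

static bool TidEquals(const HeapTid &a, const HeapTid &b)
{
    return a.block == b.block && a.offset == b.offset;
}

/* entries are kept sorted by hash code and insertions never move an entry
 * off its page, so the old entry can only be at prev_offnum or later */
static int RefindScanPosition(const std::vector<HeapTid> &page,
                              int prev_offnum, const HeapTid &prev_tid)
{
    for (int off = prev_offnum; off < (int)page.size(); off++) {
        if (TidEquals(page[off], prev_tid))
            return off;   /* same tuple, possibly pushed to a higher slot */
    }
    return -1;            /* not expected while deletions are locked out */
}

int main()
{
    std::vector<HeapTid> page = {{7, 1}, {9, 4}, {3, 2}};
    HeapTid prev{9, 4};
    /* a concurrent insertion shifted our tuple from offset 0 to offset 1 */
    std::printf("refound at offset %d\n", RefindScanPosition(page, 0, prev));
    return 0;
}
```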
- The insertion algorithm is rather similar: - lock the primary bucket page of the target bucket --- (so far same as reader, except for acquisition of buffer content lock in - exclusive mode on primary bucket page) - if the bucket-being-split flag is set for a bucket and pin count on it is - one, then finish the split - release the buffer content lock on current bucket - get the "new" bucket which was being populated by the split - scan the new bucket and form the hash table of TIDs - conditionally get the cleanup lock on old and new buckets - if we get the lock on both the buckets - finish the split using algorithm mentioned below for split - release the pin on old bucket and restart the insert from beginning. - if current page is full, first check if this page contains any dead tuples. - if yes, remove dead tuples from the current page and again check for the - availability of the space. If enough space found, insert the tuple else - release lock but not pin, read/exclusive-lock - next page; repeat as needed + share-lock page 0 (to prevent active split) + read/sharelock meta page + compute bucket number for target hash key + release meta page + share-lock bucket page (to prevent split/compact of this bucket) + release page 0 share-lock +-- (so far same as reader) + read/exclusive-lock current page of bucket + if full, release, read/exclusive-lock next page; repeat as needed >> see below if no space in any page of bucket - take buffer content lock in exclusive mode on metapage insert tuple at appropriate place in page - mark current page dirty + write/release current page + release bucket share-lock + read/exclusive-lock meta page increment tuple count, decide if split needed - mark meta page dirty - write WAL for insertion of tuple - release the buffer content lock on metapage - release buffer content lock on current page - if current page is not a bucket page, release the pin on bucket page - if split is needed, enter Split algorithm below - release the pin on metapage + write/release meta page + done if no split needed, else enter Split algorithm below To speed searches, the index entries within any individual index page are kept sorted by hash code; the insertion code must take care to insert new @@ -325,13 +254,11 @@ bucket that is being actively scanned, because readers can cope with this as explained above. We only need the short-term buffer locks to ensure that readers do not see a partially-updated page. -To avoid deadlock between readers and inserters, whenever there is a need -to lock multiple buckets, we always take in the order suggested in Lock -Definitions above. This algorithm allows them a very high degree of -concurrency. (The exclusive metapage lock taken to update the tuple count -is stronger than necessary, since readers do not care about the tuple count, -but the lock is held for such a short time that this is probably not an -issue.) +It is clearly impossible for readers and inserters to deadlock, and in +fact this algorithm allows them a very high degree of concurrency. +(The exclusive metapage lock taken to update the tuple count is stronger +than necessary, since readers do not care about the tuple count, but the +lock is held for such a short time that this is probably not an issue.) When an inserter cannot find space in any existing page of a bucket, it must obtain an overflow page and add that page to the bucket's chain. @@ -342,95 +269,82 @@ index is overfull (has a higher-than-wanted ratio of tuples to buckets). 
The algorithm attempts, but does not necessarily succeed, to split one existing bucket in two, thereby lowering the fill ratio: - pin meta page and take buffer content lock in exclusive mode - check split still needed - if split not needed anymore, drop buffer content lock and pin and exit - decide which bucket to split - try to take a cleanup lock on that bucket; if fail, give up - if that bucket is still being split or has split-cleanup work: - try to finish the split and the cleanup work - if that succeeds, start over; if it fails, give up - mark the old and new buckets indicating split is in progress - mark both old and new buckets as dirty - write WAL for allocation of new page for split - copy the tuples that belongs to new bucket from old bucket, marking - them as moved-by-split - write WAL record for moving tuples to new page once the new page is full - or all the pages of old bucket are finished - release lock but not pin for primary bucket page of old bucket, - read/shared-lock next page; repeat as needed - clear the bucket-being-split and bucket-being-populated flags - mark the old bucket indicating split-cleanup - write WAL for changing the flags on both old and new buckets + exclusive-lock page 0 (assert the right to begin a split) + read/exclusive-lock meta page + check split still needed + if split not needed anymore, drop locks and exit + decide which bucket to split + Attempt to X-lock old bucket number (definitely could fail) + Attempt to X-lock new bucket number (shouldn't fail, but...) + if above fail, drop locks and exit + update meta page to reflect new number of buckets + write/release meta page + release X-lock on page 0 + -- now, accesses to all other buckets can proceed. + Perform actual split of bucket, moving tuples as needed + >> see below about acquiring needed extra space + Release X-locks of old and new buckets -The split operation's attempt to acquire cleanup-lock on the old bucket number -could fail if another process holds any lock or pin on it. We do not want to -wait if that happens, because we don't want to wait while holding the metapage -exclusive-lock. So, this is a conditional LWLockAcquire operation, and if -it fails we just abandon the attempt to split. This is all right since the -index is overfull but perfectly functional. Every subsequent inserter will -try to split, and eventually one will succeed. If multiple inserters failed -to split, the index might still be overfull, but eventually, the index will +Note the page zero and metapage locks are not held while the actual tuple +rearrangement is performed, so accesses to other buckets can proceed in +parallel; in fact, it's possible for multiple bucket splits to proceed +in parallel. + +Split's attempt to X-lock the old bucket number could fail if another +process holds S-lock on it. We do not want to wait if that happens, first +because we don't want to wait while holding the metapage exclusive-lock, +and second because it could very easily result in deadlock. (The other +process might be out of the hash AM altogether, and could do something +that blocks on another lock this process holds; so even if the hash +algorithm itself is deadlock-free, a user-induced deadlock could occur.) +So, this is a conditional LockAcquire operation, and if it fails we just +abandon the attempt to split. This is all right since the index is +overfull but perfectly functional. Every subsequent inserter will try to +split, and eventually one will succeed. 
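The conditional acquire-or-give-up step reads naturally as a try-lock. The toy version below uses `std::mutex` in place of lmgr locks and cleanup locks, purely to illustrate the control flow of abandoning the split rather than waiting:

```
// "Conditionally lock both buckets; if either attempt fails, abandon the split."
#include <cstdio>
#include <mutex>

struct Bucket { std::mutex lock; };

/* returns false (and does nothing) rather than waiting: we must not block
 * while holding the metapage lock, and waiting could deadlock */
static bool TrySplit(Bucket &old_bucket, Bucket &new_bucket)
{
    if (!old_bucket.lock.try_lock())
        return false;                 /* someone is using it: give up for now */
    if (!new_bucket.lock.try_lock()) {
        old_bucket.lock.unlock();
        return false;
    }
    /* ... move tuples from the old bucket into the new one here ... */
    new_bucket.lock.unlock();
    old_bucket.lock.unlock();
    return true;
}

int main()
{
    Bucket a, b;
    std::printf("split %s\n", TrySplit(a, b) ? "succeeded" : "abandoned");
    return 0;
}
```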
If multiple inserters failed to +split, the index might still be overfull, but eventually, the index will not be overfull and split attempts will stop. (We could make a successful splitter loop to see if the index is still overfull, but it seems better to distribute the split overhead across successive insertions.) -If a split fails partway through (e.g. due to insufficient disk space or an -interrupt), the index will not be corrupted. Instead, we'll retry the split -every time a tuple is inserted into the old bucket prior to inserting the new -tuple; eventually, we should succeed. The fact that a split is left -unfinished doesn't prevent subsequent buckets from being split, but we won't -try to split the bucket again until the prior split is finished. In other -words, a bucket can be in the middle of being split for some time, but it can't -be in the middle of two splits at the same time. +A problem is that if a split fails partway through (eg due to insufficient +disk space) the index is left corrupt. The probability of that could be +made quite low if we grab a free page or two before we update the meta +page, but the only real solution is to treat a split as a WAL-loggable, +must-complete action. I'm not planning to teach hash about WAL in this +go-round. The fourth operation is garbage collection (bulk deletion): next bucket := 0 - pin metapage and take buffer content lock in exclusive mode + read/sharelock meta page fetch current max bucket number - release meta page buffer content lock and pin + release meta page while next bucket <= max bucket do - acquire cleanup lock on primary bucket page - loop: - scan and remove tuples - mark the target page dirty - write WAL for deleting tuples from target page - if this is the last bucket page, break out of loop - pin and x-lock next page - release prior lock and pin (except keep pin on primary bucket page) - if the page we have locked is not the primary bucket page: - release lock and take exclusive lock on primary bucket page - if there are no other pins on the primary bucket page: - squeeze the bucket to remove free space - release the pin on primary bucket page + Acquire X lock on target bucket + Scan and remove tuples, compact free space as needed + Release X lock next bucket ++ end loop - pin metapage and take buffer content lock in exclusive mode + exclusive-lock meta page check if number of buckets changed - if so, release content lock and pin and return to for-each-bucket loop + if so, release lock and return to for-each-bucket loop else update metapage tuple count - mark meta page dirty and write WAL for update of metapage - release buffer content lock and pin + write/release meta page -Note that this is designed to allow concurrent splits and scans. If a split -occurs, tuples relocated into the new bucket will be visited twice by the -scan, but that does no harm. As we release the lock on bucket page during -cleanup scan of a bucket, it will allow concurrent scan to start on a bucket -and ensures that scan will always be behind cleanup. It is must to keep scans -behind cleanup, else vacuum could decrease the TIDs that are required to -complete the scan. Now, as the scan that returns multiple tuples from the -same bucket page always expect next valid TID to be greater than or equal to -the current TID, it might miss the tuples. This holds true for backward scans -as well (backward scans first traverse each bucket starting from first bucket -to last overflow page in the chain). 
We must be careful about the statistics +Note that this is designed to allow concurrent splits. If a split occurs, +tuples relocated into the new bucket will be visited twice by the scan, +but that does no harm. (We must however be careful about the statistics reported by the VACUUM operation. What we can do is count the number of -tuples scanned, and believe this in preference to the stored tuple count if -the stored tuple count and number of buckets did *not* change at any time -during the scan. This provides a way of correcting the stored tuple count if -it gets out of sync for some reason. But if a split or insertion does occur -concurrently, the scan count is untrustworthy; instead, subtract the number of -tuples deleted from the stored tuple count and use that. +tuples scanned, and believe this in preference to the stored tuple count +if the stored tuple count and number of buckets did *not* change at any +time during the scan. This provides a way of correcting the stored tuple +count if it gets out of sync for some reason. But if a split or insertion +does occur concurrently, the scan count is untrustworthy; instead, +subtract the number of tuples deleted from the stored tuple count and +use that.) + +The exclusive lock request could deadlock in some strange scenarios, but +we can just error out without any great harm being done. Free Space Management @@ -446,23 +360,25 @@ overflow page to the free pool. Obtaining an overflow page: - take metapage content lock in exclusive mode + read/exclusive-lock meta page determine next bitmap page number; if none, exit loop - release meta page content lock - pin bitmap page and take content lock in exclusive mode + release meta page lock + read/exclusive-lock bitmap page search for a free page (zero bit in bitmap) if found: set bit in bitmap - mark bitmap page dirty - take metapage buffer content lock in exclusive mode + write/release bitmap page + read/exclusive-lock meta page if first-free-bit value did not change, - update it and mark meta page dirty + update it and write meta page + release meta page + return page number else (not found): - release bitmap page buffer content lock + release bitmap page loop back to try next bitmap page, if any -- here when we have checked all bitmap pages; we hold meta excl. lock extend index to add another overflow page; update meta information - mark meta page dirty + write/release meta page return page number It is slightly annoying to release and reacquire the metapage lock @@ -482,17 +398,12 @@ like this: -- having determined that no space is free in the target bucket: remember last page of bucket, drop write lock on it + call free-page-acquire routine re-write-lock last page of bucket if it is not last anymore, step to the last page - execute free-page-acquire (obtaining an overflow page) mechanism - described above - update (former) last page to point to the new page and mark buffer dirty + update (former) last page to point to new page write-lock and initialize new page, with back link to former last page - write WAL for addition of overflow page - release the locks on meta page and bitmap page acquired in - free-page-acquire algorithm - release the lock on former last page - release the lock on new overflow page + write and release former last page insert tuple into new page -- etc. @@ -507,27 +418,27 @@ free page; there can be no other process holding lock on it. 
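The "search for a free page (zero bit in bitmap)" step in the overflow-page algorithm above is plain bit arithmetic. A toy version, with a deliberately tiny illustrative page size (real bitmap pages hold far more bits):

```
// Find and claim a free overflow page in a bitmap page.
#include <cstdint>
#include <cstdio>

constexpr int BITS_PER_BITMAP_PAGE = 64;   /* illustrative only */

/* returns the index of the first zero bit, or -1 if the page is full */
static int FindFreeBit(const uint8_t *bitmap)
{
    for (int bit = 0; bit < BITS_PER_BITMAP_PAGE; bit++) {
        if ((bitmap[bit / 8] & (1 << (bit % 8))) == 0)
            return bit;
    }
    return -1;
}

static void SetBit(uint8_t *bitmap, int bit)
{
    bitmap[bit / 8] |= (uint8_t)(1 << (bit % 8));
}

int main()
{
    uint8_t bitmap[BITS_PER_BITMAP_PAGE / 8] = {0xFF, 0x3F};  /* bits 0-13 in use */
    int bit = FindFreeBit(bitmap);
    if (bit >= 0) {
        SetBit(bitmap, bit);          /* claim the page; prints 14 here */
        std::printf("allocated overflow page for bit %d\n", bit);
    } else {
        std::printf("all bits used: extend the index with a new page\n");
    }
    return 0;
}
```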
Bucket splitting uses a similar algorithm if it has to extend the new bucket, but it need not worry about concurrent extension since it has -buffer content lock in exclusive mode on the new bucket. +exclusive lock on the new bucket. -Freeing an overflow page requires the process to hold buffer content lock in -exclusive mode on the containing bucket, so need not worry about other -accessors of pages in the bucket. The algorithm is: +Freeing an overflow page is done by garbage collection and by bucket +splitting (the old bucket may contain no-longer-needed overflow pages). +In both cases, the process holds exclusive lock on the containing bucket, +so need not worry about other accessors of pages in the bucket. The +algorithm is: delink overflow page from bucket chain (this requires read/update/write/release of fore and aft siblings) - pin meta page and take buffer content lock in shared mode + read/share-lock meta page determine which bitmap page contains the free space bit for page - release meta page buffer content lock - pin bitmap page and take buffer content lock in exclusive mode - retake meta page buffer content lock in exclusive mode - move (insert) tuples that belong to the overflow page being freed + release meta page + read/exclusive-lock bitmap page update bitmap bit - mark bitmap page dirty + write/release bitmap page + if page number is less than what we saw as first-free-bit in meta: + read/exclusive-lock meta page if page number is still less than first-free-bit, - update first-free-bit field and mark meta page dirty - write WAL for delinking overflow page operation - release buffer content lock and pin - release meta page buffer content lock and pin + update first-free-bit field and write meta page + release meta page We have to do it this way because we must clear the bitmap bit before changing the first-free-bit field (hashm_firstfree). It is possible that @@ -537,96 +448,21 @@ page acquirer will scan more bitmap bits than he needs to. What must be avoided is having first-free-bit greater than the actual first free bit, because then that free page would never be found by searchers. -The reason of moving tuples from overflow page while delinking the later is -to make that as an atomic operation. Not doing so could lead to spurious reads -on standby. Basically, the user might see the same tuple twice. - - -WAL Considerations ------------------- - -The hash index operations like create index, insert, delete, bucket split, -allocate overflow page, and squeeze in themselves don't guarantee hash index -consistency after a crash. To provide robustness, we write WAL for each of -these operations. - -CREATE INDEX writes multiple WAL records. First, we write a record to cover -the initializatoin of the metapage, followed by one for each new bucket -created, followed by one for the initial bitmap page. It's not important for -index creation to appear atomic, because the index isn't yet visible to any -other transaction, and the creating transaction will roll back in the event of -a crash. It would be difficult to cover the whole operation with a single -write-ahead log record anyway, because we can log only a fixed number of -pages, as given by XLR_MAX_BLOCK_ID (32), with current XLog machinery. - -Ordinary item insertions (that don't force a page split or need a new overflow -page) are single WAL entries. They touch a single bucket page and the -metapage. The metapage is updated during replay as it is updated during -original operation. 
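That ordering constraint is easy to show in miniature: clear the bitmap bit first, then only ever lower the first-free-bit hint, so the hint can err small (costing extra scanning) but never large (hiding a free page forever). Field names below follow the README, not necessarily the kernel.

```
// Safe ordering when returning an overflow page to the free pool.
#include <cstdint>
#include <cstdio>

struct MetaPage { uint32_t firstfree; };   /* stand-in for hashm_firstfree */

static void ClearBit(uint8_t *bitmap, uint32_t bit)
{
    bitmap[bit / 8] &= (uint8_t)~(1 << (bit % 8));
}

static void FreeOverflowPage(uint8_t *bitmap, MetaPage &meta, uint32_t bit)
{
    ClearBit(bitmap, bit);            /* step 1: the page becomes findable */
    if (bit < meta.firstfree)
        meta.firstfree = bit;         /* step 2: the hint may only move down */
}

int main()
{
    uint8_t bitmap[8] = {0xFF};       /* bits 0-7 in use */
    MetaPage meta{8};
    FreeOverflowPage(bitmap, meta, 3);
    std::printf("firstfree hint is now %u\n", meta.firstfree);  /* 3 */
    return 0;
}
```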
- -If an insertion causes the addition of an overflow page, there will be one -WAL entry for the new overflow page and second entry for insert itself. - -If an insertion causes a bucket split, there will be one WAL entry for insert -itself, followed by a WAL entry for allocating a new bucket, followed by a WAL -entry for each overflow bucket page in the new bucket to which the tuples are -moved from old bucket, followed by a WAL entry to indicate that split is -complete for both old and new buckets. A split operation which requires -overflow pages to complete the operation will need to write a WAL record for -each new allocation of an overflow page. - -As splitting involves multiple atomic actions, it's possible that the system -crashes between moving tuples from bucket pages of the old bucket to new -bucket. In such a case, after recovery, the old and new buckets will be -marked with bucket-being-split and bucket-being-populated flags respectively -which indicates that split is in progress for those buckets. The reader -algorithm works correctly, as it will scan both the old and new buckets when -the split is in progress as explained in the reader algorithm section above. - -We finish the split at next insert or split operation on the old bucket as -explained in insert and split algorithm above. It could be done during -searches, too, but it seems best not to put any extra updates in what would -otherwise be a read-only operation (updating is not possible in hot standby -mode anyway). It would seem natural to complete the split in VACUUM, but since -splitting a bucket might require allocating a new page, it might fail if you -run out of disk space. That would be bad during VACUUM - the reason for -running VACUUM in the first place might be that you run out of disk space, -and now VACUUM won't finish because you're out of disk space. In contrast, -an insertion can require enlarging the physical file anyway. - -Deletion of tuples from a bucket is performed for two reasons: to remove dead -tuples, and to remove tuples that were moved by a bucket split. A WAL entry -is made for each bucket page from which tuples are removed, and then another -WAL entry is made when we clear the needs-split-cleanup flag. If dead tuples -are removed, a separate WAL entry is made to update the metapage. - -As deletion involves multiple atomic operations, it is quite possible that -system crashes after (a) removing tuples from some of the bucket pages, (b) -before clearing the garbage flag, or (c) before updating the metapage. If the -system crashes before completing (b), it will again try to clean the bucket -during next vacuum or insert after recovery which can have some performance -impact, but it will work fine. If the system crashes before completing (c), -after recovery there could be some additional splits until the next vacuum -updates the metapage, but the other operations like insert, delete and scan -will work correctly. We can fix this problem by actually updating the -metapage based on delete operation during replay, but it's not clear whether -it's worth the complication. - -A squeeze operation moves tuples from one of the buckets later in the chain to -one of the bucket earlier in chain and writes WAL record when either the -bucket to which it is writing tuples is filled or bucket from which it -is removing the tuples becomes empty. - -As a squeeze operation involves writing multiple atomic operations, it is -quite possible that the system crashes before completing the operation on -entire bucket. 
After recovery, the operations will work correctly, but -the index will remain bloated and this can impact performance of read and -insert operations until the next vacuum squeeze the bucket completely. +All the freespace operations should be called while holding no buffer +locks. Since they need no lmgr locks, deadlock is not possible. Other Notes ----------- -Clean up locks prevent a split from occurring while *another* process is stopped -in a given bucket. It also ensures that one of our *own* backend's scans is not -stopped in the bucket. +All the shenanigans with locking prevent a split occurring while *another* +process is stopped in a given bucket. They do not ensure that one of +our *own* backend's scans is not stopped in the bucket, because lmgr +doesn't consider a process's own locks to conflict. So the Split +algorithm must check for that case separately before deciding it can go +ahead with the split. VACUUM does not have this problem since nothing +else can be happening within the vacuuming backend. + +Should we instead try to fix the state of any conflicting local scan? +Seems mighty ugly --- got to move the held bucket S-lock as well as lots +of other messiness. For now, just punt and don't split. diff --git a/src/gausskernel/storage/access/hash/hash.cpp b/src/gausskernel/storage/access/hash/hash.cpp index f6dfff88d..44cd00890 100644 --- a/src/gausskernel/storage/access/hash/hash.cpp +++ b/src/gausskernel/storage/access/hash/hash.cpp @@ -3,8 +3,8 @@ * hash.cpp * Implementation of Margo Seltzer's Hashing package for postgres. * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -20,8 +20,6 @@ #include "knl/knl_variable.h" #include "access/hash.h" -#include "access/hash_xlog.h" -#include "access/xloginsert.h" #include "access/tableam.h" #include "access/relscan.h" #include "catalog/index.h" @@ -36,7 +34,6 @@ typedef struct { HSpool *spool; /* NULL if not using spooling */ double indtuples; /* # tuples accepted into index */ - Relation heapRel; /* heap relation descriptor */ } HashBuildState; static void hashbuildCallback(Relation index, HeapTuple htup, Datum *values, const bool *isnull, bool tupleIsAlive, @@ -55,7 +52,6 @@ Datum hashbuild(PG_FUNCTION_ARGS) double reltuples; double allvisfrac; uint32 num_buckets; - long sort_threshold; HashBuildState buildstate; /* @@ -70,7 +66,7 @@ Datum hashbuild(PG_FUNCTION_ARGS) estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac, NULL); /* Initialize the hash index metadata page and initial buckets */ - num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM); + num_buckets = _hash_metapinit(index, reltuples, MAIN_FORKNUM); /* * If we just insert the tuples into the index in scan order, then * (assuming their hash codes are pretty random) there will be no locality @@ -78,38 +74,25 @@ Datum hashbuild(PG_FUNCTION_ARGS) * then we'll thrash horribly. To prevent that scenario, we can sort the * tuples by (expected) bucket number. However, such a sort is useless * overhead when the index does fit in RAM. We choose to sort if the - * initial index size exceeds maintenance_work_mem, or the number of - * buffers usable for the index, whichever is less. 
(Limiting by the - * number of buffers should reduce thrashing between PG buffers and kernel - * buffers, which seems useful even if no physical I/O results. Limiting - * by maintenance_work_mem is useful to allow easy testing of the sort - * code path, and may be useful to DBAs as an additional control knob.) + * initial index size exceeds NBuffers. * * NOTE: this test will need adjustment if a bucket is ever different from - * one page. Also, "initial index size" accounting does not include the - * metapage, nor the first bitmap page. + * one page. */ - sort_threshold = (u_sess->attr.attr_memory.maintenance_work_mem * 1024L) / BLCKSZ; - if (index->rd_rel->relpersistence != RELPERSISTENCE_TEMP) - sort_threshold = Min(sort_threshold, g_instance.attr.attr_storage.NBuffers); - else - sort_threshold = Min(sort_threshold, u_sess->storage_cxt.NLocBuffer); - - if (num_buckets >= (uint32)sort_threshold) - buildstate.spool = _h_spoolinit(heap, index, num_buckets, &indexInfo->ii_desc); + if (num_buckets >= (uint32)g_instance.attr.attr_storage.NBuffers) + buildstate.spool = _h_spoolinit(index, num_buckets, &indexInfo->ii_desc); else buildstate.spool = NULL; /* prepare to build the index */ buildstate.indtuples = 0; - buildstate.heapRel = heap; /* do the heap scan */ reltuples = tableam_index_build_scan(heap, index, indexInfo, true, hashbuildCallback, (void*)&buildstate, NULL); if (buildstate.spool != NULL) { /* sort the tuples and insert them into the index */ - _h_indexbuild(buildstate.spool, buildstate.heapRel); + _h_indexbuild(buildstate.spool); _h_spooldestroy(buildstate.spool); } @@ -131,7 +114,7 @@ Datum hashbuildempty(PG_FUNCTION_ARGS) { Relation index = (Relation)PG_GETARG_POINTER(0); - _hash_init(index, 0, INIT_FORKNUM); + _hash_metapinit(index, 0, INIT_FORKNUM); PG_RETURN_VOID(); } @@ -143,24 +126,21 @@ static void hashbuildCallback(Relation index, HeapTuple htup, Datum *values, con void *state) { HashBuildState *buildstate = (HashBuildState *)state; - Datum index_values[1]; - bool index_isnull[1]; IndexTuple itup; - /* convert data to a hash key; on failure, do not insert anything */ - if (!_hash_convert_tuple(index, - values, isnull, - index_values, index_isnull)) + /* Hash indexes don't index nulls, see notes in hashinsert */ + if (isnull[0]) { return; + } /* Either spool the tuple for sorting, or just put it into the index */ if (buildstate->spool != NULL) { - _h_spool(buildstate->spool, &htup->t_self, index_values, index_isnull); + _h_spool(buildstate->spool, &htup->t_self, values, isnull); } else { /* form an index tuple and point it at the heap tuple */ - itup = index_form_tuple(RelationGetDescr(index), index_values, index_isnull); + itup = _hash_form_tuple(index, values, isnull); itup->t_tid = htup->t_self; - _hash_doinsert(index, itup, buildstate->heapRel); + _hash_doinsert(index, itup); pfree(itup); } @@ -179,22 +159,30 @@ Datum hashinsert(PG_FUNCTION_ARGS) Datum *values = (Datum *)PG_GETARG_POINTER(1); bool *isnull = (bool *)PG_GETARG_POINTER(2); ItemPointer ht_ctid = (ItemPointer)PG_GETARG_POINTER(3); + +#ifdef NOT_USED Relation heapRel = (Relation)PG_GETARG_POINTER(4); - Datum index_values[1]; - bool index_isnull[1]; + IndexUniqueCheck checkUnique = (IndexUniqueCheck)PG_GETARG_INT32(5); +#endif IndexTuple itup; - /* convert data to a hash key; on failure, do not insert anything */ - if (!_hash_convert_tuple(rel, - values, isnull, - index_values, index_isnull)) - return false; + /* + * If the single index key is null, we don't insert it into the index. 
+ * Hash tables support scans on '='. Relational algebra says that A = B + * returns null if either A or B is null. This means that no + * qualification used in an index scan could ever return true on a null + * attribute. It also means that indices can't be used by ISNULL or + * NOTNULL scans, but that's an artifact of the strategy map architecture + * chosen in 1986, not of the way nulls are handled here. + */ + if (isnull[0]) + PG_RETURN_BOOL(false); - /* form an index tuple and point it at the heap tuple */ - itup = index_form_tuple(RelationGetDescr(rel), index_values, index_isnull); + /* generate an index tuple */ + itup = _hash_form_tuple(rel, values, isnull); itup->t_tid = *ht_ctid; - _hash_doinsert(rel, itup, heapRel); + _hash_doinsert(rel, itup); pfree(itup); @@ -224,7 +212,7 @@ Datum hashgettuple(PG_FUNCTION_ARGS) * Reacquire the read lock here. */ if (BufferIsValid(so->hashso_curbuf)) - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE); + _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ); /* * If we've already initialized this scan, we can just advance it in the @@ -236,21 +224,16 @@ Datum hashgettuple(PG_FUNCTION_ARGS) /* * An insertion into the current index page could have happened while * we didn't have read lock on it. Re-find our position by looking - * for the TID we previously returned. (Because we hold a pin on the - * primary bucket page, no deletions or splits could have occurred; - * therefore we can expect that the TID still exists in the current - * index page, at an offset >= where we were.) + * for the TID we previously returned. (Because we hold share lock on + * the bucket, no deletions or splits could have occurred; therefore + * we can expect that the TID still exists in the current index page, + * at an offset >= where we were.) */ OffsetNumber maxoffnum; buf = so->hashso_curbuf; Assert(BufferIsValid(buf)); page = BufferGetPage(buf); - - /* - * We don't need test for old snapshot here as the current buffer is - * pinned, so vacuum can't clean the page. - */ maxoffnum = PageGetMaxOffsetNumber(page); for (offnum = ItemPointerGetOffsetNumber(current); offnum <= maxoffnum; offnum = OffsetNumberNext(offnum)) { IndexTuple itup; @@ -270,22 +253,14 @@ Datum hashgettuple(PG_FUNCTION_ARGS) */ if (scan->kill_prior_tuple) { /* - * Yes, so remember it for later. (We'll deal with all such tuples - * at once right after leaving the index page or at end of scan.) - * In case if caller reverses the indexscan direction it is quite - * possible that the same item might get entered multiple times. - * But, we don't detect that; instead, we just forget any excess - * entries. + * Yes, so mark it by setting the LP_DEAD state in the item flags. */ - if (so->killedItems == NULL) - so->killedItems = (HashScanPosItem *)palloc(MaxIndexTuplesPerPage * sizeof(HashScanPosItem)); + ItemIdMarkDead(PageGetItemId(page, offnum)); - if (so->numKilled < MaxIndexTuplesPerPage) { - so->killedItems[so->numKilled].heapTid = so->hashso_heappos; - so->killedItems[so->numKilled].indexOffset = - ItemPointerGetOffsetNumber(&(so->hashso_curpos)); - so->numKilled++; - } + /* + * Since this can be redone later if needed, mark as a hint. 
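 * (Illustrative note: because LP_DEAD is only a hint, no WAL record is
 * needed here; if this buffer never makes it to disk, the worst case is
 * that a later scan revisits the heap tuple and sets the bit again.)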
+ */ + MarkBufferDirtyHint(buf, true); } /* @@ -310,7 +285,7 @@ Datum hashgettuple(PG_FUNCTION_ARGS) /* Release read lock on current buffer, but keep it pinned */ if (BufferIsValid(so->hashso_curbuf)) - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK); + _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK); /* Return current heap TID on success */ scan->xs_ctup.t_self = so->hashso_heappos; @@ -385,21 +360,18 @@ Datum hashbeginscan(PG_FUNCTION_ARGS) scan = RelationGetIndexScan(rel, nkeys, norderbys); so = (HashScanOpaque)palloc(sizeof(HashScanOpaqueData)); + so->hashso_bucket_valid = false; + so->hashso_bucket_blkno = 0; so->hashso_curbuf = InvalidBuffer; - so->hashso_bucket_buf = InvalidBuffer; - so->hashso_split_bucket_buf = InvalidBuffer; /* set position invalid (this will cause _hash_first call) */ ItemPointerSetInvalid(&(so->hashso_curpos)); ItemPointerSetInvalid(&(so->hashso_heappos)); - so->hashso_buc_populated = false; - so->hashso_buc_split = false; - - so->killedItems = NULL; - so->numKilled = 0; - scan->opaque = so; + /* register scan in case we change pages it's using */ + _hash_regscan(scan); + PG_RETURN_POINTER(scan); } @@ -416,13 +388,14 @@ Datum hashrescan(PG_FUNCTION_ARGS) Relation rel = scan->indexRelation; /* release any pin we still hold */ - if (so->numKilled > 0) { - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE); - _hash_kill_items(scan); - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK); - } + if (BufferIsValid(so->hashso_curbuf)) + _hash_dropbuf(rel, so->hashso_curbuf); + so->hashso_curbuf = InvalidBuffer; - _hash_dropscanbuf(rel, so); + /* release lock on bucket, too */ + if (so->hashso_bucket_blkno) + _hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE); + so->hashso_bucket_blkno = 0; /* set position invalid (this will cause _hash_first call) */ ItemPointerSetInvalid(&(so->hashso_curpos)); @@ -434,10 +407,9 @@ Datum hashrescan(PG_FUNCTION_ARGS) rc = memmove_s(scan->keyData, (unsigned)scan->numberOfKeys * sizeof(ScanKeyData), scankey, (unsigned)scan->numberOfKeys * sizeof(ScanKeyData)); securec_check(rc, "", ""); - } - so->hashso_buc_populated = false; - so->hashso_buc_split = false; + so->hashso_bucket_valid = false; + } PG_RETURN_VOID(); } @@ -451,20 +423,18 @@ Datum hashendscan(PG_FUNCTION_ARGS) HashScanOpaque so = (HashScanOpaque)scan->opaque; Relation rel = scan->indexRelation; - /* - * Before leaving current page, deal with any killed items. Also, ensure - * that we acquire lock on current page before calling _hash_kill_items. - */ - if (so->numKilled > 0) { - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE); - _hash_kill_items(scan); - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK); - } + /* don't need scan registered anymore */ + _hash_dropscan(scan); - _hash_dropscanbuf(rel, so); + /* release any pin we still hold */ + if (BufferIsValid(so->hashso_curbuf)) + _hash_dropbuf(rel, so->hashso_curbuf); + so->hashso_curbuf = InvalidBuffer; - if (so->killedItems != NULL) - pfree(so->killedItems); + /* release lock on bucket, too */ + if (so->hashso_bucket_blkno) + _hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE); + so->hashso_bucket_blkno = 0; pfree(so); scan->opaque = NULL; @@ -495,9 +465,6 @@ Datum hashrestrpos(PG_FUNCTION_ARGS) * The set of target tuples is specified via a callback routine that tells * whether any given heap tuple (identified by ItemPointer) is being deleted. * - * This function also deletes the tuples that are moved by split to other - * bucket. 
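 *
 * (For reference, a sketch of the bucket-to-block mapping the vacuum
 * loop below relies on, based on the upstream macro and shown only for
 * illustration:
 *
 *     BUCKET_TO_BLKNO(metap, B) =
 *         B + (B ? metap->hashm_spares[_hash_log2(B + 1) - 1] : 0) + 1
 *
 * A bucket's primary page is displaced by every overflow and bitmap
 * page allocated in earlier split points; spares[] entries for existing
 * split points never change, which is why a stale metapage copy stays
 * usable for address arithmetic.)
 *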
- * * Result: a palloc'd struct containing statistical info for VACUUM displays. */ Datum hashbulkdelete(PG_FUNCTION_ARGS) @@ -513,24 +480,29 @@ Datum hashbulkdelete(PG_FUNCTION_ARGS) Bucket orig_maxbucket; Bucket cur_maxbucket; Bucket cur_bucket; - Buffer metabuf = InvalidBuffer; + Buffer metabuf; HashMetaPage metap; - HashMetaPage cachedmetap; + HashMetaPageData local_metapage; + errno_t rc; tuples_removed = 0; num_index_tuples = 0; /* - * We need a copy of the metapage so that we can use its hashm_spares[] - * values to compute bucket page addresses, but a cached copy should be - * good enough. (If not, we'll detect that further down and refresh the - * cache as necessary.) + * Read the metapage to fetch original bucket and tuple counts. Also, we + * keep a copy of the last-seen metapage so that we can use its + * hashm_spares[] values to compute bucket page addresses. This is a bit + * hokey but perfectly safe, since the interesting entries in the spares + * array cannot change under us; and it beats rereading the metapage for + * each bucket. */ - cachedmetap = _hash_getcachedmetap(rel, &metabuf, false); - Assert(cachedmetap != NULL); - - orig_maxbucket = cachedmetap->hashm_maxbucket; - orig_ntuples = cachedmetap->hashm_ntuples; + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE); + metap = HashPageGetMeta(BufferGetPage(metabuf)); + orig_maxbucket = metap->hashm_maxbucket; + orig_ntuples = metap->hashm_ntuples; + rc = memcpy_s(&local_metapage, sizeof(local_metapage), metap, sizeof(local_metapage)); + securec_check(rc, "", ""); + _hash_relbuf(rel, metabuf); /* Scan the buckets that we know exist */ cur_bucket = 0; @@ -540,85 +512,90 @@ loop_top: while (cur_bucket <= cur_maxbucket) { BlockNumber bucket_blkno; BlockNumber blkno; - Buffer bucket_buf; - Buffer buf; - HashPageOpaque bucket_opaque; - Page page; - bool split_cleanup = false; + bool bucket_dirty = false; /* Get address of bucket's start page */ - bucket_blkno = BUCKET_TO_BLKNO(cachedmetap, cur_bucket); + bucket_blkno = BUCKET_TO_BLKNO(&local_metapage, cur_bucket); + /* Exclusive-lock the bucket so we can shrink it */ + _hash_getlock(rel, bucket_blkno, HASH_EXCLUSIVE); + + /* Shouldn't have any active scans locally, either */ + if (_hash_has_active_scan(rel, cur_bucket)) + ereport(ERROR, + (errcode(ERRCODE_SQL_ROUTINE_EXCEPTION), (errmsg("hash index has active scan during VACUUM.")))); + + /* Scan each page in bucket */ blkno = bucket_blkno; + while (BlockNumberIsValid(blkno)) { + Buffer buf; + Page page; + HashPageOpaque opaque; + OffsetNumber offno; + OffsetNumber maxoffno; + OffsetNumber deletable[MaxOffsetNumber]; + int ndeletable = 0; - /* - * We need to acquire a cleanup lock on the primary bucket page to out - * wait concurrent scans before deleting the dead tuples. - */ - buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, info->strategy); - LockBufferForCleanup(buf); - _hash_checkpage(rel, buf, LH_BUCKET_PAGE); + vacuum_delay_point(); - page = BufferGetPage(buf); - bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page); + buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE, info->strategy); + page = BufferGetPage(buf); + opaque = (HashPageOpaque)PageGetSpecialPointer(page); + Assert(opaque->hasho_bucket == cur_bucket); - /* - * If the bucket contains tuples that are moved by split, then we need - * to delete such tuples. We can't delete such tuples if the split - * operation on bucket is not finished as those are needed by scans. 
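 *
 * (Illustrative sketch of what "moved by split" means: a tuple is
 * garbage in the old bucket when its hash key now maps elsewhere under
 * the enlarged bucket count:
 *
 *     bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
 *                                   maxbucket, highmask, lowmask);
 *     if (bucket != cur_bucket)
 *         the tuple was copied to the new bucket and can be removed
 * )
 *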
- */ - if (!H_BUCKET_BEING_SPLIT(bucket_opaque) && H_NEEDS_SPLIT_CLEANUP(bucket_opaque)) { - split_cleanup = true; + /* Scan each tuple in page */ + maxoffno = PageGetMaxOffsetNumber(page); + for (offno = FirstOffsetNumber; offno <= maxoffno; offno = OffsetNumberNext(offno)) { + IndexTuple itup; + ItemPointer htup; + + itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, offno)); + htup = &(itup->t_tid); + if (callback(htup, callback_state, InvalidOid, InvalidBktId)) { + /* mark the item for deletion */ + deletable[ndeletable++] = offno; + tuples_removed += 1; + } else + num_index_tuples += 1; + } /* - * This bucket might have been split since we last held a lock on - * the metapage. If so, hashm_maxbucket, hashm_highmask and - * hashm_lowmask might be old enough to cause us to fail to remove - * tuples left behind by the most recent split. To prevent that, - * now that the primary page of the target bucket has been locked - * (and thus can't be further split), check whether we need to - * update our cached metapage data. + * Apply deletions and write page if needed, advance to next page. */ - Assert(bucket_opaque->hasho_prevblkno != InvalidBlockNumber); - if (bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket) { - cachedmetap = _hash_getcachedmetap(rel, &metabuf, true); - Assert(cachedmetap != NULL); - } + blkno = opaque->hasho_nextblkno; + + if (ndeletable > 0) { + PageIndexMultiDelete(page, deletable, ndeletable); + _hash_wrtbuf(rel, buf); + bucket_dirty = true; + } else + _hash_relbuf(rel, buf); } - bucket_buf = buf; + /* If we deleted anything, try to compact free space */ + if (bucket_dirty) + _hash_squeezebucket(rel, cur_bucket, bucket_blkno, info->strategy); - hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy, - cachedmetap->hashm_maxbucket, - cachedmetap->hashm_highmask, - cachedmetap->hashm_lowmask, &tuples_removed, - &num_index_tuples, split_cleanup, - callback, callback_state); - - _hash_dropbuf(rel, bucket_buf); + /* Release bucket lock */ + _hash_droplock(rel, bucket_blkno, HASH_EXCLUSIVE); /* Advance to next bucket */ cur_bucket++; } - if (BufferIsInvalid(metabuf)) - metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE); - /* Write-lock metapage and check for split since we started */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE); metap = HashPageGetMeta(BufferGetPage(metabuf)); - if (cur_maxbucket != metap->hashm_maxbucket) { /* There's been a split, so process the additional bucket(s) */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - cachedmetap = _hash_getcachedmetap(rel, &metabuf, true); - Assert(cachedmetap != NULL); - cur_maxbucket = cachedmetap->hashm_maxbucket; + cur_maxbucket = metap->hashm_maxbucket; + rc = memcpy_s(&local_metapage, sizeof(local_metapage), metap, sizeof(local_metapage)); + securec_check(rc, "", ""); + _hash_relbuf(rel, metabuf); goto loop_top; } /* Okay, we're really done. Update tuple count in metapage. 
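 *
 * (Illustrative note on the retry above: if a split raced past us,
 * cur_maxbucket is refreshed from the metapage and control jumps back
 * to loop_top to vacuum just the newly created buckets. Each pass
 * strictly raises cur_maxbucket and splits are finite, so the retry
 * terminates once no further split intervenes.)
 *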
*/ - START_CRIT_SECTION(); if (orig_maxbucket == metap->hashm_maxbucket && orig_ntuples == metap->hashm_ntuples) { /* * No one has split or inserted anything since start of scan, so @@ -639,27 +616,7 @@ loop_top: num_index_tuples = metap->hashm_ntuples; } - MarkBufferDirty(metabuf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - xl_hash_update_meta_page xlrec; - XLogRecPtr recptr; - - xlrec.ntuples = metap->hashm_ntuples; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashUpdateMetaPage); - - XLogRegisterBuffer(0, metabuf, REGBUF_STANDARD); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_UPDATE_META_PAGE); - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - END_CRIT_SECTION(); - - _hash_relbuf(rel, metabuf); + _hash_wrtbuf(rel, metabuf); /* return statistics */ if (stats == NULL) @@ -695,244 +652,9 @@ Datum hashvacuumcleanup(PG_FUNCTION_ARGS) PG_RETURN_POINTER(stats); } -/* - * Helper function to perform deletion of index entries from a bucket. - * - * This function expects that the caller has acquired a cleanup lock on the - * primary bucket page, and will return with a write lock again held on the - * primary bucket page. The lock won't necessarily be held continuously, - * though, because we'll release it when visiting overflow pages. - * - * It would be very bad if this function cleaned a page while some other - * backend was in the midst of scanning it, because hashgettuple assumes - * that the next valid TID will be greater than or equal to the current - * valid TID. There can't be any concurrent scans in progress when we first - * enter this function because of the cleanup lock we hold on the primary - * bucket page, but as soon as we release that lock, there might be. We - * handle that by conspiring to prevent those scans from passing our cleanup - * scan. To do that, we lock the next page in the bucket chain before - * releasing the lock on the previous page. (This type of lock chaining is - * not ideal, so we might want to look for a better solution at some point.) - * - * We need to retain a pin on the primary bucket to ensure that no concurrent - * split can start. 
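 *
 * (Worked example of the hazard, illustrative: a scan paused after
 * returning the tuple at offset 7 re-finds its position by searching
 * from offset 7 upward for the remembered TID. If cleanup compacted the
 * page meanwhile, that tuple may now sit at offset 4, the re-find
 * misses it, and the scan silently skips live entries; hence the
 * chained page locks described above.)
 *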
- */ -void hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf, - BlockNumber bucket_blkno, BufferAccessStrategy bstrategy, - uint32 maxbucket, uint32 highmask, uint32 lowmask, - double *tuples_removed, double *num_index_tuples, - bool split_cleanup, - IndexBulkDeleteCallback callback, void *callback_state) +void hash_redo(XLogReaderState *record) { - BlockNumber blkno; - Buffer buf; - Bucket new_bucket PG_USED_FOR_ASSERTS_ONLY = InvalidBucket; - bool bucket_dirty = false; - - blkno = bucket_blkno; - buf = bucket_buf; - - if (split_cleanup) - new_bucket = _hash_get_newbucket_from_oldbucket(rel, cur_bucket, - lowmask, maxbucket); - - /* Scan each page in bucket */ - for (;;) { - HashPageOpaque opaque; - OffsetNumber offno; - OffsetNumber maxoffno; - Buffer next_buf; - Page page; - OffsetNumber deletable[MaxOffsetNumber]; - int ndeletable = 0; - bool retain_pin = false; - bool clear_dead_marking = false; - - vacuum_delay_point(); - - page = BufferGetPage(buf); - opaque = (HashPageOpaque) PageGetSpecialPointer(page); - - /* Scan each tuple in page */ - maxoffno = PageGetMaxOffsetNumber(page); - for (offno = FirstOffsetNumber; offno <= maxoffno; offno = OffsetNumberNext(offno)) { - ItemPointer htup; - IndexTuple itup; - Bucket bucket; - bool kill_tuple = false; - - itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offno)); - htup = &(itup->t_tid); - - /* - * To remove the dead tuples, we strictly want to rely on results - * of callback function. refer btvacuumpage for detailed reason. - */ - if (callback && callback(htup, callback_state, InvalidOid, InvalidBktId)) { - kill_tuple = true; - if (tuples_removed) - *tuples_removed += 1; - } else if (split_cleanup) { - /* delete the tuples that are moved by split. */ - bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup), - maxbucket, highmask, lowmask); - /* mark the item for deletion */ - if (bucket != cur_bucket) { - /* - * We expect tuples to either belong to current bucket or - * new_bucket. This is ensured because we don't allow - * further splits from bucket that contains garbage. See - * comments in _hash_expandtable. - */ - Assert(bucket == new_bucket); - kill_tuple = true; - } - } - - if (kill_tuple) { - /* mark the item for deletion */ - deletable[ndeletable++] = offno; - } else { - /* we're keeping it, so count it */ - if (num_index_tuples) - *num_index_tuples += 1; - } - } - - /* retain the pin on primary bucket page till end of bucket scan */ - if (blkno == bucket_blkno) - retain_pin = true; - else - retain_pin = false; - - blkno = opaque->hasho_nextblkno; - - /* - * Apply deletions, advance to next page and write page if needed. - */ - if (ndeletable > 0) { - /* No ereport(ERROR) until changes are logged */ - START_CRIT_SECTION(); - - PageIndexMultiDelete(page, deletable, ndeletable); - bucket_dirty = true; - - /* - * Let us mark the page as clean if vacuum removes the DEAD tuples - * from an index page. We do this by clearing - * LH_PAGE_HAS_DEAD_TUPLES flag. - */ - if (tuples_removed && *tuples_removed > 0 && H_HAS_DEAD_TUPLES(opaque)) { - opaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES; - clear_dead_marking = true; - } - - MarkBufferDirty(buf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - xl_hash_delete xlrec; - XLogRecPtr recptr; - - xlrec.clear_dead_marking = clear_dead_marking; - xlrec.is_primary_bucket_page = (buf == bucket_buf) ? 
true : false; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashDelete); - - /* - * bucket buffer needs to be registered to ensure that we can - * acquire a cleanup lock on it during replay. - */ - if (!xlrec.is_primary_bucket_page) { - XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD | REGBUF_NO_IMAGE); - } - - XLogRegisterBuffer(1, buf, REGBUF_STANDARD); - XLogRegisterBufData(1, (char *) deletable, ndeletable * sizeof(OffsetNumber)); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_DELETE); - if (!xlrec.is_primary_bucket_page) { - PageSetLSN(BufferGetPage(bucket_buf), recptr); - } - PageSetLSN(BufferGetPage(buf), recptr); - } - - END_CRIT_SECTION(); - } - - /* bail out if there are no more pages to scan. */ - if (!BlockNumberIsValid(blkno)) - break; - - next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE, - LH_OVERFLOW_PAGE, - bstrategy); - - /* - * release the lock on previous page after acquiring the lock on next - * page - */ - if (retain_pin) - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - else - _hash_relbuf(rel, buf); - - buf = next_buf; - } - - /* - * lock the bucket page to clear the garbage flag and squeeze the bucket. - * if the current buffer is same as bucket buffer, then we already have - * lock on bucket page. - */ - if (buf != bucket_buf) { - _hash_relbuf(rel, buf); - LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE); - } - - /* - * Clear the garbage flag from bucket after deleting the tuples that are - * moved by split. We purposefully clear the flag before squeeze bucket, - * so that after restart, vacuum shouldn't again try to delete the moved - * by split tuples. - */ - if (split_cleanup) { - HashPageOpaque bucket_opaque; - Page page; - - page = BufferGetPage(bucket_buf); - bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page); - - /* No ereport(ERROR) until changes are logged */ - START_CRIT_SECTION(); - - bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP; - MarkBufferDirty(bucket_buf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - XLogRecPtr recptr; - - XLogBeginInsert(); - XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_CLEANUP); - PageSetLSN(page, recptr); - } - - END_CRIT_SECTION(); - } - - /* - * If we have deleted anything, try to compact free space. For squeezing - * the bucket, we must have a cleanup lock, else it can impact the - * ordering of tuples for a scan that has started before it. - */ - if (bucket_dirty && IsBufferCleanupOK(bucket_buf)) - _hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf, bstrategy); - else - LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK); + ereport(PANIC, (errmsg("hash_redo: unimplemented"))); } Datum hashmerge(PG_FUNCTION_ARGS) diff --git a/src/gausskernel/storage/access/hash/hash_xlog.cpp b/src/gausskernel/storage/access/hash/hash_xlog.cpp deleted file mode 100644 index 9bf435b2b..000000000 --- a/src/gausskernel/storage/access/hash/hash_xlog.cpp +++ /dev/null @@ -1,861 +0,0 @@ -/* ------------------------------------------------------------------------- - * - * hash_xlog.cpp - * WAL replay logic for hash index. - * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. 
- * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * IDENTIFICATION - * src/gausskernel/storage/access/hash/hash_xlog.cpp - * - * ------------------------------------------------------------------------- - */ - -#include "access/xlogproc.h" -#include "access/hash.h" -#include "access/hash_xlog.h" -#include "access/xlogutils.h" -#include "access/xlog.h" -#include "access/transam.h" -#include "access/xlogproc.h" -#include "storage/procarray.h" -#include "miscadmin.h" - -/* - * replay a hash index meta page - */ -static void hash_xlog_init_meta_page(XLogReaderState *record) -{ - RedoBufferInfo metabuf; - ForkNumber forknum; - - /* create the index' metapage */ - XLogInitBufferForRedo(record, 0, &metabuf); - Assert(BufferIsValid(metabuf.buf)); - HashRedoInitMetaPageOperatorPage(&metabuf, XLogRecGetData(record)); - MarkBufferDirty(metabuf.buf); - - /* - * Force the on-disk state of init forks to always be in sync with the - * state in shared buffers. See XLogReadBufferForRedoExtended. We need - * special handling for init forks as create index operations don't log a - * full page image of the metapage. - */ - XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL); - if (forknum == INIT_FORKNUM) - FlushOneBuffer(metabuf.buf); - - /* all done */ - UnlockReleaseBuffer(metabuf.buf); -} - -/* - * replay a hash index bitmap page - */ -static void hash_xlog_init_bitmap_page(XLogReaderState *record) -{ - RedoBufferInfo bitmapbuf; - RedoBufferInfo metabuf; - ForkNumber forknum; - - /* - * Initialize bitmap page - */ - XLogInitBufferForRedo(record, 0, &bitmapbuf); - HashRedoInitBitmapPageOperatorBitmapPage(&bitmapbuf, XLogRecGetData(record)); - MarkBufferDirty(bitmapbuf.buf); - - /* - * Force the on-disk state of init forks to always be in sync with the - * state in shared buffers. See XLogReadBufferForRedoExtended. We need - * special handling for init forks as create index operations don't log a - * full page image of the metapage. - */ - XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL); - if (forknum == INIT_FORKNUM) - FlushOneBuffer(bitmapbuf.buf); - UnlockReleaseBuffer(bitmapbuf.buf); - - /* add the new bitmap page to the metapage's list of bitmaps */ - if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO) { - /* - * Note: in normal operation, we'd update the metapage while still - * holding lock on the bitmap page. But during replay it's not - * necessary to hold that lock, since nobody can see it yet; the - * creating transaction hasn't yet committed. 
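 *
 * (For orientation, the redo idiom used throughout this deleted file,
 * condensed and illustrative; blk_id stands for the block reference
 * number within the record:
 *
 *     if (XLogReadBufferForRedo(record, blk_id, &buffer) == BLK_NEEDS_REDO) {
 *         apply the logged change to the page;
 *         MarkBufferDirty(buffer.buf);
 *     }
 *     if (BufferIsValid(buffer.buf))
 *         UnlockReleaseBuffer(buffer.buf);
 *
 * BLK_NEEDS_REDO is reported only when the page LSN shows the record
 * has not yet been applied, which is what keeps replay idempotent.)
 *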
- */ - HashRedoInitBitmapPageOperatorMetaPage(&metabuf); - MarkBufferDirty(metabuf.buf); - - XLogRecGetBlockTag(record, 1, NULL, &forknum, NULL); - if (forknum == INIT_FORKNUM) - FlushOneBuffer(metabuf.buf); - } - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); -} - -/* - * replay a hash index insert without split - */ -static void hash_xlog_insert(XLogReaderState *record) -{ - RedoBufferInfo buffer; - RedoBufferInfo metabuf; - - if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) { - Size datalen; - char *datapos = XLogRecGetBlockData(record, 0, &datalen); - - HashRedoInsertOperatorPage(&buffer, XLogRecGetData(record), datapos, datalen); - MarkBufferDirty(buffer.buf); - } - if (BufferIsValid(buffer.buf)) - UnlockReleaseBuffer(buffer.buf); - - if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO) { - /* - * Note: in normal operation, we'd update the metapage while still - * holding lock on the page we inserted into. But during replay it's - * not necessary to hold that lock, since no other index updates can - * be happening concurrently. - */ - HashRedoInsertOperatorMetaPage(&metabuf); - MarkBufferDirty(metabuf.buf); - } - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); -} - -/* - * replay addition of overflow page for hash index - */ -static void hash_xlog_add_ovfl_page(XLogReaderState* record) -{ - RedoBufferInfo leftbuf; - RedoBufferInfo ovflbuf; - RedoBufferInfo metabuf; - BlockNumber leftblk; - BlockNumber rightblk; - char *data = NULL; - Size datalen; - - XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk); - XLogRecGetBlockTag(record, 1, NULL, NULL, &leftblk); - - XLogInitBufferForRedo(record, 0, &ovflbuf); - Assert(BufferIsValid(ovflbuf.buf)); - - data = XLogRecGetBlockData(record, 0, &datalen); - HashRedoAddOvflPageOperatorOvflPage(&ovflbuf, leftblk, data, datalen); - MarkBufferDirty(ovflbuf.buf); - - if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO) { - HashRedoAddOvflPageOperatorLeftPage(&leftbuf, rightblk); - MarkBufferDirty(leftbuf.buf); - } - - if (BufferIsValid(leftbuf.buf)) - UnlockReleaseBuffer(leftbuf.buf); - UnlockReleaseBuffer(ovflbuf.buf); - - /* - * Note: in normal operation, we'd update the bitmap and meta page while - * still holding lock on the overflow pages. But during replay it's not - * necessary to hold those locks, since no other index updates can be - * happening concurrently. 
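 *
 * (Illustrative summary of the record layout replayed here: block 0 is
 * the new overflow page, block 1 the previous page in the bucket chain,
 * block 2 the free-space bitmap page, block 3 an optional newly-created
 * bitmap page, and block 4 the metapage. Replay visits them one at a
 * time, which is safe only because recovery is single-writer.)
 *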
- */ - if (XLogRecHasBlockRef(record, 2)) { - RedoBufferInfo mapbuffer; - - if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO) { - data = XLogRecGetBlockData(record, 2, &datalen); - - HashRedoAddOvflPageOperatorMapPage(&mapbuffer, data); - MarkBufferDirty(mapbuffer.buf); - } - if (BufferIsValid(mapbuffer.buf)) - UnlockReleaseBuffer(mapbuffer.buf); - } - - if (XLogRecHasBlockRef(record, 3)) { - RedoBufferInfo newmapbuf; - - XLogInitBufferForRedo(record, 3, &newmapbuf); - - HashRedoAddOvflPageOperatorNewmapPage(&newmapbuf, XLogRecGetData(record)); - MarkBufferDirty(newmapbuf.buf); - - UnlockReleaseBuffer(newmapbuf.buf); - } - - if (XLogReadBufferForRedo(record, 4, &metabuf) == BLK_NEEDS_REDO) { - data = XLogRecGetBlockData(record, 4, &datalen); - - HashRedoAddOvflPageOperatorMetaPage(&metabuf, XLogRecGetData(record), data, datalen); - MarkBufferDirty(metabuf.buf); - } - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); -} - -/* - * replay allocation of page for split operation - */ -static void hash_xlog_split_allocate_page(XLogReaderState *record) -{ - RedoBufferInfo oldbuf; - RedoBufferInfo newbuf; - RedoBufferInfo metabuf; - Size datalen PG_USED_FOR_ASSERTS_ONLY; - char *data = NULL; - XLogRedoAction action; - - /* - * To be consistent with normal operation, here we take cleanup locks on - * both the old and new buckets even though there can't be any concurrent - * inserts. - */ - - /* replay the record for old bucket */ - action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &oldbuf); - - /* - * Note that we still update the page even if it was restored from a full - * page image, because the special space is not included in the image. - */ - if (action == BLK_NEEDS_REDO || action == BLK_RESTORED) { - HashRedoSplitAllocatePageOperatorObukPage(&oldbuf, XLogRecGetData(record)); - MarkBufferDirty(oldbuf.buf); - } - - /* replay the record for new bucket */ - XLogInitBufferForRedo(record, 1, &newbuf); - HashRedoSplitAllocatePageOperatorNbukPage(&newbuf, XLogRecGetData(record)); - if (!IsBufferCleanupOK(newbuf.buf)) - elog(PANIC, "hash_xlog_split_allocate_page: failed to acquire cleanup lock"); - MarkBufferDirty(newbuf.buf); - - /* - * We can release the lock on old bucket early as well but doing here to - * consistent with normal operation. - */ - if (BufferIsValid(oldbuf.buf)) - UnlockReleaseBuffer(oldbuf.buf); - if (BufferIsValid(newbuf.buf)) - UnlockReleaseBuffer(newbuf.buf); - - /* - * Note: in normal operation, we'd update the meta page while still - * holding lock on the old and new bucket pages. But during replay it's - * not necessary to hold those locks, since no other bucket splits can be - * happening concurrently. 
- */ - - /* replay the record for metapage changes */ - if (XLogReadBufferForRedo(record, 2, &metabuf) == BLK_NEEDS_REDO) { - data = XLogRecGetBlockData(record, 2, &datalen); - - HashRedoSplitAllocatePageOperatorMetaPage(&metabuf, XLogRecGetData(record), data); - MarkBufferDirty(metabuf.buf); - } - - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); -} - -/* - * replay of split operation - */ -static void hash_xlog_split_page(XLogReaderState *record) -{ - RedoBufferInfo buf; - - if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED) - elog(ERROR, "Hash split record did not contain a full-page image"); - - if (BufferIsValid(buf.buf)) - UnlockReleaseBuffer(buf.buf); -} - -/* - * replay completion of split operation - */ -static void hash_xlog_split_complete(XLogReaderState *record) -{ - RedoBufferInfo oldbuf; - RedoBufferInfo newbuf; - XLogRedoAction action; - - /* replay the record for old bucket */ - action = XLogReadBufferForRedo(record, 0, &oldbuf); - - /* - * Note that we still update the page even if it was restored from a full - * page image, because the bucket flag is not included in the image. - */ - if (action == BLK_NEEDS_REDO || action == BLK_RESTORED) { - HashRedoSplitCompleteOperatorObukPage(&oldbuf, XLogRecGetData(record)); - MarkBufferDirty(oldbuf.buf); - } - if (BufferIsValid(oldbuf.buf)) - UnlockReleaseBuffer(oldbuf.buf); - - /* replay the record for new bucket */ - action = XLogReadBufferForRedo(record, 1, &newbuf); - - /* - * Note that we still update the page even if it was restored from a full - * page image, because the bucket flag is not included in the image. - */ - if (action == BLK_NEEDS_REDO || action == BLK_RESTORED) { - HashRedoSplitCompleteOperatorNbukPage(&newbuf, XLogRecGetData(record)); - MarkBufferDirty(newbuf.buf); - } - if (BufferIsValid(newbuf.buf)) - UnlockReleaseBuffer(newbuf.buf); -} - -/* - * replay move of page contents for squeeze operation of hash index - */ -static void hash_xlog_move_page_contents(XLogReaderState *record) -{ - XLogRecPtr lsn = record->EndRecPtr; - xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) XLogRecGetData(record); - RedoBufferInfo bucketbuf; - RedoBufferInfo writebuf; - RedoBufferInfo deletebuf; - XLogRedoAction action; - - bucketbuf.buf = InvalidBuffer; - writebuf.buf = InvalidBuffer; - deletebuf.buf = InvalidBuffer; - - /* - * Ensure we have a cleanup lock on primary bucket page before we start - * with the actual replay operation. This is to ensure that neither a - * scan can start nor a scan can be already-in-progress during the replay - * of this operation. If we allow scans during this operation, then they - * can miss some records or show the same record multiple times. - */ - if (xldata->is_prim_bucket_same_wrt) { - action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf); - } else { - /* - * we don't care for return value as the purpose of reading bucketbuf - * is to ensure a cleanup lock on primary bucket page. 
- */ - (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf); - - PageSetLSN(bucketbuf.pageinfo.page, lsn); - - action = XLogReadBufferForRedo(record, 1, &writebuf); - } - - /* replay the record for adding entries in overflow buffer */ - if (action == BLK_NEEDS_REDO) { - char *data = NULL; - Size datalen; - - data = XLogRecGetBlockData(record, 1, &datalen); - - HashXlogMoveAddPageOperatorPage(&writebuf, XLogRecGetData(record), (void *)data, datalen); - - MarkBufferDirty(writebuf.buf); - } - - /* replay the record for deleting entries from overflow buffer */ - if (XLogReadBufferForRedo(record, 2, &deletebuf) == BLK_NEEDS_REDO) { - char *ptr = NULL; - Size len; - - ptr = XLogRecGetBlockData(record, 2, &len); - - HashXlogMoveDeleteOvflPageOperatorPage(&deletebuf, (void *)ptr, len); - - MarkBufferDirty(deletebuf.buf); - } - - /* - * Replay is complete, now we can release the buffers. We release locks at - * end of replay operation to ensure that we hold lock on primary bucket - * page till end of operation. We can optimize by releasing the lock on - * write buffer as soon as the operation for same is complete, if it is - * not same as primary bucket page, but that doesn't seem to be worth - * complicating the code. - */ - if (BufferIsValid(deletebuf.buf)) - UnlockReleaseBuffer(deletebuf.buf); - - if (BufferIsValid(writebuf.buf)) - UnlockReleaseBuffer(writebuf.buf); - - if (BufferIsValid(bucketbuf.buf)) - UnlockReleaseBuffer(bucketbuf.buf); -} - -/* - * replay squeeze page operation of hash index - */ -static void hash_xlog_squeeze_page(XLogReaderState *record) -{ - XLogRecPtr lsn = record->EndRecPtr; - xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record); - RedoBufferInfo bucketbuf; - RedoBufferInfo writebuf; - RedoBufferInfo ovflbuf; - RedoBufferInfo prevbuf; - RedoBufferInfo mapbuf; - XLogRedoAction action; - - bucketbuf.buf = InvalidBuffer; - prevbuf.buf = InvalidBuffer; - - /* - * Ensure we have a cleanup lock on primary bucket page before we start - * with the actual replay operation. This is to ensure that neither a - * scan can start nor a scan can be already-in-progress during the replay - * of this operation. If we allow scans during this operation, then they - * can miss some records or show the same record multiple times. - */ - if (xldata->is_prim_bucket_same_wrt) { - action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf); - } else { - /* - * we don't care for return value as the purpose of reading bucketbuf - * is to ensure a cleanup lock on primary bucket page. 
- */ - (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf); - - PageSetLSN(bucketbuf.pageinfo.page, lsn); - - action = XLogReadBufferForRedo(record, 1, &writebuf); - } - - /* replay the record for adding entries in overflow buffer */ - if (action == BLK_NEEDS_REDO) { - char *data = NULL; - Size datalen; - - data = XLogRecGetBlockData(record, 1, &datalen); - - HashXlogSqueezeAddPageOperatorPage(&writebuf, XLogRecGetData(record), (void *)data, datalen); - - MarkBufferDirty(writebuf.buf); - } - - /* replay the record for initializing overflow buffer */ - if (XLogReadBufferForRedo(record, 2, &ovflbuf) == BLK_NEEDS_REDO) { - HashXlogSqueezeInitOvflbufOperatorPage(&ovflbuf, XLogRecGetData(record)); - - MarkBufferDirty(ovflbuf.buf); - } - if (BufferIsValid(ovflbuf.buf)) - UnlockReleaseBuffer(ovflbuf.buf); - - /* replay the record for page previous to the freed overflow page */ - if (!xldata->is_prev_bucket_same_wrt && - XLogReadBufferForRedo(record, 3, &prevbuf) == BLK_NEEDS_REDO) { - HashXlogSqueezeUpdatePrevPageOperatorPage(&prevbuf, XLogRecGetData(record)); - - MarkBufferDirty(prevbuf.buf); - } - if (BufferIsValid(prevbuf.buf)) - UnlockReleaseBuffer(prevbuf.buf); - - /* replay the record for page next to the freed overflow page */ - if (XLogRecHasBlockRef(record, 4)) { - RedoBufferInfo nextbuf; - - if (XLogReadBufferForRedo(record, 4, &nextbuf) == BLK_NEEDS_REDO) { - HashXlogSqueezeUpdateNextPageOperatorPage(&nextbuf, XLogRecGetData(record)); - - MarkBufferDirty(nextbuf.buf); - } - if (BufferIsValid(nextbuf.buf)) - UnlockReleaseBuffer(nextbuf.buf); - } - - if (BufferIsValid(writebuf.buf)) - UnlockReleaseBuffer(writebuf.buf); - - if (BufferIsValid(bucketbuf.buf)) - UnlockReleaseBuffer(bucketbuf.buf); - - /* - * Note: in normal operation, we'd update the bitmap and meta page while - * still holding lock on the primary bucket page and overflow pages. But - * during replay it's not necessary to hold those locks, since no other - * index updates can be happening concurrently. - */ - /* replay the record for bitmap page */ - if (XLogReadBufferForRedo(record, 5, &mapbuf) == BLK_NEEDS_REDO) { - char *data = NULL; - Size datalen; - - data = XLogRecGetBlockData(record, 5, &datalen); - HashXlogSqueezeUpdateBitmapOperatorPage(&mapbuf, (void *)data); - - MarkBufferDirty(mapbuf.buf); - } - if (BufferIsValid(mapbuf.buf)) - UnlockReleaseBuffer(mapbuf.buf); - - /* replay the record for meta page */ - if (XLogRecHasBlockRef(record, 6)) { - RedoBufferInfo metabuf; - - if (XLogReadBufferForRedo(record, 6, &metabuf) == BLK_NEEDS_REDO) { - char *data = NULL; - Size datalen; - - data = XLogRecGetBlockData(record, 6, &datalen); - HashXlogSqueezeUpdateMateOperatorPage(&metabuf, (void *)data); - - MarkBufferDirty(metabuf.buf); - } - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); - } -} - -/* - * replay delete operation of hash index - */ -static void hash_xlog_delete(XLogReaderState *record) -{ - XLogRecPtr lsn = record->EndRecPtr; - xl_hash_delete *xldata = (xl_hash_delete *) XLogRecGetData(record); - RedoBufferInfo bucketbuf; - RedoBufferInfo deletebuf; - XLogRedoAction action; - - bucketbuf.buf = InvalidBuffer; - - /* - * Ensure we have a cleanup lock on primary bucket page before we start - * with the actual replay operation. This is to ensure that neither a - * scan can start nor a scan can be already-in-progress during the replay - * of this operation. If we allow scans during this operation, then they - * can miss some records or show the same record multiple times. 
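 *
 * (Illustrative reading of the call below: the fourth argument, true,
 * requests a cleanup lock rather than a plain exclusive lock,
 *
 *     (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL,
 *                                          true, &bucketbuf);
 *
 * so recovery excludes standby scans exactly as the original operation
 * excluded normal scans.)
 *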
- */ - if (xldata->is_primary_bucket_page) { - action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &deletebuf); - } else { - /* - * we don't care for return value as the purpose of reading bucketbuf - * is to ensure a cleanup lock on primary bucket page. - */ - (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf); - - PageSetLSN(bucketbuf.pageinfo.page, lsn); - - action = XLogReadBufferForRedo(record, 1, &deletebuf); - } - - /* replay the record for deleting entries in bucket page */ - if (action == BLK_NEEDS_REDO) { - char *ptr = NULL; - Size len; - - ptr = XLogRecGetBlockData(record, 1, &len); - - HashXlogDeleteBlockOperatorPage(&deletebuf, XLogRecGetData(record), (void *)ptr, len); - - MarkBufferDirty(deletebuf.buf); - } - if (BufferIsValid(deletebuf.buf)) - UnlockReleaseBuffer(deletebuf.buf); - - if (BufferIsValid(bucketbuf.buf)) - UnlockReleaseBuffer(bucketbuf.buf); -} - -/* - * replay split cleanup flag operation for primary bucket page. - */ -static void hash_xlog_split_cleanup(XLogReaderState *record) -{ - RedoBufferInfo buffer; - - if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) { - HashXlogSplitCleanupOperatorPage(&buffer); - - MarkBufferDirty(buffer.buf); - } - if (BufferIsValid(buffer.buf)) - UnlockReleaseBuffer(buffer.buf); -} - -/* - * replay for update meta page - */ -static void hash_xlog_update_meta_page(XLogReaderState *record) -{ - RedoBufferInfo metabuf; - - if (XLogReadBufferForRedo(record, 0, &metabuf) == BLK_NEEDS_REDO) { - HashXlogUpdateMetaOperatorPage(&metabuf, XLogRecGetData(record)); - - MarkBufferDirty(metabuf.buf); - } - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); -} - -/* - * Get the latestRemovedXid from the heap pages pointed at by the index - * tuples being deleted. See also btree_xlog_delete_get_latestRemovedXid, - * on which this function is based. - */ -static TransactionId hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record) -{ - xl_hash_vacuum_one_page *xlrec; - OffsetNumber *unused = NULL; - Buffer ibuffer; - Buffer hbuffer; - Page ipage; - Page hpage; - RelFileNode rnode; - BlockNumber blkno; - ItemId iitemid; - ItemId hitemid; - IndexTuple itup; - BlockNumber hblkno; - OffsetNumber hoffnum; - TransactionId latestRemovedXid = InvalidTransactionId; - int i; - - xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record); - - /* - * If there's nothing running on the standby we don't need to derive a - * full latestRemovedXid value, so use a fast path out of here. This - * returns InvalidTransactionId, and so will conflict with all HS - * transactions; but since we just worked out that that's zero people, - * it's OK. - * - * XXX There is a race condition here, which is that a new backend might - * start just after we look. If so, it cannot need to conflict, but this - * coding will result in throwing a conflict anyway. - */ - if (CountDBBackends(InvalidOid) == 0) - return latestRemovedXid; - - /* - * Check if WAL replay has reached a consistent database state. If not, we - * must PANIC. See the definition of - * btree_xlog_delete_get_latestRemovedXid for more details. - */ - if (!t_thrd.xlog_cxt.reachedConsistency) - elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data"); - - /* - * Get index page. If the DB is consistent, this should not fail, nor - * should any of the heap page fetches below. If one does, we return - * InvalidTransactionId to cancel all HS transactions. 
That's probably - * overkill, but it's safe, and certainly better than panicking here. - */ - XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno); - ibuffer = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL); - - if (!BufferIsValid(ibuffer)) - return InvalidTransactionId; - LockBuffer(ibuffer, HASH_READ); - ipage = (Page) BufferGetPage(ibuffer); - - /* - * Loop through the deleted index items to obtain the TransactionId from - * the heap items they point to. - */ - unused = (OffsetNumber *) ((char *) xlrec + SizeOfHashVacuumOnePage); - - for (i = 0; i < xlrec->ntuples; i++) { - /* - * Identify the index tuple about to be deleted. - */ - iitemid = PageGetItemId(ipage, unused[i]); - itup = (IndexTuple) PageGetItem(ipage, iitemid); - - /* - * Locate the heap page that the index tuple points at - */ - hblkno = ItemPointerGetBlockNumber(&(itup->t_tid)); - hbuffer = XLogReadBufferExtended(xlrec->hnode, MAIN_FORKNUM, hblkno, RBM_NORMAL, NULL); - - if (!BufferIsValid(hbuffer)) { - UnlockReleaseBuffer(ibuffer); - return InvalidTransactionId; - } - LockBuffer(hbuffer, HASH_READ); - hpage = (Page) BufferGetPage(hbuffer); - - /* - * Look up the heap tuple header that the index tuple points at by - * using the heap node supplied with the xlrec. We can't use - * heap_fetch, since it uses ReadBuffer rather than XLogReadBuffer. - * Note that we are not looking at tuple data here, just headers. - */ - hoffnum = ItemPointerGetOffsetNumber(&(itup->t_tid)); - hitemid = PageGetItemId(hpage, hoffnum); - - /* - * Follow any redirections until we find something useful. - */ - while (ItemIdIsRedirected(hitemid)) { - hoffnum = ItemIdGetRedirect(hitemid); - hitemid = PageGetItemId(hpage, hoffnum); - CHECK_FOR_INTERRUPTS(); - } - - /* - * If the heap item has storage, then read the header and use that to - * set latestRemovedXid. - * - * Some LP_DEAD items may not be accessible, so we ignore them. - */ - if (ItemIdHasStorage(hitemid)) { - HeapTupleData tuple; - tuple.t_data = (HeapTupleHeader) PageGetItem(hpage, hitemid); - HeapTupleCopyBaseFromPage(&tuple, &hpage); - HeapTupleHeaderAdvanceLatestRemovedXid(&tuple, &latestRemovedXid); - } else if (ItemIdIsDead(hitemid)) { - /* - * Conjecture: if hitemid is dead then it had xids before the xids - * marked on LP_NORMAL items. So we just ignore this item and move - * onto the next, for the purposes of calculating - * latestRemovedxids. - */ - } else - Assert(!ItemIdIsUsed(hitemid)); - - UnlockReleaseBuffer(hbuffer); - } - - UnlockReleaseBuffer(ibuffer); - - /* - * If all heap tuples were LP_DEAD then we will be returning - * InvalidTransactionId here, which avoids conflicts. This matches - * existing logic which assumes that LP_DEAD tuples must already be older - * than the latestRemovedXid on the cleanup record that set them as - * LP_DEAD, hence must already have generated a conflict. - */ - return latestRemovedXid; -} - -/* - * replay delete operation in hash index to remove - * tuples marked as DEAD during index tuple insertion. - */ -static void hash_xlog_vacuum_one_page(XLogReaderState *record) -{ - RedoBufferInfo buffer; - RedoBufferInfo metabuf; - XLogRedoAction action; - - /* - * If we have any conflict processing to do, it must happen before we - * update the page. - * - * Hash index records that are marked as LP_DEAD and being removed during - * hash index tuple insertion can conflict with standby queries. You might - * think that vacuum records would conflict as well, but we've handled - * that already. 
XLOG_HEAP2_CLEANUP_INFO records provide the highest xid - * cleaned by the vacuum of the heap and so we can resolve any conflicts - * just once when that arrives. After that we know that no conflicts - * exist from individual hash index vacuum records on that index. - */ - if (InHotStandby) { - TransactionId latestRemovedXid = hash_xlog_vacuum_get_latestRemovedXid(record); - RelFileNode rnode; - - XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode); - } - - action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer); - - if (action == BLK_NEEDS_REDO) { - Size len; - - len = XLogRecGetDataLen(record); - HashXlogVacuumOnePageOperatorPage(&buffer, XLogRecGetData(record), len); - - MarkBufferDirty(buffer.buf); - } - if (BufferIsValid(buffer.buf)) - UnlockReleaseBuffer(buffer.buf); - - if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO) { - HashXlogVacuumMateOperatorPage(&metabuf, XLogRecGetData(record)); - MarkBufferDirty(metabuf.buf); - } - if (BufferIsValid(metabuf.buf)) - UnlockReleaseBuffer(metabuf.buf); -} - -void hash_redo(XLogReaderState *record) -{ - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - - switch (info) { - case XLOG_HASH_INIT_META_PAGE: - hash_xlog_init_meta_page(record); - break; - case XLOG_HASH_INIT_BITMAP_PAGE: - hash_xlog_init_bitmap_page(record); - break; - case XLOG_HASH_INSERT: - hash_xlog_insert(record); - break; - case XLOG_HASH_ADD_OVFL_PAGE: - hash_xlog_add_ovfl_page(record); - break; - case XLOG_HASH_SPLIT_ALLOCATE_PAGE: - hash_xlog_split_allocate_page(record); - break; - case XLOG_HASH_SPLIT_PAGE: - hash_xlog_split_page(record); - break; - case XLOG_HASH_SPLIT_COMPLETE: - hash_xlog_split_complete(record); - break; - case XLOG_HASH_MOVE_PAGE_CONTENTS: - hash_xlog_move_page_contents(record); - break; - case XLOG_HASH_SQUEEZE_PAGE: - hash_xlog_squeeze_page(record); - break; - case XLOG_HASH_DELETE: - hash_xlog_delete(record); - break; - case XLOG_HASH_SPLIT_CLEANUP: - hash_xlog_split_cleanup(record); - break; - case XLOG_HASH_UPDATE_META_PAGE: - hash_xlog_update_meta_page(record); - break; - case XLOG_HASH_VACUUM_ONE_PAGE: - hash_xlog_vacuum_one_page(record); - break; - default: - elog(PANIC, "hash_redo: unknown op code %u", info); - } -} - -bool IsHashVacuumPages(XLogReaderState *record) -{ - uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); - - if (XLogRecGetRmid(record) == RM_HASH_ID) { - if (info == XLOG_HASH_DELETE) { - return true; - } - } - - return false; -} diff --git a/src/gausskernel/storage/access/hash/hashinsert.cpp b/src/gausskernel/storage/access/hash/hashinsert.cpp index 6c28075a2..60a87e67f 100644 --- a/src/gausskernel/storage/access/hash/hashinsert.cpp +++ b/src/gausskernel/storage/access/hash/hashinsert.cpp @@ -3,8 +3,8 @@ * hashinsert.cpp * Item insertion in hash tables for Postgres. * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -17,30 +17,21 @@ #include "knl/knl_variable.h" #include "access/hash.h" -#include "access/hash_xlog.h" -#include "access/heapam.h" -#include "access/xloginsert.h" -#include "miscadmin.h" #include "utils/rel.h" #include "utils/rel_gs.h" -#include "storage/lock/lwlock.h" -#include "storage/buf/buf_internals.h" - -static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, RelFileNode hnode); /* * _hash_doinsert() -- Handle insertion of a single index tuple. * - * This routine is called by the public interface routines, hashbuild - * and hashinsert. By here, itup is completely filled in. + * This routine is called by the public interface routines, hashbuild + * and hashinsert. By here, itup is completely filled in. */ -void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel) +void _hash_doinsert(Relation rel, IndexTuple itup) { Buffer buf; - Buffer bucket_buf; Buffer metabuf; HashMetaPage metap; - HashMetaPage usedmetap = NULL; + BlockNumber blkno; Page metapage; Page page; HashPageOpaque pageopaque; @@ -48,7 +39,7 @@ void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel) bool do_expand = false; uint32 hashkey; Bucket bucket; - OffsetNumber itup_off; + /* * Get the hash key for the item (it's stored in the index tuple itself). */ @@ -58,16 +49,16 @@ void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel) itemsz = IndexTupleDSize(*itup); itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we * need to be consistent */ - -restart_insert: - /* - * Read the metapage. We don't lock it yet; HashMaxItemSize() will - * examine pd_pagesize_version, but that can't change so we can examine it - * without a lock. + * Acquire shared split lock so we can compute the target bucket safely + * (see README). */ - metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE); + _hash_getlock(rel, 0, HASH_SHARE); + + /* Read the metapage */ + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE); metapage = BufferGetPage(metabuf); + metap = HashPageGetMeta(metapage); /* * Check whether the item can fit on a hash page at all. (Eventually, we @@ -82,154 +73,87 @@ restart_insert: (unsigned long)HashMaxItemSize(metapage)), errhint("Values larger than a buffer page cannot be indexed."))); - /* Lock the primary bucket page for the target bucket. */ - buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_WRITE, &usedmetap); - Assert(usedmetap != NULL); + /* + * Compute the target bucket number, and convert to block number. + */ + bucket = _hash_hashkey2bucket(hashkey, metap->hashm_maxbucket, metap->hashm_highmask, metap->hashm_lowmask); - /* remember the primary bucket buffer to release the pin on it at end. */ - bucket_buf = buf; + blkno = BUCKET_TO_BLKNO(metap, bucket); - page = BufferGetPage(buf); - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); - bucket = pageopaque->hasho_bucket; + /* release lock on metapage, but keep pin since we'll need it again */ + _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); /* - * If this bucket is in the process of being split, try to finish the - * split before inserting, because that might create room for the - * insertion to proceed without allocating an additional overflow page. 
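 *
 * (Background note, illustrative, using names from the upstream
 * implementation: during a split the old bucket carries the flag tested
 * by H_BUCKET_BEING_SPLIT() and the new bucket the flag tested by
 * H_BUCKET_BEING_POPULATED(); _hash_finish_split() copies the remaining
 * tuples and clears both, after which the insert restarts and
 * recomputes its target bucket.)
 *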
- * It's only interesting to finish the split if we're trying to insert - * into the bucket from which we're removing tuples (the "old" bucket), - * not if we're trying to insert into the bucket into which tuples are - * being moved (the "new" bucket). + * Acquire share lock on target bucket; then we can release split lock. */ - if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf)) { - /* release the lock on bucket buffer, before completing the split. */ - LockBuffer(buf, BUFFER_LOCK_UNLOCK); + _hash_getlock(rel, blkno, HASH_SHARE); - _hash_finish_split(rel, metabuf, buf, bucket, - usedmetap->hashm_maxbucket, - usedmetap->hashm_highmask, - usedmetap->hashm_lowmask); + _hash_droplock(rel, 0, HASH_SHARE); - /* release the pin on old and meta buffer. retry for insert. */ - _hash_dropbuf(rel, buf); - _hash_dropbuf(rel, metabuf); - goto restart_insert; - } + /* Fetch the primary bucket page for the bucket */ + buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE); + page = BufferGetPage(buf); + pageopaque = (HashPageOpaque)PageGetSpecialPointer(page); + Assert(pageopaque->hasho_bucket == bucket); /* Do the insertion */ while (PageGetFreeSpace(page) < itemsz) { - BlockNumber nextblkno; - - /* - * Check if current page has any DEAD tuples. If yes, delete these - * tuples and see if we can get a space for the new item to be - * inserted before moving to the next page in the bucket chain. - */ - if (H_HAS_DEAD_TUPLES(pageopaque)) { - if (IsBufferCleanupOK(buf)) { - _hash_vacuum_one_page(rel, metabuf, buf, heapRel->rd_node); - - if (PageGetFreeSpace(page) >= itemsz) - break; /* OK, now we have enough space */ - } - } - /* * no space on this page; check for an overflow page */ - nextblkno = pageopaque->hasho_nextblkno; + BlockNumber nextblkno = pageopaque->hasho_nextblkno; if (BlockNumberIsValid(nextblkno)) { /* * ovfl page exists; go get it. if it doesn't have room, we'll - * find out next pass through the loop test above. we always - * release both the lock and pin if this is an overflow page, but - * only the lock if this is the primary bucket page, since the pin - * on the primary bucket must be retained throughout the scan. + * find out next pass through the loop test above. */ - if (buf != bucket_buf) - _hash_relbuf(rel, buf); - else - LockBuffer(buf, BUFFER_LOCK_UNLOCK); + _hash_relbuf(rel, buf); buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE); page = BufferGetPage(buf); } else { /* * we're at the end of the bucket chain and we haven't found a * page with enough room. allocate a new overflow page. + * + * release our write lock without modifying buffer */ - - /* release our write lock without modifying buffer */ - LockBuffer(buf, BUFFER_LOCK_UNLOCK); + _hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK); /* chain to a new overflow page */ - buf = _hash_addovflpage(rel, metabuf, buf, (buf == bucket_buf) ? 
true : false); + buf = _hash_addovflpage(rel, metabuf, buf); page = BufferGetPage(buf); /* should fit now, given test above */ Assert(PageGetFreeSpace(page) >= itemsz); } - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); - Assert((pageopaque->hasho_flag & LH_PAGE_TYPE) == LH_OVERFLOW_PAGE); + pageopaque = (HashPageOpaque)PageGetSpecialPointer(page); + Assert(pageopaque->hasho_flag == LH_OVERFLOW_PAGE); Assert(pageopaque->hasho_bucket == bucket); } + /* found page with enough space, so add the item here */ + (void)_hash_pgaddtup(rel, buf, itemsz, itup); + + /* write and release the modified page */ + _hash_wrtbuf(rel, buf); + + /* We can drop the bucket lock now */ + _hash_droplock(rel, blkno, HASH_SHARE); + /* * Write-lock the metapage so we can increment the tuple count. After * incrementing it, check to see if it's time for a split. */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); - /* Do the update. No ereport(ERROR) until changes are logged */ - START_CRIT_SECTION(); - - /* found page with enough space, so add the item here */ - itup_off = _hash_pgaddtup(rel, buf, itemsz, itup); - MarkBufferDirty(buf); - - /* metapage operations */ - metap = HashPageGetMeta(metapage); metap->hashm_ntuples += 1; /* Make sure this stays in sync with _hash_expandtable() */ do_expand = metap->hashm_ntuples > (double)metap->hashm_ffactor * (metap->hashm_maxbucket + 1); - MarkBufferDirty(metabuf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - xl_hash_insert xlrec; - XLogRecPtr recptr; - - xlrec.offnum = itup_off; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashInsert); - - XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD); - - XLogRegisterBuffer(0, buf, REGBUF_STANDARD); - XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup)); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INSERT); - - PageSetLSN(BufferGetPage(buf), recptr); - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - END_CRIT_SECTION(); - - /* drop lock on metapage, but keep pin */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - - /* - * Release the modified page and ensure to release the pin on primary - * page. - */ - _hash_relbuf(rel, buf); - if (buf != bucket_buf) - _hash_dropbuf(rel, bucket_buf); + /* Write out the metapage and drop lock, but keep pin */ + _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK); /* Attempt to split if a split is needed */ if (do_expand) @@ -268,130 +192,3 @@ OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, Size itemsize, IndexTuple return itup_off; } - -/* - * _hash_pgaddmultitup() -- add a tuple vector to a particular page in the index. - * - * This routine has same requirements for locking and tuple ordering as - * _hash_pgaddtup(). - * - * Returns the offset number array at which the tuples were inserted. 
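 *
 * (Worked example of the ordering invariant, illustrative, assuming the
 * upstream _hash_binsearch contract of returning the first offset whose
 * hash key is >= the search value: if a page holds items with hash keys
 * 10, 20, 40 at offsets 1..3, then _hash_binsearch(page, 30) returns
 * offset 3, PageAddItem() shifts the old item up, and the page remains
 * sorted: 10, 20, 30, 40. Binary search keeps each placement O(log n)
 * rather than a linear scan.)
 *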
- */ -void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups, OffsetNumber *itup_offsets, uint16 nitups) -{ - OffsetNumber itup_off; - Page page; - uint32 hashkey; - int i; - - _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); - page = BufferGetPage(buf); - - for (i = 0; i < nitups; i++) { - Size itemsize; - - itemsize = IndexTupleDSize(*itups[i]); - itemsize = MAXALIGN(itemsize); - - /* Find where to insert the tuple (preserving page's hashkey ordering) */ - hashkey = _hash_get_indextuple_hashkey(itups[i]); - itup_off = _hash_binsearch(page, hashkey); - - itup_offsets[i] = itup_off; - - if (PageAddItem(page, (Item) itups[i], itemsize, itup_off, false, false) == InvalidOffsetNumber) - elog(ERROR, "failed to add index item to \"%s\"", RelationGetRelationName(rel)); - } -} - -/* - * _hash_vacuum_one_page - vacuum just one index page. - * - * Try to remove LP_DEAD items from the given page. We must acquire cleanup - * lock on the page being modified before calling this function. - */ - -static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, RelFileNode hnode) -{ - OffsetNumber deletable[MaxOffsetNumber]; - int ndeletable = 0; - OffsetNumber offnum; - OffsetNumber maxoff; - Page page = BufferGetPage(buf); - HashPageOpaque pageopaque; - HashMetaPage metap; - - /* Scan each tuple in page to see if it is marked as LP_DEAD */ - maxoff = PageGetMaxOffsetNumber(page); - for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId itemId = PageGetItemId(page, offnum); - - if (ItemIdIsDead(itemId)) - deletable[ndeletable++] = offnum; - } - - if (ndeletable > 0) { - /* - * Write-lock the meta page so that we can decrement tuple count. - */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); - - /* No ereport(ERROR) until changes are logged */ - START_CRIT_SECTION(); - - PageIndexMultiDelete(page, deletable, ndeletable); - - /* - * Mark the page as not containing any LP_DEAD items. This is not - * certainly true (there might be some that have recently been marked, - * but weren't included in our target-item list), but it will almost - * always be true and it doesn't seem worth an additional page scan to - * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint - * anyway. - */ - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); - pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES; - - metap = HashPageGetMeta(BufferGetPage(metabuf)); - metap->hashm_ntuples -= ndeletable; - - MarkBufferDirty(buf); - MarkBufferDirty(metabuf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - xl_hash_vacuum_one_page xlrec; - XLogRecPtr recptr; - - xlrec.hnode = hnode; - xlrec.ntuples = ndeletable; - - XLogBeginInsert(); - XLogRegisterBuffer(0, buf, REGBUF_STANDARD); - XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage); - - /* - * We need the target-offsets array whether or not we store the - * whole buffer, to allow us to find the latestRemovedXid on a - * standby server. - */ - XLogRegisterData((char *) deletable, - ndeletable * sizeof(OffsetNumber)); - - XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_VACUUM_ONE_PAGE); - - PageSetLSN(BufferGetPage(buf), recptr); - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - END_CRIT_SECTION(); - - /* - * Releasing write lock on meta page as we have updated the tuple - * count. 
- */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - } -} diff --git a/src/gausskernel/storage/access/hash/hashovfl.cpp b/src/gausskernel/storage/access/hash/hashovfl.cpp index acaa4ee62..7356fbc08 100644 --- a/src/gausskernel/storage/access/hash/hashovfl.cpp +++ b/src/gausskernel/storage/access/hash/hashovfl.cpp @@ -3,8 +3,8 @@ * hashovfl.cpp * Overflow page management code for the openGauss hash access method * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -20,12 +20,10 @@ #include "knl/knl_variable.h" #include "access/hash.h" -#include "access/hash_xlog.h" -#include "access/xloginsert.h" -#include "miscadmin.h" #include "utils/rel.h" #include "utils/rel_gs.h" +static Buffer _hash_getovflpage(Relation rel, Buffer metabuf); static uint32 _hash_firstfreebit(uint32 map); /* @@ -48,13 +46,13 @@ static BlockNumber bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum) * Convert to absolute page number by adding the number of bucket pages * that exist before this split point. */ - return (BlockNumber) (_hash_get_totalbuckets(i) + ovflbitnum); + return (BlockNumber)(((uint32)1 << i) + ovflbitnum); } /* * Convert overflow page block number to bit number for free-page bitmap. */ -uint32 _hash_ovflblkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) +static uint32 blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) { uint32 splitnum = metap->hashm_ovflpoint; uint32 i; @@ -62,84 +60,54 @@ uint32 _hash_ovflblkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) /* Determine the split number containing this page */ for (i = 1; i <= splitnum; i++) { - if (ovflblkno <= (BlockNumber) _hash_get_totalbuckets(i)) + if (ovflblkno <= (BlockNumber)((uint32)1 << i)) break; /* oops */ - bitnum = ovflblkno - _hash_get_totalbuckets(i); - - /* - * bitnum has to be greater than number of overflow page added in - * previous split point. The overflow page at this splitnum (i) if any - * should start from (_hash_get_totalbuckets(i) + - * metap->hashm_spares[i - 1] + 1). - */ - if (bitnum > metap->hashm_spares[i - 1] && bitnum <= metap->hashm_spares[i]) - return bitnum - 1; /* -1 to convert 1-based to 0-based */ + bitnum = ovflblkno - ((uint32)1 << i); + if (bitnum <= metap->hashm_spares[i]) + return bitnum - 1; /* -1 to convert 1-based to 0-based */ } - - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid overflow block number %u", ovflblkno))); + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid overflow block number %u", ovflblkno))); return 0; /* keep compiler quiet */ } /* * _hash_addovflpage * - * Add an overflow page to the bucket whose last page is pointed to by 'buf'. + * Add an overflow page to the bucket whose last page is pointed to by 'buf'. * - * On entry, the caller must hold a pin but no lock on 'buf'. The pin is - * dropped before exiting (we assume the caller is not interested in 'buf' - * anymore) if not asked to retain. The pin will be retained only for the - * primary bucket. The returned overflow page will be pinned and - * write-locked; it is guaranteed to be empty. + * On entry, the caller must hold a pin but no lock on 'buf'. The pin is + * dropped before exiting (we assume the caller is not interested in 'buf' + * anymore). 
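The reverted `bitno_to_blkno`/`blkno_to_bitno` pair maps free-space-bitmap bit numbers to physical block numbers: with power-of-two splitpoints, splitpoint `i` is preceded by exactly `2^i` bucket pages, so an overflow page's block number is `2^i` plus its 1-based page number within the overflow area. A self-contained sketch of the forward mapping (the `spares` values here are hypothetical running totals of overflow pages per splitpoint, as kept in the metapage):

```
#include <cstdint>
#include <cstdio>

/* Standalone model of bitno_to_blkno() under power-of-two splitpoints. */
static uint32_t bitno_to_blkno(const uint32_t *spares, uint32_t ovflpoint,
                               uint32_t ovflbitnum)
{
    uint32_t i;

    ovflbitnum += 1; /* zero-based bit number -> 1-based page number */

    /* find the splitpoint whose running overflow-page total covers this page */
    for (i = 1; i < ovflpoint && ovflbitnum > spares[i]; i++)
        ;

    /* 2^i bucket pages precede splitpoint i's overflow pages */
    return (1U << i) + ovflbitnum;
}

int main()
{
    /* hypothetical metapage: ovflpoint = 2, one overflow page at splitpoint 1 */
    uint32_t spares[] = {0, 1, 2};
    /* with 2 bucket pages (blocks 1 and 2) after the metapage at block 0,
     * bit 0 becomes overflow page 1 of splitpoint 1: block 2^1 + 1 = 3 */
    printf("%u\n", bitno_to_blkno(spares, 2, 0));
    return 0;
}
```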
The returned overflow page will be pinned and write-locked; + * it is guaranteed to be empty. * - * The caller must hold a pin, but no lock, on the metapage buffer. - * That buffer is returned in the same state. + * The caller must hold a pin, but no lock, on the metapage buffer. + * That buffer is returned in the same state. + * + * The caller must hold at least share lock on the bucket, to ensure that + * no one else tries to compact the bucket meanwhile. This guarantees that + * 'buf' won't stop being part of the bucket while it's unlocked. * * NB: since this could be executed concurrently by multiple processes, * one should not assume that the returned overflow page will be the * immediate successor of the originally passed 'buf'. Additional overflow * pages might have been added to the bucket chain in between. */ -Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin) +Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf) { Buffer ovflbuf; Page page; Page ovflpage; HashPageOpaque pageopaque; HashPageOpaque ovflopaque; - HashMetaPage metap; - Buffer mapbuf = InvalidBuffer; - Buffer newmapbuf = InvalidBuffer; - BlockNumber blkno; - BlockNumber newmap_blkno = InvalidBlockNumber; - uint32 orig_firstfree; - uint32 splitnum; - uint32 *freep = NULL; - uint32 max_ovflpg; - uint32 bit; - uint32 bitmap_page_bit; - uint32 first_page; - uint32 last_bit; - uint32 last_page; - uint32 i; - uint32 j; - bool page_found = false; + + /* allocate and lock an empty overflow page */ + ovflbuf = _hash_getovflpage(rel, metabuf); /* - * Write-lock the tail page. Here, we need to maintain locking order such - * that, first acquire the lock on tail page of bucket, then on meta page - * to find and lock the bitmap page and if it is found, then lock on meta - * page is released, then finally acquire the lock on new overflow buffer. - * We need this locking order to avoid deadlock with backends that are - * doing inserts. - * - * Note: We could have avoided locking many buffers here if we made two - * WAL records for acquiring an overflow page (one to allocate an overflow - * page and another to add it to overflow bucket chain). However, doing - * so can leak an overflow page, if the system crashes after allocation. - * Needless to say, it is better to have a single record from a - * performance point of view as well. + * Write-lock the tail page. It is okay to hold two buffer locks here + * since there cannot be anyone else contending for access to ovflbuf. */ - LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); + _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE); /* probably redundant... 
*/ _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); @@ -156,21 +124,55 @@ Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_p break; /* we assume we do not need to write the unmodified page */ - if (retain_pin) { - /* pin will be retained only for the primary bucket page */ - Assert((pageopaque->hasho_flag & LH_PAGE_TYPE) == LH_BUCKET_PAGE); - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - } else { - _hash_relbuf(rel, buf); - } - - retain_pin = false; + _hash_relbuf(rel, buf); buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE); } + /* now that we have correct backlink, initialize new overflow page */ + ovflpage = BufferGetPage(ovflbuf); + ovflopaque = (HashPageOpaque)PageGetSpecialPointer(ovflpage); + ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf); + ovflopaque->hasho_nextblkno = InvalidBlockNumber; + ovflopaque->hasho_bucket = pageopaque->hasho_bucket; + ovflopaque->hasho_flag = LH_OVERFLOW_PAGE; + ovflopaque->hasho_page_id = HASHO_PAGE_ID; + + MarkBufferDirty(ovflbuf); + + /* logically chain overflow page to previous page */ + pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf); + _hash_wrtbuf(rel, buf); + + return ovflbuf; +} + +/* + * Find an available overflow page and return it. The returned buffer + * is pinned and write-locked, and has had _hash_pageinit() applied, + * but it is caller's responsibility to fill the special space. + * + * The caller must hold a pin, but no lock, on the metapage buffer. + * That buffer is left in the same state at exit. + */ +static Buffer _hash_getovflpage(Relation rel, Buffer metabuf) +{ + HashMetaPage metap; + Buffer mapbuf = 0; + Buffer newbuf; + BlockNumber blkno; + uint32 orig_firstfree; + uint32 splitnum; + uint32 *freep = NULL; + uint32 max_ovflpg; + uint32 bit; + uint32 first_page; + uint32 last_bit; + uint32 last_page; + uint32 i, j; + /* Get exclusive lock on the meta page */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); _hash_checkpage(rel, metabuf, LH_META_PAGE); metap = HashPageGetMeta(BufferGetPage(metabuf)); @@ -207,44 +209,25 @@ Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_p last_inpage = BMPGSZ_BIT(metap) - 1; /* Release exclusive lock on metapage while reading bitmap page */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); + _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE); mappage = BufferGetPage(mapbuf); freep = HashPageGetBitmap(mappage); for (; bit <= last_inpage; j++, bit += BITS_PER_MAP) { - if (freep[j] != ALL_SET) { - page_found = true; - - /* Reacquire exclusive lock on the meta page */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); - - /* convert bit to bit number within page */ - bit += _hash_firstfreebit(freep[j]); - bitmap_page_bit = bit; - - /* convert bit to absolute bit number */ - bit += (i << BMPG_SHIFT(metap)); - /* Calculate address of the recycled overflow page */ - blkno = bitno_to_blkno(metap, bit); - - /* Fetch and init the recycled page */ - ovflbuf = _hash_getinitbuf(rel, blkno); - + if (freep[j] != ALL_SET) goto found; - } } /* No free space here, try to advance to next map page */ _hash_relbuf(rel, mapbuf); - mapbuf = InvalidBuffer; i++; j = 0; /* scan from start of next map page */ bit = 0; /* Reacquire exclusive lock on the meta page */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); } /* @@ -261,15 +244,8 @@ Buffer 
_hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_p * convenient to pre-mark them as "in use" too. */ bit = metap->hashm_spares[splitnum]; - - /* metapage already has a write lock */ - if (metap->hashm_nmaps >= HASH_MAX_BITMAPS) - ereport(ERROR, - (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("out of overflow pages in hash index \"%s\"", - RelationGetRelationName(rel)))); - - newmapbuf = _hash_getnewbuf(rel, bitno_to_blkno(metap, bit), MAIN_FORKNUM); + _hash_initbitmap(rel, metap, bitno_to_blkno(metap, bit), MAIN_FORKNUM); + metap->hashm_spares[splitnum]++; } else { /* * Nothing to do here; since the page will be past the last used page, @@ -278,8 +254,7 @@ Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_p } /* Calculate address of the new overflow page */ - bit = BufferIsValid(newmapbuf) ? - metap->hashm_spares[splitnum] + 1 : metap->hashm_spares[splitnum]; + bit = metap->hashm_spares[splitnum]; blkno = bitno_to_blkno(metap, bit); /* @@ -287,140 +262,60 @@ Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_p * relation length stays in sync with ours. XXX It's annoying to do this * with metapage write lock held; would be better to use a lock that * doesn't block incoming searches. - * - * It is okay to hold two buffer locks here (one on tail page of bucket - * and other on new overflow page) since there cannot be anyone else - * contending for access to ovflbuf. */ - ovflbuf = _hash_getnewbuf(rel, blkno, MAIN_FORKNUM); + newbuf = _hash_getnewbuf(rel, blkno, MAIN_FORKNUM); + + metap->hashm_spares[splitnum]++; + + /* + * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * changing it if someone moved it while we were searching bitmap pages. + */ + if (metap->hashm_firstfree == orig_firstfree) + metap->hashm_firstfree = bit + 1; + + /* Write updated metapage and release lock, but not pin */ + _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK); + + return newbuf; found: + /* convert bit to bit number within page */ + bit += _hash_firstfreebit(freep[j]); + + /* mark page "in use" in the bitmap */ + SETBIT(freep, bit); + _hash_wrtbuf(rel, mapbuf); + + /* Reacquire exclusive lock on the meta page */ + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); + + /* convert bit to absolute bit number */ + bit += (i << BMPG_SHIFT(metap)); + + /* Calculate address of the recycled overflow page */ + blkno = bitno_to_blkno(metap, bit); /* - * Do the update. No ereport(ERROR) until changes are logged. We want to - * log the changes for bitmap page and overflow page together to avoid - * loss of pages in case the new page is added. - */ - START_CRIT_SECTION(); - - if (page_found) { - Assert(BufferIsValid(mapbuf)); - - /* mark page "in use" in the bitmap */ - SETBIT(freep, bitmap_page_bit); - MarkBufferDirty(mapbuf); - } else { - /* update the count to indicate new overflow page is added */ - metap->hashm_spares[splitnum]++; - - if (BufferIsValid(newmapbuf)) { - _hash_initbitmapbuffer(newmapbuf, metap->hashm_bmsize, false); - MarkBufferDirty(newmapbuf); - - /* add the new bitmap page to the metapage's list of bitmaps */ - newmap_blkno = BufferGetBlockNumber(newmapbuf); - metap->hashm_mapp[metap->hashm_nmaps] = newmap_blkno; - metap->hashm_nmaps++; - metap->hashm_spares[splitnum]++; - MarkBufferDirty(metabuf); - } - - /* - * for new overflow page, we don't need to explicitly set the bit in - * bitmap page, as by default that will be set to "in use". 
- */ - } - - /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) { metap->hashm_firstfree = bit + 1; - MarkBufferDirty(metabuf); + + /* Write updated metapage and release lock, but not pin */ + _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK); + } else { + /* We didn't change the metapage, so no need to write */ + _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); } - /* initialize new overflow page */ - ovflpage = BufferGetPage(ovflbuf); - ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); - ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf); - ovflopaque->hasho_nextblkno = InvalidBlockNumber; - ovflopaque->hasho_bucket = pageopaque->hasho_bucket; - ovflopaque->hasho_flag = LH_OVERFLOW_PAGE; - ovflopaque->hasho_page_id = HASHO_PAGE_ID; - - MarkBufferDirty(ovflbuf); - - /* logically chain overflow page to previous page */ - pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf); - - MarkBufferDirty(buf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - XLogRecPtr recptr; - xl_hash_add_ovfl_page xlrec; - - xlrec.bmpage_found = page_found; - xlrec.bmsize = metap->hashm_bmsize; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashAddOvflPage); - - XLogRegisterBuffer(0, ovflbuf, REGBUF_WILL_INIT); - XLogRegisterBufData(0, (char *) &pageopaque->hasho_bucket, sizeof(Bucket)); - - XLogRegisterBuffer(1, buf, REGBUF_STANDARD); - - if (BufferIsValid(mapbuf)) { - XLogRegisterBuffer(2, mapbuf, REGBUF_STANDARD); - XLogRegisterBufData(2, (char *) &bitmap_page_bit, sizeof(uint32)); - } - - if (BufferIsValid(newmapbuf)) - XLogRegisterBuffer(3, newmapbuf, REGBUF_WILL_INIT); - - XLogRegisterBuffer(4, metabuf, REGBUF_STANDARD); - XLogRegisterBufData(4, (char *) &metap->hashm_firstfree, sizeof(uint32)); - if (BufferIsValid(newmapbuf)) - XLogRegisterBufData(4, (char *) &newmap_blkno, sizeof(BlockNumber)); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_ADD_OVFL_PAGE); - - PageSetLSN(BufferGetPage(ovflbuf), recptr); - PageSetLSN(BufferGetPage(buf), recptr); - - if (BufferIsValid(mapbuf)) - PageSetLSN(BufferGetPage(mapbuf), recptr); - - if (BufferIsValid(newmapbuf)) - PageSetLSN(BufferGetPage(newmapbuf), recptr); - - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - END_CRIT_SECTION(); - - if (retain_pin) - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - else - _hash_relbuf(rel, buf); - - if (BufferIsValid(mapbuf)) - _hash_relbuf(rel, mapbuf); - - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - - if (BufferIsValid(newmapbuf)) - _hash_relbuf(rel, newmapbuf); - - return ovflbuf; + /* Fetch, init, and return the recycled page */ + return _hash_getinitbuf(rel, blkno); } /* - * _hash_firstfreebit() - * - * Return the number of the first bit that is not set in the word 'map'. + * Return the number of the first bit that is not set in the word 'map'. */ static uint32 _hash_firstfreebit(uint32 map) { @@ -439,31 +334,20 @@ static uint32 _hash_firstfreebit(uint32 map) } /* - * _hash_freeovflpage() + * Remove this overflow page from its bucket's chain, and mark the page as + * free. On entry, ovflbuf is write-locked; it is released before exiting. * - * Remove this overflow page from its bucket's chain, and mark the page as - * free. On entry, ovflbuf is write-locked; it is released before exiting. 
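Only the header comment of `_hash_firstfreebit()` changes in this hunk; its job, finding the lowest zero bit in a bitmap word, is equivalent to the standalone loop below (a sketch; the real routine reports an error instead of returning 32, since callers never pass an all-set word):

```
#include <cstdint>
#include <cstdio>

/* Index of the lowest zero bit in a 32-bit word, scanning from bit 0. */
static uint32_t first_free_bit(uint32_t map)
{
    uint32_t mask = 0x1;
    for (uint32_t i = 0; i < 32; i++, mask <<= 1)
        if (!(map & mask))
            return i;
    return 32; /* all bits set; the hash AM treats this as an error */
}

int main()
{
    printf("%u\n", first_free_bit(0x0)); /* 0 */
    printf("%u\n", first_free_bit(0x7)); /* bits 0..2 set -> 3 */
    return 0;
}
```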
+ * Since this function is invoked in VACUUM, we provide an access strategy + * parameter that controls fetches of the bucket pages. * - * Add the tuples (itups) to wbuf in this function. We could do that in the - * caller as well, but the advantage of doing it here is we can easily write - * the WAL for XLOG_HASH_SQUEEZE_PAGE operation. Addition of tuples and - * removal of overflow page has to done as an atomic operation, otherwise - * during replay on standby users might find duplicate records. + * Returns the block number of the page that followed the given page + * in the bucket, or InvalidBlockNumber if no following page. * - * Since this function is invoked in VACUUM, we provide an access strategy - * parameter that controls fetches of the bucket pages. - * - * Returns the block number of the page that followed the given page - * in the bucket, or InvalidBlockNumber if no following page. - * - * NB: caller must not hold lock on metapage, nor on page, that's next to - * ovflbuf in the bucket chain. We don't acquire the lock on page that's - * prior to ovflbuf in chain if it is same as wbuf because the caller already - * has a lock on same. + * NB: caller must not hold lock on metapage, nor on either page that's + * adjacent in the bucket chain. The caller had better hold exclusive lock + * on the bucket, too. */ -BlockNumber _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, - Buffer wbuf, IndexTuple *itups, OffsetNumber *itup_offsets, - Size *tups_size, uint16 nitups, BufferAccessStrategy bstrategy) +BlockNumber _hash_freeovflpage(Relation rel, Buffer ovflbuf, BufferAccessStrategy bstrategy) { HashMetaPage metap; Buffer metabuf; @@ -472,18 +356,13 @@ BlockNumber _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, BlockNumber prevblkno; BlockNumber blkno; BlockNumber nextblkno; - BlockNumber writeblkno; HashPageOpaque ovflopaque; Page ovflpage; Page mappage; uint32 *freep = NULL; uint32 ovflbitno; - int32 bitmappage; - int32 bitmapbit; + int32 bitmappage, bitmapbit; Bucket bucket PG_USED_FOR_ASSERTS_ONLY; - Buffer prevbuf = InvalidBuffer; - Buffer nextbuf = InvalidBuffer; - bool update_metap = false; /* Get information from the doomed page */ _hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE); @@ -492,40 +371,51 @@ BlockNumber _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, ovflopaque = (HashPageOpaque)PageGetSpecialPointer(ovflpage); nextblkno = ovflopaque->hasho_nextblkno; prevblkno = ovflopaque->hasho_prevblkno; - writeblkno = BufferGetBlockNumber(wbuf); bucket = ovflopaque->hasho_bucket; + /* + * Zero the page for debugging's sake; then write and release it. (Note: + * if we failed to zero the page here, we'd have problems with the Assert + * in _hash_pageinit() when the page is reused.) + */ + MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf)); + _hash_wrtbuf(rel, ovflbuf); + /* * Fix up the bucket chain. this is a doubly-linked list, so we must fix * up the bucket chain members behind and ahead of the overflow page being - * deleted. Concurrency issues are avoided by using lock chaining as - * described atop hashbucketcleanup. + * deleted. No concurrency issues since we hold exclusive lock on the + * entire bucket. 
*/ if (BlockNumberIsValid(prevblkno)) { - if (prevblkno == writeblkno) - prevbuf = wbuf; - else - prevbuf = _hash_getbuf_with_strategy(rel, - prevblkno, - HASH_WRITE, - LH_BUCKET_PAGE | LH_OVERFLOW_PAGE, - bstrategy); + Buffer prevbuf = _hash_getbuf_with_strategy(rel, prevblkno, HASH_WRITE, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE, + bstrategy); + Page prevpage = BufferGetPage(prevbuf); + HashPageOpaque prevopaque = (HashPageOpaque)PageGetSpecialPointer(prevpage); + + Assert(prevopaque->hasho_bucket == bucket); + prevopaque->hasho_nextblkno = nextblkno; + _hash_wrtbuf(rel, prevbuf); } - if (BlockNumberIsValid(nextblkno)) - nextbuf = _hash_getbuf_with_strategy(rel, - nextblkno, - HASH_WRITE, - LH_OVERFLOW_PAGE, - bstrategy); + if (BlockNumberIsValid(nextblkno)) { + Buffer nextbuf = _hash_getbuf_with_strategy(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE, bstrategy); + Page nextpage = BufferGetPage(nextbuf); + HashPageOpaque nextopaque = (HashPageOpaque)PageGetSpecialPointer(nextpage); - /* Note: bstrategy is intentionally not used for metapage and bitmap */ + Assert(nextopaque->hasho_bucket == bucket); + nextopaque->hasho_prevblkno = prevblkno; + _hash_wrtbuf(rel, nextbuf); + } - /* Read the metapage so we can determine which bitmap page to use */ + /* + * Note: bstrategy is intentionally not used for metapage and bitmap + * Read the metapage so we can determine which bitmap page to use + */ metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE); metap = HashPageGetMeta(BufferGetPage(metabuf)); /* Identify which bit to set */ - ovflbitno = _hash_ovflblkno_to_bitno(metap, ovflblkno); + ovflbitno = blkno_to_bitno(metap, ovflblkno); bitmappage = ovflbitno >> BMPG_SHIFT(metap); bitmapbit = ovflbitno & BMPG_MASK(metap); @@ -535,241 +425,109 @@ BlockNumber _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, blkno = metap->hashm_mapp[bitmappage]; /* Release metapage lock while we access the bitmap page */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); + _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); - /* read the bitmap page to clear the bitmap bit */ + /* Clear the bitmap bit to indicate that this overflow page is free */ mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE); mappage = BufferGetPage(mapbuf); freep = HashPageGetBitmap(mappage); Assert(ISSET(freep, bitmapbit)); + CLRBIT(freep, bitmapbit); + _hash_wrtbuf(rel, mapbuf); /* Get write-lock on metapage to update firstfree */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); - - /* This operation needs to log multiple tuples, prepare WAL for that */ - if (RelationNeedsWAL(rel)) - XLogEnsureRecordSpace(HASH_XLOG_FREE_OVFL_BUFS, 4 + nitups); - - START_CRIT_SECTION(); - - /* - * we have to insert tuples on the "write" page, being careful to preserve - * hashkey ordering. (If we insert many tuples into the same "write" page - * it would be worth qsort'ing them). - */ - if (nitups > 0) { - _hash_pgaddmultitup(rel, wbuf, itups, itup_offsets, nitups); - MarkBufferDirty(wbuf); - } - - /* - * Reinitialize the freed overflow page. Just zeroing the page won't - * work, because WAL replay routines expect pages to be initialized. See - * explanation of RBM_NORMAL mode atop XLogReadBufferExtended. We are - * careful to make the special space valid here so that tools like - * pageinspect won't get confused. 
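The chain fix-up restored here is a plain doubly-linked unlink expressed over page special space; in pointer form (an in-memory sketch, not the on-disk `hasho_prevblkno`/`hasho_nextblkno` representation) the invariant it maintains is:

```
#include <cstdio>

/* In-memory stand-in for a bucket-chain page; on disk the links are
 * block numbers in the page's HashPageOpaque, not pointers. */
struct ChainPage {
    ChainPage *prev;
    ChainPage *next;
};

/* Unlink 'dead' as _hash_freeovflpage relinks its neighbours; safe
 * because the (modeled) exclusive bucket lock keeps both stable. */
static void unlink_page(ChainPage *dead)
{
    if (dead->prev != nullptr)
        dead->prev->next = dead->next;
    if (dead->next != nullptr)
        dead->next->prev = dead->prev;
    dead->prev = dead->next = nullptr;
}

int main()
{
    ChainPage a{nullptr, nullptr}, b{nullptr, nullptr}, c{nullptr, nullptr};
    a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
    unlink_page(&b);
    printf("%d\n", a.next == &c && c.prev == &a); /* 1: chain is now a <-> c */
    return 0;
}
```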
- */ - _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf)); - - ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); - - ovflopaque->hasho_prevblkno = InvalidBlockNumber; - ovflopaque->hasho_nextblkno = InvalidBlockNumber; - ovflopaque->hasho_bucket = -1; - ovflopaque->hasho_flag = LH_UNUSED_PAGE; - ovflopaque->hasho_page_id = HASHO_PAGE_ID; - - MarkBufferDirty(ovflbuf); - - if (BufferIsValid(prevbuf)) { - Page prevpage = BufferGetPage(prevbuf); - HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage); - - Assert(prevopaque->hasho_bucket == bucket); - prevopaque->hasho_nextblkno = nextblkno; - MarkBufferDirty(prevbuf); - } - if (BufferIsValid(nextbuf)) { - Page nextpage = BufferGetPage(nextbuf); - HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage); - - Assert(nextopaque->hasho_bucket == bucket); - nextopaque->hasho_prevblkno = prevblkno; - MarkBufferDirty(nextbuf); - } - - /* Clear the bitmap bit to indicate that this overflow page is free */ - CLRBIT(freep, bitmapbit); - MarkBufferDirty(mapbuf); + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); /* if this is now the first free page, update hashm_firstfree */ if (ovflbitno < metap->hashm_firstfree) { metap->hashm_firstfree = ovflbitno; - update_metap = true; - MarkBufferDirty(metabuf); + _hash_wrtbuf(rel, metabuf); + } else { + /* no need to change metapage */ + _hash_relbuf(rel, metabuf); } - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - xl_hash_squeeze_page xlrec; - XLogRecPtr recptr; - int i; - - xlrec.prevblkno = prevblkno; - xlrec.nextblkno = nextblkno; - xlrec.ntups = nitups; - xlrec.is_prim_bucket_same_wrt = (wbuf == bucketbuf); - xlrec.is_prev_bucket_same_wrt = (wbuf == prevbuf); - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashSqueezePage); - - /* - * bucket buffer needs to be registered to ensure that we can acquire - * a cleanup lock on it during replay. - */ - if (!xlrec.is_prim_bucket_same_wrt) - XLogRegisterBuffer(0, bucketbuf, REGBUF_STANDARD | REGBUF_NO_IMAGE); - - XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD); - if (xlrec.ntups > 0) { - XLogRegisterBufData(1, (char *) itup_offsets, - nitups * sizeof(OffsetNumber)); - for (i = 0; i < nitups; i++) - XLogRegisterBufData(1, (char *) itups[i], tups_size[i]); - } - - XLogRegisterBuffer(2, ovflbuf, REGBUF_STANDARD); - - /* - * If prevpage and the writepage (block in which we are moving tuples - * from overflow) are same, then no need to separately register - * prevpage. During replay, we can directly update the nextblock in - * writepage. 
- */ - if (BufferIsValid(prevbuf) && !xlrec.is_prev_bucket_same_wrt) - XLogRegisterBuffer(3, prevbuf, REGBUF_STANDARD); - - if (BufferIsValid(nextbuf)) - XLogRegisterBuffer(4, nextbuf, REGBUF_STANDARD); - - XLogRegisterBuffer(5, mapbuf, REGBUF_STANDARD); - XLogRegisterBufData(5, (char *) &bitmapbit, sizeof(uint32)); - - if (update_metap) { - XLogRegisterBuffer(6, metabuf, REGBUF_STANDARD); - XLogRegisterBufData(6, (char *) &metap->hashm_firstfree, sizeof(uint32)); - } - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SQUEEZE_PAGE); - - if (!xlrec.is_prim_bucket_same_wrt) { - PageSetLSN(BufferGetPage(bucketbuf), recptr); - } - PageSetLSN(BufferGetPage(wbuf), recptr); - PageSetLSN(BufferGetPage(ovflbuf), recptr); - - if (BufferIsValid(prevbuf) && !xlrec.is_prev_bucket_same_wrt) - PageSetLSN(BufferGetPage(prevbuf), recptr); - if (BufferIsValid(nextbuf)) - PageSetLSN(BufferGetPage(nextbuf), recptr); - - PageSetLSN(BufferGetPage(mapbuf), recptr); - - if (update_metap) - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - END_CRIT_SECTION(); - - /* release previous bucket if it is not same as write bucket */ - if (BufferIsValid(prevbuf) && prevblkno != writeblkno) - _hash_relbuf(rel, prevbuf); - - if (BufferIsValid(ovflbuf)) - _hash_relbuf(rel, ovflbuf); - - if (BufferIsValid(nextbuf)) - _hash_relbuf(rel, nextbuf); - - _hash_relbuf(rel, mapbuf); - _hash_relbuf(rel, metabuf); - return nextblkno; } /* - * _hash_initbitmapbuffer() + * Initialize a new bitmap page. The metapage has a write-lock upon + * entering the function, and must be written by caller after return. * - * Initialize a new bitmap page. All bits in the new bitmap page are set to - * "1", indicating "in use". + * 'blkno' is the block number of the new bitmap page. + * + * All bits in the new bitmap page are set to "1", indicating "in use". */ -void _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage) +void _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno, ForkNumber forkNum) { + Buffer buf; Page pg; HashPageOpaque op; uint32 *freep = NULL; + /* + * It is okay to write-lock the new bitmap page while holding metapage + * write lock, because no one else could be contending for the new page. + * Also, the metapage lock makes it safe to extend the index using + * _hash_getnewbuf. + * + * There is some loss of concurrency in possibly doing I/O for the new + * page while holding the metapage lock, but this path is taken so seldom + * that it's not worth worrying about. + */ + buf = _hash_getnewbuf(rel, blkno, forkNum); pg = BufferGetPage(buf); - /* initialize the page */ - if (initpage) - _hash_pageinit(pg, BufferGetPageSize(buf)); - /* initialize the page's special space */ - op = (HashPageOpaque) PageGetSpecialPointer(pg); + op = (HashPageOpaque)PageGetSpecialPointer(pg); op->hasho_prevblkno = InvalidBlockNumber; op->hasho_nextblkno = InvalidBlockNumber; - op->hasho_bucket = -1; + op->hasho_bucket = INVALID_BUCKET_NUM; op->hasho_flag = LH_BITMAP_PAGE; op->hasho_page_id = HASHO_PAGE_ID; /* set all of the bits to 1 */ freep = HashPageGetBitmap(pg); - MemSet(freep, 0xFF, bmsize); + errno_t rc = memset_s(freep, HashGetMaxBitmapSize(pg), 0xFF, BMPGSZ_BYTE(metap)); + securec_check(rc, "", ""); - /* - * Set pd_lower just past the end of the bitmap page data. We could even - * set pd_lower equal to pd_upper, but this is more precise and makes the - * page look compressible to xlog.c. 
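`_hash_initbitmap()` fills the new bitmap page with 0xFF bytes, so every overflow page starts out "in use" and only becomes allocatable after an explicit clear. A byte-addressed sketch of that SETBIT/CLRBIT/ISSET convention (the real maps are `uint32` arrays and the page size here is hypothetical; `BYTE_TO_BIT` is the same shift the metapage stores via `hashm_bmshift`):

```
#include <cstdint>
#include <cstdio>
#include <cstring>

#define BYTE_TO_BIT 3
#define ISSET(map, bit)  ((map)[(bit) >> BYTE_TO_BIT] &   (1 << ((bit) & 0x07)))
#define SETBIT(map, bit) ((map)[(bit) >> BYTE_TO_BIT] |=  (1 << ((bit) & 0x07)))
#define CLRBIT(map, bit) ((map)[(bit) >> BYTE_TO_BIT] &= ~(1 << ((bit) & 0x07)))

int main()
{
    uint8_t freep[16];                  /* hypothetical 128-bit bitmap */
    memset(freep, 0xFF, sizeof(freep)); /* all "in use", as in _hash_initbitmap */

    CLRBIT(freep, 42);                      /* an overflow page is freed */
    printf("%d\n", ISSET(freep, 42) != 0);  /* 0: bit 42 is now allocatable */
    SETBIT(freep, 42);                      /* recycled by _hash_getovflpage */
    printf("%d\n", ISSET(freep, 42) != 0);  /* 1 */
    return 0;
}
```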
- */ - ((PageHeader) pg)->pd_lower = ((char *) freep + bmsize) - (char *) pg; + /* write out the new bitmap page (releasing write lock and pin) */ + _hash_wrtbuf(rel, buf); + + /* add the new bitmap page to the metapage's list of bitmaps */ + /* metapage already has a write lock */ + if (metap->hashm_nmaps >= HASH_MAX_BITMAPS) + ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("out of overflow pages in hash index \"%s\"", RelationGetRelationName(rel)))); + + metap->hashm_mapp[metap->hashm_nmaps] = blkno; + + metap->hashm_nmaps++; } - /* - * _hash_squeezebucket(rel, bucket) + * Try to squeeze the tuples onto pages occurring earlier in the + * bucket chain in an attempt to free overflow pages. When we start + * the "squeezing", the page from which we start taking tuples (the + * "read" page) is the last bucket in the bucket chain and the page + * onto which we start squeezing tuples (the "write" page) is the + * first page in the bucket chain. The read page works backward and + * the write page works forward; the procedure terminates when the + * read page and write page are the same page. * - * Try to squeeze the tuples onto pages occurring earlier in the - * bucket chain in an attempt to free overflow pages. When we start - * the "squeezing", the page from which we start taking tuples (the - * "read" page) is the last bucket in the bucket chain and the page - * onto which we start squeezing tuples (the "write" page) is the - * first page in the bucket chain. The read page works backward and - * the write page works forward; the procedure terminates when the - * read page and write page are the same page. + * At completion of this procedure, it is guaranteed that all pages in + * the bucket are nonempty, unless the bucket is totally empty (in + * which case all overflow pages will be freed). The original implementation + * required that to be true on entry as well, but it's a lot easier for + * callers to leave empty overflow pages and let this guy clean it up. * - * At completion of this procedure, it is guaranteed that all pages in - * the bucket are nonempty, unless the bucket is totally empty (in - * which case all overflow pages will be freed). The original implementation - * required that to be true on entry as well, but it's a lot easier for - * callers to leave empty overflow pages and let this guy clean it up. + * Caller must hold exclusive lock on the target bucket. This allows + * us to safely lock multiple pages in the bucket. * - * Caller must acquire cleanup lock on the primary page of the target - * bucket to exclude any scans that are in progress, which could easily - * be confused into returning the same tuple more than once or some tuples - * not at all by the rearrangement we are performing here. To prevent - * any concurrent scan to cross the squeeze scan we use lock chaining - * similar to hasbucketcleanup. Refer comments atop hashbucketcleanup. - * - * We need to retain a pin on the primary bucket to ensure that no concurrent - * split can start. - * - * Since this function is invoked in VACUUM, we provide an access strategy - * parameter that controls fetches of the bucket pages. + * Since this function is invoked in VACUUM, we provide an access strategy + * parameter that controls fetches of the bucket pages. 
*/ -void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, - Buffer bucket_buf, BufferAccessStrategy bstrategy) +void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy) { BlockNumber wblkno; BlockNumber rblkno; @@ -779,21 +537,20 @@ void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, Page rpage; HashPageOpaque wopaque; HashPageOpaque ropaque; + bool wbuf_dirty = false; /* - * start squeezing into the primary bucket page. + * start squeezing into the base bucket page. */ wblkno = bucket_blkno; - wbuf = bucket_buf; + wbuf = _hash_getbuf_with_strategy(rel, wblkno, HASH_WRITE, LH_BUCKET_PAGE, bstrategy); wpage = BufferGetPage(wbuf); - wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage); - + wopaque = (HashPageOpaque)PageGetSpecialPointer(wpage); /* - * if there aren't any overflow pages, there's nothing to squeeze. caller - * is responsible for releasing the pin on primary bucket page. + * if there aren't any overflow pages, there's nothing to squeeze. */ if (!BlockNumberIsValid(wopaque->hasho_nextblkno)) { - LockBuffer(wbuf, BUFFER_LOCK_UNLOCK); + _hash_relbuf(rel, wbuf); return; } @@ -809,187 +566,76 @@ void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, rblkno = ropaque->hasho_nextblkno; if (rbuf != InvalidBuffer) _hash_relbuf(rel, rbuf); - rbuf = _hash_getbuf_with_strategy(rel, - rblkno, - HASH_WRITE, - LH_OVERFLOW_PAGE, - bstrategy); + rbuf = _hash_getbuf_with_strategy(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE, bstrategy); rpage = BufferGetPage(rbuf); - ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage); + ropaque = (HashPageOpaque)PageGetSpecialPointer(rpage); Assert(ropaque->hasho_bucket == bucket); } while (BlockNumberIsValid(ropaque->hasho_nextblkno)); /* * squeeze the tuples. */ + wbuf_dirty = false; for (;;) { OffsetNumber roffnum; OffsetNumber maxroffnum; OffsetNumber deletable[MaxOffsetNumber]; - IndexTuple itups[MaxIndexTuplesPerPage]; - Size tups_size[MaxIndexTuplesPerPage]; - OffsetNumber itup_offsets[MaxIndexTuplesPerPage]; - uint16 ndeletable = 0; - uint16 nitups = 0; - Size all_tups_size = 0; - int i; - bool retain_pin = false; + int ndeletable = 0; -readpage: /* Scan each tuple in "read" page */ maxroffnum = PageGetMaxOffsetNumber(rpage); for (roffnum = FirstOffsetNumber; roffnum <= maxroffnum; roffnum = OffsetNumberNext(roffnum)) { IndexTuple itup; Size itemsz; - /* skip dead tuples */ - if (ItemIdIsDead(PageGetItemId(rpage, roffnum))) - continue; - - itup = (IndexTuple) PageGetItem(rpage, PageGetItemId(rpage, roffnum)); + itup = (IndexTuple)PageGetItem(rpage, PageGetItemId(rpage, roffnum)); itemsz = IndexTupleDSize(*itup); itemsz = MAXALIGN(itemsz); /* * Walk up the bucket chain, looking for a page big enough for - * this item and all other accumulated items. Exit if we reach - * the read page. + * this item. Exit if we reach the read page. 
*/ - while (PageGetFreeSpaceForMultipleTuples(wpage, nitups + 1) < (all_tups_size + itemsz)) { - Buffer next_wbuf = InvalidBuffer; - bool tups_moved = false; - + while (PageGetFreeSpace(wpage) < itemsz) { Assert(!PageIsEmpty(wpage)); - if (wblkno == bucket_blkno) - retain_pin = true; - wblkno = wopaque->hasho_nextblkno; Assert(BlockNumberIsValid(wblkno)); - /* don't need to move to next page if we reached the read page */ - if (wblkno != rblkno) - next_wbuf = _hash_getbuf_with_strategy(rel, - wblkno, - HASH_WRITE, - LH_OVERFLOW_PAGE, - bstrategy); - - if (nitups > 0) { - Assert(nitups == ndeletable); - - /* - * This operation needs to log multiple tuples, prepare - * WAL for that. - */ - if (RelationNeedsWAL(rel)) - XLogEnsureRecordSpace(0, 3 + nitups); - - START_CRIT_SECTION(); - - /* - * we have to insert tuples on the "write" page, being - * careful to preserve hashkey ordering. (If we insert - * many tuples into the same "write" page it would be - * worth qsort'ing them). - */ - _hash_pgaddmultitup(rel, wbuf, itups, itup_offsets, nitups); - MarkBufferDirty(wbuf); - - /* Delete tuples we already moved off read page */ - PageIndexMultiDelete(rpage, deletable, ndeletable); - MarkBufferDirty(rbuf); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - XLogRecPtr recptr; - xl_hash_move_page_contents xlrec; - - xlrec.ntups = nitups; - xlrec.is_prim_bucket_same_wrt = (wbuf == bucket_buf) ? true : false; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashMovePageContents); - - /* - * bucket buffer needs to be registered to ensure that - * we can acquire a cleanup lock on it during replay. - */ - if (!xlrec.is_prim_bucket_same_wrt) - XLogRegisterBuffer(0, bucket_buf, REGBUF_STANDARD | REGBUF_NO_IMAGE); - - XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD); - XLogRegisterBufData(1, (char *) itup_offsets, - nitups * sizeof(OffsetNumber)); - for (i = 0; i < nitups; i++) - XLogRegisterBufData(1, (char *) itups[i], tups_size[i]); - - XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD); - XLogRegisterBufData(2, (char *) deletable, - ndeletable * sizeof(OffsetNumber)); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_MOVE_PAGE_CONTENTS); - - if (!xlrec.is_prim_bucket_same_wrt) { - PageSetLSN(BufferGetPage(bucket_buf), recptr); - } - - PageSetLSN(BufferGetPage(wbuf), recptr); - PageSetLSN(BufferGetPage(rbuf), recptr); - } - - END_CRIT_SECTION(); - - tups_moved = true; - } - - /* - * release the lock on previous page after acquiring the lock - * on next page - */ - if (retain_pin) - LockBuffer(wbuf, BUFFER_LOCK_UNLOCK); + if (wbuf_dirty) + _hash_wrtbuf(rel, wbuf); else _hash_relbuf(rel, wbuf); /* nothing more to do if we reached the read page */ if (rblkno == wblkno) { - _hash_relbuf(rel, rbuf); + if (ndeletable > 0) { + /* Delete tuples we already moved off read page */ + PageIndexMultiDelete(rpage, deletable, ndeletable); + _hash_wrtbuf(rel, rbuf); + } else + _hash_relbuf(rel, rbuf); return; } - wbuf = next_wbuf; + wbuf = _hash_getbuf_with_strategy(rel, wblkno, HASH_WRITE, LH_OVERFLOW_PAGE, bstrategy); wpage = BufferGetPage(wbuf); - wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage); + wopaque = (HashPageOpaque)PageGetSpecialPointer(wpage); Assert(wopaque->hasho_bucket == bucket); - retain_pin = false; - - /* be tidy */ - for (i = 0; i < nitups; i++) - pfree(itups[i]); - nitups = 0; - all_tups_size = 0; - ndeletable = 0; - - /* - * after moving the tuples, rpage would have been compacted, - * so we need to rescan it. 
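The reverted `_hash_squeezebucket` moves a "write" cursor forward from the bucket head while a "read" cursor walks backward from the tail, stopping when they meet. Stripped of buffer locking and WAL concerns, the control flow is the two-pointer compaction below (a sketch over an in-memory chain, not index pages; `CAP` stands in for page free space):

```
#include <cstdio>
#include <vector>

static const size_t CAP = 4; /* hypothetical per-page tuple capacity */

/* Move items from the tail page into the earliest page with room,
 * discarding tail pages as they empty, until read meets write. */
static void squeeze(std::vector<std::vector<int>> &chain)
{
    size_t w = 0, r = chain.size() - 1;
    while (w < r) {
        while (!chain[r].empty()) {
            if (chain[w].size() == CAP) { /* write page full: advance it */
                if (++w == r)
                    break;
                continue;
            }
            chain[w].push_back(chain[r].back());
            chain[r].pop_back();
        }
        if (w >= r)
            break;        /* cursors met: done */
        chain.pop_back(); /* read page emptied: "free" the overflow page */
        r--;
    }
}

int main()
{
    std::vector<std::vector<int>> chain = {{1, 2}, {3}, {4, 5}};
    squeeze(chain);
    printf("%zu pages, %zu items in head\n", chain.size(), chain[0].size());
    return 0;
}
```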
- */ - if (tups_moved) - goto readpage; + wbuf_dirty = false; } + /* + * we have found room so insert on the "write" page, being careful + * to preserve hashkey ordering. (If we insert many tuples into + * the same "write" page it would be worth qsort'ing instead of + * doing repeated _hash_pgaddtup.) + */ + (void)_hash_pgaddtup(rel, wbuf, itemsz, itup); + wbuf_dirty = true; + /* remember tuple for deletion from "read" page */ deletable[ndeletable++] = roffnum; - - /* - * we need a copy of index tuples as they can be freed as part of - * overflow page, however we need them to write a WAL record in - * _hash_freeovflpage. - */ - itups[nitups] = CopyIndexTuple(itup); - tups_size[nitups++] = itemsz; - all_tups_size += itemsz; } /* @@ -1001,36 +647,31 @@ readpage: * Tricky point here: if our read and write pages are adjacent in the * bucket chain, our write lock on wbuf will conflict with * _hash_freeovflpage's attempt to update the sibling links of the - * removed page. In that case, we don't need to lock it again. + * removed page. However, in that case we are done anyway, so we can + * simply drop the write lock before calling _hash_freeovflpage. */ rblkno = ropaque->hasho_prevblkno; Assert(BlockNumberIsValid(rblkno)); - /* free this overflow page (releases rbuf) */ - _hash_freeovflpage(rel, bucket_buf, rbuf, wbuf, itups, itup_offsets, - tups_size, nitups, bstrategy); - - /* be tidy */ - for (i = 0; i < nitups; i++) - pfree(itups[i]); - /* are we freeing the page adjacent to wbuf? */ if (rblkno == wblkno) { - /* retain the pin on primary bucket page till end of bucket scan */ - if (wblkno == bucket_blkno) - LockBuffer(wbuf, BUFFER_LOCK_UNLOCK); + /* yes, so release wbuf lock first */ + if (wbuf_dirty) + _hash_wrtbuf(rel, wbuf); else _hash_relbuf(rel, wbuf); + /* free this overflow page (releases rbuf) */ + _hash_freeovflpage(rel, rbuf, bstrategy); + /* done */ return; } - rbuf = _hash_getbuf_with_strategy(rel, - rblkno, - HASH_WRITE, - LH_OVERFLOW_PAGE, - bstrategy); + /* free this overflow page, then get the previous one */ + _hash_freeovflpage(rel, rbuf, bstrategy); + + rbuf = _hash_getbuf_with_strategy(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE, bstrategy); rpage = BufferGetPage(rbuf); - ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage); + ropaque = (HashPageOpaque)PageGetSpecialPointer(rpage); Assert(ropaque->hasho_bucket == bucket); } diff --git a/src/gausskernel/storage/access/hash/hashpage.cpp b/src/gausskernel/storage/access/hash/hashpage.cpp index 2ee6c9eca..0524558eb 100644 --- a/src/gausskernel/storage/access/hash/hashpage.cpp +++ b/src/gausskernel/storage/access/hash/hashpage.cpp @@ -3,8 +3,8 @@ * hashpage.cpp * Hash table page management code for the openGauss hash access method * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. 
+ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -32,18 +32,14 @@ #include "access/hash.h" #include "storage/buf/buf_internals.h" -#include "access/hash_xlog.h" -#include "access/xloginsert.h" #include "miscadmin.h" #include "storage/lmgr.h" #include "storage/smgr/smgr.h" #include "utils/aiomem.h" static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks); -static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, - Bucket nbucket, Buffer obuf, Buffer nbuf, HTAB *htab, - uint32 maxbucket, uint32 highmask, uint32 lowmask); -static void log_split_page(Relation rel, Buffer buf); +static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, BlockNumber start_oblkno, + BlockNumber start_nblkno, uint32 maxbucket, uint32 highmask, uint32 lowmask); /* * We use high-concurrency locking on hash indexes (see README for an overview @@ -54,6 +50,42 @@ static void log_split_page(Relation rel, Buffer buf); */ #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel)) +/* + * _hash_getlock() -- Acquire an lmgr lock. + * + * 'whichlock' should be zero to acquire the split-control lock, or the + * block number of a bucket's primary bucket page to acquire the per-bucket + * lock. (See README for details of the use of these locks.) + * + * 'access' must be HASH_SHARE or HASH_EXCLUSIVE. + */ +void _hash_getlock(Relation rel, BlockNumber whichlock, int access) +{ + if (USELOCKING(rel)) + LockPage(rel, whichlock, access); +} + +/* + * _hash_try_getlock() -- Acquire an lmgr lock, but only if it's free. + * + * Same as above except we return FALSE without blocking if lock isn't free. + */ +bool _hash_try_getlock(Relation rel, BlockNumber whichlock, int access) +{ + if (USELOCKING(rel)) + return ConditionalLockPage(rel, whichlock, access); + else + return true; +} + +/* + * _hash_droplock() -- Release an lmgr lock. + */ +void _hash_droplock(Relation rel, BlockNumber whichlock, int access) +{ + if (USELOCKING(rel)) + UnlockPage(rel, whichlock, access); +} /* * _hash_getbuf() -- Get a buffer by block number for read or write. @@ -90,44 +122,18 @@ Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags) return buf; } -/* - * _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup. - * - * We read the page and try to acquire a cleanup lock. If we get it, - * we return the buffer; otherwise, we return InvalidBuffer. - */ -Buffer _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags) -{ - Buffer buf; - - if (blkno == P_NEW) - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("hash AM does not use P_NEW"))); - - buf = ReadBuffer(rel, blkno); - - if (!ConditionalLockBufferForCleanup(buf)) { - ReleaseBuffer(buf); - return InvalidBuffer; - } - - /* ref count and lock type are correct */ - _hash_checkpage(rel, buf, flags); - - return buf; -} - /* * _hash_getinitbuf() -- Get and initialize a buffer by block number. * * This must be used only to fetch pages that are known to be before * the index's filesystem EOF, but are to be filled from scratch. - * _hash_pageinit() is applied automatically. Otherwise it has + * _hash_pageinit() is applied automatically. Otherwise it has * effects similar to _hash_getbuf() with access = HASH_WRITE. 
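The `_hash_getlock` family reintroduced here carries the pre-WAL hash locking scheme: lmgr page lock 0 serves as the split-control lock, and each bucket's primary block number doubles as that bucket's lock tag. A sketch of the insert-path call pattern with stubbed primitives (in the server these wrap `LockPage`/`UnlockPage` on the index relation and honour `USELOCKING()`; the block values are placeholders):

```
#include <cstdio>

typedef unsigned int BlockNumber;
enum { HASH_SHARE, HASH_EXCLUSIVE };

/* Stubs standing in for the lmgr wrappers restored above. */
static void hash_getlock(BlockNumber whichlock, int access)
{
    printf("lock page %u (%s)\n", whichlock,
           access == HASH_SHARE ? "share" : "exclusive");
}
static void hash_droplock(BlockNumber whichlock, int access)
{
    (void) access;
    printf("unlock page %u\n", whichlock);
}

int main()
{
    const BlockNumber SPLIT_CONTROL = 0; /* lock tag 0 = split-control lock */
    const BlockNumber bucket_blkno = 7;  /* hypothetical primary bucket page */

    /* _hash_doinsert pattern: hold the split-control lock only long
     * enough to pin down the target bucket's own lock. */
    hash_getlock(SPLIT_CONTROL, HASH_SHARE);
    hash_getlock(bucket_blkno, HASH_SHARE);
    hash_droplock(SPLIT_CONTROL, HASH_SHARE);
    /* ... fetch bucket pages and do the insertion ... */
    hash_droplock(bucket_blkno, HASH_SHARE);
    return 0;
}
```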
- * + * * When this routine returns, a write lock is set on the * requested buffer and its reference count has been incremented * (ie, the buffer is "locked and pinned"). - * + * * P_NEW is disallowed because this routine can only be used * to access pages that are known to be before the filesystem EOF. * Extending the index should be done with _hash_getnewbuf. @@ -150,34 +156,6 @@ Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno) return buf; } -/* - * _hash_initbuf() -- Get and initialize a buffer by bucket number. - */ -void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage) -{ - HashPageOpaque pageopaque; - Page page; - - page = BufferGetPage(buf); - - /* initialize the page */ - if (initpage) - _hash_pageinit(page, BufferGetPageSize(buf)); - - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); - - /* - * Set hasho_prevblkno with current hashm_maxbucket. This value will be - * used to validate cached HashMetaPageData. See - * _hash_getbucketbuf_from_hashkey(). - */ - pageopaque->hasho_prevblkno = max_bucket; - pageopaque->hasho_nextblkno = InvalidBlockNumber; - pageopaque->hasho_bucket = num_bucket; - pageopaque->hasho_flag = flag; - pageopaque->hasho_page_id = HASHO_PAGE_ID; -} - /* * _hash_getnewbuf() -- Get a new page at the end of the index. * @@ -188,9 +166,7 @@ void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag * EOF but before updating the metapage to reflect the added page.) * * It is caller's responsibility to ensure that only one process can - * extend the index at a time. In practice, this function is called - * only while holding write lock on the metapage, because adding a page - * is always associated with an update of metapage data. + * extend the index at a time. */ Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum) { @@ -270,37 +246,48 @@ void _hash_dropbuf(Relation rel, Buffer buf) } /* - * _hash_dropscanbuf() -- release buffers used in scan. + * _hash_wrtbuf() -- write a hash page to disk. * - * This routine unpins the buffers used during scan on which we - * hold no lock. + * This routine releases the lock held on the buffer and our refcount + * for it. It is an error to call _hash_wrtbuf() without a write lock + * and a pin on the buffer. + * + * NOTE: this routine should go away when/if hash indexes are WAL-ified. + * The correct sequence of operations is to mark the buffer dirty, then + * write the WAL record, then release the lock and pin; so marking dirty + * can't be combined with releasing. 
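The `_hash_wrtbuf()`/`_hash_chgbufaccess()` helpers restored in this hunk bundle the dirty-mark/unlock/relock steps the rest of these files rely on. A lock-state-only model of the transition helper (no buffer manager; the pin, which the real helper preserves, is not modeled):

```
#include <cstdio>

enum Access { HASH_NOLOCK, HASH_READ, HASH_WRITE };

struct Buf {
    Access held;
    bool dirty;
};

/* Model of _hash_chgbufaccess(): leaving HASH_WRITE implies the page
 * was modified, so it is marked dirty before the lock is released. */
static void chg_access(Buf &b, Access from, Access to)
{
    if (from == HASH_WRITE)
        b.dirty = true;       /* MarkBufferDirty */
    if (from != HASH_NOLOCK)
        b.held = HASH_NOLOCK; /* LockBuffer(..., BUFFER_LOCK_UNLOCK) */
    if (to != HASH_NOLOCK)
        b.held = to;          /* LockBuffer(..., to) */
}

int main()
{
    Buf metabuf = {HASH_NOLOCK, false};
    chg_access(metabuf, HASH_NOLOCK, HASH_WRITE); /* take write lock */
    chg_access(metabuf, HASH_WRITE, HASH_NOLOCK); /* drop it, marking dirty */
    printf("held=%d dirty=%d\n", metabuf.held, metabuf.dirty); /* held=0 dirty=1 */
    return 0;
}
```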
*/ -void _hash_dropscanbuf(Relation rel, HashScanOpaque so) +void _hash_wrtbuf(Relation rel, Buffer buf) { - /* release pin we hold on primary bucket page */ - if (BufferIsValid(so->hashso_bucket_buf) && so->hashso_bucket_buf != so->hashso_curbuf) - _hash_dropbuf(rel, so->hashso_bucket_buf); - so->hashso_bucket_buf = InvalidBuffer; - - /* release pin we hold on primary bucket page of bucket being split */ - if (BufferIsValid(so->hashso_split_bucket_buf) && so->hashso_split_bucket_buf != so->hashso_curbuf) - _hash_dropbuf(rel, so->hashso_split_bucket_buf); - so->hashso_split_bucket_buf = InvalidBuffer; - - /* release any pin we still hold */ - if (BufferIsValid(so->hashso_curbuf)) - _hash_dropbuf(rel, so->hashso_curbuf); - so->hashso_curbuf = InvalidBuffer; - - /* reset split scan */ - so->hashso_buc_populated = false; - so->hashso_buc_split = false; + MarkBufferDirty(buf); + UnlockReleaseBuffer(buf); } +/* + * _hash_chgbufaccess() -- Change the lock type on a buffer, without + * dropping our pin on it. + * + * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK, + * the last indicating that no buffer-level lock is held or wanted. + * + * When from_access == HASH_WRITE, we assume the buffer is dirty and tell + * bufmgr it must be written out. If the caller wants to release a write + * lock on a page that's not been modified, it's okay to pass from_access + * as HASH_READ (a bit ugly, but handy in some places). + */ +void _hash_chgbufaccess(Relation rel, Buffer buf, int from_access, int to_access) +{ + if (from_access == HASH_WRITE) + MarkBufferDirty(buf); + if (from_access != HASH_NOLOCK) + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + if (to_access != HASH_NOLOCK) + LockBuffer(buf, to_access); +} /* - * _hash_init() -- Initialize the metadata page of a hash index, - * the initial buckets, and the initial bitmap page. + * _hash_metapinit() -- Initialize the metadata page of a hash index, + * the initial buckets, and the initial bitmap page. * * The initial number of buckets is dependent on num_tuples, an estimate * of the number of tuples to be loaded into the index initially. The @@ -310,37 +297,30 @@ void _hash_dropscanbuf(Relation rel, HashScanOpaque so) * could be accessing this index. In particular the rule about not holding * multiple buffer locks is ignored. */ -uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum) +uint32 _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) { + HashMetaPage metap; + HashPageOpaque pageopaque; Buffer metabuf; Buffer buf; - Buffer bitmapbuf; Page pg; - HashMetaPage metap; - RegProcedure procid; uint32 data_width; uint32 item_width; uint32 ffactor; + double dnumbuckets; uint32 num_buckets; + uint32 log2_num_buckets; uint32 i; - bool use_wal = false; /* safety check */ if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0) ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("cannot initialize non-empty hash index \"%s\"", RelationGetRelationName(rel)))); - /* - * WAL log creation of pages if the relation is persistent, or this is the - * init fork. Init forks for unlogged relations always need to be WAL - * logged. - */ - use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM; - /* * Determine the target fill factor (in tuples per bucket) for this index. * The idea is to make the fill factor correspond to pages about as full - * as the user-settable fillfactor parameter says. We can compute it + * as the user-settable fillfactor parameter says. 
We can compute it * exactly since the index datatype (i.e. uint32 hash key) is fixed-width. */ data_width = sizeof(uint32); @@ -351,189 +331,55 @@ uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum) if (ffactor < 10) ffactor = 10; - procid = index_getprocid(rel, 1, HASHPROC); + /* + * Choose the number of initial bucket pages to match the fill factor + * given the estimated number of tuples. We round up the result to the + * next power of 2, however, and always force at least 2 bucket pages. The + * upper limit is determined by considerations explained in _hash_expandtable(). + */ + dnumbuckets = num_tuples / ffactor; + if (dnumbuckets <= 2.0) + num_buckets = 2; + else if (dnumbuckets >= (double)0x40000000) + num_buckets = 0x40000000; + else + num_buckets = ((uint32)1) << _hash_log2((uint32)dnumbuckets); + + log2_num_buckets = _hash_log2(num_buckets); + Assert(num_buckets == (((uint32)1) << log2_num_buckets)); + Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS); /* * We initialize the metapage, the first N bucket pages, and the first * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend() - * calls to occur. This ensures that the smgr level has the right idea of + * calls to occur. This ensures that the smgr level has the right idea of * the physical index length. - * - * Critical section not required, because on error the creation of the - * whole relation will be rolled back. */ metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum); - _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false); - MarkBufferDirty(metabuf); - pg = BufferGetPage(metabuf); - metap = HashPageGetMeta(pg); - /* XLOG stuff */ - if (use_wal) { - xl_hash_init_meta_page xlrec; - XLogRecPtr recptr; - - xlrec.num_tuples = num_tuples; - xlrec.procid = metap->hashm_procid; - xlrec.ffactor = metap->hashm_ffactor; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage); - XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE); - - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - num_buckets = metap->hashm_maxbucket + 1; - - /* - * Release buffer lock on the metapage while we initialize buckets. - * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS - * won't accomplish anything. It's a bad idea to hold buffer locks for - * long intervals in any case, since that can block the bgwriter. 
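The restored sizing logic rounds the estimated bucket count up to a power of two and clamps it to [2, 2^30]. A standalone rendering (here `hash_log2(n)` is the smallest `i` with `2^i >= n`, matching how `_hash_log2` is used above):

```
#include <cstdint>
#include <cstdio>

/* smallest i such that (1 << i) >= num */
static uint32_t hash_log2(uint32_t num)
{
    uint32_t i = 0;
    for (uint32_t limit = 1; limit < num; limit <<= 1)
        i++;
    return i;
}

/* Model of the initial bucket-page count chosen by _hash_metapinit(). */
static uint32_t initial_buckets(double num_tuples, uint32_t ffactor)
{
    double dnumbuckets = num_tuples / ffactor;
    if (dnumbuckets <= 2.0)
        return 2;
    if (dnumbuckets >= (double) 0x40000000)
        return 0x40000000;
    return 1U << hash_log2((uint32_t) dnumbuckets);
}

int main()
{
    /* e.g. 10000 tuples at 80 tuples/bucket: 125 rounds up to 128 */
    printf("%u\n", initial_buckets(10000.0, 80));
    return 0;
}
```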
- */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - - /* - * Initialize and WAL Log the first N buckets - */ - for (i = 0; i < num_buckets; i++) { - BlockNumber blkno; - - /* Allow interrupts, in case N is huge */ - CHECK_FOR_INTERRUPTS(); - - blkno = BUCKET_TO_BLKNO(metap, i); - buf = _hash_getnewbuf(rel, blkno, forkNum); - _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false); - MarkBufferDirty(buf); - - if (use_wal) - log_newpage(&rel->rd_node, - forkNum, - blkno, - BufferGetPage(buf), - true); - _hash_relbuf(rel, buf); - } - - /* Now reacquire buffer lock on metapage */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); - - /* - * Initialize bitmap page - */ - bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum); - _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false); - MarkBufferDirty(bitmapbuf); - - /* add the new bitmap page to the metapage's list of bitmaps */ - /* metapage already has a write lock */ - if (metap->hashm_nmaps >= HASH_MAX_BITMAPS) - ereport(ERROR, - (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("out of overflow pages in hash index \"%s\"", - RelationGetRelationName(rel)))); - - metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1; - - metap->hashm_nmaps++; - MarkBufferDirty(metabuf); - - /* XLOG stuff */ - if (use_wal) { - xl_hash_init_bitmap_page xlrec; - XLogRecPtr recptr; - - xlrec.bmsize = metap->hashm_bmsize; - - XLogBeginInsert(); - XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage); - XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT); - - /* - * This is safe only because nobody else can be modifying the index at - * this stage; it's only visible to the transaction that is creating - * it. - */ - XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE); - - PageSetLSN(BufferGetPage(bitmapbuf), recptr); - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - /* all done */ - _hash_relbuf(rel, bitmapbuf); - _hash_relbuf(rel, metabuf); - - return num_buckets; -} - -/* - * _hash_init_metabuffer() -- Initialize the metadata page of a hash index. - */ -void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, - uint16 ffactor, bool initpage) -{ - HashMetaPage metap; - HashPageOpaque pageopaque; - Page page; - double dnumbuckets; - uint32 num_buckets; - uint32 spare_index; - uint32 i; - - /* - * Choose the number of initial bucket pages to match the fill factor - * given the estimated number of tuples. We round up the result to the - * total number of buckets which has to be allocated before using its - * _hashm_spare element. However always force at least 2 bucket pages. The - * upper limit is determined by considerations explained in - * _hash_expandtable(). 
- */ - Assert(ffactor != 0); - dnumbuckets = num_tuples / ffactor; - if (dnumbuckets <= 2.0) - num_buckets = 2; - else if (dnumbuckets >= (double) 0x40000000) - num_buckets = 0x40000000; - else - num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets)); - - spare_index = _hash_spareindex(num_buckets); - Assert(spare_index < HASH_MAX_SPLITPOINTS); - - page = BufferGetPage(buf); - if (initpage) - _hash_pageinit(page, BufferGetPageSize(buf)); - - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); + pageopaque = (HashPageOpaque)PageGetSpecialPointer(pg); pageopaque->hasho_prevblkno = InvalidBlockNumber; pageopaque->hasho_nextblkno = InvalidBlockNumber; - pageopaque->hasho_bucket = -1; + pageopaque->hasho_bucket = INVALID_BUCKET_NUM; pageopaque->hasho_flag = LH_META_PAGE; pageopaque->hasho_page_id = HASHO_PAGE_ID; - metap = HashPageGetMeta(page); + metap = HashPageGetMeta(pg); metap->hashm_magic = HASH_MAGIC; metap->hashm_version = HASH_VERSION; metap->hashm_ntuples = 0; metap->hashm_nmaps = 0; - metap->hashm_ffactor = ffactor; - metap->hashm_bsize = HashGetMaxBitmapSize(page); + metap->hashm_ffactor = (uint16)ffactor; + metap->hashm_bsize = (uint16)HashGetMaxBitmapSize(pg); /* find largest bitmap array size that will fit in page size */ for (i = _hash_log2(metap->hashm_bsize); i > 0; --i) { - if ((1 << i) <= metap->hashm_bsize) + if (((uint32)1 << i) <= metap->hashm_bsize) break; } Assert(i > 0); - metap->hashm_bmsize = 1 << i; + metap->hashm_bmsize = (uint32)1 << i; metap->hashm_bmshift = i + BYTE_TO_BIT; Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1)); @@ -542,20 +388,15 @@ void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, * pretty useless for normal operation (in fact, hashm_procid is not used * anywhere), but it might be handy for forensic purposes so we keep it. */ - metap->hashm_procid = procid; + metap->hashm_procid = index_getprocid(rel, 1, HASHPROC); /* * We initialize the index with N buckets, 0 .. N-1, occupying physical - * blocks 1 to N. The first freespace bitmap page is in block N+1. + * blocks 1 to N. The first freespace bitmap page is in block N+1. Since + * N is a power of 2, we can set the masks this way: */ - metap->hashm_maxbucket = num_buckets - 1; - - /* - * Set highmask as next immediate ((2 ^ x) - 1), which should be - * sufficient to cover num_buckets. - */ - metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1; - metap->hashm_lowmask = (metap->hashm_highmask >> 1); + metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1; + metap->hashm_highmask = (num_buckets << 1) - 1; errno_t ret = memset_s(metap->hashm_spares, sizeof(metap->hashm_spares), 0, sizeof(metap->hashm_spares)); securec_check(ret, "", ""); @@ -563,34 +404,65 @@ void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, securec_check(ret, "", ""); /* Set up mapping for one spare page after the initial splitpoints */ - metap->hashm_spares[spare_index] = 1; - metap->hashm_ovflpoint = spare_index; + metap->hashm_spares[log2_num_buckets] = 1; + metap->hashm_ovflpoint = log2_num_buckets; metap->hashm_firstfree = 0; /* - * Set pd_lower just past the end of the metadata. This is to log full - * page image of metapage in xloginsert.c. + * Release buffer lock on the metapage while we initialize buckets. + * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS + * won't accomplish anything. It's a bad idea to hold buffer locks for + * long intervals in any case, since that can block the bgwriter. 
*/ - ((PageHeader) page)->pd_lower = - ((char *) metap + sizeof(HashMetaPageData)) - (char *) page; + _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK); + + /* + * Initialize the first N buckets + */ + for (i = 0; i < num_buckets; i++) { + /* Allow interrupts, in case N is huge */ + CHECK_FOR_INTERRUPTS(); + + buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum); + pg = BufferGetPage(buf); + pageopaque = (HashPageOpaque)PageGetSpecialPointer(pg); + pageopaque->hasho_prevblkno = InvalidBlockNumber; + pageopaque->hasho_nextblkno = InvalidBlockNumber; + pageopaque->hasho_bucket = i; + pageopaque->hasho_flag = LH_BUCKET_PAGE; + pageopaque->hasho_page_id = HASHO_PAGE_ID; + _hash_wrtbuf(rel, buf); + } + + /* Now reacquire buffer lock on metapage */ + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); + + /* + * Initialize first bitmap page + */ + _hash_initbitmap(rel, metap, num_buckets + 1, forkNum); + + /* all done */ + _hash_wrtbuf(rel, metabuf); + + return num_buckets; } /* - * _hash_pageinit() -- Initialize a new hash index page. + * _hash_pageinit() -- Initialize a new hash index page. */ void _hash_pageinit(Page page, Size size) { + Assert(PageIsNew(page)); PageInit(page, size, sizeof(HashPageOpaqueData)); } /* * Attempt to expand the hash table by creating one new bucket. * - * This will silently do nothing if we don't get cleanup lock on old or - * new bucket. + * This will silently do nothing if it cannot get the needed locks. * - * Complete the pending splits and remove the tuples from old bucket, - * if there are any left over from the previous split. + * The caller should hold no locks on the hash index. * * The caller must hold a pin, but no lock, on the metapage buffer. * The buffer is returned in the same state. @@ -603,36 +475,37 @@ void _hash_expandtable(Relation rel, Buffer metabuf) uint32 spare_ndx; BlockNumber start_oblkno; BlockNumber start_nblkno; - Buffer buf_nblkno; - Buffer buf_oblkno; - Page opage; - Page npage; - HashPageOpaque oopaque; - HashPageOpaque nopaque; uint32 maxbucket; uint32 highmask; uint32 lowmask; - bool metap_update_masks = false; - bool metap_update_splitpoint = false; - -restart_expand: /* - * Write-lock the meta page. It used to be necessary to acquire a - * heavyweight lock to begin a split, but that is no longer required. + * Obtain the page-zero lock to assert the right to begin a split (see + * README). + * + * Note: deadlock should be impossible here. Our own backend could only be + * holding bucket sharelocks due to stopped indexscans; those will not + * block other holders of the page-zero lock, who are only interested in + * acquiring bucket sharelocks themselves. Exclusive bucket locks are + * only taken here and in hashbulkdelete, and neither of these operations + * needs any additional locks to complete. (If, due to some flaw in this + * reasoning, we manage to deadlock anyway, it's okay to error out; the + * index will be left in a consistent state.) */ - LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE); + _hash_getlock(rel, 0, HASH_EXCLUSIVE); + + /* Write-lock the meta page */ + _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); _hash_checkpage(rel, metabuf, LH_META_PAGE); metap = HashPageGetMeta(BufferGetPage(metabuf)); - /* * Check to see if split is still needed; someone else might have already * done one while we waited for the lock. 
* * Make sure this stays in sync with _hash_doinsert() */ - if (metap->hashm_ntuples <= (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1)) + if (metap->hashm_ntuples <= (double)metap->hashm_ffactor * (metap->hashm_maxbucket + 1)) goto fail; /* @@ -647,23 +520,17 @@ restart_expand: * _hash_alloc_buckets() would fail, but if we supported buckets smaller * than a disk block then this would be an independent constraint. * - * If you change this, see also the maximum initial number of buckets in - * _hash_init(). + * If you change this, see also the maximum initial number of buckets in _hash_metapinit(). */ - if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE) + if (metap->hashm_maxbucket >= (uint32)0x7FFFFFFE) goto fail; /* - * Determine which bucket is to be split, and attempt to take cleanup lock - * on the old bucket. If we can't get the lock, give up. + * Determine which bucket is to be split, and attempt to lock the old + * bucket. If we can't get the lock, give up. * - * The cleanup lock protects us not only against other backends, but - * against our own backend as well. - * - * The cleanup lock is mainly to protect the split from concurrent - * inserts. See src/backend/access/hash/README, Lock Definitions for - * further details. Due to this locking restriction, if there is any - * pending scan, the split will give up which is not good, but harmless. + * The lock protects us against other backends, but not against our own + * backend. Must check for active scans separately. */ new_bucket = metap->hashm_maxbucket + 1; @@ -671,84 +538,14 @@ restart_expand: start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket); - buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE); - if (!buf_oblkno) + if (_hash_has_active_scan(rel, old_bucket)) goto fail; - opage = BufferGetPage(buf_oblkno); - oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE)) + goto fail; /* - * We want to finish the split from a bucket as there is no apparent - * benefit by not doing so and it will make the code complicated to finish - * the split that involves multiple buckets considering the case where new - * split also fails. We don't need to consider the new bucket for - * completing the split here as it is not possible that a re-split of new - * bucket starts when there is still a pending split from old bucket. - */ - if (H_BUCKET_BEING_SPLIT(oopaque)) { - /* - * Copy bucket mapping info now; refer the comment in code below where - * we copy this information before calling _hash_splitbucket to see - * why this is okay. - */ - maxbucket = metap->hashm_maxbucket; - highmask = metap->hashm_highmask; - lowmask = metap->hashm_lowmask; - - /* - * Release the lock on metapage and old_bucket, before completing the - * split. - */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK); - - _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket, - highmask, lowmask); - - /* release the pin on old buffer and retry for expand. */ - _hash_dropbuf(rel, buf_oblkno); - - goto restart_expand; - } - - /* - * Clean the tuples remained from the previous split. This operation - * requires cleanup lock and we already have one on the old bucket, so - * let's do it. We also don't want to allow further splits from the bucket - * till the garbage of previous split is cleaned. 
This has two - * advantages; first, it helps in avoiding the bloat due to garbage and - * second is, during cleanup of bucket, we are always sure that the - * garbage tuples belong to most recently split bucket. On the contrary, - * if we allow cleanup of bucket after meta page is updated to indicate - * the new split and before the actual split, the cleanup operation won't - * be able to decide whether the tuple has been moved to the newly created - * bucket and ended up deleting such tuples. - */ - if (H_NEEDS_SPLIT_CLEANUP(oopaque)) { - /* - * Copy bucket mapping info now; refer to the comment in code below - * where we copy this information before calling _hash_splitbucket to - * see why this is okay. - */ - maxbucket = metap->hashm_maxbucket; - highmask = metap->hashm_highmask; - lowmask = metap->hashm_lowmask; - - /* Release the metapage lock. */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); - - hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL, - maxbucket, highmask, lowmask, NULL, NULL, true, - NULL, NULL); - - _hash_dropbuf(rel, buf_oblkno); - - goto restart_expand; - } - - /* - * There shouldn't be any active scan on new bucket. + * Likewise lock the new bucket (should never fail). * * Note: it is safe to compute the new bucket's blkno here, even though we * may still need to update the BUCKET_TO_BLKNO mapping. This is because @@ -757,168 +554,89 @@ restart_expand: */ start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket); - /* - * If the split point is increasing we need to allocate a new batch of - * bucket pages. - */ - spare_ndx = _hash_spareindex(new_bucket + 1); - if (spare_ndx > metap->hashm_ovflpoint) { - uint32 buckets_to_add; + if (_hash_has_active_scan(rel, new_bucket)) + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("scan in progress on supposedly new bucket"))); + if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE)) + ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("could not get lock on supposedly new bucket"))); + /* + * If the split point is increasing (hashm_maxbucket's log base 2 + * increases), we need to allocate a new batch of bucket pages. + */ + spare_ndx = _hash_log2(new_bucket + 1); + if (spare_ndx > metap->hashm_ovflpoint) { Assert(spare_ndx == metap->hashm_ovflpoint + 1); /* - * We treat allocation of buckets as a separate WAL-logged action. - * Even if we fail after this operation, won't leak bucket pages; - * rather, the next split will consume this space. In any case, even - * without failure we don't use all the space in one split operation. + * The number of buckets in the new splitpoint is equal to the total + * number already in existence, i.e. new_bucket. Currently this maps + * one-to-one to blocks required, but someday we may need a more + * complicated calculation here. */ - buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket; - if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add)) { + if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket)) { /* can't split due to BlockNumber overflow */ - _hash_relbuf(rel, buf_oblkno); + _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE); + _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE); goto fail; } } /* - * Physically allocate the new bucket's primary page. We want to do this - * before changing the metapage's mapping info, in case we can't get the - * disk space. Ideally, we don't need to check for cleanup lock on new - * bucket as no other backend could find this bucket unless meta page is - * updated. 
However, it is good to be consistent with old bucket locking. - */ - buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM); - if (!IsBufferCleanupOK(buf_nblkno)) { - _hash_relbuf(rel, buf_oblkno); - _hash_relbuf(rel, buf_nblkno); - goto fail; - } - - /* - * Since we are scribbling on the pages in the shared buffers, establish a - * critical section. Any failure in this next code leaves us with a big + * Okay to proceed with split. Update the metapage bucket mapping info. + * + * Since we are scribbling on the metapage data right in the shared + * buffer, any failure in this next little bit leaves us with a big * problem: the metapage is effectively corrupt but could get written back - * to disk. + * to disk. We don't really expect any failure, but just to be sure, + * establish a critical section. */ START_CRIT_SECTION(); - /* - * Okay to proceed with split. Update the metapage bucket mapping info. - */ metap->hashm_maxbucket = new_bucket; if (new_bucket > metap->hashm_highmask) { /* Starting a new doubling */ metap->hashm_lowmask = metap->hashm_highmask; metap->hashm_highmask = new_bucket | metap->hashm_lowmask; - metap_update_masks = true; } /* - * If the split point is increasing we need to adjust the hashm_spares[] - * array and hashm_ovflpoint so that future overflow pages will be created - * beyond this new batch of bucket pages. + * If the split point is increasing (hashm_maxbucket's log base 2 + * increases), we need to adjust the hashm_spares[] array and + * hashm_ovflpoint so that future overflow pages will be created beyond + * this new batch of bucket pages. */ if (spare_ndx > metap->hashm_ovflpoint) { metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint]; metap->hashm_ovflpoint = spare_ndx; - metap_update_splitpoint = true; } - MarkBufferDirty(metabuf); + /* Done mucking with metapage */ + END_CRIT_SECTION(); /* * Copy bucket mapping info now; this saves re-accessing the meta page * inside _hash_splitbucket's inner loop. Note that once we drop the * split lock, other splits could begin, so these values might be out of - * date before _hash_splitbucket finishes. That's okay, since all it + * date before _hash_splitbucket finishes. That's okay, since all it * needs is to tell which of these two buckets to map hashkeys into. */ maxbucket = metap->hashm_maxbucket; highmask = metap->hashm_highmask; lowmask = metap->hashm_lowmask; - opage = BufferGetPage(buf_oblkno); - oopaque = (HashPageOpaque)PageGetSpecialPointer(opage); + /* Write out the metapage and drop lock, but keep pin */ + _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK); - /* - * Mark the old bucket to indicate that split is in progress. (At - * operation end, we will clear the split-in-progress flag.) Also, for a - * primary bucket page, hasho_prevblkno stores the number of buckets that - * existed as of the last split, so we must update that value here. - */ - oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT; - oopaque->hasho_prevblkno = maxbucket; - - MarkBufferDirty(buf_oblkno); - - npage = BufferGetPage(buf_nblkno); - - /* - * initialize the new bucket's primary page and mark it to indicate that - * split is in progress. 
- */ - nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); - nopaque->hasho_prevblkno = maxbucket; - nopaque->hasho_nextblkno = InvalidBlockNumber; - nopaque->hasho_bucket = new_bucket; - nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED; - nopaque->hasho_page_id = HASHO_PAGE_ID; - - MarkBufferDirty(buf_nblkno); - - /* XLOG stuff */ - if (RelationNeedsWAL(rel)) { - xl_hash_split_allocate_page xlrec; - XLogRecPtr recptr; - - xlrec.new_bucket = maxbucket; - xlrec.old_bucket_flag = oopaque->hasho_flag; - xlrec.new_bucket_flag = nopaque->hasho_flag; - xlrec.flags = 0; - - XLogBeginInsert(); - - XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD); - XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT); - XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD); - - if (metap_update_masks) { - xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS; - XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32)); - XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32)); - } - - if (metap_update_splitpoint) { - xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT; - XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint, sizeof(uint32)); - XLogRegisterBufData(2, (char *) &metap->hashm_spares[metap->hashm_ovflpoint], sizeof(uint32)); - } - - XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE); - - PageSetLSN(BufferGetPage(buf_oblkno), recptr); - PageSetLSN(BufferGetPage(buf_nblkno), recptr); - PageSetLSN(BufferGetPage(metabuf), recptr); - } - - END_CRIT_SECTION(); - - /* drop lock, but keep pin */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); + /* Release split lock; okay for other splits to occur now */ + _hash_droplock(rel, 0, HASH_EXCLUSIVE); /* Relocate records to the new bucket */ - _hash_splitbucket(rel, metabuf, - old_bucket, new_bucket, - buf_oblkno, buf_nblkno, NULL, - maxbucket, highmask, lowmask); + _hash_splitbucket(rel, metabuf, old_bucket, new_bucket, start_oblkno, start_nblkno, maxbucket, highmask, lowmask); - /* all done, now release the pins on primary buckets. */ - _hash_dropbuf(rel, buf_oblkno); - _hash_dropbuf(rel, buf_nblkno); + /* Release bucket locks, allowing others to access them */ + _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE); + _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE); return; @@ -926,9 +644,11 @@ restart_expand: fail: /* We didn't write the metapage, so just drop lock */ - LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); -} + _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); + /* Release split lock */ + _hash_droplock(rel, 0, HASH_EXCLUSIVE); +} /* * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages @@ -946,7 +666,7 @@ fail: * hash indexes sequentially anyway, that probably doesn't matter. * * XXX It's annoying that this code is executed with the metapage lock held. - * We need to interlock against _hash_addovflpage() adding a new overflow page + * We need to interlock against _hash_getovflpage() adding a new overflow page * concurrently, but it'd likely be better to use LockRelationForExtension * for the purpose. OTOH, adding a splitpoint is a very infrequent operation, * so it may not be worth worrying about. 
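The hunks above revert to the power-of-two splitpoint scheme: `_hash_metapinit()` sets `maxbucket = lowmask = num_buckets - 1` and `highmask = (num_buckets << 1) - 1`, seeds `hashm_spares[log2_num_buckets] = 1` for the bitmap page, and `_hash_expandtable()` bumps the masks at each doubling. A minimal, self-contained C sketch of that arithmetic may help when reviewing these hunks; `hash_log2`, `key_to_bucket`, `bucket_to_blkno`, and `spares` are stand-in names of my own choosing that mirror `_hash_log2()`, `_hash_hashkey2bucket()`, `BUCKET_TO_BLKNO()`, and `hashm_spares[]`, not the kernel definitions themselves.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ceiling log2, mirroring _hash_log2(): smallest i with (1 << i) >= num. */
static uint32_t hash_log2(uint32_t num)
{
    uint32_t i = 0;

    for (uint32_t limit = 1; limit < num; limit <<= 1)
        i++;
    return i;
}

/*
 * Route a hash key to a bucket the way _hash_hashkey2bucket() does: mask
 * with highmask, then fall back to lowmask for buckets that have not been
 * created yet in the current doubling.
 */
static uint32_t key_to_bucket(uint32_t hashkey, uint32_t maxbucket,
                              uint32_t highmask, uint32_t lowmask)
{
    uint32_t bucket = hashkey & highmask;

    if (bucket > maxbucket)
        bucket &= lowmask;
    return bucket;
}

/*
 * Bucket-to-block arithmetic in the spirit of BUCKET_TO_BLKNO(): bucket B
 * lives at block B + 1 + (overflow pages allocated before B's splitpoint),
 * with spares[] standing in for the metapage's hashm_spares[].
 */
static uint32_t bucket_to_blkno(const uint32_t *spares, uint32_t bucket)
{
    uint32_t before = bucket ? spares[hash_log2(bucket + 1) - 1] : 0;

    return bucket + before + 1;
}

int main(void)
{
    /* Metapage state right after _hash_metapinit() with num_buckets = 4. */
    uint32_t maxbucket = 3;                 /* buckets 0..3 exist */
    uint32_t lowmask = 3;                   /* num_buckets - 1 */
    uint32_t highmask = 7;                  /* (num_buckets << 1) - 1 */
    uint32_t spares[32] = {0};

    spares[hash_log2(4)] = 1;               /* the one bitmap page */

    /* Any key routes to an existing bucket; buckets 0..3 sit at blocks 1..4. */
    assert(key_to_bucket(0xdeadbeefU, maxbucket, highmask, lowmask) <= maxbucket);
    for (uint32_t b = 0; b <= maxbucket; b++)
        printf("bucket %u -> block %u\n", b, bucket_to_blkno(spares, b));
    return 0;
}
```

The invariant the sketch relies on is the one the reverted code restores: within a splitpoint all bucket pages are physically contiguous, so a single per-splitpoint overflow-page count is enough to turn a bucket number into a block number.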
@@ -957,9 +677,7 @@ fail: static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks) { BlockNumber lastblock; - char zerobuf[BLCKSZ]; - Page page; - HashPageOpaque ovflopaque; + char *zerobuf = NULL; lastblock = firstblock + nblocks - 1; /* @@ -977,93 +695,68 @@ static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nbl #endif ReleaseBuffer(buf); } else { - page = (Page)zerobuf; - - /* - * Initialize the page. Just zeroing the page won't work; see - * _hash_freeovflpage for similar usage. We take care to make the special - * space valid for the benefit of tools such as pageinspect. - */ - _hash_pageinit(page, BLCKSZ); - - ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page); - - ovflopaque->hasho_prevblkno = InvalidBlockNumber; - ovflopaque->hasho_nextblkno = InvalidBlockNumber; - ovflopaque->hasho_bucket = -1; - ovflopaque->hasho_flag = LH_UNUSED_PAGE; - ovflopaque->hasho_page_id = HASHO_PAGE_ID; - - if (RelationNeedsWAL(rel)) - log_newpage(&rel->rd_node, - MAIN_FORKNUM, - lastblock, - zerobuf, - true); - + zerobuf = (char *)adio_align_alloc(BLCKSZ); + if (zerobuf != NULL) { + MemSet(zerobuf, 0, BLCKSZ); + } RelationOpenSmgr(rel); PageSetChecksumInplace(zerobuf, lastblock); smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false); + + adio_align_free(zerobuf); } return true; } - /* * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket' * - * This routine is used to partition the tuples between old and new bucket and - * is used to finish the incomplete split operations. To finish the previously - * interrupted split operation, the caller needs to fill htab. If htab is set, - * then we skip the movement of tuples that exists in htab, otherwise NULL - * value of htab indicates movement of all the tuples that belong to the new - * bucket. - * * We are splitting a bucket that consists of a base bucket page and zero * or more overflow (bucket chain) pages. We must relocate tuples that - * belong in the new bucket. + * belong in the new bucket, and compress out any free space in the old + * bucket. * - * The caller must hold cleanup locks on both buckets to ensure that + * The caller must hold exclusive locks on both buckets to ensure that * no one else is trying to access them (see README). * * The caller must hold a pin, but no lock, on the metapage buffer. * The buffer is returned in the same state. (The metapage is only * touched if it becomes necessary to add or remove overflow pages.) - * - * Split needs to retain pin on primary bucket pages of both old and new - * buckets till end of operation. This is to prevent vacuum from starting - * while a split is in progress. - * - * In addition, the caller must have created the new bucket's base page, - * which is passed in buffer nbuf, pinned and write-locked. The lock will be - * released here and pin must be released by the caller. (The API is set up - * this way because we must do _hash_getnewbuf() before releasing the metapage - * write lock. So instead of passing the new bucket's start block number, we - * pass an actual buffer.) 
*/ -static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, Buffer obuf, - Buffer nbuf, HTAB *htab, uint32 maxbucket, uint32 highmask, uint32 lowmask) +static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket, BlockNumber start_oblkno, + BlockNumber start_nblkno, uint32 maxbucket, uint32 highmask, uint32 lowmask) { - Buffer bucket_obuf; - Buffer bucket_nbuf; + BlockNumber oblkno; + BlockNumber nblkno; + Buffer obuf; + Buffer nbuf; Page opage; Page npage; HashPageOpaque oopaque; HashPageOpaque nopaque; - OffsetNumber itup_offsets[MaxIndexTuplesPerPage]; - IndexTuple itups[MaxIndexTuplesPerPage]; - Size all_tups_size = 0; - int i; - uint16 nitups = 0; - bucket_obuf = obuf; + /* + * It should be okay to simultaneously write-lock pages from each bucket, + * since no one else can be trying to acquire buffer lock on pages of + * either bucket. + */ + oblkno = start_oblkno; + obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_BUCKET_PAGE); opage = BufferGetPage(obuf); - oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + oopaque = (HashPageOpaque)PageGetSpecialPointer(opage); - bucket_nbuf = nbuf; + nblkno = start_nblkno; + nbuf = _hash_getnewbuf(rel, nblkno, MAIN_FORKNUM); npage = BufferGetPage(nbuf); - nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); + + /* initialize the new bucket's primary page */ + nopaque = (HashPageOpaque)PageGetSpecialPointer(npage); + nopaque->hasho_prevblkno = InvalidBlockNumber; + nopaque->hasho_nextblkno = InvalidBlockNumber; + nopaque->hasho_bucket = nbucket; + nopaque->hasho_flag = LH_BUCKET_PAGE; + nopaque->hasho_page_id = HASHO_PAGE_ID; /* * Partition the tuples in the old bucket between the old bucket and the @@ -1072,9 +765,10 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Buck * once per page in old bucket. */ for (;;) { - BlockNumber oblkno; OffsetNumber ooffnum; OffsetNumber omaxoffnum; + OffsetNumber deletable[MaxOffsetNumber]; + int ndeletable = 0; /* Scan each tuple in old page */ omaxoffnum = PageGetMaxOffsetNumber(opage); @@ -1082,83 +776,43 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Buck IndexTuple itup; Size itemsz; Bucket bucket; - bool found = false; - - /* skip dead tuples */ - if (ItemIdIsDead(PageGetItemId(opage, ooffnum))) - continue; /* - * Before inserting a tuple, probe the hash table containing TIDs - * of tuples belonging to new bucket, if we find a match, then - * skip that tuple, else fetch the item's hash key (conveniently - * stored in the item) and determine which bucket it now belongs - * in. + * Fetch the item's hash key (conveniently stored in the item) and + * determine which bucket it now belongs in. */ - itup = (IndexTuple) PageGetItem(opage, PageGetItemId(opage, ooffnum)); - - if (htab) - (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found); - - if (found) - continue; - - bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup), - maxbucket, highmask, lowmask); - + itup = (IndexTuple)PageGetItem(opage, PageGetItemId(opage, ooffnum)); + bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup), maxbucket, highmask, lowmask); if (bucket == nbucket) { - IndexTuple new_itup; - - /* - * make a copy of index tuple as we have to scribble on it. - */ - new_itup = CopyIndexTuple(itup); - - /* - * mark the index tuple as moved by split, such tuples are - * skipped by scan if there is split in progress for a bucket. 
- */ - new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK; - /* * insert the tuple into the new bucket. if it doesn't fit on * the current page in the new bucket, we must allocate a new * overflow page and place the tuple on that page instead. */ - itemsz = IndexTupleDSize(*new_itup); + itemsz = IndexTupleDSize(*itup); itemsz = MAXALIGN(itemsz); - - if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz)) { - /* - * Change the shared buffer state in critical section, - * otherwise any error could make it unrecoverable. - */ - START_CRIT_SECTION(); - - _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups); - MarkBufferDirty(nbuf); - /* log the split operation before releasing the lock */ - log_split_page(rel, nbuf); - - END_CRIT_SECTION(); - - /* drop lock, but keep pin */ - LockBuffer(nbuf, BUFFER_LOCK_UNLOCK); - - /* be tidy */ - for (i = 0; i < nitups; i++) - pfree(itups[i]); - nitups = 0; - all_tups_size = 0; - + if (PageGetFreeSpace(npage) < itemsz) { + /* write out nbuf and drop lock, but keep pin */ + _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK); /* chain to a new overflow page */ - nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false); + nbuf = _hash_addovflpage(rel, metabuf, nbuf); npage = BufferGetPage(nbuf); - nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); + /* we don't need nblkno or nopaque within the loop */ } - itups[nitups++] = new_itup; - all_tups_size += itemsz; + /* + * Insert tuple on new page, using _hash_pgaddtup to ensure + * correct ordering by hashkey. This is a tad inefficient + * since we may have to shuffle itempointers repeatedly. + * Possible future improvement: accumulate all the items for + * the new page and qsort them before insertion. + */ + (void)_hash_pgaddtup(rel, nbuf, itemsz, itup); + + /* + * Mark tuple for deletion from old page. + */ + deletable[ndeletable++] = ooffnum; } else { /* * the tuple stays on this page, so nothing to do. @@ -1169,382 +823,35 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, Bucket obucket, Buck oblkno = oopaque->hasho_nextblkno; - /* retain the pin on the old primary bucket */ - if (obuf == bucket_obuf) - LockBuffer(obuf, BUFFER_LOCK_UNLOCK); - else + /* + * Done scanning this old page. If we moved any tuples, delete them + * from the old page. + */ + if (ndeletable > 0) { + PageIndexMultiDelete(opage, deletable, ndeletable); + _hash_wrtbuf(rel, obuf); + } else { _hash_relbuf(rel, obuf); + } /* Exit loop if no more overflow pages in old bucket */ if (!BlockNumberIsValid(oblkno)) { - /* - * Change the shared buffer state in critical section, otherwise - * any error could make it unrecoverable. - */ - START_CRIT_SECTION(); - - _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups); - MarkBufferDirty(nbuf); - /* log the split operation before releasing the lock */ - log_split_page(rel, nbuf); - - END_CRIT_SECTION(); - - if (nbuf == bucket_nbuf) - LockBuffer(nbuf, BUFFER_LOCK_UNLOCK); - else - _hash_relbuf(rel, nbuf); - - /* be tidy */ - for (i = 0; i < nitups; i++) - pfree(itups[i]); break; } /* Else, advance to next old page */ - obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE); + obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE); opage = BufferGetPage(obuf); - oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + oopaque = (HashPageOpaque)PageGetSpecialPointer(opage); } /* * We're at the end of the old bucket chain, so we're done partitioning - * the tuples. 
Mark the old and new buckets to indicate split is - * finished. - * - * To avoid deadlocks due to locking order of buckets, first lock the old - * bucket and then the new bucket. + * the tuples. Before quitting, call _hash_squeezebucket to ensure the + * tuples remaining in the old bucket (including the overflow pages) are + * packed as tightly as possible. The new bucket is already tight. */ - LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE); - opage = BufferGetPage(bucket_obuf); - oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + _hash_wrtbuf(rel, nbuf); - LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE); - npage = BufferGetPage(bucket_nbuf); - nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); - - START_CRIT_SECTION(); - - oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT; - nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED; - - /* - * After the split is finished, mark the old bucket to indicate that it - * contains deletable tuples. We will clear split-cleanup flag after - * deleting such tuples either at the end of split or at the next split - * from old bucket or at the time of vacuum. - */ - oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP; - - /* - * now write the buffers, here we don't release the locks as caller is - * responsible to release locks. - */ - MarkBufferDirty(bucket_obuf); - MarkBufferDirty(bucket_nbuf); - - if (RelationNeedsWAL(rel)) { - XLogRecPtr recptr; - xl_hash_split_complete xlrec; - - xlrec.old_bucket_flag = oopaque->hasho_flag; - xlrec.new_bucket_flag = nopaque->hasho_flag; - - XLogBeginInsert(); - - XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete); - - XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD); - XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE); - - PageSetLSN(BufferGetPage(bucket_obuf), recptr); - PageSetLSN(BufferGetPage(bucket_nbuf), recptr); - } - - END_CRIT_SECTION(); - - /* - * If possible, clean up the old bucket. We might not be able to do this - * if someone else has a pin on it, but if not then we can go ahead. This - * isn't absolutely necessary, but it reduces bloat; if we don't do it - * now, VACUUM will do it eventually, but maybe not until new overflow - * pages have been allocated. Note that there's no need to clean up the - * new bucket. - */ - if (IsBufferCleanupOK(bucket_obuf)) { - LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK); - hashbucketcleanup(rel, obucket, bucket_obuf, - BufferGetBlockNumber(bucket_obuf), NULL, - maxbucket, highmask, lowmask, NULL, NULL, true, - NULL, NULL); - } else { - LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK); - LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK); - } -} - -/* - * _hash_finish_split() -- Finish the previously interrupted split operation - * - * To complete the split operation, we form the hash table of TIDs in new - * bucket which is then used by split operation to skip tuples that are - * already moved before the split operation was previously interrupted. - * - * The caller must hold a pin, but no lock, on the metapage and old bucket's - * primary page buffer. The buffers are returned in the same state. (The - * metapage is only touched if it becomes necessary to add or remove overflow - * pages.) 
- */ -void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, - uint32 maxbucket, uint32 highmask, uint32 lowmask) -{ - HASHCTL hash_ctl; - HTAB *tidhtab; - Buffer bucket_nbuf = InvalidBuffer; - Buffer nbuf; - Page npage; - BlockNumber nblkno; - BlockNumber bucket_nblkno; - HashPageOpaque npageopaque; - Bucket nbucket; - bool found; - errno_t rc = EOK; - - /* Initialize hash tables used to track TIDs */ - rc = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl)); - securec_check_c(rc, "", ""); - hash_ctl.keysize = sizeof(ItemPointerData); - hash_ctl.entrysize = sizeof(ItemPointerData); - hash_ctl.hcxt = CurrentMemoryContext; - - tidhtab = hash_create("bucket ctids", - 256, /* arbitrary initial size */ - &hash_ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - - bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket); - - /* - * Scan the new bucket and build hash table of TIDs - */ - for (;;) { - OffsetNumber noffnum; - OffsetNumber nmaxoffnum; - - nbuf = _hash_getbuf(rel, nblkno, HASH_READ, - LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); - - /* remember the primary bucket buffer to acquire cleanup lock on it. */ - if (nblkno == bucket_nblkno) - bucket_nbuf = nbuf; - - npage = BufferGetPage(nbuf); - npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage); - - /* Scan each tuple in new page */ - nmaxoffnum = PageGetMaxOffsetNumber(npage); - for (noffnum = FirstOffsetNumber; noffnum <= nmaxoffnum; noffnum = OffsetNumberNext(noffnum)) { - IndexTuple itup; - - /* Fetch the item's TID and insert it in hash table. */ - itup = (IndexTuple) PageGetItem(npage, PageGetItemId(npage, noffnum)); - - (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found); - - Assert(!found); - } - - nblkno = npageopaque->hasho_nextblkno; - - /* - * release our write lock without modifying buffer and ensure to - * retain the pin on primary bucket. - */ - if (nbuf == bucket_nbuf) - LockBuffer(nbuf, BUFFER_LOCK_UNLOCK); - else - _hash_relbuf(rel, nbuf); - - /* Exit loop if no more overflow pages in new bucket */ - if (!BlockNumberIsValid(nblkno)) - break; - } - - /* - * Conditionally get the cleanup lock on old and new buckets to perform - * the split operation. If we don't get the cleanup locks, silently give - * up and next insertion on old bucket will try again to complete the - * split. - */ - if (!ConditionalLockBufferForCleanup(obuf)) { - hash_destroy(tidhtab); - return; - } - if (!ConditionalLockBufferForCleanup(bucket_nbuf)) { - LockBuffer(obuf, BUFFER_LOCK_UNLOCK); - hash_destroy(tidhtab); - return; - } - - npage = BufferGetPage(bucket_nbuf); - npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage); - nbucket = npageopaque->hasho_bucket; - - _hash_splitbucket(rel, metabuf, obucket, - nbucket, obuf, bucket_nbuf, tidhtab, - maxbucket, highmask, lowmask); - - _hash_dropbuf(rel, bucket_nbuf); - hash_destroy(tidhtab); -} - -/* - * log_split_page() -- Log the split operation - * - * We log the split operation when the new page in new bucket gets full, - * so we log the entire page. - * - * 'buf' must be locked by the caller which is also responsible for unlocking - * it. - */ -static void log_split_page(Relation rel, Buffer buf) -{ - if (RelationNeedsWAL(rel)) { - XLogRecPtr recptr; - - XLogBeginInsert(); - - XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD); - - recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE); - - PageSetLSN(BufferGetPage(buf), recptr); - } -} - -/* - * _hash_getcachedmetap() -- Returns cached metapage data. 
- * - * If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on - * the metapage. If not set, we'll set it before returning if we have to - * refresh the cache, and return with a pin but no lock on it; caller is - * responsible for releasing the pin. - * - * We refresh the cache if it's not initialized yet or force_refresh is true. - */ -HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh) -{ - Page page; - - Assert(metabuf); - if (force_refresh || rel->rd_amcache == NULL) { - char *cache = NULL; - error_t rc = EOK; - - /* - * It's important that we don't set rd_amcache to an invalid value. - * Either MemoryContextAlloc or _hash_getbuf could fail, so don't - * install a pointer to the newly-allocated storage in the actual - * relcache entry until both have succeeeded. - */ - if (rel->rd_amcache == NULL) - cache = (char*)MemoryContextAlloc(rel->rd_indexcxt, sizeof(HashMetaPageData)); - - /* Read the metapage. */ - if (BufferIsValid(*metabuf)) - LockBuffer(*metabuf, BUFFER_LOCK_SHARE); - else - *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, - LH_META_PAGE); - page = BufferGetPage(*metabuf); - - /* Populate the cache. */ - if (rel->rd_amcache == NULL) - rel->rd_amcache = cache; - rc = memcpy_s(rel->rd_amcache, sizeof(HashMetaPageData), HashPageGetMeta(page), sizeof(HashMetaPageData)); - securec_check_c(rc, "", ""); - - /* Release metapage lock, but keep the pin. */ - LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK); - } - - return (HashMetaPage) rel->rd_amcache; -} - -/* - * _hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given - * hashkey. - * - * Bucket pages do not move or get removed once they are allocated. This give - * us an opportunity to use the previously saved metapage contents to reach - * the target bucket buffer, instead of reading from the metapage every time. - * This saves one buffer access every time we want to reach the target bucket - * buffer, which is very helpful savings in bufmgr traffic and contention. - * - * The access type parameter (HASH_READ or HASH_WRITE) indicates whether the - * bucket buffer has to be locked for reading or writing. - * - * The out parameter cachedmetap is set with metapage contents used for - * hashkey to bucket buffer mapping. Some callers need this info to reach the - * old bucket in case of bucket split, see _hash_doinsert(). - */ -Buffer _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access, - HashMetaPage *cachedmetap) -{ - HashMetaPage metap; - Buffer buf; - Buffer metabuf = InvalidBuffer; - Page page; - Bucket bucket; - BlockNumber blkno; - HashPageOpaque opaque; - - /* We read from target bucket buffer, hence locking is must. */ - Assert(access == HASH_READ || access == HASH_WRITE); - - metap = _hash_getcachedmetap(rel, &metabuf, false); - Assert(metap != NULL); - - /* - * Loop until we get a lock on the correct target bucket. - */ - for (;;) { - /* - * Compute the target bucket number, and convert to block number. - */ - bucket = _hash_hashkey2bucket(hashkey, - metap->hashm_maxbucket, - metap->hashm_highmask, - metap->hashm_lowmask); - - blkno = BUCKET_TO_BLKNO(metap, bucket); - - /* Fetch the primary bucket page for the bucket */ - buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE); - page = BufferGetPage(buf); - opaque = (HashPageOpaque) PageGetSpecialPointer(page); - Assert(opaque->hasho_bucket == bucket); - Assert(opaque->hasho_prevblkno != InvalidBlockNumber); - - /* - * If this bucket hasn't been split, we're done. 
- */ - if (opaque->hasho_prevblkno <= metap->hashm_maxbucket) - break; - - /* Drop lock on this buffer, update cached metapage, and retry. */ - _hash_relbuf(rel, buf); - metap = _hash_getcachedmetap(rel, &metabuf, true); - Assert(metap != NULL); - } - - if (BufferIsValid(metabuf)) - _hash_dropbuf(rel, metabuf); - - if (cachedmetap) - *cachedmetap = metap; - - return buf; + _hash_squeezebucket(rel, obucket, start_oblkno, NULL); } diff --git a/src/gausskernel/storage/access/hash/hashscan.cpp b/src/gausskernel/storage/access/hash/hashscan.cpp new file mode 100644 index 000000000..5f012abf4 --- /dev/null +++ b/src/gausskernel/storage/access/hash/hashscan.cpp @@ -0,0 +1,138 @@ +/* ------------------------------------------------------------------------- + * + * hashscan.cpp + * manage scans on hash tables + * + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/gausskernel/storage/access/hash/hashscan.cpp + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/hash.h" +#include "access/relscan.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/rel_gs.h" +#include "utils/resowner.h" + +/* + * We track all of a backend's active scans on hash indexes using a list + * of HashScanListData structs, which are allocated in t_thrd.top_mem_cxt. + * It's okay to use a long-lived context because we rely on the ResourceOwner + * mechanism to clean up unused entries after transaction or subtransaction + * abort. We can't safely keep the entries in the executor's per-query + * context, because that might be already freed before we get a chance to + * clean up the list. (XXX seems like there should be a better way to + * manage this...) + */ +typedef struct HashScanListData { + IndexScanDesc hashsl_scan; + ResourceOwner hashsl_owner; + struct HashScanListData *hashsl_next; +} HashScanListData; + +typedef HashScanListData *HashScanList; + +/* + * ReleaseResources_hash() --- clean up hash subsystem resources. + * + * This is here because it needs to touch this module's static var HashScans. + */ +void ReleaseResources_hash(void) +{ + HashScanList l = NULL; + HashScanList prev = NULL; + HashScanList next = NULL; + + /* + * Release all HashScanList items belonging to the current ResourceOwner. + * Note that we do not release the underlying IndexScanDesc; that's in + * executor memory and will go away on its own (in fact quite possibly has + * gone away already, so we mustn't try to touch it here). + * + * Note: this should be a no-op during normal query shutdown. However, in + * an abort situation ExecutorEnd is not called and so there may be open + * index scans to clean up. + */ + prev = NULL; + + for (l = u_sess->exec_cxt.HashScans; l != NULL; l = next) { + next = l->hashsl_next; + if (l->hashsl_owner == t_thrd.utils_cxt.CurrentResourceOwner) { + if (prev == NULL) + u_sess->exec_cxt.HashScans = next; + else + prev->hashsl_next = next; + + pfree(l); + /* prev does not change */ + } else + prev = l; + } +} + +/* + * _hash_regscan() -- register a new scan. 
+ */ +void _hash_regscan(IndexScanDesc scan) +{ + HashScanList new_el; + + new_el = (HashScanList)MemoryContextAlloc( + SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(HashScanListData)); + new_el->hashsl_scan = scan; + new_el->hashsl_owner = t_thrd.utils_cxt.CurrentResourceOwner; + new_el->hashsl_next = u_sess->exec_cxt.HashScans; + u_sess->exec_cxt.HashScans = new_el; +} + +/* + * _hash_dropscan() -- drop a scan from the scan list + */ +void _hash_dropscan(IndexScanDesc scan) +{ + HashScanList chk = NULL; + HashScanList last = NULL; + + last = NULL; + for (chk = u_sess->exec_cxt.HashScans; chk != NULL && chk->hashsl_scan != scan; chk = chk->hashsl_next) + last = chk; + + if (chk == NULL) + ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("hash scan list trashed"))); + + if (last == NULL) + u_sess->exec_cxt.HashScans = chk->hashsl_next; + else + last->hashsl_next = chk->hashsl_next; + + pfree(chk); +} + +/* + * Is there an active scan in this bucket? + */ +bool _hash_has_active_scan(Relation rel, Bucket bucket) +{ + Oid relid = RelationGetRelid(rel); + HashScanList l = NULL; + + for (l = u_sess->exec_cxt.HashScans; l != NULL; l = l->hashsl_next) { + if (relid == l->hashsl_scan->indexRelation->rd_id) { + HashScanOpaque so = (HashScanOpaque)l->hashsl_scan->opaque; + + if (so->hashso_bucket_valid && so->hashso_bucket == bucket) + return true; + } + } + + return false; +} diff --git a/src/gausskernel/storage/access/hash/hashsearch.cpp b/src/gausskernel/storage/access/hash/hashsearch.cpp index 746e95ff3..e1ac4941d 100644 --- a/src/gausskernel/storage/access/hash/hashsearch.cpp +++ b/src/gausskernel/storage/access/hash/hashsearch.cpp @@ -3,8 +3,8 @@ * hashsearch.cpp * search code for openGauss hash tables * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -64,131 +64,40 @@ bool _hash_next(IndexScanDesc scan, ScanDirection dir) } /* - * Advance to next page in a bucket, if any. If we are scanning the bucket - * being populated during split operation then this function advances to the - * bucket being split after the last bucket page of bucket being populated. + * Advance to next page in a bucket, if any. */ -static void _hash_readnext(IndexScanDesc scan, Buffer* bufp, Page* pagep, HashPageOpaque* opaquep) +static void _hash_readnext(Relation rel, Buffer *bufp, Page *pagep, HashPageOpaque *opaquep) { BlockNumber blkno; - Relation rel = scan->indexRelation; - HashScanOpaque so = (HashScanOpaque)scan->opaque; - bool block_found = false; blkno = (*opaquep)->hasho_nextblkno; - - /* - * Retain the pin on primary bucket page till the end of scan. Refer the - * comments in _hash_first to know the reason of retaining pin. 
- */ - if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf) - LockBuffer(*bufp, BUFFER_LOCK_UNLOCK); - else - _hash_relbuf(rel, *bufp); - + _hash_relbuf(rel, *bufp); *bufp = InvalidBuffer; /* check for interrupts while we're not holding any buffer lock */ CHECK_FOR_INTERRUPTS(); if (BlockNumberIsValid(blkno)) { *bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE); - block_found = true; - } else if (so->hashso_buc_populated && !so->hashso_buc_split) { - /* - * end of bucket, scan bucket being split if there was a split in - * progress at the start of scan. - */ - *bufp = so->hashso_split_bucket_buf; - - /* - * buffer for bucket being split must be valid as we acquire the pin - * on it before the start of scan and retain it till end of scan. - */ - Assert(BufferIsValid(*bufp)); - - LockBuffer(*bufp, BUFFER_LOCK_SHARE); - - /* - * setting hashso_buc_split to true indicates that we are scanning - * bucket being split. - */ - so->hashso_buc_split = true; - - block_found = true; - } - - if (block_found) { *pagep = BufferGetPage(*bufp); - *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep); + *opaquep = (HashPageOpaque)PageGetSpecialPointer(*pagep); } } /* - * Advance to previous page in a bucket, if any. If the current scan has - * started during split operation then this function advances to bucket - * being populated after the first bucket page of bucket being split. + * Advance to previous page in a bucket, if any. */ -static void _hash_readprev(IndexScanDesc scan, Buffer* bufp, Page* pagep, HashPageOpaque* opaquep) +static void _hash_readprev(Relation rel, Buffer *bufp, Page *pagep, HashPageOpaque *opaquep) { BlockNumber blkno; - Relation rel = scan->indexRelation; - HashScanOpaque so = (HashScanOpaque) scan->opaque; - bool haveprevblk; - blkno = (*opaquep)->hasho_prevblkno; - /* - * Retain the pin on primary bucket page till the end of scan. Refer the - * comments in _hash_first to know the reason of retaining pin. - */ - if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf) { - LockBuffer(*bufp, BUFFER_LOCK_UNLOCK); - haveprevblk = false; - } else { - _hash_relbuf(rel, *bufp); - haveprevblk = true; - } + _hash_relbuf(rel, *bufp); *bufp = InvalidBuffer; /* check for interrupts while we're not holding any buffer lock */ CHECK_FOR_INTERRUPTS(); - if (haveprevblk) { - Assert(BlockNumberIsValid(blkno)); - *bufp = _hash_getbuf(rel, blkno, HASH_READ, - LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); + if (BlockNumberIsValid(blkno)) { + *bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); *pagep = BufferGetPage(*bufp); - *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep); - - /* - * We always maintain the pin on bucket page for whole scan operation, - * so releasing the additional pin we have acquired here. - */ - if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf) - _hash_dropbuf(rel, *bufp); - } else if (so->hashso_buc_populated && so->hashso_buc_split) { - /* - * end of bucket, scan bucket being populated if there was a split in - * progress at the start of scan. - */ - *bufp = so->hashso_bucket_buf; - - /* - * buffer for bucket being populated must be valid as we acquire the - * pin on it before the start of scan and retain it till end of scan. 
- */ - Assert(BufferIsValid(*bufp)); - - LockBuffer(*bufp, BUFFER_LOCK_SHARE); - *pagep = BufferGetPage(*bufp); - *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep); - - /* move to the end of bucket chain */ - while (BlockNumberIsValid((*opaquep)->hasho_nextblkno)) - _hash_readnext(scan, bufp, pagep, opaquep); - - /* - * setting hashso_buc_split to false indicates that we are scanning - * bucket being populated. - */ - so->hashso_buc_split = false; + *opaquep = (HashPageOpaque)PageGetSpecialPointer(*pagep); } } @@ -208,9 +117,12 @@ bool _hash_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur; uint32 hashkey; Bucket bucket; + BlockNumber blkno; Buffer buf; + Buffer metabuf; Page page; HashPageOpaque opaque; + HashMetaPage metap; IndexTuple itup; ItemPointer current; OffsetNumber offnum; @@ -262,71 +174,48 @@ bool _hash_first(IndexScanDesc scan, ScanDirection dir) so->hashso_sk_hash = hashkey; - buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ, NULL); - page = BufferGetPage(buf); - opaque = (HashPageOpaque) PageGetSpecialPointer(page); - bucket = opaque->hasho_bucket; - - so->hashso_bucket_buf = buf; /* - * If a bucket split is in progress, then while scanning the bucket being - * populated, we need to skip tuples that were copied from bucket being - * split. We also need to maintain a pin on the bucket being split to - * ensure that split-cleanup work done by vacuum doesn't remove tuples - * from it till this scan is done. We need to maintain a pin on the - * bucket being populated to ensure that vacuum doesn't squeeze that - * bucket till this scan is complete; otherwise, the ordering of tuples - * can't be maintained during forward and backward scans. Here, we have - * to be cautious about locking order: first, acquire the lock on bucket - * being split; then, release the lock on it but not the pin; then, - * acquire a lock on bucket being populated and again re-verify whether - * the bucket split is still in progress. Acquiring the lock on bucket - * being split first ensures that the vacuum waits for this scan to - * finish. + * Acquire shared split lock so we can compute the target bucket safely + * (see README). */ - if (H_BUCKET_BEING_POPULATED(opaque)) { - BlockNumber old_blkno; - Buffer old_buf; + _hash_getlock(rel, 0, HASH_SHARE); - old_blkno = _hash_get_oldblock_from_newbucket(rel, bucket); + /* Read the metapage */ + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE); + metap = HashPageGetMeta(BufferGetPage(metabuf)); - /* - * release the lock on new bucket and re-acquire it after acquiring - * the lock on old bucket. - */ - LockBuffer(buf, BUFFER_LOCK_UNLOCK); + /* + * Compute the target bucket number, and convert to block number. + */ + bucket = _hash_hashkey2bucket(hashkey, metap->hashm_maxbucket, metap->hashm_highmask, metap->hashm_lowmask); - old_buf = _hash_getbuf(rel, old_blkno, HASH_READ, LH_BUCKET_PAGE); + blkno = BUCKET_TO_BLKNO(metap, bucket); - /* - * remember the split bucket buffer so as to use it later for - * scanning. - */ - so->hashso_split_bucket_buf = old_buf; - LockBuffer(old_buf, BUFFER_LOCK_UNLOCK); + /* done with the metapage */ + _hash_relbuf(rel, metabuf); - LockBuffer(buf, BUFFER_LOCK_SHARE); - page = BufferGetPage(buf); - opaque = (HashPageOpaque) PageGetSpecialPointer(page); - Assert(opaque->hasho_bucket == bucket); + /* + * Acquire share lock on target bucket; then we can release split lock. 
+ */ + _hash_getlock(rel, blkno, HASH_SHARE); - if (H_BUCKET_BEING_POPULATED(opaque)) { - so->hashso_buc_populated = true; - } else { - _hash_dropbuf(rel, so->hashso_split_bucket_buf); - so->hashso_split_bucket_buf = InvalidBuffer; - } - } + _hash_droplock(rel, 0, HASH_SHARE); + + /* Update scan opaque state to show we have lock on the bucket */ + so->hashso_bucket = bucket; + so->hashso_bucket_valid = true; + so->hashso_bucket_blkno = blkno; + + /* Fetch the primary bucket page for the bucket */ + buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE); + page = BufferGetPage(buf); + opaque = (HashPageOpaque)PageGetSpecialPointer(page); + Assert(opaque->hasho_bucket == bucket); /* If a backwards scan is requested, move to the end of the chain */ if (ScanDirectionIsBackward(dir)) { - /* - * Backward scans that start during split needs to start from end of - * bucket being split. - */ - while (BlockNumberIsValid(opaque->hasho_nextblkno) || - (so->hashso_buc_populated && !so->hashso_buc_split)) - _hash_readnext(scan, &buf, &page, &opaque); + while (BlockNumberIsValid(opaque->hasho_nextblkno)) + _hash_readnext(rel, &buf, &page, &opaque); } /* Now find the first tuple satisfying the qualification */ @@ -350,12 +239,6 @@ bool _hash_first(IndexScanDesc scan, ScanDirection dir) * false. Else, return true and set the hashso_curpos for the * scan to the right thing. * - * Here we need to ensure that if the scan has started during split, then - * skip the tuples that are moved by split while scanning bucket being - * populated and then scan the bucket being split to cover all such - * tuples. This is done to ensure that we don't miss tuples in the scans - * that are started during split. - * * 'bufP' points to the current buffer, which is pinned and read-locked. * On success exit, we have pin and read-lock on whichever page * contains the right item; on failure, we have released all buffers. 
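The `_hash_step()` hunk below repositions within each page via `_hash_binsearch()` (forward) and `_hash_binsearch_last()` (backward), which assume tuples on a hash page are kept ordered by hash key. Here is a small model of those two searches over a plain sorted array, under that same ordering assumption; `first_ge` and `last_le` are illustrative names, not the kernel functions.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Model of _hash_binsearch(): return the first slot whose key is >= probe,
 * or nkeys if every key is smaller (the real function analogously returns
 * one past the page's max offset).
 */
static size_t first_ge(const uint32_t *keys, size_t nkeys, uint32_t probe)
{
    size_t lo = 0, hi = nkeys;          /* answer lies in [lo, hi] */

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (keys[mid] < probe)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}

/*
 * Model of _hash_binsearch_last(): return the last slot whose key is
 * <= probe; wraps to (size_t)-1 if there is none, mirroring the real
 * function's "one before the first offset" result.
 */
static size_t last_le(const uint32_t *keys, size_t nkeys, uint32_t probe)
{
    size_t lo = 0, hi = nkeys;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (keys[mid] <= probe)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo - 1;
}

int main(void)
{
    /* Hash keys as they might sit on one bucket page, kept sorted. */
    uint32_t keys[] = {2, 5, 5, 5, 9};
    size_t n = sizeof(keys) / sizeof(keys[0]);

    /* Forward scan for hash key 5 starts at slot 1; backward at slot 3. */
    printf("first_ge = %zu, last_le = %zu\n",
           first_ge(keys, n, 5), last_le(keys, n, 5));
    return 0;
}
```

Because duplicates are common (every tuple matching the probe's hash value shares one key), returning the first or last matching slot rather than an arbitrary one is what lets the scan walk all duplicates in order in either direction.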
@@ -400,9 +283,9 @@ bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) do { switch (dir) { case ForwardScanDirection: - if (offnum != InvalidOffsetNumber) { + if (offnum != InvalidOffsetNumber) offnum = OffsetNumberNext(offnum); /* move forward */ - } else { + else { /* new page, locate starting position by binary search */ offnum = _hash_binsearch(page, so->hashso_sk_hash); } @@ -415,27 +298,14 @@ bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) if (offnum <= maxoff) { Assert(offnum >= FirstOffsetNumber); itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); - /* - * skip the tuples that are moved by split operation - * for the scan that has started when split was in - * progress - */ - if (so->hashso_buc_populated && !so->hashso_buc_split && - (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) { - offnum = OffsetNumberNext(offnum); /* move forward */ - continue; - } - if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup)) break; /* yes, so exit for-loop */ } - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _hash_kill_items(scan); + /* * ran off the end of this page, try the next */ - _hash_readnext(scan, &buf, &page, &opaque); + _hash_readnext(rel, &buf, &page, &opaque); if (BufferIsValid(buf)) { maxoff = PageGetMaxOffsetNumber(page); offnum = _hash_binsearch(page, so->hashso_sk_hash); @@ -448,9 +318,9 @@ bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) break; case BackwardScanDirection: - if (offnum != InvalidOffsetNumber) { + if (offnum != InvalidOffsetNumber) offnum = OffsetNumberPrev(offnum); /* move back */ - } else { + else { /* new page, locate starting position by binary search */ offnum = _hash_binsearch_last(page, so->hashso_sk_hash); } @@ -463,26 +333,14 @@ bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) if (offnum >= FirstOffsetNumber) { Assert(offnum <= maxoff); itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); - /* - * skip the tuples that are moved by split operation - * for the scan that has started when split was in - * progress - */ - if (so->hashso_buc_populated && !so->hashso_buc_split && - (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) { - offnum = OffsetNumberPrev(offnum); /* move back */ - continue; - } if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup)) break; /* yes, so exit for-loop */ } - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _hash_kill_items(scan); + /* * ran off the end of this page, try the next */ - _hash_readprev(scan, &buf, &page, &opaque); + _hash_readprev(rel, &buf, &page, &opaque); if (BufferIsValid(buf)) { maxoff = PageGetMaxOffsetNumber(page); offnum = _hash_binsearch_last(page, so->hashso_sk_hash); @@ -502,16 +360,9 @@ bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) } if (itup == NULL) { - /* - * We ran off the end of the bucket without finding a match. - * Release the pin on bucket buffers. Normally, such pins are - * released at end of scan, however scrolling cursors can - * reacquire the bucket lock and pin in the same scan multiple - * times. 
- */ + /* we ran off the end of the bucket without finding a match */ *bufP = so->hashso_curbuf = InvalidBuffer; ItemPointerSetInvalid(current); - _hash_dropscanbuf(rel, so); return false; } diff --git a/src/gausskernel/storage/access/hash/hashsort.cpp b/src/gausskernel/storage/access/hash/hashsort.cpp index f5a9aab4e..1c64ceceb 100644 --- a/src/gausskernel/storage/access/hash/hashsort.cpp +++ b/src/gausskernel/storage/access/hash/hashsort.cpp @@ -14,8 +14,8 @@ * plenty of locality of access. * * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -37,23 +37,15 @@ struct HSpool { Tuplesortstate *sortstate; /* state data for tuplesort.c */ Relation index; - /* - * We sort the hash keys based on the buckets they belong to. Below masks - * are used in _hash_hashkey2bucket to determine the bucket of given hash - * key. - */ - uint32 high_mask; - uint32 low_mask; - uint32 max_buckets; }; - /* * create and initialize a spool structure */ -HSpool *_h_spoolinit(Relation heap, Relation index, uint32 num_buckets, void *meminfo) +HSpool *_h_spoolinit(Relation index, uint32 num_buckets, void *meminfo) { HSpool *hspool = (HSpool *)palloc0(sizeof(HSpool)); + uint32 hash_mask; UtilityDesc *desc = (UtilityDesc *)meminfo; int work_mem = (desc->query_mem[0] > 0) ? desc->query_mem[0] : u_sess->attr.attr_memory.maintenance_work_mem; int max_mem = (desc->query_mem[1] > 0) ? desc->query_mem[1] : 0; @@ -65,26 +57,18 @@ HSpool *_h_spoolinit(Relation heap, Relation index, uint32 num_buckets, void *me * num_buckets buckets in the index, the appropriate mask can be computed * as follows. * - * NOTE : This hash mask calculation should be in sync with similar - * calculation in _hash_init_metabuffer. + * Note: at present, the passed-in num_buckets is always a power of 2, so + * we could just compute num_buckets - 1. We prefer not to assume that + * here, though. */ - hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1; - hspool->low_mask = (hspool->high_mask >> 1); - hspool->max_buckets = num_buckets - 1; + hash_mask = (((uint32)1) << _hash_log2(num_buckets)) - 1; /* * We size the sort area as maintenance_work_mem rather than work_mem to * speed index creation. This should be OK since a single backend can't * run multiple index creations in parallel. */ - hspool->sortstate = tuplesort_begin_index_hash(heap, - index, - hspool->high_mask, - hspool->low_mask, - hspool->max_buckets, - work_mem, - false, - max_mem); + hspool->sortstate = tuplesort_begin_index_hash(index, hash_mask, work_mem, false, max_mem); return hspool; } @@ -110,7 +94,7 @@ void _h_spool(HSpool *hspool, ItemPointer self, Datum *values, const bool *isnul * given a spool loaded by successive calls to _h_spool, * create an entire index. 
*/ -void _h_indexbuild(HSpool *hspool, Relation heapRel) +void _h_indexbuild(HSpool *hspool) { IndexTuple itup; bool should_free = false; @@ -118,7 +102,7 @@ void _h_indexbuild(HSpool *hspool, Relation heapRel) tuplesort_performsort(hspool->sortstate); while ((itup = tuplesort_getindextuple(hspool->sortstate, true, &should_free)) != NULL) { - _hash_doinsert(hspool->index, itup, heapRel); + _hash_doinsert(hspool->index, itup); if (should_free) pfree(itup); } diff --git a/src/gausskernel/storage/access/hash/hashutil.cpp b/src/gausskernel/storage/access/hash/hashutil.cpp index 277b67d82..79cdb5157 100644 --- a/src/gausskernel/storage/access/hash/hashutil.cpp +++ b/src/gausskernel/storage/access/hash/hashutil.cpp @@ -3,8 +3,8 @@ * hashutil.cpp * Utility code for openGauss hash implementation. * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,9 +22,7 @@ #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/rel_gs.h" -#include "storage/buf/buf_internals.h" -#define CALC_NEW_BUCKET(old_bucket, lowmask) ((old_bucket) | ((lowmask) + 1)) /* * _hash_checkqual -- does the index tuple satisfy the scan conditions? */ @@ -135,70 +133,6 @@ uint32 _hash_log2(uint32 num) return i; } -/* - * _hash_spareindex -- returns spare index / global splitpoint phase of the bucket - */ -uint32 _hash_spareindex(uint32 num_bucket) -{ - uint32 splitpoint_group; - uint32 splitpoint_phases; - - splitpoint_group = _hash_log2(num_bucket); - - if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) - return splitpoint_group; - - /* account for single-phase groups */ - splitpoint_phases = HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE; - - /* account for multi-phase groups before splitpoint_group */ - splitpoint_phases += - ((splitpoint_group - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) << - HASH_SPLITPOINT_PHASE_BITS); - - /* account for phases within current group */ - splitpoint_phases += - (((num_bucket - 1) >> - (splitpoint_group - (HASH_SPLITPOINT_PHASE_BITS + 1))) & - HASH_SPLITPOINT_PHASE_MASK); /* to 0-based value. */ - - return splitpoint_phases; -} - -/* - * _hash_get_totalbuckets -- returns total number of buckets allocated till - * the given splitpoint phase. 
- */ -uint32 _hash_get_totalbuckets(uint32 splitpoint_phase) -{ - uint32 splitpoint_group; - uint32 total_buckets; - uint32 phases_within_splitpoint_group; - - if (splitpoint_phase < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) - return (1 << splitpoint_phase); - - /* get splitpoint's group */ - splitpoint_group = HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE; - splitpoint_group += - ((splitpoint_phase - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) >> - HASH_SPLITPOINT_PHASE_BITS); - - /* account for buckets before splitpoint_group */ - total_buckets = (1 << (splitpoint_group - 1)); - - /* account for buckets within splitpoint_group */ - phases_within_splitpoint_group = - (((splitpoint_phase - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) & - HASH_SPLITPOINT_PHASE_MASK) + 1); /* from 0-based to 1-based */ - total_buckets += - (((1 << (splitpoint_group - 1)) >> HASH_SPLITPOINT_PHASE_BITS) * - phases_within_splitpoint_group); - - return total_buckets; -} - - /* * _hash_checkpage -- sanity checks on the format of all hash pages * @@ -282,36 +216,25 @@ uint32 _hash_get_indextuple_hashkey(IndexTuple itup) } /* - * _hash_convert_tuple - convert raw index data to hash key - * - * Inputs: values and isnull arrays for the user data column(s) - * Outputs: values and isnull arrays for the index tuple, suitable for - * passing to index_form_tuple(). - * - * Returns true if successful, false if not (because there are null values). - * On a false result, the given data need not be indexed. - * - * Note: callers know that the index-column arrays are always of length 1. - * In principle, there could be more than one input column, though we do not - * currently support that. + * _hash_form_tuple - form an index tuple containing hash code only */ -bool _hash_convert_tuple(Relation index, - Datum *user_values, const bool *user_isnull, - Datum *index_values, bool *index_isnull) +IndexTuple _hash_form_tuple(Relation index, Datum *values, const bool *isnull) { + IndexTuple itup; uint32 hashkey; + Datum hashkeydatum; + TupleDesc hashdesc; - /* - * We do not insert null values into hash indexes. This is okay because - * the only supported search operator is '=', and we assume it is strict. - */ - if (user_isnull[0]) - return false; - - hashkey = _hash_datum2hashkey(index, user_values[0]); - index_values[0] = UInt32GetDatum(hashkey); - index_isnull[0] = false; - return true; + if (isnull[0]) { + hashkeydatum = (Datum)0; + } else { + hashkey = _hash_datum2hashkey(index, values[0]); + hashkeydatum = UInt32GetDatum(hashkey); + } + hashdesc = RelationGetDescr(index); + Assert(hashdesc->natts == 1); + itup = index_form_tuple(hashdesc, &hashkeydatum, isnull); + return itup; } /* @@ -389,154 +312,3 @@ OffsetNumber _hash_binsearch_last(Page page, uint32 hash_value) return lower; } - -/* - * _hash_get_oldblock_from_newbucket() -- get the block number of a bucket - * from which current (new) bucket is being split. - */ -BlockNumber _hash_get_oldblock_from_newbucket(Relation rel, Bucket new_bucket) -{ - Bucket old_bucket; - uint32 mask; - Buffer metabuf; - HashMetaPage metap; - BlockNumber blkno; - - /* - * To get the old bucket from the current bucket, we need a mask to modulo - * into lower half of table. This mask is stored in meta page as - * hashm_lowmask, but here we can't rely on the same, because we need a - * value of lowmask that was prevalent at the time when bucket split was - * started. Masking the most significant bit of new bucket would give us - * old bucket. 
- */ - mask = (((uint32) 1) << (fls(new_bucket) - 1)) - 1; - old_bucket = new_bucket & mask; - - metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE); - metap = HashPageGetMeta(BufferGetPage(metabuf)); - - blkno = BUCKET_TO_BLKNO(metap, old_bucket); - - _hash_relbuf(rel, metabuf); - - return blkno; -} - -/* - * _hash_get_newblock_from_oldbucket() -- get the block number of a bucket - * that will be generated after split from old bucket. - * - * This is used to find the new bucket from old bucket based on current table - * half. It is mainly required to finish the incomplete splits where we are - * sure that not more than one bucket could have split in progress from old - * bucket. - */ -BlockNumber _hash_get_newblock_from_oldbucket(Relation rel, Bucket old_bucket) -{ - Bucket new_bucket; - Buffer metabuf; - HashMetaPage metap; - BlockNumber blkno; - - metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE); - metap = HashPageGetMeta(BufferGetPage(metabuf)); - - new_bucket = _hash_get_newbucket_from_oldbucket(rel, old_bucket, - metap->hashm_lowmask, - metap->hashm_maxbucket); - blkno = BUCKET_TO_BLKNO(metap, new_bucket); - - _hash_relbuf(rel, metabuf); - - return blkno; -} - -/* - * _hash_get_newbucket_from_oldbucket() -- get the new bucket that will be - * generated after split from current (old) bucket. - * - * This is used to find the new bucket from old bucket. New bucket can be - * obtained by OR'ing old bucket with most significant bit of current table - * half (lowmask passed in this function can be used to identify msb of - * current table half). There could be multiple buckets that could have - * been split from current bucket. We need the first such bucket that exists. - * Caller must ensure that no more than one split has happened from old - * bucket. - */ -Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, - uint32 lowmask, uint32 maxbucket) -{ - Bucket new_bucket; - - new_bucket = CALC_NEW_BUCKET(old_bucket, lowmask); - if (new_bucket > maxbucket) { - lowmask = lowmask >> 1; - new_bucket = CALC_NEW_BUCKET(old_bucket, lowmask); - } - - return new_bucket; -} - -/* - * _hash_kill_items - set LP_DEAD state for items an indexscan caller has - * told us were killed. - * - * scan->opaque, referenced locally through so, contains information about the - * current page and killed tuples thereon (generally, this should only be - * called if so->numKilled > 0). - * - * We match items by heap TID before assuming they are the right ones to - * delete. - */ -void _hash_kill_items(IndexScanDesc scan) -{ - HashScanOpaque so = (HashScanOpaque) scan->opaque; - Page page; - HashPageOpaque opaque; - OffsetNumber offnum; - OffsetNumber maxoff; - int numKilled = so->numKilled; - int i; - bool killedsomething = false; - - Assert(so->numKilled > 0); - Assert(so->killedItems != NULL); - - /* - * Always reset the scan state, so we don't look for same items on other - * pages. 
- */ - so->numKilled = 0; - - page = BufferGetPage(so->hashso_curbuf); - opaque = (HashPageOpaque) PageGetSpecialPointer(page); - maxoff = PageGetMaxOffsetNumber(page); - - for (i = 0; i < numKilled; i++) { - offnum = so->killedItems[i].indexOffset; - - while (offnum <= maxoff) { - ItemId iid = PageGetItemId(page, offnum); - IndexTuple ituple = (IndexTuple)PageGetItem(page, iid); - - if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid)) { - /* found the item */ - ItemIdMarkDead(iid); - killedsomething = true; - break; /* out of inner search loop */ - } - offnum = OffsetNumberNext(offnum); - } - } - - /* - * Since this can be redone later if needed, mark as dirty hint. Whenever - * we mark anything LP_DEAD, we also set the page's - * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint. - */ - if (killedsomething) { - opaque->hasho_flag |= LH_PAGE_HAS_DEAD_TUPLES; - MarkBufferDirtyHint(so->hashso_curbuf, true); - } -} diff --git a/src/gausskernel/storage/access/hbstore/hbucket_am.cpp b/src/gausskernel/storage/access/hbstore/hbucket_am.cpp index b9e3e0794..37320006e 100644 --- a/src/gausskernel/storage/access/hbstore/hbucket_am.cpp +++ b/src/gausskernel/storage/access/hbstore/hbucket_am.cpp @@ -41,7 +41,7 @@ #include "optimizer/bucketpruning.h" #include "executor/node/nodeSeqscan.h" - +#ifdef ENABLE_MULTIPLE_NODES TableScanDesc GetTableScanDesc(TableScanDesc scan, Relation rel) { if (scan != NULL && rel != NULL && RELATION_CREATE_BUCKET(scan->rs_rd)) { @@ -59,6 +59,7 @@ IndexScanDesc GetIndexScanDesc(IndexScanDesc scan) return (IndexScanDesc)scan; } } +#endif oidvector *hbkt_load_buckets(Relation relation, BucketInfo *bktInfo) { diff --git a/src/gausskernel/storage/access/heap/heapam.cpp b/src/gausskernel/storage/access/heap/heapam.cpp index 91faac420..d94a09803 100755 --- a/src/gausskernel/storage/access/heap/heapam.cpp +++ b/src/gausskernel/storage/access/heap/heapam.cpp @@ -42,6 +42,7 @@ */ #include "postgres.h" #include "knl/knl_variable.h" +#include "access/csnlog.h" #include "access/heapam.h" #include "access/hio.h" #include "access/multixact.h" @@ -62,6 +63,7 @@ #include "catalog/catalog.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" +#include "catalog/pg_uid_fn.h" #include "commands/dbcommands.h" #include "commands/verify.h" #include "commands/matview.h" @@ -123,14 +125,16 @@ static void HeapParallelscanStartblockInit(HeapScanDesc scan); static BlockNumber HeapParallelscanNextpage(HeapScanDesc scan); static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, CommandId cid, int options); static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple oldtup, Buffer newbuf, HeapTuple newtup, - HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared); + HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared, char relreplident); static void HeapSatisfiesHOTUpdate(Relation relation, Bitmapset* hot_attrs, Bitmapset* key_attrs, Bitmapset* id_attrs, bool* satisfies_hot, bool *satisfies_key, bool* satisfies_id, HeapTuple oldtup, HeapTuple newtup, char* page); -static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool* copy); +static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool* copy, char *relreplident); static void SkipToNewPage( HeapScanDesc scan, ScanDirection dir, BlockNumber page, bool* finished, bool* isValidRelationPage); static bool VerifyHeapGetTup(HeapScanDesc scan, ScanDirection dir); static XLogRecPtr 
log_heap_new_cid(Relation relation, HeapTuple tup); +static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, + uint32 flags, ParallelHeapScanDesc parallel_scan, RangeScanInRedis rangeScanInRedis = {false, 0, 0}); extern void vacuum_set_xid_limits(Relation rel, int64 freeze_min_age, int64 freeze_table_age, TransactionId *oldestXmin, TransactionId *freezeLimit, TransactionId *freezeTableLimit, MultiXactId* multiXactFrzLimit); @@ -201,17 +205,6 @@ static inline void InitScanBlocks(HeapScanDesc scan, RangeScanInRedis rangeScanI } } - -static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, - uint32 flags, ParallelHeapScanDesc parallel_scan, RangeScanInRedis rangeScanInRedis = {false, 0, 0}); -static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, CommandId cid, int options); -static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy); -static void SkipToNewPage(HeapScanDesc scan, ScanDirection dir, BlockNumber page, bool* finished, - bool* is_valid_relation_page); -static bool VerifyHeapGetTup(HeapScanDesc scan, ScanDirection dir); -static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup); -extern void Start_Prefetch(TableScanDesc scan, SeqScanAccessor *pAccessor, ScanDirection dir); - /* ---------------- * initscan - scan code common to heap_beginscan and heap_rescan * ---------------- @@ -321,12 +314,13 @@ void heapgetpage(TableScanDesc sscan, BlockNumber page) bool all_visible = false; HeapScanDesc scan = (HeapScanDesc) sscan; +#ifdef USE_ASSERT_CHECKING if (!scan->rs_base.rs_rangeScanInRedis.isRangeScanInRedis) { Assert(page < scan->rs_base.rs_nblocks); } else { Assert(page < scan->rs_base.rs_nblocks + scan->rs_base.rs_startblock); } - +#endif /* release previous scan buffer, if any */ if (BufferIsValid(scan->rs_base.rs_cbuf)) { @@ -345,9 +339,6 @@ void heapgetpage(TableScanDesc sscan, BlockNumber page) scan->rs_base.rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, page, RBM_NORMAL, scan->rs_base.rs_strategy); scan->rs_base.rs_cblock = page; - /* We've pinned the buffer, nobody can prune this buffer, check whether snapshot is valid. */ - CheckSnapshotIsValidException(scan->rs_base.rs_snapshot, "heapgetpage"); - if (!scan->rs_base.rs_pageatatime) { return; } @@ -361,7 +352,10 @@ void heapgetpage(TableScanDesc sscan, BlockNumber page) * since we use append mode and never look back holes in previous pages * anyway. 
*/ - if (!scan->rs_base.rs_rangeScanInRedis.isRangeScanInRedis) { +#ifdef ENABLE_MULTIPLE_NODES + if (!scan->rs_base.rs_rangeScanInRedis.isRangeScanInRedis) +#endif + { heap_page_prune_opt(scan->rs_base.rs_rd, buffer); } @@ -385,7 +379,7 @@ void heapgetpage(TableScanDesc sscan, BlockNumber page) */ all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery; - for (line_off = FirstOffsetNumber, lpp = PageGetItemId(dp, line_off); line_off <= lines; line_off++, lpp++) { + for (line_off = FirstOffsetNumber, lpp = HeapPageGetItemId(dp, line_off); line_off <= lines; line_off++, lpp++) { if (ItemIdIsNormal(lpp)) { HeapTupleData loctup; bool valid = false; @@ -886,26 +880,17 @@ static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey dp = (Page)BufferGetPage(scan->rs_base.rs_cbuf); line_off = ItemPointerGetOffsetNumber(&(tuple->t_self)); - /* Prevent concurrent page upgrades */ - bool is_lock = false; - if (PageIs4BXidVersion(dp)) { - LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_SHARE); - is_lock = true; - } - - lpp = PageGetItemId(dp, line_off); + lpp = HeapPageGetItemId(dp, line_off); Assert(ItemIdIsNormal(lpp)); tuple->t_data = (HeapTupleHeader)PageGetItem((Page)dp, lpp); tuple->t_len = ItemIdGetLength(lpp); HeapTupleCopyBaseFromPage(tuple, dp); - if (is_lock) { - LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_UNLOCK); - is_lock = false; - } - DECOMPRESS_HEAP_TUPLE( - HEAP_TUPLE_IS_COMPRESSED(tuple->t_data), tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp); + if (scan->rs_base.rs_rd->is_compressed && HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) { + DECOMPRESS_HEAP_TUPLE( + true, tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp); + } return; } @@ -913,7 +898,7 @@ static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey * advance the scan until we find a qualifying tuple or run out of stuff * to scan */ - lpp = PageGetItemId(dp, line_off); + lpp = HeapPageGetItemId(dp, line_off); for (;;) { while (lines_left > 0) { if (ItemIdIsNormal(lpp)) { @@ -933,8 +918,10 @@ static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey snapshot); if (valid) { /* make sure this tuple is visible and then uncompress it */ - DECOMPRESS_HEAP_TUPLE( - HEAP_TUPLE_IS_COMPRESSED(tuple->t_data), tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp); + if (scan->rs_base.rs_rd->is_compressed && HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) { + DECOMPRESS_HEAP_TUPLE( + true, tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp); + } if (key != NULL) { HeapKeyTest(tuple, (scan->rs_tupdesc), nkeys, key, valid); @@ -1028,14 +1015,14 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, int lines_left; ItemId lpp; - - if (tuple != NULL) - Assert(TUPLE_IS_HEAP_TUPLE(tuple)); + Assert(tuple != NULL && TUPLE_IS_HEAP_TUPLE(tuple)); /* IO collector and IO scheduler for seqsan */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) { IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW); } +#endif /* * calculate next starting line_index, given scan direction @@ -1133,31 +1120,21 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, /* Since the tuple was previously fetched, needn't lock page here */ dp = (Page)BufferGetPage(scan->rs_base.rs_cbuf); line_off = ItemPointerGetOffsetNumber(&(tuple->t_self)); - - /* Prevent concurrent page upgrades */ - bool is_lock = false; - if (PageIs4BXidVersion(dp)) { - LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_SHARE); - is_lock = true; - } - lpp = 
PageGetItemId(dp, line_off);
+        lpp = HeapPageGetItemId(dp, line_off);
         Assert(ItemIdIsNormal(lpp));
 
         tuple->t_data = (HeapTupleHeader)PageGetItem((Page)dp, lpp);
         tuple->t_len = ItemIdGetLength(lpp);
-        HeapTupleCopyBaseFromPage(tuple, dp);
-        if (is_lock) {
-            LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_UNLOCK);
-            is_lock = false;
-        }
 
         /* check that rs_cindex is in sync */
         Assert(scan->rs_base.rs_cindex < scan->rs_base.rs_ntuples);
         Assert(line_off == scan->rs_base.rs_vistuples[scan->rs_base.rs_cindex]);
 
-        DECOMPRESS_HEAP_TUPLE(
-            HEAP_TUPLE_IS_COMPRESSED(tuple->t_data), tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp);
+        if (scan->rs_base.rs_rd->is_compressed && HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) {
+            DECOMPRESS_HEAP_TUPLE(
+                true, tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp);
+        }
         return;
     }
 
@@ -1165,17 +1142,10 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys,
      * advance the scan until we find a qualifying tuple or run out of stuff
      * to scan
      */
-    bool is_lock = false;
     for (;;) {
-        /* Prevent concurrent page upgrades */
-        if (PageIs4BXidVersion(dp) && is_lock == false) {
-            LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_SHARE);
-            is_lock = true;
-        }
-
         while (lines_left > 0) {
             line_off = scan->rs_base.rs_vistuples[line_index];
-            lpp = PageGetItemId(dp, line_off);
+            lpp = HeapPageGetItemId(dp, line_off);
             Assert(ItemIdIsNormal(lpp));
 
             tuple->t_data = (HeapTupleHeader)PageGetItem((Page)dp, lpp);
@@ -1188,9 +1158,10 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys,
              * 1. reduce the UNCOMPRESS number within HeapKeyTest();
              * 2. maybe reduce the number of palloc() within HeapKeyTest();
              */
-            DECOMPRESS_HEAP_TUPLE(
-                HEAP_TUPLE_IS_COMPRESSED(tuple->t_data), tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp);
-
+            if (scan->rs_base.rs_rd->is_compressed && HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) {
+                DECOMPRESS_HEAP_TUPLE(
+                    true, tuple, &(scan->rs_ctbuf_hdr), (scan->rs_tupdesc), dp);
+            }
             /*
              * if current tuple qualifies, return it.
              */
@@ -1201,18 +1172,10 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys,
                 if (valid) {
                     scan->rs_base.rs_cindex = line_index;
-                    if (is_lock) {
-                        LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_UNLOCK);
-                        is_lock = false;
-                    }
                     return;
                 }
             } else {
                 scan->rs_base.rs_cindex = line_index;
-                if (is_lock) {
-                    LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_UNLOCK);
-                    is_lock = false;
-                }
                 return;
             }
 
@@ -1226,10 +1189,6 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys,
                 ++line_index;
             }
         }
-        if (is_lock) {
-            LockBuffer(scan->rs_base.rs_cbuf, BUFFER_LOCK_UNLOCK);
-            is_lock = false;
-        }
 
         /*
          * if we get here, it means we've exhausted the items on this page and
         * it's time to move to the next.
@@ -1249,7 +1208,6 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys,
             return;
         }
 
-        heap_prefetch(scan, dir);
         heapgetpage((TableScanDesc)scan, page);
 
         dp = (Page)BufferGetPage(scan->rs_base.rs_cbuf);
@@ -1263,6 +1221,142 @@ static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys,
     }
 }
 
+/*
+ * Scan one page for batch scan mode.
+ * Return false to stop reading the current page early; return true when the
+ * next page needs to be read.
+ */
+static bool ScanOnePageForBatchMode(HeapScanDesc scan, Page& dp, BlockNumber& page, int& lineIndex, int& rows)
+{
+    int lines = scan->rs_base.rs_ntuples;
+    int linesLeft = lines - lineIndex;
+    HeapTuple tuple = &scan->rs_ctupBatch[rows];
+
+    while (linesLeft > 0) {
+        OffsetNumber lineoff = scan->rs_base.rs_vistuples[lineIndex];
+        ItemId lpp = PageGetItemId(dp, lineoff);
+
+        tuple->tupTableType = HEAP_TUPLE;
+        tuple->t_data = (HeapTupleHeader)PageGetItem((Page)dp, lpp);
+        tuple->t_len = ItemIdGetLength(lpp);
+        ItemPointerSet(&(tuple->t_self), page, lineoff);
+        HeapTupleCopyBaseFromPage(tuple, dp);
+
+        /* if current tuple qualifies, return it */
+        if (scan->rs_base.rs_key != NULL) {
+            bool valid = false;
+            HeapKeyTest(tuple, (scan->rs_tupdesc), scan->rs_base.rs_nkeys, scan->rs_base.rs_key, valid);
+
+            if (valid) {
+                scan->rs_base.rs_cindex = lineIndex;
+                rows++;
+            }
+        } else {
+            scan->rs_base.rs_cindex = lineIndex;
+            rows++;
+        }
+
+        if (rows == scan->rs_base.rs_maxScanRows) {
+            scan->rs_base.rs_ctupRows = rows;
+            return false;
+        }
+
+        /* otherwise move to the next item on the page */
+        --linesLeft;
+        ++lineIndex;
+
+        tuple = &scan->rs_ctupBatch[rows];
+    }
+
+    if (rows > 0) {
+        scan->rs_base.rs_ctupRows = rows;
+        return false; /* only for one page */
+    }
+
+    return true;
+}
+
+/*
+ * Scan pages for batch scan mode.
+ * Return true if all the pages are exhausted.
+ */
+static bool ScanPagesForBatchMode(HeapScanDesc scan, BlockNumber page, int lineIndex)
+{
+    int lines, rows = 0;
+    bool continueScan, finished = false;
+    Page dp = (Page)BufferGetPage(scan->rs_base.rs_cbuf);
+    lines = scan->rs_base.rs_ntuples;
+
+    /* advance the scan until we find a qualifying tuple or run out of stuff to scan */
+    for (;;) {
+        continueScan = ScanOnePageForBatchMode(scan, dp, page, lineIndex, rows);
+
+        if (!continueScan) {
+            return false;
+        }
+        /*
+         * if we get here, it means we've exhausted the items on this page and
+         * it's time to move to the next.
+         */
+        finished = next_page(scan, ForwardScanDirection, page);
+
+        /* return NULL if we've exhausted all the pages */
+        if (finished) {
+            if (BufferIsValid(scan->rs_base.rs_cbuf)) {
+                ReleaseBuffer(scan->rs_base.rs_cbuf);
+            }
+            scan->rs_base.rs_cbuf = InvalidBuffer;
+            scan->rs_base.rs_cblock = InvalidBlockNumber;
+            scan->rs_ctupBatch[rows].t_data = NULL;
+            scan->rs_base.rs_inited = false;
+            scan->rs_base.rs_ctupRows = rows;
+            return true;
+        }
+
+        heapgetpage((TableScanDesc)scan, page);
+
+        dp = (Page)BufferGetPage(scan->rs_base.rs_cbuf);
+        lineIndex = 0;
+    }
+}
+
+static bool HeapGetTupPageBatchmode(HeapScanDesc scan)
+{
+    HeapTuple tuple = &scan->rs_ctupBatch[0];
+    BlockNumber page;
+
+    int lineIndex;
+
+    scan->rs_base.rs_ctupRows = 0;
+
+    /* IO collector and IO scheduler for seqscan */
+    if (ENABLE_WORKLOAD_CONTROL) {
+        IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW);
+    }
+
+    /* calculate next starting lineindex, given scan direction */
+    if (!scan->rs_base.rs_inited) {
+        /*
+         * return null immediately if relation is empty
+         */
+        if (scan->rs_base.rs_nblocks == 0) {
+            Assert(!BufferIsValid(scan->rs_base.rs_cbuf));
+            tuple->t_data = NULL;
+            scan->rs_base.rs_ctupRows = 0;
+            return true;
+        }
+        page = scan->rs_base.rs_startblock;
+        heapgetpage((TableScanDesc)scan, page);
+        lineIndex = 0;
+        scan->rs_base.rs_inited = true;
+    } else {
+        /* continue from previously returned page/tuple */
+        page = scan->rs_base.rs_cblock;
+        lineIndex = scan->rs_base.rs_cindex + 1;
+    }
+
+    return ScanPagesForBatchMode(scan, page, lineIndex);
+}
+
 #if defined(DISABLE_COMPLEX_MACRO)
 /*
  * This is formatted so oddly so that the correspondence to the macro
@@ -1354,6 +1448,15 @@ Relation relation_open(Oid relationId, LOCKMODE lockmode, int2 bucketId)
             ERROR, (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("could not open relation with OID %u", relationId)));
     }
 
+    if (r->xmin_csn != InvalidCommitSeqNo) {
+        Snapshot snapshot = GetActiveSnapshot();
+        if (snapshot == NULL || r->xmin_csn > snapshot->snapshotcsn) {
+            ereport(ERROR, (errcode(ERRCODE_SNAPSHOT_INVALID), errmsg(
+                "current snapshot is invalid for this relation: %u, xmin->csn: %lu, snapshotcsn: %lu",
+                relationId, r->xmin_csn, snapshot == NULL ? -1 : snapshot->snapshotcsn)));
+        }
+    }
+
     if (RELATION_IS_GLOBAL_TEMP(r))
         r->rd_rel->relfilenode = r->rd_node.relNode;
 
@@ -1727,12 +1830,14 @@ static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot
 
     scan->rs_base.rs_rd = relation;
     scan->rs_tupdesc = RelationGetDescr(relation);
+    scan->rs_base.rs_rd->is_compressed = RowRelationIsCompressed(relation);
     scan->rs_base.rs_snapshot = snapshot;
     scan->rs_base.rs_nkeys = nkeys;
     scan->rs_base.rs_flags = flags;
     scan->rs_base.rs_strategy = NULL; /* set in initscan */
     scan->rs_base.rs_rangeScanInRedis = rangeScanInRedis;
     scan->rs_parallel = parallel_scan;
+    scan->rs_ctupBatch = NULL;
 
     /*
      * we can use page-at-a-time mode if it's an MVCC-safe snapshot
@@ -1830,6 +1935,10 @@ void heap_endscan(TableScanDesc sscan)
         FreeAccessStrategy(scan->rs_base.rs_strategy);
     }
 
+    if (scan->rs_ctupBatch != NULL) {
+        pfree_ext(scan->rs_ctupBatch);
+    }
+
     pfree(scan);
     scan = NULL;
 }
@@ -2068,6 +2177,33 @@ HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction)
     return &(scan->rs_ctup);
 }
 
+bool HeapamGetNextBatchMode(TableScanDesc sscan, ScanDirection direction)
+{
+    /* Note: no locking manipulations needed */
+    HeapScanDesc scan = (HeapScanDesc)sscan;
+    bool finished = false;
+    scan->rs_base.rs_ctupRows = 0;
+    Assert(ScanDirectionIsForward(direction));
+    if (likely(scan->rs_base.rs_pageatatime)) {
+        finished = HeapGetTupPageBatchmode(scan);
+    } else {
+        ereport(ERROR, (errcode(ERRCODE_RELATION_OPEN_ERROR),
+            errmsg("relation %s is temporarily unavailable", RelationGetRelationName(scan->rs_base.rs_rd))));
+    }
+
+    Assert(scan->rs_base.rs_ctupRows <= BatchMaxSize);
+    if (scan->rs_base.rs_ctupRows == 0) {
+        return true;
+    }
+
+    if ((scan->rs_base.rs_rd)->pgstat_info != NULL) {
+        (scan->rs_base.rs_rd)->pgstat_info->t_counts.t_tuples_returned += (scan->rs_base.rs_ctupRows);
+    }
+
+    return finished;
+}
+
+
 /*
  * heap_fetch - retrieve tuple with given tid
  *
@@ -2118,17 +2254,18 @@ bool heap_fetch(
     OffsetNumber offnum;
     bool valid = false;
 
-    /* another data space must be provided for decomperssing tuple. */
-    Assert(tuple && tuple->t_data);
+    /*
+     * Another data space must be provided for decompressing the tuple. Note
+     * that a non-compressed tuple coming from heap_lock_updated_tuple_rec
+     * may have tuple->t_data set to NULL.
+     */
+    Assert(tuple);
 
     /*
      * Fetch and pin the appropriate page of the relation.
      */
    buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
 
-    /* We've pinned the buffer, nobody can prune this buffer, check whether snapshot is valid. */
-    CheckSnapshotIsValidException(snapshot, "heap_fetch");
-
    /*
     * Need share lock on buffer to examine tuple commit status.
     */
@@ -2740,6 +2877,17 @@ void HeapInsertTsStore(Relation relation, ResultRelInfo *resultRelInfo, HeapTupl
 }
 #endif /* ENABLE_MULTIPLE_NODES */
 
+/*
+ * Log CSN in xlog.
+ */
+void LogCSN(CommitSeqNo *curCSN)
+{
+    if (t_thrd.proc->workingVersionNum >= PARALLEL_DECODE_VERSION_NUM && XLogLogicalInfoActive()) {
+        (*curCSN) = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo);
+        XLogRegisterData((char*)curCSN, sizeof(CommitSeqNo));
+    }
+}
+
 /*
  * heap_insert - insert tuple into a heap
  *
@@ -2767,7 +2915,7 @@ void HeapInsertTsStore(Relation relation, ResultRelInfo *resultRelInfo, HeapTupl
  * TID where the tuple was stored. But note that any toasting of fields
  * within the tuple data is NOT reflected into *tup.
*/ -Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate) +Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate, bool istoast) { TransactionId xid = GetCurrentTransactionId(); HeapTuple heaptup; @@ -2775,10 +2923,11 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu Buffer vmbuffer = InvalidBuffer; bool all_visible_cleared = false; BlockNumber rel_end_block = InvalidBlockNumber; - +#ifdef USE_ASSERT_CHECKING if (tup != NULL) { Assert(TUPLE_IS_HEAP_TUPLE(tup)); } +#endif /* * Fill in tuple header fields, assign an OID, and toast the tuple if * necessary. @@ -2787,12 +2936,12 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu * into the relation; tup is the caller's original untoasted data. */ heaptup = heap_prepare_insert(relation, tup, cid, options); - +#ifdef USE_ASSERT_CHECKING /* All built-in functions are hard coded, and thus they should not be inserted into catalog.pg_proc */ if (!IsBootstrapProcessingMode() && u_sess->attr.attr_common.IsInplaceUpgrade == false) { Assert(!(IsProcRelation(relation) && IsSystemObjOid(HeapTupleGetOid(heaptup)))); } - +#endif /* * We're about to do the actual insert -- but check for conflict first, to * avoid possibly having to roll back work we've just done. @@ -2804,12 +2953,12 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); - +#ifdef ENABLE_MULTIPLE_NODES if (RelationInClusterResizing(relation) && !RelationInClusterResizingReadOnly(relation)) { options |= HEAP_INSERT_SKIP_FSM; rel_end_block = RelationGetEndBlock(relation); } - +#endif /* * Find buffer to insert this tuple into. If the page is all visible, * this will also pin the requisite visibility map page. @@ -2851,7 +3000,7 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu Page page = BufferGetPage(buffer); uint8 info = XLOG_HEAP_INSERT; int bufflags = 0; - TdeInfo tdeinfo = {0}; + TdeInfo* tdeinfo = NULL; /* * If this is a catalog, we need to transmit combocids to properly @@ -2893,6 +3042,8 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu XLogRegisterData((char*)&((HeapPageHeader)(page))->pd_xid_base, sizeof(TransactionId)); } XLogRegisterData((char*)&xlrec, SizeOfHeapInsert); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); xlhdr.t_infomask2 = heaptup->t_data->t_infomask2; xlhdr.t_infomask = heaptup->t_data->t_infomask; @@ -2901,14 +3052,15 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu * For TDE relation, we intend to put the TdeoInfo(relevant to cipher, cmkid, etc.) after the pd_xid_base. */ if (RelationisEncryptEnable(relation)) { - GetTdeInfoFromRel(relation, &tdeinfo); + tdeinfo = (TdeInfo*)palloc0(sizeof(TdeInfo)); + GetTdeInfoFromRel(relation, tdeinfo); } /* * note we mark xlhdr as belonging to buffer; if XLogInsert decides to * write the whole page to the xlog, we don't need to store * xl_heap_header in the xlog. 
*/ - XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags, &tdeinfo); + XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags, tdeinfo); XLogRegisterBufData(0, (char*)&xlhdr, SizeOfHeapHeader); /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */ XLogRegisterBufData(0, @@ -2918,9 +3070,13 @@ Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, Bu /* filtering by origin on a row level is much more efficient */ XLogIncludeOrigin(); - recptr = XLogInsert(RM_HEAP_ID, info); + recptr = XLogInsert(RM_HEAP_ID, info, InvalidBktId, istoast); PageSetLSN(page, recptr); + + if (tdeinfo != NULL) { + pfree_ext(tdeinfo); + } } END_CRIT_SECTION(); @@ -3083,7 +3239,7 @@ void heap_abort_speculative(Relation relation, HeapTuple tuple) PageSetPrunable(page, xid); /* store transaction information of xact deleting the tuple */ - tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + tp.t_data->t_infomask &= ~HEAP_XMAX_BITS; tp.t_data->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); /* @@ -3118,6 +3274,8 @@ void heap_abort_speculative(Relation relation, HeapTuple tuple) XLogBeginInsert(); XLogRegisterData((char *)&xlrec, useOldXlog ? SizeOfOldHeapDelete : SizeOfHeapDelete); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); /* No replica identity & replication origin logged */ @@ -3669,19 +3827,6 @@ void heap_markpos(TableScanDesc sscan) */ static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, CommandId cid, int options) { - /* - * Parallel operations are required to be strictly read-only in a parallel - * worker. Parallel inserts are not safe even in the leader in the - * general case, because group locking means that heavyweight locks for - * relation extension or GIN page locks will not conflict between members - * of a lock group, but we don't prohibit that case here because there are - * useful special cases that we can safely allow, such as CREATE TABLE AS. 
- */ - if (false) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TRANSACTION_STATE), errmsg("cannot insert tuples in a parallel worker"))); - } - if (relation->rd_rel->relhasoids) { #ifdef NOT_USED /* this is redundant with an Assert in HeapTupleSetOid */ @@ -3705,7 +3850,9 @@ static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, CommandId if (!OidIsValid(HeapTupleGetOid(tup))) { HeapTupleSetOid(tup, GetNewOid(relation)); } - } else { + } +#ifdef USE_ASSERT_CHECKING + else { /* check there is not space for an OID */ Assert(!(tup->t_data->t_infomask & HEAP_HASOID)); } @@ -3713,11 +3860,11 @@ static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, CommandId if (tup != NULL) { Assert(TUPLE_IS_HEAP_TUPLE(tup)); } - +#endif tup->t_data->t_infomask &= ~(HEAP_XACT_MASK); tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK); tup->t_data->t_infomask |= HEAP_XMAX_INVALID; - HeapTupleSetXmin(tup, InvalidTransactionId); + tup->t_data->t_choice.t_heap.t_xmin = InvalidTransactionId; if (options & HEAP_INSERT_FROZEN) { HeapTupleHeaderSetXminFrozen(tup->t_data); } @@ -3726,14 +3873,12 @@ static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, CommandId HeapTupleSetXmax(tup, 0); /* for cleanliness */ tup->t_tableOid = RelationGetRelid(relation); tup->t_bucketId = RelationGetBktid(relation); -#ifdef PGXC +#ifdef ENABLE_MULTIPLE_NODES tup->t_xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; -#endif - if (RelationIsRedistributeDest(relation)) { HeapTupleHeaderSetRedisColumns(tup->t_data); } - +#endif /* * If the new tuple is too big for storage or contains already toasted * out-of-line attributes from some other relation, invoke the toaster. @@ -3858,9 +4003,11 @@ int heap_multi_insert(Relation relation, Relation parent, HeapTuple* tuples, int } /* IO collector and IO scheduler */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) { IOSchedulerAndUpdate(IO_TYPE_WRITE, 1, IO_TYPE_ROW); } +#endif if (is_compressed) { buffer = RelationGetNewBufferForBulkInsert(relation, heap_tuples[ndone]->t_len, cmpr_size, bistate); @@ -4084,6 +4231,8 @@ int heap_multi_insert(Relation relation, Relation parent, HeapTuple* tuples, int if (RelationisEncryptEnable(relation)) { GetTdeInfoFromRel(relation, &tdeinfo); } + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags, &tdeinfo); @@ -4240,6 +4389,7 @@ TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, OffsetNumber maxoff; HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */ bool old_key_copied = false; + char identity; Assert(ItemPointerIsValid(tid)); @@ -4271,10 +4421,6 @@ TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - if (PageIs4BXidVersion(page)) { - (void)heap_page_upgrade(relation, buffer); - } - /* * If we didn't pin the visibility map page and the page has become all * visible while we were busy locking the buffer, we'll have to unlock and @@ -4308,6 +4454,17 @@ TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, HeapTupleCopyBaseFromPage(&tp, page); tmfd->xmin = HeapTupleHeaderGetXmin(page, tp.t_data); + if (RELATION_HAS_UIDS(relation) && HeapTupleHeaderHasUid(tp.t_data)) { + uint64 tupleUid = HeapTupleGetUid(&tp); + LockBuffer(buffer, BUFFER_LOCK_UNLOCK); + LockTupleUid(relation, tupleUid, ExclusiveLock, + u_sess->attr.attr_common.allow_concurrent_tuple_update, false); + LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + /* need to 
recompute xid base after release buffer lock */ + HeapTupleCopyBaseFromPage(&tp, page); + tmfd->xmin = HeapTupleHeaderGetXmin(page, tp.t_data); + } + l1: result = HeapTupleSatisfiesUpdate(&tp, cid, buffer, allow_delete_self); @@ -4332,6 +4489,7 @@ l1: infomask2 = tp.t_data->t_infomask2; if (!u_sess->attr.attr_common.allow_concurrent_tuple_update) { + UnlockReleaseBuffer(buffer); ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("abort transaction due to concurrent update"))); } @@ -4472,7 +4630,7 @@ l1: * Compute replica identity tuple before entering the critical section so * we don't PANIC upon a memory allocation failure. */ - old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied); + old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied, &identity); /* * If this is the first possibly-multixact-able operation in the @@ -4518,7 +4676,7 @@ l1: } /* store transaction information of xact deleting the tuple */ - tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + tp.t_data->t_infomask &= ~HEAP_XMAX_BITS; tp.t_data->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); tp.t_data->t_infomask |= new_infomask; tp.t_data->t_infomask2 |= new_infomask2; @@ -4582,10 +4740,14 @@ l1: } else { xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY; } + heap_freetuple(tuple); } XLogBeginInsert(); XLogRegisterData((char *)&xlrec, useOldXlog ? SizeOfOldHeapDelete : SizeOfHeapDelete); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); + XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); /* @@ -4781,6 +4943,7 @@ TM_Result heap_update(Relation relation, Relation parentRelation, ItemPointer ot bool rel_in_redis = RelationInClusterResizing(relation); OffsetNumber maxoff; BlockNumber rel_end_block = InvalidBlockNumber; + char relreplident; LockTupleMode mode; Assert(ItemPointerIsValid(otid)); @@ -4838,10 +5001,6 @@ TM_Result heap_update(Relation relation, Relation parentRelation, ItemPointer ot LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - if (PageIs4BXidVersion(page)) { - (void)heap_page_upgrade(relation, buffer); - } - lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid)); maxoff = PageGetMaxOffsetNumber(page); /* check otid */ @@ -4891,7 +5050,7 @@ TM_Result heap_update(Relation relation, Relation parentRelation, ItemPointer ot * afterwards.) 
*/ MultiXactIdSetOldestMember(); - } else + } else #endif { mode = LockTupleExclusive; @@ -4902,6 +5061,14 @@ TM_Result heap_update(Relation relation, Relation parentRelation, ItemPointer ot *lockmode = mode; } + if (RELATION_HAS_UIDS(relation) && HeapTupleHeaderHasUid(oldtup.t_data)) { + uint64 tupleUid = HeapTupleGetUid(&oldtup); + LockBuffer(buffer, BUFFER_LOCK_UNLOCK); + LockTupleUid(relation, tupleUid, ExclusiveLock, + u_sess->attr.attr_common.allow_concurrent_tuple_update, false); + LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + } + l2: checked_lockers = false; locker_remains = false; @@ -4944,6 +5111,7 @@ l2: infomask2 = oldtup.t_data->t_infomask2; if (!u_sess->attr.attr_common.allow_concurrent_tuple_update) { + UnlockReleaseBuffer(buffer); ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("abort transaction due to concurrent update"))); } @@ -5202,16 +5370,32 @@ l2: } } - if (t_thrd.proc->workingVersionNum < ENHANCED_TUPLE_LOCK_VERSION_NUM) { + if (t_thrd.proc->workingVersionNum < ENHANCED_TUPLE_LOCK_VERSION_NUM +#ifdef ENABLE_MULTIPLE_NODES + || true +#endif + ) { /* if the only locker of old tuple is ourselves, xmax_new_tuple may be xid and it would be valid */ if (TransactionIdIsValid(xmax_new_tuple) || !TransactionIdEquals(xmax_old_tuple, xid)) { ereport(DEBUG2, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), errmsg("New MultiXact feature isn't support in this version. Please upgrade to version: %d", ENHANCED_TUPLE_LOCK_VERSION_NUM))); } + /* Reset xmax, infomask and infomask2 of old tuple and new one */ + xmax_old_tuple = xid; + infomask_old_tuple = 0; + infomask2_old_tuple = 0; + infomask_new_tuple = HEAP_XMAX_INVALID; + infomask2_new_tuple = 0; xmax_new_tuple = 0; - infomask2_new_tuple &= ~(HEAP_KEYS_UPDATED | HEAP_XMAX_LOCK_ONLY); } + + if (RELATION_HAS_UIDS(relation)) { + uint64 uid = HeapTupleHeaderHasUid(oldtup.t_data) ? HeapTupleGetUid(&oldtup) + : GetNewUidForTuple((parentRelation ? parentRelation : relation)); + HeapTupleSetUid(newtup, uid, relation->rd_att->natts); + } + /* * Prepare the new tuple with the appropriate initial values of Xmin and * Xmax, as well as initial infomask bits as computed above. @@ -5234,7 +5418,7 @@ l2: HeapTupleSetXmin(newtup, xid); HeapTupleHeaderSetCmin(newtup->t_data, cid); - newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple; + newtup->t_data->t_infomask |= (HEAP_UPDATED | infomask_new_tuple); newtup->t_data->t_infomask2 |= infomask2_new_tuple; HeapTupleHeaderSetXmax(page, newtup->t_data, xmax_new_tuple); /* for cleanliness */ newtup->t_tableOid = RelationGetRelid(relation); @@ -5279,7 +5463,7 @@ l2: new_tup_size = MAXALIGN(newtup->t_len); if (need_toast || new_tup_size > pagefree || rel_in_redis) { /* Clear obsolete visibility flags ... */ - oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + oldtup.t_data->t_infomask &= ~HEAP_XMAX_BITS; oldtup.t_data->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); Assert(TransactionIdIsValid(xmax_old_tuple)); oldtup.t_data->t_infomask |= infomask_old_tuple; @@ -5393,7 +5577,8 @@ l2: * ExtractReplicaIdentity() will return NULL if nothing needs to be * logged. */ - old_key_tuple = ExtractReplicaIdentity(relation, &oldtup, !satisfies_id, &old_key_copied); + bool keyChanged = XLogLogicalInfoActive() ? 
true : !satisfies_id; + old_key_tuple = ExtractReplicaIdentity(relation, &oldtup, keyChanged, &old_key_copied, &relreplident); newpage = BufferGetPage(newbuf); if (newbuf != buffer) { @@ -5444,7 +5629,7 @@ l2: if (!already_marked) { /* Clear obsolete visibility flags ... */ - oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + oldtup.t_data->t_infomask &= ~HEAP_XMAX_BITS; oldtup.t_data->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); Assert(TransactionIdIsValid(xmax_old_tuple)); oldtup.t_data->t_infomask |= infomask_old_tuple; @@ -5485,6 +5670,7 @@ l2: (void)log_heap_new_cid(relation, &oldtup); (void)log_heap_new_cid(relation, heaptup); } + recptr = log_heap_update(relation, buffer, &oldtup, @@ -5492,7 +5678,8 @@ l2: heaptup, old_key_tuple, all_visible_cleared, - all_visible_cleared_new); + all_visible_cleared_new, + relreplident); if (newbuf != buffer) { PageSetLSN(BufferGetPage(newbuf), recptr); @@ -5569,7 +5756,7 @@ static XLogRecPtr log_heap_new_cid_insert(xl_heap_new_cid *xlrec, int bucketid) XLogBeginInsert(); XLogRegisterData((char *) xlrec, SizeOfHeapNewCid); /* will be looked at irrespective of origin */ - recptr = XLogInsert(RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, false, bucketid); + recptr = XLogInsert(RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, bucketid); return recptr; } @@ -5631,89 +5818,6 @@ static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup) return recptr; } -bool heap_page_upgrade(Relation relation, Buffer buffer) -{ - TransactionId xid = GetCurrentTransactionId(); - Page page = BufferGetPage(buffer); - Size page_free_space = 0; - Size save_free_space = 0; - bool is_upgrade = true; - int nline; - - /* Compute desired extra freespace due to fillfactor option */ - save_free_space = RelationGetTargetPageFreeSpace(relation, HEAP_DEFAULT_FILLFACTOR); - page_free_space = (int)((PageHeader)page)->pd_upper - (int)((PageHeader)page)->pd_lower; - - if (page_free_space - save_free_space >= SizeOfHeapPageUpgradeData) { - if (!PageIs4BXidVersion(page)) { - return true; - } - - START_CRIT_SECTION(); - PageLocalUpgrade(page); - } else { - nline = PageGetMaxOffsetNumber(page); - if (nline == 1) { - if (xid > MaxShortTransactionId) { - if (BufferIsValid(buffer)) { - UnlockReleaseBuffer(buffer); - } - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("relation \"%s\" has one big row which is not supported under 64bits XID system. " - "Current xid is %lu", - RelationGetRelationName(relation), - xid))); - } else - ereport(WARNING, - (errcode(ERRCODE_WARNING), - errmsg("block number %u for relation \"%s\" has one big row which is not supported under " - "64bits XID system. Current xid is %lu", - BufferGetBlockNumber(buffer), - RelationGetRelationName(relation), - xid), - handle_in_client(true))); - } else { - if (xid > MaxShortTransactionId) { - if (BufferIsValid(buffer)) { - UnlockReleaseBuffer(buffer); - } - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("relation \"%s\" has no free space to upgrade. Current xid is %lu, please VACUUM FULL " - "this relation!!!", - RelationGetRelationName(relation), - xid))); - } else - ereport(DEBUG1, - (errmsg("block number %u for relation \"%s\" has no free space to upgrade. 
Current xid is %lu, it " - "is safe before XID increased to 4294967296 !", - BufferGetBlockNumber(buffer), - RelationGetRelationName(relation), - xid))); - } - is_upgrade = false; - return false; - } - - /* xlog stuff */ - if (is_upgrade) { - MarkBufferDirty(buffer); - if (RelationNeedsWAL(relation)) { - XLogRecPtr recptr; - - XLogBeginInsert(); - XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); - - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_PAGE_UPGRADE); - - PageSetLSN(BufferGetPage(buffer), recptr); - } - END_CRIT_SECTION(); - } - return true; -} - /* * Check if the specified attribute's value is same in both given tuples. * Subroutine for HeapSatisfiesHOTUpdate. @@ -5743,6 +5847,7 @@ static bool heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, HeapTuple tup if (attrnum != ObjectIdAttributeNumber && #ifdef PGXC attrnum != XC_NodeIdAttributeNumber && attrnum != BucketIdAttributeNumber && + attrnum != UidAttributeNumber && /* may should remove this attr */ #endif attrnum != TableOidAttributeNumber) { return false; @@ -6050,8 +6155,9 @@ static void CheckInfomaskCompatilibilty(TransactionId xid, uint16 infomask) * * See README.tuplock for a thorough explanation of this mechanism. */ -TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer* buffer, CommandId cid, - LockTupleMode mode, bool nowait, bool follow_updates, TM_FailureData *tmfd, bool allow_lock_self) +TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer* buffer, + CommandId cid, LockTupleMode mode, bool nowait, bool follow_updates, TM_FailureData *tmfd, bool allow_lock_self, + int waitSec) { TM_Result result; ItemPointer tid = &(tuple->t_self); @@ -6096,9 +6202,6 @@ TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer* buffer, Co LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); page = BufferGetPage(*buffer); - if (PageIs4BXidVersion(page)) { - (void)heap_page_upgrade(relation, *buffer); - } lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid)); Assert(ItemIdIsNormal(lp)); @@ -6118,6 +6221,12 @@ TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer* buffer, Co tuple->t_xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; #endif + if (RELATION_HAS_UIDS(relation) && HeapTupleHeaderHasUid(tuple->t_data)) { + uint64 tupleUid = HeapTupleGetUid(tuple); + LockBuffer(*buffer, BUFFER_LOCK_UNLOCK); + LockTupleUid(relation, tupleUid, TupleLockExtraInfo[mode].hwlock, !nowait, true); + LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); + } l3: HeapTupleCopyBaseFromPage(tuple, page); result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer, allow_lock_self); @@ -6274,7 +6383,7 @@ l3: } } - LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); + LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); /* * Make sure it's still an appropriate lock, else start over. 
@@ -6391,7 +6500,7 @@ l3: RelationGetRelationName(relation)))); } } else { - LOCK_TUPLE_TUP_LOCK(relation, tid, mode); + LockTuple(relation, tid, TupleLockExtraInfo[mode].hwlock, true, waitSec); } have_tuple_lock = true; } @@ -6408,7 +6517,7 @@ l3: ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg( "could not obtain lock on row in relation \"%s\"", RelationGetRelationName(relation)))); } else { - MultiXactIdWait((MultiXactId)xwait, status, NULL); + MultiXactIdWait((MultiXactId)xwait, status, NULL, waitSec); } /* @@ -6427,7 +6536,7 @@ l3: ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg( "could not obtain lock on row in relation \"%s\"", RelationGetRelationName(relation)))); } else { - XactLockTableWait(xwait, true); + XactLockTableWait(xwait, true, waitSec); } } @@ -7257,7 +7366,7 @@ static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPoin * tuple is an in-memory tuple structure containing the data to be written * over the target tuple. Also, tuple->t_self identifies the target tuple. */ -void heap_inplace_update(Relation relation, HeapTuple tuple) +void heap_inplace_update(Relation relation, HeapTuple tuple, bool waitFlush) { Buffer buffer; Page page; @@ -7309,10 +7418,11 @@ void heap_inplace_update(Relation relation, HeapTuple tuple) MarkBufferDirty(buffer); + XLogRecPtr recptr = InvalidXLogRecPtr; + /* XLOG stuff */ if (RelationNeedsWAL(relation)) { xl_heap_inplace xlrec; - XLogRecPtr recptr; xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self); @@ -7331,6 +7441,10 @@ void heap_inplace_update(Relation relation, HeapTuple tuple) UnlockReleaseBuffer(buffer); + if (waitFlush && (recptr != InvalidXLogRecPtr)) { + XLogWaitFlush(recptr); + } + /* * Send out shared cache inval if necessary. Note that because we only * pass the new version of the tuple, this mustn't be used for any @@ -7765,7 +7879,7 @@ XLogRecPtr log_heap_cleanup_info(const RelFileNode* rnode, TransactionId latest_ XLogBeginInsert(); XLogRegisterData((char*)&xlrec, SizeOfHeapCleanupInfo); - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO, false, rnode->bucketNode); + recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_CLEANUP_INFO, rnode->bucketNode); return recptr; } @@ -7911,7 +8025,7 @@ XLogRecPtr log_cu_bcm(const RelFileNode* rnode, int col, uint64 block, int statu XLogBeginInsert(); XLogRegisterData((char*)&xlrec, SizeOfHeapBcm); - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_BCM, false, rnode->bucketNode); + recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_BCM, rnode->bucketNode); return recptr; } @@ -7937,7 +8051,7 @@ XLogRecPtr log_heap_bcm(const RelFileNode* rnode, int col, uint64 block, int sta XLogBeginInsert(); XLogRegisterData((char*)&xlrec, SizeOfHeapBcm); - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_BCM, false, rnode->bucketNode); + recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_BCM, rnode->bucketNode); return recptr; } @@ -7990,7 +8104,8 @@ XLogRecPtr log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer heap_bu * have modified the buffer(s) and marked them dirty. 
*/ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple oldtup, Buffer newbuf, - HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared) + HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared, + char relreplident) { xl_heap_update xlrec; xl_heap_header xlhdr; @@ -8050,7 +8165,7 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple oldtup if (need_tuple_data) { xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE; if (old_key_tuple) { - if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL) + if (relreplident == REPLICA_IDENTITY_FULL) xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE; else xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY; @@ -8093,6 +8208,8 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple oldtup } XLogRegisterData((char *)&xlrec, useOldXlog ? SizeOfOldHeapUpdate : SizeOfHeapUpdate); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); /* We need to log a tuple identity */ if (need_tuple_data && old_key_tuple) { @@ -8140,18 +8257,19 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple oldtup * Returns NULL if there's no need to log an identity or if there's no suitable * key in the Relation relation. */ -static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool* copy) +static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool* copy, + char *relreplident) { TupleDesc desc = RelationGetDescr(relation); Oid replidindex; Relation idx_rel; - char relreplident; HeapTuple key_tuple = NULL; bool nulls[MaxHeapAttributeNumber]; Datum values[MaxHeapAttributeNumber]; int natt; errno_t rc = 0; *copy = false; + *relreplident = REPLICA_IDENTITY_NOTHING; if (!RelationIsLogicallyLogged(relation)) { return NULL; @@ -8175,16 +8293,16 @@ static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool ke heap_freetuple(tuple); if (is_null) { - relreplident = REPLICA_IDENTITY_NOTHING; + *relreplident = REPLICA_IDENTITY_NOTHING; } else { - relreplident = CharGetDatum(replident); + *relreplident = CharGetDatum(replident); } - if (replident == REPLICA_IDENTITY_NOTHING) { + if (*relreplident == REPLICA_IDENTITY_NOTHING) { return NULL; } - if (replident == REPLICA_IDENTITY_FULL) { + if (*relreplident == REPLICA_IDENTITY_FULL) { /* * When logging the entire old tuple, it very well could contain * toasted columns. If so, force them to be inlined. 
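A related pattern in this hunk: ExtractReplicaIdentity now reports the replica-identity mode it actually applied through the new relreplident out parameter, and log_heap_update consumes that value, rather than re-reading rel->rd_rel->relreplident, when deciding between XLH_UPDATE_CONTAINS_OLD_TUPLE and XLH_UPDATE_CONTAINS_OLD_KEY. A minimal sketch of the out-parameter pattern, with illustrative types that are not the openGauss API:

```cpp
#include <iostream>
#include <optional>
#include <string>

// Which replica-identity mode the extractor actually used; reported through
// an out parameter so the WAL writer need not re-read the relation descriptor.
enum class ReplicaIdentity { Nothing, Key, Full };

std::optional<std::string> extract_replica_identity(bool logically_logged,
                                                    bool full_row,
                                                    ReplicaIdentity *used)
{
    *used = ReplicaIdentity::Nothing;  // default, as in the patch
    if (!logically_logged)
        return std::nullopt;           // no old key/tuple to log at all
    *used = full_row ? ReplicaIdentity::Full : ReplicaIdentity::Key;
    return full_row ? std::string("<entire old tuple>")
                    : std::string("<key columns only>");
}

int main()
{
    ReplicaIdentity used;
    auto old_key = extract_replica_identity(true, false, &used);
    if (old_key)  // mirrors: flags |= CONTAINS_OLD_TUPLE vs CONTAINS_OLD_KEY
        std::cout << (used == ReplicaIdentity::Full ? "OLD_TUPLE" : "OLD_KEY")
                  << ": " << *old_key << '\n';
    return 0;
}
```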
@@ -8355,7 +8473,7 @@ XLogRecPtr log_logical_newpage(RelFileNode* rnode, ForkNumber forkNum, BlockNumb */ XLogRegisterBuffer(0, buffer, REGBUF_NO_IMAGE); - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOGICAL_NEWPAGE, false, rnode->bucketNode); + recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOGICAL_NEWPAGE, rnode->bucketNode); PageSetLSN(page, recptr); PageSetLogical(page); @@ -8394,7 +8512,7 @@ XLogRecPtr log_logical_newcu(RelFileNode* rnode, ForkNumber forkNum, int attid, if (cuData != NULL) { XLogRegisterData(cuData, size); } - recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOGICAL_NEWPAGE, false, rnode->bucketNode); + recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOGICAL_NEWPAGE, rnode->bucketNode); END_CRIT_SECTION(); @@ -8935,6 +9053,9 @@ static void heap_xlog_update(XLogReaderState* record, bool hot_update) if (isinit) { rec_data += sizeof(TransactionId); } + if ((record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN) { + rec_data += sizeof(CommitSeqNo); + } xlrec = (xl_heap_update*)rec_data; @@ -9042,19 +9163,6 @@ static void heap_xlog_update(XLogReaderState* record, bool hot_update) } } -void heap_xlog_page_upgrade(XLogReaderState* record) -{ - RedoBufferInfo buffer; - - if (BLK_NEEDS_REDO == XLogReadBufferForRedo(record, HEAP_PAGE_UPDATE_ORIG_BLOCK_NUM, &buffer)) { - HeapXlogPageUpgradeOperatorPage(&buffer); - MarkBufferDirty(buffer.buf); - } - if (BufferIsValid(buffer.buf)) { - UnlockReleaseBuffer(buffer.buf); - } -} - static void heap_xlog_lock(XLogReaderState* record) { RedoBufferInfo buffer; @@ -9168,9 +9276,6 @@ void heap2_redo(XLogReaderState* record) case XLOG_HEAP2_LOGICAL_NEWPAGE: heap_xlog_logical_new_page(record); break; - case XLOG_HEAP2_PAGE_UPGRADE: - heap_xlog_page_upgrade(record); - break; default: ereport(PANIC, (errmsg("heap2_redo: unknown op code %hhu", info))); } @@ -9325,6 +9430,18 @@ void partition_sync(Relation rel, Oid partition_id, LOCKMODE partition_lockmode) partitionClose(rel, partition, NoLock); } +static void ReportPartitionOpenError(Relation relation, Oid partition_id) +{ + ereport( + ERROR, + (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("could not open partition with OID %u", partition_id), + errdetail("Check whether DDL operations exist on the current partition in the table %s, like " + "drop/exchange/split/merge partition", + RelationGetRelationName(relation)), + errcause("If a DDL operation is in progress, the failure is caused by that operation. 
Otherwise, it is a system error."), + erraction("Wait for the DDL operation to complete, or contact an engineer for support."))); +} + /* * If we are executing select for update/share operation, * directly hold RowShareLock to avoid deadlock with vacuum full @@ -9338,6 +9455,18 @@ static LOCKMODE GetPartitionLockMode(LOCKMODE lockmode) } } +static Partition SubPartitionOidGetPartitionWithRetry(Relation rel, Oid subPartOid, LOCKMODE lockmode, const char *stmt) +{ + Oid parentOid = partid_get_parentid(subPartOid); + Partition part = partitionOpenWithRetry(rel, parentOid, lockmode, stmt); + Relation partRel = partitionGetRelation(rel, part); + Partition subPart = partitionOpenWithRetry(partRel, subPartOid, lockmode, stmt); + releaseDummyRelation(&partRel); + partitionClose(rel, part, NoLock); + + return subPart; +} + /* * @@GaussDB@@ * Target : data partition @@ -9354,6 +9483,13 @@ Partition partitionOpenWithRetry(Relation relation, Oid partition_id, LOCKMODE l Assert(PointerIsValid(relation)); Assert(OidIsValid(partition_id)); + if (RelationIsSubPartitioned(relation) && relation->rd_id != partid_get_parentid(partition_id)) { + /* partition_id is subpartition oid */ + p = SubPartitionOidGetPartitionWithRetry(relation, partition_id, lockmode, stmt); + Assert(relation->rd_id == partid_get_parentid(p->pd_part->parentid)); + return p; + } + /* * If we are executing select for update/share operation, * directly hold RowShareLock to avoid deadlock with vacuum full @@ -9394,9 +9530,9 @@ Partition partitionOpenWithRetry(Relation relation, Oid partition_id, LOCKMODE l p = PartitionIdGetPartition(partition_id, RelationGetStorageType(relation)); if (!PartitionIsValid(p)) { - ereport(ERROR, - (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("could not open partition with OID %u", partition_id))); + ReportPartitionOpenError(relation, partition_id); } + Assert(relation->rd_id == p->pd_part->parentid); /* Insert TDE key to buffer cache for tde table */ if (g_instance.attr.attr_security.enable_tde && IS_PGXC_DATANODE && RelationisEncryptEnable(relation)) { @@ -9438,6 +9574,13 @@ Partition partitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmode, Assert(PointerIsValid(relation)); Assert(bucket_id < SegmentBktId); + if (RelationIsSubPartitioned(relation) && relation->rd_id != partid_get_parentid(partition_id)) { + /* partition_id is subpartition oid */ + p = SubPartitionOidGetPartition(relation, partition_id, lockmode); + Assert(relation->rd_id == partid_get_parentid(p->pd_part->parentid)); + return p; + } + /* * If we are executing select for update/share operation, * directly hold RowShareLock to avoid deadlock with vacuum full @@ -9468,10 +9611,20 @@ Partition partitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmode, p = PartitionIdGetPartition(partition_id, RelationGetStorageType(relation)); if (!PartitionIsValid(p)) { - ereport(ERROR, - (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("could not open partition with OID %u", partition_id))); + ReportPartitionOpenError(relation, partition_id); } + if (p->xmin_csn != InvalidCommitSeqNo) { + Snapshot snapshot = GetActiveSnapshot(); + if (p->xmin_csn > snapshot->snapshotcsn) { + ereport(ERROR, + (errcode(ERRCODE_SNAPSHOT_INVALID), + errmsg("current snapshot is invalid for this partition: %u.", partition_id))); + } + + } + Assert(relation->rd_id == p->pd_part->parentid); + /* Insert TDE key to buffer cache for tde table */ if (g_instance.attr.attr_security.enable_tde && IS_PGXC_DATANODE && RelationisEncryptEnable(relation)) { 
PartitionInsertTdeInfoToCache(relation, p); @@ -9492,6 +9645,53 @@ Partition partitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmode, return p; } +static Partition TrySubPartitionOidGetPartition(Relation rel, Oid subPartOid, LOCKMODE lockmode) +{ + Oid parentOid = partid_get_parentid(subPartOid); + Assert(rel->rd_id == partid_get_parentid(parentOid)); + Partition part = tryPartitionOpen(rel, parentOid, lockmode); + if (part == NULL) { + return NULL; + } + Relation partRel = partitionGetRelation(rel, part); + Partition subPart = tryPartitionOpen(partRel, subPartOid, lockmode); + releaseDummyRelation(&partRel); + partitionClose(rel, part, NoLock); + + return subPart; +} + +static void GetPartitionLockBeforeOpenPartition(Relation relation, Oid partition_id, LOCKMODE lockmode) +{ + if (lockmode == NoLock) { + return; + } + + PartitionIdentifier* partID = NULL; + if (relation->rd_rel->relkind == RELKIND_RELATION) { + partID = partOidGetPartID(relation, partition_id); + switch (partID->partArea) { + case PART_AREA_RANGE: + case PART_AREA_LIST: + case PART_AREA_HASH: + LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK); + break; + case PART_AREA_INTERVAL: + LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK); + break; + default: + break; + } + pfree(partID); + } else if (relation->rd_rel->relkind == RELKIND_INDEX) { + LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK); + } else { + ereport(ERROR, (errcode(ERRCODE_RELATION_OPEN_ERROR), + errmsg("opening partition %u, but relation %s %u is neither table nor index", partition_id, + RelationGetRelationName(relation), RelationGetRelid(relation)))); + } +} + /* * @@GaussDB@@ * Target : data partition * @@ -9509,6 +9709,13 @@ Partition tryPartitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmod Assert(PointerIsValid(relation)); Assert(OidIsValid(partition_id)); + if (RelationIsSubPartitioned(relation) && relation->rd_id != partid_get_parentid(partition_id)) { + /* partition_id is subpartition oid */ + p = TrySubPartitionOidGetPartition(relation, partition_id, lockmode); + Assert(relation->rd_id == partid_get_parentid(p->pd_part->parentid)); + return p; + } + /* * If we are executing select for update/share operation, * directly hold RowShareLock to avoid deadlock with vacuum full @@ -9516,33 +9723,7 @@ Partition tryPartitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmod lockmode = GetPartitionLockMode(lockmode); /* Get the lock before trying to open the relcache entry */ - if (lockmode != NoLock) { - if (relation->rd_rel->relkind == RELKIND_RELATION) { - partID = partOidGetPartID(relation, partition_id); - switch (partID->partArea) { - case PART_AREA_RANGE: - case PART_AREA_LIST: - case PART_AREA_HASH: - LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK); - break; - case PART_AREA_INTERVAL: - LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK); - break; - default: - break; - } - pfree(partID); - } else if (relation->rd_rel->relkind == RELKIND_INDEX) { - LockPartition(relation->rd_id, partition_id, lockmode, PARTITION_LOCK); - } else { - ereport(ERROR, - (errcode(ERRCODE_RELATION_OPEN_ERROR), - errmsg("openning partition %u, but relation %s %u is neither table nor index", - partition_id, - RelationGetRelationName(relation), - RelationGetRelid(relation)))); - } - } + GetPartitionLockBeforeOpenPartition(relation, partition_id, lockmode); /* * Now that we have the lock, probe to see if the partition really exists @@ -9583,8 
+9764,7 @@ Partition tryPartitionOpen(Relation relation, Oid partition_id, LOCKMODE lockmod p = PartitionIdGetPartition(partition_id, RelationGetStorageType(relation)); if (!PartitionIsValid(p)) { - ereport(ERROR, - (errcode(ERRCODE_RELATION_OPEN_ERROR), errmsg("could not open partition with OID %u", partition_id))); + ReportPartitionOpenError(relation, partition_id); } Assert(relation->rd_id == p->pd_part->parentid); @@ -9627,15 +9807,29 @@ void partitionClose(Relation relation, Partition partition, LOCKMODE lockmode) Assert(PointerIsValid(relation)); Assert(PointerIsValid(part)); - /* The partcache does the real work... */ - PartitionClose(part); - /* * If we are executing select for update/share operation, * directly hold RowShareLock to avoid deadlock with vacuum full */ lockmode = GetPartitionLockMode(lockmode); + if (RelationIsSubPartitioned(relation) && relation->rd_id != part->pd_part->parentid) { + Assert(relation->rd_id == partid_get_parentid(part->pd_part->parentid)); + if (lockmode != NoLock) { + UnlockPartition(relation->rd_id, part->pd_part->parentid, lockmode, PARTITION_LOCK); + } + PartitionClose(part); + if (lockmode != NoLock) { + UnlockPartition(part->pd_part->parentid, part->pd_id, lockmode, PARTITION_LOCK); + } + return; + } + + Assert(relation->rd_id == part->pd_part->parentid); + + /* The partcache does the real work... */ + PartitionClose(part); + if (lockmode != NoLock) { if (relation->rd_rel->relkind == RELKIND_RELATION) { partID = partOidGetPartID(relation, part->pd_id); @@ -9775,11 +9969,9 @@ HeapTuple heapam_index_fetch_tuple(IndexScanDesc scan, bool *all_dead) * replayed before the tid replayed. This is acceptable, so we return * null without reporting error. */ -#ifndef ENABLE_MULTIPLE_NODES if(!BufferIsValid(scan->xs_cbuf)) { return NULL; } -#endif /* * Prune page, but only if we weren't already on this page diff --git a/src/gausskernel/storage/access/heap/heapam_visibility.cpp b/src/gausskernel/storage/access/heap/heapam_visibility.cpp index 0ea95dc13..518b0dda1 100644 --- a/src/gausskernel/storage/access/heap/heapam_visibility.cpp +++ b/src/gausskernel/storage/access/heap/heapam_visibility.cpp @@ -1700,6 +1700,176 @@ static bool HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot, Bu return true; } +/* + * Decode MVCC: check the visibility of an uncommitted xmin; return true if the visibility is determinate. + */ +static bool HeapTupleUncommitedXminCheckDecodeMVCC(HeapTupleHeader tuple, Snapshot snapshot, bool *visible, + TransactionIdStatus *status, Buffer buffer) +{ + Page page = BufferGetPage(buffer); + if (HeapTupleHeaderXminInvalid(tuple)) { + *visible = false; + return true; + } + + /* IMPORTANT: Version snapshot is independent of the current transaction. 
*/ + if (!IsVersionMVCCSnapshot(snapshot) && + TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(page, tuple))) { + + if ((tuple->t_infomask & HEAP_XMAX_INVALID) || + HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask, tuple->t_infomask2)) { + *visible = true; + return true; + } + + Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI)); + + if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(page, tuple))) { + /* deleting subtransaction must have aborted */ + Assert(!TransactionIdDidCommit(HeapTupleHeaderGetXmax(page, tuple))); + SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId); + *visible = true; + return true; + } + } else { + bool xidVisible = XidVisibleInDecodeSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, status, buffer); + if (*status == XID_COMMITTED) + SetHintBits(tuple, buffer, HEAP_XMIN_COMMITTED, HeapTupleHeaderGetXmin(page, tuple)); + + if (*status == XID_ABORTED) { + if (!LatestFetchCSNDidAbort(HeapTupleHeaderGetXmin(page, tuple))) + LatestTransactionStatusError(HeapTupleHeaderGetXmin(page, tuple), + snapshot, + "HeapTupleSatisfiesMVCC set HEAP_XMIN_INVALID xid don't abort"); + + SetHintBits(tuple, buffer, HEAP_XMIN_INVALID, InvalidTransactionId); + } + + if (!xidVisible) { + if (!GTM_LITE_MODE || u_sess->attr.attr_common.xc_maintenance_mode || + snapshot->gtm_snapshot_type != GTM_SNAPSHOT_TYPE_LOCAL || + !IsXidVisibleInGtmLiteLocalSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, *status, + HeapTupleHeaderGetXmin(page, tuple) == HeapTupleHeaderGetXmax(page, tuple), buffer, NULL)) { + *visible = false; + return true; + } + } + } + return false; +} + +/* + * Decode MVCC: check the visibility of a committed xmin; return true if the visibility is determinate. + */ +static bool HeapTupleCommitedXminCheckDecodeMVCC(HeapTupleHeader tuple, Snapshot snapshot, bool *visible, + TransactionIdStatus *status, Buffer buffer) +{ + Page page = BufferGetPage(buffer); + /* xmin is committed, but maybe not according to our snapshot */ + if (!HeapTupleHeaderXminFrozen(tuple) && + !CommittedXidVisibleInDecodeSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, buffer)) { + /* tuple xmin has already committed, no need to use xc_maintenance_mode bypass */ + if (!GTM_LITE_MODE || snapshot->gtm_snapshot_type != GTM_SNAPSHOT_TYPE_LOCAL || + !IsXidVisibleInGtmLiteLocalSnapshot(HeapTupleHeaderGetXmin(page, tuple), snapshot, XID_COMMITTED, + HeapTupleHeaderGetXmin(page, tuple) == HeapTupleHeaderGetXmax(page, tuple), buffer, NULL)) { + *visible = false; + return true; /* treat as still in progress */ + } + } + return false; +} + +/* + * Decode MVCC: check the visibility of xmax; return true if the visibility is determinate. 
+ */ +static bool HeapTupleXmaxCheckDecodeMVCC(HeapTupleHeader tuple, Snapshot snapshot, bool *visible, + TransactionIdStatus *status, Buffer buffer) +{ + Page page = BufferGetPage(buffer); + if ((tuple->t_infomask & HEAP_XMAX_INVALID) || HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask, tuple->t_infomask2) || + (tuple->t_infomask & HEAP_XMAX_IS_MULTI)) { + *visible = true; + return true; + } + + if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED)) { + bool xidVisible = XidVisibleInDecodeSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, status, buffer); + if (*status == XID_COMMITTED) { + /* xmax transaction committed */ + SetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED, HeapTupleHeaderGetXmax(page, tuple)); + } + if (*status == XID_ABORTED) { + if (!LatestFetchCSNDidAbort(HeapTupleHeaderGetXmax(page, tuple))) + LatestTransactionStatusError(HeapTupleHeaderGetXmax(page, tuple), + snapshot, + "HeapTupleSatisfiesMVCC set HEAP_XMAX_INVALID xid don't abort"); + + /* it must have aborted or crashed */ + SetHintBits(tuple, buffer, HEAP_XMAX_INVALID, InvalidTransactionId); + } + if (!xidVisible) { + if (!GTM_LITE_MODE || u_sess->attr.attr_common.xc_maintenance_mode || + snapshot->gtm_snapshot_type != GTM_SNAPSHOT_TYPE_LOCAL || + !IsXidVisibleInGtmLiteLocalSnapshot(HeapTupleHeaderGetXmax(page, tuple), + snapshot, *status, false, buffer, NULL)) { + *visible = true; + return true; + } + } + + } else { + /* xmax is committed, but maybe not according to our snapshot */ + if (!CommittedXidVisibleInDecodeSnapshot(HeapTupleHeaderGetXmax(page, tuple), snapshot, buffer)) { + if (!GTM_LITE_MODE || snapshot->gtm_snapshot_type != GTM_SNAPSHOT_TYPE_LOCAL || + !IsXidVisibleInGtmLiteLocalSnapshot(HeapTupleHeaderGetXmax(page, tuple), + snapshot, XID_COMMITTED, false, buffer, NULL)) { + *visible = true; + return true; + } + } + } + return false; +} + +/* + * MVCC used in parallel decoding, which is mainly based on CSN. + */ +static bool HeapTupleSatisfiesDecodeMVCC(HeapTuple htup, Snapshot snapshot, Buffer buffer) +{ + HeapTupleHeader tuple = htup->t_data; + Assert(ItemPointerIsValid(&htup->t_self)); + Assert(htup->t_tableOid != InvalidOid); + bool visible = false; + TransactionIdStatus hintstatus; + + /* + * Just valid for read-only transaction when u_sess->attr.attr_common.XactReadOnly is true. + * Show any tuples including dirty ones when u_sess->attr.attr_storage.enable_show_any_tuples is true. + * GUC param u_sess->attr.attr_storage.enable_show_any_tuples is just for analyse or maintenance + */ + if (u_sess->attr.attr_common.XactReadOnly && u_sess->attr.attr_storage.enable_show_any_tuples) + return true; + + bool getVisibility = false; + if (!HeapTupleHeaderXminCommitted(tuple)) { + getVisibility = HeapTupleUncommitedXminCheckDecodeMVCC(tuple, snapshot, &visible, &hintstatus, buffer); + if (getVisibility) { + return visible; + } + } else { + getVisibility = HeapTupleCommitedXminCheckDecodeMVCC(tuple, snapshot, &visible, &hintstatus, buffer); + if (getVisibility) { + return visible; + } + } + + getVisibility = HeapTupleXmaxCheckDecodeMVCC(tuple, snapshot, &visible, &hintstatus, buffer); + if (getVisibility) { + return visible; + } + return false; +} + /* * HeapTupleSatisfiesVisibility * True iff heap tuple satisfies a time qual. 
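The new HeapTupleSatisfiesDecodeMVCC is organized as three staged checks, uncommitted xmin, committed xmin, then xmax, each of which either settles visibility (returns true with *visible set) or defers to the next stage. A compact sketch of that control flow, assuming simplified placeholder tuple fields in place of the real infomask/CSN machinery, with std::optional standing in for the (determinate, visible) pair:

```cpp
#include <iostream>
#include <optional>

// Simplified placeholder for the tuple state consulted by the real checks.
struct Tuple {
    bool xmin_in_snapshot;  // inserting xid visible to the decode snapshot
    bool xmax_invalid;      // no deleter, or xmax is lock-only
    bool xmax_in_snapshot;  // deleting xid visible to the decode snapshot
};

// Each stage either settles visibility (returns a value) or defers (nullopt).
std::optional<bool> xmin_stage(const Tuple &t)
{
    if (!t.xmin_in_snapshot)
        return false;      // inserter invisible: tuple invisible, determinate
    return std::nullopt;   // inserter visible: the xmax stage decides
}

std::optional<bool> xmax_stage(const Tuple &t)
{
    if (t.xmax_invalid)
        return true;       // never deleted (or merely locked): visible
    if (!t.xmax_in_snapshot)
        return true;       // deletion not visible yet: still visible
    return std::nullopt;   // deletion visible: fall through to "invisible"
}

bool satisfies_decode_mvcc(const Tuple &t)
{
    if (auto v = xmin_stage(t)) return *v;
    if (auto v = xmax_stage(t)) return *v;
    return false;          // mirrors the final `return false` above
}

int main()
{
    Tuple t{true, false, false};  // inserted; deleter not yet in snapshot
    std::cout << std::boolalpha << satisfies_decode_mvcc(t) << '\n';  // true
    return 0;
}
```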
@@ -1748,6 +1918,9 @@ bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffe case SNAPSHOT_HISTORIC_MVCC: return HeapTupleSatisfiesHistoricMVCC(tup, snapshot, buffer); break; + case SNAPSHOT_DECODE_MVCC: + return HeapTupleSatisfiesDecodeMVCC(tup, snapshot, buffer); + break; } return false; /* keep compiler quiet */ diff --git a/src/gausskernel/storage/access/heap/hio.cpp b/src/gausskernel/storage/access/heap/hio.cpp index bf7ccd5b3..f0531a906 100644 --- a/src/gausskernel/storage/access/heap/hio.cpp +++ b/src/gausskernel/storage/access/heap/hio.cpp @@ -48,7 +48,7 @@ void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, Tra page_header = BufferGetPage(buffer); tuple->t_data->t_choice.t_heap.t_xmin = NormalTransactionIdToShort( - PageIs8BXidHeapVersion(page_header) ? ((HeapPageHeader)(page_header))->pd_xid_base : 0, xid); + ((HeapPageHeader)(page_header))->pd_xid_base, xid); offnum = PageAddItem(page_header, (Item)tuple->t_data, tuple->t_len, InvalidOffsetNumber, false, true); if (offnum == InvalidOffsetNumber) @@ -379,6 +379,7 @@ Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer other_buffe Size save_free_space = 0; BlockNumber target_block, other_block; bool need_lock = false; + bool last_page_tested = false; Size extralen = 0; HeapPageHeader phdr; @@ -453,6 +454,7 @@ Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer other_buffe if (nblocks > 0) { target_block = nblocks - 1; } + last_page_tested = true; } } /* When in append mode, cannot use cached block which smaller than rel end block */ @@ -542,24 +544,11 @@ loop: page = BufferGetPage(buffer); page_free_space = PageGetHeapFreeSpace(page); if (len + save_free_space <= page_free_space) { - if (PageIs4BXidVersion(page)) { - if (page_free_space - save_free_space - len >= SizeOfHeapPageUpgradeData) { - /* If the page is upgraded unsuccessfully, get a new page */ - if (!heap_page_upgrade(relation, buffer)) { - use_fsm = false; - goto newpage; - } - } else { - /* Find the page with enough space to upgrade */ - extralen = SizeOfHeapPageUpgradeData; - goto newpage; - } - } /* use this page as future insert target, too */ RelationSetTargetBlock(relation, target_block); return buffer; } - newpage: + /* * Not enough space, so we must give up our page locks and pin (if * any) and prepare to look elsewhere. We don't care which order we @@ -587,6 +576,21 @@ loop: ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE), errmsg("RelationGetBufferForTuple, get target block %u from FSM, nblocks in relation is %u", target_block, smgrnblocks(relation->rd_smgr, MAIN_FORKNUM)))); + + /* + * If the FSM knows nothing of the rel, try the last page before we + * give up and extend. This is intended to make use of pages that were + * extended one by one and never recorded in the FSM. + * + * Ideally, all pages would be recorded in the FSM via bulk-extend later. 
+ */ + if (target_block == InvalidBlockNumber && !last_page_tested) { + BlockNumber nblocks = RelationGetNumberOfBlocks(relation); + if (nblocks > 0) { + target_block = nblocks - 1; + } + last_page_tested = true; + } } /* diff --git a/src/gausskernel/storage/access/heap/pruneheap.cpp b/src/gausskernel/storage/access/heap/pruneheap.cpp index caff361fb..964274ea0 100644 --- a/src/gausskernel/storage/access/heap/pruneheap.cpp +++ b/src/gausskernel/storage/access/heap/pruneheap.cpp @@ -204,7 +204,7 @@ int heap_page_prune(Relation relation, Buffer buffer, TransactionId oldest_xmin, continue; /* Nothing to do if slot is empty or already dead */ - itemid = PageGetItemId(page, offnum); + itemid = HeapPageGetItemId(page, offnum); if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid)) continue; diff --git a/src/gausskernel/storage/access/heap/rewriteheap.cpp b/src/gausskernel/storage/access/heap/rewriteheap.cpp index 795483925..9b232db20 100644 --- a/src/gausskernel/storage/access/heap/rewriteheap.cpp +++ b/src/gausskernel/storage/access/heap/rewriteheap.cpp @@ -215,7 +215,9 @@ static void raw_heap_insert(RewriteState state, HeapTuple tup); static void RawUHeapInsert(RewriteState state, UHeapTuple tup); static void RawHeapCmprAndMultiInsert(RewriteState state, bool is_last); static void copyHeapTupleInfo(HeapTuple dest_tup, HeapTuple src_tup, TransactionId freeze_xid, MultiXactId freeze_mxid); +#ifndef ENABLE_LITE_MODE static void rewrite_page_list_write(RewriteState state); +#endif static void rewrite_flush_page(RewriteState state, Page page); static void rewrite_end_flush_page(RewriteState state); static void rewrite_write_one_page(RewriteState state, Page page); @@ -986,6 +988,7 @@ bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple) return false; } +#ifndef ENABLE_LITE_MODE /* * @Description: vacuum full use this api to list write block by adio. 
aioDescp->blockDesc.bufHdr = NULL; to figure * this is vacuum operate @@ -1073,6 +1076,7 @@ void rewrite_page_list_write(RewriteState state) return; } +#endif /* * @Description: rewrite flush page @@ -1084,6 +1088,7 @@ void rewrite_page_list_write(RewriteState state) */ static void rewrite_flush_page(RewriteState state, Page page) { +#ifndef ENABLE_LITE_MODE /* check aio is ready, for init db in single mode, no aio thread */ if (AioCompltrIsReady() && g_instance.attr.attr_storage.enable_adio_function) { /* pass null buffer to lower levels to use fallocate, systables do not use fallocate, @@ -1130,8 +1135,11 @@ static void rewrite_flush_page(RewriteState state, Page page) } } } else { +#endif smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno, (char *)page, true); +#ifndef ENABLE_LITE_MODE } +#endif return; } @@ -1142,6 +1150,7 @@ static void rewrite_flush_page(RewriteState state, Page page) */ static void rewrite_end_flush_page(RewriteState state) { +#ifndef ENABLE_LITE_MODE /* check aio is ready, for init db in single mode, no aio thread */ if (AioCompltrIsReady() && g_instance.attr.attr_storage.enable_adio_function) { if (state->rs_block_count > 0) { @@ -1156,6 +1165,7 @@ static void rewrite_end_flush_page(RewriteState state) (int)(pg_atomic_read_u32(&state->rs_buffers_handler[i].state) & BUF_FLAG_MASK)))); } } +#endif } /* diff --git a/src/gausskernel/storage/access/heap/tuptoaster.cpp b/src/gausskernel/storage/access/heap/tuptoaster.cpp index 627be68f3..4411f8af3 100644 --- a/src/gausskernel/storage/access/heap/tuptoaster.cpp +++ b/src/gausskernel/storage/access/heap/tuptoaster.cpp @@ -43,12 +43,18 @@ #include "utils/typcache.h" #include "commands/vacuum.h" #include "utils/snapmgr.h" +#include "mb/pg_wchar.h" #undef TOAST_DEBUG static bool toastid_valueid_exists(Oid toastrelid, Oid valueid, int2 bucketid); static struct varlena *toast_fetch_datum(struct varlena *attr); -static struct varlena *toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length); +static struct varlena *toast_fetch_datum_slice(struct varlena *attr, int64 sliceoffset, int32 length); +static struct varlena* toast_huge_fetch_datum_slice(struct varlena* attr, int64 sliceoffset, int32 length); +varlena* toast_huge_write_datum_slice(struct varlena* attr1, struct varlena* attr2, int64 sliceoffset, int32 length); +void toast_huge_fetch_and_copy(Relation srctoastrel, Relation srctoastidx, Relation destoastrel, + Relation destoastidx, varatt_lob_external large_toast_pointer, int32 *chunk_seq, Oid *firstchunkid, + Oid realtoastOid); /* ---------- * heap_tuple_fetch_attr - @@ -111,7 +117,7 @@ struct varlena *heap_tuple_fetch_attr(struct varlena *attr) * or external storage. 
* ---------- */ -struct varlena *heap_tuple_untoast_attr(struct varlena *attr) +struct varlena *heap_tuple_untoast_attr(struct varlena *attr, ScalarVector *arr) { if (VARATT_IS_EXTERNAL_ONDISK_B(attr)) { /* @@ -122,7 +128,11 @@ struct varlena *heap_tuple_untoast_attr(struct varlena *attr) if (VARATT_IS_COMPRESSED(attr)) { PGLZ_Header *tmp = (PGLZ_Header *)attr; - attr = (struct varlena *)palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ); + if (arr == NULL) { + attr = (struct varlena *)palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ); + } else { + attr = (struct varlena *)arr->m_buf->Allocate(tmp->rawsize + VARHDRSZ); + } SET_VARSIZE(attr, PGLZ_RAW_SIZE(tmp) + VARHDRSZ); pglz_decompress(tmp, VARDATA(attr)); pfree(tmp); @@ -135,17 +145,21 @@ struct varlena *heap_tuple_untoast_attr(struct varlena *attr) /* nested indirect Datums aren't allowed */ Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr)); - attr = heap_tuple_untoast_attr(attr); + attr = heap_tuple_untoast_attr(attr, arr); } else if (VARATT_IS_COMPRESSED(attr)) { /* * This is a compressed value inside of the main tuple */ PGLZ_Header *tmp = (PGLZ_Header *)attr; - attr = (struct varlena *)palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ); + if (arr == NULL) { + attr = (struct varlena *)palloc(PGLZ_RAW_SIZE(tmp) + VARHDRSZ); + } else { + attr = (struct varlena *)arr->m_buf->Allocate(tmp->rawsize + VARHDRSZ); + } SET_VARSIZE(attr, PGLZ_RAW_SIZE(tmp) + VARHDRSZ); pglz_decompress(tmp, VARDATA(attr)); - } else if (VARATT_IS_SHORT(attr)) { + } else if (VARATT_IS_SHORT(attr) && !VARATT_IS_HUGE_TOAST_POINTER(attr)) { /* * This is a short-header varlena --- convert to 4-byte header format */ @@ -154,7 +168,11 @@ struct varlena *heap_tuple_untoast_attr(struct varlena *attr) struct varlena *new_attr; errno_t rc = EOK; - new_attr = (struct varlena *)palloc(new_size); + if (arr == NULL) { + new_attr = (struct varlena *)palloc(new_size); + } else { + new_attr = (struct varlena *)arr->m_buf->Allocate(new_size); + } SET_VARSIZE(new_attr, new_size); rc = memcpy_s(VARDATA(new_attr), new_size, VARDATA_SHORT(attr), data_size); securec_check(rc, "", ""); @@ -171,7 +189,7 @@ struct varlena *heap_tuple_untoast_attr(struct varlena *attr) * from compression or external storage. 
* ---------- */ -struct varlena *heap_tuple_untoast_attr_slice(struct varlena *attr, int32 slice_offset, int32 slice_length) +struct varlena *heap_tuple_untoast_attr_slice(struct varlena *attr, int64 slice_offset, int32 slice_length) { struct varlena *preslice = NULL; struct varlena *result = NULL; @@ -198,8 +216,11 @@ struct varlena *heap_tuple_untoast_attr_slice(struct varlena *attr, int32 slice_ Assert(!VARATT_IS_EXTERNAL_INDIRECT(redirect.pointer)); return heap_tuple_untoast_attr_slice(redirect.pointer, slice_offset, slice_length); - } else + } else if (VARATT_IS_HUGE_TOAST_POINTER(attr)) { + return toast_huge_fetch_datum_slice(attr, slice_offset, slice_length); + } else { preslice = attr; + } if (VARATT_IS_COMPRESSED(preslice)) { PGLZ_Header *tmp = (PGLZ_Header *)preslice; @@ -270,6 +291,11 @@ Size toast_raw_datum_size(Datum value) } else if (VARATT_IS_COMPRESSED(attr)) { /* here, va_rawsize is just the payload size */ result = VARRAWSIZE_4B_C(attr) + VARHDRSZ; + } else if (VARATT_IS_HUGE_TOAST_POINTER(attr)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr); + result = large_toast_pointer.va_rawsize; } else if (VARATT_IS_SHORT(attr)) { /* * we have to normalize the header length to VARHDRSZ or else the @@ -313,6 +339,11 @@ Size toast_datum_size(Datum value) Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr)); return toast_datum_size(PointerGetDatum(toast_pointer.pointer)); + } else if (VARATT_IS_HUGE_TOAST_POINTER(attr)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr); + result = large_toast_pointer.va_rawsize; } else if (VARATT_IS_SHORT(attr)) { result = VARSIZE_SHORT(attr); } else { @@ -325,6 +356,46 @@ Size toast_datum_size(Datum value) return result; } +int64 calculate_huge_length(text *t) +{ + ScanKeyData toastkey; + SysScanDesc toastscan; + HeapTuple ttup; + Pointer chunk; + bool isnull; + int2 bucketid; + int64 len = 0; + struct varatt_external toast_pointer; + struct varatt_lob_external large_toast_pointer; + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t); + + Relation toastrel = heap_open(large_toast_pointer.va_toastrelid, AccessShareLock); + Relation toastidx = index_open(toastrel->rd_rel->reltoastidxid, AccessShareLock); + TupleDesc toast_tup_desc = toastrel->rd_att; + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(toastrel, toastidx, SnapshotToast, 1, &toastkey); + + int offset = 0; + while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL) { + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, toast_tup_desc, &isnull)); + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, chunk, bucketid); + text *result = heap_tuple_untoast_attr((varlena *)chunk); + const char* str = VARDATA_ANY(result); + int limit = VARSIZE_ANY_EXHDR(result); + + /* multibyte string may be truncated! 
*/ + limit += offset; + len += (int64)pg_mbstrlen_with_len_toast(str - offset, &limit); + offset = limit; + } + Assert(offset == 0); + systable_endscan_ordered(toastscan); + index_close(toastidx, AccessShareLock); + heap_close(toastrel, AccessShareLock); + return len; +} + /* ---------- * toast_delete - * @@ -380,6 +451,8 @@ void toast_delete(Relation rel, HeapTuple oldtup, int options) continue; else if (VARATT_IS_EXTERNAL_ONDISK_B(PointerGetDatum(value))) toast_delete_datum(rel, value, options); + else if (VARATT_IS_HUGE_TOAST_POINTER(PointerGetDatum(value))) + toast_huge_delete_datum(rel, value, options); else if (VARATT_IS_EXTERNAL_INDIRECT(PointerGetDatum(value))) ereport(ERROR, (errcode(ERRCODE_FETCH_DATA_FAILED), errmsg("attempt to delete tuple containing indirect datums"))); @@ -387,6 +460,69 @@ void toast_delete(Relation rel, HeapTuple oldtup, int options) } } +struct varlena *heap_tuple_fetch_and_copy(Relation rel, struct varlena *attr, bool needcheck) +{ + Relation srctoastrel; + Relation srctoastidx; + Relation destoastrel; + Relation destoastidx; + int32 chunk_seq = 0; + errno_t rc; + struct varlena *result; + Oid firstchunkid = InvalidOid; + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr); + + if (needcheck && large_toast_pointer.va_toastrelid == rel->rd_rel->reltoastrelid) { + return NULL; + } + + srctoastrel = heap_open(large_toast_pointer.va_toastrelid, AccessShareLock); + srctoastidx = index_open(srctoastrel->rd_rel->reltoastidxid, AccessShareLock); + destoastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock); + destoastidx = index_open(destoastrel->rd_rel->reltoastidxid, RowExclusiveLock); + + if (OidIsValid(rel->rd_toastoid) && toastrel_valueid_exists(destoastrel, large_toast_pointer.va_valueid)) { + index_close(srctoastidx, AccessShareLock); + heap_close(srctoastrel, AccessShareLock); + index_close(destoastidx, RowExclusiveLock); + heap_close(destoastrel, RowExclusiveLock); + return NULL; + } + + toast_huge_fetch_and_copy(srctoastrel, srctoastidx, destoastrel, destoastidx, large_toast_pointer, &chunk_seq, + &firstchunkid, rel->rd_toastoid); + + result = (struct varlena *)palloc(LARGE_TOAST_POINTER_SIZE); + SET_HUGE_TOAST_POINTER_TAG(result, VARTAG_ONDISK); + if (OidIsValid(rel->rd_toastoid)) { + large_toast_pointer.va_toastrelid = rel->rd_toastoid; + } else { + large_toast_pointer.va_toastrelid = rel->rd_rel->reltoastrelid; + } + large_toast_pointer.va_valueid = firstchunkid; + rc = + memcpy_s(VARDATA_EXTERNAL(result), LARGE_TOAST_POINTER_SIZE, &large_toast_pointer, sizeof(large_toast_pointer)); + securec_check(rc, "", ""); + + index_close(srctoastidx, AccessShareLock); + heap_close(srctoastrel, AccessShareLock); + index_close(destoastidx, RowExclusiveLock); + heap_close(destoastrel, RowExclusiveLock); + + return result; +} + +void delete_old_tuple_toast(Relation rel, Datum toast_oldvalue, int options, bool allow_update_self) +{ + if (VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(toast_oldvalue))) { + toast_huge_delete_datum(rel, toast_oldvalue, options, allow_update_self); + } else { + toast_delete_datum(rel, toast_oldvalue, options, allow_update_self); + } +} + /* ---------- * toast_insert_or_update - * @@ -493,8 +629,10 @@ HeapTuple toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtu * If the old value is stored on disk, check if it has changed so * we have to delete it later. 
*/ - if (att[i]->attlen == -1 && !toast_oldisnull[i] && VARATT_IS_EXTERNAL_ONDISK_B(old_value)) { - if (toast_isnull[i] || !VARATT_IS_EXTERNAL_ONDISK_B(new_value) || + if (att[i]->attlen == -1 && !toast_oldisnull[i] && + (VARATT_IS_EXTERNAL_ONDISK_B(old_value) || VARATT_IS_HUGE_TOAST_POINTER(old_value))) { + if (toast_isnull[i] || RelationIsLogicallyLogged(rel) || + !(VARATT_IS_EXTERNAL_ONDISK_B(new_value) || VARATT_IS_HUGE_TOAST_POINTER(new_value)) || VARTAG_EXTERNAL(new_value) != VARTAG_EXTERNAL(old_value) || memcmp((char *)old_value, (char *)new_value, VARSIZE_EXTERNAL(old_value)) != 0) { /* @@ -547,7 +685,7 @@ HeapTuple toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtu * PLAIN storage). If necessary, we'll push it out as a new * external value below. */ - if (VARATT_IS_EXTERNAL(new_value)) { + if (VARATT_IS_EXTERNAL(new_value) && !VARATT_IS_HUGE_TOAST_POINTER(new_value)) { toast_oldexternal[i] = new_value; if (att[i]->attstorage == 'p') { new_value = heap_tuple_untoast_attr(new_value); @@ -558,6 +696,14 @@ HeapTuple toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtu toast_free[i] = true; need_change = true; need_free = true; + } else if (VARATT_IS_HUGE_TOAST_POINTER(new_value)) { + toast_oldexternal[i] = heap_tuple_fetch_and_copy(rel, new_value, oldtup != NULL); + if (toast_oldexternal[i] != NULL) { + new_value = toast_oldexternal[i]; + toast_values[i] = PointerGetDatum(new_value); + need_change = true; + need_free = true; + } } /* @@ -924,7 +1070,7 @@ HeapTuple toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtu if (need_delold) { for (i = 0; i < num_attrs; i++) { if (toast_delold[i]) { - toast_delete_datum(rel, toast_oldvalues[i], options, allow_update_self); + delete_old_tuple_toast(rel, toast_oldvalues[i], options, allow_update_self); } } } @@ -966,6 +1112,7 @@ HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tuple_desc) struct varlena *new_value; new_value = (struct varlena *)DatumGetPointer(toast_values[i]); + checkHugeToastPointer(new_value); if (VARATT_IS_EXTERNAL(new_value)) { new_value = toast_fetch_datum(new_value); toast_values[i] = PointerGetDatum(new_value); @@ -1146,6 +1293,592 @@ Datum toast_flatten_tuple_attribute(Datum value, Oid typeId, int32 typeMod) return PointerGetDatum(new_data); } +void toast_huge_fetch_and_copy_level2(Relation srctoastrel, Relation srctoastidx, Relation destoastrel, + Relation destoastidx, varatt_external toast_pointer, Oid chunk_id) +{ + ScanKeyData toastkey; + SysScanDesc toastscan; + TupleDesc src_toast_tup_desc = srctoastrel->rd_att; + TupleDesc dest_toast_tup_desc = destoastrel->rd_att; + int32 residx; + Pointer chunk = NULL; + HeapTuple ttup; + HeapTuple toasttup; + bool isnull = false; + Datum t_values[3]; + bool t_isnull[3]; + int32 totalsize = 0; + errno_t rc; + CommandId mycid = GetCurrentCommandId(true); + struct varlena *result = NULL; + + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(srctoastrel, srctoastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL) { + residx = DatumGetInt32(fastgetattr(ttup, CHUNK_ID_ATTR, src_toast_tup_desc, &isnull)); + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, src_toast_tup_desc, &isnull)); + t_values[0] = ObjectIdGetDatum(chunk_id); + t_values[1] = Int32GetDatum(residx); + if (!VARATT_IS_EXTENDED(chunk)) { + totalsize = VARSIZE(chunk); + } else 
if (VARATT_IS_SHORT(chunk)) { + totalsize = VARSIZE_SHORT(chunk); + } + result = (varlena *)palloc(totalsize); + rc = memcpy_s(result, totalsize, chunk, totalsize); + securec_check(rc, "\0", "\0"); + t_values[2] = PointerGetDatum(result); + t_isnull[0] = false; + t_isnull[1] = false; + t_isnull[2] = false; + toasttup = heap_form_tuple(dest_toast_tup_desc, t_values, t_isnull); + (void)heap_insert(destoastrel, toasttup, mycid, 0, NULL); + (void)index_insert(destoastidx, t_values, t_isnull, &(toasttup->t_self), destoastrel, + destoastidx->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + heap_freetuple(toasttup); + pfree_ext(result); + } + systable_endscan_ordered(toastscan); +} + +void toast_huge_fetch_and_copy_level1(Relation srctoastrel, Relation srctoastidx, Relation destoastrel, + Relation destoastidx, HeapTuple ttup, Oid firstchunkid, int32 *chunk_seq, Oid realtoastOid) +{ + Oid chunk_id = InvalidOid; + int2 bucketid; + Datum t_values[3]; + bool t_isnull[3]; + errno_t rc; + bool isnull; + TupleDesc dest_toast_tup_desc = destoastrel->rd_att; + TupleDesc src_toast_tup_desc = srctoastrel->rd_att; + CommandId mycid = GetCurrentCommandId(true); + Pointer chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, src_toast_tup_desc, &isnull)); + struct varatt_external toast_pointer; + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, chunk, bucketid); + if (OidIsValid(realtoastOid)) { + chunk_id = toast_pointer.va_valueid; + } else { + chunk_id = GetNewOidWithIndex(destoastrel, RelationGetRelid(destoastidx), (AttrNumber)1); + } + toast_huge_fetch_and_copy_level2(srctoastrel, srctoastidx, destoastrel, destoastidx, toast_pointer, chunk_id); + toast_pointer.va_valueid = chunk_id; + if (OidIsValid(realtoastOid)) { + toast_pointer.va_toastrelid = realtoastOid; + } else { + toast_pointer.va_toastrelid = destoastrel->rd_id; + } + struct varlena *tmp = (struct varlena *)palloc(TOAST_POINTER_SIZE); + SET_VARTAG_EXTERNAL(tmp, VARTAG_ONDISK); + rc = memcpy_s(VARDATA_EXTERNAL(tmp), TOAST_POINTER_SIZE, &toast_pointer, sizeof(toast_pointer)); + securec_check(rc, "", ""); + + t_values[0] = ObjectIdGetDatum(firstchunkid); + t_values[1] = Int32GetDatum((*chunk_seq)++); + t_values[2] = PointerGetDatum(tmp); + t_isnull[0] = false; + t_isnull[1] = false; + t_isnull[2] = false; + HeapTuple toasttup = heap_form_tuple(dest_toast_tup_desc, t_values, t_isnull); + toasttup->t_data->t_infomask &= (~HEAP_HASEXTERNAL); + (void)heap_insert(destoastrel, toasttup, mycid, 0, NULL); + (void)index_insert(destoastidx, t_values, t_isnull, &(toasttup->t_self), destoastrel, + destoastidx->rd_index->indisunique ? 
UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + heap_freetuple(toasttup); + pfree_ext(tmp); +} + +void toast_huge_fetch_and_copy(Relation srctoastrel, Relation srctoastidx, Relation destoastrel, + Relation destoastidx, varatt_lob_external large_toast_pointer, int32 *chunk_seq, Oid *firstchunkid, + Oid realtoastOid) +{ + ScanKeyData toastkey; + SysScanDesc toastscan; + HeapTuple ttup; + struct varatt_external toast_pointer; + Pointer chunk; + bool isnull = false; + int2 bucketid; + + if (*firstchunkid == InvalidOid) { + if (OidIsValid(realtoastOid)) { + *firstchunkid = large_toast_pointer.va_valueid; + } else { + *firstchunkid = GetNewOidWithIndex(destoastrel, RelationGetRelid(destoastidx), (AttrNumber)1); + } + } + + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(srctoastrel, srctoastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL) { + bool isSameTransAndQuery = TransactionIdIsCurrentTransactionId(HeapTupleGetRawXmin(ttup)); + if (isSameTransAndQuery) { + isSameTransAndQuery = (GetCurrentCommandId(true) == HeapTupleGetCmin(ttup)); + } + + if (isSameTransAndQuery && srctoastrel->rd_id == destoastrel->rd_id) { + TupleDesc src_toast_tup_desc = srctoastrel->rd_att; + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, src_toast_tup_desc, &isnull)); + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, chunk, bucketid); + } + toast_huge_fetch_and_copy_level1(srctoastrel, srctoastidx, destoastrel, destoastidx, ttup, *firstchunkid, + chunk_seq, realtoastOid); + if (isSameTransAndQuery && srctoastrel->rd_id == destoastrel->rd_id) { + toast_delete_datum_internal(toast_pointer, 0, true); + simple_heap_delete(destoastrel, &ttup->t_self, 0, true); + } + } + systable_endscan_ordered(toastscan); +} + +void toast_huge_concat_hugepointers(text* t1, text* t2, Oid *firstchunkid, Oid toastOid) +{ + Relation srctoastrel; + Relation srctoastidx; + Relation destoastrel; + Relation destoastidx; + int32 firstchunkseq = 0; + struct varatt_lob_external large_toast_pointer1; + struct varatt_lob_external large_toast_pointer2; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer1, t1); + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer2, t2); + destoastrel = heap_open(toastOid, RowExclusiveLock); + destoastidx = index_open(destoastrel->rd_rel->reltoastidxid, RowExclusiveLock); + srctoastrel = heap_open(large_toast_pointer1.va_toastrelid, AccessShareLock); + srctoastidx = index_open(srctoastrel->rd_rel->reltoastidxid, AccessShareLock); + toast_huge_fetch_and_copy(srctoastrel, srctoastidx, destoastrel, destoastidx, large_toast_pointer1, + &firstchunkseq, firstchunkid, InvalidOid); + index_close(srctoastidx, AccessShareLock); + heap_close(srctoastrel, AccessShareLock); + + srctoastrel = heap_open(large_toast_pointer2.va_toastrelid, AccessShareLock); + srctoastidx = index_open(srctoastrel->rd_rel->reltoastidxid, AccessShareLock); + toast_huge_fetch_and_copy(srctoastrel, srctoastidx, destoastrel, destoastidx, large_toast_pointer2, + &firstchunkseq, firstchunkid, InvalidOid); + index_close(srctoastidx, AccessShareLock); + heap_close(srctoastrel, AccessShareLock); + + index_close(destoastidx, RowExclusiveLock); + heap_close(destoastrel, RowExclusiveLock); +} + +void toast_huge_concat_varlenas_internal(Relation toastrel, Relation toastidx, text *t1, text *t2, Oid *firstchunkid, + int32 *chunkseq) +{ + text *tmp = NULL; + char *ptr = NULL; + 
errno_t rc; + HeapTuple toasttup; + Datum values[3]; + bool isnull[3]; + struct varatt_external first_toast_pointer; + char *data = NULL; + int32 data_all; + struct { + struct varlena hdr; + char data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */ + int32 align_it; /* ensure struct is aligned well enough */ + } chunk_data; + int32 chunk_size; + int32 chunk_seq = 0; + CommandId mycid = GetCurrentCommandId(true); + int64 len1 = VARSIZE_ANY_EXHDR(t1); + int64 len2 = VARSIZE_ANY_EXHDR(t2); + int64 len = len1 + len2; + TupleDesc toast_tup_desc = toastrel->rd_att; + bool needfree = false; + Assert(*firstchunkid == InvalidOid); + *firstchunkid = GetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); + + while (len > 0) { + chunk_seq = 0; + int l1 = len > MAX_TOAST_CHUNK_SIZE ? MAX_TOAST_CHUNK_SIZE : len; + tmp = (struct varlena *)palloc(l1 + VARHDRSZ); + SET_VARSIZE(tmp, l1 + VARHDRSZ); + ptr = VARDATA(tmp); + if (l1 == len) { + rc = memcpy_s(ptr, l1, VARDATA_ANY(t2) + (MAX_TOAST_CHUNK_SIZE - len1), l1); + securec_check(rc, "\0", "\0"); + } else { + rc = memcpy_s(ptr, len1, VARDATA_ANY(t1), len1); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(ptr + len1, l1 - len1, VARDATA_ANY(t2), l1 - len1); + securec_check(rc, "\0", "\0"); + } + len -= MAX_TOAST_CHUNK_SIZE; + Pointer dval = DatumGetPointer(toast_compress_datum(PointerGetDatum(tmp))); + if (PointerIsValid(dval) && VARATT_IS_COMPRESSED(dval)) { + data = VARDATA(dval); + data_all = VARSIZE(dval) - VARHDRSZ; + /* rawsize in a compressed datum is just the size of the payload */ + first_toast_pointer.va_rawsize = VARRAWSIZE_4B_C(dval) + VARHDRSZ; + first_toast_pointer.va_extsize = data_all; + needfree = true; + /* Assert that the numbers look like it's compressed */ + Assert(VARATT_EXTERNAL_IS_COMPRESSED(first_toast_pointer)); + } else { + data = VARDATA(tmp); + data_all = VARSIZE(tmp) - VARHDRSZ; + first_toast_pointer.va_rawsize = VARSIZE(tmp); + first_toast_pointer.va_extsize = data_all; + } + + first_toast_pointer.va_toastrelid = toastrel->rd_id; + first_toast_pointer.va_valueid = GetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); + rc = memset_s(&chunk_data, sizeof(chunk_data), 0, sizeof(chunk_data)); + securec_check(rc, "", ""); + values[0] = ObjectIdGetDatum(first_toast_pointer.va_valueid); + values[2] = PointerGetDatum(&chunk_data); + isnull[0] = false; + isnull[1] = false; + isnull[2] = false; + while (data_all > 0) { + chunk_size = Min(TOAST_MAX_CHUNK_SIZE, (uint32)data_all); + values[1] = Int32GetDatum(chunk_seq++); + SET_VARSIZE(&chunk_data, chunk_size + VARHDRSZ); + rc = memcpy_s(VARDATA(&chunk_data), TOAST_MAX_CHUNK_SIZE, data, chunk_size); + securec_check(rc, "", ""); + toasttup = heap_form_tuple(toast_tup_desc, values, isnull); + (void)heap_insert(toastrel, toasttup, mycid, 0, NULL); + (void)index_insert(toastidx, values, isnull, &(toasttup->t_self), toastrel, + toastidx->rd_index->indisunique ? 
UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + heap_freetuple(toasttup); + data_all -= chunk_size; + data += chunk_size; + } + + struct varlena *result = (struct varlena *)palloc(TOAST_POINTER_SIZE); + SET_VARTAG_EXTERNAL(result, VARTAG_ONDISK); + rc = memcpy_s(VARDATA_EXTERNAL(result), TOAST_POINTER_SIZE, &first_toast_pointer, sizeof(first_toast_pointer)); + securec_check(rc, "", ""); + values[0] = ObjectIdGetDatum(*firstchunkid); + values[1] = Int32GetDatum((*chunkseq)++); + values[2] = PointerGetDatum(result); + isnull[0] = false; + isnull[1] = false; + isnull[2] = false; + toasttup = heap_form_tuple(toast_tup_desc, values, isnull); + toasttup->t_data->t_infomask &= (~HEAP_HASEXTERNAL); + (void)heap_insert(toastrel, toasttup, mycid, 0, NULL); + (void)index_insert(toastidx, values, isnull, &(toasttup->t_self), toastrel, + toastidx->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + heap_freetuple(toasttup); + pfree_ext(tmp); + pfree_ext(result); + if (needfree) { + pfree_ext(dval); + } + } +} + +void toast_huge_concat_varlenas(text* t1, text* t2, Oid *firstchunkid, int32 *chunkseq, Oid toastOid) +{ + Relation toastrel = heap_open(toastOid, RowExclusiveLock); + Relation toastidx = index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock); + toast_huge_concat_varlenas_internal(toastrel, toastidx, t1, t2, firstchunkid, chunkseq); + index_close(toastidx, RowExclusiveLock); + heap_close(toastrel, RowExclusiveLock); +} + +void toast_huge_fetch_and_append_datum(Relation toastrel, Relation toastidx, text *t, Oid *firstchunkid, int32 chunkseq) +{ + TupleDesc toast_tup_desc = toastrel->rd_att; + struct varatt_external toast_pointer1; + Datum toast_values[3]; + bool toast_isnull[3]; + char *data_all = NULL; + int32 data_size; + struct { + struct varlena hdr; + char data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */ + int32 align_it; /* ensure struct is aligned well enough */ + } chunk_data; + int32 chunk_size; + int32 chunk_seq = 0; + errno_t rc; + HeapTuple toasttup; + struct varlena *result = NULL; + CommandId mycid = GetCurrentCommandId(true); + bool needfree = false; + if (*firstchunkid == InvalidOid) { + *firstchunkid = GetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); + } + Pointer dval = DatumGetPointer(toast_compress_datum(PointerGetDatum(t))); + if (PointerIsValid(dval) && VARATT_IS_COMPRESSED(dval)) { + data_all = VARDATA(dval); + data_size = VARSIZE(dval) - VARHDRSZ; + /* rawsize in a compressed datum is just the size of the payload */ + toast_pointer1.va_rawsize = VARRAWSIZE_4B_C(dval) + VARHDRSZ; + toast_pointer1.va_extsize = data_size; + needfree = true; + /* Assert that the numbers look like it's compressed */ + Assert(VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer1)); + } else { + dval = (Pointer)t; + data_all = VARDATA(dval); + data_size = VARSIZE(dval) - VARHDRSZ; + toast_pointer1.va_rawsize = VARSIZE(dval); + toast_pointer1.va_extsize = data_size; + } + + toast_pointer1.va_toastrelid = toastrel->rd_id; + toast_pointer1.va_valueid = GetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); + rc = memset_s(&chunk_data, sizeof(chunk_data), 0, sizeof(chunk_data)); + securec_check(rc, "", ""); + toast_values[0] = ObjectIdGetDatum(toast_pointer1.va_valueid); + toast_values[2] = PointerGetDatum(&chunk_data); + toast_isnull[0] = false; + toast_isnull[1] = false; + toast_isnull[2] = false; + while (data_size > 0) { + chunk_size = Min(TOAST_MAX_CHUNK_SIZE, (uint32)data_size); + toast_values[1] = Int32GetDatum(chunk_seq++); + 
SET_VARSIZE(&chunk_data, chunk_size + VARHDRSZ); + rc = memcpy_s(VARDATA(&chunk_data), TOAST_MAX_CHUNK_SIZE, data_all, chunk_size); + securec_check(rc, "", ""); + toasttup = heap_form_tuple(toast_tup_desc, toast_values, toast_isnull); + (void)heap_insert(toastrel, toasttup, mycid, 0, NULL); + (void)index_insert(toastidx, toast_values, toast_isnull, &(toasttup->t_self), toastrel, + toastidx->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + heap_freetuple(toasttup); + data_size -= chunk_size; + data_all += chunk_size; + } + + result = (struct varlena *)palloc(TOAST_POINTER_SIZE); + SET_VARTAG_EXTERNAL(result, VARTAG_ONDISK); + rc = memcpy_s(VARDATA_EXTERNAL(result), TOAST_POINTER_SIZE, &toast_pointer1, sizeof(toast_pointer1)); + securec_check(rc, "", ""); + toast_values[0] = ObjectIdGetDatum(*firstchunkid); + toast_values[1] = Int32GetDatum(chunkseq); + toast_values[2] = PointerGetDatum(result); + toast_isnull[0] = false; + toast_isnull[1] = false; + toast_isnull[2] = false; + toasttup = heap_form_tuple(toast_tup_desc, toast_values, toast_isnull); + toasttup->t_data->t_infomask &= (~HEAP_HASEXTERNAL); + (void)heap_insert(toastrel, toasttup, mycid, 0, NULL); + (void)index_insert(toastidx, toast_values, toast_isnull, &(toasttup->t_self), toastrel, + toastidx->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO); + heap_freetuple(toasttup); + pfree_ext(result); + if (needfree) { + pfree_ext(dval); + } +} + +void toast_huge_concat_varlena_after(text *t1, text *t2, Oid *firstchunkid, Oid toastOid) +{ + ScanKeyData toastkey; + SysScanDesc toastscan; + HeapTuple ttup; + Pointer chunk = NULL; + int32 residx; + bool isnull; + errno_t rc; + char *ptr = NULL; + struct varatt_lob_external large_toast_pointer; + struct varatt_external toast_pointer; + struct varlena *result = NULL; + struct varlena *tmp = NULL; + bool newChunk = false; + int2 bucketid; + bool islast = true; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t1); + + Relation srctoastrel = heap_open(large_toast_pointer.va_toastrelid, AccessShareLock); + Relation srctoastidx = index_open(srctoastrel->rd_rel->reltoastidxid, AccessShareLock); + Relation destoastrel = heap_open(toastOid, RowExclusiveLock); + Relation destoastidx = index_open(destoastrel->rd_rel->reltoastidxid, RowExclusiveLock); + TupleDesc src_toast_tup_desc = srctoastrel->rd_att; + + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(srctoastrel, srctoastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, BackwardScanDirection)) != NULL) { + bool isSameTransAndQuery = TransactionIdIsCurrentTransactionId(HeapTupleGetRawXmin(ttup)); + if (isSameTransAndQuery && large_toast_pointer.va_toastrelid == toastOid) { + isSameTransAndQuery = (GetCurrentCommandId(true) == HeapTupleGetCmin(ttup)); + } + residx = DatumGetInt32(fastgetattr(ttup, CHUNK_ID_ATTR, src_toast_tup_desc, &isnull)); + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, src_toast_tup_desc, &isnull)); + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, chunk, bucketid); + if (islast) { + if (toast_pointer.va_rawsize == MAX_TOAST_CHUNK_SIZE + VARHDRSZ) { + newChunk = true; + } else { + result = heap_tuple_untoast_attr((varlena *)chunk); + Assert(VARSIZE_ANY_EXHDR(result) <= MAX_TOAST_CHUNK_SIZE); + } + int64 len1 = VARSIZE_ANY_EXHDR(result); + int64 len2 = VARSIZE_ANY_EXHDR(t2); + if (!newChunk && len1 + len2 > MAX_TOAST_CHUNK_SIZE) { + 
toast_huge_concat_varlenas_internal(destoastrel, destoastidx, result, t2, firstchunkid, &residx); + } else if (newChunk) { + toast_huge_fetch_and_append_datum(destoastrel, destoastidx, t2, firstchunkid, residx + 1); + toast_huge_fetch_and_copy_level1(srctoastrel, srctoastidx, destoastrel, destoastidx, ttup, + *firstchunkid, &residx, InvalidOid); + } else { + tmp = (varlena *)palloc(len1 + len2 + VARHDRSZ); + SET_VARSIZE(tmp, len1 + len2 + VARHDRSZ); + ptr = VARDATA(tmp); + rc = memcpy_s(ptr, len1, VARDATA_ANY(result), len1); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(ptr + len1, len2, VARDATA_ANY(t2), len2); + securec_check(rc, "\0", "\0"); + toast_huge_fetch_and_append_datum(destoastrel, destoastidx, tmp, firstchunkid, residx); + pfree_ext(tmp); + } + islast = false; + pfree_ext(result); + if (isSameTransAndQuery && large_toast_pointer.va_toastrelid == toastOid) { + toast_delete_datum_internal(toast_pointer, 0, true); + simple_heap_delete(destoastrel, &ttup->t_self, 0, true); + } + continue; + } + toast_huge_fetch_and_copy_level1(srctoastrel, srctoastidx, destoastrel, destoastidx, ttup, *firstchunkid, + &residx, InvalidOid); + if (isSameTransAndQuery && large_toast_pointer.va_toastrelid == toastOid) { + toast_delete_datum_internal(toast_pointer, 0, true); + simple_heap_delete(destoastrel, &ttup->t_self, 0, true); + } + } + systable_endscan_ordered(toastscan); + + index_close(srctoastidx, AccessShareLock); + heap_close(srctoastrel, AccessShareLock); + index_close(destoastidx, RowExclusiveLock); + heap_close(destoastrel, RowExclusiveLock); +} + +void toast_huge_concat_varlena_before(text *t1, text *t2, Oid *firstchunkid, Oid toastOid) +{ + ScanKeyData toastkey; + SysScanDesc toastscan; + HeapTuple ttup; + int32 residx; + bool isnull = false; + struct varatt_lob_external large_toast_pointer; + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t2); + + Relation srctoastrel = heap_open(large_toast_pointer.va_toastrelid, AccessShareLock); + Relation srctoastidx = index_open(srctoastrel->rd_rel->reltoastidxid, AccessShareLock); + Relation destoastrel = heap_open(toastOid, RowExclusiveLock); + Relation destoastidx = index_open(destoastrel->rd_rel->reltoastidxid, RowExclusiveLock); + TupleDesc src_toast_tup_desc = srctoastrel->rd_att; + + toast_huge_fetch_and_append_datum(destoastrel, destoastidx, t1, firstchunkid, 0); + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(srctoastrel, srctoastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, BackwardScanDirection)) != NULL) { + residx = DatumGetInt32(fastgetattr(ttup, CHUNK_ID_ATTR, src_toast_tup_desc, &isnull)); + residx += 1; + toast_huge_fetch_and_copy_level1(srctoastrel, srctoastidx, destoastrel, destoastidx, ttup, *firstchunkid, + &residx, InvalidOid); + } + systable_endscan_ordered(toastscan); + + index_close(srctoastidx, AccessShareLock); + heap_close(srctoastrel, AccessShareLock); + index_close(destoastidx, RowExclusiveLock); + heap_close(destoastrel, RowExclusiveLock); +} + +Oid get_toast_oid() +{ + Oid toastOid = InvalidOid; + if (OidIsValid(t_thrd.xact_cxt.ActiveLobRelid)) { + Relation rel = heap_open(t_thrd.xact_cxt.ActiveLobRelid, AccessShareLock); + if (RelationIsUstoreFormat(rel)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("clob/blob types larger than 1GB are not supported in Ustore"))); + } + toastOid = rel->rd_rel->reltoastrelid; + heap_close(rel, 
AccessShareLock); + } + if (!OidIsValid(toastOid)) { + create_toast_by_sid(&toastOid); + } + + return toastOid; +} + +static text* text_catenate_convert_vartype(text *t) +{ + text *result = t; + if (!VARATT_IS_HUGE_TOAST_POINTER(t) && VARATT_IS_1B(t)) { + int len = VARSIZE_ANY_EXHDR(t); + result = (text*)palloc(len + VARHDRSZ); + SET_VARSIZE(result, len + VARHDRSZ); + errno_t rc = memcpy_s(VARDATA(result), len, VARDATA_ANY(t), len); + securec_check(rc, "\0", "\0"); + } + return result; +} + +text *text_catenate_huge(text *t1, text *t2, Oid toastOid) +{ + text *result = NULL; + struct varatt_lob_external large_toast_pointer; + Oid firstchunkid = InvalidOid; + int32 chunkseq = 0; + int64 len1 = VARSIZE_ANY_EXHDR(t1); + int64 len2 = VARSIZE_ANY_EXHDR(t2); + errno_t rc; + + t1 = text_catenate_convert_vartype(t1); + t2 = text_catenate_convert_vartype(t2); + + if (VARATT_IS_HUGE_TOAST_POINTER(t1)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t1); + len1 = large_toast_pointer.va_rawsize; + if (VARATT_IS_HUGE_TOAST_POINTER(t2)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t2); + len2 = large_toast_pointer.va_rawsize; + toast_huge_concat_hugepointers(t1, t2, &firstchunkid, toastOid); + } else if (VARATT_IS_4B(t2)) { + toast_huge_concat_varlena_after(t1, t2, &firstchunkid, toastOid); + } else { + Assert(0); + } + } else if (VARATT_IS_4B(t1)) { + if (VARATT_IS_HUGE_TOAST_POINTER(t2)) { + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, t2); + len2 = large_toast_pointer.va_rawsize; + toast_huge_concat_varlena_before(t1, t2, &firstchunkid, toastOid); + } else if (VARATT_IS_4B(t2)) { + toast_huge_concat_varlenas(t1, t2, &firstchunkid, &chunkseq, toastOid); + } else { + Assert(0); + } + } else { + Assert(0); + } + + int64 len = len1 + len2; + result = (struct varlena *)palloc(LARGE_TOAST_POINTER_SIZE); + SET_HUGE_TOAST_POINTER_TAG(result, VARTAG_ONDISK); + large_toast_pointer.va_rawsize = len; + large_toast_pointer.va_toastrelid = toastOid; + large_toast_pointer.va_valueid = firstchunkid; + rc = + memcpy_s(VARDATA_EXTERNAL(result), LARGE_TOAST_POINTER_SIZE, &large_toast_pointer, sizeof(large_toast_pointer)); + securec_check(rc, "", ""); + + return result; +} + /* ---------- * toast_compress_datum - * @@ -1379,7 +2112,7 @@ Datum toast_save_datum(Relation rel, Datum value, struct varlena *oldexternal, i securec_check(rc, "", ""); toasttup = heap_form_tuple(toast_tup_desc, t_values, t_isnull); - (void)heap_insert(toastrel, toasttup, mycid, options, NULL); + (void)heap_insert(toastrel, toasttup, mycid, options, NULL, true); /* * Create the index entry. We cheat a little here by not using @@ -1438,33 +2171,14 @@ Datum toast_save_datum(Relation rel, Datum value, struct varlena *oldexternal, i return PointerGetDatum(result); } -/* ---------- - * toast_delete_datum - - * - * Delete a single external stored value.
- * ---------- - */ -void toast_delete_datum(Relation rel, Datum value, int options, bool allow_update_self) +void toast_delete_datum_internal(varatt_external toast_pointer, int options, bool allow_update_self, int2 bucketid) { - struct varlena *attr = (struct varlena *)DatumGetPointer(value); - struct varatt_external toast_pointer; Relation toastrel; Relation toastidx; ScanKeyData toastkey; SysScanDesc toastscan; HeapTuple toasttup; - int2 bucketid; - if (!VARATT_IS_EXTERNAL_ONDISK_B(attr)) - return; - - /* Must copy to access aligned fields */ - VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, attr, bucketid); - - /* - * Open the toast relation and its index - */ - Assert(bucketid == InvalidBktId || (bucketid == rel->rd_node.bucketNode)); toastrel = heap_open(toast_pointer.va_toastrelid, RowExclusiveLock, bucketid); toastidx = index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock, bucketid); @@ -1497,6 +2211,63 @@ void toast_delete_datum(Relation rel, Datum value, int options, bool allow_updat heap_close(toastrel, RowExclusiveLock); } +/* ---------- + * toast_delete_datum - + * + * Delete a single external stored value. + * ---------- + */ +void toast_delete_datum(Relation rel, Datum value, int options, bool allow_update_self) +{ + struct varlena *attr = (struct varlena *)DatumGetPointer(value); + struct varatt_external toast_pointer; + int2 bucketid; + if (!VARATT_IS_EXTERNAL_ONDISK_B(attr)) + return; + /* Must copy to access aligned fields */ + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, attr, bucketid); + + /* + * Open the toast relation and its index + */ + Assert(bucketid == InvalidBktId || (bucketid == rel->rd_node.bucketNode)); + toast_delete_datum_internal(toast_pointer, options, allow_update_self, bucketid); +} + +void toast_huge_delete_datum(Relation rel, Datum value, int options, bool allow_update_self) +{ + struct varlena *attr = (struct varlena *)DatumGetPointer(value); + ScanKeyData toastkey; + SysScanDesc toastscan; + HeapTuple ttup; + + Pointer chunk = NULL; + bool isnull = false; + struct varatt_external toast_pointer; + int2 bucketid; + struct varatt_lob_external large_toast_pointer; + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr); + + Relation toastrel = heap_open(large_toast_pointer.va_toastrelid, RowExclusiveLock); + TupleDesc toast_tup_desc = toastrel->rd_att; + Relation toastidx = index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock); + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(toastrel, toastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL) { + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, toast_tup_desc, &isnull)); + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, chunk, bucketid); + toast_delete_datum(rel, PointerGetDatum(chunk), options, allow_update_self); + simple_heap_delete(toastrel, &ttup->t_self, options, allow_update_self); + + if (u_sess->attr.attr_storage.enable_debug_vacuum) + elogVacuumInfo(toastrel, ttup, "toast_delete_datum", u_sess->cmd_cxt.OldestXmin); + } + systable_endscan_ordered(toastscan); + index_close(toastidx, RowExclusiveLock); + heap_close(toastrel, RowExclusiveLock); +} + /* ---------- * toastrel_valueid_exists - * @@ -1689,7 +2460,7 @@ struct varlena* heap_internal_toast_fetch_datum(struct varatt_external toast_poi } struct varlena* HeapInternalToastFetchDatumSlice(struct varatt_external toastPointer, - 
Relation toastrel, Relation toastidx, int32 sliceoffset, int32 length) + Relation toastrel, Relation toastidx, int64 sliceoffset, int32 length) { int32 attrsize; int32 residx; @@ -1936,7 +2707,7 @@ static struct varlena* toast_fetch_datum(struct varlena* attr) * in the toast relation * ---------- */ -static struct varlena* toast_fetch_datum_slice(struct varlena* attr, int32 sliceoffset, int32 length) +static struct varlena* toast_fetch_datum_slice(struct varlena* attr, int64 sliceoffset, int32 length) { Relation toastrel; Relation toastidx; @@ -1962,3 +2733,170 @@ static struct varlena* toast_fetch_datum_slice(struct varlena* attr, int32 slice return result; } +static struct varlena *toast_huge_fetch_datum_slice(struct varlena *attr, int64 sliceoffset, int32 length) +{ + ScanKeyData toastkey; + SysScanDesc toastscan; + HeapTuple ttup; + errno_t rc; + struct varlena *result = NULL; + struct varlena *first_chunk = NULL; + int32 curlength; + int32 totallength = 0; + Pointer chunk; + bool isnull; + struct varatt_lob_external large_toast_pointer; + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr); + + result = (struct varlena *)palloc(length + VARHDRSZ); + SET_VARSIZE(result, length + VARHDRSZ); + + Relation toastrel = heap_open(large_toast_pointer.va_toastrelid, AccessShareLock); + TupleDesc toast_tup_desc = toastrel->rd_att; + Relation toastidx = index_open(toastrel->rd_rel->reltoastidxid, AccessShareLock); + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + toastscan = systable_beginscan_ordered(toastrel, toastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL) { + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, toast_tup_desc, &isnull)); + struct varatt_external toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, chunk); + + if (sliceoffset > toast_pointer.va_rawsize - VARHDRSZ) { + sliceoffset -= (toast_pointer.va_rawsize - VARHDRSZ); + continue; + } else { + if (length < (toast_pointer.va_rawsize - sliceoffset + 1)) { + curlength = length; + } else { + curlength = toast_pointer.va_rawsize - sliceoffset + 1; + } + first_chunk = heap_tuple_untoast_attr_slice((varlena *)chunk, sliceoffset, curlength); + rc = memcpy_s(VARDATA(result) + totallength, length, VARDATA(first_chunk), curlength); + securec_check(rc, "", ""); + length -= curlength; + totallength += curlength; + sliceoffset = 1; + } + if (length == 0) { + break; + } + } + systable_endscan_ordered(toastscan); + + index_close(toastidx, AccessShareLock); + heap_close(toastrel, AccessShareLock); + + return result; +} + +struct varlena *toast_huge_write_datum_slice(struct varlena *attr1, struct varlena *attr2, int64 sliceoffset, + int32 length) +{ + ScanKeyData toastkey; + HeapTuple ttup; + errno_t rc; + struct varlena *first_chunk = NULL; + int32 curlength; + int32 totallength = 0; + int32 residx; + Pointer chunk; + bool isnull; + Assert(length >= 1 && length <= MAX_TOAST_CHUNK_SIZE); + struct varatt_lob_external large_toast_pointer; + + VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr1); + + if (sliceoffset < 1 || sliceoffset > large_toast_pointer.va_rawsize) { + ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_CHUNK_VALUE), errmsg("offset(%lu) is invalid.", sliceoffset))); + } + + Relation toastrel = heap_open(large_toast_pointer.va_toastrelid, RowExclusiveLock); + TupleDesc toast_tup_desc = toastrel->rd_att; + Relation toastidx =
index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock); + Oid firstchunkid = GetNewOidWithIndex(toastrel, RelationGetRelid(toastidx), (AttrNumber)1); + ScanKeyInit(&toastkey, (AttrNumber)1, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(large_toast_pointer.va_valueid)); + SysScanDesc toastscan = systable_beginscan_ordered(toastrel, toastidx, SnapshotToast, 1, &toastkey); + while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL) { + residx = DatumGetInt32(fastgetattr(ttup, CHUNK_ID_ATTR, toast_tup_desc, &isnull)); + chunk = DatumGetPointer(fastgetattr(ttup, CHUNK_DATA_ATTR, toast_tup_desc, &isnull)); + struct varatt_external toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, chunk); + + if (sliceoffset > toast_pointer.va_rawsize - VARHDRSZ) { + toast_huge_fetch_and_copy_level1(toastrel, toastidx, toastrel, toastidx, ttup, firstchunkid, &residx, + InvalidOid); + sliceoffset -= (toast_pointer.va_rawsize - VARHDRSZ); + continue; + } + if (length == 0) { + toast_huge_fetch_and_copy_level1(toastrel, toastidx, toastrel, toastidx, ttup, firstchunkid, &residx, + InvalidOid); + continue; + } + first_chunk = heap_tuple_untoast_attr((varlena *)chunk); + if (length < (toast_pointer.va_rawsize - VARHDRSZ - sliceoffset + 1)) { + curlength = length; + } else { + curlength = toast_pointer.va_rawsize - VARHDRSZ - sliceoffset + 1; + } + rc = memcpy_s(VARDATA(first_chunk) + sliceoffset - 1, curlength, VARDATA(attr2) + totallength, curlength); + securec_check(rc, "", ""); + length -= curlength; + totallength += curlength; + sliceoffset = 1; + toast_huge_fetch_and_append_datum(toastrel, toastidx, first_chunk, &firstchunkid, residx); + } + systable_endscan_ordered(toastscan); + index_close(toastidx, RowExclusiveLock); + heap_close(toastrel, RowExclusiveLock); + + struct varlena *result = (struct varlena *)palloc(LARGE_TOAST_POINTER_SIZE); + SET_HUGE_TOAST_POINTER_TAG(result, VARTAG_ONDISK); + large_toast_pointer.va_valueid = firstchunkid; + rc = + memcpy_s(VARDATA_EXTERNAL(result), LARGE_TOAST_POINTER_SIZE, &large_toast_pointer, sizeof(large_toast_pointer)); + securec_check(rc, "", ""); + + return result; +} + +void checkHugeToastPointer(struct varlena *value) +{ + if (VARATT_IS_HUGE_TOAST_POINTER(value)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("clob/blob values larger than 1GB are not supported in this context"))); + } +} + +struct varlena * toast_pointer_fetch_data(TupleTableSlot* varSlot, Form_pg_attribute attr, int varNumber) +{ + MemoryContext oldcontext; + oldcontext = MemoryContextSwitchTo(varSlot->tts_mcxt); + + struct varlena *toast_pointer_lob = (struct varlena *)palloc(LOB_POINTER_SIZE); + HeapTuple tuple = (HeapTuple)(varSlot->tts_tuple); + Assert(toast_pointer_lob != NULL); + if (tuple != NULL) { + struct varatt_lob_pointer lob_pointer; + SET_VARTAG_1B_E(toast_pointer_lob, VARTAG_LOB); + + lob_pointer.relid = tuple->t_tableOid; + lob_pointer.columid = attr->attnum; + lob_pointer.bucketid = tuple->t_bucketId; + lob_pointer.bi_hi = tuple->t_self.ip_blkid.bi_hi; + lob_pointer.bi_lo = tuple->t_self.ip_blkid.bi_lo; + lob_pointer.ip_posid = tuple->t_self.ip_posid; + + errno_t rc = memcpy_s(VARDATA_EXTERNAL(toast_pointer_lob), LOB_POINTER_SIZE, &lob_pointer, + sizeof(varatt_lob_pointer)); + securec_check(rc, "", ""); + } else { + toast_pointer_lob = (varlena *)DatumGetPointer(varSlot->tts_lobPointers[varNumber]); + } + MemoryContextSwitchTo(oldcontext); + + return toast_pointer_lob; +} + diff --git a/src/gausskernel/storage/access/index/genam.cpp
b/src/gausskernel/storage/access/index/genam.cpp index ed66c7cbe..b1dd6e881 100644 --- a/src/gausskernel/storage/access/index/genam.cpp +++ b/src/gausskernel/storage/access/index/genam.cpp @@ -590,14 +590,36 @@ static void GPILookupFakeRelCache(GPIScanDesc gpiScan, PartRelIdCacheKey fakeRel FakeRelationIdCacheLookup(fakeRels, fakeRelKey, gpiScan->fakePartRelation, gpiScan->partition); } +static void GPIInsertFakeParentRelCacheForSubpartition(GPIScanDesc gpiScan, MemoryContext cxt, LOCKMODE lmode) +{ + HTAB* fakeRels = gpiScan->fakeRelationTable; + Relation parentRel = gpiScan->parentRelation; + Oid parentPartOid = partid_get_parentid(gpiScan->currPartOid); + if (parentPartOid != parentRel->rd_id) { + PartRelIdCacheKey fakeRelKey = {parentPartOid, InvalidBktId}; + Partition parentPartition = NULL; + FakeRelationIdCacheLookup(fakeRels, fakeRelKey, parentRel, parentPartition); + if (!RelationIsValid(parentRel)) { + /* add current parentRel into fakeRelationTable */ + Oid baseRelOid = partid_get_parentid(parentPartOid); + Relation baseRel = relation_open(baseRelOid, lmode); + searchFakeReationForPartitionOid(fakeRels, cxt, baseRel, parentPartOid, parentRel, parentPartition, lmode); + relation_close(baseRel, NoLock); + } + gpiScan->parentRelation = parentRel; + } +} + /* Lookup partition information from hash table use key for global partition index scan */ static void GPIInsertFakeRelCache(GPIScanDesc gpiScan, MemoryContext cxt, LOCKMODE lmode) { Oid currPartOid = gpiScan->currPartOid; - Relation parentRel = gpiScan->parentRelation; HTAB* fakeRels = gpiScan->fakeRelationTable; Partition partition = NULL; + GPIInsertFakeParentRelCacheForSubpartition(gpiScan, cxt, lmode); + + Relation parentRel = gpiScan->parentRelation; /* Save search fake relation in gpiScan->fakeRelation */ searchFakeReationForPartitionOid( fakeRels, cxt, parentRel, currPartOid, gpiScan->fakePartRelation, partition, lmode); @@ -620,7 +642,8 @@ void GPIScanInit(GPIScanDesc* gpiScan) gpiInfo->currPartOid = InvalidOid; gpiInfo->fakePartRelation = NULL; - gpiInfo->invisiblePartTree = CreateOidRBTree(); + gpiInfo->invisiblePartTree = NULL; + gpiInfo->invisiblePartTreeForVacuum = CreateOidRBTree(); gpiInfo->parentRelation = NULL; gpiInfo->fakeRelationTable = NULL; gpiInfo->partition = NULL; @@ -640,6 +663,7 @@ void GPIScanEnd(GPIScanDesc gpiScan) } DestroyOidRBTree(&gpiScan->invisiblePartTree); + DestroyOidRBTree(&gpiScan->invisiblePartTreeForVacuum); pfree_ext(gpiScan); } @@ -680,6 +704,10 @@ bool GPIGetNextPartRelation(GPIScanDesc gpiScan, MemoryContext cxt, LOCKMODE lmo GPIInitFakeRelTable(gpiScan, cxt); } + if (!PointerIsValid(gpiScan->invisiblePartTree)) { + gpiScan->invisiblePartTree = CreateOidRBTree(); + } + /* First check invisible partition oid's bitmapset */ if (OidRBTreeMemberOid(gpiScan->invisiblePartTree, gpiScan->currPartOid)) { gpiScan->fakePartRelation = NULL; diff --git a/src/gausskernel/storage/access/index/indexam.cpp b/src/gausskernel/storage/access/index/indexam.cpp index f0da4de67..56ae3dcb8 100644 --- a/src/gausskernel/storage/access/index/indexam.cpp +++ b/src/gausskernel/storage/access/index/indexam.cpp @@ -677,8 +677,10 @@ Tuple index_getnext(IndexScanDesc scan, ScanDirection direction) for (;;) { /* IO collector and IO scheduler */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW); +#endif if (likely(!scan->xs_continue_hot)) { /* Time to fetch the next TID from the index */ tid = index_getnext_tid(scan, direction); @@ -742,11 +744,15 @@ bool 
UHeapSysIndexGetnextSlot(SysScanDesc scan, ScanDirection direction, TupleTa bool IndexGetnextSlot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot) { ItemPointer tid; + TupleTableSlot* tmpslot = NULL; + tmpslot = MakeSingleTupleTableSlot(RelationGetDescr(scan->heapRelation), + false, scan->heapRelation->rd_tam_type); for (;;) { /* IO collector and IO scheduler */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW); - +#endif if (likely(!scan->xs_continue_hot)) { /* Time to fetch the next TID from the index */ @@ -782,13 +788,17 @@ bool IndexGetnextSlot(IndexScanDesc scan, ScanDirection direction, TupleTableSlo if (IndexFetchUHeap(scan, slot)) { /* recheck IndexTuple when necessary */ - if (scan->xs_recheck_itup && !RecheckIndexTuple(scan, slot)) { - continue; + if (scan->xs_recheck_itup) { + if (!IndexFetchUHeap(scan, tmpslot)) + ereport(PANIC, (errmsg("Failed to refetch UHeapTuple. This shouldn't happen."))); + if (!RecheckIndexTuple(scan, tmpslot)) + continue; } + ExecDropSingleTupleTableSlot(tmpslot); return true; } } - + ExecDropSingleTupleTableSlot(tmpslot); return false; /* failure exit */ } diff --git a/src/gausskernel/storage/access/nbtree/README b/src/gausskernel/storage/access/nbtree/README index bb5a1c3ce..35f172b9b 100644 --- a/src/gausskernel/storage/access/nbtree/README +++ b/src/gausskernel/storage/access/nbtree/README @@ -324,6 +324,25 @@ positives, so long as it never gives a false negative. This makes it possible to implement the test with a small counter value stored on each index page. +Fastpath For Index Insertion +---------------------------- + +We optimize for a common case of insertion of increasing index key +values by caching the last page to which this backend inserted the last +value, if this page was the rightmost leaf page. For the next insert, we +can then quickly check if the cached page is still the rightmost leaf +page and also the correct place to hold the current value. We can avoid +the cost of walking down the tree in such common cases. + +The optimization works on the assumption that there can only be one +non-ignorable leaf rightmost page, and so even a RecentGlobalXmin style +interlock isn't required. We cannot fail to detect that our hint was +invalidated, because there can only be one such page in the B-Tree at +any time. It's possible that the page will be deleted and recycled +without a backend's cached page also being detected as invalidated, but +only when we happen to recycle a block that once again gets recycled as the +rightmost leaf page. 
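As a condensed illustration of the check described above, here is a sketch adapted from the SearchBufferAndCheckUnique() changes in nbtinsert.cpp later in this patch (a fragment, not a drop-in: declarations and the max-offset sanity test are elided):

```c
/* Try the cached rightmost leaf before descending from the root. */
if (RelationGetTargetBlock(rel) != InvalidBlockNumber) {
    Buffer buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
    if (ConditionalLockBuffer(buf)) {       /* never wait; contention => slow path */
        Page page = BufferGetPage(buf);
        BTPageOpaqueInternal lpageop = (BTPageOpaqueInternal)PageGetSpecialPointer(page);
        if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) && !P_IGNORE(lpageop) &&
            PageGetFreeSpace(page) > itemsz &&
            _bt_compare(rel, indnkeyatts, itup_scankey, page, P_FIRSTDATAKEY(lpageop)) > 0) {
            fastpath = true;                /* insert directly into the cached page */
        } else {
            _bt_relbuf(rel, buf);           /* hint is stale; forget it */
            RelationSetTargetBlock(rel, InvalidBlockNumber);
        }
    } else {
        ReleaseBuffer(buf);                 /* someone else holds the lock */
        RelationSetTargetBlock(rel, InvalidBlockNumber);
    }
}
if (!fastpath)
    stack = _bt_search(rel, indnkeyatts, itup_scankey, false, &buf, BT_WRITE);
```

ConditionalLockBuffer() is what keeps the fastpath from becoming a contention point of its own: if the rightmost leaf is busy, the backend immediately falls back to a normal descent.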
+ On-the-Fly Deletion Of Index Tuples ----------------------------------- diff --git a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp index f64fe0e93..65088796e 100644 --- a/src/gausskernel/storage/access/nbtree/nbtinsert.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtinsert.cpp @@ -44,6 +44,9 @@ } \ } while (0) +/* Minimum tree height for application of fastpath optimization */ +#define BTREE_FASTPATH_MIN_LEVEL 2 + typedef struct { /* context data for _bt_checksplitloc */ Size newitemsz; /* size of new item to be inserted */ @@ -79,6 +82,7 @@ static bool _bt_isequal(Relation idxrel, Page page, OffsetNumber offnum, int key static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); static bool CheckItemIsAlive(ItemPointer tid, Relation relation, Snapshot snapshot, bool* all_dead, CUDescScan* cudescScan); +static bool CheckPartitionIsInvisible(GPIScanDesc gpiScan); /* * _bt_doinsert() -- Handle insertion of a single index tuple in the tree. @@ -103,10 +107,13 @@ bool _bt_doinsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, R bool is_unique = false; int indnkeyatts; ScanKey itup_scankey; - BTStack stack; + BTStack stack = NULL; Buffer buf; OffsetNumber offset; Oid indexHeapRelOid = InvalidOid; + Relation indexHeapRel = NULL; + Partition part = NULL; + Relation partRel = NULL; GPIScanDesc gpiScan = NULL; CBIScanDesc cbiScan = NULL; @@ -115,12 +122,13 @@ bool _bt_doinsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, R if (RelationIsGlobalIndex(rel)) { GPIScanInit(&gpiScan); indexHeapRelOid = IndexGetRelation(rel->rd_id, false); - if (indexHeapRelOid != heapRel->parentId) { - Relation indexHeapRel = relation_open(indexHeapRelOid, AccessShareLock); - Partition part = partitionOpen(indexHeapRel, heapRel->parentId, AccessShareLock); - gpiScan->parentRelation = partitionGetRelation(indexHeapRel, part); + if (indexHeapRelOid == heapRel->grandparentId) { // for subpartition tables + indexHeapRel = relation_open(indexHeapRelOid, AccessShareLock); + Assert(RelationIsSubPartitioned(indexHeapRel)); + part = partitionOpen(indexHeapRel, heapRel->parentId, AccessShareLock); + partRel = partitionGetRelation(indexHeapRel, part); + gpiScan->parentRelation = partRel; partitionClose(indexHeapRel, part, AccessShareLock); - relation_close(indexHeapRel, AccessShareLock); } else { gpiScan->parentRelation = relation_open(heapRel->parentId, AccessShareLock); } @@ -144,7 +152,8 @@ bool _bt_doinsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, R offset = element.offset; indnkeyatts = element.indnkeyatts; - if (checkUnique != UNIQUE_CHECK_EXISTING) { + /* skip insertion when we only want to check for an existing tuple, or when upsert has already found a conflict */ + if (checkUnique != UNIQUE_CHECK_EXISTING && !(checkUnique == UNIQUE_CHECK_UPSERT && !is_unique)) { /* * The only conflict predicate locking cares about for indexes is when * an index tuple insert conflicts with an existing lock. Since the @@ -163,12 +172,15 @@ bool _bt_doinsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, R _bt_relbuf(rel, buf); } /* be tidy */ - _bt_freestack(stack); + if (stack) { + _bt_freestack(stack); + } _bt_freeskey(itup_scankey); if (gpiScan != NULL) { // means rel switch happened - if (indexHeapRelOid != heapRel->parentId) { - releaseDummyRelation(&(gpiScan->parentRelation)); + if (indexHeapRelOid == heapRel->grandparentId) { // for subpartition tables + releaseDummyRelation(&partRel); + relation_close(indexHeapRel, AccessShareLock); } else { relation_close(gpiScan->parentRelation, AccessShareLock); } @@ -194,34 +206,120 @@ bool SearchBufferAndCheckUnique(Relation rel, IndexTuple itup, IndexUniqueCheck bool is_unique = false; int indnkeyatts; ScanKey itup_scankey; - BTStack stack; + BTStack stack = NULL; Buffer buf; OffsetNumber offset; + bool fastpath = false; indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); Assert(indnkeyatts != 0); /* we need an insertion scan key to do our search, so build one */ itup_scankey = _bt_mkscankey(rel, itup); + /* + * It's very common to have an index on an auto-incremented or + * monotonically increasing value. In such cases, every insertion happens + * towards the end of the index. We try to optimize that case by caching + * the rightmost leaf of the index. If our cached block is still the + * rightmost leaf, has enough free space to accommodate a new entry and + * the insertion key is strictly greater than the first key in this page, + * then we can safely conclude that the new key will be inserted in the + * cached block. So we simply search within the cached block and insert the + * key at the appropriate location. We call it a fastpath. + * + * Testing has revealed, though, that the fastpath can result in increased + * contention on the exclusive-lock on the rightmost leaf page. So we + * conditionally check if the lock is available. If it's not available then + * we simply abandon the fastpath and take the regular path. This makes + * sense because unavailability of the lock also signals that some other + * backend might be concurrently inserting into the page, thus reducing our + * chances of finding an insertion place in this page. + */ top: - /* find the first page containing this key */ - stack = _bt_search(rel, indnkeyatts, itup_scankey, false, &buf, BT_WRITE); + fastpath = false; offset = InvalidOffsetNumber; + if (RelationGetTargetBlock(rel) != InvalidBlockNumber) { + Size itemsz; + Page page; + BTPageOpaqueInternal lpageop; - /* trade in our read lock for a write lock */ - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - LockBuffer(buf, BT_WRITE); + /* + * Conditionally acquire exclusive lock on the buffer before doing any + * checks. If we don't get the lock, we simply follow the slow path. If we + * do get the lock, this ensures that the index state cannot change, as + * far as the rightmost part of the index is concerned. + */ + buf = ReadBuffer(rel, RelationGetTargetBlock(rel)); - /* - * If the page was split between the time that we surrendered our read - * lock and acquired our write lock, then this page may no longer be the - * right place for the key we want to insert. In this case, we need to - * move right in the tree. See Lehman and Yao for an excruciatingly - * precise description. - */ - buf = _bt_moveright(rel, buf, indnkeyatts, itup_scankey, false, true, stack, BT_WRITE); + if (ConditionalLockBuffer(buf)) { + _bt_checkpage(rel, buf); + page = BufferGetPage(buf); + + lpageop = (BTPageOpaqueInternal) PageGetSpecialPointer(page); + itemsz = IndexTupleSize(itup); + itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this * but we need to be consistent */ + + /* + * Check if the page is still the rightmost leaf page, has enough + * free space to accommodate the new tuple, no split is in progress + * and the scankey is greater than or equal to the first key on the + * page. + */ + if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) && + !P_IGNORE(lpageop) && + (PageGetFreeSpace(page) > itemsz) && + PageGetMaxOffsetNumber(page) >= P_FIRSTDATAKEY(lpageop) && + _bt_compare(rel, indnkeyatts, itup_scankey, page, + P_FIRSTDATAKEY(lpageop)) > 0) { + /* + * The right-most block should never have an incomplete split. But + * be paranoid and check for it anyway. + */ + Assert(!P_INCOMPLETE_SPLIT(lpageop)); + fastpath = true; + } else { + _bt_relbuf(rel, buf); + + /* + * Something did not work out. Just forget about the cached + * block and follow the normal path. It might be set again if + * the conditions are favourable. + */ + RelationSetTargetBlock(rel, InvalidBlockNumber); + } + } else { + ReleaseBuffer(buf); + + /* + * If someone's holding a lock, it's likely to change anyway, + * so don't try again until we get an updated rightmost leaf. + */ + RelationSetTargetBlock(rel, InvalidBlockNumber); + } + } + if (!fastpath) { + /* + * Find the first page containing this key. Buffer returned by + * _bt_search() is locked in exclusive mode. + */ + stack = _bt_search(rel, indnkeyatts, itup_scankey, false, &buf, BT_WRITE); + + /* trade in our read lock for a write lock */ + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + LockBuffer(buf, BT_WRITE); + + /* + * If the page was split between the time that we surrendered our read + * lock and acquired our write lock, then this page may no longer be the + * right place for the key we want to insert. In this case, we need to + * move right in the tree. See Lehman and Yao for an excruciatingly + * precise description. + */ + buf = _bt_moveright(rel, buf, indnkeyatts, itup_scankey, false, true, stack, BT_WRITE); + } /* * If we're not allowing duplicates, make sure the key isn't already in * the index. @@ -255,7 +353,9 @@ top: _bt_relbuf(rel, buf); XactLockTableWait(xwait); /* start over... */ - _bt_freestack(stack); + if (stack) { + _bt_freestack(stack); + } goto top; } } @@ -365,7 +465,9 @@ TransactionId _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, if (curPartOid != gpiScan->currPartOid) { GPISetCurrPartOid(gpiScan, curPartOid); if (!GPIGetNextPartRelation(gpiScan, CurrentMemoryContext, AccessShareLock)) { - MarkItemDeadAndDirtyBuffer(curitemid, opaque, buf, nbuf); + if (CheckPartitionIsInvisible(gpiScan)) { + MarkItemDeadAndDirtyBuffer(curitemid, opaque, buf, nbuf); + } goto next; } else { hbktParentHeapRel = tarRel = gpiScan->fakePartRelation; @@ -417,7 +519,7 @@ TransactionId _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * that it is a potential conflict and leave the full * check till later.
*/ - if (checkUnique == UNIQUE_CHECK_PARTIAL) { + if (IndexUniqueCheckNoError(checkUnique)) { if (nbuf != InvalidBuffer) _bt_relbuf(rel, nbuf); *is_unique = false; @@ -803,6 +905,24 @@ static void _bt_insertonpg(Relation rel, Buffer buf, Buffer cbuf, BTStack stack, bool newitemonleft = false; Buffer rbuf; + /* + * If we're here then a pagesplit is needed. We should never reach here + * if we're using the fastpath since we should have checked for all the + * required conditions, including the fact that this page has enough + * freespace. Note that this routine can in theory deal with the + * situation where a NULL stack pointer is passed (that's what would + * happen if the fastpath is taken), like it does during crash + * recovery. But that path is much slower, defeating the very purpose + * of the optimization. The following assertion should protect us from + * any future code changes that invalidate those assumptions. + * + * Note that whenever we fail to take the fastpath, we clear the + * cached block. Checking for a valid cached block at this point is + * enough to decide whether we're in a fastpath or not. + */ + Assert(!(P_ISLEAF(lpageop) && + BlockNumberIsValid(RelationGetTargetBlock(rel)))); + /* Choose the split point */ firstright = _bt_findsplitloc(rel, page, newitemoff, itemsz, &newitemonleft); @@ -833,6 +953,7 @@ static void _bt_insertonpg(Relation rel, Buffer buf, Buffer cbuf, BTStack stack, BTMetaPageData *metad = NULL; OffsetNumber itup_off; BlockNumber itup_blkno; + BlockNumber cachedBlock = InvalidBlockNumber; itup_off = newitemoff; itup_blkno = BufferGetBlockNumber(buf); @@ -884,6 +1005,15 @@ static void _bt_insertonpg(Relation rel, Buffer buf, Buffer cbuf, BTStack stack, } } + /* + * Cache the block information if we just inserted into the rightmost + * leaf page of the index and it's not the root page. For very small + * index where root is also the leaf, there is no point trying for any + * optimization. + */ + if (P_RIGHTMOST(lpageop) && P_ISLEAF(lpageop) && !P_ISROOT(lpageop)) + cachedBlock = BufferGetBlockNumber(buf); + /* XLOG stuff */ if (RelationNeedsWAL(rel)) { xl_btree_insert xlrec; @@ -968,6 +1098,22 @@ static void _bt_insertonpg(Relation rel, Buffer buf, Buffer cbuf, BTStack stack, } } _bt_relbuf(rel, buf); + + /* + * If we decided to cache the insertion target block, then set it now. + * But before that, check for the height of the tree and don't go for + * the optimization for small indexes. We defer that check to this + * point to ensure that we don't call _bt_getrootheight while holding + * lock on any other block. + * + * We do this after dropping locks on all buffers. So the information + * about whether the insertion block is still the rightmost block or + * not may have changed in between. But we will deal with that during + * next insert operation. No special care is required while setting it. 
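+     * Note also the BTREE_FASTPATH_MIN_LEVEL guard that follows: for trees
+     * below that height a descent from the root is already cheap, so caching
+     * the block would add bookkeeping for no measurable gain.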
+ */ + if (BlockNumberIsValid(cachedBlock) && + _bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL) + RelationSetTargetBlock(rel, cachedBlock); } } @@ -2253,3 +2399,19 @@ static bool CheckItemIsAlive(ItemPointer tid, Relation relation, Snapshot snapsh return cudescScan->CheckItemIsAlive(tid); } } + +static bool CheckPartitionIsInvisible(GPIScanDesc gpiScan) +{ + if (OidRBTreeMemberOid(gpiScan->invisiblePartTreeForVacuum, gpiScan->currPartOid)) { + return true; + } + + PartStatus currStatus = PartitionGetMetadataStatus(gpiScan->currPartOid, true); + + if (currStatus == PART_METADATA_INVISIBLE) { + (void)OidRBTreeInsertOid(gpiScan->invisiblePartTreeForVacuum, gpiScan->currPartOid); + return true; + } + + return false; +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/nbtree/nbtpage.cpp b/src/gausskernel/storage/access/nbtree/nbtpage.cpp index 2836205cb..3e0869c67 100644 --- a/src/gausskernel/storage/access/nbtree/nbtpage.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtpage.cpp @@ -314,6 +314,57 @@ Buffer _bt_getroot(Relation rel, int access) return rootbuf; } +/* + * _bt_getrootheight() -- Get the height of the btree search tree. + * + * We return the level (counting from zero) of the current fast root. + * This represents the number of tree levels we'd have to descend through + * to start any btree index search. + * + * This is used by the planner for cost-estimation purposes. Since it's + * only an estimate, slightly-stale data is fine, hence we don't worry + * about updating previously cached data. + */ +int _bt_getrootheight(Relation rel) +{ + BTMetaPageData *metad; + + if (rel->rd_amcache == NULL) { + Buffer metabuf; + + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); + Page metapg = BufferGetPage(metabuf); + metad = BTPageGetMeta(metapg); + + /* + * If there's no root page yet, _bt_getroot() doesn't expect a cache + * to be made, so just stop here and report the index height is zero. + * (XXX perhaps _bt_getroot() should be changed to allow this case.) + */ + if (metad->btm_root == P_NONE) { + _bt_relbuf(rel, metabuf); + return 0; + } + + /* + * Cache the metapage data for next time + */ + rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt, sizeof(BTMetaPageData)); + errno_t rc = memcpy_s(rel->rd_amcache, sizeof(BTMetaPageData), metad, sizeof(BTMetaPageData)); + securec_check(rc, "", ""); + + _bt_relbuf(rel, metabuf); + } + + /* Get cached page */ + metad = (BTMetaPageData *) rel->rd_amcache; + /* We shouldn't have cached it if any of these fail */ + Assert(metad->btm_magic == BTREE_MAGIC); + Assert(metad->btm_fastroot != P_NONE); + + return metad->btm_fastlevel; +} + /* * _bt_gettrueroot() -- Get the true root page of the btree. * @@ -440,8 +491,7 @@ void _bt_checkpage(Relation rel, Buffer buf) /* * Additionally check that the special area looks sane. */ - Size _bt_specialsize = PageIs4BXidVersion(page) ? 
MAXALIGN(sizeof(BTPageOpaqueDataInternal)) - : MAXALIGN(sizeof(BTPageOpaqueData)); + Size _bt_specialsize = MAXALIGN(sizeof(BTPageOpaqueData)); if (RelationIsUstoreIndex(rel)) { _bt_specialsize = MAXALIGN(sizeof(UBTPageOpaqueData)); } @@ -477,7 +527,7 @@ static void _bt_log_reuse_page(const Relation rel, BlockNumber blkno, Transactio XLogBeginInsert(); XLogRegisterData((char *)&xlrec_reuse, SizeOfBtreeReusePage); - (void)XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, false, rel->rd_node.bucketNode); + (void)XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rel->rd_node.bucketNode); } /* @@ -549,10 +599,7 @@ loop: */ if (XLogStandbyInfoActive() && RelationNeedsWAL(rel)) { BTPageOpaqueInternal opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); - if (PageIs4BXidVersion(page)) - _bt_log_reuse_page(rel, blkno, opaque->btpo.xact_old); - else - _bt_log_reuse_page(rel, blkno, ((BTPageOpaque)opaque)->xact); + _bt_log_reuse_page(rel, blkno, ((BTPageOpaque)opaque)->xact); } /* Okay to use page. Re-initialize and return it */ @@ -679,13 +726,9 @@ bool _bt_page_recyclable(Page page) * interested in it. */ opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); - if (PageIs4BXidVersion(page)) { - if (P_ISDELETED(opaque) && TransactionIdPrecedes(opaque->btpo.xact_old, u_sess->utils_cxt.RecentGlobalXmin)) - return true; - } else { - if (P_ISDELETED(opaque) && - TransactionIdPrecedes(((BTPageOpaque)opaque)->xact, u_sess->utils_cxt.RecentGlobalXmin)) - return true; + if (P_ISDELETED(opaque) && + TransactionIdPrecedes(((BTPageOpaque)opaque)->xact, u_sess->utils_cxt.RecentGlobalXmin)) { + return true; } return false; } @@ -833,7 +876,7 @@ void _bt_delitems_delete(const Relation rel, Buffer buf, OffsetNumber *itemnos, */ XLogRegisterData((char *)itemnos, nitems * sizeof(OffsetNumber)); int bucket_id = RelationIsValid(heapRel) ? heapRel->rd_node.bucketNode : InvalidBktId; - recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, false, bucket_id); + recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, bucket_id); PageSetLSN(page, recptr); } @@ -1439,10 +1482,6 @@ int _bt_pagedel_old(Relation rel, Buffer buf, BTStack stack) * of that scan. */ page = BufferGetPage(buf); - if (PageIs4BXidVersion(page)) { - _bt_page_localupgrade(page); - } - Assert(PageIs8BXidVersion(page)); opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); opaque->btpo_flags &= ~BTP_HALF_DEAD; opaque->btpo_flags |= BTP_DELETED; @@ -2376,32 +2415,3 @@ static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsi return true; } - -/* Upgrade the BTpage - * Upgrade the Btree page from PG_PAGE_4B_LAYOUT_VERSION(4) to - * PG_PAGE_LAYOUT_VERSION(5) and only be called when the page is delete. 
- */ -void _bt_page_localupgrade(Page page) -{ - PageHeader phdr = (PageHeader)page; - errno_t rc; - - if (PageIs8BXidVersion(page)) - return; - - Assert(PageIs4BXidVersion(page)); - - rc = memmove_s((char *)page + phdr->pd_special - sizeof(TransactionId), MAXALIGN(sizeof(BTPageOpaqueDataInternal)), - (char *)page + phdr->pd_special, MAXALIGN(sizeof(BTPageOpaqueDataInternal))); - securec_check(rc, "", ""); - - /* Update PageHeaderInfo */ - PageSetPageSizeAndVersion(page, BLCKSZ, PG_COMM_PAGE_LAYOUT_VERSION); - phdr->pd_special = BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData)); - - rc = memset_s(page + phdr->pd_special + MAXALIGN(sizeof(BTPageOpaqueDataInternal)), sizeof(TransactionId), 0, - sizeof(TransactionId)); - securec_check(rc, "", ""); - BTPageGetSpecial(page)->xact = 0; - ereport(DEBUG1, (errmsg("The Btree page has been upgraded to version %d. ", phdr->pd_pagesize_version))); -} diff --git a/src/gausskernel/storage/access/nbtree/nbtsearch.cpp b/src/gausskernel/storage/access/nbtree/nbtsearch.cpp index 45ee9dd4d..21bee165d 100644 --- a/src/gausskernel/storage/access/nbtree/nbtsearch.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtsearch.cpp @@ -59,6 +59,7 @@ static void _bt_check_natts_correct(const Relation index, Page page, OffsetNumbe BTStack _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, Buffer *bufP, int access, bool needStack) { BTStack stack_in = NULL; + int page_access = BT_READ; /* Get the root page to start with */ *bufP = _bt_getroot(rel, access); @@ -89,7 +90,7 @@ BTStack _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, Buffe * if the leaf page is split and we insert to the parent page). But * this is a good opportunity to finish splits of internal pages too. */ - *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, (access == BT_WRITE), stack_in, BT_READ); + *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, (access == BT_WRITE), stack_in, page_access); /* if this is a leaf page, we're done */ page = BufferGetPage(*bufP); @@ -125,13 +126,41 @@ BTStack _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, Buffe new_stack->bts_parent = stack_in; } + /* + * Page level 1 is lowest non-leaf page level prior to leaves. So, if + * we're on the level 1 and asked to lock leaf page in write mode, + * then lock next page in write mode, because it must be a leaf. + */ + if (opaque->btpo.level == 1 && access == BT_WRITE) + page_access = BT_WRITE; + /* drop the read lock on the parent page, acquire one on the child */ - *bufP = _bt_relandgetbuf(rel, *bufP, blkno, BT_READ); + *bufP = _bt_relandgetbuf(rel, *bufP, blkno, page_access); /* okay, all set to move down a level */ stack_in = new_stack; } + /* + * If we're asked to lock leaf in write mode, but didn't manage to, then + * relock. This should only happen when the root page is a leaf page (and + * the only page in the index other than the metapage). + */ + if (access == BT_WRITE && page_access == BT_READ) + { + /* trade in our read lock for a write lock */ + LockBuffer(*bufP, BUFFER_LOCK_UNLOCK); + LockBuffer(*bufP, BT_WRITE); + + /* + * Race -- the leaf page may have split after we dropped the read lock + * but before we acquired a write lock. If it has, we may need to + * move right to its new sibling. Do that. 
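+     * (This is the same Lehman and Yao move-right protocol that the normal
+     * descent path relies on; see the comments in SearchBufferAndCheckUnique.)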
+ */ + *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, true, stack_in, BT_WRITE); + } + + return stack_in; } @@ -1036,8 +1065,6 @@ static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber off /* we must have the buffer pinned and locked */ Assert(BufferIsValid(so->currPos.buf)); - /* We've pinned the buffer, nobody can prune this buffer, check whether snapshot is valid. */ - CheckSnapshotIsValidException(scan->xs_snapshot, "_bt_readpage"); page = BufferGetPage(so->currPos.buf); opaque = (BTPageOpaqueInternal)PageGetSpecialPointer(page); diff --git a/src/gausskernel/storage/access/nbtree/nbtsort.cpp b/src/gausskernel/storage/access/nbtree/nbtsort.cpp index ebc66b0a5..80d775ccf 100644 --- a/src/gausskernel/storage/access/nbtree/nbtsort.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtsort.cpp @@ -199,9 +199,9 @@ void _bt_spooldestroy(BTSpool *btspool) /* * spool an index entry into the sort file. */ -void _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, const bool *isnull, IndexTransInfo* transInfo) +void _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, const bool *isnull) { - tuplesort_putindextuplevalues(btspool->sortstate, btspool->index, self, values, isnull, transInfo); + tuplesort_putindextuplevalues(btspool->sortstate, btspool->index, self, values, isnull); } /* diff --git a/src/gausskernel/storage/access/nbtree/nbtxlog.cpp b/src/gausskernel/storage/access/nbtree/nbtxlog.cpp index 83c7ba5ae..17e6cb9ba 100755 --- a/src/gausskernel/storage/access/nbtree/nbtxlog.cpp +++ b/src/gausskernel/storage/access/nbtree/nbtxlog.cpp @@ -456,7 +456,7 @@ static void btree_xlog_split(bool onleft, bool isroot, XLogReaderState *record, /* assure that memory is properly allocated, prevent from core dump caused by buffer unpin */ START_CRIT_SECTION(); - newlpage = PageGetTempPageCopySpecial(lpage, true); + newlpage = PageGetTempPageCopySpecial(lpage); END_CRIT_SECTION(); /* Set high key */ diff --git a/src/gausskernel/storage/access/obs/obs_am.cpp b/src/gausskernel/storage/access/obs/obs_am.cpp index a51a0f8a3..fa687e583 100755 --- a/src/gausskernel/storage/access/obs/obs_am.cpp +++ b/src/gausskernel/storage/access/obs/obs_am.cpp @@ -21,7 +21,9 @@ #define strpos(p, s) (strstr((p), (s)) != NULL ? strstr((p), (s)) - (p) : -1) #include "access/obs/obs_am.h" +#ifndef ENABLE_LITE_MODE #include "eSDKOBS.h" +#endif #include "lib/stringinfo.h" #include "miscadmin.h" @@ -38,6 +40,202 @@ #include "postmaster/alarmchecker.h" #include "replication/walreceiver.h" +/* ---------------------------------------------------------------------------- + * Utility functions + * ---------------------------------------------------------------------------- + */ +/* + * - Brief: Find nth sub-string from the given string, return -1 when not found + * - Parameter: + * @str: where to work + * @N: N'th occurrence + * @find: what to 'find' + * - Return: + * value -1: when not found + * value > 0: the actual position in given string + * Notes: position index starts from 0.
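+ * Example: find_Nth("a/b/c", 2, "/") returns 3, the position of the second '/'.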
+ */ +int find_Nth(const char *str, unsigned N, const char *find) +{ + int cursor, pos; + unsigned i = 0; + const char *curptr = str; + + Assert(str != NULL); + + if (N == 0) { + return -1; + } + + cursor = 0; + curptr = str + cursor; + + while (i < N) { + pos = strpos(curptr, find); + if (pos == -1) { + /* Not found, return directly */ + return -1; + } + + cursor += pos + 1; + curptr = str + cursor; + + i++; + } + return (cursor - 1); +} + +/* + * - Brief: Fetch hostname, bucket, prefix in given string + * - Parameter: + * @url: input URL that will be parsed into hostname, bucket, prefix + * @hostname: output hostname in palloc()'ed string + * @bucket: output bucket in palloc()'ed string + * @prefix: output prefix in palloc()'ed string + * - Return: + * no return value + */ +void FetchUrlProperties(const char *url, char **hostname, char **bucket, char **prefix) +{ +#define LOCAL_STRING_BUFFER_SIZE 512 + + int ibegin = 0; + int iend = 0; + char buffer[LOCAL_STRING_BUFFER_SIZE]; + char *invalid_element = NULL; + error_t rc = EOK; + int copylen = 0; + + /* At least we should pass-in a valid url and one of to-be fetched properties */ + Assert(url != NULL && (hostname || bucket || prefix)); + + /* hostname is required to fetch from Object's URL */ + if (hostname != NULL) { + rc = memset_s(buffer, LOCAL_STRING_BUFFER_SIZE, 0, LOCAL_STRING_BUFFER_SIZE); + securec_check(rc, "\0", "\0"); + ibegin = find_Nth(url, 2, "/"); + iend = find_Nth(url, 3, "/"); + + copylen = iend - ibegin - 1; + + /* if hostname is invalid, goto error message */ + if (ibegin < 0 || iend < 0 || copylen <= 0) { + invalid_element = "hostname"; + goto FETCH_URL_ERROR; + } + + rc = strncpy_s(buffer, LOCAL_STRING_BUFFER_SIZE, url + (ibegin + 1), copylen); + securec_check(rc, "", ""); + + *hostname = pstrdup(buffer); + } + + /* bucket is required to fetch from Object's URL */ + if (bucket != NULL) { + rc = memset_s(buffer, LOCAL_STRING_BUFFER_SIZE, 0, LOCAL_STRING_BUFFER_SIZE); + securec_check(rc, "\0", "\0"); + ibegin = find_Nth(url, 3, "/"); + iend = find_Nth(url, 4, "/"); + + copylen = iend - ibegin - 1; + + /* if bucket name is invalid, goto error message */ + if (ibegin < 0 || iend < 0 || copylen <= 0) { + invalid_element = "bucket"; + goto FETCH_URL_ERROR; + } + + rc = strncpy_s(buffer, LOCAL_STRING_BUFFER_SIZE, url + (ibegin + 1), copylen); + securec_check(rc, "", ""); + + *bucket = pstrdup(buffer); + } + + /* prefix is required to fetch from Object's URL */ + if (prefix != NULL) { + rc = memset_s(buffer, LOCAL_STRING_BUFFER_SIZE, 0, LOCAL_STRING_BUFFER_SIZE); + securec_check(rc, "\0", "\0"); + ibegin = find_Nth(url, 4, "/"); + /* if prefix is invalid, goto error message */ + if (ibegin < 0) { + invalid_element = "prefix"; + goto FETCH_URL_ERROR; + } + copylen = strlen(url) - ibegin; + + rc = strncpy_s(buffer, LOCAL_STRING_BUFFER_SIZE, url + (iend + 1), copylen); + securec_check(rc, "", ""); + + *prefix = pstrdup(buffer); + } + return; + +FETCH_URL_ERROR: + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_OPTION_DATA), errmsg("OBS URL's %s is not valid '%s'", invalid_element, url))); +} + +/* + * @Description: get bucket and prefix from folder name + * @IN folderName: folder name + * @OUT bucket: bucket name + * @OUT prefix: prefix + * @See also: + * @Important: use current memory context for bucket and prefix + */ +void FetchUrlPropertiesForQuery(const char *folderName, char **bucket, char **prefix) +{ + Assert(folderName && bucket && prefix); + Assert(!(*bucket) && !(*prefix)); + + error_t rc = EOK; + char *invalid_element =
NULL; + + int ibegin = 0; + int iend = 0; + int bucketLen = 0; + int prefixLen = 0; + + if (folderName[0] == '/') { + /* /bucket/prefix */ + ibegin = 1; + iend = find_Nth(folderName, 2, "/"); + } else { + /* bucket/prefix */ + ibegin = 0; + iend = find_Nth(folderName, 1, "/"); + } + + /* get bucket */ + bucketLen = iend - ibegin; + if (bucketLen <= 0) { + invalid_element = "bucket"; + goto FETCH_URL_ERROR2; + } + + *bucket = (char *)palloc0(bucketLen + 1); + rc = strncpy_s((*bucket), (bucketLen + 1), (folderName + ibegin), bucketLen); + securec_check(rc, "", ""); + + /* get prefix */ + prefixLen = strlen(folderName) - (iend + 1); + if (prefixLen < 0) { + invalid_element = "prefix"; + goto FETCH_URL_ERROR2; + } + + *prefix = (char *)palloc0(prefixLen + 1); + rc = strncpy_s((*prefix), (prefixLen + 1), (folderName + iend + 1), prefixLen); + securec_check(rc, "", ""); + + return; + +FETCH_URL_ERROR2: + ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_OPTION_DATA), + errmsg("OBS URL's %s is not valid '%s'", invalid_element, folderName))); +} + +#ifndef ENABLE_LITE_MODE // Some Windows stuff #ifndef FOPEN_EXTRA_FLAGS #define FOPEN_EXTRA_FLAGS "" @@ -58,18 +256,12 @@ static int64 ifModifiedSince = -1; static int64 ifNotModifiedSince = -1; static char* ifMatch = 0; static char* ifNotMatch = 0; +static int headerLen = 22; - -#define OBS_CIPHER_LIST "DHE-RSA-AES128-GCM-SHA256:" \ - "DHE-RSA-AES256-GCM-SHA384:" \ - "DHE-DSS-AES128-GCM-SHA256:" \ - "DHE-DSS-AES256-GCM-SHA384:" \ - "ECDHE-ECDSA-AES128-GCM-SHA256:" \ +#define OBS_CIPHER_LIST "ECDHE-ECDSA-AES128-GCM-SHA256:" \ "ECDHE-ECDSA-AES256-GCM-SHA384:" \ "ECDHE-RSA-AES128-GCM-SHA256:" \ "ECDHE-RSA-AES256-GCM-SHA384:" \ - "DHE-RSA-AES128-CCM:" \ - "DHE-RSA-AES256-CCM:" \ "ECDHE-ECDSA-AES128-CCM:" \ "ECDHE-ECDSA-AES256-CCM" @@ -94,8 +286,6 @@ MemoryContext GetObsMemoryContext(void) return t_thrd.obs_cxt.ObsMemoryContext; } -int find_Nth(const char *str, unsigned N, const char *find); - /* Request results, saved as globals ----------------------------------------- */ static THR_LOCAL obs_status statusG = OBS_STATUS_OK; static THR_LOCAL char errorMessageG[ERROR_MESSAGE_LEN] = {0}; @@ -1395,201 +1585,6 @@ void DestroyObsReadWriteHandler(OBSReadWriteHandler *handler, bool obsQueryType) pfree_ext(handler); } -/* ---------------------------------------------------------------------------- - * Utility functions - * ---------------------------------------------------------------------------- - */ -/* - * - Brief: Find nth sub-string from the given string, return -1 when not found - * - Parameter: - * @str: where to work - * @N: N'th ocurrence - * @find: what to 'find' - * - Return: - * value -1: when not found - * value > 0: the actual position in given string - * Notes: position index stats from 0. 
- */ -int find_Nth(const char *str, unsigned N, const char *find) -{ - int cursor, pos; - unsigned i = 0; - const char *curptr = str; - - Assert(str != NULL); - - if (N == 0) { - return -1; - } - - cursor = 0; - curptr = str + cursor; - - while (i < N) { - pos = strpos(curptr, find); - if (pos == -1) { - /* Not found, return directly */ - return -1; - } - - cursor += pos + 1; - curptr = str + cursor; - - i++; - } - return (cursor - 1); -} - -/* - * - Brief: Fetch hostname, bucket, prefix in given string - * - Parameter: - * @url: input URL that will be parsed into hostbame, bucket, prefix - * @hostname: output hostname in palloc()'ed string - * @bucket: output bucket in palloc()'ed string - * @prefix: output prefix in palloc()'ed string - * - Return: - * no return value - */ -void FetchUrlProperties(const char *url, char **hostname, char **bucket, char **prefix) -{ -#define LOCAL_STRING_BUFFER_SIZE 512 - - int ibegin = 0; - int iend = 0; - char buffer[LOCAL_STRING_BUFFER_SIZE]; - char *invalid_element = NULL; - error_t rc = EOK; - int copylen = 0; - - /* At least we should pass-in a valid url and one of to-be fetched properties */ - Assert(url != NULL && (hostname || bucket || prefix)); - - /* hostname is requred to fetch from Object's URL */ - if (hostname != NULL) { - rc = memset_s(buffer, LOCAL_STRING_BUFFER_SIZE, 0, LOCAL_STRING_BUFFER_SIZE); - securec_check(rc, "\0", "\0"); - ibegin = find_Nth(url, 2, "/"); - iend = find_Nth(url, 3, "/"); - - copylen = iend - ibegin - 1; - - /* if hostname is invalid, goto error message */ - if (ibegin < 0 || iend < 0 || copylen <= 0) { - invalid_element = "hostname"; - goto FETCH_URL_ERROR; - } - - rc = strncpy_s(buffer, LOCAL_STRING_BUFFER_SIZE, url + (ibegin + 1), copylen); - securec_check(rc, "", ""); - - *hostname = pstrdup(buffer); - } - - /* bucket is required to fetch from Object's URL */ - if (bucket != NULL) { - rc = memset_s(buffer, LOCAL_STRING_BUFFER_SIZE, 0, LOCAL_STRING_BUFFER_SIZE); - securec_check(rc, "\0", "\0"); - ibegin = find_Nth(url, 3, "/"); - iend = find_Nth(url, 4, "/"); - - copylen = iend - ibegin - 1; - - /* if bucket name is invalid, goto error message */ - if (ibegin < 0 || iend < 0 || copylen <= 0) { - invalid_element = "bucket"; - goto FETCH_URL_ERROR; - } - - rc = strncpy_s(buffer, LOCAL_STRING_BUFFER_SIZE, url + (ibegin + 1), copylen); - securec_check(rc, "", ""); - - *bucket = pstrdup(buffer); - } - - /* prefix is required to fetch from Object's URL */ - if (prefix != NULL) { - rc = memset_s(buffer, LOCAL_STRING_BUFFER_SIZE, 0, LOCAL_STRING_BUFFER_SIZE); - securec_check(rc, "\0", "\0"); - ibegin = find_Nth(url, 4, "/"); - /* if prefix is invalid, goto error message */ - if (ibegin < 0) { - invalid_element = "prefix"; - goto FETCH_URL_ERROR; - } - copylen = strlen(url) - ibegin; - - rc = strncpy_s(buffer, LOCAL_STRING_BUFFER_SIZE, url + (iend + 1), copylen); - securec_check(rc, "", ""); - - *prefix = pstrdup(buffer); - } - return; - -FETCH_URL_ERROR: - ereport(ERROR, - (errcode(ERRCODE_FDW_INVALID_OPTION_DATA), errmsg("OBS URL's %s is not valid '%s'", invalid_element, url))); -} - -/* - * @Description: get bucket and prefix from folder name - * @IN folderName: folder name - * @OUT bucket: bucket name - * @OUT prefix: perifx - * @See also: - * @Important: use current memory context for bucket and prefix - */ -void FetchUrlPropertiesForQuery(const char *folderName, char **bucket, char **prefix) -{ - Assert(folderName && bucket && prefix); - Assert(!(*bucket) && !(*prefix)); - - error_t rc = EOK; - char *invalid_element = 
NULL; - - int ibegin = 0; - int iend = 0; - int bucketLen = 0; - int prefixLen = 0; - - if (folderName[0] == '/') { - /* /bucket/prefix */ - ibegin = 1; - iend = find_Nth(folderName, 2, "/"); - } else { - /* bucket/prefix */ - ibegin = 0; - iend = find_Nth(folderName, 1, "/"); - } - - /* get bucket */ - bucketLen = iend - ibegin; - if (bucketLen <= 0) { - invalid_element = "bucket"; - goto FETCH_URL_ERROR2; - } - - *bucket = (char *)palloc0(bucketLen + 1); - rc = strncpy_s((*bucket), (bucketLen + 1), (folderName + ibegin), bucketLen); - securec_check(rc, "", ""); - - /* get prefix */ - prefixLen = strlen(folderName) - (iend + 1); - if (prefixLen < 0) { - invalid_element = "prefix"; - goto FETCH_URL_ERROR2; - } - - *prefix = (char *)palloc0(prefixLen + 1); - rc = strncpy_s((*prefix), (prefixLen + 1), (folderName + iend + 1), prefixLen); - securec_check(rc, "", ""); - - return; - -FETCH_URL_ERROR2: - ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_OPTION_DATA), - errmsg("OBS URL's %s is not valid '%s'", invalid_element, folderName))); -} - /* * @Description: check obs server adress is valid * @IN hostName: obs server adress @@ -1818,8 +1813,23 @@ static void fillBucketContext(OBSReadWriteHandler *handler, const char* key, Arc } /* Fill in obs full file path */ - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_obs->archive_prefix, key); - securec_check_ss(rc, "\0", "\0"); + if (strncmp(key, "global_barrier_records", headerLen) != 0) { + rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", archive_obs->archive_prefix, key); + securec_check_ss(rc, "\0", "\0"); + } else { + char pathPrefix[MAXPGPATH] = {0}; + rc = strcpy_s(pathPrefix, MAXPGPATH, archive_obs->archive_prefix); + securec_check_ss(rc, "\0", "\0"); + if (!IS_PGXC_COORDINATOR) { + char *p = strrchr(pathPrefix, '/'); + if (p == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid"))); + } + *p = '\0'; + } + rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, key); + securec_check_ss(rc, "\0", "\0"); + } handler->m_object_info.key = pstrdup(xlogfpath); handler->m_object_info.version_id = NULL; @@ -1945,7 +1955,7 @@ void fillObsOption(obs_options *option, ArchiveConfig *obs_config = NULL) archive_obs = getArchiveConfig(); } - if (archive_obs == NULL) { + if (archive_obs == NULL || archive_obs->conn_config == NULL || archive_obs->conn_config->obs_address == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Cannot get obs bucket config from replication slots"))); } @@ -2518,9 +2528,23 @@ bool checkOBSFileExist(const char* file_path, ArchiveConfig *obs_config) &head_properties_callback, &head_complete_callback }; - - rc = snprintf_s(obsFilePath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", obs_config->archive_prefix, file_path); - securec_check_ss(rc, "\0", "\0"); + if (strncmp(file_path, "global_barrier_records", headerLen) != 0) { + rc = snprintf_s(obsFilePath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", obs_config->archive_prefix, file_path); + securec_check_ss(rc, "\0", "\0"); + } else { + char pathPrefix[MAXPGPATH] = {0}; + rc = strcpy_s(pathPrefix, MAXPGPATH, obs_config->archive_prefix); + securec_check_ss(rc, "\0", "\0"); + if (!IS_PGXC_COORDINATOR) { + char *p = strrchr(pathPrefix, '/'); + if (p == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid"))); + } + *p = '\0'; + } + rc = snprintf_s(obsFilePath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, file_path); + 
@@ -1945,7 +1955,7 @@ void fillObsOption(obs_options *option, ArchiveConfig *obs_config = NULL)
         archive_obs = getArchiveConfig();
     }
 
-    if (archive_obs == NULL) {
+    if (archive_obs == NULL || archive_obs->conn_config == NULL || archive_obs->conn_config->obs_address == NULL) {
         ereport(ERROR,
             (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Cannot get obs bucket config from replication slots")));
     }
@@ -2518,9 +2528,23 @@ bool checkOBSFileExist(const char* file_path, ArchiveConfig *obs_config)
         &head_properties_callback,
         &head_complete_callback
     };
-
-    rc = snprintf_s(obsFilePath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", obs_config->archive_prefix, file_path);
-    securec_check_ss(rc, "\0", "\0");
+    if (strncmp(file_path, "global_barrier_records", headerLen) != 0) {
+        rc = snprintf_s(obsFilePath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", obs_config->archive_prefix, file_path);
+        securec_check_ss(rc, "\0", "\0");
+    } else {
+        char pathPrefix[MAXPGPATH] = {0};
+        rc = strcpy_s(pathPrefix, MAXPGPATH, obs_config->archive_prefix);
+        securec_check_ss(rc, "\0", "\0");
+        if (!IS_PGXC_COORDINATOR) {
+            char *p = strrchr(pathPrefix, '/');
+            if (p == NULL) {
+                ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Obs path prefix is invalid")));
+            }
+            *p = '\0';
+        }
+        rc = snprintf_s(obsFilePath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", pathPrefix, file_path);
+        securec_check_ss(rc, "\0", "\0");
+    }
 
     ereport(DEBUG1, (errmsg("[OBS] before ListObjects check OBS file %s exist", obsFilePath)));
 
@@ -2668,3 +2692,4 @@ bool DownloadOneItemFromOBS(char* netBackupPath, char* localPath, ArchiveConfig
     pfree_ext(metadataBuffer);
     return true;
 }
+#endif
diff --git a/src/gausskernel/storage/access/redo/redo_barrier.cpp b/src/gausskernel/storage/access/redo/redo_barrier.cpp
index b99c5e01f..c9fb8ee9f 100644
--- a/src/gausskernel/storage/access/redo/redo_barrier.cpp
+++ b/src/gausskernel/storage/access/redo/redo_barrier.cpp
@@ -41,10 +41,10 @@
 #include "tcop/dest.h"
 #include "securec_check.h"
 
-void XLogRecSetBarrierState(XLogBlockBarrierParse *blockbarrierstate, XLogRecPtr startptr, XLogRecPtr endptr)
+void XLogRecSetBarrierState(XLogBlockBarrierParse *blockbarrierstate, char *mainData, Size len)
 {
-    blockbarrierstate->startptr = startptr;
-    blockbarrierstate->endptr = endptr;
+    blockbarrierstate->maindata = mainData;
+    blockbarrierstate->maindatalen = len;
 }
 
 XLogRecParseState *barrier_redo_parse_to_block(XLogReaderState *record, uint32 *blocknum)
@@ -63,8 +63,8 @@ XLogRecParseState *barrier_redo_parse_to_block(XLogReaderState *record, uint32 *
     filenode = RelFileNodeForkNumFill(NULL, InvalidBackendId, forknum, lowblknum);
     XLogRecSetBlockCommonState(record, BLOCK_DATA_BARRIER_TYPE, filenode, recordstatehead);
-    XLogRecSetBarrierState(&(recordstatehead->blockparse.extra_rec.blockbarrier), record->ReadRecPtr,
-        record->EndRecPtr);
+    XLogRecSetBarrierState(&(recordstatehead->blockparse.extra_rec.blockbarrier), XLogRecGetData(record),
+        XLogRecGetDataLen(record));
     recordstatehead->isFullSync = record->isFullSync;
     return recordstatehead;
 }
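With the redo_barrier.cpp change above, the barrier parse state carries the record's payload (via XLogRecGetData/XLogRecGetDataLen), that is, the barrier identifier itself, instead of just the record's start and end LSNs. A minimal model of the old and new shapes (struct and field names are abbreviated stand-ins, and the sample barrier id is invented; the real definitions live with XLogBlockBarrierParse in the xlogproc headers):

```
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtrSketch;

/* Old shape: only where the barrier record sat in the WAL stream. */
typedef struct {
    XLogRecPtrSketch startptr;
    XLogRecPtrSketch endptr;
} BarrierParseOld;

/* New shape: the record's main data, i.e. the barrier id itself. */
typedef struct {
    char *maindata;
    size_t maindatalen;
} BarrierParseNew;

/* Mirrors the rewritten XLogRecSetBarrierState() above. */
static void set_barrier_state(BarrierParseNew *state, char *main_data, size_t len)
{
    state->maindata = main_data;
    state->maindatalen = len;
}

int main(void)
{
    char id[] = "hadr_00000001_000000000000000a"; /* illustrative only */
    BarrierParseNew s;
    set_barrier_state(&s, id, sizeof(id) - 1);
    printf("barrier id: %.*s\n", (int)s.maindatalen, s.maindata);
    return 0;
}
```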
diff --git a/src/gausskernel/storage/access/redo/redo_bufpage.cpp b/src/gausskernel/storage/access/redo/redo_bufpage.cpp
index d67d771d2..7dbcbb4a3 100644
--- a/src/gausskernel/storage/access/redo/redo_bufpage.cpp
+++ b/src/gausskernel/storage/access/redo/redo_bufpage.cpp
@@ -611,7 +611,7 @@ bool PageFreeDict(Page page)
  * The page is PageInit'd with the same special-space size as the
  * given page, and the special space is copied from the given page.
  */
-Page PageGetTempPageCopySpecial(Page page, bool isbtree)
+Page PageGetTempPageCopySpecial(Page page)
 {
     Size pageSize;
     Page temp;
@@ -622,10 +622,7 @@ Page PageGetTempPageCopySpecial(Page page, bool isbtree)
 
     Assert(!PageIsCompressed(page));
 
-    if (isbtree && PageIs4BXidVersion(page))
-        PageInit(temp, pageSize, PageGetSpecialSize(page) + sizeof(TransactionId));
-    else
-        PageInit(temp, pageSize, PageGetSpecialSize(page));
+    PageInit(temp, pageSize, PageGetSpecialSize(page));
     rc = memcpy_s(PageGetSpecialPointer(temp), PageGetSpecialSize(page), PageGetSpecialPointer(page),
         PageGetSpecialSize(page));
     securec_check(rc, "", "");
diff --git a/src/gausskernel/storage/access/redo/redo_clog.cpp b/src/gausskernel/storage/access/redo/redo_clog.cpp
index 9a6c5dc07..79e2eaa13 100644
--- a/src/gausskernel/storage/access/redo/redo_clog.cpp
+++ b/src/gausskernel/storage/access/redo/redo_clog.cpp
@@ -75,7 +75,7 @@ XLogRecParseState *ClogXlogDdlParseToBlock(XLogReaderState *record, uint32 *bloc
     filenode = RelFileNodeForkNumFill(NULL, InvalidBackendId, forknum, lowblknum);
 
     XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, recordstatehead);
-    XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blockddlrec), ddltype, false,
+    XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blockddlrec), ddltype,
         (char *)XLogRecGetData(record));
 
     return recordstatehead;
diff --git a/src/gausskernel/storage/access/redo/redo_hash.cpp b/src/gausskernel/storage/access/redo/redo_hash.cpp
index 19435ec38..ad7f378f8 100644
--- a/src/gausskernel/storage/access/redo/redo_hash.cpp
+++ b/src/gausskernel/storage/access/redo/redo_hash.cpp
@@ -26,9 +26,7 @@
 #include "knl/knl_variable.h"
 
 #include "access/hash.h"
-#include "access/hash_xlog.h"
 #include "access/relscan.h"
-#include "access/xlogutils.h"
 #include "access/xlogproc.h"
 #include "catalog/index.h"
@@ -39,1312 +37,9 @@
 #include "utils/rel.h"
 #include "utils/rel_gs.h"
 
-static XLogRecParseState *HashXlogInitMetaPageParseBlock(XLogReaderState *record, uint32 *blocknum)
-{
-    XLogRecParseState *recordstatehead = NULL;
-
-    *blocknum = 1;
-    XLogParseBufferAllocListFunc(record, &recordstatehead, NULL);
-    if (recordstatehead == NULL) {
-        return NULL;
-    }
-    XLogRecSetBlockDataState(record, XLOG_HASH_INIT_META_PAGE_NUM, recordstatehead);
-
-    return recordstatehead;
-}
-
-static XLogRecParseState *HashXlogInitBitmapPageParseBlock(XLogReaderState *record, uint32 *blocknum)
-{
-    XLogRecParseState *recordstatehead = NULL;
-    XLogParseBufferAllocListFunc(record, &recordstatehead, NULL);
-    if (recordstatehead == NULL) {
-        return NULL;
-    }
-    XLogRecSetBlockDataState(record, XLOG_HASH_INIT_BITMAP_PAGE_BITMAP_NUM, recordstatehead);
-
-    XLogRecParseState *blockstate = NULL;
-    XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead);
-    if (blockstate == NULL) {
-        return NULL;
-    }
-    XLogRecSetBlockDataState(record, XLOG_HASH_INIT_BITMAP_PAGE_META_NUM, blockstate);
-
-    *blocknum = 2;
-    return recordstatehead;
-}
-
-static XLogRecParseState *HashXlogInsertParseBlock(XLogReaderState *record, uint32 *blocknum)
-{
-    XLogRecParseState *recordstatehead = NULL;
-    XLogParseBufferAllocListFunc(record, &recordstatehead, NULL);
-    if (recordstatehead == NULL) {
-        return NULL;
-    }
-    XLogRecSetBlockDataState(record, XLOG_HASH_INSERT_PAGE_NUM, recordstatehead);
-
-    XLogRecParseState *blockstate = NULL;
-    XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead);
-    if (blockstate == NULL) {
-        return NULL;
-    }
-    XLogRecSetBlockDataState(record,
XLOG_HASH_INSERT_META_NUM, blockstate); - - *blocknum = 2; - return recordstatehead; -} - -static XLogRecParseState *HashXlogAddOvflPageParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - BlockNumber leftblk; - BlockNumber rightblk; - - XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk); - XLogRecGetBlockTag(record, 1, NULL, NULL, &leftblk); - - XLogRecParseState *recordstatehead = NULL; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_ADD_OVFL_PAGE_OVFL_NUM, recordstatehead); - XLogRecSetAuxiBlkNumState(&recordstatehead->blockparse.extra_rec.blockdatarec, leftblk, InvalidForkNumber); - - XLogRecParseState *blockstate = NULL; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_ADD_OVFL_PAGE_LEFT_NUM, blockstate); - XLogRecSetAuxiBlkNumState(&blockstate->blockparse.extra_rec.blockdatarec, rightblk, InvalidForkNumber); - - *blocknum = 2; - - if (XLogRecHasBlockRef(record, XLOG_HASH_ADD_OVFL_PAGE_MAP_NUM)) { - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_ADD_OVFL_PAGE_MAP_NUM, blockstate); - } - - if (XLogRecHasBlockRef(record, XLOG_HASH_ADD_OVFL_PAGE_NEWMAP_NUM)) { - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_ADD_OVFL_PAGE_NEWMAP_NUM, blockstate); - } - - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_ADD_OVFL_PAGE_META_NUM, blockstate); - - return recordstatehead; -} - -static XLogRecParseState *HashXlogSplitAllocatePageParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_SPLIT_ALLOCATE_PAGE_OBUK_NUM, recordstatehead); - - XLogRecParseState *blockstate = NULL; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_SPLIT_ALLOCATE_PAGE_NBUK_NUM, blockstate); - - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_SPLIT_ALLOCATE_PAGE_META_NUM, blockstate); - - *blocknum = 3; - return recordstatehead; -} - -static XLogRecParseState *HashXlogSplitPageParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_SPLIT_PAGE_NUM, recordstatehead); - - *blocknum = 1; - return recordstatehead; -} - -static XLogRecParseState *HashXlogSplitCompleteParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_SPLIT_COMPLETE_OBUK_NUM, recordstatehead); - - XLogRecParseState 
*blockstate = NULL; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, XLOG_HASH_SPLIT_COMPLETE_NBUK_NUM, blockstate); - - *blocknum = 2; - return recordstatehead; -} - -static XLogRecParseState *HashXlogMovePageContentsParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - XLogRecParseState *blockstate = NULL; - - xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) XLogRecGetData(record); - - *blocknum = 1; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - - if (xldata->is_prim_bucket_same_wrt) { - XLogRecSetBlockDataState(record, HASH_MOVE_ADD_BLOCK_NUM, recordstatehead); - } else { - XLogRecParseState *blockstate = NULL; - XLogRecSetBlockDataState(record, HASH_MOVE_BUK_BLOCK_NUM, recordstatehead); - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - - XLogRecSetBlockDataState(record, HASH_MOVE_ADD_BLOCK_NUM, blockstate); - (*blocknum)++; - } - - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_MOVE_DELETE_OVFL_BLOCK_NUM, blockstate); - (*blocknum)++; - - return recordstatehead; -} - -static XLogRecParseState *HashXlogSqueezePageParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - XLogRecParseState *blockstate = NULL; - xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record); - - *blocknum = 1; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - - if (xldata->is_prim_bucket_same_wrt) { - XLogRecSetBlockDataState(record, HASH_SQUEEZE_ADD_BLOCK_NUM, recordstatehead); - } else { - XLogRecSetBlockDataState(record, HASH_SQUEEZE_BUK_BLOCK_NUM, recordstatehead); - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SQUEEZE_ADD_BLOCK_NUM, blockstate); - (*blocknum)++; - } - - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SQUEEZE_INIT_OVFLBUF_BLOCK_NUM, blockstate); - - if (!xldata->is_prev_bucket_same_wrt) { - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SQUEEZE_UPDATE_PREV_BLOCK_NUM, blockstate); - } - - if (XLogRecHasBlockRef(record, HASH_SQUEEZE_UPDATE_NEXT_BLOCK_NUM)) { - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SQUEEZE_UPDATE_NEXT_BLOCK_NUM, blockstate); - } - - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SQUEEZE_UPDATE_BITMAP_BLOCK_NUM, blockstate); - - if (XLogRecHasBlockRef(record, HASH_SQUEEZE_UPDATE_META_BLOCK_NUM)) { - (*blocknum)++; - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SQUEEZE_UPDATE_META_BLOCK_NUM, blockstate); - } - - return 
recordstatehead; -} - -static XLogRecParseState *HashXlogDeleteParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - xl_hash_delete *xldata = (xl_hash_delete *)XLogRecGetData(record); - - *blocknum = 1; - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - - if (xldata->is_primary_bucket_page) { - XLogRecSetBlockDataState(record, HASH_DELETE_OVFL_BLOCK_NUM, recordstatehead); - } else { - XLogRecParseState *blockstate = NULL; - XLogRecSetBlockDataState(record, HASH_DELETE_BUK_BLOCK_NUM, recordstatehead); - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - - XLogRecSetBlockDataState(record, HASH_DELETE_OVFL_BLOCK_NUM, blockstate); - (*blocknum)++; - } - - return recordstatehead; -} - -static XLogRecParseState *HashXlogSplitCleanupParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_SPLIT_CLEANUP_BLOCK_NUM, recordstatehead); - - *blocknum = 1; - return recordstatehead; -} - -static XLogRecParseState *HashXlogUpdateMetaPageParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_UPDATE_META_BLOCK_NUM, recordstatehead); - - *blocknum = 1; - return recordstatehead; -} - -static XLogRecParseState *HashXlogVacuumOnePageParseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - XLogRecParseState *blockstate = NULL; - - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_VACUUM_PAGE_BLOCK_NUM, recordstatehead); - - XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); - if (blockstate == NULL) { - return NULL; - } - XLogRecSetBlockDataState(record, HASH_VACUUM_META_BLOCK_NUM, blockstate); - - *blocknum = 2; - - return recordstatehead; -} - XLogRecParseState *HashRedoParseToBlock(XLogReaderState *record, uint32 *blocknum) { - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - XLogRecParseState *recordblockstate = NULL; - *blocknum = 0; - switch (info) { - case XLOG_HASH_INIT_META_PAGE: - recordblockstate = HashXlogInitMetaPageParseBlock(record, blocknum); - break; - case XLOG_HASH_INIT_BITMAP_PAGE: - recordblockstate = HashXlogInitBitmapPageParseBlock(record, blocknum); - break; - case XLOG_HASH_INSERT: - recordblockstate = HashXlogInsertParseBlock(record, blocknum); - break; - case XLOG_HASH_ADD_OVFL_PAGE: - recordblockstate = HashXlogAddOvflPageParseBlock(record, blocknum); - break; - case XLOG_HASH_SPLIT_ALLOCATE_PAGE: - recordblockstate = HashXlogSplitAllocatePageParseBlock(record, blocknum); - break; - case XLOG_HASH_SPLIT_PAGE: - recordblockstate = HashXlogSplitPageParseBlock(record, blocknum); - break; - case XLOG_HASH_SPLIT_COMPLETE: - recordblockstate = HashXlogSplitCompleteParseBlock(record, blocknum); - break; - case XLOG_HASH_MOVE_PAGE_CONTENTS: - recordblockstate = HashXlogMovePageContentsParseBlock(record, blocknum); - break; - case XLOG_HASH_SQUEEZE_PAGE: - recordblockstate = HashXlogSqueezePageParseBlock(record, blocknum); - break; - case 
XLOG_HASH_DELETE: - recordblockstate = HashXlogDeleteParseBlock(record, blocknum); - break; - case XLOG_HASH_SPLIT_CLEANUP: - recordblockstate = HashXlogSplitCleanupParseBlock(record, blocknum); - break; - case XLOG_HASH_UPDATE_META_PAGE: - recordblockstate = HashXlogUpdateMetaPageParseBlock(record, blocknum); - break; - case XLOG_HASH_VACUUM_ONE_PAGE: - recordblockstate = HashXlogVacuumOnePageParseBlock(record, blocknum); - break; - default: - ereport(PANIC, (errmsg("hash_redo_block: unknown op code %u", info))); - } - - return recordblockstate; + ereport(PANIC, (errmsg("HashRedoParseToBlock: unimplemented"))); + return NULL; } - -void HashRedoInitMetaPageOperatorPage(RedoBufferInfo *metabuf, void *recorddata) -{ - xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *)recorddata; - _hash_init_metabuffer(metabuf->buf, xlrec->num_tuples, xlrec->procid, xlrec->ffactor, true); - PageSetLSN(metabuf->pageinfo.page, metabuf->lsn); -} - -void HashRedoInitBitmapPageOperatorBitmapPage(RedoBufferInfo *bitmapbuf, void *recorddata) -{ - xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *)recorddata; - _hash_initbitmapbuffer(bitmapbuf->buf, xlrec->bmsize, true); - PageSetLSN(bitmapbuf->pageinfo.page, bitmapbuf->lsn); -} - -void HashRedoInitBitmapPageOperatorMetaPage(RedoBufferInfo *metabuf) -{ - uint32 num_buckets; - HashMetaPage metap; - - metap = HashPageGetMeta(metabuf->pageinfo.page); - num_buckets = metap->hashm_maxbucket + 1; - metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1; - metap->hashm_nmaps++; - - PageSetLSN(metabuf->pageinfo.page, metabuf->lsn); -} - -void HashRedoInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, void *data, Size datalen) -{ - xl_hash_insert *xlrec = (xl_hash_insert *)recorddata; - Page page = buffer->pageinfo.page; - char *datapos = (char *)data; - - if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum, false, false) == InvalidOffsetNumber) { - ereport(PANIC, (errmsg("hash_xlog_insert: failed to add item"))); - } - - PageSetLSN(page, buffer->lsn); -} - -void HashRedoInsertOperatorMetaPage(RedoBufferInfo *metabuf) -{ - HashMetaPage metap; - - metap = HashPageGetMeta(metabuf->pageinfo.page); - metap->hashm_ntuples += 1; - - PageSetLSN(metabuf->pageinfo.page, metabuf->lsn); -} - -void HashRedoAddOvflPageOperatorOvflPage(RedoBufferInfo *ovflbuf, BlockNumber leftblk, void *data, Size datalen) -{ - Page ovflpage; - HashPageOpaque ovflopaque; - uint32 *num_bucket; - - num_bucket = (uint32 *)data; - Assert(datalen == sizeof(uint32)); - _hash_initbuf(ovflbuf->buf, InvalidBlockNumber, *num_bucket, LH_OVERFLOW_PAGE, true); - /* update backlink */ - ovflpage = ovflbuf->pageinfo.page; - ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); - ovflopaque->hasho_prevblkno = leftblk; - - PageSetLSN(ovflpage, ovflbuf->lsn); -} - -void HashRedoAddOvflPageOperatorLeftPage(RedoBufferInfo *leftbuf, BlockNumber rightblk) -{ - Page leftpage; - HashPageOpaque leftopaque; - - leftpage = leftbuf->pageinfo.page; - leftopaque = (HashPageOpaque) PageGetSpecialPointer(leftpage); - leftopaque->hasho_nextblkno = rightblk; - - PageSetLSN(leftpage, leftbuf->lsn); -} - -void HashRedoAddOvflPageOperatorMapPage(RedoBufferInfo *mapbuf, void *data) -{ - uint32 *bitmap_page_bit = (uint32 *)data; - Page mappage = mapbuf->pageinfo.page; - uint32 *freep = NULL; - - freep = HashPageGetBitmap(mappage); - SETBIT(freep, *bitmap_page_bit); - - PageSetLSN(mappage, mapbuf->lsn); -} - -void HashRedoAddOvflPageOperatorNewmapPage(RedoBufferInfo *newmapbuf, void *recorddata) 
-{ - xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *)recorddata; - - _hash_initbitmapbuffer(newmapbuf->buf, xlrec->bmsize, true); - - PageSetLSN(newmapbuf->pageinfo.page, newmapbuf->lsn); -} - -void HashRedoAddOvflPageOperatorMetaPage(RedoBufferInfo *metabuf, void *recorddata, void *data, Size datalen) -{ - HashMetaPage metap; - uint32 firstfree_ovflpage; - BlockNumber *newmapblk = NULL; - xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *)recorddata; - errno_t rc = EOK; - - rc = memcpy_s(&firstfree_ovflpage, sizeof(uint32), data, sizeof(uint32)); - securec_check(rc, "", ""); - metap = HashPageGetMeta(metabuf->pageinfo.page); - metap->hashm_firstfree = firstfree_ovflpage; - - if (!xlrec->bmpage_found) { - metap->hashm_spares[metap->hashm_ovflpoint]++; - - if (datalen > sizeof(uint32)) { - Assert(datalen == sizeof(uint32) + sizeof(BlockNumber)); - - newmapblk = (BlockNumber *)((char *)data + sizeof(uint32)); - Assert(BlockNumberIsValid(*newmapblk)); - - metap->hashm_mapp[metap->hashm_nmaps] = *newmapblk; - metap->hashm_nmaps++; - metap->hashm_spares[metap->hashm_ovflpoint]++; - } - } - - PageSetLSN(metabuf->pageinfo.page, metabuf->lsn); -} - -void HashRedoSplitAllocatePageOperatorObukPage(RedoBufferInfo *oldbukbuf, void *recorddata) -{ - Page oldpage; - HashPageOpaque oldopaque; - xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *)recorddata; - - oldpage = oldbukbuf->pageinfo.page; - oldopaque = (HashPageOpaque) PageGetSpecialPointer(oldpage); - - oldopaque->hasho_flag = xlrec->old_bucket_flag; - oldopaque->hasho_prevblkno = xlrec->new_bucket; - - PageSetLSN(oldpage, oldbukbuf->lsn); -} - -void HashRedoSplitAllocatePageOperatorNbukPage(RedoBufferInfo *newbukbuf, void *recorddata) -{ - xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *)recorddata; - - _hash_initbuf(newbukbuf->buf, xlrec->new_bucket, xlrec->new_bucket, xlrec->new_bucket_flag, true); - - PageSetLSN(newbukbuf->pageinfo.page, newbukbuf->lsn); -} - -void HashRedoSplitAllocatePageOperatorMetaPage(RedoBufferInfo *metabuf, void *recorddata, void *blkdata) -{ - HashMetaPage metap; - char *data = (char *)blkdata; - xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *)recorddata; - - metap = HashPageGetMeta(metabuf->pageinfo.page); - metap->hashm_maxbucket = xlrec->new_bucket; - - if (xlrec->flags & XLH_SPLIT_META_UPDATE_MASKS) { - uint32 lowmask; - uint32 *highmask = NULL; - errno_t rc = EOK; - - /* extract low and high masks. */ - rc = memcpy_s(&lowmask, sizeof(uint32), data, sizeof(uint32)); - securec_check(rc, "", ""); - highmask = (uint32 *)((char *)data + sizeof(uint32)); - - /* update metapage */ - metap->hashm_lowmask = lowmask; - metap->hashm_highmask = *highmask; - - data += sizeof(uint32) * 2; - } - - if (xlrec->flags & XLH_SPLIT_META_UPDATE_SPLITPOINT) { - uint32 ovflpoint; - uint32 *ovflpages = NULL; - errno_t rc = EOK; - - /* extract information of overflow pages. 
*/ - rc = memcpy_s(&ovflpoint, sizeof(uint32), data, sizeof(uint32)); - securec_check(rc, "", ""); - ovflpages = (uint32 *)((char *)data + sizeof(uint32)); - - /* update metapage */ - metap->hashm_spares[ovflpoint] = *ovflpages; - metap->hashm_ovflpoint = ovflpoint; - } - - PageSetLSN(metabuf->pageinfo.page, metabuf->lsn); -} - -void HashRedoSplitCompleteOperatorObukPage(RedoBufferInfo *oldbukbuf, void *recorddata) -{ - Page oldpage; - HashPageOpaque oldopaque; - xl_hash_split_complete *xlrec = (xl_hash_split_complete *)recorddata; - - oldpage = oldbukbuf->pageinfo.page; - oldopaque = (HashPageOpaque) PageGetSpecialPointer(oldpage); - oldopaque->hasho_flag = xlrec->old_bucket_flag; - - PageSetLSN(oldpage, oldbukbuf->lsn); -} - -void HashRedoSplitCompleteOperatorNbukPage(RedoBufferInfo *newbukbuf, void *recorddata) -{ - Page newpage; - HashPageOpaque newopaque; - xl_hash_split_complete *xlrec = (xl_hash_split_complete *)recorddata; - - newpage = newbukbuf->pageinfo.page; - newopaque = (HashPageOpaque) PageGetSpecialPointer(newpage); - newopaque->hasho_flag = xlrec->new_bucket_flag; - - PageSetLSN(newpage, newbukbuf->lsn); -} - -void HashXlogMoveAddPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, void *blkdata, Size len) -{ - Page writepage = redobuffer->pageinfo.page;; - char *begin = (char *)blkdata; - char *data = (char *)blkdata; - Size datalen = len; - uint16 ninserted = 0; - - xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) (recorddata); - - if (xldata->ntups > 0) { - OffsetNumber *towrite = (OffsetNumber *) data; - - data += sizeof(OffsetNumber) * xldata->ntups; - - while ((Size)(data - begin) < datalen) { - IndexTuple itup = (IndexTuple) data; - Size itemsz; - OffsetNumber l; - - itemsz = IndexTupleDSize(*itup); - itemsz = MAXALIGN(itemsz); - - data += itemsz; - - l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false); - if (l == InvalidOffsetNumber) - elog(ERROR, "hash_xlog_move_page_contents: failed to add item to hash index page, size %d bytes", - (int) itemsz); - - ninserted++; - } - } - - /* - * number of tuples inserted must be same as requested in REDO record. 
- */ - Assert(ninserted == xldata->ntups); - - PageSetLSN(writepage, redobuffer->lsn); -} - -void HashXlogMoveDeleteOvflPageOperatorPage(RedoBufferInfo *redobuffer, void *blkdata, Size len) -{ - Page page = redobuffer->pageinfo.page;; - char *data = (char *)blkdata; - Size datalen = len; - - if (datalen > 0) { - OffsetNumber *unused; - OffsetNumber *unend; - - unused = (OffsetNumber *) data; - unend = (OffsetNumber *) ((char *) data + len); - - if ((unend - unused) > 0) - PageIndexMultiDelete(page, unused, unend - unused); - } - - PageSetLSN(page, redobuffer->lsn); -} - -/* adding item to overflow buffer(writepage) from free overflowpage */ -void HashXlogSqueezeAddPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, void *blkdata, Size len) -{ - Page writepage = redobuffer->pageinfo.page; - char *begin = (char *)blkdata; - char *data = (char *)blkdata; - Size datalen = len; - uint16 ninserted = 0; - - xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) (recorddata); - - if (xldata->ntups > 0) { - OffsetNumber *towrite = (OffsetNumber *) data; - - data += sizeof(OffsetNumber) * xldata->ntups; - - while ((Size)(data - begin) < datalen) { - IndexTuple itup = (IndexTuple) data; - Size itemsz; - OffsetNumber l; - - itemsz = IndexTupleDSize(*itup); - itemsz = MAXALIGN(itemsz); - - data += itemsz; - - l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false); - if (l == InvalidOffsetNumber) - elog(ERROR, "hash_xlog_squeeze_page: failed to add item to hash index page, size %d bytes", - (int) itemsz); - - ninserted++; - } - } - - /* - * number of tuples inserted must be same as requested in REDO record. - */ - Assert(ninserted == xldata->ntups); - - /* - * if the page on which are adding tuples is a page previous to freed - * overflow page, then update its nextblkno. 
- */ - if (xldata->is_prev_bucket_same_wrt) { - HashPageOpaque writeopaque = (HashPageOpaque) PageGetSpecialPointer(writepage); - - writeopaque->hasho_nextblkno = xldata->nextblkno; - } - - PageSetLSN(writepage, redobuffer->lsn); -} - -/* initializing free overflow page */ -void HashXlogSqueezeInitOvflbufOperatorPage(RedoBufferInfo *redobuffer, void *recorddata) -{ - Page ovflpage; - HashPageOpaque ovflopaque; - - ovflpage = redobuffer->pageinfo.page; - - _hash_pageinit(ovflpage, BufferGetPageSize(redobuffer->buf)); - - ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); - - ovflopaque->hasho_prevblkno = InvalidBlockNumber; - ovflopaque->hasho_nextblkno = InvalidBlockNumber; - ovflopaque->hasho_bucket = InvalidBucket; - ovflopaque->hasho_flag = LH_UNUSED_PAGE; - ovflopaque->hasho_page_id = HASHO_PAGE_ID; - - PageSetLSN(ovflpage, redobuffer->lsn); -} - -void HashXlogSqueezeUpdatePrevPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata) -{ - xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) (recorddata); - - Page prevpage = redobuffer->pageinfo.page; - HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage); - - prevopaque->hasho_nextblkno = xldata->nextblkno; - - PageSetLSN(prevpage, redobuffer->lsn); -} - -void HashXlogSqueezeUpdateNextPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata) -{ - xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) (recorddata); - - Page nextpage = redobuffer->pageinfo.page; - HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage); - - nextopaque->hasho_prevblkno = xldata->prevblkno; - - PageSetLSN(nextpage, redobuffer->lsn); -} - -void HashXlogSqueezeUpdateBitmapOperatorPage(RedoBufferInfo *redobuffer, void *blkdata) -{ - Page mappage = redobuffer->pageinfo.page; - uint32 *freep = NULL; - char *data = (char *)blkdata; - uint32 *bitmap_page_bit; - - freep = HashPageGetBitmap(mappage); - - bitmap_page_bit = (uint32 *) data; - - CLRBIT(freep, *bitmap_page_bit); - - PageSetLSN(mappage, redobuffer->lsn); -} - -void HashXlogSqueezeUpdateMateOperatorPage(RedoBufferInfo *redobuffer, void *blkdata) -{ - HashMetaPage metap; - Page page = redobuffer->pageinfo.page; - char *data = (char *)blkdata; - uint32 *firstfree_ovflpage; - - firstfree_ovflpage = (uint32 *) data; - - metap = HashPageGetMeta(page); - metap->hashm_firstfree = *firstfree_ovflpage; - - PageSetLSN(page, redobuffer->lsn); -} - -void HashXlogDeleteBlockOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, void *blkdata, Size len) -{ - xl_hash_delete *xldata = (xl_hash_delete *)(recorddata); - - Page page = redobuffer->pageinfo.page; - char *datapos = (char *)blkdata; - - if (len > 0) { - OffsetNumber *unused; - OffsetNumber *unend; - - unused = (OffsetNumber *) datapos; - unend = (OffsetNumber *) ((char *) datapos + len); - - if ((unend - unused) > 0) { - PageIndexMultiDelete(page, unused, unend - unused); - } - } - - /* - * Mark the page as not containing any LP_DEAD items only if - * clear_dead_marking flag is set to true. See comments in - * hashbucketcleanup() for details. 
- */ - if (xldata->clear_dead_marking) { - HashPageOpaque pageopaque; - - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); - pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES; - } - - PageSetLSN(page, redobuffer->lsn); -} - -void HashXlogSplitCleanupOperatorPage(RedoBufferInfo *redobuffer) -{ - Page page; - HashPageOpaque bucket_opaque; - - page = redobuffer->pageinfo.page; - bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page); - - /* cleanup flag for finished split */ - bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP; - - PageSetLSN(page, redobuffer->lsn); -} - -void HashXlogUpdateMetaOperatorPage(RedoBufferInfo *redobuffer, void *recorddata) -{ - Page page; - HashMetaPage metap; - xl_hash_update_meta_page *xldata = (xl_hash_update_meta_page *) (recorddata); - - page = redobuffer->pageinfo.page; - metap = HashPageGetMeta(page); - - metap->hashm_ntuples = xldata->ntuples; - - PageSetLSN(page, redobuffer->lsn); - -} - -void HashXlogVacuumOnePageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, Size len) -{ - Page page = redobuffer->pageinfo.page; - xl_hash_vacuum_one_page *xldata; - HashPageOpaque pageopaque; - - xldata = (xl_hash_vacuum_one_page *) (recorddata); - - if (len > SizeOfHashVacuumOnePage) { - OffsetNumber *unused; - - unused = (OffsetNumber *) ((char *) xldata + SizeOfHashVacuumOnePage); - - PageIndexMultiDelete(page, unused, xldata->ntuples); - } - - /* - * Mark the page as not containing any LP_DEAD items. See comments in - * _hash_vacuum_one_page() for details. - */ - pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); - pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES; - - PageSetLSN(page, redobuffer->lsn); -} - -void HashXlogVacuumMateOperatorPage(RedoBufferInfo *redobuffer, void *recorddata) -{ - Page metapage; - HashMetaPage metap; - xl_hash_vacuum_one_page *xldata; - xldata = (xl_hash_vacuum_one_page *) (recorddata); - - metapage = redobuffer->pageinfo.page; - metap = HashPageGetMeta(metapage); - - metap->hashm_ntuples -= xldata->ntuples; - - PageSetLSN(metapage, redobuffer->lsn); -} - -static void HashXlogInitMetaPageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_INIT_META_PAGE_NUM) { - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - HashRedoInitMetaPageOperatorPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - if (blockhead->forknum == INIT_FORKNUM) { - FlushOneBuffer(bufferinfo->buf); - } - } -} - -static void HashXlogInitBitmapPageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - bool modifypage = false; - if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_INIT_BITMAP_PAGE_BITMAP_NUM) { - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - HashRedoInitBitmapPageOperatorBitmapPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - modifypage = true; - } else { - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashRedoInitBitmapPageOperatorMetaPage(bufferinfo); - MakeRedoBufferDirty(bufferinfo); - modifypage = true; - } - } - - if (blockhead->forknum == INIT_FORKNUM && modifypage) { - FlushOneBuffer(bufferinfo->buf); - } -} - -static void HashXlogInsertBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - 
XLogBlockDataParse *datadecode = blockdatarec; - - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action != BLK_NEEDS_REDO) { - return; - } - - if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_INSERT_PAGE_NUM) { - Size blkdatalen; - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - char *blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); - - HashRedoInsertOperatorPage(bufferinfo, (void *)maindata, (void *)blkdata, blkdatalen); - } else { - HashRedoInsertOperatorMetaPage(bufferinfo); - } - MakeRedoBufferDirty(bufferinfo); -} - -static void HashXlogAddOvflPageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_ADD_OVFL_PAGE_OVFL_NUM) { - Size blkdatalen; - char *blkdata = NULL; - BlockNumber leftblk; - blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); - leftblk = XLogBlockDataGetAuxiBlock1(datadecode); - - HashRedoAddOvflPageOperatorOvflPage(bufferinfo, leftblk, blkdata, blkdatalen); - MakeRedoBufferDirty(bufferinfo); - } else if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_ADD_OVFL_PAGE_LEFT_NUM) { - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - BlockNumber rightblk = XLogBlockDataGetAuxiBlock1(datadecode); - HashRedoAddOvflPageOperatorLeftPage(bufferinfo, rightblk); - MakeRedoBufferDirty(bufferinfo); - } - } else if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_ADD_OVFL_PAGE_MAP_NUM) { - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - char *blkdata = XLogBlockDataGetBlockData(datadecode, NULL); - HashRedoAddOvflPageOperatorMapPage(bufferinfo, blkdata); - MakeRedoBufferDirty(bufferinfo); - } - } else if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_ADD_OVFL_PAGE_NEWMAP_NUM) { - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - HashRedoAddOvflPageOperatorNewmapPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - - } else { - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - Size blkdatalen; - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - char *blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); - - HashRedoAddOvflPageOperatorMetaPage(bufferinfo, maindata, blkdata, blkdatalen); - MakeRedoBufferDirty(bufferinfo); - } - } -} - -static void HashXlogSplitAllocatePageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_SPLIT_ALLOCATE_PAGE_OBUK_NUM) { - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO || action == BLK_RESTORED) { - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - HashRedoSplitAllocatePageOperatorObukPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - } - } else if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_SPLIT_ALLOCATE_PAGE_NBUK_NUM) { - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - HashRedoSplitAllocatePageOperatorNbukPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - } else { - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - char *maindata = 
XLogBlockDataGetMainData(datadecode, NULL); - char *blkdata = XLogBlockDataGetBlockData(datadecode, NULL); - HashRedoSplitAllocatePageOperatorMetaPage(bufferinfo, maindata, blkdata); - MakeRedoBufferDirty(bufferinfo); - } - } -} - -static void HashXlogSplitPageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action != BLK_RESTORED) { - ereport(ERROR, (errmsg("Hash split record did not contain a full-page image"))); - } - MakeRedoBufferDirty(bufferinfo); -} - -static void HashXlogSplitCompleteBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action != BLK_NEEDS_REDO && action != BLK_RESTORED) { - return; - } - - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - if (XLogBlockDataGetBlockId(datadecode) == XLOG_HASH_SPLIT_COMPLETE_OBUK_NUM) { - HashRedoSplitCompleteOperatorObukPage(bufferinfo, maindata); - } else { - HashRedoSplitCompleteOperatorNbukPage(bufferinfo, maindata); - } - MakeRedoBufferDirty(bufferinfo); -} - -static void HashXlogMovePageContentsBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - Size blkdatalen; - char *blkdata = NULL; - blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); - uint8 block_id = XLogBlockDataGetBlockId(datadecode); - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - - if (block_id == HASH_MOVE_BUK_BLOCK_NUM) { - PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); - } - - if (block_id == HASH_MOVE_ADD_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogMoveAddPageOperatorPage(bufferinfo, maindata, blkdata, blkdatalen); - MakeRedoBufferDirty(bufferinfo); - } - } - - if (block_id == HASH_MOVE_DELETE_OVFL_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogMoveDeleteOvflPageOperatorPage(bufferinfo, blkdata, blkdatalen); - MakeRedoBufferDirty(bufferinfo); - } - } -} - -static void HashXlogSqueezePageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - Size blkdatalen; - char *blkdata = NULL; - blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); - uint8 block_id = XLogBlockDataGetBlockId(datadecode); - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - - if (block_id == HASH_SQUEEZE_BUK_BLOCK_NUM) { - PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); - } - - if (block_id == HASH_SQUEEZE_ADD_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogSqueezeAddPageOperatorPage(bufferinfo, maindata, blkdata, blkdatalen); - MakeRedoBufferDirty(bufferinfo); - } - } - - if (block_id == HASH_SQUEEZE_INIT_OVFLBUF_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogSqueezeInitOvflbufOperatorPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - } - } - - if (block_id 
== HASH_SQUEEZE_UPDATE_PREV_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) (maindata); - if (!xldata->is_prev_bucket_same_wrt && action == BLK_NEEDS_REDO) { - HashXlogSqueezeUpdatePrevPageOperatorPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - } - } - - if (block_id == HASH_SQUEEZE_UPDATE_NEXT_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogSqueezeUpdateNextPageOperatorPage(bufferinfo, maindata); - MakeRedoBufferDirty(bufferinfo); - } - } - - if (block_id == HASH_SQUEEZE_UPDATE_BITMAP_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogSqueezeUpdateBitmapOperatorPage(bufferinfo, blkdata); - MakeRedoBufferDirty(bufferinfo); - } - } - - if (block_id == HASH_SQUEEZE_UPDATE_META_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogSqueezeUpdateMateOperatorPage(bufferinfo, blkdata); - MakeRedoBufferDirty(bufferinfo); - } - } -} - -static void HashXlogDeleteBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - uint8 block_id = XLogBlockDataGetBlockId(datadecode); - Size blkdatalen; - char *blkdata = NULL; - blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); - XLogRedoAction action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - - if (block_id == HASH_DELETE_OVFL_BLOCK_NUM) { - if (action == BLK_NEEDS_REDO) { - HashXlogDeleteBlockOperatorPage(bufferinfo, maindata, blkdata, blkdatalen); - MakeRedoBufferDirty(bufferinfo); - } - } else { - PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); - } -} - -static void HashXlogSplitCleanupBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogSplitCleanupOperatorPage(bufferinfo); - MakeRedoBufferDirty(bufferinfo); - } -} - -static void HashXlogUpdateMetaPageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - char *maindata = XLogBlockDataGetMainData(datadecode, NULL); - - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogUpdateMetaOperatorPage(bufferinfo, (void *)maindata); - MakeRedoBufferDirty(bufferinfo); - } -} - -static void HashXlogVacuumOnePageBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - uint8 block_id = XLogBlockDataGetBlockId(datadecode); - Size maindatalen; - char *maindata = XLogBlockDataGetMainData(datadecode, &maindatalen); - - if (block_id == HASH_VACUUM_PAGE_BLOCK_NUM) { - XLogRedoAction action; - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogVacuumOnePageOperatorPage(bufferinfo, (void *)maindata, maindatalen); - MakeRedoBufferDirty(bufferinfo); - } - } else { - XLogRedoAction action; - 
action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (action == BLK_NEEDS_REDO) { - HashXlogVacuumMateOperatorPage(bufferinfo, (void *)maindata); - MakeRedoBufferDirty(bufferinfo); - } - } -} - -void HashRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo) -{ - uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; - - switch (info) { - case XLOG_HASH_INIT_META_PAGE: - HashXlogInitMetaPageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_INIT_BITMAP_PAGE: - HashXlogInitBitmapPageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_INSERT: - HashXlogInsertBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_ADD_OVFL_PAGE: - HashXlogAddOvflPageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_SPLIT_ALLOCATE_PAGE: - HashXlogSplitAllocatePageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_SPLIT_PAGE: - HashXlogSplitPageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_SPLIT_COMPLETE: - HashXlogSplitCompleteBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_MOVE_PAGE_CONTENTS: - HashXlogMovePageContentsBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_SQUEEZE_PAGE: - HashXlogSqueezePageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_DELETE: - HashXlogDeleteBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_SPLIT_CLEANUP: - HashXlogSplitCleanupBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_UPDATE_META_PAGE: - HashXlogUpdateMetaPageBlock(blockhead, blockdatarec, bufferinfo); - break; - case XLOG_HASH_VACUUM_ONE_PAGE: - HashXlogVacuumOnePageBlock(blockhead, blockdatarec, bufferinfo); - break; - default: - ereport(PANIC, (errmsg("hash_redo_block: unknown op code %u", info))); - } -} \ No newline at end of file diff --git a/src/gausskernel/storage/access/redo/redo_heapam.cpp b/src/gausskernel/storage/access/redo/redo_heapam.cpp index 70c2ee177..b76ed0ae9 100755 --- a/src/gausskernel/storage/access/redo/redo_heapam.cpp +++ b/src/gausskernel/storage/access/redo/redo_heapam.cpp @@ -263,7 +263,7 @@ void HeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Transa htup = (HeapTupleHeader)PageGetItem(page, lp); - htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + htup->t_infomask &= ~HEAP_XMAX_BITS; htup->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); HeapTupleHeaderClearHotUpdated(htup); @@ -519,7 +519,7 @@ void HeapXlogUpdateOperatorOldpage(RedoBufferInfo *buffer, void *recoreddata, bo htup = (HeapTupleHeader)PageGetItem(page, lp); - htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + htup->t_infomask &= ~HEAP_XMAX_BITS; htup->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); if (hot_update) HeapTupleHeaderSetHotUpdated(htup); @@ -662,14 +662,6 @@ void HeapXlogUpdateOperatorNewpage(RedoBufferInfo *buffer, void *recorddata, boo PageSetLSN(page, buffer->lsn); } -void HeapXlogPageUpgradeOperatorPage(RedoBufferInfo *buffer) -{ - Page page = buffer->pageinfo.page; - - PageLocalUpgrade(page); - PageSetLSN(page, buffer->lsn); -} - void HeapXlogLockOperatorPage(RedoBufferInfo *buffer, void *recorddata, bool isTupleLockUpgrade) { xl_heap_lock *xlrec = (xl_heap_lock *)recorddata; @@ -687,7 +679,7 @@ void HeapXlogLockOperatorPage(RedoBufferInfo *buffer, void *recorddata, bool isT htup = (HeapTupleHeader)PageGetItem(page, lp); - htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); + 
htup->t_infomask &= ~HEAP_XMAX_BITS; htup->t_infomask2 &= ~(HEAP_XMAX_LOCK_ONLY | HEAP_KEYS_UPDATED); if (isTupleLockUpgrade) { @@ -932,7 +924,7 @@ static XLogRecParseState *HeapXlogUpdateParseBlock(XLogReaderState *record, uint XLogRecSetAuxiBlkNumState(&blockstate->blockparse.extra_rec.blockdatarec, newblk, InvalidForkNumber); // OLD BLOCK - if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) { + if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) { (*blocknum)++; XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); if (blockstate == NULL) { @@ -1286,21 +1278,6 @@ static XLogRecParseState *HeapXlogLogicalNewPageParseBlock(XLogReaderState *reco return recordstatehead; } -static XLogRecParseState *HeapXlogPageUpgradePareseBlock(XLogReaderState *record, uint32 *blocknum) -{ - XLogRecParseState *recordstatehead = NULL; - - *blocknum = 1; - - XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); - if (recordstatehead == NULL) { - return NULL; - } - - XLogRecSetBlockDataState(record, HEAP_PAGE_UPDATE_ORIG_BLOCK_NUM, recordstatehead); - return recordstatehead; -} - XLogRecParseState *Heap2RedoParseIoBlock(XLogReaderState *record, uint32 *blocknum) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; @@ -1329,9 +1306,6 @@ XLogRecParseState *Heap2RedoParseIoBlock(XLogReaderState *record, uint32 *blockn case XLOG_HEAP2_LOGICAL_NEWPAGE: recordblockstate = HeapXlogLogicalNewPageParseBlock(record, blocknum); break; - case XLOG_HEAP2_PAGE_UPGRADE: - recordblockstate = HeapXlogPageUpgradePareseBlock(record, blocknum); - break; default: ereport(PANIC, (errmsg("Heap2RedoParseIoBlock: unknown op code %u", info))); } @@ -1617,18 +1591,6 @@ static void HeapXlogMultiInsertBlock(XLogBlockHead *blockhead, XLogBlockDataPars } } -void HeapXlogPageUpgradeBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo) -{ - XLogBlockDataParse *datadecode = blockdatarec; - XLogRedoAction action; - - action = XLogCheckBlockDataRedoAction(datadecode, bufferinfo); - if (BLK_NEEDS_REDO == action) { - HeapXlogPageUpgradeOperatorPage(bufferinfo); - MakeRedoBufferDirty(bufferinfo); - } -} - void Heap2RedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo) { uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; @@ -1646,9 +1608,6 @@ void Heap2RedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatar case XLOG_HEAP2_MULTI_INSERT: HeapXlogMultiInsertBlock(blockhead, blockdatarec, bufferinfo); break; - case XLOG_HEAP2_PAGE_UPGRADE: - HeapXlogPageUpgradeBlock(blockhead, blockdatarec, bufferinfo); - break; default: ereport(PANIC, (errmsg("heap2_redo_block: unknown op code %u", info))); } diff --git a/src/gausskernel/storage/access/redo/redo_nbtxlog.cpp b/src/gausskernel/storage/access/redo/redo_nbtxlog.cpp index c13637b18..4620b0aed 100644 --- a/src/gausskernel/storage/access/redo/redo_nbtxlog.cpp +++ b/src/gausskernel/storage/access/redo/redo_nbtxlog.cpp @@ -207,7 +207,7 @@ void BtreeXlogSplitOperatorLeftpage(RedoBufferInfo *lbuf, void *recorddata, Bloc /* assure that memory is properly allocated, prevent from core dump caused by buffer unpin */ START_CRIT_SECTION(); - newlpage = PageGetTempPageCopySpecial(lpage, true); + newlpage = PageGetTempPageCopySpecial(lpage); END_CRIT_SECTION(); /* Set high key */ diff --git a/src/gausskernel/storage/access/redo/redo_segpage.cpp b/src/gausskernel/storage/access/redo/redo_segpage.cpp index d6ba7ac98..b898fcb3a 100644 --- 
a/src/gausskernel/storage/access/redo/redo_segpage.cpp +++ b/src/gausskernel/storage/access/redo/redo_segpage.cpp @@ -28,6 +28,8 @@ #include "access/xlogproc.h" #include "access/redo_common.h" +#include "access/double_write.h" +#include "commands/tablespace.h" #include "catalog/storage_xlog.h" #include "storage/smgr/fd.h" @@ -49,7 +51,7 @@ static XLogRecParseState *segpage_redo_parse_seg_truncate_to_block(XLogReaderSta XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, recordstatehead); XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blocksegddlrec.blockddlrec), - BLOCK_DDL_TRUNCATE_RELNODE, false, NULL); + BLOCK_DDL_TRUNCATE_RELNODE, NULL); XLogRecSetBlockDataStateContent(record, 0, &(recordstatehead->blockparse.extra_rec.blocksegddlrec.blockdatarec)); @@ -93,20 +95,463 @@ static XLogRecParseState *segpage_redo_parse_space_shrink(XLogReaderState *recor return recordstatehead; } +static XLogRecParseState *segpage_parse_segment_extend_page(XLogReaderState *record, uint32 *blocknum) +{ + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + XLogRecSetBlockDataState(record, 0, recordstatehead); + + XLogRecParseState *blockstate = NULL; + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); + + XLogRecSetBlockDataState(record, 1, blockstate); + + return recordstatehead; +} + +static XLogRecParseState *segpage_parse_atomic_xlog_page(XLogReaderState *record, uint32 *blocknum) +{ + int nbuffers = *(int *)XLogRecGetData(record); + Assert((nbuffers-1) == record->max_block_id); + XLogRecParseState *recordstatehead = NULL; + + for (int i = 0; i < nbuffers; i++) { + XLogRecParseState *blockstate = NULL; + if (recordstatehead == NULL) { + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &blockstate, NULL); + recordstatehead = blockstate; + } else { + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &blockstate, recordstatehead); + } + + XLogRecSetBlockDataState(record, i, blockstate); + } + return recordstatehead; +} + +typedef XLogRecParseState *(child_xlog_page_parse_func)(XLogReaderState *record, uint32 *blocknum); +static XLogRecParseState *segpage_redo_parse_child_xlog(XLogReaderState *record, uint32 *blocknum, + child_xlog_page_parse_func parse_fun, XLogBlockParseEnum parsetype) +{ + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + XLogRecParseState *childState = parse_fun(record, blocknum); + RelFileNodeForkNum filenode = RelFileNodeForkNumFill(NULL, InvalidBackendId, InvalidForkNumber, InvalidBlockNumber); + XLogRecSetBlockCommonState(record, parsetype, filenode, recordstatehead); + XLogRecSetSegFullSyncState(&recordstatehead->blockparse.extra_rec.blocksegfullsyncrec, childState); + + return recordstatehead; +} + +static XLogRecParseState *segpage_redo_parse_extent_group(XLogReaderState *record, uint32 *blocknum) +{ + char *data = XLogRecGetData(record); + RelFileNode *rnode = (RelFileNode *)data; + ForkNumber forknum = *(ForkNumber *)(data + sizeof(RelFileNode)); + + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + RelFileNodeForkNum filenode = + RelFileNodeForkNumFill(rnode, InvalidBackendId, forknum, InvalidBlockNumber); + XLogRecSetBlockCommonState(record, BLOCK_DATA_SEG_FULL_SYNC_TYPE, filenode, recordstatehead); + + recordstatehead->isFullSync = record->isFullSync; + return 
recordstatehead; +} + +static XLogRecParseState *segpage_parse_init_map_page(XLogReaderState *record, uint32 *blocknum) +{ + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + XLogRecSetBlockDataState(record, 0, recordstatehead); + + recordstatehead->isFullSync = record->isFullSync; + return recordstatehead; +} + +static XLogRecParseState *segpage_parse_init_inverse_page(XLogReaderState *record, uint32 *blocknum) +{ + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + XLogRecSetBlockDataState(record, 0, recordstatehead); + + recordstatehead->isFullSync = record->isFullSync; + return recordstatehead; +} + +static XLogRecParseState *segpage_parse_add_new_group(XLogReaderState *record, uint32 *blocknum) +{ + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + XLogRecSetBlockDataState(record, 0, recordstatehead); + + recordstatehead->isFullSync = record->isFullSync; + return recordstatehead; +} + +static XLogRecParseState *segpage_parse_new_page(XLogReaderState *record, uint32 *blocknum) +{ + XLogRecParseState *recordstatehead = NULL; + + (*blocknum)++; + XLogParseBufferAllocListFunc(record, &recordstatehead, NULL); + + RelFileNodeForkNum filenode = + RelFileNodeForkNumFill(NULL, InvalidBackendId, InvalidForkNumber, InvalidBlockNumber); + XLogRecSetBlockCommonState(record, BLOCK_DATA_SEG_FULL_SYNC_TYPE, filenode, recordstatehead); + XLogRecSetSegNewPageInfo(&recordstatehead->blockparse.extra_rec.blocksegnewpageinfo, XLogRecGetData(record), + XLogRecGetDataLen(record)); + recordstatehead->isFullSync = record->isFullSync; + return recordstatehead; +} + XLogRecParseState *segpage_redo_parse_to_block(XLogReaderState *record, uint32 *blocknum) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; XLogRecParseState *recordstatehead = NULL; *blocknum = 0; - if (info == XLOG_SEG_TRUNCATE) { - recordstatehead = segpage_redo_parse_seg_truncate_to_block(record, blocknum); - } else if (info == XLOG_SEG_SPACE_DROP) { - recordstatehead = segpage_redo_parse_space_drop(record, blocknum); - } else if (info == XLOG_SEG_SPACE_SHRINK) { - recordstatehead = segpage_redo_parse_space_shrink(record, blocknum); - } else { - ereport(PANIC, (errmsg("segpage_redo_parse_to_block: unknown op code %u", info))); + switch (info) { + case XLOG_SEG_TRUNCATE: + recordstatehead = segpage_redo_parse_seg_truncate_to_block(record, blocknum); + break; + case XLOG_SEG_SPACE_DROP: + recordstatehead = segpage_redo_parse_space_drop(record, blocknum); + break; + case XLOG_SEG_SPACE_SHRINK: + recordstatehead = segpage_redo_parse_space_shrink(record, blocknum); + break; + case XLOG_SEG_SEGMENT_EXTEND: + recordstatehead = segpage_redo_parse_child_xlog(record, blocknum, segpage_parse_segment_extend_page, + BLOCK_DATA_SEG_EXTEND); + break; + case XLOG_SEG_ATOMIC_OPERATION: + recordstatehead = segpage_redo_parse_child_xlog(record, blocknum, segpage_parse_atomic_xlog_page, + BLOCK_DATA_SEG_FULL_SYNC_TYPE); + break; + case XLOG_SEG_CREATE_EXTENT_GROUP: + recordstatehead = segpage_redo_parse_extent_group(record, blocknum); + break; + case XLOG_SEG_INIT_INVRSPTR_PAGE: + recordstatehead = segpage_redo_parse_child_xlog(record, blocknum, segpage_parse_init_inverse_page, + BLOCK_DATA_SEG_FULL_SYNC_TYPE); + break; + case XLOG_SEG_INIT_MAPPAGE: + recordstatehead = segpage_redo_parse_child_xlog(record, blocknum, 
segpage_parse_init_map_page, + BLOCK_DATA_SEG_FULL_SYNC_TYPE); + break; + case XLOG_SEG_ADD_NEW_GROUP: + recordstatehead = segpage_redo_parse_child_xlog(record, blocknum, segpage_parse_add_new_group, + BLOCK_DATA_SEG_FULL_SYNC_TYPE); + break; + case XLOG_SEG_NEW_PAGE: + recordstatehead = segpage_parse_new_page(record, blocknum); + break; + default: + ereport(PANIC, (errmsg("segpage_redo_parse_to_block: unknown op code %u", info))); + break; } return recordstatehead; } + +void ProcRemainAtomicOperation(Buffer buf, DecodedXLogBlockOp *decoded_op, TransactionId xid) +{ + for (int j = 0; j < decoded_op->operations; j++) { + redo_xlog_deal_alloc_seg(decoded_op->op[j], buf, decoded_op->data[j], decoded_op->data_len[j], xid); + if (decoded_op->op[j] == SPCXLOG_SHRINK_SEGHEAD_UPDATE) { + XLogMoveExtent *move_extent_xlog = (XLogMoveExtent *)(decoded_op->data[j]); + move_extent_flush_buffer(move_extent_xlog); + } + } +} + +static void SegPageRedoAtomicOperationBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, + RedoBufferInfo *bufferinfo) +{ + XLogRedoAction action = XLogCheckBlockDataRedoAction(blockdatarec, bufferinfo); + if (action == BLK_NEEDS_REDO) { + Size blkdatalen; + char *blkdata = XLogBlockDataGetBlockData(blockdatarec, &blkdatalen); + SegmentCheck(blkdatalen != 0); + DecodedXLogBlockOp decoded_op = XLogAtomicDecodeBlockData(blkdata, blkdatalen); + + for (int j = 0; j < decoded_op.operations; j++) { + redo_atomic_xlog_dispatch(decoded_op.op[j], bufferinfo, decoded_op.data[j]); + } + + PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); + MakeRedoBufferDirty(bufferinfo); + + bool is_need_log_remain_segs = IsNeedLogRemainSegs(blockhead->end_ptr); + if (is_need_log_remain_segs) { + ProcRemainAtomicOperation(bufferinfo->buf, &decoded_op, blockhead->xl_xid); + } + } +} + +static void SegPageRedoSegmentExtend(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, + RedoBufferInfo *bufferinfo) +{ + XLogRedoAction action = XLogCheckBlockDataRedoAction(blockdatarec, bufferinfo); + if (action == BLK_NEEDS_REDO) { + if (XLogBlockDataGetBlockId(blockdatarec) == 0) { + Size blkdatalen; + char *blkdata = XLogBlockDataGetBlockData(blockdatarec, &blkdatalen); + XLogDataSegmentExtend *xlog_data = (XLogDataSegmentExtend *)blkdata; + SegmentHead *seghead = (SegmentHead *)PageGetContents(bufferinfo->pageinfo.page); + if (seghead->nblocks != xlog_data->old_nblocks) { + ereport(PANIC, (errmsg("extreme rto:data inconsistent when redo seghead_extend, nblocks is %u on disk," + " but should be %u according to xlog", + seghead->nblocks, xlog_data->old_nblocks))); + } + seghead->nblocks = xlog_data->new_nblocks; + PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); + } else if (XLogBlockDataGetBlockId(blockdatarec) == 1) { + memset_s(bufferinfo->pageinfo.page, BLCKSZ, 0, BLCKSZ); + PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); + } else { + ereport(PANIC, (errmsg("SegPageRedoSegmentExtend block id error"))); + } + + MakeRedoBufferDirty(bufferinfo); + } +} + + +static void SegPageRedoInitMapPage(XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo) +{ + BlockNumber first_page = *(BlockNumber *)XLogBlockDataGetMainData(blockdatarec, NULL); + eg_init_bitmap_page_content(bufferinfo->pageinfo.page, first_page); + PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); + MakeRedoBufferDirty(bufferinfo); +} + +static void SegPageRedoInitInversePointPage(RedoBufferInfo *bufferinfo) +{ + SegPageInit(bufferinfo->pageinfo.page, BLCKSZ); + PageSetLSN(bufferinfo->pageinfo.page, 
bufferinfo->lsn); + MakeRedoBufferDirty(bufferinfo); +} + +static void SegPageRedoAddNewGroup(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, + RedoBufferInfo *bufferinfo) +{ + XLogRedoAction action = XLogCheckBlockDataRedoAction(blockdatarec, bufferinfo); + if (action == BLK_NEEDS_REDO) { + xl_new_map_group_info_t *new_group = (xl_new_map_group_info_t *)XLogBlockDataGetMainData(blockdatarec, NULL); + df_map_head_t *map_head = (df_map_head_t *)PageGetContents(bufferinfo->pageinfo.page); + map_head->group_count = new_group->group_count; + SegmentCheck(map_head->group_count > 0); + + df_map_group_t *map_group = &map_head->groups[map_head->group_count - 1]; + map_group->first_map = new_group->first_map_pageno; + map_group->page_count = new_group->group_size; + map_group->free_page = 0; + PageSetLSN(bufferinfo->pageinfo.page, bufferinfo->lsn); + + MakeRedoBufferDirty(bufferinfo); + } +} + + +void SegPageRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo) +{ + uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; + + switch (info) { + case XLOG_SEG_ATOMIC_OPERATION: + SegPageRedoAtomicOperationBlock(blockhead, blockdatarec, bufferinfo); + break; + case XLOG_SEG_SEGMENT_EXTEND: + SegPageRedoSegmentExtend(blockhead, blockdatarec, bufferinfo); + break; + case XLOG_SEG_INIT_MAPPAGE: + SegPageRedoInitMapPage(blockdatarec, bufferinfo); + break; + case XLOG_SEG_INIT_INVRSPTR_PAGE: + SegPageRedoInitInversePointPage(bufferinfo); + break; + case XLOG_SEG_ADD_NEW_GROUP: + SegPageRedoAddNewGroup(blockhead, blockdatarec, bufferinfo); + break; + default: + ereport(PANIC, (errmsg("SegPageRedoDataBlock block id error"))); + break; + } +} + +void SegPageRedoExtendGroup(XLogBlockHead *blockHead) +{ + /* Create tablespace directory on the standby */ + TablespaceCreateDbspace(blockHead->spcNode, blockHead->dbNode, true); + /* Create SegSpace object in memory */ + SegSpace *spc = spc_init_space_node(blockHead->spcNode, blockHead->dbNode); + eg_init_data_files(&spc->extent_group[EXTENT_TYPE_TO_GROUPID(blockHead->relNode)][blockHead->forknum], true, + blockHead->end_ptr); +} + +void SegPageRedoSpaceShrink(XLogBlockHead *blockhead) +{ + SegSpace *spc = spc_open(blockhead->spcNode, blockhead->dbNode, false); + if (spc_status(spc) == SpaceDataFileStatus::EMPTY) { + ereport(LOG, (errmsg("extreme rto redo space shrink, target space <%u, %u, %u> does not exist", + blockhead->spcNode, blockhead->dbNode, blockhead->relNode))); + return; + } + SegExtentGroup *seg = &spc->extent_group[EXTENT_TYPE_TO_GROUPID(blockhead->relNode)][blockhead->forknum]; + RelFileNode rnode; + rnode.spcNode = blockhead->spcNode; + rnode.dbNode = blockhead->dbNode; + rnode.relNode = blockhead->relNode; + rnode.bucketNode = blockhead->bucketNode; + char *path = relpathperm(rnode, blockhead->forknum); + ereport(LOG, (errmsg("call space shrink files, filename: %s, xlog lsn: %lX", path, blockhead->end_ptr))); + pfree(path); + spc_shrink_files(seg, blockhead->blkno, true); + + /* forget metadata buffer that uses physical block number */ + XLogTruncateRelation(seg->rnode, seg->forknum, blockhead->blkno); + /* forget data buffer that uses logical block number */ + XLogTruncateSegmentSpace(seg->rnode, seg->forknum, blockhead->blkno); +} + +void SegPageRedoSpaceDrop(XLogBlockHead *blockhead) +{ + Assert(blockhead != NULL); + spc_drop(blockhead->spcNode, blockhead->dbNode, true); + XLogDropSegmentSpace(blockhead->spcNode, blockhead->dbNode); +} + +void SegPageRedoNewPage(XLogBlockHead 
*blockhead, XLogBlockSegNewPage *newPageInfo)
+{
+    Assert(newPageInfo->dataLen != 0);
+    BufferTag *tag = (BufferTag *)newPageInfo->mainData;
+
+    seg_redo_new_page_copy_and_flush(tag, newPageInfo->mainData + sizeof(BufferTag), blockhead->end_ptr);
+}
+
+void MarkSegPageRedoChildPageDirty(RedoBufferInfo *bufferinfo)
+{
+    BufferDesc *bufDesc = GetBufferDescriptor(bufferinfo->buf - 1);
+    if (bufferinfo->dirtyflag || XLByteLT(bufDesc->lsn_on_disk, PageGetLSN(bufferinfo->pageinfo.page))) {
+        if (IsSegmentPhysicalRelNode(bufferinfo->blockinfo.rnode)) {
+            SegMarkBufferDirty(bufferinfo->buf);
+        } else {
+            MarkBufferDirty(bufferinfo->buf);
+        }
+        if (!bufferinfo->dirtyflag && bufferinfo->blockinfo.forknum == MAIN_FORKNUM) {
+            int mode = WARNING;
+#ifdef USE_ASSERT_CHECKING
+            mode = PANIC;
+#endif
+            const uint32 shiftSz = 32;
+            ereport(mode, (errmsg("extreme_rto segment page not marked dirty: lsn %X/%X, lsn_disk %X/%X, "
+                                  "lsn_page %X/%X, page %u/%u/%u %u",
+                                  (uint32)(bufferinfo->lsn >> shiftSz), (uint32)(bufferinfo->lsn),
+                                  (uint32)(bufDesc->lsn_on_disk >> shiftSz), (uint32)(bufDesc->lsn_on_disk),
+                                  (uint32)(PageGetLSN(bufferinfo->pageinfo.page) >> shiftSz),
+                                  (uint32)(PageGetLSN(bufferinfo->pageinfo.page)),
+                                  bufferinfo->blockinfo.rnode.spcNode, bufferinfo->blockinfo.rnode.dbNode,
+                                  bufferinfo->blockinfo.rnode.relNode, bufferinfo->blockinfo.blkno)));
+        }
+#ifdef USE_ASSERT_CHECKING
+        bufDesc->lsn_dirty = PageGetLSN(bufferinfo->pageinfo.page);
+#endif
+    }
+    if (IsSegmentPhysicalRelNode(bufferinfo->blockinfo.rnode)) {
+        SegUnlockReleaseBuffer(bufferinfo->buf); /* release buffer */
+    } else {
+        UnlockReleaseBuffer(bufferinfo->buf);
+    }
+}
+
+void SegPageRedoChildState(XLogRecParseState *childStateList)
+{
+    XLogRecParseState *procState = childStateList;
+    RedoTimeCost timeCost1;
+    RedoTimeCost timeCost2;
+
+    while (procState != NULL) {
+        XLogRecParseState *redoblockstate = procState;
+        procState = (XLogRecParseState *)procState->nextrecord;
+        RedoBufferInfo bufferinfo = {0};
+        (void)XLogBlockRedoForExtremeRTO(redoblockstate, &bufferinfo, false, timeCost1, timeCost2);
+        if (bufferinfo.pageinfo.page != NULL) {
+            MarkSegPageRedoChildPageDirty(&bufferinfo);
+        }
+    }
+
+    XLogBlockParseStateRelease(childStateList);
+}
+
+void ProcSegPageJustFreeChildState(XLogRecParseState *parseState)
+{
+    Assert(XLogBlockHeadGetRmid(&parseState->blockparse.blockhead) == RM_SEGPAGE_ID);
+    uint8 info = XLogBlockHeadGetInfo(&parseState->blockparse.blockhead) & ~XLR_INFO_MASK;
+    if ((info == XLOG_SEG_ATOMIC_OPERATION) || (info == XLOG_SEG_SEGMENT_EXTEND) ||
+        (info == XLOG_SEG_INIT_MAPPAGE) || (info == XLOG_SEG_INIT_INVRSPTR_PAGE) ||
+        (info == XLOG_SEG_ADD_NEW_GROUP)) {
+        XLogRecParseState *child =
+            (XLogRecParseState *)parseState->blockparse.extra_rec.blocksegfullsyncrec.childState;
+        XLogBlockParseStateRelease(child);
+        parseState->blockparse.extra_rec.blocksegfullsyncrec.childState = NULL;
+    }
+}
+
+void ProcSegPageCommonRedo(XLogRecParseState *parseState)
+{
+    Assert(XLogBlockHeadGetRmid(&parseState->blockparse.blockhead) == RM_SEGPAGE_ID);
+    uint8 info = XLogBlockHeadGetInfo(&parseState->blockparse.blockhead) & ~XLR_INFO_MASK;
+    switch (info) {
+        /* these subtypes carry a child parse-state list */
+        case XLOG_SEG_ATOMIC_OPERATION:
+        case XLOG_SEG_SEGMENT_EXTEND:
+        case XLOG_SEG_INIT_MAPPAGE:
+        case XLOG_SEG_INIT_INVRSPTR_PAGE:
+        case XLOG_SEG_ADD_NEW_GROUP:
+        {
+            XLogRecParseState *child =
+                (XLogRecParseState *)parseState->blockparse.extra_rec.blocksegfullsyncrec.childState;
+            SegPageRedoChildState(child);
+            break;
+        }
+        case XLOG_SEG_CREATE_EXTENT_GROUP: 
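+            /*
+             * No child parse-state list for this subtype: extent-group
+             * creation only touches the tablespace directory and in-memory
+             * SegSpace state, so it is replayed directly by the call below.
+             */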
+ SegPageRedoExtendGroup(&parseState->blockparse.blockhead); + break; + case XLOG_SEG_SPACE_SHRINK: + SegPageRedoSpaceShrink(&parseState->blockparse.blockhead); + break; + case XLOG_SEG_NEW_PAGE: + SegPageRedoNewPage(&parseState->blockparse.blockhead, + &parseState->blockparse.extra_rec.blocksegnewpageinfo); + break; + case XLOG_SEG_SPACE_DROP: + SegPageRedoSpaceDrop(&parseState->blockparse.blockhead); + break; + default: + ereport(PANIC, (errmsg("ProcSegPageCommonRedo: unknown op code %u", info))); + break; + } +} + diff --git a/src/gausskernel/storage/access/redo/redo_storage.cpp b/src/gausskernel/storage/access/redo/redo_storage.cpp index 7f6c0da00..09e000519 100644 --- a/src/gausskernel/storage/access/redo/redo_storage.cpp +++ b/src/gausskernel/storage/access/redo/redo_storage.cpp @@ -62,7 +62,7 @@ XLogRecParseState *smgr_xlog_relnode_parse_to_block(XLogReaderState *record, uin RelFileNodeForkNum filenode = RelFileNodeForkNumFill(&tmp_node, InvalidBackendId, forknum, blkno); XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, recordstatehead); - XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blockddlrec), ddltype, colmrel, + XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blockddlrec), ddltype, (char *)XLogRecGetData(record)); return recordstatehead; } diff --git a/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp b/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp index c04bdfc7e..9a62c38cd 100644 --- a/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp +++ b/src/gausskernel/storage/access/redo/redo_ubtxlog.cpp @@ -223,7 +223,7 @@ void UBTreeXlogSplitOperatorLeftpage(RedoBufferInfo *lbuf, void *recorddata, Blo /* assure that memory is properly allocated, prevent from core dump caused by buffer unpin */ START_CRIT_SECTION(); - newlpage = PageGetTempPageCopySpecial(lpage, true); + newlpage = PageGetTempPageCopySpecial(lpage); END_CRIT_SECTION(); /* Set high key */ diff --git a/src/gausskernel/storage/access/redo/redo_xact.cpp b/src/gausskernel/storage/access/redo/redo_xact.cpp index 4a78b166b..4808ba9ba 100644 --- a/src/gausskernel/storage/access/redo/redo_xact.cpp +++ b/src/gausskernel/storage/access/redo/redo_xact.cpp @@ -73,94 +73,6 @@ #include "utils/timestamp.h" #include "access/redo_common.h" -static bool IsDroppedBucketList(ColFileNodeRel *xnodes, int nrels) -{ - RelFileNode preRnode = {0}; - - for (int i = 0; i < nrels; i++) { - ColFileNodeRel *colFileNodeRel = xnodes + i; - ColFileNode node; - ColFileNodeCopy(&node, colFileNodeRel); - if (IsValidColForkNum(node.forknum)) { - return false; - } - - Assert(node.filenode.relNode != InvalidOid); - if (!RelFileNodeRelEquals(node.filenode, preRnode)) { - preRnode = node.filenode; - } else { - ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE), (errmsg("[XYQ] %dth founded to be same rnode," - "preRelNode [%u, %u, %u, %d], cur rnode [%u, %u, %u, %d] %d.", i, preRnode.spcNode, - preRnode.dbNode, preRnode.relNode, preRnode.bucketNode, node.filenode.spcNode, - node.filenode.dbNode, node.filenode.relNode, node.filenode.bucketNode, node.forknum)))); - return true; - } - } - - return false; -} - -XLogRecParseState *xact_redo_rmbktlist_parse_to_block(XLogReaderState *record, XLogRecParseState *recordstatehead, - uint32 *blocknum, ColFileNodeRel *xnodes, int nrels) -{ - uint32 bitmap[BktBitMaxMapCnt] = { 0 }; - RelFileNode preRnode = { 0 }; - - bool firstLoop = true; - bool newLoop = false; - XLogRecParseState *blockstate = NULL; - - for (int i = 0; i < nrels; ++i) { - ColFileNodeRel 
*colFileNodeRel = xnodes + i; - ColFileNode node; - ColFileNodeCopy(&node, colFileNodeRel); - Assert(!IsValidColForkNum(node.forknum)); - - if (IsSegmentFileNode(node.filenode)) { - SMgrRelation reln = smgropen(node.filenode, InvalidBackendId); - smgrclose(reln); - } - - if (!RelFileNodeRelEquals(node.filenode, preRnode)) { - newLoop = true; - if (firstLoop && preRnode.spcNode != InvalidOid) { - firstLoop = false; - } - preRnode = node.filenode; - } - - if (firstLoop && IsBucketFileNode(node.filenode)) { - SET_BKT_MAP_BIT(bitmap, node.filenode.bucketNode); - } else if (IsBucketFileNode(node.filenode)) { - SegmentCheck(GET_BKT_MAP_BIT(bitmap, node.filenode.bucketNode)); - } - - if (!newLoop) { - continue; - } - - newLoop = false; - (*blocknum)++; - XLogParseBufferAllocListStateFunc(record, &blockstate, &recordstatehead); - if (blockstate == NULL) { - return NULL; - } - - RelFileNodeForkNum filenode = RelFileNodeForkNumFill(&(node.filenode), InvalidBackendId, MAIN_FORKNUM, - InvalidBlockNumber); - XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, blockstate); - - uint32 *bucketList = (uint32 *)palloc(sizeof(bitmap)); - errno_t err = memcpy_s(bucketList, sizeof(bitmap), bitmap, sizeof(bitmap)); - securec_check(err, "", ""); - - XLogRecSetBlockDdlState(&(blockstate->blockparse.extra_rec.blockddlrec), BLOCK_DDL_DROP_BKTLIST, - false, (char *)bucketList, node.ownerid); - } - - return recordstatehead; -} - /* * *************Add for batchredo begin*************** * for these func ,they don't hold the lock and parse the record @@ -174,61 +86,15 @@ XLogRecParseState *xact_redo_rmddl_parse_to_block(XLogReaderState *record, XLogR XLogRecParseState *blockstate = NULL; Assert(nrels > 0); - - for (int i = 0; i < nrels; ++i) { - ColFileNodeRel *colFileNode = xnodes + i; - ColFileNode node; - ColFileNodeCopy(&node, colFileNode); - if (!IsValidColForkNum(node.forknum)) { - if (SUPPORT_DFS_BATCH && ((IS_PGXC_COORDINATOR && IsValidPaxDfsForkNum(node.forknum)) || - (!IS_PGXC_COORDINATOR && IsTruncateDfsForkNum(node.forknum)))) { - (*blocknum)++; - XLogParseBufferAllocListStateFunc(record, &blockstate, &recordstatehead); - if (blockstate == NULL) { - return NULL; - } - ForkNumber fork = (ForkNumber)node.forknum; - RelFileNodeForkNum filenode = - RelFileNodeForkNumFill(&node.filenode, InvalidBackendId, fork, InvalidBlockNumber); - XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, blockstate); - XLogRecSetBlockDdlState(&(blockstate->blockparse.extra_rec.blockddlrec), BLOCK_DDL_DROP_RELNODE, false, - NULL, node.ownerid); - } else { - (*blocknum)++; - XLogParseBufferAllocListStateFunc(record, &blockstate, &recordstatehead); - if (blockstate == NULL) { - return NULL; - } - RelFileNodeForkNum filenode = - RelFileNodeForkNumFill(&node.filenode, InvalidBackendId, MAIN_FORKNUM, InvalidBlockNumber); - XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, blockstate); - XLogRecSetBlockDdlState(&(blockstate->blockparse.extra_rec.blockddlrec), BLOCK_DDL_DROP_RELNODE, false, - NULL, node.ownerid); - - if (IsSegmentFileNode(node.filenode)) { - SMgrRelation reln = smgropen(node.filenode, InvalidBackendId); - smgrclose(reln); - } - } - } else { - if (SUPPORT_COLUMN_BATCH) { - (*blocknum)++; - XLogParseBufferAllocListStateFunc(record, &blockstate, &recordstatehead); - if (blockstate == NULL) { - return NULL; - } - - Assert(IsValidColForkNum(node.forknum)); - int fork = (ForkNumber)node.forknum; - - RelFileNodeForkNum filenode = - RelFileNodeForkNumFill(&node.filenode, InvalidBackendId, 
fork, InvalidBlockNumber); - XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, blockstate); - XLogRecSetBlockDdlState(&(blockstate->blockparse.extra_rec.blockddlrec), BLOCK_DDL_DROP_RELNODE, true, - NULL, node.ownerid); - } - } + (*blocknum)++; + XLogParseBufferAllocListStateFunc(record, &blockstate, &recordstatehead); + if (blockstate == NULL) { + return NULL; } + RelFileNodeForkNum filenode = RelFileNodeForkNumFill(NULL, InvalidBackendId, MAIN_FORKNUM, InvalidBlockNumber); + XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, blockstate); + XLogRecSetBlockDdlState(&(blockstate->blockparse.extra_rec.blockddlrec), BLOCK_DDL_DROP_RELNODE, (char *)xnodes, + nrels); return recordstatehead; } @@ -491,12 +357,7 @@ XLogRecParseState *xact_xlog_commit_parse_to_block(XLogReaderState *record, XLog XLogRecSetXactDdlState(&blockstate->blockparse.extra_rec.blockxact, nrels, (void *)xnodes, invalidmsgnum, (void *)inval_msgs, nlibrary, (void *)libfilename); if (nrels > 0) { - bool isBucketList = IsDroppedBucketList(xnodes, nrels); - if (isBucketList) { - recordstatehead = xact_redo_rmbktlist_parse_to_block(record, recordstatehead, blocknum, xnodes, nrels); - } else { - recordstatehead = xact_redo_rmddl_parse_to_block(record, recordstatehead, blocknum, xnodes, nrels); - } + recordstatehead = xact_redo_rmddl_parse_to_block(record, recordstatehead, blocknum, xnodes, nrels); } return recordstatehead; } @@ -555,12 +416,7 @@ XLogRecParseState *xact_xlog_abort_parse_to_block(XLogReaderState *record, XLogR XLogRecSetXactDdlState(&blockstate->blockparse.extra_rec.blockxact, nrels, (void *)xnodes, 0, NULL, nlibrary, (void *)libfilename); if (nrels > 0) { - bool isBucketList = IsDroppedBucketList(xnodes, nrels); - if (isBucketList) { - recordstatehead = xact_redo_rmbktlist_parse_to_block(record, recordstatehead, blocknum, xnodes, nrels); - } else { - recordstatehead = xact_redo_rmddl_parse_to_block(record, recordstatehead, blocknum, xnodes, nrels); - } + recordstatehead = xact_redo_rmddl_parse_to_block(record, recordstatehead, blocknum, xnodes, nrels); } return recordstatehead; } diff --git a/src/gausskernel/storage/access/redo/redo_xlogutils.cpp b/src/gausskernel/storage/access/redo/redo_xlogutils.cpp index 8cbd493c5..ffbf77f4b 100644 --- a/src/gausskernel/storage/access/redo/redo_xlogutils.cpp +++ b/src/gausskernel/storage/access/redo/redo_xlogutils.cpp @@ -52,6 +52,7 @@ #include "commands/dbcommands.h" #include "access/twophase.h" #include "access/redo_common.h" +#include "access/extreme_rto/page_redo.h" THR_LOCAL RedoParseManager *g_parseManager = NULL; THR_LOCAL RedoBufferManager *g_bufferManager = NULL; @@ -59,6 +60,7 @@ THR_LOCAL RedoBufferManager *g_bufferManager = NULL; #ifdef BUILD_ALONE THR_LOCAL bool assert_enabled = true; #endif +const int XLOG_LSN_SWAP = 32; static const ReadBufferMethod G_BUFFERREADMETHOD = WITH_NORMAL_CACHE; @@ -86,7 +88,8 @@ static FORCE_INLINE bool XLogLsnCheckLogInvalidPage(const RedoBufferInfo *buffer return false; } -bool DoLsnCheck(const RedoBufferInfo *bufferinfo, bool willInit, XLogRecPtr lastLsn, const XLogPhyBlock *pblk) +bool DoLsnCheck(const RedoBufferInfo *bufferinfo, bool willInit, XLogRecPtr lastLsn, const XLogPhyBlock *pblk, + bool *needRepair) { XLogRecPtr lsn = bufferinfo->lsn; Page page = (Page)bufferinfo->pageinfo.page; @@ -128,12 +131,20 @@ bool DoLsnCheck(const RedoBufferInfo *bufferinfo, bool willInit, XLogRecPtr last } else if (pageCurLsn == InvalidXLogRecPtr && PageIsEmpty(page) && PageUpperIsInitNew(page)) { return 
XLogLsnCheckLogInvalidPage(bufferinfo, LSN_CHECK_ERROR, pblk);
         } else {
-            ereport(PANIC, (errmsg("lsn check error, lsn in record (%X/%X) ,lsn in current page %X/%X, "
-                                   "page info:%u/%u/%u forknum %d blknum:%u lsn %X/%X",
-                                   (uint32)(lastLsn >> 32), (uint32)(lastLsn), (uint32)(pageCurLsn >> 32),
-                                   (uint32)(pageCurLsn), blockinfo->rnode.spcNode, blockinfo->rnode.dbNode,
-                                   blockinfo->rnode.relNode, blockinfo->forknum, blockinfo->blkno, (uint32)(lsn >> 32),
-                                   (uint32)(lsn))));
+            int elevel = PANIC;
+            if (CheckVerionSupportRepair() && IsPrimaryClusterStandbyDN() && g_instance.repair_cxt.support_repair) {
+                elevel = WARNING;
+                *needRepair = true;
+                XLogLsnCheckLogInvalidPage(bufferinfo, LSN_CHECK_ERROR, pblk);
+            }
+            ereport(elevel,
+                (errmsg("lsn check error, record last lsn (%X/%X), lsn in current page %X/%X, "
+                    "page info:%u/%u/%u forknum %d blknum:%u lsn %X/%X",
+                    (uint32)(lastLsn >> XLOG_LSN_SWAP), (uint32)(lastLsn), (uint32)(pageCurLsn >> XLOG_LSN_SWAP),
+                    (uint32)(pageCurLsn), blockinfo->rnode.spcNode, blockinfo->rnode.dbNode,
+                    blockinfo->rnode.relNode, blockinfo->forknum, blockinfo->blkno, (uint32)(lsn >> XLOG_LSN_SWAP),
+                    (uint32)(lsn))));
+            return false;
         }
     }
     return true;
@@ -185,9 +196,6 @@ bool XLogBlockRefreshRedoBufferInfo(XLogBlockHead *blockhead, RedoBufferInfo *bu
     if (bufferinfo->blockinfo.rnode.relNode != XLogBlockHeadGetRelNode(blockhead)) {
         return false;
     }
-    if (bufferinfo->blockinfo.rnode.opt != XLogBlockHeadGetCompressOpt(blockhead)) {
-        return false;
-    }
     if (bufferinfo->blockinfo.forknum != XLogBlockHeadGetForkNum(blockhead)) {
         return false;
     }
@@ -211,7 +219,6 @@ void XLogBlockInitRedoBlockInfo(XLogBlockHead *blockhead, RedoBufferTag *blockin
     blockinfo->rnode.dbNode = XLogBlockHeadGetDbNode(blockhead);
     blockinfo->rnode.relNode = XLogBlockHeadGetRelNode(blockhead);
     blockinfo->rnode.bucketNode = XLogBlockHeadGetBucketId(blockhead);
-    blockinfo->rnode.opt = XLogBlockHeadGetCompressOpt(blockhead);
     blockinfo->forknum = XLogBlockHeadGetForkNum(blockhead);
     blockinfo->blkno = XLogBlockHeadGetBlockNum(blockhead);
     blockinfo->pblk = XLogBlockHeadGetPhysicalBlock(blockhead);
@@ -253,9 +260,23 @@ XLogRedoAction XLogCheckBlockDataRedoAction(XLogBlockDataParse *datadecode, Redo
         return BLK_DONE;
     } else {
         if (EnalbeWalLsnCheck && bufferinfo->blockinfo.forknum == MAIN_FORKNUM) {
+            bool needRepair = false;
             bool willinit = (XLogBlockDataGetBlockFlags(datadecode) & BKPBLOCK_WILL_INIT);
             bool notSkip = DoLsnCheck(bufferinfo, willinit, XLogBlockDataGetLastBlockLSN(datadecode),
-                (bufferinfo->blockinfo.pblk.relNode != InvalidOid) ? &bufferinfo->blockinfo.pblk : NULL);
+                (bufferinfo->blockinfo.pblk.relNode != InvalidOid) ? 
&bufferinfo->blockinfo.pblk : NULL, + &needRepair); + if (needRepair) { + XLogRecPtr pageCurLsn = PageGetLSN(bufferinfo->pageinfo.page); + UnlockReleaseBuffer(bufferinfo->buf); + extreme_rto::RecordBadBlockAndPushToRemote(datadecode, LSN_CHECK_FAIL, pageCurLsn, + bufferinfo->blockinfo.pblk); + bufferinfo->buf = InvalidBuffer; + bufferinfo->pageinfo = {0}; +#ifdef USE_ASSERT_CHECKING + bufferinfo->pageinfo.ignorecheck = true; +#endif + return BLK_NOTFOUND; + } if (!notSkip) { return BLK_DONE; } @@ -284,7 +305,7 @@ void XLogRecSetBlockCommonState(XLogReaderState *record, XLogBlockParseEnum bloc blockparse->blockhead.spcNode = filenode.rnode.node.spcNode; blockparse->blockhead.dbNode = filenode.rnode.node.dbNode; blockparse->blockhead.bucketNode = filenode.rnode.node.bucketNode; - blockparse->blockhead.opt = filenode.rnode.node.opt; + blockparse->blockhead.blkno = filenode.segno; blockparse->blockhead.forknum = filenode.forknumber; @@ -318,8 +339,25 @@ void DoRecordCheck(XLogRecParseState *recordstate, XLogRecPtr pageLsn, bool repl } #endif +static void AddReadBlock(XLogRecParseState *recordstate, uint32 readblocks) +{ + if (recordstate->refrecord == NULL) { + return; + } + + if (recordstate->blockparse.blockhead.block_valid != BLOCK_DATA_MAIN_DATA_TYPE) { + return; + } + + RedoParseManager *manager = recordstate->manager; + if (manager->refOperate != NULL) { + manager->refOperate->addReadBlock(recordstate->refrecord, readblocks); + } +} + static void DereferenceSrcRecord(RedoParseManager *parsemanager, void *record) { + Assert(parsemanager != NULL); if (parsemanager->refOperate != NULL) { parsemanager->refOperate->DerefCount(record); } @@ -366,7 +404,8 @@ void XLogRecSetBlockDataStateContent(XLogReaderState *record, uint32 blockid, XL blockdatarec->main_data_len = XLogRecGetDataLen(record); } -void XLogRecSetBlockDataState(XLogReaderState *record, uint32 blockid, XLogRecParseState *recordblockstate) +void XLogRecSetBlockDataState(XLogReaderState *record, uint32 blockid, XLogRecParseState *recordblockstate, + XLogBlockParseEnum type) { Assert(XLogRecHasBlockRef(record, blockid)); DecodedBkpBlock *decodebkp = &(record->blocks[blockid]); @@ -379,7 +418,7 @@ void XLogRecSetBlockDataState(XLogReaderState *record, uint32 blockid, XLogRecPa RelFileNodeForkNum filenode = RelFileNodeForkNumFill(&decodebkp->rnode, InvalidBackendId, decodebkp->forknum, decodebkp->blkno); - XLogRecSetBlockCommonState(record, BLOCK_DATA_MAIN_DATA_TYPE, filenode, recordblockstate, &pblk); + XLogRecSetBlockCommonState(record, type, filenode, recordblockstate, &pblk); XLogBlockDataParse *blockdatarec = &(recordblockstate->blockparse.extra_rec.blockdatarec); @@ -776,12 +815,11 @@ void XLogUpdateCopyedBlockState(XLogRecParseState *recordblockstate, XLogBlockPa recordblockstate->blockparse.blockhead.bucketNode = bucketNode; } -void XLogRecSetBlockDdlState(XLogBlockDdlParse *blockddlstate, uint32 blockddltype, uint32 columnrel, char *mainData, - Oid ownerid) +void XLogRecSetBlockDdlState(XLogBlockDdlParse *blockddlstate, uint32 blockddltype, char *mainData, int rels) { + Assert(blockddlstate != NULL); blockddlstate->blockddltype = blockddltype; - blockddlstate->columnrel = columnrel; - blockddlstate->ownerid = ownerid; + blockddlstate->rels = rels; blockddlstate->mainData = mainData; } @@ -871,6 +909,20 @@ void XLogRecSetPinVacuumState(XLogBlockVacuumPinParse *blockvacuum, BlockNumber blockvacuum->lastBlockVacuumed = lastblknum; } +void XLogRecSetSegFullSyncState(XLogBlockSegFullSyncParse *state, void *childState) +{ + 
Assert(state != NULL); + state->childState = childState; +} + +void XLogRecSetSegNewPageInfo(XLogBlockSegNewPage *state, char *mainData, Size len) +{ + Assert(state != NULL); + state->mainData = mainData; + state->dataLen = len; +} + + static inline bool AtomicCompareExchangeBuffer(volatile Buffer *ptr, Buffer *expected, Buffer newval) { bool ret = false; @@ -886,6 +938,11 @@ static inline Buffer AtomicReadBuffer(volatile Buffer *ptr) return *ptr; } +static inline void AtomicWriteBuffer(volatile Buffer* ptr, Buffer val) +{ + *ptr = val; +} + static inline Buffer AtomicExchangeBuffer(volatile Buffer *ptr, Buffer newval) { Buffer old; @@ -933,6 +990,7 @@ RedoMemSlot *XLogMemAlloc(RedoMemManager *memctl) do { if (memctl->firstfreeslot == InvalidBuffer) { memctl->firstfreeslot = AtomicExchangeBuffer(&memctl->firstreleaseslot, InvalidBuffer); + pg_read_barrier(); } if (memctl->firstfreeslot != InvalidBuffer) { @@ -961,9 +1019,10 @@ void XLogMemRelease(RedoMemManager *memctl, Buffer bufferid) } bufferslot = &(memctl->memslot[bufferid - 1]); Assert(bufferslot->freeNext == InvalidBuffer); - Buffer oldFirst = memctl->firstreleaseslot; + Buffer oldFirst = AtomicReadBuffer(&memctl->firstreleaseslot); + pg_memory_barrier(); do { - bufferslot->freeNext = oldFirst; + AtomicWriteBuffer(&bufferslot->freeNext, oldFirst); } while (!AtomicCompareExchangeBuffer(&memctl->firstreleaseslot, &oldFirst, bufferid)); } @@ -1145,20 +1204,18 @@ XLogRecParseState *XLogParseBufferAllocList(RedoParseManager *parsemanager, XLog allocslot = XLogMemAlloc(memctl); if (allocslot == NULL) { - ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("XLogParseBufferAlloc Allocated buffer failed!, taoalblknum:%u, usedblknum:%u", memctl->totalblknum, memctl->usedblknum))); - while (blkstatehead != NULL) { - recordstate = blkstatehead; - blkstatehead = (XLogRecParseState *)blkstatehead->nextrecord; - XLogParseBufferRelease(recordstate); /* release all recordstate, keep the xlog atomic */ - } return NULL; } + + pg_read_barrier(); Assert(allocslot->buf_id != InvalidBuffer); Assert(memctl->itemsize == (sizeof(XLogRecParseState) + sizeof(ParseBufferDesc))); descstate = (ParseBufferDesc *)((char *)parsemanager->parsebuffers + memctl->itemsize * (allocslot->buf_id - 1)); descstate->buff_id = allocslot->buf_id; + Assert(descstate->state == 0); descstate->state = 1; recordstate = (XLogRecParseState *)((char *)descstate + sizeof(ParseBufferDesc)); recordstate->nextrecord = NULL; @@ -1183,15 +1240,6 @@ XLogRecParseState *XLogParseBufferCopy(XLogRecParseState *srcState) errno_t rc = memcpy_s(&newState->blockparse, sizeof(newState->blockparse), &srcState->blockparse, sizeof(srcState->blockparse)); securec_check(rc, "\0", "\0"); - - if (newState->blockparse.blockhead.block_valid == BLOCK_DATA_DDL_TYPE && - newState->blockparse.extra_rec.blockddlrec.blockddltype == BLOCK_DDL_DROP_BKTLIST) { - uint32* bucketList = (uint32 *)palloc(BktBitMaxMapCnt * sizeof(uint32)); - rc = memcpy_s(bucketList, BktBitMaxMapCnt * sizeof(uint32), - srcState->blockparse.extra_rec.blockddlrec.mainData, BktBitMaxMapCnt * sizeof(uint32)); - securec_check(rc, "\0", "\0"); - newState->blockparse.extra_rec.blockddlrec.mainData = (char *)bucketList; - } newState->isFullSync = srcState->isFullSync; return newState; @@ -1203,19 +1251,14 @@ void XLogParseBufferRelease(XLogRecParseState *recordstate) ParseBufferDesc *descstate = NULL; descstate = (ParseBufferDesc *)((char *)recordstate - 
sizeof(ParseBufferDesc)); - if (!RedoMemIsValid(memctl, descstate->buff_id)) { + if (!RedoMemIsValid(memctl, descstate->buff_id) || descstate->state == 0) { ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("XLogParseBufferRelease failed!, taoalblknum:%u, buf_id:%u", memctl->totalblknum, descstate->buff_id))); /* panic */ } - Assert(descstate->state != 0); - descstate->state = 0; - if (recordstate->blockparse.blockhead.block_valid == BLOCK_DATA_DDL_TYPE && - recordstate->blockparse.extra_rec.blockddlrec.blockddltype == BLOCK_DDL_DROP_BKTLIST) { - pfree_ext(recordstate->blockparse.extra_rec.blockddlrec.mainData); - } + descstate->state = 0; XLogMemRelease(memctl, descstate->buff_id); } @@ -1240,9 +1283,6 @@ void XLogBlockDataCommonRedo(XLogBlockHead *blockhead, void *blockrecbody, RedoB case RM_BTREE_ID: BtreeRedoDataBlock(blockhead, blockdatarec, bufferinfo); break; - case RM_HASH_ID: - HashRedoDataBlock(blockhead, blockdatarec, bufferinfo); - break; case RM_UBTREE_ID: UBTreeRedoDataBlock(blockhead, blockdatarec, bufferinfo); break; @@ -1272,6 +1312,9 @@ void XLogBlockDataCommonRedo(XLogBlockHead *blockhead, void *blockrecbody, RedoB case RM_UHEAPUNDO_ID: RedoUndoActionBlock(blockhead, blockdatarec, bufferinfo); break; + case RM_SEGPAGE_ID: + SegPageRedoDataBlock(blockhead, blockdatarec, bufferinfo); + break; default: ereport(PANIC, (errmsg("XLogBlockDataCommonRedo: unknown rmid %u", rmid))); } @@ -1374,7 +1417,7 @@ void XLogBlockDdlCommonRedo(XLogBlockHead *blockhead, void *blockrecbody, RedoBu rnode.dbNode = blockhead->dbNode; rnode.relNode = blockhead->relNode; rnode.bucketNode = blockhead->bucketNode; - rnode.opt = blockhead->opt; + switch (blockddlrec->blockddltype) { case BLOCK_DDL_CREATE_RELNODE: smgr_redo_create(rnode, blockhead->forknum, blockddlrec->mainData); @@ -1443,7 +1486,7 @@ void XLogBlockSegDdlDoRealAction(XLogBlockHead* blockhead, void* blockrecbody, R rnode.dbNode = blockhead->dbNode; rnode.relNode = blockhead->relNode; rnode.bucketNode = blockhead->bucketNode; - rnode.opt = blockhead->opt; + switch (segddlrec->blockddlrec.blockddltype) { case BLOCK_DDL_TRUNCATE_RELNODE: xlog_block_segpage_redo_truncate(rnode, blockhead, segddlrec); @@ -1468,7 +1511,7 @@ void XLogBlockDdlDoSmgrAction(XLogBlockHead *blockhead, void *blockrecbody, Redo rnode.dbNode = blockhead->dbNode; rnode.relNode = blockhead->relNode; rnode.bucketNode = blockhead->bucketNode; - rnode.opt = blockhead->opt; + switch (blockddlrec->blockddltype) { case BLOCK_DDL_CREATE_RELNODE: smgr_redo_create(rnode, blockhead->forknum, blockddlrec->mainData); @@ -1477,9 +1520,15 @@ void XLogBlockDdlDoSmgrAction(XLogBlockHead *blockhead, void *blockrecbody, Redo xlog_block_smgr_redo_truncate(rnode, blockhead->blkno, blockhead->end_ptr); break; case BLOCK_DDL_DROP_RELNODE: { - SMgrRelation reln = smgropen(bufferinfo->blockinfo.rnode, InvalidBackendId, - GetColumnNum(bufferinfo->blockinfo.forknum)); - smgrclose(reln); + ColFileNodeRel *xnodes = (ColFileNodeRel *)blockddlrec->mainData; + for (int i = 0; i < blockddlrec->rels; ++i) { + ColFileNodeRel *colFileNodeRel = xnodes + i; + ColFileNode colFileNode; + ColFileNodeCopy(&colFileNode, colFileNodeRel); + if (!IsValidColForkNum(colFileNode.forknum)) { + XlogDropRowReation(colFileNode.filenode); + } + } break; } default: @@ -1679,19 +1728,23 @@ void XLogSynAllBuffer() } } -bool XLogBlockRedoForExtremeRTO(XLogRecParseState *redoblocktate, RedoBufferInfo *bufferinfo, bool notfound) +bool XLogBlockRedoForExtremeRTO(XLogRecParseState *redoblocktate, RedoBufferInfo 
*bufferinfo, bool notfound,
+    RedoTimeCost &readBufCost, RedoTimeCost &redoCost)
 {
     XLogRedoAction redoaction;
     uint16 block_valid;
     void *blockrecbody;
     XLogBlockHead *blockhead;
+    long readcount = u_sess->instr_cxt.pg_buffer_usage->shared_blks_read;
 
     /* decode blockdata body */
     blockhead = &redoblocktate->blockparse.blockhead;
     blockrecbody = &redoblocktate->blockparse.extra_rec;
     block_valid = XLogBlockHeadGetValidInfo(blockhead);
 
+    GetRedoStartTime(readBufCost);
     redoaction = XLogBlockGetOperatorBuffer(blockhead, blockrecbody, bufferinfo, notfound, G_BUFFERREADMETHOD);
+    CountRedoTime(readBufCost);
     if (redoaction == BLK_NOTFOUND) {
 #ifdef USE_ASSERT_CHECKING
         ereport(WARNING, (errmsg("XLogBlockRedoForExtremeRTO:lsn %X/%X, page %u/%u/%u %u not found",
@@ -1707,12 +1760,16 @@ bool XLogBlockRedoForExtremeRTO(XLogRecParseState *redoblocktate, RedoBufferInfo
         ereport(PANIC, (errmsg("XLogBlockRedoForExtremeRTO: redobuffer checkfailed")));
     }
     if (block_valid <= BLOCK_DATA_FSM_TYPE) {
+        GetRedoStartTime(redoCost);
         Assert(block_valid == g_xlogExtRtoRedoTable[block_valid].block_valid);
         g_xlogExtRtoRedoTable[block_valid].xlog_redoextrto(blockhead, blockrecbody, bufferinfo);
+        CountRedoTime(redoCost);
 #ifdef USE_ASSERT_CHECKING
-        if (block_valid != BLOCK_DATA_UNDO_TYPE)
+        if (block_valid != BLOCK_DATA_UNDO_TYPE && !bufferinfo->pageinfo.ignorecheck) {
             DoRecordCheck(redoblocktate, PageGetLSN(bufferinfo->pageinfo.page), true);
+        }
 #endif
+        AddReadBlock(redoblocktate, (u_sess->instr_cxt.pg_buffer_usage->shared_blks_read - readcount));
     } else {
         ereport(WARNING, (errmsg("XLogBlockRedoForExtremeRTO: unsuport type %u, lsn %X/%X", (uint32)block_valid,
                                  (uint32)(blockhead->end_ptr >> 32), (uint32)(blockhead->end_ptr))));
diff --git a/src/gausskernel/storage/access/rmgrdesc/barrierdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/barrierdesc.cpp
index 6dd6d2f7b..1a2414872 100644
--- a/src/gausskernel/storage/access/rmgrdesc/barrierdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/barrierdesc.cpp
@@ -18,11 +18,33 @@
 
 #include "pgxc/barrier.h"
 
+const char* barrier_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_BARRIER_CREATE) {
+        return "barrier_create";
+    } else if (info == XLOG_BARRIER_COMMIT) {
+        return "barrier_commit";
+    } else if (info == XLOG_BARRIER_SWITCHOVER) {
+        return "barrier_switchover";
+    } else {
+        return "unknown_type";
+    }
+}
+
+
 void barrier_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
-    Assert((XLogRecGetInfo(record) & ~XLR_INFO_MASK) == XLOG_BARRIER_CREATE);
-    appendStringInfo(buf, "BARRIER %s", rec);
+    uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
+    if (info == XLOG_BARRIER_CREATE) {
+        appendStringInfo(buf, "BARRIER CREATE %s", rec);
+    } else if (info == XLOG_BARRIER_COMMIT) {
+        appendStringInfo(buf, "BARRIER COMMIT %s", rec);
+    } else if (info == XLOG_BARRIER_SWITCHOVER) {
+        appendStringInfo(buf, "BARRIER SWITCHOVER %s", rec);
+    } else {
+        appendStringInfo(buf, "UNKNOWN");
+    }
 }
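Each rmgrdesc file touched above and below gains a `*_type_name` helper with the same shape: strip the record's flag bits with `~XLR_INFO_MASK`, map the remaining opcode to a short label, and fall back to "unknown_type". The hunks shown here do not include the consumer of these helpers; the sketch below is a hypothetical caller for illustration only (the `XLogRecordSubtypeName` wrapper and its rmid switch are assumptions, not an API added by this patch):

```cpp
/* Hypothetical glue, for illustration: label a WAL record for a statistics
 * or dump view using the per-resource-manager helpers added by this patch.
 * Only a few resource managers are shown; each helper strips
 * ~XLR_INFO_MASK from the info byte itself. */
static const char *XLogRecordSubtypeName(uint8 rmid, uint8 info)
{
    switch (rmid) {
        case RM_HEAP_ID:
            return heap_type_name(info);
        case RM_BTREE_ID:
            return btree_type_name(info);
        case RM_SEGPAGE_ID:
            return segpage_smgr_type_name(info);
        default:
            return "unknown_type";
    }
}
```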
diff --git a/src/gausskernel/storage/access/rmgrdesc/clogdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/clogdesc.cpp
index b6f6c3785..f60a76b50 100644
--- a/src/gausskernel/storage/access/rmgrdesc/clogdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/clogdesc.cpp
@@ -20,6 +20,18 @@
 #include "common/fe_memutils.h"
 #endif
 
+const char *clog_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == CLOG_ZEROPAGE) {
+        return "clog_zeropage";
+    } else if (info == CLOG_TRUNCATE) {
+        return "clog_truncate";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void clog_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/dbasedesc.cpp b/src/gausskernel/storage/access/rmgrdesc/dbasedesc.cpp
index e46a9ad06..789caf535 100644
--- a/src/gausskernel/storage/access/rmgrdesc/dbasedesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/dbasedesc.cpp
@@ -19,6 +19,18 @@
 #include "commands/dbcommands.h"
 #include "lib/stringinfo.h"
 
+const char* dbase_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_DBASE_CREATE) {
+        return "db_create";
+    } else if (info == XLOG_DBASE_DROP) {
+        return "db_drop";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void dbase_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/gindesc.cpp b/src/gausskernel/storage/access/rmgrdesc/gindesc.cpp
index 802405834..8dbf4f118 100644
--- a/src/gausskernel/storage/access/rmgrdesc/gindesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/gindesc.cpp
@@ -69,6 +69,46 @@ static void desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *inse
     }
 }
 
+const char* gin_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    switch (info) {
+        case XLOG_GIN_CREATE_INDEX:
+            return "gin_create_index";
+            break;
+        case XLOG_GIN_CREATE_PTREE:
+            return "gin_create_ptree";
+            break;
+        case XLOG_GIN_INSERT:
+            return "gin_insert";
+            break;
+        case XLOG_GIN_SPLIT:
+            return "gin_split";
+            break;
+        case XLOG_GIN_VACUUM_PAGE:
+            return "gin_vacuum";
+            break;
+        case XLOG_GIN_VACUUM_DATA_LEAF_PAGE:
+            return "gin_vacuum_leaf";
+            break;
+        case XLOG_GIN_DELETE_PAGE:
+            return "gin_delete_page";
+            break;
+        case XLOG_GIN_UPDATE_META_PAGE:
+            return "gin_update_metapage";
+            break;
+        case XLOG_GIN_INSERT_LISTPAGE:
+            return "gin_insert_listpage";
+            break;
+        case XLOG_GIN_DELETE_LISTPAGE:
+            return "gin_delete_listpage";
+            break;
+        default:
+            return "unknown_type";
+            break;
+    }
+}
+
 void gin_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/gistdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/gistdesc.cpp
index cfe1c3dba..2ada51240 100644
--- a/src/gausskernel/storage/access/rmgrdesc/gistdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/gistdesc.cpp
@@ -25,6 +25,25 @@ static void out_gistxlogPageSplit(StringInfo buf, gistxlogPageSplit *xlrec)
     appendStringInfo(buf, "page_split: splits to %hu pages", xlrec->npage);
 }
 
+const char* gist_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    switch (info) {
+        case XLOG_GIST_PAGE_UPDATE:
+            return "gist_page_update";
+            break;
+        case XLOG_GIST_PAGE_SPLIT:
+            return "gist_page_split";
+            break;
+        case XLOG_GIST_CREATE_INDEX:
+            return "gist_create_index";
+            break;
+        default:
+            break;
+    }
+    return "unknown_type";
+}
+
 void gist_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/hashdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/hashdesc.cpp
index 5533f09fe..9a6ccacc2 100644
--- a/src/gausskernel/storage/access/rmgrdesc/hashdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/hashdesc.cpp
@@ -16,155 +16,14 @@
 #include "postgres.h"
 #include "knl/knl_variable.h"
 
-#include "access/rmgr.h"
-#include "access/hash_xlog.h"
+#include "access/hash.h"
+
+const char* hash_type_name(uint8 subtype)
+{
+    return "unknown_type";
+}
 
 void 
hash_desc(StringInfo buf, XLogReaderState *record) { - char *rec = XLogRecGetData(record); - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - - switch (info) { - case XLOG_HASH_INIT_META_PAGE: - { - xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) rec; - - appendStringInfo(buf, "num_tuples %g, fillfactor %d", - xlrec->num_tuples, xlrec->ffactor); - break; - } - case XLOG_HASH_INIT_BITMAP_PAGE: - { - xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) rec; - - appendStringInfo(buf, "bmsize %d", xlrec->bmsize); - break; - } - case XLOG_HASH_INSERT: - { - xl_hash_insert *xlrec = (xl_hash_insert *) rec; - - appendStringInfo(buf, "off %u", xlrec->offnum); - break; - } - case XLOG_HASH_ADD_OVFL_PAGE: - { - xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *) rec; - - appendStringInfo(buf, "bmsize %d, bmpage_found %c", - xlrec->bmsize, (xlrec->bmpage_found) ? 'T' : 'F'); - break; - } - case XLOG_HASH_SPLIT_ALLOCATE_PAGE: - { - xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *) rec; - - appendStringInfo(buf, "new_bucket %u, meta_page_masks_updated %c, issplitpoint_changed %c", - xlrec->new_bucket, - (xlrec->flags & XLH_SPLIT_META_UPDATE_MASKS) ? 'T' : 'F', - (xlrec->flags & XLH_SPLIT_META_UPDATE_SPLITPOINT) ? 'T' : 'F'); - break; - } - case XLOG_HASH_SPLIT_COMPLETE: - { - xl_hash_split_complete *xlrec = (xl_hash_split_complete *) rec; - - appendStringInfo(buf, "old_bucket_flag %u, new_bucket_flag %u", - xlrec->old_bucket_flag, xlrec->new_bucket_flag); - break; - } - case XLOG_HASH_MOVE_PAGE_CONTENTS: - { - xl_hash_move_page_contents *xlrec = (xl_hash_move_page_contents *) rec; - - appendStringInfo(buf, "ntups %d, is_primary %c", - xlrec->ntups, - xlrec->is_prim_bucket_same_wrt ? 'T' : 'F'); - break; - } - case XLOG_HASH_SQUEEZE_PAGE: - { - xl_hash_squeeze_page *xlrec = (xl_hash_squeeze_page *) rec; - - appendStringInfo(buf, "prevblkno %u, nextblkno %u, ntups %d, is_primary %c", - xlrec->prevblkno, - xlrec->nextblkno, - xlrec->ntups, - xlrec->is_prim_bucket_same_wrt ? 'T' : 'F'); - break; - } - case XLOG_HASH_DELETE: - { - xl_hash_delete *xlrec = (xl_hash_delete *) rec; - - appendStringInfo(buf, "clear_dead_marking %c, is_primary %c", - xlrec->clear_dead_marking ? 'T' : 'F', - xlrec->is_primary_bucket_page ? 
'T' : 'F');
-            break;
-        }
-        case XLOG_HASH_UPDATE_META_PAGE:
-        {
-            xl_hash_update_meta_page *xlrec = (xl_hash_update_meta_page *) rec;
-
-            appendStringInfo(buf, "ntuples %g",
-                             xlrec->ntuples);
-            break;
-        }
-        case XLOG_HASH_VACUUM_ONE_PAGE:
-        {
-            xl_hash_vacuum_one_page *xlrec = (xl_hash_vacuum_one_page *) rec;
-
-            appendStringInfo(buf, "ntuples %d",
-                             xlrec->ntuples);
-            break;
-        }
-    }
-}
-
-const char *hash_identify(uint8 info)
-{
-    const char *id = NULL;
-
-    switch (info & ~XLR_INFO_MASK) {
-        case XLOG_HASH_INIT_META_PAGE:
-            id = "INIT_META_PAGE";
-            break;
-        case XLOG_HASH_INIT_BITMAP_PAGE:
-            id = "INIT_BITMAP_PAGE";
-            break;
-        case XLOG_HASH_INSERT:
-            id = "INSERT";
-            break;
-        case XLOG_HASH_ADD_OVFL_PAGE:
-            id = "ADD_OVFL_PAGE";
-            break;
-        case XLOG_HASH_SPLIT_ALLOCATE_PAGE:
-            id = "SPLIT_ALLOCATE_PAGE";
-            break;
-        case XLOG_HASH_SPLIT_PAGE:
-            id = "SPLIT_PAGE";
-            break;
-        case XLOG_HASH_SPLIT_COMPLETE:
-            id = "SPLIT_COMPLETE";
-            break;
-        case XLOG_HASH_MOVE_PAGE_CONTENTS:
-            id = "MOVE_PAGE_CONTENTS";
-            break;
-        case XLOG_HASH_SQUEEZE_PAGE:
-            id = "SQUEEZE_PAGE";
-            break;
-        case XLOG_HASH_DELETE:
-            id = "DELETE";
-            break;
-        case XLOG_HASH_SPLIT_CLEANUP:
-            id = "SPLIT_CLEANUP";
-            break;
-        case XLOG_HASH_UPDATE_META_PAGE:
-            id = "UPDATE_META_PAGE";
-            break;
-        case XLOG_HASH_VACUUM_ONE_PAGE:
-            id = "VACUUM_ONE_PAGE";
-    }
-
-    return id;
+    /* nothing to do */
 }
\ No newline at end of file
diff --git a/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp
index 018446e94..00126e95e 100644
--- a/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/heapdesc.cpp
@@ -71,6 +71,30 @@ void heap3_new_cid(StringInfo buf, int bucket_id, xl_heap_new_cid *xlrec)
     appendStringInfo(buf, "; cmin: %u, cmax: %u, combo: %u", xlrec->cmin, xlrec->cmax, xlrec->combocid);
 }
 
+const char* heap_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    info &= XLOG_HEAP_OPMASK;
+    if (info == XLOG_HEAP_INSERT) {
+        return "heap_insert";
+    } else if (info == XLOG_HEAP_DELETE) {
+        return "heap_delete";
+    } else if (info == XLOG_HEAP_UPDATE) {
+        return "heap_update";
+    } else if (info == XLOG_HEAP_HOT_UPDATE) {
+        return "heap_hot_update";
+    } else if (info == XLOG_HEAP_NEWPAGE) {
+        return "heap_newpage";
+    } else if (info == XLOG_HEAP_LOCK) {
+        return "heap_lock";
+    } else if (info == XLOG_HEAP_INPLACE) {
+        return "heap_inplace";
+    } else if (info == XLOG_HEAP_BASE_SHIFT) {
+        return "base_shift";
+    }
+    return "unknown_type";
+}
+
 void heap_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
@@ -147,6 +171,31 @@ void heap_desc(StringInfo buf, XLogReaderState *record)
     appendStringInfo(buf, "UNKNOWN");
 }
 
+const char* heap2_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
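+    /*
+     * HEAP2 opcodes share the heap info-byte layout, so the flag bits above
+     * XLOG_HEAP_OPMASK must be dropped before comparing against XLOG_HEAP2_*.
+     */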
+    info &= XLOG_HEAP_OPMASK;
+    if (info == XLOG_HEAP2_FREEZE) {
+        return "heap2_freeze";
+    } else if (info == XLOG_HEAP2_CLEAN) {
+        return "heap2_clean";
+    } else if (info == XLOG_HEAP2_PAGE_UPGRADE) {
+        return "heap2_page_upgrade"; // not used
+    } else if (info == XLOG_HEAP2_CLEANUP_INFO) {
+        return "heap2_cleanup_info";
+    } else if (info == XLOG_HEAP2_VISIBLE) {
+        return "heap2_visible";
+    } else if (info == XLOG_HEAP2_BCM) {
+        return "heap2_bcm";
+    } else if (info == XLOG_HEAP2_MULTI_INSERT) {
+        return "heap2_multi_insert";
+    } else if (info == XLOG_HEAP2_LOGICAL_NEWPAGE) {
+        return "heap2_logical_newpage";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void heap2_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
@@ -280,6 +329,19 @@ void heap2_desc(StringInfo buf, XLogReaderState *record)
     appendStringInfo(buf, "UNKNOWN");
 }
 
+const char* heap3_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    info &= XLOG_HEAP_OPMASK;
+    if (info == XLOG_HEAP3_NEW_CID) {
+        return "heap3_new_cid";
+    } else if (info == XLOG_HEAP3_REWRITE) {
+        return "heap3_rewrite";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void heap3_desc(StringInfo buf, XLogReaderState *record)
 {
     uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
diff --git a/src/gausskernel/storage/access/rmgrdesc/motdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/motdesc.cpp
index 7bfb95607..1e64947a8 100644
--- a/src/gausskernel/storage/access/rmgrdesc/motdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/motdesc.cpp
@@ -30,3 +30,8 @@ void MOTDesc(StringInfo buf, XLogReaderState* record)
     appendStringInfo(buf, "MOT Redo");
 }
 
+const char* MOT_type_name(uint8 subtype)
+{
+    return "mot_data";
+}
+
diff --git a/src/gausskernel/storage/access/rmgrdesc/mxactdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/mxactdesc.cpp
index d7f3013b0..660b832d8 100644
--- a/src/gausskernel/storage/access/rmgrdesc/mxactdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/mxactdesc.cpp
@@ -48,6 +48,21 @@ static void OutMember(StringInfo buf, TransactionId xidWithStatus)
     }
 }
 
+const char* multixact_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    info = info & XLOG_MULTIXACT_MASK;
+    if (info == XLOG_MULTIXACT_ZERO_OFF_PAGE) {
+        return "multi_zero_off_page";
+    } else if (info == XLOG_MULTIXACT_ZERO_MEM_PAGE) {
+        return "multi_zero_mem_page";
+    } else if (info == XLOG_MULTIXACT_CREATE_ID) {
+        return "multi_create_id";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void multixact_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
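The if/else ladders and switches in these helpers are easy to extend but repetitive. Purely as a design note, the same mapping can be expressed with a small lookup table; the sketch below is illustrative only (the struct and lookup function are hypothetical, and the patch itself keeps the explicit branches):

```cpp
/* Illustrative alternative, not part of the patch: a subtype-name table.
 * The opcode macros and labels are the ones used in multixact_type_name(). */
#include <cstddef>

struct XLogSubtypeName {
    uint8 info;        /* opcode after masking */
    const char *name;  /* short label */
};

static const XLogSubtypeName MULTIXACT_NAMES[] = {
    {XLOG_MULTIXACT_ZERO_OFF_PAGE, "multi_zero_off_page"},
    {XLOG_MULTIXACT_ZERO_MEM_PAGE, "multi_zero_mem_page"},
    {XLOG_MULTIXACT_CREATE_ID, "multi_create_id"},
};

static const char *LookupSubtypeName(const XLogSubtypeName *table, size_t n, uint8 info)
{
    for (size_t i = 0; i < n; i++) {
        if (table[i].info == info) {
            return table[i].name;
        }
    }
    return "unknown_type";
}
```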
diff --git a/src/gausskernel/storage/access/rmgrdesc/nbtdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/nbtdesc.cpp
index 589592424..8960ad93a 100644
--- a/src/gausskernel/storage/access/rmgrdesc/nbtdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/nbtdesc.cpp
@@ -19,6 +19,73 @@
 #include "access/nbtree.h"
 #include "access/ubtree.h"
 
+const char* btree_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+
+    switch (info) {
+        case XLOG_BTREE_INSERT_LEAF: {
+            return "bt_insert_leaf";
+            break;
+        }
+        case XLOG_BTREE_INSERT_UPPER: {
+            return "bt_insert_upper";
+            break;
+        }
+        case XLOG_BTREE_INSERT_META: {
+            return "bt_insert_meta";
+            break;
+        }
+        case XLOG_BTREE_SPLIT_L: {
+            return "bt_split_left";
+            break;
+        }
+        case XLOG_BTREE_SPLIT_R: {
+            return "bt_split_right";
+            break;
+        }
+        case XLOG_BTREE_SPLIT_L_ROOT: {
+            return "bt_split_l_root";
+            break;
+        }
+        case XLOG_BTREE_SPLIT_R_ROOT: {
+            return "bt_split_r_root";
+            break;
+        }
+        case XLOG_BTREE_VACUUM: {
+            return "bt_vacuum";
+            break;
+        }
+        case XLOG_BTREE_DELETE: {
+            return "bt_delete";
+            break;
+        }
+        case XLOG_BTREE_UNLINK_PAGE: {
+            return "bt_unlink_page";
+            break;
+        }
+        case XLOG_BTREE_UNLINK_PAGE_META: {
+            return "bt_unlink_page_meta";
+            break;
+        }
+        case XLOG_BTREE_MARK_PAGE_HALFDEAD: {
+            return "bt_mark_page_halfdead";
+            break;
+        }
+        case XLOG_BTREE_NEWROOT: {
+            return "bt_newroot";
+            break;
+        }
+        case XLOG_BTREE_REUSE_PAGE: {
+            return "bt_reuse_page";
+            break;
+        }
+        default:
+            return "unknown_type";
+            break;
+    }
+}
+
 void btree_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
@@ -146,6 +213,81 @@ void btree_desc(StringInfo buf, XLogReaderState *record)
     }
 }
 
+const char* ubtree_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    switch (info) {
+        case XLOG_UBTREE_INSERT_LEAF: {
+            return "ubt_insert_leaf";
+            break;
+        }
+        case XLOG_UBTREE_INSERT_UPPER: {
+            return "ubt_insert_upper";
+            break;
+        }
+        case XLOG_UBTREE_INSERT_META: {
+            return "ubt_insert_meta";
+            break;
+        }
+        case XLOG_UBTREE_SPLIT_L: {
+            return "ubt_split_l";
+            break;
+        }
+        case XLOG_UBTREE_SPLIT_R: {
+            return "ubt_split_r";
+            break;
+        }
+        case XLOG_UBTREE_SPLIT_L_ROOT: {
+            return "ubt_split_l_root";
+            break;
+        }
+        case XLOG_UBTREE_SPLIT_R_ROOT: {
+            return "ubt_split_r_root";
+            break;
+        }
+        case XLOG_UBTREE_VACUUM: {
+            return "ubt_vacuum";
+            break;
+        }
+        case XLOG_UBTREE_DELETE: {
+            return "ubt_delete";
+            break;
+        }
+        case XLOG_UBTREE_UNLINK_PAGE: {
+            return "ubt_unlink_page";
+            break;
+        }
+        case XLOG_UBTREE_UNLINK_PAGE_META: {
+            return "ubt_unlink_page_meta";
+            break;
+        }
+        case XLOG_UBTREE_MARK_PAGE_HALFDEAD: {
+            return "ubt_mark_page_halfdead";
+            break;
+        }
+        case XLOG_UBTREE_NEWROOT: {
+            return "ubt_newroot";
+            break;
+        }
+        case XLOG_UBTREE_REUSE_PAGE: {
+            return "ubt_reuse_page";
+            break;
+        }
+        case XLOG_UBTREE_MARK_DELETE: {
+            return "ubt_mark_delete";
+            break;
+        }
+        case XLOG_UBTREE_PRUNE_PAGE: {
+            return "ubt_prune_page";
+            break;
+        }
+        default:
+            return "unknown_type";
+            break;
+    }
+}
+
+
 void UBTreeDesc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
@@ -329,3 +471,34 @@ void UBTree2Desc(StringInfo buf, XLogReaderState* record)
     }
 }
 
+const char* ubtree2_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+
+    switch (info) {
+        case XLOG_UBTREE2_SHIFT_BASE: {
+            return "ubt2_shift_base";
+            break;
+        }
+        case XLOG_UBTREE2_RECYCLE_QUEUE_INIT_PAGE: {
+            return "ubt2_recycle_init_page";
+            break;
+        }
+        case XLOG_UBTREE2_RECYCLE_QUEUE_ENDPOINT: {
+            return "ubt2_recycle_endpoint";
+            break;
+        }
+        case XLOG_UBTREE2_RECYCLE_QUEUE_MODIFY: {
+            return "ubt2_recycle_modify";
+            break;
+        }
+        case XLOG_UBTREE2_FREEZE: {
+            return "ubt2_freeze";
+            break;
+        }
+        default:
+            return "unknown_type";
+            break;
+    }
+}
+
diff --git a/src/gausskernel/storage/access/rmgrdesc/relmapdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/relmapdesc.cpp
index bda29dc55..f123f10e0 100644
--- a/src/gausskernel/storage/access/rmgrdesc/relmapdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/relmapdesc.cpp
@@ -18,6 +18,16 @@
 
 #include "utils/relmapper.h"
 
+const char* relmap_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_RELMAP_UPDATE) {
+        return "relmap_update";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void relmap_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/segpagedesc.cpp b/src/gausskernel/storage/access/rmgrdesc/segpagedesc.cpp
index ced0b3475..48ac17249 100644
--- a/src/gausskernel/storage/access/rmgrdesc/segpagedesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/segpagedesc.cpp
@@ -236,3 +236,32 @@ void segpage_smgr_desc(StringInfo buf, XLogReaderState *record)
         appendStringInfo(buf, "[segpage] UNKNOWN");
     }
 }
+
+const char* segpage_smgr_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_SEG_ATOMIC_OPERATION) {
+        return "segsmgr_atomic_operation";
+    } else if (info == XLOG_SEG_SEGMENT_EXTEND) {
+        return "segsmgr_segment_extend";
+    } else if (info == XLOG_SEG_CREATE_EXTENT_GROUP) {
+        return "segsmgr_create_extent_group";
+    } else if (info == 
XLOG_SEG_INIT_MAPPAGE) {
+        return "segsmgr_init_mappage";
+    } else if (info == XLOG_SEG_INIT_INVRSPTR_PAGE) {
+        return "segsmgr_init_inversptr_page";
+    } else if (info == XLOG_SEG_ADD_NEW_GROUP) {
+        return "segsmgr_add_new_group";
+    } else if (info == XLOG_SEG_TRUNCATE) {
+        return "segsmgr_truncate";
+    } else if (info == XLOG_SEG_SPACE_SHRINK) {
+        return "segsmgr_space_shrink";
+    } else if (info == XLOG_SEG_SPACE_DROP) {
+        return "segsmgr_space_drop";
+    } else if (info == XLOG_SEG_NEW_PAGE) {
+        return "segsmgr_new_page";
+    } else {
+        return "unknown_type";
+    }
+}
+
diff --git a/src/gausskernel/storage/access/rmgrdesc/seqdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/seqdesc.cpp
index 649a35d02..d739a4776 100644
--- a/src/gausskernel/storage/access/rmgrdesc/seqdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/seqdesc.cpp
@@ -18,6 +18,16 @@
 
 #include "commands/sequence.h"
 
+const char* seq_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_SEQ_LOG) {
+        return "seq_log";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void seq_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/slotdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/slotdesc.cpp
index 260154d8b..d31009d62 100644
--- a/src/gausskernel/storage/access/rmgrdesc/slotdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/slotdesc.cpp
@@ -41,6 +41,24 @@
 #include "replication/walsender.h"
 #include "replication/syncrep.h"
 
+const char* slot_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_SLOT_CREATE) {
+        return "slot_create";
+    } else if (info == XLOG_SLOT_ADVANCE) {
+        return "slot_advance";
+    } else if (info == XLOG_SLOT_DROP) {
+        return "slot_drop";
+    } else if (info == XLOG_SLOT_CHECK) {
+        return "slot_check";
+    } else if (info == XLOG_TERM_LOG) {
+        return "slot_term_log";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void slot_desc(StringInfo buf, XLogReaderState *record)
 {
     appendStringInfo(buf, "slot info");
diff --git a/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp
index a574866fb..c132c14e6 100644
--- a/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/smgrdesc.cpp
@@ -21,6 +21,18 @@
 #include "storage/custorage.h"
 #include "storage/smgr/relfilenode.h"
 
+const char* smgr_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    if (info == XLOG_SMGR_CREATE) {
+        return "smgr_create";
+    } else if (info == XLOG_SMGR_TRUNCATE) {
+        return "smgr_truncate";
+    } else {
+        return "unknown_type";
+    }
+}
+
 void smgr_desc(StringInfo buf, XLogReaderState *record)
 {
     char *rec = XLogRecGetData(record);
diff --git a/src/gausskernel/storage/access/rmgrdesc/spgdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/spgdesc.cpp
index ddda2fafa..cbf414b4d 100644
--- a/src/gausskernel/storage/access/rmgrdesc/spgdesc.cpp
+++ b/src/gausskernel/storage/access/rmgrdesc/spgdesc.cpp
@@ -19,6 +19,44 @@
 #include "access/spgist_private.h"
 #include "utils/rel_gs.h"
 
+const char* spg_type_name(uint8 subtype)
+{
+    uint8 info = subtype & ~XLR_INFO_MASK;
+    switch (info) {
+        case XLOG_SPGIST_CREATE_INDEX:
+            return "spgist_create_index";
+            break;
+        case XLOG_SPGIST_ADD_LEAF:
+            return "spgist_add_leaf";
+            break;
+        case XLOG_SPGIST_MOVE_LEAFS:
+            return "spgist_move_leafs";
+            break;
+        case XLOG_SPGIST_ADD_NODE:
+            return "spgist_add_node";
+            break;
+        case XLOG_SPGIST_SPLIT_TUPLE:
+            return "spgist_split"; 
+ break; + case XLOG_SPGIST_PICKSPLIT: + return "spgist_picksplit"; + break; + case XLOG_SPGIST_VACUUM_LEAF: + return "spgist_vacuum_leaf"; + break; + case XLOG_SPGIST_VACUUM_ROOT: + return "spgist_vacuum_root"; + break; + case XLOG_SPGIST_VACUUM_REDIRECT: + return "spgist_vacuum_redirect"; + break; + default: + break; + } + return "unknown_type"; +} + + void spg_desc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); diff --git a/src/gausskernel/storage/access/rmgrdesc/standbydesc.cpp b/src/gausskernel/storage/access/rmgrdesc/standbydesc.cpp index 7e1edadf4..50197b56f 100644 --- a/src/gausskernel/storage/access/rmgrdesc/standbydesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/standbydesc.cpp @@ -18,6 +18,28 @@ #include "storage/standby.h" +const char* standby_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + if (info == XLOG_STANDBY_LOCK) { + return "standby_lock"; + } else if (info == XLOG_RUNNING_XACTS) { + return "running_xact"; + } else if (info == XLOG_STANDBY_CSN) { + return "standby_csn"; + } else if (info == XLOG_STANDBY_UNLOCK) { + return "standby_unlock"; +#ifndef ENABLE_MULTIPLE_NODES + } else if (info == XLOG_STANDBY_CSN_COMMITTING) { + return "standby_csn_committing"; + } else if (info == XLOG_STANDBY_CSN_ABORTED) { + return "standby_csn_abort"; +#endif + } else { + return "unknown_type"; + } +} + void standby_desc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); @@ -45,14 +67,14 @@ void standby_desc(StringInfo buf, XLogReaderState *record) appendStringInfo(buf, " xid " XID_FMT " db %u rel %u", xlrec->locks[i].xid, xlrec->locks[i].dbOid, xlrec->locks[i].relOid); } -#ifndef ENABLE_MULTIPLE_NODES + } else if (info == XLOG_STANDBY_CSN_COMMITTING) { uint64* id = ((uint64 *)XLogRecGetData(record)); appendStringInfo(buf, " XLOG_STANDBY_CSN_COMMITTING, xid %lu, csn %lu", id[0], id[1]); } else if (info == XLOG_STANDBY_CSN_ABORTED) { uint64* id = ((uint64 *)XLogRecGetData(record)); appendStringInfo(buf, " XLOG_STANDBY_CSN_ABORTED, xid %lu", id[0]); -#endif + } else appendStringInfo(buf, "UNKNOWN"); } diff --git a/src/gausskernel/storage/access/rmgrdesc/tblspcdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/tblspcdesc.cpp index 972f456eb..cff3193ce 100644 --- a/src/gausskernel/storage/access/rmgrdesc/tblspcdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/tblspcdesc.cpp @@ -18,6 +18,20 @@ #include "commands/tablespace.h" +const char* tblspc_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + if (info == XLOG_TBLSPC_CREATE) { + return "tblspc_create"; + } else if (info == XLOG_TBLSPC_RELATIVE_CREATE) { + return "tblspc_relative_create"; + } else if (info == XLOG_TBLSPC_DROP) { + return "tblspc_drop"; + } else { + return "unknown_type"; + } +} + void tblspc_desc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); diff --git a/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp index 4b0382778..58bf3344b 100644 --- a/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/uheapdesc.cpp @@ -55,6 +55,39 @@ char *GetUndoHeader(XlUndoHeader *xlundohdr, Oid *partitionOid, UndoRecPtr *blkp return currLogPtr; } +const char* uheap_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + info &= XLOG_UHEAP_OPMASK; + switch (info) { + case XLOG_UHEAP_INSERT: + return "uheap_insert"; + break; + case XLOG_UHEAP_DELETE: + return "uheap_delete"; + break;
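/*
 * Editor's note: uheap records pack operation flag bits into the same info
 * byte as the opcode, which is why this helper masks twice: ~XLR_INFO_MASK
 * first drops the reader-reserved bits, then XLOG_UHEAP_OPMASK isolates the
 * operation code. A minimal sketch of that decode step in isolation
 * (uheap_opcode is a hypothetical name; the two masks come from the
 * surrounding code):
 *
 *     static uint8 uheap_opcode(uint8 xl_info)
 *     {
 *         uint8 info = xl_info & ~XLR_INFO_MASK;  // drop reserved bits
 *         return info & XLOG_UHEAP_OPMASK;        // keep only the opcode
 *     }
 *
 * With this, uheap_type_name() labels a record correctly even when extra
 * flag bits are set alongside the opcode.
 */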
+ case XLOG_UHEAP_UPDATE: + return "uheap_update"; + break; + case XLOG_UHEAP_FREEZE_TD_SLOT: + return "uheap_freeze"; + break; + case XLOG_UHEAP_INVALID_TD_SLOT: + return "uheap_invalid_slot"; + break; + case XLOG_UHEAP_CLEAN: + return "uheap_clean"; + break; + case XLOG_UHEAP_MULTI_INSERT: + return "uheap_multi_insert"; + break; + default: + return "unknown_type"; + break; + } +} + + /* * For pg_xlogdump to dump out xlog info */ @@ -186,11 +219,16 @@ void UHeapDesc(StringInfo buf, XLogReaderState *record) xlundohdr->urecptr, blkprev, prevUrp, xlundohdr->relOid, partitionOid, xlundohdr->flag, subXid); if (xlrec->flags & XLZ_NON_INPLACE_UPDATE) { + appendStringInfo(buf, "NON_INPLACE_UPDATE. "); xlundohdr = (XlUndoHeader *)((char *)currLogPtr); currLogPtr = GetUndoHeader(xlundohdr, &partitionOid, &blkprev, &prevUrp, &subXid); appendStringInfo(buf, "relOid %u, urecptr %016lx, blkprev %016lx, prevurp %016lx, newflag %u, subXid %lu,", xlundohdr->relOid, xlundohdr->urecptr, blkprev, prevUrp, xlundohdr->flag, subXid); + } else if (xlrec->flags & XLZ_BLOCK_INPLACE_UPDATE) { + appendStringInfo(buf, "BLOCK_INPLACE_UPDATE. "); + } else { + appendStringInfo(buf, "INPLACE_UPDATE. "); } undo::XlogUndoMeta *xlundometa = (undo::XlogUndoMeta *)currLogPtr; @@ -300,6 +338,26 @@ void UHeapDesc(StringInfo buf, XLogReaderState *record) } } +const char* uheap2_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + info &= XLOG_UHEAP_OPMASK; + switch (info) { + case XLOG_UHEAP2_BASE_SHIFT: + return "uheap2_base_shift"; + break; + case XLOG_UHEAP2_FREEZE: + return "uheap2_freeze"; + break; + case XLOG_UHEAP2_EXTEND_TD_SLOTS: + return "uheap2_extend_slot"; + break; + default: + return "unknown_type"; + break; + } +} + void UHeap2Desc(StringInfo buf, XLogReaderState *record) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; @@ -328,6 +386,21 @@ void UHeap2Desc(StringInfo buf, XLogReaderState *record) } } +const char* uheap_undo_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + if (info == XLOG_UHEAPUNDO_PAGE) { + return "uheap_undo_page"; + } else if (info == XLOG_UHEAPUNDO_RESET_SLOT) { + return "uheap_undo_reset_slot"; + } else if (info == XLOG_UHEAPUNDO_ABORT_SPECINSERT) { + return "uheap_undo_abort"; + } else { + return "unknown_type"; + } +} + + void UHeapUndoDesc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); diff --git a/src/gausskernel/storage/access/rmgrdesc/undologdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/undologdesc.cpp index 9267f520d..d27366f46 100644 --- a/src/gausskernel/storage/access/rmgrdesc/undologdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/undologdesc.cpp @@ -19,6 +19,52 @@ #include "lib/stringinfo.h" #include "access/ustore/undo/knl_uundoxlog.h" +const char* undo::undo_xlog_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + + switch (info) { + case XLOG_UNDO_UNLINK: + { + return "undo_unlink"; + break; + } + case XLOG_UNDO_EXTEND: + { + return "undo_extend"; + break; + } + case XLOG_UNDO_CLEAN: + { + return "undo_clean"; + break; + } + case XLOG_SLOT_CLEAN: + { + return "undo_slot_clean"; + break; + } + case XLOG_UNDO_DISCARD: + { + return "undo_slot_discard"; + break; + } + case XLOG_SLOT_EXTEND: + { + return "undo_slot_extend"; + break; + } + case XLOG_SLOT_UNLINK: + { + return "undo_slot_unlink"; + break; + } + default: + break; + } + return "unknown_type"; +} + void undo::UndoXlogDesc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); @@ -88,6
+134,17 @@ void undo::UndoXlogDesc(StringInfo buf, XLogReaderState *record) } } +const char* undo::undo_xlog_roll_back_finish_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + if (info == XLOG_ROLLBACK_FINISH) { + return "undo_rollback_finish"; + } else { + return "unknown_type"; + } +} + + void undo::UndoXlogRollbackFinishDesc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); diff --git a/src/gausskernel/storage/access/rmgrdesc/xactdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/xactdesc.cpp index 950dda4e2..5376da82f 100644 --- a/src/gausskernel/storage/access/rmgrdesc/xactdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/xactdesc.cpp @@ -228,6 +228,31 @@ static void xact_desc_abort(StringInfo buf, xl_xact_abort *xlrec, bool abortXlog } } +const char *xact_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + if (info == XLOG_XACT_COMMIT_COMPACT) { + return "commit_compact"; + } else if (info == XLOG_XACT_COMMIT) { + return "commit"; + } else if (info == XLOG_XACT_ABORT) { + return "abort"; + } else if (info == XLOG_XACT_ABORT_WITH_XID) { + return "abort_with_xid"; + } else if (info == XLOG_XACT_PREPARE) { + return "prepare"; + } else if (info == XLOG_XACT_COMMIT_PREPARED) { + return "commit_prepared"; + } else if (info == XLOG_XACT_ABORT_PREPARED) { + return "abort_prepared"; + } else if (info == XLOG_XACT_ASSIGNMENT) { + return "assignment"; + } else { + return "unknown_type"; + } +} + + void xact_desc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); diff --git a/src/gausskernel/storage/access/rmgrdesc/xlogdesc.cpp b/src/gausskernel/storage/access/rmgrdesc/xlogdesc.cpp index ed9d2d93b..37f04b43e 100644 --- a/src/gausskernel/storage/access/rmgrdesc/xlogdesc.cpp +++ b/src/gausskernel/storage/access/rmgrdesc/xlogdesc.cpp @@ -36,6 +36,54 @@ struct config_enum_entry wal_level_options[] = { { NULL, 0, false } }; +const char *xlog_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + switch (info) { + case XLOG_CHECKPOINT_SHUTDOWN: + return "shutdown_checkpoint"; + break; + case XLOG_CHECKPOINT_ONLINE: + return "online_checkpoint"; + break; + case XLOG_NOOP: + return "noop"; + break; + case XLOG_NEXTOID: + return "nextoid"; + break; + case XLOG_SWITCH: + return "xlog switch"; + break; + case XLOG_BACKUP_END: + return "backup end"; + break; + case XLOG_PARAMETER_CHANGE: + return "parameter change"; + break; + case XLOG_RESTORE_POINT: + return "restore point"; + break; + case XLOG_FPW_CHANGE: + return "fpw change"; + break; + case XLOG_END_OF_RECOVERY: + return "end of recovery"; + break; + case XLOG_FPI_FOR_HINT: + return "fpi for hint"; + break; + case XLOG_FPI: + return "fpi"; + break; + case XLOG_DELAY_XLOG_RECYCLE: + return "delay recycle"; + break; + default: + return "unknown type"; + } +} + void xlog_desc(StringInfo buf, XLogReaderState *record) { char *rec = XLogRecGetData(record); diff --git a/src/gausskernel/storage/access/table/tableam.cpp b/src/gausskernel/storage/access/table/tableam.cpp index bf54e92a3..2359af34b 100644 --- a/src/gausskernel/storage/access/table/tableam.cpp +++ b/src/gausskernel/storage/access/table/tableam.cpp @@ -101,17 +101,21 @@ HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot) return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_copy_heap_tuple(slot); } -void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool should_free) +void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot,
Buffer buffer, bool shouldFree, bool batchMode) { - g_tableam_routines[GetTabelAmIndexTuple(tuple)]->tslot_store_tuple(tuple, slot, buffer, should_free); + g_tableam_routines[GetTabelAmIndexTuple(tuple)]->tslot_store_tuple(tuple, slot, buffer, shouldFree, batchMode); } - void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts) { g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getsomeattrs(slot, natts); } +void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts) +{ + g_tableam_routines[slot->tts_tupslotTableAm]->tslot_formbatch(slot, batch, cur_rows, natts); +} + Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) { return g_tableam_routines[slot->tts_tupslotTableAm]->tslot_getattr(slot, attnum, isnull); @@ -129,6 +133,7 @@ bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum) Tuple tableam_tslot_get_tuple_from_slot(Relation relation, TupleTableSlot *slot) { + slot->tts_tupleDescriptor->tdhasuids = RELATION_HAS_UIDS(relation); return g_tableam_routines[relation->rd_tam_type]->tslot_get_tuple_from_slot(slot); } @@ -397,10 +402,12 @@ TM_Result tableam_tuple_update(Relation relation, Relation parentRelation, ItemP TM_Result tableam_tuple_lock(Relation relation, Tuple tuple, Buffer *buffer, CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, bool allow_lock_self, bool follow_updates, bool eval, - Snapshot snapshot, ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid) + Snapshot snapshot, ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid, + int waitSec) { return g_tableam_routines[relation->rd_tam_type]->tuple_lock(relation, tuple, buffer, cid, mode, nowait, tmfd, - allow_lock_self, follow_updates, eval, snapshot, tid, isSelectForUpdate, isUpsert, conflictXid); + allow_lock_self, follow_updates, eval, snapshot, tid, isSelectForUpdate, isUpsert, conflictXid, + waitSec); } Tuple tableam_tuple_lock_updated(CommandId cid, Relation relation, int lockmode, ItemPointer tid, @@ -468,6 +475,11 @@ Tuple tableam_scan_getnexttuple(TableScanDesc sscan, ScanDirection direction) return g_tableam_routines[sscan->rs_rd->rd_tam_type]->scan_getnexttuple(sscan, direction); } +bool tableam_scan_gettuplebatchmode(TableScanDesc sscan, ScanDirection direction) +{ + return g_tableam_routines[sscan->rs_rd->rd_tam_type]->scan_GetNextBatch(sscan, direction); +} + void tableam_scan_getpage(TableScanDesc sscan, BlockNumber page) { return g_tableam_routines[sscan->rs_rd->rd_tam_type]->scan_getpage(sscan, page); @@ -687,9 +699,9 @@ HeapTuple HeapamTslotCopyHeapTuple(TupleTableSlot *slot) return heap_slot_copy_heap_tuple(slot); } -void HeapamTslotStoreHeapTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, bool should_free) +void HeapamTslotStoreHeapTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, bool shouldFree, bool batchMode) { - heap_slot_store_heap_tuple((HeapTuple)tuple, slot, buffer, should_free); + heap_slot_store_heap_tuple((HeapTuple)tuple, slot, buffer, shouldFree, batchMode); } void HeapamTslotGetsomeattrs(TupleTableSlot *slot, int natts) @@ -697,6 +709,11 @@ void HeapamTslotGetsomeattrs(TupleTableSlot *slot, int natts) heap_slot_getsomeattrs(slot, natts); } +void HeapamTslotFormBatch(TupleTableSlot *slot, VectorBatch* batch, int cur_rows, int natts) +{ + heap_slot_formbatch(slot, batch, cur_rows, natts); +} + Datum HeapamTslotGetattr(TupleTableSlot* slot, int attnum, bool* isnull) { return heap_slot_getattr(slot, attnum, isnull); @@ -712,7 
+729,7 @@ bool HeapamTslotAttisnull(TupleTableSlot* slot, int attnum) return heap_slot_attisnull(slot, attnum); } -Tuple HeapamTslotGetTupleFromSlot(TupleTableSlot* slot) +FORCE_INLINE Tuple HeapamTslotGetTupleFromSlot(TupleTableSlot* slot) { HeapTuple tuple = ExecMaterializeSlot(slot); tuple->tupInfo = 0; @@ -860,12 +877,13 @@ TM_Result HeapamTupleUpdate(Relation relation, Relation parentRelation, ItemPoin } TM_Result HeapamTupleLock(Relation relation, Tuple tuple, Buffer *buffer, - CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, - bool allow_lock_self, bool follow_updates, bool eval, Snapshot snapshot, - ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid) + CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, + bool allow_lock_self, bool follow_updates, bool eval, Snapshot snapshot, + ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid, + int waitSec) { return heap_lock_tuple(relation, (HeapTuple)tuple, buffer, cid, mode, nowait, follow_updates, tmfd, - allow_lock_self); + allow_lock_self, waitSec); } Tuple HeapamTupleLockUpdated(CommandId cid, Relation relation, int lockmode, ItemPointer tid, @@ -991,6 +1009,9 @@ void HeapamTcapPromoteLock (Relation relation, LOCKMODE *lockmode) bool HeapamTcapValidateSnap(Relation relation, Snapshot snap) { + if (RelationIsUstoreIndex(relation)) { + return true; + } return snap->xmin >= GetGlobalOldestXmin(); } @@ -1018,6 +1039,7 @@ const TableAmRoutine g_heapam_methods = { tslot_copy_heap_tuple : HeapamTslotCopyHeapTuple, tslot_store_tuple : HeapamTslotStoreHeapTuple, tslot_getsomeattrs : HeapamTslotGetsomeattrs, + tslot_formbatch : HeapamTslotFormBatch, tslot_getattr : HeapamTslotGetattr, tslot_getallattrs : HeapamTslotGetallattrs, tslot_attisnull : HeapamTslotAttisnull, @@ -1075,6 +1097,7 @@ const TableAmRoutine g_heapam_methods = { scan_markpos : HeapamScanMarkpos, scan_init_parallel_seqscan : HeapamScanInitParallelSeqscan, scan_getnexttuple : HeapamScanGetnexttuple, + scan_GetNextBatch : HeapamGetNextBatchMode, scan_getpage : HeapamScanGetpage, scan_gettuple_for_verify : HeapamGetNextForVerify, scan_end : HeapamScanEnd, @@ -1175,6 +1198,11 @@ void UHeapamTslotGetallattrs(TupleTableSlot *slot) UHeapSlotGetAllAttrs(slot); } +void UHeapamTslotFormBatch(TupleTableSlot *slot, VectorBatch* batch, int cur_rows, int natts) +{ + UHeapSlotFormBatch(slot, batch, cur_rows, natts); +} + Datum UHeapamTslotGetattr(TupleTableSlot *slot, int attnum, bool *isnull) { return UHeapSlotGetAttr(slot, attnum, isnull); @@ -1474,12 +1502,12 @@ void UHeapamScanRescan(TableScanDesc sscan, ScanKey key) void UHeapamScanRestrpos(TableScanDesc sscan) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("not supported in ustore yet"))); + return UHeapRestRpos(sscan); } void UHeapamScanMarkpos(TableScanDesc sscan) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("not supported in ustore yet"))); + return UHeapMarkPos(sscan); } @@ -1494,6 +1522,23 @@ Tuple UHeapamScanGetnexttuple(TableScanDesc sscan, ScanDirection direction) return (Tuple)UHeapGetNext(sscan, direction); } +bool UHeapamGetNextBatchMode(TableScanDesc sscan, ScanDirection direction) +{ + /* Note: no locking manipulations needed */ + bool finished = false; + UHeapScanDesc scan = (UHeapScanDesc)sscan; + + scan->rs_base.rs_ctupRows = 0; + Assert(ScanDirectionIsForward(direction)); + if (likely(scan->rs_base.rs_pageatatime)) { + finished = UHeapGetTupPageBatchmode(scan, direction); + } else { + 
ereport(ERROR, (errcode(ERRCODE_RELATION_OPEN_ERROR), + errmsg("relation %s is temporarily unavailable", RelationGetRelationName(scan->rs_base.rs_rd)))); + } + return finished; +} + void UHeapamScanGetpage(TableScanDesc sscan, BlockNumber page) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("not supported in ustore yet"))); @@ -1504,9 +1549,9 @@ Tuple UHeapamGetNextForVerify(TableScanDesc sscan, ScanDirection direction, bool return (Tuple)UHeapGetNextForVerify(sscan, direction, isValidRelationPage); } -void UHeapamTslotStoreUHeapTuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree) +void UHeapamTslotStoreUHeapTuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode) { - UHeapSlotStoreUHeapTuple((UHeapTuple)tuple, slot, shouldFree); + UHeapSlotStoreUHeapTuple((UHeapTuple)tuple, slot, shouldFree, batchMode); } /* ------------------------------------------------------------------------ @@ -1610,16 +1655,17 @@ TM_Result UHeapamTupleUpdate(Relation relation, Relation parentRelation, ItemPoi if (mode != NULL) { *mode = LockTupleExclusive; } + return result; } TM_Result UHeapamTupleLock(Relation relation, Tuple tuple, Buffer *buffer, CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, bool allow_lock_self, bool follow_updates, bool eval, Snapshot snapshot, - ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid) + ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid, int waitSec) { return UHeapLockTuple(relation, (UHeapTuple)tuple, buffer, cid, mode, nowait, tmfd, follow_updates, eval, snapshot, - isSelectForUpdate, allow_lock_self, isUpsert, conflictXid); + isSelectForUpdate, allow_lock_self, isUpsert, conflictXid, waitSec); } Tuple UHeapamTupleLockUpdated(CommandId cid, Relation relation, int lockmode, ItemPointer tid, TransactionId priorXmax, @@ -1690,6 +1736,7 @@ const TableAmRoutine g_ustoream_methods = { tslot_copy_heap_tuple : UHeapamTslotCopyHeapTuple, tslot_store_tuple : UHeapamTslotStoreUHeapTuple, tslot_getsomeattrs : UHeapSlotGetSomeAttrs, + tslot_formbatch : UHeapamTslotFormBatch, tslot_getattr : UHeapamTslotGetattr, tslot_getallattrs : UHeapamTslotGetallattrs, tslot_attisnull : UHeapamTslotAttisnull, @@ -1747,6 +1794,7 @@ const TableAmRoutine g_ustoream_methods = { scan_init_parallel_seqscan : HeapamScanInitParallelSeqscan, scan_getnexttuple : UHeapamScanGetnexttuple, + scan_GetNextBatch : UHeapamGetNextBatchMode, scan_getpage : UHeapamScanGetpage, scan_gettuple_for_verify : UHeapamGetNextForVerify, diff --git a/src/gausskernel/storage/access/transam/CMakeLists.txt b/src/gausskernel/storage/access/transam/CMakeLists.txt index f9f26665f..bf6cd3273 100755 --- a/src/gausskernel/storage/access/transam/CMakeLists.txt +++ b/src/gausskernel/storage/access/transam/CMakeLists.txt @@ -5,6 +5,7 @@ list(APPEND TGT_transam_SRC ${CMAKE_CURRENT_SOURCE_DIR}/clog.cpp ${CMAKE_CURRENT_SOURCE_DIR}/csnlog.cpp ${CMAKE_CURRENT_SOURCE_DIR}/double_write.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/single_double_write.cpp ${CMAKE_CURRENT_SOURCE_DIR}/multi_redo_api.cpp ${CMAKE_CURRENT_SOURCE_DIR}/multi_redo_settings.cpp ${CMAKE_CURRENT_SOURCE_DIR}/multixact.cpp diff --git a/src/gausskernel/storage/access/transam/Makefile b/src/gausskernel/storage/access/transam/Makefile index e36f6adfd..7e86d13d1 100644 --- a/src/gausskernel/storage/access/transam/Makefile +++ b/src/gausskernel/storage/access/transam/Makefile @@ -11,12 +11,12 @@ ifneq "$(MAKECMDGOALS)" "clean" endif ifeq
($(enable_multiple_nodes), yes) OBJS = clog.o multixact.o rmgr.o slru.o csnlog.o transam.o twophase.o \ - twophase_rmgr.o varsup.o double_write.o seg_double_write.o redo_statistic.o multi_redo_api.o multi_redo_settings.o\ + twophase_rmgr.o varsup.o double_write.o single_double_write.o seg_double_write.o redo_statistic.o multi_redo_api.o multi_redo_settings.o\ xact.o xlog.o xlogfuncs.o \ xloginsert.o xlogreader.o xlogutils.o cbmparsexlog.o cbmfuncs.o else OBJS = clog.o gtm_single.o multixact.o rmgr.o slru.o csnlog.o transam.o twophase.o \ - twophase_rmgr.o varsup.o double_write.o seg_double_write.o redo_statistic.o multi_redo_api.o multi_redo_settings.o\ + twophase_rmgr.o varsup.o double_write.o single_double_write.o seg_double_write.o redo_statistic.o multi_redo_api.o multi_redo_settings.o\ xact.o xlog.o xlogfuncs.o \ xloginsert.o xlogreader.o xlogutils.o cbmparsexlog.o cbmfuncs.o endif diff --git a/src/gausskernel/storage/access/transam/cbmparsexlog.cpp b/src/gausskernel/storage/access/transam/cbmparsexlog.cpp index 53cc87f98..98f31e6a2 100644 --- a/src/gausskernel/storage/access/transam/cbmparsexlog.cpp +++ b/src/gausskernel/storage/access/transam/cbmparsexlog.cpp @@ -86,7 +86,7 @@ static void StartExistCBMFile(uint64 lastfileSize); static HTAB *CBMPageHashInitialize(MemoryContext memoryContext); static bool ParseXlogIntoCBMPages(TimeLineID timeLine, bool isRecEnd); static int CBMXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *readBuf, TimeLineID *pageTLI); + char *readBuf, TimeLineID *pageTLI, char* xlog_path = NULL); static void TrackChangeBlock(XLogReaderState *record); static void TrackRelPageModification(XLogReaderState *record); @@ -901,7 +901,7 @@ static bool ParseXlogIntoCBMPages(TimeLineID timeLine, bool isRecEnd) /* XLogreader callback function, to read a WAL page */ static int CBMXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *readBuf, TimeLineID *pageTLI) + char *readBuf, TimeLineID *pageTLI, char* xlog_path) { XLogPageReadPrivateCBM *readprivate = (XLogPageReadPrivateCBM *)xlogreader->private_data; uint32 targetPageOff; @@ -1543,6 +1543,10 @@ static void TrackUheapMultiInsert(XLogReaderState *record) currLogPtr += sizeof(uint16); } + if ((record->decoded_record->xl_term & XLOG_CONTAIN_CSN) != 0) { + currLogPtr += sizeof(CommitSeqNo); + } + currLogPtr += SizeOfUHeapMultiInsert; nranges = *(int *)currLogPtr; currLogPtr += sizeof(int); diff --git a/src/gausskernel/storage/access/transam/clog.cpp b/src/gausskernel/storage/access/transam/clog.cpp index aa2e1d0b5..1fb52bccb 100644 --- a/src/gausskernel/storage/access/transam/clog.cpp +++ b/src/gausskernel/storage/access/transam/clog.cpp @@ -38,6 +38,7 @@ #include "access/clog.h" #include "access/slru.h" #include "access/transam.h" +#include "access/twophase.h" #include "access/xlog.h" #include "access/xloginsert.h" #include "access/xlogutils.h" @@ -1055,6 +1056,7 @@ void TruncateCLOG(TransactionId oldestXact) /* Now we can remove the old CLOG segment(s) */ SimpleLruTruncate(ClogCtl(0), cutoffPage, NUM_CLOG_PARTITIONS); + DeleteObsoleteTwoPhaseFile(cutoffPage); ereport(LOG, (errmsg("Truncate CLOG at xid %lu", oldestXact))); } @@ -1123,6 +1125,7 @@ void clog_redo(XLogReaderState *record) ClogCtl(pageno)->shared->latest_page_number = pageno; SimpleLruTruncate(ClogCtl(0), pageno, NUM_CLOG_PARTITIONS); + DeleteObsoleteTwoPhaseFile(pageno); } else ereport(PANIC, (errmsg("clog_redo: unknown op code %u", 
(uint32)info))); } diff --git a/src/gausskernel/storage/access/transam/csnlog.cpp b/src/gausskernel/storage/access/transam/csnlog.cpp index c23808ce5..8afadd748 100644 --- a/src/gausskernel/storage/access/transam/csnlog.cpp +++ b/src/gausskernel/storage/access/transam/csnlog.cpp @@ -51,6 +51,7 @@ #include "storage/procarray.h" #include "gstrace/gstrace_infra.h" #include "gstrace/access_gstrace.h" +#include "replication/walreceiver.h" /* * Defines for CSNLOG page sizes. A page is the same BLCKSZ as is used @@ -117,9 +118,6 @@ void CSNLogSetCommitSeqNo(TransactionId xid, int nsubxids, TransactionId *subxid /* for standby node, don't set invalid or abort csn mark. */ if ((t_thrd.xact_cxt.useLocalSnapshot || -#ifdef ENABLE_MULTIPLE_NODES - RecoveryInProgress() || -#endif g_instance.attr.attr_storage.IsRoachStandbyCluster) && csn <= COMMITSEQNO_ABORTED) { return; @@ -157,6 +155,9 @@ void CSNLogSetCommitSeqNo(TransactionId xid, int nsubxids, TransactionId *subxid pageno = TransactionIdToCSNPage(subxids[offset]); xid = InvalidTransactionId; } + if (IS_DISASTER_RECOVER_MODE && COMMITSEQNO_IS_COMMITTED(csn)) { + UpdateXLogMaxCSN(csn); + } } /** @@ -457,6 +458,30 @@ CommitSeqNo CSNLogGetNestCommitSeqNo(TransactionId xid) return csn; } +/** + * @Description: Get the CSN of a transaction for the disaster-recovery path, tolerating a truncated CSN log. + * @in xid - the transaction id + * @return - the csn of the transaction, or InvalidCommitSeqNo if the csnlog segment no longer exists + */ +CommitSeqNo CSNLogGetDRCommitSeqNo(TransactionId xid) +{ + CommitSeqNo csn = InvalidCommitSeqNo; + PG_TRY(); + { + csn = CSNLogGetCommitSeqNo(xid); + } + PG_CATCH(); + { + if (t_thrd.xact_cxt.slru_errcause == SLRU_OPEN_FAILED) { + csn = InvalidCommitSeqNo; + } else { + PG_RE_THROW(); + } + } + PG_END_TRY(); + return csn; +} + /** * @Description: Determine the CSN of a transaction, walking the * subtransaction tree if needed @@ -590,26 +615,12 @@ static int ZeroCSNLOGPage(int64 pageno) * @in oldestActiveXID - the oldest active transaction id * @return - no return */ -void StartupCSNLOG(bool isUpgrade) +void StartupCSNLOG() { - if (isUpgrade) { - /* for shutdown condition, we just rezero the next page of next_id */ - int64 endPage = TransactionIdToCSNPage(t_thrd.xact_cxt.ShmemVariableCache->nextXid); - - CSN_LWLOCK_ACQUIRE(endPage, LW_EXCLUSIVE); - (void)ZeroCSNLOGPage(endPage); - CSN_LWLOCK_RELEASE(endPage); - - t_thrd.xact_cxt.ShmemVariableCache->lastExtendCSNLogpage = endPage; - elog(LOG, "startup csnlog at xid:%lu, pageno:%ld", t_thrd.xact_cxt.ShmemVariableCache->nextXid, - t_thrd.xact_cxt.ShmemVariableCache->lastExtendCSNLogpage); - } else { - /* for non-shutdown condition, we jsut set the last extend page */ - int64 lastExtendPage = TransactionIdToCSNPage(t_thrd.xact_cxt.ShmemVariableCache->nextXid - 1); - t_thrd.xact_cxt.ShmemVariableCache->lastExtendCSNLogpage = lastExtendPage; - elog(LOG, "startup csnlog without extend at xid:%lu, pageno:%ld", - t_thrd.xact_cxt.ShmemVariableCache->nextXid - 1, t_thrd.xact_cxt.ShmemVariableCache->lastExtendCSNLogpage); - } + int64 lastExtendPage = TransactionIdToCSNPage(t_thrd.xact_cxt.ShmemVariableCache->nextXid - 1); + t_thrd.xact_cxt.ShmemVariableCache->lastExtendCSNLogpage = lastExtendPage; + elog(LOG, "startup csnlog without extend at xid:%lu, pageno:%ld", + t_thrd.xact_cxt.ShmemVariableCache->nextXid - 1, t_thrd.xact_cxt.ShmemVariableCache->lastExtendCSNLogpage); t_thrd.xact_cxt.ShmemVariableCache->startExtendCSNLogpage = 0; } @@ -785,7 +796,11 @@ void ExtendCSNLOG(TransactionId newestXact) void TruncateCSNLOG(TransactionId
oldestXact) { int64 cutoffPage; - + TransactionId CatalogXmin = GetReplicationSlotCatalogXmin(); + if (CatalogXmin >= FirstNormalTransactionId) { + oldestXact = Min(oldestXact, CatalogXmin); + } + u_sess->utils_cxt.RecentDataXmin = oldestXact; /* * The cutoff point is the start of the segment containing oldestXact. We * pass the *page* containing oldestXact to SimpleLruTruncate. diff --git a/src/gausskernel/storage/access/transam/double_write.cpp b/src/gausskernel/storage/access/transam/double_write.cpp index c7422b143..b3ae62e6a 100644 --- a/src/gausskernel/storage/access/transam/double_write.cpp +++ b/src/gausskernel/storage/access/transam/double_write.cpp @@ -52,103 +52,18 @@ Datum dw_get_node_name() } } -const uint16 RESULT_LEN = 256; -Datum dw_get_single_flush_dwn() +Datum dw_get_file_id() { - if (dw_enabled()) { - char dwn[RESULT_LEN] = {0}; - errno_t rc; - if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - uint32 dwn_first = (uint32)g_instance.dw_single_cxt.file_head->head.dwn; - uint32 dwn_second = (uint32)g_instance.dw_single_cxt.second_file_head->head.dwn; - rc = snprintf_s(dwn, RESULT_LEN, RESULT_LEN - 1, "%u/%u", dwn_first, dwn_second); - securec_check_ss(rc, "\0", "\0"); - } else { - uint32 dwn_old = (uint32)g_instance.dw_single_cxt.recovery_buf.file_head->head.dwn; - rc = snprintf_s(dwn, RESULT_LEN, RESULT_LEN - 1, "%u/0", dwn_old); - securec_check_ss(rc, "\0", "\0"); - } - return CStringGetTextDatum(dwn); - } - return CStringGetTextDatum("0/0"); -} - -Datum dw_get_single_flush_start() -{ - if (dw_enabled()) { - char start[RESULT_LEN] = {0}; - errno_t rc; - if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - uint32 start_first = (uint32)g_instance.dw_single_cxt.file_head->start; - uint32 start_second = (uint32)g_instance.dw_single_cxt.second_file_head->start; - rc = snprintf_s(start, RESULT_LEN, RESULT_LEN - 1, "%u/%u", start_first, start_second); - securec_check_ss(rc, "\0", "\0"); - } else { - uint32 start_old = (uint32)g_instance.dw_single_cxt.recovery_buf.file_head->start; - rc = snprintf_s(start, RESULT_LEN, RESULT_LEN - 1, "%u/0", start_old); - securec_check_ss(rc, "\0", "\0"); - } - return CStringGetTextDatum(start); - } - - return CStringGetTextDatum("0/0"); -} - -Datum dw_get_single_flush_trunc_num() -{ - char trunc_num[RESULT_LEN] = {0}; - errno_t rc; - if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - uint32 trunc_num_first = (uint32)g_instance.dw_single_cxt.single_stat_info.file_trunc_num; - uint32 trunc_num_second = (uint32)g_instance.dw_single_cxt.single_stat_info.second_file_trunc_num; - rc = snprintf_s(trunc_num, RESULT_LEN, RESULT_LEN - 1, "%u/%u", trunc_num_first, trunc_num_second); - securec_check_ss(rc, "\0", "\0"); - } else { - uint32 trunc_num_old = (uint32)g_instance.dw_single_cxt.single_stat_info.file_trunc_num; - rc = snprintf_s(trunc_num, RESULT_LEN, RESULT_LEN - 1, "%u/0", trunc_num_old); - securec_check_ss(rc, "\0", "\0"); - } - return CStringGetTextDatum(trunc_num); -} - -Datum dw_get_single_flush_reset_num() -{ - char reset_num[RESULT_LEN] = {0}; - errno_t rc; - if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - uint32 reset_num_first = (uint32)g_instance.dw_single_cxt.single_stat_info.file_reset_num; - uint32 reset_num_second = (uint32)g_instance.dw_single_cxt.single_stat_info.second_file_reset_num; - rc = snprintf_s(reset_num, RESULT_LEN, RESULT_LEN - 1, "%u/%u", reset_num_first, reset_num_second); - securec_check_ss(rc, "\0", "\0"); - } else { - 
uint32 reset_num_old = (uint32)g_instance.dw_single_cxt.single_stat_info.file_reset_num; - rc = snprintf_s(reset_num, RESULT_LEN, RESULT_LEN - 1, "%u/0", reset_num_old); - securec_check_ss(rc, "\0", "\0"); - } - return CStringGetTextDatum(reset_num); -} - -Datum dw_get_single_flush_total_writes() -{ - char total_writes[RESULT_LEN] = {0}; - errno_t rc; - if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - uint32 total_writes_first = (uint32)g_instance.dw_single_cxt.single_stat_info.total_writes; - uint32 total_writes_second = (uint32)g_instance.dw_single_cxt.single_stat_info.second_total_writes; - rc = snprintf_s(total_writes, RESULT_LEN, RESULT_LEN - 1, "%u/%u", total_writes_first, total_writes_second); - securec_check_ss(rc, "\0", "\0"); - } else { - uint32 total_writes_old = (uint32)g_instance.dw_single_cxt.single_stat_info.total_writes; - rc = snprintf_s(total_writes, RESULT_LEN, RESULT_LEN - 1, "%u/0", total_writes_old); - securec_check_ss(rc, "\0", "\0"); - } - return CStringGetTextDatum(total_writes); + return UInt64GetDatum((int64)g_stat_file_id); } Datum dw_get_dw_number() { + dw_batch_file_context *batch_file_cxt; + if (dw_enabled()) { - return UInt64GetDatum((uint64)g_instance.dw_batch_cxt.file_head->head.dwn); + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum((uint64)batch_file_cxt->file_head->head.dwn); } return UInt64GetDatum(0); @@ -156,8 +71,11 @@ Datum dw_get_dw_number() Datum dw_get_start_page() { + dw_batch_file_context *batch_file_cxt; + if (dw_enabled()) { - return UInt64GetDatum((uint64)g_instance.dw_batch_cxt.file_head->start); + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum((uint64)batch_file_cxt->file_head->start); } return UInt64GetDatum(0); @@ -165,75 +83,139 @@ Datum dw_get_start_page() Datum dw_get_file_trunc_num() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.file_trunc_num); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.file_trunc_num); + } + + return UInt64GetDatum(0); } Datum dw_get_file_reset_num() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.file_reset_num); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.file_reset_num); + } + + return UInt64GetDatum(0); } Datum dw_get_total_writes() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.total_writes); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.total_writes); + } + + return UInt64GetDatum(0); } Datum dw_get_low_threshold_writes() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.low_threshold_writes); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.low_threshold_writes); + } + + return UInt64GetDatum(0); } Datum dw_get_high_threshold_writes() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.high_threshold_writes); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = 
&g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.high_threshold_writes); + } + + return UInt64GetDatum(0); } Datum dw_get_total_pages() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.total_pages); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.total_pages); + } + + return UInt64GetDatum(0); } Datum dw_get_low_threshold_pages() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.low_threshold_pages); + dw_batch_file_context *batch_file_cxt; + + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.low_threshold_pages); + } + + return UInt64GetDatum(0); } Datum dw_get_high_threshold_pages() { - return UInt64GetDatum(g_instance.dw_batch_cxt.batch_stat_info.high_threshold_pages); + dw_batch_file_context *batch_file_cxt; + + if (dw_enabled()) { + batch_file_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[g_stat_file_id]; + return UInt64GetDatum(batch_file_cxt->batch_stat_info.high_threshold_pages); + } + + return UInt64GetDatum(0); } /* double write statistic view */ const dw_view_col_t g_dw_view_col_arr[DW_VIEW_COL_NUM] = { - { "node_name", TEXTOID, dw_get_node_name }, - { "curr_dwn", INT8OID, dw_get_dw_number }, - { "curr_start_page", INT8OID, dw_get_start_page }, - { "file_trunc_num", INT8OID, dw_get_file_trunc_num }, - { "file_reset_num", INT8OID, dw_get_file_reset_num }, - { "total_writes", INT8OID, dw_get_total_writes }, - { "low_threshold_writes", INT8OID, dw_get_low_threshold_writes }, - { "high_threshold_writes", INT8OID, dw_get_high_threshold_writes }, - { "total_pages", INT8OID, dw_get_total_pages }, - { "low_threshold_pages", INT8OID, dw_get_low_threshold_pages }, - { "high_threshold_pages", INT8OID, dw_get_high_threshold_pages } + { "node_name", TEXTOID, dw_get_node_name}, + { "curr_dwn", INT8OID, dw_get_dw_number}, + { "curr_start_page", INT8OID, dw_get_start_page}, + { "file_trunc_num", INT8OID, dw_get_file_trunc_num}, + { "file_reset_num", INT8OID, dw_get_file_reset_num}, + { "total_writes", INT8OID, dw_get_total_writes}, + { "low_threshold_writes", INT8OID, dw_get_low_threshold_writes}, + { "high_threshold_writes", INT8OID, dw_get_high_threshold_writes}, + { "total_pages", INT8OID, dw_get_total_pages}, + { "low_threshold_pages", INT8OID, dw_get_low_threshold_pages}, + { "high_threshold_pages", INT8OID, dw_get_high_threshold_pages}, + { "file_id", INT8OID, dw_get_file_id} }; -const dw_view_col_t g_dw_single_view[DW_SINGLE_VIEW_COL_NUM] = { - {"node_name", TEXTOID, dw_get_node_name}, - {"curr_dwn", TEXTOID, dw_get_single_flush_dwn}, - {"curr_start_page", TEXTOID, dw_get_single_flush_start}, - {"total_writes", TEXTOID, dw_get_single_flush_total_writes}, - {"file_trunc_num", TEXTOID, dw_get_single_flush_trunc_num}, - {"file_reset_num", TEXTOID, dw_get_single_flush_reset_num} -}; +static int dw_fetch_file_id(int thread_id); +static void dw_fetch_thread_ids(int file_id, int &size, int *thread_ids); +static void dw_remove_batch_file(int dw_file_num); +static void dw_remove_batch_meta_file(); +static void dw_recover_partial_write_batch(dw_batch_file_context *cxt); +static void dw_write_meta_file(int fd, dw_batch_meta_file *batch_meta_file); +static int dw_create_file(const char* file_name); +static void dw_generate_batch_file(int file_id, 
uint64 dw_file_size); +void dw_cxt_init_batch(); -static void dw_generate_batch_file(); -static void dw_generate_single_file(); -static void dw_recovery_partial_write_single(); -static uint16 get_max_single_write_pos(bool is_first); -static bool dw_read_data_page(BufferTag buf_tag, SMgrRelation reln, char* data_block); -static void dw_encrypt_page(BufferTag tag, char* buf); -void dw_recovery_single_page(const dw_single_flush_item *item, uint16 item_num); + +void dw_remove_file(const char* file_name) +{ + if (file_exists(file_name)) { + ereport(LOG, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("File: %s exists, deleting it", file_name))); + + if (unlink(file_name) != 0) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not remove the file: %s.", file_name))); + } + } +} void dw_pread_file(int fd, void *buf, int size, int64 offset) { @@ -289,7 +271,8 @@ int64 dw_seek_file(int fd, int64 offset, int32 origin) return seek_offset; } -static void dw_extend_file(int fd, const void *buf, int buf_size, int64 size, int64 file_expect_size, bool single) +void dw_extend_file(int fd, const void *buf, int buf_size, int64 size, + int64 file_expect_size, bool single, char* file_name) { int64 offset = 0; int64 remain_size; @@ -298,7 +281,7 @@ static void dw_extend_file(int fd, const void *buf, int buf_size, int64 size, in if ((offset + size) > file_expect_size) { ereport(PANIC, - (errmodule(MOD_DW), + (errmodule(MOD_DW), errmsg("DW extend file failed, expected_file_size %ld, offset %ld, extend_size %ld", file_expect_size, offset, size))); } @@ -306,7 +289,7 @@ static void dw_extend_file(int fd, const void *buf, int buf_size, int64 size, in remain_size = size; while (remain_size > 0) { size = (remain_size > buf_size) ? buf_size : remain_size; - dw_pwrite_file(fd, buf, size, offset, (single ? SINGLE_DW_FILE_NAME : DW_FILE_NAME)); + dw_pwrite_file(fd, buf, size, offset, (single ? 
SINGLE_DW_FILE_NAME : file_name)); offset += size; remain_size -= size; } @@ -323,7 +306,7 @@ void dw_set_pg_checksum(char *page, BlockNumber blockNum) ((PageHeader)page)->pd_checksum = pg_checksum_page(page, blockNum); } -static bool dw_verify_pg_checksum(PageHeader page_header, BlockNumber blockNum, bool dw_file) +bool dw_verify_pg_checksum(PageHeader page_header, BlockNumber blockNum, bool dw_file) { /* new page donot have crc and lsn, we donot recovery it */ if (!CheckPageZeroCases(page_header)) { @@ -336,22 +319,18 @@ static bool dw_verify_pg_checksum(PageHeader page_header, BlockNumber blockNum, return checksum == page_header->pd_checksum; } -inline void dw_prepare_page(dw_batch_t *batch, uint16 page_num, uint16 page_id, uint16 dwn) +static void dw_prepare_page(dw_batch_t *batch, uint16 page_num, uint16 page_id, uint16 dwn, bool is_new_relfilenode) { - if (g_instance.dw_batch_cxt.is_new_relfilenode == true) { + if (is_new_relfilenode == true) { if (t_thrd.proc->workingVersionNum < DW_SUPPORT_SINGLE_FLUSH_VERSION) { page_num = page_num | IS_HASH_BKT_SEGPAGE_MASK; } - if (t_thrd.proc->workingVersionNum < PAGE_COMPRESSION_VERSION) { - batch->buftag_ver = HASHBUCKET_TAG; - } else { - batch->buftag_ver = PAGE_COMPRESS_TAG; - } + batch->buftag_ver = HASHBUCKET_TAG; } else { batch->buftag_ver = ORIGIN_TAG; } batch->page_num = page_num; - + batch->head.page_id = page_id; batch->head.dwn = dwn; DW_PAGE_TAIL(batch)->dwn = dwn; @@ -370,31 +349,30 @@ void dw_prepare_file_head(char *file_head, uint16 start, uint16 dwn, int32 dw_ve curr_head->head.page_id = 0; curr_head->head.dwn = dwn; curr_head->start = start; - curr_head->buftag_version = PAGE_COMPRESS_TAG; + curr_head->buftag_version = HASHBUCKET_TAG; curr_head->tail.dwn = dwn; curr_head->dw_version = dw_version; dw_calc_file_head_checksum(curr_head); } } -static uint32 dw_recover_file_head(knl_g_dw_context *cxt, bool single, bool first) +static uint32 dw_recover_batch_file_head(dw_batch_file_context *batch_file_cxt) { uint32 i; uint16 id; errno_t rc; int64 file_size; - dw_file_head_t *curr_head = NULL; - dw_file_head_t *working_head = NULL; - char *file_head = (char *)cxt->file_head; - uint32 dw_version = 0; - uint64 head_offset = 0; int64 offset; - if (single && !first) { - file_head = (char *)cxt->second_file_head; - head_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; - } - dw_pread_file(cxt->fd, file_head, BLCKSZ, head_offset); + dw_file_head_t *curr_head = NULL; + dw_file_head_t *working_head = NULL; + uint64 head_offset = 0; + uint32 dw_version = 0; + char* file_head = (char *)batch_file_cxt->file_head; + char* file_name = batch_file_cxt->file_name; + int fd = batch_file_cxt->fd; + + dw_pread_file(fd, file_head, BLCKSZ, head_offset); for (i = 0; i < DW_FILE_HEAD_ID_NUM; i++) { id = g_dw_file_head_ids[i]; @@ -406,13 +384,13 @@ static uint32 dw_recover_file_head(knl_g_dw_context *cxt, bool single, bool firs } if (working_head == NULL) { - ereport(FATAL, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("File header is broken"))); + ereport(FATAL, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Batch file header is broken"))); /* we should not get here, since FATAL will do abort. 
But for ut, return is needed */ return dw_version; } - ereport(LOG, (errmodule(MOD_DW), errmsg("Found a valid file header: id %hu, file_head[dwn %hu, start %hu]", id, - working_head->head.dwn, working_head->start))); + ereport(LOG, (errmodule(MOD_DW), errmsg("Found a valid batch file header: id %hu, file_head[dwn %hu, start %hu]", + id, working_head->head.dwn, working_head->start))); for (i = 0; i < DW_FILE_HEAD_ID_NUM; i++) { id = g_dw_file_head_ids[i]; @@ -423,23 +401,20 @@ static uint32 dw_recover_file_head(knl_g_dw_context *cxt, bool single, bool firs } } - offset = dw_seek_file(cxt->fd, 0, SEEK_END); - if (single) { - dw_version = ((dw_file_head_t *)file_head)->dw_version; - file_size = (dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH ? DW_NEW_SINGLE_FILE_SIZE : DW_SINGLE_FILE_SIZE); - } else { - file_size = DW_FILE_SIZE; - } + offset = dw_seek_file(fd, 0, SEEK_END); + file_size = batch_file_cxt->file_size; + if (offset != file_size) { ereport(PANIC, (errmodule(MOD_DW), - errmsg("DW check file size failed, expected_size %ld, actual_size %ld", DW_FILE_SIZE, offset))); + errmsg("DW check file size failed, expected_size %ld, actual_size %ld", + batch_file_cxt->file_size, offset))); } - dw_pwrite_file(cxt->fd, file_head, BLCKSZ, head_offset, single ? SINGLE_DW_FILE_NAME : DW_FILE_NAME); + dw_pwrite_file(fd, file_head, BLCKSZ, head_offset, file_name); return dw_version; } -static inline void dw_log_page_header(PageHeader page) +void dw_log_page_header(PageHeader page) { ereport(DW_LOG_LEVEL, (errmodule(MOD_DW), @@ -502,21 +477,15 @@ static void dw_recover_pages(T1 *batch, T2 *buf_tag, PageHeader data_page, BufTa for (i = 0; i < GET_REL_PGAENUM(batch->page_num); i++) { buf_tag = &batch->buf_tag[i]; - relnode.dbNode = buf_tag->rnode.dbNode; - relnode.spcNode = buf_tag->rnode.spcNode; - relnode.relNode = buf_tag->rnode.relNode; if (tag_ver == HASHBUCKET_TAG) { - relnode.opt = 0; - // 2 bytes are used for bucketNode. 
- relnode.bucketNode = (int2)((BufferTagSecondVer *)buf_tag)->rnode.bucketNode; - } else if (tag_ver == PAGE_COMPRESS_TAG) { - relnode.opt = ((BufferTag *)buf_tag)->rnode.opt; + relnode.dbNode = buf_tag->rnode.dbNode; + relnode.spcNode = buf_tag->rnode.spcNode; + relnode.relNode = buf_tag->rnode.relNode; relnode.bucketNode = ((BufferTag *)buf_tag)->rnode.bucketNode; } else { relnode.dbNode = buf_tag->rnode.dbNode; relnode.spcNode = buf_tag->rnode.spcNode; relnode.relNode = buf_tag->rnode.relNode; - relnode.opt = 0; relnode.bucketNode = InvalidBktId; } dw_page = (PageHeader)((char *)batch + (i + 1) * BLCKSZ); @@ -551,6 +520,7 @@ static void dw_recover_pages(T1 *batch, T2 *buf_tag, PageHeader data_page, BufTa } } + void wait_all_dw_page_finish_flush() { if (g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc != NULL) { @@ -566,20 +536,60 @@ void wait_all_dw_page_finish_flush() return; } -int get_dw_page_min_idx() + +void wait_dw_page_finish_flush(int file_id) +{ + int i; + int size; + int thread_num; + int thread_id; + int *thread_ids; + + if (g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc != NULL) { + thread_num = g_instance.ckpt_cxt_ctl->pgwr_procs.num; + thread_ids = (int *)palloc0(thread_num * sizeof(int)); + dw_fetch_thread_ids(file_id, size, thread_ids); + + for (i = 0; i < size;) { + thread_id = thread_ids[i]; + if (g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_id].thrd_dw_cxt.dw_page_idx == -1) { + i++; + continue; + } else { + (void)sched_yield(); + } + } + + pfree(thread_ids); + } +} + +int get_dw_page_min_idx(int file_id) { uint16 min_idx = 0; int dw_page_idx; + int size; + int thread_id; + int* thread_ids; + int thread_num; if (g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc != NULL) { - for (int i = 0; i < g_instance.ckpt_cxt_ctl->pgwr_procs.num; i++) { - dw_page_idx = g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[i].thrd_dw_cxt.dw_page_idx; + thread_num = g_instance.ckpt_cxt_ctl->pgwr_procs.num; + thread_ids = (int *)palloc0(thread_num * sizeof(int)); + + dw_fetch_thread_ids(file_id, size, thread_ids); + + for (int i = 0; i < size; i++) { + thread_id = thread_ids[i]; + dw_page_idx = g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc[thread_id].thrd_dw_cxt.dw_page_idx; if (dw_page_idx != -1) { if (min_idx == 0 || (uint16)dw_page_idx < min_idx) { min_idx = dw_page_idx; } } } + + pfree(thread_ids); } return min_idx; @@ -601,17 +611,19 @@ int get_dw_page_min_idx() * * Return FALSE if we can not grab conditional dw flush lock after smgrsync for truncate. */ -static bool dw_batch_file_recycle(knl_g_dw_context *cxt, uint16 pages_to_write, bool trunc_file) +static bool dw_batch_file_recycle(dw_batch_file_context *cxt, uint16 pages_to_write, bool trunc_file) { bool file_full = false; - uint16 min_idx; + uint16 min_idx = 0; dw_file_head_t *file_head = cxt->file_head; volatile uint16 org_start = file_head->start; volatile uint16 org_dwn = file_head->head.dwn; uint16 last_flush_page; + uint16 dw_batch_page_num; + + dw_batch_page_num = (uint16)(cxt->file_size / BLCKSZ); + file_full = (file_head->start + cxt->flush_page + pages_to_write >= dw_batch_page_num); - file_full = (file_head->start + cxt->flush_page + pages_to_write >= DW_FILE_PAGE); - Assert(!(file_full && trunc_file)); if (!file_full && !trunc_file) { return true; @@ -622,17 +634,17 @@ static bool dw_batch_file_recycle(knl_g_dw_context *cxt, uint16 pages_to_write, /* * Record min flush position for truncate because flush lock is not held during smgrsync. 
*/ - min_idx = get_dw_page_min_idx(); + min_idx = get_dw_page_min_idx(cxt->id); LWLockRelease(cxt->flush_lock); } else { Assert(AmStartupProcess() || AmPageWriterProcess()); /* reset start position and flush page num for full recycle */ file_head->start = DW_BATCH_FILE_START; cxt->flush_page = 0; - wait_all_dw_page_finish_flush(); + wait_dw_page_finish_flush(cxt->id); } - CheckPointSyncForDw(); + PageWriterSync(); if (trunc_file) { if (!LWLockConditionalAcquire(cxt->flush_lock, LW_EXCLUSIVE)) { @@ -676,7 +688,7 @@ static bool dw_batch_file_recycle(knl_g_dw_context *cxt, uint16 pages_to_write, Assert(file_head->head.dwn == file_head->tail.dwn); pgstat_report_waitevent(WAIT_EVENT_DW_WRITE); - dw_pwrite_file(cxt->fd, file_head, BLCKSZ, 0, DW_FILE_NAME); + dw_pwrite_file(cxt->fd, file_head, BLCKSZ, 0, cxt->file_name); pgstat_report_waitevent(WAIT_EVENT_END); pg_atomic_add_fetch_u64(&cxt->batch_stat_info.file_trunc_num, 1); @@ -708,12 +720,14 @@ static inline void dw_discard_pages(dw_read_asst_t *read_asst, uint16 page_num) Assert(read_asst->buf_end >= read_asst->buf_start); } -static uint16 dw_calc_reading_pages(dw_read_asst_t *read_asst) +static uint16 dw_calc_reading_pages(dw_read_asst_t *read_asst, uint64 file_size) { dw_batch_t *curr_head; uint16 remain_pages, batch_pages, reading_pages; errno_t rc; + uint16 dw_batch_page_num; + dw_batch_page_num = (uint16) (file_size / BLCKSZ); remain_pages = read_asst->buf_end - read_asst->buf_start; curr_head = (dw_batch_t *)(read_asst->buf + read_asst->buf_start * BLCKSZ); batch_pages = (GET_REL_PGAENUM(curr_head->page_num) + DW_EXTRA_FOR_ONE_BATCH); @@ -735,22 +749,23 @@ static uint16 dw_calc_reading_pages(dw_read_asst_t *read_asst) Assert((char *)curr_head + (remain_pages + reading_pages) * BLCKSZ < read_asst->buf + read_asst->buf_capacity * BLCKSZ); - Assert(read_asst->file_start + reading_pages <= DW_FILE_PAGE); + Assert(read_asst->file_start + reading_pages <= dw_batch_page_num); return reading_pages; } -static void dw_recover_batch_head(knl_g_dw_context *cxt, dw_batch_t *curr_head) +static void dw_recover_batch_head(dw_batch_file_context *cxt, dw_batch_t *curr_head, bool is_new_relfilenode) { errno_t rc; + rc = memset_s(curr_head, BLCKSZ, 0, BLCKSZ); securec_check(rc, "\0", "\0"); - dw_prepare_page(curr_head, 0, cxt->file_head->start, cxt->file_head->head.dwn); + dw_prepare_page(curr_head, 0, cxt->file_head->start, cxt->file_head->head.dwn, is_new_relfilenode); pgstat_report_waitevent(WAIT_EVENT_DW_WRITE); - dw_pwrite_file(cxt->fd, curr_head, BLCKSZ, (curr_head->head.page_id * BLCKSZ), DW_FILE_NAME); + dw_pwrite_file(cxt->fd, curr_head, BLCKSZ, (curr_head->head.page_id * BLCKSZ), cxt->file_name); pgstat_report_waitevent(WAIT_EVENT_END); } -static inline void dw_log_recover_state(knl_g_dw_context *cxt, int elevel, const char *state, dw_batch_t *batch) +static inline void dw_log_recover_state(dw_batch_file_context *cxt, int elevel, const char *state, dw_batch_t *batch) { ereport(elevel, (errmodule(MOD_DW), @@ -761,7 +776,7 @@ static inline void dw_log_recover_state(knl_g_dw_context *cxt, int elevel, const GET_REL_PGAENUM(batch->page_num)))); } -static bool dw_batch_head_broken(knl_g_dw_context *cxt, dw_batch_t *curr_head) +static bool dw_batch_head_broken(dw_batch_file_context *cxt, dw_batch_t *curr_head) { bool broken = false; dw_batch_t *curr_tail = dw_batch_tail_page(curr_head); @@ -792,23 +807,76 @@ static bool dw_batch_head_broken(knl_g_dw_context *cxt, dw_batch_t *curr_head) return broken; } -static void 
dw_recover_partial_write_batch(knl_g_dw_context *cxt) +static void dw_check_batch_parameter_change(knl_g_dw_context *batch_cxt) +{ + int g_dw_file_num; + int g_dw_file_size; + int dw_file_num; + int dw_file_size; + dw_batch_meta_file batch_meta_file; + + dw_file_num = batch_cxt->batch_meta_file.dw_file_num; + dw_file_size = batch_cxt->batch_meta_file.dw_file_size; + + g_dw_file_num = g_instance.attr.attr_storage.dw_file_num; + g_dw_file_size = g_instance.attr.attr_storage.dw_file_size; + + if (g_dw_file_num != dw_file_num || g_dw_file_size != dw_file_size) { + ereport(LOG, (errmodule(MOD_DW), + errmsg("old batch parameter: dw_file_num [%d], dw_file_size [%d] MB; " + "it is changed to dw_file_num [%d], dw_file_size [%d] MB", dw_file_num, dw_file_size, g_dw_file_num, g_dw_file_size))); + + /* free batch cxt resources, close file and reset state. */ + dw_exit(false); + + /* remove all meta and batch files. */ + dw_remove_batch_file(dw_file_num); + dw_remove_batch_meta_file(); + + /* generate new meta and batch files. */ + dw_generate_meta_file(&batch_meta_file); + dw_generate_batch_files(g_dw_file_num, DW_FILE_SIZE_UNIT * g_dw_file_size); + + /* init batch cxt */ + dw_cxt_init_batch(); + } +} + +static void dw_recover_all_partial_write_batch(knl_g_dw_context *batch_cxt) +{ + int i; + int dw_file_num; + dw_batch_file_context* batch_file_cxt; + + dw_file_num = batch_cxt->batch_meta_file.dw_file_num; + + for (i = 0; i < dw_file_num; i++) { + batch_file_cxt = &batch_cxt->batch_file_cxts[i]; + (void)LWLockAcquire(batch_file_cxt->flush_lock, LW_EXCLUSIVE); + dw_recover_partial_write_batch(batch_file_cxt); + LWLockRelease(batch_file_cxt->flush_lock); + } +} + +static void dw_recover_partial_write_batch(dw_batch_file_context *cxt) { dw_read_asst_t read_asst; dw_batch_t *curr_head = NULL; uint16 reading_pages; uint16 remain_pages; bool dw_file_broken = false; + bool is_new_relfilenode; char *data_page = NULL; + uint16 dw_batch_page_num = (uint16) (cxt->file_size / BLCKSZ); read_asst.fd = cxt->fd; read_asst.file_start = cxt->file_head->start; - read_asst.file_capacity = DW_FILE_PAGE; + read_asst.file_capacity = dw_batch_page_num; read_asst.buf_start = 0; read_asst.buf_end = 0; read_asst.buf_capacity = DW_BUF_MAX; read_asst.buf = cxt->buf; - reading_pages = Min(DW_BATCH_MAX_FOR_NOHBK, (DW_FILE_PAGE - cxt->file_head->start)); + reading_pages = Min(DW_BATCH_MAX_FOR_NOHBK, (dw_batch_page_num - cxt->file_head->start)); data_page = (char *)palloc0(BLCKSZ); @@ -820,12 +888,10 @@ static void dw_recover_partial_write_batch(knl_g_dw_context *cxt) dw_file_broken = dw_batch_head_broken(cxt, curr_head); break; } + if (t_thrd.proc->workingVersionNum < DW_SUPPORT_SINGLE_FLUSH_VERSION) { bool is_hashbucket = ((curr_head->page_num & IS_HASH_BKT_SEGPAGE_MASK) != 0); - curr_head->buftag_ver = is_hashbucket ? - (t_thrd.proc->workingVersionNum < PAGE_COMPRESSION_VERSION ? HASHBUCKET_TAG - : PAGE_COMPRESS_TAG) - : ORIGIN_TAG; + curr_head->buftag_ver = is_hashbucket ? HASHBUCKET_TAG : ORIGIN_TAG; } remain_pages = read_asst.buf_end - read_asst.buf_start; @@ -852,11 +918,11 @@ static void dw_recover_partial_write_batch(knl_g_dw_context *cxt) break; } - reading_pages = dw_calc_reading_pages(&read_asst); + reading_pages = dw_calc_reading_pages(&read_asst, cxt->file_size); } /* if free space not enough for one batch, reuse file.
Otherwise, just do a truncate */ - if ((cxt->file_head->start + cxt->flush_page + DW_BUF_MAX) >= DW_FILE_PAGE) { + if ((cxt->file_head->start + cxt->flush_page + DW_BUF_MAX) >= dw_batch_page_num) { (void)dw_batch_file_recycle(cxt, DW_BUF_MAX, false); } else if (cxt->flush_page > 0) { if (!dw_batch_file_recycle(cxt, 0, true)) { ereport(PANIC, (errmodule(MOD_DW), errmsg("Could not truncate dw file during startup!"))); } } + + /* judge whether the buftag are hashbucket tag or origin tag. */ + is_new_relfilenode = ((curr_head->page_num & IS_HASH_BKT_SEGPAGE_MASK) != 0); if (dw_file_broken) { - dw_recover_batch_head(cxt, curr_head); + dw_recover_batch_head(cxt, curr_head, is_new_relfilenode); } dw_log_recover_state(cxt, LOG, "Finish", curr_head); pfree(data_page); } +void dw_check_file_num() +{ + int old_num = g_instance.attr.attr_storage.dw_file_num; + if (g_instance.attr.attr_storage.dw_file_num > g_instance.attr.attr_storage.pagewriter_thread_num) { + g_instance.attr.attr_storage.dw_file_num = g_instance.attr.attr_storage.pagewriter_thread_num; + + ereport(LOG, (errmodule(MOD_DW), + errmsg("dw_file_num must be no more than pagewriter_thread_num, so it is changed from [%d] to [%d]", + old_num, g_instance.attr.attr_storage.dw_file_num))); + } +} + /* only for init db */ void dw_bootstrap() { - dw_generate_batch_file(); - dw_generate_new_single_file(); -} + dw_batch_meta_file* batch_meta_file = &g_instance.dw_batch_cxt.batch_meta_file; -static void dw_generate_batch_file() -{ - char *file_head = NULL; - dw_batch_t *batch_head = NULL; - int64 remain_size; - int fd = -1; - char *unaligned_buf = NULL; - - if (file_exists(DW_FILE_NAME)) { - ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), "DW batch flush file already exists")); + /* when double write is disabled, generate pg_dw_meta file with dw_file_num = 0 */ + if (!dw_enabled()) { + g_instance.attr.attr_storage.dw_file_num = 0; + dw_generate_meta_file(batch_meta_file); + return; } - ereport(LOG, (errmodule(MOD_DW), errmsg("DW bootstrap batch flush file"))); + ereport(LOG, (errmodule(MOD_DW), errmsg("dw_bootstrap run start"))); + + dw_check_file_num(); + dw_generate_meta_file(batch_meta_file); + dw_generate_batch_files(batch_meta_file->dw_file_num, DW_FILE_SIZE_UNIT * batch_meta_file->dw_file_size); + dw_generate_new_single_file(); + + ereport(LOG, (errmodule(MOD_DW), errmsg("dw_bootstrap run end"))); +} + +static void dw_prepare_meta_info_old(dw_batch_meta_file *batch_meta_file) +{ + errno_t rc; + + rc = memset_s(batch_meta_file, sizeof(dw_batch_meta_file), 0, sizeof(dw_batch_meta_file)); + securec_check(rc, "\0", "\0"); + + batch_meta_file->dw_file_num = 1; + batch_meta_file->dw_file_size = MAX_DW_FILE_SIZE_MB; + pg_atomic_write_u32(&batch_meta_file->dw_version, 0); + batch_meta_file->checksum = 0; +} + +static void dw_prepare_meta_info(dw_batch_meta_file *batch_meta_file) +{ + errno_t rc; + + rc = memset_s(batch_meta_file, sizeof(dw_batch_meta_file), 0, sizeof(dw_batch_meta_file)); + securec_check(rc, "\0", "\0"); + + batch_meta_file->dw_file_num = g_instance.attr.attr_storage.dw_file_num; + batch_meta_file->dw_file_size = g_instance.attr.attr_storage.dw_file_size; + + if (!ENABLE_INCRE_CKPT) { + batch_meta_file->record_state |= DW_FULL_CKPT; + } + pg_atomic_write_u32(&batch_meta_file->dw_version, DW_SUPPORT_MULTIFILE_FLUSH); + batch_meta_file->checksum = 0; +} + +void dw_write_meta_file(int fd, dw_batch_meta_file *batch_meta_file) +{ + uint32 i; + int buf_size; + char* buf; + char*
unaligned_buf; + errno_t rc; + dw_batch_meta_file *tmp_batch_meta = NULL; + + buf_size = DW_META_FILE_BLOCK_NUM * BLCKSZ; + unaligned_buf = (char *)palloc0(buf_size + BLCKSZ); + buf = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + + dw_calc_meta_checksum(batch_meta_file); + + for (i = 0; i < DW_META_FILE_BLOCK_NUM; i++) { + tmp_batch_meta = (dw_batch_meta_file *)(buf + i * BLCKSZ); + rc = memmove_s(tmp_batch_meta, sizeof(dw_batch_meta_file), batch_meta_file, sizeof(dw_batch_meta_file)); + securec_check(rc, "\0", "\0"); + } + + dw_pwrite_file(fd, buf, buf_size, 0, DW_META_FILE); + pfree(unaligned_buf); +} + +void dw_generate_meta_file(dw_batch_meta_file* batch_meta_file) +{ + int fd; + + dw_prepare_meta_info(batch_meta_file); + fd = dw_create_file(DW_META_FILE); + + dw_write_meta_file(fd, batch_meta_file); + (void)close(fd); +} + +void dw_generate_batch_files(int batch_file_num, uint64 dw_file_size) +{ + for (int i = 0; i < batch_file_num; i++) { + dw_generate_batch_file(i, dw_file_size); + } +} + +static void dw_generate_batch_file(int file_id, uint64 dw_file_size) +{ + int64 remain_size; + char* file_head = NULL; + dw_batch_t* batch_head = NULL; + int fd = -1; + char* unaligned_buf = NULL; + char batch_file_name[PATH_MAX]; + + dw_fetch_batch_file_name(file_id, batch_file_name); /* create dw batch flush file */ - fd = open(DW_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); + fd = open(batch_file_name, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); if (fd == -1) { ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", DW_FILE_NAME))); + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", batch_file_name))); } /* Open file with O_SYNC, to make sure the data and file system control info on file after block writing. 
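O_SYNC makes each pwrite return only after the data has reached the disk, so a torn data page can always be restored from a fully persisted double write copy during recovery.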
*/ @@ -904,15 +1070,13 @@ static void dw_generate_batch_file() file_head = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); /* file head and first batch head will be writen */ - remain_size = (DW_FILE_PAGE * BLCKSZ) - BLCKSZ - BLCKSZ; + remain_size = dw_file_size - BLCKSZ - BLCKSZ; dw_prepare_file_head(file_head, DW_BATCH_FILE_START, 0); batch_head = (dw_batch_t *)(file_head + BLCKSZ); batch_head->head.page_id = DW_BATCH_FILE_START; dw_calc_batch_checksum(batch_head); - pgstat_report_waitevent(WAIT_EVENT_DW_WRITE); - dw_pwrite_file(fd, file_head, (BLCKSZ + BLCKSZ), 0, DW_FILE_NAME); - dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, remain_size, DW_FILE_SIZE, false); - pgstat_report_waitevent(WAIT_EVENT_END); + dw_pwrite_file(fd, file_head, (BLCKSZ + BLCKSZ), 0, batch_file_name); + dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, remain_size, dw_file_size, false, batch_file_name); ereport(LOG, (errmodule(MOD_DW), errmsg("Double write batch flush file created successfully"))); (void)close(fd); @@ -921,12 +1085,43 @@ static void dw_generate_batch_file() return; } -static void dw_free_resource(knl_g_dw_context *cxt) +static void dw_free_batch_file_resource(dw_batch_file_context *cxt) { int rc = close(cxt->fd); if (rc == -1) { ereport(ERROR, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("DW file close failed"))); } + + cxt->fd = -1; + cxt->flush_lock = NULL; + cxt->buf = NULL; + + if (cxt->unaligned_buf != NULL) { + pfree(cxt->unaligned_buf); + cxt->unaligned_buf = NULL; + } +} + +static void dw_free_resource(knl_g_dw_context *cxt, bool single) +{ + int rc; + int dw_file_num = cxt->batch_meta_file.dw_file_num; + + if (!single) { + for (int i = 0; i < dw_file_num; i++) { + dw_free_batch_file_resource(&cxt->batch_file_cxts[i]); + } + + pfree(cxt->batch_file_cxts); + } + + if (cxt->fd > 0) { + rc = close(cxt->fd); + if (rc == -1) { + ereport(ERROR, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("DW file close failed"))); + } + } + cxt->fd = -1; cxt->flush_lock = NULL; cxt->buf = NULL; @@ -944,6 +1139,7 @@ static void dw_free_resource(knl_g_dw_context *cxt) pfree(cxt->recovery_buf.unaligned_buf); cxt->recovery_buf.unaligned_buf = NULL; } + if (cxt->recovery_buf.single_flush_state != NULL) { pfree(cxt->recovery_buf.single_flush_state); cxt->recovery_buf.single_flush_state = NULL; @@ -953,72 +1149,157 @@ static void dw_free_resource(knl_g_dw_context *cxt) void dw_file_check_rebuild() { - if (file_exists(DW_BUILD_FILE_NAME)) { - ereport(LOG, (errmodule(MOD_DW), errmsg("Double write initializing after build"))); + int fd; + dw_batch_meta_file batch_meta_file; - if (file_exists(DW_FILE_NAME)) { - /* - * Probably the gaussdb was killed during the first time startup after build, resulting in a half-written - * DW file. So, log a warning message and remove the residual DW file. - */ - ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), "batch flush DW file exists, deleting it")); + if (!file_exists(DW_BUILD_FILE_NAME)) { + return; + } - if (unlink(DW_FILE_NAME) != 0) { - ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), - errmsg("Could not remove the residual batch flush DW single flush file"))); - } - } - - if (file_exists(SINGLE_DW_FILE_NAME)) { - /* - * Probably the gaussdb was killed during the first time startup after build, resulting in a half-written - * DW file. So, log a warning message and remove the residual DW file. 
- */ - ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), "single flush DW file exists, deleting it")); + ereport(LOG, (errmodule(MOD_DW), errmsg("Double write initializing after build"))); - if (unlink(SINGLE_DW_FILE_NAME) != 0) { - ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), - errmsg("Could not remove the residual single flush DW single flush file"))); - } - } + if (file_exists(OLD_DW_FILE_NAME)) { + /* + * Probably the gaussdb was killed during the first time startup after build, resulting in a half-written + * DW file. So, log a warning message and remove the residual DW file. + */ + ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), "batch flush DW file exists, deleting it")); - /* Create the DW file. */ - dw_generate_batch_file(); - - /* during C20 upgrade to R2C00 */ - if (t_thrd.proc->workingVersionNum >= DW_SUPPORT_SINGLE_FLUSH_VERSION && - t_thrd.proc->workingVersionNum < DW_SUPPORT_NEW_SINGLE_FLUSH) { - dw_generate_single_file(); - } else { - /* during C10 upgrade to R2C00 or now is R2C00, need check dw upgrade file */ - dw_generate_new_single_file(); - if (file_exists(DW_UPGRADE_FILE_NAME) && unlink(DW_UPGRADE_FILE_NAME) != 0) { - ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not remove the DW upgrade file"))); - } - } - - /* Remove the DW build file. */ - if (unlink(DW_BUILD_FILE_NAME) != 0) { - ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not remove the DW build file"))); + if (unlink(OLD_DW_FILE_NAME) != 0) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not remove the residual batch flush DW file"))); } } + + if (file_exists(SINGLE_DW_FILE_NAME)) { + /* + * Probably the gaussdb was killed during the first time startup after build, resulting in a half-written + * DW file. So, log a warning message and remove the residual DW file. + */ + ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), "single flush DW file exists, deleting it")); + + if (unlink(SINGLE_DW_FILE_NAME) != 0) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not remove the residual single flush DW file"))); + } + } + + /* read the meta file, remove the batch files, then remove the meta file */ + if (file_exists(DW_META_FILE)) { + ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), "batch meta file exists, deleting it")); + fd = dw_open_file(DW_META_FILE); + dw_recover_batch_meta_file(fd, &batch_meta_file); + close(fd); + + dw_remove_batch_file(batch_meta_file.dw_file_num); + + dw_remove_batch_meta_file(); + } + + /* Create the DW file.
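On DW_SUPPORT_MULTIFILE_FLUSH or later this means the pg_dw_meta file plus dw_file_num batch files; older versions set old_batch_version so dw_fetch_batch_file_name resolves to the legacy OLD_DW_FILE_NAME.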
*/ + if (t_thrd.proc->workingVersionNum >= DW_SUPPORT_MULTIFILE_FLUSH) { + dw_generate_meta_file(&batch_meta_file); + dw_generate_batch_files(batch_meta_file.dw_file_num, DW_FILE_SIZE_UNIT * batch_meta_file.dw_file_size); + } else { + g_instance.dw_batch_cxt.old_batch_version = true; + dw_generate_batch_file(-1, DW_FILE_SIZE); + } + + /* during C20 upgrade to R2C00 */ + if (t_thrd.proc->workingVersionNum >= DW_SUPPORT_SINGLE_FLUSH_VERSION && + t_thrd.proc->workingVersionNum < DW_SUPPORT_NEW_SINGLE_FLUSH) { + dw_generate_single_file(); + } else { + /* during C10 upgrade to R2C00 or now is R2C00, need check dw upgrade file */ + dw_generate_new_single_file(); + if (file_exists(DW_UPGRADE_FILE_NAME) && unlink(DW_UPGRADE_FILE_NAME) != 0) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not remove the DW upgrade file"))); + } + } + + /* Remove the DW build file. */ + if (unlink(DW_BUILD_FILE_NAME) != 0) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not remove the DW build file"))); + } + return; } +static bool dw_batch_upgrade_check() +{ + int fd; + uint64 dw_file_size; + dw_batch_meta_file* batch_meta_file; + bool old_batch_version = false; + + batch_meta_file = &g_instance.dw_batch_cxt.batch_meta_file; + + if (t_thrd.proc->workingVersionNum < DW_SUPPORT_MULTIFILE_FLUSH) { + return true; + } + + if (!file_exists(DW_BATCH_UPGRADE_META_FILE_NAME) && !file_exists(DW_BATCH_UPGRADE_BATCH_FILE_NAME)) { + if (file_exists(DW_META_FILE)) { + /* dw_batch_upgrade finished successfully */ + dw_remove_file(OLD_DW_FILE_NAME); + } else { + /* stopped before dw_batch_upgrade, or upgrading in place */ + if (file_exists(OLD_DW_FILE_NAME)) { + old_batch_version = true; + } else { + /* initdb */ + dw_generate_meta_file(batch_meta_file); + + dw_file_size = DW_FILE_SIZE_UNIT * batch_meta_file->dw_file_size; + dw_generate_batch_files(batch_meta_file->dw_file_num, dw_file_size); + } + } + } else if (file_exists(DW_BATCH_UPGRADE_META_FILE_NAME)) { + /* stopped during meta file generation */ + dw_remove_file(DW_META_FILE); + dw_remove_file(DW_BATCH_UPGRADE_META_FILE_NAME); + + old_batch_version = true; + } else if (file_exists(DW_BATCH_UPGRADE_BATCH_FILE_NAME)) { + fd = dw_open_file(DW_META_FILE); + dw_recover_batch_meta_file(fd, batch_meta_file); + close(fd); + + dw_remove_batch_file(batch_meta_file->dw_file_num); + dw_remove_batch_meta_file(); + dw_remove_file(DW_BATCH_UPGRADE_BATCH_FILE_NAME); + + old_batch_version = true; + } + + return old_batch_version; +} + void dw_file_check() { + dw_batch_meta_file* batch_meta_file; + dw_file_check_rebuild(); - if (!file_exists(DW_FILE_NAME)) { - ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("batch flush DW file does not exist"))); + batch_meta_file = &g_instance.dw_batch_cxt.batch_meta_file; + g_instance.dw_batch_cxt.old_batch_version = dw_batch_upgrade_check(); + + if (g_instance.dw_batch_cxt.old_batch_version == true) { + g_instance.dw_batch_cxt.recovery_dw_file_num = g_instance.attr.attr_storage.dw_file_num; + g_instance.dw_batch_cxt.recovery_dw_file_size = g_instance.attr.attr_storage.dw_file_size; + + dw_prepare_meta_info_old(batch_meta_file); + + g_instance.attr.attr_storage.dw_file_num = batch_meta_file->dw_file_num; + g_instance.attr.attr_storage.dw_file_size = batch_meta_file->dw_file_size; } /* C20 or R2C00 version, the system must contain either dw single file or dw upgrade file.
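If neither file exists, the single flush state cannot be reconstructed, so startup is aborted with the PANIC below instead of risking lost pages.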
*/ if (t_thrd.proc->workingVersionNum >= DW_SUPPORT_SINGLE_FLUSH_VERSION) { if (!file_exists(SINGLE_DW_FILE_NAME) && !file_exists(DW_UPGRADE_FILE_NAME)) { - ereport(PANIC, (errcode_for_file_access(), + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("single flush DW file does not exist and dw_upgrade file does not exist"))); } } @@ -1030,7 +1311,7 @@ void dw_file_check() if (fd == -1) { ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), - errmsg("Could not create file \"%s\"", DW_FILE_NAME))); + errmsg("Could not create file \"%s\"", DW_UPGRADE_FILE_NAME))); } ereport(LOG, (errmodule(MOD_DW), errmsg("first upgrade to DW_SUPPORT_NEW_SINGLE_FLUSH, need init the single file"))); @@ -1055,103 +1336,384 @@ void dw_file_check() } } -void dw_cxt_init_batch() +void dw_fetch_batch_file_name(int file_id, char* buf) +{ + errno_t rc = EOK; + + /* in the upgrade process, before the upgrade is committed, all writes are flushed to OLD_DW_FILE_NAME */ + if (g_instance.dw_batch_cxt.old_batch_version) { + rc = memmove_s(buf, PATH_MAX, OLD_DW_FILE_NAME, sizeof(OLD_DW_FILE_NAME)); + securec_check_c(rc, "\0", "\0"); + return; + } + + rc = memmove_s(buf, PATH_MAX, DW_FILE_NAME_PREFIX, sizeof(DW_FILE_NAME_PREFIX)); + securec_check_c(rc, "\0", "\0"); + + char* str_buf = (char *)palloc0(PATH_MAX); + rc = sprintf_s(str_buf, PATH_MAX, "%d", file_id); + securec_check_ss(rc, "", ""); + + rc = strcat_s(buf, PATH_MAX, str_buf); + securec_check_c(rc, "\0", "\0"); + + pfree(str_buf); +} + +static void dw_file_cxt_init_batch(int id, dw_batch_file_context *batch_file_cxt, uint64 file_size) { uint32 buf_size; char *buf = NULL; - knl_g_dw_context *batch_cxt = &g_instance.dw_batch_cxt; - Assert(batch_cxt->flush_lock == NULL); - batch_cxt->flush_lock = LWLockAssign(LWTRANCHE_DOUBLE_WRITE); + Assert(batch_file_cxt->flush_lock == NULL); + batch_file_cxt->flush_lock = LWLockAssign(LWTRANCHE_DOUBLE_WRITE); + + batch_file_cxt->id = id; + dw_fetch_batch_file_name(batch_file_cxt->id, batch_file_cxt->file_name); + batch_file_cxt->file_size = file_size; /* double write file disk space pre-allocated, O_DSYNC for less IO */ - batch_cxt->fd = open(DW_FILE_NAME, DW_FILE_FLAG, DW_FILE_PERM); - if (batch_cxt->fd == -1) { + batch_file_cxt->fd = open(batch_file_cxt->file_name, DW_FILE_FLAG, DW_FILE_PERM); + if (batch_file_cxt->fd == -1) { ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not open file \"%s\"", DW_FILE_NAME))); + (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not open file \"%s\"", batch_file_cxt->file_name))); } buf_size = DW_MEM_CTX_MAX_BLOCK_SIZE_FOR_NOHBK; + batch_file_cxt->unaligned_buf = (char *)palloc0(buf_size); /* one more BLCKSZ for alignment */ + buf = (char *)TYPEALIGN(BLCKSZ, batch_file_cxt->unaligned_buf); - batch_cxt->unaligned_buf = (char *)palloc0(buf_size); /* one more BLCKSZ for alignment */ - buf = (char *)TYPEALIGN(BLCKSZ, batch_cxt->unaligned_buf); - - batch_cxt->file_head = (dw_file_head_t *)buf; + batch_file_cxt->file_head = (dw_file_head_t *)buf; buf += BLCKSZ; - (void)dw_recover_file_head(batch_cxt, false, false); + (void)dw_recover_batch_file_head(batch_file_cxt); - batch_cxt->buf = buf; + batch_file_cxt->buf = buf; if (BBOX_BLACKLIST_DW_BUFFER) { bbox_blacklist_add(DW_BUFFER, buf, buf_size - BLCKSZ - BLCKSZ); } - batch_cxt->closed = 0; - batch_cxt->write_pos = 0; - batch_cxt->flush_page = 0; + + batch_file_cxt->write_pos = 0; + batch_file_cxt->flush_page = 0; } -void dw_cxt_init_single() +int dw_open_file(const char* file_name) { -
char *buf = NULL; - knl_g_dw_context *single_cxt = &g_instance.dw_single_cxt; - uint32 dw_version = 0; - uint16 data_page_num = 0; - uint64 second_start_offset = 0; - - Assert(single_cxt->flush_lock == NULL); - single_cxt->flush_lock = LWLockAssign(LWTRANCHE_DW_SINGLE_FIRST); - single_cxt->second_flush_lock = LWLockAssign(LWTRANCHE_DW_SINGLE_SECOND); - single_cxt->second_buftag_lock = LWLockAssign(LWTRANCHE_DW_SINGLE_SECOND_BUFTAG); - - single_cxt->fd = open(SINGLE_DW_FILE_NAME, DW_FILE_FLAG, DW_FILE_PERM); - if (single_cxt->fd == -1) { + int fd = open(file_name, DW_FILE_FLAG, DW_FILE_PERM); + if (fd == -1) { ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not open file \"%s\"", DW_FILE_NAME))); + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not open file \"%s\"", file_name))); + } + return fd; +} + +static int dw_create_file(const char* file_name) +{ + int fd = -1; + fd = open(file_name, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); + if (fd == -1) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", file_name))); } - data_page_num = DW_FIRST_DATA_PAGE_NUM + DW_SECOND_DATA_PAGE_NUM; + return fd; +} - /* two file head plus one for alignment */ - single_cxt->unaligned_buf = (char *)palloc0((DW_SECOND_BUFTAG_PAGE_NUM + 1 + 1 + 1) * BLCKSZ); - buf = (char *)TYPEALIGN(BLCKSZ, single_cxt->unaligned_buf); - single_cxt->file_head = (dw_file_head_t *)buf; - buf += BLCKSZ; - single_cxt->second_file_head = (dw_file_head_t *)buf; - buf += BLCKSZ; - single_cxt->buf = buf; - single_cxt->single_flush_state = (bool*)palloc0(sizeof(bool) * data_page_num); +bool dw_verify_meta_info(dw_batch_meta_file *batch_meta_file) +{ + uint32 checksum; + uint16 org_cks = batch_meta_file->checksum; - dw_version = dw_recover_file_head(single_cxt, true, true); - if (dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - dw_pread_file(single_cxt->fd, single_cxt->file_head, BLCKSZ, 0); - second_start_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; - dw_recover_file_head(single_cxt, true, false); - dw_pread_file(single_cxt->fd, single_cxt->second_file_head, BLCKSZ, second_start_offset); + batch_meta_file->checksum = 0; + checksum = pg_checksum_block((char*)batch_meta_file, sizeof(dw_batch_meta_file)); + batch_meta_file->checksum = org_cks; + + return (org_cks == REDUCE_CKS2UINT16(checksum)); +} + +void dw_recover_batch_meta_file(int fd, dw_batch_meta_file *batch_meta_file) +{ + uint32 i; + char* buf; + char* unaligned_buf; + int buf_size; + errno_t rc; + dw_batch_meta_file *valid_batch_meta = NULL; + dw_batch_meta_file *tmp_batch_meta = NULL; + + buf_size = DW_META_FILE_BLOCK_NUM * BLCKSZ; + unaligned_buf = (char *)palloc0(buf_size + BLCKSZ); + buf = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + + dw_pread_file(fd, buf, buf_size, 0); + + for (i = 0; i < DW_META_FILE_BLOCK_NUM; i++) { + tmp_batch_meta = (dw_batch_meta_file *)(buf + i * BLCKSZ); + if (dw_verify_meta_info(tmp_batch_meta)) { + valid_batch_meta = tmp_batch_meta; + break; + } + } + + if (valid_batch_meta == NULL) { + ereport(FATAL, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Meta File is broken"))); + return; + } + + ereport(LOG, (errmodule(MOD_DW), + errmsg("Found a valid batch meta file info: dw_file_num [%d], dw_file_size [%d] MB, dw_version [%d]", + valid_batch_meta->dw_file_num, valid_batch_meta->dw_file_size, valid_batch_meta->dw_version))); + + for (i = 0; i < DW_META_FILE_BLOCK_NUM; i++) { + tmp_batch_meta = (dw_batch_meta_file *)(buf + i * BLCKSZ); + if 
(tmp_batch_meta != valid_batch_meta) { + rc = memcpy_s(tmp_batch_meta, sizeof(dw_batch_meta_file), valid_batch_meta, sizeof(dw_batch_meta_file)); + securec_check(rc, "\0", "\0"); + } + } + + rc = memcpy_s(batch_meta_file, sizeof(dw_batch_meta_file), valid_batch_meta, sizeof(dw_batch_meta_file)); + securec_check(rc, "\0", "\0"); + + dw_pwrite_file(fd, buf, buf_size, 0, DW_META_FILE); + + pfree(unaligned_buf); +} + +void dw_remove_batch_meta_file() +{ + ereport(LOG, (errmodule(MOD_DW), errmsg("start remove dw_batch_meta_file."))); + dw_remove_file(DW_META_FILE); +} + +void dw_remove_batch_file(int dw_file_num) +{ + int i; + char batch_file_name[PATH_MAX]; + + ereport(LOG, (errmodule(MOD_DW), errmsg("start remove dw_batch_files."))); + + for (i = 0; i < dw_file_num; i++) { + dw_fetch_batch_file_name(i, batch_file_name); + dw_remove_file(batch_file_name); + } +} + +void dw_cxt_init_batch() +{ + int fd; + int i; + int dw_file_num; + uint dw_version; + dw_batch_file_context *batch_file_cxt; + knl_g_dw_context *dw_batch_cxt = &g_instance.dw_batch_cxt; + dw_batch_meta_file *batch_meta_file = &dw_batch_cxt->batch_meta_file; + + if (dw_batch_cxt->old_batch_version) { + dw_batch_cxt->fd = -1; + dw_version = 0; } else { - Assert(dw_version == 0); - /* one file head plus one for alignment */ - single_cxt->recovery_buf.unaligned_buf = - (char *)palloc0((DW_SINGLE_DIRTY_PAGE_NUM / SINGLE_BLOCK_TAG_NUM + 1 + 1) * BLCKSZ); - buf = (char *)TYPEALIGN(BLCKSZ, single_cxt->recovery_buf.unaligned_buf); - single_cxt->recovery_buf.file_head = (dw_file_head_t *)buf; - buf += BLCKSZ; - single_cxt->recovery_buf.buf = buf; - dw_pread_file(single_cxt->fd, single_cxt->recovery_buf.file_head, BLCKSZ, 0); - single_cxt->recovery_buf.single_flush_state = - (bool*)palloc0(sizeof(bool) * DW_SINGLE_DIRTY_PAGE_NUM); - single_cxt->recovery_buf.write_pos = 0; + fd = dw_open_file(DW_META_FILE); + dw_batch_cxt->fd = fd; + + /* init global batch meta file info */ + (void)dw_recover_batch_meta_file(fd, batch_meta_file); + dw_version = batch_meta_file->dw_version; } - pg_atomic_write_u32(&g_instance.dw_single_cxt.dw_version, dw_version); - single_cxt->closed = 0; - single_cxt->write_pos = 0; - single_cxt->second_write_pos = 0; - single_cxt->flush_page = 0; + if (dw_batch_cxt->flush_lock == NULL) { + dw_batch_cxt->flush_lock = LWLockAssign(LWTRANCHE_DOUBLE_WRITE); + } + + dw_file_num = batch_meta_file->dw_file_num; + dw_batch_cxt->batch_file_cxts = (dw_batch_file_context *)palloc0(dw_file_num * sizeof(dw_batch_file_context)); + + /* init global batch double write file head info */ + for (i = 0; i < dw_file_num; i++) { + batch_file_cxt = &dw_batch_cxt->batch_file_cxts[i]; + dw_file_cxt_init_batch(i, batch_file_cxt, DW_FILE_SIZE_UNIT * batch_meta_file->dw_file_size); + } + + pg_atomic_write_u32(&dw_batch_cxt->dw_version, dw_version); + + + /* these fields are used only by single flush, so they are set to NULL here. */ + dw_batch_cxt->single_flush_state = NULL; + dw_batch_cxt->unaligned_buf = NULL; + dw_batch_cxt->recovery_buf.unaligned_buf = NULL; + dw_batch_cxt->recovery_buf.single_flush_state = NULL; + + dw_batch_cxt->closed = 0; +} + +static void dw_check_meta_file() +{ + int fd; + dw_batch_meta_file *batch_meta_file; + + if (file_exists(DW_META_FILE)) { + batch_meta_file = &g_instance.dw_batch_cxt.batch_meta_file; + fd = dw_open_file(DW_META_FILE); + dw_recover_batch_meta_file(fd, batch_meta_file); + close(fd); + + /* Notice the last time the database was started with full checkpoint or incremental checkpoint mode.
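The mode is carried in the DW_FULL_CKPT bit of record_state, so (record_state & DW_FULL_CKPT) != 0 selects the full checkpoint message below.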
*/ + if (batch_meta_file->dw_file_num == 0) { + if ((batch_meta_file->record_state & DW_FULL_CKPT) > 0) { + ereport(LOG, (errmodule(MOD_DW), errmsg("The database ran in full checkpoint mode last time."))); + } else { + ereport(LOG, (errmodule(MOD_DW), errmsg("The database ran in incremental checkpoint mode last time."))); + } + + fd = dw_create_file(DW_BUILD_FILE_NAME); + close(fd); + } + } +} + +void dw_upgrade_renable_double_write() +{ + if (g_instance.attr.attr_storage.enable_double_write) { + return; + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("support re-enable dw upgrade start"))); + + /* generate the pg_dw_meta with dw_file_num = 0 */ + g_instance.attr.attr_storage.dw_file_num = 0; + dw_generate_meta_file(&g_instance.dw_batch_cxt.batch_meta_file); + + /* old version dw files were not deleted when double write was disabled, so delete them here */ + dw_remove_file(OLD_DW_FILE_NAME); + dw_remove_file(SINGLE_DW_FILE_NAME); + + pg_atomic_write_u32(&g_instance.dw_batch_cxt.dw_version, DW_SUPPORT_REABLE_DOUBLE_WRITE); + + ereport(LOG, (errmodule(MOD_DW), errmsg("support re-enable dw upgrade end"))); +} + +static void dw_record_ckpt_state() +{ + int fd; + dw_batch_meta_file *batch_meta_file = &g_instance.dw_batch_cxt.batch_meta_file; + + if (file_exists(DW_META_FILE)) { + if (ENABLE_INCRE_CKPT) { + batch_meta_file->record_state &= (~DW_FULL_CKPT); + } else { + batch_meta_file->record_state |= DW_FULL_CKPT; + } + + fd = dw_open_file(DW_META_FILE); + dw_write_meta_file(fd, batch_meta_file); + close(fd); + } +} + +/* when double write and incremental checkpoint are enabled, init dw files here */ +void dw_enable_init() +{ + knl_g_dw_context *batch_cxt = &g_instance.dw_batch_cxt; + knl_g_dw_context *single_cxt = &g_instance.dw_single_cxt; + + dw_check_meta_file(); + dw_check_file_num(); + dw_file_check(); + + /* init dw batch and dw single */ + dw_cxt_init_batch(); + dw_cxt_init_single(); + + /* recover the batch flush dw file */ + dw_recover_all_partial_write_batch(batch_cxt); + + if (!batch_cxt->old_batch_version) { + /* check whether dw_file_num or dw_file_size has changed for batch flush */ + dw_check_batch_parameter_change(batch_cxt); + } + + /* recover the single flush dw file */ + (void)LWLockAcquire(single_cxt->flush_lock, LW_EXCLUSIVE); + dw_recovery_partial_write_single(); + LWLockRelease(single_cxt->flush_lock); +} + +/* when double write is disabled, init dw files here */ +bool dw_disable_init() +{ + int fd; + bool disable_dw_first_init = false; + knl_g_dw_context *batch_cxt = &g_instance.dw_batch_cxt; + knl_g_dw_context *single_cxt = &g_instance.dw_single_cxt; + + if (t_thrd.proc->workingVersionNum >= DW_SUPPORT_REABLE_DOUBLE_WRITE) { + /* perform build process when double write is disabled */ + if (file_exists(DW_BUILD_FILE_NAME)) { + g_instance.attr.attr_storage.dw_file_num = 0; + dw_generate_meta_file(&batch_cxt->batch_meta_file); + dw_remove_file(DW_BUILD_FILE_NAME); + } + + /* normal initialization */ + if (file_exists(DW_META_FILE)) { + fd = dw_open_file(DW_META_FILE); + dw_recover_batch_meta_file(fd, &batch_cxt->batch_meta_file); + close(fd); + + /* run for the first time after double write was disabled */ + if (batch_cxt->batch_meta_file.dw_file_num > 0) { + /* init dw batch and dw single */ + dw_cxt_init_batch(); + dw_cxt_init_single(); + + /* recover the batch flush dw file */ + dw_recover_all_partial_write_batch(batch_cxt); + + /* set dw_file_num to 0, remove all the dw batch files and keep the meta file */ + g_instance.attr.attr_storage.dw_file_num = 0; +
dw_check_batch_parameter_change(batch_cxt); + + /* recover the single flush dw file */ + (void)LWLockAcquire(single_cxt->flush_lock, LW_EXCLUSIVE); + dw_recovery_partial_write_single(); + LWLockRelease(single_cxt->flush_lock); + + disable_dw_first_init = true; + } + pg_atomic_write_u32(&batch_cxt->dw_version, t_thrd.proc->workingVersionNum); + } else { + /* for upgrade process */ + pg_atomic_write_u32(&batch_cxt->dw_version, 0); + } + } else { + /* + * Between DW_SUPPORT_BCM_VERSION and DW_SUPPORT_REABLE_DOUBLE_WRITE, + * double write files are deleted when double write is disabled. When + * a build is performed during the upgrade process, it does nothing. + */ + if (t_thrd.proc->workingVersionNum >= DW_SUPPORT_BCM_VERSION) { + if (file_exists(DW_BUILD_FILE_NAME)) { + dw_remove_file(DW_BUILD_FILE_NAME); + } + } else { + /* + * In the other situations including C00, C20 and C10 before DW_SUPPORT_BCM_VERSION, + * when double write is disabled, double write files are left. When a build is performed + * during the upgrade process, the double write files are generated again. + */ + dw_file_check_rebuild(); + } + pg_atomic_write_u32(&batch_cxt->dw_version, t_thrd.proc->workingVersionNum); + } + + return disable_dw_first_init; } void dw_init(bool shut_down) { MemoryContext old_mem_cxt; + bool disable_dw_first_init = false; knl_g_dw_context *batch_cxt = &g_instance.dw_batch_cxt; knl_g_dw_context *single_cxt = &g_instance.dw_single_cxt; @@ -1166,33 +1728,52 @@ void dw_init(bool shut_down) g_instance.dw_batch_cxt.mem_cxt = mem_cxt; g_instance.dw_single_cxt.mem_cxt = mem_cxt; pg_atomic_write_u32(&g_instance.dw_single_cxt.dw_version, 0); + pg_atomic_write_u32(&g_instance.dw_batch_cxt.dw_version, 0); old_mem_cxt = MemoryContextSwitchTo(mem_cxt); - - dw_file_check(); ereport(LOG, (errmodule(MOD_DW), errmsg("Double Write init"))); - dw_cxt_init_batch(); - dw_cxt_init_single(); + /* when double write is enabled, incremental checkpoint must be enabled too */ + if (dw_enabled()) { + dw_enable_init(); + } else { + /* Notice: double write can be disabled alone when the read/write page unit is atomic */ + disable_dw_first_init = dw_disable_init(); + } - /* recovery batch flush dw file */ - (void)LWLockAcquire(batch_cxt->flush_lock, LW_EXCLUSIVE); - dw_recover_partial_write_batch(batch_cxt); - LWLockRelease(batch_cxt->flush_lock); - - /* recovery single flush dw file */ - (void)LWLockAcquire(single_cxt->flush_lock, LW_EXCLUSIVE); - dw_recovery_partial_write_single(); - LWLockRelease(single_cxt->flush_lock); + dw_record_ckpt_state(); /* * After recovering partially written pages (if any), we will un-initialize, if the double write is disabled. */ - if (!dw_enabled()) { - dw_free_resource(batch_cxt); - dw_free_resource(single_cxt); + if (disable_dw_first_init) { + dw_free_resource(batch_cxt, false); + dw_free_resource(single_cxt, true); (void)MemoryContextSwitchTo(old_mem_cxt); MemoryContextDelete(g_instance.dw_batch_cxt.mem_cxt); + + if (file_exists(OLD_DW_FILE_NAME)) { + /* If the double write is disabled, log a warning message and remove the residual DW file. */ + ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), + "batch flush DW file exists, deleting it when the double write is disabled")); + + if (unlink(OLD_DW_FILE_NAME) != 0) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not remove the residual batch flush DW file"))); + } + } + + if (file_exists(SINGLE_DW_FILE_NAME)) { + /* If the double write is disabled, log a warning message and remove the single DW file.
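Any torn pages have already been restored by the recovery steps above, so deleting the file here cannot lose data.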
*/ + ereport(WARNING, (errcode_for_file_access(), errmodule(MOD_DW), + "single flush DW file exists, deleting it when the double write is disabled")); + + if (unlink(SINGLE_DW_FILE_NAME) != 0) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), + errmsg("Could not remove the residual single flush DW file"))); + } + } + ereport(LOG, (errmodule(MOD_DW), errmsg("Double write exit after recovering partial write"))); } else { if (pg_atomic_read_u32(&g_instance.dw_single_cxt.dw_version) != 0 && @@ -1253,7 +1834,7 @@ static XLogRecPtr dw_copy_page(ThrdDwCxt* thrd_dw_cxt, int buf_desc_id, bool* is } PinBuffer_Locked(buf_desc); - + /* We must use a conditional lock acquisition here to avoid deadlock. If * page_writer and double_write are enabled, only page_writer is allowed to * flush the buffers. So the backends (BufferAlloc, FlushRelationBuffers, @@ -1275,7 +1856,7 @@ static XLogRecPtr dw_copy_page(ThrdDwCxt* thrd_dw_cxt, int buf_desc_id, bool* is batch = (dw_batch_t*)thrd_dw_cxt->dw_buf; page_num = thrd_dw_cxt->write_pos; } else { - batch = (dw_batch_t*)(thrd_dw_cxt->dw_buf + + batch = (dw_batch_t*)(thrd_dw_cxt->dw_buf + (GET_DW_BATCH_DATA_PAGE_MAX(thrd_dw_cxt->is_new_relfilenode) + 1) * BLCKSZ); page_num = thrd_dw_cxt->write_pos - GET_DW_BATCH_DATA_PAGE_MAX(thrd_dw_cxt->is_new_relfilenode); } @@ -1315,9 +1896,8 @@ static XLogRecPtr dw_copy_page(ThrdDwCxt* thrd_dw_cxt, int buf_desc_id, bool* is return page_lsn; } -inline uint16 dw_batch_add_extra(uint16 page_num) +inline uint16 dw_batch_add_extra(uint16 page_num, bool is_new_relfilenode) { - bool is_new_relfilenode = g_instance.dw_batch_cxt.is_new_relfilenode; Assert(page_num <= GET_DW_DIRTY_PAGE_MAX(is_new_relfilenode)); if (page_num <= GET_DW_BATCH_DATA_PAGE_MAX(is_new_relfilenode)) { return page_num + DW_EXTRA_FOR_ONE_BATCH; @@ -1326,27 +1906,27 @@ inline uint16 dw_batch_add_extra(uint16 page_num) } } -static void dw_assemble_batch(knl_g_dw_context *dw_cxt, uint16 page_id, uint16 dwn) +static void dw_assemble_batch(dw_batch_file_context *dw_cxt, uint16 page_id, uint16 dwn, bool is_new_relfilenode) { dw_batch_t *batch = NULL; uint16 first_batch_pages; uint16 second_batch_pages; - if (dw_cxt->write_pos > GET_DW_BATCH_DATA_PAGE_MAX(dw_cxt->is_new_relfilenode)) { - first_batch_pages = GET_DW_BATCH_DATA_PAGE_MAX(dw_cxt->is_new_relfilenode); - second_batch_pages = dw_cxt->write_pos - GET_DW_BATCH_DATA_PAGE_MAX(dw_cxt->is_new_relfilenode); + if (dw_cxt->write_pos > GET_DW_BATCH_DATA_PAGE_MAX(is_new_relfilenode)) { + first_batch_pages = GET_DW_BATCH_DATA_PAGE_MAX(is_new_relfilenode); + second_batch_pages = dw_cxt->write_pos - GET_DW_BATCH_DATA_PAGE_MAX(is_new_relfilenode); } else { first_batch_pages = dw_cxt->write_pos; second_batch_pages = 0; } batch = (dw_batch_t *)dw_cxt->buf; - dw_prepare_page(batch, first_batch_pages, page_id, dwn); + dw_prepare_page(batch, first_batch_pages, page_id, dwn, is_new_relfilenode); /* tail of the first batch */ page_id = page_id + 1 + GET_REL_PGAENUM(batch->page_num); batch = dw_batch_tail_page(batch); - dw_prepare_page(batch, second_batch_pages, page_id, dwn); + dw_prepare_page(batch, second_batch_pages, page_id, dwn, is_new_relfilenode); if (second_batch_pages == 0) { return; @@ -1354,17 +1934,17 @@ static void dw_assemble_batch(knl_g_dw_context *dw_cxt, uint16 page_id, uint16 d /* also head of the second batch, if second batch not empty, prepare its tail */ page_id = page_id + 1 + GET_REL_PGAENUM(batch->page_num); batch = dw_batch_tail_page(batch); - dw_prepare_page(batch, 0, page_id,
dwn); + dw_prepare_page(batch, 0, page_id, dwn, is_new_relfilenode); } -static inline void dw_stat_batch_flush(dw_stat_info_batch *stat_info, uint32 page_to_write) +static inline void dw_stat_batch_flush(dw_stat_info_batch *stat_info, uint32 page_to_write, bool is_new_relfilenode) { (void)pg_atomic_add_fetch_u64(&stat_info->total_writes, 1); (void)pg_atomic_add_fetch_u64(&stat_info->total_pages, page_to_write); if (page_to_write < DW_WRITE_STAT_LOWER_LIMIT) { (void)pg_atomic_add_fetch_u64(&stat_info->low_threshold_writes, 1); (void)pg_atomic_add_fetch_u64(&stat_info->low_threshold_pages, page_to_write); - } else if (page_to_write > GET_DW_BATCH_MAX(g_instance.dw_batch_cxt.is_new_relfilenode)) { + } else if (page_to_write > GET_DW_BATCH_MAX(is_new_relfilenode)) { (void)pg_atomic_add_fetch_u64(&stat_info->high_threshold_writes, 1); (void)pg_atomic_add_fetch_u64(&stat_info->high_threshold_pages, page_to_write); } @@ -1375,9 +1955,10 @@ static inline void dw_stat_batch_flush(dw_stat_info_batch *stat_info, uint32 pag * @param dw_cxt double write context * @param latest_lsn the latest lsn in the copied pages */ -static void dw_batch_flush(knl_g_dw_context* dw_cxt, XLogRecPtr latest_lsn, ThrdDwCxt* thrd_dw_cxt) +static void dw_batch_flush(dw_batch_file_context *dw_cxt, XLogRecPtr latest_lsn, ThrdDwCxt* thrd_dw_cxt) { uint16 offset_page; + bool is_new_relfilenode; uint16 pages_to_write = 0; dw_file_head_t* file_head = NULL; errno_t rc; @@ -1388,15 +1969,12 @@ static void dw_batch_flush(knl_g_dw_context* dw_cxt, XLogRecPtr latest_lsn, Thrd (void)LWLockAcquire(dw_cxt->flush_lock, LW_EXCLUSIVE); - if (thrd_dw_cxt->is_new_relfilenode) { - dw_cxt->is_new_relfilenode = true; - } - + is_new_relfilenode = thrd_dw_cxt->is_new_relfilenode; dw_cxt->write_pos = thrd_dw_cxt->write_pos; Assert(dw_cxt->write_pos > 0); file_head = dw_cxt->file_head; - pages_to_write = dw_batch_add_extra(dw_cxt->write_pos); + pages_to_write = dw_batch_add_extra(dw_cxt->write_pos, is_new_relfilenode); rc = memcpy_s(dw_cxt->buf, pages_to_write * BLCKSZ, thrd_dw_cxt->dw_buf, pages_to_write * BLCKSZ); securec_check(rc, "\0", "\0"); (void)dw_batch_file_recycle(dw_cxt, pages_to_write, false); @@ -1404,17 +1982,16 @@ static void dw_batch_flush(knl_g_dw_context* dw_cxt, XLogRecPtr latest_lsn, Thrd /* calculate it after checking file space, in case of updated by sync */ offset_page = file_head->start + dw_cxt->flush_page; - dw_assemble_batch(dw_cxt, offset_page, file_head->head.dwn); + dw_assemble_batch(dw_cxt, offset_page, file_head->head.dwn, is_new_relfilenode); pgstat_report_waitevent(WAIT_EVENT_DW_WRITE); - dw_pwrite_file(dw_cxt->fd, dw_cxt->buf, (pages_to_write * BLCKSZ), (offset_page * BLCKSZ), DW_FILE_NAME); + dw_pwrite_file(dw_cxt->fd, dw_cxt->buf, (pages_to_write * BLCKSZ), (offset_page * BLCKSZ), dw_cxt->file_name); pgstat_report_waitevent(WAIT_EVENT_END); - dw_stat_batch_flush(&dw_cxt->batch_stat_info, pages_to_write); + dw_stat_batch_flush(&dw_cxt->batch_stat_info, pages_to_write, is_new_relfilenode); /* the tail of this flushed batch is the head of the next batch */ dw_cxt->flush_page += (pages_to_write - 1); dw_cxt->write_pos = 0; - dw_cxt->is_new_relfilenode = false; thrd_dw_cxt->dw_page_idx = offset_page; LWLockRelease(dw_cxt->flush_lock); @@ -1425,10 +2002,10 @@ static void dw_batch_flush(knl_g_dw_context* dw_cxt, XLogRecPtr latest_lsn, Thrd pages_to_write))); } -void dw_perform_batch_flush(uint32 size, CkptSortItem *dirty_buf_list, ThrdDwCxt* thrd_dw_cxt) +void dw_perform_batch_flush(uint32 size, CkptSortItem 
*dirty_buf_list, int thread_id, ThrdDwCxt* thrd_dw_cxt) { uint16 batch_size; - knl_g_dw_context *dw_cxt = &g_instance.dw_batch_cxt; + int file_id; XLogRecPtr latest_lsn = InvalidXLogRecPtr; XLogRecPtr page_lsn; @@ -1437,7 +2014,10 @@ void dw_perform_batch_flush(uint32 size, CkptSortItem *dirty_buf_list, ThrdDwCxt return; } - if (SECUREC_UNLIKELY(pg_atomic_read_u32(&dw_cxt->closed))) { + file_id = dw_fetch_file_id(thread_id); + dw_batch_file_context *dw_cxt = &g_instance.dw_batch_cxt.batch_file_cxts[file_id]; + + if (SECUREC_UNLIKELY(pg_atomic_read_u32(&g_instance.dw_batch_cxt.closed))) { ereport(ERROR, (errmodule(MOD_DW), errmsg("[batch flush] Double write already closed"))); } @@ -1470,15 +2050,15 @@ void dw_perform_batch_flush(uint32 size, CkptSortItem *dirty_buf_list, ThrdDwCxt dw_batch_flush(dw_cxt, latest_lsn, thrd_dw_cxt); } } -void dw_batch_file_truncate() -{ - knl_g_dw_context *cxt = &g_instance.dw_batch_cxt; + +static void dw_batch_file_truncate(dw_batch_file_context *cxt) +{ ereport(DW_LOG_LEVEL, (errmodule(MOD_DW), errmsg("[batch flush] DW truncate start: file_head[dwn %hu, start %hu], total_pages %hu", cxt->file_head->head.dwn, cxt->file_head->start, cxt->flush_page))); - /* + /* * If we can grab dw flush lock, truncate dw file for faster recovery. * * Note: This is only for recovery optimization. we can not block on @@ -1490,79 +2070,33 @@ void dw_batch_file_truncate() errmsg("[batch flush] Can not get dw flush lock and skip dw truncate for this time"))); return; } + if (dw_batch_file_recycle(cxt, 0, true)) { LWLockRelease(cxt->flush_lock); } ereport(LOG, (errmodule(MOD_DW), errmsg("[batch flush] DW truncate end: file_head[dwn %hu, start %hu], total_pages %hu", - cxt->file_head->head.dwn, cxt->file_head->start, cxt->flush_page))); + cxt->file_head->head.dwn, cxt->file_head->start, cxt->flush_page))); } -void dw_single_file_truncate(bool is_first) +void dw_batch_file_truncate() { - uint16 max_idx = 0; - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - dw_file_head_t *file_head = NULL; - volatile uint16 org_start = 0; - volatile uint16 org_dwn = 0; - LWLock* flush_lock = NULL; - uint64 head_offset = 0; + int i; + int dw_file_num = g_instance.dw_batch_cxt.batch_meta_file.dw_file_num; + knl_g_dw_context *cxt = &g_instance.dw_batch_cxt; - if (is_first) { - file_head = single_cxt->file_head; - flush_lock = single_cxt->flush_lock; - head_offset = 0; - } else { - file_head = single_cxt->second_file_head; - flush_lock = single_cxt->second_flush_lock; - head_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; - } - - if (!LWLockConditionalAcquire(flush_lock, LW_EXCLUSIVE)) { + if (!LWLockConditionalAcquire(cxt->flush_lock, LW_SHARED)) { ereport(LOG, (errmodule(MOD_DW), - errmsg("[single flush] can not get dw flush lock and skip dw truncate for this time"))); + errmsg("[batch flush] Can not get dw flush lock and skip dw truncate for this time"))); return; } - org_start = file_head->start; - org_dwn = file_head->head.dwn; - max_idx = get_max_single_write_pos(is_first); - if (max_idx == file_head->start) { - LWLockRelease(flush_lock); - return; - } - LWLockRelease(flush_lock); - - CheckPointSyncForDw(); - - if (!LWLockConditionalAcquire(flush_lock, LW_EXCLUSIVE)) { - ereport(LOG, (errmodule(MOD_DW), - errmsg("[single flush] can not get dw flush lock and skip dw truncate after sync for this time"))); - return; - } else if (org_start != file_head->start || org_dwn != file_head->head.dwn) { - LWLockRelease(flush_lock); - return; + for (i = 0; i < dw_file_num; i++) { + 
dw_batch_file_truncate(&cxt->batch_file_cxts[i]); } - file_head->start = max_idx; - dw_prepare_file_head((char *)file_head, file_head->start, file_head->head.dwn); - - Assert(file_head->head.dwn == file_head->tail.dwn); - dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, head_offset, SINGLE_DW_FILE_NAME); - LWLockRelease(flush_lock); - - ereport(LOG, (errmodule(MOD_DW), - errmsg("[single flush][%s] DW truncate end: file_head[dwn %hu, start %hu], write_pos %hu", - is_first ? "first version" : "second_version", - file_head->head.dwn, file_head->start, is_first ? single_cxt->write_pos : single_cxt->second_write_pos))); - - if (is_first) { - (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.file_trunc_num, 1); - } else { - (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.second_file_trunc_num, 1); - } - return; + LWLockRelease(cxt->flush_lock); } void dw_truncate() @@ -1608,125 +2142,17 @@ void dw_exit(bool single) ereport(LOG, (errmodule(MOD_DW), errmsg("Double write exit"))); - /* Do a final truncate before free resource. */ - if (single) { - if (pg_atomic_read_u32(&g_instance.dw_single_cxt.dw_version) == DW_SUPPORT_NEW_SINGLE_FLUSH) { - dw_single_file_truncate(true); - dw_single_file_truncate(false); - } else { - dw_single_old_file_truncate(); - } - } else { - dw_batch_file_truncate(); - } - - dw_free_resource(dw_cxt); + dw_free_resource(dw_cxt, single); } -static void dw_generate_single_file() -{ - char *file_head = NULL; - int64 remain_size; - int fd = -1; - errno_t rc; - char *unaligned_buf = NULL; - - if (file_exists(SINGLE_DW_FILE_NAME)) { - ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), "DW single flush file already exists")); - } - - ereport(LOG, (errmodule(MOD_DW), errmsg("DW bootstrap single flush file"))); - - fd = open(SINGLE_DW_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); - if (fd == -1) { - ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", - SINGLE_DW_FILE_NAME))); - } - - unaligned_buf = (char *)palloc0(DW_FILE_EXTEND_SIZE + BLCKSZ); /* one more BLCKSZ for alignment */ - - file_head = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); - - /* file head and first batch head will be writen */ - remain_size = (DW_SINGLE_DIRTY_PAGE_NUM + DW_SINGLE_BUFTAG_PAGE_NUM) * BLCKSZ; - dw_prepare_file_head(file_head, 0, 0, 0); - pgstat_report_waitevent(WAIT_EVENT_DW_WRITE); - dw_pwrite_file(fd, file_head, BLCKSZ, 0, SINGLE_DW_FILE_NAME); - rc = memset_s(file_head, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "\0", "\0"); - dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, remain_size, DW_SINGLE_FILE_SIZE, true); - pgstat_report_waitevent(WAIT_EVENT_END); - ereport(LOG, (errmodule(MOD_DW), errmsg("Double write single flush file created successfully"))); - - (void)close(fd); - fd = -1; - pfree(unaligned_buf); - return; -} - -void dw_generate_new_single_file() -{ - char *file_head = NULL; - int64 extend_size; - int fd = -1; - errno_t rc; - char *unaligned_buf = NULL; - - if (file_exists(SINGLE_DW_FILE_NAME)) { - ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), "DW single flush file already exists")); - } - - ereport(LOG, (errmodule(MOD_DW), errmsg("DW bootstrap new single flush file"))); - - fd = open(SINGLE_DW_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); - if (fd == -1) { - ereport(PANIC, - (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", - SINGLE_DW_FILE_NAME))); - } - - /* NO EREPORT(ERROR) from here till changes are logged */ - START_CRIT_SECTION(); - 
unaligned_buf = (char *)palloc0(DW_FILE_EXTEND_SIZE + BLCKSZ); /* one more BLCKSZ for alignment */ - - file_head = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); - - /* first version page int */ - extend_size = DW_FIRST_DATA_PAGE_NUM * BLCKSZ; - dw_prepare_file_head(file_head, 0, 0, DW_SUPPORT_NEW_SINGLE_FLUSH); - dw_pwrite_file(fd, file_head, BLCKSZ, 0, SINGLE_DW_FILE_NAME); - - rc = memset_s(file_head, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "\0", "\0"); - dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, extend_size, DW_NEW_SINGLE_FILE_SIZE, true); - - /* second version page init */ - extend_size = (DW_SECOND_BUFTAG_PAGE_NUM + DW_SECOND_DATA_PAGE_NUM) * BLCKSZ; - dw_prepare_file_head(file_head, 0, 0, DW_SUPPORT_NEW_SINGLE_FLUSH); - dw_pwrite_file(fd, file_head, BLCKSZ, (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ, SINGLE_DW_FILE_NAME); - - rc = memset_s(file_head, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "\0", "\0"); - dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, extend_size, DW_NEW_SINGLE_FILE_SIZE, true); - END_CRIT_SECTION(); - - ereport(LOG, (errmodule(MOD_DW), errmsg("Double write single flush file created successfully"))); - - (void)close(fd); - fd = -1; - pfree(unaligned_buf); - return; -} - -static void dw_encrypt_page(BufferTag tag, char* buf) +void dw_encrypt_page(BufferTag tag, char* buf) { TdeInfo tde_info = {0}; TDE::TDEBufferCache::get_instance().search_cache(tag.rnode, &tde_info); if (strlen(tde_info.dek_cipher) == 0) { - ereport(ERROR, (errmodule(MOD_SEC_TDE), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + ereport(ERROR, (errmodule(MOD_SEC_TDE), errcode(ERRCODE_UNEXPECTED_NULL_VALUE), errmsg("double write copy page get TDE buffer cache entry failed, RelFileNode is %u/%u/%u/%u", - tag.rnode.spcNode, tag.rnode.dbNode, tag.rnode.relNode, + tag.rnode.spcNode, tag.rnode.dbNode, tag.rnode.relNode, tag.rnode.bucketNode), errdetail("N/A"), errcause("TDE cache miss this key"), @@ -1746,334 +2172,6 @@ bool free_space_enough(int buf_id) return false; } -uint16 atomic_get_dw_write_pos(bool is_first) -{ - knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; - uint16 page_num = is_first ? DW_FIRST_DATA_PAGE_NUM : DW_SECOND_DATA_PAGE_NUM; - uint32 write_pos; - LWLock *lock = is_first ? dw_single_cxt->flush_lock : dw_single_cxt->second_flush_lock; - - pg_memory_barrier(); - write_pos = is_first ? pg_atomic_read_u32(&dw_single_cxt->write_pos) : - pg_atomic_read_u32(&dw_single_cxt->second_write_pos); - - while (true) { - if ((write_pos + 1 >= page_num)) { - (void)LWLockAcquire(lock, LW_EXCLUSIVE); - dw_single_file_recycle(is_first); - LWLockRelease(lock); - write_pos = is_first ? 
pg_atomic_read_u32(&dw_single_cxt->write_pos) : - pg_atomic_read_u32(&dw_single_cxt->second_write_pos); - /* fetch write_pos, we need to check write_pos + 1 again */ - continue; - } - - if (is_first) { - if (pg_atomic_compare_exchange_u32(&dw_single_cxt->write_pos, &write_pos, write_pos + 1)) { - return write_pos; - } - } else { - if (pg_atomic_compare_exchange_u32(&dw_single_cxt->second_write_pos, &write_pos, write_pos + 1)) { - return write_pos; - } - } - } - - return write_pos; -} - -uint16 first_version_dw_single_flush(BufferDesc *buf_desc) -{ - errno_t rc; - char *buf = t_thrd.proc->dw_buf; - knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; - dw_file_head_t *file_head = dw_single_cxt->file_head; - uint16 actual_pos; - uint64 page_write_offset; - dw_first_flush_item item; - PageHeader pghr = NULL; - BufferTag phy_tag; - - uint32 buf_state = LockBufHdr(buf_desc); - Block block = BufHdrGetBlock(buf_desc); - XLogRecPtr page_lsn = BufferGetLSN(buf_desc); - UnlockBufHdr(buf_desc, buf_state); - - phy_tag = buf_desc->tag; - dw_transfer_phybuffer_addr(buf_desc, &phy_tag); - - Assert(buf_desc->buf_id < SegmentBufferStartID); - Assert(free_space_enough(buf_desc->buf_id)); - - /* first step, copy buffer to dw buf, than flush page lsn, the buffer content lock is already held */ - rc = memcpy_s(buf, BLCKSZ, block, BLCKSZ); - securec_check(rc, "\0", "\0"); - - XLogWaitFlush(page_lsn); - if (buf_desc->encrypt) { - dw_encrypt_page(buf_desc->tag, buf); - } - - actual_pos = atomic_get_dw_write_pos(true); - - item.dwn = file_head->head.dwn; - item.buf_tag = phy_tag; - pghr = (PageHeader)buf; - - rc = memcpy_s(buf + pghr->pd_lower, sizeof(dw_first_flush_item), &item, sizeof(dw_first_flush_item)); - securec_check(rc, "\0", "\0"); - - dw_set_pg_checksum(buf, item.buf_tag.blockNum); - page_write_offset = (1 + actual_pos) * BLCKSZ; - Assert(actual_pos < DW_FIRST_DATA_PAGE_NUM); - dw_pwrite_file(dw_single_cxt->fd, buf, BLCKSZ, page_write_offset, SINGLE_DW_FILE_NAME); - - (void)pg_atomic_add_fetch_u64(&dw_single_cxt->single_stat_info.total_writes, 1); - - return actual_pos; -} - -uint16 second_version_dw_single_flush(BufferTag tag, Block block, XLogRecPtr page_lsn, - bool encrypt, BufferTag phy_tag) -{ - errno_t rc; - uint16 actual_pos; - uint64 page_write_offset; - uint64 tag_write_offset; - uint16 block_offset; - dw_single_flush_item item; - knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; - dw_file_head_t *file_head = dw_single_cxt->second_file_head; - char *buf = t_thrd.proc->dw_buf; - - /* first step, copy buffer to dw buf, than flush page lsn, the buffer content lock is already held */ - rc = memcpy_s(buf, BLCKSZ, block, BLCKSZ); - securec_check(rc, "\0", "\0"); - - XLogWaitFlush(page_lsn); - if (encrypt) { - dw_encrypt_page(tag, buf); - } - dw_set_pg_checksum(buf, phy_tag.blockNum); - - actual_pos = atomic_get_dw_write_pos(false); - - /* data page need skip head page and bufferTag page, bufferTag page need skip head page and first version page */ - page_write_offset = (actual_pos + DW_SECOND_DATA_START_IDX) * BLCKSZ; - tag_write_offset = DW_SECOND_BUFTAG_START_IDX * BLCKSZ + (actual_pos / SINGLE_BLOCK_TAG_NUM) * BLCKSZ; - block_offset = (actual_pos % SINGLE_BLOCK_TAG_NUM) * sizeof(dw_single_flush_item); - Assert(block_offset <= BLCKSZ - sizeof(dw_single_flush_item)); - Assert(actual_pos < DW_SECOND_DATA_PAGE_NUM); - Assert(page_write_offset < DW_NEW_SINGLE_FILE_SIZE && tag_write_offset < DW_SECOND_DATA_START_IDX * BLCKSZ); - - /* write the data page to dw file */ - 
dw_pwrite_file(dw_single_cxt->fd, buf, BLCKSZ, page_write_offset, SINGLE_DW_FILE_NAME); - - item.data_page_idx = actual_pos; - item.dwn = file_head->head.dwn; - item.buf_tag = phy_tag; - - /* Contents are protected with a CRC */ - INIT_CRC32C(item.crc); - COMP_CRC32C(item.crc, (char*)&item, offsetof(dw_single_flush_item, crc)); - FIN_CRC32C(item.crc); - - /* write the buffer tag item to dw file */ - (void)LWLockAcquire(dw_single_cxt->second_buftag_lock, LW_EXCLUSIVE); - dw_pread_file(dw_single_cxt->fd, buf, BLCKSZ, tag_write_offset); - rc = memcpy_s(buf + block_offset, BLCKSZ - block_offset, &item, sizeof(dw_single_flush_item)); - securec_check(rc, "\0", "\0"); - dw_pwrite_file(dw_single_cxt->fd, buf, BLCKSZ, tag_write_offset, SINGLE_DW_FILE_NAME); - - LWLockRelease(dw_single_cxt->second_buftag_lock); - (void)pg_atomic_add_fetch_u64(&dw_single_cxt->single_stat_info.second_total_writes, 1); - - return (actual_pos + DW_FIRST_DATA_PAGE_NUM); -} - -void dw_force_reset_single_file(uint32 dw_version) -{ - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - dw_file_head_t *file_head = single_cxt->file_head; - - CheckPointSyncForDw(); - - dw_prepare_file_head((char *)file_head, 0, file_head->head.dwn + 1); - dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, 0, SINGLE_DW_FILE_NAME); - (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.file_reset_num, 1); - - ereport(LOG, (errmodule(MOD_DW), - errmsg("DW single flush finish recovery, reset the file head[dwn %hu, start %hu].", - file_head->head.dwn, file_head->start))); - - if (dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - file_head = single_cxt->second_file_head; - dw_prepare_file_head((char *)file_head, 0, file_head->head.dwn + 1); - dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ, SINGLE_DW_FILE_NAME); - (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.second_file_reset_num, 1); - ereport(LOG, (errmodule(MOD_DW), - errmsg("DW single flush finish recovery [second version], reset the file head[dwn %hu, start %hu].", - file_head->head.dwn, file_head->start))); - } - - return; -} - -static uint16 get_max_single_write_pos(bool is_first) -{ - uint16 max_idx = 0; - uint16 i = 0; - uint16 start = 0; - uint16 end = 0; - dw_file_head_t *file_head = NULL; - knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; - - /* single_flush_state, first */ - if (is_first) { - file_head = dw_single_cxt->file_head; - start = file_head->start; - end = pg_atomic_read_u32(&dw_single_cxt->write_pos); - } else { - file_head = dw_single_cxt->second_file_head; - start = file_head->start + DW_FIRST_DATA_PAGE_NUM; - end = pg_atomic_read_u32(&dw_single_cxt->second_write_pos) + DW_FIRST_DATA_PAGE_NUM; - } - - for (i = start; i < end; i++) { - if (dw_single_cxt->single_flush_state[i] == false) { - break; - } - } - max_idx = i; - - if (!is_first) { - max_idx = max_idx - DW_FIRST_DATA_PAGE_NUM; - } - - return max_idx; -} - -void wait_all_single_dw_finish_flush(bool is_first) -{ - uint16 start = 0; - uint16 end = 0; - dw_file_head_t *file_head = NULL; - knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; - - /* single_flush_state, first */ - if (is_first) { - file_head = dw_single_cxt->file_head; - start = file_head->start; - end = pg_atomic_read_u32(&dw_single_cxt->write_pos); - } else { - file_head = dw_single_cxt->second_file_head; - start = file_head->start + DW_FIRST_DATA_PAGE_NUM; - end = pg_atomic_read_u32(&dw_single_cxt->second_write_pos) + DW_FIRST_DATA_PAGE_NUM; - } - - for (uint i = start; i 
< end;) { - if (dw_single_cxt->single_flush_state[i] != false) { - i++; - continue; - } else { - (void)sched_yield(); - } - } - return; -} - -void dw_single_file_recycle(bool is_first) -{ - bool file_full = false; - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - dw_file_head_t *file_head = NULL; - uint16 end = 0; - errno_t rc; - uint64 head_offset = 0; - uint16 flush_state_start = 0; - uint16 page_num = 0; - - if (is_first) { - file_head = single_cxt->file_head; - end = single_cxt->write_pos; - flush_state_start = 0; - page_num = DW_FIRST_DATA_PAGE_NUM; - head_offset = 0; - } else { - file_head = single_cxt->second_file_head; - end = single_cxt->second_write_pos; - flush_state_start = DW_FIRST_DATA_PAGE_NUM; - page_num = DW_SECOND_DATA_PAGE_NUM; - head_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; - } - - file_full = end + 1 >= page_num; - - if (!file_full) { - return; - } - - /* reset start position and flush page num for full recycle */ - wait_all_single_dw_finish_flush(is_first); - - CheckPointSyncForDw(); - - rc = memset_s(single_cxt->single_flush_state + (flush_state_start * sizeof(bool)), - sizeof(bool) * page_num, 0, sizeof(bool) * page_num); - securec_check(rc, "\0", "\0"); - - dw_prepare_file_head((char *)file_head, 0, file_head->head.dwn + 1); - dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, head_offset, SINGLE_DW_FILE_NAME); - - /* The start and write_pos must be reset at the end. */ - file_head->start = 0; - if (is_first) { - single_cxt->write_pos = 0; - } else { - single_cxt->second_write_pos = 0; - } - - if (is_first) { - (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.file_reset_num, 1); - } else { - (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.second_file_reset_num, 1); - } - ereport(LOG, (errmodule(MOD_DW), errmsg("[single flush] [%s] Reset DW file: file_head[dwn %hu, start %hu], " - "writer pos is %hu", is_first ? "first version" : "second_version", file_head->head.dwn, file_head->start, - is_first ? 
single_cxt->write_pos : single_cxt->second_write_pos))); - return; -} - -bool dw_verify_item(const dw_single_flush_item* item, uint16 dwn) -{ - if (item->dwn != dwn) { - return false; - } - - if (item->buf_tag.forkNum == InvalidForkNumber || item->buf_tag.blockNum == InvalidBlockNumber || - item->buf_tag.rnode.relNode == InvalidOid) { - if (item->dwn != 0 || item->data_page_idx != 0) { - ereport(WARNING, - (errmsg("dw recovery, find invalid item [page_idx %hu dwn %hu] skip this item," - "buf_tag[rel %u/%u/%u blk %u fork %d]", item->data_page_idx, item->dwn, - item->buf_tag.rnode.spcNode, item->buf_tag.rnode.dbNode, item->buf_tag.rnode.relNode, - item->buf_tag.blockNum, item->buf_tag.forkNum))); - } - return false; - } - pg_crc32c crc; - /* Contents are protected with a CRC */ - INIT_CRC32C(crc); - COMP_CRC32C(crc, (char*)item, offsetof(dw_single_flush_item, crc)); - FIN_CRC32C(crc); - - if (EQ_CRC32C(crc, item->crc)) { - return true; - } else { - return false; - } -} - int buftag_compare(const void *pa, const void *pb) { const dw_single_flush_item *a = (dw_single_flush_item *)pa; @@ -2115,78 +2213,15 @@ int buftag_compare(const void *pa, const void *pb) } } -static inline void dw_log_recovery_page(int elevel, const char *state, BufferTag buf_tag) +void dw_log_recovery_page(int elevel, const char *state, BufferTag buf_tag) { ereport(elevel, (errmodule(MOD_DW), - errmsg("[single flush] recovery, %s: buf_tag[rel %u/%u/%u blk %u fork %d], compress: %u", + errmsg("[single flush] recovery, %s: buf_tag[rel %u/%u/%u blk %u fork %d]", state, buf_tag.rnode.spcNode, buf_tag.rnode.dbNode, buf_tag.rnode.relNode, buf_tag.blockNum, - buf_tag.forkNum, buf_tag.rnode.opt))); + buf_tag.forkNum))); } -static void dw_recovery_first_version_page() -{ - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - dw_file_head_t *file_head = single_cxt->file_head; - errno_t rc = 0; - uint64 offset = 0; - PageHeader pghr = NULL; - SMgrRelation reln = NULL; - char *unaligned_buf = (char *)palloc0(BLCKSZ + BLCKSZ); /* one more BLCKSZ for alignment */ - char *dw_block = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); - char *data_block = (char *)palloc0(BLCKSZ); - dw_first_flush_item flush_item; - - for (uint16 i = file_head->start; i < DW_FIRST_DATA_PAGE_NUM; i++) { - offset = (i + 1) * BLCKSZ; /* need skip the file head */ - dw_pread_file(single_cxt->fd, dw_block, BLCKSZ, offset); - pghr = (PageHeader)dw_block; - - rc = memcpy_s(&flush_item, sizeof(dw_first_flush_item), dw_block + pghr->pd_lower, sizeof(dw_first_flush_item)); - securec_check(rc, "\0", "\0"); - - if (!dw_verify_pg_checksum((PageHeader)dw_block, flush_item.buf_tag.blockNum, true)) { - if (PageIsNew(dw_block)) { - Assert(flush_item.buf_tag.rnode.relNode == 0); - dw_log_recovery_page(LOG, "[first version] dw page is new, break this recovery", flush_item.buf_tag); - break; - } - dw_log_recovery_page(WARNING, "DW single page broken", flush_item.buf_tag); - dw_log_page_header((PageHeader)dw_block); - continue; - } - dw_log_recovery_page(DW_LOG_LEVEL, "DW page fine", flush_item.buf_tag); - - reln = smgropen(flush_item.buf_tag.rnode, InvalidBackendId, GetColumnNum(flush_item.buf_tag.forkNum)); - - /* read data page */ - if (!dw_read_data_page(flush_item.buf_tag, reln, data_block)) { - continue; - } - dw_log_page_header((PageHeader)data_block); - if (!dw_verify_pg_checksum((PageHeader)data_block, flush_item.buf_tag.blockNum, false) || - XLByteLT(PageGetLSN(data_block), PageGetLSN(dw_block))) { - memset_s(dw_block + pghr->pd_lower, sizeof(dw_first_flush_item), 0, 
sizeof(dw_first_flush_item)); - securec_check(rc, "\0", "\0"); - dw_set_pg_checksum(dw_block, flush_item.buf_tag.blockNum); - - if (IsSegmentPhysicalRelNode(flush_item.buf_tag.rnode)) { - // seg_space must be initialized before. - seg_physical_write(reln->seg_space, flush_item.buf_tag.rnode, flush_item.buf_tag.forkNum, - flush_item.buf_tag.blockNum, (const char *)dw_block, false); - } else { - smgrwrite(reln, flush_item.buf_tag.forkNum, flush_item.buf_tag.blockNum, - (const char *)dw_block, false); - } - dw_log_recovery_page(LOG, "Date page recovered", flush_item.buf_tag); - dw_log_page_header((PageHeader)data_block); - } - } - - pfree(unaligned_buf); - pfree(data_block); -} - -static bool dw_read_data_page(BufferTag buf_tag, SMgrRelation reln, char* data_block) +bool dw_read_data_page(BufferTag buf_tag, SMgrRelation reln, char* data_block) { BlockNumber blk_num; @@ -2218,165 +2253,15 @@ static bool dw_read_data_page(BufferTag buf_tag, SMgrRelation reln, char* data_b return true; } -static void dw_recovery_second_version_page() -{ - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - dw_single_flush_item *item = NULL; - uint16 rec_num = 0; - dw_file_head_t *file_head = single_cxt->second_file_head; - char *buf = single_cxt->buf; - uint64 second_offset = DW_SECOND_BUFTAG_START_IDX * BLCKSZ; - - item = (dw_single_flush_item*)palloc0(sizeof(dw_single_flush_item) * DW_SECOND_DATA_PAGE_NUM); - - /* read all buffer tag item, need skip head page */ - dw_pread_file(single_cxt->fd, buf, DW_SECOND_BUFTAG_PAGE_NUM * BLCKSZ, second_offset); - - uint64 offset = 0; - dw_single_flush_item *temp = NULL; - for (uint16 i = single_cxt->second_file_head->start; i < DW_SECOND_DATA_PAGE_NUM; i++) { - offset = i * sizeof(dw_single_flush_item); - temp = (dw_single_flush_item*)((char*)buf + offset); - if (dw_verify_item(temp, file_head->head.dwn)) { - item[rec_num].data_page_idx = temp->data_page_idx; - item[rec_num].dwn = temp->dwn; - item[rec_num].buf_tag = temp->buf_tag; - item[rec_num].crc = temp->crc; - rec_num++; - } - } - ereport(LOG, (errmodule(MOD_DW), errmsg("[second version] DW single flush file valid item num is %d.", rec_num))); - if (rec_num > 0) { - qsort(item, rec_num, sizeof(dw_single_flush_item), buftag_compare); - ereport(LOG, (errmodule(MOD_DW), - errmsg("[second version] DW single flush file valid buftag item qsort finish."))); - dw_recovery_single_page(item, rec_num); - } - - pfree(item); -} - -static void dw_recovery_old_single_dw_page() -{ - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - dw_single_flush_item *item = NULL; - dw_file_head_t *file_head = single_cxt->recovery_buf.file_head; - uint16 blk_num = DW_SINGLE_DIRTY_PAGE_NUM / SINGLE_BLOCK_TAG_NUM; - char *buf = single_cxt->recovery_buf.buf; - uint16 rec_num = 0; - - item = (dw_single_flush_item*)palloc0(sizeof(dw_single_flush_item) * DW_SINGLE_DIRTY_PAGE_NUM); - - /* read all buffer tag item, need skip head page */ - dw_pread_file(single_cxt->fd, buf, blk_num * BLCKSZ, BLCKSZ); - int offset = 0; - dw_single_flush_item *temp = NULL; - for (int i = 0; i < DW_SINGLE_DIRTY_PAGE_NUM; i++) { - offset = i * sizeof(dw_single_flush_item); - temp = (dw_single_flush_item*)((char*)buf + offset); - if (dw_verify_item(temp, file_head->head.dwn)) { - item[rec_num].data_page_idx = temp->data_page_idx; - item[rec_num].dwn = temp->dwn; - item[rec_num].buf_tag = temp->buf_tag; - item[rec_num].crc = temp->crc; - rec_num++; - } - } - - ereport(LOG, (errmodule(MOD_DW), errmsg("[old version] DW single flush file valid item num is %d.", 
rec_num))); - if (rec_num > 0) { - qsort(item, rec_num, sizeof(dw_single_flush_item), buftag_compare); - ereport(LOG, (errmodule(MOD_DW), errmsg("DW single flush file valid buftag item qsort finish."))); - dw_recovery_single_page(item, rec_num); - } - - pfree(item); -} - -void dw_recovery_single_page(const dw_single_flush_item *item, uint16 item_num) -{ - SMgrRelation reln; - uint32 offset = 0; - BufferTag buf_tag; - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - char *unaligned_buf = (char *)palloc0(BLCKSZ + BLCKSZ); /* one more BLCKSZ for alignment */ - char *dw_block = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); - char *data_block = (char *)palloc0(BLCKSZ); - uint64 base_offset = 0; - - if (single_cxt->file_head->dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - base_offset = 1 + DW_FIRST_DATA_PAGE_NUM + 1 + DW_SECOND_BUFTAG_PAGE_NUM; - } else { - base_offset = DW_SINGLE_BUFTAG_PAGE_NUM + 1; - } - - for (uint16 i = 0; i < item_num; i++) { - buf_tag = item[i].buf_tag; - - /* read dw file page */ - offset = (base_offset + item[i].data_page_idx) * BLCKSZ; - dw_pread_file(single_cxt->fd, dw_block, BLCKSZ, offset); - - if (!dw_verify_pg_checksum((PageHeader)dw_block, buf_tag.blockNum, true)) { - dw_log_recovery_page(WARNING, "DW single page broken", buf_tag); - dw_log_page_header((PageHeader)dw_block); - continue; - } - dw_log_recovery_page(DW_LOG_LEVEL, "DW page fine", buf_tag); - - reln = smgropen(buf_tag.rnode, InvalidBackendId, GetColumnNum(buf_tag.forkNum)); - - /* read data page */ - if (!dw_read_data_page(buf_tag, reln, data_block)) { - continue; - } - dw_log_page_header((PageHeader)data_block); - if (!dw_verify_pg_checksum((PageHeader)data_block, buf_tag.blockNum, false) || - XLByteLT(PageGetLSN(data_block), PageGetLSN(dw_block))) { - if (IsSegmentPhysicalRelNode(buf_tag.rnode)) { - // seg_space must be initialized before. - seg_physical_write(reln->seg_space, buf_tag.rnode, buf_tag.forkNum, buf_tag.blockNum, - (const char *)dw_block, false); - } else { - smgrwrite(reln, buf_tag.forkNum, buf_tag.blockNum, (const char *)dw_block, false); - } - dw_log_recovery_page(LOG, "Date page recovered", buf_tag); - dw_log_page_header((PageHeader)data_block); - } - } - - pfree(unaligned_buf); - pfree(data_block); - return; -} - -static void dw_recovery_partial_write_single() -{ - knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; - - if (single_cxt->file_head->dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { - dw_recovery_first_version_page(); - dw_recovery_second_version_page(); - } else { - dw_recovery_old_single_dw_page(); - } - - ereport(LOG, (errmodule(MOD_DW), errmsg("DW single flush file recovery finish."))); - - /* reset the file after the recovery is complete */ - dw_force_reset_single_file(single_cxt->file_head->dw_version); - return; -} - /* * If the dw is enable, and the pagewriter thread is running, indicates that the device is not in the initialization - * phase, when the version num smaller than DW_SUPPORT_NEW_SINGLE_FLUSH, not support the + * phase, when the version num smaller than DW_SUPPORT_NEW_SINGLE_FLUSH, not support the * backend thread flush dirty page. 
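All of the single-flush recovery paths removed above apply the same restore rule: a data page is rewritten from its double-write copy only when the on-disk page fails checksum verification or carries an older LSN than the copy (the `!dw_verify_pg_checksum(...) || XLByteLT(PageGetLSN(data_block), PageGetLSN(dw_block))` test). A minimal standalone sketch of that predicate, using a simplified stand-in page layout rather than the real PageHeader:

```
#include <cstdint>
#include <cstdio>

/* Simplified stand-in for a disk page; the kernel checksums the whole block. */
struct MiniPage {
    uint64_t lsn;       /* LSN of the last WAL record applied to the page */
    uint32_t checksum;  /* checksum as stored on disk */
};

static uint32_t compute_checksum(const MiniPage &p)
{
    /* placeholder hash; openGauss computes a real page checksum here */
    return static_cast<uint32_t>(p.lsn * 2654435761u);
}

/* Restore rule: take the double-write copy iff the data page is broken
 * (checksum mismatch) or is older than the copy (LSN comparison). */
static bool needs_restore(const MiniPage &data, const MiniPage &dw_copy)
{
    return compute_checksum(data) != data.checksum || data.lsn < dw_copy.lsn;
}

int main()
{
    MiniPage dw_copy = {200, 0};
    dw_copy.checksum = compute_checksum(dw_copy);
    MiniPage stale = {100, 0};
    stale.checksum = compute_checksum(stale);
    printf("restore stale page: %s\n", needs_restore(stale, dw_copy) ? "yes" : "no");
    return 0;
}
```

The LSN half of the test is what makes restoration idempotent: pages already flushed at or past the copy's LSN are left untouched.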
*/ bool backend_can_flush_dirty_page() { - if (dw_enabled() && pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) > 0 && - (t_thrd.proc->workingVersionNum < DW_SUPPORT_NEW_SINGLE_FLUSH || + if (dw_enabled() && pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) > 0 && + (t_thrd.proc->workingVersionNum < DW_SUPPORT_NEW_SINGLE_FLUSH || pg_atomic_read_u32(&g_instance.dw_single_cxt.dw_version) < DW_SUPPORT_NEW_SINGLE_FLUSH)) { Assert(g_instance.dw_single_cxt.closed == 0); return false; @@ -2419,3 +2304,22 @@ void clean_proc_dw_buf() reset_dw_pos_flag(); } } + +static int dw_fetch_file_id(int thread_id) +{ + int file_num = g_instance.attr.attr_storage.dw_file_num; + return thread_id % file_num; +} + +static void dw_fetch_thread_ids(int file_id, int &size, int *thread_ids) +{ + int thread_num = g_instance.attr.attr_storage.pagewriter_thread_num + 1; + + size = 0; + for (int thread_id = 0; thread_id < thread_num; thread_id++) { + if (dw_fetch_file_id(thread_id) == file_id) { + thread_ids[size] = thread_id; + size++; + } + } +} diff --git a/src/gausskernel/storage/access/transam/extreme_rto/batch_redo.cpp b/src/gausskernel/storage/access/transam/extreme_rto/batch_redo.cpp index 5f4396d7c..707295a36 100644 --- a/src/gausskernel/storage/access/transam/extreme_rto/batch_redo.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/batch_redo.cpp @@ -1,326 +1,307 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
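Stepping back to the double-write changes that end just above this file's diff: the new dw_fetch_file_id/dw_fetch_thread_ids helpers pin each writer thread to one of dw_file_num files by simple modulo, so every file is serviced by a fixed subset of threads. A standalone sketch of the mapping and its inverse (the counts are illustrative; in the patch thread_num is pagewriter_thread_num + 1):

```
#include <cstdio>
#include <vector>

/* Thread-to-file mapping: thread t always writes through file (t % file_num). */
static int fetch_file_id(int thread_id, int file_num)
{
    return thread_id % file_num;
}

/* Inverse mapping: collect every thread that shares a given file. */
static std::vector<int> fetch_thread_ids(int file_id, int thread_num, int file_num)
{
    std::vector<int> ids;
    for (int t = 0; t < thread_num; t++) {
        if (fetch_file_id(t, file_num) == file_id) {
            ids.push_back(t);
        }
    }
    return ids;
}

int main()
{
    const int file_num = 4;
    const int thread_num = 9;
    for (int f = 0; f < file_num; f++) {
        printf("file %d serves threads:", f);
        for (int t : fetch_thread_ids(f, thread_num, file_num)) {
            printf(" %d", t);
        }
        printf("\n");
    }
    return 0;
}
```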
- * ------------------------------------------------------------------------- - * - * batch_redo.cpp - * - * IDENTIFICATION - * src/gausskernel/storage/access/transam/extreme_rto/batch_redo.cpp - * - * ------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "access/xact.h" -#include "access/xlog.h" -#include "access/xlog_internal.h" -#include "access/xlogreader.h" -#include "access/xlogproc.h" -#include "access/visibilitymap.h" -#include "catalog/storage_xlog.h" -#include "commands/dbcommands.h" -#include "commands/tablespace.h" -#include "storage/freespace.h" -#include "utils/relmapper.h" - -#include "access/extreme_rto/batch_redo.h" -#include "access/extreme_rto/redo_item.h" -#include "access/extreme_rto/dispatcher.h" -#include "access/extreme_rto/page_redo.h" - -#include "access/xlogproc.h" - -namespace extreme_rto { -static inline void PRXLogRecGetBlockTag(XLogRecParseState *recordBlockState, RelFileNode *rnode, BlockNumber *blknum, - ForkNumber *forknum) -{ - XLogBlockParse *blockparse = &(recordBlockState->blockparse); - - if (rnode != NULL) { - rnode->dbNode = blockparse->blockhead.dbNode; - rnode->relNode = blockparse->blockhead.relNode; - rnode->spcNode = blockparse->blockhead.spcNode; - rnode->bucketNode = blockparse->blockhead.bucketNode; - rnode->opt = blockparse->blockhead.opt; - } - if (blknum != NULL) { - *blknum = blockparse->blockhead.blkno; - } - if (forknum != NULL) { - *forknum = blockparse->blockhead.forknum; - } -} - -void PRInitRedoItemEntry(RedoItemHashEntry *redoItemHashEntry) -{ - redoItemHashEntry->redoItemNum = 0; - redoItemHashEntry->head = NULL; - redoItemHashEntry->tail = NULL; -} - -HTAB *PRRedoItemHashInitialize(MemoryContext context) -{ - HASHCTL ctl; - HTAB *hTab = NULL; - - /* - * create hashtable that indexes the redo items - */ - errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); - securec_check(rc, "\0", "\0"); - ctl.hcxt = context; - ctl.keysize = sizeof(RedoItemTag); - ctl.entrysize = sizeof(RedoItemHashEntry); - ctl.hash = tag_hash; - hTab = hash_create("Redo item hash by relfilenode and blocknum", INITredoItemHashSIZE, &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - - return hTab; -} - -void PRRegisterBlockInsertToList(RedoItemHashEntry *redoItemHashEntry, XLogRecParseState *record) -{ - if (redoItemHashEntry->tail != NULL) { - redoItemHashEntry->tail->nextrecord = record; - redoItemHashEntry->tail = record; - } else { - redoItemHashEntry->tail = record; - redoItemHashEntry->head = record; - } - record->nextrecord = NULL; - redoItemHashEntry->redoItemNum++; -} - -void PRRegisterBlockChangeExtended(XLogRecParseState *recordBlockState, const RelFileNode rNode, ForkNumber forkNum, - BlockNumber blkNo, HTAB *redoItemHash) -{ - RedoItemTag redoItemTag; - RedoItemHashEntry *redoItemHashEntry = NULL; - bool found = true; - - INIT_REDO_ITEM_TAG(redoItemTag, rNode, forkNum, blkNo); - - redoItemHashEntry = (RedoItemHashEntry *)hash_search(redoItemHash, (void *)&redoItemTag, HASH_ENTER, &found); - if (redoItemHashEntry == NULL) { - ereport(ERROR, (errcode(ERRCODE_FETCH_DATA_FAILED), - errmsg("could not find or create redo item entry: rel %u/%u/%u " - "forknum %d blkno %u", - rNode.spcNode, rNode.dbNode, rNode.relNode, forkNum, blkNo))); - } - - if (!found) { - PRInitRedoItemEntry(redoItemHashEntry); - } - PRRegisterBlockInsertToList(redoItemHashEntry, recordBlockState); -} - -void PRTrackRemoveEntry(HTAB *hashMap, RedoItemHashEntry *entry) -{ - XLogRecParseState *recordBlockState = 
entry->head; -#ifdef USE_ASSERT_CHECKING - XLogRecParseState *nextBlockState = entry->head; - while (nextBlockState != NULL) { - XLogRecParseState *prev = nextBlockState; - nextBlockState = (XLogRecParseState *)(nextBlockState->nextrecord); - - if (prev->refrecord != NULL) { - DoRecordCheck(prev, InvalidXLogRecPtr, false); - } - - ereport(LOG, (errmsg("PRTrackRemoveEntry:record(%X/%X) relation %u/%u/%u forknum %u blocknum %u dropped(%p)", - (uint32)(prev->blockparse.blockhead.end_ptr >> 32), - (uint32)(prev->blockparse.blockhead.end_ptr), prev->blockparse.blockhead.spcNode, - prev->blockparse.blockhead.dbNode, prev->blockparse.blockhead.relNode, - prev->blockparse.blockhead.forknum, prev->blockparse.blockhead.blkno, prev->refrecord))); - } - -#endif - XLogBlockParseStateRelease(recordBlockState); - - if (hash_search(hashMap, entry, HASH_REMOVE, NULL) == NULL) { - ereport(ERROR, (errmsg("PRTrackRemoveEntry:Redo item hash table corrupted"))); - } -} - -void PRTrackRelTruncate(HTAB *hashMap, const RelFileNode rNode, ForkNumber forkNum, BlockNumber blkNo) -{ - HASH_SEQ_STATUS status; - RedoItemHashEntry *redoItemEntry = NULL; - hash_seq_init(&status, hashMap); - - while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { - if (RelFileNodeEquals(redoItemEntry->redoItemTag.rNode, rNode) && - redoItemEntry->redoItemTag.forkNum == forkNum && (redoItemEntry->redoItemTag.blockNum >= blkNo)) { - PRTrackRemoveEntry(hashMap, redoItemEntry); - } - } -} - -void PRTrackTableSpaceDrop(XLogRecParseState *recordBlockState, HTAB *hashMap) -{ - HASH_SEQ_STATUS status; - RedoItemHashEntry *redoItemEntry = NULL; - hash_seq_init(&status, hashMap); - - RelFileNode rNode; - PRXLogRecGetBlockTag(recordBlockState, &rNode, NULL, NULL); -#ifdef USE_ASSERT_CHECKING - ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear table space %u record", - (uint32)(recordBlockState->blockparse.blockhead.end_ptr >> 32), - (uint32)(recordBlockState->blockparse.blockhead.end_ptr), rNode.spcNode))); -#endif - - while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { - if (redoItemEntry->redoItemTag.rNode.spcNode == rNode.spcNode) { - PRTrackRemoveEntry(hashMap, redoItemEntry); - } - } - XLogBlockParseStateRelease(recordBlockState); -} - -void PRTrackDatabaseDrop(XLogRecParseState *recordBlockState, HTAB *hashMap) -{ - HASH_SEQ_STATUS status; - RedoItemHashEntry *redoItemEntry = NULL; - hash_seq_init(&status, hashMap); - - RelFileNode rNode; - PRXLogRecGetBlockTag(recordBlockState, &rNode, NULL, NULL); -#ifdef USE_ASSERT_CHECKING - ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear db %u/%u record", - (uint32)(recordBlockState->blockparse.blockhead.end_ptr >> 32), - (uint32)(recordBlockState->blockparse.blockhead.end_ptr), rNode.spcNode, rNode.dbNode))); -#endif - - while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { - if (redoItemEntry->redoItemTag.rNode.spcNode == rNode.spcNode && - redoItemEntry->redoItemTag.rNode.dbNode == rNode.dbNode) { - PRTrackRemoveEntry(hashMap, redoItemEntry); - } - } - XLogBlockParseStateRelease(recordBlockState); -} - -void PRTrackRelStorageDrop(XLogRecParseState *recordBlockState, HTAB *redoItemHash) -{ - XLogBlockParse *blockparse = &(recordBlockState->blockparse); - XLogBlockDdlParse *ddlParse = NULL; - XLogBlockParseGetDdlParse(recordBlockState, ddlParse); - - RelFileNode rNode; - rNode.spcNode = blockparse->blockhead.spcNode; - rNode.dbNode = blockparse->blockhead.dbNode; - rNode.relNode = blockparse->blockhead.relNode; 
- rNode.bucketNode = blockparse->blockhead.bucketNode; - rNode.opt = blockparse->blockhead.opt; -#ifdef USE_ASSERT_CHECKING - ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear relation %u/%u/%u forknum %u record", - (uint32)(blockparse->blockhead.end_ptr >> 32), (uint32)(blockparse->blockhead.end_ptr), - rNode.spcNode, rNode.dbNode, rNode.relNode, blockparse->blockhead.forknum))); -#endif - - if (ddlParse->blockddltype == BLOCK_DDL_TRUNCATE_RELNODE) { - PRTrackRelTruncate(redoItemHash, rNode, blockparse->blockhead.forknum, blockparse->blockhead.blkno); - } else if (!IsValidColForkNum(blockparse->blockhead.forknum)) { - for (int i = 0; i < MAX_FORKNUM; ++i) - PRTrackRelTruncate(redoItemHash, rNode, i, 0); - } else { - PRTrackRelTruncate(redoItemHash, rNode, blockparse->blockhead.forknum, 0); - } - XLogBlockParseStateRelease(recordBlockState); -} - -void PRTrackRelStorageDropBktList(XLogRecParseState *recordBlockState, HTAB *redoItemHash) -{ - XLogBlockParse *blockparse = &(recordBlockState->blockparse); - XLogBlockDdlParse *ddlParse = NULL; - XLogBlockParseGetDdlParse(recordBlockState, ddlParse); - - RelFileNode rNode; - rNode.spcNode = blockparse->blockhead.spcNode; - rNode.dbNode = blockparse->blockhead.dbNode; - rNode.relNode = blockparse->blockhead.relNode; - uint32 *bktMap = (uint32 *)blockparse->extra_rec.blockddlrec.mainData; - - for (uint32 bktNode = 0; bktNode < MAX_BUCKETMAPLEN; bktNode++) { - if (!GET_BKT_MAP_BIT(bktMap, bktNode)) { - continue; - } - - rNode.bucketNode = bktNode; - -#ifdef USE_ASSERT_CHECKING - ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear relation %u/%u/%u forknum %u record", - (uint32)(blockparse->blockhead.end_ptr >> 32), (uint32)(blockparse->blockhead.end_ptr), - rNode.spcNode, rNode.dbNode, rNode.relNode, blockparse->blockhead.forknum))); -#endif - - for (int i = 0; i < MAX_FORKNUM; i++) { - PRTrackRelTruncate(redoItemHash, rNode, i, 0); - } - } - - XLogBlockParseStateRelease(recordBlockState); -} - -// Get relfile node fork num blockNum -void PRTrackRelPageModification(XLogRecParseState *recordBlockState, HTAB *redoItemHash) -{ - RelFileNode relnode; - ForkNumber forkNum; - BlockNumber blkNo; - - PRXLogRecGetBlockTag(recordBlockState, &relnode, &blkNo, &forkNum); - - PRRegisterBlockChangeExtended(recordBlockState, relnode, forkNum, blkNo, redoItemHash); -} - -/** - for block state, put it in to hash -*/ -void PRTrackAddBlock(XLogRecParseState *recordBlockState, HTAB *redoItemHash) -{ - Assert(recordBlockState->blockparse.blockhead.block_valid < BLOCK_DATA_DDL_TYPE); - PRTrackRelPageModification(recordBlockState, redoItemHash); -} - -/** - others state, clear related block state(including release), release it -*/ -void PRTrackClearBlock(XLogRecParseState *recordBlockState, HTAB *redoItemHash) -{ - XLogBlockParse *blockparse = &(recordBlockState->blockparse); - if (blockparse->blockhead.block_valid == BLOCK_DATA_DDL_TYPE) { - if (blockparse->extra_rec.blockddlrec.blockddltype == BLOCK_DDL_DROP_BKTLIST) { - PRTrackRelStorageDropBktList(recordBlockState, redoItemHash); - } else { - PRTrackRelStorageDrop(recordBlockState, redoItemHash); - } - } else if (blockparse->blockhead.block_valid == BLOCK_DATA_DROP_DATABASE_TYPE) { - PRTrackDatabaseDrop(recordBlockState, redoItemHash); - } else if (blockparse->blockhead.block_valid == BLOCK_DATA_DROP_TBLSPC_TYPE) { - PRTrackTableSpaceDrop(recordBlockState, redoItemHash); - } else { - const uint32 rightShiftSize = 32; - ereport(WARNING, - (errmsg("PRTrackClearBlock:(%X/%X) not identified %u/%u/%u forknum %d 
record", - (uint32)(blockparse->blockhead.end_ptr >> rightShiftSize), - (uint32)(blockparse->blockhead.end_ptr), blockparse->blockhead.spcNode, - blockparse->blockhead.dbNode, blockparse->blockhead.relNode, blockparse->blockhead.forknum))); - XLogBlockParseStateRelease(recordBlockState); - } -} - -} // namespace extreme_rto +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * batch_redo.cpp + * + * IDENTIFICATION + * src/gausskernel/storage/access/transam/extreme_rto/batch_redo.cpp + * + * ------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/xact.h" +#include "access/xlog.h" +#include "access/xlog_internal.h" +#include "access/xlogreader.h" +#include "access/xlogproc.h" +#include "access/visibilitymap.h" +#include "catalog/storage_xlog.h" +#include "commands/dbcommands.h" +#include "commands/tablespace.h" +#include "storage/freespace.h" +#include "utils/relmapper.h" + +#include "access/extreme_rto/batch_redo.h" +#include "access/extreme_rto/redo_item.h" +#include "access/extreme_rto/dispatcher.h" +#include "access/extreme_rto/page_redo.h" + +#include "access/xlogproc.h" + +namespace extreme_rto { +static inline void PRXLogRecGetBlockTag(XLogRecParseState *recordBlockState, RelFileNode *rnode, BlockNumber *blknum, + ForkNumber *forknum) +{ + XLogBlockParse *blockparse = &(recordBlockState->blockparse); + + if (rnode != NULL) { + rnode->dbNode = blockparse->blockhead.dbNode; + rnode->relNode = blockparse->blockhead.relNode; + rnode->spcNode = blockparse->blockhead.spcNode; + rnode->bucketNode = blockparse->blockhead.bucketNode; + } + if (blknum != NULL) { + *blknum = blockparse->blockhead.blkno; + } + if (forknum != NULL) { + *forknum = blockparse->blockhead.forknum; + } +} + +void PRInitRedoItemEntry(RedoItemHashEntry *redoItemHashEntry) +{ + redoItemHashEntry->redoItemNum = 0; + redoItemHashEntry->head = NULL; + redoItemHashEntry->tail = NULL; +} + +HTAB *PRRedoItemHashInitialize(MemoryContext context) +{ + HASHCTL ctl; + HTAB *hTab = NULL; + + /* + * create hashtable that indexes the redo items + */ + errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); + securec_check(rc, "\0", "\0"); + ctl.hcxt = context; + ctl.keysize = sizeof(RedoItemTag); + ctl.entrysize = sizeof(RedoItemHashEntry); + ctl.hash = tag_hash; + hTab = hash_create("Redo item hash by relfilenode and blocknum", INITredoItemHashSIZE, &ctl, + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + + return hTab; +} + +void PRRegisterBlockInsertToList(RedoItemHashEntry *redoItemHashEntry, XLogRecParseState *record) +{ + if (redoItemHashEntry->tail != NULL) { + redoItemHashEntry->tail->nextrecord = record; + redoItemHashEntry->tail = record; + } else { + redoItemHashEntry->tail = record; + redoItemHashEntry->head = record; + } + record->nextrecord = NULL; + redoItemHashEntry->redoItemNum++; +} + +void PRRegisterBlockChangeExtended(XLogRecParseState *recordBlockState, const RelFileNode rNode, 
ForkNumber forkNum, + BlockNumber blkNo, HTAB *redoItemHash) +{ + RedoItemTag redoItemTag; + RedoItemHashEntry *redoItemHashEntry = NULL; + bool found = true; + + INIT_REDO_ITEM_TAG(redoItemTag, rNode, forkNum, blkNo); + + redoItemHashEntry = (RedoItemHashEntry *)hash_search(redoItemHash, (void *)&redoItemTag, HASH_ENTER, &found); + if (redoItemHashEntry == NULL) { + ereport(ERROR, (errcode(ERRCODE_FETCH_DATA_FAILED), + errmsg("could not find or create redo item entry: rel %u/%u/%u " + "forknum %d blkno %u", + rNode.spcNode, rNode.dbNode, rNode.relNode, forkNum, blkNo))); + } + + if (!found) { + PRInitRedoItemEntry(redoItemHashEntry); + } + PRRegisterBlockInsertToList(redoItemHashEntry, recordBlockState); +} + +void PRTrackRemoveEntry(HTAB *hashMap, RedoItemHashEntry *entry) +{ + XLogRecParseState *recordBlockState = entry->head; +#ifdef USE_ASSERT_CHECKING + XLogRecParseState *nextBlockState = entry->head; + while (nextBlockState != NULL) { + XLogRecParseState *prev = nextBlockState; + nextBlockState = (XLogRecParseState *)(nextBlockState->nextrecord); + + if (prev->refrecord != NULL) { + DoRecordCheck(prev, InvalidXLogRecPtr, false); + } + + ereport(LOG, (errmsg("PRTrackRemoveEntry:record(%X/%X) relation %u/%u/%u forknum %u blocknum %u dropped(%p)", + (uint32)(prev->blockparse.blockhead.end_ptr >> 32), + (uint32)(prev->blockparse.blockhead.end_ptr), prev->blockparse.blockhead.spcNode, + prev->blockparse.blockhead.dbNode, prev->blockparse.blockhead.relNode, + prev->blockparse.blockhead.forknum, prev->blockparse.blockhead.blkno, prev->refrecord))); + } + +#endif + XLogBlockParseStateRelease(recordBlockState); + + if (hash_search(hashMap, entry, HASH_REMOVE, NULL) == NULL) { + ereport(ERROR, (errmsg("PRTrackRemoveEntry:Redo item hash table corrupted"))); + } +} + +void PRTrackRelTruncate(HTAB *hashMap, const RelFileNode rNode, ForkNumber forkNum, BlockNumber blkNo) +{ + HASH_SEQ_STATUS status; + RedoItemHashEntry *redoItemEntry = NULL; + hash_seq_init(&status, hashMap); + + while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { + if (RelFileNodeEquals(redoItemEntry->redoItemTag.rNode, rNode) && + redoItemEntry->redoItemTag.forkNum == forkNum && (redoItemEntry->redoItemTag.blockNum >= blkNo)) { + PRTrackRemoveEntry(hashMap, redoItemEntry); + } + } +} + +void PRTrackTableSpaceDrop(XLogRecParseState *recordBlockState, HTAB *hashMap) +{ + HASH_SEQ_STATUS status; + RedoItemHashEntry *redoItemEntry = NULL; + hash_seq_init(&status, hashMap); + + RelFileNode rNode; + PRXLogRecGetBlockTag(recordBlockState, &rNode, NULL, NULL); +#ifdef USE_ASSERT_CHECKING + ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear table space %u record", + (uint32)(recordBlockState->blockparse.blockhead.end_ptr >> 32), + (uint32)(recordBlockState->blockparse.blockhead.end_ptr), rNode.spcNode))); +#endif + + while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { + if (redoItemEntry->redoItemTag.rNode.spcNode == rNode.spcNode) { + PRTrackRemoveEntry(hashMap, redoItemEntry); + } + } + XLogBlockParseStateRelease(recordBlockState); +} + +void PRTrackDatabaseDrop(XLogRecParseState *recordBlockState, HTAB *hashMap) +{ + HASH_SEQ_STATUS status; + RedoItemHashEntry *redoItemEntry = NULL; + hash_seq_init(&status, hashMap); + + RelFileNode rNode; + PRXLogRecGetBlockTag(recordBlockState, &rNode, NULL, NULL); +#ifdef USE_ASSERT_CHECKING + ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear db %u/%u record", + (uint32)(recordBlockState->blockparse.blockhead.end_ptr >> 32), + 
(uint32)(recordBlockState->blockparse.blockhead.end_ptr), rNode.spcNode, rNode.dbNode))); +#endif + + while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { + if (redoItemEntry->redoItemTag.rNode.spcNode == rNode.spcNode && + redoItemEntry->redoItemTag.rNode.dbNode == rNode.dbNode) { + PRTrackRemoveEntry(hashMap, redoItemEntry); + } + } + XLogBlockParseStateRelease(recordBlockState); +} + +void PRTrackDropFiles(HTAB *redoItemHash, XLogBlockDdlParse *ddlParse, XLogRecPtr lsn) +{ + ColFileNodeRel *xnodes = (ColFileNodeRel *)ddlParse->mainData; + for (int i = 0; i < ddlParse->rels; ++i) { + ColFileNode colFileNode; + ColFileNodeRel *colFileNodeRel = xnodes + i; + ColFileNodeCopy(&colFileNode, colFileNodeRel); + if (!IsValidColForkNum(colFileNode.forknum)) { + for (int i = 0; i < MAX_FORKNUM; ++i) + PRTrackRelTruncate(redoItemHash, colFileNode.filenode, i, 0); + } else { + PRTrackRelTruncate(redoItemHash, colFileNode.filenode, colFileNode.forknum, 0); + } +#ifdef USE_ASSERT_CHECKING + ereport(LOG, (errmsg("PRTrackRelTruncate(drop):(%X/%X)clear relation %u/%u/%u forknum %d record", + (uint32)(lsn >> 32), (uint32)(lsn), colFileNode.filenode.spcNode, colFileNode.filenode.dbNode, + colFileNode.filenode.relNode, colFileNode.forknum))); +#endif + } +} + +void PRTrackRelStorageDrop(XLogRecParseState *recordBlockState, HTAB *redoItemHash) +{ + XLogBlockParse *blockparse = &(recordBlockState->blockparse); + XLogBlockDdlParse *ddlParse = NULL; + XLogBlockParseGetDdlParse(recordBlockState, ddlParse); + + if (ddlParse->blockddltype == BLOCK_DDL_TRUNCATE_RELNODE) { + RelFileNode rNode; + rNode.spcNode = blockparse->blockhead.spcNode; + rNode.dbNode = blockparse->blockhead.dbNode; + rNode.relNode = blockparse->blockhead.relNode; + rNode.bucketNode = blockparse->blockhead.bucketNode; +#ifdef USE_ASSERT_CHECKING + ereport(LOG, (errmsg("PRTrackRelTruncate:(%X/%X)clear relation %u/%u/%u forknum %u record", + (uint32)(blockparse->blockhead.end_ptr >> 32), (uint32)(blockparse->blockhead.end_ptr), rNode.spcNode, + rNode.dbNode, rNode.relNode, blockparse->blockhead.forknum))); +#endif + PRTrackRelTruncate(redoItemHash, rNode, blockparse->blockhead.forknum, blockparse->blockhead.blkno); + } else { + PRTrackDropFiles(redoItemHash, ddlParse, blockparse->blockhead.end_ptr); + } + + XLogBlockParseStateRelease(recordBlockState); +} + +// Get relfile node fork num blockNum +void PRTrackRelPageModification(XLogRecParseState *recordBlockState, HTAB *redoItemHash) +{ + RelFileNode relnode; + ForkNumber forkNum; + BlockNumber blkNo; + + PRXLogRecGetBlockTag(recordBlockState, &relnode, &blkNo, &forkNum); + + PRRegisterBlockChangeExtended(recordBlockState, relnode, forkNum, blkNo, redoItemHash); +} + +/** + for block state, put it in to hash +*/ +void PRTrackAddBlock(XLogRecParseState *recordBlockState, HTAB *redoItemHash) +{ + Assert(recordBlockState->blockparse.blockhead.block_valid < BLOCK_DATA_DDL_TYPE); + PRTrackRelPageModification(recordBlockState, redoItemHash); +} + +/** + others state, clear related block state(including release), release it +*/ +void PRTrackClearBlock(XLogRecParseState *recordBlockState, HTAB *redoItemHash) +{ + Assert(recordBlockState != NULL); + Assert(redoItemHash != NULL); + XLogBlockParse *blockparse = &(recordBlockState->blockparse); + if (blockparse->blockhead.block_valid == BLOCK_DATA_DDL_TYPE) { + PRTrackRelStorageDrop(recordBlockState, redoItemHash); + } else if (blockparse->blockhead.block_valid == BLOCK_DATA_DROP_DATABASE_TYPE) { + 
PRTrackDatabaseDrop(recordBlockState, redoItemHash); + } else if (blockparse->blockhead.block_valid == BLOCK_DATA_DROP_TBLSPC_TYPE) { + PRTrackTableSpaceDrop(recordBlockState, redoItemHash); + } else { + const uint32 rightShiftSize = 32; + ereport(WARNING, + (errmsg("PRTrackClearBlock:(%X/%X) not identified %u/%u/%u forknum %d record", + (uint32)(blockparse->blockhead.end_ptr >> rightShiftSize), + (uint32)(blockparse->blockhead.end_ptr), blockparse->blockhead.spcNode, + blockparse->blockhead.dbNode, blockparse->blockhead.relNode, blockparse->blockhead.forknum))); + XLogBlockParseStateRelease(recordBlockState); + } +} + +} // namespace extreme_rto diff --git a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp index c120b6a22..5c04724fa 100755 --- a/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/dispatcher.cpp @@ -33,7 +33,6 @@ #include "access/xlog_internal.h" #include "access/nbtree.h" #include "access/ubtree.h" -#include "access/hash_xlog.h" #include "access/xlogreader.h" #include "access/gist_private.h" #include "access/multixact.h" @@ -81,6 +80,13 @@ #include "pgxc/pgxc.h" #endif +#ifdef ENABLE_UT +#include "utils/utesteventutil.h" +#define STATIC +#else +#define STATIC static +#endif + extern THR_LOCAL bool redo_oldversion_xlog; namespace extreme_rto { @@ -90,11 +96,12 @@ static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */ static const int32 MAX_PENDING = 1; static const int32 MAX_PENDING_STANDBY = 1; -static const int32 ITEM_QUQUE_SIZE_RATIO = 10; +static const int32 ITEM_QUQUE_SIZE_RATIO = 5; static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */ uint32 g_startupTriggerState = TRIGGER_NORMAL; uint32 g_readManagerTriggerFlag = TRIGGER_NORMAL; +static const int invalid_worker_id = -1; static const int UNDO_START_BLK = 1; static const int UHEAP_UPDATE_UNDO_START_BLK = 2; @@ -103,19 +110,18 @@ typedef void *(*GetStateFunc)(PageRedoWorker *worker); static void AddSlotToPLSet(uint32); static void **CollectStatesFromWorkers(GetStateFunc); -static void GetSlotIds(XLogReaderState *record, uint32 designatedSlot, bool rnodedispatch); +static void GetSlotIds(XLogReaderState *record); static void GetUndoSlotIds(XLogReaderState *record); -static LogDispatcher *CreateDispatcher(); +STATIC LogDispatcher *CreateDispatcher(); static void DestroyRecoveryWorkers(); -static void DispatchRecordWithPages(XLogReaderState *, List *, bool); +static void DispatchRecordWithPages(XLogReaderState *, List *); static void DispatchRecordWithoutPage(XLogReaderState *, List *); -static void DispatchTxnRecord(XLogReaderState *, List *, TimestampTz, bool, bool); +static void DispatchTxnRecord(XLogReaderState *, List *); static void StartPageRedoWorkers(uint32); static void StopRecoveryWorkers(int, Datum); -static bool XLogWillChangeStandbyState(const XLogReaderState *); static bool StandbyWillChangeStandbyState(const XLogReaderState *); -static void DispatchToSpecPageWorker(XLogReaderState *record, List *expectedTLIs, bool waittrxnsync); +static void DispatchToSpecPageWorker(XLogReaderState *record, List *expectedTLIs); static bool DispatchXLogRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime); static bool DispatchXactRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime); @@ -179,15 +185,12 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { XLOG_MULTIXACT_ZERO_OFF_PAGE, 
XLOG_MULTIXACT_CREATE_ID }, { DispatchRelMapRecord, RmgrRecordInfoValid, RM_RELMAP_ID, XLOG_RELMAP_UPDATE, XLOG_RELMAP_UPDATE }, -#ifdef ENABLE_MULTIPLE_NODES - { DispatchStandbyRecord, RmgrRecordInfoValid, RM_STANDBY_ID, XLOG_STANDBY_LOCK, XLOG_STANDBY_CSN }, -#else { DispatchStandbyRecord, RmgrRecordInfoValid, RM_STANDBY_ID, XLOG_STANDBY_LOCK, XLOG_STANDBY_CSN_ABORTED }, -#endif + { DispatchHeap2Record, RmgrRecordInfoValid, RM_HEAP2_ID, XLOG_HEAP2_FREEZE, XLOG_HEAP2_LOGICAL_NEWPAGE }, { DispatchHeapRecord, RmgrRecordInfoValid, RM_HEAP_ID, XLOG_HEAP_INSERT, XLOG_HEAP_INPLACE }, { DispatchBtreeRecord, RmgrRecordInfoValid, RM_BTREE_ID, XLOG_BTREE_INSERT_LEAF, XLOG_BTREE_REUSE_PAGE }, - { DispatchHashRecord, RmgrRecordInfoValid, RM_HASH_ID, XLOG_HASH_INIT_META_PAGE, XLOG_HASH_VACUUM_ONE_PAGE }, + { DispatchHashRecord, NULL, RM_HASH_ID, 0, 0 }, { DispatchGinRecord, RmgrRecordInfoValid, RM_GIN_ID, XLOG_GIN_CREATE_INDEX, XLOG_GIN_VACUUM_DATA_LEAF_PAGE }, /* XLOG_GIST_PAGE_DELETE is not used and info isn't continus */ { DispatchGistRecord, RmgrGistRecordInfoValid, RM_GIST_ID, 0, 0 }, @@ -195,7 +198,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchSpgistRecord, RmgrRecordInfoValid, RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, XLOG_SPGIST_VACUUM_REDIRECT }, { DispatchRepSlotRecord, RmgrRecordInfoValid, RM_SLOT_ID, XLOG_SLOT_CREATE, XLOG_TERM_LOG }, { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_REWRITE }, - { DispatchBarrierRecord, NULL, RM_BARRIER_ID, 0, 0 }, + { DispatchBarrierRecord, RmgrRecordInfoValid, RM_BARRIER_ID, XLOG_BARRIER_CREATE, XLOG_BARRIER_SWITCHOVER }, #ifdef ENABLE_MOT {DispatchMotRecord, NULL, RM_MOT_ID, 0, 0}, #endif @@ -213,19 +216,6 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchRepOriginRecord, RmgrRecordInfoValid, RM_REPLORIGIN_ID, XLOG_REPLORIGIN_SET, XLOG_REPLORIGIN_DROP }, }; -void UpdateDispatcherStandbyState(HotStandbyState *state) -{ - if ((get_real_recovery_parallelism() > 1) && (GetBatchCount() > 0)) { - *state = (HotStandbyState)pg_atomic_read_u32(&(g_dispatcher->standbyState)); - } -} - -/* Run from the dispatcher and txn worker thread. */ -bool OnHotStandBy() -{ - return t_thrd.xlog_cxt.standbyState >= STANDBY_INITIALIZED; -} - const int REDO_WAIT_SLEEP_TIME = 5000; /* 5ms */ const int MAX_REDO_WAIT_LOOP = 24000; /* 5ms*24000 = 2min */ @@ -387,6 +377,9 @@ void HandleStartupInterruptsForExtremeRto() g_dispatcher->smartShutdown = true; } } + if (t_thrd.startup_cxt.check_repair) { + t_thrd.startup_cxt.check_repair = false; + } } /* Run from the dispatcher thread. */ @@ -447,7 +440,7 @@ void DumpDispatcher() } /* Run from the dispatcher thread. 
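Each g_dispatchTable row above pairs a resource-manager ID with its dispatch routine and an optional validity range for the record's info code; a NULL checker, as in the rewritten RM_HASH_ID row, skips validation entirely. The following self-contained sketch shows the table-driven pattern with invented record and handler types:

```
#include <cstdint>
#include <cstdio>

struct Record {
    uint32_t rmid;
    uint8_t info;
};

typedef bool (*DispatchFn)(const Record &rec);
typedef bool (*ValidFn)(const Record &rec, uint8_t minInfo, uint8_t maxInfo);

static bool rangeValid(const Record &rec, uint8_t minInfo, uint8_t maxInfo)
{
    return rec.info >= minInfo && rec.info <= maxInfo;
}

static bool dispatchHeap(const Record &) { printf("heap record dispatched\n"); return false; }
static bool dispatchHash(const Record &) { printf("hash record dispatched\n"); return false; }

struct DispatchEntry {
    DispatchFn dispatch;
    ValidFn valid;    /* nullptr means "accept any info code" */
    uint32_t rmid;
    uint8_t minInfo;
    uint8_t maxInfo;
};

/* Indexed by rmid, mirroring g_dispatchTable's layout. */
static const DispatchEntry table[] = {
    { dispatchHeap, rangeValid, 0, 0x00, 0x70 },
    { dispatchHash, nullptr,    1, 0,    0    },  /* unvalidated, like the new RM_HASH_ID row */
};

static void route(const Record &rec)
{
    const DispatchEntry &entry = table[rec.rmid];
    if (entry.valid != nullptr && !entry.valid(rec, entry.minInfo, entry.maxInfo)) {
        printf("fatal: rmid %u got unexpected info %u\n", rec.rmid, rec.info);
        return;
    }
    entry.dispatch(rec);
}

int main()
{
    route({0, 0x10});  /* inside the heap range: dispatched */
    route({1, 0xFF});  /* accepted unconditionally: checker is nullptr */
    return 0;
}
```

In the real dispatcher a failed range check sets fatalerror, the record is pushed through DispatchDefaultRecord so every worker reaches the same position, and the thread then PANICs.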
*/ -static LogDispatcher *CreateDispatcher() +STATIC LogDispatcher *CreateDispatcher() { MemoryContext ctx = AllocSetContextCreate(g_instance.instance_context, "ParallelRecoveryDispatcher", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, @@ -472,6 +465,7 @@ static LogDispatcher *CreateDispatcher() newDispatcher->needImmediateCheckpoint = false; newDispatcher->needFullSyncCheckpoint = false; newDispatcher->smartShutdown = false; + newDispatcher->startupTimeCost = t_thrd.xlog_cxt.timeCost; return newDispatcher; } @@ -720,6 +714,9 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times { bool fatalerror = false; uint32 indexid = RM_NEXT_ID; + + Assert(record != NULL); + uint32 rmid = XLogRecGetRmid(record); uint32 term = XLogRecGetTerm(record); if (term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { @@ -753,10 +750,14 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times #endif ResetChosedPageLineList(); if (fatalerror != true) { +#ifdef ENABLE_UT + TestXLogReaderProbe(UTEST_EVENT_RTO_DISPATCH_REDO_RECORD_TO_FILE, __FUNCTION__, record); +#endif g_dispatchTable[rmid].rm_dispatch(record, expectedTLIs, recordXTime); } else { DispatchDefaultRecord(record, expectedTLIs, recordXTime); DumpDispatcher(); + DumpItem(GetRedoItemPtr(record), "DispatchRedoRecordToFile"); ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("[REDO_LOG_TRACE]DispatchRedoRecord encounter fatal error:rmgrID:%u, info:%u, indexid:%u", @@ -775,8 +776,7 @@ void DispatchRedoRecordToFile(XLogReaderState *record, List *expectedTLIs, Times * trxnthreadexe is true when the record need execute on trxn thread * pagethredexe is true when the record need execute on pageworker thread */ -static void DispatchSyncTxnRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime, - uint32 designatedWorker) +static void DispatchSyncTxnRecord(XLogReaderState *record, List *expectedTLIs) { RedoItem *item = GetRedoItemPtr(record); ReferenceRedoItem(item); @@ -823,8 +823,7 @@ static void DispatchToOnePageWorker(XLogReaderState *record, const RelFileNode r * trxn record's recordtime must set , see SetLatestXTime */ -static void DispatchTxnRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime, bool imcheckpoint, - bool isForceAll = false) +static void DispatchTxnRecord(XLogReaderState *record, List *expectedTLIs) { RedoItem *trxnItem = GetRedoItemPtr(record); ReferenceRedoItem(trxnItem); @@ -835,8 +834,11 @@ static void DispatchTxnRecord(XLogReaderState *record, List *expectedTLIs, Times static bool DispatchBarrierRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { RedoItem *item = GetRedoItemPtr(record); + uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); ReferenceRedoItem(item); - item->record.isFullSync = true; + if (info != XLOG_BARRIER_COMMIT) { + item->record.isFullSync = true; + } for (uint32 i = 0; i < g_dispatcher->pageLineNum; ++i) { ReferenceRedoItem(item); AddPageRedoItem(g_dispatcher->pageLines[i].batchThd, item); @@ -849,7 +851,7 @@ static bool DispatchBarrierRecord(XLogReaderState *record, List *expectedTLIs, T #ifdef ENABLE_MOT static bool DispatchMotRecord(XLogReaderState* record, List* expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } #endif @@ -857,21 +859,21 @@ static bool DispatchMotRecord(XLogReaderState* record, List* expectedTLIs, Times /* Run from the dispatcher 
thread. */ static bool DispatchRepSlotRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } /* Run from the dispatcher thread. */ static bool DispatchHeap3Record(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } /* record of rmid or info error, we inter this function to make every worker run to this position */ static bool DispatchDefaultRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false, true); + DispatchTxnRecord(record, expectedTLIs); return true; } @@ -882,7 +884,6 @@ static bool DispatchXLogRecord(XLogReaderState *record, List *expectedTLIs, Time uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); if (IsCheckPoint(record)) { - isNeedFullSync = XLogWillChangeStandbyState(record); RedoItem *item = GetRedoItemPtr(record); item->needImmediateCheckpoint = g_dispatcher->needImmediateCheckpoint; item->record.isFullSync = g_dispatcher->needFullSyncCheckpoint; @@ -903,14 +904,10 @@ static bool DispatchXLogRecord(XLogReaderState *record, List *expectedTLIs, Time AddTxnRedoItem(g_dispatcher->trxnLine.managerThd, item); } else if ((info == XLOG_FPI) || (info == XLOG_FPI_FOR_HINT)) { - if (SUPPORT_FPAGE_DISPATCH) { - DispatchRecordWithPages(record, expectedTLIs, true); - } else { - DispatchRecordWithoutPage(record, expectedTLIs); /* fullpagewrite include btree, so need strong sync */ - } + DispatchRecordWithPages(record, expectedTLIs); } else { /* process in trxn thread and need to sync to other pagerredo thread */ - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); } return isNeedFullSync; @@ -920,7 +917,7 @@ static bool DispatchXLogRecord(XLogReaderState *record, List *expectedTLIs, Time static bool DispatchRelMapRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { /* page redo worker directly use relnode, will not use relmapfile */ - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } @@ -945,7 +942,7 @@ static bool DispatchXactRecord(XLogReaderState *record, List *expectedTLIs, Time * pageworker first exe and update lastcomplateLSN * then trx thread exe * first pageworker execute and update lsn, then trxn thread */ - DispatchSyncTxnRecord(record, expectedTLIs, recordXTime, ALL_WORKER); + DispatchSyncTxnRecord(record, expectedTLIs); if (hasSegpageRelFile) { doneFlag = pg_atomic_read_u32(&g_dispatcher->segpageXactDoneFlag); @@ -956,7 +953,7 @@ static bool DispatchXactRecord(XLogReaderState *record, List *expectedTLIs, Time } } else { /* process in trxn thread and need to sync to other pagerredo thread */ - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); } return false; @@ -968,7 +965,7 @@ static bool DispatchStandbyRecord(XLogReaderState *record, List *expectedTLIs, T /* change standbystate, must be full sync, see UpdateStandbyState */ bool isNeedFullSync = StandbyWillChangeStandbyState(record); - DispatchTxnRecord(record, expectedTLIs, recordXTime, false, isNeedFullSync); + DispatchTxnRecord(record, expectedTLIs); return isNeedFullSync; } @@ -977,7 +974,7 @@ static bool DispatchStandbyRecord(XLogReaderState *record, 
List *expectedTLIs, T static bool DispatchMultiXactRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { /* page worker will not use multixact */ - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } @@ -995,9 +992,9 @@ static void DispatchRecordWithoutPage(XLogReaderState *record, List *expectedTLI } /* Run from the dispatcher thread. */ -static void DispatchRecordWithPages(XLogReaderState *record, List *expectedTLIs, bool rnodedispatch) +static void DispatchRecordWithPages(XLogReaderState *record, List *expectedTLIs) { - GetSlotIds(record, ANY_WORKER, rnodedispatch); + GetSlotIds(record); RedoItem *item = GetRedoItemPtr(record); ReferenceRedoItem(item); @@ -1013,7 +1010,7 @@ static void DispatchRecordWithPages(XLogReaderState *record, List *expectedTLIs, static bool DispatchHeapRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { if (record->max_block_id >= 0) - DispatchRecordWithPages(record, expectedTLIs, SUPPORT_FPAGE_DISPATCH); + DispatchRecordWithPages(record, expectedTLIs); else DispatchRecordWithoutPage(record, expectedTLIs); @@ -1022,7 +1019,7 @@ static bool DispatchHeapRecord(XLogReaderState *record, List *expectedTLIs, Time static bool DispatchSeqRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchRecordWithPages(record, expectedTLIs, SUPPORT_FPAGE_DISPATCH); + DispatchRecordWithPages(record, expectedTLIs); return false; } @@ -1076,28 +1073,18 @@ static bool DispatchSmgrRecord(XLogReaderState *record, List *expectedTLIs, Time if (info == XLOG_SMGR_CREATE) { /* only need to dispatch to one page worker */ - /* for parallel performance */ - if (SUPPORT_FPAGE_DISPATCH) { - xl_smgr_create *xlrec = (xl_smgr_create *)XLogRecGetData(record); - RelFileNode rnode; - RelFileNodeCopy(rnode, xlrec->rnode, XLogRecGetBucketId(record)); - DispatchToOnePageWorker(record, rnode, expectedTLIs); - } else { - DispatchRecordWithoutPage(record, expectedTLIs); - } + xl_smgr_create *xlrec = (xl_smgr_create *)XLogRecGetData(record); + RelFileNode rnode; + RelFileNodeCopy(rnode, xlrec->rnode, XLogRecGetBucketId(record)); + DispatchToOnePageWorker(record, rnode, expectedTLIs); } else if (IsSmgrTruncate(record)) { - if (SUPPORT_FPAGE_DISPATCH) { - xl_smgr_truncate *xlrec = (xl_smgr_truncate *)XLogRecGetData(record); - RelFileNode rnode; - RelFileNodeCopy(rnode, xlrec->rnode, XLogRecGetBucketId(record)); - uint32 id = GetSlotId(rnode, 0, 0, GetBatchCount()); - AddSlotToPLSet(id); - } else { - for (uint32 i = 0; i < g_dispatcher->pageLineNum; i++) { - AddSlotToPLSet(i); - } - } - DispatchToSpecPageWorker(record, expectedTLIs, false); + xl_smgr_truncate *xlrec = (xl_smgr_truncate *)XLogRecGetData(record); + RelFileNode rnode; + RelFileNodeCopy(rnode, xlrec->rnode, XLogRecGetBucketId(record)); + uint32 id = GetSlotId(rnode, 0, 0, GetBatchCount()); + AddSlotToPLSet(id); + + DispatchToSpecPageWorker(record, expectedTLIs); } return isNeedFullSync; @@ -1151,161 +1138,31 @@ static bool DispatchSegpageSmgrRecord(XLogReaderState *record, List *expectedTLI /* Run from the dispatcher thread. */ static bool DispatchRepOriginRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } /* Run from the dispatcher thread. 
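With the SUPPORT_FPAGE_DISPATCH branches gone, DispatchSmgrRecord above always routes by relation: the record's RelFileNode is hashed into one of GetBatchCount() page lines, so all work for a given relation replays on the same worker. GetSlotId itself is defined elsewhere in the dispatcher; this standalone sketch only illustrates the idea, with an invented hash:

```
#include <cstdint>
#include <cstdio>

/* Stand-in for the kernel's RelFileNode. */
struct RelFileNode {
    uint32_t spcNode;
    uint32_t dbNode;
    uint32_t relNode;
};

/* Map a relation onto one of batchCount page lines; every record touching
 * the same relation then lands on the same redo worker. */
static uint32_t slotId(const RelFileNode &rnode, uint32_t batchCount)
{
    uint32_t h = rnode.spcNode;
    h = h * 31u + rnode.dbNode;
    h = h * 31u + rnode.relNode;
    return h % batchCount;
}

int main()
{
    RelFileNode a = {1663, 16384, 24576};
    RelFileNode b = {1663, 16384, 24577};
    printf("rel a -> page line %u, rel b -> page line %u (of 4)\n",
           slotId(a, 4), slotId(b, 4));
    return 0;
}
```

Note that the truncate path passes 0 for the block number, so the whole relation maps to a single line regardless of which blocks are affected.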
*/ static bool DispatchCLogRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); return false; } /* Run from the dispatcher thread. */ static bool DispatchHashRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - bool isNeedFullSync = false; - /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ - if (IsHashVacuumPages(record) && g_supportHotStandby) { - GetSlotIds(record, ANY_WORKER, true); - /* sync with trxn thread */ - /* only need to process in pageworker thread, wait trxn sync */ - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } - return isNeedFullSync; -} - -/* Run from the dispatcher thread. */ -static bool DispatchBtreeHotStandby(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) -{ - bool isNeedFullSync = false; - - /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ - if (IsBtreeVacuum(record)) { - uint32 id; - uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); - - if (info == XLOG_BTREE_REUSE_PAGE) { - if (!(InHotStandby)) { - /* if not in hotstandby don't need to process */ - return isNeedFullSync; - } - - xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *)XLogRecGetData(record); - RelFileNode tmp_node; - RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); - id = GetSlotId(tmp_node, 0, 0, GetBatchCount()); - AddSlotToPLSet(id); - } else if (info == XLOG_BTREE_VACUUM) { - GetSlotIds(record, ANY_WORKER, true); - - if (HotStandbyActiveInReplay() && IS_SINGLE_NODE) { - RelFileNode thisrnode; - BlockNumber thisblkno; - - bool getTagSuccess = XLogRecGetBlockTag(record, 0, &thisrnode, NULL, &thisblkno); - if (getTagSuccess) { - xl_btree_vacuum *xlrec = (xl_btree_vacuum *)XLogRecGetData(record); - /* for performance reserve */ - for (BlockNumber blkno = xlrec->lastBlockVacuumed + 1; blkno < thisblkno; blkno++) { - id = GetSlotId(thisrnode, 0, 0, GetBatchCount()); - AddSlotToPLSet(id); - } - } - } - } else { - GetSlotIds(record, ANY_WORKER, true); - } - - /* sync with trxn thread */ - if (info == XLOG_BTREE_REUSE_PAGE) { - /* only need to process in trx thread, pageworker only update lsn */ - DispatchSyncTxnRecord(record, expectedTLIs, recordXTime, TRXN_WORKER); - } else { - /* only need to process in pageworker thread, wait trxn sync */ - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); - } - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } - - return isNeedFullSync; -} - -/* Run from the dispatcher thread. 
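This hunk deletes the hash and B-tree hot-standby special cases wholesale; the replacement DispatchHashRecord body, added at the end of the hunk below, simply serializes hash records through the transaction line and returns true so that the caller establishes a full synchronization point. A minimal sketch of that contract, with stand-ins for the real dispatcher machinery:

```
#include <cstdio>

/* Stand-ins for the real dispatcher machinery. */
static void addTxnRedoItem(const char *what) { printf("txn line <- %s\n", what); }
static void waitAllPageLinesDrained() { printf("full sync: all page lines drained\n"); }

/* Mirrors the rewritten DispatchHashRecord: push to the transaction line
 * and request a full sync by returning true. */
static bool dispatchHashRecord(const char *record)
{
    addTxnRedoItem(record);
    return true;  /* isNeedFullSync */
}

int main()
{
    /* Sketch of the caller: a true return value forces a barrier. */
    if (dispatchHashRecord("XLOG_HASH_* record")) {
        waitAllPageLinesDrained();
    }
    return 0;
}
```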
*/ -static bool DispatchUBTreeHotStandby(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) -{ - bool isNeedFullSync = false; - - /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ - if (IsUBTreeVacuum(record)) { - uint32 id; - uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); - - if (info == XLOG_UBTREE_REUSE_PAGE) { - if (!(InHotStandby)) { - /* if not in hotstandby don't need to process */ - return isNeedFullSync; - } - - xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *)XLogRecGetData(record); - RelFileNode tmp_node; - RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); - id = GetSlotId(tmp_node, 0, 0, GetBatchCount()); - AddSlotToPLSet(id); - } else if (info == XLOG_UBTREE_VACUUM) { - GetSlotIds(record, ANY_WORKER, true); - - if (HotStandbyActiveInReplay() && (g_instance.role == VSINGLENODE)) { - RelFileNode thisrnode; - BlockNumber thisblkno; - - bool getTagSuccess = XLogRecGetBlockTag(record, 0, &thisrnode, NULL, &thisblkno); - if (getTagSuccess) { - xl_btree_vacuum *xlrec = (xl_btree_vacuum *)XLogRecGetData(record); - /* for performance reserve */ - for (BlockNumber blkno = xlrec->lastBlockVacuumed + 1; blkno < thisblkno; blkno++) { - id = GetSlotId(thisrnode, 0, 0, GetBatchCount()); - AddSlotToPLSet(id); - } - } - } - } else { - GetSlotIds(record, ANY_WORKER, true); - } - - /* sync with trxn thread */ - if (info == XLOG_UBTREE_REUSE_PAGE) { - /* only need to process in trx thread, pageworker only update lsn */ - DispatchSyncTxnRecord(record, expectedTLIs, recordXTime, TRXN_WORKER); - } else { - /* only need to process in pageworker thread, wait trxn sync */ - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); - } - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } - - return isNeedFullSync; + DispatchTxnRecord(record, expectedTLIs); + return true; } static bool DispatchBtreeRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - if (g_supportHotStandby) { - DispatchBtreeHotStandby(record, expectedTLIs, recordXTime); + uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); + if (info == XLOG_BTREE_REUSE_PAGE) { + DispatchTxnRecord(record, expectedTLIs); } else { - uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); - if (info == XLOG_BTREE_REUSE_PAGE) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } + DispatchRecordWithPages(record, expectedTLIs); } return false; @@ -1313,15 +1170,11 @@ static bool DispatchBtreeRecord(XLogReaderState *record, List *expectedTLIs, Tim static bool DispatchUBTreeRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - if (g_supportHotStandby) { - DispatchUBTreeHotStandby(record, expectedTLIs, recordXTime); + uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); + if (info == XLOG_UBTREE_REUSE_PAGE) { + DispatchTxnRecord(record, expectedTLIs); } else { - uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); - if (info == XLOG_UBTREE_REUSE_PAGE) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } + DispatchRecordWithPages(record, expectedTLIs); } return false; @@ -1329,7 +1182,7 @@ static bool DispatchUBTreeRecord(XLogReaderState *record, List *expectedTLIs, Ti static bool DispatchUBTree2Record(XLogReaderState* record, List* expectedTLIs, TimestampTz recordXTime) { - 
DispatchRecordWithPages(record, expectedTLIs, true); + DispatchRecordWithPages(record, expectedTLIs); return false; } @@ -1350,13 +1203,13 @@ static bool DispatchGinRecord(XLogReaderState *record, List *expectedTLIs, Times /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ if (IsGinVacuumPages(record) && g_supportHotStandby) { - GetSlotIds(record, ANY_WORKER, true); + GetSlotIds(record); /* sync with trxn thread */ /* only need to process in pageworker thread, wait trxn sync */ /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); + DispatchToSpecPageWorker(record, expectedTLIs); } else { - DispatchRecordWithPages(record, expectedTLIs, true); + DispatchRecordWithPages(record, expectedTLIs); } return false; @@ -1377,50 +1230,21 @@ static bool DispatchGistRecord(XLogReaderState *record, List *expectedTLIs, Time } } - /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ - if (IsGistPageUpdate(record) && g_supportHotStandby) { - GetSlotIds(record, ANY_WORKER, true); - /* sync with trx thread */ - /* only need to process in pageworker thread, wait trxn sync */ - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } - + DispatchRecordWithPages(record, expectedTLIs); return false; } /* Run from the dispatcher thread. */ static bool DispatchSpgistRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ - if (IsSpgistVacuum(record) && g_supportHotStandby) { - uint8 info = (XLogRecGetInfo(record) & (~XLR_INFO_MASK)); - - GetSlotIds(record, ANY_WORKER, true); - /* sync with trx thread */ - if ((info == XLOG_SPGIST_VACUUM_REDIRECT) && (InHotStandby)) { - /* trxn thread first reslove confilict snapshot ,then do the page action */ - /* first pageworker update lsn, then trxn thread exe */ - DispatchSyncTxnRecord(record, expectedTLIs, recordXTime, TRXN_WORKER); - } else { - /* only need to process in pageworker thread, wait trxn sync */ - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); - } - - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } - + DispatchRecordWithPages(record, expectedTLIs); return false; } /** * dispatch record to a specified thread */ -static void DispatchToSpecPageWorker(XLogReaderState *record, List *expectedTLIs, bool waittrxnsync) +static void DispatchToSpecPageWorker(XLogReaderState *record, List *expectedTLIs) { RedoItem *item = GetRedoItemPtr(record); ReferenceRedoItem(item); @@ -1450,68 +1274,14 @@ static bool DispatchHeap2VacuumRecord(XLogReaderState *record, List *expectedTLI uint8 info = ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) & XLOG_HEAP_OPMASK); if (info == XLOG_HEAP2_CLEANUP_INFO) { - DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + DispatchTxnRecord(record, expectedTLIs); } else { - DispatchRecordWithPages(record, expectedTLIs, SUPPORT_FPAGE_DISPATCH); + DispatchRecordWithPages(record, expectedTLIs); } return false; } -static bool DispatchHeap2VacuumHotStandby(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) -{ - /* - * base on mvcc , except vacumm action the record is always exist - * so many redo action can only execute in trx redo thread - * vacumm action must execute in full sync - */ - bool isNeedFullSync = false; - 
bool isSyncWithTrxn = false; - uint8 info = ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) & XLOG_HEAP_OPMASK); - - if (info == XLOG_HEAP2_CLEANUP_INFO) { - if (InHotStandby) { - /* for parallel redo performance */ - if (SUPPORT_FPAGE_DISPATCH) { - uint32 id; - xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *)XLogRecGetData(record); - RelFileNode tmp_node; - RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); - id = GetSlotId(tmp_node, 0, 0, GetBatchCount()); - AddSlotToPLSet(id); - } else { - for (uint32 i = 0; i < g_dispatcher->pageLineNum; i++) - AddSlotToPLSet(i); - } - isSyncWithTrxn = true; - } else { - return false; - } - } else { - GetSlotIds(record, ANY_WORKER, SUPPORT_FPAGE_DISPATCH); - - if (info == XLOG_HEAP2_CLEAN) { - xl_heap_clean *xlrec = (xl_heap_clean *)XLogRecGetData(record); - if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid)) - isSyncWithTrxn = true; - } else { - isSyncWithTrxn = InHotStandby; - } - } - - if (isSyncWithTrxn) { - /* sync with trxn thread */ - /* trxn thread first reslove confilict snapshot ,then do the page action */ - /* first pageworker update lsn, then trxn thread exe */ - DispatchSyncTxnRecord(record, expectedTLIs, recordXTime, TRXN_WORKER); - } else { - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, false); - } - - return isNeedFullSync; -} - /* Run from the dispatcher thread. */ static bool DispatchHeap2Record(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { @@ -1519,40 +1289,33 @@ static bool DispatchHeap2Record(XLogReaderState *record, List *expectedTLIs, Tim uint8 info = ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) & XLOG_HEAP_OPMASK); - if ((info == XLOG_HEAP2_MULTI_INSERT) || (info == XLOG_HEAP2_PAGE_UPGRADE)) { - DispatchRecordWithPages(record, expectedTLIs, SUPPORT_FPAGE_DISPATCH); + if (info == XLOG_HEAP2_MULTI_INSERT) { + DispatchRecordWithPages(record, expectedTLIs); } else if (info == XLOG_HEAP2_BCM) { /* we use renode as dispatch key, so the same relation will dispath to the same page redo thread * although they have different fork num */ /* for parallel redo performance */ - if (SUPPORT_FPAGE_DISPATCH) { - xl_heap_bcm *xlrec = (xl_heap_bcm *)XLogRecGetData(record); - RelFileNode tmp_node; - RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); - DispatchToOnePageWorker(record, tmp_node, expectedTLIs); - } else { - DispatchRecordWithoutPage(record, expectedTLIs); - } + xl_heap_bcm *xlrec = (xl_heap_bcm *)XLogRecGetData(record); + RelFileNode tmp_node; + RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); + DispatchToOnePageWorker(record, tmp_node, expectedTLIs); + } else if (info == XLOG_HEAP2_LOGICAL_NEWPAGE) { if (IS_DN_MULTI_STANDYS_MODE()) { xl_heap_logical_newpage *xlrec = (xl_heap_logical_newpage *)XLogRecGetData(record); if (xlrec->type == COLUMN_STORE && xlrec->hasdata) { - /* for parallel redo performance */ - if (SUPPORT_FPAGE_DISPATCH) { - RelFileNode tmp_node; - RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); - DispatchToOnePageWorker(record, tmp_node, expectedTLIs); - } else - DispatchRecordWithoutPage(record, expectedTLIs); + RelFileNode tmp_node; + RelFileNodeCopy(tmp_node, xlrec->node, XLogRecGetBucketId(record)); + DispatchToOnePageWorker(record, tmp_node, expectedTLIs); } else { RedoItem *item = GetRedoItemPtr(record); #ifdef USE_ASSERT_CHECKING ereport(LOG, (errmsg("LOGICAL NEWPAGE %X/%X type:%u, hasdata:%u no need replay", (uint32)(record->EndRecPtr >> 32), 
(uint32)(record->EndRecPtr), (uint32)xlrec->type, (uint32)xlrec->hasdata))); - for (uint32 i = 0; i <= XLR_MAX_BLOCK_ID; ++i) { + for (int i = 0; i <= item->record.max_block_id; ++i) { if (item->record.blocks[i].in_use) { item->record.blocks[i].replayed = 1; } @@ -1563,13 +1326,13 @@ static bool DispatchHeap2Record(XLogReaderState *record, List *expectedTLIs, Tim } else { if (!g_instance.attr.attr_storage.enable_mix_replication) { isNeedFullSync = true; - DispatchTxnRecord(record, expectedTLIs, recordXTime, false, isNeedFullSync); + DispatchTxnRecord(record, expectedTLIs); } else { RedoItem *item = GetRedoItemPtr(record); #ifdef USE_ASSERT_CHECKING ereport(LOG, (errmsg("LOGICAL NEWPAGE %X/%X not multistandby,no need replay", (uint32)(record->EndRecPtr >> 32), (uint32)(record->EndRecPtr)))); - for (uint32 i = 0; i <= XLR_MAX_BLOCK_ID; ++i) { + for (int i = 0; i <= item->record.max_block_id; ++i) { if (item->record.blocks[i].in_use) { item->record.blocks[i].replayed = 1; } @@ -1579,40 +1342,21 @@ static bool DispatchHeap2Record(XLogReaderState *record, List *expectedTLIs, Tim } } } else { - if (g_supportHotStandby) - isNeedFullSync = DispatchHeap2VacuumHotStandby(record, expectedTLIs, recordXTime); - else - isNeedFullSync = DispatchHeap2VacuumRecord(record, expectedTLIs, recordXTime); + isNeedFullSync = DispatchHeap2VacuumRecord(record, expectedTLIs, recordXTime); } return isNeedFullSync; } /* Run from the dispatcher thread. */ -static void GetSlotIds(XLogReaderState *record, uint32 designatedWorker, bool rnodedispatch) +static void GetSlotIds(XLogReaderState *record) { - uint32 id; for (int i = 0; i <= record->max_block_id; i++) { DecodedBkpBlock *block = &record->blocks[i]; - if (block->in_use != true) { - /* blk number is not continue */ - continue; - } - - if (rnodedispatch) - id = GetSlotId(block->rnode, 0, 0, GetBatchCount()); - else - id = GetSlotId(block->rnode, block->blkno, 0, GetBatchCount()); - - AddSlotToPLSet(id); - } - - if ((designatedWorker != ANY_WORKER)) { - if (designatedWorker < GetBatchCount()) { - AddSlotToPLSet(designatedWorker); - } else { - /* output error info */ + if (block->in_use) { + uint32 id = GetSlotId(block->rnode, 0, 0, GetBatchCount()); + AddSlotToPLSet(id); } } } @@ -1662,7 +1406,7 @@ static void GetUndoSlotIds(XLogReaderState *record) return; } default: - elog(ERROR, "Invalid op in DispatchUHeapRecord"); + ereport(ERROR, (errmsg("Invalid op in DispatchUHeapRecord"))); } /* Get slot id for undo zone */ @@ -1689,45 +1433,6 @@ static void AddSlotToPLSet(uint32 id) ++(g_dispatcher->chosedPageLineIds[id]); } -/* Run from the dispatcher and each page worker thread. */ -bool XactWillRemoveRelFiles(XLogReaderState *record) -{ - /* - * Relation files under tablespace folders are removed only from - * applying transaction log record. - */ - int nrels = 0; - ColFileNodeRel *xnodes = NULL; - - if (XLogRecGetRmid(record) != RM_XACT_ID) { - return false; - } - - XactGetRelFiles(record, &xnodes, &nrels); - - return (nrels > 0); -} - -/* Run from the dispatcher thread. */ -static bool XLogWillChangeStandbyState(const XLogReaderState *record) -{ - /* - * If standbyState has reached SNAPSHOT_READY, it will not change - * anymore. Otherwise, it will change if the log record's redo - * function calls ProcArrayApplyRecoveryInfo(). 
- */ - if ((t_thrd.xlog_cxt.standbyState < STANDBY_INITIALIZED) || - (t_thrd.xlog_cxt.standbyState == STANDBY_SNAPSHOT_READY)) - return false; - - if ((XLogRecGetRmid(record) == RM_XLOG_ID) && - ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) == XLOG_CHECKPOINT_SHUTDOWN)) { - return true; - } - - return false; -} - /* Run from the dispatcher thread. */ static bool StandbyWillChangeStandbyState(const XLogReaderState *record) { @@ -1874,6 +1579,30 @@ void AllItemCheck() #endif +void ClearRecordInfo(XLogReaderState *xlogState) +{ + xlogState->decoded_record = NULL; + xlogState->main_data = NULL; + xlogState->main_data_len = 0; + + for (int i = 0; i <= xlogState->max_block_id; ++i) { + xlogState->blocks[i].data = NULL; + xlogState->blocks[i].data_len = 0; + xlogState->blocks[i].in_use = false; + xlogState->blocks[i].has_image = false; + xlogState->blocks[i].has_data = false; + xlogState->blocks[i].tdeinfo = NULL; +#ifdef USE_ASSERT_CHECKING + xlogState->blocks[i].replayed = 0; +#endif + } + xlogState->max_block_id = -1; + + xlogState->isDecode = false; + xlogState->isFullSync = false; + xlogState->refcount = 0; +} + /* Run from each page worker thread. */ void FreeRedoItem(RedoItem *item) { @@ -1883,15 +1612,26 @@ void FreeRedoItem(RedoItem *item) ItemLsnCheck(item); } #endif + CountXLogNumbers(&item->record); + ClearRecordInfo(&item->record); + pg_write_barrier(); RedoItem *oldHead = (RedoItem *)pg_atomic_read_uintptr((uintptr_t *)&g_dispatcher->freeHead); do { - item->freeNext = oldHead; + pg_atomic_write_uintptr((uintptr_t *)&item->freeNext, (uintptr_t)oldHead); } while (!pg_atomic_compare_exchange_uintptr((uintptr_t *)&g_dispatcher->freeHead, (uintptr_t *)&oldHead, (uintptr_t)item)); } void InitReaderStateByOld(XLogReaderState *newState, XLogReaderState *oldState, bool isNew) { + if (isNew) { + newState->read_page = oldState->read_page; + newState->system_identifier = oldState->system_identifier; + newState->private_data = oldState->private_data; + newState->errormsg_buf = oldState->errormsg_buf; + newState->isPRProcess = oldState->isPRProcess; + } + newState->ReadRecPtr = oldState->ReadRecPtr; newState->EndRecPtr = oldState->EndRecPtr; newState->readSegNo = oldState->readSegNo; @@ -1902,66 +1642,46 @@ void InitReaderStateByOld(XLogReaderState *newState, XLogReaderState *oldState, newState->latestPagePtr = oldState->latestPagePtr; newState->latestPageTLI = oldState->latestPageTLI; newState->currRecPtr = oldState->currRecPtr; - newState->readLen = oldState->readLen; newState->readBuf = oldState->readBuf; + newState->readLen = oldState->readLen; - if (isNew) { - newState->readRecordBuf = NULL; - newState->readRecordBufSize = 0; - newState->errormsg_buf = oldState->errormsg_buf; - newState->isPRProcess = oldState->isPRProcess; - newState->read_page = oldState->read_page; - newState->system_identifier = oldState->system_identifier; - newState->private_data = oldState->private_data; - } - + newState->decoded_record = NULL; newState->main_data = NULL; newState->main_data_len = 0; - newState->main_data_bufsz = 0; - for (int i = 0; i <= XLR_MAX_BLOCK_ID; i++) { - newState->blocks[i].data = NULL; - newState->blocks[i].data_len = 0; - newState->blocks[i].data_bufsz = 0; - newState->blocks[i].in_use = false; - newState->blocks[i].has_image = false; - newState->blocks[i].has_data = false; - newState->blocks[i].tdeinfo = NULL; -#ifdef USE_ASSERT_CHECKING - newState->blocks[i].replayed = 0; -#endif - } + newState->max_block_id = -1; - newState->refcount = 0; - newState->isDecode = false; - 
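`FreeRedoItem` above clears the decoded record, issues `pg_write_barrier()`, and CAS-pushes the item onto `g_dispatcher->freeHead`; `GetXlogReader` below drains that list with a single atomic exchange followed by `pg_read_barrier()`. A compact sketch of the same lock-free free list under the analogous C++ memory orders (types and names are illustrative, not from the source):

```
#include <atomic>
#include <cstdio>

struct Node {
    int payload;
    Node *next{nullptr};
};

std::atomic<Node *> free_head{nullptr}; /* plays g_dispatcher->freeHead */

/* Multi-producer push: a Treiber-stack CAS loop.  The release order
 * plays the role of the pg_write_barrier() issued before the push. */
void push_free(Node *n)
{
    Node *old_head = free_head.load(std::memory_order_relaxed);
    do {
        n->next = old_head;
    } while (!free_head.compare_exchange_weak(old_head, n,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
}

/* Single consumer: detach the whole list at once; the acquire order
 * plays the role of the pg_read_barrier() after the swap. */
Node *take_all()
{
    return free_head.exchange(nullptr, std::memory_order_acquire);
}

int main()
{
    Node a{1}, b{2};
    push_free(&a);
    push_free(&b);
    for (Node *n = take_all(); n != nullptr; n = n->next)
        std::printf("reusing node %d\n", n->payload); /* 2 then 1 */
}
```

Because the only pop is a take-all exchange performed by one consumer (which then feeds its private `freeStateHead` cache), the classic ABA hazard of Treiber-stack pops never arises in this shape.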
newState->isFullSync = false; + newState->readblocks = 0; + /* move block clear to FreeRedoItem because we used MCXT_ALLOC_ZERO to alloc buf, if the variable is not init to 0, + you should put it here. */ + } static XLogReaderState *GetXlogReader(XLogReaderState *readerState) { - XLogReaderState *retReaderState = NULL; + RedoItem *newItem = NULL; bool isNew = false; uint64 count = 0; do { if (g_dispatcher->freeStateHead != NULL) { - retReaderState = &g_dispatcher->freeStateHead->record; - g_dispatcher->freeStateHead = g_dispatcher->freeStateHead->freeNext; + newItem = g_dispatcher->freeStateHead; + g_dispatcher->freeStateHead = newItem->freeNext; + break; } else { RedoItem *head = (RedoItem *)pg_atomic_exchange_uintptr((uintptr_t *)&g_dispatcher->freeHead, - (uintptr_t)NULL); + (uintptr_t)NULL); if (head != NULL) { - retReaderState = &head->record; - g_dispatcher->freeStateHead = head->freeNext; + pg_read_barrier(); + newItem = head; + g_dispatcher->freeStateHead = newItem->freeNext; + break; } else if (g_dispatcher->maxItemNum > g_dispatcher->curItemNum) { - RedoItem *item = (RedoItem *)palloc_extended(MAXALIGN(sizeof(RedoItem)) + - sizeof(RedoItem *) * GetAllWorkerCount() + - sizeof(bool) * GetAllWorkerCount(), - MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO); - if (item != NULL) { - retReaderState = &item->record; - item->allocatedNext = g_dispatcher->allocatedRedoItem; - g_dispatcher->allocatedRedoItem = item; + newItem = (RedoItem *)palloc_extended(MAXALIGN(sizeof(RedoItem)), MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO); + if (newItem != NULL) { + newItem->allocatedNext = g_dispatcher->allocatedRedoItem; + g_dispatcher->allocatedRedoItem = newItem; isNew = true; ++(g_dispatcher->curItemNum); + break; } } @@ -1974,17 +1694,19 @@ static XLogReaderState *GetXlogReader(XLogReaderState *readerState) DumpDispatcher(); } } - if (retReaderState == NULL) { + if (newItem == NULL) { RedoInterruptCallBack(); } } - } while (retReaderState == NULL); + } while (newItem == NULL); - InitReaderStateByOld(retReaderState, readerState, isNew); + InitReaderStateByOld(&newItem->record, readerState, isNew); + newItem->freeNext = NULL; - return retReaderState; + return &newItem->record; } + void CopyDataFromOldReader(XLogReaderState *newReaderState, const XLogReaderState *oldReaderState) { errno_t rc = EOK; @@ -2029,7 +1751,7 @@ void CopyDataFromOldReader(XLogReaderState *newReaderState, const XLogReaderStat } } -XLogReaderState *NewReaderState(XLogReaderState *readerState, bool bCopyState) +XLogReaderState *NewReaderState(XLogReaderState *readerState) { Assert(readerState != NULL); if (!readerState->isPRProcess) @@ -2038,9 +1760,6 @@ XLogReaderState *NewReaderState(XLogReaderState *readerState, bool bCopyState) ereport(PANIC, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("NewReaderState Dispatch is null"))); XLogReaderState *retReaderState = GetXlogReader(readerState); - if (bCopyState) { - CopyDataFromOldReader(retReaderState, readerState); - } return retReaderState; } @@ -2172,48 +1891,6 @@ static void **CollectStatesFromWorkers(GetStateFunc getStateFunc) return NULL; } -void DiagLogRedoRecord(XLogReaderState *record, const char *funcName) -{ - uint8 info; - RelFileNode oldRn = {0}; - RelFileNode newRn = {0}; - BlockNumber oldblk = InvalidBlockNumber; - BlockNumber newblk = InvalidBlockNumber; - bool newBlkExistFlg = false; - bool oldBlkExistFlg = false; - ForkNumber oldFk = InvalidForkNumber; - ForkNumber newFk = InvalidForkNumber; - StringInfoData buf; - - /* Support redo old version xlog during upgrade (Just the 
runningxact log with chekpoint online ) */ - uint32 rmid = redo_oldversion_xlog ? ((XLogRecordOld *)record->decoded_record)->xl_rmid : XLogRecGetRmid(record); - info = redo_oldversion_xlog ? ((((XLogRecordOld *)record->decoded_record)->xl_info) & ~XLR_INFO_MASK) - : (XLogRecGetInfo(record) & ~XLR_INFO_MASK); - - initStringInfo(&buf); - RmgrTable[rmid].rm_desc(&buf, record); - - if (XLogRecGetBlockTag(record, 0, &newRn, &newFk, &newblk)) { - newBlkExistFlg = true; - } - if (XLogRecGetBlockTag(record, 1, &oldRn, &oldFk, &oldblk)) { - oldBlkExistFlg = true; - } - ereport(DEBUG4, - (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("[REDO_LOG_TRACE]DiagLogRedoRecord: %s, ReadRecPtr:%lu,EndRecPtr:%lu," - "newBlkExistFlg:%u," - "newRn(spcNode:%u, dbNode:%u, relNode:%u),newFk:%d,newblk:%u," - "oldBlkExistFlg:%d," - "oldRn(spcNode:%u, dbNode:%u, relNode:%u),oldFk:%d,oldblk:%u," - "info:%u,redo_oldversion_xlog:%u, rm_name:%s, desc:%s," - "max_block_id:%d", - funcName, record->ReadRecPtr, record->EndRecPtr, newBlkExistFlg, newRn.spcNode, newRn.dbNode, - newRn.relNode, newFk, newblk, oldBlkExistFlg, oldRn.spcNode, oldRn.dbNode, oldRn.relNode, oldFk, - oldblk, info, redo_oldversion_xlog, RmgrTable[rmid].rm_name, buf.data, record->max_block_id))); - pfree_ext(buf.data); -} - XLogRecPtr GetSafeMinCheckPoint() { XLogRecPtr minSafeCheckPoint = MAX_XLOG_REC_PTR; @@ -2307,21 +1984,103 @@ void redo_get_wroker_statistic(uint32 *realNum, RedoWorkerStatsData *worker, uin SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); } -#ifndef ENABLE_MULTIPLE_NODES +void make_worker_static_info(RedoWorkerTimeCountsInfo *workerCountInfo, PageRedoWorker *redoWorker, + int piplineid, int id) +{ + const uint32 pipelineNumSize = 2; + const uint32 redoWorkerNumSize = 2; + const char *role_name = RedoWokerRole2Str(redoWorker->role); + uint32 allocSize = strlen(role_name) + pipelineNumSize + 1 + redoWorkerNumSize + 1; + workerCountInfo->worker_name = (char*)palloc0(allocSize); + if (id != invalid_worker_id) { + errno_t rc = sprintf_s(workerCountInfo->worker_name, allocSize, "%s%02d%02d", role_name, piplineid, id); + securec_check_ss(rc, "\0", "\0"); + } else { + errno_t rc = sprintf_s(workerCountInfo->worker_name, allocSize, "%s%02d", role_name, piplineid); + securec_check_ss(rc, "\0", "\0"); + } + workerCountInfo->time_cost = redoWorker->timeCostList; +} + +void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum) +{ + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); + knl_parallel_redo_state state = g_instance.comm_cxt.predo_cxt.state; + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); + + if (state != REDO_IN_PROGRESS) { + *realNum = 0; + return; + } + + PageRedoWorker *redoWorker = NULL; + + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); + if (g_dispatcher == NULL) { + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); + *realNum = 0; + return; + } + *realNum = g_dispatcher->allWorkersCnt + 1; + RedoWorkerTimeCountsInfo *workerList = + (RedoWorkerTimeCountsInfo *)palloc0((*realNum) * sizeof(RedoWorkerTimeCountsInfo)); + errno_t rc; + uint32 cur_pos = 0; + uint32 allocSize; + for (int i = 0; i < (int)g_dispatcher->pageLineNum; ++i) { + redoWorker = (g_dispatcher->pageLines[i].batchThd); + make_worker_static_info(&workerList[cur_pos++], redoWorker, i, invalid_worker_id); + + redoWorker = (g_dispatcher->pageLines[i].managerThd); + make_worker_static_info(&workerList[cur_pos++], redoWorker, i, invalid_worker_id); + + for (int j = 
0; j < (int)g_dispatcher->pageLines[i].redoThdNum; ++j) { + redoWorker = (g_dispatcher->pageLines[i].redoThd[j]); + make_worker_static_info(&workerList[cur_pos++], redoWorker, i, j); + } + } + + make_worker_static_info(&workerList[cur_pos++], g_dispatcher->trxnLine.managerThd, 0, invalid_worker_id); + make_worker_static_info(&workerList[cur_pos++], g_dispatcher->trxnLine.redoThd, 0, invalid_worker_id); + make_worker_static_info(&workerList[cur_pos++], g_dispatcher->readLine.readPageThd, 0, invalid_worker_id); + make_worker_static_info(&workerList[cur_pos++], g_dispatcher->readLine.readThd, 0, invalid_worker_id); + make_worker_static_info(&workerList[cur_pos++], g_dispatcher->readLine.managerThd, 0, invalid_worker_id); + + const char *startupName = "startup"; + allocSize = strlen(startupName) + 1; + workerList[cur_pos].worker_name = (char*)palloc0(allocSize); + rc = sprintf_s(workerList[cur_pos].worker_name, allocSize, "%s", startupName); + securec_check_ss(rc, "\0", "\0"); + workerList[cur_pos++].time_cost = g_dispatcher->startupTimeCost; + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); + *workerCountInfoList = workerList; + Assert(cur_pos == *realNum); +} void CheckCommittingCsnList() { +#ifndef ENABLE_MULTIPLE_NODES for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; ++i) { CleanUpMakeCommitAbort(reinterpret_cast(g_dispatcher->allWorkers[i]->committingCsnList)); g_dispatcher->allWorkers[i]->committingCsnList = NULL; } -} +#else + TransactionId clean_xid = InvalidTransactionId; + if (!IS_PGXC_COORDINATOR && t_thrd.proc->workingVersionNum >= DISASTER_READ_VERSION_NUM) { + if (log_min_messages <= DEBUG4) { + ereport(LOG, (errmsg("CheckCommittingCsnList: insert clean xlog"))); + } + XLogBeginInsert(); + XLogRegisterData((char*)(&clean_xid), sizeof(TransactionId)); + XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_ABORTED); + } #endif +} /* uheap dispatch functions */ static bool DispatchUHeapRecord(XLogReaderState* record, List* expectedTLIs, TimestampTz recordXTime) { - GetSlotIds(record, ANY_WORKER, SUPPORT_FPAGE_DISPATCH); + GetSlotIds(record); GetUndoSlotIds(record); RedoItem *item = GetRedoItemPtr(record); @@ -2340,7 +2099,7 @@ static bool DispatchUHeapRecord(XLogReaderState* record, List* expectedTLIs, Tim static bool DispatchUHeap2Record(XLogReaderState* record, List* expectedTLIs, TimestampTz recordXTime) { - GetSlotIds(record, ANY_WORKER, SUPPORT_FPAGE_DISPATCH); + GetSlotIds(record); RedoItem *item = GetRedoItemPtr(record); ReferenceRedoItem(item); @@ -2441,7 +2200,7 @@ static bool DispatchUndoActionRecord(XLogReaderState* record, List* expectedTLIs } } - GetSlotIds(record, ANY_WORKER, SUPPORT_FPAGE_DISPATCH); + GetSlotIds(record); RedoItem *item = GetRedoItemPtr(record); ReferenceRedoItem(item); diff --git a/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp b/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp index e9d0839b5..35e07f5c0 100755 --- a/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/page_redo.cpp @@ -1,2650 +1,2989 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. 
- * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * page_redo.cpp - * PageRedoWorker is a thread of execution that replays data page logs. - * It provides a synchronization mechanism for replaying logs touching - * multiple pages. - * - * In the current implementation, logs modifying the same page must - * always be replayed by the same worker. There is no mechanism for - * an idle worker to "steal" work from a busy worker. - * - * IDENTIFICATION - * src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp - * - * ------------------------------------------------------------------------- - */ - -#include -#include - -#include "postgres.h" -#include "knl/knl_variable.h" -#include "gs_thread.h" -#include "miscadmin.h" -#include "access/xact.h" -#include "access/xlog.h" -#include "access/xlog_internal.h" -#include "access/xlogutils.h" -#include "access/xlogproc.h" -#include "access/nbtree.h" -#include "catalog/storage_xlog.h" -#include "gssignal/gs_signal.h" -#include "libpq/pqsignal.h" -#include "postmaster/postmaster.h" -#include "storage/ipc.h" -#include "storage/freespace.h" -#include "storage/smgr/smgr.h" -#include "storage/standby.h" -#include "storage/pmsignal.h" -#include "utils/guc.h" -#include "utils/palloc.h" -#include "portability/instr_time.h" -#include "postmaster/startup.h" - -#include "catalog/storage.h" -#include -#include -#include "commands/dbcommands.h" -#include "commands/tablespace.h" -#include "access/extreme_rto/page_redo.h" -#include "access/extreme_rto/dispatcher.h" -#include "access/extreme_rto/txn_redo.h" -#include "pgstat.h" -#include "access/extreme_rto/batch_redo.h" -#include "access/multi_redo_api.h" -#include "replication/walreceiver.h" -#include "replication/datareceiver.h" -#ifdef ENABLE_MOT -#include "storage/mot/mot_fdw.h" -#endif - -#ifdef EXTREME_RTO_DEBUG -#include -#include - -#include - -#include - -#endif - -namespace extreme_rto { -static const int MAX_PARSE_BUFF_NUM = PAGE_WORK_QUEUE_SIZE * 10 * 3; -static const int MAX_LOCAL_BUFF_NUM = PAGE_WORK_QUEUE_SIZE * 10 * 3; - -static const char *const PROCESS_TYPE_CMD_ARG = "--forkpageredo"; -static char g_AUXILIARY_TYPE_CMD_ARG[16] = {0}; - -THR_LOCAL PageRedoWorker *g_redoWorker = NULL; -THR_LOCAL RecordBufferState *g_recordbuffer = NULL; -RedoItem g_redoEndMark = { false, false, false, false, 0 }; -static RedoItem g_terminateMark = { false, false, false, false, 0 }; -RedoItem g_GlobalLsnForwarder; -RedoItem g_cleanupMark; - -static const int PAGE_REDO_WORKER_ARG = 3; -static const int REDO_SLEEP_50US = 50; -static const int REDO_SLEEP_100US = 100; - -static void ApplySinglePageRecord(RedoItem *); -static void InitGlobals(); -static void LastMarkReached(); -static void SetupSignalHandlers(); -static void SigHupHandler(SIGNAL_ARGS); -static ThreadId StartWorkerThread(PageRedoWorker *); - -void RedoThrdWaitForExit(const PageRedoWorker *wk); -void AddRefRecord(void *rec); -void SubRefRecord(void *rec); -void GlobalLsnUpdate(); -static void TrxnMangerQueueCallBack(); -#ifdef USE_ASSERT_CHECKING -void RecordBlockCheck(void *rec, XLogRecPtr curPageLsn, uint32 blockId, bool replayed); -#endif - -RefOperate 
recordRefOperate = { - AddRefRecord, - SubRefRecord, -#ifdef USE_ASSERT_CHECKING - RecordBlockCheck, -#endif -}; - -void UpdateRecordGlobals(RedoItem *item, HotStandbyState standbyState) -{ - t_thrd.xlog_cxt.ReadRecPtr = item->record.ReadRecPtr; - t_thrd.xlog_cxt.EndRecPtr = item->record.EndRecPtr; - t_thrd.xlog_cxt.expectedTLIs = item->expectedTLIs; - /* apply recoveryinfo will change standbystate see UpdateRecordGlobals */ - t_thrd.xlog_cxt.standbyState = standbyState; - t_thrd.xlog_cxt.XLogReceiptTime = item->syncXLogReceiptTime; - t_thrd.xlog_cxt.XLogReceiptSource = item->syncXLogReceiptSource; - u_sess->utils_cxt.RecentXmin = item->RecentXmin; - t_thrd.xlog_cxt.server_mode = item->syncServerMode; -} - -/* Run from the dispatcher thread. */ -PageRedoWorker *StartPageRedoWorker(PageRedoWorker *worker) -{ - Assert(worker); - uint32 id = worker->id; - ThreadId threadId = StartWorkerThread(worker); - if (threadId == 0) { - ereport(WARNING, (errmsg("Cannot create page-redo-worker thread: %u, %m.", id))); - DestroyPageRedoWorker(worker); - return NULL; - } else { - ereport(LOG, (errmsg("StartPageRedoWorker successfully create page-redo-worker id: %u, threadId:%lu.", id, - worker->tid.thid))); - } - g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[id].threadId = threadId; - SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); - uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[id].threadState)); - if (state != PAGE_REDO_WORKER_READY) { - g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[id].threadState = PAGE_REDO_WORKER_START; - } - SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - return worker; -} - -void RedoWorkerQueueCallBack() -{ - RedoInterruptCallBack(); -} - -bool RedoWorkerIsUndoSpaceWorker() -{ - return g_redoWorker->isUndoSpaceWorker; -} - -/* Run from the dispatcher thread. 
*/ -PageRedoWorker *CreateWorker(uint32 id) -{ - PageRedoWorker *tmp = (PageRedoWorker *)palloc0(sizeof(PageRedoWorker) + EXTREME_RTO_ALIGN_LEN); - PageRedoWorker *worker; - worker = (PageRedoWorker *)TYPEALIGN(EXTREME_RTO_ALIGN_LEN, tmp); - worker->selfOrinAddr = tmp; - worker->id = id; - worker->index = 0; - worker->tid.thid = InvalidTid; - worker->proc = NULL; - worker->initialServerMode = (ServerMode)t_thrd.xlog_cxt.server_mode; - worker->initialTimeLineID = t_thrd.xlog_cxt.ThisTimeLineID; - worker->expectedTLIs = t_thrd.xlog_cxt.expectedTLIs; - worker->recoveryTargetTLI = t_thrd.xlog_cxt.recoveryTargetTLI; - worker->recoveryRestoreCommand = t_thrd.xlog_cxt.recoveryRestoreCommand; - worker->ArchiveRecoveryRequested = t_thrd.xlog_cxt.ArchiveRecoveryRequested; - worker->StandbyModeRequested = t_thrd.xlog_cxt.StandbyModeRequested; - worker->InArchiveRecovery = t_thrd.xlog_cxt.InArchiveRecovery; - worker->InRecovery = t_thrd.xlog_cxt.InRecovery; - worker->ArchiveRestoreRequested = t_thrd.xlog_cxt.ArchiveRestoreRequested; - worker->minRecoveryPoint = t_thrd.xlog_cxt.minRecoveryPoint; - - worker->pendingHead = NULL; - worker->pendingTail = NULL; - worker->queue = SPSCBlockingQueueCreate(PAGE_WORK_QUEUE_SIZE, RedoWorkerQueueCallBack); - worker->lastCheckedRestartPoint = InvalidXLogRecPtr; - worker->lastReplayedEndRecPtr = InvalidXLogRecPtr; - worker->standbyState = (HotStandbyState)t_thrd.xlog_cxt.standbyState; - worker->StandbyMode = t_thrd.xlog_cxt.StandbyMode; - worker->latestObservedXid = t_thrd.storage_cxt.latestObservedXid; - worker->DataDir = t_thrd.proc_cxt.DataDir; - worker->RecentXmin = u_sess->utils_cxt.RecentXmin; - worker->xlogInvalidPages = NULL; - PosixSemaphoreInit(&worker->phaseMarker, 0); - worker->oldCtx = NULL; - worker->fullSyncFlag = 0; -#if (!defined __x86_64__) && (!defined __aarch64__) - SpinLockInit(&worker->ptrLck); -#endif - worker->parseManager.memctl.isInit = false; - worker->parseManager.parsebuffers = NULL; - return worker; -} - -/* Run from the dispatcher thread. */ -static ThreadId StartWorkerThread(PageRedoWorker *worker) -{ - worker->tid.thid = initialize_util_thread(PAGEREDO, worker); - return worker->tid.thid; -} - -/* Run from the dispatcher thread. 
*/ -void DestroyPageRedoWorker(PageRedoWorker *worker) -{ - PosixSemaphoreDestroy(&worker->phaseMarker); - SPSCBlockingQueueDestroy(worker->queue); - XLogRedoBufferDestoryFunc(&(worker->bufferManager)); - XLogParseBufferDestoryFunc(&(worker->parseManager)); - pfree(worker->selfOrinAddr); -} - -/* automic write for lastReplayedReadRecPtr and lastReplayedEndRecPtr */ -void SetCompletedReadEndPtr(PageRedoWorker *worker, XLogRecPtr readPtr, XLogRecPtr endPtr) -{ - volatile PageRedoWorker *tmpWk = worker; -#if defined(__x86_64__) || defined(__aarch64__) - uint128_u exchange; - uint128_u current; - uint128_u compare = atomic_compare_and_swap_u128((uint128_u *)&tmpWk->lastReplayedReadRecPtr); - - Assert(sizeof(tmpWk->lastReplayedReadRecPtr) == 8); - Assert(sizeof(tmpWk->lastReplayedEndRecPtr) == 8); - - exchange.u64[0] = (uint64)readPtr; - exchange.u64[1] = (uint64)endPtr; - -loop: - current = atomic_compare_and_swap_u128((uint128_u *)&tmpWk->lastReplayedReadRecPtr, compare, exchange); - if (!UINT128_IS_EQUAL(compare, current)) { - UINT128_COPY(compare, current); - goto loop; - } -#else - SpinLockAcquire(&tmpWk->ptrLck); - tmpWk->lastReplayedReadRecPtr = readPtr; - tmpWk->lastReplayedEndRecPtr = endPtr; - SpinLockRelease(&tmpWk->ptrLck); -#endif /* __x86_64__ || __aarch64__ */ -} - -/* automic write for lastReplayedReadRecPtr and lastReplayedEndRecPtr */ -void GetCompletedReadEndPtr(PageRedoWorker *worker, XLogRecPtr *readPtr, XLogRecPtr *endPtr) -{ - volatile PageRedoWorker *tmpWk = worker; -#if defined(__x86_64__) || defined(__aarch64__) - uint128_u compare = atomic_compare_and_swap_u128((uint128_u *)&tmpWk->lastReplayedReadRecPtr); - Assert(sizeof(tmpWk->lastReplayedReadRecPtr) == 8); - Assert(sizeof(tmpWk->lastReplayedEndRecPtr) == 8); - - *readPtr = (XLogRecPtr)compare.u64[0]; - *endPtr = (XLogRecPtr)compare.u64[1]; -#else - SpinLockAcquire(&tmpWk->ptrLck); - *readPtr = tmpWk->lastReplayedReadRecPtr; - *endPtr = tmpWk->lastReplayedEndRecPtr; - SpinLockRelease(&tmpWk->ptrLck); -#endif /* __x86_64__ || __aarch64__ */ -} - -/* Run from both the dispatcher and the worker thread. */ -bool IsPageRedoWorkerProcess(int argc, char *argv[]) -{ - return strcmp(argv[1], PROCESS_TYPE_CMD_ARG) == 0; -} - -/* Run from the worker thread. */ -void AdaptArgvForPageRedoWorker(char *argv[]) -{ - if (g_AUXILIARY_TYPE_CMD_ARG[0] == 0) - sprintf_s(g_AUXILIARY_TYPE_CMD_ARG, sizeof(g_AUXILIARY_TYPE_CMD_ARG), "-x%d", PageRedoProcess); - argv[3] = g_AUXILIARY_TYPE_CMD_ARG; -} - -/* Run from the worker thread. */ -void GetThreadNameIfPageRedoWorker(int argc, char *argv[], char **threadNamePtr) -{ - if (*threadNamePtr == NULL && IsPageRedoWorkerProcess(argc, argv)) - *threadNamePtr = "PageRedoWorker"; -} - -/* Run from the worker thread. */ -uint32 GetMyPageRedoWorkerIdWithLock() -{ - bool isWorkerStarting = false; - SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); - isWorkerStarting = ((g_instance.comm_cxt.predo_cxt.state == REDO_STARTING_BEGIN) ? true : false); - SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - if (!isWorkerStarting) { - ereport(WARNING, (errmsg("GetMyPageRedoWorkerIdWithLock Page-redo-worker exit."))); - proc_exit(0); - } - - return g_redoWorker->id; -} - -/* Run from any worker thread. 
*/ -PGPROC *GetPageRedoWorkerProc(PageRedoWorker *worker) -{ - return worker->proc; -} - -void HandlePageRedoInterrupts() -{ - if (t_thrd.page_redo_cxt.got_SIGHUP) { - t_thrd.page_redo_cxt.got_SIGHUP = false; - ProcessConfigFile(PGC_SIGHUP); - } - - if (t_thrd.page_redo_cxt.shutdown_requested) { - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("page worker id %u exit for request", g_redoWorker->id))); - - pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), - PAGE_REDO_WORKER_EXIT); - - proc_exit(1); - } -} - -void ReferenceRedoItem(void *item) -{ - RedoItem *redoItem = (RedoItem *)item; - AddRefRecord(&redoItem->record); -} - -void DereferenceRedoItem(void *item) -{ - RedoItem *redoItem = (RedoItem *)item; - SubRefRecord(&redoItem->record); -} - -#define STRUCT_CONTAINER(type, membername, ptr) ((type *)((char *)(ptr)-offsetof(type, membername))) - -#ifdef USE_ASSERT_CHECKING -void RecordBlockCheck(void *rec, XLogRecPtr curPageLsn, uint32 blockId, bool replayed) -{ - XLogReaderState *record = (XLogReaderState *)rec; - if (record->blocks[blockId].forknum != MAIN_FORKNUM) { - return; - } - - if (replayed) { - uint32 rmid = XLogRecGetRmid(record); - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - - if (curPageLsn == InvalidXLogRecPtr && (rmid == RM_HEAP2_ID || rmid == RM_HEAP_ID || rmid == RM_HEAP3_ID)) { - uint32 shiftSize = 32; - ereport(LOG, (errmsg("pass checked, record lsn:%X/%X, type: %u %u", - static_cast(record->EndRecPtr >> shiftSize), static_cast(record->EndRecPtr), - record->decoded_record->xl_rmid, record->decoded_record->xl_info))); - } else if (!(rmid == RM_HEAP2_ID && info == XLOG_HEAP2_VISIBLE) && - !(rmid == RM_HEAP_ID && info == XLOG_HEAP_NEWPAGE)) { - Assert(XLByteLE(record->EndRecPtr, curPageLsn)); - } - } - - Assert(blockId < (XLR_MAX_BLOCK_ID + 1)); - record->blocks[blockId].replayed = 1; -} - -#endif - -void AddRefRecord(void *rec) -{ -#ifndef EXTREME_RTO_DEBUG - (void)pg_atomic_fetch_add_u32(&((XLogReaderState *)rec)->refcount, 1); -#else - uint32 relCount = pg_atomic_fetch_add_u32(&((XLogReaderState *)rec)->refcount, 1); - - const int stack_size = 5; - const int max_out_put_buf = 4096; - void *buffer[stack_size]; - int nptrs; - char output[max_out_put_buf]; - char **strings; - nptrs = backtrace(buffer, stack_size); - strings = backtrace_symbols(buffer, nptrs); - - int ret = sprintf_s(output, sizeof(output), "before add relcount %u lsn %X/%X call back trace: \n", relCount, - (uint32)(((XLogReaderState *)rec)->EndRecPtr >> 32), - (uint32)(((XLogReaderState *)rec)->EndRecPtr)); - securec_check_ss_c(ret, "\0", "\0"); - for (int i = 0; i < nptrs; ++i) { - ret = strcat_s(output, max_out_put_buf - strlen(output), strings[i]); - securec_check_ss_c(ret, "\0", "\0"); - ret = strcat_s(output, max_out_put_buf - strlen(output), "\n"); - securec_check_ss_c(ret, "\0", "\0"); - } - - free(strings); - ereport(LOG, (errcode(ERRCODE_DATA_CORRUPTED), errmsg(" AddRefRecord print: %s", output))); - -#endif -} - -void SubRefRecord(void *rec) -{ - Assert(((XLogReaderState *)rec)->refcount != 0); - uint32 relCount = pg_atomic_sub_fetch_u32(&((XLogReaderState *)rec)->refcount, 1); -#ifdef EXTREME_RTO_DEBUG - const int stack_size = 5; - const int max_out_put_buf = 4096; - void *buffer[stack_size]; - int nptrs; - char output[max_out_put_buf]; - char **strings; - nptrs = backtrace(buffer, stack_size); - strings = backtrace_symbols(buffer, nptrs); - - int ret = sprintf_s(output, sizeof(output), "after sub relcount 
%u lsn %X/%X call back trace:\n", relCount, - (uint32)(((XLogReaderState *)rec)->EndRecPtr >> 32), - (uint32)(((XLogReaderState *)rec)->EndRecPtr)); - securec_check_ss_c(ret, "\0", "\0"); - for (int i = 0; i < nptrs; ++i) { - ret = strcat_s(output, max_out_put_buf - strlen(output), strings[i]); - securec_check_ss_c(ret, "\0", "\0"); - ret = strcat_s(output, max_out_put_buf - strlen(output), "\n"); - securec_check_ss_c(ret, "\0", "\0"); - } - free(strings); - ereport(LOG, (errcode(ERRCODE_DATA_CORRUPTED), errmsg(" SubRefRecord print: %s", output))); - -#endif - - if (relCount == 0) { - RedoItem *item = STRUCT_CONTAINER(RedoItem, record, rec); - FreeRedoItem(item); - } -} - -bool BatchRedoParseItemAndDispatch(RedoItem *item) -{ - uint32 blockNum = 0; - XLogRecParseState *recordblockstate = XLogParseToBlockForExtermeRTO(&item->record, &blockNum); - if (recordblockstate == NULL) { - if (blockNum == 0) { - return false; - } - return true; /* out of mem */ - } - - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - AddPageRedoItem(myRedoLine->managerThd, recordblockstate); - return false; -} - -void BatchRedoDistributeEndMark(void) -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - SendPageRedoEndMark(myRedoLine->managerThd); -} - -void BatchRedoProcLsnForwarder(RedoItem *lsnForwarder) -{ - SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); - (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); - - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - AddPageRedoItem(myRedoLine->managerThd, lsnForwarder); -} - -void BatchRedoProcCleanupMark(RedoItem *cleanupMark) -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); - AddPageRedoItem(myRedoLine->managerThd, cleanupMark); - ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]BatchRedoProcCleanupMark has cleaned InvalidPages"))); -} - -static void WaitRedoPageManagerQueueEmpty() -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - - while (!SPSCBlockingQueueIsEmpty(myRedoLine->managerThd->queue)) { - HandlePageRedoInterrupts(); - } -} - -void WaitAllRedoWorkerQueueEmpty() -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - - for (uint32 i = 0; i < WorkerNumPerMng; ++i) { - while (!SPSCBlockingQueueIsEmpty(myRedoLine->redoThd[i]->queue)) { - HandlePageRedoInterrupts(); - } - } -} - -bool IsAllBatchThrdSyncInfo(uint8 info) -{ - return ((info == XLOG_SEG_ATOMIC_OPERATION) || (info == XLOG_SEG_CREATE_EXTENT_GROUP) || - (info == XLOG_SEG_INIT_INVRSPTR_PAGE) || (info == XLOG_SEG_INIT_MAPPAGE) || - (info == XLOG_SEG_ADD_NEW_GROUP) || (info == XLOG_SEG_SPACE_SHRINK) || - (info == XLOG_SEG_SPACE_DROP) || (info == XLOG_SEG_NEW_PAGE)); -} - -bool IsAllBatchThrdSyncReoItem(RedoItem *redo_item) -{ - XLogReaderState* record = &redo_item->record; - RmgrId rmgr_id = XLogRecGetRmid(record); - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - return (rmgr_id == RM_SEGPAGE_ID && IsAllBatchThrdSyncInfo(info)); -} - -#ifdef USE_ASSERT_CHECKING -static void SetRedoItemReplayed(RedoItem *item) -{ - for (uint32 i = 0; i <= XLR_MAX_BLOCK_ID; ++i) { - if (item->record.blocks[i].in_use) { - if (item->record.blocks[i].forknum == MAIN_FORKNUM) { - item->record.blocks[i].replayed = 1; - } - } else { - 
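`SubRefRecord` above drops the shared count with `pg_atomic_sub_fetch_u32` and, on reaching zero, uses `STRUCT_CONTAINER` to step from the embedded `XLogReaderState` back to its enclosing `RedoItem` before freeing it. The recovery trick is plain `offsetof` arithmetic; a self-contained sketch with hypothetical `Item`/`Record` types:

```
#include <cstddef>
#include <cstdio>

/* Same arithmetic as the STRUCT_CONTAINER macro above. */
#define CONTAINER_OF(type, member, ptr) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct Record {
    unsigned refcount;
};

struct Item {
    int payload;
    Record record; /* the embedded member other code sees */
};

int main()
{
    Item item{42, {1}};
    Record *rec = &item.record; /* callers hold only the Record */
    Item *back = CONTAINER_OF(Item, record, rec);
    std::printf("payload recovered: %d\n", back->payload); /* 42 */
    return 0;
}
```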
Assert((item->record.blocks[i].replayed == 0)); - } - } -} -#endif - -static void DispatchSyncItem(RedoItem *redo_item) -{ - XLogReaderState* record = &redo_item->record; - RmgrId rmgr_id = XLogRecGetRmid(record); - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - - /* sync_all means: it should dispatch parse items to pageworkers to do some clean works, as forget invalid page */ - bool sync_all = (rmgr_id == RM_SEGPAGE_ID && (info == XLOG_SEG_SPACE_DROP || info == XLOG_SEG_SPACE_SHRINK)); - if (sync_all) { - bool parsecomplete = false; - do { - parsecomplete = BatchRedoParseItemAndDispatch(redo_item); - RedoInterruptCallBack(); - } while (parsecomplete); - } -} - -/* - * The redo item is done by one parser, other parsers may need do some clean works for themselves. - */ -static void ParseredoSharedSyncItem(RedoItem *redo_item) -{ - XLogReaderState* record = &redo_item->record; - RmgrId rmgr_id = XLogRecGetRmid(record); - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - - if (rmgr_id == RM_SEGPAGE_ID) { - if (info == XLOG_SEG_SPACE_DROP) { - char *data = (char *)XLogRecGetData(record); - Oid spcNode = *(Oid *)data; - Oid dbNode = *(Oid *)(data + sizeof(Oid)); - XLogDropSegmentSpace(spcNode, dbNode); - } else if (info == XLOG_SEG_SPACE_SHRINK) { - XLogDataSpaceShrink *xlog_data = (XLogDataSpaceShrink *)XLogRecGetData(record); - - XLogTruncateRelation(xlog_data->rnode, xlog_data->forknum, xlog_data->target_size); - XLogTruncateSegmentSpace(xlog_data->rnode, xlog_data->forknum, xlog_data->target_size); - } - } -} - -static void BatchRedoAllSyncItem(RedoItem *redo_item) -{ - DispatchSyncItem(redo_item); - - /* Ensure page manager queue is empty */ - WaitRedoPageManagerQueueEmpty(); - WaitAllRedoWorkerQueueEmpty(); - - uint32 expected = 0; - const uint32 pipelineNum = g_dispatcher->pageLineNum; - pg_atomic_compare_exchange_u32(&g_dispatcher->batchThrdEnterNum, &expected, pipelineNum); - uint32 enterRelCnt = pg_atomic_sub_fetch_u32(&g_dispatcher->batchThrdEnterNum, 1); - if (enterRelCnt == 0) { - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - extreme_rto::ApplyRedoRecord(&redo_item->record, t_thrd.xlog_cxt.redo_oldversion_xlog); - (void)MemoryContextSwitchTo(oldCtx); -#ifdef USE_ASSERT_CHECKING - SetRedoItemReplayed(redo_item); -#endif - } else { - ParseredoSharedSyncItem(redo_item); - do { - RedoInterruptCallBack(); - enterRelCnt = pg_atomic_read_u32(&g_dispatcher->batchThrdEnterNum); - } while (enterRelCnt != 0); - } - - expected = 0; - pg_atomic_compare_exchange_u32(&g_dispatcher->batchThrdExitNum, &expected, pipelineNum); - uint32 exitRelCnt = pg_atomic_sub_fetch_u32(&g_dispatcher->batchThrdExitNum, 1); - while (exitRelCnt != 0) { - RedoInterruptCallBack(); - exitRelCnt = pg_atomic_read_u32(&g_dispatcher->batchThrdExitNum); - } -} - -bool IsBatchThrdProcessRedoItem(RedoItem *redo_item) -{ - XLogReaderState* record = &redo_item->record; - RmgrId rmgr_id = XLogRecGetRmid(record); - uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - return (rmgr_id == RM_SEGPAGE_ID && (info == XLOG_SEG_SEGMENT_EXTEND)); -} - -bool BatchRedoDistributeItems(void **eleArry, uint32 eleNum) -{ - bool parsecomplete = false; - for (uint32 i = 0; i < eleNum; i++) { - if (eleArry[i] == (void *)&g_redoEndMark) { - return true; - } else if (eleArry[i] == (void *)&g_GlobalLsnForwarder) { - BatchRedoProcLsnForwarder((RedoItem *)eleArry[i]); - } else if (eleArry[i] == (void *)&g_cleanupMark) { - BatchRedoProcCleanupMark((RedoItem *)eleArry[i]); - } else { - RedoItem *item = (RedoItem 
*)eleArry[i]; - UpdateRecordGlobals(item, g_redoWorker->standbyState); - - if (IsBatchThrdProcessRedoItem(item)) { - /* Ensure page manager queue is empty */ - WaitRedoPageManagerQueueEmpty(); - - WaitAllRedoWorkerQueueEmpty(); - - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - extreme_rto::ApplyRedoRecord(&item->record, t_thrd.xlog_cxt.redo_oldversion_xlog); - (void)MemoryContextSwitchTo(oldCtx); -#ifdef USE_ASSERT_CHECKING - SetRedoItemReplayed(item); -#endif - } else if (IsAllBatchThrdSyncReoItem(item)) { - BatchRedoAllSyncItem(item); - } else { - do { - parsecomplete = BatchRedoParseItemAndDispatch(item); - RedoInterruptCallBack(); - } while (parsecomplete); - } - - DereferenceRedoItem(item); - } - } - - return false; -} - -void BatchRedoMain() -{ - void **eleArry; - uint32 eleNum; - - (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - XLogParseBufferInitFunc(&(g_redoWorker->parseManager), MAX_PARSE_BUFF_NUM, &recordRefOperate, - RedoInterruptCallBack); - - while (SPSCBlockingQueueGetAll(g_redoWorker->queue, &eleArry, &eleNum)) { - bool isEnd = BatchRedoDistributeItems(eleArry, eleNum); - SPSCBlockingQueuePopN(g_redoWorker->queue, eleNum); - if (isEnd) - break; - - RedoInterruptCallBack(); - ADD_ABNORMAL_POSITION(1); - } - - RedoThrdWaitForExit(g_redoWorker); - XLogParseBufferDestoryFunc(&(g_redoWorker->parseManager)); -} - -uint32 GetWorkerId(const RedoItemTag *redoItemTag, uint32 workerCount) -{ - if (workerCount != 0) { - return tag_hash(redoItemTag, sizeof(RedoItemTag)) % workerCount; - } - return 0; -} - -uint32 GetWorkerId(const uint32 attId, const uint32 workerCount) -{ - if (workerCount != 0) { - return attId % workerCount; - } - return 0; -} - -void RedoPageManagerDistributeToAllOneBlock(XLogRecParseState *ddlParseState) -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - - ddlParseState->nextrecord = NULL; - - for (uint32 i = 0; i < WorkerNumPerMng; ++i) { - XLogRecParseState *newState = XLogParseBufferCopy(ddlParseState); - AddPageRedoItem(myRedoLine->redoThd[i], newState); - } -} - -void RedoPageManagerDistributeBlockRecord(HTAB *redoItemHash, XLogRecParseState *parsestate) -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - HASH_SEQ_STATUS status; - RedoItemHashEntry *redoItemEntry = NULL; - HTAB *curMap = redoItemHash; - hash_seq_init(&status, curMap); - - while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { - uint32 workId = GetWorkerId(&redoItemEntry->redoItemTag, WorkerNumPerMng); - AddPageRedoItem(myRedoLine->redoThd[workId], redoItemEntry->head); - - if (hash_search(curMap, (void *)&redoItemEntry->redoItemTag, HASH_REMOVE, NULL) == NULL) - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("hash table corrupted"))); - } - - if (parsestate != NULL) { - RedoPageManagerDistributeToAllOneBlock(parsestate); - } -} - -void WaitCurrentPipeLineRedoWorkersQueueEmpty() -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - - for (uint32 i = 0; i < WorkerNumPerMng; ++i) { - while (!SPSCBlockingQueueIsEmpty(myRedoLine->redoThd[i]->queue)) { - RedoInterruptCallBack(); - } - } -} - -void DispatchEndMarkToRedoWorkerAndWait() -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = 
get_page_redo_worker_num_per_manager(); - for (uint32 i = 0; i < WorkerNumPerMng; ++i) - SendPageRedoEndMark(myRedoLine->redoThd[i]); - - for (uint32 i = 0; i < myRedoLine->redoThdNum; i++) { - WaitPageRedoWorkerReachLastMark(myRedoLine->redoThd[i]); - } -} - -void RedoPageManagerDdlAction(XLogRecParseState *parsestate) -{ - switch (parsestate->blockparse.blockhead.block_valid) { - case BLOCK_DATA_DROP_DATABASE_TYPE: - xlog_db_drop(parsestate->blockparse.blockhead.dbNode, parsestate->blockparse.blockhead.spcNode); - break; - case BLOCK_DATA_CREATE_TBLSPC_TYPE: - xlog_create_tblspc(parsestate->blockparse.blockhead.spcNode, - parsestate->blockparse.extra_rec.blocktblspc.tblPath, - parsestate->blockparse.extra_rec.blocktblspc.isRelativePath); - break; - case BLOCK_DATA_CREATE_DATABASE_TYPE: - xlog_db_create(parsestate->blockparse.blockhead.dbNode, parsestate->blockparse.blockhead.spcNode, - parsestate->blockparse.extra_rec.blockdatabase.src_db_id, - parsestate->blockparse.extra_rec.blockdatabase.src_tablespace_id); - break; - case BLOCK_DATA_DROP_TBLSPC_TYPE: - xlog_drop_tblspc(parsestate->blockparse.blockhead.spcNode); - break; - case BLOCK_DATA_SEG_FILE_EXTEND_TYPE: - { - Assert(0); - } - break; - default: - break; - } -} - -void RedoPageManagerSmgrClose(XLogRecParseState *parsestate) -{ - switch (parsestate->blockparse.blockhead.block_valid) { - case BLOCK_DATA_DROP_DATABASE_TYPE: - smgrcloseall(); - break; - default: - break; - } -} - -void RedoPageManagerSyncDdlAction(XLogRecParseState *parsestate) -{ - /* at this monent, all worker queue is empty ,just find out which one will do it */ - uint32 expected = 0; - const uint32 pipelineNum = g_dispatcher->pageLineNum; - pg_atomic_compare_exchange_u32(&g_dispatcher->syncEnterCount, &expected, pipelineNum); - uint32 entershareCount = pg_atomic_sub_fetch_u32(&g_dispatcher->syncEnterCount, 1); - - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - if (entershareCount == 0) { - /* do actual work */ - RedoPageManagerDdlAction(parsestate); - } else { - RedoPageManagerSmgrClose(parsestate); - do { - RedoInterruptCallBack(); - entershareCount = pg_atomic_read_u32(&g_dispatcher->syncEnterCount); - } while (entershareCount != 0); - } - (void)MemoryContextSwitchTo(oldCtx); - - expected = 0; - pg_atomic_compare_exchange_u32(&g_dispatcher->syncExitCount, &expected, pipelineNum); - uint32 exitShareCount = pg_atomic_sub_fetch_u32(&g_dispatcher->syncExitCount, 1); - while (exitShareCount != 0) { - RedoInterruptCallBack(); - exitShareCount = pg_atomic_read_u32(&g_dispatcher->syncExitCount); - } - - parsestate->nextrecord = NULL; - XLogBlockParseStateRelease(parsestate); -} - -void RedoPageManagerDoDropAction(XLogRecParseState *parsestate, HTAB *hashMap) -{ - XLogRecParseState *newState = XLogParseBufferCopy(parsestate); - PRTrackClearBlock(newState, hashMap); - RedoPageManagerDistributeBlockRecord(hashMap, parsestate); - WaitCurrentPipeLineRedoWorkersQueueEmpty(); - RedoPageManagerSyncDdlAction(parsestate); -} - -void RedoPageManagerDoSmgrAction(XLogRecParseState *recordblockstate) -{ - RedoBufferInfo bufferinfo = {0}; - void *blockrecbody; - XLogBlockHead *blockhead; - - blockhead = &recordblockstate->blockparse.blockhead; - blockrecbody = &recordblockstate->blockparse.extra_rec; - - XLogBlockInitRedoBlockInfo(blockhead, &bufferinfo.blockinfo); - - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - if (blockhead->bucketNode != InvalidBktId) { - XLogBlockSegDdlDoRealAction(blockhead, blockrecbody, &bufferinfo); - } else { 
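`RedoPageManagerSyncDdlAction` above makes exactly one page pipeline replay a shared DDL record: the first arrival seeds `syncEnterCount` with the pipeline count, the thread whose decrement reaches zero does the work (the others close smgr handles and spin), and a mirrored `syncExitCount` barrier keeps anyone from racing into the next rendezvous. A runnable sketch of that double-counter rendezvous; the counter names and `kPipelines` are illustrative assumptions:

```
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

constexpr unsigned kPipelines = 4;          /* assumed pipeline count */
std::atomic<unsigned> enterCount{0};
std::atomic<unsigned> exitCount{0};

void sync_shared_action(unsigned id)
{
    unsigned expected = 0;
    /* First arrival seeds the counter; the CAS simply fails for
     * everyone else because the value is no longer zero. */
    enterCount.compare_exchange_strong(expected, kPipelines);

    if (enterCount.fetch_sub(1) == 1) {
        /* Whoever decrements the counter to zero does the work once. */
        std::printf("pipeline %u replays the shared record\n", id);
    } else {
        while (enterCount.load() != 0)
            std::this_thread::yield();      /* peers still arriving */
    }

    /* Mirrored barrier on the way out: nobody runs ahead into the
     * next rendezvous until the worker has finished and left too. */
    expected = 0;
    exitCount.compare_exchange_strong(expected, kPipelines);
    exitCount.fetch_sub(1);
    while (exitCount.load() != 0)
        std::this_thread::yield();
}

int main()
{
    std::vector<std::thread> pipelines;
    for (unsigned i = 0; i < kPipelines; ++i)
        pipelines.emplace_back(sync_shared_action, i);
    for (auto &t : pipelines)
        t.join();
}
```

The exit counter is what guarantees the action has completed before any pipeline proceeds: spinners escape the enter loop as soon as the count drains, but they cannot leave the exit barrier until the working thread decrements it after the action.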
- XLogBlockDdlDoSmgrAction(blockhead, blockrecbody, &bufferinfo); - } - (void)MemoryContextSwitchTo(oldCtx); - - recordblockstate->nextrecord = NULL; - XLogBlockParseStateRelease(recordblockstate); -} - -void RedoPageManagerDoSmgrActionForBktList(XLogRecParseState *recordblockstate) -{ - Assert(recordblockstate->blockparse.extra_rec.blockddlrec.blockddltype == BLOCK_DDL_DROP_BKTLIST); - - XLogBlockHead *blockhead = &recordblockstate->blockparse.blockhead; - uint32 *bktMap = (uint32 *)recordblockstate->blockparse.extra_rec.blockddlrec.mainData; - - RelFileNode rnode; - rnode.spcNode = blockhead->spcNode; - rnode.dbNode = blockhead->dbNode; - rnode.relNode = blockhead->relNode; - - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - for (uint32 bktNode = 0; bktNode < MAX_BUCKETMAPLEN; bktNode++) { - if (!GET_BKT_MAP_BIT(bktMap, bktNode)) { - continue; - } - - rnode.bucketNode = bktNode; - SMgrRelation reln = smgropen(rnode, InvalidBackendId, 0); - smgrclose(reln); - } - (void)MemoryContextSwitchTo(oldCtx); - - recordblockstate->nextrecord = NULL; - XLogBlockParseStateRelease(recordblockstate); -} - -void RedoPageManagerDoDataTypeAction(XLogRecParseState *parsestate, HTAB *hashMap) -{ - XLogBlockDdlParse *ddlrecparse = NULL; - XLogBlockParseGetDdlParse(parsestate, ddlrecparse); - - if (ddlrecparse->blockddltype == BLOCK_DDL_DROP_RELNODE || - ddlrecparse->blockddltype == BLOCK_DDL_DROP_BKTLIST || - ddlrecparse->blockddltype == BLOCK_DDL_TRUNCATE_RELNODE) { - XLogRecParseState *newState = XLogParseBufferCopy(parsestate); - PRTrackClearBlock(newState, hashMap); - RedoPageManagerDistributeBlockRecord(hashMap, parsestate); - WaitCurrentPipeLineRedoWorkersQueueEmpty(); - } - - if (ddlrecparse->blockddltype == BLOCK_DDL_DROP_BKTLIST) { - RedoPageManagerDoSmgrActionForBktList(parsestate); - } else { - RedoPageManagerDoSmgrAction(parsestate); - } -} - -void PageManagerProcLsnForwarder(RedoItem *lsnForwarder) -{ - SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); - (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); - - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - - for (uint32 i = 0; i < WorkerNumPerMng; ++i) { - AddPageRedoItem(myRedoLine->redoThd[i], lsnForwarder); - } -} - -void PageManagerDistributeBcmBlock(XLogRecParseState *preState) -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - uint32 workId = GetWorkerId((uint32)preState->blockparse.blockhead.forknum, WorkerNumPerMng); - AddPageRedoItem(myRedoLine->redoThd[workId], preState); -} - -void PageManagerProcCleanupMark(RedoItem *cleanupMark) -{ - PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; - const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; - g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); - for (uint32 i = 0; i < WorkerNumPerMng; ++i) { - AddPageRedoItem(myRedoLine->redoThd[i], cleanupMark); - } - ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]PageManagerProcCleanupMark has cleaned InvalidPages"))); -} - -void PageManagerProcCheckPoint(HTAB *hashMap, XLogRecParseState *parseState) -{ - Assert(IsCheckPoint(parseState)); - RedoPageManagerDistributeBlockRecord(hashMap, parseState); - bool needWait = parseState->isFullSync; - if (needWait) { - pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1); - } - - 
XLogBlockParseStateRelease(parseState); - uint32 val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); - while (val != 0) { - RedoInterruptCallBack(); - val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); - } - -#ifdef USE_ASSERT_CHECKING - int printLevel = WARNING; -#else - int printLevel = DEBUG1; -#endif - if (log_min_messages <= printLevel) { - GetThreadBufferLeakNum(); - } -} - - -void PageManagerProcCreateTableSpace(HTAB *hashMap, XLogRecParseState *parseState) -{ - RedoPageManagerDistributeBlockRecord(hashMap, NULL); - bool needWait = parseState->isFullSync; - if (needWait) { - pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1); - } - - XLogBlockParseStateRelease(parseState); - uint32 val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); - while (val != 0) { - RedoInterruptCallBack(); - val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); - } -} - -bool PageManagerRedoDistributeItems(void **eleArry, uint32 eleNum) -{ - HTAB *hashMap = g_dispatcher->pageLines[g_redoWorker->slotId].managerThd->redoItemHash; - - for (uint32 i = 0; i < eleNum; i++) { - if (eleArry[i] == (void *)&g_redoEndMark) { - RedoPageManagerDistributeBlockRecord(hashMap, NULL); - return true; - } else if (eleArry[i] == (void *)&g_GlobalLsnForwarder) { - RedoPageManagerDistributeBlockRecord(hashMap, NULL); - PageManagerProcLsnForwarder((RedoItem *)eleArry[i]); - continue; - } else if (eleArry[i] == (void *)&g_cleanupMark) { - PageManagerProcCleanupMark((RedoItem *)eleArry[i]); - continue; - } - XLogRecParseState *recordblockstate = (XLogRecParseState *)eleArry[i]; - XLogRecParseState *nextState = recordblockstate; - do { - XLogRecParseState *preState = nextState; - nextState = (XLogRecParseState *)nextState->nextrecord; - preState->nextrecord = NULL; - - switch (preState->blockparse.blockhead.block_valid) { - case BLOCK_DATA_MAIN_DATA_TYPE: - case BLOCK_DATA_UNDO_TYPE: - case BLOCK_DATA_VM_TYPE: - case BLOCK_DATA_FSM_TYPE: - PRTrackAddBlock(preState, hashMap); - break; - case BLOCK_DATA_DDL_TYPE: - RedoPageManagerDoDataTypeAction(preState, hashMap); - break; - case BLOCK_DATA_DROP_DATABASE_TYPE: - RedoPageManagerDoDropAction(preState, hashMap); - break; - case BLOCK_DATA_DROP_TBLSPC_TYPE: - /* just make sure any other ddl before drop tblspc is done */ - XLogBlockParseStateRelease(preState); - break; - case BLOCK_DATA_CREATE_DATABASE_TYPE: - case BLOCK_DATA_SEG_FILE_EXTEND_TYPE: - RedoPageManagerDistributeBlockRecord(hashMap, NULL); - /* wait until queue empty */ - WaitCurrentPipeLineRedoWorkersQueueEmpty(); - /* do atcual action */ - RedoPageManagerSyncDdlAction(preState); - break; - case BLOCK_DATA_CREATE_TBLSPC_TYPE: - PageManagerProcCreateTableSpace(hashMap, preState); - break; - case BLOCK_DATA_XLOG_COMMON_TYPE: - PageManagerProcCheckPoint(hashMap, preState); - break; - case BLOCK_DATA_NEWCU_TYPE: - RedoPageManagerDistributeBlockRecord(hashMap, NULL); - PageManagerDistributeBcmBlock(preState); - break; - case BLOCK_DATA_SEG_SPACE_DROP: - case BLOCK_DATA_SEG_SPACE_SHRINK: - RedoPageManagerDistributeBlockRecord(hashMap, preState); - WaitCurrentPipeLineRedoWorkersQueueEmpty(); - XLogBlockParseStateRelease(preState); - break; - case BLOCK_DATA_BARRIER_TYPE: - RedoPageManagerDistributeBlockRecord(hashMap, preState); - XLogBlockParseStateRelease(preState); - break; - default: - XLogBlockParseStateRelease(preState); - break; - } - } while (nextState != NULL); - } - RedoPageManagerDistributeBlockRecord(hashMap, NULL); - return false; -} - -void RedoPageManagerMain() -{ - void **eleArry; - uint32 eleNum; - - 
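`PageManagerRedoDistributeItems` above ultimately routes each tracked block through `GetWorkerId`, which hashes the `RedoItemTag` and takes it modulo the worker count, so every record touching a given page replays on the same thread and per-page ordering needs no cross-worker locking. A sketch of that partitioning with an FNV-1a hash standing in for `tag_hash`; the `PageTag` fields mirror the tag conceptually, not field-for-field:

```
#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Conceptual page tag: all fields are fixed-width, so hashing the
 * raw bytes sees no padding. */
struct PageTag {
    uint32_t spcNode, dbNode, relNode, forknum, blockno;
};

/* FNV-1a over the tag bytes; tag_hash in the source plays this role. */
static uint32_t hash_tag(const PageTag &tag)
{
    const unsigned char *p = reinterpret_cast<const unsigned char *>(&tag);
    uint32_t h = 2166136261u;
    for (size_t i = 0; i < sizeof(tag); ++i) {
        h ^= p[i];
        h *= 16777619u;
    }
    return h;
}

/* Same shape as GetWorkerId: identical tags always map to the same
 * worker, so per-page replay order is preserved for free. */
static uint32_t worker_for(const PageTag &tag, uint32_t workerCount)
{
    return (workerCount != 0) ? hash_tag(tag) % workerCount : 0;
}

int main()
{
    PageTag t{1663, 16384, 24576, 0, 42};
    std::printf("page -> worker %u of 4\n", worker_for(t, 4));
}
```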
(void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - g_redoWorker->redoItemHash = PRRedoItemHashInitialize(g_redoWorker->oldCtx); - XLogParseBufferInitFunc(&(g_redoWorker->parseManager), MAX_PARSE_BUFF_NUM, &recordRefOperate, - RedoInterruptCallBack); - - while (SPSCBlockingQueueGetAll(g_redoWorker->queue, &eleArry, &eleNum)) { - bool isEnd = PageManagerRedoDistributeItems(eleArry, eleNum); - SPSCBlockingQueuePopN(g_redoWorker->queue, eleNum); - if (isEnd) - break; - - RedoInterruptCallBack(); - ADD_ABNORMAL_POSITION(5); - } - - RedoThrdWaitForExit(g_redoWorker); - XLogParseBufferDestoryFunc(&(g_redoWorker->parseManager)); -} - -bool IsXactXlog(const XLogReaderState *record) -{ - if (XLogRecGetRmid(record) != RM_XACT_ID) { - return false; - } - return true; -} - -void TrxnManagerProcLsnForwarder(RedoItem *lsnForwarder) -{ - SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); - (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); - - AddPageRedoItem(g_dispatcher->trxnLine.redoThd, lsnForwarder); -} - -void TrxnManagerProcCleanupMark(RedoItem *cleanupMark) -{ - g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); - AddPageRedoItem(g_dispatcher->trxnLine.redoThd, cleanupMark); - ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]TrxnManagerProcCleanupMark has cleaned InvalidPages"))); -} - -bool TrxnManagerDistributeItemsBeforeEnd(RedoItem *item) -{ - bool exitFlag = false; - if (item == &g_redoEndMark) { - exitFlag = true; - } else if (item == (RedoItem *)&g_GlobalLsnForwarder) { - TrxnManagerProcLsnForwarder(item); - } else if (item == (RedoItem *)&g_cleanupMark) { - TrxnManagerProcCleanupMark(item); - } else { - if (IsCheckPoint(&item->record) || IsTableSpaceDrop(&item->record) || IsTableSpaceCreate(&item->record) || - (IsXactXlog(&item->record) && XactWillRemoveRelFiles(&item->record)) || IsBarrierCreate(&item->record)) { - uint32 relCount; - do { - RedoInterruptCallBack(); - relCount = pg_atomic_read_u32(&item->record.refcount); - } while (relCount != 1); - } - - AddPageRedoItem(g_dispatcher->trxnLine.redoThd, item); - } - return exitFlag; -} - -void GlobalLsnUpdate() -{ - t_thrd.xlog_cxt.standbyState = g_redoWorker->standbyState; - if (LsnUpdate()) { - ExtremRtoUpdateMinCheckpoint(); - CheckRecoveryConsistency(); - } -} - -bool LsnUpdate() -{ - XLogRecPtr minStart = MAX_XLOG_REC_PTR; - XLogRecPtr minEnd = MAX_XLOG_REC_PTR; - GetReplayedRecPtr(&minStart, &minEnd); - if ((minEnd != MAX_XLOG_REC_PTR) && (minStart != MAX_XLOG_REC_PTR)) { - SetXLogReplayRecPtr(minStart, minEnd); - return true; - } - return false; -} - -static void TrxnMangerQueueCallBack() -{ - GlobalLsnUpdate(); - HandlePageRedoInterrupts(); -} - -void TrxnManagerMain() -{ - (void)RegisterRedoInterruptCallBack(TrxnMangerQueueCallBack); - t_thrd.xlog_cxt.max_page_flush_lsn = get_global_max_page_flush_lsn(); - ereport(LOG, - (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("TrxnManagerMain: first get_global_max_page_flush_lsn %08X/%08X", - (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn >> 32), (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn)))); - while (true) { - if (FORCE_FINISH_ENABLED && t_thrd.xlog_cxt.max_page_flush_lsn == MAX_XLOG_REC_PTR) { - t_thrd.xlog_cxt.max_page_flush_lsn = get_global_max_page_flush_lsn(); - if (t_thrd.xlog_cxt.max_page_flush_lsn != MAX_XLOG_REC_PTR) { - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("TrxnManagerMain: second get_global_max_page_flush_lsn %08X/%08X", - 
(uint32)(t_thrd.xlog_cxt.max_page_flush_lsn >> 32), - (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn)))); - } - } - if (!SPSCBlockingQueueIsEmpty(g_redoWorker->queue)) { - RedoItem *item = (RedoItem *)SPSCBlockingQueueTop(g_redoWorker->queue); - bool isEnd = TrxnManagerDistributeItemsBeforeEnd(item); - SPSCBlockingQueuePop(g_redoWorker->queue); - - if (isEnd) { - break; - } - } else { - long sleeptime = 150 * 1000; - pg_usleep(sleeptime); - } - - ADD_ABNORMAL_POSITION(2); - RedoInterruptCallBack(); - } - - RedoThrdWaitForExit(g_redoWorker); - GlobalLsnUpdate(); -} - -void TrxnWorkerProcLsnForwarder(RedoItem *lsnForwarder) -{ - SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); - (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); -} - -void TrxnWorkNotifyRedoWorker() -{ - for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; ++i) { - if (g_dispatcher->allWorkers[i]->role == REDO_PAGE_WORKER || - g_dispatcher->allWorkers[i]->role == REDO_PAGE_MNG) { - pg_atomic_write_u32(&(g_dispatcher->allWorkers[i]->fullSyncFlag), 0); - } - } -} - -void TrxnWorkrProcCleanupMark(RedoItem *cleanupMark) -{ - g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); - ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]TrxnWorkrProcCleanupMark has cleaned InvalidPages"))); -} - -bool CheckFullSyncCheckpoint(RedoItem *item) -{ - if (!IsCheckPoint(&(item->record))) { - return true; - } - - if (XLByteLE(item->record.ReadRecPtr, t_thrd.shemem_ptr_cxt.ControlFile->checkPoint)) { - return true; - } - - return false; -} - -void TrxnWorkMain() -{ -#ifdef ENABLE_MOT - MOTBeginRedoRecovery(); -#endif - (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - if (ParseStateWithoutCache()) { - XLogRedoBufferInitFunc(&(g_redoWorker->bufferManager), MAX_LOCAL_BUFF_NUM, &recordRefOperate, - RedoInterruptCallBack); - } - - RedoItem *item = NULL; - while ((item = (RedoItem *)SPSCBlockingQueueTop(g_redoWorker->queue)) != &g_redoEndMark) { - if ((void *)item == (void *)&g_GlobalLsnForwarder) { - TrxnWorkerProcLsnForwarder((RedoItem *)item); - SPSCBlockingQueuePop(g_redoWorker->queue); - } else if ((void *)item == (void *)&g_cleanupMark) { - TrxnWorkrProcCleanupMark((RedoItem *)item); - SPSCBlockingQueuePop(g_redoWorker->queue); - } else { - t_thrd.xlog_cxt.needImmediateCkp = item->needImmediateCheckpoint; - bool fullSync = item->record.isFullSync; - ApplySinglePageRecord(item); - SPSCBlockingQueuePop(g_redoWorker->queue); - SetCompletedReadEndPtr(g_redoWorker, item->record.ReadRecPtr, item->record.EndRecPtr); - if (fullSync) { - Assert(CheckFullSyncCheckpoint(item)); - TrxnWorkNotifyRedoWorker(); - } - - if (XactHasSegpageRelFiles(&item->record)) { - uint32 expected = 1; - pg_atomic_compare_exchange_u32((volatile uint32 *)&(g_dispatcher->segpageXactDoneFlag), &expected, 0); - } - - DereferenceRedoItem(item); - RedoInterruptCallBack(); - } - ADD_ABNORMAL_POSITION(3); - } - - SPSCBlockingQueuePop(g_redoWorker->queue); - if (ParseStateWithoutCache()) - XLogRedoBufferDestoryFunc(&(g_redoWorker->bufferManager)); -#ifdef ENABLE_MOT - MOTEndRedoRecovery(); -#endif -} - -void RedoPageWorkerCheckPoint(const XLogRecParseState *redoblockstate) -{ - CheckPoint checkPoint; - Assert(IsCheckPoint(redoblockstate)); - XLogSynAllBuffer(); - Assert(redoblockstate->blockparse.extra_rec.blockxlogcommon.maindatalen >= sizeof(checkPoint)); - errno_t rc = memcpy_s(&checkPoint, sizeof(checkPoint), - redoblockstate->blockparse.extra_rec.blockxlogcommon.maindata, sizeof(checkPoint)); 
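`TrxnWorkMain` above releases page-side threads after a full-sync record by clearing each worker's `fullSyncFlag` in `TrxnWorkNotifyRedoWorker`, while the page side (see `PageManagerProcCheckPoint`) sets the flag and spins until it returns to zero. A minimal model of that request/release handshake; the thread names are illustrative, and the real spin loops also run `RedoInterruptCallBack`:

```
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

std::atomic<uint32_t> full_sync_flag{0};   /* plays fullSyncFlag */

/* Page side: publish the request, then park until released. */
void page_side()
{
    full_sync_flag.store(1);
    while (full_sync_flag.load() != 0)
        std::this_thread::yield();
    std::printf("page side resumes after the full-sync record\n");
}

/* Trxn side: wait for the request, finish replaying the full-sync
 * record, then release the waiter by clearing the flag. */
void trxn_side()
{
    while (full_sync_flag.load() != 1)
        std::this_thread::yield();
    full_sync_flag.store(0);
}

int main()
{
    std::thread page(page_side);
    std::thread trxn(trxn_side);
    page.join();
    trxn.join();
}
```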
- securec_check(rc, "\0", "\0"); - if (IsRestartPointSafe(checkPoint.redo)) { - pg_atomic_write_u64(&g_redoWorker->lastCheckedRestartPoint, redoblockstate->blockparse.blockhead.start_ptr); - } - - UpdateTimeline(&checkPoint); - -#ifdef USE_ASSERT_CHECKING - int printLevel = WARNING; -#else - int printLevel = DEBUG1; -#endif - if (log_min_messages <= printLevel) { - GetThreadBufferLeakNum(); - } -} - -void PageWorkerProcLsnForwarder(RedoItem *lsnForwarder) -{ - SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); - (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); -} - -bool XlogNeedUpdateFsm(XLogRecParseState *procState, RedoBufferInfo *bufferinfo) -{ - XLogBlockHead *blockhead = &procState->blockparse.blockhead; - if (bufferinfo->pageinfo.page == NULL || !(bufferinfo->dirtyflag) || blockhead->forknum != MAIN_FORKNUM || - XLogBlockHeadGetValidInfo(blockhead) != BLOCK_DATA_MAIN_DATA_TYPE || blockhead->bucketNode != InvalidBktId) { - return false; - } - - Size freespace = PageGetHeapFreeSpace(bufferinfo->pageinfo.page); - - RmgrId rmid = XLogBlockHeadGetRmid(blockhead); - if (rmid == RM_HEAP2_ID) { - uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; - if (info == XLOG_HEAP2_CLEAN) { - return true; - } else if ((info == XLOG_HEAP2_MULTI_INSERT) && (freespace < BLCKSZ / 5)) { - return true; - } - - } else if (rmid == RM_HEAP_ID) { - uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; - if ((info == XLOG_HEAP_INSERT || info == XLOG_HEAP_UPDATE) && (freespace < BLCKSZ / 5)) { - return true; - } - } - - return false; -} - -void RedoPageWorkerRedoBcmBlock(XLogRecParseState *procState) -{ - RmgrId rmid = XLogBlockHeadGetRmid(&procState->blockparse.blockhead); - if (rmid == RM_HEAP2_ID) { - RelFileNode node; - node.spcNode = procState->blockparse.blockhead.spcNode; - node.dbNode = procState->blockparse.blockhead.dbNode; - node.relNode = procState->blockparse.blockhead.relNode; - node.bucketNode = procState->blockparse.blockhead.bucketNode; - node.opt = procState->blockparse.blockhead.opt; - XLogBlockNewCuParse *newCuParse = &(procState->blockparse.extra_rec.blocknewcu); - uint8 info = XLogBlockHeadGetInfo(&procState->blockparse.blockhead) & ~XLR_INFO_MASK; - switch (info & XLOG_HEAP_OPMASK) { - case XLOG_HEAP2_BCM: { - xl_heap_bcm *xlrec = (xl_heap_bcm *)(newCuParse->main_data); - heap_bcm_redo(xlrec, node, procState->blockparse.blockhead.end_ptr); - break; - } - case XLOG_HEAP2_LOGICAL_NEWPAGE: { - Assert(IsHeapFileNode(node)); - xl_heap_logical_newpage *xlrec = (xl_heap_logical_newpage *)(newCuParse->main_data); - char *cuData = newCuParse->main_data + SizeOfHeapLogicalNewPage; - heap_xlog_bcm_new_page(xlrec, node, cuData); - break; - } - default: - break; - } - } -} - -void RedoPageWorkerMain() -{ - (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - - if (ParseStateWithoutCache()) { - XLogRedoBufferInitFunc(&(g_redoWorker->bufferManager), MAX_LOCAL_BUFF_NUM, &recordRefOperate, - RedoInterruptCallBack); - } - - XLogRecParseState *redoblockstateHead = NULL; - while ((redoblockstateHead = (XLogRecParseState *)SPSCBlockingQueueTop(g_redoWorker->queue)) != - (XLogRecParseState *)&g_redoEndMark) { - if ((void *)redoblockstateHead == (void *)&g_cleanupMark) { - g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); - SPSCBlockingQueuePop(g_redoWorker->queue); - ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]RedoPageWorkerMain has cleaned InvalidPages"))); - continue; - } - if ((void 
*)redoblockstateHead == (void *)&g_GlobalLsnForwarder) { - PageWorkerProcLsnForwarder((RedoItem *)redoblockstateHead); - SPSCBlockingQueuePop(g_redoWorker->queue); - continue; - } - RedoBufferInfo bufferinfo = {0}; - bool notfound = false; - bool updateFsm = false; - - XLogRecParseState *procState = redoblockstateHead; - - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - while (procState != NULL) { - XLogRecParseState *redoblockstate = procState; - procState = (XLogRecParseState *)procState->nextrecord; - - switch (XLogBlockHeadGetValidInfo(&redoblockstate->blockparse.blockhead)) { - case BLOCK_DATA_MAIN_DATA_TYPE: - case BLOCK_DATA_UNDO_TYPE: - case BLOCK_DATA_VM_TYPE: - case BLOCK_DATA_FSM_TYPE: - notfound = XLogBlockRedoForExtremeRTO(redoblockstate, &bufferinfo, notfound); - break; - case BLOCK_DATA_XLOG_COMMON_TYPE: - RedoPageWorkerCheckPoint(redoblockstate); - SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, - redoblockstate->blockparse.blockhead.end_ptr); - break; - case BLOCK_DATA_DDL_TYPE: - XLogForgetDDLRedo(redoblockstate); - SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, - redoblockstate->blockparse.blockhead.end_ptr); - break; - case BLOCK_DATA_DROP_DATABASE_TYPE: - XLogDropDatabase(redoblockstate->blockparse.blockhead.dbNode); - SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, - redoblockstate->blockparse.blockhead.end_ptr); - break; - case BLOCK_DATA_NEWCU_TYPE: - RedoPageWorkerRedoBcmBlock(redoblockstate); - break; - case BLOCK_DATA_SEG_SPACE_DROP: - XLogDropSegmentSpace(redoblockstate->blockparse.blockhead.spcNode, - redoblockstate->blockparse.blockhead.dbNode); - SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, - redoblockstate->blockparse.blockhead.end_ptr); - break; - case BLOCK_DATA_SEG_SPACE_SHRINK: - XLogDropSpaceShrink(redoblockstate); - SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, - redoblockstate->blockparse.blockhead.end_ptr); - break; - case BLOCK_DATA_BARRIER_TYPE: - SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, - redoblockstate->blockparse.blockhead.end_ptr); - default: - break; - } - } - (void)MemoryContextSwitchTo(oldCtx); - - updateFsm = XlogNeedUpdateFsm(redoblockstateHead, &bufferinfo); - bool needWait = redoblockstateHead->isFullSync; - if (needWait) { - pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1); - } - XLogBlockParseStateRelease(redoblockstateHead); - /* the same page */ - ExtremeRtoFlushBuffer(&bufferinfo, updateFsm); - SPSCBlockingQueuePop(g_redoWorker->queue); - - pg_memory_barrier(); - uint32 val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); - while (val != 0) { - RedoInterruptCallBack(); - val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); - } - RedoInterruptCallBack(); - ADD_ABNORMAL_POSITION(4); - } - - SPSCBlockingQueuePop(g_redoWorker->queue); - if (ParseStateWithoutCache()) - XLogRedoBufferDestoryFunc(&(g_redoWorker->bufferManager)); -} - -void PutRecordToReadQueue(XLogReaderState *recordreader) -{ - SPSCBlockingQueuePut(g_dispatcher->readLine.readPageThd->queue, recordreader); -} - -inline void InitXLogRecordReadBuffer(XLogReaderState **initreader) -{ - XLogReaderState *newxlogreader; - XLogReaderState *readstate = g_dispatcher->rtoXlogBufState.initreader; - newxlogreader = NewReaderState(readstate); - g_dispatcher->rtoXlogBufState.initreader = NULL; - PutRecordToReadQueue(readstate); 
- SetCompletedReadEndPtr(g_redoWorker, readstate->ReadRecPtr, readstate->EndRecPtr); - *initreader = newxlogreader; -} - -void StartupSendFowarder(RedoItem *item) -{ - for (uint32 i = 0; i < g_dispatcher->pageLineNum; ++i) { - AddPageRedoItem(g_dispatcher->pageLines[i].batchThd, item); - } - - AddPageRedoItem(g_dispatcher->trxnLine.managerThd, item); -} - -void SendLsnFowarder() -{ - // update and read in the same thread, so no need atomic operation - g_GlobalLsnForwarder.record.ReadRecPtr = g_redoWorker->lastReplayedReadRecPtr; - g_GlobalLsnForwarder.record.EndRecPtr = g_redoWorker->lastReplayedEndRecPtr; - g_GlobalLsnForwarder.record.refcount = get_real_recovery_parallelism() - XLOG_READER_NUM; - g_GlobalLsnForwarder.record.isDecode = true; - PutRecordToReadQueue(&g_GlobalLsnForwarder.record); -} - -void PushToWorkerLsn(bool force) -{ - const uint32 max_record_count = PAGE_WORK_QUEUE_SIZE; - static uint32 cur_recor_count = 0; - - cur_recor_count++; - - if (!IsExtremeRtoRunning()) { - return; - } - - if (force) { - uint32 refCount; - do { - refCount = pg_atomic_read_u32(&g_GlobalLsnForwarder.record.refcount); - RedoInterruptCallBack(); - } while (refCount != 0); - cur_recor_count = 0; - SendLsnFowarder(); - } else { - uint32 refCount = pg_atomic_read_u32(&g_GlobalLsnForwarder.record.refcount); - - if (refCount != 0 || cur_recor_count < max_record_count) { - return; - } - - SendLsnFowarder(); - cur_recor_count = 0; - } -} - -void ResetRtoXlogReadBuf(XLogRecPtr targetPagePtr) -{ - uint32 startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - if (startreadworker == WORKER_STATE_STOP) { - WalRcvCtlAcquireExitLock(); - WalRcvCtlBlock *walrcb = getCurrentWalRcvCtlBlock(); - - if (walrcb == NULL) { - WalRcvCtlReleaseExitLock(); - return; - } - - int64 walwriteoffset; - XLogRecPtr startptr; - SpinLockAcquire(&walrcb->mutex); - walwriteoffset = walrcb->walWriteOffset; - startptr = walrcb->walStart; - SpinLockRelease(&walrcb->mutex); - - if (XLByteLT(startptr, targetPagePtr)) { - WalRcvCtlReleaseExitLock(); - return; - } - - int64 buflen = (int64)(startptr - targetPagePtr); - int64 walReadOffset = walwriteoffset - buflen; - Assert(walReadOffset >= 0); - const int64 recBufferSize = g_instance.attr.attr_storage.WalReceiverBufSize * 1024; - SpinLockAcquire(&walrcb->mutex); - walrcb->lastReadPtr = targetPagePtr; - walrcb->walReadOffset = walReadOffset; - if (walrcb->walReadOffset == recBufferSize) { - walrcb->walReadOffset = 0; - if (walrcb->walWriteOffset == recBufferSize) { - walrcb->walWriteOffset = 0; - if (walrcb->walFreeOffset == recBufferSize) - walrcb->walFreeOffset = 0; - } - } - SpinLockRelease(&walrcb->mutex); - - for (uint32 i = 0; i < MAX_ALLOC_SEGNUM; ++i) { - pg_atomic_write_u32(&(g_recordbuffer->xlogsegarray[i].bufState), NONE); - } - - XLogSegNo segno; - XLByteToSeg(targetPagePtr, segno); - g_recordbuffer->xlogsegarray[g_recordbuffer->applyindex].segno = segno; - g_recordbuffer->xlogsegarray[g_recordbuffer->applyindex].readlen = targetPagePtr % XLOG_SEG_SIZE; - - pg_atomic_write_u32(&(g_recordbuffer->readindex), g_recordbuffer->applyindex); - pg_atomic_write_u32(&(g_recordbuffer->xlogsegarray[g_recordbuffer->readindex].bufState), APPLYING); - - pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_RUN); - WalRcvCtlReleaseExitLock(); - } -} - -RecordBufferAarray *GetCurrentSegmentBuf(XLogRecPtr targetPagePtr) -{ - Assert(g_recordbuffer->applyindex < MAX_ALLOC_SEGNUM); - uint32 applyindex = g_recordbuffer->applyindex; - RecordBufferAarray *cursegbuffer 
= &g_recordbuffer->xlogsegarray[applyindex]; - uint32 bufState = pg_atomic_read_u32(&(cursegbuffer->bufState)); - - if (bufState != APPLYING) { - return NULL; - } - uint32 targetPageOff = (targetPagePtr % XLOG_SEG_SIZE); - XLogSegNo targetSegNo; - XLByteToSeg(targetPagePtr, targetSegNo); - if (cursegbuffer->segno == targetSegNo) { - cursegbuffer->segoffset = targetPageOff; - return cursegbuffer; - } else if (cursegbuffer->segno + 1 == targetSegNo) { - Assert(targetPageOff == 0); - pg_atomic_write_u32(&(cursegbuffer->bufState), APPLIED); - if ((applyindex + 1) == MAX_ALLOC_SEGNUM) { - applyindex = 0; - } else { - applyindex++; - } - - pg_atomic_write_u32(&(g_recordbuffer->applyindex), applyindex); - cursegbuffer = &g_recordbuffer->xlogsegarray[applyindex]; - bufState = pg_atomic_read_u32(&(cursegbuffer->bufState)); - if (bufState != APPLYING) { - return NULL; - } - - Assert(cursegbuffer->segno == targetSegNo); - cursegbuffer->segoffset = targetPageOff; - return cursegbuffer; - } else { - ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("SetReadBufferForExtRto targetPagePtr:%lu", targetPagePtr))); - DumpExtremeRtoReadBuf(); - t_thrd.xlog_cxt.failedSources |= XLOG_FROM_STREAM; - return NULL; - } -} - -static const int MAX_WAIT_TIMS = 512; - -bool XLogPageReadForExtRto(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen) -{ - uint32 startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - if (startreadworker == WORKER_STATE_RUN) { - RecordBufferAarray *cursegbuffer = GetCurrentSegmentBuf(targetPagePtr); - if (cursegbuffer == NULL) { - return false; - } - - uint32 readlen = pg_atomic_read_u32(&(cursegbuffer->readlen)); - - uint32 waitcount = 0; - while (readlen < (cursegbuffer->segoffset + reqLen)) { - readlen = pg_atomic_read_u32(&(cursegbuffer->readlen)); - if (waitcount >= MAX_WAIT_TIMS) { - return false; - } - waitcount++; - } - - Assert(cursegbuffer->segoffset == (targetPagePtr % XLogSegSize)); - xlogreader->readBuf = cursegbuffer->readsegbuf + cursegbuffer->segoffset; - return true; - } - - return false; -} - -void XLogReadWorkerSegFallback(XLogSegNo lastRplSegNo) -{ - errno_t errorno = EOK; - uint32 readindex = pg_atomic_read_u32(&(g_recordbuffer->readindex)); - uint32 applyindex = pg_atomic_read_u32(&(g_recordbuffer->applyindex)); - RecordBufferAarray *readseg = &g_recordbuffer->xlogsegarray[readindex]; - RecordBufferAarray *applyseg = &g_recordbuffer->xlogsegarray[applyindex]; - - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("XLogReadWorkerSegFallback: readindex: %u, readseg[%lu,%lu,%u,%u], applyindex: %u," - "applyseg[%lu,%lu,%u,%u]", - readindex, readseg->segno, readseg->segoffset, readseg->readlen, readseg->bufState, applyindex, - applyseg->segno, applyseg->segoffset, applyseg->readlen, applyseg->bufState))); - - pg_atomic_write_u32(&(g_recordbuffer->readindex), applyindex); - pg_atomic_write_u32(&(readseg->bufState), APPLIED); - applyseg->segno = lastRplSegNo; - applyseg->readlen = applyseg->segoffset; - errorno = memset_s(applyseg->readsegbuf, XLOG_SEG_SIZE, 0, XLOG_SEG_SIZE); - securec_check(errorno, "", ""); -} - -bool CloseReadFile() -{ - if (t_thrd.xlog_cxt.readFile >= 0) { - close(t_thrd.xlog_cxt.readFile); - t_thrd.xlog_cxt.readFile = -1; - return true; - } - return false; -} - -void DispatchCleanupMarkToAllRedoWorker() -{ - for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { - PageRedoWorker *worker = g_dispatcher->allWorkers[i]; - if (worker->role == REDO_PAGE_WORKER) { - 
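/* this path hands the cleanup mark only to REDO_PAGE_WORKER queues; trxn and batch workers receive it through their own pipelines */ -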
SPSCBlockingQueuePut(worker->queue, &g_cleanupMark); - } - } -} - -void WaitAllRedoWorkerIdle() -{ - instr_time startTime; - instr_time endTime; - bool allIdle = false; - INSTR_TIME_SET_CURRENT(startTime); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("WaitAllRedoWorkerIdle begin, startTime: %lu us", INSTR_TIME_GET_MICROSEC(startTime)))); - while (!allIdle) { - allIdle = true; - for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { - PageRedoWorker *worker = g_dispatcher->allWorkers[i]; - if (worker->role == REDO_READ_WORKER || worker->role == REDO_READ_MNG) { - continue; - } - if (!RedoWorkerIsIdle(worker)) { - allIdle = false; - break; - } - } - RedoInterruptCallBack(); - } - INSTR_TIME_SET_CURRENT(endTime); - INSTR_TIME_SUBTRACT(endTime, startTime); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("WaitAllRedoWorkerIdle end, cost time: %lu us", INSTR_TIME_GET_MICROSEC(endTime)))); -} - -void XLogForceFinish(XLogReaderState *xlogreader, TermFileData *term_file) -{ - bool closed = false; - uint32 termId = term_file->term; - XLogSegNo lastRplSegNo; - - pg_atomic_write_u32(&(extreme_rto::g_recordbuffer->readWorkerState), extreme_rto::WORKER_STATE_STOPPING); - while (pg_atomic_read_u32(&(extreme_rto::g_recordbuffer->readWorkerState)) != WORKER_STATE_STOP) { - RedoInterruptCallBack(); - }; - ShutdownWalRcv(); - ShutdownDataRcv(); - pg_atomic_write_u32(&(g_recordbuffer->readSource), XLOG_FROM_PG_XLOG); - - PushToWorkerLsn(true); - g_cleanupMark.record.isDecode = true; - PutRecordToReadQueue(&g_cleanupMark.record); - WaitAllRedoWorkerIdle(); - - XLogRecPtr lastRplReadLsn; - XLogRecPtr lastRplEndLsn = GetXLogReplayRecPtr(NULL, &lastRplReadLsn); - XLogRecPtr receivedUpto = GetWalRcvWriteRecPtr(NULL); - XLogRecPtr endRecPtr = xlogreader->EndRecPtr; - ereport(WARNING, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]ArchiveXlogForForceFinishRedo in extremeRTO " - "lastRplReadLsn:%08X/%08X, lastRplEndLsn:%08X/%08X, receivedUpto:%08X/%08X, ReadRecPtr:%08X/%08X, " - "EndRecPtr:%08X/%08X, readOff:%u, latestValidRecord:%08X/%08X", - (uint32)(lastRplReadLsn >> 32), (uint32)lastRplReadLsn,(uint32)(lastRplEndLsn >> 32), (uint32)lastRplEndLsn, - (uint32)(receivedUpto >> 32), (uint32)receivedUpto,(uint32)(xlogreader->ReadRecPtr >> 32), - (uint32)xlogreader->ReadRecPtr, (uint32)(xlogreader->EndRecPtr >> 32), (uint32)xlogreader->EndRecPtr, - xlogreader->readOff, (uint32)(latestValidRecord >> 32), (uint32)latestValidRecord))); - DumpExtremeRtoReadBuf(); - xlogreader->readOff = INVALID_READ_OFF; - XLByteToSeg(endRecPtr, lastRplSegNo); - XLogReadWorkerSegFallback(lastRplSegNo); - - closed = CloseReadFile(); - CopyXlogForForceFinishRedo(lastRplSegNo, termId, xlogreader, endRecPtr); - RenameXlogForForceFinishRedo(lastRplSegNo, xlogreader->readPageTLI, termId); - if (closed) { - ReOpenXlog(xlogreader); - } - t_thrd.xlog_cxt.invaildPageCnt = 0; - XLogCheckInvalidPages(); - SetSwitchHistoryFile(endRecPtr, receivedUpto, termId); - t_thrd.xlog_cxt.invaildPageCnt = 0; - set_wal_rcv_write_rec_ptr(endRecPtr); - t_thrd.xlog_cxt.receivedUpto = endRecPtr; - pg_atomic_write_u32(&(g_instance.comm_cxt.localinfo_cxt.is_finish_redo), 0); - ereport(WARNING, - (errcode(ERRCODE_LOG), errmsg("[ForceFinish]ArchiveXlogForForceFinishRedo in extremeRTO is over"))); -} - - - -static inline bool ReadPageWorkerStop() -{ - return g_dispatcher->recoveryStop; -} - -void CleanUpReadPageWorkerQueue() -{ - SPSCBlockingQueue *queue = g_dispatcher->readLine.readPageThd->queue; - uint32 state; - do { - while 
(!SPSCBlockingQueueIsEmpty(queue)) { - XLogReaderState *xlogreader = reinterpret_cast<XLogReaderState *>(SPSCBlockingQueueTake(queue)); - if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_redoEndMark.record)) || - xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record)) || - xlogreader == reinterpret_cast<XLogReaderState *>(&(g_cleanupMark.record))) { - if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record))) { - pg_atomic_write_u32(&g_GlobalLsnForwarder.record.refcount, 0); - } - continue; - } - - RedoItem *item = GetRedoItemPtr(xlogreader); - FreeRedoItem(item); - } - - RedoInterruptCallBack(); - state = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readPageWorkerState); - } while (state != WORKER_STATE_EXIT); -} - -void ExtremeRtoStopHere() -{ - if ((get_real_recovery_parallelism() > 1) && (GetBatchCount() > 0)) { - g_dispatcher->recoveryStop = true; - CleanUpReadPageWorkerQueue(); - } -} - -void CheckToForcePushLsn(const XLogRecord *record) -{ - if (record == NULL) { - return; - } - if (record->xl_rmid == RM_BARRIER_ID && ((record->xl_info & ~XLR_INFO_MASK) == XLOG_BARRIER_CREATE)) { - PushToWorkerLsn(true); - } else { - PushToWorkerLsn(false); - } -} - -/* read xlog for parallel */ -void XLogReadPageWorkerMain() -{ - XLogReaderState *xlogreader = NULL; - - (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - - g_recordbuffer = &g_dispatcher->rtoXlogBufState; - GetRecoveryLatch(); - /* init readstate */ - InitXLogRecordReadBuffer(&xlogreader); - - pg_atomic_write_u32(&(g_recordbuffer->readPageWorkerState), WORKER_STATE_RUN); - if (IsRecoveryDone()) { - t_thrd.xlog_cxt.readSource = XLOG_FROM_STREAM; - t_thrd.xlog_cxt.XLogReceiptSource = XLOG_FROM_STREAM; - pg_atomic_write_u32(&(g_recordbuffer->readSource), XLOG_FROM_STREAM); - } - - XLogRecord *record = XLogParallelReadNextRecord(xlogreader); - while (record != NULL) { - TermFileData term_file; - if (ReadPageWorkerStop()) { - break; - } - XLogReaderState *newxlogreader = NewReaderState(xlogreader); - PutRecordToReadQueue(xlogreader); - xlogreader = newxlogreader; - - g_redoWorker->lastReplayedReadRecPtr = xlogreader->ReadRecPtr; - g_redoWorker->lastReplayedEndRecPtr = xlogreader->EndRecPtr; - - RedoInterruptCallBack(); - if (FORCE_FINISH_ENABLED && CheckForForceFinishRedoTrigger(&term_file)) { - ereport(WARNING, - (errmsg("[ForceFinish] force finish triggered in XLogReadPageWorkerMain, ReadRecPtr:%08X/%08X, " - "EndRecPtr:%08X/%08X, StandbyMode:%u, startup_processing:%u, dummyStandbyMode:%u", - (uint32)(t_thrd.xlog_cxt.ReadRecPtr >> 32), (uint32)t_thrd.xlog_cxt.ReadRecPtr, - (uint32)(t_thrd.xlog_cxt.EndRecPtr >> 32), (uint32)t_thrd.xlog_cxt.EndRecPtr, - t_thrd.xlog_cxt.StandbyMode, t_thrd.xlog_cxt.startup_processing, dummyStandbyMode))); - XLogForceFinish(xlogreader, &term_file); - } - record = XLogParallelReadNextRecord(xlogreader); - CheckToForcePushLsn(record); - ADD_ABNORMAL_POSITION(8); - } - - uint32 workState = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - while (workState == WORKER_STATE_STOPPING) { - workState = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - } - - if (workState != WORKER_STATE_EXITING && workState != WORKER_STATE_EXIT) { - pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_EXITING); - } - - if (!ReadPageWorkerStop()) { - /* notify exit */ - PushToWorkerLsn(true); - g_redoEndMark.record = *xlogreader; - g_redoEndMark.record.isDecode = true; - PutRecordToReadQueue((XLogReaderState *)&g_redoEndMark.record); - } - - ReLeaseRecoveryLatch(); - 
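/* publishing WORKER_STATE_EXIT below is this worker's last step; CleanUpReadPageWorkerQueue and WaitPageReadWorkerExit both poll readPageWorkerState for it */ -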
pg_atomic_write_u32(&(g_recordbuffer->readPageWorkerState), WORKER_STATE_EXIT); -} - -void HandleReadWorkerRunInterrupts() -{ - if (t_thrd.page_redo_cxt.got_SIGHUP) { - t_thrd.page_redo_cxt.got_SIGHUP = false; - ProcessConfigFile(PGC_SIGHUP); - } - - if (t_thrd.page_redo_cxt.shutdown_requested) { - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("page worker id %u exit for request", g_redoWorker->id))); - - pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), - PAGE_REDO_WORKER_EXIT); - - proc_exit(1); - } -} - -static void InitReadBuf(uint32 bufIndex, XLogSegNo segno) -{ - if (bufIndex == MAX_ALLOC_SEGNUM) { - bufIndex = 0; - } - const uint32 sleepTime = 50; /* 50 us */ - RecordBufferAarray *nextreadseg = &g_recordbuffer->xlogsegarray[bufIndex]; - pg_memory_barrier(); - - uint32 bufState = pg_atomic_read_u32(&(nextreadseg->bufState)); - uint32 startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - while (bufState == APPLYING && startreadworker == WORKER_STATE_RUN) { - pg_usleep(sleepTime); - RedoInterruptCallBack(); - bufState = pg_atomic_read_u32(&(nextreadseg->bufState)); - startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - } - - nextreadseg->readlen = 0; - nextreadseg->segno = segno; - nextreadseg->segoffset = 0; - pg_atomic_write_u32(&(nextreadseg->bufState), APPLYING); - pg_atomic_write_u32(&(g_recordbuffer->readindex), bufIndex); -} - -static void XLogReadWorkRun() -{ - static uint32 waitcount = 0; - const uint32 sleepTime = 100; /* 100 us */ - XLogSegNo targetSegNo; - uint32 writeoffset; - uint32 reqlen; - - uint32 readindex = pg_atomic_read_u32(&(g_recordbuffer->readindex)); - Assert(readindex < MAX_ALLOC_SEGNUM); - pg_memory_barrier(); - RecordBufferAarray *readseg = &g_recordbuffer->xlogsegarray[readindex]; - - XLogRecPtr receivedUpto = GetWalStartPtr(); - XLByteToSeg(receivedUpto, targetSegNo); - - if (targetSegNo < readseg->segno) { - pg_usleep(sleepTime); - return; - } - - writeoffset = readseg->readlen; - if (targetSegNo != readseg->segno) { - reqlen = XLOG_SEG_SIZE - writeoffset; - } else { - uint32 targetPageOff = receivedUpto % XLOG_SEG_SIZE; - if (targetPageOff <= writeoffset) { - pg_usleep(sleepTime); - return; - } - reqlen = targetPageOff - writeoffset; - if (reqlen < XLOG_BLCKSZ) { - waitcount++; - uint32 flag = pg_atomic_read_u32(&g_readManagerTriggerFlag); - if (waitcount < MAX_WAIT_TIMS && flag == TRIGGER_NORMAL) { - pg_usleep(sleepTime); - return; - } - } - } - - waitcount = 0; - char *readBuf = readseg->readsegbuf + writeoffset; - XLogRecPtr targetStartPtr = readseg->segno * XLOG_SEG_SIZE + writeoffset; - uint32 readlen = 0; - bool result = XLogReadFromWriteBuffer(targetStartPtr, reqlen, readBuf, &readlen); - if (!result) { - return; - } - - pg_atomic_write_u32(&(readseg->readlen), (writeoffset + readlen)); - if (readseg->readlen == XLOG_SEG_SIZE) { - InitReadBuf(readindex + 1, readseg->segno + 1); - } -} - -void XLogReadManagerResponseSignal(uint32 trigger) -{ - switch (trigger) { - case TRIGGER_PRIMARY: - break; - case TRIGGER_FAILOVER: - if (t_thrd.xlog_cxt.is_cascade_standby) { - SendPostmasterSignal(PMSIGNAL_UPDATE_PROMOTING); - t_thrd.xlog_cxt.is_cascade_standby = false; - if (t_thrd.postmaster_cxt.HaShmData->is_cross_region) { - t_thrd.xlog_cxt.is_hadr_main_standby = true; - SpinLockAcquire(&t_thrd.postmaster_cxt.HaShmData->mutex); - t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby = true; - 
SpinLockRelease(&t_thrd.postmaster_cxt.HaShmData->mutex); - } - t_thrd.xlog_cxt.failover_triggered = false; - SendNotifySignal(NOTIFY_STANDBY, g_instance.pid_cxt.StartupPID); - SendPostmasterSignal(PMSIGNAL_UPDATE_NORMAL); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("failover standby ready, notify postmaster to change state."))); - break; - } - t_thrd.xlog_cxt.failover_triggered = true; - SendPostmasterSignal(PMSIGNAL_UPDATE_PROMOTING); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("failover ready, notify postmaster to change state."))); - break; - case TRIGGER_SWITCHOVER: - t_thrd.xlog_cxt.switchover_triggered = true; - SendPostmasterSignal(PMSIGNAL_UPDATE_PROMOTING); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("switchover ready, notify postmaster to change state."))); - break; - default: - break; - } -} - -void XLogReadManagerProcInterrupt() -{ - if (t_thrd.page_redo_cxt.shutdown_requested) { - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("page worker id %u exit for request", g_redoWorker->id))); - - pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), - PAGE_REDO_WORKER_EXIT); - - proc_exit(1); - } - - if (t_thrd.page_redo_cxt.got_SIGHUP) { - t_thrd.page_redo_cxt.got_SIGHUP = false; - ProcessConfigFile(PGC_SIGHUP); - } -} - -void WaitPageReadWorkerExit() -{ - uint32 state; - do { - state = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readPageWorkerState); - RedoInterruptCallBack(); - } while (state != WORKER_STATE_EXIT); -} - -static void HandleExtremeRtoCascadeStandbyPromote(uint32 trigger) -{ - if (!t_thrd.xlog_cxt.is_cascade_standby || t_thrd.xlog_cxt.server_mode != STANDBY_MODE || - !IS_DN_MULTI_STANDYS_MODE()) { - return; - } - - ShutdownWalRcv(); - pg_atomic_write_u32(&g_dispatcher->rtoXlogBufState.waitRedoDone, 1); - WakeupRecovery(); - XLogReadManagerResponseSignal(trigger); - pg_atomic_write_u32(&(extreme_rto::g_startupTriggerState), TRIGGER_NORMAL); -} - -bool XLogReadManagerCheckSignal() -{ - uint32 trigger = pg_atomic_read_u32(&(extreme_rto::g_startupTriggerState)); - load_server_mode(); - if (g_dispatcher->smartShutdown || trigger == TRIGGER_PRIMARY || trigger == TRIGGER_SWITCHOVER || - (trigger == TRIGGER_FAILOVER && t_thrd.xlog_cxt.server_mode == STANDBY_MODE) || - t_thrd.xlog_cxt.server_mode == PRIMARY_MODE) { - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("XLogReadManagerCheckSignal: smartShutdown:%u, trigger:%u, server_mode:%u", - g_dispatcher->smartShutdown, trigger, t_thrd.xlog_cxt.server_mode))); - if (t_thrd.xlog_cxt.is_cascade_standby && t_thrd.xlog_cxt.server_mode == STANDBY_MODE && - IS_DN_MULTI_STANDYS_MODE() && (trigger == TRIGGER_SWITCHOVER || trigger == TRIGGER_FAILOVER)) { - HandleExtremeRtoCascadeStandbyPromote(trigger); - return false; - } - ShutdownWalRcv(); - if (g_dispatcher->smartShutdown) { - pg_atomic_write_u32(&g_readManagerTriggerFlag, TRIGGER_SMARTSHUTDOWN); - } else { - pg_atomic_write_u32(&g_readManagerTriggerFlag, trigger); - } - WakeupRecovery(); - WaitPageReadWorkerExit(); - XLogReadManagerResponseSignal(trigger); - return true; - } - return false; -} - -void StartRequestXLogFromStream() -{ - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - XLogRecPtr expectLsn = pg_atomic_read_u64(&g_dispatcher->rtoXlogBufState.expectLsn); - if (walrcv->receivedUpto == InvalidXLogRecPtr || - (expectLsn != InvalidXLogRecPtr && XLByteLE(walrcv->receivedUpto, 
expectLsn))) { - uint32 readWorkerstate = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - if (readWorkerstate == WORKER_STATE_RUN) { - pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_STOPPING); - } - SpinLockAcquire(&walrcv->mutex); - walrcv->receivedUpto = 0; - SpinLockRelease(&walrcv->mutex); - XLogRecPtr targetRecPtr = pg_atomic_read_u64(&g_dispatcher->rtoXlogBufState.targetRecPtr); - CheckMaxPageFlushLSN(targetRecPtr); - - uint32 shiftSize = 32; - if (IS_DISASTER_RECOVER_MODE) { - ereport(LOG, (errmsg("request xlog stream from obs at %X/%X.", (uint32)(targetRecPtr >> shiftSize), - (uint32)targetRecPtr))); - RequestXLogStreaming(&targetRecPtr, 0, REPCONNTARGET_OBS, 0); - } else if (IS_SHARED_STORAGE_STANBY_MODE && !IS_SHARED_STORAGE_MAIN_STANDBY_MODE) { -#ifndef ENABLE_MULTIPLE_NODES - rename_recovery_conf_for_roach(); -#endif - ereport(LOG, (errmsg("request xlog stream from shared storage at %X/%X.", - (uint32)(targetRecPtr >> shiftSize), - (uint32)targetRecPtr))); - RequestXLogStreaming(&targetRecPtr, 0, REPCONNTARGET_SHARED_STORAGE, 0); - } else { -#ifndef ENABLE_MULTIPLE_NODES - rename_recovery_conf_for_roach(); -#endif - ereport(LOG, (errmsg("request xlog stream at %X/%X.", (uint32)(targetRecPtr >> shiftSize), - (uint32)targetRecPtr))); - RequestXLogStreaming(&targetRecPtr, t_thrd.xlog_cxt.PrimaryConnInfo, REPCONNTARGET_PRIMARY, - u_sess->attr.attr_storage.PrimarySlotName); - } - } -} - - -void XLogReadManagerMain() -{ - const long sleepShortTime = 100000L; - const long sleepLongTime = 1000000L; - g_recordbuffer = &g_dispatcher->rtoXlogBufState; - uint32 xlogReadManagerState = READ_MANAGER_RUN; - - (void)RegisterRedoInterruptCallBack(XLogReadManagerProcInterrupt); - - while (xlogReadManagerState == READ_MANAGER_RUN) { - RedoInterruptCallBack(); - bool exitStatus = XLogReadManagerCheckSignal(); - if (exitStatus) { - break; - } - xlogReadManagerState = pg_atomic_read_u32(&g_dispatcher->rtoXlogBufState.xlogReadManagerState); - ADD_ABNORMAL_POSITION(7); - if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE) { - uint32 readSource = pg_atomic_read_u32(&g_dispatcher->rtoXlogBufState.readSource); - uint32 failSource = pg_atomic_read_u32(&g_dispatcher->rtoXlogBufState.failSource); - if (readSource & XLOG_FROM_STREAM) { - uint32 disableConnectionNode = - pg_atomic_read_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node); - bool retryConnect = ((!disableConnectionNode) || (IS_SHARED_STORAGE_MODE && disableConnectionNode && - !knl_g_get_redo_finish_status() && - !pg_atomic_read_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage))); - if (!WalRcvInProgress() && g_instance.pid_cxt.WalReceiverPID == 0 && retryConnect) { - StartRequestXLogFromStream(); - } else { - if (disableConnectionNode) { - if (IS_SHARED_STORAGE_MODE && WalRcvIsRunning()) { - ShutdownWalRcv(); - } - - if (!WalRcvInProgress() && !knl_g_get_redo_finish_status()) { - pg_atomic_write_u32(&g_dispatcher->rtoXlogBufState.waitRedoDone, 1); - WakeupRecovery(); - pg_usleep(sleepLongTime); - } else if (knl_g_get_redo_finish_status()) { - pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, false); - pg_usleep(sleepLongTime); - } - - } - } - } - - if (failSource & XLOG_FROM_STREAM) { - ShutdownWalRcv(); - pg_atomic_write_u32(&(extreme_rto::g_dispatcher->rtoXlogBufState.failSource), 0); - } - } - pg_usleep(sleepShortTime); - } -} - -static void ReadWorkerStopCallBack(int code, Datum arg) -{ - pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), 
WORKER_STATE_EXIT); -} - -void XLogReadWorkerMain() -{ - uint32 startreadworker; - const uint32 sleepTime = 50; /* 50 us */ - - on_shmem_exit(ReadWorkerStopCallBack, 0); - (void)RegisterRedoInterruptCallBack(HandleReadWorkerRunInterrupts); - - g_recordbuffer = &g_dispatcher->rtoXlogBufState; - startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - while (startreadworker != WORKER_STATE_EXITING) { - if (startreadworker == WORKER_STATE_RUN) { - XLogReadWorkRun(); - } else { - pg_usleep(sleepTime); - } - - RedoInterruptCallBack(); - startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); - if (startreadworker == WORKER_STATE_STOPPING) { - pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_STOP); - } - ADD_ABNORMAL_POSITION(6); - }; - /* notify manger to exit */ - pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_EXIT); -} - -int RedoMainLoop() -{ - g_redoWorker->oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); - - instr_time startTime; - instr_time endTime; - - INSTR_TIME_SET_CURRENT(startTime); - switch (g_redoWorker->role) { - case REDO_BATCH: - BatchRedoMain(); - break; - case REDO_PAGE_MNG: - RedoPageManagerMain(); - break; - case REDO_PAGE_WORKER: - RedoPageWorkerMain(); - break; - case REDO_TRXN_MNG: - TrxnManagerMain(); - break; - case REDO_TRXN_WORKER: - TrxnWorkMain(); - break; - case REDO_READ_WORKER: - XLogReadWorkerMain(); - break; - case REDO_READ_PAGE_WORKER: - XLogReadPageWorkerMain(); - break; - case REDO_READ_MNG: - XLogReadManagerMain(); - break; - default: - break; - } - - INSTR_TIME_SET_CURRENT(endTime); - INSTR_TIME_SUBTRACT(endTime, startTime); - - /* - * We need to get the exit code here before we allow the dispatcher - * to proceed and change the exit code. - */ - int exitCode = GetDispatcherExitCode(); - g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); -#ifndef ENABLE_MULTIPLE_NODES - g_redoWorker->committingCsnList = XLogReleaseAdnGetCommittingCsnList(); -#endif - - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("worker[%d]: exitcode = %d, total elapsed = %ld", g_redoWorker->id, exitCode, - INSTR_TIME_GET_MICROSEC(endTime)))); - - (void)MemoryContextSwitchTo(g_redoWorker->oldCtx); - - return exitCode; -} - -void ParallelRedoThreadRegister() -{ - bool isWorkerStarting = false; - SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); - isWorkerStarting = ((g_instance.comm_cxt.predo_cxt.state == REDO_STARTING_BEGIN) ? true : false); - if (isWorkerStarting) { - pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), - PAGE_REDO_WORKER_READY); - } - SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - if (!isWorkerStarting) { - ereport(LOG, (errmsg("ParallelRedoThreadRegister Page-redo-worker %u exit.", (uint32)isWorkerStarting))); - SetPageWorkStateByThreadId(PAGE_REDO_WORKER_EXIT); - proc_exit(0); - } -} - -void WaitStateNormal() -{ - do { - RedoInterruptCallBack(); - } while (g_instance.comm_cxt.predo_cxt.state < REDO_IN_PROGRESS); -} - -/* Run from the worker thread. 
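ParallelRedoThreadMain is the common entry of every page-redo thread: it registers the thread with the dispatcher, installs signal handlers, copies dispatcher state into thread-local globals, waits for the recovery state to reach REDO_IN_PROGRESS, and then branches by role in RedoMainLoop. 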
*/ -void ParallelRedoThreadMain() -{ - ParallelRedoThreadRegister(); - ereport(LOG, (errmsg("Page-redo-worker thread %u started, role:%u, slotId:%u.", g_redoWorker->id, - g_redoWorker->role, g_redoWorker->slotId))); - // regitster default interrupt call back - (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - SetupSignalHandlers(); - InitGlobals(); - - ResourceManagerStartup(); - WaitStateNormal(); - EnableSyncRequestForwarding(); - - int retCode = RedoMainLoop(); - ResourceManagerStop(); - ereport(LOG, (errmsg("Page-redo-worker thread %u terminated, role:%u, slotId:%u, retcode %u.", g_redoWorker->id, - g_redoWorker->role, g_redoWorker->slotId, retCode))); - LastMarkReached(); - - pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), - PAGE_REDO_WORKER_EXIT); - proc_exit(0); -} - -static void PageRedoShutdownHandler(SIGNAL_ARGS) -{ - t_thrd.page_redo_cxt.shutdown_requested = 1; -} - -static void PageRedoQuickDie(SIGNAL_ARGS) -{ - int status = 2; - gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); - on_exit_reset(); - exit(status); -} - -static void PageRedoUser2Handler(SIGNAL_ARGS) -{ - t_thrd.page_redo_cxt.sleep_long = 1; -} - -/* Run from the worker thread. */ -static void SetupSignalHandlers() -{ - (void)gspqsignal(SIGHUP, SigHupHandler); - (void)gspqsignal(SIGINT, SIG_IGN); - (void)gspqsignal(SIGTERM, PageRedoShutdownHandler); - (void)gspqsignal(SIGQUIT, PageRedoQuickDie); - (void)gspqsignal(SIGALRM, SIG_IGN); - (void)gspqsignal(SIGPIPE, SIG_IGN); - (void)gspqsignal(SIGUSR1, SIG_IGN); - (void)gspqsignal(SIGUSR2, PageRedoUser2Handler); - (void)gspqsignal(SIGCHLD, SIG_IGN); - (void)gspqsignal(SIGTTIN, SIG_IGN); - (void)gspqsignal(SIGTTOU, SIG_IGN); - (void)gspqsignal(SIGCONT, SIG_IGN); - (void)gspqsignal(SIGWINCH, SIG_IGN); - - gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); - (void)gs_signal_unblock_sigusr2(); -} - -/* Run from the worker thread. */ -static void SigHupHandler(SIGNAL_ARGS) -{ - t_thrd.page_redo_cxt.got_SIGHUP = true; -} - -/* Run from the worker thread. 
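InitGlobals copies the recovery context snapshot taken at worker creation (server mode, timeline, standby state, data directory, minRecoveryPoint) into this thread's thread-local storage, so replay code here sees the same globals as the startup thread. 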
*/ -static void InitGlobals() -{ - t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "ExtremeRtoParallelRedoThread", - THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); - - t_thrd.xlog_cxt.server_mode = g_redoWorker->initialServerMode; - t_thrd.xlog_cxt.ThisTimeLineID = g_redoWorker->initialTimeLineID; - t_thrd.xlog_cxt.expectedTLIs = g_redoWorker->expectedTLIs; - /* apply recoveryinfo will change standbystate see UpdateRecordGlobals */ - t_thrd.xlog_cxt.standbyState = g_redoWorker->standbyState; - t_thrd.xlog_cxt.StandbyMode = g_redoWorker->StandbyMode; - t_thrd.xlog_cxt.InRecovery = true; - t_thrd.xlog_cxt.startup_processing = true; - t_thrd.proc_cxt.DataDir = g_redoWorker->DataDir; - u_sess->utils_cxt.RecentXmin = g_redoWorker->RecentXmin; - g_redoWorker->proc = t_thrd.proc; - t_thrd.storage_cxt.latestObservedXid = g_redoWorker->latestObservedXid; - t_thrd.xlog_cxt.recoveryTargetTLI = g_redoWorker->recoveryTargetTLI; - t_thrd.xlog_cxt.recoveryRestoreCommand= g_redoWorker->recoveryRestoreCommand; - t_thrd.xlog_cxt.ArchiveRecoveryRequested = g_redoWorker->ArchiveRecoveryRequested; - t_thrd.xlog_cxt.StandbyModeRequested = g_redoWorker->StandbyModeRequested; - t_thrd.xlog_cxt.InArchiveRecovery = g_redoWorker->InArchiveRecovery; - t_thrd.xlog_cxt.InRecovery = g_redoWorker->InRecovery; - t_thrd.xlog_cxt.ArchiveRestoreRequested = g_redoWorker->ArchiveRestoreRequested; - t_thrd.xlog_cxt.minRecoveryPoint = g_redoWorker->minRecoveryPoint; -} - -void WaitRedoWorkersQueueEmpty() -{ - bool queueIsEmpty = false; - while (!queueIsEmpty) { - queueIsEmpty = true; - for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { - PageRedoWorker *worker = g_dispatcher->allWorkers[i]; - if (worker->role == REDO_TRXN_WORKER || worker->role == REDO_PAGE_WORKER) { - if (!RedoWorkerIsIdle(worker)) { - queueIsEmpty = false; - break; - } - } - } - RedoInterruptCallBack(); - } -} - -void RedoThrdWaitForExit(const PageRedoWorker *wk) -{ - uint32 sd = wk->slotId; - switch (wk->role) { - case REDO_BATCH: - SendPageRedoEndMark(g_dispatcher->pageLines[sd].managerThd); - WaitPageRedoWorkerReachLastMark(g_dispatcher->pageLines[sd].managerThd); - break; - case REDO_PAGE_MNG: - DispatchEndMarkToRedoWorkerAndWait(); - break; - case REDO_PAGE_WORKER: - break; /* Don't need to wait for anyone */ - case REDO_TRXN_MNG: - SendPageRedoEndMark(g_dispatcher->trxnLine.redoThd); - WaitRedoWorkersQueueEmpty(); - WaitPageRedoWorkerReachLastMark(g_dispatcher->trxnLine.redoThd); - break; - case REDO_TRXN_WORKER: - break; /* Don't need to wait for anyone */ - default: - break; - } -} - -/* Run from the worker thread. */ -static void ApplySinglePageRecord(RedoItem *item) -{ - XLogReaderState *record = &item->record; - bool bOld = item->oldVersion; - - MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - ApplyRedoRecord(record, bOld); - (void)MemoryContextSwitchTo(oldCtx); -} - -/* Run from the worker thread. */ -static void LastMarkReached() -{ - PosixSemaphorePost(&g_redoWorker->phaseMarker); -} - -/* Run from the dispatcher thread. */ -void WaitPageRedoWorkerReachLastMark(PageRedoWorker *worker) -{ - PosixSemaphoreWait(&worker->phaseMarker); -} - -/* Run from the dispatcher thread. */ -void AddPageRedoItem(PageRedoWorker *worker, void *item) -{ - SPSCBlockingQueuePut(worker->queue, item); -} - -/* Run from the dispatcher thread. */ -bool SendPageRedoEndMark(PageRedoWorker *worker) -{ - return SPSCBlockingQueuePut(worker->queue, &g_redoEndMark); -} - -/* Run from the dispatcher thread. 
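SendPageRedoWorkerTerminateMark enqueues the shared g_terminateMark sentinel, which consumers recognize by address rather than by contents. 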
*/ -bool SendPageRedoWorkerTerminateMark(PageRedoWorker *worker) -{ - return SPSCBlockingQueuePut(worker->queue, &g_terminateMark); -} - -/* Run from the txn worker thread. */ -void UpdatePageRedoWorkerStandbyState(PageRedoWorker *worker, HotStandbyState newState) -{ - /* - * Here we only save the new state into the worker struct. - * The actual update of the worker thread's state occurs inside - * the apply loop. - */ - worker->standbyState = newState; -} - -/* Run from the txn worker thread. */ -XLogRecPtr GetCompletedRecPtr(PageRedoWorker *worker) -{ - return pg_atomic_read_u64(&worker->lastReplayedEndRecPtr); -} - -void SetWorkerRestartPoint(PageRedoWorker *worker, XLogRecPtr restartPoint) -{ - pg_atomic_write_u64((uint64 *)&worker->lastCheckedRestartPoint, restartPoint); -} - -/* Run from the dispatcher thread. */ -void *GetXLogInvalidPages(PageRedoWorker *worker) -{ - return worker->xlogInvalidPages; -} - -bool RedoWorkerIsIdle(PageRedoWorker *worker) -{ - return SPSCBlockingQueueIsEmpty(worker->queue); -} - -void DumpPageRedoWorker(PageRedoWorker *worker) -{ - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("[REDO_LOG_TRACE]RedoWorker common info: id %u, tid %lu, " - "lastCheckedRestartPoint %lu, lastReplayedEndRecPtr %lu standbyState %u", - worker->id, worker->tid.thid, worker->lastCheckedRestartPoint, worker->lastReplayedEndRecPtr, - (uint32)worker->standbyState))); - DumpQueue(worker->queue); -} - -// TEST_LOG -void DumpItem(RedoItem *item, const char *funcName) -{ - return; - if (item == &g_redoEndMark || item == &g_terminateMark) { - return; - } - ereport(DEBUG4, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("[REDO_LOG_TRACE]DiagLogRedoRecord: %s, ReadRecPtr:%lu,EndRecPtr:%lu," - "oldVersion:%u," - "imcheckpoint:%u, shareCount:%d," - "designatedWorker:%u, recordXTime:%lu, refCount:%u, replayed:%d," - "syncXLogReceiptSource:%d, RecentXmin:%lu, syncServerMode:%u", - funcName, item->record.ReadRecPtr, item->record.EndRecPtr, item->oldVersion, - item->needImmediateCheckpoint, item->shareCount, item->designatedWorker, item->recordXTime, - item->refCount, item->replayed, - - item->syncXLogReceiptSource, item->RecentXmin, item->syncServerMode))); - DiagLogRedoRecord(&(item->record), funcName); -} - -void DumpExtremeRtoReadBuf() -{ - if (g_dispatcher == NULL) { - return; - } - - ereport(LOG, - (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("DumpExtremeRtoReadBuf: startworker %u, readindex %u, applyindex %u, readSource %u, failSource %u", - g_dispatcher->rtoXlogBufState.readWorkerState, g_dispatcher->rtoXlogBufState.readindex, - g_dispatcher->rtoXlogBufState.applyindex, g_dispatcher->rtoXlogBufState.readSource, - g_dispatcher->rtoXlogBufState.failSource))); - - for (uint32 i = 0; i < MAX_ALLOC_SEGNUM; ++i) { - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("DumpExtremeRtoReadBuf: buf %u, state %u, readlen %u, segno %lu, segoffset %lu", i, - g_dispatcher->rtoXlogBufState.xlogsegarray[i].bufState, - g_dispatcher->rtoXlogBufState.xlogsegarray[i].readlen, - g_dispatcher->rtoXlogBufState.xlogsegarray[i].segno, - g_dispatcher->rtoXlogBufState.xlogsegarray[i].segoffset))); - } -} - -bool XactHasSegpageRelFiles(XLogReaderState *record) -{ - int nrels = 0; - ColFileNodeRel *xnodes = NULL; - - if (XLogRecGetRmid(record) != RM_XACT_ID) { - return false; - } - - XactGetRelFiles(record, &xnodes, &nrels); - - for (int32 idx = 0; idx < nrels; idx++) { - ColFileNode colFileNode; - ColFileNodeRel *colFileNodeRel = xnodes + idx; - - 
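/* expand the packed ColFileNodeRel into a ColFileNode before checking the fork number and file type */ -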
ColFileNodeCopy(&colFileNode, colFileNodeRel); - - if (!IsValidColForkNum(colFileNode.forknum) && IsSegmentFileNode(colFileNode.filenode)) { - return true; - } - } - - return false; -} - -} // namespace extreme_rto +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * page_redo.cpp + * PageRedoWorker is a thread of execution that replays data page logs. + * It provides a synchronization mechanism for replaying logs touching + * multiple pages. + * + * In the current implementation, logs modifying the same page must + * always be replayed by the same worker. There is no mechanism for + * an idle worker to "steal" work from a busy worker. + * + * IDENTIFICATION + * src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp + * + * ------------------------------------------------------------------------- + */ + +#include +#include + +#include "postgres.h" +#include "knl/knl_variable.h" +#include "gs_thread.h" +#include "miscadmin.h" +#include "access/xact.h" +#include "access/xlog.h" +#include "access/xlog_internal.h" +#include "access/xlogutils.h" +#include "access/xlogproc.h" +#include "access/nbtree.h" +#include "catalog/storage_xlog.h" +#include "gssignal/gs_signal.h" +#include "libpq/pqsignal.h" +#include "postmaster/postmaster.h" +#include "storage/ipc.h" +#include "storage/freespace.h" +#include "storage/smgr/smgr.h" +#include "storage/standby.h" +#include "storage/pmsignal.h" +#include "utils/guc.h" +#include "utils/palloc.h" +#include "portability/instr_time.h" +#include "postmaster/startup.h" +#include "postmaster/pagerepair.h" +#include "catalog/storage.h" +#include +#include +#include "commands/dbcommands.h" +#include "commands/tablespace.h" +#include "access/extreme_rto/page_redo.h" +#include "access/extreme_rto/dispatcher.h" +#include "access/extreme_rto/txn_redo.h" +#include "pgstat.h" +#include "access/extreme_rto/batch_redo.h" +#include "access/multi_redo_api.h" +#include "replication/walreceiver.h" +#include "replication/datareceiver.h" +#include "pgxc/barrier.h" +#ifdef ENABLE_MOT +#include "storage/mot/mot_fdw.h" +#endif + +#ifdef EXTREME_RTO_DEBUG +#include +#include + +#include + +#include + +#endif + +#ifdef ENABLE_UT +#include "utils/utesteventutil.h" +#define STATIC +#else +#define STATIC static +#endif + +namespace extreme_rto { +static const int MAX_PARSE_BUFF_NUM = PAGE_WORK_QUEUE_SIZE * 10 * 3; +static const int MAX_LOCAL_BUFF_NUM = PAGE_WORK_QUEUE_SIZE * 10 * 3; + +static const char *const PROCESS_TYPE_CMD_ARG = "--forkpageredo"; +static char g_AUXILIARY_TYPE_CMD_ARG[16] = {0}; + +THR_LOCAL PageRedoWorker *g_redoWorker = NULL; +THR_LOCAL RecordBufferState *g_recordbuffer = NULL; +RedoItem g_redoEndMark = { false, false, NULL, 0, NULL, 0 }; +RedoItem g_terminateMark = { false, false, NULL, 0, NULL, 0 }; +RedoItem g_GlobalLsnForwarder; +RedoItem g_cleanupMark; +RedoItem g_closefdMark; +RedoItem g_cleanInvalidPageMark; + +static const int PAGE_REDO_WORKER_ARG = 3; +static const int 
REDO_SLEEP_50US = 50; +static const int REDO_SLEEP_100US = 100; + +static void ApplySinglePageRecord(RedoItem *); +static void InitGlobals(); +static void LastMarkReached(); +static void SetupSignalHandlers(); +static void SigHupHandler(SIGNAL_ARGS); +static ThreadId StartWorkerThread(PageRedoWorker *); + +void RedoThrdWaitForExit(const PageRedoWorker *wk); +void AddRefRecord(void *rec); +void SubRefRecord(void *rec); +void GlobalLsnUpdate(); +static void TrxnMangerQueueCallBack(); +#ifdef USE_ASSERT_CHECKING +void RecordBlockCheck(void *rec, XLogRecPtr curPageLsn, uint32 blockId, bool replayed); +#endif +void AddRecordReadBlocks(void *rec, uint32 readblocks); + +RefOperate recordRefOperate = { + AddRefRecord, + SubRefRecord, +#ifdef USE_ASSERT_CHECKING + RecordBlockCheck, +#endif + AddRecordReadBlocks, +}; + +void UpdateRecordGlobals(RedoItem *item, HotStandbyState standbyState) +{ + t_thrd.xlog_cxt.ReadRecPtr = item->record.ReadRecPtr; + t_thrd.xlog_cxt.EndRecPtr = item->record.EndRecPtr; + t_thrd.xlog_cxt.expectedTLIs = item->expectedTLIs; + /* apply recoveryinfo will change standbystate see UpdateRecordGlobals */ + t_thrd.xlog_cxt.standbyState = standbyState; + t_thrd.xlog_cxt.XLogReceiptTime = item->syncXLogReceiptTime; + t_thrd.xlog_cxt.XLogReceiptSource = item->syncXLogReceiptSource; + u_sess->utils_cxt.RecentXmin = item->RecentXmin; + t_thrd.xlog_cxt.server_mode = item->syncServerMode; +} + +/* Run from the dispatcher thread. */ +PageRedoWorker *StartPageRedoWorker(PageRedoWorker *worker) +{ + Assert(worker); + uint32 id = worker->id; + ThreadId threadId = StartWorkerThread(worker); + if (threadId == 0) { + ereport(WARNING, (errmsg("Cannot create page-redo-worker thread: %u, %m.", id))); + DestroyPageRedoWorker(worker); + return NULL; + } else { + ereport(LOG, (errmsg("StartPageRedoWorker successfully create page-redo-worker id: %u, threadId:%lu.", id, + worker->tid.thid))); + } + g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[id].threadId = threadId; + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); + uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[id].threadState)); + if (state != PAGE_REDO_WORKER_READY) { + g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[id].threadState = PAGE_REDO_WORKER_START; + } + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); + return worker; +} + +void RedoWorkerQueueCallBack() +{ + RedoInterruptCallBack(); +} + +bool RedoWorkerIsUndoSpaceWorker() +{ + return g_redoWorker->isUndoSpaceWorker; +} + +/* Run from the dispatcher thread. 
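CreateWorker allocates an EXTREME_RTO_ALIGN_LEN-aligned PageRedoWorker, snapshots the creating thread's recovery state into it, and creates its single-producer/single-consumer queue; the thread itself is launched later by StartPageRedoWorker. 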
*/ +PageRedoWorker *CreateWorker(uint32 id) +{ + PageRedoWorker *tmp = (PageRedoWorker *)palloc0(sizeof(PageRedoWorker) + EXTREME_RTO_ALIGN_LEN); + PageRedoWorker *worker; + worker = (PageRedoWorker *)TYPEALIGN(EXTREME_RTO_ALIGN_LEN, tmp); + worker->selfOrinAddr = tmp; + worker->id = id; + worker->index = 0; + worker->tid.thid = InvalidTid; + worker->proc = NULL; + worker->initialServerMode = (ServerMode)t_thrd.xlog_cxt.server_mode; + worker->initialTimeLineID = t_thrd.xlog_cxt.ThisTimeLineID; + worker->expectedTLIs = t_thrd.xlog_cxt.expectedTLIs; + worker->recoveryTargetTLI = t_thrd.xlog_cxt.recoveryTargetTLI; + worker->recoveryRestoreCommand = t_thrd.xlog_cxt.recoveryRestoreCommand; + worker->ArchiveRecoveryRequested = t_thrd.xlog_cxt.ArchiveRecoveryRequested; + worker->StandbyModeRequested = t_thrd.xlog_cxt.StandbyModeRequested; + worker->InArchiveRecovery = t_thrd.xlog_cxt.InArchiveRecovery; + worker->InRecovery = t_thrd.xlog_cxt.InRecovery; + worker->ArchiveRestoreRequested = t_thrd.xlog_cxt.ArchiveRestoreRequested; + worker->minRecoveryPoint = t_thrd.xlog_cxt.minRecoveryPoint; + + worker->pendingHead = NULL; + worker->pendingTail = NULL; + worker->queue = SPSCBlockingQueueCreate(PAGE_WORK_QUEUE_SIZE, RedoWorkerQueueCallBack); + worker->lastCheckedRestartPoint = InvalidXLogRecPtr; + worker->lastReplayedEndRecPtr = InvalidXLogRecPtr; + worker->standbyState = (HotStandbyState)t_thrd.xlog_cxt.standbyState; + worker->StandbyMode = t_thrd.xlog_cxt.StandbyMode; + worker->latestObservedXid = t_thrd.storage_cxt.latestObservedXid; + worker->DataDir = t_thrd.proc_cxt.DataDir; + worker->RecentXmin = u_sess->utils_cxt.RecentXmin; + worker->xlogInvalidPages = NULL; + PosixSemaphoreInit(&worker->phaseMarker, 0); + worker->oldCtx = NULL; + worker->fullSyncFlag = 0; +#if (!defined __x86_64__) && (!defined __aarch64__) + SpinLockInit(&worker->ptrLck); +#endif + worker->parseManager.memctl.isInit = false; + worker->parseManager.parsebuffers = NULL; + worker->remoteReadPageNum = 0; + worker->badPageHashTbl = BadBlockHashTblCreate(); + return worker; +} + +/* Run from the dispatcher thread. */ +static ThreadId StartWorkerThread(PageRedoWorker *worker) +{ + worker->tid.thid = initialize_util_thread(PAGEREDO, worker); + return worker->tid.thid; +} + +/* Run from the dispatcher thread. 
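DestroyPageRedoWorker tears down the worker's semaphore, queue and redo/parse buffers, then frees the original unaligned allocation via selfOrinAddr. 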
*/ +void DestroyPageRedoWorker(PageRedoWorker *worker) +{ + PosixSemaphoreDestroy(&worker->phaseMarker); + SPSCBlockingQueueDestroy(worker->queue); + XLogRedoBufferDestoryFunc(&(worker->bufferManager)); + XLogParseBufferDestoryFunc(&(worker->parseManager)); + pfree(worker->selfOrinAddr); +} + +/* atomic write of lastReplayedReadRecPtr and lastReplayedEndRecPtr: both 64-bit LSNs are packed into one 16-byte value and installed with a 128-bit compare-and-swap, retrying until no concurrent writer intervenes */ +void SetCompletedReadEndPtr(PageRedoWorker *worker, XLogRecPtr readPtr, XLogRecPtr endPtr) +{ + volatile PageRedoWorker *tmpWk = worker; +#if defined(__x86_64__) || defined(__aarch64__) + uint128_u exchange; + uint128_u current; + uint128_u compare = atomic_compare_and_swap_u128((uint128_u *)&tmpWk->lastReplayedReadRecPtr); + + Assert(sizeof(tmpWk->lastReplayedReadRecPtr) == 8); + Assert(sizeof(tmpWk->lastReplayedEndRecPtr) == 8); + + exchange.u64[0] = (uint64)readPtr; + exchange.u64[1] = (uint64)endPtr; + +loop: + current = atomic_compare_and_swap_u128((uint128_u *)&tmpWk->lastReplayedReadRecPtr, compare, exchange); + if (!UINT128_IS_EQUAL(compare, current)) { + UINT128_COPY(compare, current); + goto loop; + } +#else + SpinLockAcquire(&tmpWk->ptrLck); + tmpWk->lastReplayedReadRecPtr = readPtr; + tmpWk->lastReplayedEndRecPtr = endPtr; + SpinLockRelease(&tmpWk->ptrLck); +#endif /* __x86_64__ || __aarch64__ */ +} + +/* atomic read of lastReplayedReadRecPtr and lastReplayedEndRecPtr */ +void GetCompletedReadEndPtr(PageRedoWorker *worker, XLogRecPtr *readPtr, XLogRecPtr *endPtr) +{ + volatile PageRedoWorker *tmpWk = worker; +#if defined(__x86_64__) || defined(__aarch64__) + uint128_u compare = atomic_compare_and_swap_u128((uint128_u *)&tmpWk->lastReplayedReadRecPtr); + Assert(sizeof(tmpWk->lastReplayedReadRecPtr) == 8); + Assert(sizeof(tmpWk->lastReplayedEndRecPtr) == 8); + + *readPtr = (XLogRecPtr)compare.u64[0]; + *endPtr = (XLogRecPtr)compare.u64[1]; +#else + SpinLockAcquire(&tmpWk->ptrLck); + *readPtr = tmpWk->lastReplayedReadRecPtr; + *endPtr = tmpWk->lastReplayedEndRecPtr; + SpinLockRelease(&tmpWk->ptrLck); +#endif /* __x86_64__ || __aarch64__ */ +} + +/* Run from both the dispatcher and the worker thread. */ +bool IsPageRedoWorkerProcess(int argc, char *argv[]) +{ + return strcmp(argv[1], PROCESS_TYPE_CMD_ARG) == 0; +} + +/* Run from the worker thread. */ +void AdaptArgvForPageRedoWorker(char *argv[]) +{ + if (g_AUXILIARY_TYPE_CMD_ARG[0] == 0) + sprintf_s(g_AUXILIARY_TYPE_CMD_ARG, sizeof(g_AUXILIARY_TYPE_CMD_ARG), "-x%d", PageRedoProcess); + argv[3] = g_AUXILIARY_TYPE_CMD_ARG; +} + +/* Run from the worker thread. */ +void GetThreadNameIfPageRedoWorker(int argc, char *argv[], char **threadNamePtr) +{ + if (*threadNamePtr == NULL && IsPageRedoWorkerProcess(argc, argv)) + *threadNamePtr = "PageRedoWorker"; +} + +/* Run from the worker thread. */ +uint32 GetMyPageRedoWorkerIdWithLock() +{ + bool isWorkerStarting = false; + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); + isWorkerStarting = ((g_instance.comm_cxt.predo_cxt.state == REDO_STARTING_BEGIN) ? true : false); + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); + if (!isWorkerStarting) { + ereport(WARNING, (errmsg("GetMyPageRedoWorkerIdWithLock Page-redo-worker exit."))); + proc_exit(0); + } + + return g_redoWorker->id; +} + +/* Run from any worker thread. 
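GetPageRedoWorkerProc returns the worker's PGPROC entry, e.g. for other threads to inspect or signal it. 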
*/ +PGPROC *GetPageRedoWorkerProc(PageRedoWorker *worker) +{ + return worker->proc; +} + +void HandlePageRedoPageRepair(RepairBlockKey key, XLogPhyBlock pblk) +{ + RecordBadBlockAndPushToRemote(g_redoWorker->curRedoBlockState, CRC_CHECK_FAIL, InvalidXLogRecPtr, pblk); +} + +void HandlePageRedoInterrupts() +{ + if (t_thrd.page_redo_cxt.got_SIGHUP) { + t_thrd.page_redo_cxt.got_SIGHUP = false; + ProcessConfigFile(PGC_SIGHUP); + } + + if (t_thrd.page_redo_cxt.check_repair) { + SeqCheckRemoteReadAndRepairPage(); + t_thrd.page_redo_cxt.check_repair = false; + } + + if (t_thrd.page_redo_cxt.shutdown_requested) { + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("page worker id %u exit for request", g_redoWorker->id))); + + pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), + PAGE_REDO_WORKER_EXIT); + + proc_exit(1); + } +} + +void ReferenceRedoItem(void *item) +{ + RedoItem *redoItem = (RedoItem *)item; + AddRefRecord(&redoItem->record); +} + +void DereferenceRedoItem(void *item) +{ + RedoItem *redoItem = (RedoItem *)item; + SubRefRecord(&redoItem->record); +} + +#define STRUCT_CONTAINER(type, membername, ptr) ((type *)((char *)(ptr)-offsetof(type, membername))) + +#ifdef USE_ASSERT_CHECKING +void RecordBlockCheck(void *rec, XLogRecPtr curPageLsn, uint32 blockId, bool replayed) +{ + XLogReaderState *record = (XLogReaderState *)rec; + if (record->blocks[blockId].forknum != MAIN_FORKNUM) { + return; + } + + if (replayed) { + uint32 rmid = XLogRecGetRmid(record); + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + + if (curPageLsn == InvalidXLogRecPtr && (rmid == RM_HEAP2_ID || rmid == RM_HEAP_ID || rmid == RM_HEAP3_ID)) { + uint32 shiftSize = 32; + ereport(LOG, (errmsg("pass checked, record lsn:%X/%X, type: %u %u", + static_cast<uint32>(record->EndRecPtr >> shiftSize), static_cast<uint32>(record->EndRecPtr), + record->decoded_record->xl_rmid, record->decoded_record->xl_info))); + } else if (!(rmid == RM_HEAP2_ID && info == XLOG_HEAP2_VISIBLE) && + !(rmid == RM_HEAP_ID && info == XLOG_HEAP_NEWPAGE)) { + Assert(XLByteLE(record->EndRecPtr, curPageLsn)); + } + } + + Assert(blockId < (XLR_MAX_BLOCK_ID + 1)); + record->blocks[blockId].replayed = 1; +} + +#endif + +void AddRecordReadBlocks(void *rec, uint32 readblocks) +{ + XLogReaderState *record = (XLogReaderState *)rec; + record->readblocks += readblocks; +} + +void AddRefRecord(void *rec) +{ + pg_memory_barrier(); +#ifndef EXTREME_RTO_DEBUG + (void)pg_atomic_fetch_add_u32(&((XLogReaderState *)rec)->refcount, 1); +#else + uint32 relCount = pg_atomic_fetch_add_u32(&((XLogReaderState *)rec)->refcount, 1); + + const int stack_size = 5; + const int max_out_put_buf = 4096; + void *buffer[stack_size]; + int nptrs; + char output[max_out_put_buf]; + char **strings; + nptrs = backtrace(buffer, stack_size); + strings = backtrace_symbols(buffer, nptrs); + + int ret = sprintf_s(output, sizeof(output), "before add relcount %u lsn %X/%X call back trace: \n", relCount, + (uint32)(((XLogReaderState *)rec)->EndRecPtr >> 32), + (uint32)(((XLogReaderState *)rec)->EndRecPtr)); + securec_check_ss_c(ret, "\0", "\0"); + for (int i = 0; i < nptrs; ++i) { + ret = strcat_s(output, max_out_put_buf - strlen(output), strings[i]); + securec_check_ss_c(ret, "\0", "\0"); + ret = strcat_s(output, max_out_put_buf - strlen(output), "\n"); + securec_check_ss_c(ret, "\0", "\0"); + } + + free(strings); + ereport(LOG, (errcode(ERRCODE_DATA_CORRUPTED), errmsg(" AddRefRecord print: %s", output))); + +#endif +} + +void 
SubRefRecord(void *rec) +{ + pg_memory_barrier(); + Assert(((XLogReaderState *)rec)->refcount != 0); + uint32 relCount = pg_atomic_sub_fetch_u32(&((XLogReaderState *)rec)->refcount, 1); +#ifdef EXTREME_RTO_DEBUG + const int stack_size = 5; + const int max_out_put_buf = 4096; + void *buffer[stack_size]; + int nptrs; + char output[max_out_put_buf]; + char **strings; + nptrs = backtrace(buffer, stack_size); + strings = backtrace_symbols(buffer, nptrs); + + int ret = sprintf_s(output, sizeof(output), "after sub relcount %u lsn %X/%X call back trace:\n", relCount, + (uint32)(((XLogReaderState *)rec)->EndRecPtr >> 32), + (uint32)(((XLogReaderState *)rec)->EndRecPtr)); + securec_check_ss_c(ret, "\0", "\0"); + for (int i = 0; i < nptrs; ++i) { + ret = strcat_s(output, max_out_put_buf - strlen(output), strings[i]); + securec_check_ss_c(ret, "\0", "\0"); + ret = strcat_s(output, max_out_put_buf - strlen(output), "\n"); + securec_check_ss_c(ret, "\0", "\0"); + } + free(strings); + ereport(LOG, (errcode(ERRCODE_DATA_CORRUPTED), errmsg(" SubRefRecord print: %s", output))); + +#endif + + if (relCount == 0) { + RedoItem *item = STRUCT_CONTAINER(RedoItem, record, rec); + FreeRedoItem(item); + } +} + +bool BatchRedoParseItemAndDispatch(RedoItem *item) +{ + uint32 blockNum = 0; + XLogRecParseState *recordblockstate = XLogParseToBlockForExtermeRTO(&item->record, &blockNum); + if (recordblockstate == NULL) { + if (blockNum == 0) { + return false; + } + return true; /* out of mem */ + } + + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + AddPageRedoItem(myRedoLine->managerThd, recordblockstate); + return false; +} + +void BatchRedoDistributeEndMark(void) +{ + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + SendPageRedoEndMark(myRedoLine->managerThd); +} + +void BatchRedoProcLsnForwarder(RedoItem *lsnForwarder) +{ + SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); + (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); + + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + AddPageRedoItem(myRedoLine->managerThd, lsnForwarder); +} + +void BatchRedoProcCleanupMark(RedoItem *cleanupMark) +{ + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + AddPageRedoItem(myRedoLine->managerThd, cleanupMark); + ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]BatchRedoProcCleanupMark has cleaned InvalidPages"))); +} + +#ifdef ENABLE_DISTRIBUTE_TEST +// inject delay to slow the process and also can be used as UT mock stub +void InjectDelayWaitRedoPageManagerQueueEmpty() +{ + const uint32 sleepTime = 1000000; + ereport(LOG, (errmsg("ProcessRedoPageManagerQueueEmpty sleep"))); + pg_usleep(sleepTime); +} +#endif + +void WaitAllRedoWorkerQueueEmpty() +{ + if ((get_real_recovery_parallelism() > 1) && (GetBatchCount() > 0)) { + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; + + for (uint32 i = 0; i < WorkerNumPerMng; ++i) { + while (!SPSCBlockingQueueIsEmpty(myRedoLine->redoThd[i]->queue)) { + RedoInterruptCallBack(); + } + } + } +} + +bool BatchRedoDistributeItems(void **eleArry, uint32 eleNum) +{ + bool parsecomplete = false; + for (uint32 i = 0; i < eleNum; i++) { + if (eleArry[i] == (void *)&g_redoEndMark) { + return true; + } else if (eleArry[i] == (void *)&g_GlobalLsnForwarder) { + 
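+ /*
+  * Marker-protocol note (descriptive sketch, not new logic): g_GlobalLsnForwarder
+  * is a shared singleton injected by the read worker; each stage records the
+  * LSNs it carries via SetCompletedReadEndPtr(), drops one reference with
+  * pg_atomic_sub_fetch_u32() and passes it to the next stage, so global replay
+  * progress advances even when no ordinary RedoItem is in flight.
+  */
+ 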
BatchRedoProcLsnForwarder((RedoItem *)eleArry[i]); + } else if (eleArry[i] == (void *)&g_cleanupMark) { + BatchRedoProcCleanupMark((RedoItem *)eleArry[i]); + } else if (eleArry[i] == (void *)&g_closefdMark) { + smgrcloseall(); + } else if (eleArry[i] == (void *)&g_cleanInvalidPageMark) { + forget_range_invalid_pages((void *)eleArry[i]); + } else { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + RedoItem *item = (RedoItem *)eleArry[i]; + UpdateRecordGlobals(item, g_redoWorker->standbyState); + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3], + g_redoWorker->timeCostList[TIME_COST_STEP_6]); + do { + parsecomplete = BatchRedoParseItemAndDispatch(item); + RedoInterruptCallBack(); + } while (parsecomplete); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + DereferenceRedoItem(item); + } + } + + return false; +} + +void BatchRedoMain() +{ + void **eleArry; + uint32 eleNum; + + (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + XLogParseBufferInitFunc(&(g_redoWorker->parseManager), MAX_PARSE_BUFF_NUM, &recordRefOperate, + RedoInterruptCallBack); + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + while (SPSCBlockingQueueGetAll(g_redoWorker->queue, &eleArry, &eleNum)) { + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1], g_redoWorker->timeCostList[TIME_COST_STEP_2]); + bool isEnd = BatchRedoDistributeItems(eleArry, eleNum); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2]); + SPSCBlockingQueuePopN(g_redoWorker->queue, eleNum); + if (isEnd) + break; + + RedoInterruptCallBack(); + ADD_ABNORMAL_POSITION(1); + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + } + + RedoThrdWaitForExit(g_redoWorker); + XLogParseBufferDestoryFunc(&(g_redoWorker->parseManager)); +} + +uint32 GetWorkerId(const RedoItemTag *redoItemTag, uint32 workerCount) +{ + if (workerCount != 0) { + return tag_hash(redoItemTag, sizeof(RedoItemTag)) % workerCount; + } + return 0; +} + +uint32 GetWorkerId(const uint32 attId, const uint32 workerCount) +{ + if (workerCount != 0) { + return attId % workerCount; + } + return 0; +} + +void RedoPageManagerDistributeToAllOneBlock(XLogRecParseState *ddlParseState) +{ + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; + + ddlParseState->nextrecord = NULL; + + for (uint32 i = 0; i < WorkerNumPerMng; ++i) { + XLogRecParseState *newState = XLogParseBufferCopy(ddlParseState); + AddPageRedoItem(myRedoLine->redoThd[i], newState); + } +} + +void RedoPageManagerDistributeBlockRecord(HTAB *redoItemHash, XLogRecParseState *parsestate) +{ + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; + HASH_SEQ_STATUS status; + RedoItemHashEntry *redoItemEntry = NULL; + HTAB *curMap = redoItemHash; + hash_seq_init(&status, curMap); + + while ((redoItemEntry = (RedoItemHashEntry *)hash_seq_search(&status)) != NULL) { + uint32 workId = GetWorkerId(&redoItemEntry->redoItemTag, WorkerNumPerMng); + AddPageRedoItem(myRedoLine->redoThd[workId], redoItemEntry->head); + + if (hash_search(curMap, (void *)&redoItemEntry->redoItemTag, HASH_REMOVE, NULL) == NULL) + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("hash table corrupted"))); + } + + if (parsestate != NULL) { + RedoPageManagerDistributeToAllOneBlock(parsestate); + } +} + +void WaitCurrentPipeLineRedoWorkersQueueEmpty() +{ + PageRedoPipeline *myRedoLine = 
&g_dispatcher->pageLines[g_redoWorker->slotId];
+ const uint32 WorkerNumPerMng = myRedoLine->redoThdNum;
+
+ for (uint32 i = 0; i < WorkerNumPerMng; ++i) {
+ while (!SPSCBlockingQueueIsEmpty(myRedoLine->redoThd[i]->queue)) {
+ RedoInterruptCallBack();
+ }
+ }
+}
+
+void DispatchEndMarkToRedoWorkerAndWait()
+{
+ PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId];
+ const uint32 WorkerNumPerMng = get_page_redo_worker_num_per_manager();
+ for (uint32 i = 0; i < WorkerNumPerMng; ++i)
+ SendPageRedoEndMark(myRedoLine->redoThd[i]);
+
+ for (uint32 i = 0; i < myRedoLine->redoThdNum; i++) {
+ WaitPageRedoWorkerReachLastMark(myRedoLine->redoThd[i]);
+ }
+}
+
+void RedoPageManagerDdlAction(XLogRecParseState *parsestate)
+{
+ switch (parsestate->blockparse.blockhead.block_valid) {
+ case BLOCK_DATA_DROP_DATABASE_TYPE:
+ xlog_db_drop(parsestate->blockparse.blockhead.end_ptr, parsestate->blockparse.blockhead.dbNode,
+ parsestate->blockparse.blockhead.spcNode);
+ break;
+ case BLOCK_DATA_CREATE_DATABASE_TYPE:
+ xlog_db_create(parsestate->blockparse.blockhead.dbNode, parsestate->blockparse.blockhead.spcNode,
+ parsestate->blockparse.extra_rec.blockdatabase.src_db_id,
+ parsestate->blockparse.extra_rec.blockdatabase.src_tablespace_id);
+ break;
+ case BLOCK_DATA_DROP_TBLSPC_TYPE:
+ xlog_drop_tblspc(parsestate->blockparse.blockhead.spcNode);
+ break;
+ case BLOCK_DATA_SEG_FILE_EXTEND_TYPE:
+ {
+ Assert(0);
+ }
+ break;
+ case BLOCK_DATA_SEG_SPACE_DROP:
+ case BLOCK_DATA_SEG_FULL_SYNC_TYPE:
+ case BLOCK_DATA_SEG_EXTEND:
+ ProcSegPageCommonRedo(parsestate);
+ break;
+ default:
+ break;
+ }
+}
+
+void RedoPageManagerSmgrClose(XLogRecParseState *parsestate)
+{
+ switch (parsestate->blockparse.blockhead.block_valid) {
+ case BLOCK_DATA_DROP_DATABASE_TYPE:
+ smgrcloseall();
+ break;
+ case BLOCK_DATA_SEG_FULL_SYNC_TYPE:
+ ProcSegPageJustFreeChildState(parsestate);
+ break;
+ default:
+ break;
+ }
+}
+
+void RedoPageManagerSyncDdlAction(XLogRecParseState *parsestate)
+{
+ /* at this moment, all worker queues are empty; just find out which one will do it */
+ uint32 expected = 0;
+ const uint32 pipelineNum = g_dispatcher->pageLineNum;
+ pg_atomic_compare_exchange_u32(&g_dispatcher->syncEnterCount, &expected, pipelineNum);
+ uint32 entershareCount = pg_atomic_sub_fetch_u32(&g_dispatcher->syncEnterCount, 1);
+
+ MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx);
+ if (entershareCount == 0) {
+ /* do actual work */
+ RedoPageManagerDdlAction(parsestate);
+ } else {
+ RedoPageManagerSmgrClose(parsestate);
+ do {
+ RedoInterruptCallBack();
+ entershareCount = pg_atomic_read_u32(&g_dispatcher->syncEnterCount);
+ } while (entershareCount != 0);
+ }
+ (void)MemoryContextSwitchTo(oldCtx);
+
+ expected = 0;
+ pg_atomic_compare_exchange_u32(&g_dispatcher->syncExitCount, &expected, pipelineNum);
+ uint32 exitShareCount = pg_atomic_sub_fetch_u32(&g_dispatcher->syncExitCount, 1);
+ while (exitShareCount != 0) {
+ RedoInterruptCallBack();
+ exitShareCount = pg_atomic_read_u32(&g_dispatcher->syncExitCount);
+ }
+
+ parsestate->nextrecord = NULL;
+ XLogBlockParseStateRelease(parsestate);
+}
+
+void RedoPageManagerDoDropAction(XLogRecParseState *parsestate, HTAB *hashMap)
+{
+ XLogRecParseState *newState = XLogParseBufferCopy(parsestate);
+ PRTrackClearBlock(newState, hashMap);
+ RedoPageManagerDistributeBlockRecord(hashMap, parsestate);
+ WaitCurrentPipeLineRedoWorkersQueueEmpty();
+ RedoPageManagerSyncDdlAction(parsestate);
+}
+
+void RedoPageManagerDoSmgrAction(XLogRecParseState 
*recordblockstate) +{ + RedoBufferInfo bufferinfo = {0}; + void *blockrecbody; + XLogBlockHead *blockhead; + + blockhead = &recordblockstate->blockparse.blockhead; + blockrecbody = &recordblockstate->blockparse.extra_rec; + + XLogBlockInitRedoBlockInfo(blockhead, &bufferinfo.blockinfo); + + MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); + XLogBlockDdlDoSmgrAction(blockhead, blockrecbody, &bufferinfo); + (void)MemoryContextSwitchTo(oldCtx); + + recordblockstate->nextrecord = NULL; + XLogBlockParseStateRelease(recordblockstate); +} + +void RedoPageManagerDoDataTypeAction(XLogRecParseState *parsestate, HTAB *hashMap) +{ + XLogBlockDdlParse *ddlrecparse = NULL; + XLogBlockParseGetDdlParse(parsestate, ddlrecparse); + + if (ddlrecparse->blockddltype == BLOCK_DDL_DROP_RELNODE || + ddlrecparse->blockddltype == BLOCK_DDL_TRUNCATE_RELNODE) { + XLogRecParseState *newState = XLogParseBufferCopy(parsestate); + PRTrackClearBlock(newState, hashMap); + RedoPageManagerDistributeBlockRecord(hashMap, parsestate); + WaitCurrentPipeLineRedoWorkersQueueEmpty(); + } + + RedoPageManagerDoSmgrAction(parsestate); + +} + +void PageManagerProcLsnForwarder(RedoItem *lsnForwarder) +{ + SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); + (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); + + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; + + for (uint32 i = 0; i < WorkerNumPerMng; ++i) { + AddPageRedoItem(myRedoLine->redoThd[i], lsnForwarder); + } +} + +void PageManagerDistributeBcmBlock(XLogRecParseState *preState) +{ + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; + uint32 workId = GetWorkerId((uint32)preState->blockparse.blockhead.forknum, WorkerNumPerMng); + AddPageRedoItem(myRedoLine->redoThd[workId], preState); +} + +void PageManagerProcCleanupMark(RedoItem *cleanupMark) +{ + PageRedoPipeline *myRedoLine = &g_dispatcher->pageLines[g_redoWorker->slotId]; + const uint32 WorkerNumPerMng = myRedoLine->redoThdNum; + g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + for (uint32 i = 0; i < WorkerNumPerMng; ++i) { + AddPageRedoItem(myRedoLine->redoThd[i], cleanupMark); + } + ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]PageManagerProcCleanupMark has cleaned InvalidPages"))); +} + +void PageManagerProcCheckPoint(HTAB *hashMap, XLogRecParseState *parseState) +{ + Assert(IsCheckPoint(parseState)); + RedoPageManagerDistributeBlockRecord(hashMap, parseState); + bool needWait = parseState->isFullSync; + if (needWait) { + pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1); + } + + XLogBlockParseStateRelease(parseState); + uint32 val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); + while (val != 0) { + RedoInterruptCallBack(); + val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); + } + +#ifdef USE_ASSERT_CHECKING + int printLevel = WARNING; +#else + int printLevel = DEBUG1; +#endif + if (log_min_messages <= printLevel) { + GetThreadBufferLeakNum(); + } +} + +void PageManagerProcCreateTableSpace(HTAB *hashMap, XLogRecParseState *parseState) +{ + RedoPageManagerDistributeBlockRecord(hashMap, NULL); + bool needWait = parseState->isFullSync; + if (needWait) { + pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1); + } + + XLogBlockParseStateRelease(parseState); + uint32 val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); + while (val != 0) { 
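+ /*
+  * Hedged sketch of the fullSyncFlag handshake (commentary only): the page
+  * manager raises the flag and spins below; the trxn worker clears it for all
+  * page managers and workers in TrxnWorkNotifyRedoWorker() once the full-sync
+  * record has been applied:
+  *
+  *     pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1);   // manager raises
+  *     pg_atomic_write_u32(&worker->fullSyncFlag, 0);         // trxn side clears
+  */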
+ RedoInterruptCallBack();
+ val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag);
+ }
+}
+
+void PageManagerProcSegFullSyncState(HTAB *hashMap, XLogRecParseState *parseState)
+{
+ RedoPageManagerDistributeBlockRecord(hashMap, NULL);
+ WaitCurrentPipeLineRedoWorkersQueueEmpty();
+ RedoPageManagerSyncDdlAction(parseState);
+}
+
+void PageManagerProcSegPipeLineSyncState(HTAB *hashMap, XLogRecParseState *parseState)
+{
+ RedoPageManagerDistributeBlockRecord(hashMap, NULL);
+ WaitCurrentPipeLineRedoWorkersQueueEmpty();
+ MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx);
+
+ RedoPageManagerDdlAction(parseState);
+
+ (void)MemoryContextSwitchTo(oldCtx);
+ XLogBlockParseStateRelease(parseState);
+}
+
+static void WaitNextBarrier(XLogRecParseState *parseState)
+{
+ char* barrier = parseState->blockparse.extra_rec.blockbarrier.maindata;
+ uint8 info = parseState->blockparse.blockhead.xl_info & ~XLR_INFO_MASK;
+ volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv;
+
+ if (info == XLOG_BARRIER_COMMIT || !IS_DISASTER_RECOVER_MODE || !is_barrier_pausable(barrier))
+ return;
+
+ while (true) {
+ SpinLockAcquire(&walrcv->mutex);
+ if (BARRIER_LT((char *)barrier, (char *)walrcv->recoveryTargetBarrierId) ||
+ BARRIER_LE((char *)barrier, (char *)walrcv->recoveryStopBarrierId) ||
+ BARRIER_EQ((char *)barrier, (char *)walrcv->recoverySwitchoverBarrierId)) {
+ SpinLockRelease(&walrcv->mutex);
+ break;
+ } else {
+ SpinLockRelease(&walrcv->mutex);
+ pg_usleep(1000L);
+ RedoInterruptCallBack();
+ }
+ }
+}
+
+void PageManagerRedoParseState(XLogRecParseState *preState)
+{
+ HTAB *hashMap = g_dispatcher->pageLines[g_redoWorker->slotId].managerThd->redoItemHash;
+
+ switch (preState->blockparse.blockhead.block_valid) {
+ case BLOCK_DATA_MAIN_DATA_TYPE:
+ case BLOCK_DATA_UNDO_TYPE:
+ case BLOCK_DATA_VM_TYPE:
+ case BLOCK_DATA_FSM_TYPE:
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]);
+ PRTrackAddBlock(preState, hashMap);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]);
+ break;
+ case BLOCK_DATA_DDL_TYPE:
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]);
+ RedoPageManagerDoDataTypeAction(preState, hashMap);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]);
+ break;
+ case BLOCK_DATA_SEG_EXTEND:
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]);
+ PageManagerProcSegPipeLineSyncState(hashMap, preState);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]);
+ break;
+ case BLOCK_DATA_DROP_DATABASE_TYPE:
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]);
+ RedoPageManagerDoDropAction(preState, hashMap);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]);
+ break;
+ case BLOCK_DATA_DROP_TBLSPC_TYPE:
+ /* just make sure any other ddl before drop tblspc is done */
+ XLogBlockParseStateRelease(preState);
+ break;
+ case BLOCK_DATA_CREATE_DATABASE_TYPE:
+ case BLOCK_DATA_SEG_FILE_EXTEND_TYPE:
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]);
+ RedoPageManagerDistributeBlockRecord(hashMap, NULL);
+ /* wait until queue empty */
+ WaitCurrentPipeLineRedoWorkersQueueEmpty();
+ /* do actual action */
+ RedoPageManagerSyncDdlAction(preState);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]);
+ break;
+ case BLOCK_DATA_SEG_FULL_SYNC_TYPE:
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]);
+ PageManagerProcSegFullSyncState(hashMap, preState);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]);
+ break;
+ case BLOCK_DATA_CREATE_TBLSPC_TYPE:
+ 
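+ /*
+  * Tablespace creation is ordered against all page work dispatched so far
+  * (reading of this case; commentary only): distribute whatever is queued
+  * and, for full-sync records, block in PageManagerProcCreateTableSpace()
+  * until the trxn worker acknowledges via the fullSyncFlag handshake.
+  */
+ 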
GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_7]); + PageManagerProcCreateTableSpace(hashMap, preState); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_7]); + break; + case BLOCK_DATA_XLOG_COMMON_TYPE: + PageManagerProcCheckPoint(hashMap, preState); + break; + case BLOCK_DATA_NEWCU_TYPE: + RedoPageManagerDistributeBlockRecord(hashMap, NULL); + PageManagerDistributeBcmBlock(preState); + break; + case BLOCK_DATA_SEG_SPACE_DROP: + case BLOCK_DATA_SEG_SPACE_SHRINK: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]); + RedoPageManagerDistributeBlockRecord(hashMap, preState); + WaitCurrentPipeLineRedoWorkersQueueEmpty(); + RedoPageManagerSyncDdlAction(preState); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]); + break; + case BLOCK_DATA_BARRIER_TYPE: + RedoPageManagerDistributeBlockRecord(hashMap, preState); + WaitNextBarrier(preState); + XLogBlockParseStateRelease(preState); + break; + default: + XLogBlockParseStateRelease(preState); + break; + } +} + +bool PageManagerRedoDistributeItems(void **eleArry, uint32 eleNum) +{ + HTAB *hashMap = g_dispatcher->pageLines[g_redoWorker->slotId].managerThd->redoItemHash; + + for (uint32 i = 0; i < eleNum; i++) { + if (eleArry[i] == (void *)&g_redoEndMark) { + RedoPageManagerDistributeBlockRecord(hashMap, NULL); + return true; + } else if (eleArry[i] == (void *)&g_GlobalLsnForwarder) { + RedoPageManagerDistributeBlockRecord(hashMap, NULL); + PageManagerProcLsnForwarder((RedoItem *)eleArry[i]); + continue; + } else if (eleArry[i] == (void *)&g_cleanupMark) { + PageManagerProcCleanupMark((RedoItem *)eleArry[i]); + continue; + } else if (eleArry[i] == (void *)&g_closefdMark) { + smgrcloseall(); + continue; + } else if (eleArry[i] == (void *)&g_cleanInvalidPageMark) { + forget_range_invalid_pages((void *)eleArry[i]); + continue; + } + XLogRecParseState *recordblockstate = (XLogRecParseState *)eleArry[i]; + XLogRecParseState *nextState = recordblockstate; + do { + XLogRecParseState *preState = nextState; + nextState = (XLogRecParseState *)nextState->nextrecord; + preState->nextrecord = NULL; +#ifdef ENABLE_UT + TestXLogRecParseStateEventProbe(UTEST_EVENT_RTO_PAGEMGR_REDO_BEFORE_DISTRIBUTE_ITEMS, + __FUNCTION__, preState); +#endif + + PageManagerRedoParseState(preState); +#ifdef ENABLE_UT + TestXLogRecParseStateEventProbe(UTEST_EVENT_RTO_PAGEMGR_REDO_AFTER_DISTRIBUTE_ITEMS, + __FUNCTION__, preState); +#endif + } while (nextState != NULL); + } + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_9]); + RedoPageManagerDistributeBlockRecord(hashMap, NULL); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_9]); + return false; +} + +void RedoPageManagerMain() +{ + void **eleArry; + uint32 eleNum; + + (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + g_redoWorker->redoItemHash = PRRedoItemHashInitialize(g_redoWorker->oldCtx); + XLogParseBufferInitFunc(&(g_redoWorker->parseManager), MAX_PARSE_BUFF_NUM, &recordRefOperate, + RedoInterruptCallBack); + + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + while (SPSCBlockingQueueGetAll(g_redoWorker->queue, &eleArry, &eleNum)) { + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1], g_redoWorker->timeCostList[TIME_COST_STEP_2]); + bool isEnd = PageManagerRedoDistributeItems(eleArry, eleNum); + SPSCBlockingQueuePopN(g_redoWorker->queue, eleNum); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2]); + if (isEnd) + break; + + RedoInterruptCallBack(); + ADD_ABNORMAL_POSITION(5); + 
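+ /*
+  * Consumer-loop pattern shared by the pipeline stages (sketch, no new logic):
+  * SPSCBlockingQueueGetAll() exposes a batch without dequeuing, the batch is
+  * fully processed, and only then does SPSCBlockingQueuePopN() release the
+  * slots, so the single producer never reuses entries still being replayed.
+  */
+ 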
GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + } + + RedoThrdWaitForExit(g_redoWorker); + XLogParseBufferDestoryFunc(&(g_redoWorker->parseManager)); +} + +bool IsXactXlog(const XLogReaderState *record) +{ + if (XLogRecGetRmid(record) != RM_XACT_ID) { + return false; + } + return true; +} + +void TrxnManagerProcLsnForwarder(RedoItem *lsnForwarder) +{ + SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); + (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); + + AddPageRedoItem(g_dispatcher->trxnLine.redoThd, lsnForwarder); +} + +void TrxnManagerProcCleanupMark(RedoItem *cleanupMark) +{ + g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + AddPageRedoItem(g_dispatcher->trxnLine.redoThd, cleanupMark); + ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]TrxnManagerProcCleanupMark has cleaned InvalidPages"))); +} + +bool TrxnManagerDistributeItemsBeforeEnd(RedoItem *item) +{ + bool exitFlag = false; + if (item == &g_redoEndMark) { + exitFlag = true; + } else if (item == (RedoItem *)&g_GlobalLsnForwarder) { + TrxnManagerProcLsnForwarder(item); + } else if (item == (RedoItem *)&g_cleanupMark) { + TrxnManagerProcCleanupMark(item); + } else if (item == (void *)&g_closefdMark) { + smgrcloseall(); + } else if (item == (void *)&g_cleanInvalidPageMark) { + forget_range_invalid_pages((void *)item); + } else { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]); + if (IsCheckPoint(&item->record) || IsTableSpaceDrop(&item->record) || IsTableSpaceCreate(&item->record) || + (IsXactXlog(&item->record) && XactWillRemoveRelFiles(&item->record)) || IsBarrierRelated(&item->record)) { + uint32 relCount; + do { + RedoInterruptCallBack(); + relCount = pg_atomic_read_u32(&item->record.refcount); + } while (relCount != 1); + } + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4], g_redoWorker->timeCostList[TIME_COST_STEP_5]); +#ifdef ENABLE_UT + TestXLogReaderProbe(UTEST_EVENT_RTO_TRXNMGR_DISTRIBUTE_ITEMS, + __FUNCTION__, &item->record); +#endif + AddPageRedoItem(g_dispatcher->trxnLine.redoThd, item); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]); + } + return exitFlag; +} + +void GlobalLsnUpdate() +{ + t_thrd.xlog_cxt.standbyState = g_redoWorker->standbyState; + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + if (LsnUpdate()) { + ExtremRtoUpdateMinCheckpoint(); + CheckRecoveryConsistency(); + } + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); +} + +bool LsnUpdate() +{ + XLogRecPtr minStart = MAX_XLOG_REC_PTR; + XLogRecPtr minEnd = MAX_XLOG_REC_PTR; + GetReplayedRecPtr(&minStart, &minEnd); + if ((minEnd != MAX_XLOG_REC_PTR) && (minStart != MAX_XLOG_REC_PTR)) { + SetXLogReplayRecPtr(minStart, minEnd); + return true; + } + return false; +} + +static void TrxnMangerQueueCallBack() +{ + GlobalLsnUpdate(); + HandlePageRedoInterrupts(); +} + +void TrxnManagerMain() +{ + (void)RegisterRedoInterruptCallBack(TrxnMangerQueueCallBack); + t_thrd.xlog_cxt.max_page_flush_lsn = get_global_max_page_flush_lsn(); + ereport(LOG, + (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("TrxnManagerMain: first get_global_max_page_flush_lsn %08X/%08X", + (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn >> 32), (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn)))); + while (true) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + if (FORCE_FINISH_ENABLED && t_thrd.xlog_cxt.max_page_flush_lsn == MAX_XLOG_REC_PTR) { + t_thrd.xlog_cxt.max_page_flush_lsn = 
get_global_max_page_flush_lsn(); + if (t_thrd.xlog_cxt.max_page_flush_lsn != MAX_XLOG_REC_PTR) { + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("TrxnManagerMain: second get_global_max_page_flush_lsn %08X/%08X", + (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn >> 32), + (uint32)(t_thrd.xlog_cxt.max_page_flush_lsn)))); + } + } + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + if (!SPSCBlockingQueueIsEmpty(g_redoWorker->queue)) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + RedoItem *item = (RedoItem *)SPSCBlockingQueueTop(g_redoWorker->queue); + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1], + g_redoWorker->timeCostList[TIME_COST_STEP_2]); + bool isEnd = TrxnManagerDistributeItemsBeforeEnd(item); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2]); + SPSCBlockingQueuePop(g_redoWorker->queue); + + if (isEnd) { + break; + } + } else { + long sleeptime = 80 * 1000; + pg_usleep(sleeptime); + } + + ADD_ABNORMAL_POSITION(2); + RedoInterruptCallBack(); + } + + RedoThrdWaitForExit(g_redoWorker); + GlobalLsnUpdate(); +} + +void TrxnWorkerProcLsnForwarder(RedoItem *lsnForwarder) +{ + SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); + (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); +} + +void TrxnWorkNotifyRedoWorker() +{ + for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; ++i) { + if (g_dispatcher->allWorkers[i]->role == REDO_PAGE_WORKER || + g_dispatcher->allWorkers[i]->role == REDO_PAGE_MNG) { + pg_atomic_write_u32(&(g_dispatcher->allWorkers[i]->fullSyncFlag), 0); + } + } +} + +void TrxnWorkrProcCleanupMark(RedoItem *cleanupMark) +{ + g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]TrxnWorkrProcCleanupMark has cleaned InvalidPages"))); +} + +bool CheckFullSyncCheckpoint(RedoItem *item) +{ + if (!IsCheckPoint(&(item->record))) { + return true; + } + + if (XLByteLE(item->record.ReadRecPtr, t_thrd.shemem_ptr_cxt.ControlFile->checkPoint)) { + return true; + } + + return false; +} + +void TrxnWorkMain() +{ +#ifdef ENABLE_MOT + MOTBeginRedoRecovery(); +#endif + (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + if (ParseStateWithoutCache()) { + XLogRedoBufferInitFunc(&(g_redoWorker->bufferManager), MAX_LOCAL_BUFF_NUM, &recordRefOperate, + RedoInterruptCallBack); + } + + RedoItem *item = NULL; + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + while ((item = (RedoItem *)SPSCBlockingQueueTop(g_redoWorker->queue)) != &g_redoEndMark) { + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1], g_redoWorker->timeCostList[TIME_COST_STEP_2]); + if ((void *)item == (void *)&g_GlobalLsnForwarder) { + TrxnWorkerProcLsnForwarder((RedoItem *)item); + SPSCBlockingQueuePop(g_redoWorker->queue); + } else if ((void *)item == (void *)&g_cleanupMark) { + TrxnWorkrProcCleanupMark((RedoItem *)item); + SPSCBlockingQueuePop(g_redoWorker->queue); + } else if ((void *)item == (void *)&g_closefdMark) { + smgrcloseall(); + SPSCBlockingQueuePop(g_redoWorker->queue); + } else if ((void *)item == (void *)&g_cleanInvalidPageMark) { + forget_range_invalid_pages((void *)item); + SPSCBlockingQueuePop(g_redoWorker->queue); + } else { + t_thrd.xlog_cxt.needImmediateCkp = item->needImmediateCheckpoint; + bool fullSync = item->record.isFullSync; + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + ApplySinglePageRecord(item); + 
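+ /*
+  * Ordering note (descriptive, inferred from the surrounding calls): the item
+  * is popped and its (ReadRecPtr, EndRecPtr) pair published only after
+  * ApplySinglePageRecord() returns, so GetCompletedReadEndPtr() never reports
+  * a record as replayed before its effects are in the buffers.
+  */
+ 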
CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3], + g_redoWorker->timeCostList[TIME_COST_STEP_4]); + SPSCBlockingQueuePop(g_redoWorker->queue); + SetCompletedReadEndPtr(g_redoWorker, item->record.ReadRecPtr, item->record.EndRecPtr); + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4], + g_redoWorker->timeCostList[TIME_COST_STEP_5]); + if (fullSync) { + Assert(CheckFullSyncCheckpoint(item)); + TrxnWorkNotifyRedoWorker(); + } + + if (XactHasSegpageRelFiles(&item->record)) { + uint32 expected = 1; + pg_atomic_compare_exchange_u32((volatile uint32 *)&(g_dispatcher->segpageXactDoneFlag), &expected, 0); + } + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]); + DereferenceRedoItem(item); + RedoInterruptCallBack(); + } + ADD_ABNORMAL_POSITION(3); + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2], g_redoWorker->timeCostList[TIME_COST_STEP_1]); + } + + SPSCBlockingQueuePop(g_redoWorker->queue); + if (ParseStateWithoutCache()) + XLogRedoBufferDestoryFunc(&(g_redoWorker->bufferManager)); +#ifdef ENABLE_MOT + MOTEndRedoRecovery(); +#endif +} + +void RedoPageWorkerCheckPoint(const XLogRecParseState *redoblockstate) +{ + CheckPoint checkPoint; + Assert(IsCheckPoint(redoblockstate)); + XLogSynAllBuffer(); + Assert(redoblockstate->blockparse.extra_rec.blockxlogcommon.maindatalen >= sizeof(checkPoint)); + errno_t rc = memcpy_s(&checkPoint, sizeof(checkPoint), + redoblockstate->blockparse.extra_rec.blockxlogcommon.maindata, sizeof(checkPoint)); + securec_check(rc, "\0", "\0"); + if (IsRestartPointSafe(checkPoint.redo)) { + pg_atomic_write_u64(&g_redoWorker->lastCheckedRestartPoint, redoblockstate->blockparse.blockhead.start_ptr); + } + + UpdateTimeline(&checkPoint); + +#ifdef USE_ASSERT_CHECKING + int printLevel = WARNING; +#else + int printLevel = DEBUG1; +#endif + if (log_min_messages <= printLevel) { + GetThreadBufferLeakNum(); + } +} + +void PageWorkerProcLsnForwarder(RedoItem *lsnForwarder) +{ + SetCompletedReadEndPtr(g_redoWorker, lsnForwarder->record.ReadRecPtr, lsnForwarder->record.EndRecPtr); + (void)pg_atomic_sub_fetch_u32(&lsnForwarder->record.refcount, 1); +} + +bool XlogNeedUpdateFsm(XLogRecParseState *procState, RedoBufferInfo *bufferinfo) +{ + XLogBlockHead *blockhead = &procState->blockparse.blockhead; + if (bufferinfo->pageinfo.page == NULL || !(bufferinfo->dirtyflag) || blockhead->forknum != MAIN_FORKNUM || + XLogBlockHeadGetValidInfo(blockhead) != BLOCK_DATA_MAIN_DATA_TYPE || blockhead->bucketNode != InvalidBktId) { + return false; + } + + Size freespace = PageGetHeapFreeSpace(bufferinfo->pageinfo.page); + + RmgrId rmid = XLogBlockHeadGetRmid(blockhead); + if (rmid == RM_HEAP2_ID) { + uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; + if (info == XLOG_HEAP2_CLEAN) { + return true; + } else if ((info == XLOG_HEAP2_MULTI_INSERT) && (freespace < BLCKSZ / 5)) { + return true; + } + + } else if (rmid == RM_HEAP_ID) { + uint8 info = XLogBlockHeadGetInfo(blockhead) & ~XLR_INFO_MASK; + if ((info == XLOG_HEAP_INSERT || info == XLOG_HEAP_UPDATE) && (freespace < BLCKSZ / 5)) { + return true; + } + } + + return false; +} + +void RedoPageWorkerRedoBcmBlock(XLogRecParseState *procState) +{ + RmgrId rmid = XLogBlockHeadGetRmid(&procState->blockparse.blockhead); + if (rmid == RM_HEAP2_ID) { + RelFileNode node; + node.spcNode = procState->blockparse.blockhead.spcNode; + node.dbNode = procState->blockparse.blockhead.dbNode; + node.relNode = procState->blockparse.blockhead.relNode; + node.bucketNode = 
procState->blockparse.blockhead.bucketNode; + XLogBlockNewCuParse *newCuParse = &(procState->blockparse.extra_rec.blocknewcu); + uint8 info = XLogBlockHeadGetInfo(&procState->blockparse.blockhead) & ~XLR_INFO_MASK; + switch (info & XLOG_HEAP_OPMASK) { + case XLOG_HEAP2_BCM: { + xl_heap_bcm *xlrec = (xl_heap_bcm *)(newCuParse->main_data); + heap_bcm_redo(xlrec, node, procState->blockparse.blockhead.end_ptr); + break; + } + case XLOG_HEAP2_LOGICAL_NEWPAGE: { + Assert(IsHeapFileNode(node)); + xl_heap_logical_newpage *xlrec = (xl_heap_logical_newpage *)(newCuParse->main_data); + char *cuData = newCuParse->main_data + SizeOfHeapLogicalNewPage; + heap_xlog_bcm_new_page(xlrec, node, cuData); + break; + } + default: + break; + } + } +} + +void RedoPageWorkerMain() +{ + (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + + if (ParseStateWithoutCache()) { + XLogRedoBufferInitFunc(&(g_redoWorker->bufferManager), MAX_LOCAL_BUFF_NUM, &recordRefOperate, + RedoInterruptCallBack); + } + + XLogRecParseState *redoblockstateHead = NULL; + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); + while ((redoblockstateHead = (XLogRecParseState *)SPSCBlockingQueueTop(g_redoWorker->queue)) != + (XLogRecParseState *)&g_redoEndMark) { + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1], g_redoWorker->timeCostList[TIME_COST_STEP_2]); + if ((void *)redoblockstateHead == (void *)&g_cleanupMark) { + g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + SPSCBlockingQueuePop(g_redoWorker->queue); + ereport(LOG, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]RedoPageWorkerMain has cleaned InvalidPages"))); + continue; + } + + if ((void *)redoblockstateHead == (void *)&g_closefdMark) { + smgrcloseall(); + SPSCBlockingQueuePop(g_redoWorker->queue); + continue; + } + + if ((void *)redoblockstateHead == (void *)&g_cleanInvalidPageMark) { + forget_range_invalid_pages((void *)redoblockstateHead); + SPSCBlockingQueuePop(g_redoWorker->queue); + continue; + } + if ((void *)redoblockstateHead == (void *)&g_GlobalLsnForwarder) { + PageWorkerProcLsnForwarder((RedoItem *)redoblockstateHead); + SPSCBlockingQueuePop(g_redoWorker->queue); + continue; + } + RedoBufferInfo bufferinfo = {0}; + bool notfound = false; + bool updateFsm = false; + + XLogRecParseState *procState = redoblockstateHead; + + MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); + while (procState != NULL) { + XLogRecParseState *redoblockstate = procState; + g_redoWorker->curRedoBlockState = (XLogBlockDataParse*)(&redoblockstate->blockparse.extra_rec); + procState = (XLogRecParseState *)procState->nextrecord; + + switch (XLogBlockHeadGetValidInfo(&redoblockstate->blockparse.blockhead)) { + case BLOCK_DATA_MAIN_DATA_TYPE: + case BLOCK_DATA_UNDO_TYPE: + case BLOCK_DATA_VM_TYPE: + case BLOCK_DATA_FSM_TYPE: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + notfound = XLogBlockRedoForExtremeRTO(redoblockstate, &bufferinfo, notfound, + g_redoWorker->timeCostList[TIME_COST_STEP_4], g_redoWorker->timeCostList[TIME_COST_STEP_5]); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + break; + case BLOCK_DATA_XLOG_COMMON_TYPE: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + RedoPageWorkerCheckPoint(redoblockstate); + SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, + redoblockstate->blockparse.blockhead.end_ptr); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + break; + case BLOCK_DATA_DDL_TYPE: + 
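+ /*
+  * DDL records carry no page image here: the worker only invalidates cached
+  * state via XLogForgetDDLRedo() and then publishes progress, mirroring the
+  * checkpoint case above (explanatory comment only).
+  */
+ 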
GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + XLogForgetDDLRedo(redoblockstate); + SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, + redoblockstate->blockparse.blockhead.end_ptr); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + break; + case BLOCK_DATA_DROP_DATABASE_TYPE: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + XLogDropDatabase(redoblockstate->blockparse.blockhead.dbNode); + SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, + redoblockstate->blockparse.blockhead.end_ptr); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + break; + case BLOCK_DATA_NEWCU_TYPE: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + RedoPageWorkerRedoBcmBlock(redoblockstate); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + break; + case BLOCK_DATA_SEG_SPACE_DROP: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + XLogDropSegmentSpace(redoblockstate->blockparse.blockhead.spcNode, + redoblockstate->blockparse.blockhead.dbNode); + SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, + redoblockstate->blockparse.blockhead.end_ptr); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + break; + case BLOCK_DATA_SEG_SPACE_SHRINK: + XLogDropSpaceShrink(redoblockstate); + SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, + redoblockstate->blockparse.blockhead.end_ptr); + break; + case BLOCK_DATA_BARRIER_TYPE: + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + SetCompletedReadEndPtr(g_redoWorker, redoblockstate->blockparse.blockhead.start_ptr, + redoblockstate->blockparse.blockhead.end_ptr); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + break; + default: + break; + } + } + (void)MemoryContextSwitchTo(oldCtx); + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_7]); + updateFsm = XlogNeedUpdateFsm(redoblockstateHead, &bufferinfo); + bool needWait = redoblockstateHead->isFullSync; + if (needWait) { + pg_atomic_write_u32(&g_redoWorker->fullSyncFlag, 1); + } + XLogBlockParseStateRelease(redoblockstateHead); + /* the same page */ + ExtremeRtoFlushBuffer(&bufferinfo, updateFsm); + SPSCBlockingQueuePop(g_redoWorker->queue); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_7]); + pg_memory_barrier(); + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]); + uint32 val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); + while (val != 0) { + RedoInterruptCallBack(); + val = pg_atomic_read_u32(&g_redoWorker->fullSyncFlag); + } + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]); + RedoInterruptCallBack(); + CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2], g_redoWorker->timeCostList[TIME_COST_STEP_1]); + ADD_ABNORMAL_POSITION(4); + } + + SPSCBlockingQueuePop(g_redoWorker->queue); + if (ParseStateWithoutCache()) + XLogRedoBufferDestoryFunc(&(g_redoWorker->bufferManager)); +} + +void PutRecordToReadQueue(XLogReaderState *recordreader) +{ + SPSCBlockingQueuePut(g_dispatcher->readLine.readPageThd->queue, recordreader); +} + +inline void InitXLogRecordReadBuffer(XLogReaderState **initreader) +{ + XLogReaderState *newxlogreader; + XLogReaderState *readstate = g_dispatcher->rtoXlogBufState.initreader; + newxlogreader = NewReaderState(readstate); + g_dispatcher->rtoXlogBufState.initreader = NULL; + PutRecordToReadQueue(readstate); + SetCompletedReadEndPtr(g_redoWorker, 
readstate->ReadRecPtr, readstate->EndRecPtr); + *initreader = newxlogreader; +} + +void StartupSendFowarder(RedoItem *item) +{ + for (uint32 i = 0; i < g_dispatcher->pageLineNum; ++i) { + AddPageRedoItem(g_dispatcher->pageLines[i].batchThd, item); + } + + AddPageRedoItem(g_dispatcher->trxnLine.managerThd, item); +} + +void SendLsnFowarder() +{ + // update and read in the same thread, so no need atomic operation + g_GlobalLsnForwarder.record.ReadRecPtr = g_redoWorker->lastReplayedReadRecPtr; + g_GlobalLsnForwarder.record.EndRecPtr = g_redoWorker->lastReplayedEndRecPtr; + g_GlobalLsnForwarder.record.refcount = get_real_recovery_parallelism() - XLOG_READER_NUM; + g_GlobalLsnForwarder.record.isDecode = true; + PutRecordToReadQueue(&g_GlobalLsnForwarder.record); +} + +static inline bool ReadPageWorkerStop() +{ + return g_dispatcher->recoveryStop; +} + +void PushToWorkerLsn(bool force) +{ + const uint32 max_record_count = PAGE_WORK_QUEUE_SIZE; + static uint32 cur_recor_count = 0; + + cur_recor_count++; + + if (!IsExtremeRtoRunning()) { + return; + } + + if (force) { + uint32 refCount; + do { + refCount = pg_atomic_read_u32(&g_GlobalLsnForwarder.record.refcount); + RedoInterruptCallBack(); + } while (refCount != 0 && !ReadPageWorkerStop()); + cur_recor_count = 0; + SendLsnFowarder(); + } else { + uint32 refCount = pg_atomic_read_u32(&g_GlobalLsnForwarder.record.refcount); + + if (refCount != 0 || cur_recor_count < max_record_count) { + return; + } + + SendLsnFowarder(); + cur_recor_count = 0; + } +} + +void ResetRtoXlogReadBuf(XLogRecPtr targetPagePtr) +{ + uint32 startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); + if (startreadworker == WORKER_STATE_STOP) { + WalRcvCtlAcquireExitLock(); + WalRcvCtlBlock *walrcb = getCurrentWalRcvCtlBlock(); + + if (walrcb == NULL) { + WalRcvCtlReleaseExitLock(); + return; + } + + int64 walwriteoffset; + XLogRecPtr startptr; + SpinLockAcquire(&walrcb->mutex); + walwriteoffset = walrcb->walWriteOffset; + startptr = walrcb->walStart; + SpinLockRelease(&walrcb->mutex); + + if (XLByteLT(startptr, targetPagePtr)) { + WalRcvCtlReleaseExitLock(); + return; + } + + + for (uint32 i = 0; i < MAX_ALLOC_SEGNUM; ++i) { + pg_atomic_write_u32(&(g_recordbuffer->xlogsegarray[i].bufState), NONE); + } + + XLogSegNo segno; + XLByteToSeg(targetPagePtr, segno); + g_recordbuffer->xlogsegarray[g_recordbuffer->applyindex].segno = segno; + g_recordbuffer->xlogsegarray[g_recordbuffer->applyindex].readlen = targetPagePtr % XLOG_SEG_SIZE; + + pg_atomic_write_u32(&(g_recordbuffer->readindex), g_recordbuffer->applyindex); + pg_atomic_write_u32(&(g_recordbuffer->xlogsegarray[g_recordbuffer->readindex].bufState), APPLYING); + + pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_RUN); + WalRcvCtlReleaseExitLock(); + } +} + +RecordBufferAarray *GetCurrentSegmentBuf(XLogRecPtr targetPagePtr) +{ + Assert(g_recordbuffer->applyindex < MAX_ALLOC_SEGNUM); + uint32 applyindex = g_recordbuffer->applyindex; + RecordBufferAarray *cursegbuffer = &g_recordbuffer->xlogsegarray[applyindex]; + uint32 bufState = pg_atomic_read_u32(&(cursegbuffer->bufState)); + + if (bufState != APPLYING) { + return NULL; + } + uint32 targetPageOff = (targetPagePtr % XLOG_SEG_SIZE); + XLogSegNo targetSegNo; + XLByteToSeg(targetPagePtr, targetSegNo); + if (cursegbuffer->segno == targetSegNo) { + cursegbuffer->segoffset = targetPageOff; + return cursegbuffer; + } else if (cursegbuffer->segno + 1 == targetSegNo) { + Assert(targetPageOff == 0); + pg_atomic_write_u32(&(cursegbuffer->bufState), 
APPLIED); + if ((applyindex + 1) == MAX_ALLOC_SEGNUM) { + applyindex = 0; + } else { + applyindex++; + } + + pg_atomic_write_u32(&(g_recordbuffer->applyindex), applyindex); + cursegbuffer = &g_recordbuffer->xlogsegarray[applyindex]; + bufState = pg_atomic_read_u32(&(cursegbuffer->bufState)); + if (bufState != APPLYING) { + return NULL; + } + + Assert(cursegbuffer->segno == targetSegNo); + cursegbuffer->segoffset = targetPageOff; + return cursegbuffer; + } else { + ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("SetReadBufferForExtRto targetPagePtr:%lu", targetPagePtr))); + DumpExtremeRtoReadBuf(); + t_thrd.xlog_cxt.failedSources |= XLOG_FROM_STREAM; + return NULL; + } +} + +static const int MAX_WAIT_TIMS = 512; + +bool XLogPageReadForExtRto(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen) +{ + uint32 startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); + if (startreadworker == WORKER_STATE_RUN) { + RecordBufferAarray *cursegbuffer = GetCurrentSegmentBuf(targetPagePtr); + if (cursegbuffer == NULL) { + return false; + } + + uint32 readlen = pg_atomic_read_u32(&(cursegbuffer->readlen)); + + uint32 waitcount = 0; + while (readlen < (cursegbuffer->segoffset + reqLen)) { + readlen = pg_atomic_read_u32(&(cursegbuffer->readlen)); + if (waitcount >= MAX_WAIT_TIMS) { + return false; + } + waitcount++; + } + + Assert(cursegbuffer->segoffset == (targetPagePtr % XLogSegSize)); + xlogreader->readBuf = cursegbuffer->readsegbuf + cursegbuffer->segoffset; + return true; + } + + return false; +} + +void XLogReadWorkerSegFallback(XLogSegNo lastRplSegNo) +{ + errno_t errorno = EOK; + uint32 readindex = pg_atomic_read_u32(&(g_recordbuffer->readindex)); + uint32 applyindex = pg_atomic_read_u32(&(g_recordbuffer->applyindex)); + RecordBufferAarray *readseg = &g_recordbuffer->xlogsegarray[readindex]; + RecordBufferAarray *applyseg = &g_recordbuffer->xlogsegarray[applyindex]; + + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("XLogReadWorkerSegFallback: readindex: %u, readseg[%lu,%lu,%u,%u], applyindex: %u," + "applyseg[%lu,%lu,%u,%u]", + readindex, readseg->segno, readseg->segoffset, readseg->readlen, readseg->bufState, applyindex, + applyseg->segno, applyseg->segoffset, applyseg->readlen, applyseg->bufState))); + + pg_atomic_write_u32(&(g_recordbuffer->readindex), applyindex); + pg_atomic_write_u32(&(readseg->bufState), APPLIED); + applyseg->segno = lastRplSegNo; + applyseg->readlen = applyseg->segoffset; + errorno = memset_s(applyseg->readsegbuf, XLOG_SEG_SIZE, 0, XLOG_SEG_SIZE); + securec_check(errorno, "", ""); +} + +bool CloseReadFile() +{ + if (t_thrd.xlog_cxt.readFile >= 0) { + close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + return true; + } + return false; +} + +void DispatchCleanupMarkToAllRedoWorker() +{ + for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { + PageRedoWorker *worker = g_dispatcher->allWorkers[i]; + if (worker->role == REDO_PAGE_WORKER) { + SPSCBlockingQueuePut(worker->queue, &g_cleanupMark); + } + } +} + +void DispatchClosefdMarkToAllRedoWorker() +{ + for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { + PageRedoWorker *worker = g_dispatcher->allWorkers[i]; + if (worker->role == REDO_PAGE_WORKER || worker->role == REDO_PAGE_MNG || + worker->role == REDO_TRXN_MNG || worker->role == REDO_TRXN_WORKER) { + SPSCBlockingQueuePut(worker->queue, &g_closefdMark); + } + } +} + +void DispatchCleanInvalidPageMarkToAllRedoWorker(RepairFileKey key) +{ + for (uint32 i = 0; i < 
g_dispatcher->allWorkersCnt; i++) { + PageRedoWorker *worker = g_dispatcher->allWorkers[i]; + if (worker->role == REDO_PAGE_WORKER) { + errno_t rc = memcpy_s((char*)&g_cleanInvalidPageMark, + sizeof(RepairFileKey), (char*)&key, sizeof(RepairFileKey)); + securec_check(rc, "", ""); + SPSCBlockingQueuePut(worker->queue, &g_cleanInvalidPageMark); + } + } +} + +void WaitAllRedoWorkerIdle() +{ + instr_time startTime; + instr_time endTime; + bool allIdle = false; + INSTR_TIME_SET_CURRENT(startTime); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("WaitAllRedoWorkerIdle begin, startTime: %lu us", INSTR_TIME_GET_MICROSEC(startTime)))); + while (!allIdle) { + allIdle = true; + for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { + PageRedoWorker *worker = g_dispatcher->allWorkers[i]; + if (worker->role == REDO_READ_WORKER || worker->role == REDO_READ_MNG) { + continue; + } + if (!RedoWorkerIsIdle(worker)) { + allIdle = false; + break; + } + } + RedoInterruptCallBack(); + } + INSTR_TIME_SET_CURRENT(endTime); + INSTR_TIME_SUBTRACT(endTime, startTime); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("WaitAllRedoWorkerIdle end, cost time: %lu us", INSTR_TIME_GET_MICROSEC(endTime)))); +} + +void WaitAllReplayWorkerIdle() +{ + instr_time startTime; + instr_time endTime; + bool allIdle = false; + INSTR_TIME_SET_CURRENT(startTime); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("WaitAllReplayWorkerIdle begin, startTime: %lu us", INSTR_TIME_GET_MICROSEC(startTime)))); + while (!allIdle) { + allIdle = true; + for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { + PageRedoWorker *worker = g_dispatcher->allWorkers[i]; + if (worker->role == REDO_READ_WORKER || worker->role == REDO_READ_MNG || + worker->role == REDO_READ_PAGE_WORKER) { + continue; + } + if (!RedoWorkerIsIdle(worker)) { + allIdle = false; + break; + } + } + RedoInterruptCallBack(); + } + INSTR_TIME_SET_CURRENT(endTime); + INSTR_TIME_SUBTRACT(endTime, startTime); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("WaitAllReplayWorkerIdle end, cost time: %lu us", INSTR_TIME_GET_MICROSEC(endTime)))); +} + + +void XLogForceFinish(XLogReaderState *xlogreader, TermFileData *term_file) +{ + bool closed = false; + uint32 termId = term_file->term; + XLogSegNo lastRplSegNo; + + pg_atomic_write_u32(&(extreme_rto::g_recordbuffer->readWorkerState), extreme_rto::WORKER_STATE_STOPPING); + while (pg_atomic_read_u32(&(extreme_rto::g_recordbuffer->readWorkerState)) != WORKER_STATE_STOP) { + RedoInterruptCallBack(); + }; + ShutdownWalRcv(); + ShutdownDataRcv(); + pg_atomic_write_u32(&(g_recordbuffer->readSource), XLOG_FROM_PG_XLOG); + + PushToWorkerLsn(true); + g_cleanupMark.record.isDecode = true; + PutRecordToReadQueue(&g_cleanupMark.record); + WaitAllRedoWorkerIdle(); + + XLogRecPtr lastRplReadLsn; + XLogRecPtr lastRplEndLsn = GetXLogReplayRecPtr(NULL, &lastRplReadLsn); + XLogRecPtr receivedUpto = GetWalRcvWriteRecPtr(NULL); + XLogRecPtr endRecPtr = xlogreader->EndRecPtr; + ereport(WARNING, (errcode(ERRCODE_LOG), errmsg("[ForceFinish]ArchiveXlogForForceFinishRedo in extremeRTO " + "lastRplReadLsn:%08X/%08X, lastRplEndLsn:%08X/%08X, receivedUpto:%08X/%08X, ReadRecPtr:%08X/%08X, " + "EndRecPtr:%08X/%08X, readOff:%u, latestValidRecord:%08X/%08X", + (uint32)(lastRplReadLsn >> 32), (uint32)lastRplReadLsn,(uint32)(lastRplEndLsn >> 32), (uint32)lastRplEndLsn, + (uint32)(receivedUpto >> 32), (uint32)receivedUpto,(uint32)(xlogreader->ReadRecPtr >> 32), + 
(uint32)xlogreader->ReadRecPtr, (uint32)(xlogreader->EndRecPtr >> 32), (uint32)xlogreader->EndRecPtr,
+ xlogreader->readOff, (uint32)(latestValidRecord >> 32), (uint32)latestValidRecord)));
+ DumpExtremeRtoReadBuf();
+ xlogreader->readOff = INVALID_READ_OFF;
+ XLByteToSeg(endRecPtr, lastRplSegNo);
+ XLogReadWorkerSegFallback(lastRplSegNo);
+
+ closed = CloseReadFile();
+ CopyXlogForForceFinishRedo(lastRplSegNo, termId, xlogreader, endRecPtr);
+ RenameXlogForForceFinishRedo(lastRplSegNo, xlogreader->readPageTLI, termId);
+ if (closed) {
+ ReOpenXlog(xlogreader);
+ }
+ t_thrd.xlog_cxt.invaildPageCnt = 0;
+ XLogCheckInvalidPages();
+ SetSwitchHistoryFile(endRecPtr, receivedUpto, termId);
+ t_thrd.xlog_cxt.invaildPageCnt = 0;
+ set_wal_rcv_write_rec_ptr(endRecPtr);
+ t_thrd.xlog_cxt.receivedUpto = endRecPtr;
+ pg_atomic_write_u32(&(g_instance.comm_cxt.localinfo_cxt.is_finish_redo), 0);
+ ereport(WARNING,
+ (errcode(ERRCODE_LOG), errmsg("[ForceFinish]ArchiveXlogForForceFinishRedo in extremeRTO is over")));
+}
+
+void CleanUpReadPageWorkerQueue()
+{
+ SPSCBlockingQueue *queue = g_dispatcher->readLine.readPageThd->queue;
+ uint32 state;
+ do {
+ while (!SPSCBlockingQueueIsEmpty(queue)) {
+ XLogReaderState *xlogreader = reinterpret_cast<XLogReaderState *>(SPSCBlockingQueueTake(queue));
+ if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_redoEndMark.record)) ||
+ xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record)) ||
+ xlogreader == reinterpret_cast<XLogReaderState *>(&(g_cleanupMark.record))) {
+ if (xlogreader == reinterpret_cast<XLogReaderState *>(&(g_GlobalLsnForwarder.record))) {
+ pg_atomic_write_u32(&g_GlobalLsnForwarder.record.refcount, 0);
+ }
+ continue;
+ }
+
+ RedoItem *item = GetRedoItemPtr(xlogreader);
+ FreeRedoItem(item);
+ }
+
+ RedoInterruptCallBack();
+ state = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readPageWorkerState);
+ } while (state != WORKER_STATE_EXIT);
+}
+
+void ExtremeRtoStopHere()
+{
+ if ((get_real_recovery_parallelism() > 1) && (GetBatchCount() > 0)) {
+ g_dispatcher->recoveryStop = true;
+ CleanUpReadPageWorkerQueue();
+ }
+}
+
+static void CheckAndDoForceFinish(XLogReaderState *xlogreader)
+{
+ TermFileData term_file;
+ if (CheckForForceFinishRedoTrigger(&term_file)) {
+ ereport(WARNING,
+ (errmsg("[ForceFinish] force finish triggered in XLogReadPageWorkerMain, ReadRecPtr:%08X/%08X, "
+ "EndRecPtr:%08X/%08X, StandbyMode:%u, startup_processing:%u, dummyStandbyMode:%u",
+ (uint32)(t_thrd.xlog_cxt.ReadRecPtr >> 32), (uint32)t_thrd.xlog_cxt.ReadRecPtr,
+ (uint32)(t_thrd.xlog_cxt.EndRecPtr >> 32), (uint32)t_thrd.xlog_cxt.EndRecPtr,
+ t_thrd.xlog_cxt.StandbyMode, t_thrd.xlog_cxt.startup_processing, dummyStandbyMode)));
+ XLogForceFinish(xlogreader, &term_file);
+ }
+}
+
+/* read xlog in parallel */
+void XLogReadPageWorkerMain()
+{
+ XLogReaderState *xlogreader = NULL;
+
+ (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts);
+
+ g_recordbuffer = &g_dispatcher->rtoXlogBufState;
+ GetRecoveryLatch();
+ /* init readstate */
+ InitXLogRecordReadBuffer(&xlogreader);
+
+ pg_atomic_write_u32(&(g_recordbuffer->readPageWorkerState), WORKER_STATE_RUN);
+ if (IsRecoveryDone()) {
+ t_thrd.xlog_cxt.readSource = XLOG_FROM_STREAM;
+ t_thrd.xlog_cxt.XLogReceiptSource = XLOG_FROM_STREAM;
+ pg_atomic_write_u32(&(g_recordbuffer->readSource), XLOG_FROM_STREAM);
+ }
+
+ XLogRecord *record = XLogParallelReadNextRecord(xlogreader);
+ while (record != NULL) {
+ if (ReadPageWorkerStop()) {
+ break;
+ }
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]);
+ XLogReaderState *newxlogreader = 
NewReaderState(xlogreader);
+ CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3], g_redoWorker->timeCostList[TIME_COST_STEP_4]);
+ PutRecordToReadQueue(xlogreader);
+ CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4], g_redoWorker->timeCostList[TIME_COST_STEP_5]);
+ xlogreader = newxlogreader;
+
+ g_redoWorker->lastReplayedReadRecPtr = xlogreader->ReadRecPtr;
+ g_redoWorker->lastReplayedEndRecPtr = xlogreader->EndRecPtr;
+
+ if (FORCE_FINISH_ENABLED) {
+ CheckAndDoForceFinish(xlogreader);
+ }
+ CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5], g_redoWorker->timeCostList[TIME_COST_STEP_1]);
+ record = XLogParallelReadNextRecord(xlogreader);
+ CountAndGetRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1], g_redoWorker->timeCostList[TIME_COST_STEP_2]);
+ PushToWorkerLsn(false);
+ CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2]);
+ RedoInterruptCallBack();
+ ADD_ABNORMAL_POSITION(8);
+ }
+
+ uint32 workState = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState));
+ while (workState == WORKER_STATE_STOPPING) {
+ workState = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState));
+ }
+
+ if (workState != WORKER_STATE_EXITING && workState != WORKER_STATE_EXIT) {
+ pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_EXITING);
+ }
+
+ if (!ReadPageWorkerStop()) {
+ /* notify exit */
+ PushToWorkerLsn(true);
+ g_redoEndMark.record = *xlogreader;
+ g_redoEndMark.record.isDecode = true;
+ PutRecordToReadQueue((XLogReaderState *)&g_redoEndMark.record);
+ }
+
+ ReLeaseRecoveryLatch();
+ pg_atomic_write_u32(&(g_recordbuffer->readPageWorkerState), WORKER_STATE_EXIT);
+}
+
+void HandleReadWorkerRunInterrupts()
+{
+ if (t_thrd.page_redo_cxt.got_SIGHUP) {
+ t_thrd.page_redo_cxt.got_SIGHUP = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
+
+ if (t_thrd.page_redo_cxt.shutdown_requested) {
+ ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG),
+ errmsg("page worker id %u exit for request", g_redoWorker->id)));
+
+ pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState),
+ PAGE_REDO_WORKER_EXIT);
+
+ proc_exit(1);
+ }
+}
+
+static void InitReadBuf(uint32 bufIndex, XLogSegNo segno)
+{
+ if (bufIndex == MAX_ALLOC_SEGNUM) {
+ bufIndex = 0;
+ }
+ const uint32 sleepTime = 50; /* 50 us */
+ RecordBufferAarray *nextreadseg = &g_recordbuffer->xlogsegarray[bufIndex];
+ pg_memory_barrier();
+
+ uint32 bufState = pg_atomic_read_u32(&(nextreadseg->bufState));
+ uint32 startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState));
+ while (bufState == APPLYING && startreadworker == WORKER_STATE_RUN) {
+ pg_usleep(sleepTime);
+ RedoInterruptCallBack();
+ bufState = pg_atomic_read_u32(&(nextreadseg->bufState));
+ startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState));
+ }
+
+ nextreadseg->readlen = 0;
+ nextreadseg->segno = segno;
+ nextreadseg->segoffset = 0;
+ pg_atomic_write_u32(&(nextreadseg->bufState), APPLYING);
+ pg_atomic_write_u32(&(g_recordbuffer->readindex), bufIndex);
+}
+
+static void XLogReadWorkRun()
+{
+ static uint32 waitcount = 0;
+ const uint32 sleepTime = 100; /* 100 us */
+ XLogSegNo targetSegNo;
+ uint32 writeoffset;
+ uint32 reqlen;
+ GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]);
+ uint32 readindex = pg_atomic_read_u32(&(g_recordbuffer->readindex));
+ Assert(readindex < MAX_ALLOC_SEGNUM);
+ pg_memory_barrier();
+ RecordBufferAarray *readseg = &g_recordbuffer->xlogsegarray[readindex];
+
+ XLogRecPtr receivedUpto = 
GetWalRcvWriteRecPtr(NULL); + XLByteToSeg(receivedUpto, targetSegNo); + + if (targetSegNo < readseg->segno) { + pg_usleep(sleepTime); + return; + } + + writeoffset = readseg->readlen; + if (targetSegNo != readseg->segno) { + reqlen = XLOG_SEG_SIZE - writeoffset; + } else { + uint32 targetPageOff = receivedUpto % XLOG_SEG_SIZE; + if (targetPageOff <= writeoffset) { + pg_usleep(sleepTime); + return; + } + reqlen = targetPageOff - writeoffset; + if (reqlen < XLOG_BLCKSZ) { + waitcount++; + uint32 flag = pg_atomic_read_u32(&g_readManagerTriggerFlag); + if (waitcount < MAX_WAIT_TIMS && flag == TRIGGER_NORMAL) { + pg_usleep(sleepTime); + return; + } + } + } + + waitcount = 0; + char *readBuf = readseg->readsegbuf + writeoffset; + XLogRecPtr targetSartPtr = readseg->segno * XLOG_SEG_SIZE + writeoffset; + uint32 readlen = 0; + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_2]); + bool result = XLogReadFromWriteBuffer(targetSartPtr, reqlen, readBuf, &readlen); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_2]); + if (!result) { + return; + } + + pg_atomic_write_u32(&(readseg->readlen), (writeoffset + readlen)); + if (readseg->readlen == XLOG_SEG_SIZE) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + InitReadBuf(readindex + 1, readseg->segno + 1); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); + } + + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); +} + +void XLogReadManagerResponseSignal(uint32 tgigger) +{ + switch (tgigger) { + case TRIGGER_PRIMARY: + break; + case TRIGGER_FAILOVER: + if (t_thrd.xlog_cxt.is_cascade_standby) { + SendPostmasterSignal(PMSIGNAL_UPDATE_PROMOTING); + t_thrd.xlog_cxt.is_cascade_standby = false; + if (t_thrd.postmaster_cxt.HaShmData->is_cross_region) { + t_thrd.xlog_cxt.is_hadr_main_standby = true; + SpinLockAcquire(&t_thrd.postmaster_cxt.HaShmData->mutex); + t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby = true; + SpinLockRelease(&t_thrd.postmaster_cxt.HaShmData->mutex); + } + t_thrd.xlog_cxt.failover_triggered = false; + SendNotifySignal(NOTIFY_STANDBY, g_instance.pid_cxt.StartupPID); + SendPostmasterSignal(PMSIGNAL_UPDATE_NORMAL); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("failover standby ready, notify postmaster to change state."))); + break; + } + t_thrd.xlog_cxt.failover_triggered = true; + SendPostmasterSignal(PMSIGNAL_UPDATE_PROMOTING); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("failover ready, notify postmaster to change state."))); + break; + case TRIGGER_SWITCHOVER: + t_thrd.xlog_cxt.switchover_triggered = true; + SendPostmasterSignal(PMSIGNAL_UPDATE_PROMOTING); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("switchover ready, notify postmaster to change state."))); + break; + default: + break; + } +} + +void XLogReadManagerProcInterrupt() +{ + if (t_thrd.page_redo_cxt.shutdown_requested) { + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("page worker id %u exit for request", g_redoWorker->id))); + + pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), + PAGE_REDO_WORKER_EXIT); + + proc_exit(1); + } + + if (t_thrd.page_redo_cxt.got_SIGHUP) { + t_thrd.page_redo_cxt.got_SIGHUP = false; + ProcessConfigFile(PGC_SIGHUP); + } +} + +void WaitPageReadWorkerExit() +{ + uint32 state; + do { + state = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readPageWorkerState); + RedoInterruptCallBack(); + } while (state != 
WORKER_STATE_EXIT); +} + +static void HandleExtremeRtoCascadeStandbyPromote(uint32 trigger) +{ + if (!t_thrd.xlog_cxt.is_cascade_standby || t_thrd.xlog_cxt.server_mode != STANDBY_MODE || + !IS_DN_MULTI_STANDYS_MODE()) { + return; + } + + ShutdownWalRcv(); + pg_atomic_write_u32(&g_dispatcher->rtoXlogBufState.waitRedoDone, 1); + WakeupRecovery(); + XLogReadManagerResponseSignal(trigger); + pg_atomic_write_u32(&(extreme_rto::g_startupTriggerState), TRIGGER_NORMAL); +} + +bool XLogReadManagerCheckSignal() +{ + uint32 trigger = pg_atomic_read_u32(&(extreme_rto::g_startupTriggerState)); + load_server_mode(); + if (g_dispatcher->smartShutdown || trigger == TRIGGER_PRIMARY || trigger == TRIGGER_SWITCHOVER || + (trigger == TRIGGER_FAILOVER && t_thrd.xlog_cxt.server_mode == STANDBY_MODE) || + t_thrd.xlog_cxt.server_mode == PRIMARY_MODE) { + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("XLogReadManagerCheckSignal: smartShutdown:%u, trigger:%u, server_mode:%u", + g_dispatcher->smartShutdown, trigger, t_thrd.xlog_cxt.server_mode))); + if (t_thrd.xlog_cxt.is_cascade_standby && t_thrd.xlog_cxt.server_mode == STANDBY_MODE && + IS_DN_MULTI_STANDYS_MODE() && (trigger == TRIGGER_SWITCHOVER || trigger == TRIGGER_FAILOVER)) { + HandleExtremeRtoCascadeStandbyPromote(trigger); + return false; + } + ShutdownWalRcv(); + if (g_dispatcher->smartShutdown) { + pg_atomic_write_u32(&g_readManagerTriggerFlag, TRIGGER_SMARTSHUTDOWN); + } else { + pg_atomic_write_u32(&g_readManagerTriggerFlag, trigger); + } + WakeupRecovery(); + WaitPageReadWorkerExit(); + XLogReadManagerResponseSignal(trigger); + return true; + } + return false; +} + +void StartRequestXLogFromStream() +{ + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + XLogRecPtr expectLsn = pg_atomic_read_u64(&g_dispatcher->rtoXlogBufState.expectLsn); + if (walrcv->receivedUpto == InvalidXLogRecPtr || + (expectLsn != InvalidXLogRecPtr && XLByteLE(walrcv->receivedUpto, expectLsn))) { + uint32 readWorkerstate = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); + if (readWorkerstate == WORKER_STATE_RUN) { + pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_STOPPING); + } + SpinLockAcquire(&walrcv->mutex); + walrcv->receivedUpto = 0; + SpinLockRelease(&walrcv->mutex); + XLogRecPtr targetRecPtr = pg_atomic_read_u64(&g_dispatcher->rtoXlogBufState.targetRecPtr); + CheckMaxPageFlushLSN(targetRecPtr); + + uint32 shiftSize = 32; + if (IS_OBS_DISASTER_RECOVER_MODE && !IsRoachRestore()) { + ereport(LOG, (errmsg("request xlog stream from obs at %X/%X.", (uint32)(targetRecPtr >> shiftSize), + (uint32)targetRecPtr))); + RequestXLogStreaming(&targetRecPtr, 0, REPCONNTARGET_OBS, 0); + } else if (IS_SHARED_STORAGE_STANBY_MODE && !IS_SHARED_STORAGE_MAIN_STANDBY_MODE) { +#ifndef ENABLE_MULTIPLE_NODES + rename_recovery_conf_for_roach(); +#endif + ereport(LOG, (errmsg("request xlog stream from shared storage at %X/%X.", + (uint32)(targetRecPtr >> shiftSize), + (uint32)targetRecPtr))); + RequestXLogStreaming(&targetRecPtr, 0, REPCONNTARGET_SHARED_STORAGE, 0); + } else { +#ifndef ENABLE_MULTIPLE_NODES + rename_recovery_conf_for_roach(); +#endif + ereport(LOG, (errmsg("request xlog stream at %X/%X.", (uint32)(targetRecPtr >> shiftSize), + (uint32)targetRecPtr))); + RequestXLogStreaming(&targetRecPtr, t_thrd.xlog_cxt.PrimaryConnInfo, REPCONNTARGET_PRIMARY, + u_sess->attr.attr_storage.PrimarySlotName); + } + } +} + + +void XLogReadManagerMain() +{ + const long sleepShortTime = 100000L; + const long sleepLongTime = 1000000L; + 
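/* The manager loop below watches the startup trigger for promote and
+     * shutdown requests, restarts the walreceiver when the streaming source
+     * stalls or fails, and sleeps between polls. */
+ 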
g_recordbuffer = &g_dispatcher->rtoXlogBufState; + uint32 xlogReadManagerState = READ_MANAGER_RUN; + + (void)RegisterRedoInterruptCallBack(XLogReadManagerProcInterrupt); + + while (xlogReadManagerState == READ_MANAGER_RUN) { + RedoInterruptCallBack(); + XLogRecPtr replay = InvalidXLogRecPtr; + bool exitStatus = XLogReadManagerCheckSignal(); + if (exitStatus) { + break; + } + + replay = GetXLogReplayRecPtr(NULL, NULL); + handleRecoverySusPend(replay); + + xlogReadManagerState = pg_atomic_read_u32(&g_dispatcher->rtoXlogBufState.xlogReadManagerState); + ADD_ABNORMAL_POSITION(7); + if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE) { + uint32 readSource = pg_atomic_read_u32(&g_dispatcher->rtoXlogBufState.readSource); + uint32 failSource = pg_atomic_read_u32(&g_dispatcher->rtoXlogBufState.failSource); + if (readSource & XLOG_FROM_STREAM) { + uint32 disableConnectionNode = + pg_atomic_read_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node); + bool retryConnect = ((!disableConnectionNode) || (IS_SHARED_STORAGE_MODE && disableConnectionNode && + !knl_g_get_redo_finish_status() && + !pg_atomic_read_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage))); + if (!WalRcvInProgress() && g_instance.pid_cxt.WalReceiverPID == 0 && retryConnect) { + StartRequestXLogFromStream(); + } else { + if (disableConnectionNode) { + if (IS_SHARED_STORAGE_MODE && WalRcvIsRunning()) { + ShutdownWalRcv(); + } + + if (!WalRcvInProgress() && !knl_g_get_redo_finish_status()) { + pg_atomic_write_u32(&g_dispatcher->rtoXlogBufState.waitRedoDone, 1); + WakeupRecovery(); + pg_usleep(sleepLongTime); + } else if (knl_g_get_redo_finish_status()) { + pg_atomic_write_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node, false); + pg_usleep(sleepLongTime); + } + + } + } + } + + if (failSource & XLOG_FROM_STREAM) { + ShutdownWalRcv(); + pg_atomic_write_u32(&(extreme_rto::g_dispatcher->rtoXlogBufState.failSource), 0); + } + } + pg_usleep(sleepShortTime); + } +} + +static void ReadWorkerStopCallBack(int code, Datum arg) +{ + pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_EXIT); + if (t_thrd.xlog_cxt.readFile >= 0) { + close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + } +} + +void XLogReadWorkerMain() +{ + uint32 startreadworker; + const uint32 sleepTime = 50; /* 50 us */ + + on_shmem_exit(ReadWorkerStopCallBack, 0); + (void)RegisterRedoInterruptCallBack(HandleReadWorkerRunInterrupts); + + g_recordbuffer = &g_dispatcher->rtoXlogBufState; + startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); + while (startreadworker != WORKER_STATE_EXITING) { + if (startreadworker == WORKER_STATE_RUN) { + XLogReadWorkRun(); + } else { + pg_usleep(sleepTime); + } + + RedoInterruptCallBack(); + startreadworker = pg_atomic_read_u32(&(g_recordbuffer->readWorkerState)); + if (startreadworker == WORKER_STATE_STOPPING) { + pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_STOP); + } + ADD_ABNORMAL_POSITION(6); + } + /* notify manager to exit */ + pg_atomic_write_u32(&(g_recordbuffer->readWorkerState), WORKER_STATE_EXIT); +} + +int RedoMainLoop() +{ + g_redoWorker->oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); + + instr_time startTime; + instr_time endTime; + + (void)RegisterRedoPageRepairCallBack(HandlePageRedoPageRepair); + + INSTR_TIME_SET_CURRENT(startTime); + switch (g_redoWorker->role) { + case REDO_BATCH: + BatchRedoMain(); + break; + case REDO_PAGE_MNG: + RedoPageManagerMain(); + break; + case 
REDO_PAGE_WORKER: + RedoPageWorkerMain(); + break; + case REDO_TRXN_MNG: + TrxnManagerMain(); + break; + case REDO_TRXN_WORKER: + TrxnWorkMain(); + break; + case REDO_READ_WORKER: + XLogReadWorkerMain(); + break; + case REDO_READ_PAGE_WORKER: + XLogReadPageWorkerMain(); + break; + case REDO_READ_MNG: + XLogReadManagerMain(); + break; + default: + break; + } + + INSTR_TIME_SET_CURRENT(endTime); + INSTR_TIME_SUBTRACT(endTime, startTime); + + /* + * We need to get the exit code here before we allow the dispatcher + * to proceed and change the exit code. + */ + int exitCode = GetDispatcherExitCode(); + g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + g_redoWorker->committingCsnList = XLogReleaseAndGetCommittingCsnList(); + + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("worker[%u]: exitcode = %d, total elapsed = %ld", g_redoWorker->id, exitCode, + INSTR_TIME_GET_MICROSEC(endTime)))); + + (void)MemoryContextSwitchTo(g_redoWorker->oldCtx); + + return exitCode; +} + +void ParallelRedoThreadRegister() +{ + bool isWorkerStarting = false; + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); + isWorkerStarting = ((g_instance.comm_cxt.predo_cxt.state == REDO_STARTING_BEGIN) ? true : false); + if (isWorkerStarting) { + pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), + PAGE_REDO_WORKER_READY); + } + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); + if (!isWorkerStarting) { + ereport(LOG, (errmsg("ParallelRedoThreadRegister Page-redo-worker %u exit.", g_redoWorker->id))); + SetPageWorkStateByThreadId(PAGE_REDO_WORKER_EXIT); + proc_exit(0); + } +} + +const char *RedoWokerRole2Str(RedoRole role) +{ + switch (role) { + case REDO_BATCH: + return "redo_batch"; + break; + case REDO_PAGE_MNG: + return "redo_manager"; + break; + case REDO_PAGE_WORKER: + return "redo_worker"; + break; + case REDO_TRXN_MNG: + return "trxn_manager"; + break; + case REDO_TRXN_WORKER: + return "trxn_worker"; + break; + case REDO_READ_WORKER: + return "read_worker"; + break; + case REDO_READ_PAGE_WORKER: + return "read_page_worker"; + break; + case REDO_READ_MNG: + return "read_manager"; + break; + default: + return "unknown"; + break; + } +} + +void WaitStateNormal() +{ + do { + RedoInterruptCallBack(); + } while (g_instance.comm_cxt.predo_cxt.state < REDO_IN_PROGRESS); +} + +/* Run from the worker thread. 
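 * ParallelRedoThreadMain() below registers the worker, installs its signal
 * handlers, initializes thread-local recovery state, runs RedoMainLoop()
 * for the assigned role, and finally marks the thread slot as exited.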
*/ +void ParallelRedoThreadMain() +{ + ParallelRedoThreadRegister(); + ereport(LOG, (errmsg("Page-redo-worker thread %u started, role:%u, slotId:%u.", g_redoWorker->id, + g_redoWorker->role, g_redoWorker->slotId))); + // register default interrupt callback + (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); + SetupSignalHandlers(); + InitGlobals(); + + ResourceManagerStartup(); + WaitStateNormal(); + EnableSyncRequestForwarding(); + + int retCode = RedoMainLoop(); + ResourceManagerStop(); + ereport(LOG, (errmsg("Page-redo-worker thread %u terminated, role:%u, slotId:%u, retcode %d.", g_redoWorker->id, + g_redoWorker->role, g_redoWorker->slotId, retCode))); + LastMarkReached(); + + pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[g_redoWorker->id].threadState), + PAGE_REDO_WORKER_EXIT); + proc_exit(0); +} + +static void PageRedoShutdownHandler(SIGNAL_ARGS) +{ + t_thrd.page_redo_cxt.shutdown_requested = 1; +} + +static void PageRedoQuickDie(SIGNAL_ARGS) +{ + int status = 2; + gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); + on_exit_reset(); + exit(status); +} + +static void PageRedoUser1Handler(SIGNAL_ARGS) +{ + t_thrd.page_redo_cxt.check_repair = true; +} + +static void PageRedoUser2Handler(SIGNAL_ARGS) +{ + t_thrd.page_redo_cxt.sleep_long = 1; +} + +/* Run from the worker thread. */ +static void SetupSignalHandlers() +{ + (void)gspqsignal(SIGHUP, SigHupHandler); + (void)gspqsignal(SIGINT, SIG_IGN); + (void)gspqsignal(SIGTERM, PageRedoShutdownHandler); + (void)gspqsignal(SIGQUIT, PageRedoQuickDie); + (void)gspqsignal(SIGALRM, SIG_IGN); + (void)gspqsignal(SIGPIPE, SIG_IGN); + (void)gspqsignal(SIGUSR1, PageRedoUser1Handler); + (void)gspqsignal(SIGUSR2, PageRedoUser2Handler); + (void)gspqsignal(SIGCHLD, SIG_IGN); + (void)gspqsignal(SIGTTIN, SIG_IGN); + (void)gspqsignal(SIGTTOU, SIG_IGN); + (void)gspqsignal(SIGCONT, SIG_IGN); + (void)gspqsignal(SIGWINCH, SIG_IGN); + + gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); + (void)gs_signal_unblock_sigusr2(); +} + +/* Run from the worker thread. */ +static void SigHupHandler(SIGNAL_ARGS) +{ + t_thrd.page_redo_cxt.got_SIGHUP = true; +} + +/* Run from the worker thread. 
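 * InitGlobals() copies the server mode, timeline, standby state, and
 * recovery flags captured by the dispatcher into this thread's
 * thread-local context before replay begins.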
*/ +static void InitGlobals() +{ + t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "ExtremeRtoParallelRedoThread", + THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); + + t_thrd.xlog_cxt.server_mode = g_redoWorker->initialServerMode; + t_thrd.xlog_cxt.ThisTimeLineID = g_redoWorker->initialTimeLineID; + t_thrd.xlog_cxt.expectedTLIs = g_redoWorker->expectedTLIs; + /* apply recoveryinfo will change standbystate see UpdateRecordGlobals */ + t_thrd.xlog_cxt.standbyState = g_redoWorker->standbyState; + t_thrd.xlog_cxt.StandbyMode = g_redoWorker->StandbyMode; + t_thrd.xlog_cxt.InRecovery = true; + t_thrd.xlog_cxt.startup_processing = true; + t_thrd.proc_cxt.DataDir = g_redoWorker->DataDir; + u_sess->utils_cxt.RecentXmin = g_redoWorker->RecentXmin; + g_redoWorker->proc = t_thrd.proc; + t_thrd.storage_cxt.latestObservedXid = g_redoWorker->latestObservedXid; + t_thrd.xlog_cxt.recoveryTargetTLI = g_redoWorker->recoveryTargetTLI; + t_thrd.xlog_cxt.recoveryRestoreCommand= g_redoWorker->recoveryRestoreCommand; + t_thrd.xlog_cxt.ArchiveRecoveryRequested = g_redoWorker->ArchiveRecoveryRequested; + t_thrd.xlog_cxt.StandbyModeRequested = g_redoWorker->StandbyModeRequested; + t_thrd.xlog_cxt.InArchiveRecovery = g_redoWorker->InArchiveRecovery; + t_thrd.xlog_cxt.InRecovery = g_redoWorker->InRecovery; + t_thrd.xlog_cxt.ArchiveRestoreRequested = g_redoWorker->ArchiveRestoreRequested; + t_thrd.xlog_cxt.minRecoveryPoint = g_redoWorker->minRecoveryPoint; +} + +void WaitRedoWorkersQueueEmpty() +{ + bool queueIsEmpty = false; + while (!queueIsEmpty) { + queueIsEmpty = true; + for (uint32 i = 0; i < g_dispatcher->allWorkersCnt; i++) { + PageRedoWorker *worker = g_dispatcher->allWorkers[i]; + if (worker->role == REDO_TRXN_WORKER || worker->role == REDO_PAGE_WORKER) { + if (!RedoWorkerIsIdle(worker)) { + queueIsEmpty = false; + break; + } + } + } + RedoInterruptCallBack(); + } +} + +void RedoThrdWaitForExit(const PageRedoWorker *wk) +{ + uint32 sd = wk->slotId; + switch (wk->role) { + case REDO_BATCH: + SendPageRedoEndMark(g_dispatcher->pageLines[sd].managerThd); + WaitPageRedoWorkerReachLastMark(g_dispatcher->pageLines[sd].managerThd); + break; + case REDO_PAGE_MNG: + DispatchEndMarkToRedoWorkerAndWait(); + break; + case REDO_PAGE_WORKER: + break; /* Don't need to wait for anyone */ + case REDO_TRXN_MNG: + SendPageRedoEndMark(g_dispatcher->trxnLine.redoThd); + WaitRedoWorkersQueueEmpty(); + WaitPageRedoWorkerReachLastMark(g_dispatcher->trxnLine.redoThd); + break; + case REDO_TRXN_WORKER: + break; /* Don't need to wait for anyone */ + default: + break; + } +} + +/* Run from the worker thread. */ +static void ApplySinglePageRecord(RedoItem *item) +{ + XLogReaderState *record = &item->record; + + MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); + ApplyRedoRecord(record); + (void)MemoryContextSwitchTo(oldCtx); +} + +/* Run from the worker thread. */ +static void LastMarkReached() +{ + PosixSemaphorePost(&g_redoWorker->phaseMarker); +} + +/* Run from the dispatcher thread. */ +void WaitPageRedoWorkerReachLastMark(PageRedoWorker *worker) +{ + PosixSemaphoreWait(&worker->phaseMarker); +} + +/* Run from the dispatcher thread. */ +void AddPageRedoItem(PageRedoWorker *worker, void *item) +{ + SPSCBlockingQueuePut(worker->queue, item); +} + +/* Run from the dispatcher thread. */ +bool SendPageRedoEndMark(PageRedoWorker *worker) +{ + return SPSCBlockingQueuePut(worker->queue, &g_redoEndMark); +} + +/* Run from the dispatcher thread. 
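 * Posting the shared g_terminateMark asks the worker to stop as soon as
 * the mark is dequeued from its record queue.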
*/ +bool SendPageRedoWorkerTerminateMark(PageRedoWorker *worker) +{ + return SPSCBlockingQueuePut(worker->queue, &g_terminateMark); +} + +/* Run from the txn worker thread. */ +void UpdatePageRedoWorkerStandbyState(PageRedoWorker *worker, HotStandbyState newState) +{ + /* + * Here we only save the new state into the worker struct. + * The actual update of the worker thread's state occurs inside + * the apply loop. + */ + worker->standbyState = newState; +} + +/* Run from the dispatcher thread. */ +void *GetXLogInvalidPages(PageRedoWorker *worker) +{ + return worker->xlogInvalidPages; +} + +bool RedoWorkerIsIdle(PageRedoWorker *worker) +{ + return SPSCBlockingQueueIsEmpty(worker->queue); +} + +void DumpPageRedoWorker(PageRedoWorker *worker) +{ + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[REDO_LOG_TRACE]RedoWorker common info: id %u, tid %lu, " + "lastCheckedRestartPoint %lu, lastReplayedEndRecPtr %lu standbyState %u", + worker->id, worker->tid.thid, worker->lastCheckedRestartPoint, worker->lastReplayedEndRecPtr, + (uint32)worker->standbyState))); + DumpQueue(worker->queue); +} + +void DumpExtremeRtoReadBuf() +{ + if (g_dispatcher == NULL) { + return; + } + + ereport(LOG, + (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("DumpExtremeRtoReadBuf: startworker %u, readindex %u, applyindex %u, readSource %u, failSource %u", + g_dispatcher->rtoXlogBufState.readWorkerState, g_dispatcher->rtoXlogBufState.readindex, + g_dispatcher->rtoXlogBufState.applyindex, g_dispatcher->rtoXlogBufState.readSource, + g_dispatcher->rtoXlogBufState.failSource))); + + for (uint32 i = 0; i < MAX_ALLOC_SEGNUM; ++i) { + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("DumpExtremeRtoReadBuf: buf %u, state %u, readlen %u, segno %lu, segoffset %lu", i, + g_dispatcher->rtoXlogBufState.xlogsegarray[i].bufState, + g_dispatcher->rtoXlogBufState.xlogsegarray[i].readlen, + g_dispatcher->rtoXlogBufState.xlogsegarray[i].segno, + g_dispatcher->rtoXlogBufState.xlogsegarray[i].segoffset))); + } +} + +bool XactHasSegpageRelFiles(XLogReaderState *record) +{ + int nrels = 0; + ColFileNodeRel *xnodes = NULL; + + if (XLogRecGetRmid(record) != RM_XACT_ID) { + return false; + } + + XactGetRelFiles(record, &xnodes, &nrels); + + for (int32 idx = 0; idx < nrels; idx++) { + ColFileNode colFileNode; + ColFileNodeRel *colFileNodeRel = xnodes + idx; + + ColFileNodeCopy(&colFileNode, colFileNodeRel); + + if (!IsValidColForkNum(colFileNode.forknum) && IsSegmentFileNode(colFileNode.filenode)) { + return true; + } + } + + return false; +} + + +void RepairPageAndRecoveryXLog(BadBlockRecEnt* page_info, const char *page) +{ + RedoBufferInfo buffer; + RedoBufferTag blockinfo; + bool updateFsm = false; + bool notfound = false; + errno_t rc; + BufferDesc *bufDesc = NULL; + RedoTimeCost timeCost1; + RedoTimeCost timeCost2; + + blockinfo.rnode = page_info->key.relfilenode; + blockinfo.forknum = page_info->key.forknum; + blockinfo.blkno = page_info->key.blocknum; + blockinfo.pblk = page_info->pblk; + + /* read page to buffer pool by RBM_ZERO_AND_LOCK mode and get buffer lock */ + (void)XLogReadBufferForRedoBlockExtend(&blockinfo, RBM_ZERO_AND_LOCK, false, &buffer, + page_info->rec_max_lsn, InvalidXLogRecPtr, false, WITH_NORMAL_CACHE); + + rc = memcpy_s(buffer.pageinfo.page, BLCKSZ, page, BLCKSZ); + securec_check(rc, "", ""); + + MarkBufferDirty(buffer.buf); + bufDesc = GetBufferDescriptor(buffer.buf - 1); + bufDesc->lsn_on_disk = PageGetLSN(buffer.pageinfo.page); + UnlockReleaseBuffer(buffer.buf); + + /* recovery 
the page xlog */ + rc = memset_s(&buffer, sizeof(RedoBufferInfo), 0, sizeof(RedoBufferInfo)); + securec_check(rc, "", ""); + + XLogRecParseState *procState = page_info->head; + MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); + while (procState != NULL) { + XLogRecParseState *redoblockstate = procState; + procState = (XLogRecParseState *)procState->nextrecord; + (void)XLogBlockRedoForExtremeRTO(redoblockstate, &buffer, notfound, timeCost1, timeCost2); + } + (void)MemoryContextSwitchTo(oldCtx); + updateFsm = XlogNeedUpdateFsm(page_info->head, &buffer); + XLogBlockParseStateRelease(page_info->head); + ExtremeRtoFlushBuffer(&buffer, updateFsm); +} + +HTAB* BadBlockHashTblCreate() +{ + HASHCTL ctl; + errno_t rc; + + rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); + securec_check(rc, "", ""); + + ctl.keysize = sizeof(RepairBlockKey); + ctl.entrysize = sizeof(BadBlockRecEnt); + ctl.hash = tag_hash; + + return hash_create("recovery thread bad block hashtbl", MAX_REMOTE_READ_INFO_NUM, &ctl, HASH_ELEM | HASH_FUNCTION); +} + + +/* ClearRecoveryThreadHashTbl + * On drop table or truncate table, clear the recovery thread's bad page hashTbl: + * every entry whose repair page RelFileNode matches is removed. + */ +void ClearRecoveryThreadHashTbl(const RelFileNode &node, ForkNumber forknum, BlockNumber minblkno, + bool segment_shrink) +{ + HTAB *bad_hash = g_redoWorker->badPageHashTbl; + bool found = false; + BadBlockRecEnt *entry = NULL; + HASH_SEQ_STATUS status; + + hash_seq_init(&status, bad_hash); + while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) { + if (BlockNodeMatch(entry->key, entry->pblk, node, forknum, minblkno, segment_shrink)) { + if (hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("recovery thread bad page hash table corrupted"))); + } + g_redoWorker->remoteReadPageNum--; + } + } + + return; +} + +/* BatchClearRecoveryThreadHashTbl + * On drop database or drop segmentspace, clear the recovery thread's bad page hashTbl: + * every entry whose repair page key matches both spcNode and dbNode is removed. + */ +void BatchClearRecoveryThreadHashTbl(Oid spcNode, Oid dbNode) +{ + HTAB *bad_hash = g_redoWorker->badPageHashTbl; + bool found = false; + BadBlockRecEnt *entry = NULL; + HASH_SEQ_STATUS status; + + hash_seq_init(&status, bad_hash); + while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) { + if (dbNodeandSpcNodeMatch(&(entry->key.relfilenode), spcNode, dbNode)) { + if (hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("page repair hash table corrupted"))); + } + g_redoWorker->remoteReadPageNum--; + } + } + + return; +} + + +/* ClearSpecificsPageEntryAndMem + * If the page has been repaired, remove its entry from the bad page hashtable + * and release the xlog record memory. 
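+ * Once the last entry is removed, the worker's xlog parse buffer manager is destroyed as well.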
+ */ +void ClearSpecificsPageEntryAndMem(BadBlockRecEnt *entry) +{ + HTAB *bad_hash = g_redoWorker->badPageHashTbl; + bool found = false; + uint32 need_repair_num = 0; + HASH_SEQ_STATUS status; + BadBlockRecEnt *temp_entry = NULL; + + if ((BadBlockRecEnt*)hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found) == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("recovery thread bad block hash table corrupted"))); + } + + hash_seq_init(&status, bad_hash); + while ((temp_entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) { + need_repair_num++; + } + + if (need_repair_num == 0) { + XLogParseBufferDestoryFunc(&(g_redoWorker->parseManager)); + } +} + +/* RecordBadBlockAndPushToRemote + * If the bad page has been stored, record the xlog. If the bad page + * has not been stored, need push to page repair thread hash table and record to + * recovery thread hash table. + */ +void RecordBadBlockAndPushToRemote(XLogBlockDataParse *datadecode, PageErrorType error_type, + XLogRecPtr old_lsn, XLogPhyBlock pblk) +{ + bool found = false; + RepairBlockKey key; + gs_thread_t tid; + XLogBlockParse *block = STRUCT_CONTAINER(XLogBlockParse, extra_rec, datadecode); + XLogRecParseState *state = STRUCT_CONTAINER(XLogRecParseState, blockparse, block); + + key.relfilenode.spcNode = state->blockparse.blockhead.spcNode; + key.relfilenode.dbNode = state->blockparse.blockhead.dbNode; + key.relfilenode.relNode = state->blockparse.blockhead.relNode; + key.relfilenode.bucketNode = state->blockparse.blockhead.bucketNode; + key.forknum = state->blockparse.blockhead.forknum; + key.blocknum = state->blockparse.blockhead.blkno; + + tid = gs_thread_get_cur_thread(); + found = PushBadPageToRemoteHashTbl(key, error_type, old_lsn, pblk, tid.thid); + + if (found) { + /* store the record for recovery */ + HTAB *bad_hash = g_redoWorker->badPageHashTbl; + bool thread_found = false; + BadBlockRecEnt *remoteReadInfo = (BadBlockRecEnt*)hash_search(bad_hash, &(key), HASH_FIND, &thread_found); + Assert(thread_found); + if (!thread_found) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("recovery thread bad block hash table corrupted"))); + } + XLogRecParseState *newState = XLogParseBufferCopy(state); + newState->nextrecord = NULL; + remoteReadInfo->tail->nextrecord = newState; + remoteReadInfo->tail = newState; + remoteReadInfo->rec_max_lsn = newState->blockparse.blockhead.end_ptr; + } else { + HTAB *bad_hash = g_redoWorker->badPageHashTbl; + bool thread_found = false; + BadBlockRecEnt *remoteReadInfo = (BadBlockRecEnt*)hash_search(bad_hash, &(key), HASH_ENTER, &thread_found); + + Assert(!thread_found); + if (g_parseManager == NULL) { + XLogParseBufferInitFunc(&(g_redoWorker->parseManager), + MAX_PARSE_BUFF_NUM, &recordRefOperate, RedoInterruptCallBack); + } + XLogRecParseState *newState = XLogParseBufferCopy(state); + newState->nextrecord = NULL; + + remoteReadInfo->key = key; + remoteReadInfo->pblk = pblk; + remoteReadInfo->rec_min_lsn = newState->blockparse.blockhead.end_ptr; + remoteReadInfo->rec_max_lsn = newState->blockparse.blockhead.end_ptr; + remoteReadInfo->head = newState; + remoteReadInfo->tail = newState; + g_redoWorker->remoteReadPageNum++; + + if (g_redoWorker->remoteReadPageNum >= MAX_REMOTE_READ_INFO_NUM) { + ereport(WARNING, (errmsg("recovery thread found %d error block.", g_redoWorker->remoteReadPageNum))); + } + } + + return; +} + +void CheckRemoteReadAndRepairPage(BadBlockRecEnt *entry) +{ + XLogRecPtr rec_min_lsn = InvalidXLogRecPtr; + XLogRecPtr rec_max_lsn = 
InvalidXLogRecPtr; + bool check = false; + RepairBlockKey key; + + key = entry->key; + rec_min_lsn = entry->rec_min_lsn; + rec_max_lsn = entry->rec_max_lsn; + check = CheckRepairPage(key, rec_min_lsn, rec_max_lsn, g_redoWorker->page); + if (check) { + /* copy page to buffer pool, and recovery the stored xlog */ + RepairPageAndRecoveryXLog(entry, g_redoWorker->page); + /* clear page repair thread hash table */ + ClearSpecificsPageRepairHashTbl(key); + /* clear this thread invalid page hash table */ + forget_specified_invalid_pages(key); + /* clear thread bad block hash entry */ + ClearSpecificsPageEntryAndMem(entry); + g_redoWorker->remoteReadPageNum--; + } +} + +void SeqCheckRemoteReadAndRepairPage() +{ + BadBlockRecEnt *entry = NULL; + HASH_SEQ_STATUS status; + + HTAB *bad_hash = g_redoWorker->badPageHashTbl; + + hash_seq_init(&status, bad_hash); + while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) { + CheckRemoteReadAndRepairPage(entry); + } +} + +} // namespace extreme_rto diff --git a/src/gausskernel/storage/access/transam/extreme_rto/redo_item.cpp b/src/gausskernel/storage/access/transam/extreme_rto/redo_item.cpp index 989625192..50101ac23 100644 --- a/src/gausskernel/storage/access/transam/extreme_rto/redo_item.cpp +++ b/src/gausskernel/storage/access/transam/extreme_rto/redo_item.cpp @@ -1,106 +1,63 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * redo_item.cpp - * Each RedoItem represents a log record ready to be replayed by one of - * the redo threads. To decouple the lifetime of a RedoItem from its - * log record's original XLogReaderState, contents necessary for the - * actual replay are duplicated into RedoItem's internal XLogReaderState. - - * - * IDENTIFICATION - * src/gausskernel/storage/access/transam/extreme_rto/redo_item.cpp - * - * ------------------------------------------------------------------------- - */ - -#include -#include - -#include "postgres.h" -#include "knl/knl_variable.h" -#include "access/xlogrecord.h" -#include "access/xlog_internal.h" -#include "utils/palloc.h" -#include "utils/guc.h" - -#include "access/extreme_rto/dispatcher.h" -#include "access/extreme_rto/redo_item.h" -#include "postmaster/postmaster.h" -#include "access/xlog.h" - -namespace extreme_rto { - -/* Run from the dispatcher thread. */ -RedoItem *CreateRedoItem(XLogReaderState *record, uint32 shareCount, uint32 designatedWorker, List *expectedTLIs, - TimestampTz recordXTime, bool buseoriginal, bool isForceAll) -{ - RedoItem *item = GetRedoItemPtr(record); - if (t_thrd.xlog_cxt.redoItemIdx == 0) { - /* - * Some blocks are optional and redo functions rely on the correct - * value of in_use to determine if optional blocks are present. - * Explicitly set all unused blocks' in_use to false. 
- */ - for (int i = record->max_block_id + 1; i <= XLR_MAX_BLOCK_ID; i++) - item->record.blocks[i].in_use = false; - } - if (buseoriginal && (t_thrd.xlog_cxt.redoItemIdx == 0)) { - t_thrd.xlog_cxt.redoItemIdx++; - } else { - /* if shareCount is 1, we should make a copy of record in NewReaderState function */ - Assert(shareCount == 1); - /* not only need copy state, but also need copy data */ - item = GetRedoItemPtr(NewReaderState(record, true)); - } - - item->oldVersion = t_thrd.xlog_cxt.redo_oldversion_xlog; - item->needImmediateCheckpoint = false; - item->needFullSyncCheckpoint = false; - item->shareCount = shareCount; - item->designatedWorker = designatedWorker; - item->expectedTLIs = expectedTLIs; - item->recordXTime = recordXTime; - item->freeNext = NULL; - item->syncXLogReceiptTime = t_thrd.xlog_cxt.XLogReceiptTime; - item->syncXLogReceiptSource = t_thrd.xlog_cxt.XLogReceiptSource; - item->RecentXmin = u_sess->utils_cxt.RecentXmin; - item->syncServerMode = GetServerMode(); - item->isForceAll = isForceAll; - - pg_atomic_init_u32(&item->refCount, shareCount); - pg_atomic_init_u32(&item->replayed, 0); - pg_atomic_init_u32(&item->blkShareCount, 0); - pg_atomic_init_u32(&item->distributeCount, shareCount); - return item; -} - -void ApplyRedoRecord(XLogReaderState *record, bool bOld) -{ - t_thrd.xlog_cxt.redo_oldversion_xlog = bOld; - ErrorContextCallback errContext; - errContext.callback = rm_redo_error_callback; - errContext.arg = (void *)record; - errContext.previous = t_thrd.log_cxt.error_context_stack; - t_thrd.log_cxt.error_context_stack = &errContext; - if (module_logging_is_on(MOD_REDO)) { - DiagLogRedoRecord(record, "ApplyRedoRecord"); - } - RmgrTable[XLogRecGetRmid(record)].rm_redo(record); - - t_thrd.log_cxt.error_context_stack = errContext.previous; - t_thrd.xlog_cxt.redo_oldversion_xlog = false; -} -} // namespace extreme_rto +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * redo_item.cpp + * Each RedoItem represents a log record ready to be replayed by one of + * the redo threads. To decouple the lifetime of a RedoItem from its + * log record's original XLogReaderState, contents necessary for the + * actual replay are duplicated into RedoItem's internal XLogReaderState. 
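+ * DumpItem() below traces a RedoItem's replay-relevant fields at DEBUG4.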
+ + * + * IDENTIFICATION + * src/gausskernel/storage/access/transam/extreme_rto/redo_item.cpp + * + * ------------------------------------------------------------------------- + */ + +#include +#include + +#include "postgres.h" +#include "knl/knl_variable.h" +#include "access/xlogrecord.h" +#include "access/xlog_internal.h" +#include "utils/palloc.h" +#include "utils/guc.h" + +#include "access/extreme_rto/dispatcher.h" +#include "access/extreme_rto/redo_item.h" +#include "postmaster/postmaster.h" +#include "access/xlog.h" +#include "access/multi_redo_api.h" + +namespace extreme_rto { +void DumpItem(RedoItem *item, const char *funcName) +{ + if (item == &g_redoEndMark || item == &g_terminateMark) { + return; + } + ereport(DEBUG4, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[REDO_LOG_TRACE]DiagLogRedoRecord: %s, ReadRecPtr:%lu,EndRecPtr:%lu," + "imcheckpoint:%u, recordXTime:%lu," + "syncXLogReceiptSource:%d, RecentXmin:%lu, syncServerMode:%u", + funcName, item->record.ReadRecPtr, item->record.EndRecPtr, + item->needImmediateCheckpoint, item->recordXTime, + item->syncXLogReceiptSource, item->RecentXmin, item->syncServerMode))); + DiagLogRedoRecord(&(item->record), funcName); +} + + + +} // namespace extreme_rto diff --git a/src/gausskernel/storage/access/transam/gtm_single.cpp b/src/gausskernel/storage/access/transam/gtm_single.cpp index f5bda990c..45a9a55ed 100644 --- a/src/gausskernel/storage/access/transam/gtm_single.cpp +++ b/src/gausskernel/storage/access/transam/gtm_single.cpp @@ -95,19 +95,6 @@ void CloseGTM(void) } } -void ResetGtmHandleXmin(GTM_TransactionKey txnKey) -{ - /* in Single_Node mode, txnKey.txnHandle must be inValid, and t_thrd.xact_cxt.conn must be NULL, */ - /* so, just return here. */ - return; -} - -int SetGTMVacuumFlag(GTM_TransactionKey txnKey, bool is_vacuum) -{ - DISTRIBUTED_FEATURE_NOT_SUPPORTED(); - return GTM_RESULT_ERROR; -} - GTM_TransactionKey BeginTranGTM(GTM_Timestamp *timestamp) { GTM_TransactionKey txn; diff --git a/src/gausskernel/storage/access/transam/multi_redo_api.cpp b/src/gausskernel/storage/access/transam/multi_redo_api.cpp index 10776e8ef..d70e18443 100644 --- a/src/gausskernel/storage/access/transam/multi_redo_api.cpp +++ b/src/gausskernel/storage/access/transam/multi_redo_api.cpp @@ -1,339 +1,429 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * multi_redo_api.cpp - * Defines GUC options for parallel recovery. 
- * - * IDENTIFICATION - * src/gausskernel/storage/access/transam/multi_redo_api.cpp - * - * ------------------------------------------------------------------------- - */ - -#include -#include - -#include "postgres.h" -#include "knl/knl_variable.h" -#include "utils/guc.h" - -#include "access/multi_redo_settings.h" -#include "access/multi_redo_api.h" -#include "access/extreme_rto/dispatcher.h" -#include "access/parallel_recovery/dispatcher.h" -#include "access/extreme_rto/page_redo.h" -#include "access/parallel_recovery/page_redo.h" - - -#ifdef ENABLE_MULTIPLE_NODES -bool g_supportHotStandby = false; /* don't support consistency view */ -#else -bool g_supportHotStandby = true; /* don't support consistency view */ -#endif - - -void StartUpMultiRedo(XLogReaderState *xlogreader, uint32 privateLen) -{ - if (IsExtremeRedo()) { - extreme_rto::StartRecoveryWorkers(xlogreader, privateLen); - } else if (IsParallelRedo()) { - parallel_recovery::StartRecoveryWorkers(); - } -} - -bool IsMultiThreadRedoRunning() -{ - return (get_real_recovery_parallelism() > 1 && - (extreme_rto::g_dispatcher != 0 || parallel_recovery::g_dispatcher != 0)); -} - -bool IsExtremeRtoRunning() -{ - return (get_real_recovery_parallelism() > 1 && extreme_rto::g_dispatcher != 0 && - extreme_rto::g_dispatcher->pageLineNum > 0); -} - - -bool IsExtremeRtoSmartShutdown() -{ - if (!IsExtremeRtoRunning()) { - return false; - } - - if (extreme_rto::g_dispatcher->smartShutdown) { - extreme_rto::g_dispatcher->smartShutdown =false; - return true; - } - return false; -} - -void ExtremeRtoRedoManagerSendEndToStartup() -{ - if (!IsExtremeRtoRunning()) { - return; - } - - extreme_rto::g_redoEndMark.record.isDecode = true; - extreme_rto::PutRecordToReadQueue((XLogReaderState *)&extreme_rto::g_redoEndMark.record); -} - -bool IsExtremeRtoReadWorkerRunning() -{ - if (!IsExtremeRtoRunning()) { - return false; - } - - uint32 readWorkerState = pg_atomic_read_u32(&extreme_rto::g_dispatcher->rtoXlogBufState.readWorkerState); - if (readWorkerState == extreme_rto::WORKER_STATE_STOP || readWorkerState == extreme_rto::WORKER_STATE_EXIT) { - return false; - } - - return true; -} - -void DispatchRedoRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) -{ - if (IsExtremeRedo()) { - extreme_rto::DispatchRedoRecordToFile(record, expectedTLIs, recordXTime); - } else if (IsParallelRedo()) { - parallel_recovery::DispatchRedoRecordToFile(record, expectedTLIs, recordXTime); - } else { +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * multi_redo_api.cpp + * Defines GUC options for parallel recovery. 
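+ * It also implements the wrappers that route redo work to the extreme_rto
+ * or parallel_recovery engines, together with CountXLogNumbers() and
+ * DiagLogRedoRecord() for redo statistics and trace logging.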
+ * + * IDENTIFICATION + * src/gausskernel/storage/access/transam/multi_redo_api.cpp + * + * ------------------------------------------------------------------------- + */ + +#include +#include + +#include "postgres.h" +#include "knl/knl_variable.h" +#include "utils/guc.h" + +#include "access/multi_redo_settings.h" +#include "access/multi_redo_api.h" +#include "access/extreme_rto/dispatcher.h" +#include "access/parallel_recovery/dispatcher.h" +#include "access/extreme_rto/page_redo.h" +#include "access/parallel_recovery/page_redo.h" +#include "access/xlog_internal.h" + +bool g_supportHotStandby = true; /* don't support consistency view */ + + +void StartUpMultiRedo(XLogReaderState *xlogreader, uint32 privateLen) +{ + if (IsExtremeRedo()) { + extreme_rto::StartRecoveryWorkers(xlogreader, privateLen); + } else if (IsParallelRedo()) { + parallel_recovery::StartRecoveryWorkers(); + } +} + +bool IsMultiThreadRedoRunning() +{ + return (get_real_recovery_parallelism() > 1 && + (extreme_rto::g_dispatcher != 0 || parallel_recovery::g_dispatcher != 0)); +} + +bool IsExtremeRtoRunning() +{ + return (get_real_recovery_parallelism() > 1 && extreme_rto::g_dispatcher != 0 && + extreme_rto::g_dispatcher->pageLineNum > 0); +} + + +bool IsExtremeRtoSmartShutdown() +{ + if (!IsExtremeRtoRunning()) { + return false; + } + + if (extreme_rto::g_dispatcher->smartShutdown) { + extreme_rto::g_dispatcher->smartShutdown =false; + return true; + } + return false; +} + +void ExtremeRtoRedoManagerSendEndToStartup() +{ + if (!IsExtremeRtoRunning()) { + return; + } + + extreme_rto::g_redoEndMark.record.isDecode = true; + extreme_rto::PutRecordToReadQueue((XLogReaderState *)&extreme_rto::g_redoEndMark.record); +} + +void DispatchRedoRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) +{ + if (IsExtremeRedo()) { + extreme_rto::DispatchRedoRecordToFile(record, expectedTLIs, recordXTime); + } else if (IsParallelRedo()) { + parallel_recovery::DispatchRedoRecordToFile(record, expectedTLIs, recordXTime); + } else { + g_instance.startup_cxt.current_record = record; uint32 term = XLogRecGetTerm(record); if (term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { g_instance.comm_cxt.localinfo_cxt.term_from_xlog = term; } - parallel_recovery::ApplyRedoRecord(record, t_thrd.xlog_cxt.redo_oldversion_xlog); - if (XLogRecGetRmid(record) == RM_XACT_ID) - SetLatestXTime(recordXTime); - SetXLogReplayRecPtr(record->ReadRecPtr, record->EndRecPtr); - CheckRecoveryConsistency(); - } -} - -void GetThreadNameIfMultiRedo(int argc, char *argv[], char **threadNamePtr) -{ - if (IsExtremeRedo()) { - extreme_rto::GetThreadNameIfPageRedoWorker(argc, argv, threadNamePtr); - } else if (IsParallelRedo()) { - parallel_recovery::GetThreadNameIfPageRedoWorker(argc, argv, threadNamePtr); - } -} - -PGPROC *MultiRedoThreadPidGetProc(ThreadId pid) -{ - if (IsExtremeRedo()) { - return extreme_rto::StartupPidGetProc(pid); - } else { - return parallel_recovery::StartupPidGetProc(pid); - } -} - -void MultiRedoUpdateStandbyState(HotStandbyState newState) -{ - if (IsExtremeRedo()) { - extreme_rto::UpdateStandbyState(newState); - } else if (IsParallelRedo()) { - parallel_recovery::UpdateStandbyState(newState); - } -} - -uint32 MultiRedoGetWorkerId() -{ - if (IsExtremeRedo()) { - return extreme_rto::GetMyPageRedoWorkerIdWithLock(); - } else if (IsParallelRedo()) { - return parallel_recovery::GetMyPageRedoWorkerOrignId(); - } else { - ereport(ERROR, (errmsg("MultiRedoGetWorkerId parallel redo and extreme redo is close, should not be 
here!"))); - } - return 0; -} - -bool IsAllPageWorkerExit() -{ - if (get_real_recovery_parallelism() > 1) { - for (uint32 i = 0; i < g_instance.comm_cxt.predo_cxt.totalNum; ++i) { - uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState)); - if (state != PAGE_REDO_WORKER_INVALID) { - return false; - } - } - g_instance.comm_cxt.predo_cxt.totalNum = 0; - } - ereport(LOG, - (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("page workers all exit or not open parallel redo"))); - - return true; -} - -void SetPageRedoWorkerIndex(int index) -{ - if (IsExtremeRedo()) { - extreme_rto::g_redoWorker->index = index; - } else if (IsParallelRedo()) { - parallel_recovery::g_redoWorker->index = index; - } -} - -int GetPageRedoWorkerIndex(int index) -{ - if (IsExtremeRedo()) { - return extreme_rto::g_redoWorker->index; - } else if (IsParallelRedo()) { - return parallel_recovery::g_redoWorker->index; - } else { - return 0; - } -} - -PageRedoExitStatus CheckExitPageWorkers(ThreadId pid) -{ - PageRedoExitStatus checkStatus = NOT_PAGE_REDO_THREAD; - - for (uint32 i = 0; i < g_instance.comm_cxt.predo_cxt.totalNum; ++i) { - if (g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadId == pid) { - checkStatus = PAGE_REDO_THREAD_EXIT_NORMAL; - uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState)); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("page worker thread %lu exit, state %u", pid, state))); - if (state == PAGE_REDO_WORKER_READY) { - checkStatus = PAGE_REDO_THREAD_EXIT_ABNORMAL; - } - pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState), - PAGE_REDO_WORKER_INVALID); - g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadId = 0; - break; - } - } - - return checkStatus; -} - -void ProcTxnWorkLoad(bool force) -{ - if (IsParallelRedo()) { - parallel_recovery::ProcessTrxnRecords(force); - } -} - -/* Run from the worker thread. */ -void SetMyPageRedoWorker(knl_thread_arg *arg) -{ - if (IsExtremeRedo()) { - extreme_rto::g_redoWorker = (extreme_rto::PageRedoWorker *)arg->payload; - } else if (IsParallelRedo()) { - parallel_recovery::g_redoWorker = (parallel_recovery::PageRedoWorker *)arg->payload; - } -} - -/* Run from the worker thread. 
*/ -uint32 GetMyPageRedoWorkerId() -{ - if (IsExtremeRedo()) { - return extreme_rto::g_redoWorker->id; - } else if (IsParallelRedo()) { - return parallel_recovery::g_redoWorker->id; - } else { - return 0; - } -} - -void MultiRedoMain() -{ - if (IsExtremeRedo()) { - extreme_rto::ParallelRedoThreadMain(); - } else if (IsParallelRedo()) { - parallel_recovery::PageRedoWorkerMain(); - } else { - ereport(ERROR, (errmsg("MultiRedoMain parallel redo and extreme redo is close, should not be here!"))); - } -} - -void EndDispatcherContext() -{ - if (IsExtremeRedo()) { - (void)MemoryContextSwitchTo(extreme_rto::g_dispatcher->oldCtx); - - } else if (IsParallelRedo()) { - (void)MemoryContextSwitchTo(parallel_recovery::g_dispatcher->oldCtx); - } -} - -void SwitchToDispatcherContext() -{ - (void)MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); -} - -void FreeAllocatedRedoItem() -{ - if (IsExtremeRedo()) { - extreme_rto::FreeAllocatedRedoItem(); - - } else if (IsParallelRedo()) { - parallel_recovery::FreeAllocatedRedoItem(); - } -} - -uint32 GetRedoWorkerCount() -{ - if (IsExtremeRedo()) { - return extreme_rto::GetAllWorkerCount(); - - } else if (IsParallelRedo()) { - return parallel_recovery::GetPageWorkerCount(); - } - - return 0; -} - -void **GetXLogInvalidPagesFromWorkers() -{ - if (IsExtremeRedo()) { - return extreme_rto::GetXLogInvalidPagesFromWorkers(); - - } else if (IsParallelRedo()) { - return parallel_recovery::GetXLogInvalidPagesFromWorkers(); - } - - return NULL; -} - -void SendRecoveryEndMarkToWorkersAndWaitForFinish(int code) -{ - if (IsExtremeRedo()) { - return extreme_rto::SendRecoveryEndMarkToWorkersAndWaitForFinish(code); - - } else if (IsParallelRedo()) { - return parallel_recovery::SendRecoveryEndMarkToWorkersAndWaitForFinish(code); - } -} - -RedoWaitInfo GetRedoIoEvent(int32 event_id) -{ - if (IsExtremeRedo()) { - return extreme_rto::redo_get_io_event(event_id); - } else { - return parallel_recovery::redo_get_io_event(event_id); - } -} - -void GetRedoWrokerStatistic(uint32 *realNum, RedoWorkerStatsData *worker, uint32 workerLen) -{ - if (IsExtremeRedo()) { - return extreme_rto::redo_get_wroker_statistic(realNum, worker, workerLen); - } else { - return parallel_recovery::redo_get_wroker_statistic(realNum, worker, workerLen); - } -} + + long readbufcountbefore = u_sess->instr_cxt.pg_buffer_usage->local_blks_read; + ApplyRedoRecord(record); + record->readblocks = u_sess->instr_cxt.pg_buffer_usage->local_blks_read - readbufcountbefore; + CountXLogNumbers(record); + if (XLogRecGetRmid(record) == RM_XACT_ID) + SetLatestXTime(recordXTime); + SetXLogReplayRecPtr(record->ReadRecPtr, record->EndRecPtr); + CheckRecoveryConsistency(); + } +} + +void GetThreadNameIfMultiRedo(int argc, char *argv[], char **threadNamePtr) +{ + if (IsExtremeRedo()) { + extreme_rto::GetThreadNameIfPageRedoWorker(argc, argv, threadNamePtr); + } else if (IsParallelRedo()) { + parallel_recovery::GetThreadNameIfPageRedoWorker(argc, argv, threadNamePtr); + } +} + +PGPROC *MultiRedoThreadPidGetProc(ThreadId pid) +{ + if (IsExtremeRedo()) { + return extreme_rto::StartupPidGetProc(pid); + } else { + return parallel_recovery::StartupPidGetProc(pid); + } +} + +void MultiRedoUpdateStandbyState(HotStandbyState newState) +{ + if (IsExtremeRedo()) { + extreme_rto::UpdateStandbyState(newState); + } else if (IsParallelRedo()) { + parallel_recovery::UpdateStandbyState(newState); + } +} + +uint32 MultiRedoGetWorkerId() +{ + if (IsExtremeRedo()) { + return extreme_rto::GetMyPageRedoWorkerIdWithLock(); + } else if 
(IsParallelRedo()) { + return parallel_recovery::GetMyPageRedoWorkerOrignId(); + } else { + ereport(ERROR, (errmsg("MultiRedoGetWorkerId parallel redo and extreme redo is close, should not be here!"))); + } + return 0; +} + +bool IsAllPageWorkerExit() +{ + if (get_real_recovery_parallelism() > 1) { + for (uint32 i = 0; i < g_instance.comm_cxt.predo_cxt.totalNum; ++i) { + uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState)); + if (state != PAGE_REDO_WORKER_INVALID) { + return false; + } + } + g_instance.comm_cxt.predo_cxt.totalNum = 0; + } + ereport(LOG, + (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("page workers all exit or not open parallel redo"))); + + return true; +} + +void SetPageRedoWorkerIndex(int index) +{ + if (IsExtremeRedo()) { + extreme_rto::g_redoWorker->index = index; + } else if (IsParallelRedo()) { + parallel_recovery::g_redoWorker->index = index; + } +} + +int GetPageRedoWorkerIndex(int index) +{ + if (IsExtremeRedo()) { + return extreme_rto::g_redoWorker->index; + } else if (IsParallelRedo()) { + return parallel_recovery::g_redoWorker->index; + } else { + return 0; + } +} + +PageRedoExitStatus CheckExitPageWorkers(ThreadId pid) +{ + PageRedoExitStatus checkStatus = NOT_PAGE_REDO_THREAD; + + for (uint32 i = 0; i < g_instance.comm_cxt.predo_cxt.totalNum; ++i) { + if (g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadId == pid) { + checkStatus = PAGE_REDO_THREAD_EXIT_NORMAL; + uint32 state = pg_atomic_read_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState)); + ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("page worker thread %lu exit, state %u", pid, state))); + if (state == PAGE_REDO_WORKER_READY) { + checkStatus = PAGE_REDO_THREAD_EXIT_ABNORMAL; + } + pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadState), + PAGE_REDO_WORKER_INVALID); + g_instance.comm_cxt.predo_cxt.pageRedoThreadStatusList[i].threadId = 0; + break; + } + } + + return checkStatus; +} + +void ProcTxnWorkLoad(bool force) +{ + if (IsParallelRedo()) { + parallel_recovery::ProcessTrxnRecords(force); + } +} + +/* Run from the worker thread. */ +void SetMyPageRedoWorker(knl_thread_arg *arg) +{ + if (IsExtremeRedo()) { + extreme_rto::g_redoWorker = (extreme_rto::PageRedoWorker *)arg->payload; + } else if (IsParallelRedo()) { + parallel_recovery::g_redoWorker = (parallel_recovery::PageRedoWorker *)arg->payload; + } +} + +/* Run from the worker thread. 
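 * Returns this worker's id under whichever redo engine is active, or 0
 * when neither extreme nor parallel redo owns the thread.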
*/ +uint32 GetMyPageRedoWorkerId() +{ + if (IsExtremeRedo()) { + return extreme_rto::g_redoWorker->id; + } else if (IsParallelRedo()) { + return parallel_recovery::g_redoWorker->id; + } else { + return 0; + } +} + +void MultiRedoMain() +{ + if (IsExtremeRedo()) { + extreme_rto::ParallelRedoThreadMain(); + } else if (IsParallelRedo()) { + parallel_recovery::PageRedoWorkerMain(); + } else { + ereport(ERROR, (errmsg("MultiRedoMain parallel redo and extreme redo is close, should not be here!"))); + } +} + +void EndDispatcherContext() +{ + if (IsExtremeRedo()) { + (void)MemoryContextSwitchTo(extreme_rto::g_dispatcher->oldCtx); + + } else if (IsParallelRedo()) { + (void)MemoryContextSwitchTo(parallel_recovery::g_dispatcher->oldCtx); + } +} + +void SwitchToDispatcherContext() +{ + (void)MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); +} + +void FreeAllocatedRedoItem() +{ + if (IsExtremeRedo()) { + extreme_rto::FreeAllocatedRedoItem(); + + } else if (IsParallelRedo()) { + parallel_recovery::FreeAllocatedRedoItem(); + } +} + +uint32 GetRedoWorkerCount() +{ + if (IsExtremeRedo()) { + return extreme_rto::GetAllWorkerCount(); + + } else if (IsParallelRedo()) { + return parallel_recovery::GetPageWorkerCount(); + } + + return 0; +} + +void **GetXLogInvalidPagesFromWorkers() +{ + if (IsExtremeRedo()) { + return extreme_rto::GetXLogInvalidPagesFromWorkers(); + + } else if (IsParallelRedo()) { + return parallel_recovery::GetXLogInvalidPagesFromWorkers(); + } + + return NULL; +} + +void SendRecoveryEndMarkToWorkersAndWaitForFinish(int code) +{ + if (IsExtremeRedo()) { + return extreme_rto::SendRecoveryEndMarkToWorkersAndWaitForFinish(code); + + } else if (IsParallelRedo()) { + return parallel_recovery::SendRecoveryEndMarkToWorkersAndWaitForFinish(code); + } +} + +RedoWaitInfo GetRedoIoEvent(int32 event_id) +{ + if (IsExtremeRedo()) { + return extreme_rto::redo_get_io_event(event_id); + } else { + return parallel_recovery::redo_get_io_event(event_id); + } +} + +void GetRedoWrokerStatistic(uint32 *realNum, RedoWorkerStatsData *worker, uint32 workerLen) +{ + if (IsExtremeRedo()) { + return extreme_rto::redo_get_wroker_statistic(realNum, worker, workerLen); + } else { + return parallel_recovery::redo_get_wroker_statistic(realNum, worker, workerLen); + } +} + +void GetRedoWorkerTimeCount(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum) +{ + if (IsExtremeRedo()) { + extreme_rto::redo_get_wroker_time_count(workerCountInfoList, realNum); + } else if (IsParallelRedo()) { + parallel_recovery::redo_get_wroker_time_count(workerCountInfoList, realNum); + } else { + *realNum = 0; + } +} + +void CountXLogNumbers(XLogReaderState *record) +{ + const uint32 type_shift = 4; + RmgrId rm_id = XLogRecGetRmid(record); + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + if (rm_id == RM_HEAP_ID || rm_id == RM_HEAP2_ID || rm_id == RM_HEAP3_ID) { + info = info & XLOG_HEAP_OPMASK; + } else if (rm_id == RM_UHEAP_ID || rm_id == RM_UHEAP2_ID) { + info = info & XLOG_UHEAP_OPMASK; + } + + info = (info >> type_shift); + Assert(info < MAX_XLOG_INFO_NUM); + Assert(rm_id < RM_NEXT_ID); + (void)pg_atomic_add_fetch_u64(&g_instance.comm_cxt.predo_cxt.xlogStatics[rm_id][info].total_num, 1); + + if (record->max_block_id >= 0) { + (void)pg_atomic_add_fetch_u64(&g_instance.comm_cxt.predo_cxt.xlogStatics[rm_id][info].extra_num, + record->readblocks); + } else if (rm_id == RM_XACT_ID) { + ColFileNodeRel *xnodes = NULL; + int nrels = 0; + XactGetRelFiles(record, &xnodes, &nrels); + if (nrels > 0) { + 
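/* dropped-relation files count as extra redo work for this xact record */
+ 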
(void)pg_atomic_add_fetch_u64(&g_instance.comm_cxt.predo_cxt.xlogStatics[rm_id][info].extra_num, nrels); + } + } +} + +void ResetXLogStatics() +{ + errno_t rc = memset_s((void *)g_instance.comm_cxt.predo_cxt.xlogStatics, + sizeof(g_instance.comm_cxt.predo_cxt.xlogStatics), 0, sizeof(g_instance.comm_cxt.predo_cxt.xlogStatics)); + securec_check(rc, "\0", "\0"); +} + +void DiagLogRedoRecord(XLogReaderState *record, const char *funcName) +{ + uint8 info; + RelFileNode oldRn = { 0 }; + RelFileNode newRn = { 0 }; + BlockNumber oldblk = InvalidBlockNumber; + BlockNumber newblk = InvalidBlockNumber; + bool newBlkExistFlg = false; + bool oldBlkExistFlg = false; + ForkNumber oldFk = InvalidForkNumber; + ForkNumber newFk = InvalidForkNumber; + StringInfoData buf; + + /* Support redo old version xlog during upgrade (Just the runningxact log with chekpoint online ) */ + uint32 rmid = XLogRecGetRmid(record); + info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + + initStringInfo(&buf); + RmgrTable[rmid].rm_desc(&buf, record); + + if (XLogRecGetBlockTag(record, 0, &newRn, &newFk, &newblk)) { + newBlkExistFlg = true; + } + if (XLogRecGetBlockTag(record, 1, &oldRn, &oldFk, &oldblk)) { + oldBlkExistFlg = true; + } + ereport(DEBUG4, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[REDO_LOG_TRACE]DiagLogRedoRecord: %s, ReadRecPtr:%lu,EndRecPtr:%lu," + "newBlkExistFlg:%d," + "newRn(spcNode:%u, dbNode:%u, relNode:%u),newFk:%d,newblk:%u," + "oldBlkExistFlg:%d," + "oldRn(spcNode:%u, dbNode:%u, relNode:%u),oldFk:%d,oldblk:%u," + "info:%u, rm_name:%s, desc:%s," + "max_block_id:%d", + funcName, record->ReadRecPtr, record->EndRecPtr, newBlkExistFlg, newRn.spcNode, newRn.dbNode, newRn.relNode, + newFk, newblk, oldBlkExistFlg, oldRn.spcNode, oldRn.dbNode, oldRn.relNode, oldFk, oldblk, (uint32)info, + RmgrTable[rmid].rm_name, buf.data, record->max_block_id))); + pfree_ext(buf.data); +} + +void ApplyRedoRecord(XLogReaderState *record) +{ + ErrorContextCallback errContext; + errContext.callback = rm_redo_error_callback; + errContext.arg = (void *)record; + errContext.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &errContext; + if (module_logging_is_on(MOD_REDO)) { + DiagLogRedoRecord(record, "ApplyRedoRecord"); + } + RmgrTable[XLogRecGetRmid(record)].rm_redo(record); + + t_thrd.log_cxt.error_context_stack = errContext.previous; +} + diff --git a/src/gausskernel/storage/access/transam/multi_redo_settings.cpp b/src/gausskernel/storage/access/transam/multi_redo_settings.cpp index d8f697ae0..6ed92595e 100644 --- a/src/gausskernel/storage/access/transam/multi_redo_settings.cpp +++ b/src/gausskernel/storage/access/transam/multi_redo_settings.cpp @@ -1,128 +1,270 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * settings.cpp - * Defines GUC options for parallel recovery. 
- * - * IDENTIFICATION - * src/gausskernel/storage/access/transam/parallel_recovery/settings.cpp - * - * ------------------------------------------------------------------------- - */ -#include -#include - -#include "postgres.h" -#include "knl/knl_variable.h" -#include "utils/guc.h" - -#include "access/multi_redo_settings.h" -#include "access/multi_redo_api.h" - -static uint32 ComputeRecoveryParallelism(int); -static uint32 GetCPUCount(); - -void ConfigRecoveryParallelism() -{ - char buf[16]; /* 16 is enough */ - - if (g_instance.attr.attr_storage.recovery_parse_workers > 1) { - g_instance.comm_cxt.predo_cxt.redoType = EXTREME_REDO; - g_instance.attr.attr_storage.batch_redo_num = g_instance.attr.attr_storage.recovery_parse_workers; - uint32 total_recovery_parallelism = g_instance.attr.attr_storage.batch_redo_num * 2 + - g_instance.attr.attr_storage.recovery_redo_workers_per_paser_worker * - g_instance.attr.attr_storage.batch_redo_num + - TRXN_REDO_MANAGER_NUM + TRXN_REDO_WORKER_NUM + XLOG_READER_NUM; - sprintf_s(buf, sizeof(buf), "%u", total_recovery_parallelism); - - ereport(LOG, (errmsg("ConfigRecoveryParallelism, parse workers:%d, " - "redo workers per parse worker:%d, total workernums is %u", - g_instance.attr.attr_storage.recovery_parse_workers, - g_instance.attr.attr_storage.recovery_redo_workers_per_paser_worker, - total_recovery_parallelism))); - g_supportHotStandby = false; - SetConfigOption("recovery_parallelism", buf, PGC_POSTMASTER, PGC_S_OVERRIDE); - } else if (g_instance.attr.attr_storage.max_recovery_parallelism > 1) { - g_instance.comm_cxt.predo_cxt.redoType = PARALLEL_REDO; - uint32 true_max_recovery_parallelism = g_instance.attr.attr_storage.max_recovery_parallelism; - if (true_max_recovery_parallelism > MOST_FAST_RECOVERY_LIMIT) { - true_max_recovery_parallelism = MOST_FAST_RECOVERY_LIMIT; - } - sprintf_s(buf, sizeof(buf), "%u", ComputeRecoveryParallelism(true_max_recovery_parallelism)); - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("ConfigRecoveryParallelism, true_max_recovery_parallelism:%u, " - "max_recovery_parallelism:%d", - true_max_recovery_parallelism, g_instance.attr.attr_storage.max_recovery_parallelism))); - SetConfigOption("recovery_parallelism", buf, PGC_POSTMASTER, PGC_S_OVERRIDE); - } -} - -static uint32 ComputeRecoveryParallelism(int hint) -{ - /* - * Reciprocal of the shares of CPU used for recovery. The idea is that - * using the default value the standby is able to keep up with the master - * (assuming the standby and the master use the same hardware), while on - * machines with fewer CPUs, the user is able to boost up recovery - * performance by using more CPUs. A capped maximum is used to protect - * users from setting hugh values. - * - * The default is to use 1/32 of all CPUs. On beefy machines, the capped - * maximum is 1/4 of all CPUs. On smaller machines, the capped maximum - * is the number of CPUs or 8, whichever is smaller. 
- */ - static const uint32 DEFAULT_CPU_SHARE = 32; - static const uint32 MAX_CPU_SHARE = 4; - static const uint32 MIN_ALLOWED_MAX_PARALLELISM = 8; - uint32 g_cpu_count = 0; - - if (g_cpu_count == 0) - g_cpu_count = GetCPUCount(); - uint32 default_parallelism = g_cpu_count / DEFAULT_CPU_SHARE; - uint32 max_parallelism; - if (g_cpu_count < MIN_ALLOWED_MAX_PARALLELISM) - max_parallelism = g_cpu_count; - else if (g_cpu_count / MAX_CPU_SHARE < MIN_ALLOWED_MAX_PARALLELISM) - max_parallelism = MIN_ALLOWED_MAX_PARALLELISM; - else - max_parallelism = g_cpu_count / MAX_CPU_SHARE; - - uint32 actual_parallelism; - if (hint <= 0) - actual_parallelism = default_parallelism; - else if (((uint32)hint) < max_parallelism) - actual_parallelism = hint; - else - actual_parallelism = max_parallelism; - - /* We need to have at least one recovery thread. */ - if (actual_parallelism < 1) - actual_parallelism = 1; - - ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("Recovery parallelism, cpu count = %u, max = %d, actual = %u", g_cpu_count, hint, - actual_parallelism))); - return actual_parallelism; -} - -static uint32 GetCPUCount() -{ -#ifdef _SC_NPROCESSORS_ONLN - return (uint32)sysconf(_SC_NPROCESSORS_ONLN); -#else - static const uint32 DEFAULT_CPU_COUNT = 64; - return DEFAULT_CPU_COUNT; -#endif -} +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * settings.cpp + * Defines GUC options for parallel recovery. 
+ *
+ * IDENTIFICATION
+ *        src/gausskernel/storage/access/transam/parallel_recovery/settings.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <unistd.h>
+#ifdef __USE_NUMA
+    #include <numa.h>
+#endif
+#include "postgres.h"
+#include "knl/knl_variable.h"
+#include "utils/guc.h"
+
+#include "access/multi_redo_settings.h"
+#include "access/multi_redo_api.h"
+#include "threadpool/threadpool_controler.h"
+
+static uint32 ComputeRecoveryParallelism(int);
+static uint32 GetCPUCount();
+static const int bufSize = 1024;
+
+void ConfigRecoveryParallelism()
+{
+    char buf[16]; /* 16 is enough */
+
+    if (g_instance.attr.attr_storage.recovery_parse_workers > 1) {
+        g_instance.comm_cxt.predo_cxt.redoType = EXTREME_REDO;
+        g_instance.attr.attr_storage.batch_redo_num = g_instance.attr.attr_storage.recovery_parse_workers;
+        uint32 total_recovery_parallelism = g_instance.attr.attr_storage.batch_redo_num * 2 +
+            g_instance.attr.attr_storage.recovery_redo_workers_per_paser_worker *
+            g_instance.attr.attr_storage.batch_redo_num +
+            TRXN_REDO_MANAGER_NUM + TRXN_REDO_WORKER_NUM + XLOG_READER_NUM;
+        sprintf_s(buf, sizeof(buf), "%u", total_recovery_parallelism);
+
+        ereport(LOG, (errmsg("ConfigRecoveryParallelism, parse workers:%d, "
+                             "redo workers per parse worker:%d, total worker num is %u",
+                             g_instance.attr.attr_storage.recovery_parse_workers,
+                             g_instance.attr.attr_storage.recovery_redo_workers_per_paser_worker,
+                             total_recovery_parallelism)));
+        g_supportHotStandby = false;
+        SetConfigOption("recovery_parallelism", buf, PGC_POSTMASTER, PGC_S_OVERRIDE);
+    } else if (g_instance.attr.attr_storage.max_recovery_parallelism > 1) {
+        g_instance.comm_cxt.predo_cxt.redoType = PARALLEL_REDO;
+        uint32 true_max_recovery_parallelism = g_instance.attr.attr_storage.max_recovery_parallelism;
+        if (true_max_recovery_parallelism > MOST_FAST_RECOVERY_LIMIT) {
+            true_max_recovery_parallelism = MOST_FAST_RECOVERY_LIMIT;
+        }
+        sprintf_s(buf, sizeof(buf), "%u", ComputeRecoveryParallelism(true_max_recovery_parallelism));
+        ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG),
+                      errmsg("ConfigRecoveryParallelism, true_max_recovery_parallelism:%u, "
+                             "max_recovery_parallelism:%d",
+                             true_max_recovery_parallelism, g_instance.attr.attr_storage.max_recovery_parallelism)));
+        SetConfigOption("recovery_parallelism", buf, PGC_POSTMASTER, PGC_S_OVERRIDE);
+    }
+}
+
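For concreteness, a worked example of the extreme-redo thread budget computed above, assuming (as their names suggest; their definitions are outside this hunk) that TRXN_REDO_MANAGER_NUM, TRXN_REDO_WORKER_NUM and XLOG_READER_NUM are 1 each: with recovery_parse_workers = 4 and recovery_redo_workers_per_paser_worker = 3, batch_redo_num becomes 4, so total_recovery_parallelism = 4 * 2 + 3 * 4 + 1 + 1 + 1 = 23, and that value is then forced into the recovery_parallelism GUC through SetConfigOption with PGC_S_OVERRIDE.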
+static uint32 ComputeRecoveryParallelism(int hint)
+{
+    /*
+     * Reciprocal of the shares of CPU used for recovery. The idea is that
+     * using the default value the standby is able to keep up with the master
+     * (assuming the standby and the master use the same hardware), while on
+     * machines with fewer CPUs, the user is able to boost up recovery
+     * performance by using more CPUs. A capped maximum is used to protect
+     * users from setting huge values.
+     *
+     * The default is to use 1/32 of all CPUs. On beefy machines, the capped
+     * maximum is 1/4 of all CPUs. On smaller machines, the capped maximum
+     * is the number of CPUs or 8, whichever is smaller.
+     */
+    static const uint32 DEFAULT_CPU_SHARE = 32;
+    static const uint32 MAX_CPU_SHARE = 4;
+    static const uint32 MIN_ALLOWED_MAX_PARALLELISM = 8;
+    static uint32 g_cpu_count = 0;
+
+    if (g_cpu_count == 0)
+        g_cpu_count = GetCPUCount();
+    uint32 default_parallelism = g_cpu_count / DEFAULT_CPU_SHARE;
+    uint32 max_parallelism;
+    if (g_cpu_count < MIN_ALLOWED_MAX_PARALLELISM)
+        max_parallelism = g_cpu_count;
+    else if (g_cpu_count / MAX_CPU_SHARE < MIN_ALLOWED_MAX_PARALLELISM)
+        max_parallelism = MIN_ALLOWED_MAX_PARALLELISM;
+    else
+        max_parallelism = g_cpu_count / MAX_CPU_SHARE;
+
+    uint32 actual_parallelism;
+    if (hint <= 0)
+        actual_parallelism = default_parallelism;
+    else if (((uint32)hint) < max_parallelism)
+        actual_parallelism = hint;
+    else
+        actual_parallelism = max_parallelism;
+
+    /* We need to have at least one recovery thread. */
+    if (actual_parallelism < 1)
+        actual_parallelism = 1;
+
+    ereport(LOG, (errmodule(MOD_REDO), errcode(ERRCODE_LOG),
+                  errmsg("Recovery parallelism, cpu count = %u, max = %d, actual = %u", g_cpu_count, hint,
+                         actual_parallelism)));
+    return actual_parallelism;
+}
+
+static uint32 GetCPUCount()
+{
+#ifdef _SC_NPROCESSORS_ONLN
+    return (uint32)sysconf(_SC_NPROCESSORS_ONLN);
+#else
+    static const uint32 DEFAULT_CPU_COUNT = 64;
+    return DEFAULT_CPU_COUNT;
+#endif
+}
+
+void ParseBindCpuInfo(RedoCpuBindControl *control)
+{
+    char* attr = TrimStr(g_instance.attr.attr_storage.redo_bind_cpu_attr);
+    if (attr == NULL) {
+        return;
+    }
+
+    char* ptoken = NULL;
+    char* psave = NULL;
+    const char* pdelimiter = ":";
+
+    ptoken = TrimStr(strtok_r(attr, pdelimiter, &psave));
+    ptoken = pg_strtolower(ptoken);
+
+    int bindNum = 0;
+    if (strncmp("nobind", ptoken, strlen("nobind")) == 0) {
+        control->bindType = REDO_NO_CPU_BIND;
+        return;
+    } else if (strncmp("cpubind", ptoken, strlen("cpubind")) == 0) {
+        control->bindType = REDO_CPU_BIND;
+        control->isBindCpuArr = (bool*)palloc0(sizeof(bool) * control->totalCpuNum);
+        bindNum = ThreadPoolControler::ParseRangeStr(psave, control->isBindCpuArr, control->totalCpuNum, "cpubind");
+    } else if (strncmp("nodebind", ptoken, strlen("nodebind")) == 0) {
+        control->bindType = REDO_NODE_BIND;
+        control->isBindNumaArr = (bool*)palloc0(sizeof(bool) * control->totalCpuNum);
+        bindNum = ThreadPoolControler::ParseRangeStr(psave, control->isBindNumaArr, control->totalNumaNum, "nodebind");
+    } else {
+        ereport(FATAL, (errcode(ERRCODE_OPERATE_INVALID_PARAM), errmsg("Invalid attribute for multi redo."),
+                        errdetail("The redo bind config only supports 'nobind', 'cpubind', and 'nodebind'.")));
+    }
+
+    if (bindNum == 0) {
+        ereport(FATAL, (errcode(ERRCODE_OPERATE_INVALID_PARAM), errmsg("Invalid attribute for multi redo."),
+                        errdetail("Cannot find a valid CPU for thread binding; there are two possible reasons:\n"
+                                  "1. These CPUs are not active; use lscpu to check the On-line CPU(s) list.\n"
+                                  "2. The process has been bound to other CPUs and there is no intersection; "
+                                  "use taskset -pc to check the process CPU bind info.\n")));
+    }
+}
+
+bool CpuCanConfiged(RedoCpuBindControl *contrl, int cpuid, int numaid)
+{
+    switch (contrl->bindType) {
+        case REDO_NODE_BIND:
+            return (contrl->isBindNumaArr[numaid] && contrl->isMcsCpuArr[cpuid] && CPU_ISSET(cpuid, &contrl->cpuSet));
+        case REDO_CPU_BIND:
+            return (contrl->isBindCpuArr[cpuid] && contrl->isMcsCpuArr[cpuid] && CPU_ISSET(cpuid, &contrl->cpuSet));
+        default:
+            return false;
+    }
+    return false;
+}
+
+void ConfigBindCpuInfo(RedoCpuBindControl *contrl)
+{
+    contrl->isMcsCpuArr = ThreadPoolControler::GetMcsCpuInfo(contrl->totalCpuNum);
+
+    NumaCpuId *sysNumaCpuIdList = (NumaCpuId*)palloc0(sizeof(NumaCpuId) * contrl->totalCpuNum);
+    int sysNumaCpuIdNum = 0;
+    ThreadPoolControler::GetActiveCpu(sysNumaCpuIdList, &sysNumaCpuIdNum);
+
+    if (sysNumaCpuIdNum == 0) {
+        return;
+    }
+
+    contrl->cpuArr = (int*)palloc0(sizeof(int) * contrl->totalCpuNum);
+
+    for (int i = 0; i < sysNumaCpuIdNum; ++i) {
+        int cpuid = sysNumaCpuIdList[i].cpuId;
+        int numaid = sysNumaCpuIdList[i].numaId;
+        if (CpuCanConfiged(contrl, cpuid, numaid)) {
+            contrl->cpuArr[contrl->activeCpuNum++] = cpuid;
+        }
+    }
+
+    pfree_ext(sysNumaCpuIdList);
+    ereport(LOG, (errmsg("ConfigBindCpuInfo redo bind cpu num: %d.", contrl->activeCpuNum)));
+
+    CPU_ZERO(&contrl->configCpuSet);
+    for (int i = 0; i < contrl->activeCpuNum; ++i) {
+        CPU_SET(contrl->cpuArr[i], &contrl->configCpuSet);
+    }
+}
+
+bool CheckRedoBindConfigValid(RedoCpuBindControl *contrl)
+{
+    if (contrl->bindType == REDO_NO_CPU_BIND) {
+        return false;
+    }
+
+    if (contrl->totalNumaNum == 0 || contrl->totalCpuNum == 0) {
+        ereport(WARNING, (errmsg("CheckRedoBindConfigValid: failed to read cpu num(%d) or numa num(%d).",
+                                 contrl->totalCpuNum, contrl->totalNumaNum)));
+        return false;
+    }
+#ifdef __USE_NUMA
+    int numaNodeNum = numa_max_node() + 1;
+    if (numaNodeNum <= 1) {
+        ereport(WARNING, (errmsg("CheckRedoBindConfigValid: no multiple NUMA nodes available: %d.", numaNodeNum)));
+        return false;
+    } else if (contrl->totalNumaNum != numaNodeNum) {
+        ereport(WARNING, (errmsg("Cannot activate NUMA distribution because the NUMA node count does not match.")));
+        return false;
+    } else {
+        return true;
+    }
+#endif
+    return false;
+}
+
+void ProcessRedoCpuBindInfo()
+{
+    RedoCpuBindControl *contrl = &g_instance.comm_cxt.predo_cxt.redoCpuBindcontrl;
+    ThreadPoolControler::GetInstanceBind(&contrl->cpuSet);
+    ThreadPoolControler::GetCpuAndNumaNum(&contrl->totalCpuNum, &contrl->totalNumaNum);
+    ParseBindCpuInfo(contrl);
+    if (CheckRedoBindConfigValid(contrl)) {
+        ConfigBindCpuInfo(contrl);
+    }
+}
+
+void BindRedoThreadToSpecifiedCpu(knl_thread_role thread_role)
+{
+#ifdef __USE_NUMA
+    if (thread_role == STARTUP || thread_role == PAGEREDO) {
+        RedoCpuBindControl *contrl = &g_instance.comm_cxt.predo_cxt.redoCpuBindcontrl;
+        if (contrl->activeCpuNum > 0) {
+            int ret = pthread_setaffinity_np(gs_thread_self(), sizeof(cpu_set_t), &contrl->configCpuSet);
+            if (ret != 0) {
+                ereport(WARNING, (errmsg("Failed to attach thread (role %d, tid %lu) to bound CPUs; "
+                                         "active cpu num %d, error %d", (int)thread_role,
+                                         gs_thread_self(), contrl->activeCpuNum, ret)));
+            }
+        }
+    }
+#endif
+}
+
diff --git a/src/gausskernel/storage/access/transam/multixact.cpp b/src/gausskernel/storage/access/transam/multixact.cpp
index 2a3d12897..fccc7cf85 100644
--- a/src/gausskernel/storage/access/transam/multixact.cpp
+++ b/src/gausskernel/storage/access/transam/multixact.cpp
@@ -581,7 +581,7 @@ MultiXactId
ReadNextMultiXactId(void) * Note that in case we return false, the number of remaining members is * not to be trusted. */ -bool DoMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining, bool nowait) +bool DoMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining, bool nowait, int waitSec) { bool result = true; MultiXactMember *members = NULL; @@ -615,7 +615,7 @@ bool DoMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining break; } } else { - XactLockTableWait(memxid, true); + XactLockTableWait(memxid, true, waitSec); } } @@ -637,9 +637,9 @@ bool DoMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining * We return (in *remaining, if not NULL) the number of members that are still * running, including any (non-aborted) subtransactions of our own transaction. */ -void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining) +void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining, int waitSec) { - DoMultiXactIdWait(multi, status, remaining, false); + DoMultiXactIdWait(multi, status, remaining, false, waitSec); } /* @@ -2095,8 +2095,8 @@ XLogRecParseState *multixact_xlog_ddl_parse_to_block(XLogReaderState *record, ui } filenode = RelFileNodeForkNumFill(NULL, InvalidBackendId, forknum, lowblknum); XLogRecSetBlockCommonState(record, BLOCK_DATA_DDL_TYPE, filenode, recordstatehead); - XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blockddlrec), ddltype, false, - (char *)XLogRecGetData(record)); + XLogRecSetBlockDdlState(&(recordstatehead->blockparse.extra_rec.blockddlrec), ddltype, + (char *)XLogRecGetData(record)); return recordstatehead; } XLogRecParseState *multixact_xlog_offset_parse_to_block(XLogReaderState *record, uint32 *blocknum) diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp index fbc44327c..dc20fd9a5 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp +++ b/src/gausskernel/storage/access/transam/parallel_recovery/dispatcher.cpp @@ -31,7 +31,6 @@ #include "access/xact.h" #include "access/xlog_internal.h" #include "access/nbtree.h" -#include "access/hash_xlog.h" #include "access/ubtree.h" #include "access/xlogreader.h" #include "access/gist_private.h" @@ -96,7 +95,7 @@ static const int XLOG_INFO_SHIFT_SIZE = 4; /* xlog info flag shift size */ static const int32 MAX_PENDING = 1; static const int32 MAX_PENDING_STANDBY = 1; -static const int32 ITEM_QUQUE_SIZE_RATIO = 10; +static const int32 ITEM_QUQUE_SIZE_RATIO = 5; static const uint32 EXIT_WAIT_DELAY = 100; /* 100 us */ @@ -177,15 +176,12 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchMultiXactRecord, RmgrRecordInfoValid, RM_MULTIXACT_ID, XLOG_MULTIXACT_ZERO_OFF_PAGE, XLOG_MULTIXACT_CREATE_ID }, { DispatchRelMapRecord, RmgrRecordInfoValid, RM_RELMAP_ID, XLOG_RELMAP_UPDATE, XLOG_RELMAP_UPDATE }, -#ifdef ENABLE_MULTIPLE_NODES - { DispatchStandbyRecord, RmgrRecordInfoValid, RM_STANDBY_ID, XLOG_STANDBY_LOCK, XLOG_STANDBY_CSN}, -#else { DispatchStandbyRecord, RmgrRecordInfoValid, RM_STANDBY_ID, XLOG_STANDBY_LOCK, XLOG_STANDBY_CSN_ABORTED}, -#endif + { DispatchHeap2Record, RmgrRecordInfoValid, RM_HEAP2_ID, XLOG_HEAP2_FREEZE, XLOG_HEAP2_LOGICAL_NEWPAGE }, { DispatchHeapRecord, RmgrRecordInfoValid, RM_HEAP_ID, XLOG_HEAP_INSERT, XLOG_HEAP_INPLACE }, { DispatchBtreeRecord, RmgrRecordInfoValid, RM_BTREE_ID, XLOG_BTREE_INSERT_LEAF, 
XLOG_BTREE_REUSE_PAGE}, - { DispatchHashRecord, RmgrRecordInfoValid, RM_HASH_ID, XLOG_HASH_INIT_META_PAGE, XLOG_HASH_VACUUM_ONE_PAGE }, + { DispatchHashRecord, NULL, RM_HASH_ID, 0, 0 }, { DispatchGinRecord, RmgrRecordInfoValid, RM_GIN_ID, XLOG_GIN_CREATE_INDEX, XLOG_GIN_VACUUM_DATA_LEAF_PAGE }, /* XLOG_GIST_PAGE_DELETE is not used and info isn't continus */ { DispatchGistRecord, RmgrGistRecordInfoValid, RM_GIST_ID, 0, 0 }, @@ -193,7 +189,7 @@ static const RmgrDispatchData g_dispatchTable[RM_MAX_ID + 1] = { { DispatchSpgistRecord, RmgrRecordInfoValid, RM_SPGIST_ID, XLOG_SPGIST_CREATE_INDEX, XLOG_SPGIST_VACUUM_REDIRECT }, { DispatchRepSlotRecord, RmgrRecordInfoValid, RM_SLOT_ID, XLOG_SLOT_CREATE, XLOG_TERM_LOG }, { DispatchHeap3Record, RmgrRecordInfoValid, RM_HEAP3_ID, XLOG_HEAP3_NEW_CID, XLOG_HEAP3_REWRITE }, - { DispatchBarrierRecord, NULL, RM_BARRIER_ID, 0, 0 }, + { DispatchBarrierRecord, RmgrRecordInfoValid, RM_BARRIER_ID, XLOG_BARRIER_CREATE, XLOG_BARRIER_SWITCHOVER }, #ifdef ENABLE_MOT { DispatchMotRecord, NULL, RM_MOT_ID, 0, 0}, @@ -389,6 +385,7 @@ static LogDispatcher *CreateDispatcher() newDispatcher->pprCostTime = 0; newDispatcher->dispatchReadRecPtr = 0; newDispatcher->dispatchEndRecPtr = 0; + newDispatcher->startupTimeCost = t_thrd.xlog_cxt.timeCost; return newDispatcher; } @@ -1024,7 +1021,7 @@ static void DealWithSegpageSyncRecord(XLogReaderState* record, List* expectedTLI /* Now, all proceding items are done, safe to replay the xlog */ RedoItem* item = CreateRedoItem(record, 1, ANY_WORKER, expectedTLIs, recordXTime, true); MemoryContext oldCtx = MemoryContextSwitchTo(g_dispatcher->oldCtx); - ApplyRedoRecord(&item->record, item->oldVersion); + ApplyRedoRecord(&item->record); (void)MemoryContextSwitchTo(oldCtx); FreeRedoItem(item); } @@ -1076,20 +1073,8 @@ static bool DispatchCLogRecord(XLogReaderState *record, List *expectedTLIs, Time /* Run from the dispatcher thread. */ static bool DispatchHashRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) { - bool isNeedFullSync = false; - - /* index not support mvcc, so we need to sync with trx thread when the record is vacuum */ - if (IsHashVacuumPages(record) && g_supportHotStandby) { - GetWorkerIds(record, ANY_WORKER, true); - /* sync with trxn thread */ - /* only need to process in pageworker thread, wait trxn sync */ - /* pageworker exe, trxn don't need exe */ - DispatchToSpecPageWorker(record, expectedTLIs, true); - } else { - DispatchRecordWithPages(record, expectedTLIs, true); - } - - return isNeedFullSync; + DispatchTxnRecord(record, expectedTLIs, recordXTime, false); + return true; } static bool DispatchBtreeRecord(XLogReaderState *record, List *expectedTLIs, TimestampTz recordXTime) @@ -1259,7 +1244,7 @@ static bool DispatchHeap2Record(XLogReaderState *record, List *expectedTLIs, Tim bool isNeedFullSync = false; uint8 info = ((XLogRecGetInfo(record) & (~XLR_INFO_MASK)) & XLOG_HEAP_OPMASK); - if ((info == XLOG_HEAP2_MULTI_INSERT) || (info == XLOG_HEAP2_PAGE_UPGRADE)) { + if (info == XLOG_HEAP2_MULTI_INSERT) { DispatchRecordWithPages(record, expectedTLIs, SUPPORT_FPAGE_DISPATCH); } else if (info == XLOG_HEAP2_BCM) { /* we use renode as dispatch key, so the same relation will dispath to the same page redo thread @@ -1381,25 +1366,6 @@ static void AddWorkerToSet(uint32 id) ++(g_dispatcher->chosedWorkerIds[id]); } -/* Run from the dispatcher and each page worker thread. 
*/ -bool XactWillRemoveRelFiles(XLogReaderState *record) -{ - /* - * Relation files under tablespace folders are removed only from - * applying transaction log record. - */ - int nrels = 0; - ColFileNodeRel *xnodes = NULL; - - if (XLogRecGetRmid(record) != RM_XACT_ID) { - return false; - } - - XactGetRelFiles(record, &xnodes, &nrels); - - return (nrels > 0); -} - /* Run from the dispatcher thread. */ static bool XLogWillChangeStandbyState(XLogReaderState *record) { @@ -1487,6 +1453,21 @@ void ProcessTrxnRecords(bool fullSync) /* Run from each page worker thread. */ void FreeRedoItem(RedoItem *item) { + if (item->need_free) { + XLogReaderState *tmpRec = &(item->record); + + if (tmpRec->readRecordBuf) { + pfree(tmpRec->readRecordBuf); + tmpRec->readRecordBuf = NULL; + } + + pfree(item); + return; + } + + if (!IsLSNMarker(item)) { + CountXLogNumbers(&item->record); + } RedoItem *oldHead = (RedoItem *)pg_atomic_read_uintptr((uintptr_t *)&g_dispatcher->freeHead); uint32 freed = pg_atomic_read_u32(&item->freed); if (freed != 0) { /* if it happens, there must be problems! check it */ @@ -1536,7 +1517,7 @@ void InitReaderStateByOld(XLogReaderState *newState, XLogReaderState *oldState, newState->main_data = NULL; newState->main_data_len = 0; newState->main_data_bufsz = 0; - for (int i = 0; i <= XLR_MAX_BLOCK_ID; i++) { + for (int i = 0; i <= newState->max_block_id; i++) { newState->blocks[i].data = NULL; newState->blocks[i].data_len = 0; newState->blocks[i].data_bufsz = 0; @@ -1552,6 +1533,7 @@ void InitReaderStateByOld(XLogReaderState *newState, XLogReaderState *oldState, newState->refcount = 0; newState->isDecode = false; newState->isFullSync = false; + newState->readblocks = 0; } static XLogReaderState *GetXlogReader(XLogReaderState *readerState) @@ -1817,6 +1799,22 @@ void GetReplayedRecPtrFromUndoWorkers(XLogRecPtr *readPtr, XLogRecPtr *endPtr) *endPtr = minEnd; } +void GetReplayedRecPtrFromWorkers(XLogRecPtr *endPtr) +{ + XLogRecPtr minEnd = MAX_XLOG_REC_PTR; + + for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; i++) { + if (!RedoWorkerIsIdle(g_dispatcher->pageWorkers[i])) { + XLogRecPtr end = GetCompletedRecPtr(g_dispatcher->pageWorkers[i]); + if (XLByteLT(end, minEnd)) { + minEnd = end; + } + } + } + + *endPtr = minEnd; +} + void GetReplayedRecPtrFromWorkers(XLogRecPtr *readPtr, XLogRecPtr *endPtr) { XLogRecPtr minRead = MAX_XLOG_REC_PTR; @@ -1887,45 +1885,47 @@ static void **CollectStatesFromWorkers(GetStateFunc getStateFunc) return NULL; } -void DiagLogRedoRecord(XLogReaderState *record, const char *funcName) +void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum) { - uint8 info; - RelFileNode oldRn = { 0 }; - RelFileNode newRn = { 0 }; - BlockNumber oldblk = InvalidBlockNumber; - BlockNumber newblk = InvalidBlockNumber; - bool newBlkExistFlg = false; - bool oldBlkExistFlg = false; - ForkNumber oldFk = InvalidForkNumber; - ForkNumber newFk = InvalidForkNumber; - StringInfoData buf; + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.rwlock)); + knl_parallel_redo_state state = g_instance.comm_cxt.predo_cxt.state; + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.rwlock)); - /* Support redo old version xlog during upgrade (Just the runningxact log with chekpoint online ) */ - uint32 rmid = redo_oldversion_xlog ? ((XLogRecordOld *)record->decoded_record)->xl_rmid : XLogRecGetRmid(record); - info = redo_oldversion_xlog ? 
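FreeRedoItem above returns items to g_dispatcher->freeHead; the push loop itself lies outside this hunk, but the visible pg_atomic_read_uintptr on freeHead plus the freed guard flag point at the classic lock-free LIFO. Below is a self-contained sketch of that push using std::atomic in place of the pg_atomic_* primitives; the freeNext link field is an assumption, since the hunk does not show RedoItem's free-list member.

```cpp
#include <atomic>

// Sketch of a lock-free LIFO ("Treiber stack") push, the pattern the
// dispatcher's free list relies on. Illustrative only.
struct ItemSketch {
    ItemSketch *freeNext = nullptr;   // hypothetical link field
};

static std::atomic<ItemSketch *> g_freeHeadSketch{nullptr};

static void FreeListPushSketch(ItemSketch *item)
{
    ItemSketch *oldHead = g_freeHeadSketch.load(std::memory_order_relaxed);
    do {
        item->freeNext = oldHead;     // link before publishing
    } while (!g_freeHeadSketch.compare_exchange_weak(
        oldHead, item, std::memory_order_release, std::memory_order_relaxed));
}
```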
((((XLogRecordOld *)record->decoded_record)->xl_info) & ~XLR_INFO_MASK) : - (XLogRecGetInfo(record) & ~XLR_INFO_MASK); - - initStringInfo(&buf); - RmgrTable[rmid].rm_desc(&buf, record); - - if (XLogRecGetBlockTag(record, 0, &newRn, &newFk, &newblk)) { - newBlkExistFlg = true; + if (state != REDO_IN_PROGRESS) { + *realNum = 0; + return; } - if (XLogRecGetBlockTag(record, 1, &oldRn, &oldFk, &oldblk)) { - oldBlkExistFlg = true; + + PageRedoWorker *redoWorker = NULL; + SpinLockAcquire(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); + if (g_dispatcher == NULL) { + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); + *realNum = 0; + return; } - ereport(DEBUG4, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), - errmsg("[REDO_LOG_TRACE]DiagLogRedoRecord: %s, ReadRecPtr:%lu,EndRecPtr:%lu," - "newBlkExistFlg:%d," - "newRn(spcNode:%u, dbNode:%u, relNode:%u),newFk:%d,newblk:%u," - "oldBlkExistFlg:%d," - "oldRn(spcNode:%u, dbNode:%u, relNode:%u),oldFk:%d,oldblk:%u," - "info:%u,redo_oldversion_xlog:%d, rm_name:%s, desc:%s," - "max_block_id:%d", - funcName, record->ReadRecPtr, record->EndRecPtr, newBlkExistFlg, newRn.spcNode, newRn.dbNode, newRn.relNode, - newFk, newblk, oldBlkExistFlg, oldRn.spcNode, oldRn.dbNode, oldRn.relNode, oldFk, oldblk, (uint32)info, - redo_oldversion_xlog, RmgrTable[rmid].rm_name, buf.data, record->max_block_id))); - pfree_ext(buf.data); + *realNum = g_dispatcher->pageWorkerCount + 1; + RedoWorkerTimeCountsInfo *workerList = + (RedoWorkerTimeCountsInfo *)palloc0((*realNum) * sizeof(RedoWorkerTimeCountsInfo)); + errno_t rc; + const uint32 workerNumSize = 2; + uint32 cur_pos = 0; + const char *workName = "pagewoker"; + const char *startupName = "startup"; + for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; ++i) { + redoWorker = (g_dispatcher->pageWorkers[i]); + workerList[cur_pos].worker_name = (char*)palloc(strlen(workName) + workerNumSize + 1); + rc = sprintf_s(workerList[cur_pos].worker_name, strlen(workName) + workerNumSize + 1, "%s%u", workName, i); + securec_check_ss(rc, "\0", "\0"); + workerList[cur_pos++].time_cost = redoWorker->timeCostList; + } + + workerList[cur_pos].worker_name = (char*)palloc(strlen(startupName) + 1); + rc = sprintf_s(workerList[cur_pos].worker_name, strlen(startupName) + 1, "%s", startupName); + securec_check_ss(rc, "\0", "\0"); + workerList[cur_pos++].time_cost = g_dispatcher->startupTimeCost; + SpinLockRelease(&(g_instance.comm_cxt.predo_cxt.destroy_lock)); + *workerCountInfoList = workerList; + Assert(*realNum == cur_pos); } void redo_get_wroker_statistic(uint32 *realNum, RedoWorkerStatsData *worker, uint32 workerLen) @@ -2048,6 +2048,21 @@ void SendClearMarkToAllWorkers() } } +void SendClosefdMarkToAllWorkers() +{ + for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; i++) { + SendPageRedoClosefdMark(g_dispatcher->pageWorkers[i]); + } +} + +void SendCleanInvalidPageMarkToAllWorkers(RepairFileKey key) +{ + for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; i++) { + SendPageRedoCleanInvalidPageMark(g_dispatcher->pageWorkers[i], key); + } +} + + static void AddUndoSpaceAndTransGrpWorkersForUHeapRecord(XLogReaderState *record, XlUndoHeader *xlundohdr, undo::XlogUndoMeta *xlundometa, const char *commandString) { diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp index c782e0a6c..4ec30a5cb 100755 --- a/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp +++ 
b/src/gausskernel/storage/access/transam/parallel_recovery/page_redo.cpp @@ -37,10 +37,12 @@ #include "access/xlog.h" #include "access/xlog_internal.h" #include "access/xlogutils.h" +#include "access/xlogproc.h" #include "catalog/storage_xlog.h" #include "gssignal/gs_signal.h" #include "libpq/pqsignal.h" #include "postmaster/postmaster.h" +#include "postmaster/pagerepair.h" #include "storage/ipc.h" #include "access/nbtree.h" #include "utils/guc.h" @@ -52,8 +54,8 @@ #include "commands/dbcommands.h" #include "access/parallel_recovery/page_redo.h" #include "access/parallel_recovery/dispatcher.h" -#include "pgstat.h" #include "access/multi_redo_api.h" +#include "pgstat.h" namespace parallel_recovery { @@ -65,7 +67,9 @@ THR_LOCAL PageRedoWorker *g_redoWorker = NULL; static RedoItem g_redoEndMark = {}; static RedoItem g_terminateMark = {}; -static RedoItem g_cleanupMark = { false, false, false, false, 0 }; +static RedoItem g_cleanupMark = { false, false, false, false, 0}; +static RedoItem g_closefdMark = { false, false, false, false, 0}; +static RedoItem g_cleanInvalidPageMark = { false, false, false, false, 0}; static const int PAGE_REDO_WORKER_ARG = 3; static const int REDO_SLEEP_50US = 50; @@ -185,6 +189,10 @@ static PageRedoWorker *CreateWorker(uint32 id) worker->statWaitReplay = 0; worker->oldCtx = NULL; worker->bufferPinWaitBufId = -1; + worker->remoteReadPageNum = 0; + + worker->badPageHashTbl = BadBlockHashTblCreate(); + pg_atomic_write_u32(&(worker->readyStatus), PAGE_REDO_WORKER_INVALID); #if (!defined __x86_64__) && (!defined __aarch64__) SpinLockInit(&worker->ptrLck); @@ -257,6 +265,10 @@ void HandlePageRedoInterrupts() t_thrd.page_redo_cxt.got_SIGHUP = false; ProcessConfigFile(PGC_SIGHUP); } + if (t_thrd.page_redo_cxt.check_repair) { + SeqCheckRemoteReadAndRepairPage(); + t_thrd.page_redo_cxt.check_repair = false; + } if (t_thrd.page_redo_cxt.shutdown_requested) { ereport(LOG, @@ -271,6 +283,16 @@ void HandlePageRedoInterrupts() } } +/* HandleRedoPageRepair + * if the page crc verify failed, call the function record the bad block. + */ +void HandleRedoPageRepair(RepairBlockKey key, XLogPhyBlock pblk) +{ + XLogReaderState *record = g_redoWorker->current_item; + RecordBadBlockAndPushToRemote(record, key, CRC_CHECK_FAIL, InvalidXLogRecPtr, pblk); + return; +} + /* Run from the worker thread. 
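The check_repair flag handled here follows the usual rule for signal-driven work: the handler only records the request, and the expensive repair scan runs later from the interrupt callback on the worker's own stack, where heavy operations are safe. A minimal, self-contained illustration of the pattern (names with a Sketch suffix are hypothetical):

```cpp
#include <csignal>

static volatile sig_atomic_t g_checkRepairSketch = 0;

extern "C" void SigUser1HandlerSketch(int)
{
    g_checkRepairSketch = 1;          // async-signal-safe: no locks, no I/O
}

static void InterruptPollPointSketch()
{
    if (g_checkRepairSketch) {
        g_checkRepairSketch = 0;
        // heavy work (hash table scans, remote reads) is safe here, outside
        // signal context; cf. SeqCheckRemoteReadAndRepairPage() in this patch
    }
}
```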
*/ void PageRedoWorkerMain() { @@ -292,7 +314,8 @@ void PageRedoWorkerMain() SetupSignalHandlers(); (void)RegisterRedoInterruptCallBack(HandlePageRedoInterrupts); - + (void)RegisterRedoPageRepairCallBack(HandleRedoPageRepair); + InitGlobals(); ResourceManagerStartup(); @@ -315,6 +338,10 @@ static void PageRedoShutdownHandler(SIGNAL_ARGS) t_thrd.page_redo_cxt.shutdown_requested = true; } +static void PageRedoSigUser1Handler(SIGNAL_ARGS) +{ + t_thrd.page_redo_cxt.check_repair = true; +} static void PageRedoQuickDie(SIGNAL_ARGS) { int status = 2; @@ -337,7 +364,7 @@ static void SetupSignalHandlers() (void)gspqsignal(SIGQUIT, PageRedoQuickDie); (void)gspqsignal(SIGALRM, SIG_IGN); (void)gspqsignal(SIGPIPE, SIG_IGN); - (void)gspqsignal(SIGUSR1, SIG_IGN); + (void)gspqsignal(SIGUSR1, PageRedoSigUser1Handler); (void)gspqsignal(SIGUSR2, PageRedoUser2Handler); (void)gspqsignal(SIGCHLD, SIG_IGN); (void)gspqsignal(SIGTTIN, SIG_IGN); @@ -374,16 +401,11 @@ static void InitGlobals() void ApplyProcHead(RedoItem *head) { - uint32 isSkipItem; while (head != NULL) { RedoItem *cur = head; + g_redoWorker->current_item = &cur->record; head = head->nextByWorker[g_redoWorker->id + 1]; - isSkipItem = pg_atomic_read_u32(&g_redoWorker->skipItemFlg); - if (isSkipItem == PAGE_REDO_WORKER_APPLY_ITEM) { - ApplyAndFreeRedoItem(cur); - } else { - OnlyFreeRedoItem(cur); - } + ApplyAndFreeRedoItem(cur); } } @@ -395,16 +417,22 @@ static int ApplyRedoLoop() instr_time endTime; INSTR_TIME_SET_CURRENT(startTime); - + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); while ((head = (RedoItem *)SPSCBlockingQueueTop(g_redoWorker->queue)) != &g_redoEndMark) { + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); if (head == &g_cleanupMark) { g_redoWorker->btreeIncompleteActions = btree_get_incomplete_actions(); g_redoWorker->xlogInvalidPages = XLogGetInvalidPages(); + } else if (head == &g_closefdMark) { + smgrcloseall(); + } else if (head == &g_cleanInvalidPageMark) { + forget_range_invalid_pages((void*)head); } else { ApplyProcHead(head); } SPSCBlockingQueuePop(g_redoWorker->queue); RedoInterruptCallBack(); + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_1]); } SPSCBlockingQueuePop(g_redoWorker->queue); @@ -431,8 +459,9 @@ static int ApplyRedoLoop() /* Run from the worker thread. */ static void ApplyAndFreeRedoItem(RedoItem *item) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); UpdateRecordGlobals(item, g_redoWorker->standbyState); - + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_3]); /* * We need to save the LSN here because the following apply will * free the item. 
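ApplyRedoLoop above tells control messages from real work purely by identity: the marks are static singletons, so comparing the queue head's address against them suffices; g_cleanInvalidPageMark additionally carries a RepairFileKey that SendPageRedoCleanInvalidPageMark (later in this patch) copies over its leading bytes. A condensed sketch of that consumer shape, with top()/pop() standing in for SPSCBlockingQueueTop/SPSCBlockingQueuePop:

```cpp
// Illustrative consumer loop: static sentinel objects matched by address.
struct ItemSketch {};
static ItemSketch g_endMarkSketch;
static ItemSketch g_closefdMarkSketch;

template <typename Queue>
void ConsumeLoopSketch(Queue &q)
{
    ItemSketch *head;
    while ((head = static_cast<ItemSketch *>(q.top())) != &g_endMarkSketch) {
        if (head == &g_closefdMarkSketch) {
            // control message: close cached fds (smgrcloseall in the patch)
        } else {
            // ordinary redo item: replay it
        }
        q.pop();       // pop only after handling, per the SPSC protocol above
    }
    q.pop();           // consume the end mark itself
}
```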
@@ -441,47 +470,49 @@ static void ApplyAndFreeRedoItem(RedoItem *item) XLogRecPtr readLSN = item->record.ReadRecPtr; if (item->replay_undo) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]); g_redoWorker->replay_undo = (g_redoWorker->role == UndoLogZidWorker); ApplyRecordWithoutSyncUndoLog(item); - pg_atomic_write_u64(&g_redoWorker->lastReplayedEndRecPtr, endLSN); + SetCompletedReadEndPtr(g_redoWorker, readLSN, endLSN); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_4]); } else if (IsLSNMarker(item)) { FreeRedoItem(item); SetCompletedReadEndPtr(g_redoWorker, readLSN, endLSN); - } else if (item->sharewithtrxn) + } else if (item->sharewithtrxn) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]); ApplyMultiPageShareWithTrxnRecord(item); - else if (item->blockbytrxn) + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_5]); + } else if (item->blockbytrxn) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); ApplyMultiPageSyncWithTrxnRecord(item); - else if (item->shareCount == 1) { + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_6]); + } else if (item->shareCount == 1) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_7]); ApplySinglePageRecord(item); SetCompletedReadEndPtr(g_redoWorker, readLSN, endLSN); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_7]); } else { if (item->designatedWorker == ALL_WORKER) { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]); ApplyMultiPageAllWorkerRecord(item); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_8]); } else { + GetRedoStartTime(g_redoWorker->timeCostList[TIME_COST_STEP_9]); ApplyMultiPageRecord(item); SetCompletedReadEndPtr(g_redoWorker, readLSN, endLSN); + CountRedoTime(g_redoWorker->timeCostList[TIME_COST_STEP_9]); } } } -/* Run from the worker thread. 
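The hunk below brackets every dispatch branch of ApplyAndFreeRedoItem with GetRedoStartTime/CountRedoTime on a per-step slot of the worker's timeCostList. The macros' real definitions are outside this hunk; the stand-ins here only pin down the semantics the call sites imply (stamp on start, accumulate elapsed time and a hit counter on count) and are not the kernel's implementation.

```cpp
#include <cstdint>
#include <ctime>

struct RedoTimeCostSketch {
    uint64_t startAt = 0;           // set by the "start" half
    uint64_t totalDuration = 0;     // accumulated by the "count" half
    uint64_t counter = 0;           // number of bracketed sections
};

static inline uint64_t NowNsSketch()
{
    timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static inline void GetRedoStartTimeSketch(RedoTimeCostSketch &c) { c.startAt = NowNsSketch(); }

static inline void CountRedoTimeSketch(RedoTimeCostSketch &c)
{
    c.totalDuration += NowNsSketch() - c.startAt;
    c.counter++;
}
```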
*/ -void OnlyFreeRedoItem(RedoItem *item) -{ - if (pg_atomic_read_u32(&item->freed) == 0) { - FreeRedoItem(item); - } -} - /* Run from the worker thread */ static void ApplyRecordWithoutSyncUndoLog(RedoItem *item) { - bool bOld = item->oldVersion; - /* No need to sync with undo log */ if (g_redoWorker->replay_undo) { Assert(g_redoWorker->role == UndoLogZidWorker); MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - ApplyRedoRecord(&item->record, bOld); + ApplyRedoRecord(&item->record); (void)MemoryContextSwitchTo(oldCtx); uint32 shrCount = pg_atomic_read_u32(&item->shareCount); uint32 trueRefCount = pg_atomic_add_fetch_u32(&item->trueRefCount, 1); @@ -520,18 +551,19 @@ static void ApplyRecordWithoutSyncUndoLog(RedoItem *item) static void ApplySinglePageRecord(RedoItem *item, bool replayUndo) { XLogReaderState *record = &item->record; - bool bOld = item->oldVersion; - + long readbufcountbefore = u_sess->instr_cxt.pg_buffer_usage->local_blks_read; MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); - ApplyRedoRecord(record, bOld); + ApplyRedoRecord(record); (void)MemoryContextSwitchTo(oldCtx); + record->readblocks = u_sess->instr_cxt.pg_buffer_usage->local_blks_read - readbufcountbefore; if (replayUndo) { uint32 shrCount = pg_atomic_read_u32(&item->shareCount); uint32 trueRefCount = pg_atomic_add_fetch_u32(&item->trueRefCount, 1); uint32 undoZidWorkersNum = get_recovery_undozidworkers_num(); - if (trueRefCount == (shrCount + undoZidWorkersNum)) + if (trueRefCount == (shrCount + undoZidWorkersNum)) { FreeRedoItem(item); + } } else { FreeRedoItem(item); } @@ -591,7 +623,7 @@ static void WaitAndApplyMultiPageRecord(RedoItem *item) MemoryContext oldCtx = MemoryContextSwitchTo(g_redoWorker->oldCtx); /* Apply the record and wake up other workers. 
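ApplySinglePageRecord and ApplyRecordWithoutSyncUndoLog above share one release rule: every sharer, plus each undo zid worker when replay_undo is set, bumps trueRefCount exactly once, and whichever thread brings it to shareCount + undoZidWorkersNum is the last user and frees the item. Condensed into a self-contained form:

```cpp
#include <atomic>
#include <cstdint>

// Last-user-frees protocol for a RedoItem shared across page/undo workers.
struct SharedItemSketch {
    uint32_t shareCount;                    // fixed at dispatch time
    std::atomic<uint32_t> trueRefCount{0};
};

// Returns true when the caller is the last user and must free the item.
bool ReleaseSharedSketch(SharedItemSketch *item, uint32_t undoZidWorkersNum)
{
    uint32_t refs = item->trueRefCount.fetch_add(1) + 1;
    return refs == item->shareCount + undoZidWorkersNum;
}
```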
 */
-    ApplyRedoRecord(&item->record, item->oldVersion);
+    ApplyRedoRecord(&item->record);
     (void)MemoryContextSwitchTo(oldCtx);
 
     pg_memory_barrier();
@@ -674,7 +706,6 @@ static void ApplyMultiPageAllWorkerRecord(RedoItem *item)
 {
     g_redoWorker->statMulpageCnt++;
     XLogReaderState *record = &item->record;
-    bool bOld = item->oldVersion;
     XLogRecPtr endLSN = item->record.EndRecPtr;
     XLogRecPtr readLSN = item->record.ReadRecPtr;
     (void)pg_atomic_add_fetch_u32(&item->refCount, 1);
@@ -682,14 +713,10 @@
 
     if (IsCheckPoint(record)) {
         CheckPoint checkPoint;
-        if (bOld) {
-            checkPoint = update_checkpoint(record);
-        } else {
-            rc = memcpy_s(&checkPoint, sizeof(checkPoint), XLogRecGetData(record), sizeof(checkPoint));
-            securec_check(rc, "\0", "\0");
-        }
+        rc = memcpy_s(&checkPoint, sizeof(checkPoint), XLogRecGetData(record), sizeof(checkPoint));
+        securec_check(rc, "\0", "\0");
 
-        if (HasTimelineUpdate(record, bOld)) {
+        if (HasTimelineUpdate(record)) {
             UpdateTimeline(&checkPoint);
         }
 
@@ -881,6 +908,21 @@ bool SendPageRedoClearMark(PageRedoWorker *worker)
     return SPSCBlockingQueuePut(worker->queue, &g_cleanupMark);
 }
 
+bool SendPageRedoClosefdMark(PageRedoWorker *worker)
+{
+    return SPSCBlockingQueuePut(worker->queue, &g_closefdMark);
+}
+
+
+bool SendPageRedoCleanInvalidPageMark(PageRedoWorker *worker, RepairFileKey key)
+{
+    /* stash the key in the mark's leading bytes; the consumer reads it back in ApplyRedoLoop */
+    errno_t rc = memcpy_s((char*)&g_cleanInvalidPageMark, sizeof(RepairFileKey), (char*)&key, sizeof(RepairFileKey));
+    securec_check(rc, "", "");
+
+    return SPSCBlockingQueuePut(worker->queue, &g_cleanInvalidPageMark);
+}
+
+
 bool SendPageRedoWorkerTerminateMark(PageRedoWorker *worker)
 {
     return SPSCBlockingQueuePut(worker->queue, &g_terminateMark);
@@ -917,6 +959,7 @@ void UpdatePageRedoWorkerStandbyState(PageRedoWorker *worker, HotStandbyState ne
 /* Run from the txn worker thread. */
 XLogRecPtr GetCompletedRecPtr(PageRedoWorker *worker)
 {
+    pg_read_barrier();
     return pg_atomic_read_u64(&worker->lastReplayedEndRecPtr);
 }
 
@@ -1051,4 +1094,278 @@ void WaitAllPageWorkersQueueEmpty()
     }
 }
 
+void RepairPageAndRecoveryXLog(BadBlockRecEnt *page_info, const char *page)
+{
+    RedoBufferInfo buffer;
+    RedoBufferTag blockinfo;
+    errno_t rc;
+    RedoItem *item = NULL;
+
+    blockinfo.rnode = page_info->key.relfilenode;
+    blockinfo.forknum = page_info->key.forknum;
+    blockinfo.blkno = page_info->key.blocknum;
+    blockinfo.pblk = page_info->pblk;
+
+    /* read the page into the buffer pool in RBM_ZERO_AND_LOCK mode, which also takes the buffer lock */
+    (void)XLogReadBufferForRedoBlockExtend(&blockinfo, RBM_ZERO_AND_LOCK, false, &buffer,
+        page_info->rec_max_lsn, InvalidXLogRecPtr, false, WITH_NORMAL_CACHE, false);
+
+    rc = memcpy_s(buffer.pageinfo.page, BLCKSZ, page, BLCKSZ);
+    securec_check(rc, "", "");
+    MarkBufferDirty(buffer.buf);
+    UnlockReleaseBuffer(buffer.buf);
+
+    /* replay the xlog recorded for this page */
+    item = page_info->head;
+    while (item != NULL) {
+        RedoItem *next = item->remoteNext;
+        ApplySinglePageRecord(item);
+        item = next;
+    }
+}
+
+HTAB* BadBlockHashTblCreate()
+{
+    HASHCTL ctl;
+    errno_t rc;
+
+    rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl));
+    securec_check(rc, "", "");
+
+    ctl.keysize = sizeof(RepairBlockKey);
+    ctl.entrysize = sizeof(BadBlockRecEnt);
+    ctl.hash = tag_hash;
+
+    return hash_create("recovery thread bad block hashtbl", MAX_REMOTE_READ_INFO_NUM, &ctl, HASH_ELEM | HASH_FUNCTION);
+}
+
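The pg_read_barrier() now in GetCompletedRecPtr is half of a publish/consume pairing: a page worker must order its page modifications before publishing a new lastReplayedEndRecPtr, and the pollers in txn_redo must not consume a reordered value. The same pairing modeled with C++ atomics follows; the producer half is an assumption about SetCompletedReadEndPtr, whose body is outside this hunk.

```cpp
#include <atomic>
#include <cstdint>

// Release-store on the producer, acquire-load on the consumer; the kernel
// expresses the same ordering with pg_write_barrier()/pg_read_barrier().
struct WorkerProgressSketch {
    std::atomic<uint64_t> lastReplayedEnd{0};
};

void PublishProgressSketch(WorkerProgressSketch &w, uint64_t end)
{
    // every page write sequenced before this store is visible to any
    // thread that observes the new LSN
    w.lastReplayedEnd.store(end, std::memory_order_release);
}

uint64_t ReadProgressSketch(const WorkerProgressSketch &w)
{
    return w.lastReplayedEnd.load(std::memory_order_acquire);
}
```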
+/* RecordBadBlockAndPushToRemote
+ *         If the bad page has already been recorded, just chain this xlog record onto
+ *         its entry. Otherwise the page is pushed to the page repair thread's hash
+ *         table and a new entry is created in the recovery thread's hash table.
+ */
+void RecordBadBlockAndPushToRemote(XLogReaderState *record, RepairBlockKey key,
+    PageErrorType error_type, XLogRecPtr old_lsn, XLogPhyBlock pblk)
+{
+    bool found = false;
+    BadBlockRecEnt *remoteReadInfo = NULL;
+    RedoItem *newItem = NULL;
+    bool thread_found = false;
+    HTAB *bad_hash = NULL;
+    gs_thread_t tid = gs_thread_get_cur_thread();
+
+    if (AmPageRedoWorker()) {
+        bad_hash = g_redoWorker->badPageHashTbl;
+    } else {
+        Assert(AmStartupProcess());
+        bad_hash = g_instance.startup_cxt.badPageHashTbl;
+    }
+
+    found = PushBadPageToRemoteHashTbl(key, error_type, old_lsn, pblk, tid.thid);
+
+    if (found) {
+        /* store the record for recovery */
+        remoteReadInfo = (BadBlockRecEnt*)hash_search(bad_hash, &(key), HASH_FIND, &thread_found);
+        newItem = (RedoItem *)palloc_extended(MAXALIGN(sizeof(RedoItem)) +
+            sizeof(RedoItem *) * (GetPageWorkerCount() + 1), MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO);
+
+        InitReaderStateByOld(&newItem->record, record, true);
+        CopyDataFromOldReader(&newItem->record, record);
+        newItem->need_free = true;
+
+        Assert(thread_found);
+        if (!thread_found) {
+            ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED),
+                            errmsg("recovery thread bad block hash table corrupted")));
+        }
+        newItem->remoteNext = NULL;
+        remoteReadInfo->tail->remoteNext = newItem;
+        remoteReadInfo->tail = newItem;
+        remoteReadInfo->rec_max_lsn = newItem->record.EndRecPtr;
+    } else {
+        remoteReadInfo = (BadBlockRecEnt*)hash_search(bad_hash, &(key), HASH_ENTER, &thread_found);
+        Assert(!thread_found);
+        newItem = (RedoItem *)palloc_extended(MAXALIGN(sizeof(RedoItem)) +
+            sizeof(RedoItem *) * (GetPageWorkerCount() + 1), MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO);
+
+        InitReaderStateByOld(&newItem->record, record, true);
+        CopyDataFromOldReader(&newItem->record, record);
+        newItem->need_free = true;
+
+        remoteReadInfo->key.relfilenode = key.relfilenode;
+        remoteReadInfo->key.forknum = key.forknum;
+        remoteReadInfo->key.blocknum = key.blocknum;
+        remoteReadInfo->pblk = pblk;
+        remoteReadInfo->rec_min_lsn = newItem->record.EndRecPtr;
+        remoteReadInfo->rec_max_lsn = newItem->record.EndRecPtr;
+        newItem->remoteNext = NULL;
+        remoteReadInfo->head = newItem;
+        remoteReadInfo->tail = newItem;
+
+        if (AmPageRedoWorker()) {
+            g_redoWorker->remoteReadPageNum++;
+            if (g_redoWorker->remoteReadPageNum >= MAX_REMOTE_READ_INFO_NUM) {
+                ereport(WARNING, (errmsg("recovery thread found %d error blocks.", g_redoWorker->remoteReadPageNum)));
+            }
+        } else {
+            Assert(AmStartupProcess());
+            g_instance.startup_cxt.remoteReadPageNum++;
+            if (g_instance.startup_cxt.remoteReadPageNum >= MAX_REMOTE_READ_INFO_NUM) {
+                ereport(WARNING, (errmsg("startup thread found %d error blocks.",
+                                         g_instance.startup_cxt.remoteReadPageNum)));
+            }
+        }
+    }
+}
+
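RecordBadBlockAndPushToRemote drives the bad-block table through the standard dynahash protocol: probe, then initialize the payload yourself, since hash_search only copies the key when it creates an entry. Using the function's own variables, the same logic condenses to a single HASH_ENTER probe; written this way, the newItem setup that the function performs identically in both branches could also be hoisted above the probe.

```cpp
// Condensed insert-or-append over the bad-block dynahash table.
bool found = false;
BadBlockRecEnt *ent = (BadBlockRecEnt *)hash_search(bad_hash, &key, HASH_ENTER, &found);
if (!found) {
    /* fresh entry: only the key was copied, so initialize the payload */
    ent->head = ent->tail = newItem;
    ent->rec_min_lsn = ent->rec_max_lsn = newItem->record.EndRecPtr;
} else {
    /* existing entry: append the record and advance the max LSN */
    ent->tail->remoteNext = newItem;
    ent->tail = newItem;
    ent->rec_max_lsn = newItem->record.EndRecPtr;
}
```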
+/* ClearRecoveryThreadHashTbl
+ *         On drop table or truncate table we need to clear the recovery thread's
+ *         bad-block hash table: any entry whose RelFileNode matches the repaired
+ *         page must be removed.
+ */
+void ClearRecoveryThreadHashTbl(const RelFileNode &node, ForkNumber forknum, BlockNumber minblkno,
+    bool segment_shrink)
+{
+    HTAB *bad_hash = NULL;
+    bool found = false;
+    BadBlockRecEnt *entry = NULL;
+    HASH_SEQ_STATUS status;
+
+    if (AmPageRedoWorker()) {
+        bad_hash = g_redoWorker->badPageHashTbl;
+    } else {
+        Assert(AmStartupProcess());
+        bad_hash = g_instance.startup_cxt.badPageHashTbl;
+    }
+
+    hash_seq_init(&status, bad_hash);
+    while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) {
+        if (BlockNodeMatch(entry->key, entry->pblk, node, forknum, minblkno, segment_shrink)) {
+            if (hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found) == NULL) {
+                ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED),
+                                errmsg("recovery thread bad page hash table corrupted")));
+            }
+            /* keep the owner's counter in sync; the startup thread has no g_redoWorker */
+            if (AmPageRedoWorker()) {
+                g_redoWorker->remoteReadPageNum--;
+            } else {
+                g_instance.startup_cxt.remoteReadPageNum--;
+            }
+        }
+    }
+}
+
+/* BatchClearRecoveryThreadHashTbl
+ *         On drop database or drop segmentspace we need to clear the recovery
+ *         thread's bad-block hash table: any entry whose key matches both dbNode
+ *         and spcNode must be removed.
+ */
+void BatchClearRecoveryThreadHashTbl(Oid spcNode, Oid dbNode)
+{
+    HTAB *bad_hash = NULL;
+    bool found = false;
+    BadBlockRecEnt *entry = NULL;
+    HASH_SEQ_STATUS status;
+
+    if (AmPageRedoWorker()) {
+        bad_hash = g_redoWorker->badPageHashTbl;
+    } else {
+        Assert(AmStartupProcess());
+        bad_hash = g_instance.startup_cxt.badPageHashTbl;
+    }
+
+    hash_seq_init(&status, bad_hash);
+    while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) {
+        if (dbNodeandSpcNodeMatch(&(entry->key.relfilenode), spcNode, dbNode)) {
+            if (hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found) == NULL) {
+                ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("page repair hash table corrupted")));
+            }
+            if (AmPageRedoWorker()) {
+                g_redoWorker->remoteReadPageNum--;
+            } else {
+                g_instance.startup_cxt.remoteReadPageNum--;
+            }
+        }
+    }
+}
+
+
+/* ClearSpecificsPageEntryAndMem
+ *         If the page has been repaired, remove its entry from the bad-page hash
+ *         table and release the memory of the stored xlog records.
+ */
+void ClearSpecificsPageEntryAndMem(BadBlockRecEnt *entry)
+{
+    HTAB *bad_hash = NULL;
+    bool found = false;
+
+    if (AmPageRedoWorker()) {
+        bad_hash = g_redoWorker->badPageHashTbl;
+    } else {
+        Assert(AmStartupProcess());
+        bad_hash = g_instance.startup_cxt.badPageHashTbl;
+    }
+
+    if ((BadBlockRecEnt*)hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found) == NULL) {
+        ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED),
+                        errmsg("recovery thread bad block hash table corrupted")));
+    }
+}
+
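Both cleanup functions above delete entries while a sequential scan is in flight. That is safe here because dynahash explicitly permits removing the entry most recently returned by hash_seq_search; the skeleton of the shape, with EntryMatchesSketch as a hypothetical predicate:

```cpp
// Scan-and-prune skeleton over a dynahash table.
HASH_SEQ_STATUS status;
BadBlockRecEnt *entry = NULL;
bool found = false;

hash_seq_init(&status, bad_hash);
while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) {
    if (EntryMatchesSketch(entry)) {
        /* deleting the just-returned element during the scan is allowed */
        (void)hash_search(bad_hash, &(entry->key), HASH_REMOVE, &found);
    }
}
```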
+/* CheckRemoteReadAndRepairPage
+ *         Check the page state in the page repair hash table; if the page can be
+ *         repaired, repair it, replay the recorded xlog, and clean up the page info.
+ */
+void CheckRemoteReadAndRepairPage(BadBlockRecEnt *entry)
+{
+    RepairBlockKey key;
+    XLogRecPtr rec_min_lsn = InvalidXLogRecPtr;
+    XLogRecPtr rec_max_lsn = InvalidXLogRecPtr;
+    bool check = false;
+    char *page = NULL;
+
+    key = entry->key;
+    rec_min_lsn = entry->rec_min_lsn;
+    rec_max_lsn = entry->rec_max_lsn;
+
+    if (AmPageRedoWorker()) {
+        page = g_redoWorker->page;
+    } else {
+        Assert(AmStartupProcess());
+        page = g_instance.startup_cxt.page;
+    }
+
+    check = CheckRepairPage(key, rec_min_lsn, rec_max_lsn, page);
+    if (check) {
+        /* copy the page to the buffer pool and replay the stored xlog */
+        RepairPageAndRecoveryXLog(entry, page);
+        /* clear this thread's invalid page hash table */
+        forget_specified_invalid_pages(key);
+        /* clear this thread's bad block hash entry */
+        ClearSpecificsPageEntryAndMem(entry);
+        /* clear the page repair thread's hash table */
+        ClearSpecificsPageRepairHashTbl(key);
+        if (AmPageRedoWorker()) {
+            g_redoWorker->remoteReadPageNum--;
+        } else {
+            g_instance.startup_cxt.remoteReadPageNum--;
+        }
+    }
+}
+
+
+void SeqCheckRemoteReadAndRepairPage()
+{
+    BadBlockRecEnt *entry = NULL;
+    HASH_SEQ_STATUS status;
+    HTAB *bad_hash = NULL;
+
+    if (AmPageRedoWorker()) {
+        bad_hash = g_redoWorker->badPageHashTbl;
+    } else {
+        Assert(AmStartupProcess());
+        bad_hash = g_instance.startup_cxt.badPageHashTbl;
+    }
+
+    hash_seq_init(&status, bad_hash);
+    while ((entry = (BadBlockRecEnt *)hash_seq_search(&status)) != NULL) {
+        CheckRemoteReadAndRepairPage(entry);
+    }
+}
+
 } // namespace parallel_recovery
diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/redo_item.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/redo_item.cpp
index c8e390305..9ac024fde 100644
--- a/src/gausskernel/storage/access/transam/parallel_recovery/redo_item.cpp
+++ b/src/gausskernel/storage/access/transam/parallel_recovery/redo_item.cpp
@@ -67,7 +67,6 @@ RedoItem *CreateRedoItem(XLogReaderState *record, uint32 shareCount, uint32 desi
     }
 
     item->replay_undo = false;
-    item->oldVersion = t_thrd.xlog_cxt.redo_oldversion_xlog;
     item->sharewithtrxn = false;
     item->blockbytrxn = false;
     item->imcheckpoint = false;
@@ -101,7 +100,7 @@ RedoItem *CreateLSNMarker(XLogReaderState *record, List *expectedTLIs, bool buse
         /* don't need to copy data, only need copy state */
         item = GetRedoItemPtr(NewReaderState(record, false));
     }
-    item->oldVersion = t_thrd.xlog_cxt.redo_oldversion_xlog;
+
     item->sharewithtrxn = false;
     item->blockbytrxn = false;
     item->imcheckpoint = false;
@@ -126,21 +125,4 @@ bool IsLSNMarker(const RedoItem *item)
     return item->shareCount == LSN_MARKER;
 }
 
-void ApplyRedoRecord(XLogReaderState *record, bool bOld)
-{
-    t_thrd.xlog_cxt.redo_oldversion_xlog = bOld;
-    ErrorContextCallback errContext;
-    errContext.callback = rm_redo_error_callback;
-    errContext.arg = (void *)record;
-    errContext.previous = t_thrd.log_cxt.error_context_stack;
-    t_thrd.log_cxt.error_context_stack = &errContext;
-    if (module_logging_is_on(MOD_REDO)) {
-        DiagLogRedoRecord(record, "ApplyRedoRecord");
-    }
-    RmgrTable[XLogRecGetRmid(record)].rm_redo(record);
-
-    t_thrd.log_cxt.error_context_stack = errContext.previous;
-    t_thrd.xlog_cxt.redo_oldversion_xlog = false;
-}
-
 } // namespace parallel_recovery
diff --git a/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp b/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp
index 4c0f18667..0c707c40f 100644
--- a/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp
+++ b/src/gausskernel/storage/access/transam/parallel_recovery/txn_redo.cpp
@@ -40,6 +40,7 @@
 #include
"access/parallel_recovery/dispatcher.h" #include "access/parallel_recovery/txn_redo.h" +#include "access/multi_redo_api.h" #include "pgstat.h" namespace parallel_recovery { @@ -132,7 +133,7 @@ void ApplyReadyTxnShareLogRecords(RedoItem *item) pgstat_report_waitevent(WAIT_EVENT_END); MemoryContext oldCtx = MemoryContextSwitchTo(g_dispatcher->oldCtx); - ApplyRedoRecord(&item->record, item->oldVersion); + ApplyRedoRecord(&item->record); (void)MemoryContextSwitchTo(oldCtx); pg_memory_barrier(); @@ -189,7 +190,7 @@ void ApplyReadyAllShareLogRecords(RedoItem *item) } pgstat_report_waitevent(WAIT_EVENT_END); MemoryContext oldCtx = MemoryContextSwitchTo(g_dispatcher->oldCtx); - ApplyRedoRecord(&item->record, item->oldVersion); + ApplyRedoRecord(&item->record); (void)MemoryContextSwitchTo(oldCtx); FreeRedoItem(item); @@ -227,7 +228,7 @@ static RedoItem *ProcTxnItem(RedoItem *item) ApplyReadyAllShareLogRecords(item); } else { MemoryContext oldCtx = MemoryContextSwitchTo(g_dispatcher->oldCtx); - ApplyRedoRecord(&item->record, item->oldVersion); + ApplyRedoRecord(&item->record); (void)MemoryContextSwitchTo(oldCtx); FreeRedoItem(item); } @@ -250,14 +251,15 @@ static RedoItem *ProcTxnItem(RedoItem *item) void ApplyReadyTxnLogRecords(TxnRedoWorker *worker, bool forceAll) { RedoItem *item = worker->procHead; - + GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_5]); while (item != NULL) { XLogReaderState *record = &item->record; - XLogRecPtr lrRead; /* lastReplayedReadPtr */ XLogRecPtr lrEnd; - GetReplayedRecPtrFromWorkers(&lrRead, &lrEnd); if (forceAll) { + GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_6]); + XLogRecPtr lrRead; /* lastReplayedReadPtr */ + GetReplayedRecPtrFromWorkers(&lrRead, &lrEnd); /* we need to get lastCompletedPageLSN as soon as possible,so */ /* we can not sleep here. */ XLogRecPtr oldReplayedPageLSN = InvalidXLogRecPtr; @@ -270,8 +272,10 @@ void ApplyReadyTxnLogRecords(TxnRedoWorker *worker, bool forceAll) GetReplayedRecPtrFromWorkers(&lrRead, &lrEnd); RedoInterruptCallBack(); } + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_6]); } + GetReplayedRecPtrFromWorkers(&lrEnd); /* * Make sure we can replay this record. This check is necessary * on the master and on the hot backup after it reaches consistency. @@ -309,6 +313,7 @@ void ApplyReadyTxnLogRecords(TxnRedoWorker *worker, bool forceAll) if (item == NULL) { worker->procTail = NULL; } + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_5]); } void DumpTxnWorker(TxnRedoWorker *txnWorker) @@ -324,20 +329,6 @@ void DumpTxnWorker(TxnRedoWorker *txnWorker) DumpXlogCtl(); } -void FreeTxnItem() -{ - MoveTxnItemToApplyQueue(g_dispatcher->txnWorker); - RedoItem *item = g_dispatcher->txnWorker->procHead; - RedoItem *nextitem = NULL; - while (item != NULL) { - nextitem = item->nextByWorker[0]; - OnlyFreeRedoItem(item); - item = nextitem; - } - g_dispatcher->txnWorker->procHead = NULL; - g_dispatcher->txnWorker->procTail = NULL; -} - bool TxnQueueIsEmpty(TxnRedoWorker* worker) { return worker->procHead == NULL; diff --git a/src/gausskernel/storage/access/transam/redo_statistic.cpp b/src/gausskernel/storage/access/transam/redo_statistic.cpp index a403dc563..f46b36ed1 100644 --- a/src/gausskernel/storage/access/transam/redo_statistic.cpp +++ b/src/gausskernel/storage/access/transam/redo_statistic.cpp @@ -186,12 +186,17 @@ char *redo_get_name_by_wait_type(uint32 type) } } +static inline uint32 MinNumber(uint32 a, uint32 b) +{ + return (b < a ? 
b : a);
+}
+
 void redo_get_worker_info_text(char *info, uint32 max_info_len)
 {
     RedoWorkerStatsData worker[MAX_RECOVERY_THREAD_NUM] = {0};
     uint32 worker_num = 0;
     errno_t errorno = EOK;
-    GetRedoWrokerStatistic(&worker_num, worker, MAX_RECOVERY_THREAD_NUM);
+    GetRedoWrokerStatistic(&worker_num, worker, MinNumber((uint32)MAX_RECOVERY_THREAD_NUM, max_info_len));
 
     if (worker_num == 0) {
         errorno = snprintf_s(info, max_info_len, max_info_len - 1, "%-16s", "no redo worker");
diff --git a/src/gausskernel/storage/access/transam/rmgr.cpp b/src/gausskernel/storage/access/transam/rmgr.cpp
index 6c3b7ce79..10529e12b 100644
--- a/src/gausskernel/storage/access/transam/rmgr.cpp
+++ b/src/gausskernel/storage/access/transam/rmgr.cpp
@@ -29,7 +29,6 @@
 #include "access/gin.h"
 #include "access/gist_private.h"
 #include "access/hash.h"
-#include "access/hash_xlog.h"
 #include "access/heapam.h"
 #include "access/ustore/knl_uredo.h"
 #include "access/multixact.h"
@@ -56,8 +55,8 @@
 #include "access/ustore/knl_uredo.h"
 
 /* must be kept in sync with RmgrData definition in xlog_internal.h */
-#define PG_RMGR(symname, name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc) \
-    {name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc},
+#define PG_RMGR(symname, name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc, type_name) \
+    {name, redo, desc, startup, cleanup, safe_restartpoint, undo, undo_desc, type_name},
 
 const RmgrData RmgrTable[RM_MAX_ID + 1] = {
 #include "access/rmgrlist.h"
diff --git a/src/gausskernel/storage/access/transam/seg_double_write.cpp b/src/gausskernel/storage/access/transam/seg_double_write.cpp
index af1a90a5c..b060c77fc 100644
--- a/src/gausskernel/storage/access/transam/seg_double_write.cpp
+++ b/src/gausskernel/storage/access/transam/seg_double_write.cpp
@@ -153,7 +153,7 @@ bool dw_single_file_recycle_old(bool trunc_file)
         securec_check(rc, "\0", "\0");
     }
 
-    CheckPointSyncForDw();
+    PageWriterSync();
 
     if (trunc_file) {
         if (!LWLockConditionalAcquire(single_cxt->second_flush_lock, LW_EXCLUSIVE)) {
diff --git a/src/gausskernel/storage/access/transam/single_double_write.cpp b/src/gausskernel/storage/access/transam/single_double_write.cpp
new file mode 100644
index 000000000..602b86f21
--- /dev/null
+++ b/src/gausskernel/storage/access/transam/single_double_write.cpp
@@ -0,0 +1,965 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * single_double_write.cpp
+ *        Before flushing dirty pages to the data file, flush them to the double
+ *        write file to guard against half-written pages. Those half-written data
+ *        file pages are recovered before xlog replay at startup. This file
+ *        implements single-flush double write.
+ * + * IDENTIFICATION + * src/gausskernel/storage/access/transam/single_double_write.cpp + * + * ------------------------------------------------------------------------- + */ +#include "access/double_write.h" +#include "storage/smgr/segment.h" +#include "utils/builtins.h" + +const uint16 RESULT_LEN = 256; +Datum dw_get_single_flush_dwn() +{ + if (dw_enabled()) { + char dwn[RESULT_LEN] = {0}; + errno_t rc; + if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + uint32 dwn_first = (uint32)g_instance.dw_single_cxt.file_head->head.dwn; + uint32 dwn_second = (uint32)g_instance.dw_single_cxt.second_file_head->head.dwn; + rc = snprintf_s(dwn, RESULT_LEN, RESULT_LEN - 1, "%u/%u", dwn_first, dwn_second); + securec_check_ss(rc, "\0", "\0"); + } else { + uint32 dwn_old = (uint32)g_instance.dw_single_cxt.recovery_buf.file_head->head.dwn; + rc = snprintf_s(dwn, RESULT_LEN, RESULT_LEN - 1, "%u/0", dwn_old); + securec_check_ss(rc, "\0", "\0"); + } + return CStringGetTextDatum(dwn); + } + return CStringGetTextDatum("0/0"); +} + +Datum dw_get_single_flush_start() +{ + if (dw_enabled()) { + char start[RESULT_LEN] = {0}; + errno_t rc; + if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + uint32 start_first = (uint32)g_instance.dw_single_cxt.file_head->start; + uint32 start_second = (uint32)g_instance.dw_single_cxt.second_file_head->start; + rc = snprintf_s(start, RESULT_LEN, RESULT_LEN - 1, "%u/%u", start_first, start_second); + securec_check_ss(rc, "\0", "\0"); + } else { + uint32 start_old = (uint32)g_instance.dw_single_cxt.recovery_buf.file_head->start; + rc = snprintf_s(start, RESULT_LEN, RESULT_LEN - 1, "%u/0", start_old); + securec_check_ss(rc, "\0", "\0"); + } + return CStringGetTextDatum(start); + } + + return CStringGetTextDatum("0/0"); +} + +Datum dw_get_single_flush_trunc_num() +{ + char trunc_num[RESULT_LEN] = {0}; + errno_t rc; + if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + uint32 trunc_num_first = (uint32)g_instance.dw_single_cxt.single_stat_info.file_trunc_num; + uint32 trunc_num_second = (uint32)g_instance.dw_single_cxt.single_stat_info.second_file_trunc_num; + rc = snprintf_s(trunc_num, RESULT_LEN, RESULT_LEN - 1, "%u/%u", trunc_num_first, trunc_num_second); + securec_check_ss(rc, "\0", "\0"); + } else { + uint32 trunc_num_old = (uint32)g_instance.dw_single_cxt.single_stat_info.file_trunc_num; + rc = snprintf_s(trunc_num, RESULT_LEN, RESULT_LEN - 1, "%u/0", trunc_num_old); + securec_check_ss(rc, "\0", "\0"); + } + return CStringGetTextDatum(trunc_num); +} + +Datum dw_get_single_flush_reset_num() +{ + char reset_num[RESULT_LEN] = {0}; + errno_t rc; + if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + uint32 reset_num_first = (uint32)g_instance.dw_single_cxt.single_stat_info.file_reset_num; + uint32 reset_num_second = (uint32)g_instance.dw_single_cxt.single_stat_info.second_file_reset_num; + rc = snprintf_s(reset_num, RESULT_LEN, RESULT_LEN - 1, "%u/%u", reset_num_first, reset_num_second); + securec_check_ss(rc, "\0", "\0"); + } else { + uint32 reset_num_old = (uint32)g_instance.dw_single_cxt.single_stat_info.file_reset_num; + rc = snprintf_s(reset_num, RESULT_LEN, RESULT_LEN - 1, "%u/0", reset_num_old); + securec_check_ss(rc, "\0", "\0"); + } + return CStringGetTextDatum(reset_num); +} + +Datum dw_get_single_flush_total_writes() +{ + char total_writes[RESULT_LEN] = {0}; + errno_t rc; + if (g_instance.dw_single_cxt.dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + uint32 total_writes_first = 
(uint32)g_instance.dw_single_cxt.single_stat_info.total_writes; + uint32 total_writes_second = (uint32)g_instance.dw_single_cxt.single_stat_info.second_total_writes; + rc = snprintf_s(total_writes, RESULT_LEN, RESULT_LEN - 1, "%u/%u", total_writes_first, total_writes_second); + securec_check_ss(rc, "\0", "\0"); + } else { + uint32 total_writes_old = (uint32)g_instance.dw_single_cxt.single_stat_info.total_writes; + rc = snprintf_s(total_writes, RESULT_LEN, RESULT_LEN - 1, "%u/0", total_writes_old); + securec_check_ss(rc, "\0", "\0"); + } + return CStringGetTextDatum(total_writes); +} + +const dw_view_col_t g_dw_single_view[DW_SINGLE_VIEW_COL_NUM] = { + {"node_name", TEXTOID, dw_get_node_name}, + {"curr_dwn", TEXTOID, dw_get_single_flush_dwn}, + {"curr_start_page", TEXTOID, dw_get_single_flush_start}, + {"total_writes", TEXTOID, dw_get_single_flush_total_writes}, + {"file_trunc_num", TEXTOID, dw_get_single_flush_trunc_num}, + {"file_reset_num", TEXTOID, dw_get_single_flush_reset_num} +}; + +static bool dw_verify_item(const dw_single_flush_item* item, uint16 dwn); +static uint16 get_max_single_write_pos(bool is_first); +static uint16 atomic_get_dw_write_pos(bool is_first); + +static void dw_recovery_first_version_page(); +static void dw_recovery_old_single_dw_page(); +static void dw_recovery_second_version_page(); +static void dw_recovery_single_page(const dw_single_flush_item *item, uint16 item_num); + +void dw_generate_single_file() +{ + char *file_head = NULL; + int64 remain_size; + int fd = -1; + errno_t rc; + char *unaligned_buf = NULL; + + if (file_exists(SINGLE_DW_FILE_NAME)) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), "DW single flush file already exists")); + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("DW bootstrap single flush file"))); + + fd = open(SINGLE_DW_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); + if (fd == -1) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", + SINGLE_DW_FILE_NAME))); + } + + unaligned_buf = (char *)palloc0(DW_FILE_EXTEND_SIZE + BLCKSZ); /* one more BLCKSZ for alignment */ + + file_head = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + + /* file head and first batch head will be written */ + remain_size = (DW_SINGLE_DIRTY_PAGE_NUM + DW_SINGLE_BUFTAG_PAGE_NUM) * BLCKSZ; + dw_prepare_file_head(file_head, 0, 0, 0); + dw_pwrite_file(fd, file_head, BLCKSZ, 0, SINGLE_DW_FILE_NAME); + rc = memset_s(file_head, BLCKSZ, 0, BLCKSZ); + securec_check(rc, "\0", "\0"); + dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, remain_size, DW_SINGLE_FILE_SIZE, true, NULL); + ereport(LOG, (errmodule(MOD_DW), errmsg("Double write single flush file created successfully"))); + + (void)close(fd); + fd = -1; + pfree(unaligned_buf); + return; +}
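The recovery routines that follow (dw_recovery_first_version_page, dw_recovery_second_version_page via dw_recovery_single_page, and dw_recovery_old_single_dw_page) all apply the same torn-page decision: a double-write copy is used to restore the data file only when the copy itself passes its checksum, and the on-disk data page is either torn (checksum failure) or older (smaller LSN) than the copy. A minimal, self-contained sketch of that decision rule; PageProbe and needs_restore are illustrative names, not kernel APIs:

#include <cstdint>

struct PageProbe {
    bool checksum_ok;  /* result of a dw_verify_pg_checksum()-style check */
    uint64_t lsn;      /* page LSN taken from the page header */
};

/* Returns true when the data-file page must be overwritten with the dw copy. */
static bool needs_restore(const PageProbe &dw_copy, const PageProbe &data_page)
{
    if (!dw_copy.checksum_ok) {
        return false;  /* the dw copy is itself torn and cannot be trusted */
    }
    if (!data_page.checksum_ok) {
        return true;   /* the data page is torn: restore it from the copy */
    }
    return data_page.lsn < dw_copy.lsn;  /* stale page: its flush was lost */
}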
+ +static void dw_recovery_first_version_page() +{ + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + dw_file_head_t *file_head = single_cxt->file_head; + errno_t rc = 0; + uint64 offset = 0; + PageHeader pghr = NULL; + SMgrRelation reln = NULL; + char *unaligned_buf = (char *)palloc0(BLCKSZ + BLCKSZ); /* one more BLCKSZ for alignment */ + char *dw_block = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + char *data_block = (char *)palloc0(BLCKSZ); + dw_first_flush_item flush_item; + + for (uint16 i = file_head->start; i < DW_FIRST_DATA_PAGE_NUM; i++) { + offset = (i + 1) * BLCKSZ; /* need skip the file head */ + dw_pread_file(single_cxt->fd, dw_block, BLCKSZ, offset); + pghr = (PageHeader)dw_block; + + rc = memcpy_s(&flush_item, sizeof(dw_first_flush_item), dw_block + pghr->pd_lower, sizeof(dw_first_flush_item)); + securec_check(rc, "\0", "\0"); + + if (!dw_verify_pg_checksum((PageHeader)dw_block, flush_item.buf_tag.blockNum, true)) { + if (PageIsNew(dw_block)) { + Assert(flush_item.buf_tag.rnode.relNode == 0); + dw_log_recovery_page(LOG, "[first version] dw page is new, break this recovery", flush_item.buf_tag); + break; + } + dw_log_recovery_page(WARNING, "DW single page broken", flush_item.buf_tag); + dw_log_page_header((PageHeader)dw_block); + continue; + } + dw_log_recovery_page(DW_LOG_LEVEL, "DW page fine", flush_item.buf_tag); + + reln = smgropen(flush_item.buf_tag.rnode, InvalidBackendId, GetColumnNum(flush_item.buf_tag.forkNum)); + /* read data page */ + if (!dw_read_data_page(flush_item.buf_tag, reln, data_block)) { + continue; + } + dw_log_page_header((PageHeader)data_block); + if (!dw_verify_pg_checksum((PageHeader)data_block, flush_item.buf_tag.blockNum, false) || + XLByteLT(PageGetLSN(data_block), PageGetLSN(dw_block))) { + rc = memset_s(dw_block + pghr->pd_lower, sizeof(dw_first_flush_item), 0, sizeof(dw_first_flush_item)); + securec_check(rc, "\0", "\0"); + dw_set_pg_checksum(dw_block, flush_item.buf_tag.blockNum); + + if (IsSegmentPhysicalRelNode(flush_item.buf_tag.rnode)) { + // seg_space must be initialized before. + seg_physical_write(reln->seg_space, flush_item.buf_tag.rnode, flush_item.buf_tag.forkNum, + flush_item.buf_tag.blockNum, (const char *)dw_block, false); + } else { + smgrwrite(reln, flush_item.buf_tag.forkNum, flush_item.buf_tag.blockNum, + (const char *)dw_block, false); + } + dw_log_recovery_page(LOG, "Data page recovered", flush_item.buf_tag); + dw_log_page_header((PageHeader)data_block); + } + } + + pfree(unaligned_buf); + pfree(data_block); +} + +static bool dw_verify_item(const dw_single_flush_item* item, uint16 dwn) +{ + if (item->dwn != dwn) { + return false; + } + + if (item->buf_tag.forkNum == InvalidForkNumber || item->buf_tag.blockNum == InvalidBlockNumber || + item->buf_tag.rnode.relNode == InvalidOid) { + if (item->dwn != 0 || item->data_page_idx != 0) { + ereport(WARNING, + (errmsg("dw recovery, find invalid item [page_idx %hu dwn %hu] skip this item," + "buf_tag[rel %u/%u/%u blk %u fork %d]", item->data_page_idx, item->dwn, + item->buf_tag.rnode.spcNode, item->buf_tag.rnode.dbNode, item->buf_tag.rnode.relNode, + item->buf_tag.blockNum, item->buf_tag.forkNum))); + } + return false; + } + pg_crc32c crc; + /* Contents are protected with a CRC */ + INIT_CRC32C(crc); + COMP_CRC32C(crc, (char*)item, offsetof(dw_single_flush_item, crc)); + FIN_CRC32C(crc); + + if (EQ_CRC32C(crc, item->crc)) { + return true; + } else { + return false; + } +}
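dw_verify_item() above accepts a buffer-tag slot only when its double-write generation number (dwn) matches the current file head and the CRC computed over every field preceding the crc member equals the stored value, so a torn or stale slot is rejected before its data page index is ever dereferenced. A self-contained sketch of the same seal-then-verify pattern; FlushItem is an illustrative stand-in for dw_single_flush_item, and a plain bitwise CRC-32 replaces the kernel's CRC-32C macros:

#include <cstddef>
#include <cstdint>

struct FlushItem {
    uint16_t data_page_idx;
    uint16_t dwn;       /* double-write file generation number */
    uint32_t block_num; /* stand-in for the full BufferTag */
    uint32_t crc;       /* must remain the last member */
};

static uint32_t crc32_buf(const void *buf, size_t len)
{
    const uint8_t *p = static_cast<const uint8_t *>(buf);
    uint32_t crc = 0xFFFFFFFFu;
    for (size_t i = 0; i < len; i++) {
        crc ^= p[i];
        for (int k = 0; k < 8; k++) {
            crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
    }
    return ~crc;
}

/* seal: compute the CRC over everything before the crc member */
static void seal_item(FlushItem *item)
{
    item->crc = crc32_buf(item, offsetof(FlushItem, crc));
}

/* verify: reject slots from another generation or with a mismatched CRC */
static bool item_valid(const FlushItem *item, uint16_t expected_dwn)
{
    if (item->dwn != expected_dwn) {
        return false;
    }
    return item->crc == crc32_buf(item, offsetof(FlushItem, crc));
}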
+ +static void dw_recovery_single_page(const dw_single_flush_item *item, uint16 item_num) +{ + SMgrRelation reln; + uint32 offset = 0; + BufferTag buf_tag; + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + char *unaligned_buf = (char *)palloc0(BLCKSZ + BLCKSZ); /* one more BLCKSZ for alignment */ + char *dw_block = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + char *data_block = (char *)palloc0(BLCKSZ); + uint64 base_offset = 0; + + if (single_cxt->file_head->dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + base_offset = 1 + DW_FIRST_DATA_PAGE_NUM + 1 + DW_SECOND_BUFTAG_PAGE_NUM; + } else { + base_offset = DW_SINGLE_BUFTAG_PAGE_NUM + 1; + } + + for (uint16 i = 0; i < item_num; i++) { + buf_tag = item[i].buf_tag; + + /* read dw file page */ + offset = (base_offset + item[i].data_page_idx) * BLCKSZ; + dw_pread_file(single_cxt->fd, dw_block, BLCKSZ, offset); + + if (!dw_verify_pg_checksum((PageHeader)dw_block, buf_tag.blockNum, true)) { + dw_log_recovery_page(WARNING, "DW single page broken", buf_tag); + dw_log_page_header((PageHeader)dw_block); + continue; + } + dw_log_recovery_page(DW_LOG_LEVEL, "DW page fine", buf_tag); + + reln = smgropen(buf_tag.rnode, InvalidBackendId, GetColumnNum(buf_tag.forkNum)); + /* read data page */ + if (!dw_read_data_page(buf_tag, reln, data_block)) { + continue; + } + dw_log_page_header((PageHeader)data_block); + if (!dw_verify_pg_checksum((PageHeader)data_block, buf_tag.blockNum, false) || + XLByteLT(PageGetLSN(data_block), PageGetLSN(dw_block))) { + if (IsSegmentPhysicalRelNode(buf_tag.rnode)) { + // seg_space must be initialized before. + seg_physical_write(reln->seg_space, buf_tag.rnode, buf_tag.forkNum, buf_tag.blockNum, + (const char *)dw_block, false); + } else { + smgrwrite(reln, buf_tag.forkNum, buf_tag.blockNum, (const char *)dw_block, false); + } + dw_log_recovery_page(LOG, "Data page recovered", buf_tag); + dw_log_page_header((PageHeader)data_block); + } + } + + pfree(unaligned_buf); + pfree(data_block); + return; +} + +static void dw_recovery_second_version_page() +{ + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + dw_single_flush_item *item = NULL; + uint16 rec_num = 0; + dw_file_head_t *file_head = single_cxt->second_file_head; + char *buf = single_cxt->buf; + uint64 second_offset = DW_SECOND_BUFTAG_START_IDX * BLCKSZ; + + item = (dw_single_flush_item*)palloc0(sizeof(dw_single_flush_item) * DW_SECOND_DATA_PAGE_NUM); + + /* read all buffer tag item, need skip head page */ + dw_pread_file(single_cxt->fd, buf, DW_SECOND_BUFTAG_PAGE_NUM * BLCKSZ, second_offset); + + uint64 offset = 0; + dw_single_flush_item *temp = NULL; + for (uint16 i = single_cxt->second_file_head->start; i < DW_SECOND_DATA_PAGE_NUM; i++) { + offset = i * sizeof(dw_single_flush_item); + temp = (dw_single_flush_item*)((char*)buf + offset); + if (dw_verify_item(temp, file_head->head.dwn)) { + item[rec_num].data_page_idx = temp->data_page_idx; + item[rec_num].dwn = temp->dwn; + item[rec_num].buf_tag = temp->buf_tag; + item[rec_num].crc = temp->crc; + rec_num++; + } + } + ereport(LOG, (errmodule(MOD_DW), errmsg("[second version] DW single flush file valid item num is %d.", rec_num))); + if (rec_num > 0) { + qsort(item, rec_num, sizeof(dw_single_flush_item), buftag_compare); + ereport(LOG, (errmodule(MOD_DW), + errmsg("[second version] DW single flush file valid buftag item qsort finish."))); + dw_recovery_single_page(item, rec_num); + } + + pfree(item); +} + +static void dw_recovery_old_single_dw_page() +{ + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + dw_single_flush_item *item = NULL; + dw_file_head_t *file_head = single_cxt->recovery_buf.file_head; + uint16 blk_num = DW_SINGLE_DIRTY_PAGE_NUM / SINGLE_BLOCK_TAG_NUM; + char *buf = single_cxt->recovery_buf.buf; + uint16 rec_num = 0; + + item = (dw_single_flush_item*)palloc0(sizeof(dw_single_flush_item) * DW_SINGLE_DIRTY_PAGE_NUM); + + /* read all buffer tag item, need skip head page */ + dw_pread_file(single_cxt->fd, buf, blk_num * BLCKSZ, BLCKSZ); + int offset = 0; + dw_single_flush_item *temp = NULL; + for (int i = 0; i < DW_SINGLE_DIRTY_PAGE_NUM; i++) { + offset = i * sizeof(dw_single_flush_item); + temp = (dw_single_flush_item*)((char*)buf + offset); + if (dw_verify_item(temp, file_head->head.dwn)) { + item[rec_num].data_page_idx = temp->data_page_idx; + item[rec_num].buf_tag = temp->buf_tag; + item[rec_num].dwn = temp->dwn; +
item[rec_num].crc = temp->crc; + rec_num++; + } + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("[old version] DW single flush file valid item num is %d.", rec_num))); + if (rec_num > 0) { + qsort(item, rec_num, sizeof(dw_single_flush_item), buftag_compare); + ereport(LOG, (errmodule(MOD_DW), errmsg("DW single flush file valid buftag item qsort finish."))); + dw_recovery_single_page(item, rec_num); + } + + pfree(item); +} + +void dw_force_reset_single_file(uint32 dw_version) +{ + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + dw_file_head_t *file_head = single_cxt->file_head; + + if (USE_CKPT_THREAD_SYNC) { + ProcessSyncRequests(); + } else { + PageWriterSync(); + } + + dw_prepare_file_head((char *)file_head, 0, file_head->head.dwn + 1); + dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, 0, SINGLE_DW_FILE_NAME); + (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.file_reset_num, 1); + + ereport(LOG, (errmodule(MOD_DW), + errmsg("DW single flush finish recovery, reset the file head[dwn %hu, start %hu].", + file_head->head.dwn, file_head->start))); + + if (dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + file_head = single_cxt->second_file_head; + dw_prepare_file_head((char *)file_head, 0, file_head->head.dwn + 1); + dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ, SINGLE_DW_FILE_NAME); + (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.second_file_reset_num, 1); + ereport(LOG, (errmodule(MOD_DW), + errmsg("DW single flush finish recovery [second version], reset the file head[dwn %hu, start %hu].", + file_head->head.dwn, file_head->start))); + } + + return; +} + +void dw_recovery_partial_write_single() +{ + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + + if (single_cxt->file_head->dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + dw_recovery_first_version_page(); + dw_recovery_second_version_page(); + } else { + dw_recovery_old_single_dw_page(); + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("DW single flush file recovery finish."))); + + /* reset the file after the recovery is complete */ + dw_force_reset_single_file(single_cxt->file_head->dw_version); + return; +} + +void dw_single_file_truncate(bool is_first) +{ + uint16 max_idx = 0; + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + dw_file_head_t *file_head = NULL; + volatile uint16 org_start = 0; + volatile uint16 org_dwn = 0; + LWLock* flush_lock = NULL; + uint64 head_offset = 0; + + if (is_first) { + file_head = single_cxt->file_head; + flush_lock = single_cxt->flush_lock; + head_offset = 0; + } else { + file_head = single_cxt->second_file_head; + flush_lock = single_cxt->second_flush_lock; + head_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; + } + + if (!LWLockConditionalAcquire(flush_lock, LW_EXCLUSIVE)) { + ereport(LOG, (errmodule(MOD_DW), + errmsg("[single flush] can not get dw flush lock and skip dw truncate for this time"))); + return; + } + + org_start = file_head->start; + org_dwn = file_head->head.dwn; + max_idx = get_max_single_write_pos(is_first); + if (max_idx == file_head->start) { + LWLockRelease(flush_lock); + return; + } + LWLockRelease(flush_lock); + + PageWriterSync(); + + if (!LWLockConditionalAcquire(flush_lock, LW_EXCLUSIVE)) { + ereport(LOG, (errmodule(MOD_DW), + errmsg("[single flush] can not get dw flush lock and skip dw truncate after sync for this time"))); + return; + } else if (org_start != file_head->start || org_dwn != file_head->head.dwn) { + LWLockRelease(flush_lock); + return; + } + + file_head->start = 
max_idx; + dw_prepare_file_head((char *)file_head, file_head->start, file_head->head.dwn); + + Assert(file_head->head.dwn == file_head->tail.dwn); + dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, head_offset, SINGLE_DW_FILE_NAME); + LWLockRelease(flush_lock); + + ereport(LOG, (errmodule(MOD_DW), + errmsg("[single flush][%s] DW truncate end: file_head[dwn %hu, start %hu], write_pos %hu", + is_first ? "first version" : "second_version", + file_head->head.dwn, file_head->start, is_first ? single_cxt->write_pos : single_cxt->second_write_pos))); + + if (is_first) { + (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.file_trunc_num, 1); + } else { + (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.second_file_trunc_num, 1); + } + return; +} + +static uint16 get_max_single_write_pos(bool is_first) +{ + uint16 max_idx = 0; + uint16 i = 0; + uint16 start = 0; + knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; + uint16 end = 0; + dw_file_head_t *file_head = NULL; + + /* single_flush_state, first */ + if (!is_first) { + file_head = dw_single_cxt->second_file_head; + start = file_head->start + DW_FIRST_DATA_PAGE_NUM; + end = pg_atomic_read_u32(&dw_single_cxt->second_write_pos) + DW_FIRST_DATA_PAGE_NUM; + } else { + file_head = dw_single_cxt->file_head; + start = file_head->start; + end = pg_atomic_read_u32(&dw_single_cxt->write_pos); + } + + for (i = start; i < end; i++) { + if (dw_single_cxt->single_flush_state[i] == false) { + break; + } + } + max_idx = i; + + if (!is_first) { + max_idx = max_idx - DW_FIRST_DATA_PAGE_NUM; + } + + return max_idx; +} + +void dw_generate_new_single_file() +{ + char *file_head = NULL; + int64 extend_size; + int fd = -1; + errno_t rc; + char *unaligned_buf = NULL; + + if (file_exists(SINGLE_DW_FILE_NAME)) { + ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_DW), "DW single flush file already exists")); + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("DW bootstrap new single flush file"))); + + fd = open(SINGLE_DW_FILE_NAME, (DW_FILE_FLAG | O_CREAT), DW_FILE_PERM); + if (fd == -1) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not create file \"%s\"", + SINGLE_DW_FILE_NAME))); + } + + /* NO EREPORT(ERROR) from here till changes are logged */ + START_CRIT_SECTION(); + unaligned_buf = (char *)palloc0(DW_FILE_EXTEND_SIZE + BLCKSZ); /* one more BLCKSZ for alignment */ + + file_head = (char *)TYPEALIGN(BLCKSZ, unaligned_buf); + + /* first version page init */ + extend_size = DW_FIRST_DATA_PAGE_NUM * BLCKSZ; + dw_prepare_file_head(file_head, 0, 0, DW_SUPPORT_NEW_SINGLE_FLUSH); + dw_pwrite_file(fd, file_head, BLCKSZ, 0, SINGLE_DW_FILE_NAME); + + rc = memset_s(file_head, BLCKSZ, 0, BLCKSZ); + securec_check(rc, "\0", "\0"); + dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, extend_size, DW_NEW_SINGLE_FILE_SIZE, true, NULL); + + /* second version page init */ + extend_size = (DW_SECOND_BUFTAG_PAGE_NUM + DW_SECOND_DATA_PAGE_NUM) * BLCKSZ; + dw_prepare_file_head(file_head, 0, 0, DW_SUPPORT_NEW_SINGLE_FLUSH); + dw_pwrite_file(fd, file_head, BLCKSZ, (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ, SINGLE_DW_FILE_NAME); + + rc = memset_s(file_head, BLCKSZ, 0, BLCKSZ); + securec_check(rc, "\0", "\0"); + dw_extend_file(fd, file_head, DW_FILE_EXTEND_SIZE, extend_size, DW_NEW_SINGLE_FILE_SIZE, true, NULL); + END_CRIT_SECTION(); + + ereport(LOG, (errmodule(MOD_DW), errmsg("Double write single flush file created successfully"))); + + (void)close(fd); + fd = -1; + pfree(unaligned_buf); + return; +}
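Both flush paths below reserve their slot in the file through atomic_get_dw_write_pos(), a compare-and-swap loop: read the current write position, retry the CAS until this thread owns the next index, and fall back to a locked recycle pass when the file is full. A condensed sketch of that loop using std::atomic; claim_slot and the recycle callback are illustrative, assuming recycle() resets the position to zero once every outstanding slot has been flushed:

#include <atomic>
#include <cstdint>

static uint32_t claim_slot(std::atomic<uint32_t> &write_pos, uint32_t page_num,
                           void (*recycle)())
{
    uint32_t pos = write_pos.load(std::memory_order_acquire);
    for (;;) {
        if (pos + 1 >= page_num) {
            recycle();  /* full: wait for flushes, then reset write_pos */
            pos = write_pos.load(std::memory_order_acquire);
            continue;   /* re-check the bound with the fresh position */
        }
        if (write_pos.compare_exchange_weak(pos, pos + 1,
                                            std::memory_order_acq_rel)) {
            return pos; /* this thread now owns slot pos exclusively */
        }
        /* CAS failure already reloaded pos; simply retry */
    }
}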
+ +uint16 first_version_dw_single_flush(BufferDesc *buf_desc) +{ + errno_t rc; + char *buf = t_thrd.proc->dw_buf; + knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; + dw_file_head_t *file_head = dw_single_cxt->file_head; + uint16 actual_pos; + uint64 page_write_offset; + dw_first_flush_item item; + PageHeader pghr = NULL; + BufferTag phy_tag; + + uint32 buf_state = LockBufHdr(buf_desc); + Block block = BufHdrGetBlock(buf_desc); + XLogRecPtr page_lsn = BufferGetLSN(buf_desc); + UnlockBufHdr(buf_desc, buf_state); + + phy_tag = buf_desc->tag; + dw_transfer_phybuffer_addr(buf_desc, &phy_tag); + + Assert(buf_desc->buf_id < SegmentBufferStartID); + Assert(free_space_enough(buf_desc->buf_id)); + + /* first step, copy buffer to dw buf, then flush page lsn, the buffer content lock is already held */ + rc = memcpy_s(buf, BLCKSZ, block, BLCKSZ); + securec_check(rc, "\0", "\0"); + + XLogWaitFlush(page_lsn); + if (buf_desc->encrypt) { + dw_encrypt_page(buf_desc->tag, buf); + } + + actual_pos = atomic_get_dw_write_pos(true); + + item.dwn = file_head->head.dwn; + item.buf_tag = phy_tag; + pghr = (PageHeader)buf; + + rc = memcpy_s(buf + pghr->pd_lower, sizeof(dw_first_flush_item), &item, sizeof(dw_first_flush_item)); + securec_check(rc, "\0", "\0"); + + dw_set_pg_checksum(buf, item.buf_tag.blockNum); + page_write_offset = (1 + actual_pos) * BLCKSZ; + Assert(actual_pos < DW_FIRST_DATA_PAGE_NUM); + dw_pwrite_file(dw_single_cxt->fd, buf, BLCKSZ, page_write_offset, SINGLE_DW_FILE_NAME); + + (void)pg_atomic_add_fetch_u64(&dw_single_cxt->single_stat_info.total_writes, 1); + + return actual_pos; +} + +uint16 second_version_dw_single_flush(BufferTag tag, Block block, XLogRecPtr page_lsn, + bool encrypt, BufferTag phy_tag) +{ + errno_t rc; + uint16 actual_pos; + uint64 page_write_offset; + uint64 tag_write_offset; + uint16 block_offset; + dw_single_flush_item item; + knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; + dw_file_head_t *file_head = dw_single_cxt->second_file_head; + char *buf = t_thrd.proc->dw_buf; + + /* first step, copy buffer to dw buf, then flush page lsn, the buffer content lock is already held */ + rc = memcpy_s(buf, BLCKSZ, block, BLCKSZ); + securec_check(rc, "\0", "\0"); + + XLogWaitFlush(page_lsn); + if (encrypt) { + dw_encrypt_page(tag, buf); + } + dw_set_pg_checksum(buf, phy_tag.blockNum); + + actual_pos = atomic_get_dw_write_pos(false); + + /* data page need skip head page and bufferTag page, bufferTag page need skip head page and first version page */ + page_write_offset = (actual_pos + DW_SECOND_DATA_START_IDX) * BLCKSZ; + tag_write_offset = DW_SECOND_BUFTAG_START_IDX * BLCKSZ + (actual_pos / SINGLE_BLOCK_TAG_NUM) * BLCKSZ; + block_offset = (actual_pos % SINGLE_BLOCK_TAG_NUM) * sizeof(dw_single_flush_item); + Assert(block_offset <= BLCKSZ - sizeof(dw_single_flush_item)); + Assert(actual_pos < DW_SECOND_DATA_PAGE_NUM); + Assert(page_write_offset < DW_NEW_SINGLE_FILE_SIZE && tag_write_offset < DW_SECOND_DATA_START_IDX * BLCKSZ); + + /* write the data page to dw file */ + dw_pwrite_file(dw_single_cxt->fd, buf, BLCKSZ, page_write_offset, SINGLE_DW_FILE_NAME); + + item.data_page_idx = actual_pos; + item.dwn = file_head->head.dwn; + item.buf_tag = phy_tag; + + /* Contents are protected with a CRC */ + INIT_CRC32C(item.crc); + COMP_CRC32C(item.crc, (char*)&item, offsetof(dw_single_flush_item, crc)); + FIN_CRC32C(item.crc); + + /* write the buffer tag item to dw file */ + (void)LWLockAcquire(dw_single_cxt->second_buftag_lock, LW_EXCLUSIVE); + dw_pread_file(dw_single_cxt->fd, buf,
BLCKSZ, tag_write_offset); + rc = memcpy_s(buf + block_offset, BLCKSZ - block_offset, &item, sizeof(dw_single_flush_item)); + securec_check(rc, "\0", "\0"); + dw_pwrite_file(dw_single_cxt->fd, buf, BLCKSZ, tag_write_offset, SINGLE_DW_FILE_NAME); + + LWLockRelease(dw_single_cxt->second_buftag_lock); + (void)pg_atomic_add_fetch_u64(&dw_single_cxt->single_stat_info.second_total_writes, 1); + + return (actual_pos + DW_FIRST_DATA_PAGE_NUM); +} + +void wait_all_single_dw_finish_flush(bool is_first) +{ + uint16 start = 0; + uint16 end = 0; + dw_file_head_t *file_head = NULL; + knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; + + /* single_flush_state, first */ + if (is_first) { + file_head = dw_single_cxt->file_head; + start = file_head->start; + end = pg_atomic_read_u32(&dw_single_cxt->write_pos); + } else { + file_head = dw_single_cxt->second_file_head; + start = file_head->start + DW_FIRST_DATA_PAGE_NUM; + end = pg_atomic_read_u32(&dw_single_cxt->second_write_pos) + DW_FIRST_DATA_PAGE_NUM; + } + + for (uint i = start; i < end;) { + if (dw_single_cxt->single_flush_state[i] != false) { + i++; + continue; + } else { + (void)sched_yield(); + } + } + return; +} + +void dw_single_file_recycle(bool is_first) +{ + bool file_full = false; + knl_g_dw_context* single_cxt = &g_instance.dw_single_cxt; + dw_file_head_t *file_head = NULL; + uint16 end = 0; + errno_t rc; + uint64 head_offset = 0; + uint16 flush_state_start = 0; + uint16 page_num = 0; + + if (is_first) { + file_head = single_cxt->file_head; + end = single_cxt->write_pos; + flush_state_start = 0; + page_num = DW_FIRST_DATA_PAGE_NUM; + head_offset = 0; + } else { + file_head = single_cxt->second_file_head; + end = single_cxt->second_write_pos; + flush_state_start = DW_FIRST_DATA_PAGE_NUM; + page_num = DW_SECOND_DATA_PAGE_NUM; + head_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; + } + + file_full = end + 1 >= page_num; + + if (!file_full) { + return; + } + + /* reset start position and flush page num for full recycle */ + wait_all_single_dw_finish_flush(is_first); + + PageWriterSync(); + + rc = memset_s(single_cxt->single_flush_state + (flush_state_start * sizeof(bool)), + sizeof(bool) * page_num, 0, sizeof(bool) * page_num); + securec_check(rc, "\0", "\0"); + + dw_prepare_file_head((char *)file_head, 0, file_head->head.dwn + 1); + dw_pwrite_file(single_cxt->fd, file_head, BLCKSZ, head_offset, SINGLE_DW_FILE_NAME); + + /* The start and write_pos must be reset at the end. */ + file_head->start = 0; + if (is_first) { + single_cxt->write_pos = 0; + } else { + single_cxt->second_write_pos = 0; + } + + if (is_first) { + (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.file_reset_num, 1); + } else { + (void)pg_atomic_add_fetch_u64(&single_cxt->single_stat_info.second_file_reset_num, 1); + } + ereport(LOG, (errmodule(MOD_DW), errmsg("[single flush] [%s] Reset DW file: file_head[dwn %hu, start %hu], " + "writer pos is %hu", is_first ? "first version" : "second_version", file_head->head.dwn, file_head->start, + is_first ? single_cxt->write_pos : single_cxt->second_write_pos))); + return; +} + +static uint16 atomic_get_dw_write_pos(bool is_first) +{ + knl_g_dw_context* dw_single_cxt = &g_instance.dw_single_cxt; + uint16 page_num = is_first ? DW_FIRST_DATA_PAGE_NUM : DW_SECOND_DATA_PAGE_NUM; + uint32 write_pos; + LWLock *lock = is_first ? dw_single_cxt->flush_lock : dw_single_cxt->second_flush_lock; + + pg_memory_barrier(); + write_pos = is_first ? 
pg_atomic_read_u32(&dw_single_cxt->write_pos) : + pg_atomic_read_u32(&dw_single_cxt->second_write_pos); + + while (true) { + if ((write_pos + 1 >= page_num)) { + (void)LWLockAcquire(lock, LW_EXCLUSIVE); + dw_single_file_recycle(is_first); + LWLockRelease(lock); + write_pos = is_first ? pg_atomic_read_u32(&dw_single_cxt->write_pos) : + pg_atomic_read_u32(&dw_single_cxt->second_write_pos); + /* fetch write_pos, we need to check write_pos + 1 again */ + continue; + } + + if (is_first) { + if (pg_atomic_compare_exchange_u32(&dw_single_cxt->write_pos, &write_pos, write_pos + 1)) { + return write_pos; + } + } else { + if (pg_atomic_compare_exchange_u32(&dw_single_cxt->second_write_pos, &write_pos, write_pos + 1)) { + return write_pos; + } + } + } + + return write_pos; +} + +static uint32 dw_recover_file_head(knl_g_dw_context *cxt, bool single, bool first) +{ + uint32 i; + uint16 id; + errno_t rc; + int64 file_size; + dw_file_head_t *curr_head = NULL; + dw_file_head_t *working_head = NULL; + char *file_head = (char *)cxt->file_head; + uint32 dw_version = 0; + uint64 head_offset = 0; + int64 offset; + + if (single && !first) { + file_head = (char *)cxt->second_file_head; + head_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; + } + dw_pread_file(cxt->fd, file_head, BLCKSZ, head_offset); + + for (i = 0; i < DW_FILE_HEAD_ID_NUM; i++) { + id = g_dw_file_head_ids[i]; + curr_head = (dw_file_head_t *)(file_head + sizeof(dw_file_head_t) * id); + if (dw_verify_file_head(curr_head)) { + working_head = curr_head; + break; + } + } + + if (working_head == NULL) { + ereport(FATAL, (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Single file header is broken"))); + /* we should not get here, since FATAL will do abort. But for ut, return is needed */ + return dw_version; + } + + ereport(LOG, (errmodule(MOD_DW), errmsg("Found a valid single file header: id %hu, file_head[dwn %hu, start %hu]", + id, working_head->head.dwn, working_head->start))); + + for (i = 0; i < DW_FILE_HEAD_ID_NUM; i++) { + id = g_dw_file_head_ids[i]; + curr_head = (dw_file_head_t *)(file_head + sizeof(dw_file_head_t) * id); + if (curr_head != working_head) { + rc = memcpy_s(curr_head, sizeof(dw_file_head_t), working_head, sizeof(dw_file_head_t)); + securec_check(rc, "\0", "\0"); + } + } + + offset = dw_seek_file(cxt->fd, 0, SEEK_END); + if (single) { + dw_version = ((dw_file_head_t *)file_head)->dw_version; + file_size = (dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH ? DW_NEW_SINGLE_FILE_SIZE : DW_SINGLE_FILE_SIZE); + } else { + file_size = DW_FILE_SIZE; + } + + if (offset != file_size) { + ereport(PANIC, (errmodule(MOD_DW), + errmsg("DW check file size failed, expected_size %ld, actual_size %ld", file_size, offset))); + } + + dw_pwrite_file(cxt->fd, file_head, BLCKSZ, head_offset, single ? SINGLE_DW_FILE_NAME : OLD_DW_FILE_NAME); + return dw_version; +}
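dw_recover_file_head() above depends on the file head being stored as several redundant copies inside a single block (indexed through g_dw_file_head_ids): the first copy that verifies becomes authoritative, the broken copies are repaired from it, and the whole block is written back. A minimal sketch of that repair scheme; FileHead and head_ok() are illustrative stand-ins for dw_file_head_t and dw_verify_file_head():

#include <cstdint>

struct FileHead {
    uint16_t dwn;
    uint16_t start;
    uint16_t checksum;  /* stand-in for the real head/tail consistency check */
};

static bool head_ok(const FileHead &h)
{
    return h.checksum == static_cast<uint16_t>(h.dwn ^ h.start);
}

/* Adopt the first valid copy and repair the rest from it; returns false
 * when every copy is broken (the kernel reports FATAL in that case). */
static bool recover_heads(FileHead *heads, int copies)
{
    int good = -1;
    for (int i = 0; i < copies; i++) {
        if (head_ok(heads[i])) {
            good = i;
            break;
        }
    }
    if (good < 0) {
        return false;
    }
    for (int i = 0; i < copies; i++) {
        if (i != good) {
            heads[i] = heads[good];
        }
    }
    return true;
}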
+ +void dw_cxt_init_single() +{ + char *buf = NULL; + knl_g_dw_context *single_cxt = &g_instance.dw_single_cxt; + uint32 dw_version = 0; + uint16 data_page_num = 0; + uint64 second_start_offset = 0; + + Assert(single_cxt->flush_lock == NULL); + single_cxt->flush_lock = LWLockAssign(LWTRANCHE_DW_SINGLE_FIRST); + single_cxt->second_flush_lock = LWLockAssign(LWTRANCHE_DW_SINGLE_SECOND); + single_cxt->second_buftag_lock = LWLockAssign(LWTRANCHE_DW_SINGLE_SECOND_BUFTAG); + + single_cxt->fd = open(SINGLE_DW_FILE_NAME, DW_FILE_FLAG, DW_FILE_PERM); + if (single_cxt->fd == -1) { + ereport(PANIC, + (errcode_for_file_access(), errmodule(MOD_DW), errmsg("Could not open file \"%s\"", SINGLE_DW_FILE_NAME))); + } + + data_page_num = DW_FIRST_DATA_PAGE_NUM + DW_SECOND_DATA_PAGE_NUM; + + /* two file heads plus one for alignment */ + single_cxt->unaligned_buf = (char *)palloc0((DW_SECOND_BUFTAG_PAGE_NUM + 1 + 1 + 1) * BLCKSZ); + buf = (char *)TYPEALIGN(BLCKSZ, single_cxt->unaligned_buf); + single_cxt->file_head = (dw_file_head_t *)buf; + buf += BLCKSZ; + single_cxt->second_file_head = (dw_file_head_t *)buf; + buf += BLCKSZ; + single_cxt->buf = buf; + single_cxt->single_flush_state = (bool*)palloc0(sizeof(bool) * data_page_num); + + dw_version = dw_recover_file_head(single_cxt, true, true); + if (dw_version == DW_SUPPORT_NEW_SINGLE_FLUSH) { + dw_pread_file(single_cxt->fd, single_cxt->file_head, BLCKSZ, 0); + second_start_offset = (1 + DW_FIRST_DATA_PAGE_NUM) * BLCKSZ; + dw_recover_file_head(single_cxt, true, false); + dw_pread_file(single_cxt->fd, single_cxt->second_file_head, BLCKSZ, second_start_offset); + } else { + Assert(dw_version == 0); + /* one file head plus one for alignment */ + single_cxt->recovery_buf.unaligned_buf = + (char *)palloc0((DW_SINGLE_DIRTY_PAGE_NUM / SINGLE_BLOCK_TAG_NUM + 1 + 1) * BLCKSZ); + buf = (char *)TYPEALIGN(BLCKSZ, single_cxt->recovery_buf.unaligned_buf); + single_cxt->recovery_buf.file_head = (dw_file_head_t *)buf; + buf += BLCKSZ; + single_cxt->recovery_buf.buf = buf; + dw_pread_file(single_cxt->fd, single_cxt->recovery_buf.file_head, BLCKSZ, 0); + single_cxt->recovery_buf.single_flush_state = + (bool*)palloc0(sizeof(bool) * DW_SINGLE_DIRTY_PAGE_NUM); + single_cxt->recovery_buf.write_pos = 0; + } + + pg_atomic_write_u32(&g_instance.dw_single_cxt.dw_version, dw_version); + single_cxt->closed = 0; + single_cxt->write_pos = 0; + single_cxt->second_write_pos = 0; + single_cxt->flush_page = 0; +} \ No newline at end of file diff --git a/src/gausskernel/storage/access/transam/transam.cpp b/src/gausskernel/storage/access/transam/transam.cpp index 936a5e9cb..d68a0d04f 100644 --- a/src/gausskernel/storage/access/transam/transam.cpp +++ b/src/gausskernel/storage/access/transam/transam.cpp @@ -25,11 +25,14 @@ #include "access/gtm.h" #include "access/subtrans.h" #include "access/transam.h" +#include "access/slru.h" #include "miscadmin.h" #include "pgxc/pgxc.h" #include "storage/procarray.h" #include "utils/guc.h" #include "utils/snapmgr.h" +#include "replication/walreceiver.h" +#include "storage/procarray.h" #ifdef PGXC #include "utils/builtins.h" @@ -102,12 +105,16 @@ RETRY: } else { xid = recentGlobalXmin; } + } else if (snapshot != NULL && snapshot->satisfies == SNAPSHOT_DECODE_MVCC) { + xid = GetReplicationSlotCatalogXmin(); + } else if (snapshot != NULL && IsMVCCSnapshot(snapshot)) { + xid = snapshot->xmin; } else { xid = u_sess->utils_cxt.RecentXmin; } Assert(TransactionIdIsValid(xid)); - if ((snapshot == NULL ||
!IsVersionMVCCSnapshot(snapshot)) && TransactionIdPrecedes(transactionId, xid)) { + if ((!IS_DISASTER_RECOVER_MODE) && (snapshot == NULL || !IsVersionMVCCSnapshot(snapshot)) && TransactionIdPrecedes(transactionId, xid)) { if (isCommit) { result = COMMITSEQNO_FROZEN; } else { @@ -131,7 +138,15 @@ RETRY: } PG_CATCH(); { - if (GTM_LITE_MODE && retry_times == 0) { + if ((IS_CN_DISASTER_RECOVER_MODE || IS_DISASTER_RECOVER_MODE) && + t_thrd.xact_cxt.slru_errcause == SLRU_OPEN_FAILED) + { + t_thrd.int_cxt.InterruptHoldoffCount = saveInterruptHoldoffCount; + FlushErrorState(); + ereport(LOG, (errmsg("TransactionIdGetCommitSeqNo: " + "Treat CSN as frozen when csnlog file cannot be found for the given xid: %lu", transactionId))); + result = COMMITSEQNO_FROZEN; + } else if (GTM_LITE_MODE && retry_times == 0) { t_thrd.int_cxt.InterruptHoldoffCount = saveInterruptHoldoffCount; FlushErrorState(); ereport(LOG, (errmsg("recentGlobalXmin has been updated, csn log may be truncated, try clog, xid" @@ -184,16 +199,6 @@ static CommitSeqNo TransactionIdGetCommitSeqNoForGSClean(TransactionId transacti if (!TransactionIdIsValid(xid)) { /* Fetch the newest global xmin from gtm and use it. */ TransactionId gtm_gloabl_xmin = InvalidTransactionId; - if (GTM_MODE) { - gtm_gloabl_xmin = GetGTMGlobalXmin(); - ereport(LOG, (errmsg("For gs_clean, fetch the recent global xmin %lu if" - "it is not fetched before.", - gtm_gloabl_xmin))); - TransactionId local_xmin = t_thrd.xact_cxt.ShmemVariableCache->recentLocalXmin; - if (TransactionIdPrecedes(local_xmin, gtm_gloabl_xmin)) { - gtm_gloabl_xmin = local_xmin; - } - } if (TransactionIdIsValid(gtm_gloabl_xmin)) { (void)pg_atomic_compare_exchange_u64(&t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin, &xid, @@ -206,8 +211,6 @@ static CommitSeqNo TransactionIdGetCommitSeqNoForGSClean(TransactionId transacti */ xid = t_thrd.xact_cxt.ShmemVariableCache->recentLocalXmin; ereport(DEBUG1, (errmsg("For gs_clean, fetch the recent global xmin %lu from recent local xmin.", xid))); - if (GTM_MODE) - miss_global_xmin = true; } } @@ -411,7 +414,7 @@ bool UHeapTransactionIdDidCommit(TransactionId transactionId) return true; } if (TransactionIdIsNormal(transactionId) && - TransactionIdPrecedes(transactionId, pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo))) { + TransactionIdPrecedes(transactionId, pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo))) { return true; } return TransactionIdDidCommit(transactionId); @@ -478,7 +481,7 @@ bool UHeapTransactionIdDidAbort(TransactionId transactionId) if (transactionId == FrozenTransactionId) { return false; } - TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); if (TransactionIdIsNormal(transactionId) && TransactionIdPrecedes(transactionId, oldestXidInUndo)) { /* The transaction must be committed or rollback finish, not abort. */ ereport(PANIC, (errmsg("The transaction cannot rollback, transactionId = %lu, oldestXidInUndo = %lu.", @@ -667,24 +670,6 @@ XLogRecPtr TransactionIdGetCommitLSN(TransactionId xid) return result; } -/* - * TransactionIdLogicallyPrecedes --- is id1 logically < id2? - */ -bool TransactionIdLogicallyPrecedes(TransactionId id1, TransactionId id2) -{ - /* - * If either ID is a permanent XID then we can just do unsigned - * comparison. If both are normal, do a modulo-2^31 comparison. 
- */ - int32 diff; - - if (!TransactionIdIsNormal(id1) || !TransactionIdIsNormal(id2)) - return (id1 < id2); - - diff = (int32)(id1 - id2); - return (diff < 0); -} - /* * Returns the status of the tranaction. * diff --git a/src/gausskernel/storage/access/transam/twophase.cpp b/src/gausskernel/storage/access/transam/twophase.cpp index 477d53d7a..5463b3cef 100644 --- a/src/gausskernel/storage/access/transam/twophase.cpp +++ b/src/gausskernel/storage/access/transam/twophase.cpp @@ -84,6 +84,7 @@ #include "access/clog.h" #include "access/csnlog.h" #include "access/htup.h" +#include "access/slru.h" #include "access/subtrans.h" #include "access/transam.h" #include "access/twophase.h" @@ -361,16 +362,6 @@ static int IsPrepXidValid(ValidPrepXid prepXid, TransactionId xid) return -1; } -bool IsTransactionIdMarkedPrepared(TransactionId xid) -{ - /* Possible race condition here with SetNextPrepXid */ - TwoPhaseStateData *currentStatePtr = TwoPhaseState(xid); - ValidPrepXid prepXid = GetCurrPrepXid(currentStatePtr); - bool result = IsPrepXidValid(prepXid, xid) != -1; - ReleasePrepXid(prepXid); - return result; -} - /* if the prepared xid list is overflow, we need copy the list from * two-phase state struct */ @@ -731,6 +722,7 @@ static void MarkAsPreparingGuts(GTM_TransactionHandle handle, GlobalTransaction pgxact->xid = xid; pgxact->xmin = InvalidTransactionId; pgxact->csn_min = InvalidCommitSeqNo; + pgxact->csn_dr = InvalidCommitSeqNo; pgxact->delayChkpt = false; pgxact->vacuumFlags = 0; proc->pid = 0; @@ -2558,52 +2550,9 @@ void FinishPreparedTransaction(const char *gid, bool isCommit) ColFileNodeRel *colFileNodeRel = commitrels + i; ColFileNodeCopy(&colFileNode, colFileNodeRel); - - if (IsValidPaxDfsForkNum(colFileNode.forknum)) { - /* drop dfs table */ - if (IS_PGXC_COORDINATOR) { - if (isCommit && !IsConnFromCoord()) { - DropDfsDirectory(&colFileNode, false); - } - } else { - /* DN */ - DfsInsert::InvalidSpaceAllocCache(colFileNode.filenode.relNode); - - /* read dfs file list to get each file size */ - List *dfsfilelist = NIL; - ReadDfsFilelist(colFileNode.filenode, colFileNode.ownerid, &dfsfilelist); - - /* calculate file size */ - uint64 size = GetDfsDelFileSize(dfsfilelist, isCommit); - - /* decrease the permanent space on users' record */ - perm_space_decrease(colFileNode.ownerid, size, - find_tmptable_cache_key(colFileNode.filenode.relNode) ? SP_TEMP : SP_PERM); - - /* free list */ - list_free_deep(dfsfilelist); - dfsfilelist = NIL; - - /* drop delete file list */ - DropDfsFilelist(colFileNode.filenode); - } - - /* whatever commit or abort, it's always necessary to drop the mapper files. */ - DropMapperFile(colFileNode.filenode); - } - - if (IsTruncateDfsForkNum(colFileNode.forknum)) { - /* truncate dfs table */ - if (!IS_PGXC_COORDINATOR) { - if (isCommit) { - ClearDfsDirectory(&colFileNode, false); - } - - /* whatever commit or abort, it's always necessary to drop the mapper files. 
*/ - DropMapperFile(colFileNode.filenode); - DropDfsFilelist(colFileNode.filenode); - } - } + /* dfs table is not supported */ + Assert(!IsValidPaxDfsForkNum(colFileNode.forknum)); + Assert(!IsTruncateDfsForkNum(colFileNode.forknum)); } /* second loop to handle abortrels */ for (i = 0; i < hdr->nabortrels; i++) { @@ -2611,20 +2560,7 @@ void FinishPreparedTransaction(const char *gid, bool isCommit) ColFileNodeRel *colFileNodeRel = abortrels + i; ColFileNodeCopy(&colFileNode, colFileNodeRel); - - if (IsValidPaxDfsForkNum(colFileNode.forknum)) { - /* create dfs table */ - if (IS_PGXC_COORDINATOR) { - if (!isCommit && !IsConnFromCoord()) { - DropDfsDirectory(&colFileNode, false); - } - } else { - DfsInsert::InvalidSpaceAllocCache(colFileNode.filenode.relNode); - } - - /* whatever commit or abort, it's always necessary to drop the mapper files. */ - DropMapperFile(colFileNode.filenode); - } + Assert(!IsValidPaxDfsForkNum(colFileNode.forknum)); } } PG_CATCH(); @@ -2829,6 +2765,38 @@ static void ProcessRecords(char *bufptr, TransactionId xid, const TwoPhaseCallba } } +void DeleteObsoleteTwoPhaseFile(int64 pageno) +{ + DIR *cldir = NULL; + struct dirent *clde = NULL; + int i; + int64 cutoffPage = pageno; + cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT; + TransactionId cutoffXid = (TransactionId)CLOG_XACTS_PER_PAGE * cutoffPage; + cldir = AllocateDir(TWOPHASE_DIR); + for (i = 0; i < NUM_TWOPHASE_PARTITIONS; i++) { + TWOPAHSE_LWLOCK_ACQUIRE(i, LW_EXCLUSIVE); + } + while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL) { + if (strlen(clde->d_name) == 16 && strspn(clde->d_name, "0123456789ABCDEF") == 16) { + TransactionId xid = (TransactionId)pg_strtouint64(clde->d_name, NULL, 16); + if (xid < cutoffXid) { +#ifdef USE_ASSERT_CHECKING + int elevel = PANIC; +#else + int elevel = WARNING; +#endif + ereport(elevel, (errmsg("twophase file %lu is older than clog truncate xid: %lu", xid, cutoffXid))); + RemoveTwoPhaseFile(xid, true); + } + } + } + for (i = 0; i < NUM_TWOPHASE_PARTITIONS; i++) { + TWOPAHSE_LWLOCK_RELEASE(i); + } + FreeDir(cldir); +} + /* * Remove the 2PC file for the specified XID.
* diff --git a/src/gausskernel/storage/access/transam/varsup.cpp b/src/gausskernel/storage/access/transam/varsup.cpp index ab46e72bf..fce31df80 100644 --- a/src/gausskernel/storage/access/transam/varsup.cpp +++ b/src/gausskernel/storage/access/transam/varsup.cpp @@ -51,16 +51,6 @@ void SetNextTransactionId(TransactionId xid, bool updateLatestCompletedXid) ereport(LOG, (errmodule(MOD_TRANS_XACT), errmsg("[from \"g\" msg]setting xid = " XID_FMT ", old_value = " XID_FMT, xid, t_thrd.xact_cxt.next_xid))); - /* cannot update latesetCompletedXid when we are gtm free mode */ - if (GTM_MODE && updateLatestCompletedXid && TransactionIdIsNormal(t_thrd.xact_cxt.next_xid)) { - if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, t_thrd.xact_cxt.next_xid)) { - (void)LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); - /* Advance global latestCompletedXid if CN pass down XID */ - if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, t_thrd.xact_cxt.next_xid)) - t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid = t_thrd.xact_cxt.next_xid; - LWLockRelease(ProcArrayLock); - } - } t_thrd.xact_cxt.next_xid = xid; @@ -113,9 +103,6 @@ TransactionId GetNewTransactionId(bool isSubXact) * during PG_TRY/PG_CATCH/PG_END_TRY */ volatile TransactionId xid; -#ifdef PGXC - bool increment_xid = true; -#endif /* * During bootstrap initialization, we return the special bootstrap @@ -131,144 +118,15 @@ TransactionId GetNewTransactionId(bool isSubXact) if (RecoveryInProgress()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_INITIATION), errmsg("cannot assign TransactionIds during recovery"))); - - if (GTM_MODE) { -#ifdef PGXC - /* Initialize transaction ID */ - xid = InvalidTransactionId; - - if ((IS_PGXC_COORDINATOR && !IsConnFromCoord()) || IsPGXCNodeXactDatanodeDirect()) { - /* - * Get XID from GTM before acquiring the lock as concurrent connections are - * being handled on GTM side even if the lock is acquired in a different - * order. 
- */ - PG_TRY(); - { - if (IsAutoVacuumWorkerProcess() && (t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM) && - (GetTransactionHandleIfAny(s) == InvalidTransactionHandle)) - xid = (TransactionId)BeginTranAutovacuumGTM(); - else - xid = (TransactionId)GetNewGxidGTM(s, isSubXact); - } - PG_CATCH(); - { - PG_RE_THROW(); - } - PG_END_TRY(); - } -#endif - /* acquire lock after receiving gxid from gtm */ - (void)LWLockAcquire(XidGenLock, LW_EXCLUSIVE); -#ifdef PGXC - /* Only remote Coordinator or a Datanode accessed directly by an application can get a GXID */ - if ((IS_PGXC_COORDINATOR && !IsConnFromCoord()) || IsPGXCNodeXactDatanodeDirect()) { - if (TransactionIdIsValid(xid)) { - /* Log some information about the new transaction ID obtained */ - if (IsAutoVacuumWorkerProcess() && (t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM)) - ereport(DEBUG1, (errmsg("Assigned new transaction ID from GTM for autovacuum = %lu", xid))); - else - ereport(DEBUG1, (errmsg("Assigned new transaction ID from GTM = %lu", xid))); - - if (!TransactionIdFollowsOrEquals(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - increment_xid = false; - ereport(DEBUG1, (errmsg("xid (%lu) was less than ShmemVariableCache->nextXid (%lu)", xid, - t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } else - t_thrd.xact_cxt.ShmemVariableCache->nextXid = xid; - } else { - if (IsInitdb || g_instance.status > NoShutdown) { - xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; - ereport(LOG, (errmsg("Shutdown or Initdb get local xid: %lu", - t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } else { - /* release lwlock before ereport. */ - LWLockRelease(XidGenLock); - ereport(ERROR, (errcode(ERRCODE_CONNECTION_EXCEPTION), - errmsg("Can not connect to gtm when getting gxid, there is a connection error."))); - } - } - } else if (IS_PGXC_DATANODE || IsConnFromCoord()) { - if (IsAutoVacuumWorkerProcess()) { - Assert(isSubXact == false); - /* - * For an autovacuum worker process, get transaction ID directly from GTM. - * If this vacuum process is a vacuum analyze, its GXID has to be excluded - * from snapshots so use a special function for this purpose. - * For a simple worker get transaction ID like a normal transaction would do. 
- */ - if ((t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM) && - (GetTransactionHandleIfAny(s) == InvalidTransactionHandle)) - t_thrd.xact_cxt.next_xid = (TransactionId)BeginTranAutovacuumGTM(); - else - t_thrd.xact_cxt.next_xid = (TransactionId)GetNewGxidGTM(s, isSubXact); - } else if (!IsInitdb && (GetForceXidFromGTM() || !TransactionIdIsValid(t_thrd.xact_cxt.next_xid))) { - /* - * In normal processing(not initdb), if CN does not push down `GXID', - * try and get gxid directly from GTM - */ - if (isSubXact) { - LWLockRelease(XidGenLock); - ereport(FATAL, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("GTM Mode: remote node sub xact can not get gxid directly from gtm"))); - } - - ereport(DEBUG1, (errmsg("Force get XID from GTM"))); - t_thrd.xact_cxt.next_xid = (TransactionId)GetNewGxidGTM(s, isSubXact); - if (!TransactionIdIsValid(t_thrd.xact_cxt.next_xid)) { - if (g_instance.status > NoShutdown) - ereport(LOG, (errmsg("Can not get a vaild gxid from GTM"))); - else { - LWLockRelease(XidGenLock); - ereport(ERROR, - (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("Can not get a vaild gxid from GTM"))); - } - } - } - - if (TransactionIdIsValid(t_thrd.xact_cxt.next_xid)) { - xid = t_thrd.xact_cxt.next_xid; - /* check the next_xid cn passed down in GTM Mode, it should be larger than parent xid */ - if (isSubXact && TransactionIdFollowsOrEquals(GetParentTransactionIdIfAny(s), xid)) { - LWLockRelease(XidGenLock); - ereport(FATAL, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("GTM Mode: remote node sub xact xid should be larger than parent xid."))); - } - ereport(DEBUG1, (errmsg("TransactionId = %lu", t_thrd.xact_cxt.next_xid))); - t_thrd.xact_cxt.next_xid = InvalidTransactionId; /* reset */ - if (!TransactionIdFollowsOrEquals(xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid)) { - /* This should be ok, due to concurrency from multiple coords - * passing down the xids. - * We later do not want to bother incrementing the value - * in shared memory though. - */ - increment_xid = false; - ereport(DEBUG1, (errmsg("xid (%lu) does not follow ShmemVariableCache->nextXid (%lu)", xid, - t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } else - t_thrd.xact_cxt.ShmemVariableCache->nextXid = xid; - } else { - /* Fallback to default */ - if (IsInitdb || (g_instance.status > NoShutdown) || useLocalXid) { - xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; - ereport(LOG, (errmsg("Falling back to local Xid. Was = %lu, now is = %lu", t_thrd.xact_cxt.next_xid, - t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } else { - LWLockRelease(XidGenLock); - ereport(ERROR, (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION), - errmsg("Falling back to local Xid. Was = %lu, now is = %lu", - t_thrd.xact_cxt.next_xid, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); - } - } - } -#else - xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; -#endif /* PGXC */ - } else { - (void)LWLockAcquire(XidGenLock, LW_EXCLUSIVE); - xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; + /* we are about to start streaming switch over, stop new xid to stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_INITIATION), + errmsg("cannot assign TransactionIds during streaming disaster recovery"))); } + (void)LWLockAcquire(XidGenLock, LW_EXCLUSIVE); + xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; + /* * Check to see if it's safe to assign another XID. 
* If we're past xidVacLimit, start trying to force autovacuum cycles. @@ -292,19 +150,8 @@ TransactionId GetNewTransactionId(bool isSubXact) /* Re-acquire lock and start over */ (void)LWLockAcquire(XidGenLock, LW_EXCLUSIVE); - /* Make the logical same to PG when GTM-Free */ - if (!GTM_MODE) - xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; -#ifndef PGXC - /* - * In the case of Postgres-XC, transaction ID is managed globally at GTM level, - * so updating the GXID here based on the cache that might have been changed - * by another session when checking for wraparound errors at this local node - * level breaks transaction ID consistency of cluster. - */ xid = t_thrd.xact_cxt.ShmemVariableCache->nextXid; -#endif } /* @@ -319,29 +166,7 @@ TransactionId GetNewTransactionId(bool isSubXact) ExtendCLOG(xid); ExtendCSNLOG(xid); - if (GTM_MODE) { - /* - * Now advance the nextXid counter. This must not happen until after we - * have successfully completed ExtendCLOG() --- if that routine fails, we - * want the next incoming transaction to try it again. We cannot assign - * more XIDs until there is CLOG space for them. - */ -#ifdef PGXC - /* - * But first bring nextXid in sync with global xid. Actually we get xid - * externally anyway, so it should not be needed to update nextXid in - * theory, but it is required to keep nextXid close to the gxid - * especially when vacuumfreeze is run using a standalone backend. - */ - if (increment_xid || !IsPostmasterEnvironment) { - t_thrd.xact_cxt.ShmemVariableCache->nextXid = xid; - TransactionIdAdvance(t_thrd.xact_cxt.ShmemVariableCache->nextXid); - } -#else - TransactionIdAdvance(t_thrd.xact_cxt.ShmemVariableCache->nextXid); -#endif - } else - TransactionIdAdvance(t_thrd.xact_cxt.ShmemVariableCache->nextXid); + TransactionIdAdvance(t_thrd.xact_cxt.ShmemVariableCache->nextXid); /* * We must store the new XID into the shared ProcArray before releasing @@ -386,8 +211,6 @@ TransactionId GetNewTransactionId(bool isSubXact) volatile PGXACT *mypgxact = t_thrd.pgxact; if (!isSubXact) { - if (GTM_MODE) - mypgxact->handle = GetTransactionHandleIfAny(s); mypgxact->xid = xid; } else { int nxids = mypgxact->nxids; @@ -419,7 +242,7 @@ TransactionId GetNewTransactionId(bool isSubXact) } } /* when we use local snapshot, latestcomplete xid is very important for us. 
CHECK this here */ - if (!GTM_MODE && TransactionIdFollowsOrEquals(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, xid)) + if (TransactionIdFollowsOrEquals(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, xid)) ereport(PANIC, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), errmsg("GTM-FREE-MODE: latestCompletedXid %lu larger than next alloc xid %lu.", t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, xid))); diff --git a/src/gausskernel/storage/access/transam/xact.cpp b/src/gausskernel/storage/access/transam/xact.cpp index 183d648e1..205e035f5 100755 --- a/src/gausskernel/storage/access/transam/xact.cpp +++ b/src/gausskernel/storage/access/transam/xact.cpp @@ -57,6 +57,7 @@ #include "commands/tablespace.h" #include "commands/trigger.h" #include "commands/sequence.h" +#include "commands/verify.h" #include "catalog/pg_hashbucket_fn.h" #include "distributelayer/streamCore.h" #include "catalog/storage_xlog.h" @@ -103,6 +104,7 @@ #include "access/ustore/undo/knl_uundozone.h" #include "commands/sequence.h" #include "postmaster/bgworker.h" +#include "replication/walreceiver.h" #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/cache/queryid_cachemgr.h" #include "tsdb/cache/part_cachemgr.h" @@ -121,6 +123,7 @@ extern void uuid_struct_destroy_function(); THR_LOCAL bool CancelStmtForReadOnly = false; /* just need cancel stmt once when DefaultXactReadOnly=true */ THR_LOCAL bool TwoPhaseCommit = false; + extern bool is_user_name_changed(); extern void HDFSAbortCacheBlock(); extern THR_LOCAL Oid lastUDFOid; @@ -330,8 +333,6 @@ static void AtStart_Memory(void); static void AtStart_ResourceOwner(void); static void CallSubXactCallbacks(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid); #ifdef PGXC -static void CleanGTMCallbacks(void); -static void CallGTMCallbacks(GTMEvent event); static void CleanSequenceCallbacks(void); static void CallSequenceCallbacks(GTMEvent event); static void DeleteSavepoint(DList **dlist, DListCell *cell); @@ -490,6 +491,29 @@ void InitCurrentTransactionState(void) CurrentTransactionState = &TopTransactionStateData; } +/* ---------------------------------------------------------------- + * Get transaction list + * ---------------------------------------------------------------- + */ +List* GetTransactionList(List *head) +{ + TransactionState s = GetCurrentTransactionState(); + MemoryContext savedCxt = MemoryContextSwitchTo(t_thrd.mem_cxt.portal_mem_cxt); + int level = 0; + for (; s != NULL; s = s->parent) { + if (OidIsValid(s->prevUser)) { + transactionNode* node = (transactionNode *)palloc0(sizeof(transactionNode)); + node->level = level; + node->userId = s->prevUser; + node->secContext = s->prevSecContext; + head = lappend(head, node); + level++; + } + } + MemoryContextSwitchTo(savedCxt); + return head; +} + /* ---------------------------------------------------------------- * transaction state accessors * ---------------------------------------------------------------- @@ -666,45 +690,6 @@ TransactionId GetCurrentTransactionId(void) return s->transactionId; } -/* - * This will return a Top Transaction Id From the gid string, - * used for gid checking - */ -TransactionId GetTransactionIdFromGidStr(char *gid) -{ -#ifndef ENABLE_MULTIPLE_NODES - DISTRIBUTED_FEATURE_NOT_SUPPORTED(); - return InvalidTransactionId; -#else - TransactionId topxid = InvalidTransactionId; - char *substr = NULL; - int rc = -1; - - /* gid must start with 'T' */ - if ((gid == NULL) || (*gid != 'T')) { - ereport(WARNING, (errmsg("gid is invalid when trying to get xid 
from it"))); - return InvalidTransactionId; - } - - /* xid can be splited from gid by '_' */ - substr = strchr(gid + 1, '_'); - if (substr == NULL) - return InvalidTransactionId; - - int substrlen = substr - (gid + 1); - if (substrlen <= 0 || substrlen >= 63) - return InvalidTransactionId; - - char strxid[64] = ""; - rc = strncpy_s(strxid, sizeof(strxid), gid + 1, substrlen); - securec_check(rc, "", ""); - - topxid = (TransactionId)atol(strxid); - - return TransactionIdIsValid(topxid) ? topxid : InvalidTransactionId; -#endif -} - /* * Stream threads need not commit transaction at all. * Only parent thread allow to commit transacton, So only stream thread will @@ -739,20 +724,6 @@ TransactionId GetCurrentTransactionIdIfAny(void) return CurrentTransactionState->transactionId; } -GTM_TransactionKey GetCurrentTransactionKey(void) -{ - TransactionState s = CurrentTransactionState; - - if (!GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) - s->txnKey = BeginTranGTM(NULL); - return s->txnKey; -} - -GTM_TransactionKey GetCurrentTransactionKeyIfAny(void) -{ - return CurrentTransactionState->txnKey; -} - GTM_TransactionHandle GetTransactionHandleIfAny(TransactionState s) { return s->txnKey.txnHandle; @@ -763,12 +734,6 @@ GTM_TransactionHandle GetCurrentTransactionHandleIfAny(void) return CurrentTransactionState->txnKey.txnHandle; } -GTM_Timeline GetCurrentTransactionTimeline(void) -{ - Assert(GlobalTransactionTimelineIsValid(CurrentTransactionState->txnKey.txnTimeline)); - return CurrentTransactionState->txnKey.txnTimeline; -} - /* * GetStableLatestTransactionId * @@ -824,43 +789,6 @@ void SetCurrentLocalParamStatus(bool status) } #endif -#ifndef ENABLE_MULTIPLE_NODES -TransactionId GetNewGxidGTM(TransactionState s, bool is_sub_xact) -{ - DISTRIBUTED_FEATURE_NOT_SUPPORTED(); - return InvalidTransactionId; -} -#else -static GTM_TransactionKey GetTransactionKey(TransactionState s) -{ - if (!GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) - s->txnKey = BeginTranGTM(NULL); - return s->txnKey; -} - -TransactionId GetNewGxidGTM(TransactionState s, bool is_sub_xact) -{ - TransactionId xid = InvalidTransactionId; - GTM_TransactionKey key; - if (is_sub_xact) { - Assert(GlobalTransactionHandleIsValid(s->parent->txnKey.txnHandle)); - key = s->parent->txnKey; - } else { - key = GetTransactionKey(s); - } - - /* don't retry here, current handle cannot have valid transactionid */ - if (TransactionIdIsValid(s->transactionId)) - ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("current transaction with handle: (%d:%u) have a valid xid: %lu already", key.txnHandle, - key.txnTimeline, s->transactionId))); - - xid = (TransactionId)GetGxidGTM(key, is_sub_xact); - - return xid; -} -#endif - /* * MarkCurrentTransactionIdLoggedIfAny * @@ -969,7 +897,6 @@ static void AssignTransactionId(TransactionState s) ReportTopXid(s->transactionId); if (!isSubXact) instr_stmt_report_txid(s->transactionId); - FixCurrentSnapshotByGxid(s->transactionId); if (isSubXact) SubTransSetParent(s->transactionId, s->parent->transactionId); @@ -1245,11 +1172,6 @@ void SetCurrentStatementStartTimestamp(void) t_thrd.xact_cxt.stmtStartTimestamp = GetCurrentTimestamp(); } -void SetStatementStartTimestamp(TimestampTz timestamp) -{ - t_thrd.xact_cxt.stmtStartTimestamp = timestamp; -} - /* * SetCurrentTransactionStopTimestamp */ @@ -1478,10 +1400,6 @@ static void AtStart_Memory(void) t_thrd.xact_cxt.TransactionAbortContext = AllocSetContextCreate(t_thrd.top_mem_cxt, "TransactionAbortContext", 32 * 1024, 32 * 1024, 32 * 1024); 
-#ifndef ENABLE_PRIVATEGAUSS - /* Set global variable context_array to NIL at beginning of a transaction */ - u_sess->plsql_cxt.context_array = NIL; -#endif /* We shouldn't have a transaction context already. */ Assert(u_sess->top_transaction_mem_cxt == NULL); @@ -1510,6 +1428,8 @@ static void AtStart_ResourceOwner(void) /* We shouldn't have a transaction resource owner already. */ Assert(t_thrd.utils_cxt.TopTransactionResourceOwner == NULL); + Assert(CurrentResourceOwnerIsEmpty(t_thrd.utils_cxt.CurrentResourceOwner)); + Assert(!EnableLocalSysCache() || CurrentResourceOwnerIsEmpty(t_thrd.lsc_cxt.lsc->local_sysdb_resowner)); /* Create a toplevel resource owner for the transaction. */ s->curTransactionOwner = ResourceOwnerCreate(NULL, "TopTransaction", @@ -1588,6 +1508,27 @@ void UpdateNextMaxKnownCSN(CommitSeqNo csn) goto loop; } } +void XLogInsertStandbyCSNCommitting(TransactionId xid, CommitSeqNo csn, TransactionId *children, uint64 nchildren) +{ +#ifdef ENABLE_MULTIPLE_NODES + if (t_thrd.proc->workingVersionNum < DISASTER_READ_VERSION_NUM || IS_PGXC_COORDINATOR) { + return; + } +#endif + + if (!XLogStandbyInfoActive()) { + return; + } + XLogBeginInsert(); + XLogRegisterData((char *) (&xid), sizeof(TransactionId)); + XLogRegisterData((char *) (&csn), sizeof(CommitSeqNo)); + uint64 childrenxidnum = nchildren; + XLogRegisterData((char *) (&childrenxidnum), sizeof(uint64)); + if (childrenxidnum > 0) { + XLogRegisterData((char *)children, nchildren * sizeof(TransactionId)); + } + XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_COMMITTING); +} /* ---------------------------------------------------------------- * CommitTransaction stuff @@ -1626,12 +1567,8 @@ static TransactionId RecordTransactionCommit(void) if (XLogStandbyInfoActive()) nmsgs = xactGetCommittedInvalidationMessages(&invalMessages, &RelcacheInitFileInval); nlibrary = libraryGetPendingDeletes(true, &library_name, &library_length); - wrote_xlog = (t_thrd.xlog_cxt.XactLastRecEnd != 0); - /* - * if g_instance.attr.attr_storage.enable_gtm_free is on, cn must flush xlog, gs_clean status depend on it. - */ - if (isExecCN && !GTM_MODE && u_sess->xact_cxt.savePrepareGID) - wrote_xlog = true; + /* cn must flush xlog, gs_clean status depend on it. */ + wrote_xlog = (t_thrd.xlog_cxt.XactLastRecEnd != 0) || (isExecCN && u_sess->xact_cxt.savePrepareGID); /* * If we haven't been assigned an XID yet, we neither can, nor do we want @@ -1694,45 +1631,59 @@ static TransactionId RecordTransactionCommit(void) */ BufmgrCommit(); - if (useLocalXid || !IsPostmasterEnvironment || GTM_FREE_MODE) { - - -#ifndef ENABLE_MULTIPLE_NODES - /* For hot standby, set csn to commit in progress */ - CommitSeqNo csn = SetXact2CommitInProgress(xid, 0); - if (XLogStandbyInfoActive()) { - XLogBeginInsert(); - XLogRegisterData((char *) (&xid), sizeof(TransactionId)); - XLogRegisterData((char *) (&csn), sizeof(CommitSeqNo)); - uint64 childrenxidnum = nchildren; - XLogRegisterData((char *) (&childrenxidnum), sizeof(uint64)); - if (childrenxidnum > 0) { - XLogRegisterData((char *)children, nchildren * sizeof(TransactionId)); - } - XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_COMMITTING); - } -#else /* - * set commit CSN and update global CSN in gtm free mode. + * Mark ourselves as within our "commit critical section". This + * forces any concurrent checkpoint to wait until we've updated + * pg_clog. 
Without this, it is possible for the checkpoint to set + * REDO after the XLOG record but fail to flush the pg_clog update to + * disk, leading to loss of the transaction commit if the system + * crashes a little later. + * + * Note: we could, but don't bother to, set this flag in + * RecordTransactionAbort. That's because loss of a transaction abort + * is noncritical; the presumption would be that it aborted, anyway. + * + * It's safe to change the delayChkpt flag of our own backend without + * holding the ProcArrayLock, since we're the only one modifying it. + * This makes checkpoint's determination of which xacts are delayChkpt a + * bit fuzzy, but it doesn't matter. */ - SetXact2CommitInProgress(xid, 0); + START_CRIT_SECTION(); + t_thrd.pgxact->delayChkpt = true; + if (useLocalXid || !IsPostmasterEnvironment || GTM_FREE_MODE) { +#ifndef ENABLE_MULTIPLE_NODES + /* For hot standby, set csn to commit in progress */ + CommitSeqNo csn = SetXact2CommitInProgress(xid, 0); + XLogInsertStandbyCSNCommitting(xid, csn, children, nchildren); +#else + /* set commit CSN and update global CSN in gtm free mode. */ + SetXact2CommitInProgress(xid, 0); #endif - setCommitCsn(getLocalNextCSN()); + setCommitCsn(getLocalNextCSN()); } else { /* for dn auto commit condition, get a new next csn from gtm. */ if (TransactionIdIsNormal(xid) && (!(useLocalXid || !IsPostmasterEnvironment || GTM_FREE_MODE || GetForceXidFromGTM())) && (GetCommitCsn() == 0)) { /* First set csn to commit in progress */ - SetXact2CommitInProgress(xid, 0); - - /* Then get a new csn from gtm */ - ereport(LOG, (errmsg("Set a new csn from gtm for auto commit transactions."))); - if (GTM_MODE) - setCommitCsn(GetCSNGTM()); - else + CommitSeqNo csn = SetXact2CommitInProgress(xid, 0); + XLogInsertStandbyCSNCommitting(xid, csn, children, nchildren); + END_CRIT_SECTION(); + PG_TRY(); + { + /* Then get a new csn from gtm */ + ereport(LOG, (errmsg("Set a new csn from gtm for auto commit transactions."))); setCommitCsn(CommitCSNGTM(true)); + } + PG_CATCH(); + { + /* be careful to deal with delay chkpt flag. */ + t_thrd.pgxact->delayChkpt = false; + PG_RE_THROW(); + } + PG_END_TRY(); + START_CRIT_SECTION(); } } @@ -1745,25 +1696,7 @@ static TransactionId RecordTransactionCommit(void) CallXactCallbacks(XACT_EVENT_RECORD_COMMIT); #endif - /* - * Mark ourselves as within our "commit critical section". This - * forces any concurrent checkpoint to wait until we've updated - * pg_clog. Without this, it is possible for the checkpoint to set - * REDO after the XLOG record but fail to flush the pg_clog update to - * disk, leading to loss of the transaction commit if the system - * crashes a little later. - * - * Note: we could, but don't bother to, set this flag in - * RecordTransactionAbort. That's because loss of a transaction abort - * is noncritical; the presumption would be that it aborted, anyway. - * - * It's safe to change the delayChkpt flag of our own backend without - * holding the ProcArrayLock, since we're the only one modifying it. - * This makes checkpoint's determination of which xacts are delayChkpt a - * bit fuzzy, but it doesn't matter. 
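
The hunk above widens the "commit critical section": `delayChkpt` is now raised before the CSN-committing record and the GTM round trip instead of after them, and the new `PG_CATCH` path drops the flag if the CSN fetch throws, so a failed GTM call cannot leave a checkpoint waiting on this backend forever. The resulting control flow, reduced to a skeleton (names as in the diff; surrounding error handling elided):

```
/* inside RecordTransactionCommit(), sketch only */
START_CRIT_SECTION();
t_thrd.pgxact->delayChkpt = true;        /* checkpoint must wait for our clog update */

CommitSeqNo csn = SetXact2CommitInProgress(xid, 0);
XLogInsertStandbyCSNCommitting(xid, csn, children, nchildren);

END_CRIT_SECTION();                      /* the GTM call below may ereport(ERROR) */
PG_TRY();
{
    setCommitCsn(CommitCSNGTM(true));    /* fetch the commit CSN from GTM */
}
PG_CATCH();
{
    t_thrd.pgxact->delayChkpt = false;   /* never leave a checkpoint stalled */
    PG_RE_THROW();
}
PG_END_TRY();
START_CRIT_SECTION();                    /* re-enter for the commit record itself */
```
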
-     */
-    START_CRIT_SECTION();
-    t_thrd.pgxact->delayChkpt = true;
+    UpdateNextMaxKnownCSN(GetCommitCsn());
 
     SetCurrentTransactionStopTimestamp();
@@ -1787,6 +1720,7 @@ static TransactionId RecordTransactionCommit(void)
     if (nrels > 0 || nmsgs > 0 || RelcacheInitFileInval || t_thrd.xact_cxt.forceSyncCommit ||
         XLogLogicalInfoActive() || hasOrigin) {
         xl_xact_commit xlrec;
+        xl_xact_origin origin;
 
         /* Set flags required for recovery processing of commits. */
         xlrec.xinfo = 0;
@@ -1846,7 +1780,6 @@
         }
 
         if (hasOrigin) {
-            xl_xact_origin origin;
             origin.origin_lsn = u_sess->reporigin_cxt.originLsn;
             origin.origin_timestamp = u_sess->reporigin_cxt.originTs;
             XLogRegisterData((char*)&origin, sizeof(xl_xact_origin));
@@ -2036,21 +1969,11 @@ static void AtCommit_Memory(void)
     t_thrd.xact_cxt.PGXCBucketCnt = 0;
     t_thrd.xact_cxt.PGXCGroupOid = InvalidOid;
     t_thrd.xact_cxt.PGXCNodeId = -1;
+    t_thrd.xact_cxt.ActiveLobRelid = InvalidOid;
 
     CStoreMemAlloc::Reset();
 }
 
-#ifdef PGXC
-static void CleanGTMCallbacks(void)
-{
-    /*
-     * The transaction is done, u_sess->top_transaction_mem_cxt as well as the GTM callback items
-     * are already cleaned, so we need here only to reset the GTM callback pointer properly.
-     */
-    t_thrd.xact_cxt.GTM_callbacks = NULL;
-}
-#endif
-
 /* ----------------------------------------------------------------
  * CommitSubTransaction stuff
  * ----------------------------------------------------------------
@@ -2310,7 +2233,9 @@ static TransactionId RecordTransactionAbort(bool isSubXact)
      */
     if (!isSubXact)
         XLogSetAsyncXactLSN(t_thrd.xlog_cxt.XactLastRecEnd);
-
+    if (nrels > 0) {
+        XLogWaitFlush(abortRecLSN);
+    }
     /*
      * Mark the transaction aborted in clog. This is not absolutely necessary
      * but we may as well do it while we are here; also, in the subxact case
@@ -2431,9 +2356,6 @@ static void AtCleanup_Memory(void)
      */
     if (u_sess->top_transaction_mem_cxt != NULL)
         MemoryContextDelete(u_sess->top_transaction_mem_cxt);
-#ifndef ENABLE_PRIVATEGAUSS
-    u_sess->plsql_cxt.context_array = NIL;
-#endif
 
     u_sess->top_transaction_mem_cxt = NULL;
     t_thrd.mem_cxt.cur_transaction_mem_cxt = NULL;
@@ -2477,7 +2399,6 @@ static void StartTransaction(bool begin_on_gtm)
 {
     TransactionState s;
     VirtualTransactionId vxid;
-    GTM_Timestamp gtm_timestamp;
     gstrace_entry(GS_TRC_ID_StartTransaction);
 
     /* clean stream snapshot register info */
@@ -2524,6 +2445,10 @@ static void StartTransaction(bool begin_on_gtm)
     if (RecoveryInProgress()) {
         s->startedInRecovery = true;
         u_sess->attr.attr_common.XactReadOnly = true;
+    } else if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) {
+        /* we are about to start a streaming switchover; writing is forbidden.
*/ + s->startedInRecovery = false; + u_sess->attr.attr_common.XactReadOnly = true; } else { s->startedInRecovery = false; u_sess->attr.attr_common.XactReadOnly = u_sess->attr.attr_storage.DefaultXactReadOnly; @@ -2634,12 +2559,7 @@ static void StartTransaction(bool begin_on_gtm) bool update_xact_time = !GTM_FREE_MODE && !u_sess->attr.attr_common.xc_maintenance_mode && normal_working && IS_PGXC_COORDINATOR && !IsConnFromCoord(); if (update_xact_time) { - if (GTM_MODE) { - s->txnKey = BeginTranGTM(>m_timestamp); - t_thrd.xact_cxt.GTMxactStartTimestamp = (TimestampTz)gtm_timestamp; - } else { - t_thrd.xact_cxt.GTMxactStartTimestamp = t_thrd.xact_cxt.xactStartTimestamp; - } + t_thrd.xact_cxt.GTMxactStartTimestamp = t_thrd.xact_cxt.xactStartTimestamp; SetCurrentGTMDeltaTimestamp(); SetCurrentStmtTimestamp(); @@ -2724,7 +2644,7 @@ void ThreadLocalFlagCleanUp() * the default value. this var may be changed druing this transaction. */ t_thrd.storage_cxt.EnlargeDeadlockTimeout = false; - u_sess->inval_cxt.deepthInAcceptInvalidationMessage = 0; + ResetDeepthInAcceptInvalidationMessage(0); t_thrd.xact_cxt.handlesDestroyedInCancelQuery = false; u_sess->mb_cxt.insertValuesBind_compatible_illegal_chars = false; } @@ -2741,7 +2661,6 @@ static void CommitTransaction(bool STP_commit) TransactionState s = CurrentTransactionState; TransactionId latestXid; bool barrierLockHeld = false; - bool use_old_version_gid = GTM_MODE || (t_thrd.proc->workingVersionNum <= GTM_OLD_VERSION_NUM); #ifdef ENABLE_MULTIPLE_NODES checkAndDoUpdateSequence(); #endif @@ -2779,7 +2698,7 @@ static void CommitTransaction(bool STP_commit) * the default value. this var may be changed during this transaction. */ t_thrd.storage_cxt.EnlargeDeadlockTimeout = false; - u_sess->inval_cxt.deepthInAcceptInvalidationMessage = 0; + ResetDeepthInAcceptInvalidationMessage(0); t_thrd.xact_cxt.handlesDestroyedInCancelQuery = false; ThreadLocalFlagCleanUp(); @@ -2787,7 +2706,8 @@ static void CommitTransaction(bool STP_commit) /* release ref for spi's cachedplan */ ReleaseSpiPlanRef(); - XactResumeSPIContext(true); + /* reset store procedure transaction context */ + stp_reset_xact(); } /* When commit within nested store procedure, it will create a plan cache. @@ -2827,15 +2747,6 @@ static void CommitTransaction(bool STP_commit) if (IsOnCommitActions() || ExecIsTempObjectIncluded()) ExecSetTempObjectIncluded(); - /* - * save top-level transaction xid for commit . regardless whether 2pc is needed or not - */ - t_thrd.xact_cxt.XactXidStoreForCheck = GetTopTransactionIdIfAny(); - if (module_logging_is_on(MOD_TRANS_XACT)) { - ereport(LOG, (errmodule(MOD_TRANS_XACT), - errmsg("reserved xid for commit check is %lu", t_thrd.xact_cxt.XactXidStoreForCheck))); - } - /* * If the local node has done some write activity, prepare the local node * first. 
If that fails, the transaction is aborted on all the remote @@ -2844,13 +2755,8 @@ static void CommitTransaction(bool STP_commit) if (IsTwoPhaseCommitRequired(t_thrd.xact_cxt.XactWriteLocalNode)) { errno_t errorno = EOK; u_sess->xact_cxt.prepareGID = (char *)MemoryContextAlloc(u_sess->top_transaction_mem_cxt, MAX_GID_LENGTH); - if (use_old_version_gid) { - errorno = snprintf_s(u_sess->xact_cxt.prepareGID, MAX_GID_LENGTH, MAX_GID_LENGTH - 1, "T%lu_%s", - GetTopTransactionId(), g_instance.attr.attr_common.PGXCNodeName); - } else { - errorno = snprintf_s(u_sess->xact_cxt.prepareGID, MAX_GID_LENGTH, MAX_GID_LENGTH - 1, "N%lu_%s", - GetTopTransactionId(), g_instance.attr.attr_common.PGXCNodeName); - } + errorno = snprintf_s(u_sess->xact_cxt.prepareGID, MAX_GID_LENGTH, MAX_GID_LENGTH - 1, "N%lu_%s", + GetTopTransactionId(), g_instance.attr.attr_common.PGXCNodeName); securec_check_ss(errorno, "", ""); u_sess->xact_cxt.savePrepareGID = MemoryContextStrdup( @@ -3000,13 +2906,6 @@ static void CommitTransaction(bool STP_commit) * CurrentTransactionState */ s = CurrentTransactionState; - - /* - * Callback on GTM if necessary, this needs to be done before HOLD_INTERRUPTS - * as this is not a part of the end of transaction processing involving clean up. - */ - CallGTMCallbacks(GTM_EVENT_COMMIT); - } else { if (!GTM_FREE_MODE) { /* @@ -3176,6 +3075,7 @@ static void CommitTransaction(bool STP_commit) AtEOXact_DBCleanup(true); #endif + AtEOXact_SysDBCache(true); ResourceOwnerRelease(t_thrd.utils_cxt.TopTransactionResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, true, true); /* Check we've released all buffer pins */ @@ -3258,7 +3158,6 @@ static void CommitTransaction(bool STP_commit) AtCommit_Memory(); #ifdef PGXC /* Clean up GTM callbacks at the end of transaction */ - CleanGTMCallbacks(); CleanSequenceCallbacks(); #endif @@ -3315,10 +3214,6 @@ static void CommitTransaction(bool STP_commit) TransStateAsString(s->state)))); } - /* clear the transaction distribute check xid when transaction commit */ - t_thrd.xact_cxt.XactXidStoreForCheck = InvalidTransactionId; - t_thrd.xact_cxt.reserved_nextxid_check = InvalidTransactionId; - RESUME_INTERRUPTS(); AtEOXact_Remote(); @@ -3335,92 +3230,6 @@ static void CommitTransaction(bool STP_commit) } #ifdef ENABLE_MULTIPLE_NODES -static int finish_txn_gtm(bool commit) -{ - TransactionState s = CurrentTransactionState; - bool is_sub_xact = (s->parent != NULL); - - int ret = 0; - GlobalTransactionId gxid = InvalidTransactionId; - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - if (commit) { - if (GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - ret = CommitTranHandleGTM(s->txnKey, s->transactionId, gxid); - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - } else if (GlobalTransactionIdIsValid(s->transactionId)) - ret = CommitTranGTM(s->transactionId, NULL, 0); - } else { - /* for sub transaction's abort, nothing need to do on gtm. */ - if (!is_sub_xact) { - if (GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - if (!IsConnFromCoord() && IsNormalProcessingMode()) - ret = RollbackTranHandleGTM(s->txnKey, gxid); - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - } else { - /* - * XXX Why don't we have a single API to abort both the GXIDs - * together ? 
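
With GTM mode retired, the `use_old_version_gid` fork above collapses to a single GID spelling, so the producer side now always matches the parser sketched after the first hunk of this section. In isolation (names as in the diff):

```
/* Build the implicit 2PC GID: "N<top xid>_<node name>", e.g. "N2049_datanode1" */
errorno = snprintf_s(u_sess->xact_cxt.prepareGID, MAX_GID_LENGTH, MAX_GID_LENGTH - 1,
                     "N%lu_%s", GetTopTransactionId(),
                     g_instance.attr.attr_common.PGXCNodeName);
securec_check_ss(errorno, "", "");
```

Since a 64-bit xid prints to at most 20 digits, `MAX_GID_LENGTH` has to cover the tag byte, the digits, the separator, and the node name plus the terminating NUL.
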
- */ - if (GlobalTransactionIdIsValid(s->transactionId)) - ret = RollbackTranGTM(s->transactionId, NULL, 0); - } - } - } - } else if (IS_PGXC_DATANODE || IsConnFromCoord()) { - /* If we are autovacuum, commit on GTM */ - if ((IsAutoVacuumWorkerProcess() || GetForceXidFromGTM()) && IsGTMConnected()) { - if (commit) { - if (GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - ret = CommitTranHandleGTM(s->txnKey, s->transactionId, gxid); - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - } else if (GlobalTransactionIdIsValid(s->transactionId)) - ret = CommitTranGTM(s->transactionId, NULL, 0); - } else { - if (GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - ret = RollbackTranHandleGTM(s->txnKey, gxid); - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - } else if (GlobalTransactionIdIsValid(s->transactionId)) - ret = RollbackTranGTM(s->transactionId, NULL, 0); - } - } else if (GlobalTransactionIdIsValid(t_thrd.xact_cxt.currentGxid) || - GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - if (commit) { - if (GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - ret = CommitTranHandleGTM(s->txnKey, s->transactionId, gxid); - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - } - - if (GlobalTransactionIdIsValid(t_thrd.xact_cxt.currentGxid)) - ret = CommitTranGTM(t_thrd.xact_cxt.currentGxid, NULL, 0); - } else { - if (GlobalTransactionHandleIsValid(s->txnKey.txnHandle)) { - ret = RollbackTranHandleGTM(s->txnKey, gxid); - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - } - - if (GlobalTransactionIdIsValid(t_thrd.xact_cxt.currentGxid)) - ret = RollbackTranGTM(t_thrd.xact_cxt.currentGxid, NULL, 0); - } - } - } - - s->txnKey.txnHandle = InvalidTransactionHandle; - s->txnKey.txnTimeline = InvalidTransactionTimeline; - return ret; -} - static int finish_txn_gtm_lite(bool commit, bool is_write) { TransactionState s = CurrentTransactionState; @@ -3475,11 +3284,7 @@ bool AtEOXact_GlobalTxn(bool commit, bool is_write) return false; #else int ret = 0; - if (GTM_MODE) - ret = finish_txn_gtm(commit); - else - ret = finish_txn_gtm_lite(commit, is_write); - + ret = finish_txn_gtm_lite(commit, is_write); SetNextTransactionId(InvalidTransactionId, true); return (ret < 0) ? false : true; @@ -3529,17 +3334,6 @@ static void PrepareTransaction(bool STP_commit) Assert((!StreamThreadAmI() && s->parent == NULL) || StreamThreadAmI()); #ifdef PGXC - /* check if the gid belongs to current transaction (DN or other CN received) */ - if (GTM_MODE && (IS_PGXC_DATANODE || IsConnFromCoord())) { - TransactionId topTransactionId = GetTopTransactionIdIfAny(); - if (topTransactionId != GetTransactionIdFromGidStr(u_sess->xact_cxt.prepareGID)) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("Node %s: prepare gid is %s, and top xid is %lu, different transaction!", - g_instance.attr.attr_common.PGXCNodeName, u_sess->xact_cxt.prepareGID, topTransactionId))); - } - } - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { if (u_sess->xact_cxt.savePrepareGID) { pfree(u_sess->xact_cxt.savePrepareGID); @@ -3581,12 +3375,6 @@ static void PrepareTransaction(bool STP_commit) } /* white box test end */ #endif - - /* - * Callback on GTM if necessary, this needs to be done before HOLD_INTERRUPTS - * as this is not a part of the end of transaction processing involving clean up. 
- */ - CallGTMCallbacks(GTM_EVENT_PREPARE); } #endif @@ -3734,6 +3522,7 @@ static void PrepareTransaction(bool STP_commit) * that cure could be worse than the disease. */ + AtEOXact_SysDBCache(true); ResourceOwnerRelease(t_thrd.utils_cxt.TopTransactionResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, true, true); /* Check we've released all buffer pins */ @@ -3834,11 +3623,6 @@ static void PrepareTransaction(bool STP_commit) AtCommit_Memory(); -#ifdef PGXC - /* Clean up GTM callbacks */ - CleanGTMCallbacks(); -#endif - s->transactionId = InvalidTransactionId; s->subTransactionId = InvalidSubTransactionId; s->nestingLevel = 0; @@ -3965,20 +3749,7 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) /* Clean node group status cache */ CleanNodeGroupStatus(); - /* - * reserve the current top-level transaction id for - * pgxc_node_remote_abort() check - */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - if (!TransactionIdIsValid(t_thrd.xact_cxt.XactXidStoreForCheck)) - t_thrd.xact_cxt.XactXidStoreForCheck = GetTopTransactionIdIfAny(); - - if (module_logging_is_on(MOD_TRANS_XACT)) { - ereport(LOG, (errmodule(MOD_TRANS_XACT), - errmsg("reserved xid for abort check is %lu", t_thrd.xact_cxt.XactXidStoreForCheck))); - } - } - +#ifdef ENABLE_LLVM_COMPILE /* * @llvm * when the query is abnormal exited, the (GsCodeGen *)t_thrd.codegen_cxt.thr_codegen_obj->codeGenState @@ -3987,6 +3758,7 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) * function. */ CodeGenThreadTearDown(); +#endif CancelAutoAnalyze(); lightProxy::setCurrentProxy(NULL); @@ -4017,7 +3789,8 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) /* release ref for spi's cachedplan */ ReleaseSpiPlanRef(); - XactResumeSPIContext(true); + /* reset store procedure transaction context */ + stp_reset_xact(); } #ifdef PGXC /* @@ -4077,11 +3850,6 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) } if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - /* - * Callback on GTM if necessary, this needs to be done before HOLD_INTERRUPTS - * as this is not a part of the end of transaction procesing involving clean up. 
- */ - CallGTMCallbacks(GTM_EVENT_ABORT); if (t_thrd.xact_cxt.XactLocalNodeCanAbort) { CallSequenceCallbacks(GTM_EVENT_ABORT); } else { @@ -4095,7 +3863,6 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) #ifdef PGXC /* Clean up GTM callbacks */ - CleanGTMCallbacks(); CleanSequenceCallbacks(); #endif @@ -4220,6 +3987,7 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) instr_report_workload_xact_info(false); CallXactCallbacks(XACT_EVENT_ABORT); + AtEOXact_SysDBCache(false); ResourceOwnerRelease(t_thrd.utils_cxt.TopTransactionResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); AtEOXact_Buffers(false); AtEOXact_RelationCache(false); @@ -4269,6 +4037,8 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) #endif pgstat_report_xact_timestamp(0); + } else { + AtEOXact_SysDBCache(false); } #ifdef PGXC @@ -4308,8 +4078,9 @@ static void AbortTransaction(bool PerfectRollback, bool STP_rollback) TwoPhaseCommit = false; t_thrd.xact_cxt.bInAbortTransaction = false; - t_thrd.xact_cxt.XactXidStoreForCheck = InvalidTransactionId; - t_thrd.xact_cxt.reserved_nextxid_check = InvalidTransactionId; + t_thrd.xact_cxt.enable_lock_cancel = false; + t_thrd.xact_cxt.ActiveLobRelid = InvalidOid; + t_thrd.xact_cxt.isSelectInto = false; } static void CleanupTransaction(void) @@ -5164,54 +4935,6 @@ static void CallSubXactCallbacks(SubXactEvent event, SubTransactionId mySubid, S } #ifdef PGXC -/* - * Register or deregister callback functions for GTM at xact start or stop. - * Those operations are more or less the xact callbacks but we need to perform - * them before HOLD_INTERRUPTS as it is a part of transaction management and - * is not included in xact cleaning. - * - * The callback is called when xact finishes and may be initialized by events - * related to GTM that need to be taken care of at the end of a transaction block. - */ -void RegisterGTMCallback(GTMCallback callback, void *arg) -{ - GTMCallbackItem *item = NULL; - - item = (GTMCallbackItem *)MemoryContextAlloc(u_sess->top_transaction_mem_cxt, sizeof(GTMCallbackItem)); - item->callback = callback; - item->arg = arg; - item->next = t_thrd.xact_cxt.GTM_callbacks; - t_thrd.xact_cxt.GTM_callbacks = item; -} - -void UnregisterGTMCallback(GTMCallback callback, const void *arg) -{ - GTMCallbackItem *item = NULL; - GTMCallbackItem *prev = NULL; - - prev = NULL; - for (item = t_thrd.xact_cxt.GTM_callbacks; item; prev = item, item = item->next) { - if (item->callback == callback && item->arg == arg) { - if (prev != NULL) { - prev->next = item->next; - } else { - t_thrd.xact_cxt.GTM_callbacks = item->next; - } - pfree(item); - break; - } - } -} - -static void CallGTMCallbacks(GTMEvent event) -{ - GTMCallbackItem *item = NULL; - - for (item = t_thrd.xact_cxt.GTM_callbacks; item; item = item->next) { - (*item->callback)(event, item->arg); - } -} - /* * Similar as RegisterGTMCallback, but use t_thrd.top_mem_cxt instead * of u_sess->top_transaction_mem_cxt, because we want to delete the seqence @@ -6090,12 +5813,14 @@ void RollbackAndReleaseCurrentSubTransaction(bool inSTP) case TBLOCK_SUBINPROGRESS: case TBLOCK_SUBABORT: break; + case TBLOCK_SUBBEGIN: + if (inSTP) + break; /* These cases are invalid. 
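
The `RollbackAndReleaseCurrentSubTransaction` change just above relaxes the state check for stored procedures: a subtransaction still in `TBLOCK_SUBBEGIN` (begun, but `StartSubTransaction` never ran) may now be rolled back when `inSTP` is true, while outside stored procedures that state remains invalid. Reduced to its shape (a sketch; the real function lists every invalid state explicitly):

```
switch (s->blockState) {
    case TBLOCK_SUBINPROGRESS:  /* normal cases: fine to roll back */
    case TBLOCK_SUBABORT:
        break;
    case TBLOCK_SUBBEGIN:
        if (inSTP)
            break;              /* PL exception handling may fire this early */
        /* fall through: invalid outside stored procedures */
    default:
        ereport(FATAL, (errmsg("RollbackAndReleaseCurrentSubTransaction: unexpected state")));
}
```
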
 */
         case TBLOCK_DEFAULT:
         case TBLOCK_STARTED:
         case TBLOCK_BEGIN:
-        case TBLOCK_SUBBEGIN:
         case TBLOCK_INPROGRESS:
         case TBLOCK_END:
         case TBLOCK_SUBRELEASE:
@@ -6535,11 +6260,6 @@ bool IsSubTransaction(void)
     return false;
 }
 
-void SetCurrentTransactionId(TransactionId tid)
-{
-    CurrentTransactionState->transactionId = tid;
-}
-
 /*
  * StartSubTransaction
  *
@@ -6719,7 +6439,8 @@ static void CommitSubTransaction(bool STP_commit)
 
     /* reserve some related resource once any SPI have referenced it. */
     if (STP_commit) {
-        XactReserveSPIContext();
+        stp_reserve_subxact_resowner(s->curTransactionOwner);
+        s->curTransactionOwner = NULL;
     }
 
     AtEOXact_GUC(true, s->gucNestLevel);
@@ -6757,15 +6478,12 @@
     PopTransaction();
 }
 
-void AbortSubTransaction(bool STP_rollback)
+/*
+ * Clean up the subtransaction's runtime context, excluding the transaction and xlog modules.
+ */
+void AbortSubTxnRuntimeContext(TransactionState s, bool inPL)
 {
     u_sess->exec_cxt.isLockRows = false;
-    TransactionState s = CurrentTransactionState;
-    t_thrd.xact_cxt.bInAbortTransaction = true;
-    /* clean hash table for sub transaction in opfusion */
-    if (IS_PGXC_DATANODE) {
-        OpFusion::ClearInSubUnexpectSituation(s->curTransactionOwner);
-    }
 
     /*
      * @dfs
@@ -6780,8 +6498,10 @@
     delete_ec_ctrl();
 #endif
 
+#ifdef ENABLE_LLVM_COMPILE
     /* reset machine code */
     CodeGenThreadReset();
+#endif
 
     /* Reset the compatible illegal chars import flag */
     u_sess->mb_cxt.insertValuesBind_compatible_illegal_chars = false;
@@ -6790,8 +6510,10 @@
 
     HOLD_INTERRUPTS();
 
     /* Make sure we have a valid memory context and resource owner */
-    AtSubAbort_Memory();
-    AtSubAbort_ResourceOwner();
+    if (!inPL) {
+        AtSubAbort_Memory();
+        AtSubAbort_ResourceOwner();
+    }
 
     /* abort CU cache inserting before release all LW locks */
     CStoreAbortCU();
@@ -6819,7 +6541,40 @@
 
     LockErrorCleanup();
 
+    if (inPL) {
+        /* Notify GTM that the sub xact is rolling back */
+        if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) {
+            /* Cancel queries on remote nodes and clear remaining connection data so connections can be reused */
+            SubXactCancel_Remote();
+        }
+
+#ifdef ENABLE_MULTIPLE_NODES
+        reset_handles_at_abort();
+#endif
+
+        /*
+         * Reset user ID which might have been changed transiently. (See notes in
+         * AbortTransaction.)
+         */
+        SetUserIdAndSecContext(s->prevUser, s->prevSecContext);
+        u_sess->exec_cxt.is_exec_trigger_func = false;
+    }
+
     RESUME_INTERRUPTS();
+}
+
+void AbortSubTransaction(bool STP_rollback)
+{
+    TransactionState s = CurrentTransactionState;
+
+    t_thrd.xact_cxt.bInAbortTransaction = true;
+
+    /* clean hash table for sub transaction in opfusion */
+    if (IS_PGXC_DATANODE) {
+        OpFusion::ClearInSubUnexpectSituation(s->curTransactionOwner);
+    }
+
+    AbortSubTxnRuntimeContext(s, false);
 
     /*
      * When copy failed in subTransaction, we should heap sync the relation when
@@ -6870,12 +6625,6 @@
      * ResourceOwner...
*/ if (s->curTransactionOwner) { - if (GTM_MODE && IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - if (!AtEOXact_GlobalTxn(false, false)) { - ereport(WARNING, (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION), - errmsg("Failed to receive GTM abort subtransaction response."))); - } - } AfterTriggerEndSubXact(false); AtSubAbort_Portals(s->subTransactionId, s->parent->subTransactionId, s->curTransactionOwner, s->parent->curTransactionOwner, STP_rollback); @@ -6930,6 +6679,7 @@ void AbortSubTransaction(bool STP_rollback) u_sess->attr.attr_common.XactReadOnly = s->prevXactReadOnly; t_thrd.xact_cxt.bInAbortTransaction = false; + t_thrd.xact_cxt.ActiveLobRelid = InvalidOid; RESUME_INTERRUPTS(); } @@ -6958,11 +6708,11 @@ void CleanupSubTransaction(bool inSTP) if (s->curTransactionOwner != NULL) { if (inSTP) { /* reserve ResourceOwner in STP running. */ - XactReserveSPIContext(); + stp_reserve_subxact_resowner(s->curTransactionOwner); } else { ResourceOwnerDelete(s->curTransactionOwner); - s->curTransactionOwner = NULL; } + s->curTransactionOwner = NULL; } AtSubCleanup_Memory(); @@ -7427,6 +7177,7 @@ void push_unlink_rel_to_hashtbl(ColFileNodeRel *xnodes, int nrels) entry->maxSegNo = -1; del_rel_num++; } + BatchClearBadBlock(colFileNode.filenode, colFileNode.forknum, 0); } } LWLockRelease(g_instance.bgwriter_cxt.rel_hashtbl_lock); @@ -7592,6 +7343,10 @@ static void xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn, Transac PANIC, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), errmsg("xact_redo_commit_internal: unknown csn state %lu", (uint64)csn))); } + if (EnableGlobalSysCache()) { + ProcessCommittedInvalidationMessages(inval_msgs, nmsgs, XactCompletionRelcacheInitFileInval(xinfo), + dbId, tsId); + } #endif } else { CSNLogRecordAssignedTransactionId(max_xid); @@ -7731,17 +7486,9 @@ static void xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn, Transac UpdateMinRecoveryPoint(lsn, false); } -#ifndef ENABLE_MULTIPLE_NODES if (RemoveCommittedCsnInfo(xid)) { XactLockTableDelete(xid); } - - for (int i = 0; i < nsubxacts; ++i) { - if (RemoveCommittedCsnInfo(sub_xids[i])) { - XactLockTableDelete(sub_xids[i]); - } - } -#endif } /* @@ -7877,7 +7624,7 @@ static void xact_redo_abort(xl_xact_abort *xlrec, TransactionId xid, XLogRecPtr t_thrd.xact_cxt.xactDelayDDL = true; else t_thrd.xact_cxt.xactDelayDDL = false; - + UpdateMinRecoveryPoint(lsn, false); /* Make sure files supposed to be dropped are dropped */ unlink_relfiles(xlrec->xnodes, xlrec->nrels); xact_redo_log_drop_segs(xlrec->xnodes, xlrec->nrels, lsn); @@ -7893,6 +7640,10 @@ static void xact_redo_abort(xl_xact_abort *xlrec, TransactionId xid, XLogRecPtr } parseAndRemoveLibrary(filename, xlrec->nlibrary); } + + if (RemoveCommittedCsnInfo(xid)) { + XactLockTableDelete(xid); + } } static void xact_redo_prepare(TransactionId xid) @@ -7937,6 +7688,12 @@ void xact_redo(XLogReaderState *record) */ (void)TWOPAHSE_LWLOCK_ACQUIRE(xid, LW_EXCLUSIVE); PrepareRedoAdd(XLogRecGetData(record), record->ReadRecPtr, record->EndRecPtr); + + if (IS_DISASTER_RECOVER_MODE) { + TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) XLogRecGetData(record); + XactLockTableInsert(hdr->xid); + } + TWOPAHSE_LWLOCK_RELEASE(xid); /* Update prepare trx's csn to commit-in-progress. */ @@ -7949,6 +7706,9 @@ void xact_redo(XLogReaderState *record) /* Delete TwoPhaseState gxact entry and/or 2PC file. 
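
The redo hunks above pair two new actions for `IS_DISASTER_RECOVER_MODE`: the xact lock is taken when a PREPARE record is replayed and released when the matching COMMIT/ABORT PREPARED record arrives, so readers on the standby can wait on an in-doubt xid instead of failing. Side by side (names as in the diff):

```
/* on XLOG_XACT_PREPARE */
PrepareRedoAdd(XLogRecGetData(record), record->ReadRecPtr, record->EndRecPtr);
if (IS_DISASTER_RECOVER_MODE) {
    TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *)XLogRecGetData(record);
    XactLockTableInsert(hdr->xid);    /* lock held while the xact is in doubt */
}

/* on XLOG_XACT_COMMIT_PREPARED / XLOG_XACT_ABORT_PREPARED */
PrepareRedoRemove(xlrec->xid, false);
if (IS_DISASTER_RECOVER_MODE) {
    XactLockTableDelete(xlrec->xid);  /* resolved: wake any waiters */
}
```
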
*/ (void)TWOPAHSE_LWLOCK_ACQUIRE(xlrec->xid, LW_EXCLUSIVE); PrepareRedoRemove(xlrec->xid, false); + if (IS_DISASTER_RECOVER_MODE) { + XactLockTableDelete(xlrec->xid); + } TWOPAHSE_LWLOCK_RELEASE(xlrec->xid); } else if (info == XLOG_XACT_ABORT_PREPARED) { xl_xact_abort_prepared *xlrec = (xl_xact_abort_prepared *)XLogRecGetData(record); @@ -7958,6 +7718,9 @@ void xact_redo(XLogReaderState *record) /* Delete TwoPhaseState gxact entry and/or 2PC file. */ (void)TWOPAHSE_LWLOCK_ACQUIRE(xlrec->xid, LW_EXCLUSIVE); PrepareRedoRemove(xlrec->xid, false); + if (IS_DISASTER_RECOVER_MODE) { + XactLockTableDelete(xlrec->xid); + } TWOPAHSE_LWLOCK_RELEASE(xlrec->xid); } else if (info == XLOG_XACT_ASSIGNMENT) { } else { @@ -8011,6 +7774,24 @@ void XactGetRelFiles(XLogReaderState *record, ColFileNodeRel **xnodesPtr, int *n return; } +bool XactWillRemoveRelFiles(XLogReaderState *record) +{ + /* + * Relation files under tablespace folders are removed only from + * applying transaction log record. + */ + int nrels = 0; + ColFileNodeRel *xnodes = NULL; + + if (XLogRecGetRmid(record) != RM_XACT_ID) { + return false; + } + + XactGetRelFiles(record, &xnodes, &nrels); + + return (nrels > 0); +} + bool xactWillRemoveRelFiles(XLogReaderState *record) { int nrels = 0; @@ -8062,17 +7843,6 @@ void ForgetTransactionLocalNode(void) t_thrd.xact_cxt.XactReadLocalNode = t_thrd.xact_cxt.XactWriteLocalNode = false; } -/* Check if the local node is involved in the transaction */ -bool IsTransactionLocalNode(bool write) -{ - if (write && t_thrd.xact_cxt.XactWriteLocalNode) - return true; - else if (!write && t_thrd.xact_cxt.XactReadLocalNode) - return true; - else - return false; -} - /* Check if the given xid is form implicit 2PC */ bool IsXidImplicit(const char *xid) { @@ -8130,7 +7900,7 @@ void ReportCommandIdChange(CommandId cid) void ReportTopXid(TransactionId local_top_xid) { - if (GTM_MODE || (t_thrd.proc->workingVersionNum <= GTM_OLD_VERSION_NUM)) { + if ((t_thrd.proc->workingVersionNum <= GTM_OLD_VERSION_NUM)) { return; } StringInfoData buf; @@ -8541,7 +8311,9 @@ void ApplyUndoActions() for (int i = 0; i < UNDO_PERSISTENCE_LEVELS; i++) { if (s->latest_urp[i]) { + WaitState oldStatus = pgstat_report_waitstatus(STATE_WAIT_TRANSACTION_ROLLBACK); TryExecuteUndoActions(s, (UndoPersistence)i); + pgstat_report_waitstatus(oldStatus); } } @@ -8598,62 +8370,29 @@ CanPerformUndoActions(void) return s->perform_undo; } -typedef struct { - ResourceOwner resowner; - void *next; -} XactContextItem; - -/* - * Reserve the necessary context in current transaction for SPI. - */ -void XactReserveSPIContext() -{ - TransactionState s = CurrentTransactionState; - - MemoryContext oldcxt = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); - XactContextItem *item = (XactContextItem*)palloc(sizeof(XactContextItem)); - item->resowner = s->curTransactionOwner; - - ResourceOwnerNewParent(item->resowner, NULL); - ResourceOwnerMarkInvalid(item->resowner); - item->next = u_sess->plsql_cxt.spi_xact_context; - MemoryContextSwitchTo(oldcxt); - - u_sess->plsql_cxt.spi_xact_context = item; - s->curTransactionOwner = NULL; -} - -/* - * Restore the reserved context for SPI into CurrentTransaction. 
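
The `XactContextItem` machinery deleted here (park the subtransaction's `ResourceOwner` on a session-level list, then resume or delete it later) is superseded by the `stp_*` helpers that appear at the call sites earlier in this diff. The contract those call sites imply, sketched (helper signatures inferred from the call sites, not from a header):

```
/* CommitSubTransaction()/CleanupSubTransaction(), under a stored procedure */
if (STP_commit) {
    stp_reserve_subxact_resowner(s->curTransactionOwner); /* park it, keep its resources alive */
    s->curTransactionOwner = NULL;                        /* the xact gives up ownership */
}

/* CommitTransaction()/AbortTransaction(), back at top level */
stp_reset_xact();   /* release everything the stored procedure reserved */
```
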
- */ -void XactResumeSPIContext(bool clean) -{ - while (u_sess->plsql_cxt.spi_xact_context != NULL) { - XactContextItem *item = (XactContextItem*)u_sess->plsql_cxt.spi_xact_context; - - if (clean) { - ResourceOwnerDelete(item->resowner); - } else { - ResourceOwnerNewParent(item->resowner, t_thrd.utils_cxt.CurrentResourceOwner); - } - - u_sess->plsql_cxt.spi_xact_context = item->next; - pfree(item); - } -} - /* * With longjump after ERROR, some resource, such as Snapshot, are not released as done by normal. - * Some cleanup are required for subtransactions inside PL exception block. + * Some cleanup are required for subtransactions inside PL exception block. This acts much like + * AbortSubTransaction except for dealing with more than one as read only and keeping them going. * * head: the first subtransaction in this Exception block, cleanup is required for all after this. - * hasAbort: whether or not rollback has been done for the lastest subtransaction. */ -void XactCleanExceptionSubTransaction(SubTransactionId head, bool hasAbort) +void XactCleanExceptionSubTransaction(SubTransactionId head) { TransactionState s = CurrentTransactionState; + t_thrd.xact_cxt.bInAbortTransaction = true; + + AbortSubTxnRuntimeContext(s, true); + + /* Prevent cancel/die interrupt while cleaning up */ + HOLD_INTERRUPTS(); + while (s->subTransactionId >= head && s->parent != NULL) { + if (IS_PGXC_DATANODE) { + OpFusion::ClearInSubUnexpectSituation(s->curTransactionOwner); + } + /* reset cursor's attribute var */ ResetPortalCursor(s->subTransactionId, InvalidOid, 0); @@ -8663,24 +8402,16 @@ void XactCleanExceptionSubTransaction(SubTransactionId head, bool hasAbort) ResourceOwnerRelease(s->curTransactionOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, false); ResourceOwnerRelease(s->curTransactionOwner, RESOURCE_RELEASE_AFTER_LOCKS, false, false); + AtEOSubXact_HashTables(false, s->nestingLevel); + AtEOSubXact_PgStat(false, s->nestingLevel); AtSubAbort_Snapshot(s->nestingLevel); s = s->parent; } - if (!hasAbort) { - AbortBufferIO(); - UnlockBuffers(); - LockErrorCleanup(); + t_thrd.xact_cxt.bInAbortTransaction = false; - /* cancel and clear connection remaining data for connection reuse */ - if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) { - SubXactCancel_Remote(); - } -#ifdef ENABLE_MULTIPLE_NODES - reset_handles_at_abort(); -#endif - } + RESUME_INTERRUPTS(); } /* diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index 03e988b37..ebbe0148f 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -76,6 +76,7 @@ #include "postmaster/startup.h" #include "postmaster/postmaster.h" #include "postmaster/pagewriter.h" +#include "postmaster/pagerepair.h" #include "replication/logical.h" #include "replication/bcm.h" #include "replication/basebackup.h" @@ -127,6 +128,7 @@ #include "gssignal/gs_signal.h" #include "gstrace/gstrace_infra.h" #include "gstrace/access_gstrace.h" +#include "postmaster/pagerepair.h" #include #include #include @@ -135,8 +137,14 @@ #include "storage/mot/mot_fdw.h" #endif +#ifndef ENABLE_MULTIPLE_NODES +#include "dcf_interface.h" +#endif /* just for libpqrcv_connect_for_TLI and ha_set_rebuild_connerror */ #include "replication/libpqwalreceiver.h" +/* Used for barrier preparse */ +#include "postmaster/barrier_preparse.h" + /* Used for parallel recovery */ #include "access/redo_statistic.h" @@ -146,6 +154,12 @@ #include "access/extreme_rto/spsc_blocking_queue.h" #include 
"access/extreme_rto/page_redo.h" #include "vectorsonic/vsonichash.h" +#ifdef ENABLE_UT +#define STATIC +#else +#define STATIC static +#endif + /* File path names (all relative to $PGDATA) */ #define RECOVERY_COMMAND_FILE "recovery.conf" #define RECOVERY_COMMAND_DONE "recovery.done" @@ -153,6 +167,7 @@ #define SWITCHOVER_SIGNAL_FILE "switchover" #define PRIMARY_SIGNAL_FILE "primary" #define STANDBY_SIGNAL_FILE "standby" +#define CASCADE_STANDBY_SIGNAL_FILE "cascade_standby" #define XLOG_SWITCH_HISTORY_FILE "switch.history" #define MAX_PATH_LEN 1024 #define MAX(A, B) ((B) > (A) ? (B) : (A)) @@ -169,11 +184,15 @@ #define MaxMacAddrList 10 #define BILLION 1000000000L +const int ONE_SECOND_TO_MICROSECOND = 1000000L; +const uint64 PAGE_SIZE_BYTES = 4096; +const uint64 XLOG_FLUSH_SIZE_INIT = 1024 * 1024; const int SIZE_OF_UINT64 = 8; const int SIZE_OF_UINT32 = 4; const int SIZE_OF_TWO_UINT64 = 16; const int XLOG_LSN_SWAP = 32; +const int XLOGFILENAMELEN = 24; const char *DemoteModeDescs[] = { "unknown", "smart", "fast", "immediate" }; const int DemoteModeNum = sizeof(DemoteModeDescs) / sizeof(char *); static const int g_retryTimes = 3; @@ -192,6 +211,8 @@ static const Size MaxSendSizeBytes = 1048576; THR_LOCAL bool redo_oldversion_xlog = false; +static const int ONE_SECOND = 1000000; + /* * XLOGfileslop is the maximum number of preallocated future XLOG segments. * When we are done with an old XLOG segment file, we will recycle it as a @@ -234,264 +255,6 @@ XLogSegNo XlogRemoveSegPrimary = InvalidXLogSegPtr; TransactionId NextXidAfterReovery; TransactionId OldestXidAfterRecovery; -/* - * Inserting to WAL is protected by a small fixed number of WAL insertion - * locks. To insert to the WAL, you must hold one of the locks - it doesn't - * matter which one. To lock out other concurrent insertions, you must hold - * of them. Each WAL insertion lock consists of a lightweight lock, plus an - * indicator of how far the insertion has progressed (insertingAt). - * - * The insertingAt values are read when a process wants to flush WAL from - * the in-memory buffers to disk, to check that all the insertions to the - * region the process is about to write out have finished. You could simply - * wait for all currently in-progress insertions to finish, but the - * insertingAt indicator allows you to ignore insertions to later in the WAL, - * so that you only wait for the insertions that are modifying the buffers - * you're about to write out. - * - * This isn't just an optimization. If all the WAL buffers are dirty, an - * inserter that's holding a WAL insert lock might need to evict an old WAL - * buffer, which requires flushing the WAL. If it's possible for an inserter - * to block on another inserter unnecessarily, deadlock can arise when two - * inserters holding a WAL insert lock wait for each other to finish their - * insertion. - * - * Small WAL records that don't cross a page boundary never update the value, - * the WAL record is just copied to the page and the lock is released. But - * to avoid the deadlock-scenario explained above, the indicator is always - * updated before sleeping while holding an insertion lock. - */ -typedef struct { - LWLock lock; -#ifdef __aarch64__ - pg_atomic_uint32 xlogGroupFirst; -#endif - XLogRecPtr insertingAt; -} WALInsertLock; - -/* - * All the WAL insertion locks are allocated as an array in shared memory. 
We - * force the array stride to be a power of 2, which saves a few cycles in - * indexing, but more importantly also ensures that individual slots don't - * cross cache line boundaries. (Of course, we have to also ensure that the - * array start address is suitably aligned.) - */ -typedef union WALInsertLockPadded { - WALInsertLock l; - char pad[PG_CACHE_LINE_SIZE]; -} WALInsertLockPadded; - -/* - * Shared state data for WAL insertion. - */ -typedef struct XLogCtlInsert { - /* - * CurrBytePos is the end of reserved WAL. The next record will be inserted - * at that position. PrevBytePos is the start position of the previously - * inserted (or rather, reserved) record - it is copied to the the prev- - * link of the next record. These are stored as "usable byte positions" - * rather than XLogRecPtrs (see XLogBytePosToRecPtr()). - */ - uint64 CurrBytePos; - uint32 PrevByteSize; - int32 CurrLRC; - -#if (!defined __x86_64__) && (!defined __aarch64__) - slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */ -#endif - /* - * Make sure the above heavily-contended spinlock and byte positions are - * on their own cache line. In particular, the RedoRecPtr and full page - * write variables below should be on a different cache line. They are - * read on every WAL insertion, but updated rarely, and we don't want - * those reads to steal the cache line containing Curr/PrevBytePos. - */ - char pad[PG_CACHE_LINE_SIZE]; - /* - * WAL insertion locks. - */ - WALInsertLockPadded **WALInsertLocks; - - /* - * fullPageWrites is the master copy used by all backends to determine - * whether to write full-page to WAL, instead of using process-local one. - * This is required because, when full_page_writes is changed by SIGHUP, - * we must WAL-log it before it actually affects WAL-logging by backends. - * Checkpointer sets at startup or after SIGHUP. - * To read these fields, you must hold an insertion slot. To modify them, - * you must hold ALL the slots. - */ - XLogRecPtr RedoRecPtr; /* current redo point for insertions */ - bool forcePageWrites; /* forcing full-page writes for PITR? */ - bool fullPageWrites; - - /* - * exclusiveBackup is true if a backup started with pg_start_backup() is - * in progress, and nonExclusiveBackups is a counter indicating the number - * of streaming base backups currently in progress. forcePageWrites is set - * to true when either of these is non-zero. lastBackupStart is the latest - * checkpoint redo location used as a starting point for an online backup. - */ - bool exclusiveBackup; - int nonExclusiveBackups; - XLogRecPtr lastBackupStart; -} XLogCtlInsert; - -/* - * Total shared-memory state for XLOG. - */ -typedef struct XLogCtlData { - /* Protected by WALInsertLock: */ - XLogCtlInsert Insert; - - /* Protected by info_lck: */ - XLogwrtRqst LogwrtRqst; - XLogRecPtr RedoRecPtr; /* a recent copy of Insert->RedoRecPtr */ - TransactionId ckptXid; - XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */ - XLogRecPtr replicationSlotMinLSN; /* oldest LSN needed by any slot */ - XLogRecPtr replicationSlotMaxLSN; /* latest LSN for dummy startpoint */ - XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */ - - /* Time of last xlog segment switch. Protected by WALWriteLock. 
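
The WAL-insertion structures being removed from xlog.cpp here (presumably centralised in a header) encode a layout rule worth keeping visible: each padded insertion lock occupies a full cache line so backends spinning on neighbouring locks never false-share. A compilable miniature of the idea (128-byte line and a stub `LWLock` assumed; the tree uses `PG_CACHE_LINE_SIZE`):

```
#include <cstdint>

typedef uint64_t XLogRecPtr;
struct LWLockStub { uint32_t state; };      /* stand-in for the kernel's LWLock */

const int kCacheLineSize = 128;             /* PG_CACHE_LINE_SIZE stand-in */

union PaddedInsertLock {
    struct {
        LWLockStub lock;
        XLogRecPtr insertingAt;             /* how far this holder has copied */
    } l;
    char pad[kCacheLineSize];               /* one lock per cache line */
};

static_assert(sizeof(PaddedInsertLock) == kCacheLineSize,
              "stride must stay a power of two for cheap indexing");
```
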
*/ - pg_time_t lastSegSwitchTime; - - /* - * Protected by info_lck and WALWriteLock (you must hold either lock to - * read it, but both to update) - */ - XLogwrtResult LogwrtResult; - -#ifndef ENABLE_MULTIPLE_NODES - /* - * Protected by info_lck and WALWritePaxosLock (you must hold either lock to - * read it, but both to update) - */ - XLogwrtPaxos LogwrtPaxos; -#endif - - /* - * Latest initialized block index in cache. - * - * To change curridx and the identity of a buffer, you need to hold - * WALBufMappingLock. To change the identity of a buffer that's still - * dirty, the old page needs to be written out first, and for that you - * need WALWriteLock, and you need to ensure that there are no in-progress - * insertions to the page by calling WaitXLogInsertionsToFinish(). - */ - XLogRecPtr InitializedUpTo; - - /* - * These values do not change after startup, although the pointed-to pages - * and xlblocks values certainly do. xlblock values are protected by - * WALBufMappingLock. - */ - char *pages; /* buffers for unwritten XLOG pages */ - XLogRecPtr *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */ - int XLogCacheBlck; /* highest allocated xlog buffer index */ - TimeLineID ThisTimeLineID; - - /* - * archiveCleanupCommand is read from recovery.conf but needs to be in - * shared memory so that the checkpointer process can access it. - */ - char archiveCleanupCommand[MAXPGPATH]; - - /* - * SharedRecoveryInProgress indicates if we're still in crash or archive - * recovery. Protected by info_lck. - */ - bool SharedRecoveryInProgress; - - bool IsRecoveryDone; - - /* - * SharedHotStandbyActive indicates if we're still in crash or archive - * recovery. Protected by info_lck. - */ - bool SharedHotStandbyActive; - - /* - * WalWriterSleeping indicates whether the WAL writer is currently in - * low-power mode (and hence should be nudged if an async commit occurs). - * Protected by info_lck. - */ - bool WalWriterSleeping; - - /* - * recoveryWakeupLatch is used to wake up the startup process to continue - * WAL replay, if it is waiting for WAL to arrive or failover trigger file - * to appear. - */ - Latch recoveryWakeupLatch; - - Latch dataRecoveryLatch; - - /* - * During recovery, we keep a copy of the latest checkpoint record here. - * Used by the background writer when it wants to create a restartpoint. - * - * Protected by info_lck. - */ - XLogRecPtr lastCheckPointRecPtr; - CheckPoint lastCheckPoint; - - /* lastReplayedReadRecPtr points to the header of last apply lsn. */ - XLogRecPtr lastReplayedReadRecPtr; - /* - * lastReplayedEndRecPtr points to end+1 of the last record successfully - * replayed. When we're currently replaying a record, ie. in a redo - * function, replayEndRecPtr points to the end+1 of the record being - * replayed, otherwise it's equal to lastReplayedEndRecPtr. - */ - XLogRecPtr lastReplayedEndRecPtr; - XLogRecPtr replayEndRecPtr; - /* timestamp of last COMMIT/ABORT record replayed (or being replayed) */ - TimestampTz recoveryLastXTime; - /* current effective recovery target timeline */ - TimeLineID RecoveryTargetTLI; - - /* - * timestamp of when we started replaying the current chunk of WAL data, - * only relevant for replication or archive recovery - */ - TimestampTz currentChunkStartTime; - /* Are we requested to pause recovery? */ - bool recoveryPause; - - /* - * lastFpwDisableRecPtr points to the start of the last replayed - * XLOG_FPW_CHANGE record that instructs full_page_writes is disabled. 
- */ - XLogRecPtr lastFpwDisableRecPtr; - - /* - * After started up, we need to make sure that - * it will do full page write before the first checkpoint. - */ - bool FpwBeforeFirstCkpt; - - /* LSN of xlogs already tracked by CBM, which checkpoint can now recycle. */ - XLogRecPtr cbmTrackedLSN; - /* the bitmap file rotate lsn requested by outter */ - volatile XLogRecPtr cbmMaxRequestRotateLsn; - /* curr cbm file name like pg_xlog_xx_xxxx_xxxx.cbm */ - XLogRecPtr currCbmFileStartLsn; - - /* if true, stale xlog segments are not recycled during checkpoint, for backup purpose */ - bool delayXlogRecycle; - - /* start point from where dropped column relation files are delayed to do physical unlinking */ - XLogRecPtr ddlDelayStartPtr; - - /* start point for logging new remain segments or extents */ - XLogRecPtr remain_segs_start_point; - bool is_need_log_remain_segs; - XLogRecPtr remainCommitLsn; - - slock_t info_lck; /* locks shared variables shown above */ -} XLogCtlData; - /* * We maintain an image of pg_control in shared memory. */ @@ -614,22 +377,17 @@ static void RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI); static void UpdateLastRemovedPtr(const char *filename); static void ValidateXLOGDirectoryStructure(void); static void CleanupBackupHistory(void); -static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode, bool fetching_ckpt, - bool readoldversion = false); +static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode, bool fetching_ckpt); void CheckRecoveryConsistency(void); -static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int whichChkpt, - bool readoldversion = false); -static List *readTimeLineHistory(TimeLineID targetTLI); +static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int whichChkpt); static bool existsTimeLineHistory(TimeLineID probeTLI); static bool rescanLatestTimeLine(void); static TimeLineID findNewestTimeLine(TimeLineID startTLI); static bool timeLineInHistory(TimeLineID tli, List *expectedTLEs); -static void WriteControlFile(void); -static void ReadControlFile(void); +STATIC void WriteControlFile(void); +STATIC void ReadControlFile(void); static void RecoverControlFile(void); static char *str_time(pg_time_t tnow); -static bool CheckForFailoverTrigger(void); -static bool CheckForSwitchoverTrigger(void); static bool CheckForPrimaryTrigger(void); static bool CheckForStandbyTrigger(void); @@ -652,7 +410,7 @@ static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rd XLogRecPtr EndPos, int32 *const currlrc_ptr); static void ReserveXLogInsertLocation(uint32 size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr, int32 *const currlrc_ptr); -static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr, bool isupgrade = false); +static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr); static void StartSuspendWalInsert(int32 *const lastlrc_ptr); static void StopSuspendWalInsert(int32 lastlrc); @@ -662,7 +420,7 @@ static XLogRecPtr XLogBytePosToRecPtr(uint64 bytepos); static XLogRecPtr XLogBytePosToEndRecPtr(uint64 bytepos); static uint64 XLogRecPtrToBytePos(XLogRecPtr ptr); -static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, bool isupgrade); +static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn); static bool DoEarlyExit(); void 
ArchiveXlogForForceFinishRedo(XLogReaderState *xlogreader, TermFileData *term_file); @@ -730,16 +488,16 @@ static void XLogInsertRecordGroupLeader(PGPROC *leader, uint64 *end_byte_pos_ptr record_size = MAXALIGN(((XLogRecord *)(leader->xlogGrouprdata->data))->xl_tot_len); Assert(record_size != 0); - if (likely(record_size != 0)) { - ReserveXLogInsertByteLocation(record_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos, - ¤t_lrc); - dirty_page_queue_lsn = start_byte_pos; - } - /* insert the leader WAL first */ if (unlikely(leader->xlogGroupIsFPW)) { leader->xlogGroupIsFPW = false; + return; } else { + if (likely(record_size != 0)) { + ReserveXLogInsertByteLocation(record_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos, + ¤t_lrc); + dirty_page_queue_lsn = start_byte_pos; + } XLogInsertRecordNolock(leader->xlogGrouprdata, leader, XLogBytePosToRecPtr(start_byte_pos), XLogBytePosToEndRecPtr( start_byte_pos + @@ -954,7 +712,9 @@ static XLogRecPtr XLogInsertRecordGroup(XLogRecData *rdata, XLogRecPtr fpw_lsn) /* I am the leader; Let me insert my own WAL first. */ PGPROC *leader = t_thrd.proc; XLogInsertRecordGroupLeader(leader, &end_byte_pos_leader, ¤t_lrc_leader); - XLogReportCopyLocation(current_lrc_leader, end_byte_pos_leader); + if (end_byte_pos_leader != 0) { + XLogReportCopyLocation(current_lrc_leader, end_byte_pos_leader); + } /* * Clear the list of processes waiting for group xlog insert, saving a pointer to the head of the list. @@ -981,7 +741,7 @@ static XLogRecPtr XLogInsertRecordGroup(XLogRecData *rdata, XLogRecPtr fpw_lsn) * If the status entry is owned by this thread, update it with the copied xlog record's * end LSN and set the WAL copy status to WAL_COPIED to signal copy completion as well */ - if (has_follower) { + if (has_follower && end_byte_pos != 0) { XLogReportCopyLocation(current_lrc, end_byte_pos); } @@ -1286,7 +1046,7 @@ inline void SetMinRecoverPointForStats(XLogRecPtr lsn) * @in isupgrade: whether this xlog insert is for upgrade. * @return: the LSN of the insert end position. */ -static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, bool isupgrade) +static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn) { XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; XLogRecord *rechdr = (XLogRecord *)rdata->data; @@ -1297,12 +1057,10 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, XLogRecPtr EndPos = InvalidXLogRecPtr; int32 currlrc = 0; - bool isLogSwitch = - ((isupgrade ? ((XLogRecordOld *)rechdr)->xl_rmid : ((XLogRecord *)rechdr)->xl_rmid) == RM_XLOG_ID && - (isupgrade ? ((XLogRecordOld *)rechdr)->xl_info : ((XLogRecord *)rechdr)->xl_info) == XLOG_SWITCH); + bool isLogSwitch = (rechdr->xl_rmid == RM_XLOG_ID && rechdr->xl_info == XLOG_SWITCH); /* we assume that all of the record header is in the first chunk */ - Assert(rdata->len >= (isupgrade ? 
SizeOfXLogRecordOld : SizeOfXLogRecord)); + Assert(rdata->len >= SizeOfXLogRecord); /* cross-check on whether we should be here or not */ if (!XLogInsertAllowed()) { @@ -1376,15 +1134,8 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, */ if (isLogSwitch) { XLogRecPtr tmp_xl_prev = InvalidXLogRecPtr; - inserted = ReserveXLogSwitch(&StartPos, &EndPos, &tmp_xl_prev, isupgrade); - if (isupgrade) { - ((XLogRecordOld *)rechdr)->xl_prev.xlogid = (uint32)(tmp_xl_prev >> 32); - ((XLogRecordOld *)rechdr)->xl_prev.xrecoff = (uint32)tmp_xl_prev; - ereport(LOG, (errmsg("A XLOG_SWITCH is started at %X/%X, ended at %X/%X", (uint32)(StartPos >> 32), - (uint32)StartPos, (uint32)(EndPos >> 32), (uint32)EndPos))); - } else { - rechdr->xl_prev = tmp_xl_prev; - } + inserted = ReserveXLogSwitch(&StartPos, &EndPos, &tmp_xl_prev); + rechdr->xl_prev = tmp_xl_prev; } else { ReserveXLogInsertLocation(rechdr->xl_tot_len, &StartPos, &EndPos, &rechdr->xl_prev, &currlrc); inserted = true; @@ -1398,18 +1149,12 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, &g_instance.wal_cxt.walInsertStatusTable[GET_STATUS_ENTRY_INDEX(current_entry)]; /* Now that xl_prev has been filled in, calculate CRC of the record header. */ - rdata_crc = (isupgrade ? ((XLogRecordOld *)rechdr)->xl_crc : ((XLogRecord *)rechdr)->xl_crc); - - if (isupgrade) { - /* using PG's CRC32 */ - COMP_CRC32(rdata_crc, (XLogRecordOld *)rechdr, offsetof(XLogRecordOld, xl_crc)); - } else { - /* using CRC32C */ - COMP_CRC32C(rdata_crc, (XLogRecord *)rechdr, offsetof(XLogRecord, xl_crc)); - } + rdata_crc = rechdr->xl_crc; + /* using CRC32C */ + COMP_CRC32C(rdata_crc, (XLogRecord *)rechdr, offsetof(XLogRecord, xl_crc)); FIN_CRC32C(rdata_crc); /* FIN_CRC32C as same as FIN_CRC32 */ - (isupgrade ? ((XLogRecordOld *)rechdr)->xl_crc : ((XLogRecord *)rechdr)->xl_crc) = rdata_crc; + rechdr->xl_crc = rdata_crc; /* * All the record data, including the header, is now ready to be @@ -1513,7 +1258,7 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, * xlog-switch record. */ if (inserted) { - EndPos = StartPos + (isupgrade ? MAXALIGN(SizeOfXLogRecordOld) : MAXALIGN(SizeOfXLogRecord)); + EndPos = StartPos + MAXALIGN(SizeOfXLogRecord); if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ && EndPos % XLOG_BLCKSZ != 0) { if (EndPos % XLOG_SEG_SIZE == EndPos % XLOG_BLCKSZ) { EndPos += SizeOfXLogLongPHD; @@ -1575,7 +1320,7 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn, * @in isupgrade: whether this xlog insert is for upgrade. * @return: the LSN of the insert end position. */ -XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, bool isupgrade) +XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) { #ifdef __aarch64__ /* @@ -1584,16 +1329,14 @@ XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, bool isupgra * situation, insert the record in group mode. */ XLogRecord *rechdr = (XLogRecord *)rdata->data; - bool isLogSwitch = - ((isupgrade ? ((XLogRecordOld *)rechdr)->xl_rmid : ((XLogRecord *)rechdr)->xl_rmid) == RM_XLOG_ID && - (isupgrade ? 
((XLogRecordOld *)rechdr)->xl_info : ((XLogRecord *)rechdr)->xl_info) == XLOG_SWITCH); - if (isLogSwitch || isupgrade) { - return XLogInsertRecordSingle(rdata, fpw_lsn, isupgrade); + bool isLogSwitch = (rechdr->xl_rmid == RM_XLOG_ID && rechdr->xl_info == XLOG_SWITCH); + if (isLogSwitch) { + return XLogInsertRecordSingle(rdata, fpw_lsn); } else { return XLogInsertRecordGroup(rdata, fpw_lsn); } #else - return XLogInsertRecordSingle(rdata, fpw_lsn, isupgrade); + return XLogInsertRecordSingle(rdata, fpw_lsn); #endif /* __aarch64__ */ } @@ -1700,17 +1443,16 @@ loop1: * segment, *StartPos and *EndPos are set to the current location without * reserving any space, and the function returns false. */ -static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr, bool isupgrade) +static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr) { volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; uint64 startbytepos; uint64 endbytepos; uint32 prevbytesize; int32 currlrc; - uint32 size = isupgrade ? MAXALIGN(SizeOfXLogRecordOld) : MAXALIGN(SizeOfXLogRecord); + uint32 size = MAXALIGN(SizeOfXLogRecord); XLogRecPtr ptr; uint32 segleft; - uint32 freespace; /* * These calculations are a bit heavy-weight to be done while holding a @@ -1726,27 +1468,6 @@ static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecP loop: startbytepos = compare.u64[0]; - if (isupgrade) { - ptr = XLogBytePosToRecPtr(startbytepos); - freespace = INSERT_FREESPACE(ptr); - - if (freespace < SizeOfXLogRecordOld) { - ereport(LOG, (errmsg("The switch xlog need not be inserted in upgrade " - "if the position is in the last 24 Bytes of xlog page."))); - startbytepos += (XLOG_BLCKSZ - (ptr % XLOG_BLCKSZ)); - exchange.u64[0] = startbytepos; - exchange.u64[1] = compare.u64[1]; - - current = atomic_compare_and_swap_u128((uint128_u *)&Insert->CurrBytePos, compare, exchange); - if (!UINT128_IS_EQUAL(compare, current)) { - UINT128_COPY(compare, current); - goto loop; - } - *EndPos = *StartPos = ptr; - return false; - } - } - ptr = XLogBytePosToEndRecPtr(startbytepos); if (ptr % XLOG_SEG_SIZE == 0) { *EndPos = *StartPos = ptr; @@ -1782,21 +1503,6 @@ loop: startbytepos = Insert->CurrBytePos; - if (isupgrade) { - ptr = XLogBytePosToRecPtr(startbytepos); - freespace = INSERT_FREESPACE(ptr); - - if (freespace < SizeOfXLogRecordOld) { - ereport(LOG, (errmsg("The switch xlog need not be inserted in upgrade if the position " - "is in the last 24 Bytes of xlog page."))); - startbytepos += (XLOG_BLCKSZ - (ptr % XLOG_BLCKSZ)); - Insert->CurrBytePos = startbytepos; - SpinLockRelease(&Insert->insertpos_lck); - *EndPos = *StartPos = ptr; - return false; - } - } - ptr = XLogBytePosToEndRecPtr(startbytepos); if (ptr % XLOG_SEG_SIZE == 0) { SpinLockRelease(&Insert->insertpos_lck); @@ -2057,7 +1763,7 @@ static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rd if (isLogSwitch && CurrPos % XLOG_SEG_SIZE != 0) { int currlrc = *currlrc_ptr; /* An xlog-switch record doesn't contain any data besides the header */ - Assert(write_len == SizeOfXLogRecord || write_len == SizeOfXLogRecordOld); + Assert(write_len == SizeOfXLogRecord); /* * We do this one page at a time, to make sure we don't deadlock @@ -2239,8 +1945,7 @@ static char *GetXLogBuffer(XLogRecPtr ptr, PGPROC *proc) t_thrd.xlog_cxt.cachedPage = ptr / XLOG_BLCKSZ; t_thrd.xlog_cxt.cachedPos = t_thrd.shemem_ptr_cxt.XLogCtl->pages + idx * (Size)XLOG_BLCKSZ; - 
Assert((((XLogPageHeader)t_thrd.xlog_cxt.cachedPos)->xlp_magic == XLOG_PAGE_MAGIC) || - (((XLogPageHeader)t_thrd.xlog_cxt.cachedPos)->xlp_magic == XLOG_PAGE_MAGIC_OLD)); + Assert((((XLogPageHeader)t_thrd.xlog_cxt.cachedPos)->xlp_magic == XLOG_PAGE_MAGIC)); Assert(((XLogPageHeader)t_thrd.xlog_cxt.cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ) || (((((XLogPageHeader)t_thrd.xlog_cxt.cachedPos)->xlp_pageaddr >> 32) | (((XLogPageHeader)t_thrd.xlog_cxt.cachedPos)->xlp_pageaddr << 32))) == ptr - (ptr % XLOG_BLCKSZ)); @@ -2869,12 +2574,14 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) int startidx = 0; uint32 startoffset = 0; Size actualBytes; + XLogRecPtr lastWrited = InvalidXLogRecPtr; /* We should always be inside a critical section here */ Assert(t_thrd.int_cxt.CritSectionCount > 0); // Update local LogwrtResult (caller probably did this already, but...) *t_thrd.xlog_cxt.LogwrtResult = t_thrd.shemem_ptr_cxt.XLogCtl->LogwrtResult; + lastWrited = t_thrd.xlog_cxt.LogwrtResult->Write; /* * Since successive pages in the xlog cache are consecutively allocated, @@ -3016,6 +2723,12 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) t_thrd.xlog_cxt.openLogOff, (unsigned long)nbytes))); } + if (g_instance.wal_cxt.xlogFlushStats->statSwitch) { + ++g_instance.wal_cxt.xlogFlushStats->writeTimes; + g_instance.wal_cxt.xlogFlushStats->totalActualXlogSyncBytes += actualBytes; + g_instance.wal_cxt.xlogFlushStats->totalWriteTime += elapsedTime; + } + reportRedoWrite((PgStat_Counter)npages, elapsedTime); /* Update state for write */ @@ -3039,7 +2752,20 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) * checkpoint. */ if (finishing_seg) { + instr_time startTime; + instr_time endTime; + uint64 elapsedTime; + INSTR_TIME_SET_CURRENT(startTime); issue_xlog_fsync(t_thrd.xlog_cxt.openLogFile, t_thrd.xlog_cxt.openLogSegNo); + INSTR_TIME_SET_CURRENT(endTime); + INSTR_TIME_SUBTRACT(endTime, startTime); + elapsedTime = INSTR_TIME_GET_MICROSEC(endTime); + /* Add statistics */ + if (g_instance.wal_cxt.xlogFlushStats->statSwitch) { + ++g_instance.wal_cxt.xlogFlushStats->syncTimes; + g_instance.wal_cxt.xlogFlushStats->totalSyncTime += elapsedTime; + } + /* signal that we need to wakeup walsenders later */ WalSndWakeupRequest(); t_thrd.xlog_cxt.LogwrtResult->Flush = t_thrd.xlog_cxt.LogwrtResult->Write; /* end of page */ @@ -3099,7 +2825,18 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) t_thrd.xlog_cxt.openLogFile = XLogFileOpen(t_thrd.xlog_cxt.openLogSegNo); t_thrd.xlog_cxt.openLogOff = 0; } + instr_time startTime; + instr_time endTime; + uint64 elapsedTime; + INSTR_TIME_SET_CURRENT(startTime); issue_xlog_fsync(t_thrd.xlog_cxt.openLogFile, t_thrd.xlog_cxt.openLogSegNo); + INSTR_TIME_SET_CURRENT(endTime); + INSTR_TIME_SUBTRACT(endTime, startTime); + elapsedTime = INSTR_TIME_GET_MICROSEC(endTime); + if (g_instance.wal_cxt.xlogFlushStats->statSwitch) { + ++g_instance.wal_cxt.xlogFlushStats->syncTimes; + g_instance.wal_cxt.xlogFlushStats->totalSyncTime += elapsedTime; + } } /* signal that we need to wakeup walsenders later */ WalSndWakeupRequest(); @@ -3129,8 +2866,14 @@ static void XLogWrite(const XLogwrtRqst &WriteRqst, bool flexible) } SpinLockRelease(&xlogctl->info_lck); } + + if (g_instance.wal_cxt.xlogFlushStats->statSwitch) { + g_instance.wal_cxt.xlogFlushStats->totalXlogSyncBytes += (t_thrd.xlog_cxt.LogwrtResult->Write - lastWrited); + g_instance.wal_cxt.xlogFlushStats->currOpenXlogSegNo = t_thrd.xlog_cxt.openLogSegNo; + } } + /* * 
Record the LSN for an asynchronous transaction commit/abort * and nudge the WALWriter if there is work for it to do. @@ -3575,6 +3318,16 @@ bool XLogBackgroundFlush(void) next_entry_idx = start_entry_idx; next_entry_ptr = start_entry_ptr; + uint64 stTime = GetCurrentTimestamp(); + uint64 startLSN = start_entry_ptr->endLSN; + uint64 totalXlogIterBytes = g_instance.wal_cxt.totalXlogIterBytes; + uint64 totalXlogIterTimes = g_instance.wal_cxt.totalXlogIterTimes; + + // Calculate the current average xlog flush length, used to judge whether to sleep and + // accumulate more xlog when the first uncopied record is found in the current loop. + uint64 averageXlogFlushBytes = (totalXlogIterTimes == 0) ? 0 : totalXlogIterBytes / totalXlogIterTimes; + uint64 curAverageXlogFlushBytes = (averageXlogFlushBytes == 0) ? XLOG_FLUSH_SIZE_INIT : + (averageXlogFlushBytes / PAGE_SIZE_BYTES + 1) * PAGE_SIZE_BYTES; do { curr_entry_ptr = next_entry_ptr; curr_entry_idx = next_entry_idx; @@ -3595,15 +3348,24 @@ bool XLogBackgroundFlush(void) prev_entry_idx = curr_entry_idx; } #endif - - /* - * Flush if we have accumulate enough bytes or till the LSN in the entry before + if (((curr_entry_ptr->LRC + 1) & 0x7FFFFFFF) != next_entry_ptr->LRC) { + break; + } + /* + * Flush if we have accumulated enough bytes, or up to the LSN in the entry before * an entry associated with the first uncopied record found in the current loop. */ if (next_entry_ptr->status == WAL_NOT_COPIED) { - break; + if (((curr_entry_ptr->endLSN - startLSN) > curAverageXlogFlushBytes) || + (GetCurrentTimestamp() - stTime >= (uint64)g_instance.attr.attr_storage.wal_flush_timeout)) { + break; + } + pg_usleep(g_instance.attr.attr_storage.wal_flush_delay); + next_entry_idx = curr_entry_idx; + next_entry_ptr = curr_entry_ptr; + entry_count--; } - } while (((curr_entry_ptr->LRC + 1) & 0x7FFFFFFF) == next_entry_ptr->LRC); + } while (true); /* update continuous LRC entries that have been copied without a hole */ g_instance.wal_cxt.lastLRCScanned = curr_entry_ptr->LRC; @@ -3660,6 +3422,8 @@ bool XLogBackgroundFlush(void) g_instance.wal_cxt.lastWalStatusEntryFlushed = curr_entry_idx; g_instance.wal_cxt.lastLRCFlushed = curr_entry_ptr->LRC; + g_instance.wal_cxt.totalXlogIterTimes++; + g_instance.wal_cxt.totalXlogIterBytes += (curr_entry_ptr->endLSN - startLSN); /* * Update LRC and status in the status entry that is already flushed (from start to current entry). 
* All WAL insert threads need to examine both of the LRC and the status @@ -4078,6 +3842,9 @@ void PreInitXlogFileForPrimary(int advance_xlog_file_num) g_xlog_stat_shared->walAuxWakeNum++; + if (g_instance.wal_cxt.globalEndPosSegNo == InvalidXLogSegPtr) { + return; + } startSegNo = g_instance.wal_cxt.globalEndPosSegNo + 1; target = startSegNo + advance_xlog_file_num - 1; for (nextSegNo = startSegNo; nextSegNo <= target; nextSegNo++) { @@ -5089,8 +4856,6 @@ static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) struct dirent *xlde = NULL; char lastoff[MAXFNAMELEN]; errno_t errorno = EOK; - int64 old_sync_request; - int64 new_sync_request; xldir = AllocateDir(XLOGDIR); if (xldir == NULL) { @@ -5106,7 +4871,6 @@ static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) ereport(LOG, (errmsg("attempting to remove WAL segments older than log file %s", lastoff))); gstrace_entry(GS_TRC_ID_RemoveOldXlogFiles); - old_sync_request = CheckPointGetFsyncRequset(); while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { /* Ignore files that are not XLOG segments */ if (strlen(xlde->d_name) != 24 || strspn(xlde->d_name, "0123456789ABCDEF") != 24) { @@ -5132,11 +4896,6 @@ static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) RemoveXlogFile(xlde->d_name, endptr); } } - new_sync_request = CheckPointGetFsyncRequset(); - if (new_sync_request != old_sync_request) { - CheckPointSyncWithAbsorption(); - old_sync_request = new_sync_request; - } } FreeDir(xldir); gstrace_data(1, GS_TRC_ID_RemoveOldXlogFiles, TRC_DATA_FMT_DFLT, lastoff, MAXFNAMELEN); @@ -5300,15 +5059,14 @@ static void CleanupBackupHistory(void) inline static XLogReaderState *ReadNextRecordFromQueue(int emode) { char *errormsg = NULL; - bool readoldversion = false; extreme_rto::SPSCBlockingQueue *linequeue = extreme_rto::g_dispatcher->readLine.readPageThd->queue; XLogReaderState *xlogreader = NULL; do { xlogreader = (XLogReaderState *)extreme_rto::SPSCBlockingQueueTake(linequeue); if (!xlogreader->isDecode) { XLogRecord *record = (XLogRecord *)xlogreader->readRecordBuf; - ; - if (!DecodeXLogRecord(xlogreader, record, &errormsg, readoldversion)) { + GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_5]); + if (!DecodeXLogRecord(xlogreader, record, &errormsg)) { ereport(emode, (errmsg("ReadNextRecordFromQueue %X/%X decode error, %s", (uint32)(xlogreader->EndRecPtr >> 32), (uint32)(xlogreader->EndRecPtr), errormsg))); @@ -5319,6 +5077,7 @@ inline static XLogReaderState *ReadNextRecordFromQueue(int emode) xlogreader = NULL; } + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_5]); } if ((void *)xlogreader == (void *)&(extreme_rto::g_GlobalLsnForwarder.record) || @@ -5342,7 +5101,6 @@ static XLogRecord *ReadNextXLogRecord(XLogReaderState **xlogreaderptr, int emode *xlogreaderptr = xlogreader; t_thrd.xlog_cxt.ReadRecPtr = xlogreader->ReadRecPtr; t_thrd.xlog_cxt.EndRecPtr = xlogreader->EndRecPtr; - g_instance.comm_cxt.predo_cxt.redoPf.read_ptr = t_thrd.xlog_cxt.ReadRecPtr; record = (XLogRecord *)xlogreader->readRecordBuf; } else { *xlogreaderptr = &extreme_rto::g_redoEndMark.record; @@ -5366,8 +5124,7 @@ static XLogRecord *ReadNextXLogRecord(XLogReaderState **xlogreaderptr, int emode * The record is copied into readRecordBuf, so that on successful return, * the returned record pointer always points there. 
*/ -static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode, bool fetching_ckpt, - bool readoldversion) +static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode, bool fetching_ckpt) { XLogRecord *record = NULL; uint32 streamFailCount = 0; @@ -5384,14 +5141,11 @@ static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, in for (;;) { char *errormsg = NULL; - record = XLogReadRecord(xlogreader, RecPtr, &errormsg, readoldversion); + record = XLogReadRecord(xlogreader, RecPtr, &errormsg); t_thrd.xlog_cxt.ReadRecPtr = xlogreader->ReadRecPtr; t_thrd.xlog_cxt.EndRecPtr = xlogreader->EndRecPtr; g_instance.comm_cxt.predo_cxt.redoPf.read_ptr = t_thrd.xlog_cxt.ReadRecPtr; - if (((XLogPageHeader)xlogreader->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } if (record == NULL) { if (t_thrd.xlog_cxt.readFile >= 0) { close(t_thrd.xlog_cxt.readFile); @@ -5530,7 +5284,7 @@ static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, in } } -int ParallelReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen, bool readoldversion) +int ParallelReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen) { int readLen; uint32 targetPageOff; @@ -5568,10 +5322,6 @@ int ParallelReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int req hdr = (XLogPageHeader)state->readBuf; - if (hdr->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - /* still not enough */ if (readLen < (int)XLogPageHeaderSize(hdr)) { readLen = ParallelXLogPageRead(state, pageptr, XLogPageHeaderSize(hdr), state->currRecPtr, &state->readPageTLI); @@ -5583,7 +5333,7 @@ int ParallelReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int req /* * Now that we know we have the full header, validate it. */ - if (!ValidXLogPageHeader(state, pageptr, hdr, readoldversion)) { + if (!ValidXLogPageHeader(state, pageptr, hdr)) { goto err; } @@ -5599,8 +5349,7 @@ err: return -1; } -XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg, bool readoldversion, - bool doDecode) +XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) { XLogRecord *record = NULL; XLogRecPtr targetPagePtr; @@ -5623,20 +5372,10 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * *errormsg = NULL; state->errormsg_buf[0] = '\0'; - ResetDecoder(state); - - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - if (XLByteEQ(RecPtr, InvalidXLogRecPtr)) { /* No explicit start point; read the record after the one we just read */ RecPtr = state->EndRecPtr; - if (readoldversion && (XLOG_BLCKSZ - RecPtr % XLOG_BLCKSZ) < SizeOfXLogRecordOld) { - NextLogPage(RecPtr); - } - if (XLByteEQ(state->ReadRecPtr, InvalidXLogRecPtr)) randAccess = true; @@ -5669,18 +5408,12 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * * enough byte to cover the whole record header, or at least the part of * it that fits on the same page. */ - readOff = ParallelReadPageInternal(state, targetPagePtr, Min(targetRecOff + SizeOfXLogRecord, XLOG_BLCKSZ), - readoldversion); + readOff = ParallelReadPageInternal(state, targetPagePtr, Min(targetRecOff + SizeOfXLogRecord, XLOG_BLCKSZ)); if (readOff < 0) { report_invalid_record(state, "read xlog page failed at %X/%X", (uint32)(RecPtr >> 32), (uint32)RecPtr); goto err; } - /* The page has been read. 
Check the XLOG version again. */ - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - /* * ReadPageInternal always returns at least the page header, so we can * examine it now. @@ -5725,15 +5458,15 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * * record" code path below; otherwise we might fail to apply * ValidXLogRecordHeader at all. */ - if (targetRecOff <= XLOG_BLCKSZ - (readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord)) { + if (targetRecOff <= XLOG_BLCKSZ - SizeOfXLogRecord) { if (!ValidXLogRecordHeader(state, RecPtr, state->ReadRecPtr, record, randAccess)) goto err; gotheader = true; } else { /* more validation should be done here */ - if (total_len < (readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord) || total_len >= XLogRecordMaxSize) { + if (total_len < SizeOfXLogRecord || total_len >= XLogRecordMaxSize) { report_invalid_record(state, "invalid record length at %X/%X: wanted %u, got %u", (uint32)(RecPtr >> 32), - (uint32)RecPtr, (uint32)(readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord), + (uint32)RecPtr, (uint32)(SizeOfXLogRecord), total_len); goto err; } @@ -5759,7 +5492,7 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * uint32 gotlen; errno_t errorno = EOK; - readOff = ParallelReadPageInternal(state, targetPagePtr, XLOG_BLCKSZ, readoldversion); + readOff = ParallelReadPageInternal(state, targetPagePtr, XLOG_BLCKSZ); if (readOff < 0) { goto err; } @@ -5776,8 +5509,7 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * /* Wait for the next page to become available */ readOff = ParallelReadPageInternal(state, targetPagePtr, - Min(total_len - gotlen + SizeOfXLogShortPHD, XLOG_BLCKSZ), - readoldversion); + Min(total_len - gotlen + SizeOfXLogShortPHD, XLOG_BLCKSZ)); if (readOff < 0) goto err; @@ -5804,7 +5536,7 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * /* Append the continuation from this page to the buffer */ pageHeaderSize = XLogPageHeaderSize(pageHeader); if (readOff < (int)pageHeaderSize) - readOff = ParallelReadPageInternal(state, targetPagePtr, pageHeaderSize, readoldversion); + readOff = ParallelReadPageInternal(state, targetPagePtr, pageHeaderSize); Assert((int)pageHeaderSize <= readOff); @@ -5814,7 +5546,7 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * len = pageHeader->xlp_rem_len; if (readOff < (int)(pageHeaderSize + len)) - readOff = ParallelReadPageInternal(state, targetPagePtr, pageHeaderSize + len, readoldversion); + readOff = ParallelReadPageInternal(state, targetPagePtr, pageHeaderSize + len); errorno = memcpy_s(buffer, total_len - gotlen, (char *)contdata, len); securec_check_c(errorno, "", ""); @@ -5842,8 +5574,7 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * XLByteAdvance(state->EndRecPtr, (pageHeaderSize + MAXALIGN(pageHeader->xlp_rem_len))); } else { /* Wait for the record data to become available */ - readOff = ParallelReadPageInternal(state, targetPagePtr, Min(targetRecOff + total_len, XLOG_BLCKSZ), - readoldversion); + readOff = ParallelReadPageInternal(state, targetPagePtr, Min(targetRecOff + total_len, XLOG_BLCKSZ)); if (readOff < 0) { goto err; } @@ -5864,27 +5595,13 @@ XLogRecord *ParallelReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char * /* * Special processing if it's an XLOG SWITCH record */ - - if (((XLogPageHeader)state->readBuf)->xlp_magic == 
XLOG_PAGE_MAGIC) { - readoldversion = false; - } - - if ((readoldversion ? ((XLogRecordOld *)record)->xl_rmid : ((XLogRecord *)record)->xl_rmid) == RM_XLOG_ID && - (readoldversion ? ((XLogRecordOld *)record)->xl_info : ((XLogRecord *)record)->xl_info) == XLOG_SWITCH) { + if (record->xl_rmid == RM_XLOG_ID && record->xl_info == XLOG_SWITCH) { /* Pretend it extends to end of segment */ state->EndRecPtr += XLogSegSize - 1; state->EndRecPtr -= state->EndRecPtr % XLogSegSize; } - if (doDecode) { - if (DecodeXLogRecord(state, record, errormsg, readoldversion)) { - return record; - } else - return NULL; - } else { - return record; - } - + return record; err: /* @@ -5935,7 +5652,7 @@ XLogRecord *XLogParallelReadNextRecord(XLogReaderState *xlogreader) for (;;) { char *errormsg = NULL; - record = ParallelReadRecord(xlogreader, InvalidXLogRecPtr, &errormsg, false, false); + record = ParallelReadRecord(xlogreader, InvalidXLogRecPtr, &errormsg); t_thrd.xlog_cxt.ReadRecPtr = xlogreader->ReadRecPtr; t_thrd.xlog_cxt.EndRecPtr = xlogreader->EndRecPtr; g_instance.comm_cxt.predo_cxt.redoPf.read_ptr = t_thrd.xlog_cxt.ReadRecPtr; @@ -6011,7 +5728,6 @@ XLogRecord *XLogParallelReadNextRecord(XLogReaderState *xlogreader) t_thrd.xlog_cxt.StandbyMode = true; /* construct a minrecoverypoint, update LSN */ UpdateMinrecoveryInAchive(); - CheckRecoveryConsistency(); /* * Before we retry, reset lastSourceFailed and currentSource * so that we will check the archive next. @@ -6037,7 +5753,7 @@ XLogRecord *XLogParallelReadNextRecord(XLogReaderState *xlogreader) * timeline has no parents, and return a list of just the specified timeline * ID. */ -static List *readTimeLineHistory(TimeLineID targetTLI) +List *readTimeLineHistory(TimeLineID targetTLI) { List *result = NIL; char path[MAXPGPATH]; @@ -6269,7 +5985,7 @@ static TimeLineID findNewestTimeLine(TimeLineID startTLI) * ReadControlFile() verifies they are correct. We could split out the * I/O and compatibility-check functions, but there seems no need currently. 
*/ -static void WriteControlFile(void) +STATIC void WriteControlFile(void) { int fd = -1; char buffer[PG_CONTROL_SIZE]; /* need not be aligned */ @@ -6362,7 +6078,7 @@ static void WriteControlFile(void) } } -static void ReadControlFile(void) +STATIC void ReadControlFile(void) { pg_crc32c crc; int fd = -1; @@ -6450,9 +6166,8 @@ loop: } if (t_thrd.shemem_ptr_cxt.ControlFile->maxAlign != MAXIMUM_ALIGNOF) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with MAXALIGN %u (old version), %u," + errdetail("The database cluster was initialized with MAXALIGN %u," " but the server was compiled with MAXALIGN %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->maxAlign, t_thrd.shemem_ptr_cxt.ControlFile->maxAlign, MAXIMUM_ALIGNOF), errhint("It looks like you need to gs_initdb."))); } @@ -6465,57 +6180,50 @@ loop: } if (t_thrd.shemem_ptr_cxt.ControlFile->blcksz != BLCKSZ) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with BLCKSZ %u (old version), %u" + errdetail("The database cluster was initialized with BLCKSZ %u" " but the server was compiled with BLCKSZ %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->blcksz, t_thrd.shemem_ptr_cxt.ControlFile->blcksz, BLCKSZ), errhint("It looks like you need to recompile or gs_initdb."))); } if (t_thrd.shemem_ptr_cxt.ControlFile->relseg_size != RELSEG_SIZE) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with RELSEG_SIZE %u (old version), %u," + errdetail("The database cluster was initialized with RELSEG_SIZE %u," " but the server was compiled with RELSEG_SIZE %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->relseg_size, t_thrd.shemem_ptr_cxt.ControlFile->relseg_size, RELSEG_SIZE), errhint("It looks like you need to recompile or gs_initdb."))); } if (t_thrd.shemem_ptr_cxt.ControlFile->xlog_blcksz != XLOG_BLCKSZ) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with XLOG_BLCKSZ %u (old version), %u," + errdetail("The database cluster was initialized with XLOG_BLCKSZ %u," " but the server was compiled with XLOG_BLCKSZ %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->xlog_blcksz, t_thrd.shemem_ptr_cxt.ControlFile->xlog_blcksz, XLOG_BLCKSZ), errhint("It looks like you need to recompile or gs_initdb."))); } if (t_thrd.shemem_ptr_cxt.ControlFile->xlog_seg_size != XLOG_SEG_SIZE) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d (old version), %u," + errdetail("The database cluster was initialized with XLOG_SEG_SIZE %u," " but the server was compiled with XLOG_SEG_SIZE %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->xlog_seg_size, t_thrd.shemem_ptr_cxt.ControlFile->xlog_seg_size, XLOG_SEG_SIZE), errhint("It looks like you need to recompile or gs_initdb."))); } if (t_thrd.shemem_ptr_cxt.ControlFile->nameDataLen != NAMEDATALEN) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with NAMEDATALEN %u (old version), %u," + errdetail("The database cluster was initialized with NAMEDATALEN %u," " but the server was compiled with NAMEDATALEN %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->nameDataLen, 
t_thrd.shemem_ptr_cxt.ControlFile->nameDataLen, NAMEDATALEN), errhint("It looks like you need to recompile or gs_initdb."))); } if (t_thrd.shemem_ptr_cxt.ControlFile->indexMaxKeys != INDEX_MAX_KEYS) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with INDEX_MAX_KEYS %u (old version), %u" + errdetail("The database cluster was initialized with INDEX_MAX_KEYS %u" " but the server was compiled with INDEX_MAX_KEYS %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->indexMaxKeys, t_thrd.shemem_ptr_cxt.ControlFile->indexMaxKeys, INDEX_MAX_KEYS), errhint("It looks like you need to recompile or gs_initdb."))); } if (t_thrd.shemem_ptr_cxt.ControlFile->toast_max_chunk_size != TOAST_MAX_CHUNK_SIZE) { ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %u (old version), %u," + errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %u," " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.", - ((ControlFileDataOld *)t_thrd.shemem_ptr_cxt.ControlFile)->toast_max_chunk_size, t_thrd.shemem_ptr_cxt.ControlFile->toast_max_chunk_size, (int)TOAST_MAX_CHUNK_SIZE), errhint("It looks like you need to recompile or gs_initdb."))); } @@ -6776,27 +6484,6 @@ bool check_wal_buffers(int *newval, void **extra, GucSource source) return true; } -/* - * GUC check_hook for wal_insert_status_entries - */ -bool check_wal_insert_status_entries(int *newval, void **extra, GucSource source) -{ - // if newval not power of 2 - if (!((*newval != 0) && ((*newval & (*newval - 1)) == 0))) { - // get next Power Of 2 form newval - unsigned count = 0; - unsigned n = *newval; - while( n != 0) - { - n >>= 1; - count += 1; - } - *newval = 1 << count; - } - - return true; -} - /* * Initialization of shared memory for XLOG */ @@ -6985,7 +6672,6 @@ void XLOGShmemInit(void) static XLogSegNo GetOldestXLOGSegNo(const char *workingPath) { -#define XLOGFILENAMELEN 24 DIR *xlogDir = NULL; struct dirent *dirEnt = NULL; char xlogDirStr[MAXPGPATH] = {0}; @@ -7023,6 +6709,45 @@ static XLogSegNo GetOldestXLOGSegNo(const char *workingPath) return segno; } +XLogSegNo GetNewestXLOGSegNo(const char *workingPath) +{ + DIR *xlogDir = NULL; + struct dirent *dirEnt = NULL; + char xlogDirStr[MAXPGPATH] = {0}; + char newestXLogFileName[MAXPGPATH] = {0}; + TimeLineID tli = 0; + uint32 xlogReadLogid = -1; + uint32 xlogReadLogSeg = -1; + XLogSegNo segno; + errno_t rc = EOK; + + rc = snprintf_s(xlogDirStr, MAXPGPATH, MAXPGPATH - 1, "%s/%s", workingPath, XLOGDIR); + securec_check_ss(rc, "", ""); + xlogDir = opendir(xlogDirStr); + if (!xlogDir) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open xlog dir in GetNewestXLOGSegNo."))); + } + while ((dirEnt = readdir(xlogDir)) != NULL) { + if (strlen(dirEnt->d_name) == XLOGFILENAMELEN && + strspn(dirEnt->d_name, "0123456789ABCDEF") == XLOGFILENAMELEN) { + if (strlen(newestXLogFileName) == 0 || strcmp(dirEnt->d_name, newestXLogFileName) > 0) { + rc = strncpy_s(newestXLogFileName, MAXPGPATH - 1, dirEnt->d_name, strlen(dirEnt->d_name) + 1); + securec_check_ss(rc, "", ""); + newestXLogFileName[strlen(dirEnt->d_name)] = '\0'; + } + } + } + + (void)closedir(xlogDir); + + if (sscanf_s(newestXLogFileName, "%08X%08X%08X", &tli, &xlogReadLogid, &xlogReadLogSeg) != 3) { + ereport(ERROR, (errcode_for_file_access(), errmsg("failed to translate name to xlog in GetNewestXLOGSegNo."))); + } + segno = 
(uint64)xlogReadLogid * XLogSegmentsPerXLogId + xlogReadLogSeg; + + return segno; +} + static uint64 GetMACAddr(void) { macaddr mac; @@ -7157,6 +6882,7 @@ void BootStrapXLOG(void) t_thrd.xact_cxt.ShmemVariableCache->oidCount = 0; t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo = COMMITSEQNO_FIRST_NORMAL + 1; + t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo - 1; latestCompletedXid = checkPoint.nextXid; TransactionIdRetreat(latestCompletedXid); t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid = latestCompletedXid; @@ -7406,6 +7132,7 @@ static void readRecoveryCommandFile(void) #endif } else if (strcmp(item->name, "recovery_target_time_obs") == 0) { t_thrd.xlog_cxt.recoveryTarget = RECOVERY_TARGET_TIME_OBS; + g_instance.roach_cxt.targetTimeInPITR = pstrdup(item->value); /* * Convert the time string given by the user to TimestampTz form. @@ -7699,8 +7426,19 @@ static void RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI) * @Description: After date restore, truncate XLOG after barrier lsn of standby DN. * off - The offset of the XLOG log needs to be cleared . */ -void TruncateAndRemoveXLogForRoachRestore(const char *XLogFileName, uint32 off) +void TruncateAndRemoveXLogForRoachRestore(XLogReaderState *record) { + if (XLogRecPtrIsInvalid(t_thrd.shemem_ptr_cxt.ControlFile->minRecoveryPoint) || + XLByteLT(record->EndRecPtr, t_thrd.xlog_cxt.minRecoveryPoint) || + XLogRecPtrIsValid(t_thrd.shemem_ptr_cxt.ControlFile->backupStartPoint)) { + ereport(FATAL, (errmsg("truncate xlog LSN is before consistent recovery point"))); + } + uint32 xlogOff; + XLogSegNo xlogsegno; + char xlogFileName[1024] = {0}; + XLByteToSeg(record->EndRecPtr, xlogsegno); + XLogFileName(xlogFileName, MAXFNAMELEN, record->readPageTLI, xlogsegno); + xlogOff = (uint32)(record->EndRecPtr) % XLogSegSize; int fd = 0; char *writeContent = NULL; uint32 truncateLength = 0; @@ -7708,7 +7446,7 @@ void TruncateAndRemoveXLogForRoachRestore(const char *XLogFileName, uint32 off) errno_t rc = EOK; rc = snprintf_s(XLogFilePath, MAX_PATH_LEN, MAX_PATH_LEN - 1, "%s/pg_xlog/%s", t_thrd.proc_cxt.DataDir, - XLogFileName); + xlogFileName); securec_check_ss(rc, "", ""); fd = BasicOpenFile(XLogFilePath, @@ -7718,14 +7456,14 @@ void TruncateAndRemoveXLogForRoachRestore(const char *XLogFileName, uint32 off) ereport(FATAL, (errcode_for_file_access(), errmsg("could not open file \"%s\"", XLogFilePath))); } - if (lseek(fd, (off_t)off, SEEK_SET) < 0) { + if (lseek(fd, (off_t)xlogOff, SEEK_SET) < 0) { fprintf(stderr, "lseek error !\n"); close(fd); ereport(ERROR, (errcode_for_file_access(), errmsg("lseek file error \"%s\" ", XLogFilePath))); return; } - truncateLength = XLogSegSize - off; + truncateLength = XLogSegSize - xlogOff; writeContent = (char *)palloc0(truncateLength); if (write(fd, writeContent, truncateLength) != truncateLength) { close(fd); @@ -7743,7 +7481,8 @@ void TruncateAndRemoveXLogForRoachRestore(const char *XLogFileName, uint32 off) ereport(ERROR, (errcode_for_file_access(), errmsg("could not close file \"%s\" ", XLogFilePath))); } pfree(writeContent); - if ((IS_DISASTER_RECOVER_MODE == false) && (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE == false)) + if (((IS_OBS_DISASTER_RECOVER_MODE == false) && (IS_DISASTER_RECOVER_MODE == false)) || + (t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_TIME_OBS)) durable_rename(RECOVERY_COMMAND_FILE, RECOVERY_COMMAND_DONE, FATAL); DIR *xldir = NULL; @@ -7761,7 +7500,7 @@ void TruncateAndRemoveXLogForRoachRestore(const char 
*XLogFileName, uint32 off) continue; } - if (strcmp(xlde->d_name, XLogFileName) > 0) { + if (strcmp(xlde->d_name, xlogFileName) > 0) { rc = snprintf_s(XLogFilePath, MAX_PATH_LEN, MAX_PATH_LEN - 1, XLOGDIR "/%s", xlde->d_name); securec_check_ss(rc, "", ""); rc = unlink(XLogFilePath); @@ -7782,6 +7521,24 @@ static bool xlogCanStop(XLogReaderState *record) XLogRecGetRmid(record) != RM_XLOG_ID); } +#ifndef ENABLE_LITE_MODE +static void DropAllRecoverySlotForPitr() +{ + List *all_archive_slots = NIL; + all_archive_slots = GetAllRecoverySlotsName(); + if (all_archive_slots == NIL || all_archive_slots->length == 0) { + return; + } + + foreach_cell(cell, all_archive_slots) { + char* slotname = (char*)lfirst(cell); + ReplicationSlotDrop(slotname, false); + } + list_free_deep(all_archive_slots); + return; +} +#endif + /* * For point-in-time recovery, this function decides whether we want to * stop applying the XLOG at or after the current record. @@ -7812,7 +7569,6 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) #endif char *recordBarrierId = NULL; char *stopBarrierId = NULL; - char xlogFileName[1024] = {0}; if (xlogCanStop(record)) { return false; } @@ -7837,13 +7593,17 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) } #endif else if (XLogRecGetRmid(record) == RM_BARRIER_ID) { +#ifndef ENABLE_LITE_MODE if (record_info == XLOG_BARRIER_CREATE) { recordBarrierId = (char *)XLogRecGetData(record); ereport(u_sess->attr.attr_storage.HaModuleDebug ? LOG : DEBUG4, (errmsg("processing barrier xlog record for %s", recordBarrierId))); - if (IS_DISASTER_RECOVER_MODE) + if (IS_OBS_DISASTER_RECOVER_MODE) update_recovery_barrier(); } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else if (XLogRecGetRmid(record) == RM_XLOG_ID) { if (record_info == XLOG_RESTORE_POINT) { xl_restore_point *recordRestorePointData = (xl_restore_point *)XLogRecGetData(record); @@ -7880,7 +7640,7 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) } /* Do we have a PITR target at all? 
*/ - if ((IS_DISASTER_RECOVER_MODE || IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) && + if ((IS_OBS_DISASTER_RECOVER_MODE || IS_DISASTER_RECOVER_MODE) && t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_UNSET) { // Save timestamp of latest transaction commit/abort if this is a // transaction record @@ -7914,8 +7674,54 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) *includeThis = t_thrd.xlog_cxt.recoveryTargetInclusive; } } else if (t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_TIME_OBS) { +#ifndef ENABLE_LITE_MODE stopsHere = false; - if ((XLogRecGetRmid(record) == RM_BARRIER_ID) && (record_info == XLOG_BARRIER_CREATE)) { + if (XLogRecGetRmid(record) == RM_XACT_ID && !g_instance.roach_cxt.isGtmFreeCsn) { + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + if (g_instance.roach_cxt.globalBarrierRecordForPITR != NULL && (info == XLOG_XACT_COMMIT || + info == XLOG_XACT_COMMIT_COMPACT || info == XLOG_XACT_COMMIT_PREPARED)) { + char* token = NULL; + char* lastPtr = NULL; + uint64 targetCsn = InvalidCommitSeqNo; + uint64 currCsn = InvalidCommitSeqNo; + long ts = 0; + if (strncmp(g_instance.roach_cxt.globalBarrierRecordForPITR, "csn_", 4) == 0) { + token = strtok_s(g_instance.roach_cxt.globalBarrierRecordForPITR, "-", &lastPtr); + if (sscanf_s(token, "csn_%021lu_%013ld", &targetCsn, &ts) != 2) { + ereport(PANIC, (errmsg("could not find recovery target csn!"))); + } + if (info == XLOG_XACT_COMMIT) { + xl_xact_commit* xlrec = (xl_xact_commit *)XLogRecGetData(record); + currCsn = xlrec->csn; + } else if (info == XLOG_XACT_COMMIT_COMPACT) { + xl_xact_commit_compact* xlrec = (xl_xact_commit_compact *)XLogRecGetData(record); + currCsn = xlrec->csn; + } else if (info == XLOG_XACT_COMMIT_PREPARED) { + xl_xact_commit_prepared* xlrec = (xl_xact_commit_prepared *)XLogRecGetData(record); + currCsn = xlrec->crec.csn; + } else { + ereport(PANIC, (errmsg("invalid xact record content for recovery target csn!"))); + } + if (currCsn > targetCsn) { + ereport(LOG, (errmsg("For PITR recovery, reached target csn %lu, current csn is %lu," + " and current xlog location is %08X/%08X.", targetCsn, currCsn, + (uint32)(record->EndRecPtr >> 32), (uint32)record->EndRecPtr))); + stopsHere = true; + *includeThis = true; + uint32 xlogOff; + pfree_ext(g_instance.roach_cxt.globalBarrierRecordForPITR); + pfree_ext(g_instance.roach_cxt.targetRestoreTimeFromMedia); + TruncateAndRemoveXLogForRoachRestore(record); + xlogOff = (uint32)(record->EndRecPtr) % XLOG_BLCKSZ; + if (xlogOff > 0 && xlogOff < XLOG_BLCKSZ) { + rc = memset_s(&record->readBuf[xlogOff], XLOG_BLCKSZ - xlogOff, 0, XLOG_BLCKSZ - xlogOff); + securec_check(rc, "", ""); + } + DropAllRecoverySlotForPitr(); + } + } + } + } else if ((XLogRecGetRmid(record) == RM_BARRIER_ID) && (record_info == XLOG_BARRIER_CREATE)) { ereport(DEBUG2, (errmsg("checking if barrier record matches the target obs time"))); Assert(t_thrd.xlog_cxt.obsRecoveryTargetTime != NULL); Assert(recordBarrierId != NULL); @@ -7923,32 +7729,35 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) ereport(PANIC, (errmsg("recovery target is RECOVERY_TARGET_TIME_OBS, but obsRecoveryTargetTime not set"))); } - if (strncmp(recordBarrierId, "hadr_", 5) == 0) { - char *time_str = strrchr(recordBarrierId, '_'); - time_str = time_str + 1; - if (strcmp((char *)t_thrd.xlog_cxt.obsRecoveryTargetTime, time_str) <= 0) { + if (strncmp(recordBarrierId, "hadr_", 5) == 0 || strncmp(recordBarrierId, "csn_", 4) == 0) { + char* barrierTime = strrchr(recordBarrierId, '_'); + barrierTime += 1; 
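/*
 * [Editor's note, not part of the patch] The new RM_XACT_ID branch above implements
 * CSN-based PITR: a barrier id of the form "csn_<21-digit CSN>_<13-digit timestamp>"
 * is parsed, and replay stops at the first commit record whose CSN exceeds the parsed
 * target. A minimal standalone sketch of that parse-and-compare step follows;
 * ParseCsnBarrier and ReachedCsnTarget are hypothetical names, and plain sscanf
 * stands in for the sscanf_s used in the hunk.
 */
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static bool ParseCsnBarrier(const char *barrierId, uint64_t *targetCsn, long *ts)
{
    /* Both fields must be recovered, mirroring the "!= 2" check above. */
    return sscanf(barrierId, "csn_%21" SCNu64 "_%13ld", targetCsn, ts) == 2;
}

static bool ReachedCsnTarget(uint64_t commitCsn, uint64_t targetCsn)
{
    /* Replay stops at the first commit strictly beyond the target CSN. */
    return commitCsn > targetCsn;
}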
+ if (g_instance.roach_cxt.targetRestoreTimeFromMedia != NULL && + strcmp(g_instance.roach_cxt.targetRestoreTimeFromMedia, barrierTime) == 0) { stopsAtThisBarrier = true; *includeThis = true; uint32 xlogOff; - XLogSegNo xlogsegno; ereport(u_sess->attr.attr_storage.HaModuleDebug ? LOG : DEBUG4, (errmsg("stop barrier success xlog record for %s", recordBarrierId))); + pfree_ext(g_instance.roach_cxt.globalBarrierRecordForPITR); + pfree_ext(g_instance.roach_cxt.targetRestoreTimeFromMedia); /* truncate XLOG after barrier time. */ - if (IS_DISASTER_RECOVER_MODE || IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { - XLByteToSeg(record->EndRecPtr, xlogsegno); - XLogFileName(xlogFileName, record->readPageTLI, xlogsegno); - xlogOff = (uint32)(record->EndRecPtr) % XLogSegSize; - - TruncateAndRemoveXLogForRoachRestore(xlogFileName, xlogOff); + if (IS_OBS_DISASTER_RECOVER_MODE || IS_DISASTER_RECOVER_MODE) { + TruncateAndRemoveXLogForRoachRestore(record); xlogOff = (uint32)(record->EndRecPtr) % XLOG_BLCKSZ; if (xlogOff > 0 && xlogOff < XLOG_BLCKSZ) { rc = memset_s(&record->readBuf[xlogOff], XLOG_BLCKSZ - xlogOff, 0, XLOG_BLCKSZ - xlogOff); securec_check(rc, "", ""); } + /* after recovery at target time, drop recovery slots */ + DropAllRecoverySlotForPitr(); } } } } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else if (t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_BARRIER) { stopsHere = false; if ((XLogRecGetRmid(record) == RM_BARRIER_ID) && (record_info == XLOG_BARRIER_CREATE)) { @@ -7957,16 +7766,12 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) strcmp(t_thrd.xlog_cxt.recoveryTargetBarrierId, recordBarrierId) == 0) || (recordBarrierId != NULL && strcmp((char *)walrcv->recoveryStopBarrierId, recordBarrierId) == 0)) { stopsAtThisBarrier = true; + *includeThis = true; uint32 xlogOff; - XLogSegNo xlogsegno; /* truncate XLOG after barrier lsn of standby DN. */ - if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE || IS_DISASTER_RECOVER_MODE) { - XLByteToSeg(record->EndRecPtr, xlogsegno); - XLogFileName(xlogFileName, record->readPageTLI, xlogsegno); - xlogOff = (uint32)(record->EndRecPtr) % XLogSegSize; - - TruncateAndRemoveXLogForRoachRestore(xlogFileName, xlogOff); + if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE || IS_OBS_DISASTER_RECOVER_MODE) { + TruncateAndRemoveXLogForRoachRestore(record); xlogOff = (uint32)(record->EndRecPtr) % XLOG_BLCKSZ; if (xlogOff > 0 && xlogOff < XLOG_BLCKSZ) { rc = memset_s(&record->readBuf[xlogOff], XLOG_BLCKSZ - xlogOff, 0, XLOG_BLCKSZ - xlogOff); @@ -8057,7 +7862,7 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) stopBarrierId = (char *)walrcv->recoverySwitchoverBarrierId; } ereport(LOG, (errmsg("recovery stopping at barrier %s, time %s", - IS_DISASTER_RECOVER_MODE ? stopBarrierId : t_thrd.xlog_cxt.recoveryTargetBarrierId, + IS_OBS_DISASTER_RECOVER_MODE ? 
stopBarrierId : t_thrd.xlog_cxt.recoveryTargetBarrierId, timestamptz_to_str(t_thrd.xlog_cxt.recoveryStopTime)))); return true; #endif @@ -8068,6 +7873,28 @@ static bool recoveryStopsHere(XLogReaderState *record, bool *includeThis) return stopsHere; } +bool RecoveryIsSuspend(void) +{ + /* use volatile pointer to prevent code rearrangement */ + volatile XLogCtlData *xlogctl = t_thrd.shemem_ptr_cxt.XLogCtl; + bool recoverySuspend = false; + + SpinLockAcquire(&xlogctl->info_lck); + recoverySuspend = xlogctl->recoverySusPend; + SpinLockRelease(&xlogctl->info_lck); + + return recoverySuspend; +} + +void SetRecoverySuspend(bool recoverySuspend) +{ + volatile XLogCtlData *xlogctl = t_thrd.shemem_ptr_cxt.XLogCtl; + + SpinLockAcquire(&xlogctl->info_lck); + xlogctl->recoverySusPend = recoverySuspend; + SpinLockRelease(&xlogctl->info_lck); +} + /* * Wait until shared recoveryPause flag is cleared. * @@ -8498,11 +8325,9 @@ void ResourceManagerStop(void) static void EndRedoXlog() { -#ifndef ENABLE_MULTIPLE_NODES if (IsExtremeRtoRunning()) { extreme_rto::CheckCommittingCsnList(); } -#endif if ((get_real_recovery_parallelism() > 1) && (!parallel_recovery::DispatchPtrIsNull())) { SwitchToDispatcherContext(); @@ -8994,11 +8819,6 @@ static void checkHadrInSwitchover() FILE *fd = NULL; char *switchoverFile = "cluster_maintance"; - /* Whether streaming DR is configured */ - if (!t_thrd.postmaster_cxt.HaShmData->is_cross_region) { - return; - } - fd = fopen(switchoverFile, "r"); if (fd == NULL) { @@ -9013,154 +8833,80 @@ static void checkHadrInSwitchover() return; } -static bool IsHadrAchiveInSwitchover() +void ExtremeRedoWaitRecoverySusPendFinish(XLogRecPtr lsn) { - errno_t rc = 0; - List *all_archive_slots = NIL; - all_archive_slots = GetAllArchiveSlotsName(); - if (all_archive_slots == NIL || all_archive_slots->length == 0) { - return false; - } - - char *slotname = (char *)lfirst((list_head(all_archive_slots))); - ArchiveSlotConfig *archive_conf = NULL; - archive_conf = getArchiveReplicationSlotWithName(slotname); - if (archive_conf == NULL) { - list_free_deep(all_archive_slots); - all_archive_slots = NULL; - return false; - } - - /* hadr only support OBS */ - if (archive_conf->archive_config.media_type != ARCHIVE_OBS) { - return false; - } - - char pathPrefix[MAXPGPATH] = {0}; - ArchiveConfig obsConfig; - /* copy OBS configs to temporary variable for customising file path */ - rc = memcpy_s(&obsConfig, sizeof(ArchiveConfig), &archive_conf->archive_config, sizeof(ArchiveConfig)); - securec_check(rc, "", ""); - - if (!IS_PGXC_COORDINATOR) { - rc = strcpy_s(pathPrefix, MAXPGPATH, obsConfig.archive_prefix); - securec_check_c(rc, "\0", "\0"); - - char *p = strrchr(pathPrefix, '/'); - if (p == NULL) { - list_free_deep(all_archive_slots); - all_archive_slots = NULL; - return false; + if (RecoveryIsSuspend()) { + pg_atomic_write_u64(&g_instance.startup_cxt.suspend_lsn, lsn); + pg_memory_barrier(); + while (RecoveryIsSuspend()) { + pg_usleep(ONE_SECOND_TO_MICROSECOND); /* sleep 1s wait file repair */ + RedoInterruptCallBack(); } - - *p = '\0'; - obsConfig.archive_prefix = pathPrefix; + pg_atomic_write_u64(&g_instance.startup_cxt.suspend_lsn, InvalidXLogRecPtr); } - - List *objectList = obsList(HADR_MASTER_CLUSTER_STAT_FILE, &obsConfig, false); - char masterClusterStat[MAX_DEFAULT_LENGTH] = {0}; - char switchoverBarrier[MAX_BARRIER_ID_LENGTH] = {0}; - if (objectList != NIL && objectList->length > 0) { - (void)obsRead(HADR_MASTER_CLUSTER_STAT_FILE, 0, masterClusterStat, MAX_DEFAULT_LENGTH, &obsConfig); - } - - 
objectList = obsList(HADR_SWITCHOVER_BARRIER_ID_FILE, &obsConfig, false); - if (objectList != NIL && objectList->length > 0) { - /* The HADR_SWITCHOVER_BARRIER_ID_FILE file indicates that it is in the process of switchover */ - g_instance.archive_obs_cxt.in_switchover = true; - (void)obsRead(HADR_SWITCHOVER_BARRIER_ID_FILE, 0, switchoverBarrier, MAX_BARRIER_ID_LENGTH, &obsConfig); - } - - ereport(LOG, (errmsg("===IsHadrAchiveInSwitchover===\n " - "masterClusterStat %s\n switchoverBarrier %s\n in_switchover %d", - masterClusterStat, switchoverBarrier, g_instance.archive_obs_cxt.in_switchover))); - - list_free_deep(all_archive_slots); - all_archive_slots = NULL; - - if (strncmp(masterClusterStat, HADR_SWITCHOVER_TO_STANDBY, strlen(HADR_SWITCHOVER_TO_STANDBY)) == 0 && - strncmp(switchoverBarrier, HADR_BARRIER_ID_HEAD, strlen(HADR_BARRIER_ID_HEAD)) == 0) { - ereport(LOG, (errmsg("[hadr_switchover]Cannot write xlogs " - "because service truncation has been completed."))); - return true; - } - - return false; } -static bool IsHadrRecoveryInSwitchoverOrfailover() +void handleRecoverySusPend(XLogRecPtr lsn) { - errno_t rc = 0; - List *all_archive_slots = NIL; - all_archive_slots = GetAllRecoverySlotsName(); - if (all_archive_slots == NIL || all_archive_slots->length == 0) { - return false; - } + if (RecoveryIsSuspend()) { + if (IsExtremeRedo()) { + extreme_rto::DispatchClosefdMarkToAllRedoWorker(); + extreme_rto::WaitAllReplayWorkerIdle(); + } else if (IsParallelRedo()) { + if (AmStartupProcess()) { + ProcTxnWorkLoad(true); + } + parallel_recovery::SendClosefdMarkToAllWorkers(); + parallel_recovery::WaitAllPageWorkersQueueEmpty(); + } else { + /* nothing */ + } + smgrcloseall(); - char *slotname = (char *)lfirst((list_head(all_archive_slots))); - ArchiveSlotConfig *archive_conf = NULL; - archive_conf = getArchiveRecoverySlotWithName(slotname); - if (archive_conf == NULL) { - list_free_deep(all_archive_slots); - all_archive_slots = NULL; - return false; - } + pg_atomic_write_u64(&g_instance.startup_cxt.suspend_lsn, lsn); + pg_memory_barrier(); + ereport(LOG, (errmsg("recovery suspend, set suspend lsn is %X/%X", + (uint32)(lsn >> XLOG_LSN_SWAP), (uint32)lsn))); - /* hadr only support OBS */ - if (archive_conf->archive_config.media_type != ARCHIVE_OBS) { - return false; - } + ThreadId PageRepairPID = g_instance.pid_cxt.PageRepairPID; + if (PageRepairPID != 0) { + gs_signal_send(PageRepairPID, SIGUSR2); + } + /* wait all dirty page flush */ + RequestCheckpoint(CHECKPOINT_FLUSH_DIRTY|CHECKPOINT_WAIT); + CheckNeedRenameFile(); + if (t_thrd.xlog_cxt.readFile >= 0) { + close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + } + while (RecoveryIsSuspend()) { + if (!WalRcvInProgress() && g_instance.pid_cxt.WalReceiverPID == 0) { + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + SpinLockAcquire(&walrcv->mutex); + walrcv->receivedUpto = 0; + SpinLockRelease(&walrcv->mutex); - char pathPrefix[MAXPGPATH] = {0}; - ArchiveConfig obsConfig; - /* copy OBS configs to temporary variable for customising file path */ - rc = memcpy_s(&obsConfig, sizeof(ArchiveConfig), &archive_conf->archive_config, sizeof(ArchiveConfig)); - securec_check(rc, "", ""); - - if (!IS_PGXC_COORDINATOR) { - rc = strcpy_s(pathPrefix, MAXPGPATH, obsConfig.archive_prefix); - securec_check_c(rc, "\0", "\0"); - - char *p = strrchr(pathPrefix, '/'); - if (p == NULL) { - list_free_deep(all_archive_slots); - all_archive_slots = NULL; - return false; + RequestXLogStreaming(&lsn, t_thrd.xlog_cxt.PrimaryConnInfo, 
REPCONNTARGET_PRIMARY, + u_sess->attr.attr_storage.PrimarySlotName); + } + pg_usleep(ONE_SECOND_TO_MICROSECOND); /* sleep 1s wait file repair */ + RedoInterruptCallBack(); } - *p = '\0'; - obsConfig.archive_prefix = pathPrefix; + pg_atomic_write_u64(&g_instance.startup_cxt.suspend_lsn, InvalidXLogRecPtr); + ereport(LOG, (errmsg("cancel recovery suspend"))); } +} - List *objectList = obsList(HADR_MASTER_CLUSTER_STAT_FILE, &obsConfig, false); - - char masterClusterStat[MAX_DEFAULT_LENGTH] = {0}; - char standbyClusterStat[MAX_DEFAULT_LENGTH] = {0}; - if (objectList != NIL && objectList->length > 0) { - (void)obsRead(HADR_MASTER_CLUSTER_STAT_FILE, 0, masterClusterStat, MAX_DEFAULT_LENGTH, &obsConfig); +/* + * Read term from xlog and update term_from_xlog. + */ +static inline void UpdateTermFromXLog(uint32 xlTerm) +{ + uint32 readTerm = xlTerm & XLOG_MASK_TERM; + if (readTerm > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { + g_instance.comm_cxt.localinfo_cxt.term_from_xlog = readTerm; - } - - objectList = obsList(HADR_STANDBY_CLUSTER_STAT_FILE, &obsConfig, false); - if (objectList != NIL && objectList->length > 0) { - (void)obsRead(HADR_STANDBY_CLUSTER_STAT_FILE, 0, standbyClusterStat, MAX_DEFAULT_LENGTH, &obsConfig); - } - - ereport(LOG, (errmsg("===IsHadrRecoveryInSwitchoverOrFailover===\n " - "masterClusterStat %s\n standbyClusterStat %s\n", - masterClusterStat, standbyClusterStat))); - - list_free_deep(all_archive_slots); - all_archive_slots = NULL; - - if ((IS_PGXC_DATANODE && - (strncmp(standbyClusterStat, HADR_SWITCHOVER_TO_MASTER, strlen(HADR_SWITCHOVER_TO_MASTER)) == 0 || - strncmp(standbyClusterStat, HADR_IN_FAILOVER, strlen(HADR_IN_FAILOVER)) == 0)) || - strncmp(masterClusterStat, HADR_SWITCHOVER_TO_STANDBY, strlen(HADR_SWITCHOVER_TO_STANDBY)) == 0) { - ereport(LOG, (errmsg("[hadr_switchover_failover]The recovery is complete and needs to be stopped."))); - return true; - } - - return false; } /* @@ -9228,6 +8974,9 @@ void StartupXLOG(void) g_instance.wal_cxt.walInsertStatusTable = (WALInsertStatusEntry *)TYPEALIGN(sizeof(WALInsertStatusEntry), g_instance.wal_cxt.walInsertStatusTable); + /* Initialize size of XlogFlushStats. 
*/ + g_instance.wal_cxt.xlogFlushStats = (XlogFlushStatistics *)palloc0(sizeof(XlogFlushStatistics)); + (void)MemoryContextSwitchTo(old_context); } @@ -9360,6 +9109,11 @@ void StartupXLOG(void) // Check for recovery control file, and if so set up state for offline recovery readRecoveryCommandFile(); + if (t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_UNSET) { + g_instance.repair_cxt.support_repair = true; + } else { + g_instance.repair_cxt.support_repair = false; + } if (t_thrd.xlog_cxt.ArchiveRestoreRequested) { t_thrd.xlog_cxt.ArchiveRecoveryRequested = true; @@ -9475,27 +9229,22 @@ void StartupXLOG(void) securec_check(rcm, "", ""); /* decide which kind of checkpoint is used */ - if (!GTM_MODE) { - recordLen = record->xl_tot_len; - if (record->xl_tot_len == CHECKPOINTNEW_LEN) { - rcm = memcpy_s(&checkPointNew, sizeof(checkPointNew), XLogRecGetData(xlogreader), - sizeof(checkPointNew)); - securec_check(rcm, "", ""); - } else if (record->xl_tot_len == CHECKPOINTPLUS_LEN) { - rcm = memcpy_s(&checkPointPlus, sizeof(checkPointPlus), XLogRecGetData(xlogreader), - sizeof(checkPointPlus)); - securec_check(rcm, "", ""); - } else if (record->xl_tot_len == CHECKPOINTUNDO_LEN) { - rcm = memcpy_s(&checkPointUndo, sizeof(checkPointUndo), XLogRecGetData(xlogreader), - sizeof(checkPointUndo)); - securec_check(rcm, "", ""); - } + recordLen = record->xl_tot_len; + if (record->xl_tot_len == CHECKPOINTNEW_LEN) { + rcm = memcpy_s(&checkPointNew, sizeof(checkPointNew), XLogRecGetData(xlogreader), + sizeof(checkPointNew)); + securec_check(rcm, "", ""); + } else if (record->xl_tot_len == CHECKPOINTPLUS_LEN) { + rcm = memcpy_s(&checkPointPlus, sizeof(checkPointPlus), XLogRecGetData(xlogreader), + sizeof(checkPointPlus)); + securec_check(rcm, "", ""); + } else if (record->xl_tot_len == CHECKPOINTUNDO_LEN) { + rcm = memcpy_s(&checkPointUndo, sizeof(checkPointUndo), XLogRecGetData(xlogreader), + sizeof(checkPointUndo)); + securec_check(rcm, "", ""); } - wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN); - if (record->xl_term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { - g_instance.comm_cxt.localinfo_cxt.term_from_xlog = record->xl_term; - } + UpdateTermFromXLog(record->xl_term); ereport(DEBUG1, (errmsg("checkpoint record is at %X/%X", (uint32)(checkPointLoc >> 32), (uint32)checkPointLoc))); t_thrd.xlog_cxt.InRecovery = true; /* force recovery even if SHUTDOWNED */ @@ -9593,8 +9342,6 @@ void StartupXLOG(void) * minRecoveryPoint, up to backupEndPoint, or until we see an * end-of-backup record), and we can enter archive recovery directly. 
*/ - bool readoldversion = false; - if (t_thrd.xlog_cxt.ArchiveRecoveryRequested && (!XLByteEQ(t_thrd.shemem_ptr_cxt.ControlFile->minRecoveryPoint, InvalidXLogRecPtr) || t_thrd.shemem_ptr_cxt.ControlFile->backupEndRequired || @@ -9613,7 +9360,7 @@ void StartupXLOG(void) checkPointLoc = t_thrd.shemem_ptr_cxt.ControlFile->checkPoint; t_thrd.xlog_cxt.RedoStartLSN = t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.redo; g_instance.comm_cxt.predo_cxt.redoPf.redo_start_ptr = t_thrd.xlog_cxt.RedoStartLSN; - record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, true); + record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1); if (record != NULL) { ereport(DEBUG1, @@ -9638,37 +9385,25 @@ void StartupXLOG(void) rcm = memcpy_s(&checkPoint, sizeof(CheckPoint), XLogRecGetData(xlogreader), sizeof(CheckPoint)); securec_check(rcm, "", ""); - if (!GTM_MODE) { - recordLen = record->xl_tot_len; - if (record->xl_tot_len == CHECKPOINTNEW_LEN) { - rcm = memcpy_s(&checkPointNew, sizeof(checkPointNew), XLogRecGetData(xlogreader), - sizeof(checkPointNew)); - securec_check(rcm, "", ""); - } else if (record->xl_tot_len == CHECKPOINTPLUS_LEN) { - rcm = memcpy_s(&checkPointPlus, sizeof(checkPointPlus), XLogRecGetData(xlogreader), - sizeof(checkPointPlus)); - securec_check(rcm, "", ""); - } else if (record->xl_tot_len == CHECKPOINTUNDO_LEN) { - rcm = memcpy_s(&checkPointUndo, sizeof(checkPointUndo), XLogRecGetData(xlogreader), - sizeof(checkPointUndo)); - securec_check(rcm, "", ""); - } + recordLen = record->xl_tot_len; + if (record->xl_tot_len == CHECKPOINTNEW_LEN) { + rcm = memcpy_s(&checkPointNew, sizeof(checkPointNew), XLogRecGetData(xlogreader), + sizeof(checkPointNew)); + securec_check(rcm, "", ""); + } else if (record->xl_tot_len == CHECKPOINTPLUS_LEN) { + rcm = memcpy_s(&checkPointPlus, sizeof(checkPointPlus), XLogRecGetData(xlogreader), + sizeof(checkPointPlus)); + securec_check(rcm, "", ""); + } else if (record->xl_tot_len == CHECKPOINTUNDO_LEN) { + rcm = memcpy_s(&checkPointUndo, sizeof(checkPointUndo), XLogRecGetData(xlogreader), + sizeof(checkPointUndo)); + securec_check(rcm, "", ""); } - if (((XLogPageHeader)xlogreader->readBuf)->xlp_magic == XLOG_PAGE_MAGIC_OLD) { - ereport(LOG, (errmsg("read old version XLog page with magic %X.", (uint32)XLOG_PAGE_MAGIC_OLD))); - readoldversion = true; - } - if (record->xl_term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { - g_instance.comm_cxt.localinfo_cxt.term_from_xlog = record->xl_term; - } + UpdateTermFromXLog(record->xl_term); - uint8 xlinfo = readoldversion ? 
((XLogRecordOld *)record)->xl_info : record->xl_info; - wasShutdown = (xlinfo == XLOG_CHECKPOINT_SHUTDOWN); - wasCheckpoint = wasShutdown || (xlinfo == XLOG_CHECKPOINT_ONLINE); - if (readoldversion && !wasCheckpoint) { - ereport(FATAL, (errmsg("old version XLog must be shutdown checkpoint or online checkpoint."))); - } + wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN); + wasCheckpoint = wasShutdown || (record->xl_info == XLOG_CHECKPOINT_ONLINE); } /* initialize double write, recover partial write */ @@ -9681,30 +9416,6 @@ void StartupXLOG(void) g_instance.policy_cxt.account_table_lock = LWLockAssign(LWTRANCHE_ACCOUNT_TABLE); t_thrd.xlog_cxt.LastRec = RecPtr = checkPointLoc; - bool isoldversion = false; - if (((XLogPageHeader)xlogreader->readBuf)->xlp_magic == XLOG_PAGE_MAGIC_OLD) { - isoldversion = true; - } - if (isoldversion) { - CheckPointOld checkPointOld; - rcm = memcpy_s(&checkPointOld, sizeof(CheckPointOld), XLogRecGetData(xlogreader), sizeof(CheckPointOld)); - securec_check(rcm, "", ""); - checkPoint.redo = XLogRecPtrSwap(checkPointOld.redo); - checkPoint.nextXid = checkPointOld.nextXid; - checkPoint.nextOid = checkPointOld.nextOid; - checkPoint.nextMulti = checkPointOld.nextMulti; - checkPoint.nextMultiOffset = checkPointOld.nextMultiOffset; - - if (checkPointOld.oldestXid > checkPointOld.nextXid && - TransactionIdLogicallyPrecedes(checkPointOld.oldestXid, FirstNormalTransactionId)) { - checkPoint.oldestXid = FirstNormalTransactionId; - } else { - checkPoint.oldestXid = checkPointOld.oldestXid; - } - checkPoint.oldestXidDB = checkPointOld.oldestXidDB; - checkPoint.time = checkPointOld.time; - checkPoint.oldestActiveXid = checkPointOld.oldestActiveXid; - } ereport(LOG, (errmsg("redo record is at %X/%X; shutdown %s", (uint32)(checkPoint.redo >> 32), (uint32)checkPoint.redo, wasShutdown ? "TRUE" : "FALSE"))); ereport(DEBUG1, (errmsg("next MultiXactId: " XID_FMT "; next MultiXactOffset: " XID_FMT, checkPoint.nextMulti, @@ -9768,17 +9479,6 @@ void StartupXLOG(void) ereport(LOG, (errmsg("%s Mode: start local next csn from next_xid %lu", "GTM-Free", t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } - } else if (GTM_MODE) { - /* - * In initdb progress, it will use local next csn when use local xid. - * So we must ensure the first next csn is normal. - * for normal condition, 4 is only used for the first normal transaction - * when it is set to commit in progress, and will never used for the - * final result(final result is fetched from gtm), because normal transaction - * use local xid will not be permitted to commit with gtm. - */ - ereport(LOG, (errmsg("%s Mode: start local next csn from 4", "GTM"))); - t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo = COMMITSEQNO_FIRST_NORMAL + 1; } else { /* Init nextCommitSeqNo in GTM-Lite Mode */ t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo = COMMITSEQNO_FIRST_NORMAL + 1; @@ -9798,7 +9498,7 @@ void StartupXLOG(void) } /* initialize shared memory variable standbyXmin from checkpoint record */ - if (!GTM_MODE && !TransactionIdIsValid(t_thrd.xact_cxt.ShmemVariableCache->standbyXmin)) { + if (!TransactionIdIsValid(t_thrd.xact_cxt.ShmemVariableCache->standbyXmin)) { if (recordLen == CHECKPOINTPLUS_LEN) { t_thrd.xact_cxt.ShmemVariableCache->standbyXmin = checkPointPlus.recent_global_xmin; } else if (recordLen == CHECKPOINTUNDO_LEN) { @@ -9808,18 +9508,20 @@ void StartupXLOG(void) t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin = InvalidTransactionId; /* Read oldest xid having undo from checkpoint and set in proc global. 
*/ - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, checkPointUndo.oldestXidInUndo); + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, checkPointUndo.oldestXidInUndo); ereport(LOG, (errmsg("Startup: write global oldestXidInUndo %lu from checkpoint %lu", - g_instance.proc_base->oldestXidInUndo, checkPointUndo.oldestXidInUndo))); + g_instance.undo_cxt.oldestXidInUndo, checkPointUndo.oldestXidInUndo))); } else { - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, InvalidTransactionId); + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, InvalidTransactionId); } /* * Initialize replication slots, before there's a chance to remove * required resources. */ + + t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo - 1; init_instance_slot(); init_instance_slot_thread(); StartupReplicationSlots(); @@ -9863,7 +9565,7 @@ void StartupXLOG(void) */ restoreTwoPhaseData(); - StartupCSNLOG(isoldversion); + StartupCSNLOG(); t_thrd.xlog_cxt.lastFullPageWrites = checkPoint.fullPageWrites; t_thrd.xlog_cxt.RedoRecPtr = t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr = @@ -9902,11 +9604,12 @@ void StartupXLOG(void) } ReadRemainSegsFile(); - + /* Determine whether it is currently in the switchover of streaming disaster recovery */ + checkHadrInSwitchover(); /* REDO */ if (t_thrd.xlog_cxt.InRecovery) { /* use volatile pointer to prevent code rearrangement */ - XLogCtlData *xlogctl = t_thrd.shemem_ptr_cxt.XLogCtl; + volatile XLogCtlData *xlogctl = t_thrd.shemem_ptr_cxt.XLogCtl; /* * Update pg_control to show that we are recovering and to show the @@ -10170,15 +9873,15 @@ void StartupXLOG(void) pg_atomic_write_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage, false); // Allow read-only connections immediately if we're consistent already. CheckRecoveryConsistency(); - + ResetXLogStatics(); // Find the first record that logically follows the checkpoint --- it // might physically precede it, though. 
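/*
 * [Editor's note, not part of the patch] As the comment above says, the first record
 * to replay may physically precede the checkpoint record: the checkpoint's redo
 * pointer covers WAL written while the checkpoint was in progress, and all of it must
 * be replayed. A minimal sketch of the decision taken by the branch below;
 * RedoStartPoint is a hypothetical name and LSNs are modeled as plain 64-bit integers.
 */
#include <cstdint>

/* Returns the position replay starts from: the redo pointer when it lies
 * before the checkpoint record, otherwise the record just after it. */
static uint64_t RedoStartPoint(uint64_t redoPtr, uint64_t checkpointLoc, uint64_t afterCheckpoint)
{
    return (redoPtr < checkpointLoc) ? redoPtr : afterCheckpoint;
}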
if (XLByteLT(checkPoint.redo, RecPtr)) { /* back up to find the record */ - record = ReadRecord(xlogreader, checkPoint.redo, PANIC, false, true); + record = ReadRecord(xlogreader, checkPoint.redo, PANIC, false); } else { /* just have to read next record after CheckPoint */ - record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false, true); + record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false); } XLogReaderState *oldXlogReader = xlogreader; @@ -10187,14 +9890,11 @@ void StartupXLOG(void) bool recoveryContinue = true; bool recoveryApply = true; TimestampTz xtime; - bool readoldversion = false; instr_time rec_startTime; instr_time rec_endTime; XLogRecPtr redoStartPtr = t_thrd.xlog_cxt.ReadRecPtr; uint64 redoTotalBytes; - if (record->xl_term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { - g_instance.comm_cxt.localinfo_cxt.term_from_xlog = record->xl_term; - } + UpdateTermFromXLog(record->xl_term); t_thrd.xlog_cxt.InRedo = true; g_instance.comm_cxt.predo_cxt.redoPf.preEndPtr = 0; @@ -10217,7 +9917,9 @@ void StartupXLOG(void) ereport(LOG, (errmsg("redo starts at %X/%X", (uint32)(t_thrd.xlog_cxt.ReadRecPtr >> 32), (uint32)t_thrd.xlog_cxt.ReadRecPtr))); +#ifndef ENABLE_LITE_MODE update_stop_barrier(); +#endif INSTR_TIME_SET_CURRENT(rec_startTime); t_thrd.xlog_cxt.RedoStartLSN = t_thrd.xlog_cxt.ReadRecPtr; g_instance.comm_cxt.predo_cxt.redoPf.redo_start_ptr = t_thrd.xlog_cxt.RedoStartLSN; @@ -10226,9 +9928,6 @@ void StartupXLOG(void) do { TermFileData term_file; - if (((XLogPageHeader)xlogreader->readBuf)->xlp_magic == XLOG_PAGE_MAGIC_OLD) { - ereport(PANIC, (errmsg("redo can not support old version!!!!"))); - } #ifdef WAL_DEBUG if (u_sess->attr.attr_storage.XLOG_DEBUG || @@ -10267,7 +9966,7 @@ void StartupXLOG(void) if (xlogctl->recoveryPause) { recoveryPausesHere(); } - + GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_2]); // Have we reached our recovery target? if (recoveryStopsHere(xlogreader, &recoveryApply)) { reachedStopPoint = true; /* see below */ @@ -10276,12 +9975,16 @@ void StartupXLOG(void) /* Exit loop if we reached non-inclusive recovery target */ if (!recoveryApply && (t_thrd.xlog_cxt.server_mode == PRIMARY_MODE || t_thrd.xlog_cxt.server_mode == NORMAL_MODE || - (IS_DISASTER_RECOVER_MODE && (t_thrd.xlog_cxt.recoveryTarget != RECOVERY_TARGET_TIME_OBS)))) { + (IS_OBS_DISASTER_RECOVER_MODE && + (t_thrd.xlog_cxt.recoveryTarget != RECOVERY_TARGET_TIME_OBS)))) { + extreme_rto::WaitAllRedoWorkerQueueEmpty(); break; } } - + #ifndef ENABLE_MULTIPLE_NODES + CountAndGetRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_2], + t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_3]); /* * If we've been asked to lag the master, wait on latch until * enough time has passed. 
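The GetRedoStartTime()/CountRedoTime() calls threaded through this hunk bracket individual phases of the redo loop so per-step cost accumulates in the timeCost slots. A self-contained sketch of the same bracketing pattern (the accumulator and step indices here are illustrative, not the actual openGauss definitions):

```
#include <chrono>
#include <cstdint>

static int64_t g_stepCostUs[8]; /* illustrative per-step accumulators */
static std::chrono::steady_clock::time_point g_stepStart;

static void StepStart() { g_stepStart = std::chrono::steady_clock::now(); }

static void StepEnd(int step)
{
    auto elapsed = std::chrono::steady_clock::now() - g_stepStart;
    g_stepCostUs[step] +=
        std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
}

int main()
{
    StepStart(); /* plays the role of GetRedoStartTime(...) */
    /* ... perform one redo step ... */
    StepEnd(2);  /* plays the role of CountRedoTime(...) for step 2 */
    return 0;
}
```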
@@ -10298,6 +10001,9 @@ void StartupXLOG(void) recoveryPausesHere(); } } + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_3]); +#else + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_2]); #endif /* @@ -10336,16 +10042,18 @@ void StartupXLOG(void) if (xlogreader->isPRProcess && !IsExtremeRedo()) { newXlogReader = parallel_recovery::NewReaderState(xlogreader); } - + GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_4]); DispatchRedoRecord(xlogreader, t_thrd.xlog_cxt.expectedTLIs, xtime); - + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_4]); /* Remember this record as the last-applied one */ t_thrd.xlog_cxt.LastRec = t_thrd.xlog_cxt.ReadRecPtr; /* Exit loop if we reached inclusive recovery target */ if (!recoveryContinue && (t_thrd.xlog_cxt.server_mode == PRIMARY_MODE || t_thrd.xlog_cxt.server_mode == NORMAL_MODE || - IS_DISASTER_RECOVER_MODE || IS_SHARED_STORAGE_STANBY_CLUSTER_MODE)) { + (IS_OBS_DISASTER_RECOVER_MODE && (t_thrd.xlog_cxt.recoveryTarget != RECOVERY_TARGET_TIME_OBS)) || + IS_DISASTER_RECOVER_MODE)) { + extreme_rto::WaitAllRedoWorkerQueueEmpty(); break; } @@ -10361,17 +10069,28 @@ void StartupXLOG(void) ArchiveXlogForForceFinishRedo(xlogreader, &term_file); newXlogReader->readOff = INVALID_READ_OFF; } + + if (xlogctl->recoverySusPend) { + if (IsExtremeRedo()) { + ExtremeRedoWaitRecoverySusPendFinish(xlogreader->EndRecPtr); + } else { + handleRecoverySusPend(xlogreader->EndRecPtr); + } + } + + GetRedoStartTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_1]); if (xlogreader->isPRProcess && IsExtremeRedo()) { record = ReadNextXLogRecord(&xlogreader, LOG); } else { xlogreader = newXlogReader; - record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false, readoldversion); + record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false); } + CountRedoTime(t_thrd.xlog_cxt.timeCost[TIME_COST_STEP_1]); } while (record != NULL); // end of main redo apply loop SendRecoveryEndMarkToWorkersAndWaitForFinish(0); RecoveryXlogReader(oldXlogReader, xlogreader); - if (!(IS_DISASTER_RECOVER_MODE || IS_SHARED_STORAGE_STANBY_CLUSTER_MODE)) { + if (!(IS_OBS_DISASTER_RECOVER_MODE || IS_DISASTER_RECOVER_MODE)) { if (t_thrd.xlog_cxt.recoveryPauseAtTarget && reachedStopPoint) { SetRecoveryPause(true); recoveryPausesHere(); @@ -10386,24 +10105,6 @@ void StartupXLOG(void) (uint32)t_thrd.xlog_cxt.ReadRecPtr, (uint32)(t_thrd.xlog_cxt.EndRecPtr >> 32), (uint32)t_thrd.xlog_cxt.EndRecPtr))); - checkHadrInSwitchover(); - - bool isInHadrSwitchover = false; - /* In Hadr Switchover */ - if (IS_DISASTER_RECOVER_MODE || IS_CNDISASTER_RECOVER_MODE) { - isInHadrSwitchover = IsHadrRecoveryInSwitchoverOrfailover(); - } else { - isInHadrSwitchover = IsHadrAchiveInSwitchover(); - } - - if (isInHadrSwitchover) { - do { - pg_usleep(1000000L); /* 1000 ms */ - ereport(LOG, (errmsg("[hadr_switchover]=====sleep====="))); - RedoInterruptCallBack(); - } while (1); - } - INSTR_TIME_SET_CURRENT(rec_endTime); INSTR_TIME_SUBTRACT(rec_endTime, rec_startTime); redoTotalBytes = t_thrd.xlog_cxt.EndRecPtr - redoStartPtr; @@ -10420,7 +10121,7 @@ void StartupXLOG(void) redo_unlink_stats_file(); parallel_recovery::redo_dump_all_stats(); /* check all the received xlog have been redo when switchover */ - if ((!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) && CheckForSwitchoverTrigger()) { + if ((!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) && CheckForSwitchoverTrigger()) { XLogRecPtr receivedUpto = GetWalRcvWriteRecPtr(NULL); XLogRecPtr EndRecPtrTemp = t_thrd.xlog_cxt.EndRecPtr; XLByteAdvance(EndRecPtrTemp, 
SizeOfXLogRecord); @@ -10442,6 +10143,19 @@ void StartupXLOG(void) ereport(LOG, (errmsg("redo is not required"))); } } + /* Set undoCountThreshold to a proper value after recovery finishes. */ + undo::InitUndoCountThreshold(); + + /* + * A backup taken from a standby server contains no XLOG_BACKUP_END record. + * When restoring from such a backup, we check the backupEndPoint to + * determine whether the backup is complete. + * Since the backupEndPoint may be the last xlog record in the backup, we must check + * whether we have reached the consistency point after redoing all the xlog. + */ + if (backupFromStandby) { + CheckRecoveryConsistency(); + } /* Update Remain Segment Start Point */ UpdateRemainSegsStartPoint(t_thrd.xlog_cxt.EndRecPtr); @@ -10462,7 +10176,7 @@ void StartupXLOG(void) */ XLogCheckInvalidPages(); - if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if (IS_DISASTER_RECOVER_MODE) { ereport(LOG, (errmsg("reach stop barrier wait startupxlog here"))); while (reachedStopPoint) { pg_usleep(1000000L); /* 1000 ms */ @@ -10497,10 +10211,9 @@ void StartupXLOG(void) xlogreader->readSegNo = 0; xlogreader->readOff = 0; xlogreader->readLen = 0; - record = ReadRecord(xlogreader, t_thrd.xlog_cxt.LastRec, PANIC, false, true); - if (record->xl_term > g_instance.comm_cxt.localinfo_cxt.term_from_xlog) { - g_instance.comm_cxt.localinfo_cxt.term_from_xlog = record->xl_term; - } + record = ReadRecord(xlogreader, t_thrd.xlog_cxt.LastRec, PANIC, false); + UpdateTermFromXLog(record->xl_term); + EndOfLog = t_thrd.xlog_cxt.EndRecPtr; XLByteToPrevSeg(EndOfLog, endLogSegNo); uint32 redoReadOff = t_thrd.xlog_cxt.readOff; @@ -10630,18 +10343,6 @@ void StartupXLOG(void) AdvanceXLInsertBuffer(EndOfLog, false, NULL); g_instance.wal_cxt.sentResult = EndOfLog; - /* add switch at old version xlog page */ - if ((EndOfLog % XLOG_BLCKSZ != 0) && ((XLogPageHeader)xlogreader->readBuf)->xlp_magic == XLOG_PAGE_MAGIC_OLD) { - XLogRecPtr switchptr; - - LocalSetXLogInsertAllowed(); - /* XLOG SWITCH has no data */ - XLogBeginInsert(); - - switchptr = XLogInsert(RM_XLOG_ID, XLOG_SWITCH, true); - t_thrd.xlog_cxt.LocalXLogInsertAllowed = -1; - } - /* Pre-scan prepared transactions to find out the range of XIDs present */ oldestActiveXID = PrescanPreparedTransactions(NULL, NULL); @@ -10734,7 +10435,7 @@ void StartupXLOG(void) /* * Recover knowledge about replay progress of known replication partners. */ -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) StartupReplicationOrigin(); #endif TrimCLOG(); @@ -10766,31 +10467,23 @@ void StartupXLOG(void) XLogReportParameters(); /* - * If enable_gtm_free, we do not have GTM to coordinate xid assignment. - * thus we have to bump up the nextXid number to avoid the last xid - * when CN was brought down (probably need recover/2pc clean up) to be - * re-used/overwritten by new CN init. + * if we switch from gtm-lite mode to gtm-free mode, the local next csn may be larger than + * the next xid, so set next xid to the larger one.
- */ - if (GTM_FREE_MODE && - t_thrd.xact_cxt.ShmemVariableCache->nextXid < t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo) { - t_thrd.xact_cxt.ShmemVariableCache->nextXid = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; - } - t_thrd.xact_cxt.ShmemVariableCache->nextXid += g_instance.shmem_cxt.MaxBackends; - - /* - * nextxid may access the next clog page which has not extended yet, - * If we do checkpoint, nextxid record as a part of runningxacts, and - * we may try to access according clog page during recovery. - * Just extend it here to avoid this situation. - */ - ExtendCLOG(t_thrd.xact_cxt.ShmemVariableCache->nextXid); - ExtendCSNLOG(t_thrd.xact_cxt.ShmemVariableCache->nextXid); + if (GTM_FREE_MODE && + t_thrd.xact_cxt.ShmemVariableCache->nextXid < t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo) { + t_thrd.xact_cxt.ShmemVariableCache->nextXid = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; } + t_thrd.xact_cxt.ShmemVariableCache->nextXid += g_instance.shmem_cxt.MaxBackends; + + /* + * nextxid may point into the next clog page, which has not been extended yet. + * If we do a checkpoint, nextxid is recorded as part of the running xacts, and + * we may try to access the corresponding clog page during recovery. + * Just extend it here to avoid this situation. + */ + ExtendCLOG(t_thrd.xact_cxt.ShmemVariableCache->nextXid); + ExtendCSNLOG(t_thrd.xact_cxt.ShmemVariableCache->nextXid); /* * All done. Allow backends to write WAL. (Although the bool flag is @@ -10806,6 +10499,7 @@ void StartupXLOG(void) xlogctl->SharedRecoveryInProgress = false; xlogctl->IsRecoveryDone = true; SpinLockRelease(&xlogctl->info_lck); + NotifyGscRecoveryFinished(); if (ENABLE_INCRE_CKPT) { RecoveryQueueState *state = &g_instance.ckpt_cxt_ctl->ckpt_redo_state; (void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE); @@ -11059,9 +10753,6 @@ void backup_cut_xlog_file(XLogRecPtr lastReplayedEndRecPtr) static void sendPMBeginHotStby() { if (!t_thrd.xlog_cxt.LocalHotStandbyActive && -#ifdef ENABLE_MULTIPLE_NODES - t_thrd.xlog_cxt.standbyState == STANDBY_SNAPSHOT_READY && -#endif t_thrd.xlog_cxt.reachedConsistency && IsUnderPostmaster) { bool xminValid = true; XLogRecPtr lastReplayedEndRecPtr = t_thrd.shemem_ptr_cxt.XLogCtl->lastReplayedEndRecPtr; @@ -11082,6 +10773,15 @@ static void sendPMBeginHotStby() pg_atomic_write_u32(&(g_instance.comm_cxt.predo_cxt.hotStdby), ATOMIC_TRUE); ereport(LOG, (errmsg("send signal to be hot standby at %X/%X", (uint32)(lastReplayedEndRecPtr >> 32), (uint32)lastReplayedEndRecPtr))); +#ifdef ENABLE_MULTIPLE_NODES + /* + * If we are in cluster standby mode, we need to launch the barrier preparse + * thread from the minRecoveryPoint. + */ + if (IS_DISASTER_RECOVER_MODE && g_instance.pid_cxt.BarrierPreParsePID == 0) { + SetBarrierPreParseLsn(t_thrd.xlog_cxt.minRecoveryPoint); + } +#endif SendPostmasterSignal(PMSIGNAL_BEGIN_HOT_STANDBY); } } @@ -11094,6 +10794,13 @@ static void sendPMBeginHotStby() */ void CheckRecoveryConsistency(void) { + /* Standby can accept read connections only after the DCF thread has started in dcf mode */ +#ifndef ENABLE_MULTIPLE_NODES + if (!IsDCFReadyOrDisabled()) { + return; + } +#endif + XLogRecPtr lastReplayedEndRecPtr; XLogCtlData *XLogCtl = t_thrd.shemem_ptr_cxt.XLogCtl; @@ -11153,6 +10860,8 @@ void CheckRecoveryConsistency(void) redo_unlink_stats_file(); } + CheckIsStopRecovery(); + /* * Have we got a valid starting snapshot that will allow queries to be * run? 
If so, we can tell postmaster that the database is consistent now, @@ -11286,8 +10995,7 @@ static void LocalSetXLogInsertAllowed(void) * whichChkpt identifies the checkpoint (merely for reporting purposes). * 1 for "primary", 2 for "secondary", 0 for "other" (backup_label) */ -static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int whichChkpt, - bool readoldversion) +static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int whichChkpt) { XLogRecord *record = NULL; @@ -11306,7 +11014,7 @@ static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr return NULL; } - record = ReadRecord(xlogreader, RecPtr, LOG, true, readoldversion); + record = ReadRecord(xlogreader, RecPtr, LOG, true); if (record == NULL) { switch (whichChkpt) { @@ -11323,11 +11031,7 @@ static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr return NULL; } - if (((XLogPageHeader)xlogreader->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - - if ((readoldversion ? ((XLogRecordOld *)record)->xl_rmid : ((XLogRecord *)record)->xl_rmid) != RM_XLOG_ID) { + if (record->xl_rmid != RM_XLOG_ID) { switch (whichChkpt) { case 1: ereport(LOG, (errmsg("invalid resource manager ID in primary checkpoint record"))); @@ -11341,10 +11045,7 @@ static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr } return NULL; } - if ((readoldversion ? ((XLogRecordOld *)record)->xl_info : ((XLogRecord *)record)->xl_info) != - XLOG_CHECKPOINT_SHUTDOWN && - (readoldversion ? ((XLogRecordOld *)record)->xl_info : ((XLogRecord *)record)->xl_info) != - XLOG_CHECKPOINT_ONLINE) { + if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN && record->xl_info != XLOG_CHECKPOINT_ONLINE) { switch (whichChkpt) { case 1: ereport(LOG, (errmsg("invalid xl_info in primary checkpoint record"))); @@ -11359,9 +11060,7 @@ static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr return NULL; } - if (((XLogRecordOld *)record)->xl_tot_len != - SizeOfXLogRecordOld + SizeOfXLogRecordDataHeaderShort + sizeof(CheckPointOld) && - record->xl_tot_len != CHECKPOINT_LEN && record->xl_tot_len != CHECKPOINTNEW_LEN && + if (record->xl_tot_len != CHECKPOINT_LEN && record->xl_tot_len != CHECKPOINTNEW_LEN && record->xl_tot_len != CHECKPOINTPLUS_LEN && record->xl_tot_len != CHECKPOINTUNDO_LEN) { switch (whichChkpt) { case 1: @@ -11606,6 +11305,11 @@ void ShutdownXLOG(int code, Datum arg) dw_exit(true); dw_exit(false); + /* try clear page repair thread mem again */ + ClearPageRepairTheadMem(); + g_instance.repair_cxt.page_repair_hashtbl_lock = NULL; + g_instance.repair_cxt.file_repair_hashtbl_lock = NULL; + if (IsInitdb) { ShutdownShareStorageXLogCopy(); } @@ -11874,7 +11578,6 @@ void CreateCheckPoint(int flags) StopSuspendWalInsert(lastlrc); LWLockRelease(CheckpointLock); END_CRIT_SECTION(); - CheckPointSyncWithAbsorption(); gstrace_exit(GS_TRC_ID_CreateCheckPoint); return; } @@ -11931,9 +11634,6 @@ void CreateCheckPoint(int flags) (uint32)(curMinRecLSN), flags, (uint32)(remaincommit >> XLOG_LSN_SWAP), (uint32)(remaincommit)))); } - if (dw_enabled()) { - CheckPointSyncWithAbsorption(); - } gstrace_exit(GS_TRC_ID_CreateCheckPoint); return; } else if (XLByteLT(curMinRecLSN, @@ -12047,7 +11747,7 @@ void CreateCheckPoint(int flags) MultiXactGetCheckptMulti(shutdown, &checkPoint.nextMulti, &checkPoint.nextMultiOffset); - checkPointUndo.oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + 
checkPointUndo.oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); /* * Having constructed the checkpoint record, ensure all shmem disk buffers @@ -12089,15 +11789,8 @@ void CreateCheckPoint(int flags) * xacts we need to wait for. */ vxids = GetVirtualXIDsDelayingChkpt(&nvxids); - int64 old_request = CheckPointGetFsyncRequset(); if (nvxids > 0) { - CheckPointSyncWithAbsorption(); do { - int64 new_request = CheckPointGetFsyncRequset(); - if (new_request != old_request) { - CheckPointSyncWithAbsorption(); - old_request = new_request; - } pg_usleep(10000L); /* wait for 10 msec */ } while (HaveVirtualXIDsDelayingChkpt(vxids, nvxids)); } @@ -12151,15 +11844,9 @@ void CreateCheckPoint(int flags) * recovery we don't need to write running xact data. */ -#ifdef ENABLE_MULTIPLE_NODES if (!shutdown && XLogStandbyInfoActive()) { LogStandbySnapshot(); } -#else - if (XLogStandbyInfoActive() && !RecoveryInProgress()) { - LogStandbySnapshot(); - } -#endif START_CRIT_SECTION(); @@ -12206,7 +11893,7 @@ void CreateCheckPoint(int flags) XLogBeginInsert(); /* use version num to control */ - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM && u_sess->attr.attr_common.upgrade_mode != 1) { + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { errno_t rcm = memcpy_s(&checkPointUndo, sizeof(CheckPoint), &checkPoint, sizeof(CheckPoint)); securec_check(rcm, "", ""); if (IsBootstrapProcessingMode() || @@ -12219,7 +11906,7 @@ void CreateCheckPoint(int flags) checkPointUndo.next_csn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; checkPointUndo.length = (uint64)sizeof(CheckPointUndo); checkPointUndo.recent_global_xmin = t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin; - checkPointUndo.oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + checkPointUndo.oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); } XLogRegisterData((char *)(&checkPointUndo), sizeof(checkPointUndo)); } else if (t_thrd.proc->workingVersionNum >= GTMLITE_VERSION_NUM && u_sess->attr.attr_common.upgrade_mode != 1) { @@ -12393,7 +12080,7 @@ static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags, bool doFullChec * need wait pagewriter thread flush dirty page. */ CheckPointBuffers(flags, doFullCheckpoint); /* performs all required fsyncs */ -#ifndef ENABLE_MULTIPLE_NODES +#if !defined(ENABLE_MULTIPLE_NODES) && !defined(ENABLE_LITE_MODE) CheckPointReplicationOrigin(); #endif /* We deliberately delay 2PC checkpointing as long as possible */ @@ -12682,7 +12369,6 @@ bool CreateRestartPoint(int flags) if (!recoveryInProgress) { ereport(DEBUG2, (errmsg("skipping restartpoint, recovery has already ended"))); LWLockRelease(CheckpointLock); - CheckPointSyncWithAbsorption(); gstrace_exit(GS_TRC_ID_CreateRestartPoint); return false; } @@ -12738,7 +12424,6 @@ bool CreateRestartPoint(int flags) LWLockRelease(ControlFileLock); } LWLockRelease(CheckpointLock); - CheckPointSyncWithAbsorption(); gstrace_exit(GS_TRC_ID_CreateRestartPoint); return false; } @@ -12831,7 +12516,6 @@ bool CreateRestartPoint(int flags) (uint32)(MinRecLSN >> XLOG_LSN_SWAP), (uint32)MinRecLSN, (uint32)(lastCheckPointRecPtr >> XLOG_LSN_SWAP), (uint32)lastCheckPointRecPtr))); LWLockRelease(CheckpointLock); - CheckPointSyncWithAbsorption(); gstrace_exit(GS_TRC_ID_CreateRestartPoint); return false; } @@ -12918,7 +12602,7 @@ bool CreateRestartPoint(int flags) * in csnlog.c). 
When hot standby is disabled, though, we mustn't do * this because StartupCSNLOG hasn't been called yet. */ - if (g_instance.attr.attr_storage.EnableHotStandby) { + if (g_instance.attr.attr_storage.EnableHotStandby && !IS_DISASTER_RECOVER_MODE) { pg_time_t now; int elapsed_secs; now = (pg_time_t)time(NULL); @@ -12993,19 +12677,30 @@ static XLogSegNo CalcRecycleSegNo(XLogRecPtr curInsert, XLogRecPtr quorumMinRequ return SegNoCanRecycled; } -static XLogSegNo CalcRecycleSegNoForHadrMainStandby(XLogRecPtr curFlush, XLogSegNo segno) +static XLogSegNo CalcRecycleSegNoForHadrMainStandby(XLogRecPtr curFlush, XLogSegNo segno, XLogRecPtr minRequired) { XLogSegNo SegNoCanRecycled = segno; + + if (g_instance.attr.attr_storage.max_replication_slots > 0 && !XLByteEQ(minRequired, InvalidXLogRecPtr)) { + XLogSegNo slotSegNo; + + XLByteToSeg(minRequired, slotSegNo); + + if (slotSegNo <= 0) { + /* segno = 1 means all xlog files should be kept */ + segno = 1; + ereport(LOG, (errmsg("main standby keeps all the xlog segments, because the minimum replication slot segno " + "is less than or equal to zero"))); + } else if (slotSegNo < SegNoCanRecycled) { + SegNoCanRecycled = slotSegNo; + } + } uint64 maxKeepSize = ((uint64)u_sess->attr.attr_storage.max_size_for_xlog_prune << 10); if (WalSndInProgress(SNDROLE_PRIMARY_BUILDSTANDBY)) { - if (XLByteLT(maxKeepSize + XLogSegSize, curFlush)) { - XLByteToSeg(curFlush - maxKeepSize, SegNoCanRecycled); - } else { - SegNoCanRecycled = 1; - ereport(LOG, (errmsg("keep all the xlog segments in Main standby, " - "because there is a full standby building processing."))); - } + SegNoCanRecycled = 1; + ereport(LOG, (errmsg("keep all the xlog segments in Main standby, " + "because a full standby build is in progress."))); } else if (!WalSndAllInProgressForMainStandby(SNDROLE_PRIMARY_STANDBY)) { if (XLByteLT(maxKeepSize + XLogSegSize, curFlush)) { XLByteToSeg(curFlush - maxKeepSize, SegNoCanRecycled); @@ -13018,6 +12713,66 @@ static XLogSegNo CalcRecycleSegNoForHadrMainStandby(XLogRecPtr curFlush, XLogSeg return SegNoCanRecycled; } +static XLogSegNo CalcRecycleArchiveSegNo() +{ + XLogRecPtr min_required = InvalidXLogRecPtr; + XLogSegNo min_required_segno; + + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + ReplicationSlot *s = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; + volatile ReplicationSlot *vslot = s; + SpinLockAcquire(&s->mutex); + XLogRecPtr restart_lsn; + if (!s->in_use) { + SpinLockRelease(&s->mutex); + continue; + } + restart_lsn = vslot->data.restart_lsn; + if (s->extra_content == NULL) { + if ((!XLByteEQ(restart_lsn, InvalidXLogRecPtr)) && + (XLByteEQ(min_required, InvalidXLogRecPtr) || XLByteLT(restart_lsn, min_required))) { + min_required = restart_lsn; + } + } + SpinLockRelease(&s->mutex); + } + XLByteToSeg(min_required, min_required_segno); + return min_required_segno; +} + +static XLogSegNo CalculateCNRecycleSegNoForStreamingHadr(XLogRecPtr curInsert, XLogSegNo logSegNo, + XLogRecPtr minToolsRequired) +{ + XLogSegNo slotSegNo = logSegNo; + + uint64 maxKeepSize = ((uint64)u_sess->attr.attr_storage.max_size_for_xlog_prune << 10); + if (WalSndInProgress(SNDROLE_PRIMARY_BUILDSTANDBY)) { + slotSegNo = 1; + ereport(LOG, (errmsg("keep all the xlog segments in Main Coordinator, " + "because a full build is in progress."))); + } else if (!WalSndAllInProgressForMainStandby(SNDROLE_PRIMARY_STANDBY)) { + if (XLByteLT(maxKeepSize + XLogSegSize, curInsert)) { + XLByteToSeg(curInsert - maxKeepSize, slotSegNo); + } else { 
slotSegNo = 1; + ereport(LOG, (errmsg("keep all the xlog segments in Main Coordinator, " + "because a standby Coordinator is offline."))); + } + } + if (!XLByteEQ(minToolsRequired, InvalidXLogRecPtr)) { + XLogSegNo minToolsSegNo; + XLByteToSeg(minToolsRequired, minToolsSegNo); + if (minToolsSegNo < slotSegNo) { + slotSegNo = minToolsSegNo; + } + } + if (slotSegNo < logSegNo) { + return slotSegNo; + } else { + return logSegNo; + } +} + /* * Retreat *logSegNo to the last segment that we need to retain because of * either wal_keep_segments or replication slots. @@ -13089,6 +12844,15 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo, XLogRecPtr curIns load_server_mode(); + /* In streaming hadr mode, we need to consider how to recycle xlog for the Coordinator in a main cluster with standby. */ + if (t_thrd.xlog_cxt.server_mode == NORMAL_MODE && IS_PGXC_COORDINATOR) { + XLogSegNo mainCNSegNo = CalculateCNRecycleSegNoForStreamingHadr(curInsert, segno, + repl_slot_state.min_tools_required); + if (mainCNSegNo < segno && mainCNSegNo > 0) { + segno = mainCNSegNo; + } + } + /* * In primary mode, we should do additional check. * @@ -13123,6 +12887,18 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo, XLogRecPtr curIns segno = 1; ereport(LOG, (errmsg("keep all the xlog segments, because not all the wal senders are " "in the role PRIMARY_STANDBY"))); + } else { + uint64 maxKeepSize = ((uint64)u_sess->attr.attr_storage.max_size_for_xlog_prune << 10); + XLogSegNo archiveSlotSegNo; + XLByteToSeg(repl_slot_state.min_archive_slot_required, archiveSlotSegNo); + if (!XLByteEQ(repl_slot_state.min_archive_slot_required, InvalidXLogRecPtr) && + (XLByteLT(maxKeepSize + XLogSegSize, (curInsert - repl_slot_state.min_archive_slot_required)) && + archiveSlotSegNo == segno)) { + segno = CalcRecycleArchiveSegNo(); + g_instance.roach_cxt.isXLogForceRecycled = true; + ereport(LOG, (errmsg("force recycle archiving xlog, because archive required xlog keep segment size " + "is bigger than max keep size: %lu", maxKeepSize))); + } } if (XLByteEQ(keep, InvalidXLogRecPtr) && repl_slot_state.exist_in_use && !g_instance.attr.attr_storage.dcf_attr.enable_dcf) { @@ -13149,7 +12925,7 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo, XLogRecPtr curIns LWLockRelease(XlogRemoveSegLock); } if (t_thrd.xlog_cxt.server_mode == STANDBY_MODE && t_thrd.xlog_cxt.is_hadr_main_standby) { - XLogSegNo mainStandbySegNo = CalcRecycleSegNoForHadrMainStandby(recptr, segno); + XLogSegNo mainStandbySegNo = CalcRecycleSegNoForHadrMainStandby(recptr, segno, repl_slot_state.min_required); if (mainStandbySegNo < segno && mainStandbySegNo > 0) { segno = mainStandbySegNo; } @@ -13355,7 +13131,7 @@ bool IsCheckPoint(const XLogRecParseState *parseState) return rmid == RM_XLOG_ID && (info == XLOG_CHECKPOINT_SHUTDOWN || info == XLOG_CHECKPOINT_ONLINE); } -bool HasTimelineUpdate(XLogReaderState *record, bool bOld) +bool HasTimelineUpdate(XLogReaderState *record) { uint8 info; int rmid; @@ -13378,38 +13154,6 @@ void UpdateTimeline(CheckPoint *checkPoint) } } -/** - * @Description: Assign Checkpoint data from the old version to the new version - * @in: record, xlog record - */ -CheckPoint update_checkpoint(XLogReaderState *record) -{ - CheckPointOld checkPointOld; - CheckPoint checkPoint; - errno_t rc; - - rc = memcpy_s(&checkPointOld, sizeof(CheckPointOld), XLogRecGetData(record), sizeof(CheckPointOld)); - securec_check(rc, "", ""); - checkPoint.redo = XLogRecPtrSwap(checkPointOld.redo); - checkPoint.ThisTimeLineID = 
checkPointOld.ThisTimeLineID; - checkPoint.fullPageWrites = checkPointOld.fullPageWrites; - checkPoint.nextXid = checkPointOld.nextXid; - checkPoint.nextOid = checkPointOld.nextOid; - checkPoint.nextMulti = checkPointOld.nextMulti; - checkPoint.nextMultiOffset = checkPointOld.nextMultiOffset; - - if (checkPointOld.oldestXid > checkPointOld.nextXid && - TransactionIdLogicallyPrecedes(checkPointOld.oldestXid, FirstNormalTransactionId)) { - checkPoint.oldestXid = FirstNormalTransactionId; - } else { - checkPoint.oldestXid = checkPointOld.oldestXid; - } - checkPoint.oldestXidDB = checkPointOld.oldestXidDB; - checkPoint.time = checkPointOld.time; - checkPoint.oldestActiveXid = checkPointOld.oldestActiveXid; - checkPoint.remove_seg = InvalidXLogSegPtr; - return checkPoint; -} /* * XLOG resource manager's routines * @@ -13418,15 +13162,7 @@ CheckPoint update_checkpoint(XLogReaderState *record) */ void xlog_redo(XLogReaderState *record) { - uint8 info; - - /* Support redo old version xlog during upgrade (Just the chekpoint and xlog switch) */ - if (t_thrd.xlog_cxt.redo_oldversion_xlog) { - info = (((XLogRecordOld *)record->decoded_record)->xl_info) & ~XLR_INFO_MASK; - } else { - info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; - } - + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; XLogRecPtr lsn = record->EndRecPtr; /* in XLOG rmgr, backup blocks are only used by XLOG_FPI records */ @@ -13455,17 +13191,17 @@ void xlog_redo(XLogReaderState *record) CheckPointUndo checkPointUndo; errno_t rc; - /* The checkpoint structure has changed to a new version, needs to be updated to the new version */ - if (t_thrd.xlog_cxt.redo_oldversion_xlog) { - checkPoint = update_checkpoint(record); - } else { - if (XLogRecGetDataLen(record) >= sizeof(checkPoint) && XLogRecGetDataLen(record) < sizeof(checkPointUndo)) { - rc = memcpy_s(&checkPoint, sizeof(CheckPoint), XLogRecGetData(record), sizeof(CheckPoint)); - securec_check(rc, "", ""); - } else if (XLogRecGetDataLen(record) >= sizeof(checkPointUndo)) { - rc = memcpy_s(&checkPointUndo, sizeof(CheckPointUndo), XLogRecGetData(record), sizeof(CheckPointUndo)); - securec_check(rc, "", ""); - checkPoint = checkPointUndo.ori_checkpoint; + if (XLogRecGetDataLen(record) >= sizeof(checkPoint) && XLogRecGetDataLen(record) < sizeof(checkPointUndo)) { + rc = memcpy_s(&checkPoint, sizeof(CheckPoint), XLogRecGetData(record), sizeof(CheckPoint)); + securec_check(rc, "", ""); + } else if (XLogRecGetDataLen(record) >= sizeof(checkPointUndo)) { + rc = memcpy_s(&checkPointUndo, sizeof(CheckPointUndo), XLogRecGetData(record), sizeof(CheckPointUndo)); + securec_check(rc, "", ""); + checkPoint = checkPointUndo.ori_checkpoint; + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, checkPointUndo.oldestXidInUndo); + } else { + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, InvalidTransactionId); } } /* In a SHUTDOWN checkpoint, believe the counters exactly */ @@ -13493,11 +13229,7 @@ void xlog_redo(XLogReaderState *record) XlogRemoveSegPrimary = checkPoint.remove_seg; LWLockRelease(XlogRemoveSegLock); MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, checkPointUndo.oldestXidInUndo); - } else { - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, InvalidTransactionId); - } + SetTransactionIdLimit(checkPoint.oldestXid, 
checkPoint.oldestXidDB); /* @@ -13580,17 +13312,17 @@ void xlog_redo(XLogReaderState *record) CheckPointUndo checkPointUndo; errno_t rc; - /* The checkpoint structure has changed to a new version, needs to be updated to the new version */ - if (t_thrd.xlog_cxt.redo_oldversion_xlog) { - checkPoint = update_checkpoint(record); - } else { - if (XLogRecGetDataLen(record) >= sizeof(checkPoint) && XLogRecGetDataLen(record) < sizeof(checkPointUndo)) { - rc = memcpy_s(&checkPoint, sizeof(CheckPoint), XLogRecGetData(record), sizeof(CheckPoint)); - securec_check(rc, "", ""); - } else if (XLogRecGetDataLen(record) >= sizeof(checkPointUndo)) { - rc = memcpy_s(&checkPointUndo, sizeof(CheckPointUndo), XLogRecGetData(record), sizeof(CheckPointUndo)); - securec_check(rc, "", ""); - checkPoint = checkPointUndo.ori_checkpoint; + if (XLogRecGetDataLen(record) >= sizeof(checkPoint) && XLogRecGetDataLen(record) < sizeof(checkPointUndo)) { + rc = memcpy_s(&checkPoint, sizeof(CheckPoint), XLogRecGetData(record), sizeof(CheckPoint)); + securec_check(rc, "", ""); + } else if (XLogRecGetDataLen(record) >= sizeof(checkPointUndo)) { + rc = memcpy_s(&checkPointUndo, sizeof(CheckPointUndo), XLogRecGetData(record), sizeof(CheckPointUndo)); + securec_check(rc, "", ""); + checkPoint = checkPointUndo.ori_checkpoint; + if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM) { + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, checkPointUndo.oldestXidInUndo); + } else { + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, InvalidTransactionId); } } /* In an ONLINE checkpoint, treat the XID counter as a minimum */ @@ -13606,11 +13338,6 @@ void xlog_redo(XLogReaderState *record) LWLockRelease(OidGenLock); MultiXactAdvanceNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset); - if (t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM) { - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, checkPointUndo.oldestXidInUndo); - } else { - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, InvalidTransactionId); - } if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->oldestXid, checkPoint.oldestXid)) { SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB); @@ -15546,6 +15273,12 @@ XLogRecPtr enable_delay_ddl_recycle_with_slot(const char *slotname) errmsg("could not enable delay ddl when enable_cbm_tracking is off!"))); } + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot enable delay ddl during streaming disaster recovery"))); + } + ereport(LOG, (errmsg("start delaying ddl recycle with backup slot %s", slotname))); LWLockAcquire(DelayDDLLock, LW_EXCLUSIVE); @@ -15603,6 +15336,12 @@ void disable_delay_ddl_recycle_with_slot(const char *slotname, XLogRecPtr *start errmsg(" could not disable delay ddl when enable_cbm_tracking is off!"))); } + /* we are about to start streaming switch over, stop any xlog insert. 
*/ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot disable delay ddl during streaming disaster recovery"))); + } + ereport(LOG, (errmsg("stop delaying ddl recycle with backup slot %s", slotname))); LWLockAcquire(DelayDDLLock, LW_EXCLUSIVE); @@ -15967,6 +15706,15 @@ void SetXLogReplayRecPtr(XLogRecPtr readRecPtr, XLogRecPtr endRecPtr) RedoSpeedDiag(readRecPtr, endRecPtr); } update_dirty_page_queue_rec_lsn(readRecPtr); +#ifndef ENABLE_MULTIPLE_NODES + if (g_instance.attr.attr_storage.dcf_attr.enable_dcf) { + int ret = dcf_set_election_priority(1, endRecPtr); + if (ret != 0) { + ereport(WARNING, (errmsg("In dcf mode, could not update current replay location:%08X/%08X", + (uint32)(endRecPtr >> 32), (uint32)(endRecPtr)))); + } + } +#endif } void DumpXlogCtl() @@ -16367,90 +16115,62 @@ static bool IsRedoDonePromoting(void) bool XLogReadFromWriteBuffer(XLogRecPtr targetStartPtr, int reqLen, char *readBuf, uint32 *rereadlen) { - WalRcvCtlAcquireExitLock(); - WalRcvCtlBlock *walrcb = getCurrentWalRcvCtlBlock(); - const int64 recBufferSize = g_instance.attr.attr_storage.WalReceiverBufSize * 1024; - int64 walwriteoffset; - int64 walReadOffset; - char *walrecvbuf = NULL; - char *srcbuf = NULL; - XLogRecPtr lastreadptr; - XLogRecPtr startPtr; - int64 buflen1 = 0; - int64 buflen2 = 0; - int64 realLen = reqLen; - - if (walrcb == NULL) { - WalRcvCtlReleaseExitLock(); - return false; - } - SpinLockAcquire(&walrcb->mutex); - walwriteoffset = walrcb->walWriteOffset; - walReadOffset = walrcb->walReadOffset; - walrecvbuf = walrcb->walReceiverBuffer; - lastreadptr = walrcb->lastReadPtr; - startPtr = walrcb->walStart; - SpinLockRelease(&walrcb->mutex); - - if (!XLByteEQ(targetStartPtr, lastreadptr) || XLByteLT(startPtr, lastreadptr + reqLen)) { - WalRcvCtlReleaseExitLock(); - const uint32 rightShiftSize = 32; - ereport(WARNING, (errmodule(MOD_REDO), - errmsg("XLogReadFromWriteBuffer:" - "lastreadptr:%X/%X,EndRecPtr:%X/" - "%X,length:%d,reallen:%ld,buf1:%ld,buf2:%ld,writeoffset:%ld,readoffset:%ld", - (uint32)(lastreadptr >> rightShiftSize), (uint32)(lastreadptr), - (uint32)(startPtr >> rightShiftSize), (uint32)(startPtr), reqLen, realLen, buflen1, - buflen2, walwriteoffset, walReadOffset))); - - pg_atomic_write_u32(&(extreme_rto::g_recordbuffer->readWorkerState), extreme_rto::WORKER_STATE_STOPPING); - return false; - } - - if (walReadOffset <= walwriteoffset) { - buflen1 = (walwriteoffset - walReadOffset); - } else { - buflen1 = (recBufferSize - walReadOffset); - buflen2 = walwriteoffset; - } - - if ((buflen1 + buflen2) < realLen) { - realLen = buflen1 + buflen2; - } - - srcbuf = walrecvbuf + walReadOffset; - errno_t ss_rc = 0; - if (realLen <= buflen1) { - ss_rc = memcpy_s(readBuf, XLOG_SEG_SIZE, srcbuf, realLen); - securec_check(ss_rc, "\0", "\0"); - } else { - ss_rc = memcpy_s(readBuf, XLOG_SEG_SIZE, srcbuf, buflen1); - securec_check(ss_rc, "\0", "\0"); - srcbuf = walrecvbuf; - int64 remain = realLen - buflen1; - Assert(remain <= buflen2); - ss_rc = memcpy_s((readBuf + buflen1), (XLOG_SEG_SIZE - buflen1), srcbuf, remain); - securec_check(ss_rc, "\0", "\0"); - } - - SpinLockAcquire(&walrcb->mutex); - walrcb->lastReadPtr = lastreadptr + realLen; - walrcb->walReadOffset = walReadOffset + realLen; - - if (walrcb->walReadOffset == recBufferSize) { - walrcb->walReadOffset = 0; - if (walrcb->walWriteOffset == recBufferSize) { - walrcb->walWriteOffset = 0; - if (walrcb->walFreeOffset == 
recBufferSize) { - walrcb->walFreeOffset = 0; + uint32 startoff = targetStartPtr % XLogSegSize; + if (t_thrd.xlog_cxt.readFile < 0 || !XLByteInSeg(targetStartPtr, t_thrd.xlog_cxt.readSegNo)) { + char path[MAXPGPATH]; + if (t_thrd.xlog_cxt.readFile >= 0) { + (void)close(t_thrd.xlog_cxt.readFile); + } + XLByteToSeg(targetStartPtr, t_thrd.xlog_cxt.readSegNo); + XLogFilePath(path, MAXPGPATH, t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.xlog_cxt.readSegNo); + t_thrd.xlog_cxt.readFile = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0); + if (t_thrd.xlog_cxt.readFile < 0) { + /* + * If the file is not found, assume it's because the standby + * asked for a too old WAL segment that has already been + * removed or recycled. + */ + if (errno == ENOENT) { + ereport(ERROR, (errcode_for_file_access(), + errmsg("requested WAL segment %s has already been removed", + XLogFileNameP(t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.xlog_cxt.readSegNo)))); + } else { + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not open file \"%s\" (log segment %s): %m", path, + XLogFileNameP(t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.xlog_cxt.readSegNo)))); } } + t_thrd.xlog_cxt.readOff = 0; } - SpinLockRelease(&walrcb->mutex); - WalRcvCtlReleaseExitLock(); + if (startoff != t_thrd.xlog_cxt.readOff) { + if (lseek(t_thrd.xlog_cxt.readFile, (off_t)startoff, SEEK_SET) < 0) { + (void)close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + ereport(ERROR, (errcode_for_file_access(), errmsg("could not seek in log segment %s to offset %u: %m", + XLogFileNameP(t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.xlog_cxt.readSegNo), startoff))); + } + t_thrd.xlog_cxt.readOff = startoff; + } - *rereadlen = realLen; + if (reqLen > (int)(XLogSegSize - startoff)) { + reqLen = (int)(XLogSegSize - startoff); + } + + int readbytes = read(t_thrd.xlog_cxt.readFile, readBuf, reqLen); + if (readbytes <= 0) { + (void)close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read from log segment %s, offset %u, length %lu: %m", + XLogFileNameP(t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.xlog_cxt.readSegNo), + t_thrd.xlog_cxt.readOff, INT2ULONG(reqLen)))); + } + + t_thrd.xlog_cxt.readOff += reqLen; + + *rereadlen = reqLen; return true; } @@ -16546,7 +16266,11 @@ int ParallelXLogReadWorkBufRead(XLogReaderState *xlogreader, XLogRecPtr targetPa pg_atomic_write_u64(&extreme_rto::g_dispatcher->rtoXlogBufState.expectLsn, expectedRecPtr); for (;;) { // Check to see if the trigger file exists. If so, update the gaussdb state file. 
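The rewritten XLogReadFromWriteBuffer above replaces the walreceiver ring buffer with direct reads from the on-disk segment, so addressing reduces to segment arithmetic. A standalone sketch of that arithmetic, assuming the default 16 MB segment size (XLogSegSize and XLByteToSeg are re-derived here purely for illustration):

```
#include <cstdint>
#include <cstdio>

static const uint64_t kSegSize = 16ULL * 1024 * 1024; /* assumed 16 MB WAL segments */

int main()
{
    uint64_t lsn = 0x2A000F5D0ULL;                  /* arbitrary WAL location */
    uint64_t segno = lsn / kSegSize;                /* what XLByteToSeg computes */
    uint32_t startoff = (uint32_t)(lsn % kSegSize); /* in-segment offset for lseek() */
    printf("segment %llu, offset %u\n", (unsigned long long)segno, startoff);
    return 0;
}
```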
- if (CheckForStandbyTrigger()) { + if (CheckForStandbyTrigger() +#ifndef ENABLE_MULTIPLE_NODES + && IsDCFReadyOrDisabled() +#endif + ) { SendPostmasterSignal(PMSIGNAL_UPDATE_NORMAL); } @@ -16961,7 +16685,7 @@ static ReplConnTarget GetRepConntarget(void) } } -static void HandleCascadeStandbyPromote(XLogRecPtr *recptr) +void HandleCascadeStandbyPromote(XLogRecPtr *recptr) { if (!t_thrd.xlog_cxt.is_cascade_standby || t_thrd.xlog_cxt.server_mode != STANDBY_MODE || !IS_DN_MULTI_STANDYS_MODE()) { @@ -16980,21 +16704,21 @@ static void HandleCascadeStandbyPromote(XLogRecPtr *recptr) if (t_thrd.postmaster_cxt.HaShmData->is_cross_region) { t_thrd.xlog_cxt.is_hadr_main_standby = true; SpinLockAcquire(&t_thrd.postmaster_cxt.HaShmData->mutex); + t_thrd.postmaster_cxt.HaShmData->is_cascade_standby = false; t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby = true; SpinLockRelease(&t_thrd.postmaster_cxt.HaShmData->mutex); } t_thrd.xlog_cxt.switchover_triggered = false; - if (IsExtremeRtoRunning()) { - t_thrd.xlog_cxt.readfrombuffer = false; - /* restart from recvbuffer */ - pg_atomic_write_u32(&(extreme_rto::g_recordbuffer->readWorkerState), extreme_rto::WORKER_STATE_STOP); - } - /* failover */ ResetFailoverTriggered(); t_thrd.xlog_cxt.failover_triggered = false; + if (t_thrd.xlog_cxt.readFile >= 0) { + close(t_thrd.xlog_cxt.readFile); + t_thrd.xlog_cxt.readFile = -1; + } + SendPostmasterSignal(PMSIGNAL_UPDATE_NORMAL); /* request postmaster to start walreceiver again. */ @@ -17058,7 +16782,7 @@ void rename_recovery_conf_for_roach() { * Otherwise it keeps sleeping and retrying indefinitely. */ int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *readBuf, TimeLineID *readTLI) + char *readBuf, TimeLineID *readTLI, char* xlog_path) { /* Load reader private data */ XLogPageReadPrivate *readprivate = (XLogPageReadPrivate *)xlogreader->private_data; @@ -17138,7 +16862,11 @@ retry: bool havedata = false; // Check to see if the trigger file exists. If so, update the gaussdb state file. - if (CheckForStandbyTrigger()) { + if (CheckForStandbyTrigger() +#ifndef ENABLE_MULTIPLE_NODES + && IsDCFReadyOrDisabled() +#endif + ) { SendPostmasterSignal(PMSIGNAL_UPDATE_NORMAL); } @@ -17359,10 +17087,6 @@ retry: proc_exit(0); } - if (IsExtremeRtoSmartShutdown()) { - ExtremeRtoRedoManagerSendEndToStartup(); - } - if (!xlogctl->IsRecoveryDone) { g_instance.comm_cxt.predo_cxt.redoPf.redo_done_time = GetCurrentTimestamp(); g_instance.comm_cxt.predo_cxt.redoPf.recovery_done_ptr = t_thrd.xlog_cxt.ReadRecPtr; @@ -17470,7 +17194,7 @@ retry: } continue; } - } else if (IS_DISASTER_RECOVER_MODE) { + } else if (IS_OBS_DISASTER_RECOVER_MODE && !IsRoachRestore()) { ProcTxnWorkLoad(false); /* use volatile pointer to prevent code rearrangement */ volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; @@ -17635,8 +17359,7 @@ retry: * At this point, we have the right segment open and if we're streaming we * know the requested record is in it. */ - if (!t_thrd.xlog_cxt.readfrombuffer) - Assert(t_thrd.xlog_cxt.readFile != -1); + Assert(t_thrd.xlog_cxt.readFile != -1); /* * If the current segment is being streamed from master, calculate how @@ -17644,7 +17367,7 @@ retry: * requested record has been received, but this is for the benefit of * future calls, to allow quick exit at the top of this function. 
*/ - if (t_thrd.xlog_cxt.readSource == XLOG_FROM_STREAM || (IsExtremeRtoRunning() && t_thrd.xlog_cxt.readfrombuffer)) { + if (t_thrd.xlog_cxt.readSource == XLOG_FROM_STREAM) { if ((targetPagePtr / XLOG_BLCKSZ) != (t_thrd.xlog_cxt.receivedUpto / XLOG_BLCKSZ)) { t_thrd.xlog_cxt.readLen = XLOG_BLCKSZ; } else { @@ -17759,7 +17482,7 @@ static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr) * failover request has arrived. If either condition holds, request postmaster * to shut down walreceiver, wait for it to exit, and return true. */ -static bool CheckForFailoverTrigger(void) +bool CheckForFailoverTrigger(void) { struct stat stat_buf; StringInfo slotname = NULL; @@ -17847,7 +17570,7 @@ bool CheckForForceFinishRedoTrigger(TermFileData *term_file) * switchover request has arrived. If either condition holds, request postmaster * to shut down walreceiver, wait for it to exit, and return true. */ -static bool CheckForSwitchoverTrigger(void) +bool CheckForSwitchoverTrigger(void) { if (t_thrd.xlog_cxt.switchover_triggered) { return true; @@ -17968,9 +17691,13 @@ extreme_rto::Enum_TriggeredState CheckForSatartupStatus(void) return extreme_rto::TRIGGER_STADNBY; } if (t_thrd.startup_cxt.failover_triggered) { + ereport(LOG, (errmsg("received failover request"))); + ResetFailoverTriggered(); return extreme_rto::TRIGGER_FAILOVER; } if (t_thrd.startup_cxt.switchover_triggered) { + ereport(LOG, (errmsg("received switchover request"))); + ResetSwitchoverTriggered(); return extreme_rto::TRIGGER_FAILOVER; } return extreme_rto::TRIGGER_NORMAL; @@ -18109,6 +17836,24 @@ bool CheckStandbySignal(void) return false; } +/* + * Check whether the signal is cascade standby signal + */ +bool CheckCascadeStandbySignal(void) +{ + struct stat stat_buf; + + if (stat(CASCADE_STANDBY_SIGNAL_FILE, &stat_buf) == 0) { + /* + * Since we are in a signal handler, it's not safe to elog. We + * silently ignore any error from unlink. + */ + (void)unlink(CASCADE_STANDBY_SIGNAL_FILE); + return true; + } + return false; +} + /* * Check to see if a switchover request has arrived and * read demote mode from switchover signal file. @@ -18961,7 +18706,7 @@ void update_max_page_flush_lsn(XLogRecPtr biggest_lsn, ThreadId thdId, bool is_f ereport(PANIC, (errcode_for_file_access(), errmodule(MOD_REDO), errmsg("Could not open file \"%s\"", MAX_PAGE_FLUSH_LSN_FILE))); } - offset = lock_id * BLCKSZ; + offset = (int64)lock_id * (int64)BLCKSZ; page = g_instance.comm_cxt.predo_cxt.ali_buf + offset; new_lsn = (XLogRecPtr *)page; *new_lsn = biggest_lsn; @@ -19194,6 +18939,66 @@ pg_crc32c CalShareStorageCtlInfoCrc(const ShareStorageXLogCtl *ctlInfo) return crc; } +void UpdatePostgresqlFile(const char *optName, const char *gucLine) +{ + char guc[MAXPGPATH]; + char gucBak[MAXPGPATH]; + char gucLock[MAXPGPATH]; + errno_t ret = snprintf_s(guc, MAXPGPATH, MAXPGPATH - 1, "%s/postgresql.conf", t_thrd.proc_cxt.DataDir); + securec_check_ss(ret, "\0", "\0"); + ret = snprintf_s(gucBak, MAXPGPATH, MAXPGPATH - 1, "%s/%s", t_thrd.proc_cxt.DataDir, CONFIG_BAK_FILENAME); + securec_check_ss(ret, "\0", "\0"); + + struct stat statbuf; + if (lstat(guc, &statbuf) != 0) { + if (errno != ENOENT) { + ereport(FATAL, (errmsg("UpdatePostgresqlFile could not stat file \"%s\": %m", guc))); + } + } + + ConfFileLock filelock = { NULL, 0 }; + ret = snprintf_s(gucLock, MAXPGPATH, MAXPGPATH - 1, "%s/postgresql.conf.lock", t_thrd.proc_cxt.DataDir); + securec_check_ss(ret, "\0", "\0"); + /* 1. 
lock postgresql.conf */ + if (get_file_lock(gucLock, &filelock) != CODE_OK) { + ereport(FATAL, (errmsg("UpdatePostgresqlFile: modify postgresql.conf failed: cannot get the file lock"))); + } + + copy_file_internal(guc, gucBak, true); + char** opt_lines = read_guc_file(gucBak); + if (opt_lines == NULL) { + release_file_lock(&filelock); + ereport(FATAL, (errmsg("UpdatePostgresqlFile: the config file has no data, please check it."))); + } + + modify_guc_one_line(&opt_lines, optName, gucLine); + ret = write_guc_file(gucBak, opt_lines); + release_opt_lines(opt_lines); + opt_lines = NULL; + + if (ret != CODE_OK) { + release_file_lock(&filelock); + ereport(FATAL, (errmsg("UpdatePostgresqlFile: failed to update the config file %s.", gucBak))); + } + + if (rename(gucBak, guc) != 0) { + release_file_lock(&filelock); + ereport(FATAL, (errcode_for_file_access(), errmsg("could not rename \"%s\" to \"%s\": %m", gucBak, guc))); + } + + if (lstat(guc, &statbuf) != 0) { + if (errno != ENOENT) { + release_file_lock(&filelock); + ereport(PANIC, (errmsg("UpdatePostgresqlFile2: could not stat file \"%s\": %m", guc))); + } + } + + if (statbuf.st_size > 0) { + copy_file_internal(guc, gucBak, true); + } + release_file_lock(&filelock); +} + void ShareStorageInit() { if (g_instance.attr.attr_storage.xlog_file_path != NULL && g_instance.attr.attr_storage.xlog_file_size > 0) { @@ -19201,7 +19006,8 @@ void ShareStorageInit() void *tmpBuf = ShmemInitStruct("share storage Ctl", CalShareStorageCtlSize(), &found); g_instance.xlog_cxt.shareStorageXLogCtl = (ShareStorageXLogCtl *)TYPEALIGN(MEMORY_ALIGNED_SIZE, tmpBuf); g_instance.xlog_cxt.shareStorageXLogCtlOrigin = tmpBuf; - InitDoradoStorage(g_instance.attr.attr_storage.xlog_file_path, g_instance.attr.attr_storage.xlog_file_size); + InitDoradoStorage(g_instance.attr.attr_storage.xlog_file_path, + (uint64)g_instance.attr.attr_storage.xlog_file_size); if (!IsInitdb && g_instance.attr.attr_storage.xlog_lock_file_path != NULL) { g_instance.xlog_cxt.shareStorageLockFd = BasicOpenFile(g_instance.attr.attr_storage.xlog_lock_file_path, O_CREAT | O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); @@ -19210,6 +19016,22 @@ void ShareStorageInit() g_instance.attr.attr_storage.xlog_lock_file_path))); } } + + if (((uint64)g_instance.attr.attr_storage.xlog_file_size != + g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize) && (IS_SHARED_STORAGE_STANDBY_CLUSTER)) { + uint32 const bufSz = 256; + char buf[bufSz]; + + errno_t errorno = snprintf_s(buf, sizeof(buf), sizeof(buf) - 1, "%lu", + g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize); + securec_check_ss(errorno, "", ""); + SetConfigOption("xlog_file_size", buf, PGC_POSTMASTER, PGC_S_ARGV); + char option[bufSz]; + errorno = snprintf_s(option, sizeof(option), sizeof(option) - 1, "xlog_file_size=%lu\n", + g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize); + securec_check_ss(errorno, "", ""); + UpdatePostgresqlFile("xlog_file_size", option); + } } } @@ -19304,7 +19126,7 @@ void FindLastRecordCheckInfoOnShareStorage(XLogRecPtr *lastRecordPtr, pg_crc32 * Size SimpleValidatePage(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, char *page) { XLogPageHeader hdr = (XLogPageHeader)page; - bool ret = ValidXLogPageHeader(xlogreader, targetPagePtr, hdr, false); + bool ret = ValidXLogPageHeader(xlogreader, targetPagePtr, hdr); if (!ret) { elog(LOG, "SimpleValidatePage:%s", xlogreader->errormsg_buf); return 0; } @@ -19314,7 +19136,7 @@ Size SimpleValidatePage(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, c } int 
SharedStorageXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, - XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI) + XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI, char* xlog_path) { int read_len = ReadXlogFromShareStorage(targetPagePtr, readBuf, Max(XLOG_BLCKSZ, reqLen)); return read_len; diff --git a/src/gausskernel/storage/access/transam/xlogfuncs.cpp b/src/gausskernel/storage/access/transam/xlogfuncs.cpp index dfa7397f0..bc5607d4c 100755 --- a/src/gausskernel/storage/access/transam/xlogfuncs.cpp +++ b/src/gausskernel/storage/access/transam/xlogfuncs.cpp @@ -48,6 +48,8 @@ #include "postmaster/bgwriter.h" #include "postmaster/postmaster.h" +const int msecPerSec = 1000; + typedef ArchiveSlotConfig*(*get_slot_func)(const char *); extern void validate_xlog_location(char *str); @@ -63,6 +65,11 @@ static inline void pg_check_xlog_func_permission() ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), errhint("This xlog function cannot be executed during recovery."))); } + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("This xlog function cannot be executed during streaming disaster recovery"))); + } } /* @@ -100,7 +107,7 @@ Datum pg_start_backup(PG_FUNCTION_ARGS) if (strncmp(backupidstr, "gs_roach", strlen("gs_roach")) == 0) { startpoint = do_roach_start_backup(backupidstr); } else { - startpoint = do_pg_start_backup(backupidstr, fast, NULL, dir, NULL, NULL, false, true); + startpoint = do_pg_start_backup(backupidstr, fast, NULL, dir, NULL, NULL, false, true); RegisterAbortExclusiveBackup(); } @@ -226,6 +233,19 @@ Datum pg_stop_backup_v2(PG_FUNCTION_ARGS) errno_t rc; SessionBackupState status = u_sess->proc_cxt.sessionBackupState; + if (status == SESSION_BACKUP_NONE) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("a backup is not in progress"))); + } + if (exclusive) { + if (status == SESSION_BACKUP_NON_EXCLUSIVE) + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("non-exclusive backup in progress"), errhint("Did you mean to use pg_stop_backup('f')?"))); + } else { + if (status != SESSION_BACKUP_NON_EXCLUSIVE) + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("non-exclusive backup is not in progress"))); + } oldcontext2 = MemoryContextSwitchTo(u_sess->probackup_context); @@ -257,18 +277,11 @@ Datum pg_stop_backup_v2(PG_FUNCTION_ARGS) securec_check(rc, "\0", "\0"); if (exclusive) { - if (status == SESSION_BACKUP_NON_EXCLUSIVE) - ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("non-exclusive backup in progress"), errhint("Did you mean to use pg_stop_backup('f')?"))); /* when delay xlog recycle is true, we do not copy xlog from archive */ stoppoint = do_pg_stop_backup(NULL, !GetDelayXlogRecycle()); nulls[1] = true; nulls[2] = true; } else { - if (status != SESSION_BACKUP_NON_EXCLUSIVE) - ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("non-exclusive backup is not in progress"))); - stoppoint = do_pg_stop_backup(u_sess->proc_cxt.LabelFile, !GetDelayXlogRecycle()); values[1] = CStringGetTextDatum(u_sess->proc_cxt.LabelFile); @@ -341,6 +354,12 @@ Datum pg_switch_xlog(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), 
errmsg("recovery is in progress"), errhint("WAL control functions cannot be executed during recovery."))); + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot switch xlog during streaming disaster recovery"))); + } + switchpoint = RequestXLogSwitch(); RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT | CHECKPOINT_IMMEDIATE); @@ -375,6 +394,12 @@ Datum gs_roach_switch_xlog(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), errhint("WAL control functions cannot be executed during recovery."))); + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot roach switch xlog during streaming disaster recovery"))); + } + /* hold lock to force cbm track */ if (u_sess->attr.attr_storage.enable_cbm_tracking) { LWLockAcquire(CBMParseXlogLock, LW_EXCLUSIVE); @@ -423,6 +448,12 @@ Datum pg_create_restore_point(PG_FUNCTION_ARGS) (errmsg("recovery is in progress"), errhint("WAL control functions cannot be executed during recovery.")))); + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot create restore point during streaming disaster recovery"))); + } + if (!XLogIsNeeded()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("WAL level not sufficient for creating a restore point"), @@ -1153,6 +1184,7 @@ Datum pg_get_flush_lsn(PG_FUNCTION_ARGS) Datum gs_set_obs_delete_location_with_slotname(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE char* lsnLocation = PG_GETARG_CSTRING(0); char* currentSlotName = PG_GETARG_CSTRING(1); @@ -1188,6 +1220,10 @@ Datum gs_set_obs_delete_location_with_slotname(PG_FUNCTION_ARGS) securec_check_ss(errorno, "", ""); PG_RETURN_TEXT_P(cstring_to_text(xlogfilename)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif } /* @@ -1197,6 +1233,7 @@ Datum gs_set_obs_delete_location_with_slotname(PG_FUNCTION_ARGS) */ Datum gs_set_obs_delete_location(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE text *location = PG_GETARG_TEXT_P(0); char *locationstr = NULL; uint32 hi = 0; @@ -1234,10 +1271,15 @@ Datum gs_set_obs_delete_location(PG_FUNCTION_ARGS) securec_check_ss(errorno, "", ""); PG_RETURN_TEXT_P(cstring_to_text(xlogfilename)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif } Datum gs_get_global_barrier_status(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE #define PG_GET_GLOBAL_BARRIER_STATUS_COLS 2 char globalBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; char globalAchiveBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; @@ -1257,7 +1299,7 @@ Datum gs_get_global_barrier_status(PG_FUNCTION_ARGS) errno_t rc = 0; get_slot_func slot_func; List *all_archive_slots = NIL; - if (IS_DISASTER_RECOVER_MODE || IS_CNDISASTER_RECOVER_MODE) { + if (IS_OBS_DISASTER_RECOVER_MODE || IS_CN_OBS_DISASTER_RECOVER_MODE) { all_archive_slots = GetAllRecoverySlotsName(); slot_func = &getArchiveRecoverySlotWithName; } else { @@ -1329,10 +1371,15 @@ 
Datum gs_get_global_barrier_status(PG_FUNCTION_ARGS) result = HeapTupleGetDatum(resultHeapTuple); list_free_deep(objectList); PG_RETURN_DATUM(result); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_DATUM(0); +#endif } Datum gs_get_global_barriers_status(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE #define PG_GET_GLOBAL_BARRIERS_STATUS_COLS 3 char globalBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; char globalAchiveBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; @@ -1379,7 +1426,7 @@ Datum gs_get_global_barriers_status(PG_FUNCTION_ARGS) // global_barrierId:max global barrierId // global_achive_barrierId:max achive barrierId List *all_archive_slots = NIL; - if (IS_DISASTER_RECOVER_MODE || IS_CNDISASTER_RECOVER_MODE) { + if (IS_OBS_DISASTER_RECOVER_MODE || IS_CN_OBS_DISASTER_RECOVER_MODE) { all_archive_slots = GetAllRecoverySlotsName(); slot_func = &getArchiveRecoverySlotWithName; } else { @@ -1453,11 +1500,15 @@ Datum gs_get_global_barriers_status(PG_FUNCTION_ARGS) list_free_deep(all_archive_slots); list_free_deep(objectList); tuplestore_donestoring(tupstore); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif PG_RETURN_DATUM(0); } Datum gs_get_local_barrier_status(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE #define PG_GET_LOCAL_BARRIER_STATUS_COLS 4 volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; XLogRecPtr flushLsn = InvalidXLogRecPtr; @@ -1480,11 +1531,13 @@ Datum gs_get_local_barrier_status(PG_FUNCTION_ARGS) // archive_LSN max archive xlog LSN // flush_LSN max flush xlog LSN - if (IS_DISASTER_RECOVER_MODE || IS_CNDISASTER_RECOVER_MODE) { + if (IS_OBS_DISASTER_RECOVER_MODE || IS_CN_OBS_DISASTER_RECOVER_MODE || IS_DISASTER_RECOVER_MODE) { + SpinLockAcquire(&walrcv->mutex); rc = strncpy_s((char *)barrierId, MAX_BARRIER_ID_LENGTH, (char *)walrcv->lastRecoveredBarrierId, MAX_BARRIER_ID_LENGTH - 1); securec_check(rc, "\0", "\0"); barrierLsn = walrcv->lastRecoveredBarrierLSN; + SpinLockRelease(&walrcv->mutex); } rc = snprintf_s(barrierLocation, MAXFNAMELEN, MAXFNAMELEN - 1, @@ -1528,10 +1581,15 @@ Datum gs_get_local_barrier_status(PG_FUNCTION_ARGS) PG_RETURN_DATUM(result); PG_RETURN_TEXT_P(cstring_to_text(flushLocation)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif } Datum gs_hadr_do_switchover(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE #define TIME_GET_MILLISEC(t) (((long)(t).tv_sec * 1000) + ((long)(t).tv_usec) / 1000) uint64_t barrier_index = 0; int ret; @@ -1556,7 +1614,7 @@ Datum gs_hadr_do_switchover(PG_FUNCTION_ARGS) ereport(LOG, (errmsg("[hadr switchover]obs_archive_slot does not exist."))); PG_RETURN_BOOL(false); } - barrier_index = GetObsBarrierIndex(archiveSlotNames); + barrier_index = GetObsBarrierIndex(archiveSlotNames, NULL); gettimeofday(&tv, NULL); @@ -1619,11 +1677,15 @@ Datum gs_hadr_do_switchover(PG_FUNCTION_ARGS) list_free_deep(archiveSlotNames); archiveSlotNames = NULL; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif PG_RETURN_BOOL(true); } Datum gs_hadr_has_barrier_creator(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("Must be system admin or operator admin in operation mode to gs_hadr_has_barrier_creator.")))); @@ -1635,7 +1697,9 @@ Datum gs_hadr_has_barrier_creator(PG_FUNCTION_ARGS) #endif PG_RETURN_BOOL(true); } - +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif PG_RETURN_BOOL(false); } @@ -1658,6 +1722,7 @@ Datum 
gs_hadr_in_recovery(PG_FUNCTION_ARGS) Datum gs_upload_obs_file(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE char* slotname = PG_GETARG_CSTRING(0); char* src = PG_GETARG_CSTRING(1); char* dest = PG_GETARG_CSTRING(2); @@ -1693,12 +1758,16 @@ Datum gs_upload_obs_file(PG_FUNCTION_ARGS) } else { ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), (errmsg("There is no replication slots.")))); } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif PG_RETURN_VOID(); } Datum gs_download_obs_file(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE char* slotname = PG_GETARG_CSTRING(0); char* src = PG_GETARG_CSTRING(1); char* dest = PG_GETARG_CSTRING(2); @@ -1711,7 +1780,7 @@ Datum gs_download_obs_file(PG_FUNCTION_ARGS) (errmsg("Must be system admin or operator admin in operation mode to gs_download_obs_file.")))); ArchiveSlotConfig *archive_conf = NULL; - if (IS_CNDISASTER_RECOVER_MODE) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { archive_conf = GetArchiveRecoverySlot(); } else { archive_conf = getArchiveReplicationSlotWithName(slotname); @@ -1739,12 +1808,16 @@ Datum gs_download_obs_file(PG_FUNCTION_ARGS) } else { ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), (errmsg("There is no replication slots.")))); } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif PG_RETURN_VOID(); } Datum gs_get_obs_file_context(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE char fileContext[MAXPGPATH] = {0}; size_t readLen = 0; char* setFileName = PG_GETARG_CSTRING(0); @@ -1755,7 +1828,7 @@ Datum gs_get_obs_file_context(PG_FUNCTION_ARGS) (errmsg("Must be system admin or operator admin in operation mode to gs_get_obs_file_context.")))); ArchiveSlotConfig *archive_conf = NULL; - if (IS_CNDISASTER_RECOVER_MODE) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { archive_conf = GetArchiveRecoverySlot(); } else { archive_conf = getArchiveReplicationSlotWithName(slotName); @@ -1785,10 +1858,15 @@ Datum gs_get_obs_file_context(PG_FUNCTION_ARGS) } PG_RETURN_TEXT_P(cstring_to_text(fileContext)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif } Datum gs_set_obs_file_context(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE int ret = 0; char* setFileName = PG_GETARG_CSTRING(0); char* setFileContext = PG_GETARG_CSTRING(1); @@ -1799,7 +1877,7 @@ Datum gs_set_obs_file_context(PG_FUNCTION_ARGS) (errmsg("Must be system admin or operator admin in operation mode to gs_get_obs_file_context.")))); ArchiveSlotConfig *archive_conf = NULL; - if (IS_CNDISASTER_RECOVER_MODE) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { archive_conf = GetArchiveRecoverySlot(); } else { archive_conf = getArchiveReplicationSlotWithName(slotName); @@ -1820,10 +1898,15 @@ Datum gs_set_obs_file_context(PG_FUNCTION_ARGS) ret = obsWrite(setFileName, setFileContext, strlen(setFileContext), &archive_conf->archive_config); PG_RETURN_TEXT_P(cstring_to_text(setFileContext)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif } Datum gs_get_hadr_key_cn(PG_FUNCTION_ARGS) { +#ifndef ENABLE_LITE_MODE #define GS_GET_HADR_KEY_CN_COLS 4 bool needLocalKeyCn = false; char localKeyCn[MAXFNAMELEN] = {0}; @@ -1869,12 +1952,15 @@ Datum gs_get_hadr_key_cn(PG_FUNCTION_ARGS) (void)MemoryContextSwitchTo(oldcontext); List *all_slots = NIL; - if (IS_CNDISASTER_RECOVER_MODE) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { all_slots = GetAllRecoverySlotsName(); needLocalKeyCn = true; int rc = 0; - rc = snprintf_s(localKeyCn, MAXFNAMELEN, MAXFNAMELEN - 1, "%s", get_local_key_cn()); + char* keyCN = NULL; + keyCN = get_local_key_cn(); + rc = 
snprintf_s(localKeyCn, MAXFNAMELEN, MAXFNAMELEN - 1, "%s", keyCN); securec_check_ss(rc, "\0", "\0"); + pfree_ext(keyCN); } else { all_slots = GetAllArchiveSlotsName(); } @@ -1888,7 +1974,7 @@ Datum gs_get_hadr_key_cn(PG_FUNCTION_ARGS) bool isExitKey= true; bool isExitDelete = true; ArchiveSlotConfig *archive_conf = NULL; - if (IS_CNDISASTER_RECOVER_MODE) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { archive_conf = GetArchiveRecoverySlot(); } else { archive_conf = getArchiveReplicationSlotWithName(slotname); @@ -1919,6 +2005,9 @@ Datum gs_get_hadr_key_cn(PG_FUNCTION_ARGS) } list_free_deep(all_slots); tuplestore_donestoring(tupstore); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif PG_RETURN_DATUM(0); } @@ -1928,12 +2017,19 @@ Datum gs_streaming_dr_in_switchover(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("Must be system admin or operator admin in operation mode to gs_streaming_dr_switchover.")))); - if (!g_instance.streaming_dr_cxt.isInStreaming_dr) { - ereport(LOG, (errmsg("Streaming disaster recovery is not started."))); + if (!g_instance.streaming_dr_cxt.isInSwitchover) { + ereport(LOG, (errmsg("Streaming disaster recovery is not in switchover."))); PG_RETURN_BOOL(false); } -#ifndef ENABLE_MULTIPLE_NODES +#ifdef ENABLE_MULTIPLE_NODES + char barrier_id[MAX_BARRIER_ID_LENGTH]; + int rc; + rc = snprintf_s(barrier_id, MAX_BARRIER_ID_LENGTH, MAX_BARRIER_ID_LENGTH - 1, CSN_BARRIER_NAME); + securec_check_ss_c(rc, "\0", "\0"); + + RequestBarrier(barrier_id, NULL, true); +#else CreateHadrSwitchoverBarrier(); #endif PG_RETURN_BOOL(true); @@ -1943,6 +2039,10 @@ Datum gs_streaming_dr_service_truncation_check(PG_FUNCTION_ARGS) { XLogRecPtr switchoverLsn = g_instance.streaming_dr_cxt.switchoverBarrierLsn; XLogRecPtr flushLsn = InvalidXLogRecPtr; + bool isInteractionCompleted = false; + int hadrWalSndNum = 0; + int InteractionCompletedNum = 0; + const uint32 shiftSize = 32; for (int i = 0; i < g_instance.attr.attr_storage.max_wal_senders; i++) { /* use volatile pointer to prevent code rearrangement */ @@ -1952,23 +2052,28 @@ Datum gs_streaming_dr_service_truncation_check(PG_FUNCTION_ARGS) } if (walsnd->is_cross_cluster) { + hadrWalSndNum++; SpinLockAcquire(&walsnd->mutex); flushLsn = walsnd->flush; + isInteractionCompleted = walsnd->isInteractionCompleted; SpinLockRelease(&walsnd->mutex); - if (g_instance.streaming_dr_cxt.isInteractionCompleted && + if (isInteractionCompleted && XLByteEQ(switchoverLsn, flushLsn)) { - PG_RETURN_BOOL(true); + InteractionCompletedNum++; } else { ereport(LOG, - (errmsg("the switchover Lsn is %X/%X, the hadr receiver flush Lsn is %X/%X", - (uint32)(switchoverLsn >> 32), (uint32)switchoverLsn, - (uint32)(flushLsn >> 32), (uint32)flushLsn))); - PG_RETURN_BOOL(false); + (errmsg("walsnd %d, the switchover Lsn is %X/%X, the hadr receiver flush Lsn is %X/%X", + i, (uint32)(switchoverLsn >> shiftSize), (uint32)switchoverLsn, + (uint32)(flushLsn >> shiftSize), (uint32)flushLsn))); } } } - PG_RETURN_BOOL(false); + if (hadrWalSndNum != 0 && hadrWalSndNum == InteractionCompletedNum) { + PG_RETURN_BOOL(true); + } else { + PG_RETURN_BOOL(false); + } } Datum gs_streaming_dr_get_switchover_barrier(PG_FUNCTION_ARGS) @@ -1978,23 +2083,33 @@ Datum gs_streaming_dr_get_switchover_barrier(PG_FUNCTION_ARGS) (errmsg("Must be system admin or operator admin in operation mode to gs_streaming_dr_get_switchover_barrier.")))); volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + XLogRecPtr last_flush_location = walrcv->receiver_flush_location; 
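Note on the hunk above: the rewritten gs_streaming_dr_service_truncation_check no longer returns on the first cross-cluster walsender it inspects. It counts every cross-cluster sender and reports success only when all of them have completed the interaction and flushed exactly up to the switchover barrier LSN. A minimal standalone sketch of that quorum-style check follows; the struct is a simplified stand-in for the real WalSnd slots, and copying the completion flag into a local mirrors the per-sender spinlock copy in the hunk.

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    bool in_use;                /* stand-in for walsnd->pid != 0 */
    bool is_cross_cluster;
    bool interaction_completed; /* copied under the sender's spinlock */
    uint64_t flush_lsn;
} SenderView;

static bool truncation_check(const SenderView *snds, int n, uint64_t switchover_lsn)
{
    int cross = 0;
    int done = 0;
    for (int i = 0; i < n; i++) {
        if (!snds[i].in_use || !snds[i].is_cross_cluster)
            continue;
        cross++;
        if (snds[i].interaction_completed && snds[i].flush_lsn == switchover_lsn)
            done++;
    }
    /* at least one cross-cluster sender exists, and every one of them is done */
    return cross != 0 && cross == done;
}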
XLogRecPtr last_replay_location = GetXLogReplayRecPtr(NULL); load_server_mode(); ereport(LOG, - (errmsg("is_hadr_main_standby: %d, last switchover Lsn is %X/%X, " - "target switchover Lsn is %X/%X, last_replay_location %X/%X", + (errmsg("is_hadr_main_standby: %d, is_cn: %d, last switchover Lsn is %X/%X, " + "target switchover Lsn is %X/%X, last_replay_location %X/%X, last_flush_location %X/%X, ServerMode:%d", t_thrd.xlog_cxt.is_hadr_main_standby, + IS_PGXC_COORDINATOR, (uint32)(walrcv->lastSwitchoverBarrierLSN >> 32), (uint32)(walrcv->lastSwitchoverBarrierLSN), (uint32)(walrcv->targetSwitchoverBarrierLSN >> 32), (uint32)(walrcv->targetSwitchoverBarrierLSN), (uint32)(last_replay_location >> 32), - (uint32)(last_replay_location)))); + (uint32)(last_replay_location), + (uint32)(last_flush_location >> 32), + (uint32)(last_flush_location), t_thrd.xlog_cxt.server_mode))); - if (t_thrd.xlog_cxt.is_hadr_main_standby) { + if (t_thrd.xlog_cxt.server_mode != STANDBY_MODE) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("streaming switchover barrier can only be set in standby mode"))); + } + if (t_thrd.xlog_cxt.is_hadr_main_standby || IS_PGXC_COORDINATOR) { g_instance.streaming_dr_cxt.isInSwitchover = true; if (g_instance.streaming_dr_cxt.isInteractionCompleted && + XLByteEQ(last_flush_location, last_replay_location) && XLByteEQ(walrcv->targetSwitchoverBarrierLSN, last_replay_location)) { PG_RETURN_BOOL(true); } @@ -2007,3 +2122,442 @@ Datum gs_streaming_dr_get_switchover_barrier(PG_FUNCTION_ARGS) PG_RETURN_BOOL(false); } + +Datum gs_pitr_get_warning_for_xlog_force_recycle(PG_FUNCTION_ARGS) +{ +#ifndef ENABLE_LITE_MODE + if (g_instance.roach_cxt.isXLogForceRecycled) { + PG_RETURN_BOOL(true); + } else { + PG_RETURN_BOOL(false); + } +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_BOOL(false); +#endif +} + +Datum gs_get_active_archiving_standby(PG_FUNCTION_ARGS) +{ +#ifndef ENABLE_LITE_MODE + int i; + int rc; + errno_t errorno = EOK; + const int cols = 3; + char standbyName[MAXPGPATH] = {0}; + char archiveLocation[MAXPGPATH] = {0}; + Datum values[cols]; + bool nulls[cols]; + int xlogFileCnt = 0; + XLogRecPtr endLsn = InvalidXLogRecPtr; + int j = 0; + TupleDesc tupdesc; + Tuplestorestate *tupstore = NULL; + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " + "allowed in this context"))); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("return type must be a row type"))); + + /* + * We don't require any special permission to see this function's data + * because nothing should be sensitive. The most critical being the slot + * name, which shouldn't contain anything particularly sensitive. 
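gs_get_active_archiving_standby (started above and continued below) opens with the usual materialize-mode boilerplate for set-returning functions. For orientation, here is a condensed, PostgreSQL-flavored sketch of that skeleton; it is not the exact openGauss call sequence (openGauss takes work_mem from u_sess->attr.attr_memory.work_mem rather than the global GUC used here), and error details are trimmed.

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"
#include "nodes/execnodes.h"
#include "utils/tuplestore.h"

Datum my_srf(PG_FUNCTION_ARGS)
{
    ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo;
    TupleDesc tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext oldcontext;

    /* the caller must be able to accept a materialized result set */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo) ||
        !(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR, (errmsg("set-valued function called in context that cannot accept a set")));
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        ereport(ERROR, (errmsg("return type must be a row type")));

    /* the tuplestore must live in the per-query context, not the per-call one */
    oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
    tupstore = tuplestore_begin_heap(true, false, work_mem);
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;
    (void)MemoryContextSwitchTo(oldcontext);

    /* ... one tuplestore_putvalues(tupstore, tupdesc, values, nulls) per row ... */
    tuplestore_donestoring(tupstore);
    return (Datum)0;
}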
+ */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + (void)MemoryContextSwitchTo(oldcontext); + if (!IS_PGXC_COORDINATOR) { + if (g_instance.archive_obs_cxt.chosen_walsender_index == -1) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Could not find the correct walsender for active archiving standby.")))); + } + rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + int index = g_instance.archive_obs_cxt.chosen_walsender_index; + volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[index]; + SpinLockAcquire(&walsnd->mutex); + if (walsnd->pid != 0 && ((walsnd->sendRole & SNDROLE_PRIMARY_STANDBY) == walsnd->sendRole)) { + rc = strncpy_s(standbyName, MAXPGPATH, g_instance.rto_cxt.rto_standby_data[index].id, + strlen(g_instance.rto_cxt.rto_standby_data[index].id)); + securec_check(rc, "\0", "\0"); + } + SpinLockRelease(&walsnd->mutex); + } else { + rc = strncpy_s(standbyName, MAXPGPATH, g_instance.attr.attr_common.PGXCNodeName, + strlen(g_instance.attr.attr_common.PGXCNodeName)); + securec_check(rc, "\0", "\0"); + } + values[j++] = CStringGetTextDatum(standbyName); + for (i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; + char currArchiveLocation[MAXPGPATH] = {0}; + SpinLockAcquire(&slot->mutex); + endLsn = slot->data.restart_lsn; + SpinLockRelease(&slot->mutex); + if (slot->in_use == true && slot->archive_config != NULL && slot->archive_config->is_recovery == false && + GET_SLOT_PERSISTENCY(slot->data) != RS_BACKUP) { + rc = snprintf_s(currArchiveLocation, MAXPGPATH, MAXPGPATH - 1, "%08X/%08X ", + (uint32)(endLsn >> 32), (uint32)(endLsn)); + securec_check_ss(rc, "", ""); + xlogFileCnt += GetArchiveXLogFileTotalNum(slot->archive_config, endLsn); + rc = strcat_s(archiveLocation, MAXPGPATH, currArchiveLocation); + securec_check(rc, "\0", "\0"); + } + } + values[j++] = CStringGetTextDatum(archiveLocation); + values[j++] = Int32GetDatum(xlogFileCnt); + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + return (Datum)0; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_DATUM(0); +#endif +} + +#ifndef ENABLE_LITE_MODE +static bool checkIsDigit(const char* arg) +{ + int i = 0; + while (arg[i] != '\0') { + if (isdigit((unsigned char)arg[i]) == 0) + return false; + i++; + } + return true; +} +#endif + +Datum gs_pitr_clean_history_global_barriers(PG_FUNCTION_ARGS) +{ +#ifndef ENABLE_LITE_MODE + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode to " + "gs_pitr_clean_history_global_barriers.")))); + } + + char* stopBarrierTimestamp = PG_GETARG_CSTRING(0); + if (stopBarrierTimestamp == NULL || !checkIsDigit(stopBarrierTimestamp)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Must input linux timestamp for gs_pitr_clean_history_global_barriers.")))); + } + + errno = 0; + long barrierTimestamp = strtol(stopBarrierTimestamp, NULL, 10); + if (errno == ERANGE) { + ereport(ERROR, 
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Must input linux timestamp for gs_pitr_clean_history_global_barriers.")))); + } + barrierTimestamp *= msecPerSec; + char* oldestBarrierForNow = DeleteStopBarrierRecordsOnMedia(barrierTimestamp); + if (oldestBarrierForNow == NULL) { + ereport(LOG, (errmsg("All barrier records have been deleted this time."))); + PG_RETURN_TEXT_P(cstring_to_text("NULL")); + } + PG_RETURN_TEXT_P(cstring_to_text(oldestBarrierForNow)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif +} + +Datum gs_pitr_archive_slot_force_advance(PG_FUNCTION_ARGS) +{ +#ifndef ENABLE_LITE_MODE + XLogSegNo currArchslotSegNo; + XLogRecPtr archiveSlotLocNow = InvalidXLogRecPtr; + char location[MAXFNAMELEN]; + get_slot_func slot_func; + List *all_archive_slots = NIL; + char globalBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; + long endBarrierTimestamp = 0; + int readLen = 0; + char* oldestBarrierForNow = NULL; + errno_t rc = EOK; + + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode to " + "gs_pitr_archive_slot_force_advance.")))); + } + char* stopBarrierTimestamp = PG_GETARG_CSTRING(0); + if (stopBarrierTimestamp == NULL || !checkIsDigit(stopBarrierTimestamp)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Must input linux timestamp for gs_pitr_archive_slot_force_advance.")))); + } + if (!g_instance.roach_cxt.isXLogForceRecycled) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errmsg("Must force advance when xlog is recycled")))); + } + + errno = 0; + long barrierTimestamp = strtol(stopBarrierTimestamp, NULL, 10); + if (errno == ERANGE) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Must input linux timestamp for gs_pitr_archive_slot_force_advance.")))); + } + barrierTimestamp *= msecPerSec; + all_archive_slots = GetAllArchiveSlotsName(); + if (all_archive_slots == NULL) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Archive slot does not exist.")))); + } + slot_func = &getArchiveReplicationSlotWithName; + foreach_cell(cell, all_archive_slots) { + long currentTimestamp = 0; + char* slotname = (char*)lfirst(cell); + ArchiveSlotConfig *archive_conf = NULL; + if ((archive_conf = slot_func(slotname)) != NULL) { + readLen = ArchiveRead(HADR_BARRIER_ID_FILE, 0, globalBarrierId, MAX_BARRIER_ID_LENGTH, + &archive_conf->archive_config); + if (readLen == 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Cannot read global barrier ID in %s file!", HADR_BARRIER_ID_FILE)))); + } + char *tmpPoint = strrchr(globalBarrierId, '_'); + if (tmpPoint == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The hadr barrier id file record timestamp is invalid.")))); + } + tmpPoint += 1; + errno = 0; + currentTimestamp = strtol(tmpPoint, NULL, 10); + if (errno == ERANGE) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("The hadr barrier id file record timestamp is out of range.")))); + } + } + if (endBarrierTimestamp == 0 || currentTimestamp > endBarrierTimestamp) { + endBarrierTimestamp = currentTimestamp; + } + } + oldestBarrierForNow = DeleteStopBarrierRecordsOnMedia(barrierTimestamp, endBarrierTimestamp); + if (oldestBarrierForNow != NULL) { + ereport(LOG, ((errmsg("[gs_pitr_archive_slot_force_advance] the last barrier record is %s.", + 
oldestBarrierForNow)))); + } + ereport(LOG, ((errmsg("[gs_pitr_archive_slot_force_advance] delete barrier records from %ld to %ld.", + endBarrierTimestamp, barrierTimestamp)))); + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; + SpinLockAcquire(&slot->mutex); + if (slot->in_use == true && slot->archive_config != NULL && slot->archive_config->is_recovery == false && + GET_SLOT_PERSISTENCY(slot->data) != RS_BACKUP) { + XLByteToSeg(slot->data.restart_lsn, currArchslotSegNo); + XLogSegNo lastRemovedSegno = XLogGetLastRemovedSegno(); + if (currArchslotSegNo <= lastRemovedSegno) { + slot->data.restart_lsn = (lastRemovedSegno + 1) * XLogSegSize; + archiveSlotLocNow = (lastRemovedSegno + 1) * XLogSegSize; + } + } + SpinLockRelease(&slot->mutex); + } + + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + if (g_instance.archive_thread_info.obsArchPID[i] != 0) { + signal_child(g_instance.archive_thread_info.obsArchPID[i], SIGUSR2, -1); + } + } + g_instance.roach_cxt.isXLogForceRecycled = false; + rc = snprintf_s(location, MAXFNAMELEN, MAXFNAMELEN - 1, "%08X/%08X", + (uint32)(archiveSlotLocNow >> 32), (uint32)(archiveSlotLocNow)); + securec_check_ss(rc, "\0", "\0"); + + PG_RETURN_TEXT_P(cstring_to_text(location)); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + PG_RETURN_TEXT_P(NULL); +#endif +} + +Datum gs_get_standby_cluster_barrier_status(PG_FUNCTION_ARGS) +{ + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + XLogRecPtr barrierLsn = InvalidXLogRecPtr; + char latestBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; + char recoveryBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; + char targetBarrierId[MAX_BARRIER_ID_LENGTH] = {0}; + const uint32 PG_GET_STANDBY_BARRIER_STATUS_COLS = 4; + Datum values[PG_GET_STANDBY_BARRIER_STATUS_COLS]; + bool isnull[PG_GET_STANDBY_BARRIER_STATUS_COLS]; + char barrierLocation[MAXFNAMELEN] = {0}; + TupleDesc resultTupleDesc; + HeapTuple resultHeapTuple; + Datum result; + int rc = EOK; + const uint32 shiftSize = 32; + + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode to " + "gs_get_standby_cluster_barrier_status.")))); + + if (g_instance.attr.attr_common.stream_cluster_run_mode != RUN_MODE_STANDBY) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("gs_get_standby_cluster_barrier_status is only supported in standby-cluster mode.")))); + + SpinLockAcquire(&walrcv->mutex); + rc = strncpy_s((char *)latestBarrierId, MAX_BARRIER_ID_LENGTH, (char *)walrcv->lastReceivedBarrierId, + MAX_BARRIER_ID_LENGTH - 1); + securec_check(rc, "\0", "\0"); + barrierLsn = walrcv->lastReceivedBarrierLSN; + + rc = strncpy_s((char *)recoveryBarrierId, MAX_BARRIER_ID_LENGTH, (char *)walrcv->lastRecoveredBarrierId, + MAX_BARRIER_ID_LENGTH - 1); + securec_check(rc, "\0", "\0"); + + rc = strncpy_s((char *)targetBarrierId, MAX_BARRIER_ID_LENGTH, (char *)walrcv->recoveryTargetBarrierId, + MAX_BARRIER_ID_LENGTH - 1); + securec_check(rc, "\0", "\0"); + SpinLockRelease(&walrcv->mutex); + + rc = snprintf_s(barrierLocation, MAXFNAMELEN, MAXFNAMELEN - 1, "%08X/%08X", (uint32)(barrierLsn >> shiftSize), + (uint32)(barrierLsn)); + securec_check_ss(rc, "\0", "\0"); + + ereport(LOG, (errmsg("gs_get_standby_cluster_barrier_status: the latest barrier ID is %s, the 
lastReceivedBarrierId " + "is %s,recovery barrier ID is %s, target barrier ID is %s", + lastestbarrierId, walrcv->lastReceivedBarrierId, walrcv->lastRecoveredBarrierId, + walrcv->recoveryTargetBarrierId))); + + /* + * Construct a tuple descriptor for the result row. + */ + resultTupleDesc = CreateTemplateTupleDesc(PG_GET_STANDBY_BARRIER_STATUS_COLS, false, TAM_HEAP); + TupleDescInitEntry(resultTupleDesc, (AttrNumber)1, "latest_id", TEXTOID, -1, 0); + TupleDescInitEntry(resultTupleDesc, (AttrNumber)2, "barrier_lsn", TEXTOID, -1, 0); + TupleDescInitEntry(resultTupleDesc, (AttrNumber)3, "recovery_id", TEXTOID, -1, 0); + TupleDescInitEntry(resultTupleDesc, (AttrNumber)4, "target_id", TEXTOID, -1, 0); + + resultTupleDesc = BlessTupleDesc(resultTupleDesc); + + values[0] = CStringGetTextDatum(lastestbarrierId); + isnull[0] = false; + values[1] = CStringGetTextDatum(barrierLocation); + isnull[1] = false; + values[2] = CStringGetTextDatum(recoveryBarrierId); + isnull[2] = false; + values[3] = CStringGetTextDatum(targetBarrierId); + isnull[3] = false; + + resultHeapTuple = heap_form_tuple(resultTupleDesc, values, isnull); + + result = HeapTupleGetDatum(resultHeapTuple); + + PG_RETURN_DATUM(result); +} + +Datum gs_set_standby_cluster_target_barrier_id(PG_FUNCTION_ARGS) +{ + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + text *barrier = PG_GETARG_TEXT_P(0); + char *barrierstr = NULL; + char targetbarrier[MAX_BARRIER_ID_LENGTH]; + CommitSeqNo csn; + int64 ts; + char tmp[MAX_BARRIER_ID_LENGTH]; + errno_t errorno = EOK; + + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode to " + "gs_set_standby_cluster_target_barrier_id.")))); + } + + if (g_instance.attr.attr_common.stream_cluster_run_mode != RUN_MODE_STANDBY) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("gs_set_standby_cluster_target_barrier_id only support on standby-cluster-mode.")))); + + barrierstr = text_to_cstring(barrier); + + /* test input string */ + int checkCsnBarrierRes = sscanf_s(barrierstr, "csn_%21lu_%13ld%s", &csn, &ts, &tmp, sizeof(tmp)); + int checkSwitchoverBarrierRes = sscanf_s(barrierstr, "csn_%21lu_%s", &csn, &tmp, sizeof(tmp)); + if (strlen(barrierstr) != (MAX_BARRIER_ID_LENGTH - 1) || + (checkCsnBarrierRes != 2 && (checkSwitchoverBarrierRes != 2 || strcmp(tmp, "dr_switchover") != 0))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse barrier id %s", barrierstr))); + + errorno = snprintf_s(targetbarrier, MAX_BARRIER_ID_LENGTH, MAX_BARRIER_ID_LENGTH - 1, "%s", barrierstr); + securec_check_ss(errorno, "", ""); + + SpinLockAcquire(&walrcv->mutex); + errorno = strncpy_s((char *)walrcv->recoveryTargetBarrierId, MAX_BARRIER_ID_LENGTH, (char *)barrierstr, + MAX_BARRIER_ID_LENGTH - 1); + SpinLockRelease(&walrcv->mutex); + securec_check(errorno, "\0", "\0"); + + ereport(LOG, (errmsg("gs_set_standby_cluster_target_barrier_id set the barrier ID is %s", targetbarrier))); + + PG_RETURN_TEXT_P(cstring_to_text((char *)barrierstr)); +} + +Datum gs_query_standby_cluster_barrier_id_exist(PG_FUNCTION_ARGS) +{ + text *barrier = PG_GETARG_TEXT_P(0); + char *barrierstr = NULL; + CommitSeqNo *hentry = NULL; + bool found = false; + CommitSeqNo csn; + int64 ts; + char tmp[MAX_BARRIER_ID_LENGTH]; + + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) { + 
ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode to " + "gs_query_standby_cluster_barrier_id_exist.")))); + } + + if (g_instance.attr.attr_common.stream_cluster_run_mode != RUN_MODE_STANDBY) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("gs_query_standby_cluster_barrier_id_exist is only supported in standby-cluster mode.")))); + + if (g_instance.csn_barrier_cxt.barrier_hash_table == NULL) { + ereport(WARNING, (errcode(ERRCODE_OPERATE_NOT_SUPPORTED), (errmsg("barrier hash table is NULL.")))); + PG_RETURN_BOOL(found); + } + + barrierstr = text_to_cstring(barrier); + + /* test input string */ + int checkCsnBarrierRes = sscanf_s(barrierstr, "csn_%21lu_%13ld%s", &csn, &ts, &tmp, sizeof(tmp)); + int checkSwitchoverBarrierRes = sscanf_s(barrierstr, "csn_%21lu_%s", &csn, &tmp, sizeof(tmp)); + if (strlen(barrierstr) != (MAX_BARRIER_ID_LENGTH - 1) || + (checkCsnBarrierRes != 2 && (checkSwitchoverBarrierRes != 2 || strcmp(tmp, "dr_switchover") != 0))) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse barrier id %s", barrierstr))); + + ereport(LOG, (errmsg("gs_query_standby_cluster_barrier_id_exist: querying barrier ID %s", barrierstr))); + + LWLockAcquire(g_instance.csn_barrier_cxt.barrier_hashtbl_lock, LW_SHARED); + hentry = (CommitSeqNo *)hash_search(g_instance.csn_barrier_cxt.barrier_hash_table, + (void *)barrierstr, HASH_FIND, NULL); + LWLockRelease(g_instance.csn_barrier_cxt.barrier_hashtbl_lock); + if (hentry != NULL) { + found = true; + } + PG_RETURN_BOOL(found); +} diff --git a/src/gausskernel/storage/access/transam/xloginsert.cpp b/src/gausskernel/storage/access/transam/xloginsert.cpp index 63f9f5bc6..1070bb048 100755 --- a/src/gausskernel/storage/access/transam/xloginsert.cpp +++ b/src/gausskernel/storage/access/transam/xloginsert.cpp @@ -69,7 +69,7 @@ typedef struct registered_buffer { SizeOfXLogRecordDataHeaderLong + SizeOfXlogOrigin) static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_info, XLogRecPtr *fpw_lsn, - bool isupgrade = false, int bucket_id = -1); + int bucket_id = -1, bool istoast = false); static void XLogResetLogicalPage(void); /* @@ -169,7 +169,18 @@ static void XLogResetLogicalPage(void) if (PageIsLogical(regbuf->page)) PageClearLogical(regbuf->page); + + regbuf->in_use = false; + regbuf->seg_block = 0; + regbuf->seg_file_no = 0; } + + t_thrd.xlog_cxt.num_rdatas = 0; + t_thrd.xlog_cxt.max_registered_block_id = 0; + t_thrd.xlog_cxt.mainrdata_len = 0; + t_thrd.xlog_cxt.mainrdata_last = (XLogRecData *)&t_thrd.xlog_cxt.mainrdata_head; + t_thrd.xlog_cxt.include_origin = false; + t_thrd.xlog_cxt.begininsert_called = false; } /* @@ -430,15 +441,15 @@ void XLogRegisterBufData(uint8 block_id, char *data, int len) regbuf->rdata_len += len; } -void XLogInsertTrace(RmgrId rmid, uint8 info, bool isupgrade, XLogRecPtr EndPos) +void XLogInsertTrace(RmgrId rmid, uint8 info, XLogRecPtr EndPos) { int block_id; ereport(DEBUG4, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("[REDO_LOG_TRACE]XLogInsert: ProcLastRecPtr:%lu,XactLastRecEnd:%lu," - "rmid:%hhu,info:%hhu,isupgrade:%d,newPageLsn:%lu,", - t_thrd.xlog_cxt.ProcLastRecPtr, t_thrd.xlog_cxt.XactLastRecEnd, rmid, info, isupgrade, EndPos))); + "rmid:%hhu,info:%hhu,newPageLsn:%lu,", - t_thrd.xlog_cxt.ProcLastRecPtr, t_thrd.xlog_cxt.XactLastRecEnd, rmid, info, EndPos))); /* 'block_id<3' seems to be enough for problem location! 
*/ for (block_id = 0; block_id < t_thrd.xlog_cxt.max_registered_block_id && block_id < 3; block_id++) { registered_buffer *regbuf = &t_thrd.xlog_cxt.registered_buffers[block_id]; @@ -455,6 +466,19 @@ void XLogInsertTrace(RmgrId rmid, uint8 info, bool isupgrade, XLogRecPtr EndPos) } } +void XlogInsertSleep(void) +{ + if (g_instance.streaming_dr_cxt.rpoSleepTime > 0) { + pgstat_report_waitevent(WAIT_EVENT_LOGCTRL_SLEEP); + if (g_instance.streaming_dr_cxt.rpoSleepTime < MAX_RPO_SLEEP_TIME) { + pg_usleep(g_instance.streaming_dr_cxt.rpoSleepTime); + } else { + pg_usleep(MAX_RPO_SLEEP_TIME); + } + pgstat_report_waitevent(WAIT_EVENT_END); + } +} + /* * Insert an XLOG record having the specified RMID and info bytes, * with the body of the record being the data chunk(s) described by @@ -470,21 +494,24 @@ void XLogInsertTrace(RmgrId rmid, uint8 info, bool isupgrade, XLogRecPtr EndPos) * though not on the data they reference. This is OK since the XLogRecData * structs are always just temporaries in the calling code. */ -XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, bool isupgrade, int bucket_id, bool isSwitchoverBarrier) +XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, int bucket_id, bool istoast) { XLogRecPtr EndPos; - + bool isSwitchoverBarrier = ((rmid == RM_BARRIER_ID) && (info == XLOG_BARRIER_SWITCHOVER)); if (g_instance.archive_obs_cxt.in_switchover == true || g_instance.streaming_dr_cxt.isInSwitchover == true) { LWLockAcquire(HadrSwitchoverLock, LW_EXCLUSIVE); + if (isSwitchoverBarrier) + t_thrd.storage_cxt.isSwitchoverLockHolder = true; } - + + XlogInsertSleep(); + /* * The caller can set rmgr bits and XLR_SPECIAL_REL_UPDATE; the rest are * reserved for use by me. */ - if ((info & ~(XLR_RMGR_INFO_MASK | XLR_SPECIAL_REL_UPDATE | - XLR_BTREE_UPGRADE_FLAG | XLR_REL_COMPRESS | XLR_IS_TOAST)) != 0) { + if ((info & ~(XLR_RMGR_INFO_MASK | XLR_SPECIAL_REL_UPDATE | XLR_BTREE_UPGRADE_FLAG | XLR_IS_TOAST)) != 0) { ereport(PANIC, (errmsg("invalid xlog info mask %hhx", info))); } @@ -512,9 +539,9 @@ XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, bool isupgrade, int bucket_id, bo */ GetFullPageWriteInfo(&fpw_info); - rdt = XLogRecordAssemble(rmid, info, fpw_info, &fpw_lsn, isupgrade, bucket_id); + rdt = XLogRecordAssemble(rmid, info, fpw_info, &fpw_lsn, bucket_id, istoast); - EndPos = XLogInsertRecord(rdt, fpw_lsn, isupgrade); + EndPos = XLogInsertRecord(rdt, fpw_lsn); } while (XLByteEQ(EndPos, InvalidXLogRecPtr)); /* @@ -522,7 +549,7 @@ XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, bool isupgrade, int bucket_id, bo * when log level belows DEBUG4 */ if (module_logging_is_on(MOD_REDO)) { - XLogInsertTrace(rmid, info, isupgrade, EndPos); + XLogInsertTrace(rmid, info, EndPos); } /* @@ -531,14 +558,15 @@ XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, bool isupgrade, int bucket_id, bo * backuped page if any. Caller must hold the buffer lock. 
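XlogInsertSleep, added above, is the RPO throttle for streaming disaster recovery: the log controller computes rpoSleepTime from standby lag, and every XLogInsert sleeps that long, capped at MAX_RPO_SLEEP_TIME, so a lagging disaster-recovery standby slows the primary down instead of falling unboundedly behind. A minimal sketch of the clamp, with an assumed one-second cap standing in for the real constant:

#include <unistd.h>

#define MAX_RPO_SLEEP_TIME_US 1000000L /* assumed 1s cap for the sketch */

static void rpo_sleep(long sleep_us)
{
    if (sleep_us <= 0)
        return; /* log control decided no throttling is needed */
    if (sleep_us > MAX_RPO_SLEEP_TIME_US)
        sleep_us = MAX_RPO_SLEEP_TIME_US; /* never stall an insert longer than the cap */
    usleep((useconds_t)sleep_us);
}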
*/ XLogResetLogicalPage(); - - XLogResetInsertion(); /* Switchover Barrier log will not release the lock */ if ((g_instance.archive_obs_cxt.in_switchover == true || - g_instance.streaming_dr_cxt.isInSwitchover == true) - && !isSwitchoverBarrier) { - LWLockRelease(HadrSwitchoverLock); + g_instance.streaming_dr_cxt.isInSwitchover == true)) { + if (!isSwitchoverBarrier) { + LWLockRelease(HadrSwitchoverLock); + } else { + t_thrd.xlog_cxt.LocalXLogInsertAllowed = 0; + } } return EndPos; @@ -646,7 +674,7 @@ static bool XLogNeedVMPhysicalLocation(RmgrId rmi, uint8 info, int blockId) * */ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_info, XLogRecPtr *fpw_lsn, - bool isupgrade, int bucket_id) + int bucket_id, bool istoast) { XLogRecData *rdt = NULL; uint32 total_len = 0; @@ -667,7 +695,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ * The record begins with the fixed-size header */ rechdr = (XLogRecord *)scratch; - scratch += (isupgrade ? SizeOfXLogRecordOld : SizeOfXLogRecord); + scratch += SizeOfXLogRecord; t_thrd.xlog_cxt.ptr_hdr_rdt->next = NULL; rdt_datas_last = t_thrd.xlog_cxt.ptr_hdr_rdt; @@ -689,12 +717,6 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ bool samerel = false; bool tde = false; - // must be uncompressed table during upgrade - bool isCompressedTable = regbuf->rnode.opt != 0; - if (t_thrd.proc->workingVersionNum < PAGE_COMPRESSION_VERSION) { - Assert(!isCompressedTable); - } - if (!regbuf->in_use) continue; @@ -842,7 +864,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ samerel = false; prev_regbuf = regbuf; - if (!samerel && (IsSegmentFileNode(regbuf->rnode) || isCompressedTable)) { + if (!samerel && IsSegmentFileNode(regbuf->rnode)) { Assert(bkpb.id <= XLR_MAX_BLOCK_ID); bkpb.id += BKID_HAS_BUCKET_OR_SEGPAGE; } @@ -861,16 +883,6 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ if (IsSegmentFileNode(regbuf->rnode)) { XLOG_ASSEMBLE_ONE_ITEM(scratch, sizeof(RelFileNode), ®buf->rnode, remained_size); hashbucket_flag = true; - } else if (isCompressedTable) { - if (t_thrd.proc->workingVersionNum < PAGE_COMPRESSION_VERSION) { - Assert(!isCompressedTable); - RelFileNodeV2 relFileNodeV2; - RelFileNodeV2Copy(relFileNodeV2, regbuf->rnode); - XLOG_ASSEMBLE_ONE_ITEM(scratch, sizeof(RelFileNodeV2), ®buf->rnode, remained_size); - } else { - info |= XLR_REL_COMPRESS; - XLOG_ASSEMBLE_ONE_ITEM(scratch, sizeof(RelFileNode), ®buf->rnode, remained_size); - } } else { XLOG_ASSEMBLE_ONE_ITEM(scratch, sizeof(RelFileNodeOld), ®buf->rnode, remained_size); no_hashbucket_flag = true; @@ -908,11 +920,9 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ XLOG_ASSEMBLE_ONE_ITEM(scratch, sizeof(XLogRecPtr), ®buf->lastLsn, remained_size); } -#ifndef ENABLE_MULTIPLE_NODES - int m_session_id = u_sess->reporigin_cxt.originId; -#else - int m_session_id = u_sess->attr.attr_storage.replorigin_sesssion_origin; -#endif + int m_session_id = u_sess->reporigin_cxt.originId != InvalidRepOriginId ? 
+ u_sess->reporigin_cxt.originId : + u_sess->attr.attr_storage.replorigin_sesssion_origin; bool m_include_origin = t_thrd.xlog_cxt.include_origin; /* followed by the record's origin, if any */ @@ -923,10 +933,14 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ } /* followed by the record's origin, if any */ - if (m_include_origin && m_session_id != InvalidRepOriginId) { + if (m_include_origin && (m_session_id != InvalidRepOriginId || istoast)) { Assert(remained_size > 0); *(scratch++) = XLR_BLOCK_ID_ORIGIN; remained_size--; + if (istoast && t_thrd.proc->workingVersionNum >= PARALLEL_DECODE_VERSION_NUM && XLogLogicalInfoActive()) { + const int toastFlag = 1 << 8; + m_session_id |= toastFlag; + } XLOG_ASSEMBLE_ONE_ITEM(scratch, sizeof(m_session_id), &m_session_id, remained_size); } @@ -969,21 +983,13 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ * the whole record in the order: rdata, then backup blocks, then record * header. */ - if (isupgrade) { - /* using PG's CRC32 */ - INIT_CRC32(rdata_crc); - COMP_CRC32(rdata_crc, t_thrd.xlog_cxt.hdr_scratch + SizeOfXLogRecordOld, - t_thrd.xlog_cxt.ptr_hdr_rdt->len - SizeOfXLogRecordOld); - for (rdt = t_thrd.xlog_cxt.ptr_hdr_rdt->next; rdt != NULL; rdt = rdt->next) - COMP_CRC32(rdata_crc, rdt->data, rdt->len); - } else { - /* using CRC32C */ - INIT_CRC32C(rdata_crc); - COMP_CRC32C(rdata_crc, t_thrd.xlog_cxt.hdr_scratch + SizeOfXLogRecord, - t_thrd.xlog_cxt.ptr_hdr_rdt->len - SizeOfXLogRecord); - for (rdt = t_thrd.xlog_cxt.ptr_hdr_rdt->next; rdt != NULL; rdt = rdt->next) - COMP_CRC32C(rdata_crc, rdt->data, rdt->len); - } + + /* using CRC32C */ + INIT_CRC32C(rdata_crc); + COMP_CRC32C(rdata_crc, t_thrd.xlog_cxt.hdr_scratch + SizeOfXLogRecord, + t_thrd.xlog_cxt.ptr_hdr_rdt->len - SizeOfXLogRecord); + for (rdt = t_thrd.xlog_cxt.ptr_hdr_rdt->next; rdt != NULL; rdt = rdt->next) + COMP_CRC32C(rdata_crc, rdt->data, rdt->len); /* * Fill in the fields in the record header. Prev-link is filled in later, @@ -992,25 +998,25 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info, XLogFPWInfo fpw_ */ bool isUHeap = (rmid >= RM_UHEAP_ID) && (rmid <= RM_UHEAPUNDO_ID); - if (isupgrade) { - ((XLogRecordOld *)rechdr)->xl_xid = (ShortTransactionId)((isUHeap) ? - GetTopTransactionIdIfAny() : GetCurrentTransactionIdIfAny()); - ((XLogRecordOld *)rechdr)->xl_tot_len = total_len; - ((XLogRecordOld *)rechdr)->xl_info = info; - ((XLogRecordOld *)rechdr)->xl_rmid = rmid; - ((XLogRecordOld *)rechdr)->xl_prev = InvalidXLogRecPtrOld; - ((XLogRecordOld *)rechdr)->xl_crc = rdata_crc; - } else { - rechdr->xl_xid = (isUHeap) ? GetTopTransactionIdIfAny() : GetCurrentTransactionIdIfAny(); - rechdr->xl_tot_len = total_len; - rechdr->xl_info = info; - rechdr->xl_rmid = rmid; - rechdr->xl_prev = InvalidXLogRecPtr; - rechdr->xl_crc = rdata_crc; - } + rechdr->xl_xid = (isUHeap) ? 
GetTopTransactionIdIfAny() : GetCurrentTransactionIdIfAny(); + rechdr->xl_tot_len = total_len; + rechdr->xl_info = info; + rechdr->xl_rmid = rmid; + rechdr->xl_prev = InvalidXLogRecPtr; + rechdr->xl_crc = rdata_crc; Assert(hashbucket_flag == false || no_hashbucket_flag == false); rechdr->xl_term = Max(g_instance.comm_cxt.localinfo_cxt.term_from_file, g_instance.comm_cxt.localinfo_cxt.term_from_xlog); + if ((rechdr->xl_term & XLOG_CONTAIN_CSN) != 0) { + ereport(PANIC, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), + errmsg("Term has exceeded the limit %u", XLOG_MASK_TERM), + errdetail("The highest bit of term is occupied for logical decoding"), + errcause("Failover or switchover occurs too frequently"), + erraction("Make sure the cluster is under stable state."))); + } + if (t_thrd.proc->workingVersionNum >= PARALLEL_DECODE_VERSION_NUM && XLogLogicalInfoActive()) { + rechdr->xl_term |= XLOG_CONTAIN_CSN; + } rechdr->xl_bucket_id = (uint2)(bucket_id + 1); #ifdef DEBUG_UHEAP diff --git a/src/gausskernel/storage/access/transam/xlogreader.cpp b/src/gausskernel/storage/access/transam/xlogreader.cpp index c8a83e662..9226791b3 100644 --- a/src/gausskernel/storage/access/transam/xlogreader.cpp +++ b/src/gausskernel/storage/access/transam/xlogreader.cpp @@ -46,8 +46,8 @@ static THR_LOCAL int xlogreadfd = -1; static THR_LOCAL XLogSegNo xlogreadsegno = 0; #endif -bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHeader hdr, bool readoldversion); -static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen, bool readoldversion); +bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHeader hdr); +static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen, char* xlog_path); void ResetDecoder(XLogReaderState *state); static inline void prepare_invalid_report(XLogReaderState *state, char *fname, const size_t fname_len, @@ -225,8 +225,8 @@ bool allocate_recordbuf(XLogReaderState *state, uint32 reclength) * The returned pointer (or *errormsg) points to an internal buffer that's * valid until the next call to XLogReadRecord. */ -XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg, bool readoldversion, - bool doDecode) +XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg, + bool doDecode, char* xlog_path) { XLogRecord *record = NULL; XLogRecPtr targetPagePtr; @@ -249,18 +249,10 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err ResetDecoder(state); - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - if (XLByteEQ(RecPtr, InvalidXLogRecPtr)) { /* No explicit start point; read the record after the one we just read */ RecPtr = state->EndRecPtr; - if (readoldversion && (XLOG_BLCKSZ - RecPtr % XLOG_BLCKSZ) < SizeOfXLogRecordOld) { - NextLogPage(RecPtr); - } - if (XLByteEQ(state->ReadRecPtr, InvalidXLogRecPtr)) randAccess = true; @@ -293,17 +285,12 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err * enough byte to cover the whole record header, or at least the part of * it that fits on the same page. 
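Two hunks above carry a flag in an existing header field rather than widening the record: bit 8 of the replication-origin session id marks a toast record (the hunk's const int toastFlag = 1 << 8), and the highest bit of xl_term marks a record that carries a CSN (XLOG_CONTAIN_CSN), which is why the code PANICs when a raw term already has that bit set — the encoded value would be ambiguous. A sketch of the set/test/strip pattern, with an assumed flag value:

#include <stdbool.h>
#include <stdint.h>

#define TERM_CONTAIN_CSN 0x80000000u /* assumed stand-in for XLOG_CONTAIN_CSN */

static uint32_t term_set_csn_flag(uint32_t term)   { return term | TERM_CONTAIN_CSN; }
static bool     term_has_csn_flag(uint32_t term)   { return (term & TERM_CONTAIN_CSN) != 0; }
static uint32_t term_strip_csn_flag(uint32_t term) { return term & ~TERM_CONTAIN_CSN; }

/* a writer must refuse raw terms that already use the flag bit,
 * which is what the PANIC in the hunk enforces */
static bool term_is_encodable(uint32_t raw_term)   { return !term_has_csn_flag(raw_term); }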
*/ - readOff = ReadPageInternal(state, targetPagePtr, Min(targetRecOff + SizeOfXLogRecord, XLOG_BLCKSZ), readoldversion); + readOff = ReadPageInternal(state, targetPagePtr, Min(targetRecOff + SizeOfXLogRecord, XLOG_BLCKSZ), xlog_path); if (readOff < 0) { report_invalid_record(state, "read xlog page failed at %X/%X", (uint32)(RecPtr >> 32), (uint32)RecPtr); goto err; } - /* The page has been read. Check the XLOG version again. */ - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - /* * ReadPageInternal always returns at least the page header, so we can * examine it now. @@ -348,15 +335,15 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err * record" code path below; otherwise we might fail to apply * static ValidXLogRecordHeader at all. */ - if (targetRecOff <= XLOG_BLCKSZ - (readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord)) { + if (targetRecOff <= XLOG_BLCKSZ - SizeOfXLogRecord) { if (!ValidXLogRecordHeader(state, RecPtr, state->ReadRecPtr, record, randAccess)) goto err; gotheader = true; } else { /* more validation should be done here */ - if (total_len < (readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord) || total_len >= XLogRecordMaxSize) { + if (total_len < SizeOfXLogRecord || total_len >= XLogRecordMaxSize) { report_invalid_record(state, "invalid record length at %X/%X: wanted %u, got %u", (uint32)(RecPtr >> 32), - (uint32)RecPtr, (uint32)(readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord), + (uint32)RecPtr, (uint32)(SizeOfXLogRecord), total_len); goto err; } @@ -393,8 +380,7 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err XLByteAdvance(targetPagePtr, XLOG_BLCKSZ); /* Wait for the next page to become available */ - readOff = ReadPageInternal(state, targetPagePtr, Min(total_len - gotlen + SizeOfXLogShortPHD, XLOG_BLCKSZ), - readoldversion); + readOff = ReadPageInternal(state, targetPagePtr, Min(total_len - gotlen + SizeOfXLogShortPHD, XLOG_BLCKSZ), xlog_path); if (readOff < 0) goto err; @@ -422,7 +408,7 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err pageHeaderSize = XLogPageHeaderSize(pageHeader); if (readOff < (int)pageHeaderSize) - readOff = ReadPageInternal(state, targetPagePtr, pageHeaderSize, readoldversion); + readOff = ReadPageInternal(state, targetPagePtr, pageHeaderSize, xlog_path); Assert((int)pageHeaderSize <= readOff); @@ -432,7 +418,7 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err len = pageHeader->xlp_rem_len; if (readOff < (int)(pageHeaderSize + len)) - readOff = ReadPageInternal(state, targetPagePtr, pageHeaderSize + len, readoldversion); + readOff = ReadPageInternal(state, targetPagePtr, pageHeaderSize + len, xlog_path); errorno = memcpy_s(buffer, total_len - gotlen, (char *)contdata, len); securec_check_c(errorno, "", ""); @@ -460,7 +446,7 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err XLByteAdvance(state->EndRecPtr, (pageHeaderSize + MAXALIGN(pageHeader->xlp_rem_len))); } else { /* Wait for the record data to become available */ - readOff = ReadPageInternal(state, targetPagePtr, Min(targetRecOff + total_len, XLOG_BLCKSZ), readoldversion); + readOff = ReadPageInternal(state, targetPagePtr, Min(targetRecOff + total_len, XLOG_BLCKSZ), xlog_path); if (readOff < 0) { goto err; } @@ -482,20 +468,14 @@ XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **err /* * Special processing if it's an XLOG SWITCH 
record */ - - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - - if ((readoldversion ? ((XLogRecordOld *)record)->xl_rmid : ((XLogRecord *)record)->xl_rmid) == RM_XLOG_ID && - (readoldversion ? ((XLogRecordOld *)record)->xl_info : ((XLogRecord *)record)->xl_info) == XLOG_SWITCH) { + if (((XLogRecord *)record)->xl_rmid == RM_XLOG_ID && ((XLogRecord *)record)->xl_info == XLOG_SWITCH) { /* Pretend it extends to end of segment */ state->EndRecPtr += XLogSegSize - 1; state->EndRecPtr -= state->EndRecPtr % XLogSegSize; } if (doDecode) { - if (DecodeXLogRecord(state, record, errormsg, readoldversion)) { + if (DecodeXLogRecord(state, record, errormsg)) { return record; } else { return NULL; @@ -517,205 +497,6 @@ err: return NULL; } -/* - * Avoid the record provided for XlogReadRecord is not a valid record, - * maybe necessary to skip the LogLongPHD in startpoint of segment - * or startpoint of page - */ -void AlignXlogPtrToNextPageIfNeeded(XLogRecPtr *recPtr) -{ - if (((*recPtr) % XLogSegSize) < SizeOfXLogLongPHD) { - *recPtr -= ((*recPtr) % XLogSegSize); - XLByteAdvance((*recPtr), SizeOfXLogLongPHD); - } else if (((*recPtr) % XLOG_BLCKSZ) < SizeOfXLogShortPHD) { - *recPtr -= ((*recPtr) % XLOG_BLCKSZ); - XLByteAdvance((*recPtr), SizeOfXLogShortPHD); - } -} - -/* - * Ref XLogFindNextRecord() to find the first record with at an lsn >= RecPtr. - * - * Useful for checking wether RecPtr is a valid xlog address for reading and to - * find the first valid address after some address when dumping records for - * debugging purposes. - */ -bool ValidateNextXLogRecordPtr(XLogReaderState *state, XLogRecPtr &cur_ptr, char **err_msg) -{ - if (state == NULL) { - Assert(false); - return false; - } - - XLogPageHeader header = NULL; - bool has_err = false; - XLogReaderState saved_state = *state; - XLogRecPtr RecPtr = cur_ptr; - XLogRecPtr targetPagePtr = InvalidXLogRecPtr; - XLogRecPtr err_ptr = InvalidXLogRecPtr; - int targetRecOff = 0; - uint32 pageHeaderSize = 0; - int read_len = 0; - int err_req_len = 0; - bool isOldFormat = false; - char *errormsg = NULL; - - /* reset error state */ - *err_msg = NULL; - state->errormsg_buf[0] = '\0'; - - /* - * Here MUST be adapted for 64 bit xid: - * Read the xlog page including cur_ptr and check the page header - * to ensure whether this xlog is old format or new format supporting - * the 64 bit xlog or not. - */ - targetRecOff = cur_ptr % XLOG_BLCKSZ; - - if (targetRecOff > 0) { - /* scroll back to page boundary */ - targetPagePtr = cur_ptr - targetRecOff; - - /* Read the page containing the record */ - read_len = ReadPageInternal(state, targetPagePtr, targetRecOff, true); - if (read_len < 0) { - err_ptr = targetPagePtr; - err_req_len = targetRecOff; - has_err = true; - goto err; - } - - header = (XLogPageHeader)state->readBuf; - - pageHeaderSize = XLogPageHeaderSize(header); - - /* make sure we have enough data for the page header */ - read_len = ReadPageInternal(state, targetPagePtr, pageHeaderSize, true); - if (read_len < 0) { - err_ptr = targetPagePtr; - err_req_len = (int)pageHeaderSize; - has_err = true; - goto err; - } - - isOldFormat = (header->xlp_magic == XLOG_PAGE_MAGIC_OLD); - } - - /* Ref findLastCheckpoint() and XLogReadRecord() to find the first valid xlog record following the start_ptr pos. */ - if (isOldFormat && (XLOG_BLCKSZ - cur_ptr % XLOG_BLCKSZ) < SizeOfXLogRecordOld) - NextLogPage(cur_ptr); - - /* - * If at page start, we must skip over the page header using xrecoff check. 
- */ - if (cur_ptr % XLogSegSize == 0) - XLByteAdvance(cur_ptr, SizeOfXLogLongPHD); - else if (cur_ptr % XLOG_BLCKSZ == 0) - XLByteAdvance(cur_ptr, SizeOfXLogShortPHD); - - /* - * skip over potential continuation data, keeping in mind that it may span - * multiple pages - */ - while (true) { - /* - * Compute targetRecOff. It should typically be equal or greater than - * short page-header since a valid record can't start anywhere before - * that, except when caller has explicitly specified the offset that - * falls somewhere there or when we are skipping multi-page - * continuation record. It doesn't matter though because - * ReadPageInternal() is prepared to handle that and will read at least - * short page-header worth of data - */ - targetRecOff = cur_ptr % XLOG_BLCKSZ; - - /* scroll back to page boundary */ - targetPagePtr = cur_ptr - targetRecOff; - - /* Read the page containing the record */ - read_len = ReadPageInternal(state, targetPagePtr, targetRecOff, true); - if (read_len < 0) { - err_ptr = targetPagePtr; - err_req_len = targetRecOff; - has_err = true; - goto err; - } - - header = (XLogPageHeader)state->readBuf; - - pageHeaderSize = XLogPageHeaderSize(header); - - /* make sure we have enough data for the page header */ - read_len = ReadPageInternal(state, targetPagePtr, pageHeaderSize, true); - if (read_len < 0) { - err_ptr = targetPagePtr; - err_req_len = (int)pageHeaderSize; - has_err = true; - goto err; - } - - /* skip over potential continuation data */ - if (header->xlp_info & XLP_FIRST_IS_CONTRECORD) { - /* - * If the length of the remaining continuation data is more than - * what can fit in this page, the continuation record crosses over - * this page. Read the next page and try again. xlp_rem_len in the - * next page header will contain the remaining length of the - * continuation data - * - * Note that record headers are MAXALIGN'ed - */ - if (MAXALIGN(header->xlp_rem_len) >= (XLOG_BLCKSZ - pageHeaderSize)) { - cur_ptr = targetPagePtr; - XLByteAdvance(cur_ptr, XLOG_BLCKSZ); - } else { - /* - * The previous continuation record ends in this page. Set - * tmpRecPtr to point to the first valid record - */ - cur_ptr = targetPagePtr; - XLByteAdvance(cur_ptr, pageHeaderSize + MAXALIGN(header->xlp_rem_len)); - break; - } - } else { - cur_ptr = targetPagePtr; - XLByteAdvance(cur_ptr, pageHeaderSize); - break; - } - } - /* - * we know now that cur_ptr is an address pointing to a valid XLogRecord - * because either we're at the first record after the beginning of a page - * or we just jumped over the remaining data of a continuation. - */ - while (XLogReadRecord(state, cur_ptr, &errormsg, true) != NULL) { - /* continue after the record */ - cur_ptr = InvalidXLogRecPtr; - - /* past the record we've found, break out */ - if (XLByteLE(RecPtr, state->ReadRecPtr)) { - cur_ptr = state->ReadRecPtr; - goto out; - } - } -err: - if (has_err) { - report_invalid_record(state, "Failed to read the xlog record info %d bytes at %X/%X.", err_req_len, - (uint32)(err_ptr >> 32), (uint32)err_ptr); - - *err_msg = state->errormsg_buf; - - return false; - } -out: - /* Reset state to what we had before finding the record */ - state->ReadRecPtr = saved_state.ReadRecPtr; - state->EndRecPtr = saved_state.EndRecPtr; - XLogReaderInvalReadState(state); - - return (cur_ptr != InvalidXLogRecPtr); -} - /* * Read a single xlog page including at least [pageptr, reqLen] of valid data * via the read_page() callback. 
@@ -726,7 +507,7 @@ out: * We fetch the page from a reader-local cache if we know we have the required * data and if there hasn't been any error since caching the data. */ -static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen, bool readoldversion) +static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen, char* xlog_path) { int readLen; uint32 targetPageOff; @@ -748,7 +529,7 @@ static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqL * so that we can validate it. */ readLen = state->read_page(state, pageptr, Max(reqLen, (int)SizeOfXLogShortPHD), state->currRecPtr, state->readBuf, - &state->readPageTLI); + &state->readPageTLI, xlog_path); if (readLen < 0) { goto err; } @@ -764,14 +545,10 @@ static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqL hdr = (XLogPageHeader)state->readBuf; - if (hdr->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - /* still not enough */ if (readLen < (int)XLogPageHeaderSize(hdr)) { readLen = state->read_page(state, pageptr, XLogPageHeaderSize(hdr), state->currRecPtr, state->readBuf, - &state->readPageTLI); + &state->readPageTLI, xlog_path); if (readLen < 0) { goto err; } @@ -780,7 +557,7 @@ static int ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqL /* * Now that we know we have the full header, validate it. */ - if (!ValidXLogPageHeader(state, pageptr, hdr, readoldversion)) { + if (!ValidXLogPageHeader(state, pageptr, hdr)) { goto err; } @@ -816,35 +593,20 @@ void XLogReaderInvalReadState(XLogReaderState *state) bool ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, XLogRecPtr PrevRecPtr, XLogRecord *record, bool randAccess) { - bool readoldversion = true; XLogRecPtr xl_prev; - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } - - if ((readoldversion ? ((XLogRecordOld *)record)->xl_tot_len : ((XLogRecord *)record)->xl_tot_len) < - (readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord) || - (readoldversion ? ((XLogRecordOld *)record)->xl_tot_len : ((XLogRecord *)record)->xl_tot_len) >= - XLogRecordMaxSize) { + if (record->xl_tot_len < SizeOfXLogRecord || record->xl_tot_len >= XLogRecordMaxSize) { report_invalid_record(state, "invalid record length at %X/%X: wanted %u, got %u", (uint32)(RecPtr >> 32), - (uint32)RecPtr, (uint32)(readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord), - (readoldversion ? ((XLogRecordOld *)record)->xl_tot_len - : ((XLogRecord *)record)->xl_tot_len)); + (uint32)RecPtr, (uint32)SizeOfXLogRecord, record->xl_tot_len); return false; } - if ((readoldversion ? ((XLogRecordOld *)record)->xl_rmid : ((XLogRecord *)record)->xl_rmid) > RM_MAX_ID) { - report_invalid_record(state, "invalid resource manager ID %u at %X/%X", - (readoldversion ? 
((XLogRecordOld *)record)->xl_rmid : ((XLogRecord *)record)->xl_rmid), + if (record->xl_rmid > RM_MAX_ID) { + report_invalid_record(state, "invalid resource manager ID %u at %X/%X", record->xl_rmid, (uint32)(RecPtr >> 32), (uint32)RecPtr); return false; } - if (readoldversion) { - xl_prev = XLogRecPtrSwap(((XLogRecordOld *)record)->xl_prev); - } else { - xl_prev = record->xl_prev; - } + xl_prev = record->xl_prev; if (randAccess) { /* * We can't exactly verify the prev-link, but surely it should be less @@ -883,60 +645,37 @@ bool ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, XLogRecPtr */ bool ValidXLogRecord(XLogReaderState *state, XLogRecord *record, XLogRecPtr recptr) { - bool readoldversion = true; pg_crc32c crc; /* pg_crc32c is same as pg_crc32 */ - if (((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC) { - readoldversion = false; - } + /* using CRC32C since V1R8C10 */ + INIT_CRC32C(crc); + COMP_CRC32C(crc, ((char *)record) + SizeOfXLogRecord, record->xl_tot_len - SizeOfXLogRecord); + /* include the record header last */ + COMP_CRC32C(crc, (char *)record, offsetof(XLogRecord, xl_crc)); + FIN_CRC32C(crc); - if (readoldversion && ((XLogPageHeader)state->readBuf)->xlp_magic == XLOG_PAGE_MAGIC_OLD) { - /* using PG's CRC32 before V1R8C10 */ - INIT_CRC32(crc); - COMP_CRC32(crc, ((char *)record) + SizeOfXLogRecordOld, - ((XLogRecordOld *)record)->xl_tot_len - SizeOfXLogRecordOld); - - /* include the record header last */ - COMP_CRC32(crc, (char *)record, offsetof(XLogRecordOld, xl_crc)); - FIN_CRC32(crc); - - if (!EQ_CRC32(((XLogRecordOld *)record)->xl_crc, crc)) { - report_invalid_record(state, "incorrect resource manager data checksum in old version record at %X/%X", - (uint32)(recptr >> 32), (uint32)recptr); - return false; - } - return true; - } else { - /* using CRC32C since V1R8C10 */ - INIT_CRC32C(crc); - COMP_CRC32C(crc, ((char *)record) + SizeOfXLogRecord, record->xl_tot_len - SizeOfXLogRecord); - /* include the record header last */ - COMP_CRC32C(crc, (char *)record, offsetof(XLogRecord, xl_crc)); - FIN_CRC32C(crc); - - if (!EQ_CRC32C(record->xl_crc, crc)) { + if (!EQ_CRC32C(record->xl_crc, crc)) { #ifdef FRONTEND - report_invalid_record(state, "incorrect resource manager data checksum in record at %X/%X", - (uint32)(recptr >> 32), (uint32)recptr); + report_invalid_record(state, "incorrect resource manager data checksum in record at %X/%X", + (uint32)(recptr >> 32), (uint32)recptr); #else - report_invalid_record(state, - "incorrect resource manager data checksum in record at %X/%X, " - "record info: xl_info=%d, xl_prev=%X/%X, xl_rmid=%d, xl_tot_len=%u, xl_xid= %lu", - (uint32)(recptr >> 32), (uint32)recptr, record->xl_info, - (uint32)(record->xl_prev >> 32), (uint32)(record->xl_prev), record->xl_rmid, - record->xl_tot_len, record->xl_xid); + report_invalid_record(state, + "incorrect resource manager data checksum in record at %X/%X, " + "record info: xl_info=%d, xl_prev=%X/%X, xl_rmid=%d, xl_tot_len=%u, xl_xid= %lu", + (uint32)(recptr >> 32), (uint32)recptr, record->xl_info, + (uint32)(record->xl_prev >> 32), (uint32)(record->xl_prev), record->xl_rmid, + record->xl_tot_len, record->xl_xid); #endif - return false; - } - - return true; + return false; } + + return true; } /* * Validate a page header */ -bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHeader hdr, bool readoldversion) +bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHeader hdr) { XLogRecPtr recaddr; XLogSegNo segno; @@ -953,17 +692,9 
@@ bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHead if (hdr->xlp_magic != XLOG_PAGE_MAGIC) { prepare_invalid_report(state, fname, MAXFNAMELEN, segno); - - if (hdr->xlp_magic == XLOG_PAGE_MAGIC_OLD && !readoldversion) { - report_invalid_record(state, "read old version XLog, magic number %04X in log segment %s, offset %u", - hdr->xlp_magic, fname, offset); - return false; - } - if (hdr->xlp_magic != XLOG_PAGE_MAGIC_OLD) { - report_invalid_record(state, "invalid magic number %04X in log segment %s, offset %u", hdr->xlp_magic, - fname, offset); - return false; - } + report_invalid_record(state, "invalid magic number %04X in log segment %s, offset %u", hdr->xlp_magic, + fname, offset); + return false; } if ((hdr->xlp_info & ~XLP_ALL_FLAGS) != 0) { @@ -1025,11 +756,8 @@ bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHead return false; } - if (readoldversion) { - xlp_pageaddr = (((hdr->xlp_pageaddr) >> 32) | ((hdr->xlp_pageaddr) << 32)); - } else { - xlp_pageaddr = hdr->xlp_pageaddr; - } + + xlp_pageaddr = hdr->xlp_pageaddr; if (!XLByteEQ(xlp_pageaddr, recaddr)) { prepare_invalid_report(state, fname, MAXFNAMELEN, segno); @@ -1074,7 +802,7 @@ bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, XLogPageHead * find the first valid address after some address when dumping records for * debugging purposes. */ -XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr, XLogRecPtr *endPtr) +XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr, XLogRecPtr *endPtr, char *xlog_path) { XLogReaderState saved_state = *state; XLogRecPtr tmpRecPtr; @@ -1110,7 +838,7 @@ XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr, XLogRec targetPagePtr = tmpRecPtr - targetRecOff; /* Read the page containing the record */ - readLen = ReadPageInternal(state, targetPagePtr, Max(targetRecOff, (int)SizeOfXLogLongPHD), true); + readLen = ReadPageInternal(state, targetPagePtr, Max(targetRecOff, (int)SizeOfXLogLongPHD), xlog_path); if (readLen < 0) { goto out; } @@ -1120,7 +848,7 @@ XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr, XLogRec pageHeaderSize = XLogPageHeaderSize(header); /* make sure we have enough data for the page header */ - readLen = ReadPageInternal(state, targetPagePtr, pageHeaderSize, true); + readLen = ReadPageInternal(state, targetPagePtr, pageHeaderSize, xlog_path); if (readLen < 0) { goto out; } @@ -1159,7 +887,7 @@ XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr, XLogRec * because either we're at the first record after the beginning of a page * or we just jumped over the remaining data of a continuation. 
*/ - while (XLogReadRecord(state, tmpRecPtr, &errormsg, true) != NULL) { + while (XLogReadRecord(state, tmpRecPtr, &errormsg, true, xlog_path) != NULL) { /* continue after the record */ tmpRecPtr = InvalidXLogRecPtr; @@ -1200,15 +928,13 @@ void ResetDecoder(XLogReaderState *state) state->blocks[block_id].has_image = false; state->blocks[block_id].has_data = false; state->blocks[block_id].data_len = 0; - if (state->blocks[block_id].tdeinfo != NULL) { - pfree(state->blocks[block_id].tdeinfo); - state->blocks[block_id].tdeinfo = NULL; - } + state->blocks[block_id].tdeinfo = NULL; #ifdef USE_ASSERT_CHECKING state->blocks[block_id].replayed = 0; #endif } state->max_block_id = -1; + state->readblocks = 0; } /* @@ -1223,25 +949,13 @@ void ResetDecoder(XLogReaderState *state) remaining -= sizeof(type); \ } while (0) -/** - * happens during the upgrade, copy the RelFileNodeV2 to RelFileNode - * support little-endian system - * @param relfileNode relfileNode - */ -static void CompressTableRecord(RelFileNode* relfileNode) -{ - if (relfileNode->bucketNode <= -1 && relfileNode->opt == 0xFFFF) { - relfileNode->opt = 0; - } -} - /* * Decode the previously read record. * * On error, a human-readable error message is returned in *errormsg, and * the return value is false. */ -bool DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg, bool readoldversion) +bool DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg) { char *ptr = NULL; uint32 remaining; @@ -1255,9 +969,8 @@ bool DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errorms state->record_origin = InvalidRepOriginId; ptr = (char *)record; - ptr += readoldversion ? SizeOfXLogRecordOld : SizeOfXLogRecord; - remaining = readoldversion ? ((XLogRecordOld *)record)->xl_tot_len - SizeOfXLogRecordOld - : record->xl_tot_len - SizeOfXLogRecord; + ptr += SizeOfXLogRecord; + remaining = record->xl_tot_len - SizeOfXLogRecord; /* Decode the headers */ datatotal = 0; @@ -1354,21 +1067,15 @@ bool DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errorms if (remaining < filenodelen) goto shortdata_err; blk->rnode.bucketNode = InvalidBktId; - blk->rnode.opt = 0; errno_t rc = memcpy_s(&blk->rnode, filenodelen, ptr, filenodelen); securec_check(rc, "\0", "\0"); - /* support decode old version of relfileNode */ - CompressTableRecord(&blk->rnode); ptr += filenodelen; remaining -= filenodelen; if (state->isTde) { - blk->tdeinfo = (TdeInfo*)palloc_extended(sizeof(TdeInfo), MCXT_ALLOC_NO_OOM); - if (blk->tdeinfo == NULL) { - report_invalid_record(state, "Failed to allocate memory for blk->tdeinfo"); - goto err; - } - DECODE_XLOG_ONE_ITEM(*(blk->tdeinfo), TdeInfo); + blk->tdeinfo = (TdeInfo*)ptr; + ptr += sizeof(TdeInfo); + remaining -= sizeof(TdeInfo); } DECODE_XLOG_ONE_ITEM(blk->extra_flag, uint16); @@ -1503,48 +1210,9 @@ char *XLogRecGetBlockImage(XLogReaderState *record, uint8 block_id, uint16 *hole return bkpb->bkp_image; } -/* - * Restore a full-page image from a backup block attached to an XLOG record. - * - * Returns the buffer number containing the page. 
- */ -bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) -{ - DecodedBkpBlock *bkpb = NULL; - errno_t rc = EOK; - - if (!record->blocks[block_id].in_use) - return false; - if (!record->blocks[block_id].has_image) - return false; - - bkpb = &record->blocks[block_id]; - - if (bkpb->hole_length == 0) { - rc = memcpy_s(page, BLCKSZ, bkpb->bkp_image, BLCKSZ); - securec_check(rc, "", ""); - } else { - rc = memcpy_s(page, BLCKSZ, bkpb->bkp_image, bkpb->hole_offset); - securec_check(rc, "", ""); - /* must zero-fill the hole */ - rc = memset_s(page + bkpb->hole_offset, BLCKSZ - bkpb->hole_offset, 0, bkpb->hole_length); - securec_check(rc, "", ""); - - Assert(bkpb->hole_offset + bkpb->hole_length <= BLCKSZ); - if (bkpb->hole_offset + bkpb->hole_length == BLCKSZ) - return true; - - rc = memcpy_s(page + (bkpb->hole_offset + bkpb->hole_length), BLCKSZ - (bkpb->hole_offset + bkpb->hole_length), - bkpb->bkp_image + bkpb->hole_offset, BLCKSZ - (bkpb->hole_offset + bkpb->hole_length)); - securec_check(rc, "", ""); - } - - return true; -} - /* XLogreader callback function, to read a WAL page */ int SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *readBuf, TimeLineID *pageTLI) + char *readBuf, TimeLineID *pageTLI, char* xlog_path) { XLogPageReadPrivate *readprivate = (XLogPageReadPrivate *)xlogreader->private_data; uint32 targetPageOff; diff --git a/src/gausskernel/storage/access/transam/xlogutils.cpp b/src/gausskernel/storage/access/transam/xlogutils.cpp index 28cba03ec..c6ca01577 100644 --- a/src/gausskernel/storage/access/transam/xlogutils.cpp +++ b/src/gausskernel/storage/access/transam/xlogutils.cpp @@ -29,6 +29,7 @@ #include "access/xlogproc.h" #include "access/multi_redo_api.h" #include "access/parallel_recovery/dispatcher.h" +#include "access/extreme_rto/page_redo.h" #include "catalog/catalog.h" #include "catalog/storage_xlog.h" #include "miscadmin.h" @@ -43,8 +44,8 @@ #include "utils/hsearch.h" #include "utils/rel.h" #include "utils/rel_gs.h" - #include "commands/dbcommands.h" +#include "postmaster/pagerepair.h" /* * During XLOG replay, we may see XLOG records for incremental updates of @@ -86,6 +87,11 @@ static void report_invalid_page(int elevel, xl_invalid_page *invalid_page) ereport(elevel, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("page %u (lsn: %lx) of relation %s (%u/%u) lsn check error", invalid_page->key.blkno, invalid_page->lsn, path, invalid_page->pblk.relNode, invalid_page->pblk.block))); + } else if (invalid_page->type == CRC_CHECK_ERROR) { + ereport(elevel, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("page %u of relation %s (%u/%u) crc check error", invalid_page->key.blkno, + path, invalid_page->pblk.relNode, invalid_page->pblk.block))); } else if (invalid_page->type == SEGPAGE_LSN_CHECK_ERROR) { ereport(elevel, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), @@ -196,6 +202,43 @@ void log_invalid_page(const RelFileNode &node, ForkNumber forkno, BlockNumber bl } } +static bool specified_invalid_page_match(xl_invalid_page *entry, RepairBlockKey key) +{ + if (RelFileNodeEquals(entry->key.node, key.relfilenode) && entry->key.forkno == key.forknum && + entry->key.blkno == key.blocknum) { + return true; + } + return false; +} + +void forget_specified_invalid_pages(RepairBlockKey key) +{ + HASH_SEQ_STATUS status; + xl_invalid_page *hentry = NULL; + + if (t_thrd.xlog_cxt.invalid_page_tab == NULL) + return; /* nothing to do */ + + MemoryContext oldCtx = NULL; + 
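/* + * When multi-threaded redo is running, the invalid-page table and its + * entries live in the parallel-redo memory context, so switch to it before + * scanning or removing entries. + */ +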
if (IsMultiThreadRedoRunning()) { + oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); + } + + hash_seq_init(&status, t_thrd.xlog_cxt.invalid_page_tab); + + while ((hentry = (xl_invalid_page *)hash_seq_search(&status)) != NULL) { + if (specified_invalid_page_match(hentry, key)) { + if (hash_search(t_thrd.xlog_cxt.invalid_page_tab, (void *)&hentry->key, HASH_REMOVE, NULL) == NULL) { + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("hash table corrupted"))); + } + } + } + + if (IsMultiThreadRedoRunning()) { + (void)MemoryContextSwitchTo(oldCtx); + } +} + static bool single_invalid_page_match(xl_invalid_page *invalid_page, const RelFileNode &node, ForkNumber forkno, BlockNumber minblkno, bool segment_shrink) { @@ -211,6 +254,7 @@ static bool single_invalid_page_match(xl_invalid_page *invalid_page, const RelFi } } + /* * Forget any invalid pages >= minblkno, because they've been dropped * @@ -251,6 +295,71 @@ static void forget_invalid_pages(const RelFileNode &node, ForkNumber forkno, Blo } } +static bool range_invalid_page_match(xl_invalid_page *invalid_page, const RelFileNode &node, + ForkNumber forkno, BlockNumber minblkno, BlockNumber maxblkno) +{ + bool node_equal = false; + + if (invalid_page->pblk.relNode != EXTENT_INVALID) { + /* repair file, need compare the pblk info */ + RelFileNode rnode = invalid_page->key.node; + rnode.relNode = invalid_page->pblk.relNode; + + node_equal = RelFileNodeRelEquals(node, rnode); + return node_equal && invalid_page->key.forkno == forkno && invalid_page->pblk.block >= minblkno && + invalid_page->pblk.block <= maxblkno; + } else { + node_equal = RelFileNodeRelEquals(node, invalid_page->key.node); + + return node_equal && invalid_page->key.forkno == forkno && invalid_page->key.blkno >= minblkno && + invalid_page->key.blkno <= maxblkno; + } +} + +void forget_range_invalid_pages(void *pageinfo) +{ + HASH_SEQ_STATUS status; + xl_invalid_page *hentry = NULL; + RepairFileKey *key = NULL; + RelFileNode node; + ForkNumber forknum; + BlockNumber minblkno; + BlockNumber maxblkno; + + if (t_thrd.xlog_cxt.invalid_page_tab == NULL) + return; /* nothing to do */ + + key = (RepairFileKey*)pageinfo; + node = key->relfilenode; + forknum = key->forknum; + minblkno = key->segno * RELSEG_SIZE; + maxblkno = (key->segno + 1) * RELSEG_SIZE - 1; + + MemoryContext oldCtx = NULL; + if (IsMultiThreadRedoRunning()) { + oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); + } + + hash_seq_init(&status, t_thrd.xlog_cxt.invalid_page_tab); + + while ((hentry = (xl_invalid_page *)hash_seq_search(&status)) != NULL) { + if (range_invalid_page_match(hentry, node, forknum, minblkno, maxblkno)) { + char *path = relpathperm(hentry->key.node, forknum); + ereport(DEBUG1, (errmodule(MOD_REDO), + errmsg("[file repair] file %s seg %u rename finish, clean invalid page, minblkno is %u, maxblkno is %u", + path, key->segno, minblkno, maxblkno))); + pfree(path); + + if (hash_search(t_thrd.xlog_cxt.invalid_page_tab, (void *)&hentry->key, HASH_REMOVE, NULL) == NULL) + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("hash table corrupted"))); + } + } + + if (IsMultiThreadRedoRunning()) { + (void)MemoryContextSwitchTo(oldCtx); + } +} + static inline bool invalid_page_match(RelFileNode *rnode, Oid spcNode, Oid dbNode) { if (OidIsValid(spcNode) && rnode->spcNode != spcNode) { @@ -574,7 +683,8 @@ XLogRedoAction XLogReadBufferForRedoBlockExtend(RedoBufferTag *redoblock, ReadBu * be skipped, as later commit xlog will remove the invalid page 
*/ /* If lsn check fails, return invalidate buffer */ - if (!DoLsnCheck(redobufferinfo, willinit, last_lsn, pblk)) { + bool needRepair = false; /* Cannot determine whether the segment page can be repaired. */ + if (!DoLsnCheck(redobufferinfo, willinit, last_lsn, pblk, &needRepair)) { redobufferinfo->buf = InvalidBuffer; redobufferinfo->pageinfo = {0}; UnlockReleaseBuffer(buf); @@ -589,11 +699,23 @@ XLogRedoAction XLogReadBufferForRedoBlockExtend(RedoBufferTag *redoblock, ReadBu return BLK_NOTFOUND; } +void checkBlockFlag(ReadBufferMode mode, bool willinit) +{ + bool zeromode = false; + + zeromode = (mode == RBM_ZERO || mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK); + if (willinit && !zeromode) + ereport(PANIC, (errmsg("block with WILL_INIT flag in WAL record must be zeroed by redo routine"))); + if (!willinit && zeromode) + ereport(PANIC, + (errmsg( + "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record"))); +} + XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, RedoBufferInfo *bufferinfo, ReadBufferMethod readmethod) { - bool zeromode = false; bool willinit = false; RedoBufferTag blockinfo; XLogRedoAction redoaction; @@ -610,14 +732,8 @@ XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 bloc * Make sure that if the block is marked with WILL_INIT, the caller is * going to initialize it. And vice versa. */ - zeromode = (mode == RBM_ZERO || mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK); willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0; - if (willinit && !zeromode) - ereport(PANIC, (errmsg("block with WILL_INIT flag in WAL record must be zeroed by redo routine"))); - if (!willinit && zeromode) - ereport(PANIC, - (errmsg( - "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record"))); + checkBlockFlag(mode, willinit); xloghasblockimage = XLogRecHasBlockImage(record, block_id); if (xloghasblockimage) { @@ -626,7 +742,6 @@ XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 bloc if (record->isTde) { tde = InsertTdeInfoToCache(blockinfo.rnode, record->blocks[block_id].tdeinfo); - pfree_ext(record->blocks[block_id].tdeinfo); } redoaction = XLogReadBufferForRedoBlockExtend(&blockinfo, mode, get_cleanup_lock, bufferinfo, record->EndRecPtr, @@ -657,8 +772,24 @@ XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 bloc if (!XLogRecGetBlockLastLsn(record, block_id, &lastLsn)) { ereport(PANIC, (errmsg("can not get xlog lsn from record page block %u", block_id))); } + bool needRepair = false; bool notSkip = DoLsnCheck(bufferinfo, willinit, lastLsn, - (blockinfo.pblk.relNode != InvalidOid) ? &blockinfo.pblk : NULL); + (blockinfo.pblk.relNode != InvalidOid) ? 
&blockinfo.pblk : NULL, &needRepair); + + if (needRepair) { + XLogRecPtr pageCurLsn = PageGetLSN(bufferinfo->pageinfo.page); + RepairBlockKey key; + + UnlockReleaseBuffer(bufferinfo->buf); + (void)XLogRecGetBlockTag(record, block_id, &key.relfilenode, &key.forknum, &key.blocknum); + parallel_recovery::RecordBadBlockAndPushToRemote(record, key, LSN_CHECK_FAIL, pageCurLsn, + blockinfo.pblk); + + bufferinfo->buf = InvalidBuffer; + bufferinfo->pageinfo = {0}; + return BLK_NOTFOUND; + } + if (!notSkip) { return BLK_DONE; } @@ -748,9 +879,18 @@ Buffer XLogReadBufferExtendedWithoutBuffer(RelFileNode rnode, ForkNumber forknum if (blkno < lastblock) { buffer = ReadBuffer_common_for_direct(rnode, RELPERSISTENCE_PERMANENT, forknum, blkno, mode); + if (BufferIsInvalid(buffer)) { + return InvalidBuffer; + } } else { if (mode == RBM_NORMAL) { log_invalid_page(rnode, forknum, blkno, NOT_PRESENT, NULL); + RepairFileKey key; + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = blkno / RELSEG_SIZE; + + CheckNeedRecordBadFile(key, lastblock, blkno, NULL); return InvalidBuffer; } if (mode == RBM_NORMAL_NO_LOG) @@ -773,6 +913,9 @@ Buffer XLogReadBufferExtendedWithoutBuffer(RelFileNode rnode, ForkNumber forknum if (curblknum != blkno) { XLogRedoBufferReleaseFunc(buffer); buffer = ReadBuffer_common_for_direct(rnode, RELPERSISTENCE_PERMANENT, forknum, blkno, mode); + if (BufferIsInvalid(buffer)) { + return InvalidBuffer; + } } } @@ -782,6 +925,11 @@ Buffer XLogReadBufferExtendedWithoutBuffer(RelFileNode rnode, ForkNumber forknum Assert(!PageIsLogical(page)); XLogRedoBufferReleaseFunc(buffer); log_invalid_page(rnode, forknum, blkno, NOT_INITIALIZED, NULL); + RepairFileKey key; + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = blkno / RELSEG_SIZE; + CheckNeedRecordBadFile(key, lastblock, blkno, NULL); return InvalidBuffer; } } @@ -824,6 +972,9 @@ static Buffer XLogReadBufferExceedFileRange(const RelFileNode &rnode, ForkNumber /* 2. 
extend to the aimed size */ spc_extend_file(spc, pblk->relNode, forknum, pblk->block + 1); buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno, mode, NULL, pblk); + if (BufferIsInvalid(buffer)) { + return buffer; + } } else { /* * OK to extend the file @@ -888,6 +1039,7 @@ Buffer XLogReadBufferExtendedForHeapDisk(const RelFileNode &rnode, ForkNumber fo Buffer buffer; Page page; SMgrRelation smgr; + BlockNumber lastblock = 0; Assert(blkno != P_NEW); @@ -910,24 +1062,36 @@ Buffer XLogReadBufferExtendedForHeapDisk(const RelFileNode &rnode, ForkNumber fo SegSpace *spc = spc_open(rnode.spcNode, rnode.dbNode, false); if (spc == NULL || !spc_datafile_exist(spc, pblk->relNode, forknum)) { pageExistsInFile = false; + lastblock = 0; } else { pageExistsInFile = seg_fork_exists(spc, smgr, forknum, pblk); + lastblock = spc_size(spc, pblk->relNode, forknum); } } else { smgrcreate(smgr, forknum, true); - BlockNumber lastblock = smgrnblocks(smgr, forknum); + lastblock = smgrnblocks(smgr, forknum); pageExistsInFile = blkno < lastblock; } smgr->encrypt = tde; - + if (pageExistsInFile) { /* page exists in file */ buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno, mode, NULL, pblk); + if (BufferIsInvalid(buffer)) { + return buffer; + } } else { buffer = XLogReadBufferExceedFileRange(rnode, forknum, blkno, mode, pblk); if (BufferIsInvalid(buffer)) { - // can not read + /* can not read */ + if (mode == RBM_NORMAL) { + RepairFileKey key; + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = blkno / RELSEG_SIZE; + CheckNeedRecordBadFile(key, lastblock, blkno, pblk); + } return buffer; } } @@ -949,6 +1113,11 @@ Buffer XLogReadBufferExtendedForHeapDisk(const RelFileNode &rnode, ForkNumber fo if (PageIsNew(page)) { Assert(!PageIsLogical(page)); ReleaseBuffer(buffer); + RepairFileKey key; + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = blkno / RELSEG_SIZE; + CheckNeedRecordBadFile(key, lastblock, blkno, pblk); log_invalid_page(rnode, forknum, blkno, NOT_INITIALIZED, pblk); return InvalidBuffer; } @@ -968,8 +1137,8 @@ Buffer XLogReadBufferExtendedForSegpage(const RelFileNode &rnode, ForkNumber for Buffer buffer; BlockNumber spc_nblocks = 0; SegSpace *spc = spc_open(rnode.spcNode, rnode.dbNode, true, true); - spc_datafile_create(spc, rnode.relNode, forknum); + spc_datafile_create(spc, rnode.relNode, forknum); spc_nblocks = spc_size(spc, rnode.relNode, forknum); if (blkno < spc_nblocks) { @@ -983,6 +1152,11 @@ Buffer XLogReadBufferExtendedForSegpage(const RelFileNode &rnode, ForkNumber for rnode.spcNode, rnode.dbNode, rnode.relNode, rnode.bucketNode, forknum, blkno, spc_nblocks))); /* As segpage use physical block number, we do not have to set pblk */ + RepairFileKey key; + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = blkno / RELSEG_SIZE; + CheckNeedRecordBadFile(key, spc_nblocks, blkno, NULL); log_invalid_page(rnode, forknum, blkno, NOT_PRESENT, NULL); return InvalidBuffer; } @@ -997,6 +1171,11 @@ Buffer XLogReadBufferExtendedForSegpage(const RelFileNode &rnode, ForkNumber for if (PageIsNew(page)) { SegmentCheck(XLogRecPtrIsInvalid(PageGetLSN(page))); SegReleaseBuffer(buffer); + RepairFileKey key; + key.relfilenode = rnode; + key.forknum = forknum; + key.segno = blkno / RELSEG_SIZE; + CheckNeedRecordBadFile(key, spc_nblocks, blkno, NULL); log_invalid_page(rnode, forknum, blkno, NOT_INITIALIZED, NULL); return InvalidBuffer; } @@ -1133,39 +1312,18 @@ void XlogDropRowReation(RelFileNode rnode) smgrclosenode(rbnode); } -void 
XLogDropBktRowRelation(XLogRecParseState *redoblockstate) -{ - Assert(redoblockstate->blockparse.blockhead.block_valid == BLOCK_DATA_DDL_TYPE); - RelFileNode rnode; - rnode.spcNode = redoblockstate->blockparse.blockhead.spcNode; - rnode.dbNode = redoblockstate->blockparse.blockhead.dbNode; - rnode.relNode = redoblockstate->blockparse.blockhead.relNode; - rnode.opt = redoblockstate->blockparse.blockhead.opt; - uint32 *bktmap = (uint32 *)redoblockstate->blockparse.extra_rec.blockddlrec.mainData; - for (uint32 bktNode = 0; bktNode < MAX_BUCKETMAPLEN; bktNode++) { - if (!GET_BKT_MAP_BIT(bktmap, bktNode)) { - continue; - } - - rnode.bucketNode = bktNode; - XlogDropRowReation(rnode); - } -} - void XLogForgetDDLRedo(XLogRecParseState *redoblockstate) { XLogBlockDdlParse *ddlrecparse = &(redoblockstate->blockparse.extra_rec.blockddlrec); - if (redoblockstate->blockparse.extra_rec.blockddlrec.blockddltype == BLOCK_DDL_DROP_BKTLIST) { - XLogDropBktRowRelation(redoblockstate); - } else if (ddlrecparse->blockddltype == BLOCK_DDL_DROP_RELNODE) { - if (redoblockstate->blockparse.blockhead.forknum <= MAX_FORKNUM) { - RelFileNode relNode; - relNode.spcNode = redoblockstate->blockparse.blockhead.spcNode; - relNode.dbNode = redoblockstate->blockparse.blockhead.dbNode; - relNode.relNode = redoblockstate->blockparse.blockhead.relNode; - relNode.bucketNode = redoblockstate->blockparse.blockhead.bucketNode; - relNode.opt = redoblockstate->blockparse.blockhead.opt; - XlogDropRowReation(relNode); + if (ddlrecparse->blockddltype == BLOCK_DDL_DROP_RELNODE) { + ColFileNodeRel *xnodes = (ColFileNodeRel *)ddlrecparse->mainData; + for (int i = 0; i < ddlrecparse->rels; ++i) { + ColFileNodeRel *colFileNodeRel = xnodes + i; + ColFileNode colFileNode; + ColFileNodeCopy(&colFileNode, colFileNodeRel); + if (!IsValidColForkNum(colFileNode.forknum)) { + XlogDropRowReation(colFileNode.filenode); + } } } else if (ddlrecparse->blockddltype == BLOCK_DDL_TRUNCATE_RELNODE) { RelFileNode relNode; @@ -1173,7 +1331,6 @@ void XLogForgetDDLRedo(XLogRecParseState *redoblockstate) relNode.dbNode = redoblockstate->blockparse.blockhead.dbNode; relNode.relNode = redoblockstate->blockparse.blockhead.relNode; relNode.bucketNode = redoblockstate->blockparse.blockhead.bucketNode; - relNode.opt = redoblockstate->blockparse.blockhead.opt; XLogTruncateRelation(relNode, redoblockstate->blockparse.blockhead.forknum, redoblockstate->blockparse.blockhead.blkno); } @@ -1185,8 +1342,7 @@ void XLogDropSpaceShrink(XLogRecParseState *redoblockstate) .spcNode = redoblockstate->blockparse.blockhead.spcNode, .dbNode = redoblockstate->blockparse.blockhead.dbNode, .relNode = redoblockstate->blockparse.blockhead.relNode, - .bucketNode = redoblockstate->blockparse.blockhead.bucketNode, - .opt = redoblockstate->blockparse.blockhead.opt + .bucketNode = redoblockstate->blockparse.blockhead.bucketNode }; ForkNumber forknum = redoblockstate->blockparse.blockhead.forknum; BlockNumber target_size = redoblockstate->blockparse.blockhead.blkno; @@ -1203,6 +1359,19 @@ void XLogDropSpaceShrink(XLogRecParseState *redoblockstate) void XLogDropRelation(const RelFileNode &rnode, ForkNumber forknum) { forget_invalid_pages(rnode, forknum, 0, false); + + /* clear relfilenode match entry of recovery thread hashtbl */ + if (IsExtremeRedo()) { + extreme_rto::ClearRecoveryThreadHashTbl(rnode, forknum, 0, false); + } else { + parallel_recovery::ClearRecoveryThreadHashTbl(rnode, forknum, 0, false); + } + + /* clear relfilenode match entry of page repair thread hashtbl */ + 
ClearPageRepairHashTbl(rnode, forknum, 0, false); + if (!IsSegmentFileNode(rnode)) { + ClearBadFileHashTbl(rnode, forknum, 0); + } } bool IsDataBaseDrop(XLogReaderState *record) @@ -1239,10 +1408,12 @@ bool IsSegPageDropSpace(XLogReaderState *record) (XLogRecGetInfo(record) & ~XLR_INFO_MASK) == XLOG_SEG_SPACE_DROP); } -bool IsBarrierCreate(XLogReaderState *record) +bool IsBarrierRelated(XLogReaderState *record) { return (XLogRecGetRmid(record) == RM_BARRIER_ID && - (XLogRecGetInfo(record) & ~XLR_INFO_MASK) == XLOG_BARRIER_CREATE); + ((XLogRecGetInfo(record) & ~XLR_INFO_MASK) == XLOG_BARRIER_CREATE || + (XLogRecGetInfo(record) & ~XLR_INFO_MASK) == XLOG_BARRIER_COMMIT || + (XLogRecGetInfo(record) & ~XLR_INFO_MASK) == XLOG_BARRIER_SWITCHOVER)); } /* @@ -1261,6 +1432,15 @@ void XLogDropDatabase(Oid dbid) smgrcloseall(); forget_invalid_pages_batch(InvalidOid, dbid); + + /* clear dbNode match entry of recovery thread hashtbl */ + if (IsExtremeRedo()) { + extreme_rto::BatchClearRecoveryThreadHashTbl(InvalidOid, dbid); + } else { + parallel_recovery::BatchClearRecoveryThreadHashTbl(InvalidOid, dbid); + } + BatchClearPageRepairHashTbl(InvalidOid, dbid); + BatchClearBadFileHashTbl(InvalidOid, dbid); } /* @@ -1269,6 +1449,16 @@ void XLogDropDatabase(Oid dbid) void XLogDropSegmentSpace(Oid spcNode, Oid dbNode) { forget_invalid_pages_batch(spcNode, dbNode); + + /* clear spcNode and dbNode match entry of recovery thread hashtbl */ + if (IsExtremeRedo()) { + extreme_rto::BatchClearRecoveryThreadHashTbl(spcNode, dbNode); + } else { + parallel_recovery::BatchClearRecoveryThreadHashTbl(spcNode, dbNode); + } + /* clear spcNode and dbNode match entry of page repair thread hashtbl */ + BatchClearPageRepairHashTbl(spcNode, dbNode); + BatchClearBadFileHashTbl(spcNode, dbNode); } /* @@ -1276,19 +1466,32 @@ void XLogDropSegmentSpace(Oid spcNode, Oid dbNode) * * We need to clean up any open "invalid-page" records for the dropped pages. */ -void XLogTruncateRelation(XLogReaderState *record, const RelFileNode &rnode, ForkNumber forkNum, BlockNumber nblocks) -{ - forget_invalid_pages(rnode, forkNum, nblocks, false); -} - void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks) { forget_invalid_pages(rnode, forkNum, nblocks, false); + /* clear relfilenode match entry of recovery thread hashtbl */ + if (IsExtremeRedo()) { + extreme_rto::ClearRecoveryThreadHashTbl(rnode, forkNum, nblocks, false); + } else { + parallel_recovery::ClearRecoveryThreadHashTbl(rnode, forkNum, nblocks, false); + } + ClearPageRepairHashTbl(rnode, forkNum, nblocks, false); + int truncate_segno = (nblocks % RELSEG_SIZE) == 0 ? (nblocks / RELSEG_SIZE) : (nblocks / RELSEG_SIZE + 1); + ClearBadFileHashTbl(rnode, forkNum, truncate_segno); } void XLogTruncateSegmentSpace(RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks) { forget_invalid_pages(rnode, forkNum, nblocks, true); + /* clear relfilenode match entry of recovery thread hashtbl */ + if (IsExtremeRedo()) { + extreme_rto::ClearRecoveryThreadHashTbl(rnode, forkNum, nblocks, true); + } else { + parallel_recovery::ClearRecoveryThreadHashTbl(rnode, forkNum, nblocks, true); + } + ClearPageRepairHashTbl(rnode, forkNum, nblocks, true); + int truncate_segno = (nblocks % RELSEG_SIZE) == 0 ? (nblocks / RELSEG_SIZE) : (nblocks / RELSEG_SIZE + 1); + ClearBadFileHashTbl(rnode, forkNum, truncate_segno); } /* @@ -1408,7 +1611,7 @@ static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) * loop for now. 
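* * For illustration only (sketched from this patch's signatures, not an * authoritative API): this function is installed as the reader's read_page * callback, and callers now thread an optional xlog directory through each * call, e.g. * * XLogReaderState *reader = XLogReaderAllocate(&read_local_xlog_page, NULL); * (void)XLogReadRecord(reader, start_lsn, &errmsg, true, xlog_path);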
*/ int read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *cur_page, TimeLineID *pageTLI) + char *cur_page, TimeLineID *pageTLI, char* xlog_path) { XLogRecPtr read_upto, loc, loc_page; int count; diff --git a/src/gausskernel/storage/access/ubtree/Makefile b/src/gausskernel/storage/access/ubtree/Makefile index bddfdb761..6540bcf0a 100644 --- a/src/gausskernel/storage/access/ubtree/Makefile +++ b/src/gausskernel/storage/access/ubtree/Makefile @@ -10,6 +10,6 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif OBJS = ubtree.o ubtinsert.o ubtpage.o ubtsort.o ubtutils.o ubtsearch.o \ - ubtsplitloc.o ubtsplitloc_insertpt.o ubtxlog.o ubtrecycle.o + ubtsplitloc.o ubtsplitloc_insertpt.o ubtxlog.o ubtdump.o ubtrecycle.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/access/ubtree/ubtdump.cpp b/src/gausskernel/storage/access/ubtree/ubtdump.cpp new file mode 100644 index 000000000..82d61e43e --- /dev/null +++ b/src/gausskernel/storage/access/ubtree/ubtdump.cpp @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * ------------------------------------------------------------------------- + * + * ubtdump.cpp + * Dump debug info for UBtree. + * + * IDENTIFICATION + * src/gausskernel/storage/access/ubtree/ubtdump.cpp + * ------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "access/nbtree.h" +#include "access/ubtree.h" +#include "utils/builtins.h" + +void UBTreeVerifyIndex(Relation rel, TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols) +{ + uint32 errVerified = 0; + UBTPageOpaqueInternal opaque = NULL; + BTScanInsert cmpKeys = UBTreeMakeScanKey(rel, NULL); + Buffer buf = UBTreeGetRoot(rel, BT_READ); + if (!BufferIsValid(buf)) { + pfree(cmpKeys); + return; /* empty index */ + } + /* find the leftmost leaf page */ + while (true) { + Page page = BufferGetPage(buf); + opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + if (P_ISLEAF(opaque)) { + break; /* it's a leaf page, we are done */ + } + OffsetNumber offnum = P_FIRSTDATAKEY(opaque); + ItemId itemid = PageGetItemId(page, offnum); + IndexTuple itup = (IndexTuple) PageGetItem(page, itemid); + BlockNumber blkno = UBTreeTupleGetDownLink(itup); + /* drop the read lock on the parent page, acquire one on the child */ + buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ); + } + /* we got a leaf page, but we are not sure it's the leftmost page */ + while (!P_LEFTMOST(opaque)) { + BlockNumber blkno = opaque->btpo_prev; + buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ); + Page page = BufferGetPage(buf); + opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + } + /* now we can scan over the whole tree to verify each page */ + IndexTuple prevHikey = NULL; + while (true) { + Page page = BufferGetPage(buf); + opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + BlockNumber blkno = opaque->btpo_next; + if (P_IGNORE(opaque)) { + buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ); + 
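/* deleted or half-dead page: already stepped right, nothing to verify */ +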
continue; + } + int errorCode = UBTreeVerifyOnePage(rel, page, cmpKeys, prevHikey); + if (errorCode != VERIFY_NORMAL) { + UBTreeVerifyRecordOutput(VERIFY_MAIN_PAGE, BufferGetBlockNumber(buf), errorCode, tupDesc, tupstore, cols); + errVerified++; + } + if (P_RIGHTMOST(opaque)) { + break; + } + prevHikey = (IndexTuple)PageGetItem(page, PageGetItemId(page, P_HIKEY)); + buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ); + } + _bt_relbuf(rel, buf); + pfree(cmpKeys); + /* last, we need to verify the recycle queue */ + errVerified += UBTreeVerifyRecycleQueue(rel, tupDesc, tupstore, cols); + /* every page is ok, output normal state */ + if (errVerified == 0) { + UBTreeVerifyRecordOutput(VERIFY_MAIN_PAGE, 0, VERIFY_NORMAL, tupDesc, tupstore, cols); + } +} + +void UBTreeVerifyRecordOutput(uint blkType, BlockNumber blkno, int errorCode, + TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols) +{ + Assert(cols == UBTREE_VERIFY_OUTPUT_PARAM_CNT); + bool nulls[cols] = {false}; + Datum values[cols]; + values[0] = CStringGetTextDatum(UBTGetVerifiedPageTypeStr(blkType)); + values[1] = UInt32GetDatum(blkno); + values[UBTREE_VERIFY_OUTPUT_PARAM_CNT - 1] = CStringGetTextDatum(UBTGetVerifiedResultStr((uint32) errorCode)); + tuplestore_putvalues(tupstore, *tupDesc, values, nulls); +} + +static Buffer StepNextRecyclePage(Relation rel, Buffer buf) +{ + Page page = BufferGetPage(buf); + UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + BlockNumber nextBlkno = header->nextBlkno; + Buffer nextBuf = ReadRecycleQueueBuffer(rel, nextBlkno); + UnlockReleaseBuffer(buf); + LockBuffer(nextBuf, BT_READ); + return nextBuf; +} +uint32 UBTreeVerifyRecycleQueueFork(Relation rel, UBTRecycleForkNumber forkNum, TupleDesc *tupDesc, + Tuplestorestate *tupstore, uint32 cols) +{ + uint32 errVerified = 0; + RelationOpenSmgr(rel); + BlockNumber urqBlocks = rel->rd_smgr->smgr_fsm_nblocks; + BlockNumber forkMetaBlkno = forkNum; + Buffer metaBuf = ReadRecycleQueueBuffer(rel, forkMetaBlkno); + LockBuffer(metaBuf, BT_READ); + UBTRecycleMeta metaData = (UBTRecycleMeta)PageGetContents(BufferGetPage(metaBuf)); + BlockNumber headBlkno = metaData->headBlkno; + BlockNumber tailBlkno = metaData->tailBlkno; + if (headBlkno > urqBlocks) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, Int32GetDatum(forkMetaBlkno), + VERIFY_RECYCLE_QUEUE_HEAD_ERROR, tupDesc, tupstore, cols); + } + if (tailBlkno > urqBlocks) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, Int32GetDatum(forkMetaBlkno), + VERIFY_RECYCLE_QUEUE_TAIL_ERROR, tupDesc, tupstore, cols); + } + BlockNumber nblocks = RelationGetNumberOfBlocks(rel); + if (metaData->nblocksUpper > nblocks) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, Int32GetDatum(forkMetaBlkno), VERIFY_INCONSISTENT_USED_PAGE, + tupDesc, tupstore, cols); + } + + UnlockReleaseBuffer(metaBuf); + /* check that we can traverse from head to tail and back to head again */ + uint32 visitedPages = 0; + bool tailVisited = false; + Buffer buf = ReadRecycleQueueBuffer(rel, headBlkno); + LockBuffer(buf, BT_READ); + while (true) { + if (BufferGetBlockNumber(buf) == tailBlkno) { + tailVisited = true; + } + buf = StepNextRecyclePage(rel, buf); + if (visitedPages++ > urqBlocks) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, Int32GetDatum(forkMetaBlkno), + VERIFY_RECYCLE_QUEUE_ENDLESS, tupDesc, tupstore, cols); + UnlockReleaseBuffer(buf); + return errVerified; + } + if 
(BufferGetBlockNumber(buf) == headBlkno) { + break; + } + } + + UnlockReleaseBuffer(buf); + if (!tailVisited) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, Int32GetDatum(forkMetaBlkno), + VERIFY_RECYCLE_QUEUE_TAIL_MISSED, tupDesc, tupstore, cols); + return errVerified; + } + /* check that each entry and free list are well arranged */ + buf = ReadRecycleQueueBuffer(rel, headBlkno); + LockBuffer(buf, BT_READ); + Page page = BufferGetPage(buf); + UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + /* follow the chain to find the Head page */ + while ((header->flags & URQ_HEAD_PAGE) == 0) { + buf = StepNextRecyclePage(rel, buf); + page = BufferGetPage(buf); + header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + } + /* now we traverse the whole queue from the head page */ + while (true) { + errVerified += UBTreeRecycleQueuePageDump(rel, buf, false, tupDesc, tupstore, cols); + buf = StepNextRecyclePage(rel, buf); + page = BufferGetPage(buf); + header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + if ((header->flags & URQ_TAIL_PAGE) == 0) { + break; + } + } + UnlockReleaseBuffer(buf); + RelationCloseSmgr(rel); + return errVerified; +} + +uint32 UBTreeVerifyRecycleQueue(Relation rel, TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols) +{ + uint32 errVerified = 0; + RelationOpenSmgr(rel); + BlockNumber urqBlocks = rel->rd_smgr->smgr_fsm_nblocks; + if (urqBlocks < minRecycleQueueBlockNumber) { + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, 0, VERIFY_RECYCLE_QUEUE_PAGE_TOO_LESS, tupDesc, tupstore, + cols); + RelationCloseSmgr(rel); + errVerified++; + return errVerified; + } + errVerified += UBTreeVerifyRecycleQueueFork(rel, RECYCLE_EMPTY_FORK, tupDesc, tupstore, cols); + errVerified += UBTreeVerifyRecycleQueueFork(rel, RECYCLE_FREED_FORK, tupDesc, tupstore, cols); + RelationCloseSmgr(rel); + return errVerified; +} +int UBTreeVerifyOnePage(Relation rel, Page page, BTScanInsert cmpKeys, IndexTuple prevHikey) +{ + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + /* get compare info */ + TupleDesc tupdes = RelationGetDescr(rel); + int keysz = IndexRelationGetNumberOfKeyAttributes(rel); + OffsetNumber firstPos = P_FIRSTDATAKEY(opaque); + OffsetNumber lastPos = PageGetMaxOffsetNumber(page); + if (lastPos < firstPos) { + return VERIFY_NORMAL; /* empty */ + } + /* compare last key and HIKEY first */ + if (!P_RIGHTMOST(opaque)) { + IndexTuple lastKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, lastPos)); + IndexTuple hikey = (IndexTuple)PageGetItem(page, PageGetItemId(page, P_HIKEY)); + /* we must hold: hikey > lastKey, it's equals to !(hikey <= lastKey) */ + if (_index_tuple_compare(tupdes, cmpKeys->scankeys, keysz, hikey, lastKey)) { + return VERIFY_HIKEY_ERROR; + } + } + /* if prevHikey passed in, we also need to check it */ + if (prevHikey) { + IndexTuple firstKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); + /* we must hold: previous hikey <= firstKey */ + if (!_index_tuple_compare(tupdes, cmpKeys->scankeys, keysz, prevHikey, firstKey)) { + return VERIFY_PREV_HIKEY_ERROR; + } + } + /* now check key orders */ + IndexTuple curKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, firstPos)); + for (OffsetNumber nxtPos = OffsetNumberNext(firstPos); nxtPos <= lastPos; nxtPos = OffsetNumberNext(nxtPos)) { + IndexTuple nextKey = (IndexTuple)PageGetItem(page, PageGetItemId(page, nxtPos)); + /* current key must <= next key */ + if 
(!_index_tuple_compare(tupdes, cmpKeys->scankeys, keysz, curKey, nextKey)) { + return VERIFY_ORDER_ERROR; + } + curKey = nextKey; + } + /* now check transaction info */ + if (P_ISLEAF(opaque)) { + TransactionId maxXid = ReadNewTransactionId(); + for (OffsetNumber pos = firstPos; pos <= lastPos; pos = OffsetNumberNext(pos)) { + IndexTuple itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, pos)); + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); + if (TransactionIdFollowsOrEquals(opaque->xid_base, maxXid)) { + return VERIFY_XID_BASE_TOO_LARGE; + } + TransactionId xmin = ShortTransactionIdToNormal(opaque->xid_base, uxid->xmin); + TransactionId xmax = ShortTransactionIdToNormal(opaque->xid_base, uxid->xmax); + if (TransactionIdFollowsOrEquals(xmin, maxXid)) { + return VERIFY_XID_TOO_LARGE; + } + if (TransactionIdFollowsOrEquals(xmax, maxXid)) { + return VERIFY_XID_TOO_LARGE; + } + PG_TRY(); + { + if (TransactionIdDidCommit(xmax) && !TransactionIdDidCommit(xmin)) { + return VERIFY_XID_ORDER_ERROR; + } + CommitSeqNo csn1 = TransactionIdGetCommitSeqNo(xmin, false, false, false, NULL); + CommitSeqNo csn2 = TransactionIdGetCommitSeqNo(xmax, false, false, false, NULL); + if (COMMITSEQNO_IS_COMMITTED(csn1) && COMMITSEQNO_IS_COMMITTED(csn2) && + (csn1 > csn2)) { + return VERIFY_CSN_ORDER_ERROR; + } + bool xminCommittedByCSN = COMMITSEQNO_IS_COMMITTED(csn1); + bool xmaxCommittedByCSN = COMMITSEQNO_IS_COMMITTED(csn2); + if (TransactionIdDidCommit(xmin) != xminCommittedByCSN || + TransactionIdDidCommit(xmax) != xmaxCommittedByCSN) { + return VERIFY_INCONSISTENT_XID_STATUS; + } + } + PG_CATCH(); + { + /* hit some errors when fetching xid status */ + return VERIFY_XID_STATUS_ERROR; + } + PG_END_TRY(); + } + } + return VERIFY_NORMAL; +} + +void UBTreeDumpRecycleQueueFork(Relation rel, UBTRecycleForkNumber forkNum, TupleDesc *tupDesc, + Tuplestorestate *tupstore, uint32 cols) +{ + BlockNumber forkMetaBlkno = forkNum; + Buffer metaBuf = ReadRecycleQueueBuffer(rel, forkMetaBlkno); + LockBuffer(metaBuf, BT_READ); + UBTRecycleMeta metaData = (UBTRecycleMeta)PageGetContents(BufferGetPage(metaBuf)); + BlockNumber headBlkno = metaData->headBlkno; + UnlockReleaseBuffer(metaBuf); + + /* check that we can traverse from head to tail and back to head again */ + Buffer buf = ReadRecycleQueueBuffer(rel, headBlkno); + LockBuffer(buf, BT_READ); + Page page = BufferGetPage(buf); + UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + + /* now we traverse the whole queue from the head page */ + while (true) { + (void)UBTreeRecycleQueuePageDump(rel, buf, true, tupDesc, tupstore, cols); + buf = StepNextRecyclePage(rel, buf); + page = BufferGetPage(buf); + header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + if ((header->flags & URQ_TAIL_PAGE) == 0) { + break; + } + } + UnlockReleaseBuffer(buf); +} + +char* UBTGetVerifiedPageTypeStr(uint32 type) +{ + switch (type) { + case VERIFY_MAIN_PAGE: + return "main page"; + case VERIFY_RECYCLE_QUEUE_PAGE: + return "recycle queue page"; + default: + return "unknown page"; + } +} + +char* UBTGetVerifiedResultStr(uint32 type) +{ + switch (type) { + case VERIFY_XID_BASE_TOO_LARGE: + return "xid base is too large"; + case VERIFY_XID_TOO_LARGE: + return "xid is too large"; + case VERIFY_HIKEY_ERROR: + return "hikey error"; + case VERIFY_PREV_HIKEY_ERROR: + return "prev hikey error"; + case VERIFY_ORDER_ERROR: + return "index order error"; + case VERIFY_XID_ORDER_ERROR: + return "xid order error"; + case 
VERIFY_CSN_ORDER_ERROR: + return "csn order error"; + case VERIFY_INCONSISTENT_XID_STATUS: + return "inconsistent xid status"; + case VERIFY_XID_STATUS_ERROR: + return "xid status error"; + case VERIFY_RECYCLE_QUEUE_HEAD_ERROR: + return "recycle queue head error"; + case VERIFY_RECYCLE_QUEUE_TAIL_ERROR: + return "recycle queue tail error"; + case VERIFY_INCONSISTENT_USED_PAGE: + return "inconsistent used page"; + case VERIFY_RECYCLE_QUEUE_ENDLESS: + return "recycle queue endless"; + case VERIFY_RECYCLE_QUEUE_TAIL_MISSED: + return "recycle queue tail missed"; + case VERIFY_RECYCLE_QUEUE_PAGE_TOO_LESS: + return "recycle queue page too less"; + case VERIFY_RECYCLE_QUEUE_OFFSET_ERROR: + return "recycle queue offset error"; + case VERIFY_RECYCLE_QUEUE_XID_TOO_LARGE: + return "xid of recycle queue too large"; + case VERIFY_RECYCLE_QUEUE_UNEXPECTED_TAIL: + return "unexpected tail of recycle queue"; + case VERIFY_RECYCLE_QUEUE_FREE_LIST_ERROR: + return "recycle queue freelist error"; + case VERIFY_RECYCLE_QUEUE_FREE_LIST_INVALID_OFFSET: + return "invalid offset of recycle queue freelist"; + case VERIFY_NORMAL: + return "normal"; + default: + return "unknown verified results"; + } +} diff --git a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp index e7f5394ae..ba7a823d0 100644 --- a/src/gausskernel/storage/access/ubtree/ubtinsert.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtinsert.cpp @@ -23,15 +23,18 @@ #include "access/xlog.h" #include "access/xloginsert.h" #include "access/genam.h" - +#include "catalog/index.h" #include "miscadmin.h" #include "storage/buf/bufmgr.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" #include "storage/predicate.h" +#include "storage/smgr/smgr.h" #include "storage/proc.h" +#include "storage/procarray.h" #include "utils/inval.h" #include "utils/snapmgr.h" +#include "pgstat.h" static TransactionId UBTreeCheckUnique(Relation rel, IndexTuple itup, Relation heapRel, Buffer buf, OffsetNumber offset, BTScanInsert itup_key, IndexUniqueCheck checkUnique, bool *is_unique, GPIScanDesc gpiScan); @@ -66,7 +69,7 @@ bool UBTreePagePruneOpt(Relation rel, Buffer buf, bool tryDelete) { Page page = BufferGetPage(buf); UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); - TransactionId oldestXmin; + TransactionId oldestXmin = InvalidTransactionId; WHITEBOX_TEST_STUB("UBTreePagePruneOpt", WhiteboxDefaultErrorEmit); @@ -94,7 +97,7 @@ bool UBTreePagePruneOpt(Relation rel, Buffer buf, bool tryDelete) return false; } - oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; + GetOldestXminForUndo(&oldestXmin); Assert(TransactionIdIsValid(oldestXmin)); @@ -152,8 +155,7 @@ bool UBTreePagePruneOpt(Relation rel, Buffer buf, bool tryDelete) * * Note: we won't delete the high key (if any). 
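* * For a global partition index, invisibleParts (when non-NULL) lists the * partitions whose tuples are no longer visible: any tuple whose partition * oid is found in that tree is recorded as dead.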
*/ -bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, IndexBulkDeleteCallback callback, - void *callbackState) +bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, OidRBTree *invisibleParts) { int npreviousDead = 0; int16 activeTupleCount = 0; @@ -188,19 +190,17 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, IndexBu continue; } - if (callback && RelationIsGlobalIndex(rel)) { - bool isnull = false; - Oid partOid = InvalidOid; + if (RelationIsGlobalIndex(rel) && invisibleParts != NULL) { AttrNumber partitionOidAttr = IndexRelationGetNumberOfAttributes(rel); TupleDesc tupdesc = RelationGetDescr(rel); IndexTuple itup = (IndexTuple)PageGetItem(page, itemid); - ItemPointer htup = &(itup->t_tid); - partOid = DatumGetUInt32(index_getattr(itup, partitionOidAttr, tupdesc, &isnull)); + bool isnull = false; + Oid partOid = DatumGetUInt32(index_getattr(itup, partitionOidAttr, tupdesc, &isnull)); Assert(!isnull); - if (callback(htup, callbackState, partOid, InvalidBktId)) { + if (OidRBTreeMemberOid(invisibleParts, partOid)) { /* record dead */ prstate.nowdead[prstate.ndead] = offnum; prstate.ndead++; @@ -221,6 +221,7 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, IndexBu /* Have we found any prunable items? */ if (npreviousDead > 0 || prstate.ndead > 0) { /* Set up flags and try to repair page fragmentation */ + WaitState oldStatus = pgstat_report_waitstatus(STATE_PRUNE_INDEX); UBTreePagePruneExecute(page, prstate.previousdead, npreviousDead, &prstate); UBTreePagePruneExecute(page, prstate.nowdead, prstate.ndead, &prstate); @@ -256,6 +257,7 @@ bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, IndexBu PageSetLSN(page, recptr); } + pgstat_report_waitstatus(oldStatus); } else { /* * If we didn't prune anything, but have found a new value for the @@ -447,9 +449,14 @@ bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, { bool is_unique = false; BTScanInsert itupKey; - BTStack stack; + BTStack stack = NULL; Buffer buf; OffsetNumber offset; + Oid indexHeapRelOid = InvalidOid; + Relation indexHeapRel = NULL; + Partition part = NULL; + Relation partRel = NULL; + bool checkingunique = (checkUnique != UNIQUE_CHECK_NO); WHITEBOX_TEST_STUB("UBTreeDoInsert-begin", WhiteboxDefaultErrorEmit); @@ -462,8 +469,21 @@ bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, GPIScanDesc gpiScan = NULL; if (RelationIsGlobalIndex(rel)) { + Assert(RelationIsPartition(heapRel)); GPIScanInit(&gpiScan); - gpiScan->parentRelation = relation_open(heapRel->parentId, AccessShareLock); + indexHeapRelOid = IndexGetRelation(rel->rd_id, false); + Assert(OidIsValid(indexHeapRelOid)); + + if (indexHeapRelOid == heapRel->grandparentId) { // For subpartition table + indexHeapRel = relation_open(indexHeapRelOid, AccessShareLock); + Assert(RelationIsSubPartitioned(indexHeapRel)); + part = partitionOpen(indexHeapRel, heapRel->parentId, AccessShareLock); + partRel = partitionGetRelation(indexHeapRel, part); + gpiScan->parentRelation = partRel; + partitionClose(indexHeapRel, part, AccessShareLock); + } else { /* gpi of partitioned table */ + gpiScan->parentRelation = relation_open(heapRel->parentId, AccessShareLock); + } } if (checkingunique) { @@ -487,14 +507,80 @@ bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, } } - top: /* - * Find the first page containing this key. 
Buffer returned by - _bt_search() is locked in exclusive mode + * It's very common to have an index on an auto-incremented or + * monotonically increasing value. In such cases, every insertion happens + * towards the end of the index. We try to optimise that case by caching + * the right-most leaf of the index. If our cached block is still the + * rightmost leaf, has enough free space to accommodate a new entry and + * the insertion key is strictly greater than the first key in this page, + * then we can safely conclude that the new key will be inserted in the + * cached block. So we simply search within the cached block and insert the + * key at the appropriate location. We call it a fastpath. + * + * Testing has revealed, though, that the fastpath can result in increased + * contention on the exclusive-lock on the rightmost leaf page. So we + * conditionally check if the lock is available. If it's not available then + * we simply abandon the fastpath and take the regular path. This makes + * sense because unavailability of the lock also signals that some other + * backend might be concurrently inserting into the page, thus reducing our + * chances of finding an insertion place in this page. */ - stack = UBTreeSearch(rel, itupKey, &buf, BT_WRITE); +top: + bool fastpath = false; offset = InvalidOffsetNumber; + if (RelationGetTargetBlock(rel) != InvalidBlockNumber) { + /* + * Conditionally acquire exclusive lock on the buffer before doing any + * checks. If we don't get the lock, we simply follow slowpath. If we + * do get the lock, this ensures that the index state cannot change, as + * far as the rightmost part of the index is concerned. + */ + buf = ReadBuffer(rel, RelationGetTargetBlock(rel)); + if (ConditionalLockBuffer(buf)) { + _bt_checkpage(rel, buf); + + Page page = BufferGetPage(buf); + UBTPageOpaqueInternal lpageop = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + Size itemsz = MAXALIGN(IndexTupleSize(itup)); + + /* + * Check if the page is still the rightmost leaf page, has enough + * free space to accommodate the new tuple, no split is in progress + * and the scankey is strictly greater than the first key on the + * page. + */ + if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) && !P_INCOMPLETE_SPLIT(lpageop) && !P_IGNORE(lpageop) && + (PageGetFreeSpace(page) > itemsz) && PageGetMaxOffsetNumber(page) >= P_FIRSTDATAKEY(lpageop) && + UBTreeCompare(rel, itupKey, page, P_FIRSTDATAKEY(lpageop), InvalidBuffer) > 0) { + fastpath = true; + } else { + _bt_relbuf(rel, buf); + + /* + * Something did not work out. Just forget about the cached + * block and follow the normal path. It might be set again if + * the conditions are favourable. + */ + RelationSetTargetBlock(rel, InvalidBlockNumber); + } + } else { + ReleaseBuffer(buf); + + /* + * If someone's holding a lock, it's likely to change anyway, + * so don't try again until we get an updated rightmost leaf. + */ + RelationSetTargetBlock(rel, InvalidBlockNumber); + } + } + + if (!fastpath) { + /* find the first page containing this key, buffer returned by _bt_search() is locked in exclusive mode */ + stack = UBTreeSearch(rel, itupKey, &buf, BT_WRITE); + } + WHITEBOX_TEST_STUB("UBTreeDoInsert-found", WhiteboxDefaultErrorEmit); /* @@ -530,7 +616,9 @@ bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, buf = InvalidBuffer; XactLockTableWait(xwait); /* start over... 
*/ - _bt_freestack(stack); + if (stack) { + _bt_freestack(stack); + } goto top; } @@ -540,7 +628,8 @@ bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, } } - if (checkUnique != UNIQUE_CHECK_EXISTING) { + /* skip insertion when we just want to check existing or find a conflict when executing upsert */ + if (checkUnique != UNIQUE_CHECK_EXISTING && !(checkUnique == UNIQUE_CHECK_UPSERT && !is_unique)) { /* * The only conflict predicate locking cares about for indexes is when * an index tuple insert conflicts with an existing lock. We don't @@ -560,11 +649,18 @@ bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, /* be tidy */ - _bt_freestack(stack); + if (stack) { + _bt_freestack(stack); + } pfree(itupKey); if (gpiScan != NULL) { // means rel switch happened - relation_close(gpiScan->parentRelation, AccessShareLock); + if (indexHeapRelOid == heapRel->grandparentId) { // For subpartition table + releaseDummyRelation(&partRel); + relation_close(indexHeapRel, AccessShareLock); + } else { + relation_close(gpiScan->parentRelation, AccessShareLock); + } GPIScanEnd(gpiScan); } @@ -713,7 +809,7 @@ static TransactionId UBTreeCheckUnique(Relation rel, IndexTuple itup, Relation h &xmin, &xmax, &xminCommitted, &xmaxCommitted); /* here we guarantee the same semantics as UNIQUE_CHECK_PARTIAL */ - if (checkUnique == UNIQUE_CHECK_PARTIAL) { + if (IndexUniqueCheckNoError(checkUnique)) { if (TransactionIdIsValid(xmin) && !TransactionIdIsValid(xmax)) { if (nbuf != InvalidBuffer) _bt_relbuf(rel, nbuf); @@ -1159,9 +1255,17 @@ static void UBTreeInsertOnPage(Relation rel, BTScanInsert itup_key, Buffer buf, XLogBeginInsert(); XLogRegisterData((char *)&xlrec, SizeOfBtreeInsert); - if (P_ISLEAF(lpageop)) + if (P_ISLEAF(lpageop)) { xlinfo = XLOG_UBTREE_INSERT_LEAF; + + /* + * Cache the block information if we just inserted into the + * rightmost leaf page of the index. + */ + if (P_RIGHTMOST(lpageop)) { + RelationSetTargetBlock(rel, BufferGetBlockNumber(buf)); + } + } else { /* * Register the left child whose INCOMPLETE_SPLIT flag was * cleared. diff --git a/src/gausskernel/storage/access/ubtree/ubtpage.cpp b/src/gausskernel/storage/access/ubtree/ubtpage.cpp index b4720d3e0..0083622f7 100644 --- a/src/gausskernel/storage/access/ubtree/ubtpage.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtpage.cpp @@ -361,6 +361,27 @@ Buffer UBTreeGetRoot(Relation rel, int access) return rootbuf; } +bool UBTreePageRecyclable(Page page) +{ + /* + * It's possible to find an all-zeroes page in an index --- for example, a + * backend might successfully extend the relation one page and then crash + * before it is able to make a WAL entry for adding the page. If we find a + * zeroed page then reclaim it. + */ + TransactionId frozenXmin = g_instance.undo_cxt.oldestFrozenXid; + if (PageIsNew(page)) { + return true; + } + + /* + * Otherwise, recycle if deleted and too old to have any processes + * interested in it. + */ + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + return P_ISDELETED(opaque) && TransactionIdPrecedes(((UBTPageOpaque)opaque)->xact, frozenXmin); +} + /* * UBTreeIsPageHalfDead() -- Returns true, if the given block has the half-dead flag set. 
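* (A half-dead page has had its downlink removed from the parent, but has not * yet been unlinked from its left and right siblings; page deletion finishes * the job in a later phase.)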
*/ @@ -1446,5 +1467,5 @@ static void UBTreeLogReusePage(Relation rel, BlockNumber blkno, TransactionId la XLogBeginInsert(); XLogRegisterData((char *)&xlrec, SizeOfBtreeReusePage); - (void)XLogInsert(RM_UBTREE_ID, XLOG_UBTREE_REUSE_PAGE, false, rel->rd_node.bucketNode); + (void)XLogInsert(RM_UBTREE_ID, XLOG_UBTREE_REUSE_PAGE, rel->rd_node.bucketNode); } diff --git a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp index 009bb935e..36a7dc558 100644 --- a/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtrecycle.cpp @@ -27,31 +27,28 @@ #include "commands/tablespace.h" #include "storage/lmgr.h" #include "utils/aiomem.h" +#include "utils/builtins.h" static uint32 BlockGetMaxItems(BlockNumber blkno); -static UBTRecycleQueueHeader GetRecycleQueueHeader(Page page, BlockNumber blkno); static void UBTreeInitRecycleQueuePage(Relation rel, Page page, Size size, BlockNumber blkno); static void UBTreeRecycleQueueDiscardPage(Relation rel, UBTRecycleQueueAddress addr); static void UBTreeRecycleQueueAddPage(Relation rel, UBTRecycleForkNumber forkNumber, BlockNumber blkno, TransactionId xid); static Buffer StepNextPage(Relation rel, Buffer buf); static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumber, Buffer buf, - TransactionId oldestXmin, UBTRecycleQueueAddress *addr, bool *continueScan); + TransactionId waterLevelXid, UBTRecycleQueueAddress *addr, bool *continueScan); static Buffer MoveToEndpointPage(Relation rel, Buffer buf, bool needHead, int access); static uint16 PageAllocateItem(Buffer buf); static void RecycleQueueLinkNewPage(Relation rel, Buffer leftBuf, Buffer newBuf); static bool QueuePageIsEmpty(Buffer buf); static Buffer AcquireNextAvailableQueuePage(Relation rel, Buffer buf, UBTRecycleForkNumber forkNumber); -static Buffer RecycleQueueGetEndpointPage(Relation rel, UBTRecycleForkNumber forkNumber, bool needHead, int access); static void InsertOnRecycleQueuePage(Relation rel, Buffer buf, uint16 offset, BlockNumber blkno, TransactionId xid); static void RemoveOneItemFromPage(Relation rel, Buffer buf, uint16 offset); const BlockNumber FirstBlockNumber = 0; const BlockNumber FirstNormalBlockNumber = 2; /* 0 and 1 are pages which include meta data */ const uint16 FirstNormalOffset = 0; -const uint16 InvalidOffset = ((uint16)0) - 1; const uint16 OtherBlockOffset = ((uint16)0) - 2; /* indicate that previous or next item is in other block */ -const BlockNumber minRecycleQueueBlockNumber = 6; #define IsMetaPage(blkno) (blkno < FirstNormalBlockNumber) #define IsNormalOffset(offset) (offset < OtherBlockOffset) @@ -65,7 +62,7 @@ static uint32 BlockGetMaxItems(BlockNumber blkno) return freeSpace / sizeof(UBTRecycleQueueItemData); } -static UBTRecycleQueueHeader GetRecycleQueueHeader(Page page, BlockNumber blkno) +UBTRecycleQueueHeader GetRecycleQueueHeader(Page page, BlockNumber blkno) { if (!IsMetaPage(blkno)) { return (UBTRecycleQueueHeader)PageGetContents(page); @@ -222,7 +219,7 @@ static void InitRecycleQueueInitialPage(Relation rel, Buffer buf) END_CRIT_SECTION(); } -static Buffer ReadRecycleQueueBuffer(Relation rel, BlockNumber blkno) +Buffer ReadRecycleQueueBuffer(Relation rel, BlockNumber blkno) { Buffer buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_NORMAL, NULL); /* initial pages may not initialized correctly, before return it we need to check */ @@ -290,7 +287,8 @@ static bool UBTreeTryRecycleEmptyPageInternal(Relation rel) return false; /* no available 
page to recycle */ } Page page = BufferGetPage(buf); - if (_bt_page_recyclable(page)) { + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); + if (P_ISDELETED(opaque)) { /* deleted by other routine earlier, skip */ _bt_relbuf(rel, buf); UBTreeRecycleQueueDiscardPage(rel, addr);
@@ -349,7 +347,7 @@ static Buffer StepNextPage(Relation rel, Buffer buf) } static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumber forkNumber, Buffer buf, - TransactionId oldestXmin, UBTRecycleQueueAddress *addr, bool *continueScan) + TransactionId waterLevelXid, UBTRecycleQueueAddress *addr, bool *continueScan) { Page page = BufferGetPage(buf); UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf));
@@ -357,7 +355,7 @@ static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumb uint16 curOffset = header->head; while (IsNormalOffset(curOffset)) { UBTRecycleQueueItem item = HeaderGetItem(header, curOffset); - if (TransactionIdFollowsOrEquals(item->xid, oldestXmin)) { + if (TransactionIdFollowsOrEquals(item->xid, waterLevelXid)) { *continueScan = false; return InvalidBuffer; }
@@ -369,7 +367,7 @@ static Buffer GetAvailablePageOnPage(Relation rel, UBTRecycleForkNumb _bt_checkbuffer_valid(rel, targetBuf); if (ConditionalLockBuffer(targetBuf)) { _bt_checkpage(rel, targetBuf); - if (forkNumber == RECYCLE_EMPTY_FORK || _bt_page_recyclable(BufferGetPage(targetBuf))) { + if (forkNumber == RECYCLE_EMPTY_FORK || UBTreePageRecyclable(BufferGetPage(targetBuf))) { WHITEBOX_TEST_STUB("GetAvailablePageOnPage-got", WhiteboxDefaultErrorEmit); *continueScan = false;
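The horizon passed to GetAvailablePageOnPage() is fork-dependent, as the next hunk shows. A hedged restatement of that selection (QueueItemUsable is a hypothetical helper, not part of the patch; the globals and the RECYCLE_EMPTY_FORK constant are taken from the hunks here):

```
/*
 * Illustrative sketch only. A queued page is handed out when the xid
 * recorded with it precedes the fork's "water level": the empty-page fork
 * is gated by oldestXidInUndo, other forks by oldestFrozenXid, matching
 * the waterLevelXid selection in UBTreeGetAvailablePage() below. For the
 * non-empty fork, UBTreePageRecyclable() then re-checks the page itself
 * once the conditional lock has been taken.
 */
static inline bool QueueItemUsable(UBTRecycleQueueItem item, UBTRecycleForkNumber fork)
{
    TransactionId waterLevel = (fork == RECYCLE_EMPTY_FORK)
        ? g_instance.undo_cxt.oldestXidInUndo
        : g_instance.undo_cxt.oldestFrozenXid;
    return TransactionIdPrecedes(item->xid, waterLevel);
}
```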
@@ -396,13 +394,17 @@ Buffer UBTreeGetAvailablePage(Relation rel, UBTRecycleForkNumber forkNumber, UBTRecycleQueueAddress *addr) { - TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; + TransactionId frozenXid = g_instance.undo_cxt.oldestFrozenXid; + TransactionId recycleXid = g_instance.undo_cxt.oldestXidInUndo; + + TransactionId waterLevelXid = ((forkNumber == RECYCLE_EMPTY_FORK) ? recycleXid : frozenXid); + Buffer queueBuf = RecycleQueueGetEndpointPage(rel, forkNumber, true, BT_READ); Buffer indexBuf = InvalidBuffer; bool continueScan = false; for (;;) { - indexBuf = GetAvailablePageOnPage(rel, forkNumber, queueBuf, oldestXmin, addr, &continueScan); + indexBuf = GetAvailablePageOnPage(rel, forkNumber, queueBuf, waterLevelXid, addr, &continueScan); if (!continueScan) { break; }
@@ -704,7 +706,7 @@ static void TryFixMetaData(Buffer metaBuf, int32 oldval, int32 newval, bool isHe } } -static Buffer RecycleQueueGetEndpointPage(Relation rel, UBTRecycleForkNumber forkNumber, bool needHead, int access) +Buffer RecycleQueueGetEndpointPage(Relation rel, UBTRecycleForkNumber forkNumber, bool needHead, int access) { if (!RecycleQueueInitialized(rel)) { UBTreeInitializeRecycleQueue(rel); }
@@ -988,3 +990,101 @@ static void UBTreeRecycleQueueDiscardPage(Relation rel, UBTRecycleQueueAddress a UnlockReleaseBuffer(buf); } + +static void UBTRecycleQueueRecordOutput(BlockNumber blkno, uint16 offset, UBTRecycleQueueItem item, + TupleDesc *tupleDesc, Tuplestorestate *tupstore, uint32 cols) +{ + if (item == NULL) { + return; + } + + Assert(cols == UBTREE_RECYCLE_OUTPUT_PARAM_CNT); + bool nulls[UBTREE_RECYCLE_OUTPUT_PARAM_CNT] = {false}; + Datum values[UBTREE_RECYCLE_OUTPUT_PARAM_CNT]; + char xidStr[UBTREE_RECYCLE_OUTPUT_XID_STR_LEN] = {'\0'}; + errno_t ret = snprintf_s(xidStr, sizeof(xidStr), sizeof(xidStr) - 1, "%lu", item->xid); + securec_check_ss(ret, "\0", "\0"); + values[ARR_0] = ObjectIdGetDatum((Oid) blkno); + values[ARR_1] = ObjectIdGetDatum((Oid) offset); + values[ARR_2] = CStringGetTextDatum(xidStr); + values[ARR_3] = ObjectIdGetDatum((Oid) item->blkno); + values[ARR_4] = ObjectIdGetDatum((Oid) item->prev); + values[ARR_5] = ObjectIdGetDatum((Oid) item->next); + tuplestore_putvalues(tupstore, *tupleDesc, values, nulls); +} +/* + * call UBTreeVerifyRecordError() only when recordEachItem is false. + * + * recordEachItem: + * true - dump this page + * false - verify this page + */ +uint32 UBTreeRecycleQueuePageDump(Relation rel, Buffer buf, bool recordEachItem, + TupleDesc *tupleDesc, Tuplestorestate *tupstore, uint32 cols) +{ + uint32 errVerified = 0; + TransactionId maxXid = ReadNewTransactionId(); + Page page = BufferGetPage(buf); + UBTRecycleQueueHeader header = GetRecycleQueueHeader(page, BufferGetBlockNumber(buf)); + uint16 offset = header->head; + while (IsNormalOffset(offset)) { + UBTRecycleQueueItem item = NULL; + if (offset >= 0 && offset <= UBTRecycleMaxItems) { + item = HeaderGetItem(header, offset); + } else { + if (!recordEachItem) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, BufferGetBlockNumber(buf), + VERIFY_RECYCLE_QUEUE_OFFSET_ERROR, tupleDesc, tupstore, cols); + } + break; + } + if (recordEachItem) { + UBTRecycleQueueRecordOutput(BufferGetBlockNumber(buf), offset, item, tupleDesc, tupstore, cols); + } else if (TransactionIdFollows(item->xid, maxXid)) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, BufferGetBlockNumber(buf), + VERIFY_RECYCLE_QUEUE_XID_TOO_LARGE, tupleDesc, tupstore, cols); + } + offset = item->next; + } + if (recordEachItem) { + return 0; + } + /* dump work is already done, the following is pure verify logic */ + if (IsNormalOffset(header->head) && header->tail == InvalidOffset && (header->flags & URQ_TAIL_PAGE) == 0) { + /* head is normal, but tail is invalid. this should be a tail page, but it's not */ + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, BufferGetBlockNumber(buf), + VERIFY_RECYCLE_QUEUE_UNEXPECTED_TAIL, tupleDesc, tupstore, cols); + } + + if (header->freeListHead != InvalidOffset) { + /* free list is valid, verify it */ + offset = header->freeListHead; + while (offset != InvalidOffset) { + if (offset == OtherBlockOffset) { + if (!recordEachItem) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, BufferGetBlockNumber(buf), + VERIFY_RECYCLE_QUEUE_FREE_LIST_ERROR, tupleDesc, tupstore, cols); + } + break; + } + if (offset >= 0 && offset <= UBTRecycleMaxItems) { + UBTRecycleQueueItem item = HeaderGetItem(header, offset); + offset = item->next; + } else { + if (!recordEachItem) { + errVerified++; + UBTreeVerifyRecordOutput(VERIFY_RECYCLE_QUEUE_PAGE, BufferGetBlockNumber(buf), + VERIFY_RECYCLE_QUEUE_FREE_LIST_INVALID_OFFSET, tupleDesc, tupstore, cols); + } + break; + } + } + } + + return errVerified; +} +
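The new routine doubles as a dumper and a verifier; which behaviour you get is controlled entirely by recordEachItem. A short usage sketch (illustrative only; it assumes the caller has already pinned and locked buf and prepared tupleDesc, tupstore and cols as the surrounding verification framework does):

```
/* dump mode: one output row per queue item, return value is always 0 */
(void)UBTreeRecycleQueuePageDump(rel, buf, true, &tupleDesc, tupstore, cols);

/* verify mode: no per-item rows, returns the number of errors recorded */
uint32 errCount = UBTreeRecycleQueuePageDump(rel, buf, false, &tupleDesc, tupstore, cols);
```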
diff --git a/src/gausskernel/storage/access/ubtree/ubtree.cpp b/src/gausskernel/storage/access/ubtree/ubtree.cpp index afa00b042..8a4b3d588 100644 --- a/src/gausskernel/storage/access/ubtree/ubtree.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtree.cpp
@@ -30,6 +30,7 @@ #include "storage/indexfsm.h" #include "storage/ipc.h" #include "storage/lmgr.h" +#include "storage/procarray.h" #include "storage/smgr/smgr.h" #include "tcop/tcopprot.h" #include "utils/aiomem.h"
@@ -53,9 +54,8 @@ typedef struct { static void UBTreeBuildCallback(Relation index, HeapTuple htup, Datum *values, const bool *isnull, bool tupleIsAlive, void *state); -static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, - void *callbackState, BTCycleId cycleid); -static void UBTreeVacuumPage(BTVacState *vstate, BlockNumber blkno, BlockNumber origBlkno); +static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, BTCycleId cycleid); +static void UBTreeVacuumPage(BTVacState *vstate, OidRBTree* invisibleParts, BlockNumber blkno, BlockNumber origBlkno); static IndexTuple UBTreeGetIndexTuple(IndexScanDesc scan, ScanDirection dir, BlockNumber heapTupleBlkOffset); /*
@@ -148,24 +148,14 @@ static void UBTreeBuildCallback(Relation index, HeapTuple htup, Datum *values, c void *state) { BTBuildState *buildstate = (BTBuildState *)state; - IndexTransInfo transInfo; - IndexTransInfo *transInfoArg = NULL; - - /* - * SNAPSHOT_NOW is used to scan the ustore table, and make the index - * unusable for old snapshots. So we can just treat every itup as Frozen.
- */ - transInfo.xmin = FrozenTransactionId; - transInfo.xmax = InvalidTransactionId; - transInfoArg = &transInfo; // insert the index tuple into the appropriate spool file for subsequent processing if (tupleIsAlive || buildstate->spool2 == NULL) { - _bt_spool(buildstate->spool, &htup->t_self, values, isnull, transInfoArg); + _bt_spool(buildstate->spool, &htup->t_self, values, isnull); } else { /* dead tuples are put into spool2 */ buildstate->haveDead = true; - _bt_spool(buildstate->spool2, &htup->t_self, values, isnull, transInfoArg); + _bt_spool(buildstate->spool2, &htup->t_self, values, isnull); } buildstate->indtuples += 1; @@ -639,8 +629,6 @@ Datum ubtbulkdelete(PG_FUNCTION_ARGS) { IndexVacuumInfo *info = (IndexVacuumInfo *)PG_GETARG_POINTER(0); IndexBulkDeleteResult *volatile stats = (IndexBulkDeleteResult *)PG_GETARG_POINTER(1); - IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback)PG_GETARG_POINTER(2); - void *callbackState = (void *)PG_GETARG_POINTER(3); Relation rel = info->index; BTCycleId cycleid; @@ -654,7 +642,7 @@ Datum ubtbulkdelete(PG_FUNCTION_ARGS) { cycleid = _bt_start_vacuum(rel); - UBTreeVacuumScan(info, stats, callback, callbackState, cycleid); + UBTreeVacuumScan(info, stats, cycleid); } PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel)); _bt_end_vacuum(rel); @@ -688,7 +676,7 @@ Datum ubtvacuumcleanup(PG_FUNCTION_ARGS) */ if (stats == NULL) { stats = (IndexBulkDeleteResult *)palloc0(sizeof(IndexBulkDeleteResult)); - UBTreeVacuumScan(info, stats, NULL, NULL, 0); + UBTreeVacuumScan(info, stats, 0); } /* ubtree don't use Free Space Map */ @@ -718,21 +706,14 @@ Datum ubtvacuumcleanup(PG_FUNCTION_ARGS) * The caller is responsible for initially allocating/zeroing a stats struct * and for obtaining a vacuum cycle ID if necessary. */ -static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, - void *callbackState, BTCycleId cycleid) +static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, + BTCycleId cycleid) { Relation rel = info->index; BTVacState vstate; BlockNumber numPages; BlockNumber blkno; bool needLock = false; - bool vacuumGPI = false; - - /* Check if GPI relation has invisible partitions */ - if (callbackState) { - Bitmapset* invisMap = ((Bitmapset**)callbackState)[0]; - vacuumGPI = RelationIsGlobalIndex(rel) && invisMap != NULL; - } /* * Reset counts that will be incremented during the scan; needed in case @@ -745,8 +726,6 @@ static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats /* Set up info to pass down to btvacuumpage */ vstate.info = info; vstate.stats = stats; - vstate.callback = callback; - vstate.callback_state = callbackState; vstate.cycleid = cycleid; vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */ vstate.lastBlockLocked = BTREE_METAPAGE; @@ -798,7 +777,7 @@ static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats } /* Iterate over pages, then loop back to recheck length */ for (; blkno < numPages; blkno++) { - UBTreeVacuumPage(&vstate, blkno, blkno); + UBTreeVacuumPage(&vstate, info->invisibleParts, blkno, blkno); } } @@ -847,12 +826,10 @@ static void UBTreeVacuumScan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats * reached by the outer btvacuumscan loop (the same as blkno, unless we * are recursing to re-examine a previous page). 
*/ -static void UBTreeVacuumPage(BTVacState *vstate, BlockNumber blkno, BlockNumber origBlkno) +static void UBTreeVacuumPage(BTVacState *vstate, OidRBTree *invisibleParts, BlockNumber blkno, BlockNumber origBlkno) { IndexVacuumInfo *info = vstate->info; IndexBulkDeleteResult *stats = vstate->stats; - IndexBulkDeleteCallback callback = vstate->callback; - void *callbackState = vstate->callback_state; Relation rel = info->index; bool deleteNow = false; BlockNumber recurseTo; @@ -895,7 +872,6 @@ restart: } } - bool impossible = false; /* Page is valid, see what to do with it */ if (_bt_page_recyclable(page)) { /* Okay to recycle this page */ @@ -907,142 +883,6 @@ restart: } else if (P_ISHALFDEAD(opaque)) { /* Half-dead, try to delete */ deleteNow = true; - } else if (P_ISLEAF(opaque) && impossible) { - OffsetNumber deletable[MaxOffsetNumber]; - int ndeletable; - OffsetNumber offnum, minoff, maxoff; - - /* - * Trade in the initial read lock for a super-exclusive write lock on - * this page. We must get such a lock on every leaf page over the - * course of the vacuum scan, whether or not it actually contains any - * deletable tuples --- see nbtree/README. - */ - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - LockBufferForCleanup(buf); - - /* - * Remember highest leaf page number we've taken cleanup lock on; see - * notes in btvacuumscan - */ - if (blkno > vstate->lastBlockLocked) { - vstate->lastBlockLocked = blkno; - } - - /* - * Check whether we need to recurse back to earlier pages. What we - * are concerned about is a page split that happened since we started - * the vacuum scan. If the split moved some tuples to a lower page - * then we might have missed 'em. If so, set up for tail recursion. - * (Must do this before possibly clearing btpo_cycleid below!) - */ - if (vstate->cycleid != 0 && opaque->btpo_cycleid == vstate->cycleid && !(opaque->btpo_flags & BTP_SPLIT_END) && - !P_RIGHTMOST(opaque) && opaque->btpo_next < origBlkno) { - recurseTo = opaque->btpo_next; - } - - /* - * Scan over all items to see which ones need deleted according to the - * callback function. - */ - ndeletable = 0; - minoff = P_FIRSTDATAKEY(opaque); - maxoff = PageGetMaxOffsetNumber(page); - if (callback) { - AttrNumber partitionOidAttr = IndexRelationGetNumberOfAttributes(rel); - TupleDesc tupdesc = RelationGetDescr(rel); - for (offnum = minoff; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - IndexTuple itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, offnum)); - ItemPointer htup = &(itup->t_tid); - - /* - * During Hot Standby we currently assume that - * XLOG_BTREE_VACUUM records do not produce conflicts. That is - * only true as long as the callback function depends only - * upon whether the index tuple refers to heap tuples removed - * in the initial heap scan. When vacuum starts it derives a - * value of OldestXmin. Backends taking later snapshots could - * have a RecentGlobalXmin with a later xid than the vacuum's - * OldestXmin, so it is possible that row versions deleted - * after OldestXmin could be marked as killed by other - * backends. The callback function *could* look at the index - * tuple state in isolation and decide to delete the index - * tuple, though currently it does not. If it ever did, we - * would need to reconsider whether XLOG_BTREE_VACUUM records - * should cause conflicts. If they did cause conflicts they - * would be fairly harsh conflicts, since we haven't yet - * worked out a way to pass a useful value for - * latestRemovedXid on the XLOG_BTREE_VACUUM records. 
This - * applies to *any* type of index that marks index tuples as - * killed. - */ - Oid partOid = InvalidOid; - if (RelationIsGlobalIndex(rel)) { - bool isnull = false; - partOid = DatumGetUInt32(index_getattr(itup, partitionOidAttr, tupdesc, &isnull)); - Assert(!isnull); - } - if (callback(htup, callbackState, partOid, InvalidBktId)) { - deletable[ndeletable++] = offnum; - } - } - } - - /* - * Apply any needed deletes. We issue just one _bt_delitems_vacuum() - * call per page, so as to minimize WAL traffic. - */ - if (ndeletable > 0) { - /* - * Notice that the issued XLOG_BTREE_VACUUM WAL record includes an - * instruction to the replay code to get cleanup lock on all pages - * between the previous lastBlockVacuumed and this page. This - * ensures that WAL replay locks all leaf pages at some point. - * - * Since we can visit leaf pages out-of-order when recursing, - * replay might end up locking such pages an extra time, but it - * doesn't seem worth the amount of bookkeeping it'd take to avoid - * that. - */ - _bt_delitems_vacuum(rel, buf, deletable, ndeletable, vstate->lastBlockVacuumed); - - /* - * Remember highest leaf page number we've issued a - * XLOG_BTREE_VACUUM WAL record for. - */ - if (blkno > vstate->lastBlockVacuumed) { - vstate->lastBlockVacuumed = blkno; - } - - stats->tuples_removed += ndeletable; - /* must recompute maxoff */ - maxoff = PageGetMaxOffsetNumber(page); - } else { - /* - * If the page has been split during this vacuum cycle, it seems - * worth expending a write to clear btpo_cycleid even if we don't - * have any deletions to do. (If we do, _bt_delitems_vacuum takes - * care of this.) This ensures we won't process the page again. - * We treat this like a hint-bit update because there's no need to - * WAL-log it. - */ - if (vstate->cycleid != 0 && opaque->btpo_cycleid == vstate->cycleid) { - opaque->btpo_cycleid = 0; - MarkBufferDirtyHint(buf, true); - } - } - - /* - * If it's now empty, try to delete; else count the live tuples. We - * don't delete when recursing, though, to avoid putting entries into - * freePages out-of-order (doesn't seem worth any extra code to handle - * the case). - */ - if (minoff > maxoff) { - deleteNow = (blkno == origBlkno); - } else { - stats->num_index_tuples += maxoff - minoff + 1; - } } /* @@ -1058,12 +898,24 @@ restart: bool ignore; /* prune and freeze this index page */ - FreezeSingleIndexPage(rel, buf, &ignore); + FreezeSingleIndexPage(rel, buf, &ignore, invisibleParts); if (!P_RIGHTMOST(opaque) && PageGetMaxOffsetNumber(page) == 1) { /* already empty (only HIKEY left), ok to delete */ deleteNow = true; } + + OffsetNumber minoff = P_FIRSTDATAKEY(opaque); + OffsetNumber maxoff = PageGetMaxOffsetNumber(page); + /* + * If it's now empty, try to delete; else count the live tuples. We + * don't delete when recursing, though, to avoid putting entries into + * freePages out-of-order (doesn't seem worth any extra code to handle + * the case). 
+ */ + if (minoff <= maxoff) { + stats->num_index_tuples += maxoff - minoff + 1; + } } if (deleteNow) {
@@ -1388,16 +1240,17 @@ static void IndexPageShiftBase(Page page, int64 delta, bool needWal, Buffer buf) WHITEBOX_TEST_STUB("IndexPageShiftBase-end", WhiteboxDefaultErrorEmit); } -void FreezeSingleIndexPage(Relation rel, Buffer buf, bool *hasPruned) +void FreezeSingleIndexPage(Relation rel, Buffer buf, bool *hasPruned, OidRBTree *invisibleParts) { int nfrozen = 0; OffsetNumber nowfrozen[MaxIndexTuplesPerPage]; - TransactionId oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; + TransactionId oldestXmin = InvalidTransactionId; + GetOldestXminForUndo(&oldestXmin); WHITEBOX_TEST_STUB("FreezeSingleIndexPage", WhiteboxDefaultErrorEmit); /* first prune the page, remove all Abort xid and Frozen xmax */ - *hasPruned = UBTreePagePrune(rel, buf, oldestXmin); + *hasPruned = UBTreePagePrune(rel, buf, oldestXmin, invisibleParts); Page page = BufferGetPage(buf); UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page);
diff --git a/src/gausskernel/storage/access/ubtree/ubtsearch.cpp b/src/gausskernel/storage/access/ubtree/ubtsearch.cpp index 239ecc1a8..aea30859c 100644 --- a/src/gausskernel/storage/access/ubtree/ubtsearch.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtsearch.cpp
@@ -379,15 +379,19 @@ static void UBTreeFixActiveTupleCount(Relation rel, Buffer buf, UBTPageOpaqueInt { UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); TransactionId xmin = ShortTransactionIdToNormal(opaque->xid_base, uxid->xmin); - if (UBTreeCheckXid(xmin) == XID_ABORTED) { - ItemIdMarkDead(iid); - opaque->activeTupleCount--; - if (opaque->activeTupleCount == 0) { - UBTreeRecordEmptyPage(rel, BufferGetBlockNumber(buf), opaque->last_delete_xid); + TransactionId oldestXmin = InvalidTransactionId; + GetOldestXminForUndo(&oldestXmin); + if (TransactionIdPrecedes(xmin, oldestXmin)) { + if (UBTreeCheckXid(xmin) == XID_ABORTED) { + ItemIdMarkDead(iid); + opaque->activeTupleCount--; + if (opaque->activeTupleCount == 0) { + UBTreeRecordEmptyPage(rel, BufferGetBlockNumber(buf), opaque->last_delete_xid); + } + } else { + /* freeze this index tuple */ + IndexItemIdSetFrozen(iid); } - } else if (TransactionIdPrecedes(xmin, u_sess->utils_cxt.RecentGlobalDataXmin)) { - /* freeze this index tuple */ - IndexItemIdSetFrozen(iid); } }
@@ -1084,16 +1088,12 @@ bool UBTreeFirst(IndexScanDesc scan, ScanDirection dir) scan->xs_ctup.t_self = currItem->heapTid; scan->xs_recheck_itup = false; - if (scan->xs_want_itup) { + if (scan->xs_want_itup || currItem->needRecheck) { + /* in this case, currTuples and tupleOffset must be valid. */ + Assert(so->currTuples != NULL && currItem->tupleOffset != INVALID_TUPLE_OFFSET); scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset); - } else if (currItem->tupleOffset != INVALID_TUPLE_OFFSET) { - /* - * currItem->tupleOffset is normal, but we are not doing index only scan. - * This case is used to indicate that we have to recheck the IndexTuple after - * get the visible UHeap Tuple. - */ - scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset); - scan->xs_recheck_itup = true; + /* if we can't tell whether this tuple is visible without CID, we must fetch UHeapTuple to recheck.
*/ + scan->xs_recheck_itup = currItem->needRecheck; } if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
@@ -1153,16 +1153,12 @@ bool UBTreeNext(IndexScanDesc scan, ScanDirection dir) scan->xs_ctup.t_self = currItem->heapTid; scan->xs_recheck_itup = false; - if (scan->xs_want_itup) { + if (scan->xs_want_itup || currItem->needRecheck) { + /* in this case, currTuples and tupleOffset must be valid. */ + Assert(so->currTuples != NULL && currItem->tupleOffset != INVALID_TUPLE_OFFSET); scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset); - } else if (currItem->tupleOffset != INVALID_TUPLE_OFFSET) { - /* - * currItem->tupleOffset is normal, but we are not doing index only scan. - * This case is used to indicate that we have to recheck the IndexTuple after - * get the visible UHeap Tuple. - */ - scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset); - scan->xs_recheck_itup = true; + /* if we can't tell whether this tuple is visible without CID, we must fetch UHeapTuple to recheck. */ + scan->xs_recheck_itup = currItem->needRecheck; } if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
@@ -1209,8 +1205,6 @@ static bool UBTreeReadPage(IndexScanDesc scan, ScanDirection dir, OffsetNumber o /* we must have the buffer pinned and locked */ Assert(BufferIsValid(so->currPos.buf)); - /* We've pinned the buffer, nobody can prune this buffer, check whether snapshot is valid. */ - CheckSnapshotIsValidException(scan->xs_snapshot, "_bt_readpage"); page = BufferGetPage(so->currPos.buf); opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page);
@@ -1310,6 +1304,7 @@ static void UBTreeSaveItem(BTScanOpaque so, int itemIndex, OffsetNumber offnum, currItem->indexOffset = offnum; currItem->partitionOid = partOid; currItem->tupleOffset = INVALID_TUPLE_OFFSET; + currItem->needRecheck = needRecheck; if (needRecheck) { if (!so->currTuples) {
@@ -1639,16 +1634,12 @@ static bool UBTreeEndPoint(IndexScanDesc scan, ScanDirection dir) scan->xs_ctup.t_self = currItem->heapTid; scan->xs_recheck_itup = false; - if (scan->xs_want_itup) { + if (scan->xs_want_itup || currItem->needRecheck) { + /* in this case, currTuples and tupleOffset must be valid. */ + Assert(so->currTuples != NULL && currItem->tupleOffset != INVALID_TUPLE_OFFSET); scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset); - } else if (currItem->tupleOffset != INVALID_TUPLE_OFFSET) { - /* - * currItem->tupleOffset is normal, but we are not doing index only scan. - * This case is used to indicate that we have to recheck the IndexTuple after - * get the visible UHeap Tuple. - */ - scan->xs_itup = (IndexTuple)(so->currTuples + currItem->tupleOffset); - scan->xs_recheck_itup = true; + /* if we can't tell whether this tuple is visible without CID, we must fetch UHeapTuple to recheck. */ + scan->xs_recheck_itup = currItem->needRecheck; } if (scan->xs_want_ext_oid && GPIScanCheckPartOid(scan->xs_gpi_scan, currItem->partitionOid)) {
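Note the protocol the three hunks above establish: needRecheck is now carried on each saved scan item by UBTreeSaveItem(), and xs_recheck_itup simply mirrors it when the item is returned. A sketch of how a consumer honours the flag (illustrative only; the recheck itself happens in the UHeap fetch path, which this patch does not show):

```
/*
 * Illustrative sketch (not part of the patch): consuming the recheck flag
 * set by UBTreeFirst()/UBTreeNext()/UBTreeEndPoint() above.
 */
if (UBTreeNext(scan, ForwardScanDirection)) {
    if (scan->xs_recheck_itup) {
        /*
         * Visibility could not be decided from xmin/xmax alone (it is
         * CID-dependent), so the caller must fetch the visible UHeap tuple
         * and re-verify it against the saved IndexTuple in scan->xs_itup.
         */
    }
}
```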
diff --git a/src/gausskernel/storage/access/ubtree/ubtsort.cpp b/src/gausskernel/storage/access/ubtree/ubtsort.cpp index 9c709b332..e9f49c7e5 100644 --- a/src/gausskernel/storage/access/ubtree/ubtsort.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtsort.cpp
@@ -87,7 +87,8 @@ static const int TXN_INFO_SIZE_DIFF = (sizeof(TransactionId) - sizeof(ShortTrans static Page UBTreeBlNewPage(Relation rel, uint32 level); static void UBTreeSlideLeft(Page page); -static void UBTreeSortAddTuple(Page page, Size itemsize, IndexTuple itup, OffsetNumber itup_off, bool isnew); +static void UBTreeSortAddTuple(Page page, Size itemsize, IndexTuple itup, + OffsetNumber itup_off, bool isnew, bool extXid); static void UBTreeLoad(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2); /*
@@ -333,7 +334,8 @@ static void UBTreeSlideLeft(Page page) * answer for the page. Here, we don't know yet if the page will be * rightmost. Offset P_FIRSTKEY is always the first data key. */ -static void UBTreeSortAddTuple(Page page, Size itemsize, IndexTuple itup, OffsetNumber itup_off, bool isnew) +static void UBTreeSortAddTuple(Page page, Size itemsize, IndexTuple itup, + OffsetNumber itup_off, bool isnew, bool extXid) { UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(page); IndexTupleData trunctuple;
@@ -352,44 +354,60 @@ static void UBTreeSortAddTuple(Page page, Size itemsize, IndexTuple itup, Offset ereport(PANIC, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("Index tuple cant fit in the page when creating index."))); } else { - /* - * This is different from _bt_pgaddtup(), the last 16B of itup are xmin/xmax (8B each TransactionId). - * But the actual space occupied by this part in page is 8B (4B each ShortTransactionId) with xid-base - * optimization. - * - * If we assume the actual IndexTuple's length is X, there are 3 lengths: - * 1. IndexTuple passed to _bt_buildadd() with length X + 16. - * 2. IndexTuple on disk with length X + 8, this is the same as the length in LP. - * 3. IndexTuple saw its length as X. - * - * At the beginning of _bt_buildadd(), we reset the size into X + 8 (actual storage size) to - * test whether there is enough space. - * - * The actual memory size of itup here is X + 16, so we just need to: - * 1. set IndexTuple's size into X (from X + 8). - * 2. extract xid, and put the xid where it should be with xid-base optimization. - * 3. call PageAddItem() with length X + 8 - */ Size storageSize = IndexTupleSize(itup); Size newsize = storageSize - TXNINFOSIZE; IndexTupleSetSize(itup, newsize); - IndexTransInfo *transInfo = (IndexTransInfo*)(((char*)itup) + newsize); - TransactionId xmin = transInfo->xmin; - TransactionId xmax = transInfo->xmax; + if (extXid) { + /* + * This is different from _bt_pgaddtup(), the last 16B of itup are xmin/xmax (8B each TransactionId). + * But the actual space occupied by this part in page is 8B (4B each ShortTransactionId) with xid-base + * optimization. + * + * If we assume the actual IndexTuple's length is X, there are 3 lengths: + * 1. IndexTuple passed to _bt_buildadd() with length X + 16. + * 2. IndexTuple on disk with length X + 8, this is the same as the length in LP. + * 3. IndexTuple saw its length as X. + * + * At the beginning of _bt_buildadd(), we reset the size into X + 8 (actual storage size) to + * test whether there is enough space.
+ * + * The actual memory size of itup here is X + 16, so we just need to: + * 1. set IndexTuple's size into X (from X + 8). + * 2. extract xid, and put the xid where it should be with xid-base optimization. + * 3. call PageAddItem() with length X + 8 + */ + IndexTransInfo *transInfo = (IndexTransInfo*)(((char*)itup) + newsize); + TransactionId xmin = transInfo->xmin; + TransactionId xmax = transInfo->xmax; - UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); + /* setup pd_xid_base */ + IndexPagePrepareForXid(NULL, page, xmin, false, InvalidBuffer); + IndexPagePrepareForXid(NULL, page, xmax, false, InvalidBuffer); - /* setup pd_xid_base */ - IndexPagePrepareForXid(NULL, page, xmin, false, InvalidBuffer); - IndexPagePrepareForXid(NULL, page, xmax, false, InvalidBuffer); + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); + uxid->xmin = NormalTransactionIdToShort(opaque->xid_base, xmin); + uxid->xmax = NormalTransactionIdToShort(opaque->xid_base, xmax); - uxid->xmin = NormalTransactionIdToShort(opaque->xid_base, xmin); - uxid->xmax = NormalTransactionIdToShort(opaque->xid_base, xmax); + if (PageAddItem(page, (Item)itup, storageSize, itup_off, false, false) == InvalidOffsetNumber) { + ereport(PANIC, (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("Index tuple can't fit in the page when creating index."))); + } + } else { + /* reserve space for xmin/xmax and set them to Frozen and Invalid */ + ((PageHeader)page)->pd_upper -= TXNINFOSIZE; + UstoreIndexXid uxid = (UstoreIndexXid)(((char*)page) + ((PageHeader)page)->pd_upper); + uxid->xmin = FrozenTransactionId; + uxid->xmax = InvalidTransactionId; - if (PageAddItem(page, (Item)itup, storageSize, itup_off, false, false) == InvalidOffsetNumber) - ereport(PANIC, - (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("Index tuple cant fit in the page when creating index."))); + if (PageAddItem(page, (Item)itup, newsize, itup_off, false, false) == InvalidOffsetNumber) { + ereport(PANIC, (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("Index tuple can't fit in the page when creating index."))); + } + + ItemId iid = PageGetItemId(page, itup_off); + ItemIdSetNormal(iid, ((PageHeader)page)->pd_upper, storageSize); + } opaque->activeTupleCount++; }
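The size bookkeeping in the comment above is easiest to follow with concrete numbers. A worked illustration (the 32-byte length is hypothetical; the 8- and 16-byte deltas come from the comment itself):

```
/*
 * Worked example for a tuple whose "logical" IndexTuple length X is 32
 * bytes, assuming 8-byte TransactionId and 4-byte ShortTransactionId:
 *
 *   in-memory itup handed to UBTreeBuildAdd():   X + 16 = 48 bytes
 *       (trailing IndexTransInfo: xmin/xmax as two TransactionIds)
 *   on-disk size, and the size kept in the LP:   X + 8  = 40 bytes
 *       (trailing UstoreIndexXid: xmin/xmax as two ShortTransactionIds)
 *   size recorded in the tuple header itself:    X      = 32 bytes
 *
 * Hence PageAddItem() is called with storageSize (40) after
 * IndexTupleSetSize(itup, newsize) has shrunk the header size to 32.
 */
```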
@@ -430,43 +448,39 @@ static void UBTreeSortAddTuple(Page page, Size itemsize, IndexTuple itup, Offset */ void UBTreeBuildAdd(BTWriteState *wstate, BTPageState *state, IndexTuple itup, bool hasxid) { - Page npage; - UBTPageOpaqueInternal opaque; - BlockNumber nblkno; - OffsetNumber last_off; - Size pgspc; - Size itupsz; - bool isleaf = false; - /* * This is a handy place to check for cancel interrupts during the btree * load phase of index creation. */ CHECK_FOR_INTERRUPTS(); - npage = state->btps_page; - nblkno = state->btps_blkno; - last_off = state->btps_lastoff; + Page npage = state->btps_page; + BlockNumber nblkno = state->btps_blkno; + OffsetNumber last_off = state->btps_lastoff; + /* Leaf case has slightly different rules due to suffix truncation */ + bool isleaf = (state->btps_level == 0); + bool isPivot = UBTreeTupleIsPivot(itup); - opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(npage); + UBTPageOpaqueInternal opaque = (UBTPageOpaqueInternal)PageGetSpecialPointer(npage); - if (hasxid) { - /* - * Inserting a new IndexTuple: - * the last 16B of index entries is xmin/xmax. With xid-base optimization, the - * actual space occupied by this part should be only 8B, set the correct size. - */ - itupsz = IndexTupleSize(itup); - /* size -= 8B */ - IndexTupleSetSize(itup, itupsz - TXN_INFO_SIZE_DIFF); + if (!isPivot) { + /* normal index tuple, need to reserve space for xmin and xmax */ + Size itupsz = IndexTupleSize(itup); + if (hasxid) { + /* + * Inserting a new IndexTuple followed by 16B of transaction information: + * the last 16B of index entries is xmin/xmax. With xid-base optimization, the + * actual space occupied by this part should be only 8B, set the correct size. + */ + IndexTupleSetSize(itup, itupsz - TXN_INFO_SIZE_DIFF); /* size -= 8B */ + } else { + IndexTupleSetSize(itup, itupsz + TXNINFOSIZE); /* size += 8B */ + } } - pgspc = PageGetFreeSpace(npage); - itupsz = IndexTupleDSize(*itup); + Size pgspc = PageGetFreeSpace(npage); + Size itupsz = IndexTupleDSize(*itup); itupsz = MAXALIGN(itupsz); - /* Leaf case has slightly different rules due to suffix truncation */ - isleaf = (state->btps_level == 0); - /* * Check whether the item can fit on a btree page at all. *
@@ -517,7 +531,7 @@ void UBTreeBuildAdd(BTWriteState *wstate, BTPageState *state, IndexTuple itup, b Assert(last_off > P_FIRSTKEY); ii = PageGetItemId(opage, last_off); oitup = (IndexTuple)PageGetItem(opage, ii); - UBTreeSortAddTuple(npage, ItemIdGetLength(ii), oitup, P_FIRSTKEY, false); + UBTreeSortAddTuple(npage, ItemIdGetLength(ii), oitup, P_FIRSTKEY, false, false); /* * Move 'last' into the high key position on opage
@@ -565,7 +579,7 @@ void UBTreeBuildAdd(BTWriteState *wstate, BTPageState *state, IndexTuple itup, b truncated = UBTreeTruncate(wstate->index, lastleft, oitup, wstate->inskey, false); /* delete "wrong" high key, insert truncated as P_HIKEY. */ PageIndexTupleDelete(opage, P_HIKEY); - UBTreeSortAddTuple(opage, IndexTupleSize(truncated), truncated, P_HIKEY, false); + UBTreeSortAddTuple(opage, IndexTupleSize(truncated), truncated, P_HIKEY, false, false); pfree(truncated); /* oitup should continue to point to the page's high key */
@@ -631,6 +645,12 @@ void UBTreeBuildAdd(BTWriteState *wstate, BTPageState *state, IndexTuple itup, b last_off = P_FIRSTKEY; } + /* + * Add the new item into the current page. + */ + last_off = OffsetNumberNext(last_off); + UBTreeSortAddTuple(npage, itupsz, itup, last_off, true, hasxid); + /* * If the new item is the first for its page, stash a copy for later. Note * this will only happen for the first item on a level; on later pages, * minus infinity downlink, and never as a high key, there is no need to * truncate away suffix attributes at this point. */ - if (last_off == P_HIKEY) { + if (last_off == OffsetNumberNext(P_HIKEY)) { Assert(state->btps_minkey == NULL); state->btps_minkey = CopyIndexTuple(itup); /* _bt_sortaddtup() will perform full truncation later */ UBTreeTupleSetNAtts(state->btps_minkey, 0, false); } - /* - * Add the new item into the current page.
- */ - last_off = OffsetNumberNext(last_off); - UBTreeSortAddTuple(npage, itupsz, itup, last_off, true); - state->btps_page = npage; state->btps_blkno = nblkno; state->btps_lastoff = last_off; @@ -765,14 +779,14 @@ static void UBTreeLoad(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2 state = UBTreePageState(wstate, 0); if (load1) { - UBTreeBuildAdd(wstate, state, itup, true); + UBTreeBuildAdd(wstate, state, itup, false); if (should_free) { pfree(itup); itup = NULL; } itup = tuplesort_getindextuple(btspool->sortstate, true, &should_free); } else { - UBTreeBuildAdd(wstate, state, itup2, true); + UBTreeBuildAdd(wstate, state, itup2, false); if (should_free2) { pfree(itup2); itup2 = NULL; @@ -787,7 +801,7 @@ static void UBTreeLoad(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2 if (state == NULL) state = UBTreePageState(wstate, 0); - UBTreeBuildAdd(wstate, state, itup, true); + UBTreeBuildAdd(wstate, state, itup, false); if (should_free) { pfree(itup); itup = NULL; diff --git a/src/gausskernel/storage/access/ubtree/ubtutils.cpp b/src/gausskernel/storage/access/ubtree/ubtutils.cpp index 6d67b6ee5..12b7c7acd 100644 --- a/src/gausskernel/storage/access/ubtree/ubtutils.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtutils.cpp @@ -35,7 +35,7 @@ static bool UBTreeVisibilityCheckXid(TransactionId xmin, TransactionId xmax, boo Snapshot snapshot, bool isUpsert = false); static bool UBTreeXidSatisfiesMVCC(TransactionId xid, bool committed, Snapshot snapshot); static int UBTreeKeepNatts(Relation rel, IndexTuple lastleft, IndexTuple firstright, BTScanInsert itupKey); -static bool UBTreeVisibilityCheckCid(IndexScanDesc scan, IndexTuple itup, TransactionId xmax, bool *needRecheck); +static bool UBTreeVisibilityCheckCid(IndexScanDesc scan, IndexTuple itup, bool *needRecheck); static bool UBTreeItupEquals(IndexTuple itup1, IndexTuple itup2); #define MAX(A, B) (((B) > (A)) ? (B) : (A)) @@ -162,7 +162,6 @@ IndexTuple UBTreeCheckKeys(IndexScanDesc scan, Page page, OffsetNumber offnum, S ItemId iid = PageGetItemId(page, offnum); bool tupleAlive = false; bool tupleVisible = true; - bool needVisibilityCheck = false; IndexTuple tuple; TupleDesc tupdesc; BTScanOpaque so; @@ -207,29 +206,6 @@ IndexTuple UBTreeCheckKeys(IndexScanDesc scan, Page page, OffsetNumber offnum, S so = (BTScanOpaque)scan->opaque; keysz = so->numberOfKeys; - needVisibilityCheck = scan->xs_snapshot->satisfies != SNAPSHOT_ANY && - scan->xs_snapshot->satisfies != SNAPSHOT_TOAST; - TransactionId xmin, xmax; - bool isDead = false; - bool xminCommitted = false; - bool xmaxCommitted = false; - isDead = UBTreeItupGetXminXmax(page, offnum, InvalidTransactionId, &xmin, &xmax, &xminCommitted, &xmaxCommitted); - tupleVisible = !isDead; /* without visibility check, return non-dead tuple */ - if (needVisibilityCheck) { - /* - * If this IndexTuple is not visible to the current Snapshot, try to get the next one. - * We're not going to tell heap to skip visibility check, because it doesn't cost a lot and we need heap - * to check the visibility with CID when snapshot's xid equals to xmin or xmax. 
- */ - if (scan->xs_snapshot->satisfies == SNAPSHOT_MVCC && - (TransactionIdIsCurrentTransactionId(xmin) || TransactionIdIsCurrentTransactionId(xmax))) { - tupleVisible = UBTreeVisibilityCheckCid(scan, tuple, xmax, needRecheck); /* need check cid */ - } else { - tupleVisible = (!isDead) && - UBTreeVisibilityCheckXid(xmin, xmax, xminCommitted, xmaxCommitted, scan->xs_snapshot, scan->isUpsert); - } - } - for (key = so->keyData, ikey = 0; ikey < keysz; key++, ikey++) { Datum datum; bool isNull = false;
@@ -337,6 +313,29 @@ IndexTuple UBTreeCheckKeys(IndexScanDesc scan, Page page, OffsetNumber offnum, S } } + bool needVisibilityCheck = scan->xs_snapshot->satisfies != SNAPSHOT_ANY && + scan->xs_snapshot->satisfies != SNAPSHOT_TOAST; + TransactionId xmin, xmax; + bool isDead = false; + bool xminCommitted = false; + bool xmaxCommitted = false; + isDead = UBTreeItupGetXminXmax(page, offnum, InvalidTransactionId, &xmin, &xmax, &xminCommitted, &xmaxCommitted); + tupleVisible = !isDead; /* without visibility check, return non-dead tuple */ + if (needVisibilityCheck) { + /* + * If this IndexTuple is not visible to the current Snapshot, try to get the next one. + * We're not going to tell heap to skip visibility check, because it doesn't cost a lot and we need heap + * to check the visibility with CID when snapshot's xid equals xmin or xmax. + */ + if (scan->xs_snapshot->satisfies == SNAPSHOT_MVCC && + (TransactionIdIsCurrentTransactionId(xmin) || TransactionIdIsCurrentTransactionId(xmax))) { + tupleVisible = UBTreeVisibilityCheckCid(scan, tuple, needRecheck); /* need to check cid */ + } else { + tupleVisible = (!isDead) && + UBTreeVisibilityCheckXid(xmin, xmax, xminCommitted, xmaxCommitted, scan->xs_snapshot, scan->isUpsert); + } + } + /* Check for failure due to it being a killed tuple.
*/ if (!tupleAlive || !tupleVisible) { return NULL; @@ -443,7 +442,7 @@ bool UBTreeItupGetXminXmax(Page page, OffsetNumber offnum, TransactionId oldest_ /* if there is no passed oldest_xmin, we will ues the current oldest_xmin */ if (!TransactionIdIsValid(oldest_xmin)) { - oldest_xmin = u_sess->utils_cxt.RecentGlobalDataXmin; + GetOldestXminForUndo(&oldest_xmin); } if (!TransactionIdIsValid(*xmin)) { @@ -636,20 +635,9 @@ static bool UBTreeItupEquals(IndexTuple itup1, IndexTuple itup2) return memcmp(itup1, itup2, IndexTupleSize(itup1)) == 0; } -static bool UBTreeVisibilityCheckCid(IndexScanDesc scan, IndexTuple itup, TransactionId xmax, bool *needRecheck) +static bool UBTreeVisibilityCheckCid(IndexScanDesc scan, IndexTuple itup, bool *needRecheck) { BTScanOpaque so = (BTScanOpaque)scan->opaque; - Snapshot snapshot = scan->xs_snapshot; - bool isIndexOnlyScan = scan->xs_want_itup; - - if (isIndexOnlyScan && snapshot->curcid == GetCurrentCommandId(false)) { - /* when the snapshot is the latest, index only scan don't need to recheck */ - *needRecheck = false; - if (TransactionIdIsCurrentTransactionId(xmax)) { - return false; /* xmax is current xid: invisible */ - } - return true; /* xmin is current xid, no xmax yet: visible */ - } if (UBTreeItupEquals((IndexTuple)so->lastSelfModifiedItup, itup)) { *needRecheck = false; diff --git a/src/gausskernel/storage/access/ubtree/ubtxlog.cpp b/src/gausskernel/storage/access/ubtree/ubtxlog.cpp index a2d82e439..84131ff43 100644 --- a/src/gausskernel/storage/access/ubtree/ubtxlog.cpp +++ b/src/gausskernel/storage/access/ubtree/ubtxlog.cpp @@ -447,7 +447,7 @@ static void UBTreeXlogSplit(bool onleft, bool isroot, XLogReaderState *record, b /* assure that memory is properly allocated, prevent from core dump caused by buffer unpin */ START_CRIT_SECTION(); - newlpage = PageGetTempPageCopySpecial(lpage, true); + newlpage = PageGetTempPageCopySpecial(lpage); END_CRIT_SECTION(); /* Set high key */ diff --git a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp index 277ad8648..2744e4652 100644 --- a/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_pruneuheap.cpp @@ -58,7 +58,7 @@ static int Itemoffcompare(const void *itemidp1, const void *itemidp2) bool UHeapPagePruneOptPage(Relation relation, Buffer buffer, TransactionId xid, bool acquireContionalLock) { Page page; - TransactionId oldestXmin; + TransactionId oldestXmin = InvalidTransactionId; TransactionId ignore = InvalidTransactionId; Size minfree; bool pruned = false; @@ -77,25 +77,7 @@ bool UHeapPagePruneOptPage(Relation relation, Buffer buffer, TransactionId xid, return false; } - /* - * Use the appropriate xmin horizon for this relation. If it's a proper - * catalog relation or a user defined, additional, catalog relation, we - * need to use the horizon that includes slots, otherwise the data-only - * horizon can be used. Note that the toast relation of user defined - * relations are *not* considered catalog relations. - * - * It is OK to apply the old snapshot limit before acquiring the cleanup - * lock because the worst that can happen is that we are not quite as - * aggressive about the cleanup (by however many transaction IDs are - * consumed between this point and acquiring the lock). This allows us to - * save significant overhead in the case where the page is found not to be - * prunable. 
- */ - if (IsCatalogRelation(relation) || RelationIsAccessibleInLogicalDecoding(relation)) - oldestXmin = u_sess->utils_cxt.RecentGlobalXmin; - else - oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; - + GetOldestXminForUndo(&oldestXmin); Assert(TransactionIdIsValid(oldestXmin)); if ((t_thrd.xact_cxt.useLocalSnapshot && IsPostmasterEnvironment) || @@ -240,6 +222,7 @@ int UHeapPagePrune(const RelationBuffer *relbuf, TransactionId oldestXmin, bool * first print relation oid */ + WaitState oldStatus = pgstat_report_waitstatus(STATE_PRUNE_TABLE); UHeapPagePruneExecute(relbuf->buffer, InvalidOffsetNumber, &prstate); /* prune the dead line pointers */ @@ -307,6 +290,7 @@ int UHeapPagePrune(const RelationBuffer *relbuf, TransactionId oldestXmin, bool if (pruned) { *pruned = hasPruned; } + pgstat_report_waitstatus(oldStatus); } else { #ifdef DEBUG_UHEAP UHEAPSTAT_COUNT_PRUNEPAGE(PRUNE_PAGE_NO_SPACE); @@ -391,25 +375,7 @@ bool UHeapPagePruneOpt(Relation relation, Buffer buffer, OffsetNumber offnum, Si if (RecoveryInProgress()) return false; - /* - * Use the appropriate xmin horizon for this relation. If it's a proper - * catalog relation or a user defined, additional, catalog relation, we - * need to use the horizon that includes slots, otherwise the data-only - * horizon can be used. Note that the toast relation of user defined - * relations are *not* considered catalog relations. - * - * It is OK to apply the old snapshot limit before acquiring the cleanup - * lock because the worst that can happen is that we are not quite as - * aggressive about the cleanup (by however many transaction IDs are - * consumed between this point and acquiring the lock). This allows us to - * save significant overhead in the case where the page is found not to be - * prunable. - */ - if (IsCatalogRelation(relation) || RelationIsAccessibleInLogicalDecoding(relation)) - oldestXmin = u_sess->utils_cxt.RecentGlobalXmin; - else - oldestXmin = u_sess->utils_cxt.RecentGlobalDataXmin; - + GetOldestXminForUndo(&oldestXmin); Assert(TransactionIdIsValid(oldestXmin)); if (OffsetNumberIsValid(offnum)) { @@ -572,6 +538,7 @@ int UHeapPagePruneGuts(const RelationBuffer *relbuf, TransactionId oldestXmin, O * Apply the planned item changes, then repair page fragmentation, and * update the page's hint bit about whether it has free line pointers. 
*/ + WaitState oldStatus = pgstat_report_waitstatus(STATE_PRUNE_TABLE); UHeapPagePruneExecute(relbuf->buffer, targetOffnum, &prstate); /* prune the dead line pointers */ @@ -639,6 +606,7 @@ int UHeapPagePruneGuts(const RelationBuffer *relbuf, TransactionId oldestXmin, O if (pruned) { *pruned = hasPruned; } + pgstat_report_waitstatus(oldStatus); } else { #ifdef DEBUG_UHEAP UHEAPSTAT_COUNT_PRUNEPAGE(PRUNE_PAGE_NO_SPACE); @@ -783,7 +751,7 @@ static int UHeapPruneItem(const RelationBuffer *relbuf, OffsetNumber offnum, Tra RowPtr *lp; Page dp = (Page)BufferGetPage(relbuf->buffer); int ndeleted = 0; - TransactionId xid; + TransactionId xid = InvalidTransactionId; bool tupdead = false; bool recentDead = false; @@ -853,7 +821,7 @@ static int UHeapPruneItem(const RelationBuffer *relbuf, OffsetNumber offnum, Tra if (TransactionIdIsValid(xid) && TransactionIdIsInProgress(xid)) { ereport(PANIC, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("Tuple will be pruned but xid is inprogress, xid=%lu, oldestxmin=%lu, oldestXidInUndo=%lu.", - xid, oldestXmin, g_instance.proc_base->oldestXidInUndo))); + xid, oldestXmin, g_instance.undo_cxt.oldestXidInUndo))); } /* short aligned */ *spaceFreed += SHORTALIGN(tup.disk_tuple_size); diff --git a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp index 2842e4415..b83c61c85 100644 --- a/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uextremeredo.cpp @@ -575,7 +575,7 @@ void UHeapXlogInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, bool if (UPageAddItem(NULL, &bufpage, (Item)utup, newlen, xlrec->offnum, true) == InvalidOffsetNumber) ereport(PANIC, (errmsg("UHeapXlogInsertOperatorPage: failed to add tuple"))); - UHeapRecordPotentialFreeSpace(NULL, buffer->buf, -1 * SHORTALIGN(newlen)); + UHeapRecordPotentialFreeSpace(buffer->buf, -1 * SHORTALIGN(newlen)); // set undo int tdSlotId = UHeapTupleHeaderGetTDSlot(utup); @@ -634,7 +634,7 @@ void UHeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Size } /* increment the potential freespace of this page */ - UHeapRecordPotentialFreeSpace(NULL, buffer->buf, SHORTALIGN(datalen)); + UHeapRecordPotentialFreeSpace(buffer->buf, SHORTALIGN(datalen)); utup.disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, rp); utup.disk_tuple_size = RowPtrGetLen(rp); @@ -649,7 +649,7 @@ void UHeapXlogDeleteOperatorPage(RedoBufferInfo *buffer, void *recorddata, Size } void UHeapXlogUpdateOperatorOldpage(UpdateRedoBuffers* buffers, void *recorddata, - bool inplaceUpdate, UHeapTupleData *oldtup, bool sameBlock, + bool inplaceUpdate, bool blockInplaceUpdate, UHeapTupleData *oldtup, bool sameBlock, BlockNumber blk, TransactionId recordxid) { XLogRecPtr lsn = buffers->oldbuffer.lsn; @@ -686,11 +686,11 @@ void UHeapXlogUpdateOperatorOldpage(UpdateRedoBuffers* buffers, void *recorddata } /* Mark the page as a candidate for pruning and update the page potential freespace */ - if (!inplaceUpdate) { + if (!inplaceUpdate || blockInplaceUpdate) { UPageSetPrunable(oldpage, recordxid); if (!sameBlock) { - UHeapRecordPotentialFreeSpace(NULL, oldbuf, SHORTALIGN(oldtup->disk_tuple_size)); + UHeapRecordPotentialFreeSpace(oldbuf, SHORTALIGN(oldtup->disk_tuple_size)); } } @@ -698,8 +698,8 @@ void UHeapXlogUpdateOperatorOldpage(UpdateRedoBuffers* buffers, void *recorddata } Size UHeapXlogUpdateOperatorNewpage(UpdateRedoBuffers* buffers, void *recorddata, - bool inplaceUpdate, void *blkdata, UHeapTupleData *oldtup, Size 
recordlen, - Size data_len, bool isinit, bool istoast, bool sameBlock, + bool inplaceUpdate, bool blockInplaceUpdate, void *blkdata, UHeapTupleData *oldtup, + Size recordlen, Size data_len, bool isinit, bool istoast, bool sameBlock, TransactionId recordxid, UpdateRedoAffixLens *affixLens) { XLogRecPtr lsn = buffers->newbuffer.lsn; @@ -825,7 +825,7 @@ Size UHeapXlogUpdateOperatorNewpage(UpdateRedoBuffers* buffers, void *recorddata */ bool onlyCopyDelta = (inplaceUpdate && oldtup->disk_tuple->t_hoff == xlhdr.t_hoff && - newlen == oldtup->disk_tuple_size); + (newlen == oldtup->disk_tuple_size) && !blockInplaceUpdate); uint32 bitmaplen = 0; uint32 deltalen = 0; char *bitmapData = NULL; @@ -920,7 +920,16 @@ Size UHeapXlogUpdateOperatorNewpage(UpdateRedoBuffers* buffers, void *recorddata /* max offset number should be valid */ Assert(UHeapPageGetMaxOffsetNumber(newpage) + 1 >= xlrec->new_offnum); - if (inplaceUpdate) { + if (blockInplaceUpdate) { + Assert(!onlyCopyDelta); + RowPtr *rp = UPageGetRowPtr(oldpage, xlrec->old_offnum); + PutBlockInplaceUpdateTuple(oldpage, (Item)newtup, rp, newlen); + /* update the potential freespace */ + UHeapRecordPotentialFreeSpace(newbuf, newlen); + Assert(oldpage == newpage); + UPageSetPrunable(newpage, recordxid); + UHeapPageSetUndo(newbuf, xlrec->old_tuple_td_id, recordxid, urecptr); + } else if (inplaceUpdate) { /* * For inplace updates, we copy the entire data portion including * the tuple header. @@ -962,10 +971,10 @@ Size UHeapXlogUpdateOperatorNewpage(UpdateRedoBuffers* buffers, void *recorddata /* Update the page potential freespace */ if (newbuf != oldbuf) { - UHeapRecordPotentialFreeSpace(NULL, newbuf, -1 * SHORTALIGN(newlen)); + UHeapRecordPotentialFreeSpace(newbuf, -1 * SHORTALIGN(newlen)); } else { int delta = newlen - oldtup->disk_tuple_size; - UHeapRecordPotentialFreeSpace(NULL, newbuf, -1 * SHORTALIGN(delta)); + UHeapRecordPotentialFreeSpace(newbuf, -1 * SHORTALIGN(delta)); } if (sameBlock) { @@ -1127,7 +1136,7 @@ void UHeapXlogMultiInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, } /* decrement the potential freespace of this page */ - UHeapRecordPotentialFreeSpace(NULL, buffer->buf, SHORTALIGN(newlen)); + UHeapRecordPotentialFreeSpace(buffer->buf, SHORTALIGN(newlen)); if (!skipUndo) { tdSlot = UHeapTupleHeaderGetTDSlot(uhtup); @@ -1353,6 +1362,7 @@ static void UHeapXlogUpdateBlock(XLogBlockHead *blockhead, XLogBlockDataParse *b bool isinit = (XLogBlockHeadGetInfo(blockhead) & XLOG_UHEAP_INIT_PAGE) != 0; bool istoast = (XLogBlockHeadGetInfo(blockhead) & XLOG_UHEAP_INIT_TOAST_PAGE) != 0; bool inplaceUpdate = !(xlrec->flags & XLZ_NON_INPLACE_UPDATE); + bool blockInplaceUpdate = (xlrec->flags & XLZ_BLOCK_INPLACE_UPDATE); TransactionId recordxid = XLogBlockHeadGetXid(blockhead); bool sameBlock = false; XLogRedoAction action; @@ -1373,15 +1383,16 @@ static void UHeapXlogUpdateBlock(XLogBlockHead *blockhead, XLogBlockDataParse *b buffers.oldbuffer.lsn = buffers.newbuffer.lsn; UHeapXlogUpdateOperatorOldpage(&buffers, (void *)maindata, inplaceUpdate, - &oldtup, sameBlock, oldblk, recordxid); + blockInplaceUpdate, &oldtup, sameBlock, oldblk, recordxid); } blkdata = XLogBlockDataGetBlockData(datadecode, &blkdatalen); Assert(blkdata != NULL); Size recordlen = datadecode->main_data_len; Size dataLen = datadecode->blockdata.data_len; - freespace = UHeapXlogUpdateOperatorNewpage(&buffers, (void *)maindata, inplaceUpdate, (void *)blkdata, - &oldtup, recordlen, dataLen, isinit, istoast, sameBlock, recordxid, &affixLens); + freespace = 
UHeapXlogUpdateOperatorNewpage(&buffers, (void *)maindata, inplaceUpdate, + blockInplaceUpdate, (void *)blkdata, &oldtup, recordlen, dataLen, isinit, + istoast, sameBlock, recordxid, &affixLens); /* may should free space */ if (!inplaceUpdate && freespace < BLCKSZ / FREESPACE_FRACTION) { RelFileNode rnode; @@ -1389,7 +1400,6 @@ static void UHeapXlogUpdateBlock(XLogBlockHead *blockhead, XLogBlockDataParse *b rnode.dbNode = blockhead->dbNode; rnode.relNode = blockhead->relNode; rnode.bucketNode = blockhead->bucketNode; - rnode.opt = blockhead->opt; XLogRecordPageWithFreeSpace(rnode, bufferinfo->blockinfo.blkno, freespace); } } else { @@ -1402,7 +1412,7 @@ static void UHeapXlogUpdateBlock(XLogBlockHead *blockhead, XLogBlockDataParse *b } UHeapXlogUpdateOperatorOldpage(&buffers, (void *)maindata, inplaceUpdate, - &oldtup, sameBlock, newblk, recordxid); + blockInplaceUpdate, &oldtup, sameBlock, newblk, recordxid); } MakeRedoBufferDirty(bufferinfo); diff --git a/src/gausskernel/storage/access/ustore/knl_uheap.cpp b/src/gausskernel/storage/access/ustore/knl_uheap.cpp index 05dfd1b02..cf3048b26 100644 --- a/src/gausskernel/storage/access/ustore/knl_uheap.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uheap.cpp @@ -15,6 +15,7 @@ #include "postgres.h" #include "pgstat.h" +#include "catalog/pg_partition_fn.h" #include "nodes/relation.h" #include "utils/datum.h" #include "utils/snapmgr.h" @@ -44,14 +45,18 @@ static Bitmapset *UHeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols, UHeapTuple oldtup, UHeapTuple newtup); static void TtsUHeapMaterialize(TupleTableSlot *slot); -static void LogUHeapInsert(UHeapWALInfo *walinfo, Relation rel); +static void LogUHeapInsert(UHeapWALInfo *walinfo, Relation rel, bool isToast = false); static void LogUPageExtendTDSlots(Buffer buf, uint8 currTDSlots, uint8 numExtended); static void LogUHeapDelete(UHeapWALInfo *walinfo); static void LogUHeapUpdate(UHeapWALInfo *oldTupWalinfo, UHeapWALInfo *newTupWalinfo, bool isInplaceUpdate, - int undoXorDeltaSize, char *xlogXorDelta, uint16 xorPrefixlen, uint16 xorSurfixlen, Relation rel); + int undoXorDeltaSize, char *xlogXorDelta, uint16 xorPrefixlen, uint16 xorSurfixlen, Relation rel, + bool isBlockInplaceUpdate); static void LogUHeapMultiInsert(UHeapMultiInsertWALInfo *multiWalinfo, bool skipUndo, char *scratch, UndoRecPtr *urpvec); -static void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fxid, Page page, BlockNumber blkno); +static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockTupleMode mode, bool nowait, + TransactionId updateXid, TransactionId lockerXid, SubTransactionId updateSubXid, SubTransactionId lockerSubXid, + bool *hasTupLock, bool *multixidIsMySelf, int waitSec = 0); + static Page GetPageBuffer(Relation relation, BlockNumber blkno, Buffer &buffer) { buffer = ReadBuffer(relation, blkno); @@ -65,7 +70,6 @@ static bool UHeapPageXidMinMax(Page page, bool multi, ShortTransactionId *min, S for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { UHeapDiskTuple utuple; - RowPtr *rowptr = UPageGetRowPtr(page, offnum); /* skip tuples which has been pruned */ @@ -73,19 +77,27 @@ static bool UHeapPageXidMinMax(Page page, bool multi, ShortTransactionId *min, S continue; } utuple = (UHeapDiskTuple)UPageGetRowData(page, rowptr); - if (!SINGLE_LOCKER_XID_IS_EXCL_LOCKED(utuple->flag) && !SINGLE_LOCKER_XID_IS_SHR_LOCKED(utuple->flag) && - !UHeapTupleHasMultiLockers(utuple->flag)) { - continue; - } /* If multi=true, we should 
only count in tuple marked as multilocked */ - if (TransactionIdIsNormal(utuple->xid) && (!multi || UHeapTupleHasMultiLockers(utuple->flag))) { - if (!found) { - found = true; - *min = *max = utuple->xid; - } else { - *min = Min(*min, utuple->xid); - *max = Max(*max, utuple->xid); + if (!multi) { + if (TransactionIdIsNormal(utuple->xid) && !UHeapTupleHasMultiLockers(utuple->flag)) { + if (!found) { + found = true; + *min = *max = utuple->xid; + } else { + *min = Min(*min, utuple->xid); + *max = Max(*max, utuple->xid); + } + } + } else { + if (TransactionIdIsNormal(utuple->xid) && UHeapTupleHasMultiLockers(utuple->flag)) { + if (!found) { + found = true; + *min = *max = utuple->xid; + } else { + *min = Min(*min, utuple->xid); + *max = Max(*max, utuple->xid); + } } } } @@ -147,14 +159,15 @@ void UHeapPageShiftBase(Buffer buffer, Page page, bool multi, int64 delta) } utuple = (UHeapDiskTuple)UPageGetRowData(page, rowptr); - if (!SINGLE_LOCKER_XID_IS_EXCL_LOCKED(utuple->flag) && !SINGLE_LOCKER_XID_IS_SHR_LOCKED(utuple->flag) && - !UHeapTupleHasMultiLockers(utuple->flag)) { - utuple->xid = (ShortTransactionId)FrozenTransactionId; - continue; - } - if (TransactionIdIsNormal(utuple->xid) && (!multi || UHeapTupleHasMultiLockers(utuple->flag))) { - utuple->xid -= delta; + if (!multi) { + if (TransactionIdIsNormal(utuple->xid) && !UHeapTupleHasMultiLockers(utuple->flag)) { + utuple->xid -= delta; + } + } else { + if (TransactionIdIsNormal(utuple->xid) && UHeapTupleHasMultiLockers(utuple->flag)) { + utuple->xid -= delta; + } } } @@ -179,7 +192,8 @@ static int FreezeSingleUHeapPage(Relation relation, Buffer buffer) RelationBuffer relbuf = {relation, buffer}; // get cutoff xid - TransactionId oldestXmin = GetOldestXmin(relation, false, true); + TransactionId oldestXmin = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); + Assert(TransactionIdIsNormal(oldestXmin)); UHeapPagePruneGuts(&relbuf, oldestXmin, InvalidOffsetNumber, 0, false, false, &latestRemovedXid, NULL); @@ -201,8 +215,8 @@ static int FreezeSingleUHeapPage(Relation relation, Buffer buffer) utuple.disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, rowptr); utuple.disk_tuple_size = RowPtrGetLen(rowptr); utuple.table_oid = RelationGetRelid(relation); + utuple.t_bucketId = InvalidBktId; UHeapTupleCopyBaseFromPage(&utuple, page); - // XXX bucket id if (UHeapTupleHasMultiLockers(utuple.disk_tuple->flag)) { continue; @@ -362,7 +376,7 @@ enum UHeapDMLType { UHEAP_DELETE, }; -template void PgStatCountDML(Relation rel, const bool useInplaceUpdate, const bool slotReused) +template void PgStatCountDML(Relation rel, const bool useInplaceUpdate) { switch (dmlType) { case UHEAP_INSERT: { @@ -374,21 +388,7 @@ template void PgStatCountDML(Relation rel, const bool useI * As of now, we only count non-inplace updates as that are required to * decide whether to trigger autovacuum. */ - if (!useInplaceUpdate) { - /* - * If we've performed non-inplace update because of - * slotReused optimization, we shouldn't increase the - * update stats else, it'll trigger autovacuum unnecessarily. But, we - * want to autoanalyze the table periodically. Hence, we increase the - * insert count. 
- */ - if (!slotReused) - pgstat_count_heap_update(rel, false); - else - pgstat_count_heap_insert(rel, 1); - } else { - PgstatCountHeapUpdateInplace(rel); - } + pgstat_count_heap_update(rel, useInplaceUpdate); break; } case UHEAP_DELETE: { @@ -404,7 +404,7 @@ template void PgStatCountDML(Relation rel, const bool useI template void UHeapFinalizeDML(Relation rel, Buffer buffer, Buffer* newbuf, UHeapTuple utuple, UHeapTuple tuple, ItemPointer tid, const bool hasTupLock, - const bool useInplaceUpdate, const bool slotReused) + const bool useInplaceUpdate) { UndoPersistence persistence = UndoPersistenceForRelation(rel); @@ -424,7 +424,7 @@ template void UHeapFinalizeDML(Relation rel, Buffer buffer LockBuffer(buffer, BUFFER_LOCK_UNLOCK); } - PgStatCountDML(rel, useInplaceUpdate, slotReused); + PgStatCountDML(rel, useInplaceUpdate); switch (dmlType) { case UHEAP_INSERT: @@ -453,7 +453,7 @@ template void UHeapFinalizeDML(Relation rel, Buffer buffer } } -static void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fxid, Page page, BlockNumber blkno) +void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fxid, Page page, BlockNumber blkno) { bool hasPruned = UHeapPagePruneOptPage(relation, buffer, fxid); #ifdef DEBUG_UHEAP @@ -477,7 +477,19 @@ static void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fx } } -Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkInsertState bistate) +static ShortTransactionId UHeapTupleSetModifiedXid(Relation relation, + Buffer buffer, UHeapTuple utuple, TransactionId xid) +{ + TransactionId xidbase = InvalidTransactionId; + ShortTransactionId tupleXid = 0; + UHeapTupleCopyBaseFromPage(utuple, BufferGetPage(buffer)); + xidbase = utuple->t_xid_base; + tupleXid = NormalTransactionIdToShort(xidbase, xid); + utuple->disk_tuple->xid = tupleXid; + return tupleXid; +} + +Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkInsertState bistate, bool isToast) { Page page; bool lockReacquired = false; @@ -491,9 +503,8 @@ Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkIn BlockNumber blkno = 0; TransactionId minXidInTDSlots = InvalidTransactionId; uint16 lower; - bool switchBuf = false; - bool aggressiveTDSearch = false; - BlockNumber firstBlock = InvalidBlockNumber; + int retryTimes = 0; + int options = 0; WHITEBOX_TEST_STUB(UHEAP_INSERT_FAILED, WhiteboxDefaultErrorEmit); if (utuple == NULL) { @@ -507,7 +518,7 @@ Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkIn /* Prepare Undo record before buffer lock since undo record length is fixed */ UndoPersistence persistence = UndoPersistenceForRelation(rel); - Oid relOid = RelationIsPartition(rel) ? rel->parentId : RelationGetRelid(rel); + Oid relOid = RelationIsPartition(rel) ? GetBaseRelOidOfParition(rel) : RelationGetRelid(rel); Oid partitionOid = RelationIsPartition(rel) ? 
RelationGetRelid(rel) : InvalidOid; urecPtr = UHeapPrepareUndoInsert(relOid, partitionOid, RelationGetRelFileNode(rel), RelationGetRnodeSpace(rel), persistence, InvalidBuffer, fxid, cid, @@ -520,20 +531,12 @@ Oid UHeapInsert(RelationData *rel, UHeapTupleData *utuple, CommandId cid, BulkIn /* Get buffer page from buffer pool and reserve TD slot */ reacquire_buffer: - buffer = RelationGetBufferForUTuple(rel, tuple->disk_tuple_size, InvalidBuffer, 0, bistate, switchBuf); + buffer = RelationGetBufferForUTuple(rel, tuple->disk_tuple_size, InvalidBuffer, + options, bistate); Assert(buffer != InvalidBuffer); - /* - * Do aggressive TD slot search after switching buffer - * and somehow ended up at the first block. - */ - BlockNumber currBlock = BufferGetBlockNumber(buffer); - if (firstBlock == InvalidBlockNumber) { - firstBlock = currBlock; - } else if (switchBuf && firstBlock == currBlock) { - aggressiveTDSearch = true; - } + (void)UHeapPagePrepareForXid(rel, buffer, fxid, false, false); page = BufferGetPage(buffer); Assert(PageIsValid(page)); @@ -544,7 +547,7 @@ reacquire_buffer: lower = phdr->pd_lower; tdSlot = UHeapPageReserveTransactionSlot(rel, buffer, fxid, &prevUrecptr, - &lockReacquired, InvalidBuffer, NULL, &minXidInTDSlots, aggressiveTDSearch); + &lockReacquired, InvalidBuffer, &minXidInTDSlots, false); /* * It is possible that available space on the page changed @@ -552,15 +555,18 @@ reacquire_buffer: */ if (lockReacquired || (lower < phdr->pd_lower)) { UnlockReleaseBuffer(buffer); - switchBuf = false; + LimitRetryTimes(retryTimes++); + if (retryTimes > FORCE_EXTEND_THRESHOLD) { + options |= UHEAP_INSERT_EXTEND; + } goto reacquire_buffer; } if (tdSlot == InvalidTDSlotId) { UnlockReleaseBuffer(buffer); UHeapSleepOrWaitForTDSlot(minXidInTDSlots, fxid, true); - // cant switch buffer anymore to avoid bouncing between blocks - switchBuf = !aggressiveTDSearch; + LimitRetryTimes(retryTimes++); + options |= UHEAP_INSERT_EXTEND; goto reacquire_buffer; } @@ -592,12 +598,12 @@ reacquire_buffer: UHeapTupleHeaderSetTDSlot(tuple->disk_tuple, tdSlot); UHeapTupleHeaderSetLockerTDSlot(tuple->disk_tuple, InvalidTDSlotId); - UHeapTupleSetRawXid(tuple, FrozenTransactionId); + UHeapTupleSetModifiedXid(rel, buffer, tuple, fxid); /* Put utuple into buffer page */ RelationPutUTuple(rel, buffer, tuple); - UHeapRecordPotentialFreeSpace(rel, buffer, -1 * SHORTALIGN(tuple->disk_tuple_size)); + UHeapRecordPotentialFreeSpace(buffer, -1 * SHORTALIGN(tuple->disk_tuple_size)); /* Update the UndoRecord now that we know where the tuple is located on the Page */ UndoRecord *undorec = (*u_sess->ustore_cxt.urecvec)[0]; @@ -654,13 +660,13 @@ reacquire_buffer: insWalInfo.xlum = &xlum; /* do the actual logging */ - LogUHeapInsert(&insWalInfo, rel); + LogUHeapInsert(&insWalInfo, rel, isToast); } undo::FinishUndoMeta(&xlum, persistence); END_CRIT_SECTION(); /* Clean up */ - UHeapFinalizeDML(rel, buffer, NULL, utuple, tuple, NULL, false, false, false); + UHeapFinalizeDML(rel, buffer, NULL, utuple, tuple, NULL, false, false); return InvalidOid; } @@ -674,8 +680,10 @@ static TransactionId UHeapFetchInsertXidGuts(UndoRecord *urec, UHeapTupleTransIn while (true) { urec->Reset(uheapinfo.urec_add); - int rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, blk, offnum, uheapinfo.xid); - if (rc == UNDO_RET_FAIL) { + UndoTraversalState rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, blk, offnum, uheapinfo.xid); + if (rc == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! 
the undo record has been force discarded."))); + } else if (rc != UNDO_TRAVERSAL_COMPLETE) { /* * Undo record could be null only when it's undo log is/about to * be discarded. We cannot use any assert for checking is the log @@ -760,6 +768,7 @@ UHeapTuple UHeapPrepareInsert(Relation rel, UHeapTupleData *tuple, int options) tuple->disk_tuple->td_id = UHEAPTUP_SLOT_FROZEN; tuple->disk_tuple->locker_td_id = UHEAPTUP_SLOT_FROZEN; tuple->table_oid = RelationGetRelid(rel); + tuple->t_bucketId = InvalidBktId; if (rel->rd_rel->relkind != RELKIND_RELATION) { /* toast table entries should never be recursively toasted */ @@ -773,303 +782,6 @@ UHeapTuple UHeapPrepareInsert(Relation rel, UHeapTupleData *tuple, int options) } } -/* - * ComputeNewXidInfomask - Given the old values of tuple header's infomask, - * compute the new values for tuple header which includes lock mode, new - * infomask and transaction slot. - * - * We don't clear the multi lockers bit in this function as for that we need - * to ensure that all the lockers are gone. Unfortunately, it is not easy to - * do that as we need to traverse all the undo chains for the current page to - * ensure the same and doing it here which is quite common code path doesn't - * seem advisable. We clear this bit lazily when we detect the conflict and - * we anyway need to traverse the undo chains for the page. - * - * We ensure that the tuple always point to the transaction slot of latest - * inserter/updater except for cases where we lock first and then update the - * tuple (aka locks via EvalPlanQual mechanism). This is because for visibility - * checks, we only need inserter/updater's xact information. Keeping their - * slot on the tuple avoids the overheads of fetching xact information from - * undo during visibility checks. Also, note that the latest inserter/updater - * can be an aborted transaction whose rollback actions are still pending. - * - * For example, say after a committed insert/update, a new request arrives to - * lock the tuple in key share mode, we will keep the inserter's/updater's slot - * on the tuple and set the multi-locker and key-share bit. If the inserter/ - * updater is already known to be having a frozen slot (visible to every one), - * we will set the key-share locker bit and the tuple will indicate a frozen - * slot. Similarly, for a new updater, if the tuple has a single locker, then - * the undo will have a frozen tuple and for multi-lockers, the undo of updater - * will have previous inserter/updater slot; in both cases the new tuple will - * point to the updaters slot. Now, the rollback of a single locker will set - * the frozen slot on tuple and the rollback of multi-locker won't change slot - * information on tuple. We don't want to keep the slot of locker on the - * tuple as after rollback, we will lose track of last updater/inserter. - * - * When we are locking for the purpose of updating the tuple, we don't need - * to preserve previous updater's information and we also keep the latest - * slot on tuple. This is only true when there are no previous lockers on - * the tuple. 
- */ -static void ComputeNewXidInfomask(UHeapTuple uhtup, Buffer buf, TransactionId tupXid, int tupTdSlot, - uint16 oldInfomask, TransactionId addToXid, int tdSlot, TransactionId singleLockerXid, LockTupleMode mode, - LockOper lockoper, uint16 *resultInfomask, int *resultTdSlot) -{ - int newTdSlot = tdSlot; - uint16 newInfomask = 0; - bool oldTupleHasUpdate = false; - bool transactionIdisInProgressForTupXid = false; - bool isUpdate = (lockoper == ForUpdate || lockoper == LockForUpdate); - uint32 needSync = 0; - - Assert(TransactionIdIsValid(addToXid)); - - if (lockoper == ForUpdate && TransactionIdIsCurrentTransactionId(singleLockerXid)) - mode = LockTupleExclusive; - else if (UHeapTupleHasMultiLockers(oldInfomask)) { - UGetMultiLockInfo(oldInfomask, tupXid, tupTdSlot, addToXid, &newInfomask, &newTdSlot, &mode, - &oldTupleHasUpdate, lockoper); - } else { - restart: - transactionIdisInProgressForTupXid = TransactionIdIsInProgress(tupXid, &needSync, true, false, true, false); - /* Since recording csnlog and clog before removing xid from procarray, need sync until proc remove the according - * xid. In case that we treat the tupXid is still in progress */ - if (needSync) { - needSync = 0; - SyncLocalXidWait(tupXid); - goto restart; - } - if ((IsUHeapTupleModified(oldInfomask) && transactionIdisInProgressForTupXid)) { - UGetMultiLockInfo(oldInfomask, tupXid, tupTdSlot, addToXid, &newInfomask, &newTdSlot, &mode, - &oldTupleHasUpdate, lockoper); - } else if (!isUpdate && TransactionIdIsInProgress(singleLockerXid, NULL, true, false, true)) { - LockTupleMode oldMode; - - /* - * When there is a single in-progress locker on the tuple and previous - * inserter/updater became all visible, we've to set multi-locker flag - * and highest lock mode. If current transaction tries to reacquire a - * lock, we don't set multi-locker flag. - */ - Assert(UHEAP_XID_IS_LOCKED_ONLY(oldInfomask)); - if (singleLockerXid != addToXid) { - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - newInfomask |= UHEAP_MULTI_LOCKERS; - newTdSlot = tupTdSlot; - } - - oldMode = GetOldLockMode(oldInfomask); - /* Acquire the strongest of both. */ - if (mode < oldMode) - mode = oldMode; - - /* Keep the old tuple slot as it is */ - newTdSlot = tupTdSlot; - } else if (!isUpdate && transactionIdisInProgressForTupXid) { - /* - * Normally if the tuple is not modified and the current transaction - * is in progress, the other transaction can't lock the tuple except - * itself. - * - * However, this can happen while locking the updated tuple chain. We - * keep the transaction slot of original tuple as that will allow us - * to check the visibility of tuple by just referring the current - * transaction slot. - */ - Assert((tupXid == addToXid)); - - if (tupXid != addToXid) { - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - newInfomask |= UHEAP_MULTI_LOCKERS; - } - - newTdSlot = tupTdSlot; - } else if (!isUpdate && tupTdSlot == UHEAPTUP_SLOT_FROZEN) { - /* - * It's a frozen update or insert, so the locker must not change the - * slot on a tuple. The lockmode to be used on tuple is computed - * below. There could be a single committed/aborted locker - * (multilocker case is handled in the first condition). In that case, - * we can ignore the locker. If the locker is still in progress, it'll - * be handled in above case. 
- */ - newTdSlot = UHEAPTUP_SLOT_FROZEN; - } else if (!isUpdate && !UHEAP_XID_IS_LOCKED_ONLY(oldInfomask) && tupTdSlot != UHEAPTUP_SLOT_FROZEN) { - /* - * It's a committed update/insert or an aborted update whose rollback - * action is still pending, so we gotta preserve him as updater of the - * tuple. Also, indicate that tuple has multiple lockers. - * - * Note that tuple xid could be invalid if the undo records - * corresponding to the tuple transaction is discarded. In that case, - * it can be considered as committed. - */ - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - newInfomask |= UHEAP_MULTI_LOCKERS; - oldTupleHasUpdate = true; - - if (UHeapTupleIsInPlaceUpdated(oldInfomask)) - newInfomask |= UHEAP_INPLACE_UPDATED; - else if (UHeapTupleIsUpdated(oldInfomask)) - newInfomask |= UHEAP_UPDATED; - else { - /* This is a freshly inserted tuple. */ - oldTupleHasUpdate = false; - } - - if (!oldTupleHasUpdate) { - /* - * This is a freshly inserted tuple, allow to set the requested - * lock mode on tuple. - */ - } else { - LockTupleMode oldMode; - - if (UHEAP_XID_IS_EXCL_LOCKED(oldInfomask)) - oldMode = LockTupleExclusive; - else if (UHEAP_XID_IS_NOKEY_EXCL_LOCKED(oldInfomask)) - oldMode = LockTupleNoKeyExclusive; - else { - /* - * Tuple must not be locked in any other mode as we are here - * because either the tuple is updated or inserted and the - * corresponding transaction is committed. - */ - Assert(!(UHEAP_XID_IS_KEYSHR_LOCKED(oldInfomask) || UHEAP_XID_IS_SHR_LOCKED(oldInfomask))); - - oldMode = LockTupleNoKeyExclusive; - } - - if (mode < oldMode) - mode = oldMode; - } - - newTdSlot = tupTdSlot; - } else if (!isUpdate && UHEAP_XID_IS_LOCKED_ONLY(oldInfomask) && tupTdSlot != UHEAPTUP_SLOT_FROZEN) { - LockTupleMode oldMode; - - /* - * This case arises for committed/aborted non-inplace updates where - * the newly inserted tuple is marked as locked-only, but multi-locker - * bit is not set. - * - * Note that tuple xid could be invalid if the undo records - * corresponding to the tuple transaction is discarded. In that case, - * it can be considered as committed. - */ - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - newInfomask |= UHEAP_MULTI_LOCKERS; - /* The tuple is locked-only. */ - Assert(!(oldInfomask & (UHEAP_DELETED | UHEAP_UPDATED | UHEAP_INPLACE_UPDATED))); - oldMode = GetOldLockMode(oldInfomask); - /* Acquire the strongest of both. */ - if (mode < oldMode) - mode = oldMode; - - /* Keep the old tuple slot as it is */ - newTdSlot = tupTdSlot; - } else if (isUpdate && TransactionIdIsValid(singleLockerXid) && !UHeapTransactionIdDidCommit(singleLockerXid)) { - LockTupleMode oldMode; - - /* - * There can be a non-conflicting in-progress key share locker on the - * tuple and we want to update the tuple in no-key exclusive mode. In - * that case, we should set the multilocker flag as well. - * - * Note that, the single locker xid can be aborted whose rollback - * actions are still pending. The scenario should be handled in the - * same way as an in-progress single locker, i.e., we should set the - * multilocker flag accordingly. Else, the rollback of single locker - * might resotre the infomask of the tuple incorrectly. 
- */ - Assert(UHEAP_XID_IS_LOCKED_ONLY(oldInfomask)); - if (singleLockerXid != addToXid) { - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - newInfomask |= UHEAP_MULTI_LOCKERS; - - /* - * If the tuple has multilocker and we're locking the tuple for - * update, we insert multilocker type of undo instead of - * lock-for-update undo. For multilocker undo, we keep the old - * tuple slot as it is. - */ - if (lockoper == LockForUpdate) - newTdSlot = tupTdSlot; - } - - oldMode = GetOldLockMode(oldInfomask); - if (oldMode == LockTupleExclusive) { - /* - * singleLockerXid is aborted and this xid is from t_xid, - * see inplaceheap_lock_tuple_guts - * LockOnly exclusive lock doesn't do actual rollback to data page. - */ - } else { - /* Acquire the strongest of both. */ - Assert(singleLockerXid == addToXid || mode > oldMode); - if (mode < oldMode) - mode = oldMode; - } - } - } - /* - * For LockOnly mode and LockForUpdate mode with multilocker flag on the - * tuple, we keep the old transaction slot as it is. Since we're not - * changing the xid slot in the tuple, we shouldn't remove the existing - * (if any) invalid xact flag from the tuple. - */ - if (!isUpdate || ((lockoper == LockForUpdate) && UHeapTupleHasMultiLockers(newInfomask))) { - if (UHeapTupleHasInvalidXact(oldInfomask)) - newInfomask |= UHEAP_INVALID_XACT_SLOT; - } - - if (isUpdate && !UHeapTupleHasMultiLockers(newInfomask)) { - if (lockoper == LockForUpdate) { - /* - * When we are locking for the purpose of updating the tuple, we - * don't need to preserve previous updater's information. - */ - newInfomask |= UHEAP_XID_LOCK_ONLY; - if (mode == LockTupleExclusive) - newInfomask |= UHEAP_XID_EXCL_LOCK; - else - newInfomask |= UHEAP_XID_NOKEY_EXCL_LOCK; - } else if (mode == LockTupleExclusive) - newInfomask |= UHEAP_XID_EXCL_LOCK; - } else { - if (lockoper != ForUpdate && !oldTupleHasUpdate) - newInfomask |= UHEAP_XID_LOCK_ONLY; - switch (mode) { - case LockTupleKeyShare: - newInfomask |= UHEAP_XID_KEYSHR_LOCK; - break; - case LockTupleShared: - newInfomask |= UHEAP_XID_SHR_LOCK; - break; - case LockTupleNoKeyExclusive: - newInfomask |= UHEAP_XID_NOKEY_EXCL_LOCK; - break; - case LockTupleExclusive: - newInfomask |= UHEAP_XID_EXCL_LOCK; - break; - default: - elog(ERROR, "invalid lock mode"); - } - } - - *resultInfomask = newInfomask; - - if (resultTdSlot) { - *resultTdSlot = newTdSlot; - } - - /* - * We store the reserved transaction slot only when we update the tuple. - * For lock only, we keep the old transaction slot in the tuple. 
- */ - Assert(isUpdate || newTdSlot == tupTdSlot); -} - static bool TestPriorXmaxGuts(UHeapTupleTransInfo *tdinfo, const UHeapTuple tuple, UHeapDiskTupleData *tupHdr, Buffer buffer, TransactionId priorXmax) { @@ -1078,7 +790,7 @@ static bool TestPriorXmaxGuts(UHeapTupleTransInfo *tdinfo, const UHeapTuple tupl ItemPointer tid = &(tuple->ctid); BlockNumber blkno = ItemPointerGetBlockNumber(tid); OffsetNumber offnum = ItemPointerGetOffsetNumber(tid); - int rc PG_USED_FOR_ASSERTS_ONLY; + UndoTraversalState rc = UNDO_TRAVERSAL_DEFAULT; do { int prev_trans_slot_id = tdinfo->td_slot; @@ -1087,9 +799,9 @@ static bool TestPriorXmaxGuts(UHeapTupleTransInfo *tdinfo, const UHeapTuple tupl urec->Reset(tdinfo->urec_add); rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, blkno, offnum, tdinfo->xid); - - // the tuple cannot be all-visible, at least the current snapshot cannot see this tuple - Assert(rc != UNDO_RET_FAIL); + if (rc == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discarded."))); + } tdinfo->td_slot = UpdateTupleHeaderFromUndoRecord(urec, tupHdr, BufferGetPage(buffer)); @@ -1108,8 +820,11 @@ static bool TestPriorXmaxGuts(UHeapTupleTransInfo *tdinfo, const UHeapTuple tupl // td is reused, then check undo for trans info if (UHeapTupleHasInvalidXact(tupHdr->flag)) { - FetchTransInfoFromUndo(blkno, offnum, tdinfo->xid, tdinfo, NULL); - } + UndoTraversalState state = FetchTransInfoFromUndo(blkno, offnum, tdinfo->xid, tdinfo, NULL, false); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discarded."))); + } + } } while (tdinfo->urec_add != 0); DELETE_EX(urec); @@ -1145,7 +860,10 @@ static bool TestPriorXmax(Relation relation, Buffer buffer, Snapshot snapshot, U return false; } - UHeapTupleGetTransInfo(buffer, offnum, &tdinfo); + UndoTraversalState state = UHeapTupleGetTransInfo(buffer, offnum, &tdinfo); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! 
the undo record has been force discarded."))); + } if (TransactionIdEquals(priorXmax, tdinfo.xid)) { valid = true; @@ -1458,7 +1176,7 @@ out: */ static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockTupleMode mode, bool nowait, TransactionId updateXid, TransactionId lockerXid, SubTransactionId updateSubXid, SubTransactionId lockerSubXid, - bool *hasTupLock, bool *multixidIsMySelf) + bool *hasTupLock, bool *multixidIsMySelf, int waitSec) { Assert(utuple->tupTableType == UHEAP_TUPLE); uint16 flag = utuple->disk_tuple->flag; @@ -1491,7 +1209,7 @@ static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockT errmsg("could not obtain lock on row in relation \"%s\"", RelationGetRelationName(relation)))); } } else { - LockTuple(relation, &(utuple->ctid), tupleLockType, true); + LockTuple(relation, &(utuple->ctid), tupleLockType, true, waitSec); } *hasTupLock = true; @@ -1504,7 +1222,7 @@ static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockT errmsg("could not obtain lock on row in relation \"%s\"", RelationGetRelationName(relation)))); } } else { - MultiXactIdWait(xwait, GetMXactStatusForLock(mode, false), NULL); + MultiXactIdWait(xwait, GetMXactStatusForLock(mode, false), NULL, waitSec); } // reacquire lock @@ -1565,7 +1283,7 @@ static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockT "could not obtain lock on row in relation \"%s\"", RelationGetRelationName(relation)))); } } else { - LockTuple(relation, &(utuple->ctid), tupleLockType, true); + LockTuple(relation, &(utuple->ctid), tupleLockType, true, waitSec); } *hasTupLock = true; @@ -1594,10 +1312,10 @@ static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockT } } else { if (InvalidSubTransactionId != subXid) { - SubXactLockTableWait(topXid, subXid); + SubXactLockTableWait(topXid, subXid, waitSec); isSubXact = true; } else { - XactLockTableWait(topXid, true); + XactLockTableWait(topXid, true, waitSec); } } @@ -1622,17 +1340,14 @@ static bool UHeapWait(Relation relation, Buffer buffer, UHeapTuple utuple, LockT * Callers: UHeapUpdate, UHeapLockTuple * This function will do locking, UNDO and WAL logging part. 
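 *
 * With this patch the single-locker xid is stamped straight onto the tuple
 * instead of being derived via ComputeNewXidInfomask(). A minimal sketch of
 * the flag handling inside the critical section (undo and WAL steps omitted;
 * see the body below for the real sequence):
 *
 *     START_CRIT_SECTION();
 *     utuple->disk_tuple->flag &= ~UHEAP_LOCK_STATUS_MASK;    // drop stale lock bits
 *     UHeapTupleHeaderClearSingleLocker(utuple->disk_tuple);  // forget any prior locker
 *     utuple->disk_tuple->flag |= SINGLE_LOCKER_XID_IS_LOCK;  // mark the xid as a locker
 *     ...
 *     END_CRIT_SECTION();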
*/ -static void UHeapExecuteLockTuple(Relation relation, Buffer buffer, UHeapTuple utuple, LockTupleMode mode, - bool clearMultiXact) +static void UHeapExecuteLockTuple(Relation relation, Buffer buffer, UHeapTuple utuple, LockTupleMode mode) { Assert(utuple->tupTableType == UHEAP_TUPLE); TransactionId xid = InvalidTransactionId; TransactionId xidOnTup = InvalidTransactionId; TransactionId curxid = InvalidTransactionId; - - if (mode == LockTupleKeyShare || mode == LockTupleNoKeyExclusive) { - ereport(ERROR, (errmsg("For Key Share and For No Key Update is not support for ustore."))); - } + TransactionId xidbase = InvalidTransactionId; + bool multi = false; if (mode == LockTupleExclusive) { xid = GetCurrentTransactionId(); @@ -1642,23 +1357,16 @@ static void UHeapExecuteLockTuple(Relation relation, Buffer buffer, UHeapTuple u if (IsSubTransaction()) { curxid = GetCurrentTransactionId(); } - } START_CRIT_SECTION(); - if (clearMultiXact) - utuple->disk_tuple->flag &= ~UHEAP_MULTI_LOCKERS; - - uint16 infomask = 0, oldinfomask = utuple->disk_tuple->flag; + uint16 oldinfomask = utuple->disk_tuple->flag; // return new tuple header to caller utuple->disk_tuple->flag &= ~UHEAP_LOCK_STATUS_MASK; - utuple->disk_tuple->flag |= infomask; UHeapTupleHeaderClearSingleLocker(utuple->disk_tuple); - - TransactionId xidbase = InvalidTransactionId; - bool multi = false; + utuple->disk_tuple->flag |= SINGLE_LOCKER_XID_IS_LOCK; if (mode == LockTupleExclusive) { if (IsSubTransaction()) { @@ -1751,9 +1459,10 @@ static bool IsTupleLockedByUs(UHeapTuple utuple, TransactionId xid, LockTupleMod TM_Result UHeapLockTuple(Relation relation, UHeapTuple tuple, Buffer* buffer, - CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, - bool follow_updates, bool eval, Snapshot snapshot, - bool isSelectForUpdate, bool allowLockSelf, bool isUpsert, TransactionId conflictXid) + CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, + bool follow_updates, bool eval, Snapshot snapshot, + bool isSelectForUpdate, bool allowLockSelf, bool isUpsert, TransactionId conflictXid, + int waitSec) { RowPtr *rp = NULL; UHeapTupleData utuple; @@ -1762,7 +1471,6 @@ TM_Result UHeapLockTuple(Relation relation, UHeapTuple tuple, Buffer* buffer, OffsetNumber offnum; ItemPointerData ctid; ItemPointer tid = &tuple->ctid; - bool clearMultiXact = false; // shall we clear the multi-xact bit? 
this is used for shared lockers TM_Result result; TransactionId updateXid = InvalidTransactionId, lockerXid = InvalidTransactionId; @@ -1772,11 +1480,16 @@ TM_Result UHeapLockTuple(Relation relation, UHeapTuple tuple, Buffer* buffer, bool hasTupLock = false; UHeapTupleTransInfo tdinfo; bool multixidIsMyself = false; + int retryTimes = 0; if (mode == LockTupleShared && !isSelectForUpdate) { ereport(ERROR, (errmsg("UStore only supports share lock from select-for-share statement."))); } + if (mode == LockTupleKeyShare || mode == LockTupleNoKeyExclusive) { + ereport(ERROR, (errmsg("For Key Share and For No Key Update are not supported for ustore."))); + } + // lock buffer and fetch tuple row pointer blkno = ItemPointerGetBlockNumber(tid); *buffer = ReadBuffer(relation, blkno); @@ -1800,7 +1513,9 @@ check_tup_satisfies_update: multixidIsMyself = false; if (result == TM_Invisible) { - Assert(0); + UnlockReleaseBuffer(*buffer); + ereport(defence_errlevel(), (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("attempted to lock invisible tuple"))); } else if (result == TM_BeingModified || result == TM_Ok) { #ifdef ENABLE_WHITEBOX if (result != TM_Ok) { @@ -1834,7 +1549,8 @@ check_tup_satisfies_update: if (result != TM_Ok) { // wait for remaining updater/locker to terminate if (!UHeapWait(relation, *buffer, &utuple, mode, nowait, updateXid, lockerXid, updateSubXid, lockerSubXid, - &hasTupLock, &multixidIsMyself)) { + &hasTupLock, &multixidIsMyself, waitSec)) { + LimitRetryTimes(retryTimes++); goto check_tup_satisfies_update; } } @@ -1885,9 +1601,7 @@ check_tup_satisfies_update: utuple.disk_tuple_size = RowPtrGetLen(rp); utuple.ctid = *tid; - /* clear multi-xact when our lock mode is exclusive */ - clearMultiXact = (mode == LockTupleExclusive); - (void)UHeapExecuteLockTuple(relation, *buffer, &utuple, mode, clearMultiXact); + (void)UHeapExecuteLockTuple(relation, *buffer, &utuple, mode); // return the locked tuple to caller UHeapCopyTupleWithBuffer(&utuple, tuple); @@ -1958,7 +1672,6 @@ bool TableFetchAndStore(Relation scanRelation, Snapshot snapshot, Tuple tuple, B return false; } - TM_Result UHeapDelete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, Snapshot snapshot, bool wait, TupleTableSlot** oldslot, TM_FailureData *tmfd, bool changingPart, bool allowDeleteSelf) { @@ -1966,7 +1679,6 @@ TM_Result UHeapDelete(Relation relation, ItemPointer tid, CommandId cid, Snapsho Buffer buffer; UndoRecPtr prevUrecptr; int transSlotId; - uint16 newInfomask; bool lockReacquired; TransactionId fxid = GetTopTransactionId(); UndoRecPtr urecptr = INVALID_UNDO_REC_PTR; @@ -1981,11 +1693,10 @@ TM_Result UHeapDelete(Relation relation, ItemPointer tid, CommandId cid, Snapsho SubTransactionId subxid = InvalidSubTransactionId; TM_Result result; UHeapTupleTransInfo tdinfo; - uint16 tempInfomask, newInfoMask; - int newTDSlotId; StringInfoData undotup; bool multixidIsMyself = false; TransactionId minXidInTDSlots = InvalidTransactionId; + int retryTimes = 0; Assert(ItemPointerIsValid(tid)); @@ -2023,6 +1734,7 @@ check_tup_satisfies_update: if (!UHeapWait(relation, buffer, &utuple, LockTupleExclusive, false, updateXid, lockerXid, updateSubXid, lockerSubXid, &hasTupLock, &multixidIsMyself)) { + LimitRetryTimes(retryTimes++); goto check_tup_satisfies_update; } @@ -2087,7 +1799,7 @@ check_tup_satisfies_update: } transSlotId = UHeapPageReserveTransactionSlot(relation, buffer, fxid, - &prevUrecptr, &lockReacquired, 
InvalidBuffer, &minXidInTDSlots); /* * We need to re-fetch the row information since it might @@ -2115,6 +1827,7 @@ check_tup_satisfies_update: * refetch the tuple here. */ Assert(!RowPtrIsDeleted(rp)); + (void)UHeapPagePrepareForXid(relation, buffer, fxid, false, false); utuple.disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, rp); utuple.disk_tuple_size = RowPtrGetLen(rp); @@ -2140,15 +1853,6 @@ check_tup_satisfies_update: tdinfo.xid = InvalidTransactionId; } - utuple.disk_tuple->flag &= ~UHEAP_MULTI_LOCKERS; - tempInfomask = utuple.disk_tuple->flag; - - /* Compute the new flag to store into the tuple. */ - ComputeNewXidInfomask(&utuple, buffer, tdinfo.xid, tdinfo.td_slot, tempInfomask, fxid, transSlotId, lockerXid, - LockTupleExclusive, ForUpdate, &newInfoMask, &newTDSlotId); - - Assert(newTDSlotId == transSlotId); - if (TransactionIdOlderThanAllUndo(tdinfo.xid)) { tdinfo.xid = FrozenTransactionId; } @@ -2175,7 +1879,7 @@ check_tup_satisfies_update: /* Prepare Undo */ UndoPersistence persistence = UndoPersistenceForRelation(relation); - Oid relOid = RelationIsPartition(relation) ? relation->parentId : RelationGetRelid(relation); + Oid relOid = RelationIsPartition(relation) ? GetBaseRelOidOfParition(relation) : RelationGetRelid(relation); Oid partitionOid = RelationIsPartition(relation) ? RelationGetRelid(relation) : InvalidOid; urecptr = UHeapPrepareUndoDelete(relOid, partitionOid, RelationGetRelFileNode(relation), @@ -2198,16 +1902,12 @@ check_tup_satisfies_update: */ UPageSetPrunable(page, fxid); - UHeapRecordPotentialFreeSpace(relation, buffer, SHORTALIGN(utuple.disk_tuple_size)); - - /* Fixme: Temporary hack, this value should come from ComputeNewXidInfomask() */ - newInfomask = UHEAP_XID_EXCL_LOCK; + UHeapRecordPotentialFreeSpace(buffer, SHORTALIGN(utuple.disk_tuple_size)); UHeapTupleHeaderSetTDSlot(utuple.disk_tuple, transSlotId); utuple.disk_tuple->flag &= ~UHEAP_VIS_STATUS_MASK; - utuple.disk_tuple->flag |= UHEAP_DELETED | newInfomask; - - utuple.disk_tuple->flag &= ~SINGLE_LOCKER_XID_IS_LOCK; + utuple.disk_tuple->flag |= UHEAP_DELETED | UHEAP_XID_EXCL_LOCK; + UHeapTupleSetModifiedXid(relation, buffer, &utuple, fxid); /* Signal that this is actually a move into another partition */ if (changingPart) @@ -2269,11 +1969,66 @@ check_tup_satisfies_update: END_CRIT_SECTION(); pfree(undotup.data); - UHeapFinalizeDML(relation, buffer, NULL, &utuple, NULL, &(utuple.ctid), hasTupLock, false, false); + UHeapFinalizeDML(relation, buffer, NULL, &utuple, NULL, &(utuple.ctid), hasTupLock, false); return TM_Ok; } +static void PutInplaceUpdateTuple(UHeapTuple oldTup, UHeapTuple newTup, RowPtr *lp) +{ + /* + * For inplace updates, we copy the entire data portion including null + * bitmap of new tuple. + * + * For the special case where we are doing inplace updates even when + * the new tuple is bigger, we need to adjust the old tuple's location + * so that new tuple can be copied at that location as it is. + */ + RowPtrChangeLen(lp, newTup->disk_tuple_size); + errno_t rc = memcpy_s((char *)oldTup->disk_tuple + SizeOfUHeapDiskTupleData, + newTup->disk_tuple_size - SizeOfUHeapDiskTupleData, + (char *)newTup->disk_tuple + SizeOfUHeapDiskTupleData, + newTup->disk_tuple_size - SizeOfUHeapDiskTupleData); + securec_check(rc, "\0", "\0"); + /* + * Copy everything from new tuple in infomask apart from visibility + * flags. 
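+ * A worked example of the merge, with hypothetical flag values chosen only
+ * for illustration and UHEAP_VIS_STATUS_MASK assumed to cover the low bits:
+ *
+ *     old flag = 0b0101'0011, new flag = 0b1010'0100, mask = 0b0000'1111
+ *     merged   = (old & mask) | (new & ~mask) = 0b1010'0011
+ *
+ * i.e. the old tuple keeps its visibility bits and inherits every other
+ * infomask bit from the new tuple.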
+ */ + oldTup->disk_tuple->flag = oldTup->disk_tuple->flag & UHEAP_VIS_STATUS_MASK; + oldTup->disk_tuple->flag |= (newTup->disk_tuple->flag & ~UHEAP_VIS_STATUS_MASK); + /* Copy number of attributes in tuple. */ + UHeapTupleHeaderSetNatts(oldTup->disk_tuple, UHeapTupleHeaderGetNatts(newTup->disk_tuple)); + /* also update the tuple length and self pointer */ + oldTup->disk_tuple_size = newTup->disk_tuple_size; + oldTup->disk_tuple->t_hoff = newTup->disk_tuple->t_hoff; + return; +} + +void PutBlockInplaceUpdateTuple(Page page, Item item, RowPtr *lp, Size size) +{ + UHeapPageHeaderData *uphdr = (UHeapPageHeaderData *)page; + Size alignedSize = SHORTALIGN(size); + int upper = (int)uphdr->pd_upper - (int)alignedSize; + Assert((int)uphdr->pd_upper >= (int)alignedSize); + Assert(upper >= uphdr->pd_lower); + + if (upper < uphdr->pd_lower || (int)uphdr->pd_upper < (int)alignedSize) { + elog(PANIC, "upper=%d, lower=%d, size=%d, tuplesize=%d, newupper=%d.", + (int)uphdr->pd_upper, (int)uphdr->pd_lower, (int)size, (int)alignedSize, upper); + } + + /* set the item pointer */ + SetNormalRowPointer(lp, upper, size); + + /* copy the item's data onto the page */ + errno_t rc = memcpy_s((char *) page + upper, size, item, size); + securec_check(rc, "\0", "\0"); + + /* adjust page header */ + uphdr->pd_upper = (uint16)upper; + return; +} + /* * UHeapUpdate - update a tuple * @@ -2288,12 +2043,9 @@ TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer ot bool *indexkey_update_flag, Bitmapset **modifiedIdxAttrs, bool allow_inplace_update) { TM_Result result = TM_Ok; - TransactionId fxid; - TransactionId xid = GetTopTransactionId(); - fxid = xid; + TransactionId fxid = GetTopTransactionId(); TransactionId saveTupXid; TransactionId oldestXidHavingUndo; - TransactionId singleLockerXid = InvalidTransactionId; Bitmapset *inplaceUpdAttrs = NULL; Bitmapset *keyAttrs = NULL; Bitmapset *interestingAttrs = NULL; @@ -2316,11 +2068,11 @@ TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer ot Size pagefree = 0; int oldtupNewTransSlot = InvalidTDSlotId; int newtupTransSlot = InvalidTDSlotId; - int resultTransSlotId = InvalidTDSlotId; OffsetNumber oldOffnum = 0; bool haveTupleLock = false; bool isIndexUpdated = false; bool useInplaceUpdate = false; + bool useBlockInplaceUpdate = false; bool checkedLockers = false; bool lockerRemains = false; bool anyMultiLockerMemberAlive = false; @@ -2328,16 +2080,13 @@ TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer ot bool oldbufLockReacquired = false; bool needToast = false; bool hasSubXactLock = false; - bool slotReused = false; bool inplaceUpdated = false; bool doReacquire = false; UHeapTupleTransInfo txactinfo; - uint16 oldInfomask = 0; - uint16 newInfomask = 0; - uint16 tempInfomask = 0; uint16 infomaskOldTuple = 0; uint16 infomaskNewTuple = 0; TransactionId lockerXid = InvalidTransactionId; + ShortTransactionId tupleXid = 0; SubTransactionId lockerSubXid = InvalidSubTransactionId; SubTransactionId updateSubXid = InvalidSubTransactionId; SubTransactionId subxid = InvalidSubTransactionId; @@ -2350,6 +2099,7 @@ TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer ot LockTupleMode lockmode; TransactionId minXidInTDSlots = InvalidTransactionId; bool oldBufLockReleased = false; + int retryTimes = 0; Assert(newtup->tupTableType == UHEAP_TUPLE); Assert(ItemPointerIsValid(otid)); @@ -2427,8 +2177,8 @@ check_tup_satisfies_update: if (result == TM_Invisible) { UnlockReleaseBuffer(buffer); - 
ereport(PANIC, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("attempted to update invisible inplace heap tuple"))); + ereport(defence_errlevel(), (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("attempted to update invisible tuple"))); } else if ((result == TM_BeingModified) && wait) { #ifdef ENABLE_WHITEBOX ereport(WARNING, (errmsg("UHeapUpdate returned %d", result))); @@ -2504,8 +2254,8 @@ check_tup_satisfies_update: bms_free(inplaceUpdAttrs); bms_free(keyAttrs); - *indexkey_update_flag = - !UHeapTupleIsInPlaceUpdated(((UHeapTuple)newtup)->disk_tuple->flag) || *modifiedIdxAttrs != NULL; + *indexkey_update_flag = !UHeapTupleIsInPlaceUpdated(((UHeapTuple)newtup)->disk_tuple->flag) || + (modifiedIdxAttrs != NULL && *modifiedIdxAttrs != NULL); #ifdef ENABLE_WHITEBOX ereport(WARNING, (errmsg("UHeapUpdate returned %d", result))); @@ -2574,24 +2324,8 @@ check_tup_satisfies_update: } } else if (newtupsize <= oldtupsize) { useInplaceUpdate = true; - if (allow_inplace_update == false) { - useInplaceUpdate = false; - } - } else { - /* Pass delta space required to accommodate the new tuple. */ - useInplaceUpdate = UHeapPagePruneOpt(relation, buffer, oldOffnum, newtupsize - oldtupsize); - - if (allow_inplace_update == false) { - useInplaceUpdate = false; - } - -#ifdef DEBUG_UHEAP - if (!useInplaceUpdate) - UHEAPSTAT_COUNT_NONINPLACE_UPDATE_CAUSE(PAGE_PRUNE_FAILED); -#endif - /* The page might have been modified, so refresh disk_tuple */ - oldtup.disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, lp); } + /* * Acquire subtransaction lock, if current transaction is a * subtransaction. @@ -2609,7 +2343,7 @@ check_tup_satisfies_update: * that by releasing the buffer lock. */ oldtupNewTransSlot = UHeapPageReserveTransactionSlot(relation, buffer, fxid, &prevUrecptr, - &lockReacquired, InvalidBuffer, &slotReused, &minXidInTDSlots); + &lockReacquired, InvalidBuffer, &minXidInTDSlots); /* * We need to re-fetch the row information since it might @@ -2620,6 +2354,7 @@ check_tup_satisfies_update: pagefree = PageGetUHeapFreeSpace(page); if (lockReacquired) { + LimitRetryTimes(retryTimes++); goto check_tup_satisfies_update; } @@ -2627,6 +2362,7 @@ check_tup_satisfies_update: LockBuffer(buffer, BUFFER_LOCK_UNLOCK); UHeapSleepOrWaitForTDSlot(minXidInTDSlots, fxid); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); + LimitRetryTimes(retryTimes++); goto check_tup_satisfies_update; } @@ -2638,41 +2374,10 @@ check_tup_satisfies_update: * refetch the tuple here. */ Assert(!RowPtrIsDeleted(lp)); + (void)UHeapPagePrepareForXid(relation, buffer, fxid, false, false); oldtup.disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, lp); oldtup.disk_tuple_size = RowPtrGetLen(lp); - /* - * Using a transaction slot of transaction that is still not all-visible - * will lead to undo access during tuple visibility checks and that sucks - * the performance. To avoid accessing undo, we perform non-inplace - * updates so as to distribute the tuple across pages so that we don't - * face scarcity of transaction slots on the page. However, we must have - * a hard limit for this optimization, else the number of blocks will - * increase without any bound. 
- */ - if (slotReused) { - BlockNumber nblocks = RelationGetNumberOfBlocks(relation); - -#ifdef DEBUG_UHEAP - UHEAPSTAT_COUNT_NONINPLACE_UPDATE_CAUSE(SLOT_REUSED); -#endif - - if (nblocks <= NUM_BLOCKS_FOR_NON_INPLACE_UPDATES) { - useInplaceUpdate = false; -#ifdef DEBUG_UHEAP - UHEAPSTAT_COUNT_NONINPLACE_UPDATE_CAUSE(nblocks_LESS_THAN_NBLOCKS); -#endif - } else - slotReused = false; - } - -#ifdef DEBUG_UHEAP - if (!useInplaceUpdate) - UHEAPSTAT_COUNT_UPDATE(NON_INPLACE_UPDATE); - else - UHEAPSTAT_COUNT_UPDATE(INPLACE_UPDATE); -#endif - /* * If the slot is marked as frozen, the latest modifier of the tuple must * be frozen. @@ -2694,7 +2399,7 @@ check_tup_satisfies_update: * undo tuple belongs to previous epoch and hence all-visible. See * comments atop of file inplaceheapam_visibility.c. */ - oldestXidHavingUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + oldestXidHavingUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); if (TransactionIdPrecedes(txactinfo.xid, oldestXidHavingUndo)) { txactinfo.xid = FrozenTransactionId; oldUpdaterXid = FrozenTransactionId; @@ -2702,14 +2407,29 @@ check_tup_satisfies_update: Assert(!UHeapTupleIsUpdated(oldtup.disk_tuple->flag)); + if (!allow_inplace_update) { + useInplaceUpdate = false; + useBlockInplaceUpdate = false; + } else if (!useInplaceUpdate) { + /* Pass delta space required to accommodate the new tuple. */ + useInplaceUpdate = UHeapPagePruneOpt(relation, buffer, oldOffnum, + newtupsize - oldtupsize); + /* The page might have been modified, so refresh disk_tuple */ + oldtup.disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, lp); + if (!useInplaceUpdate && newtupsize <= pagefree) { + /* Reserved for link update. */ + useInplaceUpdate = false; + useBlockInplaceUpdate = false; + } + } + /* * updated tuple doesn't fit on current page or the toaster needs to be * activated or transaction slot has been reused. To prevent concurrent * sessions from updating the tuple, we have to temporarily mark it * locked, while we release the page lock. */ - Assert(!slotReused || !useInplaceUpdate); - if (slotReused || (!useInplaceUpdate && newtupsize > pagefree) || needToast) { + if (!useInplaceUpdate) { BlockNumber oldblk, newblk; TD oldTD; @@ -2717,7 +2437,7 @@ check_tup_satisfies_update: oldTD.undo_record_ptr = txactinfo.urec_add; if (!alreadyLocked) { - (void)UHeapExecuteLockTuple(relation, buffer, &oldtup, LockTupleExclusive, true); + (void)UHeapExecuteLockTuple(relation, buffer, &oldtup, LockTupleExclusive); } LockBuffer(buffer, BUFFER_LOCK_UNLOCK); @@ -2742,8 +2462,7 @@ check_tup_satisfies_update: * perform non-inplace update in a separate page so as to reduce * contention on transaction slots. */ - if (slotReused || newtupsize > pagefree) { - Assert(!useInplaceUpdate); + if (!needToast) { newbuf = RelationGetBufferForUTuple(relation, uheaptup->disk_tuple_size, buffer, 0, NULL); } else { /* Re-acquire the lock on the old tuple's page. */ @@ -2776,7 +2495,7 @@ check_tup_satisfies_update: lower = ((UHeapPageHeaderData *)npage)->pd_lower; newtupTransSlot = UHeapPageReserveTransactionSlot(relation, newbuf, fxid, &newPrevUrecptr, - &lockReacquired, InvalidBuffer, NULL, &minXidInTDSlots); + &lockReacquired, InvalidBuffer, &minXidInTDSlots); /* * It is possible that available space on the page changed @@ -2926,7 +2645,7 @@ check_tup_satisfies_update: oldTD.xactid = oldUpdaterXid; oldTD.undo_record_ptr = txactinfo.urec_add; UndoPersistence persistence = UndoPersistenceForRelation(relation); - Oid relOid = RelationIsPartition(relation) ? 
relation->parentId : RelationGetRelid(relation); + Oid relOid = RelationIsPartition(relation) ? GetBaseRelOidOfParition(relation) : RelationGetRelid(relation); Oid partitionOid = RelationIsPartition(relation) ? RelationGetRelid(relation) : InvalidOid; /* calculate xor delta for inplaceupdate, to allocate correct undo size */ @@ -3001,28 +2720,13 @@ check_tup_satisfies_update: appendBinaryStringInfo(undorec->Rawdata(), (char *)&xorDeltaFlags, sizeof(uint8)); } - oldtup.disk_tuple->flag &= ~UHEAP_MULTI_LOCKERS; - tempInfomask = oldtup.disk_tuple->flag; - - - /* Compute the new xid and infomask to store into the tuple. */ - ComputeNewXidInfomask(&oldtup, buffer, saveTupXid, txactinfo.td_slot, tempInfomask, xid, oldtupNewTransSlot, - singleLockerXid, lockmode, ForUpdate, &oldInfomask, &resultTransSlotId); - - /* - * There must not be any stronger locker than the current operation, - * otherwise it would have waited for it to finish. - */ - Assert(resultTransSlotId == oldtupNewTransSlot); - - newInfomask = 0; - if (useInplaceUpdate) { - infomaskOldTuple = infomaskNewTuple = oldInfomask | newInfomask | UHEAP_INPLACE_UPDATED; + infomaskOldTuple = infomaskNewTuple = UHEAP_XID_EXCL_LOCK | UHEAP_INPLACE_UPDATED; } else { - infomaskOldTuple = oldInfomask | UHEAP_UPDATED; - infomaskNewTuple = newInfomask; + infomaskOldTuple = UHEAP_XID_EXCL_LOCK | UHEAP_UPDATED; + infomaskNewTuple = 0; } + /* No ereport(ERROR) from here till changes are logged */ START_CRIT_SECTION(); /* @@ -3031,26 +2735,32 @@ check_tup_satisfies_update: * become DEAD sooner or later. If the transaction finally aborts, the * subsequent page pruning will be a no-op and the hint will be cleared. */ - if (!useInplaceUpdate || (uheaptup->disk_tuple_size < oldtup.disk_tuple_size)) { - UPageSetPrunable(page, xid); + if (!useInplaceUpdate || (uheaptup->disk_tuple_size < oldtup.disk_tuple_size) || useBlockInplaceUpdate) { + UPageSetPrunable(page, fxid); } /* oldtup should be pointing to right place in page */ Assert(oldtup.disk_tuple == (UHeapDiskTuple)UPageGetRowData(page, lp)); - UHeapTupleHeaderSetTDSlot(oldtup.disk_tuple, resultTransSlotId); + UHeapTupleHeaderSetTDSlot(oldtup.disk_tuple, oldtupNewTransSlot); oldtup.disk_tuple->flag &= ~UHEAP_VIS_STATUS_MASK; oldtup.disk_tuple->flag |= infomaskOldTuple; - - infomaskNewTuple &= ~UHEAP_XID_LOCK_ONLY; - infomaskNewTuple &= ~SINGLE_LOCKER_XID_IS_LOCK; + tupleXid = UHeapTupleSetModifiedXid(relation, buffer, &oldtup, fxid); /* keep the new tuple copy updated for the caller */ UHeapTupleHeaderSetTDSlot(uheaptup->disk_tuple, newtupTransSlot); uheaptup->disk_tuple->flag &= ~UHEAP_VIS_STATUS_MASK; uheaptup->disk_tuple->flag |= infomaskNewTuple; uheaptup->xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; + if (buffer == newbuf) { + UHeapTupleSetRawXid(uheaptup, tupleXid); + } else { + (void)UHeapPagePrepareForXid(relation, newbuf, fxid, false, false); + UHeapTupleSetModifiedXid(relation, newbuf, uheaptup, fxid); + } + if (useInplaceUpdate) { + Assert(buffer == newbuf); if (prefixlen > 0) { appendBinaryStringInfo(undorec->Rawdata(), (char *)&prefixlen, sizeof(uint16)); } @@ -3076,31 +2786,13 @@ check_tup_satisfies_update: appendBinaryStringInfo(undorec->Rawdata(), (char *)&subxid, sizeof(SubTransactionId)); } - /* - * For inplace updates, we copy the entire data portion including null - * bitmap of new tuple. 
- * - * For the special case where we are doing inplace updates even when - * the new tuple is bigger, we need to adjust the old tuple's location - * so that new tuple can be copied at that location as it is. - */ - RowPtrChangeLen(lp, uheaptup->disk_tuple_size); - rc = memcpy_s((char *)oldtup.disk_tuple + SizeOfUHeapDiskTupleData, - uheaptup->disk_tuple_size - SizeOfUHeapDiskTupleData, - (char *)uheaptup->disk_tuple + SizeOfUHeapDiskTupleData, - uheaptup->disk_tuple_size - SizeOfUHeapDiskTupleData); - securec_check(rc, "\0", "\0"); - /* - * Copy everything from new tuple in infomask apart from visibility - * flags. - */ - oldtup.disk_tuple->flag = oldtup.disk_tuple->flag & UHEAP_VIS_STATUS_MASK; - oldtup.disk_tuple->flag |= (uheaptup->disk_tuple->flag & ~UHEAP_VIS_STATUS_MASK); - /* Copy number of attributes in tuple. */ - UHeapTupleHeaderSetNatts(oldtup.disk_tuple, UHeapTupleHeaderGetNatts(newtup->disk_tuple)); - /* also update the tuple length and self pointer */ - oldtup.disk_tuple_size = uheaptup->disk_tuple_size; - oldtup.disk_tuple->t_hoff = uheaptup->disk_tuple->t_hoff; + if (!useBlockInplaceUpdate) { + PutInplaceUpdateTuple(&oldtup, uheaptup, lp); + } else { + PutBlockInplaceUpdateTuple(page, (Item)uheaptup->disk_tuple, lp, uheaptup->disk_tuple_size); + /* update the potential freespace */ + UHeapRecordPotentialFreeSpace(buffer, SHORTALIGN(oldtupsize) - SHORTALIGN(newtupsize)); + } ItemPointerCopy(&oldtup.ctid, &uheaptup->ctid); } else { #ifdef USE_ASSERT_CHECKING @@ -3128,8 +2820,8 @@ check_tup_satisfies_update: } /* update the potential freespace */ - UHeapRecordPotentialFreeSpace(relation, buffer, SHORTALIGN(oldtupsize)); - UHeapRecordPotentialFreeSpace(relation, newbuf, -1 * SHORTALIGN(newtupsize)); + UHeapRecordPotentialFreeSpace(buffer, SHORTALIGN(oldtupsize)); + UHeapRecordPotentialFreeSpace(newbuf, -1 * SHORTALIGN(newtupsize)); } InsertPreparedUndo(u_sess->ustore_cxt.urecvec); @@ -3230,7 +2922,7 @@ check_tup_satisfies_update: Assert(oldupWalInfo.hZone != NULL); LogUHeapUpdate(&oldupWalInfo, &newupWalInfo, useInplaceUpdate, undoXorDeltaSize, xlogXorDelta, prefixlen, - suffixlen, relation); + suffixlen, relation, useBlockInplaceUpdate); } undo::FinishUndoMeta(&xlum, persistence); @@ -3242,7 +2934,7 @@ check_tup_satisfies_update: /* be tidy */ pfree(undotup.data); UHeapFinalizeDML(relation, buffer, &newbuf, newtup, uheaptup, &(oldtup.ctid), - haveTupleLock, useInplaceUpdate, slotReused); + haveTupleLock, useInplaceUpdate); bms_free(inplaceUpdAttrs); bms_free(interestingAttrs); @@ -3329,43 +3021,35 @@ void UHeapMultiInsert(Relation relation, UHeapTuple *tuples, int ntuples, Comman undo::XlogUndoMeta xlum; Buffer buffer = InvalidBuffer; int nthispage = 0; + int retryTimes = 0; int tdSlot = InvalidTDSlotId; UndoRecPtr urecPtr = INVALID_UNDO_REC_PTR, prevUrecptr = INVALID_UNDO_REC_PTR, first_urecptr = INVALID_UNDO_REC_PTR; OffsetNumber maxRequiredOffset; bool lockReacquired = false; UHeapFreeOffsetRanges *ufreeOffsetRanges = NULL; - bool switchBuf = false; - bool aggressiveTDSearch = false; - BlockNumber firstBlock = InvalidBlockNumber; + bool setTupleXid = false; + ShortTransactionId tupleXid = 0; CHECK_FOR_INTERRUPTS(); /* IO collector and IO scheduler */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) IOSchedulerAndUpdate(IO_TYPE_WRITE, 1, IO_TYPE_ROW); +#endif WHITEBOX_TEST_STUB(UHEAP_MULTI_INSERT_FAILED, WhiteboxDefaultErrorEmit); UHeapResetWaitTimeForTDSlot(); reacquire_buffer: - buffer = RelationGetBufferForUTuple(relation, uheaptuples[ndone]->disk_tuple_size, 
InvalidBuffer, options, - bistate, switchBuf); + buffer = RelationGetBufferForUTuple(relation, uheaptuples[ndone]->disk_tuple_size, InvalidBuffer, + options, bistate); + (void)UHeapPagePrepareForXid(relation, buffer, fxid, false, false); page = BufferGetPage(buffer); phdr = (UHeapPageHeaderData *)page; - /* - * Do aggressive TD slot search after switching buffer - * and somehow ended up at the first block. - */ - BlockNumber currBlock = BufferGetBlockNumber(buffer); - if (firstBlock == InvalidBlockNumber) { - firstBlock = currBlock; - } else if (switchBuf && firstBlock == currBlock) { - aggressiveTDSearch = true; - } - /* * Get the unused offset ranges in the page. This is required for * deciding the number of undo records to be prepared later. @@ -3383,28 +3067,31 @@ reacquire_buffer: if (!skipUndo) { lower = phdr->pd_lower; tdSlot = UHeapPageReserveTransactionSlot(relation, buffer, fxid, &prevUrecptr, &lockReacquired, - InvalidBuffer, NULL, &minXidInTDSlots, aggressiveTDSearch); + InvalidBuffer, &minXidInTDSlots); /* * It is possible that available space on the page changed * as part of TD reservation operation. If so, go back and reacquire the buffer. */ if (lockReacquired || lower < phdr->pd_lower) { UnlockReleaseBuffer(buffer); - switchBuf = false; + LimitRetryTimes(retryTimes++); + if (retryTimes > FORCE_EXTEND_THRESHOLD) { + options |= UHEAP_INSERT_EXTEND; + } goto reacquire_buffer; } if (tdSlot == InvalidTDSlotId) { UnlockReleaseBuffer(buffer); UHeapSleepOrWaitForTDSlot(minXidInTDSlots, fxid, true); - // cant switch buffer anymore to avoid bouncing between blocks - switchBuf = !aggressiveTDSearch; + LimitRetryTimes(retryTimes++); + options |= UHEAP_INSERT_EXTEND; goto reacquire_buffer; } Assert(tdSlot != InvalidTDSlotId); - Oid relOid = RelationIsPartition(relation) ? relation->parentId : RelationGetRelid(relation); + Oid relOid = RelationIsPartition(relation) ? GetBaseRelOidOfParition(relation) : RelationGetRelid(relation); Oid partitionOid = RelationIsPartition(relation) ? 
RelationGetRelid(relation) : InvalidOid; urecPtr = UHeapPrepareUndoMultiInsert(relOid, partitionOid, RelationGetRelFileNode(relation), @@ -3438,12 +3125,18 @@ reacquire_buffer: break; UHeapTupleHeaderSetTDSlot(uheaptup->disk_tuple, tdSlot); UHeapTupleHeaderSetLockerTDSlot(uheaptup->disk_tuple, InvalidTDSlotId); + if (!setTupleXid) { + tupleXid = UHeapTupleSetModifiedXid(relation, buffer, uheaptup, fxid); + setTupleXid = true; + } else { + UHeapTupleSetRawXid(uheaptup, tupleXid); + } #ifdef USE_ASSERT_CHECKING CheckTupleValidity(relation, uheaptup); #endif RelationPutUTuple(relation, buffer, uheaptup); - UHeapRecordPotentialFreeSpace(relation, buffer, -1 * SHORTALIGN(uheaptup->disk_tuple_size)); + UHeapRecordPotentialFreeSpace(buffer, -1 * SHORTALIGN(uheaptup->disk_tuple_size)); /* * Let's make sure that we've decided the offset ranges @@ -3556,7 +3249,7 @@ reacquire_buffer: } ndone += nthispage; - switchBuf = false; + options &= ~UHEAP_INSERT_EXTEND; } /* @@ -3652,7 +3345,7 @@ int UPageGetTDSlotId(Buffer buf, TransactionId fxid, UndoRecPtr *urecAdd) return InvalidTDSlotId; } -static bool UHeapPageReserveTransactionSlotReuseLoop(int *pslotNo, Page page, UndoRecPtr *urecPtr, bool *slotReused) +static bool UHeapPageReserveTransactionSlotReuseLoop(int *pslotNo, Page page, UndoRecPtr *urecPtr) { int slotNo; int tdCount = UPageGetTDSlotCount(page); @@ -3663,10 +3356,6 @@ static bool UHeapPageReserveTransactionSlotReuseLoop(int *pslotNo, Page page, Un if (!TransactionIdIsValid(thistrans->xactid)) { *urecPtr = thistrans->undo_record_ptr; - - if (slotReused && (*urecPtr != INVALID_UNDO_REC_PTR)) { - *slotReused = true; - } #ifdef DEBUG_UHEAP if (*urecPtr != INVALID_UNDO_REC_PTR) { /* Got a slot after invalidation */ @@ -3701,9 +3390,8 @@ static bool UHeapPageReserveTransactionSlotReuseLoop(int *pslotNo, Page page, Un * aggressiveSearch - we try to reuse td slots from committed and aborted txns. 
* If none, we extend the td slots beyond the initial threshold */ -int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId fxid, - UndoRecPtr *urecPtr, bool *lockReacquired, Buffer otherBuf, bool *slotReused, - TransactionId *minXid, bool aggressiveSearch) +int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId fxid, UndoRecPtr *urecPtr, + bool *lockReacquired, Buffer otherBuf, TransactionId *minXid, bool aggressiveSearch) { Page page = BufferGetPage(buf); int latestFreeTDSlot = InvalidTDSlotId; @@ -3714,6 +3402,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId TransactionId currMinXid = MaxTransactionId; *lockReacquired = false; + WaitState oldStatus = pgstat_report_waitstatus(STATE_WAIT_RESERVE_TD); /* * For temp relations, we don't have to check all the slots since no other @@ -3736,6 +3425,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId if (TransactionIdEquals(thistrans->xactid, fxid)) { *urecPtr = thistrans->undo_record_ptr; + pgstat_report_waitstatus(oldStatus); return (slotNo + 1); } else if (!TransactionIdIsValid(thistrans->xactid)) latestFreeTDSlot = slotNo; @@ -3750,6 +3440,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId #ifdef DEBUG_UHEAP UHEAPSTAT_COUNT_GET_TRANSSLOT_FROM(TRANSSLOT_RESERVED_BY_CURRENT_XID); #endif + pgstat_report_waitstatus(oldStatus); return (slotNo + 1); } else { currMinXid = Min(currMinXid, thistrans->xactid); @@ -3763,6 +3454,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId if (latestFreeTDSlot >= 0) { *urecPtr = tdPtr->td_info[latestFreeTDSlot].undo_record_ptr; + pgstat_report_waitstatus(oldStatus); return (latestFreeTDSlot + 1); } @@ -3777,16 +3469,19 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId *minXid = currMinXid; /* no transaction slot available, try to reuse some existing slot */ - if (UHeapPageFreezeTransSlots(relation, buf, lockReacquired, NULL, otherBuf, aggressiveSearch)) { + if (UHeapPageFreezeTransSlots(relation, buf, lockReacquired, NULL, otherBuf)) { /* * If the lock is reacquired inside, then we allow callers to reverify * the condition whether then can still perform the required * operation. 
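 *
 * The caller-side retry loop then looks roughly like the one in
 * UHeapInsert() above (the FORCE_EXTEND_THRESHOLD escape is new in this
 * patch and keeps a session from spinning on a crowded page):
 *
 *     if (lockReacquired || (lower < phdr->pd_lower)) {
 *         UnlockReleaseBuffer(buffer);
 *         LimitRetryTimes(retryTimes++);
 *         if (retryTimes > FORCE_EXTEND_THRESHOLD) {
 *             options |= UHEAP_INSERT_EXTEND;   // give up reusing, extend the relation
 *         }
 *         goto reacquire_buffer;
 *     }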
*/ - if (*lockReacquired) + if (*lockReacquired) { + pgstat_report_waitstatus(oldStatus); return InvalidTDSlotId; + } - if (UHeapPageReserveTransactionSlotReuseLoop(&slotNo, page, urecPtr, slotReused)) { + if (UHeapPageReserveTransactionSlotReuseLoop(&slotNo, page, urecPtr)) { + pgstat_report_waitstatus(oldStatus); return (slotNo + 1); } @@ -3807,6 +3502,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId */ ereport(DEBUG5, (errmsg("Could not extend TD slots beyond threshold Rel: %s, blkno: %d", RelationGetRelationName(relation), BufferGetBlockNumber(buf)))); + pgstat_report_waitstatus(oldStatus); return InvalidTDSlotId; } /* @@ -3821,6 +3517,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId */ ereport(DEBUG5, (errmsg("TD array extended by %d slots for Rel: %s, blkno: %d", nExtended, RelationGetRelationName(relation), BufferGetBlockNumber(buf)))); + pgstat_report_waitstatus(oldStatus); return (tdCount + 1); } ereport(DEBUG5, (errmsg("Could not extend TD array for Rel: %s, blkno: %d", @@ -3830,6 +3527,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId UHEAPSTAT_COUNT_GET_TRANSSLOT_FROM(TRANSSLOT_CANNOT_GET); #endif + pgstat_report_waitstatus(oldStatus); /* no transaction slot available */ return InvalidTDSlotId; } @@ -3896,7 +3594,7 @@ int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId * false otherwise. */ bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquired, TD *transinfo, - Buffer otherBuf, bool aggressiveFreeze) + Buffer otherBuf) { int nFrozenSlots = 0; int *completedXactSlots = NULL; @@ -3909,7 +3607,7 @@ bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquir int numSlots = GetTDCount((UHeapPageHeaderData *)page); UHeapPageTDData *tdPtr = (UHeapPageTDData *)PageGetTDPointer(page); transinfo = tdPtr->td_info; - TransactionId oldestXid = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + TransactionId oldestXid = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); /* * Clear the slot information from tuples. The basic idea is to collect @@ -3999,11 +3697,6 @@ bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquir goto cleanup; } - /* Some callers want a fast way to check if there is a reusable td slot or not. */ - if (!aggressiveFreeze) { - goto cleanup; - } - Assert(!RELATION_IS_LOCAL(relation)); completedXactSlots = (int *)palloc0(numSlots * sizeof(int)); abortedXactSlots = (int *)palloc0(numSlots * sizeof(int)); @@ -4098,7 +3791,9 @@ bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquir LockBuffer(otherBuf, BUFFER_LOCK_UNLOCK); for (i = 0; i < nAbortedXactSlots; i++) { + WaitState oldStatus = pgstat_report_waitstatus(STATE_WAIT_TD_ROLLBACK); ExecuteUndoActionsPage(urecptr[i], relation, buf, fxid[i]); + pgstat_report_waitstatus(oldStatus); } LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -4184,7 +3879,6 @@ void UHeapFreezeOrInvalidateTuples(Buffer buf, int nSlots, const int *slots, boo if (locker_td_id == slots[i]) { tupHdr = (UHeapDiskTuple)UPageGetRowData(page, rowptr); UHeapTupleHeaderSetLockerTDSlot(tupHdr, UHEAPTUP_SLOT_FROZEN); - tupHdr->flag &= ~UHEAP_XID_LOCK_ONLY; } if (tdSlot == slots[i]) { @@ -4268,7 +3962,7 @@ void UHeapReserveDualPageTDSlot(Relation relation, Buffer oldbuf, Buffer newbuf, /* Reserve the transaction slot for new buffer. 
*/ *newbufTransSlotId = UHeapPageReserveTransactionSlot(relation, newbuf, fxid, - newbufPrevUrecptr, lockReacquired, oldbuf, NULL, minXidInTDSlots); + newbufPrevUrecptr, lockReacquired, oldbuf, minXidInTDSlots); /* * Try again if the buffer lock is released and reacquired. Or if we @@ -4285,7 +3979,7 @@ void UHeapReserveDualPageTDSlot(Relation relation, Buffer oldbuf, Buffer newbuf, /* Get the transaction slot for old buffer. */ *oldbufTransSlotId = UHeapPageReserveTransactionSlot(relation, oldbuf, fxid, oldbufPrevUrecptr, - oldbufLockReacquired, newbuf, NULL, minXidInTDSlots); + oldbufLockReacquired, newbuf, minXidInTDSlots); } /* @@ -4316,27 +4010,6 @@ void UHeapPageSetUndo(Buffer buffer, int transSlotId, TransactionId fxid, UndoRe } } - -/* - * Given two versions of the same "flag" for a tuple, compare them and - * return whether the relevant status for a tuple xid has changed. This is - * used after a buffer lock has been released and reacquired: we want to ensure - * that the tuple state continues to be the same it was when we previously - * examined it. - * - * Note the xid field itself must be compared separately. - */ -static inline bool XidInfomaskChanged(uint16 newInfomask, uint16 oldInfomask) -{ - const uint16 interesting = UHEAP_MULTI_LOCKERS | UHEAP_XID_LOCK_ONLY | UHEAP_LOCK_MASK; - - if ((newInfomask & interesting) != (oldInfomask & interesting)) - return true; - - return false; -} - - /* * UHeapDetermineModifiedColumns - Check which columns are being updated. * This is same as HeapDetermineModifiedColumns except that it takes @@ -4364,9 +4037,9 @@ CommandId UHeapTupleGetCid(UHeapTuple utuple, Buffer buffer) UndoRecord *urec = New(CurrentMemoryContext)UndoRecord(); urec->Reset(tdinfo.urec_add); - int rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, ItemPointerGetBlockNumber(&utuple->ctid), + UndoTraversalState rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, ItemPointerGetBlockNumber(&utuple->ctid), ItemPointerGetOffsetNumber(&utuple->ctid), InvalidTransactionId); - if (rc == UNDO_RET_FAIL) { + if (rc != UNDO_TRAVERSAL_COMPLETE) { return InvalidCommandId; } @@ -4616,65 +4289,6 @@ UndoRecPtr UHeapPrepareUndoDelete(Oid relOid, Oid partitionOid, Oid relfilenode, return urecptr; } -UndoRecPtr UHeapPrepareUndoLock(Oid relOid, Oid partitionOid, Oid relfilenode, Oid tablespace, - UndoPersistence persistence, Buffer buffer, OffsetNumber offnum, TransactionId xid, SubTransactionId subxid, - CommandId cid, UndoRecPtr prevurpInOneBlk, UndoRecPtr prevurpInOneXact, _in_ TD *oldtd, UHeapTuple oldtuple, - BlockNumber blk, XlUndoHeader *xlundohdr, undo::XlogUndoMeta *xlundometa) -{ - Assert(oldtuple->tupTableType == UHEAP_TUPLE); - - UndoRecord *urec = u_sess->ustore_cxt.undo_records[0]; - URecVector *urecvec = u_sess->ustore_cxt.urecvec; - - /* Just to keep compiler quite */ - urec->SetUtype(UNDO_XID_LOCK_ONLY); - urec->SetUinfo(UNDO_UREC_INFO_PAYLOAD); - urec->SetXid(xid); - urec->SetCid(cid); - urec->SetReloid(relOid); - urec->SetPartitionoid(partitionOid); - urec->SetBlkprev(prevurpInOneBlk); - urec->SetRelfilenode(relfilenode); - urec->SetTablespace(tablespace); - - if (t_thrd.xlog_cxt.InRecovery) { - urec->SetBlkno(blk); - } else { - if (BufferIsValid(buffer)) { - urec->SetBlkno(BufferGetBlockNumber(buffer)); - } else { - urec->SetBlkno(InvalidBlockNumber); - } - } - urec->SetOffset(offnum); - urec->SetPrevurp(t_thrd.xlog_cxt.InRecovery ? 
prevurpInOneXact : GetCurrentTransactionUndoRecPtr(persistence)); - if (oldtd) { - urec->SetOldXactId(oldtd->xactid); - } - urec->SetNeedInsert(true); - - /* Copy over the entire tuple header to the undorecord */ - initStringInfo(urec->Rawdata()); - appendBinaryStringInfo(urec->Rawdata(), (char *)oldtuple->disk_tuple + OffsetTdId, - SizeOfUHeapDiskTupleHeaderExceptXid); - if (subxid != InvalidSubTransactionId) { - urec->SetUinfo(UNDO_UREC_INFO_CONTAINS_SUBXACT); - appendBinaryStringInfo(urec->Rawdata(), (char *)&subxid, sizeof(SubTransactionId)); - } - - bool status = PrepareUndoRecord(urecvec, persistence, xlundohdr, xlundometa); - - /* Do not continue if there was a failure during Undo preparation */ - if (status != UNDO_RET_SUCC) { - ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("Failed to generate UndoRecord"))); - } - - UndoRecPtr urecptr = urec->Urp(); - Assert(IS_VALID_UNDO_REC_PTR(urecptr)); - - return urecptr; -} - /* * Return the TD slot id assigned to xid on the Page, if any. * Return InvalidTDSlotId if there isn't any. @@ -4703,7 +4317,7 @@ int UHeapPageGetTDSlotId(Buffer buffer, TransactionId xid, UndoRecPtr *urp) static void PopulateXLUndoHeader(XlUndoHeader *xlundohdr, const UHeapWALInfo *walinfo, const Relation rel) { if (rel != NULL) { - xlundohdr->relOid = RelationIsPartition(rel) ? rel->parentId : RelationGetRelid(rel); + xlundohdr->relOid = RelationIsPartition(rel) ? GetBaseRelOidOfParition(rel) : RelationGetRelid(rel); } else { xlundohdr->relOid = walinfo->relOid; } @@ -4721,7 +4335,7 @@ static void PopulateXLUHeapHeader(XlUHeapHeader *xlhdr, const UHeapDiskTuple dis xlhdr->t_hoff = diskTuple->t_hoff; } -static void LogUHeapInsert(UHeapWALInfo *walinfo, Relation rel) +static void LogUHeapInsert(UHeapWALInfo *walinfo, Relation rel, bool isToast) { XlUndoHeader xlundohdr; XlUHeapInsert xlrec; @@ -4765,6 +4379,8 @@ static void LogUHeapInsert(UHeapWALInfo *walinfo, Relation rel) XLogBeginInsert(); XLogRegisterData((char *)&xlrec, SizeOfUHeapInsert); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); XLogRegisterData((char *)&xlundohdr, SizeOfXLUndoHeader); if ((walinfo->flag & XLOG_UNDO_HEADER_HAS_BLK_PREV) != 0) { ereport(DEBUG5, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("blkprev=%lu", walinfo->blkprev))); @@ -4813,7 +4429,7 @@ static void LogUHeapInsert(UHeapWALInfo *walinfo, Relation rel) /* filtering by origin on a row level is much more efficient */ XLogIncludeOrigin(); - recptr = XLogInsert(RM_UHEAP_ID, info); + recptr = XLogInsert(RM_UHEAP_ID, info, InvalidBktId, isToast); PageSetLSN(page, recptr); SetUndoPageLSN(u_sess->ustore_cxt.urecvec, recptr); @@ -4850,6 +4466,8 @@ static void LogUHeapDelete(UHeapWALInfo *walinfo) XLogBeginInsert(); XLogRegisterData((char *)&xlrec, SizeOfUHeapDelete); XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); XLogRegisterData((char *)&xlundohdr, SizeOfXLUndoHeader); @@ -4887,7 +4505,8 @@ static void LogUHeapDelete(UHeapWALInfo *walinfo) } static void LogUHeapUpdate(UHeapWALInfo *oldTupWalinfo, UHeapWALInfo *newTupWalinfo, bool isInplaceUpdate, - int undoXorDeltaSize, char *xlogXorDelta, uint16 xorPrefixlen, uint16 xorSurfixlen, Relation rel) + int undoXorDeltaSize, char *xlogXorDelta, uint16 xorPrefixlen, uint16 xorSurfixlen, Relation rel, + bool isBlockInplaceUpdate) { char *oldp = NULL; char *newp = NULL; @@ -4913,7 +4532,7 @@ static void LogUHeapUpdate(UHeapWALInfo *oldTupWalinfo, UHeapWALInfo *newTupWali Assert(oldTupWalinfo->oldUTuple.data); oldTup = 
(UHeapDiskTupleData *)oldTupWalinfo->oldUTuple.data; oldTupLen = oldTupWalinfo->oldUTuple.len; - inplaceTup = oldTupWalinfo->utuple; + inplaceTup = isBlockInplaceUpdate ? newTupWalinfo->utuple : oldTupWalinfo->utuple; Assert(inplaceTup->tupTableType == UHEAP_TUPLE); nonInplaceNewTup = newTupWalinfo->utuple; if (isInplaceUpdate) { @@ -5041,6 +4660,8 @@ static void LogUHeapUpdate(UHeapWALInfo *oldTupWalinfo, UHeapWALInfo *newTupWali if (rel->rd_rel->relkind == RELKIND_TOASTVALUE) { info |= XLOG_UHEAP_INIT_TOAST_PAGE; } + } else if (isBlockInplaceUpdate) { + xlrec.flags |= XLZ_BLOCK_INPLACE_UPDATE; } xlrec.flags |= XLZ_HAS_UPDATE_UNDOTUPLE; @@ -5052,6 +4673,9 @@ static void LogUHeapUpdate(UHeapWALInfo *oldTupWalinfo, UHeapWALInfo *newTupWali XLogBeginInsert(); XLogRegisterData((char *)&xlrec, SizeOfUHeapUpdate); + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); + XLogRegisterData((char *)&xlundohdr, SizeOfXLUndoHeader); if ((oldTupWalinfo->flag & XLOG_UNDO_HEADER_HAS_SUB_XACT) != 0) { XLogRegisterData((char *)&(oldTupWalinfo->hasSubXact), sizeof(bool)); @@ -5265,13 +4889,16 @@ bool UHeapExecPendingUndoActions(Relation relation, Buffer buffer, TransactionId * crash, and after restart, status of this transaction will not be * aborted but we should still consider it as aborted because it did not commit. */ - if (TransactionIdIsValid(xid) && UHeapTransactionIdDidAbort(xid)) { + if (TransactionIdIsValid(xid) && !UHeapTransactionIdDidCommit(xid) && + !TransactionIdIsInProgress(xid, NULL, false, false)) { /* * Release the buffer lock here to prevent deadlock. * This is because the actual rollback will reacquire the lock. */ LockBuffer(buffer, BUFFER_LOCK_UNLOCK); + WaitState oldStatus = pgstat_report_waitstatus(STATE_WAIT_TD_ROLLBACK); ExecuteUndoActionsPage(slotUrecPtr, relation, buffer, xid); + pgstat_report_waitstatus(oldStatus); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); /* We better not find this xid in any td slot anymore */ @@ -5413,7 +5040,7 @@ static void LogUHeapMultiInsert(UHeapMultiInsertWALInfo *multiWalinfo, bool skip * All undo records have same information apart from the payload data. * Hence, we can copy the same from the last record. */ - xlundohdr.relOid = RelationIsPartition(multiWalinfo->relation) ? multiWalinfo->relation->parentId : + xlundohdr.relOid = RelationIsPartition(multiWalinfo->relation) ? 
GetBaseRelOidOfParition(multiWalinfo->relation) : multiWalinfo->relation->rd_id; xlundohdr.urecptr = multiWalinfo->genWalInfo->urecptr; xlundohdr.flag = multiWalinfo->genWalInfo->flag; @@ -5522,6 +5149,9 @@ static void LogUHeapMultiInsert(UHeapMultiInsertWALInfo *multiWalinfo, bool skip XLogRegisterData((char *)&uheappage->td_count, sizeof(uint16)); } + CommitSeqNo curCSN = InvalidCommitSeqNo; + LogCSN(&curCSN); + /* copy xl_multi_insert_tuple in maindata */ XLogRegisterData((char *)xlrec, tupledata - scratch); @@ -5561,7 +5191,7 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) int tdSlot = InvalidTDSlotId; UHeapTupleTransInfo tdinfo; UndoRecord *urec = NULL; - int rc PG_USED_FOR_ASSERTS_ONLY; + UndoTraversalState rc = UNDO_TRAVERSAL_DEFAULT; Page page = NULL; int zoneId; @@ -5585,7 +5215,7 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, blkno, offnum, tdinfo.xid); /* the tuple cannot be all-visible because it's inserted by current transaction */ - Assert(rc != UNDO_RET_FAIL); + Assert(rc != UNDO_TRAVERSAL_DEFAULT); Assert(urec->Utype() == UNDO_INSERT && urec->Offset() == offnum && urec->Xid() == tdinfo.xid); START_CRIT_SECTION(); @@ -5618,7 +5248,7 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) urecOld->SetUrp(prevUrp); rc = FetchUndoRecord(urecOld, NULL, InvalidBlockNumber, InvalidOffsetNumber, InvalidTransactionId); - if (rc == UNDO_RET_FAIL || urecOld->Xid() != fxid) { + if (rc != UNDO_TRAVERSAL_COMPLETE || urecOld->Xid() != fxid) { xid = InvalidTransactionId; } DELETE_EX(urecOld); @@ -5711,7 +5341,8 @@ void UHeapAbortSpeculative(Relation relation, UHeapTuple utuple) * on the relation associated with the tuple). Any failure is reported * via ereport(). */ -void SimpleUHeapDelete(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot** oldslot) +void SimpleUHeapDelete(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot** oldslot, + TransactionId* tmfdXmin) { TM_Result result; TM_FailureData tmfd; @@ -5742,6 +5373,9 @@ void SimpleUHeapDelete(Relation relation, ItemPointer tid, Snapshot snapshot, Tu elog(ERROR, "unrecognized UHeapDelete status: %u", result); break; } + if (tmfdXmin != NULL) { + *tmfdXmin = tmfd.xmin; + } } void UHeapSleepOrWaitForTDSlot(TransactionId xWait, TransactionId myXid /* debug purposes only */,bool isInsert) @@ -5789,15 +5423,13 @@ uint8 UPageExtendTDSlots(Relation relation, Buffer buf) char *start; char *end; int i; - Page page; + Page page = BufferGetPage(buf); uint8 currTDSlots; - uint16 freeSpace; + uint16 freeSpace = PageGetUHeapFreeSpace(page); size_t linePtrSize; errno_t ret = EOK; TD *thistrans = NULL; UHeapPageTDData *tdPtr = NULL; - - page = BufferGetPage(buf); UHeapPageHeaderData *phdr = (UHeapPageHeaderData *)page; tdPtr = (UHeapPageTDData *)PageGetTDPointer(page); currTDSlots = phdr->td_count; @@ -5820,7 +5452,6 @@ uint8 UPageExtendTDSlots(Relation relation, Buffer buf) * TD array. 
In case of insufficient space, extend * according to free space */ - freeSpace = phdr->pd_upper - phdr->pd_lower; if (freeSpace < (numExtended * sizeof(TD))) { numExtended = freeSpace / sizeof(TD); } diff --git a/src/gausskernel/storage/access/ustore/knl_uhio.cpp b/src/gausskernel/storage/access/ustore/knl_uhio.cpp index 7ded965d1..7af98f1af 100644 --- a/src/gausskernel/storage/access/ustore/knl_uhio.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uhio.cpp @@ -40,19 +40,18 @@ static inline int CalcMaxBlocksToScan(Relation relation) return Max(1, maxBlocks); } -Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, - bool switchBuf) +Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate) { bool useFsm = !(options & UHEAP_INSERT_SKIP_FSM); + bool forceExtend = (options & UHEAP_INSERT_EXTEND); Buffer buffer = InvalidBuffer; Page page; Size pageFreeSpace = 0; Size saveFreeSpace = 0; BlockNumber targetBlock; BlockNumber otherBlock; - BlockNumber prev = InvalidBlockNumber; - BlockNumber curr = InvalidBlockNumber; bool needLock = false; + bool last_page_tested = false; len = SHORTALIGN(len); /* @@ -66,10 +65,11 @@ Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffe /* Compute desired extra freespace due to fillfactor option */ saveFreeSpace = RelationGetTargetPageFreeSpace(relation, HEAP_DEFAULT_FILLFACTOR); - if (otherBuffer != InvalidBuffer) + if (otherBuffer != InvalidBuffer) { otherBlock = BufferGetBlockNumber(otherBuffer); - else + } else { otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */ + } if ((NULL != bistate) && BufferIsValid(bistate->current_buf)) { RelFileNode rnode; @@ -84,15 +84,6 @@ Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffe } } - if (switchBuf) { - prev = RelationGetPrevTargetBlock(relation); - curr = RelationGetTargetBlock(relation); - - Assert(curr != InvalidBlockNumber); - RelationSetTargetBlock(relation, prev); - RelationSetPrevTargetBlock(relation, curr); - } - /* * We first try to put the tuple on the same page we last inserted a tuple * on, as cached in the BulkInsertState or relcache entry. If that @@ -110,12 +101,15 @@ Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffe /* can't fit, don't bother asking FSM */ targetBlock = InvalidBlockNumber; useFsm = false; - } else if (bistate && bistate->current_buf != InvalidBuffer) + } else if (bistate && bistate->current_buf != InvalidBuffer) { targetBlock = BufferGetBlockNumber(bistate->current_buf); - else + } else { targetBlock = RelationGetTargetBlock(relation); + } - if (targetBlock == InvalidBlockNumber && useFsm) { + if (unlikely(forceExtend)) { + targetBlock = InvalidBlockNumber; + } else if (targetBlock == InvalidBlockNumber && useFsm) { /* * We have no cached target page, so ask the FSM for an initial * target. 
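[Editorial note] The reworked RelationGetBufferForUTuple above drops the switchBuf mechanism in favor of an UHEAP_INSERT_EXTEND flag and a one-shot probe of the relation's last page when the FSM has no record of it. The following is a minimal sketch of the resulting target-block selection order, not part of the patch: the helper name UHeapChooseTargetBlock is hypothetical, while the flags, FSM call, and last-page fallback mirror the code in this hunk.

```
/*
 * Editorial sketch, not part of the patch: the target-block selection
 * order RelationGetBufferForUTuple follows after this change. The helper
 * name is hypothetical; the flags and FSM calls are the ones used above.
 * (The real code additionally sets last_page_tested so that the
 * last-page probe runs at most once per call.)
 */
static BlockNumber UHeapChooseTargetBlock(Relation relation, int options, Size len, Size saveFreeSpace)
{
    /* UHEAP_INSERT_EXTEND forces a fresh extension: skip every reuse path */
    if (options & UHEAP_INSERT_EXTEND) {
        return InvalidBlockNumber;
    }

    /* 1. the cached target block from the relcache entry, if any */
    BlockNumber targetBlock = RelationGetTargetBlock(relation);

    /* 2. otherwise ask the FSM for a page with enough free space */
    if (targetBlock == InvalidBlockNumber && !(options & UHEAP_INSERT_SKIP_FSM)) {
        targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
    }

    /* 3. if the FSM knows nothing, probe the last existing page once */
    if (targetBlock == InvalidBlockNumber) {
        BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
        if (nblocks > 0) {
            targetBlock = nblocks - 1;
        }
    }

    /* InvalidBlockNumber here still means: extend the relation */
    return targetBlock;
}
```
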
@@ -128,34 +122,15 @@ */ if (targetBlock == InvalidBlockNumber) { BlockNumber nblocks = RelationGetNumberOfBlocks(relation); - if (nblocks > 0) + if (nblocks > 0) { targetBlock = nblocks - 1; + } + last_page_tested = true; } } loop: while (targetBlock != InvalidBlockNumber) { - if (switchBuf) { - if (targetBlock == curr) { - buffer = ReadBufferBI(relation, targetBlock, bistate); - LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); - page = BufferGetPage(buffer); - pageFreeSpace = PageGetUHeapFreeSpace(page); - targetBlock = RecordAndGetPageWithFreeSpace(relation, - targetBlock, - 0, - len + saveFreeSpace); - RecordPageWithFreeSpace(relation, curr, pageFreeSpace); - elog(DEBUG5, "Post FSM hint - Buff: %d, Rel: %s, curr: %u, targetBlock: %u, nblocks: %u", buffer, - RelationGetRelationName(relation), curr, targetBlock, RelationGetNumberOfBlocks(relation)); - UnlockReleaseBuffer(buffer); - if ((targetBlock == curr) || (targetBlock == InvalidBlockNumber)) { - elog(DEBUG5, "Rel: %s adding another block", RelationGetRelationName(relation)); - break; - } - } - } - /* * Read and exclusive-lock the target block, as well as the other * block if one was given, taking suitable care with lock ordering and @@ -175,7 +150,6 @@ loop: LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); } else if (otherBlock == targetBlock) { buffer = otherBuffer; - /* also easy case */ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); } else if (otherBlock < targetBlock) { @@ -238,14 +212,31 @@ loop: * to try. */ targetBlock = RecordAndGetPageWithFreeSpace(relation, targetBlock, pageFreeSpace, len + saveFreeSpace); + + /* + * If the FSM knows nothing of the rel, try the last page before we + * give up and extend. This is intended to make use of pages that were + * extended one at a time and never recorded in the FSM. + * + * Ideally, pages would be bulk-extended and all recorded in the FSM later. + */ + if (targetBlock == InvalidBlockNumber && !last_page_tested) { + BlockNumber nblocks = RelationGetNumberOfBlocks(relation); + if (nblocks > 0) { + targetBlock = nblocks - 1; + } + last_page_tested = true; + } } /* * See if we can prune an existing block before extending the relation. */ - targetBlock = RelationPruneOptional(relation, len + saveFreeSpace); - if (targetBlock != InvalidBlockNumber) { - goto loop; + if (useFsm && !forceExtend) { + targetBlock = RelationPruneOptional(relation, len + saveFreeSpace); + if (targetBlock != InvalidBlockNumber) { + goto loop; + } } /* @@ -335,8 +326,9 @@ loop: * Release the file-extension lock; it's now OK for someone else to extend * the relation some more. */ - if (needLock) + if (needLock) { UnlockRelationForExtension(relation, ExclusiveLock); + } /* * Lock the other buffer. 
It's guaranteed to be of a lower page number @@ -587,7 +579,7 @@ BlockNumber RelationPruneBlockAndReturn(Relation relation, BlockNumber start_blo } freespace = PageGetUHeapFreeSpace(page); - if (UPageIsEmpty((UHeapPageHeaderData *)page, RelationGetInitTd(relation)) || required_size <= freespace) { + if (UPageIsEmpty((UHeapPageHeaderData *)page) || required_size <= freespace) { UnlockReleaseBuffer(buffer); RecordPageWithFreeSpace(relation, blkno, freespace); result = blkno; diff --git a/src/gausskernel/storage/access/ustore/knl_umultilocker.cpp b/src/gausskernel/storage/access/ustore/knl_umultilocker.cpp index c76efa24a..dec820fa2 100644 --- a/src/gausskernel/storage/access/ustore/knl_umultilocker.cpp +++ b/src/gausskernel/storage/access/ustore/knl_umultilocker.cpp @@ -58,91 +58,3 @@ bool UMultiLockMembersSame(const List *list1, const List *list2) return true; } - -LockTupleMode GetOldLockMode(uint16 infomask) -{ - LockTupleMode oldLockMode; - - /* - * Normally, if the tuple is not marked as locked only, it should not - * contain any locker information. But, during rollback of - * (in-)update/delete, we retain the multilocker information. See - * execute_undo_actions_page for details. - */ - if (UHEAP_XID_IS_LOCKED_ONLY(infomask) || !IsUHeapTupleModified(infomask)) { - if (UHEAP_XID_IS_KEYSHR_LOCKED(infomask)) - oldLockMode = LockTupleKeyShare; - else if (UHEAP_XID_IS_SHR_LOCKED(infomask)) - oldLockMode = LockTupleShared; - else if (UHEAP_XID_IS_NOKEY_EXCL_LOCKED(infomask)) - oldLockMode = LockTupleNoKeyExclusive; - else if (UHEAP_XID_IS_EXCL_LOCKED(infomask)) - oldLockMode = LockTupleExclusive; - else { - /* LOCK_ONLY can't be present alone */ - pg_unreachable(); - } - } else { - /* it's an update, but which kind? */ - if (infomask & UHEAP_XID_EXCL_LOCK) - oldLockMode = LockTupleExclusive; - else - oldLockMode = LockTupleNoKeyExclusive; - } - - return oldLockMode; -} - -/* - * UGetMultiLockInfo - Helper function for ComputeNewXidInfomask to - * get the multi lockers information. - */ -void UGetMultiLockInfo(uint16 oldInfomask, TransactionId tupXid, int tupTdSlot, TransactionId addToXid, - uint16 *newInfomask, int *newTdSlot, LockTupleMode *mode, bool *oldTupleHasUpdate, LockOper lockoper) -{ - LockTupleMode oldMode; - - oldMode = GetOldLockMode(oldInfomask); - - if (tupXid == addToXid) { - if (UHeapTupleHasMultiLockers(oldInfomask)) { - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - *newInfomask |= UHEAP_MULTI_LOCKERS; - } - /* acquire the strongest of both */ - if (*mode < oldMode) - *mode = oldMode; - } else { - elog(PANIC, "Set infomask UHEAP_MULTI_LOCKERS."); // not fall through here in ustore - *newInfomask |= UHEAP_MULTI_LOCKERS; - - /* - * Acquire the strongest of both and keep the transaction slot of the - * stronger lock. - */ - if (*mode < oldMode) { - *mode = oldMode; - } - - /* For lockers, we want to store the updater's transaction slot. */ - if (lockoper != ForUpdate) - *newTdSlot = tupTdSlot; - } - - /* - * We want to propagate the updaters information for lockers only provided - * the tuple is already locked by others (aka it has its multi-locker bit - * set). 
- */ - if (lockoper != ForUpdate && UHeapTupleHasMultiLockers(*newInfomask) && IsUHeapTupleModified(oldInfomask) && - !UHEAP_XID_IS_LOCKED_ONLY(oldInfomask)) { - *oldTupleHasUpdate = true; - - if (UHeapTupleIsInPlaceUpdated(oldInfomask)) { - *newInfomask |= UHEAP_INPLACE_UPDATED; - } else { - Assert(UHeapTupleIsUpdated(oldInfomask)); - *newInfomask |= UHEAP_UPDATED; - } - } -} diff --git a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp index 0499b81fa..0c285a90a 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoaction.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undoaction.cpp @@ -70,9 +70,10 @@ void ExecuteUndoActions(TransactionId fullXid, UndoRecPtr fromUrecptr, UndoRecPt * worker processed it and backend tries to process it some later * point. */ - int rc = FetchUndoRecord(urec, NULL, InvalidBlockNumber, InvalidOffsetNumber, InvalidTransactionId); + UndoTraversalState rc = FetchUndoRecord(urec, NULL, InvalidBlockNumber, InvalidOffsetNumber, + InvalidTransactionId); /* already processed. */ - if (rc == UNDO_RET_FAIL) { + if (rc != UNDO_TRAVERSAL_COMPLETE) { DELETE_EX(urec); return; } @@ -184,7 +185,7 @@ void ExecuteUndoActionsPage(UndoRecPtr fromUrp, Relation rel, Buffer buffer, Tra break; } - Oid relOid = RelationIsPartition(rel) ? rel->parentId : RelationGetRelid(rel); + Oid relOid = RelationIsPartition(rel) ? GetBaseRelOidOfParition(rel) : RelationGetRelid(rel); Oid partitionOid = RelationIsPartition(rel) ? RelationGetRelid(rel) : InvalidOid; RmgrTable[RM_UHEAP_ID].rm_undo(urecvec, 0, urecvec->Size() - 1, xid, relOid, partitionOid, @@ -358,11 +359,11 @@ bool UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, Transaction prevUrp = undorecord->Blkprev(); TransactionId oldestXid PG_USED_FOR_ASSERTS_ONLY = - pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); /* Either oldestXidInUndo is zero or it is always <= than aborting xid */ Assert(!TransactionIdIsValid(oldestXid) || TransactionIdPrecedesOrEquals( - pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo), undorecord->Xid())); + pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo), undorecord->Xid())); /* Store the minimum and maximum LP offsets of all undorecords */ if (undorecord->Offset() > InvalidOffsetNumber && undorecord->Offset() < xlogMinLPOffset) { @@ -427,8 +428,7 @@ bool UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, Transaction case UNDO_DELETE: case UNDO_UPDATE: - case UNDO_INPLACE_UPDATE: - case UNDO_XID_LOCK_FOR_UPDATE: { + case UNDO_INPLACE_UPDATE: { UHeapDiskTuple tuple = CopyTupleFromUndoRecord(relationData.relation, undorecord, buffer); RestoreXactFromUndoRecord(undorecord, buffer, tuple); @@ -445,39 +445,6 @@ bool UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, Transaction break; } - case UNDO_XID_LOCK_ONLY: { - UHeapDiskTuple utuple; - OffsetNumber offnum = undorecord->Offset(); - StringInfoData *undoData = undorecord->Rawdata(); - - RowPtr *rp = UPageGetRowPtr(page, offnum); - - utuple = (UHeapDiskTuple)UPageGetRowData(page, rp); - - // restore tuple header - error_t rc = memcpy_s((char *)utuple + OffsetTdId, SizeOfUHeapDiskTupleHeaderExceptXid, undoData->data, - SizeOfUHeapDiskTupleHeaderExceptXid); - securec_check(rc, "", ""); - utuple->xid = (ShortTransactionId)FrozenTransactionId; - - RestoreXactFromUndoRecord(undorecord, buffer, utuple); - - /* Store the page offsets where we start and end updating 
tuples */ - if (rp->offset < xlogCopyStartOffset) { - Assert(rp->offset > SizeOfUHeapPageHeaderData); - xlogCopyStartOffset = rp->offset; - } - if (rp->offset + (Offset)SizeOfUHeapDiskTupleData > xlogCopyEndOffset) { - Assert(rp->offset + (Offset)SizeOfUHeapDiskTupleData <= BLCKSZ); - xlogCopyEndOffset = rp->offset + SizeOfUHeapDiskTupleData; - } - - break; - } - case UNDO_XID_MULTI_LOCK_ONLY: { - Assert(0); - break; - } case UNDO_ITEMID_UNUSED: default: ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("Unsupported Rollback Action"))); @@ -500,9 +467,9 @@ bool UHeapUndoActions(URecVector *urecvec, int startIdx, int endIdx, Transaction } else if (IS_VALID_UNDO_REC_PTR(slotPrevUrp)) { UndoRecord *urec = New(CurrentMemoryContext)UndoRecord(); urec->SetUrp(slotPrevUrp); - int rc = + UndoTraversalState rc = FetchUndoRecord(urec, NULL, InvalidBlockNumber, InvalidOffsetNumber, InvalidTransactionId); - if (rc == UNDO_RET_FAIL || urec->Xid() != xid) { + if (rc != UNDO_TRAVERSAL_COMPLETE || urec->Xid() != xid) { xid = InvalidTransactionId; } DELETE_EX(urec); @@ -580,7 +547,7 @@ void ExecuteUndoForInsert(Relation rel, Buffer buffer, OffsetNumber off, Transac RowPtr *rp = UPageGetRowPtr(page, off); /* Rollback insert - increment the potential space for the Page */ - UHeapRecordPotentialFreeSpace(rel, buffer, SHORTALIGN(rp->len)); + UHeapRecordPotentialFreeSpace(buffer, SHORTALIGN(rp->len)); if (RelationGetForm(rel)->relhasindex) { RowPtrSetDead(rp); @@ -615,7 +582,7 @@ void ExecuteUndoForInsertRecovery(Buffer buffer, OffsetNumber off, TransactionId *tdid = UHeapTupleHeaderGetTDSlot(diskTuple); /* Rollback insert - increment the potential space for the Page */ - UHeapRecordPotentialFreeSpace(NULL, buffer, SHORTALIGN(rp->len)); + UHeapRecordPotentialFreeSpace(buffer, SHORTALIGN(rp->len)); if (relhasindex) { RowPtrSetDead(rp); @@ -671,7 +638,7 @@ static UHeapDiskTuple CopyTupleFromUndoRecord(Relation relation, UndoRecord *und diskTuple->xid = (ShortTransactionId)FrozenTransactionId; /* Rollback delete/update - decrement the potential space for the Page */ - UHeapRecordPotentialFreeSpace(relation, buffer, -1 * SHORTALIGN(newTupleLength)); + UHeapRecordPotentialFreeSpace(buffer, -1 * SHORTALIGN(newTupleLength)); break; } @@ -757,12 +724,9 @@ static UHeapDiskTuple CopyTupleFromUndoRecord(Relation relation, UndoRecord *und pfree(old_disktuple); break; } - case UNDO_XID_LOCK_FOR_UPDATE: - case UNDO_XID_LOCK_ONLY: case UNDO_ITEMID_UNUSED: case UNDO_INSERT: - case UNDO_MULTI_INSERT: - case UNDO_XID_MULTI_LOCK_ONLY: { + case UNDO_MULTI_INSERT: { elog(ERROR, "invalid undo record type for restoring tuple"); break; } @@ -798,7 +762,6 @@ static void RestoreXactFromUndoRecord(UndoRecord *undorecord, Buffer buffer, UHe } UHeapTupleHeaderSetLockerTDSlot(tuple, UHEAPTUP_SLOT_FROZEN); - tuple->flag &= ~UHEAP_XID_LOCK_ONLY; } int UpdateTupleHeaderFromUndoRecord(UndoRecord *urec, UHeapDiskTuple diskTuple, Page page) diff --git a/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp b/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp index 2f01ee8a0..587746fca 100644 --- a/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undolauncher.cpp @@ -242,6 +242,9 @@ NON_EXEC_STATIC void UndoLauncherMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + FlushErrorState(); /* Now we can allow interrupts again */ diff --git 
a/src/gausskernel/storage/access/ustore/knl_undoworker.cpp b/src/gausskernel/storage/access/ustore/knl_undoworker.cpp index d88cac45e..236da1ea2 100644 --- a/src/gausskernel/storage/access/ustore/knl_undoworker.cpp +++ b/src/gausskernel/storage/access/ustore/knl_undoworker.cpp @@ -236,6 +236,9 @@ NON_EXEC_STATIC void UndoWorkerMain() /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + FlushErrorState(); AbortOutOfAnyTransaction(); diff --git a/src/gausskernel/storage/access/ustore/knl_upage.cpp b/src/gausskernel/storage/access/ustore/knl_upage.cpp index e9d601c1e..56f723be7 100644 --- a/src/gausskernel/storage/access/ustore/knl_upage.cpp +++ b/src/gausskernel/storage/access/ustore/knl_upage.cpp @@ -444,6 +444,19 @@ UHeapTuple UHeapGetTuplePartial(Relation relation, Buffer buffer, OffsetNumber o return tuple; } +static Size UPageGetFreeSpace(Page page) +{ + int space = (int)((UHeapPageHeaderData *)page)->pd_upper - + (int)((UHeapPageHeaderData *)page)->pd_lower; + if (space < (int)sizeof(RowPtr)) { + return 0; + } + space -= sizeof(RowPtr); + + return (Size)space; +} + + /* * PageGetUHeapFreeSpace * Returns the size of the free (allocatable) space on a uheap page, @@ -456,7 +469,7 @@ Size PageGetUHeapFreeSpace(Page page) { Size space; - space = PageGetFreeSpace(page); + space = UPageGetFreeSpace(page); if (space > 0) { OffsetNumber offnum, nline; @@ -648,7 +661,7 @@ UHeapFreeOffsetRanges *UHeapGetUsableOffsetRanges(Buffer buffer, UHeapTuple *tup return ufreeOffsetRanges; } -void UHeapRecordPotentialFreeSpace(Relation rel, Buffer buffer, int delta) +void UHeapRecordPotentialFreeSpace(Buffer buffer, int delta) { UHeapPageHeaderData *page = (UHeapPageHeaderData *)BufferGetPage(buffer); Assert(page->potential_freespace >= 0); diff --git a/src/gausskernel/storage/access/ustore/knl_uredo.cpp b/src/gausskernel/storage/access/ustore/knl_uredo.cpp index 5c3daaa08..cf0e193c5 100644 --- a/src/gausskernel/storage/access/ustore/knl_uredo.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uredo.cpp @@ -107,9 +107,9 @@ static UndoRecPtr PrepareAndInsertUndoRecordForInsertRedo(XLogReaderState *recor UndoRecPtr invalidUrp = INVALID_UNDO_REC_PTR; Oid invalidPartitionOid = 0; - + bool hasCSN = (record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN; XlUHeapInsert *xlrec = (XlUHeapInsert *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapInsert); + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapInsert + (hasCSN ? sizeof(CommitSeqNo) : 0)); char *currLogPtr = ((char *)xlundohdr + SizeOfXLUndoHeader); if ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_BLK_PREV) != 0) { blkprev = (UndoRecPtr *) ((char *)currLogPtr); @@ -188,8 +188,10 @@ static XLogRedoAction GetInsertRedoAction(XLogReaderState *record, RedoBufferInf XLogRedoAction action; if (XLogRecGetInfo(record) & XLOG_UHEAP_INIT_PAGE) { + bool hasCSN = (record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN; XlUHeapInsert *xlrec = (XlUHeapInsert *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapInsert); + XlUndoHeader *xlundohdr = + (XlUndoHeader *)((char *)xlrec + SizeOfUHeapInsert + (hasCSN ? 
sizeof(CommitSeqNo) : 0)); TransactionId *xidBase = (TransactionId *)((char *)xlundohdr + SizeOfXLUndoHeader + skipSize); uint16 *tdCount = (uint16 *)((char *)xidBase + sizeof(TransactionId)); @@ -249,7 +251,7 @@ static void PerformInsertRedoAction(XLogReaderState *record, const Buffer buf, c } /* decrement the potential freespace of this page */ - UHeapRecordPotentialFreeSpace(NULL, buf, -1 * SHORTALIGN(newlen)); + UHeapRecordPotentialFreeSpace(buf, -1 * SHORTALIGN(newlen)); // set undo tdSlotId = UHeapTupleHeaderGetTDSlot(utup); @@ -306,8 +308,9 @@ static UndoRecPtr PrepareAndInsertUndoRecordForDeleteRedo(XLogReaderState *recor bool defaultHasSubXact = false; uint32 readSize = 0; + bool hasCSN = (record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN; XlUHeapDelete *xlrec = (XlUHeapDelete *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete); + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0)); char *currLogPtr = ((char *)xlundohdr + SizeOfXLUndoHeader); if ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_SUB_XACT) != 0) { hasSubXact = (bool *) ((char *)currLogPtr); @@ -415,7 +418,7 @@ static void PerformDeleteRedoAction(XLogReaderState *record, UHeapTupleData *utu } /* increment the potential freespace of this page */ - UHeapRecordPotentialFreeSpace(NULL, buffer->buf, SHORTALIGN(datalen)); + UHeapRecordPotentialFreeSpace(buffer->buf, SHORTALIGN(datalen)); utup->disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, rp); utup->disk_tuple_size = RowPtrGetLen(rp); @@ -762,8 +765,9 @@ static UndoRecPtr PrepareAndInsertUndoRecordForUpdateRedo(XLogReaderState *recor bool inplaceUpdate = true; char *xlogXorDelta = NULL; + bool hasCSN = (record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN; XlUHeapUpdate *xlrec = (XlUHeapUpdate *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapUpdate); + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapUpdate + (hasCSN ? 
sizeof(CommitSeqNo) : 0)); UndoRecPtr urecptr = xlundohdr->urecptr; char *curxlogptr = ((char *)xlundohdr) + SizeOfXLUndoHeader; if ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_SUB_XACT) != 0) { @@ -966,10 +970,12 @@ static void PerformUpdateOldRedoAction(XLogReaderState *record, UHeapTupleData * /* Mark the page as a candidate for pruning and update the page potential freespace */ if (xlrec->flags & XLZ_NON_INPLACE_UPDATE) { UPageSetPrunable(oldpage, xid); - if (buffers->newbuffer.buf != oldbuf) { - UHeapRecordPotentialFreeSpace(NULL, oldbuf, SHORTALIGN(oldtup->disk_tuple_size)); + UHeapRecordPotentialFreeSpace(oldbuf, SHORTALIGN(oldtup->disk_tuple_size)); } + } else if (xlrec->flags & XLZ_BLOCK_INPLACE_UPDATE) { + UPageSetPrunable(oldpage, xid); + UHeapRecordPotentialFreeSpace(oldbuf, SHORTALIGN(oldtup->disk_tuple_size)); } PageSetLSN(oldpage, lsn); @@ -1123,7 +1129,36 @@ static Size PerformUpdateNewRedoAction(XLogReaderState *record, UpdateRedoBuffer /* max offset number should be valid */ Assert(UHeapPageGetMaxOffsetNumber(newpage) + 1 >= xlrec->new_offnum); - if (!(xlrec->flags & XLZ_NON_INPLACE_UPDATE)) { + if (xlrec->flags & XLZ_NON_INPLACE_UPDATE) { + UHeapBufferPage bufpage = {newbuf, NULL}; + + if (UPageAddItem(NULL, &bufpage, (Item)tuples->newtup, newlen, xlrec->new_offnum, true) == + InvalidOffsetNumber) { + elog(PANIC, "failed to add tuple"); + } + + /* Update the page potential freespace */ + if (newbuf != oldbuf) { + UHeapRecordPotentialFreeSpace(newbuf, -1 * SHORTALIGN(newlen)); + } else { + int delta = newlen - tuples->oldtup->disk_tuple_size; + UHeapRecordPotentialFreeSpace(newbuf, -1 * SHORTALIGN(delta)); + } + + if (sameBlock) { + UHeapPageSetUndo(newbuf, xlrec->old_tuple_td_id, xid, xlnewundohdr->urecptr); + } else { + UHeapPageSetUndo(newbuf, tuples->newtup->td_id, xid, xlnewundohdr->urecptr); + } + } else if (xlrec->flags & XLZ_BLOCK_INPLACE_UPDATE) { + RowPtr *rp = UPageGetRowPtr(oldpage, xlrec->old_offnum); + PutBlockInplaceUpdateTuple(oldpage, (Item)tuples->newtup, rp, newlen); + /* update the potential freespace */ + UHeapRecordPotentialFreeSpace(newbuf, newlen); + Assert(oldpage == newpage); + UPageSetPrunable(newpage, XLogRecGetXid(record)); + UHeapPageSetUndo(newbuf, xlrec->old_tuple_td_id, xid, urecptr); + } else { /* * For inplace updates, we copy the entire data portion including * the tuple header. 
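[Editorial note] The redo routines in the hunks above repeat one CSN-aware offset computation: when XLOG_CONTAIN_CSN is set in xl_term, LogCSN() placed a CommitSeqNo between the fixed-size WAL record and the XlUndoHeader, so the header pointer must be advanced by sizeof(CommitSeqNo). Below is a hedged sketch of that shared pattern; the helper name UHeapRedoGetUndoHeader is hypothetical, and fixedRecordSize stands in for SizeOfUHeapInsert / SizeOfUHeapDelete / SizeOfUHeapUpdate.

```
/*
 * Editorial sketch, not part of the patch: locating the XlUndoHeader in a
 * uheap WAL record that may carry a CommitSeqNo. fixedRecordSize is the
 * caller's SizeOfUHeapInsert / SizeOfUHeapDelete / SizeOfUHeapUpdate.
 */
static XlUndoHeader *UHeapRedoGetUndoHeader(XLogReaderState *record, Size fixedRecordSize)
{
    bool hasCSN = (record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN;
    char *rec = XLogRecGetData(record);

    /* when logged, the CommitSeqNo sits right after the fixed-size record */
    return (XlUndoHeader *)(rec + fixedRecordSize + (hasCSN ? sizeof(CommitSeqNo) : 0));
}
```
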
@@ -1140,27 +1175,6 @@ static Size PerformUpdateNewRedoAction(XLogReaderState *record, UpdateRedoBuffer } UHeapPageSetUndo(newbuf, xlrec->old_tuple_td_id, xid, urecptr); - } else { - UHeapBufferPage bufpage = {newbuf, NULL}; - - if (UPageAddItem(NULL, &bufpage, (Item)tuples->newtup, newlen, xlrec->new_offnum, true) == - InvalidOffsetNumber) { - elog(PANIC, "failed to add tuple"); - } - - /* Update the page potential freespace */ - if (newbuf != oldbuf) { - UHeapRecordPotentialFreeSpace(NULL, newbuf, -1 * SHORTALIGN(newlen)); - } else { - int delta = newlen - tuples->oldtup->disk_tuple_size; - UHeapRecordPotentialFreeSpace(NULL, newbuf, -1 * SHORTALIGN(delta)); - } - - if (sameBlock) { - UHeapPageSetUndo(newbuf, xlrec->old_tuple_td_id, xid, xlnewundohdr->urecptr); - } else { - UHeapPageSetUndo(newbuf, tuples->newtup->td_id, xid, xlnewundohdr->urecptr); - } } Size freespace = PageGetUHeapFreeSpace(newpage); /* needed to update FSM @@ -1335,7 +1349,9 @@ static UndoRecPtr PrepareAndInsertUndoRecordForMultiInsertRedo(XLogReaderState * curxlogptr += sizeof(uint16); } - *xlrec = (XlUHeapMultiInsert *)((char *)curxlogptr); + bool hasCSN = (record->decoded_record->xl_term & XLOG_CONTAIN_CSN) == XLOG_CONTAIN_CSN; + curxlogptr = curxlogptr + (hasCSN ? sizeof(CommitSeqNo) : 0); + (*xlrec) = (XlUHeapMultiInsert *)curxlogptr; curxlogptr = (char *)*xlrec + SizeOfUHeapMultiInsert; /* fetch number of distinct ranges */ @@ -1495,7 +1511,7 @@ static void PerformMultiInsertRedoAction(XLogReaderState *record, XlUHeapMultiIn } /* decrement the potential freespace of this page */ - UHeapRecordPotentialFreeSpace(NULL, buffer->buf, SHORTALIGN(newlen)); + UHeapRecordPotentialFreeSpace(buffer->buf, SHORTALIGN(newlen)); if (!skipUndo) { tdSlot = UHeapTupleHeaderGetTDSlot(uhtup); @@ -1972,10 +1988,11 @@ void UHeapUndoRedo(XLogReaderState *record) } } -static TransactionId UHeapXlogGetCurrentXidInsert(XLogReaderState *record) +static TransactionId UHeapXlogGetCurrentXidInsert(XLogReaderState *record, bool hasCSN) { XlUHeapInsert *xlrec = (XlUHeapInsert *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapInsert); + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapInsert + + (hasCSN ? sizeof(CommitSeqNo) : 0)); bool hasCurrentXid = ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_CURRENT_XID) != 0); if (!hasCurrentXid) { @@ -1997,12 +2014,12 @@ static TransactionId UHeapXlogGetCurrentXidInsert(XLogReaderState *record) return *(TransactionId*)(currLogPtr); } -static TransactionId UHeapXlogGetCurrentXidDelete(XLogReaderState *record) +static TransactionId UHeapXlogGetCurrentXidDelete(XLogReaderState *record, bool hasCSN) { XlUHeapDelete *xlrec = (XlUHeapDelete *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete); + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete + + (hasCSN ? 
sizeof(CommitSeqNo) : 0)); bool hasCurrentXid = ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_CURRENT_XID) != 0); - if (!hasCurrentXid) { return XLogRecGetXid(record); } @@ -2025,10 +2042,11 @@ static TransactionId UHeapXlogGetCurrentXidDelete(XLogReaderState *record) return *(TransactionId*)(currLogPtr); } -static TransactionId UHeapXlogGetCurrentXidUpdate(XLogReaderState *record) +static TransactionId UHeapXlogGetCurrentXidUpdate(XLogReaderState *record, bool hasCSN) { XlUHeapUpdate *xlrec = (XlUHeapUpdate *)XLogRecGetData(record); - XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapUpdate); + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapUpdate + + (hasCSN ? sizeof(CommitSeqNo) : 0)); bool hasCurrentXid = ((xlundohdr->flag & XLOG_UNDO_HEADER_HAS_CURRENT_XID) != 0); if (!hasCurrentXid) { @@ -2077,22 +2095,23 @@ static TransactionId UHeapXlogGetCurrentXidMultiInsert(XLogReaderState *record) return *(TransactionId*)(curxlogptr); } -TransactionId UHeapXlogGetCurrentXid(XLogReaderState *record) +TransactionId UHeapXlogGetCurrentXid(XLogReaderState *record, bool hasCSN) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; switch (info & XLOG_UHEAP_OPMASK) { case XLOG_UHEAP_INSERT: - return UHeapXlogGetCurrentXidInsert(record); + return UHeapXlogGetCurrentXidInsert(record, hasCSN); case XLOG_UHEAP_DELETE: - return UHeapXlogGetCurrentXidDelete(record); + return UHeapXlogGetCurrentXidDelete(record, hasCSN); case XLOG_UHEAP_UPDATE: - return UHeapXlogGetCurrentXidUpdate(record); + return UHeapXlogGetCurrentXidUpdate(record, hasCSN); case XLOG_UHEAP_FREEZE_TD_SLOT: case XLOG_UHEAP_INVALID_TD_SLOT: case XLOG_UHEAP_CLEAN: break; case XLOG_UHEAP_MULTI_INSERT: + /* The way we get current xid in MULTI_INSERT is not affected */ return UHeapXlogGetCurrentXidMultiInsert(record); default: ereport(PANIC, (errmsg("UHeapRedo: unknown op code %u", (uint8)info))); diff --git a/src/gausskernel/storage/access/ustore/knl_uscan.cpp b/src/gausskernel/storage/access/ustore/knl_uscan.cpp index 1539c3646..62f0b4eba 100644 --- a/src/gausskernel/storage/access/ustore/knl_uscan.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uscan.cpp @@ -21,10 +21,13 @@ #include "storage/buf/bufmgr.h" #include "storage/freespace.h" #include "storage/predicate.h" +#include "storage/procarray.h" #include "access/tableam.h" +#include "access/transam.h" #include "access/ustore/knl_uscan.h" #include "access/ustore/knl_uvisibility.h" #include "access/ustore/knl_uheap.h" +#include "access/ustore/knl_undorequest.h" #include "access/ustore/knl_whitebox_test.h" #include "pgstat.h" #include @@ -78,6 +81,46 @@ static inline AttrNumber UHeapCheckScanDesc(const TableScanDesc sscan) return sscan->lastVar; } +FORCE_INLINE +bool NextUpage(UHeapScanDesc scan, ScanDirection dir, BlockNumber& page) +{ + bool finished = false; + /* + * advance to next/prior page and detect end of scan + */ + if (BackwardScanDirection == dir) { + finished = (page == scan->rs_base.rs_startblock); + if (page == 0) { + page = scan->rs_base.rs_nblocks; + } + page--; + } else { + page++; + if (page >= scan->rs_base.rs_nblocks) { + page = 0; + } + finished = (page == scan->rs_base.rs_startblock); + + /* + * Report our new scan position for synchronization purposes. We + * don't do that when moving backwards, however. That would just + * mess up any other forward-moving scanners. + * + * Note: we do this before checking for end of scan so that the + * final state of the position hint is back at the start of the + * rel. 
That's not strictly necessary, but otherwise when you run + * the same query multiple times the starting position would shift + * a little bit backwards on every invocation, which is confusing. + * We don't guarantee any specific ordering in general, though. + */ + if (scan->rs_allow_sync) { + ss_report_location(scan->rs_base.rs_rd, page); + } + } + + return finished; +} + /* * UHeapGetPage - Same as heapgetpage, but operate on uheap page and * in page-at-a-time mode, visible tuples are stored in rs_visibletuples. @@ -294,14 +337,26 @@ static UHeapTuple UHeapScanGetTuple(UHeapScanDesc scan, ScanDirection dir) linesleft = lineoff; } else { - /* - * In executor it seems NoMovementScanDirection is nothing but - * do-nothing flag so we should not be here. The else part is still - * here to keep the code as in heapgettup_pagemode. - */ - Assert(false); + if (!scan->rs_base.rs_inited || (tuple == NULL)) { + Assert(!BufferIsValid(scan->rs_base.rs_cbuf)); + tuple = NULL; + return tuple; + } - return NULL; + page = ItemPointerGetBlockNumber(&(tuple->ctid)); + valid = UHeapGetPage(&scan->rs_base, page); + if (!valid) { + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("Can not refetch prior page"))); + tuple = NULL; + return tuple; + } + + lineoff = ItemPointerGetOffsetNumber(&(tuple->ctid)); + /* Since the tuple was previously fetched, needn't lock page here */ + tuple = scan->rs_visutuples[lineoff]; + return tuple; } /* @@ -357,40 +412,8 @@ get_next_tuple: get_next_page: for (;;) { - /* - * advance to next/prior page and detect end of scan - */ - if (backward) { - finished = (page == scan->rs_base.rs_startblock); - if (page == 0) - page = scan->rs_base.rs_nblocks; - page--; - } else { - page++; - if (page >= scan->rs_base.rs_nblocks) - page = 0; - - finished = (page == scan->rs_base.rs_startblock); - - /* - * Report our new scan position for synchronization purposes. We - * don't do that when moving backwards, however. That would just - * mess up any other forward-moving scanners. - * - * Note: we do this before checking for end of scan so that the - * final state of the position hint is back at the start of the - * rel. That's not strictly necessary, but otherwise when you run - * the same query multiple times the starting position would shift - * a little bit backwards on every invocation, which is confusing. - * We don't guarantee any specific ordering in general, though. - */ - if (scan->rs_allow_sync) - ss_report_location(scan->rs_base.rs_rd, page); - } - - /* - * return NULL if we've exhausted all the pages - */ + finished = NextUpage(scan, dir, page); + /* return NULL if we've exhausted all the pages */ if (finished) { if (BufferIsValid(scan->rs_base.rs_cbuf)) ReleaseBuffer(scan->rs_base.rs_cbuf); @@ -523,13 +546,26 @@ UHeapTuple UHeapGetTupleFromPage(UHeapScanDesc scan, ScanDirection dir) linesleft = lineindex + 1; } else { - /* - * In executor it seems NoMovementScanDirection is nothing but - * do-nothing flag so we should not be here. The else part is still - * here to keep the code as in heapgettup_pagemode. 
- */ - Assert(false); - return NULL; + /* ''no movement'' scan direction: refetch prior tuple */ + if (!scan->rs_base.rs_inited || (tuple == NULL)) { + Assert(!BufferIsValid(scan->rs_base.rs_cbuf)); + tuple = NULL; + return tuple; + } + + page = ItemPointerGetBlockNumber(&(tuple->ctid)); + valid = UHeapGetPage(&scan->rs_base, page); + if (!valid) { + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("Can not refetch prior page"))); + tuple = NULL; + return tuple; + } + + /* Since the tuple was previously fetched, needn't lock page here */ + tuple = scan->rs_visutuples[scan->rs_base.rs_cindex]; + return tuple; } get_next_tuple: @@ -558,36 +594,8 @@ get_next_tuple: get_next_page: for (;;) { - if (backward) { - finished = (page == scan->rs_base.rs_startblock); - if (page == 0) - page = scan->rs_base.rs_nblocks; - page--; - } else { - page++; - if (page >= scan->rs_base.rs_nblocks) - page = 0; - - finished = (page == scan->rs_base.rs_startblock); - - /* - * - * Report our new scan position for synchronization purposes. We - * don't do that when moving backwards, however. That would just - * mess up any other forward-moving scanners. - * - * Note: we do this before checking for end of scan so that the - * final state of the position hint is back at the start of the - * rel. That's not strictly necessary, but otherwise when you run - * the same query multiple times the starting position would shift - * a little bit backwards on every invocation, which is confusing. - * We don't guarantee any specific ordering in general, though. - */ - } - - /* - * return NULL if we've exhausted all the pages - */ + finished = NextUpage(scan, dir, page); + /* return NULL if we've exhausted all the pages */ if (finished) { if (BufferIsValid(scan->rs_base.rs_cbuf)) ReleaseBuffer(scan->rs_base.rs_cbuf); @@ -630,9 +638,8 @@ TableScanDesc UHeapBeginScan(Relation relation, Snapshot snapshot, int nkeys) RelationIncrementReferenceCount(relation); } - uscan = (UHeapScanDesc)palloc(sizeof(UHeapScanDescData)); + uscan = (UHeapScanDesc)palloc0(sizeof(UHeapScanDescData)); - uscan->rs_base.rs_rd = relation; uscan->rs_tupdesc = RelationGetDescr(relation); uscan->rs_base.rs_rd = relation; uscan->rs_base.rs_snapshot = snapshot; @@ -640,10 +647,11 @@ TableScanDesc UHeapBeginScan(Relation relation, Snapshot snapshot, int nkeys) uscan->rs_base.rs_startblock = 0; uscan->rs_base.rs_ntuples = 0; uscan->rs_cutup = NULL; - if (nkeys > 0) - uscan->rs_base.rs_key = (ScanKey)palloc(sizeof(ScanKeyData) * nkeys); - else + if (nkeys > 0) { + uscan->rs_base.rs_key = (ScanKey)palloc0(sizeof(ScanKeyData) * nkeys); + } else { uscan->rs_base.rs_key = NULL; + } uscan->rs_base.rs_nblocks = RelationIsPartitioned(uscan->rs_base.rs_rd) ? 
0 : RelationGetNumberOfBlocks(uscan->rs_base.rs_rd); @@ -655,6 +663,7 @@ TableScanDesc UHeapBeginScan(Relation relation, Snapshot snapshot, int nkeys) uscan->rs_base.rs_pageatatime = IsMVCCSnapshot(snapshot); uscan->rs_base.rs_strategy = NULL; uscan->rs_base.rs_ss_accessor = NULL; + uscan->rs_ctupBatch = NULL; return (TableScanDesc)uscan; } @@ -783,6 +792,10 @@ void UHeapEndScan(TableScanDesc scan) if (uscan->rs_base.rs_key) pfree(uscan->rs_base.rs_key); + if (uscan->rs_ctupBatch != NULL) { + pfree_ext(uscan->rs_ctupBatch); + } + pfree(uscan); } @@ -1072,6 +1085,72 @@ bool UHeapScanBitmapNextTuple(TableScanDesc sscan, TBMIterateResult *tbmres, Tup return true; } +void UHeapMarkPos(TableScanDesc uscan) +{ + UHeapScanDesc scan = (UHeapScanDesc) uscan; + + /* Note: no locking manipulations needed */ + if (scan->rs_cutup->disk_tuple != NULL) { + scan->rs_mctid = scan->rs_cutup->ctid; + if (scan->rs_base.rs_pageatatime) { + scan->rs_mindex = scan->rs_base.rs_cindex; + } + } else + ItemPointerSetInvalid(&scan->rs_mctid); +} + +void UHeapRestRpos(TableScanDesc sscan) +{ + UHeapScanDesc scan = (UHeapScanDesc) sscan; + /* XXX no amrestrpos checking that ammarkpos called */ + if (!ItemPointerIsValid(&scan->rs_mctid) && (scan->rs_cutup != NULL)) { + scan->rs_cutup->disk_tuple = NULL; + + /* + * unpin scan buffers + */ + if (BufferIsValid(scan->rs_base.rs_cbuf)) { + ReleaseBuffer(scan->rs_base.rs_cbuf); + } + scan->rs_base.rs_cbuf = InvalidBuffer; + scan->rs_base.rs_cblock = InvalidBlockNumber; + scan->rs_base.rs_inited = false; + } else { + /* + * If we reached end of scan, rs_inited will now be false. We must + * reset it to true to keep heapgettup from doing the wrong thing. + */ + scan->rs_base.rs_inited = true; + + UHeapTuple utuple; + UHeapTupleData uheaptupdata; + utuple = &uheaptupdata; + struct { + UHeapDiskTupleData hdr; + char data[MaxPossibleUHeapTupleSize]; + } tbuf; + + errno_t errorNo = EOK; + errorNo = memset_s(&tbuf, sizeof(tbuf), 0, sizeof(tbuf)); + securec_check(errorNo, "\0", "\0"); + + utuple->disk_tuple = &tbuf.hdr; + utuple->ctid = scan->rs_mctid; + if (scan->rs_cutup == NULL) { + scan->rs_cutup = utuple; + } else { + scan->rs_cutup->ctid = scan->rs_mctid; + } + + if (scan->rs_base.rs_pageatatime) { + scan->rs_base.rs_cindex = scan->rs_mindex; + UHeapGetTupleFromPage(scan, NoMovementScanDirection); + } else { + UHeapScanGetTuple(scan, NoMovementScanDirection); + } + } +} + UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir) { UHeapScanDesc scan = (UHeapScanDesc)sscan; @@ -1098,44 +1177,205 @@ UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir) return scan->rs_cutup; } -FORCE_INLINE -bool NextUpage(UHeapScanDesc scan, ScanDirection dir, BlockNumber& page) +Buffer UHeapIndexBuildNextBlock(UHeapScanDesc scan) { - bool finished = false; - /* - * advance to next/prior page and detect end of scan - */ - if (BackwardScanDirection == dir) { - finished = (page == scan->rs_base.rs_startblock); - if (page == 0) { - page = scan->rs_base.rs_nblocks; - } - page--; + BlockNumber blkno = InvalidBlockNumber; + if (scan->rs_base.rs_cblock == InvalidBlockNumber) { + /* first page, init rs_visutuples array and other information */ + blkno = 0; + scan->rs_base.rs_ntuples = 0; } else { - page++; - if (page >= scan->rs_base.rs_nblocks) { - page = 0; - } - finished = (page == scan->rs_base.rs_startblock); + blkno = scan->rs_base.rs_cblock + 1; + } + /* cleanup the workspace */ + for (int i = 0; i < scan->rs_base.rs_ntuples; i++) { + pfree(scan->rs_visutuples[i]); + } + 
scan->rs_base.rs_ntuples = 0; + scan->rs_cutup = NULL; - /* - * Report our new scan position for synchronization purposes. We - * don't do that when moving backwards, however. That would just - * mess up any other forward-moving scanners. - * - * Note: we do this before checking for end of scan so that the - * final state of the position hint is back at the start of the - * rel. That's not strictly necessary, but otherwise when you run - * the same query multiple times the starting position would shift - * a little bit backwards on every invocation, which is confusing. - * We don't guarantee any specific ordering in general, though. - */ - if (scan->rs_allow_sync) { - ss_report_location(scan->rs_base.rs_rd, page); + if (blkno >= scan->rs_base.rs_nblocks) { + return InvalidBuffer; /* we are done */ + } + scan->rs_base.rs_cblock = blkno; + + /* read the next page and lock it in exclusive mode */ + Buffer buf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, blkno, RBM_NORMAL, scan->rs_base.rs_strategy); + LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); + return buf; +} + +bool UHeapIndexBuildNextPage(UHeapScanDesc scan) +{ + Buffer buf = UHeapIndexBuildNextBlock(scan); + if (!BufferIsValid(buf)) { + return false; + } + + Page page = BufferGetPage(buf); + + /* step 1: roll back all aborted transactions */ + int numSlots = GetTDCount((UHeapPageHeaderData *)page); + TD *tdSlots = (TD *)PageGetTDPointer(page); + UndoRecPtr *urecptr = (UndoRecPtr *)palloc(numSlots * sizeof(UndoRecPtr)); + TransactionId *fxid = (TransactionId *)palloc(numSlots * sizeof(TransactionId)); + + int nAborted = 0; + for (int slotNo = 0; slotNo < numSlots; slotNo++) { + TransactionId xid = tdSlots[slotNo].xactid; + if (!TransactionIdIsValid(xid) || TransactionIdIsCurrentTransactionId(xid) || TransactionIdDidCommit(xid)) { + continue; /* xid visible in SnapshotNow */ + } + /* xid is aborted; record it and roll back later */ + urecptr[nAborted] = tdSlots[slotNo].undo_record_ptr; + fxid[nAborted] = xid; + nAborted++; + } + if (nAborted > 0) { + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + for (int i = 0; i < nAborted; i++) { + ExecuteUndoActionsPage(urecptr[i], scan->rs_base.rs_rd, buf, fxid[i]); + } + LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); + } + pfree(urecptr); + pfree(fxid); + + /* step 2: try to prune this page */ + TransactionId xid = GetCurrentTransactionId(); + UHeapPagePruneFSM(scan->rs_base.rs_rd, buf, xid, page, BufferGetBlockNumber(buf)); + + /* step 3: scan over all tuples and cache visible tuples */ + int ntup = 0; + OffsetNumber maxoff = UHeapPageGetMaxOffsetNumber(page); + for (OffsetNumber offnum = FirstOffsetNumber; offnum <= maxoff; offnum++) { + RowPtr *rp = UPageGetRowPtr(page, offnum); + if (!RowPtrIsNormal(rp)) { + continue; /* deleted or unused is not visible here */ + } + /* get the tuple without copying */ + UHeapTuple tuple = (UHeapTuple)uheaptup_alloc(UHeapTupleDataSize); + tuple->disk_tuple = (UHeapDiskTuple)UPageGetRowData(page, rp); + + uint16 infomask = tuple->disk_tuple->flag; + if ((infomask & (UHEAP_UPDATED | UHEAP_DELETED)) != 0) { + pfree(tuple); + continue; /* the last operation removed this tuple */ + } + /* set up other fields */ + tuple->table_oid = RelationGetRelid(scan->rs_base.rs_rd); + tuple->xc_node_id = u_sess->pgxc_cxt.PGXCNodeIdentifier; + tuple->disk_tuple_size = RowPtrGetLen(rp); + ItemPointerSet(&tuple->ctid, BufferGetBlockNumber(buf), offnum); + + /* save this tuple */ + scan->rs_visutuples[ntup++] = tuple; + } + scan->rs_base.rs_ntuples = ntup; + scan->rs_base.rs_cindex = 0; + 
+    UnlockReleaseBuffer(buf);
+    return true;
+}
+
+UHeapTuple UHeapIndexBuildGetNextTuple(UHeapScanDesc scan, TupleTableSlot *slot)
+{
+    if (scan->rs_base.rs_snapshot->satisfies != SNAPSHOT_NOW) {
+        ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("We must use SnapshotNow to build a ustore index.")));
+    }
+    /* get the next page after all cached tuples of the current page are returned */
+    while (scan->rs_base.rs_cblock == InvalidBlockNumber || scan->rs_base.rs_cindex >= scan->rs_base.rs_ntuples) {
+        if (!UHeapIndexBuildNextPage(scan)) {
+            ExecClearTuple(slot);
+            return NULL;
+        }
+    }
+    int lineindex = scan->rs_base.rs_cindex;
+    UHeapTuple tuple = scan->rs_visutuples[lineindex];
+    scan->rs_base.rs_cindex++; /* rs_cindex now indicates the next tuple's index */
+    scan->rs_cutup = tuple;
+
+    ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+    return tuple;
+}
+
+/*
+ * Scan one page in batch scan mode: copy up to rs_maxScanRows of the page's
+ * cached visible tuples into rs_ctupBatch, starting at lineIndex.
+ */
+static void UHeapScanPagesForBatchMode(UHeapScanDesc scan, int lineIndex)
+{
+    int lines, rows = 0;
+    UHeapScanDesc uScan = (UHeapScanDesc)scan;
+    lines = scan->rs_base.rs_ntuples;
+
+    while (lineIndex < lines) {
+        scan->rs_ctupBatch[rows++] = uScan->rs_visutuples[lineIndex];
+        if (rows == scan->rs_base.rs_maxScanRows) {
+            break;
+        }
+        lineIndex++;
+    }
+
+    scan->rs_base.rs_cindex = lineIndex;
+    scan->rs_base.rs_ctupRows = rows;
+}
+
+bool UHeapGetTupPageBatchmode(UHeapScanDesc scan, ScanDirection dir)
+{
+    BlockNumber page;
+
+    int lineIndex;
+    bool finished = false;
+
+    scan->rs_base.rs_ctupRows = 0;
+
+    /* IO collector and IO scheduler for seqscan */
+    if (ENABLE_WORKLOAD_CONTROL) {
+        IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW);
+    }
+
+    /* calculate the next starting lineIndex, given the scan direction */
+    if (!scan->rs_base.rs_inited) {
+        /* report an empty batch immediately if the relation is empty */
+        if (scan->rs_base.rs_nblocks == 0) {
+            Assert(!BufferIsValid(scan->rs_base.rs_cbuf));
+            scan->rs_base.rs_ctupRows = 0;
+            return true;
+        }
+        page = scan->rs_base.rs_startblock;
+        UHeapGetPage((TableScanDesc)scan, page);
+        lineIndex = 0;
+        scan->rs_base.rs_inited = true;
+    } else {
+        /* continue from previously returned page/tuple */
+        page = scan->rs_base.rs_cblock;
+        lineIndex = scan->rs_base.rs_cindex + 1;
+    }
+
+    for (;;) {
+        if (lineIndex < scan->rs_base.rs_ntuples) {
+            UHeapScanPagesForBatchMode(scan, lineIndex);
+            break;
+        }
+
+        finished = NextUpage(scan, dir, page);
+        /* all pages are exhausted; report that the scan is finished */
+        if (finished) {
+            scan->rs_base.rs_cbuf = InvalidBuffer;
+            scan->rs_base.rs_cblock = InvalidBlockNumber;
+            scan->rs_base.rs_inited = false;
+            scan->rs_base.rs_ctupRows = 0;
+            return true;
+        } else {
+            if (unlikely(!UHeapGetPage((TableScanDesc)scan, page))) {
+                continue;
+            }
+            lineIndex = 0;
        }
    }
-    return finished;
+    return false;
 }
 
 static void SkipToNewUPage(UHeapScanDesc scan, ScanDirection dir, BlockNumber page,
diff --git a/src/gausskernel/storage/access/ustore/knl_utuple.cpp b/src/gausskernel/storage/access/ustore/knl_utuple.cpp
index 09f4f8306..f872b0e89 100644
--- a/src/gausskernel/storage/access/ustore/knl_utuple.cpp
+++ b/src/gausskernel/storage/access/ustore/knl_utuple.cpp
@@ -118,7 +118,7 @@ void UHeapFillDiskTuple(TupleDesc tupleDesc, Datum *values, const bool *isnull,
     int numberOfAttributes = tupleDesc->natts;
     Assert(NAttrsReserveSpace(numberOfAttributes) == enableReverseBitmap);
-    Assert(!enableReserve || (enableReserve && enableReverseBitmap));
+    Assert(!enableReserve || enableReverseBitmap);
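+    /* by absorption, !A || (A && B) is equivalent to !A || B: the assertion
+     * still states that enableReserve implies enableReverseBitmap */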
 Assert(enableReverseBitmap || (enableReverseBitmap == false && enableReserve == false));
 
 Form_pg_attribute *att = tupleDesc->attrs;
@@ -242,6 +242,11 @@ void UHeapFillDiskTuple(TupleDesc tupleDesc, Datum *values, const bool *isnull,
     Assert((size_t)(data - begin) == dataSize);
 }
 
+template void UHeapFillDiskTuple<true>(TupleDesc tupleDesc, Datum *values, const bool *isnull,
+    UHeapDiskTupleData *diskTuple, uint32 dataSize, bool enableReverseBitmap, bool enableReserve);
+template void UHeapFillDiskTuple<false>(TupleDesc tupleDesc, Datum *values, const bool *isnull,
+    UHeapDiskTupleData *diskTuple, uint32 dataSize, bool enableReverseBitmap, bool enableReserve);
+
 /*
  * Copy a varlena column from source disk tuple (val) to destination (data)
  * Param names match UHeapCopyDiskTupleNoNull and UHeapCopyDiskTupleWithNulls
@@ -1025,13 +1030,14 @@ Bitmapset *UHeapTupleAttrEquals(TupleDesc tupdesc, Bitmapset *att_list, UHeapTup
  */
 UHeapTuple UHeapCopyTuple(UHeapTuple uhtup)
 {
-    Assert(uhtup->tupTableType == UHEAP_TUPLE);
-
     UHeapTuple newTuple;
     errno_t rc = EOK;
 
-    if (!UHeapTupleIsValid(uhtup) || uhtup->disk_tuple == NULL)
+    if (!UHeapTupleIsValid(uhtup) || uhtup->disk_tuple == NULL) {
         return NULL;
+    }
+
+    Assert(uhtup->tupTableType == UHEAP_TUPLE);
 
     newTuple = (UHeapTuple)uheaptup_alloc(UHeapTupleDataSize + uhtup->disk_tuple_size);
     newTuple->ctid = uhtup->ctid;
@@ -1373,6 +1379,91 @@ void UHeapSlotGetSomeAttrs(TupleTableSlot *slot, int attnum)
         attnum);
 }
 
+void UHeapSlotFormBatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int attnum)
+{
+    Assert(slot->tts_tupslotTableAm == TAM_USTORE);
+
+    /* Quick out if we have all already */
+    if (slot->tts_nvalid >= attnum) {
+        return;
+    }
+
+    UHeapTuple utuple = (UHeapTuple)slot->tts_tuple;
+    TupleDesc rowDesc = slot->tts_tupleDescriptor;
+    bool isNull = slot->tts_isnull;
+    UHeapDiskTuple diskTuple = utuple->disk_tuple;
+    bool hasNull = UHeapDiskTupHasNulls(diskTuple);
+    Form_pg_attribute* att = rowDesc->attrs;
+    int attno;
+    bits8* bp = diskTuple->data;
+    long off = diskTuple->t_hoff;
+    char* tupPtr = (char*)diskTuple;
+    int nullcount = 0;
+    int tupleAttrs = UHeapTupleHeaderGetNatts(diskTuple);
+    bool enableReverseBitmap = NAttrsReserveSpace(tupleAttrs);
+
+    WHITEBOX_TEST_STUB(UHEAP_DEFORM_TUPLE_FAILED, WhiteboxDefaultErrorEmit);
+
+    int natts = Min(tupleAttrs, attnum);
+    for (attno = 0; attno < natts; attno++) {
+        Form_pg_attribute thisatt = att[attno];
+        ScalarVector* pVector = &batch->m_arr[attno];
+
+        if (hasNull && att_isnull(attno, bp)) {
+            /* Skip the attribute length in case the tuple was stored with
+               space reserved for null attributes */
+            if (enableReverseBitmap && !att_isnull(tupleAttrs + nullcount, bp)) {
+                off += thisatt->attlen;
+            }
+
+            nullcount++;
+
+            pVector->m_vals[cur_rows] = (Datum)0;
+            SET_NULL(pVector->m_flag[cur_rows]);
+            /* stole the flag for perf */
+            pVector->m_const = true;
+            continue;
+        }
+
+        SET_NOTNULL(pVector->m_flag[cur_rows]);
+
+        /*
+         * If this is a varlena, there might be alignment padding, if it has a
+         * 4-byte header. Otherwise, there will only be padding if it's not
+         * pass-by-value.
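+         * (att_align_pointer examines the byte at the current offset, so a
+         * short varlena with a 1-byte header skips the padding entirely.)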
+         */
+        if (thisatt->attlen == -1) {
+            off = att_align_pointer(off, thisatt->attalign, -1, tupPtr + off);
+        } else if (!thisatt->attbyval) {
+            off = att_align_nominal(off, thisatt->attalign);
+        }
+
+        pVector->m_vals[cur_rows] = fetchatt(thisatt, tupPtr + off);
+
+        off = att_addlength_pointer(off, thisatt->attlen, tupPtr + off);
+    }
+
+    /*
+     * If the tuple doesn't have all the atts indicated by tupleDesc, read
+     * the rest as nulls
+     */
+    for (; attno < attnum; attno++) {
+        ScalarVector* pVector = &batch->m_arr[attno];
+        /* get the init default value from tupleDesc.
+         * The original code is:
+         * example code: values[attnum] = (Datum) 0;
+         * example code: isNulls[attnum] = true;
+         */
+        pVector->m_vals[cur_rows] = heapGetInitDefVal(attno + 1, rowDesc, &isNull);
+        if (isNull) {
+            SET_NULL(pVector->m_flag[cur_rows]);
+            pVector->m_const = true;
+        } else {
+            SET_NOTNULL(pVector->m_flag[cur_rows]);
+        }
+    }
+}
+
 /*
  * UHeapSlotAttIsNull
  *      Detect whether an attribute of the slot is null, without
@@ -1699,7 +1790,7 @@ void UHeapSlotStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool sh
  * @param slot: slot in which the tuple needs to be stored.
  * @param should_free: whether slot assumes responsibility of freeing up the tuple.
  */
-void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shouldFree)
+void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shouldFree, bool batchMode)
 {
     /*
      * sanity checks
diff --git a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp
index f854cae33..21af41866 100644
--- a/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp
+++ b/src/gausskernel/storage/access/ustore/knl_utuptoaster.cpp
@@ -193,7 +193,7 @@ UHeapTuple UHeapToastInsertOrUpdate(Relation relation, UHeapTuple newtup, UHeapT
      * we have to delete it later.
      */
     if (att->attlen == -1 && !toastOldIsNull[i] && VARATT_IS_EXTERNAL_ONDISK(oldValue)) {
-        if (toastIsNull[i] || !VARATT_IS_EXTERNAL_ONDISK(newValue) ||
+        if (toastIsNull[i] || !VARATT_IS_EXTERNAL_ONDISK(newValue) || RelationIsLogicallyLogged(relation) ||
             memcmp((char *)oldValue, (char *)newValue, VARSIZE_EXTERNAL(oldValue)) != 0) {
             /*
              * The old external stored value isn't needed any more
@@ -228,6 +228,10 @@ UHeapTuple UHeapToastInsertOrUpdate(Relation relation, UHeapTuple newtup, UHeapT
      * Now look at varlena attributes
      */
     if (att->attlen == -1) {
+        if (VARATT_IS_HUGE_TOAST_POINTER(newValue)) {
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                errmsg("clob/blob types larger than 1GB are not supported in Ustore")));
+        }
         /*
          * If the table's attribute says PLAIN always, force it so.
          */
@@ -806,7 +810,7 @@ static Datum UHeapToastSaveDatum(Relation rel, Datum value, struct varlena *olde
     securec_check(rc, "", "");
 
     toasttup = UHeapFormTuple(toastTupDesc, tValues, tIsnull);
-    (void)UHeapInsert(toastrel, toasttup, mycid, NULL);
+    (void)UHeapInsert(toastrel, toasttup, mycid, NULL, true);
 
     /*
      * Create the index entry.
We cheat a little here by not using @@ -1065,7 +1069,7 @@ struct varlena *UHeapInternalToastFetchDatum(struct varatt_external toastPointer } struct varlena *UHeapInternalToastFetchDatumSlice(struct varatt_external toastPointer, Relation toastrel, - Relation toastidx, int32 sliceoffset, int32 length) + Relation toastidx, int64 sliceoffset, int32 length) { int32 attrsize; int32 residx; diff --git a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp index d8c3c908c..c00076042 100644 --- a/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uundorecord.cpp @@ -425,19 +425,11 @@ bool UndoRecord::ReadUndoRecord(_in_ Page page, _in_ int startingByte, __inout i return true; } -static bool LoadUndoRecord(UndoRecord *urec) +static UndoRecordState LoadUndoRecord(UndoRecord *urec) { - if (!IS_VALID_UNDO_REC_PTR(urec->Urp())) { -#ifdef DEBUG_UHEAP - UHEAPSTAT_COUNT_UNDO_CHAIN_VISITED_MISS(); -#endif - return false; - } - if (!undo::CheckUndoRecordValid(urec->Urp(), true)) { -#ifdef DEBUG_UHEAP - UHEAPSTAT_COUNT_UNDO_CHAIN_VISITED_MISS(); -#endif - return false; + UndoRecordState state = undo::CheckUndoRecordValid(urec->Urp(), true); + if (state != UNDO_RECORD_NORMAL) { + return state; } int saveInterruptHoldoffCount = t_thrd.int_cxt.InterruptHoldoffCount; @@ -448,35 +440,34 @@ static bool LoadUndoRecord(UndoRecord *urec) } PG_CATCH(); { - if ((!t_thrd.undo_cxt.fetchRecord) && (!undo::CheckUndoRecordValid(urec->Urp(), true))) { + state = undo::CheckUndoRecordValid(urec->Urp(), true); + if ((!t_thrd.undo_cxt.fetchRecord) && (state == UNDO_RECORD_DISCARD || + state == UNDO_RECORD_FORCE_DISCARD)) { t_thrd.int_cxt.InterruptHoldoffCount = saveInterruptHoldoffCount; if (BufferIsValid(urec->Buff())) { ReleaseBuffer(urec->Buff()); urec->SetBuff(InvalidBuffer); } - ereport(LOG, (errmsg(UNDOFORMAT("fetch record fail, curr undo: %lu"), urec->Urp()))); - return false; + return state; } else { PG_RE_THROW(); } } PG_END_TRY(); t_thrd.undo_cxt.fetchRecord = false; - return true; + return UNDO_RECORD_NORMAL; } -int FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRecordCallback callback, - _in_ BlockNumber blkno, _in_ OffsetNumber offset, _in_ TransactionId xid) +UndoTraversalState FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRecordCallback callback, + _in_ BlockNumber blkno, _in_ OffsetNumber offset, _in_ TransactionId xid, bool isNeedBypass) { -#ifdef DEBUG_UHEAP int64 undo_chain_len = 0; /* len of undo chain for one tuple */ -#endif Assert(urec); if (RecoveryInProgress()) { uint64 blockcnt = 0; - while (undo::CheckUndoRecordRecoveryStatus(urec->Urp()) == UNDO_NOT_RECOVERY) { + while (undo::CheckUndoRecordValid(urec->Urp(), false) == UNDO_RECORD_NOT_INSERT) { ereport(LOG, (errmsg(UNDOFORMAT("urp: %ld is not replayed yet. 
ROS waiting for UndoRecord replay."),
                    urec->Urp())));
@@ -486,19 +477,25 @@ int FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRecordCallback cal
             CHECK_FOR_INTERRUPTS();
         }
     }
-    if (undo::CheckUndoRecordRecoveryStatus(urec->Urp()) == UNDO_DISCARD) {
-        return UNDO_RET_FAIL;
+    if (undo::CheckUndoRecordValid(urec->Urp(), false) == UNDO_RECORD_DISCARD) {
+        return UNDO_TRAVERSAL_END;
     }
 }
 
 do {
-    if (!LoadUndoRecord(urec)) {
-        return UNDO_RET_FAIL;
+    UndoRecordState state = LoadUndoRecord(urec);
+    if (state == UNDO_RECORD_INVALID || state == UNDO_RECORD_DISCARD) {
+        return UNDO_TRAVERSAL_END;
+    } else if (state == UNDO_RECORD_FORCE_DISCARD) {
+        return UNDO_TRAVERSAL_ABORT;
+    }
+
+    if (isNeedBypass && TransactionIdPrecedes(urec->Xid(), g_instance.undo_cxt.oldestFrozenXid)) {
+        ereport(DEBUG1, (errmsg(UNDOFORMAT("Check visibility by oldestFrozenXid"))));
+        return UNDO_TRAVERSAL_STOP;
    }

-#ifdef DEBUG_UHEAP
     ++undo_chain_len;
-#endif

     if (blkno == InvalidBlockNumber) {
         break;
@@ -516,7 +513,11 @@ int FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRecordCallback cal
 #ifdef DEBUG_UHEAP
     UHEAPSTAT_COUNT_UNDO_CHAIN_VISTIED(undo_chain_len)
 #endif
-    return UNDO_RET_SUCC;
+    g_instance.undo_cxt.undoChainTotalSize += undo_chain_len;
+    g_instance.undo_cxt.undo_chain_visited_count += 1;
+    g_instance.undo_cxt.maxChainSize =
+        g_instance.undo_cxt.maxChainSize > undo_chain_len ? g_instance.undo_cxt.maxChainSize : undo_chain_len;
+    return UNDO_TRAVERSAL_COMPLETE;
 }
 
 bool InplaceSatisfyUndoRecord(_in_ UndoRecord *urec, _in_ BlockNumber blkno, _in_ OffsetNumber offset,
diff --git a/src/gausskernel/storage/access/ustore/knl_uundovec.cpp b/src/gausskernel/storage/access/ustore/knl_uundovec.cpp
index 7ed69e450..8d72ae7b5 100644
--- a/src/gausskernel/storage/access/ustore/knl_uundovec.cpp
+++ b/src/gausskernel/storage/access/ustore/knl_uundovec.cpp
@@ -362,7 +362,8 @@ static bool LoadUndoRecordRange(UndoRecord *urec, Buffer *buffer)
      * discard worker from discarding undo data while we are reading it.
      * In other words, we need to copy the data so that a concurrent discard cannot invalidate it.
*/ - if (!undo::CheckUndoRecordValid(urec->Urp(), true)) { + UndoRecordState state = undo::CheckUndoRecordValid(urec->Urp(), false); + if (state != UNDO_RECORD_NORMAL) { return false; } @@ -380,8 +381,8 @@ static bool LoadUndoRecordRange(UndoRecord *urec, Buffer *buffer) ReleaseBuffer(urec->Buff()); urec->SetBuff(InvalidBuffer); } - if ((!t_thrd.undo_cxt.fetchRecord) && (!undo::CheckUndoRecordValid(urec->Urp(), true))) { - ereport(LOG, (errmsg(UNDOFORMAT("fetch range record fail, curr undo: %lu"), urec->Urp()))); + state = undo::CheckUndoRecordValid(urec->Urp(), false); + if ((!t_thrd.undo_cxt.fetchRecord) && state == UNDO_RECORD_DISCARD) { return false; } else { PG_RE_THROW(); @@ -532,9 +533,9 @@ int PrepareUndoRecord(_in_ URecVector *urecvec, _in_ UndoPersistence upersistenc urec->SetUinfo(UNDO_UREC_INFO_TRANSAC); } } else { - if (g_instance.attr.attr_storage.undo_zone_count == 0) { - ereport(ERROR, (errmsg(UNDOFORMAT("Invalid undo zone count, max count is %d, now is %d"), - g_instance.attr.attr_storage.undo_zone_count, g_instance.undo_cxt.uZoneCount))); + if (!g_instance.attr.attr_storage.enable_ustore) { + ereport(ERROR, (errmsg(UNDOFORMAT("Ustore is disabled, " + "please set GUC enable_ustore=on and restart database.")))); } needSwitch = undo::CheckNeedSwitch(upersistence, totalSize); if (needSwitch) { diff --git a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp index 4008aa88b..a607fa53a 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvacuumlazy.cpp @@ -72,7 +72,7 @@ static int LazyVacuumUPage(Relation onerel, BlockNumber blkno, Buffer buffer, in TransactionId oldestXmin); static void LazySpaceUalloc(LVRelStats *vacrelstats, BlockNumber relblocks, Relation onerel); static IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrelstats, Relation *Irel, - Relation *IPartRel, Partition *IPart, int nindexes, TransactionId oldestXmin); + int nindexes, TransactionId oldestXmin); static void LazyVacuumUHeap(Relation onerel, LVRelStats *vacrelstats, TransactionId oldestXmin); /* @@ -195,7 +195,7 @@ static void LazyVacuumUHeap(Relation onerel, LVRelStats *vacrelstats, Transactio * writing any undo; */ static IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrelstats, Relation *Irel, - Relation *IPartRel, Partition *IPart, int nindexes, TransactionId oldestXmin) + int nindexes, TransactionId oldestXmin) { UHeapTupleData tuple; BlockNumber emptyPages; @@ -239,7 +239,7 @@ static IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrel for (blkno = scanStartingBlock; blkno < nblocks; blkno++) { Buffer buf; Page page; - TransactionId xid; + TransactionId xid = InvalidTransactionId; OffsetNumber offnum; OffsetNumber maxoff; Size freespace; @@ -248,10 +248,11 @@ static IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrel bool has_dead_tuples; /* IO collector and IO scheduler for vacuum */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) { IOSchedulerAndUpdate(IO_TYPE_READ, 1, IO_TYPE_ROW); } - +#endif vacuum_delay_point(); /* @@ -318,7 +319,7 @@ static IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrel continue; } - if (UPageIsEmpty((UHeapPageHeaderData *)page, RelationGetInitTd(onerel))) { + if (UPageIsEmpty((UHeapPageHeaderData *)page)) { emptyPages++; freespace = PageGetUHeapFreeSpace(page); UnlockReleaseBuffer(buf); @@ -484,10 +485,11 @@ static 
IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrel /* Do post-vacuum cleanup and statistics update for each index */ for (i = 0; i < nindexes; i++) { /* IO collector and IO scheduler for vacuum */ +#ifdef ENABLE_MULTIPLE_NODES if (ENABLE_WORKLOAD_CONTROL) { IOSchedulerAndUpdate(IO_TYPE_WRITE, 1, IO_TYPE_ROW); } - +#endif indstats[i] = lazy_cleanup_index(Irel[i], indstats[i], vacrelstats, vac_strategy); } @@ -511,6 +513,36 @@ static IndexBulkDeleteResult **LazyScanUHeap(Relation onerel, LVRelStats *vacrel return indstats; } +static void LazyScanURel(Relation onerel, LVRelStats *vacrelstats, VacuumStmt *vacstmt, Relation *irel, + int nindexes, TransactionId oldestXmin) +{ + IndexBulkDeleteResult **indstats = NULL; + int idx; + + if (nindexes > 0) { + vacrelstats->new_idx_pages = (BlockNumber*)palloc0(nindexes * sizeof(BlockNumber)); + vacrelstats->new_idx_tuples = (double*)palloc0(nindexes * sizeof(double)); + vacrelstats->idx_estimated = (bool*)palloc0(nindexes * sizeof(bool)); + } + + indstats = LazyScanUHeap(onerel, vacrelstats, irel, nindexes, oldestXmin); + + /* Vacuum the Free Space Map */ + FreeSpaceMapVacuum(onerel); + + for (idx = 0; idx < nindexes; idx++) { + /* summarize the index status information */ + if (indstats[idx] != NULL) { + vacrelstats->new_idx_pages[idx] = indstats[idx]->num_pages; + vacrelstats->new_idx_tuples[idx] = indstats[idx]->num_index_tuples; + vacrelstats->idx_estimated[idx] = indstats[idx]->estimated_count; + pfree_ext(indstats[idx]); + } + } + + pfree_ext(indstats); +} + /* * lazy_vacuum_uheap_rel() -- perform LAZY VACUUM for one uheap relation */ @@ -553,8 +585,18 @@ void LazyVacuumUHeapRel(Relation onerel, VacuumStmt *vacstmt, BufferAccessStrate * because like other backends operating on uheap, lazy vacuum also * reserves a transaction slot in the page for pruning purpose. */ - TransactionId oldestXmin = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); - Assert(TransactionIdIsNormal(oldestXmin)); + TransactionId oldestXmin = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); + if (!TransactionIdIsNormal(oldestXmin)) { + ereport(WARNING, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmodule(MOD_VACUUM), + errmsg("oldestXidInUndo(%lu) is abnormal.", oldestXmin), + errdetail("N/A"), + errcause("There is a high probability that the DML operation " + "has not been performed on any ustore table. "), + erraction("Check the value of oldestXidInUndo in undo_cxt."))); + return; + } vacrelstats = (LVRelStats *)palloc0(sizeof(LVRelStats)); @@ -581,30 +623,7 @@ void LazyVacuumUHeapRel(Relation onerel, VacuumStmt *vacstmt, BufferAccessStrate /* Always enable index vacuuming when the relation has an index. 
 */
    vacrelstats->hasindex = (nindexes > 0);

-    /* Do the vacuuming */
-    IndexBulkDeleteResult **indstats = NULL;
-
-    if (RelationIsPartition(onerel)) {
-        indstats = LazyScanUHeap(onerel, vacrelstats, irel, indexrel, indexpart, nindexes, oldestXmin);
-    } else {
-        indstats = LazyScanUHeap(onerel, vacrelstats, irel, NULL, NULL, nindexes, oldestXmin);
-    }
-
-    /* Done with indexes */
-    if (RelationIsPartition(onerel)) {
-        vac_close_part_indexes(nindexes, nindexesGlobal, irel, indexrel, indexpart, NoLock);
-    } else {
-        vac_close_indexes(nindexes, irel, NoLock);
-    }
-
-    FreeSpaceMapVacuum(onerel);
-
-    for (int idx = 0; idx < nindexes; idx++) {
-        /* summarize the index status information */
-        if (indstats[idx] != NULL) {
-            pfree_ext(indstats[idx]);
-        }
-    }
+    LazyScanURel(onerel, vacrelstats, vacstmt, irel, nindexes, oldestXmin);

    /*
     * Update statistics in pg_class.
@@ -651,20 +670,90 @@ void LazyVacuumUHeapRel(Relation onerel, VacuumStmt *vacstmt, BufferAccessStrate
     * will lead to misbehavior when updating other usable index partitions --- the horrible
     * misjudgment as a hot update even if index columns are updated.
     */
-    vac_update_pgclass_partitioned_table(vacstmt->onepartrel, vacstmt->onepartrel->rd_rel->relhasindex,
-        newFrozenXid, InvalidMultiXactId);
+    if (!vacstmt->issubpartition) {
+        vac_update_pgclass_partitioned_table(vacstmt->onepartrel, vacstmt->onepartrel->rd_rel->relhasindex,
+            newFrozenXid, InvalidMultiXactId);
+    } else {
+        vac_update_pgclass_partitioned_table(vacstmt->parentpartrel, vacstmt->parentpartrel->rd_rel->relhasindex,
+            newFrozenXid, InvalidMultiXactId);
+    }
+
+    /* update stats of local partition indexes */
+    for (int idx = 0; idx < nindexes - nindexesGlobal; idx++) {
+        if (vacrelstats->idx_estimated[idx]) {
+            continue;
+        }
+
+        vac_update_partstats(indexpart[idx],
+            vacrelstats->new_idx_pages[idx],
+            vacrelstats->new_idx_tuples[idx],
+            0,
+            InvalidTransactionId,
+            InvalidMultiXactId);
+
+        vac_update_pgclass_partitioned_table(indexrel[idx], false, InvalidTransactionId, InvalidMultiXactId);
+    }
+
+    /* update stats of global partition indexes */
+    Assert((nindexes - nindexesGlobal) >= 0);
+    Relation classRel = heap_open(RelationRelationId, RowExclusiveLock);
+    for (int idx = nindexes - nindexesGlobal; idx < nindexes; idx++) {
+        if (vacrelstats->idx_estimated[idx]) {
+            continue;
+        }
+
+        vac_update_relstats(irel[idx],
+            classRel,
+            vacrelstats->new_idx_pages[idx],
+            vacrelstats->new_idx_tuples[idx],
+            0,
+            false,
+            InvalidTransactionId,
+            InvalidMultiXactId);
+    }
+    heap_close(classRel, RowExclusiveLock);
 } else {
     Relation classRel = heap_open(RelationRelationId, RowExclusiveLock);
     vac_update_relstats(onerel, classRel, newRelPages, newRelTuples, newRelAllvisible, nindexes > 0,
         newFrozenXid, InvalidMultiXactId);
+
+    for (int idx = 0; idx < nindexes; idx++) {
+        /* update index status */
+        if (vacrelstats->idx_estimated[idx]) {
+            continue;
+        }
+
+        vac_update_relstats(irel[idx],
+            classRel,
+            vacrelstats->new_idx_pages[idx],
+            vacrelstats->new_idx_tuples[idx],
+            0,
+            false,
+            InvalidTransactionId,
+            InvalidMultiXactId);
+    }
     heap_close(classRel, RowExclusiveLock);
 }

+    /* Done with indexes */
+    if (RelationIsPartition(onerel)) {
+        vac_close_part_indexes(nindexes, nindexesGlobal, irel, indexrel, indexpart, NoLock);
+    } else {
+        vac_close_indexes(nindexes, irel, NoLock);
+    }
+
    /* report results to the stats collector, too */
    newLiveTuples = newRelTuples - vacrelstats->new_dead_tuples;
    if (newLiveTuples < 0) {
        newLiveTuples = 0; /* just in case */
    }
+
+    if (nindexes > 0) {
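+        /* release the per-index statistics arrays palloc'd in LazyScanURel */
+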
pfree_ext(vacrelstats->new_idx_pages); + pfree_ext(vacrelstats->new_idx_tuples); + pfree_ext(vacrelstats->idx_estimated); + } + pgstat_report_vacuum(RelationGetRelid(onerel), onerel->parentId, onerel->rd_rel->relisshared, vacrelstats->tuples_deleted); diff --git a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp index 5a860d35a..674ae040a 100644 --- a/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp +++ b/src/gausskernel/storage/access/ustore/knl_uvisibility.cpp @@ -30,22 +30,23 @@ #include "access/ustore/knl_utils.h" #include "access/ustore/knl_uvisibility.h" #include "access/ustore/undo/knl_uundoapi.h" +#include "access/ustore/knl_whitebox_test.h" static UTupleTidOp UHeapTidOpFromInfomask(uint16 infomask); -static UVersionSelector UHeapTupleSatisfies(UTupleTidOp op, bool locked_only, Snapshot snapshot, +static UVersionSelector UHeapTupleSatisfies(UTupleTidOp op, Snapshot snapshot, UHeapTupleTransInfo *uinfo, int *snapshot_requests); static UVersionSelector UHeapSelectVersionMVCC(UTupleTidOp op, TransactionId xid, Snapshot snapshot); static UVersionSelector UHeapSelectVersionNow(UTupleTidOp op, TransactionId xid); static UVersionSelector UHeapCheckCID(UTupleTidOp op, CommandId tuple_cid, CommandId visibility_cid); static UVersionSelector UHeapSelectVersionSelf(UTupleTidOp op, TransactionId xid); -static UVersionSelector UHeapSelectVersionDirty(UTupleTidOp op, bool lockedOnly, const UHeapTupleTransInfo *uinfo, +static UVersionSelector UHeapSelectVersionDirty(UTupleTidOp op, const UHeapTupleTransInfo *uinfo, Snapshot snapshot, int *snapshotRequests); static UVersionSelector UHeapSelectVersionUpdate(UTupleTidOp op, TransactionId xid, CommandId visibility_cid); static bool GetTupleFromUndoRecord(UndoRecPtr urecPtr, TransactionId xid, Buffer buffer, OffsetNumber offnum, UHeapDiskTuple hdr, UHeapTuple *tuple, bool *freeTuple, UHeapTupleTransInfo *uinfo, ItemPointer ctid); static bool GetTupleFromUndo(UndoRecPtr urecAdd, UHeapTuple currentTuple, UHeapTuple *visibleTuple, Snapshot snapshot, - CommandId curcid, Buffer buffer, OffsetNumber offnum, ItemPointer ctid, int tdSlot); + CommandId curcid, Buffer buffer, OffsetNumber offnum, ItemPointer ctid, int tdSlot, bool isFlashBack = false); static void UHeapPageGetNewCtid(Buffer buffer, ItemPointer ctid, UHeapTupleTransInfo *xactinfo); @@ -66,9 +67,9 @@ static UHeapTupleStatus UHeapTupleGetStatus(const UHeapTuple utup) if (UHeapTupleHasMultiLockers(infomask)) { return UHEAPTUPLESTATUS_MULTI_LOCKED; } else if ((SINGLE_LOCKER_XID_IS_EXCL_LOCKED(infomask) || SINGLE_LOCKER_XID_IS_SHR_LOCKED(infomask)) && - TransactionIdIsNormal(locker) && !TransactionIdOlderThanAllUndo(locker)) { + TransactionIdIsNormal(locker) && !TransactionIdOlderThanFrozenXid(locker)) { return UHEAPTUPLESTATUS_LOCKED; // locked by select-for-update or select-for-share - } else if ((infomask & UHEAP_INPLACE_UPDATED) || (infomask & UHEAP_XID_LOCK_ONLY)) { + } else if (infomask & UHEAP_INPLACE_UPDATED) { return UHEAPTUPLESTATUS_INPLACE_UPDATED; // modified or locked by lock-for-update } else if ((infomask & (UHEAP_UPDATED | UHEAP_DELETED)) != 0) { return UHEAPTUPLESTATUS_DELETED; @@ -76,6 +77,20 @@ static UHeapTupleStatus UHeapTupleGetStatus(const UHeapTuple utup) return UHEAPTUPLESTATUS_INSERTED; } + +static TransactionId UDiskTupleGetModifiedXid(UHeapDiskTuple diskTup, Page page, bool *isTupXid) +{ + *isTupXid = false; + if (TransactionIdIsNormal(diskTup->xid) && !UHeapTupleHasMultiLockers(diskTup->flag)) { 
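+        /* the tuple header stores a short xid relative to the page's
+         * pd_xid_base; widen it back to a full 64-bit xid below, and report
+         * it as a modifying xid unless the flag says it is only a lock */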
+ UHeapPageHeaderData *upage = (UHeapPageHeaderData *)page; + if (!UHEAP_XID_IS_LOCK(diskTup->flag)) { + *isTupXid = true; + } + return ShortTransactionIdToNormal(upage->pd_xid_base, diskTup->xid); + } + return InvalidTransactionId; +} + /* * UHeapTupleSatisfies * @@ -87,7 +102,7 @@ static UHeapTupleStatus UHeapTupleGetStatus(const UHeapTuple utup) * This function can only handle certain types of snapshots; it is a helper * function for UHeapTupleFetch, not a general-purpose facility. */ -static UVersionSelector UHeapTupleSatisfies(UTupleTidOp op, bool locked_only, Snapshot snapshot, +static UVersionSelector UHeapTupleSatisfies(UTupleTidOp op, Snapshot snapshot, UHeapTupleTransInfo *uinfo, int *snapshot_requests) { UVersionSelector selector = UVERSION_NONE; @@ -113,7 +128,7 @@ static UVersionSelector UHeapTupleSatisfies(UTupleTidOp op, bool locked_only, Sn } else if (snapshot->satisfies == SNAPSHOT_SELF) { selector = UHeapSelectVersionSelf(op, uinfo->xid); } else if (snapshot->satisfies == SNAPSHOT_DIRTY) { - selector = UHeapSelectVersionDirty(op, locked_only, uinfo, snapshot, snapshot_requests); + selector = UHeapSelectVersionDirty(op, uinfo, snapshot, snapshot_requests); } else if (snapshot->satisfies == SNAPSHOT_TOAST) { /* * We can only get here when a toast tuple is pruned and rp is marked as deleted, @@ -127,6 +142,19 @@ static UVersionSelector UHeapTupleSatisfies(UTupleTidOp op, bool locked_only, Sn return selector; } +static TransactionId UHeapTupleGetModifiedXid(UHeapTuple tup, bool *isTupXid) +{ + *isTupXid = false; + if (TransactionIdIsNormal(tup->disk_tuple->xid) && !UHeapTupleHasMultiLockers(tup->disk_tuple->flag)) { + if (!UHEAP_XID_IS_LOCK(tup->disk_tuple->flag)) { + *isTupXid = true; + } + return ShortTransactionIdToNormal(tup->t_xid_base, tup->disk_tuple->xid); + } + + return InvalidTransactionId; +} + /* * UHeapTupleSatisfiesVisibility * True iff uheap tuple satisfies a time qual. @@ -153,13 +181,16 @@ bool UHeapTupleSatisfiesVisibility(UHeapTuple uhtup, Snapshot snapshot, Buffer b UTupleTidOp op; TransactionId fxid = GetTopTransactionIdIfAny(); TransactionId lockerXid = InvalidTransactionId; + TransactionId tupXid = InvalidTransactionId; Page dp = BufferGetPage(buffer); bool tupleIsExclusivelyLocked = false; UHeapTupleTransInfo tdinfo; int tdSlot; - bool lockedOnly = false; bool isInvalidSlot = false; bool haveTransInfo = false; + bool isTupXid = false; + bool isFlashBack = IsVersionMVCCSnapshot(snapshot) ? 
true : false; + UndoTraversalState state = UNDO_TRAVERSAL_DEFAULT; OffsetNumber offnum = ItemPointerGetOffsetNumber(&uhtup->ctid); RowPtr *rp = UPageGetRowPtr(dp, offnum); /* @@ -175,6 +206,8 @@ bool UHeapTupleSatisfiesVisibility(UHeapTuple uhtup, Snapshot snapshot, Buffer b if (tdSlot > UHEAP_MAX_TD || tdSlot < 0) { ereport(PANIC, (errmsg("Normal rowptr, td_info abnormal, is %d", tdSlot))); } + UHeapTupleCopyBaseFromPage(utuple, dp); + tupXid = UHeapTupleGetModifiedXid(utuple, &isTupXid); isInvalidSlot = UHeapTupleHasInvalidXact(utuple->disk_tuple->flag); } else if (RowPtrIsDeleted(rp)) { utuple = NULL; @@ -195,15 +228,15 @@ bool UHeapTupleSatisfiesVisibility(UHeapTuple uhtup, Snapshot snapshot, Buffer b /* Get the current TD information on the current page */ GetTDSlotInfo(buffer, tdSlot, &tdinfo); if (tdinfo.td_slot != UHEAPTUP_SLOT_FROZEN) { - if (TransactionIdIsNormal(fxid) && IsMVCCSnapshot(snapshot) && + if (utuple != NULL && TransactionIdIsNormal(fxid) && IsMVCCSnapshot(snapshot) && SINGLE_LOCKER_XID_IS_EXCL_LOCKED(utuple->disk_tuple->flag)) { Assert(UHEAP_XID_IS_EXCL_LOCKED(utuple->disk_tuple->flag)); - UHeapTupleCopyBaseFromPage(utuple, dp); lockerXid = UHeapTupleGetRawXid(utuple); tupleIsExclusivelyLocked = true; } - uint64 oldestXidHavingUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); - if (TransactionIdIsValid(tdinfo.xid) && TransactionIdPrecedes(tdinfo.xid, oldestXidHavingUndo)) { + uint64 oldestFrozenXid = isFlashBack ? pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo) : + pg_atomic_read_u64(&g_instance.undo_cxt.oldestFrozenXid); + if (TransactionIdIsValid(tdinfo.xid) && TransactionIdPrecedes(tdinfo.xid, oldestFrozenXid)) { /* The slot is old enough that we can treat it as frozen. */ tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; } else if (tupleIsExclusivelyLocked && TransactionIdEquals(fxid, lockerXid)) { @@ -211,38 +244,55 @@ bool UHeapTupleSatisfiesVisibility(UHeapTuple uhtup, Snapshot snapshot, Buffer b tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; } else if (isInvalidSlot) { TransactionIdStatus hintstatus; + /* * The slot has been reused, but we can still skip reading the * undo if the XID we got from the transaction slot is visible to * our snapshot. The real XID has to have committed before that * one, so it will be visible to our snapshot as well. 
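 * If the slot XID is not visible either, the xid kept in the tuple header
 * (tupXid) gives one more cheap chance to decide before we fall back to a
 * full undo-chain traversal.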
*/ - if (TransactionIdIsValid(tdinfo.xid) && IsMVCCSnapshot(snapshot) && - XidVisibleInSnapshot(tdinfo.xid, snapshot, &hintstatus, InvalidBuffer, NULL)) { + if (IsMVCCSnapshot(snapshot) && TransactionIdIsValid(tdinfo.xid) && + UHeapXidVisibleInSnapshot(tdinfo.xid, snapshot, &hintstatus, InvalidBuffer, NULL)) { + tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } else if (TransactionIdIsValid(tupXid) && isTupXid) { + Assert(TransactionIdDidCommit(tupXid)); + tdinfo.xid = tupXid; + if (tdinfo.xid < oldestFrozenXid) { + tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } + } else if (IsMVCCSnapshot(snapshot) && TransactionIdIsValid(tupXid) && + UHeapXidVisibleInSnapshot(tupXid, snapshot, &hintstatus, InvalidBuffer, NULL)) { tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; } else { /* Fetch the tuple's transaction information from the undo */ - FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), ItemPointerGetOffsetNumber(&utuple->ctid), - InvalidTransactionId, &tdinfo, NULL); - haveTransInfo = true; - if (tdinfo.xid < oldestXidHavingUndo) { + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), ItemPointerGetOffsetNumber(&utuple->ctid), + InvalidTransactionId, &tdinfo, NULL, !isFlashBack); + if (state == UNDO_TRAVERSAL_COMPLETE) { + haveTransInfo = true; + } else if (state == UNDO_TRAVERSAL_STOP || state == UNDO_TRAVERSAL_END) { tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } else if (state == UNDO_TRAVERSAL_ABORT) { + if (IsMVCCSnapshot(snapshot) && TransactionIdIsValid(tdinfo.xid) && + UHeapXidVisibleInSnapshot(tdinfo.xid, snapshot, &hintstatus, InvalidBuffer, NULL)) { + tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } else { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discard."))); + } } } } } if (utuple) { op = UHeapTidOpFromInfomask(utuple->disk_tuple->flag); - lockedOnly = UHEAP_XID_IS_LOCKED_ONLY(utuple->disk_tuple->flag); } else { op = UTUPLETID_GONE; - lockedOnly = false; } - uheapselect = UHeapTupleSatisfies(op, lockedOnly, snapshot, &tdinfo, &snapshotRequests); + uheapselect = UHeapTupleSatisfies(op, snapshot, &tdinfo, &snapshotRequests); if (uheapselect == UVERSION_CHECK_CID) { if (!haveTransInfo) { - FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), ItemPointerGetOffsetNumber(&utuple->ctid), - InvalidTransactionId, &tdinfo, NULL); + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), ItemPointerGetOffsetNumber(&utuple->ctid), + InvalidTransactionId, &tdinfo, NULL, false); + Assert(state != UNDO_TRAVERSAL_ABORT); haveTransInfo = true; } if (tdinfo.cid == InvalidCommandId) @@ -376,7 +426,7 @@ static UVersionSelector UHeapSelectVersionSelf(UTupleTidOp op, TransactionId xid * transaction or is in-progress or is committed. */ -static UVersionSelector UHeapSelectVersionDirty(const UTupleTidOp op, const bool lockedOnly, +static UVersionSelector UHeapSelectVersionDirty(const UTupleTidOp op, const UHeapTupleTransInfo *uinfo, Snapshot snapshot, int *snapshotRequests) { if (op == UTUPLETID_GONE) { @@ -497,7 +547,11 @@ static void UHeapPageGetNewCtid(Buffer buffer, ItemPointer ctid, UHeapTupleTrans GetTDSlotInfo(buffer, tdSlot, xactinfo); /* Get new ctid from undo */ - FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, xactinfo, ctid); + UndoTraversalState state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, + InvalidTransactionId, xactinfo, ctid, false); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! 
the undo record has been force discard."))); + } } static void UHeapTupleGetSubXid(Buffer buffer, OffsetNumber offnum, UndoRecPtr urecptr, SubTransactionId *subxid) @@ -508,9 +562,9 @@ static void UHeapTupleGetSubXid(Buffer buffer, OffsetNumber offnum, UndoRecPtr u *subxid = InvalidSubTransactionId; urec->SetUrp(urecptr); - int rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, BufferGetBlockNumber(buffer), offnum, + UndoTraversalState rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, BufferGetBlockNumber(buffer), offnum, InvalidTransactionId); - if (rc == UNDO_RET_FAIL) { + if (rc != UNDO_TRAVERSAL_COMPLETE) { goto out; } @@ -571,14 +625,17 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot int snapshotRequests = 0; UHeapTuple utuple = NULL; UTupleTidOp op; - bool lockedOnly, isInvalidSlot; + bool isInvalidSlot; bool isFrozen = false; bool haveTransInfo = false; UVersionSelector uheapselect = UVERSION_NONE; UHeapTupleTransInfo tdinfo; bool valid = true; bool tupleFromUndo = false; - + bool isTupXid = false; + TransactionId tupXid = InvalidTransactionId; + bool isFlashBack = IsVersionMVCCSnapshot(snapshot) ? true : false; + UndoTraversalState state = UNDO_TRAVERSAL_DEFAULT; Snapshot savedSnapshot = NULL; if (snapshot->satisfies == SNAPSHOT_DELTA) { savedSnapshot = snapshot; @@ -607,7 +664,8 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot Assert(utuple->tupTableType == UHEAP_TUPLE); tdSlot = UHeapTupleHeaderGetTDSlot(utuple->disk_tuple); isInvalidSlot = UHeapTupleHasInvalidXact(utuple->disk_tuple->flag); - + UHeapTupleCopyBaseFromPage(utuple, dp); + tupXid = UHeapTupleGetModifiedXid(utuple, &isTupXid); savedTdSlot = tdSlot; } else if (RowPtrIsDeleted(rp)) { utuple = NULL; @@ -643,9 +701,11 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot GetTDSlotInfo(buffer, tdSlot, &tdinfo); if (tdinfo.td_slot != UHEAPTUP_SLOT_FROZEN) { - uint64 oldestXidHavingUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + uint64 oldestRecycleXidHavingUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); + uint64 oldestXidHavingUndo = isFlashBack ? oldestRecycleXidHavingUndo : + pg_atomic_read_u64(&g_instance.undo_cxt.oldestFrozenXid); if (TransactionIdIsValid(tdinfo.xid) && TransactionIdPrecedes(tdinfo.xid, oldestXidHavingUndo)) { - isFrozen = true; + isFrozen = TransactionIdPrecedes(tdinfo.xid, oldestRecycleXidHavingUndo) ? true : false; /* The slot is old enough that we can treat it as frozen. */ tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; @@ -662,24 +722,46 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot * one, so it will be visible to our snapshot as well. 
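 * If the undo chain itself has been force-discarded, visibility can no
 * longer be decided here; the UNDO_TRAVERSAL_ABORT branch below raises a
 * snapshot-too-old error instead.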
*/ if (TransactionIdIsValid(tdinfo.xid) && (snapshot->satisfies == SNAPSHOT_NOW || (IsMVCCSnapshot(snapshot) && - XidVisibleInSnapshot(tdinfo.xid, snapshot, &hintstatus, InvalidBuffer, NULL)))) { + UHeapXidVisibleInSnapshot(tdinfo.xid, snapshot, &hintstatus, InvalidBuffer, NULL)))) { isFrozen = false; tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; #ifdef DEBUG_UHEAP UHEAPSTAT_COUNT_VISIBILITY_CHECK_WITH_XID(VISIBILITY_CHECK_SUCCESS_INVALID_SLOT); #endif - } else { - /* Fetch the tuple's transaction information from the undo */ - FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, &tdinfo, newCtid); - haveTransInfo = true; - savedTdSlot = tdinfo.td_slot; + } else if (TransactionIdIsValid(tupXid) && isTupXid) { + if (unlikely(!TransactionIdDidCommit(tupXid))) { + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, + InvalidTransactionId, &tdinfo, newCtid, !isFlashBack); + } else { + tdinfo.xid = tupXid; + } if (tdinfo.xid < oldestXidHavingUndo) { - isFrozen = true; + isFrozen = TransactionIdPrecedes(tdinfo.xid, oldestRecycleXidHavingUndo) ? true : false; tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; } -#ifdef DEBUG_UHEAP - UHEAPSTAT_COUNT_VISIBILITY_CHECK_WITH_XID(VISIBILITY_CHECK_SUCCESS_UNDO); -#endif + } else if (IsMVCCSnapshot(snapshot) && TransactionIdIsValid(tupXid) && + UHeapXidVisibleInSnapshot(tupXid, snapshot, &hintstatus, InvalidBuffer, NULL)) { + isFrozen = false; + tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } else { + /* Fetch the tuple's transaction information from the undo */ + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, + &tdinfo, newCtid, !isFlashBack); + if (state == UNDO_TRAVERSAL_COMPLETE) { + savedTdSlot = tdinfo.td_slot; + haveTransInfo = true; + } else if (state == UNDO_TRAVERSAL_END) { + isFrozen = true; + } else if (state == UNDO_TRAVERSAL_STOP) { + tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } else if (state == UNDO_TRAVERSAL_ABORT) { + if (IsMVCCSnapshot(snapshot) && TransactionIdIsValid(tdinfo.xid) && + UHeapXidVisibleInSnapshot(tdinfo.xid, snapshot, &hintstatus, InvalidBuffer, NULL)) { + tdinfo.td_slot = UHEAPTUP_SLOT_FROZEN; + } else { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discard."))); + } + } } } } else { @@ -699,20 +781,19 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot /* Attempt to make a visibility determination. */ if (utuple == NULL) { op = UTUPLETID_GONE; - lockedOnly = false; } else { uint16 infomask = utuple->disk_tuple->flag; - op = UHeapTidOpFromInfomask(infomask); - lockedOnly = UHEAP_XID_IS_LOCKED_ONLY(infomask); } - uheapselect = UHeapTupleSatisfies(op, lockedOnly, snapshot, &tdinfo, &snapshotRequests); + uheapselect = UHeapTupleSatisfies(op, snapshot, &tdinfo, &snapshotRequests); /* If necessary, check CID against snapshot. 
*/ if (uheapselect == UVERSION_CHECK_CID) { /* UHeapUNDO : Fetch the tuple's transaction information from the undo */ if (!haveTransInfo) { - FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, &tdinfo, newCtid); + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, + &tdinfo, newCtid, false); + Assert(state != UNDO_TRAVERSAL_ABORT); haveTransInfo = true; savedTdSlot = tdinfo.td_slot; } @@ -738,7 +819,7 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot } GetTupleFromUndo(tdinfo.urec_add, utuple, &priorTuple, snapshot, snapshot->curcid, buffer, offnum, newCtid, - tdinfo.td_slot); + tdinfo.td_slot, isFlashBack); if (utuple != NULL && utuple != priorTuple && !savedTuple) pfree(utuple); @@ -784,7 +865,11 @@ bool UHeapTupleFetch(Relation rel, Buffer buffer, OffsetNumber offnum, Snapshot if (newCtid && !haveTransInfo && (uheapselect == UVERSION_NONE || snapshot->satisfies == SNAPSHOT_ANY) && utuple != NULL && !UHeapTupleIsMoved(utuple->disk_tuple->flag) && UHeapTupleIsUpdated(utuple->disk_tuple->flag)) { - FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, &tdinfo, newCtid); + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buffer), offnum, InvalidTransactionId, + &tdinfo, newCtid, false); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discard."))); + } haveTransInfo = true; savedTdSlot = tdinfo.td_slot; } @@ -824,13 +909,21 @@ out: static UTupleTidOp UHeapTidOpFromInfomask(uint16 infomask) { - if ((infomask & (UHEAP_INPLACE_UPDATED | UHEAP_XID_LOCK_ONLY)) != 0) + if ((infomask & (UHEAP_INPLACE_UPDATED)) != 0) return UTUPLETID_MODIFIED; if ((infomask & (UHEAP_UPDATED | UHEAP_DELETED)) != 0) return UTUPLETID_GONE; return UTUPLETID_NEW; } +static void FronzenTDInfo(UHeapTupleTransInfo *txactinfo) +{ + txactinfo->xid = InvalidTransactionId; + txactinfo->cid = InvalidCommandId; + txactinfo->urec_add = INVALID_UNDO_REC_PTR; + txactinfo->td_slot = UHEAPTUP_SLOT_FROZEN; +} + /* * UHeapTupleSatisfiesUpdate * @@ -873,12 +966,14 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer bool doFetchCid = false; bool fetchSubXid = false; bool hasCtid = false; - UHeapTupleTransInfo lockerTDInfo; + bool isTupXid = false; + TransactionId tupXid = InvalidTransactionId; TM_Result result = TM_Invisible; *lockerXid = InvalidTransactionId; *updateSubXid = InvalidTransactionId; *lockerSubXid = InvalidSubTransactionId; uint32 needSync = 0; + UndoTraversalState state = UNDO_TRAVERSAL_DEFAULT; Assert(utuple->tupTableType == UHEAP_TUPLE); @@ -886,7 +981,9 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer utuple->ctid = *tid; *inplaceUpdated = false; - *ctid = *tid; + if (ctid != NULL) { + *ctid = *tid; + } if (RowPtrIsDeleted(rp)) { utuple->disk_tuple = NULL; @@ -914,19 +1011,30 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer GetTDSlotInfo(buffer, tdSlot, tdinfo); if (UHeapTupleHasInvalidXact(tupleData->flag)) { + tupXid = UHeapTupleGetModifiedXid(utuple, &isTupXid); + /* If slot has been reused, then fetch the transaction information from the Undo */ if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || (TransactionIdIsValid(tdinfo->xid) && TransactionIdOlderThanAllUndo(tdinfo->xid))) { - tdinfo->td_slot = UHEAPTUP_SLOT_FROZEN; - tdinfo->xid = InvalidTransactionId; - tdinfo->cid = InvalidCommandId; - tdinfo->urec_add = 
INVALID_UNDO_REC_PTR; + FronzenTDInfo(tdinfo); + } else if (TransactionIdIsValid(tupXid) && isTupXid) { + if (unlikely(!TransactionIdDidCommit(tupXid))) { + state = FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid, false); + } else { + tdinfo->xid = tupXid; + } + } else if (TransactionIdIsValid(tupXid) && TransactionIdOlderThanAllUndo(tupXid)) { + FronzenTDInfo(tdinfo); } else { - FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid); + state = FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid, false); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discard."))); + } hasCtid = true; } } else if (doFetchCid && TransactionIdIsCurrentTransactionId(tdinfo->xid)) { - FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid); + state = FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid, false); + Assert(state != UNDO_TRAVERSAL_ABORT); hasCtid = true; } @@ -951,7 +1059,7 @@ restart: /* Since we can lock the tuple, tuple must be visible to us */ return TM_Ok; } else { - if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanAllUndo(tdinfo->xid)) { + if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanFrozenXid(tdinfo->xid)) { elog(DEBUG5, "UHeapTupleSatisfiesUpdate[FROZEN], multixact %ld, multixidIsMyself %d", UHeapTupleGetRawXid(utuple), multixidIsMyself); return TM_BeingModified; @@ -1000,7 +1108,7 @@ restart: // no active locker on tuple, since we have acquired exclusive lock on buffer, simply clear the locker tdid UHeapTupleHeaderClearSingleLocker(tupleData); - if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanAllUndo(tdinfo->xid)) { + if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanFrozenXid(tdinfo->xid)) { result = TM_Ok; } else if (UHeapTransactionIdDidCommit(tdinfo->xid)) { if (TransactionIdIsValid(conflictXid) && isUpsert) { @@ -1042,41 +1150,10 @@ restart: // tuple is inplace updated *inplaceUpdated = true; - if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanAllUndo(tdinfo->xid)) { - if (UHEAP_XID_IS_LOCKED_ONLY(tupleData->flag)) { - // There exists an exclusive lock on tuple, then fetch it - GetTDSlotInfo(buffer, UHeapTupleHeaderGetLockerTDSlot(tupleData), &lockerTDInfo); - - Assert(TransactionIdIsValid(lockerTDInfo.xid)); - - if (lockerTDInfo.td_slot == UHEAPTUP_SLOT_FROZEN || - TransactionIdOlderThanAllUndo(lockerTDInfo.xid)) { - result = TM_Ok; - } else if (TransactionIdIsCurrentTransactionId(lockerTDInfo.xid)) { - *lockerXid = lockerTDInfo.xid; - result = TM_Ok; // locked-for-update by ourself - } else if (TransactionIdIsInProgress(lockerTDInfo.xid)) { - *lockerXid = lockerTDInfo.xid; - result = TM_BeingModified; - fetchSubXid = true; - } else if (UHeapTransactionIdDidCommit(lockerTDInfo.xid)) { - result = TM_Ok; - } else { - *lockerXid = lockerTDInfo.xid; - result = TM_BeingModified; - } - } else { - // frozen and no lockers on tuple - result = TM_Ok; - } + if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanFrozenXid(tdinfo->xid)) { + result = TM_Ok; } else if (TransactionIdIsCurrentTransactionId(tdinfo->xid)) { - if (UHEAP_XID_IS_LOCKED_ONLY(tupleData->flag)) { - GetTDSlotInfo(buffer, UHeapTupleHeaderGetLockerTDSlot(tupleData), &lockerTDInfo); - Assert(TransactionIdEquals(tdinfo->xid, lockerTDInfo.xid)); - - *lockerXid = lockerTDInfo.xid; - result = TM_Ok; - } else if (doFetchCid && 
tdinfo->cid > cid) { + if (doFetchCid && tdinfo->cid > cid) { result = TM_SelfModified; } else if (doFetchCid && tdinfo->cid == cid && !selfVisible) { result = TM_SelfUpdated; @@ -1087,50 +1164,20 @@ restart: result = TM_BeingModified; fetchSubXid = true; } else if (UHeapTransactionIdDidCommit(tdinfo->xid)) { - bool checkVisibility = true; - - if (UHEAP_XID_IS_LOCKED_ONLY(tupleData->flag)) { - GetTDSlotInfo(buffer, UHeapTupleHeaderGetLockerTDSlot(tupleData), &lockerTDInfo); - - Assert(TransactionIdIsValid(lockerTDInfo.xid)); - - checkVisibility = false; - - // current tuple has been locked by us - if (lockerTDInfo.td_slot == UHEAPTUP_SLOT_FROZEN || - TransactionIdOlderThanAllUndo(lockerTDInfo.xid)) { - checkVisibility = true; - } else if (TransactionIdIsCurrentTransactionId(lockerTDInfo.xid)) { - *lockerXid = lockerTDInfo.xid; - result = TM_Ok; // locked-for-update by ourself - } else if (TransactionIdIsInProgress(lockerTDInfo.xid)) { - *lockerXid = lockerTDInfo.xid; - result = TM_BeingModified; - fetchSubXid = true; - } else if (UHeapTransactionIdDidCommit(lockerTDInfo.xid)) { - checkVisibility = true; - } else { - *lockerXid = lockerTDInfo.xid; - result = TM_BeingModified; - } + if (TransactionIdIsValid(conflictXid) && isUpsert) { + bool isUpdatedByOthers = (conflictXid != tdinfo->xid); + return isUpdatedByOthers ? TM_Updated : TM_Ok; } - - if (checkVisibility) { - if (TransactionIdIsValid(conflictXid) && isUpsert) { - bool isUpdatedByOthers = (conflictXid != tdinfo->xid); - return isUpdatedByOthers ? TM_Updated : TM_Ok; - } - if (avoidVisCheck || CommittedXidVisibleInSnapshot(tdinfo->xid, snapshot, buffer)) { - result = TM_Ok; - } else { - result = TM_Updated; - } + if (avoidVisCheck || CommittedXidVisibleInSnapshot(tdinfo->xid, snapshot, buffer)) { + result = TM_Ok; + } else { + result = TM_Updated; } } else { // aborted result = TM_BeingModified; } } else { - if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanAllUndo(tdinfo->xid)) { + if (tdinfo->td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanFrozenXid(tdinfo->xid)) { result = TM_Ok; } else if (TransactionIdIsCurrentTransactionId(tdinfo->xid)) { if (doFetchCid && tdinfo->cid > cid) { @@ -1156,7 +1203,10 @@ restart: // fetch ctid when tuple is non-inplace updated if (ctid && !hasCtid && UHeapTupleIsUpdated(tupleData->flag) && !UHeapTupleIsMoved(tupleData->flag)) { - FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid); + state = FetchTransInfoFromUndo(blocknum, offnum, InvalidTransactionId, tdinfo, ctid, false); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discard."))); + } if (ctid->ip_posid == 0) { elog(PANIC, "test"); } @@ -1164,18 +1214,13 @@ restart: // fetch subxid if any from undo if (fetchSubXid) { - if (TransactionIdIsNormal(*lockerXid)) { - Assert(TransactionIdEquals(*lockerXid, lockerTDInfo.xid)); - UHeapTupleGetSubXid(buffer, offnum, lockerTDInfo.urec_add, lockerSubXid); - } else { - UHeapTupleGetSubXid(buffer, offnum, tdinfo->urec_add, updateSubXid); - } + Assert(!TransactionIdIsValid(*lockerXid)); + UHeapTupleGetSubXid(buffer, offnum, tdinfo->urec_add, updateSubXid); } return result; } - /* * UHeapTupleGetTransInfo - Retrieve transaction information of transaction * that has modified the tuple. @@ -1192,11 +1237,14 @@ restart: * value in undo record, otherwise, that can break the visibility for * other concurrent session holding old snapshot. 
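 * The return value reports how the undo traversal ended. A typical caller
 * (a sketch only; compare UHeapTupleGetTransXid further below):
 *
 *     UndoTraversalState s = UHeapTupleGetTransInfo(buf, offnum, &txinfo);
 *     if (s == UNDO_TRAVERSAL_ABORT)
 *         ereport(ERROR, ...);   // undo force-discarded: snapshot too old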
*/ -void UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo *txactinfo) +UndoTraversalState UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo *txactinfo) { RowPtr *rp; Page page; bool isInvalidSlot = false; + bool isTupXid = false; + TransactionId tupXid = InvalidTransactionId; + UndoTraversalState state = UNDO_TRAVERSAL_DEFAULT; page = BufferGetPage(buf); rp = UPageGetRowPtr(page, offnum); @@ -1204,6 +1252,7 @@ void UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo if (!RowPtrIsDeleted(rp)) { UHeapDiskTuple hdr = (UHeapDiskTuple)UPageGetRowData(page, rp); txactinfo->td_slot = UHeapTupleHeaderGetTDSlot(hdr); + tupXid = UDiskTupleGetModifiedXid(hdr, page, &isTupXid); if (UHeapTupleHasInvalidXact(hdr->flag)) isInvalidSlot = true; } else { @@ -1224,7 +1273,7 @@ void UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo * is in active state and the entry's xid is older than oldestXidInUndo */ if (txactinfo->td_slot == UHEAPTUP_SLOT_FROZEN) - return; + return state; /* * We need to fetch all the transaction related information from undo @@ -1239,14 +1288,22 @@ void UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo * must be all-visible. */ if (TransactionIdIsValid(txactinfo->xid) && TransactionIdOlderThanAllUndo(txactinfo->xid)) { - txactinfo->td_slot = UHEAPTUP_SLOT_FROZEN; - txactinfo->xid = InvalidTransactionId; - txactinfo->cid = InvalidCommandId; - return; + FronzenTDInfo(txactinfo); + } else if (TransactionIdIsValid(tupXid) && isTupXid) { + if (unlikely(!TransactionIdDidCommit(tupXid))) { + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buf), offnum, + InvalidTransactionId, txactinfo, NULL, false); + } else { + txactinfo->xid = tupXid; + } + } else if (TransactionIdIsValid(tupXid) && TransactionIdOlderThanAllUndo(tupXid)) { + FronzenTDInfo(txactinfo); + } else { + state = FetchTransInfoFromUndo(BufferGetBlockNumber(buf), offnum, + InvalidTransactionId, txactinfo, NULL, false); } - - FetchTransInfoFromUndo(BufferGetBlockNumber(buf), offnum, InvalidTransactionId, txactinfo, NULL); } + return state; } /* @@ -1294,7 +1351,10 @@ TransactionId UHeapTupleGetTransXid(UHeapTuple uhtup, Buffer buf, bool nobuflock } } - UHeapTupleGetTransInfo(buf, offnum, &txactinfo); + UndoTraversalState state = UHeapTupleGetTransInfo(buf, offnum, &txactinfo); + if (state == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discard."))); + } /* Release any buffer lock we acquired. */ if (nobuflock) @@ -1326,7 +1386,7 @@ TransactionId UHeapTupleGetTransXid(UHeapTuple uhtup, Buffer buf, bool nobuflock * corresponding item id as dead. Because, when undo action for the same will * be performed, we need the item pointer. 
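 * If the undo traversal aborts because the undo has been force-discarded,
 * we err on the safe side below: a deleted tuple is reported as
 * UHEAPTUPLE_RECENTLY_DEAD and anything else as UHEAPTUPLE_LIVE, so vacuum
 * never removes a tuple whose history it cannot prove dead.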
*/ -UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId OldestXmin, Buffer buffer, +UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId oldestXmin, Buffer buffer, bool resolve_abort_in_progress, UHeapTuple *preabort_tuple, TransactionId *xid, SubTransactionId *subxid) { UHeapDiskTuple tuple = uhtup->disk_tuple; @@ -1340,7 +1400,7 @@ UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId Oldest Assert(uhtup->table_oid != InvalidOid); /* Get transaction id */ - UHeapTupleGetTransInfo(buffer, offnum, &uinfo); + UndoTraversalState state = UHeapTupleGetTransInfo(buffer, offnum, &uinfo); *xid = uinfo.xid; if ((tuple->flag & UHEAP_DELETED) || (tuple->flag & UHEAP_UPDATED)) { @@ -1349,6 +1409,11 @@ UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId Oldest * slot is cleared or latest xid that has changed the tuple precedes * smallest xid that has undo. */ + if (state == UNDO_TRAVERSAL_ABORT) { + *xid = InvalidTransactionId; + return UHEAPTUPLE_RECENTLY_DEAD; + } + if (uinfo.td_slot == UHEAPTUP_SLOT_FROZEN || TransactionIdOlderThanAllUndo(uinfo.xid)) { return UHEAPTUPLE_DEAD; } @@ -1371,7 +1436,7 @@ UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId Oldest * Deleter committed, but perhaps it was recent enough that some * open transactions could still see the tuple. */ - if (!TransactionIdPrecedes(uinfo.xid, OldestXmin)) { + if (!TransactionIdPrecedes(uinfo.xid, oldestXmin)) { return UHEAPTUPLE_RECENTLY_DEAD; } @@ -1413,20 +1478,12 @@ UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId Oldest return UHEAPTUPLE_DEAD; } } - } else if (tuple->flag & UHEAP_XID_LOCK_ONLY) { - /* - * We can't take any decision if the tuple is marked as locked-only. - * It's possible that inserted transaction took a lock on the tuple - * Later, if it rolled back, we should return HEAPTUPLE_DEAD, or if - * it's still in progress, we should return - * HEAPTUPLE_INSERT_IN_PROGRESS. Similarly, if the inserted - * transaction got committed, we should return HEAPTUPLE_LIVE. The - * subsequent checks already takes care of all these possible - * scenarios, so we don't need any extra checks here. - */ } - /* The tuple is either a newly inserted tuple or is in-place updated. */ + if (state == UNDO_TRAVERSAL_ABORT) { + *xid = InvalidTransactionId; + return UHEAPTUPLE_LIVE; + } /* * The tuple must be all visible if the transaction slot is cleared or @@ -1498,8 +1555,8 @@ UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple uhtup, TransactionId Oldest return UHEAPTUPLE_LIVE; } -void FetchTransInfoFromUndo(BlockNumber blocknum, OffsetNumber offnum, TransactionId xid, - UHeapTupleTransInfo *txactinfo, ItemPointer newCtid) +UndoTraversalState FetchTransInfoFromUndo(BlockNumber blocknum, OffsetNumber offnum, TransactionId xid, + UHeapTupleTransInfo *txactinfo, ItemPointer newCtid, bool needByPass) { UndoRecord *urec = New(CurrentMemoryContext)UndoRecord(); @@ -1511,25 +1568,14 @@ void FetchTransInfoFromUndo(BlockNumber blocknum, OffsetNumber offnum, Transacti if (newCtid) ItemPointerSet(newCtid, blocknum, offnum); - while (1) { - urec->Reset(txactinfo->urec_add); - int rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, blocknum, offnum, xid); - /* The undo record has been discarded. It should be all-visible. 
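+     * UNDO_TRAVERSAL_STOP is treated the same way: the record's xid already
+     * precedes oldestFrozenXid, so the slot can safely be reported as frozen.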
*/ - if (rc == UNDO_RET_FAIL) { - txactinfo->xid = InvalidTransactionId; - txactinfo->cid = InvalidCommandId; - txactinfo->urec_add = INVALID_UNDO_REC_PTR; - txactinfo->td_slot = UHEAPTUP_SLOT_FROZEN; - goto out; - } - - /* Transaction information for lock-only will be saved in locker td slot. */ - if (urec->Utype() != UNDO_XID_LOCK_ONLY && urec->Utype() != UNDO_XID_MULTI_LOCK_ONLY) { - break; - } - - txactinfo->urec_add = urec->Blkprev(); - xid = InvalidTransactionId; + urec->Reset(txactinfo->urec_add); + UndoTraversalState rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, blocknum, offnum, xid, needByPass); + /* The undo record has been discarded. It should be all-visible. */ + if (rc == UNDO_TRAVERSAL_END || rc == UNDO_TRAVERSAL_STOP) { + FronzenTDInfo(txactinfo); + goto out; + } else if (rc != UNDO_TRAVERSAL_COMPLETE) { + goto out; } txactinfo->xid = urec->Xid(); @@ -1547,6 +1593,7 @@ void FetchTransInfoFromUndo(BlockNumber blocknum, OffsetNumber offnum, Transacti out: DELETE_EX(urec); + return rc; } @@ -1559,6 +1606,7 @@ bool UHeapTupleIsSurelyDead(UHeapTuple uhtup, Buffer buffer, OffsetNumber offnum const UHeapTupleTransInfo *cachedTdInfo, const bool useCachedTdInfo) { UHeapTupleTransInfo tdinfo; + UndoTraversalState state = UNDO_TRAVERSAL_DEFAULT; if (uhtup != NULL && UHeapTidOpFromInfomask(uhtup->disk_tuple->flag) != UTUPLETID_GONE) return false; @@ -1567,11 +1615,15 @@ bool UHeapTupleIsSurelyDead(UHeapTuple uhtup, Buffer buffer, OffsetNumber offnum * Get transaction information. * Here we use a cached transaction information if there is any. */ - if (useCachedTdInfo) + if (useCachedTdInfo) { tdinfo = *cachedTdInfo; - else - UHeapTupleGetTransInfo(buffer, offnum, &tdinfo); + } else { + state = UHeapTupleGetTransInfo(buffer, offnum, &tdinfo); + } + if (state == UNDO_TRAVERSAL_ABORT) { + return false; + } /* * The tuple is deleted and must be all visible if the transaction slot is * cleared or latest xid that has changed the tuple precedes smallest xid @@ -1590,8 +1642,10 @@ static bool GetTupleFromUndoRecord(UndoRecPtr urecPtr, TransactionId xid, Buffer UndoRecord *urec = New(CurrentMemoryContext)UndoRecord(); urec->SetUrp(urecPtr); - int rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, BufferGetBlockNumber(buffer), offnum, xid); - if (rc != UNDO_RET_SUCC) { + UndoTraversalState rc = FetchUndoRecord(urec, InplaceSatisfyUndoRecord, BufferGetBlockNumber(buffer), offnum, xid); + if (rc == UNDO_TRAVERSAL_ABORT) { + ereport(ERROR, (errmsg("snapshot too old! the undo record has been force discarded."))); + } else if (rc != UNDO_TRAVERSAL_COMPLETE) { return false; } @@ -1602,7 +1656,10 @@ static bool GetTupleFromUndoRecord(UndoRecPtr urecPtr, TransactionId xid, Buffer * new tuple. If the caller wants it, extract it.
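* For UNDO_UPDATE and UNDO_DELETE records the payload (urec->Rawdata()) carries the prior version of the tuple, which the code below uses to reconstruct that older version for the caller.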
*/ int undotype = urec->Utype(); - if (tuple != NULL && (undotype == UNDO_UPDATE || undotype == UNDO_DELETE)) { + + Assert(tuple != NULL); + + if (undotype == UNDO_UPDATE || undotype == UNDO_DELETE) { StringInfo urecPayload = urec->Rawdata(); Assert(urecPayload); @@ -1772,7 +1829,7 @@ static inline UVersionSelector UHeapCheckUndoSnapshot(Snapshot snapshot, UHeapTu static bool GetTupleFromUndo(UndoRecPtr urecAdd, UHeapTuple currentTuple, UHeapTuple *visibleTuple, Snapshot snapshot, - CommandId curcid, Buffer buffer, OffsetNumber offnum, ItemPointer ctid, int tdSlot) + CommandId curcid, Buffer buffer, OffsetNumber offnum, ItemPointer ctid, int tdSlot, bool isFlashBack) { TransactionId prevUndoXid = InvalidTransactionId; int prevTdSlotId = tdSlot; @@ -1780,6 +1837,7 @@ static bool GetTupleFromUndo(UndoRecPtr urecAdd, UHeapTuple currentTuple, UHeapT bool freeTuple = false; BlockNumber blkno = BufferGetBlockNumber(buffer); UHeapDiskTupleData hdr; + UndoTraversalState state = UNDO_TRAVERSAL_DEFAULT; #ifdef DEBUG_UHEAP UHEAPSTAT_COUNT_TUPLE_OLD_VERSION_VISITS(); @@ -1847,16 +1905,9 @@ static bool GetTupleFromUndo(UndoRecPtr urecAdd, UHeapTuple currentTuple, UHeapT /* can't further operate on deleted or non-inplace-updated tuple */ Assert(op != UTUPLETID_GONE); - /* - * We need to fetch all the transaction related information from undo - * record for the tuples that point to a slot that gets invalidated - * for reuse at some point of time. See PageFreezeTransSlots. - */ - if (UHeapTupleHasInvalidXact(hdr.flag)) { - FetchTransInfoFromUndo(blkno, offnum, uinfo.xid, &uinfo, NULL); - haveCid = true; - } else if (uinfo.cid != InvalidCommandId) + if (uinfo.cid != InvalidCommandId) { haveCid = true; + } /* * The tuple must be all visible if the transaction slot is cleared or @@ -1874,7 +1925,8 @@ static bool GetTupleFromUndo(UndoRecPtr urecAdd, UHeapTuple currentTuple, UHeapT /* If necessary, get and check CID. 
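* Within the transaction that created this version, visibility is decided at command granularity: the version is visible only when its cid precedes the snapshot's curcid, which is why the cid may have to be fetched from undo here.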
*/ if (selector == UVERSION_CHECK_CID) { if (!haveCid) { - FetchTransInfoFromUndo(blkno, offnum, uinfo.xid, &uinfo, NULL); + state = FetchTransInfoFromUndo(blkno, offnum, uinfo.xid, &uinfo, NULL, false); + Assert(state != UNDO_TRAVERSAL_ABORT); haveCid = true; } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp index 4f816eebd..f71e4b023 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundoapi.cpp @@ -26,9 +26,11 @@ #include "knl/knl_session.h" #include "knl/knl_thread.h" #include "miscadmin.h" +#include "pgstat.h" #include "storage/smgr/fd.h" #include "storage/ipc.h" #include "threadpool/threadpool.h" +#include "utils/builtins.h" namespace undo { static uint64 g_maxUndoSizePerTrans = 0; @@ -55,7 +57,7 @@ bool CheckNeedSwitch(UndoPersistence upersistence, uint64 size, UndoRecPtr undoP zid = t_thrd.undo_cxt.zids[upersistence]; } Assert(zid != INVALID_ZONE_ID); - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, true, upersistence); if (uzone == NULL) { ereport(PANIC, (errmsg("CheckNeedSwitch: uzone is NULL"))); } @@ -75,9 +77,13 @@ void RollbackIfUndoExceeds(TransactionId xid, uint64 size) UndoRecPtr AllocateUndoSpace(TransactionId xid, UndoPersistence upersistence, uint64 size, bool needSwitch, XlogUndoMeta *xlundometa) { + if (!g_instance.attr.attr_storage.enable_ustore) { + ereport(ERROR, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("Ustore is disabled, " + "please set GUC enable_ustore=on and restart database.")))); + } int zid = t_thrd.undo_cxt.zids[upersistence]; Assert(zid != INVALID_ZONE_ID); - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, false, upersistence); if (uzone == NULL) { ereport(PANIC, (errmsg("AllocateUndoSpace: uzone is NULL"))); } @@ -119,7 +125,7 @@ UndoRecPtr AdvanceUndoPtr(UndoRecPtr undoPtr, uint64 size) void PrepareUndoMeta(XlogUndoMeta *meta, UndoPersistence upersistence, UndoRecPtr lastRecord, UndoRecPtr lastRecordSize) { int zid = t_thrd.undo_cxt.zids[upersistence]; - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, false, upersistence); uzone->LockUndoZone(); WHITEBOX_TEST_STUB(UNDO_PREPAR_ZONE_FAILED, WhiteboxDefaultErrorEmit); @@ -144,7 +150,7 @@ void PrepareUndoMeta(XlogUndoMeta *meta, UndoPersistence upersistence, UndoRecPt void FinishUndoMeta(XlogUndoMeta *meta, UndoPersistence upersistence) { int zid = t_thrd.undo_cxt.zids[upersistence]; - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, false, upersistence); if (uzone == NULL) { ereport(PANIC, (errmsg("FinishUndoMeta: uzone is NULL"))); } @@ -159,7 +165,7 @@ void UpdateTransactionSlot(TransactionId xid, XlogUndoMeta *meta, UndoRecPtr sta int zid = t_thrd.undo_cxt.zids[upersistence]; Assert(zid != INVALID_ZONE_ID); bool allocateTranslot = false; - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, false, upersistence); if (uzone == NULL) { ereport(PANIC, (errmsg("UpdateTransactionSlot: uzone is NULL"))); } @@ -198,7 +204,7 @@ void UpdateTransactionSlot(TransactionId xid, XlogUndoMeta *meta, UndoRecPtr sta void SetUndoMetaLSN(XlogUndoMeta *meta, XLogRecPtr lsn) { int zid = t_thrd.undo_cxt.zids[UNDO_PERMANENT]; - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = 
UndoZoneGroup::GetUndoZone(zid, false, UNDO_PERMANENT); if (uzone == NULL) { ereport(PANIC, (errmsg("SetUndoMetaLSN: uzone is NULL"))); } @@ -250,50 +256,21 @@ void RedoUndoMeta(XLogReaderState *record, XlogUndoMeta *meta, UndoRecPtr startU } /* Check undo record valid.. */ -bool CheckUndoRecordValid(UndoRecPtr urp, bool checkForceRecycle) +UndoRecordState CheckUndoRecordValid(UndoRecPtr urp, bool checkForceRecycle) { if (!IS_VALID_UNDO_REC_PTR(urp)) { - return false; + return UNDO_RECORD_INVALID; } int zid = UNDO_PTR_GET_ZONE_ID(urp); Assert(IS_VALID_ZONE_ID(zid)); - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, false); if (uzone == NULL) { - return false; + return UNDO_RECORD_INVALID; } else { return uzone->CheckUndoRecordValid(UNDO_PTR_GET_OFFSET(urp), checkForceRecycle); } } -/* Check undo record recovery status for ROS */ -UndoRecoveryStatus CheckUndoRecordRecoveryStatus(UndoRecPtr urp) -{ - if (!IS_VALID_UNDO_REC_PTR(urp)) { - return UNDO_NOT_VALID; - } - - UndoRecoveryStatus rc; - int zid = UNDO_PTR_GET_ZONE_ID(urp); - Assert(IS_VALID_ZONE_ID(zid)); - - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); - if (uzone == NULL) { - return UNDO_DISCARD; - } - UndoRecPtr insert = uzone->GetInsert(); - UndoRecPtr discard = uzone->GetDiscard(); - - if (urp < discard) { - rc = UNDO_DISCARD; // urp discarded - } else if (urp >= insert) { - rc = UNDO_NOT_RECOVERY; // nor recovery yet - } else { - rc = UNDO_RECOVERY; - } - - return rc; -} - /* * skip prepare undo record when undo record was invalid. */ @@ -321,7 +298,7 @@ bool IsSkipInsertSlot(UndoSlotPtr slotPtr) { Assert(IS_VALID_UNDO_REC_PTR(slotPtr)); int zid = UNDO_PTR_GET_ZONE_ID(slotPtr); - UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid); + UndoZone *uzone = UndoZoneGroup::GetUndoZone(zid, true); if (uzone == NULL) { return true; } @@ -348,9 +325,12 @@ bool IsSkipInsertSlot(UndoSlotPtr slotPtr) void CheckPointUndoSystemMeta(XLogRecPtr checkPointRedo) { #ifndef ENABLE_MULTIPLE_NODES + if (g_instance.undo_cxt.uZoneCount == 0) { + return; + } /* Open undo meta file. */ if (t_thrd.role == CHECKPOINT_THREAD) { - TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( "undo metadata checkPointRedo = %lu, oldestXidInUndo = %lu."), checkPointRedo, oldestXidInUndo))); @@ -366,12 +346,36 @@ void CheckPointUndoSystemMeta(XLogRecPtr checkPointRedo) UndoSpace::CheckPointUndoSpace(fd, UNDO_SLOT_SPACE); /* Flush buffer data and close fd. */ + pgstat_report_waitevent(WAIT_EVENT_UNDO_META_SYNC); + PGSTAT_INIT_TIME_RECORD(); + PGSTAT_START_TIME_RECORD(); fsync(fd); + PGSTAT_END_TIME_RECORD(DATA_IO_TIME); + pgstat_report_waitevent(WAIT_EVENT_END); close(fd); } #endif } +void InitUndoCountThreshold() +{ + uint32 undoMemFactor = 4; + uint32 undoCountThreshold = 0; + uint32 maxConn = g_instance.attr.attr_network.MaxConnections; + uint32 maxThreadNum = 0; + + + if (g_instance.attr.attr_common.enable_thread_pool) { + maxThreadNum = g_threadPoolControler->GetThreadNum(); + } + + undoCountThreshold = (maxConn >= maxThreadNum) ? undoMemFactor * maxConn : undoMemFactor * maxThreadNum; + g_instance.undo_cxt.undoCountThreshold = (g_instance.undo_cxt.uZoneCount >= undoCountThreshold) ? 
+ undoMemFactor * g_instance.undo_cxt.uZoneCount : undoCountThreshold; + g_instance.undo_cxt.undoCountThreshold = (g_instance.undo_cxt.undoCountThreshold > UNDO_ZONE_COUNT) ? + g_instance.undo_cxt.undoCountThreshold : UNDO_ZONE_COUNT; +} + static bool InitZoneMeta(int fd) { int rc = 0; @@ -568,7 +572,7 @@ void RecoveryUndoSystemMeta(void) g_instance.undo_cxt.undoTotalSize = 0; g_instance.undo_cxt.undoMetaSize = 0; g_maxUndoSizePerTrans = - (uint64)g_instance.attr.attr_storage.undo_limit_size_transaction * BLCKSZ; + (uint64)u_sess->attr.attr_storage.undo_limit_size_transaction * BLCKSZ; /* Recover undospace meta. */ undo::UndoZone::RecoveryUndoZone(fd); /* Recover undospace meta. */ @@ -618,7 +622,7 @@ void UpdateRollbackFinish(UndoSlotPtr slotPtr) undo::TransactionSlot *slot = buf.FetchTransactionSlot(slotPtr); Assert(slot->XactId() != InvalidTransactionId); Assert(slot->DbId() != InvalidOid); - TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); if (TransactionIdPrecedes(slot->XactId(), oldestXidInUndo)) { ereport(WARNING, (errmsg(UNDOFORMAT("curr xid having undo %lu < oldestXidInUndo %lu."), @@ -628,7 +632,7 @@ void UpdateRollbackFinish(UndoSlotPtr slotPtr) /* only persist level space need update transaction slot. */ START_CRIT_SECTION(); slot->UpdateRollbackProgress(); - ereport(LOG, (errmsg(UNDOFORMAT( + ereport(DEBUG1, (errmsg(UNDOFORMAT( "update zone %d slot %lu xid %lu dbid %u rollback progress from start %lu to end %lu, oldestXidInUndo %lu."), zid, slotPtr, slot->XactId(), slot->DbId(), slot->StartUndoPtr(), slot->EndUndoPtr(), oldestXidInUndo))); @@ -680,6 +684,9 @@ UndoRecPtr GetPrevUrp(UndoRecPtr currUrp) void ReleaseSlotBuffer() { + if (g_instance.undo_cxt.uZoneCount == 0) { + return; + } for (auto i = 0; i < UNDO_PERSISTENCE_LEVELS; i++) { UndoPersistence upersistence = static_cast(i); int zid = t_thrd.undo_cxt.zids[upersistence]; @@ -688,7 +695,7 @@ void ReleaseSlotBuffer() } UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zid]; if (uzone == NULL) { - return; + continue; } uzone->ReleaseSlotBuffer(); } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp index 89b9246db..c31526e00 100755 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundorecycle.cpp @@ -44,17 +44,22 @@ #include "pgstat.h" #define TRANS_PARTITION_LINEAR_SPARE_TIME(degree) \ - ((TRANS_PARTITION_SPARE_TIME * degree) > 1048576 ? 1048576 : (TRANS_PARTITION_SPARE_TIME * degree)) -const int TRANS_PARTITION_SPARE_TIME = 1024; + (degree > 3000 ? 
3000 : degree) #define ALLOCSET_UNDO_RECYCLE_MAXSIZE ALLOCSET_DEFAULT_MAXSIZE * 4 +typedef struct UndoXmins { + TransactionId oldestXmin; + TransactionId recycleXmin; +} UndoXmins; + namespace undo { const int EXIT_NUMBER = 2; const float FORCE_RECYCLE_PERCENT = 0.8; const int FORCE_RECYCLE_RETRY_TIMES = 5; const float FORCE_RECYCLE_PUSH_PERCENT = 0.2; const long SLOT_BUFFER_CACHE_SIZE = 16384; +const int UNDO_RECYCLE_TIMEOUT_DELTA = 50; static uint64 g_recycleLoops = 0; static int g_forceRecycleSize = 0; @@ -76,8 +81,107 @@ void UndoShutdownHandler(SIGNAL_ARGS) errno = save_errno; } -bool RecycleUndoSpace(UndoZone *zone, TransactionId oldestXmin, TransactionId *oldestRecycleXid, - TransactionId forceRecycleXid) +bool AsyncRollback(UndoZone *zone, UndoSlotPtr recycle, TransactionSlot *slot) +{ + if (slot->NeedRollback()) { + if (zone->GetPersitentLevel() == UNDO_TEMP || zone->GetPersitentLevel() == UNDO_UNLOGGED) { + return true; + } + UndoRecPtr prev = GetPrevUrp(slot->EndUndoPtr()); + AddRollbackRequest(slot->XactId(), prev, slot->StartUndoPtr(), + slot->DbId(), recycle); +#ifdef DEBUG_UHEAP + UHEAPSTAT_COUNT_UNDO_SPACE_UNRECYCLE(); +#endif + ereport(LOG, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "rollback zone %d slot %lu: xid %lu, start %lu, " + "end %lu, dbid %u. recyclexid %lu loops %lu"), + zone->GetZoneId(), recycle, slot->XactId(), slot->StartUndoPtr(), slot->EndUndoPtr(), + slot->DbId(), slot->XactId(), g_recycleLoops))); + return true; + } + return false; +} + +void AdvanceFrozenXid(UndoZone *zone, TransactionId *oldestFrozenXid, + TransactionId oldestXmin, UndoSlotPtr *oldestFrozenSlotPtr) +{ + TransactionSlot *slot = NULL; + UndoSlotPtr frozenSlotPtr = zone->GetFrozenSlotPtr(); + UndoSlotPtr recycle = zone->GetRecycle(); + UndoSlotPtr allocate = zone->GetAllocate(); + UndoSlotPtr currentSlotPtr = frozenSlotPtr > recycle ? frozenSlotPtr : recycle; + UndoSlotPtr start = INVALID_UNDO_SLOT_PTR; + while (currentSlotPtr < allocate) { + UndoSlotBuffer& slotBuf = g_slotBufferCache->FetchTransactionBuffer(currentSlotPtr); + slotBuf.PrepareTransactionSlot(currentSlotPtr); + start = currentSlotPtr; + bool finishAdvanceXid = false; + while (slotBuf.BufBlock() == UNDO_PTR_GET_BLOCK_NUM(currentSlotPtr) && (currentSlotPtr < allocate)) { + slot = slotBuf.FetchTransactionSlot(currentSlotPtr); + + WHITEBOX_TEST_STUB(UNDO_RECYCL_ESPACE_FAILED, WhiteboxDefaultErrorEmit); + + if (slot->StartUndoPtr() == INVALID_UNDO_REC_PTR) { + ereport(LOG, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "invalid slotptr zone %d transaction slot %lu loops %lu."), + zone->GetZoneId(), currentSlotPtr, g_recycleLoops))); + finishAdvanceXid = true; + break; + } +#ifdef DEBUG_UHEAP + UHEAPSTAT_COUNT_UNDO_SPACE_RECYCLE(); +#endif + pg_read_barrier(); + if (!TransactionIdIsValid(slot->XactId())) { + ereport(PANIC, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "recycle xid invalid: zone %d transaction slot %lu loops %lu."), + zone->GetZoneId(), currentSlotPtr, g_recycleLoops))); + } + ereport(DEBUG1, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "zone id %d, currentSlotPtr %lu. slot xactid %lu, oldestXmin %lu"), + zone->GetZoneId(), currentSlotPtr, slot->XactId(), oldestXmin))); + *oldestFrozenSlotPtr = currentSlotPtr; + if (!UHeapTransactionIdDidCommit(slot->XactId()) && + TransactionIdPrecedes(slot->XactId(), oldestXmin)) { + if (AsyncRollback(zone, currentSlotPtr, slot)) { + *oldestFrozenXid = slot->XactId(); + ereport(LOG, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "Transaction added to async rollback queue, zone: %d, oldestFrozenXid: %lu, " + "oldestFrozenSlotPtr %lu. oldestXmin %lu"), + zone->GetZoneId(), *oldestFrozenXid, *oldestFrozenSlotPtr, oldestXmin))); + finishAdvanceXid = true; + break; + } + } + if (TransactionIdFollowsOrEquals(slot->XactId(), oldestXmin)) { + ereport(DEBUG1, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "zone %d, slotxid %lu, oldestXmin %lu, oldestFrozenXid %lu, oldestFrozenSlotPtr %lu."), + zone->GetZoneId(), slot->XactId(), oldestXmin, *oldestFrozenXid, *oldestFrozenSlotPtr))); + finishAdvanceXid = true; + break; + } + currentSlotPtr = GetNextSlotPtr(currentSlotPtr); + *oldestFrozenSlotPtr = currentSlotPtr; + if (slotBuf.BufBlock() != UNDO_PTR_GET_BLOCK_NUM(currentSlotPtr)) { + g_slotBufferCache->RemoveSlotBuffer(start); + slotBuf.Release(); + } + } + if (finishAdvanceXid) { + break; + } + } +} + +bool RecycleUndoSpace(UndoZone *zone, TransactionId recycleXmin, TransactionId frozenXid, + TransactionId *oldestRecycleXid, TransactionId forceRecycleXid) { UndoSlotPtr recycle = zone->GetRecycle(); UndoSlotPtr allocate = zone->GetAllocate(); @@ -90,7 +194,9 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId oldestXmin, TransactionId *o bool undoRecycled = false; bool result = false; UndoSlotPtr start = INVALID_UNDO_SLOT_PTR; - TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); + + *oldestRecycleXid = recycleXmin < frozenXid ? recycleXmin : frozenXid; if (zone->GetPersitentLevel() == UNDO_PERMANENT) { needWal = true; @@ -118,43 +224,49 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId oldestXmin, TransactionId *o if (!TransactionIdIsValid(slot->XactId()) || TransactionIdPrecedes(slot->XactId(), oldestXidInUndo)) { ereport(PANIC, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( - "recycle xid invalid: zone %d transaction slot %lu xid %lu loops %lu, oldestXidInUndo %lu."), - zone->GetZoneId(), recycle, recycleXid, g_recycleLoops, oldestXidInUndo))); + "recycle xid invalid: zone %d transaction slot %lu, slot XactId %lu, " + "xid %lu loops %lu, oldestXidInUndo %lu."), + zone->GetZoneId(), recycle, slot->XactId(), recycleXid, g_recycleLoops, oldestXidInUndo))); } - if (TransactionIdPrecedes(slot->XactId(), oldestXmin)) { + if (TransactionIdPrecedes(slot->XactId(), recycleXmin)) { Assert(forceRecycle == false); - if (!UHeapTransactionIdDidCommit(slot->XactId())) { - if (slot->NeedRollback()) { - *oldestRecycleXid = slot->XactId(); - if (zone->GetPersitentLevel() == UNDO_TEMP || zone->GetPersitentLevel() == UNDO_UNLOGGED) { - break; - } - UndoRecPtr prev = GetPrevUrp(slot->EndUndoPtr()); - AddRollbackRequest(slot->XactId(), prev, slot->StartUndoPtr(), - slot->DbId(), recycle); -#ifdef DEBUG_UHEAP - UHEAPSTAT_COUNT_UNDO_SPACE_UNRECYCLE(); -#endif - ereport(LOG, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT( - "rollback zone %d slot %lu: xid %lu, start %lu, " - "end %lu, dbid %u.
recyclexid %lu loops %lu, oldestXidInUndo %lu."), - zone->GetZoneId(), recycle, slot->XactId(), slot->StartUndoPtr(), slot->EndUndoPtr(), - slot->DbId(), recycleXid, g_recycleLoops, oldestXidInUndo))); - break; + +#ifdef ENABLE_WHITEBOX + if (TransactionIdPrecedes(slot->XactId(), frozenXid)) { + if (!UHeapTransactionIdDidCommit(slot->XactId()) && slot->NeedRollback()) { + ereport(PANIC, (errmodule(MOD_UNDO), + errmsg(UNDOFORMAT( + "Recycle visibility check wrong: zone %d " + "transaction slot %lu xid %lu slot->XactId() %lu, oldestXidInUndo %lu."), + zone->GetZoneId(), recycle, recycleXid, slot->XactId(), oldestXidInUndo))); } } - } else { - if (TransactionIdPrecedes(slot->XactId(), forceRecycleXid) && - UHeapTransactionIdDidCommit(slot->XactId())) { - ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("ForceRecycle: slot=%lu, slotxid=%lu, " - "recyclexid=%lu, oldestXmin=%lu, startptr=%lu, endptr=%lu."), recycle, slot->XactId(), - forceRecycleXid, oldestXmin, UNDO_PTR_GET_OFFSET(slot->StartUndoPtr()), - UNDO_PTR_GET_OFFSET(slot->EndUndoPtr())))); - forceRecycle = true; - } else { +#endif + + if (TransactionIdFollowsOrEquals(slot->XactId(), frozenXid)) { break; } + } else { + bool forceRecycleXidCheck = (TransactionIdIsNormal(forceRecycleXid) + && TransactionIdPrecedes(slot->XactId(), forceRecycleXid)); + if (!forceRecycleXidCheck) { + break; + } + bool isInProgress = TransactionIdIsInProgress(slot->XactId()); + if (isInProgress) { + break; + } + bool slotTransactionStateCheck = (!UHeapTransactionIdDidCommit(slot->XactId()) + && !isInProgress && slot->NeedRollback()); + if (slotTransactionStateCheck) { + AsyncRollback(zone, recycle, slot); + break; + } + ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("ForceRecycle: slot=%lu, slotxid=%lu, " + "recyclexid=%lu, recycleXmin=%lu, startptr=%lu, endptr=%lu."), recycle, slot->XactId(), + forceRecycleXid, recycleXmin, UNDO_PTR_GET_OFFSET(slot->StartUndoPtr()), + UNDO_PTR_GET_OFFSET(slot->EndUndoPtr())))); + forceRecycle = true; } #ifdef DEBUG_UHEAP UHEAPSTAT_COUNT_UNDO_SPACE_RECYCLE(); @@ -178,7 +290,6 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId oldestXmin, TransactionId *o slotBuf.Release(); } } - if (undoRecycled) { Assert(TransactionIdIsValid(recycleXid)); if (zone->GetRecycleXid() >= recycleXid) { @@ -218,7 +329,7 @@ bool RecycleUndoSpace(UndoZone *zone, TransactionId oldestXmin, TransactionId *o xlrec.startSlot = start; xlrec.recycleLoops = g_recycleLoops; xlrec.recycledXid = recycleXid; - xlrec.oldestXmin = oldestXmin; + xlrec.oldestXmin = recycleXmin; xlrec.endUndoPtr = endUndoPtr; XLogRecPtr lsn = WriteUndoXlog(&xlrec, XLOG_UNDO_DISCARD); zone->SetLSN(lsn); @@ -226,7 +337,7 @@ (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("zone %d recycle slot start %lu end %lu from slot %lu " "to slot %lu lsn %lu xid %lu loops %lu oldestXmin %lu."), zone->GetZoneId(), startUndoPtr, endUndoPtr, start, recycle, - lsn, recycleXid, g_recycleLoops, oldestXmin))); + lsn, recycleXid, g_recycleLoops, recycleXmin))); } END_CRIT_SECTION(); @@ -273,9 +384,9 @@ static void UpdateXidHavingUndo(TransactionId *recycleMaxXIDs, uint32 count, Tra static bool NeedForceRecycle(void) { int totalSize = (int)pg_atomic_read_u32(&g_instance.undo_cxt.undoTotalSize); - int limitSize = (int)(g_instance.attr.attr_storage.undo_space_limit_size * FORCE_RECYCLE_PERCENT); + int limitSize = (int)(u_sess->attr.attr_storage.undo_space_limit_size * FORCE_RECYCLE_PERCENT); int metaSize =
(int)g_instance.undo_cxt.undoMetaSize; - Assert(totalSize >= 0 && limitSize > 0 && metaSize > 0); + Assert(totalSize >= 0 && limitSize >= 0 && metaSize >= 0); g_forceRecycleSize = totalSize + metaSize - limitSize; if (g_forceRecycleSize >= 0) { ereport(LOG, (errmodule(MOD_UNDO), @@ -322,6 +433,8 @@ static void WaitRecycleThread(uint64 nonRecycled) int rc = 0; rc = WaitLatch(&t_thrd.proc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, TRANS_PARTITION_LINEAR_SPARE_TIME(nonRecycled)); + /* Clear any already-pending wakeups */ + ResetLatch(&t_thrd.proc->procLatch); if (((unsigned int)rc) & WL_POSTMASTER_DEATH) { gs_thread_exit(1); } @@ -329,7 +442,7 @@ static void WaitRecycleThread(uint64 nonRecycled) static void RecycleWaitIfNotUsed() { - if (g_instance.attr.attr_storage.undo_zone_count == 0 + if (!g_instance.attr.attr_storage.enable_ustore #ifdef ENABLE_MULTIPLE_NODES || true #endif @@ -351,9 +464,12 @@ void UndoRecycleMain() { sigjmp_buf localSigjmpBuf; bool recycled = false; + bool isFirstRecycle = false; TransactionId oldestXidHavingUndo = InvalidTransactionId; - uint64 nonRecycled = 0; + TransactionId oldestFrozenXidInUndo = InvalidTransactionId; + uint64 nonRecycled = 50; TransactionId *recycleMaxXIDs = NULL; + t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(NULL, "undo recycler", THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); MemoryContext undoRecycleContext = AllocSetContextCreate(t_thrd.top_mem_cxt, @@ -402,6 +518,8 @@ void UndoRecycleMain() HOLD_INTERRUPTS(); /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); LWLockReleaseAll(); pgstat_report_waitevent(WAIT_EVENT_END); AbortBufferIO(); @@ -451,61 +569,87 @@ void UndoRecycleMain() } while (true) { - /* Clear any already-pending wakeups */ - ResetLatch(&t_thrd.proc->procLatch); if (t_thrd.undorecycler_cxt.got_SIGHUP) { t_thrd.undorecycler_cxt.got_SIGHUP = false; ProcessConfigFile(PGC_SIGHUP); } if (!RecoveryInProgress()) { - TransactionId oldestXmin = GetOldestXminForUndo(); - if (oldestXmin == InvalidTransactionId) { + TransactionId recycleXmin = InvalidTransactionId; + TransactionId oldestXmin = GetOldestXminForUndo(&recycleXmin); + isFirstRecycle = !TransactionIdIsValid(g_instance.undo_cxt.oldestFrozenXid); + if (!TransactionIdIsValid(recycleXmin) || + TransactionIdPrecedes(oldestXmin, g_instance.undo_cxt.oldestFrozenXid)) { WaitRecycleThread(nonRecycled); continue; } recycled = false; oldestXidHavingUndo = InvalidTransactionId; + oldestFrozenXidInUndo = oldestXmin; uint32 idx = 0; uint32 retry = 1; TransactionId recycleXid; TransactionId forceRecycleXid = InvalidTransactionId; uint32 recycleMaxXIDCount = 0; errno_t ret = 0; - if (g_instance.attr.attr_storage.undo_zone_count != 0) { + bool isAnyZoneUsed = false; + if (g_instance.undo_cxt.uZoneCount != 0) { if (NeedForceRecycle()) { - forceRecycleXid = GetForceRecycleXid(oldestXmin, retry); + forceRecycleXid = GetForceRecycleXid(recycleXmin, retry); } retry: ret = memset_s(recycleMaxXIDs, sizeof(TransactionId) * UNDO_ZONE_COUNT, 0, sizeof(TransactionId) * UNDO_ZONE_COUNT); securec_check(ret, "\0", "\0"); recycleMaxXIDCount = 0; + isAnyZoneUsed = false; for (idx = 0; idx < UNDO_ZONE_COUNT && !t_thrd.undorecycler_cxt.shutdown_requested; idx++) { - UndoZone *zone = UndoZoneGroup::GetUndoZone(idx); + UndoPersistence upersistence = UNDO_PERMANENT; + GET_UPERSISTENCE(idx, upersistence); + UndoZone *zone = UndoZoneGroup::GetUndoZone(idx, false, upersistence); + TransactionId frozenXid = 
oldestXmin; if (zone == NULL) { continue; } recycleXid = InvalidTransactionId; if (zone->Used()) { - if (RecycleUndoSpace(zone, oldestXmin, &recycleXid, forceRecycleXid)) { + UndoSlotPtr oldestFrozenSlotPtr = zone->GetFrozenSlotPtr(); + AdvanceFrozenXid(zone, &frozenXid, oldestXmin, &oldestFrozenSlotPtr); + zone->SetFrozenSlotPtr(oldestFrozenSlotPtr); + ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg( + UNDOFORMAT("oldestFrozenXidInUndo: oldestFrozenXidInUndo=%lu, frozenXid=%lu" + " before RecycleUndoSpace"), + oldestFrozenXidInUndo, frozenXid))); + if (RecycleUndoSpace(zone, recycleXmin, frozenXid, &recycleXid, forceRecycleXid)) { recycled = true; } + isAnyZoneUsed = true; + ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg( + UNDOFORMAT("oldestFrozenXidInUndo: oldestFrozenXidInUndo=%lu, frozenXid=%lu" + " after RecycleUndoSpace"), + oldestFrozenXidInUndo, frozenXid))); + oldestFrozenXidInUndo = oldestFrozenXidInUndo > frozenXid ? frozenXid : oldestFrozenXidInUndo; UpdateRecyledXid(recycleMaxXIDs, &recycleMaxXIDCount, recycleXid); } } } + if (isAnyZoneUsed) { + ereport(LOG, (errmodule(MOD_UNDO), errmsg( + UNDOFORMAT("oldestFrozenXidInUndo for update: oldestFrozenXidInUndo=%lu"), + oldestFrozenXidInUndo))); + pg_atomic_write_u64(&g_instance.undo_cxt.oldestFrozenXid, oldestFrozenXidInUndo); + } if (t_thrd.undorecycler_cxt.shutdown_requested) { ShutDownRecycle(recycleMaxXIDs); } - if (g_instance.attr.attr_storage.undo_zone_count != 0) { - UpdateXidHavingUndo(recycleMaxXIDs, recycleMaxXIDCount, &oldestXidHavingUndo, oldestXmin); + if (g_instance.undo_cxt.uZoneCount != 0) { + UpdateXidHavingUndo(recycleMaxXIDs, recycleMaxXIDCount, &oldestXidHavingUndo, recycleXmin); if (g_forceRecycleSize > 0) { retry++; - forceRecycleXid = GetForceRecycleXid(oldestXmin, retry); + forceRecycleXid = GetForceRecycleXid(recycleXmin, retry); if (TransactionIdIsValid(forceRecycleXid)) { ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg( UNDOFORMAT("ForceRecycle: recycleXid=%lu, oldestXmin=%lu, forceRecycleSize=%d, retry=%u."), - forceRecycleXid, oldestXmin, g_forceRecycleSize, retry))); + forceRecycleXid, recycleXmin, g_forceRecycleSize, retry))); goto retry; } } @@ -513,16 +657,16 @@ void UndoRecycleMain() errmsg(UNDOFORMAT("update oldestXidInUndo = %lu."), oldestXidHavingUndo))); g_recycleLoops++; } - if (oldestXidHavingUndo != InvalidTransactionId) { - TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + if (oldestXidHavingUndo != InvalidTransactionId && !isFirstRecycle) { + TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); if (TransactionIdPrecedes(oldestXidHavingUndo, oldestXidInUndo)) { ereport(PANIC, (errmsg(UNDOFORMAT("curr xid having undo %lu < global oldestXidInUndo %lu."), oldestXidHavingUndo, oldestXidInUndo))); } - pg_atomic_write_u64(&g_instance.proc_base->oldestXidInUndo, oldestXidHavingUndo); + pg_atomic_write_u64(&g_instance.undo_cxt.oldestXidInUndo, oldestXidHavingUndo); } if (!recycled) { - nonRecycled += 1; + nonRecycled += UNDO_RECYCLE_TIMEOUT_DELTA; WaitRecycleThread(nonRecycled); } else { nonRecycled = 0; diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp index 79dcb48ae..b75ae19ce 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundospace.cpp @@ -65,11 +65,11 @@ void UndoSpace::ExtendUndoLog(int zid, UndoLogOffset offset, uint32 dbId) 
WHITEBOX_TEST_STUB(UNDO_EXTEND_LOG_FAILED, WhiteboxDefaultErrorEmit); while (tail < offset) { if ((!t_thrd.xlog_cxt.InRecovery) && (static_cast(g_instance.undo_cxt.undoTotalSize) + - static_cast(g_instance.undo_cxt.undoMetaSize) >= g_instance.attr.attr_storage.undo_space_limit_size)) { + static_cast(g_instance.undo_cxt.undoMetaSize) >= u_sess->attr.attr_storage.undo_space_limit_size)) { ereport(ERROR, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT( "The undo space size %u > limit size %d. Please increase the undo_space_limit_size."), g_instance.undo_cxt.undoTotalSize + g_instance.undo_cxt.undoMetaSize, - g_instance.attr.attr_storage.undo_space_limit_size))); + u_sess->attr.attr_storage.undo_space_limit_size))); } blockno = (BlockNumber)(tail / BLCKSZ + 1); /* Create a new undo segment. */ @@ -311,17 +311,17 @@ void UndoSpace::RecoveryUndoSpace(int fd, UndoSpaceType type) char *persistBlock = (char *)palloc0(UNDO_META_PAGE_SIZE * PAGES_READ_NUM); oldContext = MemoryContextSwitchTo(g_instance.undo_cxt.undoContext); if (type == UNDO_LOG_SPACE) { - UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); lseek(fd, totalPageCnt * UNDO_META_PAGE_SIZE, SEEK_SET); } else { - UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOZONE_COUNT_PER_PAGE, totalPageCnt); uint32 seek = totalPageCnt * UNDO_META_PAGE_SIZE; - UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, totalPageCnt); + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, totalPageCnt); seek += totalPageCnt * UNDO_META_PAGE_SIZE; lseek(fd, seek, SEEK_SET); } - UNDOZONE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, totalPageCnt); + UNDOSPACE_META_PAGE_COUNT(PERSIST_ZONE_COUNT, UNDOSPACE_COUNT_PER_PAGE, totalPageCnt); spaceMetaSize = totalPageCnt * UNDO_META_PAGE_SIZE / BLCKSZ; g_instance.undo_cxt.undoMetaSize += spaceMetaSize; @@ -372,11 +372,12 @@ void UndoSpace::RecoveryUndoSpace(int fd, UndoSpaceType type) int offset = zoneId % UNDOSPACE_COUNT_PER_PAGE; uspMetaInfo = (UndoSpaceMetaInfo *)(uspMetaBuffer + offset * sizeof(UndoSpaceMetaInfo)); - UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; - if (uspMetaInfo->tail == 0 && uzone == NULL) { + if (uspMetaInfo->tail == 0 && + (g_instance.undo_cxt.uZones == NULL || g_instance.undo_cxt.uZones[zoneId] == NULL)) { continue; } - + undo::UndoZoneGroup::InitUndoCxtUzones(); + UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; uzone = UndoZoneGroup::GetUndoZone(zoneId, true); UndoSpace *usp = uzone->GetSpace(type); usp->LockInit(); diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp index 0bc336a0e..03d05f099 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundotxn.cpp @@ -301,8 +301,13 @@ void UndoSlotBufferCache::RemoveSlotBuffer(UndoSlotPtr slotPtr) next->prev_ = entry->prev_; } size_--; - ereport(DEBUG1, (errmsg(UNDOFORMAT("release entry %lu head_ %lu tail_ %lu."), - entry->tag_, head_->tag_, tail_->tag_))); + if (size_ != 0) { + ereport(DEBUG1, (errmsg(UNDOFORMAT("release entry %lu head_ %lu tail_ %lu."), + entry->tag_, head_->tag_, tail_->tag_))); + } else { + ereport(DEBUG1, (errmsg(UNDOFORMAT("release entry %lu. SlotBuffer is empty."), + entry->tag_))); + }
return; } diff --git a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp index f52bef76f..14fed5cb9 100644 --- a/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp +++ b/src/gausskernel/storage/access/ustore/undo/knl_uundozone.cpp @@ -17,6 +17,7 @@ #include "access/ustore/undo/knl_uundozone.h" #include "access/ustore/undo/knl_uundoapi.h" #include "access/ustore/undo/knl_uundotxn.h" +#include "access/ustore/undo/knl_uundospace.h" #include "access/ustore/knl_whitebox_test.h" #include "knl/knl_thread.h" #include "miscadmin.h" @@ -29,6 +30,8 @@ namespace undo { +const int MAX_REALLOCATE_TIMES = 10; + UndoZone::UndoZone() : attached_(UNDO_ZONE_DETACHED), pLevel_(UNDO_PERSISTENT_BUTT), lsn_(0), dirty_(UNDOZONE_CLEAN) {} @@ -81,29 +84,35 @@ bool UndoZone::CheckRecycle(UndoRecPtr starturp, UndoRecPtr endurp) * Check whether the undo record is discarded or not. If it's already discarded * return false otherwise return true. Caller must hold the space discardLock_. */ -bool UndoZone::CheckUndoRecordValid(UndoLogOffset offset, bool checkForceRecycle) +UndoRecordState UndoZone::CheckUndoRecordValid(UndoLogOffset offset, bool checkForceRecycle) { Assert((offset < UNDO_LOG_MAX_SIZE) && (offset >= UNDO_LOG_BLOCK_HEADER_SIZE)); Assert(forceDiscard_ <= insertUndoPtr_); - if (offset <= this->insertUndoPtr_) { - if (offset >= this->forceDiscard_) { - return true; - } else if (offset >= this->discardUndoPtr_ && checkForceRecycle) { - TransactionId recycleXmin = GetOldestXminForUndo(); - if (TransactionIdPrecedes(recycleXid_, recycleXmin)) { - ereport(DEBUG1, (errmsg( - UNDOFORMAT("oldestxmin %lu > recyclexid %lu: zid=%d, forceDiscard=%lu, discard=%lu, offset=%lu."), - recycleXmin, recycleXid_, this->zid_, this->forceDiscard_, this->discardUndoPtr_, offset))); - return false; - } - ereport(ERROR, (errmsg(UNDOFORMAT("snapshoot too old, the record has been force recycled: zid=%d, " - "forceDiscard=%lu, discard=%lu, offset=%lu."), - this->zid_, this->forceDiscard_, this->discardUndoPtr_, offset))); - return false; - } + if (offset > this->insertUndoPtr_) { + ereport(DEBUG1, (errmsg(UNDOFORMAT("The undo record is not inserted yet: zid=%d, insert=%lu, offset=%lu."), + this->zid_, this->insertUndoPtr_, offset))); + return UNDO_RECORD_NOT_INSERT; } - return false; + if (offset >= this->forceDiscard_) { + return UNDO_RECORD_NORMAL; + } + if (offset >= this->discardUndoPtr_ && checkForceRecycle) { + TransactionId recycleXmin; + TransactionId oldestXmin = GetOldestXminForUndo(&recycleXmin); + if (TransactionIdPrecedes(recycleXid_, recycleXmin)) { + ereport(DEBUG1, (errmsg( + UNDOFORMAT("oldestxmin %lu, recycleXmin %lu > recyclexid %lu: zid=%d, " + "forceDiscard=%lu, discard=%lu, offset=%lu."), + oldestXmin, recycleXmin, recycleXid_, this->zid_, this->forceDiscard_, + this->discardUndoPtr_, offset))); + return UNDO_RECORD_DISCARD; + } + ereport(LOG, (errmsg(UNDOFORMAT("The record has been force recycled: zid=%d, forceDiscard=%lu, " + "discard=%lu, offset=%lu."), this->zid_, this->forceDiscard_, this->discardUndoPtr_, offset))); + return UNDO_RECORD_FORCE_DISCARD; + } + return UNDO_RECORD_DISCARD; } /* @@ -483,7 +492,7 @@ void UndoZone::CheckPointUndoZone(int fd) uzone->UnlockUndoZone(); ereport(LOG, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("release zone memory %d."), cycle))); delete(uzone); - g_instance.undo_cxt.uZoneCount--; + pg_atomic_fetch_sub_u32(&g_instance.undo_cxt.uZoneCount, 1); /* False equals to 0, which means zone is used; true is
1, which means zone is unused. */ bool isZoneFree = bms_is_member(cycle, g_instance.undo_cxt.uZoneBitmap[UNDO_PERMANENT]); if (!isZoneFree) { @@ -540,6 +549,42 @@ void UndoZone::CheckPointUndoZone(int fd) } } } + +static void AllocateZoneMemory(UndoZone **uzone, UndoPersistence upersistence, int zid, bool needCheckZone) +{ + if (*uzone == NULL) { + int reallocateTimes = 1; +REALLOCATE: + MemoryContext oldContext = MemoryContextSwitchTo(g_instance.undo_cxt.undoContext); + *uzone = New(g_instance.undo_cxt.undoContext) UndoZone(); + MemoryContextSwitchTo(oldContext); + if (*uzone == NULL && reallocateTimes <= MAX_REALLOCATE_TIMES) { + reallocateTimes++; + ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("reallocate times %d."), reallocateTimes))); + goto REALLOCATE; + } + if (*uzone == NULL) { + ereport(PANIC, (errmsg(UNDOFORMAT("allocate zone failed.")))); + } + pg_atomic_fetch_add_u32(&g_instance.undo_cxt.uZoneCount, 1); + InitZone(*uzone, zid, upersistence); + InitUndoSpace(*uzone, UNDO_LOG_SPACE); + InitUndoSpace(*uzone, UNDO_SLOT_SPACE); + ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("attached free zone %d."), zid))); + } + + if (needCheckZone) { + bool isZoneFree = true; + ZONEID_IS_USED(zid, upersistence); + if (isZoneFree) { + ereport(PANIC, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("allocate zone fail.")))); + } + } + (*uzone)->InitSlotBuffer(); + pg_write_barrier(); + g_instance.undo_cxt.uZones[zid] = *uzone; +} + static void RecoveryZone(UndoZone *uzone, const UndoZoneMetaInfo *uspMetaInfo, const int zoneId) { @@ -553,11 +598,10 @@ static void RecoveryZone(UndoZone *uzone, uzone->SetAllocate(uspMetaInfo->allocate); uzone->SetRecycle(uspMetaInfo->recycle); uzone->SetRecycleXid(uspMetaInfo->recycleXid); - uzone->Detach(); } /* Initialize parameters in the undo zone. */ -static void InitZone(UndoZone *uzone, const int zoneId, UndoPersistence upersistence) +void InitZone(UndoZone *uzone, const int zoneId, UndoPersistence upersistence) { uzone->InitLock(); uzone->SetZoneId(zoneId); @@ -568,12 +612,13 @@ static void InitZone(UndoZone *uzone, const int zoneId, UndoPersistence upersist uzone->SetForceDiscard(UNDO_LOG_BLOCK_HEADER_SIZE); uzone->SetAllocate(UNDO_LOG_BLOCK_HEADER_SIZE); uzone->SetRecycle(UNDO_LOG_BLOCK_HEADER_SIZE); + uzone->SetFrozenSlotPtr(INVALID_UNDO_SLOT_PTR); uzone->SetRecycleXid(InvalidTransactionId); - uzone->Detach(); + uzone->SetAttachPid(u_sess->attachPid); } /* Initialize parameters in the undo space. 
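* Both per-zone spaces go through this helper: AllocateZoneMemory above calls it once with UNDO_LOG_SPACE and once with UNDO_SLOT_SPACE, after InitZone has reset the zone's own state.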
*/ -static void InitUndoSpace(UndoZone *uzone, UndoSpaceType type) +void InitUndoSpace(UndoZone *uzone, UndoSpaceType type) { UndoSpace *usp = uzone->GetSpace(type); usp->LockInit(); @@ -615,7 +660,6 @@ void UndoZone::RecoveryUndoZone(int fd) DECLARE_NODE_COUNT(); for (zoneId = 0; zoneId < PERSIST_ZONE_COUNT; zoneId++) { UndoZoneMetaInfo *uspMetaInfo = NULL; - UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; if (zoneId % (UNDOZONE_COUNT_PER_PAGE * PAGES_READ_NUM) == 0) { Size readSize; if ((uint32)(PERSIST_ZONE_COUNT - zoneId) < UNDOZONE_COUNT_PER_PAGE * PAGES_READ_NUM) { @@ -666,106 +710,33 @@ void UndoZone::RecoveryUndoZone(int fd) continue; } if (uspMetaInfo->insert != UNDO_LOG_BLOCK_HEADER_SIZE) { - MemoryContext oldContext = MemoryContextSwitchTo(g_instance.undo_cxt.undoContext); - uzone = New(g_instance.undo_cxt.undoContext) UndoZone(); - MemoryContextSwitchTo(oldContext); - if (uzone == NULL) { - ereport(PANIC, (errmsg(UNDOFORMAT("failed to allocate memory for zone: %d."), zoneId))); - } - g_instance.undo_cxt.uZoneCount++; - /* Check if the total count of undo zone is enough during the recovery process. */ - if (g_instance.undo_cxt.uZoneCount > (uint32)g_instance.attr.attr_storage.undo_zone_count) { - ereport(FATAL, (errmsg(UNDOFORMAT("Undo zone is not enough, max count is %d, now is %d"), - g_instance.attr.attr_storage.undo_zone_count, g_instance.undo_cxt.uZoneCount))); - } + undo::UndoZoneGroup::InitUndoCxtUzones(); + UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zoneId]; + AllocateZoneMemory(&uzone, UNDO_PERMANENT, zoneId, false); RecoveryZone(uzone, uspMetaInfo, zoneId); - g_instance.undo_cxt.uZones[zoneId] = uzone; } } pfree(persistBlock); } -int UndoZoneGroup::AllocateZone(UndoPersistence upersistence) -{ - Assert(upersistence >= UNDO_PERMANENT && upersistence < UNDO_PERSISTENT_BUTT); - WHITEBOX_TEST_STUB(UNDO_ALLOCATE_ZONE_FAILED, WhiteboxDefaultErrorEmit); - if (g_instance.undo_cxt.uZoneCount > (uint32)g_instance.attr.attr_storage.undo_zone_count) { - ereport(ERROR, (errmsg(UNDOFORMAT("Invalid undo zone count, max count is %d, now is %d"), - g_instance.attr.attr_storage.undo_zone_count, g_instance.undo_cxt.uZoneCount))); - } - int zid = t_thrd.undo_cxt.zids[upersistence]; - if (IS_VALID_ZONE_ID(zid)) { - return zid; - } - - UndoZone *uzone = NULL; - int retZid = -1; - DECLARE_NODE_NO(); -reallocate: - ALLOCATE_ZONEID(upersistence, retZid); - if (!IS_VALID_ZONE_ID(retZid)) { - ereport(ERROR, (errmsg("AllocateZone: zone id is invalid, there're too many working threads."))); - } - uzone = (UndoZone *)g_instance.undo_cxt.uZones[retZid]; - /* Set bitmap to 0, whether or not it is need to request the memory for the zone. 
*/ - if (uzone == NULL) { - MemoryContext oldContext = MemoryContextSwitchTo(g_instance.undo_cxt.undoContext); - uzone = New(g_instance.undo_cxt.undoContext) UndoZone(); - MemoryContextSwitchTo(oldContext); - if (uzone == NULL) { - RELEASE_ZONEID(upersistence, retZid); - ereport(ERROR, (errmsg(UNDOFORMAT("failed to allocate memory for zone: %d."), retZid))); - } - g_instance.undo_cxt.uZoneCount++; - InitZone(uzone, retZid, upersistence); - InitUndoSpace(uzone, UNDO_LOG_SPACE); - InitUndoSpace(uzone, UNDO_SLOT_SPACE); - ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("attached free zone %d."), retZid))); - } - - bool isZoneFree = true; - ZONEID_IS_USED(retZid, upersistence); - if (isZoneFree) { - ereport(PANIC, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("allocate zone fail.")))); - } - if (uzone->Attached()){ - ereport(WARNING, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("reallocate zone %d."), retZid))); - goto reallocate; - } - uzone->Attach(); - uzone->InitSlotBuffer(); - pg_write_barrier(); - t_thrd.undo_cxt.zids[upersistence] = retZid; - g_instance.undo_cxt.uZones[retZid] = uzone; - ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("allocate zid: %d, attach: %d."), - uzone->GetZoneId(), uzone->Attached()))); - if (!IS_VALID_ZONE_ID(retZid)) { - ereport(PANIC, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("zone free list null.")))); - } - return retZid; -} - void UndoZoneGroup::ReleaseZone(int zid, UndoPersistence upersistence) { Assert(IS_VALID_ZONE_ID(zid)); WHITEBOX_TEST_STUB(UNDO_RELEASE_ZONE_FAILED, WhiteboxDefaultErrorEmit); + if (g_instance.undo_cxt.uZones == NULL || g_instance.undo_cxt.uZoneCount == 0) { + return; + } UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zid]; - Assert(uzone != NULL); bool isZoneFree = true; ZONEID_IS_USED(zid, upersistence); - if (isZoneFree || !uzone->Attached()) { + if (isZoneFree) { ereport(PANIC, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("release zone fail.")))); } - - ereport(DEBUG1, (errmodule(MOD_UNDO), errmsg(UNDOFORMAT("release zid: %d, attach: %d."), - uzone->GetZoneId(), uzone->Attached()))); - if (!uzone->Attached()) { - ereport(PANIC, (errmodule(MOD_UNDO), - errmsg(UNDOFORMAT("used zone %d detached."), zid))); + if (uzone != NULL) { + uzone->NotKeepBuffer(); + uzone->SetAttachPid(0); } - uzone->Detach(); - uzone->NotKeepBuffer(); pg_write_barrier(); DECLARE_NODE_NO(); RELEASE_ZONEID(upersistence, zid); @@ -773,10 +744,11 @@ void UndoZoneGroup::ReleaseZone(int zid, UndoPersistence upersistence) UndoZone *UndoZoneGroup::SwitchZone(int zid, UndoPersistence upersistence) { - if (g_instance.undo_cxt.uZoneCount > (uint32)g_instance.attr.attr_storage.undo_zone_count) { - ereport(FATAL, (errmsg(UNDOFORMAT("Invalid undo zone count, max count is %d, now is %d"), - g_instance.attr.attr_storage.undo_zone_count, g_instance.undo_cxt.uZoneCount))); + if (g_instance.undo_cxt.uZoneCount > g_instance.undo_cxt.undoCountThreshold) { + ereport(FATAL, (errmsg(UNDOFORMAT("Too many undo zones are requested, max count is %d, now is %d"), + g_instance.undo_cxt.undoCountThreshold, g_instance.undo_cxt.uZoneCount))); } + InitUndoCxtUzones(); UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zid]; int retZid = -1; uzone->PrepareSwitch(); @@ -796,11 +768,10 @@ UndoZone *UndoZoneGroup::SwitchZone(int zid, UndoPersistence upersistence) RELEASE_ZONEID(upersistence, retZid); ereport(ERROR, (errmsg(UNDOFORMAT("failed to allocate memory for zone: %d."), retZid))); } - g_instance.undo_cxt.uZoneCount++; + pg_atomic_fetch_add_u32(&g_instance.undo_cxt.uZoneCount, 
1); InitZone(uzone, retZid, UNDO_PERMANENT); InitUndoSpace(uzone, UNDO_LOG_SPACE); InitUndoSpace(uzone, UNDO_SLOT_SPACE); - uzone->Attach(); uzone->InitSlotBuffer(); pg_write_barrier(); g_instance.undo_cxt.uZones[retZid] = uzone; @@ -812,6 +783,7 @@ UndoZone *UndoZoneGroup::SwitchZone(int zid, UndoPersistence upersistence) if (uzone->GetUndoSpace()->Tail() != 0) { ereport(PANIC, (errmsg(UNDOFORMAT("new zone is not empty, tail=%lu."), uzone->GetUndoSpace()->Tail()))); } + uzone->InitSlotBuffer(); t_thrd.undo_cxt.zids[upersistence] = retZid; } else { ereport(ERROR, (errmsg(UNDOFORMAT("failed to switch a new zone.")))); @@ -820,38 +792,51 @@ return uzone; } -UndoZone* UndoZoneGroup::GetUndoZone(int zid, bool needInit) +void UndoZoneGroup::InitUndoCxtUzones() +{ + if (g_instance.undo_cxt.uZones == NULL) { + LWLockAcquire(UndoZoneLock, LW_EXCLUSIVE); + if (g_instance.undo_cxt.uZones == NULL) { + MemoryContext oldContext = MemoryContextSwitchTo(g_instance.undo_cxt.undoContext); + g_instance.undo_cxt.uZones = (void **)palloc0(UNDO_ZONE_COUNT * sizeof(void*)); + MemoryContextSwitchTo(oldContext); + } + if (g_instance.undo_cxt.uZones == NULL) { + ereport(PANIC, (errmsg(UNDOFORMAT("failed to allocate memory for UndoContext.")))); + } + LWLockRelease(UndoZoneLock); + } +} + +UndoZone* UndoZoneGroup::GetUndoZone(int zid, bool isNeedInitZone, UndoPersistence upersistence) { WHITEBOX_TEST_STUB(UNDO_GET_ZONE_FAILED, WhiteboxDefaultErrorEmit); if (!IS_VALID_ZONE_ID(zid)) { ereport(PANIC, (errmsg(UNDOFORMAT("zone id %d invalid."), zid))); } + InitUndoCxtUzones(); + + UndoPersistence upersistenceFromZoneid = UNDO_PERMANENT; + GET_UPERSISTENCE(zid, upersistenceFromZoneid); UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zid]; - if (uzone == NULL && (t_thrd.xlog_cxt.InRecovery || needInit)) { - /* False equals to 0, which means zone is used; true is 1, which means zone is unused. */ - bool isZoneFree = bms_is_member(zid, g_instance.undo_cxt.uZoneBitmap[UNDO_PERMANENT]); - if (!isZoneFree) { - /* If the zone is full, return NULL to skip redo process. */ - return NULL; + if (uzone == NULL) { + if (RecoveryInProgress()) { + /* False equals to 0, which means zone is used; true is 1, which means zone is unused. */ + bool isZoneFree = bms_is_member(zid, g_instance.undo_cxt.uZoneBitmap[upersistenceFromZoneid]); + if (!isZoneFree) { + /* If the zone is full, return NULL to skip redo process. */ + return NULL; + } + AllocateZoneMemory(&uzone, upersistenceFromZoneid, zid, false); + } else { + if (isNeedInitZone) { + AllocateZoneMemory(&uzone, upersistenceFromZoneid, zid, true); + } } - MemoryContext oldContext = MemoryContextSwitchTo(g_instance.undo_cxt.undoContext); - uzone = New(g_instance.undo_cxt.undoContext) UndoZone(); - MemoryContextSwitchTo(oldContext); - if (uzone == NULL) { - ereport(PANIC, (errmsg(UNDOFORMAT("failed to allocate memory for zone: %d."), zid))); - } - g_instance.undo_cxt.uZoneCount++; - /* Check if the total count of undo zone is enough during the recovery process.
*/ - if (g_instance.undo_cxt.uZoneCount > (uint32)g_instance.attr.attr_storage.undo_zone_count) { - ereport(FATAL, (errmsg(UNDOFORMAT("Undo zone is not enough, max count is %d, now is %d"), - g_instance.attr.attr_storage.undo_zone_count, g_instance.undo_cxt.uZoneCount))); - } - InitZone(uzone, zid, UNDO_PERMANENT); - InitUndoSpace(uzone, UNDO_LOG_SPACE); - InitUndoSpace(uzone, UNDO_SLOT_SPACE); - pg_write_barrier(); - g_instance.undo_cxt.uZones[zid] = uzone; + } + if (uzone != NULL && uzone->GetAttachPid() == 0) { + uzone->SetAttachPid(u_sess->attachPid); } return uzone; } @@ -860,17 +845,24 @@ void AllocateZonesBeforXid() { WHITEBOX_TEST_STUB(UNDO_ALLOCATE_ZONE_BEFO_XID_FAILED, WhiteboxDefaultErrorEmit); - /* Setting guc parameter "undo_zone_count" as 0 to disable undo module. */ - if (g_instance.attr.attr_storage.undo_zone_count == 0) { - return; - } for (auto i = 0; i < UNDO_PERSISTENCE_LEVELS; i++) { UndoPersistence upersistence = static_cast(i); - int zid = UndoZoneGroup::AllocateZone(upersistence); + int zid = -1; + if (IS_VALID_ZONE_ID(t_thrd.undo_cxt.zids[upersistence])) + continue; + DECLARE_NODE_NO(); + ALLOCATE_ZONEID(upersistence, zid); if (!IS_VALID_ZONE_ID(zid)) { ereport(ERROR, (errmsg(UNDOFORMAT("failed to allocate a zone")))); return; } + if (g_instance.undo_cxt.uZones != NULL) { + UndoZone *uzone = (UndoZone *)g_instance.undo_cxt.uZones[zid]; + if (uzone != NULL) { + uzone->InitSlotBuffer(); + } + } + t_thrd.undo_cxt.zids[upersistence] = zid; } return; } diff --git a/src/gausskernel/storage/buffer/bufmgr.cpp b/src/gausskernel/storage/buffer/bufmgr.cpp index 08628ce00..90d6eb080 100644 --- a/src/gausskernel/storage/buffer/bufmgr.cpp +++ b/src/gausskernel/storage/buffer/bufmgr.cpp @@ -47,6 +47,7 @@ #include "catalog/pg_hashbucket_fn.h" #include "catalog/storage_gtt.h" #include "commands/tablespace.h" +#include "commands/verify.h" #include "executor/instrument.h" #include "lib/binaryheap.h" #include "miscadmin.h" @@ -60,7 +61,6 @@ #include "storage/buf/bufmgr.h" #include "storage/ipc.h" #include "storage/proc.h" -#include "storage/smgr/smgr.h" #include "storage/smgr/segment.h" #include "storage/standby.h" #include "utils/aiomem.h" @@ -122,9 +122,12 @@ static inline int32 GetPrivateRefCount(Buffer buffer); void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref); static void CheckForBufferLeaks(void); static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg); -static bool ReadBuffer_common_ReadBlock(SMgrRelation smgr, char relpersistence, - ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, bool isExtend, - Block bufBlock, const XLogPhyBlock *pblk); +static bool ReadBuffer_common_ReadBlock(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, + BlockNumber blockNum, ReadBufferMode mode, bool isExtend, Block bufBlock, const XLogPhyBlock *pblk, + bool *need_repair); +static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, + ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit, const XLogPhyBlock *pblk); + /* * Return the PrivateRefCount entry for the passed buffer. 
It is searched @@ -350,13 +353,9 @@ void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref) } } -static Buffer ReadBuffer_common(SMgrRelation reln, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, - ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit, - const XLogPhyBlock *pblk); static void BufferSync(int flags); static uint32 WaitBufHdrUnlocked(BufferDesc* buf); static void WaitIO(BufferDesc* buf); -static bool StartBufferIO(BufferDesc* buf, bool forInput); static void TerminateBufferIO_common(BufferDesc* buf, bool clear_dirty, uint32 set_flag_bits); void shared_buffer_write_error_callback(void* arg); static BufferDesc* BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, @@ -370,8 +369,10 @@ extern void PageRangeBackWrite( uint32 bufferIdx, int32 n, uint32 flags, SMgrRelation reln, int32* bufs_written, int32* bufs_reusable); extern void PageListBackWrite( uint32* bufList, int32 n, uint32 flags, SMgrRelation reln, int32* bufs_written, int32* bufs_reusable); +#ifndef ENABLE_LITE_MODE static volatile BufferDesc* PageListBufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, BufferAccessStrategy strategy, bool* foundPtr); +#endif static bool ConditionalStartBufferIO(BufferDesc* buf, bool forInput); /* @@ -554,6 +555,7 @@ static bool ConditionalStartBufferIO(BufferDesc *buf, bool for_input) * @Return: buffer desc ptr * @See also: */ +#ifndef ENABLE_LITE_MODE static volatile BufferDesc *PageListBufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber fork_num, BlockNumber block_num, BufferAccessStrategy strategy, bool *found) { @@ -774,6 +776,7 @@ static volatile BufferDesc *PageListBufferAlloc(SMgrRelation smgr, char relpersi return NULL; } } +#endif /* * @Description: Prefetch sequential buffers from a database relation fork. @@ -843,6 +846,7 @@ void PageRangePrefetch(Relation reln, ForkNumber fork_num, BlockNumber block_num void PageListPrefetch(Relation reln, ForkNumber fork_num, BlockNumber *block_list, int32 n, uint32 flags = 0, uint32 col = 0) { +#ifndef ENABLE_LITE_MODE AioDispatchDesc_t **dis_list; /* AIO dispatch list */ bool is_local_buf = false; /* local buf flag */ @@ -992,6 +996,7 @@ void PageListPrefetch(Relation reln, ForkNumber fork_num, BlockNumber *block_lis t_thrd.storage_cxt.InProgressAioDispatch = NULL; t_thrd.storage_cxt.InProgressAioDispatchCount = 0; t_thrd.storage_cxt.InProgressAioType = AioUnkown; +#endif } /* @@ -1748,7 +1753,7 @@ Buffer ReadUndoBufferWithoutRelcache(const RelFileNode& rnode, ForkNumber forkNu * parameters. 
*/ Buffer ReadBufferForRemote(const RelFileNode &rnode, ForkNumber fork_num, BlockNumber block_num, ReadBufferMode mode, - BufferAccessStrategy strategy, bool *hit) + BufferAccessStrategy strategy, bool *hit, const XLogPhyBlock *pblk) { SMgrRelation smgr = smgropen(rnode, InvalidBackendId); @@ -1757,7 +1762,8 @@ Buffer ReadBufferForRemote(const RelFileNode &rnode, ForkNumber fork_num, BlockN errmsg("invalid forkNum %d, should be less than %d", fork_num, smgr->md_fdarray_size))); } - return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, fork_num, block_num, mode, strategy, hit, NULL); + + return ReadBuffer_common(smgr, RELPERSISTENCE_PERMANENT, fork_num, block_num, mode, strategy, hit, pblk); } /* @@ -1772,6 +1778,7 @@ Buffer ReadBuffer_common_for_localbuf(RelFileNode rnode, char relpersistence, Fo Block bufBlock; bool found = false; bool isExtend = false; + bool need_repair = false; *hit = false; SMgrRelation smgr = smgropen(rnode, InvalidBackendId); @@ -1891,7 +1898,7 @@ Buffer ReadBuffer_common_for_localbuf(RelFileNode rnode, char relpersistence, Fo bufBlock = LocalBufHdrGetBlock(bufHdr); (void)ReadBuffer_common_ReadBlock(smgr, relpersistence, forkNum, blockNum, mode, - isExtend, bufBlock, NULL); + isExtend, bufBlock, NULL, &need_repair); uint32 buf_state = pg_atomic_read_u32(&bufHdr->state); buf_state |= BM_VALID; @@ -1911,6 +1918,7 @@ Buffer ReadBuffer_common_for_direct(RelFileNode rnode, char relpersistence, Fork Block bufBlock; bool isExtend = false; RedoMemSlot *bufferslot = nullptr; + bool need_repair = false; isExtend = (blockNum == P_NEW); SMgrRelation smgr = smgropen(rnode, InvalidBackendId); @@ -1924,8 +1932,10 @@ Buffer ReadBuffer_common_for_direct(RelFileNode rnode, char relpersistence, Fork Assert(bufBlock != NULL); (void)ReadBuffer_common_ReadBlock(smgr, relpersistence, forkNum, blockNum, mode, - isExtend, bufBlock, NULL); - + isExtend, bufBlock, NULL, &need_repair); + if (need_repair) { + return InvalidBuffer; + } XLogRedoBufferSetStateFunc(bufferslot, BM_VALID); return RedoBufferSlotGetBuffer(bufferslot); } @@ -1936,32 +1946,16 @@ Buffer ReadBuffer_common_for_direct(RelFileNode rnode, char relpersistence, Fork * * 2020-03-05 */ static bool ReadBuffer_common_ReadBlock(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, - BlockNumber blockNum, ReadBufferMode mode, bool isExtend, Block bufBlock, const XLogPhyBlock *pblk) + BlockNumber blockNum, ReadBufferMode mode, bool isExtend, Block bufBlock, const XLogPhyBlock *pblk, + bool *need_repair) { bool needputtodirty = false; if (isExtend) { /* new buffers are zero-filled */ MemSet((char *)bufBlock, 0, BLCKSZ); - ADIO_RUN() - { - /* pass null buffer to lower levels to use fallocate, systables do not use fallocate, - * relation id can distinguish systable or use table. "FirstNormalObjectId". - * but unfortunately , but in standby, there is no relation id, so relation id has no work.
-             * relation file node can not help becasue operation vacuum full or set table space can
-             * change systable file node
-             */
-            if (u_sess->attr.attr_sql.enable_fast_allocate) {
-                smgrextend(smgr, forkNum, blockNum, NULL, false);
-            } else {
-                smgrextend(smgr, forkNum, blockNum, (char *)bufBlock, false);
-            }
-        }
-        ADIO_ELSE()
-        {
-            smgrextend(smgr, forkNum, blockNum, (char *)bufBlock, false);
-        }
-        ADIO_END();
+        smgrextend(smgr, forkNum, blockNum, (char *)bufBlock, false);
+
         /*
          * NB: we're *not* doing a ScheduleBufferTagForWriteback here;
@@ -2018,6 +2012,9 @@ static bool ReadBuffer_common_ReadBlock(SMgrRelation smgr, char relpersistence,
         /* check for garbage data */
         if (rdStatus == SMGR_RD_CRC_ERROR) {
             addBadBlockStat(&smgr->smgr_rnode.node, forkNum);
+            if (!RecoveryInProgress()) {
+                addGlobalRepairBadBlockStat(smgr->smgr_rnode, forkNum, blockNum);
+            }
 
             if (mode == RBM_ZERO_ON_ERROR || u_sess->attr.attr_security.zero_damaged_pages) {
                 ereport(WARNING,
                     (errcode(ERRCODE_DATA_CORRUPTED),
@@ -2033,18 +2030,40 @@ static bool ReadBuffer_common_ReadBlock(SMgrRelation smgr, char relpersistence,
                         relpath(smgr->smgr_rnode, forkNum)),
                     handle_in_client(true)));
 
-                RemoteReadBlock(smgr->smgr_rnode, forkNum, blockNum, (char *)bufBlock);
+                RemoteReadBlock(smgr->smgr_rnode, forkNum, blockNum, (char *)bufBlock, NULL);
 
                 if (PageIsVerified((Page)bufBlock, blockNum)) {
                     needputtodirty = true;
+                    UpdateRepairTime(smgr->smgr_rnode.node, forkNum, blockNum);
                 } else
                     ereport(ERROR,
                         (errcode(ERRCODE_DATA_CORRUPTED),
                         errmsg("invalid page in block %u of relation %s, remote read data corrupted",
                             blockNum, relpath(smgr->smgr_rnode, forkNum))));
-            } else
-                ereport(ERROR,
-                    (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid page in block %u of relation %s",
-                        blockNum, relpath(smgr->smgr_rnode, forkNum))));
+            } else {
+                /* record the bad page and wait for the pagerepair thread to repair it */
+                *need_repair = CheckVerionSupportRepair() &&
+                    (AmStartupProcess() || AmPageRedoWorker()) && IsPrimaryClusterStandbyDN() &&
+                    g_instance.repair_cxt.support_repair;
+                if (*need_repair) {
+                    RepairBlockKey key;
+                    XLogPhyBlock pblk_bak = {0};
+                    key.relfilenode = smgr->smgr_rnode.node;
+                    key.forknum = forkNum;
+                    key.blocknum = blockNum;
+                    if (pblk != NULL) {
+                        pblk_bak = *pblk;
+                    }
+                    RedoPageRepairCallBack(key, pblk_bak);
+                    log_invalid_page(smgr->smgr_rnode.node, forkNum, blockNum, CRC_CHECK_ERROR, pblk);
+                    ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED),
+                        errmsg("invalid page in block %u of relation %s",
+                            blockNum, relpath(smgr->smgr_rnode, forkNum))));
+                    return false;
+                }
+                ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED),
+                    errmsg("invalid page in block %u of relation %s",
+                        blockNum, relpath(smgr->smgr_rnode, forkNum))));
+            }
         }
 
         PageDataDecryptIfNeed((Page)bufBlock);
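A note on the repair path added above: on a CRC failure, `ReadBuffer_common_ReadBlock` now reports a deferrable bad page through the `need_repair` out-parameter instead of raising an ERROR, and its callers bail out with `InvalidBuffer` so the pagerepair thread can fix the block. A minimal standalone sketch of that contract, with illustrative names only (none of these are the kernel's real APIs):

```
#include <cstdint>
#include <iostream>
#include <stdexcept>

enum class ReadStatus { Ok, CrcError };

// Stand-in for the patch's gating condition (version check, redo role,
// standby-cluster primary, support_repair switch) -- all assumed here.
static bool CanDeferToPageRepair(bool inRedo, bool repairSupported)
{
    return inRedo && repairSupported;
}

// Mirrors the contract of ReadBuffer_common_ReadBlock: on a CRC error either
// defer to the repair thread (set *needRepair, return false) or fail hard.
static bool ReadBlockChecked(uint32_t blockNum, ReadStatus st, bool inRedo,
                             bool repairSupported, bool *needRepair)
{
    *needRepair = false;
    if (st != ReadStatus::CrcError) {
        return true; /* page verified, caller may use the buffer */
    }
    if (CanDeferToPageRepair(inRedo, repairSupported)) {
        *needRepair = true; /* cf. the patch: log_invalid_page + WARNING, no ERROR */
        std::cerr << "WARNING: invalid page in block " << blockNum << "\n";
        return false;
    }
    throw std::runtime_error("invalid page: data corrupted"); /* old behavior */
}

int main()
{
    bool needRepair = false;
    if (!ReadBlockChecked(42, ReadStatus::CrcError, true, true, &needRepair) && needRepair) {
        /* cf. ReadBuffer_common: release locks and pins, then return InvalidBuffer */
        std::cout << "caller returns InvalidBuffer; repair thread takes over\n";
    }
    return 0;
}
```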
@@ -2075,6 +2094,7 @@ static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumb
     bool found = false;
     bool isExtend = false;
     bool isLocalBuf = SmgrIsTemp(smgr);
+    bool need_repair = false;
 
     *hit = false;
 
@@ -2102,7 +2122,6 @@ static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumb
      * head may be re-used, i.e., the relfilenode may be reused. Thus the
      * smgrnblocks interface can not be used on standby. Just skip this check.
      */
-#ifndef ENABLE_MULTIPLE_NODES
     } else if (RecoveryInProgress() && !IsSegmentFileNode(smgr->smgr_rnode.node)) {
         BlockNumber totalBlkNum = smgrnblocks_cached(smgr, forkNum);
@@ -2114,7 +2133,6 @@ static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumb
         if (blockNum >= totalBlkNum) {
             return InvalidBuffer;
         }
-#endif
     }
 
     if (isLocalBuf) {
         bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
@@ -2255,7 +2273,13 @@ static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumb
     bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
 
     bool needputtodirty = ReadBuffer_common_ReadBlock(smgr, relpersistence, forkNum, blockNum,
-                                                      mode, isExtend, bufBlock, pblk);
+                                                      mode, isExtend, bufBlock, pblk, &need_repair);
+    if (need_repair) {
+        LWLockRelease(((BufferDesc *)bufHdr)->io_in_progress_lock);
+        UnpinBuffer(bufHdr, true);
+        AbortBufferIO();
+        return InvalidBuffer;
+    }
     if (needputtodirty) {
         /* set BM_DIRTY to overwrite later */
         uint32 old_buf_state = LockBufHdr(bufHdr);
@@ -2510,6 +2534,7 @@ static BufferDesc *BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumbe
             (void)sched_yield();
             continue;
         }
+
         /*
          * We need a share-lock on the buffer contents to write it out
          * (else we might write invalid data, eg because someone else is
@@ -3694,6 +3719,7 @@ bool BgBufferSync(WritebackContext *wb_context)
     num_written = 0;
     reusable_buffers = reusable_buffers_est;
 
+#ifndef ENABLE_LITE_MODE
     if (AioCompltrIsReady() == true && g_instance.attr.attr_storage.enable_adio_function) {
         /* Execute the LRU scan
          *
@@ -3741,6 +3767,7 @@ bool BgBufferSync(WritebackContext *wb_context)
             }
         }
     } else {
+#endif
         /* Make sure we can handle the pin inside SyncOneBuffer */
         ResourceOwnerEnlargeBuffers(t_thrd.utils_cxt.CurrentResourceOwner);
         /* Execute the LRU scan */
@@ -3764,7 +3791,9 @@ bool BgBufferSync(WritebackContext *wb_context)
         } else {
         }
     }
+#ifndef ENABLE_LITE_MODE
     }
+#endif
 
     u_sess->stat_cxt.BgWriterStats->m_buf_written_clean += num_written;
 
@@ -4127,8 +4156,7 @@ void CheckPointBuffers(int flags, bool doFullCheckpoint)
      * to data file. if IsBootstrapProcessingMode or pagewriter thread is not started also need call
      * BufferSync to flush dirty page.
      */
-    if (!ENABLE_INCRE_CKPT || IsBootstrapProcessingMode() ||
-        pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) < 1) {
+    if (USE_CKPT_THREAD_SYNC) {
         BufferSync(flags);
     } else if (ENABLE_INCRE_CKPT && doFullCheckpoint) {
         long waitCount = 0;
@@ -4147,10 +4175,6 @@ void CheckPointBuffers(int flags, bool doFullCheckpoint)
         /* sleep 1 ms wait the dirty page flush */
         long sleepTime = ONE_MILLISECOND * MILLISECOND_TO_MICROSECOND;
         pg_usleep(sleepTime);
-        /* do smgrsync in case dw file recycle of pagewriter is being blocked */
-        if (dw_enabled()) {
-            CheckPointSyncWithAbsorption();
-        }
         if (((uint32)flags & CHECKPOINT_IS_SHUTDOWN) && !IsInitdb) {
             /*
              * since we use sleep time as counter so there will be some error in calculate the interval,
@@ -4170,17 +4194,28 @@ void CheckPointBuffers(int flags, bool doFullCheckpoint)
     }
     g_instance.ckpt_cxt_ctl->flush_all_dirty_page = false;
 
-    /* When finish shutdown checkpoint, pagewriter thread can exit. */
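A note on the sync refactor below: CheckPointBuffers now routes both the dirty-page flush and the file sync through one predicate. A self-contained sketch, assuming `USE_CKPT_THREAD_SYNC` simply folds the three-way condition the removed inline check spelled out (the real macro definition is not shown in this patch and may differ):

```
#include <cstdint>
#include <iostream>

/* Stubs standing in for the kernel routines named in this hunk. */
static void ProcessSyncRequests() { std::cout << "checkpoint thread fsyncs itself\n"; }
static void PageWriterSync()      { std::cout << "pagewriter threads handle the fsync\n"; }
static void dw_truncate()         { std::cout << "double-write file recycled\n"; }

/* Assumed expansion of USE_CKPT_THREAD_SYNC, reconstructed from the removed
 * inline condition; treat it as illustrative, not the kernel definition. */
static bool UseCkptThreadSync(bool enableIncreCkpt, bool inBootstrap, uint32_t pageWriterCount)
{
    return !enableIncreCkpt || inBootstrap || pageWriterCount < 1;
}

int main()
{
    /* incremental checkpoint with live pagewriters: the sync is delegated */
    if (UseCkptThreadSync(true, false, 4)) {
        ProcessSyncRequests();
    } else {
        PageWriterSync();
        dw_truncate();
    }
    return 0;
}
```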
+    t_thrd.xlog_cxt.CheckpointStats->ckpt_sync_t = GetCurrentTimestamp();
+    TRACE_POSTGRESQL_BUFFER_CHECKPOINT_SYNC_START();
+    /*
+     * If enable_incremental_checkpoint is off, the checkpoint thread calls ProcessSyncRequests to handle
+     * the sync; ProcessSyncRequests is also needed when IsBootstrapProcessingMode or the pagewriter thread
+     * is not started.
+     */
+    if (USE_CKPT_THREAD_SYNC) {
+        ProcessSyncRequests();
+    } else {
+        /* incremental checkpoint: request the pagewriter to handle the file sync */
+        PageWriterSync();
+        dw_truncate();
+    }
+
+    /* When the shutdown checkpoint finishes, the pagewriter thread can exit after completing the file sync. */
     if (((uint32)flags & CHECKPOINT_IS_SHUTDOWN)) {
         g_instance.ckpt_cxt_ctl->page_writer_can_exit = true;
     }
 
-    t_thrd.xlog_cxt.CheckpointStats->ckpt_sync_t = GetCurrentTimestamp();
-    TRACE_POSTGRESQL_BUFFER_CHECKPOINT_SYNC_START();
-    CheckPointSyncWithAbsorption();
+
     t_thrd.xlog_cxt.CheckpointStats->ckpt_sync_end_t = GetCurrentTimestamp();
     TRACE_POSTGRESQL_BUFFER_CHECKPOINT_DONE();
 
-    dw_truncate();
     gstrace_exit(GS_TRC_ID_CheckPointBuffers);
 }
 
@@ -4308,7 +4343,7 @@ char* PageDataEncryptForBuffer(Page page, BufferDesc *bufdesc, bool is_segbuf)
  * If the caller has an smgr reference for the buffer's relation, pass it
  * as the second parameter. If not, pass NULL.
  */
-void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod)
+void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod, bool skipFsync)
 {
     bool logicalpage = false;
     ErrorContextCallback errcontext;
@@ -4416,7 +4451,7 @@ void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod)
         seg_physical_write(spc, fakenode, bufferinfo.blockinfo.forknum, bufdesc->seg_blockno, bufToWrite, false);
     } else {
         SegmentCheck(!IsSegmentFileNode(bufdesc->tag.rnode));
-        smgrwrite(reln, bufferinfo.blockinfo.forknum, bufferinfo.blockinfo.blkno, bufToWrite, false);
+        smgrwrite(reln, bufferinfo.blockinfo.forknum, bufferinfo.blockinfo.blkno, bufToWrite, skipFsync);
     }
 
     if (u_sess->attr.attr_common.track_io_timing) {
@@ -4608,6 +4643,69 @@ XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
     return lsn;
 }
 
+void DropSegRelNodeSharedBuffer(RelFileNode node, ForkNumber forkNum)
+{
+    for (int i = 0; i < g_instance.attr.attr_storage.NBuffers; i++) {
+        BufferDesc *buf_desc = GetBufferDescriptor(i);
+        uint32 buf_state;
+
+        if (buf_desc->seg_fileno != node.relNode || buf_desc->tag.rnode.spcNode != node.spcNode ||
+            buf_desc->tag.rnode.dbNode != node.dbNode) {
+            continue;
+        }
+
+        buf_state = LockBufHdr(buf_desc);
+        if (buf_desc->seg_fileno == node.relNode && buf_desc->tag.rnode.spcNode == node.spcNode &&
+            buf_desc->tag.rnode.dbNode == node.dbNode && buf_desc->tag.forkNum == forkNum) {
+            InvalidateBuffer(buf_desc); /* releases spinlock */
+        } else {
+            UnlockBufHdr(buf_desc, buf_state);
+        }
+    }
+
+    for (int i = SegmentBufferStartID; i < TOTAL_BUFFER_NUM; i++) {
+        BufferDesc *buf_desc = GetBufferDescriptor(i);
+        uint32 buf_state;
+        /*
+         * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
+         * and saves some cycles.
+         */
+        if (buf_desc->tag.rnode.spcNode != node.spcNode || buf_desc->tag.rnode.dbNode != node.dbNode) {
+            continue;
+        }
+
+        buf_state = LockBufHdr(buf_desc);
+        if (buf_desc->tag.rnode.spcNode == node.spcNode && buf_desc->tag.rnode.dbNode == node.dbNode &&
+            buf_desc->tag.rnode.relNode == node.relNode && buf_desc->tag.forkNum == forkNum) {
+            InvalidateBuffer(buf_desc); /* releases spinlock */
+        } else {
+            UnlockBufHdr(buf_desc, buf_state);
+        }
+    }
+}
+
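Both DropSegRelNodeSharedBuffer above and RangeForgetBuffer below follow the same scan discipline as the existing DropRelFileNodeBuffers: a cheap unlocked tag precheck, then the header lock and a re-check before invalidating. A simplified standalone sketch of that pattern, with stand-in types (not the kernel's buffer descriptors):

```
#include <array>
#include <cstdint>
#include <mutex>

struct BufTag { uint32_t rel; uint32_t fork; uint32_t block; };
struct BufferDescSketch { BufTag tag{}; std::mutex hdr_lock; bool valid = true; };

constexpr uint32_t kNBuffers = 8;
static std::array<BufferDescSketch, kNBuffers> g_buffers;

/* cf. RangeForgetBuffer: drop blocks in [first, end) of one relation fork */
static void RangeForget(uint32_t rel, uint32_t fork, uint32_t first, uint32_t end)
{
    for (auto &buf : g_buffers) {
        /* unlocked precheck: a stale hit is harmless, it is re-checked below */
        if (buf.tag.rel != rel) {
            continue;
        }
        std::lock_guard<std::mutex> lock(buf.hdr_lock);
        if (buf.tag.rel == rel && buf.tag.fork == fork &&
            buf.tag.block >= first && buf.tag.block < end) {
            buf.valid = false; /* stands in for InvalidateBuffer() */
        }
    }
}

int main()
{
    for (uint32_t i = 0; i < kNBuffers; ++i) {
        g_buffers[i].tag = {1, 0, i};
    }
    RangeForget(1, 0, 2, 5); /* invalidates blocks 2, 3 and 4 */
    return 0;
}
```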
+/* RangeForgetBuffer
+ * Invalidate all shared buffers of the given relation fork whose block
+ * number lies in [firstDelBlock, endDelBlock).
+ */
+void RangeForgetBuffer(RelFileNode node, ForkNumber forkNum, BlockNumber firstDelBlock,
+    BlockNumber endDelBlock)
+{
+    for (int i = 0; i < g_instance.attr.attr_storage.NBuffers; i++) {
+        BufferDesc *buf_desc = GetBufferDescriptor(i);
+        uint32 buf_state;
+
+        if (!RelFileNodeEquals(buf_desc->tag.rnode, node))
+            continue;
+
+        buf_state = LockBufHdr(buf_desc);
+        if (RelFileNodeEquals(buf_desc->tag.rnode, node) && buf_desc->tag.forkNum == forkNum &&
+            buf_desc->tag.blockNum >= firstDelBlock && buf_desc->tag.blockNum < endDelBlock)
+            InvalidateBuffer(buf_desc); /* releases spinlock */
+        else
+            UnlockBufHdr(buf_desc, buf_state);
+    }
+}
+
 void DropRelFileNodeShareBuffers(RelFileNode node, ForkNumber forkNum, BlockNumber firstDelBlock)
 {
     int i;
@@ -4732,21 +4830,17 @@ void DropRelFileNodeAllBuffersUsingHash(HTAB *relfilenode_hashtbl)
             rd_node_bucketdir.bucketNode = SegmentBktId;
             hash_search(relfilenode_hashtbl, &(rd_node_bucketdir), HASH_FIND, &found);
             find_dir = found;
-
-            if (!find_dir) {
+            if (!found) {
                 hash_search(relfilenode_hashtbl, &(rd_node_snapshot), HASH_FIND, &found);
             }
         } else {
             /* no bucket buffer */
             hash_search(relfilenode_hashtbl, &rd_node_snapshot, HASH_FIND, &found);
         }
-
         if (!found) {
             continue;
         }
-
         buf_state = LockBufHdr(buf_desc);
-
         if (find_dir) {
             // matching the bucket dir
             equal = RelFileNodeRelEquals(buf_desc->tag.rnode, rd_node_snapshot);
@@ -5663,51 +5757,6 @@ bool ConditionalLockBufferForCleanup(Buffer buffer)
     return false;
 }
 
-/*
- * IsBufferCleanupOK - as above, but we already have the lock
- *
- * Check whether it's OK to perform cleanup on a buffer we've already
- * locked. If we observe that the pin count is 1, our exclusive lock
- * happens to be a cleanup lock, and we can proceed with anything that
- * would have been allowable had we sought a cleanup lock originally.
- */
-bool IsBufferCleanupOK(Buffer buffer)
-{
-    BufferDesc *bufHdr;
-    uint32 buf_state;
-
-    Assert(BufferIsValid(buffer));
-
-    if (BufferIsLocal(buffer)) {
-        /* There should be exactly one pin */
-        if (u_sess->storage_cxt.LocalRefCount[-buffer - 1] != 1)
-            return false;
-        /* Nobody else to wait for */
-        return true;
-    }
-
-    /* There should be exactly one local pin */
-    if (GetPrivateRefCount(buffer) != 1)
-        return false;
-
-    bufHdr = GetBufferDescriptor(buffer - 1);
-
-    /* caller must hold exclusive lock on buffer */
-    Assert(LWLockHeldByMeInMode(bufHdr->content_lock, LW_EXCLUSIVE));
-
-    buf_state = LockBufHdr(bufHdr);
-
-    Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
-    if (BUF_STATE_GET_REFCOUNT(buf_state) == 1) {
-        /* pincount is OK. */
-        UnlockBufHdr(bufHdr, buf_state);
-        return true;
-    }
-
-    UnlockBufHdr(bufHdr, buf_state);
-    return false;
-}
-
 /*
  * Functions for buffer I/O handling
  *
@@ -5796,7 +5845,7 @@ void CheckIOState(volatile void *buf_desc)
  * Returns TRUE if we successfully marked the buffer as I/O busy,
  * FALSE if someone else already did the work.
 */
-static bool StartBufferIO(BufferDesc *buf, bool for_input)
+bool StartBufferIO(BufferDesc *buf, bool for_input)
 {
     uint32 buf_state;
@@ -6094,11 +6143,8 @@ void shared_buffer_write_error_callback(void *arg)
     /* Buffer is pinned, so we can read the tag without locking the spinlock */
     if (buf_desc != NULL) {
         char *path = relpathperm(((BufferDesc *)buf_desc)->tag.rnode, ((BufferDesc *)buf_desc)->tag.forkNum);
-        if (((BufferDesc *)buf_desc)->tag.rnode.opt) {
-            (void)errcontext("writing block %u of relation %s_pcd", buf_desc->tag.blockNum, path);
-        } else {
-            (void)errcontext("writing block %u of relation %s", buf_desc->tag.blockNum, path);
-        }
+
+        (void)errcontext("writing block %u of relation %s", buf_desc->tag.blockNum, path);
         pfree(path);
     }
 }
@@ -6429,7 +6475,121 @@ static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
     }
 }
 
-void RemoteReadBlock(const RelFileNodeBackend &rnode, ForkNumber fork_num, BlockNumber block_num, char *buf)
+
+/* RemoteReadFile
+ * the primary DN uses this function to repair a file.
+ */
+void RemoteReadFile(RemoteReadFileKey *key, char *buf, uint32 size, int timeout, uint32* remote_size)
+{
+    XLogRecPtr cur_lsn = InvalidXLogRecPtr;
+    int retry_times = 0;
+    char *remote_address = NULL;
+    XLogRecPtr remote_lsn = InvalidXLogRecPtr;
+    char remote_address1[MAXPGPATH] = {0}; /* remote_address1[0] = '\0'; */
+    char remote_address2[MAXPGPATH] = {0}; /* remote_address2[0] = '\0'; */
+
+    /* get remote address */
+    GetRemoteReadAddress(remote_address1, remote_address2, MAXPGPATH);
+    remote_address = remote_address1;
+
+    /* primary get the xlog insert loc */
+    cur_lsn = GetXLogInsertRecPtr();
+
+retry:
+    if (remote_address[0] == '\0' || remote_address[0] == '@') {
+        ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmodule(MOD_REMOTE), errmsg("remote not available")));
+    }
+
+    ereport(LOG, (errmodule(MOD_REMOTE), errmsg("remote read page, file %s block start is %d from %s",
+                                                relpathperm(key->relfilenode, key->forknum), key->blockstart,
+                                                remote_address)));
+
+    PROFILING_REMOTE_START();
+
+    int ret_code = RemoteGetFile(remote_address, key, cur_lsn, size, buf, &remote_lsn, remote_size, timeout);
+
+    PROFILING_REMOTE_END_READ(size, (ret_code == REMOTE_READ_OK));
+
+    if (ret_code != REMOTE_READ_OK) {
+        if (IS_DN_DUMMY_STANDYS_MODE() || retry_times >= 1) {
+            ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmodule(MOD_REMOTE),
+                            errmsg("remote read failed from %s, %s", remote_address, RemoteReadErrMsg(ret_code))));
+        } else {
+            ereport(WARNING,
+                    (errmodule(MOD_REMOTE),
+                     errmsg("remote read failed from %s, %s. try another", remote_address, RemoteReadErrMsg(ret_code)),
+                     handle_in_client(true)));
+
+            /* Check interrupts */
+            CHECK_FOR_INTERRUPTS();
+
+            remote_address = remote_address2;
+            ++retry_times;
+            goto retry; /* jump out retry_times >= 1 */
+        }
+    }
+
+    return;
+}
+
+/* RemoteReadFileSize
+ * the primary DN uses this function to get the remote file size.
+ */ +int64 RemoteReadFileSize(RemoteReadFileKey *key, int timeout) +{ + int retry_times = 0; + int64 size = 0; + char *remote_address = NULL; + XLogRecPtr cur_lsn = InvalidXLogRecPtr; + char remote_address1[MAXPGPATH] = {0}; /* remote_address1[0] = '\0'; */ + char remote_address2[MAXPGPATH] = {0}; /* remote_address2[0] = '\0'; */ + + /* get remote address */ + GetRemoteReadAddress(remote_address1, remote_address2, MAXPGPATH); + remote_address = remote_address1; + + /* primary get the xlog insert loc */ + cur_lsn = GetXLogInsertRecPtr(); + +retry: + if (remote_address[0] == '\0' || remote_address[0] == '@') { + ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmodule(MOD_REMOTE), errmsg("remote not available"))); + } + + ereport(LOG, (errmodule(MOD_REMOTE), errmsg("remote read page size, file %s from %s", + relpathperm(key->relfilenode, key->forknum), remote_address))); + + PROFILING_REMOTE_START(); + + int ret_code = RemoteGetFileSize(remote_address, key, cur_lsn, &size, timeout); + + PROFILING_REMOTE_END_READ(sizeof(uint64) + sizeof(uint64), (ret_code == REMOTE_READ_OK)); + + if (ret_code != REMOTE_READ_OK) { + if (IS_DN_DUMMY_STANDYS_MODE() || retry_times >= 1) { + ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmodule(MOD_REMOTE), + errmsg("remote read failed from %s, %s", remote_address, RemoteReadErrMsg(ret_code)))); + } else { + ereport(WARNING, + (errmodule(MOD_REMOTE), + errmsg("remote read failed from %s, %s. try another", remote_address, RemoteReadErrMsg(ret_code)), + handle_in_client(true))); + + /* Check interrupts */ + CHECK_FOR_INTERRUPTS(); + + remote_address = remote_address2; + ++retry_times; + goto retry; /* jump out retry_times >= 1 */ + } + } + + return size; +} + + +void RemoteReadBlock(const RelFileNodeBackend &rnode, ForkNumber fork_num, BlockNumber block_num, + char *buf, const XLogPhyBlock *pblk, int timeout) { /* get current xlog insert lsn */ XLogRecPtr cur_lsn = GetInsertRecPtr(); @@ -6443,7 +6603,7 @@ void RemoteReadBlock(const RelFileNodeBackend &rnode, ForkNumber fork_num, Block int retry_times = 0; retry: - if (remote_address[0] == '\0' || remote_address[0] == ':') { + if (remote_address[0] == '\0' || remote_address[0] == '@') { ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmodule(MOD_REMOTE), errmsg("remote not available"))); } @@ -6452,8 +6612,12 @@ retry: PROFILING_REMOTE_START(); - int ret_code = RemoteGetPage(remote_address, rnode.node.spcNode, rnode.node.dbNode, rnode.node.relNode, - rnode.node.bucketNode, rnode.node.opt, fork_num, block_num, BLCKSZ, cur_lsn, buf); + RepairBlockKey key; + key.relfilenode = rnode.node; + key.forknum = fork_num; + key.blocknum = block_num; + + int ret_code = RemoteGetPage(remote_address, &key, BLCKSZ, cur_lsn, buf, pblk, timeout); PROFILING_REMOTE_END_READ(BLCKSZ, (ret_code == REMOTE_READ_OK)); diff --git a/src/gausskernel/storage/buffer/freelist.cpp b/src/gausskernel/storage/buffer/freelist.cpp index f8c0e7ae1..af45abcab 100644 --- a/src/gausskernel/storage/buffer/freelist.cpp +++ b/src/gausskernel/storage/buffer/freelist.cpp @@ -453,6 +453,7 @@ void StrategyInitialize(bool init) } } +const int MIN_REPAIR_FILE_SLOT_NUM = 32; /* ---------------------------------------------------------------- * Backend-private buffer ring management * ---------------------------------------------------------------- @@ -488,7 +489,9 @@ BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype) ring_size = g_instance.attr.attr_storage.NBuffers / 32 / Max(g_instance.attr.attr_storage.autovacuum_max_workers, 1); break; - + case 
BAS_REPAIR: + ring_size = Min(g_instance.attr.attr_storage.NBuffers, MIN_REPAIR_FILE_SLOT_NUM); + break; default: ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), (errmsg("unrecognized buffer access strategy: %d", (int)btype)))); diff --git a/src/gausskernel/storage/bulkload/dist_fdw--1.0.sql b/src/gausskernel/storage/bulkload/dist_fdw--1.0.sql index f7e468eba..ac35ae1cd 100644 --- a/src/gausskernel/storage/bulkload/dist_fdw--1.0.sql +++ b/src/gausskernel/storage/bulkload/dist_fdw--1.0.sql @@ -3,12 +3,12 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION dist_fdw" to load this file. \quit -CREATE FUNCTION dist_fdw_handler() +CREATE FUNCTION pg_catalog.dist_fdw_handler() RETURNS fdw_handler AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; -CREATE FUNCTION dist_fdw_validator(text[], oid) +CREATE FUNCTION pg_catalog.dist_fdw_validator(text[], oid) RETURNS void AS 'MODULE_PATHNAME' LANGUAGE C STRICT NOT FENCED; diff --git a/src/gausskernel/storage/bulkload/dist_fdw.cpp b/src/gausskernel/storage/bulkload/dist_fdw.cpp index b315a1d24..44155d30f 100644 --- a/src/gausskernel/storage/bulkload/dist_fdw.cpp +++ b/src/gausskernel/storage/bulkload/dist_fdw.cpp @@ -224,8 +224,10 @@ extern void SetFixedAlignment(TupleDesc tupDesc, Relation rel, FixFormatter *for extern void VerifyEncoding(int encoding); extern void GetDistImportOptions(Oid relOid, DistImportPlanState *planstate, ForeignOptions *fOptions = NULL); +#ifndef ENABLE_LITE_MODE static void assignOBSTaskToDataNode(List *urllist, List **totalTask, List *dnNames, DistImportPlanState *planstate, int64 *fileNum = NULL); +#endif static void assignTaskToDataNodeInSharedMode(List *urllist, List **totalTask, List *dnNames); static void assignTaskToDataNodeInNormalMode(List *urllist, List **totalTask, List *dnNames, int dop); @@ -235,6 +237,7 @@ extern BlockNumber getPageCountForFt(void *additionalData); List *getOBSFileList(List *urllist, bool encrypt, const char *access_key, const char *secret_access_key, bool isAnalyze); +#ifndef ENABLE_LITE_MODE /* * In OBS parallel data loading case, we may have # of datanodes not * equal to # of objects, as one object can only be assign to one @@ -242,6 +245,7 @@ List *getOBSFileList(List *urllist, bool encrypt, const char *access_key, const * handlers basing on num_processed */ static void assignOBSFileToDataNode(List *urllist, List **totalTask, List *dnNames); +#endif /* * Foreign-data wrapper handler function: return a struct with pointers @@ -429,6 +433,7 @@ static bool distAnalyzeForeignTable(Relation relation, AcquireSampleRowsFunc *fu return true; } +#ifndef ENABLE_LITE_MODE /** * @Description: Scheduler file for dist obs foreign table. * @in foreignTableId, the given foreign table Oid. @@ -479,6 +484,7 @@ List *CNSchedulingForDistOBSFt(Oid foreignTableId) } return totalTask; } +#endif /** * @Description: Build the related scanState information. 
@@ -680,6 +686,7 @@ static int distAcquireSampleRows(Relation relation, int logLevel, HeapTuple *sam
     MemoryContextDelete(tupleContext);
     pfree(scanTupleSlot->tts_values);
     pfree(scanTupleSlot->tts_isnull);
+    pfree_ext(scanTupleSlot->tts_lobPointers);
     ForeignScan *fscan = (ForeignScan *)scanState->ss.ps.plan;
     List *workList = (List *)(((DefElem *)linitial(fscan->fdw_private))->arg);
     list_free(workList);
@@ -1058,6 +1065,7 @@ List *assignFileSegmentList(List *segmentlist, List *dnNames)
     return totalTask;
 }
 
+#ifndef ENABLE_LITE_MODE
 /*
  * @Description: get all matched files in obs for each url
  * @IN urllist: obs url list
@@ -1187,6 +1195,7 @@ static void assignOBSTaskToDataNode(List *urllist, List **totalTask, List *dnNam
 
     pfree(obs_file_list);
 }
+#endif
 
 /*
  * @Description: assign task to each data node in shared mode
@@ -1304,9 +1313,13 @@ List *assignTaskToDataNode(List *urllist, ImportMode mode, List *nodeList, int d
     /* check it is obs source */
     const char *first_url = strVal(lfirst(list_head(urllist)));
     if (is_obs_protocol(first_url)) {
+#ifndef ENABLE_LITE_MODE
         assignOBSTaskToDataNode(urllist, &totalTask, dnNames, planstate, fileNum);
         list_free(dnNames);
         return totalTask;
+#else
+        FEATURE_ON_LITE_MODE_NOT_SUPPORTED();
+#endif
     }
 
     if (IS_SHARED_MODE(mode)) {
@@ -1835,7 +1848,7 @@ static char *distExportNextFileName(const char *abspath, const char *relname, co
     errno_t rc = EOK;
     uint32 segno = t_thrd.bulk_cxt.distExportNextSegNo++;
     Assert(t_thrd.bulk_cxt.distExportCurrXid != 0);
-    rc = snprintf_s(temp, MAX_PATH_LEN, MAX_PATH_LEN - 1, "%s%s_%lu_%s_%d.%s", abspath, relname,
+    rc = snprintf_s(temp, MAX_PATH_LEN, MAX_PATH_LEN - 1, "%s%s_%lu_%s_%u.%s", abspath, relname,
                     t_thrd.bulk_cxt.distExportCurrXid, t_thrd.bulk_cxt.distExportTimestampStr, segno, suffix);
     securec_check_ss(rc, "\0", "\0");
diff --git a/src/gausskernel/storage/bulkload/foreignroutine.cpp b/src/gausskernel/storage/bulkload/foreignroutine.cpp
index b00b58806..ce506a572 100644
--- a/src/gausskernel/storage/bulkload/foreignroutine.cpp
+++ b/src/gausskernel/storage/bulkload/foreignroutine.cpp
@@ -1569,7 +1569,9 @@ retry:
     CHECK_FOR_INTERRUPTS();
     (void)ExecClearTuple(slot);
     MemoryContextReset(node->scanMcxt);
+#ifndef ENABLE_LITE_MODE
     SetObsMemoryContext(((CopyState)importState)->copycontext);
+#endif
     ReportIllegalCharExceptionThreshold();
 
     PG_TRY();
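The recurring pattern in the bulkload, dfs and cstore hunks of this patch is the same: OBS- and AIO-dependent code is compiled out of lite builds with `#ifndef ENABLE_LITE_MODE`, and any runtime entry point that could still reach the feature fails fast via `FEATURE_ON_LITE_MODE_NOT_SUPPORTED()`. A minimal sketch of that pattern; the macro body and messages here are stand-ins, since the real macro (which reports through ereport) is not shown in this diff:

```
#include <iostream>
#include <stdexcept>

#ifndef ENABLE_LITE_MODE
static void ScheduleObsLoad() { std::cout << "dispatch OBS load tasks\n"; }
#endif

/* Illustrative stand-in for FEATURE_ON_LITE_MODE_NOT_SUPPORTED() */
#define LITE_MODE_NOT_SUPPORTED_SKETCH() \
    throw std::runtime_error("feature not supported in lite mode")

static void AssignTask(bool isObsSource)
{
    if (isObsSource) {
#ifndef ENABLE_LITE_MODE
        ScheduleObsLoad(); /* full build: real OBS scheduling */
#else
        LITE_MODE_NOT_SUPPORTED_SKETCH(); /* lite build: fail fast at runtime */
#endif
    }
    /* non-OBS sources take the same path in both builds */
}

int main()
{
    try {
        AssignTask(true); /* compile with and without -DENABLE_LITE_MODE */
    } catch (const std::exception &e) {
        std::cerr << e.what() << "\n";
    }
    return 0;
}
```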
diff --git a/src/gausskernel/storage/bulkload/parser.cpp b/src/gausskernel/storage/bulkload/parser.cpp index 7f7c4dd07..e716eb5e1 100644 --- a/src/gausskernel/storage/bulkload/parser.cpp +++ b/src/gausskernel/storage/bulkload/parser.cpp @@ -98,9 +98,11 @@ static void GetFileHeader(WritableParser* self, const char* path); #define parser_securec_check(rc) securec_check(rc, "\0", "\0") #define parser_securec_check_ss(rc) securec_check_ss(rc, "\0", "\0") +#ifndef ENABLE_LITE_MODE static size_t SourceRead_OBS(Source* self, void* buffer, size_t len); static bool SourceNext_OBS(Source* self); #endif +#endif static Source* CreateSource(const FileList* files, SourceType sourcetype); static void DestroyParser(Parser* self); @@ -464,8 +466,12 @@ void Source::SourceInit(bool isWrite) SourceNext = SourceNext_File; #else if (m_sourcetype == SOURCE_TYPE_OBS) { +#ifndef ENABLE_LITE_MODE SourceRead = SourceRead_OBS; SourceNext = SourceNext_OBS; +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif } else { SourceRead = SourceRead_File; SourceNext = SourceNext_File; @@ -1296,8 +1302,12 @@ int GDS::LineBuffer::AppendLine(const char* buf, int buf_len, bool isComplete) parser_log(LEVEL_ERROR, "GDS max line size %d is exceeded.", (int)MaxAllocSize); } #else +#ifndef ENABLE_LITE_MODE /* for OBS, append the overload data in overload buffer */ SaveOverloadBuf(m_overload_buf, buf, buf_len, isComplete); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +#endif #endif /* * Here the curent overload buffer is already done so the actual buf_len can be returned. @@ -1460,6 +1470,7 @@ int GDS::LineBuffer::SendOverloadBuf(evbuffer* dest, const char* buf, int buf_le #endif #ifdef OBS_SERVER +#ifndef ENABLE_LITE_MODE static size_t SourceRead_OBS(Source* self, void* buffer, size_t len) { size_t nread = 0; @@ -1679,5 +1690,5 @@ void GDS::LineBuffer::SaveOverloadBuf(StringInfo dest, const char* buf, int buf_ MemoryContextSwitchTo(oldcontext); } - +#endif #endif diff --git a/src/gausskernel/storage/bulkload/vecforeignroutine.cpp b/src/gausskernel/storage/bulkload/vecforeignroutine.cpp index f9a054a23..f7e46dafa 100644 --- a/src/gausskernel/storage/bulkload/vecforeignroutine.cpp +++ b/src/gausskernel/storage/bulkload/vecforeignroutine.cpp @@ -124,7 +124,9 @@ VectorBatch *distExecVecImport(VecForeignScanState *node) MemoryContextReset(scanMcxt); oldMemoryContext = MemoryContextSwitchTo(scanMcxt); +#ifndef ENABLE_LITE_MODE SetObsMemoryContext(((CopyState)importState)->copycontext); +#endif for (batch->m_rows = 0; batch->m_rows < BatchMaxSize; batch->m_rows++) { retry: PG_TRY(); diff --git a/src/gausskernel/storage/cstore/cstore_am.cpp b/src/gausskernel/storage/cstore/cstore_am.cpp index 6a4b0034c..41cde3ce2 100644 --- a/src/gausskernel/storage/cstore/cstore_am.cpp +++ b/src/gausskernel/storage/cstore/cstore_am.cpp @@ -463,6 +463,7 @@ void CStore::Destroy() */ void CStore::CUPrefetch(CUDesc* cudesc, int col, AioDispatchCUDesc_t** dList, int& count, File* vfdList) { +#ifndef ENABLE_LITE_MODE CU* cu_ptr = NULL; bool found = false; int slotId = CACHE_BLOCK_INVALID_IDX; @@ -589,6 +590,7 @@ void CStore::CUPrefetch(CUDesc* cudesc, int col, AioDispatchCUDesc_t** dList, in pgstat_count_cu_hdd_asyn(m_relation, tmp_count); } return; +#endif } /* @@ -635,6 +637,7 @@ void CStore::CUListPrefetch() } } if (t_thrd.cstore_cxt.InProgressAioCUDispatchCount > 0) { +#ifndef ENABLE_LITE_MODE int tmp_count = t_thrd.cstore_cxt.InProgressAioCUDispatchCount; HOLD_INTERRUPTS(); @@ -646,6 +649,7 @@ void CStore::CUListPrefetch() // stat cu hdd asyn read 
pgstatCountCUHDDAsynRead4SessionLevel(tmp_count); pgstat_count_cu_hdd_asyn(m_relation, tmp_count); +#endif } pfree(dList); diff --git a/src/gausskernel/storage/cstore/cstore_insert.cpp b/src/gausskernel/storage/cstore/cstore_insert.cpp index a0f468549..49633c614 100644 --- a/src/gausskernel/storage/cstore/cstore_insert.cpp +++ b/src/gausskernel/storage/cstore/cstore_insert.cpp @@ -1052,6 +1052,7 @@ void CStoreInsert::FlushIndexDataIfNeed() */ void CStoreInsert::CUListFlushAll(int attno) { +#ifndef ENABLE_LITE_MODE /* flush cu */ for (int col = 0; col < attno; ++col) { int count = m_aio_dispath_idx[col]; @@ -1071,6 +1072,7 @@ void CStoreInsert::CUListFlushAll(int attno) } m_aio_dispath_idx[col] = count; } +#endif } /* @@ -1081,6 +1083,7 @@ void CStoreInsert::CUListFlushAll(int attno) */ void CStoreInsert::CUWrite(int attno, int col) { +#ifndef ENABLE_LITE_MODE CU* cu = m_cuPPtr[col]; CUDesc* cuDesc = m_cuDescPPtr[col]; CUStorage* cuStorage = m_cuStorage[col]; @@ -1188,6 +1191,7 @@ void CStoreInsert::CUWrite(int attno, int col) col, cuDesc->cu_id, cuDesc->cu_pointer, cuDesc->cu_size))); return; +#endif } /* @@ -1266,6 +1270,7 @@ void CUListWriteAbort(int code, Datum arg) */ void CStoreInsert::CUListWriteCompeleteIO(int col, int count) { +#ifndef ENABLE_LITE_MODE int idx = 0; AioDispatchCUDesc_t** dList = m_aio_dispath_cudesc[col]; CUStorage* cuStorage = m_cuStorage[col]; @@ -1310,6 +1315,7 @@ void CStoreInsert::CUListWriteCompeleteIO(int col, int count) } FileAsyncCUClose(m_vfdList[col], count); +#endif } /* Write CU data and CUDesc */ diff --git a/src/gausskernel/storage/cstore/custorage.cpp b/src/gausskernel/storage/cstore/custorage.cpp index c21996016..d9964c5c3 100755 --- a/src/gausskernel/storage/cstore/custorage.cpp +++ b/src/gausskernel/storage/cstore/custorage.cpp @@ -792,7 +792,7 @@ void CUStorage::RemoteLoadCU(_in_ CU* cuPtr, _in_ uint64 offset, _in_ int size, int retry_times = 0; retry: - if (remote_address[0] == '\0' || remote_address[0] == ':') + if (remote_address[0] == '\0' || remote_address[0] == '@') ereport(ERROR, (errcode(ERRCODE_IO_ERROR), (errmodule(MOD_REMOTE), errmsg("remote not available")))); ereport(LOG, diff --git a/src/gausskernel/storage/dfs/CMakeLists.txt b/src/gausskernel/storage/dfs/CMakeLists.txt index 21dfad979..dc2d75c6a 100755 --- a/src/gausskernel/storage/dfs/CMakeLists.txt +++ b/src/gausskernel/storage/dfs/CMakeLists.txt @@ -20,8 +20,10 @@ set(dfs_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${W set(dfs_LINK_OPTIONS ${BIN_LINK_OPTIONS}) add_static_objtarget(gausskernel_storage_dfs TGT_dfs_SRC TGT_dfs_INC "${dfs_DEF_OPTIONS}" "${dfs_COMPILE_OPTIONS}" "${dfs_LINK_OPTIONS}") -set(CMAKE_MODULE_PATH - ${CMAKE_CURRENT_SOURCE_DIR}/obs -) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + set(CMAKE_MODULE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/obs + ) -add_subdirectory(obs) + add_subdirectory(obs) +endif() diff --git a/src/gausskernel/storage/dfs/Makefile b/src/gausskernel/storage/dfs/Makefile index 93023e399..f8261e4d9 100644 --- a/src/gausskernel/storage/dfs/Makefile +++ b/src/gausskernel/storage/dfs/Makefile @@ -25,7 +25,9 @@ subdir = src/gausskernel/storage/dfs top_builddir = ../../../.. 
include $(top_builddir)/src/Makefile.global +ifeq ($(enable_lite_mode), no) SUBDIRS = obs +endif ifneq "$(MAKECMDGOALS)" "clean" ifneq "$(MAKECMDGOALS)" "distclean" diff --git a/src/gausskernel/storage/dfs/dfs_connector.cpp b/src/gausskernel/storage/dfs/dfs_connector.cpp index fc035b69d..a71323912 100644 --- a/src/gausskernel/storage/dfs/dfs_connector.cpp +++ b/src/gausskernel/storage/dfs/dfs_connector.cpp @@ -37,7 +37,12 @@ DFSConnector *createConnector(MemoryContext ctx, Oid foreignTableId) switch (srvType) { case T_OBS_SERVER: case T_TXT_CSV_OBS_SERVER: { +#ifndef ENABLE_LITE_MODE return New(ctx) OBSConnector(ctx, foreignTableId); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + return NULL; +#endif } case T_HDFS_SERVER: { FEATURE_NOT_PUBLIC_ERROR("HDFS is not yet supported."); @@ -62,7 +67,12 @@ DFSConnector *createConnector(MemoryContext ctx, ServerTypeOption srvType, void { switch (srvType) { case T_OBS_SERVER: { +#ifndef ENABLE_LITE_MODE return New(ctx) OBSConnector(ctx, (ObsOptions *)options); +#else + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + return NULL; +#endif break; } case T_HDFS_SERVER: { diff --git a/src/gausskernel/storage/dfs/dfscache_mgr.cpp b/src/gausskernel/storage/dfs/dfscache_mgr.cpp index b42bcc136..50d96a95a 100644 --- a/src/gausskernel/storage/dfs/dfscache_mgr.cpp +++ b/src/gausskernel/storage/dfs/dfscache_mgr.cpp @@ -247,6 +247,7 @@ void MetaCacheMgr::NewSingletonInstance(void) ereport(LOG, (errmodule(MOD_CACHE), errmsg("set metadata cache size(%ld)", cache_size))); } +#ifndef ENABLE_LITE_MODE /* * @Description: must get pin first before used * @IN fileFooter: file footer @@ -265,6 +266,7 @@ void OrcMetaCacheSetBlock(CacheSlotId_t slotId, uint64 footerStart, const orc::p slotId, footerStart, postScript, fileFooter, stripeFooter, rowIndex, fileName, dataDNA); MetaCache->MetaBlockCompleteIO(slotId); } +#endif void CarbonMetaCacheSetBlock(CacheSlotId_t slotId, uint64 headerSize, uint64 footerSize, unsigned char* fileHeader, unsigned char* fileFooter, const char* fileName, const char* dataDNA) @@ -623,6 +625,7 @@ int MetaCacheMgr::CalcCarbonMetaBlockSize(CarbonMetadataValue* nvalue) const return size; } +#ifndef ENABLE_LITE_MODE /* * @Description: must get pin first before used * @IN fileFooter: file footer @@ -723,6 +726,7 @@ out_of_memory: ereport(ERROR, (errcode(ERRCODE_FDW_OUT_OF_MEMORY), errmodule(MOD_CACHE), errmsg("malloc fails, out of memory"))); return; } +#endif void MetaCacheMgr::SetCarbonMetaBlockValue(CacheSlotId_t slotId, uint64 headerSize, uint64 footerSize, unsigned char* fileHeader, unsigned char* fileFooter, const char* fileName, const char* dataDNA) diff --git a/src/gausskernel/storage/dorado_operation/dorado_fd.cpp b/src/gausskernel/storage/dorado_operation/dorado_fd.cpp index c3d3f39cc..9a075d771 100644 --- a/src/gausskernel/storage/dorado_operation/dorado_fd.cpp +++ b/src/gausskernel/storage/dorado_operation/dorado_fd.cpp @@ -48,7 +48,7 @@ void InitDoradoStorage(char *filePath, uint64 fileSize) { Assert(!g_instance.xlog_cxt.shareStorageopCtl.isInit); g_instance.xlog_cxt.shareStorageopCtl.xlogFilePath = filePath; - g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize = fileSize; + g_instance.xlog_cxt.shareStorageopCtl.blkSize = MEMORY_ALIGNED_SIZE; g_instance.xlog_cxt.shareStorageopCtl.opereateIf = &doradoOperateIf; @@ -57,7 +57,15 @@ void InitDoradoStorage(char *filePath, uint64 fileSize) if (g_instance.xlog_cxt.shareStorageopCtl.fd < 0) { ereport(PANIC, (errcode_for_file_access(), errmsg("could not open xlog file \"%s\" : %m", filePath))); } + 
g_instance.xlog_cxt.shareStorageopCtl.isInit = true; + if (IsInitdb) { + g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize = fileSize; + } else { + Assert(g_instance.xlog_cxt.shareStorageXLogCtl != NULL); + DoradoReadCtlInfo(g_instance.xlog_cxt.shareStorageXLogCtl); + g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize = g_instance.xlog_cxt.shareStorageXLogCtl->xlogFileSize; + } } void DoradoWriteCtlInfo(const ShareStorageXLogCtl *ctlInfo) diff --git a/src/gausskernel/storage/file/fd.cpp b/src/gausskernel/storage/file/fd.cpp index 1a0d769c9..446a6bb6d 100644 --- a/src/gausskernel/storage/file/fd.cpp +++ b/src/gausskernel/storage/file/fd.cpp @@ -138,10 +138,10 @@ #define VFD_CLOSED (-1) #define FileIsValid(file) \ - ((file) > 0 && (file) < (int)u_sess->storage_cxt.SizeVfdCache && \ - u_sess->storage_cxt.VfdCache[file].fileName != NULL) + ((file) > 0 && (file) < (int)GetSizeVfdCache() && \ + GetVfdCache()[file].fileName != NULL) -#define FileIsNotOpen(file) (u_sess->storage_cxt.VfdCache[file].fd == VFD_CLOSED) +#define FileIsNotOpen(file) (GetVfdCache()[file].fd == VFD_CLOSED) #define FileUnknownPos ((off_t)-1) @@ -191,16 +191,6 @@ static pthread_mutex_t VFDLockArray[NUM_VFD_PARTITIONS]; #define VFDMappingPartitionLock(hashcode) \ (&VFDLockArray[VFDTableHashPartition(hashcode)]) -/* - * pc_munmap - */ -#define SAFE_MUNMAP(vfdP) \ - do { \ - if ((vfdP)->with_pcmap && (vfdP)->pcmap != NULL) { \ - UnReferenceAddrFile((vfdP)); \ - (vfdP)->pcmap = NULL; \ - } \ - } while (0) /* -------------------- * * Private Routines @@ -354,13 +344,11 @@ RelFileNodeForkNum RelFileNodeForkNumFill(RelFileNode* rnode, filenode.rnode.node.spcNode = rnode->spcNode; filenode.rnode.node.dbNode = rnode->dbNode; filenode.rnode.node.bucketNode = rnode->bucketNode; - filenode.rnode.node.opt = rnode->opt; } else { filenode.rnode.node.relNode = InvalidOid; filenode.rnode.node.spcNode = InvalidOid; filenode.rnode.node.dbNode = InvalidOid; filenode.rnode.node.bucketNode = InvalidBktId; - filenode.rnode.node.opt = 0; } filenode.rnode.backend = backend; @@ -581,24 +569,37 @@ void pg_flush_data(int fd, off_t offset, off_t nbytes) */ void InitFileAccess(void) { - Assert(u_sess->storage_cxt.SizeVfdCache == 0); /* call me only once */ - - /* initialize cache header entry */ - u_sess->storage_cxt.VfdCache = - (Vfd*)MemoryContextAlloc(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(vfd)); - - if (u_sess->storage_cxt.VfdCache == NULL) - ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); - - errno_t ret = memset_s((char*)&(u_sess->storage_cxt.VfdCache[0]), sizeof(Vfd), 0, sizeof(Vfd)); - securec_check(ret, "\0", "\0"); - u_sess->storage_cxt.VfdCache->fd = VFD_CLOSED; - - u_sess->storage_cxt.SizeVfdCache = 1; - - /* register proc-exit hook to ensure temp files are dropped at exit */ - if (!IS_THREAD_POOL_SESSION) + if (EnableLocalSysCache()) { + if(t_thrd.lsc_cxt.lsc->VfdCache != NULL) { + return; + } + /* initialize cache header entry */ + t_thrd.lsc_cxt.lsc->VfdCache = + (Vfd*)MemoryContextAlloc(LocalSmgrStorageMemoryCxt(), sizeof(vfd)); + if (t_thrd.lsc_cxt.lsc->VfdCache == NULL) + ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); + errno_t ret = memset_s((char*)&(t_thrd.lsc_cxt.lsc->VfdCache[0]), sizeof(Vfd), 0, sizeof(Vfd)); + securec_check(ret, "\0", "\0"); + t_thrd.lsc_cxt.lsc->VfdCache->fd = VFD_CLOSED; + t_thrd.lsc_cxt.lsc->SizeVfdCache = 1; + /* register proc-exit hook to ensure temp files are dropped at exit */ + on_proc_exit(AtProcExit_Files, 0); + } else { + 
Assert(u_sess->storage_cxt.SizeVfdCache == 0); /* call me only once */ + /* initialize cache header entry */ + u_sess->storage_cxt.VfdCache = + (Vfd*)MemoryContextAlloc(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(vfd)); + if (u_sess->storage_cxt.VfdCache == NULL) + ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); + errno_t ret = memset_s((char*)&(u_sess->storage_cxt.VfdCache[0]), sizeof(Vfd), 0, sizeof(Vfd)); + securec_check(ret, "\0", "\0"); + u_sess->storage_cxt.VfdCache->fd = VFD_CLOSED; + u_sess->storage_cxt.SizeVfdCache = 1; + /* register proc-exit hook to ensure temp files are dropped at exit */ + if (!IS_THREAD_POOL_SESSION) + on_proc_exit(AtProcExit_Files, 0); + } } /* @@ -863,8 +864,10 @@ tryAgain: static void _dump_lru(void) { - int mru = u_sess->storage_cxt.VfdCache[0].lruLessRecently; - Vfd* vfdP = &u_sess->storage_cxt.VfdCache[mru]; + vfd *vfdcache = GetVfdCache(); + int mru = vfdcache[0].lruLessRecently; + Vfd* vfdP = &vfdcache[mru]; + char buf[2048]; errno_t rc = EOK; @@ -873,7 +876,7 @@ static void _dump_lru(void) while (mru != 0) { mru = vfdP->lruLessRecently; - vfdP = &u_sess->storage_cxt.VfdCache[mru]; + vfdP = &vfdcache[mru]; rc = snprintf_s(buf + strlen(buf), sizeof(buf) - strlen(buf), sizeof(buf) - strlen(buf) - 1, "%d ", mru); securec_check_ss(rc, "", ""); } @@ -888,14 +891,15 @@ static void Delete(File file) Vfd* vfdP = NULL; Assert(file != 0); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("Delete %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("Delete %d (%s)", file, vfdcache[file].fileName)))); DO_DB(_dump_lru()); - vfdP = &u_sess->storage_cxt.VfdCache[file]; + vfdP = &vfdcache[file]; - u_sess->storage_cxt.VfdCache[vfdP->lruLessRecently].lruMoreRecently = vfdP->lruMoreRecently; - u_sess->storage_cxt.VfdCache[vfdP->lruMoreRecently].lruLessRecently = vfdP->lruLessRecently; + vfdcache[vfdP->lruLessRecently].lruMoreRecently = vfdP->lruMoreRecently; + vfdcache[vfdP->lruMoreRecently].lruLessRecently = vfdP->lruLessRecently; DO_DB(_dump_lru()); } @@ -905,19 +909,19 @@ static void LruDelete(File file) Vfd* vfdP = NULL; Assert(file != 0); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("LruDelete %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("LruDelete %d (%s)", file, vfdcache[file].fileName)))); - vfdP = &u_sess->storage_cxt.VfdCache[file]; + vfdP = &vfdcache[file]; - SAFE_MUNMAP(vfdP); /* delete the vfd record from the LRU ring */ Delete(file); /* close the file */ DataFileIdCloseFile(vfdP); - --u_sess->storage_cxt.nfile; + AddVfdNfile(-1); vfdP->fd = VFD_CLOSED; } @@ -926,16 +930,17 @@ static void Insert(File file) Vfd* vfdP = NULL; Assert(file != 0); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("Insert %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("Insert %d (%s)", file, vfdcache[file].fileName)))); DO_DB(_dump_lru()); - vfdP = &u_sess->storage_cxt.VfdCache[file]; + vfdP = &vfdcache[file]; vfdP->lruMoreRecently = 0; - vfdP->lruLessRecently = u_sess->storage_cxt.VfdCache[0].lruLessRecently; - u_sess->storage_cxt.VfdCache[0].lruLessRecently = file; - u_sess->storage_cxt.VfdCache[vfdP->lruLessRecently].lruMoreRecently = file; + vfdP->lruLessRecently = vfdcache[0].lruLessRecently; + vfdcache[0].lruLessRecently = file; + vfdcache[vfdP->lruLessRecently].lruMoreRecently = file; DO_DB(_dump_lru()); } @@ -947,10 +952,11 @@ static int LruInsert(File file) 
File rfile; Assert(file != 0); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("LruInsert %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("LruInsert %d (%s)", file, vfdcache[file].fileName)))); - vfdP = &u_sess->storage_cxt.VfdCache[file]; + vfdP = &vfdcache[file]; if (FileIsNotOpen(file)) { rfile = DataFileIdOpenFile(vfdP->fileName, vfdP->fileNode, vfdP->fileFlags, vfdP->fileMode, file); @@ -975,15 +981,16 @@ static int LruInsert(File file) */ static bool ReleaseLruFile(void) { - DO_DB(ereport(LOG, (errmsg("ReleaseLruFile. Opened %d", u_sess->storage_cxt.nfile)))); + DO_DB(ereport(LOG, (errmsg("ReleaseLruFile. Opened %d", GetVfdNfile())))); - if (u_sess->storage_cxt.nfile > 0) { + if (GetVfdNfile() > 0) { /* * There are opened files and so there should be at least one used vfd * in the ring. */ - Assert(u_sess->storage_cxt.VfdCache[0].lruMoreRecently != 0); - LruDelete(u_sess->storage_cxt.VfdCache[0].lruMoreRecently); + vfd *vfdcache = GetVfdCache(); + Assert(vfdcache[0].lruMoreRecently != 0); + LruDelete(vfdcache[0].lruMoreRecently); return true; /* freed a file */ } return false; /* no files available to free */ @@ -995,7 +1002,7 @@ static bool ReleaseLruFile(void) */ static void ReleaseLruFiles(void) { - while (u_sess->storage_cxt.nfile + u_sess->storage_cxt.numAllocatedDescs >= t_thrd.storage_cxt.max_safe_fds) { + while (GetVfdNfile() + u_sess->storage_cxt.numAllocatedDescs >= t_thrd.storage_cxt.max_safe_fds) { if (!ReleaseLruFile()) break; } @@ -1019,23 +1026,23 @@ static void ReallocVfdCache(Size newCacheSize) AutoMutexLock recursive_mutext_lock(recursive_mutex); recursive_mutext_lock.lock(); { - newVfdCache = (Vfd*)repalloc(u_sess->storage_cxt.VfdCache, sizeof(Vfd) * newCacheSize); + newVfdCache = (Vfd*)repalloc(GetVfdCache(), sizeof(Vfd) * newCacheSize); if (newVfdCache == NULL) { recursive_mutext_lock.unLock(); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } - u_sess->storage_cxt.VfdCache = newVfdCache; + SetVfdCache(newVfdCache); } recursive_mutext_lock.unLock(); return; } #endif - newVfdCache = (Vfd*)repalloc(u_sess->storage_cxt.VfdCache, sizeof(Vfd) * newCacheSize); + newVfdCache = (Vfd*)repalloc(GetVfdCache(), sizeof(Vfd) * newCacheSize); if (newVfdCache == NULL) { ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } - u_sess->storage_cxt.VfdCache = newVfdCache; + SetVfdCache(newVfdCache); } static File AllocateVfd(void) @@ -1043,17 +1050,19 @@ static File AllocateVfd(void) Index i; File file; - DO_DB(ereport(LOG, (errmsg("AllocateVfd. Size %lu", (unsigned long)u_sess->storage_cxt.SizeVfdCache)))); + DO_DB(ereport(LOG, (errmsg("AllocateVfd. Size %lu", (unsigned long)GetSizeVfdCache())))); - Assert(u_sess->storage_cxt.SizeVfdCache > 0); /* InitFileAccess not called? */ + Assert(GetSizeVfdCache() > 0); /* InitFileAccess not called? */ - if (u_sess->storage_cxt.VfdCache[0].nextFree == 0) { + vfd *vfdcache = GetVfdCache(); + if (vfdcache[0].nextFree == 0) { + Size SizeVfdCache = GetSizeVfdCache(); /* * The free list is empty so it is time to increase the size of the * array. We choose to double it each time this happens. However, * there's not much point in starting *real* small. 
*/ - Size newCacheSize = u_sess->storage_cxt.SizeVfdCache * 2; + Size newCacheSize = SizeVfdCache * 2; Size maxCacheSize = MaxAllocSize / sizeof(Vfd); if (newCacheSize < 32) { @@ -1063,31 +1072,33 @@ static File AllocateVfd(void) if (newCacheSize >= maxCacheSize) { /* sizeof(Vfd) = 96, if cache great than max cache size, change extend strategy to fix a number size 96M */ uint32 extendCacheCount = 1024 * 1024; - newCacheSize = u_sess->storage_cxt.SizeVfdCache + extendCacheCount; + newCacheSize = SizeVfdCache + extendCacheCount; } ReallocVfdCache(newCacheSize); + vfdcache = GetVfdCache(); /* Initialize the new entries and link them into the free list. */ - for (i = (Index)u_sess->storage_cxt.SizeVfdCache; i < newCacheSize; i++) { - errno_t ret = memset_s((char*)&(u_sess->storage_cxt.VfdCache[i]), sizeof(Vfd), 0, sizeof(Vfd)); + for (i = (Index)SizeVfdCache; i < newCacheSize; i++) { + errno_t ret = memset_s((char*)&(vfdcache[i]), sizeof(Vfd), 0, sizeof(Vfd)); securec_check(ret, "\0", "\0"); - u_sess->storage_cxt.VfdCache[i].nextFree = (File)(i + 1); - u_sess->storage_cxt.VfdCache[i].fd = VFD_CLOSED; + vfdcache[i].nextFree = (File)(i + 1); + vfdcache[i].fd = VFD_CLOSED; } - u_sess->storage_cxt.VfdCache[newCacheSize - 1].nextFree = 0; - u_sess->storage_cxt.VfdCache[0].nextFree = (File)u_sess->storage_cxt.SizeVfdCache; - u_sess->storage_cxt.SizeVfdCache = newCacheSize; /* Record the new size */ + vfdcache[newCacheSize - 1].nextFree = 0; + vfdcache[0].nextFree = (File)SizeVfdCache; + SetSizeVfdCache(newCacheSize); /* Record the new size */ } - file = u_sess->storage_cxt.VfdCache[0].nextFree; - u_sess->storage_cxt.VfdCache[0].nextFree = u_sess->storage_cxt.VfdCache[file].nextFree; + file = vfdcache[0].nextFree; + vfdcache[0].nextFree = vfdcache[file].nextFree; return file; } static void FreeVfd(File file) { - Vfd* vfdP = &u_sess->storage_cxt.VfdCache[file]; + vfd *vfdcache = GetVfdCache(); + Vfd* vfdP = &vfdcache[file]; DO_DB(ereport(LOG, (errmsg("FreeVfd: %d (%s)", file, vfdP->fileName ? vfdP->fileName : "")))); @@ -1104,8 +1115,8 @@ static void FreeVfd(File file) } vfdP->fdstate = 0x0; - vfdP->nextFree = u_sess->storage_cxt.VfdCache[0].nextFree; - u_sess->storage_cxt.VfdCache[0].nextFree = file; + vfdP->nextFree = vfdcache[0].nextFree; + vfdcache[0].nextFree = file; RESUME_INTERRUPTS(); } @@ -1113,8 +1124,8 @@ static void FreeVfd(File file) static int FileAccess(File file) { int returnValue; - - DO_DB(ereport(LOG, (errmsg("FileAccess %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + vfd *vfdcache = GetVfdCache(); + DO_DB(ereport(LOG, (errmsg("FileAccess %d (%s)", file, vfdcache[file].fileName)))); /* * Is the file open? If not, open it and put it at the head of the LRU @@ -1125,7 +1136,7 @@ static int FileAccess(File file) if (returnValue != 0) { return returnValue; } - } else if (u_sess->storage_cxt.VfdCache[0].lruLessRecently != file) { + } else if (vfdcache[0].lruLessRecently != file) { /* * We now know that the file is open and that it is not the last one * accessed, so we need to move it to the head of the Lru ring. @@ -1161,11 +1172,12 @@ ReportTemporaryFileUsage(const char *path, off_t size) static void RegisterTemporaryFile(File file) { + vfd *vfdcache = GetVfdCache(); ResourceOwnerRememberFile(t_thrd.utils_cxt.CurrentResourceOwner, file); - u_sess->storage_cxt.VfdCache[file].resowner = t_thrd.utils_cxt.CurrentResourceOwner; + vfdcache[file].resowner = t_thrd.utils_cxt.CurrentResourceOwner; /* Backup mechanism for closing at end of xact. 
 */
-    u_sess->storage_cxt.VfdCache[file].fdstate |= FD_CLOSE_AT_EOXACT;
+    vfdcache[file].fdstate |= FD_CLOSE_AT_EOXACT;
     u_sess->storage_cxt.have_xact_temporary_files = true;
 }
@@ -1241,7 +1253,7 @@ static void DataFileIdCloseFile(Vfd* vfdP)
         ereport(PANIC,
             (errmsg("file cache corrupted, file %s not opened with handle: %d", vfdP->fileName, vfdP->fd)));
     }
-    Assert(entry->fd >= 0 && entry->fd == vfdP->fd);
+    Assert(entry->fd >= 0 && (entry->fd == vfdP->fd || (entry->repaired_fd >= 0 && entry->repaired_fd == vfdP->fd)));
 
     // decrease reference count
     entry->refcount--;
@@ -1249,15 +1261,22 @@ static void DataFileIdCloseFile(Vfd* vfdP)
 
     // need to close and remove from cache
     if (entry->refcount == 0) {
+        int fd = entry->fd;
+        int repaired_fd = entry->repaired_fd;
         entry = (DataFileIdCacheEntry*)hash_search_with_hash_value(
             t_thrd.storage_cxt.DataFileIdCache, (void*)&vfdP->fileNode, newHash, HASH_REMOVE, NULL);
         Assert(entry);
         vfdLock.unLock();
 
-        if (close(vfdP->fd) < 0) {
+        if (close(fd) < 0) {
             ereport(LogLevelOfCloseFileFailed(vfdP),
                 (errcode_for_file_access(),
-                errmsg("[Global] File(%s) fd(%d) have been closed, %m", vfdP->fileName, vfdP->fd)));
+                errmsg("[Global] File(%s) fd(%d) has been closed, %m", vfdP->fileName, fd)));
+        }
+        if (repaired_fd >= 0 && close(repaired_fd) < 0) {
+            ereport(LogLevelOfCloseFileFailed(vfdP),
+                (errcode_for_file_access(),
+                errmsg("[Global] File(%s) repaired_fd(%d) has been closed, %m", vfdP->fileName, repaired_fd)));
         }
         return;
     }
@@ -1297,7 +1316,7 @@ static File PathNameOpenFile_internal(
 
     // Allocate a new VFD file if FILE_INVALID passed, or reopen it on the 'file'
     if (file == FILE_INVALID) {
-        fnamecopy = MemoryContextStrdup(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), fileName);
+        fnamecopy = MemoryContextStrdup(LocalSmgrStorageMemoryCxt(), fileName);
         if (fnamecopy == NULL)
             ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory")));
 
@@ -1305,7 +1324,7 @@ static File PathNameOpenFile_internal(
         newVfd = true;
     }
 
-    vfdP = &u_sess->storage_cxt.VfdCache[file];
+    vfdP = &GetVfdCache()[file];
 
     /* Close excess kernel FDs. */
     ReleaseLruFiles();
@@ -1344,7 +1363,7 @@ static File PathNameOpenFile_internal(
         }
         return -1;
     }
-    ++u_sess->storage_cxt.nfile;
+    AddVfdNfile(1);
 
     DO_DB(ereport(LOG, (errmsg("PathNameOpenFile: success %d", vfdP->fd))));
 
     Insert(file);
@@ -1382,6 +1401,7 @@ static File PathNameOpenFile_internal(
     } else {
         Assert(vfdP->fd >= 0);
         entry->fd = vfdP->fd;
+        entry->repaired_fd = -1;
         entry->refcount = 1;
         vfdLock.unLock();
     }
@@ -1497,8 +1517,9 @@ File OpenTemporaryFile(bool interXact)
         u_sess->proc_cxt.MyDatabaseTableSpace ?
u_sess->proc_cxt.MyDatabaseTableSpace : DEFAULTTABLESPACE_OID, true); + vfd *vfdcache = GetVfdCache(); /* Mark it for deletion at close and temporary file size limit */ - u_sess->storage_cxt.VfdCache[file].fdstate |= FD_DELETE_AT_CLOSE | FD_TEMP_FILE_LIMIT; + vfdcache[file].fdstate |= FD_DELETE_AT_CLOSE | FD_TEMP_FILE_LIMIT; /* Register it with the current resource owner */ if (!interXact) { @@ -1560,10 +1581,11 @@ File OpenCacheFile(const char* pathname, bool unlink_owner) ERROR, (errcode_for_file_access(), errmsg("could not create temporary cache file \"%s\": %m", tmppath))); } + vfd *vfdcache = GetVfdCache(); /* mark the special flag for cache file */ - u_sess->storage_cxt.VfdCache[file].fdstate |= FD_ERRTBL_LOG; + vfdcache[file].fdstate |= FD_ERRTBL_LOG; if (unlink_owner) { - u_sess->storage_cxt.VfdCache[file].fdstate |= FD_ERRTBL_LOG_OWNER; + vfdcache[file].fdstate |= FD_ERRTBL_LOG_OWNER; } /* ensure cleanup happens at eoxact */ @@ -1680,15 +1702,13 @@ static File OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError) */ void FileCloseWithThief(File file) { - Vfd* vfdP = &u_sess->storage_cxt.VfdCache[file]; + Vfd* vfdP = &GetVfdCache()[file]; if (!FileIsNotOpen(file)) { - SAFE_MUNMAP(vfdP); - /* remove the file from the lru ring */ Delete(file); /* the thief has close the real fd */ Assert(!vfdP->infdCache); - --u_sess->storage_cxt.nfile; + AddVfdNfile(-1); /* clean up fd flag */ vfdP->fd = VFD_CLOSED; } @@ -1731,7 +1751,7 @@ File PathNameCreateTemporaryFile(char *path, bool error_on_failure) } /* Mark it for temp_file_limit accounting. */ - u_sess->storage_cxt.VfdCache[file].fdstate |= FD_TEMP_FILE_LIMIT; + GetVfdCache()[file].fdstate |= FD_TEMP_FILE_LIMIT; /* Register it for automatic close. */ RegisterTemporaryFile(file); @@ -1816,14 +1836,13 @@ void FileClose(File file) Vfd* vfdP = NULL; Assert(FileIsValid(file)); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("FileClose: %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("FileClose: %d (%s)", file, vfdcache[file].fileName)))); - vfdP = &u_sess->storage_cxt.VfdCache[file]; + vfdP = &vfdcache[file]; if (!FileIsNotOpen(file)) { - SAFE_MUNMAP(vfdP); - /* remove the file from the lru ring */ Delete(file); @@ -1837,7 +1856,7 @@ void FileClose(File file) /* close the file */ DataFileIdCloseFile(vfdP); - --u_sess->storage_cxt.nfile; + AddVfdNfile(-1); vfdP->fd = VFD_CLOSED; RESUME_INTERRUPTS(); } @@ -1910,11 +1929,12 @@ int FilePrefetch(File file, off_t offset, int amount, uint32 wait_event_info) int returnCode; Assert(FileIsValid(file)); + vfd *vfdcache = GetVfdCache(); DO_DB(ereport(LOG, (errmsg("FilePrefetch: %d (%s) " INT64_FORMAT " %d", file, - u_sess->storage_cxt.VfdCache[file].fileName, + vfdcache[file].fileName, (int64)offset, amount)))); @@ -1923,7 +1943,7 @@ int FilePrefetch(File file, off_t offset, int amount, uint32 wait_event_info) return returnCode; pgstat_report_waitevent(wait_event_info); - returnCode = posix_fadvise(u_sess->storage_cxt.VfdCache[file].fd, offset, amount, POSIX_FADV_WILLNEED); + returnCode = posix_fadvise(vfdcache[file].fd, offset, amount, POSIX_FADV_WILLNEED); pgstat_report_waitevent(WAIT_EVENT_END); return returnCode; @@ -1938,11 +1958,11 @@ void FileWriteback(File file, off_t offset, off_t nbytes) int returnCode; Assert(FileIsValid(file)); - + vfd *vfdcache = GetVfdCache(); DO_DB(ereport(LOG, (errmsg("FileWriteback: %d (%s) " INT64_FORMAT " " INT64_FORMAT, file, - u_sess->storage_cxt.VfdCache[file].fileName, + vfdcache[file].fileName, 
(int64)offset, (int64)nbytes)))); @@ -1957,7 +1977,7 @@ void FileWriteback(File file, off_t offset, off_t nbytes) if (returnCode < 0) return; - pg_flush_data(u_sess->storage_cxt.VfdCache[file].fd, offset, nbytes); + pg_flush_data(vfdcache[file].fd, offset, nbytes); } // FilePRead @@ -1968,12 +1988,12 @@ int FilePRead(File file, char* buffer, int amount, off_t offset, uint32 wait_eve int returnCode; Assert(FileIsValid(file)); - + vfd *vfdcache = GetVfdCache(); DO_DB(ereport(LOG, (errmsg("FilePRead: %d (%s) " INT64_FORMAT " %d", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos, + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos, amount)))); returnCode = FileAccess(file); @@ -1990,13 +2010,13 @@ retry: pgstat_report_waitevent(wait_event_info); PGSTAT_INIT_TIME_RECORD(); PGSTAT_START_TIME_RECORD(); - returnCode = pread(u_sess->storage_cxt.VfdCache[file].fd, buffer, (size_t)amount, offset); + returnCode = pread(vfdcache[file].fd, buffer, (size_t)amount, offset); PGSTAT_END_TIME_RECORD(DATA_IO_TIME); pgstat_report_waitevent(WAIT_EVENT_END); PROFILING_MDIO_END_READ((uint32)amount, returnCode); if (returnCode >= 0) - u_sess->storage_cxt.VfdCache[file].seekPos += returnCode; + vfdcache[file].seekPos += returnCode; else { /* * Windows may run out of kernel buffers and return "Insufficient @@ -2023,7 +2043,7 @@ retry: goto retry; /* Trouble, so assume we don't know the file position anymore */ - u_sess->storage_cxt.VfdCache[file].seekPos = FileUnknownPos; + vfdcache[file].seekPos = FileUnknownPos; } return returnCode; @@ -2038,22 +2058,23 @@ int FileWrite(File file, const char* buffer, int amount, off_t offset, int fastE * we did not consider dio here because fallocate has no relation with dio. * if buffer == NULL, means can use fallocate, caller will check it */ + vfd *vfdcache = GetVfdCache(); if (buffer == NULL) { if (fastExtendSize == 0) { /* fast allocate large disk space for this heap file each 8MB */ fastExtendSize = (int)(u_sess->attr.attr_storage.fast_extend_file_size * 1024LL); } - + /* fast allocate large disk space for this file each 8MB */ if ((offset % fastExtendSize) == 0) { - returnCode = fallocate(u_sess->storage_cxt.VfdCache[file].fd, FALLOC_FL_KEEP_SIZE, offset, fastExtendSize); + returnCode = fallocate(vfdcache[file].fd, FALLOC_FL_KEEP_SIZE, offset, fastExtendSize); if (returnCode != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("fallocate(fd=%d, amount=%d, offset=%ld),write count(%d), errno(%d), " "maybe you use adio without XFS filesystem, if you really want do this," "please turn off GUC parameter enable_fast_allocate", - u_sess->storage_cxt.VfdCache[file].fd, + vfdcache[file].fd, fastExtendSize, (int64)offset, returnCode, @@ -2062,14 +2083,14 @@ int FileWrite(File file, const char* buffer, int amount, off_t offset, int fastE } /* write all zeros into a new page */ - returnCode = fallocate(u_sess->storage_cxt.VfdCache[file].fd, 0, offset, amount); + returnCode = fallocate(vfdcache[file].fd, 0, offset, amount); if (returnCode != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("fallocate(fd=%d, amount=%d, offset=%ld),write count(%d), errno(%d), " "maybe you use adio without XFS filesystem, if you really want do this," "please turn off GUC parameter enable_fast_allocate", - u_sess->storage_cxt.VfdCache[file].fd, + vfdcache[file].fd, amount, (int64)offset, returnCode, @@ -2082,7 +2103,7 @@ int FileWrite(File file, const char* buffer, int amount, off_t offset, int fastE PROFILING_MDIO_START(); 
PGSTAT_INIT_TIME_RECORD(); PGSTAT_START_TIME_RECORD(); - returnCode = pwrite(u_sess->storage_cxt.VfdCache[file].fd, buffer, (size_t)amount, offset); + returnCode = pwrite(vfdcache[file].fd, buffer, (size_t)amount, offset); PGSTAT_END_TIME_RECORD(DATA_IO_TIME); PROFILING_MDIO_END_WRITE((uint32)amount, returnCode); } @@ -2097,12 +2118,12 @@ int FilePWrite(File file, const char* buffer, int amount, off_t offset, uint32 w int returnCode; Assert(FileIsValid(file)); - + vfd *vfdcache = GetVfdCache(); DO_DB(ereport(LOG, (errmsg("FilePWrite: %d (%s) " INT64_FORMAT " %d", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos, + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos, amount)))); returnCode = FileAccess(file); @@ -2121,12 +2142,12 @@ int FilePWrite(File file, const char* buffer, int amount, off_t offset, uint32 w * message if we do that. All current callers would just throw error * immediately anyway, so this is safe at present. */ - if (u_sess->storage_cxt.VfdCache[file].fdstate & FD_TEMP_FILE_LIMIT) { - off_t newPos = u_sess->storage_cxt.VfdCache[file].seekPos + amount; + if (vfdcache[file].fdstate & FD_TEMP_FILE_LIMIT) { + off_t newPos = vfdcache[file].seekPos + amount; - if (newPos > u_sess->storage_cxt.VfdCache[file].fileSize) { + if (newPos > vfdcache[file].fileSize) { uint64 newTotal = u_sess->storage_cxt.temporary_files_size; - uint64 incSize = (uint64)(newPos - u_sess->storage_cxt.VfdCache[file].fileSize); + uint64 incSize = (uint64)(newPos - vfdcache[file].fileSize); WLMGeneralParam* g_wlm_params = &u_sess->wlm_cxt->wlm_params; unsigned char state = g_wlm_params->iostate; g_wlm_params->iostate = IOSTATE_WRITE; @@ -2134,7 +2155,7 @@ int FilePWrite(File file, const char* buffer, int amount, off_t offset, uint32 w g_wlm_params->iostate = state; if (u_sess->attr.attr_sql.temp_file_limit >= 0) { - newTotal += newPos - u_sess->storage_cxt.VfdCache[file].fileSize; + newTotal += newPos - vfdcache[file].fileSize; if (newTotal > (uint64)(uint32)u_sess->attr.attr_sql.temp_file_limit * (uint64)1024) ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), @@ -2156,15 +2177,15 @@ retry: errno = ENOSPC; if (returnCode >= 0) { - u_sess->storage_cxt.VfdCache[file].seekPos += returnCode; + vfdcache[file].seekPos += returnCode; /* maintain fileSize and temporary_files_size if it's a temp file */ - if (u_sess->storage_cxt.VfdCache[file].fdstate & FD_TEMP_FILE_LIMIT) { - off_t newPos = u_sess->storage_cxt.VfdCache[file].seekPos; + if (vfdcache[file].fdstate & FD_TEMP_FILE_LIMIT) { + off_t newPos = vfdcache[file].seekPos; - if (newPos > u_sess->storage_cxt.VfdCache[file].fileSize) { - u_sess->storage_cxt.temporary_files_size += newPos - u_sess->storage_cxt.VfdCache[file].fileSize; - u_sess->storage_cxt.VfdCache[file].fileSize = newPos; + if (newPos > vfdcache[file].fileSize) { + u_sess->storage_cxt.temporary_files_size += newPos - vfdcache[file].fileSize; + vfdcache[file].fileSize = newPos; } } } else { @@ -2189,12 +2210,13 @@ retry: goto retry; /* Trouble, so assume we don't know the file position anymore */ - u_sess->storage_cxt.VfdCache[file].seekPos = FileUnknownPos; + vfdcache[file].seekPos = FileUnknownPos; } return returnCode; } +#ifndef ENABLE_LITE_MODE template static int FileAsyncSubmitIO(io_context_t aio_context, dlistType dList, int dListCount) { @@ -2250,7 +2272,7 @@ static int FileAsyncSubmitIO(io_context_t aio_context, dlistType dList, int dLis int FileAsyncRead(AioDispatchDesc_t** dList, int32 dn) { int returnCode = 0; - 
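The block starting here is compiled only when ENABLE_LITE_MODE is off: FileAsyncSubmitIO and the FileAsync* wrappers below resolve each virtual fd to a real kernel fd through FileAccess(), store it back into the request's aiocb, and submit the whole batch on a libaio io_context_t. The following self-contained sketch shows that generic submit/reap pattern (compile with -laio); it illustrates only the libaio API, while AioDispatchDesc_t and the completer contexts are engine-specific.

```
// Minimal libaio round trip: prep an iocb against a real fd, submit, reap.
#include <libaio.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    io_context_t ctx = 0;
    if (io_setup(32, &ctx) != 0) {               /* returns -errno on failure */
        fprintf(stderr, "io_setup failed\n");
        return 1;
    }
    int fd = open("/etc/hostname", O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }

    char buf[512];
    struct iocb cb;
    struct iocb *cbs[1] = { &cb };
    io_prep_pread(&cb, fd, buf, sizeof(buf), 0); /* real fd, as in the diff */

    if (io_submit(ctx, 1, cbs) != 1) {
        fprintf(stderr, "io_submit failed\n");
        return 1;
    }
    struct io_event ev;
    if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
        printf("async read completed: %ld bytes\n", (long)ev.res);

    close(fd);
    io_destroy(ctx);
    return 0;
}
```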
+ vfd *vfdcache = GetVfdCache(); for (int i = 0; i < dn; i++) { File file = dList[i]->aiocb.aio_fildes; @@ -2258,8 +2280,8 @@ int FileAsyncRead(AioDispatchDesc_t** dList, int32 dn) DO_DB(ereport(LOG, (errmsg("FileAsyncRead: fd(%d), filename(%s), seekpos(%ld)", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos)))); + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos)))); // jeh FileAccess may block opening file returnCode = FileAccess(file); @@ -2270,7 +2292,7 @@ int FileAsyncRead(AioDispatchDesc_t** dList, int32 dn) } /* replace the virtual fd with the real one */ - dList[i]->aiocb.aio_fildes = u_sess->storage_cxt.VfdCache[file].fd; + dList[i]->aiocb.aio_fildes = vfdcache[file].fd; } /* @@ -2305,7 +2327,7 @@ int FileAsyncRead(AioDispatchDesc_t** dList, int32 dn) int FileAsyncWrite(AioDispatchDesc_t** dList, int32 dn) { int returnCode = 0; - + vfd *vfdcache = GetVfdCache(); for (int i = 0; i < dn; i++) { File file = dList[i]->aiocb.aio_fildes; @@ -2313,8 +2335,8 @@ int FileAsyncWrite(AioDispatchDesc_t** dList, int32 dn) DO_DB(ereport(LOG, (errmsg("FileAsyncRead: fd(%d), filename(%s), seekpos(%ld)", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos)))); + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos)))); if ((returnCode = FileAccess(file)) < 0) { // aio debug error @@ -2323,7 +2345,7 @@ int FileAsyncWrite(AioDispatchDesc_t** dList, int32 dn) } /* replace the virtual fd with the real one */ - dList[i]->aiocb.aio_fildes = u_sess->storage_cxt.VfdCache[file].fd; + dList[i]->aiocb.aio_fildes = vfdcache[file].fd; } io_context_t aio_context = CompltrContext(dList[0]->blockDesc.reqType, 0); @@ -2355,7 +2377,7 @@ void FileAsyncCUClose(File* vfdList, int32 vfdnum) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("FileAsyncCUClose : invalid vfd(%d), SizeVfdCache(%lu)", file, - u_sess->storage_cxt.SizeVfdCache))); + GetSizeVfdCache()))); } } return; @@ -2372,15 +2394,15 @@ int FileAsyncCURead(AioDispatchCUDesc_t** dList, int32 dn) { int returnCode = 0; File file; - + vfd *vfdcache = GetVfdCache(); for (int i = 0; i < dn; i++) { file = dList[i]->aiocb.aio_fildes; Assert(FileIsValid(file)); DO_DB(ereport(LOG, (errmsg("FileAsyncRead: fd(%d), filename(%s), seekpos(%ld)", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos)))); + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos)))); returnCode = FileAccess(file); if (returnCode < 0) { @@ -2388,7 +2410,7 @@ int FileAsyncCURead(AioDispatchCUDesc_t** dList, int32 dn) return returnCode; } /* replace the virtual fd with the real one */ - dList[i]->aiocb.aio_fildes = u_sess->storage_cxt.VfdCache[file].fd; + dList[i]->aiocb.aio_fildes = vfdcache[file].fd; } io_context_t aio_context = CompltrContext(dList[0]->cuDesc.reqType, 0); @@ -2413,7 +2435,7 @@ int FileAsyncCURead(AioDispatchCUDesc_t** dList, int32 dn) int FileAsyncCUWrite(AioDispatchCUDesc_t** dList, int32 dn) { int returnCode = 0; - + vfd *vfdcache = GetVfdCache(); Assert(dn > 0); for (int i = 0; i < dn; i++) { File file = dList[i]->aiocb.aio_fildes; @@ -2423,8 +2445,8 @@ int FileAsyncCUWrite(AioDispatchCUDesc_t** dList, int32 dn) DO_DB(ereport(LOG, (errmsg("FileAsyncCUWrite: fd(%d), filename(%s), seekpos(%ld)", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos)))); + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos)))); returnCode = FileAccess(file); if 
(returnCode < 0) { @@ -2433,7 +2455,7 @@ int FileAsyncCUWrite(AioDispatchCUDesc_t** dList, int32 dn) return returnCode; } /* replace the virtual fd with the real one */ - dList[i]->aiocb.aio_fildes = u_sess->storage_cxt.VfdCache[file].fd; + dList[i]->aiocb.aio_fildes = vfdcache[file].fd; } io_context_t aio_context = CompltrContext(dList[0]->cuDesc.reqType, 0); @@ -2445,6 +2467,7 @@ int FileAsyncCUWrite(AioDispatchCUDesc_t** dList, int32 dn) return returnCode; } +#endif void FileFastExtendFile(File file, uint32 offset, uint32 size, bool keep_size) { @@ -2462,15 +2485,15 @@ void FileFastExtendFile(File file, uint32 offset, uint32 size, bool keep_size) if (keep_size) { mode = FALLOC_FL_KEEP_SIZE; } - - returnCode = fallocate(u_sess->storage_cxt.VfdCache[file].fd, mode, offset, size); + vfd *vfdcache = GetVfdCache(); + returnCode = fallocate(vfdcache[file].fd, mode, offset, size); if (returnCode != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("fallocate(fd=%d, amount=%u, offset=%u),write count(%d), errno(%d), " "maybe you use adio without XFS filesystem, if you really want do this," "please turn off GUC parameter enable_fast_allocate", - u_sess->storage_cxt.VfdCache[file].fd, + vfdcache[file].fd, size, offset, returnCode, @@ -2485,8 +2508,9 @@ int FileSync(File file, uint32 wait_event_info) int returnCode; Assert(FileIsValid(file)); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("FileSync: %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("FileSync: %d (%s)", file, vfdcache[file].fileName)))); returnCode = FileAccess(file); if (returnCode < 0) @@ -2495,7 +2519,7 @@ int FileSync(File file, uint32 wait_event_info) pgstat_report_waitevent(wait_event_info); PGSTAT_INIT_TIME_RECORD(); PGSTAT_START_TIME_RECORD(); - returnCode = pg_fsync(u_sess->storage_cxt.VfdCache[file].fd); + returnCode = pg_fsync(vfdcache[file].fd); PGSTAT_END_TIME_RECORD(DATA_IO_TIME); pgstat_report_waitevent(WAIT_EVENT_END); @@ -2513,12 +2537,12 @@ off_t FileSeek(File file, off_t offset, int whence) Assert(FileIsValid(file)); /* only SEEK_END is valid for multithreading backends */ Assert(whence == SEEK_END && offset == 0); - + vfd *vfdcache = GetVfdCache(); DO_DB(ereport(LOG, (errmsg("FileSeek: %d (%s) " INT64_FORMAT " " INT64_FORMAT " %d", file, - u_sess->storage_cxt.VfdCache[file].fileName, - (int64)u_sess->storage_cxt.VfdCache[file].seekPos, + vfdcache[file].fileName, + (int64)vfdcache[file].seekPos, (int64)offset, whence)))); @@ -2528,8 +2552,8 @@ off_t FileSeek(File file, off_t offset, int whence) return returnCode; } - u_sess->storage_cxt.VfdCache[file].seekPos = lseek(u_sess->storage_cxt.VfdCache[file].fd, offset, whence); - return u_sess->storage_cxt.VfdCache[file].seekPos; + vfdcache[file].seekPos = lseek(vfdcache[file].fd, offset, whence); + return vfdcache[file].seekPos; } /* @@ -2539,8 +2563,9 @@ off_t FileSeek(File file, off_t offset, int whence) off_t FileTell(File file) { Assert(FileIsValid(file)); - DO_DB(ereport(LOG, (errmsg("FileTell %d (%s)", file, u_sess->storage_cxt.VfdCache[file].fileName)))); - return u_sess->storage_cxt.VfdCache[file].seekPos; + vfd *vfdcache = GetVfdCache(); + DO_DB(ereport(LOG, (errmsg("FileTell %d (%s)", file, vfdcache[file].fileName)))); + return vfdcache[file].seekPos; } #endif @@ -2549,24 +2574,25 @@ int FileTruncate(File file, off_t offset, uint32 wait_event_info) int returnCode; Assert(FileIsValid(file)); + vfd *vfdcache = GetVfdCache(); - DO_DB(ereport(LOG, (errmsg("FileTruncate %d (%s)", file, 
u_sess->storage_cxt.VfdCache[file].fileName)))); + DO_DB(ereport(LOG, (errmsg("FileTruncate %d (%s)", file, vfdcache[file].fileName)))); returnCode = FileAccess(file); if (returnCode < 0) return returnCode; pgstat_report_waitevent(wait_event_info); - returnCode = ftruncate(u_sess->storage_cxt.VfdCache[file].fd, offset); + returnCode = ftruncate(vfdcache[file].fd, offset); pgstat_report_waitevent(WAIT_EVENT_END); - if (returnCode == 0 && u_sess->storage_cxt.VfdCache[file].fileSize > offset) { + if (returnCode == 0 && vfdcache[file].fileSize > offset) { /* adjust our state for truncation of a temp file */ - Assert(u_sess->storage_cxt.VfdCache[file].fdstate & FD_TEMP_FILE_LIMIT); - uint64 descSize = (uint64)(u_sess->storage_cxt.VfdCache[file].fileSize - offset); + Assert(vfdcache[file].fdstate & FD_TEMP_FILE_LIMIT); + uint64 descSize = (uint64)(vfdcache[file].fileSize - offset); perm_space_decrease(GetUserId(), descSize, SP_SPILL); u_sess->storage_cxt.temporary_files_size -= descSize; - u_sess->storage_cxt.VfdCache[file].fileSize = offset; + vfdcache[file].fileSize = offset; } return returnCode; @@ -2581,8 +2607,9 @@ int FileTruncate(File file, off_t offset, uint32 wait_event_info) char* FilePathName(File file) { Assert(FileIsValid(file)); + vfd *vfdcache = GetVfdCache(); - return u_sess->storage_cxt.VfdCache[file].fileName; + return vfdcache[file].fileName; } /* @@ -2593,7 +2620,8 @@ char* FilePathName(File file) int FileFd(File file) { Assert(FileIsValid(file)); - return u_sess->storage_cxt.VfdCache[file].fd; + vfd *vfdcache = GetVfdCache(); + return vfdcache[file].fd; } /* @@ -2899,6 +2927,12 @@ int OpenTransientFile(FileName fileName, int fileFlags, int fileMode) DO_DB(ereport( LOG, (errmsg("OpenTransientFile: Allocated %d (%s)", u_sess->storage_cxt.numAllocatedDescs, fileName)))); + if (!ReserveAllocatedDesc()) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("exceeded maxAllocatedDescs (%d) while trying to open file \"%s\"", + u_sess->storage_cxt.maxAllocatedDescs, fileName))); + } + ReleaseLruFiles(); /* * The test against MAX_ALLOCATED_DESCS prevents us from overflowing * allocatedFiles[]; the test against max_safe_fds prevents BasicOpenFile @@ -3195,10 +3229,11 @@ int FreeDir(DIR* dir) void closeAllVfds(void) { Index i; - - if (u_sess->storage_cxt.SizeVfdCache > 0) { + Size size = 0; + size = GetSizeVfdCache(); + if (size > 0) { Assert(FileIsNotOpen(0)); /* Make sure ring not corrupted */ - for (i = 1; i < u_sess->storage_cxt.SizeVfdCache; i++) { + for (i = 1; i < size; i++) { if (!FileIsNotOpen(i)) LruDelete((File)i); } @@ -3214,19 +3249,19 @@ void DestroyAllVfds(void) { Index i; ereport(DEBUG5, (errmsg("Thread \"%lu\" trace: closeAllVfds", (unsigned long int)t_thrd.proc_cxt.MyProcPid))); - if (u_sess->storage_cxt.SizeVfdCache > 0) { + if (GetSizeVfdCache() > 0) { Assert(FileIsNotOpen(0)); /* Make sure ring not corrupted */ - for (i = 1; i < u_sess->storage_cxt.SizeVfdCache; i++) { + for (i = 1; i < GetSizeVfdCache(); i++) { if (FileIsValid((int)i)) FileClose((File)i); } - - if (u_sess->storage_cxt.VfdCache != NULL) { - pfree(u_sess->storage_cxt.VfdCache); + vfd *vfdcache = GetVfdCache(); + if (vfdcache != NULL) { + pfree(vfdcache); } } - u_sess->storage_cxt.VfdCache = NULL; - u_sess->storage_cxt.SizeVfdCache = 0; + SetVfdCache(NULL); + SetSizeVfdCache(0); } /* @@ -3373,13 +3408,14 @@ static void CleanupTempFiles(bool isProcExit) * Careful here: at proc_exit we need extra cleanup, not just * xact_temporary files. 
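Earlier in this file, the OpenTransientFile hunk adds the usual reserve-then-open discipline: ReserveAllocatedDesc() fails fast, quoting maxAllocatedDescs, when the per-session descriptor table is full, and ReleaseLruFiles() closes an LRU-cached kernel fd when the process sits at max_safe_fds, so the subsequent open() can overflow neither table. A generic, self-contained sketch of the same discipline follows; the limit and names are illustrative.

```
// Reserve a table slot before open() so success cannot overflow the table.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

static const int kMaxAllocated = 8;
static int allocated_fds[kMaxAllocated];
static int n_allocated = 0;

static int OpenTrackedFile(const char *path, int flags)
{
    if (n_allocated >= kMaxAllocated) {        /* reserve before open */
        fprintf(stderr, "exceeded max allocated descriptors (%d) opening \"%s\"\n",
                kMaxAllocated, path);
        return -1;
    }
    /* a real implementation would evict an LRU-cached fd here if needed */
    int fd = open(path, flags);
    if (fd >= 0)
        allocated_fds[n_allocated++] = fd;     /* slot was guaranteed above */
    return fd;
}

int main()
{
    int fd = OpenTrackedFile("/etc/hostname", O_RDONLY);
    if (fd >= 0)
        close(fd);
    return 0;
}
```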
*/ + vfd *vfdcache = GetVfdCache(); if (isProcExit || u_sess->storage_cxt.have_xact_temporary_files) { Assert(FileIsNotOpen(0)); /* Make sure ring not corrupted */ - for (i = 1; i < u_sess->storage_cxt.SizeVfdCache; i++) { - unsigned short fdstate = u_sess->storage_cxt.VfdCache[i].fdstate; + for (i = 1; i < GetSizeVfdCache(); i++) { + unsigned short fdstate = vfdcache[i].fdstate; if (((fdstate & FD_DELETE_AT_CLOSE) || (fdstate & FD_CLOSE_AT_EOXACT)) && - u_sess->storage_cxt.VfdCache[i].fileName != NULL) { + vfdcache[i].fileName != NULL) { /* * If we're in the process of exiting a backend process, close * all temporary files. Otherwise, only close temporary files @@ -3392,15 +3428,15 @@ static void CleanupTempFiles(bool isProcExit) else if (fdstate & FD_CLOSE_AT_EOXACT) { ereport(WARNING, (errmsg("temporary file %s not closed at end-of-transaction", - u_sess->storage_cxt.VfdCache[i].fileName))); + vfdcache[i].fileName))); FileClose((File)i); } - } else if ((fdstate & FD_ERRTBL_LOG) && u_sess->storage_cxt.VfdCache[i].fileName != NULL) { + } else if ((fdstate & FD_ERRTBL_LOG) && vfdcache[i].fileName != NULL) { /* first unlink the physical file because FileClose() will destroy filename. */ - if ((fdstate & FD_ERRTBL_LOG_OWNER) && unlink(u_sess->storage_cxt.VfdCache[i].fileName)) { + if ((fdstate & FD_ERRTBL_LOG_OWNER) && unlink(vfdcache[i].fileName)) { ereport(WARNING, (errmsg("[ErrorTable/Abort]Unlink cache file failed: %s", - u_sess->storage_cxt.VfdCache[i].fileName))); + vfdcache[i].fileName))); } /* close this cache file about error table and clean vfd's flag */ FileClose((File)i); @@ -3719,29 +3755,28 @@ void GetFdGlobalVariables(void*** global_VfdCache, Size** global_SizeVfdCache) save_VfdCache = NULL; save_SizeVfdCache = 0; - *global_VfdCache = (void**)&u_sess->storage_cxt.VfdCache; - *global_SizeVfdCache = &u_sess->storage_cxt.SizeVfdCache; + *global_VfdCache = (void**)GetVfdCachePtr(); + *global_SizeVfdCache = GetSizeVfdCachePtr(); } void SwitchToGlobalVfdCache(void** vfd, Size* vfd_size) { - save_VfdCache = u_sess->storage_cxt.VfdCache; - save_SizeVfdCache = u_sess->storage_cxt.SizeVfdCache; + save_VfdCache = GetVfdCache(); + save_SizeVfdCache = GetSizeVfdCache(); - u_sess->storage_cxt.VfdCache = (Vfd*)*vfd; - u_sess->storage_cxt.SizeVfdCache = *vfd_size; + SetVfdCache((Vfd *)*vfd); + SetSizeVfdCache(*vfd_size); } /* Set the THR_LOCAL VFD information from y */ void ResetToLocalVfdCache() { if (save_VfdCache != NULL) { - u_sess->storage_cxt.VfdCache = save_VfdCache; + SetVfdCache(save_VfdCache); save_VfdCache = NULL; } - if (save_SizeVfdCache != 0) { - u_sess->storage_cxt.SizeVfdCache = save_SizeVfdCache; + SetSizeVfdCache(save_SizeVfdCache); save_SizeVfdCache = 0; } } @@ -3794,6 +3829,31 @@ bool FdRefcntIsZero(SMgrRelation reln, ForkNumber forkNum) return true; } +bool repair_deleted_file_check(RelFileNodeForkNum fileNode, int fd) +{ + bool result = true; + uint32 newHash = VFDTableHashCode((void*)&fileNode); + AutoMutexLock vfdLock(VFDMappingPartitionLock(newHash)); + vfdLock.lock(); + DataFileIdCacheEntry* entry = + (DataFileIdCacheEntry*)hash_search_with_hash_value(t_thrd.storage_cxt.DataFileIdCache, + (void*)&fileNode, newHash, HASH_FIND, NULL); + if (fd < 0) { + if (entry != NULL && entry->repaired_fd >= 0) { + result = false; + } + } else { + if (entry != NULL && entry->fd >= 0) { + entry->repaired_fd = entry->fd; + entry->fd = fd; + } else { + result = false; + } + } + vfdLock.unLock(); + return result; +} + /* * check whether the file is existed. 
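Taken together, the fd.cpp changes replace every direct u_sess->storage_cxt.VfdCache/SizeVfdCache/nfile reference with GetVfdCache()/SetVfdCache()/GetSizeVfdCache()/AddVfdNfile() accessors, so the backing store can be chosen in exactly one place, presumably session-owned versus thread-owned when the local syscache is enabled. Below is a self-contained sketch of that indirection; Cxt, the globals, and the predicate are illustrative stand-ins, not the engine's definitions.

```
// Stand-in rendering of the GetVfdCache()/SetVfdCache() indirection.
#include <cstddef>
#include <cstdio>

struct Vfd { int fd; };
struct Cxt { Vfd *VfdCache = nullptr; size_t SizeVfdCache = 0; };

static Cxt g_sessionCxt;              /* stands in for u_sess->storage_cxt */
static thread_local Cxt g_threadCxt;  /* stands in for a thread-local cache */
static bool g_useThreadCache = true;  /* stands in for EnableLocalSysCache() */

static inline Cxt *ActiveCxt() { return g_useThreadCache ? &g_threadCxt : &g_sessionCxt; }
static inline Vfd *GetVfdCache() { return ActiveCxt()->VfdCache; }
static inline void SetVfdCache(Vfd *v) { ActiveCxt()->VfdCache = v; }
static inline size_t GetSizeVfdCache() { return ActiveCxt()->SizeVfdCache; }
static inline void SetSizeVfdCache(size_t n) { ActiveCxt()->SizeVfdCache = n; }

int main()
{
    static Vfd ring[4];
    SetVfdCache(ring);
    SetSizeVfdCache(4);
    GetVfdCache()[1].fd = 42;   /* former u_sess->storage_cxt.VfdCache[1].fd */
    printf("size=%zu fd=%d\n", GetSizeVfdCache(), GetVfdCache()[1].fd);
    return 0;
}
```

The payoff shows in routines such as SwitchToGlobalVfdCache() and DestroyAllVfds() above, which now swap or tear down the whole cache through SetVfdCache()/SetSizeVfdCache() instead of poking session fields directly.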
*/ @@ -3934,48 +3994,3 @@ static void UnlinkIfExistsFname(const char *fname, bool isdir, int elevel) } } -/* - * initialize page compress memory map. - * - */ -void SetupPageCompressMemoryMap(File file, RelFileNode node, const RelFileNodeForkNum& relFileNodeForkNum) -{ - Vfd *vfdP = &u_sess->storage_cxt.VfdCache[file]; - auto chunk_size = CHUNK_SIZE_LIST[GET_COMPRESS_CHUNK_SIZE(node.opt)]; - int returnCode = FileAccess(file); - if (returnCode < 0) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("Failed to open file %s: %m", vfdP->fileName))); - } - RelFileNodeForkNum newOne(relFileNodeForkNum); - newOne.forknumber = PCA_FORKNUM; - PageCompressHeader *map = GetPageCompressHeader(vfdP, chunk_size, newOne); - vfdP->with_pcmap = true; - vfdP->pcmap = map; -} - -/* - * Return the page compress memory map. - * - */ -PageCompressHeader *GetPageCompressMemoryMap(File file, uint32 chunk_size) -{ - int returnCode; - Vfd *vfdP = &u_sess->storage_cxt.VfdCache[file]; - PageCompressHeader *map = NULL; - - Assert(FileIsValid(file)); - - returnCode = FileAccess(file); - if (returnCode < 0) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("Failed to open file %s: %m", vfdP->fileName))); - } - - Assert(vfdP->with_pcmap); - if (vfdP->pcmap == NULL) { - map = GetPageCompressHeader(vfdP, chunk_size, vfdP->fileNode); - vfdP->with_pcmap = true; - vfdP->pcmap = map; - } - - return vfdP->pcmap; -} \ No newline at end of file diff --git a/src/gausskernel/storage/ipc/ipc.cpp b/src/gausskernel/storage/ipc/ipc.cpp index 88e803806..115640deb 100644 --- a/src/gausskernel/storage/ipc/ipc.cpp +++ b/src/gausskernel/storage/ipc/ipc.cpp @@ -85,6 +85,7 @@ static const pg_on_exit_callback on_sess_exit_list[] = { ShutdownPostgres, PGXCNodeCleanAndRelease, PlDebugerCleanUp, + cleanGPCPlanProcExit, #ifdef ENABLE_MOT /* * 1. Must come after ShutdownPostgres(), in case there is abort/rollback callback. 
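The ipc.cpp changes that follow touch the same exit machinery twice: cleanGPCPlanProcExit joins the ordered on_sess_exit_list here, and further below on_proc_exit() becomes idempotent for AtProcExit_Files and smgrshutdown, so a pooled thread re-registering them after a cache miss cannot exhaust the MAX_ON_EXITS slots. A standalone sketch of that idempotent-registration idea (all names illustrative):

```
// Exit-callback list with idempotent registration, run in reverse order.
#include <cassert>
#include <cstdio>

typedef void (*exit_callback)(int code);

static const int MAX_ON_EXITS = 20;
static exit_callback on_exit_list[MAX_ON_EXITS];
static int on_exit_index = 0;

static void register_once(exit_callback fn)
{
    assert(on_exit_index < MAX_ON_EXITS);
    for (int i = 0; i < on_exit_index; i++) {
        if (on_exit_list[i] == fn)
            return;                  /* already registered: reuse the slot */
    }
    on_exit_list[on_exit_index++] = fn;
}

static void say_bye(int code) { printf("exit callback, code=%d\n", code); }

int main()
{
    register_once(say_bye);
    register_once(say_bye);          /* second registration is a no-op */
    for (int i = on_exit_index - 1; i >= 0; i--)
        on_exit_list[i](0);          /* run in reverse registration order */
    return 0;
}
```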
@@ -120,8 +121,6 @@ void WaitGraceThreadsExit(void) for (loop = 0; loop < WAITTIME; loop++) { if (0 != alive_threads_waitted) { pg_usleep(100); - } else { - break; } } } @@ -392,8 +391,6 @@ void proc_exit_prepare(int code) { sigset_t old_sigset; - closeAllVfds(); - PreventInterrupt(); /* @@ -433,6 +430,9 @@ void proc_exit_prepare(int code) } } + /* release all refcount and lock */ + CloseLocalSysDBCache(); + t_thrd.storage_cxt.on_proc_exit_index = 0; gs_signal_recover_mask(old_sigset); @@ -482,7 +482,9 @@ void sess_exit_prepare(int code) { sigset_t old_sigset; - closeAllVfds(); + if (!EnableLocalSysCache()) { + closeAllVfds(); + } PreventInterrupt(); HOLD_INTERRUPTS(); @@ -497,9 +499,14 @@ void sess_exit_prepare(int code) } } - for (; u_sess->on_sess_exit_index < on_sess_exit_size; u_sess->on_sess_exit_index++) + for (; u_sess->on_sess_exit_index < on_sess_exit_size; u_sess->on_sess_exit_index++) { + if (EnableLocalSysCache() && on_sess_exit_list[u_sess->on_sess_exit_index] == AtProcExit_Files) { + // we close this only on proc exit + continue; + } (*on_sess_exit_list[u_sess->on_sess_exit_index])(code, UInt32GetDatum(NULL)); - + } + t_thrd.storage_cxt.on_proc_exit_index = 0; RESUME_INTERRUPTS(); gs_signal_recover_mask(old_sigset); @@ -548,6 +555,16 @@ void on_proc_exit(pg_on_exit_callback function, Datum arg) { if (t_thrd.storage_cxt.on_proc_exit_index >= MAX_ON_EXITS) ereport(FATAL, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg_internal("out of on_proc_exit slots"))); + + /* reregister when cache miss on thread pool mode */ + if ((function == AtProcExit_Files || function == smgrshutdown) && EnableLocalSysCache()) { + for (int i = 0; i < t_thrd.storage_cxt.on_proc_exit_index; i++) { + if (t_thrd.storage_cxt.on_proc_exit_list[i].function == function) { + Assert(t_thrd.storage_cxt.on_proc_exit_list[i].arg == arg); + return; + } + } + } t_thrd.storage_cxt.on_proc_exit_list[t_thrd.storage_cxt.on_proc_exit_index].function = function; t_thrd.storage_cxt.on_proc_exit_list[t_thrd.storage_cxt.on_proc_exit_index].arg = arg; diff --git a/src/gausskernel/storage/ipc/ipci.cpp b/src/gausskernel/storage/ipc/ipci.cpp index 4ec164324..832468591 100644 --- a/src/gausskernel/storage/ipc/ipci.cpp +++ b/src/gausskernel/storage/ipc/ipci.cpp @@ -26,6 +26,7 @@ #include "access/subtrans.h" #include "access/twophase.h" #include "access/double_write.h" +#include "catalog/pg_uid_fn.h" #include "catalog/storage_gtt.h" #include "access/ustore/undo/knl_uundoapi.h" #include "access/ustore/knl_undoworker.h" @@ -44,6 +45,7 @@ #include "postmaster/bgwriter.h" #include "postmaster/bgworker.h" #include "postmaster/pagewriter.h" +#include "postmaster/pagerepair.h" #include "postmaster/postmaster.h" #include "postmaster/snapcapturer.h" #include "postmaster/rbcleaner.h" @@ -73,16 +75,21 @@ #include "storage/dfs/dfs_connector.h" #include "storage/xlog_share_storage/xlog_share_storage.h" #include "utils/memprot.h" +#include "pgaudit.h" #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/cache/queryid_cachemgr.h" #endif /* ENABLE_MULTIPLE_NODES */ #include "replication/dcf_replication.h" +#include "commands/verify.h" /* we use semaphore not LWLOCK, because when thread InitGucConfig, it does not get a t_thrd.proc */ pthread_mutex_t gLocaleMutex = PTHREAD_MUTEX_INITIALIZER; extern void SetShmemCxt(void); +#ifdef ENABLE_MULTIPLE_NODES +extern void InitDisasterCache(); +#endif /* * RequestAddinShmemSpace @@ -142,6 +149,7 @@ Size ComputeTotalSizeOfShmem() size = add_size(size, PMSignalShmemSize()); size = add_size(size, 
ProcSignalShmemSize()); size = add_size(size, CheckpointerShmemSize()); + size = add_size(size, PageWriterShmemSize()); size = add_size(size, AutoVacuumShmemSize()); size = add_size(size, WalSndShmemSize()); size = add_size(size, WalRcvShmemSize()); @@ -393,12 +401,19 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port) * Set up thread shared fd cache */ InitDataFileIdCache(); + InitUidCache(); /* * Set up seg spc cache */ InitSegSpcCache(); +#ifdef ENABLE_MULTIPLE_NODES + if (IS_DISASTER_RECOVER_MODE) { + InitDisasterCache(); + } +#endif + /* * Set up CStoreSpaceAllocator */ @@ -413,10 +428,12 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port) } #endif /* ENABLE_MULTIPLE_NODES */ +#ifndef ENABLE_LITE_MODE /* * Set up DfsConnector cache */ dfs::InitOBSConnectorCacheLock(); +#endif /* set up Dummy server cache */ InitDummyServrCache(); @@ -426,7 +443,11 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port) */ DfsInsert::InitDfsSpaceCache(); - LsnXlogFlushChkShmInit(); + LsnXlogFlushChkShmInit(); + + PageRepairHashTblInit(); + FileRepairHashTblInit(); + initRepairBadBlockStat(); if (g_instance.ckpt_cxt_ctl->prune_queue_lock == NULL) { g_instance.ckpt_cxt_ctl->prune_queue_lock = LWLockAssign(LWTRANCHE_PRUNE_DIRTY_QUEUE); @@ -438,6 +459,16 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port) (void)MemoryContextSwitchTo(oldcontext); } + if (g_instance.pid_cxt.PgAuditPID == NULL) { + MemoryContext oldcontext = MemoryContextSwitchTo(g_instance.audit_cxt.global_audit_context); + int thread_num = g_instance.attr.attr_security.audit_thread_num; + g_instance.pid_cxt.PgAuditPID = (ThreadId*)palloc0(sizeof(ThreadId) * thread_num); + (void)MemoryContextSwitchTo(oldcontext); + } + + if (ENABLE_INCRE_CKPT) { + PageWriterSyncShmemInit(); + } if (ENABLE_INCRE_CKPT && g_instance.ckpt_cxt_ctl->pgwr_procs.writer_proc == NULL) { incre_ckpt_pagewriter_cxt_init(); } diff --git a/src/gausskernel/storage/ipc/procarray.cpp b/src/gausskernel/storage/ipc/procarray.cpp index ec8f40bd5..0dc6f2557 100755 --- a/src/gausskernel/storage/ipc/procarray.cpp +++ b/src/gausskernel/storage/ipc/procarray.cpp @@ -105,6 +105,7 @@ #include "access/gtm.h" #include "storage/ipc.h" #include "pgxc/nodemgr.h" + #include"replication/walreceiver.h" /* PGXC_DATANODE */ #include "postmaster/autovacuum.h" #include "postmaster/postmaster.h" @@ -189,15 +190,10 @@ static void ResetProcXidCache(PGPROC* proc, bool needlock); /* for local multi version snapshot */ void CalculateLocalLatestSnapshot(bool forceCalc); static TransactionId GetMultiSnapshotOldestXmin(); -#ifdef ENABLE_MULTIPLE_NODES - static TransactionId FixSnapshotXminByLocal(TransactionId xid); -#endif static inline void ProcArrayEndTransactionInternal(PGPROC* proc, PGXACT* pgxact, TransactionId latestXid, - TransactionId* xid, uint32* nsubxids, bool isCommit); + TransactionId* xid, uint32* nsubxids); static void ProcArrayGroupClearXid(PGPROC* proc, TransactionId latestXid); -static void UpdateCSNAtTransactionCommit(CommitSeqNo maxCommitCSN); - extern bool StreamTopConsumerAmI(); @@ -449,9 +445,6 @@ void ProcArrayRemove(PGPROC* proc, TransactionId latestXid) arrayP->pgprocnos[arrayP->numProcs - 1] = -1; /* for debugging */ arrayP->numProcs--; - /* Update csn in shared memory after transaction commit. */ - UpdateCSNAtTransactionCommit(0); - /* Calc new sanpshot. 
*/ if (TransactionIdIsValid(latestXid)) CalculateLocalLatestSnapshot(false); @@ -532,7 +525,7 @@ void ProcArrayEndTransaction(PGPROC* proc, TransactionId latestXid, bool isCommi TransactionId xid; uint32 nsubxids; - ProcArrayEndTransactionInternal(proc, pgxact, latestXid, &xid, &nsubxids, isCommit); + ProcArrayEndTransactionInternal(proc, pgxact, latestXid, &xid, &nsubxids); CalculateLocalLatestSnapshot(false); LWLockRelease(ProcArrayLock); } else @@ -550,6 +543,7 @@ void ProcArrayEndTransaction(PGPROC* proc, TransactionId latestXid, bool isCommi pgxact->next_xid = InvalidTransactionId; pgxact->xmin = InvalidTransactionId; pgxact->csn_min = InvalidCommitSeqNo; + pgxact->csn_dr = InvalidCommitSeqNo; /* must be cleared with xid/xmin: */ pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK; ProcArrayClearAutovacuum(pgxact); @@ -574,7 +568,7 @@ void ProcArrayEndTransaction(PGPROC* proc, TransactionId latestXid, bool isCommi * We don't do any locking here; caller must handle that. */ static inline void ProcArrayEndTransactionInternal(PGPROC* proc, PGXACT* pgxact, TransactionId latestXid, - TransactionId* xid, uint32* nsubxids, bool isCommit) + TransactionId* xid, uint32* nsubxids) { /* Store xid and nsubxids to update csnlog */ *xid = pgxact->xid; @@ -591,6 +585,7 @@ static inline void ProcArrayEndTransactionInternal(PGPROC* proc, PGXACT* pgxact, proc->lxid = InvalidLocalTransactionId; pgxact->xmin = InvalidTransactionId; pgxact->csn_min = InvalidCommitSeqNo; + pgxact->csn_dr = InvalidCommitSeqNo; /* must be cleared with xid/xmin: */ pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK; ProcArrayClearAutovacuum(pgxact); @@ -604,9 +599,6 @@ static inline void ProcArrayEndTransactionInternal(PGPROC* proc, PGXACT* pgxact, if (TransactionIdPrecedes(t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid, latestXid)) t_thrd.xact_cxt.ShmemVariableCache->latestCompletedXid = latestXid; - if (TransactionIdIsNormal(latestXid) && isCommit) - UpdateCSNAtTransactionCommit(0); - /* Clear commit csn after csn update */ proc->commitCSN = 0; pgxact->needToSyncXid = 0; @@ -702,15 +694,12 @@ static void ProcArrayGroupClearXid(PGPROC* proc, TransactionId latestXid) if (proc_member->commitCSN > maxcsn) maxcsn = proc_member->commitCSN; ProcArrayEndTransactionInternal( - proc_member, pgxact, proc_member->procArrayGroupMemberXid, &xid[index], &nsubxids[index], false); + proc_member, pgxact, proc_member->procArrayGroupMemberXid, &xid[index], &nsubxids[index]); /* Move to next proc in list. */ nextidx = pg_atomic_read_u32(&proc_member->procArrayGroupNext); index++; } - /* Update CSN only once after loop. */ - UpdateCSNAtTransactionCommit(maxcsn); - /* already hold lock, caculate snapshot after last invocation */ CalculateLocalLatestSnapshot(false); @@ -767,6 +756,7 @@ void ProcArrayClearTransaction(PGPROC* proc) proc->lxid = InvalidLocalTransactionId; pgxact->xmin = InvalidTransactionId; pgxact->csn_min = InvalidCommitSeqNo; + pgxact->csn_dr = InvalidCommitSeqNo; proc->recoveryConflictPending = false; /* redundant, but just in case */ @@ -782,32 +772,6 @@ void ProcArrayClearTransaction(PGPROC* proc) ResetProcXidCache(proc, true); } -/* Update csn in shared memory. - * - * Input param maxCommitCSN is used at group commit with gtm, - * it's the max commit csn of group commit transactions, - * else 0. 
- */ -static void UpdateCSNAtTransactionCommit(CommitSeqNo maxCommitCSN) -{ - CommitSeqNo result = 0; - - /* under following condition, nextCSN has already been updated, just return */ - if (useLocalXid || !IsPostmasterEnvironment || GTM_FREE_MODE || GTM_LITE_MODE) { - return; - } - /* Get CSN and update nextCommitSeqNo to csn+1 */ - if (maxCommitCSN) - result = maxCommitCSN; - else - result = GetCommitCsn(); - - if (t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo < result + 1) - t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo = result + 1; - - return; -} - void UpdateCSNLogAtTransactionEND( TransactionId xid, int nsubxids, TransactionId* subXids, CommitSeqNo csn, bool isCommit) { @@ -1309,7 +1273,7 @@ bool TransactionIdIsInProgress(TransactionId xid, uint32* needSync, bool shortcu * local must sync with gtm. */ if (shortcutByRecentXmin && - TransactionIdPrecedes(xid, pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo))) { + TransactionIdPrecedes(xid, pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo))) { xc_by_recent_xmin_inc(); /* @@ -1567,16 +1531,18 @@ TransactionId GetOldestXmin(Relation rel, bool bFixRecentGlobalXmin, bool bRecen (!TransactionIdIsValid(result) || TransactionIdPrecedes(currGlobalXmin, result))) result = currGlobalXmin; } - } else + } else { /* directly fetch recentGlobalXmin from ShmemVariableCache */ result = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin); + } /* Update by vacuum_defer_cleanup_age */ - if (TransactionIdPrecedes(result, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) + if (TransactionIdPrecedes(result, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) { result = FirstNormalTransactionId; - else + } else { result -= u_sess->attr.attr_storage.vacuum_defer_cleanup_age; - + } + /* Check whether there's a replication slot requiring an older xmin. */ if (TransactionIdIsNormal(replication_slot_xmin) && TransactionIdPrecedes(replication_slot_xmin, result)) result = replication_slot_xmin; @@ -1617,27 +1583,26 @@ TransactionId GetGlobalOldestXmin() result = GetMultiSnapshotOldestXmin(); /* Update by vacuum_defer_cleanup_age */ - if (TransactionIdPrecedes(result, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) + if (TransactionIdPrecedes(result, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) { result = FirstNormalTransactionId; - else + } else { result -= u_sess->attr.attr_storage.vacuum_defer_cleanup_age; - + } return result; } -TransactionId GetOldestXminForUndo(void) +TransactionId GetOldestXminForUndo(TransactionId *recycleXmin) { TransactionId oldestXmin = GetMultiSnapshotOldestXmin(); - TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + *recycleXmin = oldestXmin; + TransactionId oldestXidInUndo = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); + TransactionId xmin = FirstNormalTransactionId; if (ENABLE_TCAP_VERSION) { - if (TransactionIdPrecedes(oldestXmin, (uint64)u_sess->attr.attr_storage.version_retention_age)) { - oldestXmin = FirstNormalTransactionId; - } else { - oldestXmin -= u_sess->attr.attr_storage.version_retention_age; - } + xmin = g_instance.flashback_cxt.oldestXminInFlashback; + *recycleXmin = (oldestXmin < xmin) ? 
oldestXmin : xmin; } - if (unlikely(TransactionIdPrecedes(oldestXmin, oldestXidInUndo))) { - oldestXmin = oldestXidInUndo; + if (unlikely(TransactionIdPrecedes(*recycleXmin, oldestXidInUndo))) { + *recycleXmin = oldestXidInUndo; } return oldestXmin; } @@ -1718,8 +1683,7 @@ Snapshot GetSnapshotData(Snapshot snapshot, bool force_local_snapshot) t_thrd.xact_cxt.useLocalSnapshot = false; - if (GTM_MODE || - (GTM_LITE_MODE && ((is_exec_cn && !force_local_snapshot) || /* GTM_LITE exec cn */ + if (IS_DISASTER_RECOVER_MODE || (GTM_LITE_MODE && ((is_exec_cn && !force_local_snapshot) || /* GTM_LITE exec cn */ (!is_exec_cn && u_sess->utils_cxt.snapshot_source == SNAPSHOT_COORDINATOR)))) { /* GTM_LITE other node */ /* * Obtain a global snapshot for a openGauss session @@ -1732,8 +1696,6 @@ Snapshot GetSnapshotData(Snapshot snapshot, bool force_local_snapshot) } } } - /* For gtm mode, use local snapshot */ - t_thrd.xact_cxt.useLocalSnapshot = GTM_MODE ? true : false; /* first we try to get multiversion snapshot */ if (t_thrd.postmaster_cxt.HaShmData->current_mode == PRIMARY_MODE || @@ -1915,27 +1877,32 @@ RETRY_GET: * different way of computing it than GetOldestXmin uses, but should give * the same result. */ - if (TransactionIdPrecedes(xmin, globalxmin)) + if (TransactionIdPrecedes(xmin, globalxmin)) { globalxmin = xmin; + } /* When initdb we set vacuum_defer_cleanup_age to zero, so we can vacuum freeze three default database to avoid that localxid larger than GTM next_xid. */ - if (isSingleMode) + if (isSingleMode) { u_sess->attr.attr_storage.vacuum_defer_cleanup_age = 0; + } /* Update global variables too */ - if (TransactionIdPrecedes(globalxmin, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) + if (TransactionIdPrecedes(globalxmin, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) { u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId; - else + } else { u_sess->utils_cxt.RecentGlobalXmin = globalxmin - u_sess->attr.attr_storage.vacuum_defer_cleanup_age; + } - if (!TransactionIdIsNormal(u_sess->utils_cxt.RecentGlobalXmin)) + if (!TransactionIdIsNormal(u_sess->utils_cxt.RecentGlobalXmin)) { u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId; + } /* Check whether there's a replication slot requiring an older xmin. */ if (TransactionIdIsValid(replication_slot_xmin) && - TransactionIdPrecedes(replication_slot_xmin, u_sess->utils_cxt.RecentGlobalXmin)) + TransactionIdPrecedes(replication_slot_xmin, u_sess->utils_cxt.RecentGlobalXmin)) { u_sess->utils_cxt.RecentGlobalXmin = replication_slot_xmin; + } /* Non-catalog tables can be vacuumed if older than this xid */ u_sess->utils_cxt.RecentGlobalDataXmin = u_sess->utils_cxt.RecentGlobalXmin; @@ -1944,8 +1911,9 @@ RETRY_GET: * xmin. 
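The brace cleanups in this hunk all wrap one recurring idiom: back an xmin off by vacuum_defer_cleanup_age without letting the unsigned 64-bit value wrap below FirstNormalTransactionId. A minimal illustration with stand-in types; TransactionIdPrecedes is modeled here as a plain less-than, which matches non-wrapping 64-bit xids.

```
// Clamped xmin backoff: subtract the defer age without unsigned wraparound.
#include <cstdint>
#include <cstdio>

typedef uint64_t TransactionId;
static const TransactionId FirstNormalTransactionId = 3;

static TransactionId DeferXmin(TransactionId xmin, uint64_t deferAge)
{
    if (xmin < deferAge)                    /* TransactionIdPrecedes(...) */
        return FirstNormalTransactionId;    /* clamp instead of wrapping */
    return xmin - deferAge;
}

int main()
{
    printf("%lu\n", (unsigned long)DeferXmin(100, 10)); /* prints 90 */
    printf("%lu\n", (unsigned long)DeferXmin(5, 10));   /* prints 3, clamped */
    return 0;
}
```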
*/ if (TransactionIdIsNormal(replication_slot_catalog_xmin) && - NormalTransactionIdPrecedes(replication_slot_catalog_xmin, u_sess->utils_cxt.RecentGlobalXmin)) + NormalTransactionIdPrecedes(replication_slot_catalog_xmin, u_sess->utils_cxt.RecentGlobalXmin)) { u_sess->utils_cxt.RecentGlobalXmin = replication_slot_catalog_xmin; + } u_sess->utils_cxt.RecentXmin = xmin; #ifndef ENABLE_MULTIPLE_NODES @@ -2650,6 +2618,7 @@ VirtualTransactionId* GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbO continue; if (!OidIsValid(dbOid) || proc->databaseId == dbOid) { +#ifndef ENABLE_MULTIPLE_NODES /* Fetch xmin just once - can't change on us, but good coding */ TransactionId pxmin = pgxact->xmin; @@ -2667,6 +2636,24 @@ VirtualTransactionId* GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbO if (VirtualTransactionIdIsValid(vxid)) t_thrd.storage_cxt.proc_vxids[count++] = vxid; } +#else + CommitSeqNo xact_csn = pgxact->csn_dr; + CommitSeqNo limitXminCSN = InvalidCommitSeqNo; + + if (TransactionIdIsValid(limitXmin)) { + limitXminCSN = CSNLogGetDRCommitSeqNo(limitXmin); + } + + if (!TransactionIdIsValid(limitXmin) || (limitXminCSN >= xact_csn && xact_csn != InvalidCommitSeqNo)) { + VirtualTransactionId vxid; + GET_VXID_FROM_PGPROC(vxid, *proc); + + if (VirtualTransactionIdIsValid(vxid)) { + t_thrd.storage_cxt.proc_vxids[count++] = vxid; + } + } + +#endif } } #ifndef ENABLE_MULTIPLE_NODES @@ -3157,7 +3144,7 @@ int CountUserBackends(Oid roleid) if (proc->pid == 0) continue; /* do not count prepared xacts */ - if (proc->roleId == roleid) + if (proc->roleId == roleid && (t_thrd.role != STREAM_WORKER)) count++; } @@ -3301,15 +3288,24 @@ void ReloadConnInfoOnBackends(void) if (proc->pid == 0) continue; /* useless on prepared xacts */ - - if (!OidIsValid(proc->databaseId)) - continue; /* ignore backends not connected to a database */ - + if (pgxact->vacuumFlags & PROC_IN_VACUUM) continue; /* ignore vacuum processes */ - - if (ENABLE_THREAD_POOL && proc->sessionid > 0) - continue; + + if (EnableGlobalSysCache()) { + /* syscache is on thread in gsc mode. when enable thread pool, + * even the thread does not connect to a database, we still need send signal to it */ + if (!OidIsValid(proc->databaseId) && !ENABLE_THREAD_POOL) { + continue; + } + } else { + if (!OidIsValid(proc->databaseId)) { + continue; + } + if (ENABLE_THREAD_POOL && proc->sessionid > 0) { + continue; + } + } pid = proc->pid; /* @@ -3591,7 +3587,7 @@ static bool GetPGXCSnapshotData(Snapshot snapshot) * If this node is in recovery phase, * snapshot has to be taken directly from WAL information. */ - if (RecoveryInProgress()) + if (!IS_DISASTER_RECOVER_MODE && RecoveryInProgress()) return false; /* @@ -3660,26 +3656,10 @@ static bool GetSnapshotDataDataNode(Snapshot snapshot) if (IsAutoVacuumWorkerProcess() || GetForceXidFromGTM()) { GTM_Snapshot gtm_snapshot; - bool canbe_grouped = (!u_sess->utils_cxt.FirstSnapshotSet) || (!IsolationUsesXactSnapshot()); - ereport(DEBUG1, (errmsg("Getting snapshot for autovacuum. 
Current XID = " XID_FMT, GetCurrentTransactionIdIfAny()))); - if (GTM_MODE) { - if (TransactionIdIsValid(GetCurrentTransactionIdIfAny())) { - gtm_snapshot = GetSnapshotGTM(GetCurrentTransactionKeyIfAny(), - GetCurrentTransactionIdIfAny(), - canbe_grouped, - t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM); - } else { /* no valid xid */ - gtm_snapshot = GetSnapshotGTM(GetCurrentTransactionKey(), - InvalidTransactionId, - canbe_grouped, - t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM); - } - } else { - gtm_snapshot = GetSnapshotGTMLite(); - } + gtm_snapshot = IS_DISASTER_RECOVER_MODE ? GetSnapshotGTMDR() : GetSnapshotGTMLite(); if (!gtm_snapshot) { if (g_instance.status > NoShutdown) { @@ -3690,40 +3670,6 @@ static bool GetSnapshotDataDataNode(Snapshot snapshot) } else { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("GTM error, could not obtain snapshot"))); } - } else if (GTM_MODE) { - *u_sess->utils_cxt.g_GTM_Snapshot = *gtm_snapshot; - - u_sess->utils_cxt.snapshot_source = SNAPSHOT_DIRECT; - u_sess->utils_cxt.gxmin = gtm_snapshot->sn_xmin; - u_sess->utils_cxt.gxmax = gtm_snapshot->sn_xmax; - u_sess->utils_cxt.g_snapshotcsn = gtm_snapshot->csn; - u_sess->utils_cxt.GtmTimeline = GetCurrentTransactionTimeline(); - u_sess->utils_cxt.RecentGlobalXmin = gtm_snapshot->sn_recent_global_xmin; - - /* - * Fix RecentGlobalXmin using GetOldestXmin, considering local xmins. - * As we might prune or vacuum dead tuples deleted by xids older than RecentGlobalXmin. - * We should keep RecentGlobalXmin is the minnimum xmin. - * If RecentGlobalXmin is larger than local xmins, tuples being accessed might be cleaned wrongly. - */ - u_sess->utils_cxt.RecentGlobalXmin = GetOldestXmin(NULL, true); - - if (!TransactionIdIsNormal(u_sess->utils_cxt.RecentGlobalXmin)) - u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId; - - if (module_logging_is_on(MOD_TRANS_SNAPSHOT)) { - ereport(LOG, (errmodule(MOD_TRANS_SNAPSHOT), errmsg("for autovacuum from GTM: xmin = " XID_FMT - ", xmax = " XID_FMT ", csn = %lu, RecGlobXmin = " XID_FMT, - u_sess->utils_cxt.gxmin, u_sess->utils_cxt.gxmax, u_sess->utils_cxt.g_snapshotcsn, - u_sess->utils_cxt.RecentGlobalXmin))); - } - - elog(DEBUG1, - "for autovacuum from GTM: xmin = " XID_FMT ", xmax = " XID_FMT ", csn = %lu, RecGlobXmin = " XID_FMT, - u_sess->utils_cxt.gxmin, - u_sess->utils_cxt.gxmax, - u_sess->utils_cxt.g_snapshotcsn, - u_sess->utils_cxt.RecentGlobalXmin); } else { *u_sess->utils_cxt.g_GTM_Snapshot = *gtm_snapshot; u_sess->utils_cxt.snapshot_source = SNAPSHOT_DIRECT; @@ -3741,63 +3687,38 @@ static bool GetSnapshotDataDataNode(Snapshot snapshot) } } - if (GTM_MODE && - (u_sess->utils_cxt.snapshot_source == SNAPSHOT_COORDINATOR || - u_sess->utils_cxt.snapshot_source == SNAPSHOT_DIRECT) && - (TransactionIdIsValid(u_sess->utils_cxt.gxmin))) { - TransactionId xmin = FixSnapshotXminByLocal(u_sess->utils_cxt.gxmin); - snapshot->xmin = xmin; - snapshot->xmax = u_sess->utils_cxt.gxmax; - snapshot->snapshotcsn = u_sess->utils_cxt.g_snapshotcsn; - snapshot->timeline = u_sess->utils_cxt.GtmTimeline; - snapshot->curcid = GetCurrentCommandId(false); - - if (!TransactionIdIsValid(t_thrd.pgxact->xmin)) { - t_thrd.pgxact->xmin = u_sess->utils_cxt.TransactionXmin = xmin; - t_thrd.pgxact->handle = GetCurrentTransactionHandleIfAny(); - } else { - /* - * if we get snapshot from CN, we have fixed mypgxact, now we set - * TransactionXmin and gxmin - */ - u_sess->utils_cxt.TransactionXmin = t_thrd.pgxact->xmin; - t_thrd.pgxact->handle = GetCurrentTransactionHandleIfAny(); - } - - 
/* - * We should update RecentXmin here. But we have recently seen some - * issues with that - so skipping it for the time being. - * - * !!description - */ - u_sess->utils_cxt.RecentXmin = xmin; - - /* - * This is a new snapshot, so set both refcounts are zero, and mark it - * as not copied in persistent memory. - */ - snapshot->active_count = 0; - snapshot->regd_count = 0; - snapshot->copied = false; - snapshot->user_data = NULL; - - CheckSnapshotIsValidException(snapshot, "GetSnapshotDataDataNode"); - - return true; - } else if (GTM_LITE_MODE && u_sess->utils_cxt.snapshot_source == SNAPSHOT_COORDINATOR) { + if (GTM_LITE_MODE && u_sess->utils_cxt.snapshot_source == SNAPSHOT_COORDINATOR) { TransactionId save_recentglobalxmin = u_sess->utils_cxt.RecentGlobalXmin; snapshot->gtm_snapshot_type = u_sess->utils_cxt.is_autovacuum_snapshot ? GTM_SNAPSHOT_TYPE_AUTOVACUUM : GTM_SNAPSHOT_TYPE_GLOBAL; - /* only use gtm csn */ - Snapshot ret; - ret = GetLocalSnapshotData(snapshot); - Assert(ret != NULL); - snapshot->snapshotcsn = u_sess->utils_cxt.g_snapshotcsn; - (void)set_proc_csn_and_check("GetSnapshotDataDataNodeFromCN", snapshot->snapshotcsn, - snapshot->gtm_snapshot_type, SNAPSHOT_COORDINATOR); - /* reset RecentGlobalXmin */ - u_sess->utils_cxt.RecentGlobalXmin = save_recentglobalxmin; - /* too late to check and set */ + if (IS_DISASTER_RECOVER_MODE) { + snapshot->snapshotcsn = u_sess->utils_cxt.g_snapshotcsn; + t_thrd.pgxact->csn_dr = snapshot->snapshotcsn; + pg_memory_barrier(); + CommitSeqNo lastReplayedConflictCSN = (CommitSeqNo)pg_atomic_read_u64( + &(g_instance.comm_cxt.predo_cxt.last_replayed_conflict_csn)); + if (lastReplayedConflictCSN != 0 && snapshot->snapshotcsn - 1 <= lastReplayedConflictCSN) { + ereport(ERROR, (errmsg("gtm csn small: gtm csn %lu, lastReplayedConflictCSN %lu", + snapshot->snapshotcsn, lastReplayedConflictCSN))); + } + LWLockAcquire(XLogMaxCSNLock, LW_SHARED); + if (t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN + 1 < snapshot->snapshotcsn) { + ereport(ERROR, (errmsg("dn data invisible: local csn %lu, gtm snapshotcsn %lu", + t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN, snapshot->snapshotcsn))); + } + LWLockRelease(XLogMaxCSNLock); + } else { + /* only use gtm csn */ + Snapshot ret; + ret = GetLocalSnapshotData(snapshot); + Assert(ret != NULL); + snapshot->snapshotcsn = u_sess->utils_cxt.g_snapshotcsn; + (void)set_proc_csn_and_check("GetSnapshotDataDataNodeFromCN", snapshot->snapshotcsn, + snapshot->gtm_snapshot_type, SNAPSHOT_COORDINATOR); + /* reset RecentGlobalXmin */ + u_sess->utils_cxt.RecentGlobalXmin = save_recentglobalxmin; + /* too late to check and set */ + } return true; } @@ -3812,13 +3733,10 @@ static bool GetSnapshotDataDataNode(Snapshot snapshot) */ static bool GetSnapshotDataCoordinator(Snapshot snapshot) { - bool canbe_grouped = false; GTM_Snapshot gtm_snapshot; Assert(IS_PGXC_COORDINATOR || IsPGXCNodeXactDatanodeDirect()); - canbe_grouped = (!u_sess->utils_cxt.FirstSnapshotSet) || (!IsolationUsesXactSnapshot()); - /* Log some information about snapshot obtention */ if (IsAutoVacuumWorkerProcess()) { ereport(DEBUG1, @@ -3827,17 +3745,7 @@ static bool GetSnapshotDataCoordinator(Snapshot snapshot) ereport(DEBUG1, (errmsg("Getting snapshot. 
Current XID = " XID_FMT, GetCurrentTransactionIdIfAny()))); } - if (GTM_MODE) { - if (TransactionIdIsValid(GetCurrentTransactionIdIfAny())) { - gtm_snapshot = GetSnapshotGTM(GetCurrentTransactionKeyIfAny(), GetCurrentTransactionIdIfAny(), - canbe_grouped, t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM); - } else { /* no valid xid */ - gtm_snapshot = GetSnapshotGTM(GetCurrentTransactionKey(), - InvalidTransactionId, canbe_grouped, t_thrd.pgxact->vacuumFlags & PROC_IN_VACUUM); - } - } else { - gtm_snapshot = GetSnapshotGTMLite(); - } + gtm_snapshot = IS_DISASTER_RECOVER_MODE ? GetSnapshotGTMDR() : GetSnapshotGTMLite(); if (!gtm_snapshot) { if (g_instance.status > NoShutdown) { @@ -3848,81 +3756,29 @@ static bool GetSnapshotDataCoordinator(Snapshot snapshot) (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("GTM error, could not obtain snapshot XID = " XID_FMT, GetCurrentTransactionIdIfAny()))); } - } else if (GTM_MODE) { - *u_sess->utils_cxt.g_GTM_Snapshot = *gtm_snapshot; - - u_sess->utils_cxt.RecentGlobalXmin = gtm_snapshot->sn_recent_global_xmin; - - /* - * Fix RecentGlobalXmin using GetOldestXmin, considering local xmins. - * As we might prune or vacuum dead tuples deleted by xids older than RecentGlobalXmin. - * We should keep RecentGlobalXmin is the minnimum xmin. - * If RecentGlobalXmin is larger than local xmins, tuples being accessed might be cleaned wrongly. - */ - u_sess->utils_cxt.RecentGlobalXmin = GetOldestXmin(NULL, true); - - if (!TransactionIdIsNormal(u_sess->utils_cxt.RecentGlobalXmin)) - u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId; - - snapshot->xmin = FixSnapshotXminByLocal(gtm_snapshot->sn_xmin); - snapshot->xmax = gtm_snapshot->sn_xmax; - snapshot->snapshotcsn = gtm_snapshot->csn; - - snapshot->timeline = GetCurrentTransactionTimeline(); - - ereport(DEBUG1, - (errmsg("from GTM: xmin = " XID_FMT " , xmax = " XID_FMT ", RecGlobalXmin = " XID_FMT, - gtm_snapshot->sn_xmin, - gtm_snapshot->sn_xmax, - gtm_snapshot->sn_recent_global_xmin))); - - ereport(DEBUG1, - (errmsg("on CN: xmin = " XID_FMT ", xmax = " XID_FMT ", RecGlobalXmin = " XID_FMT, - snapshot->xmin, - snapshot->xmax, - u_sess->utils_cxt.RecentGlobalXmin))); - - snapshot->curcid = GetCurrentCommandId(false); - - if (!TransactionIdIsValid(t_thrd.pgxact->xmin)) { - t_thrd.pgxact->xmin = u_sess->utils_cxt.TransactionXmin = snapshot->xmin; - t_thrd.pgxact->handle = GetCurrentTransactionHandleIfAny(); - } - - /* We should update RecentXmin here. */ - u_sess->utils_cxt.RecentXmin = snapshot->xmin; - - /* - * This is a new snapshot, so set both refcounts are zero, and mark it - * as not copied in persistent memory. 
- */ - snapshot->active_count = 0; - snapshot->regd_count = 0; - snapshot->copied = false; - snapshot->user_data = NULL; - - if (module_logging_is_on(MOD_TRANS_SNAPSHOT)) - ereport(LOG, - (errmodule(MOD_TRANS_SNAPSHOT), - errmsg("CN gets Snapshot from: gtm_snapshot->sn_xmin = %lu, gtm_snapshot->sn_recent_global_xmin = " - "%lu.", - gtm_snapshot->sn_xmin, - gtm_snapshot->sn_recent_global_xmin))); - - return true; } else { snapshot->gtm_snapshot_type = GTM_SNAPSHOT_TYPE_GLOBAL; *u_sess->utils_cxt.g_GTM_Snapshot = *gtm_snapshot; - /* only use gtm csn */ - Snapshot ret; - ret = GetLocalSnapshotData(snapshot); - Assert(ret != NULL); + if (IS_DISASTER_RECOVER_MODE) { + snapshot->snapshotcsn = gtm_snapshot->csn; + t_thrd.pgxact->csn_dr = snapshot->snapshotcsn; + LWLockAcquire(XLogMaxCSNLock, LW_SHARED); + if (t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN + 1 < snapshot->snapshotcsn) { + ereport(ERROR, (errmsg("cn data invisible: local csn %lu, gtm snapshotcsn %lu", t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN, snapshot->snapshotcsn))); + } + LWLockRelease(XLogMaxCSNLock); + } else { + /* only use gtm csn */ + Snapshot ret; + ret = GetLocalSnapshotData(snapshot); + Assert(ret != NULL); - snapshot->snapshotcsn = set_proc_csn_and_check("GetSnapshotDataCoordinator", gtm_snapshot->csn, - snapshot->gtm_snapshot_type, SNAPSHOT_DIRECT); + snapshot->snapshotcsn = set_proc_csn_and_check("GetSnapshotDataCoordinator", gtm_snapshot->csn, + snapshot->gtm_snapshot_type, SNAPSHOT_DIRECT); - u_sess->utils_cxt.g_GTM_Snapshot->csn = snapshot->snapshotcsn; - u_sess->utils_cxt.RecentGlobalXmin = GetOldestXmin(NULL, true); + u_sess->utils_cxt.g_GTM_Snapshot->csn = snapshot->snapshotcsn; + u_sess->utils_cxt.RecentGlobalXmin = GetOldestXmin(NULL, true); + } if (module_logging_is_on(MOD_TRANS_SNAPSHOT)) { ereport(LOG, (errmodule(MOD_TRANS_SNAPSHOT), errmsg("CN gets snapshot from gtm_snapshot, csn = %lu.", snapshot->snapshotcsn))); @@ -4062,6 +3918,8 @@ void SyncWaitXidEnd(TransactionId xid, Buffer buffer) */ void SyncLocalXidWait(TransactionId xid) { + ReleaseAllGSCRdConcurrentLock(); + int64 remainingNapTime = (int64)u_sess->attr.attr_common.transaction_sync_naptime * 1000000; /* us */ int64 remainingTimeout = (int64)u_sess->attr.attr_common.transaction_sync_timeout * 1000000; /* us */ const int64 sleepTime = 1000; @@ -4282,6 +4140,7 @@ void CreateSharedRingBuffer(void) static void IncrRefCount(snapxid_t* s) { t_thrd.proc->snap_refcnt_bitmap |= 1 << (SNAPXID_INDEX(s) % 64); + pg_write_barrier(); } /* @@ -4290,6 +4149,7 @@ static void IncrRefCount(snapxid_t* s) static void DecrRefCount(snapxid_t* s) { t_thrd.proc->snap_refcnt_bitmap &= ~(1 << (SNAPXID_INDEX(s) % 64)); + pg_write_barrier(); } /* @@ -4457,17 +4317,20 @@ Snapshot GetLocalSnapshotData(Snapshot snapshot) t_thrd.pgxact->handle = GetCurrentTransactionHandleIfAny(); } - if (TransactionIdPrecedes(snapxid->localxmin, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) + if (TransactionIdPrecedes(snapxid->localxmin, (uint64)u_sess->attr.attr_storage.vacuum_defer_cleanup_age)) { u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId; - else + } else { u_sess->utils_cxt.RecentGlobalXmin = snapxid->localxmin - u_sess->attr.attr_storage.vacuum_defer_cleanup_age; + } - if (!TransactionIdIsNormal(u_sess->utils_cxt.RecentGlobalXmin)) + if (!TransactionIdIsNormal(u_sess->utils_cxt.RecentGlobalXmin)) { u_sess->utils_cxt.RecentGlobalXmin = FirstNormalTransactionId; + } if (TransactionIdIsNormal(replication_slot_xmin) && - 
TransactionIdPrecedes(replication_slot_xmin, u_sess->utils_cxt.RecentGlobalXmin)) + TransactionIdPrecedes(replication_slot_xmin, u_sess->utils_cxt.RecentGlobalXmin)) { u_sess->utils_cxt.RecentGlobalXmin = replication_slot_xmin; + } u_sess->utils_cxt.RecentXmin = snapxid->xmin; snapshot->xmin = snapxid->xmin; @@ -4530,6 +4393,11 @@ static void init_shmem_csn_cleanup_instr(void) LWLockRelease(CsnMinLock); } +static bool ForceCalculateSnapshotXmin(bool forceCalc) +{ + return (!u_sess->attr.attr_storage.enable_defer_calculate_snapshot || forceCalc); +} + void CalculateLocalLatestSnapshot(bool forceCalc) { /* @@ -4561,12 +4429,14 @@ void CalculateLocalLatestSnapshot(bool forceCalc) /* * We calculate xmin under the following conditions: - * 1. we didn't calculate snapshot for GTM_MAX_PENDING_SNAPSHOT_CNT times - * 2. we didn't calculate snapshot for GTM_CALC_SNAPSHOT_TIMEOUT seconds + * 1. we always calculate the snapshot if defer_calculate_snapshot is disabled. + * 2. we haven't calculated the snapshot for GTM_MAX_PENDING_SNAPSHOT_CNT times. + * 3. we haven't calculated the snapshot for GTM_CALC_SNAPSHOT_TIMEOUT seconds. */ currentTimeStamp = GetCurrentTimestamp(); - if (forceCalc || ((++snapshotPendingCnt == MAX_PENDING_SNAPSHOT_CNT) || - (TimestampDifferenceExceeds(snapshotTimeStamp, currentTimeStamp, CALC_SNAPSHOT_TIMEOUT)))) { + if (ForceCalculateSnapshotXmin(forceCalc) || ((++snapshotPendingCnt == MAX_PENDING_SNAPSHOT_CNT) || + (TimestampDifferenceExceeds(snapshotTimeStamp, currentTimeStamp, CALC_SNAPSHOT_TIMEOUT)))) { + pg_read_barrier(); snapshotPendingCnt = 0; snapshotTimeStamp = currentTimeStamp; @@ -4692,24 +4562,6 @@ static TransactionId GetMultiSnapshotOldestXmin() return ((snapxid_t*)g_snap_current)->localxmin; } -#ifdef ENABLE_MULTIPLE_NODES -/* - * Transaction end first on GTM, but it may not end in CN/DN. - * We need to fix xmin by local snapshot, else there may be visibility error - * after recovery of a two-phase transaction failure.
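The pg_write_barrier()/pg_read_barrier() pair added around the snapshot refcount bitmap (IncrRefCount/DecrRefCount above, and the recalculation path here) is a publish/observe protocol: the pinning side publishes its bitmap update before the snapshot is used, and the xmin calculator fences before scanning the bitmaps so it cannot recycle a slot based on a stale view. A stand-in rendering with C++ fences follows; the bitmap name is illustrative, and the exact barrier strength the engine needs depends on its surrounding protocol.

```
// Publish a per-slot pin bit, then observe it from the recycler side.
#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint64_t> snap_refcnt_bitmap{0};

void PinSnapshotSlot(unsigned slot)
{
    snap_refcnt_bitmap.fetch_or(1ull << (slot % 64), std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_release); /* ~pg_write_barrier */
}

bool SlotIsPinned(unsigned slot)
{
    std::atomic_thread_fence(std::memory_order_acquire); /* ~pg_read_barrier */
    return (snap_refcnt_bitmap.load(std::memory_order_relaxed) >> (slot % 64)) & 1;
}

int main()
{
    PinSnapshotSlot(3);
    printf("slot 3 pinned: %d\n", SlotIsPinned(3) ? 1 : 0);
    return 0;
}
```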
- * - */ -static TransactionId FixSnapshotXminByLocal(TransactionId xid) -{ - snapxid_t* x = (snapxid_t*)g_snap_current; - - if (TransactionIdIsNormal(x->xmin) && TransactionIdPrecedes(x->xmin, xid)) - return x->xmin; - else - return xid; -} -#endif - void ProcArrayResetXmin(PGPROC* proc) { PGXACT* pgxact = &g_instance.proc_base_all_xacts[proc->pgprocno]; @@ -4778,73 +4630,6 @@ TransactionId SubTransGetTopParentXidFromProcs(TransactionId xid) return InvalidTransactionId; } -void FixCurrentSnapshotByGxid(TransactionId gxid) -{ - if (u_sess->attr.attr_common.xc_maintenance_mode || !GTM_MODE || !TransactionIdIsNormal(gxid)) - return; - - volatile snapxid_t* x = (volatile snapxid_t*)g_snap_current; - if (TransactionIdPrecedes(gxid, x->xmin)) { - LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); - - /* recheck again */ - x = (volatile snapxid_t*)g_snap_current; - if (TransactionIdPrecedes(gxid, x->xmin)) { - ereport(LOG, - (errmsg("There is an old xid %lu arrived, so we need to " - "recalculate the local snapshot which should include it.", - gxid))); - CalculateLocalLatestSnapshot(true); - } - LWLockRelease(ProcArrayLock); - } -} - -/* check whether snapshot is valid */ -void CheckSnapshotIsValidException(Snapshot snapshot, const char* location) -{ - if (!GTM_MODE) - return; - if (!u_sess->attr.attr_common.xc_maintenance_mode && !u_sess->utils_cxt.cn_xc_maintain_mode && - !IsAutoVacuumWorkerProcess()) { - Assert(snapshot); - if (snapshot->satisfies == SNAPSHOT_MVCC && - TransactionIdIsValid(u_sess->utils_cxt.g_GTM_Snapshot->sn_xmin)) { - TransactionId newestOldestXmin = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin); - if ((pg_strcasecmp(location, "GetSnapshotDataDataNode") == 0) && - TransactionIdPrecedes(u_sess->utils_cxt.g_GTM_Snapshot->sn_xmin, newestOldestXmin)) - ereport(ERROR, - (errcode(ERRCODE_SNAPSHOT_INVALID), - errmsg("Snapshot is invalid at %s, this is a safe error " - "if there is breakdown in gtm log", - location), - errdetail("Snaphot xmin %lu is lower than " - "newestOldestXmin: %lu", - u_sess->utils_cxt.g_GTM_Snapshot->sn_xmin, - newestOldestXmin), - errhint("This is a safe error report, will not impact " - "data consistency, retry your query if needed."))); - } - } -} - -/* - * xid comparator for qsort/bsearch - */ -#ifdef ENABLE_MULTIPLE_NODES -static int cmp_xid(const void* aa, const void* bb) -{ - TransactionId a = *(const TransactionId*)aa; - TransactionId b = *(const TransactionId*)bb; - - if (TransactionIdPrecedes(a, b)) - return -1; - if (TransactionIdFollows(a, b)) - return 1; - return 0; -} -#endif - Datum pgxc_gtm_snapshot_status(PG_FUNCTION_ARGS) { #ifndef ENABLE_MULTIPLE_NODES @@ -4852,131 +4637,10 @@ Datum pgxc_gtm_snapshot_status(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported view in single node mode."))); SRF_RETURN_DONE(funcctx); #else -#define GTM_SNAPSHOT_ATTRS 6 - /* check gtm mode, only gtm support this function */ - if (!GTM_MODE) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unsupported function or view in %s mode.", GTM_LITE_MODE ? 
"GTM-Lite" : "GTM-Free"))); - } - uint64 xcnt = 0; - int64 index = 0; - TransactionId* xids = NULL; FuncCallContext* funcctx = NULL; - ProcArrayStruct* arrayP = g_instance.proc_array_idx; - GTM_SnapshotStatus snapshot_status = NULL; - - if (SRF_IS_FIRSTCALL()) { - TupleDesc tupdesc; - MemoryContext oldcontext; - - funcctx = SRF_FIRSTCALL_INIT(); - oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); - /* Build tupdesc for result tuples */ - tupdesc = CreateTemplateTupleDesc(GTM_SNAPSHOT_ATTRS, false); - TupleDescInitEntry(tupdesc, (AttrNumber)1, "xmin", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)2, "xmax", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)3, "csn", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)4, "oldestxmin", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)5, "xcnt", XIDOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber)6, "running_xids", TEXTOID, -1, 0); - - funcctx->tuple_desc = BlessTupleDesc(tupdesc); - /* Only one tuple */ - funcctx->max_calls = 1; - - MemoryContextSwitchTo(oldcontext); - } - - xids = (TransactionId*)MemoryContextAlloc( - SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), arrayP->numProcs * sizeof(TransactionId)); - - if (xids == NULL) - ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("Out of memory at palloc memory for xids!"))); - - /* - * Ensure that no xids enter or leave the procarray while we obtain - * snapshot. - */ - LWLockAcquire(ProcArrayLock, LW_SHARED); - - funcctx = SRF_PERCALL_SETUP(); - - for (index = 0; index < arrayP->numProcs; index++) { - int pgprocno = arrayP->pgprocnos[index]; - volatile PGXACT* pgxact = &g_instance.proc_base_all_xacts[pgprocno]; - TransactionId xid = 0; - - xid = pgxact->xid; - - /* Skip self */ - if (pgxact == t_thrd.pgxact) - continue; - - if (!TransactionIdIsValid(xid)) - continue; - - xids[xcnt++] = xid; - } - - LWLockRelease(ProcArrayLock); - - funcctx = SRF_PERCALL_SETUP(); - if (funcctx->call_cntr < funcctx->max_calls) { - Datum values[GTM_SNAPSHOT_ATTRS]; - bool nulls[GTM_SNAPSHOT_ATTRS]; - StringInfoData str; - HeapTuple tuple; - Datum result; - errno_t rc = 0; - uint64 i; - - /* Form tuple with appropriate data. */ - rc = memset_s(values, sizeof(values), 0, sizeof(values)); - securec_check_c(rc, "\0", "\0"); - rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); - securec_check_c(rc, "\0", "\0"); - - snapshot_status = GetGTMSnapshotStatus(GetCurrentTransactionKey()); - if (snapshot_status == NULL) { - ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("GTM error, could not obtain valid gtm snapshot status."))); - } - values[0] = TransactionIdGetDatum(snapshot_status->xmin); - values[1] = TransactionIdGetDatum(snapshot_status->xmax); - values[2] = TransactionIdGetDatum(snapshot_status->csn); - values[3] = TransactionIdGetDatum(snapshot_status->recent_global_xmin); - values[4] = TransactionIdGetDatum(xcnt); - - /* Form running xids */ - if (xcnt > 0) { - initStringInfo(&str); - - /* Sort transaction id */ - if (xcnt > 1) - qsort(xids, xcnt, sizeof(TransactionId), cmp_xid); - - for (i = 0; i < xcnt; i++) { - if (i > 0) - appendStringInfoChar(&str, ','); - appendStringInfo(&str, "%lu", xids[i]); - } - - values[5] = CStringGetTextDatum(str.data); - } else { - nulls[5] = true; - } - - /* Build and return the tuple. 
*/ - tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); - result = HeapTupleGetDatum(tuple); - - /* free memory */ - pfree(xids); - SRF_RETURN_NEXT(funcctx, result); - } - - /* free memory */ - pfree(xids); + /* check gtm mode; only GTM mode supports this function */ + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported function or view in %s mode.", GTM_LITE_MODE ? "GTM-Lite" : "GTM-Free"))); SRF_RETURN_DONE(funcctx); #endif } @@ -5211,4 +4875,17 @@ calculate_local_csn_min() LWLockRelease(ProcArrayLock); return local_csn_min; } +/* + * Updates the maximum value for CSN read from XLog + */ +void UpdateXLogMaxCSN(CommitSeqNo xlogCSN) +{ + if (xlogCSN > t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN) { + LWLockAcquire(XLogMaxCSNLock, LW_EXCLUSIVE); + if (xlogCSN > t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN) { + t_thrd.xact_cxt.ShmemVariableCache->xlogMaxCSN = xlogCSN; + } + LWLockRelease(XLogMaxCSNLock); + } +} diff --git a/src/gausskernel/storage/ipc/sinval.cpp b/src/gausskernel/storage/ipc/sinval.cpp index 224872d0a..7cceaddfa 100644 --- a/src/gausskernel/storage/ipc/sinval.cpp +++ b/src/gausskernel/storage/ipc/sinval.cpp @@ -19,6 +19,7 @@ #include "access/xact.h" #include "commands/async.h" #include "miscadmin.h" +#include "postmaster/bgworker.h" #include "storage/ipc.h" #include "storage/sinvaladt.h" #include "utils/globalplancache.h" @@ -40,14 +41,44 @@ */ THR_LOCAL volatile sig_atomic_t catchupInterruptPending = false; +static void GlobalInvalidSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n, bool is_commit) +{ + Assert(EnableGlobalSysCache()); + for (int i = 0; i < n; i++) { + SharedInvalidationMessage *msg = (SharedInvalidationMessage *)(msgs + i); + if (msg->id >= 0) { + t_thrd.lsc_cxt.lsc->systabcache.CacheIdHashValueInvalidateGlobal(msg->cc.dbId, msg->cc.id, + msg->cc.hashValue, is_commit); + } else if (msg->id == SHAREDINVALCATALOG_ID) { + t_thrd.lsc_cxt.lsc->systabcache.CatalogCacheFlushCatalogGlobal(msg->cat.dbId, msg->cat.catId, is_commit); + } else if (msg->id == SHAREDINVALRELCACHE_ID) { + t_thrd.lsc_cxt.lsc->tabdefcache.InvalidateGlobalRelation(msg->rc.dbId, msg->rc.relId, is_commit); + } else if (msg->id == SHAREDINVALPARTCACHE_ID) { + t_thrd.lsc_cxt.lsc->partdefcache.InvalidateGlobalPartition(msg->pc.dbId, msg->pc.partId, is_commit); + } + /* global relmap entries are backups of the relmap file, so there is no need to handle the msg */ + } +} + +void GlobalExecuteSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n) +{ + /* threads that do not support gsc need no global invalidation before commit */ + if (!EnableLocalSysCache()) { + return; + } + GlobalInvalidSharedInvalidMessages(msgs, n, IS_THREAD_POOL_STREAM || IsBgWorkerProcess()); +} /* * SendSharedInvalidMessages * Add shared-cache-invalidation message(s) to the global SI message queue. */ void SendSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n) { + /* threads that do not support gsc still need global invalidation at commit */ + if (EnableGlobalSysCache()) { + GlobalInvalidSharedInvalidMessages(msgs, n, true); + } SIInsertDataEntries(msgs, n); - if (ENABLE_GPC && g_instance.plan_cache != NULL) { g_instance.plan_cache->InvalMsg(msgs, n); } @@ -69,39 +100,48 @@ void SendSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n) * and counters; it's so that a recursive call can process messages already * sucked out of sinvaladt.c. 
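UpdateXLogMaxCSN above is a double-checked update of a monotonically increasing maximum: a cheap unlocked test skips the lock on the common no-op path, and the test is repeated under the lock because another thread may have raised the value in between. A toy analogue using standard C++ primitives (names illustrative; the kernel uses an LWLock, not std::mutex):

```
#include <atomic>
#include <cstdint>
#include <mutex>

static std::atomic<uint64_t> g_maxCSN{0}; /* plays the role of ShmemVariableCache->xlogMaxCSN */
static std::mutex g_maxCSNLock;           /* plays the role of XLogMaxCSNLock */

void UpdateMaxCSN(uint64_t csn)
{
    /* Unlocked pre-check: most callers lose the race and skip the lock. */
    if (csn > g_maxCSN.load(std::memory_order_relaxed)) {
        std::lock_guard<std::mutex> guard(g_maxCSNLock);
        /* Re-check under the lock; another thread may have raised it. */
        if (csn > g_maxCSN.load(std::memory_order_relaxed)) {
            g_maxCSN.store(csn, std::memory_order_relaxed);
        }
    }
}
```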
*/ -void ReceiveSharedInvalidMessages(void (*invalFunction)(SharedInvalidationMessage* msg), void (*resetFunction)(void)) +void ReceiveSharedInvalidMessages(void (*invalFunction)(SharedInvalidationMessage* msg), void (*resetFunction)(void), + bool worksession) { + knl_u_inval_context *inval_cxt; + if (!EnableLocalSysCache()) { + inval_cxt = &u_sess->inval_cxt; + } else if (worksession) { + inval_cxt = &u_sess->inval_cxt; + } else { + inval_cxt = &t_thrd.lsc_cxt.lsc->inval_cxt; + } /* Deal with any messages still pending from an outer recursion */ - while (u_sess->inval_cxt.nextmsg < u_sess->inval_cxt.nummsgs) { - SharedInvalidationMessage msg = u_sess->inval_cxt.messages[u_sess->inval_cxt.nextmsg++]; + while (inval_cxt->nextmsg < inval_cxt->nummsgs) { + SharedInvalidationMessage msg = inval_cxt->messages[inval_cxt->nextmsg++]; - u_sess->inval_cxt.SharedInvalidMessageCounter++; + inval_cxt->SIMCounter++; invalFunction(&msg); } do { int getResult; - u_sess->inval_cxt.nextmsg = u_sess->inval_cxt.nummsgs = 0; + inval_cxt->nextmsg = inval_cxt->nummsgs = 0; /* Try to get some more messages */ - getResult = SIGetDataEntries(u_sess->inval_cxt.messages, MAXINVALMSGS); + getResult = SIGetDataEntries(inval_cxt->messages, MAXINVALMSGS, worksession); if (getResult < 0) { /* got a reset message */ ereport(DEBUG4, (errmsg("cache state reset"))); - u_sess->inval_cxt.SharedInvalidMessageCounter++; + inval_cxt->SIMCounter++; resetFunction(); break; /* nothing more to do */ } /* Process them, being wary that a recursive call might eat some */ - u_sess->inval_cxt.nextmsg = 0; - u_sess->inval_cxt.nummsgs = getResult; + inval_cxt->nextmsg = 0; + inval_cxt->nummsgs = getResult; - while (u_sess->inval_cxt.nextmsg < u_sess->inval_cxt.nummsgs) { - SharedInvalidationMessage msg = u_sess->inval_cxt.messages[u_sess->inval_cxt.nextmsg++]; + while (inval_cxt->nextmsg < inval_cxt->nummsgs) { + SharedInvalidationMessage msg = inval_cxt->messages[inval_cxt->nextmsg++]; - u_sess->inval_cxt.SharedInvalidMessageCounter++; + inval_cxt->SIMCounter++; invalFunction(&msg); } @@ -109,7 +149,7 @@ void ReceiveSharedInvalidMessages(void (*invalFunction)(SharedInvalidationMessag * We only need to loop if the last SIGetDataEntries call (which might * have been within a recursive call) returned a full buffer. */ - } while (u_sess->inval_cxt.nummsgs == MAXINVALMSGS); + } while (inval_cxt->nummsgs == MAXINVALMSGS); /* * We are now caught up. 
If we received a catchup signal, reset that diff --git a/src/gausskernel/storage/ipc/sinvaladt.cpp b/src/gausskernel/storage/ipc/sinvaladt.cpp index 0b4dad11b..49d810404 100644 --- a/src/gausskernel/storage/ipc/sinvaladt.cpp +++ b/src/gausskernel/storage/ipc/sinvaladt.cpp @@ -200,8 +200,15 @@ Size SInvalShmemSize(void) Size size; size = offsetof(SISeg, procState); - if (g_instance.attr.attr_common.enable_thread_pool) - size = add_size(size, mul_size(sizeof(ProcState), MAX_SESSION_SLOT_COUNT)); + if (g_instance.attr.attr_common.enable_thread_pool) { + if (EnableGlobalSysCache()) { + /* palloc another g_instance.shmem_cxt.MaxBackends for gsc, register share msg event for threads */ + size = add_size(size, mul_size(sizeof(ProcState), MAX_THREAD_POOL_SIZE + + MAX_SESSION_SLOT_COUNT)); + } else { + size = add_size(size, mul_size(sizeof(ProcState), MAX_SESSION_SLOT_COUNT)); + } + } else size = add_size(size, mul_size(sizeof(ProcState), (Size)g_instance.shmem_cxt.MaxBackends)); @@ -219,12 +226,7 @@ void CreateSharedInvalidationState(void) bool found = false; /* Allocate space in shared memory */ - size = offsetof(SISeg, procState); - if (g_instance.attr.attr_common.enable_thread_pool) { - size = add_size(size, mul_size(sizeof(ProcState), MAX_SESSION_SLOT_COUNT)); - } else { - size = add_size(size, mul_size(sizeof(ProcState), (Size)g_instance.shmem_cxt.MaxBackends)); - } + size = SInvalShmemSize(); t_thrd.shemem_ptr_cxt.shmInvalBuffer = (SISeg*)ShmemInitStruct("shmInvalBuffer", size, &found); @@ -362,7 +364,6 @@ static void SharedInvalWorkSessionInit(bool sendOnly) stateP = &segP->procState[index]; if (stateP->procPid != 0) { - t_thrd.proc_cxt.MyBackendId = InvalidBackendId; LWLockRelease(SInvalWriteLock); ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS), @@ -643,7 +644,7 @@ void SIInsertDataEntries(const SharedInvalidationMessage* data, int n) * Note: we assume that "datasize" is not so large that it might be important * to break our hold on SInvalReadLock into segments. 
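The SInvalShmemSize change sizes the ProcState array differently once GSC is on: with the thread pool enabled, every pool thread gets its own invalidation slot in addition to the per-session slots, so workers can consume shared-invalidation messages by backend id. A minimal sketch of that sizing rule, with all limits passed in as assumed parameters:

```
#include <cstddef>

/* Illustrative config; the real values come from GUCs and thread pool settings. */
struct InvalShmemConfig {
    bool threadPoolEnabled;
    bool gscEnabled;          /* EnableGlobalSysCache() */
    size_t maxBackends;       /* g_instance.shmem_cxt.MaxBackends */
    size_t maxThreadPoolSize; /* MAX_THREAD_POOL_SIZE */
    size_t maxSessionSlots;   /* MAX_SESSION_SLOT_COUNT */
};

/* Mirrors SInvalShmemSize: GSC adds one slot per pool thread on top of the
 * per-session slots; without the thread pool, one slot per backend. */
size_t InvalProcStateSlots(const InvalShmemConfig& cfg)
{
    if (!cfg.threadPoolEnabled) {
        return cfg.maxBackends;
    }
    return cfg.gscEnabled ? cfg.maxThreadPoolSize + cfg.maxSessionSlots : cfg.maxSessionSlots;
}
```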
*/ -int SIGetDataEntries(SharedInvalidationMessage* data, int datasize) +int SIGetDataEntries(SharedInvalidationMessage* data, int datasize, bool worksession) { SISeg* segP = NULL; ProcState* stateP = NULL; @@ -651,9 +652,17 @@ int SIGetDataEntries(SharedInvalidationMessage* data, int datasize) int n; segP = t_thrd.shemem_ptr_cxt.shmInvalBuffer; + /* fetch inval msg state info for backend or session */ if (IS_THREAD_POOL_WORKER) { - stateP = &segP->procState[u_sess->session_ctr_index]; + if (EnableLocalSysCache() && !worksession) { + /* GSC is on, and this is a thread pool worker, fetch inval msg slot by backendid */ + stateP = &segP->procState[t_thrd.proc_cxt.MyBackendId - 1]; + } else { + /* this is a thread pool worker, fetch inval msg slot by session index */ + stateP = &segP->procState[u_sess->session_ctr_index]; + } } else { + /* if not thread pool worker(either threadpool mode or not), proc slot is located by current backendId */ stateP = &segP->procState[t_thrd.proc_cxt.MyBackendId - 1]; } diff --git a/src/gausskernel/storage/ipc/standby.cpp b/src/gausskernel/storage/ipc/standby.cpp index d7c9145c3..7e12099e2 100755 --- a/src/gausskernel/storage/ipc/standby.cpp +++ b/src/gausskernel/storage/ipc/standby.cpp @@ -34,15 +34,14 @@ #include "utils/ps_status.h" #include "utils/timestamp.h" #include "utils/snapmgr.h" - +#include "replication/walreceiver.h" static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlist, ProcSignalReason reason); static void ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid); static void LogAccessExclusiveLocks(int nlocks, xl_standby_lock* locks); static void LogReleaseAccessExclusiveLocks(int nlocks, xl_standby_lock* locks); static XLogRecPtr LogCurrentRunningXacts(RunningTransactions CurrRunningXacts); -#ifndef ENABLE_MULTIPLE_NODES static void RecordCommittingCsnInfo(TransactionId xid); -#endif + /* * InitRecoveryTransactionEnvironment * Initialize tracking of in-progress transactions in master @@ -112,16 +111,10 @@ void ShutdownRecoveryTransactionEnvironment(void) * transactions. Returns zero (a time safely in the past) if we are willing * to wait forever. */ -static TimestampTz GetStandbyLimitTime(void) +static TimestampTz GetStandbyLimitTime(TimestampTz startTime) { - TimestampTz rtime; - bool fromStream = false; - - /* - * The cutoff time is the last WAL data receipt time plus the appropriate - * delay variable. Delay of -1 means wait forever. - */ - GetXLogReceiptTime(&rtime, &fromStream); + TimestampTz rtime = startTime; + bool fromStream = (t_thrd.xlog_cxt.XLogReceiptSource == XLOG_FROM_STREAM); if (fromStream) { if (u_sess->attr.attr_storage.max_standby_streaming_delay < 0) @@ -143,12 +136,12 @@ static TimestampTz GetStandbyLimitTime(void) * We wait here for a while then return. If we decide we can't wait any * more then we return true, if we can wait some more return false. */ -static bool WaitExceedsMaxStandbyDelay(void) +static bool WaitExceedsMaxStandbyDelay(TimestampTz startTime) { - TimestampTz ltime; + TimestampTz ltime = 0; /* Are we past the limit time? */ - ltime = GetStandbyLimitTime(); + ltime = GetStandbyLimitTime(startTime); if (ltime && GetCurrentTimestamp() >= ltime) return true; @@ -216,7 +209,7 @@ static void ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId* waitlis } /* Is it time to kill it? 
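GetStandbyLimitTime now takes the start timestamp from its caller and derives fromStream from XLogReceiptSource; the cutoff remains the start time plus the relevant delay GUC, where a delay of -1 means wait forever. A hedged sketch of that rule (millisecond delay units assumed):

```
#include <chrono>
#include <optional>

using Clock = std::chrono::system_clock;

/* Cutoff = start time (WAL receipt or wait start) + delay GUC; -1 = forever.
 * Parameters mirror max_standby_streaming_delay / max_standby_archive_delay. */
std::optional<Clock::time_point> StandbyLimitTime(Clock::time_point startTime, bool fromStream,
                                                  int streamingDelayMs, int archiveDelayMs)
{
    int delayMs = fromStream ? streamingDelayMs : archiveDelayMs;
    if (delayMs < 0) {
        return std::nullopt; /* wait forever; callers treat this as "no limit" */
    }
    return startTime + std::chrono::milliseconds(delayMs);
}

/* A waiter cancels conflicting queries once Clock::now() passes the limit,
 * which is what WaitExceedsMaxStandbyDelay checks. */
```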
*/ - if (WaitExceedsMaxStandbyDelay()) { + if (WaitExceedsMaxStandbyDelay(waitStart)) { ThreadId pid; /* @@ -259,6 +252,21 @@ void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, const R if (!TransactionIdIsValid(latestRemovedXid)) return; + if (IS_DISASTER_RECOVER_MODE) { + CommitSeqNo limitXminCSN = CSNLogGetDRCommitSeqNo(latestRemovedXid); + while (true) { + CommitSeqNo lastReplayedConflictCSN = (CommitSeqNo)pg_atomic_read_u64( + &(g_instance.comm_cxt.predo_cxt.last_replayed_conflict_csn)); + if (limitXminCSN <= lastReplayedConflictCSN) { + break; + } + if (pg_atomic_compare_exchange_u64(&(g_instance.comm_cxt.predo_cxt.last_replayed_conflict_csn), + &lastReplayedConflictCSN, limitXminCSN)) { + break; + } + } + } + backends = GetConflictingVirtualXIDs(latestRemovedXid, node.dbNode, lsn); ResolveRecoveryConflictWithVirtualXIDs(backends, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT); @@ -393,8 +401,8 @@ void ResolveRecoveryConflictWithBufferPin(void) Assert(InHotStandby); - ltime = GetStandbyLimitTime(); now = GetCurrentTimestamp(); + ltime = GetStandbyLimitTime(now); if (!ltime) { /* @@ -702,7 +710,7 @@ bool HasStandbyLocks() return (t_thrd.storage_cxt.RecoveryLockList == NIL); } -#ifndef ENABLE_MULTIPLE_NODES + /* * For hot standby, maintain a list for csn committing status compensation * when the primary dn restarts after a crash. */ @@ -719,10 +727,6 @@ void DealCSNLogForHotStby(XLogReaderState* record, uint8 info) XactLockTableInsert(id[0]); CSNLogSetCommitSeqNo(id[0], (int)childrenxidnum, &id[3], (COMMITSEQNO_COMMIT_INPROGRESS | id[1])); RecordCommittingCsnInfo(id[0]); - for (uint64 i = 0; i < childrenxidnum; ++i) { - XactLockTableInsert(id[i + 3]); - RecordCommittingCsnInfo(id[i + 3]); - } } else { XactLockTableInsert(id[0]); CSNLogSetCommitSeqNo(id[0], 0, 0, (COMMITSEQNO_COMMIT_INPROGRESS | id[1])); @@ -731,14 +735,23 @@ void DealCSNLogForHotStby(XLogReaderState* record, uint8 info) return; } else { uint64* id = ((uint64 *)XLogRecGetData(record)); +#ifndef ENABLE_MULTIPLE_NODES CSNLogSetCommitSeqNo(id[0], 0, NULL, COMMITSEQNO_ABORTED); XactLockTableDelete(id[0]); (void)RemoveCommittedCsnInfo(id[0]); - +#else + if (id[0] == InvalidTransactionId) { + RemoveAllCommittedCsnInfo(); + } else { + Assert(t_thrd.proc->workingVersionNum < DISASTER_READ_VERSION_NUM); + CSNLogSetCommitSeqNo(id[0], 0, NULL, COMMITSEQNO_ABORTED); + XactLockTableDelete(id[0]); + (void)RemoveCommittedCsnInfo(id[0]); + } +#endif return; } } -#endif @@ -751,22 +764,15 @@ void DealCSNLogForHotStby(XLogReaderState* record, uint8 info) */ void standby_redo(XLogReaderState* record) { - uint8 info; - - /* Support redo old version xlog during upgrade (Just the runningxact log with chekpoint online ) */ - if (t_thrd.xlog_cxt.redo_oldversion_xlog) - info = (((XLogRecordOld*)record->decoded_record)->xl_info) & ~XLR_INFO_MASK; - else - info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; + uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; /* Backup blocks are not used in standby records */ Assert(!XLogRecHasAnyBlockRefs(record)); -#ifndef ENABLE_MULTIPLE_NODES + if (info == XLOG_STANDBY_CSN_COMMITTING || info == XLOG_STANDBY_CSN_ABORTED) { DealCSNLogForHotStby(record, info); return; } -#endif /* Do nothing if we're not in hot standby mode */ if (t_thrd.xlog_cxt.standbyState == STANDBY_DISABLED) @@ -780,17 +786,11 @@ void standby_redo(XLogReaderState* record) StandbyAcquireAccessExclusiveLock(xlrec->locks[i].xid, xlrec->locks[i].dbOid, xlrec->locks[i].relOid); } else if (info == XLOG_RUNNING_XACTS) { RunningTransactionsData 
running; - if (t_thrd.xlog_cxt.redo_oldversion_xlog) { - xl_running_xacts_old* xlrc = (xl_running_xacts_old*)XLogRecGetData(record); - running.nextXid = xlrc->nextXid; - running.oldestRunningXid = xlrc->oldestRunningXid; - running.latestCompletedXid = xlrc->latestCompletedXid; - } else { - xl_running_xacts* xlrec = (xl_running_xacts*)XLogRecGetData(record); - running.nextXid = xlrec->nextXid; - running.oldestRunningXid = xlrec->oldestRunningXid; - running.latestCompletedXid = xlrec->latestCompletedXid; - } + + xl_running_xacts* xlrec = (xl_running_xacts*)XLogRecGetData(record); + running.nextXid = xlrec->nextXid; + running.oldestRunningXid = xlrec->oldestRunningXid; + running.latestCompletedXid = xlrec->latestCompletedXid; ProcArrayApplyRecoveryInfo(&running); } else if (info == XLOG_STANDBY_CSN) { TransactionId new_global_xmin = *((TransactionId*)XLogRecGetData(record)); @@ -1083,26 +1083,28 @@ void LogReleaseAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid) } - -#ifndef ENABLE_MULTIPLE_NODES void StandbyXlogStartup(void) { t_thrd.xlog_cxt.committing_csn_list = NIL; } -void *XLogReleaseAdnGetCommittingCsnList() +void *XLogReleaseAndGetCommittingCsnList() { ListCell* l = NULL; foreach (l, t_thrd.xlog_cxt.committing_csn_list) { TransactionId* action = (TransactionId*)lfirst(l); if (log_min_messages <= DEBUG4) { - ereport(LOG, (errmsg("XLogReleaseAdnGetCommittingCsnList: action xid:%lu", *action))); + ereport(LOG, (errmsg("XLogReleaseAndGetCommittingCsnList: action xid:%lu", *action))); } CSNLogSetCommitSeqNo(*action, 0, NULL, COMMITSEQNO_ABORTED); XactLockTableDelete(*action); } List* committingCsnList = t_thrd.xlog_cxt.committing_csn_list; +#ifdef ENABLE_MULTIPLE_NODES + list_free(t_thrd.xlog_cxt.committing_csn_list); + committingCsnList = NIL; +#endif t_thrd.xlog_cxt.committing_csn_list = NIL; return committingCsnList; } @@ -1119,7 +1121,6 @@ void CleanUpMakeCommitAbort(List* committingCsnList) XLogRegisterData((char *)action, sizeof(TransactionId)); XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_ABORTED); } - MemoryContext oldCtx = NULL; if (IsExtremeRtoRunning()) { oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); @@ -1130,8 +1131,6 @@ void CleanUpMakeCommitAbort(List* committingCsnList) } } - - void StandbyXlogCleanup(void) { ListCell* l = NULL; @@ -1150,11 +1149,25 @@ void StandbyXlogCleanup(void) } CSNLogSetCommitSeqNo(*action, 0, NULL, COMMITSEQNO_ABORTED); XactLockTableDelete(*action); +#ifndef ENABLE_MULTIPLE_NODES XLogBeginInsert(); XLogRegisterData((char *)action, sizeof(TransactionId)); XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_ABORTED); - +#endif } + +#ifdef ENABLE_MULTIPLE_NODES + TransactionId clean_xid = InvalidTransactionId; + if (t_thrd.role == STARTUP && !IS_PGXC_COORDINATOR && t_thrd.proc->workingVersionNum >= DISASTER_READ_VERSION_NUM) { + if (log_min_messages <= DEBUG4) { + ereport(LOG, (errmsg("StandbyXlogCleanup: insert clean xlog"))); + } + XLogBeginInsert(); + XLogRegisterData((char*)(&clean_xid), sizeof(TransactionId)); + XLogInsert(RM_STANDBY_ID, XLOG_STANDBY_CSN_ABORTED); + } +#endif + MemoryContext oldCtx = NULL; if (IsExtremeRtoRunning()) { oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.predo_cxt.parallelRedoCtx); @@ -1168,8 +1181,23 @@ void StandbyXlogCleanup(void) bool StandbySafeRestartpoint(void) { - if (t_thrd.xlog_cxt.committing_csn_list) +#ifdef ENABLE_MULTIPLE_NODES + ListCell* l = NULL; + if (t_thrd.xlog_cxt.committing_csn_list && IS_DISASTER_RECOVER_MODE) { + foreach (l, t_thrd.xlog_cxt.committing_csn_list) 
{ + TransactionId* action = (TransactionId*)lfirst(l); + if (log_min_messages <= DEBUG4) { + ereport(LOG, + (errmsg("StandbySafeRestartpoint: action xid:%lu", *action))); + } + } return false; + } +#else + if (t_thrd.xlog_cxt.committing_csn_list) { + return false; + } +#endif return true; } @@ -1221,4 +1249,19 @@ bool RemoveCommittedCsnInfo(TransactionId xid) } return false; } -#endif + +void RemoveAllCommittedCsnInfo() +{ + ListCell* l = NULL; + foreach (l, t_thrd.xlog_cxt.committing_csn_list) { + TransactionId* action = (TransactionId*)lfirst(l); + if (log_min_messages <= DEBUG4) { + ereport(LOG, + (errmsg("RemoveAllCommittedCsnInfo successfully: action xid:%lu", *action))); + } + CSNLogSetCommitSeqNo(*action, 0, NULL, COMMITSEQNO_ABORTED); + XactLockTableDelete(*action); + } + list_free_deep(t_thrd.xlog_cxt.committing_csn_list); + t_thrd.xlog_cxt.committing_csn_list = NIL; +} \ No newline at end of file diff --git a/src/gausskernel/storage/lmgr/CMakeLists.txt b/src/gausskernel/storage/lmgr/CMakeLists.txt index d0b57dc4a..38a8944a4 100755 --- a/src/gausskernel/storage/lmgr/CMakeLists.txt +++ b/src/gausskernel/storage/lmgr/CMakeLists.txt @@ -1,7 +1,12 @@ #This is the main CMAKE for build bin. execute_process( COMMAND perl generate-lwlocknames.pl ${CMAKE_CURRENT_SOURCE_DIR}/lwlocknames.txt - COMMAND ln -fs ${CMAKE_CURRENT_SOURCE_DIR}/lwlocknames.h ${PROJECT_SRC_DIR}/include/storage/lwlocknames.h + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE LWLOCKNAMES +) + +execute_process( + COMMAND cp -f ${CMAKE_CURRENT_SOURCE_DIR}/lwlocknames.h ${PROJECT_SRC_DIR}/include/storage/lwlocknames.h WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE LWLOCKNAMES ) diff --git a/src/gausskernel/storage/lmgr/lmgr.cpp b/src/gausskernel/storage/lmgr/lmgr.cpp index 3bda7f3c7..2314575b9 100755 --- a/src/gausskernel/storage/lmgr/lmgr.cpp +++ b/src/gausskernel/storage/lmgr/lmgr.cpp @@ -85,7 +85,7 @@ void LockRelationOid(Oid relid, LOCKMODE lockmode) * modifies the rel, the relcache update happens via * CommandCounterIncrement, not here.) */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); } @@ -114,7 +114,7 @@ bool ConditionalLockRelationOid(Oid relid, LOCKMODE lockmode) * Now that we have the lock, check for invalidation messages; see notes * in LockRelationOid. */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); return true; @@ -168,7 +168,7 @@ void LockRelation(Relation relation, LOCKMODE lockmode) * Now that we have the lock, check for invalidation messages; see notes * in LockRelationOid. */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); } @@ -194,7 +194,7 @@ bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode) * Now that we have the lock, check for invalidation messages; see notes * in LockRelationOid. 
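These lmgr call sites all share one pattern: acquire the heavyweight lock, then accept pending shared invalidations unless the lock was already held, with DeepthInAcceptInvalidationMessageNotZero() guarding the recursive case. A condensed sketch of the pattern; both helpers below are hypothetical stand-ins:

```
/* Hypothetical stand-ins for the kernel's helpers. */
enum class AcquireResult { Acquired, AlreadyHeld };
static bool InInvalidationRecursion() { return false; } /* DeepthInAcceptInvalidationMessageNotZero() */
static void AcceptPendingInvalidations() {}             /* AcceptInvalidationMessages() */

void RefreshCachesAfterLock(AcquireResult res)
{
    /* A lock we already held cannot have let new DDL slip in, so the refresh
     * can be skipped, except while already inside invalidation processing,
     * where skipping could starve messages queued by the outer call. */
    if (res != AcquireResult::AlreadyHeld || InInvalidationRecursion()) {
        AcceptPendingInvalidations();
    }
}
```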
*/ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); return true; @@ -256,7 +256,7 @@ bool ConditionalLockCStoreFreeSpace(Relation relation) * Now that we have the lock, check for invalidation messages; see notes * in LockRelationOid. */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); return true; @@ -286,6 +286,17 @@ bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode) return LockHasWaiters(&tag, lockmode, false); } +bool LockHasWaitersPartition(Relation relation, LOCKMODE lockmode) +{ + LOCKTAG tag; + Assert(RelationIsPartition(relation)); + + SET_LOCKTAG_PARTITION(tag, relation->rd_lockInfo.lockRelId.dbId, relation->parentId, relation->rd_id); + + return LockHasWaiters(&tag, lockmode, false); +} + + /* * LockRelationIdForSession * @@ -470,7 +481,7 @@ void UnlockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode) * because we can't afford to keep a separate lock in shared memory for every * tuple. See heap_lock_tuple before using this! */ -void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, bool allow_con_update) +void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, bool allow_con_update, int waitSec) { LOCKTAG tag; @@ -481,7 +492,28 @@ void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, bool allow ItemPointerGetBlockNumber(tid), ItemPointerGetOffsetNumber(tid)); - (void)LockAcquire(&tag, lockmode, false, false, allow_con_update); + (void)LockAcquire(&tag, lockmode, false, false, allow_con_update, waitSec); +} + +#define UID_LOW_BIT (32) +void LockTupleUid(Relation relation, uint64 uid, LOCKMODE lockmode, bool allow_con_update, bool lockTuple) +{ + LOCKTAG tag; + + SET_LOCKTAG_UID(tag, relation->rd_lockInfo.lockRelId.dbId, relation->rd_lockInfo.lockRelId.relId, + (uint32)((uint64)uid >> UID_LOW_BIT), (uint32)uid); + + if (allow_con_update) { + (void)LockAcquire(&tag, lockmode, false, false, allow_con_update); + } else if (LockAcquire(&tag, lockmode, false, true) != LOCKACQUIRE_NOT_AVAIL) { + if (lockTuple) { + ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), + errmsg("could not obtain lock on row in relation \"%s\"", RelationGetRelationName(relation)))); + } else { + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("abort transaction due to concurrent update"))); + } + } } /* @@ -565,7 +597,7 @@ void XactLockTableDelete(TransactionId xid) * successfully or unsuccessfully. So we have to check if it's "still running" * and if so wait for its parent. */ -void XactLockTableWait(TransactionId xid, bool allow_con_update) +void XactLockTableWait(TransactionId xid, bool allow_con_update, int waitSec) { LOCKTAG tag; CLogXidStatus status = CLOG_XID_STATUS_IN_PROGRESS; @@ -579,7 +611,7 @@ void XactLockTableWait(TransactionId xid, bool allow_con_update) SET_LOCKTAG_TRANSACTION(tag, xid); - (void)LockAcquire(&tag, ShareLock, false, false, allow_con_update); + (void)LockAcquire(&tag, ShareLock, false, false, allow_con_update, waitSec); (void)LockRelease(&tag, ShareLock, false); @@ -706,7 +738,7 @@ SubXactLockTableDelete(SubTransactionId subxid) * still in progress. 
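LockTupleUid packs the 64-bit row uid into two 32-bit LOCKTAG fields using UID_LOW_BIT, and DescribeLockTag reassembles it the same way. A minimal round-trip sketch of that packing:

```
#include <cstdint>

constexpr int kUidLowBit = 32; /* mirrors UID_LOW_BIT */

struct UidTagFields {
    uint32_t hi; /* stored in locktag_field3 */
    uint32_t lo; /* stored in locktag_field4 */
};

constexpr UidTagFields PackUid(uint64_t uid)
{
    return {static_cast<uint32_t>(uid >> kUidLowBit), static_cast<uint32_t>(uid)};
}

constexpr uint64_t UnpackUid(UidTagFields f)
{
    /* Matches DescribeLockTag: (field3 << UID_LOW_BIT) + field4. */
    return (static_cast<uint64_t>(f.hi) << kUidLowBit) + f.lo;
}

static_assert(UnpackUid(PackUid(0x1234567890ABCDEFull)) == 0x1234567890ABCDEFull, "uid round trip");
```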
*/ void -SubXactLockTableWait(TransactionId xid, SubTransactionId subxid) +SubXactLockTableWait(TransactionId xid, SubTransactionId subxid, int waitSec) { LOCKTAG tag; Assert(TransactionIdIsValid(xid)); @@ -715,7 +747,7 @@ SET_LOCKTAG_SUBTRANSACTION(tag, xid, subxid); - (void) LockAcquire(&tag, ShareLock, false, false); + (void) LockAcquire(&tag, ShareLock, false, false, false, waitSec); LockRelease(&tag, ShareLock, false); } @@ -890,6 +922,13 @@ void DescribeLockTag(StringInfo buf, const LOCKTAG *tag) tag->locktag_field5, tag->locktag_field1); break; + case LOCKTAG_UID: + appendStringInfo(buf, + _("tuple uid %lu of (relation %u) of database %u"), + (((uint64)tag->locktag_field3) << UID_LOW_BIT) + tag->locktag_field4, + tag->locktag_field2, + tag->locktag_field1); + break; case LOCKTAG_TRANSACTION: appendStringInfo(buf, _("transaction %u"), tag->locktag_field1); break; @@ -1013,7 +1052,7 @@ void LockPartitionOid(Oid relid, uint32 seq, LOCKMODE lockmode) * modifies the rel, the relcache update happens via * CommandCounterIncrement, not here.) */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); } @@ -1032,7 +1071,7 @@ bool ConditionalLockPartitionOid(Oid relid, uint32 seq, LOCKMODE lockmode) * Now that we have the lock, check for invalidation messages; see notes * in LockRelationOid. */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); return true; @@ -1076,7 +1115,7 @@ void LockPartitionSeq(Oid relid, uint32 seq, LOCKMODE lockmode) * modifies the rel, the relcache update happens via * CommandCounterIncrement, not here.) */ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); } @@ -1095,7 +1134,7 @@ bool ConditionalLockPartitionSeq(Oid relid, uint32 seq, LOCKMODE lockmode) * Now that we have the lock, check for invalidation messages; see notes * in LockRelationOid. 
*/ - if (res != LOCKACQUIRE_ALREADY_HELD || u_sess->inval_cxt.deepthInAcceptInvalidationMessage > 0) + if (res != LOCKACQUIRE_ALREADY_HELD || DeepthInAcceptInvalidationMessageNotZero()) AcceptInvalidationMessages(); return true; diff --git a/src/gausskernel/storage/lmgr/lock.cpp b/src/gausskernel/storage/lmgr/lock.cpp index eb39f4052..e49970366 100644 --- a/src/gausskernel/storage/lmgr/lock.cpp +++ b/src/gausskernel/storage/lmgr/lock.cpp @@ -127,94 +127,13 @@ static const LockMethodData user_lockmethod = { AccessExclusiveLock, /* highest * map from lock method id to the lock table data structures */ static const LockMethod LockMethods[] = {NULL, &default_lockmethod, &user_lockmethod}; - -/* Record that's written to 2PC state file when a lock is persisted */ -typedef struct TwoPhaseLockRecord { - LOCKTAG locktag; - LOCKMODE lockmode; -} TwoPhaseLockRecord; - -/* Macros for manipulating proc->fpLockBits */ -#define FAST_PATH_BITS_PER_SLOT 3 -#define FAST_PATH_LOCKNUMBER_OFFSET 1 -#define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1) -#define FAST_PATH_GET_BITS(proc, n) (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * (n))) & FAST_PATH_MASK) -#define FAST_PATH_BIT_POSITION(n, l) \ - (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \ - AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \ - AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \ - ((l)-FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n))) -#define FAST_PATH_SET_LOCKMODE(proc, n, l) \ - (proc)->fpLockBits |= UINT64CONST(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)) -#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \ - (proc)->fpLockBits &= ~(UINT64CONST(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))) -#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \ - ((proc)->fpLockBits & (UINT64CONST(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))) - -#define PRINT_WAIT_LENTH (8 + 1) -#define CHECK_LOCKMETHODID(lockMethodId) \ - do { \ - if (unlikely((lockMethodId) == 0 || (lockMethodId) >= lengthof(LockMethods))) { \ - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), \ - errmsg("unrecognized lock method: %hu", (lockMethodId)))); \ - } \ - } while (0) -#define CHECK_LOCKMODE(lockMode, lockMethodTable) \ - do { \ - if (unlikely((lockMode) <= 0 || (lockMode) > (lockMethodTable)->numLockModes)) { \ - ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), \ - errmsg("unrecognized lock mode: %d", (lockMode)))); \ - } \ - } while (0) - -/* - * The fast-path lock mechanism is concerned only with relation locks on - * unshared relations by backends bound to a database. The fast-path - * mechanism exists mostly to accelerate acquisition and release of locks - * that rarely conflict. Because ShareUpdateExclusiveLock is - * self-conflicting, it can't use the fast-path mechanism; but it also does - * not conflict with any of the locks that do, so we can ignore it completely. 
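The comment block above (removed here, presumably relocated) documents the fast-path eligibility rule: only relation or partition locks weaker than ShareUpdateExclusiveLock may take the fast path, while stronger modes must force fast-path entries into the main table. A compact sketch of the two predicates, assuming the usual ordering of lock modes by strength:

```
/* Lock modes as a plain enum ordered by strength; an assumption that mirrors
 * the relative order of the kernel's LOCKMODE values. */
enum LockModeSketch {
    AccessShare = 1, RowShare, RowExclusive, ShareUpdateExclusive,
    Share, ShareRowExclusive, Exclusive, AccessExclusive
};

/* Weak enough to grant locally without touching the shared lock table. */
constexpr bool EligibleForFastPath(LockModeSketch mode)
{
    return mode < ShareUpdateExclusive;
}

/* Strong enough that fast-path locks held elsewhere must first be transferred
 * into the main table; the per-partition "strong lock" counter guards this. */
constexpr bool ConflictsWithFastPath(LockModeSketch mode)
{
    return mode > ShareUpdateExclusive;
}
```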
- */ -#define EligibleForRelationFastPath(locktag, mode) \ - ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \ - ((locktag)->locktag_type == LOCKTAG_RELATION || (locktag)->locktag_type == LOCKTAG_PARTITION) && \ - (mode) < ShareUpdateExclusiveLock) -#define ConflictsWithRelationFastPath(locktag, mode) \ - ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \ - ((locktag)->locktag_type == LOCKTAG_RELATION || (locktag)->locktag_type == LOCKTAG_PARTITION) && \ - (mode) > ShareUpdateExclusiveLock) - static bool FastPathGrantRelationLock(const FastPathTag &tag, LOCKMODE lockmode); static bool FastPathUnGrantRelationLock(const FastPathTag &tag, LOCKMODE lockmode); static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode); static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock); - -/* - * To make the fast-path lock mechanism work, we must have some way of - * preventing the use of the fast-path when a conflicting lock might be - * present. We partition* the locktag space into FAST_PATH_HASH_BUCKETS - * partitions, and maintain an integer count of the number of "strong" lockers - * in each partition. When any "strong" lockers are present (which is - * hopefully not very often), the fast-path mechanism can't be used, and we - * must fall back to the slower method of pushing matching locks directly - * into the main lock tables. - * - * The deadlock detector does not know anything about the fast path mechanism, - * so any locks that might be involved in a deadlock must be transferred from - * the fast-path queues to the main lock table. - */ -#define FAST_PATH_STRONG_LOCK_HASH_BITS 10 -#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS (1 << FAST_PATH_STRONG_LOCK_HASH_BITS) -#define FastPathStrongLockHashPartition(hashcode) ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS) - -typedef struct FastPathStrongRelationLockData { - slock_t mutex; - uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]; -} FastPathStrongRelationLockData; - static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, bool only_increment, - bool allow_con_update = false); + bool allow_con_update = false, int waitSec = 0); #if defined(LOCK_DEBUG) || defined(USE_ASSERT_CHECKING) @@ -303,7 +222,7 @@ static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, cons static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner); static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode); static void FinishStrongLockAcquire(void); -static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool allow_con_update); +static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool allow_con_update, int waitSec); static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock); static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable); static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded); @@ -449,13 +368,12 @@ uint32 LockTagHashCode(const LOCKTAG *locktag) static uint32 proclock_hash(const void *key, Size keysize) { const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *)key; - uint32 lockhash; Datum procptr; Assert(keysize == sizeof(PROCLOCKTAG)); /* Look into the associated LOCK object, and compute its hash code */ - lockhash = LockTagHashCode(&proclocktag->myLock->tag); + uint32 lockhash = 
LockTagHashCode(&proclocktag->myLock->tag); /* * To make the hash code also depend on the PGPROC, we xor the proc @@ -479,12 +397,11 @@ static uint32 proclock_hash(const void *key, Size keysize) static inline uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode) { uint32 lockhash = hashcode; - Datum procptr; /* * This must match proclock_hash()! */ - procptr = PointerGetDatum(proclocktag->myProc); + Datum procptr = PointerGetDatum(proclocktag->myProc); lockhash ^= ((uint32)procptr) << LOG2_NUM_LOCK_PARTITIONS; return lockhash; @@ -497,16 +414,11 @@ static inline uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 has bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) { LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid; - LockMethod lockMethodTable; LOCALLOCKTAG localtag; - LOCALLOCK *locallock = NULL; - LOCK *lock = NULL; - PROCLOCK *proclock = NULL; - LWLock *partitionLock = NULL; bool hasWaiters = false; CHECK_LOCKMETHODID(lockmethodid); - lockMethodTable = LockMethods[lockmethodid]; + LockMethod lockMethodTable = LockMethods[lockmethodid]; CHECK_LOCKMODE(lockmode, lockMethodTable); #ifdef LOCK_DEBUG @@ -524,7 +436,8 @@ bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) localtag.lock = *locktag; localtag.mode = lockmode; - locallock = (LOCALLOCK *)hash_search(t_thrd.storage_cxt.LockMethodLocalHash, (void *)&localtag, HASH_FIND, NULL); + LOCALLOCK *locallock = + (LOCALLOCK *)hash_search(t_thrd.storage_cxt.LockMethodLocalHash, (void *)&localtag, HASH_FIND, NULL); /* * let the caller print its own error message, too. Do not ereport(ERROR). */ @@ -536,7 +449,7 @@ bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) /* * Check the shared lock table. */ - partitionLock = LockHashPartitionLock(locallock->hashcode); + LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode); LWLockAcquire(partitionLock, LW_SHARED); @@ -545,9 +458,9 @@ bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) * addresses in the locallock table, and they couldn't have been removed * while we were holding a lock on them. */ - lock = locallock->lock; + LOCK *lock = locallock->lock; LOCK_PRINT("LockHasWaiters: found", lock, lockmode); - proclock = locallock->proclock; + PROCLOCK *proclock = locallock->proclock; PROCLOCK_PRINT("LockHasWaiters: found", proclock); /* @@ -598,9 +511,9 @@ bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) * short of aborting the transaction. */ LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, - bool allow_con_update) + bool allow_con_update, int waitSec) { - return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait, true, allow_con_update); + return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait, true, allow_con_update, waitSec); } #ifdef PGXC @@ -612,10 +525,8 @@ LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool se */ bool LockIncrementIfExists(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) { - int ret; - - ret = LockAcquireExtendedXC(locktag, lockmode, sessionLock, true, /* never wait */ - true, true); + int ret = LockAcquireExtendedXC(locktag, lockmode, sessionLock, true, /* never wait */ + true, true); return (ret == LOCKACQUIRE_ALREADY_HELD); } @@ -631,9 +542,10 @@ bool LockIncrementIfExists(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessi * retrying the action. 
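The new waitSec parameter is threaded from LockAcquire and LockAcquireExtended down to ProcSleep; judging by the default argument, 0 appears to preserve the legacy indefinite wait. A timed-mutex analogue of that contract:

```
#include <chrono>
#include <mutex>

/* waitSec == 0: block indefinitely (legacy behaviour, assumed from the default
 * argument); waitSec > 0: give up after that many seconds, as ProcSleep would. */
bool AcquireWithOptionalTimeout(std::timed_mutex& m, int waitSec)
{
    if (waitSec == 0) {
        m.lock();
        return true;
    }
    return m.try_lock_for(std::chrono::seconds(waitSec));
}
```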
*/ LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, - bool reportMemoryError, bool allow_con_update) + bool reportMemoryError, bool allow_con_update, int waitSec) { - return LockAcquireExtendedXC(locktag, lockmode, sessionLock, dontWait, reportMemoryError, false, allow_con_update); + return LockAcquireExtendedXC(locktag, lockmode, sessionLock, dontWait, reportMemoryError, false, allow_con_update, + waitSec); } /* @@ -641,11 +553,9 @@ */ bool IsOtherProcRedistribution(PGPROC *otherProc) { - PGXACT *pgxact = NULL; + PGXACT *pgxact = &g_instance.proc_base_all_xacts[otherProc->pgprocno]; bool isRedis = false; - pgxact = &g_instance.proc_base_all_xacts[otherProc->pgprocno]; - LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); if (pgxact->vacuumFlags & PROC_IS_REDIST) { isRedis = true; @@ -655,13 +565,90 @@ bool IsOtherProcRedistribution(PGPROC *otherProc) return isRedis; } +/* + * when a query runs in stream mode, the topConsumer and producer threads hold different + * Procs, but we treat them as one transaction + */ +bool inline IsInSameTransaction(PGPROC *proc1, PGPROC *proc2) +{ + return u_sess->stream_cxt.global_obj == NULL ? false + : u_sess->stream_cxt.global_obj->inNodeGroup(proc1->pid, proc2->pid); +} + +static bool IsPrepareXact(const PGPROC *proc) +{ + PGXACT *pgxact = &g_instance.proc_base_all_xacts[proc->pgprocno]; + if (pgxact->prepare_xid != InvalidTransactionId) { + ereport(DEBUG1, (errmsg("IsPrepareXact skip process %lu", proc->pid))); + return true; + } + return false; +} + +void CancelConflictLockWaiter(PROCLOCK *proclock, LOCK *lock, LockMethod lockMethodTable, LOCKMODE lockmode) +{ + PROC_QUEUE *waitQueue = &(lock->waitProcs); + PGPROC *proc = (PGPROC *)waitQueue->links.prev; + PGPROC *leader1 = (t_thrd.proc->lockGroupLeader == NULL) ? t_thrd.proc : t_thrd.proc->lockGroupLeader; + + /* do nothing if we are not in lock cancel mode */ + if (!t_thrd.xact_cxt.enable_lock_cancel || lockmode < ExclusiveLock) { + return; + } + for (int j = 0; j < waitQueue->size; j++) { + bool conflictLocks = + ((lockMethodTable->conflictTab[lockmode] & LOCKBIT_ON((unsigned int)proc->waitLockMode)) != 0); + PGPROC *leader2 = (proc->lockGroupLeader == NULL) ? proc : proc->lockGroupLeader; + bool isSameTrans = ((StreamTopConsumerAmI() || StreamThreadAmI()) && IsInSameTransaction(proc, t_thrd.proc)) || + (leader1 == leader2); + /* send SIGTERM to the wait-queue proc when it conflicts and is not in the same stream transaction or lock group */ + if (conflictLocks && !isSameTrans && !IsPrepareXact(proc) && + proc->pid != 0 && gs_signal_send(proc->pid, SIGTERM) < 0) { + /* Just a warning to allow multiple callers */ + ereport(WARNING, (errmsg("could not send signal to process %lu: %m", proc->pid))); + } + proc = (PGPROC *)proc->links.prev; + } +} + +void CancelConflictLockHolder(PROCLOCK *proclock, LOCK *lock, LockMethod lockMethodTable, LOCKMODE lockmode) +{ + SHM_QUEUE *otherProcLocks = &(lock->procLocks); + PROCLOCK *otherProcLock = (PROCLOCK *)SHMQueueNext(otherProcLocks, otherProcLocks, offsetof(PROCLOCK, lockLink)); + PGPROC *leader1 = (t_thrd.proc->lockGroupLeader == NULL) ? 
t_thrd.proc : t_thrd.proc->lockGroupLeader; + + /* do nothing if we are not in lock cancel mode */ + if (!t_thrd.xact_cxt.enable_lock_cancel || lockmode < ExclusiveLock) { + return; + } + while (otherProcLock != NULL) { + if (otherProcLock->tag.myProc->pid != t_thrd.proc->pid && otherProcLock->tag.myProc->pid != 0) { + bool conflictLocks = ((lockMethodTable->conflictTab[lockmode] & otherProcLock->holdMask) != 0); + PGPROC *leader2 = (otherProcLock->tag.myProc->lockGroupLeader == NULL) ? + otherProcLock->tag.myProc : otherProcLock->tag.myProc->lockGroupLeader; + bool isSameTrans = ((StreamTopConsumerAmI() || StreamThreadAmI()) && + IsInSameTransaction(otherProcLock->tag.myProc, t_thrd.proc)) || (leader1 == leader2); + /* send SIGTERM to the holder proc when it conflicts and is not in the same stream transaction or lock group */ + if (conflictLocks && !isSameTrans && !IsPrepareXact(otherProcLock->tag.myProc) && + otherProcLock->tag.myProc->pid != 0 && gs_signal_send(otherProcLock->tag.myProc->pid, SIGTERM) < 0) { + /* Just a warning to allow multiple callers */ + ereport(WARNING, + (errmsg("could not send signal to process %lu: %m", otherProcLock->tag.myProc->pid))); + } + } + /* advance otherProcLock to the next entry. */ + otherProcLock = + (PROCLOCK *)SHMQueueNext(otherProcLocks, &otherProcLock->lockLink, offsetof(PROCLOCK, lockLink)); + } +} + /* * LockAcquireExtendedXC - additional parameter only_increment. This is XC * specific. Check comments for the function LockIncrementIfExists() */ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, bool only_increment, - bool allow_con_update) + bool allow_con_update, int waitSec) { LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid; LockMethod lockMethodTable; @@ -670,7 +657,7 @@ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE LOCK *lock = NULL; PROCLOCK *proclock = NULL; bool found = false; - ResourceOwner owner; + ResourceOwner owner = NULL; uint32 hashcode; LWLock *partitionLock = NULL; int status; @@ -696,10 +683,9 @@ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE instr_stmt_report_lock(LOCK_START, lockmode, locktag); /* Identify owner for lock */ - if (sessionLock) - owner = NULL; - else + if (!sessionLock) { owner = t_thrd.utils_cxt.CurrentResourceOwner; + } /* * Find or create a LOCALLOCK entry for this lock and lockmode @@ -805,7 +791,7 @@ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE * for now we don't worry about that case either. 
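CancelConflictLockWaiter and CancelConflictLockHolder apply one decision rule to each candidate proc: cancel only real, conflicting strangers. A condensed sketch of that predicate with simplified stand-in types (the conflictTab bitmask test is abstracted into a boolean):

```
/* Simplified stand-in for PGPROC; "conflicts" abstracts the conflictTab test. */
struct ProcSketch {
    unsigned long pid;
    const ProcSketch* groupLeader; /* null when not in a lock group */
    bool preparedXact;             /* IsPrepareXact() */
};

static const ProcSketch* LeaderOf(const ProcSketch* p)
{
    return (p->groupLeader != nullptr) ? p->groupLeader : p;
}

/* Cancel only real, conflicting strangers: never pid 0, never a prepared
 * transaction (no thread is left to honor SIGTERM), and never a proc in our
 * own lock group or stream transaction. */
bool ShouldCancel(const ProcSketch& me, const ProcSketch& other, bool conflicts, bool sameStreamTransaction)
{
    if (!conflicts || other.pid == 0 || other.preparedXact) {
        return false;
    }
    if (sameStreamTransaction || LeaderOf(&me) == LeaderOf(&other)) {
        return false;
    }
    return true; /* the kernel would now gs_signal_send(other.pid, SIGTERM) */
}
```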
*/ if (EligibleForRelationFastPath(locktag, lockmode) && - t_thrd.storage_cxt.FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND) { + (uint32)t_thrd.storage_cxt.FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND) { uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode); bool acquired = false; @@ -999,6 +985,10 @@ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE if (status == STATUS_FOUND_NEED_CANCEL) { CancelBlockedRedistWorker(lock, lockmode); u_sess->catalog_cxt.redistribution_cancelable = false; + } else { + /* cancel conflicting blockers if needed */ + CancelConflictLockWaiter(proclock, lock, lockMethodTable, lockmode); + CancelConflictLockHolder(proclock, lock, lockMethodTable, lockmode); } /* @@ -1012,7 +1002,7 @@ static LockAcquireResult LockAcquireExtendedXC(const LOCKTAG *locktag, LOCKMODE TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1, locktag->locktag_field2, locktag->locktag_field3, locktag->locktag_field4, locktag->locktag_type, lockmode); - WaitOnLock(locallock, owner, allow_con_update); + WaitOnLock(locallock, owner, allow_con_update, waitSec); TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1, locktag->locktag_field2, locktag->locktag_field3, locktag->locktag_field4, locktag->locktag_type, lockmode); @@ -1254,23 +1244,12 @@ static void RemoveLocalLock(LOCALLOCK *locallock) ereport(WARNING, (errmsg("locallock table corrupted"))); } -/* - * when query run as stream mode, the topConsumer and producer thread hold differnt - * Procs, but we treat them as one transaction - */ -bool inline IsInSameTransaction(PGPROC *proc1, PGPROC *proc2) -{ - return u_sess->stream_cxt.global_obj == NULL ? false - : u_sess->stream_cxt.global_obj->inNodeGroup(proc1->pid, proc2->pid); -} - bool inline IsInSameLockGroup(const PROCLOCK *proclock1, const PROCLOCK *proclock2) { Assert(proclock1->groupLeader != t_thrd.proc || t_thrd.proc->lockGroupLeader != NULL); return proclock1 != proclock2 && proclock1->groupLeader == proclock2->groupLeader; } - /* * LockCheckConflicts -- test whether requested lock conflicts * with those already granted @@ -1691,7 +1670,7 @@ static void ReportWaitLockInfo(const LOCALLOCK *locallock) * * The appropriate partition lock must be held at entry. */ -static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool allow_con_update) +static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool allow_con_update, int waitSec) { LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock); LockMethod lockMethodTable = LockMethods[lockmethodid]; @@ -1741,7 +1720,7 @@ static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool allow_con */ PG_TRY(); { - if (ProcSleep(locallock, lockMethodTable, allow_con_update) != STATUS_OK) { + if (ProcSleep(locallock, lockMethodTable, allow_con_update, waitSec) != STATUS_OK) { /* * We failed as a result of a deadlock, see CheckDeadLock(). Quit * now. @@ -2281,7 +2260,7 @@ void Check_FastpathBit() } /* reset fastpath bit num and use count, also report leak */ t_thrd.storage_cxt.FastPathLocalUseCount = 0; - t_thrd.proc->fpLockBits = 0; + FAST_PATH_SET_LOCKBITS_ZERO(t_thrd.proc); if (leaked == true) ereport(WARNING, (errmsg("Fast path bit num leak."))); } @@ -3284,7 +3263,6 @@ LockData *GetLockStatusData(void) for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f) { LockInstanceData *instance = NULL; uint32 lockbits = FAST_PATH_GET_BITS(proc, f); - /* Skip unallocated slots. 
*/ if (!lockbits) continue; diff --git a/src/gausskernel/storage/lmgr/lwlock.cpp b/src/gausskernel/storage/lmgr/lwlock.cpp index 726f94b82..225d50c5b 100644 --- a/src/gausskernel/storage/lmgr/lwlock.cpp +++ b/src/gausskernel/storage/lmgr/lwlock.cpp @@ -97,6 +97,10 @@ #include "instruments/instr_statement.h" #include "tsan_annotation.h" +#ifndef MAX +#define MAX(A, B) ((B) > (A) ? (B) : (A)) +#endif + #define LW_FLAG_HAS_WAITERS ((uint32)1 << 30) #define LW_FLAG_RELEASE_OK ((uint32)1 << 29) #define LW_FLAG_LOCKED ((uint32)1 << 28) @@ -140,7 +144,7 @@ static const char *BuiltinTrancheNames[] = { "UniqueSQLMappingLock", "InstrUserLockId", "GPCMappingLock", - "UspagrpMappingLock", + "UspagrpMappingLock", "ProcXactMappingLock", "ASPMappingLock", "GlobalSeqLock", @@ -184,7 +188,12 @@ static const char *BuiltinTrancheNames[] = { "SegmentHeadPartitionLock", "TwoPhaseStatePartLock", "RoleIdPartLock", - "ReplicationOriginLock" + "PgwrSyncQueueLock", + "BarrierHashTblLock", + "PageRepairHashTblLock", + "FileRepairHashTblLock", + "ReplicationOriginLock", + "AuditIndextblLock" }; static void RegisterLWLockTranches(void); @@ -340,6 +349,12 @@ static lwlock_stats *get_lwlock_stats_entry(LWLock *lock) int NumLWLocks(void) { int numLocks; + uint32 maxConn = g_instance.attr.attr_network.MaxConnections; + uint32 maxThreadNum = 0; + if (ENABLE_THREAD_POOL) { + maxThreadNum = g_threadPoolControler->GetThreadNum(); + } + uint32 numLockFactor = 4; /* * Possibly this logic should be spread out among the affected modules, @@ -354,7 +369,7 @@ int NumLWLocks(void) numLocks += 2 * TOTAL_BUFFER_NUM; /* each zone owns undo space lock */ - numLocks += g_instance.attr.attr_storage.undo_zone_count * UNDO_ZONE_LOCK; + numLocks += MAX(maxConn, maxThreadNum) * numLockFactor * UNDO_ZONE_LOCK; /* cucache_mgr.cpp CU Cache calculates its own requirements */ numLocks += DataCacheMgrNumLocks(); @@ -406,6 +421,14 @@ int NumLWLocks(void) /* for unlink rel hashtbl, one is for all fork relation hashtable, one is for one fork relation hash table */ numLocks += 2; + /* for incre ckpt sync request queue lock */ + numLocks +=1; + /* for page repair hash table and file repair hash table */ + numLocks += 2; + + /* for barrier preparse hashtbl */ + numLocks += 1; + /* * Add any requested by loadable modules; for backwards-compatibility * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if @@ -1844,9 +1867,19 @@ void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val) */ void LWLockReleaseAll(void) { - while (t_thrd.storage_cxt.num_held_lwlocks > 0) { + int index = t_thrd.storage_cxt.num_held_lwlocks - 1; + while (index >= 0) { + // SwitchoverLockHolder never release switchover lock in LWLockReleaseAll + if (t_thrd.storage_cxt.isSwitchoverLockHolder && (g_instance.archive_obs_cxt.in_switchover || + g_instance.streaming_dr_cxt.isInSwitchover) && + (t_thrd.storage_cxt.held_lwlocks[index].lock == HadrSwitchoverLock)) { + index--; + continue; + } HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */ - LWLockRelease(t_thrd.storage_cxt.held_lwlocks[t_thrd.storage_cxt.num_held_lwlocks - 1].lock); + + LWLockRelease(t_thrd.storage_cxt.held_lwlocks[index].lock); + index--; } } diff --git a/src/gausskernel/storage/lmgr/lwlocknames.txt b/src/gausskernel/storage/lmgr/lwlocknames.txt index fbf2ce91b..a95d116a4 100755 --- a/src/gausskernel/storage/lmgr/lwlocknames.txt +++ b/src/gausskernel/storage/lmgr/lwlocknames.txt @@ -122,5 +122,13 @@ TDEKeyCacheLock 112 RoleIdLock 113 UniqueSqlEvictLock 114 InstrStmtTrackCtlLock 115 
-ReplicationOriginLock 116 -LogicalRepWorkerLock 117 +HashUidLock 116 +ParallelDecodeLock 117 +XLogMaxCSNLock 118 +DisasterCacheLock 119 +MaxCSNArrayLock 120 +RepairBadBlockStatHashLock 121 +ReplicationOriginLock 122 +LogicalRepWorkerLock 123 +XGBoostLibLock 124 +NgroupDestoryLock 125 diff --git a/src/gausskernel/storage/lmgr/proc.cpp b/src/gausskernel/storage/lmgr/proc.cpp index 9ca479fec..604e269e5 100755 --- a/src/gausskernel/storage/lmgr/proc.cpp +++ b/src/gausskernel/storage/lmgr/proc.cpp @@ -75,6 +75,7 @@ #include "instruments/instr_statement.h" #include "utils/builtins.h" #include "instruments/ash.h" +#include "pgaudit.h" #ifdef ENABLE_MULTIPLE_NODES #include "tsdb/compaction/compaction_worker_entry.h" #endif /* ENABLE_MULTIPLE_NODES */ @@ -90,7 +91,6 @@ static bool CheckStatementTimeout(void); static void CheckSessionTimeout(void); static bool CheckStandbyTimeout(void); -extern void ResetGtmHandleXmin(GTM_TransactionKey txnKey); static void FiniNuma(int code, Datum arg); static inline void ReleaseChildSlot(void); @@ -207,6 +207,18 @@ static void FiniNuma(int code, Datum arg) #endif } +int GetThreadPoolStreamProcNum() +{ + int thread_pool_stream_thread_num = g_threadPoolControler->GetStreamThreadNum(); + float thread_pool_stream_proc_ratio = g_threadPoolControler->GetStreamProcRatio(); + int thread_pool_stream_proc_num = thread_pool_stream_thread_num * thread_pool_stream_proc_ratio; + if (thread_pool_stream_proc_num == 0) { + int thread_pool_thread_num = g_threadPoolControler->GetThreadNum(); + thread_pool_stream_proc_num = DEFAULT_THREAD_POOL_STREAM_PROC_RATIO * thread_pool_thread_num; + } + return thread_pool_stream_proc_num; +} + /* * InitProcGlobal - * Initialize the global process table during postmaster or standalone @@ -267,6 +279,7 @@ void InitProcGlobal(void) g_instance.proc_base->walwriterLatch = NULL; g_instance.proc_base->walwriterauxiliaryLatch = NULL; g_instance.proc_base->checkpointerLatch = NULL; + g_instance.proc_base->pgwrMainThreadLatch = NULL; g_instance.proc_base->bgworkerFreeProcs = NULL; g_instance.proc_base->cbmwriterLatch = NULL; g_instance.proc_base->ShareStoragexlogCopyerLatch = NULL; @@ -284,7 +297,10 @@ void InitProcGlobal(void) int nNumaNodes = g_instance.shmem_cxt.numaNodeNum; /* since myProcLocks is a various array, need palloc actrual size */ - Size actrualPgProcSize = offsetof(PGPROC, myProcLocks) + NUM_LOCK_PARTITIONS * sizeof(SHM_QUEUE); + Size actrualPgProcSize = MAXALIGN(offsetof(PGPROC, myProcLocks) + NUM_LOCK_PARTITIONS * sizeof(SHM_QUEUE)) + + MAXALIGN(FP_LOCKBIT_NUM * sizeof(uint64)) + MAXALIGN(FP_LOCK_SLOTS_PER_BACKEND * sizeof(FastPathTag)); + Size fpLockBitsOffset = MAXALIGN(offsetof(PGPROC, myProcLocks) + NUM_LOCK_PARTITIONS * sizeof(SHM_QUEUE)); + Size fpRelIdOffset = fpLockBitsOffset + MAXALIGN(FP_LOCKBIT_NUM * sizeof(uint64)); #ifdef __USE_NUMA if (nNumaNodes > 1) { ereport(INFO, (errmsg("InitProcGlobal nNumaNodes: %d, inheritThreadPool: %d, groupNum: %d", @@ -356,6 +372,12 @@ void InitProcGlobal(void) securec_check(rc, "", ""); } + int thread_pool_stream_proc_num = 0; + if (g_threadPoolControler != NULL) { + thread_pool_stream_proc_num = GetThreadPoolStreamProcNum(); + ereport(LOG, (errmsg("Get stream thread proc num [%d].", thread_pool_stream_proc_num))); + } + for (i = 0; (unsigned int)(i) < TotalProcs; i++) { /* Common initialization for all PGPROCs, regardless of type. * @@ -386,21 +408,21 @@ void InitProcGlobal(void) * search. PGPROCs for prepared transactions are added to a free list * by TwoPhaseShmemInit(). 
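InitProcGlobal now lays fpLockBits and fpRelId out as MAXALIGN'd tails of the same PGPROC allocation, so one palloc serves the struct and both fast-path arrays. A generic sketch of carving a single allocation into aligned sub-arrays (8-byte MAXALIGN assumed):

```
#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr size_t MaxAlign(size_t n)
{
    return (n + 7) & ~static_cast<size_t>(7); /* 8-byte MAXALIGN assumed */
}

struct Header {
    int logictid;
    uint64_t* bits; /* plays the role of fpLockBits */
    uint32_t* tags; /* plays the role of fpRelId */
};

/* One allocation serves the header and both tail arrays; the sub-pointers are
 * fixed, aligned offsets, just like fpLockBitsOffset / fpRelIdOffset above. */
Header* AllocWithTails(size_t nBits, size_t nTags)
{
    size_t off1 = MaxAlign(sizeof(Header));
    size_t off2 = off1 + MaxAlign(nBits * sizeof(uint64_t));
    size_t total = off2 + MaxAlign(nTags * sizeof(uint32_t));

    char* raw = new char[total];
    std::memset(raw, 0, total);
    Header* h = reinterpret_cast<Header*>(raw);
    h->bits = reinterpret_cast<uint64_t*>(raw + off1);
    h->tags = reinterpret_cast<uint32_t*>(raw + off2);
    return h;
}
```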
         */
-        if (i < g_instance.shmem_cxt.MaxConnections + AUXILIARY_BACKENDS) {
+        if (i < g_instance.shmem_cxt.MaxConnections + thread_pool_stream_proc_num + AUXILIARY_BACKENDS) {
            /* PGPROC for normal backend and auxiliary backend, add to freeProcs list */
            procs[i]->links.next = (SHM_QUEUE *)g_instance.proc_base->freeProcs;
            g_instance.proc_base->freeProcs = procs[i];
-        } else if (i < g_instance.shmem_cxt.MaxConnections + AUXILIARY_BACKENDS +
+        } else if (i < g_instance.shmem_cxt.MaxConnections + thread_pool_stream_proc_num + AUXILIARY_BACKENDS +
            g_instance.attr.attr_sql.job_queue_processes + 1) {
            /* PGPROC for pg_job backend, add to pgjobfreeProcs list, 1 for Job Schedule Launcher */
            procs[i]->links.next = (SHM_QUEUE *)g_instance.proc_base->pgjobfreeProcs;
            g_instance.proc_base->pgjobfreeProcs = procs[i];
-        } else if (i < g_instance.shmem_cxt.MaxConnections + AUXILIARY_BACKENDS +
+        } else if (i < g_instance.shmem_cxt.MaxConnections + thread_pool_stream_proc_num + AUXILIARY_BACKENDS +
            g_instance.attr.attr_sql.job_queue_processes + 1 + NUM_DCF_CALLBACK_PROCS) {
            /* PGPROC for external thread, add to externalFreeProcs list */
            procs[i]->links.next = (SHM_QUEUE *)g_instance.proc_base->externalFreeProcs;
            g_instance.proc_base->externalFreeProcs = procs[i];
-        } else if (i < g_instance.shmem_cxt.MaxConnections + AUXILIARY_BACKENDS +
+        } else if (i < g_instance.shmem_cxt.MaxConnections + thread_pool_stream_proc_num + AUXILIARY_BACKENDS +
            g_instance.attr.attr_sql.job_queue_processes + 1 + NUM_DCF_CALLBACK_PROCS + NUM_CMAGENT_PROCS) {
            /*
             * This pointer indicates the first position of cm agent's procs.
@@ -409,7 +431,7 @@
             */
            procs[i]->links.next = (SHM_QUEUE*)g_instance.proc_base->cmAgentFreeProcs;
            g_instance.proc_base->cmAgentFreeProcs = procs[i];
-        } else if (i < g_instance.shmem_cxt.MaxConnections + AUXILIARY_BACKENDS +
+        } else if (i < g_instance.shmem_cxt.MaxConnections + thread_pool_stream_proc_num + AUXILIARY_BACKENDS +
            g_instance.attr.attr_sql.job_queue_processes + 1 + NUM_CMAGENT_PROCS + g_max_worker_processes +
            NUM_DCF_CALLBACK_PROCS) {
            procs[i]->links.next = (SHM_QUEUE*)g_instance.proc_base->bgworkerFreeProcs;
@@ -428,6 +450,9 @@
            SHMQueueInit(&(procs[i]->myProcLocks[j]));
 
        procs[i]->logictid = i;
+        /* Initialize fast path slots memory */
+        procs[i]->fpLockBits = (uint64*)((char*)procs[i] + fpLockBitsOffset);
+        procs[i]->fpRelId = (FastPathTag*)((char*)procs[i] + fpRelIdOffset);
 
        /* Initialize lockGroupMembers list. */
        dlist_init(&procs[i]->lockGroupMembers);
@@ -482,14 +507,99 @@ PGPROC *GetFreeProc()
    return current;
 }
 
+/*
+ * If no free proc is available for cm_agent, print all thread status information.
+ * Information includes application name, pid, sessionid, wait status, etc.
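+ * Each matching entry is written to the server log via ereport(LOG, ...), so an
+ * operator can see which cm_agent sessions are holding procs when the free list
+ * is nearly exhausted.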
+ */
+void PgStatCMAThreadStatus()
+{
+    const char* appName = "cm_agent";
+    MemoryContext oldContext = MemoryContextSwitchTo(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_EXECUTOR));
+    /* get all threads whose application name is 'cm_agent' from the global status entries */
+    PgBackendStatusNode* result = pgstat_get_backend_status_by_appname(appName, NULL);
+
+    if (result == NULL) {
+        (void)MemoryContextSwitchTo(oldContext);
+        return;
+    }
+
+    PgBackendStatusNode* tempNode = result;
+    tempNode = tempNode->next;
+
+    while (tempNode != NULL) {
+        PgBackendStatus* beentry = tempNode->data;
+        tempNode = tempNode->next;
+        if (beentry == NULL) {
+            continue;
+        }
+
+        char* wait_status = getThreadWaitStatusDesc(beentry);
+        ereport(LOG, (errmsg("Print cm_agent thread information when procs are about to be unavailable, node_name<%s>,"
+            " datid<%u>, app_name<%s>, query_id<%lu>, tid<%lu>, lwtid<%d>, parent_sessionid<%lu>, "
+            "thread_level<%d>, wait_status<%s>",
+            g_instance.attr.attr_common.PGXCNodeName,
+            beentry->st_databaseid,
+            beentry->st_appname ? beentry->st_appname : "unnamed thread",
+            beentry->st_queryid,
+            beentry->st_procpid,
+            beentry->st_tid,
+            beentry->st_parent_sessionid,
+            beentry->st_thread_level,
+            wait_status)));
+
+        pfree_ext(wait_status);
+    }
+
+    /* Free node list memory */
+    FreeBackendStatusNodeMemory(result);
+    (void)MemoryContextSwitchTo(oldContext);
+}
+
 /*
  * GetFreeCMAgentProc -- try to find the first free CM Agent's PGPROC
  */
 PGPROC* GetFreeCMAgentProc()
 {
-    PGPROC* current = g_instance.proc_base->cmAgentFreeProcs;
-    ereport(DEBUG5, (errmsg("Get free proc from CMA-proc list.")));
-    return current;
+    /* sleep 100ms per retry while the proc list has not been initialized */
+    const int sleepTime = 100000;
+    const int maxRepeatTimes = 10;
+    /* Set threshold value. If the number of consumed procs exceeds the threshold, send a signal to print the thread stacks */
+    const float procThreshold = 0.8;
+    uint32 procWarningCount = NUM_CMAGENT_PROCS * procThreshold;
+    int times = 0;
+    while ((g_instance.conn_cxt.CurCMAProcCount == 0) && (g_instance.proc_base->cmAgentFreeProcs == NULL)) {
+        times++;
+        pg_usleep(sleepTime);
+        /* Check for interrupts in order to receive a cancel signal and break the loop */
+        CHECK_FOR_INTERRUPTS();
+        ereport(WARNING, (errmsg("CMA-proc list has not been initialized completely, current %d times, total %d us period",
+            times, times * sleepTime)));
+        if (times >= maxRepeatTimes) {
+            break;
+        }
+    }
+
+    PGPROC* cmaProc = g_instance.proc_base->cmAgentFreeProcs;
+
+    if (cmaProc != NULL) {
+        (void)pg_atomic_add_fetch_u32(&g_instance.conn_cxt.CurCMAProcCount, 1);
+        SpinLockRelease(&g_instance.conn_cxt.ConnCountLock);
+        ereport(DEBUG5, (errmsg("Get free proc from CMA-proc list, proc location is %p. Current proc count %d",
+            cmaProc, g_instance.conn_cxt.CurCMAProcCount)));
+    }
+
+    /*
+     * If the number of procs consumed by cm_agent exceeds the threshold, print the wait status
+     * of all threads whose appname equals 'cm_agent'
+     */
+    if (g_instance.conn_cxt.CurCMAProcCount >= procWarningCount) {
+        ereport(WARNING, (errmsg("Get free proc from CMA-proc list, proc location is %p."
+            " Current proc count %d is more than threshold %d. 
Ready to print thread wait status",
+            cmaProc, g_instance.conn_cxt.CurCMAProcCount, procWarningCount)));
+        PgStatCMAThreadStatus();
+    }
+
+    return cmaProc;
 }
 
 /* Release the child slot in some cases; other roles will release the slot in CleanupBackend */
@@ -505,6 +615,27 @@ static inline void ReleaseChildSlot(void)
    }
 }
 
+static void GetProcFromFreeList()
+{
+    if (IsAnyAutoVacuumProcess()) {
+        t_thrd.proc = g_instance.proc_base->autovacFreeProcs;
+    } else if (IsJobSchedulerProcess() || IsJobWorkerProcess()) {
+        t_thrd.proc = g_instance.proc_base->pgjobfreeProcs;
+    } else if (IsBgWorkerProcess()) {
+        t_thrd.proc = g_instance.proc_base->bgworkerFreeProcs;
+    } else if (t_thrd.dcf_cxt.is_dcf_thread) {
+        t_thrd.proc = g_instance.proc_base->externalFreeProcs;
+    } else if (u_sess->libpq_cxt.IsConnFromCmAgent) {
+        t_thrd.proc = GetFreeCMAgentProc();
+    } else {
+#ifndef __USE_NUMA
+        t_thrd.proc = g_instance.proc_base->freeProcs;
+#else
+        t_thrd.proc = GetFreeProc();
+#endif
+    }
+}
+
 /*
  * InitProcess -- initialize a per-process data structure for this backend
  */
@@ -542,23 +673,7 @@ void InitProcess(void)
    set_spins_per_delay(g_instance.proc_base->spins_per_delay);
 #endif
 
-    if (IsAnyAutoVacuumProcess()) {
-        t_thrd.proc = g_instance.proc_base->autovacFreeProcs;
-    } else if (IsJobSchedulerProcess() || IsJobWorkerProcess()) {
-        t_thrd.proc = g_instance.proc_base->pgjobfreeProcs;
-    } else if (IsBgWorkerProcess()) {
-        t_thrd.proc = g_instance.proc_base->bgworkerFreeProcs;
-    } else if (t_thrd.dcf_cxt.is_dcf_thread) {
-        t_thrd.proc = g_instance.proc_base->externalFreeProcs;
-    } else if (u_sess->libpq_cxt.IsConnFromCmAgent) {
-        t_thrd.proc = GetFreeCMAgentProc();
-    } else {
-#ifndef __USE_NUMA
-        t_thrd.proc = g_instance.proc_base->freeProcs;
-#else
-        t_thrd.proc = GetFreeProc();
-#endif
-    }
+    GetProcFromFreeList();
 
    if (t_thrd.proc != NULL) {
        t_thrd.myLogicTid = t_thrd.proc->logictid;
@@ -600,11 +715,19 @@
         */
        ReleaseChildSlot();
 
+        char cmaConnNumInfo[CONNINFOLEN];
+        if (u_sess->libpq_cxt.IsConnFromCmAgent) {
+            int rc = sprintf_s(cmaConnNumInfo, CONNINFOLEN, "All CMA proc [%d], uses[%d];",
+                NUM_CMAGENT_PROCS, g_instance.conn_cxt.CurCMAProcCount);
+            securec_check_ss(rc, "\0", "\0");
+        }
+
        ereport(FATAL, (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
            errmsg("No free proc is available to create a new connection for %s. Please check whether the IP "
-                "address and port are available, and the CN/DN process can be connected",
-                u_sess->proc_cxt.applicationName)));
+                "address and port are available, and the CN/DN process can be connected. %s",
+                u_sess->proc_cxt.applicationName,
+                (u_sess->libpq_cxt.IsConnFromCmAgent) ?
cmaConnNumInfo : ""))); } #ifdef __USE_NUMA @@ -648,13 +771,14 @@ void InitProcess(void) t_thrd.proc->lxid = InvalidLocalTransactionId; t_thrd.proc->fpVXIDLock = false; t_thrd.proc->fpLocalTransactionId = InvalidLocalTransactionId; - t_thrd.proc->fpLockBits = 0; + FAST_PATH_SET_LOCKBITS_ZERO(t_thrd.proc); t_thrd.proc->commitCSN = 0; t_thrd.pgxact->handle = InvalidTransactionHandle; t_thrd.pgxact->xid = InvalidTransactionId; t_thrd.pgxact->next_xid = InvalidTransactionId; t_thrd.pgxact->xmin = InvalidTransactionId; t_thrd.pgxact->csn_min = InvalidCommitSeqNo; + t_thrd.pgxact->csn_dr = InvalidCommitSeqNo; t_thrd.pgxact->prepare_xid = InvalidTransactionId; t_thrd.proc->pid = t_thrd.proc_cxt.MyProcPid; /* if enable thread pool, session id will be overwritten at coupling session */ @@ -892,6 +1016,7 @@ void InitAuxiliaryProcess(void) t_thrd.pgxact->next_xid = InvalidTransactionId; t_thrd.pgxact->xmin = InvalidTransactionId; t_thrd.pgxact->csn_min = InvalidCommitSeqNo; + t_thrd.pgxact->csn_dr = InvalidCommitSeqNo; t_thrd.proc->backendId = InvalidBackendId; t_thrd.proc->databaseId = InvalidOid; t_thrd.proc->roleId = InvalidOid; @@ -1147,6 +1272,9 @@ static void ProcPutBackToFreeList() } else if (u_sess->libpq_cxt.IsConnFromCmAgent) { t_thrd.proc->links.next = (SHM_QUEUE*)g_instance.proc_base->cmAgentFreeProcs; g_instance.proc_base->cmAgentFreeProcs = t_thrd.proc; + (void)pg_atomic_sub_fetch_u32(&g_instance.conn_cxt.CurCMAProcCount, 1); + ereport(DEBUG5, (errmsg("Proc exit, put cm_agent to free list, current cm_agent proc count is %d", + g_instance.conn_cxt.CurCMAProcCount))); if (u_sess->proc_cxt.PassConnLimit) { SpinLockAcquire(&g_instance.conn_cxt.ConnCountLock); g_instance.conn_cxt.CurCMAConnCount--; @@ -1249,6 +1377,8 @@ static void ProcKill(int code, Datum arg) (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("failed to release mutex lock for deleMemContextMutex."))); clean_proc_dw_buf(); + /* Clean subxid cache if needed. */ + ProcSubXidCacheClean(); pthread_mutex_lock(&g_instance.proc_base_lock); /* @@ -1269,8 +1399,6 @@ static void ProcKill(int code, Datum arg) errno_t rc = memset_s(t_thrd.proc->myProgName, sizeof(t_thrd.proc->myProgName), 0, sizeof(t_thrd.proc->myProgName)); securec_check(rc, "", ""); - /* Clean subxid cache if needed. */ - ProcSubXidCacheClean(); /* PGPROC struct isn't mine anymore */ t_thrd.proc = NULL; @@ -1519,7 +1647,7 @@ void CancelBlockedRedistWorker(LOCK* lock, LOCKMODE lockmode) * P() on the semaphore should put us to sleep. The process * semaphore is normally zero, so when we try to acquire it, we sleep. */ -int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_update) +int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_update, int waitSec) { LOCKMODE lockmode = locallock->tag.mode; LOCK* lock = locallock->lock; @@ -1861,7 +1989,11 @@ int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_u /* ereport when we reach lock wait timeout to avoid distributed deadlock. 
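     * When the caller supplied an explicit waitSec (a per-statement lock wait limit),
+     * report ERRCODE_LOCK_NOT_AVAILABLE instead of the generic lock-wait-timeout
+     * error below.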
 */
    if (t_thrd.storage_cxt.deadlock_state == DS_LOCK_TIMEOUT) {
-        ereport(ERROR, (errcode(ERRCODE_LOCK_WAIT_TIMEOUT),
+        if (waitSec > 0) {
+            ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+                errmsg("could not obtain lock on row in relation, waitSec = %d", waitSec)));
+        } else {
+            ereport(ERROR, (errcode(ERRCODE_LOCK_WAIT_TIMEOUT),
            (errmsg("Lock wait timeout: thread %lu on node %s waiting for %s on %s after %ld.%03d ms",
                t_thrd.proc_cxt.MyProcPid, g_instance.attr.attr_common.PGXCNodeName, modename, buf.data, msecs,
                usecs),
@@ -1873,6 +2005,7 @@ int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_u
                pgstat_get_backend_current_activity(t_thrd.storage_cxt.conflicting_lock_thread_id, false),
                t_thrd.storage_cxt.conflicting_lock_by_holdlock ? " hold " : " requested ",
                t_thrd.storage_cxt.conflicting_lock_mode_name))));
+        }
    }
 
    /*
@@ -1889,12 +2022,15 @@
     * Set timer so we can wake up after awhile and check for a lock acquire
     * time out. If time out, ereport and abort current transaction.
     */
+    int needWaitTime = Max(1000, (allow_con_update ? u_sess->attr.attr_storage.LockWaitUpdateTimeout :
+        u_sess->attr.attr_storage.LockWaitTimeout) - u_sess->attr.attr_storage.DeadlockTimeout);
+    if (waitSec > 0) {
+        needWaitTime = Max(1, (waitSec * 1000) - u_sess->attr.attr_storage.DeadlockTimeout);
+    }
+
    if (myWaitStatus == STATUS_WAITING && u_sess->attr.attr_storage.LockWaitTimeout > 0 &&
        t_thrd.storage_cxt.deadlock_timeout_active == false) {
-        if (!enable_lockwait_sig_alarm(
-            Max(1000, (allow_con_update ? u_sess->attr.attr_storage.LockWaitUpdateTimeout :
-            u_sess->attr.attr_storage.LockWaitTimeout) -
-            u_sess->attr.attr_storage.DeadlockTimeout))) {
+        if (!enable_lockwait_sig_alarm(needWaitTime)) {
            ereport(FATAL, (errcode(ERRCODE_SYSTEM_ERROR), errmsg("could not set timer for process wakeup")));
        }
    }
@@ -2377,11 +2513,10 @@ bool enable_session_sig_alarm(int delayms)
        return true;
    }
    /* disable session timeout */
-    if (u_sess->attr.attr_common.SessionTimeout == 0 && IS_SINGLE_NODE) {
+    if (u_sess->attr.attr_common.SessionTimeout == 0) {
        return true;
    }
-    /* if disable sessiontimeout in cluster, we reset gtm xmin per 10 min when session is idle */
-    int delay_time_ms = u_sess->attr.attr_common.SessionTimeout ? delayms : 600 * 1000;
+    int delay_time_ms = delayms;
 
    /* Set session_timeout flag true. */
    u_sess->storage_cxt.session_timeout_active = true;
@@ -2426,13 +2561,7 @@ bool disable_session_sig_alarm(void)
    /* already disable */
    if (!u_sess->attr.attr_common.SessionTimeout && \
-        (!u_sess->storage_cxt.session_timeout_active || IS_SINGLE_NODE)) {
-#ifdef ENABLE_MULTIPLE_NODES
-        /* has already try to reset gtm/single_node xmin, log here to avoid to
-         * interrupt atomic memory opertion in readcomamd.
- */ - ereport(LOG, (errmsg("reset handle xmin."))); -#endif + (!u_sess->storage_cxt.session_timeout_active)) { return true; } /* @@ -2609,13 +2738,6 @@ static void CheckSessionTimeout(void) ereport(WARNING, (errmsg("Session unused timeout."))); (void)gs_signal_canceltimer(); (void)gs_signal_send(t_thrd.proc_cxt.MyProcPid, SIGTERM); - } else { - /* cannot do any log here, memory operation is dangerous */ - if (gs_signal_canceltimer()) - ereport(FATAL, - (errcode(ERRCODE_SYSTEM_ERROR), - errmsg("could not disable timer for reset txn xmin in Read Committed Mode."))); - ResetGtmHandleXmin(GetCurrentTransactionKeyIfAny()); } } } diff --git a/src/gausskernel/storage/mot/core/Makefile.local b/src/gausskernel/storage/mot/core/Makefile.local index 5cdaaff10..d69fa3750 100644 --- a/src/gausskernel/storage/mot/core/Makefile.local +++ b/src/gausskernel/storage/mot/core/Makefile.local @@ -158,7 +158,7 @@ DEPS := $(OBJS:.o=.d) ifeq ($(UNDERPG), yes) ifeq ($(SECURE), yes) CFLAGS += -DMOT_SECURE - LDFLAGS += -L$(LIBOBS_LIB_PATH) -lsecurec + LDFLAGS += -L$(LIBOBS_LIB_PATH) -l$(SECURE_C_CHECK) endif endif diff --git a/src/gausskernel/storage/mot/jit_exec/jit_llvm.cpp b/src/gausskernel/storage/mot/jit_exec/jit_llvm.cpp index 838cf77b4..cb7482926 100644 --- a/src/gausskernel/storage/mot/jit_exec/jit_llvm.cpp +++ b/src/gausskernel/storage/mot/jit_exec/jit_llvm.cpp @@ -33,9 +33,11 @@ #include "storage/mot/jit_exec.h" #include "jit_llvm.h" +#ifdef ENABLE_LLVM_COMPILE // for checking if LLVM_ENABLE_DUMP is defined and for using LLVM_VERSION_STRING #include "llvm/Config/llvm-config.h" #include "llvm/Support/Host.h" +#endif extern bool GlobalCodeGenEnvironmentSuccess; diff --git a/src/gausskernel/storage/mot/jit_exec/jit_llvm_query_codegen.cpp b/src/gausskernel/storage/mot/jit_exec/jit_llvm_query_codegen.cpp index f6a65fdba..d931d30c2 100644 --- a/src/gausskernel/storage/mot/jit_exec/jit_llvm_query_codegen.cpp +++ b/src/gausskernel/storage/mot/jit_exec/jit_llvm_query_codegen.cpp @@ -35,8 +35,10 @@ #include "mot_error.h" #include "utilities.h" +#ifdef ENABLE_LLVM_COMPILE // for checking if LLVM_ENABLE_DUMP is defined and for using LLVM_VERSION_STRING #include "llvm/Config/llvm-config.h" +#endif namespace JitExec { DECLARE_LOGGER(JitLlvmQueryCodegen, JitExec) diff --git a/src/gausskernel/storage/page/Makefile b/src/gausskernel/storage/page/Makefile index d2b75b9a9..844720b5c 100644 --- a/src/gausskernel/storage/page/Makefile +++ b/src/gausskernel/storage/page/Makefile @@ -9,6 +9,6 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif endif -OBJS = bufpage.o checksum.o itemptr.o pagecompress.o checksum_impl.o +OBJS = bufpage.o checksum.o itemptr.o pagecompress.o checksum_impl.o pageparse.o gs_xlogdump.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/page/bufpage.cpp b/src/gausskernel/storage/page/bufpage.cpp index 8e599e4b6..5cf27f813 100644 --- a/src/gausskernel/storage/page/bufpage.cpp +++ b/src/gausskernel/storage/page/bufpage.cpp @@ -145,7 +145,6 @@ bool PageHeaderIsValid(PageHeader page) /* Check normal case */ if (PageGetPageSize(page) == BLCKSZ && (PageGetPageLayoutVersion(page) == PG_COMM_PAGE_LAYOUT_VERSION || - PageGetPageLayoutVersion(page) == PG_PAGE_4B_LAYOUT_VERSION || PageGetPageLayoutVersion(page) == PG_HEAP_PAGE_LAYOUT_VERSION || PageGetPageLayoutVersion(page) == PG_SEGMENT_PAGE_LAYOUT_VERSION) && (page->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && page->pd_lower >= headersize && @@ -269,42 +268,6 @@ Size PageGetExactFreeSpace(Page page) return (Size)(uint32)space; } - 
-static void upgrade_page_ver_4_to_5(Page page) -{ - HeapPageHeader phdr = (HeapPageHeader)page; - - PageSetPageSizeAndVersion(page, BLCKSZ, PG_HEAP_PAGE_LAYOUT_VERSION); - - phdr->pd_xid_base = 0; - phdr->pd_multi_base = 0; - ereport(DEBUG1, (errmsg("The page has been upgraded to version %d ", phdr->pd_pagesize_version))); - return; -} - -/* Upgrade the page from PG_PAGE_4B_LAYOUT_VERSION(4) to PG_PAGE_LAYOUT_VERSION(5) */ -void PageLocalUpgrade(Page page) -{ - PageHeader phdr = (PageHeader)page; - errno_t rc = EOK; - Size movesize = phdr->pd_lower - SizeOfPageHeaderData; - - Assert(PageIs4BXidVersion(page)); - - if (movesize > 0) { - rc = memmove_s((char*)page + SizeOfHeapPageHeaderData, - phdr->pd_upper - SizeOfHeapPageHeaderData, - (char*)page + SizeOfPageHeaderData, - phdr->pd_lower - SizeOfPageHeaderData); - - securec_check(rc, "", ""); - } - - /* Update PageHeaderInfo */ - phdr->pd_lower += SizeOfHeapPageUpgradeData; - upgrade_page_ver_4_to_5(page); -} - static inline void AllocPageCopyMem() { if (t_thrd.storage_cxt.pageCopy == NULL) { @@ -473,28 +436,3 @@ void PageSetChecksumInplace(Page page, BlockNumber blkno) ((PageHeader)page)->pd_checksum = pg_checksum_page((char*)page, blkno); } - -/* - * PageGetFreeSpaceForMultipleTuples - * Returns the size of the free (allocatable) space on a page, - * reduced by the space needed for multiple new line pointers. - * - * Note: this should usually only be used on index pages. Use - * PageGetHeapFreeSpace on heap pages. - */ -Size PageGetFreeSpaceForMultipleTuples(Page page, int ntups) -{ - int space; - - /* - * Use signed arithmetic here so that we behave sensibly if pd_lower > - * pd_upper. - */ - space = (int)((PageHeader)page)->pd_upper - (int)((PageHeader)page)->pd_lower; - - if (space < (int)(ntups * sizeof(ItemIdData))) - return 0; - space -= ntups * sizeof(ItemIdData); - - return (Size) space; -} diff --git a/src/gausskernel/storage/page/checksum_impl.cpp b/src/gausskernel/storage/page/checksum_impl.cpp index 43dcb4927..0d306ef2e 100644 --- a/src/gausskernel/storage/page/checksum_impl.cpp +++ b/src/gausskernel/storage/page/checksum_impl.cpp @@ -29,8 +29,10 @@ uint32 pg_checksum_block(char* data, uint32 size) uint32 result = 0; uint32 i, j; +#ifndef ROACH_COMMON /* ensure that the size is compatible with the algorithm */ Assert((size % (sizeof(uint32) * N_SUMS)) == 0); +#endif /* initialize partial checksums to their corresponding offsets */ for (j = 0; j < N_SUMS; j += 2) { diff --git a/src/gausskernel/storage/page/gs_xlogdump.cpp b/src/gausskernel/storage/page/gs_xlogdump.cpp new file mode 100644 index 000000000..3f3e92aa5 --- /dev/null +++ b/src/gausskernel/storage/page/gs_xlogdump.cpp @@ -0,0 +1,523 @@ +/* --------------------------------------------------------------------------------------- + * * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * gs_xlogdump.cpp + * + * IDENTIFICATION + * src/gausskernel/storage/page/gs_xlogdump.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include +#include "c.h" +#include "access/tupdesc.h" +#include "access/xact.h" +#include "access/xlog_basic.h" +#include "access/xlogdefs.h" +#include "access/xlogreader.h" +#include "access/xlog_internal.h" +#include "catalog/catalog.h" +#include "lib/stringinfo.h" +#include "nodes/pg_list.h" +#include "postgres.h" +#include "storage/smgr/relfilenode.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/palloc.h" +#include "pageparse.h" + +typedef struct XLogPrivate { + const char *datadir; + TimeLineID tli; +} XLogPrivate; + +typedef struct XLogFilter { + TransactionId by_xid; + bool by_xid_enabled; + bool by_tablepath_enabled; + bool by_block; + RelFileNode by_relfilenode; + BlockNumber blocknum; +} XLogFilter; + +static void GenerateOutputFileName(char *outputFilename, char *start_lsn_str, char *end_lsn_str) +{ + List *elemlist = NIL; + SplitIdentifierString(start_lsn_str, '/', &elemlist); + char *start_lsn_str_p2 = (char *)lsecond(elemlist); + list_free_ext(elemlist); + SplitIdentifierString(end_lsn_str, '/', &elemlist); + char *end_lsn_str_p2 = (char *)lsecond(elemlist); + int rc = snprintf_s(outputFilename + (int)strlen(outputFilename), MAXFILENAME, MAXFILENAME - 1, "%s/%s_%s.xlog", + t_thrd.proc_cxt.DataDir, start_lsn_str_p2, end_lsn_str_p2); + securec_check_ss(rc, "\0", "\0"); +} + +static void ValidateLSN(char *lsn_str, XLogRecPtr *lsn_ptr) +{ + uint32 hi = 0; + uint32 lo = 0; + validate_xlog_location(lsn_str); + + if (sscanf_s(lsn_str, "%X/%X", &hi, &lo) != TWO) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not parse xlog location \"%s\"", lsn_str))); + *lsn_ptr = (((uint64)hi) << XIDTHIRTYTWO) | lo; +} + +static void ValidateStartEndLSN(char *start_lsn_str, char *end_lsn_str, XLogRecPtr *start_lsn, XLogRecPtr *end_lsn) +{ + ValidateLSN(start_lsn_str, start_lsn); + ValidateLSN(end_lsn_str, end_lsn); + + if (XLByteLT(*end_lsn, *start_lsn)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("start xlog location %X/%X should be smaller than or equal to end xlog location %X/%X", + (uint32)(*start_lsn >> XIDTHIRTYTWO), (uint32)(*start_lsn), (uint32)(*end_lsn >> XIDTHIRTYTWO), + (uint32)(*end_lsn)))); +} + +static XLogRecPtr GetMinLSN() +{ + XLogSegNo lastRemovedSegNo = XLogGetLastRemovedSegno(); + XLogRecPtr current_recptr = (lastRemovedSegNo + 1) * XLogSegSize; + return current_recptr; +} + +static XLogRecPtr GetMaxLSN() +{ + if (RecoveryInProgress()) + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), + errhint("Can't get local max LSN during recovery."))); + XLogRecPtr current_recptr = GetXLogWriteRecPtr(); + return current_recptr; +} + +static void XLogDumpDisplayRecord(XLogReaderState *record, char *strOutput) +{ + errno_t rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "start_lsn: %X/%X \nend_lsn: %X/%X \nxid: " XID_FMT " \nterm: %u \ntotal length: %u \ndesc: %s - ", + (uint32)(record->ReadRecPtr >> XIDTHIRTYTWO), (uint32)record->ReadRecPtr, + (uint32)(record->EndRecPtr >> XIDTHIRTYTWO), (uint32)record->EndRecPtr, XLogRecGetXid(record), + XLogRecGetTerm(record), XLogRecGetTotalLen(record), 
RmgrTable[XLogRecGetRmid(record)].rm_name);
+    securec_check_ss(rc, "\0", "\0");
+    StringInfoData buf;
+    initStringInfo(&buf);
+    RmgrTable[XLogRecGetRmid(record)].rm_desc(&buf, record);
+    rc = strcat_s(strOutput, MAXOUTPUTLEN, buf.data);
+    securec_check(rc, "\0", "\0");
+    if (!XLogRecHasAnyBlockRefs(record)) {
+        rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\n");
+        securec_check(rc, "\0", "\0");
+        return;
+    }
+
+    if (record->max_block_id >= 0) {
+        rc = strcat_s(strOutput, MAXOUTPUTLEN, "\nothers: ");
+        securec_check(rc, "\0", "\0");
+    }
+
+    for (int block_id = 0; block_id <= record->max_block_id; block_id++) {
+        if (!XLogRecHasBlockRef(record, block_id))
+            continue;
+        /* Print format: others: rel %u/%u/%u/%d fork %s blk %u */
+        RelFileNode rnode;
+        ForkNumber forknum;
+        BlockNumber blk;
+        if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk))
+            continue;
+        rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\trel %u/%u/%u",
+            rnode.spcNode, rnode.dbNode, rnode.relNode);
+        securec_check_ss(rc, "\0", "\0");
+        if (IsBucketFileNode(rnode)) { /* check between InvalidBktId and SegmentBktId */
+            rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+                "/%d", rnode.bucketNode);
+            securec_check_ss(rc, "\0", "\0");
+        }
+        rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, ", fork %s",
+            forkNames[forknum]);
+        securec_check_ss(rc, "\0", "\0");
+        rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, ", blk %u", blk);
+        securec_check_ss(rc, "\0", "\0");
+        /* others: lastlsn %X/%X */
+        XLogRecPtr lsn;
+        XLogRecGetBlockLastLsn(record, block_id, &lsn);
+        rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, ", lastlsn %X/%X",
+            (uint32)(lsn >> XIDTHIRTYTWO), (uint32)lsn);
+        securec_check_ss(rc, "\0", "\0");
+    }
+    rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\n");
+    securec_check(rc, "\0", "\0");
+}
+
+void CheckOpenFile(FILE *outputfile, char *outputFilename)
+{
+    if (outputfile == NULL)
+        ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), (errmsg("Cannot read %s", outputFilename))));
+}
+
+void CheckWriteFile(int result, int cnt_len, char *outputFilename)
+{
+    if (result != cnt_len)
+        ereport(ERROR, (errcode(ERRCODE_FILE_WRITE_FAILED), (errmsg("Cannot write %s", outputFilename))));
+}
+
+void CheckCloseFile(int result, char *outputFilename)
+{
+    if (0 != result)
+        ereport(ERROR, (errcode(ERRCODE_IO_ERROR), (errmsg("Cannot close %s", outputFilename))));
+}
+
+static bool CheckValidRecord(XLogReaderState *xlogreader_state, XLogFilter *filter)
+{
+    bool found = false;
+    for (int i = 0; i <= xlogreader_state->max_block_id; i++) {
+        RelFileNode rnode;
+        ForkNumber forknum;
+        BlockNumber blk;
+        if (!XLogRecGetBlockTag(xlogreader_state, i, &rnode, &forknum, &blk))
+            continue;
+        if (RelFileNodeEquals(rnode, filter->by_relfilenode))
+            /* if equal to the specific block, or when checking all blocks, found = true */
+            found = (!filter->by_block) || (blk == filter->blocknum);
+        if (found) {
+            return found;
+        }
+    }
+    return found;
+}
+
+static XLogRecPtr UpdateNextLSN(XLogRecPtr cur_lsn, XLogRecPtr end_lsn, XLogReaderState *xlogreader_state, bool *found)
+{
+    XLogRecPtr next_record = InvalidXLogRecPtr;
+    for (int tryTimes = 0; tryTimes < FIVE; tryTimes++) {
+        XLogRecPtr start_lsn = Max(cur_lsn, (g_instance.comm_cxt.predo_cxt.redoPf.oldest_segment) * XLogSegSize);
+        next_record = XLogFindNextRecord(xlogreader_state, start_lsn);
+        if (!XLByteEQ(next_record,
InvalidXLogRecPtr) && XLByteLT(next_record, end_lsn)) {
+            *found = true;
+            return next_record;
+        }
+    }
+    return next_record;
+}
+
+static void XLogDump(XLogRecPtr start_lsn, XLogRecPtr end_lsn, XLogFilter *filter, char *outputFilename)
+{
+    /* start reading */
+    errno_t rc = EOK;
+    XLogPrivate readprivate;
+    rc = memset_s(&readprivate, sizeof(XLogPrivate), 0, sizeof(XLogPrivate));
+    securec_check_c(rc, "\0", "\0");
+    readprivate.datadir = t_thrd.proc_cxt.DataDir;
+    readprivate.tli = 1;
+    XLogReaderState *xlogreader_state = XLogReaderAllocate(&SimpleXLogPageRead, &readprivate);
+    if (!xlogreader_state)
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+            (errmsg("memory is temporarily unavailable while allocating the xlog reader"))));
+
+    /* get the first valid xlog record location */
+    XLogRecPtr first_record = XLogFindNextRecord(xlogreader_state, start_lsn);
+    /* if we are recycling or removing log files concurrently, we may not find the next record right away.
+     * Hence, we need to update the min_lsn */
+    if (XLByteEQ(first_record, InvalidXLogRecPtr)) {
+        ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            (errmsg("XLogFindNextRecord: could not find a valid record after %X/%X. Retry.",
+                (uint32)(start_lsn >> XIDTHIRTYTWO), (uint32)start_lsn))));
+        bool found = false;
+        first_record = UpdateNextLSN(start_lsn, end_lsn, xlogreader_state, &found);
+        if (!found)
+            ereport(ERROR,
+                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                    (errmsg("XLogFindNextRecord: could not find a valid record between %X/%X and %X/%X.",
+                        (uint32)(start_lsn >> XIDTHIRTYTWO), (uint32)start_lsn,
+                        (uint32)(end_lsn >> XIDTHIRTYTWO), (uint32)end_lsn))));
+    }
+
+    XLogRecPtr valid_start_lsn = first_record;
+    XLogRecPtr valid_end_lsn = valid_start_lsn;
+
+    FILE *outputfile = fopen(outputFilename, "w");
+    CheckOpenFile(outputfile, outputFilename);
+    char *strOutput = (char *)palloc(MAXOUTPUTLEN * sizeof(char));
+    rc = memset_s(strOutput, MAXOUTPUTLEN, 0, MAXOUTPUTLEN);
+    securec_check(rc, "\0", "\0");
+    /* valid first record is not the given one */
+    if (!XLByteEQ(first_record, start_lsn) && (start_lsn % XLogSegSize) != 0) {
+        rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+            "first record is after %X/%X, at %X/%X, skipping over %lu bytes\n", (uint32)(start_lsn >> XIDTHIRTYTWO),
+            (uint32)start_lsn, (uint32)(first_record >> XIDTHIRTYTWO), (uint32)first_record,
+            XLByteDifference(first_record, start_lsn));
+        securec_check_ss(rc, "\0", "\0");
+    }
+    CheckWriteFile(fwrite(strOutput, 1, strlen(strOutput), outputfile), strlen(strOutput), outputFilename);
+    pfree_ext(strOutput);
+
+    int count = 0;
+    char *errormsg = NULL;
+    XLogRecord *record = NULL;
+    while (XLByteLT(xlogreader_state->EndRecPtr, end_lsn)) {
+        record = XLogReadRecord(xlogreader_state, first_record, &errormsg);
+        valid_end_lsn = xlogreader_state->EndRecPtr;
+        if (!record && XLByteLT(valid_end_lsn, end_lsn)) {
+            /* if we are recycling or removing log files concurrently, we may not find the next record right away.
+             * In this case, we try to read from the current oldest xlog file. */
+            bool found = false;
+            XLogRecPtr temp_start_lsn = Max(xlogreader_state->EndRecPtr, start_lsn);
+            first_record = UpdateNextLSN(temp_start_lsn, end_lsn, xlogreader_state, &found);
+            if (found) {
+                ereport(WARNING, (errcode(ERRCODE_WARNING),
+                    (errmsg("We cannot read %X/%X. After retrying, we jump to the next available %X/%X. 
" \ + "The missing part might be recycled or removed.", + (uint32)(temp_start_lsn >> XIDTHIRTYTWO), (uint32)temp_start_lsn, + (uint32)(first_record >> XIDTHIRTYTWO), (uint32)first_record)))); + continue; + } + + if (errormsg != NULL) + ereport(LOG, + (errcode(ERRCODE_LOG), + (errmsg("could not read WAL record at %X/%X: %s", + (uint32)(xlogreader_state->ReadRecPtr >> XIDTHIRTYTWO), + (uint32)xlogreader_state->ReadRecPtr, + errormsg)))); + else + ereport(LOG, + (errcode(ERRCODE_LOG), + (errmsg("could not read WAL record at %X/%X", + (uint32)(xlogreader_state->ReadRecPtr >> XIDTHIRTYTWO), + (uint32)xlogreader_state->ReadRecPtr)))); + break; + } + first_record = InvalidXLogRecPtr; /* No explicit start point; read the record after the one we just read */ + if (filter->by_xid_enabled && filter->by_xid != record->xl_xid) { + continue; + } + + if (filter->by_tablepath_enabled) { /* filter by table path */ + if (!XLogRecHasAnyBlockRefs(xlogreader_state)) { + continue; + } else if (!CheckValidRecord(xlogreader_state, filter)) { + /* at least have one block ref, but not match filter */ + continue; + } + } + strOutput = (char *)palloc(MAXOUTPUTLEN * sizeof(char)); + rc = memset_s(strOutput, MAXOUTPUTLEN, 0, MAXOUTPUTLEN); + securec_check(rc, "\0", "\0"); + + XLogDumpDisplayRecord(xlogreader_state, strOutput); + count++; + + CheckWriteFile(fwrite(strOutput, 1, strlen(strOutput), outputfile), strlen(strOutput), outputFilename); + pfree_ext(strOutput); + } + + XLogReaderFree(xlogreader_state); + strOutput = (char *)palloc(MAXOUTPUTLEN * sizeof(char)); + rc = memset_s(strOutput, MAXOUTPUTLEN, 0, MAXOUTPUTLEN); + securec_check(rc, "\0", "\0"); + + /* Summary(xx total): valid start_lsn: xxx, valid end_lsn: xxx */ + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "\nSummary (%d total): valid start_lsn: %X/%X, valid end_lsn: %X/%X\n", count, + (uint32)(valid_start_lsn >> XIDTHIRTYTWO), (uint32)(valid_start_lsn), (uint32)(valid_end_lsn >> XIDTHIRTYTWO), + (uint32)(valid_end_lsn)); + securec_check_ss(rc, "\0", "\0"); + /* generate output file */ + CheckWriteFile(fwrite(strOutput, 1, strlen(strOutput), outputfile), strlen(strOutput), outputFilename); + CheckCloseFile(fclose(outputfile), outputFilename); + pfree_ext(strOutput); + CloseXlogFile(); +} + +/* There are only two parameters in PG_FUNCTION_ARGS: start_lsn and end_lsn */ +Datum gs_xlogdump_lsn(PG_FUNCTION_ARGS) +{ + errno_t rc = EOK; + /* check user's right */ + const char fName[MAXFNAMELEN] = "gs_xlogdump_lsn"; + CheckUser(fName); + + /* read in parameters */ + char *start_lsn_str = text_to_cstring(PG_GETARG_TEXT_P(0)); + char *end_lsn_str = text_to_cstring(PG_GETARG_TEXT_P(1)); + + /* validate lsn */ + XLogRecPtr start_lsn, end_lsn; + ValidateStartEndLSN(start_lsn_str, end_lsn_str, &start_lsn, &end_lsn); + + char *outputFilename = (char *)palloc(MAXFILENAME * sizeof(char)); + rc = memset_s(outputFilename, MAXFILENAME, 0, MAXFILENAME); + securec_check(rc, "\0", "\0"); + GenerateOutputFileName(outputFilename, start_lsn_str, end_lsn_str); + + /* update start_lsn and end_lsn based on cur min_lsn and max_lsn */ + XLogRecPtr min_lsn = GetMinLSN(); + XLogRecPtr max_lsn = GetMaxLSN(); + + if (XLByteLT(start_lsn, min_lsn)) { + start_lsn = min_lsn; + } + if (XLByteLT(max_lsn, end_lsn)) { + end_lsn = max_lsn; + } + + XLogFilter filter; + rc = memset_s(&filter, sizeof(XLogFilter), 0, sizeof(XLogFilter)); + securec_check_ss(rc, "\0", "\0"); + XLogDump(start_lsn, end_lsn, &filter, outputFilename); + 
PG_RETURN_TEXT_P(cstring_to_text(outputFilename));
+}
+
+/* There is only one parameter in PG_FUNCTION_ARGS: c_xid */
+Datum gs_xlogdump_xid(PG_FUNCTION_ARGS)
+{
+    errno_t rc = EOK;
+    /* check user's right */
+    const char fName[MAXFNAMELEN] = "gs_xlogdump_xid";
+    CheckUser(fName);
+
+    /* read in parameters */
+    TransactionId c_xid = PG_GETARG_TRANSACTIONID(0);
+    /* check parameters */
+    TransactionId topXid = GetTopTransactionId();
+    if (TransactionIdPrecedes(topXid, c_xid))
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            (errmsg("Xid should be less than or equal to %lu.", topXid))));
+
+    /* generate output file name */
+    char *outputFilename = (char *)palloc(MAXFILENAME * sizeof(char));
+    rc = memset_s(outputFilename, MAXFILENAME, 0, MAXFILENAME);
+    securec_check(rc, "\0", "\0");
+    rc = snprintf_s(outputFilename + (int)strlen(outputFilename), MAXFILENAME, MAXFILENAME - 1, "%s/%lu.xlog",
+        t_thrd.proc_cxt.DataDir, c_xid);
+    securec_check_ss(rc, "\0", "\0");
+    /* update start_lsn and end_lsn based on cur min_lsn and max_lsn */
+    XLogRecPtr min_lsn = GetMinLSN();
+    XLogRecPtr max_lsn = GetMaxLSN();
+    XLogFilter filter;
+    rc = memset_s(&filter, sizeof(XLogFilter), 0, sizeof(XLogFilter));
+    securec_check(rc, "\0", "\0");
+    filter.by_xid_enabled = true;
+    filter.by_xid = c_xid;
+    XLogDump(min_lsn, max_lsn, &filter, outputFilename);
+    PG_RETURN_TEXT_P(cstring_to_text(outputFilename));
+}
+
+/* There are only three parameters in PG_FUNCTION_ARGS: path, blocknum, relation_type */
+Datum gs_xlogdump_tablepath(PG_FUNCTION_ARGS)
+{
+    errno_t rc = EOK;
+    /* check user's right */
+    const char fName[MAXFNAMELEN] = "gs_xlogdump_tablepath";
+    CheckUser(fName);
+
+    /* read in parameters */
+    char *path = text_to_cstring(PG_GETARG_TEXT_P(0));
+    int64 blocknum = PG_GETARG_INT64(1);
+    char *relation_type = text_to_cstring(PG_GETARG_TEXT_P(2));
+    /* check parameters */
+    if (blocknum > MaxBlockNumber || blocknum < -1)
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            (errmsg("Blocknum should be between -1 and %u.", MaxBlockNumber))));
+
+    /* update start_lsn and end_lsn based on cur min_lsn and max_lsn */
+    XLogRecPtr min_lsn = GetMinLSN();
+    XLogRecPtr max_lsn = GetMaxLSN();
+    XLogFilter filter;
+    rc = memset_s(&filter, sizeof(XLogFilter), 0, sizeof(XLogFilter));
+    securec_check_c(rc, "\0", "\0");
+    filter.by_tablepath_enabled = true;
+
+    if (blocknum == -1) { /* care all blocks */
+        filter.by_block = false;
+    } else { /* only one block */
+        filter.by_block = true;
+        filter.blocknum = blocknum;
+    }
+
+    /* generate output file name */
+    char *outputFilename = (char *)palloc(MAXFILENAME * sizeof(char));
+    rc = memset_s(outputFilename, MAXFILENAME, 0, MAXFILENAME);
+    securec_check(rc, "\0", "\0");
+    PrepForRead(path, blocknum, relation_type, outputFilename, &(filter.by_relfilenode), false);
+    ValidateParameterPath(filter.by_relfilenode, path);
+    XLogDump(min_lsn, max_lsn, &filter, outputFilename);
+
+    PG_RETURN_TEXT_P(cstring_to_text(outputFilename));
+}
+
+Datum gs_xlogdump_parsepage_tablepath(PG_FUNCTION_ARGS)
+{
+    /* check user's right */
+    const char fName[MAXFNAMELEN] = "gs_xlogdump_parsepage_tablepath";
+    CheckUser(fName);
+
+    /* read in parameters */
+    char *path = text_to_cstring(PG_GETARG_TEXT_P(0));
+    int64 blocknum = PG_GETARG_INT64(1);
+    char *relation_type = text_to_cstring(PG_GETARG_TEXT_P(2));
+    bool read_memory = PG_GETARG_BOOL(3);
+    int rc = -1;
+
+    /* page part, copy path since for segment we will edit path */
+    char *path_cpy = (char *)palloc(MAXFILENAME + 1);
+    rc =
strcpy_s(path_cpy, MAXFILENAME + 1, path);
+    securec_check(rc, "\0", "\0");
+
+    /* To avoid querying the shared buffer and taking LW locks (which would block normal business), */
+    /* force a disk read when parsing all pages. */
+    if (blocknum == -1) {
+        read_memory = false;
+    }
+    char *outputFilenamePage = ParsePage(path_cpy, blocknum, relation_type, read_memory);
+    pfree_ext(path_cpy);
+    /* xlog part */
+    /* update start_lsn and end_lsn based on cur min_lsn and max_lsn */
+    XLogFilter filter;
+    rc = memset_s(&filter, sizeof(XLogFilter), 0, sizeof(XLogFilter));
+    securec_check_c(rc, "\0", "\0");
+
+    if (blocknum == -1) { /* care all blocks */
+        filter.by_block = false;
+    } else { /* only one block */
+        filter.by_block = true;
+        filter.blocknum = blocknum;
+    }
+    filter.by_tablepath_enabled = true;
+    /* generate output file name */
+    char *outputFilename = (char *)palloc(MAXFILENAME * sizeof(char));
+    rc = memset_s(outputFilename, MAXFILENAME, 0, MAXFILENAME);
+    securec_check(rc, "\0", "\0");
+    PrepForRead(path, blocknum, relation_type, outputFilename, &(filter.by_relfilenode), false);
+    XLogRecPtr min_lsn = GetMinLSN();
+    XLogRecPtr max_lsn = GetMaxLSN();
+    XLogDump(min_lsn, max_lsn, &filter, outputFilename);
+
+    int outputLen = strlen(outputFilename) + strlen(outputFilenamePage) + 100;
+    if (outputLen <= 0)
+        ereport(ERROR, (errcode(ERRCODE_IO_ERROR), (errmsg("Cannot generate a valid output file name."))));
+    char *result = (char *)palloc(outputLen * sizeof(char));
+    rc = memset_s(result, outputLen, 0, outputLen);
+    securec_check(rc, "\0", "\0");
+
+    rc = snprintf_s(result + (int)strlen(result), outputLen, outputLen - 1,
+        "Output file for parsing xlog: %s\nOutput file for parsing data page: %s", outputFilename, outputFilenamePage);
+    securec_check_ss(rc, "\0", "\0");
+    pfree_ext(outputFilename);
+    PG_RETURN_TEXT_P(cstring_to_text(result));
+}
diff --git a/src/gausskernel/storage/page/itemptr.cpp b/src/gausskernel/storage/page/itemptr.cpp
index 0529b7e7d..4f3ab240f 100644
--- a/src/gausskernel/storage/page/itemptr.cpp
+++ b/src/gausskernel/storage/page/itemptr.cpp
@@ -35,6 +35,15 @@ bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
        return false;
 }
 
+bool ItemPointerEqualsNoCheck(ItemPointer pointer1, ItemPointer pointer2)
+{
+    if ((ItemPointerGetBlockNumberNoCheck(pointer1) == ItemPointerGetBlockNumberNoCheck(pointer2)) &&
+        (ItemPointerGetOffsetNumberNoCheck(pointer1) == ItemPointerGetOffsetNumberNoCheck(pointer2)))
+        return true;
+    else
+        return false;
+}
+
 /*
  * ItemPointerCompare
  *        Generic btree-style comparison for item pointers.
diff --git a/src/gausskernel/storage/page/pageparse.cpp b/src/gausskernel/storage/page/pageparse.cpp
new file mode 100644
index 000000000..b3e181339
--- /dev/null
+++ b/src/gausskernel/storage/page/pageparse.cpp
@@ -0,0 +1,1205 @@
+/* ---------------------------------------------------------------------------------------
+ * *
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * --------------------------------------------------------------------------------------- + * + * pageparse.cpp + * + * IDENTIFICATION + * src/gausskernel/storage/page/pageparse.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include +#include "access/htup.h" +#include "access/itup.h" +#include "access/nbtree.h" +#include "access/xlogdefs.h" +#include "commands/tablespace.h" +#include "catalog/catalog.h" +#include "knl/knl_variable.h" +#include "miscadmin.h" +#include "nodes/pg_list.h" +#include "pgstat.h" +#include "postgres.h" +#include "postgres_ext.h" +#include "storage/buf/bufmgr.h" +#include "storage/buf/bufpage.h" +#include "storage/buf/buf_internals.h" +#include "storage/checksum.h" +#include "storage/smgr/relfilenode.h" +#include "storage/smgr/segment.h" +#include "storage/smgr/smgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/palloc.h" +#include "utils/relmapper.h" +#include "pageparse.h" + +typedef enum { + BTREE_INDEX = 0, + UBTREE_INDEX, + INDEX_BOTT +} INDEX_TYPE; + +void CheckUser(const char *fName) +{ + if (!superuser() && !(isOperatoradmin(GetUserId()) && u_sess->attr.attr_security.operation_mode)) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("Must be system admin or operator admin in operation mode can use %s.", fName)))); +} + +static void ParseUHeapPageHeader(void *page, BlockNumber blkno, BlockNumber endBlk, char *output); + +static void ParseUHeapPageTDInfo(void *page, char *output); + +static void ParseUHeapPageItem(Item item, UHeapTuple tuple, char *output); + +static void ParseUHeapPageData(void *page, char *output); + +static void ParseUHeapPageSpecialInfo(void *page, char *output); + +static void ParseUHeapPage(void *page, BlockNumber blkno, BlockNumber endBlk, char *output); + +static void ParseIndexPageHeader(void *page, int type, BlockNumber blkno, BlockNumber endBlk, char *output); + +static void ParseIndexPageItem(Item item, int type, uint32 len, char *output); + +static void ParseIndexPageSpecialInfo(void *page, int type, char *output); + +static void ParseIndexPageData(void *page, int type, char *output); + +static void ParseIndexPage(void *page, int type, BlockNumber blkno, BlockNumber endBlk, char *output); + +static void formatBitmap(const unsigned char *start, int len, char bit1, char bit0, char *strOutput) +{ + errno_t rc = EOK; + for (int i = 0; i < len; ++i) { + unsigned char ch = start[i]; + unsigned char bitmask = 1; + /* print 8 bits within a loop */ + do { + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "%c", + ((ch & bitmask) ? 
bit1 : bit0));
+            securec_check_ss(rc, "\0", "\0");
+            bitmask <<= 1;
+        } while (bitmask != 0);
+        rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, " ");
+        securec_check_ss(rc, "\0", "\0");
+    }
+}
+
+/* init RelFileNode and outputFilename */
+void PrepForRead(char *path, int64 blocknum, char *relation_type, char *outputFilename, RelFileNode *relnode,
+    bool parse_page)
+{
+    char *pathFirstpart = (char *)palloc(MAXFNAMELEN * sizeof(char));
+    errno_t rc = memset_s(pathFirstpart, MAXFNAMELEN, 0, MAXFNAMELEN);
+    securec_check(rc, "\0", "\0");
+
+    if ((strcmp(relation_type, "segment") == 0)) {
+        relnode->bucketNode = SegmentBktId;
+        char *bucketNodestr = strstr(path, "_b");
+        if (NULL != bucketNodestr) {
+            bucketNodestr += TWO; /* skip the first two chars: _b */
+            relnode->bucketNode = (int4)pg_strtouint64(bucketNodestr, NULL, TENBASE);
+            rc = strncpy_s(pathFirstpart, strlen(path) - strlen(bucketNodestr) - 1, path,
+                strlen(path) - strlen(bucketNodestr) - TWO);
+            securec_check(rc, "\0", "\0");
+        }
+    } else {
+        relnode->bucketNode = InvalidBktId;
+    }
+    RelFileNodeForkNum relfilenode;
+    if (strlen(pathFirstpart) == 0) {
+        relfilenode = relpath_to_filenode(path);
+    } else {
+        relfilenode = relpath_to_filenode(pathFirstpart);
+    }
+    if (relfilenode.rnode.node.spcNode == 0)
+        ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            (errmsg("The tablespace oid is 0. Please check the first parameter path. "
+                "If you are not sure about the table path, please check pg_relation_filepath."))));
+    RelFileNodeRelCopy(*relnode, relfilenode.rnode.node);
+
+    char *pagesuffix = "page";
+    char *xlogsuffix = "xlog";
+    rc = snprintf_s(outputFilename + (int)strlen(outputFilename), MAXFILENAME, MAXFILENAME - 1, "%s/%u_%u_%u_%ld.%s",
+        t_thrd.proc_cxt.DataDir, relnode->spcNode, relnode->dbNode, relnode->relNode, blocknum,
+        (parse_page ? pagesuffix : xlogsuffix));
+    securec_check_ss(rc, "\0", "\0");
+    pfree_ext(pathFirstpart);
+}
+
+static void ParseHeapHeader(const PageHeader page, char *strOutput, BlockNumber blockNum, BlockNumber block_endpoint)
+{
+    errno_t rc = EOK;
+    rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+        "Page information of block %u/%u", blockNum, block_endpoint);
+    securec_check_ss(rc, "\0", "\0");
+    rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\tpd_lsn: %X/%X",
+        (uint32)(PageGetLSN(page) >> XIDTHIRTYTWO), (uint32)PageGetLSN(page));
+    securec_check_ss(rc, "\0", "\0");
+    bool checksum_matched = false;
+    if (CheckPageZeroCases(page)) {
+        uint16 checksum = pg_checksum_page((char *)page, (BlockNumber)blockNum);
+        checksum_matched = (checksum == page->pd_checksum);
+    }
+
+    rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+        "\n\tpd_checksum: 0x%X, verify %s", page->pd_checksum, checksum_matched ?
"success" : "fail"); + securec_check_ss(rc, "\0", "\0"); + + rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\tpd_flags: "); + securec_check(rc, "\0", "\0"); + if (PageHasFreeLinePointers(page)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "PD_HAS_FREE_LINES "); + securec_check(rc, "\0", "\0"); + } + if (PageIsFull(page)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "PD_PAGE_FULL "); + securec_check(rc, "\0", "\0"); + } + if (PageIsAllVisible(page)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "PD_ALL_VISIBLE "); + securec_check(rc, "\0", "\0"); + } + if (PageIsCompressed(page)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "PD_COMPRESSED_PAGE "); + securec_check(rc, "\0", "\0"); + } + if (PageIsLogical(page)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "PD_LOGICAL_PAGE "); + securec_check(rc, "\0", "\0"); + } + if (PageIsEncrypt(page)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "PD_ENCRYPT_PAGE "); + securec_check(rc, "\0", "\0"); + } + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\tpd_lower: %u, %s", + page->pd_lower, PageIsEmpty(page) ? "empty" : "non-empty"); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\tpd_upper: %u, %s", + page->pd_upper, PageIsNew(page) ? "new" : "old"); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\tpd_special: %u, size %u", + page->pd_special, PageGetSpecialSize(page)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "\n\tPage size & version: %u, %u", (uint16)PageGetPageSize(page), (uint16)PageGetPageLayoutVersion(page)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "\n\tpd_xid_base: %lu, pd_multi_base: %lu", ((HeapPageHeader)(page))->pd_xid_base, + ((HeapPageHeader)(page))->pd_multi_base); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\tpd_prune_xid: %lu", + ((HeapPageHeader)(page))->pd_prune_xid + ((HeapPageHeader)(page))->pd_xid_base); + securec_check_ss(rc, "\0", "\0"); +} + +static void PrintInfomask(HeapTupleHeader tup, char *strOutput) +{ + errno_t rc = EOK; + if (tup->t_infomask & HEAP_HASNULL) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_HASNULL "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_HASVARWIDTH) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_HASVARWIDTH "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_HASEXTERNAL) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_HASEXTERNAL "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_HASOID) { + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "HEAP_HASOID(%d) ", + HeapTupleHeaderGetOid(tup)); + securec_check_ss(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_COMPRESSED) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_COMPRESSED "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_COMBOCID) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_COMBOCID "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_XMAX_EXCL_LOCK) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMAX_EXCL_LOCK "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_XMAX_SHARED_LOCK) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMAX_SHARED_LOCK "); + securec_check(rc, 
"\0", "\0"); + } + if (tup->t_infomask & HEAP_XMIN_COMMITTED) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMIN_COMMITTED "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_XMIN_INVALID) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMIN_INVALID "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_XMAX_COMMITTED) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMAX_COMMITTED "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_XMAX_INVALID) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMAX_INVALID "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_XMAX_IS_MULTI) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_XMAX_IS_MULTI "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask & HEAP_UPDATED) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_UPDATED "); + securec_check(rc, "\0", "\0"); + } + if ((tup->t_infomask & HEAP_HAS_8BYTE_UID)) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_HAS_8BYTE_UID "); + securec_check(rc, "\0", "\0"); + } else { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_HAS_NO_UID "); + securec_check(rc, "\0", "\0"); + } + rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\t\t\tt_infomask2: "); + securec_check(rc, "\0", "\0"); + if (tup->t_infomask2 & HEAP_HOT_UPDATED) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_HOT_UPDATED "); + securec_check(rc, "\0", "\0"); + } + if (tup->t_infomask2 & HEAP_ONLY_TUPLE) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "HEAP_ONLY_TUPLE "); + securec_check(rc, "\0", "\0"); + } +} + +static void ParseTupleHeader(const PageHeader page, uint lineno, char *strOutput) +{ + errno_t rc = EOK; + ItemId lp = PageGetItemId(page, lineno); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "\n\n\t\tTuple #%u is normal: length %u, offset %u", lineno, ItemIdGetLength(lp), ItemIdGetOffset(lp)); + securec_check_ss(rc, "\0", "\0"); + + HeapTupleData dummyTuple; + HeapTupleHeader tup = (HeapTupleHeader)(PageGetItem(page, lp)); + dummyTuple.t_data = tup; + dummyTuple.t_xid_base = ((HeapPageHeader)(page))->pd_xid_base; + dummyTuple.t_multi_base = ((HeapPageHeader)(page))->pd_multi_base; + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "\n\t\t\tt_xmin/t_xmax/t_cid: %lu/%lu/%u", HeapTupleGetRawXmin(&dummyTuple), HeapTupleGetRawXmax(&dummyTuple), + HeapTupleHeaderGetRawCommandId(tup)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, + "\n\t\t\tctid:(block %u/%u, offset %u)", tup->t_ctid.ip_blkid.bi_hi, tup->t_ctid.ip_blkid.bi_lo, + tup->t_ctid.ip_posid); + securec_check_ss(rc, "\0", "\0"); + + rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\t\t\tt_infomask: "); + securec_check(rc, "\0", "\0"); + + PrintInfomask(tup, strOutput); + + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "Attrs Num: %d", + HeapTupleHeaderGetNatts(tup, NULL)); + securec_check_ss(rc, "\0", "\0"); + + rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1, "\n\t\t\tt_hoff: %u", + tup->t_hoff); + securec_check_ss(rc, "\0", "\0"); + rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\t\t\tt_bits: "); + securec_check(rc, "\0", "\0"); + + formatBitmap((const unsigned char *)tup->t_bits, BITMAPLEN(HeapTupleHeaderGetNatts(tup, NULL)), 'V', 'N', + strOutput); +} + +static void ParseHeapPage(const PageHeader page, BlockNumber blockNum, char *strOutput, BlockNumber block_endpoint) +{ + errno_t rc = EOK; + if 
(page->pd_lower < GetPageHeaderSize(page) || page->pd_lower > page->pd_upper ||
+        page->pd_upper > page->pd_special || page->pd_special > BLCKSZ ||
+        page->pd_special != MAXALIGN(page->pd_special)) {
+        rc = snprintf_s(strOutput + (int)strlen(strOutput),
+            MAXOUTPUTLEN,
+            MAXOUTPUTLEN - 1,
+            "The page data is corrupted, corrupted page pointers: lower = %u, upper = %u, special = %u\n",
+            page->pd_lower,
+            page->pd_upper,
+            page->pd_special);
+        securec_check_ss(rc, "\0", "\0");
+        return;
+    }
+
+    ParseHeapHeader(page, strOutput, blockNum, block_endpoint);
+    /* parse tuple header */
+    rc = strcat_s(strOutput, MAXOUTPUTLEN, "\n\n\tHeap tuple information on this page");
+    securec_check(rc, "\0", "\0");
+
+    uint nline = PageGetMaxOffsetNumber((Page)page);
+    uint nunused = 0, nnormal = 0, ndead = 0;
+    for (uint i = (OffsetNumber)1; i <= nline; i++) {
+        ItemId lp = PageGetItemId(page, i);
+        if (ItemIdIsNormal(lp)) {
+            nnormal++;
+            ParseTupleHeader(page, i, strOutput);
+        } else if (ItemIdIsDead(lp)) {
+            ndead++;
+            rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+                "\n\n\t\tTuple #%u is dead: length %u, offset %u", i, ItemIdGetLength(lp), ItemIdGetOffset(lp));
+            securec_check_ss(rc, "\0", "\0");
+        } else {
+            nunused++;
+            rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+                "\n\n\t\tTuple #%u is unused: ", i);
+            securec_check_ss(rc, "\0", "\0");
+        }
+    }
+    rc = snprintf_s(strOutput + (int)strlen(strOutput), MAXOUTPUTLEN, MAXOUTPUTLEN - 1,
+        "\n\tSummary (%u total): %u normal, %u unused, %u dead\n\nNormal Heap Page, special space is %u\n\n", nline,
+        nnormal, nunused, ndead, (uint)PageGetSpecialSize(page));
+    securec_check_ss(rc, "\0", "\0");
+}
+
+static void ParseOnePage(const PageHeader page, BlockNumber blockNum, char *strOutput, char *relation_type,
+    BlockNumber block_endpoint)
+{
+    errno_t rc = EOK;
+    if (strcmp(relation_type, "heap") == 0) {
+        if (PG_HEAP_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) != 0) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                (errmsg("The target page is not heap, the given page version is: %u",
+                    (uint16)PageGetPageLayoutVersion(page)))));
+        }
+        ParseHeapPage(page, blockNum, strOutput, block_endpoint);
+    } else if (strcmp(relation_type, "uheap") == 0) {
+        if (PG_UHEAP_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) != 0) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                (errmsg("The target page is not uheap, the given page version is: %u",
+                    (uint16)PageGetPageLayoutVersion(page)))));
+        }
+        ParseUHeapPage((void *)page, blockNum, block_endpoint, strOutput);
+    } else if (strcmp(relation_type, "btree") == 0) {
+        if (PG_COMM_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) == 0) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                (errmsg("The target page is not btree, the given page version is: %u",
+                    (uint16)PageGetPageLayoutVersion(page)))));
+        }
+        ParseIndexPage((void *)page, BTREE_INDEX, blockNum, block_endpoint, strOutput);
+    } else if (strcmp(relation_type, "ubtree") == 0) {
+        if (PG_COMM_PAGE_LAYOUT_VERSION != (uint16)PageGetPageLayoutVersion(page) || PageGetSpecialSize(page) == 0) {
+            ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                (errmsg("The target page is not ubtree, the given page version is: %u",
+                    (uint16)PageGetPageLayoutVersion(page)))));
+        }
+        ParseIndexPage((void *)page, UBTREE_INDEX, blockNum,
block_endpoint, strOutput); + } else if (strcmp(relation_type, "segment") == 0) { + rc = strcat_s(strOutput, MAXOUTPUTLEN, "Parse segment table."); + securec_check(rc, "\0", "\0"); + ParseHeapPage(page, blockNum, strOutput, block_endpoint); + } else { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_PARAMETER), (errmsg("Only support heap, uheap, btree, ubtree, segment.")))); + } +} + +static void check_rdStatus(SMGR_READ_STATUS rdStatus, BlockNumber blockNum, const PageHeader page, char *strOutput1) +{ + errno_t rc = EOK; + if (rdStatus == SMGR_RD_CRC_ERROR) { + uint16 checksum = pg_checksum_page((char *)page, blockNum); + rc = snprintf_s(strOutput1, SHORTOUTPUTLEN, SHORTOUTPUTLEN - 1, + "\nFor page %u, page verification failed, calculated checksum 0x%X but expected 0x%X.\n", + blockNum, checksum, page->pd_checksum); + securec_check_ss(rc, "\0", "\0"); + } else if (rdStatus == SMGR_RD_NO_BLOCK) { + rc = snprintf_s(strOutput1, SHORTOUTPUTLEN, SHORTOUTPUTLEN - 1, "\tThe page %u does not exist.\n", blockNum); + securec_check_ss(rc, "\0", "\0"); + } +} + +static bool readFromMemory(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, char *relation_type, + char *strOutput, FILE *outputfile, char *outputFilename) +{ + errno_t rc = EOK; + SegPageLocation loc; + loc.blocknum = 0; + /* for segment, update to physical blocknum */ + if (strcmp(relation_type, "segment") == 0) { + loc = seg_get_physical_location(smgr->smgr_rnode.node, forkNum, blockNum); + } + BufferTag new_tag; + INIT_BUFFERTAG(new_tag, smgr->smgr_rnode.node, forkNum, blockNum); + uint32 new_hash = BufTableHashCode(&new_tag); + LWLock *partition_lock = BufMappingPartitionLock(new_hash); + ResourceOwnerEnlargeBuffers(t_thrd.utils_cxt.CurrentResourceOwner); + /* LW lock to avoid concurrent read/write */ + (void)LWLockAcquire(partition_lock, LW_SHARED); + int buf_id = BufTableLookup(&new_tag, new_hash); + + do { + if (buf_id >= 0) { /* read from memory */ + BufferDesc *bufDesc = GetBufferDescriptor(buf_id); + bool valid = (strcmp(relation_type, "segment") == 0) ? SegPinBuffer(bufDesc) : PinBuffer(bufDesc, NULL); + if (!valid) + break; /* pin failed, read disk */ + LWLockRelease(partition_lock); + (void)LWLockAcquire(BufferDescriptorGetContentLock(bufDesc), LW_SHARED); /* acquire content_lock */ + Buffer buf = BufferDescriptorGetBuffer(bufDesc); + const PageHeader page = (const PageHeader)BufferGetPage(buf); + rc = strcat_s(strOutput, MAXOUTPUTLEN, "The target page is from memory. 
"); + securec_check(rc, "\0", "\0"); + if (strcmp(relation_type, "segment") == 0) { + ParseOnePage(page, loc.blocknum, strOutput, relation_type, loc.blocknum); /* physical blocknum */ + LWLockRelease(BufferDescriptorGetContentLock(bufDesc)); /* release content_lock */ + SegUnpinBuffer(bufDesc); + } else { + ParseOnePage(page, blockNum, strOutput, relation_type, blockNum); + LWLockRelease(BufferDescriptorGetContentLock(bufDesc)); /* release content_lock */ + UnpinBuffer(bufDesc, true); + } + CheckWriteFile(fwrite(strOutput, 1, strlen(strOutput), outputfile), strlen(strOutput), outputFilename); + pfree_ext(strOutput); + return true; + } + } while (0); + LWLockRelease(partition_lock); + return false; +} + +static void CheckSegment(RelFileNode *relnode, ForkNumber forkNum) +{ + SegSpace *spc = spc_open(relnode->spcNode, relnode->dbNode, false); + if (spc == NULL || !spc_datafile_exist(spc, 1, forkNum)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errmsg("Page doesn't exist.")))); + BlockNumber size = spc_size(spc, 1, forkNum); + if (relnode->relNode >= size) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The target page %u doesn't exist, the current max is %u.", relnode->relNode, size - 1)))); + RelFileNode *relnodeHead = (RelFileNode *)palloc(sizeof(RelFileNode)); + relnodeHead->spcNode = relnode->spcNode; + relnodeHead->dbNode = relnode->dbNode; + relnodeHead->relNode = 1; + relnodeHead->bucketNode = relnode->bucketNode; + Buffer buffer_temp = ReadBufferFast(spc, *relnodeHead, forkNum, relnode->relNode, RBM_NORMAL); + if (!BufferIsValid(buffer_temp)) + ereport(ERROR, (errcode_for_file_access(), errmsg("Segment Head is invalid %u/%u/%u %d %u", + relnodeHead->spcNode, relnodeHead->dbNode, relnodeHead->relNode, forkNum, relnode->relNode))); + pfree_ext(relnodeHead); + SegmentHead *head = (SegmentHead *)PageGetContents(BufferGetPage(buffer_temp)); + SegReleaseBuffer(buffer_temp); + if (!(IsNormalSegmentHead(head) && relnode->bucketNode == SegmentBktId) && + !(IsBucketMainHead(head) && IsBucketFileNode(*relnode))) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Page header does not match with corresponding segment type.")))); +} + +void ValidateParameterPath(RelFileNode rnode, char *str) +{ + char *path = relpathbackend(rnode, InvalidBackendId, MAIN_FORKNUM); + if ((strcmp(path, str) != 0)) + ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("\"%s\" is invalid input", str))); +} + +char *ParsePage(char *path, int64 blocknum, char *relation_type, bool read_memory) +{ + errno_t rc = EOK; + + /* check parameters */ + if (blocknum > MaxBlockNumber || blocknum < -1) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("Blocknum should be between -1 and %u.", MaxBlockNumber)))); + /* initialize */ + char *strOutput = (char *)palloc(MAXOUTPUTLEN * sizeof(char)); + rc = memset_s(strOutput, MAXOUTPUTLEN, 0, MAXOUTPUTLEN); + securec_check(rc, "\0", "\0"); + + RelFileNode *relnode = (RelFileNode *)palloc(sizeof(RelFileNode)); + char *outputFilename = (char *)palloc(MAXFILENAME * sizeof(char)); + + rc = memset_s(outputFilename, MAXFILENAME, 0, MAXFILENAME); + securec_check(rc, "\0", "\0"); + + PrepForRead(path, blocknum, relation_type, outputFilename, relnode, true); + ValidateParameterPath(*relnode, path); + + FILE *outputfile = fopen(outputFilename, "w"); + CheckOpenFile(outputfile, outputFilename); + + ForkNumber forkNum = MAIN_FORKNUM; + SMgrRelation smgr = smgropen(*relnode, InvalidBackendId, 
GetColumnNum(forkNum));
+ if (strcmp(relation_type, "segment") == 0)
+ CheckSegment(relnode, forkNum);
+ BlockNumber maxBlockNum = smgrnblocks(smgr, forkNum) - 1;
+ if (blocknum > maxBlockNum)
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ (errmsg("Blocknum should be between -1 and %u.", maxBlockNum))));
+
+ pfree_ext(relnode);
+ BlockNumber blockNum = 0;
+ BlockNumber block_endpoint = 0;
+ /* only parse one block, check if it is in memory */
+ if (read_memory) {
+ blockNum = (BlockNumber)blocknum;
+ if (readFromMemory(smgr, forkNum, blockNum, relation_type, strOutput, outputfile, outputFilename)) {
+ /* found in memory */
+ CheckCloseFile(fclose(outputfile), outputFilename);
+ smgrcloseall();
+ return outputFilename;
+ }
+ }
+ /* read from disk */
+ rc = strcat_s(strOutput, MAXOUTPUTLEN, "The target page is from disk. ");
+ securec_check(rc, "\0", "\0");
+ CheckWriteFile(fwrite(strOutput, 1, strlen(strOutput), outputfile), strlen(strOutput), outputFilename);
+ pfree_ext(strOutput);
+ if (blocknum >= 0) { /* only parse one block */
+ blockNum = blocknum;
+ block_endpoint = blocknum;
+ } else { /* blocknum == -1, parse all blocks */
+ block_endpoint = maxBlockNum;
+ }
+ /* if a single block was not specified, loop over all blocks */
+ while (blockNum <= block_endpoint) {
+ char *strOutput = (char *)palloc(MAXOUTPUTLEN * sizeof(char));
+ rc = memset_s(strOutput, MAXOUTPUTLEN, 0, MAXOUTPUTLEN);
+ securec_check(rc, "\0", "\0");
+ char *buffer = (char *)palloc0(BLCKSZ);
+ SMGR_READ_STATUS rdStatus = smgrread(smgr, forkNum, blockNum, buffer);
+ const PageHeader page = (const PageHeader)buffer;
+ char *strOutput1 = (char *)palloc(SHORTOUTPUTLEN * sizeof(char));
+ rc = memset_s(strOutput1, SHORTOUTPUTLEN, 0, SHORTOUTPUTLEN);
+ securec_check(rc, "\0", "\0");
+ check_rdStatus(rdStatus, blockNum, page, strOutput1);
+ CheckWriteFile(fwrite(strOutput1, 1, strlen(strOutput1), outputfile), strlen(strOutput1), outputFilename);
+ pfree_ext(strOutput1);
+ if (strcmp(relation_type, "segment") == 0) {
+ SegPageLocation loc = seg_get_physical_location(smgr->smgr_rnode.node, forkNum, blockNum);
+ SegPageLocation endloc = seg_get_physical_location(smgr->smgr_rnode.node, forkNum, block_endpoint);
+ ParseOnePage(page, loc.blocknum, strOutput, relation_type, endloc.blocknum); /* physical blocknum */
+ } else
+ ParseOnePage(page, blockNum, strOutput, relation_type, block_endpoint);
+
+ CheckWriteFile(fwrite(strOutput, 1, strlen(strOutput), outputfile), strlen(strOutput), outputFilename);
+ pfree_ext(strOutput);
+ pfree_ext(buffer);
+ blockNum++;
+ }
+ CheckCloseFile(fclose(outputfile), outputFilename);
+ smgrcloseall();
+ return outputFilename;
+}
+
+static void ParseUHeapPageHeader(void *page, BlockNumber blkno, BlockNumber endBlk, char *output)
+{
+ errno_t rc = EOK;
+ bool chksumResult = false;
+ UHeapPageHeader pageHeader = NULL;
+
+ if (page == NULL || output == NULL) {
+ return;
+ }
+
+ pageHeader = (UHeapPageHeader)page;
+ rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "Page information of block %u/%u\n",
+ blkno, endBlk);
+ securec_check_ss(rc, "\0", "\0");
+ rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_lsn: %X/%X\n",
+ (uint32)(PageGetLSN(pageHeader) >> XIDTHIRTYTWO), (uint32)PageGetLSN(pageHeader));
+ securec_check_ss(rc, "\0", "\0");
+
+ if (CheckPageZeroCases((PageHeader)pageHeader)) {
+ uint16 checksum = pg_checksum_page((char *)pageHeader, (BlockNumber)blkno);
+ chksumResult = (checksum == pageHeader->pd_checksum);
+ }
+
+ rc = 
snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_checksum: 0x%X, verify %s\n", + pageHeader->pd_checksum, chksumResult ? "success" : "fail"); + securec_check_ss(rc, "\0", "\0"); + + if (UPageHasFreeLinePointers(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PAGE_HAS_FREE_LINES."); + securec_check_ss(rc, "\0", "\0"); + } + + if (UPageIsFull(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PAGE_FULL."); + securec_check_ss(rc, "\0", "\0"); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\tpd_lower: %u, %s\n\tpd_upper: %u, %s\n\tpd_special: %u, size %u\n", pageHeader->pd_lower, + PageIsEmpty(pageHeader) ? "empty" : "non-empty", pageHeader->pd_upper, + PageIsNew(pageHeader) ? "new page" : "old page", pageHeader->pd_special, PageGetSpecialSize(pageHeader)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tPage size & version: %u, %u\n", + (uint16)PageGetPageSize(pageHeader), (uint16)PageGetPageLayoutVersion(pageHeader)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpotential_freespace: %u\n", + pageHeader->potential_freespace); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\ttd_count: %u\n", pageHeader->td_count); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_prune_xid: %lu\n", + pageHeader->pd_prune_xid); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\tpd_xid_base: %lu, pd_multi_base: %lu\n", pageHeader->pd_xid_base, pageHeader->pd_multi_base); + securec_check_ss(rc, "\0", "\0"); + return; +} + +static void ParseUHeapPageTDInfo(void *page, char *output) +{ + TD *tdInfo = NULL; + uint16 tdCount = 0; + UHeapPageTDData *tdPtr = NULL; + UHeapPageHeader pageHeader = NULL; + errno_t rc = EOK; + + if (page == NULL || output == NULL) { + return; + } + + pageHeader = (UHeapPageHeader)page; + tdPtr = (UHeapPageTDData *)PageGetTDPointer(page); + tdCount = pageHeader->td_count; + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\n\tUHeap Page TD information, nTDSlots = %u\n", tdCount); + securec_check_ss(rc, "\0", "\0"); + + for (uint16 i = 0; i < tdCount; i++) { + tdInfo = &(tdPtr->td_info[i]); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\t TD Slot #%d, xid:%lu, urp:%lu\n", i + 1, tdInfo->xactid, tdInfo->undo_record_ptr); + securec_check_ss(rc, "\0", "\0"); + } + return; +} + +static void ParseUHeapPageItem(Item item, UHeapTuple tuple, char *output) +{ + if (item == NULL || tuple == NULL || output == NULL) { + return; + } + + UHeapDiskTuple diskTuple = (UHeapDiskTuple)item; + errno_t rc = EOK; + tuple->disk_tuple = diskTuple; + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\t\t\txid:%lu, td:%d, locker_td %d\n", + UHeapTupleGetRawXid(tuple), diskTuple->td_id, diskTuple->locker_td_id); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\t\t\tFlag:%u", diskTuple->flag); + securec_check_ss(rc, "\0", "\0"); + + if (diskTuple->flag & UHEAP_HAS_NULL) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_HASNULL"); + 
securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_DELETED) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_DELETED\n"); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_INPLACE_UPDATED) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_INPLACE_UPDATED "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_UPDATED) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_UPDATED "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_XID_KEYSHR_LOCK) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_XID_KEYSHR_LOCK "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_XID_NOKEY_EXCL_LOCK) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_XID_NOKEY_EXCL_LOCK "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_XID_EXCL_LOCK) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_XID_EXCL_LOCK "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_MULTI_LOCKERS) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_MULTI_LOCKERS "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & UHEAP_INVALID_XACT_SLOT) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tUHEAP_INVALID_XACT_SLOT "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & SINGLE_LOCKER_XID_IS_LOCK) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tSINGLE_LOCKER_XID_IS_LOCK "); + securec_check_ss(rc, "\0", "\0"); + } + if (diskTuple->flag & SINGLE_LOCKER_XID_IS_SUBXACT) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\t\tSINGLE_LOCKER_XID_IS_SUBXACT "); + securec_check_ss(rc, "\0", "\0"); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tFlag2:"); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\t\tNumber of columns:: %d\n", + UHeapTupleHeaderGetNatts(diskTuple)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\t\t\tHoff: %u\n", diskTuple->t_hoff); + securec_check_ss(rc, "\0", "\0"); + return; +} + +static void ParseUHeapPageData(void *page, char *output) +{ + errno_t rc; + uint32 rowPtrCnt = 0; + uint32 storeCnt = 0; + uint32 unusedCnt = 0; + uint32 normalCnt = 0; + uint32 deadCnt = 0; + uint32 redirectCnt = 0; + Item item; + RowPtr *rowptr; + UHeapTupleData utuple; + UHeapPageHeader pageHeader = NULL; + + if (page == NULL || output == NULL) { + return; + } + + pageHeader = (UHeapPageHeader)page; + if (pageHeader->pd_lower <= SizeOfUHeapPageHeaderData) { + rowPtrCnt = 0; + } else { + rowPtrCnt = + (pageHeader->pd_lower - (SizeOfUHeapPageHeaderData + SizeOfUHeapTDData(pageHeader))) / sizeof(RowPtr); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\tUHeap tuple information on this page\n"); + securec_check_ss(rc, "\0", "\0"); + + for (uint32 i = FirstOffsetNumber; i <= rowPtrCnt; i++) { + rowptr = UPageGetRowPtr(pageHeader, i); + if (RowPtrIsUsed(rowptr)) { + if (RowPtrHasStorage(rowptr)) { + storeCnt++; + } + + if (RowPtrIsNormal(rowptr)) { + rc = snprintf_s(output + 
(int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is normal: length %u, offset %u\n", i, RowPtrGetLen(rowptr), + RowPtrGetOffset(rowptr)); + securec_check_ss(rc, "\0", "\0"); + normalCnt++; + item = UPageGetRowData(pageHeader, rowptr); + UHeapTupleCopyBaseFromPage(&utuple, pageHeader); + ParseUHeapPageItem(item, &utuple, output); + } else if (RowPtrIsDead(rowptr)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is dead: length %u, offset %u", i, RowPtrGetLen(rowptr), RowPtrGetOffset(rowptr)); + securec_check_ss(rc, "\0", "\0"); + deadCnt++; + } else { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is redirected: length %u, offset %u", i, RowPtrGetLen(rowptr), + RowPtrGetOffset(rowptr)); + securec_check_ss(rc, "\0", "\0"); + redirectCnt++; + } + } else { + unusedCnt++; + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\tTuple #%u is unused\n", i); + securec_check_ss(rc, "\0", "\0"); + } + } + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\tSummary (%u total): %u unused, %u normal, %u dead, %u redirect\n", rowPtrCnt, unusedCnt, normalCnt, deadCnt, + redirectCnt); + securec_check_ss(rc, "\0", "\0"); +} + +static void ParseUHeapPageSpecialInfo(void *page, char *output) +{ + errno_t rc = 0; + + if (page == NULL || output == NULL) { + return; + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\tSpecial area information on this page\n"); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\tNot used currently.\n"); + securec_check_ss(rc, "\0", "\0"); +} + +static void ParseUHeapPage(void *page, BlockNumber blkno, BlockNumber endBlk, char *output) +{ + uint16 pageHeaderSize = 0; + UHeapPageHeader pageHeader = NULL; + errno_t rc; + + if (page == NULL || output == NULL) { + return; + } + + /* Parse header of uheap page. */ + pageHeader = (UHeapPageHeader)page; + + /* Check invalidation of uheap page head. */ + pageHeaderSize = GetPageHeaderSize(pageHeader); + if (pageHeader->pd_lower < pageHeaderSize || pageHeader->pd_lower > pageHeader->pd_upper || + pageHeader->pd_upper > pageHeader->pd_special || pageHeader->pd_special > BLCKSZ || + pageHeader->pd_special != MAXALIGN(pageHeader->pd_special)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "The page data is corrupted, corrupted page pointers: lower = %u, upper = %u, special = %u\n", + pageHeader->pd_lower, pageHeader->pd_upper, pageHeader->pd_special); + securec_check_ss(rc, "\0", "\0"); + return; + } + + ParseUHeapPageHeader((void *)pageHeader, blkno, endBlk, output); + if (PageIsNew(pageHeader)) { + return; + } + + /* Parse td slot info of uheap page. */ + ParseUHeapPageTDInfo(pageHeader, output); + /* Parse items of uheap page. */ + ParseUHeapPageData(pageHeader, output); + /* Parse special area of uheap page. */ + ParseUHeapPageSpecialInfo(pageHeader, output); +} + +static void ParseIndexPageHeader(void *page, int type, BlockNumber blkno, BlockNumber endBlk, char *output) +{ + errno_t rc = 0; + bool chksumResult = false; + PageHeader pageHeader = NULL; + + if (page == NULL || output == NULL) { + return; + } + + pageHeader = (PageHeader)page; + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "%s index page information of block %u/%u\n", (type == BTREE_INDEX) ? 
"Btree" : "UBtree", blkno, endBlk); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_lsn: %X/%X\n", + (uint32)(PageGetLSN((Page)pageHeader) >> XIDTHIRTYTWO), (uint32)PageGetLSN((Page)pageHeader)); + securec_check_ss(rc, "\0", "\0"); + + if (CheckPageZeroCases(pageHeader)) { + uint16 checksum = pg_checksum_page((char *)pageHeader, (BlockNumber)blkno); + chksumResult = (checksum == pageHeader->pd_checksum); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_checksum: 0x%X, verify %s\n", + pageHeader->pd_checksum, chksumResult ? "success" : "fail"); + securec_check_ss(rc, "\0", "\0"); + + if (PageHasFreeLinePointers((Page)pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PD_HAS_FREE_LINES."); + securec_check_ss(rc, "\0", "\0"); + } + if (PageIsFull(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PD_PAGE_FULL."); + securec_check_ss(rc, "\0", "\0"); + } + if (PageIsAllVisible(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PD_ALL_VISIBLE."); + securec_check_ss(rc, "\0", "\0"); + } + if (PageIsCompressed(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PD_COMPRESSED_PAGE."); + securec_check_ss(rc, "\0", "\0"); + } + if (PageIsLogical(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PD_LOGICAL_PAGE."); + securec_check_ss(rc, "\0", "\0"); + } + if (PageIsEncrypt(pageHeader)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "PD_ENCRYPT_PAGE."); + securec_check_ss(rc, "\0", "\0"); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\tpd_lower: %u, %s\n\tpd_upper: %u, %s\n\tpd_special: %u, size %u\n", pageHeader->pd_lower, + PageIsEmpty((Page)pageHeader) ? "empty" : "non-empty", pageHeader->pd_upper, + PageIsNew((Page)pageHeader) ? 
"new page" : "old page", pageHeader->pd_special, + PageGetSpecialSize((Page)pageHeader)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tPage size & version: %u, %u\n", + (uint16)PageGetPageSize(page), (uint16)PageGetPageLayoutVersion(page)); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_prune_xid: %lu\n", + pageHeader->pd_prune_xid + ((HeapPageHeader)(pageHeader))->pd_xid_base); + securec_check_ss(rc, "\0", "\0"); + rc = + snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tpd_xid_base: %lu, pd_multi_base: %lu\n", + ((HeapPageHeader)(pageHeader))->pd_xid_base, ((HeapPageHeader)(pageHeader))->pd_multi_base); + securec_check_ss(rc, "\0", "\0"); + + return; +} + +static void ParseIndexPageItem(Item item, int type, uint32 len, char *output) +{ + if (item == NULL || output == NULL) { + return; + } + + errno_t rc = 0; + IndexTuple itup = (IndexTuple)item; + bool hasnull = (itup->t_info & INDEX_NULL_MASK); + unsigned int tuplen = (itup->t_info & INDEX_SIZE_MASK); + if (tuplen != len) { + if (type == UBTREE_INDEX) { + UstoreIndexXid uxid = (UstoreIndexXid)UstoreIndexTupleGetXid(itup); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\t\t\txmin:%d xmax:%d Heap Tid: block %u/%u, offset %u\n", uxid->xmin, uxid->xmax, + itup->t_tid.ip_blkid.bi_hi, itup->t_tid.ip_blkid.bi_lo, itup->t_tid.ip_posid); + securec_check_ss(rc, "\0", "\0"); + } + } else { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\t\t\tHeap Tid: block %u/%u, offset %u\n", itup->t_tid.ip_blkid.bi_hi, itup->t_tid.ip_blkid.bi_lo, + itup->t_tid.ip_posid); + securec_check_ss(rc, "\0", "\0"); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\t\t\tLength: %u", tuplen); + securec_check_ss(rc, "\0", "\0"); + + if (itup->t_info & INDEX_VAR_MASK) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, ", has var-width attrs"); + securec_check_ss(rc, "\0", "\0"); + } + + if (hasnull) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, ", has nulls "); + securec_check_ss(rc, "\0", "\0"); + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n"); + securec_check_ss(rc, "\0", "\0"); + return; +} + +static void ParseIndexPageSpecialInfo(void *page, int type, char *output) +{ + errno_t rc = 0; + BTPageOpaqueInternal opaque = NULL; + UBTPageOpaqueInternal uopaque = NULL; + PageHeader pageHeader = NULL; + + if (page == NULL || output == NULL || type >= INDEX_BOTT) { + return; + } + + pageHeader = (PageHeader)page; + opaque = (BTPageOpaqueInternal)PageGetSpecialPointer((Page)pageHeader); + if (PageGetSpecialSize((Page)pageHeader) > MAXALIGN(sizeof(BTPageOpaqueData))) { + uopaque = (UBTPageOpaqueInternal)opaque; + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n%s index special information:\n", + (type == BTREE_INDEX) ? 
"BTree" : "UBTree"); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tbtree left sibling: %u\n", + opaque->btpo_prev); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tbtree right sibling: %u\n", + opaque->btpo_next); + securec_check_ss(rc, "\0", "\0"); + + if (!P_ISDELETED(opaque)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tbtree tree level: %u\n", + opaque->btpo.level); + securec_check_ss(rc, "\0", "\0"); + } else { + if (uopaque) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tnext txid (deleted): %lu\n", + ((UBTPageOpaque)uopaque)->xact); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tnext txid (deleted): %lu\n", + ((BTPageOpaque)opaque)->xact); + securec_check_ss(rc, "\0", "\0"); + } + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tbtree flag: "); + securec_check_ss(rc, "\0", "\0"); + + if (P_ISLEAF(opaque)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "BTP_LEAF "); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "BTP_INTERNAL "); + securec_check_ss(rc, "\0", "\0"); + } + if (P_ISROOT(opaque)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "BTP_ROOT "); + securec_check_ss(rc, "\0", "\0"); + } + if (P_ISDELETED(opaque)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "BTP_DELETED "); + securec_check_ss(rc, "\0", "\0"); + } + if (P_ISHALFDEAD(opaque)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "BTP_HALF_DEAD "); + securec_check_ss(rc, "\0", "\0"); + } + if (P_HAS_GARBAGE(opaque)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "BTP_HAS_GARBAGE "); + securec_check_ss(rc, "\0", "\0"); + } + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n"); + securec_check_ss(rc, "\0", "\0"); + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tbtree cycle ID: %u\n ", + opaque->btpo_cycleid); + securec_check_ss(rc, "\0", "\0"); + + if (uopaque) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\tubtree active tuples: %d\n", + uopaque->activeTupleCount); + securec_check_ss(rc, "\0", "\0"); + } + + return; +} + +static void ParseIndexPageData(void *page, int type, char *output) +{ + errno_t rc; + uint32 rowPtrCnt = 0; + uint32 storeCnt = 0; + uint32 notusedCnt = 0; + uint32 normalCnt = 0; + uint32 deadCnt = 0; + uint32 redirectCnt = 0; + Item item; + ItemId lp; + PageHeader pageHeader = NULL; + + if (page == NULL || output == NULL) { + return; + } + + pageHeader = (PageHeader)page; + rowPtrCnt = PageGetMaxOffsetNumber((Page)pageHeader); + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\tIndex tuple information on this page\n"); + securec_check_ss(rc, "\0", "\0"); + + for (uint32 i = FirstOffsetNumber; i <= rowPtrCnt; i++) { + lp = PageGetItemId((Page)pageHeader, i); + if (ItemIdIsUsed(lp)) { + if (ItemIdHasStorage(lp)) { + storeCnt++; + } + if (ItemIdIsNormal(lp) || (IndexItemIdIsFrozen(lp))) { + rc = ItemIdIsNormal(lp) ? 
(snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is normal: length %u, offset %u\n", i, ItemIdGetLength(lp), ItemIdGetOffset(lp))) : + (snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is frozen: length %u, offset %u\n", i, ItemIdGetLength(lp), ItemIdGetOffset(lp))); + securec_check_ss(rc, "\0", "\0"); + normalCnt++; + item = PageGetItem((Page)pageHeader, lp); + ParseIndexPageItem(item, type, ItemIdGetLength(lp), output); + } else if (ItemIdIsDead(lp)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is dead: length %u, offset %u", i, ItemIdGetLength(lp), ItemIdGetOffset(lp)); + securec_check_ss(rc, "\0", "\0"); + deadCnt++; + } else { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\n\t\tTuple #%u is redirected: length %u, offset %u", i, ItemIdGetLength(lp), ItemIdGetOffset(lp)); + securec_check_ss(rc, "\0", "\0"); + redirectCnt++; + } + } else { + notusedCnt++; + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, "\n\t\tTuple #%u is unused\n", i); + securec_check_ss(rc, "\0", "\0"); + } + } + + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "\tSummary (%u total): %u unused, %u normal, %u dead, %u redirect\n", rowPtrCnt, notusedCnt, normalCnt, deadCnt, + redirectCnt); + securec_check_ss(rc, "\0", "\0"); + return; +} + +static void ParseIndexPage(void *page, int type, BlockNumber blkno, BlockNumber endBlk, char *output) +{ + PageHeader pageHeader = NULL; + uint16 headersize = 0; + errno_t rc = 0; + + if (page == NULL || output == NULL) { + return; + } + + pageHeader = (PageHeader)page; + headersize = GetPageHeaderSize(pageHeader); + if (pageHeader->pd_lower < headersize || pageHeader->pd_lower > pageHeader->pd_upper || + pageHeader->pd_upper > pageHeader->pd_special || pageHeader->pd_special > BLCKSZ || + pageHeader->pd_special != MAXALIGN(pageHeader->pd_special)) { + rc = snprintf_s(output + (int)strlen(output), MAXOUTPUTLEN, MAXOUTPUTLEN, + "The page data is corrupted, corrupted page pointers: lower = %u, upper = %u, special = %u\n", + pageHeader->pd_lower, pageHeader->pd_upper, pageHeader->pd_special); + securec_check_ss(rc, "\0", "\0"); + return; + } + + ParseIndexPageHeader((void *)page, type, blkno, endBlk, output); + if (PageIsNew(pageHeader)) { + return; + } + + ParseIndexPageData((void *)page, type, output); + ParseIndexPageSpecialInfo((void *)page, type, output); + return; +} + +Datum gs_parse_page_bypath(PG_FUNCTION_ARGS) +{ + /* check user's right */ + const char fName[MAXFNAMELEN] = "gs_parse_page_bypath"; + CheckUser(fName); + + /* read in parameters */ + char *path = text_to_cstring(PG_GETARG_TEXT_P(0)); + int64 blocknum = PG_GETARG_INT64(1); + char *relation_type = text_to_cstring(PG_GETARG_TEXT_P(2)); + bool read_memory = PG_GETARG_BOOL(3); + + /* In order to avoid querying the shared buffer and applying LW locks, blocking the business. 
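Probing the buffer pool takes the buffer-mapping partition lock and the page content lock for every block (see readFromMemory), so a whole-relation parse reads straight from disk.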
*/ + /* In the case of finding all pages, force to check disk */ + if (blocknum == -1) { + read_memory = false; + } + char *outputFilename = ParsePage(path, blocknum, relation_type, read_memory); + PG_RETURN_TEXT_P(cstring_to_text(outputFilename)); +} diff --git a/src/gausskernel/storage/page/pageparse.h b/src/gausskernel/storage/page/pageparse.h new file mode 100644 index 000000000..1c83cd824 --- /dev/null +++ b/src/gausskernel/storage/page/pageparse.h @@ -0,0 +1,54 @@ +/* --------------------------------------------------------------------------------------- + * * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * pageparse.h + * + * IDENTIFICATION + * src/gausskernel/storage/page/pageparse.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef OPENGAUSS_PAGEPARSE_H +#define OPENGAUSS_PAGEPARSE_H + +extern void CheckUser(const char *fName); + +extern void PrepForRead(char *path, int64 blocknum, char *relation_type, char *outputFilename, RelFileNode *relnode, + bool parse_page); + +extern void ValidateParameterPath(RelFileNode rnode, char *str); + +extern char *ParsePage(char *path, int64 blocknum, char *relation_type, bool read_memory); + +extern void CheckOpenFile(FILE *outputfile, char *outputFilename); + +extern void CheckWriteFile(int result, int cnt_len, char *outputFilename); + +extern void CheckCloseFile(int result, char *outputFilename); + +extern void validate_xlog_location(char *str); + +#define MAXFILENAME 4096 +#define MAXFNAMELEN 64 +#define MAXOUTPUTLEN 1048576 +#define TENBASE 10 +#define XIDTHIRTYTWO 32 +#define TWO 2 +#define FIVE 5 +#define SHORTOUTPUTLEN 200 + +#endif /* OPENGAUSS_PAGEPARSE_H */ diff --git a/src/gausskernel/storage/remote/remote_adapter.cpp b/src/gausskernel/storage/remote/remote_adapter.cpp index 400764f27..1ebeaa7fe 100755 --- a/src/gausskernel/storage/remote/remote_adapter.cpp +++ b/src/gausskernel/storage/remote/remote_adapter.cpp @@ -32,6 +32,8 @@ #include "pgstat.h" #include "funcapi.h" #include "postmaster/postmaster.h" +#include "storage/smgr/segment_internal.h" +#include "storage/smgr/segment.h" #include "storage/custorage.h" #include "storage/ipc.h" #include "storage/remote_adapter.h" @@ -39,8 +41,12 @@ #include "utils/guc.h" #include "utils/memutils.h" #include "utils/builtins.h" +#include "utils/aiomem.h" -#define MAX_WAIT_TIMES 5 +const int DEFAULT_WAIT_TIMES = 60; +int ReadFileForRemote(RemoteReadFileKey *key, XLogRecPtr lsn, bytea** fileData, int timeout); + +int ReadCOrCsnFileForRemote(RelFileNode rnode, bytea** fileData); /* * @Description: wait lsn to replay @@ -48,7 +54,7 @@ * @Return: remote read error code * @See also: */ -int XLogWaitForReplay(uint64 primary_insert_lsn) +int XLogWaitForReplay(uint64 primary_insert_lsn, int timeout = DEFAULT_WAIT_TIMES) { int wait_times = 0; @@ -58,7 +64,7 @@ int XLogWaitForReplay(uint64 primary_insert_lsn) /* if primary_insert_lsn > standby_replay_lsn then 
need wait */ while (!XLByteLE(primary_insert_lsn, standby_replay_lsn)) { /* if sleep to much times */ - if (wait_times >= MAX_WAIT_TIMES) { + if (wait_times >= timeout) { ereport(LOG, (errmodule(MOD_REMOTE), errmsg("replay slow. requre lsn %X/%X, replayed lsn %X/%X", @@ -95,6 +101,7 @@ Datum gs_read_block_from_remote(PG_FUNCTION_ARGS) uint64 lsn; bool isForCU = false; bytea* result = NULL; + int timeout = 0; if (GetUserId() != BOOTSTRAP_SUPERUSERID) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be initial account to read files")))); @@ -109,14 +116,22 @@ Datum gs_read_block_from_remote(PG_FUNCTION_ARGS) blockSize = PG_GETARG_UINT32(6); lsn = (uint64)PG_GETARG_TRANSACTIONID(7); isForCU = PG_GETARG_BOOL(8); + timeout = PG_GETARG_INT32(9); + + RepairBlockKey key; + key.relfilenode.spcNode = spcNode; + key.relfilenode.dbNode = dbNode; + key.relfilenode.relNode = relNode; + key.relfilenode.bucketNode = bucketNode; + key.forknum = forkNum; + key.blocknum = blockNum; /* get block from local buffer */ if (isForCU) { /* if request to read CU block, we use forkNum column to replace colid. */ - (void)StandbyReadCUforPrimary(spcNode, dbNode, relNode, forkNum, blockNum, blockSize, lsn, &result); + (void)StandbyReadCUforPrimary(key, blockNum, blockSize, lsn, timeout, &result); } else { - (void)StandbyReadPageforPrimary(spcNode, dbNode, relNode, bucketNode, 0, forkNum, blockNum, blockSize, - lsn, &result); + (void)StandbyReadPageforPrimary(key, blockSize, lsn, &result, timeout, NULL); } if (NULL != result) { @@ -126,53 +141,6 @@ Datum gs_read_block_from_remote(PG_FUNCTION_ARGS) } } -/* - * Read block from buffer from primary, returning it as bytea - */ -Datum gs_read_block_from_remote_compress(PG_FUNCTION_ARGS) -{ - uint32 spcNode; - uint32 dbNode; - uint32 relNode; - int16 bucketNode; - uint16 opt = 0; - int32 forkNum; - uint64 blockNum; - uint32 blockSize; - uint64 lsn; - bool isForCU = false; - bytea* result = NULL; - - if (GetUserId() != BOOTSTRAP_SUPERUSERID) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be initial account to read files")))); - } - /* handle optional arguments */ - spcNode = PG_GETARG_UINT32(0); - dbNode = PG_GETARG_UINT32(1); - relNode = PG_GETARG_UINT32(2); - bucketNode = PG_GETARG_INT16(3); - opt = PG_GETARG_UINT16(4); - forkNum = PG_GETARG_INT32(5); - blockNum = (uint64)PG_GETARG_TRANSACTIONID(6); - blockSize = PG_GETARG_UINT32(7); - lsn = (uint64)PG_GETARG_TRANSACTIONID(8); - isForCU = PG_GETARG_BOOL(9); - /* get block from local buffer */ - if (isForCU) { - /* if request to read CU block, we use forkNum column to replace colid. 
*/ - (void)StandbyReadCUforPrimary(spcNode, dbNode, relNode, forkNum, blockNum, blockSize, lsn, &result); - } else { - (void)StandbyReadPageforPrimary(spcNode, dbNode, relNode, bucketNode, opt, forkNum, blockNum, blockSize, - lsn, &result); - } - - if (NULL != result) { - PG_RETURN_BYTEA_P(result); - } else { - PG_RETURN_NULL(); - } -} - /* * @Description: read cu for primary * @IN spcnode: tablespace id @@ -187,25 +155,27 @@ Datum gs_read_block_from_remote_compress(PG_FUNCTION_ARGS) * @Return: remote read error code * @See also: */ -int StandbyReadCUforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int32 colid, uint64 offset, int32 size, - uint64 lsn, bytea** cudata) +int StandbyReadCUforPrimary(RepairBlockKey key, uint64 offset, int32 size, uint64 lsn, int32 timeout, + bytea** cudata) { Assert(cudata); int ret_code = REMOTE_READ_OK; /* wait request lsn for replay */ - ret_code = XLogWaitForReplay(lsn); - if (ret_code != REMOTE_READ_OK) { - ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("could not redo request lsn."))); - return ret_code; + if (RecoveryInProgress()) { + ret_code = XLogWaitForReplay(lsn, timeout); + if (ret_code != REMOTE_READ_OK) { + ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("could not redo request lsn."))); + return ret_code; + } } - RelFileNode relfilenode {spcnode, dbnode, relnode, InvalidBktId}; + RelFileNode relfilenode {key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, InvalidBktId}; { /* read from disk */ - CFileNode cfilenode(relfilenode, colid, MAIN_FORKNUM); + CFileNode cfilenode(relfilenode, key.forknum, MAIN_FORKNUM); CUStorage* custorage = New(CurrentMemoryContext) CUStorage(cfilenode); CU* cu = New(CurrentMemoryContext) CU(); @@ -221,7 +191,7 @@ int StandbyReadCUforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int32 } else { bytea* buf = (bytea*)palloc0(VARHDRSZ + size); SET_VARSIZE(buf, size + VARHDRSZ); - errno_t rc = memcpy_s(VARDATA(buf), size + 1, cu->m_compressedLoadBuf, size); + errno_t rc = memcpy_s(VARDATA(buf), size, cu->m_compressedLoadBuf, size); if (rc != EOK) { ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("memcpy_s error, retcode=%d", rc))); ret_code = REMOTE_READ_MEMCPY_ERROR; @@ -250,8 +220,8 @@ int StandbyReadCUforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int32 * @Return: remote read error code * @See also: */ -int StandbyReadPageforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int16 bucketnode, uint2 opt, int32 forknum, - uint32 blocknum, uint32 blocksize, uint64 lsn, bytea** pagedata) +int StandbyReadPageforPrimary(RepairBlockKey key, uint32 blocksize, uint64 lsn, bytea** pagedata, + int timeout, const XLogPhyBlock *pblk) { Assert(pagedata); @@ -261,21 +231,62 @@ int StandbyReadPageforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int int ret_code = REMOTE_READ_OK; /* wait request lsn for replay */ - ret_code = XLogWaitForReplay(lsn); - if (ret_code != REMOTE_READ_OK) { - ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("could not redo request lsn."))); - return ret_code; + if (RecoveryInProgress()) { + ret_code = XLogWaitForReplay(lsn, timeout); + if (ret_code != REMOTE_READ_OK) { + ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("could not redo request lsn."))); + return ret_code; + } } - RelFileNode relfilenode {spcnode, dbnode, relnode, bucketnode, opt}; + RelFileNode relfilenode {key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode, + key.relfilenode.bucketNode}; - { - bytea* pageData = (bytea*)palloc(BLCKSZ + VARHDRSZ); - SET_VARSIZE(pageData, 
BLCKSZ + VARHDRSZ); + if (NULL != pblk) { + SegPageLocation loc = seg_get_physical_location(relfilenode, key.forknum, key.blocknum); + uint8 standby_relNode = (uint8) EXTENT_SIZE_TO_TYPE(loc.extent_size); + BlockNumber standby_block = loc.blocknum; + if (standby_relNode != pblk->relNode || standby_block != pblk->block) { + ereport(ERROR, (errmodule(MOD_REMOTE), + errmsg("Standby page file is invalid! Standby relnode is %u, " + "master is %u; Standby block is %u, master is %u.", + standby_relNode, pblk->relNode, standby_block, pblk->block))); + return REMOTE_READ_BLCKSZ_NOT_SAME; + } + } + + bytea* pageData = (bytea*)palloc(BLCKSZ + VARHDRSZ); + SET_VARSIZE(pageData, BLCKSZ + VARHDRSZ); + if (IsSegmentPhysicalRelNode(relfilenode)) { + Buffer buffer = InvalidBuffer; + SegSpace *spc = spc_open(relfilenode.spcNode, relfilenode.dbNode, false, false); + BlockNumber spc_nblocks = spc_size(spc, relfilenode.relNode, key.forknum); + + if (key.blocknum < spc_nblocks) { + buffer = ReadBufferFast(spc, relfilenode, key.forknum, key.blocknum, RBM_FOR_REMOTE); + } + + if (BufferIsValid(buffer)) { + LockBuffer(buffer, BUFFER_LOCK_SHARE); + Block block = BufferGetBlock(buffer); + + errno_t rc = memcpy_s(VARDATA(pageData), BLCKSZ, block, BLCKSZ); + if (rc != EOK) { + pfree(pageData); + ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("memcpy_s error, retcode=%d", rc))); + ret_code = REMOTE_READ_MEMCPY_ERROR; + } + + LockBuffer(buffer, BUFFER_LOCK_UNLOCK); + ReleaseBuffer(buffer); + } else { + ret_code = REMOTE_READ_SIZE_ERROR; + } + } else { bool hit = false; /* read page, if PageIsVerified failed will long jump to PG_CATCH() */ - Buffer buf = ReadBufferForRemote(relfilenode, forknum, blocknum, RBM_FOR_REMOTE, NULL, &hit); + Buffer buf = ReadBufferForRemote(relfilenode, key.forknum, key.blocknum, RBM_FOR_REMOTE, NULL, &hit, pblk); if (BufferIsInvalid(buf)) { ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("standby page buffer is invalid!"))); @@ -286,18 +297,438 @@ int StandbyReadPageforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int errno_t rc = memcpy_s(VARDATA(pageData), BLCKSZ, block, BLCKSZ); if (rc != EOK) { + pfree(pageData); ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("memcpy_s error, retcode=%d", rc))); ret_code = REMOTE_READ_MEMCPY_ERROR; } LockBuffer(buf, BUFFER_LOCK_UNLOCK); ReleaseBuffer(buf); + } - if (ret_code == REMOTE_READ_OK) { - *pagedata = pageData; - PageSetChecksumInplace((Page) VARDATA(*pagedata), blocknum); - } + if (ret_code == REMOTE_READ_OK) { + *pagedata = pageData; + PageSetChecksumInplace((Page) VARDATA(*pagedata), key.blocknum); } return ret_code; } + +const int RES_COL_NUM = 2; +Datum gs_read_file_from_remote(PG_FUNCTION_ARGS) +{ + RelFileNode rnode; + RemoteReadFileKey key; + int32 forknum; + uint32 blockstart; + uint64 lsn; + bytea* result = NULL; + Datum values[RES_COL_NUM]; + bool nulls[RES_COL_NUM] = {false}; + HeapTuple tuple = NULL; + TupleDesc tupdesc; + int ret_code = 0; + int32 timeout; + int parano = 0; + XLogRecPtr current_lsn = InvalidXLogRecPtr; + + if (GetUserId() != BOOTSTRAP_SUPERUSERID) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be initial account to read files")))); + } + /* handle optional arguments */ + rnode.spcNode = PG_GETARG_UINT32(parano++); + rnode.dbNode = PG_GETARG_UINT32(parano++); + rnode.relNode = PG_GETARG_UINT32(parano++); + rnode.bucketNode = PG_GETARG_INT32(parano++); + forknum = PG_GETARG_INT32(parano++); + blockstart = PG_GETARG_INT32(parano++); + lsn = (uint64)PG_GETARG_TRANSACTIONID(parano++); + 
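/* timeout caps how many rounds XLogWaitForReplay polls before giving up */
+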
timeout = PG_GETARG_INT32(parano++);
+
+ /* spcNode 1/2 are routed to ReadCOrCsnFileForRemote (pg_clog/pg_csnlog) */
+ if (rnode.spcNode != 1 && rnode.spcNode != 2) {
+ /* get table data file */
+ key.relfilenode = rnode;
+ key.forknum = forknum;
+ key.blockstart = blockstart;
+ if (forknum != MAIN_FORKNUM) {
+ ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ (errmsg("Forknum should be 0. Now is %d.\n", forknum))));
+ PG_RETURN_NULL();
+ }
+ ret_code = ReadFileForRemote(&key, lsn, &result, timeout);
+ } else {
+ ret_code = ReadCOrCsnFileForRemote(rnode, &result);
+ }
+
+ if (ret_code != REMOTE_READ_OK) {
+ PG_RETURN_NULL();
+ }
+
+ if (!RecoveryInProgress()) {
+ current_lsn = GetXLogInsertRecPtr();
+ }
+
+ tupdesc = CreateTemplateTupleDesc(RES_COL_NUM, false, TAM_HEAP);
+ parano = 1;
+ TupleDescInitEntry(tupdesc, (AttrNumber)parano++, "file", BYTEAOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber)parano++, "lsn", XIDOID, -1, 0);
+ values[0] = PointerGetDatum(result);
+ nulls[0] = false;
+ values[1] = UInt64GetDatum(current_lsn);
+ nulls[1] = false;
+
+ tupdesc = BlessTupleDesc(tupdesc);
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
+}
+
+Datum gs_read_file_size_from_remote(PG_FUNCTION_ARGS)
+{
+ RelFileNode rnode;
+ int32 forknum;
+ uint64 lsn;
+ int64 size = 0;
+ int32 timeout;
+ int parano = 0;
+ int ret_code = REMOTE_READ_OK;
+
+ if (GetUserId() != BOOTSTRAP_SUPERUSERID) {
+ ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be initial account to read files"))));
+ }
+ /* handle optional arguments */
+ rnode.spcNode = PG_GETARG_UINT32(parano++);
+ rnode.dbNode = PG_GETARG_UINT32(parano++);
+ rnode.relNode = PG_GETARG_UINT32(parano++);
+ rnode.bucketNode = PG_GETARG_INT32(parano++);
+ forknum = PG_GETARG_INT32(parano++);
+ lsn = (uint64)PG_GETARG_TRANSACTIONID(parano++);
+ timeout = PG_GETARG_INT32(parano++);
+
+ /* get file size */
+ ret_code = ReadFileSizeForRemote(rnode, forknum, lsn, &size, timeout);
+ if (ret_code == REMOTE_READ_OK) {
+ PG_RETURN_INT64(size);
+ } else {
+ PG_RETURN_NULL();
+ }
+}
+
+int ReadFileSizeForRemote(RelFileNode rnode, int32 forknum, XLogRecPtr lsn, int64* res, int timeout)
+{
+ SMgrRelation smgr = NULL;
+ int64 nblock = 0;
+ int ret_code = REMOTE_READ_OK;
+
+ /* wait request lsn for replay */
+ if (RecoveryInProgress()) {
+ ret_code = XLogWaitForReplay(lsn, timeout);
+ if (ret_code != REMOTE_READ_OK) {
+ ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("could not redo request lsn.")));
+ return ret_code;
+ }
+ }
+
+ RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
+
+ /* check whether the file exists. If it does 
not exist, return size -1 */ + struct stat statBuf; + char* path = relpathperm(rnode, forknum); + if (stat(path, &statBuf) < 0 && errno == ENOENT) { + *res = -1; + pfree(path); + return ret_code; + } + pfree(path); + + if (!IsSegmentFileNode(rnode)) { + smgr = smgropen(rnode, InvalidBackendId); + nblock = smgrnblocks(smgr, forknum); + smgrcloseall(); + } else { + SegSpace *spc = spc_open(rnode.spcNode, rnode.dbNode, true, true); + spc_datafile_create(spc, rnode.relNode, forknum); + + nblock = spc_size(spc, rnode.relNode, forknum); + } + *res = nblock * BLCKSZ; + + return ret_code; +} + +int ReadFileByReadBufferComom(RemoteReadFileKey *key, bytea* pageData, uint32 nblock) +{ + int ret_code = REMOTE_READ_OK; + uint32 i = 0; + uint32 j = 0; + uint32 blk_start; + uint32 blk_end; + bool hit = false; + char* bufBlock = NULL; + errno_t rc; + + /* get the segno file block start and block end */ + blk_start = key->blockstart; + blk_end = (nblock >= blk_start + MAX_BATCH_READ_BLOCKNUM ? blk_start + MAX_BATCH_READ_BLOCKNUM : nblock); + + for (i = blk_start, j = 0; i < blk_end; i++, j++) { + /* read page, if PageIsVerified failed will long jump */ + BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_REPAIR); + Buffer buf = ReadBufferForRemote(key->relfilenode, key->forknum, i, RBM_FOR_REMOTE, bstrategy, &hit, NULL); + + if (BufferIsInvalid(buf)) { + ereport(WARNING, (errmodule(MOD_REMOTE), errmsg("repair file failed!"))); + return REMOTE_READ_BLCKSZ_NOT_SAME; + } + + LockBuffer(buf, BUFFER_LOCK_SHARE); + bufBlock = (char*)BufferGetBlock(buf); + + rc = memcpy_s(VARDATA(pageData) + j * BLCKSZ, BLCKSZ, bufBlock, BLCKSZ); + if (rc != EOK) { + ereport(WARNING, (errmodule(MOD_REMOTE), errmsg("repair file failed, memcpy_s error, retcode=%d", rc))); + ret_code = REMOTE_READ_MEMCPY_ERROR; + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + ReleaseBuffer(buf); + return ret_code; + } + + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + ReleaseBuffer(buf); + PageSetChecksumInplace((Page) (VARDATA(pageData) + j * BLCKSZ), i); + } + + return ret_code; +} + +const int MAX_RETRY_TIMES = 60; +int ReadFileByReadDisk(SegSpace* spc, RemoteReadFileKey *key, char* bufBlock, BlockNumber blocknum) +{ + int ret_code = REMOTE_READ_OK; + int pageStatus; + int retryTimes = 0; + + if (IsSegmentFileNode(key->relfilenode)) { + RelFileNode fakenode = { + .spcNode = key->relfilenode.spcNode, + .dbNode = key->relfilenode.dbNode, + .relNode = key->relfilenode.relNode, + .bucketNode = SegmentBktId + }; +SEG_RETRY: + seg_physical_read(spc, fakenode, key->forknum, blocknum, (char *)bufBlock); + retryTimes++; + if (PageIsVerified((Page)bufBlock, blocknum)) { + pageStatus = SMGR_RD_OK; + } else { + pageStatus = SMGR_RD_CRC_ERROR; + if (retryTimes < MAX_RETRY_TIMES) { + /* sleep 10ms */ + pg_usleep(10000L); + goto SEG_RETRY; + } else { + pfree(bufBlock); + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("repair file failed, read page crc check error, page: %u/%u/%u/%d, " + "forknum is %d, block num is %u", key->relfilenode.spcNode, key->relfilenode.dbNode, + key->relfilenode.relNode, key->relfilenode.bucketNode, key->forknum, blocknum))); + ret_code = REMOTE_READ_CRC_ERROR; + return ret_code; + } + } + } else { + SMgrRelation smgr = smgropen(key->relfilenode, InvalidBackendId); + /* standby read page, replay finish, there will be no synchronous changes. 
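On a standby the caller has already waited for replay to reach the requested LSN and forced a checkpoint, so the on-disk copy is stable here.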
*/ + pageStatus = smgrread(smgr, key->forknum, blocknum, (char *)bufBlock); + retryTimes++; + if (pageStatus != SMGR_RD_OK) { + pfree(bufBlock); + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("repair file failed, read page crc check error, page: %u/%u/%u/%d, " + "forknum is %d, block num is %u", key->relfilenode.spcNode, key->relfilenode.dbNode, + key->relfilenode.relNode, key->relfilenode.bucketNode, key->forknum, blocknum))); + ret_code = REMOTE_READ_CRC_ERROR; + smgrclose(smgr); + return ret_code; + } + smgrclose(smgr); + } + return ret_code; +} + +int ReadFileForRemote(RemoteReadFileKey *key, XLogRecPtr lsn, bytea** fileData, int timeout) +{ + int ret_code = REMOTE_READ_OK; + SMgrRelation smgr = NULL; + SegSpace* spc = NULL; + bytea* pageData = NULL; + char* bufBlock = NULL; + uint32 nblock = 0; + uint32 blk_start = 0; + uint32 blk_end = 0; + uint32 i = 0; + uint32 j = 0; + errno_t rc; + + /* wait request lsn for replay */ + if (RecoveryInProgress()) { + ret_code = XLogWaitForReplay(lsn, timeout); + if (ret_code != REMOTE_READ_OK) { + ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("could not redo request lsn."))); + return ret_code; + } + } + if (RecoveryInProgress() || IsSegmentFileNode(key->relfilenode)) { + RequestCheckpoint(CHECKPOINT_WAIT | CHECKPOINT_FORCE | CHECKPOINT_IMMEDIATE); + } + + /* get block num */ + if (!IsSegmentFileNode(key->relfilenode)) { + smgr = smgropen(key->relfilenode, InvalidBackendId); + nblock = smgrnblocks(smgr, key->forknum); + smgrclose(smgr); + } else { + spc = spc_open(key->relfilenode.spcNode, key->relfilenode.dbNode, false, false); + if (!spc) { + ereport(WARNING, (errmodule(MOD_REMOTE), + errmsg("Spc open failed. spcNode is: %u, dbNode is %u", + key->relfilenode.spcNode, key->relfilenode.dbNode))); + return REMOTE_READ_IO_ERROR; + } + nblock = spc_size(spc, key->relfilenode.relNode, key->forknum); + } + + if (nblock <= key->blockstart) { + ret_code = REMOTE_READ_SIZE_ERROR; + return ret_code; + } + + /* get the segno file block start and block end */ + blk_start = key->blockstart; + blk_end = (nblock >= blk_start + MAX_BATCH_READ_BLOCKNUM ? 
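/* cap one batch at MAX_BATCH_READ_BLOCKNUM blocks */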
blk_start + MAX_BATCH_READ_BLOCKNUM : nblock);
+
+ pageData = (bytea*)palloc((blk_end - blk_start) * BLCKSZ + VARHDRSZ);
+ SET_VARSIZE(pageData, ((blk_end - blk_start) * BLCKSZ + VARHDRSZ));
+
+ /* primary read page, need read page by ReadBuffer_common */
+ if (!IsSegmentFileNode(key->relfilenode) && !RecoveryInProgress()) {
+ ret_code = ReadFileByReadBufferComom(key, pageData, nblock);
+ if (ret_code != REMOTE_READ_OK) {
+ pfree(pageData);
+ ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("read file failed!")));
+ return ret_code;
+ }
+ } else {
+ ADIO_RUN()
+ {
+ bufBlock = (Page)adio_align_alloc(BLCKSZ);
+ }
+ ADIO_ELSE()
+ {
+ bufBlock = (Page)palloc(BLCKSZ);
+ }
+ ADIO_END();
+ for (i = blk_start, j = 0; i < blk_end; i++, j++) {
+ ret_code = ReadFileByReadDisk(spc, key, bufBlock, i);
+ if (ret_code != REMOTE_READ_OK) {
+ /* bufBlock is already freed by ReadFileByReadDisk on failure */
+ pfree(pageData);
+ ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("repair file failed, read block %u error, retcode=%d",
+ i, ret_code)));
+ return ret_code;
+ }
+ rc = memcpy_s(VARDATA(pageData) + j * BLCKSZ, BLCKSZ, bufBlock, BLCKSZ);
+ if (rc != EOK) {
+ pfree(bufBlock);
+ pfree(pageData);
+ ret_code = REMOTE_READ_MEMCPY_ERROR;
+ ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("repair file failed, memcpy_s error, retcode=%d", rc)));
+ return ret_code;
+ }
+ }
+ pfree(bufBlock);
+ if (ret_code != REMOTE_READ_OK) {
+ pfree(pageData);
+ ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("read file failed!")));
+ return ret_code;
+ }
+ }
+
+ *fileData = pageData;
+
+ return ret_code;
+}
+
+const int REGR_MCR_SIZE_1MB = 1048576;
+const int CLOG_NODE = 1;
+const int CSN_NODE = 2;
+int ReadCOrCsnFileForRemote(RelFileNode rnode, bytea** fileData)
+{
+ uint32 flags = O_RDWR | PG_BINARY;
+ int fd = -1;
+ char* logType = NULL;
+ char* path = (char*)palloc0(MAX_PATH);
+ errno_t rc;
+ uint32 logSize = 16 * REGR_MCR_SIZE_1MB;
+ char *buffer = (char*)palloc(logSize);
+ int result = -1;
+
+ if (rnode.spcNode == CLOG_NODE) {
+ logType = "pg_clog";
+ } else if (rnode.spcNode == CSN_NODE) {
+ logType = "pg_csnlog";
+ } else {
+ ereport(LOG, (errmodule(MOD_SEGMENT_PAGE), errmsg("File type \"%u\" does not exist, stop read here.",
+ rnode.spcNode)));
+ /* unknown spcNode: logType stays NULL, so bail out instead of building a bogus path */
+ pfree(path);
+ pfree(buffer);
+ return -1;
+ }
+
+ rc = snprintf_s(path, MAX_PATH, MAX_PATH - 1, "%s/%012u", logType, rnode.relNode);
+ securec_check_ss(rc, "\0", "\0");
+
+ fd = BasicOpenFile(path, flags, S_IWUSR | S_IRUSR);
+ if (fd < 0) {
+ pfree(buffer);
+ if (errno != ENOENT) {
+ ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path)));
+ }
+ /* The file does not exist, just return. */
+ ereport(LOG,
+ (errmodule(MOD_SEGMENT_PAGE), errmsg("File \"%s\" does not exist, stop read here.", path)));
+ pfree(path);
+ return -1;
+ }
+ pgstat_report_waitevent(WAIT_EVENT_DATA_FILE_READ);
+ uint32 nbytes = pread(fd, buffer, logSize, 0);
+ pgstat_report_waitevent(WAIT_EVENT_END);
+ if (close(fd)) {
+ pfree(path);
+ pfree(buffer);
+ ereport(ERROR, (errcode_for_file_access(), errmsg("could not close file \"%s\": %m", path)));
+ }
+
+ if (nbytes > logSize) {
+ pfree(buffer);
+ ereport(ERROR,
+ (errmodule(MOD_SEGMENT_PAGE),
+ errcode_for_file_access(),
+ errmsg("could not read file %s. 
nbytes:%u, logSize:%u", path, nbytes, logSize))); + pfree(path); + return -1; + } else { + bytea* pageData = (bytea*)palloc(nbytes + VARHDRSZ); + SET_VARSIZE(pageData, (nbytes + VARHDRSZ)); + rc = memcpy_s(VARDATA(pageData), nbytes, buffer, nbytes); + if (rc != EOK) { + pfree(path); + pfree(pageData); + pfree(buffer); + ereport(ERROR, (errmodule(MOD_REMOTE), errmsg("repair file failed, memcpy_s error, retcode=%d", rc))); + return -1; + } else { + *fileData = pageData; + result = 0; + } + } + pfree(path); + pfree(buffer); + return result; +} diff --git a/src/gausskernel/storage/remote/remote_read.cpp b/src/gausskernel/storage/remote/remote_read.cpp index b88d03923..505e159a7 100755 --- a/src/gausskernel/storage/remote/remote_read.cpp +++ b/src/gausskernel/storage/remote/remote_read.cpp @@ -68,6 +68,9 @@ const char* RemoteReadErrMsg(int error_code) case REMOTE_READ_MEMCPY_ERROR: error_msg = "memcpy_s error"; break; + case REMOTE_READ_CONN_ERROR: + error_msg = "remote connect status not ok"; + break; default: error_msg = "error code unknown"; break; @@ -93,7 +96,7 @@ void GetPrimaryServiceAddress(char *address, size_t address_len) return; SpinLockAcquire(&walrcv->mutex); - rc = snprintf_s(address, address_len, (address_len - 1), "%s:%d", walrcv->conn_channel.remotehost, + rc = snprintf_s(address, address_len, (address_len - 1), "%s@%d", walrcv->conn_channel.remotehost, walrcv->conn_channel.remoteport); securec_check_ss(rc, "\0", "\0"); SpinLockRelease(&walrcv->mutex); @@ -125,7 +128,7 @@ void GetRemoteReadAddress(char* firstAddress, char* secondAddress, size_t addres if (t_thrd.postmaster_cxt.ReplConnArray[1]) { rc = snprintf_s(firstAddress, addressLen, (addressLen - 1), - "%s:%d", t_thrd.postmaster_cxt.ReplConnArray[1]->remotehost, + "%s@%d", t_thrd.postmaster_cxt.ReplConnArray[1]->remotehost, t_thrd.postmaster_cxt.ReplConnArray[1]->remoteport); securec_check_ss(rc, "", ""); } @@ -134,20 +137,20 @@ void GetRemoteReadAddress(char* firstAddress, char* secondAddress, size_t addres GetFastestReplayStandByServiceAddress(firstAddress, secondAddress, addressLen); if (firstAddress[0] != '\0') { GetIPAndPort(firstAddress, ip, port, MAX_IPADDR_LEN); - rc = snprintf_s(firstAddress, addressLen, (addressLen - 1), "%s:%s", ip, port); + rc = snprintf_s(firstAddress, addressLen, (addressLen - 1), "%s@%s", ip, port); securec_check_ss(rc, "", ""); } if (secondAddress[0] != '\0') { GetIPAndPort(secondAddress, ip, port, MAX_IPADDR_LEN); - rc = snprintf_s(secondAddress, addressLen, (addressLen - 1), "%s:%s", ip, port); + rc = snprintf_s(secondAddress, addressLen, (addressLen - 1), "%s@%s", ip, port); securec_check_ss(rc, "", ""); } } else if (serverMode == STANDBY_MODE) { GetPrimaryServiceAddress(firstAddress, addressLen); if (firstAddress[0] != '\0') { GetIPAndPort(firstAddress, ip, port, MAX_IPADDR_LEN); - rc = snprintf_s(firstAddress, addressLen, (addressLen - 1), "%s:%s", ip, port); + rc = snprintf_s(firstAddress, addressLen, (addressLen - 1), "%s@%s", ip, port); securec_check_ss(rc, "", ""); } } @@ -163,8 +166,8 @@ void GetIPAndPort(char* address, char* ip, char* port, size_t len) char* tmpIp; char* tempPort; - tmpIp = strtok_r(address, ":", &outerPtr); - tempPort = strtok_r(NULL, ":", &outerPtr); + tmpIp = strtok_r(address, "@", &outerPtr); + tempPort = strtok_r(NULL, "@", &outerPtr); if (tmpIp != NULL && tmpIp[0] != '\0' && tempPort != NULL && tempPort[0] != '\0' && strlen(tmpIp) + strlen(tempPort) + 1 < len) { rc = strcpy_s(ip, MAX_IPADDR_LEN, tmpIp); @@ -185,7 +188,7 @@ bool CanRemoteRead() ServerMode 
serveMode = hashmdata->current_mode; if (IsRemoteReadModeOn() && !IS_DN_WITHOUT_STANDBYS_MODE() && IS_PGXC_DATANODE && serveMode != NORMAL_MODE && - serveMode != PENDING_MODE) { + serveMode != PENDING_MODE && serveMode != STANDBY_MODE) { return true; } return false; diff --git a/src/gausskernel/storage/replication/archive_walreceiver.cpp b/src/gausskernel/storage/replication/archive_walreceiver.cpp index 576ac2ede..a40a0c030 100644 --- a/src/gausskernel/storage/replication/archive_walreceiver.cpp +++ b/src/gausskernel/storage/replication/archive_walreceiver.cpp @@ -35,46 +35,18 @@ #include "storage/proc.h" #include "utils/guc.h" #include "pgxc/pgxc.h" + +#ifndef ENABLE_LITE_MODE #define CUR_OBS_FILE_VERSION 1 +#define TIMEOUT_FOR_ARCHIVE_RECEIVER 600 + +const int BARRIER_NAME_LEN = 40; + +#define BARRIER_PATH "global_barrier_records/hadr_end_barrier" static char *path_skip_prefix(char *path); static bool GetOBSArchiveLastStartTime(ArchiveSlotConfig* obsArchiveSlot); - -static bool IsArchiveXlogBeyondRequest(XLogRecPtr startPtr, const List *object_list) -{ - char* fileName = NULL; - char* xlogFileName = NULL; - char* tempToken = NULL; - uint32 xlogReadLogid = -1; - uint32 xlogReadLogSeg = -1; - TimeLineID tli = 0; - uint32 startSeg; - ListCell* cell = NULL; - - if (object_list == NIL || object_list->head->next == NULL) { - ereport(ERROR, (errmsg("there is no xlog file on archive server."))); - return false; - } - cell = list_head(object_list)->next; - fileName = (char*)lfirst(cell); - fileName = strrchr(fileName, '/'); - fileName = fileName + 1; - xlogFileName = strtok_s(fileName, "_", &tempToken); - if (xlogFileName == NULL) { - ereport(ERROR, (errmsg("Failed get xlog file name from fileName %s.", fileName))); - } - if (sscanf_s(xlogFileName, "%08X%08X%08X", &tli, &xlogReadLogid, &xlogReadLogSeg) != 3) { - ereport(ERROR, (errmsg("failed to translate name to xlog: %s\n", xlogFileName))); - } - XLByteToSeg(startPtr, startSeg); - if ((startSeg / XLogSegmentsPerXLogId) < xlogReadLogid || - ((startSeg / XLogSegmentsPerXLogId) == xlogReadLogid && - (startSeg % XLogSegmentsPerXLogId) < xlogReadLogSeg)) { - ereport(LOG, (errmsg("the xlog file on archive server is newer than local request, need build.\n"))); - return true; - } - return false; -} +static char* FindGlobalBarrierRecordForPITR(ArchiveConfig *archive_config); bool ReadArchiveReplicationFile(const char* fileName, char *buffer, const int length, ArchiveConfig *archive_config) { @@ -162,7 +134,7 @@ void update_stop_barrier() errno_t rc = EOK; volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - if (IS_DISASTER_RECOVER_MODE && (t_thrd.xlog_cxt.recoveryTarget != RECOVERY_TARGET_TIME_OBS)) { + if (IS_OBS_DISASTER_RECOVER_MODE && (t_thrd.xlog_cxt.recoveryTarget != RECOVERY_TARGET_TIME_OBS)) { bool hasFailoverBarrier = false; bool hasSwitchoverBarrier = false; char failoverBarrier[MAX_BARRIER_ID_LENGTH] = {0}; @@ -193,12 +165,14 @@ void update_stop_barrier() securec_check(rc, "\0", "\0"); ereport(LOG, (errmsg("Get switchover barrierID %s", (char *)walrcv->recoverySwitchoverBarrierId))); } - } else if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { - if (strlen(g_instance.stopBarrierId) != 0) { + } else if (IS_DISASTER_RECOVER_MODE) { + if (strlen(g_instance.csn_barrier_cxt.stopBarrierId) != 0) { + SpinLockAcquire(&walrcv->mutex); rc = strncpy_s((char *)walrcv->recoveryStopBarrierId, MAX_BARRIER_ID_LENGTH, - (char *)g_instance.stopBarrierId, MAX_BARRIER_ID_LENGTH - 1); + (char *)g_instance.csn_barrier_cxt.stopBarrierId, 
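/*
 * The convention this hunk enforces: every read or write of the walrcv
 * barrier fields happens under walrcv->mutex. A minimal sketch of the
 * copy-under-lock pattern used here (src is illustrative):
 *
 *     SpinLockAcquire(&walrcv->mutex);
 *     rc = strncpy_s((char *)walrcv->recoveryStopBarrierId, MAX_BARRIER_ID_LENGTH,
 *                    src, MAX_BARRIER_ID_LENGTH - 1);
 *     SpinLockRelease(&walrcv->mutex);
 *     securec_check(rc, "\0", "\0");
 */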
MAX_BARRIER_ID_LENGTH - 1); + SpinLockRelease(&walrcv->mutex); securec_check(rc, "\0", "\0"); - ereport(LOG, (errmsg("Get stop barrierID %s", (char *)walrcv->recoveryStopBarrierId))); + ereport(LOG, (errmsg("Get stop barrierID %s", (char *)g_instance.csn_barrier_cxt.stopBarrierId))); } } } @@ -210,13 +184,16 @@ void update_recovery_barrier() volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; char barrier[MAX_BARRIER_ID_LENGTH] = {0}; if (ArchiveReplicationReadFile(HADR_BARRIER_ID_FILE, (char *)barrier, MAX_BARRIER_ID_LENGTH)) { + SpinLockAcquire(&walrcv->mutex); if (strcmp((char *)barrier, (char *)walrcv->recoveryTargetBarrierId) < 0) { - ereport(ERROR, (errmodule(MOD_REDO), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("The new global barrier is smaller than the last one."))); + SpinLockRelease(&walrcv->mutex); + ereport(ERROR, (errmodule(MOD_REDO), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("The new global barrier is smaller than the last one."))); } else { rc = strncpy_s((char *)walrcv->recoveryTargetBarrierId, MAX_BARRIER_ID_LENGTH, (char *)barrier, MAX_BARRIER_ID_LENGTH - 1); securec_check(rc, "\0", "\0"); + SpinLockRelease(&walrcv->mutex); } } } @@ -245,6 +222,32 @@ bool archive_connect(char* conninfo, XLogRecPtr* startpoint, char* slotname, int return false; } + if (g_instance.roach_cxt.targetTimeInPITR != NULL) { + char* flag = NULL; + char* token = NULL; + char* outerPtr = NULL; + char* tmpStr = NULL; + g_instance.roach_cxt.globalBarrierRecordForPITR = + FindGlobalBarrierRecordForPITR(&walrcv->archive_slot->archive_config); + ereport(LOG, (errmsg("archive_walreceiver find global barrier is %s for PITR recovery.", + g_instance.roach_cxt.globalBarrierRecordForPITR))); + tmpStr = pstrdup(g_instance.roach_cxt.globalBarrierRecordForPITR); + token = strtok_s(tmpStr, "_", &outerPtr); + token = strtok_s(NULL, "_", &outerPtr); + token = strtok_s(NULL, "-", &outerPtr); + if (token == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("parse pitr target barrier record failed in archive connect.")))); + } + g_instance.roach_cxt.targetRestoreTimeFromMedia = pstrdup(token); + flag = strrchr(outerPtr, '-'); + if (flag == NULL) { + g_instance.roach_cxt.isGtmFreeCsn = false; + } else { + g_instance.roach_cxt.isGtmFreeCsn = true; + } + pfree_ext(tmpStr); + } /* HADR only support OBS currently */ if (walrcv->archive_slot->archive_config.media_type != ARCHIVE_OBS) { return true; @@ -367,13 +370,14 @@ void archive_disconnect(void) return; } -static char *archive_replication_get_xlog_prefix(XLogRecPtr recptr, bool onlyPath) +static char *archive_replication_get_xlog_prefix(XLogRecPtr recptr, bool onlyPath, bool noNeedKeyCN) { errno_t rc = EOK; char xlogfname[MAXFNAMELEN]; char xlogfpath[MAXPGPATH]; XLogSegNo xlogSegno = 0; TimeLineID timeLine = DEFAULT_TIMELINE_ID; + char* keyCN = NULL; rc = memset_s(xlogfname, MAXFNAMELEN, 0, MAXFNAMELEN); securec_check_ss_c(rc, "", ""); @@ -389,13 +393,18 @@ static char *archive_replication_get_xlog_prefix(XLogRecPtr recptr, bool onlyPat securec_check_ss_c(rc, "", ""); } if (IS_PGXC_COORDINATOR) { - if (IS_CNDISASTER_RECOVER_MODE) { - if (get_local_key_cn() == NULL) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { + keyCN = get_local_key_cn(); + if (keyCN == NULL) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FILE), errmsg("There is no hadr_key_cn"))); return NULL; } rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", - get_local_key_cn(), XLOGDIR, xlogfname); + keyCN, XLOGDIR, xlogfname); + securec_check_ss_c(rc, "", 
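/*
 * Assumed layout of one barrier record, inferred from the strtok_s calls in
 * archive_connect() above and from WriteOneBarrierRecordToBarrierList() below
 * (field names are descriptive, not from the source):
 *
 *     <prefix>_<index>_<timestamp-ms>-<cn_name>_<timeline>[-free]
 *
 * Splitting on '_' twice and then on '-' therefore yields the millisecond
 * timestamp, and a trailing "-free" marks a GTM-free CSN barrier.
 */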
""); + } else if (noNeedKeyCN) { + rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", + g_instance.attr.attr_common.PGXCNodeName, XLOGDIR, xlogfname); securec_check_ss_c(rc, "", ""); } else { rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", @@ -406,7 +415,7 @@ static char *archive_replication_get_xlog_prefix(XLogRecPtr recptr, bool onlyPat rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%s", xlogfname); securec_check_ss_c(rc, "", ""); } - + pfree_ext(keyCN); return pstrdup(xlogfpath); } @@ -448,46 +457,42 @@ static char *archive_replication_get_last_xlog_slice(XLogRecPtr startPtr, bool o char *fileNamePrefix = NULL; char *fileName = NULL; List *object_list = NIL; - List *obsXlogList = NIL; char xlogfpath[MAXPGPATH]; + char* keyCN = NULL; errno_t rc = EOK; - fileNamePrefix = archive_replication_get_xlog_prefix(startPtr, onlyPath); - - if (IS_CNDISASTER_RECOVER_MODE) { - if (get_local_key_cn() == NULL) { + fileNamePrefix = archive_replication_get_xlog_prefix(startPtr, onlyPath, false); + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { + keyCN = get_local_key_cn(); + if (keyCN == NULL) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FILE), errmsg("There is no hadr_key_cn"))); return NULL; } - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", get_local_key_cn(), XLOGDIR); + rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", keyCN, XLOGDIR); securec_check_ss_c(rc, "", ""); } else { rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s", XLOGDIR); securec_check_ss_c(rc, "", ""); } - if (needUpdateDBState) { - obsXlogList = ArchiveList(xlogfpath, archive_obs); - if (IsArchiveXlogBeyondRequest(startPtr, obsXlogList)) { + object_list = ArchiveList(fileNamePrefix, archive_obs); + if (object_list == NIL || object_list->length <= 0) { + ereport(LOG, (errmsg("The OBS objects with the prefix %s cannot be found.", fileNamePrefix))); + pfree(fileNamePrefix); + if (needUpdateDBState && g_instance.roach_cxt.globalBarrierRecordForPITR == NULL) { + pfree_ext(keyCN); SetObsRebuildReason(WALSEGMENT_REBUILD); ereport(ERROR, (errcode(ERRCODE_INVALID_STATUS), errmsg("standby's local request lsn[%X/%X] mismatched with remote server", (uint32)(startPtr >> 32), (uint32)startPtr))); } - list_free_deep(obsXlogList); - obsXlogList = NIL; - } - - object_list = ArchiveList(fileNamePrefix, archive_obs); - if (object_list == NIL || object_list->length <= 0) { - ereport(LOG, (errmsg("The OBS objects with the prefix %s cannot be found.", fileNamePrefix))); - pfree(fileNamePrefix); + pfree_ext(keyCN); return NULL; } - if (IS_CNDISASTER_RECOVER_MODE) { + if (IS_CN_OBS_DISASTER_RECOVER_MODE) { char tmpFileName[MAXPGPATH]; - rc = snprintf_s(tmpFileName, MAXPGPATH, MAXPGPATH - 1, "%s/%s", get_local_key_cn(), + rc = snprintf_s(tmpFileName, MAXPGPATH, MAXPGPATH - 1, "%s/%s", keyCN, get_last_filename_from_list(object_list)); securec_check_ss_c(rc, "", ""); fileName = pstrdup(tmpFileName); @@ -501,6 +506,7 @@ static char *archive_replication_get_last_xlog_slice(XLogRecPtr startPtr, bool o pfree(fileNamePrefix); list_free_deep(object_list); + pfree_ext(keyCN); object_list = NIL; return fileName; } @@ -518,6 +524,8 @@ int archive_replication_receive(XLogRecPtr startPtr, char **buffer, int *bufferL size_t readLen = 0; char *xlogBuff = NULL; uint32 actualXlogLen = 0; + struct timeval currTime; + gettimeofday(&currTime, NULL); volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; errno_t rc = EOK; TimestampTz start_time; @@ -528,9 +536,15 @@ int 
archive_replication_receive(XLogRecPtr startPtr, char **buffer, int *bufferL } *bufferLength = 0; - + if (g_instance.comm_cxt.lastArchiveRcvTime == 0) { + g_instance.comm_cxt.lastArchiveRcvTime = currTime.tv_sec; + } fileName = archive_replication_get_last_xlog_slice(startPtr, false, true, &walrcv->archive_slot->archive_config); if (fileName == NULL || strlen(fileName) == 0) { + if (currTime.tv_sec - g_instance.comm_cxt.lastArchiveRcvTime > TIMEOUT_FOR_ARCHIVE_RECEIVER) { + ereport(PANIC, (errmsg("Cannot find xlog file with LSN: %lu and timeout is reached", startPtr))); + return -1; + } ereport(LOG, (errmsg("Cannot find xlog file with LSN: %lu", startPtr))); return -1; } @@ -553,6 +567,7 @@ int archive_replication_receive(XLogRecPtr startPtr, char **buffer, int *bufferL /* Start timing */ start_time = GetCurrentTimestamp(); + g_instance.comm_cxt.lastArchiveRcvTime = currTime.tv_sec; do { readLen = ArchiveRead(fileName, 0, xlogBuff, OBS_XLOG_SLICE_FILE_SIZE, &walrcv->archive_slot->archive_config); if (readLen < sizeof(int)) { @@ -616,7 +631,7 @@ int ArchiveReplicationAchiver(const ArchiveXlogMessage *xlogInfo) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Invalid Lsn: %lu", xlogInfo->targetLsn))); } - XLogFileName(xlogfname, DEFAULT_TIMELINE_ID, xlogSegno); + XLogFileName(xlogfname, MAXFNAMELEN, DEFAULT_TIMELINE_ID, xlogSegno); rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/" XLOGDIR "/%s", t_thrd.proc_cxt.DataDir, xlogfname); securec_check_ss(rc, "\0", "\0"); @@ -650,7 +665,7 @@ int ArchiveReplicationAchiver(const ArchiveXlogMessage *xlogInfo) close(xlogreadfd); /* Get xlog slice file path on archive server */ - fileNamePrefix = archive_replication_get_xlog_prefix(xlogInfo->targetLsn, false); + fileNamePrefix = archive_replication_get_xlog_prefix(xlogInfo->targetLsn, false, false); fileName = (char*)palloc0(MAX_PATH_LEN); /* {xlog_name}_{sliece_num}_01(version_num)_00000001{tli}_00000001{subTerm} */ @@ -673,7 +688,8 @@ static bool GetOBSArchiveLastStartTime(ArchiveSlotConfig* obsArchiveSlot) List *objectList = NIL; int readLen = 0; char buffer[MAXPGPATH] = {0}; - char* lastStartTime; + char* lastStartTime = NULL; + char* lastStartLsn = NULL; char* tempToken = NULL; if (!IS_PGXC_COORDINATOR) { @@ -700,6 +716,14 @@ static bool GetOBSArchiveLastStartTime(ArchiveSlotConfig* obsArchiveSlot) objectList = NIL; return false; } + (void)strtok_s(NULL, "_", &tempToken); + lastStartLsn = strtok_s(NULL, "-", &tempToken); + if (lastStartLsn == NULL) { + ereport(LOG, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Get start lsn location failed when reading status file.")))); + } else { + t_thrd.arch.arch_start_lsn = (XLogRecPtr)strtoul(lastStartLsn, NULL, 0); + } #ifdef HAVE_INT64_TIMESTAMP t_thrd.arch.arch_start_timestamp = atol(lastStartTime); #else @@ -718,7 +742,9 @@ void update_archive_start_end_location_file(XLogRecPtr endPtr, long endTime) XLogRecPtr locStartPtr; char* fileName = NULL; char* obsfileName = NULL; + char xlogLocation[MAXPGPATH] = {0}; char preFileName[MAXPGPATH] = {0}; + char archiveStartEndFile[MAXPGPATH] = {0}; char* xlogFileName = NULL; char* tempToken = NULL; uint32 xlogReadLogid = -1; @@ -732,43 +758,70 @@ void update_archive_start_end_location_file(XLogRecPtr endPtr, long endTime) if (obs_archive_slot == NULL) { return; } - - if (!IS_PGXC_COORDINATOR) { - if (t_thrd.arch.arch_start_timestamp == 0 && !GetOBSArchiveLastStartTime(obs_archive_slot)) { - t_thrd.arch.arch_start_timestamp = endTime; - } - initStringInfo(&buffer); - obsXlogList = 
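/*
 * Assumed record shape of OBS_ARCHIVE_STATUS_FILE, read back from the
 * appendStringInfo() call below:
 *
 *     <start-time>-<end-time>_<start-lsn>-<end-lsn>_00000001_<xlog-prefix>\n
 *
 * GetOBSArchiveLastStartTime() above and GetArchiveXLogFileTotalNum() further
 * down tokenize this on '-' and '_' to recover the start timestamp and LSN.
 */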
ArchiveList(XLOGDIR, &obs_archive_slot->archive_config); + if (IS_PGXC_COORDINATOR) { + rc = snprintf_s(xlogLocation, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + g_instance.attr.attr_common.PGXCNodeName, XLOGDIR); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(preFileName, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", + obs_archive_slot->archive_config.archive_prefix, g_instance.attr.attr_common.PGXCNodeName, XLOGDIR); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(archiveStartEndFile, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + g_instance.attr.attr_common.PGXCNodeName, OBS_ARCHIVE_STATUS_FILE); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(xlogLocation, MAXPGPATH, MAXPGPATH - 1, "%s", XLOGDIR); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(preFileName, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + obs_archive_slot->archive_config.archive_prefix, XLOGDIR); + securec_check_ss(rc, "\0", "\0"); + rc = snprintf_s(archiveStartEndFile, MAXPGPATH, MAXPGPATH - 1, "%s", OBS_ARCHIVE_STATUS_FILE); + securec_check_ss(rc, "\0", "\0"); + } + if (t_thrd.arch.arch_start_timestamp == 0 && !GetOBSArchiveLastStartTime(obs_archive_slot)) { + t_thrd.arch.arch_start_timestamp = endTime; + } + if (t_thrd.arch.arch_start_lsn == InvalidXLogRecPtr) { + obsXlogList = ArchiveList(xlogLocation, &obs_archive_slot->archive_config); if (obsXlogList == NIL || obsXlogList->length <= 0) { + ereport(WARNING, (errmsg("failed to list all xlog file for update archive start end status file."))); return; } cell = list_head(obsXlogList); fileName = (char*)lfirst(cell); obsfileName = strrchr(fileName, '/'); - rc = memcpy_s(preFileName, MAXPGPATH, fileName, strlen(fileName) - strlen(obsfileName)); - securec_check(rc, "", ""); + if (obsfileName == NULL) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("The archive path content error when update status file."))); + } obsfileName = obsfileName + 1; tempToken = NULL; xlogFileName = strtok_s(obsfileName, "_", &tempToken); if (xlogFileName == NULL) { + list_free_deep(obsXlogList); + obsXlogList = NIL; ereport(ERROR, (errmsg("Failed get xlog file name from obsfileName %s.", obsfileName))); } if (sscanf_s(xlogFileName, "%08X%08X%08X", &tli, &xlogReadLogid, &xlogReadLogSeg) != 3) { + list_free_deep(obsXlogList); + obsXlogList = NIL; ereport(ERROR, (errmsg("failed to translate name to xlog: %s\n", xlogFileName))); } XLogSegNoOffsetToRecPtr(xlogReadLogid * XLogSegmentsPerXLogId + xlogReadLogSeg, 0, locStartPtr); - appendStringInfo(&buffer, "%ld-%ld_%lu-%lu_00000001_%s\n", t_thrd.arch.arch_start_timestamp, endTime, - locStartPtr, endPtr, preFileName); - - ArchiveWrite(OBS_ARCHIVE_STATUS_FILE, buffer.data, buffer.len, &obs_archive_slot->archive_config); - pfree(buffer.data); + t_thrd.arch.arch_start_lsn = locStartPtr; /* release result list */ list_free_deep(obsXlogList); obsXlogList = NIL; } + initStringInfo(&buffer); + + appendStringInfo(&buffer, "%ld-%ld_%lu-%lu_00000001_%s\n", t_thrd.arch.arch_start_timestamp, endTime, + t_thrd.arch.arch_start_lsn, endPtr, preFileName); + + ArchiveWrite(archiveStartEndFile, buffer.data, buffer.len, &obs_archive_slot->archive_config); + pfree(buffer.data); } -int archive_replication_cleanup(XLogRecPtr recptr, ArchiveConfig *archive_config) +int archive_replication_cleanup(XLogRecPtr recptr, ArchiveConfig *archive_config, bool reverse) { char *fileNamePrefix = NULL; List *object_list = NIL; @@ -791,8 +844,11 @@ int archive_replication_cleanup(XLogRecPtr recptr, ArchiveConfig *archive_config securec_check_ss_c(rc, "", ""); len = 
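/*
 * Semantics of the new `reverse` parameter, with key being a slice name from
 * the archive listing and xlogfname the boundary segment name:
 *
 *     reverse == false: delete keys with strncmp(basename(key), xlogfname, len) <  0   // older than recptr
 *     reverse == true : delete keys with strncmp(basename(key), xlogfname, len) >= 0   // recptr and newer
 */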
strlen(xlogfname); - fileNamePrefix = archive_replication_get_xlog_prefix(recptr, true); - + if (reverse) { + fileNamePrefix = archive_replication_get_xlog_prefix(recptr, true, true); + } else { + fileNamePrefix = archive_replication_get_xlog_prefix(recptr, true, false); + } ereport(LOG, (errmsg("The OBS objects with the prefix %s ", fileNamePrefix))); if (archive_config == NULL) { object_list = ArchiveList(fileNamePrefix, &walrcv->archive_slot->archive_config); @@ -825,11 +881,26 @@ int archive_replication_cleanup(XLogRecPtr recptr, ArchiveConfig *archive_config ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Invalid OBS object key: %s", (char *)lfirst(cell)))); } - - if (strncmp(basename(key), xlogfname, len) < 0) { - /* Ahead of the target lsn, need to delete */ + if (IS_PGXC_COORDINATOR) { + rc = snprintf_s(obsXlogPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", + g_instance.attr.attr_common.PGXCNodeName, XLOGDIR, key); + securec_check_ss_c(rc, "", ""); + } else { rc = snprintf_s(obsXlogPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", XLOGDIR, key); securec_check_ss_c(rc, "", ""); + } + if (reverse) { + if (strncmp(basename(key), xlogfname, len) >= 0) { + ret = ArchiveDelete(obsXlogPath, archive_config); + if (ret != 0) { + ereport(WARNING, (errcode(ERRCODE_UNDEFINED_FILE), + errmsg("The OBS objects delete fail, ret=%d, key=%s", ret, key))); + } + } + continue; + } + if (strncmp(basename(key), xlogfname, len) < 0) { + /* Ahead of the target lsn, need to delete */ if (archive_config == NULL) { ret = ArchiveDelete(obsXlogPath, &walrcv->archive_slot->archive_config); } else { @@ -954,3 +1025,676 @@ char* get_local_key_cn(void) return pstrdup(key_cn); } } + +static void WriteOneBarrierRecordToBarrierList(const char* barrierFileName, const char* availableCNName, + const char* id, ArchiveConfig *archive_config) +{ + char barrierRecordBuff[OBS_XLOG_SLICE_FILE_SIZE] = {0}; + char buffer[MAXPGPATH] = {0}; + char endBuffer[MAXPGPATH] = {0}; + size_t readLen = 0; + errno_t rc = EOK; + int ret = 0; + + if (GTM_FREE_MODE) { + rc = snprintf_s(buffer, MAXPGPATH, MAXPGPATH - 1, "%s-%s_%08lu-free\n", id, availableCNName, + t_thrd.barrier_creator_cxt.first_cn_timeline); + securec_check_ss_c(rc, "", ""); + } else { + rc = snprintf_s(buffer, MAXPGPATH, MAXPGPATH - 1, "%s-%s_%08lu\n", id, availableCNName, + t_thrd.barrier_creator_cxt.first_cn_timeline); + securec_check_ss_c(rc, "", ""); + } + rc = snprintf_s(endBuffer, MAXPGPATH, MAXPGPATH - 1, "%s-%s_%08lu", id, availableCNName, + t_thrd.barrier_creator_cxt.first_cn_timeline); + securec_check_ss_c(rc, "", ""); + int messageLen = strlen(buffer); + int endBufLen = strlen(endBuffer); + if (ArchiveFileExist(barrierFileName, archive_config) == false) { + ret = ArchiveWrite(barrierFileName, buffer, messageLen, archive_config); + if (ret != 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier list file: %s could not been write now, and record is %s.", + barrierFileName, buffer)))); + } + ereport(LOG, (errmsg("create new hadr barrier list file: %s.", barrierFileName))); + ret = ArchiveWrite(BARRIER_PATH, endBuffer, endBufLen, archive_config); + if (ret != 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier list file: hadr_end_barrier could not been write now.")))); + } + } else { + readLen = 0; + readLen = ArchiveRead(barrierFileName, 0, barrierRecordBuff, OBS_XLOG_SLICE_FILE_SIZE, archive_config); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier list file: %s 
could not been read now.", barrierFileName)))); + } + ret = strcat_s(barrierRecordBuff, OBS_XLOG_SLICE_FILE_SIZE, buffer); + securec_check_c(ret, "\0", "\0"); + messageLen = strlen(barrierRecordBuff); + ret = 0; + + ret = ArchiveWrite(barrierFileName, barrierRecordBuff, messageLen, archive_config); + if (ret != 0) { + ereport(WARNING, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The hadr barrier list file:%s, could not been write now, now record is %s.", + barrierFileName, buffer)))); + } + ret = ArchiveWrite(BARRIER_PATH, endBuffer, endBufLen, archive_config); + if (ret != 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier list file: hadr_end_barrier could not been write now.")))); + } + } +} + +static void UpdateLastBarrierTimeInfo(char* slotName, long lastBrrierStartTime) +{ + bool needCreateNew = false; + + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + if (t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName != NULL && + strcmp(t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName, slotName) == 0) { + t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].lastBarrierFileStartTimestamp = + lastBrrierStartTime; + return; + } else { + needCreateNew = true; + } + } + if (needCreateNew) { + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + if (t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName == NULL && + t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].lastBarrierFileStartTimestamp == 0) { + t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].lastBarrierFileStartTimestamp = + lastBrrierStartTime; + t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName = pstrdup(slotName); + ereport(DEBUG1, (errmsg("Update last barrier start time success %lu and slot is %s", + lastBrrierStartTime, slotName))); + return; + } + } + } +} + +static long FindLastBarrierTimeInfo(char* slotName) +{ + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + if (t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName != NULL && + strcmp(t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].archiveSlotName, slotName) == 0) { + ereport(DEBUG1, (errmsg("find last barrier start time success %lu and slot is %s", + t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].lastBarrierFileStartTimestamp, slotName))); + return t_thrd.barrier_creator_cxt.barrier_update_last_time_info[i].lastBarrierFileStartTimestamp; + } + } + return 0; +} + + +static void WriteGlobalBarrierListRecordsOnMedia(const char* id, const char* availableCNName, char* slotName, + long curBarrierTimeStamp, ArchiveConfig *archive_config) +{ + size_t readLen = 0; + errno_t rc = EOK; + char startFileName[MAXPGPATH]; + char barrierFileName[MAXPGPATH]; + char buffer[MAXPGPATH]; + char startTimeBuff[MAXPGPATH] = {0}; + long startTimeStamp = 0; + long lastBarrierFileStartTimestamp = 0; + + rc = memset_s(startFileName, MAXPGPATH, 0, MAXPGPATH); + securec_check_ss_c(rc, "", ""); + rc = memset_s(barrierFileName, MAXPGPATH, 0, MAXPGPATH); + securec_check_ss_c(rc, "", ""); + rc = memset_s(buffer, MAXPGPATH, 0, MAXPGPATH); + securec_check_ss_c(rc, "", ""); + + if (ArchiveFileExist("global_barrier_records/hadr_start_barrier", archive_config) != true) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name hadr_start_barrier cannot be find when write global barrier records.")))); + } + if 
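/*
 * Barrier records are bucketed into per-window files. Assuming
 * FILE_TIME_INTERVAL is the window width in the same millisecond units, a
 * barrier at time T with global start S (read from hadr_start_barrier) lands in
 *
 *     global_barrier_records/hadr_barrier_<S + ((T - S) / FILE_TIME_INTERVAL) * FILE_TIME_INTERVAL>
 *
 * i.e. the window's start time, zero-padded to 13 digits.
 */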
(t_thrd.barrier_creator_cxt.is_first_barrier) { + rc = snprintf_s(startFileName, MAXPGPATH, MAXPGPATH - 1, "global_barrier_records/hadr_start_barrier"); + securec_check_ss_c(rc, "", ""); + readLen = ArchiveRead(startFileName, 0, startTimeBuff, 24, archive_config); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name hadr_start_barrier cannot be read when write global barrier records.")))); + } + char *startTimeStr = strrchr(startTimeBuff, '_'); + if (startTimeStr == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("parse barrier start time failed when update a record to barrier list.")))); + } + startTimeStr = startTimeStr + 1; + startTimeStamp = atol(startTimeStr); + if (curBarrierTimeStamp < startTimeStamp) { + ereport(ERROR, (errmsg("the start time write in hadr_start_barrier file is bigger than " + "current barrier time, may recover old records."))); + } else { + int fileCnt = (curBarrierTimeStamp - startTimeStamp) / FILE_TIME_INTERVAL; + if (fileCnt == 0) { + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", startTimeStamp); + securec_check_ss_c(rc, "", ""); + + WriteOneBarrierRecordToBarrierList(barrierFileName, availableCNName, id, archive_config); + } else { + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", + startTimeStamp + fileCnt * FILE_TIME_INTERVAL); + securec_check_ss_c(rc, "", ""); + + WriteOneBarrierRecordToBarrierList(barrierFileName, availableCNName, id, archive_config); + } + UpdateLastBarrierTimeInfo(slotName, startTimeStamp + fileCnt * FILE_TIME_INTERVAL); + } + } else { + lastBarrierFileStartTimestamp = FindLastBarrierTimeInfo(slotName); + if (curBarrierTimeStamp - lastBarrierFileStartTimestamp > FILE_TIME_INTERVAL) { + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, "global_barrier_records/hadr_barrier_%013ld", + lastBarrierFileStartTimestamp + FILE_TIME_INTERVAL); + securec_check_ss_c(rc, "", ""); + + WriteOneBarrierRecordToBarrierList(barrierFileName, availableCNName, id, archive_config); + UpdateLastBarrierTimeInfo(slotName, lastBarrierFileStartTimestamp + FILE_TIME_INTERVAL); + } else { + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, "global_barrier_records/hadr_barrier_%013ld", + lastBarrierFileStartTimestamp); + securec_check_ss_c(rc, "", ""); + + WriteOneBarrierRecordToBarrierList(barrierFileName, availableCNName, id, archive_config); + } + } +} + +void UpdateGlobalBarrierListOnMedia(const char* id, const char* availableCNName) +{ + ArchiveSlotConfig *archive_conf = NULL; + char *startTimestampStr = strrchr((char*)id, '_'); + if (startTimestampStr == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Get barrier timestamp for barrier %s failed because parse content error.", id)))); + } + startTimestampStr = startTimestampStr + 1; + long startTimestamp = atol(startTimestampStr); + + foreach_cell(cell, t_thrd.barrier_creator_cxt.archive_slot_names) { + char* slotName = (char*)lfirst(cell); + if (slotName == NULL || strlen(slotName) == 0) { + continue; + } + archive_conf = getArchiveReplicationSlotWithName(slotName); + if (archive_conf != NULL) { + WriteGlobalBarrierListRecordsOnMedia(id, availableCNName, slotName, startTimestamp, + &archive_conf->archive_config); + } + } + if (t_thrd.barrier_creator_cxt.is_first_barrier) { + t_thrd.barrier_creator_cxt.is_first_barrier = false; + } +} + +void WriteGlobalBarrierListStartTimeOnMedia(long cur_time) 
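/*
 * Writes the one-off anchor file global_barrier_records/hadr_start_barrier,
 * whose whole payload is "starttime_<13-digit-ms>"; readers recover the epoch
 * with strrchr(buf, '_') + 1, as the functions above and below do.
 */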
+{ + ArchiveSlotConfig *archive_conf = NULL; + errno_t rc = EOK; + char buffer[MAXPGPATH]; + char fileNamePrefix[MAXPGPATH]; + int ret = 0; + + rc = memset_s(fileNamePrefix, MAXPGPATH, 0, MAXPGPATH); + securec_check_ss_c(rc, "", ""); + rc = memset_s(buffer, MAXPGPATH, 0, MAXPGPATH); + securec_check_ss_c(rc, "", ""); + + foreach_cell(cell, t_thrd.barrier_creator_cxt.archive_slot_names) { + char* slotName = (char*)lfirst(cell); + if (slotName == NULL || strlen(slotName) == 0) { + continue; + } + archive_conf = getArchiveReplicationSlotWithName(slotName); + if (archive_conf != NULL) { + if (ArchiveFileExist("global_barrier_records/hadr_start_barrier", + &archive_conf->archive_config) == true) { + ereport(LOG, (errmsg("hadr_start_barrier file is already exist, no need write a new one."))); + continue; + } + rc = snprintf_s(fileNamePrefix, MAXPGPATH, MAXPGPATH - 1, "global_barrier_records/hadr_start_barrier"); + securec_check_ss_c(rc, "", ""); + rc = snprintf_s(buffer, MAXPGPATH, MAXPGPATH - 1, "starttime_%013ld", cur_time); + securec_check_ss_c(rc, "", ""); + int messageLen = strlen(buffer); + ret = ArchiveWrite(fileNamePrefix, buffer, messageLen, &archive_conf->archive_config); + if (ret != 0) { + ereport(WARNING, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The hadr_start_barrier file could not been write now.")))); + continue; + } + } + } +} + +static char* FindTargetGlobalBarrierInRecords(char* globalBarrierRecords) +{ + char* token = NULL; + char* outerPtr = NULL; + char* outputStr = NULL; + char* lastRecord = NULL; + char* lastTimestamp = NULL; + const int msec = 1000; + const int requestTimeInterval = 5000; + token = strtok_s(globalBarrierRecords, "\n", &outerPtr); + while (token != NULL) { + char* tmptoken = NULL; + char* lastPtr = NULL; + outputStr = pstrdup(token); + tmptoken = strtok_s(token, "_", &lastPtr); + tmptoken = strtok_s(NULL, "_", &lastPtr); + tmptoken = strtok_s(NULL, "-", &lastPtr); + if (tmptoken == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("parse one barrier time failed when find correct pitr record from barrier list.")))); + } + if (strncasecmp(tmptoken, g_instance.roach_cxt.targetTimeInPITR, + strlen(g_instance.roach_cxt.targetTimeInPITR)) == 0) { + break; + } else if (strncasecmp(tmptoken, g_instance.roach_cxt.targetTimeInPITR, + strlen(g_instance.roach_cxt.targetTimeInPITR)) < 0) { + lastTimestamp = tmptoken; + lastRecord = pstrdup(outputStr); + pfree_ext(outputStr); + token = strtok_s(NULL, "\n", &outerPtr); + } else { + if (atol(g_instance.roach_cxt.targetTimeInPITR) * msec - atol(lastTimestamp) <= requestTimeInterval) { + outputStr = pstrdup(lastRecord); + break; + } + pfree_ext(outputStr); + break; + } + } + pfree_ext(lastRecord); + if (outputStr != NULL) { + return outputStr; + } else { + ereport(ERROR, (errmsg("Find target PITR barrier record failed on obs record list"))); + return NULL; + } +} + +static char* FindGlobalBarrierRecordForPITR(ArchiveConfig *archive_config) +{ + char barrierFileName[MAXPGPATH] = {0}; + char barrierRecordBuff[OBS_XLOG_SLICE_FILE_SIZE] = {0}; + char startTimeBuff[MAXPGPATH] = {0}; + long startTimeStamp = 0; + long targetTimeInPITR = 0; + const int msec = 1000; + char* targetGlobalBarrier = NULL; + int cntFile = 0; + size_t readLen = 0; + errno_t rc = EOK; + + if (g_instance.roach_cxt.targetTimeInPITR == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), (errmsg("The target PITR time is null.")))); + } + targetTimeInPITR = atol(g_instance.roach_cxt.targetTimeInPITR) * msec; + if 
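/*
 * PITR lookup flow, in sketch form (names from this file; the scan picks
 * either the exact match or the latest earlier record within the 5 s request
 * window, per FindTargetGlobalBarrierInRecords above):
 *
 *     S = epoch from hadr_start_barrier           // "starttime_%013ld"
 *     T = atol(targetTimeInPITR) * 1000           // target, in ms
 *     f = hadr_barrier_<S + ((T - S) / FILE_TIME_INTERVAL) * FILE_TIME_INTERVAL>
 *     scan f for the record closest to T
 */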
(ArchiveFileExist("global_barrier_records/hadr_start_barrier", archive_config) != true) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name hadr_start_barrier cannot be find when recovery" + " for PITR to find global barrier.")))); + } + readLen = ArchiveRead("global_barrier_records/hadr_start_barrier", 0, startTimeBuff, 24, archive_config); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name hadr_start_barrier cannot be read when recovery" + " for PITR to find global barrier records.")))); + } + char *startTimeStr = strrchr(startTimeBuff, '_'); + if (startTimeStr == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("parse first barrier time failed when find correct pitr record from barrier list.")))); + } + startTimeStr = startTimeStr + 1; + startTimeStamp = atol(startTimeStr); + if (targetTimeInPITR - startTimeStamp < 0) { + ereport(PANIC, (errmsg("The target time for PITR is less than global barrier start time," + " could not recovery."))); + } + cntFile = (targetTimeInPITR - startTimeStamp) / FILE_TIME_INTERVAL; + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, "global_barrier_records/hadr_barrier_%013ld", + startTimeStamp + cntFile * FILE_TIME_INTERVAL); + securec_check_ss_c(rc, "", ""); + if (ArchiveFileExist(barrierFileName, archive_config) == false) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier list file name %s cannot be find when recovery for PITR to find global barrier.", + barrierFileName)))); + } else { + readLen = 0; + readLen = ArchiveRead(barrierFileName, 0, barrierRecordBuff, OBS_XLOG_SLICE_FILE_SIZE, archive_config); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier list file: %s could not been read when PITR recovery processing.", + barrierFileName)))); + } + targetGlobalBarrier = FindTargetGlobalBarrierInRecords(barrierRecordBuff); + } + pfree_ext(g_instance.roach_cxt.targetTimeInPITR); + + return targetGlobalBarrier; +} + +uint64 ReadBarrierTimelineRecordFromObs(const char* archiveSlotName) +{ + ArchiveConfig* archiveConfig = NULL; + ArchiveSlotConfig* archiveSlot = getArchiveReplicationSlotWithName(archiveSlotName); + int readLen = 0; + uint64 lastTimeline = 0; + char* token = NULL; + char* outerPtr = NULL; + char endbarrierRecordBuff[MAXPGPATH] = {0}; + + if (archiveSlot == NULL) { + ereport(ERROR, (errmsg("Cannot get archive config from replication slots when read barrier cn timeline"))); + } + archiveConfig = &archiveSlot->archive_config; + if (ArchiveFileExist("global_barrier_records/hadr_start_barrier", archiveConfig) != true) { + ereport(LOG, (errmsg("this is the first time we make a barrier"))); + return 0; + } else { + if (ArchiveFileExist(BARRIER_PATH, archiveConfig) == true) { + readLen = ArchiveRead(BARRIER_PATH, 0, endbarrierRecordBuff, + MAXPGPATH, archiveConfig); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The hadr_end_barrier file could not been read.")))); + } + token = strtok_s(endbarrierRecordBuff, "_", &outerPtr); + token = strtok_s(NULL, "_", &outerPtr); + token = strtok_s(NULL, "-", &outerPtr); + if (outerPtr == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), (errmsg("parse barrier end file record failed.")))); + } + char* timelineStr = strrchr(outerPtr, '_'); + if (timelineStr == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("parse cn names from barrier end file record failed.")))); + } + timelineStr += 1; 
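/* The timeline parsed above is reused as-is when this CN wrote the end-barrier
 * record (PGXCNodeName prefix match below), and bumped by one when another CN
 * wrote it -- the assumed rule keeping first_cn_timeline monotonic across CN
 * switchover. */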
+ lastTimeline = strtoul(timelineStr, NULL, 10); + if (strncasecmp(outerPtr, g_instance.attr.attr_common.PGXCNodeName, + strlen(g_instance.attr.attr_common.PGXCNodeName)) == 0) { + return lastTimeline; + } else { + return lastTimeline + 1; + } + } else { + ereport(LOG, (errmsg("could not read hadr end barrier file, timeline set to 0"))); + return 0; + } + } +} + +int GetArchiveXLogFileTotalNum(ArchiveConfig *archiverConfig, XLogRecPtr endLsn) +{ + char buffer[MAXPGPATH] = {0}; + char statusFile[MAXPGPATH] = {0}; + int readLen = 0; + char* lastPtr = NULL; + char* token = NULL; + int result = 0; + errno_t rc = EOK; + + if (IS_PGXC_COORDINATOR) { + rc = snprintf_s(statusFile, MAXPGPATH, MAXPGPATH - 1, "%s/%s", + g_instance.attr.attr_common.PGXCNodeName, OBS_ARCHIVE_STATUS_FILE); + securec_check_ss(rc, "\0", "\0"); + } else { + rc = snprintf_s(statusFile, MAXPGPATH, MAXPGPATH - 1, "%s", OBS_ARCHIVE_STATUS_FILE); + securec_check_ss(rc, "\0", "\0"); + } + + readLen = ArchiveRead(statusFile, 0, buffer, MAXPGPATH, archiverConfig); + if (readLen < 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Could not read obs_archive_start_end_record file when get total xlog files.")))); + } + token = strtok_s(buffer, "-", &lastPtr); + token = strtok_s(NULL, "_", &lastPtr); + token = strtok_s(NULL, "-", &lastPtr); + if (token == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Get archive status file record failed because could not parse context.")))); + } + XLogRecPtr startLsn = (XLogRecPtr)strtoul(token, NULL, 10); + result = (endLsn / XLogSegSize) - (startLsn / XLogSegSize); + return result; +} + +static char* DeleteBarrierRecordAndUpdateOnMedia(char* globalBarrierRecords, long stopBarrierTimestamp, + char* barrierFileName, ArchiveConfig* currSlotArchConfig, bool reverse) +{ + char* token = NULL; + char* outerPtr = NULL; + char* outputStr = NULL; + int messageLen = 0; + errno_t rc = EOK; + int ret = 0; + if (globalBarrierRecords[0] == '\0') { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The barrier record is null when delete oldest barrier records.")))); + } + token = strtok_s(globalBarrierRecords, "\n", &outerPtr); + if (token == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Parse barrier records file %s faild, because token is null.", barrierFileName)))); + } + if (!reverse) { + while (token != NULL) { + char* tmptoken = NULL; + char* lastPtr = NULL; + outputStr = pstrdup(token); + tmptoken = strtok_s(token, "_", &lastPtr); + tmptoken = strtok_s(NULL, "_", &lastPtr); + tmptoken = strtok_s(NULL, "-", &lastPtr); + if (tmptoken == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Parse barrier records file %s record %s faild, because tmptoken is null.", + barrierFileName, outputStr)))); + } + if (atol(tmptoken) < (stopBarrierTimestamp)) { + token = strtok_s(NULL, "\n", &outerPtr); + pfree_ext(outputStr); + continue; + } + break; + } + messageLen = strlen(outerPtr); + ret = ArchiveWrite(barrierFileName, outerPtr, messageLen, currSlotArchConfig); + if (ret != 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The %s file could not been update when delete old barrier records.", barrierFileName)))); + } + } else { + char newRecords[OBS_XLOG_SLICE_FILE_SIZE] = {0}; + char record[MAXPGPATH] = {0}; + while (token != NULL) { + char* tmptoken = NULL; + char* lastPtr = NULL; + outputStr = pstrdup(token); + tmptoken = strtok_s(token, "_", &lastPtr); + tmptoken = strtok_s(NULL, "_", &lastPtr); + tmptoken = 
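/*
 * Contract of this helper, as exercised by DeleteStopBarrierRecordsOnMedia
 * below:
 *   reverse == false: strip records older than the timestamp from the front of
 *       the file, rewrite the remainder, and return the new oldest record;
 *   reverse == true : keep only records at or before the timestamp (truncating
 *       the tail) and rewrite; the caller ignores the return value.
 */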
strtok_s(NULL, "-", &lastPtr); + if (tmptoken == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("Parse barrier records file %s record %s faild, because tmptoken is null.", + barrierFileName, outputStr)))); + } + if (atol(tmptoken) <= stopBarrierTimestamp) { + rc = snprintf_s(record, MAXPGPATH, MAXPGPATH - 1, "%s\n", outputStr); + securec_check_ss_c(rc, "", ""); + strcat_s(newRecords, OBS_XLOG_SLICE_FILE_SIZE, record); + securec_check_c(ret, "\0", "\0"); + token = strtok_s(NULL, "\n", &outerPtr); + pfree_ext(outputStr); + continue; + } + break; + } + messageLen = strlen(newRecords); + ret = ArchiveWrite(barrierFileName, newRecords, messageLen, currSlotArchConfig); + if (ret != 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The %s file could not been update when delete old barrier records.", barrierFileName)))); + } + } + return outputStr; +} + +char* DeleteStopBarrierRecordsOnMedia(long stopBarrierTimestamp, long endBarrierTimestamp) +{ + ArchiveConfig* currSlotArchConfig = NULL; + char startTimeBuff[MAXPGPATH] = {0}; + char barrierFileName[MAXPGPATH] = {0}; + char barrierRecordBuff[OBS_XLOG_SLICE_FILE_SIZE] = {0}; + long startTimeStamp = 0; + int readLen = 0; + int totalFiles = 0; + int startFilesNum = 0; + const int msecPerSec = 1000; + char* currOldestRecord = NULL; + errno_t rc = EOK; + + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; + currSlotArchConfig = NULL; + SpinLockAcquire(&slot->mutex); + if (slot->in_use == true && slot->archive_config != NULL && slot->archive_config->is_recovery == false && + GET_SLOT_PERSISTENCY(slot->data) != RS_BACKUP) { + currSlotArchConfig = slot->archive_config; + } + SpinLockRelease(&slot->mutex); + if (currSlotArchConfig != NULL) { + if (ArchiveFileExist("global_barrier_records/hadr_start_barrier", currSlotArchConfig) != true) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name hadr_start_barrier cannot be find when delete old barrier records.")))); + } + readLen = ArchiveRead("global_barrier_records/hadr_start_barrier", 0, startTimeBuff, 24, + currSlotArchConfig); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name hadr_start_barrier cannot be read when delete old barrier records.")))); + } + char *startTimeStr = strrchr(startTimeBuff, '_'); + if (startTimeStr == NULL) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("delete barrier failed because could not parse barrier first start timestamp.")))); + } + startTimeStr = startTimeStr + 1; + startTimeStamp = atol(startTimeStr); + if ((stopBarrierTimestamp / msecPerSec) < (startTimeStamp / msecPerSec)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errmsg("The delete time is smaller than barrier start time, could not delete any records.")))); + } + if (endBarrierTimestamp > startTimeStamp) { + startFilesNum = (endBarrierTimestamp - startTimeStamp) / FILE_TIME_INTERVAL; + totalFiles = (stopBarrierTimestamp - endBarrierTimestamp) / FILE_TIME_INTERVAL; + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", startTimeStamp + startFilesNum * FILE_TIME_INTERVAL); + securec_check_ss_c(rc, "", ""); + readLen = 0; + readLen = ArchiveRead(barrierFileName, 0, barrierRecordBuff, OBS_XLOG_SLICE_FILE_SIZE, + currSlotArchConfig); + (void)DeleteBarrierRecordAndUpdateOnMedia(barrierRecordBuff, endBarrierTimestamp, 
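/*
 * Per-slot cleanup flow, in sketch form:
 *
 *     boundary bucket : rewritten in place (reverse == true), records after
 *                       endBarrierTimestamp truncated
 *     middle buckets  : whole hadr_barrier_* files removed via ArchiveDelete()
 *     final bucket    : rewritten in place (reverse == false), records older
 *                       than stopBarrierTimestamp dropped, survivor returned
 */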
+ barrierFileName, currSlotArchConfig, true); + int currFileCnt = 1; + while (currFileCnt < totalFiles) { + CHECK_FOR_INTERRUPTS(); + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", + startTimeStamp + ((startFilesNum + currFileCnt) * FILE_TIME_INTERVAL)); + securec_check_ss_c(rc, "", ""); + if (!ArchiveFileExist(barrierFileName, currSlotArchConfig)) { + currFileCnt++; + continue; + } + ArchiveDelete(barrierFileName, currSlotArchConfig); + currFileCnt++; + } + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", + startTimeStamp + ((startFilesNum + currFileCnt) * FILE_TIME_INTERVAL)); + securec_check_ss_c(rc, "", ""); + if (!ArchiveFileExist(barrierFileName, currSlotArchConfig)) { + return NULL; + } + readLen = 0; + readLen = ArchiveRead(barrierFileName, 0, barrierRecordBuff, OBS_XLOG_SLICE_FILE_SIZE, + currSlotArchConfig); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name %s cannot be read when delete old barrier records.", + barrierFileName)))); + } + currOldestRecord = DeleteBarrierRecordAndUpdateOnMedia(barrierRecordBuff, stopBarrierTimestamp, + barrierFileName, currSlotArchConfig, false); + return currOldestRecord; + } + totalFiles = (stopBarrierTimestamp - startTimeStamp) / FILE_TIME_INTERVAL; + int currFileCnt = 0; + while (currFileCnt < totalFiles) { + CHECK_FOR_INTERRUPTS(); + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", startTimeStamp + currFileCnt * FILE_TIME_INTERVAL); + securec_check_ss_c(rc, "", ""); + if (!ArchiveFileExist(barrierFileName, currSlotArchConfig)) { + currFileCnt++; + continue; + } + ArchiveDelete(barrierFileName, currSlotArchConfig); + currFileCnt++; + } + rc = snprintf_s(barrierFileName, MAXPGPATH, MAXPGPATH - 1, + "global_barrier_records/hadr_barrier_%013ld", startTimeStamp + currFileCnt * FILE_TIME_INTERVAL); + securec_check_ss_c(rc, "", ""); + if (!ArchiveFileExist(barrierFileName, currSlotArchConfig)) { + return NULL; + } + readLen = 0; + readLen = ArchiveRead(barrierFileName, 0, barrierRecordBuff, OBS_XLOG_SLICE_FILE_SIZE, + currSlotArchConfig); + if (readLen <= 0) { + ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), + (errmsg("The file name %s cannot be read when delete old barrier records.", barrierFileName)))); + } + currOldestRecord = DeleteBarrierRecordAndUpdateOnMedia(barrierRecordBuff, stopBarrierTimestamp, + barrierFileName, currSlotArchConfig, false); + } + } + return currOldestRecord; +} +#else +bool archive_connect(char* conninfo, XLogRecPtr* startpoint, char* slotname, int channel_identifier) +{ + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + return false; +} + +bool archive_receive(int timeout, unsigned char* type, char** buffer, int* len) +{ + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); + return false; +} + +void archive_send(const char *buffer, int nbytes) +{ + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +} +void archive_disconnect(void) +{ + FEATURE_ON_LITE_MODE_NOT_SUPPORTED(); +} +#endif diff --git a/src/gausskernel/storage/replication/basebackup.cpp b/src/gausskernel/storage/replication/basebackup.cpp index b789b0197..ca1727c6d 100755 --- a/src/gausskernel/storage/replication/basebackup.cpp +++ b/src/gausskernel/storage/replication/basebackup.cpp @@ -21,6 +21,7 @@ #include "access/cbmparsexlog.h" #include "catalog/catalog.h" #include "catalog/pg_type.h" +#include "gs_thread.h" #include "lib/stringinfo.h" #include "libpq/libpq.h" #include 
"libpq/pqformat.h" @@ -33,7 +34,6 @@ #include "access/xlog.h" #include "storage/smgr/fd.h" #include "storage/ipc.h" -#include "storage/page_compression.h" #include "storage/pmsignal.h" #include "storage/checksum.h" #ifdef ENABLE_MOT @@ -81,7 +81,7 @@ const int MATCH_SEVEN = 7; #define EREPORT_WAL_NOT_FOUND(segno) \ do { \ char walErrorName[MAXFNAMELEN]; \ - XLogFileName(walErrorName, t_thrd.xlog_cxt.ThisTimeLineID, segno); \ + XLogFileName(walErrorName, MAXFNAMELEN, t_thrd.xlog_cxt.ThisTimeLineID, segno); \ ereport(ERROR, (errmsg("could not find WAL file \"%s\"", walErrorName))); \ } while (0) @@ -116,9 +116,6 @@ static void send_xlog_header(const char *linkpath); static void save_xlogloc(const char *xloglocation); static XLogRecPtr GetMinArchiveSlotLSN(void); -/* compressed Function */ -static void SendCompressedFile(char* readFileName, int basePathLen, struct stat& statbuf, bool missingOk, int64* size); - /* * save xlog location */ @@ -375,6 +372,7 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) char *labelfile = NULL; char* tblspc_map_file = NULL; List* tablespaces = NIL; + XLogSegNo startSegNo; if (opt->isBuildFromStandby) { startptr = StandbyDoStartBackup(opt->label, &labelfile, &tblspc_map_file, &tablespaces, @@ -400,7 +398,9 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) } disasterSlotRestartPtr = GetMinArchiveSlotLSN(); if (disasterSlotRestartPtr != InvalidXLogRecPtr && XLByteLT(disasterSlotRestartPtr, startptr)) { - startptr = disasterSlotRestartPtr; + if (XlogFileIsExisted(t_thrd.proc_cxt.DataDir, disasterSlotRestartPtr, DEFAULT_TIMELINE_ID) == true) { + startptr = disasterSlotRestartPtr; + } } LWLockAcquire(FullBuildXlogCopyStartPtrLock, LW_EXCLUSIVE); XlogCopyStartPtr = startptr; @@ -412,6 +412,11 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) #ifdef ENABLE_MULTIPLE_NODES cbm_rotate_file(startptr); #endif + XLByteToSeg(startptr, startSegNo); + XLogSegNo lastRemovedSegno = XLogGetLastRemovedSegno(); + if (startSegNo <= lastRemovedSegno) { + startptr = (lastRemovedSegno + 1) * XLOG_SEG_SIZE; + } SendXlogRecPtrResult(startptr); PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum)0); @@ -455,9 +460,9 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) * including them. */ XLByteToSeg(startptr, startsegno); - XLogFileName(firstoff, t_thrd.xlog_cxt.ThisTimeLineID, startsegno); + XLogFileName(firstoff, MAXFNAMELEN, t_thrd.xlog_cxt.ThisTimeLineID, startsegno); XLByteToPrevSeg(endptr, endsegno); - XLogFileName(lastoff, t_thrd.xlog_cxt.ThisTimeLineID, endsegno); + XLogFileName(lastoff, MAXFNAMELEN, t_thrd.xlog_cxt.ThisTimeLineID, endsegno); DIR* dir = AllocateDir("pg_xlog"); if (!dir) { @@ -588,7 +593,7 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) * walreceiver.c always doing a XLogArchiveForceDone() after a * complete segment. 
*/ - StatusFilePath(pathbuf, walFiles[i], ".done"); + StatusFilePath(pathbuf, MAXPGPATH, walFiles[i], ".done"); sendFileWithContent(pathbuf, ""); } @@ -604,7 +609,7 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) foreach (lc, historyFileList) { char* fname = (char*)lfirst(lc); - int rt = snprintf_s(pathbuf, MAXPGPATH, MAXPGPATH-1, XLOGDIR "/%s", fname); + int rt = snprintf_s(pathbuf, MAXPGPATH, MAXPGPATH-1, "/%s", fname); securec_check_ss_c(rt, "\0", "\0"); if (lstat(pathbuf, &statbuf) != 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", pathbuf))); @@ -612,7 +617,7 @@ static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir) sendFile(pathbuf, pathbuf, &statbuf, false); /* unconditionally mark file as archived */ - StatusFilePath(pathbuf, fname, ".done"); + StatusFilePath(pathbuf, MAXPGPATH, fname, ".done"); sendFileWithContent(pathbuf, ""); } @@ -1149,14 +1154,39 @@ bool IsSkipDir(const char * dirName) if (strcmp(dirName, DISABLE_CONN_FILE) == 0) return true; - return false; + return false; } +int IsBeginWith(const char *str1, char *str2) +{ + if (str1 == NULL || str2 == NULL) + return -1; + int len1 = strlen(str1); + int len2 = strlen(str2); + if ((len1 < len2) || (len1 == 0 || len2 == 0)) { + return -1; + } + + char *p = str2; + int i = 0; + while (*p != '\0') { + if (*p != str1[i]) { + return 0; + } + p++; + i++; + } + return 1; +} + + bool IsSkipPath(const char * pathName) { /* Skip pg_control here to back up it last */ if (strcmp(pathName, "./global/pg_control") == 0) return true; + if (IsBeginWith(pathName, "./global/pg_dw") > 0) + return true; if (strcmp(pathName, "./global/pg_dw") == 0) return true; if (strcmp(pathName, "./global/pg_dw_single") == 0) @@ -1189,7 +1219,7 @@ bool IsSkipPath(const char * pathName) if (t_thrd.walsender_cxt.is_obsmode == true && strcmp(pathName, "./pg_replslot") == 0) return true; - return false; + return false; } static bool IsDCFPath(const char *pathname) @@ -1229,35 +1259,6 @@ static bool IsDCFPath(const char *pathname) return false; } -#define SEND_DIR_ADD_SIZE(size, statbuf) ((size) = (size) + (((statbuf).st_size + 511) & ~511) + BUILD_PATH_LEN) - -/** - * send file or compressed file - * @param sizeOnly send or not - * @param pathbuf path - * @param pathBufLen pathLen - * @param basepathlen subfix of path - * @param statbuf path stat - */ -static void SendRealFile(bool sizeOnly, char* pathbuf, size_t pathBufLen, int basepathlen, struct stat* statbuf) -{ - int64 size = 0; - // we must ensure the page integrity when in IncrementalCheckpoint - if (!sizeOnly && g_instance.attr.attr_storage.enableIncrementalCheckpoint && - IsCompressedFile(pathbuf, strlen(pathbuf)) != COMPRESSED_TYPE_UNKNOWN) { - SendCompressedFile(pathbuf, basepathlen, (*statbuf), true, &size); - } else { - bool sent = false; - if (!sizeOnly) { - sent = sendFile(pathbuf, pathbuf + basepathlen + 1, statbuf, true); - } - if (sent || sizeOnly) { - /* Add size, rounded up to 512byte block */ - SEND_DIR_ADD_SIZE(size, (*statbuf)); - } - } -} - /* * Include all files from the given directory in the output tar stream. 
If * 'sizeonly' is true, we just calculate a total length and return it, without @@ -1313,7 +1314,7 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab if (strcmp(pathbuf, "./postmaster.pid") == 0 || strcmp(pathbuf, "./postmaster.opts") == 0 || strcmp(pathbuf, "./gs_gazelle.conf") == 0) continue; - + /* For gs_basebackup, we should not skip these files */ if (strcmp(u_sess->attr.attr_common.application_name, "gs_basebackup") != 0) { /* For gs_backup, we should not skip these files */ @@ -1378,6 +1379,7 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab */ if (strcmp(path, "./pg_replslot") == 0) { bool isphysicalslot = false; + bool isArchiveSlot = false; LWLockAcquire(ReplicationSlotControlLock, LW_SHARED); for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { ReplicationSlot *s = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; @@ -1386,10 +1388,15 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab isphysicalslot = true; break; } + if (s->in_use && s->data.database == InvalidOid && strcmp(de->d_name, NameStr(s->data.name)) == 0 && + GET_SLOT_PERSISTENCY(s->data) != RS_BACKUP && s->extra_content != NULL) { + isArchiveSlot = true; + break; + } } LWLockRelease(ReplicationSlotControlLock); - if (isphysicalslot) + if (isphysicalslot || (isArchiveSlot && AM_WAL_HADR_DNCN_SENDER)) continue; } @@ -1550,7 +1557,15 @@ static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tab if (!skip_this_dir) size += sendDir(pathbuf, basepathlen, sizeonly, tablespaces, sendtblspclinks); } else if (S_ISREG(statbuf.st_mode)) { - SendRealFile(sizeonly, pathbuf, strlen(pathbuf), basepathlen, &statbuf); + bool sent = false; + + if (!sizeonly) + sent = sendFile(pathbuf, pathbuf + basepathlen + 1, &statbuf, true); + + if (sent || sizeonly) { + /* Add size, rounded up to 512byte block */ + size = size + ((statbuf.st_size + 511) & ~511) + BUILD_PATH_LEN; + } } else ereport(WARNING, (errmsg("skipping special file \"%s\"", pathbuf))); } @@ -1677,15 +1692,6 @@ bool is_row_data_file(const char *path, int *segNo, UndoFileType *undoFileType) int nmatch; char *fname = NULL; - /* Skip compressed page files */ - size_t pathLen = strlen(path); - if (pathLen >= 4) { - const char* suffix = path + pathLen - 4; - if (strncmp(suffix, "_pca", 4) == 0 || strncmp(suffix, "_pcd", 4) == 0) { - return false; - } - } - if ((fname = strstr((char *)path, "pg_tblspc/")) != NULL) { nmatch = sscanf_s(fname, "pg_tblspc/%u/%*[^/]/%u/%s", &spcNode, &dbNode, buf, sizeof(buf)); if (nmatch == 3) { @@ -1783,7 +1789,7 @@ static void SendTableSpaceForBackup(basebackup_options* opt, List* tablespaces, /* In the main tar, include the last timeline history file at last. 
*/ primay_tli = t_thrd.xlog_cxt.ThisTimeLineID; while (primay_tli > 1) { - TLHistoryFilePath(path, primay_tli); + TLHistoryFilePath(path, MAXPGPATH, primay_tli); if (lstat(path, &statbuf) == 0) sendFile(path, path, &statbuf, false); primay_tli--; @@ -1803,245 +1809,6 @@ static void SendTableSpaceForBackup(basebackup_options* opt, List* tablespaces, } } -/** - * init buf_block if not yet; repalloc PqSendBuffer if necessary - */ -static void SendFilePreInit(void) -{ - if (t_thrd.basebackup_cxt.buf_block == NULL) { - MemoryContext oldcxt = MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); - t_thrd.basebackup_cxt.buf_block = (char *)palloc0(TAR_SEND_SIZE); - MemoryContextSwitchTo(oldcxt); - } - - /* - * repalloc to `MaxBuildAllocSize' in one time, to avoid many small step repalloc in `pq_putmessage_noblock' - * and low performance. - */ - if (INT2SIZET(t_thrd.libpq_cxt.PqSendBufferSize) < MaxBuildAllocSize) { - t_thrd.libpq_cxt.PqSendBuffer = (char *)repalloc(t_thrd.libpq_cxt.PqSendBuffer, MaxBuildAllocSize); - t_thrd.libpq_cxt.PqSendBufferSize = MaxBuildAllocSize; - } -} - -/** - * check file - * @param readFileName - * @param statbuf - * @param supress error if missingOk is false when file is not found - * @return return null if file.size > MAX_TAR_MEMBER_FILELEN or file cant found - */ -static FILE *SizeCheckAndAllocate(char *readFileName, const struct stat &statbuf, bool missingOk) -{ - /* - * Some compilers will throw a warning knowing this test can never be true - * because pgoff_t can't exceed the compared maximum on their platform. - */ - if (statbuf.st_size > MAX_TAR_MEMBER_FILELEN) { - ereport(WARNING, (errcode(ERRCODE_NAME_TOO_LONG), - errmsg("archive member \"%s\" too large for tar format", readFileName))); - return NULL; - } - - FILE *fp = AllocateFile(readFileName, "rb"); - if (fp == NULL) { - if (errno == ENOENT && missingOk) - return NULL; - ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", readFileName))); - } - return fp; - -} - -static void TransferPcaFile(const char *readFileName, int basePathLen, const struct stat &statbuf, - PageCompressHeader *transfer, - size_t len) -{ - const char *tarfilename = readFileName + basePathLen + 1; - _tarWriteHeader(tarfilename, NULL, (struct stat*)(&statbuf)); - char *data = (char *) transfer; - size_t lenBuffer = len; - while (lenBuffer > 0) { - size_t transferLen = Min(TAR_SEND_SIZE, lenBuffer); - if (pq_putmessage_noblock('d', data, transferLen)) { - ereport(ERROR, (errcode_for_file_access(), errmsg("base backup could not send data, aborting backup"))); - } - data = data + transferLen; - lenBuffer -= transferLen; - } - size_t pad = ((len + 511) & ~511) - len; - if (pad > 0) { - securec_check(memset_s(t_thrd.basebackup_cxt.buf_block, pad, 0, pad), "", ""); - (void) pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, pad); - } -} - -static void FileStat(char* path, struct stat* fileStat) -{ - if (stat(path, fileStat) != 0) { - if (errno != ENOENT) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat file or directory \"%s\": %m", path))); - } - } -} - -static void SendCompressedFile(char* readFileName, int basePathLen, struct stat& statbuf, bool missingOk, int64* size) -{ - char* tarfilename = readFileName + basePathLen + 1; - SendFilePreInit(); - FILE* fp = SizeCheckAndAllocate(readFileName, statbuf, missingOk); - if (fp == NULL) { - return; - } - - size_t readFileNameLen = strlen(readFileName); - /* dont send pca file */ - if (readFileNameLen < 4 || 
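/* In the code being removed here, "_pca" files held the compressed-page
 * address map and "_pcd" files the compressed chunks; both suffixes lose their
 * special handling in this patch. */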
strncmp(readFileName + readFileNameLen - 4, "_pca", 4) == 0 || - strncmp(readFileName + readFileNameLen - 4, "_pcd", 4) != 0) { - FreeFile(fp); - return; - } - - char tablePath[MAXPGPATH] = {0}; - securec_check_c(memcpy_s(tablePath, MAXPGPATH, readFileName, readFileNameLen - 4), "", ""); - int segmentNo = 0; - UndoFileType undoFileType = UNDO_INVALID; - if (!is_row_data_file(tablePath, &segmentNo, &undoFileType)) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), errmsg("%s is not a relation file.", tablePath))); - } - - char pcaFilePath[MAXPGPATH]; - securec_check_c(memcpy_s(pcaFilePath, MAXPGPATH, readFileName, readFileNameLen), "", ""); - pcaFilePath[readFileNameLen - 1] = 'a'; - - FILE* pcaFile = AllocateFile(pcaFilePath, "rb"); - if (pcaFile == NULL) { - if (errno == ENOENT && missingOk) { - FreeFile(fp); - return; - } - ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", pcaFilePath))); - } - - uint16 chunkSize = ReadChunkSize(pcaFile, pcaFilePath, MAXPGPATH); - - struct stat pcaStruct; - FileStat((char*)pcaFilePath, &pcaStruct); - - size_t pcaFileLen = SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunkSize); - PageCompressHeader* map = pc_mmap_real_size(fileno(pcaFile), pcaFileLen, true); - if (map == MAP_FAILED) { - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("Failed to mmap page compression address file %s: %m", pcaFilePath))); - } - - PageCompressHeader* transfer = (PageCompressHeader*)palloc0(pcaFileLen); - /* decompressed page buffer, avoid frequent allocation */ - BlockNumber blockNum = 0; - size_t chunkIndex = 1; - off_t totalLen = 0; - off_t sendLen = 0; - /* send the pkg header containing msg like file size */ - BlockNumber totalBlockNum = (BlockNumber)pg_atomic_read_u32(&map->nblocks); - - /* some chunks may have been allocated but not used. - * Reserve 0 chunks for avoiding the error when the size of a compressed block extends */ - auto reservedChunks = 0; - securec_check(memcpy_s(transfer, pcaFileLen, map, pcaFileLen), "", ""); - decltype(statbuf.st_size) realSize = (map->allocated_chunks + reservedChunks) * chunkSize; - statbuf.st_size = statbuf.st_size >= realSize ? statbuf.st_size : realSize; - _tarWriteHeader(tarfilename, NULL, (struct stat*)(&statbuf)); - bool* onlyExtend = (bool*)palloc0(totalBlockNum * sizeof(bool)); - - /* allocated in advance to prevent repeated allocated */ - char pageBuffer[BLCKSZ]; - ReadBlockChunksStruct rbStruct{map, pageBuffer, BLCKSZ, fp, segmentNo, readFileName}; - for (blockNum = 0; blockNum < totalBlockNum; blockNum++) { - PageCompressAddr* addr = GET_PAGE_COMPRESS_ADDR(transfer, chunkSize, blockNum); - /* skip some blocks which only extends. The size of blocks is 0. */ - if (addr->nchunks == 0) { - onlyExtend[blockNum] = true; - continue; - } - /* read block to t_thrd.basebackup_cxt.buf_block */ - size_t bufferSize = TAR_SEND_SIZE - sendLen; - size_t len = ReadAllChunkOfBlock(t_thrd.basebackup_cxt.buf_block + sendLen, bufferSize, blockNum, rbStruct); - /* merge Blocks */ - sendLen += len; - if (totalLen + (off_t)len > statbuf.st_size) { - ReleaseMap(map, readFileName); - ereport(ERROR, - (errcode_for_file_access(), - errmsg("some blocks in %s had been changed. Retry backup please. PostBlocks:%u, currentReadBlocks " - ":%u, transferSize: %lu. 
totalLen: %lu, len: %lu", - readFileName, - totalBlockNum, - blockNum, - statbuf.st_size, - totalLen, - len))); - } - if (sendLen > TAR_SEND_SIZE - BLCKSZ) { - if (pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, sendLen)) { - ReleaseMap(map, readFileName); - ereport(ERROR, (errcode_for_file_access(), errmsg("base backup could not send data, aborting backup"))); - } - sendLen = 0; - } - uint8 nchunks = len / chunkSize; - addr->nchunks = addr->allocated_chunks = nchunks; - for (size_t i = 0; i < nchunks; i++) { - addr->chunknos[i] = chunkIndex++; - } - addr->checksum = AddrChecksum32(blockNum, addr, chunkSize); - totalLen += len; - } - ReleaseMap(map, readFileName); - - if (sendLen != 0) { - if (pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, sendLen)) { - ereport(ERROR, (errcode_for_file_access(), errmsg("base backup could not send data, aborting backup"))); - } - } - - /* If the file was truncated while we were sending it, pad it with zeros */ - if (totalLen < statbuf.st_size) { - securec_check(memset_s(t_thrd.basebackup_cxt.buf_block, TAR_SEND_SIZE, 0, TAR_SEND_SIZE), "", ""); - while (totalLen < statbuf.st_size) { - size_t cnt = Min(TAR_SEND_SIZE, statbuf.st_size - totalLen); - (void)pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, cnt); - totalLen += cnt; - } - } - - size_t pad = ((totalLen + 511) & ~511) - totalLen; - if (pad > 0) { - securec_check(memset_s(t_thrd.basebackup_cxt.buf_block, pad, 0, pad), "", ""); - (void)pq_putmessage_noblock('d', t_thrd.basebackup_cxt.buf_block, pad); - } - SEND_DIR_ADD_SIZE(*size, statbuf); - - // allocate chunks of some pages which only extend - for (size_t blockNum = 0; blockNum < totalBlockNum; ++blockNum) { - if (onlyExtend[blockNum]) { - PageCompressAddr* addr = GET_PAGE_COMPRESS_ADDR(transfer, chunkSize, blockNum); - for (size_t i = 0; i < addr->allocated_chunks; i++) { - addr->chunknos[i] = chunkIndex++; - } - } - } - transfer->nblocks = transfer->last_synced_nblocks = blockNum; - transfer->last_synced_allocated_chunks = transfer->allocated_chunks = chunkIndex; - TransferPcaFile(pcaFilePath, basePathLen, pcaStruct, transfer, pcaFileLen); - - SEND_DIR_ADD_SIZE(*size, pcaStruct); - FreeFile(pcaFile); - FreeFile(fp); - pfree(transfer); - pfree(onlyExtend); -} - /* * Given the member, write the TAR header & send the file. * @@ -2066,12 +1833,40 @@ static bool sendFile(char *readfilename, char *tarfilename, struct stat *statbuf int retryCnt = 0; UndoFileType undoFileType = UNDO_INVALID; - SendFilePreInit(); - fp = SizeCheckAndAllocate(readfilename, *statbuf, missing_ok); - if (fp == NULL) { + if (t_thrd.basebackup_cxt.buf_block == NULL) { + MemoryContext oldcxt = NULL; + + oldcxt = MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE)); + t_thrd.basebackup_cxt.buf_block = (char *)palloc0(TAR_SEND_SIZE); + MemoryContextSwitchTo(oldcxt); + } + + /* + * repalloc to `MaxBuildAllocSize' in one time, to avoid many small step repalloc in `pq_putmessage_noblock' + * and low performance. + */ + if (INT2SIZET(t_thrd.libpq_cxt.PqSendBufferSize) < MaxBuildAllocSize) { + t_thrd.libpq_cxt.PqSendBuffer = (char *)repalloc(t_thrd.libpq_cxt.PqSendBuffer, MaxBuildAllocSize); + t_thrd.libpq_cxt.PqSendBufferSize = MaxBuildAllocSize; + } + + /* + * Some compilers will throw a warning knowing this test can never be true + * because pgoff_t can't exceed the compared maximum on their platform. 
+ */ + if (statbuf->st_size > MAX_FILE_SIZE_LIMIT) { + ereport(WARNING, (errcode(ERRCODE_NAME_TOO_LONG), + errmsg("archive member \"%s\" too large for tar format", tarfilename))); return false; } + fp = AllocateFile(readfilename, "rb"); + if (fp == NULL) { + if (errno == ENOENT && missing_ok) + return false; + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", readfilename))); + } + isNeedCheck = is_row_data_file(readfilename, &segNo, &undoFileType); ereport(DEBUG1, (errmsg("sendFile, filename is %s, isNeedCheck is %d", readfilename, isNeedCheck))); diff --git a/src/gausskernel/storage/replication/catchup.cpp b/src/gausskernel/storage/replication/catchup.cpp index 2489eef84..9274b1c28 100755 --- a/src/gausskernel/storage/replication/catchup.cpp +++ b/src/gausskernel/storage/replication/catchup.cpp @@ -285,6 +285,9 @@ NON_EXEC_STATIC void CatchupMain() /* Abort the current transaction in order to recover */ AbortCurrentTransaction(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * Now return to normal top-level context and clear ErrorContext for * next time. diff --git a/src/gausskernel/storage/replication/datarcvwriter.cpp b/src/gausskernel/storage/replication/datarcvwriter.cpp index 7064b2bef..812470349 100755 --- a/src/gausskernel/storage/replication/datarcvwriter.cpp +++ b/src/gausskernel/storage/replication/datarcvwriter.cpp @@ -162,6 +162,9 @@ void DataRcvWriterMain(void) /* abort async io, must before LWlock release */ AbortAsyncListIO(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * These operations are really just a minimal subset of * AbortTransaction(). We don't have very many resources to worry diff --git a/src/gausskernel/storage/replication/dcf/Makefile b/src/gausskernel/storage/replication/dcf/Makefile index d4d49cbe8..7b862825a 100644 --- a/src/gausskernel/storage/replication/dcf/Makefile +++ b/src/gausskernel/storage/replication/dcf/Makefile @@ -27,7 +27,7 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) -OBJS = dcf_replication.o dcf_callbackfuncs.o +OBJS = dcf_replication.o dcf_callbackfuncs.o dcf_flowcontrol.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/replication/dcf/dcf_callbackfuncs.cpp b/src/gausskernel/storage/replication/dcf/dcf_callbackfuncs.cpp index fb109ead5..39bde35e8 100755 --- a/src/gausskernel/storage/replication/dcf/dcf_callbackfuncs.cpp +++ b/src/gausskernel/storage/replication/dcf/dcf_callbackfuncs.cpp @@ -28,6 +28,7 @@ #include "postmaster/startup.h" #include "access/xlog.h" #include "replication/dcf_data.h" +#include "replication/dcf_flowcontrol.h" #include "replication/dcf_replication.h" #include "replication/walreceiver.h" #include "replication/syncrep.h" @@ -225,200 +226,6 @@ void ProcessStandbyFileTimeMessage(unsigned int src_node_id, const char* msg) } } -static bool GetNodeInfo(const uint32 nodeID, char *nodeIP, uint32 nodeIPLen, uint32 *nodePort) -{ - cJSON *nodeInfos = nullptr; - /* nodeInfos is null when GetNodeInfos returned false */ - if (!GetNodeInfos(&nodeInfos)) { - return false; - } - const cJSON *nodeJsons = cJSON_GetObjectItem(nodeInfos, "nodes"); - if (nodeJsons == nullptr) { - cJSON_Delete(nodeInfos); - ereport(ERROR, (errmodule(MOD_DCF), errmsg("Get nodes info failed from DCF!"))); - return false; - } - const int DCF_ROLE_LEN = 64; - char localDCFRole[DCF_ROLE_LEN] = {0}; - if (!GetDCFNodeInfo(nodeJsons, nodeID, localDCFRole, DCF_ROLE_LEN, nodeIP, nodeIPLen, 
(int*)nodePort)) { - cJSON_Delete(nodeInfos); - return false; - } - cJSON_Delete(nodeInfos); - return true; -} - -static void SetGlobalCurRto(int nodeIndex, int64 currentRto) -{ - /* Update global rto to show */ - if (currentRto != -1) { - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_RTO = - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].current_RTO; - - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].current_RTO = currentRto; - - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_rto = - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].current_RTO; - ereport(DEBUG1, (errmsg("current rto is %ld", - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_rto))); - } -} - -static void DoActualSleep(int nodeIndex, bool forceUpdate) -{ - /* try to control log sent rate so that standby can flush and apply log under RTO seconds */ - if (IS_PGXC_DATANODE) { - if (t_thrd.dcf_cxt.dcfCtxInfo->targetRTO > 0 || t_thrd.dcf_cxt.dcfCtxInfo->targetRPO > 0) { - if ((t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count % - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count_limit == 0) - || t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].just_keep_alive || forceUpdate) { - DCFLogCtrlCalculateSleepTime(&(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex])); - DCFLogCtrlCountSleepLimit(&(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex])); - } - if (!t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].just_keep_alive) { - pgstat_report_waitevent(WAIT_EVENT_LOGCTRL_SLEEP); - /* The logical replication sleep isn't considered now */ - SleepNodeReplication(nodeIndex); - pgstat_report_waitevent(WAIT_EVENT_END); - } - } - } - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count++; -} - -static void SetGlobalRtoData(int nodeIndex, uint32 srcNodeID) -{ - if (IS_PGXC_DATANODE) { - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_rto = - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].current_RTO; - if (t_thrd.dcf_cxt.dcfCtxInfo->targetRTO == 0) { - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_sleep_time = 0; - } else { - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_sleep_time = - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time; - } - if (g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].target_rto != t_thrd.dcf_cxt.dcfCtxInfo->targetRTO) { - ereport(LOG, (errmodule(MOD_RTO_RPO), - errmsg("target_rto changes to %d, previous target_rto is %d, current the sleep time is %ld", - t_thrd.dcf_cxt.dcfCtxInfo->targetRTO, - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].target_rto, - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_sleep_time))); - - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].target_rto = t_thrd.dcf_cxt.dcfCtxInfo->targetRTO; - } - - char *remoteIP = g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].dest_ip; - uint32 remotePort = 0; - char *localIP = g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].source_ip; - uint32 localPort = 0; - uint32 leaderID = -1; - if (GetNodeInfo(srcNodeID, remoteIP, IP_LEN, &remotePort)) { - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].dest_port = (int)remotePort; - } else { - ereport(WARNING, (errmsg("Get ip and port of node with nodeID %u failed", srcNodeID))); - } - if (QueryLeaderNodeInfo(&leaderID, localIP, IP_LEN, &localPort)) { - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].source_port = (int)localPort; - } else { - ereport(WARNING, (errmsg("Get ip and port of leader failed"))); - } - } -} - -static bool GetFlowControlParam(void) -{ - /* Get current rto and rpo */ - const 
int rtoLen = 10; /* 10 is enough for rto is between 0 and 3600 */ - char tempRTO[rtoLen] = {0}; - const int rpoLen = 10; /* 10 is enough for rpo is between 0 and 3600 */ - char tempRPO[rtoLen] = {0}; - if (dcf_get_param("DN_FLOW_CONTROL_RTO", tempRTO, rtoLen) != 0) { - ereport(WARNING, (errmodule(MOD_RTO_RPO), errmsg("Get rto from dcf failed!"))); - return false; - } - t_thrd.dcf_cxt.dcfCtxInfo->targetRTO = atoi(tempRTO); - ereport(DEBUG1, (errmodule(MOD_RTO_RPO), - errmsg("target rto got from dcf is %d", - t_thrd.dcf_cxt.dcfCtxInfo->targetRTO))); - if (dcf_get_param("DN_FLOW_CONTROL_RPO", tempRPO, rpoLen) != 0) { - ereport(WARNING, (errmodule(MOD_RTO_RPO), errmsg("Get rpo from dcf failed!"))); - return false; - } - t_thrd.dcf_cxt.dcfCtxInfo->targetRPO = atoi(tempRPO); - ereport(DEBUG1, (errmodule(MOD_RTO_RPO), - errmsg("target rpo got from dcf is %d", - t_thrd.dcf_cxt.dcfCtxInfo->targetRPO))); - return true; -} - -/* - * Regular reply from standby advising of WAL positions on standby server. - */ -static void DCFProcessStandbyReplyMessage(uint32 srcNodeID, const char* msg, uint32 msgSize) -{ - DCFStandbyReplyMessage reply; - int rc; - errno_t errorno = EOK; - char *buf = NULL; - int nodeIndex = -1; - bool forceUpdate = false; - if (msgSize < (sizeof(DCFStandbyReplyMessage) + 1)) { - ereport(WARNING, (errmsg("The size of msg didn't meet reply message and the size is %u\n", msgSize))); - return; - } - - /* skip first char */ - buf = const_cast(msg) + 1; - - errorno = memcpy_s(&reply, - sizeof(DCFStandbyReplyMessage), - buf, - sizeof(DCFStandbyReplyMessage)); - securec_check(errorno, "\0", "\0"); - - ereport(DEBUG1, (errmsg("The src node id is %u", srcNodeID))); - ereport(DEBUG1, (errmsg("id is %s and receive %X/%X write %X/%X flush %X/%X apply %X/%X", - reply.id, (uint32)(reply.receive >> 32), - (uint32)reply.receive, (uint32)(reply.write >> 32), (uint32)reply.write, - (uint32)(reply.flush >> 32), (uint32)reply.flush, (uint32)(reply.apply >> 32), - (uint32)reply.apply))); - bool isSet = SetNodeInfoByNodeID(srcNodeID, reply, &nodeIndex); - if (!isSet) { - ereport(WARNING, (errmsg("Set node info with node ID %u failed!", srcNodeID))); - return; - } - /* Update standby node name */ - char* id = g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].id; - rc = strncpy_s(id, DCF_STANDBY_NAME_SIZE, reply.id, strlen(reply.id)); - securec_check(rc, "\0", "\0"); - - if (t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count_limit == 0) { - ereport(WARNING, (errmsg("Sleep count limit of the node with id %d is 0", srcNodeID))); - return; - } - - if (t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_reply_time > 0) { - forceUpdate = IsForceUpdate(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_reply_time, reply.sendTime); - } - if (IS_PGXC_DATANODE && - ((t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count % - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count_limit) == 0 || - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].just_keep_alive || forceUpdate)) { - int64 currentRto = DCFLogCtrlCalculateCurrentRTO(&reply, - &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex])); - SetGlobalCurRto(nodeIndex, currentRto); - /* Current RPO calculation is not required now. 
*/ - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_reply_time = reply.sendTime; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_flush = reply.flush; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_apply = reply.apply; - } - if (!GetFlowControlParam()) { - return; - } - DoActualSleep(nodeIndex, forceUpdate); - SetGlobalRtoData(nodeIndex, srcNodeID); -} - /* receive xlog archive task response from follower */ static void ProcessArchiveFeedbackMessage(uint32 srcNodeID, const char* msg, uint32 msgSize) { @@ -784,7 +591,6 @@ static void ProcessArchiveXlogMessage(uint32 srcNodeID, const char* msg, uint32 * lock for archiver get PITR_TASK_GET flag, but works on old task. * if archiver works between set flag and set task details. */ - SpinLockAcquire(&archive_task_status->mutex); while (pg_atomic_compare_exchange_u32(pitr_task_status, &expected, PITR_TASK_GET) == false) { /* some task arrived before last task done if expected not equal to NONE */ expected = PITR_TASK_NONE; @@ -794,11 +600,10 @@ static void ProcessArchiveXlogMessage(uint32 srcNodeID, const char* msg, uint32 archive_xlog_message->slot_name, static_cast<uint32>(archive_xlog_message->targetLsn >> 32), static_cast<uint32>(archive_xlog_message->targetLsn)))); - SpinLockRelease(&archive_task_status->mutex); return; } } - + SpinLockAcquire(&archive_task_status->mutex); errorno = memcpy_s(&archive_task_status->archive_task, sizeof(ArchiveXlogMessage), archive_xlog_message, @@ -975,11 +780,7 @@ int ReceiveLogCbFunc(unsigned int stream_id, unsigned long long index, char* copyStart = const_cast<char*>(buf) + alignOffset; XLogRecPtr copyStartPtr = paxosStartPtr + alignOffset; - if (IsExtremeRedo()) { - XLogWalRcvReceiveInBuf(copyStart, receive_len, copyStartPtr); - } else { - XLogWalRcvReceive(copyStart, receive_len, copyStartPtr); - } + XLogWalRcvReceive(copyStart, receive_len, copyStartPtr); DcfUpdateAppliedRecordIndex(index, lsn);
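/*
 * [Editorial aside: illustrative sketch, not part of the patch above.]
 * The ProcessArchiveXlogMessage hunk narrows the mutex scope: the PITR task
 * flag is now claimed with a lock-free compare-and-swap first, and the
 * spinlock is only taken once the claim succeeds, which also removes the
 * early-return path that had to release the lock explicitly. The core of
 * that claim pattern, reduced to C++11 atomics (names are hypothetical):
 */
#include <atomic>

enum PitrTaskState : unsigned { TASK_NONE = 0, TASK_GET = 1 };

/* Claim the task slot lock-free; false means another task is still pending
 * (the real code retries and logs instead of giving up immediately). */
static bool ClaimArchiveTask(std::atomic<unsigned> &state)
{
    unsigned expected = TASK_NONE;
    return state.compare_exchange_strong(expected, TASK_GET);
}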
diff --git a/src/gausskernel/storage/replication/dcf/dcf_flowcontrol.cpp b/src/gausskernel/storage/replication/dcf/dcf_flowcontrol.cpp new file mode 100644 index 000000000..e7130c462 --- /dev/null +++ b/src/gausskernel/storage/replication/dcf/dcf_flowcontrol.cpp @@ -0,0 +1,661 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * dcf_flowcontrol.cpp + * + * IDENTIFICATION + * src/gausskernel/storage/replication/dcf/dcf_flowcontrol.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "c.h" +#include "postgres.h" +#include "knl/knl_variable.h" +#include "storage/shmem.h" +#include "utils/timestamp.h" +#include "cjson/cJSON.h" +#include "dcf_interface.h" +#include "replication/dcf_flowcontrol.h" +#include "pgstat.h" + +#ifndef ENABLE_MULTIPLE_NODES + +#ifdef ENABLE_UT +#define static +#endif + +/* Statistics for log control */ +static const int MICROSECONDS_PER_SECONDS = 1000000; +static const int MILLISECONDS_PER_SECONDS = 1000; +static const int MILLISECONDS_PER_MICROSECONDS = 1000; +static const int INIT_CONTROL_REPLY = 3; +static const int MAX_CONTROL_REPLY = 1000; +static const int SLEEP_MORE = 200; +static const int SLEEP_LESS = 400; +static const int SHIFT_SPEED = 3; + +inline static void SetNodeInfo(int nodeIndex, uint32 nodeID, DCFStandbyReplyMessage reply) +{ + DCFStandbyInfo *standbyNodeInfo = &(t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex]); + standbyNodeInfo->nodeID = nodeID; + standbyNodeInfo->isMember = true; + standbyNodeInfo->isActive = true; + standbyNodeInfo->receive = reply.receive; + standbyNodeInfo->write = reply.write; + standbyNodeInfo->flush = reply.flush; + standbyNodeInfo->apply = reply.apply; + standbyNodeInfo->peer_role = reply.peer_role; + standbyNodeInfo->peer_state = reply.peer_state; + standbyNodeInfo->sendTime = reply.sendTime; +} + +static void ResetDCFNodeInfo(int index) +{ + DCFStandbyInfo *standbyNodeInfo = &(t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index]); + standbyNodeInfo->isMember = false; + standbyNodeInfo->isActive = false; + standbyNodeInfo->nodeID = 0; + standbyNodeInfo->receive = InvalidXLogRecPtr; + standbyNodeInfo->write = InvalidXLogRecPtr; + standbyNodeInfo->flush = InvalidXLogRecPtr; + standbyNodeInfo->apply = InvalidXLogRecPtr; + standbyNodeInfo->applyRead = InvalidXLogRecPtr; + standbyNodeInfo->peer_role = UNKNOWN_MODE; + standbyNodeInfo->peer_state = UNKNOWN_STATE; + standbyNodeInfo->sendTime = 0; +} + +static void ResetDCFNodeLogCtl(int index) +{ + DCFLogCtrlData *logCtrl = &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index]); + logCtrl->prev_sleep_time = -1; + logCtrl->sleep_time = 0; + logCtrl->balance_sleep_time = 0; + logCtrl->prev_RTO = -1; + logCtrl->current_RTO = -1; + logCtrl->sleep_count = 0; + logCtrl->sleep_count_limit = MAX_CONTROL_REPLY; + logCtrl->prev_flush = 0; + logCtrl->prev_apply = 0; + logCtrl->prev_reply_time = 0; + logCtrl->pre_rate1 = 0; + logCtrl->pre_rate2 = 0; + logCtrl->prev_RPO = -1; + logCtrl->current_RPO = -1; +} + +bool ResetDCFNodeInfoWithNodeID(uint32 nodeID) +{ + for (int i = 0; i < DCF_MAX_NODES; i++) { + if (t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[i].nodeID == nodeID) { + ResetDCFNodeInfo(i); + ResetDCFNodeLogCtl(i); + return true; + } + } + return false; +} + +void ResetDCFNodesInfo(void) +{ + for (int i = 0; i < DCF_MAX_NODES; i++) { + ResetDCFNodeInfo(i); + ResetDCFNodeLogCtl(i); + } +}
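/*
 * [Editorial aside: illustrative sketch, not part of the patch above.]
 * GetNodeInfos()/GetDCFNodeInfo() below parse the JSON returned by
 * dcf_query_stream_info() with cJSON. The bare parse-then-iterate pattern,
 * self-contained (the sample "nodes" layout is assumed from the code below;
 * ParseNodesExample is a hypothetical name):
 */
#include "cjson/cJSON.h"
#include <cstdio>

static void ParseNodesExample(const char *json)
{
    cJSON *root = cJSON_Parse(json);
    if (root == nullptr) {
        return;                          /* cJSON_GetErrorPtr() explains why */
    }
    const cJSON *nodes = cJSON_GetObjectItem(root, "nodes");
    const cJSON *node = nullptr;
    cJSON_ArrayForEach(node, nodes)
    {
        const cJSON *id = cJSON_GetObjectItem(node, "node_id");
        if (id != nullptr) {
            printf("node_id=%d\n", id->valueint);
        }
    }
    cJSON_Delete(root);                  /* the caller owns and frees the tree */
}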
+ +/* It is necessary to free nodeinfos outside when calling this function */ +bool GetNodeInfos(cJSON **nodeInfos) +{ + char replicInfo[DCF_MAX_STREAM_INFO_LEN] = {0}; + int len = dcf_query_stream_info(1, replicInfo, DCF_MAX_STREAM_INFO_LEN * sizeof(char)); + if (len == 0) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("Failed to query dcf config!"))); + return false; + } + *nodeInfos = cJSON_Parse(replicInfo); + if (*nodeInfos == nullptr) { + const char* errorPtr = cJSON_GetErrorPtr(); + if (errorPtr != nullptr) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("Failed to parse dcf config: %s!", errorPtr))); + } else { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("Failed to parse dcf config: unknown error."))); + } + return false; + } + return true; +} + +/* Get dcf role, ip and port from cJSON corresponding to its nodeID */ +bool GetDCFNodeInfo(const cJSON *nodeJsons, int nodeID, char *role, int roleLen, char *ip, int ipLen, int *port) +{ + if (!cJSON_IsArray(nodeJsons)) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("Must exist array format in the json file."))); + return false; + } + const cJSON* nodeJson = nullptr; + errno_t rc = EOK; + cJSON_ArrayForEach(nodeJson, nodeJsons) + { + cJSON *idJson = cJSON_GetObjectItem(nodeJson, "node_id"); + if (idJson == nullptr) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("No items with node id %d!", nodeID))); + return false; + } + if (idJson->valueint == nodeID) { + cJSON *roleJson = cJSON_GetObjectItem(nodeJson, "role"); + if (roleJson == nullptr) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("No role item with node id %d!", nodeID))); + return false; + } + rc = strcpy_s(role, roleLen, roleJson->valuestring); + securec_check(rc, "\0", "\0"); + cJSON *ipJson = cJSON_GetObjectItem(nodeJson, "ip"); + if (ipJson == nullptr) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("No ip item with node id %d!", nodeID))); + return false; + } + rc = strcpy_s(ip, ipLen, ipJson->valuestring); + securec_check(rc, "\0", "\0"); + cJSON *portJson = cJSON_GetObjectItem(nodeJson, "port"); + if (portJson == nullptr) { + ereport(WARNING, (errmodule(MOD_DCF), errmsg("No port item with node id %d!", nodeID))); + return false; + } + *port = portJson->valueint; + return true; + } + } + ereport(WARNING, (errmodule(MOD_DCF), errmsg("No node item with node id %d found!", nodeID))); + return false; +} + +bool GetNodeInfo(const uint32 nodeID, char *nodeIP, uint32 nodeIPLen, uint32 *nodePort) +{ + cJSON *nodeInfos = nullptr; + /* nodeInfos is null when GetNodeInfos returned false */ + if (!GetNodeInfos(&nodeInfos)) { + return false; + } + const cJSON *nodeJsons = cJSON_GetObjectItem(nodeInfos, "nodes"); + if (nodeJsons == nullptr) { + cJSON_Delete(nodeInfos); + ereport(ERROR, (errmodule(MOD_DCF), errmsg("Get nodes info failed from DCF!"))); + return false; + } + const int DCF_ROLE_LEN = 64; + char localDCFRole[DCF_ROLE_LEN] = {0}; + if (!GetDCFNodeInfo(nodeJsons, nodeID, localDCFRole, DCF_ROLE_LEN, nodeIP, nodeIPLen, (int*)nodePort)) { + cJSON_Delete(nodeInfos); + return false; + } + cJSON_Delete(nodeInfos); + return true; +} + +bool SetNodeInfoByNodeID(uint32 nodeID, DCFStandbyReplyMessage reply, int *nodeIndex) +{ + *nodeIndex = -1; + DCFStandbyInfo nodeInfo; + /* Find if the src_node_id has been added to nodes info */ + for (int i = 0; i < DCF_MAX_NODES; i++) { + nodeInfo = t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[i]; + if (nodeInfo.nodeID == nodeID) { + *nodeIndex = i; + break; + } + } + if (*nodeIndex == -1) { + for (int i = 0; i < DCF_MAX_NODES; i++) { + nodeInfo = t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[i]; + if (!nodeInfo.isMember) { + *nodeIndex = i; + break; + } + } + } + if (*nodeIndex == -1) { + ereport(WARNING, + (errmsg("Can't add the node info with node id %u for no space in dcf nodes info array", nodeID))); + return false; + } + SetNodeInfo(*nodeIndex, nodeID, reply); + return true; +}
+ +bool IsForceUpdate(TimestampTz preSendTime, TimestampTz curSendTime) +{ + /* It is required to update rto when the time period of updating rto exceeds 2 seconds */ + long secToTime; + int microsecToTime; + long millisecTimeDiff = 0; + TimestampDifference(preSendTime, curSendTime, &secToTime, &microsecToTime); + millisecTimeDiff = secToTime * MILLISECONDS_PER_SECONDS + + microsecToTime / MILLISECONDS_PER_MICROSECONDS; + ereport(DEBUG1, (errmsg("The millisec_time_diff is %ld", millisecTimeDiff))); + /* Update rto forcefully when the time interval exceeds 2s. */ + int secondsNum = 2; + return (millisecTimeDiff > secondsNum * MILLISECONDS_PER_SECONDS); +} + +static bool IsUpdateRto(int nodeIndex, TimestampTz sendTime) +{ + bool forceUpdate = false; + uint64 curSleepCount = t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count; + uint64 sleepCountLimit = t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_count_limit; + if (t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_reply_time > 0) { + forceUpdate = IsForceUpdate(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].prev_reply_time, sendTime); + } + /* It is required to update rto every sleepCountLimit or when updating time period exceeds 1s */ + return ((sleepCountLimit > 0 && curSleepCount % sleepCountLimit == 0) || forceUpdate); +} + +/* If it's called in call back func, ASSERT DcfCallBackThreadShmemInit true */ +bool QueryLeaderNodeInfo(uint32* leaderID, char* leaderIP, uint32 ipLen, uint32 *leaderPort) +{ + Assert((t_thrd.dcf_cxt.is_dcf_thread && t_thrd.dcf_cxt.isDcfShmemInited) || + !t_thrd.dcf_cxt.is_dcf_thread); + /* dcf_node_id > 0 */ + *leaderID = 0; + uint32 tmpPort = 0; + uint32 *port = &tmpPort; + char tmpIP[DCF_MAX_IP_LEN] = {0}; + char *ip = tmpIP; + uint32 leaderIPLen = DCF_MAX_IP_LEN; + if (leaderPort != NULL) { + port = leaderPort; + } + if (leaderIP != NULL) { + ip = leaderIP; + } + if (ipLen != 0) { + leaderIPLen = ipLen; + } + bool success = (dcf_query_leader_info(1, ip, leaderIPLen, port, leaderID) == 0); + if (!success) { + ereport(WARNING, (errmsg("DCF failed to query leader info."))); + return false; + } + if (*leaderID == 0) { + ereport(WARNING, (errmsg("DCF leader does not exist."))); + return false; + } + return true; +} + +static void SetGlobalRtoData(int nodeIndex, int srcNodeID, char *nodename) +{ + /* Update global rto info to show */ + RTOStandbyData *standbyData = &(g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex]); + int rc = strncpy_s(standbyData->id, DCF_STANDBY_NAME_SIZE, nodename, strlen(nodename)); + securec_check(rc, "\0", "\0"); + standbyData->current_rto = t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].current_RTO; + + standbyData->current_sleep_time = t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time; + + standbyData->target_rto = t_thrd.dcf_cxt.dcfCtxInfo->targetRTO; + + char *remoteIP = standbyData->dest_ip; + uint32 remotePort = 0; + char *localIP = standbyData->source_ip; + uint32 localPort = 0; + uint32 leaderID = -1; + if (GetNodeInfo(srcNodeID, remoteIP, IP_LEN, &remotePort)) { + standbyData->dest_port = (int)remotePort; + } else { + ereport(WARNING, (errmsg("Get ip and port of node with nodeID %u failed", srcNodeID))); + } + if (QueryLeaderNodeInfo(&leaderID, localIP, IP_LEN, &localPort)) { + standbyData->source_port = (int)localPort; + } else { + ereport(WARNING, (errmsg("Get ip and port of leader failed"))); + } +} + +static inline uint64 LogCtrlCountBigSpeed(uint64 originSpeed, uint64 curSpeed) +{ + uint64 updateSpeed = (((originSpeed << SHIFT_SPEED) - originSpeed) >> SHIFT_SPEED) + curSpeed; + return updateSpeed; +}
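/*
 * [Editorial aside: illustrative numbers, not part of the patch above.]
 * LogCtrlCountBigSpeed() keeps a smoothed consumption rate in fixed point:
 * the stored value is 8x the rate, and each update computes
 * new = 7/8 * old + sample (still scaled by 8), i.e. an exponential moving
 * average that gives the newest sample a weight of 1/8. A worked example:
 */
#include <cassert>
#include <cstdint>

static uint64_t CountBigSpeedExample(uint64_t scaled, uint64_t sample)
{
    const int shift = 3;                  /* mirrors SHIFT_SPEED above */
    return (((scaled << shift) - scaled) >> shift) + sample;
}

static void EmaExample()
{
    uint64_t scaled = 100u << 3;          /* seed with 100 B/ms -> stored 800 */
    scaled = CountBigSpeedExample(scaled, 200);  /* 700 + 200 = 900 */
    assert((scaled >> 3) == 112);         /* ~= 7/8 * 100 + 1/8 * 200 = 112.5 */
}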
+ +/* + * Estimate the time standby needs to flush and apply log. + */ +static void DCFLogCtrlCalculateCurrentRTO(const DCFStandbyReplyMessage *reply, const int nodeIndex) +{ + volatile DCFLogCtrlData *logCtrl = &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex]); + long secToTime; + int microsecToTime; + if (XLByteLT(reply->receive, reply->flush) || XLByteLT(reply->flush, reply->apply) || + XLByteLT(reply->flush, logCtrl->prev_flush) || XLByteLT(reply->apply, logCtrl->prev_apply)) { + return; + } + if (XLByteEQ(reply->receive, reply->apply)) { + logCtrl->current_RTO = 0; + return; + } + if (logCtrl->prev_reply_time == 0) { + return; + } + uint64 part1 = reply->receive - reply->flush; + uint64 part2 = reply->flush - reply->apply; + uint64 part1Diff = reply->flush - logCtrl->prev_flush; + uint64 part2Diff = reply->apply - logCtrl->prev_apply; + + TimestampDifference(logCtrl->prev_reply_time, reply->sendTime, &secToTime, &microsecToTime); + long millisecTimeDiff = secToTime * MILLISECONDS_PER_SECONDS + (microsecToTime / MILLISECONDS_PER_MICROSECONDS); + long timeThreshold = 10; + if (millisecTimeDiff <= timeThreshold) { + return; + } + + /* + * consumeRatePart1 and consumeRatePart2 are based on 7/8 previous_speed(walsnd->log_ctrl.pre_rate1) and 1/8 + * speed_now(part1_diff / millisec_time_diff). To be more precise and keep more decimal point, we expand speed_now + * by multiply first then divide, which is (8 * previous_speed * 7/8 + speed_now) / 8. + */ + if (logCtrl->pre_rate1 != 0) { + logCtrl->pre_rate1 = LogCtrlCountBigSpeed(logCtrl->pre_rate1, (uint64)(part1Diff / millisecTimeDiff)); + } else { + logCtrl->pre_rate1 = ((part1Diff / (uint64)millisecTimeDiff) << SHIFT_SPEED); + } + if (logCtrl->pre_rate2 != 0) { + logCtrl->pre_rate2 = LogCtrlCountBigSpeed(logCtrl->pre_rate2, (uint64)(part2Diff / millisecTimeDiff)); + } else { + logCtrl->pre_rate2 = ((uint64)(part2Diff / millisecTimeDiff) << SHIFT_SPEED); + } + + uint64 consumeRatePart1 = (logCtrl->pre_rate1 >> SHIFT_SPEED); + uint64 consumeRatePart2 = (logCtrl->pre_rate2 >> SHIFT_SPEED); + if (consumeRatePart1 == 0) + consumeRatePart1 = 1; + + if (consumeRatePart2 == 0) + consumeRatePart2 = 1; + + uint64 secRTOPart1 = (part1 / consumeRatePart1) / MILLISECONDS_PER_SECONDS; + uint64 secRTOPart2 = ((part1 + part2) / consumeRatePart2) / MILLISECONDS_PER_SECONDS; + uint64 secRTO = (secRTOPart1 > secRTOPart2) ? secRTOPart1 : secRTOPart2; + ereport(DEBUG4, (errmodule(MOD_RTO_RPO), + errmsg("The RTO estimated is: %lu seconds. reply->receive is %lu, reply->flush is %lu, " + "reply->apply is %lu, pre_flush is %lu, pre_apply is %lu, TimestampDifference is %ld, " + "consumeRatePart1 is %lu, consumeRatePart2 is %lu", + secRTO, reply->receive, reply->flush, reply->apply, logCtrl->prev_flush, + logCtrl->prev_apply, millisecTimeDiff, consumeRatePart1, consumeRatePart2))); + t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].current_RTO = secRTO; +}
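/*
 * [Editorial aside: hypothetical figures, not part of the patch above.]
 * DCFLogCtrlCalculateCurrentRTO() divides the two WAL backlogs by their
 * smoothed consumption rates (bytes per millisecond) and keeps the larger
 * estimate; replay must drain everything the standby has received, hence
 * part1 + part2 in the second term. The arithmetic, isolated:
 */
#include <cstdint>

static uint64_t EstimateRtoSeconds(uint64_t part1, uint64_t part2,
                                   uint64_t flushRate, uint64_t replayRate)
{
    uint64_t secFlush = (part1 / flushRate) / 1000;             /* ms -> s */
    uint64_t secReplay = ((part1 + part2) / replayRate) / 1000; /* drain both */
    return (secFlush > secReplay) ? secFlush : secReplay;
}
/* e.g. EstimateRtoSeconds(64u << 20, 32u << 20, 8192, 4096) == 24:
 * flushing 64 MiB at 8192 B/ms takes 8 s, replaying 96 MiB at 4096 B/ms
 * takes 24 s, so the estimated RTO is 24 seconds. */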
+ +/* Calculate the RTO and RPO changes and control the changes as long as one changes. */ +static void DCFLogCtrlCalculateIndicatorChange(DCFLogCtrlData *logCtrl, int64 *gapDiff, int64 *gap) +{ + int64 rtoPrevGap = 0; + int64 rtoGapDiff = 0; + int64 rtoGap = 0; + int64 rpoPrevGap = 0; + int64 rpoGapDiff = 0; + int64 rpoGap = 0; + + if (t_thrd.dcf_cxt.dcfCtxInfo->targetRTO > 0) { + if (logCtrl->prev_RTO < 0) { + logCtrl->prev_RTO = logCtrl->current_RTO; + } + /* The base number compared is targetRTO/2 */ + int balanceFactor = 2; + int targetRTO = t_thrd.dcf_cxt.dcfCtxInfo->targetRTO / balanceFactor; + int64 currentRTO = logCtrl->current_RTO; + rtoGap = currentRTO - targetRTO; + rtoPrevGap = logCtrl->prev_RTO - targetRTO; + rtoGapDiff = rtoGap - rtoPrevGap; + } + + if (t_thrd.dcf_cxt.dcfCtxInfo->targetRPO > 0) { + if (logCtrl->prev_RPO < 0) { + logCtrl->prev_RPO = logCtrl->current_RPO; + } + + int targetRPO = t_thrd.dcf_cxt.dcfCtxInfo->targetRPO; + int64 currentRPO = logCtrl->current_RPO; + rpoGap = currentRPO - targetRPO; + rpoPrevGap = logCtrl->prev_RPO - targetRPO; + rpoGapDiff = rpoGap - rpoPrevGap; + } + + if (abs(rpoGapDiff) > abs(rtoGapDiff)) { + *gapDiff = rpoGapDiff; + *gap = rpoGap; + } else { + *gapDiff = rtoGapDiff; + *gap = rtoGap; + } + ereport(DEBUG4, (errmodule(MOD_RTO_RPO), + errmsg("[LogCtrlCalculateIndicatorChange] rto_gap=%d, rto_gap_diff=%d," + "rpo_gap=%d, rpo_gap_diff=%d, gap=%d, gap_diff=%d", + (int)rtoGap, (int)rtoGapDiff, (int)rpoGap, + (int)rpoGapDiff, (int)*gap, (int)*gapDiff))); +} + +/* + * If current RTO/RPO is less than target_rto/time_to_target_rpo, primary needs less sleep. + * If current RTO/RPO is more than target_rto/time_to_target_rpo, primary needs more sleep. + * If current RTO/RPO equals target_rto/time_to_target_rpo, primary will sleep + * according to balance_sleep to maintain rto. + */ +static void DCFLogCtrlCalculateSleepTime(int nodeIndex) +{ + DCFLogCtrlData *logCtrl = &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex]); + if ((t_thrd.dcf_cxt.dcfCtxInfo->targetRTO == 0 && t_thrd.dcf_cxt.dcfCtxInfo->targetRPO == 0) || + logCtrl->current_RTO == 0) { + logCtrl->sleep_time = 0; + return; + } + int64 gapDiff; + int64 gap; + DCFLogCtrlCalculateIndicatorChange(logCtrl, &gapDiff, &gap); + + int64 sleepTime = logCtrl->sleep_time; + /* use for rto log */ + int64 preTime = logCtrl->sleep_time; + int balanceRange = 1; + + /* mark balance sleep time */ + if (abs(gapDiff) <= balanceRange) { + if (logCtrl->balance_sleep_time == 0) { + logCtrl->balance_sleep_time = sleepTime; + } else { + int64 balanceFactor = 2; + logCtrl->balance_sleep_time = (logCtrl->balance_sleep_time + sleepTime) / balanceFactor; + } + ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("The balance time for log control is : %ld microseconds", + logCtrl->balance_sleep_time))); + } + + if (abs(gap) <= balanceRange) { /* rto balance, currentRTO close to targetRTO */ + if (logCtrl->balance_sleep_time != 0) { + sleepTime = logCtrl->balance_sleep_time; + } else { + sleepTime -= SLEEP_LESS; + } + } else if (gap > balanceRange) { /* need more sleep, currentRTO larger than targetRTO */ + sleepTime += SLEEP_MORE; + } else if (gap < -balanceRange) { /* need less sleep, currentRTO less than targetRTO */ + sleepTime -= SLEEP_LESS; + } + sleepTime = (sleepTime >= 0) ? sleepTime : 0; + sleepTime = (sleepTime < MICROSECONDS_PER_SECONDS) ? sleepTime : MICROSECONDS_PER_SECONDS; + logCtrl->sleep_time = sleepTime; + /* Report when sleep time exceeds 500ms */ + int threshold = 500000; + if (logCtrl->sleep_time >= threshold) { + volatile DCFStandbyInfo *nodeInfo = &(t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex]); + ereport(WARNING, + (errmodule(MOD_RTO_RPO), + errmsg("Flow control report for node %d: current sleep time is %ld microseconds, " + "current rto is %ld, target rto is %d, standby receive lsn is %X/%X, " + "previous flush lsn is %X/%X, flush lsn is %X/%X, previous apply lsn is %X/%X, " + "apply lsn is %X/%X, previous send time is %ld, current send time is %ld, " + "flush speed is %ld B/ms, replay speed is %ld B/ms", + nodeInfo->nodeID, logCtrl->sleep_time, + logCtrl->current_RTO, t_thrd.dcf_cxt.dcfCtxInfo->targetRTO, + (uint32)(nodeInfo->receive >> 32), (uint32)(nodeInfo->receive), + (uint32)(logCtrl->prev_flush >> 32), (uint32)(logCtrl->prev_flush), + (uint32)(nodeInfo->flush >> 32), (uint32)(nodeInfo->flush), + (uint32)(logCtrl->prev_apply >> 32), (uint32)(logCtrl->prev_apply), + (uint32)(nodeInfo->apply >> 32), (uint32)(nodeInfo->apply), + logCtrl->prev_reply_time, nodeInfo->sendTime, + logCtrl->pre_rate1 >> SHIFT_SPEED, logCtrl->pre_rate2 >> SHIFT_SPEED))); + } + /* log control takes effect */ + if (preTime == 0 && logCtrl->sleep_time != 0) { + ereport(LOG, + (errmodule(MOD_RTO_RPO), + errmsg("Log control takes effect, target_rto is %d, " + "current_rto is %ld, the current sleep time is %ld microseconds", + t_thrd.dcf_cxt.dcfCtxInfo->targetRTO, logCtrl->current_RTO, + logCtrl->sleep_time))); + } + /* log control does not take effect */ + if (preTime != 0 && logCtrl->sleep_time == 0) { + ereport(LOG, + (errmodule(MOD_RTO_RPO), + errmsg("Log control does not take effect, target_rto is %d, " + "current_rto is %ld, the current sleep time is %ld microseconds", + t_thrd.dcf_cxt.dcfCtxInfo->targetRTO, + logCtrl->current_RTO, logCtrl->sleep_time))); + } + ereport(DEBUG4, + (errmodule(MOD_RTO_RPO), + errmsg("The sleep time for log control is : %ld microseconds", + logCtrl->sleep_time))); +}
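/*
 * [Editorial aside: simplified model, not part of the patch above.]
 * The sleep-time update in DCFLogCtrlCalculateSleepTime() is an additive
 * increase/additive decrease controller around the RTO gap, clamped to
 * [0, 1s] and snapped to a remembered balance point when the gap stops
 * moving. Reduced to its core (the balance-point snap is omitted here):
 */
static long AdjustSleepMicroseconds(long sleepUs, long gap)
{
    const long kMore = 200;         /* mirrors SLEEP_MORE */
    const long kLess = 400;         /* mirrors SLEEP_LESS */
    const long kMaxUs = 1000000;    /* never sleep longer than one second */
    if (gap > 1) {
        sleepUs += kMore;           /* standby lags: slow the leader down */
    } else if (gap < -1) {
        sleepUs -= kLess;           /* standby is ahead: release the brake */
    }
    if (sleepUs < 0) {
        sleepUs = 0;
    }
    if (sleepUs > kMaxUs) {
        sleepUs = kMaxUs;
    }
    return sleepUs;
}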
+ +/* + * Count the limit for sleep_count, it is based on sleep time. + */ +static void DCFLogCtrlCountSleepLimit(const int nodeIndex) +{ + volatile DCFLogCtrlData *logCtrl = &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex]); + int64 sleepCountLimitCount = 0; + if (logCtrl->sleep_time == 0) { + sleepCountLimitCount = MAX_CONTROL_REPLY; + } else { + sleepCountLimitCount = INIT_CONTROL_REPLY * MICROSECONDS_PER_SECONDS / logCtrl->sleep_time; + sleepCountLimitCount = (sleepCountLimitCount > MAX_CONTROL_REPLY) ? + MAX_CONTROL_REPLY : sleepCountLimitCount; + } + if (sleepCountLimitCount <= 0) { + sleepCountLimitCount = INIT_CONTROL_REPLY; + } + logCtrl->sleep_count_limit = sleepCountLimitCount; + ereport(DEBUG1, (errmsg("Sleep count limit is %ld.", logCtrl->sleep_count_limit))); +}
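/*
 * [Editorial aside: worked example, not part of the patch above.]
 * DCFLogCtrlCountSleepLimit() sizes the re-evaluation interval so that
 * roughly INIT_CONTROL_REPLY (3) seconds of injected sleep pass between
 * recalculations: with sleep_time = 50,000 us the limit is
 * 3 * 1,000,000 / 50,000 = 60 replies; with no sleep at all it falls back
 * to MAX_CONTROL_REPLY (1000). The same computation, standalone:
 */
static long SleepCountLimitExample(long sleepTimeUs)
{
    const long kInitReply = 3, kMaxReply = 1000, kUsPerSec = 1000000;
    if (sleepTimeUs == 0) {
        return kMaxReply;
    }
    long limit = kInitReply * kUsPerSec / sleepTimeUs;
    if (limit > kMaxReply) {
        limit = kMaxReply;
    }
    if (limit <= 0) {
        limit = kInitReply;     /* very long sleeps: still re-check soon */
    }
    return limit;
}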
+static void DoActualSleep(int nodeID, int nodeIndex) +{ + /* try to control log sent rate so that standby can flush and apply log under RTO seconds */ + volatile DCFLogCtrlData *logCtrl = &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex]); + if (logCtrl->sleep_time >= 0 && logCtrl->sleep_time != logCtrl->prev_sleep_time) { + /* The logical replication sleep isn't considered now */ + dcf_pause_rep(1, nodeID, logCtrl->sleep_time); + } + logCtrl->sleep_count++; +} + +static bool GetFlowControlParam(void) +{ + /* Get current rto and rpo */ + const int rtoLen = 10; /* 10 is enough since rto is between 0 and 3600 */ + char tempRTO[rtoLen] = {0}; + const int rpoLen = 10; /* 10 is enough since rpo is between 0 and 3600 */ + char tempRPO[rpoLen] = {0}; + if (dcf_get_param("DN_FLOW_CONTROL_RTO", tempRTO, rtoLen) != 0) { + ereport(WARNING, (errmodule(MOD_RTO_RPO), errmsg("Get rto from dcf failed!"))); + return false; + } + t_thrd.dcf_cxt.dcfCtxInfo->targetRTO = atoi(tempRTO); + ereport(DEBUG1, (errmodule(MOD_RTO_RPO), + errmsg("target rto got from dcf is %d", + t_thrd.dcf_cxt.dcfCtxInfo->targetRTO))); + if (dcf_get_param("DN_FLOW_CONTROL_RPO", tempRPO, rpoLen) != 0) { + ereport(WARNING, (errmodule(MOD_RTO_RPO), errmsg("Get rpo from dcf failed!"))); + return false; + } + t_thrd.dcf_cxt.dcfCtxInfo->targetRPO = atoi(tempRPO); + ereport(DEBUG1, (errmodule(MOD_RTO_RPO), + errmsg("target rpo got from dcf is %d", + t_thrd.dcf_cxt.dcfCtxInfo->targetRPO))); + return true; +} + +static void ResetPreviousValue(int nodeIndex, DCFStandbyReplyMessage reply) +{ + DCFLogCtrlData *logCtrl = &(t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex]); + logCtrl->prev_reply_time = reply.sendTime; + logCtrl->prev_flush = reply.flush; + logCtrl->prev_apply = reply.apply; + logCtrl->prev_RTO = logCtrl->current_RTO; + logCtrl->prev_sleep_time = logCtrl->sleep_time; +}
+ +/* + * Regular reply from standby advising of WAL positions on standby server. + */ +void DCFProcessStandbyReplyMessage(uint32 srcNodeID, const char* msg, uint32 msgSize) +{ + DCFStandbyReplyMessage reply; + errno_t errorno = EOK; + char *buf = NULL; + int nodeIndex = -1; + if (msgSize < (sizeof(DCFStandbyReplyMessage) + 1)) { + ereport(WARNING, (errmsg("The size of msg didn't meet reply message and the size is %u\n", msgSize))); + return; + } + + /* skip first char */ + buf = const_cast<char *>(msg) + 1; + errorno = memcpy_s(&reply, + sizeof(DCFStandbyReplyMessage), + buf, + sizeof(DCFStandbyReplyMessage)); + securec_check(errorno, "\0", "\0"); + + ereport(DEBUG1, (errmsg("The src node id is %u, standby node name is %s " + "and receive %X/%X write %X/%X flush %X/%X apply %X/%X", + srcNodeID, reply.id, + (uint32)(reply.receive >> 32), (uint32)reply.receive, + (uint32)(reply.write >> 32), (uint32)reply.write, + (uint32)(reply.flush >> 32), (uint32)reply.flush, + (uint32)(reply.apply >> 32), (uint32)reply.apply))); + /* Get which log_ctrl index stores the info of node id with srcNodeID */ + bool isSet = SetNodeInfoByNodeID(srcNodeID, reply, &nodeIndex); + if (!isSet) { + ereport(WARNING, (errmsg("Set node info with node ID %u failed!", srcNodeID))); + return; + } + + /* Get the current target rto and rpo */ + if (!GetFlowControlParam()) { + ereport(WARNING, (errmsg("Get flow control parameters from dcf failed!"))); + return; + } + if (IsUpdateRto(nodeIndex, reply.sendTime)) { + /* Current RPO calculation is not required now. */ + DCFLogCtrlCalculateCurrentRTO(&reply, nodeIndex); + DCFLogCtrlCalculateSleepTime(nodeIndex); + DCFLogCtrlCountSleepLimit(nodeIndex); + DoActualSleep(srcNodeID, nodeIndex); + ResetPreviousValue(nodeIndex, reply); + } + SetGlobalRtoData(nodeIndex, srcNodeID, reply.id); +} +#endif diff --git a/src/gausskernel/storage/replication/dcf/dcf_replication.cpp b/src/gausskernel/storage/replication/dcf/dcf_replication.cpp index 5b569dd44..92b83f0b0 100755 --- a/src/gausskernel/storage/replication/dcf/dcf_replication.cpp +++ b/src/gausskernel/storage/replication/dcf/dcf_replication.cpp @@ -31,9 +31,11 @@ #include #include #include "storage/shmem.h" +#include "replication/dcf_flowcontrol.h" #include "replication/dcf_replication.h" #include "replication/walreceiver.h" #include "utils/timestamp.h" +#include "utils/guc.h" #include "storage/copydir.h" #include "postmaster/postmaster.h" #include "port/pg_crc32c.h" @@ -46,18 +48,18 @@ #endif #define TEMP_CONF_FILE "postgresql.conf.bak" -#define CONFIG_BAK_FILENAME "postgresql.conf.bak" -/* Statistics for log control */ -static const int MICROSECONDS_PER_SECONDS = 1000000; -static const int MILLISECONDS_PER_SECONDS = 1000; -static const int MILLISECONDS_PER_MICROSECONDS = 1000; -static const int INIT_CONTROL_REPLY = 3; -static const int MAX_CONTROL_REPLY = 10; -static const int SLEEP_MORE = 200; -static const int SLEEP_LESS = 200; -static const int SHIFT_SPEED = 3; +bool IsDCFReadyOrDisabled(void) +{ + if (g_instance.attr.attr_storage.dcf_attr.enable_dcf) { + if (!t_thrd.dcf_cxt.dcfCtxInfo->isDcfStarted) { + ereport(DEBUG1, (errmodule(MOD_DCF), errmsg("DCF thread has not been started."))); + } + return t_thrd.dcf_cxt.dcfCtxInfo->isDcfStarted; + } + return true; +} /* The dcf interfaces */ bool DCFSendMsg(uint32 streamID, uint32 destNodeID, const char* msg, uint32 msgSize) { @@ -76,13 +78,14 @@ static bool SetDCFReplyMsgIfNeed() XLogRecPtr receivePtr = InvalidXLogRecPtr; XLogRecPtr writePtr = InvalidXLogRecPtr; XLogRecPtr flushPtr = InvalidXLogRecPtr; + XLogRecPtr applyPtr = InvalidXLogRecPtr; XLogRecPtr replayReadPtr = InvalidXLogRecPtr; int rc = 0; volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData; volatile DcfContextInfo *dcfCtx = t_thrd.dcf_cxt.dcfCtxInfo; XLogRecPtr sndFlushPtr; - + applyPtr = GetXLogReplayRecPtr(nullptr, &replayReadPtr); SpinLockAcquire(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); receivePtr = t_thrd.walreceiver_cxt.walRcvCtlBlock->receivePtr; writePtr = t_thrd.walreceiver_cxt.walRcvCtlBlock->writePtr;
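/*
 * [Editorial aside: generic sketch, not part of the patch above; Snapshot
 * and ReadReplayPtr are hypothetical names.]
 * The SetDCFReplyMsgIfNeed() hunk hoists GetXLogReplayRecPtr() in front of
 * SpinLockAcquire() so that only plain field copies happen while the
 * spinlock is held. The general shape of that pattern, with std::mutex
 * standing in for the spinlock:
 */
#include <mutex>

struct Snapshot { long receive; long write; long flush; long apply; };

static long ReadReplayPtr() { return 0; }  /* stand-in for the slow call */

static Snapshot ReadPositions(std::mutex &lock, const Snapshot &shared)
{
    Snapshot out = {};
    out.apply = ReadReplayPtr();           /* expensive work outside the lock */
    std::lock_guard<std::mutex> guard(lock);
    out.receive = shared.receive;          /* cheap copies inside the lock */
    out.write = shared.write;
    out.flush = shared.flush;
    return out;
}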
@@ -121,7 +124,7 @@ dcfCtx->dcf_reply_message->receive = receivePtr; dcfCtx->dcf_reply_message->write = writePtr; dcfCtx->dcf_reply_message->flush = flushPtr; - dcfCtx->dcf_reply_message->apply = GetXLogReplayRecPtr(nullptr, &replayReadPtr); + dcfCtx->dcf_reply_message->apply = applyPtr; dcfCtx->dcf_reply_message->applyRead = replayReadPtr; dcfCtx->dcf_reply_message->sendTime = now; dcfCtx->dcf_reply_message->replyRequested = false; @@ -417,6 +420,13 @@ void InitDcfSSL() dcf_guc_param = u_sess->attr.attr_security.ssl_cert_notify_time; SetDcfParam("SSL_CERT_NOTIFY_TIME", std::to_string(dcf_guc_param).c_str()); + + /* set dcf ssl_cipher to TLS1.2 */ + SetDcfParam("SSL_CIPHER", + "ECDHE-ECDSA-AES256-GCM-SHA384:" + "ECDHE-ECDSA-AES128-GCM-SHA256:" + "ECDHE-RSA-AES256-GCM-SHA384:" + "ECDHE-RSA-AES128-GCM-SHA256:"); } bool SetDcfParams() @@ -530,359 +540,6 @@ bool SetDcfParams() return true; } -inline static void SetNodeInfo(int nodeIndex, uint32 nodeID, DCFStandbyReplyMessage reply) -{ - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].nodeID = nodeID; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].isMember = true; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].isActive = true; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].receive = reply.receive; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].write = reply.write; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].flush = reply.flush; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].apply = reply.apply; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].peer_role = reply.peer_role; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].peer_state = reply.peer_state; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].sendTime = reply.sendTime; -} - -bool SetNodeInfoByNodeID(uint32 nodeID, DCFStandbyReplyMessage reply, int *nodeIndex) -{ - *nodeIndex = -1; - DCFStandbyInfo nodeInfo; - /* Find if the src_node_id has added to nodes info */ - for (int i = 0; i < DCF_MAX_NODES; i++) { - nodeInfo = t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[i]; - if (nodeInfo.nodeID == nodeID) { - *nodeIndex = i; - break; - } - } - if (*nodeIndex == -1) { - for (int i = 0; i < DCF_MAX_NODES; i++) { - nodeInfo = t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[i]; - if (!nodeInfo.isMember) { - *nodeIndex = i; - break; - } - } - } - if (*nodeIndex == -1) { - ereport(WARNING, - (errmsg("Can't add the node info with node id %u for no space in dcf nodes info array", nodeID))); - return false; - } - SetNodeInfo(*nodeIndex, nodeID, reply); - return true; -} - -bool IsForceUpdate(TimestampTz preSendTime, TimestampTz curSendTime) -{ - long secToTime; - int microsecToTime; - long millisecTimeDiff = 0; - TimestampDifference(preSendTime, - curSendTime, &secToTime, &microsecToTime); - millisecTimeDiff = secToTime * MILLISECONDS_PER_SECONDS - + microsecToTime / MILLISECONDS_PER_MICROSECONDS; - ereport(DEBUG1, (errmsg("The millisec_time_diff is %ld", millisecTimeDiff))); - return (millisecTimeDiff > MILLISECONDS_PER_SECONDS); -}
-static inline uint64 LogCtrlCountBigSpeed(uint64 originSpeed, uint64 curSpeed) -{ - uint64 updateSpeed = (((originSpeed << SHIFT_SPEED) - originSpeed) >> SHIFT_SPEED) + curSpeed; - return updateSpeed; -} - -/* - * Estimate the time standby need to flush and apply log. - */ -int DCFLogCtrlCalculateCurrentRTO(const DCFStandbyReplyMessage *reply, DCFLogCtrlData* logCtrl) -{ - long secToTime; - int microsecToTime; - if (XLByteLT(reply->receive, reply->flush) || XLByteLT(reply->flush, reply->apply) || - XLByteLT(reply->flush, logCtrl->prev_flush) || XLByteLT(reply->apply, logCtrl->prev_apply)) { - return -1; - } - if (XLByteEQ(reply->receive, reply->apply)) { - logCtrl->prev_RTO = logCtrl->current_RTO; - logCtrl->current_RTO = 0; - return -1; - } - uint64 part1 = reply->receive - reply->flush; - uint64 part2 = reply->flush - reply->apply; - uint64 part1Diff = reply->flush - logCtrl->prev_flush; - uint64 part2Diff = reply->apply - logCtrl->prev_apply; - if (logCtrl->prev_reply_time == 0) { - return -1; - } - - TimestampDifference(logCtrl->prev_reply_time, reply->sendTime, &secToTime, &microsecToTime); - long millisecTimeDiff = secToTime * MILLISECONDS_PER_SECONDS + microsecToTime / MILLISECONDS_PER_MICROSECONDS; - if (millisecTimeDiff <= 10) { - return -1; - } - - /* - * consumeRatePart1 and consumeRatePart2 is based on 7/8 previous_speed(walsnd->log_ctrl.pre_rate1) and 1/8 - * speed_now(part1_diff / millisec_time_diff). To be more precise and keep more decimal point, we expand speed_now - * by multiply first then divide, which is (8 * previous_speed * 7/8 + speed_now) / 8. - */ - if (logCtrl->pre_rate1 != 0) { - logCtrl->pre_rate1 = LogCtrlCountBigSpeed(logCtrl->pre_rate1, (uint64)(part1Diff / millisecTimeDiff)); - } else { - logCtrl->pre_rate1 = ((part1Diff / (uint64)millisecTimeDiff) << SHIFT_SPEED); - } - if (logCtrl->pre_rate2 != 0) { - logCtrl->pre_rate2 = LogCtrlCountBigSpeed(logCtrl->pre_rate2, (uint64)(part2Diff / millisecTimeDiff)); - } else { - logCtrl->pre_rate2 = ((uint64)(part2Diff / millisecTimeDiff) << SHIFT_SPEED); - } - - uint64 consumeRatePart1 = (logCtrl->pre_rate1 >> SHIFT_SPEED); - uint64 consumeRatePart2 = (logCtrl->pre_rate2 >> SHIFT_SPEED); - if (consumeRatePart1 == 0) - consumeRatePart1 = 1; - - if (consumeRatePart2 == 0) - consumeRatePart2 = 1; - - uint64 secRTOPart1 = (part1 / consumeRatePart1) / MILLISECONDS_PER_SECONDS; - uint64 secRTOPart2 = ((part1 + part2) / consumeRatePart2) / MILLISECONDS_PER_SECONDS; - uint64 secRTO = (secRTOPart1 > secRTOPart2) ? secRTOPart1 : secRTOPart2; - ereport(DEBUG4, (errmodule(MOD_RTO_RPO), - errmsg("The RTO estimated is = : %lu seconds. reply->reveive is %lu, reply->flush is %lu, " - "reply->apply is %lu, pre_flush is %lu, pre_apply is %lu, TimestampDifference is %ld, " - "consumeRatePart1 is %lu, consumeRatePart2 is %lu", - secRTO, reply->receive, reply->flush, reply->apply, logCtrl->prev_flush, - logCtrl->prev_apply, millisecTimeDiff, consumeRatePart1, consumeRatePart2))); - return secRTO; -} - -/* Calculate the RTO and RPO changes and control the changes as long as one changes. 
*/ -static void DCFLogCtrlCalculateIndicatorChange(DCFLogCtrlData *logCtrl, int64 *gapDiff, int64 *gap) -{ - int64 rtoPrevGap = 0; - int64 rtoGapDiff = 0; - int64 rtoGap = 0; - int64 rpoPrevGap = 0; - int64 rpoGapDiff = 0; - int64 rpoGap = 0; - - if (t_thrd.dcf_cxt.dcfCtxInfo->targetRTO > 0) { - if (logCtrl->prev_RTO < 0) { - logCtrl->prev_RTO = logCtrl->current_RTO; - } - - int targetRTO = t_thrd.dcf_cxt.dcfCtxInfo->targetRTO; - int64 currentRTO = logCtrl->current_RTO; - rtoGap = currentRTO - targetRTO; - rtoPrevGap = logCtrl->prev_RTO - targetRTO; - rtoGapDiff = rtoGap - rtoPrevGap; - } - - if (t_thrd.dcf_cxt.dcfCtxInfo->targetRPO > 0) { - if (logCtrl->prev_RPO < 0) { - logCtrl->prev_RPO = logCtrl->current_RPO; - } - - int targetRPO = t_thrd.dcf_cxt.dcfCtxInfo->targetRPO; - int64 currentRPO = logCtrl->current_RPO; - rpoGap = currentRPO - targetRPO; - rpoPrevGap = logCtrl->prev_RPO - targetRPO; - rpoGapDiff = rpoGap - rpoPrevGap; - } - - if (abs(rpoGapDiff) > abs(rtoGapDiff)) { - *gapDiff = rpoGapDiff; - *gap = rpoGap; - } else { - *gapDiff = rtoGapDiff; - *gap = rtoGap; - } - ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("[LogCtrlCalculateIndicatorChange] rto_gap=%d, rto_gap_diff=%d," - "rpo_gap=%d, rpo_gap_diff=%d, gap=%d, gap_diff=%d", - (int)rtoGap, (int)rtoGapDiff, (int)rpoGap, (int)rpoGapDiff, (int)*gap, (int)*gapDiff))); -} - -static bool HandleKeepAlive(DCFLogCtrlData *logCtrl) -{ - if (logCtrl->just_keep_alive) { - if (logCtrl->current_RTO == 0) { - logCtrl->sleep_time = 0; - } else { - logCtrl->sleep_time -= (SLEEP_LESS * 10); - } - if (logCtrl->sleep_time < 0) { - logCtrl->sleep_time = 0; - } - return true; - } - return false; -} - -/* - * If current RTO/RPO is less than target_rto/time_to_target_rpo, primary need less sleep. - * If current RTO/RPO is more than target_rto/time_to_target_rpo, primary need more sleep. - * If current RTO/RPO equals to target_rto/time_to_target_rpo, primary will sleep. - * according to balance_sleep to maintain rto. - */ -void DCFLogCtrlCalculateSleepTime(DCFLogCtrlData *logCtrl) -{ - int64 gapDiff; - int64 gap; - DCFLogCtrlCalculateIndicatorChange(logCtrl, &gapDiff, &gap); - - int64 sleepTime = logCtrl->sleep_time; - /* use for rto log */ - int64 preTime = logCtrl->sleep_time; - int balanceRange = 1; - if (HandleKeepAlive(logCtrl)) { - return; - } - - /* mark balance sleep time */ - if (abs(gapDiff) <= balanceRange) { - if (logCtrl->balance_sleep_time == 0) { - logCtrl->balance_sleep_time = sleepTime; - } else { - logCtrl->balance_sleep_time = (logCtrl->balance_sleep_time + sleepTime) / 2; - } - ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("The balance time for log control is : %ld microseconds", - logCtrl->balance_sleep_time))); - } - - /* rto balance, currentRTO close to targetRTO */ - if (abs(gap) <= balanceRange) { - if (logCtrl->balance_sleep_time != 0) { - logCtrl->sleep_time = logCtrl->balance_sleep_time; - } else { - sleepTime -= SLEEP_LESS; - logCtrl->sleep_time = (sleepTime >= 0) ? sleepTime : 0; - } - } - - /* need more sleep, currentRTO larger than targetRTO - * get bigger, but no more than 1s - */ - if (gap > balanceRange) { - sleepTime += SLEEP_MORE; - logCtrl->sleep_time = (sleepTime < 1 * MICROSECONDS_PER_SECONDS) ? sleepTime : MICROSECONDS_PER_SECONDS; - } - - /* need less sleep, currentRTO less than targetRTO */ - if (gap < -balanceRange) { - sleepTime -= SLEEP_LESS; - logCtrl->sleep_time = (sleepTime >= 0) ? 
sleepTime : 0; - } - /* log control take effect */ - if (preTime == 0 && logCtrl->sleep_time != 0) { - ereport(LOG, - (errmodule(MOD_RTO_RPO), - errmsg("Log control take effect, target_rto is %d, current_rto is %ld, current the sleep time is %ld " - "microseconds", - t_thrd.dcf_cxt.dcfCtxInfo->targetRTO, logCtrl->current_RTO, - logCtrl->sleep_time))); - } - /* log control take does not effect */ - if (preTime != 0 && logCtrl->sleep_time == 0) { - ereport(LOG, (errmodule(MOD_RTO_RPO), - errmsg("Log control does not take effect, target_rto is %d, current_rto is %ld, current the sleep time " - "is %ld microseconds", - t_thrd.dcf_cxt.dcfCtxInfo->targetRTO, logCtrl->current_RTO, logCtrl->sleep_time))); - } - ereport(DEBUG4, (errmodule(MOD_RTO_RPO), - errmsg("The sleep time for log control is : %ld microseconds", logCtrl->sleep_time))); -} - -/* - * Count the limit for sleep_count, it is based on sleep time. - */ -void DCFLogCtrlCountSleepLimit(DCFLogCtrlData *logCtrl) -{ - int64 sleepCountLimitCount; - if (logCtrl->sleep_time == 0) { - sleepCountLimitCount = MAX_CONTROL_REPLY; - } else { - sleepCountLimitCount = INIT_CONTROL_REPLY * MICROSECONDS_PER_SECONDS / logCtrl->sleep_time; - sleepCountLimitCount = (sleepCountLimitCount > MAX_CONTROL_REPLY) ? - MAX_CONTROL_REPLY : sleepCountLimitCount; - } - if (sleepCountLimitCount <= 0) { - sleepCountLimitCount = INIT_CONTROL_REPLY; - } - logCtrl->sleep_count_limit = sleepCountLimitCount; - ereport(DEBUG1, (errmsg("Sleep count limit is %ld.", logCtrl->sleep_count_limit))); -} - -/* - * Update the sleep time for primary. - */ -void SleepNodeReplication(int nodeIndex) -{ - if (t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time > MICROSECONDS_PER_SECONDS) { - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time = MICROSECONDS_PER_SECONDS; - } - if (t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time >= 0) { - dcf_pause_rep(1, t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[nodeIndex].nodeID, - (uint32)t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time); - } - g_instance.rto_cxt.dcf_rto_standby_data[nodeIndex].current_sleep_time = - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[nodeIndex].sleep_time; -} - -static void ResetDCFNodeInfo(int index) -{ - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].isMember = false; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].isActive = false; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].nodeID = 0; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].receive = InvalidXLogRecPtr; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].write = InvalidXLogRecPtr; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].flush = InvalidXLogRecPtr; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].apply = InvalidXLogRecPtr; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].applyRead = InvalidXLogRecPtr; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].peer_role = UNKNOWN_MODE; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].peer_state = UNKNOWN_STATE; - t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[index].sendTime = 0; -} - -static void ResetDCFNodeLogCtl(int index) -{ - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].sleep_time = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].balance_sleep_time = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].prev_RTO = -1; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].current_RTO = -1; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].sleep_count = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].sleep_count_limit = MAX_CONTROL_REPLY; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].prev_flush = 0; - 
t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].prev_apply = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].prev_reply_time = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].pre_rate1 = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].pre_rate2 = 0; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].prev_RPO = -1; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].current_RPO = -1; - t_thrd.dcf_cxt.dcfCtxInfo->log_ctrl[index].just_keep_alive = false; -} - -bool ResetDCFNodeInfoWithNodeID(uint32 nodeID) -{ - for (int i = 0; i < DCF_MAX_NODES; i++) { - if (t_thrd.dcf_cxt.dcfCtxInfo->nodes_info[i].nodeID == nodeID) { - ResetDCFNodeInfo(i); - ResetDCFNodeLogCtl(i); - return true; - } - } - return false; -} - -void ResetDCFNodesInfo(void) -{ - for (int i = 0; i < DCF_MAX_NODES; i++) { - ResetDCFNodeInfo(i); - ResetDCFNodeLogCtl(i); - } -} - bool InitDcfAndStart() { ResetDCFNodesInfo(); @@ -1047,39 +704,6 @@ void LaunchPaxos() CheckConfigFile(true); } -/* If it's called in call back func, ASSERT DcfCallBackThreadShmemInit true */ -bool QueryLeaderNodeInfo(uint32* leaderID, char* leaderIP, uint32 ipLen, uint32 *leaderPort) -{ - Assert((t_thrd.dcf_cxt.is_dcf_thread && t_thrd.dcf_cxt.isDcfShmemInited) || - !t_thrd.dcf_cxt.is_dcf_thread); - /* dcf_node_id > 0 */ - *leaderID = 0; - uint32 tmpPort = 0; - uint32 *port = &tmpPort; - char tmpIP[DCF_MAX_IP_LEN] = {0}; - char *ip = tmpIP; - uint32 leaderIPLen = DCF_MAX_IP_LEN; - if (leaderPort != NULL) { - port = leaderPort; - } - if (leaderIP != NULL) { - ip= leaderIP; - } - if (ipLen != 0) { - leaderIPLen = ipLen; - } - bool success = (dcf_query_leader_info(1, ip, leaderIPLen, port, leaderID) == 0); - if (!success) { - ereport(WARNING, (errmsg("DCF failed to query leader info."))); - return false; - } - if (*leaderID == 0) { - ereport(WARNING, (errmsg("DCF leader does not exist."))); - return false; - } - return true; -} - /* * Synchronise standby's configure file once the HA build successfully. 
*/
@@ -1449,70 +1073,4 @@ void DcfSendArchiveXlogResponse(ArchiveTaskStatus *archive_task_status)
     pg_memory_barrier();
     pg_atomic_write_u32(pitr_task_status, PITR_TASK_NONE);
 }
-
-/* The caller must free nodeInfos after calling this function */
-bool GetNodeInfos(cJSON **nodeInfos)
-{
-    char replicInfo[DCF_MAX_STREAM_INFO_LEN] = {0};
-    int len = dcf_query_stream_info(1, replicInfo, DCF_MAX_STREAM_INFO_LEN * sizeof(char));
-    if (len == 0) {
-        ereport(WARNING, (errmodule(MOD_DCF), errmsg("Failed to query dcf config!")));
-        return false;
-    }
-    *nodeInfos = cJSON_Parse(replicInfo);
-    if (*nodeInfos == nullptr) {
-        const char* errorPtr = cJSON_GetErrorPtr();
-        if (errorPtr != nullptr) {
-            ereport(WARNING, (errmodule(MOD_DCF), errmsg("Failed to parse dcf config: %s!", errorPtr)));
-        } else {
-            ereport(WARNING, (errmodule(MOD_DCF), errmsg("Failed to parse dcf config: unknown error.")));
-        }
-        return false;
-    }
-    return true;
-}
-
-/* Get dcf role, ip and port from cJSON corresponding to its nodeID */
-bool GetDCFNodeInfo(const cJSON *nodeJsons, int nodeID, char *role, int roleLen, char *ip, int ipLen, int *port)
-{
-    if (!cJSON_IsArray(nodeJsons)) {
-        ereport(WARNING, (errmodule(MOD_DCF), errmsg("The json file must contain an array.")));
-        return false;
-    }
-    const cJSON* nodeJson = nullptr;
-    errno_t rc = EOK;
-    cJSON_ArrayForEach(nodeJson, nodeJsons)
-    {
-        cJSON *idJson = cJSON_GetObjectItem(nodeJson, "node_id");
-        if (idJson == nullptr) {
-            ereport(WARNING, (errmodule(MOD_DCF), errmsg("No node_id item found while looking for node id %d!", nodeID)));
-            return false;
-        }
-        if (idJson->valueint == nodeID) {
-            cJSON *roleJson = cJSON_GetObjectItem(nodeJson, "role");
-            if (roleJson == nullptr) {
-                ereport(WARNING, (errmodule(MOD_DCF), errmsg("No role item with node id %d!", nodeID)));
-                return false;
-            }
-            rc = strcpy_s(role, roleLen, roleJson->valuestring);
-            securec_check(rc, "\0", "\0");
-            cJSON *ipJson = cJSON_GetObjectItem(nodeJson, "ip");
-            if (ipJson == nullptr) {
-                ereport(WARNING, (errmodule(MOD_DCF), errmsg("No ip item with node id %d!", nodeID)));
-                return false;
-            }
-            rc = strcpy_s(ip, ipLen, ipJson->valuestring);
-            securec_check(rc, "\0", "\0");
-            cJSON *portJson = cJSON_GetObjectItem(nodeJson, "port");
-            if (portJson == nullptr) {
-                ereport(WARNING, (errmodule(MOD_DCF), errmsg("No port item with node id %d!", nodeID)));
-                return false;
-            }
-            *port = portJson->valueint;
-            return true;
-        }
-    }
-    ereport(WARNING, (errmodule(MOD_DCF), errmsg("No node item with node id %d found!", nodeID)));
-    return false;
-}
 #endif
diff --git a/src/gausskernel/storage/replication/heartbeat.cpp b/src/gausskernel/storage/replication/heartbeat.cpp
index a0b557cbf..911f7b9db 100755
--- a/src/gausskernel/storage/replication/heartbeat.cpp
+++ b/src/gausskernel/storage/replication/heartbeat.cpp
@@ -118,7 +118,7 @@ static int deal_with_sigup()
      * dynamically modify the ha socket.
      */
     for (j = 1; j < MAX_REPLNODE_NUM; j++) {
-        if (t_thrd.postmaster_cxt.ReplConnChanged[j]) {
+        if (t_thrd.postmaster_cxt.ReplConnChangeType[j] == OLD_REPL_CHANGE_IP_OR_PORT) {
             break;
         }
     }
@@ -129,7 +129,7 @@ static int deal_with_sigup()
         }
     }
     for (int i = 1; i < MAX_REPLNODE_NUM; i++) {
-        t_thrd.postmaster_cxt.ReplConnChanged[i] = false;
+        t_thrd.postmaster_cxt.ReplConnChangeType[i] = NO_CHANGE;
     }
     if (g_heartbeat_client != NULL) {
         /* The client will auto connect later.
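         * (Assumption, based on the reconnect handling rather than anything
         * stated in this hunk: once the connection is dropped here, the
         * heartbeat client re-establishes it using the refreshed
         * ReplConnArray settings.)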
*/ @@ -236,6 +236,9 @@ static void heartbeat_handle_exception(MemoryContext heartbeat_context) /* Report the error to the server log */ EmitErrorReport(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* Buffer pins are released here: */ ResourceOwnerRelease(t_thrd.utils_cxt.CurrentResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, false, true); diff --git a/src/gausskernel/storage/replication/heartbeat/heartbeat_client.cpp b/src/gausskernel/storage/replication/heartbeat/heartbeat_client.cpp index d6df36b42..231805e70 100644 --- a/src/gausskernel/storage/replication/heartbeat/heartbeat_client.cpp +++ b/src/gausskernel/storage/replication/heartbeat/heartbeat_client.cpp @@ -47,6 +47,8 @@ HeartbeatClient::~HeartbeatClient() struct replconninfo* GetHeartBeatServerConnInfo(void) { volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; if (walrcv->pid <= 0) { return NULL; @@ -66,8 +68,11 @@ struct replconninfo* GetHeartBeatServerConnInfo(void) continue; } + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(conninfo->remotehost, ipNoZoneData, IP_LEN); + /* find target conninfo by conn_channel */ - if (strcmp(conninfo->remotehost, (char*)walrcv->conn_channel.remotehost) == 0 && + if (strcmp(ipNoZone, (char*)walrcv->conn_channel.remotehost) == 0 && conninfo->remoteport == walrcv->conn_channel.remoteport) { return conninfo; } @@ -89,25 +94,19 @@ bool HeartbeatClient::Connect() PurePort *port = NULL; int rcs = 0; int remotePort = -1; - int i = 0; - while (i < MAX_REPLNODE_NUM) { - conninfo = GetHeartBeatServerConnInfo(); - if (conninfo == NULL) { - sleep(SLEEP_TIME); - i++; - continue; - } + conninfo = GetHeartBeatServerConnInfo(); + if (conninfo == NULL) { + return false; + } - rcs = snprintf_s(connstr, MAX_CONN_INFO, MAX_CONN_INFO - 1, "host=%s port=%d localhost=%s localport=%d", - conninfo->remotehost, conninfo->remoteheartbeatport, conninfo->localhost, - conninfo->localheartbeatport); - securec_check_ss(rcs, "", ""); - port = PQconnect(connstr); - if (port != NULL) { - remotePort = conninfo->remoteheartbeatport; - ereport(LOG, (errmsg("Connected to heartbeat primary :%s success.", connstr))); - break; - } + rcs = snprintf_s(connstr, MAX_CONN_INFO, MAX_CONN_INFO - 1, "host=%s port=%d localhost=%s localport=%d connect_timeout=2", + conninfo->remotehost, conninfo->remoteheartbeatport, conninfo->localhost, + conninfo->localheartbeatport); + securec_check_ss(rcs, "", ""); + port = PQconnect(connstr); + if (port != NULL) { + remotePort = conninfo->remoteheartbeatport; + ereport(LOG, (errmsg("Connected to heartbeat primary :%s success.", connstr))); } if (port != NULL) { @@ -179,6 +178,8 @@ bool HeartbeatClient::InitConnection(HeartbeatConnection *con, int remotePort) static int GetChannelId(char remotehost[IP_LEN], int remoteheartbeatport) { struct replconninfo *replconninfo = NULL; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; for (int i = 0; i < MAX_REPLNODE_NUM; i++) { replconninfo = t_thrd.postmaster_cxt.ReplConnArray[i]; @@ -186,7 +187,10 @@ static int GetChannelId(char remotehost[IP_LEN], int remoteheartbeatport) continue; } - if (strncmp((char *)replconninfo->remotehost, (char *)remotehost, IP_LEN) == 0 && + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strncmp((char *)ipNoZone, (char *)remotehost, IP_LEN) == 0 && replconninfo->remoteheartbeatport == 
remoteheartbeatport) { return replconninfo->localheartbeatport; } diff --git a/src/gausskernel/storage/replication/heartbeat/heartbeat_conn.cpp b/src/gausskernel/storage/replication/heartbeat/heartbeat_conn.cpp index c8c04f860..084be4aad 100644 --- a/src/gausskernel/storage/replication/heartbeat/heartbeat_conn.cpp +++ b/src/gausskernel/storage/replication/heartbeat/heartbeat_conn.cpp @@ -30,6 +30,7 @@ #include "utils/elog.h" #include "replication/heartbeat/libpq/libpq.h" #include "replication/heartbeat/libpq/libpq-be.h" +#include "replication/walreceiver.h" #include "replication/heartbeat/heartbeat_conn.h" static char *GetIp(struct sockaddr *addr) @@ -39,7 +40,8 @@ static char *GetIp(struct sockaddr *addr) char *result = NULL; if (AF_INET6 == saddr->sin_family) { - result = inet_net_ntop(AF_INET6, &saddr->sin_addr, AF_INET6_MAX_BITS, ip, IP_LEN); + struct sockaddr_in6 *saddr6 = (sockaddr_in6 *)addr; + result = inet_net_ntop(AF_INET6, &saddr6->sin6_addr, AF_INET6_MAX_BITS, ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } @@ -62,13 +64,19 @@ static char *GetIp(struct sockaddr *addr) static bool IsIpInWhiteList(const char *ip) { + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; + for (int i = START_REPLNODE_NUM; i < MAX_REPLNODE_NUM; i++) { ReplConnInfo *replconninfo = t_thrd.postmaster_cxt.ReplConnArray[i]; if (replconninfo == NULL) { continue; } - if (strncmp((char *)replconninfo->remotehost, ip, IP_LEN) == 0) { + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strncmp((char *)ipNoZone, ip, IP_LEN) == 0) { return true; } } @@ -168,6 +176,8 @@ void EventDel(int epollFd, HeartbeatConnection *con) void UpdateLastHeartbeatTime(const char *remoteHost, int remotePort, TimestampTz timestamp) { volatile heartbeat_state *stat = t_thrd.heartbeat_cxt.state; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; ReplConnInfo* replconninfo = NULL; SpinLockAcquire(&stat->mutex); @@ -180,7 +190,10 @@ void UpdateLastHeartbeatTime(const char *remoteHost, int remotePort, TimestampTz continue; } - if (strncmp((char *)replconninfo->remotehost, (char *)remoteHost, IP_LEN) == 0 && + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strncmp((char *)ipNoZone, (char *)remoteHost, IP_LEN) == 0 && replconninfo->remoteheartbeatport == remotePort) { stat->channel_array[i].last_reply_timestamp = timestamp; ereport(DEBUG2, (errmsg("Update last heartbeat time: remotehost:%s, port:%d, time:%ld", remoteHost, diff --git a/src/gausskernel/storage/replication/heartbeat/heartbeat_server.cpp b/src/gausskernel/storage/replication/heartbeat/heartbeat_server.cpp index b11cd08a1..9baa66897 100644 --- a/src/gausskernel/storage/replication/heartbeat/heartbeat_server.cpp +++ b/src/gausskernel/storage/replication/heartbeat/heartbeat_server.cpp @@ -31,6 +31,7 @@ #include "utils/timestamp.h" #include "replication/heartbeat/libpq/libpq.h" #include "replication/heartbeat/heartbeat_conn.h" +#include "replication/walreceiver.h" #include "replication/heartbeat/heartbeat_server.h" using namespace PureLibpq; @@ -203,7 +204,7 @@ bool HeartbeatServer::IsAlreadyListen(const char *ip, int port) const for (listen_index = 0; listen_index < MAX_REPLNODE_NUM; ++listen_index) { if (serverListenSocket_[listen_index] != PGINVALID_SOCKET) { - struct sockaddr_in saddr; + struct 
sockaddr_storage saddr; socklen_t slen; char *result = NULL; rc = memset_s(&saddr, sizeof(saddr), 0, sizeof(saddr)); @@ -215,22 +216,35 @@ bool HeartbeatServer::IsAlreadyListen(const char *ip, int port) const continue; } - if (saddr.sin_family == AF_INET6) { - result = inet_net_ntop(AF_INET6, &saddr.sin_addr, AF_INET6_MAX_BITS, sock_ip, IP_LEN); + if (((struct sockaddr *) &saddr)->sa_family == AF_INET6) { + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone((char *)ip, ipNoZoneData, IP_LEN); + + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *) &saddr)->sin6_addr, + AF_INET6_MAX_BITS, sock_ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } - } else if (saddr.sin_family == AF_INET) { - result = inet_net_ntop(AF_INET, &saddr.sin_addr, AF_INET_MAX_BITS, sock_ip, IP_LEN); + + if ((strcmp(ipNoZone, sock_ip) == 0) && (ntohs(((struct sockaddr_in6 *) &saddr)->sin6_port)) == port) { + return true; + } + } else if (((struct sockaddr *) &saddr)->sa_family == AF_INET) { + result = inet_net_ntop(AF_INET, &((struct sockaddr_in *) &saddr)->sin_addr, + AF_INET_MAX_BITS, sock_ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } - } else if (saddr.sin_family == AF_UNIX) { + + if ((strcmp(ip, sock_ip) == 0) && (ntohs(((struct sockaddr_in *) &saddr)->sin_port)) == port) { + return true; + } + } else if (((struct sockaddr *) &saddr)->sa_family == AF_UNIX) { continue; } - if ((strcmp(ip, sock_ip) == 0) && (ntohs(saddr.sin_port)) == port) { - return true; - } } } @@ -246,13 +260,19 @@ bool HeartbeatServer::IsAlreadyListen(const char *ip, int port) const bool HeartbeatServer::AddConnection(HeartbeatConnection *con, HeartbeatConnection **releasedConnPtr) { *releasedConnPtr = NULL; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; + for (int i = START_REPLNODE_NUM; i < MAX_REPLNODE_NUM; i++) { ReplConnInfo *replconninfo = t_thrd.postmaster_cxt.ReplConnArray[i]; if (replconninfo == NULL) { continue; } - if (strncmp((char *)replconninfo->remotehost, con->remoteHost, IP_LEN) == 0 && + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strncmp((char *)ipNoZone, con->remoteHost, IP_LEN) == 0 && replconninfo->remoteheartbeatport == con->channelIdentifier) { if (identifiedConns_[i] != NULL) { /* remove old connection if has duplicated connections. 
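                 * (Assumption from the parameter contract: the stale entry is
                 * handed back through releasedConnPtr so the caller can release
                 * it after the lookup, rather than it being freed here.)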
*/ @@ -281,13 +301,19 @@ bool HeartbeatServer::AddConnection(HeartbeatConnection *con, HeartbeatConnectio */ void HeartbeatServer::RemoveConnection(HeartbeatConnection *con) { + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; + for (int i = START_REPLNODE_NUM; i < MAX_REPLNODE_NUM; i++) { ReplConnInfo *replconninfo = t_thrd.postmaster_cxt.ReplConnArray[i]; if (replconninfo == NULL) { continue; } - if (strncmp((char *)replconninfo->remotehost, con->remoteHost, IP_LEN) == 0 && + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strncmp((char *)ipNoZone, con->remoteHost, IP_LEN) == 0 && replconninfo->remoteheartbeatport == con->channelIdentifier) { if (identifiedConns_[i] == NULL) { ereport(COMMERROR, (errmsg("The connection is not existed, remote ip: %s, remote heartbeat port:%d.", diff --git a/src/gausskernel/storage/replication/heartbeat/libpq/fe-connect.cpp b/src/gausskernel/storage/replication/heartbeat/libpq/fe-connect.cpp index f699e2f4d..e25e12941 100644 --- a/src/gausskernel/storage/replication/heartbeat/libpq/fe-connect.cpp +++ b/src/gausskernel/storage/replication/heartbeat/libpq/fe-connect.cpp @@ -84,6 +84,7 @@ static PQconninfoOption *conninfo_parse(const char *conninfo, bool use_defaults) static char *conninfo_getval(PQconninfoOption *connOptions, const char *keyword); static bool parseConnParam(const char *conninfo, ConnParam *param); static int internalConnect(ConnParam *param); +static int internalConnect_v6(ConnParam *param); static void PQconninfoFree(PQconninfoOption *connOptions); static int connectNoDelay(int sock); static void ConnParamFree(ConnParam *param); @@ -98,6 +99,7 @@ Port *PQconnect(const char *conninfo) { Port *port = NULL; ConnParam *param = (ConnParam *)palloc0(sizeof(ConnParam)); + int sock = 0; /* * Parse the conninfo string @@ -107,7 +109,12 @@ Port *PQconnect(const char *conninfo) return NULL; } - int sock = internalConnect(param); + if (strchr(param->remoteIp, ':') != NULL) { + sock = internalConnect_v6(param); + } else { + sock = internalConnect(param); + } + ConnParamFree(param); if (sock < 0) { return NULL; @@ -225,6 +232,123 @@ static int internalConnect(ConnParam *param) return sock; } +/* support IPV6 */ +static int internalConnect_v6(ConnParam *param) +{ + int sock = -1; + errno_t rc = 0; + int res = 0; + char portstr[MAXPGPATH]; + struct addrinfo *addrs = NULL; + struct addrinfo *addrs_local = NULL; + struct addrinfo hint, hint_local; + errno_t ss_rc = 0; + + /* Initialize hint structure */ + ss_rc = memset_s(&hint, sizeof(hint), 0, sizeof(struct addrinfo)); + securec_check(ss_rc, "\0", "\0"); + hint.ai_socktype = SOCK_STREAM; + hint.ai_family = AF_UNSPEC; + + snprintf_s(portstr, MAXPGPATH, MAXPGPATH - 1, "%d", param->remotePort); + + /* Use pg_getaddrinfo_all() to resolve the address */ + res = pg_getaddrinfo_all(param->remoteIp, portstr, &hint, &addrs); + if (res || !addrs) { + ereport(WARNING, (errmsg("pg_getaddrinfo_all remoteAddr address %s failed\n", param->remoteIp))); + return -1; + } + + /* Initialize hint structure */ + ss_rc = memset_s(&hint_local, sizeof(hint_local), 0, sizeof(struct addrinfo)); + securec_check(ss_rc, "\0", "\0"); + hint_local.ai_socktype = SOCK_STREAM; + hint_local.ai_family = AF_UNSPEC; + hint_local.ai_flags = AI_PASSIVE; + + /* Use pg_getaddrinfo_all() to resolve the address */ + res = pg_getaddrinfo_all(param->localIp, NULL, &hint_local, &addrs_local); + if (res || !addrs_local) { + ereport(WARNING, 
(errmsg("pg_getaddrinfo_all localAddr address %s failed\n", param->localIp))); + return -1; + } + + /* Open a socket */ + sock = socket(addrs->ai_family, SOCK_STREAM, 0); + if (sock < 0) { + ereport(COMMERROR, (errmsg("could not create socket."))); + return sock; + } + + rc = bind(sock, addrs_local->ai_addr, addrs_local->ai_addrlen); + if (rc != 0) { + ereport(COMMERROR, (errmsg("could not bind localhost:%s, result is %d", param->localIp, rc))); + close(sock); + return -1; + } + +#ifdef F_SETFD + if (fcntl(sock, F_SETFD, FD_CLOEXEC) == -1) { + ereport(COMMERROR, (errmsg("could not set socket(FD_CLOEXEC): %d", SOCK_ERRNO))); + close(sock); + return -1; + } +#endif /* F_SETFD */ + + /* + * Random_Port_Reuse need set SO_REUSEADDR on. + * Random_Port_Reuse must not use bind interface, + * because socket owns a random port private when used bind interface. + * SO_REUSEPORT solve this problem in kernel 3.9. + */ + if (!IS_AF_UNIX(addrs->ai_family)) { + int on = 1; + + if ((setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on))) == -1) { + ereport(COMMERROR, (errmsg("could not set socket(FD_CLOEXEC): %d", SOCK_ERRNO))); + close(sock); + return -1; + } + } + + /* + * Select socket options: no delay of outgoing data for + * TCP sockets, nonblock mode, close-on-exec. Fail if any + * of this fails. + */ + if (!IS_AF_UNIX(addrs->ai_family)) { + if (!connectNoDelay(sock)) { + close(sock); + return -1; + } + } + + if (param->connTimeout > 0) { + struct timeval timeout = { 0, 0 }; + timeout.tv_sec = param->connTimeout; + (void)setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)); + } + + /* + * Start/make connection. This should not block, since we + * are in nonblock mode. If it does, well, too bad. + */ + if (connect(sock, addrs->ai_addr, addrs->ai_addrlen) < 0) { + ereport(COMMERROR, (errmsg("Connect failed."))); + close(sock); + return -1; + } + + if (addrs) { + pg_freeaddrinfo_all(hint.ai_family, addrs); + } + if (addrs_local) { + pg_freeaddrinfo_all(hint_local.ai_family, addrs_local); + } + + return sock; +} + /* * parseConnParam * diff --git a/src/gausskernel/storage/replication/libpqwalreceiver.cpp b/src/gausskernel/storage/replication/libpqwalreceiver.cpp index b2d338d96..41bab8a79 100755 --- a/src/gausskernel/storage/replication/libpqwalreceiver.cpp +++ b/src/gausskernel/storage/replication/libpqwalreceiver.cpp @@ -25,6 +25,7 @@ #include "access/xlog_internal.h" #include "miscadmin.h" #include "replication/walreceiver.h" +#include "replication/walsender_private.h" #include "replication/libpqwalreceiver.h" #include "storage/pmsignal.h" #include "storage/proc.h" @@ -219,7 +220,7 @@ static bool CheckRemoteServerSharedStorage(ServerMode remoteMode, PGresult* res) "main standby, current is %s", wal_get_role_string(remoteMode, true)))); return false; } - } else if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + } else if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { if (!IS_PRIMARY_NORMAL(remoteMode) && remoteMode != MAIN_STANDBY_MODE) { PQclear(res); ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), @@ -453,7 +454,7 @@ static ServerMode IdentifyRemoteMode() ereport(ERROR, (errcode(ERRCODE_INVALID_STATUS), errmsg("the mode of the remote server must be primary, current is %s", - wal_get_role_string(remoteMode)))); + wal_get_role_string(remoteMode, true)))); } if (t_thrd.postmaster_cxt.HaShmData->is_cascade_standby && remoteMode != STANDBY_MODE && @@ -471,7 +472,6 @@ static ServerMode IdentifyRemoteMode() if (IS_SHARED_STORAGE_MODE) { if 
(!CheckRemoteServerSharedStorage(remoteMode, res)) { - PQclear(res); return UNKNOWN_MODE; } } @@ -525,7 +525,7 @@ static int32 IdentifyRemoteVersion() (errcode(ERRCODE_INVALID_STATUS), errmsg("could not get the local protocal version, make sure the PG_PROTOCOL_VERSION is defined"))); } - if (!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { if (walrcv->conn_target != REPCONNTARGET_DUMMYSTANDBY && (localTerm == 0 || localTerm > remoteTerm) && !AM_HADR_WAL_RECEIVER) { PQclear(res); @@ -541,7 +541,6 @@ static int32 IdentifyRemoteVersion() */ if (remoteSversion != localSversion || strncmp(remotePversion, localPversion, strlen(PG_PROTOCOL_VERSION)) != 0) { PQclear(res); - pfree_ext(localPversion); if (t_thrd.role != APPLY_WORKER) { ha_set_rebuild_connerror(VERSION_REBUILD, REPL_INFO_ERROR); } @@ -563,6 +562,19 @@ static int32 IdentifyRemoteVersion() return remoteTerm; } +static void SetWalSendTermChanged(void) +{ + for (int i = 0; i < g_instance.attr.attr_storage.max_wal_senders; i++) { + volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[i]; + + if (walsnd->pid == 0) + continue; + + SpinLockAcquire(&walsnd->mutex); + walsnd->isTermChanged = true; + SpinLockRelease(&walsnd->mutex); + } +} /* * Establish the connection to the primary server for XLOG streaming @@ -601,15 +613,19 @@ bool libpqrcv_connect(char *conninfo, XLogRecPtr *startpoint, char *slotname, in "fallback_application_name=dummystandby " "connect_timeout=%d", conninfo, u_sess->attr.attr_storage.wal_receiver_connect_timeout); +#ifndef ENABLE_MULTIPLE_NODES } else if (AM_HADR_WAL_RECEIVER) { +#else + } else if (AM_HADR_WAL_RECEIVER || AM_HADR_CN_WAL_RECEIVER) { +#endif char passwd[MAXPGPATH] = {'\0'}; char username[MAXPGPATH] = {'\0'}; GetPasswordForHadrStreamingReplication(username, passwd); nRet = snprintf_s(conninfoRepl, sizeof(conninfoRepl), sizeof(conninfoRepl) - 1, - "%s dbname=postgres replication=hadr_main_standby " + "%s dbname=postgres replication=%s " "fallback_application_name=hadr_%s " "connect_timeout=%d user=%s password=%s", - conninfo, + conninfo, AM_HADR_WAL_RECEIVER ? "hadr_main_standby" : "hadr_standby_cn", (u_sess->attr.attr_common.application_name && strlen(u_sess->attr.attr_common.application_name) > 0) ? 
u_sess->attr.attr_common.application_name @@ -617,7 +633,7 @@ bool libpqrcv_connect(char *conninfo, XLogRecPtr *startpoint, char *slotname, in u_sess->attr.attr_storage.wal_receiver_connect_timeout, username, passwd); rc = memset_s(passwd, MAXPGPATH, 0, MAXPGPATH); securec_check(rc, "\0", "\0"); - } else if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + } else if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { nRet = snprintf_s(conninfoRepl, sizeof(conninfoRepl), sizeof(conninfoRepl) - 1, "%s dbname=postgres replication=standby_cluster " "fallback_application_name=%s_hass " @@ -642,7 +658,11 @@ bool libpqrcv_connect(char *conninfo, XLogRecPtr *startpoint, char *slotname, in } securec_check_ss(nRet, "", ""); +#ifndef ENABLE_MULTIPLE_NODES if (AM_HADR_WAL_RECEIVER) { +#else + if (AM_HADR_WAL_RECEIVER || AM_HADR_CN_WAL_RECEIVER) { +#endif char *tmp = NULL; char *tok = NULL; char printConnInfo[MAXCONNINFO] = {0}; @@ -655,6 +675,8 @@ bool libpqrcv_connect(char *conninfo, XLogRecPtr *startpoint, char *slotname, in tok = strtok_s(copyConnInfo, " ", &tmp); if (tok == NULL) { if (copyConnInfo != NULL) { + rc = memset_s(copyConnInfo, sizeof(copyConnInfo), 0, sizeof(copyConnInfo)); + securec_check(rc, "\0", "\0"); pfree_ext(copyConnInfo); } ereport(ERROR, @@ -669,8 +691,9 @@ bool libpqrcv_connect(char *conninfo, XLogRecPtr *startpoint, char *slotname, in } ereport(LOG, (errmsg("Connecting to remote server :%s", printConnInfo))); if (copyConnInfo != NULL) { - pfree(copyConnInfo); - copyConnInfo = NULL; + rc = memset_s(copyConnInfo, sizeof(copyConnInfo), 0, sizeof(copyConnInfo)); + securec_check(rc, "\0", "\0"); + pfree_ext(copyConnInfo); } } else { ereport(LOG, (errmsg("Connecting to remote server :%s", conninfoRepl))); @@ -705,7 +728,11 @@ retry: } ereport(LOG, (errmsg("Connected to remote server :%s success.", conninfo))); +#ifndef ENABLE_MULTIPLE_NODES if (AM_HADR_WAL_RECEIVER) { +#else + if (AM_HADR_WAL_RECEIVER || AM_HADR_CN_WAL_RECEIVER) { +#endif rc = memset_s(conninfoRepl, MAXCONNINFO + 75, 0, MAXCONNINFO + 75); securec_check(rc, "\0", "\0"); } @@ -729,7 +756,7 @@ retry: } #endif - if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { if (walrcv->conn_target != REPCONNTARGET_DUMMYSTANDBY && (localTerm == 0 || localTerm > remoteTerm)) { PQclear(res); ha_set_rebuild_connerror(WALSEGMENT_REBUILD, REPL_INFO_ERROR); @@ -997,7 +1024,7 @@ retry: if (AM_HADR_WAL_RECEIVER && slotname != NULL) { rc = strcat_s(slotname, NAMEDATALEN - 1, "_hadr"); securec_check(rc, "", ""); - } else if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE && slotname != NULL) { + } else if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE && slotname != NULL) { rc = strcat_s(slotname, NAMEDATALEN - 1, "_hass"); securec_check(rc, "", ""); } @@ -1019,12 +1046,21 @@ retry: (errmsg("streaming replication successfully connected to primary, the connection is %s, start from %X/%X ", conninfo, (uint32)(*startpoint >> 32), (uint32)(*startpoint)))); + if (IS_DISASTER_RECOVER_MODE && t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby && localTerm != remoteTerm) { + SetWalSendTermChanged(); + } + if (!t_thrd.walreceiver_cxt.AmWalReceiverForFailover) { SpinLockAcquire(&walrcv->mutex); walrcv->peer_role = remoteMode; SpinLockRelease(&walrcv->mutex); } + volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData; + SpinLockAcquire(&hashmdata->mutex); + hashmdata->disconnect_count[hashmdata->current_repl] = 0; + hashmdata->prev_repl = hashmdata->current_repl; + SpinLockRelease(&hashmdata->mutex); 
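+    /*
+     * Bookkeeping note: after a successful connect, the disconnect counter of
+     * the replication channel just used is cleared and the channel is
+     * remembered in prev_repl (assumption: later reconnect logic uses this to
+     * tell a freshly working channel from a flapping one).
+     */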
/* * If the streaming replication successfully connected to primary, * then clean the rebuild reason in HaShmData. @@ -1177,11 +1213,9 @@ static PGresult *libpqrcv_PQexec(const char *query) void libpqrcv_check_conninfo(const char *conninfo) { - char *err = NULL; - - PQconninfoOption *opts = PQconninfoParse(conninfo, &err); + PQconninfoOption *opts = PQconninfoParse(conninfo, NULL); if (opts == NULL) { - ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid connection string syntax: %s", err))); + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid connection string syntax"))); } PQconninfoFree(opts); @@ -1310,13 +1344,20 @@ void libpqrcv_send(const char *buffer, int nbytes) PQerrorMessage(t_thrd.libwalreceiver_cxt.streamConn)))); } -bool libpqrcv_command(const char *cmd, char **err) +bool libpqrcv_command(const char *cmd, char **err, int *sqlstate) { PGresult *res = libpqrcv_PQexec(cmd); if (PQresultStatus(res) != PGRES_COMMAND_OK) { PQclear(res); *err = pstrdup(PQerrorMessage(t_thrd.libwalreceiver_cxt.streamConn)); + if (sqlstate != NULL && t_thrd.libwalreceiver_cxt.streamConn != NULL) { + *sqlstate = MAKE_SQLSTATE(t_thrd.libwalreceiver_cxt.streamConn->last_sqlstate[0], + t_thrd.libwalreceiver_cxt.streamConn->last_sqlstate[1], + t_thrd.libwalreceiver_cxt.streamConn->last_sqlstate[2], + t_thrd.libwalreceiver_cxt.streamConn->last_sqlstate[3], + t_thrd.libwalreceiver_cxt.streamConn->last_sqlstate[4]); + } return false; } @@ -1361,7 +1402,7 @@ static void ha_set_conn_channel() return; } if (laddr->sa_family == AF_INET6) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in *)laddr)->sin_addr, 128, local_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *)laddr)->sin6_addr, 128, local_ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } @@ -1373,7 +1414,7 @@ static void ha_set_conn_channel() } if (raddr->sa_family == AF_INET6) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in *)raddr)->sin_addr, 128, remote_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *)raddr)->sin6_addr, 128, remote_ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed, error: %d", EAFNOSUPPORT))); } @@ -1389,12 +1430,22 @@ static void ha_set_conn_channel() IP_LEN - 1); securec_check(rc, "", ""); walrcv->conn_channel.localhost[IP_LEN - 1] = '\0'; - walrcv->conn_channel.localport = ntohs(((struct sockaddr_in *)laddr)->sin_port); + if (laddr->sa_family == AF_INET6) { + walrcv->conn_channel.localport = ntohs(((struct sockaddr_in6 *)laddr)->sin6_port); + } else if (laddr->sa_family == AF_INET) { + walrcv->conn_channel.localport = ntohs(((struct sockaddr_in *)laddr)->sin_port); + } + rc = strncpy_s((char *)walrcv->conn_channel.remotehost, sizeof(walrcv->conn_channel.remotehost), remote_ip, IP_LEN - 1); securec_check(rc, "", ""); walrcv->conn_channel.remotehost[IP_LEN - 1] = '\0'; - walrcv->conn_channel.remoteport = ntohs(((struct sockaddr_in *)raddr)->sin_port); + if (raddr->sa_family == AF_INET6) { + walrcv->conn_channel.remoteport = ntohs(((struct sockaddr_in6 *)raddr)->sin6_port); + } else if (raddr->sa_family == AF_INET) { + walrcv->conn_channel.remoteport = ntohs(((struct sockaddr_in *)raddr)->sin_port); + } + SpinLockRelease(&walrcv->mutex); } diff --git a/src/gausskernel/storage/replication/logical/Makefile b/src/gausskernel/storage/replication/logical/Makefile index 0603a668d..176e913a9 100644 --- a/src/gausskernel/storage/replication/logical/Makefile +++ 
b/src/gausskernel/storage/replication/logical/Makefile @@ -6,6 +6,6 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) -OBJS = decode.o launcher.o logical.o logicalfuncs.o origin.o proto.o relation.o reorderbuffer.o snapbuild.o worker.o +OBJS = decode.o launcher.o logical.o logicalfuncs.o origin.o proto.o relation.o reorderbuffer.o snapbuild.o worker.o parallel_decode_worker.o parallel_decode.o parallel_reorderbuffer.o logical_queue.o logical_parse.o include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/gausskernel/storage/replication/logical/decode.cpp b/src/gausskernel/storage/replication/logical/decode.cpp index 095e654fd..8d55420e3 100644 --- a/src/gausskernel/storage/replication/logical/decode.cpp +++ b/src/gausskernel/storage/replication/logical/decode.cpp @@ -45,6 +45,9 @@ #include "storage/standby.h" #include "utils/memutils.h" +#include "utils/relfilenodemap.h" +#include "utils/lsyscache.h" + typedef struct XLogRecordBuffer { XLogRecPtr origptr; @@ -56,21 +59,45 @@ typedef struct XLogRecordBuffer { /* RMGR Handlers */ static void DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeHeap3Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeUheapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeUheapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); /* individual record(group)'s handlers */ static void DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeUInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeUInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeUMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void AreaDecodeUMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); + static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, TransactionId xid, CommitSeqNo csn, Oid dboid, TimestampTz commit_time, int nsubxacts, TransactionId *sub_xids, 
int ninval_msgs, SharedInvalidationMessage *msg, xl_xact_origin *origin); @@ -78,8 +105,9 @@ static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecPtr lsn, Transaction int nsubxacts); /* common function to decode tuples */ -static void DecodeXLogTuple(const char *data, Size len, ReorderBufferTupleBuf *tup); -static void DecodeXLogUTuple(const char *data, Size len, ReorderBufferUTupleBuf *tuple); +extern ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new,XLogRecPtr lsn, bool create_as_top); + +extern Pointer UGetXlrec(XLogReaderState * record); Pointer GetXlrec(XLogReaderState *record) { @@ -99,7 +127,7 @@ Pointer UGetXlrec(XLogReaderState *record) return recData; } -static size_t DecodeUndoMeta(const char* data) +size_t DecodeUndoMeta(const char* data) { uint64 info = (*(uint64*)data) & XLOG_UNDOMETA_INFO_SLOT; if (info == 0) { @@ -109,7 +137,7 @@ static size_t DecodeUndoMeta(const char* data) } } -Pointer UGetMultiInsertXlrec(XLogReaderState *record) +Pointer UGetMultiInsertXlrec(XLogReaderState *record, CommitSeqNo* curCSN) { Pointer recData = (Pointer)XLogRecGetData(record); bool isinit = (XLogRecGetInfo(record) & XLOG_UHEAP_INIT_PAGE) != 0; @@ -134,9 +162,47 @@ Pointer UGetMultiInsertXlrec(XLogReaderState *record) if (isinit) { recData += sizeof(TransactionId) + sizeof(uint16); } + if ((record->decoded_record->xl_term & XLOG_CONTAIN_CSN) != 0) { + *curCSN = *(CommitSeqNo *)recData; + recData += sizeof(CommitSeqNo); + } return recData; } +bool IsRecordProcess(XLogReaderState *record) +{ + switch ((RmgrIds)XLogRecGetRmid(record)) { + case RM_SMGR_ID: + case RM_CLOG_ID: + case RM_DBASE_ID: + case RM_TBLSPC_ID: + case RM_MULTIXACT_ID: + case RM_RELMAP_ID: + case RM_BTREE_ID: + case RM_HASH_ID: + case RM_GIN_ID: + case RM_GIST_ID: + case RM_SEQ_ID: + case RM_SPGIST_ID: + case RM_SLOT_ID: + case RM_BARRIER_ID: + case RM_UHEAP2_ID: + case RM_UNDOLOG_ID: + case RM_UHEAPUNDO_ID: + case RM_UNDOACTION_ID: + case RM_UBTREE_ID: + case RM_UBTREE2_ID: + case RM_SEGPAGE_ID: + case RM_REPLORIGIN_ID: +#ifdef ENABLE_MOT + case RM_MOT_ID: +#endif + return false; + default: + return true; + } +} + /* * Take every XLogReadRecord()ed record and perform the actions required to * decode it using the output plugin already setup in the logical decoding @@ -153,6 +219,9 @@ void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState * buf.endptr = ctx->reader->EndRecPtr; buf.record = record; buf.record_data = GetXlrec(record); + if (!IsRecordProcess(record)) { + return; + } /* cast so we get a warning when new rmgrs are added */ switch ((RmgrIds)XLogRecGetRmid(record)) { @@ -179,44 +248,49 @@ void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState * case RM_HEAP_ID: DecodeHeapOp(ctx, &buf); break; - /* - * Rmgrs irrelevant for logical decoding; they describe stuff not - * represented in logical decoding. Add new rmgrs in rmgrlist.h's - * order. 
- */ - case RM_SMGR_ID: - case RM_CLOG_ID: - case RM_DBASE_ID: - case RM_TBLSPC_ID: - case RM_MULTIXACT_ID: - case RM_RELMAP_ID: - case RM_BTREE_ID: - case RM_HASH_ID: - case RM_GIN_ID: - case RM_GIST_ID: - case RM_SEQ_ID: - case RM_SPGIST_ID: - case RM_SLOT_ID: - case RM_BARRIER_ID: - case RM_REPLORIGIN_ID: - break; case RM_HEAP3_ID: DecodeHeap3Op(ctx, &buf); break; -#ifdef ENABLE_MOT - case RM_MOT_ID: - break; -#endif case RM_UHEAP_ID: DecodeUheapOp(ctx, &buf); break; - case RM_UHEAP2_ID: - case RM_UNDOLOG_ID: - case RM_UHEAPUNDO_ID: - case RM_UNDOACTION_ID: - case RM_UBTREE_ID: - case RM_UBTREE2_ID: - case RM_SEGPAGE_ID: + default: + break; + } +} + +/* + * Process the record for area decoding. + */ +void AreaLogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *record) +{ + XLogRecordBuffer buf = { 0, 0, NULL, NULL }; + + buf.origptr = ctx->reader->ReadRecPtr; + buf.endptr = ctx->reader->EndRecPtr; + buf.record = record; + buf.record_data = GetXlrec(record); + if(!IsRecordProcess(record)) { + return; + } + + /* cast so we get a warning when new rmgrs are added */ + switch ((RmgrIds)XLogRecGetRmid(record)) { + case RM_XACT_ID: + AreaDecodeXactOp(ctx, &buf); + break; + case RM_HEAP2_ID: + AreaDecodeHeap2Op(ctx, &buf); + break; + case RM_HEAP_ID: + AreaDecodeHeapOp(ctx, &buf); + break; + case RM_XLOG_ID: + case RM_STANDBY_ID: + case RM_HEAP3_ID: + break; + case RM_UHEAP_ID: + AreaDecodeUheapOp(ctx, &buf); break; default: ereport(WARNING, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), @@ -224,6 +298,7 @@ void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState * } } + /* * Handle rmgr XLOG_ID records for DecodeRecordIntoReorderBuffer(). */ @@ -405,6 +480,118 @@ static void DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) } } +static void SetTxnVal(ReorderBufferTXN *txn, TransactionId xid, XLogRecPtr final_lsn, + XLogRecPtr end_lsn, RepOriginId origin_id, CommitSeqNo csn, TimestampTz commit_time) +{ + txn->xid = xid; + txn->final_lsn = final_lsn; + txn->end_lsn = end_lsn; + txn->origin_id = origin_id; + if (csn != InvalidCommitSeqNo) { + txn->csn = csn; + } + if (commit_time != -1) { + txn->commit_time = commit_time; + } +} + +/* + * Handle rmgr XACT_ID records for DecodeRecordIntoReorderBuffer(). + */ +static void AreaDecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + XLogReaderState *r = buf->record; + uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; + ReorderBuffer *rb = ctx->reorder; + ReorderBufferTXN *txn = (struct ReorderBufferTXN *)palloc(sizeof(struct ReorderBufferTXN)); + if (txn == NULL) + return; + + switch (info) { + case XLOG_XACT_COMMIT: { + xl_xact_commit *xlrec = NULL; + TransactionId *subxacts = NULL; + + xlrec = (xl_xact_commit *)buf->record_data; + + subxacts = (TransactionId *)&(xlrec->xnodes[xlrec->nrels]); + for (int nxact = 0; nxact < xlrec->nsubxacts; nxact++) { + TransactionId subxid = subxacts[nxact]; + SetTxnVal(txn, subxid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), xlrec->csn, xlrec->xact_time); + rb->commit(rb, txn, buf->origptr); + } + + TransactionId xid = XLogRecGetXid(r); + SetTxnVal(txn, xid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), xlrec->csn, xlrec->xact_time); + rb->commit(rb, txn, buf->origptr); + + break; + } + case XLOG_XACT_COMMIT_PREPARED: { + xl_xact_commit_prepared *prec = NULL; + xl_xact_commit *xlrec = NULL; + TransactionId *subxacts = NULL; + + /* Prepared commits contain a normal commit record... 
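+         * The embedded crec supplies the csn and commit time, while the xid
+         * is taken from the prepared-commit wrapper (prec->xid).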
*/ + prec = (xl_xact_commit_prepared *)buf->record_data; + xlrec = &prec->crec; + + subxacts = (TransactionId *)&(xlrec->xnodes[xlrec->nrels]); + + TransactionId xid = prec->xid; + SetTxnVal(txn, xid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), xlrec->csn, xlrec->xact_time); + rb->commit(rb, txn, buf->origptr); + + break; + } + case XLOG_XACT_COMMIT_COMPACT: { + xl_xact_commit_compact *xlrec = NULL; + xlrec = (xl_xact_commit_compact *)buf->record_data; + TransactionId xid = XLogRecGetXid(r); + SetTxnVal(txn, xid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), xlrec->csn, xlrec->xact_time); + rb->commit(rb, txn, buf->origptr); + break; + } + case XLOG_XACT_ABORT: { + TransactionId xid = XLogRecGetXid(r); + xl_xact_abort *xlrec = NULL; + xlrec = (xl_xact_abort *)buf->record_data; + SetTxnVal(txn, xid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), InvalidCommitSeqNo, -1); + rb->abort(rb, txn); + break; + } + case XLOG_XACT_ABORT_WITH_XID: { + xl_xact_abort *xlrec = NULL; + xlrec = (xl_xact_abort *)buf->record_data; + TransactionId xid = XLogRecGetXid(r); + xid = *(TransactionId *)((char*)&(xlrec->xnodes[xlrec->nrels]) + + (unsigned)(xlrec->nsubxacts) * sizeof(TransactionId)); + SetTxnVal(txn, xid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), InvalidCommitSeqNo, -1); + rb->abort(rb, txn); + + break; + } + case XLOG_XACT_ABORT_PREPARED: { + xl_xact_abort_prepared *prec = NULL; + prec = (xl_xact_abort_prepared *)buf->record_data; + TransactionId xid = prec->xid; + SetTxnVal(txn, xid, buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), InvalidCommitSeqNo, -1); + rb->abort(rb, txn); + break; + } + case XLOG_XACT_ASSIGNMENT: + break; + case XLOG_XACT_PREPARE: + SetTxnVal(txn, XLogRecGetXid(r), buf->origptr, buf->endptr, XLogRecGetOrigin(buf->record), InvalidCommitSeqNo, -1); + rb->prepare(rb,txn); + break; + default: + ereport(WARNING, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_XACT_ID record type: %u", info))); + } +} + + /* * Handle rmgr STANDBY_ID records for DecodeRecordIntoReorderBuffer(). */ @@ -434,10 +621,9 @@ static void DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) } break; case XLOG_STANDBY_LOCK: case XLOG_STANDBY_CSN: -#ifndef ENABLE_MULTIPLE_NODES case XLOG_STANDBY_CSN_COMMITTING: case XLOG_STANDBY_CSN_ABORTED: -#endif + break; default: ereport(WARNING, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), @@ -445,6 +631,46 @@ static void DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) } } +/* + * Decode multi insert for area decoding. + */ +static void AreaDecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK; + TransactionId xid = XLogRecGetXid(buf->record); + ctx->write_xid = xid; + + switch (info) { + case XLOG_HEAP2_MULTI_INSERT: + AreaDecodeMultiInsert(ctx, buf); + break; + case XLOG_HEAP2_FREEZE: + /* + * Although these records only exist to serve the needs of logical + * decoding, all the work happens as part of crash or archive + * recovery, so we don't need to do anything here. + */ + break; + /* + * Everything else here is just low level physical stuff we're + * not interested in. 
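+         * (CLEAN, CLEANUP_INFO, VISIBLE, LOGICAL_NEWPAGE, BCM and PAGE_UPGRADE
+         * describe page-level maintenance and carry no row-level payload.)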
+ */ + case XLOG_HEAP2_CLEAN: + case XLOG_HEAP2_CLEANUP_INFO: + case XLOG_HEAP2_VISIBLE: + case XLOG_HEAP2_LOGICAL_NEWPAGE: + case XLOG_HEAP2_BCM: + case XLOG_HEAP2_PAGE_UPGRADE: + + break; + default: + ereport(WARNING, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP2_ID record type: %u", info))); + } +} + + + /* * Handle rmgr HEAP2_ID records for DecodeRecordIntoReorderBuffer(). */ @@ -484,7 +710,6 @@ static void DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_HEAP2_VISIBLE: case XLOG_HEAP2_LOGICAL_NEWPAGE: case XLOG_HEAP2_BCM: - case XLOG_HEAP2_PAGE_UPGRADE: break; default: @@ -611,6 +836,59 @@ static void DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) } } +/* + * Handle rmgr HEAP_ID records for area decoding. + */ +static void AreaDecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK; + TransactionId xid = XLogRecGetXid(buf->record); + ctx->write_xid = xid; + + switch (info) { + case XLOG_HEAP_INSERT: + AreaDecodeInsert(ctx, buf); + break; + + /* + * Treat HOT update as normal updates. There is no useful + * information in the fact that we could make it a HOT update + * locally and the WAL layout is compatible. + */ + case XLOG_HEAP_HOT_UPDATE: + case XLOG_HEAP_UPDATE: + AreaDecodeUpdate(ctx, buf); + break; + + case XLOG_HEAP_DELETE: + AreaDecodeDelete(ctx, buf); + break; + + case XLOG_HEAP_NEWPAGE: + /* + * This is only used in places like indexams and CLUSTER which + * don't contain changes relevant for logical replication. + */ + break; + + case XLOG_HEAP_INPLACE: + break; + + case XLOG_HEAP_LOCK: + /* we don't care about row level locks for now */ + break; + + case XLOG_HEAP_BASE_SHIFT: + break; + + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP_ID record type: %u", info))); + break; + } +} + + static void DecodeUheapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) { uint8 info = XLogRecGetInfo(buf->record) & XLOG_UHEAP_OPMASK; @@ -662,14 +940,109 @@ static void DecodeUheapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) } } -static inline bool FilterByOrigin(LogicalDecodingContext *ctx, RepOriginId origin_id) +static void AreaDecodeUheapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) { + uint8 info = XLogRecGetInfo(buf->record) & XLOG_UHEAP_OPMASK; + TransactionId xid = XLogRecGetXid(buf->record); + ctx->write_xid = xid; + switch (info) { + case XLOG_UHEAP_INSERT: + AreaDecodeUInsert(ctx, buf); + break; + case XLOG_UHEAP_UPDATE: + AreaDecodeUUpdate(ctx, buf); + break; + + case XLOG_UHEAP_DELETE: + AreaDecodeUDelete(ctx, buf); + break; + + case XLOG_UHEAP_FREEZE_TD_SLOT: + case XLOG_UHEAP_INVALID_TD_SLOT: + case XLOG_UHEAP_CLEAN: + break; + + case XLOG_UHEAP_MULTI_INSERT: + AreaDecodeUMultiInsert(ctx, buf); + break; + default: + ereport(WARNING, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_UHEAP_ID record type: %u", info))); + break; + } +} + + + +bool FilterByOrigin(LogicalDecodingContext *ctx, RepOriginId origin_id) +{ + const int toastMask = (1 << 8) - 1; + origin_id &= toastMask; + if (ctx->callbacks.filter_by_origin_cb == NULL) return false; return filter_by_origin_cb_wrapper(ctx, origin_id); } +static void AreaDecodingChange(ReorderBufferChange *change, LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + change->lsn = buf->origptr; + + Relation relation = NULL; + Oid reloid; + Oid partitionReltoastrelid = InvalidOid;; + + reloid = 
RelidByRelfilenode(change->data.tp.relnode.spcNode, change->data.tp.relnode.relNode, false);
+    if (reloid == InvalidOid) {
+        reloid = PartitionRelidByRelfilenode(change->data.tp.relnode.spcNode,
+            change->data.tp.relnode.relNode, partitionReltoastrelid, NULL, false);
+    }
+    /*
+     * Catalog tuple without data, emitted while catalog was
+     * in the process of being rewritten.
+     */
+    if (reloid == InvalidOid && change->data.tp.newtuple == NULL && change->data.tp.oldtuple == NULL)
+        return;
+    else if (reloid == InvalidOid) {
+        /*
+         * When we try to decode a table that has already been dropped,
+         * we may not find its relnode. In this case, we skip decoding this log.
+         * This will be solved when we use MVCC.
+         */
+        ereport(DEBUG1, (errmsg("could not look up relation %s",
+            relpathperm(change->data.tp.relnode, MAIN_FORKNUM))));
+        return;
+    }
+
+    relation = RelationIdGetRelation(reloid);
+    if (relation == NULL) {
+        ereport(DEBUG1, (errmsg("could not open relation descriptor %s",
+            relpathperm(change->data.tp.relnode, MAIN_FORKNUM))));
+        return;
+    }
+
+    if (CSTORE_NAMESPACE == get_rel_namespace(RelationGetRelid(relation))) {
+        return;
+    }
+
+    if (RelationIsLogicallyLogged(relation)) {
+        /*
+         * For now ignore sequence changes entirely. Most of
+         * the time they don't log changes using records we
+         * understand, so it doesn't make sense to handle the
+         * few cases we do.
+         */
+        if (relation->rd_rel->relkind != RELKIND_SEQUENCE &&
+            !IsToastRelation(relation)) {
+            ctx->reorder->apply_change(ctx->reorder, NULL, relation, change);
+        }
+    }
+    RelationClose(relation);
+}
+
+
+
 /*
  * Consolidated commit record handling between the different form of commit
  * records.
@@ -768,11 +1141,14 @@ static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecPtr lsn, Transaction
 static void DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 {
     XLogReaderState *r = buf->record;
-    xl_heap_insert *xlrec = NULL;
-    ReorderBufferChange *change = NULL;
-    RelFileNode target_node;
-    xlrec = (xl_heap_insert *)GetXlrec(r);
-    int rc = 0;
+    RelFileNode target_node = {0, 0, 0, 0};
+    xl_heap_insert *xlrec = (xl_heap_insert *)GetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapInsert);
+    }
+
     /* only interested in our database */
     Size tuplelen;
     char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
@@ -787,23 +1163,69 @@ static void DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
     if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
         return;

-    change = ReorderBufferGetChange(ctx->reorder);
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
     change->action = REORDER_BUFFER_CHANGE_INSERT;
     change->origin_id = XLogRecGetOrigin(r);
-    rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
     securec_check(rc, "\0", "\0");

     if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE) {
         change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder, tuplelen);

-        DecodeXLogTuple(tupledata, tuplelen, change->data.tp.newtuple);
+        DecodeXLogTuple(tupledata, tuplelen, change->data.tp.newtuple, true);
     }

+    change->data.tp.snapshotcsn = curCSN;
     change->data.tp.clear_toast_afterwards = true;

     ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
 }

+/*
+ * Decode an insert statement for area decoding.
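+ * Unlike DecodeInsert above, the change is not queued into the reorder
+ * buffer by xid: once the relation is resolved, it is handed straight to the
+ * output plugin through AreaDecodingChange.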
+ * + */ +static void AreaDecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + XLogReaderState *r = buf->record; + RelFileNode target_node = {0, 0, 0, 0}; + xl_heap_insert *xlrec = (xl_heap_insert *)GetXlrec(r); + CommitSeqNo curCSN = InvalidCommitSeqNo; + bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true; + if (hasCSN) { + curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapInsert); + } + Size tuplelen; + char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen); + if (tuplelen == 0 && !AllocSizeIsValid(tuplelen)) { + ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen))); + return; + } + XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL); + /* output plugin doesn't look for this origin, no need to queue */ + if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return; + } + + ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder); + change->action = REORDER_BUFFER_CHANGE_INSERT; + change->origin_id = XLogRecGetOrigin(r); + + errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode)); + securec_check(rc, "\0", "\0"); + + if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE) { + change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder, tuplelen); + + DecodeXLogTuple(tupledata, tuplelen, change->data.tp.newtuple, true); + } + + change->data.tp.snapshotcsn = curCSN; + change->data.tp.clear_toast_afterwards = true; + AreaDecodingChange(change, ctx, buf); +} + + /* * Parse XLOG_HEAP_UINSERT (not MULTI_UINSERT!) records into tuplebufs. */ @@ -812,6 +1234,11 @@ static void DecodeUInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) XLogReaderState *r = buf->record; RelFileNode targetNode = {0, 0, 0, 0}; XlUHeapInsert *xlrec = (XlUHeapInsert *)UGetXlrec(r); + CommitSeqNo curCSN = InvalidCommitSeqNo; + bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true; + if (hasCSN) { + curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapInsert); + } Size tuplelen = 0; char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen); if (tuplelen == 0 && !AllocSizeIsValid(tuplelen)) { @@ -829,20 +1256,60 @@ static void DecodeUInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder); change->action = REORDER_BUFFER_CHANGE_UINSERT; change->origin_id = XLogRecGetOrigin(r); - int rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode)); + errno_t rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode)); securec_check(rc, "\0", "\0"); if (xlrec->flags & XLOG_UHEAP_CONTAINS_NEW_TUPLE) { change->data.utp.newtuple = ReorderBufferGetUTupleBuf(ctx->reorder, tuplelen); - DecodeXLogUTuple(tupledata, tuplelen, change->data.utp.newtuple); + DecodeXLogTuple(tupledata, tuplelen, (ReorderBufferTupleBuf *)change->data.utp.newtuple, false); } + change->data.utp.snapshotcsn = curCSN; change->data.tp.clear_toast_afterwards = true; - ereport(LOG, (errmsg("DecodeUInsert: cur xid = %lu", UHeapXlogGetCurrentXid(r)))); - TransactionId tid = UHeapXlogGetCurrentXid(r); - ReorderBufferQueueChange(ctx->reorder, tid, buf->origptr, change); + ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r, hasCSN), buf->origptr, change); } +/* + * Parse XLOG_HEAP_UINSERT (not MULTI_UINSERT!) records into tuplebufs for area decoding. 
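+ * As with AreaDecodeInsert, the change is emitted immediately through
+ * AreaDecodingChange instead of ReorderBufferQueueChange, and the snapshot
+ * CSN is read from the record when xl_term carries XLOG_CONTAIN_CSN.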
+ */
+static void AreaDecodeUInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    RelFileNode targetNode = {0, 0, 0, 0};
+    XLogReaderState *r = buf->record;
+    XlUHeapInsert *xlrec = (XlUHeapInsert *)UGetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapInsert);
+    }
+    Size tuplelen = 0;
+    char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
+
+    if (tuplelen == 0 && !AllocSizeIsValid(tuplelen)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen)));
+        return;
+    }
+    XLogRecGetBlockTag(r, 0, &targetNode, NULL, NULL);
+
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
+    change->action = REORDER_BUFFER_CHANGE_UINSERT;
+    change->origin_id = XLogRecGetOrigin(r);
+    errno_t rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+    if (xlrec->flags & XLOG_UHEAP_CONTAINS_NEW_TUPLE) {
+        change->data.utp.newtuple = ReorderBufferGetUTupleBuf(ctx->reorder, tuplelen);
+        DecodeXLogTuple(tupledata, tuplelen, (ReorderBufferTupleBuf *)change->data.utp.newtuple, false);
+    }
+    change->data.utp.snapshotcsn = curCSN;
+    change->data.utp.clear_toast_afterwards = true;
+    AreaDecodingChange(change, ctx, buf);
+}
+
 /*
  * Parse XLOG_HEAP_UPDATE and XLOG_HEAP_HOT_UPDATE, which have the same layout
  * in the record, from wal into proper tuplebufs.
@@ -852,83 +1319,157 @@ static void DecodeUInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 {
     XLogReaderState *r = buf->record;
-    xl_heap_update *xlrec = NULL;
-    ReorderBufferChange *change = NULL;
-    RelFileNode target_node;
-    xlrec = (xl_heap_update *)GetXlrec(r);
-    int rc = 0;
-    XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
-    /* only interested in our database */
-    if (target_node.dbNode != ctx->slot->data.database)
-        return;
-    Size datalen_new;
-    Size tuplelen_new;
-    char *data_new = NULL;
-    data_new = XLogRecGetBlockData(r, 0, &datalen_new);
-    tuplelen_new = datalen_new - SizeOfHeapHeader;
-    if (tuplelen_new == 0 && !AllocSizeIsValid(tuplelen_new)) {
-        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), tuplelen, don't decode it", tuplelen_new)));
-        return;
-    }
-    Size datalen_old;
-    Size tuplelen_old;
-    char *data_old = NULL;
-    Size heapUpdateSize;
+    RelFileNode target_node = {0, 0, 0, 0};
+    xl_heap_update *xlrec = (xl_heap_update *)GetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    Size heapUpdateSize = 0;
     if ((XLogRecGetInfo(r) & XLOG_TUPLE_LOCK_UPGRADE_FLAG) == 0) {
         heapUpdateSize = SizeOfOldHeapUpdate;
     } else {
         heapUpdateSize = SizeOfHeapUpdate;
     }
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + heapUpdateSize);
+    }
+
+    XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+    /* only interested in our database */
+    if (target_node.dbNode != ctx->slot->data.database) {
+        return;
+    }
+    Size datalen_new = 0;
+    char *data_new = XLogRecGetBlockData(r, 0, &datalen_new);
+    Size tuplelen_new = datalen_new - SizeOfHeapHeader;
+    if (tuplelen_new == 0 && !AllocSizeIsValid(tuplelen_new)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen_new)));
+        return;
+    }
+    Size datalen_old = 0;
     /* adapt 64 xid, if this tuple is the first tuple of a new page */
     bool is_init = (XLogRecGetInfo(r) & XLOG_HEAP_INIT_PAGE) != 0;
     /* caution, remaining data in record is not aligned */
-    data_old = (char *)xlrec + heapUpdateSize;
+    char *data_old = (char *)xlrec + heapUpdateSize + (hasCSN ? sizeof(CommitSeqNo) : 0);
     if (is_init) {
         datalen_old = XLogRecGetDataLen(r) - heapUpdateSize - sizeof(TransactionId);
     } else {
         datalen_old = XLogRecGetDataLen(r) - heapUpdateSize;
     }
-    tuplelen_old = datalen_old - SizeOfHeapHeader;
+    datalen_old -= hasCSN ? sizeof(CommitSeqNo) : 0;
+    Size tuplelen_old = datalen_old - SizeOfHeapHeader;
     if (tuplelen_old == 0 && !AllocSizeIsValid(tuplelen_old)) {
-        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), tuplelen, don't decode it", tuplelen_old)));
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen_old)));
         return;
     }
     /* output plugin doesn't look for this origin, no need to queue */
-    if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
         return;
+    }
-    change = ReorderBufferGetChange(ctx->reorder);
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
     change->action = REORDER_BUFFER_CHANGE_UPDATE;
     change->origin_id = XLogRecGetOrigin(r);
-    rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
     securec_check(rc, "\0", "\0");
     if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE) {
         change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder, tuplelen_new);
-        DecodeXLogTuple(data_new, datalen_new, change->data.tp.newtuple);
+        DecodeXLogTuple(data_new, datalen_new, change->data.tp.newtuple, true);
     }
     if (xlrec->flags & XLH_UPDATE_CONTAINS_OLD) {
         change->data.tp.oldtuple = ReorderBufferGetTupleBuf(ctx->reorder, tuplelen_old);
-        DecodeXLogTuple(data_old, datalen_old, change->data.tp.oldtuple);
+        DecodeXLogTuple(data_old, datalen_old, change->data.tp.oldtuple, true);
     }
+    change->data.tp.snapshotcsn = curCSN;
     change->data.tp.clear_toast_afterwards = true;
     ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
 }
 
+/*
+ * Decode update sql for area decoding.
+ *
+ */
+static void AreaDecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    XLogReaderState *r = buf->record;
+    RelFileNode target_node = {0, 0, 0, 0};
+    xl_heap_update *xlrec = (xl_heap_update *)GetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    Size heapUpdateSize = 0;
+    if ((XLogRecGetInfo(r) & XLOG_TUPLE_LOCK_UPGRADE_FLAG) == 0) {
+        heapUpdateSize = SizeOfOldHeapUpdate;
+    } else {
+        heapUpdateSize = SizeOfHeapUpdate;
+    }
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + heapUpdateSize);
+    }
+    XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+    Size datalen_new = 0;
+    char *data_new = XLogRecGetBlockData(r, 0, &datalen_new);
+    Size tuplelen_new = datalen_new - SizeOfHeapHeader;
+    if (tuplelen_new == 0 && !AllocSizeIsValid(tuplelen_new)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen_new)));
+        return;
+    }
+    Size datalen_old = 0;
+
+    /* adapt 64 xid, if this tuple is the first tuple of a new page */
+    bool is_init = (XLogRecGetInfo(r) & XLOG_HEAP_INIT_PAGE) != 0;
+    /* caution, remaining data in record is not aligned */
+    char *data_old = (char *)xlrec + heapUpdateSize + (hasCSN ? sizeof(CommitSeqNo) : 0);
+    if (is_init) {
+        datalen_old = XLogRecGetDataLen(r) - heapUpdateSize - sizeof(TransactionId);
+    } else {
+        datalen_old = XLogRecGetDataLen(r) - heapUpdateSize;
+    }
+    datalen_old -= hasCSN ? sizeof(CommitSeqNo) : 0;
+    Size tuplelen_old = datalen_old - SizeOfHeapHeader;
+    if (tuplelen_old == 0 && !AllocSizeIsValid(tuplelen_old)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen_old)));
+        return;
+    }
+
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
+    change->action = REORDER_BUFFER_CHANGE_UPDATE;
+    change->origin_id = XLogRecGetOrigin(r);
+    errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+    if (xlrec->flags & XLH_UPDATE_CONTAINS_OLD) {
+        change->data.tp.oldtuple = ReorderBufferGetTupleBuf(ctx->reorder, tuplelen_old);
+        DecodeXLogTuple(data_old, datalen_old, change->data.tp.oldtuple, true);
+    }
+    if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE) {
+        change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder, tuplelen_new);
+        DecodeXLogTuple(data_new, datalen_new, change->data.tp.newtuple, true);
+    }
+    change->data.tp.snapshotcsn = curCSN;
+    change->data.tp.clear_toast_afterwards = true;
+    AreaDecodingChange(change, ctx, buf);
+}
+
 /*
  * Filter out records that we don't need to decode.
  */
-static bool FilterRecord(LogicalDecodingContext *ctx, XLogReaderState *r, uint8 flags, RelFileNode* rnode)
+bool FilterRecord(LogicalDecodingContext *ctx, XLogReaderState *r, uint8 flags, RelFileNode* rnode)
 {
     if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
         return true;
     }
     if (((flags & XLZ_UPDATE_PREFIX_FROM_OLD) != 0) || ((flags & XLZ_UPDATE_SUFFIX_FROM_OLD) != 0)) {
-        ereport(WARNING, (errmsg("update tuple has affix, don't decode it")));
+        ereport(LOG, (errmsg("update tuple has affix, don't decode it")));
         return true;
     }
     XLogRecGetBlockTag(r, 0, rnode, NULL, NULL);
@@ -938,7 +1479,7 @@ static bool FilterRecord(LogicalDecodingContext *ctx, XLogReaderState *r, uint8
     return false;
 }
 
-static void UpdateUndoBody(Size* addLenPtr, uint8 flag)
+void UpdateUndoBody(Size* addLenPtr, uint8 flag)
 {
     if ((flag & XLOG_UNDO_HEADER_HAS_SUB_XACT) != 0) {
         *addLenPtr += sizeof(bool);
@@ -959,7 +1500,7 @@ static void UpdateUndoBody(Size* addLenPtr, uint8 flag)
 /*
  * Calc the length of old tuples in XLOG_UHEAP_UPDATEs.
  */
-static void UpdateOldTupleCalc(bool isInplaceUpdate, XLogReaderState *r, char **tupleOld, Size *tuplelenOld)
+void UpdateOldTupleCalc(bool isInplaceUpdate, XLogReaderState *r, char **tupleOld, Size *tuplelenOld)
 {
     XlUndoHeader *xlundohdr = (XlUndoHeader *)(*tupleOld);
     Size addLen = SizeOfXLUndoHeader;
@@ -1000,6 +1541,11 @@ static void DecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
     XLogReaderState *r = buf->record;
     RelFileNode targetNode = {0, 0, 0, 0};
     XlUHeapUpdate *xlrec = (XlUHeapUpdate *)UGetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapUpdate);
+    }
     bool isInplaceUpdate = (xlrec->flags & XLZ_NON_INPLACE_UPDATE) == 0;
     if (FilterRecord(ctx, r, xlrec->flags, &targetNode)) {
         return;
@@ -1012,8 +1558,8 @@ static void DecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
         return;
     }
 
-    Size tuplelenOld = XLogRecGetDataLen(r) - SizeOfUHeapUpdate;
-    char *dataOld = (char *)xlrec + SizeOfUHeapUpdate;
+    Size tuplelenOld = XLogRecGetDataLen(r) - SizeOfUHeapUpdate - (hasCSN ? sizeof(CommitSeqNo) : 0);
+    char *dataOld = (char *)xlrec + SizeOfUHeapUpdate + (hasCSN ? sizeof(CommitSeqNo) : 0);
     UpdateOldTupleCalc(isInplaceUpdate, r, &dataOld, &tuplelenOld);
 
     if (tuplelenOld == 0 && !AllocSizeIsValid(tuplelenOld)) {
@@ -1024,30 +1570,88 @@ static void DecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
     ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
     change->action = REORDER_BUFFER_CHANGE_UUPDATE;
     change->origin_id = XLogRecGetOrigin(r);
-    int rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
+    errno_t rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
     securec_check(rc, "\0", "\0");
     change->data.utp.newtuple = ReorderBufferGetUTupleBuf(ctx->reorder, datalenNew);
-    DecodeXLogUTuple(dataNew, datalenNew, change->data.utp.newtuple);
+    DecodeXLogTuple(dataNew, datalenNew, (ReorderBufferTupleBuf *)change->data.utp.newtuple, false);
     if (xlrec->flags & XLZ_HAS_UPDATE_UNDOTUPLE) {
         change->data.utp.oldtuple = ReorderBufferGetUTupleBuf(ctx->reorder, tuplelenOld);
         if (!isInplaceUpdate) {
-            DecodeXLogUTuple(dataOld, tuplelenOld, change->data.utp.oldtuple);
+            DecodeXLogTuple(dataOld, tuplelenOld, (ReorderBufferTupleBuf *)change->data.utp.oldtuple, false);
         } else if ((xlrec->flags & XLOG_UHEAP_CONTAINS_OLD_HEADER) != 0) {
             int undoXorDeltaSize = *(int *)dataOld;
             dataOld += sizeof(int) + undoXorDeltaSize;
             tuplelenOld -= sizeof(int) + undoXorDeltaSize;
-            DecodeXLogUTuple(dataOld, tuplelenOld, change->data.utp.oldtuple);
+            DecodeXLogTuple(dataOld, tuplelenOld, (ReorderBufferTupleBuf *)change->data.utp.oldtuple, false);
         } else {
             ereport(LOG, (errmsg("current tuple is not fully logged, don't decode it")));
             return;
         }
     }
+    change->data.utp.snapshotcsn = curCSN;
     change->data.utp.clear_toast_afterwards = true;
-    ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r), buf->origptr, change);
+    ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r, hasCSN), buf->origptr, change);
 }
 
+static void AreaDecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    XLogReaderState *r = buf->record;
+    RelFileNode targetNode = {0, 0, 0, 0};
+    XlUHeapUpdate *xlrec = (XlUHeapUpdate *)UGetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    XLogRecGetBlockTag(r, 0, &targetNode, NULL, NULL);
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapUpdate);
+    }
+    bool isInplaceUpdate = (xlrec->flags & XLZ_NON_INPLACE_UPDATE) == 0;
+    Size tuplelenOld = XLogRecGetDataLen(r) - SizeOfUHeapUpdate - (hasCSN ? sizeof(CommitSeqNo) : 0);
+    char *dataOld = (char *)xlrec + SizeOfUHeapUpdate + (hasCSN ? sizeof(CommitSeqNo) : 0);
+    UpdateOldTupleCalc(isInplaceUpdate, r, &dataOld, &tuplelenOld);
+    if (tuplelenOld == 0 && !AllocSizeIsValid(tuplelenOld)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelenOld)));
+        return;
+    }
+
+    Size datalenNew = 0;
+    char *dataNew = XLogRecGetBlockData(r, 0, &datalenNew);
+    if (datalenNew == 0 && !AllocSizeIsValid(datalenNew)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", datalenNew)));
+        return;
+    }
+
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
+    change->action = REORDER_BUFFER_CHANGE_UUPDATE;
+    change->origin_id = XLogRecGetOrigin(r);
+    errno_t ret = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
+    securec_check(ret, "\0", "\0");
+    change->data.utp.newtuple = ReorderBufferGetUTupleBuf(ctx->reorder, datalenNew);
+
+    DecodeXLogTuple(dataNew, datalenNew, (ReorderBufferTupleBuf *)change->data.utp.newtuple, false);
+    if (xlrec->flags & XLZ_HAS_UPDATE_UNDOTUPLE) {
+        change->data.utp.oldtuple = ReorderBufferGetUTupleBuf(ctx->reorder, tuplelenOld);
+        if (!isInplaceUpdate) {
+            DecodeXLogTuple(dataOld, tuplelenOld, (ReorderBufferTupleBuf *)change->data.utp.oldtuple, false);
+        } else if ((xlrec->flags & XLOG_UHEAP_CONTAINS_OLD_HEADER) != 0) {
+            int undoXorDeltaSize = *(int *)dataOld;
+            dataOld += sizeof(int) + undoXorDeltaSize;
+            tuplelenOld -= sizeof(int) + undoXorDeltaSize;
+            DecodeXLogTuple(dataOld, tuplelenOld, (ReorderBufferTupleBuf *)change->data.utp.oldtuple, false);
+        } else {
+            ereport(WARNING, (errmsg("current tuple is not fully logged, don't decode it")));
+            return;
+        }
+    }
+    change->data.utp.snapshotcsn = curCSN;
+    change->data.utp.clear_toast_afterwards = true;
+    AreaDecodingChange(change, ctx, buf);
+}
+
 /*
  * Parse XLOG_HEAP_DELETE from wal into proper tuplebufs.
  *
@@ -1056,10 +1660,8 @@ static void DecodeUUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 {
     XLogReaderState *r = buf->record;
-    xl_heap_delete *xlrec = NULL;
-    ReorderBufferChange *change = NULL;
-    RelFileNode target_node;
-    int rc = 0;
+    RelFileNode target_node = {0, 0, 0, 0};
+    xl_heap_delete *xlrec = (xl_heap_delete *)GetXlrec(r);
     Size heapDeleteSize;
     if ((XLogRecGetInfo(r) & XLOG_TUPLE_LOCK_UPGRADE_FLAG) == 0) {
         heapDeleteSize = SizeOfOldHeapDelete;
@@ -1067,24 +1669,31 @@ static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
         heapDeleteSize = SizeOfHeapDelete;
     }
 
-    xlrec = (xl_heap_delete *)GetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + heapDeleteSize);
+    }
+
     /* only interested in our database */
     XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
-    if (target_node.dbNode != ctx->slot->data.database)
+    if (target_node.dbNode != ctx->slot->data.database) {
         return;
+    }
 
     /* output plugin doesn't look for this origin, no need to queue */
-    if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
         return;
+    }
 
     Size datalen = XLogRecGetDataLen(r) - heapDeleteSize;
     if (datalen == 0 && !AllocSizeIsValid(datalen)) {
         ereport(WARNING, (errmsg("tuplelen is invalid(%lu), tuplelen, don't decode it", datalen)));
         return;
     }
-    change = ReorderBufferGetChange(ctx->reorder);
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
     change->action = REORDER_BUFFER_CHANGE_DELETE;
     change->origin_id = XLogRecGetOrigin(r);
-    rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
     securec_check(rc, "\0", "\0");
 
     /* old primary key stored */
@@ -1092,13 +1701,67 @@ static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
         Assert(XLogRecGetDataLen(r) > (heapDeleteSize + SizeOfHeapHeader));
         change->data.tp.oldtuple = ReorderBufferGetTupleBuf(ctx->reorder, datalen);
 
-        DecodeXLogTuple((char *)xlrec + heapDeleteSize, datalen, change->data.tp.oldtuple);
+        DecodeXLogTuple((char *)xlrec + heapDeleteSize + (hasCSN ? sizeof(CommitSeqNo) : 0),
+            datalen - (hasCSN ? sizeof(CommitSeqNo) : 0), change->data.tp.oldtuple, true);
     }
+    change->data.tp.snapshotcsn = curCSN;
     change->data.tp.clear_toast_afterwards = true;
 
     ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
 }
 
+/*
+ * Decode delete sql for area decoding.
+ *
+ */
+static void AreaDecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    XLogReaderState *r = buf->record;
+    RelFileNode target_node = {0, 0, 0, 0};
+    xl_heap_delete *xlrec = (xl_heap_delete *)GetXlrec(r);
+    Size heapDeleteSize = 0;
+    if ((XLogRecGetInfo(r) & XLOG_TUPLE_LOCK_UPGRADE_FLAG) == 0) {
+        heapDeleteSize = SizeOfOldHeapDelete;
+    } else {
+        heapDeleteSize = SizeOfHeapDelete;
+    }
+
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + heapDeleteSize);
+    }
+    XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+
+    Size datalen = XLogRecGetDataLen(r) - heapDeleteSize;
+    if (datalen == 0 && !AllocSizeIsValid(datalen)) {
+        ereport(WARNING, (errmsg("datalen is invalid(%lu), don't decode it", datalen)));
+        return;
+    }
+    ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
+    change->action = REORDER_BUFFER_CHANGE_DELETE;
+    change->origin_id = XLogRecGetOrigin(r);
+    errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+
+    /* old primary key stored */
+    if (xlrec->flags & XLH_DELETE_CONTAINS_OLD) {
+        Assert(XLogRecGetDataLen(r) > (heapDeleteSize + SizeOfHeapHeader));
+        change->data.tp.oldtuple = ReorderBufferGetTupleBuf(ctx->reorder, datalen);
+
+        DecodeXLogTuple((char *)xlrec + heapDeleteSize + (hasCSN ? sizeof(CommitSeqNo) : 0),
+            datalen - (hasCSN ? sizeof(CommitSeqNo) : 0), change->data.tp.oldtuple, true);
+    }
+    change->data.tp.snapshotcsn = curCSN;
+    change->data.tp.clear_toast_afterwards = true;
+    AreaDecodingChange(change, ctx, buf);
+}
+
 /*
  * Parse XLOG_UHEAP_DELETE from wal into proper tuplebufs.
  *
@@ -1110,6 +1773,11 @@ static void DecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
     XlUHeapDelete *xlrec = NULL;
     RelFileNode targetNode = {0, 0, 0, 0};
     xlrec = (XlUHeapDelete *)UGetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapDelete);
+    }
     XLogRecGetBlockTag(r, 0, &targetNode, NULL, NULL);
 
     if (targetNode.dbNode != ctx->slot->data.database) {
@@ -1119,12 +1787,13 @@ static void DecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
     if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
         return;
     }
-    XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete);
-    Size datalen = XLogRecGetDataLen(r) - SizeOfUHeapDelete - SizeOfXLUndoHeader;
+    XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0));
+    Size datalen = XLogRecGetDataLen(r) - SizeOfUHeapDelete - SizeOfXLUndoHeader - (hasCSN ? sizeof(CommitSeqNo) : 0);
     Size addLen = 0;
     UpdateUndoBody(&addLen, xlundohdr->flag);
 
-    Size metaLen = DecodeUndoMeta((char*)xlrec + SizeOfUHeapDelete + SizeOfXLUndoHeader + addLen);
+    Size metaLen = DecodeUndoMeta((char*)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0) +
+        SizeOfXLUndoHeader + addLen);
     addLen += metaLen;
     if (datalen == 0 && !AllocSizeIsValid(datalen)) {
         ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", datalen)));
@@ -1133,16 +1802,65 @@ static void DecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
     ReorderBufferChange* change = ReorderBufferGetChange(ctx->reorder);
     change->action = REORDER_BUFFER_CHANGE_UDELETE;
     change->origin_id = XLogRecGetOrigin(r);
-    int rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
+    errno_t rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
     securec_check(rc, "\0", "\0");
 
     change->data.utp.oldtuple = ReorderBufferGetUTupleBuf(ctx->reorder, datalen);
-    DecodeXLogUTuple((char *)xlrec + SizeOfUHeapDelete + SizeOfXLUndoHeader + addLen, datalen - addLen,
-        change->data.utp.oldtuple);
+    DecodeXLogTuple((char *)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0) + SizeOfXLUndoHeader +
+        addLen, datalen - addLen, (ReorderBufferTupleBuf *)change->data.utp.oldtuple, false);
     change->data.utp.clear_toast_afterwards = true;
-    ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r), buf->origptr, change);
+    change->data.utp.snapshotcsn = curCSN;
+    ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r, hasCSN), buf->origptr, change);
 }
 
+static void AreaDecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    XLogReaderState *r = buf->record;
+    XlUHeapDelete *xlrec = NULL;
+    RelFileNode targetNode = {0, 0, 0, 0};
+    xlrec = (XlUHeapDelete *)UGetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapDelete);
+    }
+    XLogRecGetBlockTag(r, 0, &targetNode, NULL, NULL);
+
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+    XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0));
+    Size datalen = XLogRecGetDataLen(r) - SizeOfUHeapDelete - SizeOfXLUndoHeader - (hasCSN ? sizeof(CommitSeqNo) : 0);
+    Size addLen = 0;
+    UpdateUndoBody(&addLen, xlundohdr->flag);
+
+    Size metaLen = DecodeUndoMeta((char*)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0) +
+        SizeOfXLUndoHeader + addLen);
+    addLen += metaLen;
+
+    if (datalen == 0 && !AllocSizeIsValid(datalen)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", datalen)));
+        return;
+    }
+
+    ReorderBufferChange* change = ReorderBufferGetChange(ctx->reorder);
+    change->action = REORDER_BUFFER_CHANGE_UDELETE;
+    change->origin_id = XLogRecGetOrigin(r);
+    errno_t rc = memcpy_s(&change->data.utp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+
+    change->data.utp.oldtuple = ReorderBufferGetUTupleBuf(ctx->reorder, datalen);
+    DecodeXLogTuple((char *)xlrec + SizeOfUHeapDelete + (hasCSN ? sizeof(CommitSeqNo) : 0) + SizeOfXLUndoHeader +
+        addLen, datalen - addLen, (ReorderBufferTupleBuf *)change->data.utp.oldtuple, false);
+    change->data.utp.clear_toast_afterwards = true;
+    change->data.utp.snapshotcsn = curCSN;
+    AreaDecodingChange(change, ctx, buf);
+}
+
 /*
  * Decode XLOG_HEAP2_MULTI_INSERT_insert record into multiple tuplebufs.
  *
@@ -1151,37 +1869,38 @@ static void DecodeUDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 {
     XLogReaderState *r = buf->record;
-    xl_heap_multi_insert *xlrec = NULL;
-    int i = 0;
-    char *data = NULL;
-    char *tupledata = NULL;
     Size tuplelen = 0;
-    RelFileNode rnode;
-    int rc = 0;
-    xlrec = (xl_heap_multi_insert *)GetXlrec(r);
+    RelFileNode rnode = {0, 0, 0, 0};
+    xl_heap_multi_insert *xlrec = (xl_heap_multi_insert *)GetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapMultiInsert);
+    }
 
-    if (xlrec->isCompressed)
+    if (xlrec->isCompressed) {
         return;
+    }
     /* only interested in our database */
     XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
-    if (rnode.dbNode != ctx->slot->data.database)
+    if (rnode.dbNode != ctx->slot->data.database) {
         return;
+    }
 
     /* output plugin doesn't look for this origin, no need to queue */
-    if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
         return;
+    }
 
-    tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
-    data = tupledata;
-    for (i = 0; i < xlrec->ntuples; i++) {
-        ReorderBufferChange *change = NULL;
+    char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
+    char *data = tupledata;
+    for (int i = 0; i < xlrec->ntuples; i++) {
         xl_multi_insert_tuple *xlhdr = NULL;
         int datalen = 0;
-        ReorderBufferTupleBuf *tuple = NULL;
-        change = ReorderBufferGetChange(ctx->reorder);
+        ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
         change->action = REORDER_BUFFER_CHANGE_INSERT;
         change->origin_id = XLogRecGetOrigin(r);
-        rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &rnode, sizeof(RelFileNode));
+        errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &rnode, sizeof(RelFileNode));
         securec_check(rc, "", "");
 
         /*
@@ -1202,7 +1921,7 @@ static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf
             datalen = xlhdr->datalen;
             if (datalen != 0 && AllocSizeIsValid((uint)datalen)) {
                 change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder, datalen);
-                tuple = change->data.tp.newtuple;
+                ReorderBufferTupleBuf *tuple = change->data.tp.newtuple;
                 header = tuple->tuple.t_data;
 
                 /* not a disk based tuple */
@@ -1230,7 +1949,7 @@ static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf
             }
             data += datalen;
         }
-
+        change->data.tp.snapshotcsn = curCSN;
         /*
         * Reset toast reassembly state only after the last row in the last
         * xl_multi_insert_tuple record emitted by one heap_multi_insert()
@@ -1246,6 +1965,99 @@ static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf
     }
 }
 
+/*
+ * Decode multiInsert sql for area decoding.
+ *
+ */
+static void AreaDecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    XLogReaderState *r = buf->record;
+    Size tuplelen = 0;
+    RelFileNode rnode = {0, 0, 0, 0};
+    xl_heap_multi_insert *xlrec = (xl_heap_multi_insert *)GetXlrec(r);
+    CommitSeqNo curCSN = InvalidCommitSeqNo;
+    bool hasCSN = (r->decoded_record->xl_term & XLOG_CONTAIN_CSN) == 0 ? false : true;
+    if (hasCSN) {
+        curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapMultiInsert);
+    }
+
+    if (xlrec->isCompressed) {
+        return;
+    }
+    XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+
+    char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
+    char *data = tupledata;
+    for (int i = 0; i < xlrec->ntuples; i++) {
+        xl_multi_insert_tuple *xlhdr = NULL;
+
+        ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
+        change->action = REORDER_BUFFER_CHANGE_INSERT;
+        change->origin_id = XLogRecGetOrigin(r);
+        errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &rnode, sizeof(RelFileNode));
+        securec_check(rc, "", "");
+
+        /*
+         * CONTAINS_NEW_TUPLE will always be set currently as multi_insert
+         * isn't used for catalogs, but better be future proof.
+         *
+         * We decode the tuple in pretty much the same way as DecodeXLogTuple,
+         * but since the layout is slightly different, we can't use it here.
+         */
+        if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE) {
+            HeapTupleHeader header;
+            if ((data - tupledata) % ALIGNOF_SHORT == 0) {
+                xlhdr = (xl_multi_insert_tuple *)data;
+            } else {
+                xlhdr = (xl_multi_insert_tuple *)(data + ALIGNOF_SHORT - (data - tupledata) % ALIGNOF_SHORT);
+            }
+            data = ((char *)xlhdr) + SizeOfMultiInsertTuple;
+            int datalen = xlhdr->datalen;
+            if (datalen != 0 && AllocSizeIsValid((uint)datalen)) {
+                change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder, datalen);
+                ReorderBufferTupleBuf *tuple = change->data.tp.newtuple;
+                header = tuple->tuple.t_data;
+
+                /* not a disk based tuple */
+                ItemPointerSetInvalid(&tuple->tuple.t_self);
+
+                /*
+                 * We can only figure this out after reassembling the
+                 * transactions.
+                 */
+                tuple->tuple.t_bucketId = InvalidBktId;
+                tuple->tuple.t_tableOid = InvalidOid;
+                tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
+
+                rc = memset_s(header, SizeofHeapTupleHeader, 0, SizeofHeapTupleHeader);
+                securec_check(rc, "\0", "\0");
+                rc = memcpy_s((char *)tuple->tuple.t_data + SizeofHeapTupleHeader, datalen, (char *)data, datalen);
+                securec_check(rc, "\0", "\0");
+
+                header->t_hoff = xlhdr->t_hoff;
+                header->t_infomask = xlhdr->t_infomask;
+                header->t_infomask2 = xlhdr->t_infomask2;
+            } else {
+                ereport(WARNING, (errmsg("tuplelen is invalid(%d), don't decode it", datalen)));
+                return;
+            }
+            data = data + datalen;
+        }
+        change->data.tp.snapshotcsn = curCSN;
+
+        if ((xlrec->flags & XLH_INSERT_LAST_IN_MULTI) && ((i + 1) == xlrec->ntuples)) {
+            change->data.tp.clear_toast_afterwards = true;
+        } else {
+            change->data.tp.clear_toast_afterwards = false;
+        }
+        AreaDecodingChange(change, ctx, buf);
+    }
+}
+
 static ReorderBufferChange* GetUheapChange(LogicalDecodingContext *ctx, XLogReaderState *r, RelFileNode* node)
 {
     ReorderBufferChange *change = ReorderBufferGetChange(ctx->reorder);
@@ -1261,7 +2073,8 @@ static void DecodeUMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *bu
     XLogReaderState *r = buf->record;
     Size tuplelen = 0;
     RelFileNode rnode = {0, 0, 0, 0};
-    XlUHeapMultiInsert *xlrec = (XlUHeapMultiInsert *)UGetMultiInsertXlrec(r);
+    CommitSeqNo curCSN = 0;
+    XlUHeapMultiInsert *xlrec = (XlUHeapMultiInsert *)UGetMultiInsertXlrec(r, &curCSN);
 
     XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
     if (rnode.dbNode != ctx->slot->data.database) {
@@ -1327,10 +2140,78 @@ static void DecodeUMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *bu
         } else {
             change->data.utp.clear_toast_afterwards = false;
         }
-        ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r), buf->origptr, change);
+        change->data.utp.snapshotcsn = curCSN;
+        ReorderBufferQueueChange(ctx->reorder, UHeapXlogGetCurrentXid(r, false), buf->origptr, change);
     }
 }
 
+static void AreaDecodeUMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+    XLogReaderState *r = buf->record;
+    int i;
+    Size tuplelen = 0;
+    RelFileNode rnode = {0, 0, 0, 0};
+    CommitSeqNo curCSN = 0;
+    XlUHeapMultiInsert *xlrec = (XlUHeapMultiInsert *)UGetMultiInsertXlrec(r, &curCSN);
+
+    XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
+
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+    char *data = XLogRecGetBlockData(r, 0, &tuplelen);
+    for (i = 0; i < xlrec->ntuples; i++) {
+        ReorderBufferChange *change = GetUheapChange(ctx, r, &rnode);
+
+        if (xlrec->flags & XLOG_UHEAP_CONTAINS_NEW_TUPLE) {
+            XlMultiInsertUTuple *xlhdr = (XlMultiInsertUTuple *)data;
+            int len = xlhdr->datalen;
+            data = ((char *)xlhdr) + SizeOfMultiInsertUTuple;
+            if (len != 0 && AllocSizeIsValid((uint)len)) {
+                change->data.utp.newtuple = ReorderBufferGetUTupleBuf(ctx->reorder, len);
+                ReorderBufferUTupleBuf *tuple = change->data.utp.newtuple;
+                UHeapDiskTuple header = tuple->tuple.disk_tuple;
+
+                /* not a disk based tuple */
+                ItemPointerSetInvalid(&tuple->tuple.ctid);
+
+                tuple->tuple.t_bucketId = InvalidBktId;
+                tuple->tuple.table_oid = InvalidOid;
+                tuple->tuple.disk_tuple_size = len + SizeOfUHeapDiskTupleData;
+
+                errno_t ret = memset_s(header, SizeOfUHeapDiskTupleData, 0, SizeOfUHeapDiskTupleData);
+                securec_check(ret, "\0", "\0");
+                ret = memcpy_s((char *)tuple->tuple.disk_tuple + SizeOfUHeapDiskTupleData, len, (char *)data, len);
+                securec_check(ret, "\0", "\0");
+
+                header->flag2 = xlhdr->flag2;
+                header->t_hoff = xlhdr->t_hoff;
+                header->flag = xlhdr->flag;
+            } else {
+                ereport(WARNING, (errmsg("tuplelen is invalid(%d), don't decode it", len)));
+                return;
+            }
+            data += len;
+        }
+
+        /*
+         * Reset toast reassembly state only after the last row in the last
+         * xl_multi_insert_tuple record emitted by one heap_multi_insert()
+         * call.
+         */
+        if ((i + 1) == xlrec->ntuples) {
+            change->data.utp.clear_toast_afterwards = true;
+        } else {
+            change->data.utp.clear_toast_afterwards = false;
+        }
+        change->data.utp.snapshotcsn = curCSN;
+        AreaDecodingChange(change, ctx, buf);
+    }
+}
+
 /*
  * Read a HeapTuple as WAL logged by heap_insert, heap_update and heap_delete
  * (but not by heap_multi_insert) into a tuplebuf.
@@ -1338,67 +2219,67 @@ static void DecodeUMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *bu
 * The size 'len' and the pointer 'data' in the record need to be
 * computed outside as they are record specific.
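+ * 'isHeapTuple' selects the layout: true decodes an astore heap tuple, false reinterprets the buffer
+ * as a ustore tuple, replacing the old DecodeXLogUTuple() helper.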
 */
-static void DecodeXLogTuple(const char *data, Size len, ReorderBufferTupleBuf *tuple)
+void DecodeXLogTuple(const char *data, Size len, ReorderBufferTupleBuf *tuple, bool isHeapTuple)
 {
-    xl_heap_header xlhdr;
-    int datalen = len - SizeOfHeapHeader;
-    HeapTupleHeader header;
+    int datalen = 0;
     int rc = 0;
-    Assert(datalen >= 0);
-    tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
-    header = tuple->tuple.t_data;
+    if (isHeapTuple) {
+        xl_heap_header xlhdr;
+        datalen = len - SizeOfHeapHeader;
+        Assert(datalen >= 0);
+        tuple->tuple.t_len = datalen + SizeofHeapTupleHeader;
+        HeapTupleHeader header = tuple->tuple.t_data;
 
-    /* not a disk based tuple */
-    ItemPointerSetInvalid(&tuple->tuple.t_self);
+        /* not a disk based tuple */
+        ItemPointerSetInvalid(&tuple->tuple.t_self);
 
-    /* we can only figure this out after reassembling the transactions */
-    tuple->tuple.t_tableOid = InvalidOid;
-    tuple->tuple.t_bucketId = InvalidBktId;
+        /* we can only figure this out after reassembling the transactions */
+        tuple->tuple.t_tableOid = InvalidOid;
+        tuple->tuple.t_bucketId = InvalidBktId;
 
-    /* data is not stored aligned, copy to aligned storage */
-    rc = memcpy_s((char *)&xlhdr, SizeOfHeapHeader, data, SizeOfHeapHeader);
-    securec_check(rc, "", "");
-    rc = memset_s(header, SizeofHeapTupleHeader, 0, SizeofHeapTupleHeader);
-    securec_check(rc, "", "");
-    rc = memcpy_s(((char *)tuple->tuple.t_data) + SizeofHeapTupleHeader, datalen, data + SizeOfHeapHeader, datalen);
-    securec_check(rc, "", "");
+        /* data is not stored aligned, copy to aligned storage */
+        rc = memcpy_s((char *)&xlhdr, SizeOfHeapHeader, data, SizeOfHeapHeader);
+        securec_check(rc, "", "");
+        rc = memset_s(header, SizeofHeapTupleHeader, 0, SizeofHeapTupleHeader);
+        securec_check(rc, "", "");
+        rc = memcpy_s(((char *)tuple->tuple.t_data) + SizeofHeapTupleHeader, datalen, data + SizeOfHeapHeader, datalen);
+        securec_check(rc, "", "");
 
-    header->t_infomask = xlhdr.t_infomask;
-    header->t_infomask2 = xlhdr.t_infomask2;
-    header->t_hoff = xlhdr.t_hoff;
-}
-
-static void DecodeXLogUTuple(const char *data, Size len, ReorderBufferUTupleBuf *tuple)
-{
-    XlUHeapHeader xlhdr;
-    int datalen = len - SizeOfUHeapHeader;
-
-    UHeapDiskTuple header;
-    int rc = 0;
-    Assert(datalen >= 0);
-
-    tuple->tuple.disk_tuple_size = datalen + SizeOfUHeapDiskTupleData;
-    header = tuple->tuple.disk_tuple;
-
-    /* not a disk based tuple */
-    ItemPointerSetInvalid(&tuple->tuple.ctid);
-
-    /* we can only figure this out after reassembling the transactions */
-    tuple->tuple.table_oid = InvalidOid;
-    tuple->tuple.t_bucketId = InvalidBktId;
-
-    /* data is not stored aligned, copy to aligned storage */
-    rc = memcpy_s((char *)&xlhdr, SizeOfUHeapHeader, data, SizeOfUHeapHeader);
-    securec_check(rc, "", "");
-    rc = memset_s(header, SizeOfUHeapDiskTupleData, 0, SizeOfUHeapDiskTupleData);
-    securec_check(rc, "", "");
-    rc = memcpy_s(((char *)tuple->tuple.disk_tuple) + SizeOfUHeapDiskTupleData, datalen, data + SizeOfUHeapHeader,
-        datalen);
-    securec_check(rc, "", "");
-
-    header->flag = xlhdr.flag;
-    header->flag2 = xlhdr.flag2;
-    header->t_hoff = xlhdr.t_hoff;
+        header->t_infomask = xlhdr.t_infomask;
+        header->t_infomask2 = xlhdr.t_infomask2;
+        header->t_hoff = xlhdr.t_hoff;
+    } else {
+        XlUHeapHeader xlhdr;
+        datalen = len - SizeOfUHeapHeader;
+        Assert(datalen >= 0);
+        UHeapDiskTuple header;
+        /* in this case, we reinterpret the tupleData as UHeapTupleData */
+        UHeapTupleData* utuple = (UHeapTupleData *)(&tuple->tuple);
+
+        header = utuple->disk_tuple;
+
+        /* not a disk based tuple */
+        ItemPointerSetInvalid(&utuple->ctid);
+
+        /* we can only figure this out after reassembling the transactions */
+        utuple->table_oid = InvalidOid;
+        utuple->t_bucketId = InvalidBktId;
+
+        /* data is not stored aligned, copy to aligned storage */
+        rc = memcpy_s((char *)&xlhdr, SizeOfUHeapHeader, data, SizeOfUHeapHeader);
+        securec_check(rc, "", "");
+        rc = memset_s(header, SizeOfUHeapDiskTupleData, 0, SizeOfUHeapDiskTupleData);
+        securec_check(rc, "", "");
+        rc = memcpy_s(((char *)utuple->disk_tuple) + SizeOfUHeapDiskTupleData, datalen, data + SizeOfUHeapHeader,
+            datalen);
+        securec_check(rc, "", "");
+        utuple->disk_tuple_size = datalen + SizeOfUHeapDiskTupleData;
+
+        header->flag = xlhdr.flag;
+        header->flag2 = xlhdr.flag2;
+        header->t_hoff = xlhdr.t_hoff;
+    }
 }
diff --git a/src/gausskernel/storage/replication/logical/logical.cpp b/src/gausskernel/storage/replication/logical/logical.cpp
index 987fd5ff1..bbd1ed957 100644
--- a/src/gausskernel/storage/replication/logical/logical.cpp
+++ b/src/gausskernel/storage/replication/logical/logical.cpp
@@ -49,6 +49,7 @@
 #include "storage/proc.h"
 #include "storage/procarray.h"
 
+#include "utils/builtins.h"
 #include "utils/memutils.h"
 
 /* data for errcontext callback */
 typedef struct LogicalErrorCallbackState {
@@ -57,15 +58,30 @@ typedef struct LogicalErrorCallbackState {
     XLogRecPtr report_location;
 } LogicalErrorCallbackState;
 
+typedef struct ParallelLogicalErrorCallbackState {
+    ParallelLogicalDecodingContext *ctx;
+    const char *callback_name;
+    XLogRecPtr report_location;
+} ParallelLogicalErrorCallbackState;
+
+LogicalDispatcher g_Logicaldispatcher[20];
+
 /* wrappers around output plugin callbacks */
 static void output_plugin_error_callback(void *arg);
 static void startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt, bool is_init);
-static void shutdown_cb_wrapper(LogicalDecodingContext *ctx);
+void shutdown_cb_wrapper(LogicalDecodingContext *ctx);
 static void begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn);
 static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+static void abort_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn);
+static void prepare_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn);
+
 static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation,
                               ReorderBufferChange *change);
+static void parallel_change_cb_wrapper(ParallelReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation,
+    ParallelReorderBufferChange *change);
+
 static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, const char *plugin);
+static void LoadOutputPlugin(ParallelOutputPluginCallbacks *callbacks, const char *plugin);
 
 /*
  * Make sure the current settings & environment are capable of doing logical
@@ -166,6 +182,109 @@ static LogicalDecodingContext *StartupDecodingContext(List *output_plugin_option
     return ctx;
 }
 
+static LogicalDecodingContext *StartupDecodingContextForArea(List *output_plugin_options, XLogRecPtr start_lsn,
+    TransactionId xmin_horizon, bool need_full_snapshot,
+    bool fast_forward, XLogPageReadCB read_page,
+    LogicalOutputPluginWriterPrepareWrite prepare_write,
+    LogicalOutputPluginWriterWrite do_write)
+{
+    MemoryContext context, old_context;
+    LogicalDecodingContext *ctx = NULL;
+
+    context = AllocSetContextCreate(CurrentMemoryContext, "Changeset Extraction Context", ALLOCSET_DEFAULT_MINSIZE,
+        ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE);
+    old_context = MemoryContextSwitchTo(context);
+    ctx = (LogicalDecodingContext *)palloc0(sizeof(LogicalDecodingContext));
+
+    ctx->context = context;
+
+    ctx->reader = XLogReaderAllocate(read_page, ctx);
+    if (unlikely(ctx->reader == NULL))
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+            errmsg("memory is temporarily unavailable while allocate xlog reader")));
+
+    ctx->reader->private_data = ctx;
+
+    ctx->reorder = ReorderBufferAllocate();
+
+    ctx->reorder->private_data = ctx;
+
+    /* wrap output plugin callbacks, so we can add error context information */
+    ctx->reorder->begin = begin_cb_wrapper;
+    ctx->reorder->apply_change = change_cb_wrapper;
+    ctx->reorder->commit = commit_cb_wrapper;
+    ctx->reorder->abort = abort_cb_wrapper;
+    ctx->reorder->prepare = prepare_cb_wrapper;
+
+    ctx->out = makeStringInfo();
+    ctx->prepare_write = prepare_write;
+    ctx->write = do_write;
+
+    ctx->output_plugin_options = output_plugin_options;
+    ctx->fast_forward = fast_forward;
+
+    (void)MemoryContextSwitchTo(old_context);
+
+    return ctx;
+}
+
+static ParallelLogicalDecodingContext *ParallelStartupDecodingContext(List *output_plugin_options, XLogRecPtr start_lsn,
+    TransactionId xmin_horizon, bool need_full_snapshot, bool fast_forward, XLogPageReadCB read_page, int slotId)
+{
+    ReplicationSlot *slot = NULL;
+    MemoryContext old_context;
+    ParallelLogicalDecodingContext *ctx = NULL;
+
+    /* shorter lines... */
+    slot = t_thrd.slot_cxt.MyReplicationSlot;
+
+    old_context = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+    ctx = (ParallelLogicalDecodingContext *)palloc0(sizeof(ParallelLogicalDecodingContext));
+
+    ctx->context = g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx;
+
+    /* (re-)load output plugins, so we detect a bad (removed) output plugin now. */
+    if (!fast_forward && slot != NULL)
+        LoadOutputPlugin(&ctx->callbacks, NameStr(slot->data.plugin));
+
+    /*
+     * Now that the slot's xmin has been set, we can announce ourselves as a
+     * logical decoding backend which doesn't need to be checked individually
+     * when computing the xmin horizon because the xmin is enforced via
+     * replication slots.
+     */
+    (void)LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+    t_thrd.pgxact->vacuumFlags |= PROC_IN_LOGICAL_DECODING;
+    LWLockRelease(ProcArrayLock);
+
+    ctx->reader = XLogReaderAllocate(read_page, ctx);
+    if (unlikely(ctx->reader == NULL))
+        ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+            errmsg("memory is temporarily unavailable while allocate xlog reader")));
+
+    ctx->reader->private_data = ctx;
+    ctx->slot = slot;
+
+    ctx->reorder = ParallelReorderBufferAllocate(slotId);
+    ctx->reorder->private_data = ctx;
+
+    /* wrap output plugin callbacks, so we can add error context information */
+    ctx->reorder->begin = begin_cb_wrapper;
+    ctx->reorder->apply_change = parallel_change_cb_wrapper;
+    ctx->reorder->commit = commit_cb_wrapper;
+
+    ctx->out = makeStringInfo();
+
+    ctx->output_plugin_options = output_plugin_options;
+    ctx->fast_forward = fast_forward;
+
+    (void)MemoryContextSwitchTo(old_context);
+
+    return ctx;
+}
+
 /*
  * Create a new decoding context, for a new logical slot.
 *
@@ -402,6 +521,45 @@ LogicalDecodingContext *CreateDecodingContext(XLogRecPtr start_lsn, List *output
     return ctx;
 }
 
+LogicalDecodingContext *CreateDecodingContextForArea(XLogRecPtr start_lsn, const char* plugin,
+    List *output_plugin_options, bool fast_forward, XLogPageReadCB read_page,
+    LogicalOutputPluginWriterPrepareWrite prepare_write,
+    LogicalOutputPluginWriterWrite do_write)
+{
+    LogicalDecodingContext *ctx = NULL;
+    MemoryContext old_context = NULL;
+
+    ctx = StartupDecodingContextForArea(output_plugin_options, start_lsn, InvalidTransactionId, false, fast_forward,
+        read_page, prepare_write, do_write);
+    LoadOutputPlugin(&ctx->callbacks, plugin);
+
+    /* call output plugin initialization callback */
+    old_context = MemoryContextSwitchTo(ctx->context);
+    if (ctx->callbacks.startup_cb != NULL)
+        startup_cb_wrapper(ctx, &ctx->options, false);
+    (void)MemoryContextSwitchTo(old_context);
+
+    return ctx;
+}
+
+ParallelLogicalDecodingContext *ParallelCreateDecodingContext(XLogRecPtr start_lsn, List *output_plugin_options,
+    bool fast_forward, XLogPageReadCB read_page, int slotId)
+{
+    ParallelLogicalDecodingContext *ctx = NULL;
+
+    ctx = ParallelStartupDecodingContext(output_plugin_options, start_lsn, InvalidTransactionId, false, fast_forward,
+        read_page, slotId);
+
+    if (!RecoveryInProgress()) {
+        ereport(LOG, (errmsg("starting logical decoding for slot "),
+            errdetail("streaming transactions committing after")));
+    }
+
+    return ctx;
+}
+
 /*
  * Returns true if an consistent initial decoding snapshot has been built.
  */
@@ -492,12 +650,22 @@ void OutputPluginWrite(struct LogicalDecodingContext *ctx, bool last_write)
     ctx->prepared_write = false;
 }
 
+static void CheckLogicalAllowedPlugin(const char *plugin)
+{
+    bool have_slash = (first_dir_separator(plugin) != NULL);
+    if (have_slash) {
+        ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR),
+            errmsg("The path cannot be specified for the decoding plugin.")));
+    }
+}
+
 /*
  * Load the output plugin, lookup its output plugin init function, and check
 * that it provides the required callbacks.
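+ * The plugin must be given as a bare name: CheckLogicalAllowedPlugin() rejects any value
+ * containing a path separator before the library is loaded.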
*/ static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, const char *plugin) { + CheckLogicalAllowedPlugin(plugin); LogicalOutputPluginInit plugin_init; CFunInfo tmpCF = load_external_function(plugin, "_PG_output_plugin_init", false, false); plugin_init = (LogicalOutputPluginInit)tmpCF.user_fn; @@ -518,22 +686,93 @@ static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, const char *plugi if (callbacks->commit_cb == NULL) ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmsg("output plugins have to register a commit callback"))); + if (callbacks->abort_cb == NULL) + ereport(WARNING, + (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmsg("output plugins have to register a abort callback"))); +} + +static void LoadOutputPlugin(ParallelOutputPluginCallbacks *callbacks, const char *plugin) +{ + CheckLogicalAllowedPlugin(plugin); + ParallelLogicalOutputPluginInit plugin_init; + CFunInfo tmpCF = load_external_function(plugin, "_PG_output_plugin_init", false, false); + plugin_init = (ParallelLogicalOutputPluginInit)tmpCF.user_fn; + + if (plugin_init == NULL) { + ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("output plugins have to declare the _PG_output_plugin_init symbol"))); + } + + /* ask the output plugin to fill the callback struct */ + plugin_init(callbacks); + + if (callbacks->begin_cb == NULL) { + ereport(ERROR, + (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmsg("output plugins have to register a begin callback"))); + } + if (callbacks->change_cb == NULL) { + ereport(ERROR, + (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmsg("output plugins have to register a change callback"))); + } + if (callbacks->commit_cb == NULL) { + ereport(ERROR, + (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmsg("output plugins have to register a commit callback"))); + } } static void output_plugin_error_callback(void *arg) { LogicalErrorCallbackState *state = (LogicalErrorCallbackState *)arg; /* not all callbacks have an associated LSN */ - if (!XLByteEQ(state->report_location, InvalidXLogRecPtr)) { - (void)errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X", - NameStr(state->ctx->slot->data.name), NameStr(state->ctx->slot->data.plugin), - state->callback_name, (uint32)(state->report_location >> 32), (uint32)state->report_location); + if (!t_thrd.logical_cxt.IsAreaDecode) { + if (!XLByteEQ(state->report_location, InvalidXLogRecPtr)) { + (void)errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X", + NameStr(state->ctx->slot->data.name), NameStr(state->ctx->slot->data.plugin), + state->callback_name, (uint32)(state->report_location >> 32), (uint32)state->report_location); + } else { + (void)errcontext("slot \"%s\", output plugin \"%s\", in the %s callback", NameStr(state->ctx->slot->data.name), + NameStr(state->ctx->slot->data.plugin), state->callback_name); + } } else { - (void)errcontext("slot \"%s\", output plugin \"%s\", in the %s callback", NameStr(state->ctx->slot->data.name), - NameStr(state->ctx->slot->data.plugin), state->callback_name); + if (!XLByteEQ(state->report_location, InvalidXLogRecPtr)) { + (void)errcontext("Area decode:in the %s callback, associated LSN %X/%X", + state->callback_name, (uint32)(state->report_location >> 32), (uint32)state->report_location); + } else { + (void)errcontext("Area decode:in the %s callback", + state->callback_name); + } } } +static void parallel_change_cb_wrapper(ParallelReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation, + ParallelReorderBufferChange 
*change) +{ + ParallelLogicalDecodingContext *ctx = (ParallelLogicalDecodingContext *)cache->private_data; + ParallelLogicalErrorCallbackState state; + ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "change"; + state.report_location = change->lsn; + errcallback.previous = t_thrd.log_cxt.error_context_stack; + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *)&state; + t_thrd.log_cxt.error_context_stack = &errcallback; + /* set output state */ + ctx->accept_writes = true; + /* + * report this change's lsn so replies from clients can give an up2date + * answer. This won't ever be enough (and shouldn't be!) to confirm + * receipt of this transaction, but it might allow another transaction's + * commit to be confirmed with one message. + */ + ctx->write_location = change->lsn; + ctx->callbacks.change_cb(ctx, txn, relation, change); + /* Pop the error context stack */ + t_thrd.log_cxt.error_context_stack = errcallback.previous; +} + static void startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt, bool is_init) { LogicalErrorCallbackState state; @@ -559,7 +798,7 @@ static void startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions t_thrd.log_cxt.error_context_stack = errcallback.previous; } -static void shutdown_cb_wrapper(LogicalDecodingContext *ctx) +void shutdown_cb_wrapper(LogicalDecodingContext *ctx) { LogicalErrorCallbackState state; ErrorContextCallback errcallback; @@ -584,6 +823,67 @@ static void shutdown_cb_wrapper(LogicalDecodingContext *ctx) t_thrd.log_cxt.error_context_stack = errcallback.previous; } +static void abort_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn) +{ + LogicalDecodingContext *ctx = (LogicalDecodingContext *)cache->private_data; + LogicalErrorCallbackState state; + ErrorContextCallback errcallback; + + Assert(!ctx->fast_forward); + + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "abort"; + state.report_location = txn->final_lsn; /* beginning of abort record */ + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *)&state; + errcallback.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &errcallback; + + /* set output state */ + ctx->accept_writes = true; + ctx->write_xid = txn->xid; + ctx->write_location = txn->end_lsn; /* points to the end of the record */ + + /* do the actual work: call callback */ + ctx->callbacks.abort_cb(ctx, txn); + + /* Pop the error context stack */ + t_thrd.log_cxt.error_context_stack = errcallback.previous; +} + +static void prepare_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn) +{ + LogicalDecodingContext *ctx = (LogicalDecodingContext *)cache->private_data; + LogicalErrorCallbackState state; + ErrorContextCallback errcallback; + + Assert(!ctx->fast_forward); + + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "prepare"; + state.report_location = txn->final_lsn; /* beginning of abort record */ + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *)&state; + errcallback.previous = t_thrd.log_cxt.error_context_stack; + t_thrd.log_cxt.error_context_stack = &errcallback; + + /* set output state */ + ctx->accept_writes = true; + ctx->write_xid = txn->xid; + ctx->write_location = txn->end_lsn; /* points to the end of the record */ + + /* do the actual work: call 
callback */ + if (ctx->callbacks.prepare_cb) { + ctx->callbacks.prepare_cb(ctx, txn); + } + + /* Pop the error context stack */ + t_thrd.log_cxt.error_context_stack = errcallback.previous; +} + + /* * Callbacks for ReorderBuffer which add in some more information and then call * output_plugin.h plugins. @@ -665,7 +965,9 @@ static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relat /* set output state */ ctx->accept_writes = true; - ctx->write_xid = txn->xid; + if (txn != NULL) { + ctx->write_xid = txn->xid; + } /* * report this change's lsn so replies from clients can give an up2date * answer. This won't ever be enough (and shouldn't be!) to confirm @@ -1115,3 +1417,50 @@ void NotifyPrimaryAdvance(XLogRecPtr restart, XLogRecPtr flush) PQclear(res); } + +/* Check a boolean option with a default value */ +void CheckBooleanOption(DefElem *elem, bool *booleanOption, bool defaultValue) +{ + if (elem->arg == NULL) { + *booleanOption = defaultValue; + } else if (!parse_bool(strVal(elem->arg), booleanOption)) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname), + errdetail("N/A"), errcause("Wrong input value"), erraction("Input \"on\" or \"off\""))); + } +} + +/* parse each decoding option */ +void ParseDecodingOptionPlugin(ListCell* option, PluginTestDecodingData* data, OutputPluginOptions* opt) +{ + DefElem* elem = (DefElem*)lfirst(option); + + Assert(elem->arg == NULL || IsA(elem->arg, String)); + + if (strncmp(elem->defname, "force-binary", sizeof("force-binary")) == 0) { + bool force_binary = false; + CheckBooleanOption(elem, &force_binary, false); + if (force_binary) { + opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; + } + } else if (strncmp(elem->defname, "include-xids", sizeof("include-xids")) == 0) { + /* if option does not provide a value, it means its value is true */ + CheckBooleanOption(elem, &data->include_xids, true); + } else if (strncmp(elem->defname, "include-timestamp", sizeof("include-timestamp")) == 0) { + CheckBooleanOption(elem, &data->include_timestamp, false); + } else if (strncmp(elem->defname, "skip-empty-xacts", sizeof("skip-empty-xacts")) == 0) { + CheckBooleanOption(elem, &data->skip_empty_xacts, false); + } else if (strncmp(elem->defname, "only-local", sizeof("only-local"))== 0) { + CheckBooleanOption(elem, &data->only_local, true); + } else if (strncmp(elem->defname, "white-table-list", sizeof("white-table-list")) == 0) { + ParseWhiteList(&data->tableWhiteList, elem); + } else if (strncmp(elem->defname, "standby-connection", sizeof("standby-connection")) == 0 && + t_thrd.role != WORKER) { + CheckBooleanOption(elem, &t_thrd.walsender_cxt.standbyConnection, false); + } else if (strncmp(elem->defname, "parallel-decode-num", sizeof("parallel-decode-num")) != 0) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? 
strVal(elem->arg) : "(null)"), + errdetail("N/A"), errcause("Wrong input option"), erraction("Please check documents for help"))); + } +} + diff --git a/src/gausskernel/storage/replication/logical/logical_parse.cpp b/src/gausskernel/storage/replication/logical/logical_parse.cpp new file mode 100644 index 000000000..7e5636477 --- /dev/null +++ b/src/gausskernel/storage/replication/logical/logical_parse.cpp @@ -0,0 +1,1279 @@ +/* --------------------------------------------------------------------------------------- + * * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * logical_parse.cpp + * This module includes parsing the xlog log, splicing the parsed tuples, + * and then decoding these tuples by the decoding module. + * In addition to DML logs, it also contains toast tuple splicing logic. + * + * IDENTIFICATION + * src/gausskernel/storage/replication/logical/logical_parse.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/heapam.h" +#include "access/transam.h" +#include "access/xact.h" +#include "access/xlog_internal.h" +#include "access/xlogreader.h" + +#include "catalog/pg_control.h" + +#include "storage/standby.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" + +#include "utils/memutils.h" +#include "utils/relfilenodemap.h" +#include "utils/atomic.h" +#include "cjson/cJSON.h" + +#include "replication/decode.h" +#include "replication/logical.h" +#include "replication/reorderbuffer.h" +#include "replication/snapbuild.h" +#include "replication/parallel_decode.h" +#include "replication/parallel_reorderbuffer.h" +#include "replication/logical_parse.h" + +extern void ParseUpdateXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +extern void ParseDeleteXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +extern void ParseCommitXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, TransactionId xid, CommitSeqNo csn, + Oid dboid, TimestampTz commit_time, int nsubxacts, TransactionId *sub_xids, int ninval_msgs, + SharedInvalidationMessage *msgs, ParallelDecodeReaderWorker *worker); +extern void ParseXactOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +void ParseUInsert(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +void ParseUUpdate(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +void ParseUDelete(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +void ParseMultiInsert(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +void ParseUMultiInsert(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, 
ParallelDecodeReaderWorker *worker);
+
+void ParseHeap3Op(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker);
+void ParseUheapOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker);
+extern Pointer GetXlrec(XLogReaderState *record);
+
+static const int toastMask = 1 << 8;
+/*
+ * After initially parsing the log, put the tuple change into the queue
+ * and wait for a decoder thread to decode it.
+ */
+void PutChangeQueue(int slotId, ParallelReorderBufferChange *change)
+{
+    int decodeWorkerId = g_Logicaldispatcher[slotId].decodeWorkerId;
+    LogicalQueue* changeQueue = g_Logicaldispatcher[slotId].decodeWorkers[decodeWorkerId]->changeQueue;
+
+    g_Logicaldispatcher[slotId].decodeWorkerId = (decodeWorkerId + 1) % GetDecodeParallelism(slotId);
+    LogicalQueuePut(changeQueue, change);
+}
+
+static bool ParallelFilterByOrigin(ParallelLogicalDecodingContext *ctx, RepOriginId origin_id)
+{
+    ParallelDecodingData* data = (ParallelDecodingData *)ctx->output_plugin_private;
+    origin_id &= toastMask - 1;
+    if (data->pOptions.only_local && origin_id != InvalidRepOriginId) {
+        return true;
+    }
+    return false;
+}
+
+/*
+ * If an invalidation message is present in the commit log, every decoder
+ * thread must update its catalog cache from disk synchronously.
+ */
+void setInvalidationsMessageToDecodeQueue(ParallelLogicalDecodingContext *ctx, ParallelDecodeReaderWorker *worker,
+    int ninval_msgs, SharedInvalidationMessage *msgs)
+{
+    int slotId = worker->slotId;
+    ParallelReorderBufferChange *change = NULL;
+    int rc = 0;
+
+    if (ninval_msgs > 0) {
+        for (int i = 0; i < GetDecodeParallelism(slotId); i++) {
+            change = ParallelReorderBufferGetChange(ctx->reorder, slotId);
+
+            change->action = PARALLEL_REORDER_BUFFER_INVALIDATIONS_MESSAGE;
+            MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+            change->invalidations = (SharedInvalidationMessage *)palloc0(sizeof(SharedInvalidationMessage) * ninval_msgs);
+
+            rc = memcpy_s(change->invalidations, sizeof(SharedInvalidationMessage) * ninval_msgs, msgs,
+                sizeof(SharedInvalidationMessage) * ninval_msgs);
+            securec_check(rc, "", "");
+
+            MemoryContextSwitchTo(oldCtx);
+            change->ninvalidations = ninval_msgs;
+            PutChangeQueue(slotId, change);
+        }
+    }
+}
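
PutChangeQueue above hands parsed changes to the decode workers in plain round-robin order. A standalone sketch of the same dispatch policy (generic C++ with hypothetical names; not part of this patch):

```cpp
#include <deque>
#include <iostream>
#include <string>
#include <vector>

// One queue per decode worker; the dispatcher cycles through them so
// consecutive changes land on consecutive workers.
struct Dispatcher {
    std::vector<std::deque<std::string>> queues; // stand-in for the per-worker LogicalQueue
    size_t next = 0;                             // stand-in for decodeWorkerId

    explicit Dispatcher(size_t workers) : queues(workers) {}

    void put(std::string change) {
        queues[next].push_back(std::move(change));
        next = (next + 1) % queues.size(); // advance the round-robin cursor
    }
};

int main() {
    Dispatcher d(3);
    for (int i = 0; i < 7; ++i) {
        d.put("change#" + std::to_string(i));
    }
    for (size_t w = 0; w < d.queues.size(); ++w) {
        std::cout << "worker " << w << ": " << d.queues[w].size() << " changes\n";
    }
    return 0; // worker 0 holds 3 changes, workers 1 and 2 hold 2 each
}
```

+/*
+ * Advance restart_lsn and catalog_xmin. The walsender thread receives the
+ * flush LSN returned by the client and uses it to advance the slot's LSN and xmin.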
+ */ +void ParseRunningXactsXlog(ParallelLogicalDecodingContext *ctx, XLogRecPtr lsn, xl_running_xacts *running, + ParallelDecodeReaderWorker *worker) +{ + ParallelReorderBufferChange *change = NULL; + int slotId = worker->slotId; + ParallelDecodeReaderWorker* readWorker = g_Logicaldispatcher[slotId].readWorker; + SpinLockAcquire(&(readWorker->rwlock)); + XLogRecPtr candidateOldestXminLsn = readWorker->candidate_oldest_xmin_lsn; + XLogRecPtr candidateOldestXmin = readWorker->candidate_oldest_xmin; + XLogRecPtr currentLsn = readWorker->current_lsn; + XLogRecPtr restartLsn = readWorker->restart_lsn; + XLogRecPtr flushLSN = readWorker->flushLSN; + SpinLockRelease(&(readWorker->rwlock)); + int id = g_Logicaldispatcher[slotId].decodeWorkerId; + + LogicalIncreaseXminForSlot(candidateOldestXminLsn, candidateOldestXmin); + + if (restartLsn != InvalidXLogRecPtr) { + LogicalIncreaseRestartDecodingForSlot(currentLsn, restartLsn); + } + + if (!XLByteEQ(flushLSN, InvalidXLogRecPtr)) { + LogicalConfirmReceivedLocation(flushLSN); + } + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + change->action = PARALLEL_REORDER_BUFFER_CHANGE_RUNNING_XACT; + change->oldestXmin = running->oldestRunningXid; + change->lsn = lsn; + + g_Logicaldispatcher[slotId].decodeWorkerId = (id + 1) % GetDecodeParallelism(slotId); + LogicalQueuePut(g_Logicaldispatcher[slotId].decodeWorkers[id]->changeQueue, change); +} + + +/* + * Filter out records that we don't need to decode. + */ +static bool ParallelFilterRecord(ParallelLogicalDecodingContext *ctx, XLogReaderState *r, uint8 flags, RelFileNode* rnode) +{ + if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return true; + } + if (((flags & XLZ_UPDATE_PREFIX_FROM_OLD) != 0) || ((flags & XLZ_UPDATE_SUFFIX_FROM_OLD) != 0)) { + ereport(WARNING, (errmsg("update tuple has affix, don't decode it"))); + return true; + } + XLogRecGetBlockTag(r, 0, rnode, NULL, NULL); + if (rnode->dbNode != ctx->slot->data.database) { + return true; + } + return false; +} + +/* + * Splice toast tuples + * The return value represents whether the current tuple needs to be decoded. + * Ordinary tables (non system tables, toast tables, etc.) need to be decoded. + */ +void SplicingToastTuple(ParallelReorderBufferChange *change, ParallelLogicalDecodingContext *ctx, bool *needDecode, + bool istoast, ParallelDecodeReaderWorker *worker, bool isHeap) +{ + Oid reloid; + Relation relation = NULL; + Oid partitionReltoastrelid = InvalidOid; + ParallelReorderBufferTXN *txn; + if (u_sess->utils_cxt.HistoricSnapshot == NULL) { + u_sess->utils_cxt.HistoricSnapshot = GetLocalSnapshot(ctx->context); + } + u_sess->utils_cxt.HistoricSnapshot->snapshotcsn = change->data.tp.snapshotcsn; + + /* + * It is neither a toast table nor a toast tuple, + * and there is no need to trigger toast tuple splicing logic. + */ + if (!istoast) { + txn = ParallelReorderBufferTXNByXid(ctx->reorder, change->xid, false, NULL, change->lsn, true); + if (txn == NULL || txn->toast_hash == NULL) { + *needDecode = true; + return; + } + } + + bool isSegment = IsSegmentFileNode(change->data.tp.relnode); + reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode, change->data.tp.relnode.relNode, isSegment); + + if (reloid == InvalidOid) { + reloid = PartitionRelidByRelfilenode(change->data.tp.relnode.spcNode, + change->data.tp.relnode.relNode, partitionReltoastrelid, NULL,isSegment); + } + + /* + * Catalog tuple without data, emitted while catalog was + * in the process of being rewritten. 
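+ */

Toast handling here follows an accumulate-then-replace pattern: chunks are buffered per toast value until the owning main-table change arrives and the pieces are spliced back together. A toy illustration of that pattern (standard C++ strings stand in for tuples; the real ToastTupleAppendChunk and ToastTupleReplace operate on HeapTuples and relations):

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Chunks arrive out of band, tagged with the toast value they belong to.
std::map<int, std::vector<std::string>> pending; // chunk id -> ordered chunks

void appendChunk(int chunkId, std::string data) {
    pending[chunkId].push_back(std::move(data)); // like ToastTupleAppendChunk
}

std::string replaceWithSpliced(int chunkId) {    // like ToastTupleReplace
    std::string full;
    for (const auto &part : pending[chunkId]) full += part;
    pending.erase(chunkId);
    return full;
}

int main() {
    appendChunk(42, "Hello, ");
    appendChunk(42, "toast!");
    std::cout << replaceWithSpliced(42) << "\n"; // prints "Hello, toast!"
    return 0;
}
```
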
+    if (reloid == InvalidOid) {
+        /*
+         * This can happen when we try to decode a table that has already been
+         * dropped: its relfilenode no longer maps to a relation, so we do not
+         * decode this log record.
+         */
+        ereport(DEBUG1, (errmsg("could not lookup relation %s",
+            relpathperm(change->data.tp.relnode, MAIN_FORKNUM))));
+        *needDecode = false;
+        return;
+    }
+
+    relation = RelationIdGetRelation(reloid);
+    if (relation == NULL) {
+        ereport(DEBUG1, (errmsg("could not open relation descriptor %s",
+            relpathperm(change->data.tp.relnode, MAIN_FORKNUM))));
+        *needDecode = false;
+        return;
+    }
+
+    if (CSTORE_NAMESPACE == get_rel_namespace(RelationGetRelid(relation))) {
+        RelationClose(relation);
+        *needDecode = false;
+        return;
+    }
+
+    /*
+     * If the table to which this tuple belongs is a toast table,
+     * put the tuple into the toast list. If it is a normal table,
+     * check whether a toast record precedes it; if so, splice the
+     * pieces into a complete toast tuple.
+     */
+    if (CheckToastTuple(change, ctx, relation, istoast)) {
+        *needDecode = false;
+    } else if (!IsToastRelation(relation)) {
+        ToastTupleReplace(ctx->reorder, relation, change, partitionReltoastrelid, worker, isHeap);
+        *needDecode = true;
+    }
+
+    return;
+}
+
+/*
+ * Judge whether this tuple is a toast tuple; if so, put it into the toast
+ * hash table, where it waits until the main table change is decoded.
+ * The return value indicates whether the current table is a toast table.
+ */
+bool CheckToastTuple(ParallelReorderBufferChange *change, ParallelLogicalDecodingContext *ctx, Relation relation,
+    bool istoast)
+{
+    if (!istoast) {
+        return false;
+    }
+
+    if ((change->action != PARALLEL_REORDER_BUFFER_CHANGE_INSERT &&
+        change->action != PARALLEL_REORDER_BUFFER_CHANGE_UINSERT) || change->data.tp.newtuple == NULL) {
+        return false;
+    }
+
+    if (RelationIsLogicallyLogged(relation)) {
+        /*
+         * For now ignore sequence changes entirely. Most of
+         * the time they don't log changes using records we
+         * understand, so it doesn't make sense to handle the
+         * few cases we do.
+         */
+        if (relation->rd_rel->relkind == RELKIND_SEQUENCE) {
+        } else if (!IsToastRelation(relation)) { /* user-triggered change */
+            return false;
+        } else if (change->action == PARALLEL_REORDER_BUFFER_CHANGE_INSERT ||
+            change->action == PARALLEL_REORDER_BUFFER_CHANGE_UINSERT) {
+            ToastTupleAppendChunk(ctx->reorder, relation, change);
+            return true;
+        }
+    }
+    return false;
+}
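
ParseCommitXlog, shown next, deep-copies the subtransaction array out of the WAL record before queueing the change, because the record buffer is reused once parsing moves on. A reduced sketch of that copy step (plain C++; the kernel allocates with palloc0 in parallelDecodeCtx and copies with memcpy_s):

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

using TransactionId = uint64_t; // toy stand-in for the kernel's xid type

struct CommitChange {
    TransactionId xid = 0;
    std::vector<TransactionId> subXids; // owned copy, not a pointer into the record
};

CommitChange buildCommitChange(TransactionId xid, const TransactionId *subXids, int nsubxacts) {
    CommitChange change;
    change.xid = xid;
    if (nsubxacts > 0) {
        change.subXids.resize(nsubxacts);
        // Deep copy: the source array lives in the (soon reused) record buffer.
        std::memcpy(change.subXids.data(), subXids, sizeof(TransactionId) * nsubxacts);
    }
    return change;
}

int main() {
    TransactionId subs[] = {101, 102};
    CommitChange c = buildCommitChange(100, subs, 2);
    std::cout << "xid " << c.xid << " with " << c.subXids.size() << " subxacts\n";
    return 0;
}
```

+/*
+ * Parse the commit log and put the commit info into the decode queue.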
+ */ +void ParseCommitXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, TransactionId xid, CommitSeqNo csn, + Oid dboid, TimestampTz commit_time, int nsubxacts, TransactionId *sub_xids, int ninval_msgs, + SharedInvalidationMessage *msgs, ParallelDecodeReaderWorker *worker) +{ + ParallelReorderBufferChange *change = NULL; + int slotId = worker->slotId; + + setInvalidationsMessageToDecodeQueue(ctx, worker, ninval_msgs, msgs); + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + + change->action = PARALLEL_REORDER_BUFFER_CHANGE_COMMIT; + change->xid = xid; + change->csn = csn; + change->lsn =ctx->reader->ReadRecPtr; + change->finalLsn = buf->origptr; + change->endLsn = buf->endptr; + change->nsubxacts = nsubxacts; + change->commitTime = commit_time; + + if (nsubxacts > 0) { + MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + change->subXids = (TransactionId *)palloc0(sizeof(TransactionId) * nsubxacts); + MemoryContextSwitchTo(oldCtx); + errno_t rc = + memcpy_s(change->subXids, sizeof(TransactionId) * nsubxacts, sub_xids, sizeof(TransactionId) * nsubxacts); + securec_check(rc, "", ""); + } + PutChangeQueue(slotId, change); +} + +/* + * Parse abort log and set commit info to decode queue + */ +void ParseAbortXlog(ParallelLogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, TransactionId *sub_xids, + int nsubxacts, ParallelDecodeReaderWorker *worker) +{ + ParallelReorderBufferChange *change = NULL; + int slotId = worker->slotId; + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + + change->action = PARALLEL_REORDER_BUFFER_CHANGE_ABORT; + change->xid = xid; + change->lsn =ctx->reader->ReadRecPtr; + change->finalLsn = lsn; + change->nsubxacts = nsubxacts; + if (nsubxacts > 0) { + MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + change->subXids = (TransactionId *)palloc0(sizeof(TransactionId) * nsubxacts); + errno_t rc = + memcpy_s(change->subXids, sizeof(TransactionId) * nsubxacts, sub_xids, sizeof(TransactionId) * nsubxacts); + securec_check(rc, "", ""); + MemoryContextSwitchTo(oldCtx); + } + PutChangeQueue(slotId, change); +} + +/* + * Handle rmgr HEAP2_ID records. + */ +void ParseHeap2Op(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK; + switch (info) { + case XLOG_HEAP2_MULTI_INSERT: + ParseMultiInsert(ctx, buf, worker); + break; + case XLOG_HEAP2_FREEZE: + /* + * Although these records only exist to serve the needs of logical + * decoding, all the work happens as part of crash or archive + * recovery, so we don't need to do anything here. + */ + break; + /* + * Everything else here is just low level physical stuff we're + * not interested in. + */ + case XLOG_HEAP2_CLEAN: + case XLOG_HEAP2_CLEANUP_INFO: + case XLOG_HEAP2_VISIBLE: + case XLOG_HEAP2_LOGICAL_NEWPAGE: + case XLOG_HEAP2_BCM: + case XLOG_HEAP2_PAGE_UPGRADE: + + break; + default: + ereport(WARNING, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP2_ID record type: %u", info))); + } +} + +/* + * Handle rmgr HEAP_ID records for DecodeRecordIntoReorderBuffer(). 
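+ */

Each of the rmgr handlers in this file has the same shape: mask the record's info byte down to an opcode and switch on it, warning on anything unrecognized. The skeleton in isolation (mask value and opcodes are made up for illustration, not the real WAL constants):

```cpp
#include <cstdint>
#include <iostream>

static const uint8_t OPMASK = 0x70;    // illustrative stand-in for XLOG_HEAP_OPMASK
static const uint8_t OP_INSERT = 0x00;
static const uint8_t OP_DELETE = 0x10;
static const uint8_t OP_UPDATE = 0x20;

void dispatch(uint8_t infoByte) {
    switch (infoByte & OPMASK) {       // strip flag bits, keep the opcode
        case OP_INSERT: std::cout << "parse insert\n"; break;
        case OP_DELETE: std::cout << "parse delete\n"; break;
        case OP_UPDATE: std::cout << "parse update\n"; break;
        default:        std::cout << "warning: unexpected record type\n"; break;
    }
}

int main() {
    dispatch(0x20 | 0x01); // flag bits outside the mask are ignored
    return 0;
}
```
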
+void ParseHeapOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK;
+
+    switch (info) {
+        case XLOG_HEAP_INSERT:
+            ParseInsertXlog(ctx, buf, worker);
+            break;
+
+        /*
+         * Treat HOT updates as normal updates. There is no useful
+         * information in the fact that we could make it a HOT update
+         * locally and the WAL layout is compatible.
+         */
+        case XLOG_HEAP_HOT_UPDATE:
+        case XLOG_HEAP_UPDATE:
+            ParseUpdateXlog(ctx, buf, worker);
+            break;
+
+        case XLOG_HEAP_DELETE:
+            ParseDeleteXlog(ctx, buf, worker);
+            break;
+
+        case XLOG_HEAP_NEWPAGE:
+            /*
+             * This is only used in places like indexams and CLUSTER which
+             * don't contain changes relevant for logical replication.
+             */
+            break;
+
+        case XLOG_HEAP_INPLACE:
+            /*
+             * Inplace updates are only ever performed on catalog tuples and
+             * can, per definition, not change tuple visibility. Since we
+             * don't decode catalog tuples, we're not interested in the
+             * record's contents.
+             *
+             * In-place updates can be used either by XID-bearing transactions
+             * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
+             * transactions (e.g. VACUUM). In the former case, the commit
+             * record will include cache invalidations, so we mark the
+             * transaction as catalog modifying here. Currently that's
+             * redundant because the commit will do that as well, but once we
+             * support decoding in-progress relations, this will be important.
+             */
+            break;
+
+        case XLOG_HEAP_BASE_SHIFT:
+            break;
+
+        case XLOG_HEAP_LOCK:
+            /* we don't care about row level locks for now */
+            break;
+
+        default:
+            ereport(WARNING,
+                (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP_ID record type: %u", info)));
+            break;
+    }
+}
+
+void ParseNewCid(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    int slotId = worker->slotId;
+    ParallelReorderBufferChange *change = ParallelReorderBufferGetChange(ctx->reorder, slotId);
+    change->action = PARALLEL_REORDER_BUFFER_NEW_CID;
+    change->xid = XLogRecGetXid(buf->record);
+    PutChangeQueue(slotId, change);
+}
+
+void ParseHeap3Op(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    uint8 info = XLogRecGetInfo(buf->record) & XLOG_HEAP_OPMASK;
+
+    switch (info) {
+        case XLOG_HEAP3_NEW_CID: {
+            ParseNewCid(ctx, buf, worker);
+            break;
+        }
+        case XLOG_HEAP3_REWRITE:
+            break;
+        default:
+            ereport(WARNING,
+                (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_HEAP3_ID record type: %u", info)));
+    }
+}
+
+void ParseUheapOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    uint8 info = XLogRecGetInfo(buf->record) & XLOG_UHEAP_OPMASK;
+
+    switch (info) {
+        case XLOG_UHEAP_INSERT:
+            ParseUInsert(ctx, buf, worker);
+            break;
+
+        case XLOG_UHEAP_UPDATE:
+            ParseUUpdate(ctx, buf, worker);
+            break;
+
+        case XLOG_UHEAP_DELETE:
+            ParseUDelete(ctx, buf, worker);
+            break;
+
+        case XLOG_UHEAP_FREEZE_TD_SLOT:
+        case XLOG_UHEAP_INVALID_TD_SLOT:
+        case XLOG_UHEAP_CLEAN:
+            break;
+
+        case XLOG_UHEAP_MULTI_INSERT:
+            ParseUMultiInsert(ctx, buf, worker);
+            break;
+
+        default:
+            ereport(WARNING,
+                (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unexpected RM_UHEAP_ID record type: %u", info)));
+    }
+}
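
ParseInsertXlog, which follows, reads the commit sequence number from the bytes immediately behind the fixed-size insert record: curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapInsert). A self-contained sketch of reading such a trailing field (toy layout; memcpy is used instead of a pointer cast to keep the standalone sketch alignment-safe):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

using CommitSeqNo = uint64_t;
static const size_t SIZE_OF_FIXED_PART = 3; // unpadded struct size, like SizeOfHeapInsert

int main() {
    // Byte image of a record: the fixed part, then the CSN appended directly behind it.
    unsigned char rec[SIZE_OF_FIXED_PART + sizeof(CommitSeqNo)] = {0};
    CommitSeqNo csn = 12345;
    std::memcpy(rec + SIZE_OF_FIXED_PART, &csn, sizeof(csn));

    // Read the trailing field back the way the parser does.
    CommitSeqNo curCSN = 0;
    std::memcpy(&curCSN, rec + SIZE_OF_FIXED_PART, sizeof(curCSN));
    std::cout << "csn=" << curCSN << "\n"; // csn=12345
    return 0;
}
```

+/*
+ * Parse XLOG_HEAP_INSERT (not MULTI_INSERT!) records into decode change queue.
+ *
+ * Inserts can contain the new tuple.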
+ */
+void ParseInsertXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    XLogReaderState *r = buf->record;
+    xl_heap_insert *xlrec = NULL;
+    ParallelReorderBufferChange *change = NULL;
+    RelFileNode target_node = {0, 0, 0, 0};
+    xlrec = (xl_heap_insert *)GetXlrec(r);
+    CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapInsert);
+    int rc = 0;
+    bool needDecode = false;
+    /* only interested in our database */
+    Size tuplelen;
+    char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
+    int slotId = worker->slotId;
+
+    if (tuplelen == 0 && !AllocSizeIsValid(tuplelen)) {
+        ereport(ERROR, (errmsg("ParseInsertXlog tuplelen is invalid(%lu), don't decode it", tuplelen)));
+        return;
+    }
+    XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+    if (target_node.dbNode != ctx->slot->data.database) {
+        return;
+    }
+
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+
+    /*
+     * Reuse the origin field in the log. If bit 8 is non-zero,
+     * the current table is considered to be a toast table.
+     */
+    bool istoast = (((XLogRecGetOrigin(r)) & toastMask) != 0);
+    if (istoast) {
+        ereport(DEBUG2, (errmsg("ParallelDecodeInsert %d", istoast)));
+    }
+
+    change = ParallelReorderBufferGetChange(ctx->reorder, slotId);
+    change->action = PARALLEL_REORDER_BUFFER_CHANGE_INSERT;
+    change->lsn = ctx->reader->ReadRecPtr;
+    change->xid = XLogRecGetXid(r);
+    rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+
+    if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE) {
+        change->data.tp.newtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, tuplelen, worker, true);
+        DecodeXLogTuple(tupledata, tuplelen, change->data.tp.newtuple, true);
+    }
+    change->data.tp.snapshotcsn = curCSN;
+    change->data.tp.clear_toast_afterwards = true;
+
+    SplicingToastTuple(change, ctx, &needDecode, istoast, worker, true);
+
+    if (needDecode) {
+        PutChangeQueue(slotId, change);
+    }
+}
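
ParseUInsert below, like the heap parsers, derives the toast flag from the record's origin field: bit 8 (toastMask = 1 << 8) marks toast traffic, and the low bits still carry the real origin id. The masking arithmetic in isolation (assuming 16-bit origin ids):

```cpp
#include <cstdint>
#include <iostream>

using RepOriginId = uint16_t;
static const uint32_t toastMask = 1u << 8; // same constant as in logical_parse.cpp

int main() {
    RepOriginId raw = 0x0105;                   // origin id 5 with the toast bit set
    bool istoast = (raw & toastMask) != 0;      // test bit 8, as the parsers do
    RepOriginId origin = raw & (toastMask - 1); // strip it, as ParallelFilterByOrigin does
    std::cout << "istoast=" << istoast << " origin=" << origin << "\n"; // istoast=1 origin=5
    return 0;
}
```

+/*
+ * Parse XLOG_UHEAP_INSERT (not MULTI_INSERT!) records into decode change queue.
+ *
+ * Inserts can contain the new tuple.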
+ */ +void ParseUInsert(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + XlUHeapInsert *xlrec = NULL; + ParallelReorderBufferChange *change = NULL; + RelFileNode target_node = {0, 0, 0, 0}; + xlrec = (XlUHeapInsert *)UGetXlrec(r); + CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapInsert); + bool needDecode = false; + + /* only interested in our database */ + Size tuplelen; + char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen); + int slotId = worker->slotId; + + if (tuplelen == 0 && !AllocSizeIsValid(tuplelen)) { + ereport(ERROR, (errmsg("ParseUinsert tuplelen is invalid(%lu), don't decode it", tuplelen))); + return; + } + XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL); + + if (target_node.dbNode != ctx->slot->data.database) { + return; + } + + /* output plugin doesn't look for this origin, no need to queue */ + if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return; + } + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + change->action = PARALLEL_REORDER_BUFFER_CHANGE_UINSERT; + change->lsn = ctx->reader->ReadRecPtr; + change->xid = UHeapXlogGetCurrentXid(r, true); + errno_t rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode)); + securec_check(rc, "\0", "\0"); + + if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE) { + change->data.tp.newtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, tuplelen, worker, false); + DecodeXLogTuple(tupledata, tuplelen, change->data.tp.newtuple, false); + } + change->data.tp.snapshotcsn = curCSN; + change->data.tp.clear_toast_afterwards = true; + + bool isToast = (((XLogRecGetOrigin(r)) & toastMask) != 0); + SplicingToastTuple(change, ctx, &needDecode, isToast, worker, false); + if (needDecode) { + PutChangeQueue(slotId, change); + } +} + +/* + * Parse XLOG_HEAP_UPDATE records into decode change queue. + * + * Updates can possibly contain a new tuple and the old primary key. 
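+ */

ParseUpdateXlog, next, validates tuple lengths before any buffer is allocated. The intent of that guard (reject empty or oversized payloads), reduced to a standalone predicate (MaxAllocSize below mirrors the roughly 1 GB cap enforced by the kernel's AllocSizeIsValid):

```cpp
#include <cstddef>
#include <iostream>

static const size_t MaxAllocSize = 0x3fffffff; // ~1 GB, as in the kernel's allocator

// Reject empty or oversized tuple payloads before any buffer is allocated.
bool tupleLenIsSane(size_t tuplelen) {
    return tuplelen != 0 && tuplelen <= MaxAllocSize;
}

int main() {
    std::cout << tupleLenIsSane(0) << " "          // 0: empty payload
              << tupleLenIsSane(128) << " "        // 1: fine
              << tupleLenIsSane(MaxAllocSize + 1)  // 0: would exceed the allocation cap
              << "\n";
    return 0;
}
```
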
+void ParseUpdateXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    XLogReaderState *r = buf->record;
+    xl_heap_update *xlrec = NULL;
+    ParallelReorderBufferChange *change = NULL;
+    RelFileNode target_node = {0, 0, 0, 0};
+    Size datalen_new;
+    Size tuplelen_new;
+    char *data_new = NULL;
+    Size datalen_old;
+    Size tuplelen_old;
+    char *data_old = NULL;
+    int slotId = worker->slotId;
+    bool is_init = false;
+    bool needDecode = false;
+
+    Size heapUpdateSize = 0;
+    if ((XLogRecGetInfo(r) & XLOG_TUPLE_LOCK_UPGRADE_FLAG) == 0) {
+        heapUpdateSize = SizeOfOldHeapUpdate;
+    } else {
+        heapUpdateSize = SizeOfHeapUpdate;
+    }
+    xlrec = (xl_heap_update *)GetXlrec(r);
+    CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + heapUpdateSize);
+    int rc = 0;
+    XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+
+    /* only interested in our database */
+    if (target_node.dbNode != ctx->slot->data.database) {
+        return;
+    }
+
+    data_new = XLogRecGetBlockData(r, 0, &datalen_new);
+    tuplelen_new = datalen_new - SizeOfHeapHeader;
+    if (tuplelen_new == 0 && !AllocSizeIsValid(tuplelen_new)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen_new)));
+        return;
+    }
+
+    /* adapt 64-bit xid: this tuple may be the first tuple of a new page */
+    is_init = (XLogRecGetInfo(r) & XLOG_HEAP_INIT_PAGE) != 0;
+    /* caution, remaining data in record is not aligned */
+    data_old = (char *)xlrec + heapUpdateSize + sizeof(CommitSeqNo);
+    if (is_init) {
+        datalen_old = XLogRecGetDataLen(r) - heapUpdateSize - sizeof(CommitSeqNo) - sizeof(TransactionId);
+    } else {
+        datalen_old = XLogRecGetDataLen(r) - heapUpdateSize - sizeof(CommitSeqNo);
+    }
+    tuplelen_old = datalen_old - SizeOfHeapHeader;
+    if (tuplelen_old == 0 && !AllocSizeIsValid(tuplelen_old)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelen_old)));
+        return;
+    }
+
+    /* output plugin doesn't look for this origin, no need to queue */
+    if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) {
+        return;
+    }
+
+    /*
+     * Reuse the origin field in the log. If bit 8 is non-zero,
+     * the current table is considered to be a toast table.
+     */
+    bool istoast = (((XLogRecGetOrigin(r)) & toastMask) != 0);
+
+    change = ParallelReorderBufferGetChange(ctx->reorder, slotId);
+    change->action = PARALLEL_REORDER_BUFFER_CHANGE_UPDATE;
+    change->lsn = ctx->reader->ReadRecPtr;
+    change->xid = XLogRecGetXid(r);
+    rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+
+    if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE) {
+        change->data.tp.newtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, tuplelen_new, worker, true);
+        DecodeXLogTuple(data_new, datalen_new, change->data.tp.newtuple, true);
+    }
+    if (xlrec->flags & XLH_UPDATE_CONTAINS_OLD) {
+        change->data.tp.oldtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, tuplelen_old, worker, true);
+        DecodeXLogTuple(data_old, datalen_old, change->data.tp.oldtuple, true);
+    }
+
+    change->data.tp.snapshotcsn = curCSN;
+    change->data.tp.clear_toast_afterwards = true;
+
+    SplicingToastTuple(change, ctx, &needDecode, istoast, worker, true);
+
+    if (needDecode) {
+        PutChangeQueue(slotId, change);
+    }
+}
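
The old-tuple bookkeeping above is pure offset arithmetic: the old image sits behind the fixed header plus the CSN, and an XLOG_HEAP_INIT_PAGE record appends one extra TransactionId that must come off the length. The same bookkeeping as a small function (toy sizes, same structure):

```cpp
#include <cstddef>
#include <iostream>

// Toy stand-ins for the sizes used in ParseUpdateXlog.
static const size_t heapUpdateSize = 24; // fixed struct part, like SizeOfHeapUpdate
static const size_t csnSize = 8;         // sizeof(CommitSeqNo)
static const size_t xidSize = 8;         // sizeof(TransactionId)

// Length of the old-tuple payload inside the record's main data.
size_t oldTupleLen(size_t recordDataLen, bool isInitPage) {
    size_t len = recordDataLen - heapUpdateSize - csnSize;
    if (isInitPage) {
        len -= xidSize; // first tuple on a fresh page carries an extra xid
    }
    return len;
}

int main() {
    std::cout << oldTupleLen(120, false) << "\n"; // 88
    std::cout << oldTupleLen(120, true) << "\n";  // 80
    return 0;
}
```

+/*
+ * Parse XLOG_UHEAP_UPDATE records into decode change queue.
+ *
+ * Updates can possibly contain the old tuple.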
+ */
+void ParseUUpdate(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker)
+{
+    XLogReaderState *r = buf->record;
+    RelFileNode targetNode = {0, 0, 0, 0};
+    XlUHeapUpdate *xlrec = (XlUHeapUpdate *)UGetXlrec(r);
+    CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapUpdate);
+    int slotId = worker->slotId;
+    bool isInplaceUpdate = (xlrec->flags & XLZ_NON_INPLACE_UPDATE) == 0;
+    bool needDecode = false;
+    if (ParallelFilterRecord(ctx, r, xlrec->flags, &targetNode)) {
+        return;
+    }
+    Size datalenNew = 0;
+    char *dataNew = XLogRecGetBlockData(r, 0, &datalenNew);
+
+    if (datalenNew == 0 && !AllocSizeIsValid(datalenNew)) {
+        ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode it", datalenNew)));
+        return;
+    }
+
+    Size tuplelenOld = XLogRecGetDataLen(r) - SizeOfUHeapUpdate - sizeof(CommitSeqNo);
+    char *dataOld = (char *)xlrec + SizeOfUHeapUpdate + sizeof(CommitSeqNo);
+    UpdateOldTupleCalc(isInplaceUpdate, r, &dataOld, &tuplelenOld);
+
+    if (tuplelenOld == 0 && !AllocSizeIsValid(tuplelenOld)) {
+        ereport(ERROR, (errmsg("tuplelen is invalid(%lu), don't decode it", tuplelenOld)));
+        return;
+    }
+
+    bool isToast = (((XLogRecGetOrigin(r)) & toastMask) != 0);
+    ParallelReorderBufferChange *change = ParallelReorderBufferGetChange(ctx->reorder, slotId);
+    change->action = PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE;
+    change->lsn = ctx->reader->ReadRecPtr;
+    change->xid = UHeapXlogGetCurrentXid(r, true);
+
+    int rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode));
+    securec_check(rc, "\0", "\0");
+    change->data.tp.newtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, datalenNew, worker, false);
+
+    DecodeXLogTuple(dataNew, datalenNew, change->data.tp.newtuple, false);
+    if (xlrec->flags & XLZ_HAS_UPDATE_UNDOTUPLE) {
+        change->data.tp.oldtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, tuplelenOld, worker, false);
+        if (!isInplaceUpdate) {
+            DecodeXLogTuple(dataOld, tuplelenOld, change->data.tp.oldtuple, false);
+        } else if ((xlrec->flags & XLOG_UHEAP_CONTAINS_OLD_HEADER) != 0) {
+            int undoXorDeltaSize = *(int *)dataOld;
+            dataOld += sizeof(int) + undoXorDeltaSize;
+            tuplelenOld -= sizeof(int) + undoXorDeltaSize;
+            DecodeXLogTuple(dataOld, tuplelenOld, change->data.tp.oldtuple, false);
+        } else {
+            ereport(LOG, (errmsg("current tuple is not fully logged, don't decode it")));
+            return;
+        }
+    }
+    change->data.tp.snapshotcsn = curCSN;
+    change->data.tp.clear_toast_afterwards = true;
+
+    SplicingToastTuple(change, ctx, &needDecode, isToast, worker, false);
+    if (needDecode) {
+        PutChangeQueue(slotId, change);
+    }
+}
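
For an in-place ustore update that logged the old header, ParseUUpdate above skips a length-prefixed XOR delta before the old tuple bytes begin. That pointer adjustment in isolation (assumed byte layout, plain C++):

```cpp
#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>

// Skip the "[int deltaSize][delta bytes]" prefix and return the tuple bytes.
const char *skipXorDelta(const char *data, size_t *len) {
    int undoXorDeltaSize;
    std::memcpy(&undoXorDeltaSize, data, sizeof(int)); // length prefix
    data += sizeof(int) + undoXorDeltaSize;            // hop over prefix + delta
    *len -= sizeof(int) + undoXorDeltaSize;
    return data;
}

int main() {
    // Build "[4][4 delta bytes][OLDTUPLE]" as a byte image.
    char buf[4 + 4 + 8];
    int deltaSize = 4;
    std::memcpy(buf, &deltaSize, sizeof(int));
    std::memcpy(buf + 4, "\xde\xad\xbe\xef", 4);
    std::memcpy(buf + 8, "OLDTUPLE", 8);

    size_t len = sizeof(buf);
    const char *tuple = skipXorDelta(buf, &len);
    std::cout << std::string(tuple, len) << "\n"; // OLDTUPLE
    return 0;
}
```

+/*
+ * Parse XLOG_HEAP_DELETE records into decode change queue.
+ *
+ * Deletes can possibly contain the old primary key.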
+ */ +void ParseDeleteXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + xl_heap_delete *xlrec = NULL; + + int slotId = worker->slotId; + ParallelReorderBufferChange *change = NULL; + RelFileNode target_node = {0, 0, 0, 0}; + int rc = 0; + Size datalen = 0; + bool needDecode = false; + bool istoast = (((XLogRecGetOrigin(r)) & toastMask) != 0); + if (istoast) { + ereport(DEBUG2, (errmsg("ParallelDecodeDelete %d", istoast))); + } + + Size heapDeleteSize = 0; + if ((XLogRecGetInfo(r) & XLOG_TUPLE_LOCK_UPGRADE_FLAG) == 0) { + heapDeleteSize = SizeOfOldHeapDelete; + } else { + heapDeleteSize = SizeOfHeapDelete; + } + xlrec = (xl_heap_delete *)GetXlrec(r); + CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + heapDeleteSize); + + /* only interested in our database */ + XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL); + if (target_node.dbNode != ctx->slot->data.database) { + return; + } + /* output plugin doesn't look for this origin, no need to queue */ + if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return; + } + + datalen = XLogRecGetDataLen(r) - heapDeleteSize; + if (datalen == 0 && !AllocSizeIsValid(datalen)) { + ereport(WARNING, (errmsg("tuplelen is invalid(%lu), tuplelen, don't decode it", datalen))); + return; + } + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + change->action = PARALLEL_REORDER_BUFFER_CHANGE_DELETE; + change->lsn = ctx->reader->ReadRecPtr; + change->xid = XLogRecGetXid(r); + rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &target_node, sizeof(RelFileNode)); + securec_check(rc, "\0", "\0"); + + /* old primary key stored */ + if (xlrec->flags & XLH_DELETE_CONTAINS_OLD) { + Assert(XLogRecGetDataLen(r) > (heapDeleteSize + SizeOfHeapHeader)); + change->data.tp.oldtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, datalen, worker, true); + + DecodeXLogTuple((char *)xlrec + heapDeleteSize + sizeof(CommitSeqNo), datalen - sizeof(CommitSeqNo), + change->data.tp.oldtuple, true); + } + change->data.tp.snapshotcsn = curCSN; + change->data.tp.clear_toast_afterwards = true; + + SplicingToastTuple(change, ctx, &needDecode, istoast, worker, true); + if (needDecode) { + PutChangeQueue(slotId, change); + } +} + +void ParseUDelete(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + XlUHeapDelete *xlrec = NULL; + int slotId = worker->slotId; + RelFileNode targetNode = {0, 0, 0, 0}; + bool needDecode = false; + bool isToast = (((XLogRecGetOrigin(r)) & toastMask) != 0); + xlrec = (XlUHeapDelete *)UGetXlrec(r); + CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfUHeapDelete); + + XLogRecGetBlockTag(r, 0, &targetNode, NULL, NULL); + if (targetNode.dbNode != ctx->slot->data.database) { + return; + } + /* output plugin doesn't look for this origin, no need to queue */ + if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return; + } + + XlUndoHeader *xlundohdr = (XlUndoHeader *)((char *)xlrec + SizeOfUHeapDelete + sizeof(CommitSeqNo)); + Size datalen = XLogRecGetDataLen(r) - SizeOfUHeapDelete - SizeOfXLUndoHeader - sizeof(CommitSeqNo); + Size addLen = 0; + UpdateUndoBody(&addLen, xlundohdr->flag); + + Size metaLen = DecodeUndoMeta((char*)xlrec + SizeOfUHeapDelete + sizeof(CommitSeqNo) + SizeOfXLUndoHeader + addLen); + addLen += metaLen; + + if (datalen == 0 && !AllocSizeIsValid(datalen)) { + ereport(WARNING, (errmsg("tuplelen is invalid(%lu), don't decode 
it", datalen))); + return; + } + + ParallelReorderBufferChange* change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + change->action = PARALLEL_REORDER_BUFFER_CHANGE_UDELETE; + change->lsn = ctx->reader->ReadRecPtr; + change->xid = UHeapXlogGetCurrentXid(r, true); + + int rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &targetNode, sizeof(RelFileNode)); + securec_check(rc, "\0", "\0"); + + change->data.tp.oldtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, datalen, worker, false); + DecodeXLogTuple((char *)xlrec + SizeOfUHeapDelete + sizeof(CommitSeqNo) + SizeOfXLUndoHeader + addLen, + datalen - addLen, change->data.tp.oldtuple, false); + + change->data.tp.snapshotcsn = curCSN; + change->data.tp.clear_toast_afterwards = true; + + SplicingToastTuple(change, ctx, &needDecode, isToast, worker, false); + if (needDecode) { + PutChangeQueue(slotId, change); + } +} + +void ParseMultiInsert(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + xl_heap_multi_insert *xlrec = NULL; + int i = 0; + char *data = NULL; + char *tupledata = NULL; + Size tuplelen = 0; + RelFileNode rnode = {0, 0, 0, 0}; + int rc = 0; + xlrec = (xl_heap_multi_insert *)GetXlrec(r); + CommitSeqNo curCSN = *(CommitSeqNo *)((char *)xlrec + SizeOfHeapMultiInsert); + int slotId = worker->slotId; + if (xlrec->isCompressed) + return; + /* only interested in our database */ + XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL); + if (rnode.dbNode != ctx->slot->data.database) + return; + /* output plugin doesn't look for this origin, no need to queue */ + if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) + return; + + tupledata = XLogRecGetBlockData(r, 0, &tuplelen); + data = tupledata; + for (i = 0; i < xlrec->ntuples; i++) { + ParallelReorderBufferChange *change = NULL; + xl_multi_insert_tuple *xlhdr = NULL; + int datalen = 0; + ReorderBufferTupleBuf *tuple = NULL; + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + change->action = PARALLEL_REORDER_BUFFER_CHANGE_INSERT; + change->lsn = ctx->reader->ReadRecPtr; + change->xid = XLogRecGetXid(r); + rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &rnode, sizeof(RelFileNode)); + securec_check(rc, "", ""); + + /* + * CONTAINS_NEW_TUPLE will always be set currently as multi_insert + * isn't used for catalogs, but better be future proof. + * + * We decode the tuple in pretty much the same way as DecodeXLogTuple, + * but since the layout is slightly different, we can't use it here. + */ + if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE) { + HeapTupleHeader header; + xlhdr = (xl_multi_insert_tuple *)SHORTALIGN(data); + data = ((char *)xlhdr) + SizeOfMultiInsertTuple; + datalen = xlhdr->datalen; + if (datalen != 0 && AllocSizeIsValid((uint)datalen)) { + change->data.tp.newtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, datalen, worker, true); + tuple = change->data.tp.newtuple; + header = tuple->tuple.t_data; + + /* not a disk based tuple */ + ItemPointerSetInvalid(&tuple->tuple.t_self); + + /* + * We can only figure this out after reassembling the + * transactions. 
+ */ + tuple->tuple.t_bucketId = InvalidBktId; + tuple->tuple.t_tableOid = InvalidOid; + tuple->tuple.t_len = datalen + SizeofHeapTupleHeader; + + rc = memset_s(header, SizeofHeapTupleHeader, 0, SizeofHeapTupleHeader); + securec_check(rc, "\0", "\0"); + rc = memcpy_s((char *)tuple->tuple.t_data + SizeofHeapTupleHeader, datalen, (char *)data, datalen); + securec_check(rc, "\0", "\0"); + + header->t_hoff = xlhdr->t_hoff; + header->t_infomask = xlhdr->t_infomask; + header->t_infomask2 = xlhdr->t_infomask2; + } else { + ereport(ERROR, (errmsg("tuplelen is invalid(%d), tuplelen, don't decode it", datalen))); + return; + } + data += datalen; + } + /* + * Reset toast reassembly state only after the last row in the last + * xl_multi_insert_tuple record emitted by one heap_multi_insert() + * call. + */ + if ((xlrec->flags & XLH_INSERT_LAST_IN_MULTI) && ((i + 1) == xlrec->ntuples)) { + change->data.tp.clear_toast_afterwards = true; + } else { + change->data.tp.clear_toast_afterwards = false; + } + change->data.tp.snapshotcsn = curCSN; + + PutChangeQueue(slotId, change); + } +} + +void ParseUMultiInsert(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + Size tuplelen = 0; + RelFileNode rnode = {0, 0, 0, 0}; + CommitSeqNo curCSN = 0; + XlUHeapMultiInsert *xlrec = (XlUHeapMultiInsert *)UGetMultiInsertXlrec(r, &curCSN); + int slotId = worker->slotId; + + XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL); + if (rnode.dbNode != ctx->slot->data.database) { + return; + } + /* output plugin doesn't look for this origin, no need to queue */ + if (ParallelFilterByOrigin(ctx, XLogRecGetOrigin(r))) { + return; + } + char *data = XLogRecGetBlockData(r, 0, &tuplelen); + for (int i = 0; i < xlrec->ntuples; i++) { + ParallelReorderBufferChange *change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + change->action = PARALLEL_REORDER_BUFFER_CHANGE_UINSERT; + change->lsn = ctx->reader->ReadRecPtr; + change->xid = UHeapXlogGetCurrentXid(r, true); + int rc = memcpy_s(&change->data.tp.relnode, sizeof(RelFileNode), &rnode, sizeof(RelFileNode)); + securec_check(rc, "", ""); + + /* + * CONTAINS_NEW_TUPLE will always be set currently as multi_insert + * isn't used for catalogs, but better be future proof. + * + * We decode the tuple in pretty much the same way as DecodeXLogTuple, + * but since the layout is slightly different, we can't use it here. + */ + if (xlrec->flags & XLOG_UHEAP_CONTAINS_NEW_TUPLE) { + XlMultiInsertUTuple *xlhdr = (XlMultiInsertUTuple *)data; + data = ((char *)xlhdr) + SizeOfMultiInsertUTuple; + int len = xlhdr->datalen; + if (len != 0 && AllocSizeIsValid((uint)len)) { + change->data.tp.newtuple = ParallelReorderBufferGetTupleBuf(ctx->reorder, len, worker, false); + ReorderBufferTupleBuf* tuple = change->data.tp.newtuple; + UHeapTupleData utuple = *(UHeapTupleData *)(&tuple->tuple); + UHeapDiskTuple header = utuple.disk_tuple; + + /* not a disk based tuple */ + ItemPointerSetInvalid(&utuple.ctid); + + /* + * We can only figure this out after reassembling the + * transactions. 
+ */ + utuple.table_oid = InvalidOid; + utuple.t_bucketId = InvalidBktId; + utuple.disk_tuple_size = len + SizeOfUHeapDiskTupleData; + + rc = memset_s(header, SizeOfUHeapDiskTupleData, 0, SizeOfUHeapDiskTupleData); + securec_check(rc, "\0", "\0"); + rc = memcpy_s((char *)utuple.disk_tuple + SizeOfUHeapDiskTupleData, len, (char *)data, len); + securec_check(rc, "\0", "\0"); + + header->flag = xlhdr->flag; + header->flag2 = xlhdr->flag2; + header->t_hoff = xlhdr->t_hoff; + } else { + ereport(ERROR, (errmsg("tuplelen is invalid(%d), don't decode it", len))); + return; + } + data += len; + } + + /* + * Reset toast reassembly state only after the last row in the last + * xl_multi_insert_tuple record emitted by one heap_multi_insert() + * call. + */ + if ((i + 1) == xlrec->ntuples) { + change->data.tp.clear_toast_afterwards = true; + } else { + change->data.tp.clear_toast_afterwards = false; + } + change->data.tp.snapshotcsn = curCSN; + PutChangeQueue(slotId, change); + } +} + +/* + * Handle rmgr STANDBY_ID records for DecodeRecordIntoReorderBuffer(). + */ +void ParseStandbyOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; + + switch (info) { + case XLOG_RUNNING_XACTS: { + xl_running_xacts *running = (xl_running_xacts *)buf->record_data; + ParseRunningXactsXlog(ctx, buf->origptr, running, worker); + } break; + case XLOG_STANDBY_CSN: + case XLOG_STANDBY_LOCK: + case XLOG_STANDBY_CSN_ABORTED: + case XLOG_STANDBY_CSN_COMMITTING: + break; + default: + ereport(WARNING, (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unexpected RM_STANDBY_ID record type: %u", info))); + } +} + +/* + * Take every XLogReadRecord()ed record and perform the actions required to + * decode it using the output plugin already setup in the logical decoding + * context. + * + * We also support the ability to fast forward thru records, skipping some + * record types completely - see individual record types for details. + */ +void ParseProcessRecord(ParallelLogicalDecodingContext *ctx, XLogReaderState *record, ParallelDecodeReaderWorker *worker) +{ + XLogRecordBuffer buf = { 0, 0, NULL, NULL }; + + buf.origptr = ctx->reader->ReadRecPtr; + buf.endptr = ctx->reader->EndRecPtr; + buf.record = record; + buf.record_data = GetXlrec(record); + + /* cast so we get a warning when new rmgrs are added */ + switch ((RmgrIds)XLogRecGetRmid(record)) { + /* + * Rmgrs we care about for logical decoding. Add new rmgrs in + * rmgrlist.h's order. + */ + case RM_XLOG_ID: + break; + + case RM_XACT_ID: + ParseXactOp(ctx, &buf, worker); + break; + + case RM_STANDBY_ID: + ParseStandbyOp(ctx, &buf, worker); + break; + + case RM_HEAP2_ID: + ParseHeap2Op(ctx, &buf, worker); + break; + + case RM_HEAP_ID: + ParseHeapOp(ctx, &buf, worker); + break; + case RM_HEAP3_ID: + ParseHeap3Op(ctx, &buf, worker); + break; + case RM_UHEAP_ID: + ParseUheapOp(ctx, &buf, worker); + break; + default: + break; + } +} + +/* + * Handle rmgr XACT_ID records for parsing records into ParallelReorderBuffer. + */ +void ParseXactOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker) +{ + XLogReaderState *r = buf->record; + uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; + /* + * No point in doing anything yet, data could not be decoded anyway. 
It's + * ok not to call ReorderBufferProcessXid() in that case, except in the + * assignment case there'll not be any later records with the same xid; + * and in the assignment case we'll not decode those xacts. + */ + + switch (info) { + case XLOG_XACT_COMMIT: { + xl_xact_commit *xlrec = NULL; + TransactionId *subxacts = NULL; + SharedInvalidationMessage *invals = NULL; + + xlrec = (xl_xact_commit *)buf->record_data; + + subxacts = (TransactionId *)&(xlrec->xnodes[xlrec->nrels]); + invals = (SharedInvalidationMessage *)&(subxacts[xlrec->nsubxacts]); + + ParseCommitXlog(ctx, buf, XLogRecGetXid(r), xlrec->csn, xlrec->dbId, xlrec->xact_time, xlrec->nsubxacts, + subxacts, xlrec->nmsgs, invals, worker); + + break; + } + case XLOG_XACT_COMMIT_PREPARED: { + /* Prepared commits contain a normal commit record... */ + xl_xact_commit_prepared *prec = (xl_xact_commit_prepared *)buf->record_data; + xl_xact_commit *xlrec = &prec->crec; + + TransactionId *subxacts = (TransactionId *)&(xlrec->xnodes[xlrec->nrels]); + SharedInvalidationMessage *invals = (SharedInvalidationMessage *)&(subxacts[xlrec->nsubxacts]); + + ParseCommitXlog(ctx, buf, prec->xid, xlrec->csn, xlrec->dbId, xlrec->xact_time, xlrec->nsubxacts, subxacts, + xlrec->nmsgs, invals, worker); + break; + } + case XLOG_XACT_COMMIT_COMPACT: { + xl_xact_commit_compact *xlrec = NULL; + + xlrec = (xl_xact_commit_compact *)buf->record_data; + + ParseCommitXlog(ctx, buf, XLogRecGetXid(r), xlrec->csn, InvalidOid, xlrec->xact_time, xlrec->nsubxacts, + xlrec->subxacts, 0, NULL, worker); + break; + } + case XLOG_XACT_ABORT: { + xl_xact_abort *xlrec = NULL; + TransactionId *sub_xids = NULL; + + xlrec = (xl_xact_abort *)buf->record_data; + + sub_xids = (TransactionId *)&(xlrec->xnodes[xlrec->nrels]); + + ParseAbortXlog(ctx, buf->origptr, XLogRecGetXid(r), sub_xids, xlrec->nsubxacts, worker); + break; + } + case XLOG_XACT_ABORT_WITH_XID: { + xl_xact_abort *xlrec = (xl_xact_abort *)buf->record_data; + TransactionId curId = XLogRecGetXid(r); + TransactionId *sub_xids = (TransactionId *)(&(xlrec->xnodes[xlrec->nrels])); + curId = *(TransactionId *)((char*)&(xlrec->xnodes[xlrec->nrels]) + + (unsigned)(xlrec->nsubxacts) * sizeof(TransactionId)); + + ParseAbortXlog(ctx, buf->origptr, curId, sub_xids, xlrec->nsubxacts, worker); + break; + } + case XLOG_XACT_ABORT_PREPARED: { + xl_xact_abort_prepared *prec = NULL; + xl_xact_abort *xlrec = NULL; + TransactionId *sub_xids = NULL; + + /* prepared abort contain a normal commit abort... */ + prec = (xl_xact_abort_prepared *)buf->record_data; + xlrec = &prec->arec; + + sub_xids = (TransactionId *)&(xlrec->xnodes[xlrec->nrels]); + + /* r->xl_xid is committed in a separate record */ + ParseAbortXlog(ctx, buf->origptr, XLogRecGetXid(r), sub_xids, xlrec->nsubxacts, worker); + break; + } + case XLOG_XACT_ASSIGNMENT: + case XLOG_XACT_PREPARE: + break; + default: + ereport(WARNING, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unexpected RM_XACT_ID record type: %u", info))); + } +} + diff --git a/src/gausskernel/storage/replication/logical/logical_queue.cpp b/src/gausskernel/storage/replication/logical/logical_queue.cpp new file mode 100644 index 000000000..37e973792 --- /dev/null +++ b/src/gausskernel/storage/replication/logical/logical_queue.cpp @@ -0,0 +1,155 @@ +/* --------------------------------------------------------------------------------------- + * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * logical_queue.cpp + * A bounded queue that supports operations that wait for the queue to + * become non-empty when retrieving an element, and wait for space to + * become available in the queue when storing an element. + * + * This structure uses memory barrier in case of disorder. + * + * IDENTIFICATION + * src/gausskernel/storage/replication/logical/logical_queue.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" +#include "knl/knl_variable.h" +#include "utils/atomic.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "replication/logical_queue.h" +#include "storage/ipc.h" + +static const int gMaxCnt = 10; + +LogicalQueue *LogicalQueueCreate(uint32 capacity, uint32 slotId, CallBackFunc func) +{ + /* + * We require the capacity to be a power of 2, so index wrap can be + * handled by a bit-wise and. The actual capacity is one less than + * the specified, so the minimum capacity is 2. + */ + Assert(capacity >= QUEUE_CAPACITY_MIN_LIMIT && POWER_OF_TWO(capacity)); + + size_t allocSize = sizeof(LogicalQueue) + sizeof(void *) * capacity; + MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + LogicalQueue *queue = (LogicalQueue *)palloc0(allocSize); + MemoryContextSwitchTo(oldCtx); + + uint32 mask = capacity - 1; + pg_atomic_init_u32(&queue->writeHead, 0); + pg_atomic_init_u32(&queue->readTail, 0); + + queue->mask = mask; + queue->maxUsage = 0; + queue->totalCnt = 0; + queue->capacity = capacity; + queue->callBackFunc = func; + return queue; +} + +bool LogicalQueuePut(LogicalQueue *queue, void *element) +{ + uint32 head = 0; + uint32 tail = 0; + uint64 cnt = 0; + + head = pg_atomic_read_u32(&queue->writeHead); + do { + if (t_thrd.logicalreadworker_cxt.shutdown_requested || + t_thrd.parallel_decode_cxt.shutdown_requested) { + ereport(LOG, (errmsg("Parallel Decode Worker stop"))); + proc_exit(0); + } + tail = pg_atomic_read_u32(&queue->readTail); + cnt++; + + if (cnt >= gMaxCnt) { + pg_usleep(500L); + cnt = 0; + } + } while (SPACE(head, tail, queue->mask) == 0); + + /* + * Make sure the following write to the buffer happens after the read + * of the tail. Combining this with the corresponding barrier in Take() + * which guarantees that the tail is updated after reading the buffer, + * we can be sure that we cannot update a slot's value before it has + * been read. + */ + pg_memory_barrier(); + uint32 tmpCount = COUNT(head, tail, queue->mask); + if (tmpCount > queue->maxUsage) { + pg_atomic_write_u32(&queue->maxUsage, tmpCount); + } + + queue->buffer[head] = element; + + /* Make sure the index is updated after the buffer has been written. 
*/ + pg_write_barrier(); + + pg_atomic_write_u32(&queue->writeHead, (head + 1) & queue->mask); + + return true; +} + +void *LogicalQueueTop(LogicalQueue *queue) +{ + uint32 head = 0; + uint32 tail = 0; + uint32 count = 0; + tail = pg_atomic_read_u32(&queue->readTail); + do { + head = pg_atomic_read_u32(&queue->writeHead); + ++count; + /* here we sleep, let the cpu to do other important work */ + + if (count >= gMaxCnt) { + pg_usleep(1000L); + return NULL; + } + + if (queue->callBackFunc != NULL) { + queue->callBackFunc(); + } + } while (COUNT(head, tail, queue->mask) == 0); + + pg_read_barrier(); + void *elem = queue->buffer[tail]; + return elem; +} + +void LogicalQueuePop(LogicalQueue *queue) +{ + uint32 head; + uint32 tail; + uint64 totalCnt = pg_atomic_read_u64(&queue->totalCnt); + tail = pg_atomic_read_u32(&queue->readTail); + head = pg_atomic_read_u32(&queue->writeHead); + if (COUNT(head, tail, queue->mask) == 0) { + ereport(WARNING, (errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg("LogicalQueuePop queue error!"))); + return; + } + + /* Make sure the read of the buffer finishes before updating the tail. */ + pg_memory_barrier(); + pg_atomic_write_u64(&queue->totalCnt, (totalCnt + 1)); + pg_atomic_write_u32(&queue->readTail, (tail + 1) & queue->mask); +} + diff --git a/src/gausskernel/storage/replication/logical/logicalfuncs.cpp b/src/gausskernel/storage/replication/logical/logicalfuncs.cpp index afcf344dc..2f590f05a 100755 --- a/src/gausskernel/storage/replication/logical/logicalfuncs.cpp +++ b/src/gausskernel/storage/replication/logical/logicalfuncs.cpp @@ -55,6 +55,8 @@ typedef struct DecodingOutputState { bool binary_output; int64 returned_rows; } DecodingOutputState; +extern void shutdown_cb_wrapper(LogicalDecodingContext *ctx); + /* * Prepare for a output plugin write. @@ -115,11 +117,28 @@ void check_permissions(bool for_backup) } } +void CheckLogicalPremissions(Oid userId) +{ + if (g_instance.attr.attr_security.enablePrivilegesSeparate && + !superuser_arg(userId) && !has_rolreplication(userId)&& + !is_member_of_role(userId, DEFAULT_ROLE_REPLICATION)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be replication role or a member of the gs_role_replication role " + "to use replication slots when separation of privileges is used")))); + } else if (!g_instance.attr.attr_security.enablePrivilegesSeparate && + !superuser_arg(userId) && !systemDBA_arg(userId) && !has_rolreplication(userId) && + !is_member_of_role(userId, DEFAULT_ROLE_REPLICATION)) { + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be system admin or replication role or a member of the gs_role_replication role " + "to use replication slots")))); + } +} + /* * This is duplicate code with pg_xlogdump, similar to walsender.c, but * we currently don't have the infrastructure (elog!) to share it. 
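
The queue in logical_queue.cpp above is effectively a bounded single-producer/single-consumer ring: a power-of-two capacity so a mask handles index wrap, one slot sacrificed to tell full from empty, and barriers ordering buffer access against index publication. The same discipline in portable C++ atomics (a minimal sketch, not the kernel's implementation):

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

template <uint32_t Capacity> // must be a power of two; usable slots = Capacity - 1
struct SpscQueue {
    static_assert((Capacity & (Capacity - 1)) == 0, "capacity must be a power of two");
    void *buffer[Capacity] = {};
    std::atomic<uint32_t> writeHead{0};
    std::atomic<uint32_t> readTail{0};
    static const uint32_t mask = Capacity - 1;

    bool put(void *elem) {
        uint32_t head = writeHead.load(std::memory_order_relaxed);
        uint32_t tail = readTail.load(std::memory_order_acquire);
        if (((head + 1) & mask) == tail) return false; // full
        buffer[head] = elem;
        // Publish the slot only after the write to it completes.
        writeHead.store((head + 1) & mask, std::memory_order_release);
        return true;
    }

    void *take() {
        uint32_t tail = readTail.load(std::memory_order_relaxed);
        uint32_t head = writeHead.load(std::memory_order_acquire);
        if (head == tail) return nullptr; // empty
        void *elem = buffer[tail];
        // Free the slot only after reading it.
        readTail.store((tail + 1) & mask, std::memory_order_release);
        return elem;
    }
};

int main() {
    SpscQueue<8> q;
    int x = 7;
    q.put(&x);
    std::cout << *(int *)q.take() << "\n"; // 7
    return 0;
}
```
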
*/ -static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) +static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count, char* xlog_path) { char *p = NULL; XLogRecPtr recptr = InvalidXLogRecPtr; @@ -138,6 +157,8 @@ static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) if (t_thrd.logical_cxt.sendFd < 0 || !(((recptr) / XLogSegSize) == t_thrd.logical_cxt.sendSegNo)) { char path[MAXPGPATH]; + errno_t rc = memset_s(path, MAXPGPATH, 0, sizeof(path)); + securec_check(rc, "", ""); /* Switch to another logfile segment */ if (t_thrd.logical_cxt.sendFd >= 0) { @@ -145,11 +166,17 @@ static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) } t_thrd.logical_cxt.sendSegNo = (recptr) / XLogSegSize; - int nRet = 0; - nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X%08X%08X", tli, - (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), - (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); - securec_check_ss(nRet, "", ""); + int ret = 0; + if (xlog_path == NULL) { + ret = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X%08X%08X", tli, + (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), + (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + } else { + ret = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, "%s/%08X%08X%08X", xlog_path, tli, + (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), + (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + } + securec_check_ss(ret, "", ""); t_thrd.logical_cxt.sendFd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0); @@ -168,9 +195,16 @@ static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) if (lseek(t_thrd.logical_cxt.sendFd, (off_t)startoff, SEEK_SET) < 0) { char path[MAXPGPATH] = "\0"; - int nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X%08X%08X", tli, - (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), - (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + int nRet = 0; + if (xlog_path == NULL) { + nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X%08X%08X", tli, + (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), + (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + } else { + nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, "%s/%08X%08X%08X", xlog_path, tli, + (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), + (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + } securec_check_ss(nRet, "", ""); ereport(ERROR, (errcode_for_file_access(), @@ -188,12 +222,18 @@ static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) readbytes = read(t_thrd.logical_cxt.sendFd, p, segbytes); if (readbytes <= 0) { + int rc = 0; char path[MAXPGPATH] = "\0"; - - int nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X%08X%08X", tli, - (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), - (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); - securec_check_ss(nRet, "", ""); + if (xlog_path == NULL) { + rc = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X%08X%08X", tli, + (uint32)((t_thrd.logical_cxt.sendSegNo) / XLogSegmentsPerXLogId), + (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + } else { + rc = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, "%s/%08X%08X%08X", xlog_path, tli, + (uint32)((t_thrd.logical_cxt.sendSegNo) / 
XLogSegmentsPerXLogId), + (uint32)((t_thrd.logical_cxt.sendSegNo) % XLogSegmentsPerXLogId)); + } + securec_check_ss(rc, "", ""); ereport(ERROR, (errcode_for_file_access(), errmsg("could not read from log segment %s, offset %u, length %lu: %m", @@ -221,7 +261,7 @@ static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) * loop for now. */ int logical_read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *cur_page, TimeLineID *pageTLI) + char *cur_page, TimeLineID *pageTLI, char* xlog_path) { XLogRecPtr flushptr, loc; int count; @@ -259,7 +299,7 @@ int logical_read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePt else count = (flushptr) - (targetPagePtr); - XLogRead(cur_page, *pageTLI, targetPagePtr, XLOG_BLCKSZ); + XLogRead(cur_page, *pageTLI, targetPagePtr, XLOG_BLCKSZ, xlog_path); return count; } @@ -315,8 +355,10 @@ static Datum pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool conf int rc = 0; char path[MAXPGPATH]; struct stat st; + t_thrd.logical_cxt.IsAreaDecode = false; - check_permissions(); + Oid userId = GetUserId(); + CheckLogicalPremissions(userId); ValidateName(NameStr(*name)); if (RecoveryInProgress() && confirm) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("couldn't advance in recovery"))); @@ -503,6 +545,306 @@ static Datum pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool conf return (Datum)0; } +static XLogRecPtr getStartLsn(FunctionCallInfo fcinfo) +{ + XLogRecPtr start_lsn = InvalidXLogRecPtr; + if (PG_ARGISNULL(0)) + start_lsn = InvalidXLogRecPtr; + else { + const char *str_start_lsn = TextDatumGetCString(PG_GETARG_DATUM(0)); + ValidateName(str_start_lsn); + if (!AssignLsn(&start_lsn, str_start_lsn)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type lsn: \"%s\" " + "of start_lsn", + str_start_lsn))); + } + } + return start_lsn; +} + +static XLogRecPtr getUpToLsn(FunctionCallInfo fcinfo) +{ + XLogRecPtr upto_lsn = InvalidXLogRecPtr; + if (PG_ARGISNULL(1)) + upto_lsn = InvalidXLogRecPtr; + else { + const char *str_upto_lsn = TextDatumGetCString(PG_GETARG_DATUM(1)); + ValidateName(str_upto_lsn); + if (!AssignLsn(&upto_lsn, str_upto_lsn)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid input syntax for type lsn: \"%s\" " + "of upto_lsn", + str_upto_lsn))); + } + } + return upto_lsn; +} + +static int64 getUpToNChanges(FunctionCallInfo fcinfo) +{ + if (PG_ARGISNULL(2)) + return 0; + else { + return PG_GETARG_INT32(2); + } +} + +static void CheckSupportTupleStore(FunctionCallInfo fcinfo) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); +} + +char* getXlogDirUpdateLsn(FunctionCallInfo fcinfo, XLogRecPtr *start_lsn, XLogRecPtr *upto_lsn) { + char* xlog_path = TextDatumGetCString(PG_GETARG_DATUM(4)); + char* xlog_name = basename(xlog_path); + char* xlog_dir = dirname(xlog_path); + uint32 log; + uint32 seg; + uint32 tli; + errno_t ret = sscanf_s(xlog_name, "%08X%08X%08X", &tli, &log, &seg); + char *str_lsn = NULL; + str_lsn = (char*)palloc(MAXPGPATH); + ret 
= memset_s(str_lsn, MAXPGPATH, '\0', MAXPGPATH); + securec_check(ret, "", ""); + sprintf_s(str_lsn, MAXPGPATH, "%X/%X000000", log, seg); + ValidateName(str_lsn); + if (!AssignLsn(start_lsn, str_lsn)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid lsn: \"%s\" " + "of start_lsn", + str_lsn))); + } + ret = memset_s(str_lsn, MAXPGPATH, '\0', MAXPGPATH); + securec_check(ret, "", ""); + sprintf_s(str_lsn, MAXPGPATH, "%X/%XFFFFFF", log, seg); + ValidateName(str_lsn); + if (!AssignLsn(upto_lsn, str_lsn)) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid lsn: \"%s\" " + "of upto_lsn", + str_lsn))); + } + return xlog_dir; +} + +void CheckAreaDecodeOption(FunctionCallInfo fcinfo) +{ + ArrayType *arr = PG_GETARG_ARRAYTYPE_P(5); + Size ndim = (Size)(uint)(ARR_NDIM(arr)); + + if (ndim > 1) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("array must be one-dimensional"))); + } else if (array_contains_nulls(arr)) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("array must not contain nulls"))); + } else if (ndim == 1 && ARR_ELEMTYPE(arr) != TEXTOID) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("array must be TEXTOID"))); + } + +} + +/* + * Decode and ouput changes from start lsn to upto lsn. + */ +static Datum pg_logical_get_area_changes_guts(FunctionCallInfo fcinfo) +{ + XLogRecPtr start_lsn = InvalidXLogRecPtr; + XLogRecPtr upto_lsn = InvalidXLogRecPtr; + int64 upto_nchanges = 0; + ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; + MemoryContext per_query_ctx = NULL; + MemoryContext oldcontext = NULL; + XLogRecPtr startptr = InvalidXLogRecPtr; + XLogRecPtr end_of_wal = InvalidXLogRecPtr; + LogicalDecodingContext *ctx = NULL; + ResourceOwner old_resowner = t_thrd.utils_cxt.CurrentResourceOwner; + ArrayType *arr = NULL; + Size ndim = 0; + List *options = NIL; + DecodingOutputState *p = NULL; + char* xlog_dir = NULL; + t_thrd.logical_cxt.IsAreaDecode = true; + + Oid userId = GetUserId(); + CheckLogicalPremissions(userId); + if (PG_ARGISNULL(0) && PG_ARGISNULL(4)) { + ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("start_lsn and xlog_dir cannot be null at the same time."))); + } + + /* arg1 get start lsn start_lsn */ + start_lsn = getStartLsn(fcinfo); + /* arg2 get end lsn upto_lsn */ + upto_lsn = getUpToLsn(fcinfo); + /* arg3 how many statements to output upto_nchanges */ + upto_nchanges = getUpToNChanges(fcinfo); + /* arg4 output format plugin */ + Name plugin = PG_GETARG_NAME(3); + ValidateName(NameStr(*plugin)); + + /* check to see if caller supports us returning a tuplestore */ + CheckSupportTupleStore(fcinfo); + + /* state to write output to */ + p = (DecodingOutputState *)palloc0(sizeof(DecodingOutputState)); + + p->binary_output = false; + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &p->tupdesc) != TYPEFUNC_COMPOSITE) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("return type must be a row type"))); + /* + * arg5 which xlog file to decode. If it is NULL, decode the origin xlog. + * If xlog_dir is not null, get the xlog dir and update the lsn. + */ + if (PG_ARGISNULL(4)) { + xlog_dir = NULL; + } else { + xlog_dir = getXlogDirUpdateLsn(fcinfo, &start_lsn, &upto_lsn); + } + /* The memory is controlled. The number of decoded files cannot exceed 10. 
+     */
+    XLogRecPtr max_lsn_distance = XLOG_SEG_SIZE * 10;
+    if (upto_lsn < start_lsn) {
+        ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmsg("upto_lsn cannot be smaller than start_lsn.")));
+    }
+    if (upto_lsn == InvalidXLogRecPtr || ((upto_lsn - start_lsn) > max_lsn_distance)) {
+        upto_lsn = start_lsn + max_lsn_distance;
+    }
+
+    arr = PG_GETARG_ARRAYTYPE_P(5);
+    ndim = (Size)(uint)(ARR_NDIM(arr));
+
+    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+    oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+    CheckAreaDecodeOption(fcinfo);
+
+    if (ndim == 1) {
+        int i = 0;
+        Datum *datum_opts = NULL;
+        int nelems = 0;
+
+        deconstruct_array(arr, TEXTOID, -1, false, 'i', &datum_opts, NULL, &nelems);
+
+        if (nelems % 2 != 0)
+            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("array must have an even number of elements")));
+
+        for (i = 0; i < nelems; i = i + 2) {
+            char *dname = TextDatumGetCString(datum_opts[i]);
+            char *opt = TextDatumGetCString(datum_opts[i + 1]);
+            ValidateName(dname);
+            options = lappend(options, makeDefElem(dname, (Node *)makeString(opt)));
+        }
+    }
+
+    p->tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem);
+    rsinfo->setDesc = p->tupdesc;
+    rsinfo->returnMode = SFRM_Materialize;
+    rsinfo->setResult = p->tupstore;
+
+    /* compute the current end-of-wal */
+    if (!RecoveryInProgress())
+        end_of_wal = GetFlushRecPtr();
+    else
+        end_of_wal = GetXLogReplayRecPtr(NULL);
+
+    CheckLogicalDecodingRequirements(u_sess->proc_cxt.MyDatabaseId);
+
+    PG_TRY();
+    {
+        ctx = CreateDecodingContextForArea(InvalidXLogRecPtr, NameStr(*plugin), options, false, logical_read_local_xlog_page,
+            LogicalOutputPrepareWrite, LogicalOutputWrite);
+
+        (void)MemoryContextSwitchTo(oldcontext);
+
+        /*
+         * Check whether the output plugin writes textual output, since that
+         * is what we need.
+         */
+        if (ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
+            ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("output plugin cannot produce binary output")));
+
+        ctx->output_writer_private = p;
+
+        /* find a valid recptr to start from */
+        startptr = XLogFindNextRecord(ctx->reader, start_lsn, NULL, xlog_dir);
+
+        t_thrd.utils_cxt.CurrentResourceOwner = ResourceOwnerCreate(t_thrd.utils_cxt.CurrentResourceOwner,
+            "logical decoding", THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE));
+
+        /* invalidate non-timetravel entries */
+        InvalidateSystemCaches();
+
+        while ((!XLByteEQ(startptr, InvalidXLogRecPtr) && XLByteLT(startptr, end_of_wal)) ||
+            (!XLByteEQ(ctx->reader->EndRecPtr, InvalidXLogRecPtr) && XLByteLT(ctx->reader->EndRecPtr, end_of_wal))) {
+            XLogRecord *record = NULL;
+            char *errm = NULL;
+
+            record = XLogReadRecord(ctx->reader, startptr, &errm, true, xlog_dir);
+            if (errm != NULL) {
+                ereport(WARNING, (errcode(ERRCODE_LOGICAL_DECODE_ERROR),
+                    errmsg("Stopped parsing: no valid XLog record at %X/%X: %s.",
+                    (uint32)(ctx->reader->EndRecPtr >> 32), (uint32)ctx->reader->EndRecPtr, errm)));
+                break;
+            }
+
+            startptr = InvalidXLogRecPtr;
+
+            /*
+             * The {begin_txn,change,commit_txn}_wrapper callbacks above will
+             * store the description into our tuplestore.
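+             * Each decoded change therefore becomes one row of this
+             * function's result set; the loop ends at upto_lsn, after
+             * upto_nchanges rows, or at end_of_wal (editor's summary).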
+             */
+            if (record != NULL)
+                AreaLogicalDecodingProcessRecord(ctx, ctx->reader);
+
+            /* check limits */
+            if (!XLByteEQ(upto_lsn, InvalidXLogRecPtr) && XLByteLE(upto_lsn, ctx->reader->EndRecPtr))
+                break;
+            if (upto_nchanges != 0 && upto_nchanges <= p->returned_rows)
+                break;
+            CHECK_FOR_INTERRUPTS();
+        }
+    }
+    PG_CATCH();
+    {
+        /* clear all timetravel entries */
+        InvalidateSystemCaches();
+
+        PG_RE_THROW();
+    }
+    PG_END_TRY();
+    tuplestore_donestoring(tupstore);
+    t_thrd.utils_cxt.CurrentResourceOwner = old_resowner;
+
+    if (t_thrd.logical_cxt.sendFd >= 0) {
+        (void)close(t_thrd.logical_cxt.sendFd);
+        t_thrd.logical_cxt.sendFd = -1;
+    }
+
+    /* free context, call shutdown callback */
+    if (ctx->callbacks.shutdown_cb != NULL)
+        shutdown_cb_wrapper(ctx);
+
+    ReorderBufferFree(ctx->reorder);
+    XLogReaderFree(ctx->reader);
+    MemoryContextDelete(ctx->context);
+    InvalidateSystemCaches();
+
+    return (Datum)0;
+}
+
 /*
  * SQL function returning the changestream as text, consuming the data.
  */
@@ -539,11 +881,25 @@ Datum pg_logical_slot_peek_binary_changes(PG_FUNCTION_ARGS)
     return ret;
 }
 
+Datum pg_logical_get_area_changes(PG_FUNCTION_ARGS)
+{
+    Datum ret = pg_logical_get_area_changes_guts(fcinfo);
+    return ret;
+}
+
+
 Datum gs_write_term_log(PG_FUNCTION_ARGS)
 {
     if (RecoveryInProgress()) {
         PG_RETURN_BOOL(false);
     }
+
+    /* we are about to start streaming switchover, stop any xlog insert. */
+    if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) {
+        ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+            errmsg("cannot write term log during streaming disaster recovery")));
+    }
+
     uint32 term_cur = Max(g_instance.comm_cxt.localinfo_cxt.term_from_file,
         g_instance.comm_cxt.localinfo_cxt.term_from_xlog);
     write_term_log(term_cur);
diff --git a/src/gausskernel/storage/replication/logical/origin.cpp b/src/gausskernel/storage/replication/logical/origin.cpp
index 36da812b1..15fa6cd43 100644
--- a/src/gausskernel/storage/replication/logical/origin.cpp
+++ b/src/gausskernel/storage/replication/logical/origin.cpp
@@ -115,7 +115,7 @@ typedef struct ReplicationStateOnDisk {
 
 static void replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
 {
-#ifdef ENABLE_MULTIPLE_NODES
+#if defined(ENABLE_MULTIPLE_NODES) || defined(ENABLE_LITE_MODE)
     ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
         errmsg("openGauss does not support replication origin yet"),
@@ -672,7 +672,21 @@ void StartupReplicationOrigin(void)
 
         /* copy data to shared memory */
         u_sess->reporigin_cxt.repStatesShm->states[last_state].roident = disk_state.roident;
-        u_sess->reporigin_cxt.repStatesShm->states[last_state].remote_lsn = disk_state.remote_lsn;
+        if (u_sess->reporigin_cxt.repStatesShm->states[last_state].remote_lsn > disk_state.remote_lsn) {
+            /*
+             * This may happen on a standby during switchover. Before calling startup, the standby has
+             * already redone a xact_commit, which set the latest remote_lsn in shared memory (see
+             * xact_redo_commit_internal -> replorigin_advance). In this case, we can just ignore the
+             * value on disk.
+             */
+            XLogRecPtr currentLsn = u_sess->reporigin_cxt.repStatesShm->states[last_state].remote_lsn;
+            ereport(LOG, (errmsg("tried to recover an older replication state from disk, ignoring it. 
" + "current remote_lsn: %X/%X, remote_lsn in disk: %X/%X", + (uint32)(currentLsn >> BITS_PER_INT), (uint32)currentLsn, + (uint32)(disk_state.remote_lsn >> BITS_PER_INT), (uint32)disk_state.remote_lsn))); + } else { + u_sess->reporigin_cxt.repStatesShm->states[last_state].remote_lsn = disk_state.remote_lsn; + } last_state++; elog(LOG, "recovered replication state of node %u to %X/%X", disk_state.roident, @@ -728,6 +742,17 @@ void replorigin_redo(XLogReaderState *record) } } +const char* replorigin_type_name(uint8 subtype) +{ + uint8 info = subtype & ~XLR_INFO_MASK; + if (info == XLOG_REPLORIGIN_SET) { + return "set_replication_origin"; + } else if (info == XLOG_REPLORIGIN_DROP) { + return "drop_replication_origin"; + } else { + return "unkown_type"; + } +} /* * Tell the replication origin progress machinery that a commit from 'node' @@ -1242,7 +1267,9 @@ Datum pg_replication_origin_advance(PG_FUNCTION_ARGS) /* lock to prevent the replication origin from vanishing */ LockRelationOid(ReplicationOriginRelationId, RowExclusiveLock); - node = replorigin_by_name(text_to_cstring(name), false); + char *originName = text_to_cstring(name); + node = replorigin_by_name(originName, false); + pfree(originName); /* * Can't sensibly pass a local commit to be flushed at checkpoint - this @@ -1279,6 +1306,7 @@ Datum pg_replication_origin_progress(PG_FUNCTION_ARGS) roident = replorigin_by_name(name, false); Assert(OidIsValid(roident)); + pfree(name); remote_lsn = replorigin_get_progress(roident, flush); if (remote_lsn == InvalidXLogRecPtr) PG_RETURN_NULL(); diff --git a/src/gausskernel/storage/replication/logical/parallel_decode.cpp b/src/gausskernel/storage/replication/logical/parallel_decode.cpp new file mode 100644 index 000000000..b6e280131 --- /dev/null +++ b/src/gausskernel/storage/replication/logical/parallel_decode.cpp @@ -0,0 +1,940 @@ +/* --------------------------------------------------------------------------------------- + * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * parallel_decode.cpp + * This module decodes WAL records read using xlogreader.h's APIs for the + * purpose of parallel logical decoding by passing logical log information to the + * parallel_reorderbuffer. + * It mainly involves the logic of reading data by reader thread and parsing + * logic log by decoder thread. 
+ * + * IDENTIFICATION + * src/gausskernel/storage/replication/logical/parallel_decode.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/heapam.h" +#include "access/transam.h" +#include "access/xact.h" +#include "access/xlog_internal.h" +#include "access/xlogreader.h" + +#include "catalog/gs_matview.h" +#include "catalog/pg_control.h" + +#include "libpq/pqformat.h" + +#include "storage/standby.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" + +#include "utils/acl.h" +#include "utils/memutils.h" +#include "utils/relfilenodemap.h" +#include "utils/atomic.h" +#include "cjson/cJSON.h" + +#include "replication/decode.h" +#include "replication/logical.h" +#include "replication/parallel_decode.h" +#include "replication/parallel_reorderbuffer.h" +#include "replication/reorderbuffer.h" +#include "replication/snapbuild.h" + +/* RMGR Handlers */ +ParallelReorderBufferTXN *ParallelReorderBufferGetOldestTXN(ParallelReorderBuffer *rb) +{ + ParallelReorderBufferTXN *txn = NULL; + + if (dlist_is_empty(&rb->toplevel_by_lsn)) + return NULL; + + txn = dlist_head_element(ParallelReorderBufferTXN, node, &rb->toplevel_by_lsn); + + Assert(!txn->is_known_as_subxact); + Assert(!XLByteEQ(txn->first_lsn, InvalidXLogRecPtr)); + return txn; +} + +void tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls) +{ + if (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data)) + return; + + Oid oid = 0; + + /* print oid of tuple, it's not included in the TupleDesc */ + if ((oid = HeapTupleHeaderGetOid(tuple->t_data)) != InvalidOid) { + appendStringInfo(s, " oid[oid]:%u", oid); + } + + /* print all columns individually */ + for (int natt = 0; natt < tupdesc->natts; natt++) { + Form_pg_attribute attr; /* the attribute itself */ + Oid typid; /* type of current attribute */ + Oid typoutput; /* output function */ + bool typisvarlena = false; + Datum origval; /* possibly toasted Datum */ + bool isnull = true; /* column is null? 
*/ + + attr = tupdesc->attrs[natt]; + + if (attr->attisdropped || attr->attnum < 0) + continue; + + typid = attr->atttypid; + + /* get Datum from tuple */ + if (tuple->tupTableType == HEAP_TUPLE) { + origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull); + } else { + origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull); + } + + if (isnull && skip_nulls) { + continue; + } + + /* print attribute name */ + appendStringInfoChar(s, ' '); + appendStringInfoString(s, quote_identifier(NameStr(attr->attname))); + + /* print attribute type */ + appendStringInfoChar(s, '['); + char* type_name = format_type_be(typid); + if (strlen(type_name) == strlen("clob") && strncmp(type_name, "clob", strlen("clob")) == 0) { + errno_t rc = strcpy_s(type_name, sizeof("text"), "text"); + securec_check_c(rc, "\0", "\0"); + } + appendStringInfoString(s, type_name); + appendStringInfoChar(s, ']'); + + /* query output function */ + getTypeOutputInfo(typid, &typoutput, &typisvarlena); + + /* print separator */ + appendStringInfoChar(s, ':'); + + /* print data */ + if (isnull) { + appendStringInfoString(s, "null"); + } else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK_B(origval)) { + appendStringInfoString(s, "unchanged-toast-datum"); + } else if (!typisvarlena) { + PrintLiteral(s, typid, OidOutputFunctionCall(typoutput, origval)); + } else { + Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval)); + PrintLiteral(s, typid, OidOutputFunctionCall(typoutput, val)); + } + } +} + +/* + * Print literal `outputstr' already represented as string of type `typid' + * into stringbuf `s'. + * + * Some builtin types aren't quoted, the rest is quoted. Escaping is done as + * if u_sess->parser_cxt.standard_conforming_strings were enabled. + */ +void PrintLiteral(StringInfo s, Oid typid, char* outputstr) +{ + const char* valptr = NULL; + + switch (typid) { + case INT1OID: + case INT2OID: + case INT4OID: + case INT8OID: + case OIDOID: + case FLOAT4OID: + case FLOAT8OID: + case NUMERICOID: + /* NB: We don't care about Inf, NaN et al. 
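+             * For example (editor's note): "42" is appended here unquoted,
+             * while the default branch below renders the text value O'Brien
+             * as 'O''Brien', doubling the embedded quote.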
*/ + appendStringInfoString(s, outputstr); + break; + + case BITOID: + case VARBITOID: + appendStringInfo(s, "B'%s'", outputstr); + break; + + case BOOLOID: + if (strcmp(outputstr, "t") == 0) + appendStringInfoString(s, "true"); + else + appendStringInfoString(s, "false"); + break; + + default: + appendStringInfoChar(s, '\''); + for (valptr = outputstr; *valptr; valptr++) { + char ch = *valptr; + + if (SQL_STR_DOUBLE(ch, false)) + appendStringInfoChar(s, ch); + appendStringInfoChar(s, ch); + } + appendStringInfoChar(s, '\''); + break; + } +} + +/* parallel decoding filter results by white list */ +static bool FilterWhiteList(const char *schema, const char *table, int slotId, MemoryContext old, MemoryContext ctx) +{ + if (g_Logicaldispatcher[slotId].pOptions.tableWhiteList != NIL && + !CheckWhiteList(g_Logicaldispatcher[slotId].pOptions.tableWhiteList, schema, table)) { + (void)MemoryContextSwitchTo(old); + MemoryContextReset(ctx); + return true; + } + return false; +} + +/* parallel logical decoding callback with decode style: text */ +void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChange* change, logicalLog *logChange, + ParallelLogicalDecodingContext* ctx, int slotId) +{ + Form_pg_class class_form; + TupleDesc tupdesc; + logChange->type = LOGICAL_LOG_DML; + logChange->lsn = change->lsn; + logChange->xid = change->xid; + MemoryContext old; + ParallelDecodingData *data = (ParallelDecodingData *)ctx->output_plugin_private; + old = MemoryContextSwitchTo(data->context); + + class_form = RelationGetForm(relation); + tupdesc = RelationGetDescr(relation); + + char *schema = get_namespace_name(class_form->relnamespace); + char *table = NameStr(class_form->relname); + if (FilterWhiteList(schema, table, slotId, old, data->context)) { + return; + } + + int curPos = logChange->out->len; + uint32 changeLen = 0; + if (g_Logicaldispatcher[slotId].pOptions.sending_batch > 0) { + pq_sendint32(logChange->out, changeLen); + pq_sendint64(logChange->out, change->lsn); + } + + appendStringInfo(logChange->out, "table %s %s", schema, table); + + switch (change->action) { + case PARALLEL_REORDER_BUFFER_CHANGE_INSERT: + case PARALLEL_REORDER_BUFFER_CHANGE_UINSERT: + appendStringInfoString(logChange->out, " INSERT:"); + if (change->data.tp.newtuple == NULL) + appendStringInfoString(logChange->out, " (no-tuple-data)"); + else + tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false); + break; + + case PARALLEL_REORDER_BUFFER_CHANGE_UPDATE: + case PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE: + appendStringInfoString(logChange->out, " UPDATE:"); + if (change->data.tp.oldtuple != NULL) { + appendStringInfoString(logChange->out, " old-key:"); + tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true); + appendStringInfoString(logChange->out, " new-tuple:"); + } + + if (change->data.tp.newtuple == NULL) + appendStringInfoString(logChange->out, " (no-tuple-data)"); + else + tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false); + break; + + case PARALLEL_REORDER_BUFFER_CHANGE_DELETE: + case PARALLEL_REORDER_BUFFER_CHANGE_UDELETE: + appendStringInfoString(logChange->out, " DELETE:"); + + /* if there was no PK, we only know that a delete happened */ + if (change->data.tp.oldtuple == NULL) + appendStringInfoString(logChange->out, " (no-tuple-data)"); + /* In DELETE, only the replica identity is present; display that */ + else + tuple_to_stringinfo(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true); 
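+            /*
+             * Editor's illustration: under this text format, a delete on
+             * table t(id int) decodes to "table public t DELETE: id[integer]:1".
+             */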
+ break; + + default: + break; + } + + if (g_Logicaldispatcher[slotId].pOptions.sending_batch > 0) { + changeLen = htonl((uint32)(logChange->out->len - curPos) - (uint32)sizeof(uint32)); + errno_t rc = memcpy_s(logChange->out->data + curPos, sizeof(uint32), &changeLen, sizeof(uint32)); + securec_check(rc, "", ""); + } + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); +} + +static void TupleToJsoninfo( + cJSON* cols_name, cJSON* cols_type, cJSON* cols_val, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls) +{ + if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) || + (int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) { + return; + } + + /* print all columns individually */ + for (int natt = 0; natt < tupdesc->natts; natt++) { + Form_pg_attribute attr = tupdesc->attrs[natt]; /* the attribute itself */ + if (attr->attisdropped || attr->attnum < 0) { + continue; + } + + Oid typid = attr->atttypid; /* type of current attribute */ + Datum origval = 0; /* possibly toasted Datum */ + + /* get Datum from tuple */ + bool isnull = false; /* column is null? */ + if (tuple->tupTableType == HEAP_TUPLE) { + origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull); + } else { + origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull); + } + if (isnull && skip_nulls) { + continue; + } + + /* print attribute name */ + + cJSON* colName = cJSON_CreateString(quote_identifier(NameStr(attr->attname))); + cJSON_AddItemToArray(cols_name, colName); + + /* print attribute type */ + if (cols_type != NULL) { + char* typeName = format_type_be(typid); + if (strlen(typeName) == strlen("clob") && strncmp(typeName, "clob", strlen("clob")) == 0) { + errno_t rc = strcpy_s(typeName, sizeof("clob"), "text"); + securec_check_c(rc, "\0", "\0"); + } + cJSON* colType = cJSON_CreateString(typeName); + cJSON_AddItemToArray(cols_type, colType); + } + + /* query output function */ + Oid typoutput = 0; + bool typisvarlena = false; + getTypeOutputInfo(typid, &typoutput, &typisvarlena); + + /* print separator */ + StringInfo val_str = makeStringInfo(); + /* print data */ + if (isnull) { + appendStringInfoString(val_str, "null"); + } else if (!typisvarlena) { + PrintLiteral(val_str, typid, OidOutputFunctionCall(typoutput, origval)); + } else { + Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval)); + PrintLiteral(val_str, typid, OidOutputFunctionCall(typoutput, val)); + } + cJSON* col_val = cJSON_CreateString(val_str->data); + cJSON_AddItemToArray(cols_val, col_val); + } +} + +/* parallel logical decoding callback with decode style: json */ +void parallel_decode_change_to_json(Relation relation, ParallelReorderBufferChange* change, logicalLog *logChange, + ParallelLogicalDecodingContext* ctx, int slotId) +{ + Form_pg_class class_form = NULL; + TupleDesc tupdesc = NULL; + MemoryContext old; + char* res = NULL; + + logChange->type = LOGICAL_LOG_DML; + logChange->lsn = change->lsn; + logChange->xid = change->xid; + + ParallelDecodingData *data = (ParallelDecodingData*)ctx->output_plugin_private; + + data->pOptions.xact_wrote_changes = true; + + class_form = RelationGetForm(relation); + tupdesc = RelationGetDescr(relation); + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + + char *schema = get_namespace_name(class_form->relnamespace); + char *table = NameStr(class_form->relname); + if (FilterWhiteList(schema, table, slotId, old, data->context)) { + return; + } + + uint32 changeLen = 0; + 
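+    /*
+     * Editor's note on the framing used when sending_batch > 0: a 4-byte
+     * length word and the 8-byte LSN are written first, and changeLen is
+     * back-patched once the JSON payload is complete, roughly:
+     *   curPos = out->len;
+     *   pq_sendint32(out, 0); pq_sendint64(out, lsn);   // placeholders
+     *   ... append payload ...
+     *   len = htonl(out->len - curPos - sizeof(uint32));
+     *   memcpy_s(out->data + curPos, sizeof(uint32), &len, sizeof(uint32));
+     */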
int curPos = logChange->out->len; + if (g_Logicaldispatcher[slotId].pOptions.sending_batch > 0) { + pq_sendint32(logChange->out, changeLen); + pq_sendint64(logChange->out, change->lsn); + } + + cJSON* root = cJSON_CreateObject(); + cJSON* tableName = NULL; + cJSON* opType = NULL; + cJSON* columnsVal = NULL; + cJSON* columnsName = NULL; + cJSON* columnsType = NULL; + cJSON* oldKeysName = NULL; + cJSON* oldKeysVal = NULL; + cJSON* oldKeysType = NULL; + tableName = cJSON_CreateString(quote_qualified_identifier(schema, table)); + cJSON_AddItemToObject(root, "table_name", tableName); + + columnsVal = cJSON_CreateArray(); + columnsName = cJSON_CreateArray(); + columnsType = cJSON_CreateArray(); + oldKeysName = cJSON_CreateArray(); + oldKeysVal = cJSON_CreateArray(); + oldKeysType = cJSON_CreateArray(); + + switch (change->action) { + case PARALLEL_REORDER_BUFFER_CHANGE_INSERT: + case PARALLEL_REORDER_BUFFER_CHANGE_UINSERT: + opType = cJSON_CreateString("INSERT"); + if (change->data.tp.newtuple != NULL) { + TupleToJsoninfo(columnsName, columnsType, columnsVal, tupdesc, &change->data.tp.newtuple->tuple, false); + } + break; + case PARALLEL_REORDER_BUFFER_CHANGE_UPDATE: + case PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE: + opType = cJSON_CreateString("UPDATE"); + if (change->data.tp.oldtuple != NULL) { + TupleToJsoninfo(oldKeysName, oldKeysType, oldKeysVal, tupdesc, &change->data.tp.oldtuple->tuple, true); + } + + if (change->data.tp.newtuple != NULL) { + TupleToJsoninfo(columnsName, columnsType, columnsVal, tupdesc, &change->data.tp.newtuple->tuple, false); + } + break; + case PARALLEL_REORDER_BUFFER_CHANGE_DELETE: + case PARALLEL_REORDER_BUFFER_CHANGE_UDELETE: + opType = cJSON_CreateString("DELETE"); + if (change->data.tp.oldtuple != NULL) { + TupleToJsoninfo(oldKeysName, oldKeysType, oldKeysVal, tupdesc, &change->data.tp.oldtuple->tuple, true); + } + /* if there was no PK, we only know that a delete happened */ + break; + + default: + Assert(false); + } + + cJSON_AddItemToObject(root, "op_type", opType); + cJSON_AddItemToObject(root, "columns_name", columnsName); + cJSON_AddItemToObject(root, "columns_type", columnsType); + cJSON_AddItemToObject(root, "columns_val", columnsVal); + cJSON_AddItemToObject(root, "old_keys_name", oldKeysName); + cJSON_AddItemToObject(root, "old_keys_type", oldKeysType); + cJSON_AddItemToObject(root, "old_keys_val", oldKeysVal); + + res = cJSON_PrintUnformatted(root); + if (res != NULL) { + appendStringInfoString(logChange->out, res); + } + + if (g_Logicaldispatcher[slotId].pOptions.sending_batch > 0) { + changeLen = htonl((uint32)(logChange->out->len - curPos) - (uint32)sizeof(uint32)); + errno_t rc = memcpy_s(logChange->out->data + curPos, sizeof(uint32), &changeLen, sizeof(uint32)); + securec_check(rc, "", ""); + } + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); +} + +/* append schema and table info */ +static void AppendRelation(StringInfo s, TupleDesc tupdesc, const char * schema, const char * table) +{ + pq_sendint16(s, (uint16)strlen(schema)); + appendStringInfoString(s, schema); + pq_sendint16(s, (uint16)strlen(table)); + appendStringInfoString(s, table); +} + +/* handle circumstances that should not be decoded */ +static inline bool AppendInvalidations(StringInfo s, TupleDesc tupdesc, HeapTuple tuple) +{ + if ((tuple->tupTableType == HEAP_TUPLE) && (HEAP_TUPLE_IS_COMPRESSED(tuple->t_data) || + (int)HeapTupleHeaderGetNatts(tuple->t_data, tupdesc) > tupdesc->natts)) { + pq_sendint16(s, 0); + return true; + } + return false; +} + +/* decode a 
tuple into binary style */ +static void AppendTuple(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skipNulls) +{ + if (AppendInvalidations(s, tupdesc, tuple)) { + return; + } + int curPos = s->len; + uint16 attrNum = 0; + pq_sendint16(s, (uint16)(tupdesc->natts)); + for (int natt = 0; natt < tupdesc->natts; natt++) { + Form_pg_attribute attr = tupdesc->attrs[natt]; + if (attr->attisdropped || attr->attnum < 0) { + continue; + } + + Oid typid = attr->atttypid; + bool isnull = false; + Datum origval = 0; + if (tuple->tupTableType == HEAP_TUPLE) { + origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull); + } else { + origval = uheap_getattr((UHeapTuple)tuple, natt + 1, tupdesc, &isnull); + } + if (isnull && skipNulls) { + continue; + } + attrNum++; + const char *columnName = quote_identifier(NameStr(attr->attname)); + pq_sendint16(s, (uint16)strlen(columnName)); + appendStringInfoString(s, columnName); + pq_sendint32(s, typid); + Oid typoutput = 0; + bool typisvarlena = false; + getTypeOutputInfo(typid, &typoutput, &typisvarlena); + const uint32 nullTag = 0xFFFFFFFF; + if (isnull) { + pq_sendint32(s, nullTag); + } else if (!typisvarlena) { + char *data = OidOutputFunctionCall(typoutput, origval); + pq_sendint32(s, strlen(data)); + appendStringInfoString(s, data); + } else { + Datum val = PointerGetDatum(PG_DETOAST_DATUM(origval)); + char *data = OidOutputFunctionCall(typoutput, val); + pq_sendint32(s, strlen(data)); + appendStringInfoString(s, data); + } + } + attrNum = ntohs(attrNum); + errno_t rc = memcpy_s(s->data + curPos, sizeof(uint16), &attrNum, sizeof(uint16)); + securec_check(rc, "", ""); +} + +/* parallel logical decoding callback with decode style: binary */ +void parallel_decode_change_to_bin(Relation relation, ParallelReorderBufferChange* change, logicalLog *logChange, + ParallelLogicalDecodingContext* ctx, int slotId) +{ + logChange->type = LOGICAL_LOG_DML; + logChange->lsn = change->lsn; + logChange->xid = change->xid; + ParallelDecodingData *data = (ParallelDecodingData *)ctx->output_plugin_private; + MemoryContext old = MemoryContextSwitchTo(data->context); + + Form_pg_class class_form = RelationGetForm(relation); + TupleDesc tupdesc = RelationGetDescr(relation); + + char *schema = get_namespace_name(class_form->relnamespace); + char *table = NameStr(class_form->relname); + if (FilterWhiteList(schema, table, slotId, old, data->context)) { + return; + } + + int curPos = logChange->out->len; + uint32 changeLen = 0; + pq_sendint32(logChange->out, changeLen); + pq_sendint64(logChange->out, change->lsn); + switch (change->action) { + case PARALLEL_REORDER_BUFFER_CHANGE_INSERT: + case PARALLEL_REORDER_BUFFER_CHANGE_UINSERT: + appendStringInfoChar(logChange->out, 'I'); + AppendRelation(logChange->out, tupdesc, schema, table); + if (change->data.tp.newtuple != NULL) { + appendStringInfoChar(logChange->out, 'N'); + AppendTuple(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false); + } + break; + + case PARALLEL_REORDER_BUFFER_CHANGE_UPDATE: + case PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE: + appendStringInfoChar(logChange->out, 'U'); + AppendRelation(logChange->out, tupdesc, schema, table); + + if (change->data.tp.newtuple != NULL) { + appendStringInfoChar(logChange->out, 'N'); + AppendTuple(logChange->out, tupdesc, &change->data.tp.newtuple->tuple, false); + } + if (change->data.tp.oldtuple != NULL) { + appendStringInfoChar(logChange->out, 'O'); + AppendTuple(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true); + } + break; + + case 
PARALLEL_REORDER_BUFFER_CHANGE_DELETE:
+        case PARALLEL_REORDER_BUFFER_CHANGE_UDELETE:
+            appendStringInfoChar(logChange->out, 'D');
+            AppendRelation(logChange->out, tupdesc, schema, table);
+            /* if there was no PK, we only know that a delete happened */
+            if (change->data.tp.oldtuple != NULL) {
+                appendStringInfoChar(logChange->out, 'O');
+                AppendTuple(logChange->out, tupdesc, &change->data.tp.oldtuple->tuple, true);
+            }
+            break;
+
+        default:
+            break;
+    }
+    changeLen = htonl((uint32)(logChange->out->len - curPos) - (uint32)sizeof(uint32));
+    errno_t rc = memcpy_s(logChange->out->data + curPos, sizeof(uint32), &changeLen, sizeof(uint32));
+    securec_check(rc, "", "");
+    MemoryContextSwitchTo(old);
+    MemoryContextReset(data->context);
+}
+
+/*
+ * Use caching to reduce frequent memory requests and releases.
+ * Use worker->freeGetLogicalLogHead to store logicalLogs that should be freed.
+ * A logicalLog is requested in the reader thread and freed in the decoder thread.
+ */
+logicalLog* GetLogicalLog(ParallelDecodeWorker *worker)
+{
+    logicalLog *logChange = NULL;
+    MemoryContext oldCtx;
+    int slotId = worker->slotId;
+    do {
+        if (worker->freeGetLogicalLogHead != NULL) {
+            logChange = worker->freeGetLogicalLogHead;
+            worker->freeGetLogicalLogHead = worker->freeGetLogicalLogHead->freeNext;
+        } else {
+            logicalLog *head = (logicalLog *)pg_atomic_exchange_uintptr(
+                (uintptr_t *)&g_Logicaldispatcher[slotId].freeLogicalLogHead, (uintptr_t)NULL);
+            if (head != NULL) {
+                logChange = head;
+                worker->freeGetLogicalLogHead = head->freeNext;
+            } else {
+                (void)pg_atomic_add_fetch_u32(&g_Logicaldispatcher[slotId].curLogNum, 1);
+                oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+                logChange = (logicalLog *)palloc(sizeof(logicalLog));
+                logChange->out = NULL;
+                logChange->freeNext = NULL;
+                MemoryContextSwitchTo(oldCtx);
+            }
+        }
+    } while (logChange == NULL);
+
+    logChange->type = LOGICAL_LOG_EMPTY;
+
+    if (logChange->out) {
+        resetStringInfo(logChange->out);
+    } else {
+        oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+        logChange->out = makeStringInfo();
+        MemoryContextSwitchTo(oldCtx);
+    }
+    return logChange;
+}
+
+/*
+ * Return a logical log to the cache.
+ */
+void FreeLogicalLog(logicalLog *logChange, int slotId)
+{
+    logicalLog *oldHead =
+        (logicalLog *)pg_atomic_read_uintptr((uintptr_t *)&g_Logicaldispatcher[slotId].freeLogicalLogHead);
+    uint32 curLogNum = g_Logicaldispatcher[slotId].curLogNum;
+
+    /* If the palloced memory exceeds the threshold, we just free it instead of caching it. */
+    if (curLogNum >= max_decode_cache_num) {
+        if (logChange->out != NULL && logChange->out->data != NULL) {
+            FreeStringInfo(logChange->out);
+        }
+        pfree(logChange);
+        (void)pg_atomic_sub_fetch_u32(&g_Logicaldispatcher[slotId].curLogNum, 1);
+        return;
+    }
+    do {
+        if (logChange->out != NULL) {
+            resetStringInfo(logChange->out);
+            errno_t rc = memset_s(logChange->out->data, logChange->out->maxlen, 0, logChange->out->maxlen);
+            securec_check(rc, "\0", "\0");
+        }
+        logChange->freeNext = oldHead;
+
+    } while (!pg_atomic_compare_exchange_uintptr((uintptr_t *)&(g_Logicaldispatcher[slotId].freeLogicalLogHead),
+        (uintptr_t *)&oldHead, (uintptr_t)logChange));
+}
+
+/*
+ * The parser thread polls and puts tuples into the decoder queue in LSN order.
+ * When there is a log that does not need to be parsed, an empty logical log is
+ * still inserted into the queue, so that order is preserved when the walsender
+ * polls to obtain logical logs.
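+ * For example (editor's illustration): for records r1 < r2 < r3 where r2
+ * needs no decoding, an empty logicalLog is still enqueued for r2, so the
+ * walsender can merge the per-decoder queues strictly by LSN.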
+ */
+void setVoidLogicalLog2queue(ParallelDecodeWorker *worker)
+{
+    logicalLog *logChange = GetLogicalLog(worker);
+    LogicalQueuePut(worker->LogicalLogQueue, logChange);
+}
+
+Snapshot GetLocalSnapshot(MemoryContext ctx)
+{
+    Size ssize = sizeof(SnapshotData);
+    Snapshot snapshot = (Snapshot)MemoryContextAllocZero(ctx, ssize);
+    snapshot->satisfies = SNAPSHOT_DECODE_MVCC;
+
+    snapshot->xmin = FirstNormalTransactionId;
+    snapshot->xmax = MaxTransactionId;
+
+    snapshot->suboverflowed = false;
+    snapshot->takenDuringRecovery = false;
+    snapshot->copied = false;
+    snapshot->curcid = FirstCommandId;
+    snapshot->active_count = 0;
+    snapshot->regd_count = 0;
+
+    snapshot->xip = NULL;
+    snapshot->xcnt = 0;
+
+    snapshot->subxcnt = 0;
+    snapshot->subxip = NULL;
+    return snapshot;
+}
+
+/*
+ * Decode insert/update/delete records into logical logs.
+ * Put each logical log into the queue and wait for the walsender thread to send it.
+ */
+void setIUDToLogicalQueue(ParallelReorderBufferChange* change, ParallelLogicalDecodingContext* ctx,
+    ParallelDecodeWorker *worker)
+{
+    Oid reloid = InvalidOid;
+    Oid partitionReltoastrelid = InvalidOid;
+    Relation relation = NULL;
+    int slotId = worker->slotId;
+
+    if (u_sess->utils_cxt.HistoricSnapshot == NULL) {
+        u_sess->utils_cxt.HistoricSnapshot = GetLocalSnapshot(ctx->context);
+    }
+    u_sess->utils_cxt.HistoricSnapshot->snapshotcsn = change->data.tp.snapshotcsn;
+    bool isSegment = IsSegmentFileNode(change->data.tp.relnode);
+    reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode, change->data.tp.relnode.relNode, isSegment);
+    if (reloid == InvalidOid) {
+        reloid = PartitionRelidByRelfilenode(change->data.tp.relnode.spcNode,
+            change->data.tp.relnode.relNode, partitionReltoastrelid, NULL, isSegment);
+    }
+    /*
+     * Catalog tuple without data, emitted while catalog was
+     * in the process of being rewritten.
+     */
+    if (change->data.tp.newtuple == NULL && change->data.tp.oldtuple == NULL) {
+        setVoidLogicalLog2queue(worker);
+        return;
+    } else if (reloid == InvalidOid) {
+        /*
+         * When we try to decode a table that has already been dropped, we may
+         * not find its relnode. In this case, we skip decoding this log.
+         * However, we still enqueue an empty logical log, because we need to
+         * ensure that the logical logs obtained by the walsender are in order.
+         */
+        ereport(DEBUG1, (errmsg("could not lookup relation %s", relpathperm(change->data.tp.relnode, MAIN_FORKNUM))));
+        setVoidLogicalLog2queue(worker);
+        return;
+    }
+    /*
+     * Do not decode private tables; otherwise there would be security problems.
+     */
+    if (is_role_independent(FindRoleid(reloid))) {
+        setVoidLogicalLog2queue(worker);
+        return;
+    }
+
+    relation = RelationIdGetRelation(reloid);
+    if (relation == NULL) {
+        ereport(DEBUG1, (errmsg("could not open relation descriptor %s",
+            relpathperm(change->data.tp.relnode, MAIN_FORKNUM))));
+        setVoidLogicalLog2queue(worker);
+        return;
+    }
+
+    if (CSTORE_NAMESPACE == get_rel_namespace(RelationGetRelid(relation))) {
+        setVoidLogicalLog2queue(worker);
+        RelationClose(relation);
+        return;
+    }
+
+    if (RelationIsLogicallyLogged(relation)) {
+        /*
+         * For now ignore sequence changes entirely. Most of
+         * the time they don't log changes using records we
+         * understand, so it doesn't make sense to handle the
+         * few cases we do.
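+         * Editor's note: the empty RELKIND_SEQUENCE branch below deliberately
+         * falls through to the setVoidLogicalLog2queue() call at the end of
+         * the function, so queue ordering is preserved for skipped records too.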
+ */ + + if (relation->rd_rel->relkind == RELKIND_SEQUENCE) { + } else if (!IsToastRelation(relation)) { /* user-triggered change */ + logicalLog *logChange = GetLogicalLog(worker); + g_Logicaldispatcher[slotId].pOptions.decode_change(relation, change, logChange, ctx, slotId); + LogicalQueuePut(worker->LogicalLogQueue, logChange); + RelationClose(relation); + return; + } + } + setVoidLogicalLog2queue(worker); + RelationClose(relation); +} + +/* + * decode commit or abort change. + */ +static void ParallelDecodeCommitOrAbort(ParallelReorderBufferChange* change, ParallelDecodeWorker *worker, + LogicalLogType logType) +{ + logicalLog *logChange = GetLogicalLog(worker); + logChange->lsn = change->lsn; + logChange->xid = change->xid; + logChange->type = logType; + logChange->csn = change->csn; + logChange->finalLsn = change->finalLsn; + logChange->endLsn = change->endLsn; + logChange->nsubxacts = change->nsubxacts; + logChange->commitTime = change->commitTime; + int slotId = worker->slotId; + Size subXidSize = sizeof(TransactionId) * change->nsubxacts; + if (subXidSize > 0) { + MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + logChange->subXids = (TransactionId *)palloc0(subXidSize); + MemoryContextSwitchTo(oldCtx); + errno_t rc = memcpy_s(logChange->subXids, subXidSize, change->subXids, subXidSize); + securec_check(rc, "", ""); + } + LogicalQueuePut(worker->LogicalLogQueue, logChange); +} + +/* + * decode change tuple, put it into logical queue. + */ +void ParallelDecodeChange(ParallelReorderBufferChange* change, ParallelLogicalDecodingContext* ctx, + ParallelDecodeWorker *worker) +{ + u_sess->attr.attr_common.extra_float_digits = LOGICAL_DECODE_EXTRA_FLOAT_DIGITS; + switch (change->action) { + case PARALLEL_REORDER_BUFFER_INVALIDATIONS_MESSAGE: { + for (int i = 0; i < change->ninvalidations; i++) { + LocalExecuteThreadAndSessionInvalidationMessage(&change->invalidations[i]); + } + break; + } + + case PARALLEL_REORDER_BUFFER_CHANGE_COMMIT: { + ParallelDecodeCommitOrAbort(change, worker, LOGICAL_LOG_COMMIT); + break; + } + + case PARALLEL_REORDER_BUFFER_CHANGE_ABORT: { + ParallelDecodeCommitOrAbort(change, worker, LOGICAL_LOG_ABORT); + break; + } + + case PARALLEL_REORDER_BUFFER_CHANGE_RUNNING_XACT: { + logicalLog *logChange = GetLogicalLog(worker); + logChange->lsn = change->lsn; + logChange->xid = change->xid; + logChange->oldestXmin = change->oldestXmin; + logChange->type = LOGICAL_LOG_RUNNING_XACTS; + logChange->csn = change->csn; + LogicalQueuePut(worker->LogicalLogQueue, logChange); + break; + } + + case PARALLEL_REORDER_BUFFER_CHANGE_INSERT: + case PARALLEL_REORDER_BUFFER_CHANGE_UPDATE: + case PARALLEL_REORDER_BUFFER_CHANGE_DELETE: + case PARALLEL_REORDER_BUFFER_CHANGE_UINSERT: + case PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE: + case PARALLEL_REORDER_BUFFER_CHANGE_UDELETE: { + setIUDToLogicalQueue(change, ctx, worker); + break; + } + + case PARALLEL_REORDER_BUFFER_CHANGE_CONFIRM_FLUSH: { + logicalLog *logChange = GetLogicalLog(worker); + logChange->lsn = change->lsn; + logChange->type = LOGICAL_LOG_CONFIRM_FLUSH; + LogicalQueuePut(worker->LogicalLogQueue, logChange); + break; + } + case PARALLEL_REORDER_BUFFER_NEW_CID: { + logicalLog *logChange = GetLogicalLog(worker); + logChange->xid = change->xid; + logChange->type = LOGICAL_LOG_NEW_CID; + LogicalQueuePut(worker->LogicalLogQueue, logChange); + break; + } + } +} + +ParallelStatusData *GetParallelDecodeStatus(uint32 *num) +{ + const uint32 slotNum = 20; + ParallelStatusData *result 
= (ParallelStatusData *)palloc0(slotNum * sizeof(ParallelStatusData)); + uint32 id = 0; + for (uint32 i = 0; i < slotNum; i++) { + if (!g_Logicaldispatcher[i].active) { + continue; + } + + errno_t rc = memcpy_s(result[id].slotName, NAMEDATALEN, g_Logicaldispatcher[i].slotName, NAMEDATALEN); + securec_check(rc, "", ""); + result[id].parallelDecodeNum = g_Logicaldispatcher[i].totalWorkerCount; + StringInfoData readQueueLen; + StringInfoData decodeQueueLen; + initStringInfo(&readQueueLen); + initStringInfo(&decodeQueueLen); + for (int j = 0; j < result[id].parallelDecodeNum; j++) { + ParallelDecodeWorker *worker = g_Logicaldispatcher[i].decodeWorkers[j]; + LogicalQueue *readQueue = worker->changeQueue; + uint32 readHead = pg_atomic_read_u32(&readQueue->writeHead); + uint32 readTail = pg_atomic_read_u32(&readQueue->readTail); + uint32 readCnt = COUNT(readHead, readTail, readQueue->mask); + appendStringInfo(&readQueueLen, "queue%d: %u", j, readCnt); + + LogicalQueue *decodeQueue = worker->LogicalLogQueue; + uint32 decodeHead = pg_atomic_read_u32(&decodeQueue->writeHead); + uint32 decodeTail = pg_atomic_read_u32(&decodeQueue->readTail); + uint32 decodeCnt = COUNT(decodeHead, decodeTail, decodeQueue->mask); + appendStringInfo(&decodeQueueLen, "queue%d: %u", j, decodeCnt); + + if (j < result[id].parallelDecodeNum - 1) { + appendStringInfoString(&readQueueLen, ", "); + appendStringInfoString(&decodeQueueLen, ", "); + } + } + rc = memcpy_s(result[id].readQueueLen, QUEUE_RESULT_LEN, readQueueLen.data, readQueueLen.len); + securec_check(rc, "", ""); + rc = memcpy_s(result[id].decodeQueueLen, QUEUE_RESULT_LEN, decodeQueueLen.data, decodeQueueLen.len); + securec_check(rc, "", ""); + FreeStringInfo(&readQueueLen); + FreeStringInfo(&decodeQueueLen); + id++; + } + *num = id; + return result; +} + diff --git a/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp b/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp new file mode 100644 index 000000000..e8a98ca9f --- /dev/null +++ b/src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp @@ -0,0 +1,1235 @@ +/* --------------------------------------------------------------------------------------- + * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * parallel_decode_worker.cpp + * This module is used for creating a reader, and several decoders + * in parallel decoding. 
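+ *      Editor's sketch of the startup sequence assembled from the functions
+ *      below: CheckAliveDecodeWorkers() verifies no stale workers remain,
+ *      StartLogicalDecodeWorkers() launches the decoders, StartDecodeReadWorker()
+ *      launches the reader, and WaitWorkerReady() gates the caller until the
+ *      reader and every decoder report PARALLEL_DECODE_WORKER_READY.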
+ * + * IDENTIFICATION + * src/gausskernel/storage/replication/logical/parallel_decode_worker.cpp + * + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include +#include + + +#include "access/rewriteheap.h" +#include "access/transam.h" +#include "access/tuptoaster.h" +#include "access/xact.h" + +#include "miscadmin.h" + +#include "replication/logical.h" +#include "replication/reorderbuffer.h" +#include "replication/slot.h" +#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */ +#include "replication/parallel_decode_worker.h" +#include "replication/parallel_decode.h" +#include "replication/logical_parse.h" + +#include "replication/parallel_reorderbuffer.h" +#include "access/xlog_internal.h" + +#include "storage/smgr/fd.h" +#include "storage/sinval.h" + +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/combocid.h" +#include "utils/guc.h" +#include "utils/memutils.h" +#include "utils/relcache.h" +#include "utils/relfilenodemap.h" +#include "knl/knl_thread.h" +#include "utils/postinit.h" +#include "utils/ps_status.h" +#include "storage/ipc.h" + +#include "catalog/catalog.h" +#include "catalog/pg_namespace.h" +#include "lib/binaryheap.h" +void SendSignalToDecodeWorker(int signal, int slotId); +void SendSignalToReaderWorker(int signal, int slotId); + +static const uint64 OUTPUT_WAIT_COUNT = 0x7FFFFFF; +static const uint64 PRINT_ALL_WAIT_COUNT = 0x7FFFFFFFF; +static const uint32 maxQueueLen = 1024; + +/* + * Parallel decoding kill a decoder. + */ +static void ParallelDecodeKill(int code, Datum arg) +{ + knl_t_parallel_decode_worker_context tDecodeCxt = t_thrd.parallel_decode_cxt; + int slotId = tDecodeCxt.slotId; + int id = tDecodeCxt.parallelDecodeId; + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[id].threadState = PARALLEL_DECODE_WORKER_EXIT; + g_Logicaldispatcher[slotId].abnormal = true; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + SendSignalToReaderWorker(SIGTERM, DatumGetInt32(arg)); + SendSignalToDecodeWorker(SIGTERM, DatumGetInt32(arg)); +} + +/* + * Send signal to a decoder thread. + */ +void SendSignalToDecodeWorker(int signal, int slotId) +{ + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + for (int i = 0; i < gDecodeCxt[slotId].totalNum; ++i) { + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + uint32 state = gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadState; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + if (state != PARALLEL_DECODE_WORKER_INVALID) { + int err = gs_signal_send(gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadId, signal); + if (err != 0) { + ereport(WARNING, (errmsg("Kill logicalDecoder(pid %lu, signal %d) failed: \"%s\",", + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadId, signal, gs_strerror(err)))); + } else { + ereport(LOG, (errmsg("Kill logicalDecoder(pid %lu, signal %d) successfully: \"%s\",", + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadId, signal, gs_strerror(err)))); + } + } + } +} + +/* + * Send signal to the reader thread. 
+ */ +void SendSignalToReaderWorker(int signal, int slotId) +{ + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + uint32 state = gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + + if (state != PARALLEL_DECODE_WORKER_INVALID) { + int err = gs_signal_send(gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadId, signal); + if (err != 0) { + ereport(WARNING, (errmsg("Kill logicalReader(pid %lu, signal %d) failed: \"%s\",", + gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadId, signal, gs_strerror(err)))); + } else { + ereport(LOG, (errmsg("Kill logicalReader(pid %lu, signal %d) Successfully: \"%s\",", + gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadId, signal, gs_strerror(err)))); + } + } +} + +/* Run from the worker thread. */ +static void ParallelDecodeSigHupHandler(SIGNAL_ARGS) +{ + t_thrd.parallel_decode_cxt.got_SIGHUP = true; +} + +static void ParallelDecodeShutdownHandler(SIGNAL_ARGS) +{ + t_thrd.parallel_decode_cxt.shutdown_requested = true; + ereport(LOG, (errmsg("ParallelDecodeShutdownHandler process shutdown."))); +} + +static void ParallelReaderShutdownHandler(SIGNAL_ARGS) +{ + t_thrd.logicalreadworker_cxt.shutdown_requested = true; + ereport(LOG, (errmsg("ParallelReaderShutdownHandler process shutdown."))); +} + +static void ParallelDecodeQuickDie(SIGNAL_ARGS) +{ + ereport(LOG, (errmsg("ParallelDecodeQuickDie process shutdown."))); + + int status = 2; + gs_signal_setmask(&t_thrd.libpq_cxt.BlockSig, NULL); + on_exit_reset(); + proc_exit(status); +} + +/* + * Run from the worker thread, set up the common part of signal handlers + * for logical decoding workers. + */ +static void SetupLogicalWorkerCommonHandlers() +{ + (void)gspqsignal(SIGHUP, ParallelDecodeSigHupHandler); + (void)gspqsignal(SIGINT, SIG_IGN); + (void)gspqsignal(SIGQUIT, ParallelDecodeQuickDie); + (void)gspqsignal(SIGALRM, SIG_IGN); + (void)gspqsignal(SIGPIPE, SIG_IGN); + (void)gspqsignal(SIGUSR1, SIG_IGN); + (void)gspqsignal(SIGUSR2, SIG_IGN); + (void)gspqsignal(SIGCHLD, SIG_IGN); + (void)gspqsignal(SIGTTIN, SIG_IGN); + (void)gspqsignal(SIGTTOU, SIG_IGN); + (void)gspqsignal(SIGCONT, SIG_IGN); + (void)gspqsignal(SIGWINCH, SIG_IGN); +} + +/* + * Run from the worker thread, set up signal handlers for a decoder. + */ +static void SetupDecoderSignalHandlers() +{ + SetupLogicalWorkerCommonHandlers(); + (void)gspqsignal(SIGTERM, ParallelDecodeShutdownHandler); + + gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); + (void)gs_signal_unblock_sigusr2(); +} + +/* + * Run from the worker thread, set up signal handlers for the reader. + */ +static void SetupLogicalReaderSignalHandlers() +{ + SetupLogicalWorkerCommonHandlers(); + (void)gspqsignal(SIGTERM, ParallelReaderShutdownHandler); + + gs_signal_setmask(&t_thrd.libpq_cxt.UnBlockSig, NULL); + (void)gs_signal_unblock_sigusr2(); +} + +/* + * Parallel decoding kill a reader. 
+ */ +static void LogicalReadKill(int code, Datum arg) +{ + ereport(LOG, (errmsg("LogicalReader process shutdown."))); + + /* Make sure active replication slots are released */ + if (t_thrd.slot_cxt.MyReplicationSlot != NULL) { + ReplicationSlotRelease(); + } + int slotId = DatumGetInt32(arg); + SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock)); + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState = PARALLEL_DECODE_WORKER_EXIT; + g_Logicaldispatcher[slotId].abnormal = true; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + SendSignalToReaderWorker(SIGTERM, DatumGetInt32(arg)); + SendSignalToDecodeWorker(SIGTERM, DatumGetInt32(arg)); +} + +/* + * Parallel decoding release resource. + */ +void ReleaseParallelDecodeResource(int slotId) +{ + SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].destroy_lock)); + MemoryContextDelete(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx = NULL; + SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].destroy_lock)); + g_Logicaldispatcher[slotId].active = false; + ereport(LOG, (errmsg("g_Logicaldispatcher[%d].active = false", slotId))); + g_Logicaldispatcher[slotId].abnormal = false; + return; +} + +/* + * Start a decoder thread. + */ +ThreadId StartDecodeWorkerThread(ParallelDecodeWorker *worker) +{ + worker->tid.thid = initialize_util_thread(PARALLEL_DECODE, worker); + return worker->tid.thid; +} + +/* + * Start the reader thread. + */ +ThreadId StartLogicalReadWorkerThread(ParallelDecodeReaderWorker *worker) +{ + worker->tid = initialize_util_thread(LOGICAL_READ_RECORD, worker); + return worker->tid; +} + +/* + * Parallel decoding start a decode worker. + */ +ParallelDecodeWorker* StartLogicalDecodeWorker(uint32 id, uint32 slotId, char* dbUser, char* dbName, char* slotname) +{ + ParallelDecodeWorker *worker = CreateLogicalDecodeWorker(id, dbUser, dbName, slotname, slotId); + worker->slotId = slotId; + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + ThreadId threadId = StartDecodeWorkerThread(worker); + if (threadId == 0) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), + errmsg("Create parallel logical decoder thread failed"), errdetail("id = %u, slotId = %u, %m", id, slotId), + errcause("System error."), erraction("Retry it in a few minutes."))); + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[id].threadState = PARALLEL_DECODE_WORKER_EXIT; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + ReleaseParallelDecodeResource(slotId); + return NULL; + } else { + ereport(LOG, (errmsg("StartParallelDecodeWorker successfully create logical decoder id: %u, threadId:%lu.", id, + worker->tid.thid))); + } + + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[id].threadId = threadId; + uint32 state = pg_atomic_read_u32(&(gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[id].threadState)); + if (state != PARALLEL_DECODE_WORKER_READY) { + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[id].threadState = PARALLEL_DECODE_WORKER_START; + } + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + return worker; +} + +/* + * Parallel decoding start all decode workers. 
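+ * Editor's note: startup is best-effort; if some decoders fail to start,
+ * decoding proceeds with the ones that did (see the `started` counter
+ * below), and only zero started workers raises an error.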
+ */
+void StartLogicalDecodeWorkers(int parallelism, uint32 slotId, char* dbUser, char* dbName, char* slotname)
+{
+    MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+    g_Logicaldispatcher[slotId].decodeWorkers = (ParallelDecodeWorker **)palloc0(sizeof(ParallelDecodeWorker *)
+        * parallelism);
+    MemoryContextSwitchTo(oldCtx);
+
+    /*
+     * This is necessary to avoid cache coherence problems, because we are
+     * using atomic operations to do the synchronization.
+     */
+    int started = 0;
+    for (; started < parallelism; started++) {
+        g_Logicaldispatcher[slotId].decodeWorkers[started] =
+            StartLogicalDecodeWorker(started, slotId, dbUser, dbName, slotname);
+        if (g_Logicaldispatcher[slotId].decodeWorkers[started] == NULL) {
+            break;
+        }
+    }
+
+    if (started == 0) {
+        ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+            errmsg("Start logical decode workers failed."),
+            errdetail("We need at least one worker thread, slotId = %u", slotId),
+            errcause("System error."), erraction("Retry it in a few minutes.")));
+    }
+    g_Logicaldispatcher[slotId].totalWorkerCount = started;
+    g_instance.comm_cxt.pdecode_cxt[slotId].totalNum = started;
+
+    g_Logicaldispatcher[slotId].curChangeNum = 0;
+    g_Logicaldispatcher[slotId].curTupleNum = 0;
+    g_Logicaldispatcher[slotId].curLogNum = 0;
+    oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+    g_Logicaldispatcher[slotId].chosedWorkerIds = (uint32 *)palloc0(sizeof(uint32) * started);
+    MemoryContextSwitchTo(oldCtx);
+
+    g_Logicaldispatcher[slotId].chosedWorkerCount = 0;
+}
+
+/*
+ * Parallel decoding: create the read worker.
+ */
+ParallelDecodeReaderWorker *CreateLogicalReadWorker(uint32 slotId, char* dbUser, char* dbName, char* slotname,
+    List *options)
+{
+    errno_t rc;
+    MemoryContext oldCtx = NULL;
+    oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx);
+    ParallelDecodeReaderWorker *worker = (ParallelDecodeReaderWorker *)palloc0(sizeof(ParallelDecodeReaderWorker));
+    MemoryContextSwitchTo(oldCtx);
+
+    worker->slotId = slotId;
+    worker->tid = InvalidTid;
+
+    worker->queue = LogicalQueueCreate(maxQueueLen, slotId);
+    rc = memcpy_s(worker->slotname, NAMEDATALEN, slotname, strlen(slotname));
+    securec_check(rc, "\0", "\0");
+    rc = memcpy_s(worker->dbUser, NAMEDATALEN, dbUser, strlen(dbUser));
+    securec_check(rc, "\0", "\0");
+
+    rc = memcpy_s(worker->dbName, NAMEDATALEN, dbName, strlen(dbName));
+    securec_check(rc, "\0", "\0");
+
+    SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    g_instance.comm_cxt.pdecode_cxt[slotId].ParallelReaderWorkerStatus.threadState = PARALLEL_DECODE_WORKER_INVALID;
+    SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    SpinLockInit(&(worker->rwlock));
+    return worker;
+}
+
+/*
+ * Parallel decoding: start the read worker.
+ */
+ThreadId StartDecodeReadWorker(ParallelDecodeReaderWorker *worker)
+{
+    ThreadId threadId = StartLogicalReadWorkerThread(worker);
+    int slotId = worker->slotId;
+    if (threadId == 0) {
+        ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), errmsg("Cannot create reader worker thread"),
+            errdetail("N/A"), errcause("System error."), erraction("Retry it in a few minutes.")));
+        SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+        g_instance.comm_cxt.pdecode_cxt[slotId].ParallelReaderWorkerStatus.threadState = PARALLEL_DECODE_WORKER_EXIT;
+        SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+        return threadId;
+    } else {
+        ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+            errmsg("StartDecodeReadWorker successfully created decodeReaderWorker id:%u, threadId:%lu",
+            worker->id, threadId)));
+    }
+    SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    g_instance.comm_cxt.pdecode_cxt[worker->slotId].ParallelReaderWorkerStatus.threadId = threadId;
+    uint32 state = g_instance.comm_cxt.pdecode_cxt[worker->slotId].ParallelReaderWorkerStatus.threadState;
+    SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    if (state != PARALLEL_DECODE_WORKER_READY) {
+        SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+        g_instance.comm_cxt.pdecode_cxt[worker->slotId].ParallelReaderWorkerStatus.threadState =
+            PARALLEL_DECODE_WORKER_START;
+        SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    }
+
+    return threadId;
+}
+
+/*
+ * Check whether there are decoding threads that have not exited.
+ */
+void CheckAliveDecodeWorkers(uint32 slotId)
+{
+    uint32 state = -1;
+
+    /*
+     * Check the reader thread state.
+     */
+    SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    state = g_instance.comm_cxt.pdecode_cxt[slotId].ParallelReaderWorkerStatus.threadState;
+    SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+    if (state != PARALLEL_DECODE_WORKER_INVALID) {
+        ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+            errmsg("Check alive reader worker failed"),
+            errdetail("Logical reader thread %lu is still alive",
+            g_instance.comm_cxt.pdecode_cxt[slotId].ParallelReaderWorkerStatus.threadId),
+            errcause("The previous reader thread exited too slowly"),
+            erraction("Make sure all previous worker threads have exited.")));
+    }
+
+    g_instance.comm_cxt.pdecode_cxt[slotId].ParallelReaderWorkerStatus.threadId = 0;
+
+    /*
+     * Check the decoder thread states.
+     */
+    for (uint32 i = 0; i < MAX_PARALLEL_DECODE_NUM; ++i) {
+        SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+        state = g_instance.comm_cxt.pdecode_cxt[slotId].ParallelDecodeWorkerStatusList[i].threadState;
+        SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[slotId].rwlock));
+        if (state != PARALLEL_DECODE_WORKER_INVALID) {
+            ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+                errmsg("Check alive decoder workers failed"),
+                errdetail("Logical decoder thread %lu is still alive",
+                g_instance.comm_cxt.pdecode_cxt[slotId].ParallelDecodeWorkerStatusList[i].threadId),
+                errcause("The previous decoder thread exited too slowly"),
+                erraction("Make sure all previous worker threads have exited.")));
+        }
+        g_instance.comm_cxt.pdecode_cxt[slotId].ParallelDecodeWorkerStatusList[i].threadId = 0;
+    }
+    g_instance.comm_cxt.pdecode_cxt[slotId].totalNum = 0;
+}
+
+/*
+ * Parallel decoding: get the parallel decode worker count.
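+ * Editor's note: parallel_decode_num is read from the per-slot decoding
+ * options (pOptions); presumably it is supplied by the client when the
+ * replication connection is set up (assumption, not shown in this patch).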
+ */
+int GetDecodeParallelism(int slotId)
+{
+    return g_Logicaldispatcher[slotId].pOptions.parallel_decode_num;
+}
+
+/*
+ * Initialize the logical dispatcher, which records the states of all workers.
+ */
+void InitLogicalDispatcher(LogicalDispatcher *dispatcher)
+{
+    dispatcher->totalCostTime = 0;
+    dispatcher->txnCostTime = 0;
+    dispatcher->pprCostTime = 0;
+    dispatcher->active = false;
+    dispatcher->decodeWorkerId = 0;
+    dispatcher->num = 0;
+    dispatcher->firstLoop = true;
+    dispatcher->freeGetTupleHead = NULL;
+    dispatcher->freeTupleHead = NULL;
+    dispatcher->freeChangeHead = NULL;
+    dispatcher->freeLogicalLogHead = NULL;
+}
+
+/*
+ * Get the number of ready decoders.
+ */
+int GetReadyDecodeWorker(uint32 slotId)
+{
+    int readyWorkerCnt = 0;
+    knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt;
+    SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+    for (int i = 0; i < gDecodeCxt[slotId].totalNum; i++) {
+        uint32 state = gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadState;
+        if (state == PARALLEL_DECODE_WORKER_READY) {
+            ++readyWorkerCnt;
+        }
+    }
+    SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+    return readyWorkerCnt;
+}
+
+/*
+ * Get the number of ready readers (at most one per slot).
+ */
+uint32 GetReadyDecodeReader(uint32 slotId)
+{
+    knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt;
+    SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+    uint32 state = gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState;
+    SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+    if (state == PARALLEL_DECODE_WORKER_READY) {
+        return 1;
+    }
+    return 0;
+}
+
+/*
+ * Wait until all workers are ready.
+ */
+void WaitWorkerReady(uint32 slotId)
+{
+    uint32 loop = 0;
+    int readyDecoder = 0;
+    uint32 readyReader = 0;
+    const uint32 maxWaitSecondsForDecoder = 6000;
+    knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt;
+
+    /* Poll for at most maxWaitSecondsForDecoder rounds, sleeping 1ms per round. */
+    for (loop = 0; loop < maxWaitSecondsForDecoder; ++loop) {
+        /* This connection exits if a logical decode thread is abnormal. */
+        if (g_Logicaldispatcher[slotId].abnormal) {
+            ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+                errmsg("Wait for worker ready failed."), errdetail("At least one thread is abnormal."),
+                errcause("System error."), erraction("Retry it in a few minutes.")));
+        }
+
+        readyDecoder = GetReadyDecodeWorker(slotId);
+        readyReader = GetReadyDecodeReader(slotId);
+        if ((readyDecoder == gDecodeCxt[slotId].totalNum) && (readyReader == 1)) {
+            ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+                errmsg("WaitWorkerReady total worker count:%d, readyWorkerCnt:%d",
+                    g_Logicaldispatcher[slotId].totalWorkerCount, readyDecoder)));
+            break;
+        }
+        const long sleepTime = 1000;
+        pg_usleep(sleepTime);
+    }
+    SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+    gDecodeCxt[slotId].state = DECODE_STARTING_END;
+    SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+    readyDecoder = GetReadyDecodeWorker(slotId);
+    readyReader = GetReadyDecodeReader(slotId);
+    int totalDecoders = gDecodeCxt[slotId].totalNum;
+    if (loop == maxWaitSecondsForDecoder &&
+        (readyDecoder != totalDecoders || readyReader != 1)) {
+        ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), errmsg("Wait for worker ready failed"),
+            errdetail("Not all workers or no reader are ready for work. totalWorkerCount: %d",
+                g_Logicaldispatcher[slotId].totalWorkerCount),
+            errcause("System error."), erraction("Retry it in a few minutes.")));
+    }
+
+    ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+        errmsg("WaitWorkerReady total worker count:%d, readyWorkerCnt:%d",
+            g_Logicaldispatcher[slotId].totalWorkerCount, readyDecoder)));
+}
+
+/*
+ * Send a signal to the reader and all decoders.
+ */
+void SendSingalToReaderAndDecoder(uint32 slotId, int signal)
+{
+    ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), errmsg("Send signal to reader and decoders.")));
+    uint32 state = -1;
+    knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt;
+    /* kill reader */
+    SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+    state = gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState;
+    SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+    if (state != PARALLEL_DECODE_WORKER_EXIT) {
+        int err = gs_signal_send(gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadId, signal);
+        if (0 != err) {
+            ereport(WARNING, (errmsg("Walsender kill reader(pid %lu, signal %d) failed: \"%s\",",
+                gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadId, signal, gs_strerror(err))));
+        }
+    }
+
+    /* kill decoders */
+    for (int i = 0; i < gDecodeCxt[slotId].totalNum; ++i) {
+        SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+        state = gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadState;
+        SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+        if (state != PARALLEL_DECODE_WORKER_EXIT) {
+            int err = gs_signal_send(gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadId,
+                signal);
+            if (0 != err) {
+                ereport(WARNING, (errmsg("Walsender kill decoder(pid %lu, signal %d) failed: \"%s\",",
+                    gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadId, signal, gs_strerror(err))));
+            }
+        }
+    }
+}
+
+/* Check whether the reader and all decoder threads are in state PARALLEL_DECODE_WORKER_EXIT. */
+bool logicalWorkerCouldExit(int slotId)
+{
+    uint32 state = -1;
+    knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt;
+    SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+    state = gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState;
+    SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+    if (state != PARALLEL_DECODE_WORKER_EXIT) {
+        return false;
+    }
+
+    for (int i = 0; i < gDecodeCxt[slotId].totalNum; ++i) {
+        SpinLockAcquire(&(gDecodeCxt[slotId].rwlock));
+        state = gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadState;
+        SpinLockRelease(&(gDecodeCxt[slotId].rwlock));
+        if (state != PARALLEL_DECODE_WORKER_EXIT) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/*
+ * Stop all decode workers.
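+ * Registered as an on_shmem_exit() callback (see StartLogicalLogWorkers);
+ * sends SIGTERM to the reader and all decoders, then waits for them to reach
+ * PARALLEL_DECODE_WORKER_EXIT before releasing the dispatcher resources.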
+ */ +void StopParallelDecodeWorkers(int code, Datum arg) +{ + uint32 slotId = DatumGetUInt32(arg); + SendSingalToReaderAndDecoder(slotId, SIGTERM); + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + + uint64 count = 0; + while (!logicalWorkerCouldExit(slotId)) { + ++count; + if ((count & OUTPUT_WAIT_COUNT) == OUTPUT_WAIT_COUNT) { + ereport(WARNING, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), + errmsg("StopDecodeWorkers wait reader and decoder exit"))); + if ((count & PRINT_ALL_WAIT_COUNT) == PRINT_ALL_WAIT_COUNT) { + ereport(PANIC, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), + errmsg("StopDecodeWorkers wait too long!!!"), errdetail("N/A"), errcause("System error."), + erraction("Retry it in a few minutes."))); + } + const long sleepCheckTime = 100; + pg_usleep(sleepCheckTime); + } + } + + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + gDecodeCxt[slotId].ParallelReaderWorkerStatus.threadState = PARALLEL_DECODE_WORKER_INVALID; + for (int i = 0; i < gDecodeCxt[slotId].totalNum; ++i) { + gDecodeCxt[slotId].ParallelDecodeWorkerStatusList[i].threadState = PARALLEL_DECODE_WORKER_INVALID; + } + + gDecodeCxt[slotId].state = DECODE_DONE; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + ReleaseParallelDecodeResource(slotId); + ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), + errmsg("parallel decode thread exit in startup"))); +} + +/* + * Get init a logicalDispatcher slot. + * The return value of the function is logicalDispatcher slot ID. + */ +int GetLogicalDispatcher() +{ + int slotId = -1; + int max_replication_slots = g_instance.attr.attr_storage.max_replication_slots; + LWLockAcquire(ParallelDecodeLock, LW_EXCLUSIVE); + MemoryContext ctx = AllocSetContextCreate(g_instance.instance_context, "ParallelDecodeDispatcher", + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE, SHARED_CONTEXT); + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + static int i = 0; + + for (; i < max_replication_slots; i++) { + if (g_Logicaldispatcher[i].active == false) { + slotId = i; + errno_t rc = memset_s(&g_Logicaldispatcher[slotId], sizeof(LogicalDispatcher), 0, + sizeof(LogicalDispatcher)); + securec_check(rc, "", ""); + InitLogicalDispatcher(&g_Logicaldispatcher[slotId]); + g_Logicaldispatcher[i].active = true; + + ereport(LOG, (errmsg("g_Logicaldispatcher[%d].active = true", slotId))); + g_Logicaldispatcher[i].abnormal = false; + gDecodeCxt[i].parallelDecodeCtx = ctx; + break; + } + } + LWLockRelease(ParallelDecodeLock); + if(slotId == -1) { + return slotId; + } + + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + int state = gDecodeCxt[slotId].state; + if (state == DECODE_IN_PROGRESS) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), errmsg("Get logical dispatcher failed"), + errdetail("walsender reconnected thread exit."), errcause("System error."), + erraction("Retry it in a few minutes."))); + } + gDecodeCxt[slotId].state = DECODE_STARTING_BEGIN; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + return slotId; +} + +/* + * Check whether a table is in the table white list. + */ +bool CheckWhiteList(const List *whiteList, const char *schema, const char *table) +{ + /* If the white list is empty, we do not need to filter tables. 
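+     * Otherwise a table passes the filter when some white-list entry matches
+     * both its schema and its name; a NULL schema or table in an entry acts
+     * as a wildcard (produced by "*.tab" or "schema.*" in the option value).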
*/ + if (list_length(whiteList) <= 0) { + return true; + } + ListCell *lc = NULL; + foreach(lc, whiteList) { + chosenTable *cTable = (chosenTable *)lfirst(lc); + + if ((cTable->schema == NULL || strncmp(cTable->schema, schema, strlen(schema)) == 0) && + (cTable->table == NULL || strncmp(cTable->table, table, strlen(table)) == 0)) { + return true; + } + } + return false; +} + +/* + * Parse a list of raw table names into a list of schema and table names. + */ +static bool ParseSchemaAndTableName(List *tableList, List **tableWhiteList) +{ + ListCell *lc = NULL; + char *str = NULL; + char *startPos = NULL; + char *curPos = NULL; + size_t len = 0; + chosenTable *cTable = NULL; + bool anySchema = false; + bool anyTable = false; + errno_t rc = 0; + + foreach(lc, tableList) { + str = (char*)lfirst(lc); + cTable = (chosenTable *)palloc(sizeof(chosenTable)); + + if (*str == '*' && *(str + 1) == '.') { + cTable->schema = NULL; + anySchema = true; + } + startPos = str; + curPos = str; + while (*curPos != '\0' && *curPos != '.') { + curPos++; + } + len = (size_t)(curPos - startPos); + + if (*curPos == '\0') { + pfree(cTable); + return false; + } else { + if (!anySchema) { + cTable->schema = (char *)palloc0((len + 1) * sizeof(char)); + errno_t rc = strncpy_s(cTable->schema, len + 1, startPos, len); + securec_check(rc, "", ""); + } + + curPos++; + startPos = curPos; + + if (*startPos == '*' && *(startPos + 1) == '\0') { + cTable->table = NULL; + anyTable = true; + } + while (*curPos != '\0') { + curPos++; + } + len = (size_t)(curPos - startPos); + + if (!anyTable) { + cTable->table = (char *)palloc((len + 1) * sizeof(char)); + rc = strncpy_s(cTable->table, len + 1, startPos, len); + securec_check(rc, "", ""); + } + } + *tableWhiteList = lappend(*tableWhiteList, cTable); + } + return true; +} + +/* + * Parse a rawstring to a list of table names. + */ +bool ParseStringToWhiteList(char *tableString, List **tableWhiteList) +{ + char *curPos = tableString; + bool finished = false; + List *tableList = NIL; + while (isspace(*curPos)) { + curPos++; + } + if (*curPos == '\0') { + return true; + } + + do { + char* tmpName = curPos; + while (*curPos != '\0' && *curPos != ',' && !isspace(*curPos)) { + curPos++; + } + char *tmpEnd = curPos; + if (tmpName == curPos) { + list_free_deep(tableList); + return false; + } + while (isspace(*curPos)) { + curPos++; + } + if (*curPos == '\0') { + finished = true; + } else if (*curPos == ',') { + curPos++; + while (isspace(*curPos)) { + curPos++; + } + } else { + list_free_deep(tableList); + return false; + } + *tmpEnd = '\0'; + char *tableName = pstrdup(tmpName); + tableList = lappend(tableList, tableName); + } while (!finished); + + if (!ParseSchemaAndTableName(tableList, tableWhiteList)) { + list_free_deep(tableList); + return false; + } + + list_free_deep(tableList); + return true; +} + +/* + * Check decode_style, 't' for text and 'j' for json. 
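+ * A third value, 'b', selects binary output; each style maps to the
+ * corresponding parallel_decode_change_to_* callback, and any other
+ * character is rejected with an ERROR.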
+ */
+static inline void CheckDecodeStyle(ParallelDecodeOption *data, DefElem* elem)
+{
+    if (elem->arg != NULL) {
+        data->decode_style = *strVal(elem->arg);
+        if (data->decode_style != 'j' && data->decode_style != 't' && data->decode_style != 'b') {
+            ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
+                errdetail("N/A"), errcause("Wrong input value"), erraction("Input \'j\', \'t\' or \'b\'.")));
+        }
+        if (data->decode_style == 'j') {
+            data->decode_change = parallel_decode_change_to_json;
+        } else if (data->decode_style == 't') {
+            data->decode_change = parallel_decode_change_to_text;
+        } else {
+            data->decode_change = parallel_decode_change_to_bin;
+        }
+    }
+}
+
+/*
+ * Parse the white list for logical decoding.
+ */
+void ParseWhiteList(List **whiteList, DefElem* elem)
+{
+    list_free_deep(*whiteList);
+    *whiteList = NIL;
+    if (elem->arg != NULL) {
+        char *tableString = pstrdup(strVal(elem->arg));
+        if (!ParseStringToWhiteList(tableString, whiteList)) {
+            pfree(tableString);
+            ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
+                errdetail("N/A"), errcause("Wrong input value"), erraction("Use ',' to separate table names.")));
+        }
+        pfree(tableString);
+    }
+}
+
+/*
+ * Parse the parallel decode num for logical decoding.
+ */
+void ParseParallelDecodeNum(const DefElem *elem, int * const parallelDecodeNum)
+{
+    if (strncmp(elem->defname, "parallel-decode-num", sizeof("parallel-decode-num")) == 0) {
+        if (elem->arg != NULL && (!parse_int(strVal(elem->arg), parallelDecodeNum, 0, NULL) ||
+            *parallelDecodeNum <= 0 || *parallelDecodeNum > MAX_PARALLEL_DECODE_NUM)) {
+            ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
+                errdetail("N/A"), errcause("Wrong input value"), erraction("Input a number between 1 and 20.")));
+        }
+    }
+}
+
+int ParseParallelDecodeNumOnly(List *options)
+{
+    ListCell *option = NULL;
+    int parallelDecodeNum = 1;
+    foreach (option, options) {
+        DefElem* elem = (DefElem*)lfirst(option);
+        ParseParallelDecodeNum(elem, &parallelDecodeNum);
+    }
+    return parallelDecodeNum;
+}
+
+static void CheckBatchSendingOption(const DefElem* elem, int *sendingBatch)
+{
+    if (elem->arg != NULL && (!parse_int(strVal(elem->arg), sendingBatch, 0, NULL) ||
+        *sendingBatch < 0 || *sendingBatch > 1)) {
+        ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+            errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname),
+            errdetail("N/A"), errcause("Wrong input value"), erraction("Input 0 or 1.")));
+    }
+}
+
+/*
+ * Parse a single logical decoding option.
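+ * Each option arrives as a DefElem whose defname is the option name and whose
+ * arg holds the value. For example, a client could request parallel decoding
+ * with (hypothetical slot name and values):
+ *     START_REPLICATION SLOT slot1 LOGICAL 0/0 (parallel-decode-num '4',
+ *         decode-style 't', white-table-list 'public.t1,public.t2')
+ * Unknown options (other than parallel-decode-num, which is parsed earlier by
+ * ParseParallelDecodeNumOnly) raise an ERROR.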
+ */ +static void ParseDecodingOption(ParallelDecodeOption *data, ListCell *option) +{ + DefElem* elem = (DefElem*)lfirst(option); + + if (strncmp(elem->defname, "include-xids", sizeof("include-xids")) == 0) { + CheckBooleanOption(elem, &data->include_xids, true); + } else if (strncmp(elem->defname, "include-timestamp", sizeof("include-timestamp")) == 0) { + CheckBooleanOption(elem, &data->include_timestamp, false); + } else if (strncmp(elem->defname, "skip-empty-xacts", sizeof("skip-empty-xacts")) == 0) { + CheckBooleanOption(elem, &data->skip_empty_xacts, false); + } else if (strncmp(elem->defname, "only-local", sizeof("only-local")) == 0) { + CheckBooleanOption(elem, &data->only_local, true); + } else if (strncmp(elem->defname, "standby-connection", sizeof("standby-connection")) == 0) { + CheckBooleanOption(elem, &t_thrd.walsender_cxt.standbyConnection, false); + } else if (strncmp(elem->defname, "sending-batch", sizeof("sending-batch")) == 0) { + CheckBatchSendingOption(elem, &data->sending_batch); + } else if (strncmp(elem->defname, "decode-style", sizeof("decode-style")) == 0) { + CheckDecodeStyle(data, elem); + } else if (strncmp(elem->defname, "white-table-list", sizeof("white-table-list")) == 0) { + ParseWhiteList(&data->tableWhiteList, elem); + } else if (strncmp(elem->defname, "parallel-decode-num", sizeof("parallel-decode-num")) != 0) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("option \"%s\" = \"%s\" is unknown", elem->defname, elem->arg ? strVal(elem->arg) : "(null)"), + errdetail("N/A"), errcause("Wrong input option"), erraction("Please check documents for help"))); + } +} + +/* + * Parse logical decoding options. + */ +static void ParseDecodingOptions(ParallelDecodeOption *data, List *options) +{ + ListCell *option = NULL; + foreach (option, options) { + ParseDecodingOption(data, option); + } +} + +/* + * Start a reader thread and N decoder threads. + * When the client initiates a streaming decoding connection, if it is in parallel decoding mode, + * we need to start a group of parallel decoding threads, including reader, decoder and sender. + * Each group of decoding threads will occupy a logicaldispatcher slot. + * The return value of the function is logicaldispatcher slot ID. 
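+ * If no dispatcher slot is free (GetLogicalDispatcher() returns -1), an
+ * ERROR is raised and the walsender connection exits.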
+ */ +int StartLogicalLogWorkers(char* dbUser, char* dbName, char* slotname, List *options, int parallelDecodeNum) +{ + int slotId = GetLogicalDispatcher(); + if (slotId == -1) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG), + errmsg("can't create logical decode dispatcher"), errdetail("N/A"), errcause("System error."), + erraction("Retry it in a few minutes."))); + } + on_shmem_exit(StopParallelDecodeWorkers, slotId); + if (parallelDecodeNum > 1) { + CheckAliveDecodeWorkers(slotId); + ParallelDecodeOption *pOptions = &g_Logicaldispatcher[slotId].pOptions; + pOptions->include_xids = true; + pOptions->include_timestamp = false; + pOptions->skip_empty_xacts = false; + pOptions->only_local = true; + pOptions->decode_style = 'b'; + pOptions->parallel_decode_num = parallelDecodeNum; + pOptions->sending_batch = 0; + pOptions->decode_change = parallel_decode_change_to_text; + ParseDecodingOptions(&g_Logicaldispatcher[slotId].pOptions, options); + errno_t rc = memcpy_s(g_Logicaldispatcher[slotId].slotName, NAMEDATALEN, slotname, strlen(slotname)); + securec_check(rc, "", ""); + g_Logicaldispatcher[slotId].readWorker = CreateLogicalReadWorker(slotId, dbUser, dbName, slotname, options); + g_Logicaldispatcher[slotId].readWorker->tid = StartDecodeReadWorker(g_Logicaldispatcher[slotId].readWorker); + if (g_Logicaldispatcher[slotId].readWorker != NULL) + StartLogicalDecodeWorkers(parallelDecodeNum, slotId, dbUser, dbName, slotname); + + WaitWorkerReady(slotId); + knl_g_parallel_decode_context *gDecodeCxt = g_instance.comm_cxt.pdecode_cxt; + SpinLockAcquire(&(gDecodeCxt[slotId].rwlock)); + gDecodeCxt[slotId].state = DECODE_IN_PROGRESS; + SpinLockRelease(&(gDecodeCxt[slotId].rwlock)); + } + return slotId; +} + +void ParallelDecodeWorkerMain(void* point) +{ + ParallelDecodeWorker *worker = (ParallelDecodeWorker*)point; + ParallelReorderBufferChange* LogicalChangeHead; + + /* we are a postmaster subprocess now */ + IsUnderPostmaster = true; + + t_thrd.proc_cxt.MyProcPid = gs_thread_self(); + /* record Start Time for logging */ + t_thrd.proc_cxt.MyStartTime = time(NULL); + t_thrd.proc_cxt.MyProgName = "LogicalDecodeWorker"; + + t_thrd.role = PARALLEL_DECODE; + /* Identify myself via ps */ + init_ps_display("Logical decoding worker process", "", "", ""); + + ereport(LOG, (errmsg("Logical Decoding process started"))); + + SetProcessingMode(InitProcessing); + + on_shmem_exit(ParallelDecodeKill, Int32GetDatum(worker->slotId)); + + t_thrd.proc_cxt.PostInit->SetDatabaseAndUser(worker->dbName, + InvalidOid, worker->dbUser); + t_thrd.proc_cxt.PostInit->InitParallelDecode(); + SetProcessingMode(NormalProcessing); + + t_thrd.logicalreadworker_cxt.ReadWorkerCxt = AllocSetContextCreate(t_thrd.top_mem_cxt, "Read Worker", + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + (void)MemoryContextSwitchTo(t_thrd.logicalreadworker_cxt.ReadWorkerCxt); + + struct ParallelLogicalDecodingContext* ctx = ParallelCreateDecodingContext( + 0, NULL, false, logical_read_xlog_page, worker->slotId); + ParallelDecodingData *data = (ParallelDecodingData *)palloc0(sizeof(ParallelDecodingData)); + data->context = (MemoryContext)AllocSetContextCreate(ctx->context, "text conversion context", + ALLOCSET_DEFAULT_SIZES); + errno_t rc = memcpy_s(&data->pOptions, sizeof(ParallelDecodeOption), &g_Logicaldispatcher[worker->slotId].pOptions, + sizeof(ParallelDecodeOption)); + securec_check(rc, "", ""); + ctx->output_plugin_private = data; + + t_thrd.role = PARALLEL_DECODE; + + ereport(LOG, 
(errmsg("Parallel Decode Worker started"))); + + SetupDecoderSignalHandlers(); + t_thrd.parallel_decode_cxt.slotId = worker->slotId; + t_thrd.parallel_decode_cxt.parallelDecodeId = worker->id; + + knl_g_parallel_decode_context* pdecode_cxt = g_instance.comm_cxt.pdecode_cxt; + int id = worker->id; + SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[worker->slotId].rwlock)); + pdecode_cxt[worker->slotId].ParallelDecodeWorkerStatusList[id].threadState = PARALLEL_DECODE_WORKER_READY; + SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[worker->slotId].rwlock)); + + while (true) { + if (t_thrd.parallel_decode_cxt.shutdown_requested) { + ereport(LOG, (errmsg("Parallel Decode Worker stop"))); + proc_exit(0); + } + + LogicalChangeHead = (ParallelReorderBufferChange *)LogicalQueueTop(worker->changeQueue); + if (LogicalChangeHead == NULL) { + continue; + } + + ParallelDecodeChange(LogicalChangeHead, ctx, worker); + LogicalQueuePop(worker->changeQueue); + ParallelFreeChange(LogicalChangeHead, worker->slotId); + } +} + +ParallelDecodeWorker *CreateLogicalDecodeWorker(uint32 id, char* dbUser, char* dbName, char* slotname, uint32 slotId) +{ + MemoryContext oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + ParallelDecodeWorker *worker = (ParallelDecodeWorker *)palloc0(sizeof(ParallelDecodeWorker)); + MemoryContextSwitchTo(oldCtx); + + worker->id = id; + worker->tid.thid = InvalidTid; + worker->changeQueue = LogicalQueueCreate(maxQueueLen, slotId); + worker->LogicalLogQueue = LogicalQueueCreate(maxQueueLen, slotId); + errno_t rc = memcpy_s(worker->slotname, NAMEDATALEN, slotname, strlen(slotname)); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(worker->dbUser, NAMEDATALEN, dbUser, strlen(dbUser)); + securec_check(rc, "\0", "\0"); + rc = memcpy_s(worker->dbName, NAMEDATALEN, dbName, strlen(dbName)); + securec_check(rc, "\0", "\0"); + return worker; +} + +/* + * The main functions of read and parse logs in parallel decoding. 
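+ * The reader attaches to the replication slot, reads WAL records via
+ * XLogReadRecord() and distributes the parsed changes to the decoder queues;
+ * see LogicalReadRecordMain() below.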
+ */ +void LogicalReadWorkerMain(void* point) +{ + ParallelDecodeReaderWorker *reader = (ParallelDecodeReaderWorker*)point; + /* we are a postmaster subprocess now */ + IsUnderPostmaster = true; + + t_thrd.proc_cxt.MyProcPid = gs_thread_self(); + /* record Start Time for logging */ + t_thrd.proc_cxt.MyStartTime = time(NULL); + t_thrd.proc_cxt.MyProgName = "LogicalReadWorker"; + + t_thrd.role = LOGICAL_READ_RECORD; + /* Identify myself via ps */ + init_ps_display("Logical readworker process", "", "", ""); + t_thrd.xlog_cxt.ThisTimeLineID = GetRecoveryTargetTLI(); + ereport(LOG, (errmsg("LogicalReader process started, TimeLineId = %u", t_thrd.xlog_cxt.ThisTimeLineID))); + + SetProcessingMode(InitProcessing); + + on_shmem_exit(LogicalReadKill, Int32GetDatum(reader->slotId)); + + t_thrd.proc_cxt.PostInit->SetDatabaseAndUser(reader->dbName, + InvalidOid, reader->dbUser); + t_thrd.proc_cxt.PostInit->InitParallelDecode(); + SetProcessingMode(NormalProcessing); + + t_thrd.logicalreadworker_cxt.ReadWorkerCxt = AllocSetContextCreate(t_thrd.top_mem_cxt, "Read Worker", + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + (void)MemoryContextSwitchTo(t_thrd.logicalreadworker_cxt.ReadWorkerCxt); + + SetupLogicalReaderSignalHandlers(); + SpinLockAcquire(&(g_instance.comm_cxt.pdecode_cxt[reader->slotId].rwlock)); + g_instance.comm_cxt.pdecode_cxt[reader->slotId].ParallelReaderWorkerStatus.threadState = + PARALLEL_DECODE_WORKER_READY; + SpinLockRelease(&(g_instance.comm_cxt.pdecode_cxt[reader->slotId].rwlock)); + + LogicalReadRecordMain(reader); +} + +void LogicalReadRecordMain(ParallelDecodeReaderWorker *worker) +{ + struct ParallelLogicalDecodingContext* ctx; + + /* make sure that our requirements are still fulfilled */ + CheckLogicalDecodingRequirements(u_sess->proc_cxt.MyDatabaseId); + + XLogRecPtr startptr = InvalidXLogRecPtr; + Assert(!t_thrd.slot_cxt.MyReplicationSlot); + ReplicationSlotAcquire(worker->slotname, false); + { + /* + * Rebuild snap dir + */ + char snappath[MAXPGPATH]; + struct stat st; + int rc = 0; + + rc = snprintf_s(snappath, + MAXPGPATH, + MAXPGPATH - 1, + "pg_replslot/%s/snap", + NameStr(t_thrd.slot_cxt.MyReplicationSlot->data.name)); + securec_check_ss(rc, "\0", "\0"); + + if (stat(snappath, &st) == 0 && S_ISDIR(st.st_mode)) { + if (!rmtree(snappath, true)) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode_for_file_access(), + errmsg("could not remove directory \"%s\": %m", snappath), errdetail("N/A"), + errcause("System error."), erraction("Retry it in a few minutes."))); + } + } + if (mkdir(snappath, S_IRWXU) < 0) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", snappath), errdetail("N/A"), errcause("System error."), + erraction("Retry it in a few minutes."))); + } + } + /* + * Initialize position to the last ack'ed one, then the xlog records begin + * to be shipped from that position. 
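+ * restart_lsn determines where WAL reading starts, while confirmed_flush is
+ * reported back to the client as the initial sentPtr.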
+ */ + int slotId = worker->slotId; + g_Logicaldispatcher[slotId].MyReplicationSlot = t_thrd.slot_cxt.MyReplicationSlot; + ctx = ParallelCreateDecodingContext( + 0, NULL, false, logical_read_xlog_page, worker->slotId); + MemoryContext oldContext = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + ParallelDecodingData *data = (ParallelDecodingData *)palloc0(sizeof(ParallelDecodingData)); + data->pOptions.only_local = g_Logicaldispatcher[slotId].pOptions.only_local; + ctx->output_plugin_private = data; + MemoryContextSwitchTo(oldContext); + + /* Start reading WAL from the oldest required WAL. */ + t_thrd.walsender_cxt.logical_startptr = t_thrd.slot_cxt.MyReplicationSlot->data.restart_lsn; + + /* + * Report the location after which we'll send out further commits as the + * current sentPtr. + */ + t_thrd.walsender_cxt.sentPtr = t_thrd.slot_cxt.MyReplicationSlot->data.confirmed_flush; + startptr = t_thrd.slot_cxt.MyReplicationSlot->data.restart_lsn; + + ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("init decode parallel"))); + + ParallelReorderBufferChange *change = NULL; + + change = ParallelReorderBufferGetChange(ctx->reorder, slotId); + + change->action = PARALLEL_REORDER_BUFFER_CHANGE_CONFIRM_FLUSH; + change->lsn = t_thrd.slot_cxt.MyReplicationSlot->data.confirmed_flush; + PutChangeQueue(slotId, change); + + while (true) { + if (t_thrd.logicalreadworker_cxt.shutdown_requested) { + ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("Reader worker exit due to request"))); + proc_exit(1); + } + XLogRecord *record = NULL; + char *errm = NULL; + + record = XLogReadRecord(ctx->reader, startptr, &errm); + if (errm != NULL) { + const uint32 upperLen = 32; + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("Stopped to parse any valid XLog Record at %X/%X: %s.", + (uint32)(ctx->reader->EndRecPtr >> upperLen), (uint32)ctx->reader->EndRecPtr, errm), + errdetail("N/A"), errcause("Xlog damaged or removed."), + erraction("Contact engineer to recover xlog files."))); + } + startptr = InvalidXLogRecPtr; + + if (record != NULL) { + ParseProcessRecord(ctx, ctx->reader, worker); + } + + pg_atomic_write_u64(&g_Logicaldispatcher[slotId].sentPtr, ctx->reader->EndRecPtr); + }; + ReplicationSlotRelease(); +} + diff --git a/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp b/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp new file mode 100644 index 000000000..3991af570 --- /dev/null +++ b/src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp @@ -0,0 +1,1567 @@ +/* --------------------------------------------------------------------------------------- + * + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * parallel_reorderbuffer.cpp + * openGauss parallel decoding reorder buffer management + * + * IDENTIFICATION + * src/gausskernel/storage/replication/logical/parallel_reorderbuffer.cpp + * + * NOTES + * This module handles the same question with reorderbuffer.cpp, + * under the circumstances of parallel decoding. + * --------------------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "knl/knl_variable.h" + +#include +#include + +#include "access/heapam.h" +#include "miscadmin.h" + +#include "access/rewriteheap.h" +#include "access/transam.h" +#include "access/tuptoaster.h" +#include "access/xact.h" + +#include "catalog/catalog.h" +#include "catalog/pg_namespace.h" +#include "lib/binaryheap.h" +#include "libpq/pqformat.h" + +#include "replication/logical.h" +#include "replication/reorderbuffer.h" +#include "replication/parallel_decode.h" +#include "replication/logical.h" +#include "replication/slot.h" +#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */ +#include "access/xlog_internal.h" +#include "utils/atomic.h" + +#include "storage/buf/bufmgr.h" +#include "storage/smgr/fd.h" +#include "storage/sinval.h" + +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/combocid.h" +#include "utils/memutils.h" +#include "utils/relcache.h" + +#include "utils/relfilenodemap.h" + +static void ParallelReorderBufferSerializeReserve(ParallelReorderBuffer *rb, Size sz); +static void ParallelReorderBufferCheckSerializeTXN(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, + int slotId); +static void ParallelReorderBufferSerializeTXN(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int slotId); +static void ParallelReorderBufferSerializeChange(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int fd, + logicalLog *change); +static Size ParallelReorderBufferRestoreChanges(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int *fd, + XLogSegNo *segno, int slotId); +static void ParallelReorderBufferRestoreChange(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, char *data, + int slotId); + + +/* Parallel decoding batch sending unit length is set to 1MB. 
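+ * (It only takes effect when batch sending is enabled through the
+ * sending-batch option.)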
*/ +static const int g_batch_unit_length = 1 * 1024 * 1024; + +void ParallelReorderBufferQueueChange(ParallelReorderBuffer *rb, logicalLog *change, int slotId) +{ + ParallelReorderBufferTXN *txn = NULL; + txn = ParallelReorderBufferTXNByXid(rb, change->xid, true, NULL, change->lsn, true); + + Assert(InvalidXLogRecPtr != change->lsn); + dlist_push_tail(&txn->changes, &change->node); + txn->nentries++; + txn->nentries_mem++; + + ParallelReorderBufferCheckSerializeTXN(rb, txn, slotId); +} + +void ParallelFreeTuple(ReorderBufferTupleBuf *tuple, int slotId) +{ + uint32 curTupleNum = g_Logicaldispatcher[slotId].curTupleNum; + if (curTupleNum >= max_decode_cache_num || + tuple->alloc_tuple_size > Max(MaxHeapTupleSize, MaxPossibleUHeapTupleSize)) { + pfree(tuple); + (void)pg_atomic_sub_fetch_u32(&g_Logicaldispatcher[slotId].curTupleNum, 1); + return; + } + + ReorderBufferTupleBuf *oldHead = + (ReorderBufferTupleBuf *)pg_atomic_read_uintptr((uintptr_t *)&g_Logicaldispatcher[slotId].freeTupleHead); + do { + tuple->freeNext = oldHead; + } while (!pg_atomic_compare_exchange_uintptr((uintptr_t *)&g_Logicaldispatcher[slotId].freeTupleHead, + (uintptr_t *)&oldHead, (uintptr_t)tuple)); +} + +void ParallelFreeChange(ParallelReorderBufferChange *change, int slotId) +{ + ParallelReorderBufferToastReset(change, slotId); + uint32 curChangeNum = g_Logicaldispatcher[slotId].curChangeNum; + + if (change->data.tp.newtuple) { + ParallelFreeTuple(change->data.tp.newtuple, slotId); + } + if (change->data.tp.oldtuple) { + ParallelFreeTuple(change->data.tp.oldtuple, slotId); + } + + if (curChangeNum >= max_decode_cache_num) { + pfree(change); + (void)pg_atomic_sub_fetch_u32(&g_Logicaldispatcher[slotId].curChangeNum, 1); + return; + } + + ParallelReorderBufferChange *oldHead = + (ParallelReorderBufferChange *)pg_atomic_read_uintptr((uintptr_t *)&g_Logicaldispatcher[slotId].freeChangeHead); + + do { + change->freeNext = oldHead; + } while (!pg_atomic_compare_exchange_uintptr((uintptr_t *)&g_Logicaldispatcher[slotId].freeChangeHead, + (uintptr_t *)&oldHead, (uintptr_t)change)); +} + +/* + * Get a unused ParallelReorderBufferChange, which may be preallocated in a global resource list. 
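+ * Freed changes are pushed onto freeChangeHead with a CAS loop (see
+ * ParallelFreeChange above); this getter detaches the whole list with an
+ * atomic exchange and drains it locally through freeGetChangeHead, so no
+ * spinlock is needed on this hot path.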
+ */ +ParallelReorderBufferChange* ParallelReorderBufferGetChange(ParallelReorderBuffer *rb, int slotId) +{ + ParallelReorderBufferChange *change = NULL; + MemoryContext oldCtx = NULL; + + errno_t rc = 0; + do { + if (g_Logicaldispatcher[slotId].freeGetChangeHead != NULL) { + change = g_Logicaldispatcher[slotId].freeGetChangeHead; + g_Logicaldispatcher[slotId].freeGetChangeHead = g_Logicaldispatcher[slotId].freeGetChangeHead->freeNext; + } else { + ParallelReorderBufferChange *head = + (ParallelReorderBufferChange *)pg_atomic_exchange_uintptr( + (uintptr_t *)&g_Logicaldispatcher[slotId].freeChangeHead, + (uintptr_t)NULL); + if (head != NULL) { + change = head; + g_Logicaldispatcher[slotId].freeGetChangeHead = head->freeNext; + } else { + (void)pg_atomic_add_fetch_u32(&g_Logicaldispatcher[slotId].curChangeNum, 1); + oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + change = (ParallelReorderBufferChange *)palloc0(sizeof(ParallelReorderBufferChange)); + rc = memset_s(change, sizeof(ParallelReorderBufferChange), 0, sizeof(ParallelReorderBufferChange)); + securec_check(rc, "", ""); + MemoryContextSwitchTo(oldCtx); + } + } + } while (change == NULL); + rc = memset_s(&change->data, sizeof(change->data), 0, sizeof(change->data)); + securec_check(rc, "", ""); + change->ninvalidations = 0; + change->invalidations = NULL; + change->toast_hash = NULL; + return change; +} + +ReorderBufferTupleBuf *ParallelReorderBufferGetTupleBuf(ParallelReorderBuffer *rb, Size tuple_len, + ParallelDecodeReaderWorker *worker, bool isHeapTuple) +{ + int slotId = worker->slotId; + ReorderBufferTupleBuf *tuple = NULL; + Size allocLen = 0; + Size maxSize = 0; + + allocLen = tuple_len + (isHeapTuple ? SizeofHeapTupleHeader : SizeOfUHeapDiskTupleData); + maxSize = Max(MaxHeapTupleSize, MaxPossibleUHeapTupleSize); + MemoryContext oldCtx = NULL; + + /* + * Most tuples are below maxSize, so we use a slab allocator for those. + * Thus always allocate at least maxSize. Note that tuples generated for + * oldtuples can be bigger, as they don't have out-of-line toast columns. + */ + if (allocLen < maxSize) { + allocLen = maxSize; + } + + /* if small enough, check the slab cache */ + if (allocLen <= maxSize) { + do { + if (g_Logicaldispatcher[slotId].freeGetTupleHead != NULL) { + tuple = g_Logicaldispatcher[slotId].freeGetTupleHead; + g_Logicaldispatcher[slotId].freeGetTupleHead = g_Logicaldispatcher[slotId].freeGetTupleHead->freeNext; + continue; + } + ReorderBufferTupleBuf *head = (ReorderBufferTupleBuf *)pg_atomic_exchange_uintptr( + (uintptr_t *)&g_Logicaldispatcher[slotId].freeTupleHead, (uintptr_t)NULL); + if (head != NULL) { + tuple = head; + g_Logicaldispatcher[slotId].freeGetTupleHead = head->freeNext; + } else { + (void)pg_atomic_add_fetch_u32(&g_Logicaldispatcher[slotId].curTupleNum, 1); + oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + tuple = (ReorderBufferTupleBuf *)palloc0(sizeof(ReorderBufferTupleBuf) + allocLen); + MemoryContextSwitchTo(oldCtx); + tuple->alloc_tuple_size = allocLen; + tuple->tuple.t_data = isHeapTuple ? 
ReorderBufferTupleBufData(tuple) : + (HeapTupleHeader)ReorderBufferUTupleBufData((ReorderBufferUTupleBuf *)tuple); + } + } while (tuple == NULL); + } else { + oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + tuple = (ReorderBufferTupleBuf *)palloc0(sizeof(ReorderBufferTupleBuf) + allocLen); + MemoryContextSwitchTo(oldCtx); + tuple->alloc_tuple_size = allocLen; + tuple->tuple.t_data = isHeapTuple ? ReorderBufferTupleBufData(tuple) : + (HeapTupleHeader)ReorderBufferUTupleBufData((ReorderBufferUTupleBuf *)tuple); + } + tuple->tuple.tupTableType = isHeapTuple ? HEAP_TUPLE : UHEAP_TUPLE; + return tuple; +} + +ParallelReorderBufferTXN *ParallelReorderBufferGetTXN(ParallelReorderBuffer *rb) +{ + ParallelReorderBufferTXN *txn = NULL; + int rc = 0; + /* check the slab cache */ + txn = (ParallelReorderBufferTXN *)palloc(sizeof(ParallelReorderBufferTXN)); + + rc = memset_s(txn, sizeof(ParallelReorderBufferTXN), 0, sizeof(ParallelReorderBufferTXN)); + securec_check(rc, "", ""); + + dlist_init(&txn->changes); + dlist_init(&txn->tuplecids); + dlist_init(&txn->subtxns); + + return txn; +} +void ParallelReorderBufferReturnTXN(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn) +{ + /* clean the lookup cache if we were cached (quite likely) */ + if (rb->by_txn_last_xid == txn->xid) { + rb->by_txn_last_xid = InvalidTransactionId; + rb->by_txn_last_txn = NULL; + } + + /* free data that's contained */ + if (txn->tuplecid_hash != NULL) { + hash_destroy(txn->tuplecid_hash); + txn->tuplecid_hash = NULL; + } + + if (txn->invalidations) { + pfree(txn->invalidations); + txn->invalidations = NULL; + } + + txn->nentries = 0; + txn->nentries_mem = 0; +} +/* --------------------------------------- + * toast reassembly support + * --------------------------------------- + * + * + * Initialize per tuple toast reconstruction support. 
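+ * Chunks collected by ToastTupleAppendChunk() are kept in this hash, keyed
+ * by chunk id, until ToastTupleReplace() stitches them back into full datums.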
+ */ +static void parallelReorderBufferToastInitHash(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn) +{ + HASHCTL hash_ctl; + int rc = 0; + Assert(txn->toast_hash == NULL); + + rc = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl)); + securec_check(rc, "", ""); + + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(ReorderBufferToastEnt); + hash_ctl.hash = tag_hash; + hash_ctl.hcxt = rb->context; + const long toastHashNelem = 5; + txn->toast_hash = hash_create("ParallelReorderBufferToastHash", toastHashNelem, &hash_ctl, + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_SHRCTX); +} + +void ToastTupleAppendChunk(ParallelReorderBuffer *rb, Relation relation, + ParallelReorderBufferChange *change) +{ + ReorderBufferToastEnt *ent = NULL; + ReorderBufferTupleBuf *newtup = NULL; + bool found = false; + int32 chunksize = 0; + bool isnull = false; + Pointer chunk = NULL; + TupleDesc desc = RelationGetDescr(relation); + Oid chunk_id = InvalidOid; + Oid chunk_seq = InvalidOid; + ParallelReorderBufferTXN *txn = ParallelReorderBufferTXNByXid(rb, change->xid, true, NULL, change->lsn, true); + if (txn->toast_hash == NULL) { + parallelReorderBufferToastInitHash(rb, txn); + } + + Assert(IsToastRelation(relation)); + + const int chunkIdIndex = 1; + const int chunkSeqIndex = 2; + const int chunkIndex = 3; + newtup = change->data.tp.newtuple; + if (change->action == PARALLEL_REORDER_BUFFER_CHANGE_INSERT) { + chunk_id = DatumGetObjectId(fastgetattr(&newtup->tuple, chunkIdIndex, desc, &isnull)); + Assert(!isnull); + chunk_seq = DatumGetInt32(fastgetattr(&newtup->tuple, chunkSeqIndex, desc, &isnull)); + Assert(!isnull); + } else { + chunk_id = DatumGetObjectId(UHeapFastGetAttr((UHeapTuple)&newtup->tuple, 1, desc, &isnull)); + Assert(!isnull); + chunk_seq = DatumGetInt32(UHeapFastGetAttr((UHeapTuple)&newtup->tuple, 2, desc, &isnull)); + Assert(!isnull); + } + + ent = (ReorderBufferToastEnt *)hash_search(txn->toast_hash, (void *)&chunk_id, HASH_ENTER, &found); + + if (!found) { + Assert(ent->chunk_id == chunk_id); + ent->num_chunks = 0; + ent->last_chunk_seq = 0; + ent->size = 0; + ent->reconstructed = NULL; + dlist_init(&ent->chunks); + + if (chunk_seq != 0) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("got sequence entry %u for toast chunk %u instead of seq 0", chunk_seq, chunk_id), + errdetail("N/A"), errcause("Toast file damaged."), + erraction("Contact engineer to recover toast files."))); + } + } else if (found && chunk_seq != (Oid)ent->last_chunk_seq + 1) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("got sequence entry %u for toast chunk %u instead of seq %d", chunk_seq, chunk_id, + ent->last_chunk_seq + 1), + errdetail("N/A"), errcause("Toast file damaged."), + erraction("Contact engineer to recover toast files."))); + } + if (change->action == PARALLEL_REORDER_BUFFER_CHANGE_INSERT) { + chunk = DatumGetPointer(fastgetattr(&newtup->tuple, chunkIndex, desc, &isnull)); + } else { + chunk = DatumGetPointer(UHeapFastGetAttr((UHeapTuple)&newtup->tuple, chunkIndex, desc, &isnull)); + } + Assert(!isnull); + if (isnull) { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("fail to get toast chunk"), + errdetail("N/A"), errcause("Toast file damaged."), + erraction("Contact engineer to recover toast files."))); + } + checkHugeToastPointer((varlena *)chunk); + /* calculate size so we can allocate the right size at once later */ + if 
(!VARATT_IS_EXTENDED(chunk)) { + chunksize = VARSIZE(chunk) - VARHDRSZ; + } else if (VARATT_IS_SHORT(chunk)) { + /* could happen due to heap_form_tuple doing its thing */ + chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT; + } else { + ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unexpected type of toast chunk"), + errdetail("N/A"), errcause("Toast file damaged."), + erraction("Contact engineer to recover toast files."))); + } + + ent->size += chunksize; + ent->last_chunk_seq = chunk_seq; + ent->num_chunks++; + ereport(LOG, (errmsg("applying mapping: %lX %lX %lX %lX", uint64(ent), change->xid, change->lsn, + uint64(change->data.tp.newtuple)))); + dlist_push_tail(&ent->chunks, &change->node); +} + +/* + * Free all resources allocated for toast reconstruction. + */ +void ParallelReorderBufferToastReset(ParallelReorderBufferChange *change, int slotId) +{ + HASH_SEQ_STATUS hstat; + ReorderBufferToastEnt *ent = NULL; + + if (change->toast_hash == NULL) { + return; + } + + /* sequentially walk over the hash and free everything */ + hash_seq_init(&hstat, change->toast_hash); + while ((ent = (ReorderBufferToastEnt *)hash_seq_search(&hstat)) != NULL) { + dlist_mutable_iter it; + + if (ent->reconstructed != NULL) { + pfree(ent->reconstructed); + ent->reconstructed = NULL; + } + + dlist_foreach_modify(it, &ent->chunks) + { + ParallelReorderBufferChange *change = dlist_container(ParallelReorderBufferChange, node, it.cur); + + dlist_delete(&change->node); + ParallelFreeChange(change, slotId); + } + } + + hash_destroy(change->toast_hash); + change->toast_hash = NULL; +} + +/* + * Splice toast tuple. + */ +void ToastTupleSplicing(Datum *attrs, TupleDesc desc, bool *isnull, TupleDesc toast_desc, ParallelReorderBufferTXN *txn, + bool isHeap, bool *free) +{ + int rc = 0; + int natt = 0; + const int toast_index = 3; /* toast index in tuple is 3 */ + + for (natt = 0; natt < desc->natts; natt++) { + Form_pg_attribute attr = desc->attrs[natt]; + ReorderBufferToastEnt *ent = NULL; + struct varlena *varlena = NULL; + + /* va_rawsize is the size of the original datum -- including header */ + struct varatt_external toast_pointer; + struct varatt_indirect redirect_pointer; + struct varlena *new_datum = NULL; + struct varlena *reconstructed = NULL; + dlist_iter it; + Size data_done = 0; + int2 bucketid; + + /* system columns aren't toasted */ + if (attr->attnum < 0) + continue; + + if (attr->attisdropped) + continue; + + /* not a varlena datatype */ + if (attr->attlen != -1) + continue; + + /* no data */ + if (isnull[natt]) + continue; + + /* ok, we know we have a toast datum */ + varlena = (struct varlena *)DatumGetPointer(attrs[natt]); + checkHugeToastPointer(varlena); + /* no need to do anything if the tuple isn't external */ + if (!VARATT_IS_EXTERNAL(varlena)) + continue; + + VARATT_EXTERNAL_GET_POINTER_B(toast_pointer, varlena, bucketid); + + /* + * Check whether the toast tuple changed, replace if so. 
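+         * The lookup is keyed by the value id carried in the tuple's toast
+         * pointer; a miss means this datum was not rewritten in this txn.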
+ */ + ent = (ReorderBufferToastEnt *)hash_search(txn->toast_hash, (void *)&toast_pointer.va_valueid, HASH_FIND, NULL); + if (ent == NULL) + continue; + + new_datum = (struct varlena *)palloc0(INDIRECT_POINTER_SIZE); + + free[natt] = true; + + reconstructed = (struct varlena *)palloc0(toast_pointer.va_rawsize); + + ent->reconstructed = reconstructed; + + /* stitch toast tuple back together from its parts */ + dlist_foreach(it, &ent->chunks) + { + bool isnul = false; + ParallelReorderBufferChange *cchange = NULL; + ReorderBufferTupleBuf *ctup = NULL; + Pointer chunk = NULL; + + cchange = dlist_container(ParallelReorderBufferChange, node, it.cur); + ctup = cchange->data.tp.newtuple; + if (isHeap) { + chunk = DatumGetPointer(fastgetattr(&ctup->tuple, toast_index, toast_desc, &isnul)); + } else { + chunk = DatumGetPointer(UHeapFastGetAttr((UHeapTuple)&ctup->tuple, toast_index, toast_desc, &isnul)); + } + if (chunk == NULL) { + continue; + } + Assert(!isnul); + Assert(!VARATT_IS_EXTERNAL(chunk)); + Assert(!VARATT_IS_SHORT(chunk)); + rc = memcpy_s(VARDATA(reconstructed) + data_done, VARSIZE(chunk) - VARHDRSZ, VARDATA(chunk), + VARSIZE(chunk) - VARHDRSZ); + securec_check(rc, "", ""); + data_done += VARSIZE(chunk) - VARHDRSZ; + } + Assert(data_done == (Size)toast_pointer.va_extsize); + + /* make sure its marked as compressed or not */ + if (VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer)) { + SET_VARSIZE_COMPRESSED(reconstructed, data_done + VARHDRSZ); + } else { + SET_VARSIZE(reconstructed, data_done + VARHDRSZ); + } + + rc = memset_s(&redirect_pointer, sizeof(redirect_pointer), 0, sizeof(redirect_pointer)); + securec_check(rc, "", ""); + redirect_pointer.pointer = reconstructed; + + SET_VARTAG_EXTERNAL(new_datum, VARTAG_INDIRECT); + rc = memcpy_s(VARDATA_EXTERNAL(new_datum), sizeof(redirect_pointer), &redirect_pointer, + sizeof(redirect_pointer)); + securec_check(rc, "", ""); + + attrs[natt] = PointerGetDatum(new_datum); + } +} +/* + * Query whether toast tuples have been stored in the current Txn->toast_hash before. 
+ * If so, merge the tuples previously stored in the current Txn->toast_hash with the toast tuples + * in the current table into a complete toast tuple + */ +void ToastTupleReplace(ParallelReorderBuffer *rb, Relation relation, ParallelReorderBufferChange *change, + Oid partationReltoastrelid, ParallelDecodeReaderWorker *worker, bool isHeap) +{ + TupleDesc desc = NULL; + Datum *attrs = NULL; + bool *isnull = NULL; + bool *free = NULL; + HeapTuple tmphtup = NULL; + Relation toast_rel = NULL; + TupleDesc toast_desc = NULL; + MemoryContext oldcontext = NULL; + ReorderBufferTupleBuf *newtup = NULL; + ParallelReorderBufferTXN *txn; + int rc = 0; + + txn = ParallelReorderBufferTXNByXid(rb, change->xid, false, NULL, change->lsn, true); + if (txn == NULL) { + return; + } + /* no toast tuples changed */ + if (txn->toast_hash == NULL) + return; + oldcontext = MemoryContextSwitchTo(rb->context); + + /* we should only have toast tuples in an INSERT or UPDATE */ + Assert(change->data.tp.newtuple); + + desc = RelationGetDescr(relation); + if (relation->rd_rel->reltoastrelid != InvalidOid) + toast_rel = RelationIdGetRelation(relation->rd_rel->reltoastrelid); + else + toast_rel = RelationIdGetRelation(partationReltoastrelid); + if (toast_rel == NULL) { + ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), errmodule(MOD_MEM), + errmsg("toast_rel should not be NULL!"), + errdetail("N/A"), errcause("Toast file damaged."), + erraction("Contact engineer to recover toast files."))); + } + toast_desc = RelationGetDescr(toast_rel); + + /* should we allocate from stack instead? */ + attrs = (Datum *)palloc0(sizeof(Datum) * desc->natts); + isnull = (bool *)palloc0(sizeof(bool) * desc->natts); + free = (bool *)palloc0(sizeof(bool) * desc->natts); + + newtup = change->data.tp.newtuple; + + if (isHeap) { + heap_deform_tuple(&newtup->tuple, desc, attrs, isnull); + } else { + UHeapDeformTuple((UHeapTuple)&newtup->tuple, desc, attrs, isnull); + } + + ToastTupleSplicing(attrs, desc, isnull, toast_desc, txn, isHeap, free); + + /* + * Build tuple in separate memory & copy tuple back into the tuplebuf + * passed to the output plugin. We can't directly heap_fill_tuple() into + * the tuplebuf because attrs[] will point back into the current content. + */ + if (isHeap) { + tmphtup = heap_form_tuple(desc, attrs, isnull); + } else { + tmphtup = (HeapTuple)UHeapFormTuple(desc, attrs, isnull); + } + + Assert(ReorderBufferTupleBufData(newtup) == newtup->tuple.t_data); + + rc = memcpy_s(newtup->tuple.t_data, newtup->alloc_tuple_size, tmphtup->t_data, tmphtup->t_len); + securec_check(rc, "", ""); + newtup->tuple.t_len = tmphtup->t_len; + + /* + * free resources we won't further need, more persistent stuff will be + * free'd in ReorderBufferToastReset(). + */ + RelationClose(toast_rel); + pfree(tmphtup); + tmphtup = NULL; + for (int natt = 0; natt < desc->natts; natt++) { + if (free[natt]) { + pfree(DatumGetPointer(attrs[natt])); + } + } + pfree(attrs); + attrs = NULL; + pfree(isnull); + isnull = NULL; + pfree(free); + free = NULL; + + (void)MemoryContextSwitchTo(oldcontext); + change->toast_hash = txn->toast_hash; + txn->toast_hash = NULL; +} + +/* + * Remove all on-disk stored logical log in parallel decoding. 
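+ * Spill files live under the slot's snap directory and are named
+ * xid-<xid>-lsn-<X>-<X>.snap, one per WAL segment the transaction touched.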
+ */ +static void ParallelReorderBufferRestoreCleanup(ParallelReorderBufferTXN *txn, XLogRecPtr lsn = InvalidXLogRecPtr) +{ + if (txn->final_lsn == InvalidXLogRecPtr) { + txn->final_lsn = lsn; + } + Assert(!XLByteEQ(txn->first_lsn, InvalidXLogRecPtr)); + + XLogSegNo first = (txn->first_lsn) / XLogSegSize; + XLogSegNo last = (txn->final_lsn) / XLogSegSize; + + /* iterate over all possible filenames, and delete them */ + for (XLogSegNo cur = first; cur <= last; cur++) { + char path[MAXPGPATH]; + XLogRecPtr recptr; + recptr = (cur * XLOG_SEG_SIZE); + errno_t rc = sprintf_s(path, sizeof(path), "pg_replslot/%s/snap/xid-%lu-lsn-%X-%X.snap", + t_thrd.walsender_cxt.slotname, txn->xid, (uint32)(recptr >> 32), uint32(recptr)); + securec_check_ss(rc, "", ""); + if (unlink(path) != 0 && errno != ENOENT) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not unlink file \"%s\": %m", path))); + } + } +} + +/* + * Parallel decoding cleanup txn. + */ +void ParallelReorderBufferCleanupTXN(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, + XLogRecPtr lsn = InvalidXLogRecPtr) +{ + bool found = false; + dlist_mutable_iter iter; + dlist_foreach_modify(iter, &txn->subtxns) + { + ParallelReorderBufferTXN *subtxn = NULL; + + subtxn = dlist_container(ParallelReorderBufferTXN, node, iter.cur); + + /* + * Subtransactions are always associated to the toplevel TXN, even if + * they originally were happening inside another subtxn, so we won't + * ever recurse more than one level deep here. + */ + Assert(subtxn->is_known_as_subxact); + Assert(subtxn->nsubtxns == 0); + ParallelReorderBufferCleanupTXN(rb, subtxn, lsn); + } + /* + * Remove TXN from its containing list. + * + * Note: if txn->is_known_as_subxact, we are deleting the TXN from its + * parent's list of known subxacts; this leaves the parent's nsubxacts + * count too high, but we don't care. Otherwise, we are deleting the TXN + * from the LSN-ordered list of toplevel TXNs. + */ + dlist_delete(&txn->node); + + /* now remove reference from buffer */ + (void)hash_search(rb->by_txn, (void *)&txn->xid, HASH_REMOVE, &found); + Assert(found); + + /* remove entries spilled to disk */ + if (txn->serialized) { + ParallelReorderBufferRestoreCleanup(txn, lsn); + } + + /* deallocate */ + ParallelReorderBufferReturnTXN(rb, txn); +} + +/* + * --------------------------------------- + * Disk serialization support + * --------------------------------------- + * + * + * Ensure the IO buffer is >= sz. + */ +static void ParallelReorderBufferSerializeReserve(ParallelReorderBuffer *rb, Size sz) +{ + if (!rb->outbufsize) { + rb->outbuf = (char *)MemoryContextAlloc(rb->context, sz); + rb->outbufsize = sz; + } else if (rb->outbufsize < sz) { + rb->outbuf = (char *)repalloc(rb->outbuf, sz); + rb->outbufsize = sz; + } +} + +/* + * Check whether the transaction tx should spill its data to disk. + */ +static void ParallelReorderBufferCheckSerializeTXN(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int slotId) +{ + if (txn->nentries_mem >= (unsigned)g_instance.attr.attr_common.max_changes_in_memory) { + ParallelReorderBufferSerializeTXN(rb, txn, slotId); + Assert(txn->nentries_mem == 0); + } +} + +/* + * Spill data of a large transaction (and its subtransactions) to disk. 
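+ * Called once a transaction accumulates more than max_changes_in_memory
+ * changes (see ParallelReorderBufferCheckSerializeTXN above); the changes
+ * are appended to per-segment spill files and freed from memory.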
+ */ + +static void ParallelReorderBufferSerializeTXN(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int slotId) +{ + dlist_iter subtxn_i; + dlist_mutable_iter change_i; + int fd = -1; + XLogSegNo curOpenSegNo = 0; + + Size spilled = 0; + char path[MAXPGPATH]; + int nRet = 0; + + if (!RecoveryInProgress()) { + ereport(DEBUG2, (errmsg("spill %u changes in tx %lu to disk", (uint32)txn->nentries_mem, txn->xid))); + } + + /* do the same to all child TXs */ + if (&txn->subtxns == NULL) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("txn->subtxns is illegal point"))); + } + dlist_foreach(subtxn_i, &txn->subtxns) + { + ParallelReorderBufferTXN *subtxn = NULL; + + subtxn = dlist_container(ParallelReorderBufferTXN, node, subtxn_i.cur); + ParallelReorderBufferSerializeTXN(rb, subtxn, slotId); + } + + /* serialize changestream */ + dlist_foreach_modify(change_i, &txn->changes) + { + logicalLog *change = NULL; + + change = dlist_container(logicalLog, node, change_i.cur); + /* + * store in segment in which it belongs by start lsn, don't split over + * multiple segments tho + */ + if (fd == -1 || !((change->lsn) / XLogSegSize == curOpenSegNo)) { + XLogRecPtr recptr; + + if (fd != -1) { + (void)CloseTransientFile(fd); + } + curOpenSegNo = (change->lsn) / XLogSegSize; + + recptr = (curOpenSegNo * XLogSegSize); + + /* + * No need to care about TLIs here, only used during a single run, + * so each LSN only maps to a specific WAL record. + */ + + nRet = sprintf_s(path, MAXPGPATH, "pg_replslot/%s/snap/xid-%lu-lsn-%X-%X.snap", + t_thrd.walsender_cxt.slotname, txn->xid, (uint32)(recptr >> 32), + (uint32)recptr); + + securec_check_ss(nRet, "", ""); + /* open segment, create it if necessary */ + fd = OpenTransientFile(path, O_CREAT | O_WRONLY | O_APPEND | PG_BINARY, S_IRUSR | S_IWUSR); + if (fd < 0) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); + } + } + + ParallelReorderBufferSerializeChange(rb, txn, fd, change); + dlist_delete(&change->node); + FreeLogicalLog(change, slotId); + spilled++; + } + + Assert(spilled == txn->nentries_mem); + Assert(dlist_is_empty(&txn->changes)); + txn->serialized = true; + txn->nentries_mem = 0; + + if (fd != -1) { + (void)CloseTransientFile(fd); + } +} + +/* + * Serialize individual change to disk. + */ +static void ParallelReorderBufferSerializeChange(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int fd, + logicalLog *change) +{ + Size sz = sizeof(sz) + sizeof(XLogRecPtr) + change->out->len; + ParallelReorderBufferSerializeReserve(rb, sz); + + char* tmp = rb->outbuf; + errno_t rc = memcpy_s(tmp, sizeof(Size), &sz, sizeof(Size)); + securec_check(rc, "", ""); + tmp += sizeof(Size); + + rc = memcpy_s(tmp, sizeof(XLogRecPtr), &change->lsn, sizeof(XLogRecPtr)); + securec_check(rc, "", ""); + tmp += sizeof(XLogRecPtr); + if (change->out->len != 0) { + rc = memcpy_s(tmp, change->out->len, change->out->data, change->out->len); + securec_check(rc, "", ""); + } + + if ((Size)(write(fd, rb->outbuf, sz)) != sz) { + (void)CloseTransientFile(fd); + ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to xid %lu's data file: %m", txn->xid))); + } +} + +/* + * Restore changes from disk. 
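+ * Loads at most max_changes_in_memory changes of the transaction back into
+ * memory, advancing *fd and *segno across its spill files.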
+ */ +static Size ParallelReorderBufferRestoreChanges(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int *fd, + XLogSegNo *segno, int slotId) +{ + Size restored = 0; + XLogSegNo last_segno; + int rc = 0; + dlist_mutable_iter cleanup_iter; + + Assert(!XLByteEQ(txn->first_lsn, InvalidXLogRecPtr)); + Assert(!XLByteEQ(txn->final_lsn, InvalidXLogRecPtr)); + + /* free current entries, so we have memory for more */ + dlist_foreach_modify(cleanup_iter, &txn->changes) + { + logicalLog *cleanup = dlist_container(logicalLog, node, cleanup_iter.cur); + + dlist_delete(&cleanup->node); + FreeLogicalLog(cleanup, slotId); + } + txn->nentries_mem = 0; + Assert(dlist_is_empty(&txn->changes)); + + last_segno = (txn->final_lsn) / XLogSegSize; + while (restored < (unsigned)g_instance.attr.attr_common.max_changes_in_memory && *segno <= last_segno) { + int readBytes = 0; + + if (*fd == -1) { + XLogRecPtr recptr; + char path[MAXPGPATH]; + + /* first time in */ + if (*segno == 0) { + *segno = (txn->first_lsn) / XLogSegSize; + } + + Assert(*segno != 0 || dlist_is_empty(&txn->changes)); + + recptr = (*segno * XLOG_SEG_SIZE); + + /* + * No need to care about TLIs here, only used during a single run, + * so each LSN only maps to a specific WAL record. + */ + + rc = sprintf_s(path, sizeof(path), "pg_replslot/%s/snap/xid-%lu-lsn-%X-%X.snap", + t_thrd.walsender_cxt.slotname, txn->xid, (uint32)(recptr >> 32), + (uint32)recptr); + + securec_check_ss(rc, "", ""); + *fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + if (*fd < 0 && errno == ENOENT) { + *fd = -1; + (*segno)++; + continue; + } else if (*fd < 0) + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); + } + + /* + * Read the statically sized part of a change which has information + * about the total size. If we couldn't read a record, we're at the + * end of this file. 
+ */ + ParallelReorderBufferSerializeReserve(rb, sizeof(Size) + sizeof(XLogRecPtr)); + readBytes = read(*fd, rb->outbuf, sizeof(Size) + sizeof(XLogRecPtr)); + /* eof */ + if (readBytes == 0) { + (void)CloseTransientFile(*fd); + *fd = -1; + (*segno)++; + continue; + } else if (readBytes < 0) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not read from reorderbuffer spill file: %m"))); + } else if (readBytes != sizeof(Size) + sizeof(XLogRecPtr)) { + ereport(ERROR, (errcode_for_file_access(), + errmsg("incomplete read from reorderbuffer spill file: read %d instead of %lu", readBytes, + sizeof(Size) + sizeof(XLogRecPtr)))); + } + + Size ondiskSize = *(Size *)rb->outbuf; + ParallelReorderBufferSerializeReserve(rb, ondiskSize); + + if (ondiskSize == sizeof(Size) + sizeof(XLogRecPtr)) { + /* Nothing serialized on disk, so skip it */ + restored++; + continue; + } + readBytes = read(*fd, rb->outbuf + sizeof(Size) + sizeof(XLogRecPtr), + ondiskSize - sizeof(Size) - sizeof(XLogRecPtr)); + if (readBytes < 0) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not read from reorderbuffer spill file: %m"))); + } else if (INT2SIZET(readBytes) != ondiskSize - sizeof(Size) - sizeof(XLogRecPtr)) { + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not read from reorderbuffer spill file: read %d instead of %lu", readBytes, + ondiskSize - sizeof(Size) - sizeof(XLogRecPtr)))); + } + + /* + * ok, read a full change from disk, now restore it into proper + * in-memory format + */ + ParallelReorderBufferRestoreChange(rb, txn, rb->outbuf, slotId); + restored++; + } + + return restored; +} + +/* + * Use caching to reduce frequent memory requests and releases. + * Use worker->freegetlogicalloghead to store logchanges that should be free. + * logicalLog is requested in the reader thread and free in the decoder thread. + */ +logicalLog* RestoreLogicalLog(int slotId) { + logicalLog *logChange = NULL; + MemoryContext oldCtx; + oldCtx = MemoryContextSwitchTo(g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx); + + do { + if (t_thrd.walsender_cxt.restoreLogicalLogHead != NULL) { + logChange = t_thrd.walsender_cxt.restoreLogicalLogHead; + t_thrd.walsender_cxt.restoreLogicalLogHead = t_thrd.walsender_cxt.restoreLogicalLogHead->freeNext; + } else { + logicalLog *head = (logicalLog *)pg_atomic_exchange_uintptr( + (uintptr_t *)&g_Logicaldispatcher[slotId].freeLogicalLogHead, (uintptr_t)NULL); + if (head != NULL) { + logChange = head; + t_thrd.walsender_cxt.restoreLogicalLogHead = head->freeNext; + } else { + logChange = (logicalLog *)palloc(sizeof(logicalLog)); + logChange->out = NULL; + logChange->freeNext = NULL; + } + } + } while (logChange == NULL); + + logChange->type = LOGICAL_LOG_EMPTY; + + if (logChange->out) { + resetStringInfo(logChange->out); + } else { + logChange->out = makeStringInfo(); + } + + MemoryContextSwitchTo(oldCtx); + return logChange; +} + +/* + * Convert change from its on-disk format to in-memory format and queue it onto + * the TXN's ->changes list. 
+ */ +static void ParallelReorderBufferRestoreChange(ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, char *data, + int slotId) +{ + logicalLog *change = NULL; + ParallelDecodingData* pdata = (ParallelDecodingData*)t_thrd.walsender_cxt.parallel_logical_decoding_ctx-> + output_plugin_private; + + change = RestoreLogicalLog(slotId); + Size changeSize = 0; + errno_t rc = memcpy_s(&changeSize, sizeof(Size), data, sizeof(Size)); + securec_check(rc, "", ""); + data += sizeof(Size); + + rc = memcpy_s(&change->lsn, sizeof(XLogRecPtr), data, sizeof(XLogRecPtr)); + securec_check(rc, "", ""); + data += sizeof(XLogRecPtr); + + /* copy static part */ + MemoryContext oldCtx = MemoryContextSwitchTo(pdata->context); + change->freeNext = NULL; + appendBinaryStringInfo(change->out, data, changeSize - sizeof(Size) - sizeof(XLogRecPtr)); + dlist_push_tail(&txn->changes, &change->node); + txn->nentries_mem++; + MemoryContextSwitchTo(oldCtx); +} + +static int ParallelReorderBufferIterCompare(Datum a, Datum b, void *arg) +{ + ParallelReorderBufferIterTXNState *state = (ParallelReorderBufferIterTXNState *)arg; + XLogRecPtr pos_a = state->entries[DatumGetInt32(a)].lsn; + XLogRecPtr pos_b = state->entries[DatumGetInt32(b)].lsn; + if (XLByteLT(pos_a, pos_b)) + return 1; + else if (XLByteEQ(pos_a, pos_b)) + return 0; + return -1; +} + +/* + * Allocate & initialize an iterator which iterates in lsn order over a transaction + * and all its subtransactions under parallel decoding circumstances. + */ +static ParallelReorderBufferIterTXNState *ParallelReorderBufferIterTXNInit + (ParallelReorderBuffer *rb, ParallelReorderBufferTXN *txn, int slotId) +{ + Size nr_txns = 0; + ParallelReorderBufferIterTXNState *state = NULL; + dlist_iter cur_txn_i; + Size off; + + if (txn->nentries > 0) { + nr_txns++; + } + + dlist_foreach(cur_txn_i, &txn->subtxns) + { + ParallelReorderBufferTXN *cur_txn = NULL; + + cur_txn = dlist_container(ParallelReorderBufferTXN, node, cur_txn_i.cur); + if (cur_txn->nentries > 0) + nr_txns++; + } + + state = (ParallelReorderBufferIterTXNState *)MemoryContextAllocZero(rb->context, + sizeof(ParallelReorderBufferIterTXNState) + sizeof(ParallelReorderBufferIterTXNEntry) * nr_txns); + + state->nr_txns = nr_txns; + dlist_init(&state->old_change); + + for (off = 0; off < state->nr_txns; off++) { + state->entries[off].fd = -1; + state->entries[off].segno = 0; + } + + /* allocate heap */ + state->heap = binaryheap_allocate(state->nr_txns, ParallelReorderBufferIterCompare, state); + + /* + * Now insert items into the binary heap, in an unordered fashion. (We + * will run a heap assembly step at the end; this is more efficient.) 
+ */
+    off = 0;
+    /* add toplevel transaction if it contains changes */
+    if (txn->nentries > 0) {
+        logicalLog *cur_change = NULL;
+
+        if (txn->serialized) {
+            /* serialize remaining changes */
+            ParallelReorderBufferSerializeTXN(rb, txn, slotId);
+            (void)ParallelReorderBufferRestoreChanges(rb, txn, &state->entries[off].fd,
+                &state->entries[off].segno, slotId);
+        }
+
+        if (!dlist_is_empty(&txn->changes)) {
+            cur_change = dlist_head_element(logicalLog, node, &txn->changes);
+
+            state->entries[off].lsn = cur_change->lsn;
+            state->entries[off].change = cur_change;
+            state->entries[off].txn = txn;
+
+            binaryheap_add_unordered(state->heap, Int32GetDatum(off++));
+        }
+    }
+
+    /* add subtransactions if they contain changes */
+    dlist_foreach(cur_txn_i, &txn->subtxns)
+    {
+        ParallelReorderBufferTXN *cur_txn = NULL;
+
+        cur_txn = dlist_container(ParallelReorderBufferTXN, node, cur_txn_i.cur);
+        if (cur_txn->nentries > 0) {
+            logicalLog *cur_change = NULL;
+
+            if (cur_txn->serialized) {
+                /* serialize remaining changes */
+                ParallelReorderBufferSerializeTXN(rb, cur_txn, slotId);
+                (void)ParallelReorderBufferRestoreChanges(rb, cur_txn, &state->entries[off].fd,
+                    &state->entries[off].segno, slotId);
+            }
+
+            if (!dlist_is_empty(&cur_txn->changes)) {
+                cur_change = dlist_head_element(logicalLog, node, &cur_txn->changes);
+
+                state->entries[off].lsn = cur_change->lsn;
+                state->entries[off].change = cur_change;
+                state->entries[off].txn = cur_txn;
+
+                binaryheap_add_unordered(state->heap, Int32GetDatum(off++));
+            }
+        }
+    }
+
+    /* assemble a valid binary heap */
+    binaryheap_build(state->heap);
+
+    return state;
+}
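The iterator built above merges the per-transaction change lists with a binary heap keyed on LSN. As an editor's aside, here is a minimal standalone sketch of the same k-way merge, using std::priority_queue in place of openGauss's binaryheap; all names in it (StreamHead, LsnGreater) are illustrative and not part of the patch:

```
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

// Each per-transaction stream contributes only its head change; the heap
// always yields the stream whose next change has the smallest LSN, so the
// merged output is globally LSN-ordered.
struct StreamHead {
    uint64_t lsn;   // LSN of the stream's current head change
    size_t stream;  // index of the per-transaction change list
    size_t offset;  // position within that list
};

struct LsnGreater {
    bool operator()(const StreamHead &a, const StreamHead &b) const { return a.lsn > b.lsn; }
};

int main()
{
    // Three "transactions", each with changes already sorted by LSN.
    std::vector<std::vector<uint64_t>> streams = {{1, 5, 9}, {2, 3, 8}, {4, 6, 7}};
    std::priority_queue<StreamHead, std::vector<StreamHead>, LsnGreater> heap;

    for (size_t i = 0; i < streams.size(); i++) {
        if (!streams[i].empty()) {
            heap.push({streams[i][0], i, 0});
        }
    }
    while (!heap.empty()) {
        StreamHead top = heap.top();
        heap.pop();
        std::cout << top.lsn << " ";  // emit in global LSN order
        if (top.offset + 1 < streams[top.stream].size()) {
            heap.push({streams[top.stream][top.offset + 1], top.stream, top.offset + 1});
        }
    }
    std::cout << std::endl;  // prints: 1 2 3 4 5 6 7 8 9
    return 0;
}
```

Because each list is already LSN-sorted, the heap only ever holds one head per stream, which is why the init routine sizes its entries array by the number of transactions with changes rather than by the total change count.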
+
+/*
+ * Return the next change when iterating over a transaction and its
+ * subtransactions.
+ *
+ * Returns NULL when no further changes exist.
+ */
+static logicalLog *ParallelReorderBufferIterTXNNext(ParallelReorderBuffer *rb,
+    ParallelReorderBufferIterTXNState *state, int slotId)
+{
+    logicalLog *change = NULL;
+    ParallelReorderBufferIterTXNEntry *entry = NULL;
+    int32 off = 0;
+
+    /* nothing there anymore */
+    if (state->heap->bh_size == 0)
+        return NULL;
+
+    off = DatumGetInt32(binaryheap_first(state->heap));
+    entry = &state->entries[off];
+
+    /* free memory we might have "leaked" in the previous *Next call */
+    if (!dlist_is_empty(&state->old_change)) {
+        logicalLog *prev_change = dlist_container(logicalLog, node, dlist_pop_head_node(&state->old_change));
+        FreeLogicalLog(prev_change, slotId);
+        Assert(dlist_is_empty(&state->old_change));
+    }
+
+    change = entry->change;
+
+    /*
+     * update heap with information about which transaction has the next
+     * relevant change in LSN order
+     *
+     * there are in-memory changes
+     */
+    if (dlist_has_next(&entry->txn->changes, &entry->change->node)) {
+        dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
+        logicalLog *next_change = dlist_container(logicalLog, node, next);
+
+        /* txn stays the same */
+        state->entries[off].lsn = next_change->lsn;
+        state->entries[off].change = next_change;
+
+        binaryheap_replace_first(state->heap, Int32GetDatum(off));
+        return change;
+    }
+
+    /* try to load changes from disk */
+    if (entry->txn->nentries != entry->txn->nentries_mem) {
+        /*
+         * Ugly: restoring changes will reuse *Change records, thus delete the
+         * current one from the per-tx list and only free it in the next call.
+         */
+        dlist_delete(&change->node);
+        dlist_push_tail(&state->old_change, &change->node);
+
+        if (ParallelReorderBufferRestoreChanges(rb, entry->txn, &entry->fd, &state->entries[off].segno, slotId)) {
+            /* successfully restored changes from disk */
+            logicalLog *next_change = dlist_head_element(logicalLog, node, &entry->txn->changes);
+
+            if (!RecoveryInProgress())
+                ereport(DEBUG2, (errmsg("restored %u/%u changes from disk", (uint32)entry->txn->nentries_mem,
+                    (uint32)entry->txn->nentries)));
+
+            Assert(entry->txn->nentries_mem);
+            /* txn stays the same */
+            state->entries[off].lsn = next_change->lsn;
+            state->entries[off].change = next_change;
+            binaryheap_replace_first(state->heap, Int32GetDatum(off));
+
+            return change;
+        }
+    }
+
+    /* ok, no changes there anymore, remove */
+    (void)binaryheap_remove_first(state->heap);
+
+    return change;
+}
+
+/*
+ * Forget the contents of a transaction if we aren't interested in its
+ * contents. Needs to be called first for subtransactions and then for the
+ * toplevel xid.
+ */
+void ParallelReorderBufferForget(ParallelReorderBuffer *rb, int slotId, ParallelReorderBufferTXN *txn)
+{
+    logicalLog *logChange = NULL;
+    if (txn == NULL)
+        return;
+    ParallelReorderBufferIterTXNState *volatile iterstate = NULL;
+
+    iterstate = ParallelReorderBufferIterTXNInit(rb, txn, slotId);
+    while ((logChange = ParallelReorderBufferIterTXNNext(rb, iterstate, slotId)) != NULL) {
+        dlist_delete(&logChange->node);
+        FreeLogicalLog(logChange, slotId);
+    }
+    ParallelReorderBufferCleanupTXN(rb, txn);
+}
+
+/*
+ * Parallel decoding check whether this batch should prepare write.
+ */
+static void ParallelCheckPrepare(StringInfo out, logicalLog *change, ParallelDecodingData *pdata, int slotId,
+    MemoryContext *oldCtxPtr)
+{
+    if (!g_Logicaldispatcher[slotId].remainPatch) {
+        WalSndPrepareWriteHelper(out, change->lsn, change->xid, true);
+    }
+    *oldCtxPtr = MemoryContextSwitchTo(pdata->context);
+}
+
+/*
+ * Parallel decoding check whether this batch should be sent.
+ */
+static void ParallelCheckBatch(StringInfo out, ParallelDecodingData *pdata, int slotId,
+    MemoryContext *oldCtxPtr, bool emptyXact)
+{
+    if (emptyXact) {
+        MemoryContextSwitchTo(*oldCtxPtr);
+        return;
+    }
+    if (out->len > g_Logicaldispatcher[slotId].pOptions.sending_batch * g_batch_unit_length) {
+        if (pdata->pOptions.decode_style == 'b') {
+            appendStringInfoChar(out, 'F'); // the finishing char
+        } else if (g_Logicaldispatcher[slotId].pOptions.sending_batch > 0) {
+            pq_sendint32(out, 0);
+        }
+        MemoryContextSwitchTo(*oldCtxPtr);
+        WalSndWriteDataHelper(out, 0, 0, false);
+        g_Logicaldispatcher[slotId].decodeTime = GetCurrentTimestamp();
+        g_Logicaldispatcher[slotId].remainPatch = false;
+    } else {
+        if (pdata->pOptions.decode_style == 'b') {
+            appendStringInfoChar(out, 'P');
+        }
+        g_Logicaldispatcher[slotId].remainPatch = true;
+        MemoryContextSwitchTo(*oldCtxPtr);
+    }
+}
+
+/*
+ * Parallel decoding deal with batch sending.
+ */
+static inline void ParallelHandleBatch(StringInfo out, logicalLog *change, ParallelDecodingData *pdata, int slotId,
+    MemoryContext *oldCtxPtr)
+{
+    ParallelCheckBatch(out, pdata, slotId, oldCtxPtr, false);
+    ParallelCheckPrepare(out, change, pdata, slotId, oldCtxPtr);
+}
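ParallelCheckBatch implements a simple threshold rule: decoded messages accumulate in one StringInfo, and the buffer is only handed to WalSndWriteDataHelper once it grows past sending_batch * g_batch_unit_length bytes; otherwise remainPatch marks the batch as still open. A minimal sketch of that rule follows, with hypothetical names (Batcher, PrintFlush) that are not openGauss symbols:

```
#include <cstddef>
#include <cstdio>
#include <string>

// Keep appending decoded messages to one buffer and flush only once the
// buffer exceeds batchUnits * unitLen bytes, mirroring the threshold test
// in ParallelCheckBatch.
class Batcher {
public:
    Batcher(size_t batchUnits, size_t unitLen, void (*flushFn)(const std::string &))
        : threshold_(batchUnits * unitLen), flush_(flushFn) {}

    void Append(const std::string &msg)
    {
        buf_ += msg;
        if (buf_.size() > threshold_) {  // over the batch threshold: send now
            flush_(buf_);
            buf_.clear();                // the next message starts a fresh batch
        }
        // otherwise the partial batch stays buffered, like remainPatch = true
    }

private:
    size_t threshold_;
    void (*flush_)(const std::string &);
    std::string buf_;
};

static void PrintFlush(const std::string &batch)
{
    std::printf("flushed %zu bytes\n", batch.size());
}

int main()
{
    Batcher b(2, 8, PrintFlush);  // flush once more than 16 bytes are buffered
    b.Append("BEGIN CSN: 1");     // 12 bytes: stays buffered
    b.Append("commit xid: 1");    // 25 bytes total: exceeds 16, flushed
    return 0;
}
```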
+
+/*
+ * Parallel decoding output begin message.
+ */
+static void ParallelOutputBegin(StringInfo out, logicalLog *change, ParallelDecodingData *pdata,
+    ParallelReorderBufferTXN *txn, bool batchSending)
+{
+    if (pdata->pOptions.decode_style == 'b') {
+        int curPos = out->len;
+        const uint32 beginBaseLen = 25; /* this length does not include the separator 'P' */
+        pq_sendint32(out, beginBaseLen);
+        pq_sendint64(out, txn->first_lsn);
+        appendStringInfoChar(out, 'B');
+        pq_sendint64(out, change->csn);
+        pq_sendint64(out, txn->first_lsn);
+        if (pdata->pOptions.include_timestamp) {
+            appendStringInfoChar(out, 'T');
+            const char *timeStamp = timestamptz_to_str(txn->commit_time);
+            pq_sendint32(out, (uint32)(strlen(timeStamp)));
+            appendStringInfoString(out, timeStamp);
+            uint32 beginLen = htonl(beginBaseLen + 1 + sizeof(uint32) + strlen(timeStamp));
+            errno_t rc = memcpy_s(out->data + curPos, sizeof(uint32), &beginLen, sizeof(uint32));
+            securec_check(rc, "", "");
+        }
+    } else {
+        int curPos = out->len;
+        uint32 beginLen = 0;
+        if (batchSending) {
+            pq_sendint32(out, beginLen);
+            pq_sendint64(out, txn->first_lsn);
+        }
+        const uint32 upperPart = 32;
+        appendStringInfo(out, "BEGIN CSN: %lu commit_lsn: %X/%X", change->csn, (uint32)(txn->first_lsn >> upperPart),
+            (uint32)(txn->first_lsn));
+        if (pdata->pOptions.include_timestamp) {
+            const char *timeStamp = timestamptz_to_str(txn->commit_time);
+            appendStringInfo(out, " commit_time: %s", timeStamp);
+        }
+        if (batchSending) {
+            beginLen = htonl((uint32)(out->len - curPos) - (uint32)sizeof(uint32));
+            errno_t rc = memcpy_s(out->data + curPos, sizeof(uint32), &beginLen, sizeof(uint32));
+            securec_check(rc, "", "");
+        }
+    }
+}
+
+/*
+ * Parallel decoding output commit message.
+ */
+static void ParallelOutputCommit(StringInfo out, logicalLog *change, ParallelDecodingData *pdata,
+    ParallelReorderBufferTXN *txn, bool batchSending)
+{
+    if (pdata->pOptions.decode_style == 'b') {
+        int curPos = out->len;
+        uint32 commitLen = 0;
+        pq_sendint32(out, commitLen);
+        pq_sendint64(out, change->endLsn);
+        appendStringInfoChar(out, 'C');
+        commitLen += sizeof(uint64) + 1;
+        if (pdata->pOptions.include_xids) {
+            appendStringInfoChar(out, 'X');
+            pq_sendint64(out, change->xid);
+            commitLen += 1 + sizeof(uint64);
+        }
+        if (pdata->pOptions.include_timestamp) {
+            appendStringInfoChar(out, 'T');
+            const char *timeStamp = timestamptz_to_str(txn->commit_time);
+            pq_sendint32(out, (uint32)(strlen(timeStamp)));
+            appendStringInfoString(out, timeStamp);
+            commitLen += 1 + sizeof(uint32) + strlen(timeStamp);
+        }
+
+        commitLen = htonl(commitLen);
+        errno_t rc = memcpy_s(out->data + curPos, sizeof(uint32), &commitLen, sizeof(uint32));
+        securec_check(rc, "", "");
+    } else {
+        int curPos = out->len;
+        uint32 commitLen = 0;
+        if (batchSending) {
+            pq_sendint32(out, commitLen);
+            pq_sendint64(out, change->endLsn);
+        }
+        if (pdata->pOptions.include_xids) {
+            appendStringInfo(out, "commit xid: %lu", change->xid);
+        } else {
+            appendStringInfoString(out, "commit");
+        }
+        if (pdata->pOptions.include_timestamp) {
+            appendStringInfo(out, " (at %s)", timestamptz_to_str(txn->commit_time));
+        }
+        if (batchSending) {
+            commitLen = htonl((uint32)(out->len - curPos) - (uint32)sizeof(uint32));
+            errno_t rc = memcpy_s(out->data + curPos, sizeof(uint32), &commitLen, sizeof(uint32));
+            securec_check(rc, "", "");
+        }
+    }
+}
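Both output routines use the same framing trick in batch mode: write a four-byte length placeholder, append the payload, then back-patch the placeholder with the real length in network byte order via htonl. A self-contained sketch of that pattern, assuming a plain std::string buffer rather than StringInfo (AppendFramedMessage is a hypothetical helper, not an openGauss function):

```
#include <arpa/inet.h>
#include <cstdint>
#include <cstring>
#include <string>

// Reserve 4 bytes for the message length, append the payload, then overwrite
// the placeholder with the real length in network byte order.
static void AppendFramedMessage(std::string &out, const std::string &payload)
{
    size_t lenPos = out.size();
    uint32_t placeholder = 0;
    out.append(reinterpret_cast<const char *>(&placeholder), sizeof(placeholder));
    out.append(payload);

    uint32_t msgLen = htonl(static_cast<uint32_t>(out.size() - lenPos - sizeof(uint32_t)));
    std::memcpy(&out[lenPos], &msgLen, sizeof(msgLen));
}

int main()
{
    std::string out;
    AppendFramedMessage(out, "BEGIN CSN: 42");   // 4-byte frame + 13-byte payload
    AppendFramedMessage(out, "commit xid: 42");  // 4-byte frame + 14-byte payload
    return out.size() == (4 + 13) + (4 + 14) ? 0 : 1;
}
```

Back-patching avoids a second copy of the payload: the length is only known after the variable-size parts (such as the timestamp string) have been appended.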
+
+/*
+ * Get and send all the logical logs in the ParallelReorderBufferTXN linked list.
+ */
+void ParallelReorderBufferCommit(ParallelReorderBuffer *rb, logicalLog *change, int slotId,
+    ParallelReorderBufferTXN *txn)
+{
+    logicalLog *logChange = NULL;
+
+    /* unknown transaction, nothing to replay */
+    if (txn == NULL) {
+        return;
+    }
+
+    ParallelLogicalDecodingContext *ctx = (ParallelLogicalDecodingContext *)rb->private_data;
+    ParallelDecodingData* pdata = (ParallelDecodingData*)t_thrd.walsender_cxt.
+        parallel_logical_decoding_ctx->output_plugin_private;
+    pdata->pOptions.xact_wrote_changes = false;
+
+    MemoryContext oldCtx = NULL;
+    ParallelCheckPrepare(ctx->out, change, pdata, slotId, &oldCtx);
+
+    if (!pdata->pOptions.skip_empty_xacts) {
+        ParallelOutputBegin(ctx->out, change, pdata, txn, g_Logicaldispatcher[slotId].pOptions.sending_batch > 0);
+        ParallelHandleBatch(ctx->out, change, pdata, slotId, &oldCtx);
+    }
+
+    ParallelReorderBufferIterTXNState *volatile iterstate = NULL;
+    iterstate = ParallelReorderBufferIterTXNInit(rb, txn, slotId);
+    bool stopDecode = false;
+    while ((logChange = ParallelReorderBufferIterTXNNext(rb, iterstate, slotId)) != NULL) {
+        if (logChange->type == LOGICAL_LOG_NEW_CID && !stopDecode) {
+            stopDecode = true;
+        }
+        if (!stopDecode && logChange->out != NULL && logChange->out->len != 0) {
+            if (pdata->pOptions.skip_empty_xacts && !pdata->pOptions.xact_wrote_changes) {
+                ParallelOutputBegin(ctx->out, change, pdata, txn,
+                    g_Logicaldispatcher[slotId].pOptions.sending_batch > 0);
+                ParallelHandleBatch(ctx->out, change, pdata, slotId, &oldCtx);
+                pdata->pOptions.xact_wrote_changes = true;
+            }
+            appendBinaryStringInfo(ctx->out, logChange->out->data, logChange->out->len);
+            ParallelHandleBatch(ctx->out, change, pdata, slotId, &oldCtx);
+        }
+        dlist_delete(&logChange->node);
+
+        FreeLogicalLog(logChange, slotId);
+    }
+
+    if (!pdata->pOptions.skip_empty_xacts || pdata->pOptions.xact_wrote_changes) {
+        ParallelOutputCommit(ctx->out, change, pdata, txn,
+            g_Logicaldispatcher[slotId].pOptions.sending_batch > 0);
+    }
+    ParallelReorderBufferCleanupTXN(rb, txn);
+
+    ParallelCheckBatch(ctx->out, pdata, slotId, &oldCtx,
+        (pdata->pOptions.skip_empty_xacts && !pdata->pOptions.xact_wrote_changes));
+    MemoryContextReset(pdata->context);
+}
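The lookup routine that follows fronts its hash table with a one-entry cache (by_txn_last_xid / by_txn_last_txn), which pays off because decoding tends to touch the same transaction many times in a row. A minimal sketch of that pattern under illustrative names (Txn and TxnCache are stand-ins, not openGauss types):

```
#include <cstdint>
#include <unordered_map>

struct Txn {
    uint64_t xid;
};

class TxnCache {
public:
    Txn *Lookup(uint64_t xid)
    {
        if (lastValid_ && lastXid_ == xid) {
            return lastTxn_;    // hit: skip the hash probe entirely
        }
        auto it = byXid_.find(xid);
        Txn *txn = (it == byXid_.end()) ? nullptr : it->second;
        lastXid_ = xid;         // negative results are remembered too
        lastTxn_ = txn;
        lastValid_ = true;
        return txn;
    }

    void Insert(Txn *txn)
    {
        byXid_[txn->xid] = txn;
        lastValid_ = false;     // invalidate so a stale miss is never returned
    }

private:
    std::unordered_map<uint64_t, Txn *> byXid_;
    uint64_t lastXid_ = 0;
    Txn *lastTxn_ = nullptr;
    bool lastValid_ = false;
};

int main()
{
    Txn t{42};
    TxnCache cache;
    cache.Insert(&t);
    return (cache.Lookup(42) == &t && cache.Lookup(42) == &t) ? 0 : 1;
}
```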
+
+/*
+ * Return the ParallelReorderBufferTXN from the given buffer, specified by Xid.
+ * If create is true, and a transaction doesn't already exist, create it
+ * (with the given LSN, and as top transaction if that's specified);
+ * when this happens, is_new is set to true.
+ */
+ParallelReorderBufferTXN *ParallelReorderBufferTXNByXid(ParallelReorderBuffer *rb, TransactionId xid,
+    bool create, bool *is_new, XLogRecPtr lsn, bool create_as_top)
+{
+    ParallelReorderBufferTXN *txn = NULL;
+    ParallelReorderBufferTXNByIdEnt *ent = NULL;
+    bool found = false;
+
+    Assert(TransactionIdIsValid(xid));
+
+    /*
+     * Check the one-entry lookup cache first
+     */
+    if (TransactionIdIsValid(rb->by_txn_last_xid) && rb->by_txn_last_xid == xid) {
+        txn = rb->by_txn_last_txn;
+
+        if (txn != NULL) {
+            /* found it, and it's valid */
+            if (is_new != NULL) {
+                *is_new = false;
+            }
+            return txn;
+        }
+
+        /*
+         * cached as non-existent, and asked not to create? Then there is
+         * nothing else to do.
+         */
+        if (!create) {
+            return NULL;
+        }
+        /* otherwise fall through to create it */
+    }
+
+    /*
+     * If the cache wasn't hit, or it yielded a does-not-exist entry and we
+     * want to create one, search the lookup table.
+     */
+    ent = (ParallelReorderBufferTXNByIdEnt *)hash_search(rb->by_txn, (void *)&xid, create ? HASH_ENTER : HASH_FIND,
+        &found);
+    if (found) {
+        txn = ent->txn;
+    } else if (create) {
+        /* initialize the new entry, if creation was requested */
+        Assert(ent != NULL);
+        Assert(lsn != InvalidXLogRecPtr);
+        ent->txn = ParallelReorderBufferGetTXN(rb);
+        ent->txn->xid = xid;
+        txn = ent->txn;
+        txn->first_lsn = lsn;
+        txn->restart_decoding_lsn = rb->current_restart_decoding_lsn;
+        txn->oldestXid = rb->lastRunningXactOldestXmin;
+
+        if (create_as_top) {
+            dlist_push_tail(&rb->toplevel_by_lsn, &txn->node);
+        }
+    } else {
+        txn = NULL; /* not found and not asked to create */
+    }
+
+    /* update cache */
+    rb->by_txn_last_txn = txn;
+    rb->by_txn_last_xid = xid;
+
+    if (is_new != NULL) {
+        *is_new = !found;
+    }
+
+    Assert(!create || !!txn);
+    return txn;
+}
+
+ParallelReorderBuffer *ParallelReorderBufferAllocate(int slotId)
+{
+    /* allocate memory in own context, to have better accountability */
+    MemoryContext new_ctx = g_instance.comm_cxt.pdecode_cxt[slotId].parallelDecodeCtx;
+
+    ParallelReorderBuffer *buffer =
+        (ParallelReorderBuffer *)MemoryContextAllocZero(new_ctx, sizeof(ParallelReorderBuffer));
+
+    HASHCTL hash_ctl;
+    errno_t rc = memset_s(&hash_ctl, sizeof(hash_ctl), 0, sizeof(hash_ctl));
+    securec_check(rc, "", "");
+
+    buffer->context = new_ctx;
+
+    hash_ctl.keysize = sizeof(TransactionId);
+    hash_ctl.entrysize = sizeof(ParallelReorderBufferTXNByIdEnt);
+    hash_ctl.hash = tag_hash;
+    hash_ctl.hcxt = buffer->context;
+
+    const long reorderBufferNelem = 1000;
+    buffer->by_txn = hash_create("ReorderBufferByXid", reorderBufferNelem,
+        &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+
+    buffer->by_txn_last_xid = InvalidTransactionId;
+    buffer->by_txn_last_txn = NULL;
+
+    buffer->nr_cached_changes = 0;
+
+    buffer->current_restart_decoding_lsn = InvalidXLogRecPtr;
+    buffer->outbuf = NULL;
+    buffer->outbufsize = 0;
+
+    dlist_init(&buffer->toplevel_by_lsn);
+    dlist_init(&buffer->txns_by_base_snapshot_lsn);
+    dlist_init(&buffer->cached_changes);
+    slist_init(&buffer->cached_tuplebufs);
+
+    return buffer;
+}
+
diff --git a/src/gausskernel/storage/replication/logical/relation.cpp b/src/gausskernel/storage/replication/logical/relation.cpp
index 42f578de0..6a1a4efd0 100644
--- a/src/gausskernel/storage/replication/logical/relation.cpp
+++ b/src/gausskernel/storage/replication/logical/relation.cpp
@@ -68,9 +68,11 @@ static void logicalrep_relmap_init()
 {
     HASHCTL ctl;
 
-    if (!t_thrd.applyworker_cxt.logicalRepRelMapContext)
+    if (!t_thrd.applyworker_cxt.logicalRepRelMapContext) {
         t_thrd.applyworker_cxt.logicalRepRelMapContext =
-            AllocSetContextCreate(u_sess->cache_mem_cxt, "LogicalRepRelMapContext", ALLOCSET_DEFAULT_SIZES);
+            AllocSetContextCreate(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT),
+                "LogicalRepRelMapContext", ALLOCSET_DEFAULT_SIZES);
+    }
 
     /* Initialize the relation hash table. */
     int rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl));
@@ -83,7 +85,7 @@ static void logicalrep_relmap_init()
         DEFAULT_LOGICAL_RELMAP_HASH_ELEM, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
     /* Watch for invalidation events.
*/ - CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb, (Datum)0); + CacheRegisterThreadRelcacheCallback(logicalrep_relmap_invalidate_cb, (Datum)0); } /* diff --git a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp index 3b9997319..a99969091 100644 --- a/src/gausskernel/storage/replication/logical/reorderbuffer.cpp +++ b/src/gausskernel/storage/replication/logical/reorderbuffer.cpp @@ -62,6 +62,7 @@ #include "access/xact.h" #include "catalog/catalog.h" +#include "catalog/gs_matview.h" #include "catalog/pg_namespace.h" #include "lib/binaryheap.h" @@ -75,6 +76,7 @@ #include "storage/smgr/fd.h" #include "storage/sinval.h" +#include "utils/acl.h" #include "utils/lsyscache.h" #include "utils/builtins.h" #include "utils/combocid.h" @@ -82,60 +84,6 @@ #include "utils/relcache.h" #include "utils/relfilenodemap.h" -/* entry for a hash table we use to map from xid to our transaction state */ -typedef struct ReorderBufferTXNByIdEnt { - TransactionId xid; - ReorderBufferTXN *txn; -} ReorderBufferTXNByIdEnt; - -/* data structures for (relfilenode, ctid) => (cmin, cmax) mapping */ -typedef struct ReorderBufferTupleCidKey { - RelFileNode relnode; - ItemPointerData tid; -} ReorderBufferTupleCidKey; - -typedef struct ReorderBufferTupleCidEnt { - ReorderBufferTupleCidKey key; - CommandId cmin; - CommandId cmax; - CommandId combocid; /* just for debugging */ -} ReorderBufferTupleCidEnt; - -/* k-way in-order change iteration support structures */ -typedef struct ReorderBufferIterTXNEntry { - XLogRecPtr lsn; - ReorderBufferChange *change; - ReorderBufferTXN *txn; - int fd; - XLogSegNo segno; -} ReorderBufferIterTXNEntry; - -typedef struct ReorderBufferIterTXNState { - binaryheap *heap; - Size nr_txns; - dlist_head old_change; - ReorderBufferIterTXNEntry entries[FLEXIBLE_ARRAY_MEMBER]; -} ReorderBufferIterTXNState; - -/* toast datastructures */ -typedef struct ReorderBufferToastEnt { - Oid chunk_id; /* toast_table.chunk_id */ - int32 last_chunk_seq; /* toast_table.chunk_seq of the last chunk we - * have seen */ - Size num_chunks; /* number of chunks we've already seen */ - Size size; /* combined size of chunks seen */ - dlist_head chunks; /* linked list of chunks */ - struct varlena *reconstructed; /* reconstructed varlena now pointed - * to in main tup */ -} ReorderBufferToastEnt; - -/* Disk serialization support datastructures */ -typedef struct ReorderBufferDiskChange { - Size size; - ReorderBufferChange change; - /* data follows */ -} ReorderBufferDiskChange; - /* * We use a very simple form of a slab allocator for frequently allocated * objects, simply keeping a fixed number in a linked list when unused, @@ -146,13 +94,14 @@ typedef struct ReorderBufferDiskChange { static const Size max_cached_changes = 4096 * 2; static const Size g_max_cached_transactions = 512; +#define MaxReorderBufferTupleSize (Max(MaxHeapTupleSize, MaxPossibleUHeapTupleSize)) /* --------------------------------------- * primary reorderbuffer support routines * --------------------------------------- */ static ReorderBufferTXN *ReorderBufferGetTXN(ReorderBuffer *rb); static void ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn); -static ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, +ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, XLogRecPtr lsn, bool create_as_top); static void AssertTXNLsnOrder(ReorderBuffer *rb); @@ 
-378,6 +327,15 @@ void ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change) case REORDER_BUFFER_CHANGE_UINSERT: case REORDER_BUFFER_CHANGE_UUPDATE: case REORDER_BUFFER_CHANGE_UDELETE: + if (change->data.utp.newtuple) { + ReorderBufferReturnTupleBuf(rb, (ReorderBufferTupleBuf*)change->data.utp.newtuple); + change->data.utp.newtuple = NULL; + } + + if (change->data.utp.oldtuple) { + ReorderBufferReturnTupleBuf(rb, (ReorderBufferTupleBuf*)change->data.utp.oldtuple); + change->data.utp.oldtuple = NULL; + } break; } @@ -394,16 +352,16 @@ ReorderBufferTupleBuf *ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_le ReorderBufferTupleBuf *tuple = NULL; Size alloc_len = tuple_len + SizeofHeapTupleHeader; /* - * Most tuples are below MaxHeapTupleSize, so we use a slab allocator for - * those. Thus always allocate at least MaxHeapTupleSize. Note that tuples + * Most tuples are below MaxReorderBufferTupleSize, so we use a slab allocator for + * those. Thus always allocate at least MaxReorderBufferTupleSize. Note that tuples * tuples generated for oldtuples can be bigger, as they don't have * out-of-line toast columns. */ - if (alloc_len < MaxHeapTupleSize) - alloc_len = MaxHeapTupleSize; + if (alloc_len < MaxReorderBufferTupleSize) + alloc_len = MaxReorderBufferTupleSize; /* if small enough, check the slab cache */ - if (alloc_len <= MaxHeapTupleSize && rb->nr_cached_tuplebufs) { + if (alloc_len <= MaxReorderBufferTupleSize && rb->nr_cached_tuplebufs) { rb->nr_cached_tuplebufs--; tuple = slist_container(ReorderBufferTupleBuf, node, slist_pop_head_node(&rb->cached_tuplebufs)); #ifdef USE_ASSERT_CHECKING @@ -426,21 +384,25 @@ ReorderBufferTupleBuf *ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_le return tuple; } +/* + * Get an unused, possibly preallocated, ReorderBufferUTupleBuf fitting at + * least a tuple of size tupleLen (excluding header overhead). + */ ReorderBufferUTupleBuf *ReorderBufferGetUTupleBuf(ReorderBuffer *rb, Size tupleLen) { ReorderBufferUTupleBuf *tuple = NULL; Size allocLen = add_size(tupleLen, SizeOfUHeapDiskTupleData); /* - * Most tuples are below MaxHeapTupleSize, so we use a slab allocator for - * those. Thus always allocate at least MaxHeapTupleSize. Note that tuples + * Most tuples are below MaxReorderBufferTupleSize, so we use a slab allocator for + * those. Thus always allocate at least MaxReorderBufferTupleSize. Note that tuples * tuples generated for oldtuples can be bigger, as they don't have * out-of-line toast columns. 
*/ - if (allocLen < MaxPossibleUHeapTupleSize) - allocLen = MaxPossibleUHeapTupleSize; + if (allocLen < MaxReorderBufferTupleSize) + allocLen = MaxReorderBufferTupleSize; /* if small enough, check the slab cache */ - if (allocLen <= MaxPossibleUHeapTupleSize && rb->nr_cached_tuplebufs) { + if (allocLen <= MaxReorderBufferTupleSize && rb->nr_cached_tuplebufs) { rb->nr_cached_tuplebufs--; tuple = slist_container(ReorderBufferUTupleBuf, node, slist_pop_head_node(&rb->cached_tuplebufs)); #ifdef USE_ASSERT_CHECKING @@ -472,7 +434,7 @@ ReorderBufferUTupleBuf *ReorderBufferGetUTupleBuf(ReorderBuffer *rb, Size tupleL void ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple) { /* check whether to put into the slab cache, oversized tuples never are */ - if (tuple->alloc_tuple_size == MaxHeapTupleSize && + if (tuple->alloc_tuple_size == MaxReorderBufferTupleSize && rb->nr_cached_tuplebufs < (unsigned)g_instance.attr.attr_common.max_cached_tuplebufs) { rb->nr_cached_tuplebufs++; slist_push_head(&rb->cached_tuplebufs, &tuple->node); @@ -488,7 +450,7 @@ void ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple * (with the given LSN, and as top transaction if that's specified); * when this happens, is_new is set to true. */ -static ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, +ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, XLogRecPtr lsn, bool create_as_top) { ReorderBufferTXN *txn = NULL; @@ -1051,7 +1013,7 @@ static void ReorderBufferIterTXNFinish(ReorderBuffer *rb, ReorderBufferIterTXNSt * Cleanup the contents of a transaction, usually after the transaction * committed or aborted. */ -static void ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, XLogRecPtr lsn = InvalidXLogRecPtr) +void ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, XLogRecPtr lsn = InvalidXLogRecPtr) { bool found = false; dlist_mutable_iter iter; @@ -1289,6 +1251,7 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit volatile Snapshot snapshot_now = NULL; volatile bool txn_started = false; volatile bool subtxn_started = false; + u_sess->attr.attr_common.extra_float_digits = LOGICAL_DECODE_EXTRA_FLOAT_DIGITS; txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr, false); /* unknown transaction, nothing to replay */ @@ -1358,6 +1321,7 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit case REORDER_BUFFER_CHANGE_INSERT: case REORDER_BUFFER_CHANGE_UPDATE: case REORDER_BUFFER_CHANGE_DELETE: + u_sess->utils_cxt.HistoricSnapshot->snapshotcsn = change->data.tp.snapshotcsn; Assert(snapshot_now); isSegment = IsSegmentFileNode(change->data.tp.relnode); @@ -1392,6 +1356,13 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit continue; } + /* + * Do not decode private tables, otherwise there will be security problems. 
+ */ + if (is_role_independent(FindRoleid(reloid))) { + continue; + } + if (CSTORE_NAMESPACE == get_rel_namespace(RelationGetRelid(relation))) { continue; } @@ -1488,6 +1459,7 @@ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, XLogRecPtr commit case REORDER_BUFFER_CHANGE_UINSERT: case REORDER_BUFFER_CHANGE_UDELETE: case REORDER_BUFFER_CHANGE_UUPDATE: + u_sess->utils_cxt.HistoricSnapshot->snapshotcsn = change->data.utp.snapshotcsn; Assert(snapshot_now); reloid = @@ -1880,8 +1852,9 @@ static void ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTX { uint32 i; - for (i = 0; i < txn->ninvalidations; i++) - LocalExecuteInvalidationMessage(&txn->invalidations[i]); + for (i = 0; i < txn->ninvalidations; i++) { + LocalExecuteThreadAndSessionInvalidationMessage(&txn->invalidations[i]); + } } /* @@ -2067,10 +2040,11 @@ static void ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *tx switch (change->action) { case REORDER_BUFFER_CHANGE_INSERT: - /* fall through */ case REORDER_BUFFER_CHANGE_UPDATE: - /* fall through */ - case REORDER_BUFFER_CHANGE_DELETE: { + case REORDER_BUFFER_CHANGE_DELETE: + case REORDER_BUFFER_CHANGE_UINSERT: + case REORDER_BUFFER_CHANGE_UDELETE: + case REORDER_BUFFER_CHANGE_UUPDATE: { char *data = NULL; ReorderBufferTupleBuf *oldtup = NULL; ReorderBufferTupleBuf *newtup = NULL; @@ -2157,10 +2131,6 @@ static void ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *tx case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID: /* ReorderBufferChange contains everything important */ break; - case REORDER_BUFFER_CHANGE_UINSERT: - case REORDER_BUFFER_CHANGE_UDELETE: - case REORDER_BUFFER_CHANGE_UUPDATE: - break; } ondisk->size = sz; @@ -2369,6 +2339,37 @@ static void ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_UINSERT: case REORDER_BUFFER_CHANGE_UDELETE: case REORDER_BUFFER_CHANGE_UUPDATE: + if (change->data.utp.oldtuple) { + Size tuplelen = ((UHeapTuple)data)->disk_tuple_size; + change->data.utp.oldtuple = ReorderBufferGetUTupleBuf(rb, tuplelen - SizeOfUHeapHeader); + + /* restore ->tuple */ + rc = memcpy_s(&change->data.utp.oldtuple->tuple, sizeof(UHeapTupleData), data, sizeof(UHeapTupleData)); + securec_check(rc, "", ""); + data += sizeof(UHeapTupleData); + change->data.utp.oldtuple->tuple.disk_tuple = ReorderBufferUTupleBufData(change->data.utp.oldtuple); + /* restore tuple data itself */ + rc = memcpy_s(change->data.utp.oldtuple->tuple.disk_tuple, tuplelen, data, tuplelen); + securec_check(rc, "", ""); + data += tuplelen; + } + if (change->data.utp.newtuple) { + Size tuplelen = ((UHeapTuple)data)->disk_tuple_size; + change->data.utp.newtuple = ReorderBufferGetUTupleBuf(rb, tuplelen - SizeOfUHeapHeader); + + /* restore ->tuple */ + rc = memcpy_s(&change->data.utp.newtuple->tuple, sizeof(UHeapTupleData), data, sizeof(UHeapTupleData)); + securec_check(rc, "", ""); + data += sizeof(UHeapTupleData); + + /* reset t_data pointer into the new tuplebuf */ + change->data.utp.newtuple->tuple.disk_tuple = ReorderBufferUTupleBufData(change->data.utp.newtuple); + + /* restore tuple data itself */ + rc = memcpy_s(change->data.utp.newtuple->tuple.disk_tuple, tuplelen, data, tuplelen); + securec_check(rc, "", ""); + data += tuplelen; + } break; } @@ -2411,6 +2412,31 @@ static void ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn } } +/* + * Check dirent. 
+ */
+static void CheckPath(char* path, int length, struct dirent *logical_de)
+{
+    struct dirent *spill_de = NULL;
+    DIR *spill_dir = NULL;
+    int rc = 0;
+
+    spill_dir = AllocateDir(path);
+    while ((spill_de = ReadDir(spill_dir, path)) != NULL) {
+        if (strcmp(spill_de->d_name, ".") == 0 || strcmp(spill_de->d_name, "..") == 0)
+            continue;
+
+        /* only look at names that can be ours */
+        if (strncmp(spill_de->d_name, "xid", 3) == 0) {
+            rc = sprintf_s(path, length, "pg_replslot/%s/%s", logical_de->d_name, spill_de->d_name);
+            securec_check_ss(rc, "", "");
+            if (unlink(path) != 0)
+                ereport(PANIC, (errcode_for_file_access(), errmsg("could not unlink file \"%s\": %m", path)));
+        }
+    }
+    (void)FreeDir(spill_dir);
+}
+
 /*
  * Delete all data spilled to disk after we've restarted/crashed. It will be
  * recreated when the respective slots are reused.
@@ -2420,8 +2446,6 @@ void StartupReorderBuffer(void)
     DIR *logical_dir = NULL;
     struct dirent *logical_de = NULL;
 
-    DIR *spill_dir = NULL;
-    struct dirent *spill_de = NULL;
     int rc = 0;
     logical_dir = AllocateDir("pg_replslot");
     while ((logical_de = ReadDir(logical_dir, "pg_replslot")) != NULL) {
@@ -2444,21 +2468,7 @@ void StartupReorderBuffer(void)
         /* we're only creating directories here, skip if it's not our's */
         if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode))
             continue;
-
-        spill_dir = AllocateDir(path);
-        while ((spill_de = ReadDir(spill_dir, path)) != NULL) {
-            if (strcmp(spill_de->d_name, ".") == 0 || strcmp(spill_de->d_name, "..") == 0)
-                continue;
-
-            /* only look at names that can be ours */
-            if (strncmp(spill_de->d_name, "xid", 3) == 0) {
-                rc = sprintf_s(path, sizeof(path), "pg_replslot/%s/%s", logical_de->d_name, spill_de->d_name);
-                securec_check_ss(rc, "", "");
-                if (unlink(path) != 0)
-                    ereport(PANIC, (errcode_for_file_access(), errmsg("could not unlink file \"%s\": %m", path)));
-            }
-        }
-        (void)FreeDir(spill_dir);
+        CheckPath(path, MAXPGPATH, logical_de);
     }
     (void)FreeDir(logical_dir);
 }
@@ -2542,7 +2552,7 @@ static void ReorderBufferToastAppendChunk(ReorderBuffer *rb, ReorderBufferTXN *t
     if (isnull) {
         ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("fail to get toast chunk")));
     }
-
+    checkHugeToastPointer((varlena *)chunk);
     /* calculate size so we can allocate the right size at once later */
     if (!VARATT_IS_EXTENDED(chunk)) {
         chunksize = VARSIZE(chunk) - VARHDRSZ;
@@ -2641,6 +2651,7 @@ static void ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn,
 
     /* ok, we know we have a toast datum */
     varlena = (struct varlena *)DatumGetPointer(attrs[natt]);
+    checkHugeToastPointer(varlena);
     /* no need to do anything if the tuple isn't external */
     if (!VARATT_IS_EXTERNAL(varlena))
         continue;
diff --git a/src/gausskernel/storage/replication/logical/snapbuild.cpp b/src/gausskernel/storage/replication/logical/snapbuild.cpp
index c1bbb7681..025f1626e 100644
--- a/src/gausskernel/storage/replication/logical/snapbuild.cpp
+++ b/src/gausskernel/storage/replication/logical/snapbuild.cpp
@@ -816,6 +816,30 @@ static void SnapBuildPurgeCommittedTxn(SnapBuild *builder)
     workspace = NULL;
 }
 
+/*
+ * SnapBuild handles a committed transaction.
+ */ +static void SnapBuildChange(bool forced_timetravel, bool *top_needs_timetravel, bool sub_needs_timetravel, + SnapBuild *builder, TransactionId xid) +{ + if (forced_timetravel) { + if (!RecoveryInProgress()) { + ereport(DEBUG1, (errmsg("forced transaction %lu to do timetravel.", xid))); + } + SnapBuildAddCommittedTxn(builder, xid); + } else if (ReorderBufferXidHasCatalogChanges(builder->reorder, xid)) { + /* add toplevel transaction to base snapshot */ + if (!RecoveryInProgress()) { + ereport(DEBUG2, (errmsg("found top level transaction %lu, with catalog changes!", xid))); + } + *top_needs_timetravel = true; + SnapBuildAddCommittedTxn(builder, xid); + } else if (sub_needs_timetravel) { + /* mark toplevel txn as timetravel as well */ + SnapBuildAddCommittedTxn(builder, xid); + } +} + /* * Handle everything that needs to be done when a transaction commits */ @@ -880,22 +904,7 @@ void SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid, i } } - if (forced_timetravel) { - if (!RecoveryInProgress()) { - ereport(DEBUG1, (errmsg("forced transaction %lu to do timetravel.", xid))); - } - SnapBuildAddCommittedTxn(builder, xid); - } else if (ReorderBufferXidHasCatalogChanges(builder->reorder, xid)) { - /* add toplevel transaction to base snapshot */ - if (!RecoveryInProgress()) { - ereport(DEBUG2, (errmsg("found top level transaction %lu, with catalog changes!", xid))); - } - top_needs_timetravel = true; - SnapBuildAddCommittedTxn(builder, xid); - } else if (sub_needs_timetravel) { - /* mark toplevel txn as timetravel as well */ - SnapBuildAddCommittedTxn(builder, xid); - } + SnapBuildChange(forced_timetravel, &top_needs_timetravel, sub_needs_timetravel, builder, xid); /* if there's any reason to build a historic snapshot, to so now */ if (forced_timetravel || top_needs_timetravel || sub_needs_timetravel) { @@ -1478,6 +1487,20 @@ out: ReorderBufferSetRestartPoint(builder->reorder, builder->last_serialized_snapshot); } +static void CheckDiskFile(SnapBuildOnDisk* ondisk, char* path, size_t pathLen) +{ + if (ondisk->magic != SNAPBUILD_MAGIC) { + ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("snapbuild state file \"%s\" has wrong magic %u instead of %d", path, ondisk->magic, + SNAPBUILD_MAGIC))); + } + + if (ondisk->version != SNAPBUILD_VERSION) { + ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("snapbuild state file \"%s\" has unsupported version %u instead of %d", path, + ondisk->version, SNAPBUILD_VERSION))); + } +} /* * Restore a snapshot into 'builder' if previously one has been stored at the * location indicated by 'lsn'. Returns true if successful, false otherwise. 
@@ -1524,17 +1547,7 @@ static bool SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) readBytes, SnapBuildOnDiskConstantSize))); } - if (ondisk.magic != SNAPBUILD_MAGIC) { - ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), - errmsg("snapbuild state file \"%s\" has wrong magic %u instead of %d", path, ondisk.magic, - SNAPBUILD_MAGIC))); - } - - if (ondisk.version != SNAPBUILD_VERSION) { - ereport(ERROR, (errcode(ERRCODE_LOGICAL_DECODE_ERROR), - errmsg("snapbuild state file \"%s\" has unsupported version %u instead of %d", path, - ondisk.version, SNAPBUILD_VERSION))); - } + CheckDiskFile(&ondisk, path, strlen(path)); INIT_CRC32(checksum); COMP_CRC32(checksum, ((char *)&ondisk) + SnapBuildOnDiskNotChecksummedSize, @@ -1651,6 +1664,35 @@ snapshot_not_interesting: return false; } +static DIR *CheckSnapDir(const char *basedirpath, const char *snsdirpath) +{ + DIR *snap_dir = AllocateDir(snsdirpath); + if (!(snap_dir == NULL && errno == ENOENT)) { + return snap_dir; + } + + /* create dir if not exist */ + if (mkdir(snsdirpath, S_IRWXU) < 0) { + if (errno == ENOENT) { + /* create parent dir first if not exist */ + if (mkdir(basedirpath, S_IRWXU) < 0) { + ereport(ERROR, + (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", basedirpath))); + } + /* and then create the sub dir */ + if (mkdir(snsdirpath, S_IRWXU) < 0) { + ereport(ERROR, + (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", snsdirpath))); + } + } else { + /* Failure other than not exists */ + ereport(ERROR, + (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", snsdirpath))); + } + } + snap_dir = AllocateDir(snsdirpath); + return snap_dir; +} /* * Remove all serialized snapshots that are not required anymore because no * slot can need them. 
This doesn't actually have to run during a checkpoint, @@ -1682,29 +1724,7 @@ void CheckPointSnapBuild(void) cutoff = redo; } - snap_dir = AllocateDir(snsdirpath); - if (snap_dir == NULL && errno == ENOENT) { - /* create dir if not exist */ - if (mkdir(snsdirpath, S_IRWXU) < 0) { - if (errno == ENOENT) { - /* create parent dir first if not exist */ - if (mkdir(basedirpath, S_IRWXU) < 0) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", basedirpath))); - } - /* and then create the sub dir */ - if (mkdir(snsdirpath, S_IRWXU) < 0) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", snsdirpath))); - } - } else { - /* Failure other than not exists */ - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", snsdirpath))); - } - } - snap_dir = AllocateDir(snsdirpath); - } + snap_dir = CheckSnapDir(basedirpath, snsdirpath); while ((snap_de = ReadDir(snap_dir, snsdirpath)) != NULL) { uint32 hi; uint32 lo; diff --git a/src/gausskernel/storage/replication/logical/worker.cpp b/src/gausskernel/storage/replication/logical/worker.cpp index e3e9fd779..6647249ea 100644 --- a/src/gausskernel/storage/replication/logical/worker.cpp +++ b/src/gausskernel/storage/replication/logical/worker.cpp @@ -93,6 +93,7 @@ #include "utils/relcache.h" static const int NAPTIME_PER_CYCLE = 10; /* max sleep time between cycles (10ms) */ +static const float HALF = 0.5; typedef struct FlushPosition { dlist_node node; @@ -965,7 +966,7 @@ static bool CheckTimeout(bool *pingSent, TimestampTz lastRecvTimestamp) */ if (!(*pingSent)) { timeout = TimestampTzPlusMilliseconds(lastRecvTimestamp, - (u_sess->attr.attr_storage.wal_receiver_timeout / 2)); + (u_sess->attr.attr_storage.wal_receiver_timeout * HALF)); if (now >= timeout) { requestReply = true; *pingSent = true; @@ -976,6 +977,14 @@ static bool CheckTimeout(bool *pingSent, TimestampTz lastRecvTimestamp) return requestReply; } +static inline void ProcessApplyWorkerInterrupts(void) +{ + if (t_thrd.applyworker_cxt.got_SIGTERM) { + ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), + errmsg("terminating logical replication worker due to administrator command"))); + } +} + /* * Apply main loop. */ @@ -1015,6 +1024,7 @@ static void ApplyLoop(void) ApplyWorkerProcessMsg(type, &s, &last_received); while ((WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_receive(0, &type, &buf, &len)) { + ProcessApplyWorkerInterrupts(); last_recv_timestamp = GetCurrentTimestamp(); ping_sent = false; s.data = buf; @@ -1395,7 +1405,7 @@ void ApplyWorkerMain() PGC_BACKEND, PGC_S_OVERRIDE); /* Keep us informed about subscription changes. */ - CacheRegisterSyscacheCallback(SUBSCRIPTIONOID, subscription_change_cb, (Datum)0); + CacheRegisterThreadSyscacheCallback(SUBSCRIPTIONOID, subscription_change_cb, (Datum)0); ereport(LOG, (errmsg("logical replication apply for worker subscription \"%s\" has started", t_thrd.applyworker_cxt.mySubscription->name))); @@ -1412,16 +1422,12 @@ void ApplyWorkerMain() CommitTransactionCommand(); - /* Sensitive options for subscription, will be encrypted when saved to catalog. 
*/ - const char* sensitiveOptionsArray[] = {"password"}; - const int sensitiveArrayLength = lengthof(sensitiveOptionsArray); - List *defList = ConninfoToDefList(t_thrd.applyworker_cxt.mySubscription->conninfo); - DecryptOptions(defList, sensitiveOptionsArray, sensitiveArrayLength, SUBSCRIPTION_MODE); - char *conninfo = DefListToString(defList); - bool connectSuccess = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_connect(conninfo, NULL, + char *decryptConnInfo = DecryptConninfo(t_thrd.applyworker_cxt.mySubscription->conninfo); + bool connectSuccess = (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_connect(decryptConnInfo, NULL, t_thrd.applyworker_cxt.mySubscription->name, -1); - pfree_ext(defList); - pfree_ext(conninfo); + rc = memset_s(decryptConnInfo, strlen(decryptConnInfo), 0, strlen(decryptConnInfo)); + securec_check(rc, "", ""); + pfree_ext(decryptConnInfo); if (!connectSuccess) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("could not connect to the publisher"))); } @@ -1474,6 +1480,7 @@ List* ConninfoToDefList(const char *conn) char* cp = NULL; char* cp2 = NULL; char *buf = pstrdup(conn); + size_t len = strlen(buf); cp = buf; @@ -1531,7 +1538,7 @@ List* ConninfoToDefList(const char *conn) for (;;) { if (*cp == '\0') { pfree(buf); - ereport(LOG, (errmsg("unterminated quoted string in connection info string"))); + ereport(ERROR, (errmsg("unterminated quoted string in connection info string"))); } if (*cp == '\\') { cp++; @@ -1554,14 +1561,18 @@ List* ConninfoToDefList(const char *conn) */ result = lappend(result, makeDefElem(pstrdup(pname), (Node *)makeString(pstrdup(pval)))); } + + int rc = memset_s(buf, len, 0, len); + securec_check(rc, "", ""); pfree(buf); return result; } char* DefListToString(const List *defList) { - if (defList == NIL) { - return NULL; + if (unlikely(defList == NIL)) { + ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), + errmsg("LogicDecode[Publication]: failed to transfer DefElem list to string, null input"))); } ListCell* cell = NULL; diff --git a/src/gausskernel/storage/replication/obswalreceiver.cpp b/src/gausskernel/storage/replication/obswalreceiver.cpp deleted file mode 100755 index 9f89f90ae..000000000 --- a/src/gausskernel/storage/replication/obswalreceiver.cpp +++ /dev/null @@ -1,854 +0,0 @@ -/* - * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 2010-2012, PostgreSQL Global Development Group - * - * obswalreceiver.cpp - * - * Description: This file contains the obs-specific parts of walreceiver. It's - * loaded as a dynamic module to avoid linking the main server binary with - * obs. 
- * - * IDENTIFICATION - * src/gausskernel/storage/replication/obswalreceiver.cpp - * - * ------------------------------------------------------------------------- - */ -#include "postgres.h" -#include "knl/knl_variable.h" - -#include - -#include "replication/obswalreceiver.h" -#include "libpq/libpq-int.h" -#include "access/xlog.h" -#include "access/xlog_internal.h" -#include "nodes/pg_list.h" -#include "access/obs/obs_am.h" -#include "utils/timestamp.h" -#include "miscadmin.h" -#include "replication/walreceiver.h" -#include "replication/obswalreceiver.h" -#include "replication/slot.h" -#include "storage/pmsignal.h" -#include "storage/proc.h" -#include "utils/guc.h" -#include "pgxc/pgxc.h" -#define CUR_OBS_FILE_VERSION 1 -#define OBS_ARCHIVE_STATUS_FILE "obs_archive_start_end_record" - -static char *path_skip_prefix(char *path); - -static bool IsObsXlogBeyondRequest(XLogRecPtr startPtr, const List *object_list) -{ - char* fileName; - char* xlogFileName; - char* tempToken = NULL; - uint32 xlogReadLogid = -1; - uint32 xlogReadLogSeg = -1; - TimeLineID tli = 0; - uint32 startSeg; - ListCell* cell = NULL; - - if (object_list == NIL || object_list->head->next == NULL) { - ereport(ERROR, (errmsg("there is no xlog file on obs server."))); - return false; - } - cell = list_head(object_list)->next; - fileName = (char*)lfirst(cell); - fileName = strrchr(fileName, '/'); - fileName = fileName + 1; - xlogFileName = strtok_s(fileName, "_", &tempToken); - if (sscanf_s(xlogFileName, "%08X%08X%08X", &tli, &xlogReadLogid, &xlogReadLogSeg) != 3) { - ereport(ERROR, (errmsg("failed to translate name to xlog: %s\n", xlogFileName))); - } - XLByteToSeg(startPtr, startSeg); - if ((startSeg / XLogSegmentsPerXLogId) < xlogReadLogid || - ((startSeg / XLogSegmentsPerXLogId) == xlogReadLogid && - (startSeg % XLogSegmentsPerXLogId) < xlogReadLogSeg)) { - ereport(LOG, (errmsg("the xlog file on obs server is newer than local request, need build.\n"))); - return true; - } - return false; -} - -bool obs_replication_read_file(const char* fileName, char* content, int contentLen, const char *slotName) -{ - List *object_list = NIL; - size_t readLen = 0; - errno_t rc = 0; - ObsArchiveConfig obsConfig; - ObsArchiveConfig *archive_obs = NULL; - char pathPrefix[MAXPGPATH] = {0}; - ArchiveSlotConfig *obsArchiveSlot = NULL; - if (slotName != NULL) { - obsArchiveSlot = getObsReplicationSlotWithName(slotName); - if (obsArchiveSlot == NULL) { - ereport(LOG, (errmsg("Cannot get obs bucket config from replication slots"))); - return false; - } - archive_obs = &obsArchiveSlot->archive_obs; - } else { - archive_obs = getObsArchiveConfig(); - if (archive_obs == NULL) { - ereport(LOG, (errmsg("Cannot get obs bucket config from replication slots"))); - return false; - } - } - - /* copy OBS configs to temporary variable for customising file path */ - rc = memcpy_s(&obsConfig, sizeof(ObsArchiveConfig), archive_obs, sizeof(ObsArchiveConfig)); - securec_check(rc, "", ""); - - if (!IS_PGXC_COORDINATOR) { - rc = strcpy_s(pathPrefix, MAXPGPATH, obsConfig.obs_prefix); - securec_check_c(rc, "\0", "\0"); - - char *p = strrchr(pathPrefix, '/'); - if (p == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Obs path prefix is invalid"))); - } - *p = '\0'; - obsConfig.obs_prefix = pathPrefix; - } - - object_list = obsList(fileName, &obsConfig, false); - if (object_list == NIL || object_list->length <= 0) { - ereport(LOG, (errmsg("The file named %s cannot be found.", fileName))); - return false; - } - - readLen = obsRead(fileName, 
0, content, contentLen, &obsConfig); - if (readLen == 0) { - ereport(LOG, (errmsg("Cannot read content in %s file!", fileName))); - return false; - } - return true; -} - -void update_stop_barrier() -{ - errno_t rc = EOK; - bool hasFailoverBarrier = false; - bool hasSwitchoverBarrier = false; - /* use volatile pointer to prevent code rearrangement */ - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - char failoverBarrier[MAX_BARRIER_ID_LENGTH] = {0}; - char switchoverBarrier[MAX_BARRIER_ID_LENGTH] = {0}; - - // The failover and switchover procedures cannot coexist. - hasFailoverBarrier = obs_replication_read_file(HADR_FAILOVER_BARRIER_ID_FILE, - (char *)failoverBarrier, MAX_BARRIER_ID_LENGTH); - hasSwitchoverBarrier = obs_replication_read_file(HADR_SWITCHOVER_BARRIER_ID_FILE, - (char *)switchoverBarrier, MAX_BARRIER_ID_LENGTH); - if (hasFailoverBarrier == true && hasSwitchoverBarrier == true) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("The failover and switchover procedures cannot coexist." - "failover barrierID %s, switchover barrierID %s.", - (char *)failoverBarrier, (char *)switchoverBarrier))); - } - - if (hasFailoverBarrier == true) { - rc = strncpy_s((char *)walrcv->recoveryStopBarrierId, MAX_BARRIER_ID_LENGTH, - (char *)failoverBarrier, MAX_BARRIER_ID_LENGTH - 1); - securec_check(rc, "\0", "\0"); - ereport(LOG, (errmsg("Get failover barrierID %s", (char *)walrcv->recoveryStopBarrierId))); - } - - if (hasSwitchoverBarrier == true) { - rc = strncpy_s((char *)walrcv->recoverySwitchoverBarrierId, MAX_BARRIER_ID_LENGTH, - (char *)switchoverBarrier, MAX_BARRIER_ID_LENGTH - 1); - securec_check(rc, "\0", "\0"); - ereport(LOG, (errmsg("Get switchover barrierID %s", (char *)walrcv->recoverySwitchoverBarrierId))); - } -} - -void update_recovery_barrier() -{ - errno_t rc = EOK; - /* use volatile pointer to prevent code rearrangement */ - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - char barrier[MAX_BARRIER_ID_LENGTH] = {0}; - if (obs_replication_read_file(HADR_BARRIER_ID_FILE, (char *)barrier, MAX_BARRIER_ID_LENGTH)) { - if (strcmp((char *)barrier, (char *)walrcv->recoveryTargetBarrierId) < 0) { - ereport(ERROR, (errmodule(MOD_REDO), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("The new global barrier is smaller than the last one."))); - } else { - rc = strncpy_s((char *)walrcv->recoveryTargetBarrierId, MAX_BARRIER_ID_LENGTH, - (char *)barrier, MAX_BARRIER_ID_LENGTH - 1); - securec_check(rc, "\0", "\0"); - } - } -} - -bool obs_connect(char* conninfo, XLogRecPtr* startpoint, char* slotname, int channel_identifier) -{ - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - walrcv->archive_slot = getObsRecoverySlot(); - if (walrcv->archive_slot == NULL) { - ereport(ERROR, (errcode(ERRCODE_UNEXPECTED_NULL_VALUE), - errmsg("[walreceiver_connect_obs]could not get obs relication slot"))); - return false; - } - - walrcv->peer_role = PRIMARY_MODE; - walrcv->peer_state = NORMAL_STATE; - walrcv->isFirstTimeAccessObs = true; - if (t_thrd.libwalreceiver_cxt.recvBuf != NULL) { - PQfreemem(t_thrd.libwalreceiver_cxt.recvBuf); - } - t_thrd.libwalreceiver_cxt.recvBuf = NULL; - uint32 totalLen = sizeof(WalDataMessageHeader) + OBS_XLOG_SLICE_BLOCK_SIZE + 1; - t_thrd.libwalreceiver_cxt.recvBuf = (char*)malloc(totalLen); - if (t_thrd.libwalreceiver_cxt.recvBuf == NULL) { - ereport(LOG, (errmsg("obs_receive:Receive Buffer out of memory.\n"))); - return false; - } - /* The full recovery of disaster recovery scenarios has ended */ 
- g_instance.roach_cxt.isRoachRestore = false; - - /* Use OBS to complete DR and set the original replication link status to normal. */ - volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData; - char standbyClusterStat[MAX_DEFAULT_LENGTH] = {0}; - obs_replication_read_file(HADR_STANDBY_CLUSTER_STAT_FILE, standbyClusterStat, - MAX_DEFAULT_LENGTH); - - if (strncmp(standbyClusterStat, HADR_IN_NORMAL, strlen(HADR_IN_NORMAL)) == 0) { - ereport(WARNING, (errmsg("===obs_connect===\n " - "The cluster DR relationship has been removed, " - "but the instance slot still exists. slot name is %s", walrcv->archive_slot->slotname))); - ReplicationSlotDrop(walrcv->archive_slot->slotname); - SpinLockAcquire(&hashmdata->mutex); - hashmdata->repl_reason[hashmdata->current_repl] = WALSEGMENT_REBUILD; - SpinLockRelease(&hashmdata->mutex); - - SpinLockAcquire(&walrcv->mutex); - walrcv->conn_errno = REPL_INFO_ERROR; - SpinLockRelease(&walrcv->mutex); - } else { - SpinLockAcquire(&hashmdata->mutex); - hashmdata->repl_reason[hashmdata->current_repl] = NONE_REBUILD; - SpinLockRelease(&hashmdata->mutex); - - SpinLockAcquire(&walrcv->mutex); - walrcv->conn_errno = NONE_ERROR; - walrcv->node_state = NODESTATE_NORMAL; - SpinLockRelease(&walrcv->mutex); - } - - /* Only postmaster can update gaussdb.state file */ - SendPostmasterSignal(PMSIGNAL_UPDATE_HAREBUILD_REASON); - return true; -} - -bool obs_receive(int timeout, unsigned char* type, char** buffer, int* len) -{ - int dataLength; - XLogRecPtr startPtr; - char* recvBuf = t_thrd.libwalreceiver_cxt.recvBuf; - errno_t rc = EOK; - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - XLogRecPtr lastReplayPtr; - - // The start LSN used for the first access to OBS - // is the same as that of the streaming replication function. 
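- // In short (descriptive note, mirroring the branch below): startPtr is
- // walrcv->receiveStart on the first access, and walRcvCtlBlock->receivePtr afterwards,
- // so a restarted receiver resumes from the last LSN already handed to the xlog writer.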
- if (walrcv->isFirstTimeAccessObs) { - startPtr = walrcv->receiveStart; - walrcv->isFirstTimeAccessObs = false; - } else { - // t_thrd.walreceiver_cxt.walRcvCtlBlock->receivePtr will be updated in XLogWalRcvReceive() - SpinLockAcquire(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - startPtr = t_thrd.walreceiver_cxt.walRcvCtlBlock->receivePtr; - SpinLockRelease(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - } - /* the unit of max_size_for_xlog_receiver is KB */ - uint64 maxRequestSize = ((uint64)g_instance.attr.attr_storage.max_size_for_xlog_receiver << 10); - lastReplayPtr = GetXLogReplayRecPtr(NULL); - if ((startPtr > lastReplayPtr) && (startPtr - lastReplayPtr >= maxRequestSize)) { - ereport(WARNING, (errmsg("The requested xlog %08X/%08X is beyond the local max xlog size, stop requesting", - (uint32)(startPtr >> 32), (uint32)startPtr))); - pg_usleep(timeout * 1000); - return false; - } - - WalDataMessageHeader msghdr; - // init - msghdr.dataStart = startPtr; - msghdr.walEnd = InvalidXLogRecPtr; - msghdr.sendTime = GetCurrentTimestamp(); - msghdr.sender_sent_location = InvalidXLogRecPtr; - msghdr.sender_write_location = InvalidXLogRecPtr; - msghdr.sender_replay_location = InvalidXLogRecPtr; - msghdr.sender_flush_location = InvalidXLogRecPtr; - msghdr.catchup = false; - - int headLen = sizeof(WalDataMessageHeader); - int totalLen = headLen + OBS_XLOG_SLICE_BLOCK_SIZE + 1; - // copy WalDataMessageHeader - rc = memcpy_s(recvBuf, totalLen, &msghdr, headLen); - securec_check(rc, "", ""); - // copy xlog from obs - char* dataLocation = recvBuf + headLen; - (void)obs_replication_receive(startPtr, &dataLocation, &dataLength, timeout, NULL); - if (dataLength <= 0) { - return false; - } - - int validLen = headLen + dataLength; - recvBuf[validLen] = '\0'; /* Add terminating null */ - - elog(LOG, "[obs_receive]get xlog startlsn %08X/%08X, len %X", - (uint32)(startPtr >> 32), (uint32)startPtr, (uint32)validLen); - - /* Return received messages to caller */ - *type = 'w'; - *buffer = recvBuf; - *len = validLen; - return true; -} - - -void obs_send(const char *buffer, int nbytes) -{ -} - -void obs_disconnect(void) -{ - if (t_thrd.libwalreceiver_cxt.recvBuf != NULL) { - PQfreemem(t_thrd.libwalreceiver_cxt.recvBuf); - } - t_thrd.libwalreceiver_cxt.recvBuf = NULL; - - return; -} - -static char *obs_replication_get_xlog_prefix(XLogRecPtr recptr, bool onlyPath) -{ - errno_t rc = EOK; - char xlogfname[MAXFNAMELEN]; - char xlogfpath[MAXPGPATH]; - XLogSegNo xlogSegno = 0; - TimeLineID timeLine = DEFAULT_TIMELINE_ID; - - rc = memset_s(xlogfname, MAXFNAMELEN, 0, MAXFNAMELEN); - securec_check_ss_c(rc, "", ""); - rc = memset_s(xlogfpath, MAXPGPATH, 0, MAXPGPATH); - securec_check_ss_c(rc, "", ""); - - /* When onlyPath is true, generate only the directory path of pg_xlog on OBS; otherwise append the xlog slice file name */ - if (onlyPath == false) { - XLByteToSeg(recptr, xlogSegno); - rc = snprintf_s(xlogfname, MAXFNAMELEN, MAXFNAMELEN - 1, "%08X%08X%08X_%02u", timeLine, - (uint32)((xlogSegno) / XLogSegmentsPerXLogId), (uint32)((xlogSegno) % XLogSegmentsPerXLogId), - (uint32)((recptr / OBS_XLOG_SLICE_BLOCK_SIZE) & OBS_XLOG_SLICE_NUM_MAX)); - securec_check_ss_c(rc, "", ""); - } - if (IS_PGXC_COORDINATOR) { - if (IS_CNDISASTER_RECOVER_MODE) { - if (get_local_key_cn() == NULL) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FILE), errmsg("There is no hadr_key_cn"))); - return NULL; - } - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", - get_local_key_cn(), XLOGDIR, xlogfname); - securec_check_ss_c(rc, "", ""); - } else { - rc =
snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s/%s", - g_instance.attr.attr_common.PGXCNodeName, XLOGDIR, xlogfname); - securec_check_ss_c(rc, "", ""); - } - } else { - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%s", xlogfname); - securec_check_ss_c(rc, "", ""); - } - - return pstrdup(xlogfpath); -} - -static char *path_skip_prefix(char *path) -{ - char *key = path; - /* Skip path prefix, prefix format:'xxxx/cn/' */ - key = strrchr(key, '/'); - if (key == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("The xlog file path is invalid"))); - } - key = key + 1; // Skip character '/' - return key; -} - -static char *get_last_filename_from_list(const List *object_list) -{ - // The list returned from OBS is in lexicographic order. - ListCell* cell = list_tail(object_list); - if (cell == NULL || (lfirst(cell) == NULL)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FILE), - errmsg("The xlog file does not exist on obs"))); - } - - /* Skip path prefix, prefix format:'xxxx/cn/' */ - char *key = strstr((char *)lfirst(cell), XLOGDIR); - - return pstrdup(key); -} - -/* - * xlog slice name format: {fileNamePrefix}/{timeline}+{LSN/16M/256}+{LSN/16M%256}_{slice}_{term}_{subTerm} - * samples: obs://{bucket}/xxxx/cn/pg_xlog/000000010000000000000003_08_00000002_00000005 - */ -static char *obs_replication_get_last_xlog_slice(XLogRecPtr startPtr, bool onlyPath, bool needUpdateDBState, - ObsArchiveConfig* archive_obs) -{ - char *fileNamePrefix = NULL; - char *fileName = NULL; - List *object_list = NIL; - List *obsXlogList = NIL; - char xlogfpath[MAXPGPATH]; - errno_t rc = EOK; - - fileNamePrefix = obs_replication_get_xlog_prefix(startPtr, onlyPath); - - if (IS_CNDISASTER_RECOVER_MODE) { - if (get_local_key_cn() == NULL) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FILE), errmsg("There is no hadr_key_cn"))); - return NULL; - } - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", get_local_key_cn(), XLOGDIR); - securec_check_ss_c(rc, "", ""); - } else { - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s", XLOGDIR); - securec_check_ss_c(rc, "", ""); - } - - if (needUpdateDBState) { - obsXlogList = obsList(xlogfpath, archive_obs); - if (IsObsXlogBeyondRequest(startPtr, obsXlogList)) { - SetObsRebuildReason(WALSEGMENT_REBUILD); - ereport(ERROR, (errcode(ERRCODE_INVALID_STATUS), - errmsg("standby's local request lsn[%X/%X] mismatched with remote server", - (uint32)(startPtr >> 32), (uint32)startPtr))); - } - } - - object_list = obsList(fileNamePrefix, archive_obs); - if (object_list == NIL || object_list->length <= 0) { - ereport(LOG, (errmsg("The OBS objects with the prefix %s cannot be found.", fileNamePrefix))); - pfree(fileNamePrefix); - return NULL; - } - - if (IS_CNDISASTER_RECOVER_MODE) { - char tmpFileName[MAXPGPATH]; - rc = snprintf_s(tmpFileName, MAXPGPATH, MAXPGPATH - 1, "%s/%s", get_local_key_cn(), - get_last_filename_from_list(object_list)); - securec_check_ss_c(rc, "", ""); - fileName = pstrdup(tmpFileName); - } else { - fileName = get_last_filename_from_list(object_list); - } - if (fileName == NULL) { - ereport(LOG, (errmsg("Cannot get xlog file name with prefix: %s, obs list length: %d", - fileNamePrefix, object_list->length))); - } - - pfree(fileNamePrefix); - list_free_deep(object_list); - return fileName; -} - - -/* - * Read the Xlog file that is calculated using the start LSN and whose name contains the maximum term. - * Returns the Xlog from the start position to the last.
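- * Illustrative slice math, mirroring the code below (assuming OBS_XLOG_SLICE_BLOCK_SIZE
- * divides XLogSegSize):
- *   slice  = (startPtr / OBS_XLOG_SLICE_BLOCK_SIZE) & OBS_XLOG_SLICE_NUM_MAX;
- *   offset = (startPtr % XLogSegSize) & (OBS_XLOG_SLICE_BLOCK_SIZE - 1);
- * Only the bytes past 'offset' within the newest matching slice are copied out.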
 */ -int obs_replication_receive(XLogRecPtr startPtr, char **buffer, int *bufferLength, - int timeout_ms, char* inner_buff) -{ - char *fileName = NULL; - uint32 offset = 0; - size_t readLen = 0; - char *xlogBuff = NULL; - uint32 actualXlogLen = 0; - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - errno_t rc = EOK; - TimestampTz start_time; - - if (buffer == NULL || *buffer == NULL || bufferLength == NULL) { - ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("Invalid parameter"))); - } - - *bufferLength = 0; - - fileName = obs_replication_get_last_xlog_slice(startPtr, false, true, &walrcv->archive_slot->archive_obs); - if (fileName == NULL || strlen(fileName) == 0) { - ereport(LOG, (errmsg("Cannot find xlog file with LSN: %lu", startPtr))); - return -1; - } - - if (inner_buff != NULL) { - xlogBuff = inner_buff; - } else { - xlogBuff = (char*)palloc(OBS_XLOG_SLICE_FILE_SIZE); - if (xlogBuff == NULL) { - pfree(fileName); - return -1; - } - } - - rc = memset_s(xlogBuff, OBS_XLOG_SLICE_FILE_SIZE, 0, OBS_XLOG_SLICE_FILE_SIZE); - securec_check(rc, "", ""); - - /* calc begin offset */ - offset = ((uint32)(startPtr % XLogSegSize)) & ((uint32)(OBS_XLOG_SLICE_BLOCK_SIZE - 1)); - - /* Start timing */ - start_time = GetCurrentTimestamp(); - do { - readLen = obsRead(fileName, 0, xlogBuff, OBS_XLOG_SLICE_FILE_SIZE, &walrcv->archive_slot->archive_obs); - if (readLen < sizeof(int)) { - ereport(LOG, (errmsg("Cannot get xlog from OBS, object key: %s", fileName))); - /* retry */ - continue; - } - - /* Analyze the file header to calculate the actual file length */ - actualXlogLen = ntohl(*(uint32*)xlogBuff); - - Assert(actualXlogLen + sizeof(int) <= readLen); - Assert(actualXlogLen <= (int)(OBS_XLOG_SLICE_BLOCK_SIZE)); - - if (actualXlogLen > offset && (actualXlogLen + sizeof(int) <= readLen)) { - *bufferLength = actualXlogLen - offset; - rc = memcpy_s(*buffer, OBS_XLOG_SLICE_BLOCK_SIZE, - xlogBuff + OBS_XLOG_SLICE_HEADER_SIZE + offset, - *bufferLength); - securec_check(rc, "", ""); - break; - } - - pg_usleep(10 * 1000); // 10ms - } while (ComputeTimeStamp(start_time) < timeout_ms); - - if (inner_buff == NULL) { - /* xlogBuff is palloc'd by this function */ - pfree(xlogBuff); - } - pfree(fileName); - return 0; -} - -int obs_replication_archive(const ArchiveXlogMessage *xlogInfo) -{ - errno_t rc = EOK; - int ret = 0; - char *fileName = NULL; - char *fileNamePrefix = NULL; - - int xlogreadfd; - char xlogfpath[MAXPGPATH]; - char xlogfname[MAXFNAMELEN]; - - char *xlogBuff = NULL; - uint32 actualXlogLen = 0; - uint offset = 0; - - XLogSegNo xlogSegno = 0; - ArchiveSlotConfig *archive_slot = NULL; - archive_slot = getObsReplicationSlot(); - - xlogBuff = (char *)palloc(OBS_XLOG_SLICE_FILE_SIZE); - rc = memset_s(xlogBuff, OBS_XLOG_SLICE_FILE_SIZE, 0, OBS_XLOG_SLICE_FILE_SIZE); - securec_check(rc, "", ""); - - /* generate xlog path */ - XLByteToSeg(xlogInfo->targetLsn, xlogSegno); - if (xlogSegno == InvalidXLogSegPtr) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid Lsn: %lu", xlogInfo->targetLsn))); - } - XLogFileName(xlogfname, DEFAULT_TIMELINE_ID, xlogSegno); - - rc = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, "%s/" XLOGDIR "/%s", t_thrd.proc_cxt.DataDir, xlogfname); - securec_check_ss(rc, "\0", "\0"); - - xlogreadfd = open(xlogfpath, O_RDONLY | PG_BINARY, 0); - if (xlogreadfd < 0) { - ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("Cannot open file \"%s\": %s", xlogfpath, strerror(errno)))); - } - - /* Align down to 2M */ - offset
= TYPEALIGN_DOWN(OBS_XLOG_SLICE_BLOCK_SIZE, ((xlogInfo->targetLsn) % XLogSegSize)); - if (lseek(xlogreadfd, (off_t)offset, SEEK_SET) < 0) { - ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), - errmsg("Cannot seek to offset[%u] of xlog file \"%s\": %s", - offset, xlogfpath, strerror(errno)))); - } - - if (read(xlogreadfd, xlogBuff + OBS_XLOG_SLICE_HEADER_SIZE, OBS_XLOG_SLICE_BLOCK_SIZE) - != OBS_XLOG_SLICE_BLOCK_SIZE) { - ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), - errmsg("Cannot read local xlog file \"%s\": %s", xlogfpath, strerror(errno)))); - } - - /* Add xlog slice header for recording the actual xlog length */ - actualXlogLen = (((uint32)((xlogInfo->targetLsn) % XLogSegSize)) & (OBS_XLOG_SLICE_BLOCK_SIZE - 1)) + 1; - - *(uint32*)xlogBuff = htonl(actualXlogLen); - - close(xlogreadfd); - - /* Get xlog slice file path on OBS */ - fileNamePrefix = obs_replication_get_xlog_prefix(xlogInfo->targetLsn, false); - fileName = (char*)palloc0(MAX_PATH_LEN); - - /* {xlog_name}_{slice_num}_01(version_num)_00000001{tli}_00000001{subTerm} */ - rc = sprintf_s(fileName, MAX_PATH_LEN, "%s_%02d_%08u_%08u_%08d", fileNamePrefix, - CUR_OBS_FILE_VERSION, xlogInfo->term, xlogInfo->tli, xlogInfo->sub_term); - securec_check_ss(rc, "\0", "\0"); - - /* Upload xlog slice file to OBS */ - ret = obsWrite(fileName, xlogBuff, OBS_XLOG_SLICE_FILE_SIZE, &archive_slot->archive_obs); - - pfree(xlogBuff); - pfree(fileNamePrefix); - pfree(fileName); - - return ret; -} - -void obs_update_archive_start_end_location_file(XLogRecPtr endPtr, long endTime) -{ - StringInfoData buffer; - XLogRecPtr locStartPtr; - char* fileName; - char* obsfileName; - char preFileName[MAXPGPATH] = {0}; - char* xlogFileName; - char* tempToken = NULL; - uint32 xlogReadLogid = -1; - uint32 xlogReadLogSeg = -1; - TimeLineID tli = 0; - List *obsXlogList = NIL; - ListCell* cell = NULL; - errno_t rc = EOK; - - ArchiveSlotConfig* obs_archive_slot = getObsReplicationSlot(); - if (obs_archive_slot == NULL) { - return; - } - - if (!IS_PGXC_COORDINATOR) { - initStringInfo(&buffer); - obsXlogList = obsList(XLOGDIR, &obs_archive_slot->archive_obs); - if (obsXlogList == NIL || obsXlogList->length <= 0) { - return; - } - cell = list_head(obsXlogList); - fileName = (char*)lfirst(cell); - obsfileName = strrchr(fileName, '/'); - rc = memcpy_s(preFileName, MAXPGPATH, fileName, strlen(fileName) - strlen(obsfileName)); - securec_check(rc, "", ""); - obsfileName = obsfileName + 1; - tempToken = NULL; - xlogFileName = strtok_s(obsfileName, "_", &tempToken); - if (sscanf_s(xlogFileName, "%08X%08X%08X", &tli, &xlogReadLogid, &xlogReadLogSeg) != 3) { - ereport(ERROR, (errmsg("failed to translate name to xlog: %s", xlogFileName))); - } - XLogSegNoOffsetToRecPtr(xlogReadLogid * XLogSegmentsPerXLogId + xlogReadLogSeg, 0, locStartPtr); - appendStringInfo(&buffer, "%ld-%ld_%lu-%lu_00000001_%s\n", t_thrd.arch.arch_start_timestamp, endTime, - locStartPtr, endPtr, preFileName); - - obsWrite(OBS_ARCHIVE_STATUS_FILE, buffer.data, buffer.len, &obs_archive_slot->archive_obs); - pfree(buffer.data); - } -} - -int obs_replication_cleanup(XLogRecPtr recptr, ObsArchiveConfig *obs_config) -{ - char *fileNamePrefix = NULL; - List *object_list = NIL; - ListCell *cell = NULL; - char *key = NULL; - - errno_t rc = EOK; - int ret = 0; - char xlogfname[MAXFNAMELEN]; - char obsXlogPath[MAXPGPATH] = {0}; - int maxDelNum = 0; - size_t len = 0; - XLogSegNo xlogSegno = 0; - volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - - XLByteToSeg(recptr, xlogSegno); - rc =
snprintf_s(xlogfname, MAXFNAMELEN, MAXFNAMELEN - 1, "%08X%08X%08X_%02u", DEFAULT_TIMELINE_ID, - (uint32)((xlogSegno) / XLogSegmentsPerXLogId), (uint32)((xlogSegno) % XLogSegmentsPerXLogId), - (uint32)((recptr / OBS_XLOG_SLICE_BLOCK_SIZE) & OBS_XLOG_SLICE_NUM_MAX)); - securec_check_ss_c(rc, "", ""); - len = strlen(xlogfname); - - fileNamePrefix = obs_replication_get_xlog_prefix(recptr, true); - - ereport(LOG, (errmsg("Cleanup OBS objects with the prefix %s.", fileNamePrefix))); - if (obs_config == NULL) { - object_list = obsList(fileNamePrefix, &walrcv->archive_slot->archive_obs); - } else { - object_list = obsList(fileNamePrefix, obs_config); - } - - if (object_list == NIL || object_list->length <= 0) { - ereport(LOG, (errmsg("The OBS objects with the prefix %s cannot be found.", fileNamePrefix))); - - pfree(fileNamePrefix); - return -1; - } - - /* At least OBS_XLOG_SAVED_FILES_NUM log files (about 100 GB) must be retained on obs. */ - if (obs_config == NULL) { - if (object_list->length <= OBS_XLOG_SAVED_FILES_NUM) { - ereport(LOG, (errmsg("[obs_replication_cleanup]Archive logs do not need to be deleted."))); - return 0; - } else { - maxDelNum = object_list->length - OBS_XLOG_SAVED_FILES_NUM; - ereport(LOG, (errmsg("[obs_replication_cleanup]Delete archive xlog before %s, " - "number of deleted files is %d", xlogfname, maxDelNum))); - } - } - - foreach (cell, object_list) { - key = path_skip_prefix((char *)lfirst(cell)); - if (key == NULL) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Invalid OBS object key: %s", (char *)lfirst(cell)))); - } - - if (strncmp(basename(key), xlogfname, len) < 0) { - /* Ahead of the target lsn, need to delete */ - rc = snprintf_s(obsXlogPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s", XLOGDIR, key); - securec_check_ss_c(rc, "", ""); - if (obs_config == NULL) { - ret = obsDelete(obsXlogPath, &walrcv->archive_slot->archive_obs); - } else { - ret = obsDelete(obsXlogPath, obs_config); - } - if (ret != 0) { - ereport(WARNING, (errcode(ERRCODE_UNDEFINED_FILE), - errmsg("Failed to delete OBS object, ret=%d, key=%s", ret, key))); - } else { - /* The number of files to be deleted has reached the maximum.
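- (Illustration: on the periodic cleanup path, where obs_config == NULL, maxDelNum was
- computed above as the list length minus OBS_XLOG_SAVED_FILES_NUM, so the newest
- slices always survive the cleanup.)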
*/ - if ((maxDelNum--) <= 0 && obs_config == NULL) { - break; - } - } - } else { - /* Reach the target lsn */ - break; - } - } - - /* release result list */ - list_free_deep(object_list); - object_list = NIL; - pfree(fileNamePrefix); - - return 0; -} - -int obs_replication_get_last_xlog(ArchiveXlogMessage *xlogInfo, ObsArchiveConfig* archive_obs) -{ - char *filePath = NULL; - char *fileBaseName = NULL; - errno_t rc = EOK; - TimeLineID timeLine; - int xlogSegId; - int xlogSegOffset; - int version = 0; - if (xlogInfo == NULL) { - return -1; - } - - filePath = obs_replication_get_last_xlog_slice(0, true, false, archive_obs); - if (filePath == NULL) { - ereport(LOG, (errmsg("Cannot find xlog file on OBS"))); - return -1; - } - - fileBaseName = basename(filePath); - ereport(DEBUG1, (errmsg("The last xlog on OBS: %s", filePath))); - - rc = sscanf_s(fileBaseName, "%8X%8X%8X_%2u_%02d_%08u_%08u_%08d", &timeLine, &xlogSegId, - &xlogSegOffset, &xlogInfo->slice, &version, &xlogInfo->term, &xlogInfo->tli, &xlogInfo->sub_term); - securec_check_for_sscanf_s(rc, 6, "\0", "\0"); - - ereport(DEBUG1, (errmsg("Parsed xlog filename is %8X%8X%8X_%2u_%02d_%08u_%08u_%08d", timeLine, xlogSegId, - xlogSegOffset, xlogInfo->slice, version, xlogInfo->term, xlogInfo->tli, xlogInfo->sub_term))); - - XLogSegNoOffsetToRecPtr(xlogSegId * XLogSegmentsPerXLogId + xlogSegOffset, 0, xlogInfo->targetLsn); - - pfree(filePath); - return 0; -} - -static void check_danger_character(const char *inputEnvValue) -{ - if (inputEnvValue == NULL) { - return; - } - - const char *dangerCharacterList[] = { ";", "`", "\\", "'", "\"", ">", "<", "&", "|", "!", NULL }; - int i = 0; - - for (i = 0; dangerCharacterList[i] != NULL; i++) { - if (strstr(inputEnvValue, dangerCharacterList[i]) != NULL) { - ereport(ERROR, (errmsg("Failed to check input value: invalid token \"%s\".", dangerCharacterList[i]))); - } - } -} - -char* get_local_key_cn(void) -{ - int ret = 0; - int fd; - char* gausshome = NULL; - char key_cn_file[MAXPGPATH] = {0}; - char key_cn[MAXFNAMELEN] = {0}; - - gausshome = getGaussHome(); - if (gausshome == NULL) { - ereport(ERROR, (errmsg("Failed to get gausshome"))); - return NULL; - } - ret = snprintf_s(key_cn_file, MAXPGPATH, MAXPGPATH - 1, "%s/bin/hadr_key_cn", gausshome); - securec_check_ss(ret, "\0", "\0"); - - if (!file_exists(key_cn_file)) { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FILE), errmsg("There is no hadr_key_cn"))); - return NULL; - } else { - canonicalize_path(key_cn_file); - fd = open(key_cn_file, O_RDONLY | PG_BINARY, 0); - if (fd < 0) - ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\"", key_cn_file))); - off_t size = lseek(fd, 0, SEEK_END); - if (size == -1 || size > MAXFNAMELEN - 1) { - close(fd); - ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), errmsg("Failed to read local hadr_key_cn"))); - return NULL; - } - (void)lseek(fd, 0, SEEK_SET); - - ret = read(fd, &key_cn, size); - if (ret != size) { - (void)close(fd); - ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND), - (errmsg("The file name hadr_key_cn cannot be read now.")))); - return NULL; - } - (void)close(fd); - key_cn[size] = '\0'; - check_danger_character(key_cn); - return pstrdup(key_cn); - } -} diff --git a/src/gausskernel/storage/replication/pgoutput/CMakeLists.txt b/src/gausskernel/storage/replication/pgoutput/CMakeLists.txt index fd379d6da..23aa035d2 100644 --- a/src/gausskernel/storage/replication/pgoutput/CMakeLists.txt +++ b/src/gausskernel/storage/replication/pgoutput/CMakeLists.txt @@ -2,9 +2,8 @@
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_pgoutput_SRC) set(pgoutput_DEF_OPTIONS ${MACRO_OPTIONS}) -set(pgoutput_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS}) +set(pgoutput_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-all) set(pgoutput_LINK_OPTIONS ${LIB_LINK_OPTIONS}) -#add_shared_libtarget(gausskernel_storage_replication_pgoutput TGT_pgoutput_SRC TGT_pgoutput_INC "${pgoutput_DEF_OPTIONS}" "${pgoutput_COMPILE_OPTIONS}" "${pgoutput_LINK_OPTIONS}") add_shared_libtarget(pgoutput TGT_pgoutput_SRC "" "${pgoutput_DEF_OPTIONS}" "${pgoutput_COMPILE_OPTIONS}" "${pgoutput_LINK_OPTIONS}") set_target_properties(pgoutput PROPERTIES PREFIX "") install(TARGETS pgoutput LIBRARY DESTINATION lib/postgresql) diff --git a/src/gausskernel/storage/replication/pgoutput/Makefile b/src/gausskernel/storage/replication/pgoutput/Makefile index 489f7048d..5b337f5cf 100644 --- a/src/gausskernel/storage/replication/pgoutput/Makefile +++ b/src/gausskernel/storage/replication/pgoutput/Makefile @@ -9,6 +9,9 @@ OBJS = \ PGFILEDESC = "pgoutput - standard logical replication output plugin" NAME = pgoutput +override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -fstack-protector-all +override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -fstack-protector-all + all: all-shared-lib include $(top_builddir)/src/Makefile.shlib diff --git a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp index 912b6fd75..425fd6d28 100644 --- a/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp +++ b/src/gausskernel/storage/replication/pgoutput/pgoutput.cpp @@ -37,6 +37,7 @@ static void pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *o static void pgoutput_shutdown(LogicalDecodingContext *ctx); static void pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn); static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); +static void pgoutput_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn); static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); @@ -68,6 +69,7 @@ void _PG_output_plugin_init(OutputPluginCallbacks *cb) cb->begin_cb = pgoutput_begin_txn; cb->change_cb = pgoutput_change; cb->commit_cb = pgoutput_commit_txn; + cb->abort_cb = pgoutput_abort_txn; cb->filter_by_origin_cb = pgoutput_origin_filter; cb->shutdown_cb = pgoutput_shutdown; } @@ -152,10 +154,10 @@ static void pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *o /* Init publication state. */ data->publications = NIL; t_thrd.publication_cxt.publications_valid = false; - CacheRegisterSyscacheCallback(PUBLICATIONOID, publication_invalidation_cb, (Datum)0); + CacheRegisterThreadSyscacheCallback(PUBLICATIONOID, publication_invalidation_cb, (Datum)0); /* Initialize relation schema cache. 
*/ - init_rel_sync_cache(u_sess->cache_mem_cxt); + init_rel_sync_cache(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); } } @@ -206,6 +208,15 @@ static void pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *t OutputPluginWrite(ctx, true); } +/* + * ABORT callback + */ +static void pgoutput_abort_txn(LogicalDecodingContext* ctx, ReorderBufferTXN* txn) +{ + /* Should not happen */ + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("abort transaction not currently supported"))); +} + /* Check whether the buffer change type is supported, return true if supported */ static inline bool CheckAction(ReorderBufferChangeType type, PublicationActions pubAction) { @@ -426,8 +437,8 @@ static void init_rel_sync_cache(MemoryContext cachectx) Assert(t_thrd.publication_cxt.RelationSyncCache != NULL); - CacheRegisterRelcacheCallback(rel_sync_cache_relation_cb, (Datum)0); - CacheRegisterSyscacheCallback(PUBLICATIONRELMAP, rel_sync_cache_publication_cb, (Datum)0); + CacheRegisterThreadRelcacheCallback(rel_sync_cache_relation_cb, (Datum)0); + CacheRegisterThreadSyscacheCallback(PUBLICATIONRELMAP, rel_sync_cache_publication_cb, (Datum)0); } static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, Oid relid) @@ -474,7 +485,7 @@ static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, O unsigned int u; int n; - if (sscanf_s(relname, "pg_temp_%u%n", &u, &n) == 1 && relname[n] == '\0' && + if (relname != NULL && sscanf_s(relname, "pg_temp_%u%n", &u, &n) == 1 && relname[n] == '\0' && get_rel_relkind(u) == RELKIND_RELATION) { break; } @@ -490,7 +501,7 @@ static void RefreshRelationEntry(RelationSyncEntry *entry, PGOutputData *data, O break; } - list_free(pubids); + list_free_ext(pubids); entry->replicate_valid = true; } diff --git a/src/gausskernel/storage/replication/repl_scanner.l b/src/gausskernel/storage/replication/repl_scanner.l index 26cdd11f1..cb8ed016a 100755 --- a/src/gausskernel/storage/replication/repl_scanner.l +++ b/src/gausskernel/storage/replication/repl_scanner.l @@ -36,6 +36,14 @@ static char *litbufdup(void); static void addlit(char *ytext, int yleng); static void addlitchar(unsigned char ychar); +/* + * define replication_scanner_yylex for flex >= 2.6 + */ +#if FLEX_MAJOR_VERSION >= 2 && FLEX_MINOR_VERSION >= 6 +#define YY_DECL int replication_scanner_yylex \ + (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner) +#endif + %} %option reentrant diff --git a/src/gausskernel/storage/replication/rto_statistic.cpp b/src/gausskernel/storage/replication/rto_statistic.cpp index d08e911b3..f6b1ec3cc 100644 --- a/src/gausskernel/storage/replication/rto_statistic.cpp +++ b/src/gausskernel/storage/replication/rto_statistic.cpp @@ -70,6 +70,7 @@ void rto_get_standby_info_text(char *info, uint32 max_info_len) Datum rto_get_standby_info() { Datum value; + const uint32 RTO_INFO_BUFFER_SIZE = 2048 * (1 + g_instance.attr.attr_storage.max_wal_senders); char *info = (char *)palloc0(sizeof(char) * RTO_INFO_BUFFER_SIZE); rto_get_standby_info_text(info, RTO_INFO_BUFFER_SIZE); value = CStringGetTextDatum(info); @@ -97,8 +98,6 @@ RTOStandbyData *GetDCFRTOStat(uint32 *num) if (strstr(standby_names, "hadr_") != NULL) { continue; } - ereport(LOG, (errmsg("Step into GetDCFRTOStat and id is %s.", - g_instance.rto_cxt.dcf_rto_standby_data[i].id))); char *local_ip = (char *)(result[readDCFNode].source_ip); rc = strncpy_s(local_ip, IP_LEN, (char *)g_instance.rto_cxt.dcf_rto_standby_data[i].source_ip, strlen((char 
*)g_instance.rto_cxt.dcf_rto_standby_data[i].source_ip)); @@ -112,12 +111,7 @@ RTOStandbyData *GetDCFRTOStat(uint32 *num) result[readDCFNode].source_port = g_instance.rto_cxt.dcf_rto_standby_data[i].source_port; result[readDCFNode].dest_port = g_instance.rto_cxt.dcf_rto_standby_data[i].dest_port; result[readDCFNode].current_rto = g_instance.rto_cxt.dcf_rto_standby_data[i].current_rto; - - if (u_sess->attr.attr_storage.target_rto == 0) { - result[readDCFNode].current_sleep_time = 0; - } else { - result[readDCFNode].current_sleep_time = g_instance.rto_cxt.dcf_rto_standby_data[i].current_sleep_time; - } + result[readDCFNode].current_sleep_time = g_instance.rto_cxt.dcf_rto_standby_data[i].current_sleep_time; result[readDCFNode].target_rto = u_sess->attr.attr_storage.target_rto; readDCFNode++; } @@ -144,7 +138,7 @@ RTOStandbyData *GetRTOStat(uint32 *num) rc = strncpy_s(standby_names, IP_LEN, g_instance.rto_cxt.rto_standby_data[i].id, strlen(g_instance.rto_cxt.rto_standby_data[i].id)); securec_check(rc, "\0", "\0"); - if (strstr(standby_names, "hadr_") != NULL) { + if ((strstr(standby_names, "hadr_") != NULL) || (strstr(standby_names, "hass") != NULL)) { SpinLockRelease(&walsnd->mutex); continue; } @@ -189,7 +183,8 @@ HadrRTOAndRPOData *HadrGetRTOStat(uint32 *num) for (i = 0; i < g_instance.attr.attr_storage.max_wal_senders; i++) { /* use volatile pointer to prevent code rearrangement */ volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[i]; - if (walsnd->pid != 0 && (strstr(g_instance.rto_cxt.rto_standby_data[i].id, "hadr_") != NULL)) { + if (walsnd->pid != 0 && ((strstr(g_instance.rto_cxt.rto_standby_data[i].id, "hadr_") != NULL) || + (strstr(g_instance.rto_cxt.rto_standby_data[i].id, "hass") != NULL))) { char *standby_names = (char *)(result[readWalSnd].id); rc = strncpy_s(standby_names, IP_LEN, g_instance.rto_cxt.rto_standby_data[i].id, strlen(g_instance.rto_cxt.rto_standby_data[i].id)); @@ -210,11 +205,15 @@ HadrRTOAndRPOData *HadrGetRTOStat(uint32 *num) result[readWalSnd].current_rto = g_instance.rto_cxt.rto_standby_data[i].current_rto; result[readWalSnd].current_rpo = walsnd->log_ctrl.current_RPO < 0 ? 
0 : walsnd->log_ctrl.current_RPO; - if (u_sess->attr.attr_storage.hadr_recovery_time_target == 0 && - u_sess->attr.attr_storage.hadr_recovery_point_target == 0) { - result[readWalSnd].current_sleep_time = 0; + if (u_sess->attr.attr_storage.hadr_recovery_time_target == 0) { + result[readWalSnd].rto_sleep_time = 0; } else { - result[readWalSnd].current_sleep_time = g_instance.rto_cxt.rto_standby_data[i].current_sleep_time; + result[readWalSnd].rto_sleep_time = g_instance.rto_cxt.rto_standby_data[i].current_sleep_time; + } + if (u_sess->attr.attr_storage.hadr_recovery_point_target == 0) { + result[readWalSnd].rpo_sleep_time = 0; + } else { + result[readWalSnd].rpo_sleep_time = g_instance.streaming_dr_cxt.rpoSleepTime; } result[readWalSnd].target_rto = u_sess->attr.attr_storage.hadr_recovery_time_target; result[readWalSnd].target_rpo = u_sess->attr.attr_storage.hadr_recovery_point_target; diff --git a/src/gausskernel/storage/replication/shared_storage_walreceiver.cpp b/src/gausskernel/storage/replication/shared_storage_walreceiver.cpp index a0b275972..2cb759984 100644 --- a/src/gausskernel/storage/replication/shared_storage_walreceiver.cpp +++ b/src/gausskernel/storage/replication/shared_storage_walreceiver.cpp @@ -54,7 +54,7 @@ bool SimpleCheckBlockheader(XLogReaderState *xlogreader, XLogRecPtr targetPagePt checkSize += XLOG_BLCKSZ; } while (checkSize < *size); - if (checkSize != *size) { + if (checkSize < *size) { *size = checkSize; } @@ -64,27 +64,28 @@ bool SharedStorageXlogReadCheck(XLogReaderState *xlogreader, XLogRecPtr readEnd, XLogRecPtr readPageStart, char *localBuff, int *readLen) { - uint64 diffLen = ((readEnd - readPageStart) >= ShareStorageBufSize) ? ShareStorageBufSize : (readEnd - readPageStart); - uint64 calcLen = readPageStart % ShareStorageAlnSize; - if (calcLen) { - diffLen = ((diffLen + calcLen) > ShareStorageAlnSize) ? (ShareStorageAlnSize - calcLen) : diffLen; + int diffLen; + Assert((readPageStart % XLOG_BLCKSZ) == 0); + XLogRecPtr alignReadEnd = readPageStart - readPageStart % ShareStorageBufSize + ShareStorageBufSize; + if (alignReadEnd > readEnd) { + XLogRecPtr ActualCopyEnd = TYPEALIGN(XLOG_BLCKSZ, readEnd); + diffLen = static_cast<int>(ActualCopyEnd - readPageStart); + } else { + diffLen = static_cast<int>(alignReadEnd - readPageStart); } - bool readResult = false; int readBytes = ReadXlogFromShareStorage(readPageStart, localBuff, (int)TYPEALIGN(g_instance.xlog_cxt.shareStorageopCtl.blkSize, diffLen)); if (readBytes > 0) { - *readLen = (readBytes > (int)diffLen) ? diffLen : readBytes; + *readLen = (int)((XLogRecPtr)readBytes > (readEnd - readPageStart)) ?
(readEnd - readPageStart) : readBytes; readResult = SimpleCheckBlockheader(xlogreader, readPageStart, localBuff, readLen); } else { *readLen = 0; ereport(FATAL, (errcode(ERRCODE_INVALID_STATUS), - errmsg("read zero length of xlog from shared storage startlsn : %lx, readlen :%lx, inserthead :%lx", + errmsg("read zero length of xlog from shared storage startlsn : %lx, readlen :%d, inserthead :%lx", readPageStart, diffLen, readEnd))); } - return readResult; } @@ -138,12 +139,15 @@ bool shared_storage_xlog_read(int timeout, unsigned char *type, char **buffer, i ReadShareStorageCtlInfo(ctlInfo); uint32 lastTerm = pg_atomic_read_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->shareStorageTerm); - if (ctlInfo->term > lastTerm) { + if (ctlInfo->term > lastTerm || ctlInfo->xlogFileSize != (uint64)g_instance.attr.attr_storage.xlog_file_size) { t_thrd.walreceiver_cxt.termChanged = true; if (isStopping) return false; - else - ereport(ERROR, (errcode(ERRCODE_INVALID_STATUS), errmsg("the term on shared storage is changed"))); + else { + ereport(ERROR, (errcode(ERRCODE_INVALID_STATUS), + errmsg("the term(%u:%u) or xlog file size(%lu:%lu) on shared storage is changed", ctlInfo->term, + lastTerm, ctlInfo->xlogFileSize, (uint64)g_instance.attr.attr_storage.xlog_file_size))); + } } if (SharedStorageXlogReadCheck(xlogreader, ctlInfo->insertHead, page_lsn, local_buff, &read_len)) { @@ -227,7 +231,7 @@ void shared_storage_xlog_check_consistency() char *errormsg = NULL; if (XLByteLT(localRec + recordLen, sharedStorageCtl->insertHead)) { - XLogRecord *record = XLogReadRecord(xlogReader, localRec, &errormsg, false, false); // don't need decode + XLogRecord *record = XLogReadRecord(xlogReader, localRec, &errormsg, false); // don't need decode if (record == NULL) { ereport(WARNING, (errcode(ERRCODE_INVALID_STATUS), errmsg("standby's local request lsn[%lx] is not valid record", localRec))); diff --git a/src/gausskernel/storage/replication/slot.cpp b/src/gausskernel/storage/replication/slot.cpp index 201da0eb6..8671ac22c 100755 --- a/src/gausskernel/storage/replication/slot.cpp +++ b/src/gausskernel/storage/replication/slot.cpp @@ -248,6 +248,49 @@ void redo_slot_reset_for_backup(const ReplicationSlotPersistentData *xlrec) ReplicationSlotRelease(); } +/* + * Drop the old hadr replication slot when hadr main standby is changed. 
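+ * Sketch of the intended effect (derived from the body below): when a new slot whose
+ * name contains '_hadr' is created, any inactive '_hadr' slot with a different name is
+ * dropped first, so at most one such slot survives a main-standby change.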
+ */ +static void DropOldHadrReplicationSlot(const char *name) +{ + if (strstr(name, "_hadr") == NULL) { + return; + } + + ReplicationSlot *slot; + char dropSlotName[NAMEDATALEN] = {0}; + errno_t rc; + + LWLockAcquire(ReplicationSlotControlLock, LW_SHARED); + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; + if (slot->in_use && !slot->active && + strcmp(name, NameStr(slot->data.name)) != 0 && + strstr(NameStr(slot->data.name), "_hadr") != NULL) { + rc = strcpy_s(dropSlotName, NAMEDATALEN, NameStr(slot->data.name)); + securec_check_ss(rc, "\0", "\0"); + break; + } + } + LWLockRelease(ReplicationSlotControlLock); + + if (strlen(dropSlotName) > 0) { + ReplicationSlotDrop(dropSlotName); + } +} + +static bool HasArchiveSlot() +{ + for (int i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { + ReplicationSlot *s = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; + if (s->in_use && s->data.database == InvalidOid && GET_SLOT_PERSISTENCY(s->data) != RS_BACKUP && + s->extra_content != NULL) { + return true; + } + } + return false; +} + /* * Create a new replication slot and mark it as used by this backend. * @@ -266,6 +309,9 @@ void ReplicationSlotCreate(const char *name, ReplicationSlotPersistency persiste (void)ReplicationSlotValidateName(name, ERROR); + /* Drop old hadr replication slot for streaming disaster cluster. */ + DropOldHadrReplicationSlot(name); + /* * If some other backend ran this code currently with us, we'd likely * both allocate the same slot, and that would be bad. We'd also be @@ -281,9 +327,11 @@ void ReplicationSlotCreate(const char *name, ReplicationSlotPersistency persiste * nobody else can change the in_use flags while we're looking at them. 
*/ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED); + if (extra_content != NULL && strlen(extra_content) != 0 && HasArchiveSlot()) { + ereport(ERROR, (errmsg("currently multi-archive replication slot isn't supported"))); + } for (i = 0; i < g_instance.attr.attr_storage.max_replication_slots; i++) { ReplicationSlot *s = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[i]; - if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0) { LWLockRelease(ReplicationSlotControlLock); LWLockRelease(ReplicationSlotAllocationLock); @@ -835,6 +883,36 @@ void ReplicationSlotsComputeRequiredXmin(bool already_locked) ProcArraySetReplicationSlotXmin(agg_xmin, agg_catalog_xmin, already_locked); } +static void CalculateMinAndMaxRequiredPtr(ReplicationSlot *s, XLogRecPtr* standby_slots_list, int i, + XLogRecPtr restart_lsn, XLogRecPtr* min_tools_required, XLogRecPtr* min_required, + XLogRecPtr* min_archive_restart_lsn, XLogRecPtr* max_required) +{ + if (s->data.database == InvalidOid && GET_SLOT_PERSISTENCY(s->data) != RS_BACKUP && s->extra_content == NULL) { + standby_slots_list[i] = restart_lsn; + } else { + XLogRecPtr min_tools_lsn = restart_lsn; + if (s->extra_content == NULL && (!XLByteEQ(min_tools_lsn, InvalidXLogRecPtr)) && + (XLByteEQ(*min_tools_required, InvalidXLogRecPtr) || XLByteLT(min_tools_lsn, *min_tools_required))) { + *min_tools_required = min_tools_lsn; + } + } + if (s->extra_content != NULL) { + if ((!XLByteEQ(restart_lsn, InvalidXLogRecPtr)) && + (XLByteEQ(*min_archive_restart_lsn, InvalidXLogRecPtr) + || XLByteLT(restart_lsn, *min_archive_restart_lsn))) { + *min_archive_restart_lsn = restart_lsn; + } + } + if ((!XLByteEQ(restart_lsn, InvalidXLogRecPtr)) && + (XLByteEQ(*min_required, InvalidXLogRecPtr) || XLByteLT(restart_lsn, *min_required))) { + *min_required = restart_lsn; + } + + if (XLByteLT(*max_required, restart_lsn)) { + *max_required = restart_lsn; + } +} + /* * Compute the oldest restart LSN across all slots and inform xlog module. 
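 * A sketch of the aggregation (now factored into CalculateMinAndMaxRequiredPtr above):
 * min_required/max_required span the restart_lsn of all in-use slots,
 * min_tools_required covers tool slots without extra content, and
 * min_archive_slot_required tracks archive slots (extra_content != NULL).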
*/ @@ -845,6 +923,7 @@ void ReplicationSlotsComputeRequiredLSN(ReplicationSlotState *repl_slt_state) XLogRecPtr min_tools_required = InvalidXLogRecPtr; XLogRecPtr max_required = InvalidXLogRecPtr; XLogRecPtr standby_slots_list[g_instance.attr.attr_storage.max_replication_slots]; + XLogRecPtr min_archive_restart_lsn = InvalidXLogRecPtr; bool in_use = false; errno_t rc = EOK; @@ -866,6 +945,7 @@ void ReplicationSlotsComputeRequiredLSN(ReplicationSlotState *repl_slt_state) SpinLockAcquire(&s->mutex); XLogRecPtr restart_lsn; bool isNoNeedCheck = (t_thrd.xlog_cxt.server_mode != PRIMARY_MODE && + !(t_thrd.xlog_cxt.server_mode == STANDBY_MODE && t_thrd.xlog_cxt.is_hadr_main_standby) && t_thrd.xlog_cxt.server_mode != PENDING_MODE && s->data.database == InvalidOid && GET_SLOT_PERSISTENCY(s->data) != RS_BACKUP); if (isNoNeedCheck) { @@ -880,23 +960,8 @@ void ReplicationSlotsComputeRequiredLSN(ReplicationSlotState *repl_slt_state) restart_lsn = vslot->data.restart_lsn; SpinLockRelease(&s->mutex); - if (s->data.database == InvalidOid && GET_SLOT_PERSISTENCY(s->data) != RS_BACKUP) { - standby_slots_list[i] = restart_lsn; - } else { - XLogRecPtr min_tools_lsn = restart_lsn; - if ((!XLByteEQ(min_tools_lsn, InvalidXLogRecPtr)) && - (XLByteEQ(min_tools_required, InvalidXLogRecPtr) || XLByteLT(min_tools_lsn, min_tools_required))) { - min_tools_required = min_tools_lsn; - } - } - if ((!XLByteEQ(restart_lsn, InvalidXLogRecPtr)) && - (XLByteEQ(min_required, InvalidXLogRecPtr) || XLByteLT(restart_lsn, min_required))) { - min_required = restart_lsn; - } - - if (XLByteLT(max_required, restart_lsn)) { - max_required = restart_lsn; - } + CalculateMinAndMaxRequiredPtr(s, standby_slots_list, i, restart_lsn, &min_tools_required, &min_required, + &min_archive_restart_lsn, &max_required); continue; lock_release: SpinLockRelease(&s->mutex); @@ -909,9 +974,14 @@ void ReplicationSlotsComputeRequiredLSN(ReplicationSlotState *repl_slt_state) if (repl_slt_state != NULL) { repl_slt_state->min_required = min_required; repl_slt_state->max_required = max_required; - if (*standby_slots_list == InvalidXLogRecPtr) { + if (*standby_slots_list == InvalidXLogRecPtr || t_thrd.syncrep_cxt.SyncRepConfig == NULL) { repl_slt_state->quorum_min_required = InvalidXLogRecPtr; - } else if (t_thrd.syncrep_cxt.SyncRepConfig != NULL) { + repl_slt_state->min_tools_required = min_tools_required; + repl_slt_state->min_archive_slot_required = min_archive_restart_lsn; + repl_slt_state->exist_in_use = in_use; + return; + } + if (t_thrd.syncrep_cxt.SyncRepConfig != NULL) { for (i = t_thrd.syncrep_cxt.SyncRepConfig->num_sync - 1; i >= 0; i--) { if (standby_slots_list[i] != InvalidXLogRecPtr) { repl_slt_state->quorum_min_required = standby_slots_list[i]; @@ -920,6 +990,7 @@ void ReplicationSlotsComputeRequiredLSN(ReplicationSlotState *repl_slt_state) } } repl_slt_state->min_tools_required = min_tools_required; + repl_slt_state->min_archive_slot_required = min_archive_restart_lsn; repl_slt_state->exist_in_use = in_use; } } @@ -1609,10 +1680,9 @@ loop: slot->candidate_restart_lsn = InvalidXLogRecPtr; slot->candidate_restart_valid = InvalidXLogRecPtr; slot->in_use = true; - slot->active = (extra_content != NULL ? 
true : false); + slot->active = false; slot->extra_content = extra_content; slot->archive_config = archive_cfg; - slot->active = (archive_cfg != NULL); restored = true; if (extra_content != NULL) { MarkArchiveSlotOperate(); @@ -2012,9 +2082,14 @@ ArchiveSlotConfig* getArchiveReplicationSlot() if (slot->archive_config->conn_config != NULL) { t_thrd.arch.archive_config->archive_config.conn_config = (ArchiveConnConfig *)palloc0(sizeof(ArchiveConnConfig)); - int rc = memcpy_s(t_thrd.arch.archive_config->archive_config.conn_config, sizeof(ArchiveConnConfig), - slot->archive_config->conn_config, sizeof(ArchiveConnConfig)); - securec_check(rc, "\0", "\0"); + t_thrd.arch.archive_config->archive_config.conn_config->obs_address = + pstrdup(slot->archive_config->conn_config->obs_address); + t_thrd.arch.archive_config->archive_config.conn_config->obs_bucket = + pstrdup(slot->archive_config->conn_config->obs_bucket); + t_thrd.arch.archive_config->archive_config.conn_config->obs_ak = + pstrdup(slot->archive_config->conn_config->obs_ak); + t_thrd.arch.archive_config->archive_config.conn_config->obs_sk = + pstrdup(slot->archive_config->conn_config->obs_sk); } t_thrd.arch.archive_config->archive_config.archive_prefix = pstrdup_ext(slot->archive_config->archive_prefix); @@ -2065,7 +2140,8 @@ List *GetAllArchiveSlotsName() for (int slotno = 0; slotno < g_instance.attr.attr_storage.max_replication_slots; slotno++) { ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[slotno]; SpinLockAcquire(&slot->mutex); - if (slot->in_use == true && slot->archive_config != NULL && slot->archive_config->is_recovery == false) { + if (slot->in_use == true && slot->archive_config != NULL && slot->archive_config->is_recovery == false && + GET_SLOT_PERSISTENCY(slot->data) != RS_BACKUP) { result = lappend(result, (void *)pstrdup(slot->data.name.data)); } SpinLockRelease(&slot->mutex); @@ -2090,17 +2166,24 @@ List *GetAllRecoverySlotsName() void AdvanceArchiveSlot(XLogRecPtr restart_pos) { volatile int *slot_idx = &t_thrd.arch.slot_idx; + char* extra_content = NULL; if (likely(*slot_idx != -1) && *slot_idx < g_instance.attr.attr_storage.max_replication_slots) { ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[*slot_idx]; SpinLockAcquire(&slot->mutex); if (slot->in_use == true && slot->archive_config != NULL) { slot->data.restart_lsn = restart_pos; + extra_content = slot->extra_content; } else { ereport(WARNING, (errcode_for_file_access(), errmsg("slot idx not valid, obs slot %X/%X not advance ", (uint32)(restart_pos >> 32), (uint32)(restart_pos)))); } SpinLockRelease(&slot->mutex); + if (extra_content == NULL) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("archive thread could not get slot extra content when advance slot."))); + } + log_slot_advance(&slot->data, extra_content); } } @@ -2279,6 +2362,7 @@ void MarkArchiveSlotOperate() SpinLockRelease(&g_instance.archive_obs_cxt.mutex); } +#ifndef ENABLE_LITE_MODE void get_hadr_cn_info(char* keyCn, bool* isExitKey, char* deleteCn, bool* isExitDelete, ArchiveSlotConfig *archive_conf) { @@ -2305,3 +2389,4 @@ void get_hadr_cn_info(char* keyCn, bool* isExitKey, char* deleteCn, bool* isExit ereport(LOG, ((errmsg("The file named %s cannot be found.", HADR_DELETE_CN_FILE)))); } } +#endif diff --git a/src/gausskernel/storage/replication/slotfuncs.cpp b/src/gausskernel/storage/replication/slotfuncs.cpp index 9099007cc..0622ed650 100755 --- a/src/gausskernel/storage/replication/slotfuncs.cpp +++ 
b/src/gausskernel/storage/replication/slotfuncs.cpp @@ -30,15 +30,18 @@ #include "postgres.h" #include "knl/knl_variable.h" #include "replication/replicainternal.h" +#include "replication/walreceiver.h" #include "replication/walsender.h" #include "replication/syncrep.h" +#include "replication/archive_walreceiver.h" #define AllSlotInUse(a, b) ((a) == (b)) extern void *internal_load_library(const char *libname); extern bool PMstateIsRun(void); static void redo_slot_create(const ReplicationSlotPersistentData *slotInfo, char* extra_content = NULL); static XLogRecPtr create_physical_replication_slot_for_backup(const char* slot_name, bool is_dummy, char* extra); -static XLogRecPtr create_physical_replication_slot_for_archive(const char* slot_name, bool is_dummy, char* extra); +static XLogRecPtr create_physical_replication_slot_for_archive(const char* slot_name, bool is_dummy, char* extra, + XLogRecPtr currFlushPtr = InvalidXLogRecPtr); static void slot_advance(const char* slotname, XLogRecPtr &moveto, NameData &database, char *EndLsn, bool for_backup = false); @@ -76,9 +79,9 @@ void log_slot_create(const ReplicationSlotPersistentData *slotInfo, char* extra_ } } -void log_slot_advance(const ReplicationSlotPersistentData *slotInfo) +void log_slot_advance(const ReplicationSlotPersistentData *slotInfo, char* extra_content) { - if (!u_sess->attr.attr_sql.enable_slot_log || !PMstateIsRun()) { + if ((!u_sess->attr.attr_sql.enable_slot_log && t_thrd.role != ARCH) || !PMstateIsRun()) { return; } @@ -91,7 +94,9 @@ void log_slot_advance(const ReplicationSlotPersistentData *slotInfo) XLogBeginInsert(); XLogRegisterData((char *)&xlrec, ReplicationSlotPersistentDataConstSize); - + if (extra_content != NULL && strlen(extra_content) != 0) { + XLogRegisterData(extra_content, strlen(extra_content) + 1); + } Ptr = XLogInsert(RM_SLOT_ID, XLOG_SLOT_ADVANCE); XLogWaitFlush(Ptr); if (g_instance.attr.attr_storage.max_wal_senders > 0) @@ -256,6 +261,8 @@ Datum pg_create_physical_replication_slot_extern(PG_FUNCTION_ARGS) { Name name = PG_GETARG_NAME(0); bool isDummyStandby = PG_GETARG_BOOL(1); + XLogRecPtr currFlushPtr = InvalidXLogRecPtr; + bool isNeedRecycleXlog = false; text* extra_content_text = NULL; char* extra_content = NULL; const int TUPLE_FIELDS = 2; @@ -277,6 +284,9 @@ Datum pg_create_physical_replication_slot_extern(PG_FUNCTION_ARGS) extra_content_text = PG_GETARG_TEXT_P(2); extra_content = text_to_cstring(extra_content_text); } + if (!PG_ARGISNULL(3)) { + isNeedRecycleXlog = PG_GETARG_BOOL(3); + } t_thrd.slot_cxt.MyReplicationSlot = NULL; @@ -289,7 +299,28 @@ Datum pg_create_physical_replication_slot_extern(PG_FUNCTION_ARGS) if (for_backup) { restart_lsn = create_physical_replication_slot_for_backup(NameStr(*name), isDummyStandby, extra_content); } else { - restart_lsn = create_physical_replication_slot_for_archive(NameStr(*name), isDummyStandby, extra_content); +#ifndef ENABLE_LITE_MODE + if (isNeedRecycleXlog) { + ArchiveConfig* archive_config = formArchiveConfigFromStr(extra_content, false); + XLogRecPtr switchPtr = InvalidXLogRecPtr; + XLogRecPtr waitPtr = InvalidXLogRecPtr; + switchPtr = waitPtr = RequestXLogSwitch(); + waitPtr += XLogSegSize - 1; + waitPtr -= waitPtr % XLogSegSize; + XLogWaitFlush(waitPtr); + currFlushPtr = GetFlushRecPtr(); + ereport(LOG, (errmsg("push slot to switch point %lu, wait point %lu, flush point %lu", + switchPtr, waitPtr, currFlushPtr))); + (void)archive_replication_cleanup(currFlushPtr, archive_config, true); + pfree_ext(archive_config); + } else { + currFlushPtr = 
GetFlushRecPtr(); + } + currFlushPtr -= currFlushPtr % XLogSegSize; + ereport(LOG, (errmsg("create archive slot, start point %lu", currFlushPtr))); +#endif + restart_lsn = create_physical_replication_slot_for_archive(NameStr(*name), isDummyStandby, extra_content, + currFlushPtr); } values[0] = CStringGetTextDatum(NameStr(*name)); @@ -421,10 +452,16 @@ Datum pg_create_logical_replication_slot(PG_FUNCTION_ARGS) ValidateName(NameStr(*plugin)); str_tmp_lsn = (char *)palloc0(128); - check_permissions(); + Oid userId = GetUserId(); + CheckLogicalPremissions(userId); if (RecoveryInProgress()) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Standby mode doesn't support create logical slot"))); + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot create logical slot during streaming disaster recovery"))); + } if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("return type must be a row type"))); @@ -462,10 +499,20 @@ Datum pg_drop_replication_slot(PG_FUNCTION_ARGS) check_permissions(for_backup); isLogical = IsLogicalReplicationSlot(NameStr(*name)); + if (isLogical) { + Oid userId = GetUserId(); + CheckLogicalPremissions(userId); + } if (isLogical && RecoveryInProgress()) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Standby mode doesn't support drop logical slot."))); + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot drop logical slot during streaming disaster recovery"))); + } + CheckSlotRequirements(); ReplicationSlotDrop(NameStr(*name), for_backup); @@ -832,6 +879,13 @@ Datum pg_replication_slot_advance(PG_FUNCTION_ARGS) if (RecoveryInProgress()) { ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), errmsg("couldn't advance in recovery"))); } + + /* we are about to start streaming switch over, stop any xlog insert. */ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_INVALID_OPERATION), + errmsg("cannot advance slot during streaming disaster recovery"))); + } + if (PG_ARGISNULL(1)) { if (!RecoveryInProgress()) moveto = GetFlushRecPtr(); @@ -853,6 +907,8 @@ Datum pg_replication_slot_advance(PG_FUNCTION_ARGS) check_permissions(for_backup); if (!for_backup) { + Oid userId = GetUserId(); + CheckLogicalPremissions(userId); CheckLogicalDecodingRequirements(u_sess->proc_cxt.MyDatabaseId); } slot_advance(NameStr(*slotname), moveto, database, EndLsn, for_backup); @@ -878,11 +934,9 @@ void redo_slot_advance(const ReplicationSlotPersistentData *slotInfo) * standby notify the primary to advance the logical replication slot. * Thus, we do not redo the slot_advance log. 
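 * (With the ENABLE_MULTIPLE_NODES guard removed below, this active-slot check now
 * applies to every build, not only multi-node ones.)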
*/ -#ifndef ENABLE_MULTIPLE_NODES if (IsReplicationSlotActive(NameStr(slotInfo->name))) { return; } -#endif /* Acquire the slot so we "own" it */ ReplicationSlotAcquire(NameStr(slotInfo->name), false); @@ -958,12 +1012,23 @@ int get_in_use_slot_number() void slot_redo(XLogReaderState *record) { + char* extra_content = NULL; uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; ReplicationSlotPersistentData *xlrec = (ReplicationSlotPersistentData *)XLogRecGetData(record); LogicalPersistentData *LogicalSlot = (LogicalPersistentData *)XLogRecGetData(record); + if ((IS_DISASTER_RECOVER_MODE && (info == XLOG_SLOT_CREATE || info == XLOG_SLOT_ADVANCE)) || + IsRoachRestore() || t_thrd.xlog_cxt.recoveryTarget == RECOVERY_TARGET_TIME_OBS) { + return; + } + + /* Backup blocks are not used in xlog records */ Assert(!XLogRecHasAnyBlockRefs(record)); + if (GET_SLOT_EXTRA_DATA_LENGTH(*xlrec) != 0) { + extra_content = (char*)XLogRecGetData(record) + ReplicationSlotPersistentDataConstSize; + Assert(strlen(extra_content) == (uint32)(GET_SLOT_EXTRA_DATA_LENGTH(*xlrec))); + } switch (info) { /* * Rmgrs we care about for logical decoding. Add new rmgrs in @@ -981,11 +1046,6 @@ void slot_redo(XLogReaderState *record) if (AllSlotInUse(SlotCount, g_instance.attr.attr_storage.max_replication_slots)) { break; } else { - char* extra_content = NULL; - if (GET_SLOT_EXTRA_DATA_LENGTH(*xlrec) != 0) { - extra_content = (char*)XLogRecGetData(record) + ReplicationSlotPersistentDataConstSize; - Assert(strlen(extra_content) == (uint32)(GET_SLOT_EXTRA_DATA_LENGTH(*xlrec))); - } redo_slot_create(xlrec, extra_content); } } else { @@ -993,8 +1053,13 @@ void slot_redo(XLogReaderState *record) } break; case XLOG_SLOT_ADVANCE: - if (ReplicationSlotFind(xlrec->name.data)) + if (!ReplicationSlotFind(xlrec->name.data) && extra_content != NULL && + GET_SLOT_PERSISTENCY(*xlrec) != RS_BACKUP) { + return; + } + if (ReplicationSlotFind(xlrec->name.data)) { redo_slot_advance(xlrec); + } else redo_slot_create(xlrec); break; @@ -1038,7 +1103,8 @@ bool is_archive_slot(ReplicationSlotPersistentData data) return GET_SLOT_EXTRA_DATA_LENGTH(data) != 0; } -XLogRecPtr create_physical_replication_slot_for_archive(const char* slot_name, bool is_dummy, char* extra_content) +XLogRecPtr create_physical_replication_slot_for_archive(const char* slot_name, bool is_dummy, char* extra_content, + XLogRecPtr currFlushPtr) { /* before upgrade commit, we can not use new slot extra field */ if (t_thrd.proc->workingVersionNum < EXTRA_SLOT_VERSION_NUM) { @@ -1048,7 +1114,7 @@ XLogRecPtr create_physical_replication_slot_for_archive(const char* slot_name, b check_permissions(); /* create persistent replication slot with extra archive configuration */ - ReplicationSlotCreate(slot_name, RS_PERSISTENT, is_dummy, InvalidOid, InvalidXLogRecPtr, extra_content); + ReplicationSlotCreate(slot_name, RS_PERSISTENT, is_dummy, InvalidOid, currFlushPtr, extra_content); /* log slot creation */ log_slot_create(&t_thrd.slot_cxt.MyReplicationSlot->data, t_thrd.slot_cxt.MyReplicationSlot->extra_content); MarkArchiveSlotOperate(); @@ -1076,6 +1142,12 @@ XLogRecPtr create_physical_replication_slot_for_backup(const char* slot_name, bo errhint("roach backup cannot be executed during recovery."))); } + /* we are about to start streaming switch over, stop any xlog insert. 
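+ * (Slot creation may write a slot xlog record; while xlog inserts are frozen for a
+ * streaming DR switchover, i.e. LocalXLogInsertAllowed == 0, the request must be
+ * refused, hence the error below.)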
*/ + if (t_thrd.xlog_cxt.LocalXLogInsertAllowed == 0 && g_instance.streaming_dr_cxt.isInSwitchover == true) { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot create physical replication slot during streaming disaster recovery"))); + } + (void)LWLockAcquire(ControlFileLock, LW_SHARED); restart_lsn = t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.redo; LWLockRelease(ControlFileLock); @@ -1190,9 +1262,14 @@ void add_archive_slot_to_instance(ReplicationSlot *slot) if (slot->archive_config->conn_config != NULL) { archive_config->archive_config.conn_config = (ArchiveConnConfig *)palloc0(sizeof(ArchiveConnConfig)); - rc = memcpy_s(archive_config->archive_config.conn_config, sizeof(ArchiveConnConfig), - slot->archive_config->conn_config, sizeof(ArchiveConnConfig)); - securec_check(rc, "\0", "\0"); + archive_config->archive_config.conn_config->obs_address = + pstrdup(slot->archive_config->conn_config->obs_address); + archive_config->archive_config.conn_config->obs_bucket = + pstrdup(slot->archive_config->conn_config->obs_bucket); + archive_config->archive_config.conn_config->obs_ak = + pstrdup(slot->archive_config->conn_config->obs_ak); + archive_config->archive_config.conn_config->obs_sk = + pstrdup(slot->archive_config->conn_config->obs_sk); } archive_config->archive_config.archive_prefix = pstrdup(slot->archive_config->archive_prefix); archive_config->archive_config.is_recovery = slot->archive_config->is_recovery; @@ -1351,3 +1428,48 @@ ArchiveTaskStatus* walreceiver_find_archive_task_status(unsigned int expected_pi } return NULL; } + +Datum gs_get_parallel_decode_status(PG_FUNCTION_ARGS) +{ + FuncCallContext* funcctx = NULL; + MemoryContext oldcontext = NULL; + ParallelStatusData *entry = NULL; + const int columnNum = 4; + + if (SRF_IS_FIRSTCALL()) { + funcctx = SRF_FIRSTCALL_INIT(); + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + TupleDesc tupdesc = CreateTemplateTupleDesc(columnNum, false); + + TupleDescInitEntry(tupdesc, (AttrNumber)ARR_1, "slot_name", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARR_2, "parallel_decode_num", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARR_3, "read_change_queue_length", TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber)ARR_4, "decode_change_queue_length", TEXTOID, -1, 0); + funcctx->tuple_desc = BlessTupleDesc(tupdesc); + funcctx->user_fctx = (void *)GetParallelDecodeStatus(&(funcctx->max_calls)); + (void)MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + entry = (ParallelStatusData *)funcctx->user_fctx; + if (funcctx->call_cntr < funcctx->max_calls) { + Datum values[columnNum]; + bool nulls[columnNum] = {false}; + HeapTuple tuple = NULL; + errno_t rc = memset_s(values, sizeof(values), 0, sizeof(values)); + securec_check(rc, "\0", "\0"); + rc = memset_s(nulls, sizeof(nulls), 0, sizeof(nulls)); + securec_check(rc, "\0", "\0"); + + entry += funcctx->call_cntr; + values[ARG_0] = CStringGetTextDatum(entry->slotName); + values[ARG_1] = Int32GetDatum(entry->parallelDecodeNum); + values[ARG_2] = CStringGetTextDatum(entry->readQueueLen); + values[ARG_3] = CStringGetTextDatum(entry->decodeQueueLen); + tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls); + + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + SRF_RETURN_DONE(funcctx); +} + diff --git a/src/gausskernel/storage/replication/subscription_walreceiver.cpp b/src/gausskernel/storage/replication/subscription_walreceiver.cpp index 206be0447..95cd326ef 100644 --- 
a/src/gausskernel/storage/replication/subscription_walreceiver.cpp +++ b/src/gausskernel/storage/replication/subscription_walreceiver.cpp @@ -43,10 +43,18 @@ bool sub_connect(char *conninfo, XLogRecPtr *startpoint, char *appname, int chan vals[i] = NULL; t_thrd.libwalreceiver_cxt.streamConn = PQconnectdbParams(keys, vals, true); + if ((t_thrd.libwalreceiver_cxt.streamConn != NULL) && (t_thrd.libwalreceiver_cxt.streamConn->pgpass != NULL)) { + /* clear password related memory to avoid leaks */ + int rc = memset_s(t_thrd.libwalreceiver_cxt.streamConn->pgpass, + strlen(t_thrd.libwalreceiver_cxt.streamConn->pgpass), + 0, strlen(t_thrd.libwalreceiver_cxt.streamConn->pgpass)); + securec_check_c(rc, "\0", "\0"); + } if (PQstatus(t_thrd.libwalreceiver_cxt.streamConn) != CONNECTION_OK) { ereport(WARNING, (errcode(ERRCODE_CONNECTION_TIMED_OUT), - errmsg("apply worker could not connect to the remote server : %s", - PQerrorMessage(t_thrd.libwalreceiver_cxt.streamConn)))); + errmsg("apply worker could not connect to the remote server"))); + PQfinish(t_thrd.libwalreceiver_cxt.streamConn); + t_thrd.libwalreceiver_cxt.streamConn = NULL; return false; } diff --git a/src/gausskernel/storage/replication/syncrep.cpp b/src/gausskernel/storage/replication/syncrep.cpp index 87f6494c0..98e9c69f5 100755 --- a/src/gausskernel/storage/replication/syncrep.cpp +++ b/src/gausskernel/storage/replication/syncrep.cpp @@ -250,9 +250,8 @@ void SyncRepWaitForLSN(XLogRecPtr XactCommitLSN, bool enableHandleCancel) * walsender changes the state to SYNC_REP_WAIT_COMPLETE, it will never * update it again, so we can't be seeing a stale value in that case. */ - if (t_thrd.proc->syncRepState == SYNC_REP_WAIT_COMPLETE && !DelayIntoMostAvaSync(true)) { + if (t_thrd.proc->syncRepState == SYNC_REP_WAIT_COMPLETE && !DelayIntoMostAvaSync(true)) break; - } /* * If a wait for synchronous replication is pending, we can neither @@ -603,7 +602,8 @@ void SyncRepReleaseWaiters(void) LWLockRelease(SyncRepLock); ereport(DEBUG3, - (errmsg("released %d procs up to receive %X/%X, %d procs up to write %X/%X, %d procs up to flush %X/%X, %d procs up to apply %X/%X", + (errmsg("released %d procs up to receive %X/%X, %d procs up to write %X/%X, " + "%d procs up to flush %X/%X, %d procs up to apply %X/%X", numreceive, (uint32)(receivePtr >> 32), (uint32)receivePtr, numwrite, (uint32)(writePtr >> 32), (uint32)writePtr, numflush, (uint32)(flushPtr >> 32), (uint32)flushPtr, numapply, (uint32)(replayPtr >> 32), (uint32)replayPtr))); @@ -909,7 +909,7 @@ static int SyncRepGetStandbyPriority(void) * Since synchronous cascade replication is not allowed, we always set the * priority of cascading walsender to zero. 
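+     * The same holds for the share-storage, HADR DN/CN and DB senders
+     * checked below: none of them may be picked as a synchronous standby,
+     * so they all report priority zero.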
*/ - if (AM_WAL_STANDBY_SENDER || AM_WAL_SHARE_STORE_SENDER || AM_WAL_HADR_SENDER) + if (AM_WAL_STANDBY_SENDER || AM_WAL_SHARE_STORE_SENDER || AM_WAL_HADR_DNCN_SENDER || AM_WAL_DB_SENDER) return 0; if (!SyncStandbysDefined() || t_thrd.syncrep_cxt.SyncRepConfig == NULL || !SyncRepRequested()) @@ -1436,6 +1436,7 @@ void SyncRepCheckSyncStandbyAlive(void) ereport(LOG, (errmsg("synchronous master is now standalone"))); t_thrd.walsender_cxt.WalSndCtl->sync_master_standalone = true; + /* * If there is any waiting sender, then wake-up them as * master has switched to standalone mode diff --git a/src/gausskernel/storage/replication/syncrep_gram.y b/src/gausskernel/storage/replication/syncrep_gram.y index 9f082cafa..bfcb29d32 100644 --- a/src/gausskernel/storage/replication/syncrep_gram.y +++ b/src/gausskernel/storage/replication/syncrep_gram.y @@ -172,4 +172,5 @@ create_syncrep_config(const char *num_sync, List *members, uint8 syncrep_method) #undef yylloc #undef yylex +#undef yylex #include "syncrep_scanner.inc" diff --git a/src/gausskernel/storage/replication/syncrep_scanner.l b/src/gausskernel/storage/replication/syncrep_scanner.l index cefd461de..b461a25bc 100755 --- a/src/gausskernel/storage/replication/syncrep_scanner.l +++ b/src/gausskernel/storage/replication/syncrep_scanner.l @@ -36,6 +36,13 @@ fprintf_to_ereport(const char *fmt, const char *msg) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("%s", msg))); } +/* + * define syncrep_scanner_yylex for flex >= 2.6 + */ +#if FLEX_MAJOR_VERSION >= 2 && FLEX_MINOR_VERSION >= 6 +#define YY_DECL int syncrep_scanner_yylex \ + (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner) +#endif %} %option reentrant diff --git a/src/gausskernel/storage/replication/walrcvwriter.cpp b/src/gausskernel/storage/replication/walrcvwriter.cpp index 2ea1ed79d..e55959696 100755 --- a/src/gausskernel/storage/replication/walrcvwriter.cpp +++ b/src/gausskernel/storage/replication/walrcvwriter.cpp @@ -39,6 +39,10 @@ #include "gssignal/gs_signal.h" #include "gs_bbox.h" +#ifdef ENABLE_MULTIPLE_NODES +#include "postmaster/barrier_preparse.h" +#endif + /* * These variables are used similarly to openLogFile/SegNo/Off, * but for walreceiver to write the XLOG. recvFileTLI is the TimeLineID @@ -157,7 +161,7 @@ static void XLogWalRcvWrite(WalRcvCtlBlock *walrcb, char *buf, Size nbytes, XLog * Create .done file forcibly to prevent the restored segment from * being archived again later. 
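+     * XLogArchiveForceDone drops a ready-made .done status file for the
+     * segment, so the archiver regards it as already handled.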
*/ - XLogFileName(xlogfname, recvFileTLI, recvSegNo); + XLogFileName(xlogfname, MAXFNAMELEN, recvFileTLI, recvSegNo); XLogArchiveForceDone(xlogfname); } recvFile = -1; @@ -231,6 +235,10 @@ static void XLogWalRcvWrite(WalRcvCtlBlock *walrcb, char *buf, Size nbytes, XLog } SpinLockRelease(&walrcv->mutex); +#ifdef ENABLE_MULTIPLE_NODES + WakeUpBarrierPreParseBackend(); + +#endif /* Signal the startup process and walsender that new WAL has arrived */ WakeupRecovery(); if (AllowCascadeReplication()) @@ -567,19 +575,11 @@ int walRcvWrite(WalRcvCtlBlock *walrcb) SpinLockAcquire(&walrcb->mutex); if (walrcb->walFreeOffset == walrcb->walWriteOffset) { - if (IsExtremeRtoReadWorkerRunning()) { - if (walrcb->walFreeOffset != walrcb->walReadOffset && - pg_atomic_read_u32(&(g_instance.comm_cxt.localinfo_cxt.is_finish_redo)) == ATOMIC_FALSE) { - nbytes = 1; - } - } - SpinLockRelease(&walrcb->mutex); LWLockRelease(WALWriteLock); END_CRIT_SECTION(); - - return nbytes; + return 0; } walfreeoffset = walrcb->walFreeOffset; walwriteoffset = walrcb->walWriteOffset; @@ -597,19 +597,11 @@ int walRcvWrite(WalRcvCtlBlock *walrcb) SpinLockAcquire(&walrcb->mutex); walrcb->walWriteOffset += nbytes; walrcb->walStart = startptr; - if (IsExtremeRedo()) { - if (walrcb->walWriteOffset == recBufferSize && (walrcb->walReadOffset > 0)) { - walrcb->walWriteOffset = 0; - if (walrcb->walFreeOffset == recBufferSize) { - walrcb->walFreeOffset = 0; - } - } - } else { - if (walrcb->walWriteOffset == recBufferSize) { - walrcb->walWriteOffset = 0; - if (walrcb->walFreeOffset == recBufferSize) { - walrcb->walFreeOffset = 0; - } + + if (walrcb->walWriteOffset == recBufferSize) { + walrcb->walWriteOffset = 0; + if (walrcb->walFreeOffset == recBufferSize) { + walrcb->walFreeOffset = 0; } } walfreeoffset = walrcb->walFreeOffset; @@ -752,6 +744,9 @@ void walrcvWriterMain(void) /* abort async io, must before LWlock release */ AbortAsyncListIO(); + /* release resource held by lsc */ + AtEOXact_SysDBCache(false); + /* * These operations are really just a minimal subset of * AbortTransaction(). 
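+     * AtEOXact_SysDBCache(false) above releases the local syscache ("lsc")
+     * resources first, so an error exit cannot leave them pinned.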
We don't have very many resources to worry diff --git a/src/gausskernel/storage/replication/walreceiver.cpp b/src/gausskernel/storage/replication/walreceiver.cpp index 391aa6fb5..9d59a2bc4 100755 --- a/src/gausskernel/storage/replication/walreceiver.cpp +++ b/src/gausskernel/storage/replication/walreceiver.cpp @@ -80,13 +80,12 @@ #include "hotpatch/hotpatch.h" #include "utils/distribute_test.h" +#include "lz4.h" bool wal_catchup = false; #define NAPTIME_PER_CYCLE 1 /* max sleep time between cycles (1ms) */ -#define CONFIG_BAK_FILENAME "postgresql.conf.bak" - #define WAL_DATA_LEN ((sizeof(uint32) + 1 + sizeof(XLogRecPtr))) #define TEMP_CONF_FILE "postgresql.conf.bak" @@ -134,8 +133,11 @@ const char *g_reserve_param[RESERVE_SIZE] = { "enable_upsert_to_merge", "archive_dest", "cluster_run_mode", + "stream_cluster_run_mode", "xlog_file_size", "xlog_file_path", + "xlog_lock_file_path", + "auto_csn_barrier", #ifndef ENABLE_MULTIPLE_NODES "recovery_min_apply_delay", "sync_config_strategy", @@ -197,6 +199,9 @@ static void ProcessArchiveXlogMessage(const ArchiveXlogMessage* archive_xlog_mes static void WalRecvSendArchiveXlogResponse(ArchiveTaskStatus *archive_status); static void ProcessHadrSwitchoverRequest(HadrSwitchoverMessage *hadrSwitchoverMessage); static void WalRecvHadrSwitchoverResponse(); +#ifdef ENABLE_MULTIPLE_NODES +static void WalRecvHadrSendReply(); +#endif void ProcessWalRcvInterrupts(void) @@ -420,6 +425,9 @@ void WalRcvrProcessData(TimestampTz *last_recv_timestamp, bool *ping_sent) XLogWalRcvSendReply(requestReply, requestReply); XLogWalRcvSendHSFeedback(); } +#ifdef ENABLE_MULTIPLE_NODES + WalRecvHadrSendReply(); +#endif ConfigFileTimer(); } @@ -538,7 +546,7 @@ void WalReceiverMain(void) int walreplindex = hashmdata->current_repl; SpinLockRelease(&hashmdata->mutex); - if (!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) { + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) { replConnInfo = t_thrd.postmaster_cxt.ReplConnArray[walreplindex]; } else if (walreplindex >= MAX_REPLNODE_NUM) { replConnInfo = t_thrd.postmaster_cxt.CrossClusterReplConnArray[walreplindex - MAX_REPLNODE_NUM]; @@ -700,7 +708,7 @@ static inline TimestampTz CalculateTimeout(TimestampTz last_reply_time) */ static bool WalRecCheckTimeOut(TimestampTz nowtime, TimestampTz last_recv_timestamp, bool ping_sent) { - if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE || (IS_SHARED_STORAGE_PRIMARY_CLUSTER_STANDBY_MODE + if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE || (IS_SHARED_STORAGE_PRIMARY_CLUSTER_STANDBY_MODE && !t_thrd.libwalreceiver_cxt.streamConn)) return false; bool requestReply = false; @@ -801,20 +809,6 @@ bool HasBuildReason() } } -static bool IsExtremeRtoNotOpenOrRunning() -{ - if (!IsExtremeRedo()) { - return true; - } - - if (IsExtremeRtoReadWorkerRunning()) { - return true; - } - - return false; -} - - static void rcvAllXlog() { if (HasBuildReason() || t_thrd.walreceiver_cxt.checkConsistencyOK == false) { @@ -827,7 +821,7 @@ static void rcvAllXlog() char *buf = NULL; int len; ereport(LOG, (errmsg("rcvAllXlog before walreceiver quit"))); - while(WalReceiverFuncTable[GET_FUNC_IDX].walrcv_receive(0, &type, &buf, &len) && IsExtremeRtoNotOpenOrRunning()) { + while(WalReceiverFuncTable[GET_FUNC_IDX].walrcv_receive(0, &type, &buf, &len)) { XLogWalRcvProcessMsg(type, buf, len); t_thrd.walreceiver_cxt.hasReceiveNewData = true; } @@ -854,7 +848,7 @@ static void WalRcvDie(int code, Datum arg) { /* use volatile pointer to prevent code rearrangement */ volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; - if 
(IS_SHARED_STORAGE_MODE && !t_thrd.walreceiver_cxt.termChanged && IsExtremeRtoNotOpenOrRunning()) { + if (IS_SHARED_STORAGE_MODE && !t_thrd.walreceiver_cxt.termChanged) { SpinLockAcquire(&walrcv->mutex); walrcv->walRcvState = WALRCV_STOPPING; SpinLockRelease(&walrcv->mutex); @@ -862,7 +856,7 @@ static void WalRcvDie(int code, Datum arg) rcvAllXlog(); uint32 disableConnectionNode = pg_atomic_read_u32(&g_instance.comm_cxt.localinfo_cxt.need_disable_connection_node); - if (disableConnectionNode && !t_thrd.walreceiver_cxt.termChanged && IsExtremeRtoNotOpenOrRunning()) { + if (disableConnectionNode && !t_thrd.walreceiver_cxt.termChanged) { pg_atomic_write_u32(&t_thrd.walreceiverfuncs_cxt.WalRcv->rcvDoneFromShareStorage, true); } } @@ -909,6 +903,11 @@ static void WalRcvDie(int code, Datum arg) t_thrd.libwalreceiver_cxt.recvBuf = NULL; } + if (t_thrd.libwalreceiver_cxt.decompressBuf != NULL) { + pfree(t_thrd.libwalreceiver_cxt.decompressBuf); + t_thrd.libwalreceiver_cxt.decompressBuf = NULL; + } + /* reset conn_channel */ errno_t rc = memset_s((void*)&walrcv->conn_channel, sizeof(walrcv->conn_channel), 0, sizeof(walrcv->conn_channel)); @@ -1043,22 +1042,16 @@ static void XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len) case 'w': /* WAL records */ { WalDataMessageHeader msghdr; - if (len < sizeof(WalDataMessageHeader)) - ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg_internal("invalid WAL message received from primary"))); - /* memcpy is required here for alignment reasons */ - errorno = memcpy_s(&msghdr, sizeof(WalDataMessageHeader), buf, sizeof(WalDataMessageHeader)); - securec_check(errorno, "", ""); - - ProcessWalHeaderMessage(&msghdr); - - buf += sizeof(WalDataMessageHeader); - len -= sizeof(WalDataMessageHeader); - if (IsExtremeRedo()) { - XLogWalRcvReceiveInBuf(buf, len, msghdr.dataStart); - } else { - XLogWalRcvReceive(buf, len, msghdr.dataStart); - } + XLogWalRecordsPreProcess(&buf, &len, &msghdr); + XLogWalRcvReceive(buf, len, msghdr.dataStart); + break; + } + case 'C': /* Compressed WAL records */ + { + WalDataMessageHeader msghdr; + XLogWalRecordsPreProcess(&buf, &len, &msghdr); + Size decompressedSize = (Size)XLogDecompression(buf, len, msghdr.dataStart); + XLogWalRcvReceive(t_thrd.libwalreceiver_cxt.decompressBuf, decompressedSize, msghdr.dataStart); break; } case 'd': /* Data page replication for the logical xlog */ @@ -1097,7 +1090,11 @@ static void XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len) } case 'm': /* config file */ { - if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE || AM_HADR_WAL_RECEIVER) { +#ifndef ENABLE_MULTIPLE_NODES + if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE || AM_HADR_WAL_RECEIVER) { +#else + if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE || AM_HADR_WAL_RECEIVER || AM_HADR_CN_WAL_RECEIVER) { +#endif break; } if (len < sizeof(ConfigModifyTimeMessage)) { @@ -1153,6 +1150,41 @@ static void XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len) } } +void XLogWalRecordsPreProcess(char **buf, Size *len, WalDataMessageHeader *msghdr) +{ + errno_t errorno = EOK; + if (*len < sizeof(WalDataMessageHeader)) + ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg_internal("invalid WAL message received from primary"))); + /* memcpy is required here for alignment reasons */ + errorno = memcpy_s(msghdr, sizeof(WalDataMessageHeader), *buf, sizeof(WalDataMessageHeader)); + securec_check(errorno, "", ""); + + ProcessWalHeaderMessage(msghdr); + + *buf += sizeof(WalDataMessageHeader); + *len -= 
sizeof(WalDataMessageHeader); +} + +int XLogDecompression(const char *buf, Size len, XLogRecPtr dataStart) +{ + char *decompressBuff = t_thrd.libwalreceiver_cxt.decompressBuf; + const int maxBlockSize = g_instance.attr.attr_storage.WalReceiverBufSize * 1024; + if (decompressBuff == NULL) { + t_thrd.libwalreceiver_cxt.decompressBuf = (char *)palloc0(maxBlockSize); + decompressBuff = t_thrd.libwalreceiver_cxt.decompressBuf; + } + int decompressedSize = LZ4_decompress_safe(buf, decompressBuff, len, maxBlockSize); + if (decompressedSize <= 0) { + ereport(ERROR, (errmsg("[DecompressFailed] startPtr %X/%X, compressedSize: %ld, decompressSize: %d", + (uint32)(dataStart >> 32), (uint32)dataStart, len, decompressedSize))); + } + ereport(DEBUG4, ((errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[XLOG_COMPRESS] xlog decompression working! startPtr %X/%X, compressedSize %ld, decompressSize %d", + (uint32)(dataStart >> 32), (uint32)dataStart, len, decompressedSize)))); + return decompressedSize; +} + void WSDataRcvCheck(char *data_buf, Size nbytes) { errno_t errorno = EOK; @@ -1285,169 +1317,35 @@ static void WalDataRcvReceive(char *buf, Size nbytes, XLogRecPtr recptr) wakeupWalRcvWriter(); } -void UpdateWalRcvCtl(struct WalRcvCtlBlock* walRcvCtlBlock, const XLogRecPtr recptr, const int segbytes) +static void ProcessReplyFlags(void) { - const int64 recBufferSize = g_instance.attr.attr_storage.WalReceiverBufSize * 1024; - SpinLockAcquire(&walRcvCtlBlock->mutex); - walRcvCtlBlock->walFreeOffset += segbytes; - if (walRcvCtlBlock->walFreeOffset == recBufferSize && walRcvCtlBlock->walWriteOffset > 0 && - walRcvCtlBlock->walReadOffset > 0) { - walRcvCtlBlock->walFreeOffset = 0; - } - walRcvCtlBlock->receivePtr = recptr; - SpinLockRelease(&walRcvCtlBlock->mutex); -} - -inline void WalReceiverWaitCopyXLogCount(XLogRecPtr recptr, XLogRecPtr startptr, int64 walfreeoffset, - int64 walwriteoffset, int64 walreadoffset) -{ - static uint64 waitCount = 0; - ++waitCount; - const uint64 printInterval = 0xFFFF; - if ((waitCount & printInterval) == 0) { - const uint32 rightShiftSize = 32; - ereport(WARNING, (errmsg("WalReceiverWaitCopyXLogCount: recptr(%X:%X),walfreeoffset(%ld)," - "walwriteoffset(%ld),walreadoffset(%ld),startptr(%X:%X)", - (uint32)(recptr >> rightShiftSize), (uint32)recptr, walfreeoffset, walwriteoffset, - walreadoffset, (uint32)(startptr >> rightShiftSize), (uint32)startptr))); - } -} - -static void PushWalRcvWrite() -{ - if (WalRcvWriterInProgress()) { - wakeupWalRcvWriter(); - WakeupRecovery(); - /* Process any requests or signals received recently */ - ProcessWalRcvInterrupts(); - /* Keepalived with primary when waiting flush wal data */ - XLogWalRcvSendReply(false, false); - pg_usleep(1000); /* 1ms */ +#ifdef ENABLE_MULTIPLE_NODES + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; + if (IS_DISASTER_RECOVER_MODE) { + SpinLockAcquire(&walrcv->mutex); + if (walrcv->isPauseByTargetBarrier) { + t_thrd.walreceiver_cxt.reply_message->replyFlags |= IS_PAUSE_BY_TARGET_BARRIER; + } else { + t_thrd.walreceiver_cxt.reply_message->replyFlags &= ~IS_PAUSE_BY_TARGET_BARRIER; + } + SpinLockRelease(&walrcv->mutex); + ereport(DEBUG4, ((errmodule(MOD_REDO), errcode(ERRCODE_LOG), errmsg( + "[RcvSendReply] replyFlags=%d, receive=%lu, flush=%lu, apply=%lu", + t_thrd.walreceiver_cxt.reply_message->replyFlags, t_thrd.walreceiver_cxt.reply_message->receive, + t_thrd.walreceiver_cxt.reply_message->flush, t_thrd.walreceiver_cxt.reply_message->apply)))); } else { - walRcvDataCleanup(); - WakeupRecovery(); 
- ProcessWalRcvInterrupts(); + t_thrd.walreceiver_cxt.reply_message->replyFlags &= ~IS_PAUSE_BY_TARGET_BARRIER; } -} +#else + t_thrd.walreceiver_cxt.reply_message->replyFlags &= ~IS_PAUSE_BY_TARGET_BARRIER; +#endif -/* - * Receive XLOG data into receiver buffer. - */ -void XLogWalRcvReceiveInBuf(char *buf, Size nbytes, XLogRecPtr recptr) -{ - int64 walfreeoffset; - int64 walwriteoffset; - int64 walreadoffset; - char *walrecvbuf = NULL; - XLogRecPtr startptr; - int64 recBufferSize = g_instance.attr.attr_storage.WalReceiverBufSize * 1024; - - while (nbytes > 0) { - int segbytes; - int endPoint = recBufferSize; - - SpinLockAcquire(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - if (t_thrd.walreceiver_cxt.walRcvCtlBlock->walFreeOffset == - t_thrd.walreceiver_cxt.walRcvCtlBlock->walWriteOffset) { - // no data to be flushed - t_thrd.walreceiver_cxt.walRcvCtlBlock->walStart = recptr; - } else if (t_thrd.walreceiver_cxt.walRcvCtlBlock->walFreeOffset == recBufferSize && - t_thrd.walreceiver_cxt.walRcvCtlBlock->walWriteOffset > 0 && - t_thrd.walreceiver_cxt.walRcvCtlBlock->walReadOffset > 0) { - t_thrd.walreceiver_cxt.walRcvCtlBlock->walFreeOffset = 0; - } - walfreeoffset = t_thrd.walreceiver_cxt.walRcvCtlBlock->walFreeOffset; - walwriteoffset = t_thrd.walreceiver_cxt.walRcvCtlBlock->walWriteOffset; - walreadoffset = t_thrd.walreceiver_cxt.walRcvCtlBlock->walReadOffset; - walrecvbuf = t_thrd.walreceiver_cxt.walRcvCtlBlock->walReceiverBuffer; - startptr = t_thrd.walreceiver_cxt.walRcvCtlBlock->walStart; - SpinLockRelease(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - - ereport(DEBUG5, (errmsg("XLogWalRcvReceive: recptr(%X:%X),nbytes(%d)," - "walfreeoffset(%ld),walwriteoffset(%ld),startptr(%X:%X)", - (uint32)(recptr >> 32), (uint32)recptr, (int)nbytes, walfreeoffset, walwriteoffset, - (uint32)(startptr >> 32), (uint32)startptr))); - - XLogWalRcvSendReply(false, false); - - Assert(walrecvbuf != NULL); - Assert(walfreeoffset <= recBufferSize); - Assert(walwriteoffset <= recBufferSize); - Assert(walreadoffset <= recBufferSize); - - if (walfreeoffset < walreadoffset) { - endPoint = walreadoffset - 1; - } - - if (endPoint == walfreeoffset) { - if (WalRcvWriterInProgress()) { - wakeupWalRcvWriter(); - WakeupRecovery(); - /* Process any requests or signals received recently */ - ProcessWalRcvInterrupts(); - /* Keepalived with primary when waiting flush wal data */ - XLogWalRcvSendReply(false, false); - pg_usleep(1000); - WalReceiverWaitCopyXLogCount(recptr, startptr, walfreeoffset, walwriteoffset, walreadoffset); - } else { - walRcvDataCleanup(); - WakeupRecovery(); - ProcessWalRcvInterrupts(); - if (IS_SHARED_STORAGE_MODE && !IsExtremeRtoReadWorkerRunning()) { - return; - } - } - continue; - } - - segbytes = ((walfreeoffset + (int)nbytes > endPoint) ? (endPoint - walfreeoffset) : (int)nbytes); - - /* Need to seek in the buffer? 
*/ - if (walfreeoffset != walwriteoffset) { - if (walfreeoffset > walwriteoffset) { - XLByteAdvance(startptr, (uint32)(walfreeoffset - walwriteoffset)); - } else { - XLByteAdvance(startptr, (uint32)(recBufferSize - walwriteoffset + walfreeoffset)); - } - if (!XLByteEQ(startptr, recptr)) { - /* wait for finishing flushing all wal data */ - while (true) { - SpinLockAcquire(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - if (t_thrd.walreceiver_cxt.walRcvCtlBlock->walFreeOffset == - t_thrd.walreceiver_cxt.walRcvCtlBlock->walWriteOffset) { - t_thrd.walreceiver_cxt.walRcvCtlBlock->walStart = recptr; - SpinLockRelease(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - break; - } - SpinLockRelease(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); - - PushWalRcvWrite(); - } - - ereport(FATAL, - (errmsg("Unexpected seek in the walreceiver buffer. " - "xlogrecptr is (%X:%X) but local xlogptr is (%X:%X)." - "nbyte is %lu, walfreeoffset is %ld walwriteoffset is %ld walreadoffset is %ld", - (uint32)(recptr >> 32), (uint32)recptr, (uint32)(startptr >> 32), (uint32)startptr, - nbytes, walfreeoffset, walwriteoffset, walreadoffset))); - } - } - - /* OK to receive the logs */ - Assert(walfreeoffset + segbytes <= recBufferSize); - errno_t errorno = memcpy_s(walrecvbuf + walfreeoffset, recBufferSize - walfreeoffset, buf, segbytes); - securec_check(errorno, "\0", "\0"); - - XLByteAdvance(recptr, (uint32)segbytes); - - nbytes -= segbytes; - buf += segbytes; - - // update shared memory - UpdateWalRcvCtl(t_thrd.walreceiver_cxt.walRcvCtlBlock, recptr, segbytes); + /* if the standby has bad file, need cancel log ctl */ + if (RecoveryIsSuspend()) { + t_thrd.walreceiver_cxt.reply_message->replyFlags |= IS_CANCEL_LOG_CTRL; + } else { + t_thrd.walreceiver_cxt.reply_message->replyFlags &= ~IS_CANCEL_LOG_CTRL; } - - wakeupWalRcvWriter(); } /* @@ -1643,20 +1541,13 @@ void XLogWalRcvSendReply(bool force, bool requestReply) t_thrd.walreceiver_cxt.reply_message->receive = receivePtr; t_thrd.walreceiver_cxt.reply_message->write = writePtr; t_thrd.walreceiver_cxt.reply_message->flush = flushPtr; + ProcessReplyFlags(); if (!dummyStandbyMode) { if (AM_HADR_WAL_RECEIVER) { /* for streaming disaster cluster, the main standby should collect all cascade standby info * then send lsn which satisfied Quorum to main cluster. 
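+     * The sanity check that quorum flush never trails replay (and its LOG
+     * message) now lives inside GetMinLsnRecordsFromHadrCascadeStandby().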
*/ GetMinLsnRecordsFromHadrCascadeStandby(); - if (t_thrd.walreceiver_cxt.reply_message->apply > t_thrd.walreceiver_cxt.reply_message->flush) { - ereport(LOG, (errmsg( - "In disaster cluster, the reply message of quorum flush location is less than replay location," - "flush is %X/%X, replay is %X/%X.", (uint32)(t_thrd.walreceiver_cxt.reply_message->flush >> 32), - (uint32)t_thrd.walreceiver_cxt.reply_message->flush, - (uint32)(t_thrd.walreceiver_cxt.reply_message->apply >> 32), - (uint32)t_thrd.walreceiver_cxt.reply_message->apply))); - } } else { t_thrd.walreceiver_cxt.reply_message->apply = GetXLogReplayRecPtr(NULL, &ReplayReadPtr); t_thrd.walreceiver_cxt.reply_message->applyRead = ReplayReadPtr; @@ -1669,7 +1560,7 @@ void XLogWalRcvSendReply(bool force, bool requestReply) t_thrd.walreceiver_cxt.reply_message->replyRequested = requestReply; SpinLockAcquire(&hashmdata->mutex); - if (!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) t_thrd.walreceiver_cxt.reply_message->peer_role = hashmdata->current_mode; else t_thrd.walreceiver_cxt.reply_message->peer_role = STANDBY_CLUSTER_MODE; @@ -1710,6 +1601,9 @@ void XLogWalRcvSendReply(bool force, bool requestReply) */ static void XLogWalRcvSendHSFeedback(void) { +#ifdef ENABLE_MULTIPLE_NODES + return; +#endif char buf[sizeof(StandbyHSFeedbackMessage) + 1]; TimestampTz now; TransactionId xmin; @@ -1718,7 +1612,7 @@ static void XLogWalRcvSendHSFeedback(void) * If the user doesn't want status to be reported to the master, be sure * to exit before doing anything at all. */ - if (u_sess->attr.attr_storage.wal_receiver_status_interval <= 0 || !u_sess->attr.attr_storage.hot_standby_feedback) + if (u_sess->attr.attr_storage.wal_receiver_status_interval <= 0) return; /* Get current timestamp. */ @@ -1746,8 +1640,10 @@ static void XLogWalRcvSendHSFeedback(void) /* Get updated RecentGlobalXmin */ GetSnapshotData(u_sess->utils_cxt.CurrentSnapshotData, true, true); #endif - xmin = GetOldestXmin(NULL); - + if (u_sess->attr.attr_storage.hot_standby_feedback) + xmin = GetOldestXmin(NULL); + else + xmin = InvalidTransactionId; /* * Always send feedback message. */ @@ -1951,7 +1847,6 @@ static void ProcessRmXLogMessage(RmXLogMessage *rmXLogMessage) t_thrd.walreceiver_cxt.walRcvCtlBlock->flushPtr = InvalidXLogRecPtr; t_thrd.walreceiver_cxt.walRcvCtlBlock->walStart = InvalidXLogRecPtr; t_thrd.walreceiver_cxt.walRcvCtlBlock->walWriteOffset = t_thrd.walreceiver_cxt.walRcvCtlBlock->walFreeOffset = 0; - t_thrd.walreceiver_cxt.walRcvCtlBlock->walReadOffset = 0; SpinLockRelease(&t_thrd.walreceiver_cxt.walRcvCtlBlock->mutex); /* Now rm the WAL files. */ @@ -2018,7 +1913,6 @@ static void ProcessArchiveXlogMessage(const ArchiveXlogMessage* archive_xlog_mes /* lock for archiver get PITR_TASK_GET flag, but works on old task . * if archiver works between set flag and set task details. 
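+     * The spinlock is now acquired only after the CAS on the task flag
+     * succeeds, so it guards just the copy of the task details instead of
+     * the whole retry loop.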
 */
-    SpinLockAcquire(&archive_task->mutex);
     while (pg_atomic_compare_exchange_u32(pitr_task_status, &expected, PITR_TASK_GET) == false) {
         /* some task arrived before last task done if expected not equal to NONE */
         expected = PITR_TASK_NONE;
@@ -2028,11 +1922,10 @@ static void ProcessArchiveXlogMessage(const ArchiveXlogMessage* archive_xlog_mes
                 archive_xlog_message->slot_name,
                 (uint32)(archive_xlog_message->targetLsn >> 32),
                 (uint32)(archive_xlog_message->targetLsn))));
-            SpinLockRelease(&archive_task->mutex);
             return;
         }
     }
-
+    SpinLockAcquire(&archive_task->mutex);
     errorno = memcpy_s(&archive_task->archive_task,
         sizeof(ArchiveXlogMessage) + 1,
         archive_xlog_message,
@@ -2230,7 +2123,7 @@ Datum pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
     volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv;
     volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData;
-    char location[MAXFNAMELEN] = {0};
+    char location[MAXFNAMELEN * 3] = {0};
     XLogRecPtr rcvRedo;
     XLogRecPtr rcvWrite;
@@ -2776,6 +2669,15 @@ static void ProcessHadrSwitchoverRequest(HadrSwitchoverMessage *hadrSwitchoverMe
         g_instance.streaming_dr_cxt.isInteractionCompleted = true;
         WalRecvHadrSwitchoverResponse();
     }
+
+    ereport(LOG,
+        (errmsg("ProcessHadrSwitchoverRequest: target switchover barrier lsn %X/%X, "
+            "receive switchover barrier lsn %X/%X, isInteractionCompleted %d",
+            (uint32)(walrcv->targetSwitchoverBarrierLSN >> 32),
+            (uint32)(walrcv->targetSwitchoverBarrierLSN),
+            (uint32)(walrcv->lastSwitchoverBarrierLSN >> 32),
+            (uint32)(walrcv->lastSwitchoverBarrierLSN),
+            g_instance.streaming_dr_cxt.isInteractionCompleted)));
 }
 
 /*
@@ -2802,3 +2704,58 @@ static void WalRecvHadrSwitchoverResponse()
     ereport(LOG, (errmsg("send streaming dr switchover response to primary")));
 }
 
+/* remove any '%zone' part from an IPv6 address string */
+char* remove_ipv6_zone(char* addr_src, char* addr_dest, int len)
+{
+    /* check whether the address is IPv6 and includes a '%zone' suffix */
+    if (strchr(addr_src, '%') == NULL) {
+        return addr_src;
+    } else {
+        char* pct = NULL;
+        errno_t rc = 0;
+        rc = strncpy_s(addr_dest, len, addr_src, len - 1);
+        securec_check(rc, "", "");
+        addr_dest[len - 1] = '\0';
+        pct = strchr(addr_dest, '%');
+        if (pct != NULL) {
+            *pct = '\0';
+        }
+        return addr_dest;
+    }
+
+    /* not reached; kept so every control path returns a value */
+    return addr_src;
+}
+
+#ifdef ENABLE_MULTIPLE_NODES
+static void WalRecvHadrSendReply()
+{
+    if (!(AM_HADR_WAL_RECEIVER || AM_HADR_CN_WAL_RECEIVER)) {
+        return;
+    }
+    volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv;
+    HadrReplyMessage hadrReply;
+    char buf[sizeof(HadrReplyMessage) + 1];
+    errno_t errorno = EOK;
+
+    SpinLockAcquire(&walrcv->mutex);
+    if (!IS_CSN_BARRIER((char *)walrcv->recoveryTargetBarrierId)) {
+        SpinLockRelease(&walrcv->mutex);
+        return;
+    }
+    errorno = strncpy_s((char *)hadrReply.targetBarrierId, MAX_BARRIER_ID_LENGTH,
+        (char *)walrcv->recoveryTargetBarrierId, MAX_BARRIER_ID_LENGTH);
+    SpinLockRelease(&walrcv->mutex);
+    securec_check(errorno, "\0", "\0");
+    hadrReply.sendTime = GetCurrentTimestamp();
+
+    /* Prepend with the message type and send it.
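+     * Wire format: a single 'R' byte followed by HadrReplyMessage (the
+     * CSN barrier id plus a send timestamp); the peer walsender dispatches
+     * on 'R' to ProcessHadrReplyMessage().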
*/ + buf[0] = 'R'; + errorno = memcpy_s(&buf[1], sizeof(HadrReplyMessage), &hadrReply, sizeof(HadrReplyMessage)); + securec_check(errorno, "\0", "\0"); + (WalReceiverFuncTable[GET_FUNC_IDX]).walrcv_send(buf, sizeof(HadrReplyMessage) + 1); + + ereport(DEBUG5, (errmsg("send streaming dr reply to primary, barrier id %s", hadrReply.targetBarrierId))); +} +#endif + diff --git a/src/gausskernel/storage/replication/walreceiverfuncs.cpp b/src/gausskernel/storage/replication/walreceiverfuncs.cpp index 9373c65c0..a165ab446 100755 --- a/src/gausskernel/storage/replication/walreceiverfuncs.cpp +++ b/src/gausskernel/storage/replication/walreceiverfuncs.cpp @@ -29,6 +29,7 @@ #include "pgxc/pgxc.h" #include "access/xlog_internal.h" #include "access/multi_redo_api.h" +#include "libpq/libpq-fe.h" #include "postmaster/startup.h" #include "postmaster/postmaster.h" #include "replication/dataqueue.h" @@ -39,6 +40,7 @@ #include "replication/walsender_private.h" #include "storage/pmsignal.h" #include "storage/shmem.h" +#include "utils/builtins.h" #include "utils/guc.h" #include "utils/timestamp.h" #include "gssignal/gs_signal.h" @@ -57,7 +59,7 @@ * entryway ReplConnArray2 = (channel4, channel5, channel6) */ extern bool dummyStandbyMode; - +static const int MAX_CONNECT_ERROR_COUNT = 3000; static void SetWalRcvConninfo(ReplConnTarget conn_target); static void SetFailoverFailedState(void); @@ -70,6 +72,8 @@ extern void SetDataRcvDummyStandbySyncPercent(int percent); */ #define WALRCV_STARTUP_TIMEOUT 3 #define FAILOVER_HOST_FOR_DUMMY "failover_host_for_dummy" +static int NORMAL_IP_LEN = 16; /* ipv4 len */ +static const char* HADRUSERINFO_CONIG_NAME = "hadr_user_info"; bool walRcvCtlBlockIsEmpty(void) { @@ -86,14 +90,8 @@ bool walRcvCtlBlockIsEmpty(void) bool retState = false; SpinLockAcquire(&walrcb->mutex); - if (IsExtremeRedo()) { - if (walrcb->walFreeOffset == walrcb->walReadOffset) { - retState = true; - } - } else { - if (walrcb->walFreeOffset == walrcb->walWriteOffset) { - retState = true; - } + if (walrcb->walFreeOffset == walrcb->walWriteOffset) { + retState = true; } SpinLockRelease(&walrcb->mutex); @@ -149,6 +147,17 @@ static void SetFailoverFailedState(void) return; } +static int GetConnectErrorCont(int index) +{ + volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData; + int ret = 0; + SpinLockAcquire(&hashmdata->mutex); + ret = hashmdata->disconnect_count[index]; + SpinLockRelease(&hashmdata->mutex); + return ret; +} + + /* * Find next connect channel , and try to connect. 
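+ * In CN disaster-recovery mode the previously used channel is retried
+ * first, as long as its disconnect count is still below
+ * MAX_CONNECT_ERROR_COUNT.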
According to the ReplFlag, * ReplIndex , connect error in the walrcv, find next channel, save it in @@ -160,6 +169,7 @@ static void SetWalRcvConninfo(ReplConnTarget conn_target) volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData; ReplConnInfo *conninfo = NULL; int checknum = MAX_REPLNODE_NUM; + int useIndex = 0; if (IS_DN_DUMMY_STANDYS_MODE()) { if (conn_target == REPCONNTARGET_PRIMARY) { @@ -186,19 +196,43 @@ static void SetWalRcvConninfo(ReplConnTarget conn_target) g_instance.comm_cxt.localinfo_cxt.disable_conn_node.disable_conn_node_data; SpinLockRelease(&g_instance.comm_cxt.localinfo_cxt.disable_conn_node.info_lck); + SpinLockAcquire(&hashmdata->mutex); + int prev_index = hashmdata->prev_repl; + SpinLockRelease(&hashmdata->mutex); + + if (prev_index > 0 && (GetConnectErrorCont(prev_index) < MAX_CONNECT_ERROR_COUNT) && + IS_CN_DISASTER_RECOVER_MODE) { + t_thrd.walreceiverfuncs_cxt.WalReplIndex = prev_index; + ereport(LOG, (errmsg(" SetWalRcvConninfo reuse connection %d.", prev_index))); + pg_usleep(200000L); + } + /* * Skip other connections if you specify a connection host. */ while (checknum--) { + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; conninfo = GetRepConnArray(&t_thrd.walreceiverfuncs_cxt.WalReplIndex); + if (conninfo == NULL) { + t_thrd.walreceiverfuncs_cxt.WalReplIndex++; + break; + } + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(conninfo->remotehost, ipNoZoneData, IP_LEN); if (connNode.conn_mode == SPECIFY_CONNECTION && (conninfo != NULL && - strcmp(connNode.disable_conn_node_host, (char *)conninfo->remotehost) == 0) && + strcmp(connNode.disable_conn_node_host, (char *)ipNoZone) == 0) && connNode.disable_conn_node_port == conninfo->remoteport) { + useIndex = t_thrd.walreceiverfuncs_cxt.WalReplIndex; + t_thrd.walreceiverfuncs_cxt.WalReplIndex++; break; } if (connNode.conn_mode != SPECIFY_CONNECTION) { + useIndex = t_thrd.walreceiverfuncs_cxt.WalReplIndex; + t_thrd.walreceiverfuncs_cxt.WalReplIndex++; break; } @@ -212,20 +246,24 @@ static void SetWalRcvConninfo(ReplConnTarget conn_target) rcs = snprintf_s((char *)walrcv->conninfo, MAXCONNINFO, MAXCONNINFO - 1, "host=%s port=%d localhost=%s localport=%d", conninfo->remotehost, conninfo->remoteport, conninfo->localhost, conninfo->localport); +#ifdef ENABLE_LITE_MODE + if (*conninfo->sslmode != '\0') { + rcs = snprintf_s((char *)walrcv->conninfo, MAXCONNINFO, MAXCONNINFO - 1, + "%s sslmode=%s", (char *)walrcv->conninfo, conninfo->sslmode); + } +#endif securec_check_ss(rcs, "\0", "\0"); walrcv->conninfo[MAXCONNINFO - 1] = '\0'; walrcv->conn_errno = NONE_ERROR; walrcv->conn_target = conn_target; SpinLockRelease(&walrcv->mutex); - ereport(LOG, (errmsg("wal receiver try to connect to %s.", walrcv->conninfo))); + ereport(LOG, (errmsg("wal receiver try to connect to %s index %d .", walrcv->conninfo, useIndex))); SpinLockAcquire(&hashmdata->mutex); - if (!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) - hashmdata->current_repl = t_thrd.walreceiverfuncs_cxt.WalReplIndex; + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) + hashmdata->current_repl = useIndex; else - hashmdata->current_repl = MAX_REPLNODE_NUM + t_thrd.walreceiverfuncs_cxt.WalReplIndex; - hashmdata->disconnect_count[hashmdata->current_repl] = 0; + hashmdata->current_repl = MAX_REPLNODE_NUM + useIndex; SpinLockRelease(&hashmdata->mutex); - t_thrd.walreceiverfuncs_cxt.WalReplIndex++; } } @@ -334,6 +372,34 @@ StringInfo get_rcv_slot_name(void) return slotname; } +/* trim char ':' and '%' */ +static char* 
trim_ipv6_char(char* str, char* dest) +{ + char* s = dest; + char* cp_location = str; + int len = 0; + + /* if ipv4,do nothing */ + if (strchr(str, ':') == NULL) { + return str; + } + + for (; *cp_location != '\0' && len < NORMAL_IP_LEN; cp_location++) { + + /* skip the char ':' and '%' */ + if (*cp_location == ':' || *cp_location == '%') { + continue; + } + + *s = *cp_location; + s++; + len++; + } + *s = '\0'; + + return dest; +} + /* * Set current walrcv's slotname. * depend on have setting the hashmdata->current_repl @@ -350,7 +416,7 @@ static void set_rcv_slot_name(const char *slotname) SpinLockAcquire(&hashmdata->mutex); replIdx = hashmdata->current_repl; SpinLockRelease(&hashmdata->mutex); - if (IS_SHARED_STORAGE_STANBY_CLUSTER_MODE && replIdx >= MAX_REPLNODE_NUM) { + if (IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE && replIdx >= MAX_REPLNODE_NUM) { replIdx = replIdx - MAX_REPLNODE_NUM; } conninfo = GetRepConnArray(&replIdx); @@ -371,8 +437,14 @@ static void set_rcv_slot_name(const char *slotname) rc = snprintf_s((char *)walrcv->slotname, NAMEDATALEN, NAMEDATALEN - 1, "%s", g_instance.attr.attr_common.PGXCNodeName); } else if (conninfo != NULL) { + char slotData[NAMEDATALEN] = {'\0'}; + char *slotTmp = NULL; + + /* trim char ':' and '%' */ + slotTmp = trim_ipv6_char(conninfo->localhost, slotData); + rc = snprintf_s((char *)walrcv->slotname, NAMEDATALEN, NAMEDATALEN - 1, "%s_%s_%d", - g_instance.attr.attr_common.PGXCNodeName, conninfo->localhost, conninfo->localport); + g_instance.attr.attr_common.PGXCNodeName, slotTmp, conninfo->localport); } securec_check_ss(rc, "\0", "\0"); } else @@ -494,6 +566,15 @@ void ShutdownWalRcv(void) */ void RequestXLogStreaming(XLogRecPtr *recptr, const char *conninfo, ReplConnTarget conn_target, const char *slotname) { + if (IS_SHARED_STORAGE_MODE) { + ShareStorageXLogCtl *ctlInfo = g_instance.xlog_cxt.shareStorageXLogCtl; + ReadShareStorageCtlInfo(ctlInfo); + if ((uint64)g_instance.attr.attr_storage.xlog_file_size != ctlInfo->xlogFileSize) { + ereport(FATAL, (errmsg("maybe primary cluster changed xlog_file_size to %lu, current is %lu," + "we need exit for change.", ctlInfo->xlogFileSize, g_instance.attr.attr_storage.xlog_file_size))); + } + } + if (HasBuildReason()) { ereport(LOG, (errmsg("Stop to start walreceiver due to have build reason"))); pg_usleep(500000L); @@ -769,7 +850,7 @@ ReplConnInfo *GetRepConnArray(int *cur_idx) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid replication node index:%d", *cur_idx))); } - if (!IS_SHARED_STORAGE_STANBY_CLUSTER_MODE) + if (!IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE) replConnInfoArray = &t_thrd.postmaster_cxt.ReplConnArray[0]; else replConnInfoArray = &t_thrd.postmaster_cxt.CrossClusterReplConnArray[0]; @@ -782,23 +863,23 @@ ReplConnInfo *GetRepConnArray(int *cur_idx) replConnInfo = replConnInfoArray[*cur_idx]; if (replConnInfo != NULL) { if (t_thrd.postmaster_cxt.HaShmData->is_cross_region) { - if (t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby) { + if (t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby || IS_PGXC_COORDINATOR) { if (replConnInfo->isCrossRegion) { - break; + return replConnInfo; } } else { - if (replConnInfo->isCascade) { - break; + if (replConnInfo->isCascade || (!replConnInfo->isCascade && !replConnInfo->isCrossRegion)) { + return replConnInfo; } } } else { - break; + return replConnInfo; } } (*cur_idx)++; } - return replConnInfo; + return NULL; } void get_failover_host_conninfo_for_dummy(int *repl) @@ -845,10 +926,19 @@ static int 
get_repl_idx(const char *host, int port) { int i = 0; int replIdx = -1; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; for (i = 0; i < MAX_REPLNODE_NUM; ++i) { - if (t_thrd.postmaster_cxt.ReplConnArray[i] != NULL && - strcmp(t_thrd.postmaster_cxt.ReplConnArray[i]->remotehost, host) == 0 && + ReplConnInfo* replconninfo = t_thrd.postmaster_cxt.ReplConnArray[i]; + if (replconninfo == NULL) { + continue; + } + + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strcmp(ipNoZone, host) == 0 && t_thrd.postmaster_cxt.ReplConnArray[i]->remoteport == port) { replIdx = i; break; @@ -1045,7 +1135,7 @@ static void wal_get_ha_rebuild_reason_with_dummy(char *buildReason, ServerMode l volatile HaShmemData *hashmdata = t_thrd.postmaster_cxt.HaShmData; int nRet = 0; load_server_mode(); - if (local_role == NORMAL_MODE || local_role == PRIMARY_MODE || IS_DISASTER_RECOVER_MODE) { + if (local_role == NORMAL_MODE || local_role == PRIMARY_MODE || IS_OBS_DISASTER_RECOVER_MODE) { nRet = snprintf_s(buildReason, MAXFNAMELEN, MAXFNAMELEN - 1, "%s", "Normal"); securec_check_ss(nRet, "\0", "\0"); return; @@ -1100,7 +1190,7 @@ static void wal_get_ha_rebuild_reason_with_multi(char *buildReason, ServerMode l } if ((replConnInfo != NULL && (walrcv->conn_target == REPCONNTARGET_PRIMARY || am_cascade_standby() || IS_SHARED_STORAGE_MODE)) || - IS_DISASTER_RECOVER_MODE) { + IS_OBS_DISASTER_RECOVER_MODE) { if (hashmdata->repl_reason[hashmdata->current_repl] == NONE_REBUILD && isRunning) { rcs = snprintf_s(buildReason, MAXFNAMELEN, MAXFNAMELEN - 1, "%s", "Normal"); securec_check_ss(rcs, "\0", "\0"); @@ -1141,12 +1231,15 @@ static int cmp_min_lsn(const void *a, const void *b) void GetMinLsnRecordsFromHadrCascadeStandby(void) { + volatile WalRcvData *walrcv = t_thrd.walreceiverfuncs_cxt.WalRcv; XLogRecPtr standbyReceiveList[g_instance.attr.attr_storage.max_wal_senders]; XLogRecPtr standbyFlushList[g_instance.attr.attr_storage.max_wal_senders]; XLogRecPtr standbyApplyList[g_instance.attr.attr_storage.max_wal_senders]; + uint32 standbyFlagsList[g_instance.attr.attr_storage.max_wal_senders]; + uint32 standbyFlags = 0; int i; - XLogRecPtr applyLoc; - XLogRecPtr ReplayReadPtr; + XLogRecPtr applyLoc = InvalidXLogRecPtr; + XLogRecPtr ReplayReadPtr = InvalidXLogRecPtr; bool needReport = false; errno_t rc = EOK; @@ -1156,6 +1249,8 @@ void GetMinLsnRecordsFromHadrCascadeStandby(void) securec_check(rc, "\0", "\0"); rc = memset_s(standbyApplyList, sizeof(standbyApplyList), 0, sizeof(standbyApplyList)); securec_check(rc, "\0", "\0"); + rc = memset_s(standbyFlagsList, sizeof(standbyFlagsList), 0, sizeof(standbyFlagsList)); + securec_check(rc, "\0", "\0"); for (i = 0; i < g_instance.attr.attr_storage.max_wal_senders; i++) { volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[i]; @@ -1165,6 +1260,7 @@ void GetMinLsnRecordsFromHadrCascadeStandby(void) standbyApplyList[i] = walsnd->apply; standbyReceiveList[i] = walsnd->receive; standbyFlushList[i] = walsnd->flush; + standbyFlagsList[i] = walsnd->replyFlags; } SpinLockRelease(&walsnd->mutex); } @@ -1209,78 +1305,96 @@ void GetMinLsnRecordsFromHadrCascadeStandby(void) ereport(DEBUG1, (errmsg( "In disaster cluster, all cascade standbys are abnormal, using local apply location."))); } + /* all cascade standby is not pause by targetBarrier, we set flag 0 */ + standbyFlags |= standbyFlagsList[i] & IS_PAUSE_BY_TARGET_BARRIER; } + SpinLockAcquire(&walrcv->mutex); + 
t_thrd.walreceiver_cxt.reply_message->replyFlags |= standbyFlags;
+    SpinLockRelease(&walrcv->mutex);
+    if (t_thrd.walreceiver_cxt.reply_message->apply > t_thrd.walreceiver_cxt.reply_message->flush) {
+        ereport(LOG, (errmsg(
+            "In disaster cluster, the reply message of quorum flush location is less than replay location,"
+            "flush is %X/%X, replay is %X/%X.", (uint32)(t_thrd.walreceiver_cxt.reply_message->flush >> 32),
+            (uint32)t_thrd.walreceiver_cxt.reply_message->flush,
+            (uint32)(t_thrd.walreceiver_cxt.reply_message->apply >> 32),
+            (uint32)t_thrd.walreceiver_cxt.reply_message->apply)));
+    }
+}
+
+static void GetHadrUserInfo(char *hadr_user_info)
+{
+    char conninfo[MAXPGPATH] = {0};
+    char query[MAXPGPATH] = {0};
+    char conn_error_msg[MAXPGPATH] = {0};
+    PGconn* pgconn = NULL;
+    PGresult* res = NULL;
+    char* value = NULL;
+    errno_t rc;
+
+    rc = snprintf_s(query,
+        sizeof(query),
+        sizeof(query) - 1,
+        "SELECT VALUE FROM GS_GLOBAL_CONFIG WHERE NAME = '%s';",
+        HADRUSERINFO_CONIG_NAME);
+    securec_check_ss_c(rc, "\0", "\0");
+    rc = snprintf_s(conninfo,
+        sizeof(conninfo),
+        sizeof(conninfo) - 1,
+        "dbname=postgres port=%d host=%s "
+        "connect_timeout=60 application_name='local_hadr_walrcv' "
+        "options='-c xc_maintenance_mode=on'",
+        g_instance.attr.attr_network.PostPortNumber,
+        g_instance.attr.attr_network.tcp_link_addr);
+    securec_check_ss_c(rc, "\0", "\0");
+
+    pgconn = PQconnectdb(conninfo);
+    if (PQstatus(pgconn) != CONNECTION_OK) {
+        rc = snprintf_s(conn_error_msg, MAXPGPATH, MAXPGPATH - 1,
+            "%s", PQerrorMessage(pgconn));
+        securec_check_ss(rc, "\0", "\0");
+
+        PQfinish(pgconn);
+        ereport(ERROR, (errmsg("hadr walreceiver failed to connect to local database: %s", conn_error_msg)));
+        return;
+    }
+
+    res = PQexec(pgconn, query);
+    if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) == 0) {
+        rc = snprintf_s(conn_error_msg, MAXPGPATH, MAXPGPATH - 1,
+            "%s", PQresultErrorMessage(res));
+        securec_check_ss(rc, "\0", "\0");
+
+        PQclear(res);
+        PQfinish(pgconn);
+        ereport(ERROR, (errmsg("hadr walreceiver could not obtain hadr_user_info: %s", conn_error_msg)));
+        return;
+    }
+
+    value = PQgetvalue(res, 0, 0);
+    rc = strcpy_s(hadr_user_info, MAXPGPATH, value);
+    securec_check_ss(rc, "\0", "\0");
+
+    PQclear(res);
+    PQfinish(pgconn);
+}
 
 void GetPasswordForHadrStreamingReplication(char user[], char password[])
 {
-    char superUserKeyCipherFile[MAXPGPATH] = {0};
-    char superUserKeyRandFile[MAXPGPATH] = {0};
-    char fileIndex[MAXPGPATH] = {0};
-    char* username = NULL;
-    struct stat cipherFileStat;
-    struct stat randFileStat;
-    int num = 0;
-    int rcs = 0;
+    char hadr_user_info[MAXPGPATH] = {0};
+    char plain_hadr_user_info[MAXPGPATH] = {0};
+    errno_t rc = EOK;
 
-    if (u_sess->attr.attr_storage.hadr_super_user_record_path == NULL ||
-        u_sess->attr.attr_storage.hadr_super_user_record_path[0] == '\0') {
-        ereport(ERROR, (errmsg(
-            "In disaster cluster, main standby could not get user record path, so there is not invalid user.")));
-        return;
+    GetHadrUserInfo(hadr_user_info);
+    if (!decryptECString(hadr_user_info, plain_hadr_user_info, MAXPGPATH, HADR_MODE)) {
+        rc = memset_s(plain_hadr_user_info, sizeof(plain_hadr_user_info), 0, sizeof(plain_hadr_user_info));
+        securec_check(rc, "\0", "\0");
+        ereport(ERROR, (errmsg("In disaster cluster, failed to decrypt hadr_user_info.")));
     }
-    username = strrchr(u_sess->attr.attr_storage.hadr_super_user_record_path, '/');
-    username = username + 1;
-    rcs = strcat_s(user, MAXPGPATH - 1, username);
-    securec_check_ss(rcs, "\0", "\0");
-    while (true) {
-        rcs =
snprintf_s(superUserKeyCipherFile, MAXPGPATH, MAXPGPATH - 1, "%s/cipher/key_%d/server.key.cipher", - u_sess->attr.attr_storage.hadr_super_user_record_path, num); - securec_check_ss(rcs, "\0", "\0"); - canonicalize_path(superUserKeyCipherFile); - - rcs = snprintf_s(superUserKeyRandFile, MAXPGPATH, MAXPGPATH - 1, "%s/rand/key_%d/server.key.rand", - u_sess->attr.attr_storage.hadr_super_user_record_path, num); - securec_check_ss(rcs, "\0", "\0"); - canonicalize_path(superUserKeyRandFile); - if (lstat(superUserKeyCipherFile, &cipherFileStat) != 0) { - if (errno == ENOENT) { - if (lstat(superUserKeyRandFile, &randFileStat) != 0) { - if (errno == ENOENT) { - break; - } else { - ereport(ERROR, (errmsg( - "In disaster cluster, main standby could not get password without stat rand file."))); - return; - } - } else { - ereport(ERROR, (errmsg( - "In disaster cluster, main standby could not get password without cipher file."))); - return; - } - } else { - ereport(ERROR, (errmsg( - "In disaster cluster, main standby could not get password without stat cipher file."))); - return; - } - } - if (lstat(superUserKeyRandFile, &randFileStat) != 0) { - ereport(ERROR, (errmsg("In disaster cluster, main standby could not lstat rand file."))); - return; - } - rcs = snprintf_s(fileIndex, MAXPGPATH, MAXPGPATH - 1, "key_%d", num); - securec_check_ss(rcs, "\0", "\0"); - GS_UCHAR* plaintext = NULL; - plaintext = (GS_UCHAR*)palloc(RANDOM_LEN + 1); - if (plaintext == NULL) { - ereport(ERROR, (errmsg("In disaster cluster, main standby memory is temporarily unavailable."))); - } - decode_cipher_files(HADR_MODE, u_sess->attr.attr_storage.hadr_super_user_record_path, fileIndex, plaintext); - - rcs = strcat_s(password, MAXPGPATH - 1, (char*)plaintext); - securec_check_ss(rcs, "\0", "\0"); - pfree(plaintext); - plaintext = NULL; - num++; + if (sscanf_s(plain_hadr_user_info, "%[^|]|%s", user, MAXPGPATH, password, MAXPGPATH) != 2) { + rc = memset_s(plain_hadr_user_info, sizeof(plain_hadr_user_info), 0, sizeof(plain_hadr_user_info)); + securec_check(rc, "\0", "\0"); + ereport(ERROR, (errmsg("In disaster cluster, parse plain hadr_user_info fail."))); } + rc = memset_s(plain_hadr_user_info, sizeof(plain_hadr_user_info), 0, sizeof(plain_hadr_user_info)); + securec_check(rc, "\0", "\0"); } diff --git a/src/gausskernel/storage/replication/walsender.cpp b/src/gausskernel/storage/replication/walsender.cpp index d87d4d73e..2232dc1e4 100755 --- a/src/gausskernel/storage/replication/walsender.cpp +++ b/src/gausskernel/storage/replication/walsender.cpp @@ -82,7 +82,10 @@ #include "replication/walsender_private.h" #include "replication/datasender.h" #include "replication/dataqueue.h" +#include "replication/dcf_flowcontrol.h" #include "replication/dcf_replication.h" +#include "replication/parallel_decode.h" +#include "replication/parallel_reorderbuffer.h" #include "storage/buf/bufmgr.h" #include "storage/smgr/fd.h" #include "storage/ipc.h" @@ -106,6 +109,7 @@ #include "alarm/alarm.h" #include "utils/distribute_test.h" #include "gs_bbox.h" +#include "lz4.h" #define CRC_LEN 11 @@ -124,6 +128,8 @@ static int g_appname_extra_len = 3; /* [+]+\0 */ #define AmWalSenderToDummyStandby() (t_thrd.walsender_cxt.MyWalSnd->sendRole == SNDROLE_PRIMARY_DUMMYSTANDBY) #define AmWalSenderOnDummyStandby() (t_thrd.walsender_cxt.MyWalSnd->sendRole == SNDROLE_DUMMYSTANDBY_STANDBY) +#define AmWalSenderToStandby() (t_thrd.walsender_cxt.MyWalSnd->sendRole == SNDROLE_PRIMARY_STANDBY) + #define USE_PHYSICAL_XLOG_SEND \ (AM_WAL_HADR_SENDER || !IS_SHARED_STORAGE_MODE || 
(walsnd->sendRole == SNDROLE_PRIMARY_BUILDSTANDBY)) #define USE_SYNC_REP_FLUSH_PTR (AM_WAL_HADR_SENDER && !IS_SHARED_STORAGE_MODE) @@ -139,17 +145,10 @@ static const int SLEEP_LESS = 400; static const int NODENAMELEN = 1024; static const int SHIFT_SPEED = 3; static const int EAGER_MODE_MULTIPLE = 20; +static const int CALCULATE_INTERVAL_MILLISECOND = 2000; #define NEED_CALCULATE_RTO \ - (IS_PGXC_DATANODE && ((walsnd->log_ctrl.sleep_count % walsnd->log_ctrl.sleep_count_limit) == 0 \ - || forceUpdate)) - -/* - * This is set while we are streaming. When not set, SIGUSR2 signal will be - * handled like SIGTERM. When set, the main loop is responsible for checking - * t_thrd.walsender_cxt.walsender_ready_to_stop and terminating when it's set (after streaming any - * remaining WAL). - */ -static volatile sig_atomic_t replication_active = false; + (((IS_PGXC_DATANODE && t_thrd.postmaster_cxt.HaShmData->current_mode == PRIMARY_MODE) || AM_WAL_HADR_CN_SENDER) \ + && ((walsnd->log_ctrl.sleep_count % walsnd->log_ctrl.sleep_count_limit) == 0 || forceUpdate)) typedef struct { bool replicationStarted; @@ -186,14 +185,18 @@ static void ProcessStandbyReplyMessage(void); static void ProcessStandbyHSFeedbackMessage(void); static void ProcessStandbySwitchRequestMessage(void); static void ProcessRepliesIfAny(void); +static void ProcessLogCtrl(StandbyReplyMessage reply); static bool LogicalSlotSleepFlag(void); static void LogCtrlDoActualSleep(volatile WalSnd *walsnd, bool forceUpdate); static void LogCtrlExecuteSleeping(volatile WalSnd *walsnd, bool forceUpdate, bool logicalSlotSleepFlag); static void LogCtrlCountSleepLimit(void); static void LogCtrlSleep(void); -static void LogCtrlCalculateCurrentRTO(StandbyReplyMessage *reply); +static void LogCtrlCalculateCurrentRTO(StandbyReplyMessage *reply, bool *needRefresh); static void LogCtrlCalculateCurrentRPO(StandbyReplyMessage *reply); -static void LogCtrlCalculateSleepTime(void); +#ifdef ENABLE_MULTIPLE_NODES +static void LogCtrlCalculateHadrCurrentRPO(void); +#endif +static void LogCtrlCalculateSleepTime(int64 logCtrlSleepTime, int64 balanceSleepTime, const bool isHadrRPO); static void WalSndKeepalive(bool requestReply); static void WalSndRmXLog(bool requestReply); static void WalSndSyncDummyStandbyDone(bool requestReply); @@ -233,6 +236,7 @@ static ArchiveXlogMessage* get_archive_task_from_list(); static void CalCatchupRate(); static void WalSndHadrSwitchoverRequest(); static void ProcessHadrSwitchoverMessage(); +static void ProcessHadrReplyMessage(); char *DataDir = "."; @@ -790,6 +794,10 @@ void IdentifyMode(void) hashmdata->repl_reason[hashmdata->current_repl] == CONNECT_REBUILD)) { current_mode = hashmdata->is_hadr_main_standby? MAIN_STANDBY_MODE : hashmdata->current_mode; } +#ifdef ENABLE_MULTIPLE_NODES + } else if (IS_PGXC_COORDINATOR && hashmdata->current_mode == NORMAL_MODE && RecoveryInProgress()) { + current_mode = RECOVERY_MODE; +#endif } else { current_mode = hashmdata->current_mode; } @@ -1193,8 +1201,8 @@ static void StartReplication(StartReplicationCmd *cmd) * which has to do a plain sleep/busy loop, because the walsender's latch gets * set everytime WAL is flushed. 
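+ * No longer static: the parallel decoding path reuses this callback (see
+ * ParallelCreateDecodingContext), and it now carries an extra xlog_path
+ * argument.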
*/ -static int logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char *cur_page, TimeLineID *pageTLI) +int logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, + char *cur_page, TimeLineID *pageTLI, char* xlog_path) { XLogRecPtr flushptr; int count; @@ -1493,8 +1501,6 @@ static void StartLogicalReplication(StartReplicationCmd *cmd) SpinLockRelease(&walsnd->mutex); } - replication_active = true; - SyncRepInitConfig(); /* Main loop of walsender */ @@ -1503,7 +1509,6 @@ static void StartLogicalReplication(StartReplicationCmd *cmd) FreeDecodingContext(t_thrd.walsender_cxt.logical_decoding_ctx); ReplicationSlotRelease(); - replication_active = false; if (t_thrd.walsender_cxt.walsender_ready_to_stop) proc_exit(0); WalSndSetState(WALSNDSTATE_STARTUP); @@ -1512,6 +1517,53 @@ static void StartLogicalReplication(StartReplicationCmd *cmd) EndCommand("COPY 0", DestRemote); } +/* + * Load previously initiated logical slot and prepare for sending data in parallel decoding (via WalSndLoop). + */ +static void StartParallelLogicalReplication(StartReplicationCmd *cmd) +{ + StringInfoData buf; + int slotId = t_thrd.walsender_cxt.LogicalSlot; + if (strlen(cmd->slotname) >= NAMEDATALEN) { + ereport(ERROR, (errmsg("slotname should be shorter than %d! slotname is %s", NAMEDATALEN, cmd->slotname))); + } + errno_t rc = memcpy_s(t_thrd.walsender_cxt.slotname, NAMEDATALEN, cmd->slotname, strlen(cmd->slotname)); + securec_check(rc, "\0", "\0"); + + /* Send a CopyBothResponse message, and start streaming */ + pq_beginmessage(&buf, 'W'); + pq_sendbyte(&buf, 0); + pq_sendint(&buf, 0, 2); + pq_endmessage(&buf); + pq_flush(); + t_thrd.walsender_cxt.parallel_logical_decoding_ctx = ParallelCreateDecodingContext(cmd->startpoint, cmd->options, + false, logical_read_xlog_page, slotId); + ParallelDecodingData *data = (ParallelDecodingData *)palloc0(sizeof(ParallelDecodingData)); + data->context = (MemoryContext)AllocSetContextCreate(t_thrd.walsender_cxt.parallel_logical_decoding_ctx->context, + "restore text conversion context", ALLOCSET_DEFAULT_SIZES); + + rc = memcpy_s(&data->pOptions, sizeof(ParallelDecodeOption), &g_Logicaldispatcher[slotId].pOptions, + sizeof(ParallelDecodeOption)); + securec_check(rc, "\0", "\0"); + g_Logicaldispatcher[slotId].startpoint = cmd->startpoint; + t_thrd.walsender_cxt.parallel_logical_decoding_ctx->output_plugin_private = data; + + SyncRepInitConfig(); + + /* Main loop of walsender */ + WalSndLoop(XLogSendPararllelLogical); + + FreeDecodingContext(t_thrd.walsender_cxt.logical_decoding_ctx); + + if (t_thrd.walsender_cxt.walsender_ready_to_stop) { + proc_exit(0); + } + WalSndSetState(WALSNDSTATE_STARTUP); + + /* Get out of COPY mode (CommandComplete). */ + EndCommand("COPY 0", DestRemote); +} + /* * Notify the primary to advance logical replication slot. */ @@ -1618,44 +1670,46 @@ static void AdvanceLogicalReplication(AdvanceReplicationCmd *cmd) } /* - * LogicalDecodingContext 'prepare_write' callback. - * * Prepare a write into a StringInfo. * * Don't do anything lasting in here, it's quite possible that nothing will done * with the data. 
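+ * Split out as WalSndPrepareWriteHelper so other senders can build the
+ * same 'w' packet: msgtype byte, dataStart, walEnd, and a sendtime
+ * placeholder that WalSndWriteDataHelper fills in right before sending.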
*/ -static void WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write) +void WalSndPrepareWriteHelper(StringInfo out, XLogRecPtr lsn, TransactionId xid, bool last_write) { /* can't have sync rep confused by sending the same LSN several times */ - if (!last_write) + if (!last_write) { lsn = InvalidXLogRecPtr; - - resetStringInfo(ctx->out); - - pq_sendbyte(ctx->out, 'w'); - pq_sendint64(ctx->out, lsn); /* dataStart */ - pq_sendint64(ctx->out, lsn); /* walEnd */ + } + resetStringInfo(out); + pq_sendbyte(out, 'w'); + pq_sendint64(out, lsn); /* dataStart */ + pq_sendint64(out, lsn); /* walEnd */ /* * Fill out the sendtime later, just as it's done in XLogSendPhysical, but * reserve space here. */ - pq_sendint64(ctx->out, 0); /* sendtime */ + pq_sendint64(out, 0); /* sendtime */ +} +/* + * LogicalDecodingContext 'prepare_write' callback. + */ +static void WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write) +{ + WalSndPrepareWriteHelper(ctx->out, lsn, xid, last_write); } /* - * LogicalDecodingContext 'write' callback. - * * Actually write out data previously prepared by WalSndPrepareWrite out to * the network. Take as long as needed, but process replies from the other * side and check timeouts during that. */ -static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write) +void WalSndWriteDataHelper(StringInfo out, XLogRecPtr lsn, TransactionId xid, bool last_write) { errno_t rc; /* output previously gathered data in a CopyData packet */ - pq_putmessage_noblock('d', ctx->out->data, ctx->out->len); + pq_putmessage_noblock('d', out->data, out->len); /* * Fill the send timestamp last, so that it is taken as late as @@ -1664,7 +1718,7 @@ static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, Transac */ resetStringInfo(t_thrd.walsender_cxt.tmpbuf); pq_sendint64(t_thrd.walsender_cxt.tmpbuf, GetCurrentTimestamp()); - rc = memcpy_s(&(ctx->out->data[1 + sizeof(int64) + sizeof(int64)]), ctx->out->len, + rc = memcpy_s(&(out->data[1 + sizeof(int64) + sizeof(int64)]), out->len, t_thrd.walsender_cxt.tmpbuf->data, sizeof(int64)); securec_check(rc, "\0", "\0"); @@ -1686,7 +1740,7 @@ static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, Transac * necessity for manual cleanup of all postmaster children. */ if (!PostmasterIsAlive()) - exit(1); + proc_exit(1); /* Process any requests or signals received recently */ if (t_thrd.walsender_cxt.got_SIGHUP) { @@ -1727,6 +1781,42 @@ static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, Transac SetLatch(&t_thrd.walsender_cxt.MyWalSnd->latch); } +/* + * LogicalDecodingContext 'write' callback. + */ +static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write) +{ + WalSndWriteDataHelper(ctx->out, lsn, xid, last_write); +} + +/* + * Walsender process messages. + */ +static void WalSndHandleMessage(XLogRecPtr *RecentFlushPoint) +{ + /* Process any requests or signals received recently */ + if (t_thrd.walsender_cxt.got_SIGHUP) { + t_thrd.walsender_cxt.got_SIGHUP = false; + ProcessConfigFile(PGC_SIGHUP); + SyncRepInitConfig(); + } + + if (!AM_LOGICAL_READ_RECORD) { + /* Check for input from the client */ + ProcessRepliesIfAny(); + + /* Clear any already-pending wakeups */ + ResetLatch(&t_thrd.walsender_cxt.MyWalSnd->latch); + } + + /* Update our idea of the currently flushed position. 
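+     * On a primary this is the real flush pointer; during recovery only
+     * what has already been replayed can be handed out safely.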
*/ + if (!RecoveryInProgress()) { + *RecentFlushPoint = GetFlushRecPtr(); + } else { + *RecentFlushPoint = GetXLogReplayRecPtr(NULL); + } +} + /* * Wait till WAL < loc is flushed to disk so it can be safely read. */ @@ -1760,24 +1850,7 @@ static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc) if (!PostmasterIsAlive()) exit(1); - /* Process any requests or signals received recently */ - if (t_thrd.walsender_cxt.got_SIGHUP) { - t_thrd.walsender_cxt.got_SIGHUP = false; - ProcessConfigFile(PGC_SIGHUP); - SyncRepInitConfig(); - } - - /* Check for input from the client */ - ProcessRepliesIfAny(); - - /* Clear any already-pending wakeups */ - ResetLatch(&t_thrd.walsender_cxt.MyWalSnd->latch); - - /* Update our idea of the currently flushed position. */ - if (!RecoveryInProgress()) - RecentFlushPtr = GetFlushRecPtr(); - else - RecentFlushPtr = GetXLogReplayRecPtr(NULL); + WalSndHandleMessage(&RecentFlushPtr); /* * If postmaster asked us to stop, don't wait here anymore. This will @@ -1797,10 +1870,12 @@ static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc) * possibly are waiting for a later location. So we send pings * containing the flush location every now and then. */ - if (XLByteLT(t_thrd.walsender_cxt.MyWalSnd->flush, t_thrd.walsender_cxt.sentPtr) && - !t_thrd.walsender_cxt.waiting_for_ping_response) { - WalSndKeepalive(false); - t_thrd.walsender_cxt.waiting_for_ping_response = true; + if (!AM_LOGICAL_READ_RECORD) { + if (XLByteLT(t_thrd.walsender_cxt.MyWalSnd->flush, t_thrd.walsender_cxt.sentPtr) && + !t_thrd.walsender_cxt.waiting_for_ping_response) { + WalSndKeepalive(false); + t_thrd.walsender_cxt.waiting_for_ping_response = true; + } } /* check whether we're done */ @@ -1816,7 +1891,7 @@ static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc) * to flush. Otherwise we might just sit on output data while waiting * for new WAL being generated. 
 */
-        if (pq_flush_if_writable() != 0)
+        if (pq_flush_if_writable() != 0 || t_thrd.logicalreadworker_cxt.shutdown_requested)
            WalSndShutdown();

        now = GetCurrentTimestamp();
@@ -1831,13 +1906,17 @@
        /* Sleep until something happens or we time out */
        t_thrd.int_cxt.ImmediateInterruptOK = true;
        CHECK_FOR_INTERRUPTS();
-        WaitLatchOrSocket(&t_thrd.walsender_cxt.MyWalSnd->latch, wakeEvents, u_sess->proc_cxt.MyProcPort->sock,
-                          sleeptime);
+        if (!AM_LOGICAL_READ_RECORD) {
+            WaitLatchOrSocket(&t_thrd.walsender_cxt.MyWalSnd->latch, wakeEvents, u_sess->proc_cxt.MyProcPort->sock,
+                              sleeptime);
+        }
        t_thrd.int_cxt.ImmediateInterruptOK = false;
    }

    /* reactivate latch so WalSndLoop knows to continue */
-    SetLatch(&t_thrd.walsender_cxt.MyWalSnd->latch);
+    if (!AM_LOGICAL_READ_RECORD) {
+        SetLatch(&t_thrd.walsender_cxt.MyWalSnd->latch);
+    }
    return RecentFlushPtr;
 }
@@ -1941,6 +2020,18 @@ static bool cmdStringLengthCheck(const char* cmd_string)
    return true;
 }

+bool isLogicalSlotExist(char* slotName)
+{
+    for (int slotno = 0; slotno < g_instance.attr.attr_storage.max_replication_slots; slotno++) {
+        ReplicationSlot *slot = &t_thrd.slot_cxt.ReplicationSlotCtl->replication_slots[slotno];
+        if (slot->data.database != InvalidOid &&
+            strcmp(slotName, slot->data.name.data) == 0) {
+            return true;
+        }
+    }
+    return false;
+}
+
 static void IdentifyCommand(Node* cmd_node, ReplicationCxt* repCxt, const char *cmd_string){
    switch (cmd_node->type) {
        case T_IdentifySystemCmd:
@@ -2000,15 +2091,24 @@
        case T_StartReplicationCmd: {
            StartReplicationCmd *cmd = (StartReplicationCmd *)cmd_node;
+            int parallelDecodeNum = ParseParallelDecodeNumOnly(cmd->options);
            if (cmd->kind == REPLICATION_KIND_PHYSICAL) {
                StartReplication(cmd);
                /* break out of the loop */
                repCxt->replicationStarted = true;
+            } else if (t_thrd.proc->workingVersionNum >= PARALLEL_DECODE_VERSION_NUM && parallelDecodeNum > 1) {
+                /* if the slot does not exist, the walsender exits */
+                if (!isLogicalSlotExist(cmd->slotname)) {
+                    ereport(ERROR, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOG),
+                        errmsg("Invalid logical replication slot %s.", cmd->slotname), errdetail("N/A"),
+                        errcause("Replication slot does not exist."),
+                        erraction("Create the replication slot first.")));
+                }
+                t_thrd.walsender_cxt.LogicalSlot = StartLogicalLogWorkers(u_sess->proc_cxt.MyProcPort->user_name,
+                    u_sess->proc_cxt.MyProcPort->database_name, cmd->slotname, cmd->options, parallelDecodeNum);
+                StartParallelLogicalReplication(cmd);
            } else {
                MarkPostmasterChildNormal();
-#ifdef ENABLE_MULTIPLE_NODES
-                CheckPMstateAndRecoveryInProgress();
-#endif
                StartLogicalReplication(cmd);
            }
            break;
@@ -2200,6 +2300,10 @@ static void ProcessStandbyMessage(void)
            ProcessHadrSwitchoverMessage();
            break;

+        case 'R':
+            ProcessHadrReplyMessage();
+            break;
+
        default:
            ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("unexpected message type \"%d\"", msgtype)));
@@ -2330,8 +2434,8 @@ static void LogCtrlDoActualSleep(volatile WalSnd *walsnd, bool forceUpdate)
 {
    bool logical_slot_sleep_flag = LogicalSlotSleepFlag();
    /* try to control log sent rate so that standby can flush and apply log under RTO seconds */
-    if (walsnd->state > WALSNDSTATE_BACKUP && IS_PGXC_DATANODE) {
-        if (AM_WAL_HADR_SENDER) {
+    if (walsnd->state > WALSNDSTATE_BACKUP && (IS_PGXC_DATANODE || AM_WAL_HADR_CN_SENDER)) {
+        if (AM_WAL_HADR_DNCN_SENDER) {
            if (u_sess->attr.attr_storage.hadr_recovery_time_target > 0
|| u_sess->attr.attr_storage.hadr_recovery_point_target > 0) { LogCtrlExecuteSleeping(walsnd, forceUpdate, logical_slot_sleep_flag); @@ -2341,7 +2445,7 @@ static void LogCtrlDoActualSleep(volatile WalSnd *walsnd, bool forceUpdate) } } } else { - if (u_sess->attr.attr_storage.target_rto > 0 || u_sess->attr.attr_storage.time_to_target_rpo > 0) { + if (u_sess->attr.attr_storage.target_rto > 0) { LogCtrlExecuteSleeping(walsnd, forceUpdate, logical_slot_sleep_flag); } else { if (logical_slot_sleep_flag) { @@ -2356,7 +2460,14 @@ static void LogCtrlDoActualSleep(volatile WalSnd *walsnd, bool forceUpdate) static void LogCtrlExecuteSleeping(volatile WalSnd *walsnd, bool forceUpdate, bool logicalSlotSleepFlag) { if (walsnd->log_ctrl.sleep_count % walsnd->log_ctrl.sleep_count_limit == 0 || forceUpdate) { - LogCtrlCalculateSleepTime(); + if (AM_WAL_HADR_DNCN_SENDER && (u_sess->attr.attr_storage.hadr_recovery_point_target > 0)) { + LogCtrlCalculateSleepTime(g_instance.streaming_dr_cxt.rpoSleepTime, + g_instance.streaming_dr_cxt.rpoBalanceSleepTime, true); + } + if ((!AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.target_rto > 0) || + (AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.hadr_recovery_time_target > 0)) { + LogCtrlCalculateSleepTime(walsnd->log_ctrl.sleep_time, walsnd->log_ctrl.balance_sleep_time, false); + } LogCtrlCountSleepLimit(); } pgstat_report_waitevent(WAIT_EVENT_LOGCTRL_SLEEP); @@ -2382,7 +2493,27 @@ static void LogCtrlNeedForceUpdate(bool *forceUpdate, const StandbyReplyMessage } } -static void LogCtrlProcessTargetRTOChanged() +static bool IsRtoRpoOverTarget() +{ + /* use volatile pointer to prevent code rearrangement */ + volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; + if (AM_WAL_HADR_DNCN_SENDER) { + if ((u_sess->attr.attr_storage.hadr_recovery_time_target != 0 && + walsnd->log_ctrl.current_RTO > u_sess->attr.attr_storage.hadr_recovery_time_target) || + (u_sess->attr.attr_storage.hadr_recovery_point_target != 0 && + walsnd->log_ctrl.current_RPO > u_sess->attr.attr_storage.hadr_recovery_point_target)) { + return true; + } + } else { + if ((u_sess->attr.attr_storage.target_rto != 0 && + walsnd->log_ctrl.current_RTO > u_sess->attr.attr_storage.target_rto)) { + return true; + } + } + return false; +} + +static void ProcessTargetRtoRpoChanged() { /* use volatile pointer to prevent code rearrangement */ volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; @@ -2390,14 +2521,18 @@ static void LogCtrlProcessTargetRTOChanged() int rc = strncpy_s(standby_name, NODENAMELEN, u_sess->attr.attr_common.application_name, strlen(u_sess->attr.attr_common.application_name)); securec_check(rc, "\0", "\0"); - if (u_sess->attr.attr_storage.target_rto == 0 && u_sess->attr.attr_storage.hadr_recovery_time_target == 0) { + if ((!AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.target_rto == 0) || + (AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.hadr_recovery_time_target == 0)) { g_instance.rto_cxt.rto_standby_data[walsnd->index].current_sleep_time = 0; walsnd->log_ctrl.sleep_time = 0; } else { g_instance.rto_cxt.rto_standby_data[walsnd->index].current_sleep_time = walsnd->log_ctrl.sleep_time; } + if (AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.hadr_recovery_point_target == 0) { + g_instance.streaming_dr_cxt.rpoSleepTime = 0; + } - if (!AM_WAL_HADR_SENDER && + if (!AM_WAL_HADR_DNCN_SENDER && g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto != u_sess->attr.attr_storage.target_rto) { ereport(LOG, (errmodule(MOD_RTO_RPO), errmsg("target_rto changes to 
%d, previous target_rto is %d, current the sleep time is %ld", @@ -2406,7 +2541,7 @@ static void LogCtrlProcessTargetRTOChanged() g_instance.rto_cxt.rto_standby_data[walsnd->index].current_sleep_time))); g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto = u_sess->attr.attr_storage.target_rto; - } else if (AM_WAL_HADR_SENDER && g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto != + } else if (AM_WAL_HADR_DNCN_SENDER && g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto != u_sess->attr.attr_storage.hadr_recovery_time_target) { ereport(LOG, (errmodule(MOD_RTO_RPO), errmsg("hadr_target_rto changes to %d, previous target_rto is %d, " @@ -2418,32 +2553,16 @@ static void LogCtrlProcessTargetRTOChanged() g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto = u_sess->attr.attr_storage.hadr_recovery_time_target; } - /* Extreme RTO and flow control cannot be enabled at the same time. */ - if (!AM_WAL_HADR_SENDER && - u_sess->attr.attr_storage.target_rto > 0 && g_instance.attr.attr_storage.recovery_parse_workers > 1) { - ereport(WARNING, (errmodule(MOD_RTO_RPO), - errmsg("Extreme RTO and flow control cannot be enabled at the same time! " - "We have reset the flow control to the off state."), - errhint("You need to change the recovery_time_target parameter " - "to 0 in the postgresql.conf file."))); - u_sess->attr.attr_storage.target_rto = 0; - g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto = 0; - } else if (AM_WAL_HADR_SENDER && - u_sess->attr.attr_storage.hadr_recovery_time_target > 0 && - g_instance.attr.attr_storage.recovery_parse_workers > 1) { - ereport(WARNING, (errmodule(MOD_RTO_RPO), - errmsg("Extreme RTO and flow control cannot be enabled at the same time! " - "We have reset the flow control to the off state."), - errhint("You need to change the hadr_recovery_time_target parameter " - "to 0 in the postgresql.conf file."))); - - u_sess->attr.attr_storage.hadr_recovery_time_target = 0; - g_instance.rto_cxt.rto_standby_data[walsnd->index].target_rto = 0; - } } static void AdvanceReplicationSlot(XLogRecPtr flush) { + if (t_thrd.walsender_cxt.LogicalSlot != -1) { + int slotId = t_thrd.walsender_cxt.LogicalSlot; + SpinLockAcquire(&(g_Logicaldispatcher[slotId].readWorker->rwlock)); + g_Logicaldispatcher[slotId].readWorker->flushLSN = flush; + SpinLockRelease(&(g_Logicaldispatcher[slotId].readWorker->rwlock)); + } if (t_thrd.slot_cxt.MyReplicationSlot && (!XLByteEQ(flush, InvalidXLogRecPtr))) { if (t_thrd.slot_cxt.MyReplicationSlot->data.database != InvalidOid) { LogicalConfirmReceivedLocation(flush); @@ -2457,6 +2576,39 @@ static void AdvanceReplicationSlot(XLogRecPtr flush) } } +static void ProcessLogCtrl(StandbyReplyMessage reply) +{ + /* use volatile pointer to prevent code rearrangement */ + volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; + bool forceUpdate = false; + LogCtrlNeedForceUpdate(&forceUpdate, &reply); + if (NEED_CALCULATE_RTO) { + bool needRefresh = true; + LogCtrlCalculateCurrentRTO(&reply, &needRefresh); +#ifndef ENABLE_MULTIPLE_NODES + if ((AM_WAL_HADR_DNCN_SENDER || AM_WAL_SHARE_STORE_SENDER) && needRefresh) { + LogCtrlCalculateCurrentRPO(&reply); + } +#else + if (AM_WAL_SHARE_STORE_SENDER && needRefresh) { + LogCtrlCalculateCurrentRPO(&reply); + } else if (AM_WAL_HADR_DNCN_SENDER && needRefresh) { + LogCtrlCalculateHadrCurrentRPO(); + } +#endif + if (needRefresh) { + walsnd->log_ctrl.prev_reply_time = reply.sendTime; + walsnd->log_ctrl.prev_flush = reply.flush; + walsnd->log_ctrl.prev_apply = reply.apply; + } + 
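+        /*
+         * When needRefresh is false the sample was unusable (the reply came too
+         * soon after the previous one, or showed no apply progress), so the
+         * prev_* baseline above is intentionally left unchanged and the next
+         * usable reply is measured against the same starting point.
+         */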
} + if (!IS_SHARED_STORAGE_MODE) { + LogCtrlDoActualSleep(walsnd, forceUpdate); + } else { + walsnd->log_ctrl.sleep_count++; + } +} + /* * Regular reply from standby advising of WAL positions on standby server. */ @@ -2475,6 +2627,12 @@ static void ProcessStandbyReplyMessage(void) if (reply.replyRequested) { WalSndKeepalive(false); } + + if ((reply.replyFlags & IS_CANCEL_LOG_CTRL) != 0) { + t_thrd.walsender_cxt.cancelLogCtl = true; + } else { + t_thrd.walsender_cxt.cancelLogCtl = false; + } /* use volatile pointer to prevent code rearrangement */ volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; @@ -2490,30 +2648,20 @@ static void ProcessStandbyReplyMessage(void) walsnd->apply = reply.apply; walsnd->peer_role = reply.peer_role; walsnd->peer_state = reply.peer_state; + walsnd->replyFlags = reply.replyFlags; SpinLockRelease(&walsnd->mutex); } /* * Only sleep when local role is not WAL_DB_SENDER. */ - if (!AM_WAL_DB_SENDER && (AM_WAL_HADR_SENDER || !IS_SHARED_STORAGE_MODE) && + if (!t_thrd.walsender_cxt.cancelLogCtl && !AM_WAL_DB_SENDER && walsnd->sendRole != SNDROLE_PRIMARY_BUILDSTANDBY) { - bool forceUpdate = false; - LogCtrlNeedForceUpdate(&forceUpdate, &reply); - if (NEED_CALCULATE_RTO) { - LogCtrlCalculateCurrentRTO(&reply); - if (AM_WAL_HADR_SENDER && (u_sess->attr.attr_storage.hadr_recovery_point_target > 0)) { - LogCtrlCalculateCurrentRPO(&reply); - } - walsnd->log_ctrl.prev_reply_time = reply.sendTime; - walsnd->log_ctrl.prev_flush = reply.flush; - walsnd->log_ctrl.prev_apply = reply.apply; - } - LogCtrlDoActualSleep(walsnd, forceUpdate); + ProcessLogCtrl(reply); } - if (IS_PGXC_DATANODE) { - LogCtrlProcessTargetRTOChanged(); + if (IS_PGXC_DATANODE || AM_WAL_HADR_CN_SENDER) { + ProcessTargetRtoRpoChanged(); } if (!AM_WAL_STANDBY_SENDER) { @@ -2735,18 +2883,24 @@ static void ProcessArchiveFeedbackMessage(void) static void LogCtrlCountSleepLimit(void) { volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; - int64 sleep_count_limit_count; + int64 countLimit1; + int64 countLimit2; + if (walsnd->log_ctrl.sleep_time == 0) { - sleep_count_limit_count = MAX_CONTROL_REPLY; + countLimit1 = MAX_CONTROL_REPLY; } else { - sleep_count_limit_count = INIT_CONTROL_REPLY * MICROSECONDS_PER_SECONDS / walsnd->log_ctrl.sleep_time; - sleep_count_limit_count = (sleep_count_limit_count > MAX_CONTROL_REPLY) ? MAX_CONTROL_REPLY - : sleep_count_limit_count; + countLimit1 = INIT_CONTROL_REPLY * MICROSECONDS_PER_SECONDS / walsnd->log_ctrl.sleep_time; + countLimit1 = (countLimit1 > MAX_CONTROL_REPLY) ? MAX_CONTROL_REPLY : countLimit1; + countLimit1 = (countLimit1 <= 0) ? INIT_CONTROL_REPLY : countLimit1; } - if (sleep_count_limit_count <= 0) { - sleep_count_limit_count = INIT_CONTROL_REPLY; + if (g_instance.streaming_dr_cxt.rpoSleepTime == 0) { + countLimit2 = MAX_CONTROL_REPLY; + } else { + countLimit2 = INIT_CONTROL_REPLY * MICROSECONDS_PER_SECONDS / g_instance.streaming_dr_cxt.rpoSleepTime; + countLimit2 = (countLimit2 > MAX_CONTROL_REPLY) ? MAX_CONTROL_REPLY : countLimit2; + countLimit2 = (countLimit2 <= 0) ? INIT_CONTROL_REPLY : countLimit2; } - walsnd->log_ctrl.sleep_count_limit = sleep_count_limit_count; + walsnd->log_ctrl.sleep_count_limit = (countLimit1 <= countLimit2) ? countLimit1 : countLimit2; } /* @@ -2773,67 +2927,127 @@ static inline uint64 LogCtrlCountBigSpeed(uint64 originSpeed, uint64 curSpeed) return updateSpeed; } -/* - * Estimate the time standby need to flush and apply log. 
- */
-static void LogCtrlCalculateCurrentRTO(StandbyReplyMessage *reply)
+static bool ReplyMessageCheck(StandbyReplyMessage *reply, bool *needRefresh)
 {
    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
-    long sec_to_time;
-    int microsec_to_time;
+    bool checkResult = true;
    if (XLByteLT(reply->receive, reply->flush) || XLByteLT(reply->flush, reply->apply) ||
        XLByteLT(reply->flush, walsnd->log_ctrl.prev_flush) || XLByteLT(reply->apply, walsnd->log_ctrl.prev_apply)) {
-        return;
+        checkResult = false;
    }
    if (XLByteEQ(reply->receive, reply->apply)) {
        walsnd->log_ctrl.prev_RTO = walsnd->log_ctrl.current_RTO;
        walsnd->log_ctrl.current_RTO = 0;
-        return;
+        checkResult = false;
+    }
+    if (AM_WAL_HADR_DNCN_SENDER) {
+        if (!STANDBY_IN_BARRIER_PAUSE && reply->apply == walsnd->log_ctrl.prev_apply) {
+            *needRefresh = false;
+            checkResult = false;
+        }
+        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+            errmsg("reply->replyFlags is %d, reply->receive is %lu, reply->flush is %lu, reply->apply is %lu",
+                reply->replyFlags, reply->receive, reply->flush, reply->apply)));
    }
-    uint64 part1 = reply->receive - reply->flush;
-    uint64 part2 = reply->flush - reply->apply;
-    uint64 part1_diff = reply->flush - walsnd->log_ctrl.prev_flush;
-    uint64 part2_diff = reply->apply - walsnd->log_ctrl.prev_apply;
    if (walsnd->log_ctrl.prev_reply_time == 0) {
+        checkResult = false;
+    }
+    if (walsnd->log_ctrl.prev_calculate_time == 0 || (!checkResult && *needRefresh)) {
+        walsnd->log_ctrl.prev_calculate_time = reply->sendTime;
+        walsnd->log_ctrl.period_total_flush = 0;
+        walsnd->log_ctrl.period_total_apply = 0;
+        checkResult = false;
+    }
+    g_instance.rto_cxt.rto_standby_data[walsnd->index].current_rto = walsnd->log_ctrl.current_RTO;
+    return checkResult;
+}
+
+/*
+ * Estimate the time the standby needs to flush and apply the log.
+ */
+static void LogCtrlCalculateCurrentRTO(StandbyReplyMessage *reply, bool *needRefresh)
+{
+    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
+    long sec_to_time;
+    int microsec_to_time;
+
+    if (!ReplyMessageCheck(reply, needRefresh)) {
        return;
    }
    TimestampDifference(walsnd->log_ctrl.prev_reply_time, reply->sendTime, &sec_to_time, &microsec_to_time);
-    long millisec_time_diff = sec_to_time * MILLISECONDS_PER_SECONDS + microsec_to_time / MILLISECONDS_PER_MICROSECONDS;
-    if (millisec_time_diff <= 10) {
+    long millisec_time_diff = sec_to_time * MILLISECONDS_PER_SECONDS +
+        microsec_to_time / MILLISECONDS_PER_MICROSECONDS;
+    if (millisec_time_diff <= 100) {
+        *needRefresh = false;
+        return;
+    }
+
+    uint64 needFlush = reply->receive - reply->flush;
+    uint64 needApply = reply->flush - reply->apply;
+    uint64 newFlush = reply->flush - walsnd->log_ctrl.prev_flush;
+    uint64 newApply = reply->apply - walsnd->log_ctrl.prev_apply;
+    uint64 periodTotalFlush = walsnd->log_ctrl.period_total_flush + newFlush;
+    uint64 periodTotalApply = walsnd->log_ctrl.period_total_apply + newApply;
+
+    /* To reduce the speed fluctuation caused by frequent calculation, we calculate the speed every 2s (time_range). */
+    TimestampDifference(walsnd->log_ctrl.prev_calculate_time, reply->sendTime, &sec_to_time, &microsec_to_time);
+    long calculate_time_diff = sec_to_time * MILLISECONDS_PER_SECONDS +
+        microsec_to_time / MILLISECONDS_PER_MICROSECONDS;
+    if (calculate_time_diff <= 0) {
        return;
    }
    /*
-     * consumeRatePart1 and consumeRatePart2 is based on 7/8 previous_speed(walsnd->log_ctrl.pre_rate1) and 1/8
-     * speed_now(part1_diff / millisec_time_diff). To be more precise and keep more decimal point, we expand speed_now
-     * by multiply first then divide, which is (8 * previous_speed * 7/8 + speed_now) / 8.
+     * consumeRatePart1 and consumeRatePart2 are based on 7/8 previous_speed (walsnd->log_ctrl.flush_rate or apply_rate)
+     * and 1/8 speed_now (newFlush or newApply / millisec_time_diff). To be more precise and keep more decimal
+     * precision, we expand speed_now by multiplying first and then dividing, which is
+     * (8 * previous_speed * 7/8 + speed_now) / 8.
     */
-    if (walsnd->log_ctrl.pre_rate1 != 0) {
-        walsnd->log_ctrl.pre_rate1 = LogCtrlCountBigSpeed(walsnd->log_ctrl.pre_rate1,
-                                                          (uint64)(part1_diff / millisec_time_diff));
+    if ((walsnd->log_ctrl.flush_rate >> SHIFT_SPEED) > 1) {
+        if (calculate_time_diff > CALCULATE_INTERVAL_MILLISECOND || IsRtoRpoOverTarget()) {
+            walsnd->log_ctrl.flush_rate = LogCtrlCountBigSpeed(walsnd->log_ctrl.flush_rate,
+                (uint64)(periodTotalFlush / calculate_time_diff));
+            walsnd->log_ctrl.prev_calculate_time = reply->sendTime;
+        }
    } else {
-        walsnd->log_ctrl.pre_rate1 = ((part1_diff / (uint64)millisec_time_diff) << SHIFT_SPEED);
+        walsnd->log_ctrl.flush_rate = ((newFlush / (uint64)millisec_time_diff) << SHIFT_SPEED);
    }
-    if (walsnd->log_ctrl.pre_rate2 != 0) {
-        walsnd->log_ctrl.pre_rate2 = LogCtrlCountBigSpeed(walsnd->log_ctrl.pre_rate2,
-                                                          (uint64)(part2_diff / millisec_time_diff));
+    if ((walsnd->log_ctrl.apply_rate >> SHIFT_SPEED) > 1) {
+        if (calculate_time_diff > CALCULATE_INTERVAL_MILLISECOND || IsRtoRpoOverTarget()) {
+            walsnd->log_ctrl.apply_rate = LogCtrlCountBigSpeed(walsnd->log_ctrl.apply_rate,
+                (uint64)(periodTotalApply / calculate_time_diff));
+            walsnd->log_ctrl.prev_calculate_time = reply->sendTime;
+        }
    } else {
-        walsnd->log_ctrl.pre_rate2 = ((uint64)(part2_diff / millisec_time_diff) << SHIFT_SPEED);
+        walsnd->log_ctrl.apply_rate = ((uint64)(newApply / millisec_time_diff) << SHIFT_SPEED);
    }
-    uint64 consumeRatePart1 = (walsnd->log_ctrl.pre_rate1 >> SHIFT_SPEED);
-    uint64 consumeRatePart2 = (walsnd->log_ctrl.pre_rate2 >> SHIFT_SPEED);
-    if (consumeRatePart1 == 0) {
-        consumeRatePart1 = 1;
+    if (walsnd->log_ctrl.prev_calculate_time == reply->sendTime) {
+        walsnd->log_ctrl.period_total_flush = 0;
+        walsnd->log_ctrl.period_total_apply = 0;
+    } else {
+        walsnd->log_ctrl.period_total_flush = periodTotalFlush;
+        walsnd->log_ctrl.period_total_apply = periodTotalApply;
+    }
+    if (AM_WAL_HADR_DNCN_SENDER && STANDBY_IN_BARRIER_PAUSE) {
+        walsnd->log_ctrl.prev_RTO = walsnd->log_ctrl.current_RTO;
+        walsnd->log_ctrl.current_RTO = 0;
+        g_instance.rto_cxt.rto_standby_data[walsnd->index].current_rto = walsnd->log_ctrl.current_RTO;
+        return;
    }
-    if (consumeRatePart2 == 0) {
-        consumeRatePart2 = 1;
+    uint64 flushSpeed = (walsnd->log_ctrl.flush_rate >> SHIFT_SPEED);
+    uint64 applySpeed = (walsnd->log_ctrl.apply_rate >> SHIFT_SPEED);
+    if (flushSpeed == 0) {
+        flushSpeed = 1;
    }
-    uint64 sec_RTO_part1 = (part1 / consumeRatePart1) / MILLISECONDS_PER_SECONDS;
-    uint64 sec_RTO_part2 = ((part1 + part2) / consumeRatePart2) / MILLISECONDS_PER_SECONDS;
+    if (applySpeed == 0) {
+        applySpeed = 1;
+    }
+
+    uint64 sec_RTO_part1 = (needFlush / flushSpeed) / MILLISECONDS_PER_SECONDS;
+    uint64 sec_RTO_part2 = ((needFlush + needApply) / applySpeed) / MILLISECONDS_PER_SECONDS;
    uint64 sec_RTO = sec_RTO_part1 > sec_RTO_part2 ?
sec_RTO_part1 : sec_RTO_part2; walsnd->log_ctrl.prev_RTO = walsnd->log_ctrl.current_RTO; walsnd->log_ctrl.current_RTO = sec_RTO; @@ -2842,105 +3056,70 @@ static void LogCtrlCalculateCurrentRTO(StandbyReplyMessage *reply) ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("The RTO estimated is = : %lu seconds. reply->reveive is %lu, reply->flush is %lu, " "reply->apply is %lu, pre_flush is %lu, pre_apply is %lu, TimestampDifference is %ld, " - "consumeRatePart1 is %lu, consumeRatePart2 is %lu", + "flushSpeed is %lu, applySpeed is %lu, flush_sec is %ld ms, apply_sec is %ld ms", sec_RTO, reply->receive, reply->flush, reply->apply, walsnd->log_ctrl.prev_flush, - walsnd->log_ctrl.prev_apply, millisec_time_diff, consumeRatePart1, consumeRatePart2))); + walsnd->log_ctrl.prev_apply, millisec_time_diff, flushSpeed, applySpeed, + needFlush / flushSpeed, (needFlush + needApply) / applySpeed))); } +#ifndef ENABLE_LITE_MODE /* Calculate the RTO and RPO changes and control the changes as long as one changes. */ -static void LogCtrlCalculateIndicatorChange(int64 *gapDiff, int64 *gap, bool *isEagerMode) +static void LogCtrlCalculateIndicatorChange(int64 *gapDiff, int64 *gap, bool *isEagerMode, const bool isHadrRPO) { volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; - int64 rtoPrevGap = 0; - int64 rtoGapDiff = 0; - int64 rtoGap = 0; - int64 rpoPrevGap = 0; - int64 rpoGapDiff = 0; - int64 rpoGap = 0; - - if (u_sess->attr.attr_storage.target_rto > 0) { - - if (walsnd->log_ctrl.prev_RTO < 0) { - walsnd->log_ctrl.prev_RTO = walsnd->log_ctrl.current_RTO; - } - - int targetRTO = u_sess->attr.attr_storage.target_rto / 2; - int64 currentRTO = walsnd->log_ctrl.current_RTO; - rtoGap = currentRTO - targetRTO; - rtoPrevGap = walsnd->log_ctrl.prev_RTO - targetRTO; - rtoGapDiff = rtoGap - rtoPrevGap; - } - - if (u_sess->attr.attr_storage.time_to_target_rpo > 0) { + if (isHadrRPO) { + int targetRPO = u_sess->attr.attr_storage.hadr_recovery_point_target / 2; if (walsnd->log_ctrl.prev_RPO < 0) { walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO; } - int targetRPO = u_sess->attr.attr_storage.time_to_target_rpo; - int64 currentRPO = walsnd->log_ctrl.current_RPO; - rpoGap = currentRPO - targetRPO; - rpoPrevGap = walsnd->log_ctrl.prev_RPO - targetRPO; - rpoGapDiff = rpoGap - rpoPrevGap; - } - if (AM_WAL_HADR_SENDER && u_sess->attr.attr_storage.hadr_recovery_time_target) { - if (walsnd->log_ctrl.prev_RTO < 0) { - walsnd->log_ctrl.prev_RTO = walsnd->log_ctrl.current_RTO; - } - int targetRTO = u_sess->attr.attr_storage.hadr_recovery_time_target / 2; - int64 currentRTO = walsnd->log_ctrl.current_RTO; - rtoGap = currentRTO - targetRTO; - rtoPrevGap = walsnd->log_ctrl.prev_RTO - targetRTO; - rtoGapDiff = rtoGap - rtoPrevGap; - } - if (AM_WAL_HADR_SENDER && u_sess->attr.attr_storage.hadr_recovery_point_target) { - if (walsnd->log_ctrl.prev_RPO < 0) { - walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO; - } - - int targetRPO = u_sess->attr.attr_storage.hadr_recovery_point_target; - int64 currentRPO = walsnd->log_ctrl.current_RPO; - rpoGap = currentRPO - targetRPO; - rpoPrevGap = walsnd->log_ctrl.prev_RPO - targetRPO; - rpoGapDiff = rpoGap - rpoPrevGap; - } - - if (abs(rpoGapDiff) > abs(rtoGapDiff)) { - *gapDiff = rpoGapDiff; - *gap = rpoGap; - if (AM_WAL_HADR_SENDER) { - *isEagerMode = walsnd->log_ctrl.current_RPO > u_sess->attr.attr_storage.hadr_recovery_point_target - || walsnd->log_ctrl.current_RPO <= 1; - } else { - *isEagerMode = walsnd->log_ctrl.current_RPO > u_sess->attr.attr_storage.time_to_target_rpo - || 
walsnd->log_ctrl.current_RPO <= 1;
-    }
+        *gap = walsnd->log_ctrl.current_RPO - targetRPO;
+        *gapDiff = walsnd->log_ctrl.current_RPO - walsnd->log_ctrl.prev_RPO;
+        *isEagerMode = walsnd->log_ctrl.current_RPO > u_sess->attr.attr_storage.hadr_recovery_point_target ||
+            walsnd->log_ctrl.current_RPO <= 1;
    } else {
-        *gapDiff = rtoGapDiff;
-        *gap = rtoGap;
-        if (AM_WAL_HADR_SENDER) {
-            *isEagerMode = walsnd->log_ctrl.current_RTO > u_sess->attr.attr_storage.hadr_recovery_time_target
-                || walsnd->log_ctrl.current_RTO <= 1;
+        int targetRTO = 0;
+        if (AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.hadr_recovery_time_target) {
+            targetRTO = u_sess->attr.attr_storage.hadr_recovery_time_target / 2;
+            *isEagerMode = walsnd->log_ctrl.current_RTO > u_sess->attr.attr_storage.hadr_recovery_time_target ||
+                walsnd->log_ctrl.current_RTO <= 1;
+        } else if (!AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.target_rto > 0) {
+            targetRTO = u_sess->attr.attr_storage.target_rto / 2;
+            *isEagerMode = walsnd->log_ctrl.current_RTO > u_sess->attr.attr_storage.target_rto ||
+                walsnd->log_ctrl.current_RTO <= 1;
        } else {
-            *isEagerMode = walsnd->log_ctrl.current_RTO > u_sess->attr.attr_storage.target_rto
-                || walsnd->log_ctrl.current_RTO <= 1;
+            ereport(WARNING,
+                (errmsg("[CalculateIndicatorChange] got the wrong targetRTO, target_rto is %d, "
+                    "hadr_recovery_time_target is %d, hadr_recovery_point_target is %d",
+                    u_sess->attr.attr_storage.target_rto, u_sess->attr.attr_storage.hadr_recovery_time_target,
+                    u_sess->attr.attr_storage.hadr_recovery_point_target)));
        }
+        if (walsnd->log_ctrl.prev_RTO < 0) {
+            walsnd->log_ctrl.prev_RTO = walsnd->log_ctrl.current_RTO;
+        }
+        *gap = walsnd->log_ctrl.current_RTO - targetRTO;
+        *gapDiff = walsnd->log_ctrl.current_RTO - walsnd->log_ctrl.prev_RTO;
    }
-    ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("[LogCtrlCalculateIndicatorChange] rto_gap=%d, rto_gap_diff=%d,"
-        "rpo_gap=%d, rpo_gap_diff=%d, gap=%d, gap_diff=%d",
-        (int)rtoGap, (int)rtoGapDiff, (int)rpoGap, (int)rpoGapDiff, (int)*gap, (int)*gapDiff)));
+    ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+        errmsg("[CalculateIndicatorChange] gap=%d, gap_diff=%d,"
+            "isEagerMode=%d, isHadrRPO=%d",
+            (int)*gap, (int)*gapDiff, (int)*isEagerMode, (int)isHadrRPO)));
+
 }

-static void LogCtrlTreatNoDataSend()
+static void LogCtrlTreatNoDataSend(int64 *sleepTime)
 {
    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
-    int64 sleepTime = walsnd->log_ctrl.sleep_time;
    if (walsnd->log_ctrl.prev_send_time > 0 &&
        ComputeTimeStamp(walsnd->log_ctrl.prev_send_time) > (MILLISECONDS_PER_SECONDS * 3)) {
-        sleepTime -= SLEEP_LESS * EAGER_MODE_MULTIPLE;
-        walsnd->log_ctrl.sleep_time = (sleepTime >= 0) ? sleepTime : 0;
+        *sleepTime -= SLEEP_LESS * EAGER_MODE_MULTIPLE;
+        *sleepTime = (*sleepTime >= 0) ? *sleepTime : 0;
+        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+            errmsg("Walsender has not sent data for more than 3s, sleep time reduced to: %ld ", *sleepTime)));
    }
 }
+#endif

 /*
 * If current RTO/RPO is less than target_rto/time_to_target_rpo, primary need less sleep.
@@ -2948,38 +3127,64 @@ static void LogCtrlTreatNoDataSend()
 * If current RTO/RPO is bigger than target_rto/time_to_target_rpo, primary need more sleep.
 * If current RTO/RPO equals to target_rto/time_to_target_rpo, primary will sleep
 * according to balance_sleep to maintain rto.
 */
-static void LogCtrlCalculateSleepTime(void)
+static void LogCtrlCalculateSleepTime(int64 logCtrlSleepTime, int64 balanceSleepTime, const bool isHadrRPO)
 {
-    LogCtrlTreatNoDataSend();
+#ifndef ENABLE_LITE_MODE
    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
-    int64 sleepTime = walsnd->log_ctrl.sleep_time;
+    int addLevel;
+    int reduceLevel;
+
+    /* used for rto logging */
+    int64 pre_time = logCtrlSleepTime;
+
+    /* Range within which rto/rpo may fluctuate around target/2 */
+    int balance_range = 1;
+    const int NEEDS_LARGER_RANGE = 60;
+    if (isHadrRPO) {
+        addLevel = SLEEP_MORE / EAGER_MODE_MULTIPLE;
+        reduceLevel = SLEEP_LESS / EAGER_MODE_MULTIPLE;
+    } else {
+        addLevel = SLEEP_MORE;
+        reduceLevel = SLEEP_LESS;
+        if (AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.hadr_recovery_time_target >= NEEDS_LARGER_RANGE) {
+            balance_range = 2;
+        } else if (!AM_WAL_HADR_DNCN_SENDER && u_sess->attr.attr_storage.target_rto >= NEEDS_LARGER_RANGE) {
+            balance_range = 2;
+        }
+        LogCtrlTreatNoDataSend(&logCtrlSleepTime);
+    }
+    int64 sleepTime = logCtrlSleepTime;
    int64 gapDiff;
    int64 gap;
    bool isEagerMode = false;
-    LogCtrlCalculateIndicatorChange(&gapDiff, &gap, &isEagerMode);
-
-    /* use for rto log */
-    int64 pre_time = walsnd->log_ctrl.sleep_time;
-    int balance_range = 1;
+    LogCtrlCalculateIndicatorChange(&gapDiff, &gap, &isEagerMode, isHadrRPO);

    /* mark balance sleep time */
-    if (abs(gapDiff) <= balance_range) {
-        if (walsnd->log_ctrl.balance_sleep_time == 0) {
-            walsnd->log_ctrl.balance_sleep_time = sleepTime;
+    if (abs(gapDiff) <= 1) {
+        if (balanceSleepTime == 0) {
+            balanceSleepTime = sleepTime;
        } else {
-            walsnd->log_ctrl.balance_sleep_time = (walsnd->log_ctrl.balance_sleep_time + sleepTime) / 2;
+            balanceSleepTime = (int64)LogCtrlCountBigSpeed((uint64)balanceSleepTime, (uint64)sleepTime >> SHIFT_SPEED);
+        }
+        if (isHadrRPO) {
+            ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+                errmsg("The RPO balance time for log control is : %ld microseconds", balanceSleepTime)));
+        } else {
+            ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+                errmsg("The RTO balance time for log control is : %ld microseconds", balanceSleepTime)));
        }
-        ereport(DEBUG4, (errmodule(MOD_RTO_RPO), errmsg("The balance time for log control is : %ld microseconds",
-            walsnd->log_ctrl.balance_sleep_time)));
    }

    /* rto balance, currentRTO close to targetRTO */
-    if (abs(gap) <= balance_range) {
-        if (walsnd->log_ctrl.balance_sleep_time != 0) {
-            walsnd->log_ctrl.sleep_time = walsnd->log_ctrl.balance_sleep_time;
+    if (abs(gap) <= balance_range && !isEagerMode) {
+        if (balanceSleepTime != 0) {
+            logCtrlSleepTime = balanceSleepTime;
+        } else if (gapDiff < 0) {
+            sleepTime -= reduceLevel;
+            logCtrlSleepTime = (sleepTime >= 0) ? sleepTime : 0;
        } else {
-            sleepTime -= SLEEP_LESS;
-            walsnd->log_ctrl.sleep_time = (sleepTime >= 0) ? sleepTime : 0;
+            sleepTime += addLevel;
+            logCtrlSleepTime = (sleepTime < 1 * MICROSECONDS_PER_SECONDS) ? sleepTime : MICROSECONDS_PER_SECONDS;
        }
    }
@@ -2987,76 +3192,101 @@
     * get bigger, but no more than 1s */
    if (gap > balance_range) {
-        sleepTime += isEagerMode ? (SLEEP_MORE * EAGER_MODE_MULTIPLE) : (SLEEP_MORE);
-        walsnd->log_ctrl.sleep_time = (sleepTime < 1 * MICROSECONDS_PER_SECONDS) ? sleepTime : MICROSECONDS_PER_SECONDS;
+        if (isEagerMode) {
+            sleepTime += addLevel * EAGER_MODE_MULTIPLE;
+        } else if (gapDiff > 0) {
+            sleepTime += addLevel;
+        }
+        logCtrlSleepTime = (sleepTime < 1 * MICROSECONDS_PER_SECONDS) ? sleepTime : MICROSECONDS_PER_SECONDS;
    }

    /* need less sleep, currentRTO less than targetRTO */
    if (gap < -balance_range) {
-        sleepTime -= isEagerMode ? (SLEEP_LESS * EAGER_MODE_MULTIPLE) : (SLEEP_LESS);
-        walsnd->log_ctrl.sleep_time = (sleepTime >= 0) ? sleepTime : 0;
+        if (isEagerMode) {
+            sleepTime -= (balanceSleepTime == 0) ? sleepTime : reduceLevel * EAGER_MODE_MULTIPLE;
+        } else if (gapDiff < 0) {
+            sleepTime -= reduceLevel;
+        }
+        logCtrlSleepTime = (sleepTime >= 0) ? sleepTime : 0;
+    }
+
+    int targetRTO = 0;
+    if (AM_WAL_HADR_DNCN_SENDER) {
+        targetRTO = u_sess->attr.attr_storage.hadr_recovery_time_target;
+    } else {
+        targetRTO = u_sess->attr.attr_storage.target_rto;
    }

    /* log control take effect */
-    if (pre_time == 0 && walsnd->log_ctrl.sleep_time != 0) {
-        if (AM_WAL_HADR_SENDER) {
+    if (pre_time == 0 && logCtrlSleepTime != 0) {
+        if (isHadrRPO) {
            ereport(LOG, (errmodule(MOD_RTO_RPO),
-                errmsg("Log control take effect, target_rto is %d, current_rto is %ld, current the sleep time is %ld "
-                    "microseconds",
-                    u_sess->attr.attr_storage.hadr_recovery_time_target, walsnd->log_ctrl.current_RTO,
-                    walsnd->log_ctrl.sleep_time)));
+                errmsg("Log control takes effect due to RPO, target_rpo is %d, current_rpo is %ld, "
+                    "currently the sleep time is %ld microseconds",
+                    u_sess->attr.attr_storage.hadr_recovery_point_target,
+                    walsnd->log_ctrl.current_RPO, logCtrlSleepTime)));
        } else {
            ereport(LOG, (errmodule(MOD_RTO_RPO),
-                errmsg("Log control take effect, target_rto is %d, current_rto is %ld, current the sleep time is %ld "
-                    "microseconds",
-                    u_sess->attr.attr_storage.target_rto, walsnd->log_ctrl.current_RTO,
-                    walsnd->log_ctrl.sleep_time)));
+                errmsg("Log control takes effect due to RTO, target_rto is %d, current_rto is %ld, "
+                    "currently the sleep time is %ld microseconds",
+                    targetRTO, walsnd->log_ctrl.current_RTO, logCtrlSleepTime)));
        }
    }
    /* log control take does not effect */
-    if (pre_time != 0 && walsnd->log_ctrl.sleep_time == 0) {
-        if (AM_WAL_HADR_SENDER) {
-            ereport(
-                LOG,
+    if (pre_time != 0 && logCtrlSleepTime == 0) {
+        if (isHadrRPO) {
+            ereport(LOG,
                (errmodule(MOD_RTO_RPO),
-                errmsg("Log control does not take effect, target_rto is %d, current_rto is %ld, current the sleep time "
-                    "is %ld microseconds",
-                    u_sess->attr.attr_storage.hadr_recovery_time_target, walsnd->log_ctrl.current_RTO,
-                    walsnd->log_ctrl.sleep_time)));
+                errmsg("Log control does not take effect due to RPO, target_rpo is %d, current_rpo is %ld, "
+                    "currently the sleep time is %ld microseconds",
+                    u_sess->attr.attr_storage.hadr_recovery_point_target,
+                    walsnd->log_ctrl.current_RPO, logCtrlSleepTime)));
        } else {
-            ereport(
-                LOG,
+            ereport(LOG,
                (errmodule(MOD_RTO_RPO),
-                errmsg("Log control does not take effect, target_rto is %d, current_rto is %ld, current the sleep time "
-                    "is %ld microseconds",
-                    u_sess->attr.attr_storage.target_rto, walsnd->log_ctrl.current_RTO, walsnd->log_ctrl.sleep_time)));
+                errmsg("Log control does not take effect due to RTO, target_rto is %d, current_rto is %ld, "
+                    "currently the sleep time is %ld microseconds",
+                    targetRTO, walsnd->log_ctrl.current_RTO, logCtrlSleepTime)));
        }
    }
-    ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
-        errmsg("The sleep time for log control is : %ld microseconds", walsnd->log_ctrl.sleep_time)));
+    /* return the value and print debug log */
+    if (isHadrRPO) {
+        g_instance.streaming_dr_cxt.rpoSleepTime = logCtrlSleepTime;
+        g_instance.streaming_dr_cxt.rpoBalanceSleepTime = balanceSleepTime;
+        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+            errmsg("The RPO sleep time for log control is : %ld microseconds, current RPO is : %ld",
+                logCtrlSleepTime, walsnd->log_ctrl.current_RPO)));
+    } else {
+        walsnd->log_ctrl.sleep_time = logCtrlSleepTime;
+        walsnd->log_ctrl.balance_sleep_time = balanceSleepTime;
+        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+            errmsg("The RTO sleep time for log control is : %ld microseconds, current RTO is : %ld",
+                logCtrlSleepTime, walsnd->log_ctrl.current_RTO)));
+    }
+#else
+    FEATURE_ON_LITE_MODE_NOT_SUPPORTED();
+#endif
 }

 static void LogCtrlCalculateCurrentRPO(StandbyReplyMessage *reply)
 {
+#ifndef ENABLE_LITE_MODE
    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
-    long barrierTime = 0;
-    uint64 barrierId;
-    int matchNum;
-    char barrier[MAX_BARRIER_ID_LENGTH] = {0};
    XLogRecPtr receivePtr;
    XLogRecPtr writePtr;
    XLogRecPtr flushPtr;
    XLogRecPtr replayPtr;
-    uint64 gap;
    bool got_recptr = false;
    bool amSync = false;
    long sec_to_time;
    int microsec_to_time;
-    struct timeval timeVal;
-    long timeToRPO = 0;

-    if (AM_WAL_HADR_SENDER) {
+    if (AM_WAL_HADR_CN_SENDER) {
+        flushPtr = GetFlushRecPtr();
+    } else if (AM_WAL_SHARE_STORE_SENDER) {
+        flushPtr = g_instance.xlog_cxt.shareStorageXLogCtl->insertHead;
+    } else {
        got_recptr = SyncRepGetSyncRecPtr(&receivePtr, &writePtr, &flushPtr, &replayPtr, &amSync, false);
        if (got_recptr != true) {
            ereport(WARNING,
@@ -3064,48 +3294,104 @@
                "flush location")));
            return;
        }
-        gap = flushPtr - reply->flush;
-        if (walsnd->log_ctrl.prev_reply_time == 0) {
-            return;
-        }
-        TimestampDifference(walsnd->log_ctrl.prev_reply_time, reply->sendTime, &sec_to_time, &microsec_to_time);
-        long millisec_time_diff = sec_to_time * MILLISECONDS_PER_SECONDS +
-            microsec_to_time / MILLISECONDS_PER_MICROSECONDS;
-        if (millisec_time_diff <= 10) {
-            return;
-        }
-        if (walsnd->log_ctrl.pre_rpo_rate != 0) {
-            walsnd->log_ctrl.pre_rpo_rate = LogCtrlCountBigSpeed(walsnd->log_ctrl.pre_rpo_rate,
-                (uint64)(gap / millisec_time_diff));
-        } else {
-            walsnd->log_ctrl.pre_rpo_rate = ((gap / (uint64)millisec_time_diff) << SHIFT_SPEED);
-        }
-        uint64 consumeRatePart = (walsnd->log_ctrl.pre_rpo_rate >> SHIFT_SPEED);
-        if (consumeRatePart == 0) {
-            consumeRatePart = 1;
-        }
-        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
-            errmsg("[LogCtrlCalculateCurrentRPO] timeToRPO is %ld ms", (gap / consumeRatePart))));
-        walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO;
-        walsnd->log_ctrl.current_RPO = (gap / consumeRatePart) / MILLISECONDS_PER_SECONDS;
-    } else if (ArchiveReplicationReadFile(HADR_BARRIER_ID_FILE, (char *)barrier, MAX_BARRIER_ID_LENGTH)) {
-        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
-            errmsg("[LogCtrlCalculateCurrentRPO] read barrier id from obs %s", barrier)));
-        matchNum = sscanf_s(barrier, "hadr_%020" PRIu64 "_%013ld", &barrierId, &barrierTime);
-        if (matchNum != 2) {
-            return;
-        }
-        gettimeofday(&timeVal, NULL);
-        timeToRPO = timeVal.tv_sec * MILLISECONDS_PER_SECONDS +
-            timeVal.tv_usec / MILLISECONDS_PER_SECONDS - barrierTime; // ms
-        ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
-            errmsg("[LogCtrlCalculateCurrentRPO] timeToRPO is %ld ms", timeToRPO)));
-
-        walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO;
-        walsnd->log_ctrl.current_RPO = timeToRPO / MILLISECONDS_PER_SECONDS;
    }
+    if (XLByteLT(flushPtr, reply->flush) || XLByteLT(reply->flush, walsnd->log_ctrl.prev_flush)) {
+        return;
+    }
+    if (XLByteEQ(flushPtr, reply->flush)) {
+        walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO;
+        walsnd->log_ctrl.current_RPO = 0;
+        return;
+    }
+
+    if (walsnd->log_ctrl.prev_reply_time == 0) {
+        return;
+    }
+    TimestampDifference(walsnd->log_ctrl.prev_reply_time, reply->sendTime, &sec_to_time, &microsec_to_time);
+    long millisec_time_diff = sec_to_time * MILLISECONDS_PER_SECONDS +
+        microsec_to_time / MILLISECONDS_PER_MICROSECONDS;
+    if (millisec_time_diff <= 100) {
+        return;
+    }
+
+    uint64 needFlush = flushPtr - reply->flush;
+    uint64 newFlush = reply->flush - walsnd->log_ctrl.prev_flush;
+
+    if ((walsnd->log_ctrl.local_flush_rate >> SHIFT_SPEED) > 1) {
+        walsnd->log_ctrl.local_flush_rate = LogCtrlCountBigSpeed(walsnd->log_ctrl.local_flush_rate,
+            (uint64)(newFlush / millisec_time_diff));
+    } else {
+        walsnd->log_ctrl.local_flush_rate = ((newFlush / (uint64)millisec_time_diff) << SHIFT_SPEED);
+    }
+
+    uint64 consumeRatePart = (walsnd->log_ctrl.local_flush_rate >> SHIFT_SPEED);
+    if (consumeRatePart == 0) {
+        consumeRatePart = 1;
+    }
+    walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO;
+    walsnd->log_ctrl.current_RPO = (needFlush / consumeRatePart) / MILLISECONDS_PER_SECONDS;
+    ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+        errmsg("The RPO estimated is = : %ld seconds. local flush is %lu, reply->flush is %lu, "
+            "prev_flush is %lu, TimestampDifference is %ld, "
+            "consumeRatePart is %lu, RPO millisecond is %ld",
+            walsnd->log_ctrl.current_RPO, flushPtr, reply->flush, walsnd->log_ctrl.prev_flush,
+            millisec_time_diff, consumeRatePart, needFlush / consumeRatePart)));
+#else
+    FEATURE_ON_LITE_MODE_NOT_SUPPORTED();
+#endif
 }

+#ifdef ENABLE_MULTIPLE_NODES
+static void LogCtrlCalculateHadrCurrentRPO(void)
+{
+    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
+    int64 targetBarrierTimeStamp;
+    int64 currentBarrierTimeStamp;
+    int64 timeDiff;
+    errno_t errorno = EOK;
+    char targetBarrierId[MAX_BARRIER_ID_LENGTH];
+    char currentBarrierId[MAX_BARRIER_ID_LENGTH];
+
+    SpinLockAcquire(&g_instance.streaming_dr_cxt.mutex);
+    errorno = strncpy_s((char *)targetBarrierId, MAX_BARRIER_ID_LENGTH,
+        (char *)g_instance.streaming_dr_cxt.targetBarrierId, MAX_BARRIER_ID_LENGTH);
+    securec_check(errorno, "\0", "\0");
+    errorno = strncpy_s((char *)currentBarrierId, MAX_BARRIER_ID_LENGTH,
+        (char *)g_instance.streaming_dr_cxt.currentBarrierId, MAX_BARRIER_ID_LENGTH);
+    securec_check(errorno, "\0", "\0");
+    SpinLockRelease(&g_instance.streaming_dr_cxt.mutex);
+
+    if (!IS_CSN_BARRIER(currentBarrierId) || !IS_CSN_BARRIER(targetBarrierId)) {
+        return;
+    }
+
+    if (IsSwitchoverBarrier(targetBarrierId)) {
+        walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO;
+        walsnd->log_ctrl.current_RPO = 0;
+        return;
+    }
+
+    targetBarrierTimeStamp = CsnBarrierNameGetTimeStamp(targetBarrierId);
+    currentBarrierTimeStamp = CsnBarrierNameGetTimeStamp(currentBarrierId);
+    if (targetBarrierTimeStamp == 0 || currentBarrierTimeStamp == 0) {
+        ereport(WARNING,
+            (errmsg("[HADR_RPO] failed to get barrier timestamp, targetBarrierId: %s, currentBarrierId: %s",
+                targetBarrierId, currentBarrierId)));
+        return;
+    }
+    timeDiff = currentBarrierTimeStamp - targetBarrierTimeStamp;
+    if (timeDiff < 0) {
+        timeDiff = 0;
+    }
+    walsnd->log_ctrl.prev_RPO = walsnd->log_ctrl.current_RPO;
+    walsnd->log_ctrl.current_RPO = timeDiff / MILLISECONDS_PER_SECONDS;
+    ereport(DEBUG4, (errmodule(MOD_RTO_RPO),
+        errmsg("The RPO estimated is = : %ld seconds. targetBarrierId: %s, currentBarrierId: %s, "
+            "RPO millisecond is %ld",
+            walsnd->log_ctrl.current_RPO, targetBarrierId, currentBarrierId, timeDiff)));
+}
+#endif
+
 static void ChooseStartPointForDummyStandby(void)
 {
    XLogRecPtr initSentPtr;
@@ -3155,7 +3441,7 @@
        (uint32)t_thrd.walsender_cxt.sentPtr)));
 }
 static int WSXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr,
-    char *readBuf, TimeLineID *pageTLI)
+    char *readBuf, TimeLineID *pageTLI, char* xlog_path = NULL)
 {
    WSXLogPageReadPrivate *ws_private = (WSXLogPageReadPrivate *)xlogreader->private_data;
    uint32 targetPageOff;
@@ -3177,7 +3463,7 @@
    XLByteToSeg(targetPagePtr, ws_private->xlogreadlogsegno);

    if (ws_private->xlogreadfd < 0) {
-        XLogFileName(xlogfile, ws_private->tli, ws_private->xlogreadlogsegno);
+        XLogFileName(xlogfile, MAXFNAMELEN, ws_private->tli, ws_private->xlogreadlogsegno);
        nRetCode = snprintf_s(xlogfpath, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%s", xlogfile);
        securec_check_ss(nRetCode, "\0", "\0");
@@ -3388,6 +3674,15 @@ static int WalSndLoop(WalSndSendDataCallback send_data)
            }
        }

+        /*
+         * When the streaming replication connection is configured as a standby
+         * connection but the local role is primary, disconnect, so that the
+         * decoding thread does not occupy resources on the primary.
+         */
+        if (t_thrd.walsender_cxt.standbyConnection == true && PMstateIsRun()) {
+            ereport(ERROR, (errmsg("walsender closed because the role is primary.")));
+        }
+
        /* if changed to stream replication, request for catchup. */
        if (u_sess->attr.attr_storage.enable_stream_replication && !marked_stream_replication) {
            marked_stream_replication = u_sess->attr.attr_storage.enable_stream_replication;
@@ -3633,7 +3928,7 @@ static int WalSndLoop(WalSndSendDataCallback send_data)

        now = GetCurrentTimestamp();

-        if (u_sess->proc_cxt.MyDatabaseId != InvalidOid)
+        if (u_sess->proc_cxt.MyDatabaseId != InvalidOid && t_thrd.slot_cxt.MyReplicationSlot != NULL)
            WalSndWriteLogicalAdvanceXLog(now);

        /*
@@ -3696,6 +3991,17 @@ static int WalSndLoop(WalSndSendDataCallback send_data)
            g_instance.streaming_dr_cxt.switchoverBarrierLsn != InvalidXLogRecPtr) {
            WalSndHadrSwitchoverRequest();
        }
+        /* In a distributed streaming DR cluster, shut down the main standby's walsender when the term changes */
+        volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
+        if (IS_DISASTER_RECOVER_MODE &&
+            walsnd->isTermChanged &&
+            t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby) {
+            ereport(LOG, (errmsg("Shutdown walsender of main standby due to the term change.")));
+            SpinLockAcquire(&walsnd->mutex);
+            walsnd->isTermChanged = false;
+            SpinLockRelease(&walsnd->mutex);
+            break;
+        }
    }

    WalSndShutdown();
@@ -3897,10 +4203,16 @@ static void InitWalSnd(void)
    walsnd->sendKeepalive = true;
    walsnd->replSender = false;
    walsnd->peer_role = UNKNOWN_MODE;
-    if (AM_WAL_HADR_SENDER) {
+    if (AM_WAL_HADR_SENDER || AM_WAL_HADR_CN_SENDER) {
        walsnd->is_cross_cluster = true;
        g_instance.streaming_dr_cxt.isInStreaming_dr = true;
+        walsnd->isInteractionCompleted = false;
+        walsnd->lastRequestTimestamp = GetCurrentTimestamp();
+        rc = memset_s(g_instance.streaming_dr_cxt.targetBarrierId, MAX_BARRIER_ID_LENGTH, 0,
+            sizeof(g_instance.streaming_dr_cxt.targetBarrierId));
+        securec_check(rc, "\0", "\0");
    }
+    walsnd->isTermChanged = false;
    walsnd->peer_state = NORMAL_STATE;
    walsnd->channel_get_replc = 0;
    rc =
memset_s((void *)&walsnd->receive, sizeof(XLogRecPtr), 0, sizeof(XLogRecPtr)); @@ -3925,10 +4237,14 @@ static void InitWalSnd(void) walsnd->log_ctrl.sleep_count_limit = MAX_CONTROL_REPLY; walsnd->log_ctrl.prev_flush = 0; walsnd->log_ctrl.prev_apply = 0; + walsnd->log_ctrl.period_total_flush = 0; + walsnd->log_ctrl.period_total_apply = 0; + walsnd->log_ctrl.local_prev_flush = 0; walsnd->log_ctrl.prev_reply_time = 0; - walsnd->log_ctrl.pre_rate1 = 0; - walsnd->log_ctrl.pre_rate2 = 0; - walsnd->log_ctrl.pre_rpo_rate = 0; + walsnd->log_ctrl.prev_calculate_time = 0; + walsnd->log_ctrl.flush_rate = 0; + walsnd->log_ctrl.apply_rate = 0; + walsnd->log_ctrl.local_flush_rate = 0; walsnd->log_ctrl.prev_RPO = -1; walsnd->log_ctrl.current_RPO = -1; walsnd->log_ctrl.prev_send_time = 0; @@ -3968,6 +4284,7 @@ static void WalSndReset(WalSnd *walsnd) walsnd->wal_sender_channel.remoteport = 0; walsnd->channel_get_replc = 0; walsnd->is_cross_cluster = false; + walsnd->isTermChanged = false; rc = memset_s(walsnd->wal_sender_channel.localhost, sizeof(walsnd->wal_sender_channel.localhost), 0, sizeof(walsnd->wal_sender_channel.localhost)); securec_check_c(rc, "\0", "\0"); @@ -4104,7 +4421,7 @@ retry: } XLByteToSeg(recptr, t_thrd.walsender_cxt.sendSegNo); - XLogFilePath(path, t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.walsender_cxt.sendSegNo); + XLogFilePath(path, MAXPGPATH, t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.walsender_cxt.sendSegNo); t_thrd.walsender_cxt.sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0); if (t_thrd.walsender_cxt.sendFile < 0) { @@ -4208,14 +4525,238 @@ retry: WalSegmemtRemovedhappened = false; } +/* + * Handle logical log with type LOGICAL_LOG_COMMIT and LOGICAL_LOG_ABORT. + */ +static void LogicalLogHandleAbortOrCommit(logicalLog *logChange, ParallelReorderBuffer *prb, int slotId, bool isCommit) +{ + ParallelReorderBufferTXN *txn = NULL; + + txn = ParallelReorderBufferTXNByXid(prb, logChange->xid, true, NULL, logChange->lsn, true); + if (txn == NULL) { + ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("Logical log commit with no txn found"))); + FreeLogicalLog(logChange, slotId); + return; + } + txn->final_lsn = logChange->finalLsn; + txn->nsubtxns = logChange->nsubxacts; + txn->commit_time = logChange->commitTime; + + bool isForget = false; + if (XLByteLT(txn->final_lsn, g_Logicaldispatcher[slotId].startpoint)) { + isForget = true; + } + for (int i = 0; i < logChange->nsubxacts; i++) { + TransactionId subXid = logChange->subXids[i]; + bool newSub = false; + ParallelReorderBufferTXN *subtxn = + ParallelReorderBufferTXNByXid(prb, subXid, false, &newSub, InvalidXLogRecPtr, false); + if (subtxn != NULL && !newSub && !subtxn->is_known_as_subxact) { + dlist_delete(&subtxn->node); + } + if (subtxn != NULL) { + subtxn->is_known_as_subxact = true; + subtxn->final_lsn = logChange->finalLsn; + subtxn->commit_time = logChange->commitTime; + if (!isForget) { + dlist_push_tail(&txn->subtxns, &subtxn->node); + } else { + ParallelReorderBufferForget(prb, slotId, subtxn); + } + } + } + + if (isForget) { + ParallelReorderBufferForget(prb, slotId, txn); + } else if (isCommit && logChange->lsn > t_thrd.walsender_cxt.sentPtr) { + ParallelReorderBufferCommit(prb, logChange, slotId, txn); + } else { + ParallelReorderBufferForget(prb, slotId, txn); + } + FreeLogicalLog(logChange, slotId); +} + +/* + * Poll and read the logical log queue of each decoder thread. + * Send to the client after processing. 
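+ * Dispatch depends on logChange->type: DML and NEW_CID changes are queued into
+ * the parallel reorder buffer, COMMIT/ABORT finalize (or forget) the transaction,
+ * RUNNING_XACTS advances the reader's restart LSN and candidate xmin, and
+ * CONFIRM_FLUSH only moves the walsender's sentPtr forward.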
+ */ +void LogicalLogHandle(logicalLog *logChange) +{ + int slotId = t_thrd.walsender_cxt.LogicalSlot; + ParallelDecodeReaderWorker* readWorker = g_Logicaldispatcher[slotId].readWorker; + ParallelReorderBuffer *prb = t_thrd.walsender_cxt.parallel_logical_decoding_ctx->reorder; + + switch (logChange->type) { + case LOGICAL_LOG_EMPTY: { + FreeLogicalLog(logChange, slotId); + break; + } + case LOGICAL_LOG_DML: { + ParallelReorderBufferQueueChange(prb, logChange, slotId); + break; + } + case LOGICAL_LOG_RUNNING_XACTS: { + ParallelReorderBufferTXN *txn = NULL; + txn = ParallelReorderBufferGetOldestTXN(prb); + /* + * oldest ongoing txn might have started when we didn't yet serialize + * anything because we hadn't reached a consistent state yet. + */ + prb->lastRunningXactOldestXmin = logChange->oldestXmin; + SpinLockAcquire(&(readWorker->rwlock)); + readWorker->current_lsn = logChange->lsn; + prb->current_restart_decoding_lsn = logChange->lsn; + + /* + * Every time the running Xact log is decoded, + * the LSN and xmin in the decoding log are recorded and + * pre pushed in the parallel logical reader thread. + */ + if (txn == NULL) { + readWorker->restart_lsn = logChange->lsn; + readWorker->candidate_oldest_xmin = logChange->oldestXmin; + } else { + readWorker->restart_lsn = txn->restart_decoding_lsn; + readWorker->candidate_oldest_xmin = txn->oldestXid; + } + + ereport(DEBUG2, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_LOGICAL_DECODE_ERROR), + errmsg("LogicalLogHandle restart_lsn at %X/%X, current_lsn %X/%X.", + (uint32)(readWorker->restart_lsn >> 32), + (uint32)readWorker->restart_lsn, + (uint32)(readWorker->current_lsn >> 32), + (uint32)readWorker->current_lsn))); + readWorker->candidate_oldest_xmin_lsn = logChange->lsn; + SpinLockRelease(&(readWorker->rwlock)); + /* + * Iterate through all (potential) toplevel TXNs and abort all that are + * older than what possibly can be running. + */ + while (true) { + ParallelReorderBufferTXN *txn = ParallelReorderBufferGetOldestTXN(prb); + if (txn != NULL && txn->xid < logChange->oldestXmin) { + if (!RecoveryInProgress()) { + ereport(DEBUG2, (errmsg("aborting old transaction %lu", txn->xid))); + } + /* remove potential on-disk data, and deallocate this tx */ + ParallelReorderBufferForget(prb, slotId, txn); + } else { + break; + } + } + FreeLogicalLog(logChange, slotId); + break; + } + case LOGICAL_LOG_COMMIT: { + LogicalLogHandleAbortOrCommit(logChange, prb, slotId, true); + break; + } + + case LOGICAL_LOG_ABORT: { + LogicalLogHandleAbortOrCommit(logChange, prb, slotId, false); + break; + } + case LOGICAL_LOG_CONFIRM_FLUSH: { + t_thrd.walsender_cxt.sentPtr = logChange->lsn; + ereport(LOG, (errmodule(MOD_LOGICAL_DECODE), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("t_thrd.walsender_cxt.sentPtr %lu", t_thrd.walsender_cxt.sentPtr))); + break; + } + case LOGICAL_LOG_NEW_CID: { + ParallelReorderBufferQueueChange(prb, logChange, slotId); + break; + } + default: + break; + } +} + +/* + * Get the logical logs in logical queue in turn, and send them after processing. + */ +void XLogSendPararllelLogical() +{ + int slotId = t_thrd.walsender_cxt.LogicalSlot; + + if (t_thrd.slot_cxt.MyReplicationSlot == NULL && g_Logicaldispatcher[slotId].MyReplicationSlot != NULL) { + t_thrd.slot_cxt.MyReplicationSlot = g_Logicaldispatcher[slotId].MyReplicationSlot; + } + + /* + * Initialize when entering the loop for the first time. + * Ensure to get the correct logical log in logical queue every time. 
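+ * The id field is the round-robin cursor over the decode workers, so resetting
+ * it to 0 restarts polling from the first worker.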
+ */
+    if (g_Logicaldispatcher[slotId].firstLoop) {
+        g_Logicaldispatcher[slotId].id = 0;
+        g_Logicaldispatcher[slotId].firstLoop = false;
+    }
+
+    /* After 1 second with no new transactions, all decoded results should be sent automatically. */
+    {
+        const int expTime = 1000;
+        ParallelReorderBuffer *prb = t_thrd.walsender_cxt.parallel_logical_decoding_ctx->reorder;
+        ParallelLogicalDecodingContext *ctx = (ParallelLogicalDecodingContext *)prb->private_data;
+        if (TimestampDifferenceExceeds(g_Logicaldispatcher[slotId].decodeTime, GetCurrentTimestamp(), expTime) &&
+            g_Logicaldispatcher[slotId].remainPatch) {
+            if (g_Logicaldispatcher[slotId].pOptions.decode_style == 'b' ||
+                g_Logicaldispatcher[slotId].pOptions.sending_batch > 0) {
+                pq_sendint32(ctx->out, 0); /* Send a zero to indicate that no further decoding results follow */
+            }
+            WalSndWriteDataHelper(ctx->out, 0, 0, false);
+            g_Logicaldispatcher[slotId].remainPatch = false;
+            t_thrd.walsender_cxt.sentPtr = pg_atomic_read_u64(&g_Logicaldispatcher[slotId].sentPtr);
+            WalSndKeepalive(false);
+            g_Logicaldispatcher[slotId].decodeTime = GetCurrentTimestamp();
+        } else if (TimestampDifferenceExceeds(g_Logicaldispatcher[slotId].decodeTime, GetCurrentTimestamp(), expTime)) {
+            t_thrd.walsender_cxt.sentPtr = pg_atomic_read_u64(&g_Logicaldispatcher[slotId].sentPtr);
+            WalSndKeepalive(false);
+            g_Logicaldispatcher[slotId].decodeTime = GetCurrentTimestamp();
+        }
+    }
+
+    /*
+     * Poll from all decoder threads to get logical logs.
+     * Since the reader polls the logs sent to the decode thread,
+     * the logical logs obtained by polling are in order.
+     */
+    for (;; g_Logicaldispatcher[slotId].id = (g_Logicaldispatcher[slotId].id + 1) % GetDecodeParallelism(slotId)) {
+
+        if (g_Logicaldispatcher[slotId].abnormal) {
+            /*
+             * Exit the current thread when a thread in the thread group exits abnormally.
+             */
+            ereport(ERROR, (errmsg("walsender sends SIGTERM to all parallel logical threads.")));
+        }
+
+        ParallelDecodeWorker* decodeWorker = g_Logicaldispatcher[slotId].decodeWorkers[g_Logicaldispatcher[slotId].id];
+        logicalLog *logChange = (logicalLog*)LogicalQueueTop(decodeWorker->LogicalLogQueue);
+
+        if (logChange != NULL) {
+            LogicalLogHandle(logChange);
+            LogicalQueuePop(decodeWorker->LogicalLogQueue);
+        } else {
+            return;
+        }
+    }
+
+    /* Update shared memory status */
+    {
+        /* use volatile pointer to prevent code rearrangement */
+        volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
+        t_thrd.walsender_cxt.sentPtr = pg_atomic_read_u64(&g_Logicaldispatcher[slotId].sentPtr);
+
+        SpinLockAcquire(&walsnd->mutex);
+        walsnd->sentPtr = t_thrd.walsender_cxt.sentPtr;
+        SpinLockRelease(&walsnd->mutex);
+    }
+}
+
 /*
 * Stream out logically decoded data.
 */
 static void XLogSendLogical(void)
 {
-#ifdef ENABLE_MULTIPLE_NODES
-    CheckPMstateAndRecoveryInProgress();
-#endif
    XLogRecord *record = NULL;
    char *errm = NULL;
@@ -4379,11 +4920,21 @@ static void XLogSendPhysical(void)
     * Read the log directly into the output buffer to avoid extra memcpy
     * calls.
-    XLogRead(t_thrd.walsender_cxt.output_xlog_message + 1 + sizeof(WalDataMessageHeader), startptr, nbytes);
-    ereport(DEBUG5, (errmsg("conninfo:(%s,%d) start: %X/%X, end: %X/%X, %lu bytes",
-                            t_thrd.walsender_cxt.MyWalSnd->wal_sender_channel.localhost,
-                            t_thrd.walsender_cxt.MyWalSnd->wal_sender_channel.localport, (uint32)(startptr >> 32),
-                            (uint32)startptr, (uint32)(endptr >> 32), (uint32)endptr, nbytes)));
+
+    /* read into a temp buffer, compress, then copy into the output buffer */
+    int compressedSize = 0;
+
+    if (AmWalSenderToStandby() && g_instance.attr.attr_storage.enable_wal_shipping_compression &&
+        AM_WAL_HADR_DNCN_SENDER) {
+        t_thrd.walsender_cxt.output_xlog_message[0] = 'C';
+        XLogCompression(&compressedSize, startptr, nbytes);
+    } else {
+        XLogRead(t_thrd.walsender_cxt.output_xlog_message + 1 + sizeof(WalDataMessageHeader), startptr, nbytes);
+        ereport(DEBUG5, (errmsg("conninfo:(%s,%d) start: %X/%X, end: %X/%X, %lu bytes",
+                                t_thrd.walsender_cxt.MyWalSnd->wal_sender_channel.localhost,
+                                t_thrd.walsender_cxt.MyWalSnd->wal_sender_channel.localport, (uint32)(startptr >> 32),
+                                (uint32)startptr, (uint32)(endptr >> 32), (uint32)endptr, nbytes)));
+    }

 /*
  * We fill the message header last so that the send timestamp is taken as
@@ -4417,8 +4968,14 @@ static void XLogSendPhysical(void)
     pgstat_report_waitevent(WAIT_EVENT_LOGCTRL_SLEEP);
     LogCtrlSleep();
     pgstat_report_waitevent(WAIT_EVENT_END);
-    (void)pq_putmessage_noblock('d', t_thrd.walsender_cxt.output_xlog_message,
-                                1 + sizeof(WalDataMessageHeader) + nbytes);
+
+    if (t_thrd.walsender_cxt.output_xlog_message[0] == 'C') {
+        (void)pq_putmessage_noblock('d', t_thrd.walsender_cxt.output_xlog_message,
+                                    1 + sizeof(WalDataMessageHeader) + compressedSize);
+    } else {
+        (void)pq_putmessage_noblock('d', t_thrd.walsender_cxt.output_xlog_message,
+                                    1 + sizeof(WalDataMessageHeader) + nbytes);
+    }

     t_thrd.walsender_cxt.sentPtr = endptr;

@@ -4447,6 +5004,56 @@ static void XLogSendPhysical(void)
     return;
 }

+void XLogCompression(int* compressedSize, XLogRecPtr startPtr, Size nbytes)
+{
+    /* for xlog shipping performance */
+    char *xlogReadBuf = t_thrd.walsender_cxt.xlogReadBuf;
+    char *compressedBuf = t_thrd.walsender_cxt.compressBuf;
+    errno_t errorno = EOK;
+
+    if (xlogReadBuf == NULL) {
+        t_thrd.walsender_cxt.xlogReadBuf = (char *)palloc(1 + sizeof(WalDataMessageHeader) +
+            (int)WS_MAX_SEND_SIZE);
+        xlogReadBuf = t_thrd.walsender_cxt.xlogReadBuf;
+    }
+    if (compressedBuf == NULL) {
+        t_thrd.walsender_cxt.compressBuf = (char *)palloc(1 + sizeof(WalDataMessageHeader) +
+            (int)WS_MAX_SEND_SIZE);
+        compressedBuf = t_thrd.walsender_cxt.compressBuf;
+    }
+
+    XLogRead(xlogReadBuf, startPtr, nbytes);
+    *compressedSize = LZ4_compress_default(xlogReadBuf, compressedBuf, nbytes, LZ4_compressBound(nbytes));
+    if (*compressedSize > g_instance.attr.attr_storage.MaxSendSize * 1024) {
+        ereport(WARNING, (errmsg("[CompressWarning] compressed size bigger than MaxSendSize! 
startPtr %X/%X, " + "originsize %ld, compressedSize %d, compressBound %d", + (uint32)(startPtr >> 32), (uint32)startPtr, nbytes, *compressedSize, + LZ4_compressBound(nbytes)))); + t_thrd.walsender_cxt.output_xlog_message[0] = 'w'; + errorno = memcpy_s(t_thrd.walsender_cxt.output_xlog_message + 1 + sizeof(WalDataMessageHeader), + nbytes, xlogReadBuf, nbytes); + securec_check(errorno, "\0", "\0"); + return; + } + if (*compressedSize <= 0) { + ereport(WARNING, (errmsg("[CompressFailed] startPtr %X/%X, originsize %ld, compressedSize %d, compressBound %d", + (uint32)(startPtr >> 32), (uint32)startPtr, nbytes, *compressedSize, + LZ4_compressBound(nbytes)))); + t_thrd.walsender_cxt.output_xlog_message[0] = 'w'; + errorno = memcpy_s(t_thrd.walsender_cxt.output_xlog_message + 1 + sizeof(WalDataMessageHeader), + nbytes, xlogReadBuf, nbytes); + securec_check(errorno, "\0", "\0"); + return; + } + errorno = memcpy_s(t_thrd.walsender_cxt.output_xlog_message + 1 + sizeof(WalDataMessageHeader), + *compressedSize, compressedBuf, *compressedSize); + securec_check(errorno, "\0", "\0"); + ereport(DEBUG4, ((errmodule(MOD_REDO), errcode(ERRCODE_LOG), + errmsg("[XLOG_COMPRESS] xlog compression working! startPtr %X/%X, origSize %ld, compressedSize %d", + (uint32)(startPtr >> 32), (uint32)startPtr, nbytes, *compressedSize)))); + +} + /* * Request walsenders to reload the currently-open WAL file */ @@ -4882,55 +5489,15 @@ static const char *WalSndGetStateString(WalSndState state) return "Unknown"; } -/* - * Build tuple desc and store for the caller result - * return the tuple store, the tupdesc would be return by pointer. - */ -Tuplestorestate *BuildTupleResult(FunctionCallInfo fcinfo, TupleDesc *tupdesc) -{ - ReturnSetInfo *rsinfo = (ReturnSetInfo *)fcinfo->resultinfo; - Tuplestorestate *tupstore = NULL; - - MemoryContext per_query_ctx; - MemoryContext oldcontext; - - /* check to see if caller supports returning a tuplestore */ - if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("set-valued function called in context that cannot accept a set"))); - } - - if (!(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " - "allowed in this context"))); - - /* Build a tuple descriptor for our result type */ - if (get_call_result_type(fcinfo, NULL, tupdesc) != TYPEFUNC_COMPOSITE) - ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("return type must be a row type"))); - - per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); - - tupstore = tuplestore_begin_heap(true, false, u_sess->attr.attr_memory.work_mem); - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setResult = tupstore; - rsinfo->setDesc = *tupdesc; - - (void)MemoryContextSwitchTo(oldcontext); - - return tupstore; -} - - static void set_xlog_location(ServerMode local_role, XLogRecPtr* sndWrite, XLogRecPtr* sndFlush, XLogRecPtr* sndReplay){ if (local_role == PRIMARY_MODE) { - *sndWrite = GetXLogWriteRecPtr(); *sndFlush = GetFlushRecPtr(); + *sndWrite = GetXLogWriteRecPtr(); *sndReplay = *sndFlush; } else { - *sndWrite = GetWalRcvWriteRecPtr(NULL); - *sndFlush = GetStandbyFlushRecPtr(NULL); *sndReplay = GetXLogReplayRecPtr(NULL); + *sndFlush = GetStandbyFlushRecPtr(NULL); + *sndWrite = GetWalRcvWriteRecPtr(NULL); } } @@ -4981,18 +5548,6 @@ Datum get_paxos_replication_info(PG_FUNCTION_ARGS) securec_check(rc, "\0", "\0"); } else { -#ifndef 
ENABLE_MULTIPLE_NODES
-        /* paxos_write_location */
-        if (local_role == PRIMARY_MODE)
-            paxosWrite = GetPaxosWriteRecPtr();
-#endif
-        if (local_role == STANDBY_MODE)
-            paxosWrite = localWrite;
-        ret = snprintf_s(location, sizeof(location), sizeof(location) - 1,
-            "%X/%X", static_cast<uint32>(paxosWrite >> 32), static_cast<uint32>(paxosWrite));
-        securec_check_ss(ret, "\0", "\0");
-        values[j++] = CStringGetTextDatum(location);
-
 #ifndef ENABLE_MULTIPLE_NODES
         /* paxos_commit_location */
         if (local_role == PRIMARY_MODE)
@@ -5000,6 +5555,20 @@ Datum get_paxos_replication_info(PG_FUNCTION_ARGS)
 #endif
         if (local_role == STANDBY_MODE)
             paxosCommit = localWrite;
+
+#ifndef ENABLE_MULTIPLE_NODES
+        /* paxos_write_location */
+        if (local_role == PRIMARY_MODE)
+            paxosWrite = GetPaxosWriteRecPtr();
+#endif
+        if (local_role == STANDBY_MODE)
+            paxosWrite = localWrite;
+
+        ret = snprintf_s(location, sizeof(location), sizeof(location) - 1,
+            "%X/%X", static_cast<uint32>(paxosWrite >> 32), static_cast<uint32>(paxosWrite));
+        securec_check_ss(ret, "\0", "\0");
+        values[j++] = CStringGetTextDatum(location);
+
         ret = snprintf_s(location, sizeof(location), sizeof(location) - 1,
             "%X/%X", static_cast<uint32>(paxosCommit >> 32), static_cast<uint32>(paxosCommit));
         securec_check_ss(ret, "\0", "\0");
@@ -5136,10 +5705,10 @@ Datum gs_paxos_stat_replication(PG_FUNCTION_ARGS)
     SpinLockAcquire(&hashmdata->mutex);
     localRole = hashmdata->current_mode;
     SpinLockRelease(&hashmdata->mutex);
-    sndWrite = GetXLogWriteRecPtr();
-    sndCommit = GetPaxosConsensusRecPtr();
     sndFlush = GetFlushRecPtr();
     sndReplay = sndFlush;
+    sndCommit = GetPaxosConsensusRecPtr();
+    sndWrite = GetPaxosWriteRecPtr();

     /* local role */
     values[j++] = CStringGetTextDatum(wal_get_role_string(localRole));
@@ -5276,7 +5845,7 @@ Datum pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
     for (i = 0; i < g_instance.attr.attr_storage.max_wal_senders; i++) {
         /* use volatile pointer to prevent code rearrangement */
         volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[i];
-        char location[MAXFNAMELEN] = {0};
+        char location[MAXFNAMELEN * 3] = {0};
         XLogRecPtr sentRecPtr, local_write;
         XLogRecPtr flush, apply;
         WalSndState state;
@@ -5658,6 +6227,7 @@ static void WalSndResponseSwitchover(char *msgbuf)
 {
     PrimarySwitchResponseMessage response_message;
     volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;
+    int maxWalSenders = g_instance.attr.attr_storage.max_wal_senders;
     errno_t errorno = EOK;

     if (walsnd == NULL)
@@ -5668,8 +6238,14 @@ static void WalSndResponseSwitchover(char *msgbuf)
             response_message.switchResponse = SWITCHOVER_PROMOTE_REQUEST;
             /* clean view data.
*/ int rc; - rc = memset_s(&(g_instance.rto_cxt), sizeof(knl_g_rto_context), 0, sizeof(knl_g_rto_context)); + rc = memset_s(g_instance.rto_cxt.rto_standby_data, sizeof(RTOStandbyData) * maxWalSenders, + 0, sizeof(RTOStandbyData) * maxWalSenders); securec_check(rc, "", ""); +#ifndef ENABLE_MULTIPLE_NODES + rc = memset_s(&(g_instance.rto_cxt.dcf_rto_standby_data), sizeof(RTOStandbyData) * DCF_MAX_NODE_NUM, + 0, sizeof(RTOStandbyData) * DCF_MAX_NODE_NUM); + securec_check(rc, "", ""); +#endif break; case NODESTATE_PRIMARY_DEMOTING_WAIT_CATCHUP: response_message.switchResponse = SWITCHOVER_DEMOTE_CATCHUP_EXIST; @@ -5770,7 +6346,7 @@ static void SetHaWalSenderChannel() errno_t rc = 0; if (laddr->sa_family == AF_INET6) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in *)laddr)->sin_addr, 128, local_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *)laddr)->sin6_addr, 128, local_ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed"))); } @@ -5782,7 +6358,7 @@ static void SetHaWalSenderChannel() } if (raddr->sa_family == AF_INET6) { - result = inet_net_ntop(AF_INET6, &((struct sockaddr_in *)raddr)->sin_addr, 128, remote_ip, IP_LEN); + result = inet_net_ntop(AF_INET6, &((struct sockaddr_in6 *)raddr)->sin6_addr, 128, remote_ip, IP_LEN); if (result == NULL) { ereport(WARNING, (errmsg("inet_net_ntop failed"))); } @@ -5797,11 +6373,22 @@ static void SetHaWalSenderChannel() rc = strncpy_s((char *)walsnd->wal_sender_channel.localhost, IP_LEN, local_ip, IP_LEN - 1); securec_check(rc, "\0", "\0"); walsnd->wal_sender_channel.localhost[IP_LEN - 1] = '\0'; - walsnd->wal_sender_channel.localport = ntohs(((struct sockaddr_in *)laddr)->sin_port); + + if (laddr->sa_family == AF_INET6) { + walsnd->wal_sender_channel.localport = ntohs(((struct sockaddr_in6 *)laddr)->sin6_port); + } else if (laddr->sa_family == AF_INET) { + walsnd->wal_sender_channel.localport = ntohs(((struct sockaddr_in *)laddr)->sin_port); + } rc = strncpy_s((char *)walsnd->wal_sender_channel.remotehost, IP_LEN, remote_ip, IP_LEN - 1); securec_check(rc, "\0", "\0"); walsnd->wal_sender_channel.remotehost[IP_LEN - 1] = '\0'; - walsnd->wal_sender_channel.remoteport = ntohs(((struct sockaddr_in *)raddr)->sin_port); + + if (raddr->sa_family == AF_INET6) { + walsnd->wal_sender_channel.remoteport = ntohs(((struct sockaddr_in6 *)raddr)->sin6_port); + } else if (raddr->sa_family == AF_INET) { + walsnd->wal_sender_channel.remoteport = ntohs(((struct sockaddr_in *)raddr)->sin_port); + } + SpinLockRelease(&walsnd->mutex); if (IS_PGXC_DATANODE) { @@ -5817,6 +6404,8 @@ static bool UpdateHaWalSenderChannel(int ha_remote_listen_port) volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd; bool is_found = false; int i = 0; + char* ipNoZone = NULL; + char ipNoZoneData[IP_LEN] = {0}; for (i = 1; i < DOUBLE_MAX_REPLNODE_NUM; i++) { ReplConnInfo *replconninfo = nullptr; @@ -5828,7 +6417,10 @@ static bool UpdateHaWalSenderChannel(int ha_remote_listen_port) if (replconninfo == NULL) continue; - if (strncmp((char *)replconninfo->remotehost, (char *)walsnd->wal_sender_channel.remotehost, IP_LEN) == 0 && + /* remove any '%zone' part from an IPv6 address string */ + ipNoZone = remove_ipv6_zone(replconninfo->remotehost, ipNoZoneData, IP_LEN); + + if (strncmp((char *)ipNoZone, (char *)walsnd->wal_sender_channel.remotehost, IP_LEN) == 0 && replconninfo->remoteport == ha_remote_listen_port) { SpinLockAcquire(&walsnd->mutex); walsnd->channel_get_replc = i; @@ -6047,7 +6639,7 @@ void 
GetFastestReplayStandByServiceAddress(char *fastest_remote_address, char *s
     if (!XLogRecPtrIsInvalid(fastest_replay)) {
         volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[fastest];
         SpinLockAcquire(&walsnd->mutex);
-        rc = snprintf_s(fastest_remote_address, address_len, (address_len - 1), "%s:%d",
+        rc = snprintf_s(fastest_remote_address, address_len, (address_len - 1), "%s@%d",
                         walsnd->wal_sender_channel.remotehost, walsnd->wal_sender_channel.remoteport);
         SpinLockRelease(&walsnd->mutex);

@@ -6058,7 +6650,7 @@ void GetFastestReplayStandByServiceAddress(char *fastest_remote_address, char *s
     if (!XLogRecPtrIsInvalid(second_fastest_replay)) {
         volatile WalSnd *walsnd = &t_thrd.walsender_cxt.WalSndCtl->walsnds[second_fastest];
         SpinLockAcquire(&walsnd->mutex);
-        rc = snprintf_s(second_fastest_remote_address, address_len, (address_len - 1), "%s:%d",
+        rc = snprintf_s(second_fastest_remote_address, address_len, (address_len - 1), "%s@%d",
                         walsnd->wal_sender_channel.remotehost, walsnd->wal_sender_channel.remoteport);
         SpinLockRelease(&walsnd->mutex);

@@ -6227,7 +6819,7 @@ static void WalSndHadrSwitchoverRequest()
     HadrSwitchoverMessage hadrSwithoverMessage;
     errno_t errorno = EOK;

-    TimestampTz lastRequestTimestamp = g_instance.streaming_dr_cxt.lastRequestTimestamp;
+    TimestampTz lastRequestTimestamp = t_thrd.walsender_cxt.MyWalSnd->lastRequestTimestamp;
     int interval = ComputeTimeStamp(lastRequestTimestamp);

     /* Send at most one request every 5 seconds */
@@ -6235,7 +6827,7 @@ static void WalSndHadrSwitchoverRequest()
         return;
     }

-    g_instance.streaming_dr_cxt.lastRequestTimestamp = GetCurrentTimestamp();
+    t_thrd.walsender_cxt.MyWalSnd->lastRequestTimestamp = GetCurrentTimestamp();

     /* Construct a new message */
     hadrSwithoverMessage.switchoverBarrierLsn = g_instance.streaming_dr_cxt.switchoverBarrierLsn;
@@ -6255,6 +6847,7 @@ static void ProcessHadrSwitchoverMessage()
 {
     HadrSwitchoverMessage hadrSwithoverMessage;
     XLogRecPtr switchoverBarrierLsn;
+    volatile WalSnd *walsnd = t_thrd.walsender_cxt.MyWalSnd;

     pq_copymsgbytes(t_thrd.walsender_cxt.reply_message, (char*)&hadrSwithoverMessage,
         sizeof(HadrSwitchoverMessage));
@@ -6263,14 +6856,28 @@ static void ProcessHadrSwitchoverMessage()
     /* Whether the interaction between the main cluster and the disaster recovery cluster is completed */
     if (g_instance.streaming_dr_cxt.switchoverBarrierLsn != InvalidXLogRecPtr &&
         XLByteEQ(g_instance.streaming_dr_cxt.switchoverBarrierLsn, switchoverBarrierLsn)) {
-        g_instance.streaming_dr_cxt.isInteractionCompleted = true;
+        SpinLockAcquire(&walsnd->mutex);
+        walsnd->isInteractionCompleted = true;
+        SpinLockRelease(&walsnd->mutex);
     }

     ereport(LOG, (errmsg("ProcessHadrSwitchoverMessage: target switchover barrier lsn %X/%X, "
-                         "receive switchover barrier lsn %X/%X",
+                         "receive switchover barrier lsn %X/%X, isInteractionCompleted %d",
                          (uint32)(g_instance.streaming_dr_cxt.switchoverBarrierLsn >> 32),
                          (uint32)(g_instance.streaming_dr_cxt.switchoverBarrierLsn),
-                         (uint32)(switchoverBarrierLsn >> 32), (uint32)(switchoverBarrierLsn))));
+                         (uint32)(switchoverBarrierLsn >> 32), (uint32)(switchoverBarrierLsn),
+                         walsnd->isInteractionCompleted)));
+}
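The 5-second throttle in WalSndHadrSwitchoverRequest() above generalizes to a small pattern. A self-contained sketch using std::chrono in place of TimestampTz (names are illustrative, not the openGauss helpers):

```
#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

/* Send at most one request per interval; returns true if one was sent. */
static bool MaybeSendRequest(Clock::time_point &lastRequest, int intervalSec)
{
    auto now = Clock::now();
    if (std::chrono::duration_cast<std::chrono::seconds>(now - lastRequest).count() < intervalSec) {
        return false; /* still inside the quiet period */
    }
    lastRequest = now;
    printf("request sent\n");
    return true;
}

int main()
{
    Clock::time_point last = Clock::time_point{}; /* epoch: first call always fires */
    MaybeSendRequest(last, 5); /* sent */
    MaybeSendRequest(last, 5); /* suppressed */
    return 0;
}
```

Moving the timestamp from g_instance to the per-walsender slot, as the hunk does, makes the throttle per-connection instead of instance-wide.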
+
+static void ProcessHadrReplyMessage()
+{
+    HadrReplyMessage hadrReply;
+    pq_copymsgbytes(t_thrd.walsender_cxt.reply_message, (char*)&hadrReply, sizeof(HadrReplyMessage));
+    SpinLockAcquire(&g_instance.streaming_dr_cxt.mutex);
+    int rc = strncpy_s((char *)g_instance.streaming_dr_cxt.targetBarrierId, MAX_BARRIER_ID_LENGTH,
+        hadrReply.targetBarrierId, MAX_BARRIER_ID_LENGTH - 1);
+    securec_check(rc, "\0", "\0");
+    SpinLockRelease(&g_instance.streaming_dr_cxt.mutex);
+}
diff --git a/src/gausskernel/storage/roach/parse_util.c b/src/gausskernel/storage/roach/parse_util.c
new file mode 100644
index 000000000..d0505c656
--- /dev/null
+++ b/src/gausskernel/storage/roach/parse_util.c
@@ -0,0 +1,1471 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * parse_util.c
+ *    roach parse method definitions.
+ *
+ * IDENTIFICATION
+ *    src/gausskernel/storage/roach/parse_util.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include
+#include
+#include "parse_util.h"
+
+/**
+* @brief get configuration from the cfg file.
+* @param path
+* @param options
+* @param bParsePostgres
+* @return

+* @exception None

+* @date 2015/3/9
+*/
+ERROR_CODE roachReadopt(const char* path, roach_option opts[], size_t optsNum, bool bParsePostgres)
+{
+    FILE* fp = NULL;
+    char buf[MAX_BUF_SZ] = {0};
+    char key[MAX_BUF_SZ] = {0};
+    char value[MAX_BUF_SZ] = {0};
+
+    /* If no options were passed
+     */
+    if (!opts) {
+        return EC_SUCCESS;
+    }
+
+    /* Open the cfg file
+     */
+    if ((fp = FOPEN(path, "rb")) == NULL) {
+        LOGERROR("could not open file \"%s\": %s", path, gs_strerror(errno));
+        return EC_FILE_OPEN_FAILED;
+    }
+
+    /* Read the configuration from the file
+     */
+    while (fgets(buf, lengthof(buf), fp)) {
+        size_t i;
+
+        for (i = strlen(buf); i > 0 && isspace(buf[i - 1]); i--) {
+            buf[i - 1] = '\0';
+        }
+
+        /* Parse the configuration content
+         */
+        if (parse_pair(buf, key, value, MAX_BUF_SZ, MAX_BUF_SZ, MAX_BUF_SZ)) {
+            for (i = 0; (i < optsNum) && opts[i].type; i++) {
+                roach_option* opt = &opts[i];
+
+                /* Check if the key matches this option
+                 */
+                if (key_equals(key, opt->lname)) {
+                    if (opt->allowed == SOURCE_DEFAULT || opt->allowed > SOURCE_FILE) {
+                        LOGERROR("option %s cannot be specified in file %s", opt->lname, path);
+                        fclose(fp);
+                        return EC_INVALID_ARGUMENT;
+                    } else if (opt->source <= SOURCE_FILE) {
+                        /* Get the configuration value
+                         */
+                        if (getArgvValueForOption(opt, value, SOURCE_FILE) != EC_SUCCESS) {
+                            fclose(fp);
+                            return EC_INVALID_ARGUMENT;
+                        }
+                    }
+
+                    break;
+                }
+            }
+        }
+    }
+
+    fclose(fp);
+    return EC_SUCCESS;
+}
+
+/**
+ * parse_next_token_nonspl
+ * @brief get the token from the string (other than special control chars)
+ * @param src
+ * @param dst
+ * @param i
+ * @param j
+ * @return
+ * */
+void parse_next_token_nonspl(const char* src, char* dst, size_t i, size_t j)
+{
+    switch (src[i]) {
+        case '0':
+        case '1':
+        case TWO:
+        case THREE:
+        case FOUR:
+        case FIVE:
+        case SIX:
+        case SEVEN: {
+            size_t k;
+            uint64_t octVal = 0;
+
+            for (k = 0; src[i + k] >= '0' && src[i + k] <= SEVEN && k < SHIFT_THREE; k++) {
+                octVal = (octVal << SHIFT_THREE) + (src[i + k] - '0');
+            }
+
+            i += k - 1;
+            if (octVal < 128) {
+                dst[j] = ((char)octVal);
+            }
+        } break;
+        default:
+            dst[j] = src[i];
+            break;
+    }
+
+    UNUSED(i);
+}
+/**
+ * parse_next_token
+ * @brief get the token from the string
+ * @param src
+ * @param dst
+ * @param i
+ * @param j
+ * @return
+ * */
+void parse_next_token(const char* src, char* dst, size_t i, size_t j)
+{
+    if (src[i] == 'b') {
+        dst[j] = '\b';
+    } else if (src[i] == 'f') {
+        dst[j] = '\f';
+    } else if (src[i] == 'n') {
+        dst[j] = '\n';
+    } else if (src[i] == 'r') {
+        dst[j] = '\r';
+    } else if (src[i] == 't') {
+        dst[j] = '\t';
+    } else {
+        parse_next_token_nonspl(src, dst, i, j);
+    }
+
+    UNUSED(i);
+}
+
+/**
+* @brief parse the key and value
+* @param buffer
+* @param key
+* @param value
+* @return

+* @exception None

+* @date 2015/3/9
+*/
+bool parse_pair(const char buffer[], char key[], char value[], size_t lenBuf, size_t lenKey, size_t lenValue)
+{
+    const char* start = NULL;
+    const char* end = NULL;
+    int nRet = 0;
+    size_t len;
+    key[0] = value[0] = '\0';
+
+    /*
+     * parse key
+     */
+    start = buffer;
+    len = strcspn(start, "=# \n\r\t\v");
+    if (len > lenKey) {
+        LOGERROR("Key length %lu exceeds the maximum length allowed", len);
+        return false;
+    }
+    if (*(start = skip_space(start)) == '\0') {
+        return false;
+    }
+
+    end = start + strcspn(start, "=# \n\r\t\v");
+    /* skip blank buffer */
+    if (end - start <= 0) {
+        if (*start == '=') {
+            LOGERROR("syntax error in \"%s\"", buffer);
+        }
+
+        return false;
+    }
+
+    /* key found */
+    nRet = strncpy_s(key, lenKey, start, (size_t)(end - start));
+    securec_check_c(nRet, "\0", "\0");
+    key[end - start] = '\0';
+
+    /*
+     * The fgets() call bounds the amount of data read, so end - start is
+     * always less than MAX_BUF_SZ and no overlap can occur.
+     */
+
+    /* find the key/value split char */
+    if (*(start = skip_space(end)) == '\0') {
+        return false;
+    }
+
+    if (*start != '=') {
+        LOGERROR("syntax error in \"%s\"", buffer);
+        return false;
+    }
+
+    start++;
+
+    /*
+     * parse value
+     */
+    if (*(start = skip_space(start)) == '\0') {
+        return false;
+    }
+
+    len = strcspn(start, "# \n\r\t\v");
+    if (len > lenValue) {
+        LOGERROR("Value length %lu exceeds the maximum length allowed", len);
+        return false;
+    }
+    if ((end = get_next_token(start, value, lenValue, buffer)) == NULL) {
+        return false;
+    }
+
+    if (*(start = skip_space(end)) == '\0') {
+        return false;
+    }
+
+    if (*start != '\0' && *start != '#') {
+        LOGERROR("syntax error in \"%s\"", buffer);
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * parseBool
+ *    Try to interpret value as a boolean value. Valid values are: true,
+ *    false, yes, no, on, off, 1, 0; as well as unique prefixes thereof.
+ *    If the string parses okay, return true, else false.
+ *    If okay and result is not NULL, return the value in *result.
+ * @param value
+ * @param result
+ * @return
+ */
+bool parseBool(const char* value, bool* result)
+{
+    return parseBoolWithLen(value, strlen(value), result);
+}
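A hypothetical harness, not part of the patch, showing parse_pair()'s contract on a typical configuration line; it assumes parse_util.h and its MAX_BUF_SZ constant.

```
#include <cstdio>
#include "parse_util.h"

int main()
{
    char key[MAX_BUF_SZ] = {0};
    char value[MAX_BUF_SZ] = {0};

    /* Blanks around '=' are skipped, '#' starts a comment, and quoting
     * (including doubled quotes and backslash escapes) is handled by
     * get_next_token(). */
    if (parse_pair("media-type = 'OBS'  # backup target",
                   key, value, MAX_BUF_SZ, MAX_BUF_SZ, MAX_BUF_SZ)) {
        printf("key=%s value=%s\n", key, value); /* key=media-type value=OBS */
    }
    return 0;
}
```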
+/**
+ * parseBoolWithLen
+ *
+ * Parse and get the boolean values specified in the conf file.
+ * Supports T/F, t/f, Y/N, y/n, on/off, ON/OFF, 1/0
+ * @param value - pointer to the boolean string from the conf file
+ * @param len - length of the boolean value in the buffer
+ * @param result - boolean result
+ * @return - return boolean value.
+ */
+bool parseBoolWithLen(const char* value, size_t len, bool* result)
+{
+    if (parseBoolCheckTF(value, len, result)) {
+        return true;
+    }
+
+    if (parseBoolCheckYN(value, len, result)) {
+        return true;
+    }
+
+    if (parseBoolCheckOnOff(value, len, result)) {
+        return true;
+    }
+
+    if (parseBoolCheckOneZero(value, len, result)) {
+        return true;
+    }
+
+    if (result != NULL) {
+        *result = false; /* suppress compiler warning */
+    }
+
+    return false;
+}
+
+/**
+ * parseBoolCheckTF
+ *
+ * Check "True/False" code, split out from "parseBoolWithLen"
+ * @param value - pointer to the boolean string from the conf file
+ * @param len - length of the boolean value in the buffer
+ * @param result - boolean result
+ * @return - return boolean value.
+ */
+bool parseBoolCheckTF(const char* value, size_t len, bool* result)
+{
+    if (strNCaseCmp(value, "true", len) == 0) {
+        if (result != NULL) {
+            *result = true;
+        }
+
+        return true;
+    }
+
+    if (strNCaseCmp(value, "false", len) == 0) {
+        if (result != NULL) {
+            *result = false;
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+/**
+ * parseBoolCheckYN
+ *
+ * Check "Yes/No" code, split out from "parseBoolWithLen"
+ * @param value - pointer to the boolean string from the conf file
+ * @param len - length of the boolean value in the buffer
+ * @param result - boolean result
+ * @return - return boolean value.
+ */
+bool parseBoolCheckYN(const char* value, size_t len, bool* result)
+{
+    if (strNCaseCmp(value, "yes", len) == 0) {
+        if (result != NULL) {
+            *result = true;
+        }
+
+        return true;
+    }
+
+    if (strNCaseCmp(value, "no", len) == 0) {
+        if (result != NULL) {
+            *result = false;
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
+/**
+ * parseBoolCheckOnOff
+ *
+ * Check "On/Off" code, split out from "parseBoolWithLen"
+ * @param value - pointer to the boolean string from the conf file
+ * @param len - length of the boolean value in the buffer
+ * @param result - boolean result
+ * @return - return boolean value.
+ */
+bool parseBoolCheckOnOff(const char* value, size_t len, bool* result)
+{
+    if (strNCaseCmp(value, "on", (len > OPT_VALUE_LEN_TWO ? len : OPT_VALUE_LEN_TWO)) == 0) {
+        if (result != NULL) {
+            *result = true;
+        }
+
+        return true;
+    } else if (strNCaseCmp(value, "off", (len > OPT_VALUE_LEN_TWO ? len : OPT_VALUE_LEN_TWO)) == 0) {
+        if (result != NULL) {
+            *result = false;
+        }
+
+        return true;
+    }
+
+    return false;
+}
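Usage sketch for the boolean parsers above; it assumes strNCaseCmp() behaves like strncasecmp() and is illustrative rather than part of the patch.

```
#include <cstdio>
#include "parse_util.h"

int main()
{
    bool b = false;
    printf("%d\n", parseBool("on", &b) && b);   /* 1: "on" -> true  */
    printf("%d\n", parseBool("No", &b) && !b);  /* 1: "No" -> false */
    printf("%d\n", parseBool("1", &b) && b);    /* 1: "1"  -> true  */
    printf("%d\n", parseBool("maybe", &b));     /* 0: not a recognized boolean */
    return 0;
}
```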
+/**
+ * parseBoolCheckOneZero
+ *
+ * Check "1/0" code, split out from "parseBoolWithLen"
+ * @param value - pointer to the boolean string from the conf file
+ * @param len - length of the boolean value in the buffer
+ * @param result - boolean result
+ * @return - return boolean value.
+ */
+bool parseBoolCheckOneZero(const char* value, size_t len, bool* result)
+{
+    if (*value == '1') {
+        if (len == 1) {
+            if (result != NULL) {
+                *result = true;
+            }
+            return true;
+        }
+    } else if (*value == '0') {
+        if (len == 1) {
+            if (result != NULL) {
+                *result = false;
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/**
+ * parseLoggingLevel
+ *    Parse the logging level
+ * @param value
+ * @return
+ */
+ROACH_LOG_LEVEL parseLoggingLevel(const char* value)
+{
+    const char* v = value;
+
+    while (isspace(*v)) {
+        v++;
+    }
+
+    if (strNCaseCmp(LOGGING_LEVEL_FATAL, v, sizeof(LOGGING_LEVEL_FATAL)) == 0) {
+        return ROACH_LOG_FATAL;
+    } else if (strNCaseCmp(LOGGING_LEVEL_ERROR, v, sizeof(LOGGING_LEVEL_ERROR)) == 0) {
+        return ROACH_LOG_ERROR;
+    } else if (strNCaseCmp(LOGGING_LEVEL_WARNING, v, sizeof(LOGGING_LEVEL_WARNING)) == 0) {
+        return ROACH_LOG_WARNING;
+    } else if (strNCaseCmp(LOGGING_LEVEL_INFO, v, sizeof(LOGGING_LEVEL_INFO)) == 0) {
+        return ROACH_LOG_INFO;
+    } else if (strNCaseCmp(LOGGING_LEVEL_DEBUG, v, sizeof(LOGGING_LEVEL_DEBUG)) == 0) {
+        return ROACH_LOG_DEBUG;
+    } else if (strNCaseCmp(LOGGING_LEVEL_DEBUG2, v, sizeof(LOGGING_LEVEL_DEBUG2)) == 0) {
+        return ROACH_LOG_DEBUG2;
+    }
+
+    LOGERROR("Invalid logging level specified %s\n", value);
+
+    return ROACH_LOG_NONE;
+}
+
+/**
+ * @brief parse the backup type
+ * @param value
+ *
+ * @return
+ */
+BACKUP_MODE_TYPE parseBackupType(const char* value)
+{
+    const char* v = value;
+
+    while (isspace(*v)) {
+        v++;
+    }
+    if (strNCaseCmp(BACKUP_TYPE_FULL, v, sizeof(BACKUP_TYPE_FULL)) == 0) {
+        return BACKUP_MODE_FULL;
+    } else if (strNCaseCmp(BACKUP_TYPE_LOGICAL_MTABLE, v, sizeof(BACKUP_TYPE_LOGICAL_MTABLE)) == 0) {
+        return BACKUP_MODE_LOGICAL_MTABLE;
+    } else if (strNCaseCmp(BACKUP_TYPE_TABLE, v, sizeof(BACKUP_TYPE_TABLE)) == 0) {
+        return BACKUP_MODE_TABLE;
+    } else if (strNCaseCmp(BACKUP_TYPE_INCREMENTAL, v, sizeof(BACKUP_TYPE_INCREMENTAL)) == 0) {
+        return BACKUP_MODE_INCREMENTAL;
+    } else {
+        LOGERROR("Invalid backup type option \"%s\"", value);
+        return BACKUP_MODE_INVALID;
+    }
+}
+
+/**
+ * parseMediaType
+ *    Parse the media type
+ * @param media
+ * @return
+ */
+ROACH_MEDIA_TYPE parseMediaType(const char* media)
+{
+    const char* v = media;
+
+    while (isspace(*v)) {
+        v++;
+    }
+
+    if (strNCaseCmp(MEDIA_DISK, v, sizeof(MEDIA_DISK)) == 0) {
+        return ROACH_MEDIA_TYPE_DISK;
+    } else if (strNCaseCmp(MEDIA_NBU, v, sizeof(MEDIA_NBU)) == 0) {
+        return ROACH_MEDIA_TYPE_NBU;
+    } else if (strNCaseCmp(MEDIA_OBS, v, sizeof(MEDIA_OBS)) == 0) {
+        return ROACH_MEDIA_TYPE_OBS;
+    } else if (strNCaseCmp(MEDIA_EISOO, v, sizeof(MEDIA_EISOO)) == 0) {
+        return ROACH_MEDIA_TYPE_EISOO;
+    } else if (strNCaseCmp(MEDIA_NAS, v, sizeof(MEDIA_NAS)) == 0) {
+        return ROACH_MEDIA_TYPE_NAS;
+    } else {
+        return ROACH_MEDIA_INVALID;
+    }
+}
+
+/**
+ * parseValidationType
+ *    Parse the validation type
+ * @param media
+ * @return
+ */
+ROACH_VALIDATION_TYPE parseValidationType(const char* media)
+{
+    const char* v = media;
+
+    while (isspace(*v)) {
+        v++;
+    }
+
+    if (strNCaseCmp(VALIDATION_TYPE_FULL, v, sizeof(VALIDATION_TYPE_FULL)) == 0) {
+        clioptions.enableCrc = true;
+        return ROACH_VALIDATION_TYPE_FULL;
+    } else if (strNCaseCmp(VALIDATION_TYPE_PARTIAL, v, sizeof(VALIDATION_TYPE_PARTIAL)) == 0) {
+        clioptions.enableCrc = false;
+        return ROACH_VALIDATION_TYPE_PARTIAL;
+    } else {
+        return ROACH_VALIDATION_INVALID;
+    }
+}
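A short, hypothetical harness for the enum parsers above; the MEDIA_* and BACKUP_TYPE_* string constants come from parse_util.h. Note that parseValidationType() also sets clioptions.enableCrc as a side effect, which a caller-side reviewer should keep in mind.

```
#include <cstdio>
#include "parse_util.h"

int main()
{
    /* Leading whitespace is skipped; matching is case-insensitive. */
    if (parseMediaType("  obs") == ROACH_MEDIA_TYPE_OBS) {
        printf("media: OBS\n");
    }
    if (parseBackupType("full") == BACKUP_MODE_FULL) {
        printf("backup: full\n");
    }
    /* Unknown strings map to the *_INVALID sentinel rather than erroring out. */
    if (parseMediaType("tape") == ROACH_MEDIA_INVALID) {
        printf("media: invalid\n");
    }
    return 0;
}
```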
+/*
+ * parseInt32
+ *    Parse string as 32-bit signed int.
+ *    valid range: -2147483648 ~ 2147483647
+ * @param value
+ * @param result
+ * @param uiLeastValue
+ */
+bool parseInt32(const char* value, int* result, int uiLeastValue, bool bCheckChar)
+{
+    int64_t val;
+    char* endptr = NULL;
+
+    if (roachStrCmp(value, INFINITE_STR) == 0) {
+        *result = INT_MAX;
+        return true;
+    }
+
+    errno = 0;
+    val = strtol(value, &endptr, 0);
+    if (val < uiLeastValue) {
+        errno = ERANGE;
+    }
+
+    if (endptr == value || (bCheckChar && *endptr)) {
+        return false;
+    }
+
+    if (errno == ERANGE || val != (int64_t)((int32_t)val)) {
+        return false;
+    }
+
+    *result = (int32_t)val;
+
+    return true;
+}
+/*
+ * parseUInt32
+ *    Parse string as 32-bit unsigned int.
+ *    valid range: 0 ~ 4294967295
+ * @param value
+ * @param result
+ * @param uiLeastValue
+ */
+bool parseUInt32(const char* value, uint32_t* result, uint32_t uiLeastValue, bool bCheckChar)
+{
+    uint64_t val;
+    char* endptr = NULL;
+
+    if (roachStrCmp(value, INFINITE_STR) == 0) {
+        *result = UINT_MAX;
+        return true;
+    }
+
+    errno = 0;
+    val = strtoul(value, &endptr, 0);
+    if (val < uiLeastValue) {
+        errno = ERANGE;
+        return false;
+    }
+    if (endptr == value || (bCheckChar && *endptr)) {
+        return false;
+    }
+
+    *result = (unsigned int)val;
+
+    return true;
+}
+
+/*
+ * parseUInt64
+ *    Parse string as 64-bit unsigned int.
+ *    valid range: 0 ~ 18446744073709551615
+ * @param value
+ * @param result
+ * @param uiLeastValue
+ */
+bool parseUInt64(const char* value, uint64_t* result, uint64_t uiLeastValue, bool bCheckChar)
+{
+    uint64_t val;
+    char* endptr = NULL;
+
+    if (roachStrCmp(value, INFINITE_STR) == 0) {
+        *result = UINT64_MAX;
+        return true;
+    }
+
+    errno = 0;
+    val = strtoull(value, &endptr, 0);
+    if (val < uiLeastValue) {
+        errno = ERANGE;
+        return false;
+    }
+    if (endptr == value || (bCheckChar && *endptr)) {
+        return false;
+    }
+
+    *result = val;
+
+    return true;
+}
+
+/**
+* printMsgForInvalidValueSpecified
+* @brief Print error message
+* @param opt
+* @param optargs
+* @param message
+* @return

+* @exception None
+*/
+void printMsgForInvalidValueSpecified(roach_option* opt, const char* optargs, const char* message)
+{
+    if (isprint(opt->sname)) {
+        LOGERROR("option -%c, --%s should be %s: '%s'", opt->sname, opt->lname, message, optargs);
+    } else {
+        LOGERROR("option --%s should be %s: '%s'", opt->lname, message, optargs);
+    }
+}
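The integer parsers share one contract; the sketch below assumes the INFINITE_STR constant and declarations from parse_util.h, and is not part of the patch.

```
#include <cstdio>
#include "parse_util.h"

int main()
{
    int v = 0;
    printf("%d\n", parseInt32("42", &v, 0, true));     /* 1, v == 42 */
    printf("%d\n", parseInt32("42abc", &v, 0, true));  /* 0: trailing chars rejected when bCheckChar */
    printf("%d\n", parseInt32("42abc", &v, 0, false)); /* 1: trailing chars ignored otherwise */
    printf("%d\n", parseInt32("7", &v, 10, true));     /* 0: below uiLeastValue */

    uint64_t u = 0;
    printf("%d\n", parseUInt64(INFINITE_STR, &u, 0, true)); /* 1, u == UINT64_MAX */
    return 0;
}
```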
+ERROR_CODE FinalizeOptionValue(INSTANCE_OPERATIONS operation)
+{
+    ERROR_CODE ec = EC_SUCCESS;
+
+    if (clioptions.bufblksize == 0) {
+        if (clioptions.mediaType == ROACH_MEDIA_TYPE_OBS) {
+            clioptions.bufblksize = DEFAULT_BUFFER_BLK_SZ_OBS;
+        } else {
+            clioptions.bufblksize = DEFAULT_BUFFER_BLK_SZ;
+        }
+    }
+
+    if (clioptions.bufferSize == 0) {
+        if (clioptions.mediaType == ROACH_MEDIA_TYPE_OBS) {
+            clioptions.bufferSize = DEFAULT_BUFFER_SZ_OBS;
+        } else {
+            clioptions.bufferSize = DEFAULT_BUFFER_SZ;
+        }
+    }
+
+    /* if not provided, use a backup-key specific replication slot name for backup */
+    if (clioptions.slotName == NULL && clioptions.backupKey != NULL) {
+        int length;
+        char * tmpSlotName = NULL;
+        int nRet;
+
+        if (operation == BACKUP && clioptions.priorBackupKey != NULL) {
+            length = strlen(ROACH_INC_BAK_SLOT) + strlen("_") + strlen(clioptions.backupKey) + STRTERMINATIONLEN;
+            tmpSlotName = (char *)MALLOC0(length);
+            if (tmpSlotName == NULL) {
+                LOGERROR("Failed to allocate memory for slot name");
+                return EC_MEMALLOC_FAILURE;
+            }
+
+            nRet = snprintf_s(tmpSlotName, length, length - 1,
+                "%s_%s", ROACH_INC_BAK_SLOT, clioptions.backupKey);
+            securec_check_ss_c(nRet, "", "");
+        } else {
+            length = strlen(ROACH_FULL_BAK_SLOT) + strlen("_") + strlen(clioptions.backupKey) + STRTERMINATIONLEN;
+            tmpSlotName = (char *)MALLOC0(length);
+            if (tmpSlotName == NULL) {
+                LOGERROR("Failed to allocate memory for slot name");
+                return EC_MEMALLOC_FAILURE;
+            }
+
+            nRet = snprintf_s(tmpSlotName, length, length - 1,
+                "%s_%s", ROACH_FULL_BAK_SLOT, clioptions.backupKey);
+            securec_check_ss_c(nRet, "", "");
+        }
+
+        clioptions.slotName = tmpSlotName;
+    }
+
+    return ec;
+}
+
+/**
+* getArgvValueForBufferOption
+* @brief Parse and assign the buffer value for the option
+* @param opt
+* @param optargs
+* @return

+* @exception None
+*/
+ERROR_CODE getArgvValueForBufferOption(roach_option* opt, const char* optargs)
+{
+    const char* message = NULL;
+    bool ret = true;
+
+    do {
+        ret = parseInt32(optargs, (int*)(opt->var), 0, true);
+
+        // buffer size should be in the range of 256 to 16384 (256MB to 16GB)
+        if (strCaseCmp(opt->lname, "buffer-size") == 0) {
+            if (!ret || (*(int*)opt->var < MIN_BUFFER_SZ) || (*(int*)opt->var > MAX_BUFFER_SZ)) {
+                message = "an integer (256 to 16384)";
+                break;
+            }
+        }
+        // buffer block size should be in the range of 8192 * clioptions.prefetchBlock(1-8192) to 256MB (268435456 bytes)
+        else if (strCaseCmp(opt->lname, "buffer-block-size") == 0) {
+            if (!ret || (*(int*)opt->var < (MIN_BUFFER_BLK_SZ * clioptions.prefetchBlock)) ||
+                (*(int*)opt->var > MAX_BUFFER_BLK_SZ)) {
+                message = "an integer (8192 * prefetch-block(1-8192) to 268435456)";
+                break;
+            }
+        } else /* if the option doesn't match the above */
+        {
+            return EC_OPTION_NOT_MATCH;
+        }
+
+        if (true == ret) {
+            return EC_SUCCESS;
+        }
+    } while (false);
+
+    printMsgForInvalidValueSpecified(opt, optargs, message);
+    return EC_INVALID_OPTION;
+}
+
+/**
+* getArgvValueForCpuOption
+* @brief Parse and assign the cpu-relinquish-time/size value for the option
+* @param opt
+* @param optargs
+* @return

+* @exception None
+*/
+ERROR_CODE getArgvValueForCpuOption(roach_option* opt, const char* optargs)
+{
+    const char* message = NULL;
+    bool ret = true;
+
+    do {
+        ret = parseInt32(optargs, (int*)(opt->var), 0, true);
+
+        // "cpu relinquish time" should be in the range of MIN_CPU_RELINQUISH_TIME to MAX_CPU_RELINQUISH_TIME
+        if (strCaseCmp(opt->lname, "cpu-relinquish-time") == 0) {
+            if (!ret || (*(int*)opt->var < MIN_CPU_RELINQUISH_TIME) || (*(int*)opt->var > MAX_CPU_RELINQUISH_TIME)) {
+                message = "an integer (0 to 3600)";
+                break;
+            }
+        }
+
+        // "cpu relinquish size" should be in the range of MIN_CPU_RELINQUISH_SIZE to MAX_CPU_RELINQUISH_SIZE
+        else if (strCaseCmp(opt->lname, "cpu-relinquish-size") == 0) {
+            if (!ret || (*(int*)opt->var < MIN_CPU_RELINQUISH_SIZE) || (*(int*)opt->var > MAX_CPU_RELINQUISH_SIZE)) {
+                message = "an integer (1 to 10000)";
+                break;
+            }
+        } else /* if the option doesn't match the above */
+        {
+            return EC_OPTION_NOT_MATCH;
+        }
+
+        if (true == ret) {
+            return EC_SUCCESS;
+        }
+    } while (false);
+
+    printMsgForInvalidValueSpecified(opt, optargs, message);
+    return EC_INVALID_OPTION;
+}
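A quick arithmetic sanity check of the buffer-block bounds above, assuming MIN_BUFFER_BLK_SZ is 8192 bytes and MAX_BUFFER_BLK_SZ is 268435456 bytes (both assumptions, inferred from the hint messages):

```
#include <cstdio>

int main()
{
    const long minBlk = 8192;        /* assumed MIN_BUFFER_BLK_SZ, bytes */
    const long maxBlk = 268435456;   /* assumed MAX_BUFFER_BLK_SZ = 256MB */
    const long prefetchBlock = 8192; /* largest allowed prefetch-block */

    /* the lower bound scales with prefetch-block: 8192 * 8192 = 64MB < 256MB */
    printf("lower bound: %ld bytes (%ld MB)\n",
           minBlk * prefetchBlock, minBlk * prefetchBlock / (1024 * 1024));
    printf("upper bound: %ld bytes (%ld MB)\n", maxBlk, maxBlk / (1024 * 1024));
    return 0;
}
```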
"resource-retry-count") == 0) { + if (!ret || (*(int*)opt->var < MIN_RESOURCE_RETRY_COUNT) || (*(int*)opt->var > MAX_RESOURCE_RETRY_COUNT)) { + message = "an integer (0 to 256)"; + break; + } + } + + // transaction commit time should be in the range of 60 to 60 * 60 * 10 + else if (strCaseCmp(opt->lname, "retry-wait-time") == 0) { + if (!ret || (*(int*)opt->var < MIN_RETRY_WAIT_TIME) || (*(int*)opt->var > MAX_RETRY_WAIT_TIME)) { + message = "an integer (1 to 3600)"; + break; + } + } else /* if option doesnt matach the above */ + { + return EC_OPTION_NOT_MATCH; + } + + if (true == ret) { + return EC_SUCCESS; + } + } while (false); + printMsgForInvalidValueSpecified(opt, optargs, message); + return EC_INVALID_OPTION; +} + +/** + * getCLIOptionsMasterPortAgentPortCmp + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to check "compression level", and "parallel process" , and "cbmRecycle level"options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getCLIOptionsCompLvlParallelProcCmp(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + + // compression level should be in the range of 0 to 9 + if (strCaseCmp(opt->lname, "compression-level") == 0) { + if (!ret || (*(int*)opt->var < 0) || (*(int*)opt->var > MAX_COMPRESSION_LEVEL)) { + *message = "an integer (0 to 9)"; + return EC_INVALID_OPTION; + } + } + + // parallel-process should be in the range of 1 to 128 + else if (strCaseCmp(opt->lname, "parallel-process") == 0) { + if (!ret || (*(int*)opt->var < 1) || (*(int*)opt->var > MAX_PROCESS)) { + *message = "an integer (1 to 32)"; + return EC_INVALID_OPTION; + } + } else { + return EC_OPTION_NOT_MATCH; + } + + return ec; +} + +/** + * getCLIOptionsMasterPortAgentPortCmp + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to check "master-port", and "agent-port" options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getCLIOptionsMasterPortAgentPortCmp(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + + if (strCaseCmp(opt->lname, "master-port") == 0 || strCaseCmp(opt->lname, "agent-port") == 0) { + if (!ret || (*(int*)opt->var < MIN_VALID_PORT) || (*(int*)opt->var > MAX_VALID_PORT)) { + *message = "an integer (1024 to 65535)"; + return EC_INVALID_OPTION; + } + } else { + ec = getCLIOptionsCompLvlParallelProcCmp(opt, message, ret); + if (ec != EC_SUCCESS) { + return ec; + } + } + + return ec; +} + +/** + * getArgvValueForCmnOptionMstPrtCmprParProc + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to check "master-port", "compression-level" ,"parallel-process" and cbm-recycle-level options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getArgvValueForCmnOptionMstPrtCmprParProc(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + UNUSED(message); + + ec = getCLIOptionsMasterPortAgentPortCmp(opt, message, ret); + if (ec != EC_SUCCESS) { + return ec; + } + + return EC_SUCCESS; +} + +/** + * getCLIOptionsRestoreBufferThresholdCmp + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to 
check "Restore Buffer Threshold" options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getCLIOptionsRestoreBufferThresholdCmp(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + + if (strCaseCmp(opt->lname, "restore-buffer-threshold") == 0) { + if (!ret || (*(int*)opt->var < MIN_AVAIL_BUFF_PERCENT) || (*(int*)opt->var > MAX_AVAIL_BUFF_PERCENT)) { + *message = "an integer (1 to 100)"; + return EC_INVALID_OPTION; + } + } else { /* if option doesnt matach the above */ + return EC_OPTION_NOT_MATCH; + } + + return ec; +} + +/** + * getCLIOptionsFileSplitSizeCmp + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to check "File Split size" options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getCLIOptionsFileSplitSizeCmp(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + + if (strCaseCmp(opt->lname, "filesplit-size") == 0) { + if (!ret || (*(int*)opt->var < MIN_SPLIT_FILE_SIZE) || (*(int*)opt->var > MAX_SPLIT_FILE_SIZE) || + ((*(int*)opt->var) % SPLIT_FILE_MULTIPLE_OF_SIZE != 0)) { + *message = "an integer (0 to 1024, multiple of 4)"; + return EC_INVALID_OPTION; + } + } else { + ec = getCLIOptionsRestoreBufferThresholdCmp(opt, message, ret); + if (ec != EC_SUCCESS) { + return ec; + } + } + + return ec; +} + +/** + * getCLIOptionsAfterThresholdCmp + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to check "After Threshold" options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getCLIOptionsAfterThresholdCmp(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + + // "getdata-waittime-afterthreshold" should be in the range of to + if (strCaseCmp(opt->lname, "getdata-waittime-afterthreshold") == 0) { + if (!ret || (*(int*)opt->var < MIN_GETDATA_WAITTIME_AFTERTHRESHOLD) || + (*(int*)opt->var > MAX_GETDATA_WAITTIME_AFTERTHRESHOLD)) { + *message = "an integer (0 to 1800000000)"; + return EC_INVALID_OPTION; + } + } else { + ec = getCLIOptionsFileSplitSizeCmp(opt, message, ret); + if (ec != EC_SUCCESS) { + return ec; + } + } + + return ec; +} + +/** + * getArgvValueForCmnOptionThrshldFileSplt + * @brief Code chunk derived from splitting up "getArgvValueForCmnOption" + * to check "getdata-waittime-afterthreshold", "filesplit-size" and + * "restore-buffer-threshold" options + * @param opt : Option struct pointer + * @param message : String to hold hint message + * @param ret : boolean retval from "parse" function + * @return success/failure + */ +ERROR_CODE getArgvValueForCmnOptionThrshldFileSplt(roach_option* opt, const char** message, bool ret) +{ + ERROR_CODE ec = EC_SUCCESS; + UNUSED(message); + + ec = getCLIOptionsAfterThresholdCmp(opt, message, ret); + if (ec != EC_SUCCESS) { + return ec; + } + + return EC_SUCCESS; +} + +/** +* getArgvValueForCmnOption +* @brief Parse and assign the value for option +* @param opt +* @param optargs +* @return + +* @exception None +*/ +ERROR_CODE getArgvValueForCmnOption(roach_option* opt, const char* optargs) +{ + const char* message = NULL; + bool ret = true; + ERROR_CODE ulRet = EC_BUTT; + + ret = 
+/**
+* getArgvValueForCmnOption
+* @brief Parse and assign the value for the option
+* @param opt
+* @param optargs
+* @return

+* @exception None
+*/
+ERROR_CODE getArgvValueForCmnOption(roach_option* opt, const char* optargs)
+{
+    const char* message = NULL;
+    bool ret = true;
+    ERROR_CODE ulRet = EC_BUTT;
+
+    ret = parseInt32(optargs, (int*)(opt->var), 0, true);
+    do {
+        ulRet = getArgvValueForCmnOptionMstPrtCmprParProc(opt, &message, ret);
+        if (ulRet == EC_INVALID_OPTION) {
+            break;
+        } else if (ulRet != EC_SUCCESS) {
+            ulRet = getArgvValueForCmnOptionThrshldFileSplt(opt, &message, ret);
+            if (ulRet == EC_INVALID_OPTION) {
+                break;
+            } else if (ulRet != EC_SUCCESS) {
+                return EC_OPTION_NOT_MATCH;
+            }
+        }
+        if (true == ret) {
+            return EC_SUCCESS;
+        }
+    } while (false);
+    printMsgForInvalidValueSpecified(opt, optargs, message);
+
+    return EC_INVALID_OPTION;
+}
+
+/**
+* getArgvValueForLogOption
+* @brief Parse and assign the log value for the option
+* @param opt
+* @param optargs
+* @return

+* @exception None
+*/
+ERROR_CODE getArgvValueForLogOption(roach_option* opt, const char* optargs)
+{
+    const char* message = NULL;
+    bool ret = true;
+
+    do {
+        ret = parseInt32(optargs, (int*)(opt->var), 0, true);
+
+        // log filesize should be in the range of 5 to 20
+        if (strCaseCmp(opt->lname, "log-filesize") == 0) {
+            if (!ret || (*(int*)opt->var < MIN_LOGFILE_SZ) || (*(int*)opt->var > MAX_LOGFILE_SZ)) {
+                message = "an integer (5 to 20)";
+                break;
+            }
+        }
+
+        // log filecount should be in the range of 5 to 1024
+        else if (strCaseCmp(opt->lname, "log-filecount") == 0) {
+            if (!ret || (*(int*)opt->var < MIN_LOGFILE_CNT) || (*(int*)opt->var > MAX_LOGFILE_CNT)) {
+                message = "an integer (5 to 1024)";
+                break;
+            }
+        } else /* if the option doesn't match the above */
+        {
+            return EC_OPTION_NOT_MATCH;
+        }
+
+        if (true == ret) {
+            return EC_SUCCESS;
+        }
+    } while (false);
+
+    printMsgForInvalidValueSpecified(opt, optargs, message);
+    return EC_INVALID_OPTION;
+}
+
+/**
+* getArgvValueForIntOption
+* @brief Parse and assign the integer value for the option
+* @param opt
+* @param optargs
+* @return

+* @exception None
+*/
+ERROR_CODE getArgvValueForIntOption(roach_option* opt, const char* optargs)
+{
+    const char* message = NULL;
+    bool ret = true;
+    ERROR_CODE ec = EC_SUCCESS;
+
+    ret = parseInt32(optargs, (int*)(opt->var), 0, true);
+    if (false == ret) {
+        message = "an integer (1 to 2147483647)";
+        printMsgForInvalidValueSpecified(opt, optargs, message);
+        return EC_INVALID_OPTION;
+    }
+
+    /* get value for common integer options */
+    ec = getArgvValueForCmnOption(opt, optargs);
+    if (ec != EC_OPTION_NOT_MATCH) {
+        return ec;
+    }
+
+    /* get value for all buffer options */
+    ec = getArgvValueForBufferOption(opt, optargs);
+    if (ec != EC_OPTION_NOT_MATCH) {
+        return ec;
+    }
+
+    /* get value for cpu relinquish options */
+    ec = getArgvValueForCpuOption(opt, optargs);
+    if (ec != EC_OPTION_NOT_MATCH) {
+        return ec;
+    }
+
+    /* get value for retry options */
+    ec = getArgvValueForRetryOption(opt, optargs);
+    if (ec != EC_OPTION_NOT_MATCH) {
+        return ec;
+    }
+
+    /* get value for log options */
+    ec = getArgvValueForLogOption(opt, optargs);
+    if (ec != EC_OPTION_NOT_MATCH) {
+        return ec;
+    }
+
+    return EC_SUCCESS;
+}
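For orientation, here is a hypothetical trace of getArgvValueForIntOption()'s dispatch order for one option; the values follow the ranges coded above, and the comment block is illustrative rather than executable test output.

```
/* Hypothetical dispatch trace for getArgvValueForIntOption(opt, "12")
 * where opt->lname == "log-filesize":
 *
 *   parseInt32("12")                  -> ok, generic bound check passes
 *   getArgvValueForCmnOption(...)     -> EC_OPTION_NOT_MATCH (not a common option)
 *   getArgvValueForBufferOption(...)  -> EC_OPTION_NOT_MATCH
 *   getArgvValueForCpuOption(...)     -> EC_OPTION_NOT_MATCH
 *   getArgvValueForRetryOption(...)   -> EC_OPTION_NOT_MATCH
 *   getArgvValueForLogOption(...)     -> EC_SUCCESS (5 <= 12 <= 20)
 *
 * An unknown integer option falls through every family and is accepted by the
 * final "return EC_SUCCESS;" with only the generic parseInt32 check applied.
 */
```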
specified"); + + return EC_INVALID_ARGUMENT; + } + + return EC_SUCCESS; + } + // Check Backup Type + else if (strNCaseCmp(opt->lname, "backup-type", strlen("backup-type")) == 0) { + clioptions.eBackupType = parseBackupType(optargs); + if (clioptions.eBackupType == BACKUP_MODE_INVALID) { + LOGERROR("Invalid backup type specified"); + return EC_INVALID_ARGUMENT; + } + return EC_SUCCESS; + } + /* Check Media Type */ + else if (strNCaseCmp(opt->lname, "media-type", strlen("media-type")) == 0) { + clioptions.mediaType = parseMediaType(optargs); + if (clioptions.mediaType == ROACH_MEDIA_INVALID) { + LOGERROR("Invalid media type specified"); + + return EC_INVALID_ARGUMENT; + } + + return EC_SUCCESS; + } + /* Check Validation Type */ + else if (strNCaseCmp(opt->lname, "validation-type", strlen("validation-type")) == 0) { + clioptions.validationType = parseValidationType(optargs); + if (clioptions.validationType == ROACH_VALIDATION_INVALID) { + LOGERROR("Invalid Validation Type Specified\n"); + + return EC_INVALID_ARGUMENT; + } + + return EC_SUCCESS; + } + /* Check Tablename only in Master machine */ + if (!clioptions.bMaster && strCaseCmp(opt->lname, "tablename") == 0) { + return EC_SUCCESS; + } + if (opt->source != SOURCE_DEFAULT) { + FREE(*(char**)opt->var); + } + + *(char**)opt->var = STRDUP(optargs); + if (*(char**)opt->var == NULL) { + LOGERROR("String duplication failed when get string option value"); + return EC_MEMALLOC_FAILURE; + } + + return EC_SUCCESS; +} + +/* compare two strings ignore cases and ignore -_ */ +bool key_equals(const char* lhs, const char* rhs) +{ + // Check the string + for (; *lhs && *rhs; lhs++, rhs++) { + if (strchr("-_ ", *lhs)) { + if (!strchr("-_ ", *rhs)) { + return false; + } + } else if (tolower(*lhs) != tolower(*rhs)) { // Check case insensitive + return false; + } + } + + return *lhs == '\0' && *rhs == '\0'; // all characters matching +} + +/** +* @brief assign the value to the passed option +* @param opt +* @param optarg +* @param src +* @return + +* @exception None + +* @date 2015/3/9 +*/ +ERROR_CODE getArgvValueForOption(roach_option* opt, const char* optargs, pgut_optsrc src) +{ + const char* message = NULL; + ERROR_CODE ulRet = EC_SUCCESS; + // if opt not specified + if (opt == NULL) { + LOGERROR("Invalid option provided"); + return EC_INVALID_OPTION; + } + + if (opt->source > src) { + /* high prior value has been set already. 
+/**
+* @brief assign the value to the passed option
+* @param opt
+* @param optargs
+* @param src
+* @return

+* @exception None

+* @date 2015/3/9
+*/
+ERROR_CODE getArgvValueForOption(roach_option* opt, const char* optargs, pgut_optsrc src)
+{
+    const char* message = NULL;
+    ERROR_CODE ulRet = EC_SUCCESS;
+    // if no option was specified
+    if (opt == NULL) {
+        LOGERROR("Invalid option provided");
+        return EC_INVALID_OPTION;
+    }
+
+    if (opt->source > src) {
+        /* a higher-priority value has already been set. */
+        return EC_SUCCESS;
+    } else {
+        /* can be overwritten if non-command line source */
+        opt->source = src;
+
+        switch (opt->type) {
+            /* get string arguments
+             */
+            case 's':
+                return getArgvValueForStringOption(opt, optargs);
+
+            /* get boolean arguments
+             */
+            case 'b':
+                ulRet = getBooleanArguments(opt, optargs);
+                if (ulRet != EC_SUCCESS) {
+                    message = "a boolean";
+                    break;
+                }
+
+                return EC_SUCCESS;
+
+            /* get integer arguments
+             */
+            case 'i':
+                return getArgvValueForIntOption(opt, optargs);
+
+            /*
+             * For the no-password option
+             */
+            case 'Y':
+                ulRet = getArgvValueForNoPassword(opt, optargs);
+                if (ulRet != EC_SUCCESS) {
+                    message = "a boolean";
+                    break;
+                }
+
+                return EC_SUCCESS;
+
+            /*
+             * For the password option
+             */
+            case 'y':
+                return getBooleanArguments(opt, optargs);
+
+            /* Invalid option type
+             */
+            default:
+                LOGERROR("invalid option type: %c", opt->type);
+                return EC_INVALID_OPTION; /* keep compiler quiet */
+        }
+    }
+
+    /* Print the error message
+     */
+    printMsgForInvalidValueSpecified(opt, optargs, message);
+    return EC_INVALID_OPTION;
+}
+
+ERROR_CODE getBooleanArguments(roach_option* opt, const char* optargs)
+{
+    if (optargs != NULL) {
+        if (parseBool(optargs, (bool*)opt->var)) {
+            return EC_SUCCESS;
+        }
+    } else {
+        *((bool*)opt->var) = true;
+        return EC_SUCCESS;
+    }
+
+    return EC_INVALID_OPTION;
+}
+
+ERROR_CODE getArgvValueForNoPassword(roach_option* opt, const char* optargs)
+{
+    if (optargs == NULL) {
+        *(YesNo*)opt->var = (opt->type == 'y' ? YES : NO);
+        return EC_SUCCESS;
+    } else {
+        bool value = false;
+        if (parseBool(optargs, &value)) {
+            *(YesNo*)opt->var = (value ? YES : NO);
+            return EC_SUCCESS;
+        }
+    }
+
+    return EC_INVALID_OPTION;
+}
+
+/**
+* skip_space
+* @brief skip leading whitespace in the input string
+* @param str
+* @return

+* @exception None

+* @date 2015/3/9
+*/
+const char* skip_space(const char* str)
+{
+    while (isspace(*str)) {
+        str++;
+    }
+
+    return str;
+}
+
+/**
+* get_next_token
+* @brief get the token from the string
+* @param src
+* @param dst
+* @param dstLen
+* @param line
+* @return

+* @exception None

+* @date 2015/3/9
+*/
+const char* get_next_token(const char* src, char* dst, size_t dstLen, const char* line)
+{
+    const char* s = NULL;
+    size_t i;
+    size_t j;
+    int nRet = 0;
+
+    if (*(s = skip_space(src)) == '\0') {
+        return NULL;
+    }
+
+    /*
+     * parse quoted string
+     */
+    if (*s == '\'') {
+        s++;
+        for (i = 0, j = 0; s[i] != '\0'; i++) {
+            /* Return NULL if dst length is insufficient. 
Reserve 1 byte for '\0' */ + if (j + 1 >= dstLen) { + return NULL; + } + if (s[i] == '\\') { + i++; + parse_next_token(s, dst, i, j); + } else if (s[i] == '\'') { + i++; + + /* doubled quote becomes just one quote */ + if (s[i] == '\'') { + dst[j] = s[i]; + } else { + break; + } + } else { + dst[j] = s[i]; + } + + j++; + } + } else { + i = j = strcspn(s, "# \n\r\t\v"); + nRet = memcpy_s(dst, dstLen - 1, s, j); + securec_check_c(nRet, "\0", "\0"); + } + + dst[j] = '\0'; + return s + i; +} diff --git a/src/gausskernel/storage/smgr/Makefile b/src/gausskernel/storage/smgr/Makefile index 46b25e0cf..caeaba334 100644 --- a/src/gausskernel/storage/smgr/Makefile +++ b/src/gausskernel/storage/smgr/Makefile @@ -9,7 +9,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif endif endif -OBJS = md.o smgr.o smgrtype.o knl_uundofile.o segstore.o page_compression.o mmap_shared.o +OBJS = md.o smgr.o smgrtype.o knl_uundofile.o segstore.o SUBDIRS = segment diff --git a/src/gausskernel/storage/smgr/knl_uundofile.cpp b/src/gausskernel/storage/smgr/knl_uundofile.cpp index 6196a8c7f..5cc6123e2 100644 --- a/src/gausskernel/storage/smgr/knl_uundofile.cpp +++ b/src/gausskernel/storage/smgr/knl_uundofile.cpp @@ -103,7 +103,13 @@ static inline uint32 UNDO_FILE_BLOCK(uint32 dbId) /* allocate UndoFileState memory. */ static UndoFileState *AllocUndoFileState(void) { - return (UndoFileState *)MemoryContextAlloc(u_sess->storage_cxt.UndoFileCxt, sizeof(UndoFileState)); + MemoryContext current; + if (EnableLocalSysCache()) { + current = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + } else { + current = u_sess->storage_cxt.UndoFileCxt; + } + return (UndoFileState *)MemoryContextAlloc(current, sizeof(UndoFileState)); } static inline void SetUndoFileState(UndoFileState *state, int segno, File file) @@ -114,6 +120,9 @@ static inline void SetUndoFileState(UndoFileState *state, int segno, File file) void InitUndoFile(void) { + if (EnableLocalSysCache()) { + return; + } Assert(u_sess->storage_cxt.UndoFileCxt == NULL); u_sess->storage_cxt.UndoFileCxt = AllocSetContextCreate(u_sess->top_mem_cxt, "UndoFileSmgr", ALLOCSET_DEFAULT_SIZES); } @@ -288,7 +297,7 @@ void ExtendUndoFile(SMgrRelation reln, ForkNumber forknum, BlockNumber blockno, } } - if (fstat(u_sess->storage_cxt.VfdCache[fd].fd, &statBuffer) < 0) { + if (fstat(GetVfdCache()[fd].fd, &statBuffer) < 0) { CloseUndoFile(reln, forknum, InvalidBlockNumber); ereport(ERROR, (errmsg(UNDOFORMAT("could not stat file \"%s\": %m."), path))); } @@ -555,13 +564,14 @@ void UnlinkUndoFile(const RelFileNodeBackend& rnode, ForkNumber forkNum, bool is if (!RelFileNodeBackendIsTemp(rnode)) { RegisterForgetUndoRequests(rnode, segno); } - + pgstat_report_waitevent(WAIT_EVENT_UNDO_FILE_UNLINK); if (unlink(path) < 0 && errno != ENOENT) { /* try again */ if ((unlink(path) < 0) && (errno != ENOENT) && !isRedo) { ereport(WARNING, (errmsg(UNDOFORMAT("could not remove file \"%s\": %m."), path))); } } + pgstat_report_waitevent(WAIT_EVENT_END); } else { /* truncate(2) would be easier here, but Windows hasn't got it */ int fd; diff --git a/src/gausskernel/storage/smgr/md.cpp b/src/gausskernel/storage/smgr/md.cpp index 0ce3ce340..fe2616dcf 100644 --- a/src/gausskernel/storage/smgr/md.cpp +++ b/src/gausskernel/storage/smgr/md.cpp @@ -29,11 +29,11 @@ #include "portability/instr_time.h" #include "postmaster/bgwriter.h" #include "postmaster/pagewriter.h" +#include "postmaster/pagerepair.h" #include "storage/smgr/fd.h" #include "storage/buf/bufmgr.h" #include "storage/smgr/relfilenode.h" #include "storage/copydir.h" -#include 
"storage/page_compression.h" #include "storage/smgr/knl_usync.h" #include "storage/smgr/smgr.h" #include "utils/aiomem.h" @@ -55,13 +55,6 @@ (tag).segno = (segNo); \ } while (false); -constexpr mode_t FILE_RW_PERMISSION = 0600; - -inline static uint4 PageCompressChunkSize(SMgrRelation reln) -{ - return CHUNK_SIZE_LIST[GET_COMPRESS_CHUNK_SIZE((reln)->smgr_rnode.node.opt)]; -} - /* * The magnetic disk storage manager keeps track of open file * descriptors in its own descriptor pool. This is done to make it @@ -104,8 +97,6 @@ inline static uint4 PageCompressChunkSize(SMgrRelation reln) */ typedef struct _MdfdVec { File mdfd_vfd; /* fd number in fd.c's pool */ - File mdfd_vfd_pca; /* page compression address file 's fd number in fd.c's pool */ - File mdfd_vfd_pcd; /* page compression data file 's fd number in fd.c's pool */ BlockNumber mdfd_segno; /* segment number, from 0 */ struct _MdfdVec *mdfd_chain; /* next segment, or NULL */ } MdfdVec; @@ -121,10 +112,6 @@ static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum, const MdfdV static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum, const MdfdVec *seg); static void register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum, BlockNumber segno); -/* function of compressed table */ -static int sync_pcmap(PageCompressHeader *pcMap, uint32 wait_event_info); - - bool check_unlink_rel_hashtbl(RelFileNode rnode, ForkNumber forknum) { HTAB* relfilenode_hashtbl = g_instance.bgwriter_cxt.unlink_rel_hashtbl; @@ -146,44 +133,6 @@ bool check_unlink_rel_hashtbl(RelFileNode rnode, ForkNumber forknum) return found; } -static int OpenPcaFile(const char *path, const RelFileNodeBackend &node, const ForkNumber &forkNum, const uint32 &segNo, int oflags = 0) -{ - Assert(node.node.opt != 0 && forkNum == MAIN_FORKNUM); - char dst[MAXPGPATH]; - CopyCompressedPath(dst, path, COMPRESSED_TABLE_PCA_FILE); - uint32 flags = O_RDWR | PG_BINARY | oflags; - return DataFileIdOpenFile(dst, RelFileNodeForkNumFill(node, PCA_FORKNUM, segNo), (int)flags, S_IRUSR | S_IWUSR); -} - -static int OpenPcdFile(const char *path, const RelFileNodeBackend &node, const ForkNumber &forkNum, const uint32 &segNo, int oflags = 0) -{ - Assert(node.node.opt != 0 && forkNum == MAIN_FORKNUM); - char dst[MAXPGPATH]; - CopyCompressedPath(dst, path, COMPRESSED_TABLE_PCD_FILE); - uint32 flags = O_RDWR | PG_BINARY | oflags; - return DataFileIdOpenFile(dst, RelFileNodeForkNumFill(node, PCD_FORKNUM, segNo), (int)flags, S_IRUSR | S_IWUSR); -} - -static void RegisterCompressDirtySegment(SMgrRelation reln, ForkNumber forknum, const MdfdVec *seg) -{ - PageCompressHeader *pcMap = GetPageCompressMemoryMap(seg->mdfd_vfd_pca, PageCompressChunkSize(reln)); - if (sync_pcmap(pcMap, WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC) != 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m", FilePathName(seg->mdfd_vfd)))); - return; - } - ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", - FilePathName(seg->mdfd_vfd_pca)))); - } - if (FileSync(seg->mdfd_vfd_pcd, WAIT_EVENT_DATA_FILE_SYNC) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m", FilePathName(seg->mdfd_vfd)))); - return; - } - ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", - FilePathName(seg->mdfd_vfd_pcd)))); - } -} /* * register_dirty_segment() -- Mark a relation 
segment as needing fsync * @@ -204,17 +153,14 @@ static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum, const if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */)) { ereport(DEBUG1, (errmsg("could not forward fsync request because request queue is full"))); - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - RegisterCompressDirtySegment(reln, forknum, seg); - } else { - if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m", FilePathName(seg->mdfd_vfd)))); - return; - } - ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", - FilePathName(seg->mdfd_vfd)))); + + if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0) { + if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { + ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m", FilePathName(seg->mdfd_vfd)))); + return; } + ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), + errmsg("could not fsync file \"%s\": %m", FilePathName(seg->mdfd_vfd)))); } } } @@ -245,26 +191,25 @@ void md_register_forget_request(RelFileNode rnode, ForkNumber forknum, BlockNumb RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */); } -static void allocate_chunk_check(PageCompressAddr *pcAddr, uint32 chunk_size, BlockNumber blocknum, MdfdVec *v) +int openrepairfile(char* path, RelFileNodeForkNum filenode) { - /* check allocated chunk number */ - Assert(chunk_size == BLCKSZ / 2 || chunk_size == BLCKSZ / 4 || chunk_size == BLCKSZ / 8 || - chunk_size == BLCKSZ / 16); - if (pcAddr->allocated_chunks > BLCKSZ / chunk_size) { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunks %u of block %u in file \"%s\"", - pcAddr->allocated_chunks, blocknum, - FilePathName(v->mdfd_vfd_pca)))); + int fd = -1; + const int TEMPLEN = 8; + volatile uint32 repair_flags = O_RDWR | PG_BINARY; + char *temppath = (char *)palloc(strlen(path) + TEMPLEN); + errno_t rc = sprintf_s(temppath, strlen(path) + TEMPLEN, "%s.repair", path); + securec_check_ss(rc, "", ""); + ADIO_RUN() + { + repair_flags |= O_DIRECT; } - - auto maxChunkNumbers = MAX_CHUNK_NUMBER(chunk_size); - for (auto i = 0; i < pcAddr->allocated_chunks; i++) { - if (pcAddr->chunknos[i] <= 0 || pcAddr->chunknos[i] > maxChunkNumbers) { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunk number %u of block %u in file \"%s\"", - pcAddr->chunknos[i], blocknum, - FilePathName(v->mdfd_vfd_pca)))); - } + ADIO_END(); + fd = DataFileIdOpenFile(temppath, filenode, (int)repair_flags, 0600); + if (fd < 0) { + ereport(WARNING, (errmsg("[file repair] could not open repair file %s: %m", temppath))); } + pfree(temppath); + return fd; } /* @@ -272,6 +217,9 @@ static void allocate_chunk_check(PageCompressAddr *pcAddr, uint32 chunk_size, Bl */ void mdinit(void) { + if (EnableLocalSysCache()) { + return; + } u_sess->storage_cxt.MdCxt = AllocSetContextCreate(u_sess->top_mem_cxt, "MdSmgr", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); } @@ -292,44 +240,6 @@ bool mdexists(SMgrRelation reln, ForkNumber forkNum, BlockNumber blockNum) return (mdopen(reln, forkNum, EXTENSION_RETURN_NULL) != NULL); } -static int RetryDataFileIdOpenFile(bool isRedo, char* path, const RelFileNodeForkNum &filenode, uint32 flags) -{ - int save_errno = errno; - int fd = -1; - /* - * During bootstrap, there are cases where a system relation will be - 
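
The new openrepairfile() sizes its scratch buffer as strlen(path) + TEMPLEN with TEMPLEN = 8, which is exactly strlen(".repair") plus one byte for the terminator. The same naming scheme as a standalone sketch in plain C:

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "<path>.repair"; caller frees. Returns NULL on allocation failure. */
static char *repair_path(const char *path)
{
    size_t len = strlen(path) + sizeof(".repair"); /* 8 extra bytes, incl. '\0' */
    char *tmp = (char *)malloc(len);
    if (tmp != NULL)
        snprintf(tmp, len, "%s.repair", path);
    return tmp;
}
```
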
* accessed (by internal backend processes) before the bootstrap - * script nominally creates it. Therefore, allow the file to exist - * already, even if isRedo is not set. (See also mdopen) - * - * During inplace upgrade, the physical catalog files may be present - * due to previous failure and rollback. Since the relfilenodes of these - * new catalogs can by no means be used by other relations, we simply - * truncate them. - */ - if (isRedo || IsBootstrapProcessingMode() || - (u_sess->attr.attr_common.IsInplaceUpgrade && filenode.rnode.node.relNode < FirstNormalObjectId)) { - ADIO_RUN() - { - flags = O_RDWR | PG_BINARY | O_DIRECT | (u_sess->attr.attr_common.IsInplaceUpgrade ? O_TRUNC : 0); - } - ADIO_ELSE() - { - flags = O_RDWR | PG_BINARY | (u_sess->attr.attr_common.IsInplaceUpgrade ? O_TRUNC : 0); - } - ADIO_END(); - - fd = DataFileIdOpenFile(path, filenode, flags, FILE_RW_PERMISSION); - } - - if (fd < 0) { - /* be sure to report the error reported by create, not open */ - errno = save_errno; - ereport(ERROR, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", path))); - } - return fd; -} - /* * mdcreate() -- Create a new relation on magnetic disk. * @@ -357,46 +267,56 @@ void mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo) } ADIO_END(); - fd = DataFileIdOpenFile(path, filenode, flags, FILE_RW_PERMISSION); + if (isRedo && (AmStartupProcess() || AmPageRedoWorker() || AmPageWriterProcess() || AmCheckpointerProcess()) && + CheckFileRepairHashTbl(reln->smgr_rnode.node, forkNum, 0)) { + fd = openrepairfile(path, filenode); + if (fd >= 0) { + ereport(LOG, (errmsg("[file repair] open repair file %s.repair", path))); + } + } else { + fd = DataFileIdOpenFile(path, filenode, flags, 0600); + } if (fd < 0) { - fd = RetryDataFileIdOpenFile(isRedo, path, filenode, flags); - } + int save_errno = errno; - File fd_pca = -1; - File fd_pcd = -1; - if (unlikely(IS_COMPRESSED_MAINFORK(reln, forkNum))) { - // close main fork file - FileClose(fd); - fd = -1; - - /* open page compress address file */ - char pcfile_path[MAXPGPATH]; - errno_t rc = snprintf_s(pcfile_path, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - RelFileNodeForkNum pcaFienode = RelFileNodeForkNumFill(reln->smgr_rnode, PCA_FORKNUM, 0); - fd_pca = DataFileIdOpenFile(pcfile_path, pcaFienode, flags, FILE_RW_PERMISSION); + /* + * During bootstrap, there are cases where a system relation will be + * accessed (by internal backend processes) before the bootstrap + * script nominally creates it. Therefore, allow the file to exist + * already, even if isRedo is not set. (See also mdopen) + * + * During inplace upgrade, the physical catalog files may be present + * due to previous failure and rollback. Since the relfilenodes of these + * new catalogs can by no means be used by other relations, we simply + * truncate them. + */ + if (isRedo || IsBootstrapProcessingMode() || + (u_sess->attr.attr_common.IsInplaceUpgrade && filenode.rnode.node.relNode < FirstNormalObjectId)) { + ADIO_RUN() + { + flags = O_RDWR | PG_BINARY | O_DIRECT | (u_sess->attr.attr_common.IsInplaceUpgrade ? O_TRUNC : 0); + } + ADIO_ELSE() + { + flags = O_RDWR | PG_BINARY | (u_sess->attr.attr_common.IsInplaceUpgrade ? 
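
Both openrepairfile() and the retry path inlined into mdcreate() pick their open flags through the ADIO macro trio, which selects the branch compiled for asynchronous direct I/O builds. The recurring shape, with names taken from the diff:

```
uint32 flags = O_RDWR | PG_BINARY;
ADIO_RUN()
{
    flags |= O_DIRECT; /* ADIO builds bypass the OS page cache */
}
ADIO_END();
fd = DataFileIdOpenFile(path, filenode, (int)flags, 0600);
```
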
O_TRUNC : 0); + } + ADIO_END(); - if (fd_pca < 0) { - fd_pca = RetryDataFileIdOpenFile(isRedo, pcfile_path, pcaFienode, flags); + fd = DataFileIdOpenFile(path, filenode, flags, 0600); } - rc = snprintf_s(pcfile_path, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - RelFileNodeForkNum pcdFileNode = RelFileNodeForkNumFill(reln->smgr_rnode, PCD_FORKNUM, 0); - fd_pcd = DataFileIdOpenFile(pcfile_path, pcdFileNode, flags, FILE_RW_PERMISSION); - if (fd_pcd < 0) { - fd_pcd = RetryDataFileIdOpenFile(isRedo, pcfile_path, pcdFileNode, flags); + if (fd < 0) { + /* be sure to report the error reported by create, not open */ + errno = save_errno; + ereport(ERROR, (errcode_for_file_access(), errmsg("could not create file \"%s\": %m", path))); } - SetupPageCompressMemoryMap(fd_pca, reln->smgr_rnode.node, filenode); } - pfree(path); reln->md_fd[forkNum] = _fdvec_alloc(); - reln->md_fd[forkNum]->mdfd_vfd_pca = fd_pca; - reln->md_fd[forkNum] ->mdfd_vfd_pcd = fd_pcd; + reln->md_fd[forkNum]->mdfd_vfd = fd; reln->md_fd[forkNum]->mdfd_segno = 0; reln->md_fd[forkNum]->mdfd_chain = NULL; @@ -496,69 +416,20 @@ void set_max_segno_delrel(int max_segno, RelFileNode rnode, ForkNumber forknum) return; } -static int ResetPcMap(char *path, const RelFileNodeBackend& rnode) +static void mdcleanrepairfile(char *segpath) { - int ret; - char pcfile_path[MAXPGPATH]; - int rc = snprintf_s(pcfile_path, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - int fd_pca = BasicOpenFile(pcfile_path, O_RDWR | PG_BINARY, 0); - if (fd_pca >= 0) { - int save_errno; - int chunkSize = CHUNK_SIZE_LIST[GET_COMPRESS_CHUNK_SIZE(rnode.node.opt)]; - int mapRealSize = SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunkSize); - PageCompressHeader *map = pc_mmap_real_size(fd_pca, mapRealSize, false); - if (map == MAP_FAILED) { - ereport(WARNING, (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("Failed to mmap page compression address file %s: %m", pcfile_path))); - } else { - pg_atomic_write_u32(&map->nblocks, 0); - pg_atomic_write_u32(&map->allocated_chunks, 0); - error_t rc = memset_s((char *)map + SIZE_OF_PAGE_COMPRESS_HEADER_DATA, - mapRealSize - SIZE_OF_PAGE_COMPRESS_HEADER_DATA, 0, - SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunkSize) - SIZE_OF_PAGE_COMPRESS_HEADER_DATA); - securec_check_c(rc, "\0", "\0"); - map->sync = false; - if (sync_pcmap(map, WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC) != 0) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", pcfile_path))); - } + const int TEMPLEN = 8; + struct stat statBuf; - if (pc_munmap(map) != 0) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not munmap file \"%s\": %m", pcfile_path))); - } - } - save_errno = errno; - (void)close(fd_pca); - errno = save_errno; - } else { - ret = -1; - } - if (ret < 0 && errno != ENOENT) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not truncate file \"%s\": %m", pcfile_path))); - } - return ret; -} - -static void UnlinkCompressedFile(const RelFileNode& node, ForkNumber forkNum, char* path) -{ - if (!IS_COMPRESSED_RNODE(node, forkNum)) { - return; - } - /* remove pca */ - char pcfile_path[MAXPGPATH]; - errno_t rc = snprintf_s(pcfile_path, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - int ret = unlink(pcfile_path); - if (ret < 0 && errno != ENOENT) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", pcfile_path))); - } - /* remove pcd */ - rc = snprintf_s(pcfile_path, MAXPGPATH, 
MAXPGPATH - 1, PCD_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - ret = unlink(pcfile_path); - if (ret < 0 && errno != ENOENT) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", pcfile_path))); + char *temppath = (char *)palloc(strlen(segpath) + TEMPLEN); + errno_t rc = sprintf_s(temppath, strlen(segpath) + TEMPLEN, "%s.repair", segpath); + securec_check_ss(rc, "", ""); + if (stat(temppath, &statBuf) >= 0) { + (void)unlink(temppath); + ereport(LOG, (errcode_for_file_access(), + errmsg("remove repair file \"%s\"", temppath))); } + pfree(temppath); } static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bool isRedo) @@ -567,7 +438,7 @@ static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bo int ret; path = relpath(rnode, forkNum); - + /* * Delete or truncate the first segment. */ @@ -584,7 +455,9 @@ static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bo if (ret < 0 && errno != ENOENT) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", path))); } - UnlinkCompressedFile(rnode.node, forkNum, path); + if (isRedo) { + mdcleanrepairfile(path); + } } else { /* truncate(2) would be easier here, but Windows hasn't got it */ int fd; @@ -604,29 +477,6 @@ static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bo ereport(WARNING, (errcode_for_file_access(), errmsg("could not truncate file \"%s\": %m", path))); } - if (IS_COMPRESSED_RNODE(rnode.node, forkNum)) { - // dont truncate pca! pca may be occupied by other threads by mmap - ret = ResetPcMap(path, rnode); - - // remove pcd - char dataPath[MAXPGPATH]; - int rc = snprintf_s(dataPath, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - int fd_pcd = BasicOpenFile(dataPath, O_RDWR | PG_BINARY, 0); - if (fd_pcd >= 0) { - int save_errno; - ret = ftruncate(fd_pcd, 0); - save_errno = errno; - (void)close(fd_pcd); - errno = save_errno; - } else { - ret = -1; - } - if (ret < 0 && errno != ENOENT) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not truncate file \"%s\": %m", dataPath))); - } - } - /* Register request to unlink first segment later */ register_unlink_segment(rnode, forkNum, 0); } @@ -661,7 +511,6 @@ static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bo } break; } - if (!RelFileNodeBackendIsTemp(rnode)) { md_register_forget_request(rnode.node, forkNum, segno); } @@ -678,24 +527,12 @@ static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bo rc = sprintf_s(segpath, strlen(path) + 12, "%s.%u", path, segno); securec_check_ss(rc, "", ""); if (unlink(segpath) < 0) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", segpath))); + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not remove file \"%s\": %m", segpath))); } - - if (IS_COMPRESSED_RNODE(rnode.node, forkNum)) { - char pcfile_segpath[MAXPGPATH]; - errno_t rc = snprintf_s(pcfile_segpath, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, segpath); - securec_check_ss(rc, "\0", "\0"); - if (unlink(pcfile_segpath) < 0) { - ereport(WARNING, - (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", pcfile_segpath))); - } - - rc = snprintf_s(pcfile_segpath, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, segpath); - securec_check_ss(rc, "\0", "\0"); - if (unlink(pcfile_segpath) < 0) { - ereport(WARNING, - (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", 
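
mdcleanrepairfile() is deliberately best-effort: it unlinks the .repair sidecar only if stat() says it exists, and a failed unlink surfaces as nothing more than log noise. An equivalent standalone sketch:

```
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static void clean_repair_file(const char *segpath)
{
    char temppath[4096];
    struct stat st;

    snprintf(temppath, sizeof(temppath), "%s.repair", segpath);
    if (stat(temppath, &st) == 0)
        (void)unlink(temppath); /* best effort; ignore the return code */
}
```
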
pcfile_segpath))); - } + /* try clean the repair file if exists */ + if (isRedo) { + mdcleanrepairfile(path); } } pfree(segpath); @@ -704,155 +541,6 @@ static void mdunlinkfork(const RelFileNodeBackend& rnode, ForkNumber forkNum, bo pfree(path); } -static inline void ExtendChunksOfBlock(PageCompressHeader* pcMap, PageCompressAddr* pcAddr, int needChunks, MdfdVec* v) -{ - if (pcAddr->allocated_chunks < needChunks) { - auto allocateNumber = needChunks - pcAddr->allocated_chunks; - int chunkno = (pc_chunk_number_t)pg_atomic_fetch_add_u32(&pcMap->allocated_chunks, allocateNumber) + 1; - for (int i = pcAddr->allocated_chunks; i < needChunks; ++i, ++chunkno) { - pcAddr->chunknos[i] = chunkno; - } - pcAddr->allocated_chunks = needChunks; - - if (pg_atomic_read_u32(&pcMap->allocated_chunks) - pg_atomic_read_u32(&pcMap->last_synced_allocated_chunks) > - COMPRESS_ADDRESS_FLUSH_CHUNKS) { - pcMap->sync = false; - if (sync_pcmap(pcMap, WAIT_EVENT_COMPRESS_ADDRESS_FILE_FLUSH) != 0) { - ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", - FilePathName(v->mdfd_vfd_pca)))); - } - } - } -} - -/* - * mdextend_pc() -- Add a block to the specified page compressed relation. - * - */ -static void mdextend_pc(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const char* buffer, bool skipFsync) -{ -#ifdef CHECK_WRITE_VS_EXTEND - Assert(blocknum >= mdnblocks(reln, forknum)); -#endif - Assert(IS_COMPRESSED_MAINFORK(reln, forknum)); - - MdfdVec* v = _mdfd_getseg(reln, MAIN_FORKNUM, blocknum, skipFsync, EXTENSION_CREATE); - RelFileCompressOption option; - TransCompressOptions(reln->smgr_rnode.node, &option); - uint32 chunk_size = CHUNK_SIZE_LIST[option.compressChunkSize]; - uint8 algorithm = option.compressAlgorithm; - uint8 prealloc_chunk = option.compressPreallocChunks; - PageCompressHeader *pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - Assert(blocknum % RELSEG_SIZE >= pg_atomic_read_u32(&pcMap->nblocks)); - - uint32 maxAllocChunkNum = (uint32)(BLCKSZ / chunk_size - 1); - PageCompressAddr* pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum); - - prealloc_chunk = (prealloc_chunk > maxAllocChunkNum) ? 
maxAllocChunkNum : prealloc_chunk; - - /* check allocated chunk number */ - if (pcAddr->allocated_chunks > BLCKSZ / chunk_size) { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunks %u of block %u in file \"%s\"", - pcAddr->allocated_chunks, blocknum, - FilePathName(v->mdfd_vfd_pca)))); - } - - for (int i = 0; i < pcAddr->allocated_chunks; ++i) { - if (pcAddr->chunknos[i] <= 0 || pcAddr->chunknos[i] > (BLCKSZ / chunk_size) * RELSEG_SIZE) { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunk number %u of block %u in file \"%s\"", - pcAddr->chunknos[i], blocknum, - FilePathName(v->mdfd_vfd_pca)))); - } - } - - /* compress page only for initialized page */ - char *work_buffer = NULL; - int nchunks = 0; - if (!PageIsNew(buffer)) { - int work_buffer_size = CompressPageBufferBound(buffer, algorithm); - if (work_buffer_size < 0) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("mdextend_pc unrecognized compression algorithm %d", algorithm))); - } - work_buffer = (char *) palloc(work_buffer_size); - int compressed_page_size = CompressPage(buffer, work_buffer, work_buffer_size, option); - if (compressed_page_size < 0) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("mdextend_pc unrecognized compression algorithm %d", algorithm))); - } - nchunks = (compressed_page_size - 1) / chunk_size + 1; - if (nchunks * chunk_size >= BLCKSZ) { - pfree(work_buffer); - work_buffer = (char *) buffer; - nchunks = BLCKSZ / chunk_size; - } else { - /* fill zero in the last chunk */ - int storageSize = chunk_size * nchunks; - if (compressed_page_size < storageSize) { - error_t rc = memset_s(work_buffer + compressed_page_size, work_buffer_size - compressed_page_size, 0, - storageSize - compressed_page_size); - securec_check_c(rc, "\0", "\0"); - } - } - } - - int need_chunks = prealloc_chunk > nchunks ? 
prealloc_chunk : nchunks; - ExtendChunksOfBlock(pcMap, pcAddr, need_chunks, v); - - /* write chunks of compressed page - * worker_buffer = NULL -> nchunks = 0 - */ - for (int i = 0; i < nchunks; i++) { - char* buffer_pos = work_buffer + chunk_size * i; - off_t seekpos = (off_t) OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, pcAddr->chunknos[i]); - // write continuous chunks - int range = 1; - while (i < nchunks - 1 && pcAddr->chunknos[i + 1] == pcAddr->chunknos[i] + 1) { - range++; - i++; - } - int write_amount = chunk_size * range; - int nbytes; - if ((nbytes = FileWrite(v->mdfd_vfd_pcd, buffer_pos, write_amount, seekpos)) != write_amount) { - if (nbytes < 0) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not extend file \"%s\": %m", - FilePathName(v->mdfd_vfd_pcd)), errhint( - "Check free disk space."))); - } - /* short write: complain appropriately */ - ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg( - "could not extend file \"%s\": wrote only %d of %d bytes at block %u", FilePathName(v->mdfd_vfd_pcd), - nbytes, write_amount, blocknum), errhint("Check free disk space."))); - } - } - - /* finally update size of this page and global nblocks */ - if (pcAddr->nchunks != nchunks) { - pcAddr->nchunks = nchunks; - } - - /* write checksum */ - pcAddr->checksum = AddrChecksum32(blocknum, pcAddr, chunk_size); - - if (pg_atomic_read_u32(&pcMap->nblocks) < blocknum % RELSEG_SIZE + 1) { - pg_atomic_write_u32(&pcMap->nblocks, blocknum % RELSEG_SIZE + 1); - } - - pcMap->sync = false; - if (work_buffer != NULL && work_buffer != buffer) { - pfree(work_buffer); - } - - if (!skipFsync && !SmgrIsTemp(reln)) { - register_dirty_segment(reln, forknum, v); - } - - Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); -} - /* * mdextend() -- Add a block to the specified relation. * @@ -884,10 +572,6 @@ void mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, errmsg("cannot extend file \"%s\" beyond %u blocks", relpath(reln->smgr_rnode, forknum), InvalidBlockNumber))); } - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - mdextend_pc(reln, forknum, blocknum, buffer, skipFsync); - return; - } v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE); if (v == NULL) { @@ -930,30 +614,52 @@ void mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber)RELSEG_SIZE)); } -static int MdOpenRetryOpenFile(char* path, const RelFileNodeForkNum &filenode, ExtensionBehavior behavior, uint32 flags) +static File mdopenagain(SMgrRelation reln, ForkNumber forknum, ExtensionBehavior behavior, char *path) { - int fd = -1; + uint32 flags = O_RDWR | PG_BINARY; + File fd = -1; + RelFileNodeForkNum filenode = RelFileNodeForkNumFill(reln->smgr_rnode, forknum, 0); + + ADIO_RUN() + { + flags |= O_DIRECT; + } + ADIO_END(); + /* - * During bootstrap, there are cases where a system relation will be - * accessed (by internal backend processes) before the bootstrap - * script nominally creates it. Therefore, accept mdopen() as a - * substitute for mdcreate() in bootstrap mode only. (See mdcreate) - */ + * During bootstrap, there are cases where a system relation will be + * accessed (by internal backend processes) before the bootstrap + * script nominally creates it. Therefore, accept mdopen() as a + * substitute for mdcreate() in bootstrap mode only. 
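
mdextend() and the other md routines address blocks segment-relative: a relation is split across files of RELSEG_SIZE blocks each, and within a segment the byte position of a block is its remainder times the block size. A sketch of the arithmetic, assuming the usual defaults of 8 kB blocks and 131072 blocks per segment (1 GB segment files):

```
#include <assert.h>
#include <sys/types.h>

#define BLCKSZ 8192u        /* assumed default block size */
#define RELSEG_SIZE 131072u /* assumed default blocks per segment file */

static off_t block_offset(unsigned blocknum)
{
    off_t seekpos = (off_t)BLCKSZ * (blocknum % RELSEG_SIZE);
    assert(seekpos < (off_t)BLCKSZ * RELSEG_SIZE); /* stays inside one segment */
    return seekpos;
}
```
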
(See mdcreate) + */ if (IsBootstrapProcessingMode()) { flags |= (O_CREAT | O_EXCL); - fd = DataFileIdOpenFile(path, filenode, (int)flags, FILE_RW_PERMISSION); + fd = DataFileIdOpenFile(path, filenode, (int)flags, 0600); } if (fd < 0) { if (behavior == EXTENSION_RETURN_NULL && FILE_POSSIBLY_DELETED(errno)) { - return -1; + pfree(path); + return fd; } - if (check_unlink_rel_hashtbl(filenode.rnode.node, filenode.forknumber)) { + if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { ereport(DEBUG1, (errmsg("\"%s\": %m, this relation has been removed", path))); - return -1; + pfree(path); + return fd; + } + if ((AmStartupProcess() || AmPageRedoWorker() || AmPageWriterProcess() || AmCheckpointerProcess()) && + CheckFileRepairHashTbl(reln->smgr_rnode.node, forknum, 0)) { + fd = openrepairfile(path, filenode); + if (fd < 0) { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file %s.repair: %m", path))); + } else { + ereport(LOG, (errmsg("[file repair] open repair file %s.repair", path))); + } + } else { + ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); } - ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); } + return fd; } @@ -971,8 +677,8 @@ static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum, ExtensionBehavior { MdfdVec *mdfd = NULL; char *path = NULL; - File fd = -1; - RelFileNodeForkNum filenode = RelFileNodeForkNumFill(reln->smgr_rnode, forknum, 0); + File fd; + RelFileNodeForkNum filenode; uint32 flags = O_RDWR | PG_BINARY; /* No work if already open */ @@ -982,50 +688,19 @@ static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum, ExtensionBehavior path = relpath(reln->smgr_rnode, forknum); - File fd_pca = -1; - File fd_pcd = -1; - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - /* open page compression address file */ - char pcfile_path[MAXPGPATH]; - errno_t rc = snprintf_s(pcfile_path, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - RelFileNodeForkNum pcaRelFileNode = RelFileNodeForkNumFill(reln->smgr_rnode, PCA_FORKNUM, 0); - fd_pca = DataFileIdOpenFile(pcfile_path, pcaRelFileNode, flags, FILE_RW_PERMISSION); - if (fd_pca < 0) { - fd_pca = MdOpenRetryOpenFile(pcfile_path, pcaRelFileNode, behavior, flags); - if (fd_pca < 0) { - pfree(path); - return NULL; - } - } + filenode = RelFileNodeForkNumFill(reln->smgr_rnode, forknum, 0); - /* open page compression data file */ - rc = snprintf_s(pcfile_path, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, path); - securec_check_ss(rc, "\0", "\0"); - RelFileNodeForkNum pcdRelFileNode = RelFileNodeForkNumFill(reln->smgr_rnode, PCD_FORKNUM, 0); - fd_pcd = DataFileIdOpenFile(pcfile_path, pcdRelFileNode, flags, FILE_RW_PERMISSION); - if (fd_pcd < 0) { - fd_pcd = MdOpenRetryOpenFile(pcfile_path, pcaRelFileNode, behavior, flags); - if (fd_pca < 0) { - pfree(path); - return NULL; - } - } - SetupPageCompressMemoryMap(fd_pca, reln->smgr_rnode.node, filenode); - } else { - ADIO_RUN() - { - flags |= O_DIRECT; - } - ADIO_END(); - filenode = RelFileNodeForkNumFill(reln->smgr_rnode, forknum, 0); - fd = DataFileIdOpenFile(path, filenode, (int)flags, 0600); + ADIO_RUN() + { + flags |= O_DIRECT; + } + ADIO_END(); + + fd = DataFileIdOpenFile(path, filenode, (int)flags, 0600); + if (fd < 0) { + fd = mdopenagain(reln, forknum, behavior, path); if (fd < 0) { - fd = MdOpenRetryOpenFile(path, filenode, behavior, flags); - if (fd < 0) { - pfree(path); - return NULL; - } + return NULL; } } @@ -1035,8 +710,6 @@ static 
MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum, ExtensionBehavior mdfd->mdfd_vfd = fd; mdfd->mdfd_segno = 0; - mdfd->mdfd_vfd_pca = fd_pca; - mdfd->mdfd_vfd_pcd = fd_pcd; mdfd->mdfd_chain = NULL; Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber)RELSEG_SIZE)); @@ -1061,17 +734,8 @@ void mdclose(SMgrRelation reln, ForkNumber forknum, BlockNumber blockNum) MdfdVec *ov = v; /* if not closed already */ - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - if (v->mdfd_vfd_pca >= 0) { - FileClose(v->mdfd_vfd_pca); - } - if (v->mdfd_vfd_pcd >= 0) { - FileClose(v->mdfd_vfd_pcd); - } - } else { - if (v->mdfd_vfd >= 0) { - FileClose(v->mdfd_vfd); - } + if (v->mdfd_vfd >= 0) { + FileClose(v->mdfd_vfd); } /* Now free vector */ @@ -1094,46 +758,11 @@ void mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum) return; } - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - int chunk_size = PageCompressChunkSize(reln); - PageCompressHeader *pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum); - /* check chunk number */ - if (pcAddr->nchunks < 0 || pcAddr->nchunks > BLCKSZ / chunk_size) { - if (u_sess->attr.attr_security.zero_damaged_pages || t_thrd.xlog_cxt.InRecovery) { - return; - } else { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid chunks %u of block %u in file \"%s\"", pcAddr->nchunks, blocknum, - FilePathName(v->mdfd_vfd_pca)))); - } - } + seekpos = (off_t)BLCKSZ * (blocknum % ((BlockNumber)RELSEG_SIZE)); - for (uint8 i = 0; i < pcAddr->nchunks; i++) { - if (pcAddr->chunknos[i] <= 0 || pcAddr->chunknos[i] > (uint32)(BLCKSZ / chunk_size) * RELSEG_SIZE) { - if (u_sess->attr.attr_security.zero_damaged_pages || t_thrd.xlog_cxt.InRecovery) { - return; - } else { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid chunk number %u of block %u in file \"%s\"", pcAddr->chunknos[i], - blocknum, FilePathName(v->mdfd_vfd_pca)))); - } - } - seekpos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, pcAddr->chunknos[i]); - int range = 1; - while (i < pcAddr->nchunks - 1 && pcAddr->chunknos[i + 1] == pcAddr->chunknos[i] + 1) { - range++; - i++; - } - (void)FilePrefetch(v->mdfd_vfd_pcd, seekpos, chunk_size * range, WAIT_EVENT_DATA_FILE_PREFETCH); - } - } else { - seekpos = (off_t)BLCKSZ * (blocknum % ((BlockNumber)RELSEG_SIZE)); + Assert(seekpos < (off_t)BLCKSZ * RELSEG_SIZE); - Assert(seekpos < (off_t)BLCKSZ * RELSEG_SIZE); - - (void)FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ, WAIT_EVENT_DATA_FILE_PREFETCH); - } + (void)FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ, WAIT_EVENT_DATA_FILE_PREFETCH); #endif /* USE_PREFETCH */ } @@ -1176,41 +805,10 @@ void mdwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, Bl Assert(nflush >= 1); Assert(nflush <= nblocks); - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - uint32 chunk_size = PageCompressChunkSize(reln); - PageCompressHeader *pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - pc_chunk_number_t seekpos_chunk; - pc_chunk_number_t last_chunk; - bool firstEnter = true; - for (BlockNumber iblock = 0; iblock < nflush; ++iblock) { - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum + iblock); - // find continue chunk to write back - for (uint8 i = 0; i < pcAddr->nchunks; ++i) { - if (firstEnter) { - seekpos_chunk = pcAddr->chunknos[i]; - last_chunk = seekpos_chunk; - firstEnter = false; - } else if (pcAddr->chunknos[i] == last_chunk + 1) { - last_chunk++; - } else { - 
seekpos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, seekpos_chunk); - pc_chunk_number_t nchunks = last_chunk - seekpos_chunk + 1; - FileWriteback(v->mdfd_vfd_pcd, seekpos, (off_t)nchunks * chunk_size); - seekpos_chunk = pcAddr->chunknos[i]; - last_chunk = seekpos_chunk; - } - } - } - /* flush the rest chunks */ - if (!firstEnter) { - seekpos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, seekpos_chunk); - pc_chunk_number_t nchunks = last_chunk - seekpos_chunk + 1; - FileWriteback(v->mdfd_vfd_pcd, seekpos, (off_t)nchunks * chunk_size); - } - } else { - seekpos = (off_t) BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)); - FileWriteback(v->mdfd_vfd, seekpos, (off_t) BLCKSZ * nflush); - } + seekpos = (off_t)BLCKSZ * (blocknum % ((BlockNumber)RELSEG_SIZE)); + + FileWriteback(v->mdfd_vfd, seekpos, (off_t)BLCKSZ * nflush); + nblocks -= nflush; blocknum += nflush; } @@ -1232,6 +830,7 @@ void mdwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, Bl */ void mdasyncread(SMgrRelation reln, ForkNumber forkNum, AioDispatchDesc_t **dList, int32 dn) { +#ifndef ENABLE_LITE_MODE for (int i = 0; i < dn; i++) { off_t offset; MdfdVec *v = NULL; @@ -1278,6 +877,7 @@ void mdasyncread(SMgrRelation reln, ForkNumber forkNum, AioDispatchDesc_t **dLis /* Dispatch the I/O */ (void)FileAsyncRead(dList, dn); +#endif } /* @@ -1333,6 +933,7 @@ int CompltrReadReq(void *aioDesc, long res) */ void mdasyncwrite(SMgrRelation reln, ForkNumber forkNumber, AioDispatchDesc_t **dList, int32 dn) { +#ifndef ENABLE_LITE_MODE for (int i = 0; i < dn; i++) { off_t offset; MdfdVec *v = NULL; @@ -1404,6 +1005,7 @@ void mdasyncwrite(SMgrRelation reln, ForkNumber forkNumber, AioDispatchDesc_t ** /* Dispatch the I/O */ (void)FileAsyncWrite(dList, dn); +#endif } /* @@ -1499,158 +1101,6 @@ static void check_file_stat(char *file_name) (c) = (value); \ } while (0) -/* - * mdread_pc() -- Read the specified block from a page compressed relation. 
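
The deleted writeback path grouped physically adjacent chunks into single I/Os by scanning for runs of consecutive chunk numbers. The run-detection idea in a standalone sketch; the offset formula here is illustrative, not the exact OFFSET_OF_PAGE_COMPRESS_CHUNK layout:

```
#include <stdio.h>

static void writeback_runs(const unsigned *chunknos, int nchunks, unsigned chunk_size)
{
    for (int i = 0; i < nchunks; i++) {
        int start = i;
        /* extend the run while the next chunk is physically adjacent */
        while (i < nchunks - 1 && chunknos[i + 1] == chunknos[i] + 1)
            i++;
        unsigned long offset = (unsigned long)chunknos[start] * chunk_size;
        unsigned long length = (unsigned long)(i - start + 1) * chunk_size;
        printf("flush %lu bytes at %lu\n", length, offset);
    }
}
```
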
- */ -bool mdread_pc(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer) -{ - Assert(IS_COMPRESSED_MAINFORK(reln, forknum)); - - MdfdVec *v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL); - - RelFileCompressOption option; - TransCompressOptions(reln->smgr_rnode.node, &option); - uint32 chunk_size = CHUNK_SIZE_LIST[option.compressChunkSize]; - uint8 algorithm = option.compressAlgorithm; - PageCompressHeader *pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum); - uint8 nchunks = pcAddr->nchunks; - if (nchunks == 0) { - MemSet(buffer, 0, BLCKSZ); - return true; - } - - if (nchunks > BLCKSZ / chunk_size) { - if (u_sess->attr.attr_security.zero_damaged_pages || t_thrd.xlog_cxt.InRecovery) { - MemSet(buffer, 0, BLCKSZ); - return true; - } else { -#ifndef ENABLE_MULTIPLE_NODES - if (RecoveryInProgress()) { - return false; - } -#endif - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunks %u of block %u in file \"%s\"", nchunks, - blocknum, FilePathName(v->mdfd_vfd_pca)))); - } - } - - for (auto i = 0; i < nchunks; ++i) { - if (pcAddr->chunknos[i] <= 0 || pcAddr->chunknos[i] > MAX_CHUNK_NUMBER(chunk_size)) { - if (u_sess->attr.attr_security.zero_damaged_pages || t_thrd.xlog_cxt.InRecovery) { - MemSet(buffer, 0, BLCKSZ); - return true; - } else { - check_file_stat(FilePathName(v->mdfd_vfd_pcd)); - force_backtrace_messages = true; -#ifndef ENABLE_MULTIPLE_NODES - if (RecoveryInProgress()) { - return false; - } -#endif - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunks %u of block %u in file \"%s\"", - nchunks, blocknum, - FilePathName(v->mdfd_vfd_pca)))); - } - } - } - - // read chunk data - char *buffer_pos = NULL; - uint8 start; - int read_amount; - char *compress_buffer = (char*)palloc(chunk_size * nchunks); - for (uint8 i = 0; i < nchunks; ++i) { - buffer_pos = compress_buffer + chunk_size * i; - off_t seekpos = (off_t) OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, pcAddr->chunknos[i]); - start = i; - while (i < nchunks - 1 && pcAddr->chunknos[i + 1] == pcAddr->chunknos[i] + 1) { - i++; - } - read_amount = chunk_size * (i - start + 1); - TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum, reln->smgr_rnode.node.spcNode, - reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, - reln->smgr_rnode.backend); - int nbytes = FilePRead(v->mdfd_vfd_pcd, buffer_pos, read_amount, seekpos, WAIT_EVENT_DATA_FILE_READ); - TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum, reln->smgr_rnode.node.spcNode, - reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, - reln->smgr_rnode.backend, nbytes, BLCKSZ); - - if (nbytes != read_amount) { - if (nbytes < 0) { -#ifndef ENABLE_MULTIPLE_NODES - if (RecoveryInProgress()) { - return false; - } -#endif - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not read block %u in file \"%s\": %m", blocknum, - FilePathName(v->mdfd_vfd_pcd)))); - } - /* - * Short read: we are at or past EOF, or we read a partial block at - * EOF. Normally this is an error; upper levels should never try to - * read a nonexistent block. However, if zero_damaged_pages is ON or - * we are InRecovery, we should instead return zeroes without - * complaining. This allows, for example, the case of trying to - * update a block that was later truncated away. 
- */ - if (u_sess->attr.attr_security.zero_damaged_pages || t_thrd.xlog_cxt.InRecovery) { - MemSet(buffer, 0, BLCKSZ); - return true; - } else { - check_file_stat(FilePathName(v->mdfd_vfd_pcd)); - force_backtrace_messages = true; - -#ifndef ENABLE_MULTIPLE_NODES - if (RecoveryInProgress()) { - return false; - } -#endif - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( - "could not read block %u in file \"%s\": read only %d of %d bytes", blocknum, - FilePathName(v->mdfd_vfd_pcd), nbytes, read_amount))); - } - } - } - - /* decompress chunk data */ - int nbytes; - if (pcAddr->nchunks == BLCKSZ / chunk_size) { - error_t rc = memcpy_s(buffer, BLCKSZ, compress_buffer, BLCKSZ); - securec_check(rc, "", ""); - } else { - nbytes = DecompressPage(compress_buffer, buffer, algorithm); - if (nbytes != BLCKSZ) { - if (nbytes == -2) { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( - "could not recognized compression algorithm %d for file \"%s\"", algorithm, - FilePathName(v->mdfd_vfd_pcd)))); - } - if (u_sess->attr.attr_security.zero_damaged_pages || t_thrd.xlog_cxt.InRecovery) { - pfree(compress_buffer); - MemSet(buffer, 0, BLCKSZ); - return true; - } else { - check_file_stat(FilePathName(v->mdfd_vfd_pcd)); - force_backtrace_messages = true; - -#ifndef ENABLE_MULTIPLE_NODES - if (RecoveryInProgress()) { - return false; - } -#endif - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( - "could not decompress block %u in file \"%s\": decompress %d of %d bytes", blocknum, - FilePathName(v->mdfd_vfd_pcd), nbytes, BLCKSZ))); - } - } - } - pfree(compress_buffer); - return true; -} - /* * mdread() -- Read the specified block from a relation. */ @@ -1673,10 +1123,6 @@ SMGR_READ_STATUS mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber block static THR_LOCAL Oid lstDb = InvalidOid; static THR_LOCAL Oid lstSpc = InvalidOid; - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - return mdread_pc(reln, forknum, blocknum, buffer) ? SMGR_RD_OK : SMGR_RD_CRC_ERROR; - } - (void)INSTR_TIME_SET_CURRENT(startTime); TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum, reln->smgr_rnode.node.spcNode, reln->smgr_rnode.node.dbNode, @@ -1775,137 +1221,6 @@ SMGR_READ_STATUS mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber block } } -/* - * mdwrite_pc() -- Write the supplied block at the appropriate location for page compressed relation. - * - * This is to be used only for updating already-existing blocks of a - * relation (ie, those before the current EOF). To extend a relation, - * use mdextend(). - */ -static void mdwrite_pc(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const char *buffer, bool skipFsync) -{ - /* This assert is too expensive to have on normally ... */ -#ifdef CHECK_WRITE_VS_EXTEND - Assert(blocknum < mdnblocks(reln, forknum)); -#endif - Assert(IS_COMPRESSED_MAINFORK(reln, forknum)); - bool mmapSync = false; - MdfdVec *v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_FAIL); - - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not write block %u in file \"%s\": %m, this relation has been removed", - blocknum, FilePathName(v->mdfd_vfd)))); - /* this file need skip sync */ - return; - } - - RelFileCompressOption option; - TransCompressOptions(reln->smgr_rnode.node, &option); - uint32 chunk_size = CHUNK_SIZE_LIST[option.compressChunkSize]; - uint8 algorithm = option.compressAlgorithm; - int8 level = option.compressLevelSymbol ? 
option.compressLevel : -option.compressLevel; - uint8 prealloc_chunk = option.compressPreallocChunks; - - PageCompressHeader *pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum); - Assert(blocknum % RELSEG_SIZE < pg_atomic_read_u32(&pcMap->nblocks)); - auto maxChunkSize = BLCKSZ / chunk_size - 1; - if (prealloc_chunk > maxChunkSize) { - prealloc_chunk = maxChunkSize; - } - - allocate_chunk_check(pcAddr, chunk_size, blocknum, v); - - /* compress page */ - auto work_buffer_size = CompressPageBufferBound(buffer, algorithm); - if (work_buffer_size < 0) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg( - "mdwrite_pc unrecognized compression algorithm %d,chunk_size:%ud,level:%d,prealloc_chunk:%ud", algorithm, - chunk_size, level, prealloc_chunk))); - } - char *work_buffer = (char *) palloc(work_buffer_size); - auto compress_buffer_size = CompressPage(buffer, work_buffer, work_buffer_size, option); - if (compress_buffer_size < 0) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg( - "mdwrite_pc unrecognized compression algorithm %d,chunk_size:%ud,level:%d,prealloc_chunk:%ud", algorithm, - chunk_size, level, prealloc_chunk))); - } - - uint8 nchunks = (compress_buffer_size - 1) / chunk_size + 1; - auto bufferSize = chunk_size * nchunks; - if (bufferSize >= BLCKSZ) { - /* store original page if can not save space? */ - pfree(work_buffer); - work_buffer = (char *) buffer; - nchunks = BLCKSZ / chunk_size; - } else { - /* fill zero in the last chunk */ - if ((uint32) compress_buffer_size < bufferSize) { - auto leftSize = bufferSize - compress_buffer_size; - errno_t rc = memset_s(work_buffer + compress_buffer_size, leftSize, 0, leftSize); - securec_check(rc, "", ""); - } - } - - uint8 need_chunks = prealloc_chunk > nchunks ? 
prealloc_chunk : nchunks; - ExtendChunksOfBlock(pcMap, pcAddr, need_chunks, v); - - // write chunks of compressed page - for (auto i = 0; i < nchunks; ++i) { - auto buffer_pos = work_buffer + chunk_size * i; - off_t seekpos = (off_t) OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, pcAddr->chunknos[i]); - auto start = i; - while (i < nchunks - 1 && pcAddr->chunknos[i + 1] == pcAddr->chunknos[i] + 1) { - i++; - } - int write_amount = chunk_size * (i - start + 1); - - TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum, reln->smgr_rnode.node.spcNode, - reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, - reln->smgr_rnode.backend); - int nbytes = FilePWrite(v->mdfd_vfd_pcd, buffer_pos, write_amount, seekpos, WAIT_EVENT_DATA_FILE_WRITE); - TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum, reln->smgr_rnode.node.spcNode, - reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, - reln->smgr_rnode.backend, nbytes, BLCKSZ); - - if (nbytes != write_amount) { - if (nbytes < 0) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not write block %u in file \"%s\": %m", blocknum, - FilePathName(v->mdfd_vfd_pcd)))); - } - /* short write: complain appropriately */ - ereport(ERROR, (errcode(ERRCODE_DISK_FULL), errmsg( - "could not write block %u in file \"%s\": wrote only %d of %d bytes", blocknum, - FilePathName(v->mdfd_vfd_pcd), nbytes, BLCKSZ), errhint("Check free disk space."))); - } - } - - /* finally update size of this page and global nblocks */ - if (pcAddr->nchunks != nchunks) { - mmapSync = true; - pcAddr->nchunks = nchunks; - } - - /* write checksum */ - if (mmapSync) { - pcMap->sync = false; - pcAddr->checksum = AddrChecksum32(blocknum, pcAddr, chunk_size); - } - - /* write checksum */ - pcAddr->checksum = AddrChecksum32(blocknum, pcAddr, chunk_size); - - mmapSync = false; - if (work_buffer != NULL && work_buffer != buffer) { - pfree(work_buffer); - } - - if (!skipFsync && !SmgrIsTemp(reln)) { - register_dirty_segment(reln, forknum, v); - } -} - /* * mdwrite() -- Write the supplied block at the appropriate location. * @@ -1919,13 +1234,6 @@ void mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const int nbytes; MdfdVec *v = NULL; - /* This assert is too expensive to have on normally ... */ -#ifdef CHECK_WRITE_VS_EXTEND - if (!check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - Assert(blocknum < mdnblocks(reln, forknum)); - } -#endif - instr_time start_time; instr_time end_time; PgStat_Counter time_diff = 0; @@ -1941,6 +1249,13 @@ void mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const (void)INSTR_TIME_SET_CURRENT(start_time); + /* This assert is too expensive to have on normally ... 
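
mdwrite() brackets the FilePWrite with the instr_time machinery from portability/instr_time.h (already included at the top of md.cpp) so per-write latency can feed the stats counters. The pattern reduced to its core, with names from the diff:

```
instr_time start_time;
instr_time end_time;

(void)INSTR_TIME_SET_CURRENT(start_time);
/* ... FilePWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE) ... */
(void)INSTR_TIME_SET_CURRENT(end_time);
INSTR_TIME_SUBTRACT(end_time, start_time);
PgStat_Counter time_diff = (PgStat_Counter)INSTR_TIME_GET_MICROSEC(end_time);
```
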
*/ +#ifdef CHECK_WRITE_VS_EXTEND + if (!check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { + Assert(blocknum < mdnblocks(reln, forknum)); + } +#endif + TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum, reln->smgr_rnode.node.spcNode, reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, reln->smgr_rnode.backend); @@ -1949,20 +1264,15 @@ void mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const return; } - bool compressed = IS_COMPRESSED_MAINFORK(reln, forknum); - if (compressed) { - mdwrite_pc(reln, forknum, blocknum, buffer, skipFsync); - } else { - seekpos = (off_t)BLCKSZ * (blocknum % ((BlockNumber)RELSEG_SIZE)); + seekpos = (off_t)BLCKSZ * (blocknum % ((BlockNumber)RELSEG_SIZE)); - Assert(seekpos < (off_t)BLCKSZ * RELSEG_SIZE); + Assert(seekpos < (off_t)BLCKSZ * RELSEG_SIZE); - nbytes = FilePWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE); + nbytes = FilePWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE); + + TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum, reln->smgr_rnode.node.spcNode, reln->smgr_rnode.node.dbNode, + reln->smgr_rnode.node.relNode, reln->smgr_rnode.backend, nbytes, BLCKSZ); - TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum, reln->smgr_rnode.node.spcNode, - reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, - reln->smgr_rnode.backend, nbytes, BLCKSZ); - } (void)INSTR_TIME_SET_CURRENT(end_time); INSTR_TIME_SUBTRACT(end_time, start_time); time_diff = (PgStat_Counter)INSTR_TIME_GET_MICROSEC(end_time); @@ -2009,9 +1319,7 @@ void mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const if (max_time < time_diff) { max_time = time_diff; } - if (compressed) { - return; - } + if (nbytes != BLCKSZ) { if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { ereport(DEBUG1, (errmsg("could not write block %u in file \"%s\": %m, this relation has been removed", @@ -2117,9 +1425,6 @@ void mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) MdfdVec *v = NULL; BlockNumber curnblk; BlockNumber prior_blocks; - int chunk_size, i; - PageCompressHeader *pcMap = NULL; - PageCompressAddr *pcAddr = NULL; /* * NOTE: mdnblocks makes sure we have opened all active segments, so that @@ -2154,43 +1459,16 @@ void mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) * from the mdfd_chain). We truncate the file, but do not delete * it, for reasons explained in the header comments. 
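
When FilePWrite() returns something other than BLCKSZ, mdwrite() distinguishes a genuine I/O error (negative return, errno meaningful) from a short write, which by long-standing PostgreSQL convention is reported as disk-full. A standalone analogue:

```
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns 0 on success, -1 with errno set otherwise. */
static int write_block(int fd, const char *buf, size_t blcksz, off_t pos)
{
    ssize_t n = pwrite(fd, buf, blcksz, pos);
    if (n < 0)
        return -1;          /* real I/O error: errno already meaningful */
    if ((size_t)n != blcksz) {
        errno = ENOSPC;     /* short write: almost always out of space */
        return -1;
    }
    return 0;
}
```
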
*/ - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - chunk_size = PageCompressChunkSize(reln); - pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - pg_atomic_write_u32(&pcMap->nblocks, 0); - pg_atomic_write_u32(&pcMap->allocated_chunks, 0); - MemSet((char *)pcMap + SIZE_OF_PAGE_COMPRESS_HEADER_DATA, 0, - SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunk_size) - SIZE_OF_PAGE_COMPRESS_HEADER_DATA); - - pcMap->sync = false; - if (sync_pcmap(pcMap, WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC) != 0) { - ereport(data_sync_elevel(ERROR), - (errcode_for_file_access(), - errmsg("could not msync file \"%s\": %m", FilePathName(v->mdfd_vfd_pca)))); - } - if (FileTruncate(v->mdfd_vfd_pcd, 0, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not truncate file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd_pcd)))); - FileClose(ov->mdfd_vfd_pcd); - pfree(ov); - break; - } - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not truncate file \"%s\": %m", FilePathName(v->mdfd_vfd_pcd)))); - } - } else { - if (FileTruncate(v->mdfd_vfd, 0, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not truncate file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd)))); - FileClose(ov->mdfd_vfd); - pfree(ov); - break; - } - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not truncate file \"%s\": %m", FilePathName(v->mdfd_vfd)))); + if (FileTruncate(v->mdfd_vfd, 0, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) { + if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { + ereport(DEBUG1, (errmsg("could not truncate file \"%s\": %m, this relation has been removed", + FilePathName(v->mdfd_vfd)))); + FileClose(ov->mdfd_vfd); + pfree(ov); + break; } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not truncate file \"%s\": %m", FilePathName(v->mdfd_vfd)))); } if (!SmgrIsTemp(reln)) { @@ -2199,12 +1477,7 @@ void mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) v = v->mdfd_chain; Assert(ov != reln->md_fd[forknum]); /* we never drop the 1st segment */ - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - FileClose(ov->mdfd_vfd_pca); - FileClose(ov->mdfd_vfd_pcd); - } else { - FileClose(ov->mdfd_vfd); - } + FileClose(ov->mdfd_vfd); pfree(ov); } else if (prior_blocks + ((BlockNumber)RELSEG_SIZE) > nblocks) { /* @@ -2216,70 +1489,15 @@ void mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) * given in the header comments. 
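
For a partial last segment, mdtruncate() computes how many blocks survive in that segment and truncates the file to exactly that many block-sized units; whole segments past the target are truncated to zero length but kept on disk, for the reasons the header comments give. The surviving-bytes arithmetic as a sketch, assuming the default 8 kB block size:

```
#include <sys/types.h>

#define BLCKSZ 8192u /* assumed default block size */

/* Bytes to keep in the segment that starts at block prior_blocks, when
 * the relation is being cut back to nblocks total; caller guarantees
 * the target falls inside this segment. */
static off_t truncate_target(unsigned nblocks, unsigned prior_blocks)
{
    unsigned last_seg_blocks = nblocks - prior_blocks;
    return (off_t)last_seg_blocks * BLCKSZ;
}
```
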
*/ BlockNumber last_seg_blocks = nblocks - prior_blocks; - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - pc_chunk_number_t max_used_chunkno = (pc_chunk_number_t) 0; - uint32 allocated_chunks; - chunk_size = PageCompressChunkSize(reln); - pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, chunk_size); - for (BlockNumber blk = 0; blk < RELSEG_SIZE; ++blk) { - pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blk); - pcAddr->nchunks = 0; - pcAddr->checksum = AddrChecksum32(blk, pcAddr, chunk_size); - } - pg_atomic_write_u32(&pcMap->nblocks, last_seg_blocks); - pcMap->sync = false; - if (sync_pcmap(pcMap, WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC) != 0) { - ereport(data_sync_elevel(ERROR), - (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", - FilePathName(v->mdfd_vfd_pca)))); - } - allocated_chunks = pg_atomic_read_u32(&pcMap->allocated_chunks); - /* find the max used chunkno */ - for (BlockNumber blk = (BlockNumber) 0; blk < (BlockNumber) last_seg_blocks; blk++) { - pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blk); - - /* check allocated_chunks for one page */ - if (pcAddr->allocated_chunks > BLCKSZ / chunk_size) { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid chunks %u of block %u in file \"%s\"", - pcAddr->allocated_chunks, blk, - FilePathName(v->mdfd_vfd_pca)))); - } - - /* check chunknos for one page */ - for (i = 0; i < pcAddr->allocated_chunks; i++) { - if (pcAddr->chunknos[i] == 0 || pcAddr->chunknos[i] > allocated_chunks) { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg( - "invalid chunk number %u of block %u in file \"%s\"", pcAddr->chunknos[i], blk, - FilePathName(v->mdfd_vfd_pca)))); - } - - if (pcAddr->chunknos[i] > max_used_chunkno) { - max_used_chunkno = pcAddr->chunknos[i]; - } - } - } - off_t compressedOffset = (off_t)max_used_chunkno * chunk_size; - if (FileTruncate(v->mdfd_vfd_pcd, compressedOffset, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not truncate file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd)))); - break; - } - ereport(ERROR, (errcode_for_file_access(), errmsg("could not truncate file \"%s\" to %u blocks: %m", - FilePathName(v->mdfd_vfd), nblocks))); - } - } else { - if (FileTruncate(v->mdfd_vfd, (off_t)last_seg_blocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not truncate file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd)))); - break; - } - ereport(ERROR, (errcode_for_file_access(), errmsg("could not truncate file \"%s\" to %u blocks: %m", - FilePathName(v->mdfd_vfd), nblocks))); + if (FileTruncate(v->mdfd_vfd, (off_t)last_seg_blocks * BLCKSZ, WAIT_EVENT_DATA_FILE_TRUNCATE) < 0) { + if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { + ereport(DEBUG1, (errmsg("could not truncate file \"%s\": %m, this relation has been removed", + FilePathName(v->mdfd_vfd)))); + break; } + ereport(ERROR, (errcode_for_file_access(), errmsg("could not truncate file \"%s\" to %u blocks: %m", + FilePathName(v->mdfd_vfd), nblocks))); } if (!SmgrIsTemp(reln)) { @@ -2298,29 +1516,6 @@ void mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) } } -static bool CompressMdImmediateSync(SMgrRelation reln, ForkNumber forknum, MdfdVec* v) -{ - PageCompressHeader* pcMap = GetPageCompressMemoryMap(v->mdfd_vfd_pca, PageCompressChunkSize(reln)); - if 
(sync_pcmap(pcMap, WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC) != 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd_pca)))); - return false; - } - ereport(data_sync_elevel(ERROR), - (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", FilePathName(v->mdfd_vfd_pca)))); - } - if (FileSync(v->mdfd_vfd_pcd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd_pcd)))); - return false; - } - ereport(data_sync_elevel(ERROR), - (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", FilePathName(v->mdfd_vfd_pcd)))); - } - return true; -} /* * mdimmedsync() -- Immediately sync a relation to stable storage. * @@ -2340,20 +1535,15 @@ void mdimmedsync(SMgrRelation reln, ForkNumber forknum) v = mdopen(reln, forknum, EXTENSION_FAIL); while (v != NULL) { - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - if (!CompressMdImmediateSync(reln, forknum, v)) { + if (FileSync(v->mdfd_vfd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0) { + if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { + ereport(DEBUG1, + (errmsg("could not fsync file \"%s\": %m, this relation has been removed", + FilePathName(v->mdfd_vfd)))); break; } - } else { - if (FileSync(v->mdfd_vfd, WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC) < 0) { - if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { - ereport(DEBUG1, (errmsg("could not fsync file \"%s\": %m, this relation has been removed", - FilePathName(v->mdfd_vfd)))); - break; - } - ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", - FilePathName(v->mdfd_vfd)))); - } + ereport(data_sync_elevel(ERROR), + (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", FilePathName(v->mdfd_vfd)))); } v = v->mdfd_chain; } @@ -2381,7 +1571,13 @@ void mdForgetDatabaseFsyncRequests(Oid dbid) */ static MdfdVec *_fdvec_alloc(void) { - return (MdfdVec *)MemoryContextAlloc(u_sess->storage_cxt.MdCxt, sizeof(MdfdVec)); + MemoryContext current; + if (EnableLocalSysCache()) { + current = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + } else { + current = u_sess->storage_cxt.MdCxt; + } + return (MdfdVec *)MemoryContextAlloc(current, sizeof(MdfdVec)); } /* Get Path from RelFileNode */ @@ -2459,39 +1655,30 @@ static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber ADIO_END(); /* open the file */ - fd = DataFileIdOpenFile(fullpath, filenode, O_RDWR | PG_BINARY | oflags, FILE_RW_PERMISSION); + if (RecoveryInProgress() && CheckFileRepairHashTbl(reln->smgr_rnode.node, forknum, segno) && + (AmStartupProcess() || AmPageRedoWorker() || AmPageWriterProcess() || AmCheckpointerProcess())) { + fd = openrepairfile(fullpath, filenode); + if (fd < 0) { + pfree(fullpath); + return NULL; + } else { + ereport(LOG, (errmsg("[file repair] open repair file %s.repair", fullpath))); + } + } else { + fd = DataFileIdOpenFile(fullpath, filenode, O_RDWR | PG_BINARY | oflags, 0600); + } + pfree(fullpath); if (fd < 0) { return NULL; } - int fd_pca = -1; - int fd_pcd = -1; - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - FileClose(fd); - fd = -1; - fd_pca = OpenPcaFile(fullpath, reln->smgr_rnode, MAIN_FORKNUM, segno, oflags); - if (fd_pca < 0) { - pfree(fullpath); - return NULL; - } - fd_pcd = OpenPcdFile(fullpath, 
reln->smgr_rnode, MAIN_FORKNUM, segno, oflags); - if (fd_pcd < 0) { - pfree(fullpath); - return NULL; - } - SetupPageCompressMemoryMap(fd_pca, reln->smgr_rnode.node, filenode); - } - - pfree(fullpath); /* allocate an mdfdvec entry for it */ v = _fdvec_alloc(); /* fill the entry */ v->mdfd_vfd = fd; - v->mdfd_vfd_pca = fd_pca; - v->mdfd_vfd_pcd = fd_pcd; v->mdfd_segno = segno; v->mdfd_chain = NULL; Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber)RELSEG_SIZE)); @@ -2605,10 +1792,7 @@ static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum, const MdfdVec *seg) { off_t len; - if (IS_COMPRESSED_MAINFORK(reln, forknum)) { - PageCompressHeader *pcMap = GetPageCompressMemoryMap(seg->mdfd_vfd_pca, PageCompressChunkSize(reln)); - return (BlockNumber) pg_atomic_read_u32(&pcMap->nblocks); - } + len = FileSeek(seg->mdfd_vfd, 0L, SEEK_END); if (len < 0) { if (check_unlink_rel_hashtbl(reln->smgr_rnode.node, forknum)) { @@ -2624,13 +1808,6 @@ static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum, const MdfdV return (BlockNumber)(len / BLCKSZ); } -int OpenForkFile(const char *path, const RelFileNodeBackend& rnode, const ForkNumber& forkNumber, const uint32& segNo) -{ - RelFileNodeForkNum fileNode = RelFileNodeForkNumFill(rnode, forkNumber, segNo); - uint32 flags = O_RDWR | PG_BINARY; - return DataFileIdOpenFile((char*)path, fileNode, (int)flags, S_IRUSR | S_IWUSR); -} - /* * Sync a file to disk, given a file tag. Write the path into an output * buffer so the caller can use it in error messages. @@ -2642,11 +1819,10 @@ int SyncMdFile(const FileTag *ftag, char *path) SMgrRelation reln = smgropen(ftag->rnode, InvalidBackendId, GetColumnNum(ftag->forknum)); MdfdVec *v; char *p; - File file = -1; - File pcaFd = -1; - File pcdFd = -1; + File file; int result; int savedErrno; + bool needClose = false; /* Provide the path for informational messages. */ p = _mdfd_segpath(reln, ftag->forknum, ftag->segno); @@ -2656,56 +1832,39 @@ int SyncMdFile(const FileTag *ftag, char *path) /* Try to open the requested segment. */ v = _mdfd_getseg(reln, ftag->forknum, ftag->segno * (BlockNumber) RELSEG_SIZE, false, EXTENSION_RETURN_NULL); - - if (IS_COMPRESSED_RNODE(ftag->rnode, ftag->forknum)) { - if (v == NULL) { - pcaFd = OpenPcaFile(path, reln->smgr_rnode, ftag->forknum, ftag->segno); - if (pcaFd < 0) { - return -1; - } - pcdFd = OpenPcdFile(path, reln->smgr_rnode, ftag->forknum, ftag->segno); - if (pcdFd < 0) { - savedErrno = errno; - FileClose(pcaFd); - errno = savedErrno; - return -1; - } - } else { - pcaFd = v->mdfd_vfd_pca; - pcdFd = v->mdfd_vfd_pcd; - } - - PageCompressHeader *map = GetPageCompressMemoryMap(pcaFd, PageCompressChunkSize(reln)); - result = sync_pcmap(map, WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC); - if (result == 0) { - result = FileSync(pcdFd, WAIT_EVENT_DATA_FILE_SYNC); - } else { - ereport(data_sync_elevel(WARNING), - (errcode_for_file_access(), errmsg("could not fsync pcmap \"%s\": %m", path))); - } - } else { - if (v == NULL) { - file = OpenForkFile(path, reln->smgr_rnode, ftag->forknum, ftag->segno); - if (file < 0) { - return -1; - } - } else { - file = v->mdfd_vfd; - } - /* Try to fsync the file. 
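
With the compressed address map gone, _mdnblocks() derives a segment's block count purely from its file size via a seek to end-of-file. A standalone equivalent over a raw descriptor:

```
#include <sys/types.h>
#include <unistd.h>

#define BLCKSZ 8192 /* assumed default block size */

/* Number of whole blocks in the file, or -1 if the seek fails. */
static long nblocks_of(int fd)
{
    off_t len = lseek(fd, 0, SEEK_END);
    return (len < 0) ? -1L : (long)(len / BLCKSZ);
}
```
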
*/ - result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC); - } - savedErrno = errno; if (v == NULL) { - if (IS_COMPRESSED_RNODE(ftag->rnode, ftag->forknum)) { - FileClose(pcaFd); - FileClose(pcdFd); - } else { - FileClose(file); + RelFileNodeForkNum filenode = RelFileNodeForkNumFill(reln->smgr_rnode, + ftag->forknum, ftag->segno); + uint32 flags = O_RDWR | PG_BINARY; + file = DataFileIdOpenFile(path, filenode, (int)flags, S_IRUSR | S_IWUSR); + if (file < 0 && + (AmStartupProcess() || AmPageRedoWorker() || AmPageWriterProcess() || AmCheckpointerProcess()) && + CheckFileRepairHashTbl(reln->smgr_rnode.node, ftag->forknum, ftag->segno)) { + const int TEMPLEN = 8; + char *temppath = (char *)palloc(strlen(path) + TEMPLEN); + errno_t rc = sprintf_s(temppath, strlen(path) + TEMPLEN, "%s.repair", path); + securec_check_ss(rc, "", ""); + file = DataFileIdOpenFile(temppath, filenode, (int)flags, S_IRUSR | S_IWUSR); + if (file < 0) { + pfree(temppath); + return -1; + } + pfree(temppath); + } else if (file < 0) { + return -1; } + needClose = true; + } else { + file = v->mdfd_vfd; + } + + /* Try to fsync the file. */ + result = FileSync(file, WAIT_EVENT_DATA_FILE_SYNC); + savedErrno = errno; + if (needClose) { + FileClose(file); } errno = savedErrno; - return result; } @@ -2725,7 +1884,6 @@ int UnlinkMdFile(const FileTag *ftag, char *path) pfree(p); /* Try to unlink the file. */ - UnlinkCompressedFile(ftag->rnode, MAIN_FORKNUM, path); return unlink(path); } @@ -2744,30 +1902,3 @@ bool MatchMdFileTag(const FileTag *ftag, const FileTag *candidate) */ return ftag->rnode.dbNode == candidate->rnode.dbNode; } - -static int sync_pcmap(PageCompressHeader *pcMap, uint32 wait_event_info) -{ - if (pg_atomic_read_u32(&pcMap->sync) == true) { - return 0; - } - int returnCode; - uint32 nblocks, allocated_chunks, last_synced_nblocks, last_synced_allocated_chunks; - - nblocks = pg_atomic_read_u32(&pcMap->nblocks); - allocated_chunks = pg_atomic_read_u32(&pcMap->allocated_chunks); - last_synced_nblocks = pg_atomic_read_u32(&pcMap->last_synced_nblocks); - last_synced_allocated_chunks = pg_atomic_read_u32(&pcMap->last_synced_allocated_chunks); - returnCode = pc_msync(pcMap); - if (returnCode == 0) { - if (last_synced_nblocks != nblocks) { - pg_atomic_write_u32(&pcMap->last_synced_nblocks, nblocks); - } - - if (last_synced_allocated_chunks != allocated_chunks) { - pg_atomic_write_u32(&pcMap->last_synced_allocated_chunks, allocated_chunks); - } - } - - pcMap->sync = true; - return returnCode; -} diff --git a/src/gausskernel/storage/smgr/mmap_shared.cpp b/src/gausskernel/storage/smgr/mmap_shared.cpp deleted file mode 100644 index 9d4a1c4c5..000000000 --- a/src/gausskernel/storage/smgr/mmap_shared.cpp +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
- * --------------------------------------------------------------------------------------- - * - * - * - * IDENTIFICATION - * src/gausskernel/storage/smgr/mmap_shared.cpp - * - * --------------------------------------------------------------------------------------- - */ -#include "postgres.h" -#include "miscadmin.h" -#include "catalog/pg_type.h" -#include "utils/datum.h" -#include "utils/relcache.h" - -#include "utils/memutils.h" -#include "utils/memprot.h" - -#include "storage/page_compression.h" -#include "executor/executor.h" -#include "storage/vfd.h" - -struct MmapEntry { - RelFileNodeForkNum relFileNodeForkNum; - /* - * the following are setting sin runtime - */ - size_t reference = 0; - PageCompressHeader *pcmap = NULL; -}; - -constexpr size_t LOCK_ARRAY_SIZE = 1024; -static pthread_mutex_t mmapLockArray[LOCK_ARRAY_SIZE]; - -static inline uint32 MmapTableHashCode(const RelFileNodeForkNum &relFileNodeForkNum) -{ - return tag_hash((void *)&relFileNodeForkNum, sizeof(RelFileNodeForkNum)); -} - -static inline pthread_mutex_t *MmapPartitionLock(size_t hashCode) -{ - return &mmapLockArray[hashCode % LOCK_ARRAY_SIZE]; -} - -static inline PageCompressHeader *MmapSharedMapFile(Vfd *vfdP, uint16 chunkSize, uint2 opt, bool readonly) -{ - auto map = pc_mmap_real_size(vfdP->fd, SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunkSize), false); - if (map->chunk_size == 0 || map->algorithm == 0) { - map->chunk_size = chunkSize; - map->algorithm = GET_COMPRESS_ALGORITHM(opt); - if (pc_msync(map) != 0) { - ereport(data_sync_elevel(ERROR), - (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", vfdP->fileName))); - } - } - if (RecoveryInProgress() && !map->sync) { - CheckAndRepairCompressAddress(map, chunkSize, map->algorithm, vfdP->fileName); - } - return map; -} - -void RealInitialMMapLockArray() -{ - for (size_t i = 0; i < LOCK_ARRAY_SIZE; ++i) { - pthread_mutex_init(&mmapLockArray[i], NULL); - } - - HASHCTL ctl; - /* hash accessed by database file id */ - errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl)); - securec_check(rc, "", ""); - - ctl.keysize = sizeof(RelFileNodeForkNum); - ctl.entrysize = sizeof(MmapEntry); - ctl.hash = tag_hash; - ctl.num_partitions = LOCK_ARRAY_SIZE; - const size_t initLen = 256; - g_instance.mmapCache = HeapMemInitHash( - "mmap hash", initLen, - (Max(g_instance.attr.attr_common.max_files_per_process, t_thrd.storage_cxt.max_userdatafiles)) / 2, &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_PARTITION); -} - -PageCompressHeader *GetPageCompressHeader(void *vfd, uint16 chunkSize, const RelFileNodeForkNum &relFileNodeForkNum) -{ - Vfd *currentVfd = (Vfd *)vfd; - uint32 hashCode = MmapTableHashCode(relFileNodeForkNum); - AutoMutexLock mmapLock(MmapPartitionLock(hashCode)); - - mmapLock.lock(); - bool find = false; - MmapEntry *mmapEntry = (MmapEntry *)hash_search_with_hash_value(g_instance.mmapCache, (void *)&relFileNodeForkNum, - hashCode, HASH_ENTER, &find); - if (!find) { - mmapEntry->pcmap = NULL; - mmapEntry->reference = 0; - } - if (mmapEntry->pcmap == NULL) { - mmapEntry->pcmap = MmapSharedMapFile(currentVfd, chunkSize, relFileNodeForkNum.rnode.node.opt, false); - } - ++mmapEntry->reference; - mmapLock.unLock(); - return mmapEntry->pcmap; -} - -void UnReferenceAddrFile(void *vfd) -{ - Vfd *currentVfd = (Vfd *)vfd; - RelFileNodeForkNum relFileNodeForkNum = currentVfd->fileNode; - uint32 hashCode = MmapTableHashCode(relFileNodeForkNum); - AutoMutexLock mmapLock(MmapPartitionLock(hashCode)); - mmapLock.lock(); - - MmapEntry *mmapEntry = (MmapEntry 
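/*
 * The lookup completed below targets g_instance.mmapCache: entries are keyed
 * by RelFileNodeForkNum, guarded by one of the LOCK_ARRAY_SIZE (1024)
 * partition mutexes chosen by hash code, and reference-counted, so the shared
 * pca mapping is created on first use and unmapped only when the last
 * referencing vfd calls UnReferenceAddrFile() below.
 */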
*)hash_search_with_hash_value(g_instance.mmapCache, (void *)&relFileNodeForkNum, - hashCode, HASH_FIND, NULL); - if (mmapEntry == NULL) { - ereport(ERROR, (errcode_for_file_access(), - errmsg("UnReferenceAddrFile failed! mmap not found, filePath: %s", currentVfd->fileName))); - } - --mmapEntry->reference; - if (mmapEntry->reference == 0) { - if (pc_munmap(mmapEntry->pcmap) != 0) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not munmap file \"%s\": %m", currentVfd->fileName))); - } - if (hash_search_with_hash_value(g_instance.mmapCache, (void *)&relFileNodeForkNum, hashCode, HASH_REMOVE, - NULL) == NULL) { - ereport(ERROR, - (errcode_for_file_access(), - errmsg("UnReferenceAddrFile failed! remove hash key failed, filePath: %s", currentVfd->fileName))); - } - } else if (mmapEntry->reference < 0) { - ereport(FATAL, (errcode_for_file_access(), errmsg("could not munmap file \"%s\": %m", currentVfd->fileName))); - } - mmapLock.unLock(); -} \ No newline at end of file diff --git a/src/gausskernel/storage/smgr/page_compression.cpp b/src/gausskernel/storage/smgr/page_compression.cpp deleted file mode 100644 index ba632c264..000000000 --- a/src/gausskernel/storage/smgr/page_compression.cpp +++ /dev/null @@ -1,472 +0,0 @@ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. - * Copyright (c) 2020, PostgreSQL Global Development Group - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * ------------------------------------------------------------------------- - * - * page_compression.cpp - * Routines for page compression - * - * There are two implementations at the moment: zstd, and the Postgres - * pg_lzcompress(). zstd support requires that the server was compiled - * with --with-zstd. 
- * IDENTIFICATION - * ./src/gausskernel/storage/smgr/page_compression.cpp - * - * ------------------------------------------------------------------------- - */ -#include "postgres.h" -#include "miscadmin.h" -#include "catalog/pg_type.h" -#include "utils/datum.h" -#include "utils/relcache.h" - -#include "utils/timestamp.h" -#include "storage/checksum.h" -#include "storage/page_compression.h" -#include "storage/page_compression_impl.h" - -static void CheckHeaderOfCompressAddr(PageCompressHeader* pcMap, uint16 chunk_size, uint8 algorithm, const char* path) -{ - if (pcMap->chunk_size != chunk_size || pcMap->algorithm != algorithm) { - if (u_sess->attr.attr_security.zero_damaged_pages) { - ereport(WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid chunk_size %u or algorithm %u in head of compress relation address file \"%s\", " - "and reinitialized it.", - pcMap->chunk_size, - pcMap->algorithm, - path))); - - pcMap->algorithm = algorithm; - pg_atomic_write_u32(&pcMap->nblocks, RELSEG_SIZE); - pg_atomic_write_u32(&pcMap->allocated_chunks, 0); - pg_atomic_write_u32(&pcMap->last_synced_allocated_chunks, 0); - pcMap->chunk_size = chunk_size; - } else { - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid chunk_size %u or algorithm %u in head of compress relation address file \"%s\"", - pcMap->chunk_size, - pcMap->algorithm, - path))); - } - } -} - -void CheckAndRepairCompressAddress(PageCompressHeader *pcMap, uint16 chunk_size, uint8 algorithm, const char *path) -{ - TimestampTz lastRecoveryTime = pcMap->last_recovery_start_time; - TimestampTz pgStartTime = t_thrd.time_cxt.pg_start_time; - error_t rc; - /* if the relation had been checked in this startup, skip */ - if (lastRecoveryTime == pgStartTime) { - return; - } - - /* check head of compress address file */ - CheckHeaderOfCompressAddr(pcMap, chunk_size, algorithm, path); - - uint32 nblocks = pg_atomic_read_u32(&pcMap->nblocks); - uint32 allocated_chunks = pg_atomic_read_u32(&pcMap->allocated_chunks); - BlockNumber *global_chunknos = (BlockNumber *)palloc0(MAX_CHUNK_NUMBER(chunk_size) * sizeof(BlockNumber)); - - BlockNumber max_blocknum = (BlockNumber)-1; - BlockNumber max_nonzero_blocknum = (BlockNumber)-1; - BlockNumber max_allocated_chunkno = (pc_chunk_number_t)0; - - /* check compress address of every pages */ - for (BlockNumber blocknum = 0; blocknum < (BlockNumber)RELSEG_SIZE; ++blocknum) { - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum); - if (pcAddr->checksum != AddrChecksum32(blocknum, pcAddr, chunk_size)) { - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid checkum %u of block %u in file \"%s\"", - pcAddr->checksum, blocknum, path))); - pcAddr->allocated_chunks = pcAddr->nchunks = 0; - for (int i = 0; i < BLCKSZ / chunk_size; ++i) { - pcAddr->chunknos[i] = 0; - } - pcAddr->checksum = 0; - } - /* - * skip when found first zero filled block after nblocks - * if(blocknum >= (BlockNumber)nblocks && pcAddr->allocated_chunks == 0) - * break; - */ - - /* check allocated_chunks for one page */ - if (pcAddr->allocated_chunks > BLCKSZ / chunk_size) { - if (u_sess->attr.attr_security.zero_damaged_pages) { - rc = memset_s((void *)pcAddr, SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size), 0, - SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size)); - securec_check_c(rc, "\0", "\0"); - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid allocated_chunks %u of block %u in file \"%s\", and zero this block", - pcAddr->allocated_chunks, blocknum, path))); - continue; - } else { - 
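/*
 * This error branch (zero_damaged_pages disabled) aborts the whole scan. The
 * invariants enforced per block are: the PageCompressAddr checksum matches,
 * allocated_chunks <= BLCKSZ / chunk_size, every chunkno lies in
 * (0, MAX_CHUNK_NUMBER(chunk_size)], and no chunk is claimed by two blocks.
 * With an 8 KiB BLCKSZ and a 2 KiB chunk_size, for instance, an address entry
 * may therefore reference at most four chunks.
 */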
pfree(global_chunknos); - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid allocated_chunks %u of block %u in file \"%s\"", - pcAddr->allocated_chunks, blocknum, path))); - } - } - - /* check chunknos for one page */ - for (int i = 0; i < pcAddr->allocated_chunks; ++i) { - /* check for invalid chunkno */ - if (pcAddr->chunknos[i] == 0 || pcAddr->chunknos[i] > MAX_CHUNK_NUMBER(chunk_size)) { - if (u_sess->attr.attr_security.zero_damaged_pages) { - rc = memset_s((void *)pcAddr, SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size), 0, - SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size)); - securec_check_c(rc, "\0", "\0"); - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid chunk number %u of block %u in file \"%s\", and zero this block", - pcAddr->chunknos[i], blocknum, path))); - continue; - } else { - pfree(global_chunknos); - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("invalid chunk number %u of block %u in file \"%s\"", pcAddr->chunknos[i], - blocknum, path))); - } - } - - /* check for duplicate chunkno */ - if (global_chunknos[pcAddr->chunknos[i] - 1] != 0) { - if (u_sess->attr.attr_security.zero_damaged_pages) { - rc = memset_s((void *)pcAddr, SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size), 0, - SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size)); - securec_check_c(rc, "\0", "\0"); - ereport( - WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg( - "chunk number %u of block %u duplicate with block %u in file \"%s\", and zero this block", - pcAddr->chunknos[i], blocknum, global_chunknos[pcAddr->chunknos[i] - 1], path))); - continue; - } else { - pfree(global_chunknos); - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("chunk number %u of block %u duplicate with block %u in file \"%s\"", - pcAddr->chunknos[i], blocknum, global_chunknos[pcAddr->chunknos[i] - 1], path))); - } - } - } - - /* clean chunknos beyond allocated_chunks for one page */ - for (int i = pcAddr->allocated_chunks; i < BLCKSZ / chunk_size; ++i) { - if (pcAddr->chunknos[i] != 0) { - pcAddr->chunknos[i] = 0; - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("clear chunk number %u beyond allocated_chunks %u of block %u in file \"%s\"", - pcAddr->chunknos[i], pcAddr->allocated_chunks, blocknum, path))); - } - } - - /* check nchunks for one page */ - if (pcAddr->nchunks > pcAddr->allocated_chunks) { - if (u_sess->attr.attr_security.zero_damaged_pages) { - rc = memset_s((void *)pcAddr, SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size), 0, - SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size)); - securec_check_c(rc, "\0", "\0"); - ereport( - WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("nchunks %u exceeds allocated_chunks %u of block %u in file \"%s\", and zero this block", - pcAddr->nchunks, pcAddr->allocated_chunks, blocknum, path))); - continue; - } else { - pfree(global_chunknos); - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("nchunks %u exceeds allocated_chunks %u of block %u in file \"%s\"", - pcAddr->nchunks, pcAddr->allocated_chunks, blocknum, path))); - } - } - - max_blocknum = blocknum; - if (pcAddr->nchunks > 0) { - max_nonzero_blocknum = blocknum; - } - - for (int i = 0; i < pcAddr->allocated_chunks; ++i) { - global_chunknos[pcAddr->chunknos[i] - 1] = blocknum + 1; - if (pcAddr->chunknos[i] > max_allocated_chunkno) { - max_allocated_chunkno = pcAddr->chunknos[i]; - } - } - } - - int unused_chunks = 0; - /* check for holes in allocated chunks */ - for (BlockNumber i = 0; i < max_allocated_chunkno; i++) { - if (global_chunknos[i] == 0) { - unused_chunks++; - } - } - - if (unused_chunks 
> 0) { - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("there are %u chunks of total allocated chunks %u can not be use in file \"%s\"", - unused_chunks, max_allocated_chunkno, path), - errhint("You may need to run VACUMM FULL to optimize space allocation."))); - } - - /* update nblocks in head of compressed file */ - if (nblocks < max_nonzero_blocknum + 1) { - pg_atomic_write_u32(&pcMap->nblocks, max_nonzero_blocknum + 1); - pg_atomic_write_u32(&pcMap->last_synced_nblocks, max_nonzero_blocknum + 1); - - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("update nblocks head of compressed file \"%s\". old: %u, new: %u", path, nblocks, - max_nonzero_blocknum + 1))); - } - - /* update allocated_chunks in head of compress file */ - if (allocated_chunks != max_allocated_chunkno) { - pg_atomic_write_u32(&pcMap->allocated_chunks, max_allocated_chunkno); - pg_atomic_write_u32(&pcMap->last_synced_allocated_chunks, max_allocated_chunkno); - - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("update allocated_chunks in head of compressed file \"%s\". old: %u, new: %u", path, - allocated_chunks, max_allocated_chunkno))); - } - - /* clean compress address after max_blocknum + 1 */ - for (BlockNumber blocknum = max_blocknum + 1; blocknum < (BlockNumber)RELSEG_SIZE; blocknum++) { - char buf[128]; - char *p = NULL; - PageCompressAddr *pcAddr = GET_PAGE_COMPRESS_ADDR(pcMap, chunk_size, blocknum); - - /* skip zero block */ - if (pcAddr->allocated_chunks == 0 && pcAddr->nchunks == 0) { - continue; - } - - /* clean compress address and output content of the address */ - rc = memset_s(buf, sizeof(buf), 0, sizeof(buf)); - securec_check_c(rc, "\0", "\0"); - p = buf; - - for (int i = 0; i < pcAddr->allocated_chunks; i++) { - if (pcAddr->chunknos[i]) { - const char *formatStr = i == 0 ? 
"%u" : ",%u"; - errno_t rc = - snprintf_s(p, sizeof(buf) - (p - buf), sizeof(buf) - (p - buf) - 1, formatStr, pcAddr->chunknos[i]); - securec_check_ss(rc, "\0", "\0"); - p += strlen(p); - } - } - - rc = - memset_s((void *)pcAddr, SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size), 0, SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size)); - securec_check_c(rc, "\0", "\0"); - ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("clean unused compress address of block %u in file \"%s\", old " - "allocated_chunks/nchunks/chunknos: %u/%u/{%s}", - blocknum, path, pcAddr->allocated_chunks, pcAddr->nchunks, buf))); - } - - pfree(global_chunknos); - - if (pc_msync(pcMap) != 0) { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not msync file \"%s\": %m", path))); - } - - pcMap->last_recovery_start_time = pgStartTime; -} - -int64 CalculateMainForkSize(char* pathName, RelFileNode* rnode, ForkNumber forkNumber) -{ - Assert(IS_COMPRESSED_RNODE((*rnode), forkNumber)); - Assert(rnode->bucketNode == -1); - return CalculateCompressMainForkSize(pathName); -} - -void CopyCompressedPath(char dst[MAXPGPATH], const char* pathName, CompressedFileType compressFileType) -{ - int rc; - if (compressFileType == COMPRESSED_TABLE_PCA_FILE) { - rc = snprintf_s(dst, MAXPGPATH, MAXPGPATH - 1, PCA_SUFFIX, pathName); - } else { - rc = snprintf_s(dst, MAXPGPATH, MAXPGPATH - 1, PCD_SUFFIX, pathName); - } - securec_check_ss(rc, "\0", "\0"); -} - -int64 CalculateCompressMainForkSize(char* pathName, bool suppressedENOENT) -{ - int64 totalsize = 0; - - char pcFilePath[MAXPGPATH]; - CopyCompressedPath(pcFilePath, pathName, COMPRESSED_TABLE_PCA_FILE); - totalsize += CalculateFileSize(pcFilePath, MAXPGPATH, suppressedENOENT); - - CopyCompressedPath(pcFilePath, pathName, COMPRESSED_TABLE_PCD_FILE); - totalsize += CalculateFileSize(pcFilePath, MAXPGPATH, suppressedENOENT); - - return totalsize; -} - -uint16 ReadChunkSize(FILE* pcaFile, char* pcaFilePath, size_t len) -{ - uint16 chunkSize; - if (fseeko(pcaFile, (off_t)offsetof(PageCompressHeader, chunk_size), SEEK_SET) != 0) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not seek in file \"%s\": \"%lu\": %m", pcaFilePath, len))); - } - - if (fread(&chunkSize, sizeof(chunkSize), 1, pcaFile) <= 0) { - ereport(ERROR, - (errcode_for_file_access(), errmsg("could not open file \"%s\": \"%lu\": %m", pcaFilePath, len))); - } - return chunkSize; -} - -int64 CalculateFileSize(char* pathName, size_t size, bool suppressedENOENT) -{ - struct stat structstat; - if (stat(pathName, &structstat)) { - if (errno == ENOENT) { - if (suppressedENOENT) { - return 0; - } - ereport(ERROR, (errcode_for_file_access(), errmsg("could not FIND file \"%s\": %m", pathName))); - } else { - ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", pathName))); - } - } - return structstat.st_size; -} - -uint1 ConvertChunkSize(uint32 compressedChunkSize, bool *success) -{ - uint1 chunkSize = INDEX_OF_HALF_BLCKSZ; - switch (compressedChunkSize) { - case BLCKSZ / 2: - chunkSize = INDEX_OF_HALF_BLCKSZ; - break; - case BLCKSZ / 4: - chunkSize = INDEX_OF_QUARTER_BLCKSZ; - break; - case BLCKSZ / 8: - chunkSize = INDEX_OF_EIGHTH_BRICK_BLCKSZ; - break; - case BLCKSZ / 16: - chunkSize = INDEX_OF_SIXTEENTHS_BLCKSZ; - break; - default: - *success = false; - return chunkSize; - } - *success = true; - return chunkSize; -} - -constexpr int MAX_RETRY_LIMIT = 60; -constexpr long RETRY_SLEEP_TIME = 1000000L; - -size_t ReadAllChunkOfBlock(char *dst, size_t destLen, BlockNumber blockNumber, 
ReadBlockChunksStruct& rbStruct) -{ - PageCompressHeader* header = rbStruct.header; - if (blockNumber >= header->nblocks) { - ereport(ERROR, - (ERRCODE_INVALID_PARAMETER_VALUE, - errmsg("blocknum \"%u\" exceeds max block number", blockNumber))); - } - char* pageBuffer = rbStruct.pageBuffer; - const char* fileName = rbStruct.fileName; - decltype(PageCompressHeader::chunk_size) chunkSize = header->chunk_size; - decltype(ReadBlockChunksStruct::segmentNo) segmentNo = rbStruct.segmentNo; - PageCompressAddr* currentAddr = GET_PAGE_COMPRESS_ADDR(header, chunkSize, blockNumber); - - size_t tryCount = 0; - /* for empty chunks write */ - uint8 allocatedChunks; - uint8 nchunks; - do { - allocatedChunks = currentAddr->allocated_chunks; - nchunks = currentAddr->nchunks; - for (uint8 i = 0; i < nchunks; ++i) { - off_t seekPos = (off_t)OFFSET_OF_PAGE_COMPRESS_CHUNK(chunkSize, currentAddr->chunknos[i]); - uint8 start = i; - while (i < nchunks - 1 && currentAddr->chunknos[i + 1] == currentAddr->chunknos[i] + 1) { - i++; - } - if (fseeko(rbStruct.fp, seekPos, SEEK_SET) != 0) { - ReleaseMap(header, fileName); - ereport(ERROR, (errcode_for_file_access(), errmsg("could not seek in file \"%s\": %m", fileName))); - } - size_t readAmount = chunkSize * (i - start + 1); - if (fread(dst + start * chunkSize, 1, readAmount, rbStruct.fp) != readAmount && ferror(rbStruct.fp)) { - ReleaseMap(header, fileName); - ereport(ERROR, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", fileName))); - } - } - if (nchunks == 0) { - break; - } - if (DecompressPage(dst, pageBuffer, header->algorithm) == BLCKSZ) { - PageHeader phdr = PageHeader(pageBuffer); - BlockNumber blkNo = blockNumber + segmentNo * ((BlockNumber)RELSEG_SIZE); - if (PageIsNew(phdr) || pg_checksum_page(pageBuffer, blkNo) == phdr->pd_checksum) { - break; - } - } - - if (tryCount < MAX_RETRY_LIMIT) { - ++tryCount; - pg_usleep(RETRY_SLEEP_TIME); - } else { - ReleaseMap(header, fileName); - ereport(ERROR, - (errcode_for_file_access(), - errmsg("base backup cheksum or Decompressed blockno %u failed in file \"%s\", aborting backup. 
" - "nchunks: %u, allocatedChunks: %u, segno: %d.", - blockNumber, - fileName, - nchunks, - allocatedChunks, - segmentNo))); - } - } while (true); - if (allocatedChunks > nchunks) { - auto currentWriteSize = nchunks * chunkSize; - securec_check( - memset_s(dst + currentWriteSize, destLen - currentWriteSize, 0, (allocatedChunks - nchunks) * chunkSize), - "", - ""); - } - return allocatedChunks * chunkSize; -} - -CompressedFileType IsCompressedFile(char *fileName, size_t fileNameLen) -{ - size_t suffixLen = 4; - if (fileNameLen >= suffixLen) { - const char *suffix = fileName + fileNameLen - suffixLen; - if (strncmp(suffix, "_pca", suffixLen) == 0) { - return COMPRESSED_TABLE_PCA_FILE; - } else if (strncmp(suffix, "_pcd", suffixLen) == 0) { - return COMPRESSED_TABLE_PCD_FILE; - } - } - return COMPRESSED_TYPE_UNKNOWN; -} - -void ReleaseMap(PageCompressHeader* map, const char* fileName) -{ - if (map != NULL && pc_munmap(map) != 0) { - ereport(WARNING, (errcode_for_file_access(), errmsg("could not munmap file \"%s\": %m", fileName))); - } -} diff --git a/src/gausskernel/storage/smgr/segment/data_file.cpp b/src/gausskernel/storage/smgr/segment/data_file.cpp index e4962c73b..4201cb2d3 100644 --- a/src/gausskernel/storage/smgr/segment/data_file.cpp +++ b/src/gausskernel/storage/smgr/segment/data_file.cpp @@ -25,11 +25,13 @@ #include "catalog/storage_xlog.h" #include "commands/tablespace.h" +#include "commands/verify.h" #include "executor/executor.h" #include "pgstat.h" #include "storage/smgr/fd.h" #include "storage/smgr/knl_usync.h" #include "storage/smgr/segment.h" +#include "postmaster/pagerepair.h" static const mode_t SEGMENT_FILE_MODE = S_IWUSR | S_IRUSR; @@ -48,12 +50,16 @@ static int dv_open_file(char *filename, uint32 flags, int mode) { int fd = -1; fd = BasicOpenFile(filename, flags, mode); + int err = errno; + ereport(LOG, (errmsg("dv_open_file filename: %s, flags is %u, mode is %d, fd is %d", filename, flags, mode, fd))); + errno = err; return fd; } static void dv_close_file(int fd) { close(fd); + ereport(LOG, (errmsg("dv_close_file fd is %d", fd))); } /* Return a palloc string, and callers should free it */ @@ -157,6 +163,32 @@ static SegPhysicalFile df_get_physical_file(SegLogicFile *sf, int sliceno, Block return spf; } +void df_flush_data(SegLogicFile *sf, BlockNumber blocknum, BlockNumber nblocks) +{ + int128 remainBlocks = nblocks; + while (remainBlocks > 0) { + BlockNumber nflush = remainBlocks; + int slice_start = blocknum / DF_FILE_SLICE_BLOCKS; + int slice_end = (blocknum + remainBlocks - 1) / DF_FILE_SLICE_BLOCKS; + + /* cross slice */ + if (slice_start != slice_end) { + nflush = DF_FILE_SLICE_BLOCKS - (blocknum % DF_FILE_SLICE_BLOCKS); + } + + SegPhysicalFile spf = df_get_physical_file(sf, slice_start, blocknum); + if (spf.fd < 0) { + return; + } + + off_t seekpos = (off_t)BLCKSZ * (blocknum % DF_FILE_SLICE_BLOCKS); + pg_flush_data(spf.fd, seekpos, (off_t)nflush * BLCKSZ); + + remainBlocks -= nflush; + blocknum += nflush; + } +} + /* * Extend the file array in SegLogicFile */ @@ -178,6 +210,142 @@ void df_extend_file_vector(SegLogicFile *sf) sf->vector_capacity = new_capacity; } +void df_close_all_file(RepairFileKey key, int32 max_sliceno) +{ + Oid relNode = key.relfilenode.relNode; + ForkNumber forknum = key.forknum; + struct stat statBuf; + SegSpace *spc = spc_init_space_node(key.relfilenode.spcNode, key.relfilenode.dbNode); + + Assert(relNode <= EXTENT_8192 && relNode > 0); + AutoMutexLock spc_lock(&spc->lock); + spc_lock.lock(); + + SegExtentGroup *eg = 
&spc->extent_group[relNode - 1][forknum]; + AutoMutexLock lock(&eg->lock); + lock.lock(); + + AutoMutexLock filelock(&eg->segfile->filelock); + filelock.lock(); + + char *tempfilename = NULL; + for (int i = 0; i <= max_sliceno; i++) { + tempfilename = slice_filename(eg->segfile->filename, i); + if (stat(tempfilename, &statBuf) < 0) { + /* ENOENT is expected after the last segment... */ + if (errno != ENOENT) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", tempfilename))); + } + } else { + if (eg->segfile->segfiles[i].fd > 0) { + dv_close_file(eg->segfile->segfiles[i].fd); + eg->segfile->segfiles[i].fd = -1; + } + } + pfree(tempfilename); + eg->segfile->segfiles[i].fd = -1; + eg->segfile->segfiles[i].sliceno = i; + } + eg->segfile->file_num = 0; + + filelock.unLock(); + lock.unLock(); + spc_lock.unLock(); + return; +} + +void df_clear_and_close_all_file(RepairFileKey key, int32 max_sliceno) +{ + Oid relNode = key.relfilenode.relNode; + ForkNumber forknum = key.forknum; + SegSpace *spc = spc_init_space_node(key.relfilenode.spcNode, key.relfilenode.dbNode); + + Assert(relNode <= EXTENT_8192 && relNode > 0); + AutoMutexLock spc_lock(&spc->lock); + spc_lock.lock(); + + SegExtentGroup *eg = &spc->extent_group[relNode - 1][forknum]; + AutoMutexLock lock(&eg->lock); + lock.lock(); + + AutoMutexLock filelock(&eg->segfile->filelock); + filelock.lock(); + + char *tempfilename = NULL; + for (int i = 0; i <= max_sliceno; i++) { + tempfilename = slice_filename(eg->segfile->filename, i); + int ret = unlink(tempfilename); + if (ret < 0 && errno != ENOENT) { + ereport(WARNING, (errcode_for_file_access(), + errmsg("could not remove file \"%s\": %m", tempfilename))); + } + if (ret >= 0) { + ereport(LOG, (errcode_for_file_access(), + errmsg("[file repair] clear segment file \"%s\"", tempfilename))); + if (eg->segfile->segfiles[i].fd > 0) { + dv_close_file(eg->segfile->segfiles[i].fd); + eg->segfile->segfiles[i].fd = -1; + } + } + pfree(tempfilename); + eg->segfile->segfiles[i].fd = -1; + eg->segfile->segfiles[i].sliceno = i; + } + eg->segfile->file_num = 0; + + filelock.unLock(); + lock.unLock(); + spc_lock.unLock(); + return; +} + +void df_open_all_file(RepairFileKey key, int32 max_sliceno) +{ + int fd = -1; + char *filename = NULL; + uint32 flags = O_RDWR | PG_BINARY; + Oid relNode = key.relfilenode.relNode; + ForkNumber forknum = key.forknum; + SegSpace *spc = spc_init_space_node(key.relfilenode.spcNode, key.relfilenode.dbNode); + + Assert(relNode <= EXTENT_8192 && relNode > 0); + AutoMutexLock spc_lock(&spc->lock); + spc_lock.lock(); + + SegExtentGroup *eg = &spc->extent_group[relNode - 1][forknum]; + AutoMutexLock eg_lock(&eg->lock); + eg_lock.lock(); + + AutoMutexLock file_lock(&eg->segfile->filelock); + file_lock.lock(); + + for (int i = 0; i <= max_sliceno; i++) { + filename = slice_filename(eg->segfile->filename, i); + fd = dv_open_file(filename, flags, SEGMENT_FILE_MODE); + if (fd < 0) { + ereport(ERROR, (errmsg("[file repair] open segment file failed, %s", filename))); + } + eg->segfile->segfiles[i].fd = fd; + eg->segfile->segfiles[i].sliceno = i; + pfree(filename); + } + + int maxopenslicefd = eg->segfile->segfiles[max_sliceno].fd; + eg->segfile->file_num = max_sliceno + 1; + off_t size = lseek(maxopenslicefd, 0L, SEEK_END); + if (max_sliceno == 0) { + eg->segfile->total_blocks = size; + } else { + eg->segfile->total_blocks = max_sliceno * RELSEG_SIZE + size / BLCKSZ; + } + + file_lock.unLock(); + eg_lock.unLock(); + spc_lock.unLock(); + return; +} + /* 
* sliceno == 0, means opening all files; otherwises open until the target slice. */ @@ -192,6 +360,9 @@ static void df_open_target_files(SegLogicFile *sf, int targetno) } char *filename = slice_filename(sf->filename, sliceno); + if (sliceno >= sf->vector_capacity) { + df_extend_file_vector(sf); + } int fd = dv_open_file(filename, flags, SEGMENT_FILE_MODE); if (fd < 0) { if (errno != ENOENT) { @@ -203,9 +374,6 @@ static void df_open_target_files(SegLogicFile *sf, int targetno) pfree(filename); break; } - if (sliceno >= sf->vector_capacity) { - df_extend_file_vector(sf); - } sf->segfiles[sliceno].fd = fd; sf->segfiles[sliceno].sliceno = sliceno; @@ -246,13 +414,16 @@ void df_extend_internal(SegLogicFile *sf) if (last_file_size == DF_FILE_SLICE_SIZE) { int new_sliceno = sf->file_num; char *filename = slice_filename(sf->filename, new_sliceno); - int new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); - pfree(filename); - if (new_sliceno >= sf->vector_capacity) { df_extend_file_vector(sf); } + int new_fd = dv_open_file(filename, O_RDWR | O_CREAT, SEGMENT_FILE_MODE); + if (new_fd < 0) { + ereport(ERROR, (errcode_for_file_access(), errmsg("[segpage] could not create file \"%s\": %m", filename))); + } + if (ftruncate(new_fd, DF_FILE_EXTEND_STEP_SIZE) != 0) { + dv_close_file(new_fd); ereport(ERROR, (errmodule(MOD_SEGMENT_PAGE), errcode_for_file_access(), @@ -263,6 +434,7 @@ void df_extend_internal(SegLogicFile *sf) sf->segfiles[new_sliceno] = {.fd = new_fd, .sliceno = new_sliceno}; sf->file_num++; sf->total_blocks += DF_FILE_EXTEND_STEP_BLOCKS; + pfree(filename); } else { ssize_t new_size; if (last_file_size % DF_FILE_EXTEND_STEP_SIZE == 0) { diff --git a/src/gausskernel/storage/smgr/segment/extent_group.cpp b/src/gausskernel/storage/smgr/segment/extent_group.cpp index 8a0a133b3..9cd35131b 100644 --- a/src/gausskernel/storage/smgr/segment/extent_group.cpp +++ b/src/gausskernel/storage/smgr/segment/extent_group.cpp @@ -436,7 +436,7 @@ void eg_init_bitmap_page(SegExtentGroup *seg, BlockNumber pageno, BlockNumber fi XLogBeginInsert(); XLogRegisterData((char *)&first_page, sizeof(BlockNumber)); XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT); - XLogRecPtr rec_ptr = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_INIT_MAPPAGE, false, SegmentBktId); + XLogRecPtr rec_ptr = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_INIT_MAPPAGE, SegmentBktId); PageSetLSN(BufferGetPage(buffer), rec_ptr); END_CRIT_SECTION(); @@ -456,7 +456,7 @@ void eg_init_invrsptr_page(SegExtentGroup *seg, BlockNumber pageno) XLogBeginInsert(); XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT); - XLogRecPtr rec_ptr = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_INIT_INVRSPTR_PAGE, false, SegmentBktId); + XLogRecPtr rec_ptr = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_INIT_INVRSPTR_PAGE, SegmentBktId); PageSetLSN(page, rec_ptr); END_CRIT_SECTION(); @@ -522,7 +522,7 @@ void eg_add_map_group(SegExtentGroup *seg, BlockNumber pageno, uint8 group_size, XLogRegisterData((char *)&new_map_group_info, sizeof(xl_new_map_group_info_t)); XLogRegisterBuffer(0, map_head_buffer, REGBUF_STANDARD); - XLogRecPtr recptr = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_ADD_NEW_GROUP, false, SegmentBktId); + XLogRecPtr recptr = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_ADD_NEW_GROUP, SegmentBktId); PageSetLSN(page, recptr); END_CRIT_SECTION(); @@ -741,7 +741,7 @@ void eg_create_if_necessary(SegExtentGroup *seg) XLogBeginInsert(); XLogRegisterData((char *)&seg->rnode, sizeof(RelFileNode)); XLogRegisterData((char *)&seg->forknum, sizeof(ForkNumber)); - XLogRecPtr xlog = XLogInsert(RM_SEGPAGE_ID, 
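/*
 * As in the earlier segment-page hunks, the only change at this call site is
 * dropping the constant "false" (the old isupgrade flag, see the
 * XLogAtomicOperation::XLogCommit change further below) from XLogInsert();
 * the record itself is unchanged.
 */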
XLOG_SEG_CREATE_EXTENT_GROUP, false, SegmentBktId); + XLogRecPtr xlog = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_CREATE_EXTENT_GROUP, SegmentBktId); XLogWaitFlush(xlog); eg_init_data_files(seg, false, xlog); diff --git a/src/gausskernel/storage/smgr/segment/segbuffer.cpp b/src/gausskernel/storage/smgr/segment/segbuffer.cpp index b82141127..df84a6ba0 100644 --- a/src/gausskernel/storage/smgr/segment/segbuffer.cpp +++ b/src/gausskernel/storage/smgr/segment/segbuffer.cpp @@ -48,8 +48,6 @@ static const int TEN_MICROSECOND = 10; #define BufHdrLocked(bufHdr) ((bufHdr)->state & BM_LOCKED) #define SegBufferIsPinned(bufHdr) ((bufHdr)->state & BUF_REFCOUNT_MASK) -static BufferDesc *SegBufferAlloc(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, - bool *foundPtr); static BufferDesc *SegStrategyGetBuffer(uint32 *buf_state); static bool SegStartBufferIO(BufferDesc *buf, bool forInput); static void SegTerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits); @@ -185,7 +183,7 @@ static uint32 SegWaitBufHdrUnlocked(BufferDesc *buf) return buf_state; } -static bool SegPinBuffer(BufferDesc *buf) +bool SegPinBuffer(BufferDesc *buf) { ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE), errmsg("[SegPinBuffer] (%u %u %u %d) %d %u ", buf->tag.rnode.spcNode, buf->tag.rnode.dbNode, @@ -246,7 +244,7 @@ static bool SegPinBufferLocked(BufferDesc *buf, const BufferTag *tag) return buf_state & BM_VALID; } -static void SegUnpinBuffer(BufferDesc *buf) +void SegUnpinBuffer(BufferDesc *buf) { ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE), errmsg("[SegUnpinBuffer] (%u %u %u %d) %d %u ", buf->tag.rnode.spcNode, buf->tag.rnode.dbNode, @@ -399,6 +397,24 @@ void SegFlushBuffer(BufferDesc *buf, SMgrRelation reln) } } +void ReportInvalidPage(RepairBlockKey key) +{ + /* record bad page, wait the pagerepair thread repair the page */ + if (CheckVerionSupportRepair() && (AmStartupProcess() || AmPageRedoWorker()) && + IsPrimaryClusterStandbyDN() && g_instance.repair_cxt.support_repair) { + XLogPhyBlock pblk_bak = {0}; + RedoPageRepairCallBack(key, pblk_bak); + log_invalid_page(key.relfilenode, key.forknum, key.blocknum, CRC_CHECK_ERROR, NULL); + ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("invalid page in block %u of relation %s", + key.blocknum, relpathperm(key.relfilenode, key.forknum)))); + return; + } + ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid page in block %u of relation %s", + key.blocknum, relpathperm(key.relfilenode, key.forknum)))); + return; +} + Buffer ReadBufferFast(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode) { bool found = false; @@ -415,8 +431,12 @@ Buffer ReadBufferFast(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, Bloc } else { seg_physical_read(spc, rnode, forkNum, blockNum, bufBlock); if (!PageIsVerified(bufBlock, blockNum)) { - ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("invalid page in block %u of relation %s", - blockNum, relpathperm(rnode, forkNum)))); + RepairBlockKey key; + key.relfilenode = rnode; + key.forknum = forkNum; + key.blocknum = blockNum; + ReportInvalidPage(key); + return InvalidBuffer; } if (!PageIsSegmentVersion(bufBlock) && !PageIsNew(bufBlock)) { ereport(PANIC, @@ -424,6 +444,10 @@ Buffer ReadBufferFast(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, Bloc blockNum, relpathperm(rnode, forkNum), PageGetPageLayoutVersion(bufBlock)))); } } + bufHdr->lsn_on_disk = PageGetLSN(bufBlock); +#ifdef USE_ASSERT_CHECKING + bufHdr->lsn_dirty = 
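/*
 * lsn_on_disk records the page LSN actually delivered by the read, and under
 * USE_ASSERT_CHECKING lsn_dirty is reset to InvalidXLogRecPtr, presumably so
 * later flush paths can assert on write ordering. Note also that a failed
 * PageIsVerified() no longer ERRORs unconditionally: ReportInvalidPage()
 * downgrades it to a WARNING plus log_invalid_page() when a standby-side
 * repair is possible (startup or redo worker, primary-cluster standby DN,
 * support_repair set), and ReadBufferFast() then returns InvalidBuffer.
 */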
InvalidXLogRecPtr; +#endif SegTerminateBufferIO(bufHdr, false, BM_VALID); } @@ -465,7 +489,7 @@ BufferDesc * FoundBufferInHashTable(int buf_id, LWLock *new_partition_lock, bool return buf; } -static BufferDesc *SegBufferAlloc(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, +BufferDesc *SegBufferAlloc(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr) { BufferDesc *buf; @@ -592,11 +616,12 @@ static BufferDesc* get_segbuf_from_candidate_list(uint32* buf_state) BufferDesc* buf = NULL; uint32 local_buf_state; int buf_id = 0; - int list_num = g_instance.ckpt_cxt_ctl->pgwr_procs.sub_num; - volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; - int list_id = beentry->st_tid > 0 ? (beentry->st_tid % list_num) : (beentry->st_sessionid % list_num); if (ENABLE_INCRE_CKPT && pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) > 0) { + int list_num = g_instance.ckpt_cxt_ctl->pgwr_procs.sub_num; + volatile PgBackendStatus* beentry = t_thrd.shemem_ptr_cxt.MyBEEntry; + int list_id = beentry->st_tid > 0 ? (beentry->st_tid % list_num) : (beentry->st_sessionid % list_num); + for (int i = 0; i < list_num; i++) { /* the pagewriter sub thread store normal buffer pool, sub thread starts from 1 */ int thread_id = (list_id + i) % list_num + 1; @@ -618,8 +643,9 @@ static BufferDesc* get_segbuf_from_candidate_list(uint32* buf_state) UnlockBufHdr(buf, local_buf_state); } } + wakeup_pagewriter_thread(); } - wakeup_pagewriter_thread(); + return NULL; } diff --git a/src/gausskernel/storage/smgr/segment/segxlog.cpp b/src/gausskernel/storage/smgr/segment/segxlog.cpp index f37bb89d2..05954cd08 100644 --- a/src/gausskernel/storage/smgr/segment/segxlog.cpp +++ b/src/gausskernel/storage/smgr/segment/segxlog.cpp @@ -455,7 +455,7 @@ static void redo_xlog_log_shrink_extent(Buffer buffer, const char* data) remain_segs_lock.unLock(); } -static void redo_xlog_deal_alloc_seg(uint8 opCode, Buffer buffer, const char* data, int data_len, TransactionId xid) +void redo_xlog_deal_alloc_seg(uint8 opCode, Buffer buffer, const char* data, int data_len, TransactionId xid) { if (opCode == SPCXLOG_INIT_SEGHEAD) { unsigned char is_seg_head = *(unsigned char *)(data + sizeof(XLogRecPtr)); @@ -472,12 +472,12 @@ static void redo_xlog_deal_alloc_seg(uint8 opCode, Buffer buffer, const char* da } } -static void redo_atomic_xlog_dispatch(uint8 opCode, RedoBufferInfo redo_buf, const char *data) +void redo_atomic_xlog_dispatch(uint8 opCode, RedoBufferInfo *redo_buf, const char *data) { - Buffer buffer = redo_buf.buf; + Buffer buffer = redo_buf->buf; ereport(DEBUG5, (errmodule(MOD_SEGMENT_PAGE), errmsg("redo_atomic_xlog_dispatch opCode: %u", opCode))); if (opCode == SPCXLOG_SET_BITMAP) { - redo_set_bitmap(redo_buf.blockinfo, buffer, data); + redo_set_bitmap(redo_buf->blockinfo, buffer, data); } else if (opCode == SPCXLOG_FREE_BITMAP) { redo_unset_bitmap(buffer, data); } else if (opCode == SPCXLOG_MAPHEAD_ALLOCATED_EXTENTS) { @@ -518,7 +518,7 @@ static void redo_atomic_xlog_dispatch(uint8 opCode, RedoBufferInfo redo_buf, con } } -static void move_extent_flush_buffer(XLogMoveExtent *xlog_data) +void move_extent_flush_buffer(XLogMoveExtent *xlog_data) { BlockNumber logic_start = ExtentIdToLogicBlockNum(xlog_data->extent_id); for (int i=0; iextent_id); i++) { @@ -601,7 +601,7 @@ static void redo_atomic_xlog(XLogReaderState *record) if (redo_action == BLK_NEEDS_REDO) { for (int j = 0; j < decoded_op.operations; j++) { - 
redo_atomic_xlog_dispatch(decoded_op.op[j], redo_buf, decoded_op.data[j]);
+            redo_atomic_xlog_dispatch(decoded_op.op[j], &redo_buf, decoded_op.data[j]);
         }
         PageSetLSN(redo_buf.pageinfo.page, redo_buf.lsn);
@@ -766,25 +766,17 @@ static void redo_space_drop(XLogReaderState *record)
     XLogDropSegmentSpace(spcNode, dbNode);
 }
-/*
- * This xlog only copy data to the new block, without modifying data in buffer. If the logic block being in the
- * buffer pool, its pblk points to the old block. The buffer descriptor can not have the logic blocknumber and the new
- * physical block number because we do not know whether we should use the old or thew new physical block for the same
- * logic block, as later segment head modification can either success or fail.
- */
-static void redo_new_page(XLogReaderState *record)
+void seg_redo_new_page_copy_and_flush(BufferTag *tag, char *data, XLogRecPtr lsn)
 {
-    BufferTag *tag = (BufferTag *)XLogRecGetData(record);
     char page[BLCKSZ];
-    errno_t er = memcpy_s(page, BLCKSZ, (char *)XLogRecGetData(record) + sizeof(BufferTag), BLCKSZ);
+    errno_t er = memcpy_s(page, BLCKSZ, data, BLCKSZ);
     securec_check(er, "\0", "\0");
-    PageSetLSN(page, record->EndRecPtr);
+    PageSetLSN(page, lsn);
     PageSetChecksumInplace(page, tag->blockNum);
     if (FORCE_FINISH_ENABLED) {
-        update_max_page_flush_lsn(record->EndRecPtr, t_thrd.proc_cxt.MyProcPid, false);
+        update_max_page_flush_lsn(lsn, t_thrd.proc_cxt.MyProcPid, false);
     }
     bool flush_old_file = false;
@@ -803,6 +795,20 @@
     t_thrd.proc->dw_pos = -1;
 }
+
+/*
+ * This xlog only copies data to the new block, without modifying data in the buffer. If the logic block is in the
+ * buffer pool, its pblk points to the old block. The buffer descriptor can not have the logic blocknumber and the new
+ * physical block number because we do not know whether we should use the old or the new physical block for the same
+ * logic block, as later segment head modification can either succeed or fail.
+ */ +static void redo_new_page(XLogReaderState *record) +{ + Assert(record != NULL); + BufferTag *tag = (BufferTag *)XLogRecGetData(record); + seg_redo_new_page_copy_and_flush(tag, (char *)XLogRecGetData(record) + sizeof(BufferTag), record->EndRecPtr); +} + void segpage_smgr_redo(XLogReaderState *record) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; diff --git a/src/gausskernel/storage/smgr/segment/space.cpp b/src/gausskernel/storage/smgr/segment/space.cpp index b2bb86c47..6b18abca9 100644 --- a/src/gausskernel/storage/smgr/segment/space.cpp +++ b/src/gausskernel/storage/smgr/segment/space.cpp @@ -104,29 +104,10 @@ void spc_write_block(SegSpace *spc, RelFileNode relNode, ForkNumber forknum, con void spc_writeback(SegSpace *spc, int extent_size, ForkNumber forknum, BlockNumber blocknum, BlockNumber nblocks) { - while (nblocks > 0) { - BlockNumber nflush = nblocks; + int egid = EXTENT_SIZE_TO_GROUPID(extent_size); + SegLogicFile *sf = spc->extent_group[egid][forknum].segfile; - int slice_start = blocknum / DF_FILE_SLICE_SIZE; - int slice_end = (blocknum + nblocks - 1) / DF_FILE_SLICE_SIZE; - - /* cross slice */ - if (slice_start != slice_end) { - nflush = DF_FILE_SLICE_SIZE - (blocknum % DF_FILE_SLICE_SIZE); - } - - int egid = EXTENT_SIZE_TO_GROUPID(extent_size); - SegPhysicalFile *sf = &spc->extent_group[egid][forknum].segfile->segfiles[slice_start]; - - off_t seekpos = (off_t)BLCKSZ * (blocknum % DF_FILE_SLICE_SIZE); - - if (sf->fd < 0) { - return; - } - pg_flush_data(sf->fd, seekpos, (off_t)BLCKSZ * nflush); - - nblocks -= nflush; - } + df_flush_data(sf, blocknum, nblocks); } BlockNumber spc_size(SegSpace *spc, BlockNumber egRelNode, ForkNumber forknum) @@ -604,8 +585,13 @@ Oid get_valid_relation_oid(Oid spcNode, Oid relNode) { Oid reloid, oldreloid; bool retry = false; + uint64 sess_inval_count; + uint64 thrd_inval_count = 0; for (;;) { - uint64 inval_count = u_sess->inval_cxt.SharedInvalidMessageCounter; + sess_inval_count = u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) { + thrd_inval_count = t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter; + } reloid = get_relation_oid(spcNode, relNode); if (retry) { @@ -627,8 +613,15 @@ Oid get_valid_relation_oid(Oid spcNode, Oid relNode) } /* No invalidation message */ - if (inval_count == u_sess->inval_cxt.SharedInvalidMessageCounter) { - return reloid; + if (EnableLocalSysCache()) { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter && + thrd_inval_count == t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter) { + return reloid; + } + } else { + if (sess_inval_count == u_sess->inval_cxt.SIMCounter) { + return reloid; + } } retry = true; oldreloid = reloid; @@ -873,6 +866,7 @@ void move_extents(SegExtentGroup *seg, BlockNumber target_size) BlockNumber victim = selector.next(); while (victim != InvalidBlockNumber) { + CHECK_FOR_INTERRUPTS(); move_one_extent(seg, victim, &ipbuf); victim = selector.next(); } diff --git a/src/gausskernel/storage/smgr/segment/xlog_atomic_op.cpp b/src/gausskernel/storage/smgr/segment/xlog_atomic_op.cpp index 611fa2d88..d425bd389 100644 --- a/src/gausskernel/storage/smgr/segment/xlog_atomic_op.cpp +++ b/src/gausskernel/storage/smgr/segment/xlog_atomic_op.cpp @@ -216,7 +216,7 @@ struct XLogAtomicOperation { void XLogStart(); void RegisterBuffer(Buffer buffer, uint8 flags, uint8 op, uint32 clean_flag); void RegisterBufData(char *data, int len, bool is_opcode); - void XLogCommit(RmgrId rmid, uint8 info, bool isupgrade, int bucket_id); + void XLogCommit(RmgrId rmid, uint8 info, int bucket_id); int CurrentBlockId(); }; 
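The get_valid_relation_oid() hunk above is the invalidation-counter idiom generalized for local system caches: a lookup result is only trusted if neither the session-level SIMCounter nor, when EnableLocalSysCache() is on, the thread-level one advanced while the lookup ran. A minimal sketch of the pattern, with hypothetical names (g_inval_counter, lookup_once) standing in for the openGauss internals:

```cpp
#include <cstdint>

// Hypothetical stand-ins: the counter that every processed invalidation
// message bumps, and a lookup that may be answered from a cache.
extern uint64_t g_inval_counter;
extern unsigned lookup_once();

unsigned lookup_with_inval_guard()
{
    for (;;) {
        uint64_t before = g_inval_counter;  // snapshot before the lookup
        unsigned result = lookup_once();    // may read soon-to-be-stale state
        if (before == g_inval_counter) {
            // No invalidation was processed while we looked, so the
            // answer cannot have gone stale underneath us.
            return result;
        }
        // An invalidation raced with the lookup; retry against the
        // freshly invalidated caches.
    }
}
```

With thread-local syscaches the guard simply snapshots both counters, which is exactly what the EnableLocalSysCache() branch in the hunk does.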
@@ -292,7 +292,7 @@ void XLogAtomicOperation::RegisterBufData(char *data, int len, bool is_opcode) stack.RegisterOpData(block_id, opData, len, is_opcode); } -void XLogAtomicOperation::XLogCommit(RmgrId rmid, uint8 info, bool isupgrade, int bucket_id) +void XLogAtomicOperation::XLogCommit(RmgrId rmid, uint8 info, int bucket_id) { /* * Do actual xlog begin/register/insert. @@ -328,7 +328,7 @@ void XLogAtomicOperation::XLogCommit(RmgrId rmid, uint8 info, bool isupgrade, in } // 4. xlog insert - XLogRecPtr recptr = XLogInsert(rmid, info, isupgrade, bucket_id); + XLogRecPtr recptr = XLogInsert(rmid, info, bucket_id); // 5. mark dirty, set lsn, and release lock for (int i = 0; i < stack.depth; i++) { @@ -413,7 +413,7 @@ void XLogAtomicOpRegisterBuffer(Buffer buffer, uint8 flags, uint8 op, uint32 cle void XLogAtomicOpCommit() { SegmentCheck(t_thrd.xlog_cxt.xlog_atomic_op != NULL); - XLogAtomicOpMgr->XLogCommit(RM_SEGPAGE_ID, XLOG_SEG_ATOMIC_OPERATION, false, SegmentBktId); + XLogAtomicOpMgr->XLogCommit(RM_SEGPAGE_ID, XLOG_SEG_ATOMIC_OPERATION, SegmentBktId); } void XLogAtomicOpReset() diff --git a/src/gausskernel/storage/smgr/segstore.cpp b/src/gausskernel/storage/smgr/segstore.cpp index 069c2aee8..efd4120fb 100755 --- a/src/gausskernel/storage/smgr/segstore.cpp +++ b/src/gausskernel/storage/smgr/segstore.cpp @@ -477,7 +477,13 @@ static bool normal_open_segment(SMgrRelation reln, int forknum, bool create) open_segment(reln, MAIN_FORKNUM, false); main_buffer = ReadSegmentBuffer(reln->seg_space, reln->seg_desc[MAIN_FORKNUM]->head_blocknum); main_head = (SegmentHead *)PageGetContents(BufferGetBlock(main_buffer)); - SegmentCheck(IsNormalSegmentHead(main_head)); + if (unlikely(!IsNormalSegmentHead(main_head))) { + ereport(PANIC, (errmodule(MOD_SEGMENT_PAGE), errmsg("Segment head magic value 0x%lx is invalid," + "head lsn 0x%lx(maybe wrong). Rnode [%u, %u, %u, %d], head blocknum %u.", + main_head->magic, main_head->lsn, reln->smgr_rnode.node.spcNode, reln->smgr_rnode.node.dbNode, + reln->smgr_rnode.node.relNode, reln->smgr_rnode.node.bucketNode, + reln->seg_desc[MAIN_FORKNUM]->head_blocknum))); + } /* * For non-main fork, the segment head is stored in the main fork segment head. @@ -534,7 +540,7 @@ CREATE_DESC: * Initialize the segment descriptor in SMgrRelationData. 
*/ SegmentDesc *fork_desc = - (SegmentDesc *)MemoryContextAlloc(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(SegmentDesc)); + (SegmentDesc *)MemoryContextAlloc(LocalSmgrStorageMemoryCxt(), sizeof(SegmentDesc)); fork_desc->head_blocknum = fork_head_blocknum; fork_desc->timeline = seg_get_drop_timeline(); SegmentCheck(fork_head_blocknum >= DF_MAP_GROUP_SIZE); @@ -872,7 +878,7 @@ static bool bucket_open_segment(SMgrRelation reln, int forknum, bool create, XLo } SegmentDesc *seg_desc = - (SegmentDesc *)MemoryContextAlloc(SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), sizeof(SegmentDesc)); + (SegmentDesc *)MemoryContextAlloc(LocalSmgrStorageMemoryCxt(), sizeof(SegmentDesc)); seg_desc->head_blocknum = head_blocknum; seg_desc->timeline = seg_get_drop_timeline(); SegmentCheck(head_blocknum >= DF_MAP_GROUP_SIZE); @@ -1427,7 +1433,7 @@ void seg_extend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, cha XLogRegisterBuffer(0, seg_buffer, REGBUF_KEEP_DATA); XLogRegisterBufData(0, (char *)&xlog_data, sizeof(xlog_data)); XLogRegisterBuffer(1, buf, REGBUF_WILL_INIT); - XLogRecPtr xlog_rec = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_SEGMENT_EXTEND, false, SegmentBktId); + XLogRecPtr xlog_rec = XLogInsert(RM_SEGPAGE_ID, XLOG_SEG_SEGMENT_EXTEND, SegmentBktId); END_CRIT_SECTION(); PageSetLSN(BufferGetPage(seg_buffer), xlog_rec); diff --git a/src/gausskernel/storage/smgr/smgr.cpp b/src/gausskernel/storage/smgr/smgr.cpp index 6ba08dbc6..210045dae 100644 --- a/src/gausskernel/storage/smgr/smgr.cpp +++ b/src/gausskernel/storage/smgr/smgr.cpp @@ -137,8 +137,6 @@ static inline int ChooseSmgrManager(RelFileNode rnode) return MD_MANAGER; } -/* local function prototypes */ -static void smgrshutdown(int code, Datum arg); /* * smgrinit(), smgrshutdown() -- Initialize or shut down storage @@ -159,7 +157,7 @@ void smgrinit(void) } /* register the shutdown proc */ - if (!IS_THREAD_POOL_SESSION) { + if (!IS_THREAD_POOL_SESSION || EnableLocalSysCache()) { on_proc_exit(smgrshutdown, 0); } @@ -169,7 +167,7 @@ void smgrinit(void) /* * on_proc_exit hook for smgr cleanup during backend shutdown */ -static void smgrshutdown(int code, Datum arg) +void smgrshutdown(int code, Datum arg) { int i; @@ -198,7 +196,7 @@ SMgrRelation smgropen(const RelFileNode& rnode, BackendId backend, int col /* = Assert(col >= 0); int fdNeeded = 1 + MAX_FORKNUM + col; - if (u_sess->storage_cxt.SMgrRelationHash == NULL) { + if (GetSMgrRelationHash() == NULL) { /* First time through: initialize the hash table */ HASHCTL hashCtrl; @@ -207,10 +205,18 @@ SMgrRelation smgropen(const RelFileNode& rnode, BackendId backend, int col /* = hashCtrl.keysize = sizeof(RelFileNodeBackend); hashCtrl.entrysize = sizeof(SMgrRelationData); hashCtrl.hash = tag_hash; - hashCtrl.hcxt = (MemoryContext)u_sess->cache_mem_cxt; - u_sess->storage_cxt.SMgrRelationHash = - hash_create("smgr relation table", 400, &hashCtrl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); - dlist_init(&u_sess->storage_cxt.unowned_reln); + if (EnableLocalSysCache()) { + hashCtrl.hcxt = t_thrd.lsc_cxt.lsc->lsc_mydb_memcxt; + t_thrd.lsc_cxt.lsc->SMgrRelationHash = hash_create("smgr relation table", 400, + &hashCtrl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + dlist_init(&t_thrd.lsc_cxt.lsc->unowned_reln); + } else { + hashCtrl.hcxt = u_sess->cache_mem_cxt; + u_sess->storage_cxt.SMgrRelationHash = hash_create("smgr relation table", 400, + &hashCtrl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + dlist_init(&u_sess->storage_cxt.unowned_reln); + } + } START_CRIT_SECTION(); @@ -218,11 +224,10 @@ 
SMgrRelation smgropen(const RelFileNode& rnode, BackendId backend, int col /* = /* Look up or create an entry */ brnode.node = rnode; brnode.backend = backend; - reln = (SMgrRelation)hash_search(u_sess->storage_cxt.SMgrRelationHash, (void*)&brnode, HASH_ENTER, &found); + reln = (SMgrRelation)hash_search(GetSMgrRelationHash(), (void*)&brnode, HASH_ENTER, &found); /* Initialize it if not present before */ if (!found) { - int forknum; int colnum; /* hash_search already filled in the lookup key */ @@ -234,8 +239,7 @@ SMgrRelation smgropen(const RelFileNode& rnode, BackendId backend, int col /* = reln->encrypt = false; temp = col + 1; reln->smgr_bcm_nblocks = (BlockNumber*)MemoryContextAllocZero( - SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), - temp * sizeof(BlockNumber)); + LocalSmgrStorageMemoryCxt(), temp * sizeof(BlockNumber)); reln->smgr_bcmarry_size = temp; for (colnum = 0; colnum < reln->smgr_bcmarry_size; colnum++) { reln->smgr_bcm_nblocks[colnum] = InvalidBlockNumber; @@ -246,29 +250,25 @@ SMgrRelation smgropen(const RelFileNode& rnode, BackendId backend, int col /* = reln->fileState = NULL; /* mark it not open */ - temp = fdNeeded; + reln->seg_space = NULL; if (reln->smgr_which == SEGMENT_MANAGER) { + reln->md_fdarray_size = fdNeeded; reln->seg_desc = (struct SegmentDesc **)MemoryContextAllocZero( - SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), temp * sizeof(struct SegmentDesc *)); + LocalSmgrStorageMemoryCxt(), fdNeeded * sizeof(struct SegmentDesc *)); reln->md_fd = NULL; - } else { + } else if (reln->smgr_which == MD_MANAGER) { + reln->md_fdarray_size = fdNeeded; reln->md_fd = (struct _MdfdVec**)MemoryContextAllocZero( - SESS_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_STORAGE), temp * sizeof(struct _MdfdVec*)); + LocalSmgrStorageMemoryCxt(), fdNeeded * sizeof(struct _MdfdVec*)); + reln->seg_desc = NULL; + } else { + reln->md_fdarray_size = 1; + reln->md_fd = NULL; reln->seg_desc = NULL; } - reln->seg_space = NULL; - reln->md_fdarray_size = temp; - for (forknum = 0; forknum < reln->md_fdarray_size; forknum++) { - if (reln->smgr_which == SEGMENT_MANAGER) { - reln->seg_desc[forknum] = NULL; - } else { - reln->md_fd[forknum] = NULL; - } - } - /* it has no owner yet */ - dlist_push_tail(&u_sess->storage_cxt.unowned_reln, &reln->node); + dlist_push_tail(getUnownedReln(), &reln->node); /* If it is a bucket smgr node, it should be linked in its parent smgr node */ dlist_init(&reln->bucket_smgr_head); @@ -359,7 +359,7 @@ void smgrclearowner(SMgrRelation *owner, SMgrRelation reln) /* unset our reference to the owner */ reln->smgr_owner = NULL; - dlist_push_tail(&u_sess->storage_cxt.unowned_reln, &reln->node); + dlist_push_tail(getUnownedReln(), &reln->node); } /* @@ -414,7 +414,7 @@ void smgrclose(SMgrRelation reln, BlockNumber blockNum) } } - if (hash_search(u_sess->storage_cxt.SMgrRelationHash, (void *)&(reln->smgr_rnode), HASH_REMOVE, NULL) == NULL) { + if (hash_search(GetSMgrRelationHash(), (void *)&(reln->smgr_rnode), HASH_REMOVE, NULL) == NULL) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("SMgrRelation hashtable corrupted"))); } else { pfree_ext(reln->smgr_bcm_nblocks); @@ -440,12 +440,12 @@ void smgrcloseall(void) SMgrRelation reln; /* Nothing to do if hashtable not set up */ - if (u_sess->storage_cxt.SMgrRelationHash == NULL) { + if (GetSMgrRelationHash() == NULL) { return; } bool smgr_has_children = false; - hash_seq_init(&status, u_sess->storage_cxt.SMgrRelationHash); + hash_seq_init(&status, GetSMgrRelationHash()); while ((reln = (SMgrRelation)hash_seq_search(&status)) != 
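/*
 * GetSMgrRelationHash() hides the same switch spelled out in smgropen()
 * above: with EnableLocalSysCache() the SMgrRelation table and the
 * unowned_reln list move into t_thrd.lsc_cxt.lsc (thread lifetime, mydb
 * memory context), otherwise they stay in u_sess->storage_cxt as before,
 * which is why every former direct u_sess access now goes through the
 * accessor functions.
 */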
NULL) { if (smgrhaschildern(reln)) { @@ -460,7 +460,7 @@ void smgrcloseall(void) } if (smgr_has_children) { - hash_seq_init(&status, u_sess->storage_cxt.SMgrRelationHash); + hash_seq_init(&status, GetSMgrRelationHash()); while ((reln = (SMgrRelation)hash_seq_search(&status)) != NULL) { /* In the second loop, there are only hash bucket parent nodes existing in the hashtable. */ @@ -469,7 +469,27 @@ void smgrcloseall(void) } } } +static void smgrcleanbolcknum(SMgrRelation reln) +{ + reln->smgr_targblock = InvalidBlockNumber; +} +void smgrcleanblocknumall(void) +{ + HASH_SEQ_STATUS status; + SMgrRelation reln; + + /* Nothing to do if hashtable not set up */ + if (GetSMgrRelationHash() == NULL) { + return; + } + + hash_seq_init(&status, GetSMgrRelationHash()); + + while ((reln = (SMgrRelation)hash_seq_search(&status)) != NULL) { + smgrcleanbolcknum(reln); + } +} /* * smgrclosenode() -- Close SMgrRelation object for given RelFileNode, * if one exists. @@ -483,11 +503,11 @@ void smgrclosenode(const RelFileNodeBackend &rnode) SMgrRelation reln; /* Nothing to do if hashtable not set up */ - if (u_sess->storage_cxt.SMgrRelationHash == NULL) { + if (GetSMgrRelationHash() == NULL) { return; } - reln = (SMgrRelation)hash_search(u_sess->storage_cxt.SMgrRelationHash, (void *)&rnode, HASH_FIND, NULL); + reln = (SMgrRelation)hash_search(GetSMgrRelationHash(), (void *)&rnode, HASH_FIND, NULL); if (reln != NULL) { smgrclose(reln); } @@ -865,7 +885,9 @@ void partition_create_new_storage(Relation rel, Partition part, const RelFileNod /* * Schedule unlinking of the old storage at transaction commit. */ - PartitionDropStorage(rel, part); + if (!u_sess->attr.attr_storage.enable_recyclebin) { + PartitionDropStorage(rel, part); + } } /* @@ -889,8 +911,9 @@ void AtEOXact_SMgr(void) * incurs closing all its children node. dlist_foreach_modify does not allow * removing the adjacent nodes. */ - while (!dlist_is_empty(&u_sess->storage_cxt.unowned_reln)) { - dlist_node *cur = u_sess->storage_cxt.unowned_reln.head.next; + dlist_head *unowned_reln = getUnownedReln(); + while (!dlist_is_empty(unowned_reln)) { + dlist_node *cur = unowned_reln->head.next; SMgrRelation rel = dlist_container(SMgrRelationData, node, cur); Assert(rel->smgr_owner == NULL); smgrclose(rel); diff --git a/src/gausskernel/storage/sync/knl_usync.cpp b/src/gausskernel/storage/sync/knl_usync.cpp index faf1413a9..87a627683 100644 --- a/src/gausskernel/storage/sync/knl_usync.cpp +++ b/src/gausskernel/storage/sync/knl_usync.cpp @@ -26,6 +26,7 @@ #include "commands/tablespace.h" #include "portability/instr_time.h" #include "postmaster/bgwriter.h" +#include "postmaster/pagewriter.h" #include "storage/buf/bufmgr.h" #include "storage/ipc.h" #include "storage/smgr/segment.h" @@ -109,17 +110,9 @@ static const SyncOps SYNCSW[] = { static const int NSync = lengthof(SYNCSW); -/* - * Initialize data structures for the file sync tracking. - */ -void InitSync(void) +void InitPendingOps(void) { - /* - * Create pending-operations hashtable if we need it. Currently, we need - * it if we are standalone (not under a postmaster) or if we are a startup - * or checkpointer auxiliary process. 
- */
- if (!IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess()) {
+ if (!IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess() || AmPageWriterMainProcess()) {
 HASHCTL hashCtl;
 errno_t rc;
@@ -143,6 +136,21 @@ void InitSync(void)
 hashCtl.hash = tag_hash;
 u_sess->storage_cxt.pendingOps = hash_create("Pending Ops Table", 100L, &hashCtl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ }
+}
+
+/*
+ * Initialize data structures for the file sync tracking.
+ */
+void InitSync(void)
+{
+ /*
+ * Create pending-operations hashtable if we need it. Currently, we need
+ * it if we are standalone (not under a postmaster) or if we are a startup,
+ * checkpointer, or pagewriter main auxiliary process.
+ */
+ if (!IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess() || AmPageWriterMainProcess()) {
+ InitPendingOps();
+ u_sess->storage_cxt.pendingUnlinks = NIL;
+ }
 }
@@ -223,49 +231,34 @@ void SyncPostCheckpoint(void)
 * (note it might try to delete list entries).
 */
 if (--absorbCounter <= 0) {
- AbsorbFsyncRequests();
+ CkptAbsorbFsyncRequests();
 absorbCounter = UNLINKS_PER_ABSORB;
 }
 }
+
+ /*
+ * 1. In incremental checkpoint mode, the checkpointer thread must reset the pendingOps hashtable;
+ * 2. in full checkpoint mode, ProcessSyncRequests removes the entries from pendingOps itself.
+ */
+ if (ENABLE_INCRE_CKPT) {
+ hash_destroy(u_sess->storage_cxt.pendingOps);
+ InitPendingOps();
+ }
 }
-/*
- * ProcessSyncRequests() -- Process queued fsync requests.
- */
-void ProcessSyncRequests(void)
+static void AbsorbFsyncRequests(void)
+{
+ if (AmPageWriterMainProcess()) {
+ PgwrAbsorbFsyncRequests();
+ } else {
+ CkptAbsorbFsyncRequests();
+ }
+}
+
+static void HandleAbnormalSyncExit(bool syncInProgress)
 {
- static bool syncInProgress = false;
 HASH_SEQ_STATUS hstat;
 PendingFsyncEntry *entry;
- int absorbCounter;
-
- /* Statistics on sync times */
- int processed = 0;
- instr_time syncStart;
- instr_time syncEnd;
- instr_time syncDiff;
- uint64 elapsed;
- uint64 longest = 0;
- uint64 totalElapsed = 0;
-
- /*
- * This is only called during checkpoints, and checkpoints should only
- * occur in processes that have created a pendingOps. 
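InitPendingOps() above is gated on the same process predicate at both call sites; a small sketch of that contract (the helper name is illustrative, not from the patch): only these processes keep a local pendingOps table, and every other backend must forward its sync requests instead (see RegisterSyncRequest() further down).

static bool OwnsLocalPendingOpsSketch(void)
{
    /* standalone backend, startup, checkpointer, or pagewriter main thread */
    return !IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess() ||
           AmPageWriterMainProcess();
}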
+ */ + if (!u_sess->storage_cxt.pendingOps) { + ereport(ERROR, (errmsg("cannot sync without a pendingOps table"))); + } + + /* + * If we are in the checkpointer, the sync had better include all fsync + * requests that were queued by backends up to this point. The tightest + * race condition that could occur is that a buffer that must be written + * and fsync'd for the checkpoint could have been dumped by a backend just + * before it was visited by BufferSync(). We know the backend will have + * queued an fsync request before clearing the buffer's dirtybit, so we + * are safe as long as we do an Absorb after completing BufferSync(). + */ + AbsorbFsyncRequests(); + + HandleAbnormalSyncExit(syncInProgress); /* Advance counter so that new hashtable entries are distinguishable */ u_sess->storage_cxt.sync_cycle_ctr++; @@ -371,8 +405,8 @@ void ProcessSyncRequests(void) processed++; if (u_sess->attr.attr_common.log_checkpoints) { - elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec", - processed, path, (double) elapsed / MSEC_PER_MICROSEC); + ereport(DEBUG1, (errmsg("checkpoint sync: number=%d file=%s time=%.3f msec", + processed, path, (double) elapsed / MSEC_PER_MICROSEC))); } break; /* out of retry loop */ } @@ -388,6 +422,7 @@ void ProcessSyncRequests(void) (errmsg("could not fsync file \"%s\": %m, this relation has been remove", path))); break; } + /* * Absorb incoming requests and check to see if a cancel arrived * for this relation fork. @@ -416,19 +451,43 @@ void ProcessSyncRequests(void) /* We are done with this entry, remove it */ if (hash_search(u_sess->storage_cxt.pendingOps, &entry->tag, HASH_REMOVE, NULL) == NULL) { - elog(ERROR, "pendingOps corrupted"); + ereport(ERROR, (errmsg("pendingOps corrupted"))); } } /* Return sync performance metrics for report at checkpoint end */ - t_thrd.xlog_cxt.CheckpointStats->ckpt_sync_rels = processed; - t_thrd.xlog_cxt.CheckpointStats->ckpt_longest_sync = longest; - t_thrd.xlog_cxt.CheckpointStats->ckpt_agg_sync_time = totalElapsed; + if (!AmPageWriterMainProcess()) { + t_thrd.xlog_cxt.CheckpointStats->ckpt_sync_rels = processed; + t_thrd.xlog_cxt.CheckpointStats->ckpt_longest_sync = longest; + t_thrd.xlog_cxt.CheckpointStats->ckpt_agg_sync_time = totalElapsed; + } /* Flag successful completion of ProcessSyncRequests */ syncInProgress = false; } +void ProcessUnlinkList(const FileTag *ftag) +{ + ListCell *cell; + ListCell *prev; + ListCell *next; + + if (!AmPageWriterMainProcess()) { + prev = NULL; + for (cell = list_head(u_sess->storage_cxt.pendingUnlinks); cell; cell = next) { + PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell); + next = lnext(cell); + if (entry->tag.handler == ftag->handler && SYNCSW[ftag->handler].matchfiletag(ftag, &entry->tag)) { + u_sess->storage_cxt.pendingUnlinks = + list_delete_cell(u_sess->storage_cxt.pendingUnlinks, cell, prev); + pfree(entry); + } else { + prev = cell; + } + } + } +} + /* * RememberSyncRequest() -- callback from checkpointer side of sync request * @@ -453,9 +512,6 @@ void RememberSyncRequest(const FileTag *ftag, SyncRequestType type) } else if (type == SYNC_FILTER_REQUEST) { HASH_SEQ_STATUS hstat; PendingFsyncEntry *entry; - ListCell *cell; - ListCell *prev; - ListCell *next; /* Cancel matching fsync requests */ hash_seq_init(&hstat, u_sess->storage_cxt.pendingOps); @@ -466,19 +522,10 @@ void RememberSyncRequest(const FileTag *ftag, SyncRequestType type) } } - /* Remove matching unlink requests */ - prev = NULL; - for (cell = list_head(u_sess->storage_cxt.pendingUnlinks); 
cell; cell = next) {
- PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
- next = lnext(cell);
- if (entry->tag.handler == ftag->handler && SYNCSW[ftag->handler].matchfiletag(ftag, &entry->tag)) {
- u_sess->storage_cxt.pendingUnlinks = list_delete_cell(u_sess->storage_cxt.pendingUnlinks, cell, prev);
- pfree(entry);
- } else {
- prev = cell;
- }
- }
+ /* Remove matching unlink requests; only the checkpointer thread handles the pendingUnlinks list */
+ ProcessUnlinkList(ftag);
 } else if (type == SYNC_UNLINK_REQUEST) {
+ Assert(!AmPageWriterMainProcess());
 /* Unlink request: put it in the linked list */
 MemoryContext oldcxt = MemoryContextSwitchTo(u_sess->storage_cxt.pendingOpsCxt);
 PendingUnlinkEntry *entry = (PendingUnlinkEntry*)palloc(sizeof(PendingUnlinkEntry));
@@ -511,6 +558,50 @@ void RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
 }
 }
+static bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
+{
+ bool ret = false;
+
+ /*
+ * Notify the checkpointer (or, under incremental checkpoint, the pagewriter)
+ * about it. If we fail to queue a message in retryOnError mode, we have to
+ * sleep and try again ... ugly, but hopefully won't happen often.
+ *
+ * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an
+ * error in the case of SYNC_UNLINK_REQUEST would leave the
+ * no-longer-used file still present on disk, which would be bad, so
+ * I'm inclined to assume that the checkpointer will always empty the
+ * queue soon.
+ */
+ if (USE_CKPT_THREAD_SYNC) {
+ ret = CkptForwardSyncRequest(ftag, type);
+ } else {
+ switch (type) {
+ case SYNC_REQUEST:
+ ret = PgwrForwardSyncRequest(ftag, type);
+ break;
+ case SYNC_UNLINK_REQUEST:
+ ret = CkptForwardSyncRequest(ftag, type);
+ break;
+ case SYNC_FORGET_REQUEST:
+ ret = PgwrForwardSyncRequest(ftag, type);
+ break;
+ case SYNC_FILTER_REQUEST:
+ ret = PgwrForwardSyncRequest(ftag, type);
+ if (!ret) {
+ break;
+ }
+ ret = CkptForwardSyncRequest(ftag, type);
+ break;
+ default:
+ ereport(ERROR,
+ (errmsg("incremental checkpoint: unexpected SyncRequestType %d", type)));
+ break;
+ }
+ }
+ return ret;
+}
+
 /*
 * Register the sync request locally, or forward it to the checkpointer.
 *
@@ -519,7 +610,7 @@ void RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
 */
bool RegisterSyncRequest(const FileTag *ftag, SyncRequestType type, bool retryOnError)
{
- bool ret;
+ bool ret = false;
 if (u_sess->storage_cxt.pendingOps != NULL) {
 /* standalone backend or startup process: fsync state is local */
@@ -529,9 +620,10 @@ bool RegisterSyncRequest(const FileTag *ftag, SyncRequestType type, bool retryOn
 for (;;) {
 /*
- * Notify the checkpointer about it. If we fail to queue a message in
- * retryOnError mode, we have to sleep and try again ... ugly, but
- * hopefully won't happen often.
+ * In incremental checkpoint mode, notify the pagewriter; in full
+ * checkpoint mode, notify the checkpointer. If we fail
+ * to queue a message in retryOnError mode, we have to sleep
+ * and try again ... ugly, but hopefully won't happen often.
 *
 * XXX should we CHECK_FOR_INTERRUPTS in this loop? 
Escaping with an * error in the case of SYNC_UNLINK_REQUEST would leave the @@ -586,3 +678,252 @@ void ForgetDatabaseSyncRequests(Oid dbid)
 }
 }
 }
+
+/*
+ * Because pagewriter.cpp exceeds the maximum file line limit, some checkpoint-related functions are moved here.
+ */
+
+const float HALF = 0.5;
+static bool CompactPageWriterRequestQueue(void)
+{
+ bool* skip_slot = NULL;
+ int preserve_count = 0;
+ int num_skipped = 0;
+ IncreCkptSyncShmemStruct *incre_ckpt_sync_shmem = g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem;
+
+ /* Initialize skip_slot array */
+ skip_slot = (bool*)palloc0(sizeof(bool) * incre_ckpt_sync_shmem->num_requests);
+
+ /* must hold the request queue in exclusive mode */
+ Assert(LWLockHeldByMe(incre_ckpt_sync_shmem->sync_queue_lwlock));
+
+ num_skipped = getDuplicateRequest(incre_ckpt_sync_shmem->requests, incre_ckpt_sync_shmem->num_requests, skip_slot);
+ if (num_skipped == 0) {
+ pfree(skip_slot);
+ return false;
+ }
+
+ /* We found some duplicates; remove them. */
+ for (int n = 0; n < incre_ckpt_sync_shmem->num_requests; n++) {
+ if (skip_slot[n])
+ continue;
+ incre_ckpt_sync_shmem->requests[preserve_count++] = incre_ckpt_sync_shmem->requests[n];
+ }
+
+ ereport(DEBUG1,
+ (errmsg("pagewriter compacted fsync request queue from %d entries to %d entries",
+ incre_ckpt_sync_shmem->num_requests, preserve_count)));
+
+ incre_ckpt_sync_shmem->num_requests = preserve_count;
+ pfree(skip_slot);
+ return true;
+}
+
+/* PgwrForwardSyncRequest
+ * Forward a file-fsync request from a backend to the pagewriter.
+ */
+bool PgwrForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
+{
+ CheckpointerRequest* request = NULL;
+ bool too_full = false;
+ IncreCkptSyncShmemStruct *incre_ckpt_sync_shmem = g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem;
+ LWLock *sync_queue_lwlock = incre_ckpt_sync_shmem->sync_queue_lwlock;
+
+ if (AmPageWriterMainProcess()) {
+ ereport(ERROR,
+ (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
+ errmsg("PgwrForwardSyncRequest must not be called in pagewriter main thread")));
+ }
+
+ LWLockAcquire(sync_queue_lwlock, LW_EXCLUSIVE);
+
+ /*
+ * If the pagewriter main thread isn't running or the request queue is full, the
+ * backend will have to perform its own fsync request. But before forcing
+ * that to happen, we can try to compact the request queue.
+ */
+ if (incre_ckpt_sync_shmem->pagewritermain_pid == 0 || (incre_ckpt_sync_shmem->num_requests >=
+ incre_ckpt_sync_shmem->max_requests && !CompactPageWriterRequestQueue())) {
+ LWLockRelease(sync_queue_lwlock);
+ return false;
+ }
+
+ /* OK, insert request */
+ request = &incre_ckpt_sync_shmem->requests[incre_ckpt_sync_shmem->num_requests++];
+ request->ftag = *ftag;
+ request->type = type;
+
+ /* If queue is more than half full, nudge the pagewriter to empty it */
+ too_full = (incre_ckpt_sync_shmem->num_requests >= incre_ckpt_sync_shmem->max_requests * HALF);
+
+ LWLockRelease(sync_queue_lwlock);
+
+ /* wake up the pagewriter main thread after releasing the lock */
+ if (too_full && g_instance.proc_base->pgwrMainThreadLatch) {
+ SetLatch(g_instance.proc_base->pgwrMainThreadLatch);
+ }
+
+ return true;
+}
+
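PgwrForwardSyncRequest() above returns false when no pagewriter is running, or when the queue stays full even after compaction. A usage sketch of the caller-side contract (illustrative, not a verbatim caller; the FileTag setup and the local fsync are elided to comments):

FileTag tag;
/* ... filled in by the md/segment layer for the dirty file ... */
if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */)) {
    /* Queue full and not compactable, or nobody to absorb the request:
     * the backend must fsync the file itself instead of queueing. */
}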
+/* PgwrAbsorbFsyncRequests
+ * The pagewriter main thread absorbs fsync requests from backends and pagewriter sub-threads.
+ */
+void PgwrAbsorbFsyncRequests(void)
+{
+ CheckpointerRequest* requests = NULL;
+ CheckpointerRequest* request = NULL;
+ IncreCkptSyncShmemStruct *incre_ckpt_sync_shmem = g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem;
+ LWLock *sync_queue_lwlock = incre_ckpt_sync_shmem->sync_queue_lwlock;
+ int n;
+
+ Assert(AmPageWriterMainProcess());
+
+ /*
+ * We have to PANIC if we fail to absorb all the pending requests (eg,
+ * because our hashtable runs out of memory). This is because the system
+ * cannot run safely if we are unable to fsync what we have been told to
+ * fsync. Fortunately, the hashtable is so small that the problem is
+ * quite unlikely to arise in practice.
+ */
+ START_CRIT_SECTION();
+
+ /* We try to avoid holding the lock for a long time by copying the request array. */
+ LWLockAcquire(sync_queue_lwlock, LW_EXCLUSIVE);
+
+ n = incre_ckpt_sync_shmem->num_requests;
+
+ if (n > 0) {
+ errno_t rc;
+ requests = (CheckpointerRequest*)palloc(n * sizeof(CheckpointerRequest));
+ rc = memcpy_s(requests, n * sizeof(CheckpointerRequest), incre_ckpt_sync_shmem->requests,
+ n * sizeof(CheckpointerRequest));
+ securec_check(rc, "\0", "\0");
+ }
+
+ incre_ckpt_sync_shmem->num_requests = 0;
+
+ LWLockRelease(sync_queue_lwlock);
+
+ for (request = requests; n > 0; request++, n--) {
+ RememberSyncRequest(&request->ftag, request->type);
+ }
+ if (requests != NULL) {
+ pfree(requests);
+ }
+ END_CRIT_SECTION();
+}
+
+/*
+ * PageWriterSyncWithAbsorption() -- Sync files to disk and reset fsync flags.
+ * In incremental checkpoint mode, the pagewriter main thread handles the file sync.
+ */
+void PageWriterSyncWithAbsorption(void)
+{
+ volatile IncreCkptSyncShmemStruct* cps = g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem;
+
+ SpinLockAcquire(&cps->sync_lock);
+ cps->fsync_start++;
+ SpinLockRelease(&cps->sync_lock);
+
+ ProcessSyncRequests();
+
+ SpinLockAcquire(&cps->sync_lock);
+ cps->fsync_done = cps->fsync_start;
+ SpinLockRelease(&cps->sync_lock);
+}
+
+const int WAIT_THREAD_START = 600; /* sleep 0.1 sec per retry: 1 min = 600 * 0.1 sec */
+void RequestPgwrSync(void)
+{
+ volatile IncreCkptSyncShmemStruct* cps = g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem;
+
+ /*
+ * Send signal to request sync. It's possible that the pagewriter main thread
+ * hasn't started yet, or is in process of restarting, so we will retry a
+ * few times if needed. Also, if not told to wait for the pagewriter main thread to do sync,
+ * we consider failure to send the signal to be nonfatal and merely
+ * LOG it.
+ */
+ for (int ntries = 0;; ntries++) {
+ if (cps->pagewritermain_pid == 0) {
+ /* max wait 1min */
+ if (ntries >= WAIT_THREAD_START || pmState == PM_SHUTDOWN) {
+ ereport(LOG, (errmsg("could not request pagewriter to handle sync because pagewriter is not running")));
+ break;
+ }
+ } else if (gs_signal_send(cps->pagewritermain_pid, SIGINT) != 0) {
+ /* max wait 1min */
+ if (ntries >= WAIT_THREAD_START) {
+ ereport(LOG,
+ (errmsg("could not signal pagewriter main thread: %m, thread pid is %lu",
+ cps->pagewritermain_pid)));
+ break;
+ }
+ } else {
+ break; /* signal sent successfully */
+ }
+
+ CHECK_FOR_INTERRUPTS();
+ pg_usleep(100000L); /* wait 0.1 sec, then retry */
+ }
+ return;
+}
+
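PageWriterSyncWithAbsorption() above bumps fsync_start before syncing and publishes fsync_done afterwards; PageWriterSync() below waits on those counters. A self-contained sketch of the wraparound-tolerant test it uses, with a worked example in the comment:

#include <cstdint>

/* With fsync_start = 7 and fsync_done = 5, done - start = -2: keep waiting.
 * Once fsync_done reaches 7, done - start = 0 and the wait ends. Comparing
 * the signed difference, rather than done >= start, keeps the test correct
 * "in a modulo sense" if the counters ever wrap. */
static bool SyncCaughtUp(int64_t fsyncDone, int64_t fsyncStart)
{
    return fsyncDone - fsyncStart >= 0;
}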
+/*
+ * PageWriterSync() -- File sync before the dw file can be truncated or recycled.
+ * Normally, file sync is handled solely by the pagewriter main thread.
+ * Standalone backends, as well as the startup process performing dw init,
+ * handle fsync requests themselves.
+ */
+void PageWriterSync(void)
+{
+ Assert(ENABLE_INCRE_CKPT);
+ /* In incremental checkpoint mode, the checkpointer thread only handles the unlink list */
+ if (u_sess->storage_cxt.pendingOps && !AmCheckpointerProcess()) {
+ Assert(!IsUnderPostmaster || AmStartupProcess() || AmPageWriterMainProcess());
+ ProcessSyncRequests();
+ } else {
+ int64 old_fsync_start = 0;
+ int64 new_fsync_start = 0;
+ int64 new_fsync_done = 0;
+ volatile IncreCkptSyncShmemStruct* cps = g_instance.ckpt_cxt_ctl->incre_ckpt_sync_shmem;
+ SpinLockAcquire(&cps->sync_lock);
+ old_fsync_start = cps->fsync_start;
+ SpinLockRelease(&cps->sync_lock);
+
+ RequestPgwrSync();
+
+ /* Wait for a new sync to start. */
+ for (;;) {
+ SpinLockAcquire(&cps->sync_lock);
+ new_fsync_start = cps->fsync_start;
+ SpinLockRelease(&cps->sync_lock);
+
+ if (new_fsync_start != old_fsync_start) {
+ break;
+ }
+
+ CHECK_FOR_INTERRUPTS();
+ pg_usleep(100000L);
+ }
+
+ /*
+ * We are waiting for fsync_done >= new_fsync_start, in a modulo sense.
+ */
+ for (;;) {
+ SpinLockAcquire(&cps->sync_lock);
+ new_fsync_done = cps->fsync_done;
+ SpinLockRelease(&cps->sync_lock);
+
+ if (new_fsync_done - new_fsync_start >= 0) {
+ break;
+ }
+
+ CHECK_FOR_INTERRUPTS();
+ pg_usleep(100000L);
+ }
+ }
+
+ return;
+} \ No newline at end of file
diff --git a/src/gausskernel/storage/tcap/tcap_drop.cpp b/src/gausskernel/storage/tcap/tcap_drop.cpp
index 72bccdccb..930c7c2c7 100644
--- a/src/gausskernel/storage/tcap/tcap_drop.cpp
+++ b/src/gausskernel/storage/tcap/tcap_drop.cpp
@@ -1,1312 +1,1324 @@
-/*
- * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * ---------------------------------------------------------------------------------------
- *
- * tcap_drop.cpp
- * Routines to support Timecapsule `Recyclebin-based query, restore`.
- * We use Tr prefix to indicate it in following coding. 
- * - * IDENTIFICATION - * src/gausskernel/storage/tcap/tcap_drop.cpp - * - * --------------------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "pgstat.h" -#include "access/reloptions.h" -#include "access/sysattr.h" -#include "access/xlog.h" -#include "catalog/dependency.h" -#include "catalog/heap.h" -#include "catalog/index.h" -#include "catalog/indexing.h" -#include "catalog/objectaccess.h" -#include "catalog/pg_collation_fn.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_conversion_fn.h" -#include "catalog/pg_conversion.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension_data_source.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_foreign_server.h" -#include "catalog/pg_job.h" -#include "catalog/pg_language.h" -#include "catalog/pg_largeobject.h" -#include "catalog/pg_object.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_opfamily.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_recyclebin.h" -#include "catalog/pg_rewrite.h" -#include "catalog/pg_rlspolicy.h" -#include "catalog/pg_synonym.h" -#include "catalog/pg_tablespace.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_ts_config.h" -#include "catalog/pg_ts_dict.h" -#include "catalog/pg_ts_parser.h" -#include "catalog/pg_ts_template.h" -#include "catalog/pgxc_class.h" -#include "catalog/storage.h" -#include "commands/comment.h" -#include "commands/dbcommands.h" -#include "commands/directory.h" -#include "commands/extension.h" -#include "commands/proclang.h" -#include "commands/schemacmds.h" -#include "commands/seclabel.h" -#include "commands/sec_rls_cmds.h" -#include "commands/tablecmds.h" -#include "commands/tablespace.h" -#include "commands/trigger.h" -#include "commands/typecmds.h" -#include "executor/node/nodeModifyTable.h" -#include "rewrite/rewriteRemove.h" -#include "storage/lmgr.h" -#include "storage/predicate.h" -#include "storage/smgr/relfilenode.h" -#include "utils/acl.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/relcache.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" - -#include "storage/tcap.h" -#include "storage/tcap_impl.h" - -static void TrRenameClass(TrObjDesc *baseDesc, ObjectAddress *object, const char *newName) -{ - Relation rel; - HeapTuple tup; - HeapTuple newtup; - char rbname[NAMEDATALEN]; - Datum values[Natts_pg_class] = { 0 }; - bool nulls[Natts_pg_class] = { false }; - bool replaces[Natts_pg_class] = { false }; - Oid relid = object->objectId; - errno_t rc = EOK; - - if (newName) { - rc = strncpy_s(rbname, NAMEDATALEN, newName, strlen(newName)); - securec_check_ss_c(rc, "\0", "\0"); - } else { - TrGenObjName(rbname, object->classId, relid); - } - - replaces[Anum_pg_class_relname - 1] = true; - values[Anum_pg_class_relname - 1] = CStringGetDatum(rbname); - - rel = heap_open(RelationRelationId, RowExclusiveLock); - - tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tup)) { - ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid))); - } - - newtup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces); - - simple_heap_update(rel, &newtup->t_self, newtup); - - CatalogUpdateIndexes(rel, newtup); - - ReleaseSysCache(tup); - - heap_freetuple_ext(newtup); - - heap_close(rel, 
RowExclusiveLock); -} - -static void TrRenameCommon(TrObjDesc *baseDesc, ObjectAddress *object, Oid relid, int natts, int oidAttrNum, - Oid oidIndexId, char *objTag) -{ - Relation rel; - HeapTuple tup; - HeapTuple newtup; - char rbname[NAMEDATALEN]; - Datum *values = (Datum *)palloc0(sizeof(Datum) * natts); - bool *nulls = (bool *)palloc0(sizeof(bool) * natts); - bool *replaces = (bool *)palloc0(sizeof(bool) * natts); - ScanKeyData skey[1]; - SysScanDesc sd; - - TrGenObjName(rbname, object->classId, object->objectId); - - replaces[oidAttrNum - 1] = true; - values[oidAttrNum - 1] = CStringGetDatum(rbname); - - rel = heap_open(relid, RowExclusiveLock); - - ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId)); - sd = systable_beginscan(rel, oidIndexId, true, NULL, 1, skey); - - tup = systable_getnext(sd); - if (!HeapTupleIsValid(tup)) { - pfree(values); - pfree(nulls); - pfree(replaces); - ereport(ERROR, - (errcode(ERRCODE_NO_DATA_FOUND), errmsg("could not find tuple for %s %u", objTag, object->objectId))); - } - - newtup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces); - - simple_heap_update(rel, &newtup->t_self, newtup); - - CatalogUpdateIndexes(rel, newtup); - - heap_freetuple_ext(newtup); - - systable_endscan(sd); - - heap_close(rel, RowExclusiveLock); - - pfree(values); - pfree(nulls); - pfree(replaces); -} - -static void TrDeleteBaseid(Oid baseid) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcybaseid, BTEqualStrategyNumber, F_INT8EQ, ObjectIdGetDatum(baseid)); - - sd = systable_beginscan(rbRel, RecyclebinBaseidIndexId, true, NULL, 1, skey); - while (HeapTupleIsValid(tup = systable_getnext(sd))) { - simple_heap_delete(rbRel, &tup->t_self); - } - - systable_endscan(sd); - heap_close(rbRel, RowExclusiveLock); - - /* - * CommandCounterIncrement here to ensure that preceding changes are all - * visible to the next deletion step. - */ - CommandCounterIncrement(); -} - -static void TrDeleteId(Oid id) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); - - ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(id)); - - sd = systable_beginscan(rbRel, RecyclebinIdIndexId, true, NULL, 1, skey); - if (HeapTupleIsValid(tup = systable_getnext(sd))) { - simple_heap_delete(rbRel, &tup->t_self); - } - - systable_endscan(sd); - heap_close(rbRel, RowExclusiveLock); - - /* - * CommandCounterIncrement here to ensure that preceding changes are all - * visible to the next deletion step. 
- */ - CommandCounterIncrement(); -} - -static inline bool TrNeedLogicDrop(const ObjectAddress *object) -{ - return object->rbDropMode == RB_DROP_MODE_LOGIC; -} - -static bool TrCanPurge(const TrObjDesc *baseDesc, const ObjectAddress *object, char relKind) -{ - Relation depRel; - SysScanDesc sd; - HeapTuple tuple; - ScanKeyData key[3]; - int nkeys; - bool found = false; - - if (relKind != RELKIND_INDEX && relKind != RELKIND_RELATION) { - return false; - } - - depRel = heap_open(DependRelationId, AccessShareLock); - - ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->classId)); - ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId)); - nkeys = 2; - if (object->objectSubId != 0) { - ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(object->objectSubId)); - nkeys = 3; - } - - sd = systable_beginscan(depRel, DependDependerIndexId, true, NULL, nkeys, key); - while (HeapTupleIsValid(tuple = systable_getnext(sd))) { - Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple); - if (depForm->refclassid == RelationRelationId && depForm->refobjid == baseDesc->relid) { - if (depForm->deptype != DEPENDENCY_AUTO) { - found = false; - break; - } - found = true; - } - } - - systable_endscan(sd); - heap_close(depRel, AccessShareLock); - return found; -} - -static void TrDoDropIndex(TrObjDesc *baseDesc, ObjectAddress *object) -{ - Assert(object->objectSubId == 0); - - if (TrNeedLogicDrop(object)) { - TrObjDesc desc = *baseDesc; - - if (!TR_IS_BASE_OBJ(baseDesc, object)) { - /* Deletion lock already accquired before single object drop. */ - Relation rel = relation_open(object->objectId, NoLock); - - TrDescInit(rel, &desc, RB_OPER_DROP, TrGetObjType(RelationGetNamespace(rel), RELKIND_INDEX), - TrCanPurge(baseDesc, object, RelationGetRelkind(rel))); - relation_close(rel, NoLock); - - TrDescWrite(&desc); - } - - TrRenameClass(baseDesc, object, desc.name); - } else { - index_drop(object->objectId, false); - } - - return; -} - -static void TrDoDropTable(TrObjDesc *baseDesc, ObjectAddress *object, char relKind) -{ - if (TrNeedLogicDrop(object)) { - TrObjDesc desc; - - if (object->objectSubId != 0 || relKind == RELKIND_VIEW || relKind == RELKIND_COMPOSITE_TYPE || - relKind == RELKIND_FOREIGN_TABLE) { - TrRenameClass(baseDesc, object, NULL); - return; - } - - desc = *baseDesc; - if (!TR_IS_BASE_OBJ(baseDesc, object)) { - /* Deletion lock already accquired before single object drop. */ - Relation rel = relation_open(object->objectId, NoLock); - - TrDescInit(rel, &desc, RB_OPER_DROP, TrGetObjType(InvalidOid, relKind), - TrCanPurge(baseDesc, object, relKind)); - relation_close(rel, NoLock); - - TrDescWrite(&desc); - } - - TrRenameClass(baseDesc, object, desc.name); - - return; - } - - /* - * relation_open() must be before the heap_drop_with_catalog(). If you reload - * relation after drop, it may cause other exceptions during the drop process. 
- */ - if (object->objectSubId != 0) - RemoveAttributeById(object->objectId, object->objectSubId); - else - heap_drop_with_catalog(object->objectId); - - /* - * IMPORANT: The relation must not be reloaded after heap_drop_with_catalog() - * is executed to drop this relation.If you reload relation after drop, it may - * cause other exceptions during the drop process - */ - - return; -} - -static void TrDoDropType(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, TypeRelationId, Natts_pg_type, Anum_pg_type_typname, TypeOidIndexId, "type"); - } else { - RemoveTypeById(object->objectId); - } - - return; -} - -static void TrDoDropConstraint(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, ConstraintRelationId, Natts_pg_constraint, Anum_pg_constraint_conname, - ConstraintOidIndexId, "constraint"); - } else { - RemoveConstraintById(object->objectId); - } - - return; -} - -static void TrDoDropTrigger(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, TriggerRelationId, Natts_pg_trigger, Anum_pg_trigger_tgname, TriggerOidIndexId, - "trigger"); - } else { - RemoveTriggerById(object->objectId); - } - - return; -} - -static void TrDoDropRewrite(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as rule-based view requires that origin rule name preserved. */ - } else { - RemoveRewriteRuleById(object->objectId); - } - - return; -} - -static void TrDoDropAttrdef(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemoveAttrDefaultById(object->objectId); - } - - return; -} - -static void TrDoDropProc(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, ProcedureRelationId, Natts_pg_proc, Anum_pg_proc_proname, ProcedureOidIndexId, - "procedure"); - } else { - RemoveFunctionById(object->objectId); - } - - return; -} - -static void TrDoDropCast(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - DropCastById(object->objectId); - } - - return; -} - -static void TrDoDropCollation(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, CollationRelationId, Natts_pg_collation, Anum_pg_collation_collname, - CollationOidIndexId, "collation"); - } else { - RemoveCollationById(object->objectId); - } - - return; -} - -static void TrDoDropConversion(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, ConversionRelationId, Natts_pg_conversion, Anum_pg_conversion_conname, - ConversionOidIndexId, "conversion"); - } else { - RemoveConversionById(object->objectId); - } - - return; -} - - -static void TrDoDropProceduralLanguage(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, LanguageRelationId, Natts_pg_language, Anum_pg_language_lanname, - LanguageOidIndexId, "language"); - } else { - DropProceduralLanguageById(object->objectId); - } - - return; -} - -static void TrDoDropLargeObject(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - 
LargeObjectDrop(object->objectId); - } - - return; -} - -static void TrDoDropOperator(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, OperatorRelationId, Natts_pg_operator, Anum_pg_operator_oprname, - OperatorOidIndexId, "operator"); - } else { - RemoveOperatorById(object->objectId); - } - - return; -} - -static void TrDoDropOpClass(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, OperatorClassRelationId, Natts_pg_opclass, Anum_pg_opclass_opcname, - OpclassOidIndexId, "opclass"); - } else { - RemoveOpClassById(object->objectId); - } - - return; -} - -static void TrDoDropOpFamily(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, OperatorFamilyRelationId, Natts_pg_opfamily, Anum_pg_opfamily_opfname, - OpfamilyOidIndexId, "opfamily"); - } else { - RemoveOpFamilyById(object->objectId); - } - - return; -} - - -static void TrDoDropAmOp(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemoveAmOpEntryById(object->objectId); - } - - return; -} - -static void TrDoDropAmProc(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemoveAmProcEntryById(object->objectId); - } - - return; -} - -static void TrDoDropSchema(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, NamespaceRelationId, Natts_pg_namespace, Anum_pg_namespace_nspname, - NamespaceOidIndexId, "namespace"); - } else { - RemoveSchemaById(object->objectId); - } - - return; -} - -static void TrDoDropTSParser(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, TSParserRelationId, Natts_pg_ts_parser, Anum_pg_ts_parser_prsname, - TSParserOidIndexId, "ts parser"); - } else { - RemoveTSParserById(object->objectId); - } - - return; -} - -static void TrDoDropTSDictionary(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, TSDictionaryRelationId, Natts_pg_ts_dict, Anum_pg_ts_dict_dictname, - TSDictionaryOidIndexId, "ts dictionary"); - } else { - RemoveTSDictionaryById(object->objectId); - } - - return; -} - -static void TrDoDropTSTemplate(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, TSTemplateRelationId, Natts_pg_ts_template, Anum_pg_ts_template_tmplname, - TSTemplateOidIndexId, "ts template"); - } else { - RemoveTSTemplateById(object->objectId); - } - - return; -} - -static void TrDoDropTSConfiguration(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, TSConfigRelationId, Natts_pg_ts_config, Anum_pg_ts_config_cfgname, - TSConfigOidIndexId, "ts configuration"); - } else { - RemoveTSConfigurationById(object->objectId); - } - - return; -} - -static void TrDoDropForeignDataWrapper(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, ForeignDataWrapperRelationId, Natts_pg_foreign_data_wrapper, - Anum_pg_foreign_data_wrapper_fdwname, ForeignDataWrapperOidIndexId, "foreign data wrapper"); - } else { - RemoveForeignDataWrapperById(object->objectId); - } - - return; -} - -static void 
TrDoDropForeignServer(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, ForeignServerRelationId, Natts_pg_foreign_server, - Anum_pg_foreign_server_srvname, ForeignServerOidIndexId, "foreign server"); - } else { - RemoveForeignServerById(object->objectId); - } - - return; -} - -static void TrDoDropUserMapping(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemoveUserMappingById(object->objectId); - } - - return; -} - - -static void TrDoDropDefaultACL(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemoveDefaultACLById(object->objectId); - } - - return; -} - -static void TrDoDropPgxcClass(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemovePgxcClass(object->objectId); - } - - return; -} - -static void TrDoDropExtension(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, ExtensionRelationId, Natts_pg_extension, Anum_pg_extension_extname, - ExtensionOidIndexId, "extension"); - } else { - RemoveExtensionById(object->objectId); - } - - return; -} - -static void TrDoDropDataSource(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, DataSourceRelationId, Natts_pg_extension_data_source, - Anum_pg_extension_data_source_srcname, DataSourceOidIndexId, "extension data source"); - } else { - RemoveDataSourceById(object->objectId); - } - - return; -} - -static void TrDoDropDirectory(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, PgDirectoryRelationId, Natts_pg_directory, Anum_pg_directory_directory_name, - PgDirectoryOidIndexId, "directory"); - } else { - RemoveDirectoryById(object->objectId); - } - - return; -} - -static void TrDoDropRlsPolicy(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, RlsPolicyRelationId, Natts_pg_rlspolicy, Anum_pg_rlspolicy_polname, - PgRlspolicyOidIndex, "rlspolicy"); - } else { - RemoveRlsPolicyById(object->objectId); - } - - return; -} - -static void TrDoDropJob(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - /* nothing to do as no name attribute in system catalog */ - } else { - RemoveJobById(object->objectId); - } - - return; -} - -static void TrDoDropSynonym(TrObjDesc *baseDesc, ObjectAddress *object) -{ - if (TrNeedLogicDrop(object)) { - TrRenameCommon(baseDesc, object, PgSynonymRelationId, Natts_pg_synonym, Anum_pg_synonym_synname, - SynonymOidIndexId, "synonym"); - } else { - RemoveSynonymById(object->objectId); - } - - return; -} - -/* - * doDeletion: delete a single object - * return false if logic deleted, - * return true if physical deleted, - */ -static void TrDoDrop(TrObjDesc *baseDesc, ObjectAddress *object) -{ - switch (getObjectClass(object)) { - case OCLASS_CLASS: { - char relKind = get_rel_relkind(object->objectId); - if (relKind == RELKIND_INDEX) { - TrDoDropIndex(baseDesc, object); - } else { - /* - * We use a unified entry for others: - * RELKIND_RELATION, RELKIND_SEQUENCE, - * RELKIND_TOASTVALUE, RELKIND_VIEW, - * RELKIND_COMPOSITE_TYPE, RELKIND_FOREIGN_TABLE - */ - 
TrDoDropTable(baseDesc, object, relKind); - } - break; - } - - case OCLASS_TYPE: - TrDoDropType(baseDesc, object); - break; - - case OCLASS_CONSTRAINT: - TrDoDropConstraint(baseDesc, object); - break; - - case OCLASS_TRIGGER: - TrDoDropTrigger(baseDesc, object); - break; - - case OCLASS_REWRITE: - TrDoDropRewrite(baseDesc, object); - break; - - case OCLASS_DEFAULT: - TrDoDropAttrdef(baseDesc, object); - break; - - case OCLASS_PROC: - TrDoDropProc(baseDesc, object); - break; - - case OCLASS_CAST: - TrDoDropCast(baseDesc, object); - break; - - case OCLASS_COLLATION: - TrDoDropCollation(baseDesc, object); - break; - - case OCLASS_CONVERSION: - TrDoDropConversion(baseDesc, object); - break; - - case OCLASS_LANGUAGE: - TrDoDropProceduralLanguage(baseDesc, object); - break; - - case OCLASS_LARGEOBJECT: - TrDoDropLargeObject(baseDesc, object); - break; - - case OCLASS_OPERATOR: - TrDoDropOperator(baseDesc, object); - break; - - case OCLASS_OPCLASS: - TrDoDropOpClass(baseDesc, object); - break; - - case OCLASS_OPFAMILY: - TrDoDropOpFamily(baseDesc, object); - break; - - case OCLASS_AMOP: - TrDoDropAmOp(baseDesc, object); - break; - - case OCLASS_AMPROC: - TrDoDropAmProc(baseDesc, object); - break; - - case OCLASS_SCHEMA: - TrDoDropSchema(baseDesc, object); - break; - - case OCLASS_TSPARSER: - TrDoDropTSParser(baseDesc, object); - break; - - case OCLASS_TSDICT: - TrDoDropTSDictionary(baseDesc, object); - break; - - case OCLASS_TSTEMPLATE: - TrDoDropTSTemplate(baseDesc, object); - break; - - case OCLASS_TSCONFIG: - TrDoDropTSConfiguration(baseDesc, object); - break; - - /* - * OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE intentionally not - * handled here - */ - - case OCLASS_FDW: - TrDoDropForeignDataWrapper(baseDesc, object); - break; - - case OCLASS_FOREIGN_SERVER: - TrDoDropForeignServer(baseDesc, object); - break; - - case OCLASS_USER_MAPPING: - TrDoDropUserMapping(baseDesc, object); - break; - - case OCLASS_DEFACL: - TrDoDropDefaultACL(baseDesc, object); - break; - - case OCLASS_PGXC_CLASS: - TrDoDropPgxcClass(baseDesc, object); - break; - - case OCLASS_EXTENSION: - TrDoDropExtension(baseDesc, object); - break; - - case OCLASS_DATA_SOURCE: - TrDoDropDataSource(baseDesc, object); - break; - - case OCLASS_DIRECTORY: - TrDoDropDirectory(baseDesc, object); - break; - - case OCLASS_RLSPOLICY: - TrDoDropRlsPolicy(baseDesc, object); - break; - - case OCLASS_PG_JOB: - if ((IS_PGXC_COORDINATOR && !IsConnFromCoord()) || (g_instance.role == VSINGLENODE)) - TrDoDropJob(baseDesc, object); - break; - - case OCLASS_SYNONYM: - TrDoDropSynonym(baseDesc, object); - break; - - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized object class: %u", object->classId))); - break; - } - - return; -} - -/* - * deleteOneObject: delete a single object for TrDrop. - * - * *depRel is the already-open pg_depend relation. - */ -static void TrDropOneObject(TrObjDesc *baseDesc, ObjectAddress *object, Relation *depRel) -{ - ScanKeyData key[3]; - int nkeys; - SysScanDesc scan; - HeapTuple tup; - - /* DROP hook of the objects being removed */ - if (object_access_hook) { - ObjectAccessDrop dropArg; - - dropArg.dropflags = PERFORM_DELETION_INVALID; - InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId, object->objectSubId, &dropArg); - } - - /* - * Delete the object itself, in an object-type-dependent way. - * - * We used to do this after removing the outgoing dependency links, but it - * seems just as reasonable to do it beforehand. 
In the concurrent case - * we *must *do it in this order, because we can't make any transactional - * updates before calling doDeletion() --- they'd get committed right - * away, which is not cool if the deletion then fails. - */ - TrDoDrop(baseDesc, object); - - /* - * In logical drop mode, we will keep all related system entries, including - * linked entries such as pg_depend records. It is done! - */ - if (TrNeedLogicDrop(object)) { - /* - * CommandCounterIncrement here to ensure that preceding changes are all - * visible to the next deletion step. - */ - CommandCounterIncrement(); - - /* - * Logic Drop done! - */ - return; - } - - /* - * In physical drop mode, we continue to remove all related system entries. - */ - - /* - * Now remove any pg_depend records that link from this object to others. - * (Any records linking to this object should be gone already.) - * - * When dropping a whole object (subId = 0), remove all pg_depend records - * for its sub-objects too. - */ - ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->classId)); - ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId)); - if (object->objectSubId != 0) { - ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(object->objectSubId)); - nkeys = 3; - } else - nkeys = 2; - - scan = systable_beginscan(*depRel, DependDependerIndexId, true, NULL, nkeys, key); - - while (HeapTupleIsValid(tup = systable_getnext(scan))) { - simple_heap_delete(*depRel, &tup->t_self); - } - - systable_endscan(scan); - - /* - * Delete shared dependency references related to this object. Again, if - * subId = 0, remove records for sub-objects too. - */ - deleteSharedDependencyRecordsFor(object->classId, object->objectId, object->objectSubId); - - /* - * Delete any comments or security labels associated with this object. - * (This is a convenient place to do these things, rather than having - * every object type know to do it.) - */ - DeleteComments(object->objectId, object->classId, object->objectSubId); - DeleteSecurityLabel(object); - - /* - * CommandCounterIncrement here to ensure that preceding changes are all - * visible to the next deletion step. - */ - CommandCounterIncrement(); - - /* - * Physical Drop done! 
- */ -} - -static bool TrObjIsInList(const ObjectAddresses *targetObjects, const ObjectAddress *thisobj) -{ - ObjectAddress *item = NULL; - - for (int i = 0; i < targetObjects->numrefs; i++) { - item = targetObjects->refs + i; - if (TrObjIsEqual(thisobj, item)) { - return true; - } - } - return false; -} - -static ObjectAddress *TrFindIdxInTarget(ObjectAddresses *targetObjects, ObjectAddress *item) -{ - ObjectAddress *thisobj = NULL; - - for (int i = 0; i < targetObjects->numrefs; i++) { - thisobj = targetObjects->refs + i; - if (TrObjIsEqual(item, thisobj)) { - return thisobj; - } - } - - return NULL; -} - -/* - * output: refthisobjs - */ -static void TrFindAllSubObjs(Relation depRel, const ObjectAddress *refobj, ObjectAddresses *refthisobjs) -{ - SysScanDesc sd; - HeapTuple tuple; - ScanKeyData key[3]; - int nkeys; - - ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(refobj->classId)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(refobj->objectId)); - nkeys = 2; - if (refobj->objectSubId != 0) { - ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(refobj->objectSubId)); - nkeys = 3; - } - - sd = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, nkeys, key); - while (HeapTupleIsValid(tuple = systable_getnext(sd))) { - Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple); - - /* add the refs to list */ - add_object_address_ext(depForm->classid, depForm->objid, depForm->objsubid, depForm->deptype, refthisobjs); - } - - systable_endscan(sd); - return; -} - -static void TrTagPhyDeleteSubObjs(Relation depRel, ObjectAddresses *targetObjects, ObjectAddress *thisobj) -{ - ObjectAddress *item = NULL; - - ObjectAddresses *refthisobjs = new_object_addresses(); - - /* Tag this obj RB_DROP_MODE_PHYSICAL */ - thisobj->rbDropMode = RB_DROP_MODE_PHYSICAL; - - /* Find all sub objs refered to this obj */ - TrFindAllSubObjs(depRel, thisobj, refthisobjs); - - for (int i = 0; i < refthisobjs->numrefs; i++) { - item = refthisobjs->refs + i; - - /* the item must exists in targetObjects. */ - item = TrFindIdxInTarget(targetObjects, item); - if (item == NULL || item->rbDropMode == RB_DROP_MODE_PHYSICAL) { - continue; - } - TrTagPhyDeleteSubObjs(depRel, targetObjects, item); - } - - free_object_addresses(refthisobjs); - return; -} - -static bool TrNeedPhyDelete(Relation depRel, ObjectAddresses *targetObjects, ObjectAddress *thisobj) -{ - ObjectAddress *item = NULL; - ObjectAddresses *refobjs = new_object_addresses(); - bool result = false; - - /* Find all objs this obj refered */ - TrFindAllRefObjs(depRel, thisobj, refobjs); - - /* Step 1: tag refobjs of thisobj, return directly if ALL refobjs not need physical drop. */ - for (int i = 0; i < refobjs->numrefs; i++) { - item = refobjs->refs + i; - if (!TrObjIsInList(targetObjects, item)) { - result = true; - break; - } - } - if (!result) { - free_object_addresses(refobjs); - return result; - } - - /* Step 2: tag refobjs with 'i' deptype to physical drop. 
*/ - for (int i = 0; i < refobjs->numrefs; i++) { - item = refobjs->refs + i; - if (item->deptype == 'i') { - item = TrFindIdxInTarget(targetObjects, item); - Assert(item != NULL); - if (item->rbDropMode == RB_DROP_MODE_PHYSICAL) { - continue; - } - TrTagPhyDeleteSubObjs(depRel, targetObjects, item); - } - } - - free_object_addresses(refobjs); - return result; -} - -static void TrResetDropMode(const ObjectAddresses *targetObjects, const ObjectAddress *baseObj) -{ - ObjectAddress *thisobj = NULL; - - for (int i = 0; i < targetObjects->numrefs; i++) { - thisobj = targetObjects->refs + i; - if (TrObjIsEqual(thisobj, baseObj)) { - thisobj->rbDropMode = RB_DROP_MODE_LOGIC; - continue; - } - thisobj->rbDropMode = RB_DROP_MODE_INVALID; - } - return; -} - -static void TrTagDependentObjects(Relation depRel, ObjectAddresses *targetObjects, const ObjectAddress *baseObj) -{ - ObjectAddress *thisobj = NULL; - - TrResetDropMode(targetObjects, baseObj); - for (int i = 0; i < targetObjects->numrefs; i++) { - thisobj = targetObjects->refs + i; - if (TrDropModeIsAlreadySet(thisobj)) { - continue; - } - - if (TrNeedPhyDelete(depRel, targetObjects, thisobj)) { - TrTagPhyDeleteSubObjs(depRel, targetObjects, thisobj); - } else { - thisobj->rbDropMode = RB_DROP_MODE_LOGIC; - } - } - - return; -} - -bool TrCheckRecyclebinDrop(const DropStmt *stmt, ObjectAddresses *objects) -{ - Relation depRel; - bool rbDrop = false; - - /* No work if no objects... */ - if (objects->numrefs <= 0) - return false; - - if (/* - * Disable Recyclebin-based-Drop when target object is not OBJECT_TABLE, or - */ - stmt->removeType != OBJECT_TABLE || - /* in concurrent drop mode, or */ - stmt->concurrent || - /* with purge option, or */ - stmt->purge || - /* multi objects drop. */ - list_length(stmt->objects) != 1) { - return false; - } - - if (!NeedTrComm(objects->refs->objectId)) { - return false; - } - - depRel = heap_open(DependRelationId, AccessShareLock); - rbDrop = !TrNeedPhyDelete(depRel, objects, &objects->refs[0]); - heap_close(depRel, AccessShareLock); - - return rbDrop; -} - -void TrDrop(const ObjectAddresses *objects, DropBehavior behavior) -{ - Relation depRel; - Relation baseRel; - TrObjDesc baseDesc; - ObjectAddresses *targetObjects = NULL; - ObjectAddress *baseObj = objects->refs; - - /* - * We save some cycles by opening pg_depend just once and passing the - * Relation pointer down to all the recursive deletion steps. - */ - depRel = heap_open(DependRelationId, RowExclusiveLock); - - /* - * Construct a list of objects to delete (ie, the given objects plus - * everything directly or indirectly dependent on them). Note that - * because we pass the whole objects list as pendingObjects context, we - * won't get a failure from trying to delete an object that is internally - * dependent on another one in the list; we'll just skip that object and - * delete it when we reach its owner. - */ - targetObjects = new_object_addresses(); - - /* - * Acquire deletion lock on each target object. (Ideally the caller - * has done this already, but many places are sloppy about it.) - */ - AcquireDeletionLock(baseObj, PERFORM_DELETION_INVALID); - - /* - * Finds all subobjects that reference the base table recursively. - */ - findDependentObjects(baseObj, DEPFLAG_ORIGINAL, NULL, /* empty stack */ - targetObjects, objects, &depRel); - ereport(LOG, (errmsg("Delete object %u/%u/%d", baseObj->classId, baseObj->objectId, baseObj->objectSubId))); - - /* - * Check if deletion is allowed, and report about cascaded deletes. 
- * - * If there's exactly one object being deleted, report it the same way as - * in performDeletion(), else we have to be vaguer. - */ - reportDependentObjects(targetObjects, behavior, NOTICE, baseObj); - - /* - * Tag all subobjects' drop mode: LOGIC_DROP, PYHSICAL_DROP. - */ - TrTagDependentObjects(depRel, targetObjects, baseObj); - - /* - * Initialize the baseDesc structure so that the logic dropped subobjects - * can be correctly processed when renamed or placed in recycle bin. Notice - * that base object already locked. - */ - baseRel = relation_open(baseObj->objectId, NoLock); - TrDescInit(baseRel, &baseDesc, RB_OPER_DROP, RB_OBJ_TABLE, true, true); - baseDesc.id = baseDesc.baseid = TrDescWrite(&baseDesc); - TrUpdateBaseid(&baseDesc); - relation_close(baseRel, NoLock); - - /* - * Drop all the objects in the proper order. - */ - for (int i = 0; i < targetObjects->numrefs; i++) { - ObjectAddress *thisobj = targetObjects->refs + i; - TrDropOneObject(&baseDesc, thisobj, &depRel); - } - - /* And clean up */ - free_object_addresses(targetObjects); - heap_close(depRel, RowExclusiveLock); -} - -void TrDoPurgeObjectDrop(TrObjDesc *desc) -{ - ObjectAddresses *objects; - ObjectAddress obj; - - objects = new_object_addresses(); - - obj.classId = RelationRelationId; - obj.objectId = desc->relid; - obj.objectSubId = 0; - add_exact_object_address(&obj, objects); - - performMultipleDeletions(objects, DROP_CASCADE, PERFORM_DELETION_INVALID); - - if (desc->type == RB_OBJ_TABLE) { - TrDeleteBaseid(desc->baseid); - } else { /* RB_OBJ_INDEX */ - TrDeleteId(desc->id); - } - - free_object_addresses(objects); - return; -} - -/* TIMECAPSULE TABLE { table_name } TO BEFORE DROP [RENAME TO new_tablename] */ -void TrRestoreDrop(const TimeCapsuleStmt *stmt) -{ - TrObjDesc desc; - ObjectAddress obj; - - TrOperFetch(stmt->relation, RB_OBJ_TABLE, &desc, RB_OPER_RESTORE_DROP); - - desc.authid = GetUserId(); - TrOperPrep(&desc, RB_OPER_RESTORE_DROP); - - obj.classId = RelationRelationId; - obj.objectId = desc.relid; - obj.objectSubId = 0; - - TrRenameClass(&desc, &obj, stmt->new_relname ? stmt->new_relname : desc.originname); - - TrDeleteBaseid(desc.baseid); - - return; -} +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * tcap_drop.cpp + * Routines to support Timecapsule `Recyclebin-based query, restore`. + * We use Tr prefix to indicate it in following coding. 
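+ *    A dropped object is handled in one of two ways (see TrDoDrop() below):
+ *    a logic drop renames it into the recycle bin and preserves its catalog
+ *    entries, while a physical drop removes the catalog entries as an
+ *    ordinary DROP would.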
+ * + * IDENTIFICATION + * src/gausskernel/storage/tcap/tcap_drop.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "pgstat.h" +#include "access/reloptions.h" +#include "access/sysattr.h" +#include "access/xlog.h" +#include "catalog/dependency.h" +#include "catalog/heap.h" +#include "catalog/index.h" +#include "catalog/indexing.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_collation_fn.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_conversion_fn.h" +#include "catalog/pg_conversion.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_extension_data_source.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_job.h" +#include "catalog/pg_language.h" +#include "catalog/pg_largeobject.h" +#include "catalog/pg_object.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_recyclebin.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_rlspolicy.h" +#include "catalog/pg_synonym.h" +#include "catalog/pg_tablespace.h" +#include "catalog/pg_trigger.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pgxc_class.h" +#include "catalog/storage.h" +#include "commands/comment.h" +#include "commands/dbcommands.h" +#include "commands/directory.h" +#include "commands/extension.h" +#include "commands/proclang.h" +#include "commands/schemacmds.h" +#include "commands/seclabel.h" +#include "commands/sec_rls_cmds.h" +#include "commands/tablecmds.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "commands/typecmds.h" +#include "executor/node/nodeModifyTable.h" +#include "rewrite/rewriteRemove.h" +#include "storage/lmgr.h" +#include "storage/predicate.h" +#include "storage/smgr/relfilenode.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/relcache.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" + +#include "storage/tcap.h" +#include "storage/tcap_impl.h" + +static void TrRenameClass(TrObjDesc *baseDesc, ObjectAddress *object, const char *newName) +{ + Relation rel; + HeapTuple tup; + HeapTuple newtup; + char rbname[NAMEDATALEN]; + Datum values[Natts_pg_class] = { 0 }; + bool nulls[Natts_pg_class] = { false }; + bool replaces[Natts_pg_class] = { false }; + Oid relid = object->objectId; + errno_t rc = EOK; + + if (newName) { + rc = strncpy_s(rbname, NAMEDATALEN, newName, strlen(newName)); + securec_check(rc, "\0", "\0"); + } else { + TrGenObjName(rbname, object->classId, relid); + } + + replaces[Anum_pg_class_relname - 1] = true; + values[Anum_pg_class_relname - 1] = CStringGetDatum(rbname); + + rel = heap_open(RelationRelationId, RowExclusiveLock); + + tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tup)) { + ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for relation %u", relid))); + } + + newtup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces); + + simple_heap_update(rel, &newtup->t_self, newtup); + + CatalogUpdateIndexes(rel, newtup); + + ReleaseSysCache(tup); + + heap_freetuple_ext(newtup); + + heap_close(rel, 
RowExclusiveLock); +} + +static void TrRenameCommon(TrObjDesc *baseDesc, ObjectAddress *object, Oid relid, int natts, int oidAttrNum, + Oid oidIndexId, char *objTag) +{ + Relation rel; + HeapTuple tup; + HeapTuple newtup; + char rbname[NAMEDATALEN]; + Datum *values = (Datum *)palloc0(sizeof(Datum) * natts); + bool *nulls = (bool *)palloc0(sizeof(bool) * natts); + bool *replaces = (bool *)palloc0(sizeof(bool) * natts); + ScanKeyData skey[1]; + SysScanDesc sd; + + TrGenObjName(rbname, object->classId, object->objectId); + + replaces[oidAttrNum - 1] = true; + values[oidAttrNum - 1] = CStringGetDatum(rbname); + + rel = heap_open(relid, RowExclusiveLock); + + ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId)); + sd = systable_beginscan(rel, oidIndexId, true, NULL, 1, skey); + + tup = systable_getnext(sd); + if (!HeapTupleIsValid(tup)) { + pfree(values); + pfree(nulls); + pfree(replaces); + ereport(ERROR, + (errcode(ERRCODE_NO_DATA_FOUND), errmsg("could not find tuple for %s %u", objTag, object->objectId))); + } + + newtup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, replaces); + + simple_heap_update(rel, &newtup->t_self, newtup); + + CatalogUpdateIndexes(rel, newtup); + + heap_freetuple_ext(newtup); + + systable_endscan(sd); + + heap_close(rel, RowExclusiveLock); + + pfree(values); + pfree(nulls); + pfree(replaces); +} + +static void TrDeleteBaseid(Oid baseid) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcybaseid, BTEqualStrategyNumber, F_INT8EQ, ObjectIdGetDatum(baseid)); + + sd = systable_beginscan(rbRel, RecyclebinBaseidIndexId, true, NULL, 1, skey); + while (HeapTupleIsValid(tup = systable_getnext(sd))) { + simple_heap_delete(rbRel, &tup->t_self); + } + + systable_endscan(sd); + heap_close(rbRel, RowExclusiveLock); + + /* + * CommandCounterIncrement here to ensure that preceding changes are all + * visible to the next deletion step. + */ + CommandCounterIncrement(); +} + +static void TrDeleteId(Oid id) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); + + ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(id)); + + sd = systable_beginscan(rbRel, RecyclebinIdIndexId, true, NULL, 1, skey); + if (HeapTupleIsValid(tup = systable_getnext(sd))) { + simple_heap_delete(rbRel, &tup->t_self); + } + + systable_endscan(sd); + heap_close(rbRel, RowExclusiveLock); + + /* + * CommandCounterIncrement here to ensure that preceding changes are all + * visible to the next deletion step. 
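+     * (Otherwise, catalog scans issued later in this same command would still
+     * see the tuple versions deleted above.)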
+ */
+    CommandCounterIncrement();
+}
+
+static inline bool TrNeedLogicDrop(const ObjectAddress *object)
+{
+    return object->rbDropMode == RB_DROP_MODE_LOGIC;
+}
+
+static bool TrCanPurge(const TrObjDesc *baseDesc, const ObjectAddress *object, char relKind)
+{
+    Relation depRel;
+    SysScanDesc sd;
+    HeapTuple tuple;
+    ScanKeyData key[3];
+    int nkeys;
+    bool found = false;
+
+    if (relKind != RELKIND_INDEX && relKind != RELKIND_GLOBAL_INDEX && relKind != RELKIND_RELATION) {
+        return false;
+    }
+
+    depRel = heap_open(DependRelationId, AccessShareLock);
+
+    ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->classId));
+    ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId));
+    nkeys = 2;
+    if (object->objectSubId != 0) {
+        ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ,
+            Int32GetDatum(object->objectSubId));
+        nkeys = 3;
+    }
+
+    sd = systable_beginscan(depRel, DependDependerIndexId, true, NULL, nkeys, key);
+    while (HeapTupleIsValid(tuple = systable_getnext(sd))) {
+        Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple);
+        if (depForm->refclassid == RelationRelationId && depForm->refobjid == baseDesc->relid) {
+            if (depForm->deptype != DEPENDENCY_AUTO) {
+                found = false;
+                break;
+            }
+            found = true;
+        }
+    }
+
+    systable_endscan(sd);
+    heap_close(depRel, AccessShareLock);
+    return found;
+}
+
+static void TrDoDropIndex(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    Assert(object->objectSubId == 0);
+
+    if (TrNeedLogicDrop(object)) {
+        TrObjDesc desc = *baseDesc;
+
+        if (!TR_IS_BASE_OBJ(baseDesc, object)) {
+            /* Deletion lock already acquired before single object drop. */
+            Relation rel = relation_open(object->objectId, NoLock);
+
+            TrDescInit(rel, &desc, RB_OPER_DROP, TrGetObjType(RelationGetNamespace(rel), RELKIND_INDEX),
+                TrCanPurge(baseDesc, object, RelationGetRelkind(rel)));
+            relation_close(rel, NoLock);
+
+            TrDescWrite(&desc);
+        }
+
+        TrRenameClass(baseDesc, object, desc.name);
+    } else {
+        index_drop(object->objectId, false);
+    }
+
+    return;
+}
+
+static void TrDoDropTable(TrObjDesc *baseDesc, ObjectAddress *object, char relKind)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrObjDesc desc;
+
+        if (object->objectSubId != 0 || relKind == RELKIND_VIEW || relKind == RELKIND_COMPOSITE_TYPE ||
+            relKind == RELKIND_FOREIGN_TABLE) {
+            TrRenameClass(baseDesc, object, NULL);
+            return;
+        }
+
+        desc = *baseDesc;
+        if (!TR_IS_BASE_OBJ(baseDesc, object)) {
+            /* Deletion lock already acquired before single object drop. */
+            Relation rel = relation_open(object->objectId, NoLock);
+
+            TrDescInit(rel, &desc, RB_OPER_DROP, TrGetObjType(InvalidOid, relKind),
+                TrCanPurge(baseDesc, object, relKind));
+            relation_close(rel, NoLock);
+
+            TrDescWrite(&desc);
+        }
+
+        TrRenameClass(baseDesc, object, desc.name);
+
+        return;
+    }
+
+    /*
+     * relation_open() must be called before heap_drop_with_catalog(). If the
+     * relation is reloaded after the drop, it may cause other exceptions
+     * during the drop process.
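+     * (heap_drop_with_catalog() removes the catalog entries immediately and
+     * schedules the underlying storage for unlink at transaction commit.)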
+     */
+    if (object->objectSubId != 0)
+        RemoveAttributeById(object->objectId, object->objectSubId);
+    else
+        heap_drop_with_catalog(object->objectId);
+
+    /*
+     * IMPORTANT: The relation must not be reloaded after heap_drop_with_catalog()
+     * has dropped it. If the relation is reloaded after the drop, it may cause
+     * other exceptions during the drop process.
+     */
+
+    return;
+}
+
+static void TrDoDropType(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, TypeRelationId, Natts_pg_type, Anum_pg_type_typname, TypeOidIndexId, "type");
+    } else {
+        RemoveTypeById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropConstraint(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, ConstraintRelationId, Natts_pg_constraint, Anum_pg_constraint_conname,
+            ConstraintOidIndexId, "constraint");
+    } else {
+        RemoveConstraintById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropTrigger(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, TriggerRelationId, Natts_pg_trigger, Anum_pg_trigger_tgname, TriggerOidIndexId,
+            "trigger");
+    } else {
+        RemoveTriggerById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropRewrite(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        /* nothing to do, as a rule-based view requires that the original rule name be preserved. */
+    } else {
+        RemoveRewriteRuleById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropAttrdef(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        /* nothing to do as no name attribute in system catalog */
+    } else {
+        RemoveAttrDefaultById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropProc(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, ProcedureRelationId, Natts_pg_proc, Anum_pg_proc_proname, ProcedureOidIndexId,
+            "procedure");
+    } else {
+        RemoveFunctionById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropCast(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        /* nothing to do as no name attribute in system catalog */
+    } else {
+        DropCastById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropCollation(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, CollationRelationId, Natts_pg_collation, Anum_pg_collation_collname,
+            CollationOidIndexId, "collation");
+    } else {
+        RemoveCollationById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropConversion(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, ConversionRelationId, Natts_pg_conversion, Anum_pg_conversion_conname,
+            ConversionOidIndexId, "conversion");
+    } else {
+        RemoveConversionById(object->objectId);
+    }
+
+    return;
+}
+
+
+static void TrDoDropProceduralLanguage(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        TrRenameCommon(baseDesc, object, LanguageRelationId, Natts_pg_language, Anum_pg_language_lanname,
+            LanguageOidIndexId, "language");
+    } else {
+        DropProceduralLanguageById(object->objectId);
+    }
+
+    return;
+}
+
+static void TrDoDropLargeObject(TrObjDesc *baseDesc, ObjectAddress *object)
+{
+    if (TrNeedLogicDrop(object)) {
+        /* nothing to do as no name attribute in system catalog */
+    } else {
+        
LargeObjectDrop(object->objectId); + } + + return; +} + +static void TrDoDropOperator(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, OperatorRelationId, Natts_pg_operator, Anum_pg_operator_oprname, + OperatorOidIndexId, "operator"); + } else { + RemoveOperatorById(object->objectId); + } + + return; +} + +static void TrDoDropOpClass(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, OperatorClassRelationId, Natts_pg_opclass, Anum_pg_opclass_opcname, + OpclassOidIndexId, "opclass"); + } else { + RemoveOpClassById(object->objectId); + } + + return; +} + +static void TrDoDropOpFamily(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, OperatorFamilyRelationId, Natts_pg_opfamily, Anum_pg_opfamily_opfname, + OpfamilyOidIndexId, "opfamily"); + } else { + RemoveOpFamilyById(object->objectId); + } + + return; +} + + +static void TrDoDropAmOp(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + /* nothing to do as no name attribute in system catalog */ + } else { + RemoveAmOpEntryById(object->objectId); + } + + return; +} + +static void TrDoDropAmProc(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + /* nothing to do as no name attribute in system catalog */ + } else { + RemoveAmProcEntryById(object->objectId); + } + + return; +} + +static void TrDoDropSchema(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, NamespaceRelationId, Natts_pg_namespace, Anum_pg_namespace_nspname, + NamespaceOidIndexId, "namespace"); + } else { + RemoveSchemaById(object->objectId); + } + + return; +} + +static void TrDoDropTSParser(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, TSParserRelationId, Natts_pg_ts_parser, Anum_pg_ts_parser_prsname, + TSParserOidIndexId, "ts parser"); + } else { + RemoveTSParserById(object->objectId); + } + + return; +} + +static void TrDoDropTSDictionary(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, TSDictionaryRelationId, Natts_pg_ts_dict, Anum_pg_ts_dict_dictname, + TSDictionaryOidIndexId, "ts dictionary"); + } else { + RemoveTSDictionaryById(object->objectId); + } + + return; +} + +static void TrDoDropTSTemplate(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, TSTemplateRelationId, Natts_pg_ts_template, Anum_pg_ts_template_tmplname, + TSTemplateOidIndexId, "ts template"); + } else { + RemoveTSTemplateById(object->objectId); + } + + return; +} + +static void TrDoDropTSConfiguration(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, TSConfigRelationId, Natts_pg_ts_config, Anum_pg_ts_config_cfgname, + TSConfigOidIndexId, "ts configuration"); + } else { + RemoveTSConfigurationById(object->objectId); + } + + return; +} + +static void TrDoDropForeignDataWrapper(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, ForeignDataWrapperRelationId, Natts_pg_foreign_data_wrapper, + Anum_pg_foreign_data_wrapper_fdwname, ForeignDataWrapperOidIndexId, "foreign data wrapper"); + } else { + RemoveForeignDataWrapperById(object->objectId); + } + + return; +} + +static void 
TrDoDropForeignServer(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, ForeignServerRelationId, Natts_pg_foreign_server, + Anum_pg_foreign_server_srvname, ForeignServerOidIndexId, "foreign server"); + } else { + RemoveForeignServerById(object->objectId); + } + + return; +} + +static void TrDoDropUserMapping(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + /* nothing to do as no name attribute in system catalog */ + } else { + RemoveUserMappingById(object->objectId); + } + + return; +} + + +static void TrDoDropDefaultACL(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + /* nothing to do as no name attribute in system catalog */ + } else { + RemoveDefaultACLById(object->objectId); + } + + return; +} + +static void TrDoDropPgxcClass(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + /* nothing to do as no name attribute in system catalog */ + } else { + RemovePgxcClass(object->objectId); + } + + return; +} + +static void TrDoDropExtension(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, ExtensionRelationId, Natts_pg_extension, Anum_pg_extension_extname, + ExtensionOidIndexId, "extension"); + } else { + RemoveExtensionById(object->objectId); + } + + return; +} + +static void TrDoDropDataSource(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, DataSourceRelationId, Natts_pg_extension_data_source, + Anum_pg_extension_data_source_srcname, DataSourceOidIndexId, "extension data source"); + } else { + RemoveDataSourceById(object->objectId); + } + + return; +} + +static void TrDoDropDirectory(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, PgDirectoryRelationId, Natts_pg_directory, Anum_pg_directory_directory_name, + PgDirectoryOidIndexId, "directory"); + } else { + RemoveDirectoryById(object->objectId); + } + + return; +} + +static void TrDoDropRlsPolicy(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, RlsPolicyRelationId, Natts_pg_rlspolicy, Anum_pg_rlspolicy_polname, + PgRlspolicyOidIndex, "rlspolicy"); + } else { + RemoveRlsPolicyById(object->objectId); + } + + return; +} + +static void TrDoDropJob(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + /* nothing to do as no name attribute in system catalog */ + } else { + RemoveJobById(object->objectId); + } + + return; +} + +static void TrDoDropSynonym(TrObjDesc *baseDesc, ObjectAddress *object) +{ + if (TrNeedLogicDrop(object)) { + TrRenameCommon(baseDesc, object, PgSynonymRelationId, Natts_pg_synonym, Anum_pg_synonym_synname, + SynonymOidIndexId, "synonym"); + } else { + RemoveSynonymById(object->objectId); + } + + return; +} + +/* + * doDeletion: delete a single object + * return false if logic deleted, + * return true if physical deleted, + */ +static void TrDoDrop(TrObjDesc *baseDesc, ObjectAddress *object) +{ + switch (getObjectClass(object)) { + case OCLASS_CLASS: { + char relKind = get_rel_relkind(object->objectId); + if (relKind == RELKIND_INDEX) { + TrDoDropIndex(baseDesc, object); + } else { + /* + * We use a unified entry for others: + * RELKIND_RELATION, RELKIND_SEQUENCE, + * RELKIND_TOASTVALUE, RELKIND_VIEW, + * RELKIND_COMPOSITE_TYPE, RELKIND_FOREIGN_TABLE + */ + 
TrDoDropTable(baseDesc, object, relKind); + } + break; + } + + case OCLASS_TYPE: + TrDoDropType(baseDesc, object); + break; + + case OCLASS_CONSTRAINT: + TrDoDropConstraint(baseDesc, object); + break; + + case OCLASS_TRIGGER: + TrDoDropTrigger(baseDesc, object); + break; + + case OCLASS_REWRITE: + TrDoDropRewrite(baseDesc, object); + break; + + case OCLASS_DEFAULT: + TrDoDropAttrdef(baseDesc, object); + break; + + case OCLASS_PROC: + TrDoDropProc(baseDesc, object); + break; + + case OCLASS_CAST: + TrDoDropCast(baseDesc, object); + break; + + case OCLASS_COLLATION: + TrDoDropCollation(baseDesc, object); + break; + + case OCLASS_CONVERSION: + TrDoDropConversion(baseDesc, object); + break; + + case OCLASS_LANGUAGE: + TrDoDropProceduralLanguage(baseDesc, object); + break; + + case OCLASS_LARGEOBJECT: + TrDoDropLargeObject(baseDesc, object); + break; + + case OCLASS_OPERATOR: + TrDoDropOperator(baseDesc, object); + break; + + case OCLASS_OPCLASS: + TrDoDropOpClass(baseDesc, object); + break; + + case OCLASS_OPFAMILY: + TrDoDropOpFamily(baseDesc, object); + break; + + case OCLASS_AMOP: + TrDoDropAmOp(baseDesc, object); + break; + + case OCLASS_AMPROC: + TrDoDropAmProc(baseDesc, object); + break; + + case OCLASS_SCHEMA: + TrDoDropSchema(baseDesc, object); + break; + + case OCLASS_TSPARSER: + TrDoDropTSParser(baseDesc, object); + break; + + case OCLASS_TSDICT: + TrDoDropTSDictionary(baseDesc, object); + break; + + case OCLASS_TSTEMPLATE: + TrDoDropTSTemplate(baseDesc, object); + break; + + case OCLASS_TSCONFIG: + TrDoDropTSConfiguration(baseDesc, object); + break; + + /* + * OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE intentionally not + * handled here + */ + + case OCLASS_FDW: + TrDoDropForeignDataWrapper(baseDesc, object); + break; + + case OCLASS_FOREIGN_SERVER: + TrDoDropForeignServer(baseDesc, object); + break; + + case OCLASS_USER_MAPPING: + TrDoDropUserMapping(baseDesc, object); + break; + + case OCLASS_DEFACL: + TrDoDropDefaultACL(baseDesc, object); + break; + + case OCLASS_PGXC_CLASS: + TrDoDropPgxcClass(baseDesc, object); + break; + + case OCLASS_EXTENSION: + TrDoDropExtension(baseDesc, object); + break; + + case OCLASS_DATA_SOURCE: + TrDoDropDataSource(baseDesc, object); + break; + + case OCLASS_DIRECTORY: + TrDoDropDirectory(baseDesc, object); + break; + + case OCLASS_RLSPOLICY: + TrDoDropRlsPolicy(baseDesc, object); + break; + + case OCLASS_PG_JOB: + if ((IS_PGXC_COORDINATOR && !IsConnFromCoord()) || (g_instance.role == VSINGLENODE)) + TrDoDropJob(baseDesc, object); + break; + + case OCLASS_SYNONYM: + TrDoDropSynonym(baseDesc, object); + break; + + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("unrecognized object class: %u", object->classId))); + break; + } + + return; +} + +/* + * deleteOneObject: delete a single object for TrDrop. + * + * *depRel is the already-open pg_depend relation. + */ +static void TrDropOneObject(TrObjDesc *baseDesc, ObjectAddress *object, Relation *depRel) +{ + ScanKeyData key[3]; + int nkeys; + SysScanDesc scan; + HeapTuple tup; + + /* DROP hook of the objects being removed */ + if (object_access_hook) { + ObjectAccessDrop dropArg; + + dropArg.dropflags = PERFORM_DELETION_INVALID; + InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId, object->objectSubId, &dropArg); + } + + /* + * Delete the object itself, in an object-type-dependent way. + * + * We used to do this after removing the outgoing dependency links, but it + * seems just as reasonable to do it beforehand. 
In the concurrent case
+     * we *must* do it in this order, because we can't make any transactional
+     * updates before calling TrDoDrop() --- they'd get committed right
+     * away, which is not cool if the deletion then fails.
+     */
+    TrDoDrop(baseDesc, object);
+
+    /*
+     * In logical drop mode, we will keep all related system entries, including
+     * linked entries such as pg_depend records. It is done!
+     */
+    if (TrNeedLogicDrop(object)) {
+        /*
+         * CommandCounterIncrement here to ensure that preceding changes are all
+         * visible to the next deletion step.
+         */
+        CommandCounterIncrement();
+
+        /*
+         * Logic Drop done!
+         */
+        return;
+    }
+
+    /*
+     * In physical drop mode, we continue to remove all related system entries.
+     */
+
+    /*
+     * Now remove any pg_depend records that link from this object to others.
+     * (Any records linking to this object should be gone already.)
+     *
+     * When dropping a whole object (subId = 0), remove all pg_depend records
+     * for its sub-objects too.
+     */
+    ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->classId));
+    ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(object->objectId));
+    if (object->objectSubId != 0) {
+        ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ,
+            Int32GetDatum(object->objectSubId));
+        nkeys = 3;
+    } else
+        nkeys = 2;
+
+    scan = systable_beginscan(*depRel, DependDependerIndexId, true, NULL, nkeys, key);
+
+    while (HeapTupleIsValid(tup = systable_getnext(scan))) {
+        simple_heap_delete(*depRel, &tup->t_self);
+    }
+
+    systable_endscan(scan);
+
+    /*
+     * Delete shared dependency references related to this object. Again, if
+     * subId = 0, remove records for sub-objects too.
+     */
+    deleteSharedDependencyRecordsFor(object->classId, object->objectId, object->objectSubId);
+
+    /*
+     * Delete any comments or security labels associated with this object.
+     * (This is a convenient place to do these things, rather than having
+     * every object type know to do it.)
+     */
+    DeleteComments(object->objectId, object->classId, object->objectSubId);
+    DeleteSecurityLabel(object);
+
+    /*
+     * CommandCounterIncrement here to ensure that preceding changes are all
+     * visible to the next deletion step.
+     */
+    CommandCounterIncrement();
+
+    /*
+     * Physical Drop done!
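+     * All pg_depend and pg_shdepend records, comments and security labels
+     * attached to this object have been removed along with the object itself.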
+     */
+}
+
+static bool TrObjIsInList(const ObjectAddresses *targetObjects, const ObjectAddress *thisobj)
+{
+    ObjectAddress *item = NULL;
+
+    for (int i = 0; i < targetObjects->numrefs; i++) {
+        item = targetObjects->refs + i;
+        if (TrObjIsEqual(thisobj, item)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static ObjectAddress *TrFindIdxInTarget(ObjectAddresses *targetObjects, ObjectAddress *item)
+{
+    ObjectAddress *thisobj = NULL;
+
+    for (int i = 0; i < targetObjects->numrefs; i++) {
+        thisobj = targetObjects->refs + i;
+        if (TrObjIsEqual(item, thisobj)) {
+            return thisobj;
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * output: refthisobjs
+ */
+static void TrFindAllSubObjs(Relation depRel, const ObjectAddress *refobj, ObjectAddresses *refthisobjs)
+{
+    SysScanDesc sd;
+    HeapTuple tuple;
+    ScanKeyData key[3];
+    int nkeys;
+
+    ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(refobj->classId));
+    ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(refobj->objectId));
+    nkeys = 2;
+    if (refobj->objectSubId != 0) {
+        ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ,
+            Int32GetDatum(refobj->objectSubId));
+        nkeys = 3;
+    }
+
+    sd = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, nkeys, key);
+    while (HeapTupleIsValid(tuple = systable_getnext(sd))) {
+        Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple);
+
+        /* add the refs to list */
+        add_object_address_ext(depForm->classid, depForm->objid, depForm->objsubid, depForm->deptype, refthisobjs);
+    }
+
+    systable_endscan(sd);
+    return;
+}
+
+static void TrTagPhyDeleteSubObjs(Relation depRel, ObjectAddresses *targetObjects, ObjectAddress *thisobj)
+{
+    ObjectAddress *item = NULL;
+
+    ObjectAddresses *refthisobjs = new_object_addresses();
+
+    /* Tag this obj RB_DROP_MODE_PHYSICAL */
+    thisobj->rbDropMode = RB_DROP_MODE_PHYSICAL;
+
+    /* Find all sub objs that refer to this obj */
+    TrFindAllSubObjs(depRel, thisobj, refthisobjs);
+
+    for (int i = 0; i < refthisobjs->numrefs; i++) {
+        item = refthisobjs->refs + i;
+
+        /* the item must exist in targetObjects. */
+        item = TrFindIdxInTarget(targetObjects, item);
+        if (item == NULL || item->rbDropMode == RB_DROP_MODE_PHYSICAL) {
+            continue;
+        }
+        TrTagPhyDeleteSubObjs(depRel, targetObjects, item);
+    }
+
+    free_object_addresses(refthisobjs);
+    return;
+}
+
+static bool TrNeedPhyDelete(Relation depRel, ObjectAddresses *targetObjects, ObjectAddress *thisobj)
+{
+    ObjectAddress *item = NULL;
+    ObjectAddresses *refobjs = new_object_addresses();
+    bool result = false;
+
+    /* Find all objs that this obj refers to */
+    TrFindAllRefObjs(depRel, thisobj, refobjs);
+
+    /*
+     * Step 1: scan the refobjs of thisobj. A physical drop is needed only if
+     * thisobj references some object outside targetObjects; return directly
+     * if it does not.
+     */
+    for (int i = 0; i < refobjs->numrefs; i++) {
+        item = refobjs->refs + i;
+        if (!TrObjIsInList(targetObjects, item)) {
+            result = true;
+            break;
+        }
+    }
+    if (!result) {
+        free_object_addresses(refobjs);
+        return result;
+    }
+
+    /* Step 2: tag refobjs with 'i' deptype for physical drop.
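+     * ('i' is DEPENDENCY_INTERNAL: an internally dependent object is an
+     * integral part of its referenced object and must share its drop mode.)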
*/ + for (int i = 0; i < refobjs->numrefs; i++) { + item = refobjs->refs + i; + if (item->deptype == 'i') { + item = TrFindIdxInTarget(targetObjects, item); + Assert(item != NULL); + if (item->rbDropMode == RB_DROP_MODE_PHYSICAL) { + continue; + } + TrTagPhyDeleteSubObjs(depRel, targetObjects, item); + } + } + + free_object_addresses(refobjs); + return result; +} + +static void TrResetDropMode(const ObjectAddresses *targetObjects, const ObjectAddress *baseObj) +{ + ObjectAddress *thisobj = NULL; + + for (int i = 0; i < targetObjects->numrefs; i++) { + thisobj = targetObjects->refs + i; + if (TrObjIsEqual(thisobj, baseObj)) { + thisobj->rbDropMode = RB_DROP_MODE_LOGIC; + continue; + } + thisobj->rbDropMode = RB_DROP_MODE_INVALID; + } + return; +} + +static void TrTagDependentObjects(Relation depRel, ObjectAddresses *targetObjects, const ObjectAddress *baseObj) +{ + ObjectAddress *thisobj = NULL; + + TrResetDropMode(targetObjects, baseObj); + for (int i = 0; i < targetObjects->numrefs; i++) { + thisobj = targetObjects->refs + i; + if (TrDropModeIsAlreadySet(thisobj)) { + continue; + } + + if (TrNeedPhyDelete(depRel, targetObjects, thisobj)) { + TrTagPhyDeleteSubObjs(depRel, targetObjects, thisobj); + } else { + thisobj->rbDropMode = RB_DROP_MODE_LOGIC; + } + } + + return; +} + +bool TrCheckRecyclebinDrop(const DropStmt *stmt, ObjectAddresses *objects) +{ + Relation depRel; + bool rbDrop = false; + + /* No work if no objects... */ + if (objects->numrefs <= 0) + return false; + + if (/* + * Disable Recyclebin-based-Drop when target object is not OBJECT_TABLE, or + */ + stmt->removeType != OBJECT_TABLE || + /* in concurrent drop mode, or */ + stmt->concurrent || + /* with purge option, or */ + stmt->purge || + /* multi objects drop. */ + list_length(stmt->objects) != 1) { + return false; + } + + if (!NeedTrComm(objects->refs->objectId)) { + return false; + } + + depRel = heap_open(DependRelationId, AccessShareLock); + rbDrop = !TrNeedPhyDelete(depRel, objects, &objects->refs[0]); + heap_close(depRel, AccessShareLock); + + return rbDrop; +} + +void TrDrop(const DropStmt* drop, const ObjectAddresses *objects, DropBehavior behavior) +{ + Relation depRel; + Relation baseRel; + TrObjDesc baseDesc; + ObjectAddresses *targetObjects = NULL; + ObjectAddress *baseObj = objects->refs; + + /* + * We save some cycles by opening pg_depend just once and passing the + * Relation pointer down to all the recursive deletion steps. + */ + depRel = heap_open(DependRelationId, RowExclusiveLock); + + /* + * Construct a list of objects to delete (ie, the given objects plus + * everything directly or indirectly dependent on them). Note that + * because we pass the whole objects list as pendingObjects context, we + * won't get a failure from trying to delete an object that is internally + * dependent on another one in the list; we'll just skip that object and + * delete it when we reach its owner. + */ + targetObjects = new_object_addresses(); + + /* + * Acquire deletion lock on each target object. (Ideally the caller + * has done this already, but many places are sloppy about it.) + */ + AcquireDeletionLock(baseObj, PERFORM_DELETION_INVALID); + + /* + * Finds all subobjects that reference the base table recursively. + */ + findDependentObjects(baseObj, DEPFLAG_ORIGINAL, NULL, /* empty stack */ + targetObjects, objects, &depRel); + ereport(LOG, (errmsg("Delete object %u/%u/%d", baseObj->classId, baseObj->objectId, baseObj->objectSubId))); + + /* + * Check if deletion is allowed, and report about cascaded deletes. 
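+     * (With DROP RESTRICT this raises an error if any dependent object
+     * exists; with CASCADE each dependent object is reported at NOTICE level.)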
+     *
+     * If there's exactly one object being deleted, report it the same way as
+     * in performDeletion(), else we have to be vaguer.
+     */
+    reportDependentObjects(targetObjects, behavior, NOTICE, baseObj);
+
+    /*
+     * Tag all subobjects' drop mode: LOGIC_DROP, PHYSICAL_DROP.
+     */
+    TrTagDependentObjects(depRel, targetObjects, baseObj);
+
+    /*
+     * Initialize the baseDesc structure so that the logic dropped subobjects
+     * can be correctly processed when renamed or placed in recycle bin. Notice
+     * that the base object is already locked.
+     */
+    baseRel = relation_open(baseObj->objectId, NoLock);
+    TrDescInit(baseRel, &baseDesc, RB_OPER_DROP, RB_OBJ_TABLE, true, true);
+    baseDesc.id = baseDesc.baseid = TrDescWrite(&baseDesc);
+    TrUpdateBaseid(&baseDesc);
+    relation_close(baseRel, NoLock);
+
+    /*
+     * Drop all the objects in the proper order.
+     */
+    for (int i = 0; i < targetObjects->numrefs; i++) {
+        ObjectAddress *thisobj = targetObjects->refs + i;
+        TrDropOneObject(&baseDesc, thisobj, &depRel);
+    }
+
+    /* And clean up */
+    free_object_addresses(targetObjects);
+    heap_close(depRel, RowExclusiveLock);
+}
+
+void TrDoPurgeObjectDrop(TrObjDesc *desc)
+{
+    ObjectAddresses *objects;
+    ObjectAddress obj;
+
+    objects = new_object_addresses();
+
+    obj.classId = RelationRelationId;
+    obj.objectId = desc->relid;
+    obj.objectSubId = 0;
+    add_exact_object_address(&obj, objects);
+
+    performMultipleDeletions(objects, DROP_CASCADE, PERFORM_DELETION_INVALID);
+
+    if (desc->type == RB_OBJ_TABLE) {
+        TrDeleteBaseid(desc->baseid);
+    } else { /* RB_OBJ_INDEX */
+        TrDeleteId(desc->id);
+    }
+
+    free_object_addresses(objects);
+    return;
+}
+
+/* TIMECAPSULE TABLE { table_name } TO BEFORE DROP [RENAME TO new_tablename] */
+void TrRestoreDrop(const TimeCapsuleStmt *stmt)
+{
+    TrObjDesc desc;
+    ObjectAddress obj;
+    Relation rel;
+
+    desc.relid = 0;
+    TrOperFetch(stmt->relation, RB_OBJ_TABLE, &desc, RB_OPER_RESTORE_DROP);
+    if (desc.relid != 0 && (desc.type == RB_OBJ_TABLE)) {
+        stmt->relation->relname = desc.name;
+        rel = heap_openrv(stmt->relation, AccessExclusiveLock);
+        if (rel->rd_tam_type == TAM_HEAP) {
+            heap_close(rel, NoLock);
+            elog(ERROR, "timecapsule does not support astore yet");
+            return;
+        }
+        heap_close(rel, NoLock);
+    }
+
+    desc.authid = GetUserId();
+    TrOperPrep(&desc, RB_OPER_RESTORE_DROP);
+
+    obj.classId = RelationRelationId;
+    obj.objectId = desc.relid;
+    obj.objectSubId = 0;
+
+    TrRenameClass(&desc, &obj, stmt->new_relname ? stmt->new_relname : desc.originname);
+
+    TrDeleteBaseid(desc.baseid);
+
+    return;
+}
diff --git a/src/gausskernel/storage/tcap/tcap_manager.cpp b/src/gausskernel/storage/tcap/tcap_manager.cpp
index 688338404..0df59d4ba 100644
--- a/src/gausskernel/storage/tcap/tcap_manager.cpp
+++ b/src/gausskernel/storage/tcap/tcap_manager.cpp
@@ -1,1793 +1,1888 @@
-/*
- * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
- *
- * openGauss is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *
- * http://license.coscl.org.cn/MulanPSL2
- *
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- * --------------------------------------------------------------------------------------- - * - * tcap_manager.cpp - * Routines to support Timecapsule `Recyclebin-based query, restore`. - * We use Tr prefix to indicate it in following coding. - * - * IDENTIFICATION - * src/gausskernel/storage/tcap/tcap_manager.cpp - * - * --------------------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "pgstat.h" -#include "access/reloptions.h" -#include "access/sysattr.h" -#include "access/xlog.h" -#include "catalog/pg_database.h" -#include "catalog/dependency.h" -#include "catalog/heap.h" -#include "catalog/index.h" -#include "catalog/indexing.h" -#include "catalog/objectaccess.h" -#include "catalog/pg_collation_fn.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_conversion_fn.h" -#include "catalog/pg_conversion.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension_data_source.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_foreign_server.h" -#include "catalog/pg_job.h" -#include "catalog/pg_language.h" -#include "catalog/pg_largeobject.h" -#include "catalog/pg_object.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_opfamily.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_recyclebin.h" -#include "catalog/pg_rewrite.h" -#include "catalog/pg_rlspolicy.h" -#include "catalog/pg_synonym.h" -#include "catalog/pg_tablespace.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_ts_config.h" -#include "catalog/pg_ts_dict.h" -#include "catalog/pg_ts_parser.h" -#include "catalog/pg_ts_template.h" -#include "catalog/pgxc_class.h" -#include "catalog/storage.h" -#include "commands/comment.h" -#include "commands/dbcommands.h" -#include "commands/directory.h" -#include "commands/extension.h" -#include "commands/proclang.h" -#include "commands/schemacmds.h" -#include "commands/seclabel.h" -#include "commands/sec_rls_cmds.h" -#include "commands/tablecmds.h" -#include "commands/tablespace.h" -#include "commands/trigger.h" -#include "commands/typecmds.h" -#include "executor/node/nodeModifyTable.h" -#include "rewrite/rewriteRemove.h" -#include "storage/lmgr.h" -#include "storage/predicate.h" -#include "storage/smgr/relfilenode.h" -#include "utils/acl.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/relcache.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" - -#include "storage/tcap.h" -#include "storage/tcap_impl.h" - -static bool TrIsRefRbObject(const ObjectAddress *obj, Relation depRel = NULL); -void TrDoPurgeObjectDrop(TrObjDesc *desc); - -char *TrGenObjName(char *rbname, Oid classId, Oid objid) -{ - int rc = EOK; - - rc = snprintf_s(rbname, NAMEDATALEN, NAMEDATALEN - 1, "BIN$%X%X%X$%llX==$0", - u_sess->proc_cxt.MyDatabaseId, classId, objid, (uint64)GetXLogInsertRecPtr()); - securec_check_ss_c(rc, "\0", "\0"); - - return rbname; -} - -static TransactionId TrRbGetRcyfrozenxid64(HeapTuple rbtup, Relation rbRel = NULL) -{ - Datum datum; - bool isNull = false; - TransactionId rcyfrozenxid64; - bool relArgNull = rbRel == NULL; - - if (relArgNull) { - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - } - - datum = heap_getattr(rbtup, Anum_pg_recyclebin_rcyfrozenxid64, RelationGetDescr(rbRel), &isNull); - Assert(!isNull); - - rcyfrozenxid64 = DatumGetTransactionId(datum); - - if (relArgNull) 
{ - heap_close(rbRel, AccessShareLock); - } - - return rcyfrozenxid64; -} - -void TrDescInit(Relation rel, TrObjDesc *desc, TrObjOperType operType, - TrObjType objType, bool canpurge, bool isBaseObj) -{ - errno_t rc = EOK; - - /* Notice: desc->id, desc->baseid will be assigned by invoker later. */ - desc->dbid = u_sess->proc_cxt.MyDatabaseId; - desc->relid = RelationGetRelid(rel); - - (void)TrGenObjName(desc->name, RelationRelationId, desc->relid); - - rc = strncpy_s(desc->originname, NAMEDATALEN, RelationGetRelationName(rel), - strlen(RelationGetRelationName(rel))); - securec_check_ss_c(rc, "\0", "\0"); - - desc->operation = operType; - desc->type = objType; - desc->recyclecsn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; - desc->recycletime = GetCurrentTimestamp(); - desc->createcsn = RelationGetCreatecsn(rel); - desc->changecsn = RelationGetChangecsn(rel); - desc->nspace = RelationGetNamespace(rel); - desc->owner = RelationGetOwner(rel); - desc->tablespace = RelationGetTablespace(rel); - desc->relfilenode = RelationGetRelFileNode(rel); - desc->frozenxid = RelationGetRelFrozenxid(rel); - desc->frozenxid64 = RelationGetRelFrozenxid64(rel); - desc->canrestore = objType == RB_OBJ_TABLE; - desc->canpurge = canpurge; -} - -static void TrDescRead(TrObjDesc *desc, HeapTuple rbtup) -{ - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbtup); - - desc->id = HeapTupleGetOid(rbtup); - desc->baseid = rbForm->rcybaseid; - - desc->dbid = rbForm->rcydbid; - desc->relid = rbForm->rcyrelid; - (void)namestrcpy((Name)desc->name, NameStr(rbForm->rcyname)); - (void)namestrcpy((Name)desc->originname, NameStr(rbForm->rcyoriginname)); - desc->operation = (rbForm->rcyoperation == 'd') ? RB_OPER_DROP : RB_OPER_TRUNCATE; - desc->type = (TrObjType)rbForm->rcytype; - desc->recyclecsn = rbForm->rcyrecyclecsn; - desc->recycletime = rbForm->rcyrecycletime; - desc->createcsn = rbForm->rcycreatecsn; - desc->changecsn = rbForm->rcychangecsn; - desc->nspace = rbForm->rcynamespace; - desc->owner = rbForm->rcyowner; - desc->tablespace = rbForm->rcytablespace; - desc->relfilenode = rbForm->rcyrelfilenode; - desc->canrestore = rbForm->rcycanrestore; - desc->canpurge = rbForm->rcycanpurge; - desc->frozenxid = rbForm->rcyfrozenxid; - desc->frozenxid64 = TrRbGetRcyfrozenxid64(rbtup); -} - -Oid TrDescWrite(TrObjDesc *desc) -{ - Relation rel; - HeapTuple tup; - bool nulls[Natts_pg_recyclebin] = {0}; - Datum values[Natts_pg_recyclebin]; - NameData name; - NameData originname; - Oid rbid; - - values[Anum_pg_recyclebin_rcydbid - 1] = ObjectIdGetDatum(desc->dbid); - values[Anum_pg_recyclebin_rcybaseid - 1] = ObjectIdGetDatum(desc->baseid); - values[Anum_pg_recyclebin_rcyrelid - 1] = ObjectIdGetDatum(desc->relid); - (void)namestrcpy(&name, desc->name); - values[Anum_pg_recyclebin_rcyname - 1] = NameGetDatum(&name); - (void)namestrcpy(&originname, desc->originname); - values[Anum_pg_recyclebin_rcyoriginname - 1] = NameGetDatum(&originname); - values[Anum_pg_recyclebin_rcyoperation - 1] = (desc->operation == RB_OPER_DROP) ? 
'd' : 't'; - values[Anum_pg_recyclebin_rcytype - 1] = Int32GetDatum(desc->type); - values[Anum_pg_recyclebin_rcyrecyclecsn - 1] = Int64GetDatum(desc->recyclecsn); - values[Anum_pg_recyclebin_rcyrecycletime - 1] = TimestampTzGetDatum(desc->recycletime); - values[Anum_pg_recyclebin_rcycreatecsn - 1] = Int64GetDatum(desc->createcsn); - values[Anum_pg_recyclebin_rcychangecsn - 1] = Int64GetDatum(desc->changecsn); - values[Anum_pg_recyclebin_rcynamespace - 1] = ObjectIdGetDatum(desc->nspace); - values[Anum_pg_recyclebin_rcyowner - 1] = ObjectIdGetDatum(desc->owner); - values[Anum_pg_recyclebin_rcytablespace - 1] = ObjectIdGetDatum(desc->tablespace); - values[Anum_pg_recyclebin_rcyrelfilenode - 1] = ObjectIdGetDatum(desc->relfilenode); - values[Anum_pg_recyclebin_rcycanrestore - 1] = BoolGetDatum(desc->canrestore); - values[Anum_pg_recyclebin_rcycanpurge - 1] = BoolGetDatum(desc->canpurge); - values[Anum_pg_recyclebin_rcyfrozenxid - 1] = ShortTransactionIdGetDatum(desc->frozenxid); - values[Anum_pg_recyclebin_rcyfrozenxid64 - 1] = TransactionIdGetDatum(desc->frozenxid64); - - rel = heap_open(RecyclebinRelationId, RowExclusiveLock); - - tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); - - rbid = simple_heap_insert(rel, tup); - - CatalogUpdateIndexes(rel, tup); - - heap_freetuple_ext(tup); - - heap_close(rel, RowExclusiveLock); - - CommandCounterIncrement(); - - return rbid; -} - -static bool TrFetchOrinameImpl(Oid nspId, const char *oriname, TrObjType type, - TrObjDesc *desc, TrOperMode operMode) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[3]; - HeapTuple tup; - bool found = false; - - if (!OidIsValid(nspId)) { - return false; - } - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(nspId)); - ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - ScanKeyInit(&skey[2], Anum_pg_recyclebin_rcyoriginname, BTEqualStrategyNumber, - F_NAMEEQ, CStringGetDatum(oriname)); - - sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 3, skey); - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if (rbForm->rcytype != type || (operMode == RB_OPER_RESTORE_DROP && rbForm->rcyoperation != 'd') || - (operMode == RB_OPER_RESTORE_TRUNCATE && rbForm->rcyoperation != 't')) { - continue; - } - found = true; - TrDescRead(desc, tup); - break; - } - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return found; -} - -static bool TrFetchName(const char *rcyname, TrObjType type, TrObjDesc *desc) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - bool found = false; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcyname, BTEqualStrategyNumber, - F_NAMEEQ, CStringGetDatum(rcyname)); - - sd = systable_beginscan(rbRel, RecyclebinNameIndexId, true, NULL, 1, skey); - if ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if (rbForm->rcytype != type) { - ereport(ERROR, - (errmsg("The recycle object \"%s\" type mismatched.", rcyname))); - } - found = true; - TrDescRead(desc, tup); - } - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return found; -} - -static bool TrFetchOriname(const char *schemaname, const char *relname, TrObjType type, - 
TrObjDesc *desc, TrOperMode operMode) -{ - bool found = false; - Oid nspId; - - if (schemaname) { - nspId = get_namespace_oid(schemaname, true); - found = TrFetchOrinameImpl(nspId, relname, type, desc, operMode); - } else { - List *activeSearchPath = NIL; - ListCell *l = NULL; - - recomputeNamespacePath(); - activeSearchPath = list_copy(u_sess->catalog_cxt.activeSearchPath); - foreach (l, activeSearchPath) { - nspId = lfirst_oid(l); - if (TrFetchOrinameImpl(nspId, relname, type, desc, operMode)) { - found = true; - break; - } - } - list_free_ext(activeSearchPath); - } - - return found; -} - -void TrUpdateBaseid(const TrObjDesc *desc) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - HeapTuple newtup; - Datum values[Natts_pg_recyclebin] = { 0 }; - bool nulls[Natts_pg_recyclebin] = { false }; - bool replaces[Natts_pg_recyclebin] = { false }; - - rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); - - ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(desc->id)); - - sd = systable_beginscan(rbRel, RecyclebinIdIndexId, true, NULL, 1, skey); - if ((tup = systable_getnext(sd)) == NULL) { - ereport(ERROR, (errmsg("recycle object %u does not exist", desc->id))); - } - - replaces[Anum_pg_recyclebin_rcybaseid - 1] = true; - values[Anum_pg_recyclebin_rcybaseid - 1] = ObjectIdGetDatum(desc->baseid); - - newtup = heap_modify_tuple(tup, RelationGetDescr(rbRel), values, nulls, replaces); - - simple_heap_update(rbRel, &newtup->t_self, newtup); - - CatalogUpdateIndexes(rbRel, newtup); - - heap_freetuple_ext(newtup); - - systable_endscan(sd); - heap_close(rbRel, RowExclusiveLock); - - return; -} - -static void TrLockRelationImpl(Oid relid) -{ - /* - * Lock failed may due to concurrently purge/timecapsule/DQL - * on recycle object, or access on normal relation. - */ - if (!ConditionalLockRelationOid(relid, AccessExclusiveLock)) { - ereport(ERROR, - (errcode(ERRCODE_RBIN_LOCK_NOT_AVAILABLE), - errmsg("could not obtain lock on relation \"%u\"", relid))); - } - - /* - * Now that we have the lock, probe to see if the relation - * really exists or not. - */ - AcceptInvalidationMessages(); - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) { - /* Clean already held locks if error return. */ - UnlockRelationOid(relid, AccessExclusiveLock); - ereport(ERROR, - (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), - errmsg("relation \"%u\" does not exist", relid))); - } -} - -static void TrLockRelation(TrObjDesc *desc) -{ - Oid heapOid = InvalidOid; - - /* Lock heap relation for index first */ - if (desc->type == RB_OBJ_INDEX) { - heapOid = IndexGetRelation(desc->relid, true); - if (!OidIsValid(heapOid)) { - ereport(ERROR, - (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), - errmsg("relation \"%u\" does not exist", desc->relid))); - } - TrLockRelationImpl(heapOid); - } - - /* Use TRY-CATCH block to clean locks already held if error. */ - PG_TRY(); - { - /* Lock relation self */ - TrLockRelationImpl(desc->relid); - } - PG_CATCH(); - { - if (desc->type == RB_OBJ_INDEX) { - UnlockRelationOid(heapOid, AccessExclusiveLock); - } - PG_RE_THROW(); - } - PG_END_TRY(); -} - -static void TrUnlockTrItem(TrObjDesc *desc) -{ - UnlockDatabaseObject(RecyclebinRelationId, desc->id, 0, - AccessExclusiveLock); -} - -static void TrLockTrItem(TrObjDesc *desc) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - /* 1. 
Try to lock rb item in AccessExclusiveLock */ - if (!ConditionalLockDatabaseObject(RecyclebinRelationId, desc->id, 0, AccessExclusiveLock)) { - ereport(ERROR, - (errcode(ERRCODE_RBIN_LOCK_NOT_AVAILABLE), - errmsg("could not obtain lock on recycle object '%s'", desc->name))); - } - - /* - * 2. Now that we have the lock, probe to see if the rb item really - * exists or not. - */ - AcceptInvalidationMessages(); - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(desc->id)); - - sd = systable_beginscan(rbRel, RecyclebinIdIndexId, true, NULL, 1, skey); - if ((tup = systable_getnext(sd)) == NULL) { - UnlockDatabaseObject(RecyclebinRelationId, desc->id, 0, AccessExclusiveLock); - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - ereport(ERROR, - (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), - errmsg("recycle object \"%s\" does not exist", desc->name))); - } - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return; -} - -static void TrOperMatch(const TrObjDesc *desc, TrOperMode operMode) -{ - switch (operMode) { - case RB_OPER_PURGE: - if (!desc->canpurge) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("recycle object \"%s\" cannot be purged", desc->name))); - } - break; - - case RB_OPER_RESTORE_DROP: - if (!desc->canrestore || desc->operation != RB_OPER_DROP) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("recycle object \"%s\" cannot be restored", desc->name))); - } - break; - - case RB_OPER_RESTORE_TRUNCATE: - if (!desc->canrestore || desc->operation != RB_OPER_TRUNCATE) { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OPERATION), - errmsg("recycle object \"%s\" cannot be restored", desc->name))); - } - break; - - default: - ereport(ERROR, - (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), - errmsg("unrecognized recyclebin operation: %u", operMode))); - break; - } -} - -/* - * Fetch object from recycle bin for rb operations - purge, restore : - * Prefer to fetch as original name, then recycle name. - */ -void TrOperFetch(const RangeVar *purobj, TrObjType objtype, - TrObjDesc *desc, TrOperMode operMode) -{ - bool found = false; - - AcceptInvalidationMessages(); - - /* Prefer to fetch as original name */ - found = TrFetchOriname(purobj->schemaname, purobj->relname, objtype, desc, operMode); - /* if not found, then fetch as recycle name */ - if (!found) { - found = TrFetchName(purobj->relname, objtype, desc); - } - - /* not found, throw error */ - if (!found) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("recycle object \"%s\" desired does not exist", purobj->relname))); - } - - TrOperMatch(desc, operMode); - - return; -} -static void TrPermRestore(TrObjDesc *desc, TrOperMode operMode) -{ - AclResult aclCreateResult; - - /* Check namespace permissions. 
*/ - aclCreateResult = pg_namespace_aclcheck(desc->nspace, desc->authid, ACL_CREATE); - if (aclCreateResult != ACLCHECK_OK) { - aclcheck_error(aclCreateResult, ACL_KIND_NAMESPACE, get_namespace_name(desc->nspace)); - } - - AclResult aclUsageResult = pg_namespace_aclcheck(desc->nspace, desc->authid, ACL_USAGE); - if (aclUsageResult != ACLCHECK_OK) { - aclcheck_error(aclUsageResult, ACL_KIND_NAMESPACE, get_namespace_name(desc->nspace)); - } - - /* Allow restore to either table owner or schema owner */ - if (!pg_class_ownercheck(desc->relid, desc->authid) && !pg_namespace_ownercheck(desc->nspace, desc->authid)) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, desc->name); - return; - } - - if (operMode == RB_OPER_RESTORE_TRUNCATE) { - AclResult aclTruncateResult = pg_class_aclcheck(desc->relid, desc->authid, ACL_TRUNCATE); - if (aclTruncateResult != ACLCHECK_OK) { - aclcheck_error(aclTruncateResult, ACL_KIND_CLASS, desc->name); - } - } -} - -static void TrPermPurge(TrObjDesc *desc, TrOperMode operMode) -{ - AclResult result; - - result = pg_namespace_aclcheck(desc->nspace, desc->authid, ACL_USAGE); - if (result != ACLCHECK_OK) { - aclcheck_error(result, ACL_KIND_NAMESPACE, get_namespace_name(desc->nspace)); - } - if (!pg_class_ownercheck(desc->relid, desc->authid) && !pg_namespace_ownercheck(desc->nspace, desc->authid)) { - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, desc->name); - } -} - -/* - * Check permission for rb operations - purge, restore - */ -static void TrPerm(TrObjDesc *desc, TrOperMode operMode) -{ - switch (operMode) { - case RB_OPER_RESTORE_DROP: - case RB_OPER_RESTORE_TRUNCATE: - TrPermRestore(desc, operMode); - break; - case RB_OPER_PURGE: - TrPermPurge(desc, operMode); - break; - default: - /* Never reached here. */ - Assert(0); - break; - } -} - -/* - * Prepare for rb operations - purge, restore : - * check permission, lock objects - */ -void TrOperPrep(TrObjDesc *desc, TrOperMode operMode) -{ - bool needLockRelation = false; - - /* - * 1. Check permission. - */ - TrPerm(desc, operMode); - - /* - * 2. Acquire lock on rb item, avoid concurrently purge, restore. - */ - TrLockTrItem(desc); - - /* - * 3. Acquire lock on relation, avoid concurrently DQL. - * Notice: ignore this step when we purge truncated relation - * as base relation may not exists. - */ - needLockRelation = !(operMode == RB_OPER_PURGE && desc->operation == RB_OPER_TRUNCATE); - if (needLockRelation) { - /* Use TRY-CATCH block to clean locks already held if error. */ - PG_TRY(); - { - TrLockRelation(desc); - } - PG_CATCH(); - { - TrUnlockTrItem(desc); - PG_RE_THROW(); - } - PG_END_TRY(); - } -} - -bool NeedTrComm(Oid relid) -{ - Relation rel; - Form_pg_class classForm; - - if (/* - *Disable Recyclebin-based-Drop/Truncate when - */ - /* recyclebin disabled, or */ - !u_sess->attr.attr_storage.enable_recyclebin || - /* target db is template1, or */ - u_sess->proc_cxt.MyDatabaseId == TemplateDbOid || - /* in maintenance mode, or */ - u_sess->attr.attr_common.xc_maintenance_mode || - /* in in-place upgrade mode, or */ - t_thrd.proc->workingVersionNum < 92350 || - /* in non-singlenode mode, or */ - (g_instance.role != VSINGLENODE) || - /* in bootstrap mode. 
*/ - IsInitdb) { - return false; - } - - rel = relation_open(relid, NoLock); - classForm = rel->rd_rel; - if (/* - * Disable Recyclebin-based-Drop/Truncate if - */ - /* table is non ordinary table, or */ - classForm->relkind != RELKIND_RELATION || - /* is non heap table, or */ - rel->rd_tam_type != TAM_HEAP || - /* is non regular table, or */ - classForm->relpersistence != RELPERSISTENCE_PERMANENT || - /* is partitioned table, or */ - classForm->parttype != PARTTYPE_NON_PARTITIONED_RELATION || - /* is shared table across databases, or */ - classForm->relisshared || - /* has derived classes, or */ - classForm->relhassubclass || - /* has any PARTIAL CLUSTER KEY, or */ - classForm->relhasclusterkey || - /* is cstore table, or */ - (rel->rd_options && StdRelOptIsColStore(rel->rd_options)) || RelationIsColStore(rel) || - /* is hbkt table, or */ - (RELATION_HAS_BUCKET(rel) || RELATION_OWN_BUCKET(rel)) || - /* is dfs table, or */ - RelationIsPAXFormat(rel) || - /* is resizing, or */ - RelationInClusterResizing(rel) || - /* is in system namespace. */ - (IsSystemNamespace(classForm->relnamespace) || IsToastNamespace(classForm->relnamespace) || - IsCStoreNamespace(classForm->relnamespace))) { - relation_close(rel, NoLock); - return false; - } - - relation_close(rel, NoLock); - - return true; -} - -TrObjType TrGetObjType(Oid nspId, char relKind) -{ - TrObjType type = RB_OBJ_TABLE; - - switch (relKind) { - case RELKIND_INDEX: - type = IsToastNamespace(nspId) ? RB_OBJ_TOAST_INDEX : RB_OBJ_INDEX; - break; - case RELKIND_RELATION: - type = RB_OBJ_TABLE; - break; - case RELKIND_SEQUENCE: - case RELKIND_LARGE_SEQUENCE: - type = RB_OBJ_SEQUENCE; - break; - case RELKIND_TOASTVALUE: - type = RB_OBJ_TOAST; - break; - default: - /* Never reached here. */ - Assert(0); - break; - } - - return type; -} - -static bool TrObjAddrExists(Oid classid, Oid objid, ObjectAddresses *objSet) -{ - int i; - - for (i = 0; i < objSet->numrefs; i++) { - if (TrObjIsEqualEx(classid, objid, &objSet->refs[i])) { - return true; - } - } - - return false; -} - -/* - * output: refobjs - */ -void TrFindAllRefObjs(Relation depRel, const ObjectAddress *subobj, - ObjectAddresses *refobjs, bool ignoreObjSubId) -{ - SysScanDesc sd; - HeapTuple tuple; - ScanKeyData key[3]; - int nkeys; - - ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(subobj->classId)); - ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(subobj->objectId)); - nkeys = 2; - if (!ignoreObjSubId && subobj->objectSubId != 0) { - ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(subobj->objectSubId)); - nkeys = 3; - } - - sd = systable_beginscan(depRel, DependDependerIndexId, true, NULL, nkeys, key); - while (HeapTupleIsValid(tuple = systable_getnext(sd))) { - Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple); - /* Cascaded clean rb object in `DROP SCHEMA` command. */ - if (depForm->refclassid == NamespaceRelationId) { - continue; - } - - /* We keep `objSet` unique when `ignoreObjSubId = true` to avoid circle recursive. 
*/ - if (!ignoreObjSubId || !TrObjAddrExists(depForm->refclassid, depForm->refobjid, refobjs)) { - add_object_address_ext(depForm->refclassid, depForm->refobjid, - depForm->refobjsubid, depForm->deptype, refobjs); - } - } - - systable_endscan(sd); - return; -} - -static void TrFindAllInternalObjs(Relation depRel, const ObjectAddress *refobj, - ObjectAddresses *objSet, bool ignoreObjSubId = false) -{ - SysScanDesc sd; - HeapTuple tuple; - ScanKeyData key[3]; - int nkeys; - - ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(refobj->classId)); - ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(refobj->objectId)); - nkeys = 2; - if (!ignoreObjSubId && refobj->objectSubId != 0) { - ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, - Int32GetDatum(refobj->objectSubId)); - nkeys = 3; - } - - sd = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, nkeys, key); - while (HeapTupleIsValid(tuple = systable_getnext(sd))) { - Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple); - if (depForm->deptype != 'i') { - continue; - } - - /* We keep `objSet` unique when `ignoreObjSubId = true` to avoid circle recursive. */ - if (!ignoreObjSubId || !TrObjAddrExists(depForm->classid, depForm->objid, objSet)) { - add_object_address_ext(depForm->classid, depForm->objid, - depForm->objsubid, depForm->deptype, objSet); - } - } - - systable_endscan(sd); - return; -} - -static void TrDoPurgeObject(TrObjDesc *desc) -{ - if (desc->operation == RB_OPER_DROP) { - TrDoPurgeObjectDrop(desc); - } else { - TrDoPurgeObjectTruncate(desc); - } -} - -void TrPurgeObject(const RangeVar *purobj, TrObjType type) -{ - TrObjDesc desc; - - TrOperFetch(purobj, type, &desc, RB_OPER_PURGE); - - desc.authid = GetUserId(); - TrOperPrep(&desc, RB_OPER_PURGE); - - TrDoPurgeObject(&desc); - - return; -} - -const int PURGE_BATCH = 64; -const int PURGE_SINGL = 64; -typedef void (*TrFetchBeginHook)(SysScanDesc *sd, Oid objId); -typedef bool (*TrFetchMatchHook)(Relation rbRel, HeapTuple rbTup, Oid objId); - -static void TrFetchBegin(TrFetchBeginHook fetchHook, SysScanDesc *sd, Oid objId) -{ - fetchHook(sd, objId); -} - -// @return: true for eof -static bool TrFetchExec(TrFetchMatchHook matchHook, Oid objId, SysScanDesc sd, TrObjDesc *desc) -{ - HeapTuple tup; - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if (rbForm->rcytype == RB_OBJ_TABLE && matchHook(sd->heap_rel, tup, objId)) { - Assert (rbForm->rcycanpurge); - TrDescRead(desc, tup); - return false; - } - } - return true; -} - -static void TrFetchEnd(SysScanDesc sd) -{ - Relation rbRel = sd->heap_rel; - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); -} - -static bool TrPurgeBatch(TrFetchBeginHook beginHook, TrFetchMatchHook matchHook, - Oid objId, Oid roleid, uint32 maxBatch, PurgeMsgRes *localRes) -{ - SysScanDesc sd = NULL; - TrObjDesc desc; - uint32 count = 0; - bool eof = false; - - RbMsgResetRes(localRes); - - StartTransactionCommand(); - - TrFetchBegin(beginHook, &sd, objId); - while (!(eof = TrFetchExec(matchHook, objId, sd, &desc))) { - CHECK_FOR_INTERRUPTS(); - - PG_TRY(); - { - desc.authid = roleid; - TrOperPrep(&desc, RB_OPER_PURGE); - - TrDoPurgeObject(&desc); - localRes->purgedNum++; - } - PG_CATCH(); - { - int errcode = geterrcode(); - if (errcode == ERRCODE_RBIN_LOCK_NOT_AVAILABLE) { - errno_t rc; - rc = strncpy_s(localRes->errMsg, RB_MAX_ERRMSG_SIZE, 
Geterrmsg(), RB_MAX_ERRMSG_SIZE - 1); - securec_check_c(rc, "\0", "\0"); - localRes->skippedNum++; - } else if (errcode == ERRCODE_RBIN_UNDEFINED_OBJECT) { - localRes->undefinedNum++; - } else { - PG_RE_THROW(); - } - } - PG_END_TRY(); - - if (++count >= maxBatch) { - break; - } - } - - TrFetchEnd(sd); - - CommitTransactionCommand(); - - return eof; -} - -static void TrFetchBeginSpace(SysScanDesc *sd, Oid spcId) -{ - ScanKeyData skey[2]; - Relation rbRel; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcytablespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(spcId)); - ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - - *sd = systable_beginscan(rbRel, RecyclebinDbidSpcidRcycsnIndexId, true, NULL, 2, skey); -} - -static bool TrFetchMatchSpace(Relation rbRel, HeapTuple rbTup, Oid objId) -{ - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbTup); - return rbForm->rcytablespace == objId; -} - -void TrPurgeTablespace(int64 id) -{ - PurgeMsgReq *req = &RbMsg(id)->req; - PurgeMsgRes localRes; - bool eof = false; - - do { - eof = TrPurgeBatch(TrFetchBeginSpace, TrFetchMatchSpace, req->objId, req->authId, PURGE_BATCH, &localRes); - RbMsgSetStatistics(id, &localRes); - } while (!eof && localRes.skippedNum == 0); -} - -void TrPurgeTablespaceDML(int64 id) -{ - PurgeMsgReq *req = &RbMsg(id)->req; - PurgeMsgRes localRes; - bool eof = false; - - do { - eof = TrPurgeBatch(TrFetchBeginSpace, TrFetchMatchSpace, req->objId, req->authId, PURGE_SINGL, &localRes); - RbMsgSetStatistics(id, &localRes); - } while (!eof && localRes.purgedNum == 0); -} - -static void TrFetchBeginRecyclebin(SysScanDesc *sd, Oid objId) -{ - ScanKeyData skey[2]; - Relation rbRel; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - - *sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); -} - -static bool TrFetchMatchRecyclebin(Relation rbRel, HeapTuple rbTup, Oid objId) -{ - return true; -} - -void TrPurgeRecyclebin(int64 id) -{ - PurgeMsgReq *req = &RbMsg(id)->req; - PurgeMsgRes localRes; - bool eof = false; - - do { - eof = TrPurgeBatch(TrFetchBeginRecyclebin, TrFetchMatchRecyclebin, - InvalidOid, req->authId, PURGE_BATCH, &localRes); - RbMsgSetStatistics(id, &localRes); - } while (!eof && localRes.skippedNum == 0); -} - -static void TrFetchBeginSchema(SysScanDesc *sd, Oid objId) -{ - ScanKeyData skey[2]; - Relation rbRel; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(objId)); - ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - - *sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 2, skey); -} - -static bool TrFetchMatchSchema(Relation rbRel, HeapTuple rbTup, Oid objId) -{ - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbTup); - return rbForm->rcynamespace == objId; -} - -void TrPurgeSchema(int64 id) -{ - PurgeMsgReq *req = &RbMsg(id)->req; - PurgeMsgRes localRes; - bool eof = false; - - do { - eof = TrPurgeBatch(TrFetchBeginSchema, TrFetchMatchSchema, req->objId, req->authId, PURGE_BATCH, &localRes); - RbMsgSetStatistics(id, &localRes); - } while (!eof 
&& localRes.skippedNum == 0); -} - -static void TrFetchBeginUser(SysScanDesc *sd, Oid objId) -{ - ScanKeyData skey[2]; - Relation rbRel; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - - *sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); -} - -static bool TrFetchMatchUser(Relation rbRel, HeapTuple rbTup, Oid objId) -{ - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbTup); - return rbForm->rcyowner == objId; -} - -void TrPurgeUser(int64 id) -{ - PurgeMsgReq *req = &RbMsg(id)->req; - PurgeMsgRes localRes; - bool eof = false; - - do { - eof = TrPurgeBatch(TrFetchBeginUser, TrFetchMatchUser, req->objId, req->authId, PURGE_BATCH, &localRes); - RbMsgSetStatistics(id, &localRes); - } while (!eof && localRes.skippedNum == 0); -} - -static void TrFetchBeginAuto(SysScanDesc *sd, Oid objId) -{ - ScanKeyData skey[2]; - Relation rbRel; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - - *sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); -} - -static bool TrFetchMatchAuto(Relation rbRel, HeapTuple rbTup, Oid objId) -{ - bool isNull = false; - Datum datumRcyTime = heap_getattr(rbTup, Anum_pg_recyclebin_rcyrecycletime, - RelationGetDescr(rbRel), &isNull); - - long secs; - int msecs; - TimestampDifference(isNull ? 0 : DatumGetTimestampTz(datumRcyTime), - GetCurrentTimestamp(), &secs, &msecs); - - return secs > u_sess->attr.attr_storage.recyclebin_retention || secs < 0; -} - -void TrPurgeAuto(int64 id) -{ - PurgeMsgReq *req = &RbMsg(id)->req; - PurgeMsgRes localRes; - bool eof = false; - do { - eof = TrPurgeBatch(TrFetchBeginAuto, TrFetchMatchAuto, InvalidOid, req->authId, PURGE_BATCH, &localRes); - RbMsgSetStatistics(id, &localRes); - } while (!eof); -} - -void TrSwapRelfilenode(Relation rbRel, HeapTuple rbTup) -{ - Relation relRel; - HeapTuple relTup; - HeapTuple newTup; - TrObjDesc desc; - int maxNattr = Max(Natts_pg_class, Natts_pg_recyclebin); - Datum *values = NULL; - bool *nulls = NULL; - bool *replaces = NULL; - NameData name; - errno_t rc = EOK; - - TrDescRead(&desc, rbTup); - - /* 1. Update pg_class */ - relRel = heap_open(RelationRelationId, RowExclusiveLock); - relTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(desc.relid)); - if (!HeapTupleIsValid(relTup)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_TABLE), - errmsg("cache lookup failed for relation %u", desc.relid))); - } - - values = (Datum *)palloc0(sizeof(Datum) * maxNattr); - nulls = (bool *)palloc0(sizeof(bool) * maxNattr); - replaces = (bool *)palloc0(sizeof(bool) * maxNattr); - - replaces[Anum_pg_class_relfilenode - 1] = true; - values[Anum_pg_class_relfilenode - 1] = ObjectIdGetDatum(desc.relfilenode); - - replaces[Anum_pg_class_relfrozenxid - 1] = true; - values[Anum_pg_class_relfrozenxid - 1] = ShortTransactionIdGetDatum(desc.frozenxid); - - replaces[Anum_pg_class_relfrozenxid64 - 1] = true; - values[Anum_pg_class_relfrozenxid64 - 1] = TransactionIdGetDatum(desc.frozenxid64); - - newTup = heap_modify_tuple(relTup, RelationGetDescr(relRel), values, nulls, replaces); - - simple_heap_update(relRel, &newTup->t_self, newTup); - - CatalogUpdateIndexes(relRel, newTup); - - heap_freetuple_ext(newTup); - - /* 2. 
Update pg_recyclebin */ - rc = memset_s(values, sizeof(Datum) * maxNattr, 0, sizeof(Datum) * maxNattr); - securec_check(rc, "\0", "\0"); - rc = memset_s(nulls, sizeof(bool) * maxNattr, false, sizeof(bool) * maxNattr); - securec_check(rc, "\0", "\0"); - rc = memset_s(replaces, sizeof(bool) * maxNattr, false, sizeof(bool) * maxNattr); - securec_check(rc, "\0", "\0"); - - (void)TrGenObjName(NameStr(name), RelationRelationId, desc.relid); - replaces[Anum_pg_recyclebin_rcyname - 1] = true; - values[Anum_pg_recyclebin_rcyname - 1] = NameGetDatum(&name); - - replaces[Anum_pg_recyclebin_rcyoriginname - 1] = true; - values[Anum_pg_recyclebin_rcyoriginname - 1] = - NameGetDatum(&((Form_pg_class)GETSTRUCT(relTup))->relname); - - replaces[Anum_pg_recyclebin_rcyrecyclecsn - 1] = true; - values[Anum_pg_recyclebin_rcyrecyclecsn - 1] = - Int64GetDatum(t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo); - - replaces[Anum_pg_recyclebin_rcyrecycletime - 1] = true; - values[Anum_pg_recyclebin_rcyrecycletime - 1] = TimestampTzGetDatum(GetCurrentTimestamp()); - - replaces[Anum_pg_recyclebin_rcyrelfilenode - 1] = true; - values[Anum_pg_recyclebin_rcyrelfilenode - 1] = - ObjectIdGetDatum(((Form_pg_class)GETSTRUCT(relTup))->relfilenode); - - replaces[Anum_pg_recyclebin_rcyfrozenxid - 1] = true; - values[Anum_pg_recyclebin_rcyfrozenxid - 1] = - ShortTransactionIdGetDatum(((Form_pg_class)GETSTRUCT(relTup))->relfrozenxid); - - replaces[Anum_pg_recyclebin_rcyfrozenxid64 - 1] = true; - values[Anum_pg_recyclebin_rcyfrozenxid64 - 1] = - TransactionIdGetDatum(((Form_pg_class)GETSTRUCT(relTup))->relfrozenxid64); - - newTup = heap_modify_tuple(rbTup, RelationGetDescr(rbRel), values, nulls, replaces); - - simple_heap_update(rbRel, &newTup->t_self, newTup); - - CatalogUpdateIndexes(rbRel, newTup); - - heap_freetuple_ext(newTup); - - pfree(values); - pfree(nulls); - pfree(replaces); - - heap_freetuple_ext(relTup); - heap_close(relRel, RowExclusiveLock); -} - -void TrBaseRelMatched(TrObjDesc *baseDesc) -{ - ObjectAddress obj = {RelationRelationId, baseDesc->relid}; - if (TrIsRefRbObject(&obj)) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("relation \"%s\" does not exist", baseDesc->originname))); - } - - Relation rel = RelationIdGetRelation(baseDesc->relid); - Assert(RelationIsValid(rel)); - if (RelationGetCreatecsn(rel) != (CommitSeqNo)baseDesc->createcsn) { - ereport(ERROR, - (errmsg("The recycle object \"%s\" and relation \"%s\" mismatched.", - baseDesc->name, RelationGetRelationName(rel)))); - } - - if (RelationGetChangecsn(rel) > (CommitSeqNo)baseDesc->changecsn) { - ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("The table definition of \"%s\" has been changed.", - RelationGetRelationName(rel)))); - } - - RelationClose(rel); -} - -void TrAdjustFrozenXid64(Oid dbid, TransactionId *frozenXID) -{ - Relation rbRel; - SysScanDesc sd; - HeapTuple rbtup; - - if (!TcapFeatureAvail()) { - return; - } - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); - while ((rbtup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbtup); - TransactionId rcyfrozenxid64; - - if (rbForm->rcydbid != dbid || (rbForm->rcytype != RB_OBJ_TABLE && rbForm->rcytype != RB_OBJ_TOAST)) { - continue; - } - - rcyfrozenxid64 = TrRbGetRcyfrozenxid64(rbtup, rbRel); - Assert(TransactionIdIsNormal(rcyfrozenxid64)); - - if (TransactionIdPrecedes(rcyfrozenxid64, *frozenXID)) { - *frozenXID = rcyfrozenxid64; - } 
- } - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return; -} - -bool TrRbIsEmptyDb(Oid dbid) -{ - Relation rbRel; - SysScanDesc sd; - HeapTuple tup; - ScanKeyData skey[1]; - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(dbid)); - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); - tup = systable_getnext(sd); - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return tup == NULL; -} - -bool TrRbIsEmptySpc(Oid spcId) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcytablespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(spcId)); - - sd = systable_beginscan(rbRel, RecyclebinDbidSpcidRcycsnIndexId, true, NULL, 1, skey); - tup = systable_getnext(sd); - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return tup == NULL; -} - -bool TrRbIsEmptySchema(Oid nspId) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[2]; - HeapTuple tup; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(nspId)); - ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - - sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 2, skey); - tup = systable_getnext(sd); - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return tup == NULL; -} - -bool TrRbIsEmptyUser(Oid roleId) -{ - Relation rbRel; - SysScanDesc sd; - HeapTuple tup; - bool found = false; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); - - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if ((TrObjType)rbForm->rcytype != RB_OBJ_TABLE || rbForm->rcyowner != roleId) { - continue; - } - - found = true; - break; - } - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return !found; -} - -static bool TrOidExists(const List *lOid, Oid oid) -{ - ListCell *cell = NULL; - if (lOid == NULL) { - return false; - } - - foreach (cell, lOid) { - if (oid == (*(Oid *)lfirst(cell))) { - return true; - } - } - return false; -} - -List *TrGetDbListRcy(void) -{ - Relation rbRel; - SysScanDesc sd; - HeapTuple tup; - - List *lName = NIL; - List *lOid = NIL; - char *dbname = NULL; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); - - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if (TrOidExists(lOid, rbForm->rcydbid)) { - continue; - } - Oid *oid = (Oid *)palloc0(sizeof(Oid)); - *oid = rbForm->rcydbid; - lOid = lappend(lOid, oid); - - dbname = get_database_name(rbForm->rcydbid); - if (dbname == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("database \"%u\" does not exist", rbForm->rcydbid))); - } - lName = lappend(lName, dbname); - } - - list_free_deep(lOid); - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return lName; -} - -List *TrGetDbListSpc(Oid spcId) -{ - List *lName = NIL; - List *lOid = NIL; - char *dbname = NULL; - - Relation 
rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcytablespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(spcId)); - - sd = systable_beginscan(rbRel, RecyclebinDbidSpcidRcycsnIndexId, true, NULL, 1, skey); - - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if (TrOidExists(lOid, rbForm->rcydbid)) { - continue; - } - Oid *oid = (Oid *)palloc0(sizeof(Oid)); - *oid = rbForm->rcydbid; - lOid = lappend(lOid, oid); - - dbname = get_database_name(rbForm->rcydbid); - if (dbname == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("database \"%u\" does not exist", rbForm->rcydbid))); - } - lName = lappend(lName, dbname); - } - - list_free_deep(lOid); - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return lName; -} - -List *TrGetDbListSchema(Oid nspId) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - List *lName = NIL; - List *lOid = NIL; - char *dbname = NULL; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(nspId)); - - sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 1, skey); - - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if (TrOidExists(lOid, rbForm->rcydbid)) { - continue; - } - Oid *oid = (Oid *)palloc0(sizeof(Oid)); - *oid = rbForm->rcydbid; - lOid = lappend(lOid, oid); - - dbname = get_database_name(rbForm->rcydbid); - if (dbname == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("database \"%u\" does not exist", rbForm->rcydbid))); - } - lName = lappend(lName, dbname); - } - - list_free_deep(lOid); - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return lName; -} - -List *TrGetDbListUser(Oid roleId) -{ - Relation rbRel; - SysScanDesc sd; - HeapTuple tup; - - List *lName = NIL; - List *lOid = NIL; - char *dbname = NULL; - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - - sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); - - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if ((TrObjType)rbForm->rcytype != RB_OBJ_TABLE || rbForm->rcyowner != roleId) { - continue; - } - if (TrOidExists(lOid, rbForm->rcydbid)) { - continue; - } - Oid *oid = (Oid *)palloc0(sizeof(Oid)); - *oid = rbForm->rcydbid; - lOid = lappend(lOid, oid); - - dbname = get_database_name(rbForm->rcydbid); - if (dbname == NULL) { - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("database \"%u\" does not exist", rbForm->rcydbid))); - } - - lName = lappend(lName, dbname); - } - - list_free_deep(lOid); - - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return lName; -} - -/* - * TrGetDatabaseList - * Return a list of all databases found in pg_database. 
- */ -List *TrGetDbListAuto(void) -{ - List* dblist = NIL; - Relation rel; - SysScanDesc sd; - HeapTuple tup; - - rel = heap_open(DatabaseRelationId, AccessShareLock); - sd = systable_beginscan(rel, InvalidOid, false, NULL, 0, NULL); - - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_database pgdatabase = (Form_pg_database)GETSTRUCT(tup); - if (strcmp(NameStr(pgdatabase->datname), "template0") == 0 || - strcmp(NameStr(pgdatabase->datname), "template1") == 0) { - continue; - } - dblist = lappend(dblist, pstrdup(NameStr(pgdatabase->datname))); - } - - systable_endscan(sd); - heap_close(rel, AccessShareLock); - - return dblist; -} - -static bool TrObjInRecyclebin(const ObjectAddress *obj) -{ - Relation rbRel; - SysScanDesc sd; - HeapTuple tup; - ScanKeyData skey[2]; - bool found = false; - - if (getObjectClass(obj) != OCLASS_CLASS) { - return false; - } - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); - ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcyrelid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(obj->objectId)); - - rbRel = heap_open(RecyclebinRelationId, AccessShareLock); - sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 2, skey); - while ((tup = systable_getnext(sd)) != NULL) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - if ((TrObjType)rbForm->rcyoperation == 'd') { - found = true; - break; - } - } - systable_endscan(sd); - heap_close(rbRel, AccessShareLock); - - return found; -} - -/* - * May this object be a recyclebin object? - * true: with "BIN$" prefix, or not a Relation\Type\Trigger\Constraint\Rule - * false: without "BIN$" prefix, or it does not exist - */ -static bool TrMaybeRbObject(Oid classid, Oid objid, const char *objname = NULL) -{ - HeapTuple tup; - - /* Note: we preserve rule origin name when RbDrop. */ - if (classid != RewriteRelationId && objname) { - return strncmp(objname, "BIN$", 4) == 0; - } - - switch (classid) { - case RelationRelationId: - tup = SearchSysCache1(RELOID, ObjectIdGetDatum(objid)); - if (tup != NULL) { - objname = NameStr(((Form_pg_class)GETSTRUCT(tup))->relname); - ReleaseSysCache(tup); - } - break; - case TypeRelationId: - tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(objid)); - if (tup != NULL) { - objname = NameStr(((Form_pg_type)GETSTRUCT(tup))->typname); - ReleaseSysCache(tup); - } - break; - case TriggerRelationId: { - Relation relTrig; - ScanKeyData skey[1]; - SysScanDesc sd; - - relTrig = heap_open(TriggerRelationId, AccessShareLock); - ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, - F_OIDEQ, ObjectIdGetDatum(objid)); - sd = systable_beginscan(relTrig, TriggerOidIndexId, true, NULL, 1, skey); - if ((tup = systable_getnext(sd)) != NULL) { - objname = NameStr(((Form_pg_trigger)GETSTRUCT(tup))->tgname); - } - systable_endscan(sd); - heap_close(relTrig, AccessShareLock); - break; - } - case ConstraintRelationId: - tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(objid)); - if (tup != NULL) { - objname = NameStr(((Form_pg_constraint)GETSTRUCT(tup))->conname); - ReleaseSysCache(tup); - } - break; - case NamespaceRelationId: - /* Treat Namespace as a non-recyclebin object. */ - return false; - default: - /* May be a recyclebin object. 
*/ - return true; - } - - if (objname) { - return strncmp(objname, "BIN$", 4) == 0; - } - - return false; -} - -static bool TrIsRefRbObjectImpl(Relation depRel, const ObjectAddress *obj, ObjectAddresses *objSet) -{ - int startIdx; - - if (TrObjInRecyclebin(obj)) { - return true; - } - - startIdx = objSet->numrefs; - - if (!TrMaybeRbObject(obj->classId, obj->objectId)) { - return false; - } - - TrFindAllRefObjs(depRel, obj, objSet, true); - TrFindAllInternalObjs(depRel, obj, objSet, true); - - for (int i = startIdx; i < objSet->numrefs; i++) { - if (TrIsRefRbObjectImpl(depRel, &objSet->refs[i], objSet)) { - return true; - } - } - - return false; -} - -/* The object is a rb object, or references a rb object. */ -static bool TrIsRefRbObject(const ObjectAddress *obj, Relation depRel) -{ - ObjectAddresses *objSet = new_object_addresses(); - bool relArgNull = depRel == NULL; - bool result = false; - - if (relArgNull) { - depRel = heap_open(DependRelationId, AccessShareLock); - } - - /* Note: we do not care about obj->deptype here. */ - add_object_address_ext1(obj, objSet); - - result = TrIsRefRbObjectImpl(depRel, obj, objSet); - - free_object_addresses(objSet); - - if (relArgNull) { - heap_close(depRel, AccessShareLock); - } - - return result; -} - -bool TrIsRefRbObjectEx(Oid classid, Oid objid, const char *objname) -{ - if (!TcapFeatureAvail()) { - return false; - } - - /* Note: we preserve rule origin name when RbDrop. */ - if (TrRbIsEmptyDb(u_sess->proc_cxt.MyDatabaseId)) { - return false; - } - - if (classid != RewriteRelationId && objname && strncmp(objname, "BIN$", 4) != 0) { - return false; - } - - ObjectAddress obj = {classid, objid}; - - return TrIsRefRbObject(&obj); -} - -void TrForbidAccessRbDependencies(Relation depRel, const ObjectAddress *depender, - const ObjectAddress *referenced, int nreferenced) -{ - if (!TcapFeatureAvail()) { - return; - } - - if (IsInitdb || TrRbIsEmptyDb(u_sess->proc_cxt.MyDatabaseId)) { - return; - } - - if (TrIsRefRbObject(depender, depRel)) { - elog (ERROR, "can not access recycle object."); - } - - for (int i = 0; i < nreferenced; i++, referenced++) { - if (TrIsRefRbObject(referenced, depRel)) { - elog (ERROR, "can not access recycle object."); - } - } - - return; -} - -void TrForbidAccessRbObject(Oid classid, Oid objid, const char *objname) -{ - if (!TcapFeatureAvail()) { - return; - } - - if (TrRbIsEmptyDb(u_sess->proc_cxt.MyDatabaseId) || !TrMaybeRbObject(classid, objid, objname)) { - return; - } - - ObjectAddress obj = {classid, objid}; - if (TrIsRefRbObject(&obj)) { - elog (ERROR, "can not access recycle object."); - } - - return; -} - -Datum gs_is_recycle_object(PG_FUNCTION_ARGS) -{ - int classid = PG_GETARG_INT32(0); - int objid = PG_GETARG_INT32(1); - Name objname = PG_GETARG_NAME(2); - bool result = false; - result = TrIsRefRbObjectEx(classid, objid, NameStr(*objname)); - PG_RETURN_BOOL(result); -} +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * tcap_manager.cpp + * Routines to support Timecapsule `Recyclebin-based query, restore`. + * We use Tr prefix to indicate it in following coding. + * + * IDENTIFICATION + * src/gausskernel/storage/tcap/tcap_manager.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "pgstat.h" +#include "access/reloptions.h" +#include "access/sysattr.h" +#include "access/xlog.h" +#include "catalog/pg_database.h" +#include "catalog/dependency.h" +#include "catalog/heap.h" +#include "catalog/index.h" +#include "catalog/indexing.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_collation_fn.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_conversion_fn.h" +#include "catalog/pg_conversion.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_extension_data_source.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_job.h" +#include "catalog/pg_language.h" +#include "catalog/pg_largeobject.h" +#include "catalog/pg_object.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_partition_fn.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_recyclebin.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_rlspolicy.h" +#include "catalog/pg_synonym.h" +#include "catalog/pg_tablespace.h" +#include "catalog/pg_trigger.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pgxc_class.h" +#include "catalog/pg_partition.h" +#include "catalog/storage.h" +#include "commands/comment.h" +#include "commands/dbcommands.h" +#include "commands/directory.h" +#include "commands/extension.h" +#include "commands/proclang.h" +#include "commands/schemacmds.h" +#include "commands/seclabel.h" +#include "commands/sec_rls_cmds.h" +#include "commands/tablecmds.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "commands/typecmds.h" +#include "executor/node/nodeModifyTable.h" +#include "rewrite/rewriteRemove.h" +#include "storage/lmgr.h" +#include "storage/predicate.h" +#include "storage/smgr/relfilenode.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/relcache.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" + +#include "storage/tcap.h" +#include "storage/tcap_impl.h" + +static bool TrIsRefRbObject(const ObjectAddress *obj, Relation depRel = NULL); +void TrDoPurgeObjectDrop(TrObjDesc *desc); + +char *TrGenObjName(char *rbname, Oid classId, Oid objid) +{ + int rc = EOK; + + rc = snprintf_s(rbname, NAMEDATALEN, NAMEDATALEN - 1, "BIN$%X%X%X$%llX==$0", + u_sess->proc_cxt.MyDatabaseId, classId, objid, (uint64)GetXLogInsertRecPtr()); + securec_check_ss_c(rc, "\0", "\0"); + + return rbname; +} + +static TransactionId TrRbGetRcyfrozenxid64(HeapTuple rbtup, Relation rbRel = NULL) +{ + Datum datum; + bool isNull = false; + TransactionId rcyfrozenxid64; + bool relArgNull = rbRel == NULL; + + if (relArgNull) { + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + } + + datum = heap_getattr(rbtup, Anum_pg_recyclebin_rcyfrozenxid64, RelationGetDescr(rbRel), &isNull); + 
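/* TrDescWrite fills every pg_recyclebin column (its nulls[] array stays all-false), so a NULL rcyfrozenxid64 here would indicate catalog corruption. */ + 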
Assert(!isNull); + + rcyfrozenxid64 = DatumGetTransactionId(datum); + + if (relArgNull) { + heap_close(rbRel, AccessShareLock); + } + + return rcyfrozenxid64; +} + +void TrDescInit(Relation rel, TrObjDesc *desc, TrObjOperType operType, + TrObjType objType, bool canpurge, bool isBaseObj) +{ + errno_t rc = EOK; + + /* Notice: desc->id, desc->baseid will be assigned by invoker later. */ + desc->dbid = u_sess->proc_cxt.MyDatabaseId; + desc->relid = RelationGetRelid(rel); + + (void)TrGenObjName(desc->name, RelationRelationId, desc->relid); + + rc = strncpy_s(desc->originname, NAMEDATALEN, RelationGetRelationName(rel), + strlen(RelationGetRelationName(rel))); + securec_check(rc, "\0", "\0"); + + desc->operation = operType; + desc->type = objType; + desc->recyclecsn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; + desc->recycletime = GetCurrentTimestamp(); + desc->createcsn = RelationGetCreatecsn(rel); + desc->changecsn = RelationGetChangecsn(rel); + desc->nspace = RelationGetNamespace(rel); + desc->owner = RelationGetOwner(rel); + desc->tablespace = RelationGetTablespace(rel); + desc->relfilenode = RelationGetRelFileNode(rel); + desc->frozenxid = RelationGetRelFrozenxid(rel); + desc->frozenxid64 = RelationGetRelFrozenxid64(rel); + desc->canrestore = objType == RB_OBJ_TABLE; + desc->canpurge = canpurge; +} + +void TrPartDescInit(Relation rel, Partition part, TrObjDesc *desc, TrObjOperType operType, + TrObjType objType, bool canpurge, bool isBaseObj) +{ + errno_t rc = EOK; + + /* Notice: desc->id, desc->baseid will be assigned by invoker later. */ + desc->dbid = u_sess->proc_cxt.MyDatabaseId; + desc->relid = part->pd_id; + + (void)TrGenObjName(desc->name, PartitionRelationId, desc->relid); + + rc = strncpy_s(desc->originname, NAMEDATALEN, RelationGetRelationName(rel), + strlen(RelationGetRelationName(rel))); + securec_check(rc, "\0", "\0"); + + int len = strlen(PartitionGetPartitionName(part)) + strlen(RelationGetRelationName(rel)) + 1; + rc = strcat_s(desc->originname, len, PartitionGetPartitionName(part)); + securec_check(rc, "\0", "\0"); + + desc->operation = operType; + desc->type = objType; + desc->recyclecsn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; + desc->recycletime = GetCurrentTimestamp(); + desc->createcsn = RelationGetCreatecsn(rel); + desc->changecsn = RelationGetChangecsn(rel); + desc->nspace = RelationGetNamespace(rel); + desc->owner = RelationGetOwner(rel); + desc->tablespace = part->pd_part->reltablespace; + desc->relfilenode = part->pd_part->relfilenode; + desc->frozenxid = part->pd_part->relfrozenxid; + desc->frozenxid64 = PartGetRelFrozenxid64(part); + desc->canrestore = false; + desc->canpurge = canpurge; +} + +static void TrDescRead(TrObjDesc *desc, HeapTuple rbtup) +{ + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbtup); + + desc->id = HeapTupleGetOid(rbtup); + desc->baseid = rbForm->rcybaseid; + + desc->dbid = rbForm->rcydbid; + desc->relid = rbForm->rcyrelid; + (void)namestrcpy((Name)desc->name, NameStr(rbForm->rcyname)); + (void)namestrcpy((Name)desc->originname, NameStr(rbForm->rcyoriginname)); + desc->operation = (rbForm->rcyoperation == 'd') ? 
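/* 'd' = dropped, 't' = truncated, matching the encoding written by TrDescWrite */ 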
RB_OPER_DROP : RB_OPER_TRUNCATE; + desc->type = (TrObjType)rbForm->rcytype; + desc->recyclecsn = rbForm->rcyrecyclecsn; + desc->recycletime = rbForm->rcyrecycletime; + desc->createcsn = rbForm->rcycreatecsn; + desc->changecsn = rbForm->rcychangecsn; + desc->nspace = rbForm->rcynamespace; + desc->owner = rbForm->rcyowner; + desc->tablespace = rbForm->rcytablespace; + desc->relfilenode = rbForm->rcyrelfilenode; + desc->canrestore = rbForm->rcycanrestore; + desc->canpurge = rbForm->rcycanpurge; + desc->frozenxid = rbForm->rcyfrozenxid; + desc->frozenxid64 = TrRbGetRcyfrozenxid64(rbtup); +} + +Oid TrDescWrite(TrObjDesc *desc) +{ + Relation rel; + HeapTuple tup; + bool nulls[Natts_pg_recyclebin] = {0}; + Datum values[Natts_pg_recyclebin]; + NameData name; + NameData originname; + Oid rbid; + + values[Anum_pg_recyclebin_rcydbid - 1] = ObjectIdGetDatum(desc->dbid); + values[Anum_pg_recyclebin_rcybaseid - 1] = ObjectIdGetDatum(desc->baseid); + values[Anum_pg_recyclebin_rcyrelid - 1] = ObjectIdGetDatum(desc->relid); + (void)namestrcpy(&name, desc->name); + values[Anum_pg_recyclebin_rcyname - 1] = NameGetDatum(&name); + (void)namestrcpy(&originname, desc->originname); + values[Anum_pg_recyclebin_rcyoriginname - 1] = NameGetDatum(&originname); + values[Anum_pg_recyclebin_rcyoperation - 1] = (desc->operation == RB_OPER_DROP) ? 'd' : 't'; + values[Anum_pg_recyclebin_rcytype - 1] = Int32GetDatum(desc->type); + values[Anum_pg_recyclebin_rcyrecyclecsn - 1] = Int64GetDatum(desc->recyclecsn); + values[Anum_pg_recyclebin_rcyrecycletime - 1] = TimestampTzGetDatum(desc->recycletime); + values[Anum_pg_recyclebin_rcycreatecsn - 1] = Int64GetDatum(desc->createcsn); + values[Anum_pg_recyclebin_rcychangecsn - 1] = Int64GetDatum(desc->changecsn); + values[Anum_pg_recyclebin_rcynamespace - 1] = ObjectIdGetDatum(desc->nspace); + values[Anum_pg_recyclebin_rcyowner - 1] = ObjectIdGetDatum(desc->owner); + values[Anum_pg_recyclebin_rcytablespace - 1] = ObjectIdGetDatum(desc->tablespace); + values[Anum_pg_recyclebin_rcyrelfilenode - 1] = ObjectIdGetDatum(desc->relfilenode); + values[Anum_pg_recyclebin_rcycanrestore - 1] = BoolGetDatum(desc->canrestore); + values[Anum_pg_recyclebin_rcycanpurge - 1] = BoolGetDatum(desc->canpurge); + values[Anum_pg_recyclebin_rcyfrozenxid - 1] = ShortTransactionIdGetDatum(desc->frozenxid); + values[Anum_pg_recyclebin_rcyfrozenxid64 - 1] = TransactionIdGetDatum(desc->frozenxid64); + + rel = heap_open(RecyclebinRelationId, RowExclusiveLock); + + tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); + + rbid = simple_heap_insert(rel, tup); + + CatalogUpdateIndexes(rel, tup); + + heap_freetuple_ext(tup); + + heap_close(rel, RowExclusiveLock); + + CommandCounterIncrement(); + + return rbid; +} + +static bool TrFetchOrinameImpl(Oid nspId, const char *oriname, TrObjType type, + TrObjDesc *desc, TrOperMode operMode) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[3]; + HeapTuple tup; + bool found = false; + + if (!OidIsValid(nspId)) { + return false; + } + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(nspId)); + ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + ScanKeyInit(&skey[2], Anum_pg_recyclebin_rcyoriginname, BTEqualStrategyNumber, + F_NAMEEQ, CStringGetDatum(oriname)); + + sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 3, skey); + while 
((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if ((rbForm->rcytype != type && rbForm->rcytype == RB_OBJ_TABLE) || + (rbForm->rcytype != type && rbForm->rcytype == RB_OBJ_INDEX)) { + continue; + } + + found = true; + TrDescRead(desc, tup); + break; + } + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return found; +} + +bool TrFetchName(const char *rcyname, TrObjType type, TrObjDesc *desc, TrOperMode operMode) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + bool found = false; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcyname, BTEqualStrategyNumber, + F_NAMEEQ, CStringGetDatum(rcyname)); + + sd = systable_beginscan(rbRel, RecyclebinNameIndexId, true, NULL, 1, skey); + if ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if ((rbForm->rcytype != type && rbForm->rcytype == RB_OBJ_TABLE) || + (rbForm->rcytype != type && rbForm->rcytype == RB_OBJ_INDEX)) { + ereport(ERROR, + (errmsg("The recycle object \"%s\" type mismatched.", rcyname))); + } + found = true; + TrDescRead(desc, tup); + } + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return found; +} + +static bool TrFetchOriname(const char *schemaname, const char *relname, TrObjType type, + TrObjDesc *desc, TrOperMode operMode) +{ + bool found = false; + Oid nspId; + + if (schemaname) { + nspId = get_namespace_oid(schemaname, true); + found = TrFetchOrinameImpl(nspId, relname, type, desc, operMode); + } else { + List *activeSearchPath = NIL; + ListCell *l = NULL; + + recomputeNamespacePath(); + activeSearchPath = list_copy(u_sess->catalog_cxt.activeSearchPath); + foreach (l, activeSearchPath) { + nspId = lfirst_oid(l); + if (TrFetchOrinameImpl(nspId, relname, type, desc, operMode)) { + found = true; + break; + } + } + list_free_ext(activeSearchPath); + if (!found) { + nspId = PG_TOAST_NAMESPACE; + found = TrFetchOrinameImpl(nspId, relname, type, desc, operMode); + } + } + + return found; +} + +void TrUpdateBaseid(const TrObjDesc *desc) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + HeapTuple newtup; + Datum values[Natts_pg_recyclebin] = { 0 }; + bool nulls[Natts_pg_recyclebin] = { false }; + bool replaces[Natts_pg_recyclebin] = { false }; + + rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); + + ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(desc->id)); + + sd = systable_beginscan(rbRel, RecyclebinIdIndexId, true, NULL, 1, skey); + if ((tup = systable_getnext(sd)) == NULL) { + ereport(ERROR, (errmsg("recycle object %u does not exist", desc->id))); + } + + replaces[Anum_pg_recyclebin_rcybaseid - 1] = true; + values[Anum_pg_recyclebin_rcybaseid - 1] = ObjectIdGetDatum(desc->baseid); + + newtup = heap_modify_tuple(tup, RelationGetDescr(rbRel), values, nulls, replaces); + + simple_heap_update(rbRel, &newtup->t_self, newtup); + + CatalogUpdateIndexes(rbRel, newtup); + + heap_freetuple_ext(newtup); + + systable_endscan(sd); + heap_close(rbRel, RowExclusiveLock); + + return; +} + +static void TrLockRelationImpl(Oid relid, TrObjType type) +{ + /* + * A lock failure may be caused by a concurrent purge/timecapsule/DQL + * on the recycle object, or by access to a normal relation. 
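+ * We therefore take the lock conditionally and raise + * ERRCODE_RBIN_LOCK_NOT_AVAILABLE instead of blocking, so callers + * such as TrPurgeBatch can count the object as skipped. 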
+ */ + if (!ConditionalLockRelationOid(relid, AccessExclusiveLock)) { + ereport(ERROR, + (errcode(ERRCODE_RBIN_LOCK_NOT_AVAILABLE), + errmsg("could not obtain lock on relation \"%u\"", relid))); + } + + /* + * Now that we have the lock, probe to see if the relation + * really exists or not. + */ + AcceptInvalidationMessages(); + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid)) && type != RB_OBJ_PARTITION) { + /* Clean already held locks if error return. */ + UnlockRelationOid(relid, AccessExclusiveLock); + ereport(ERROR, + (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), + errmsg("relation \"%u\" does not exist", relid))); + } else if (!SearchSysCacheExists1(PARTRELID, ObjectIdGetDatum(relid)) && type == RB_OBJ_PARTITION) { + /* Clean already held locks if error return. */ + UnlockRelationOid(relid, AccessExclusiveLock); + ereport(ERROR, + (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), + errmsg("partition \"%u\" does not exist", relid))); + } +} + +static void TrLockRelation(TrObjDesc *desc) +{ + Oid heapOid = InvalidOid; + + /* Lock heap relation for index first */ + if (desc->type == RB_OBJ_INDEX) { + heapOid = IndexGetRelation(desc->relid, true); + if (!OidIsValid(heapOid)) { + ereport(ERROR, + (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), + errmsg("relation \"%u\" does not exist", desc->relid))); + } + TrLockRelationImpl(heapOid, desc->type); + } + + /* Use TRY-CATCH block to clean locks already held if error. */ + PG_TRY(); + { + /* Lock relation self */ + TrLockRelationImpl(desc->relid, desc->type); + } + PG_CATCH(); + { + if (desc->type == RB_OBJ_INDEX) { + UnlockRelationOid(heapOid, AccessExclusiveLock); + } + PG_RE_THROW(); + } + PG_END_TRY(); +} + +static void TrUnlockTrItem(TrObjDesc *desc) +{ + UnlockDatabaseObject(RecyclebinRelationId, desc->id, 0, + AccessExclusiveLock); +} + +static void TrLockTrItem(TrObjDesc *desc) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + /* 1. Try to lock rb item in AccessExclusiveLock */ + if (!ConditionalLockDatabaseObject(RecyclebinRelationId, desc->id, 0, AccessExclusiveLock)) { + ereport(ERROR, + (errcode(ERRCODE_RBIN_LOCK_NOT_AVAILABLE), + errmsg("could not obtain lock on recycle object '%s'", desc->name))); + } + + /* + * 2. Now that we have the lock, probe to see if the rb item really + * exists or not. 
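+ * A concurrent purge or restore may already have removed it between + * our earlier catalog scan and the lock acquisition above. 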
+ */ + AcceptInvalidationMessages(); + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(desc->id)); + + sd = systable_beginscan(rbRel, RecyclebinIdIndexId, true, NULL, 1, skey); + if ((tup = systable_getnext(sd)) == NULL) { + UnlockDatabaseObject(RecyclebinRelationId, desc->id, 0, AccessExclusiveLock); + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + ereport(ERROR, + (errcode(ERRCODE_RBIN_UNDEFINED_OBJECT), + errmsg("recycle object \"%s\" does not exist", desc->name))); + } + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return; +} + +static void TrOperMatch(const TrObjDesc *desc, TrOperMode operMode) +{ + switch (operMode) { + case RB_OPER_PURGE: + if (!desc->canpurge && desc->type != RB_OBJ_PARTITION) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OPERATION), + errmsg("recycle object \"%s\" cannot be purged", desc->name))); + } + break; + + case RB_OPER_RESTORE_DROP: + if ((!desc->canrestore && desc->type != RB_OBJ_PARTITION) || desc->operation != RB_OPER_DROP) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OPERATION), + errmsg("recycle object \"%s\" cannot be restored", desc->name))); + } + break; + + case RB_OPER_RESTORE_TRUNCATE: + if ((!desc->canrestore && desc->type != RB_OBJ_PARTITION) || desc->operation != RB_OPER_TRUNCATE) { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OPERATION), + errmsg("recycle object \"%s\" cannot be restored", desc->name))); + } + break; + + default: + ereport(ERROR, + (errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), + errmsg("unrecognized recyclebin operation: %u", operMode))); + break; + } +} + +/* + * Fetch object from recycle bin for rb operations - purge, restore : + * Prefer to fetch as original name, then recycle name. + */ +void TrOperFetch(const RangeVar *purobj, TrObjType objtype, TrObjDesc *desc, TrOperMode operMode) +{ + bool found = false; + + AcceptInvalidationMessages(); + + /* Prefer to fetch as original name */ + found = TrFetchOriname(purobj->schemaname, purobj->relname, objtype, desc, operMode); + /* if not found, then fetch as recycle name */ + if (!found) { + found = TrFetchName(purobj->relname, objtype, desc, operMode); + } + + /* not found, throw error */ + if (!found) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("recycle object \"%s\" desired does not exist", purobj->relname))); + } + + TrOperMatch(desc, operMode); + + return; +} + +static void TrPermRestore(TrObjDesc *desc, TrOperMode operMode) +{ + AclResult aclCreateResult; + + /* Check namespace permissions. 
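Restore re-creates the object in its original schema, so the acting user needs both CREATE and USAGE on that namespace. 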
*/ + aclCreateResult = pg_namespace_aclcheck(desc->nspace, desc->authid, ACL_CREATE); + if (aclCreateResult != ACLCHECK_OK) { + aclcheck_error(aclCreateResult, ACL_KIND_NAMESPACE, get_namespace_name(desc->nspace)); + } + + AclResult aclUsageResult = pg_namespace_aclcheck(desc->nspace, desc->authid, ACL_USAGE); + if (aclUsageResult != ACLCHECK_OK) { + aclcheck_error(aclUsageResult, ACL_KIND_NAMESPACE, get_namespace_name(desc->nspace)); + } + + /* Allow restore to either table owner or schema owner */ + if (!pg_class_ownercheck(desc->relid, desc->authid) && !pg_namespace_ownercheck(desc->nspace, desc->authid)) { + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, desc->name); + return; + } + + if (operMode == RB_OPER_RESTORE_TRUNCATE) { + AclResult aclTruncateResult = pg_class_aclcheck(desc->relid, desc->authid, ACL_TRUNCATE); + if (aclTruncateResult != ACLCHECK_OK) { + aclcheck_error(aclTruncateResult, ACL_KIND_CLASS, desc->name); + } + } +} + +static void TrPermPurge(TrObjDesc *desc, TrOperMode operMode) +{ + AclResult result; + + result = pg_namespace_aclcheck(desc->nspace, desc->authid, ACL_USAGE); + if (result != ACLCHECK_OK) { + aclcheck_error(result, ACL_KIND_NAMESPACE, get_namespace_name(desc->nspace)); + } + if (!pg_class_ownercheck(desc->relid, desc->authid) && !pg_namespace_ownercheck(desc->nspace, desc->authid)) { + aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, desc->name); + } +} + +/* + * Check permission for rb operations - purge, restore + */ +static void TrPerm(TrObjDesc *desc, TrOperMode operMode) +{ + switch (operMode) { + case RB_OPER_RESTORE_DROP: + case RB_OPER_RESTORE_TRUNCATE: + TrPermRestore(desc, operMode); + break; + case RB_OPER_PURGE: + TrPermPurge(desc, operMode); + break; + default: + /* Never reached here. */ + Assert(0); + break; + } +} + +/* + * Prepare for rb operations - purge, restore : + * check permission, lock objects + */ +void TrOperPrep(TrObjDesc *desc, TrOperMode operMode) +{ + bool needLockRelation = false; + + /* + * 1. Check permission. + */ + TrPerm(desc, operMode); + + /* + * 2. Acquire lock on rb item, avoid concurrently purge, restore. + */ + TrLockTrItem(desc); + + /* + * 3. Acquire lock on relation, avoid concurrently DQL. + * Notice: ignore this step when we purge truncated relation + * as base relation may not exists. + */ + needLockRelation = !(operMode == RB_OPER_PURGE && desc->operation == RB_OPER_TRUNCATE); + if (needLockRelation) { + /* Use TRY-CATCH block to clean locks already held if error. */ + PG_TRY(); + { + TrLockRelation(desc); + } + PG_CATCH(); + { + TrUnlockTrItem(desc); + PG_RE_THROW(); + } + PG_END_TRY(); + } +} + +bool NeedTrComm(Oid relid) +{ + Relation rel; + Form_pg_class classForm; + + if (/* + *Disable Recyclebin-based-Drop/Truncate when + */ + /* recyclebin disabled, or */ + !u_sess->attr.attr_storage.enable_recyclebin || + /* target db is template1, or */ + u_sess->proc_cxt.MyDatabaseId == TemplateDbOid || + /* in maintenance mode, or */ + u_sess->attr.attr_common.xc_maintenance_mode || + /* in in-place upgrade mode, or */ + t_thrd.proc->workingVersionNum < 92350 || + /* in non-singlenode mode, or */ + (g_instance.role != VSINGLENODE) || + /* in bootstrap mode. 
*/ + IsInitdb) { + return false; + } + + rel = relation_open(relid, NoLock); + classForm = rel->rd_rel; + if (/* + * Disable Recyclebin-based-Drop/Truncate if + */ + /* table is not an ordinary table, or */ + classForm->relkind != RELKIND_RELATION || + /* is a heap (astore) table rather than a ustore table, or */ + rel->rd_tam_type == TAM_HEAP || + /* is not a permanent regular table, or */ + classForm->relpersistence != RELPERSISTENCE_PERMANENT || + /* is shared table across databases, or */ + classForm->relisshared || + /* has derived classes, or */ + classForm->relhassubclass || + /* has any PARTIAL CLUSTER KEY, or */ + classForm->relhasclusterkey || + /* is cstore table, or */ + (rel->rd_options && StdRelOptIsColStore(rel->rd_options)) || RelationIsColStore(rel) || + /* is hbkt table, or */ + (RELATION_HAS_BUCKET(rel) || RELATION_OWN_BUCKET(rel)) || + /* is dfs table, or */ + RelationIsPAXFormat(rel) || + /* is resizing, or */ + RelationInClusterResizing(rel) || + /* is in system namespace. */ + (IsSystemNamespace(classForm->relnamespace) || IsToastNamespace(classForm->relnamespace) || + IsCStoreNamespace(classForm->relnamespace))) { + relation_close(rel, NoLock); + return false; + } + + relation_close(rel, NoLock); + + return true; +} + +TrObjType TrGetObjType(Oid nspId, char relKind) +{ + TrObjType type = RB_OBJ_TABLE; + + switch (relKind) { + case RELKIND_INDEX: + type = IsToastNamespace(nspId) ? RB_OBJ_TOAST_INDEX : RB_OBJ_INDEX; + break; + case RELKIND_RELATION: + type = RB_OBJ_TABLE; + break; + case RELKIND_SEQUENCE: + case RELKIND_LARGE_SEQUENCE: + type = RB_OBJ_SEQUENCE; + break; + case RELKIND_TOASTVALUE: + type = RB_OBJ_TOAST; + break; + case PARTTYPE_PARTITIONED_RELATION: + type = RB_OBJ_PARTITION; + break; + case RELKIND_GLOBAL_INDEX: + type = RB_OBJ_GLOBAL_INDEX; + break; + case RELKIND_MATVIEW: + type = RB_OBJ_MATVIEW; + break; + default: + /* Never reached here. */ + Assert(0); + break; + } + + return type; +} + +static bool TrObjAddrExists(Oid classid, Oid objid, ObjectAddresses *objSet) +{ + int i; + + for (i = 0; i < objSet->numrefs; i++) { + if (TrObjIsEqualEx(classid, objid, &objSet->refs[i])) { + return true; + } + } + + return false; +} + +/* + * output: refobjs + */ +void TrFindAllRefObjs(Relation depRel, const ObjectAddress *subobj, + ObjectAddresses *refobjs, bool ignoreObjSubId) +{ + SysScanDesc sd; + HeapTuple tuple; + ScanKeyData key[3]; + int nkeys; + + ScanKeyInit(&key[0], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(subobj->classId)); + ScanKeyInit(&key[1], Anum_pg_depend_objid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(subobj->objectId)); + nkeys = 2; + if (!ignoreObjSubId && subobj->objectSubId != 0) { + ScanKeyInit(&key[2], Anum_pg_depend_objsubid, BTEqualStrategyNumber, F_INT4EQ, + Int32GetDatum(subobj->objectSubId)); + nkeys = 3; + } + + sd = systable_beginscan(depRel, DependDependerIndexId, true, NULL, nkeys, key); + while (HeapTupleIsValid(tuple = systable_getnext(sd))) { + Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple); + /* Cascaded clean rb object in `DROP SCHEMA` command. */ + if (depForm->refclassid == NamespaceRelationId) { + continue; + } + + /* We keep `objSet` unique when `ignoreObjSubId = true` to avoid circular recursion. 
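Dependency edges in pg_depend can form cycles, so this visited-set check is what guarantees that the recursive reference walk terminates. 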
*/ + if (!ignoreObjSubId || !TrObjAddrExists(depForm->refclassid, depForm->refobjid, refobjs)) { + add_object_address_ext(depForm->refclassid, depForm->refobjid, + depForm->refobjsubid, depForm->deptype, refobjs); + } + } + + systable_endscan(sd); + return; +} + +static void TrFindAllInternalObjs(Relation depRel, const ObjectAddress *refobj, + ObjectAddresses *objSet, bool ignoreObjSubId = false) +{ + SysScanDesc sd; + HeapTuple tuple; + ScanKeyData key[3]; + int nkeys; + + ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(refobj->classId)); + ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(refobj->objectId)); + nkeys = 2; + if (!ignoreObjSubId && refobj->objectSubId != 0) { + ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, + Int32GetDatum(refobj->objectSubId)); + nkeys = 3; + } + + sd = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, nkeys, key); + while (HeapTupleIsValid(tuple = systable_getnext(sd))) { + Form_pg_depend depForm = (Form_pg_depend)GETSTRUCT(tuple); + if (depForm->deptype != 'i') { + continue; + } + + /* We keep `objSet` unique when `ignoreObjSubId = true` to avoid circular recursion. */ + if (!ignoreObjSubId || !TrObjAddrExists(depForm->classid, depForm->objid, objSet)) { + add_object_address_ext(depForm->classid, depForm->objid, + depForm->objsubid, depForm->deptype, objSet); + } + } + + systable_endscan(sd); + return; +} + +static void TrDoPurgeObject(TrObjDesc *desc) +{ + if (desc->operation == RB_OPER_DROP) { + TrDoPurgeObjectDrop(desc); + } else { + TrDoPurgeObjectTruncate(desc); + } +} + +void TrPurgeObject(RangeVar *purobj, TrObjType type) +{ + TrObjDesc desc; + + TrOperFetch(purobj, type, &desc, RB_OPER_PURGE); + + desc.authid = GetUserId(); + TrOperPrep(&desc, RB_OPER_PURGE); + + TrDoPurgeObject(&desc); + + return; +} + +const int PURGE_BATCH = 64; +const int PURGE_SINGL = 64; +typedef void (*TrFetchBeginHook)(SysScanDesc *sd, Oid objId); +typedef bool (*TrFetchMatchHook)(Relation rbRel, HeapTuple rbTup, Oid objId); + +static void TrFetchBegin(TrFetchBeginHook fetchHook, SysScanDesc *sd, Oid objId) +{ + fetchHook(sd, objId); +} + +// @return: true for eof +static bool TrFetchExec(TrFetchMatchHook matchHook, Oid objId, SysScanDesc sd, TrObjDesc *desc) +{ + HeapTuple tup; + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if ((rbForm->rcytype == RB_OBJ_TABLE) && matchHook(sd->heap_rel, tup, objId)) { + Assert (rbForm->rcycanpurge); + TrDescRead(desc, tup); + return false; + } else if ((rbForm->rcytype == RB_OBJ_PARTITION) && matchHook(sd->heap_rel, tup, objId)) { + Assert (!rbForm->rcycanpurge); + TrDescRead(desc, tup); + return false; + } + } + return true; +} + +static void TrFetchEnd(SysScanDesc sd) +{ + Relation rbRel = sd->heap_rel; + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); +} + +static bool TrPurgeBatch(TrFetchBeginHook beginHook, TrFetchMatchHook matchHook, + Oid objId, Oid roleid, uint32 maxBatch, PurgeMsgRes *localRes) +{ + SysScanDesc sd = NULL; + TrObjDesc desc; + uint32 count = 0; + bool eof = false; + + RbMsgResetRes(localRes); + + StartTransactionCommand(); + + TrFetchBegin(beginHook, &sd, objId); + while (!(eof = TrFetchExec(matchHook, objId, sd, &desc))) { + CHECK_FOR_INTERRUPTS(); + + PG_TRY(); + { + desc.authid = roleid; + TrOperPrep(&desc, RB_OPER_PURGE); + + TrDoPurgeObject(&desc); + localRes->purgedNum++; + } + 
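/* Per-object failures are tolerated: a lock conflict counts as skipped, a concurrently removed object as undefined; anything else is re-thrown. */ + 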
PG_CATCH(); + { + int errcode = geterrcode(); + if (errcode == ERRCODE_RBIN_LOCK_NOT_AVAILABLE) { + errno_t rc; + rc = strncpy_s(localRes->errMsg, RB_MAX_ERRMSG_SIZE, Geterrmsg(), RB_MAX_ERRMSG_SIZE - 1); + securec_check(rc, "\0", "\0"); + localRes->skippedNum++; + } else if (errcode == ERRCODE_RBIN_UNDEFINED_OBJECT) { + localRes->undefinedNum++; + } else { + PG_RE_THROW(); + } + } + PG_END_TRY(); + + if (++count >= maxBatch) { + break; + } + } + + TrFetchEnd(sd); + + CommitTransactionCommand(); + + return eof; +} + +static void TrFetchBeginSpace(SysScanDesc *sd, Oid spcId) +{ + ScanKeyData skey[2]; + Relation rbRel; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcytablespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(spcId)); + ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + + *sd = systable_beginscan(rbRel, RecyclebinDbidSpcidRcycsnIndexId, true, NULL, 2, skey); +} + +static bool TrFetchMatchSpace(Relation rbRel, HeapTuple rbTup, Oid objId) +{ + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbTup); + return rbForm->rcytablespace == objId; +} + +void TrPurgeTablespace(int64 id) +{ + PurgeMsgReq *req = &RbMsg(id)->req; + PurgeMsgRes localRes; + bool eof = false; + + do { + eof = TrPurgeBatch(TrFetchBeginSpace, TrFetchMatchSpace, req->objId, req->authId, PURGE_BATCH, &localRes); + RbMsgSetStatistics(id, &localRes); + } while (!eof && localRes.skippedNum == 0); +} + +void TrPurgeTablespaceDML(int64 id) +{ + PurgeMsgReq *req = &RbMsg(id)->req; + PurgeMsgRes localRes; + bool eof = false; + + do { + eof = TrPurgeBatch(TrFetchBeginSpace, TrFetchMatchSpace, req->objId, req->authId, PURGE_SINGL, &localRes); + RbMsgSetStatistics(id, &localRes); + } while (!eof && localRes.purgedNum == 0); +} + +static void TrFetchBeginRecyclebin(SysScanDesc *sd, Oid objId) +{ + ScanKeyData skey[2]; + Relation rbRel; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + + *sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); +} + +static bool TrFetchMatchRecyclebin(Relation rbRel, HeapTuple rbTup, Oid objId) +{ + return true; +} + +void TrPurgeRecyclebin(int64 id) +{ + PurgeMsgReq *req = &RbMsg(id)->req; + PurgeMsgRes localRes; + bool eof = false; + + do { + eof = TrPurgeBatch(TrFetchBeginRecyclebin, TrFetchMatchRecyclebin, + InvalidOid, req->authId, PURGE_BATCH, &localRes); + RbMsgSetStatistics(id, &localRes); + } while (!eof && localRes.skippedNum == 0); +} + +static void TrFetchBeginSchema(SysScanDesc *sd, Oid objId) +{ + ScanKeyData skey[2]; + Relation rbRel; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(objId)); + ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + + *sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 2, skey); +} + +static bool TrFetchMatchSchema(Relation rbRel, HeapTuple rbTup, Oid objId) +{ + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbTup); + return rbForm->rcynamespace == objId; +} + +void TrPurgeSchema(int64 id) +{ + PurgeMsgReq *req = &RbMsg(id)->req; + PurgeMsgRes localRes; + bool eof = false; + + do { 
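+ /* Purge the schema in PURGE_BATCH-sized transactions, stopping once a pass skips an object due to a lock conflict. */ 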
+ eof = TrPurgeBatch(TrFetchBeginSchema, TrFetchMatchSchema, req->objId, req->authId, PURGE_BATCH, &localRes); + RbMsgSetStatistics(id, &localRes); + } while (!eof && localRes.skippedNum == 0); +} + +static void TrFetchBeginUser(SysScanDesc *sd, Oid objId) +{ + ScanKeyData skey[2]; + Relation rbRel; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + + *sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); +} + +static bool TrFetchMatchUser(Relation rbRel, HeapTuple rbTup, Oid objId) +{ + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbTup); + return rbForm->rcyowner == objId; +} + +void TrPurgeUser(int64 id) +{ + PurgeMsgReq *req = &RbMsg(id)->req; + PurgeMsgRes localRes; + bool eof = false; + + do { + eof = TrPurgeBatch(TrFetchBeginUser, TrFetchMatchUser, req->objId, req->authId, PURGE_BATCH, &localRes); + RbMsgSetStatistics(id, &localRes); + } while (!eof && localRes.skippedNum == 0); +} + +static void TrFetchBeginAuto(SysScanDesc *sd, Oid objId) +{ + ScanKeyData skey[2]; + Relation rbRel; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + + *sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); +} + +static bool TrFetchMatchAuto(Relation rbRel, HeapTuple rbTup, Oid objId) +{ + bool isNull = false; + Datum datumRcyTime = heap_getattr(rbTup, Anum_pg_recyclebin_rcyrecycletime, + RelationGetDescr(rbRel), &isNull); + + long secs; + int msecs; + TimestampDifference(isNull ? 0 : DatumGetTimestampTz(datumRcyTime), + GetCurrentTimestamp(), &secs, &msecs); + + return secs > u_sess->attr.attr_storage.recyclebin_retention_time || secs < 0; +} + +void TrPurgeAuto(int64 id) +{ + PurgeMsgReq *req = &RbMsg(id)->req; + PurgeMsgRes localRes; + bool eof = false; + do { + eof = TrPurgeBatch(TrFetchBeginAuto, TrFetchMatchAuto, InvalidOid, req->authId, PURGE_BATCH, &localRes); + RbMsgSetStatistics(id, &localRes); + } while (!eof); +} + +void TrSwapRelfilenode(Relation rbRel, HeapTuple rbTup, bool isPart) +{ + Relation relRel; + HeapTuple relTup; + HeapTuple newTup; + TrObjDesc desc; + int maxNattr = 0; + Datum *values = NULL; + bool *nulls = NULL; + bool *replaces = NULL; + NameData name; + errno_t rc = EOK; + bool isNull = false; + int relfilenoIndex = 0; + int frozenxidIndex = 0; + int frozenxid64Index = 0; + bool isPartition = false; + + TrDescRead(&desc, rbTup); + + if (desc.type == RB_OBJ_PARTITION || (desc.type == RB_OBJ_INDEX && isPart)) { + isPartition = true; + } + if (isPartition) { + maxNattr = Max(Natts_pg_partition, Natts_pg_recyclebin); + relRel = heap_open(PartitionRelationId, RowExclusiveLock); + relTup = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(desc.relid)); + relfilenoIndex = Anum_pg_partition_relfilenode; + frozenxidIndex = Anum_pg_partition_relfrozenxid; + frozenxid64Index = Anum_pg_partition_relfrozenxid64; + } else { + maxNattr = Max(Natts_pg_class, Natts_pg_recyclebin); + relRel = heap_open(RelationRelationId, RowExclusiveLock); + relTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(desc.relid)); + relfilenoIndex = Anum_pg_class_relfilenode; + frozenxidIndex = Anum_pg_class_relfrozenxid; + frozenxid64Index = Anum_pg_class_relfrozenxid64; + } + + /* 1. 
Update pg_class or pg_partition */ + if (!HeapTupleIsValid(relTup)) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("cache lookup failed for relation %u", desc.relid))); + } + + values = (Datum *)palloc0(sizeof(Datum) * maxNattr); + nulls = (bool *)palloc0(sizeof(bool) * maxNattr); + replaces = (bool *)palloc0(sizeof(bool) * maxNattr); + + replaces[relfilenoIndex - 1] = true; + values[relfilenoIndex - 1] = ObjectIdGetDatum(desc.relfilenode); + + replaces[frozenxidIndex - 1] = true; + values[frozenxidIndex - 1] = ShortTransactionIdGetDatum(desc.frozenxid); + + replaces[frozenxid64Index - 1] = true; + values[frozenxid64Index - 1] = TransactionIdGetDatum(desc.frozenxid64); + + newTup = heap_modify_tuple(relTup, RelationGetDescr(relRel), values, nulls, replaces); + + simple_heap_update(relRel, &newTup->t_self, newTup); + + CatalogUpdateIndexes(relRel, newTup); + + heap_freetuple_ext(newTup); + + /* 2. Update pg_recyclebin */ + rc = memset_s(values, sizeof(Datum) * maxNattr, 0, sizeof(Datum) * maxNattr); + securec_check(rc, "\0", "\0"); + rc = memset_s(nulls, sizeof(bool) * maxNattr, false, sizeof(bool) * maxNattr); + securec_check(rc, "\0", "\0"); + rc = memset_s(replaces, sizeof(bool) * maxNattr, false, sizeof(bool) * maxNattr); + securec_check(rc, "\0", "\0"); + + (void)TrGenObjName(NameStr(name), RelationRelationId, desc.relid); + replaces[Anum_pg_recyclebin_rcyname - 1] = true; + values[Anum_pg_recyclebin_rcyname - 1] = NameGetDatum(&name); + + replaces[Anum_pg_recyclebin_rcyoriginname - 1] = true; + if (isPartition) { + values[Anum_pg_recyclebin_rcyoriginname - 1] = NameGetDatum(&desc.originname); + } else { + values[Anum_pg_recyclebin_rcyoriginname - 1] = NameGetDatum(&((Form_pg_class)GETSTRUCT(relTup))->relname); + } + + replaces[Anum_pg_recyclebin_rcyrecyclecsn - 1] = true; + values[Anum_pg_recyclebin_rcyrecyclecsn - 1] = Int64GetDatum(t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo); + + replaces[Anum_pg_recyclebin_rcyrecycletime - 1] = true; + values[Anum_pg_recyclebin_rcyrecycletime - 1] = TimestampTzGetDatum(GetCurrentTimestamp()); + + replaces[Anum_pg_recyclebin_rcyrelfilenode - 1] = true; + if (isPartition) { + values[Anum_pg_recyclebin_rcyrelfilenode - 1] = + ObjectIdGetDatum(((Form_pg_partition)GETSTRUCT(relTup))->relfilenode); + } else { + values[Anum_pg_recyclebin_rcyrelfilenode - 1] = + ObjectIdGetDatum(((Form_pg_class)GETSTRUCT(relTup))->relfilenode); + } + + replaces[Anum_pg_recyclebin_rcyfrozenxid - 1] = true; + if (isPartition) { + values[Anum_pg_recyclebin_rcyfrozenxid - 1] = + ShortTransactionIdGetDatum(((Form_pg_partition)GETSTRUCT(relTup))->relfrozenxid); + } else { + values[Anum_pg_recyclebin_rcyfrozenxid - 1] = + ShortTransactionIdGetDatum(((Form_pg_class)GETSTRUCT(relTup))->relfrozenxid); + } + + replaces[Anum_pg_recyclebin_rcyfrozenxid64 - 1] = true; + Datum xid64datum = heap_getattr(relTup, frozenxid64Index, RelationGetDescr(relRel), &isNull); + values[Anum_pg_recyclebin_rcyfrozenxid64 - 1] = DatumGetTransactionId(xid64datum); + + newTup = heap_modify_tuple(rbTup, RelationGetDescr(rbRel), values, nulls, replaces); + + simple_heap_update(rbRel, &newTup->t_self, newTup); + + CatalogUpdateIndexes(rbRel, newTup); + + heap_freetuple_ext(newTup); + + pfree(values); + pfree(nulls); + pfree(replaces); + + heap_freetuple_ext(relTup); + heap_close(relRel, RowExclusiveLock); + return; +} + +void TrBaseRelMatched(TrObjDesc *baseDesc) +{ + ObjectAddress obj = {RelationRelationId, baseDesc->relid}; + if (TrIsRefRbObject(&obj)) { + ereport(ERROR, + 
(errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("relation \"%s\" does not exist", baseDesc->originname))); + } + + Relation rel = RelationIdGetRelation(baseDesc->relid); + Assert(RelationIsValid(rel)); + if (RelationGetCreatecsn(rel) != (CommitSeqNo)baseDesc->createcsn) { + ereport(ERROR, + (errmsg("The recycle object \"%s\" and relation \"%s\" do not match.", + baseDesc->name, RelationGetRelationName(rel)))); + } + + if (RelationGetChangecsn(rel) > (CommitSeqNo)baseDesc->changecsn) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("The table definition of \"%s\" has been changed.", + RelationGetRelationName(rel)))); + } + + RelationClose(rel); +} + +void TrAdjustFrozenXid64(Oid dbid, TransactionId *frozenXID) +{ + Relation rbRel; + SysScanDesc sd; + HeapTuple rbtup; + + if (!TcapFeatureAvail()) { + return; + } + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); + while ((rbtup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(rbtup); + TransactionId rcyfrozenxid64; + + if (rbForm->rcydbid != dbid || (rbForm->rcytype != RB_OBJ_TABLE && rbForm->rcytype != RB_OBJ_TOAST)) { + continue; + } + + rcyfrozenxid64 = TrRbGetRcyfrozenxid64(rbtup, rbRel); + Assert(TransactionIdIsNormal(rcyfrozenxid64)); + + if (TransactionIdPrecedes(rcyfrozenxid64, *frozenXID)) { + *frozenXID = rcyfrozenxid64; + } + } + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return; +} + +bool TrRbIsEmptyDb(Oid dbid) +{ + Relation rbRel; + SysScanDesc sd; + HeapTuple tup; + ScanKeyData skey[1]; + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(dbid)); + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 1, skey); + tup = systable_getnext(sd); + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return tup == NULL; +} + +bool TrRbIsEmptySpc(Oid spcId) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcytablespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(spcId)); + + sd = systable_beginscan(rbRel, RecyclebinDbidSpcidRcycsnIndexId, true, NULL, 1, skey); + tup = systable_getnext(sd); + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return tup == NULL; +} + +bool TrRbIsEmptySchema(Oid nspId) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[2]; + HeapTuple tup; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(nspId)); + ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + + sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 2, skey); + tup = systable_getnext(sd); + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return tup == NULL; +} + +bool TrRbIsEmptyUser(Oid roleId) +{ + Relation rbRel; + SysScanDesc sd; + HeapTuple tup; + bool found = false; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); + + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if ((TrObjType)rbForm->rcytype 
!= RB_OBJ_TABLE || rbForm->rcyowner != roleId) { + continue; + } + + found = true; + break; + } + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return !found; +} + +static bool TrOidExists(const List *lOid, Oid oid) +{ + ListCell *cell = NULL; + if (lOid == NULL) { + return false; + } + + foreach (cell, lOid) { + if (oid == (*(Oid *)lfirst(cell))) { + return true; + } + } + return false; +} + +List *TrGetDbListRcy(void) +{ + Relation rbRel; + SysScanDesc sd; + HeapTuple tup; + + List *lName = NIL; + List *lOid = NIL; + char *dbname = NULL; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); + + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if (TrOidExists(lOid, rbForm->rcydbid)) { + continue; + } + Oid *oid = (Oid *)palloc0(sizeof(Oid)); + *oid = rbForm->rcydbid; + lOid = lappend(lOid, oid); + + dbname = get_database_name(rbForm->rcydbid); + if (dbname == NULL) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_DATABASE), + errmsg("database \"%u\" does not exist", rbForm->rcydbid))); + } + lName = lappend(lName, dbname); + } + + list_free_deep(lOid); + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return lName; +} + +List *TrGetDbListSpc(Oid spcId) +{ + List *lName = NIL; + List *lOid = NIL; + char *dbname = NULL; + + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcytablespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(spcId)); + + sd = systable_beginscan(rbRel, RecyclebinDbidSpcidRcycsnIndexId, true, NULL, 1, skey); + + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if (TrOidExists(lOid, rbForm->rcydbid)) { + continue; + } + Oid *oid = (Oid *)palloc0(sizeof(Oid)); + *oid = rbForm->rcydbid; + lOid = lappend(lOid, oid); + + dbname = get_database_name(rbForm->rcydbid); + if (dbname == NULL) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_DATABASE), + errmsg("database \"%u\" does not exist", rbForm->rcydbid))); + } + lName = lappend(lName, dbname); + } + + list_free_deep(lOid); + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return lName; +} + +List *TrGetDbListSchema(Oid nspId) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + List *lName = NIL; + List *lOid = NIL; + char *dbname = NULL; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcynamespace, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(nspId)); + + sd = systable_beginscan(rbRel, RecyclebinDbidNspOrinameIndexId, true, NULL, 1, skey); + + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if (TrOidExists(lOid, rbForm->rcydbid)) { + continue; + } + Oid *oid = (Oid *)palloc0(sizeof(Oid)); + *oid = rbForm->rcydbid; + lOid = lappend(lOid, oid); + + dbname = get_database_name(rbForm->rcydbid); + if (dbname == NULL) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_DATABASE), + errmsg("database \"%u\" does not exist", rbForm->rcydbid))); + } + lName = lappend(lName, dbname); + } + + list_free_deep(lOid); + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return lName; +} + +List *TrGetDbListUser(Oid roleId) +{ + Relation rbRel; + SysScanDesc sd; + HeapTuple 
tup; + + List *lName = NIL; + List *lOid = NIL; + char *dbname = NULL; + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + + sd = systable_beginscan(rbRel, InvalidOid, false, NULL, 0, NULL); + + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if ((TrObjType)rbForm->rcytype != RB_OBJ_TABLE || rbForm->rcyowner != roleId) { + continue; + } + if (TrOidExists(lOid, rbForm->rcydbid)) { + continue; + } + Oid *oid = (Oid *)palloc0(sizeof(Oid)); + *oid = rbForm->rcydbid; + lOid = lappend(lOid, oid); + + dbname = get_database_name(rbForm->rcydbid); + if (dbname == NULL) { + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_DATABASE), + errmsg("database \"%u\" does not exist", rbForm->rcydbid))); + } + + lName = lappend(lName, dbname); + } + + list_free_deep(lOid); + + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return lName; +} + +/* + * TrGetDatabaseList + * Return a list of all databases found in pg_database. + */ +List *TrGetDbListAuto(void) +{ + List* dblist = NIL; + Relation rel; + SysScanDesc sd; + HeapTuple tup; + + rel = heap_open(DatabaseRelationId, AccessShareLock); + sd = systable_beginscan(rel, InvalidOid, false, NULL, 0, NULL); + + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_database pgdatabase = (Form_pg_database)GETSTRUCT(tup); + if (strcmp(NameStr(pgdatabase->datname), "template0") == 0 || + strcmp(NameStr(pgdatabase->datname), "template1") == 0) { + continue; + } + dblist = lappend(dblist, pstrdup(NameStr(pgdatabase->datname))); + } + + systable_endscan(sd); + heap_close(rel, AccessShareLock); + + return dblist; +} + +static bool TrObjInRecyclebin(const ObjectAddress *obj) +{ + Relation rbRel; + SysScanDesc sd; + HeapTuple tup; + ScanKeyData skey[2]; + bool found = false; + + if (getObjectClass(obj) != OCLASS_CLASS) { + return false; + } + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcydbid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(u_sess->proc_cxt.MyDatabaseId)); + ScanKeyInit(&skey[1], Anum_pg_recyclebin_rcyrelid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(obj->objectId)); + + rbRel = heap_open(RecyclebinRelationId, AccessShareLock); + sd = systable_beginscan(rbRel, RecyclebinDbidRelidIndexId, true, NULL, 2, skey); + while ((tup = systable_getnext(sd)) != NULL) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + if ((TrObjType)rbForm->rcyoperation == 'd') { + found = true; + break; + } + } + systable_endscan(sd); + heap_close(rbRel, AccessShareLock); + + return found; +} + +/* + * May this object be a recyclebin object? + * true: with "BIN$" prefix, or not a Relation\Type\Trigger\Constraint\Rule + * false: without "BIN$" prefix, or not exists + */ +static bool TrMaybeRbObject(Oid classid, Oid objid, const char *objname = NULL) +{ + HeapTuple tup; + + /* Note: we preserve rule origin name when RbDrop. 
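Hence a rule may keep its original (non-BIN$) name even though it belongs to a recyclebin object, so the name-prefix shortcut below is applied to every class except pg_rewrite.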
*/ + if (classid != RewriteRelationId && objname) { + return strncmp(objname, "BIN$", 4) == 0; + } + + switch (classid) { + case RelationRelationId: + tup = SearchSysCache1(RELOID, ObjectIdGetDatum(objid)); + if (tup != NULL) { + objname = NameStr(((Form_pg_class)GETSTRUCT(tup))->relname); + ReleaseSysCache(tup); + } + break; + case TypeRelationId: + tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(objid)); + if (tup != NULL) { + objname = NameStr(((Form_pg_type)GETSTRUCT(tup))->typname); + ReleaseSysCache(tup); + } + break; + case TriggerRelationId: { + Relation relTrig; + ScanKeyData skey[1]; + SysScanDesc sd; + + relTrig = heap_open(TriggerRelationId, AccessShareLock); + ScanKeyInit(&skey[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(objid)); + sd = systable_beginscan(relTrig, TriggerOidIndexId, true, NULL, 1, skey); + if ((tup = systable_getnext(sd)) != NULL) { + objname = NameStr(((Form_pg_trigger)GETSTRUCT(tup))->tgname); + } + systable_endscan(sd); + heap_close(relTrig, AccessShareLock); + break; + } + case ConstraintRelationId: + tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(objid)); + if (tup != NULL) { + objname = NameStr(((Form_pg_constraint)GETSTRUCT(tup))->conname); + ReleaseSysCache(tup); + } + break; + case NamespaceRelationId: + /* Treat Namespace as a non-recyclebin object. */ + return false; + default: + /* May be a recyclebin object. */ + return true; + } + + if (objname) { + return strncmp(objname, "BIN$", 4) == 0; + } + + return false; +} + +static bool TrIsRefRbObjectImpl(Relation depRel, const ObjectAddress *obj, ObjectAddresses *objSet) +{ + int startIdx; + + if (TrObjInRecyclebin(obj)) { + return true; + } + + startIdx = objSet->numrefs; + + if (!TrMaybeRbObject(obj->classId, obj->objectId)) { + return false; + } + + TrFindAllRefObjs(depRel, obj, objSet, true); + TrFindAllInternalObjs(depRel, obj, objSet, true); + + for (int i = startIdx; i < objSet->numrefs; i++) { + if (TrIsRefRbObjectImpl(depRel, &objSet->refs[i], objSet)) { + return true; + } + } + + return false; +} + +/* object is an rb object, or references an rb object. */ +static bool TrIsRefRbObject(const ObjectAddress *obj, Relation depRel) +{ + ObjectAddresses *objSet = new_object_addresses(); + bool relArgNull = depRel == NULL; + bool result = false; + + if (relArgNull) { + depRel = heap_open(DependRelationId, AccessShareLock); + } + + /* Note: we do not care about obj->deptype here. */ + add_object_address_ext1(obj, objSet); + + result = TrIsRefRbObjectImpl(depRel, obj, objSet); + + free_object_addresses(objSet); + + if (relArgNull) { + heap_close(depRel, AccessShareLock); + } + + return result; +} + +bool TrIsRefRbObjectEx(Oid classid, Oid objid, const char *objname) +{ + if (!TcapFeatureAvail()) { + return false; + } + + /* Note: we preserve rule origin name when RbDrop. 
*/ + if (TrRbIsEmptyDb(u_sess->proc_cxt.MyDatabaseId)) { + return false; + } + + if (classid != RewriteRelationId && objname && strncmp(objname, "BIN$", 4) != 0) { + return false; + } + + ObjectAddress obj = {classid, objid}; + + return TrIsRefRbObject(&obj); +} + +void TrForbidAccessRbDependencies(Relation depRel, const ObjectAddress *depender, + const ObjectAddress *referenced, int nreferenced) +{ + if (!TcapFeatureAvail()) { + return; + } + + if (IsInitdb || TrRbIsEmptyDb(u_sess->proc_cxt.MyDatabaseId)) { + return; + } + + if (TrIsRefRbObject(depender, depRel)) { + elog (ERROR, "can not access recycle object."); + } + + for (int i = 0; i < nreferenced; i++, referenced++) { + if (TrIsRefRbObject(referenced, depRel)) { + elog (ERROR, "can not access recycle object."); + } + } + + return; +} + +void TrForbidAccessRbObject(Oid classid, Oid objid, const char *objname) +{ + if (!TcapFeatureAvail()) { + return; + } + + if (TrRbIsEmptyDb(u_sess->proc_cxt.MyDatabaseId) || !TrMaybeRbObject(classid, objid, objname)) { + return; + } + + ObjectAddress obj = {classid, objid}; + if (TrIsRefRbObject(&obj)) { + elog (ERROR, "can not access recycle object."); + } + + return; +} + +Datum gs_is_recycle_object(PG_FUNCTION_ARGS) +{ + int classid = PG_GETARG_INT32(0); + int objid = PG_GETARG_INT32(1); + Name objname = PG_GETARG_NAME(2); + bool result = false; + result = TrIsRefRbObjectEx(classid, objid, NameStr(*objname)); + PG_RETURN_BOOL(result); +} diff --git a/src/gausskernel/storage/tcap/tcap_truncate.cpp b/src/gausskernel/storage/tcap/tcap_truncate.cpp index a713daa0a..ea048f21a 100644 --- a/src/gausskernel/storage/tcap/tcap_truncate.cpp +++ b/src/gausskernel/storage/tcap/tcap_truncate.cpp @@ -1,338 +1,491 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * --------------------------------------------------------------------------------------- - * - * tcap_truncate.cpp - * Routines to support Timecapsule `Recyclebin-based query, restore`. - * We use Tr prefix to indicate it in following coding. 
- * - * IDENTIFICATION - * src/gausskernel/storage/tcap/tcap_truncate.cpp - * - * --------------------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "pgstat.h" -#include "access/reloptions.h" -#include "access/sysattr.h" -#include "access/xlog.h" -#include "catalog/dependency.h" -#include "catalog/heap.h" -#include "catalog/index.h" -#include "catalog/indexing.h" -#include "catalog/objectaccess.h" -#include "catalog/pg_collation_fn.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_conversion_fn.h" -#include "catalog/pg_conversion.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension_data_source.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_foreign_server.h" -#include "catalog/pg_job.h" -#include "catalog/pg_language.h" -#include "catalog/pg_largeobject.h" -#include "catalog/pg_object.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_opfamily.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_recyclebin.h" -#include "catalog/pg_rewrite.h" -#include "catalog/pg_rlspolicy.h" -#include "catalog/pg_synonym.h" -#include "catalog/pg_tablespace.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_ts_config.h" -#include "catalog/pg_ts_dict.h" -#include "catalog/pg_ts_parser.h" -#include "catalog/pg_ts_template.h" -#include "catalog/pgxc_class.h" -#include "catalog/storage.h" -#include "commands/comment.h" -#include "commands/dbcommands.h" -#include "commands/directory.h" -#include "commands/extension.h" -#include "commands/proclang.h" -#include "commands/schemacmds.h" -#include "commands/seclabel.h" -#include "commands/sec_rls_cmds.h" -#include "commands/tablecmds.h" -#include "commands/tablespace.h" -#include "commands/trigger.h" -#include "commands/typecmds.h" -#include "executor/node/nodeModifyTable.h" -#include "rewrite/rewriteRemove.h" -#include "storage/lmgr.h" -#include "storage/predicate.h" -#include "storage/smgr/relfilenode.h" -#include "utils/acl.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/relcache.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" - -#include "storage/tcap.h" -#include "storage/tcap_impl.h" - -void TrRelationSetNewRelfilenode(Relation relation, TransactionId freezeXid, void *baseDesc) -{ - TrObjDesc desc; - TrObjDesc *trBaseDesc = (TrObjDesc *)baseDesc; - RelFileNodeBackend newrnode; - - /* Indexes, sequences must have Invalid frozenxid; other rels must not. */ - Assert((((relation->rd_rel->relkind == RELKIND_INDEX) || (RELKIND_IS_SEQUENCE(relation->rd_rel->relkind))) ? - (freezeXid == InvalidTransactionId) : - TransactionIdIsNormal(freezeXid)) || - relation->rd_rel->relkind == RELKIND_RELATION); - - /* Record the old relfilenode to recyclebin. */ - desc = *trBaseDesc; - if (!TR_IS_BASE_OBJ_EX(trBaseDesc, RelationGetRelid(relation))) { - TrDescInit(relation, &desc, RB_OPER_TRUNCATE, TrGetObjType(InvalidOid, RelationGetRelkind(relation)), false); - TrDescWrite(&desc); - } - - /* Allocate a new relfilenode, create storage for the main fork. */ - newrnode = CreateNewRelfilenode(relation, freezeXid); - - /* - * NOTE: if the relation was created in this transaction, it will now be - * present in the pending-delete list twice, once with atCommit true and - * once with atCommit false. 
Hence, it will be physically deleted at end - * of xact in either case (and the other entry will be ignored by - * smgrDoPendingDeletes, so no error will occur). We could instead remove - * the existing list entry and delete the physical file immediately, but - * for now I'll keep the logic simple. - */ - RelationCloseSmgr(relation); - - /* - * Update pg_class entry for new relfilenode. - */ - UpdatePgclass(relation, freezeXid, &newrnode); - - /* - * Make the pg_class row change visible, as well as the relation map - * change if any. This will cause the relcache entry to get updated, too. - */ - CommandCounterIncrement(); - - /* - * Mark the rel as having been given a new relfilenode in the current - * (sub) transaction. This is a hint that can be used to optimize later - * operations on the rel in the same transaction. - */ - relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId(); - - /* ... and now we have eoxact cleanup work to do */ - u_sess->relcache_cxt.need_eoxact_work = true; -} - -bool TrCheckRecyclebinTruncate(const TruncateStmt *stmt) -{ - RangeVar *rel = NULL; - Oid relid; - - if (/* - * Disable Recyclebin-based-Truncate when with purge option, or - */ - /* recyblebin disabled, or */ - !u_sess->attr.attr_storage.enable_recyclebin || - /* with purge option, or */ - stmt->purge || - /* with restart_seqs option, or */ - stmt->restart_seqs || - /* multi objects truncate. */ - list_length(stmt->relations) != 1) { - return false; - } - - rel = (RangeVar *)linitial(stmt->relations); - relid = RangeVarGetRelid(rel, NoLock, false); - - return NeedTrComm(relid); -} - -void TrTruncate(const TruncateStmt *stmt) -{ - RangeVar *rv = (RangeVar*)linitial(stmt->relations); - Relation rel; - Oid relid; - Oid toastRelid; - TrObjDesc baseDesc; - - /* - * 1. Open relation in AccessExclusiveLock, and check permission, etc. - */ - - rel = heap_openrv(rv, AccessExclusiveLock); - relid = RelationGetRelid(rel); - - TrForbidAccessRbObject(RelationRelationId, relid, rv->relname); - - truncate_check_rel(rel); - - /* - * This effectively deletes all rows in the table, and may be done - * in a serializable transaction. In that case we must record a - * rw-conflict in to this transaction from each transaction - * holding a predicate lock on the table. - */ - CheckTableForSerializableConflictIn(rel); - - /* - * 2. Create a new empty storage file for the relation, and assign it - * as the relfilenode value, and record the old relfilenode to recyclebin. - */ - - TrDescInit(rel, &baseDesc, RB_OPER_TRUNCATE, RB_OBJ_TABLE, true, true); - baseDesc.id = baseDesc.baseid = TrDescWrite(&baseDesc); - TrUpdateBaseid(&baseDesc); - - TrRelationSetNewRelfilenode(rel, u_sess->utils_cxt.RecentXmin, &baseDesc); - - /* - * 3. The same for the toast table, if any. - */ - - toastRelid = rel->rd_rel->reltoastrelid; - if (OidIsValid(toastRelid)) { - Relation relToast = relation_open(toastRelid, AccessExclusiveLock); - TrRelationSetNewRelfilenode(relToast, u_sess->utils_cxt.RecentXmin, &baseDesc); - heap_close(relToast, NoLock); - } - - /* - * 4. Reconstruct the indexes to match, and we're done. - */ - - (void)ReindexRelation(relid, REINDEX_REL_PROCESS_TOAST, REINDEX_ALL_INDEX, &baseDesc); - - /* - * 5. Report stat, and clean. - */ - - /* report truncate to PgStatCollector */ - pgstat_report_truncate(relid, InvalidOid, false); - - /* Record time of truancate relation. 
*/ - recordRelationMTime(relid, rel->rd_rel->relkind); - - heap_close(rel, NoLock); -} - -/* - * RelationDropStorage - * Schedule unlinking of physical storage at transaction commit. - */ -void TrDoPurgeObjectTruncate(TrObjDesc *desc) -{ - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - Assert (desc->type == RB_OBJ_TABLE && desc->canpurge); - - rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcybaseid, BTEqualStrategyNumber, - F_INT8EQ, Int64GetDatum(desc->baseid)); - - sd = systable_beginscan(rbRel, RecyclebinBaseidIndexId, true, NULL, 1, skey); - while (HeapTupleIsValid(tup = systable_getnext(sd))) { - Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); - RelFileNode rnode; - - rnode.spcNode = ConvertToRelfilenodeTblspcOid(rbForm->rcytablespace); - rnode.dbNode = (rnode.spcNode == GLOBALTABLESPACE_OID) ? InvalidOid : - u_sess->proc_cxt.MyDatabaseId; - rnode.relNode = rbForm->rcyrelfilenode; - rnode.bucketNode = InvalidBktId; - - /* - * Schedule unlinking of the old storage at transaction commit. - */ - InsertStorageIntoPendingList( - &rnode, InvalidAttrNumber, InvalidBackendId, rbForm->rcyowner, true, false); - - simple_heap_delete(rbRel, &tup->t_self); - - ereport(LOG, (errmsg("Delete truncated object %u/%u/%u", rnode.spcNode, - rnode.dbNode, rnode.relNode))); - } - - systable_endscan(sd); - heap_close(rbRel, RowExclusiveLock); - - /* ... and now we have eoxact cleanup work to do */ - u_sess->relcache_cxt.need_eoxact_work = true; - - /* - * CommandCounterIncrement here to ensure that preceding changes are all - * visible to the next deletion step. - */ - CommandCounterIncrement(); -} - -/* flashback table to before truncate */ -void TrRestoreTruncate(const TimeCapsuleStmt *stmt) -{ - TrObjDesc baseDesc; - Relation rbRel; - SysScanDesc sd; - ScanKeyData skey[1]; - HeapTuple tup; - - /* 1. Fetch the latest available recycle object. */ - TrOperFetch(stmt->relation, RB_OBJ_TABLE, &baseDesc, RB_OPER_RESTORE_TRUNCATE); - - /* 2. Lock recycle object and base relation. */ - baseDesc.authid = GetUserId(); - TrOperPrep(&baseDesc, RB_OPER_RESTORE_TRUNCATE); - - /* 3. Check base relation whether normal and matched. */ - TrBaseRelMatched(&baseDesc); - - /* 4. Do restore. */ - rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); - - ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcybaseid, BTEqualStrategyNumber, - F_INT8EQ, Int64GetDatum(baseDesc.id)); - - sd = systable_beginscan(rbRel, RecyclebinBaseidIndexId, true, NULL, 1, skey); - while (HeapTupleIsValid(tup = systable_getnext(sd))) { - TrSwapRelfilenode(rbRel, tup); - } - - systable_endscan(sd); - heap_close(rbRel, RowExclusiveLock); - - /* - * CommandCounterIncrement here to ensure that preceding changes are all - * visible to the next deletion step. - */ - CommandCounterIncrement(); - - return; -} +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * tcap_truncate.cpp + * Routines to support Timecapsule `Recyclebin-based query, restore`. + * We use Tr prefix to indicate it in following coding. + * + * IDENTIFICATION + * src/gausskernel/storage/tcap/tcap_truncate.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "pgstat.h" +#include "access/reloptions.h" +#include "access/sysattr.h" +#include "access/xlog.h" +#include "catalog/dependency.h" +#include "catalog/heap.h" +#include "catalog/index.h" +#include "catalog/indexing.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_collation_fn.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_conversion_fn.h" +#include "catalog/pg_conversion.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_extension_data_source.h" +#include "catalog/pg_extension.h" +#include "catalog/pg_foreign_data_wrapper.h" +#include "catalog/pg_foreign_server.h" +#include "catalog/pg_job.h" +#include "catalog/pg_language.h" +#include "catalog/pg_largeobject.h" +#include "catalog/pg_object.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_recyclebin.h" +#include "catalog/pg_rewrite.h" +#include "catalog/pg_rlspolicy.h" +#include "catalog/pg_synonym.h" +#include "catalog/pg_tablespace.h" +#include "catalog/pg_trigger.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_template.h" +#include "catalog/pgxc_class.h" +#include "catalog/storage.h" +#include "catalog/pg_partition_fn.h" +#include "commands/comment.h" +#include "commands/dbcommands.h" +#include "commands/directory.h" +#include "commands/extension.h" +#include "commands/matview.h" +#include "commands/proclang.h" +#include "commands/schemacmds.h" +#include "commands/seclabel.h" +#include "commands/sec_rls_cmds.h" +#include "commands/tablecmds.h" +#include "commands/tablespace.h" +#include "commands/trigger.h" +#include "commands/typecmds.h" +#include "executor/node/nodeModifyTable.h" +#include "rewrite/rewriteRemove.h" +#include "storage/lmgr.h" +#include "storage/predicate.h" +#include "storage/smgr/relfilenode.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/relcache.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" + +#include "storage/tcap.h" +#include "storage/tcap_impl.h" + +void TrRelationSetNewRelfilenode(Relation relation, TransactionId freezeXid, void *baseDesc) +{ + TrObjDesc desc; + TrObjDesc *trBaseDesc = (TrObjDesc *)baseDesc; + RelFileNodeBackend newrnode; + + /* Indexes, sequences must have Invalid frozenxid; other rels must not. */ + Assert((((relation->rd_rel->relkind == RELKIND_INDEX) || (relation->rd_rel->relkind == RELKIND_GLOBAL_INDEX) || + (RELKIND_IS_SEQUENCE(relation->rd_rel->relkind))) ? + (freezeXid == InvalidTransactionId) : + TransactionIdIsNormal(freezeXid)) || + relation->rd_rel->relkind == RELKIND_RELATION); + + /* Record the old relfilenode to recyclebin. 
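Only non-base objects (e.g., toast relations) get a fresh entry here; the base object's entry was already written by the caller and is detected via TR_IS_BASE_OBJ_EX.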
*/ + desc = *trBaseDesc; + if (!TR_IS_BASE_OBJ_EX(trBaseDesc, RelationGetRelid(relation))) { + TrDescInit(relation, &desc, RB_OPER_TRUNCATE, + TrGetObjType(RelationGetNamespace(relation), RelationGetRelkind(relation)), false); + TrDescWrite(&desc); + } + + /* Allocate a new relfilenode, create storage for the main fork. */ + newrnode = CreateNewRelfilenode(relation, freezeXid); + + /* + * NOTE: if the relation was created in this transaction, it will now be + * present in the pending-delete list twice, once with atCommit true and + * once with atCommit false. Hence, it will be physically deleted at end + * of xact in either case (and the other entry will be ignored by + * smgrDoPendingDeletes, so no error will occur). We could instead remove + * the existing list entry and delete the physical file immediately, but + * for now I'll keep the logic simple. + */ + RelationCloseSmgr(relation); + + /* + * Update pg_class entry for new relfilenode. + */ + UpdatePgclass(relation, freezeXid, &newrnode); + + /* + * Make the pg_class row change visible, as well as the relation map + * change if any. This will cause the relcache entry to get updated, too. + */ + CommandCounterIncrement(); + + /* + * Mark the rel as having been given a new relfilenode in the current + * (sub) transaction. This is a hint that can be used to optimize later + * operations on the rel in the same transaction. + */ + relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId(); + + /* ... and now we have eoxact cleanup work to do */ + u_sess->relcache_cxt.need_eoxact_work = true; +} + +void TrPartitionSetNewRelfilenode(Relation parent, Partition part, TransactionId freezeXid, void *baseDesc) +{ + RelFileNodeBackend newrnode; + TrObjDesc desc; + TrObjDesc *trBaseDesc = (TrObjDesc *)baseDesc; + + Assert((parent->rd_rel->relkind == RELKIND_INDEX || RELKIND_IS_SEQUENCE(parent->rd_rel->relkind)) + ? freezeXid == InvalidTransactionId + : TransactionIdIsNormal(freezeXid)); + + /* Record the old relfilenode to recyclebin. */ + desc = *trBaseDesc; + if (!TR_IS_BASE_OBJ_EX(trBaseDesc, part->pd_id)) { + TrPartDescInit(parent, part, &desc, RB_OPER_TRUNCATE, + TrGetObjType(RelationGetNamespace(parent), RelationGetRelkind(parent)), false); + TrDescWrite(&desc); + } + + /* Allocate a new relfilenode */ + newrnode = CreateNewRelfilenodePart(parent, part); + + UpdatePartition(parent, part, freezeXid, &newrnode); + + CommandCounterIncrement(); + + /* + * Mark the part as having been given a new relfilenode in the current + * (sub) transaction. This is a hint that can be used to optimize later + * operations on the rel in the same transaction. + */ + part->pd_newRelfilenodeSubid = GetCurrentSubTransactionId(); + + /* ... and now we have eoxact cleanup work to do */ + u_sess->cache_cxt.part_cache_need_eoxact_work = true; +} + +bool TrCheckRecyclebinTruncate(const TruncateStmt *stmt) +{ + RangeVar *rel = NULL; + Oid relid; + + if (/* + * Disable Recyclebin-based-Truncate when: + */ + /* recyclebin disabled, or */ + !u_sess->attr.attr_storage.enable_recyclebin || + /* with purge option, or */ + stmt->purge || + /* with restart_seqs option, or */ + stmt->restart_seqs || + /* multi-object truncate. 
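(TRUNCATE with more than one target relation always falls back to the ordinary truncate path.)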
*/ + list_length(stmt->relations) != 1) { + return false; + } + + rel = (RangeVar *)linitial(stmt->relations); + relid = RangeVarGetRelid(rel, NoLock, false); + + return NeedTrComm(relid); +} + +void TrTruncateOnePart(Relation rel, HeapTuple tup, Oid insertBaseid) +{ + Oid toastOid = ((Form_pg_partition)GETSTRUCT(tup))->reltoastrelid; + Relation toastRel = NULL; + Oid partOid = HeapTupleGetOid(tup); + Partition p = partitionOpen(rel, partOid, AccessExclusiveLock); + TrObjDesc baseDesc; + TrPartDescInit(rel, p, &baseDesc, RB_OPER_TRUNCATE, RB_OBJ_PARTITION, false); + baseDesc.id = baseDesc.baseid = insertBaseid; + (void)TrDescWrite(&baseDesc); + TrUpdateBaseid(&baseDesc); + + TrPartitionSetNewRelfilenode(rel, p, u_sess->utils_cxt.RecentXmin, &baseDesc); + + /* process the toast table */ + if (OidIsValid(toastOid)) { + Assert(rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED); + toastRel = heap_open(toastOid, AccessExclusiveLock); + TrRelationSetNewRelfilenode(toastRel, u_sess->utils_cxt.RecentXmin, &baseDesc); + heap_close(toastRel, AccessExclusiveLock); + } + partitionClose(rel, p, AccessExclusiveLock); + + /* report truncate partition to PgStatCollector */ + pgstat_report_truncate(partOid, rel->rd_id, rel->rd_rel->relisshared); +} + +void TrPartitionTableProcess(Relation rel, Oid insertBaseid) +{ + /* truncate partitioned table */ + List* partTupleList = NIL; + ListCell* partCell = NULL; + Oid heap_relid; + bool is_shared = rel->rd_rel->relisshared; + + heap_relid = RelationGetRelid(rel); + /* partitioned tables do not support unlogged tables */ + Assert(rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED); + + /* process all partitions */ + partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, rel->rd_id); + foreach (partCell, partTupleList) { + if (RelationIsSubPartitioned(rel)) { + /* the "tup" is only used to get partOid; UHeapTuple has no HEAP_HASOID flag, so HeapTuple is used here */ + HeapTuple tup = (HeapTuple)lfirst(partCell); + Oid partOid = HeapTupleGetOid(tup); + Partition p = partitionOpen(rel, partOid, AccessExclusiveLock); + Relation partRel = partitionGetRelation(rel, p); + List* subPartTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, partOid); + ListCell* subPartCell = NULL; + foreach (subPartCell, subPartTupleList) { + HeapTuple tup = (HeapTuple)lfirst(subPartCell); + TrTruncateOnePart(partRel, tup, insertBaseid); + } + freePartList(subPartTupleList); + + releaseDummyRelation(&partRel); + partitionClose(rel, p, AccessExclusiveLock); + } else { + HeapTuple tup = (HeapTuple)lfirst(partCell); + TrTruncateOnePart(rel, tup, insertBaseid); + } + } + + freePartList(partTupleList); + /* report truncate partitioned table to PgStatCollector */ + pgstat_report_truncate(heap_relid, InvalidOid, is_shared); +} + +void TrTruncate(const TruncateStmt *stmt) +{ + RangeVar *rv = (RangeVar*)linitial(stmt->relations); + Relation rel; + Oid relid; + Oid toastRelid; + TrObjDesc baseDesc; + + /* + * 1. Open relation in AccessExclusiveLock, and check permission, etc. + */ + + rel = heap_openrv(rv, AccessExclusiveLock); + relid = RelationGetRelid(rel); + + /* check whether a materialized view log exists for this relation. 
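If one exists, the truncate is rejected below with ERRCODE_FEATURE_NOT_SUPPORTED.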
*/ + Oid mlogid = find_matview_mlog_table(relid); + if (OidIsValid(mlogid)) { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("truncate of a table referenced by a materialized view is not supported."))); + } + + TrForbidAccessRbObject(RelationRelationId, relid, rv->relname); + + truncate_check_rel(rel); + + /* + * This effectively deletes all rows in the table, and may be done + * in a serializable transaction. In that case we must record a + * rw-conflict in to this transaction from each transaction + * holding a predicate lock on the table. + */ + CheckTableForSerializableConflictIn(rel); + + /* + * 2. Create a new empty storage file for the relation, and assign it + * as the relfilenode value, and record the old relfilenode to recyclebin. + */ + + TrDescInit(rel, &baseDesc, RB_OPER_TRUNCATE, RB_OBJ_TABLE, true, true); + baseDesc.id = baseDesc.baseid = TrDescWrite(&baseDesc); + TrUpdateBaseid(&baseDesc); + + /* + * step 2.1. If rel is a partitioned table, find all its partitions, create a new empty + * storage file for each of them, assign these as the new relfilenode values, and + * record the old relfilenodes to the recyclebin. + */ + if (RELATION_IS_PARTITIONED(rel)) { + TrPartitionTableProcess(rel, baseDesc.baseid); + } + + TrRelationSetNewRelfilenode(rel, u_sess->utils_cxt.RecentXmin, &baseDesc); + + /* + * 3. The same for the toast table, if any. + */ + + toastRelid = rel->rd_rel->reltoastrelid; + if (OidIsValid(toastRelid) && !RELATION_IS_PARTITIONED(rel)) { + Relation relToast = relation_open(toastRelid, AccessExclusiveLock); + TrRelationSetNewRelfilenode(relToast, u_sess->utils_cxt.RecentXmin, &baseDesc); + heap_close(relToast, NoLock); + } + + /* + * 4. Reconstruct the indexes to match, and we're done. + */ + + (void)ReindexRelation(relid, REINDEX_REL_PROCESS_TOAST, REINDEX_ALL_INDEX, &baseDesc); + + /* + * 5. Report stat, and clean. + */ + + /* report truncate to PgStatCollector */ + pgstat_report_truncate(relid, InvalidOid, false); + + /* Record time of truncate relation. */ + recordRelationMTime(relid, rel->rd_rel->relkind); + + heap_close(rel, NoLock); +} + +/* + * TrDoPurgeObjectTruncate + * Schedule unlinking of physical storage at transaction commit. + */ +void TrDoPurgeObjectTruncate(TrObjDesc *desc) +{ + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + + Assert (((desc->type == RB_OBJ_TABLE) && desc->canpurge) || ((desc->type == RB_OBJ_PARTITION) && !desc->canpurge)); + + rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcybaseid, BTEqualStrategyNumber, + F_INT8EQ, Int64GetDatum(desc->baseid)); + + sd = systable_beginscan(rbRel, RecyclebinBaseidIndexId, true, NULL, 1, skey); + while (HeapTupleIsValid(tup = systable_getnext(sd))) { + Form_pg_recyclebin rbForm = (Form_pg_recyclebin)GETSTRUCT(tup); + RelFileNode rnode; + + rnode.spcNode = ConvertToRelfilenodeTblspcOid(rbForm->rcytablespace); + rnode.dbNode = (rnode.spcNode == GLOBALTABLESPACE_OID) ? InvalidOid : + u_sess->proc_cxt.MyDatabaseId; + rnode.relNode = rbForm->rcyrelfilenode; + rnode.bucketNode = InvalidBktId; + + /* + * Schedule unlinking of the old storage at transaction commit. 
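+ * The physical files are not removed here; smgrDoPendingDeletes unlinks them once the purging transaction commits.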
+ */ + InsertStorageIntoPendingList( + &rnode, InvalidAttrNumber, InvalidBackendId, rbForm->rcyowner, true, false); + + simple_heap_delete(rbRel, &tup->t_self); + + ereport(LOG, (errmsg("Delete truncated object %u/%u/%u", rnode.spcNode, + rnode.dbNode, rnode.relNode))); + } + + systable_endscan(sd); + heap_close(rbRel, RowExclusiveLock); + + /* ... and now we have eoxact cleanup work to do */ + u_sess->relcache_cxt.need_eoxact_work = true; + + /* + * CommandCounterIncrement here to ensure that preceding changes are all + * visible to the next deletion step. + */ + CommandCounterIncrement(); +} + +/* flashback table to before truncate */ +void TrRestoreTruncate(const TimeCapsuleStmt *stmt) +{ + TrObjDesc baseDesc; + Relation rbRel; + SysScanDesc sd; + ScanKeyData skey[1]; + HeapTuple tup; + Relation rel; + Oid relid; + bool found = false; + TrObjDesc desc; + + /* handle restore after a failed truncate-restore, where the table still carries its recycle name, e.g.: TIMECAPSULE TABLE "BIN$3C534EBE021$4930808==$0" TO BEFORE TRUNCATE; */ + found = TrFetchName(stmt->relation->relname, RB_OBJ_TABLE, &desc, RB_OPER_RESTORE_TRUNCATE); + if (found) { + stmt->relation->relname = desc.originname; + } + + rel = heap_openrv(stmt->relation, AccessExclusiveLock); + relid = RelationGetRelid(rel); + if (rel->rd_tam_type == TAM_HEAP) { + heap_close(rel, NoLock); + elog(ERROR, "timecapsule does not support astore yet"); + return; + } + + if (found) { + stmt->relation->relname = desc.name; + } + + /* 1. Fetch the latest available recycle object. */ + TrOperFetch(stmt->relation, RB_OBJ_TABLE, &baseDesc, RB_OPER_RESTORE_TRUNCATE); + + /* 2. Lock recycle object and base relation. */ + baseDesc.authid = GetUserId(); + TrOperPrep(&baseDesc, RB_OPER_RESTORE_TRUNCATE); + + /* 3. Check whether the base relation is normal and matches the recycle object. */ + TrBaseRelMatched(&baseDesc); + + /* 4. Do restore. */ + rbRel = heap_open(RecyclebinRelationId, RowExclusiveLock); + + ScanKeyInit(&skey[0], Anum_pg_recyclebin_rcybaseid, BTEqualStrategyNumber, + F_INT8EQ, Int64GetDatum(baseDesc.id)); + + sd = systable_beginscan(rbRel, RecyclebinBaseidIndexId, true, NULL, 1, skey); + while (HeapTupleIsValid(tup = systable_getnext(sd))) { + if (!RELATION_IS_PARTITIONED(rel)) { + TrSwapRelfilenode(rbRel, tup, false); + } else { + TrSwapRelfilenode(rbRel, tup, true); + } + } + + systable_endscan(sd); + heap_close(rbRel, RowExclusiveLock); + heap_close(rel, NoLock); + + /* + * CommandCounterIncrement here to ensure that preceding changes are all + * visible to the next deletion step. 
+ */ + CommandCounterIncrement(); + + return; +} diff --git a/src/gausskernel/storage/tcap/tcap_version.cpp b/src/gausskernel/storage/tcap/tcap_version.cpp index 0225363df..aad39ed3f 100644 --- a/src/gausskernel/storage/tcap/tcap_version.cpp +++ b/src/gausskernel/storage/tcap/tcap_version.cpp @@ -27,8 +27,10 @@ #include "access/tableam.h" #include "catalog/indexing.h" +#include "catalog/pg_partition_fn.h" #include "catalog/pg_snapshot.h" #include "commands/tablecmds.h" +#include "commands/matview.h" #include "executor/node/nodeModifyTable.h" #include "fmgr.h" #include "nodes/plannodes.h" @@ -133,8 +135,8 @@ static bool TvFeatureSupport(Oid relid, char **errstr, bool isTimecapsuleTable) *errstr = "timecapsule feature does not support system table"; } else if (classForm->relpersistence != RELPERSISTENCE_PERMANENT) { *errstr = "timecapsule feature does not support non-permanent table"; - } else if (classForm->parttype != PARTTYPE_NON_PARTITIONED_RELATION) { - *errstr = "timecapsule feature does not support partitioned table"; + } else if (rel->rd_tam_type == TAM_HEAP) { + *errstr = "timecapsule feature does not support heap table"; } else if ((RELATION_HAS_BUCKET(rel) || RELATION_OWN_BUCKET(rel))) { *errstr = "timecapsule feature does not support hash-bucket table"; } else if (!RelationIsRowFormat(rel)) { @@ -249,8 +251,45 @@ static Const *TvEvalVerExpr(TvVersionType tvtype, Node *tvver) } /* + * Function: the undo recycle thread obtains the current time and fetches SnpXmin + * from the flashback snapshot to calculate the transaction XID to be preserved for old versions. * We use the round-down way to obtain snapshots. that is, - * select * from pg_snapshot where snptime <= :tz order by snptime desc limit 1; + * select SnpXmin from gs_txn_snapshot where snptime <= :tz order by snptime desc limit 1; + */ +TransactionId TvFetchSnpxminRecycle(TimestampTz tz) +{ + Relation rel; + SysScanDesc sd; + ScanKeyData skey[3]; + HeapTuple tup; + Datum value; + bool isnull = false; + TransactionId snapxmin = FirstNormalTransactionId; + + rel = heap_open(SnapshotRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], Anum_pg_snapshot_snptime, BTLessEqualStrategyNumber, + F_TIMESTAMP_LE, TimestampTzGetDatum(tz)); + + sd = systable_beginscan(rel, SnapshotTimeCsnIndexId, true, NULL, 1, skey); + tup = systable_getnext(sd); + /* Limit 1 */ + if (tup == NULL) { + elog(WARNING, "cannot find the restore point; return FirstNormalTransactionId to prevent undo recycle from advancing"); + } else { + value = heap_getattr(tup, Anum_pg_snapshot_snpxmin, RelationGetDescr(rel), &isnull); + snapxmin = DatumGetTransactionId(value); + } + + systable_endscan(sd); + heap_close(rel, AccessShareLock); + + return snapxmin; +} + +/* + * We use the round-down way to obtain snapshots. that is, + * select * from gs_txn_snapshot where snptime <= :tz order by snptime desc limit 1; */ static void TvFetchSnapTz(TimestampTz tz, Snapshot snap) { @@ -296,7 +335,7 @@ static void TvFetchSnapTz(TimestampTz tz, Snapshot snap) /* * We use the round-down way to obtain snapshots. 
that is, - * select * from pg_snapshot where snpcsn <= :csn order by snpcsn desc limit 1; + * select * from gs_txn_snapshot where snpcsn <= :csn order by snpcsn desc limit 1; */ static void TvFetchSnapCsn(int64 csn, Snapshot snap) { @@ -418,8 +457,8 @@ Snapshot TvChooseScanSnap(Relation relation, Scan *scan, ScanState *ss) RangeTblEntry *rte = rt_fetch(scan->scanrelid, estate->es_range_table); TimeCapsuleClause *tcc = rte->timecapsule; - if (likely(tcc == NULL)) { - return snap; + if (likely(tcc == NULL)) { + return snap; } else { bool isnull = false; ExprContext *econtext; @@ -461,20 +500,17 @@ void TvDeleteDelta(Oid relid, Snapshot snap) return; } -void TvUheapDeleteDelta(Oid relid, Snapshot snap) +void TvUheapDeleteDeltaRel(Relation rel, Relation partRel, Partition p, Snapshot snap) { - Relation rel; TableScanDesc sd; UHeapTuple tup; TupleTableSlot *oldslot = NULL; + TransactionId tmfdXmin = InvalidTransactionId; Snapshot snapshotNow = (Snapshot)palloc0(sizeof(SnapshotData)); (void)GetSnapshotData(snapshotNow, false); snap->user_data = (void *)snapshotNow; - /* Notice: invoker already acquired lock */ - rel = heap_open(relid, NoLock); - EState *estate = CreateExecutorState(); /* * We need a ResultRelInfo so we can use the regular executor's @@ -489,27 +525,90 @@ void TvUheapDeleteDelta(Oid relid, Snapshot snap) estate->es_num_result_relations = 1; estate->es_result_relation_info = resultRelInfo; - sd = tableam_scan_begin(rel, snap, 0, NULL); + Relation relRel = (partRel != NULL) ? partRel : rel; + sd = tableam_scan_begin(relRel, snap, 0, NULL); while ((tup = (UHeapTuple)tableam_scan_getnexttuple(sd, ForwardScanDirection)) != NULL) { - SimpleUHeapDelete(rel, &tup->ctid, snapshotNow, &oldslot); - ExecDeleteIndexTuples(oldslot, &tup->ctid, estate, rel, NULL, NULL, false); + SimpleUHeapDelete(relRel, &tup->ctid, snapshotNow, &oldslot, &tmfdXmin); + ExecDeleteIndexTuples(oldslot, &tup->ctid, estate, relRel, p, NULL, false); + if (relRel != NULL && relRel->rd_mlogoid != InvalidOid) { + insert_into_mlog_table(relRel, relRel->rd_mlogoid, NULL, &tup->ctid, tmfdXmin, 'D'); + } if (oldslot) { + ExecDropSingleTupleTableSlot(oldslot); + oldslot = NULL; + } } tableam_scan_end(sd); - heap_close(rel, NoLock); FreeSnapshotDeepForce(snapshotNow); snap->user_data = NULL; ExecCloseIndices(resultRelInfo); + + /* free the fakeRelationCache */ + if (estate->esfRelations != NULL) { + FakeRelationCacheDestroy(estate->esfRelations); + } + pfree(resultRelInfo); return; } +void TvUheapDeleteDeltaPart(Relation rel, Oid relid, Snapshot snap) +{ + List* partTupleList = NIL; + ListCell* partCell = NULL; + + /* Open partition table, find all partition names based on the parentId. + * partitioned tables do not support unlogged tables. 
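+ * Each partition (and each sub-partition, if any) is opened with AccessExclusiveLock and processed by TvUheapDeleteDeltaRel.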
+ */ + Assert(rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED); + + /* process all partitions */ + partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relid); + foreach (partCell, partTupleList) { + /* the "tup" is only used to get partOid; UHeapTuple has no HEAP_HASOID flag, so HeapTuple is used here */ + HeapTuple tup = (HeapTuple)lfirst(partCell); + Oid partOid = HeapTupleGetOid(tup); + Partition p = partitionOpen(rel, partOid, AccessExclusiveLock); + Relation partRel = partitionGetRelation(rel, p); + + if (RelationIsSubPartitioned(rel)) { + List* subPartTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, partOid); + ListCell* subPartCell = NULL; + foreach (subPartCell, subPartTupleList) { + HeapTuple subTup = (HeapTuple)lfirst(subPartCell); + Oid subPartOid = HeapTupleGetOid(subTup); + Partition subPar = partitionOpen(partRel, subPartOid, AccessExclusiveLock); + Relation subPartRel = partitionGetRelation(partRel, subPar); + TvUheapDeleteDeltaRel(rel, subPartRel, subPar, snap); + releaseDummyRelation(&subPartRel); + partitionClose(partRel, subPar, NoLock); + } + freePartList(subPartTupleList); + } else { + TvUheapDeleteDeltaRel(rel, partRel, p, snap); + } + releaseDummyRelation(&partRel); + partitionClose(rel, p, NoLock); + } + freePartList(partTupleList); + return; +} + +void TvUheapDeleteDelta(Oid relid, Snapshot snap) +{ + Relation rel = heap_open(relid, NoLock); + if (RELATION_IS_PARTITIONED(rel)) { + TvUheapDeleteDeltaPart(rel, relid, snap); + } else { + TvUheapDeleteDeltaRel(rel, NULL, NULL, snap); + } + + heap_close(rel, NoLock); +} + typedef HeapTuple (*TvFetchTupleHook)(void *arg); static HeapTuple TvFetchTuple(void *arg) { @@ -674,7 +773,8 @@ static void TvInsertLostImpl(Relation rel, Snapshot snap, TvFetchTupleHook fetch return; } -static void TvUheapInsertLostImpl(Relation rel, Snapshot snap, TvUheapFetchTupleHook fetchTupleHook, void *arg) +static void TvUheapInsertLostImpl(Relation rel, Relation partRel, Partition p, + Snapshot snap, TvUheapFetchTupleHook fetchTupleHook, void *arg) { UHeapTuple tuple; ResultRelInfo *resultRelInfo; @@ -694,9 +794,10 @@ static void TvUheapInsertLostImpl(Relation rel, Snapshot snap, TvUheapFetchTuple estate->es_num_result_relations = 1; estate->es_result_relation_info = resultRelInfo; + Relation relRel = (partRel != NULL) ? 
partRel : rel; /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlot(estate, TAM_USTORE); - ExecSetSlotDescriptor(myslot, RelationGetDescr(rel)); + ExecSetSlotDescriptor(myslot, RelationGetDescr(relRel)); /* Switch into its memory context */ MemoryContext oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); @@ -712,13 +813,20 @@ static void TvUheapInsertLostImpl(Relation rel, Snapshot snap, TvUheapFetchTuple ExecStoreTuple(tuple, slot, InvalidBuffer, false); /* Check the constraints of the tuple */ - if (rel->rd_att->constr) { + if (relRel->rd_att->constr) { ExecConstraints(resultRelInfo, slot, estate); } - UHeapInsert(rel, tuple, mycid, NULL); + UHeapInsert(relRel, tuple, mycid, NULL); List *recheckIndexes = NULL; - recheckIndexes = ExecInsertIndexTuples(myslot, &tuple->ctid, estate, NULL, NULL, InvalidBktId, NULL, NULL); + recheckIndexes = ExecInsertIndexTuples(myslot, &tuple->ctid, estate, partRel, p, InvalidBktId, NULL, NULL); + if (relRel != NULL && relRel->rd_mlogoid != InvalidOid) { + HeapTuple htup = NULL; + Assert(relRel->rd_tam_type == TAM_USTORE); + htup = (HeapTuple)UHeapToHeap(relRel->rd_att, (UHeapTuple)tuple); + insert_into_mlog_table(relRel, relRel->rd_mlogoid, (HeapTuple)tuple, + &(((HeapTuple)tuple)->t_self), GetCurrentTransactionId(), 'I'); + } list_free(recheckIndexes); } MemoryContextSwitchTo(oldcontext); @@ -727,6 +835,11 @@ static void TvUheapInsertLostImpl(Relation rel, Snapshot snap, TvUheapFetchTuple ExecCloseIndices(resultRelInfo); + /* free the fakeRelationCache */ + if (estate->esfRelations != NULL) { + FakeRelationCacheDestroy(estate->esfRelations); + } + FreeExecutorState(estate); pfree(resultRelInfo); @@ -752,17 +865,71 @@ void TvInsertLost(Oid relid, Snapshot snap) return; } +void TvUheapInsertLostRel(Relation rel, Relation partRel, Partition p, Snapshot snap) +{ + TableScanDesc sd; + if (partRel == NULL) { + sd = tableam_scan_begin(rel, snap, 0, NULL); + } else { + sd = tableam_scan_begin(partRel, snap, 0, NULL); + } + /* 1. Insert one by one. */ + TvUheapInsertLostImpl(rel, partRel, p, snap, TvUheapFetchTuple, (void *)sd); + tableam_scan_end(sd); + return; +} + +void TvUheapInsertLostPart(Relation rel, Oid relid, Snapshot snap) +{ + List* partTupleList = NIL; + ListCell* partCell = NULL; + + /* Open partition table, find all partition names based on the parentId. 
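+ * Lost tuples are then re-inserted partition by partition via TvUheapInsertLostRel.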
+ * partitioned tables do not support unlogged tables + */ + Assert(rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED); + + /* process all partitions */ + partTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_PARTITION, relid); + foreach (partCell, partTupleList) { + /* the "tup" is only used to get partOid; UHeapTuple has no HEAP_HASOID flag, so HeapTuple is used here */ + HeapTuple tup = (HeapTuple)lfirst(partCell); + Oid partOid = HeapTupleGetOid(tup); + Partition p = partitionOpen(rel, partOid, AccessExclusiveLock); + Relation partRel = partitionGetRelation(rel, p); + + if (RelationIsSubPartitioned(rel)) { + List* subPartTupleList = searchPgPartitionByParentId(PART_OBJ_TYPE_TABLE_SUB_PARTITION, partOid); + ListCell* subPartCell = NULL; + foreach (subPartCell, subPartTupleList) { + HeapTuple subTup = (HeapTuple)lfirst(subPartCell); + Oid subPartOid = HeapTupleGetOid(subTup); + Partition subPar = partitionOpen(partRel, subPartOid, AccessExclusiveLock); + Relation subPartRel = partitionGetRelation(partRel, subPar); + TvUheapInsertLostRel(rel, subPartRel, subPar, snap); + releaseDummyRelation(&subPartRel); + partitionClose(partRel, subPar, AccessExclusiveLock); + } + freePartList(subPartTupleList); + } else { + TvUheapInsertLostRel(rel, partRel, p, snap); + } + releaseDummyRelation(&partRel); + partitionClose(rel, p, AccessExclusiveLock); + } + freePartList(partTupleList); + return; + +} + void TvUheapInsertLost(Oid relid, Snapshot snap) { - Relation rel; - TableScanDesc sd; - /* 1. Prepare to fetch lost tuples. */ - rel = heap_open(relid, NoLock); - sd = tableam_scan_begin(rel, snap, 0, NULL); - /* 2. Insert one by one. */ - TvUheapInsertLostImpl(rel, snap, TvUheapFetchTuple, (void *)sd); - /* 3. Done, clean. */ - tableam_scan_end(sd); + Relation rel = heap_open(relid, NoLock); + if (RELATION_IS_PARTITIONED(rel)) { + TvUheapInsertLostPart(rel, relid, snap); + } else { + TvUheapInsertLostRel(rel, NULL, NULL, snap); + } heap_close(rel, NoLock); return; } diff --git a/src/gausskernel/storage/test/CMakeLists.txt b/src/gausskernel/storage/test/CMakeLists.txt new file mode 100644 index 000000000..574255bd7 --- /dev/null +++ b/src/gausskernel/storage/test/CMakeLists.txt @@ -0,0 +1,11 @@ +# This is the main CMake file for building all components. +AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_storage_test_SRC) + +set(TGT_storage_test_INC + ${PROJECT_SRC_DIR}/include +) + +set(storage_test_DEF_OPTIONS ${MACRO_OPTIONS}) +set(storage_test_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) +set(storage_test_LINK_OPTIONS ${BIN_LINK_OPTIONS}) +add_static_objtarget(gausskernel_storage_test TGT_storage_test_SRC TGT_storage_test_INC "${storage_test_DEF_OPTIONS}" "${storage_test_COMPILE_OPTIONS}" "${storage_test_LINK_OPTIONS}") diff --git a/src/gausskernel/storage/test/utesteventutil.cpp b/src/gausskernel/storage/test/utesteventutil.cpp new file mode 100644 index 000000000..ec64fd682 --- /dev/null +++ b/src/gausskernel/storage/test/utesteventutil.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * utesteventutil.cpp + * Routines to support unit test. Used to retrieve information during running to help test. + * + * IDENTIFICATION + * src/gausskernel/storage/test/utesteventutil.cpp + * + * --------------------------------------------------------------------------------------- + */ + +#include "utils/utesteventutil.h" + +#ifdef ENABLE_UT +/* + * The following functions only print information. In UT, they can be mocked to + * retrieve whatever you want. + */ +void TestXLogRecParseStateEventProbe(EnumTestEventType eventType, + const char* sourceName, const XLogRecParseState* parseState) +{ + ereport(INFO, (errmsg("test XLogRecParseState event probe. event type:%u, source:%s", + eventType, sourceName))); +} + +void TestXLogReaderProbe(EnumTestEventType eventType, + const char* sourceName, const XLogReaderState* readerState) +{ + ereport(INFO, (errmsg("test XLogReader event probe. event type:%u, source:%s", + eventType, sourceName))); +} + +#endif + diff --git a/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp b/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp index 86d175bfb..0eb3b6429 100644 --- a/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp +++ b/src/gausskernel/storage/xlog_share_storage/xlog_share_storage.cpp @@ -138,7 +138,7 @@ void LocalXLogRead(char *buf, XLogRecPtr startptr, Size count) } XLByteToSeg(recptr, t_thrd.sharestoragexlogcopyer_cxt.readSegNo); - XLogFilePath(path, t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.sharestoragexlogcopyer_cxt.readSegNo); + XLogFilePath(path, MAXPGPATH, t_thrd.xlog_cxt.ThisTimeLineID, t_thrd.sharestoragexlogcopyer_cxt.readSegNo); t_thrd.sharestoragexlogcopyer_cxt.readFile = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0); if (t_thrd.sharestoragexlogcopyer_cxt.readFile < 0) { @@ -251,7 +251,7 @@ static inline int CalcWriteLen(XLogRecPtr startWrite, XLogRecPtr endPtr) Assert((startWrite % XLOG_BLCKSZ) == 0); XLogRecPtr alignWriteEnd = startWrite - startWrite % ShareStorageBufSize + ShareStorageBufSize; if (alignWriteEnd > endPtr) { - XLogRecPtr ActualCopyEnd = endPtr - endPtr % XLOG_BLCKSZ + XLOG_BLCKSZ; + XLogRecPtr ActualCopyEnd = TYPEALIGN(XLOG_BLCKSZ, endPtr); return static_cast(ActualCopyEnd - startWrite); } else { return static_cast(alignWriteEnd - startWrite); @@ -477,7 +477,7 @@ void CheckShareStorageCtlInfo(XLogRecPtr localEnd) char path[MAXPGPATH]; XLogSegNo sendSegNo; XLByteToSeg(ctlInfo->insertHead, sendSegNo); - XLogFilePath(path, t_thrd.xlog_cxt.ThisTimeLineID, sendSegNo); + XLogFilePath(path, MAXPGPATH, t_thrd.xlog_cxt.ThisTimeLineID, sendSegNo); struct stat stat_buf; if (stat(path, &stat_buf) != 0) { @@ -523,12 +523,14 @@ void UpdateShareStorageCtlInfo() changed = true; } - if (ctlInfo->xlogFileSize != g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize) { + if (ctlInfo->xlogFileSize != (uint64)g_instance.attr.attr_storage.xlog_file_size) { + Assert(g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize == ctlInfo->xlogFileSize); if (!FileSizeCanUpdate()) { ereport(FATAL, (errmsg("could not update share storage size."), errdetail("current 
size:%lu, new size:%lu", ctlInfo->xlogFileSize, - g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize))); + g_instance.attr.attr_storage.xlog_file_size))); } + g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize = g_instance.attr.attr_storage.xlog_file_size; ctlInfo->xlogFileSize = g_instance.xlog_cxt.shareStorageopCtl.xlogFileSize; changed = true; } diff --git a/src/get_PlatForm_str.sh b/src/get_PlatForm_str.sh index df614e947..fe29f94ed 100755 --- a/src/get_PlatForm_str.sh +++ b/src/get_PlatForm_str.sh @@ -19,18 +19,18 @@ set -e kernel="" if [ -f "/etc/euleros-release" ] then - kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + kernel=$(cat /etc/euleros-release | awk -F ' ' '{print $1}' | tr A-Z a-z) elif [ -f "/etc/openEuler-release" ] then - kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + kernel=$(cat /etc/openEuler-release | awk -F ' ' '{print $1}' | tr A-Z a-z) elif [ -f "/etc/centos-release" ] then - kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + kernel=$(cat /etc/centos-release | awk -F ' ' '{print $1}' | tr A-Z a-z) elif [ -f "/etc/kylin-release" ] then - kernel=$(cat /etc/kylin-release | awk -F ' ' '{print $1}' | tr A-Z a-z) + kernel=$(cat /etc/kylin-release | awk -F ' ' '{print $1}' | tr A-Z a-z) else - kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z) + kernel=$(lsb_release -d | awk -F ' ' '{print $2}'| tr A-Z a-z) fi ## to solve kernel="name=openeuler" @@ -51,7 +51,7 @@ plat_form_str="" ################################################################################## if [ "$kernel"x = "red"x ] then - plat_form_str=redhat6.4_"$cpu_bit" + plat_form_str=redhat6.4_"$cpu_bit" fi ################################################################################## @@ -60,7 +60,7 @@ fi ################################################################################## if [ "$kernel"x = "fedora"x ] then - plat_form_str=redhat6.4_"$cpu_bit" + plat_form_str=redhat6.4_"$cpu_bit" fi ################################################################################## @@ -69,13 +69,8 @@ fi ################################################################################## if [ "$kernel"x = "suse"x ] then - version=$(lsb_release -r | awk -F ' ' '{print $2}') - if [ "$version"x = "12"x ] - then - plat_form_str=suse12_"$cpu_bit" - else - plat_form_str=suse11_sp1_"$cpu_bit" - fi + version=$(lsb_release -r | awk -F ' ' '{print $2}') + plat_form_str=suse${version%%\.*}_sp${version##*\.}_"$cpu_bit" fi ################################################################################## @@ -84,8 +79,11 @@ fi ################################################################################## if [ "$kernel"x = "euleros"x ] then - version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) - plat_form_str=euleros2.0_"$version"_"$cpu_bit" + major_version=$(cat /etc/euleros-release | awk '{print $3}') + minor_version=$(cat /etc/euleros-release | awk -F '(' '{print $2}'| awk -F ')' '{print $1}' | tr A-Z a-z) + minor_version=${minor_version%x86_64} + minor_version=${minor_version%aarch64} + plat_form_str=euleros${major_version}_"$minor_version"_"$cpu_bit" fi ################################################################################## @@ -94,11 +92,11 @@ fi ################################################################################## if [ "$kernel"x = "deepin"x ] then - if [ X"$cpu_bit" = X"unknown" ] - then - cpu_bit=$(uname -m) - fi - plat_form_str=deepin15.2_"$cpu_bit" + 
if [ X"$cpu_bit" = X"unknown" ] + then + cpu_bit=$(uname -m) + fi + plat_form_str=deepin15.2_"$cpu_bit" fi ################################################################################## # centos7.6_x86_64 platform @@ -107,12 +105,12 @@ fi ################################################################################## if [ "$kernel"x = "centos"x ] then - if [ X"$cpu_bit" = X"aarch64" ] - then - plat_form_str=centos_7.5_aarch64 + if [ X"$cpu_bit" = X"aarch64" ] + then + plat_form_str=centos_7.5_aarch64 else plat_form_str=centos7.6_"$cpu_bit" - fi + fi fi @@ -138,6 +136,15 @@ if [ "$kernel"x = "kylin"x ];then fi fi +################################################################################## +# ubuntu platform +# the result form like this: ubuntu_x86_64 +################################################################################## +if [ "$kernel"x = "ubuntu"x ] +then + plat_form_str=ubuntu18.04_"$cpu_bit" +fi + ################################################################################## # # other platform @@ -145,8 +152,8 @@ fi ################################################################################## if [ -z "$plat_form_str" ] then - echo "Failed" + echo "Failed" else - echo $plat_form_str + echo $plat_form_str fi diff --git a/src/include/Makefile b/src/include/Makefile index 0ff1b9a68..2e8c9d72d 100644 --- a/src/include/Makefile +++ b/src/include/Makefile @@ -27,7 +27,8 @@ SUBDIRS = bootstrap catalog commands client_logic datatype executor foreign gs_p storage vecexecutor access streaming access/obs \ hotpatch \ gstrace \ - knl knl/knl_guc threadpool workload + knl knl/knl_guc threadpool workload lite + ifneq ($(enable_multiple_nodes), yes) SUBDIRS += storage/smgr storage/lock storage/buf storage/item storage/cstore storage/mot fmgr endif @@ -54,8 +55,11 @@ install: all installdirs $(INSTALL_DATA) utils/errcodes.h '$(DESTDIR)$(includedir_server)/utils' $(INSTALL_DATA) utils/fmgroids.h '$(DESTDIR)$(includedir_server)/utils' # These headers are needed by madlib - $(INSTALL_DATA) $(with_3rd)/$(BINARYPATH)/cjson/comm/include/cjson/cJSON.h '$(DESTDIR)$(includedir_server)'/cjson/cJSON.h + $(MKDIR_P) '$(DESTDIR)$(includedir_server)'/cjson/ + $(INSTALL_DATA) $(CJSON_INCLUDE_PATH)/cjson/cJSON.h '$(DESTDIR)$(includedir_server)'/cjson/cJSON.h +ifeq ($(enable_lite_mode), no) $(INSTALL_DATA) $(with_3rd)/$(BINARYPATH)/libobs/comm/include/eSDKOBS.h '$(DESTDIR)$(includedir_server)'/access/obs/eSDKOBS.h +endif $(INSTALL_DATA) $(srcdir)/access/hash.inl '$(DESTDIR)$(includedir_server)'/access/hash.inl # These headers are needed by fdw $(INSTALL_DATA) gssignal/gs_signal.h '$(DESTDIR)$(includedir_server)/gssignal/gs_signal.h' @@ -102,6 +106,7 @@ install: all installdirs $(INSTALL_DATA) distributelayer/streamTransportCore.h '$(DESTDIR)$(includedir_server)/distributelayer/streamTransportCore.h' $(INSTALL_DATA) fmgr/fmgr_comp.h '$(DESTDIR)$(includedir_server)/fmgr/fmgr_comp.h' $(INSTALL_DATA) fmgr/fmgr_core.h '$(DESTDIR)$(includedir_server)/fmgr/fmgr_core.h' + $(INSTALL_DATA) lite/memory_lite.h '$(DESTDIR)$(includedir_server)/lite/memory_lite.h' # These headers are needed by postgis $(INSTALL_DATA) access/ustore/undo/knl_uundotype.h '$(DESTDIR)$(includedir_server)/access/ustore/undo/knl_uundotype.h' $(INSTALL_DATA) access/ustore/knl_uheap.h '$(DESTDIR)$(includedir_server)/access/ustore/knl_uheap.h' diff --git a/src/include/access/archive/archive_am.h b/src/include/access/archive/archive_am.h index 4238d2981..8f349d77f 100644 --- a/src/include/access/archive/archive_am.h +++ 
b/src/include/access/archive/archive_am.h @@ -40,5 +40,6 @@ int ArchiveWrite(const char* fileName, const char *buffer, const int bufferLengt int ArchiveDelete(const char* fileName, ArchiveConfig *archive_config = NULL); List* ArchiveList(const char* prefix, ArchiveConfig *archive_config = NULL, bool reportError = true, bool shortenConnTime = false); +bool ArchiveFileExist(const char* file_path, ArchiveConfig *archive_config); #endif /* ARCHIVE_AM_H */ diff --git a/src/include/access/archive/nas_am.h b/src/include/access/archive/nas_am.h index 5749a2e90..88887239e 100644 --- a/src/include/access/archive/nas_am.h +++ b/src/include/access/archive/nas_am.h @@ -34,5 +34,6 @@ size_t NasRead(const char* fileName, int offset, char *buffer, int length, Archi int NasWrite(const char* fileName, const char *buffer, const int bufferLength, ArchiveConfig *nas_config = NULL); int NasDelete(const char* fileName, ArchiveConfig *nas_config = NULL); List* NasList(const char* prefix, ArchiveConfig *nas_config = NULL); +bool checkNASFileExist(const char* file_path, ArchiveConfig *nas_config); #endif /* NAS_AM_H */ diff --git a/src/include/access/clog.h b/src/include/access/clog.h index f2aba4acf..ec8a27b42 100644 --- a/src/include/access/clog.h +++ b/src/include/access/clog.h @@ -85,6 +85,7 @@ extern bool IsCLogTruncate(XLogReaderState* record); extern void clog_redo(XLogReaderState* record); extern void clog_desc(StringInfo buf, XLogReaderState* record); +extern const char *clog_type_name(uint8 subtype); #ifdef USE_ASSERT_CHECKING diff --git a/src/include/access/csnlog.h b/src/include/access/csnlog.h index 6ca031d8c..7d93c21d2 100644 --- a/src/include/access/csnlog.h +++ b/src/include/access/csnlog.h @@ -24,12 +24,13 @@ extern void CSNLogSetCommitSeqNo(TransactionId xid, int nsubxids, TransactionId* subxids, CommitSeqNo csn); extern CommitSeqNo CSNLogGetCommitSeqNo(TransactionId xid); extern CommitSeqNo CSNLogGetNestCommitSeqNo(TransactionId xid); +extern CommitSeqNo CSNLogGetDRCommitSeqNo(TransactionId xid); extern Size CSNLOGShmemBuffers(void); extern Size CSNLOGShmemSize(void); extern void CSNLOGShmemInit(void); extern void BootStrapCSNLOG(void); -extern void StartupCSNLOG(bool isUpgrade); +extern void StartupCSNLOG(); extern void ShutdownCSNLOG(void); extern void CheckPointCSNLOG(void); extern void ExtendCSNLOG(TransactionId newestXact); diff --git a/src/include/access/dfs/dfs_common.h b/src/include/access/dfs/dfs_common.h index fe4f02af8..89f8bdc18 100644 --- a/src/include/access/dfs/dfs_common.h +++ b/src/include/access/dfs/dfs_common.h @@ -1,178 +1,182 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
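The new ArchiveFileExist/checkNASFileExist declarations extend the existing ArchiveWrite/ArchiveDelete/ArchiveList family, which routes each call to the NAS or OBS backend according to the archive configuration. A hedged sketch of what such a dispatcher typically looks like; ArchiveMedia, ObsFileExist, and the stat()-based NAS body are assumptions for illustration, not the real openGauss definitions:

```cpp
#include <sys/stat.h>

/* Assumed, simplified stand-in for the real ArchiveConfig. */
enum class ArchiveMedia { NAS, OBS };
struct ArchiveConfig {
    ArchiveMedia media_type;
};

/* NAS side: a plain stat() on the mounted path (simplified). */
bool checkNASFileExist(const char* file_path, ArchiveConfig* /* nas_config */)
{
    struct stat st;
    return stat(file_path, &st) == 0;
}

/* Hypothetical OBS twin; a real check would issue an object-store HEAD request. */
bool ObsFileExist(const char* /* file_path */, ArchiveConfig* /* obs_config */)
{
    return false;
}

/* Generic entry point in the style of ArchiveWrite()/ArchiveDelete(). */
bool ArchiveFileExist(const char* file_path, ArchiveConfig* archive_config)
{
    if (archive_config->media_type == ArchiveMedia::NAS) {
        return checkNASFileExist(file_path, archive_config);
    }
    return ObsFileExist(file_path, archive_config);
}

int main()
{
    ArchiveConfig cfg{ArchiveMedia::NAS};
    return ArchiveFileExist("/etc/hostname", &cfg) ? 0 : 1;
}
```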
- * --------------------------------------------------------------------------------------- - * - * dfs_common.h - * - * IDENTIFICATION - * src/include/access/dfs/dfs_common.h - * - * --------------------------------------------------------------------------------------- - */ - -#ifndef DFS_COMMON_H -#define DFS_COMMON_H - -#include "orc/Exceptions.hh" -#include "access/dfs/dfs_am.h" -#include "catalog/pg_collation.h" -#include "nodes/execnodes.h" -#include "nodes/pg_list.h" -#include "optimizer/clauses.h" -#include "optimizer/subselect.h" -#include "utils/biginteger.h" -#include "utils/builtins.h" -#include "utils/date.h" -#include "utils/dfs_vector.h" -#include "utils/numeric.h" -#include "utils/numeric_gs.h" -#include "utils/lsyscache.h" - -#ifndef MIN -#define MIN(A, B) ((B) < (A) ? (B) : (A)) -#endif - -#define DFS_PRIVATE_ITEM "DfsPrivateItem" -#define DFS_NUMERIC64_MAX_PRECISION 18 - -/* MACROS which help to catch and print the exception. */ -#define DFS_TRY() \ - bool saveStatus = t_thrd.int_cxt.ImmediateInterruptOK; \ - t_thrd.int_cxt.ImmediateInterruptOK = false; \ - bool errOccur = false; \ - int errNo = ERRCODE_SYSTEM_ERROR; \ - StringInfo errMsg = makeStringInfo(); \ - StringInfo errDetail = makeStringInfo(); \ - try -#define DFS_CATCH() \ - catch (abi::__forced_unwind &) \ - { \ - throw; \ - } \ - catch (orc::OrcException & ex) \ - { \ - errOccur = true; \ - errNo = ex.getErrNo(); \ - try { \ - appendStringInfo(errMsg, "%s", ex.what()); \ - appendStringInfo(errDetail, "%s", ex.msg().c_str()); \ - } catch (abi::__forced_unwind &) { \ - throw; \ - } catch (...) { \ - } \ - } \ - catch (std::exception & ex) \ - { \ - errOccur = true; \ - try { \ - appendStringInfo(errMsg, "%s", ex.what()); \ - } catch (abi::__forced_unwind &) { \ - throw; \ - } catch (...) { \ - } \ - } \ - catch (...) \ - { \ - errOccur = true; \ - } \ - t_thrd.int_cxt.ImmediateInterruptOK = saveStatus; \ - saveStatus = InterruptPending; \ - InterruptPending = false; \ - if (errOccur && errDetail->len > 0) { \ - ereport(LOG, (errmodule(MOD_DFS), errmsg("Caught exceptiion for: %s.", errDetail->data))); \ - } \ - InterruptPending = saveStatus; \ - pfree_ext(errDetail->data); \ - pfree_ext(errDetail); - -#define DFS_ERRREPORT(msg, module) \ - if (errOccur) { \ - destroy(); \ - ereport(ERROR, (errcode(errNo), errmodule(module), \ - errmsg(msg, errMsg->data, g_instance.attr.attr_common.PGXCNodeName))); \ - } \ - pfree_ext(errMsg->data); \ - pfree_ext(errMsg); - -#define DFS_ERRREPORT_WITHARGS(msg, module, ...) \ - if (errOccur) { \ - ereport(ERROR, (errcode(errNo), errmodule(module), \ - errmsg(msg, __VA_ARGS__, errMsg->data, g_instance.attr.attr_common.PGXCNodeName))); \ - } \ - pfree_ext(errMsg->data); \ - pfree_ext(errMsg); - -#define DFS_ERRREPORT_WITHOUTARGS(msg, module) \ - if (errOccur) { \ - ereport(ERROR, (errcode(errNo), errmodule(module), \ - errmsg(msg, errMsg->data, g_instance.attr.attr_common.PGXCNodeName))); \ - } \ - pfree_ext(errMsg->data); \ - pfree_ext(errMsg); - - -#define DEFAULT_HIVE_NULL "__HIVE_DEFAULT_PARTITION__" -#define DEFAULT_HIVE_NULL_LENGTH 26 - -/* - * Check partition signature creation exception in case of the content exceeding - * max allowed partition length - */ -#define partition_err_msg \ - "The length of the partition directory exceeds the current value(%d) of the option " \ - "\"dfs_partition_directory_length\", change the option to the greater value." 
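These DFS_TRY/DFS_CATCH/DFS_ERRREPORT macros (removed here and re-added below under the lite-mode guard) bridge two error models: the ORC reader throws C++ exceptions, while the server reports through ereport, whose longjmp must not unwind across foreign stack frames. The macros therefore record the message inside the catch clause and raise the ereport only after the try/catch has fully exited. A minimal sketch of the same bridging idea, with std::string in place of StringInfo and fprintf in place of ereport, and without the interrupt bookkeeping:

```cpp
#include <cstdio>
#include <exception>
#include <stdexcept>
#include <string>

/*
 * Capture the exception message inside the catch block, then report it once
 * the stack is back on safe ground, mirroring DFS_TRY/DFS_CATCH/DFS_ERRREPORT.
 */
template <typename Fn>
void RunGuarded(Fn fn)
{
    bool errOccur = false;
    std::string errMsg;
    try {
        fn();
    } catch (const std::exception& ex) {
        errOccur = true;
        errMsg = ex.what();     /* may itself allocate; the real macros guard this too */
    } catch (...) {
        errOccur = true;
        errMsg = "unknown exception";
    }
    if (errOccur) {
        /* Stand-in for ereport(ERROR, ...): raised outside the catch block. */
        std::fprintf(stderr, "Error occurred on DFS read: %s\n", errMsg.c_str());
    }
}

int main()
{
    RunGuarded([] { throw std::runtime_error("ORC stripe footer is corrupt"); });
    return 0;
}
```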
-#define CHECK_PARTITION_SIGNATURE(rc, dirname) do { \ - if (rc != 0) { \ - ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmodule(MOD_DFS), \ - errmsg(partition_err_msg, u_sess->attr.attr_storage.dfs_max_parsig_length), \ - errdetail("the path name is \"%s\".", dirname))); \ - } \ - securec_check(rc, "\0", "\0"); \ -} while (0) - -#define strpos(p, s) (strstr(p, s) != NULL ? strstr(p, s) - p : -1) -#define basename_len(p, s) (strrchr(p, s) != NULL ? strrchr(p, s) - p : -1) - -#define INT_CMP_HDFS(arg1, arg2, compare) do { \ - if ((arg1) < (arg2)) { \ - compare = -1; \ - } else if ((arg1) > (arg2)) { \ - compare = 1; \ - } else { \ - compare = 0; \ - } \ -} while (0) - -/* - * 1. NAN = NAN - * 2. NAN > non-NAN - * 3. non-NAN < NAN - * 4. non-NAN cmp non-NAN - * 5. arg2 will never be NAN here - */ -#define FLOAT_CMP_HDFS(arg1, arg2, compare) do { \ - if (isnan(arg1)) { \ - compare = 1; \ - } else { \ - if ((arg1) > (arg2)) { \ - compare = 1; \ - } else if ((arg1) < (arg2)) { \ - compare = -1; \ - } else { \ - compare = 0; \ - } \ - } \ -} while (0) - -#endif +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * dfs_common.h + * + * IDENTIFICATION + * src/include/access/dfs/dfs_common.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef DFS_COMMON_H +#define DFS_COMMON_H + +#include "pg_config.h" + +#ifndef ENABLE_LITE_MODE +#include "orc/Exceptions.hh" +#endif +#include "access/dfs/dfs_am.h" +#include "catalog/pg_collation.h" +#include "nodes/execnodes.h" +#include "nodes/pg_list.h" +#include "optimizer/clauses.h" +#include "optimizer/subselect.h" +#include "utils/biginteger.h" +#include "utils/builtins.h" +#include "utils/date.h" +#include "utils/dfs_vector.h" +#include "utils/numeric.h" +#include "utils/numeric_gs.h" +#include "utils/lsyscache.h" + +#ifndef MIN +#define MIN(A, B) ((B) < (A) ? (B) : (A)) +#endif + +#define DFS_PRIVATE_ITEM "DfsPrivateItem" +#define DFS_NUMERIC64_MAX_PRECISION 18 + +/* MACROS which help to catch and print the exception. */ +#define DFS_TRY() \ + bool saveStatus = t_thrd.int_cxt.ImmediateInterruptOK; \ + t_thrd.int_cxt.ImmediateInterruptOK = false; \ + bool errOccur = false; \ + int errNo = ERRCODE_SYSTEM_ERROR; \ + StringInfo errMsg = makeStringInfo(); \ + StringInfo errDetail = makeStringInfo(); \ + try +#define DFS_CATCH() \ + catch (abi::__forced_unwind &) \ + { \ + throw; \ + } \ + catch (orc::OrcException & ex) \ + { \ + errOccur = true; \ + errNo = ex.getErrNo(); \ + try { \ + appendStringInfo(errMsg, "%s", ex.what()); \ + appendStringInfo(errDetail, "%s", ex.msg().c_str()); \ + } catch (abi::__forced_unwind &) { \ + throw; \ + } catch (...) { \ + } \ + } \ + catch (std::exception & ex) \ + { \ + errOccur = true; \ + try { \ + appendStringInfo(errMsg, "%s", ex.what()); \ + } catch (abi::__forced_unwind &) { \ + throw; \ + } catch (...) { \ + } \ + } \ + catch (...) 
\ + { \ + errOccur = true; \ + } \ + t_thrd.int_cxt.ImmediateInterruptOK = saveStatus; \ + saveStatus = InterruptPending; \ + InterruptPending = false; \ + if (errOccur && errDetail->len > 0) { \ + ereport(LOG, (errmodule(MOD_DFS), errmsg("Caught exceptiion for: %s.", errDetail->data))); \ + } \ + InterruptPending = saveStatus; \ + pfree_ext(errDetail->data); \ + pfree_ext(errDetail); + +#define DFS_ERRREPORT(msg, module) \ + if (errOccur) { \ + destroy(); \ + ereport(ERROR, (errcode(errNo), errmodule(module), \ + errmsg(msg, errMsg->data, g_instance.attr.attr_common.PGXCNodeName))); \ + } \ + pfree_ext(errMsg->data); \ + pfree_ext(errMsg); + +#define DFS_ERRREPORT_WITHARGS(msg, module, ...) \ + if (errOccur) { \ + ereport(ERROR, (errcode(errNo), errmodule(module), \ + errmsg(msg, __VA_ARGS__, errMsg->data, g_instance.attr.attr_common.PGXCNodeName))); \ + } \ + pfree_ext(errMsg->data); \ + pfree_ext(errMsg); + +#define DFS_ERRREPORT_WITHOUTARGS(msg, module) \ + if (errOccur) { \ + ereport(ERROR, (errcode(errNo), errmodule(module), \ + errmsg(msg, errMsg->data, g_instance.attr.attr_common.PGXCNodeName))); \ + } \ + pfree_ext(errMsg->data); \ + pfree_ext(errMsg); + + +#define DEFAULT_HIVE_NULL "__HIVE_DEFAULT_PARTITION__" +#define DEFAULT_HIVE_NULL_LENGTH 26 + +/* + * Check partition signature creation exception in case of the content exceeding + * max allowed partition length + */ +#define partition_err_msg \ + "The length of the partition directory exceeds the current value(%d) of the option " \ + "\"dfs_partition_directory_length\", change the option to the greater value." +#define CHECK_PARTITION_SIGNATURE(rc, dirname) do { \ + if (rc != 0) { \ + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmodule(MOD_DFS), \ + errmsg(partition_err_msg, u_sess->attr.attr_storage.dfs_max_parsig_length), \ + errdetail("the path name is \"%s\".", dirname))); \ + } \ + securec_check(rc, "\0", "\0"); \ +} while (0) + +#define strpos(p, s) (strstr(p, s) != NULL ? strstr(p, s) - p : -1) +#define basename_len(p, s) (strrchr(p, s) != NULL ? strrchr(p, s) - p : -1) + +#define INT_CMP_HDFS(arg1, arg2, compare) do { \ + if ((arg1) < (arg2)) { \ + compare = -1; \ + } else if ((arg1) > (arg2)) { \ + compare = 1; \ + } else { \ + compare = 0; \ + } \ +} while (0) + +/* + * 1. NAN = NAN + * 2. NAN > non-NAN + * 3. non-NAN < NAN + * 4. non-NAN cmp non-NAN + * 5. 
arg2 will never be NAN here + */ +#define FLOAT_CMP_HDFS(arg1, arg2, compare) do { \ + if (isnan(arg1)) { \ + compare = 1; \ + } else { \ + if ((arg1) > (arg2)) { \ + compare = 1; \ + } else if ((arg1) < (arg2)) { \ + compare = -1; \ + } else { \ + compare = 0; \ + } \ + } \ +} while (0) + +#endif diff --git a/src/include/access/dfs/dfs_query.h b/src/include/access/dfs/dfs_query.h index 7e5f8c134..90e5935a7 100644 --- a/src/include/access/dfs/dfs_query.h +++ b/src/include/access/dfs/dfs_query.h @@ -26,7 +26,11 @@ #ifndef DFS_QUERY_H #define DFS_QUERY_H +#include "pg_config.h" + +#ifndef ENABLE_LITE_MODE #include "orc/Exceptions.hh" +#endif #include "access/dfs/dfs_am.h" #include "catalog/pg_collation.h" #include "nodes/execnodes.h" @@ -44,6 +48,7 @@ #define DFS_PRIVATE_ITEM "DfsPrivateItem" #define DFS_NUMERIC64_MAX_PRECISION 18 +#ifdef ENABLE_LLVM_COMPILE extern bool CodeGenThreadObjectReady(); extern bool ForeignScanExprCodeGen(Expr *expr, PlanState *parent, void **jittedFunc); @@ -52,6 +57,7 @@ extern bool ForeignScanExprCodeGen(Expr *expr, PlanState *parent, void **jittedF */ typedef bool (*evaPredicateDouble)(double value); typedef bool (*evaPredicateInt)(int64_t value); +#endif /* * define the strategy numbers for hdfs foriegn scan. -1 is invalid, @@ -103,9 +109,11 @@ public: Oid m_collation; // collation to use, if needed bool m_keepFalse; // the check result keeps false if it is true T *m_argument; // store value +#ifdef ENABLE_LLVM_COMPILE char *m_predFunc; /* IR function pointer */ bool m_isPredJitted; /* whether use LLVM optimization or not. if the m_isPredJitted is true, use it */ void *m_jittedFunc; /* machine code address pointer. */ +#endif public: /* @@ -124,9 +132,11 @@ public: m_keepFalse(false) { m_argument = New(CurrentMemoryContext) T(strategy); +#ifdef ENABLE_LLVM_COMPILE m_predFunc = NULL; m_isPredJitted = false; m_jittedFunc = NULL; +#endif } ~HdfsScanPredicate() @@ -184,6 +194,7 @@ public: } }; +#ifdef ENABLE_LLVM_COMPILE /* * Brief : Function to check if a value of basic type can match the clauses list pushed * down. Here we do not check the length of scanClauses, and the caller need ensure it. @@ -261,6 +272,8 @@ bool HdfsPredicateCheckValueDoubleForLlvm(baseType &value, List *&scanClauses) return true; } +#endif + /** * @Description: Identify the qual which could be pushed down to diff --git a/src/include/access/dfs/dfs_wrapper.h b/src/include/access/dfs/dfs_wrapper.h index 8fa4fb302..b77ccf0cb 100644 --- a/src/include/access/dfs/dfs_wrapper.h +++ b/src/include/access/dfs/dfs_wrapper.h @@ -1,482 +1,484 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. 
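The dfs_query.h hunk a little above shows the guard discipline this patch applies throughout: pull in pg_config.h before anything else so the feature macros are settled, fence the ORC include behind ENABLE_LITE_MODE, and fence LLVM-only members, their constructor initializers, and the helper templates together behind ENABLE_LLVM_COMPILE so a lite build never references a missing symbol. A compressed illustration of that layout; FeatureProbe is a made-up class, and the #define at the top exists only so the sketch compiles on its own:

```cpp
/* Normally provided by pg_config.h; defined here so the sketch is standalone. */
#define ENABLE_LLVM_COMPILE 1

#ifndef ENABLE_LITE_MODE
/* #include "orc/Exceptions.hh" -- heavyweight dependency, absent in lite builds */
#endif

class FeatureProbe {
public:
    FeatureProbe() : m_value(0)
    {
#ifdef ENABLE_LLVM_COMPILE
        m_jittedFunc = nullptr;   /* JIT state is initialized only in full builds */
#endif
    }

private:
    int m_value;
#ifdef ENABLE_LLVM_COMPILE
    void* m_jittedFunc;           /* member and its initialization fenced together */
#endif
};

int main()
{
    FeatureProbe probe;           /* layout differs between lite and full builds */
    (void)probe;
    return 0;
}
```

Keeping the member and every use of it inside the same macro means a lite build fails loudly at compile time if a new reference slips in, rather than at link time against libobs or LLVM.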
- * --------------------------------------------------------------------------------------- - * - * dfs_wrapper.h - * - * IDENTIFICATION - * src/include/access/dfs/dfs_wrapper.h - * - * --------------------------------------------------------------------------------------- - */ - -#ifndef DFS_WRAPPER_H -#define DFS_WRAPPER_H -#include "orc/Exceptions.hh" -#include "access/dfs/dfs_am.h" -#include "access/dfs/dfs_common.h" -#include "catalog/pg_collation.h" -#include "nodes/execnodes.h" -#include "nodes/pg_list.h" -#include "optimizer/clauses.h" -#include "optimizer/subselect.h" -#include "utils/biginteger.h" -#include "utils/builtins.h" -#include "utils/date.h" -#include "utils/dfs_vector.h" -#include "utils/numeric.h" -#include "utils/numeric_gs.h" -#include "utils/lsyscache.h" - -/* - * The base template class for all the data type wrapper to inherit. - */ -template -class HdfsCheckWrapper : public BaseObject { -public: - virtual ~HdfsCheckWrapper() - { - } - /* - * Abstract function to compare the argument and velue. - * @_in param argument: The target to be compared with the member value. - * @_in param collation: the collation of the var, it is used in function varstr_cmp. - * @return Return 1=eq, -1=lt, 1=gt. - */ - virtual int compareT(T argument, Oid collation) = 0; - - /* - * Set the value in the class which is converted from datum. - * @_in param datumValue: the datum value to be set. - * @_in param datumType: the data type oid of the datum. - * @_in param typeMod: the mod of the datum type. - */ - virtual void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) = 0; - - /* - * A template function to check if the argument transfered in can be filtered. All the non-string - * wrapper will inherit this function for call. - * - * @_in param argument: the value transfered in to be checked - * @_in param strategy: the strategy used in the predicate. - * @_in param collation: the collation of the var, it is used in function varstr_cmp. - * @return True: match and not filtered; False: filtered and set isSelected to false. - */ - inline bool CheckPredicate(T argument, Oid collation) - { - int cmpResult = compareT(argument, collation); - return HdfsCheckCompareResult(cmpResult); - } - - /* - * Get the value of the wrapper. - * @return Return the value in the class. - */ - inline const T getValue() - { - return value; - } - - /* - * Get the datum of the wrapper. - * @return the datum value. - */ - inline Datum getDatum() - { - return dValue; - } - -private: - /* - * Check the compare result depending on the strategy of the predicate pushed down. - * - * @_in param cmpResult: The result of comparision, (o=eq, -1=lt, 1=gt) - * - * @return True: indicates the value of the row and column match the preidcate pushed down; False: - * indicates the value does not match the predicate and need to be filtered. - */ - bool HdfsCheckCompareResult(int cmpResult); - -protected: - /* Store the real value of the wrapper with type T. */ - T value; - Datum dValue; - - /* - * The strategy of the predicate's operator, which must between HDFS_QUERY_EQ - * and HDFS_QUERY_NE2. - */ - HdfsQueryOperator strategy; - int32 typeMod; - Oid datumType; -}; - -/* - * Check the result of comparation according to the strategy. 
- */ -template -inline bool HdfsCheckWrapper::HdfsCheckCompareResult(int cmpResult) -{ - bool result = false; - switch (strategy) { - case HDFS_QUERY_LT: { - if (0 > cmpResult) - result = true; - break; - } - case HDFS_QUERY_LTE: { - if (0 >= cmpResult) - result = true; - break; - } - case HDFS_QUERY_EQ: { - if (0 == cmpResult) - result = true; - break; - } - case HDFS_QUERY_GTE: { - if (0 <= cmpResult) - result = true; - break; - } - case HDFS_QUERY_GT: { - if (0 < cmpResult) - result = true; - break; - } - case HDFS_QUERY_NE1: - case HDFS_QUERY_NE2: { - if (0 != cmpResult) - result = true; - break; - } - case HDFS_QUERY_INVALID: - default: { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_DFS), - errmsg("Find unsupported strategy %d!", strategy))); - } - } - - return result; -} - -/* - * A wrapper class of int64 type, inherit HdfsCheckWrapper. Here we do not seperate - * int16, int 32 and int64 wrapper just because what we read from orc file directly for int16,int32 and int64 - * is int64, then it will convert to int16,int32 or int64 datum. Since the predicate need to be checked before - * the conversion from basic type to datum, so we will compare the int64 value directly and needn't - * Int32Wrapper and Int16Wrapper. - */ -class Int64Wrapper : public HdfsCheckWrapper { -public: - Int64Wrapper(HdfsQueryOperator _strategy) - { - value = 0; - dValue = PointerGetDatum(NULL); - strategy = _strategy; - typeMod = 0; - datumType = InvalidOid; - } - virtual ~Int64Wrapper() - { - } - inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) - { - dValue = datumValue; - - if (INT1OID == datumType) { - value = DatumGetChar(datumValue); - } else if (INT2OID == datumType) { - value = DatumGetInt16(datumValue); - } else if (INT4OID == datumType) { - value = DatumGetInt32(datumValue); - } else if (NUMERICOID == datumType) { - int dscale = (unsigned int)(typeMod - VARHDRSZ) & 0xffff; - Numeric n = DatumGetNumeric(datumValue); - if (NUMERIC_IS_BI(n)) { - Assert(dscale == NUMERIC_BI_SCALE(n)); - Assert(NUMERIC_IS_BI64(n)); - - value = NUMERIC_64VALUE(n); - } else - value = convert_short_numeric_to_int64_byscale(n, dscale); - } else { - value = DatumGetInt64(datumValue); - } - } - -private: - inline int compareT(int64 argument, Oid collation) - { - int cmp = 0; - INT_CMP_HDFS(argument, value, cmp); - return cmp; - } -}; - -/* just for decimal128, 18 < p <= 38 */ -class Int128Wrapper : public HdfsCheckWrapper { -public: - Int128Wrapper(HdfsQueryOperator _strategy) - { - value = 0; - dValue = PointerGetDatum(NULL); - strategy = _strategy; - typeMod = 0; - datumType = InvalidOid; - } - virtual ~Int128Wrapper() - { - } - inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) - { - int dscale = (unsigned int)(typeMod - VARHDRSZ) & 0xffff; - - dValue = datumValue; - Numeric n = DatumGetNumeric(datumValue); - if (NUMERIC_IS_BI(n)) { - Assert(dscale == NUMERIC_BI_SCALE(n)); - - if (NUMERIC_IS_BI128(n)) { - errno_t rc = EOK; - rc = memcpy_s(&value, sizeof(int128), (n)->choice.n_bi.n_data, sizeof(int128)); - securec_check(rc, "\0", "\0"); - } else - value = NUMERIC_64VALUE(n); - } else - convert_short_numeric_to_int128_byscale(n, dscale, value); - } - -private: - inline int compareT(int128 argument, Oid collation) - { - int cmp = 0; - INT_CMP_HDFS(argument, value, cmp); - return cmp; - } -}; - -/* - * A wrapper class of bool type, inherit HdfsCheckWrapper. The actual value stored is a char here. 
- */ -class BoolWrapper : public HdfsCheckWrapper { -public: - BoolWrapper(HdfsQueryOperator _strategy) - { - value = 0; - dValue = PointerGetDatum(NULL); - strategy = _strategy; - typeMod = 0; - datumType = InvalidOid; - } - virtual ~BoolWrapper() - { - } - inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) - { - dValue = datumValue; - value = DatumGetBool(datumValue); - } - -private: - inline int compareT(char argument, Oid collation) - { - int cmp = 0; - INT_CMP_HDFS(argument, value, cmp); - return cmp; - } -}; - -/* - * A wrapper class of float8 type, inherit HdfsCheckWrapper. - */ -class Float8Wrapper : public HdfsCheckWrapper { -public: - Float8Wrapper(HdfsQueryOperator _strategy) - { - value = 0; - dValue = PointerGetDatum(NULL); - strategy = _strategy; - typeMod = 0; - datumType = InvalidOid; - } - virtual ~Float8Wrapper() - { - } - inline void SetValueFromDatum(Datum datumValue, Oid _datumType, int32 typeMod) - { - datumType = _datumType; - /* - * For float4/8, there may be data convertion of the const value, so we - * need to process seperately here. - */ - if (FLOAT4OID == datumType) { - value = (float8)DatumGetFloat4(datumValue); - dValue = Float8GetDatum(value); - } else // float8 - { - value = DatumGetFloat8(datumValue); - dValue = datumValue; - } - } - -private: - inline int compareT(float8 argument, Oid collation) - { - int cmp = 0; - FLOAT_CMP_HDFS(argument, value, cmp); - return cmp; - } -}; - -/* - * A wrapper class of Timestamp type, inherit HdfsCheckWrapper. Since there is not actual Date type - * in PG, we just need the wrapper for Timestamp here, and convert the hive date to timestamp before - * the comparasion with predicate. - */ -class TimestampWrapper : public HdfsCheckWrapper { -public: - TimestampWrapper(HdfsQueryOperator _strategy) - { - value = 0; - dValue = PointerGetDatum(NULL); - strategy = _strategy; - typeMod = 0; - datumType = InvalidOid; - } - virtual ~TimestampWrapper() - { - } - inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) - { - dValue = datumValue; - - /* - * For different time types, we use different ways to convert to timestamp. - */ - if (DATEOID == datumType) { - value = date2timestamp(DatumGetDateADT(datumValue)); - } else if (TIMESTAMPTZOID == datumType) { - Datum out = DirectFunctionCall1(timestamptz_out, datumValue); - Datum in = DirectFunctionCall3(timestamp_in, out, ObjectIdGetDatum(InvalidOid), Int32GetDatum(typeMod)); - value = DatumGetTimestamp(in); - } else { - value = DatumGetTimestamp(datumValue); - } - } - -private: - inline int compareT(Timestamp argument, Oid collation) - { - int cmp = 0; -#ifdef HAVE_INT64_TIMESTAMP - INT_CMP_HDFS(argument, value, cmp); -#else - FLOAT_CMP_HDFS(argument, value, cmp); -#endif - return cmp; - } -}; - -/* - * Since we only care about equality or not-equality, we can avoid all the - * expense of strcoll() here, and just do bitwise comparison. - * The caller must ensure that both src1 and src2 are valid strings with '\0' at end. - * return 0 if the two strings are equal. - */ -static int stringEQ(const char *src1, const char *src2) -{ - int result = 0; - int len1 = strlen(src1); - int len2 = strlen(src2); - if (len1 != len2) { - result = 1; - } else { - result = memcmp(src1, src2, len1); - } - return result; -} - -/* - * A wrapper class of string type includes varchar,bpchar,text because these types is actually the same for hive. 
- */ -class StringWrapper : public HdfsCheckWrapper { -public: - StringWrapper(HdfsQueryOperator _strategy) - { - value = NULL; - dValue = PointerGetDatum(NULL); - strategy = _strategy; - typeMod = 0; - datumType = InvalidOid; - } - virtual ~StringWrapper() - { - } - inline void SetValueFromDatum(Datum datumValue, Oid _datumType, int32 typeMod) - { - dValue = datumValue; - datumType = _datumType; - - if (BPCHAROID == datumType) { - int varLen = 0; - char *str = TextDatumGetCString(datumValue); - int strLen = strlen(str); - - /* variable length */ - if (typeMod < (int32)VARHDRSZ) { - varLen = strLen; - } else /* fixed length */ - { - varLen = typeMod - VARHDRSZ; - } - - /* - * When the length of the var is larger than the const string's length, it needs to - * add some blanks in the tail. - */ - if (varLen >= strLen) { - Datum bpchar = DirectFunctionCall3(bpcharin, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), - Int32GetDatum(typeMod)); - value = TextDatumGetCString(bpchar); - } else { - value = str; - } - } else { - value = TextDatumGetCString(datumValue); - } - } - -private: - inline int compareT(char *argument, Oid collation) - { - int cmp = 0; - if (HDFS_QUERY_EQ == strategy || HDFS_QUERY_NE1 == strategy || HDFS_QUERY_NE2 == strategy) { - cmp = stringEQ(argument, value); - } else { - cmp = varstr_cmp(argument, strlen(argument), value, strlen(value), collation); - } - return cmp; - } -}; - -/* - * This wrapper is only used for HdfsPredicateCheckNull, and there is need to store any value here. We don't - * remove it because Null check is processed specially and keeping a special empty wrapper for null can - * make it less confused. - */ -class NullWrapper { -}; - -#endif +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * dfs_wrapper.h + * + * IDENTIFICATION + * src/include/access/dfs/dfs_wrapper.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef DFS_WRAPPER_H +#define DFS_WRAPPER_H +#ifndef ENABLE_LITE_MODE +#include "orc/Exceptions.hh" +#endif +#include "access/dfs/dfs_am.h" +#include "access/dfs/dfs_common.h" +#include "catalog/pg_collation.h" +#include "nodes/execnodes.h" +#include "nodes/pg_list.h" +#include "optimizer/clauses.h" +#include "optimizer/subselect.h" +#include "utils/biginteger.h" +#include "utils/builtins.h" +#include "utils/date.h" +#include "utils/dfs_vector.h" +#include "utils/numeric.h" +#include "utils/numeric_gs.h" +#include "utils/lsyscache.h" + +/* + * The base template class for all the data type wrapper to inherit. + */ +template +class HdfsCheckWrapper : public BaseObject { +public: + virtual ~HdfsCheckWrapper() + { + } + /* + * Abstract function to compare the argument and velue. + * @_in param argument: The target to be compared with the member value. + * @_in param collation: the collation of the var, it is used in function varstr_cmp. + * @return Return 1=eq, -1=lt, 1=gt. 
+ */ + virtual int compareT(T argument, Oid collation) = 0; + + /* + * Set the value in the class which is converted from datum. + * @_in param datumValue: the datum value to be set. + * @_in param datumType: the data type oid of the datum. + * @_in param typeMod: the mod of the datum type. + */ + virtual void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) = 0; + + /* + * A template function to check if the argument transfered in can be filtered. All the non-string + * wrapper will inherit this function for call. + * + * @_in param argument: the value transfered in to be checked + * @_in param strategy: the strategy used in the predicate. + * @_in param collation: the collation of the var, it is used in function varstr_cmp. + * @return True: match and not filtered; False: filtered and set isSelected to false. + */ + inline bool CheckPredicate(T argument, Oid collation) + { + int cmpResult = compareT(argument, collation); + return HdfsCheckCompareResult(cmpResult); + } + + /* + * Get the value of the wrapper. + * @return Return the value in the class. + */ + inline const T getValue() + { + return value; + } + + /* + * Get the datum of the wrapper. + * @return the datum value. + */ + inline Datum getDatum() + { + return dValue; + } + +private: + /* + * Check the compare result depending on the strategy of the predicate pushed down. + * + * @_in param cmpResult: The result of comparision, (o=eq, -1=lt, 1=gt) + * + * @return True: indicates the value of the row and column match the preidcate pushed down; False: + * indicates the value does not match the predicate and need to be filtered. + */ + bool HdfsCheckCompareResult(int cmpResult); + +protected: + /* Store the real value of the wrapper with type T. */ + T value; + Datum dValue; + + /* + * The strategy of the predicate's operator, which must between HDFS_QUERY_EQ + * and HDFS_QUERY_NE2. + */ + HdfsQueryOperator strategy; + int32 typeMod; + Oid datumType; +}; + +/* + * Check the result of comparation according to the strategy. + */ +template +inline bool HdfsCheckWrapper::HdfsCheckCompareResult(int cmpResult) +{ + bool result = false; + switch (strategy) { + case HDFS_QUERY_LT: { + if (0 > cmpResult) + result = true; + break; + } + case HDFS_QUERY_LTE: { + if (0 >= cmpResult) + result = true; + break; + } + case HDFS_QUERY_EQ: { + if (0 == cmpResult) + result = true; + break; + } + case HDFS_QUERY_GTE: { + if (0 <= cmpResult) + result = true; + break; + } + case HDFS_QUERY_GT: { + if (0 < cmpResult) + result = true; + break; + } + case HDFS_QUERY_NE1: + case HDFS_QUERY_NE2: { + if (0 != cmpResult) + result = true; + break; + } + case HDFS_QUERY_INVALID: + default: { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmodule(MOD_DFS), + errmsg("Find unsupported strategy %d!", strategy))); + } + } + + return result; +} + +/* + * A wrapper class of int64 type, inherit HdfsCheckWrapper. Here we do not seperate + * int16, int 32 and int64 wrapper just because what we read from orc file directly for int16,int32 and int64 + * is int64, then it will convert to int16,int32 or int64 datum. Since the predicate need to be checked before + * the conversion from basic type to datum, so we will compare the int64 value directly and needn't + * Int32Wrapper and Int16Wrapper. 
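HdfsCheckCompareResult above is the heart of the wrapper hierarchy: every typed wrapper reduces its pushed-down predicate to one three-way compareT result, and a single strategy switch turns that into a keep-or-filter decision. The same shape in standalone form; EvalStrategy and ThreeWayCheck are illustrative names, not the HDFS_QUERY_* enum:

```cpp
#include <cassert>

/*
 * Illustrative mirror of HdfsQueryOperator / HdfsCheckCompareResult: the
 * wrapper computes cmp once (-1, 0, 1) and the switch applies the operator.
 */
enum class EvalStrategy { LT, LTE, EQ, GTE, GT, NE };

static bool ThreeWayCheck(int cmp, EvalStrategy s)
{
    switch (s) {
        case EvalStrategy::LT:  return cmp < 0;
        case EvalStrategy::LTE: return cmp <= 0;
        case EvalStrategy::EQ:  return cmp == 0;
        case EvalStrategy::GTE: return cmp >= 0;
        case EvalStrategy::GT:  return cmp > 0;
        case EvalStrategy::NE:  return cmp != 0;
    }
    return false;   /* unreachable; the real code ereports on an invalid strategy */
}

int main()
{
    /* cmp is compareT(argument, value): row value versus predicate constant. */
    assert(ThreeWayCheck(-1, EvalStrategy::LT));   /* argument < value passes "<" */
    assert(!ThreeWayCheck(0, EvalStrategy::NE));   /* equal rows fail "<>" */
    return 0;
}
```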
+ */ +class Int64Wrapper : public HdfsCheckWrapper { +public: + Int64Wrapper(HdfsQueryOperator _strategy) + { + value = 0; + dValue = PointerGetDatum(NULL); + strategy = _strategy; + typeMod = 0; + datumType = InvalidOid; + } + virtual ~Int64Wrapper() + { + } + inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) + { + dValue = datumValue; + + if (INT1OID == datumType) { + value = DatumGetChar(datumValue); + } else if (INT2OID == datumType) { + value = DatumGetInt16(datumValue); + } else if (INT4OID == datumType) { + value = DatumGetInt32(datumValue); + } else if (NUMERICOID == datumType) { + int dscale = (unsigned int)(typeMod - VARHDRSZ) & 0xffff; + Numeric n = DatumGetNumeric(datumValue); + if (NUMERIC_IS_BI(n)) { + Assert(dscale == NUMERIC_BI_SCALE(n)); + Assert(NUMERIC_IS_BI64(n)); + + value = NUMERIC_64VALUE(n); + } else + value = convert_short_numeric_to_int64_byscale(n, dscale); + } else { + value = DatumGetInt64(datumValue); + } + } + +private: + inline int compareT(int64 argument, Oid collation) + { + int cmp = 0; + INT_CMP_HDFS(argument, value, cmp); + return cmp; + } +}; + +/* just for decimal128, 18 < p <= 38 */ +class Int128Wrapper : public HdfsCheckWrapper { +public: + Int128Wrapper(HdfsQueryOperator _strategy) + { + value = 0; + dValue = PointerGetDatum(NULL); + strategy = _strategy; + typeMod = 0; + datumType = InvalidOid; + } + virtual ~Int128Wrapper() + { + } + inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) + { + int dscale = (unsigned int)(typeMod - VARHDRSZ) & 0xffff; + + dValue = datumValue; + Numeric n = DatumGetNumeric(datumValue); + if (NUMERIC_IS_BI(n)) { + Assert(dscale == NUMERIC_BI_SCALE(n)); + + if (NUMERIC_IS_BI128(n)) { + errno_t rc = EOK; + rc = memcpy_s(&value, sizeof(int128), (n)->choice.n_bi.n_data, sizeof(int128)); + securec_check(rc, "\0", "\0"); + } else + value = NUMERIC_64VALUE(n); + } else + convert_short_numeric_to_int128_byscale(n, dscale, value); + } + +private: + inline int compareT(int128 argument, Oid collation) + { + int cmp = 0; + INT_CMP_HDFS(argument, value, cmp); + return cmp; + } +}; + +/* + * A wrapper class of bool type, inherit HdfsCheckWrapper. The actual value stored is a char here. + */ +class BoolWrapper : public HdfsCheckWrapper { +public: + BoolWrapper(HdfsQueryOperator _strategy) + { + value = 0; + dValue = PointerGetDatum(NULL); + strategy = _strategy; + typeMod = 0; + datumType = InvalidOid; + } + virtual ~BoolWrapper() + { + } + inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) + { + dValue = datumValue; + value = DatumGetBool(datumValue); + } + +private: + inline int compareT(char argument, Oid collation) + { + int cmp = 0; + INT_CMP_HDFS(argument, value, cmp); + return cmp; + } +}; + +/* + * A wrapper class of float8 type, inherit HdfsCheckWrapper. + */ +class Float8Wrapper : public HdfsCheckWrapper { +public: + Float8Wrapper(HdfsQueryOperator _strategy) + { + value = 0; + dValue = PointerGetDatum(NULL); + strategy = _strategy; + typeMod = 0; + datumType = InvalidOid; + } + virtual ~Float8Wrapper() + { + } + inline void SetValueFromDatum(Datum datumValue, Oid _datumType, int32 typeMod) + { + datumType = _datumType; + /* + * For float4/8, there may be data convertion of the const value, so we + * need to process seperately here. 
+ */ + if (FLOAT4OID == datumType) { + value = (float8)DatumGetFloat4(datumValue); + dValue = Float8GetDatum(value); + } else // float8 + { + value = DatumGetFloat8(datumValue); + dValue = datumValue; + } + } + +private: + inline int compareT(float8 argument, Oid collation) + { + int cmp = 0; + FLOAT_CMP_HDFS(argument, value, cmp); + return cmp; + } +}; + +/* + * A wrapper class of Timestamp type, inherit HdfsCheckWrapper. Since there is not actual Date type + * in PG, we just need the wrapper for Timestamp here, and convert the hive date to timestamp before + * the comparasion with predicate. + */ +class TimestampWrapper : public HdfsCheckWrapper { +public: + TimestampWrapper(HdfsQueryOperator _strategy) + { + value = 0; + dValue = PointerGetDatum(NULL); + strategy = _strategy; + typeMod = 0; + datumType = InvalidOid; + } + virtual ~TimestampWrapper() + { + } + inline void SetValueFromDatum(Datum datumValue, Oid datumType, int32 typeMod) + { + dValue = datumValue; + + /* + * For different time types, we use different ways to convert to timestamp. + */ + if (DATEOID == datumType) { + value = date2timestamp(DatumGetDateADT(datumValue)); + } else if (TIMESTAMPTZOID == datumType) { + Datum out = DirectFunctionCall1(timestamptz_out, datumValue); + Datum in = DirectFunctionCall3(timestamp_in, out, ObjectIdGetDatum(InvalidOid), Int32GetDatum(typeMod)); + value = DatumGetTimestamp(in); + } else { + value = DatumGetTimestamp(datumValue); + } + } + +private: + inline int compareT(Timestamp argument, Oid collation) + { + int cmp = 0; +#ifdef HAVE_INT64_TIMESTAMP + INT_CMP_HDFS(argument, value, cmp); +#else + FLOAT_CMP_HDFS(argument, value, cmp); +#endif + return cmp; + } +}; + +/* + * Since we only care about equality or not-equality, we can avoid all the + * expense of strcoll() here, and just do bitwise comparison. + * The caller must ensure that both src1 and src2 are valid strings with '\0' at end. + * return 0 if the two strings are equal. + */ +static int stringEQ(const char *src1, const char *src2) +{ + int result = 0; + int len1 = strlen(src1); + int len2 = strlen(src2); + if (len1 != len2) { + result = 1; + } else { + result = memcmp(src1, src2, len1); + } + return result; +} + +/* + * A wrapper class of string type includes varchar,bpchar,text because these types is actually the same for hive. + */ +class StringWrapper : public HdfsCheckWrapper { +public: + StringWrapper(HdfsQueryOperator _strategy) + { + value = NULL; + dValue = PointerGetDatum(NULL); + strategy = _strategy; + typeMod = 0; + datumType = InvalidOid; + } + virtual ~StringWrapper() + { + } + inline void SetValueFromDatum(Datum datumValue, Oid _datumType, int32 typeMod) + { + dValue = datumValue; + datumType = _datumType; + + if (BPCHAROID == datumType) { + int varLen = 0; + char *str = TextDatumGetCString(datumValue); + int strLen = strlen(str); + + /* variable length */ + if (typeMod < (int32)VARHDRSZ) { + varLen = strLen; + } else /* fixed length */ + { + varLen = typeMod - VARHDRSZ; + } + + /* + * When the length of the var is larger than the const string's length, it needs to + * add some blanks in the tail. 
+ */ + if (varLen >= strLen) { + Datum bpchar = DirectFunctionCall3(bpcharin, CStringGetDatum(str), ObjectIdGetDatum(InvalidOid), + Int32GetDatum(typeMod)); + value = TextDatumGetCString(bpchar); + } else { + value = str; + } + } else { + value = TextDatumGetCString(datumValue); + } + } + +private: + inline int compareT(char *argument, Oid collation) + { + int cmp = 0; + if (HDFS_QUERY_EQ == strategy || HDFS_QUERY_NE1 == strategy || HDFS_QUERY_NE2 == strategy) { + cmp = stringEQ(argument, value); + } else { + cmp = varstr_cmp(argument, strlen(argument), value, strlen(value), collation); + } + return cmp; + } +}; + +/* + * This wrapper is only used for HdfsPredicateCheckNull, and there is need to store any value here. We don't + * remove it because Null check is processed specially and keeping a special empty wrapper for null can + * make it less confused. + */ +class NullWrapper { +}; + +#endif diff --git a/src/include/access/double_write.h b/src/include/access/double_write.h index 6dac0fc6f..2b2e26a29 100644 --- a/src/include/access/double_write.h +++ b/src/include/access/double_write.h @@ -12,11 +12,11 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * --------------------------------------------------------------------------------------- - * + * * double_write.h * Define some inline function of double write and export some interfaces. - * - * + * + * * IDENTIFICATION * src/include/access/double_write.h * @@ -31,8 +31,7 @@ typedef enum BufTagVer { ORIGIN_TAG = 0, - HASHBUCKET_TAG, - PAGE_COMPRESS_TAG + HASHBUCKET_TAG } BufTagVer; typedef struct st_dw_batch { @@ -124,6 +123,10 @@ const uint16 SINGLE_BLOCK_TAG_NUM = BLCKSZ / sizeof(dw_single_flush_item); static const uint32 DW_BOOTSTRAP_VERSION = 91261; const uint32 DW_SUPPORT_SINGLE_FLUSH_VERSION = 92266; const uint32 DW_SUPPORT_NEW_SINGLE_FLUSH = 92433; +const uint32 DW_SUPPORT_MULTIFILE_FLUSH = 92568; +const uint32 DW_SUPPORT_BCM_VERSION = 92550; +const uint32 DW_SUPPORT_REABLE_DOUBLE_WRITE = 92590; + /* dw single flush file information, version is DW_SUPPORT_SINGLE_FLUSH_VERSION */ /* file head + storage buffer tag page + data page */ @@ -170,6 +173,15 @@ inline bool dw_verify_file_head(dw_file_head_t* file_head) return file_head->head.dwn == file_head->tail.dwn && dw_verify_file_head_checksum(file_head); } +inline void dw_calc_meta_checksum(dw_batch_meta_file* meta) +{ + uint32 checksum; + meta->checksum = 0; + checksum = pg_checksum_block((char*)meta, sizeof(dw_batch_meta_file)); + meta->checksum = REDUCE_CKS2UINT16(checksum); +} + + inline void dw_calc_file_head_checksum(dw_file_head_t* file_head) { uint32 checksum; @@ -264,7 +276,7 @@ inline bool dw_enabled() * @param buf_id_arr the buffer id array which is used to get page from global buffer * @param size the array size */ -void dw_perform_batch_flush(uint32 size, CkptSortItem *dirty_buf_list, ThrdDwCxt* thrd_dw_cxt); +void dw_perform_batch_flush(uint32 size, CkptSortItem *dirty_buf_list, int thread_id, ThrdDwCxt* thrd_dw_cxt); /** * truncate the pages in double write file after ckpt or before exit @@ -291,6 +303,19 @@ inline bool dw_page_writer_running() extern bool free_space_enough(int buf_id); +extern void dw_generate_single_file(); +extern void dw_recovery_partial_write_single(); +extern void dw_single_file_truncate(bool is_first); +extern void dw_generate_new_single_file(); +extern void dw_cxt_init_single(); + +extern bool dw_verify_pg_checksum(PageHeader page_header, BlockNumber blockNum, bool dw_file); +extern void 
dw_log_recovery_page(int elevel, const char *state, BufferTag buf_tag); +extern bool dw_read_data_page(BufferTag buf_tag, SMgrRelation reln, char* data_block); +extern void dw_log_page_header(PageHeader page); +extern int buftag_compare(const void *pa, const void *pb); +extern void dw_encrypt_page(BufferTag tag, char* buf); + extern uint16 first_version_dw_single_flush(BufferDesc *buf_desc); extern void dw_single_file_recycle(bool is_first); extern bool backend_can_flush_dirty_page(); @@ -298,9 +323,10 @@ extern void dw_force_reset_single_file(); extern void reset_dw_pos_flag(); extern void clean_proc_dw_buf(); extern void init_proc_dw_buf(); -extern void dw_generate_new_single_file(); extern void dw_prepare_file_head(char *file_head, uint16 start, uint16 dwn, int32 dw_version = -1); extern void dw_set_pg_checksum(char *page, BlockNumber blockNum); +extern void dw_extend_file(int fd, const void *buf, int buf_size, int64 size, + int64 file_expect_size, bool single, char* file_name); extern void dw_transfer_phybuffer_addr(const BufferDesc *buf_desc, BufferTag *buf_tag); uint16 second_version_dw_single_flush(BufferTag tag, Block block, XLogRecPtr page_lsn, @@ -313,4 +339,16 @@ extern uint16 dw_single_flush_internal_old(BufferTag tag, Block block, XLogRecPt BufferTag phy_tag, bool *dw_flush); extern void dw_single_old_file_truncate(); +extern void dw_recover_batch_meta_file(int fd, dw_batch_meta_file *batch_meta_file); +extern void dw_fetch_batch_file_name(int i, char* buf); +extern void wait_all_dw_page_finish_flush(); +extern void dw_generate_meta_file(dw_batch_meta_file* batch_meta_file); +extern void dw_generate_batch_files(int batch_file_num, uint64 dw_file_size); +extern void dw_cxt_init_batch(); +extern void dw_remove_file(const char* file_name); +extern int dw_open_file(const char* file_name); +extern void dw_upgrade_renable_double_write(); + +extern int g_stat_file_id; + #endif /* DOUBLE_WRITE_H */ diff --git a/src/include/access/double_write_basic.h b/src/include/access/double_write_basic.h index 41bcda731..4cf4b474a 100644 --- a/src/include/access/double_write_basic.h +++ b/src/include/access/double_write_basic.h @@ -12,11 +12,11 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * --------------------------------------------------------------------------------------- - * + * * double_write_basic.h * Define some basic structs of double write which is needed in knl_instance.h - * - * + * + * * IDENTIFICATION * src/include/access/double_write_basic.h * @@ -33,11 +33,14 @@ static const uint32 HALF_K = 512; -static const char DW_FILE_NAME[] = "global/pg_dw"; +static const char OLD_DW_FILE_NAME[] = "global/pg_dw"; +static const char DW_FILE_NAME_PREFIX[] = "global/pg_dw_"; static const char SINGLE_DW_FILE_NAME[] = "global/pg_dw_single"; static const char DW_BUILD_FILE_NAME[] = "global/pg_dw.build"; static const char DW_UPGRADE_FILE_NAME[] = "global/dw_upgrade"; - +static const char DW_BATCH_UPGRADE_META_FILE_NAME[] = "global/dw_batch_upgrade_meta"; +static const char DW_BATCH_UPGRADE_BATCH_FILE_NAME[] = "global/dw_batch_upgrade_files"; +static const char DW_META_FILE[] = "global/pg_dw_meta"; static const uint32 DW_TRY_WRITE_TIMES = 8; #ifndef WIN32 @@ -52,6 +55,9 @@ static const uint16 DW_FILE_PAGE = 32768; static const int64 DW_FILE_SIZE = (DW_FILE_PAGE * BLCKSZ); +static const uint64 DW_FILE_SIZE_UNIT = 1024 * 1024; +static const uint32 MAX_DW_FILE_SIZE_MB = 256; + /** * | file_head | batch head | data pages | batch tail/next batch head | ... 
| * | 0 | 1 | 409 at most | 1 | ... | @@ -59,6 +65,18 @@ static const int64 DW_FILE_SIZE = (DW_FILE_PAGE * BLCKSZ); static const uint16 DW_BATCH_FILE_START = 1; #define REDUCE_CKS2UINT16(cks) (((cks) >> 16) ^ ((cks)&0xFFFF)) +#define DW_FULL_CKPT 0x1 + +static const uint32 DW_META_FILE_ALIGN_BYTES = 512 - sizeof(uint32) - sizeof(uint32) - sizeof(uint16) - sizeof(uint16) - sizeof(uint16); + +typedef struct st_dw_batch_meta_file{ + uint32 dw_version; + uint32 dw_file_size; /* double write file size */ + uint16 dw_file_num; /* double write file quantity */ + uint8 unused[DW_META_FILE_ALIGN_BYTES]; /* make meta file 512B total */ + uint16 record_state; /* record database bool guc parameter */ + uint16 checksum; +}dw_batch_meta_file; typedef struct st_dw_page_head { uint16 page_id; /* page_id in file */ @@ -82,6 +100,9 @@ typedef struct st_dw_file_head { dw_page_tail_t tail; } dw_file_head_t; +/* write the st_dw_meta_file data into the first three sector of the page */ +static const uint32 DW_META_FILE_BLOCK_NUM = 3; + static const uint32 DW_FILE_HEAD_ID_NUM = 3; /* write file head 3 times, distributed in start, middle, end of the first page of dw file */ @@ -91,7 +112,7 @@ const static uint64 DW_SLEEP_US = 1000L; const static uint16 DW_WRITE_STAT_LOWER_LIMIT = 16; -const static int DW_VIEW_COL_NUM = 11; +const static int DW_VIEW_COL_NUM = 12; const static int DW_SINGLE_VIEW_COL_NUM = 6; const static uint32 DW_VIEW_COL_NAME_LEN = 32; diff --git a/src/include/access/extreme_rto/dispatcher.h b/src/include/access/extreme_rto/dispatcher.h index 5890af217..a9b1f5f9b 100644 --- a/src/include/access/extreme_rto/dispatcher.h +++ b/src/include/access/extreme_rto/dispatcher.h @@ -174,6 +174,8 @@ typedef struct { #endif RedoInterruptCallBackFunc oldStartupIntrruptFunc; volatile bool recoveryStop; + volatile XLogRedoNumStatics xlogStatics[RM_NEXT_ID][MAX_XLOG_INFO_NUM]; + RedoTimeCost *startupTimeCost; } LogDispatcher; typedef struct { @@ -192,6 +194,7 @@ extern THR_LOCAL RecordBufferState *g_recordbuffer; const static uint64 OUTPUT_WAIT_COUNT = 0x7FFFFFF; const static uint64 PRINT_ALL_WAIT_COUNT = 0x7FFFFFFFF; extern RedoItem g_redoEndMark; +extern RedoItem g_terminateMark; extern uint32 g_startupTriggerState; extern uint32 g_readManagerTriggerFlag; @@ -230,7 +233,6 @@ int GetDispatcherExitCode(); bool DispatchPtrIsNull(); uint32 GetBatchCount(); uint32 GetAllWorkerCount(); -bool OnHotStandBy(); PGPROC *StartupPidGetProc(ThreadId pid); extern void SetStartupBufferPinWaitBufId(int bufid); extern void GetStartupBufferPinWaitBufId(int *bufids, uint32 len); @@ -241,22 +243,19 @@ void **GetXLogInvalidPagesFromWorkers(); /* Other utility functions. 
*/ uint32 GetSlotId(const RelFileNode node, BlockNumber block, ForkNumber forkNum, uint32 workerCount); -bool XactWillRemoveRelFiles(XLogReaderState *record); bool XactHasSegpageRelFiles(XLogReaderState *record); -XLogReaderState *NewReaderState(XLogReaderState *readerState, bool bCopyState = false); +XLogReaderState *NewReaderState(XLogReaderState *readerState); void FreeAllocatedRedoItem(); -void DiagLogRedoRecord(XLogReaderState *record, const char *funcName); List *CheckImcompleteAction(List *imcompleteActionList); void SetPageWorkStateByThreadId(uint32 threadState); -void UpdateDispatcherStandbyState(HotStandbyState *state); void GetReplayedRecPtr(XLogRecPtr *startPtr, XLogRecPtr *endPtr); void StartupSendFowarder(RedoItem *item); XLogRecPtr GetSafeMinCheckPoint(); RedoWaitInfo redo_get_io_event(int32 event_id); void redo_get_wroker_statistic(uint32 *realNum, RedoWorkerStatsData *worker, uint32 workerLen); -#ifndef ENABLE_MULTIPLE_NODES void CheckCommittingCsnList(); -#endif +void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum); +void DumpDispatcher(); } // namespace extreme_rto diff --git a/src/include/access/extreme_rto/page_redo.h b/src/include/access/extreme_rto/page_redo.h index db138d153..3ffa739e6 100644 --- a/src/include/access/extreme_rto/page_redo.h +++ b/src/include/access/extreme_rto/page_redo.h @@ -36,13 +36,14 @@ #include "access/extreme_rto/posix_semaphore.h" #include "access/extreme_rto/spsc_blocking_queue.h" #include "access/xlogproc.h" +#include "postmaster/pagerepair.h" namespace extreme_rto { -static const uint32 PAGE_WORK_QUEUE_SIZE = 4096; +static const uint32 PAGE_WORK_QUEUE_SIZE = 8192; static const uint32 EXTREME_RTO_ALIGN_LEN = 16; /* need 128-bit aligned */ - +static const uint32 MAX_REMOTE_READ_INFO_NUM = 100; typedef enum { REDO_BATCH, @@ -56,6 +57,15 @@ typedef enum { REDO_ROLE_NUM, } RedoRole; +typedef struct BadBlockRecEnt{ + RepairBlockKey key; + XLogPhyBlock pblk; + XLogRecPtr rec_min_lsn; + XLogRecPtr rec_max_lsn; + XLogRecParseState *head; + XLogRecParseState *tail; +} BadBlockRecEnt; + struct PageRedoWorker { /* * The last successfully applied log record's end position + 1 as an @@ -145,9 +155,9 @@ struct PageRedoWorker { */ /* XLog invalid pages. */ void *xlogInvalidPages; -#ifndef ENABLE_MULTIPLE_NODES + void *committingCsnList; -#endif + /* --------------------------------------------- * Phase barrier. * @@ -170,8 +180,14 @@ struct PageRedoWorker { uint32 fullSyncFlag; RedoParseManager parseManager; RedoBufferManager bufferManager; + RedoTimeCost timeCostList[TIME_COST_NUM]; + uint32 remoteReadPageNum; + HTAB *badPageHashTbl; + char page[BLCKSZ]; + XLogBlockDataParse *curRedoBlockState; }; + extern THR_LOCAL PageRedoWorker *g_redoWorker; /* Worker lifecycle. */ @@ -198,18 +214,12 @@ void WaitPageRedoWorkerReachLastMark(PageRedoWorker *worker); /* Redo processing. */ void AddPageRedoItem(PageRedoWorker *worker, void *item); -/* Run-time worker states. */ -uint64 GetCompletedRecPtr(PageRedoWorker *worker); -void SetWorkerRestartPoint(PageRedoWorker *worker, XLogRecPtr restartPoint); - void UpdatePageRedoWorkerStandbyState(PageRedoWorker *worker, HotStandbyState newState); /* Redo end states. 
*/ void ClearBTreeIncompleteActions(PageRedoWorker *worker); void *GetXLogInvalidPages(PageRedoWorker *worker); bool RedoWorkerIsIdle(PageRedoWorker *worker); -void PageRedoSetAffinity(uint32 id); - void DumpPageRedoWorker(PageRedoWorker *worker); PageRedoWorker *CreateWorker(uint32 id); extern void UpdateRecordGlobals(RedoItem *item, HotStandbyState standbyState); @@ -224,6 +234,25 @@ bool LsnUpdate(); void ResetRtoXlogReadBuf(XLogRecPtr targetPagePtr); bool XLogPageReadForExtRto(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen); void ExtremeRtoStopHere(); +void WaitAllRedoWorkerQueueEmpty(); +void WaitAllReplayWorkerIdle(); +void DispatchClosefdMarkToAllRedoWorker(); +void DispatchCleanInvalidPageMarkToAllRedoWorker(RepairFileKey key); + +const char *RedoWokerRole2Str(RedoRole role); + + +/* block or file repair function */ +HTAB* BadBlockHashTblCreate(); +void RepairPageAndRecoveryXLog(BadBlockRecEnt *page_info, const char *page); +void CheckRemoteReadAndRepairPage(BadBlockRecEnt *entry); +void ClearSpecificsPageEntryAndMem(BadBlockRecEnt *entry); +void ClearRecoveryThreadHashTbl(const RelFileNode &node, ForkNumber forknum, BlockNumber minblkno, + bool segment_shrink); +void BatchClearRecoveryThreadHashTbl(Oid spcNode, Oid dbNode); +void RecordBadBlockAndPushToRemote(XLogBlockDataParse *datadecode, PageErrorType error_type, + XLogRecPtr old_lsn, XLogPhyBlock pblk); +void SeqCheckRemoteReadAndRepairPage(); } // namespace extreme_rto #endif diff --git a/src/include/access/extreme_rto/redo_item.h b/src/include/access/extreme_rto/redo_item.h index 36fd1dcce..f7cf0bbf7 100644 --- a/src/include/access/extreme_rto/redo_item.h +++ b/src/include/access/extreme_rto/redo_item.h @@ -39,24 +39,14 @@ namespace extreme_rto { typedef struct RedoItem_s { - /* Old version. */ - bool oldVersion; bool needImmediateCheckpoint; bool needFullSyncCheckpoint; - /* Number of workers sharing this item. */ - uint32 shareCount; - /* Id of the worker designated to apply this item. */ - uint32 designatedWorker; /* The expected timelines for this record. */ List *expectedTLIs; /* The timestamp of the log record if it is a transaction record. */ TimestampTz recordXTime; /* Next item on the free list. */ struct RedoItem_s *freeNext; - /* Number of workers holding a reference to this item. */ - pg_atomic_uint32 refCount; - /* If this item has been replayed. */ - pg_atomic_uint32 replayed; /* A "deep" copy of the log record. 
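BadBlockRecEnt is the per-worker bookkeeping for pages that fail verification during extreme-RTO replay: the RepairBlockKey identifies the block, rec_min_lsn/rec_max_lsn bracket the LSN window still to cover, and head/tail chain the parse states that must be replayed once a good copy arrives from the remote node. A minimal sketch of what BadBlockHashTblCreate() plausibly does with the standard dynahash API (the real body lives elsewhere and may differ):

```
/* sketch only: key type, entry type and table size are taken from the
 * declarations above; everything else is an assumption */
HTAB* BadBlockHashTblCreateSketch()
{
    HASHCTL ctl;
    errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl));
    securec_check(rc, "\0", "\0");
    ctl.keysize = sizeof(RepairBlockKey);    /* rel/fork/block identity */
    ctl.entrysize = sizeof(BadBlockRecEnt);  /* key + LSN window + record chain */
    ctl.hash = tag_hash;
    return hash_create("recovery thread bad block hash", MAX_REMOTE_READ_INFO_NUM,
                       &ctl, HASH_ELEM | HASH_FUNCTION);
}
```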
*/ XLogReaderState record; /* Used for really free */ @@ -65,12 +55,6 @@ typedef struct RedoItem_s { int syncXLogReceiptSource; TransactionId RecentXmin; ServerMode syncServerMode; - - /* temp variable indicate number of redo items with same block number */ - pg_atomic_uint32 blkShareCount; - - bool isForceAll; - pg_atomic_uint32 distributeCount; } RedoItem; static const int32 ANY_BLOCK_ID = -1; @@ -79,15 +63,13 @@ static const uint32 TRXN_WORKER = (uint32)-2; static const uint32 ALL_WORKER = (uint32)-3; static const uint32 USTORE_WORKER = (uint32)-4; +void DumpItem(RedoItem *item, const char *funcName); + static inline RedoItem *GetRedoItemPtr(XLogReaderState *record) { return (RedoItem *)(((char *)record) - offsetof(RedoItem, record)); } -RedoItem *CreateRedoItem(XLogReaderState *record, uint32 shareCount, uint32 designatedWorker, List *expectedTLIs, - TimestampTz recordXTime, bool buseoriginal, bool isForceAll = false); - -void ApplyRedoRecord(XLogReaderState *record, bool bOld); } // namespace extreme_rto #endif diff --git a/src/include/access/genam.h b/src/include/access/genam.h index 27cc47303..12bb68ec1 100644 --- a/src/include/access/genam.h +++ b/src/include/access/genam.h @@ -49,6 +49,7 @@ typedef struct IndexVacuumInfo { int message_level; /* ereport level for progress messages */ double num_heap_tuples; /* tuples remaining in heap */ BufferAccessStrategy strategy; /* access strategy for reads */ + OidRBTree *invisibleParts; /* used for Ustore GPI */ } IndexVacuumInfo; /* @@ -107,12 +108,15 @@ struct ScanState; * check for conflicting live tuples (possibly blocking). */ typedef enum IndexUniqueCheck { - UNIQUE_CHECK_NO, /* Don't do any uniqueness checking */ - UNIQUE_CHECK_YES, /* Enforce uniqueness at insertion time */ - UNIQUE_CHECK_PARTIAL, /* Test uniqueness, but no error */ - UNIQUE_CHECK_EXISTING /* Check if existing tuple is unique */ + UNIQUE_CHECK_NO, /* Don't do any uniqueness checking */ + UNIQUE_CHECK_YES, /* Enforce uniqueness at insertion time */ + UNIQUE_CHECK_PARTIAL, /* Test uniqueness, but no error */ + UNIQUE_CHECK_EXISTING, /* Check if existing tuple is unique */ + UNIQUE_CHECK_UPSERT /* Test uniqueness, but no error and no insertion when a conflict is found */ } IndexUniqueCheck; +#define IndexUniqueCheckNoError(unique) ((unique) == UNIQUE_CHECK_PARTIAL || (unique) == UNIQUE_CHECK_UPSERT) + /* * generalized index_ interface routines (in indexam.c) */ @@ -183,12 +187,16 @@ HeapTuple systable_getnext_back(SysScanDesc sysscan); * global partition index access method support routines (in genam.c) */ typedef struct GPIScanDescData { - HTAB* fakeRelationTable; /* fake partition relation and partition hash table */ - OidRBTree* invisiblePartTree; /* cache invisible partition oid in GPI */ - Relation parentRelation; /* parent relation of partition */ - Relation fakePartRelation; /* fake-relation using partition */ - Partition partition; /* partition use to fake partition rel */ - Oid currPartOid; /* current partition oid in GPI */ + HTAB* fakeRelationTable; /* fake partition relation and partition hash table */ + OidRBTree* invisiblePartTree; /* cache invisible partition oid in GPI */ + OidRBTree* invisiblePartTreeForVacuum; /* only _bt_check_unique() may use it + * to determine the index tuple + * can be marked as dead + */ + Relation parentRelation; /* parent relation of partition */ + Relation fakePartRelation; /* fake-relation using partition */ + Partition partition; /* partition use to fake partition rel */ + Oid currPartOid; /* current partition oid in GPI */ 
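GetRedoItemPtr is the classic container-of idiom: because RedoItem embeds its XLogReaderState by value, code that is handed only the reader state can walk back to the owning item by subtracting offsetof(RedoItem, record), with no back-pointer stored. A hypothetical caller (the function name is invented):

```
static void ReleaseAfterReplay(XLogReaderState *record)
{
    /* (char *)record - offsetof(RedoItem, record) recovers the owner */
    RedoItem *item = GetRedoItemPtr(record);
    item->freeNext = NULL;   /* e.g. hand the item back to the free list */
}
```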
} GPIScanDescData; typedef GPIScanDescData* GPIScanDesc; diff --git a/src/include/access/gin.h b/src/include/access/gin.h index 272ab6955..2fe76b1e0 100644 --- a/src/include/access/gin.h +++ b/src/include/access/gin.h @@ -71,6 +71,7 @@ extern void ginUpdateStats(Relation index, const GinStatsData *stats); /* ginxlog.c */ extern void gin_redo(XLogReaderState *record); extern void gin_desc(StringInfo buf, XLogReaderState *record); +extern const char* gin_type_name(uint8 subtype); extern void gin_xlog_startup(void); extern void gin_xlog_cleanup(void); extern bool IsGinVacuumPages(XLogReaderState *record); diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 1be3fed36..61970bc0b 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -419,6 +419,7 @@ extern SplitedPageLayout *gistSplit(Relation r, Page page, IndexTuple *itup, int /* gistxlog.c */ extern void gist_redo(XLogReaderState *record); extern void gist_desc(StringInfo buf, XLogReaderState *record); +extern const char *gist_type_name(uint8 subtype); extern void gist_xlog_startup(void); extern void gist_xlog_cleanup(void); diff --git a/src/include/access/gtm.h b/src/include/access/gtm.h index ec9d7f1b3..6e97600ca 100644 --- a/src/include/access/gtm.h +++ b/src/include/access/gtm.h @@ -31,8 +31,10 @@ extern bool IsGTMConnected(void); extern GtmHostIndex InitGTM(bool useCache = true); extern void CloseGTM(void); -extern void ResetGtmHandleXmin(GTM_TransactionKey txnKey); -extern int SetGTMVacuumFlag(GTM_TransactionKey txnKey, bool is_vacuum); + +extern bool SetDisasterClusterGTM(char *disasterCluster); +extern bool DelDisasterClusterGTM(); +extern bool GetDisasterClusterGTM(char** disasterCluster); extern GTM_TransactionKey BeginTranGTM(GTM_Timestamp *timestamp); extern GlobalTransactionId GetGxidGTM(GTM_TransactionKey txnKey, bool is_sub_xact); @@ -56,6 +58,7 @@ extern GTM_Snapshot GetSnapshotGTM(GTM_TransactionKey txnKey, GlobalTransactionI extern GTM_Snapshot GetSnapshotGTMLite(void); extern GTM_SnapshotStatus GetGTMSnapshotStatus(GTM_TransactionKey txnKey); extern GTMLite_Status GetGTMLiteStatus(void); +extern GTM_Snapshot GetSnapshotGTMDR(void); /* Sequence interface APIs with GTM */ extern GTM_UUID GetSeqUUIDGTM(); @@ -76,4 +79,6 @@ extern void InitGTM_Reporttimeline(void); extern void SetGTMInterruptFlag(); extern bool PingGTM(struct gtm_conn* conn); +extern bool SetConsistencyPointCSNGTM(CommitSeqNo consistencyPointCSN); + #endif /* ACCESS_GTM_H */ diff --git a/src/include/access/hash.h b/src/include/access/hash.h index 2f673838b..e43b761cd 100644 --- a/src/include/access/hash.h +++ b/src/include/access/hash.h @@ -4,7 +4,7 @@ * header file for openGauss hash access method implementation * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/hash.h @@ -33,59 +33,36 @@ */ typedef uint32 Bucket; -#define InvalidBucket ((Bucket) 0xFFFFFFFF) -#define BUCKET_TO_BLKNO(metap, B) ((BlockNumber)((B) + ((B) ? (metap)->hashm_spares[_hash_spareindex((B) + 1) - 1] : 0)) + 1) +#define INVALID_BUCKET_NUM (0xFFFFFFFF) +#define BUCKET_TO_BLKNO(metap, B) ((BlockNumber)((B) + ((B) ? (metap)->hashm_spares[_hash_log2((B) + 1) - 1] : 0)) + 1) /* * Special space for hash index pages. * - * hasho_flag's LH_PAGE_TYPE bits tell us which type of page we're looking at. 
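The reverted BUCKET_TO_BLKNO maps a bucket number to its block: bucket B sits after the metapage (block 0, hence the trailing +1) and after every overflow page allocated before B's splitpoint, which hashm_spares[] records indexed by _hash_log2(B + 1). A worked example with invented spare counts:

```
#include <assert.h>

/* smallest i with (1 << i) >= num, i.e. PostgreSQL's _hash_log2() */
static unsigned hash_log2(unsigned num)
{
    unsigned i = 0;
    for (unsigned limit = 1; limit < num; limit <<= 1)
        i++;
    return i;
}

int main(void)
{
    unsigned spares[32] = {0, 1, 4}; /* invented: ovfl pages before each splitpoint */
    unsigned B = 5;                  /* bucket number */
    /* hash_log2(6) = 3, so spares[2] = 4 overflow pages precede bucket 5 */
    unsigned blkno = B + (B ? spares[hash_log2(B + 1) - 1] : 0) + 1;
    assert(blkno == 10);             /* 5 + 4 + 1, counting the metapage */
    return 0;
}
```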
- * Additional bits in the flag word are used for more transient purposes. - * - * To test a page's type, do (hasho_flag & LH_PAGE_TYPE) == LH_xxx_PAGE. - * However, we ensure that each used page type has a distinct bit so that - * we can OR together page types for uses such as the allowable-page-types - * argument of _hash_checkpage(). + * hasho_flag tells us which type of page we're looking at. For + * example, knowing overflow pages from bucket pages is necessary + * information when you're deleting tuples from a page. If all the + * tuples are deleted from an overflow page, the overflow is made + * available to other buckets by calling _hash_freeovflpage(). If all + * the tuples are deleted from a bucket page, no additional action is + * necessary. */ #define LH_UNUSED_PAGE (0) #define LH_OVERFLOW_PAGE (1 << 0) #define LH_BUCKET_PAGE (1 << 1) #define LH_BITMAP_PAGE (1 << 2) #define LH_META_PAGE (1 << 3) -#define LH_BUCKET_BEING_POPULATED (1 << 4) -#define LH_BUCKET_BEING_SPLIT (1 << 5) -#define LH_BUCKET_NEEDS_SPLIT_CLEANUP (1 << 6) -#define LH_PAGE_HAS_DEAD_TUPLES (1 << 7) -#define LH_PAGE_TYPE \ - (LH_OVERFLOW_PAGE | LH_BUCKET_PAGE | LH_BITMAP_PAGE | LH_META_PAGE) - -/* - * In an overflow page, hasho_prevblkno stores the block number of the previous - * page in the bucket chain; in a bucket page, hasho_prevblkno stores the - * hashm_maxbucket value as of the last time the bucket was last split, or - * else as of the time the bucket was created. The latter convention is used - * to determine whether a cached copy of the metapage is too stale to be used - * without needing to lock or pin the metapage. - * - * hasho_nextblkno is always the block number of the next page in the - * bucket chain, or InvalidBlockNumber if there are no more such pages. - */ typedef struct HashPageOpaqueData { - BlockNumber hasho_prevblkno; /* see above */ - BlockNumber hasho_nextblkno; /* see above */ - Bucket hasho_bucket; /* bucket number this pg belongs to */ - uint16 hasho_flag; /* page type code + flag bits, see above */ - uint16 hasho_page_id; /* for identification of hash indexes */ + BlockNumber hasho_prevblkno; /* previous ovfl (or bucket) blkno */ + BlockNumber hasho_nextblkno; /* next ovfl blkno */ + Bucket hasho_bucket; /* bucket number this pg belongs to */ + uint16 hasho_flag; /* page type code, see above */ + uint16 hasho_page_id; /* for identification of hash indexes */ } HashPageOpaqueData; typedef HashPageOpaqueData* HashPageOpaque; -#define H_NEEDS_SPLIT_CLEANUP(opaque) (((opaque)->hasho_flag & LH_BUCKET_NEEDS_SPLIT_CLEANUP) != 0) -#define H_BUCKET_BEING_SPLIT(opaque) (((opaque)->hasho_flag & LH_BUCKET_BEING_SPLIT) != 0) -#define H_BUCKET_BEING_POPULATED(opaque) (((opaque)->hasho_flag & LH_BUCKET_BEING_POPULATED) != 0) -#define H_HAS_DEAD_TUPLES(opaque) (((opaque)->hasho_flag & LH_PAGE_HAS_DEAD_TUPLES) != 0) - /* * The page ID is for the convenience of pg_filedump and similar utilities, * which otherwise would have a hard time telling pages of different index @@ -94,19 +71,26 @@ typedef HashPageOpaqueData* HashPageOpaque; */ #define HASHO_PAGE_ID 0xFF80 -typedef struct HashScanPosItem { - ItemPointerData heapTid; /* TID of referenced heap item */ - OffsetNumber indexOffset; /* index item's location within page */ -} HashScanPosItem; - - /* - * HashScanOpaqueData is private state for a hash index scan. + * HashScanOpaqueData is private state for a hash index scan. 
*/ typedef struct HashScanOpaqueData { /* Hash value of the scan key, ie, the hash key we seek */ uint32 hashso_sk_hash; + /* + * By definition, a hash scan should be examining only one bucket. We + * record the bucket number here as soon as it is known. + */ + Bucket hashso_bucket; + bool hashso_bucket_valid; + + /* + * If we have a share lock on the bucket, we record it here. When + * hashso_bucket_blkno is zero, we have no such lock. + */ + BlockNumber hashso_bucket_blkno; + /* * We also want to remember which buffer we're currently examining in the * scan. We keep the buffer pinned (but not locked) across hashgettuple @@ -115,33 +99,11 @@ typedef struct HashScanOpaqueData { */ Buffer hashso_curbuf; - /* remember the buffer associated with primary bucket */ - Buffer hashso_bucket_buf; - - /* - * remember the buffer associated with primary bucket page of bucket being - * split. it is required during the scan of the bucket which is being - * populated during split operation. - */ - Buffer hashso_split_bucket_buf; - /* Current position of the scan, as an index TID */ ItemPointerData hashso_curpos; /* Current position of the scan, as a heap TID */ ItemPointerData hashso_heappos; - - /* Whether scan starts on bucket being populated due to split */ - bool hashso_buc_populated; - - /* - * Whether scanning bucket being split? The value of this parameter is - * referred only when hashso_buc_populated is true. - */ - bool hashso_buc_split; - /* info about killed items if any (killedItems is NULL if never used) */ - HashScanPosItem *killedItems; /* tids and offset numbers of killed items */ - int numKilled; /* number of currently stored items */ } HashScanOpaqueData; typedef HashScanOpaqueData* HashScanOpaque; @@ -153,7 +115,7 @@ typedef HashScanOpaqueData* HashScanOpaque; #define HASH_METAPAGE 0 /* metapage is always block 0 */ #define HASH_MAGIC 0x6440640 -#define HASH_VERSION 4 +#define HASH_VERSION 2 /* 2 signifies only hash key value is stored */ /* * Spares[] holds the number of overflow pages currently allocated at or @@ -166,32 +128,17 @@ typedef HashScanOpaqueData* HashScanOpaque; * * ovflpages that have been recycled for reuse can be found by looking at * bitmaps that are stored within ovflpages dedicated for the purpose. - * The blknos of these bitmap pages are kept in mapp[]; nmaps is the + * The blknos of these bitmap pages are kept in bitmaps[]; nmaps is the * number of currently existing bitmaps. * * The limitation on the size of spares[] comes from the fact that there's * no point in having more than 2^32 buckets with only uint32 hashcodes. - * (Note: The value of HASH_MAX_SPLITPOINTS which is the size of spares[] is - * adjusted in such a way to accommodate multi phased allocation of buckets - * after HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE). - * * There is no particular upper limit on the size of mapp[], other than - * needing to fit into the metapage. (With 8K block size, 1024 bitmaps - * limit us to 256 GB of overflow space...) + * needing to fit into the metapage. (With 8K block size, 128 bitmaps + * limit us to 64 Gb of overflow space...) 
*/ -#define HASH_MAX_BITMAPS 1024 - -#define HASH_SPLITPOINT_PHASE_BITS 2 -#define HASH_SPLITPOINT_PHASES_PER_GRP (1 << HASH_SPLITPOINT_PHASE_BITS) -#define HASH_SPLITPOINT_PHASE_MASK (HASH_SPLITPOINT_PHASES_PER_GRP - 1) -#define HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE 10 - -/* defines max number of splitpoit phases a hash index can have */ -#define HASH_MAX_SPLITPOINT_GROUP 32 -#define HASH_MAX_SPLITPOINTS \ - (((HASH_MAX_SPLITPOINT_GROUP - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) * \ - HASH_SPLITPOINT_PHASES_PER_GRP) + \ - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) +#define HASH_MAX_SPLITPOINTS 32 +#define HASH_MAX_BITMAPS 128 typedef struct HashMetaPageData { uint32 hashm_magic; /* magic no. for hash tables */ @@ -333,40 +280,37 @@ extern Datum hash_new_uint32(uint32 k); /* private routines */ /* hashinsert.c */ -extern void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel); +extern void _hash_doinsert(Relation rel, IndexTuple itup); extern OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, Size itemsize, IndexTuple itup); -extern void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups, - OffsetNumber *itup_offsets, uint16 nitups); /* hashovfl.c */ -extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin); -extern BlockNumber _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf, - Buffer wbuf, IndexTuple *itups, OffsetNumber *itup_offsets, - Size *tups_size, uint16 nitups, BufferAccessStrategy bstrategy); -extern void _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage); -extern void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, Buffer bucket_buf, BufferAccessStrategy bstrategy); +extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf); +extern BlockNumber _hash_freeovflpage(Relation rel, Buffer ovflbuf, BufferAccessStrategy bstrategy); +extern void _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno, ForkNumber forkNum); +extern void _hash_squeezebucket(Relation rel, Bucket bucket, BlockNumber bucket_blkno, BufferAccessStrategy bstrategy); /* hashpage.c */ +extern void _hash_getlock(Relation rel, BlockNumber whichlock, int access); +extern bool _hash_try_getlock(Relation rel, BlockNumber whichlock, int access); +extern void _hash_droplock(Relation rel, BlockNumber whichlock, int access); extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags); -extern Buffer _hash_getbuf_with_condlock_cleanup(Relation rel, - BlockNumber blkno, int flags); -extern HashMetaPage _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh); -extern Buffer _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, - int access, HashMetaPage *cachedmetap); extern Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno); -extern void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag, bool initpage); extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum); extern Buffer _hash_getbuf_with_strategy( Relation rel, BlockNumber blkno, int access, int flags, BufferAccessStrategy bstrategy); extern void _hash_relbuf(Relation rel, Buffer buf); extern void _hash_dropbuf(Relation rel, Buffer buf); -extern void _hash_dropscanbuf(Relation rel, HashScanOpaque so); -extern uint32 _hash_init(Relation rel, double num_tuples, ForkNumber forkNum); -extern void _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, uint16 ffactor, bool initpage); +extern void 
_hash_wrtbuf(Relation rel, Buffer buf); +extern void _hash_chgbufaccess(Relation rel, Buffer buf, int from_access, int to_access); +extern uint32 _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum); extern void _hash_pageinit(Page page, Size size); extern void _hash_expandtable(Relation rel, Buffer metabuf); -extern void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket, - uint32 maxbucket, uint32 highmask, uint32 lowmask); + +/* hashscan.c */ +extern void _hash_regscan(IndexScanDesc scan); +extern void _hash_dropscan(IndexScanDesc scan); +extern bool _hash_has_active_scan(Relation rel, Bucket bucket); +extern void ReleaseResources_hash(void); /* hashsearch.c */ extern bool _hash_next(IndexScanDesc scan, ScanDirection dir); @@ -376,10 +320,10 @@ extern bool _hash_step(IndexScanDesc scan, Buffer* bufP, ScanDirection dir); /* hashsort.c */ typedef struct HSpool HSpool; /* opaque struct in hashsort.c */ -extern HSpool* _h_spoolinit(Relation heap, Relation index, uint32 num_buckets, void* meminfo); +extern HSpool* _h_spoolinit(Relation index, uint32 num_buckets, void* meminfo); extern void _h_spooldestroy(HSpool* hspool); extern void _h_spool(HSpool* hspool, ItemPointer self, Datum* values, const bool* isnull); -extern void _h_indexbuild(HSpool* hspool, Relation heapRel); +extern void _h_indexbuild(HSpool* hspool); /* hashutil.c */ extern bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup); @@ -387,28 +331,16 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key); extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype); extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket, uint32 highmask, uint32 lowmask); extern uint32 _hash_log2(uint32 num); -extern uint32 _hash_spareindex(uint32 num_bucket); -extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase); extern void _hash_checkpage(Relation rel, Buffer buf, int flags); extern uint32 _hash_get_indextuple_hashkey(IndexTuple itup); -extern bool _hash_convert_tuple(Relation index, Datum *user_values, const bool *user_isnull, - Datum *index_values, bool *index_isnull); +extern IndexTuple _hash_form_tuple(Relation index, Datum* values, const bool* isnull); extern OffsetNumber _hash_binsearch(Page page, uint32 hash_value); extern OffsetNumber _hash_binsearch_last(Page page, uint32 hash_value); -extern BlockNumber _hash_get_oldblock_from_newbucket(Relation rel, Bucket new_bucket); -extern BlockNumber _hash_get_newblock_from_oldbucket(Relation rel, Bucket old_bucket); -extern Bucket _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, - uint32 lowmask, uint32 maxbucket); -extern void _hash_kill_items(IndexScanDesc scan); /* hash.c */ -extern void hashbucketcleanup(Relation rel, Bucket cur_bucket, - Buffer bucket_buf, BlockNumber bucket_blkno, - BufferAccessStrategy bstrategy, - uint32 maxbucket, uint32 highmask, uint32 lowmask, - double *tuples_removed, double *num_index_tuples, - bool bucket_has_garbage, - IndexBulkDeleteCallback callback, void *callback_state); +extern void hash_redo(XLogReaderState* record); +extern void hash_desc(StringInfo buf, XLogReaderState* record); +extern const char* hash_type_name(uint8 subtype); #ifdef PGXC extern Datum compute_hash(Oid type, Datum value, char locator); diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h deleted file mode 100644 index 9d10c9ae4..000000000 --- a/src/include/access/hash_xlog.h +++ /dev/null @@ -1,352 +0,0 @@ 
-/*------------------------------------------------------------------------- - * - * hash_xlog.h - * header file for Postgres hash AM implementation - * - * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/access/hash_xlog.h - * - *------------------------------------------------------------------------- - */ - -#ifndef HASH_XLOG_H -#define HASH_XLOG_H - -#include "access/xlogreader.h" -#include "lib/stringinfo.h" -#include "storage/off.h" - -/* Number of buffers required for XLOG_HASH_SQUEEZE_PAGE operation */ -#define HASH_XLOG_FREE_OVFL_BUFS 6 - -/* - * XLOG records for hash operations - */ -#define XLOG_HASH_INIT_META_PAGE 0x00 /* initialize the meta page */ -#define XLOG_HASH_INIT_BITMAP_PAGE 0x10 /* initialize the bitmap page */ -#define XLOG_HASH_INSERT 0x20 /* add index tuple without split */ -#define XLOG_HASH_ADD_OVFL_PAGE 0x30 /* add overflow page */ -#define XLOG_HASH_SPLIT_ALLOCATE_PAGE 0x40 /* allocate new page for split */ -#define XLOG_HASH_SPLIT_PAGE 0x50 /* split page */ -#define XLOG_HASH_SPLIT_COMPLETE 0x60 /* completion of split operation */ -#define XLOG_HASH_MOVE_PAGE_CONTENTS 0x70 /* remove tuples from one page - * and add to another page */ -#define XLOG_HASH_SQUEEZE_PAGE 0x80 /* add tuples to one of the previous - * pages in chain and free the ovfl - * page */ -#define XLOG_HASH_DELETE 0x90 /* delete index tuples from a page */ -#define XLOG_HASH_SPLIT_CLEANUP 0xA0 /* clear split-cleanup flag in primary - * bucket page after deleting tuples - * that are moved due to split */ -#define XLOG_HASH_UPDATE_META_PAGE 0xB0 /* update meta page after vacuum */ -#define XLOG_HASH_VACUUM_ONE_PAGE 0xC0 /* remove dead tuples from index page */ - -typedef enum { - XLOG_HASH_INIT_META_PAGE_NUM = 0, -}XLogHashInitMetaPageEnum; - -typedef enum { - XLOG_HASH_INIT_BITMAP_PAGE_BITMAP_NUM = 0, - XLOG_HASH_INIT_BITMAP_PAGE_META_NUM, -}XLogHashInitBitmapPageEnum; - -typedef enum { - XLOG_HASH_INSERT_PAGE_NUM = 0, - XLOG_HASH_INSERT_META_NUM, -}XLogHashInsertEnum; - -typedef enum { - XLOG_HASH_ADD_OVFL_PAGE_OVFL_NUM = 0, - XLOG_HASH_ADD_OVFL_PAGE_LEFT_NUM, - XLOG_HASH_ADD_OVFL_PAGE_MAP_NUM, - XLOG_HASH_ADD_OVFL_PAGE_NEWMAP_NUM, - XLOG_HASH_ADD_OVFL_PAGE_META_NUM, -}XLogHashAddOvflPageEnum; - -typedef enum { - XLOG_HASH_SPLIT_ALLOCATE_PAGE_OBUK_NUM = 0, - XLOG_HASH_SPLIT_ALLOCATE_PAGE_NBUK_NUM, - XLOG_HASH_SPLIT_ALLOCATE_PAGE_META_NUM, -}XLogHashSplitAllocatePageEnum; - -typedef enum { - XLOG_HASH_SPLIT_PAGE_NUM = 0, -}XLogHashSplitPageEnum; - -typedef enum { - XLOG_HASH_SPLIT_COMPLETE_OBUK_NUM = 0, - XLOG_HASH_SPLIT_COMPLETE_NBUK_NUM, -}XLogHashSplitCompleteEnum; - -typedef enum { - HASH_MOVE_BUK_BLOCK_NUM = 0, - HASH_MOVE_ADD_BLOCK_NUM, - HASH_MOVE_DELETE_OVFL_BLOCK_NUM, -}XLogHashMovePageEnum; - -typedef enum { - HASH_SQUEEZE_BUK_BLOCK_NUM = 0, - HASH_SQUEEZE_ADD_BLOCK_NUM, - HASH_SQUEEZE_INIT_OVFLBUF_BLOCK_NUM, - HASH_SQUEEZE_UPDATE_PREV_BLOCK_NUM, - HASH_SQUEEZE_UPDATE_NEXT_BLOCK_NUM, - HASH_SQUEEZE_UPDATE_BITMAP_BLOCK_NUM, - HASH_SQUEEZE_UPDATE_META_BLOCK_NUM, -}XLogHashSqueezePageEnum; - -typedef enum { - HASH_DELETE_BUK_BLOCK_NUM = 0, - HASH_DELETE_OVFL_BLOCK_NUM, -}XLogHashDeleteEnum; - -typedef enum { - HASH_SPLIT_CLEANUP_BLOCK_NUM, -}XLogHashSplitCleanupEnum; - -typedef enum { - HASH_UPDATE_META_BLOCK_NUM, -} XLogHashUpdateMateEnum; - -typedef enum { - HASH_VACUUM_PAGE_BLOCK_NUM = 0, - 
HASH_VACUUM_META_BLOCK_NUM, -} XLogHashVacuumPageEnum; - -/* - * xl_hash_split_allocate_page flag values, 8 bits are available. - */ -#define XLH_SPLIT_META_UPDATE_MASKS (1<<0) -#define XLH_SPLIT_META_UPDATE_SPLITPOINT (1<<1) - -/* - * This is what we need to know about a HASH index create. - * - * Backup block 0: metapage - */ -typedef struct xl_hash_createidx -{ - double num_tuples; - RegProcedure procid; - uint16 ffactor; -} xl_hash_createidx; - -#define SizeOfHashCreateIdx (offsetof(xl_hash_createidx, ffactor) + sizeof(uint16)) - -/* - * This is what we need to know about simple (without split) insert. - * - * This data record is used for XLOG_HASH_INSERT - * - * Backup Blk 0: original page (data contains the inserted tuple) - * Backup Blk 1: metapage (HashMetaPageData) - */ -typedef struct xl_hash_insert -{ - OffsetNumber offnum; -} xl_hash_insert; - -#define SizeOfHashInsert (offsetof(xl_hash_insert, offnum) + sizeof(OffsetNumber)) - -/* - * This is what we need to know about addition of overflow page. - * - * This data record is used for XLOG_HASH_ADD_OVFL_PAGE - * - * Backup Blk 0: newly allocated overflow page - * Backup Blk 1: page before new overflow page in the bucket chain - * Backup Blk 2: bitmap page - * Backup Blk 3: new bitmap page - * Backup Blk 4: metapage - */ -typedef struct xl_hash_add_ovfl_page -{ - uint16 bmsize; - bool bmpage_found; -} xl_hash_add_ovfl_page; - -#define SizeOfHashAddOvflPage \ - (offsetof(xl_hash_add_ovfl_page, bmpage_found) + sizeof(bool)) - -/* - * This is what we need to know about allocating a page for split. - * - * This data record is used for XLOG_HASH_SPLIT_ALLOCATE_PAGE - * - * Backup Blk 0: page for old bucket - * Backup Blk 1: page for new bucket - * Backup Blk 2: metapage - */ -typedef struct xl_hash_split_allocate_page -{ - uint32 new_bucket; - uint16 old_bucket_flag; - uint16 new_bucket_flag; - uint8 flags; -} xl_hash_split_allocate_page; - -#define SizeOfHashSplitAllocPage \ - (offsetof(xl_hash_split_allocate_page, flags) + sizeof(uint8)) - -/* - * This is what we need to know about completing the split operation. - * - * This data record is used for XLOG_HASH_SPLIT_COMPLETE - * - * Backup Blk 0: page for old bucket - * Backup Blk 1: page for new bucket - */ -typedef struct xl_hash_split_complete -{ - uint16 old_bucket_flag; - uint16 new_bucket_flag; -} xl_hash_split_complete; - -#define SizeOfHashSplitComplete \ - (offsetof(xl_hash_split_complete, new_bucket_flag) + sizeof(uint16)) - -/* - * This is what we need to know about move page contents required during - * squeeze operation. - * - * This data record is used for XLOG_HASH_MOVE_PAGE_CONTENTS - * - * Backup Blk 0: bucket page - * Backup Blk 1: page containing moved tuples - * Backup Blk 2: page from which tuples will be removed - */ -typedef struct xl_hash_move_page_contents -{ - uint16 ntups; - bool is_prim_bucket_same_wrt; /* true if the page to which - * tuples are moved is same as - * primary bucket page */ -} xl_hash_move_page_contents; - -#define SizeOfHashMovePageContents \ - (offsetof(xl_hash_move_page_contents, is_prim_bucket_same_wrt) + sizeof(bool)) - -/* - * This is what we need to know about the squeeze page operation. 
- * - * This data record is used for XLOG_HASH_SQUEEZE_PAGE - * - * Backup Blk 0: page containing tuples moved from freed overflow page - * Backup Blk 1: freed overflow page - * Backup Blk 2: page previous to the freed overflow page - * Backup Blk 3: page next to the freed overflow page - * Backup Blk 4: bitmap page containing info of freed overflow page - * Backup Blk 5: meta page - */ -typedef struct xl_hash_squeeze_page -{ - BlockNumber prevblkno; - BlockNumber nextblkno; - uint16 ntups; - bool is_prim_bucket_same_wrt; /* true if the page to which - * tuples are moved is same as - * primary bucket page */ - bool is_prev_bucket_same_wrt; /* true if the page to which - * tuples are moved is the page - * previous to the freed overflow - * page */ -} xl_hash_squeeze_page; - -#define SizeOfHashSqueezePage \ - (offsetof(xl_hash_squeeze_page, is_prev_bucket_same_wrt) + sizeof(bool)) - -/* - * This is what we need to know about the deletion of index tuples from a page. - * - * This data record is used for XLOG_HASH_DELETE - * - * Backup Blk 0: primary bucket page - * Backup Blk 1: page from which tuples are deleted - */ -typedef struct xl_hash_delete -{ - bool clear_dead_marking; /* true if this operation clears - * LH_PAGE_HAS_DEAD_TUPLES flag */ - bool is_primary_bucket_page; /* true if the operation is for - * primary bucket page */ -} xl_hash_delete; - -#define SizeOfHashDelete \ - (offsetof(xl_hash_delete, is_primary_bucket_page) + sizeof(bool)) - -/* - * This is what we need for metapage update operation. - * - * This data record is used for XLOG_HASH_UPDATE_META_PAGE - * - * Backup Blk 0: meta page - */ -typedef struct xl_hash_update_meta_page -{ - double ntuples; -} xl_hash_update_meta_page; - -#define SizeOfHashUpdateMetaPage \ - (offsetof(xl_hash_update_meta_page, ntuples) + sizeof(double)) - -/* - * This is what we need to initialize metapage. - * - * This data record is used for XLOG_HASH_INIT_META_PAGE - * - * Backup Blk 0: meta page - */ -typedef struct xl_hash_init_meta_page -{ - double num_tuples; - RegProcedure procid; - uint16 ffactor; -} xl_hash_init_meta_page; - -#define SizeOfHashInitMetaPage \ - (offsetof(xl_hash_init_meta_page, ffactor) + sizeof(uint16)) - -/* - * This is what we need to initialize bitmap page. - * - * This data record is used for XLOG_HASH_INIT_BITMAP_PAGE - * - * Backup Blk 0: bitmap page - * Backup Blk 1: meta page - */ -typedef struct xl_hash_init_bitmap_page -{ - uint16 bmsize; -} xl_hash_init_bitmap_page; - -#define SizeOfHashInitBitmapPage \ - (offsetof(xl_hash_init_bitmap_page, bmsize) + sizeof(uint16)) - -/* - * This is what we need for index tuple deletion and to - * update the meta page. 
- * - * This data record is used for XLOG_HASH_VACUUM_ONE_PAGE - * - * Backup Blk 0: bucket page - * Backup Blk 1: meta page - */ -typedef struct xl_hash_vacuum_one_page -{ - RelFileNode hnode; - int ntuples; - - /* TARGET OFFSET NUMBERS FOLLOW AT THE END */ -} xl_hash_vacuum_one_page; - -#define SizeOfHashVacuumOnePage \ - (offsetof(xl_hash_vacuum_one_page, ntuples) + sizeof(int)) - -extern void hash_redo(XLogReaderState *record); -extern void hash_desc(StringInfo buf, XLogReaderState *record); -extern const char *hash_identify(uint8 info); -extern bool IsHashVacuumPages(XLogReaderState *record); - -#endif /* HASH_XLOG_H */ diff --git a/src/include/access/hbucket_am.h b/src/include/access/hbucket_am.h index e85b2a34d..f29b6b3da 100644 --- a/src/include/access/hbucket_am.h +++ b/src/include/access/hbucket_am.h @@ -47,9 +47,13 @@ typedef struct RedisMergeItemOrderArray { }RedisMergeItemOrderArray; extern RedisMergeItem *hbkt_get_one_merge_item(char *merge_list, int merge_list_length, int2 bucketid); - +#ifdef ENABLE_MULTIPLE_NODES extern TableScanDesc GetTableScanDesc(TableScanDesc scan, Relation rel); extern IndexScanDesc GetIndexScanDesc(IndexScanDesc scan); +#else +#define GetTableScanDesc(scan, rel) (scan) +#define GetIndexScanDesc(scan) (scan) +#endif extern oidvector* hbkt_load_buckets(Relation relation, BucketInfo* bkt_info); extern RedisMergeItemOrderArray *hbkt_get_all_merge_item(char* merge_list, int merge_list_length); diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index b42f6b4fd..f04fdf90d 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -96,6 +96,10 @@ typedef struct TableScanDescData /* state set up at initscan time */ RangeScanInRedis rs_rangeScanInRedis; /* if it is a range scan in redistribution */ TupleTableSlot *slot; /* For begin scan of CopyTo */ + + /* variables for batch mode scan */ + int rs_ctupRows; + int rs_maxScanRows; } TableScanDescData; /* struct definition appears in relscan.h */ @@ -268,7 +272,6 @@ extern void bucketClosePartition(Partition bucket); #define heap_close(r,l) relation_close(r,l) /* struct definition appears in relscan.h */ -typedef struct HeapScanDescData* HeapScanDesc; typedef struct ParallelHeapScanDescData *ParallelHeapScanDesc; /* @@ -290,6 +293,7 @@ extern void heapgetpage(TableScanDesc scan, BlockNumber page); extern void heap_rescan(TableScanDesc sscan, ScanKey key); extern void heap_endscan(TableScanDesc scan); extern HeapTuple heap_getnext(TableScanDesc scan, ScanDirection direction); +extern bool HeapamGetNextBatchMode(TableScanDesc scan, ScanDirection direction); extern void heap_init_parallel_seqscan(TableScanDesc sscan, int32 dop, ScanDirection dir); extern void HeapParallelscanInitialize(ParallelHeapScanDesc target, Relation relation); @@ -311,7 +315,8 @@ extern void heap_get_max_tid(Relation rel, ItemPointer ctid); extern BulkInsertState GetBulkInsertState(void); extern void FreeBulkInsertState(BulkInsertState); -extern Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate); +extern Oid heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate, + bool istoast = false); extern void heap_abort_speculative(Relation relation, HeapTuple tuple); extern bool heap_page_prepare_for_xid( Relation relation, Buffer buffer, TransactionId xid, bool multi, bool pageReplication = false); @@ -327,10 +332,10 @@ extern TM_Result heap_update(Relation relation, Relation parentRelation, ItemPoi bool 
allow_delete_self = false); extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer* buffer, CommandId cid, LockTupleMode mode, bool nowait, bool follow_updates, TM_FailureData *tmfd, - bool allow_lock_self = false); + bool allow_lock_self = false, int waitSec = 0); void FixInfomaskFromInfobits(uint8 infobits, uint16 *infomask, uint16 *infomask2); -extern void heap_inplace_update(Relation relation, HeapTuple tuple); +extern void heap_inplace_update(Relation relation, HeapTuple tuple, bool waitFlush = false); extern bool heap_freeze_tuple(HeapTuple tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, bool *changedMultiXid = NULL); extern bool heap_tuple_needs_freeze(HeapTuple tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf); @@ -349,15 +354,15 @@ extern void partition_sync(Relation rel, Oid partitionId, LOCKMODE toastLockmode extern void heap_redo(XLogReaderState* rptr); extern void heap_desc(StringInfo buf, XLogReaderState* record); +extern const char* heap_type_name(uint8 subtype); extern void heap2_redo(XLogReaderState* rptr); extern void heap2_desc(StringInfo buf, XLogReaderState* record); +extern const char* heap2_type_name(uint8 subtype); extern void heap3_redo(XLogReaderState* rptr); extern void heap3_desc(StringInfo buf, XLogReaderState* record); +extern const char* heap3_type_name(uint8 subtype); extern void heap_bcm_redo(xl_heap_bcm* xlrec, RelFileNode node, XLogRecPtr lsn); -extern bool heap_page_upgrade(Relation relation, Buffer buffer); -extern void heap_page_upgrade_nocheck(Relation relation, Buffer buffer); - extern XLogRecPtr log_heap_cleanup_info(const RelFileNode* rnode, TransactionId latestRemovedXid); extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer, OffsetNumber* redirected, int nredirected, OffsetNumber* nowdead, int ndead, OffsetNumber* nowunused, int nunused, TransactionId latestRemovedXid, @@ -418,6 +423,14 @@ typedef enum { HEAPTUPLE_DELETE_IN_PROGRESS /* deleting xact is still in progress */ } HTSV_Result; +typedef enum { + CheckLockTupleMode, + CheckMultiXactLockMode, + CheckXactLockTableMode, + CheckSubXactLockTableMode +} CheckWaitLockMode; + + /* Special "satisfies" routines with different APIs */ extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer, bool self_visible = false); extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer, bool isAnalyzing = false); diff --git a/src/include/access/htup.h b/src/include/access/htup.h index 6927a5174..c5094324b 100644 --- a/src/include/access/htup.h +++ b/src/include/access/htup.h @@ -204,17 +204,12 @@ typedef HeapTupleHeaderData* HeapTupleHeader; #define HEAP_XMAX_INVALID 0x0800 /* t_xmax invalid/aborted */ #define HEAP_XMAX_IS_MULTI 0x1000 /* t_xmax is a MultiXactId */ #define HEAP_UPDATED 0x2000 /* this is UPDATEd version of row */ -#define HEAP_MOVED_OFF \ - 0x4000 /* moved to another place by pre-9.0 \ - * VACUUM FULL; kept for binary \ - * upgrade support */ -#define HEAP_MOVED_IN \ - 0x8000 /* moved from another place by pre-9.0 \ - * VACUUM FULL; kept for binary \ - * upgrade support */ -#define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN) -#define HEAP_XACT_MASK 0xFFE0 /* visibility-related bits */ +#define HEAP_HAS_8BYTE_UID (0x4000) /* tuple has 8 bytes uid */ +#define HEAP_UID_MASK (0x4000) +#define HEAP_RESERVED_BIT (0x8000) /* tuple uid related bits */ + +#define HEAP_XACT_MASK (0x3FE0) /* visibility-related bits */ /* * information stored in t_infomask2: @@ 
-273,16 +268,11 @@ typedef HeapTupleHeaderData* HeapTupleHeader; * on disk. */ -#define HeapTupleCopyBaseFromPage(tup, page) \ - { \ - if (PageIs8BXidHeapVersion(page)) { \ - (tup)->t_xid_base = ((HeapPageHeader)(page))->pd_xid_base; \ - (tup)->t_multi_base = ((HeapPageHeader)(page))->pd_multi_base; \ - } else { \ - (tup)->t_xid_base = 0; \ - (tup)->t_multi_base = 0; \ - } \ - } +#define HeapTupleCopyBaseFromPage(tup, page) \ + do { \ + (tup)->t_xid_base = ((HeapPageHeader)(page))->pd_xid_base; \ + (tup)->t_multi_base = ((HeapPageHeader)(page))->pd_multi_base; \ + } while (0) #define HeapTupleCopyBase(dest, src) \ do { \ @@ -301,7 +291,7 @@ typedef HeapTupleHeaderData* HeapTupleHeader; (((tup)->t_infomask & (HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID)) == HEAP_XMIN_INVALID) #define HeapTupleHeaderGetXmin(page, tup) \ (ShortTransactionIdToNormal( \ - PageIs8BXidHeapVersion(page) ? ((HeapPageHeader)(page))->pd_xid_base : 0, (tup)->t_choice.t_heap.t_xmin)) + ((HeapPageHeader)(page))->pd_xid_base, (tup)->t_choice.t_heap.t_xmin)) #define HeapTupleHeaderGetRawXmax(page, tup) HeapTupleHeaderGetXmax(page, tup) #define HeapTupleGetRawXmin(tup) (ShortTransactionIdToNormal((tup)->t_xid_base, (tup)->t_data->t_choice.t_heap.t_xmin)) @@ -348,19 +338,19 @@ typedef HeapTupleHeaderData* HeapTupleHeader; #define HeapTupleHeaderGetXmax(page, tup) \ (ShortTransactionIdToNormal(((tup)->t_infomask & HEAP_XMAX_IS_MULTI) \ - ? (PageIs8BXidHeapVersion(page) ? ((HeapPageHeader)(page))->pd_multi_base : 0) \ - : (PageIs8BXidHeapVersion(page) ? ((HeapPageHeader)(page))->pd_xid_base : 0), \ + ? (((HeapPageHeader)(page))->pd_multi_base) \ + : (((HeapPageHeader)(page))->pd_xid_base), \ ((tup)->t_choice.t_heap.t_xmax))) #define HeapTupleHeaderSetXmax(page, tup, xid) \ ((tup)->t_choice.t_heap.t_xmax = NormalTransactionIdToShort( \ (((tup)->t_infomask & HEAP_XMAX_IS_MULTI) \ - ? (PageIs8BXidHeapVersion(page) ? ((HeapPageHeader)(page))->pd_multi_base : 0) \ - : (PageIs8BXidHeapVersion(page) ? ((HeapPageHeader)(page))->pd_xid_base : 0)), \ + ? (((HeapPageHeader)(page))->pd_multi_base) \ + : (((HeapPageHeader)(page))->pd_xid_base)), \ (xid))) #define HeapTupleHeaderSetXmin(page, tup, xid) \ ((tup)->t_choice.t_heap.t_xmin = NormalTransactionIdToShort( \ - (PageIs8BXidHeapVersion(page) ? 
((HeapPageHeader)(page))->pd_xid_base : 0), (xid))) + (((HeapPageHeader)(page))->pd_xid_base), (xid))) #define HeapTupleSetXmax(tup, xid) \ ((tup)->t_data->t_choice.t_heap.t_xmax = NormalTransactionIdToShort( \ @@ -380,7 +370,6 @@ typedef HeapTupleHeaderData* HeapTupleHeader; /* SetCmin is reasonably simple since we never need a combo CID */ #define HeapTupleHeaderSetCmin(tup, cid) \ do { \ - Assert(!((tup)->t_infomask & HEAP_MOVED)); \ (tup)->t_choice.t_heap.t_field3.t_cid = (cid); \ (tup)->t_infomask &= ~HEAP_COMBOCID; \ } while (0) @@ -388,7 +377,6 @@ typedef HeapTupleHeaderData* HeapTupleHeader; /* SetCmax must be used after HeapTupleHeaderAdjustCmax; see combocid.c */ #define HeapTupleHeaderSetCmax(tup, cid, iscombo) \ do { \ - Assert(!((tup)->t_infomask & HEAP_MOVED)); \ (tup)->t_choice.t_heap.t_field3.t_cid = (cid); \ if (iscombo) \ (tup)->t_infomask |= HEAP_COMBOCID; \ @@ -419,6 +407,20 @@ typedef HeapTupleHeaderData* HeapTupleHeader; #define HeapTupleHeaderHasOid(tup) (((tup)->t_infomask & HEAP_HASOID) != 0) +#define HeapTupleHeaderHasUid(tup) (((tup)->t_infomask & HEAP_HAS_8BYTE_UID) != 0) +#define GetUidByteLen(uid) (sizeof(uint64)) +#define HeapUidMask () +#define GetUidByteLenInfomask(uid) (HEAP_HAS_8BYTE_UID) +#define HeapTupleHeaderSetUid(tup, uid, uidLen) \ + do { \ + Assert((tup)->t_infomask & HEAP_HAS_8BYTE_UID); \ + Assert(!HeapTupleHeaderHasOid(tup)); \ + *((uint64*)((char*)(tup) + (tup)->t_hoff - uidLen)) = (uid); \ + } while (0) + +extern uint64 HeapTupleGetUid(HeapTuple tup); +extern void HeapTupleSetUid(HeapTuple tup, uint64 uid, int nattrs); + /* * Note that we stop considering a tuple HOT-updated as soon as it is known * aborted or the would-be updating transaction is known aborted. For best @@ -728,7 +730,7 @@ inline HeapTuple heaptup_alloc(Size size) */ #define XLOG_HEAP2_FREEZE 0x00 #define XLOG_HEAP2_CLEAN 0x10 -/* 0x20 is free, was XLOG_HEAP2_CLEAN_MOVE */ +/* 0x20 is free, was XLOG_HEAP2_PAGE_UPGRADE */ #define XLOG_HEAP2_PAGE_UPGRADE 0x20 #define XLOG_HEAP2_CLEANUP_INFO 0x30 #define XLOG_HEAP2_VISIBLE 0x40 diff --git a/src/include/access/multi_redo_api.h b/src/include/access/multi_redo_api.h index b10fc9db9..80c19c6cd 100644 --- a/src/include/access/multi_redo_api.h +++ b/src/include/access/multi_redo_api.h @@ -68,12 +68,12 @@ static inline int get_recovery_undozidworkers_num() return 1; } -static inline bool IsExtremeRedo() +inline bool IsExtremeRedo() { return g_instance.comm_cxt.predo_cxt.redoType == EXTREME_REDO && (get_real_recovery_parallelism() > 1); } -static inline bool IsParallelRedo() +inline bool IsParallelRedo() { return g_instance.comm_cxt.predo_cxt.redoType == PARALLEL_REDO && (get_real_recovery_parallelism() > 1); } @@ -110,10 +110,33 @@ void SwitchToDispatcherContext(); void FreeAllocatedRedoItem(); void** GetXLogInvalidPagesFromWorkers(); void SendRecoveryEndMarkToWorkersAndWaitForFinish(int code); -bool IsExtremeRtoReadWorkerRunning(); RedoWaitInfo GetRedoIoEvent(int32 event_id); void GetRedoWrokerStatistic(uint32* realNum, RedoWorkerStatsData* worker, uint32 workerLen); bool IsExtremeRtoSmartShutdown(); void ExtremeRtoRedoManagerSendEndToStartup(); +void CountXLogNumbers(XLogReaderState *record); +void ApplyRedoRecord(XLogReaderState* record); +void DiagLogRedoRecord(XLogReaderState *record, const char *funcName); +void GetRedoWorkerTimeCount(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum); +void ResetXLogStatics(); + +static inline void GetRedoStartTime(RedoTimeCost &cost) +{ + cost.startTime = GetCurrentTimestamp(); +} 
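The 8-byte uid is carved out of the tuple header tail rather than added as a new field: when HEAP_HAS_8BYTE_UID is set, the uid occupies the last GetUidByteLen() bytes below t_hoff, which is also why HeapTupleHeaderSetUid asserts the tuple carries no OID (the two would collide in that slot). The read side, reconstructed from the setter purely as an illustration (HeapTupleGetUid is declared above but defined elsewhere):

```
static uint64 HeapTupleHeaderGetUidSketch(HeapTupleHeader tup)
{
    Assert(HeapTupleHeaderHasUid(tup));
    /* mirror of HeapTupleHeaderSetUid: uid sits just below t_hoff */
    return *((uint64 *)((char *)tup + tup->t_hoff - sizeof(uint64)));
}
```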
+ +static inline void CountRedoTime(RedoTimeCost &cost) +{ + cost.totalDuration += GetCurrentTimestamp() - cost.startTime; + cost.counter += 1; +} + +static inline void CountAndGetRedoTime(RedoTimeCost &curCost, RedoTimeCost &nextCost) +{ + uint64 curTime = GetCurrentTimestamp(); + curCost.totalDuration += curTime - curCost.startTime; + curCost.counter += 1; + nextCost.startTime = curTime; +} #endif diff --git a/src/include/access/multi_redo_settings.h b/src/include/access/multi_redo_settings.h index 288766998..b097d8c6c 100644 --- a/src/include/access/multi_redo_settings.h +++ b/src/include/access/multi_redo_settings.h @@ -25,6 +25,27 @@ #ifndef MULTI_REDO_SETTINGS_H #define MULTI_REDO_SETTINGS_H +#include +#include "gs_thread.h" + +typedef enum RedoCpuBindType { + REDO_NO_CPU_BIND, + REDO_NODE_BIND, + REDO_CPU_BIND, +}RedoCpuBindType; + +typedef struct { + RedoCpuBindType bindType; + int totalNumaNum; + int totalCpuNum; + int activeCpuNum; + int *cpuArr; + bool* isBindCpuArr; + bool* isBindNumaArr; + bool* isMcsCpuArr; + cpu_set_t cpuSet; // cpu set of main thread + cpu_set_t configCpuSet; // cpu set config to redo worker +} RedoCpuBindControl; /* if you change MOST_FAST_RECOVERY_LIMIT, remember to change max_recovery_parallelism in cluster_guc.conf */ static const int MOST_FAST_RECOVERY_LIMIT = 20; @@ -32,8 +53,6 @@ static const int MAX_PARSE_WORKERS = 16; static const int MAX_REDO_WORKERS_PER_PARSE = 8; - - static const int TRXN_REDO_MANAGER_NUM = 1; static const int TRXN_REDO_WORKER_NUM = 1; static const int XLOG_READER_NUM = 3; @@ -41,10 +60,15 @@ static const int XLOG_READER_NUM = 3; static const int MAX_EXTREME_THREAD_NUM = MAX_PARSE_WORKERS * MAX_REDO_WORKERS_PER_PARSE + MAX_PARSE_WORKERS + MAX_PARSE_WORKERS + TRXN_REDO_MANAGER_NUM + TRXN_REDO_WORKER_NUM + XLOG_READER_NUM; +#ifndef ENABLE_LITE_MODE static const int MAX_RECOVERY_THREAD_NUM = (MAX_EXTREME_THREAD_NUM > MOST_FAST_RECOVERY_LIMIT) ? 
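Together the three inline helpers form a cheap phase profiler for redo workers: GetRedoStartTime stamps a slot, CountRedoTime closes it (accumulating duration and a hit count), and CountAndGetRedoTime closes one slot while opening the next with a single GetCurrentTimestamp() call, which matters on the hot replay path. A hypothetical instrumented loop over a worker's timeCostList (slot names and phase functions are invented):

```
RedoTimeCost *costs = g_redoWorker->timeCostList;

GetRedoStartTime(costs[TIME_COST_PARSE]);   /* stamp phase-1 start */
DoParseXLog();                              /* invented phase 1 */
/* one clock read both closes the parse slot and opens the apply slot */
CountAndGetRedoTime(costs[TIME_COST_PARSE], costs[TIME_COST_APPLY]);
DoApplyXLog();                              /* invented phase 2 */
CountRedoTime(costs[TIME_COST_APPLY]);      /* accumulate duration + count */
```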
MAX_EXTREME_THREAD_NUM : MOST_FAST_RECOVERY_LIMIT; +#else +static const int MAX_RECOVERY_THREAD_NUM = MOST_FAST_RECOVERY_LIMIT; +#endif void ConfigRecoveryParallelism(); - +void ProcessRedoCpuBindInfo(); +void BindRedoThreadToSpecifiedCpu(knl_thread_role thread_role); #endif diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index 80cb009b5..113b56ba0 100644 --- a/src/include/access/multixact.h +++ b/src/include/access/multixact.h @@ -93,8 +93,8 @@ extern MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, Multi extern bool MultiXactIdIsRunning(MultiXactId multi); extern bool MultiXactIdIsCurrent(MultiXactId multi); extern MultiXactId ReadNextMultiXactId(void); -extern bool DoMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining, bool nowait); -extern void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining); +extern bool DoMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining, bool nowait, int waitSec = 0); +extern void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining, int waitSec = 0); extern bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, int *remaining); extern void MultiXactIdSetOldestMember(void); extern int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember** members); @@ -123,5 +123,6 @@ extern void multixact_twophase_postabort(TransactionId xid, uint16 info, void* r extern void multixact_redo(XLogReaderState* record); extern void multixact_desc(StringInfo buf, XLogReaderState* record); +extern const char* multixact_type_name(uint8 subtype); #endif /* MULTIXACT_H */ diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index 9077f68fb..d5207a76a 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -679,6 +679,8 @@ typedef struct BTScanPosItem { /* what we remember about each match */ LocationIndex tupleOffset; /* IndexTuple's offset in workspace, if any */ Oid partitionOid; /* partition table oid in workspace, if any */ int2 bucketid; /* bucketid in workspace, if any */ + /* only used in ubtree */ + bool needRecheck; } BTScanPosItem; typedef struct BTScanPosData { @@ -1112,6 +1114,7 @@ extern Buffer _bt_walk_left(Relation rel, Buffer buf); extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost); extern bool _bt_gettuple_internal(IndexScanDesc scan, ScanDirection dir); extern bool _bt_check_natts(const Relation index, Page page, OffsetNumber offnum); +extern int _bt_getrootheight(Relation rel); /* * prototypes for functions in nbtutils.c @@ -1144,8 +1147,7 @@ extern double _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *b extern void _bt_end_parallel(); extern BTSpool* _bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead, void* meminfo); extern void _bt_spooldestroy(BTSpool* btspool); -extern void _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, const bool *isnull, - IndexTransInfo *transInfo = NULL); +extern void _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, const bool *isnull); extern void _bt_leafbuild(BTSpool* btspool, BTSpool* spool2); // these 4 functions are move here from nbtsearch.cpp(static functions) extern void _bt_buildadd(BTWriteState* wstate, BTPageState* state, IndexTuple itup); @@ -1161,6 +1163,7 @@ extern uint64 uniter_next(pg_atomic_uint64 *curiter, uint32 cycle0, uint32 cycle */ extern void btree_redo(XLogReaderState* record); extern void btree_desc(StringInfo buf, XLogReaderState* 
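RedoCpuBindControl splits what was probed (totalNumaNum, totalCpuNum, the per-CPU bool maps) from what gets applied (configCpuSet), with RedoCpuBindType choosing no pinning, per-NUMA-node pinning, or per-core pinning. A minimal sketch of the apply step in BindRedoThreadToSpecifiedCpu(), assuming it ends in the standard Linux affinity call (the real routine also branches on bindType and the MCS arrays):

```
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static void ApplyRedoCpuSet(const cpu_set_t *configCpuSet)
{
    /* returns 0 on success; on failure the worker keeps its inherited mask */
    (void)pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), configCpuSet);
}
```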
record); +extern const char* btree_type_name(uint8 subtype); extern void btree_xlog_startup(void); extern void btree_xlog_cleanup(void); extern bool btree_safe_restartpoint(void); diff --git a/src/include/access/obs/obs_am.h b/src/include/access/obs/obs_am.h index 8b8b476f2..fc1aaec7b 100755 --- a/src/include/access/obs/obs_am.h +++ b/src/include/access/obs/obs_am.h @@ -30,7 +30,9 @@ #include "nodes/pg_list.h" #include "storage/buf/buffile.h" #include "replication/slot.h" +#ifndef ENABLE_LITE_MODE #include "eSDKOBS.h" +#endif #define OBS_MAX_UPLOAD_ID_LEN 256 #define OBS_MAX_FILE_PATH 1024 @@ -130,7 +132,7 @@ typedef struct OBSReadWriteHandler { bool in_computing; /* size_t m_offset; =>get_cond.start_byte */ /* obs_bucket_context m_bucketCtx; =>m_option.bucket_options */ - +#ifndef ENABLE_LITE_MODE obs_options m_option; obs_object_info m_object_info; @@ -144,12 +146,15 @@ typedef struct OBSReadWriteHandler { * More OBS operation properties added here */ } properties; +#endif } OBSReadWriteHandler; typedef struct list_service_data { int headerPrinted; int allDetails; +#ifndef ENABLE_LITE_MODE obs_status ret_status; +#endif } list_service_data; extern void SetObsMemoryContext(MemoryContext mctx); diff --git a/src/include/access/parallel_recovery/dispatcher.h b/src/include/access/parallel_recovery/dispatcher.h index 39e09701d..a68bce254 100644 --- a/src/include/access/parallel_recovery/dispatcher.h +++ b/src/include/access/parallel_recovery/dispatcher.h @@ -67,6 +67,8 @@ typedef struct LogDispatcher { XLogRecPtr dispatchEndRecPtr; /* end of dispatch record read */ bool checkpointNeedFullSync; RedoInterruptCallBackFunc oldStartupIntrruptFunc; + XLogRedoNumStatics xlogStatics[RM_NEXT_ID][MAX_XLOG_INFO_NUM]; + RedoTimeCost *startupTimeCost; } LogDispatcher; extern LogDispatcher* g_dispatcher; @@ -110,12 +112,11 @@ void** GetXLogInvalidPagesFromWorkers(); /* Other utility functions. 
*/ uint32 GetWorkerId(const RelFileNode& node, BlockNumber block, ForkNumber forkNum); -bool XactWillRemoveRelFiles(XLogReaderState* record); XLogReaderState* NewReaderState(XLogReaderState* readerState, bool bCopyState = false); void FreeAllocatedRedoItem(); void GetReplayedRecPtrFromWorkers(XLogRecPtr *readPtr, XLogRecPtr *endPtr); +void GetReplayedRecPtrFromWorkers(XLogRecPtr *endPtr); void GetReplayedRecPtrFromUndoWorkers(XLogRecPtr *readPtr, XLogRecPtr *endPtr); -void DiagLogRedoRecord(XLogReaderState* record, const char* funcName); List* CheckImcompleteAction(List* imcompleteActionList); void SetPageWorkStateByThreadId(uint32 threadState); RedoWaitInfo redo_get_io_event(int32 event_id); @@ -123,11 +124,17 @@ void redo_get_wroker_statistic(uint32* realNum, RedoWorkerStatsData* worker, uin extern void redo_dump_all_stats(); void WaitRedoWorkerIdle(); void SendClearMarkToAllWorkers(); +void SendClosefdMarkToAllWorkers(); +void SendCleanInvalidPageMarkToAllWorkers(RepairFileKey key); extern void SetStartupBufferPinWaitBufId(int bufid); extern void GetStartupBufferPinWaitBufId(int *bufids, uint32 len); extern uint32 GetStartupBufferPinWaitBufLen(); +extern void InitReaderStateByOld(XLogReaderState *newState, XLogReaderState *oldState, bool isNew); +extern void CopyDataFromOldReader(XLogReaderState *newReaderState, XLogReaderState *oldReaderState); bool TxnQueueIsEmpty(TxnRedoWorker* worker); +void redo_get_wroker_time_count(RedoWorkerTimeCountsInfo **workerCountInfoList, uint32 *realNum); + } #endif diff --git a/src/include/access/parallel_recovery/page_redo.h b/src/include/access/parallel_recovery/page_redo.h index 3e0a720b7..e3056d9fc 100644 --- a/src/include/access/parallel_recovery/page_redo.h +++ b/src/include/access/parallel_recovery/page_redo.h @@ -34,20 +34,31 @@ #include "access/parallel_recovery/posix_semaphore.h" #include "access/parallel_recovery/spsc_blocking_queue.h" +#include "postmaster/pagerepair.h" namespace parallel_recovery { -static const uint32 PAGE_WORK_QUEUE_SIZE = 1024; +static const uint32 PAGE_WORK_QUEUE_SIZE = 4096; static const uint32 PAGE_REDO_WORKER_APPLY_ITEM = 0; static const uint32 PAGE_REDO_WORKER_SKIP_ITEM = 1; +static const uint32 MAX_REMOTE_READ_INFO_NUM = 100; struct SafeRestartPoint { SafeRestartPoint* next; XLogRecPtr restartPoint; }; +typedef struct BadBlockRecEnt{ + RepairBlockKey key; + XLogPhyBlock pblk; + XLogRecPtr rec_min_lsn; + XLogRecPtr rec_max_lsn; + RedoItem *head; + RedoItem *tail; +} BadBlockRecEnt; + typedef enum { DataPageWorker, UndoLogZidWorker, @@ -171,9 +182,13 @@ struct PageRedoWorker { uint64 statWaitReach; uint64 statWaitReplay; pg_atomic_uint32 readyStatus; - pg_atomic_uint32 skipItemFlg; MemoryContext oldCtx; int bufferPinWaitBufId; + RedoTimeCost timeCostList[TIME_COST_NUM]; + uint32 remoteReadPageNum; + HTAB *badPageHashTbl; + char page[BLCKSZ]; + XLogReaderState *current_item; }; extern THR_LOCAL PageRedoWorker* g_redoWorker; @@ -198,6 +213,9 @@ void PageRedoWorkerMain(); /* Dispatcher phases. */ bool SendPageRedoEndMark(PageRedoWorker* worker); bool SendPageRedoClearMark(PageRedoWorker* worker); +bool SendPageRedoClosefdMark(PageRedoWorker *worker); +bool SendPageRedoCleanInvalidPageMark(PageRedoWorker *worker, RepairFileKey key); + void WaitPageRedoWorkerReachLastMark(PageRedoWorker* worker); /* Redo processing. 
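The new closefd and clean-invalid-page marks reuse the pattern of the existing end/clear marks: the dispatcher pushes a sentinel into every page-redo worker's queue so the request is processed in order with real redo items, guaranteeing a repaired file is closed (or its invalid-page entries dropped) on all workers before it is recreated. The broadcast is presumably a simple fan-out (worker array and count names are assumptions):

```
void SendCleanInvalidPageMarkToAllWorkersSketch(RepairFileKey key)
{
    for (uint32 i = 0; i < g_dispatcher->pageWorkerCount; i++)
        (void)SendPageRedoCleanInvalidPageMark(g_dispatcher->pageWorkers[i], key);
}
```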
@@ -216,16 +234,26 @@ void* GetBTreeIncompleteActions(PageRedoWorker* worker);
 void ClearBTreeIncompleteActions(PageRedoWorker* worker);
 void* GetXLogInvalidPages(PageRedoWorker* worker);
 bool RedoWorkerIsIdle(PageRedoWorker* worker);
-void PageRedoSetAffinity(uint32 id);
-
 void DumpPageRedoWorker(PageRedoWorker* worker);
 void SetPageRedoWorkerIndex(int index);
-void OnlyFreeRedoItem(RedoItem* item);
 void SetCompletedReadEndPtr(PageRedoWorker* worker, XLogRecPtr readPtr, XLogRecPtr endPtr);
 void GetCompletedReadEndPtr(PageRedoWorker* worker, XLogRecPtr *readPtr, XLogRecPtr *endPtr);
 void UpdateRecordGlobals(RedoItem* item, HotStandbyState standbyState);
 void redo_dump_worker_queue_info();
-
 void WaitAllPageWorkersQueueEmpty();
+
+/* functions for the recovery thread to handle bad blocks */
+HTAB* BadBlockHashTblCreate();
+void RepairPageAndRecoveryXLog(BadBlockRecEnt *page_info, const char *page);
+void RecordBadBlockAndPushToRemote(XLogReaderState *record, RepairBlockKey key,
+    PageErrorType error_type, XLogRecPtr old_lsn, XLogPhyBlock pblk);
+void CheckRemoteReadAndRepairPage(BadBlockRecEnt *entry);
+void SeqCheckRemoteReadAndRepairPage();
+void ClearRecoveryThreadHashTbl(const RelFileNode &node, ForkNumber forknum, BlockNumber minblkno,
+    bool segment_shrink);
+void BatchClearRecoveryThreadHashTbl(Oid spcNode, Oid dbNode);
+void ClearSpecificsPageEntryAndMem(BadBlockRecEnt *entry);
+
+
 }
 #endif
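BadBlockHashTblCreate above returns a dynahash table. A hedged sketch of what such a constructor could look like, reusing the RepairBlockKey and BadBlockRecEnt types declared in this header; the table name, sizing, and flags are illustrative assumptions, not the shipped code:

```cpp
#include "utils/hsearch.h"

/* Hypothetical sketch of a bad-block table constructor: keyed by
 * RepairBlockKey (which must be the first member of BadBlockRecEnt for
 * dynahash lookups to work), sized by MAX_REMOTE_READ_INFO_NUM above. */
static HTAB *BadBlockHashTblCreateSketch(void)
{
    HASHCTL ctl;
    errno_t rc = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl));
    securec_check(rc, "", "");
    ctl.keysize = sizeof(RepairBlockKey);   /* rnode + fork + block */
    ctl.entrysize = sizeof(BadBlockRecEnt);
    ctl.hash = tag_hash;                    /* generic binary-key hash */
    return hash_create("recovery bad block table", MAX_REMOTE_READ_INFO_NUM,
                       &ctl, HASH_ELEM | HASH_FUNCTION);
}
```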
diff --git a/src/include/access/parallel_recovery/redo_item.h b/src/include/access/parallel_recovery/redo_item.h
index 49b33f59b..a1ca3b2e9 100644
--- a/src/include/access/parallel_recovery/redo_item.h
+++ b/src/include/access/parallel_recovery/redo_item.h
@@ -97,12 +97,11 @@ typedef struct ContextUpdateOp {
 } ContextUpdateOp;
 
 typedef struct RedoItem {
-    /* Old version. */
-    bool oldVersion;
     bool sharewithtrxn; /* true when designatedWorker is trxn or all and needs to sync with page workers */
     bool blockbytrxn;   /* true when designatedWorker is a page worker and needs to sync with trxn */
     bool imcheckpoint;
     bool replay_undo;   /* if true replay undo, otherwise replay the data page */
+    bool need_free;     /* set when bad block repair finishes; pfree must be called to free this item */
     /* Number of workers sharing this item. */
     uint32 shareCount;
     /* Number of page workers holding reference to this item + undo redo worker */
@@ -132,6 +131,7 @@ typedef struct RedoItem {
     TransactionId RecentXmin;
     ServerMode syncServerMode;
     pg_atomic_uint32 freed;
+    RedoItem *remoteNext;
 } RedoItem;
 
 static const int32 ANY_BLOCK_ID = -1;
@@ -146,8 +146,6 @@ RedoItem* CreateLSNMarker(XLogReaderState* record, List* expectedTLIs, bool buse
 
 bool IsLSNMarker(const RedoItem* item);
 
-void ApplyRedoRecord(XLogReaderState* record, bool bOld);
-
 static inline RedoItem* GetRedoItemPtr(XLogReaderState* record)
 {
     return (RedoItem*)(((char*)record) - offsetof(RedoItem, record));
diff --git a/src/include/access/parallel_recovery/txn_redo.h b/src/include/access/parallel_recovery/txn_redo.h
index d331fa47e..bfbd04b1b 100644
--- a/src/include/access/parallel_recovery/txn_redo.h
+++ b/src/include/access/parallel_recovery/txn_redo.h
@@ -38,6 +38,5 @@ void ApplyReadyTxnLogRecords(TxnRedoWorker* worker, bool forceAll);
 void MoveTxnItemToApplyQueue(TxnRedoWorker* worker);
 void DumpTxnWorker(TxnRedoWorker* txnWorker);
 bool IsTxnWorkerIdle(TxnRedoWorker* worker);
-void FreeTxnItem();
 }
 #endif
diff --git a/src/include/access/redo_statistic_msg.h b/src/include/access/redo_statistic_msg.h
index 3a3c36bc9..96154a94b 100644
--- a/src/include/access/redo_statistic_msg.h
+++ b/src/include/access/redo_statistic_msg.h
@@ -36,6 +36,11 @@ typedef struct RedoWaitInfo {
     int64 counter;
 } RedoWaitInfo;
 
+typedef struct XLogRedoNumStatics {
+    volatile uint64 total_num;
+    volatile uint64 extra_num;
+} XLogRedoNumStatics;
+
 typedef enum RedoWaitStats {
     WAIT_READ_XLOG = 0,
     WAIT_READ_DATA,
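XLogRedoNumStatics pairs with the new LogDispatcher::xlogStatics[RM_NEXT_ID][MAX_XLOG_INFO_NUM] array from dispatcher.h. A hedged sketch of how a dispatch loop might feed it: the resource-manager id selects the row and the record's info high nibble (at most 16 values, hence MAX_XLOG_INFO_NUM) the column. Treating extra_num as a "needed special handling" counter is an assumption for illustration:

```cpp
/* Sketch only, not the shipped dispatcher code. */
static inline void CountRedoRecordSketch(LogDispatcher *dispatcher,
                                         XLogReaderState *record, bool extra)
{
    RmgrId rmid = XLogRecGetRmid(record);
    uint8 info = (XLogRecGetInfo(record) & ~XLR_INFO_MASK) >> 4; /* 0..15 */

    dispatcher->xlogStatics[rmid][info].total_num++;
    if (extra)
        dispatcher->xlogStatics[rmid][info].extra_num++;
}
```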
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index ce6348cc1..732a1f704 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -23,6 +23,7 @@
 #include "nodes/pg_list.h"
 #include "nodes/primnodes.h"
 #include "utils/rel.h"
+#include "utils/rel_gs.h"
 
 /* types supported by reloptions */
 typedef enum relopt_type {
@@ -130,22 +131,6 @@ typedef struct {
     int offset; /* offset of field in result struct */
 } relopt_parse_elt;
 
-struct TableCreateSupport {
-    bool compressType;
-    bool compressLevel;
-    bool compressChunkSize;
-    bool compressPreAllocChunks;
-    bool compressByteConvert;
-    bool compressDiffConvert;
-};
-
-inline bool HasCompressOption(TableCreateSupport *tableCreateSupport)
-{
-    return tableCreateSupport->compressLevel || tableCreateSupport->compressChunkSize ||
-        tableCreateSupport->compressPreAllocChunks || tableCreateSupport->compressByteConvert ||
-        tableCreateSupport->compressDiffConvert;
-}
-
 /*
  * The following are the table append modes currently supported.
  * on: mark the table on-line scaleout mode, when it is set, later data write by append mode.
@@ -298,10 +283,7 @@ extern void ForbidUserToSetDefinedIndexOptions(List* options);
 extern bool CheckRelOptionValue(Datum options, const char* opt_name);
 extern void forbid_to_set_options_for_timeseries_tbl(List* options);
 extern List* RemoveRelOption(List* options, const char* optName, bool* removed);
-void RowTblCheckCompressionOption(List *options);
+void RowTblCheckCompressionOption(List *options, int8 rowCompress = REL_CMPRS_PAGE_PLAIN);
 void RowTblCheckHashBucketOption(List* options, StdRdOptions* std_opt);
-void ForbidUserToSetCompressedOptions(List *options);
-void SetOneOfCompressOption(const char *defname, TableCreateSupport *tableCreateSupport);
-void CheckCompressOption(TableCreateSupport *tableCreateSupport);
 
 #endif /* RELOPTIONS_H */
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 610fd259b..4de222f0c 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -67,6 +67,10 @@ typedef struct HeapScanDescData {
      */
     HeapTupleData rs_ctup; /* current tuple in scan, if any */
     ParallelHeapScanDesc rs_parallel; /* parallel scan information */
+
+    HeapTupleData* rs_ctupBatch;
+
+    /* this must be the end of this structure */
     HeapTupleHeaderData rs_ctbuf_hdr;
 } HeapScanDescData;
diff --git a/src/include/access/rmgr.h b/src/include/access/rmgr.h
index aa2ff4068..0fc9d5fa1 100644
--- a/src/include/access/rmgr.h
+++ b/src/include/access/rmgr.h
@@ -19,7 +19,7 @@ typedef uint8 RmgrId;
  * Note: RM_MAX_ID must fit in RmgrId; widening that type will affect the XLOG
  * file format.
  */
-#define PG_RMGR(symname, name, redo, parse, startup, cleanup, restartpoint, undo, undo_desc) symname,
+#define PG_RMGR(symname, name, redo, parse, startup, cleanup, restartpoint, undo, undo_desc, type_name) symname,
 
 typedef enum RmgrIds {
#include "access/rmgrlist.h"
@@ -28,4 +28,5 @@ typedef enum RmgrIds {
 #undef PG_RMGR
 
 #define RM_MAX_ID (RM_NEXT_ID - 1)
+#define MAX_XLOG_INFO_NUM 16
 #endif /* RMGR_H */
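The extra PG_RMGR column works because rmgrlist.h is an X-macro list: every consumer defines PG_RMGR to select the columns it cares about and then includes the list. rmgr.h above keeps only `symname` for the enum; a consumer of the new `type_name` column could build a parallel lookup table the same way. A sketch (the table name `RmgrTypeNameTable` is hypothetical):

```cpp
typedef const char *(*rm_type_name_fn)(uint8 subtype);

/* Expand the same list again, this time keeping only the last column. */
#define PG_RMGR(symname, name, redo, parse, startup, cleanup, restartpoint, undo, undo_desc, type_name) \
    type_name,
static const rm_type_name_fn RmgrTypeNameTable[] = {
#include "access/rmgrlist.h"
};
#undef PG_RMGR
```

Because both expansions include the identical list, `RmgrTypeNameTable[rmid]` stays in lockstep with the RmgrIds enum by construction.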
diff --git a/src/include/access/rmgrlist.h b/src/include/access/rmgrlist.h
index b62200281..34f8bc535 100644
--- a/src/include/access/rmgrlist.h
+++ b/src/include/access/rmgrlist.h
@@ -32,48 +32,49 @@
 * Changes to this list possibly need an XLOG_PAGE_MAGIC bump.
 */
-/* symbol name, textual name, redo, desc, identify, startup, cleanup, undo, undo_desc */
-PG_RMGR(RM_XLOG_ID, "XLOG", xlog_redo, xlog_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_XACT_ID, "Transaction", xact_redo, xact_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_SMGR_ID, "Storage", smgr_redo, smgr_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_CLOG_ID, "CLOG", clog_redo, clog_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_DBASE_ID, "Database", dbase_redo, dbase_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_TBLSPC_ID, "Tablespace", tblspc_redo, tblspc_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_MULTIXACT_ID, "MultiXact", multixact_redo, multixact_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_RELMAP_ID, "RelMap", relmap_redo, relmap_desc, NULL, NULL, NULL, NULL, NULL)
+/* symbol name, textual name, redo, desc, identify, startup, cleanup, undo, undo_desc, info_type_name */
+PG_RMGR(RM_XLOG_ID, "XLOG", xlog_redo, xlog_desc, NULL, NULL, NULL, NULL, NULL, xlog_type_name)
+PG_RMGR(RM_XACT_ID, "Transaction", xact_redo, xact_desc, NULL, NULL, NULL, NULL, NULL, xact_type_name)
+PG_RMGR(RM_SMGR_ID, "Storage", smgr_redo, smgr_desc, NULL, NULL, NULL, NULL, NULL, smgr_type_name)
+PG_RMGR(RM_CLOG_ID, "CLOG", clog_redo, clog_desc, NULL, NULL, NULL, NULL, NULL, clog_type_name)
+PG_RMGR(RM_DBASE_ID, "Database", dbase_redo, dbase_desc, NULL, NULL, NULL, NULL, NULL, dbase_type_name)
+PG_RMGR(RM_TBLSPC_ID, "Tablespace", tblspc_redo, tblspc_desc, NULL, NULL, NULL, NULL, NULL, tblspc_type_name)
+PG_RMGR(RM_MULTIXACT_ID, "MultiXact", multixact_redo, multixact_desc, NULL, NULL, NULL, NULL, NULL, multixact_type_name)
+PG_RMGR(RM_RELMAP_ID, "RelMap", relmap_redo, relmap_desc, NULL, NULL, NULL, NULL, NULL, relmap_type_name)
 
-#ifndef ENABLE_MULTIPLE_NODES
-PG_RMGR(RM_STANDBY_ID, "Standby", standby_redo, standby_desc, StandbyXlogStartup, StandbyXlogCleanup, StandbySafeRestartpoint, NULL, NULL)
-#else
-PG_RMGR(RM_STANDBY_ID, "Standby", standby_redo, standby_desc, NULL, NULL, NULL, NULL, NULL)
-#endif
+PG_RMGR(RM_STANDBY_ID, "Standby", standby_redo, standby_desc, StandbyXlogStartup, StandbyXlogCleanup, \
+    StandbySafeRestartpoint, NULL, NULL, standby_type_name)
 
-PG_RMGR(RM_HEAP2_ID, "Heap2", heap2_redo, heap2_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_HEAP_ID, "Heap", heap_redo, heap_desc, NULL, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_HEAP2_ID, "Heap2", heap2_redo, heap2_desc, NULL, NULL, NULL, NULL, NULL, heap2_type_name)
+PG_RMGR(RM_HEAP_ID, "Heap", heap_redo, heap_desc, NULL, NULL, NULL, NULL, NULL, heap_type_name)
 PG_RMGR(RM_BTREE_ID, "Btree", btree_redo, btree_desc, btree_xlog_startup, btree_xlog_cleanup, btree_safe_restartpoint,
-    NULL, NULL)
-PG_RMGR(RM_HASH_ID, "Hash", hash_redo, hash_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_GIN_ID, "Gin", gin_redo, gin_desc, gin_xlog_startup, gin_xlog_cleanup, NULL, NULL, NULL)
-PG_RMGR(RM_GIST_ID, "Gist", gist_redo, gist_desc, gist_xlog_startup, gist_xlog_cleanup, NULL, NULL, NULL)
-PG_RMGR(RM_SEQ_ID, "Sequence", seq_redo, seq_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_SPGIST_ID, "SPGist", spg_redo, spg_desc, spg_xlog_startup, spg_xlog_cleanup, NULL, NULL, NULL)
-PG_RMGR(RM_SLOT_ID, "Slot", slot_redo, slot_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_HEAP3_ID, "Heap3", heap3_redo, heap3_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_BARRIER_ID, "Barrier", barrier_redo, barrier_desc, NULL, NULL, NULL, NULL, NULL)
+    NULL, NULL, btree_type_name)
+PG_RMGR(RM_HASH_ID, "Hash", hash_redo, hash_desc, NULL, NULL, NULL, NULL, NULL, hash_type_name)
+PG_RMGR(RM_GIN_ID, "Gin", gin_redo, gin_desc, gin_xlog_startup, gin_xlog_cleanup, NULL, NULL, NULL, gin_type_name)
+PG_RMGR(RM_GIST_ID, "Gist", gist_redo, gist_desc, gist_xlog_startup, gist_xlog_cleanup, NULL, NULL, NULL, \
+    gist_type_name)
+PG_RMGR(RM_SEQ_ID, "Sequence", seq_redo, seq_desc, NULL, NULL, NULL, NULL, NULL, seq_type_name)
+PG_RMGR(RM_SPGIST_ID, "SPGist", spg_redo, spg_desc, spg_xlog_startup, spg_xlog_cleanup, NULL, NULL, NULL, \
+    spg_type_name)
+PG_RMGR(RM_SLOT_ID, "Slot", slot_redo, slot_desc, NULL, NULL, NULL, NULL, NULL, slot_type_name)
+PG_RMGR(RM_HEAP3_ID, "Heap3", heap3_redo, heap3_desc, NULL, NULL, NULL, NULL, NULL, heap3_type_name)
+PG_RMGR(RM_BARRIER_ID, "Barrier", barrier_redo, barrier_desc, NULL, NULL, NULL, NULL, NULL, barrier_type_name)
 #ifdef ENABLE_MOT
-PG_RMGR(RM_MOT_ID, "MOT", MOTRedo, MOTDesc, NULL, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_MOT_ID, "MOT", MOTRedo, MOTDesc, NULL, NULL, NULL, NULL, NULL, MOT_type_name)
 #endif
-PG_RMGR(RM_UHEAP_ID, "UHeap", UHeapRedo, UHeapDesc, NULL, NULL, NULL, UHeapUndoActions, NULL)
-PG_RMGR(RM_UHEAP2_ID, "UHeap2", UHeap2Redo, UHeap2Desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_UNDOLOG_ID, "UndoLog", undo::UndoXlogRedo, undo::UndoXlogDesc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_UHEAPUNDO_ID, "UHeapUndo", UHeapUndoRedo, UHeapUndoDesc, NULL, NULL, NULL, NULL, NULL)
+PG_RMGR(RM_UHEAP_ID, "UHeap", UHeapRedo, UHeapDesc, NULL, NULL, NULL, UHeapUndoActions, NULL, uheap_type_name)
+PG_RMGR(RM_UHEAP2_ID, "UHeap2", UHeap2Redo, UHeap2Desc, NULL, NULL, NULL, NULL, NULL, uheap2_type_name)
+PG_RMGR(RM_UNDOLOG_ID, "UndoLog", undo::UndoXlogRedo, undo::UndoXlogDesc, NULL, NULL, NULL, NULL, NULL, \
+    undo::undo_xlog_type_name)
+PG_RMGR(RM_UHEAPUNDO_ID, "UHeapUndo", UHeapUndoRedo, UHeapUndoDesc, NULL, NULL, NULL, NULL, NULL, uheap_undo_type_name)
 PG_RMGR(RM_UNDOACTION_ID, "UndoAction", undo::UndoXlogRollbackFinishRedo, undo::UndoXlogRollbackFinishDesc, NULL, NULL,
-    NULL, NULL, NULL)
+    NULL, NULL, NULL, undo::undo_xlog_roll_back_finish_type_name)
 PG_RMGR(RM_UBTREE_ID, "UBtree", UBTreeRedo, UBTreeDesc, UBTreeXlogStartup, UBTreeXlogCleanup, UBTreeSafeRestartPoint,
-NULL, NULL)
-PG_RMGR(RM_UBTREE2_ID, "UBtree2", UBTree2Redo, UBTree2Desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_SEGPAGE_ID, "SegpageStorage", segpage_smgr_redo, segpage_smgr_desc, NULL, NULL, NULL, NULL, NULL)
-PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, NULL, NULL, NULL, NULL, NULL)
-
+NULL, NULL, ubtree_type_name)
+PG_RMGR(RM_UBTREE2_ID, "UBtree2", UBTree2Redo, UBTree2Desc, NULL, NULL, NULL, NULL, NULL, ubtree2_type_name)
+PG_RMGR(RM_SEGPAGE_ID, "SegpageStorage", segpage_smgr_redo, segpage_smgr_desc, NULL, NULL, NULL, NULL, NULL, \
+    segpage_smgr_type_name)
+PG_RMGR(RM_REPLORIGIN_ID, "ReplicationOrigin", replorigin_redo, replorigin_desc, NULL, NULL, NULL, NULL, NULL, \
+    replorigin_type_name)
diff --git a/src/include/access/spgist.h b/src/include/access/spgist.h
index 71979fcdb..6b85eea65 100644
--- a/src/include/access/spgist.h
+++ b/src/include/access/spgist.h
@@ -186,6 +186,7 @@ extern Datum spgvacuumcleanup(PG_FUNCTION_ARGS);
 /* spgxlog.c */
 extern void spg_redo(XLogReaderState* record);
 extern void spg_desc(StringInfo buf, XLogReaderState* record);
+extern const char* spg_type_name(uint8 subtype);
 extern void spg_xlog_startup(void);
 extern void spg_xlog_cleanup(void);
 extern bool IsSpgistVacuum(XLogReaderState* record);
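Every *_type_name callback added by this patch has the same shape: map a record's subtype byte to a stable, human-readable string. A minimal hedged sketch using XLOG_UBTREE2_FREEZE, which is visible in this patch's ubtree.h context; returning string literals rather than palloc'd memory is an assumption about the intended contract:

```cpp
/* Sketch of the callback shape, not the shipped implementation. */
const char* ubtree2_type_name_sketch(uint8 subtype)
{
    switch (subtype) {
        case XLOG_UBTREE2_FREEZE:
            return "ubtree2_freeze";
        default:
            return "unknown_type";
    }
}
```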
diff --git a/src/include/access/sysattr.h b/src/include/access/sysattr.h
index ec2f9793d..7d8d13932 100644
--- a/src/include/access/sysattr.h
+++ b/src/include/access/sysattr.h
@@ -28,7 +28,8 @@
 #define XC_NodeIdAttributeNumber (-8)
 #define BucketIdAttributeNumber (-9)
-#define FirstLowInvalidHeapAttributeNumber (-10)
+#define UidAttributeNumber (-10)
+#define FirstLowInvalidHeapAttributeNumber (-11)
 
 #else
 
 #define FirstLowInvalidHeapAttributeNumber (-8)
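The renumbering keeps the invariant that system attributes occupy the negative attribute numbers strictly above FirstLowInvalidHeapAttributeNumber: inserting UidAttributeNumber at -10 pushes the sentinel down to -11, so existing exclusive bound checks still cover the new column. An illustrative sketch of such a loop:

```cpp
#include "access/sysattr.h"

/* Visit every valid system attribute number, from -1 (ctid) down to
 * -10 (UidAttributeNumber); the sentinel -11 itself is excluded. */
static void VisitSystemAttributesSketch(void (*visit)(int attno))
{
    for (int attno = -1; attno > FirstLowInvalidHeapAttributeNumber; attno--)
        visit(attno);
}
```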
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 2c709ee74..8c767a41e 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -175,7 +175,7 @@ typedef struct TableAmRoutine {
      * @param: should_free true if clear the slot's tuple contents by pfree_ext() during ExecClearTuple
      * @return: none
      */
-    void (*tslot_store_tuple)(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool should_free);
+    void (*tslot_store_tuple)(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode);
 
     /*
      * Fill up first natts entries of tts_values and tts_isnull arrays with
@@ -189,6 +189,9 @@
      */
     void (*tslot_getsomeattrs)(TupleTableSlot *slot, int natts);
+
+    void (*tslot_formbatch)(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts);
+
     /*
      * Fetches a given attribute from the slot's current tuple.
      * attnums beyond the slot's tupdesc's last attribute will be considered NULL
@@ -410,6 +413,8 @@
      */
     Tuple (*scan_getnexttuple)(TableScanDesc sscan, ScanDirection direction);
 
+    bool (*scan_GetNextBatch)(TableScanDesc scan, ScanDirection direction);
+
     /*
      * Get next page
      */
@@ -457,7 +462,8 @@
     TM_Result (*tuple_lock)(Relation relation, Tuple tuple, Buffer *buffer, CommandId cid,
         LockTupleMode mode, bool nowait, TM_FailureData *tmfd, bool allow_lock_self, bool follow_updates,
         bool eval, Snapshot snapshot,
-        ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid);
+        ItemPointer tid, bool isSelectForUpdate, bool isUpsert, TransactionId conflictXid,
+        int waitSec);
 
     Tuple (*tuple_lock_updated)(CommandId cid, Relation relation, int lockmode, ItemPointer tid,
         TransactionId priorXmax, Snapshot snapshot, bool isSelectForUpdate);
@@ -503,10 +509,11 @@ extern MinimalTuple tableam_tslot_copy_minimal_tuple(TupleTableSlot *slot);
 extern void tableam_tslot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree);
 extern HeapTuple tableam_tslot_get_heap_tuple(TupleTableSlot *slot);
 extern HeapTuple tableam_tslot_copy_heap_tuple(TupleTableSlot *slot);
-extern void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool should_free);
+extern void tableam_tslot_store_tuple(Tuple tuple, TupleTableSlot *slot, Buffer buffer, bool shouldFree, bool batchMode);
 extern void tableam_tslot_getsomeattrs(TupleTableSlot *slot, int natts);
 extern Datum tableam_tslot_getattr(TupleTableSlot *slot, int attnum, bool *isnull);
 extern void tableam_tslot_getallattrs(TupleTableSlot *slot);
+extern void tableam_tslot_formbatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int natts);
 extern bool tableam_tslot_attisnull(TupleTableSlot *slot, int attnum);
 extern Tuple tableam_tslot_get_tuple_from_slot(Relation relation, TupleTableSlot *slot);
 extern Datum tableam_tops_getsysattr(Tuple tup, int attnum, TupleDesc tuple_desc, bool *isnull,
@@ -588,7 +595,7 @@ extern TM_Result tableam_tuple_update(Relation relation, Relation parentRelation
 extern TM_Result tableam_tuple_lock(Relation relation, Tuple tuple, Buffer *buffer, CommandId cid,
     LockTupleMode mode, bool nowait, TM_FailureData *tmfd, bool allow_lock_self, bool follow_updates,
     bool eval, Snapshot snapshot, ItemPointer tid, bool isSelectForUpdate, bool isUpsert = false,
-    TransactionId conflictXid = InvalidTransactionId);
+    TransactionId conflictXid = InvalidTransactionId, int waitSec = 0);
 extern Tuple tableam_tuple_lock_updated(CommandId cid, Relation relation, int lockmode, ItemPointer tid,
     TransactionId priorXmax, Snapshot snapshot = NULL, bool isSelectForUpdate = false);
 extern void tableam_tuple_check_visible(Relation relation, Snapshot snapshot, Tuple tuple, Buffer buffer);
@@ -609,6 +616,7 @@ extern TableScanDesc tableam_scan_begin_bm(Relation relation, Snapshot snapshot,
 extern TableScanDesc tableam_scan_begin_sampling(Relation relation, Snapshot snapshot, int nkeys, ScanKey key,
     bool allow_strat, bool allow_sync, RangeScanInRedis rangeScanInRedis = { false, 0, 0 });
 extern Tuple tableam_scan_getnexttuple(TableScanDesc sscan, ScanDirection direction);
+extern bool tableam_scan_gettuplebatchmode(TableScanDesc sscan, ScanDirection direction);
 extern void tableam_scan_getpage(TableScanDesc sscan, BlockNumber page);
 extern Tuple tableam_scan_gettuple_for_verify(TableScanDesc sscan, ScanDirection direction, bool isValidRelationPage);
 extern void tableam_scan_end(TableScanDesc sscan);
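The two new hooks come as a pair: scan_GetNextBatch advances the scan in page-sized batches while tslot_formbatch flushes the slot's rows into a VectorBatch. The header alone does not pin down the exact contract between them, so the loop below is only a sketch of the intended call pattern through the new wrappers, under the stated assumptions:

```cpp
#include "access/tableam.h"

/* Hedged sketch: assumes gettuplebatchmode returns false at end of relation
 * and that VectorBatch::m_rows tracks the rows accumulated so far. */
static void DrainScanIntoBatchSketch(TableScanDesc scan, TupleTableSlot *slot,
                                     VectorBatch *batch, int natts)
{
    while (tableam_scan_gettuplebatchmode(scan, ForwardScanDirection)) {
        tableam_tslot_formbatch(slot, batch, batch->m_rows, natts);
    }
}
```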
diff --git a/src/include/access/transam.h b/src/include/access/transam.h
index 8fc837ef6..10248c3ef 100755
--- a/src/include/access/transam.h
+++ b/src/include/access/transam.h
@@ -160,6 +160,11 @@ typedef struct VariableCacheData {
     pg_atomic_uint64 nextCommitSeqNo;
     TransactionId latestCompletedXid; /* newest XID that has committed or
                                        * aborted */
+
+    CommitSeqNo xlogMaxCSN;     /* latest CSN read in the xlog */
+    CommitSeqNo* max_csn_array; /* Record the xlogMaxCSN of all nodes. */
+    bool* main_standby_array;   /* Record whether node is main standby or not */
+
     /*
      * These fields only set in startup to normal
      */
@@ -233,7 +238,6 @@ extern TransactionId TransactionIdLatest(TransactionId mainxid, int nxids, const
 extern XLogRecPtr TransactionIdGetCommitLSN(TransactionId xid);
 extern bool LatestFetchTransactionIdDidAbort(TransactionId transactionId);
 extern bool LatestFetchCSNDidAbort(TransactionId transactionId);
-extern bool TransactionIdLogicallyPrecedes(TransactionId id1, TransactionId id2);
 
 /* in transam/varsup.c */
 #ifdef PGXC /* PGXC_DATANODE */
diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h
index d266c1713..fbbdc8b2a 100644
--- a/src/include/access/tupdesc.h
+++ b/src/include/access/tupdesc.h
@@ -139,6 +139,7 @@ typedef struct tupleDesc {
     int32 tdtypmod;  /* typmod for tuple type */
     bool tdhasoid;   /* tuple has oid attribute in its header */
     int tdrefcount;  /* reference count, or -1 if not counting */
+    bool tdhasuids;  /* tuple has uid attribute in its header */
 } * TupleDesc;
 
 /* Accessor for the i'th attribute of tupdesc. */
@@ -191,5 +192,7 @@ extern void copyDroppedAttribute(Form_pg_attribute target, Form_pg_attribute sou
 
 extern char GetGeneratedCol(TupleDesc tupdesc, int atti);
 
+extern TupleConstr *TupleConstrCopy(const TupleDesc tupdesc);
+extern TupInitDefVal *tupInitDefValCopy(TupInitDefVal *pInitDefVal, int nAttr);
 #endif /* TUPDESC_H */
diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h
index b2bbfbc83..f9f2f70f9 100644
--- a/src/include/access/tupmacs.h
+++ b/src/include/access/tupmacs.h
@@ -69,7 +69,7 @@
  * format have to account for that themselves.
  */
 #define att_align_datum(cur_offset, attalign, attlen, attdatum) \
-    (((attlen) == -1 && VARATT_IS_SHORT(DatumGetPointer(attdatum))) ? (intptr_t)(cur_offset) \
+    (((attlen) == -1 && (VARATT_IS_SHORT(DatumGetPointer(attdatum)) || VARATT_IS_HUGE_TOAST_POINTER(DatumGetPointer(attdatum)))) ? (intptr_t)(cur_offset) \
                                                                    : att_align_nominal(cur_offset, attalign))
 
 /*
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index 9608cb353..01d95f3c6 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -91,6 +91,8 @@
 /* Size of an EXTERNAL datum that contains a standard TOAST pointer */
 #define TOAST_POINTER_SIZE (VARHDRSZ_EXTERNAL + sizeof(struct varatt_external))
+#define LARGE_TOAST_POINTER_SIZE (VARHDRSZ_EXTERNAL + sizeof(struct varatt_lob_external))
+#define LOB_POINTER_SIZE (VARHDRSZ_EXTERNAL + sizeof(struct varatt_lob_pointer))
 
 /* Size of an indirect datum that contains a standard TOAST pointer */
 #define INDIRECT_POINTER_SIZE (VARHDRSZ_EXTERNAL + sizeof(struct varatt_indirect))
@@ -104,6 +106,23 @@
 #define VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) \
     ((toast_pointer).va_extsize < (toast_pointer).va_rawsize - VARHDRSZ)
 
+#define VARATT_EXTERNAL_GET_HUGE_POINTER(large_toast_pointer, attr) \
+do { \
+    varattrib_1b_e *attre = (varattrib_1b_e *) (attr); \
+    errno_t rcs; \
+    Assert(VARATT_IS_HUGE_TOAST_POINTER(attre)); \
+    if (VARATT_IS_EXTERNAL_BUCKET(attre)) \
+    { \
+        Assert(VARSIZE_EXTERNAL(attre) == sizeof(large_toast_pointer) + VARHDRSZ_EXTERNAL + sizeof(int2)); \
+    }\
+    else\
+    { \
+        Assert(VARSIZE_EXTERNAL(attre) == sizeof(large_toast_pointer) + VARHDRSZ_EXTERNAL); \
+    }\
+    rcs = memcpy_s(&(large_toast_pointer), sizeof(large_toast_pointer), VARDATA_EXTERNAL(attre), sizeof(large_toast_pointer)); \
+    securec_check(rcs, "", ""); \
+} while (0)
+
 /*
  * Macro to fetch the possibly-unaligned contents of an EXTERNAL datum
  * into a local "struct varatt_external" toast pointer.  This should be
@@ -151,6 +170,8 @@ do { \
     } \
 } while (0)
 
+class ScalarVector;
+
 /* ----------
  * toast_insert_or_update -
@@ -164,6 +185,9 @@ extern HeapTuple toast_insert_or_update(
 extern Datum toast_save_datum(Relation rel, Datum value, struct varlena* oldexternal, int options);
 extern void toast_delete_datum(Relation rel, Datum value, int options, bool allow_update_self = false);
+extern void toast_delete_datum_internal(varatt_external toast_pointer, int options, bool allow_update_self, int2 bucketid = InvalidBktId);
+extern void toast_huge_delete_datum(Relation rel, Datum value, int options, bool allow_update_self = false);
+extern void checkHugeToastPointer(struct varlena *value);
 
 /* ----------
  * toast_delete -
@@ -182,6 +206,8 @@ extern void toast_delete(Relation rel, HeapTuple oldtup, int options);
  * ----------
  */
 extern struct varlena* heap_tuple_fetch_attr(struct varlena* attr);
+extern struct varlena* heap_internal_toast_fetch_datum(struct varatt_external toast_pointer,
+    Relation toastrel, Relation toastidx);
 
 /* ----------
  * heap_tuple_untoast_attr() -
@@ -190,7 +216,7 @@ extern struct varlena* heap_tuple_fetch_attr(struct varlena* attr);
  * it as needed.
  * ----------
  */
-extern struct varlena* heap_tuple_untoast_attr(struct varlena* attr);
+extern struct varlena* heap_tuple_untoast_attr(struct varlena* attr, ScalarVector *arr = NULL);
 
 /* ----------
  * heap_tuple_untoast_attr_slice() -
@@ -199,7 +225,7 @@ extern struct varlena* heap_tuple_untoast_attr(struct varlena* attr);
  * (Handles all cases for attribute storage)
  * ----------
  */
-extern struct varlena* heap_tuple_untoast_attr_slice(struct varlena* attr, int32 sliceoffset, int32 slicelength);
+extern struct varlena* heap_tuple_untoast_attr_slice(struct varlena* attr, int64 sliceoffset, int32 slicelength);
 
 /* ----------
  * toast_flatten_tuple -
@@ -220,6 +246,11 @@ extern HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc);
  * ----------
  */
 extern Datum toast_flatten_tuple_attribute(Datum value, Oid typeId, int32 typeMod);
+extern text* text_catenate_huge(text* t1, text* t2, Oid toastOid);
+extern Oid get_toast_oid();
+extern int64 calculate_huge_length(text* t);
+extern Datum fetch_lob_value_from_tuple(varatt_lob_pointer* lob_pointer, Oid update_oid,
+    bool* is_null, bool* is_huge_clob = NULL);
 
 /* ----------
  * toast_compress_datum -
@@ -247,5 +278,9 @@ extern Size toast_datum_size(Datum value);
 
 extern bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
 
+extern varlena* toast_huge_write_datum_slice(struct varlena* attr1, struct varlena* attr2, int64 sliceoffset, int32 length);
+extern varlena* toast_pointer_fetch_data(TupleTableSlot* varSlot, Form_pg_attribute attr, int varNumber);
+bool create_toast_by_sid(Oid *toastOid);
+
 #endif /* TUPTOASTER_H */
diff --git a/src/include/access/twophase.h b/src/include/access/twophase.h
index cc5405ec2..6769b0fb7 100644
--- a/src/include/access/twophase.h
+++ b/src/include/access/twophase.h
@@ -22,7 +22,7 @@
 #define GIDSIZE 200
 #define MAX_PREP_XACT_VERSIONS 64
 
-#define TwoPhaseStateHashPartition(hashcode) ((hashcode) % NUM_TWOPHASE_PARTITIONS)
+#define TwoPhaseStateHashPartition(hashcode) ((hashcode) % (TransactionId)NUM_TWOPHASE_PARTITIONS)
 #define TwoPhaseState(n) (t_thrd.xact_cxt.TwoPhaseState[TwoPhaseStateHashPartition(n)])
 #define TwoPhaseStateMappingPartitionLock(hashcode) (&t_thrd.shemem_ptr_cxt.mainLWLockArray[FirstTwoPhaseStateLock + \
@@ -180,9 +180,6 @@ bool relsContainsSegmentTable(ColFileNodeRel *delrels, int ndelrels);
 extern void FinishPreparedTransaction(const char* gid, bool isCommit);
 
 extern bool TransactionIdIsPrepared(TransactionId xid);
-
-extern bool IsTransactionIdMarkedPrepared(TransactionId xid);
-
 extern void SetLocalSnapshotPreparedArray(Snapshot snapshot);
 
 extern int GetPendingXactCount(void);
@@ -194,5 +191,5 @@ extern void RemoveStaleTwophaseState(TransactionId xid);
 extern void RecoverPrepareTransactionCSNLog(char* buf);
 
 extern int get_snapshot_defualt_prepared_num(void);
-
+extern void DeleteObsoleteTwoPhaseFile(int64 pageno);
 #endif /* TWOPHASE_H */
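The only functional change to TwoPhaseStateHashPartition is the cast: it pins the modulo to the unsigned 64-bit TransactionId domain regardless of how NUM_TWOPHASE_PARTITIONS happens to be defined, instead of leaving the operand types to the usual arithmetic conversions. A self-contained illustration with stand-in names (FakeTransactionId, FAKE_NUM_PARTITIONS are hypothetical):

```cpp
#include <cstdint>
#include <cstdio>

typedef uint64_t FakeTransactionId; /* stand-in for TransactionId */
#define FAKE_NUM_PARTITIONS 64      /* stand-in for NUM_TWOPHASE_PARTITIONS */

int main()
{
    FakeTransactionId xid = 0x200000003ULL; /* deliberately > 32 bits */
    /* Casting the divisor makes width and signedness explicit, guarding
     * against a divisor macro that expands to a signed or narrower type. */
    unsigned part = (unsigned)(xid % (FakeTransactionId)FAKE_NUM_PARTITIONS);
    printf("partition = %u\n", part); /* prints 3 */
    return 0;
}
```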
diff --git a/src/include/access/ubtree.h b/src/include/access/ubtree.h
index ff590910e..5e6607a58 100644
--- a/src/include/access/ubtree.h
+++ b/src/include/access/ubtree.h
@@ -22,6 +22,7 @@
 #include "access/sdir.h"
 #include "access/transam.h"
 #include "access/xlogreader.h"
+#include "access/ubtree.h"
 #include "access/visibilitymap.h"
 #include "access/ustore/knl_whitebox_test.h"
 #include "catalog/pg_index.h"
@@ -49,12 +50,13 @@ extern Datum ubtoptions(PG_FUNCTION_ARGS);
 extern bool UBTreeDelete(Relation index_relation, Datum* values, const bool* isnull, ItemPointer heapTCtid);
 extern bool IndexPagePrepareForXid(Relation rel, Page page, TransactionId xid, bool needWal, Buffer buf);
-extern void FreezeSingleIndexPage(Relation rel, Buffer buf, bool *hasPruned);
+extern void FreezeSingleIndexPage(Relation rel, Buffer buf, bool *hasPruned, OidRBTree *invisibleParts = NULL);
 
 /* structures for ubtrecycle.cpp */
 typedef enum {
-    RECYCLE_FREED_FORK,
+    RECYCLE_FREED_FORK = 0,
     RECYCLE_EMPTY_FORK,
+    RECYCLE_NONE_FORK, // last unused item
 } UBTRecycleForkNumber;
 
 typedef struct UBTRecycleQueueAddress {
@@ -161,6 +163,10 @@ typedef UBTRecycleQueueHeaderData* UBTRecycleQueueHeader;
 
 #define XLOG_UBTREE2_FREEZE 0x40
 
+#define UBTREE_VERIFY_OUTPUT_PARAM_CNT 3
+#define UBTREE_RECYCLE_OUTPUT_PARAM_CNT 6
+#define UBTREE_RECYCLE_OUTPUT_XID_STR_LEN 32
+
 enum {
     UBTREE_MARK_DELETE_BLOCK_NUM,
 };
@@ -334,6 +340,8 @@ typedef struct xl_ubtree_split {
 /* UBT_STATUS_OFFSET_MASK status bits */
 #define UBT_PIVOT_HEAP_TID_ATTR 0x1000
 
+const uint16 InvalidOffset = ((uint16)0) - 1;
+
 /*
  * Note: UBTreeTupleIsPivot() can have false negatives (but not false
  * positives) when used with !heapkeyspace indexes
@@ -474,8 +482,7 @@ extern bool UBTreeDoInsert(Relation rel, IndexTuple itup, IndexUniqueCheck check
 extern bool UBTreeDoDelete(Relation rel, IndexTuple itup);
 
 extern bool UBTreePagePruneOpt(Relation rel, Buffer buf, bool tryDelete);
-extern bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin,
-    IndexBulkDeleteCallback callback = NULL, void* callbackState = NULL);
+extern bool UBTreePagePrune(Relation rel, Buffer buf, TransactionId oldestXmin, OidRBTree *invisibleParts = NULL);
 extern bool UBTreePruneItem(Page page, OffsetNumber offnum, TransactionId oldestXmin, IndexPruneState* prstate);
 extern void UBTreePagePruneExecute(Page page, OffsetNumber* nowdead, int ndead, IndexPruneState* prstate);
 extern void UBTreePageRepairFragmentation(Page page);
@@ -527,6 +534,7 @@ extern TransactionIdStatus UBTreeCheckXid(TransactionId xid);
 extern void UBTreePageInit(Page page, Size size);
 extern void UBTreeInitMetaPage(Page page, BlockNumber rootbknum, uint32 level);
 extern Buffer UBTreeGetRoot(Relation rel, int access);
+extern bool UBTreePageRecyclable(Page page);
 extern int UBTreePageDel(Relation rel, Buffer buf);
 
 extern OffsetNumber UBTreeFindsplitloc(Relation rel, Page page, OffsetNumber newitemoff,
@@ -542,7 +550,9 @@ extern Buffer UBTreeGetNewPage(Relation rel, UBTRecycleQueueAddress* addr);
 extern void UBTreeRedo(XLogReaderState* record);
 extern void UBTree2Redo(XLogReaderState* record);
 extern void UBTreeDesc(StringInfo buf, XLogReaderState* record);
+extern const char* ubtree_type_name(uint8 subtype);
 extern void UBTree2Desc(StringInfo buf, XLogReaderState* record);
+extern const char* ubtree2_type_name(uint8 subtype);
 extern void UBTreeXlogStartup(void);
 extern void UBTreeXlogCleanup(void);
 extern bool UBTreeSafeRestartPoint(void);
@@ -552,9 +562,52 @@ extern bool IsUBTreeVacuum(const XLogReaderState* record);
 extern void UBTreeRestorePage(Page page, char* from, int len);
 extern void DumpUBTreeDeleteInfo(XLogRecPtr lsn, OffsetNumber offsetList[], uint64 offsetNum);
+
+/*
+ * prototypes for functions in ubtdump.cpp
+ */
+typedef enum {
+    VERIFY_XID_BASE_TOO_LARGE,
+    VERIFY_XID_TOO_LARGE,
+    VERIFY_HIKEY_ERROR,
+    VERIFY_PREV_HIKEY_ERROR,
+    VERIFY_ORDER_ERROR,
+    VERIFY_XID_ORDER_ERROR,
+    VERIFY_CSN_ORDER_ERROR,
+    VERIFY_INCONSISTENT_XID_STATUS,
+    VERIFY_XID_STATUS_ERROR,
+    VERIFY_RECYCLE_QUEUE_HEAD_ERROR,
+    VERIFY_RECYCLE_QUEUE_TAIL_ERROR,
+    VERIFY_INCONSISTENT_USED_PAGE,
+    VERIFY_RECYCLE_QUEUE_ENDLESS,
+    VERIFY_RECYCLE_QUEUE_TAIL_MISSED,
+    VERIFY_RECYCLE_QUEUE_PAGE_TOO_LESS,
+    VERIFY_RECYCLE_QUEUE_OFFSET_ERROR,
+    VERIFY_RECYCLE_QUEUE_XID_TOO_LARGE,
+    VERIFY_RECYCLE_QUEUE_UNEXPECTED_TAIL,
+    VERIFY_RECYCLE_QUEUE_FREE_LIST_ERROR,
+    VERIFY_RECYCLE_QUEUE_FREE_LIST_INVALID_OFFSET,
+    VERIFY_NORMAL
+} UBTVerifyErrorCode;
+typedef enum {
+    VERIFY_MAIN_PAGE,
+    VERIFY_RECYCLE_QUEUE_PAGE
+} UBTVerifyPageType;
+extern char * UBTGetVerifiedPageTypeStr(uint32 type);
+extern char * UBTGetVerifiedResultStr(uint32 type);
+extern void UBTreeVerifyRecordOutput(uint blkType, BlockNumber blkno, int errorCode,
+    TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols);
+extern void UBTreeVerifyIndex(Relation rel, TupleDesc *tupDesc, Tuplestorestate *tupstore, uint32 cols);
+extern int UBTreeVerifyOnePage(Relation rel, Page page, BTScanInsert cmpKeys, IndexTuple prevHikey);
+extern uint32 UBTreeVerifyRecycleQueue(Relation rel, TupleDesc *tupleDesc, Tuplestorestate *tupstore, uint32 cols);
+extern Buffer RecycleQueueGetEndpointPage(Relation rel, UBTRecycleForkNumber forkNumber, bool needHead, int access);
+
 /*
  * prototypes for functions in ubtrecycle.cpp
  */
+const BlockNumber minRecycleQueueBlockNumber = 6;
+extern UBTRecycleQueueHeader GetRecycleQueueHeader(Page page, BlockNumber blkno);
+extern Buffer ReadRecycleQueueBuffer(Relation rel, BlockNumber blkno);
 extern void UBTreeInitializeRecycleQueue(Relation rel);
 extern void UBTreeTryRecycleEmptyPage(Relation rel);
 extern void UBTreeRecordFreePage(Relation rel, BlockNumber blkno, TransactionId xid);
@@ -567,5 +620,9 @@ extern void UBtreeRecycleQueueChangeChain(Buffer buf, BlockNumber newBlkno, bool
 extern void UBTreeRecycleQueuePageChangeEndpointLeftPage(Buffer buf, bool isHead);
 extern void UBTreeRecycleQueuePageChangeEndpointRightPage(Buffer buf, bool isHead);
 extern void UBTreeXlogRecycleQueueModifyPage(Buffer buf, xl_ubtree2_recycle_queue_modify *xlrec);
+extern uint32 UBTreeRecycleQueuePageDump(Relation rel, Buffer buf, bool recordEachItem,
+    TupleDesc *tupleDesc, Tuplestorestate *tupstore, uint32 cols);
+extern void UBTreeDumpRecycleQueueFork(Relation rel, UBTRecycleForkNumber forkNum, TupleDesc *tupDesc,
+    Tuplestorestate *tupstore, uint32 cols);
 #endif /* UBTREE_H */
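One subtlety worth calling out from the ubtree.h hunks: `const uint16 InvalidOffset = ((uint16)0) - 1;` relies on unsigned wraparound, yielding 0xFFFF, a value no real 1-based page offset can take. A compile-time confirmation of that identity:

```cpp
#include <cstdint>

static_assert(static_cast<uint16_t>(static_cast<uint16_t>(0) - 1) == 0xFFFF,
              "InvalidOffset wraps to the maximum uint16");
```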
diff --git a/src/include/access/ustore/knl_uheap.h b/src/include/access/ustore/knl_uheap.h
index ea131e7d1..d0f5e95b0 100644
--- a/src/include/access/ustore/knl_uheap.h
+++ b/src/include/access/ustore/knl_uheap.h
@@ -33,7 +33,6 @@
  * indicate that threshold of 200 is good enough to keep the contention on
  * transaction slots under control.
  */
-#define NUM_BLOCKS_FOR_NON_INPLACE_UPDATES 200
 #define MIN_SAVING_LEN 3
 
 typedef struct UHeapWALInfo {
@@ -94,7 +93,7 @@ typedef struct {
 } RelationBuffer;
 
 Datum UHeapFastGetAttr(UHeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull);
-Oid UHeapInsert(Relation rel, UHeapTuple tuple, CommandId cid, BulkInsertState bistate = NULL);
+Oid UHeapInsert(Relation rel, UHeapTuple tuple, CommandId cid, BulkInsertState bistate = NULL, bool isToast = false);
 extern TransactionId UHeapFetchInsertXid(UHeapTuple uhtup, Buffer buffer);
 UHeapTuple UHeapPrepareInsert(Relation rel, UHeapTuple tuple, int options);
 void RelationPutUTuple(Relation relation, Buffer buffer, UHeapTupleData *tuple);
@@ -104,12 +103,13 @@ extern bool TableFetchAndStore(Relation scanRelation, Snapshot snapshot, Tuple t
 TM_Result UHeapUpdate(Relation relation, Relation parentRelation, ItemPointer otid, UHeapTuple newtup, CommandId cid,
     Snapshot crosscheck, Snapshot snapshot, bool wait, TupleTableSlot **oldslot, TM_FailureData *tmfd,
     bool *indexkey_update_flag, Bitmapset **modifiedIdxAttrs, bool allow_inplace_update = true);
+extern void PutBlockInplaceUpdateTuple(Page page, Item item, RowPtr *lp, Size size);
 TM_Result UHeapLockTuple(Relation relation, UHeapTuple tuple, Buffer* buffer,
     CommandId cid, LockTupleMode mode, bool nowait, TM_FailureData *tmfd, bool follow_updates, bool eval,
     Snapshot snapshot, bool isSelectForUpdate = false, bool allowLockSelf = false, bool isUpsert = false,
-    TransactionId conflictXid = InvalidTransactionId);
+    TransactionId conflictXid = InvalidTransactionId, int waitSec = 0);
 
 UHeapTuple UHeapLockUpdated(CommandId cid, Relation relation, LockTupleMode lock_mode, ItemPointer tid,
@@ -125,9 +125,9 @@ void UHeapReserveDualPageTDSlot(Relation relation, Buffer oldbuf, Buffer newbuf,
 void UHeapMultiInsert(Relation relation, UHeapTuple *tuples, int ntuples, CommandId cid, int options,
     BulkInsertState bistate);
 int UHeapPageReserveTransactionSlot(Relation relation, Buffer buf, TransactionId fxid, UndoRecPtr *urecPtr,
-    bool *lockReacquired, Buffer otherBuf, bool *slotReused, TransactionId *minxid, bool aggressiveSearch = true);
+    bool *lockReacquired, Buffer otherBuf, TransactionId *minxid, bool aggressiveSearch = true);
 bool UHeapPageFreezeTransSlots(Relation relation, Buffer buf, bool *lockReacquired, TD *transinfo,
-    Buffer otherBuf, bool aggressiveFreeze);
+    Buffer otherBuf);
 void UHeapFreezeOrInvalidateTuples(Buffer buf, int nSlots, const int *slots, bool isFrozen);
 void UHeapPageSetUndo(Buffer buffer, int transSlotId, TransactionId fxid, UndoRecPtr urecptr);
 int UPageGetTDSlotId(Buffer buf, TransactionId fxid, UndoRecPtr *urecAdd);
@@ -149,10 +149,6 @@ UndoRecPtr UHeapPrepareUndoDelete(Oid relOid, Oid partitionOid, Oid relfilenode,
     UndoPersistence persistence, Buffer buffer, OffsetNumber offnum, TransactionId xid, SubTransactionId subxid,
     CommandId cid, UndoRecPtr prevurpInOneBlk, UndoRecPtr prevurpInOneXact, _in_ TD *oldtd, UHeapTuple oldtuple,
     BlockNumber blk = InvalidBlockNumber, XlUndoHeader *xlundohdr = NULL, undo::XlogUndoMeta *xlundometa = NULL);
-UndoRecPtr UHeapPrepareUndoLock(Oid relOid, Oid partitionOid, Oid relfilenode, Oid tablespace,
-    UndoPersistence persistence, Buffer buffer, OffsetNumber offnum, TransactionId xid, SubTransactionId subxid,
-    CommandId cid, UndoRecPtr prevurpInOneBlk, UndoRecPtr prevurpInOneXact, _in_ TD *oldtd, UHeapTuple oldtuple,
-    BlockNumber blk = InvalidBlockNumber, XlUndoHeader *xlundohdr = NULL, undo::XlogUndoMeta *xlundometa = NULL);
 UndoRecPtr UHeapPrepareUndoUpdate(Oid relOid, Oid partitionOid, Oid relfilenode, Oid tablespace,
     UndoPersistence persistence, Buffer buffer, Buffer newbuffer, OffsetNumber offnum, TransactionId xid,
     SubTransactionId subxid, CommandId cid, UndoRecPtr prevurpInOneBlk, UndoRecPtr newprevurpInOneBlk,
@@ -169,6 +165,7 @@ extern int UHeapPagePruneGuts(const RelationBuffer *relbuf, TransactionId Oldest
 extern void UHeapPagePruneExecute(Buffer buffer, OffsetNumber target_offnum, const UPruneState *prstate);
 extern void UPageRepairFragmentation(Buffer buffer, OffsetNumber target_offnum, Size space_required,
     bool *pruned, bool unused_set);
+extern void UHeapPagePruneFSM(Relation relation, Buffer buffer, TransactionId fxid, Page page, BlockNumber blkno);
 
 int UHeapPageGetTDSlotId(Buffer buffer, TransactionId xid, UndoRecPtr *urp);
 int UpdateTupleHeaderFromUndoRecord(UndoRecord *urec, UHeapDiskTuple tuple, Page page);
 bool UHeapExecPendingUndoActions(Relation rel, Buffer buffer, TransactionId xwait);
@@ -177,7 +174,8 @@ extern XLogRecPtr LogUHeapClean(Relation reln, Buffer buffer, OffsetNumber targe
     OffsetNumber *nowdeleted, int ndeleted, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused,
     int nunused, TransactionId latestRemovedXid, bool pruned);
 
-extern void SimpleUHeapDelete(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot** oldslot = NULL);
+extern void SimpleUHeapDelete(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot** oldslot = NULL,
+    TransactionId* tmfdXmin = NULL);
 
 void UHeapPageShiftBase(Buffer buffer, Page page, bool multi, int64 delta);
diff --git a/src/include/access/ustore/knl_uhio.h b/src/include/access/ustore/knl_uhio.h
index a29262c3c..224348d57 100644
--- a/src/include/access/ustore/knl_uhio.h
+++ b/src/include/access/ustore/knl_uhio.h
@@ -22,12 +22,15 @@
 #define UHEAP_INSERT_SKIP_WAL HEAP_INSERT_SKIP_WAL
 #define UHEAP_INSERT_SKIP_FSM HEAP_INSERT_SKIP_FSM
 #define UHEAP_INSERT_FROZEN HEAP_INSERT_FROZEN
+#define UHEAP_INSERT_EXTEND 0x0020
+
+#define GET_BUF_FOR_UTUPLE_LOOP_LIMIT 2
 
 extern BlockNumber RelationPruneOptional(Relation relation, Size requiredSize);
 extern BlockNumber RelationPruneBlockAndReturn(Relation relation, BlockNumber start_block,
     BlockNumber max_blocks_to_scan, Size required_size, BlockNumber *next_block);
 extern Buffer RelationGetBufferForUTuple(Relation relation, Size len, Buffer otherBuffer, int options,
-    BulkInsertState bistate, bool switch_buf = false);
+    BulkInsertState bistate);
 
 #endif
diff --git a/src/include/access/ustore/knl_umultilocker.h b/src/include/access/ustore/knl_umultilocker.h
index ae4b8de07..2ac57a0c3 100644
--- a/src/include/access/ustore/knl_umultilocker.h
+++ b/src/include/access/ustore/knl_umultilocker.h
@@ -40,8 +40,6 @@ typedef struct UMultiLockMember {
     LockTupleMode mode;
 } UMultiLockMember;
 
-LockTupleMode GetOldLockMode(uint16 infomask);
 bool UMultiLockMembersSame(const List *list1, const List *list2);
-void UGetMultiLockInfo(uint16 oldInfomask, TransactionId tupXid, int tupTdSlot, TransactionId addToXid,
-    uint16 *newInfomask, int *newTdSlot, LockTupleMode *mode, bool *oldTupleHasUpdate, LockOper lockoper);
+
 #endif
diff --git a/src/include/access/ustore/knl_upage.h b/src/include/access/ustore/knl_upage.h
index ef3fa6da4..f9c6713b1 100644
--- a/src/include/access/ustore/knl_upage.h
+++ b/src/include/access/ustore/knl_upage.h
@@ -23,6 +23,8 @@
 #define TD_SLOT_INCREMENT_SIZE 2
 #define TD_THRESHOLD_FOR_PAGE_SWITCH 32
+#define FORCE_EXTEND_THRESHOLD 3
+#define DML_MAX_RETRY_TIMES 100000
 #define UHEAP_HAS_FREE_LINES 0x0001 /* are there any unused line pointers? */
 #define UHEAP_PAGE_FULL 0x0002      /* not enough free space for new \
@@ -81,6 +83,13 @@
 #define UPageClearPrunable(_page) (((UHeapPageHeaderData *)(_page))->pd_prune_xid = InvalidTransactionId)
 
+#define LimitRetryTimes(retryTimes) \
+    do { \
+        if ((retryTimes) > DML_MAX_RETRY_TIMES) { \
+            elog(ERROR, "Transaction aborted due to too many retries."); \
+        } \
+    } while (0)
+
 /*
  * RowPtr "flags" has these possible states.  An UNUSED row pointer is available
  * for immediate re-use, the other states are not.
@@ -251,11 +260,11 @@ typedef struct UHeapPageTDData {
     TD td_info[1];
 } UHeapPageTDData;
 
-inline bool UPageIsEmpty(UHeapPageHeaderData *phdr, uint16 relTdCount)
+typedef UHeapPageHeaderData* UHeapPageHeader;
+
+inline bool UPageIsEmpty(UHeapPageHeaderData *phdr)
 {
     uint16 td_count = phdr->td_count;
-    Assert(td_count >= relTdCount);
-
     uint16 start = (uint16)SizeOfUHeapPageHeaderData + (td_count * sizeof(TD));
     return phdr->pd_lower <= start;
 }
@@ -294,7 +303,7 @@ Size PageGetExactUHeapFreeSpace(Page page);
 extern UHeapFreeOffsetRanges *UHeapGetUsableOffsetRanges(Buffer buffer, UHeapTuple *tuples, int ntuples,
     Size saveFreeSpace);
 
-void UHeapRecordPotentialFreeSpace(Relation rel, Buffer buffer, int delta);
+void UHeapRecordPotentialFreeSpace(Buffer buffer, int delta);
 
 /*
  * UPageGetMaxOffsetNumber
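LimitRetryTimes above is a guard for DML retry loops: the caller keeps a local counter and the macro aborts the transaction with elog(ERROR, ...) once DML_MAX_RETRY_TIMES (100000) is exceeded. A hedged usage sketch, where TryOperationSketch is a hypothetical stand-in for whatever step is being retried:

```cpp
/* Sketch of the intended call pattern, not code from this patch. */
static void RetryUntilDoneSketch(bool (*TryOperationSketch)(void))
{
    int retryTimes = 0;
    while (!TryOperationSketch()) {
        retryTimes++;
        LimitRetryTimes(retryTimes); /* ereports ERROR past the cap */
    }
}
```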
diff --git a/src/include/access/ustore/knl_uredo.h b/src/include/access/ustore/knl_uredo.h
index 3f940f792..dfaf5649a 100644
--- a/src/include/access/ustore/knl_uredo.h
+++ b/src/include/access/ustore/knl_uredo.h
@@ -122,6 +122,7 @@ typedef struct XlUHeapInsert {
 #define XLZ_UPDATE_SUFFIX_FROM_OLD (1 << 1)
 #define XLZ_NON_INPLACE_UPDATE (1 << 2)
 #define XLZ_HAS_UPDATE_UNDOTUPLE (1 << 3)
+#define XLZ_BLOCK_INPLACE_UPDATE (1 << 4)
 
 /* size=24 alignment=8 */
 typedef struct XlUHeapUpdate {
@@ -268,10 +269,14 @@ typedef struct XlUHeapExtendTdSlots {
 extern void UHeapRedo(XLogReaderState *record);
 extern void UHeapDesc(StringInfo buf, XLogReaderState *record);
+extern const char* uheap_type_name(uint8 subtype);
 extern char* GetUndoHeader(XlUndoHeader *xlundohdr, Oid *partitionOid, UndoRecPtr* blkprev);
 extern void UHeap2Redo(XLogReaderState *record);
 extern void UHeap2Desc(StringInfo buf, XLogReaderState *record);
+extern const char* uheap2_type_name(uint8 subtype);
 extern void UHeapUndoRedo(XLogReaderState *record);
 extern void UHeapUndoDesc(StringInfo buf, XLogReaderState *record);
+extern TransactionId UHeapXlogGetCurrentXid(XLogReaderState *record, bool hasCSN);
+extern const char* uheap_undo_type_name(uint8 subtype);
 extern TransactionId UHeapXlogGetCurrentXid(XLogReaderState *record);
 
 #endif
diff --git a/src/include/access/ustore/knl_uscan.h b/src/include/access/ustore/knl_uscan.h
index 5bcd1828e..31f695b86 100644
--- a/src/include/access/ustore/knl_uscan.h
+++ b/src/include/access/ustore/knl_uscan.h
@@ -34,8 +34,14 @@ typedef struct UHeapScanDescData {
 
     /* these fields only used in page-at-a-time mode and for bitmap scans */
     // XXXTAM
+    ItemPointerData rs_mctid; /* marked scan position, if any */
+
+    /* these fields only used in page-at-a-time mode and for bitmap scans */
+    int rs_mindex; /* marked tuple's saved index */
+
     UHeapTuple rs_visutuples[MaxPossibleUHeapTuplesPerPage]; /* visible tuples */
     UHeapTuple rs_cutup; /* current tuple in scan, if any */
+    UHeapTuple* rs_ctupBatch; /* current tuples in scan */
 } UHeapScanDescData;
 
 typedef struct UHeapScanDescData *UHeapScanDesc;
@@ -46,10 +52,13 @@ const uint32 BULKSCAN_BLOCKS_PER_BUFFER = 4;
 UHeapTuple UHeapGetTupleFromPage(UHeapScanDesc scan, ScanDirection dir);
 UHeapTuple UHeapGetNextForVerify(TableScanDesc sscan, ScanDirection direction, bool& isValidRelationPage);
 TableScanDesc UHeapBeginScan(Relation relation, Snapshot snapshot, int nkeys);
+void UHeapMarkPos(TableScanDesc uscan);
+void UHeapRestRpos(TableScanDesc sscan);
 void UHeapEndScan(TableScanDesc uscan);
 void UHeapRescan(TableScanDesc uscan, ScanKey key);
 HeapTuple UHeapGetNextSlot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot);
 UHeapTuple UHeapGetNextSlotGuts(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot);
+UHeapTuple UHeapIndexBuildGetNextTuple(UHeapScanDesc scan, TupleTableSlot *slot);
 UHeapTuple UHeapSearchBuffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot,
     bool *all_dead, UHeapTuple freebuf = NULL);
 bool UHeapScanBitmapNextTuple(TableScanDesc sscan, TBMIterateResult *tbmres, TupleTableSlot *slot);
@@ -57,4 +66,5 @@ bool UHeapScanBitmapNextBlock(TableScanDesc sscan, const TBMIterateResult *tbmre
 bool UHeapGetPage(TableScanDesc sscan, BlockNumber page);
 UHeapTuple UHeapGetNext(TableScanDesc sscan, ScanDirection dir);
 
+extern bool UHeapGetTupPageBatchmode(UHeapScanDesc scan, ScanDirection dir);
 #endif
diff --git a/src/include/access/ustore/knl_utuple.h b/src/include/access/ustore/knl_utuple.h
index 13ddfce81..e40522a78 100644
--- a/src/include/access/ustore/knl_utuple.h
+++ b/src/include/access/ustore/knl_utuple.h
@@ -37,7 +37,6 @@ struct TupleTableSlot;
 #define UHEAP_DELETED 0x0010         /* tuple deleted */
 #define UHEAP_INPLACE_UPDATED 0x0020 /* tuple is updated inplace */
 #define UHEAP_UPDATED 0x0040         /* tuple is not updated inplace */
-#define UHEAP_XID_LOCK_ONLY 0x0080   /* xid, if valid, is only a locker */
 
 #define UHEAP_XID_KEYSHR_LOCK 0x0100 /* xid is a key-shared locker */
 #define UHEAP_XID_NOKEY_EXCL_LOCK 0x0200 /* xid is a nokey-exclusive locker */
@@ -62,11 +61,10 @@ struct TupleTableSlot;
  *   another partition */
 
 #define UHEAP_LOCK_MASK (UHEAP_XID_SHR_LOCK | UHEAP_XID_EXCL_LOCK)
-#define UHEAP_VIS_STATUS_MASK 0x3FF0 /* mask for visibility bits (5 ~ 14 \
+#define UHEAP_VIS_STATUS_MASK 0x7FF0 /* mask for visibility bits (5 ~ 14 \
                                       * bits) */
 #define UHEAP_LOCK_STATUS_MASK \
-    (UHEAP_XID_LOCK_ONLY | UHEAP_XID_KEYSHR_LOCK | UHEAP_XID_NOKEY_EXCL_LOCK | UHEAP_XID_EXCL_LOCK | \
-        UHEAP_MULTI_LOCKERS)
+    (UHEAP_XID_KEYSHR_LOCK | UHEAP_XID_NOKEY_EXCL_LOCK | UHEAP_XID_EXCL_LOCK | UHEAP_MULTI_LOCKERS)
 
 /*
@@ -75,10 +73,10 @@ struct TupleTableSlot;
 #define SINGLE_LOCKER_XID_IS_EXCL_LOCKED(infomask) ((infomask & SINGLE_LOCKER_INFOMASK) == SINGLE_LOCKER_XID_EXCL_LOCK)
 #define SINGLE_LOCKER_XID_IS_SHR_LOCKED(infomask) ((infomask & SINGLE_LOCKER_INFOMASK) == SINGLE_LOCKER_XID_SHR_LOCK)
+#define UHEAP_XID_IS_LOCK(infomask) (((infomask) & SINGLE_LOCKER_XID_IS_LOCK) != 0)
 #define UHEAP_XID_IS_SHR_LOCKED(infomask) (((infomask)&UHEAP_LOCK_MASK) == UHEAP_XID_SHR_LOCK)
 #define UHEAP_XID_IS_EXCL_LOCKED(infomask) (((infomask)&UHEAP_LOCK_MASK) == UHEAP_XID_EXCL_LOCK)
 #define UHeapTupleHasExternal(tuple) (((tuple)->disk_tuple->flag & UHEAP_HASEXTERNAL) != 0)
-#define UHEAP_XID_IS_LOCKED_ONLY(infomask) (((infomask)&UHEAP_XID_LOCK_ONLY) != 0)
 
 #define UHeapTupleHasMultiLockers(infomask) (((infomask)&UHEAP_MULTI_LOCKERS) != 0)
 
@@ -117,11 +115,14 @@ struct TupleTableSlot;
 #define UHeapTupleHeaderSetMovedPartitions(udisk_tuple) ((udisk_tuple)->flag |= UHEAP_MOVED)
 
-#define UHeapTupleHeaderClearSingleLocker(udisk_tuple) \
-    ((udisk_tuple)->flag &= ~(SINGLE_LOCKER_XID_IS_LOCK | SINGLE_LOCKER_XID_IS_SUBXACT))
+#define UHeapTupleHeaderClearSingleLocker(utuple) \
+    do { \
+        (utuple)->xid = (ShortTransactionId)FrozenTransactionId; \
+        (utuple)->flag &= ~(SINGLE_LOCKER_XID_IS_LOCK | SINGLE_LOCKER_XID_IS_SUBXACT); \
+    } while (0)
 
 #define IsUHeapTupleModified(infomask) \
-    ((infomask & (UHEAP_DELETED | UHEAP_UPDATED | UHEAP_INPLACE_UPDATED | UHEAP_XID_LOCK_ONLY)) != 0)
+    ((infomask & (UHEAP_DELETED | UHEAP_UPDATED | UHEAP_INPLACE_UPDATED)) != 0)
 
 /* UHeap tuples do not have the idea of xmin/xmax but a single XID */
 #define UHEAP_XID_COMMITTED 0x0800
@@ -190,6 +191,7 @@ typedef UHeapTupleData *UHeapTuple;
 inline UHeapTuple uheaptup_alloc(Size size)
 {
     UHeapTuple tup = (UHeapTuple)palloc0(size);
+    tup->t_bucketId = InvalidBktId;
     tup->tupTableType = UHEAP_TUPLE;
     return tup;
 }
@@ -290,10 +292,11 @@ void SlotDeformUTuple(TupleTableSlot *slot, UHeapTuple tuple, long *offp, int na
 uint32 UHeapCalcTupleDataSize(TupleDesc tuple_desc, Datum *values, const bool *is_nulls, uint32 hoff,
     bool enableReverseBitmap, bool enableReserve);
 HeapTuple UHeapCopyHeapTuple(TupleTableSlot *slot);
-void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shouldFree);
+void UHeapSlotStoreUHeapTuple(UHeapTuple utuple, TupleTableSlot *slot, bool shouldFree, bool batchMode);
 void UHeapSlotClear(TupleTableSlot *slot);
 
 void UHeapSlotGetSomeAttrs(TupleTableSlot *slot, int attnum);
+void UHeapSlotFormBatch(TupleTableSlot* slot, VectorBatch* batch, int cur_rows, int attnum);
 void UHeapSlotGetAllAttrs(TupleTableSlot *slot);
 Datum UHeapSlotGetAttr(TupleTableSlot *slot, int attnum, bool *isnull);
 bool UHeapSlotAttIsNull(const TupleTableSlot *slot, int attnum);
diff --git a/src/include/access/ustore/knl_utuptoaster.h b/src/include/access/ustore/knl_utuptoaster.h
index 681e29bad..463d66424 100644
--- a/src/include/access/ustore/knl_utuptoaster.h
+++ b/src/include/access/ustore/knl_utuptoaster.h
@@ -55,5 +55,5 @@ UHeapTuple UHeapToastInsertOrUpdate(Relation relation, UHeapTuple newtup, UHeapT
 extern struct varlena *UHeapInternalToastFetchDatum(struct varatt_external toastPointer, Relation toastrel,
     Relation toastidx);
 extern struct varlena *UHeapInternalToastFetchDatumSlice(struct varatt_external toast_pointer, Relation toastrel,
-    Relation toastidx, int32 sliceoffset, int32 length);
+    Relation toastidx, int64 sliceoffset, int32 length);
 #endif
diff --git a/src/include/access/ustore/knl_uundorecord.h b/src/include/access/ustore/knl_uundorecord.h
index e30c0b070..7fca6eac0 100644
--- a/src/include/access/ustore/knl_uundorecord.h
+++ b/src/include/access/ustore/knl_uundorecord.h
@@ -361,8 +361,8 @@ typedef bool (*SatisfyUndoRecordCallback)(_in_ UndoRecord *urec, _in_ BlockNumbe
  *
  * Returns the UNDO_RET_SUCC if found, otherwise, return UNDO_RET_FAIL.
  */
-int FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRecordCallback callback,
-    _in_ BlockNumber blkno, _in_ OffsetNumber offset, _in_ TransactionId xid);
+UndoTraversalState FetchUndoRecord(__inout UndoRecord *urec, _in_ SatisfyUndoRecordCallback callback,
+    _in_ BlockNumber blkno, _in_ OffsetNumber offset, _in_ TransactionId xid, bool isNeedByPass = false);
 
 /*
  * Example: satisfied callback function.
diff --git a/src/include/access/ustore/knl_uvisibility.h b/src/include/access/ustore/knl_uvisibility.h
index a118b67d8..e1a60c95f 100644
--- a/src/include/access/ustore/knl_uvisibility.h
+++ b/src/include/access/ustore/knl_uvisibility.h
@@ -62,7 +62,7 @@ TM_Result UHeapTupleSatisfiesUpdate(Relation rel, Snapshot snapshot, ItemPointer
 
 TransactionId UHeapTupleGetTransXid(UHeapTuple uhtup, Buffer buf, bool nobuflock);
 
-void UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo *txactinfo);
+UndoTraversalState UHeapTupleGetTransInfo(Buffer buf, OffsetNumber offnum, UHeapTupleTransInfo *txactinfo);
 
 UHTSVResult UHeapTupleSatisfiesOldestXmin(UHeapTuple inplacehtup, TransactionId OldestXmin, Buffer buffer,
     bool resolve_abort_in_progress, UHeapTuple *preabort_tuple, TransactionId *xid, SubTransactionId *subxid);
@@ -71,9 +71,8 @@ CommandId UHeapTupleGetCid(UHeapTuple uhtup, Buffer buf);
 
 void GetTDSlotInfo(Buffer buf, int tdId, UHeapTupleTransInfo *tdinfo);
 
-void FetchTransInfoFromUndo(BlockNumber blocknum, OffsetNumber offnum, TransactionId xid,
-    UHeapTupleTransInfo *txactinfo, ItemPointer newCtid);
-
+UndoTraversalState FetchTransInfoFromUndo(BlockNumber blocknum, OffsetNumber offnum, TransactionId xid,
+    UHeapTupleTransInfo *txactinfo, ItemPointer newCtid, bool needByPass);
 bool UHeapTupleIsSurelyDead(UHeapTuple uhtup, Buffer buffer, OffsetNumber offnum,
     const UHeapTupleTransInfo *cachedTdInfo, const bool useCachedTdInfo);
diff --git a/src/include/access/ustore/undo/knl_uundoapi.h b/src/include/access/ustore/undo/knl_uundoapi.h
index 905e7b53f..31d6263b5 100644
--- a/src/include/access/ustore/undo/knl_uundoapi.h
+++ b/src/include/access/ustore/undo/knl_uundoapi.h
@@ -37,10 +37,7 @@ bool IsSkipInsertUndo(UndoRecPtr urp);
 bool IsSkipInsertSlot(UndoSlotPtr urp);
 
 /* Check undo record validity. */
-bool CheckUndoRecordValid(UndoRecPtr urp, bool checkForceRecycle);
-
-/* Check undo record recovery status for ROS */
-UndoRecoveryStatus CheckUndoRecordRecoveryStatus(UndoRecPtr urp);
+UndoRecordState CheckUndoRecordValid(UndoRecPtr urp, bool checkForceRecycle);
 
 void CheckPointUndoSystemMeta(XLogRecPtr checkPointRedo);
 
@@ -65,10 +62,9 @@ void SetUndoMetaLSN(XlogUndoMeta *meta, XLogRecPtr lsn);
 void RedoUndoMeta(XLogReaderState *record, XlogUndoMeta *meta, UndoRecPtr startUndoPtr, UndoRecPtr lastRecord,
     uint32 lastRecordSize);
 void ReleaseSlotBuffer();
-
+void InitUndoCountThreshold();
 UndoRecPtr GetPrevUrp(UndoRecPtr currUrp);
 } // namespace undo
 
 extern void GetUndoFileDirectory(char *path, int len, UndoPersistence upersistence);
-
 #endif // __KNL_UUNDOAPI_H__
diff --git a/src/include/access/ustore/undo/knl_uundospace.h b/src/include/access/ustore/undo/knl_uundospace.h
index 4f6096a17..e5cc112d7 100644
--- a/src/include/access/ustore/undo/knl_uundospace.h
+++ b/src/include/access/ustore/undo/knl_uundospace.h
@@ -23,7 +23,6 @@ namespace undo {
 
 #define UNDOSPACE_CLEAN 0
 #define UNDOSPACE_DIRTY 1
-
 #define UNDO_SPACE_META_VERSION 1
 
 /* Information about the undospace persistency metadata. */
diff --git a/src/include/access/ustore/undo/knl_uundotype.h b/src/include/access/ustore/undo/knl_uundotype.h
index 9010901a2..e5e2fd469 100644
--- a/src/include/access/ustore/undo/knl_uundotype.h
+++ b/src/include/access/ustore/undo/knl_uundotype.h
@@ -38,7 +38,7 @@ typedef uint64 UndoSlotPtr;
 
 const uint32 USTORE_VERSION = 92350;
 
-#define UNDO_ZONE_COUNT (1024*1024)
+const int32 UNDO_ZONE_COUNT = 1024*1024;
 
 /* Parameter for calculating the number of locks used in undo zone.
 */
 #define UNDO_ZONE_LOCK 4
@@ -123,8 +123,6 @@ const int PAGES_READ_NUM = 1024 * 16;
 /* The only valid fork number for undo log buffers. */
 #define UNDO_FORKNUM MAIN_FORKNUM
 
-typedef enum { UNDO_NOT_VALID = 0, UNDO_NOT_RECOVERY = 1, UNDO_RECOVERY = 2, UNDO_DISCARD = 3 } UndoRecoveryStatus;
-
 typedef enum {
     UNDO_PERMANENT = 0,
     UNDO_UNLOGGED = 1,
@@ -138,6 +136,22 @@ typedef enum {
     UNDO_SPACE_BUTT
 } UndoSpaceType;
 
+typedef enum {
+    UNDO_TRAVERSAL_DEFAULT = 0,
+    UNDO_TRAVERSAL_COMPLETE,
+    UNDO_TRAVERSAL_STOP,
+    UNDO_TRAVERSAL_ABORT,
+    UNDO_TRAVERSAL_END
+} UndoTraversalState;
+
+typedef enum {
+    UNDO_RECORD_NORMAL = 0,
+    UNDO_RECORD_DISCARD,
+    UNDO_RECORD_FORCE_DISCARD,
+    UNDO_RECORD_NOT_INSERT,
+    UNDO_RECORD_INVALID
+} UndoRecordState;
+
 #define UNDO_PERSISTENCE_LEVELS 3
 
 #define REL_PERSISTENCE(upersistence) \
@@ -208,9 +222,6 @@ typedef enum {
 #define UNDO_DELETE 0x03
 #define UNDO_INPLACE_UPDATE 0x04
 #define UNDO_UPDATE 0x05
-#define UNDO_XID_LOCK_ONLY 0x06
-#define UNDO_XID_LOCK_FOR_UPDATE 0x07
-#define UNDO_XID_MULTI_LOCK_ONLY 0x08
 #define UNDO_ITEMID_UNUSED 0x09
 
 #define UNDO_UREC_INFO_UNKNOWN 0x00
@@ -241,4 +252,6 @@ extern const char *UNDO_PERMANENT_DIR;
 extern const char *UNDO_UNLOGGED_DIR;
 extern const char *UNDO_TEMP_DIR;
 
+extern int UNDOZONE_META_PAGE_COUNT;
+extern int UNDOSPACE_META_PAGE_COUNT;
 #endif // __KNL_UUNDOTYPE_H__
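UndoRecordState splits the old boolean "valid" answer from CheckUndoRecordValid into the distinct ways a record can be unavailable. A hedged sketch of a caller acting on it; the per-state handling below is illustrative, not taken from this patch:

```cpp
static bool UndoRecordUsableSketch(UndoRecPtr urp)
{
    UndoRecordState state = undo::CheckUndoRecordValid(urp, true);
    switch (state) {
        case UNDO_RECORD_NORMAL:
            return true;              /* safe to fetch and apply */
        case UNDO_RECORD_DISCARD:
        case UNDO_RECORD_FORCE_DISCARD:
            return false;             /* already recycled by undo cleanup */
        case UNDO_RECORD_NOT_INSERT:
        case UNDO_RECORD_INVALID:
        default:
            return false;             /* never written, or out of range */
    }
}
```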
*/ #define ALLOCATE_ZONEID(upersistence, retZid) \ do { \ @@ -155,10 +166,18 @@ public: { return MAKE_UNDO_PTR(zid_, recycle_); } + inline UndoSlotPtr GetFrozenSlotPtr(void) + { + return frozenSlotPtr_; + } inline TransactionId GetRecycleXid(void) { return recycleXid_; } + inline TransactionId GetAttachPid(void) + { + return attachPid_; + } inline bool Attached(void) { return attached_ == UNDO_ZONE_ATTACHED; @@ -180,10 +199,18 @@ public: { forceDiscard_ = UNDO_PTR_GET_OFFSET(discard); } + inline void SetAttachPid(ThreadId attachPid) + { + attachPid_ = attachPid; + } inline void SetAllocate(UndoSlotPtr allocate) { allocate_ = UNDO_PTR_GET_OFFSET(allocate); } + inline void SetFrozenSlotPtr(UndoSlotPtr frozenSlotPtr) + { + frozenSlotPtr_ = frozenSlotPtr; + } inline void SetRecycle(UndoSlotPtr recycle) { recycle_ = UNDO_PTR_GET_OFFSET(recycle); @@ -300,7 +327,7 @@ public: return ((allocate_ - recycle_) / BLCKSZ); } bool CheckNeedSwitch(UndoRecordSize size); - bool CheckUndoRecordValid(UndoLogOffset offset, bool checkForceRecycle); + UndoRecordState CheckUndoRecordValid(UndoLogOffset offset, bool checkForceRecycle); bool CheckRecycle(UndoRecPtr starturp, UndoRecPtr endurp); UndoRecPtr AllocateSpace(uint64 size); @@ -330,11 +357,13 @@ private: UndoSlotBuffer buf_; UndoSlotOffset allocate_; UndoSlotOffset recycle_; + UndoSlotPtr frozenSlotPtr_; UndoLogOffset insertUndoPtr_; UndoLogOffset discardUndoPtr_; UndoLogOffset forceDiscard_; UndoPersistence pLevel_; TransactionId recycleXid_; + ThreadId attachPid_; /* Need Lock undo zone before alloc, preventing from checkpoint. */ LWLock *lock_; /* Lsn for undo zone meta. */ @@ -350,10 +379,12 @@ public: static int AllocateZone(UndoPersistence upersistence); static void ReleaseZone(int zid, UndoPersistence upersistence); static UndoZone *SwitchZone(int zid, UndoPersistence upersistence); - static UndoZone *GetUndoZone(int zid, bool needInit = false); + static UndoZone *GetUndoZone(int zid, bool isNeedInitZone = true, UndoPersistence upersistence = UNDO_PERMANENT); + static void InitUndoCxtUzones(); }; // class UndoZoneGroup void AllocateZonesBeforXid(); +void InitZone(UndoZone *uzone, const int zoneId, UndoPersistence upersistence); +void InitUndoSpace(UndoZone *uzone, UndoSpaceType type); } // namespace undo - #endif // __KNL_UUNDOZONE_H__ diff --git a/src/include/access/xact.h b/src/include/access/xact.h index a004776f4..289e15be0 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -105,6 +105,7 @@ typedef enum { /* Define the default setting for synchonous_commit */ #define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH + /* ---------------- * transaction-related XLOG entries * ---------------- @@ -256,6 +257,7 @@ typedef struct { /* procarray.c */ TransactionId* allDiffXids; /*different xids between GTM and the local */ uint32 DiffXidsCount; /*number of different xids between GTM and the local*/ + LocalSysDBCache *lsc_dbcache; } StreamTxnContext; #define STCSaveElem(dest, src) ((dest) = (src)) @@ -301,24 +303,19 @@ extern TransactionId GetTopTransactionId(void); extern TransactionId GetTopTransactionIdIfAny(void); extern TransactionId GetCurrentTransactionId(void); extern TransactionId GetCurrentTransactionIdIfAny(void); -extern GTM_TransactionKey GetCurrentTransactionKey(void); -extern GTM_TransactionKey GetCurrentTransactionKeyIfAny(void); extern GTM_TransactionHandle GetTransactionHandleIfAny(TransactionState s); extern GTM_TransactionHandle GetCurrentTransactionHandleIfAny(void); -extern GTM_Timeline 
GetCurrentTransactionTimeline(void); extern TransactionState GetCurrentTransactionState(void); extern TransactionId GetParentTransactionIdIfAny(TransactionState s); extern void ResetTransactionInfo(void); extern void EndParallelWorkerTransaction(void); #ifdef PGXC /* PGXC_COORD */ -extern TransactionId GetNewGxidGTM(TransactionState s, bool is_sub_xact); extern bool GetCurrentLocalParamStatus(void); extern void SetCurrentLocalParamStatus(bool status); extern GlobalTransactionId GetTopGlobalTransactionId(void); extern void SetTopGlobalTransactionId(GlobalTransactionId gxid); #endif -extern TransactionId GetTransactionIdFromGidStr(char* gid); extern TransactionId GetStableLatestTransactionId(void); extern void SetCurrentSubTransactionLocked(void); extern bool HasCurrentSubTransactionLock(void); @@ -332,7 +329,6 @@ extern TimestampTz GetCurrentStatementStartTimestamp(void); extern TimestampTz GetCurrentStatementLocalStartTimestamp(void); extern TimestampTz GetCurrentTransactionStopTimestamp(void); extern void SetCurrentStatementStartTimestamp(); -extern void SetStatementStartTimestamp(TimestampTz timestamp); #ifdef PGXC extern TimestampTz GetCurrentGTMStartTimestamp(void); extern TimestampTz GetCurrentStmtsysTimestamp(void); @@ -373,7 +369,6 @@ extern void BeginInternalSubTransaction(const char* name); extern void ReleaseCurrentSubTransaction(bool inSTP = false); extern void RollbackAndReleaseCurrentSubTransaction(bool inSTP = false); extern bool IsSubTransaction(void); -extern void SetCurrentTransactionId(TransactionId tid); extern bool IsTransactionBlock(void); extern bool IsTransactionOrTransactionBlock(void); extern char TransactionBlockStatusCode(void); @@ -389,14 +384,11 @@ extern void CallXactCallbacks(XactEvent event); extern bool AtEOXact_GlobalTxn(bool commit, bool is_write = false); #ifdef PGXC -extern void RegisterGTMCallback(GTMCallback callback, void* arg); extern void RegisterSequenceCallback(GTMCallback callback, void* arg); -extern void UnregisterGTMCallback(GTMCallback callback, const void* arg); extern void RegisterTransactionNodes(int count, void** connections, bool write); extern void PrintRegisteredTransactionNodes(void); extern void ForgetTransactionNodes(void); extern void RegisterTransactionLocalNode(bool write); -extern bool IsTransactionLocalNode(bool write); extern void ForgetTransactionLocalNode(void); extern bool IsXidImplicit(const char* xid); extern void SaveReceivedCommandId(CommandId cid); @@ -414,6 +406,7 @@ extern int xactGetCommittedChildren(TransactionId** ptr); extern void xact_redo(XLogReaderState* record); extern void xact_desc(StringInfo buf, XLogReaderState* record); +extern const char *xact_type_name(uint8 subtype); extern void xactApplyXLogDropRelation(XLogReaderState* record); @@ -429,10 +422,13 @@ extern bool IsInLiveSubtransaction(); extern void ExtendCsnlogForSubtrans(TransactionId parent_xid, int nsub_xid, TransactionId* sub_xids); extern CommitSeqNo SetXact2CommitInProgress(TransactionId xid, CommitSeqNo csn); extern void XactGetRelFiles(XLogReaderState* record, ColFileNodeRel** xnodesPtr, int* nrelsPtr); +extern bool XactWillRemoveRelFiles(XLogReaderState *record); extern HTAB* relfilenode_hashtbl_create(); extern CommitSeqNo getLocalNextCSN(); extern void UpdateNextMaxKnownCSN(CommitSeqNo csn); +extern void XLogInsertStandbyCSNCommitting(TransactionId xid, CommitSeqNo csn, + TransactionId *children, uint64 nchildren); #ifdef ENABLE_MOT extern bool IsMOTEngineUsed(); extern bool IsMOTEngineUsedInParentTransaction(); @@ -460,9 +456,7 @@ 
extern void ResetUndoActionsInfo(void);
extern bool CanPerformUndoActions(void);
extern void push_unlink_rel_to_hashtbl(ColFileNodeRel *xnodes, int nrels);
-extern void XactReserveSPIContext();
-extern void XactResumeSPIContext(bool clean);
-extern void XactCleanExceptionSubTransaction(SubTransactionId head, bool hasAbort);
+extern void XactCleanExceptionSubTransaction(SubTransactionId head);
extern char* GetCurrentTransactionName();
-
+extern List* GetTransactionList(List *head);
#endif /* XACT_H */
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 4a9378b14..2746498ed 100755
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -231,7 +231,10 @@ struct WALInitSegLockPadded {
0x0002 /* Like shutdown checkpoint, \
* but issued at end of WAL \
* recovery */
-#define CHECKPOINT_FILE_SYNC 0x0004 /* File sync */
+
+/* standby file repair: wait for all dirty pages to be flushed, then the old bad file can be removed */
+#define CHECKPOINT_FLUSH_DIRTY 0x0004 /* only flush dirty pages, don't update the redo point */
+
#define CHECKPOINT_IMMEDIATE 0x0008 /* Do it without delays */
#define CHECKPOINT_FORCE 0x0010 /* Force even if no activity */
/* These are important to RequestCheckpoint */
@@ -246,6 +249,7 @@ struct WALInitSegLockPadded {
#define LAZY_BACKWRITE 0x0400 /* lazy backwrite */
#define PAGERANGE_BACKWRITE 0x0800 /* PageRangeBackWrite */
+
/* Checkpoint statistics */
typedef struct CheckpointStatsData {
TimestampTz ckpt_start_t; /* start of checkpoint */
@@ -341,10 +345,296 @@ typedef struct TermFileData {
bool finish_redo;
} TermFileData;
+/*
+ * Inserting to WAL is protected by a small fixed number of WAL insertion
+ * locks. To insert to the WAL, you must hold one of the locks - it doesn't
+ * matter which one. To lock out other concurrent insertions, you must hold
+ * all of them. Each WAL insertion lock consists of a lightweight lock, plus an
+ * indicator of how far the insertion has progressed (insertingAt).
+ *
+ * The insertingAt values are read when a process wants to flush WAL from
+ * the in-memory buffers to disk, to check that all the insertions to the
+ * region the process is about to write out have finished. You could simply
+ * wait for all currently in-progress insertions to finish, but the
+ * insertingAt indicator allows you to ignore insertions to later in the WAL,
+ * so that you only wait for the insertions that are modifying the buffers
+ * you're about to write out.
+ *
+ * This isn't just an optimization. If all the WAL buffers are dirty, an
+ * inserter that's holding a WAL insert lock might need to evict an old WAL
+ * buffer, which requires flushing the WAL. If it's possible for an inserter
+ * to block on another inserter unnecessarily, deadlock can arise when two
+ * inserters holding a WAL insert lock wait for each other to finish their
+ * insertion.
+ *
+ * Small WAL records that don't cross a page boundary never update the value;
+ * the WAL record is just copied to the page and the lock is released. But
+ * to avoid the deadlock scenario explained above, the indicator is always
+ * updated before sleeping while holding an insertion lock.
+ */
+typedef struct {
+ LWLock lock;
+#ifdef __aarch64__
+ pg_atomic_uint32 xlogGroupFirst;
+#endif
+ XLogRecPtr insertingAt;
+} WALInsertLock;
+
+/*
+ * All the WAL insertion locks are allocated as an array in shared memory. We
+ * force the array stride to be a power of 2, which saves a few cycles in
+ * indexing, but more importantly also ensures that individual slots don't
+ * cross cache line boundaries. (Of course, we have to also ensure that the
+ * array start address is suitably aligned.)
+ */
+typedef union WALInsertLockPadded {
+ WALInsertLock l;
+ char pad[PG_CACHE_LINE_SIZE];
+} WALInsertLockPadded;
+
+/*
+ * Shared state data for WAL insertion.
+ */
+typedef struct XLogCtlInsert {
+ /*
+ * CurrBytePos is the end of reserved WAL. The next record will be inserted
+ * at that position. PrevBytePos is the start position of the previously
+ * inserted (or rather, reserved) record - it is copied to the prev-
+ * link of the next record. These are stored as "usable byte positions"
+ * rather than XLogRecPtrs (see XLogBytePosToRecPtr()).
+ */
+ uint64 CurrBytePos;
+ uint32 PrevByteSize;
+ int32 CurrLRC;
+
+#if (!defined __x86_64__) && (!defined __aarch64__)
+ slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */
+#endif
+ /*
+ * Make sure the above heavily-contended spinlock and byte positions are
+ * on their own cache line. In particular, the RedoRecPtr and full page
+ * write variables below should be on a different cache line. They are
+ * read on every WAL insertion, but updated rarely, and we don't want
+ * those reads to steal the cache line containing Curr/PrevBytePos.
+ */
+ char pad[PG_CACHE_LINE_SIZE];
+ /*
+ * WAL insertion locks.
+ */
+ WALInsertLockPadded **WALInsertLocks;
+
+ /*
+ * fullPageWrites is the master copy used by all backends to determine
+ * whether to write full-page images to WAL, instead of using the
+ * process-local one. This is required because, when full_page_writes is
+ * changed by SIGHUP, we must WAL-log it before it actually affects
+ * WAL-logging by backends. Checkpointer sets at startup or after SIGHUP.
+ * To read these fields, you must hold an insertion slot. To modify them,
+ * you must hold ALL the slots.
+ */
+ XLogRecPtr RedoRecPtr; /* current redo point for insertions */
+ bool forcePageWrites; /* forcing full-page writes for PITR? */
+ bool fullPageWrites;
+
+ /*
+ * exclusiveBackup is true if a backup started with pg_start_backup() is
+ * in progress, and nonExclusiveBackups is a counter indicating the number
+ * of streaming base backups currently in progress. forcePageWrites is set
+ * to true when either of these is non-zero. lastBackupStart is the latest
+ * checkpoint redo location used as a starting point for an online backup.
+ */
+ bool exclusiveBackup;
+ int nonExclusiveBackups;
+ XLogRecPtr lastBackupStart;
+} XLogCtlInsert;
+
+/*
+ * Total shared-memory state for XLOG.
+ */
+typedef struct XLogCtlData {
+ /* Protected by WALInsertLock: */
+ XLogCtlInsert Insert;
+
+ /* Protected by info_lck: */
+ XLogwrtRqst LogwrtRqst;
+ XLogRecPtr RedoRecPtr; /* a recent copy of Insert->RedoRecPtr */
+ TransactionId ckptXid;
+ XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
+ XLogRecPtr replicationSlotMinLSN; /* oldest LSN needed by any slot */
+ XLogRecPtr replicationSlotMaxLSN; /* latest LSN for dummy startpoint */
+ XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
+
+ /* Time of last xlog segment switch. Protected by WALWriteLock. */
+ pg_time_t lastSegSwitchTime;
+
+ /*
+ * Protected by info_lck and WALWriteLock (you must hold either lock to
+ * read it, but both to update)
+ */
+ XLogwrtResult LogwrtResult;
+
+#ifndef ENABLE_MULTIPLE_NODES
+ /*
+ * Protected by info_lck and WALWritePaxosLock (you must hold either lock to
+ * read it, but both to update)
+ */
+ XLogwrtPaxos LogwrtPaxos;
+#endif
+
+ /*
+ * Latest initialized block index in cache.
+ *
+ * To change curridx and the identity of a buffer, you need to hold
+ * WALBufMappingLock. To change the identity of a buffer that's still
+ * dirty, the old page needs to be written out first, and for that you
+ * need WALWriteLock, and you need to ensure that there are no in-progress
+ * insertions to the page by calling WaitXLogInsertionsToFinish().
+ */
+ XLogRecPtr InitializedUpTo;
+
+ /*
+ * These values do not change after startup, although the pointed-to pages
+ * and xlblocks values certainly do. xlblock values are protected by
+ * WALBufMappingLock.
+ */
+ char *pages; /* buffers for unwritten XLOG pages */
+ XLogRecPtr *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */
+ int XLogCacheBlck; /* highest allocated xlog buffer index */
+ TimeLineID ThisTimeLineID;
+
+ /*
+ * archiveCleanupCommand is read from recovery.conf but needs to be in
+ * shared memory so that the checkpointer process can access it.
+ */
+ char archiveCleanupCommand[MAXPGPATH];
+
+ /*
+ * SharedRecoveryInProgress indicates if we're still in crash or archive
+ * recovery. Protected by info_lck.
+ */
+ bool SharedRecoveryInProgress;
+
+ bool IsRecoveryDone;
+
+ /*
+ * SharedHotStandbyActive indicates if we allow hot standby queries to be
+ * run. Protected by info_lck.
+ */
+ bool SharedHotStandbyActive;
+
+ /*
+ * WalWriterSleeping indicates whether the WAL writer is currently in
+ * low-power mode (and hence should be nudged if an async commit occurs).
+ * Protected by info_lck.
+ */
+ bool WalWriterSleeping;
+
+ /*
+ * recoveryWakeupLatch is used to wake up the startup process to continue
+ * WAL replay, if it is waiting for WAL to arrive or failover trigger file
+ * to appear.
+ */
+ Latch recoveryWakeupLatch;
+
+ Latch dataRecoveryLatch;
+
+ /*
+ * During recovery, we keep a copy of the latest checkpoint record here.
+ * Used by the background writer when it wants to create a restartpoint.
+ *
+ * Protected by info_lck.
+ */
+ XLogRecPtr lastCheckPointRecPtr;
+ CheckPoint lastCheckPoint;
+
+ /* lastReplayedReadRecPtr points to the header of the last applied record. */
+ XLogRecPtr lastReplayedReadRecPtr;
+ /*
+ * lastReplayedEndRecPtr points to end+1 of the last record successfully
+ * replayed. When we're currently replaying a record, i.e. in a redo
+ * function, replayEndRecPtr points to the end+1 of the record being
+ * replayed, otherwise it's equal to lastReplayedEndRecPtr.
+ */
+ XLogRecPtr lastReplayedEndRecPtr;
+ XLogRecPtr replayEndRecPtr;
+ /* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
+ TimestampTz recoveryLastXTime;
+ /* current effective recovery target timeline */
+ TimeLineID RecoveryTargetTLI;
+
+ /*
+ * timestamp of when we started replaying the current chunk of WAL data,
+ * only relevant for replication or archive recovery
+ */
+ TimestampTz currentChunkStartTime;
+ /* Are we requested to pause recovery? */
+ bool recoveryPause;
+
+ /* Are we requested to suspend recovery? */
+ bool recoverySusPend;
+
+ /*
+ * lastFpwDisableRecPtr points to the start of the last replayed
+ * XLOG_FPW_CHANGE record that instructs full_page_writes is disabled.
+ */
+ XLogRecPtr lastFpwDisableRecPtr;
+
+ /*
+ * After starting up, we need to make sure that full-page writes
+ * are performed before the first checkpoint.
+ */
+ bool FpwBeforeFirstCkpt;
+
+ /* LSN of xlogs already tracked by CBM, which checkpoint can now recycle. */
+ XLogRecPtr cbmTrackedLSN;
+ /* the bitmap file rotate lsn requested externally */
+ volatile XLogRecPtr cbmMaxRequestRotateLsn;
+ /* curr cbm file name like pg_xlog_xx_xxxx_xxxx.cbm */
+ XLogRecPtr currCbmFileStartLsn;
+
+ /* if true, stale xlog segments are not recycled during checkpoint, for backup purposes */
+ bool delayXlogRecycle;
+
+ /* start point from which physical unlinking of dropped column relation files is delayed */
+ XLogRecPtr ddlDelayStartPtr;
+
+ /* start point for logging new remain segments or extents */
+ XLogRecPtr remain_segs_start_point;
+ bool is_need_log_remain_segs;
+ XLogRecPtr remainCommitLsn;
+
+ slock_t info_lck; /* locks shared variables shown above */
+} XLogCtlData;
+
+/* Xlog flush statistics */
+struct XlogFlushStats{
+ bool statSwitch;
+ uint64 writeTimes;
+ uint64 syncTimes;
+ uint64 totalXlogSyncBytes;
+ uint64 totalActualXlogSyncBytes;
+ uint32 avgWriteBytes;
+ uint32 avgActualWriteBytes;
+ uint32 avgSyncBytes;
+ uint32 avgActualSyncBytes;
+ uint64 totalWriteTime;
+ uint64 totalSyncTime;
+ uint64 avgWriteTime;
+ uint64 avgSyncTime;
+ uint64 currOpenXlogSegNo;
+ TimestampTz lastRestTime;
+};
+
+extern XLogSegNo GetNewestXLOGSegNo(const char* workingPath);
+/*
+ * Hint bit for whether xlog contains CSN info, which is stored in xl_term.
+ */
+#define XLOG_CONTAIN_CSN 0x80000000
+#define XLOG_MASK_TERM 0x7FFFFFFF
extern void XLogMultiFileInit(int advance_xlog_file_num);
-extern XLogRecPtr XLogInsertRecord(struct XLogRecData* rdata, XLogRecPtr fpw_lsn, bool isupgrade = false);
+extern XLogRecPtr XLogInsertRecord(struct XLogRecData* rdata, XLogRecPtr fpw_lsn);
extern void XLogWaitFlush(XLogRecPtr recptr);
extern void XLogWaitBufferInit(XLogRecPtr recptr);
extern void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force);
@@ -365,6 +655,7 @@ extern XLogRecPtr XLogGetReplicationSlotMinimumLSNByOther(void);
extern XLogSegNo XLogGetLastRemovedSegno(void);
extern void xlog_redo(XLogReaderState* record);
extern void xlog_desc(StringInfo buf, XLogReaderState* record);
+extern const char *xlog_type_name(uint8 subtype);
extern void issue_xlog_fsync(int fd, XLogSegNo segno);
@@ -390,13 +681,14 @@ extern XLogRecPtr GetPaxosConsensusRecPtr(void);
#endif
extern bool RecoveryIsPaused(void);
extern void SetRecoveryPause(bool recoveryPause);
+extern void SetRecoverySuspend(bool recoverySuspend);
extern TimestampTz GetLatestXTime(void);
extern TimestampTz GetCurrentChunkReplayStartTime(void);
extern char* XLogFileNameP(TimeLineID tli, XLogSegNo segno);
extern pg_crc32 GetXlogRecordCrc(XLogRecPtr RecPtr, bool& crcvalid, XLogPageReadCB pagereadfunc, Size bufAlignSize);
extern bool IsCheckPoint(XLogReaderState* record);
extern bool IsRestartPointSafe(const XLogRecPtr checkPoint);
-extern bool HasTimelineUpdate(XLogReaderState* record, bool bOld);
+extern bool HasTimelineUpdate(XLogReaderState* record);
extern void UpdateTimeline(CheckPoint* checkPoint);
extern void UpdateControlFile(void);
@@ -431,6 +723,7 @@ extern bool CheckFinishRedoSignal(void);
extern bool CheckPromoteSignal(void);
extern bool CheckPrimarySignal(void);
extern bool CheckStandbySignal(void);
+extern bool CheckCascadeStandbySignal(void);
extern bool CheckNormalSignal(void);
extern int CheckSwitchoverSignal(void);
@@ -494,7 +787,6 @@ extern char* TrimStr(const char* str);
extern void CloseXlogFilesAtThreadExit(void);
extern void SetLatestXTime(TimestampTz xtime);
-extern CheckPoint update_checkpoint(XLogReaderState* record);
XLogRecord* XLogParallelReadNextRecord(XLogReaderState* xlogreader);
void
ResourceManagerStartup(void); @@ -533,10 +825,12 @@ void ShareStorageInit(); void FindLastRecordCheckInfoOnShareStorage(XLogRecPtr *lastRecordPtr, pg_crc32 *lastRecordCrc, int *lastRecordLen); int SharedStorageXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, - XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI); + XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI, char *xlog_path); void ShareStorageSetBuildErrorAndExit(HaRebuildReason reason, bool setRcvDone = true); -void ReserveKeyForApp(); void rename_recovery_conf_for_roach(); +bool CheckForFailoverTrigger(void); +bool CheckForSwitchoverTrigger(void); +void HandleCascadeStandbyPromote(XLogRecPtr *recptr); extern XLogRecPtr XlogRemoveSegPrimary; @@ -612,5 +906,7 @@ void WriteRemainSegsFile(int fd, const char* buffer, uint32 used_len); void InitXlogStatuEntryTblSize(); void CheckShareStorageWriteLock(); XLogRecPtr GetFlushMainStandby(); +extern bool RecoveryIsSuspend(void); +extern void InitUndoCountThreshold(); #endif /* XLOG_H */ diff --git a/src/include/access/xlog_basic.h b/src/include/access/xlog_basic.h index 9222b9638..4048e43d3 100644 --- a/src/include/access/xlog_basic.h +++ b/src/include/access/xlog_basic.h @@ -40,6 +40,7 @@ */ #define MAXFNAMELEN 64 + /* size of the buffer allocated for error message. */ #define MAX_ERRORMSG_LEN 1000 @@ -47,7 +48,6 @@ * Each page of XLOG file has a header like this: */ #define XLOG_PAGE_MAGIC 0xD074 /* can be used as WAL version indicator */ -#define XLOG_PAGE_MAGIC_OLD 0xD073 /* can be used as WAL old version indicator */ /* * The XLOG is split into WAL segments (physical files) of the size indicated @@ -139,7 +139,7 @@ typedef struct XLogReaderState XLogReaderState; /* Function type definition for the read_page callback */ typedef int (*XLogPageReadCB)(XLogReaderState* xlogreader, XLogRecPtr targetPagePtr, int reqLen, - XLogRecPtr targetRecPtr, char* readBuf, TimeLineID* pageTLI); + XLogRecPtr targetRecPtr, char* readBuf, TimeLineID* pageTLI, char* xlog_path); typedef struct { /* Is this block ref in use? */ @@ -213,16 +213,6 @@ typedef struct XLogRecord { /* XLogRecordBlockHeaders and XLogRecordDataHeader follow, no padding */ } XLogRecord; -typedef struct XLogRecordOld { - uint32 xl_tot_len; /* total len of entire record */ - ShortTransactionId xl_xid; /* xact id */ - XLogRecPtrOld xl_prev; /* ptr to previous record in log */ - uint8 xl_info; /* flag bits, see below */ - RmgrId xl_rmid; /* resource manager for this record */ - /* 2 bytes of padding here, initialize to zero */ - pg_crc32 xl_crc; /* CRC for this record */ -} XLogRecordOld; - struct XLogReaderState { /* * @@ -263,7 +253,8 @@ struct XLogReaderState { * Opaque data for callbacks to use. Not used by XLogReader. */ void* private_data; - + /* Buffer to hold error message */ + char* errormsg_buf; /* * Start and end point of last record read. EndRecPtr is also used as the * position to read next, if XLogReadRecord receives an invalid recptr. 
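The hunks above widen the XLogPageReadCB callback type (and SharedStorageXLogPageRead) with a trailing char *xlog_path argument, so a page-read callback can be pointed at an alternate WAL directory; the XLogPageRead declaration later in this patch defaults it to NULL. A minimal sketch of a conforming callback follows; the function name DemoXLogPageRead, the fixed 16MB segment size, and the fall-back-to-XLOGDIR behavior are illustrative assumptions, not code from this patch:

```
/* Illustrative sketch only: a page-read callback matching the revised
 * XLogPageReadCB signature. DemoXLogPageRead, DEMO_SEG_SIZE and the
 * XLOGDIR fallback are assumptions for demonstration purposes. */
static int DemoXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
                            XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *pageTLI, char *xlog_path)
{
    const uint64 DEMO_SEG_SIZE = 16 * 1024 * 1024;               /* assumed WAL segment size */
    const char *dir = (xlog_path != NULL) ? xlog_path : XLOGDIR; /* assumed NULL fallback */
    char fname[MAXFNAMELEN];
    char path[MAXPGPATH];

    /* Locate the segment and the offset of the requested page within it.
     * We assume *pageTLI has already been determined by the caller. */
    XLogSegNo segno = targetPagePtr / DEMO_SEG_SIZE;
    off_t offset = (off_t)(targetPagePtr % DEMO_SEG_SIZE);

    /* Build the file name with the length-checked XLogFileName() variant
     * introduced later in this patch. */
    XLogFileName(fname, MAXFNAMELEN, *pageTLI, segno);
    int rc = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, "%s/%s", dir, fname);
    securec_check_ss(rc, "\0", "\0");

    int fd = open(path, O_RDONLY, 0);
    if (fd < 0)
        return -1;                  /* failure: the reader reports via errormsg_buf */
    ssize_t nread = pread(fd, readBuf, XLOG_BLCKSZ, offset);
    close(fd);
    return (nread >= reqLen) ? (int)nread : -1;  /* must deliver at least reqLen bytes */
}
```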
@@ -328,20 +319,17 @@ struct XLogReaderState { char* readRecordBuf; uint32 readRecordBufSize; - /* Buffer to hold error message */ - char* errormsg_buf; - - /* add for batch redo */ - uint32 refcount; // For parallel recovery bool isPRProcess; bool isDecode; bool isFullSync; + /* add for batch redo */ + uint32 refcount; bool isTde; + uint32 readblocks; }; #define SizeOfXLogRecord (offsetof(XLogRecord, xl_crc) + sizeof(pg_crc32c)) -#define SizeOfXLogRecordOld (offsetof(XLogRecordOld, xl_crc) + sizeof(pg_crc32)) typedef struct XLogPageHeaderData { uint16 xlp_magic; /* magic value for correctness checks */ @@ -462,5 +450,7 @@ typedef struct ShareStorageOperateCtl_ { const ShareStorageOperateIf *opereateIf; } ShareStorageOperateCtl; +extern List *readTimeLineHistory(TimeLineID targetTLI); + #endif /* XLOG_BASIC_H */ diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h index 3228b0e5f..b3c787491 100755 --- a/src/include/access/xlog_internal.h +++ b/src/include/access/xlog_internal.h @@ -37,12 +37,12 @@ #ifndef FRONTEND /* Compute the xlog filename with timelineId and segment number.*/ -#define XLogFileName(fname, tli, logSegNo) \ +#define XLogFileName(fname, len, tli, logSegNo) \ do { \ int nRet; \ nRet = snprintf_s(fname, \ - MAXFNAMELEN, \ - MAXFNAMELEN - 1, \ + len, \ + len - 1, \ "%08X%08X%08X", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -51,12 +51,12 @@ } while (0) /* compute xlog file path with timelineId and segment number. */ -#define XLogFilePath(path, tli, logSegNo) \ +#define XLogFilePath(path, len, tli, logSegNo) \ do { \ int nRet; \ nRet = snprintf_s(path, \ - MAXPGPATH, \ - MAXPGPATH - 1, \ + len, \ + len - 1, \ XLOGDIR "/%08X%08X%08X", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -84,12 +84,12 @@ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + strlen(fname) - strlen(".backup"), ".backup") == 0) -#define XLogFileName(fname, tli, logSegNo) \ +#define XLogFileName(fname, len, tli, logSegNo) \ do { \ int nRet; \ nRet = snprintf_s(fname, \ - MAXFNAMELEN, \ - MAXFNAMELEN - 1, \ + len, \ + len - 1, \ "%08X%08X%08X", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -97,12 +97,12 @@ securec_check_ss_c(nRet, "\0", "\0"); \ } while (0) -#define XLogFilePath(path, tli, logSegNo) \ +#define XLogFilePath(path, len, tli, logSegNo) \ do { \ int nRet; \ nRet = snprintf_s(path, \ - MAXPGPATH, \ - MAXPGPATH - 1, \ + len, \ + len - 1, \ XLOGDIR "/%08X%08X%08X", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -123,36 +123,36 @@ } while (0) /* compute history filename with timelineID. */ -#define TLHistoryFileName(fname, tli) \ +#define TLHistoryFileName(fname, len, tli) \ do { \ int nRet = 0; \ - nRet = snprintf_s(fname, MAXFNAMELEN, MAXFNAMELEN - 1, "%08X.history", tli); \ + nRet = snprintf_s(fname, len, len - 1, "%08X.history", tli); \ securec_check_ss(nRet, "\0", "\0"); \ } while (0) /* compute history filepath with timelineID. */ -#define TLHistoryFilePath(path, tli) \ +#define TLHistoryFilePath(path, len, tli) \ do { \ int nRet = 0; \ - nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/%08X.history", tli); \ + nRet = snprintf_s(path, len, len - 1, XLOGDIR "/%08X.history", tli); \ securec_check_ss(nRet, "\0", "\0"); \ } while (0) /* compute status filePath with xlog name and suffix. 
*/ -#define StatusFilePath(path, xlog, suffix) \ +#define StatusFilePath(path, len, xlog, suffix) \ do { \ int nRet = 0; \ - nRet = snprintf_s(path, MAXPGPATH, MAXPGPATH - 1, XLOGDIR "/archive_status/%s%s", xlog, suffix); \ + nRet = snprintf_s(path, len, len - 1, XLOGDIR "/archive_status/%s%s", xlog, suffix); \ securec_check_ss(nRet, "\0", "\0"); \ } while (0) /*compute backup history filename with timelineID, segment number and offset. */ -#define BackupHistoryFileName(fname, tli, logSegNo, offset) \ +#define BackupHistoryFileName(fname, len, tli, logSegNo, offset) \ do { \ int nRet = 0; \ nRet = snprintf_s(fname, \ - MAXFNAMELEN, \ - MAXFNAMELEN - 1, \ + len, \ + len - 1, \ "%08X%08X%08X.%08X.backup", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -162,12 +162,12 @@ } while (0) /**/ -#define BackupHistoryFilePath(path, tli, logSegNo, offset) \ +#define BackupHistoryFilePath(path, len, tli, logSegNo, offset) \ do { \ int nRet = 0; \ nRet = snprintf_s(path, \ - MAXPGPATH, \ - MAXPGPATH - 1, \ + len, \ + len - 1, \ XLOGDIR "/%08X%08X%08X.%08X.backup", \ tli, \ (uint32)((logSegNo) / XLogSegmentsPerXLogId), \ @@ -223,6 +223,7 @@ typedef struct RmgrData { TransactionId xid, Oid reloid, Oid partitionoid, BlockNumber blkno, bool isFullChain); void (*rm_undo_desc) (StringInfo buf, UndoRecord *record); + const char* (*rm_type_name)(uint8 subtype); } RmgrData; /* @@ -273,9 +274,10 @@ extern Datum pg_xlog_replay_pause(PG_FUNCTION_ARGS); extern Datum pg_xlog_replay_resume(PG_FUNCTION_ARGS); extern Datum pg_is_xlog_replay_paused(PG_FUNCTION_ARGS); extern Datum pg_xlog_location_diff(PG_FUNCTION_ARGS); +extern int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, + char *readBuf, TimeLineID *readTLI, char* xlog_path = NULL); -int XLogPageRead(XLogReaderState* xlogreader, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, - char* readBuf, TimeLineID* readTLI); bool XLogReadFromWriteBuffer(XLogRecPtr targetStartPtr, int reqLen, char* readBuf, uint32 *rereadlen); +extern void handleRecoverySusPend(XLogRecPtr lsn); #endif /* XLOG_INTERNAL_H */ diff --git a/src/include/access/xloginsert.h b/src/include/access/xloginsert.h index 4c1729de3..f39a70a9e 100755 --- a/src/include/access/xloginsert.h +++ b/src/include/access/xloginsert.h @@ -20,7 +20,6 @@ #include "storage/buf/block.h" #include "storage/buf/buf.h" #include "storage/buf/bufpage.h" -#include "storage/page_compression.h" #include "storage/smgr/relfilenode.h" struct XLogPhyBlock; @@ -46,10 +45,13 @@ struct XLogPhyBlock; #define REGBUF_KEEP_DATA \ 0x10 /* include data even if a full-page image \ * is taken */ + +/* The time unit is microsecond. 
*/ +static const int MAX_RPO_SLEEP_TIME = 500000; + /* prototypes for public functions in xloginsert.c: */ extern void XLogBeginInsert(void); -extern XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, bool isupgrade = false, int bucket_id = InvalidBktId, - bool isSwitchoverBarrier = false); +extern XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, int bucket_id = InvalidBktId, bool istoast = false); extern void XLogEnsureRecordSpace(int nbuffers, int ndatas); extern void XLogRegisterData(char* data, int len); extern void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags, TdeInfo* tdeinfo = NULL); @@ -61,5 +63,5 @@ extern bool XLogCheckBufferNeedsBackup(Buffer buffer); extern XLogRecPtr XLogSaveBufferForHint(Buffer buffer, bool buffer_std); extern void InitXLogInsert(void); extern void XLogIncludeOrigin(void); - +extern void LogCSN(CommitSeqNo *curCSN); #endif /* XLOGINSERT_H */ diff --git a/src/include/access/xlogproc.h b/src/include/access/xlogproc.h index 819d874cd..85101d5bf 100755 --- a/src/include/access/xlogproc.h +++ b/src/include/access/xlogproc.h @@ -1,1221 +1,1195 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * --------------------------------------------------------------------------------------- - * - * xlogproc.h - * - * - * IDENTIFICATION - * src/include/access/xlogproc.h - * - * --------------------------------------------------------------------------------------- - */ - -#ifndef XLOG_PROC_H -#define XLOG_PROC_H -#include "postgres.h" -#include "knl/knl_variable.h" - -#include "access/xlogreader.h" -#include "storage/buf/bufmgr.h" -#include "access/xlog_basic.h" -#include "access/xlogutils.h" -#include "access/clog.h" -#include "access/ustore/knl_uredo.h" -#include "access/ustore/knl_utuple.h" -#include "access/ustore/undo/knl_uundotxn.h" -#include "access/ustore/undo/knl_uundoxlog.h" - - -#ifndef byte -#define byte unsigned char -#endif - -typedef void (*relasexlogreadstate)(void* record); -/* **************define for parse end******************************* */ -#define MIN(_a, _b) ((_a) > (_b) ? 
(_b) : (_a)) - -/* for common blockhead begin */ - -#define XLogBlockHeadGetInfo(blockhead) ((blockhead)->xl_info) -#define XLogBlockHeadGetXid(blockhead) ((blockhead)->xl_xid) -#define XLogBlockHeadGetRmid(blockhead) ((blockhead)->xl_rmid) - -#define XLogBlockHeadGetLSN(blockhead) ((blockhead)->end_ptr) -#define XLogBlockHeadGetRelNode(blockhead) ((blockhead)->relNode) -#define XLogBlockHeadGetSpcNode(blockhead) ((blockhead)->spcNode) -#define XLogBlockHeadGetDbNode(blockhead) ((blockhead)->dbNode) -#define XLogBlockHeadGetForkNum(blockhead) ((blockhead)->forknum) -#define XLogBlockHeadGetBlockNum(blockhead) ((blockhead)->blkno) -#define XLogBlockHeadGetBucketId(blockhead) ((blockhead)->bucketNode) -#define XLogBlockHeadGetCompressOpt(blockhead) ((blockhead)->opt) -#define XLogBlockHeadGetValidInfo(blockhead) ((blockhead)->block_valid) -#define XLogBlockHeadGetPhysicalBlock(blockhead) ((blockhead)->pblk) -/* for common blockhead end */ - -/* for block data beging */ -#define XLogBlockDataHasBlockImage(blockdata) ((blockdata)->blockhead.has_image) -#define XLogBlockDataHasBlockData(blockdata) ((blockdata)->blockhead.has_data) -#define XLogBlockDataGetLastBlockLSN(_blockdata) ((_blockdata)->blockdata.last_lsn) -#define XLogBlockDataGetBlockFlags(blockdata) ((blockdata)->blockhead.flags) - -#define XLogBlockDataGetBlockId(blockdata) ((blockdata)->blockhead.cur_block_id) -#define XLogBlockDataGetAuxiBlock1(blockdata) ((blockdata)->blockhead.auxiblk1) -#define XLogBlockDataGetAuxiBlock2(blockdata) ((blockdata)->blockhead.auxiblk2) -/* for block data end */ - -typedef struct { - RelFileNode rnode; - ForkNumber forknum; - BlockNumber blkno; - XLogPhyBlock pblk; -} RedoBufferTag; - -typedef struct { - Page page; // pagepointer - Size pagesize; -} RedoPageInfo; - -typedef struct { - XLogRecPtr lsn; /* block cur lsn */ - Buffer buf; - RedoBufferTag blockinfo; - RedoPageInfo pageinfo; - int dirtyflag; /* true if the buffer changed */ -} RedoBufferInfo; - -extern void GetFlushBufferInfo(void *buf, RedoBufferInfo *bufferinfo, uint32 *buf_state, ReadBufferMethod flushmethod); - -#define MakeRedoBufferDirty(bufferinfo) ((bufferinfo)->dirtyflag = true) -#define RedoBufferDirtyClear(bufferinfo) ((bufferinfo)->dirtyflag = false) -#define IsRedoBufferDirty(bufferinfo) ((bufferinfo)->dirtyflag == true) - -#define RedoMemIsValid(memctl, bufferid) (((bufferid) > InvalidBuffer) && ((bufferid) <= (memctl->totalblknum))) - -typedef struct { - RedoBufferTag blockinfo; - pg_atomic_uint32 state; -} RedoBufferDesc; - -typedef struct { - Buffer buff_id; - pg_atomic_uint32 state; -} ParseBufferDesc; - -#define RedoBufferSlotGetBuffer(bslot) ((bslot)->buf_id) - -#define EnalbeWalLsnCheck true - -#pragma pack(push, 1) - -#define INVALID_BLOCK_ID (XLR_MAX_BLOCK_ID + 2) - -#define LOW_BLOKNUMBER_BITS (32) -#define LOW_BLOKNUMBER_MASK (((uint64)1 << 32) - 1) - - -/* ********BLOCK COMMON HEADER BEGIN ***************** */ -typedef enum { - BLOCK_DATA_MAIN_DATA_TYPE = 0, /* BLOCK DATA */ - BLOCK_DATA_VM_TYPE, /* VM */ - BLOCK_DATA_UNDO_TYPE, /* UNDO */ - BLOCK_DATA_FSM_TYPE, /* FSM */ - BLOCK_DATA_DDL_TYPE, /* DDL */ - BLOCK_DATA_BCM_TYPE, /* bcm */ - BLOCK_DATA_NEWCU_TYPE, /* cu newlog */ - BLOCK_DATA_CLOG_TYPE, /* CLog */ - BLOCK_DATA_MULITACT_OFF_TYPE, /* MultiXact */ - BLOCK_DATA_MULITACT_MEM_TYPE, - BLOCK_DATA_CSNLOG_TYPE, /* CSNLog */ - /* *****xact don't need sent to dfv */ - BLOCK_DATA_MULITACT_UPDATEOID_TYPE, - BLOCK_DATA_XACTDATA_TYPE, /* XACT */ - BLOCK_DATA_RELMAP_TYPE, /* RELMAP */ - BLOCK_DATA_SLOT_TYPE, - 
BLOCK_DATA_BARRIER_TYPE, - BLOCK_DATA_PREPARE_TYPE, /* prepare */ - BLOCK_DATA_INVALIDMSG_TYPE, /* INVALIDMSG */ - BLOCK_DATA_INCOMPLETE_TYPE, - BLOCK_DATA_VACUUM_PIN_TYPE, - BLOCK_DATA_XLOG_COMMON_TYPE, - BLOCK_DATA_CREATE_DATABASE_TYPE, - BLOCK_DATA_DROP_DATABASE_TYPE, - BLOCK_DATA_CREATE_TBLSPC_TYPE, - BLOCK_DATA_DROP_TBLSPC_TYPE, - BLOCK_DATA_DROP_SLICE_TYPE, - BLOCK_DATA_SEG_FILE_EXTEND_TYPE, - BLOCK_DATA_SEG_SPACE_DROP, - BLOCK_DATA_SEG_SPACE_SHRINK, -} XLogBlockParseEnum; - -/* ********BLOCK COMMON HEADER END ***************** */ - -/* **************define for parse begin ******************************* */ - -/* ********BLOCK DATE BEGIN ***************** */ - -typedef struct { - uint8 cur_block_id; /* blockid */ - uint8 flags; - uint8 has_image; - uint8 has_data; - BlockNumber auxiblk1; - BlockNumber auxiblk2; -} XLogBlocDatakHead; - -#define XLOG_BLOCK_DATAHEAD_LEN sizeof(XLogBlocDatakHead) - -typedef struct { - uint16 extra_flag; - uint16 hole_offset; - uint16 hole_length; /* image position */ - uint16 data_len; /* data length */ - XLogRecPtr last_lsn; - char* bkp_image; - char* data; -} XLogBlockData; - -#define XLOG_BLOCK_DATA_LEN sizeof(XLogBlockData) - -typedef struct { - XLogBlocDatakHead blockhead; - XLogBlockData blockdata; - uint32 main_data_len; /* main data portion's length */ - char* main_data; /* point to XLogReaderState's main_data */ -} XLogBlockDataParse; -/* ********BLOCK DATE END ***************** */ -#define XLOG_BLOCK_DATA_PARSE_LEN sizeof(XLogBlockDataParse) - -/* ********BLOCK DDL BEGIN ***************** */ -typedef enum { - BLOCK_DDL_TYPE_NONE = 0, - BLOCK_DDL_CREATE_RELNODE, - BLOCK_DDL_DROP_RELNODE, - BLOCK_DDL_EXTEND_RELNODE, - BLOCK_DDL_TRUNCATE_RELNODE, - BLOCK_DDL_CLOG_ZERO, - BLOCK_DDL_CLOG_TRUNCATE, - BLOCK_DDL_MULTIXACT_OFF_ZERO, - BLOCK_DDL_MULTIXACT_MEM_ZERO, - BLOCK_DDL_DROP_BKTLIST -} XLogBlockDdlInfoEnum; - -typedef struct { - uint32 blockddltype; - uint32 columnrel; - Oid ownerid; - char *mainData; -} XLogBlockDdlParse; - -/* ********BLOCK DDL END ***************** */ - -/* ********BLOCK CLOG BEGIN ***************** */ - -#define MAX_BLOCK_XID_NUMS (28) -typedef struct { - TransactionId topxid; - uint16 status; - uint16 xidnum; - uint16 xidsarry[MAX_BLOCK_XID_NUMS]; -} XLogBlockCLogParse; - -/* ********BLOCK CLOG END ***************** */ - -/* ********BLOCK CSNLOG BEGIN ***************** */ -typedef struct { - TransactionId topxid; - CommitSeqNo cslseq; - uint32 xidnum; - uint16 xidsarry[MAX_BLOCK_XID_NUMS]; -} XLogBlockCSNLogParse; - -/* ********BLOCK CSNLOG END ***************** */ - -/* ********BLOCK prepare BEGIN ***************** */ -struct TwoPhaseFileHeader; - -typedef struct { - TransactionId maxxid; - Size maindatalen; - char* maindata; -} XLogBlockPrepareParse; - -/* ********BLOCK prepare END ***************** */ - -/* ********BLOCK Bcm BEGIN ***************** */ -typedef struct { - uint64 startblock; - int count; - int status; -} XLogBlockBcmParse; - -/* ********BLOCK Bcm END ***************** */ - -/* ********BLOCK Vm BEGIN ***************** */ -typedef struct { - BlockNumber heapBlk; -} XLogBlockVmParse; - -#define XLOG_BLOCK_VM_PARSE_LEN sizeof(XLogBlockVmParse) -/* ********BLOCK Vm END ***************** */ - -/* ********BLOCK Undo BEGIN ***************** */ -struct insertUndoParse { - TransactionId recxid; - BlockNumber blkno; - Oid spcNode; - Oid relNode; - XLogRecPtr lsn; - XlUndoHeader xlundohdr; - XlUndoHeaderExtra xlundohdrextra; - undo::XlogUndoMeta xlundometa; - OffsetNumber offnum; -}; - -struct deleteUndoParse { - 
TransactionId recxid; - TransactionId oldxid; - BlockNumber blkno; - Oid spcNode; - Oid relNode; - XLogRecPtr lsn; - XlUndoHeader xlundohdr; - XlUndoHeaderExtra xlundohdrextra; - undo::XlogUndoMeta xlundometa; - UHeapTupleData utup; - OffsetNumber offnum; -}; - -struct updateUndoParse { - bool inplaceUpdate; - TransactionId recxid; - TransactionId oldxid; - Oid spcNode; - Oid relNode; - OffsetNumber new_offnum; - OffsetNumber old_offnum; - XLogRecPtr lsn; - XlUndoHeader xlundohdr; - XlUndoHeaderExtra xlundohdrextra; - XlUndoHeader xlnewundohdr; - XlUndoHeaderExtra xlnewundohdrextra; - undo::XlogUndoMeta xlundometa; - int undoXorDeltaSize; - char *xlogXorDelta; - BlockNumber newblk; - BlockNumber oldblk; -}; - -struct multiInsertUndoParse { - TransactionId recxid; - BlockNumber blkno; - Oid spcNode; - Oid relNode; - XLogRecPtr lsn; - bool isinit; - bool skipUndo; - XlUndoHeader xlundohdr; - XlUndoHeaderExtra xlundohdrextra; - UndoRecPtr last_urecptr; - undo::XlogUndoMeta xlundometa; -}; - -struct rollbackFinishParse { - UndoSlotPtr slotPtr; - XLogRecPtr lsn; -}; - -struct undoDiscardParse { - int zoneId; - UndoSlotPtr endSlot; - UndoSlotPtr startSlot; - UndoRecPtr endUndoPtr; - TransactionId recycledXid; - XLogRecPtr lsn; -}; - -struct undoUnlinkParse { - int zoneId; - UndoLogOffset headOffset; - XLogRecPtr unlinkLsn; -}; - -struct undoExtendParse { - int zoneId; - UndoLogOffset tailOffset; - XLogRecPtr extendLsn; -}; - -struct undoCleanParse { - int zoneId; - UndoLogOffset tailOffset; - XLogRecPtr cleanLsn; -}; - -typedef struct { - char *maindata; - Size recordlen; - union { - struct insertUndoParse insertUndoParse; - struct deleteUndoParse deleteUndoParse; - struct updateUndoParse updateUndoParse; - struct undoDiscardParse undoDiscardParse; - struct undoUnlinkParse undoUnlinkParse; - struct undoExtendParse undoExtendParse; - struct undoCleanParse undoCleanParse; - struct rollbackFinishParse rollbackFinishParse; - struct multiInsertUndoParse multiInsertUndoParse; - }; -} XLogBlockUndoParse; -/* ********BLOCK Undo END ***************** */ - -/* ********BLOCK NewCu BEGIN ***************** */ -typedef struct { - uint32 main_data_len; /* main data portion's length */ - char* main_data; /* point to XLogReaderState's main_data */ -} XLogBlockNewCuParse; - - -/* ********BLOCK NewCu END ***************** */ - -/* ********BLOCK InvalidMsg BEGIN ***************** */ -typedef struct { - TransactionId cutoffxid; -} XLogBlockInvalidParse; - -/* ********BLOCK InvalidMsg END ***************** */ - -/* ********BLOCK Incomplete BEGIN ***************** */ - -typedef enum { - INCOMPLETE_ACTION_LOG = 0, - INCOMPLETE_ACTION_FORGET -} XLogBlockIncompleteEnum; - -typedef struct { - uint16 action; /* split or delete */ - bool issplit; - bool isroot; - BlockNumber downblk; - BlockNumber leftblk; - BlockNumber rightblk; -} XLogBlockIncompleteParse; - -/* ********BLOCK Incomplete END ***************** */ - -/* ********BLOCK VacuumPin BEGIN ***************** */ -typedef struct { - BlockNumber lastBlockVacuumed; -} XLogBlockVacuumPinParse; - -/* ********BLOCK XLOG Common BEGIN ***************** */ -typedef struct { - XLogRecPtr readrecptr; - Size maindatalen; - char* maindata; -} XLogBlockXLogComParse; - -/* ********BLOCK XLOG Common END ***************** */ - -/* ********BLOCK DataBase BEGIN ***************** */ -typedef struct { - Oid src_db_id; - Oid src_tablespace_id; -} XLogBlockDataBaseParse; - -/* ********BLOCK DataBase Common END ***************** */ - -/* ********BLOCK table spc BEGIN ***************** */ 
-typedef struct { - char* tblPath; - bool isRelativePath; -} XLogBlockTblSpcParse; - -/* ********BLOCK table spc END ***************** */ - -/* ********BLOCK Multi Xact Offset BEGIN ***************** */ -typedef struct { - MultiXactId multi; - MultiXactOffset moffset; -} XLogBlockMultiXactOffParse; - -/* ********BLOCK Multi Xact Offset END ***************** */ - -/* ********BLOCK Multi Xact Mem BEGIN ***************** */ -typedef struct { - MultiXactId multi; - MultiXactOffset startoffset; - uint64 xidnum; - TransactionId xidsarry[MAX_BLOCK_XID_NUMS]; -} XLogBlockMultiXactMemParse; -/* ********BLOCK Multi Xact Mem END ***************** */ - -/* ********BLOCK Multi Xact update oid BEGIN ***************** */ -typedef struct { - MultiXactId nextmulti; - MultiXactOffset nextoffset; - TransactionId maxxid; -} XLogBlockMultiUpdateParse; -/* ********BLOCK Multi Xact update oid END ***************** */ - -/* ********BLOCK rel map BEGIN ***************** */ -typedef struct { - Size maindatalen; - char* maindata; -} XLogBlockRelMapParse; -/* ********BLOCK rel map END ***************** */ - -typedef struct { - uint32 xl_term; -} XLogBlockRedoHead; - -#define XLogRecRedoHeadEncodeSize (offsetof(XLogBlockRedoHead, refrecord)) -typedef struct { - XLogRecPtr start_ptr; - XLogRecPtr end_ptr; /* copy from XLogReaderState's EndRecPtr */ - BlockNumber blkno; - Oid relNode; /* relation */ - uint16 block_valid; /* block data validinfo see XLogBlockInfoEnum */ - uint8 xl_info; /* flag bits, see below */ - RmgrId xl_rmid; /* resource manager for this record */ - ForkNumber forknum; - TransactionId xl_xid; /* xact id */ - Oid spcNode; /* tablespace */ - Oid dbNode; /* database */ - int2 bucketNode; /* bucket */ - uint2 opt; - XLogPhyBlock pblk; -} XLogBlockHead; - -#define XLogBlockHeadEncodeSize (sizeof(XLogBlockHead)) - -#define BYTE_NUM_BITS (8) -#define BYTE_MASK (0xFF) -#define U64_BYTES_NUM (8) -#define U32_BYTES_NUM (4) -#define U16_BYTES_NUM (2) -#define U8_BYTES_NUM (1) - -#define U32_BITS_NUM (BYTE_NUM_BITS * U32_BYTES_NUM) - -extern uint64 XLog_Read_N_Bytes(char* buffer, Size buffersize, Size readbytes); - -#define XLog_Read_1_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U8_BYTES_NUM) -#define XLog_Read_2_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U16_BYTES_NUM) -#define XLog_Read_4_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U32_BYTES_NUM) -#define XLog_Read_8_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U64_BYTES_NUM) - -extern bool XLog_Write_N_bytes(uint64 values, Size writebytes, byte* buffer); - -#define XLog_Write_1_Bytes(values, buffer) XLog_Write_N_bytes(values, U8_BYTES_NUM, buffer) -#define XLog_Write_2_Bytes(values, buffer) XLog_Write_N_bytes(values, U16_BYTES_NUM, buffer) -#define XLog_Write_4_Bytes(values, buffer) XLog_Write_N_bytes(values, U32_BYTES_NUM, buffer) -#define XLog_Write_8_Bytes(values, buffer) XLog_Write_N_bytes(values, U64_BYTES_NUM, buffer) - -typedef struct XLogBlockEnCode { - bool (*xlog_encodefun)(byte* buffer, Size buffersize, Size* encodesize, void* xlogbody); - uint16 block_valid; -} XLogBlockEnCode; - -typedef struct XLogBlockRedoCode { - void (*xlog_redofun)(char* buffer, Size buffersize, XLogBlockHead* blockhead, XLogBlockRedoHead* redohead, - void* page, Size pagesize); - uint16 block_valid; -} XLogBlockRedoCode; - -#pragma pack(pop) - -/* ********BLOCK Xact BEGIN ***************** */ -typedef struct { - uint8 delayddlflag; - uint8 updateminrecovery; - uint16 committype; - int 
invalidmsgnum; - int nrels; /* delete rels */ - int nlibs; /* delete libs */ - uint64 xinfo; - TimestampTz xact_time; - TransactionId maxxid; - CommitSeqNo maxcommitseq; - void* invalidmsg; - void* xnodes; - void* libfilename; -} XLogBlockXactParse; - -typedef struct { - Size maindatalen; - char* maindata; -} XLogBlockSlotParse; -/* ********BLOCK slot END ***************** */ - -/* ********BLOCK barrier BEGIN ***************** */ -typedef struct { - XLogRecPtr startptr; - XLogRecPtr endptr; -} XLogBlockBarrierParse; - -/* ********BLOCK Xact END ***************** */ - -/* ********BLOCK VacuumPin END ***************** */ - -/* ********BLOCK Segfile Extend Begin */ -typedef struct { - BlockNumber target_blocks; -} XLogSegFileExtendParse; -/* ********BLOCK Segfile Extend END */ - -/* ********BLOCK Segment Truncate Begin */ -typedef struct { - XLogBlockDdlParse blockddlrec; - XLogBlockDataParse blockdatarec; -} XLogBlockSegDdlParse; -/* ********BLOCK Segment Truncate END */ - -typedef struct { - XLogBlockHead blockhead; - XLogBlockRedoHead redohead; - union { - XLogBlockDataParse blockdatarec; - XLogBlockVmParse blockvmrec; - XLogBlockUndoParse blockundorec; - XLogBlockDdlParse blockddlrec; - XLogBlockBcmParse blockbcmrec; - XLogBlockNewCuParse blocknewcu; - XLogBlockCLogParse blockclogrec; - XLogBlockCSNLogParse blockcsnlogrec; - XLogBlockXactParse blockxact; - XLogBlockPrepareParse blockprepare; - XLogBlockInvalidParse blockinvalidmsg; - // XLogBlockIncompleteParse blockincomplete; - XLogBlockVacuumPinParse blockvacuumpin; - XLogBlockXLogComParse blockxlogcommon; - XLogBlockDataBaseParse blockdatabase; - XLogBlockTblSpcParse blocktblspc; - XLogBlockMultiXactOffParse blockmultixactoff; - XLogBlockMultiXactMemParse blockmultixactmem; - XLogBlockMultiUpdateParse blockmultiupdate; - XLogBlockRelMapParse blockrelmap; - XLogBlockSlotParse blockslot; - XLogBlockBarrierParse blockbarrier; - XLogSegFileExtendParse segfileExtend; - XLogBlockSegDdlParse blocksegddlrec; - } extra_rec; -} XLogBlockParse; - -#define XLogBlockParseGetDdlParse(blockdatarec, ddlrecparse) \ - do \ - { \ - Assert((blockdatarec)->blockparse.blockhead.block_valid == BLOCK_DATA_DDL_TYPE); \ - if (blockdatarec->blockparse.blockhead.bucketNode != InvalidBktId) { \ - ddlrecparse = &blockdatarec->blockparse.extra_rec.blocksegddlrec.blockddlrec; \ - } else { \ - ddlrecparse = &blockdatarec->blockparse.extra_rec.blockddlrec; \ - } \ - } while (0); - -typedef struct -{ - Buffer buf_id; - Buffer freeNext; -} RedoMemSlot; - -typedef void (*InterruptFunc)(); - -typedef struct -{ - int totalblknum; /* total slot */ - int usedblknum; /* used slot */ - Size itemsize; - Buffer firstfreeslot; /* first free slot */ - Buffer firstreleaseslot; /* first release slot */ - RedoMemSlot *memslot; /* slot itme */ - bool isInit; - InterruptFunc doInterrupt; -}RedoMemManager; - -typedef void (*RefOperateFunc)(void *record); -#ifdef USE_ASSERT_CHECKING -typedef void (*RecordCheckFunc)(void *record, XLogRecPtr curPageLsn, uint32 blockId, bool replayed); -#endif - -typedef struct { - RefOperateFunc refCount; - RefOperateFunc DerefCount; -#ifdef USE_ASSERT_CHECKING - RecordCheckFunc checkFunc; -#endif -}RefOperate; - -typedef struct -{ - void *BufferBlockPointers; /* RedoBufferDesc + block */ - RedoMemManager memctl; - RefOperate *refOperate; -}RedoBufferManager; - - - -typedef struct -{ - void *parsebuffers; /* ParseBufferDesc + XLogRecParseState */ - RedoMemManager memctl; - RefOperate *refOperate; -}RedoParseManager; - - - -typedef struct { - void* 
nextrecord; - XLogBlockParse blockparse; /* block data */ - RedoParseManager* manager; - void* refrecord; /* origin dataptr, for mem release */ - uint64 batchcount; - bool isFullSync; -} XLogRecParseState; - -typedef struct XLogBlockRedoExtreRto { - void (*xlog_redoextrto)(XLogBlockHead* blockhead, void* blockrecbody, RedoBufferInfo* bufferinfo); - uint16 block_valid; -} XLogBlockRedoExtreRto; - -typedef struct XLogParseBlock { - XLogRecParseState* (*xlog_parseblock)(XLogReaderState* record, uint32* blocknum); - RmgrId rmid; -} XLogParseBlock; - -typedef enum { - HEAP_INSERT_ORIG_BLOCK_NUM = 0 -} XLogHeapInsertBlockEnum; - -typedef enum { - HEAP_DELETE_ORIG_BLOCK_NUM = 0 -} XLogHeapDeleteBlockEnum; - -typedef enum { - HEAP_UPDATE_NEW_BLOCK_NUM = 0, - HEAP_UPDATE_OLD_BLOCK_NUM -} XLogHeapUpdateBlockEnum; - -typedef enum { - HEAP_BASESHIFT_ORIG_BLOCK_NUM = 0 -} XLogHeapBaeShiftBlockEnum; - -typedef enum { - HEAP_NEWPAGE_ORIG_BLOCK_NUM = 0 -} XLogHeapNewPageBlockEnum; - -typedef enum { - HEAP_LOCK_ORIG_BLOCK_NUM = 0 -} XLogHeapLockBlockEnum; - -typedef enum { - HEAP_INPLACE_ORIG_BLOCK_NUM = 0 -} XLogHeapInplaceBlockEnum; - -typedef enum { - HEAP_FREEZE_ORIG_BLOCK_NUM = 0 -} XLogHeapFreezeBlockEnum; - -typedef enum { - HEAP_CLEAN_ORIG_BLOCK_NUM = 0 -} XLogHeapCleanBlockEnum; - -typedef enum { - HEAP_VISIBLE_VM_BLOCK_NUM = 0, - HEAP_VISIBLE_DATA_BLOCK_NUM -} XLogHeapVisibleBlockEnum; - -typedef enum { - HEAP_MULTI_INSERT_ORIG_BLOCK_NUM = 0 -} XLogHeapMultiInsertBlockEnum; - -typedef enum { - HEAP_PAGE_UPDATE_ORIG_BLOCK_NUM = 0 -} XLogHeapPageUpdateBlockEnum; - -typedef enum { - UHEAP_INSERT_ORIG_BLOCK_NUM = 0 -} XLogUHeapInsertBlockEnum; - -typedef enum { - UHEAP_DELETE_ORIG_BLOCK_NUM = 0 -} XLogUHeapDeleteBlockEnum; - -typedef enum { - UHEAP_UPDATE_NEW_BLOCK_NUM = 0, - UHEAP_UPDATE_OLD_BLOCK_NUM -} XLogUHeapUpdateBlockEnum; - -typedef enum { - UHEAP_MULTI_INSERT_ORIG_BLOCK_NUM = 0 -} XLogUHeapMultiInsertBlockEnum; - -typedef enum { - UHEAP_FREEZE_TD_ORIG_BLOCK_NUM = 0 -} XLogUHeapFreezeTDBlockEnum; - -typedef enum { - UHEAP_INVALID_TD_ORIG_BLOCK_NUM = 0 -} XLogUHeapInvalidTDBlockEnum; - -typedef enum { - UHEAP_CLEAN_ORIG_BLOCK_NUM = 0 -} XLogUHeapCleanBlockEnum; - -typedef enum { - UHEAP2_ORIG_BLOCK_NUM = 0 -} XLogUHeap2BlockEnum; - -typedef enum { - UHEAP_UNDO_ORIG_BLOCK_NUM = 0 -} XLogUHeapUndoBlockEnum; - -typedef enum { - UHEAP_UNDOACTION_ORIG_BLOCK_NUM = 0 -} XLogUheapUndoActionBlockEnum; - -extern THR_LOCAL RedoParseManager* g_parseManager; -extern THR_LOCAL RedoBufferManager* g_bufferManager; - -extern void* XLogMemCtlInit(RedoMemManager* memctl, Size itemsize, int itemnum); -extern RedoMemSlot* XLogMemAlloc(RedoMemManager* memctl); -extern void XLogMemRelease(RedoMemManager* memctl, Buffer bufferid); - -extern void XLogRedoBufferInit(RedoBufferManager* buffermanager, int buffernum, RefOperate *refOperate, - InterruptFunc interruptOperte); -extern void XLogRedoBufferDestory(RedoBufferManager* buffermanager); -extern RedoMemSlot* XLogRedoBufferAlloc( - RedoBufferManager* buffermanager, RelFileNode relnode, ForkNumber forkNum, BlockNumber blockNum); -extern bool XLogRedoBufferIsValid(RedoBufferManager* buffermanager, Buffer bufferid); -extern void XLogRedoBufferRelease(RedoBufferManager* buffermanager, Buffer bufferid); -extern BlockNumber XLogRedoBufferGetBlkNumber(RedoBufferManager* buffermanager, Buffer bufferid); -extern Block XLogRedoBufferGetBlk(RedoBufferManager* buffermanager, RedoMemSlot* bufferslot); -extern Block XLogRedoBufferGetPage(RedoBufferManager* buffermanager, Buffer 
bufferid); -extern void XLogRedoBufferSetState(RedoBufferManager* buffermanager, RedoMemSlot* bufferslot, uint32 state); - -#define XLogRedoBufferInitFunc(bufferManager, buffernum, defOperate, interruptOperte) do { \ - XLogRedoBufferInit(bufferManager, buffernum, defOperate, interruptOperte); \ -} while (0) -#define XLogRedoBufferDestoryFunc(bufferManager) do { \ - XLogRedoBufferDestory(bufferManager); \ -} while (0) -#define XLogRedoBufferAllocFunc(relnode, forkNum, blockNum, bufferslot) do { \ - *bufferslot = XLogRedoBufferAlloc(g_bufferManager, relnode, forkNum, blockNum); \ -} while (0) -#define XLogRedoBufferIsValidFunc(bufferid, isvalid) do { \ - *isvalid = XLogRedoBufferIsValid(g_bufferManager, bufferid); \ -} while (0) -#define XLogRedoBufferReleaseFunc(bufferid) do { \ - XLogRedoBufferRelease(g_bufferManager, bufferid); \ -} while (0) - -#define XLogRedoBufferGetBlkNumberFunc(bufferid, blknumber) do { \ - *blknumber = XLogRedoBufferGetBlkNumber(g_bufferManager, bufferid); \ -} while (0) - -#define XLogRedoBufferGetBlkFunc(bufferslot, blockdata) do { \ - *blockdata = XLogRedoBufferGetBlk(g_bufferManager, bufferslot); \ -} while (0) - -#define XLogRedoBufferGetPageFunc(bufferid, blockdata) do { \ - *blockdata = (Page)XLogRedoBufferGetPage(g_bufferManager, bufferid); \ -} while (0) -#define XLogRedoBufferSetStateFunc(bufferslot, state) do { \ - XLogRedoBufferSetState(g_bufferManager, bufferslot, state); \ -} while (0) - -extern void XLogParseBufferInit(RedoParseManager* parsemanager, int buffernum, RefOperate *refOperate, - InterruptFunc interruptOperte); -extern void XLogParseBufferDestory(RedoParseManager* parsemanager); -extern void XLogParseBufferRelease(XLogRecParseState* recordstate); -extern XLogRecParseState* XLogParseBufferAllocList(RedoParseManager* parsemanager, XLogRecParseState* blkstatehead, void *record); -extern XLogRedoAction XLogReadBufferForRedo(XLogReaderState* record, uint8 buffer_id, RedoBufferInfo* bufferinfo); -extern void XLogInitBufferForRedo(XLogReaderState* record, uint8 block_id, RedoBufferInfo* bufferinfo); -extern XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState* record, uint8 buffer_id, ReadBufferMode mode, - bool get_cleanup_lock, RedoBufferInfo* bufferinfo, ReadBufferMethod readmethod = WITH_NORMAL_CACHE); -#define XLogParseBufferInitFunc(parseManager, buffernum, defOperate, interruptOperte) do { \ - XLogParseBufferInit(parseManager, buffernum, defOperate, interruptOperte); \ -} while (0) - -#define XLogParseBufferDestoryFunc(parseManager) do { \ - XLogParseBufferDestory(parseManager); \ -} while (0) - -#define XLogParseBufferReleaseFunc(recordstate) do { \ - XLogParseBufferRelease(recordstate); \ -} while (0) - -#define XLogParseBufferAllocListFunc(record, newblkstate, blkstatehead) do { \ - *newblkstate = XLogParseBufferAllocList(g_parseManager, blkstatehead, record); \ -} while (0) - -#define XLogParseBufferAllocListStateFunc(record, newblkstate, blkstatehead) do { \ - if (*blkstatehead == NULL) { \ - *newblkstate = XLogParseBufferAllocList(g_parseManager, NULL, record); \ - *blkstatehead = *newblkstate; \ - } else { \ - *newblkstate = XLogParseBufferAllocList(g_parseManager, *blkstatehead, record); \ - } \ -} while (0) - - - - -#ifdef EXTREME_RTO_DEBUG_AB -typedef void (*AbnormalProcFunc)(void); -typedef enum { - A_THREAD_EXIT, - ALLOC_FAIL, - OPEN_FILE_FAIL, - WAIT_LONG, - ABNORMAL_NUM, -}AbnormalType; -extern AbnormalProcFunc g_AbFunList[ABNORMAL_NUM]; - - -#define ADD_ABNORMAL_POSITION(pos) do { \ - static int __count##pos = 0; \ - 
__count##pos++; \ - if (g_instance.attr.attr_storage.extreme_rto_ab_pos == pos) { \ - if (g_instance.attr.attr_storage.extreme_rto_ab_count == __count##pos) { \ - ereport(LOG, (errmsg("extreme rto debug abnormal stop pos:%d, type:%d, count:%d", pos, \ - g_instance.attr.attr_storage.extreme_rto_ab_type, __count##pos))); \ - g_AbFunList[g_instance.attr.attr_storage.extreme_rto_ab_type % ABNORMAL_NUM](); \ - } \ - } \ -} while(0) -#else -#define ADD_ABNORMAL_POSITION(pos) -#endif - - - -void HeapXlogCleanOperatorPage( - RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size datalen, Size* freespace, bool repairFragmentation); -void HeapXlogFreezeOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size datalen, - bool isTupleLockUpgrade); -void HeapXlogVisibleOperatorPage(RedoBufferInfo* buffer, void* recorddata); -void HeapXlogVisibleOperatorVmpage(RedoBufferInfo* vmbuffer, void* recorddata); -void HeapXlogDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata, TransactionId recordxid, - bool isTupleLockUpgrade); -void HeapXlogInsertOperatorPage(RedoBufferInfo* buffer, void* recorddata, bool isinit, void* blkdata, Size datalen, - TransactionId recxid, Size* freespace, bool tde = false); -void HeapXlogMultiInsertOperatorPage(RedoBufferInfo* buffer, const void* recoreddata, bool isinit, const void* blkdata, - Size len, TransactionId recordxid, Size* freespace, bool tde = false); -void HeapXlogUpdateOperatorOldpage(RedoBufferInfo* buffer, void* recoreddata, bool hot_update, bool isnewinit, - BlockNumber newblk, TransactionId recordxid, bool isTupleLockUpgrade); -void HeapXlogUpdateOperatorNewpage(RedoBufferInfo* buffer, void* recorddata, bool isinit, void* blkdata, - Size datalen, TransactionId recordxid, Size* freespace, bool isTupleLockUpgrade, bool tde = false); -void HeapXlogPageUpgradeOperatorPage(RedoBufferInfo* buffer); -void HeapXlogLockOperatorPage(RedoBufferInfo* buffer, void* recorddata, bool isTupleLockUpgrade); -void HeapXlogInplaceOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size newlen); -void HeapXlogBaseShiftOperatorPage(RedoBufferInfo* buffer, void* recorddata); - -void BtreeRestorePage(Page page, char* from, int len); -void BtreeXlogMarkDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata); -void BtreeXlogPrunePageOperatorPage(RedoBufferInfo* buffer, void* recorddata); -void Btree2XlogShiftBaseOperatorPage(RedoBufferInfo* buffer, void* recorddata); - -void BtreeRestoreMetaOperatorPage(RedoBufferInfo* metabuf, void* recorddata, Size datalen); -void BtreeXlogInsertOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* data, Size datalen); -void BtreeXlogSplitOperatorRightpage( - RedoBufferInfo* rbuf, void* recorddata, BlockNumber leftsib, BlockNumber rnext, void* blkdata, Size datalen); -void BtreeXlogSplitOperatorNextpage(RedoBufferInfo* buffer, BlockNumber rightsib); -void BtreeXlogSplitOperatorLeftpage( - RedoBufferInfo* lbuf, void* recorddata, BlockNumber rightsib, bool onleft, void* blkdata, Size datalen); -void BtreeXlogVacuumOperatorPage(RedoBufferInfo* redobuffer, void* recorddata, void* blkdata, Size len); -void BtreeXlogDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata, Size recorddatalen); -void btreeXlogDeletePageOperatorRightpage(RedoBufferInfo* buffer, void* recorddata); - -void BtreeXlogDeletePageOperatorLeftpage(RedoBufferInfo* buffer, void* recorddata); - -void BtreeXlogDeletePageOperatorCurrentpage(RedoBufferInfo* buffer, void* recorddata); - -void 
BtreeXlogNewrootOperatorPage(RedoBufferInfo* buffer, void* record, void* blkdata, Size len, BlockNumber* downlink); -void BtreeXlogHalfdeadPageOperatorParentpage( - RedoBufferInfo* pbuf, void* recorddata); -void BtreeXlogHalfdeadPageOperatorLeafpage( - RedoBufferInfo* lbuf, void* recorddata); -void BtreeXlogUnlinkPageOperatorRightpage(RedoBufferInfo* rbuf, void* recorddata); -void BtreeXlogUnlinkPageOperatorLeftpage(RedoBufferInfo* lbuf, void* recorddata); -void BtreeXlogUnlinkPageOperatorCurpage(RedoBufferInfo* buf, void* recorddata); -void BtreeXlogUnlinkPageOperatorChildpage(RedoBufferInfo* cbuf, void* recorddata); - -void BtreeXlogClearIncompleteSplit(RedoBufferInfo* buffer); - -/* UBTree */ -extern void UBTreeRestorePage(Page page, char* from, int len); -extern void UBTreeXlogMarkDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTreeXlogPrunePageOperatorPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTree2XlogShiftBaseOperatorPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTree2XlogRecycleQueueInitPageOperatorCurrPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTree2XlogRecycleQueueInitPageOperatorAdjacentPage(RedoBufferInfo* buffer, void* recorddata, bool isLeft); -extern void UBTree2XlogRecycleQueueEndpointOperatorLeftPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTree2XlogRecycleQueueEndpointOperatorRightPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTree2XlogRecycleQueueModifyOperatorPage(RedoBufferInfo* buffer, void* recorddata); -extern void UBTree2XlogFreezeOperatorPage(RedoBufferInfo* buffer, void* recorddata); - -extern void UBTreeRestoreMetaOperatorPage(RedoBufferInfo* metabuf, void* recorddata, Size datalen); -extern void UBTreeXlogInsertOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* data, Size datalen); -extern void UBTreeXlogSplitOperatorRightPage(RedoBufferInfo* rbuf, void* recorddata, BlockNumber leftsib, - BlockNumber rnext, void* blkdata, Size datalen, bool hasOpaque = true); -extern void UBTreeXlogSplitOperatorNextpage(RedoBufferInfo* buffer, BlockNumber rightsib); -extern void UBTreeXlogSplitOperatorLeftpage(RedoBufferInfo* lbuf, void* recorddata, BlockNumber rightsib, - bool onleft, void* blkdata, Size datalen, bool hasOpaque = true); -extern void UBTreeXlogVacuumOperatorPage(RedoBufferInfo* redobuffer, void* recorddata, void* blkdata, Size len); -extern void UBTreeXlogDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata, Size recorddatalen); -extern void UBTreeXlogDeletePageOperatorRightpage(RedoBufferInfo* buffer, void* recorddata); - -extern void UBTreeXlogDeletePageOperatorLeftpage(RedoBufferInfo* buffer, void* recorddata); - -extern void UBTreeXlogDeletePageOperatorCurrentpage(RedoBufferInfo* buffer, void* recorddata); - -extern void UBTreeXlogNewrootOperatorPage(RedoBufferInfo *buffer, void *record, void *blkdata, Size len, - BlockNumber *downlink); -extern void UBTreeXlogHalfdeadPageOperatorParentpage( - RedoBufferInfo* pbuf, void* recorddata); -extern void UBTreeXlogHalfdeadPageOperatorLeafpage( - RedoBufferInfo* lbuf, void* recorddata); -extern void UBTreeXlogUnlinkPageOperatorRightpage(RedoBufferInfo* rbuf, void* recorddata); -extern void UBTreeXlogUnlinkPageOperatorLeftpage(RedoBufferInfo* lbuf, void* recorddata); -extern void UBTreeXlogUnlinkPageOperatorCurpage(RedoBufferInfo* buf, void* recorddata); -extern void UBTreeXlogUnlinkPageOperatorChildpage(RedoBufferInfo* cbuf, void* recorddata); - -extern void 
UBTreeXlogClearIncompleteSplit(RedoBufferInfo* buffer); - -void HashRedoInitMetaPageOperatorPage(RedoBufferInfo *metabuf, void *recorddata); - -void HashRedoInitBitmapPageOperatorBitmapPage(RedoBufferInfo *bitmapbuf, void *recorddata); -void HashRedoInitBitmapPageOperatorMetaPage(RedoBufferInfo *metabuf); - -void HashRedoInsertOperatorPage(RedoBufferInfo *buffer, void *recorddata, void *data, Size datalen); -void HashRedoInsertOperatorMetaPage(RedoBufferInfo *metabuf); - -void HashRedoAddOvflPageOperatorOvflPage(RedoBufferInfo *ovflbuf, BlockNumber leftblk, void *data, Size datalen); -void HashRedoAddOvflPageOperatorLeftPage(RedoBufferInfo *ovflbuf, BlockNumber rightblk); -void HashRedoAddOvflPageOperatorMapPage(RedoBufferInfo *mapbuf, void *data); -void HashRedoAddOvflPageOperatorNewmapPage(RedoBufferInfo *newmapbuf, void *recorddata); -void HashRedoAddOvflPageOperatorMetaPage(RedoBufferInfo *metabuf, void *recorddata, void *data, Size datalen); - -void HashRedoSplitAllocatePageOperatorObukPage(RedoBufferInfo *oldbukbuf, void *recorddata); -void HashRedoSplitAllocatePageOperatorNbukPage(RedoBufferInfo *newbukbuf, void *recorddata); -void HashRedoSplitAllocatePageOperatorMetaPage(RedoBufferInfo *metabuf, void *recorddata, void *blkdata); - -void HashRedoSplitCompleteOperatorObukPage(RedoBufferInfo *oldbukbuf, void *recorddata); -void HashRedoSplitCompleteOperatorNbukPage(RedoBufferInfo *newbukbuf, void *recorddata); - -void HashXlogMoveAddPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, void *blkdata, Size len); -void HashXlogMoveDeleteOvflPageOperatorPage(RedoBufferInfo *redobuffer, void *blkdata, Size len); - -void HashXlogSqueezeAddPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, void *blkdata, Size len); -void HashXlogSqueezeInitOvflbufOperatorPage(RedoBufferInfo *redobuffer, void *recorddata); -void HashXlogSqueezeUpdatePrevPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata); -void HashXlogSqueezeUpdateNextPageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata); -void HashXlogSqueezeUpdateBitmapOperatorPage(RedoBufferInfo *redobuffer, void *blkdata); -void HashXlogSqueezeUpdateMateOperatorPage(RedoBufferInfo *redobuffer, void *blkdata); - -void HashXlogDeleteBlockOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, void *blkdata, Size len); - -void HashXlogSplitCleanupOperatorPage(RedoBufferInfo *redobuffer); - -void HashXlogUpdateMetaOperatorPage(RedoBufferInfo *redobuffer, void *recorddata); - -void HashXlogVacuumOnePageOperatorPage(RedoBufferInfo *redobuffer, void *recorddata, Size len); - -void HashXlogVacuumMateOperatorPage(RedoBufferInfo *redobuffer, void *recorddata); - -void XLogRecSetBlockCommonState(XLogReaderState* record, XLogBlockParseEnum blockvalid, - RelFileNodeForkNum filenode, XLogRecParseState* recordblockstate, XLogPhyBlock *pblk = NULL); - -void XLogRecSetBlockCLogState( - XLogBlockCLogParse* blockclogstate, TransactionId topxid, uint16 status, uint16 xidnum, uint16* xidsarry); - -void XLogRecSetBlockCSNLogState( - XLogBlockCSNLogParse* blockcsnlogstate, TransactionId topxid, CommitSeqNo csnseq, uint16 xidnum, uint16* xidsarry); -void XLogRecSetXactRecoveryState(XLogBlockXactParse* blockxactstate, TransactionId maxxid, CommitSeqNo maxcsnseq, - uint8 delayddlflag, uint8 updateminrecovery); -void XLogRecSetXactDdlState(XLogBlockXactParse* blockxactstate, int nrels, void* xnodes, int invalidmsgnum, - void* invalidmsg, int nlibs, void* libfilename); -void XLogRecSetXactCommonState( - XLogBlockXactParse* 
blockxactstate, uint16 committype, uint64 xinfo, TimestampTz xact_time); -void XLogRecSetBcmState(XLogBlockBcmParse* blockbcmrec, uint64 startblock, int count, int status); -void XLogRecSetNewCuState(XLogBlockNewCuParse* blockcudata, char* main_data, uint32 main_data_len); -void XLogRecSetInvalidMsgState(XLogBlockInvalidParse* blockinvalid, TransactionId cutoffxid); -void XLogRecSetIncompleteMsgState(XLogBlockIncompleteParse* blockincomplete, uint16 action, bool issplit, bool isroot, - BlockNumber downblk, BlockNumber leftblk, BlockNumber rightblk); -void XLogRecSetPinVacuumState(XLogBlockVacuumPinParse* blockvacuum, BlockNumber lastblknum); - -void XLogRecSetAuxiBlkNumState(XLogBlockDataParse* blockdatarec, BlockNumber auxilaryblkn1, BlockNumber auxilaryblkn2); -void XLogRecSetBlockDataStateContent(XLogReaderState *record, uint32 blockid, XLogBlockDataParse *blockdatarec); -void XLogRecSetBlockDataState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordblockstate); -extern char* XLogBlockDataGetBlockData(XLogBlockDataParse* datadecode, Size* len); -void Heap2RedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -extern void HeapRedoDataBlock( - XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -extern void xlog_redo_data_block( - XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -extern void XLogRecSetBlockDdlState(XLogBlockDdlParse* blockddlstate, uint32 blockddltype, uint32 columnrel, - char *mainData, Oid ownerid = InvalidOid); -XLogRedoAction XLogCheckBlockDataRedoAction(XLogBlockDataParse* datadecode, RedoBufferInfo* bufferinfo); - -void BtreeRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -void Btree2RedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); - -/* UBTree */ -extern void UBTreeRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -extern void UBTree2RedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, - RedoBufferInfo *bufferinfo); - -extern void HashRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -XLogRecParseState* XactXlogCsnlogParseToBlock(XLogReaderState* record, uint32* blocknum, TransactionId xid, - int nsubxids, TransactionId* subxids, CommitSeqNo csn, XLogRecParseState* recordstatehead); -extern void XLogRecSetVmBlockState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordblockstate); -extern void XLogRecSetUHeapUndoBlockState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordundostate); -extern void XLogRecSetUndoBlockState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordundostate); -extern void XLogRecSetRollbackFinishBlockState(XLogReaderState *record, uint32 blockid, - XLogRecParseState *recordundostate); -extern bool DoLsnCheck(const RedoBufferInfo* bufferinfo, bool willInit, XLogRecPtr lastLsn, const XLogPhyBlock *pblk); -char* XLogBlockDataGetMainData(XLogBlockDataParse* datadecode, Size* len); -void HeapRedoVmBlock(XLogBlockHead* blockhead, XLogBlockVmParse* blockvmrec, RedoBufferInfo* bufferinfo); -void Heap2RedoVmBlock(XLogBlockHead* blockhead, XLogBlockVmParse* blockvmrec, RedoBufferInfo* bufferinfo); -XLogRecParseState* xlog_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -XLogRecParseState* 
smgr_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -XLogRecParseState* segpage_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); - -XLogRecParseState* XactXlogClogParseToBlock(XLogReaderState* record, XLogRecParseState* recordstatehead, - uint32* blocknum, TransactionId xid, int nsubxids, TransactionId* subxids, CLogXidStatus status); -XLogRecParseState* xact_xlog_commit_parse_to_block(XLogReaderState* record, XLogRecParseState* recordstatehead, - uint32* blocknum, TransactionId maxxid, CommitSeqNo maxseqnum); -void visibilitymap_clear_buffer(RedoBufferInfo* bufferinfo, BlockNumber heapBlk); -XLogRecParseState* xact_xlog_abort_parse_to_block(XLogReaderState* record, XLogRecParseState* recordstatehead, - uint32* blocknum, TransactionId maxxid, CommitSeqNo maxseqnum); -XLogRecParseState* xact_xlog_prepare_parse_to_block( - XLogReaderState* record, XLogRecParseState* recordstatehead, uint32* blocknum, TransactionId maxxid); -XLogRecParseState* xact_xlog_parse_to_block(XLogReaderState* record, uint32* blocknum); -XLogRecParseState* ClogRedoParseToBlock(XLogReaderState* record, uint32* blocknum); - -XLogRecParseState* DbaseRedoParseToBlock(XLogReaderState* record, uint32* blocknum); - -XLogRecParseState* Heap2RedoParseIoBlock(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* HeapRedoParseToBlock(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* BtreeRedoParseToBlock(XLogReaderState* record, uint32* blocknum); -/* UBTree */ -extern XLogRecParseState* UBTreeRedoParseToBlock(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* UBTree2RedoParseToBlock(XLogReaderState* record, uint32* blocknum); - -extern XLogRecParseState* Heap3RedoParseToBlock(XLogReaderState* record, uint32* blocknum); - -extern Size SalEncodeXLogBlock(void* recordblockstate, byte* buffer, void* sliceinfo); - -extern XLogRecParseState* XLogParseToBlockForDfv(XLogReaderState* record, uint32* blocknum); -extern Size getBlockSize(XLogRecParseState* recordblockstate); -extern XLogRecParseState* GistRedoParseToBlock(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* GinRedoParseToBlock(XLogReaderState* record, uint32* blocknum); - -extern void GistRedoClearFollowRightOperatorPage(RedoBufferInfo* buffer); -extern void GistRedoPageUpdateOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size datalen); -extern void GistRedoPageSplitOperatorPage( - RedoBufferInfo* buffer, void* recorddata, void* data, Size datalen, bool Markflag, BlockNumber rightlink); -extern void GistRedoCreateIndexOperatorPage(RedoBufferInfo* buffer); - -extern void GinRedoCreateIndexOperatorMetaPage(RedoBufferInfo* MetaBuffer); -extern void GinRedoCreateIndexOperatorRootPage(RedoBufferInfo* RootBuffer); -extern void GinRedoCreatePTreeOperatorPage(RedoBufferInfo* buffer, void* recordData); -extern void GinRedoClearIncompleteSplitOperatorPage(RedoBufferInfo* buffer); -extern void GinRedoVacuumDataOperatorLeafPage(RedoBufferInfo* buffer, void* recorddata); -extern void GinRedoDeletePageOperatorCurPage(RedoBufferInfo* dbuffer); -extern void GinRedoDeletePageOperatorParentPage(RedoBufferInfo* pbuffer, void* recorddata); -extern void GinRedoDeletePageOperatorLeftPage(RedoBufferInfo* lbuffer, void* recorddata); -extern void GinRedoUpdateOperatorMetapage(RedoBufferInfo* metabuffer, void* recorddata); -extern void GinRedoUpdateOperatorTailPage(RedoBufferInfo* buffer, void* payload, Size totaltupsize, int32 ntuples); -extern void 
GinRedoInsertListPageOperatorPage( - RedoBufferInfo* buffer, void* recorddata, void* payload, Size totaltupsize); -extern void GinRedoUpdateAddNewTail(RedoBufferInfo* buffer, BlockNumber newRightlink); -extern void GinRedoInsertData(RedoBufferInfo* buffer, bool isLeaf, BlockNumber rightblkno, void* rdata); -extern void GinRedoInsertEntry(RedoBufferInfo* buffer, bool isLeaf, BlockNumber rightblkno, void* rdata); - -extern void GinRedoDeleteListPagesOperatorPage(RedoBufferInfo* metabuffer, const void* recorddata); -extern void GinRedoDeleteListPagesMarkDelete(RedoBufferInfo* buffer); - -extern void spgRedoCreateIndexOperatorMetaPage(RedoBufferInfo* buffer); -extern void spgRedoCreateIndexOperatorRootPage(RedoBufferInfo* buffer); -extern void spgRedoCreateIndexOperatorLeafPage(RedoBufferInfo* buffer); -extern void spgRedoAddLeafOperatorPage(RedoBufferInfo* bufferinfo, void* recorddata); -extern void spgRedoAddLeafOperatorParent(RedoBufferInfo* bufferinfo, void* recorddata, BlockNumber blknoLeaf); -extern void spgRedoMoveLeafsOpratorDstPage(RedoBufferInfo* buffer, void* recorddata, void* insertdata, void* tupledata); -extern void spgRedoMoveLeafsOpratorSrcPage( - RedoBufferInfo* buffer, void* recorddata, void* insertdata, void* deletedata, BlockNumber blknoDst, int nInsert); -extern void spgRedoMoveLeafsOpratorParentPage( - RedoBufferInfo* buffer, void* recorddata, void* insertdata, BlockNumber blknoDst, int nInsert); -extern void spgRedoAddNodeUpdateSrcPage(RedoBufferInfo* buffer, void* recorddata, void* tuple, void* tupleheader); -extern void spgRedoAddNodeOperatorSrcPage(RedoBufferInfo* buffer, void* recorddata, BlockNumber blknoNew); -extern void spgRedoAddNodeOperatorDestPage( - RedoBufferInfo* buffer, void* recorddata, void* tuple, void* tupleheader, BlockNumber blknoNew); -extern void spgRedoAddNodeOperatorParentPage(RedoBufferInfo* buffer, void* recorddata, BlockNumber blknoNew); -extern void spgRedoSplitTupleOperatorDestPage(RedoBufferInfo* buffer, void* recorddata, void* tuple); -extern void spgRedoSplitTupleOperatorSrcPage(RedoBufferInfo* buffer, void* recorddata, void* pretuple, void* posttuple); -extern void spgRedoPickSplitRestoreLeafTuples( - RedoBufferInfo* buffer, void* recorddata, bool destflag, void* pageselect, void* insertdata); -extern void spgRedoPickSplitOperatorSrcPage(RedoBufferInfo* srcBuffer, void* recorddata, void* deleteoffset, - BlockNumber blknoInner, void* pageselect, void* insertdata); -extern void spgRedoPickSplitOperatorDestPage( - RedoBufferInfo* destBuffer, void* recorddata, void* pageselect, void* insertdata); -extern void spgRedoPickSplitOperatorInnerPage( - RedoBufferInfo* innerBuffer, void* recorddata, void* tuple, void* tupleheader, BlockNumber blknoInner); -extern void spgRedoPickSplitOperatorParentPage(RedoBufferInfo* parentBuffer, void* recorddata, BlockNumber blknoInner); -extern void spgRedoVacuumLeafOperatorPage(RedoBufferInfo* buffer, void* recorddata); -extern void spgRedoVacuumRootOperatorPage(RedoBufferInfo* buffer, void* recorddata); -extern void spgRedoVacuumRedirectOperatorPage(RedoBufferInfo* buffer, void* recorddata); - -extern XLogRecParseState* SpgRedoParseToBlock(XLogReaderState* record, uint32* blocknum); - -extern void seqRedoOperatorPage(RedoBufferInfo* buffer, void* itmedata, Size itemsz); -extern void seq_redo_data_block(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); - -extern void Heap3RedoDataBlock( - XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* 
bufferinfo); - -extern XLogRecParseState* xact_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); - -extern bool XLogBlockRedoForExtremeRTO(XLogRecParseState* redoblocktate, RedoBufferInfo *bufferinfo, - bool notfound); -void XLogBlockParseStateRelease_debug(XLogRecParseState* recordstate, const char *func, uint32 line); -#define XLogBlockParseStateRelease(recordstate) XLogBlockParseStateRelease_debug(recordstate, __FUNCTION__, __LINE__) -#ifdef USE_ASSERT_CHECKING -extern void DoRecordCheck(XLogRecParseState *recordstate, XLogRecPtr pageLsn, bool replayed); -#endif -extern XLogRecParseState* XLogParseBufferCopy(XLogRecParseState *srcState); -extern XLogRecParseState* XLogParseToBlockForExtermeRTO(XLogReaderState* record, uint32* blocknum); -extern XLogRedoAction XLogReadBufferForRedoBlockExtend(RedoBufferTag *redoblock, ReadBufferMode mode, - bool get_cleanup_lock, RedoBufferInfo *redobufferinfo, - XLogRecPtr xloglsn, XLogRecPtr last_lsn, bool willinit, - ReadBufferMethod readmethod, bool tde = false); -extern XLogRecParseState* tblspc_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* relmap_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* HashRedoParseToBlock(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* seq_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* slot_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* barrier_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -extern XLogRecParseState* multixact_redo_parse_to_block(XLogReaderState* record, uint32* blocknum); -extern void ExtremeRtoFlushBuffer(RedoBufferInfo *bufferinfo, bool updateFsm); -extern void XLogForgetDDLRedo(XLogRecParseState* redoblockstate); -void XLogDropSpaceShrink(XLogRecParseState *redoblockstate); -extern void SyncOneBufferForExtremRto(RedoBufferInfo *bufferinfo); -extern void XLogBlockInitRedoBlockInfo(XLogBlockHead* blockhead, RedoBufferTag* blockinfo); -extern void XLogBlockDdlDoSmgrAction(XLogBlockHead* blockhead, void* blockrecbody, RedoBufferInfo* bufferinfo); -extern void XLogBlockSegDdlDoRealAction(XLogBlockHead* blockhead, void* blockrecbody, RedoBufferInfo* bufferinfo); -extern void GinRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo); -extern void GistRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo); -extern bool IsCheckPoint(const XLogRecParseState *parseState); -#endif +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ---------------------------------------------------------------------------------------
+ *
+ * xlogproc.h
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/access/xlogproc.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#ifndef XLOG_PROC_H
+#define XLOG_PROC_H
+#include "postgres.h"
+#include "knl/knl_variable.h"
+
+#include "access/xlogreader.h"
+#include "storage/buf/bufmgr.h"
+#include "storage/buf/buf_internals.h"
+#include "access/xlog_basic.h"
+#include "access/xlogutils.h"
+#include "access/clog.h"
+#include "access/ustore/knl_uredo.h"
+#include "access/ustore/knl_utuple.h"
+#include "access/ustore/undo/knl_uundotxn.h"
+#include "access/ustore/undo/knl_uundoxlog.h"
+
+#ifndef byte
+#define byte unsigned char
+#endif
+
+typedef void (*relasexlogreadstate)(void* record);
+/* **************define for parse end******************************* */
+#define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a))
+
+/* for common blockhead begin */
+
+#define XLogBlockHeadGetInfo(blockhead) ((blockhead)->xl_info)
+#define XLogBlockHeadGetXid(blockhead) ((blockhead)->xl_xid)
+#define XLogBlockHeadGetRmid(blockhead) ((blockhead)->xl_rmid)
+
+#define XLogBlockHeadGetLSN(blockhead) ((blockhead)->end_ptr)
+#define XLogBlockHeadGetRelNode(blockhead) ((blockhead)->relNode)
+#define XLogBlockHeadGetSpcNode(blockhead) ((blockhead)->spcNode)
+#define XLogBlockHeadGetDbNode(blockhead) ((blockhead)->dbNode)
+#define XLogBlockHeadGetForkNum(blockhead) ((blockhead)->forknum)
+#define XLogBlockHeadGetBlockNum(blockhead) ((blockhead)->blkno)
+#define XLogBlockHeadGetBucketId(blockhead) ((blockhead)->bucketNode)
+#define XLogBlockHeadGetValidInfo(blockhead) ((blockhead)->block_valid)
+#define XLogBlockHeadGetPhysicalBlock(blockhead) ((blockhead)->pblk)
+/* for common blockhead end */
+
+/* for block data begin */
+#define XLogBlockDataHasBlockImage(blockdata) ((blockdata)->blockhead.has_image)
+#define XLogBlockDataHasBlockData(blockdata) ((blockdata)->blockhead.has_data)
+#define XLogBlockDataGetLastBlockLSN(_blockdata) ((_blockdata)->blockdata.last_lsn)
+#define XLogBlockDataGetBlockFlags(blockdata) ((blockdata)->blockhead.flags)
+
+#define XLogBlockDataGetBlockId(blockdata) ((blockdata)->blockhead.cur_block_id)
+#define XLogBlockDataGetAuxiBlock1(blockdata) ((blockdata)->blockhead.auxiblk1)
+#define XLogBlockDataGetAuxiBlock2(blockdata) ((blockdata)->blockhead.auxiblk2)
+/* for block data end */
+
+typedef struct {
+    RelFileNode rnode;
+    ForkNumber forknum;
+    BlockNumber blkno;
+    XLogPhyBlock pblk;
+} RedoBufferTag;
+
+typedef struct {
+    Page page; // page pointer
+    Size pagesize;
+#ifdef USE_ASSERT_CHECKING
+    bool ignorecheck;
+#endif
+} RedoPageInfo;
+
+typedef struct {
+    XLogRecPtr lsn; /* block cur lsn */
+    Buffer buf;
+    RedoBufferTag blockinfo;
+    RedoPageInfo pageinfo;
+    int dirtyflag; /* true if the buffer changed */
+} RedoBufferInfo;
+
+extern void GetFlushBufferInfo(void *buf, RedoBufferInfo *bufferinfo, uint32 *buf_state, ReadBufferMethod flushmethod);
+
+#define MakeRedoBufferDirty(bufferinfo) ((bufferinfo)->dirtyflag = true)
+#define RedoBufferDirtyClear(bufferinfo) ((bufferinfo)->dirtyflag = false)
+#define IsRedoBufferDirty(bufferinfo) ((bufferinfo)->dirtyflag == true)
+
+#define RedoMemIsValid(memctl, bufferid) (((bufferid) > InvalidBuffer) && ((bufferid) <= (memctl->totalblknum)))
+
+typedef struct {
+    RedoBufferTag blockinfo;
+    pg_atomic_uint32 state;
+} RedoBufferDesc;
+
+typedef struct {
+    Buffer buff_id;
+    pg_atomic_uint32 state;
+} ParseBufferDesc;
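+
+/*
+ * Illustrative sketch (editor's addition, not part of the original patch): the
+ * dirty-flag protocol implied by the macros above. A redo operator mutates
+ * bufferinfo->pageinfo.page, marks the buffer dirty, and the flush path clears
+ * the flag once the page is written back. The "Example" name is hypothetical.
+ */
+static inline void RedoBufferReplayCycleExample(RedoBufferInfo *bufferinfo)
+{
+    /* ... replay the record payload onto bufferinfo->pageinfo.page ... */
+    MakeRedoBufferDirty(bufferinfo);
+
+    /* later, after the flush path has written the page back */
+    if (IsRedoBufferDirty(bufferinfo)) {
+        RedoBufferDirtyClear(bufferinfo);
+    }
+}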
+
+#define RedoBufferSlotGetBuffer(bslot) ((bslot)->buf_id)
+
+#define EnalbeWalLsnCheck true
+
+#pragma pack(push, 1)
+
+#define INVALID_BLOCK_ID (XLR_MAX_BLOCK_ID + 2)
+
+#define LOW_BLOKNUMBER_BITS (32)
+#define LOW_BLOKNUMBER_MASK (((uint64)1 << 32) - 1)
+
+
+/* ********BLOCK COMMON HEADER BEGIN ***************** */
+typedef enum {
+    BLOCK_DATA_MAIN_DATA_TYPE = 0, /* BLOCK DATA */
+    BLOCK_DATA_VM_TYPE,            /* VM */
+    BLOCK_DATA_UNDO_TYPE,          /* UNDO */
+    BLOCK_DATA_FSM_TYPE,           /* FSM */
+    BLOCK_DATA_DDL_TYPE,           /* DDL */
+    BLOCK_DATA_BCM_TYPE,           /* bcm */
+    BLOCK_DATA_NEWCU_TYPE,         /* cu newlog */
+    BLOCK_DATA_CLOG_TYPE,          /* CLog */
+    BLOCK_DATA_MULITACT_OFF_TYPE,  /* MultiXact */
+    BLOCK_DATA_MULITACT_MEM_TYPE,
+    BLOCK_DATA_CSNLOG_TYPE,        /* CSNLog */
+    /* *****xact types below don't need to be sent to dfv */
+    BLOCK_DATA_MULITACT_UPDATEOID_TYPE,
+    BLOCK_DATA_XACTDATA_TYPE, /* XACT */
+    BLOCK_DATA_RELMAP_TYPE,   /* RELMAP */
+    BLOCK_DATA_SLOT_TYPE,
+    BLOCK_DATA_BARRIER_TYPE,
+    BLOCK_DATA_PREPARE_TYPE,    /* prepare */
+    BLOCK_DATA_INVALIDMSG_TYPE, /* INVALIDMSG */
+    BLOCK_DATA_INCOMPLETE_TYPE,
+    BLOCK_DATA_VACUUM_PIN_TYPE,
+    BLOCK_DATA_XLOG_COMMON_TYPE,
+    BLOCK_DATA_CREATE_DATABASE_TYPE,
+    BLOCK_DATA_DROP_DATABASE_TYPE,
+    BLOCK_DATA_CREATE_TBLSPC_TYPE,
+    BLOCK_DATA_DROP_TBLSPC_TYPE,
+    BLOCK_DATA_DROP_SLICE_TYPE,
+    BLOCK_DATA_SEG_FILE_EXTEND_TYPE,
+    BLOCK_DATA_SEG_SPACE_DROP,
+    BLOCK_DATA_SEG_SPACE_SHRINK,
+    BLOCK_DATA_SEG_FULL_SYNC_TYPE,
+    BLOCK_DATA_SEG_EXTEND,
+} XLogBlockParseEnum;
+
+/* ********BLOCK COMMON HEADER END ***************** */
+
+/* **************define for parse begin ******************************* */
+
+/* ********BLOCK DATA BEGIN ***************** */
+
+typedef struct {
+    uint8 cur_block_id; /* blockid */
+    uint8 flags;
+    uint8 has_image;
+    uint8 has_data;
+    BlockNumber auxiblk1;
+    BlockNumber auxiblk2;
+} XLogBlocDatakHead;
+
+#define XLOG_BLOCK_DATAHEAD_LEN sizeof(XLogBlocDatakHead)
+
+typedef struct {
+    uint16 extra_flag;
+    uint16 hole_offset;
+    uint16 hole_length; /* image position */
+    uint16 data_len;    /* data length */
+    XLogRecPtr last_lsn;
+    char* bkp_image;
+    char* data;
+} XLogBlockData;
+
+#define XLOG_BLOCK_DATA_LEN sizeof(XLogBlockData)
+
+typedef struct {
+    XLogBlocDatakHead blockhead;
+    XLogBlockData blockdata;
+    uint32 main_data_len; /* main data portion's length */
+    char* main_data;      /* point to XLogReaderState's main_data */
+} XLogBlockDataParse;
+/* ********BLOCK DATA END ***************** */
+#define XLOG_BLOCK_DATA_PARSE_LEN sizeof(XLogBlockDataParse)
+
+/* ********BLOCK DDL BEGIN ***************** */
+typedef enum {
+    BLOCK_DDL_TYPE_NONE = 0,
+    BLOCK_DDL_CREATE_RELNODE,
+    BLOCK_DDL_DROP_RELNODE,
+    BLOCK_DDL_EXTEND_RELNODE,
+    BLOCK_DDL_TRUNCATE_RELNODE,
+    BLOCK_DDL_CLOG_ZERO,
+    BLOCK_DDL_CLOG_TRUNCATE,
+    BLOCK_DDL_MULTIXACT_OFF_ZERO,
+    BLOCK_DDL_MULTIXACT_MEM_ZERO,
+} XLogBlockDdlInfoEnum;
+
+typedef struct {
+    uint32 blockddltype;
+    int rels;
+    char *mainData;
+} XLogBlockDdlParse;
+
+/* ********BLOCK DDL END ***************** */
+
+/* ********BLOCK CLOG BEGIN ***************** */
+
+#define MAX_BLOCK_XID_NUMS (28)
+typedef struct {
+    TransactionId topxid;
+    uint16 status;
+    uint16 xidnum;
+    uint16 xidsarry[MAX_BLOCK_XID_NUMS];
+} XLogBlockCLogParse;
+
+/* ********BLOCK CLOG END ***************** */
+
+/* ********BLOCK CSNLOG BEGIN ***************** */
+typedef struct {
+    TransactionId topxid;
+    CommitSeqNo cslseq;
+    uint32 xidnum;
+    uint16 xidsarry[MAX_BLOCK_XID_NUMS];
+} XLogBlockCSNLogParse;
+
+/* ********BLOCK CSNLOG END ***************** */
+
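+/*
+ * Illustrative sketch (editor's addition, not part of the original patch): how
+ * one fixed-size CLOG chunk could be filled. Records carrying more than
+ * MAX_BLOCK_XID_NUMS subtransaction xids have to be split across several parse
+ * states; the real population is done by XLogRecSetBlockCLogState and
+ * XactXlogClogParseToBlock, declared later in this header. The "Example" name
+ * is hypothetical, and the reading of xidsarry as 16-bit values relative to
+ * topxid is inferred only from the field's type.
+ */
+static inline void ClogParseFillChunkExample(XLogBlockCLogParse *parse, TransactionId topxid,
+    uint16 status, const uint16 *xids, uint16 nxids)
+{
+    parse->topxid = topxid;
+    parse->status = status;
+    parse->xidnum = (uint16)((nxids < MAX_BLOCK_XID_NUMS) ? nxids : MAX_BLOCK_XID_NUMS);
+    for (uint16 i = 0; i < parse->xidnum; i++) {
+        parse->xidsarry[i] = xids[i];
+    }
+}
+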
+/* ********BLOCK prepare BEGIN ***************** */
+struct TwoPhaseFileHeader;
+
+typedef struct {
+    TransactionId maxxid;
+    Size maindatalen;
+    char* maindata;
+} XLogBlockPrepareParse;
+
+/* ********BLOCK prepare END ***************** */
+
+/* ********BLOCK Bcm BEGIN ***************** */
+typedef struct {
+    uint64 startblock;
+    int count;
+    int status;
+} XLogBlockBcmParse;
+
+/* ********BLOCK Bcm END ***************** */
+
+/* ********BLOCK Vm BEGIN ***************** */
+typedef struct {
+    BlockNumber heapBlk;
+} XLogBlockVmParse;
+
+#define XLOG_BLOCK_VM_PARSE_LEN sizeof(XLogBlockVmParse)
+/* ********BLOCK Vm END ***************** */
+
+/* ********BLOCK Undo BEGIN ***************** */
+struct insertUndoParse {
+    TransactionId recxid;
+    BlockNumber blkno;
+    Oid spcNode;
+    Oid relNode;
+    XLogRecPtr lsn;
+    XlUndoHeader xlundohdr;
+    XlUndoHeaderExtra xlundohdrextra;
+    undo::XlogUndoMeta xlundometa;
+    OffsetNumber offnum;
+};
+
+struct deleteUndoParse {
+    TransactionId recxid;
+    TransactionId oldxid;
+    BlockNumber blkno;
+    Oid spcNode;
+    Oid relNode;
+    XLogRecPtr lsn;
+    XlUndoHeader xlundohdr;
+    XlUndoHeaderExtra xlundohdrextra;
+    undo::XlogUndoMeta xlundometa;
+    UHeapTupleData utup;
+    OffsetNumber offnum;
+};
+
+struct updateUndoParse {
+    bool inplaceUpdate;
+    TransactionId recxid;
+    TransactionId oldxid;
+    Oid spcNode;
+    Oid relNode;
+    OffsetNumber new_offnum;
+    OffsetNumber old_offnum;
+    XLogRecPtr lsn;
+    XlUndoHeader xlundohdr;
+    XlUndoHeaderExtra xlundohdrextra;
+    XlUndoHeader xlnewundohdr;
+    XlUndoHeaderExtra xlnewundohdrextra;
+    undo::XlogUndoMeta xlundometa;
+    int undoXorDeltaSize;
+    char *xlogXorDelta;
+    BlockNumber newblk;
+    BlockNumber oldblk;
+};
+
+struct multiInsertUndoParse {
+    TransactionId recxid;
+    BlockNumber blkno;
+    Oid spcNode;
+    Oid relNode;
+    XLogRecPtr lsn;
+    bool isinit;
+    bool skipUndo;
+    XlUndoHeader xlundohdr;
+    XlUndoHeaderExtra xlundohdrextra;
+    UndoRecPtr last_urecptr;
+    undo::XlogUndoMeta xlundometa;
+};
+
+struct rollbackFinishParse {
+    UndoSlotPtr slotPtr;
+    XLogRecPtr lsn;
+};
+
+struct undoDiscardParse {
+    int zoneId;
+    UndoSlotPtr endSlot;
+    UndoSlotPtr startSlot;
+    UndoRecPtr endUndoPtr;
+    TransactionId recycledXid;
+    XLogRecPtr lsn;
+};
+
+struct undoUnlinkParse {
+    int zoneId;
+    UndoLogOffset headOffset;
+    XLogRecPtr unlinkLsn;
+};
+
+struct undoExtendParse {
+    int zoneId;
+    UndoLogOffset tailOffset;
+    XLogRecPtr extendLsn;
+};
+
+struct undoCleanParse {
+    int zoneId;
+    UndoLogOffset tailOffset;
+    XLogRecPtr cleanLsn;
+};
+
+typedef struct {
+    char *maindata;
+    Size recordlen;
+    union {
+        struct insertUndoParse insertUndoParse;
+        struct deleteUndoParse deleteUndoParse;
+        struct updateUndoParse updateUndoParse;
+        struct undoDiscardParse undoDiscardParse;
+        struct undoUnlinkParse undoUnlinkParse;
+        struct undoExtendParse undoExtendParse;
+        struct undoCleanParse undoCleanParse;
+        struct rollbackFinishParse rollbackFinishParse;
+        struct multiInsertUndoParse multiInsertUndoParse;
+    };
+} XLogBlockUndoParse;
+/* ********BLOCK Undo END ***************** */
+
+/* ********BLOCK NewCu BEGIN ***************** */
+typedef struct {
+    uint32 main_data_len; /* main data portion's length */
+    char* main_data;      /* point to XLogReaderState's main_data */
+} XLogBlockNewCuParse;
+
+
+/* ********BLOCK NewCu END ***************** */
+
+/* ********BLOCK InvalidMsg BEGIN ***************** */
+typedef struct {
+    TransactionId cutoffxid;
+} XLogBlockInvalidParse;
+
+/* ********BLOCK InvalidMsg END ***************** */
+
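+/*
+ * Illustrative sketch (editor's addition, not part of the original patch): the
+ * convention shared by the parse bodies above. Each body is a small fixed-size
+ * struct, and any variable-length payload is left behind a pointer into the
+ * decoded record rather than copied. The "Example" name is hypothetical.
+ */
+static inline void PrepareParseFillExample(XLogBlockPrepareParse *parse, TransactionId maxxid,
+    char *maindata, Size maindatalen)
+{
+    parse->maxxid = maxxid;
+    parse->maindata = maindata; /* points into the decoded record, not a copy */
+    parse->maindatalen = maindatalen;
+}
+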
+/* ********BLOCK Incomplete BEGIN ***************** */
+
+typedef enum {
+    INCOMPLETE_ACTION_LOG = 0,
+    INCOMPLETE_ACTION_FORGET
+} XLogBlockIncompleteEnum;
+
+typedef struct {
+    uint16 action; /* split or delete */
+    bool issplit;
+    bool isroot;
+    BlockNumber downblk;
+    BlockNumber leftblk;
+    BlockNumber rightblk;
+} XLogBlockIncompleteParse;
+
+/* ********BLOCK Incomplete END ***************** */
+
+/* ********BLOCK VacuumPin BEGIN ***************** */
+typedef struct {
+    BlockNumber lastBlockVacuumed;
+} XLogBlockVacuumPinParse;
+/* ********BLOCK VacuumPin END ***************** */
+
+/* ********BLOCK XLOG Common BEGIN ***************** */
+typedef struct {
+    XLogRecPtr readrecptr;
+    Size maindatalen;
+    char* maindata;
+} XLogBlockXLogComParse;
+
+/* ********BLOCK XLOG Common END ***************** */
+
+/* ********BLOCK DataBase BEGIN ***************** */
+typedef struct {
+    Oid src_db_id;
+    Oid src_tablespace_id;
+} XLogBlockDataBaseParse;
+
+/* ********BLOCK DataBase END ***************** */
+
+/* ********BLOCK table spc BEGIN ***************** */
+typedef struct {
+    char* tblPath;
+    bool isRelativePath;
+} XLogBlockTblSpcParse;
+
+/* ********BLOCK table spc END ***************** */
+
+/* ********BLOCK Multi Xact Offset BEGIN ***************** */
+typedef struct {
+    MultiXactId multi;
+    MultiXactOffset moffset;
+} XLogBlockMultiXactOffParse;
+
+/* ********BLOCK Multi Xact Offset END ***************** */
+
+/* ********BLOCK Multi Xact Mem BEGIN ***************** */
+typedef struct {
+    MultiXactId multi;
+    MultiXactOffset startoffset;
+    uint64 xidnum;
+    TransactionId xidsarry[MAX_BLOCK_XID_NUMS];
+} XLogBlockMultiXactMemParse;
+/* ********BLOCK Multi Xact Mem END ***************** */
+
+/* ********BLOCK Multi Xact update oid BEGIN ***************** */
+typedef struct {
+    MultiXactId nextmulti;
+    MultiXactOffset nextoffset;
+    TransactionId maxxid;
+} XLogBlockMultiUpdateParse;
+/* ********BLOCK Multi Xact update oid END ***************** */
+
+/* ********BLOCK rel map BEGIN ***************** */
+typedef struct {
+    Size maindatalen;
+    char* maindata;
+} XLogBlockRelMapParse;
+/* ********BLOCK rel map END ***************** */
+
+typedef struct {
+    uint32 xl_term;
+} XLogBlockRedoHead;
+
+#define XLogRecRedoHeadEncodeSize (offsetof(XLogBlockRedoHead, refrecord))
+typedef struct {
+    XLogRecPtr start_ptr;
+    XLogRecPtr end_ptr; /* copy from XLogReaderState's EndRecPtr */
+    BlockNumber blkno;
+    Oid relNode;        /* relation */
+    uint16 block_valid; /* block data valid info, see XLogBlockParseEnum */
+    uint8 xl_info;      /* flag bits, see below */
+    RmgrId xl_rmid;     /* resource manager for this record */
+    ForkNumber forknum;
+    TransactionId xl_xid; /* xact id */
+    Oid spcNode;          /* tablespace */
+    Oid dbNode;           /* database */
+    int4 bucketNode;      /* bucket */
+    XLogPhyBlock pblk;
+} XLogBlockHead;
+
+#define XLogBlockHeadEncodeSize (sizeof(XLogBlockHead))
+
+#define BYTE_NUM_BITS (8)
+#define BYTE_MASK (0xFF)
+#define U64_BYTES_NUM (8)
+#define U32_BYTES_NUM (4)
+#define U16_BYTES_NUM (2)
+#define U8_BYTES_NUM (1)
+
+#define U32_BITS_NUM (BYTE_NUM_BITS * U32_BYTES_NUM)
+
+extern uint64 XLog_Read_N_Bytes(char* buffer, Size buffersize, Size readbytes);
+
+#define XLog_Read_1_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U8_BYTES_NUM)
+#define XLog_Read_2_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U16_BYTES_NUM)
+#define XLog_Read_4_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U32_BYTES_NUM)
+#define XLog_Read_8_Bytes(buffer, buffersize) XLog_Read_N_Bytes(buffer, buffersize, U64_BYTES_NUM)
+
+extern bool XLog_Write_N_bytes(uint64 values, Size writebytes, byte* buffer);
+
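+/*
+ * Illustrative sketch (editor's addition, not part of the original patch): a
+ * round trip through the fixed-width codec helpers declared above, writing a
+ * uint32 with XLog_Write_N_bytes and reading it back with XLog_Read_4_Bytes.
+ * The "Example" name is hypothetical.
+ */
+static inline bool XLogByteCodecRoundTripExample(uint32 value)
+{
+    byte buf[U32_BYTES_NUM] = {0};
+    if (!XLog_Write_N_bytes(value, U32_BYTES_NUM, buf)) {
+        return false;
+    }
+    return (uint32)XLog_Read_4_Bytes((char *)buf, sizeof(buf)) == value;
+}
+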
+#define XLog_Write_1_Bytes(values, buffer) XLog_Write_N_bytes(values, U8_BYTES_NUM, buffer)
+#define XLog_Write_2_Bytes(values, buffer) XLog_Write_N_bytes(values, U16_BYTES_NUM, buffer)
+#define XLog_Write_4_Bytes(values, buffer) XLog_Write_N_bytes(values, U32_BYTES_NUM, buffer)
+#define XLog_Write_8_Bytes(values, buffer) XLog_Write_N_bytes(values, U64_BYTES_NUM, buffer)
+
+typedef struct XLogBlockEnCode {
+    bool (*xlog_encodefun)(byte* buffer, Size buffersize, Size* encodesize, void* xlogbody);
+    uint16 block_valid;
+} XLogBlockEnCode;
+
+typedef struct XLogBlockRedoCode {
+    void (*xlog_redofun)(char* buffer, Size buffersize, XLogBlockHead* blockhead, XLogBlockRedoHead* redohead,
+        void* page, Size pagesize);
+    uint16 block_valid;
+} XLogBlockRedoCode;
+
+#pragma pack(pop)
+
+/* ********BLOCK Xact BEGIN ***************** */
+typedef struct {
+    uint8 delayddlflag;
+    uint8 updateminrecovery;
+    uint16 committype;
+    int invalidmsgnum;
+    int nrels; /* delete rels */
+    int nlibs; /* delete libs */
+    uint64 xinfo;
+    TimestampTz xact_time;
+    TransactionId maxxid;
+    CommitSeqNo maxcommitseq;
+    void* invalidmsg;
+    void* xnodes;
+    void* libfilename;
+} XLogBlockXactParse;
+/* ********BLOCK Xact END ***************** */
+
+/* ********BLOCK slot BEGIN ***************** */
+typedef struct {
+    Size maindatalen;
+    char* maindata;
+} XLogBlockSlotParse;
+/* ********BLOCK slot END ***************** */
+
+/* ********BLOCK barrier BEGIN ***************** */
+typedef struct {
+    char* maindata;
+    Size maindatalen;
+} XLogBlockBarrierParse;
+/* ********BLOCK barrier END ***************** */
+
+/* ********BLOCK Segfile Extend Begin */
+typedef struct {
+    BlockNumber target_blocks;
+} XLogSegFileExtendParse;
+/* ********BLOCK Segfile Extend END */
+
+/* ********BLOCK Segment Truncate Begin */
+typedef struct {
+    XLogBlockDdlParse blockddlrec;
+    XLogBlockDataParse blockdatarec;
+} XLogBlockSegDdlParse;
+/* ********BLOCK Segment Truncate END */
+
+typedef struct {
+    void *childState;
+} XLogBlockSegFullSyncParse;
+
+typedef struct {
+    char *mainData;
+    Size dataLen;
+} XLogBlockSegNewPage;
+
+typedef struct {
+    XLogBlockHead blockhead;
+    XLogBlockRedoHead redohead;
+    union {
+        XLogBlockDataParse blockdatarec;
+        XLogBlockVmParse blockvmrec;
+        XLogBlockUndoParse blockundorec;
+        XLogBlockDdlParse blockddlrec;
+        XLogBlockBcmParse blockbcmrec;
+        XLogBlockNewCuParse blocknewcu;
+        XLogBlockCLogParse blockclogrec;
+        XLogBlockCSNLogParse blockcsnlogrec;
+        XLogBlockXactParse blockxact;
+        XLogBlockPrepareParse blockprepare;
+        XLogBlockInvalidParse blockinvalidmsg;
+        // XLogBlockIncompleteParse blockincomplete;
+        XLogBlockVacuumPinParse blockvacuumpin;
+        XLogBlockXLogComParse blockxlogcommon;
+        XLogBlockDataBaseParse blockdatabase;
+        XLogBlockTblSpcParse blocktblspc;
+        XLogBlockMultiXactOffParse blockmultixactoff;
+        XLogBlockMultiXactMemParse blockmultixactmem;
+        XLogBlockMultiUpdateParse blockmultiupdate;
+        XLogBlockRelMapParse blockrelmap;
+        XLogBlockSlotParse blockslot;
+        XLogBlockBarrierParse blockbarrier;
+        XLogSegFileExtendParse segfileExtend;
+        XLogBlockSegDdlParse blocksegddlrec;
+        XLogBlockSegFullSyncParse blocksegfullsyncrec;
+        XLogBlockSegNewPage blocksegnewpageinfo;
+    } extra_rec;
+} XLogBlockParse;
+
+#define XLogBlockParseGetDdlParse(blockdatarec, ddlrecparse)                              \
+    do                                                                                    \
+    {                                                                                     \
+        Assert((blockdatarec)->blockparse.blockhead.block_valid == BLOCK_DATA_DDL_TYPE);  \
+        if (blockdatarec->blockparse.blockhead.bucketNode != InvalidBktId) {              \
+            ddlrecparse = &blockdatarec->blockparse.extra_rec.blocksegddlrec.blockddlrec; \
+        } else {                                                                          \
+            ddlrecparse = &blockdatarec->blockparse.extra_rec.blockddlrec;                \
+        }                                                                                 \
+    } while (0);
+
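+/*
+ * Illustrative sketch (editor's addition, not part of the original patch): a
+ * typed rendering of XLogBlockParseGetDdlParse above. Segment-store blocks
+ * (bucketNode != InvalidBktId) carry their DDL body nested inside
+ * blocksegddlrec; plain blocks use blockddlrec directly. The "Example" name is
+ * hypothetical.
+ */
+static inline XLogBlockDdlParse *BlockParseGetDdlParseExample(XLogBlockParse *blockparse)
+{
+    Assert(blockparse->blockhead.block_valid == BLOCK_DATA_DDL_TYPE);
+    if (blockparse->blockhead.bucketNode != InvalidBktId) {
+        return &blockparse->extra_rec.blocksegddlrec.blockddlrec;
+    }
+    return &blockparse->extra_rec.blockddlrec;
+}
+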
+typedef struct
+{
+    Buffer buf_id;
+    Buffer freeNext;
+} RedoMemSlot;
+
+typedef void (*InterruptFunc)();
+
+typedef struct
+{
+    int totalblknum;         /* total slot */
+    int usedblknum;          /* used slot */
+    Size itemsize;
+    Buffer firstfreeslot;    /* first free slot */
+    Buffer firstreleaseslot; /* first release slot */
+    RedoMemSlot *memslot;    /* slot item */
+    bool isInit;
+    InterruptFunc doInterrupt;
+}RedoMemManager;
+
+typedef void (*RefOperateFunc)(void *record);
+#ifdef USE_ASSERT_CHECKING
+typedef void (*RecordCheckFunc)(void *record, XLogRecPtr curPageLsn, uint32 blockId, bool replayed);
+#endif
+typedef void (*AddReadBlockFunc)(void *record, uint32 readblocks);
+
+typedef struct {
+    RefOperateFunc refCount;
+    RefOperateFunc DerefCount;
+#ifdef USE_ASSERT_CHECKING
+    RecordCheckFunc checkFunc;
+#endif
+    AddReadBlockFunc addReadBlock;
+}RefOperate;
+
+typedef struct
+{
+    void *BufferBlockPointers; /* RedoBufferDesc + block */
+    RedoMemManager memctl;
+    RefOperate *refOperate;
+}RedoBufferManager;
+
+
+
+typedef struct
+{
+    void *parsebuffers; /* ParseBufferDesc + XLogRecParseState */
+    RedoMemManager memctl;
+    RefOperate *refOperate;
+}RedoParseManager;
+
+
+
+typedef struct {
+    void* nextrecord;
+    XLogBlockParse blockparse; /* block data */
+    RedoParseManager* manager;
+    void* refrecord; /* origin dataptr, for mem release */
+    bool isFullSync;
+} XLogRecParseState;
+
+typedef struct XLogBlockRedoExtreRto {
+    void (*xlog_redoextrto)(XLogBlockHead* blockhead, void* blockrecbody, RedoBufferInfo* bufferinfo);
+    uint16 block_valid;
+} XLogBlockRedoExtreRto;
+
+typedef struct XLogParseBlock {
+    XLogRecParseState* (*xlog_parseblock)(XLogReaderState* record, uint32* blocknum);
+    RmgrId rmid;
+} XLogParseBlock;
+
+typedef enum {
+    HEAP_INSERT_ORIG_BLOCK_NUM = 0
+} XLogHeapInsertBlockEnum;
+
+typedef enum {
+    HEAP_DELETE_ORIG_BLOCK_NUM = 0
+} XLogHeapDeleteBlockEnum;
+
+typedef enum {
+    HEAP_UPDATE_NEW_BLOCK_NUM = 0,
+    HEAP_UPDATE_OLD_BLOCK_NUM
+} XLogHeapUpdateBlockEnum;
+
+typedef enum {
+    HEAP_BASESHIFT_ORIG_BLOCK_NUM = 0
+} XLogHeapBaeShiftBlockEnum;
+
+typedef enum {
+    HEAP_NEWPAGE_ORIG_BLOCK_NUM = 0
+} XLogHeapNewPageBlockEnum;
+
+typedef enum {
+    HEAP_LOCK_ORIG_BLOCK_NUM = 0
+} XLogHeapLockBlockEnum;
+
+typedef enum {
+    HEAP_INPLACE_ORIG_BLOCK_NUM = 0
+} XLogHeapInplaceBlockEnum;
+
+typedef enum {
+    HEAP_FREEZE_ORIG_BLOCK_NUM = 0
+} XLogHeapFreezeBlockEnum;
+
+typedef enum {
+    HEAP_CLEAN_ORIG_BLOCK_NUM = 0
+} XLogHeapCleanBlockEnum;
+
+typedef enum {
+    HEAP_VISIBLE_VM_BLOCK_NUM = 0,
+    HEAP_VISIBLE_DATA_BLOCK_NUM
+} XLogHeapVisibleBlockEnum;
+
+typedef enum {
+    HEAP_MULTI_INSERT_ORIG_BLOCK_NUM = 0
+} XLogHeapMultiInsertBlockEnum;
+
+typedef enum {
+    UHEAP_INSERT_ORIG_BLOCK_NUM = 0
+} XLogUHeapInsertBlockEnum;
+
+typedef enum {
+    UHEAP_DELETE_ORIG_BLOCK_NUM = 0
+} XLogUHeapDeleteBlockEnum;
+
+typedef enum {
+    UHEAP_UPDATE_NEW_BLOCK_NUM = 0,
+    UHEAP_UPDATE_OLD_BLOCK_NUM
+} XLogUHeapUpdateBlockEnum;
+
+typedef enum {
+    UHEAP_MULTI_INSERT_ORIG_BLOCK_NUM = 0
+} XLogUHeapMultiInsertBlockEnum;
+
+typedef enum {
+    UHEAP_FREEZE_TD_ORIG_BLOCK_NUM = 0
+} XLogUHeapFreezeTDBlockEnum;
+
+typedef enum {
+    UHEAP_INVALID_TD_ORIG_BLOCK_NUM = 0
+} XLogUHeapInvalidTDBlockEnum;
+
+typedef enum {
+    UHEAP_CLEAN_ORIG_BLOCK_NUM = 0
+} XLogUHeapCleanBlockEnum;
+
+typedef enum {
+    UHEAP2_ORIG_BLOCK_NUM = 0
+} XLogUHeap2BlockEnum;
+
+typedef enum {
+    UHEAP_UNDO_ORIG_BLOCK_NUM = 0
+} XLogUHeapUndoBlockEnum;
+
+typedef enum {
UHEAP_UNDOACTION_ORIG_BLOCK_NUM = 0 +} XLogUheapUndoActionBlockEnum; + +extern THR_LOCAL RedoParseManager* g_parseManager; +extern THR_LOCAL RedoBufferManager* g_bufferManager; + +extern void* XLogMemCtlInit(RedoMemManager* memctl, Size itemsize, int itemnum); +extern RedoMemSlot* XLogMemAlloc(RedoMemManager* memctl); +extern void XLogMemRelease(RedoMemManager* memctl, Buffer bufferid); + +extern void XLogRedoBufferInit(RedoBufferManager* buffermanager, int buffernum, RefOperate *refOperate, + InterruptFunc interruptOperte); +extern void XLogRedoBufferDestory(RedoBufferManager* buffermanager); +extern RedoMemSlot* XLogRedoBufferAlloc( + RedoBufferManager* buffermanager, RelFileNode relnode, ForkNumber forkNum, BlockNumber blockNum); +extern bool XLogRedoBufferIsValid(RedoBufferManager* buffermanager, Buffer bufferid); +extern void XLogRedoBufferRelease(RedoBufferManager* buffermanager, Buffer bufferid); +extern BlockNumber XLogRedoBufferGetBlkNumber(RedoBufferManager* buffermanager, Buffer bufferid); +extern Block XLogRedoBufferGetBlk(RedoBufferManager* buffermanager, RedoMemSlot* bufferslot); +extern Block XLogRedoBufferGetPage(RedoBufferManager* buffermanager, Buffer bufferid); +extern void XLogRedoBufferSetState(RedoBufferManager* buffermanager, RedoMemSlot* bufferslot, uint32 state); + +#define XLogRedoBufferInitFunc(bufferManager, buffernum, defOperate, interruptOperte) do { \ + XLogRedoBufferInit(bufferManager, buffernum, defOperate, interruptOperte); \ +} while (0) +#define XLogRedoBufferDestoryFunc(bufferManager) do { \ + XLogRedoBufferDestory(bufferManager); \ +} while (0) +#define XLogRedoBufferAllocFunc(relnode, forkNum, blockNum, bufferslot) do { \ + *bufferslot = XLogRedoBufferAlloc(g_bufferManager, relnode, forkNum, blockNum); \ +} while (0) +#define XLogRedoBufferIsValidFunc(bufferid, isvalid) do { \ + *isvalid = XLogRedoBufferIsValid(g_bufferManager, bufferid); \ +} while (0) +#define XLogRedoBufferReleaseFunc(bufferid) do { \ + XLogRedoBufferRelease(g_bufferManager, bufferid); \ +} while (0) + +#define XLogRedoBufferGetBlkNumberFunc(bufferid, blknumber) do { \ + *blknumber = XLogRedoBufferGetBlkNumber(g_bufferManager, bufferid); \ +} while (0) + +#define XLogRedoBufferGetBlkFunc(bufferslot, blockdata) do { \ + *blockdata = XLogRedoBufferGetBlk(g_bufferManager, bufferslot); \ +} while (0) + +#define XLogRedoBufferGetPageFunc(bufferid, blockdata) do { \ + *blockdata = (Page)XLogRedoBufferGetPage(g_bufferManager, bufferid); \ +} while (0) +#define XLogRedoBufferSetStateFunc(bufferslot, state) do { \ + XLogRedoBufferSetState(g_bufferManager, bufferslot, state); \ +} while (0) + +extern void XLogParseBufferInit(RedoParseManager* parsemanager, int buffernum, RefOperate *refOperate, + InterruptFunc interruptOperte); +extern void XLogParseBufferDestory(RedoParseManager* parsemanager); +extern void XLogParseBufferRelease(XLogRecParseState* recordstate); +extern XLogRecParseState* XLogParseBufferAllocList(RedoParseManager* parsemanager, XLogRecParseState* blkstatehead, void *record); +extern XLogRedoAction XLogReadBufferForRedo(XLogReaderState* record, uint8 buffer_id, RedoBufferInfo* bufferinfo); +extern void XLogInitBufferForRedo(XLogReaderState* record, uint8 block_id, RedoBufferInfo* bufferinfo); +extern XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState* record, uint8 buffer_id, ReadBufferMode mode, + bool get_cleanup_lock, RedoBufferInfo* bufferinfo, ReadBufferMethod readmethod = WITH_NORMAL_CACHE); +#define XLogParseBufferInitFunc(parseManager, buffernum, 
defOperate, interruptOperte) do { \ + XLogParseBufferInit(parseManager, buffernum, defOperate, interruptOperte); \ +} while (0) + +#define XLogParseBufferDestoryFunc(parseManager) do { \ + XLogParseBufferDestory(parseManager); \ +} while (0) + +#define XLogParseBufferReleaseFunc(recordstate) do { \ + XLogParseBufferRelease(recordstate); \ +} while (0) + +#define XLogParseBufferAllocListFunc(record, newblkstate, blkstatehead) do { \ + *newblkstate = XLogParseBufferAllocList(g_parseManager, blkstatehead, record); \ +} while (0) + +#define XLogParseBufferAllocListStateFunc(record, newblkstate, blkstatehead) do { \ + if (*blkstatehead == NULL) { \ + *newblkstate = XLogParseBufferAllocList(g_parseManager, NULL, record); \ + *blkstatehead = *newblkstate; \ + } else { \ + *newblkstate = XLogParseBufferAllocList(g_parseManager, *blkstatehead, record); \ + } \ +} while (0) + + + + +#ifdef EXTREME_RTO_DEBUG_AB +typedef void (*AbnormalProcFunc)(void); +typedef enum { + A_THREAD_EXIT, + ALLOC_FAIL, + OPEN_FILE_FAIL, + WAIT_LONG, + ABNORMAL_NUM, +}AbnormalType; +extern AbnormalProcFunc g_AbFunList[ABNORMAL_NUM]; + + +#define ADD_ABNORMAL_POSITION(pos) do { \ + static int __count##pos = 0; \ + __count##pos++; \ + if (g_instance.attr.attr_storage.extreme_rto_ab_pos == pos) { \ + if (g_instance.attr.attr_storage.extreme_rto_ab_count == __count##pos) { \ + ereport(LOG, (errmsg("extreme rto debug abnormal stop pos:%d, type:%d, count:%d", pos, \ + g_instance.attr.attr_storage.extreme_rto_ab_type, __count##pos))); \ + g_AbFunList[g_instance.attr.attr_storage.extreme_rto_ab_type % ABNORMAL_NUM](); \ + } \ + } \ +} while(0) +#else +#define ADD_ABNORMAL_POSITION(pos) +#endif + + + +void HeapXlogCleanOperatorPage( + RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size datalen, Size* freespace, bool repairFragmentation); +void HeapXlogFreezeOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size datalen, + bool isTupleLockUpgrade); +void HeapXlogVisibleOperatorPage(RedoBufferInfo* buffer, void* recorddata); +void HeapXlogVisibleOperatorVmpage(RedoBufferInfo* vmbuffer, void* recorddata); +void HeapXlogDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata, TransactionId recordxid, + bool isTupleLockUpgrade); +void HeapXlogInsertOperatorPage(RedoBufferInfo* buffer, void* recorddata, bool isinit, void* blkdata, Size datalen, + TransactionId recxid, Size* freespace, bool tde = false); +void HeapXlogMultiInsertOperatorPage(RedoBufferInfo* buffer, const void* recoreddata, bool isinit, const void* blkdata, + Size len, TransactionId recordxid, Size* freespace, bool tde = false); +void HeapXlogUpdateOperatorOldpage(RedoBufferInfo* buffer, void* recoreddata, bool hot_update, bool isnewinit, + BlockNumber newblk, TransactionId recordxid, bool isTupleLockUpgrade); +void HeapXlogUpdateOperatorNewpage(RedoBufferInfo* buffer, void* recorddata, bool isinit, void* blkdata, + Size datalen, TransactionId recordxid, Size* freespace, bool isTupleLockUpgrade, bool tde = false); +void HeapXlogLockOperatorPage(RedoBufferInfo* buffer, void* recorddata, bool isTupleLockUpgrade); +void HeapXlogInplaceOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size newlen); +void HeapXlogBaseShiftOperatorPage(RedoBufferInfo* buffer, void* recorddata); + +void BtreeRestorePage(Page page, char* from, int len); +void BtreeXlogMarkDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata); +void BtreeXlogPrunePageOperatorPage(RedoBufferInfo* buffer, void* recorddata); +void 
Btree2XlogShiftBaseOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+
+void BtreeRestoreMetaOperatorPage(RedoBufferInfo* metabuf, void* recorddata, Size datalen);
+void BtreeXlogInsertOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* data, Size datalen);
+void BtreeXlogSplitOperatorRightpage(
+    RedoBufferInfo* rbuf, void* recorddata, BlockNumber leftsib, BlockNumber rnext, void* blkdata, Size datalen);
+void BtreeXlogSplitOperatorNextpage(RedoBufferInfo* buffer, BlockNumber rightsib);
+void BtreeXlogSplitOperatorLeftpage(
+    RedoBufferInfo* lbuf, void* recorddata, BlockNumber rightsib, bool onleft, void* blkdata, Size datalen);
+void BtreeXlogVacuumOperatorPage(RedoBufferInfo* redobuffer, void* recorddata, void* blkdata, Size len);
+void BtreeXlogDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata, Size recorddatalen);
+void btreeXlogDeletePageOperatorRightpage(RedoBufferInfo* buffer, void* recorddata);
+
+void BtreeXlogDeletePageOperatorLeftpage(RedoBufferInfo* buffer, void* recorddata);
+
+void BtreeXlogDeletePageOperatorCurrentpage(RedoBufferInfo* buffer, void* recorddata);
+
+void BtreeXlogNewrootOperatorPage(RedoBufferInfo* buffer, void* record, void* blkdata, Size len, BlockNumber* downlink);
+void BtreeXlogHalfdeadPageOperatorParentpage(
+    RedoBufferInfo* pbuf, void* recorddata);
+void BtreeXlogHalfdeadPageOperatorLeafpage(
+    RedoBufferInfo* lbuf, void* recorddata);
+void BtreeXlogUnlinkPageOperatorRightpage(RedoBufferInfo* rbuf, void* recorddata);
+void BtreeXlogUnlinkPageOperatorLeftpage(RedoBufferInfo* lbuf, void* recorddata);
+void BtreeXlogUnlinkPageOperatorCurpage(RedoBufferInfo* buf, void* recorddata);
+void BtreeXlogUnlinkPageOperatorChildpage(RedoBufferInfo* cbuf, void* recorddata);
+
+void BtreeXlogClearIncompleteSplit(RedoBufferInfo* buffer);
+
+/* UBTree */
+extern void UBTreeRestorePage(Page page, char* from, int len);
+extern void UBTreeXlogMarkDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTreeXlogPrunePageOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTree2XlogShiftBaseOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTree2XlogRecycleQueueInitPageOperatorCurrPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTree2XlogRecycleQueueInitPageOperatorAdjacentPage(RedoBufferInfo* buffer, void* recorddata, bool isLeft);
+extern void UBTree2XlogRecycleQueueEndpointOperatorLeftPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTree2XlogRecycleQueueEndpointOperatorRightPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTree2XlogRecycleQueueModifyOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+extern void UBTree2XlogFreezeOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+
+extern void UBTreeRestoreMetaOperatorPage(RedoBufferInfo* metabuf, void* recorddata, Size datalen);
+extern void UBTreeXlogInsertOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* data, Size datalen);
+extern void UBTreeXlogSplitOperatorRightPage(RedoBufferInfo* rbuf, void* recorddata, BlockNumber leftsib,
+    BlockNumber rnext, void* blkdata, Size datalen, bool hasOpaque = true);
+extern void UBTreeXlogSplitOperatorNextpage(RedoBufferInfo* buffer, BlockNumber rightsib);
+extern void UBTreeXlogSplitOperatorLeftpage(RedoBufferInfo* lbuf, void* recorddata, BlockNumber rightsib,
+    bool onleft, void* blkdata, Size datalen, bool hasOpaque = true);
+extern void UBTreeXlogVacuumOperatorPage(RedoBufferInfo* redobuffer, void* recorddata, void* blkdata, Size len);
+extern void UBTreeXlogDeleteOperatorPage(RedoBufferInfo* buffer, void* recorddata, Size recorddatalen);
+extern void UBTreeXlogDeletePageOperatorRightpage(RedoBufferInfo* buffer, void* recorddata);
+
+extern void UBTreeXlogDeletePageOperatorLeftpage(RedoBufferInfo* buffer, void* recorddata);
+
+extern void UBTreeXlogDeletePageOperatorCurrentpage(RedoBufferInfo* buffer, void* recorddata);
+
+extern void UBTreeXlogNewrootOperatorPage(RedoBufferInfo *buffer, void *record, void *blkdata, Size len,
+    BlockNumber *downlink);
+extern void UBTreeXlogHalfdeadPageOperatorParentpage(
+    RedoBufferInfo* pbuf, void* recorddata);
+extern void UBTreeXlogHalfdeadPageOperatorLeafpage(
+    RedoBufferInfo* lbuf, void* recorddata);
+extern void UBTreeXlogUnlinkPageOperatorRightpage(RedoBufferInfo* rbuf, void* recorddata);
+extern void UBTreeXlogUnlinkPageOperatorLeftpage(RedoBufferInfo* lbuf, void* recorddata);
+extern void UBTreeXlogUnlinkPageOperatorCurpage(RedoBufferInfo* buf, void* recorddata);
+extern void UBTreeXlogUnlinkPageOperatorChildpage(RedoBufferInfo* cbuf, void* recorddata);
+
+extern void UBTreeXlogClearIncompleteSplit(RedoBufferInfo* buffer);
+
+void XLogRecSetBlockCommonState(XLogReaderState* record, XLogBlockParseEnum blockvalid,
+    RelFileNodeForkNum filenode, XLogRecParseState* recordblockstate, XLogPhyBlock *pblk = NULL);
+
+void XLogRecSetBlockCLogState(
+    XLogBlockCLogParse* blockclogstate, TransactionId topxid, uint16 status, uint16 xidnum, uint16* xidsarry);
+
+void XLogRecSetBlockCSNLogState(
+    XLogBlockCSNLogParse* blockcsnlogstate, TransactionId topxid, CommitSeqNo csnseq, uint16 xidnum, uint16* xidsarry);
+void XLogRecSetXactRecoveryState(XLogBlockXactParse* blockxactstate, TransactionId maxxid, CommitSeqNo maxcsnseq,
+    uint8 delayddlflag, uint8 updateminrecovery);
+void XLogRecSetXactDdlState(XLogBlockXactParse* blockxactstate, int nrels, void* xnodes, int invalidmsgnum,
+    void* invalidmsg, int nlibs, void* libfilename);
+void XLogRecSetXactCommonState(
+    XLogBlockXactParse* blockxactstate, uint16 committype, uint64 xinfo, TimestampTz xact_time);
+void XLogRecSetBcmState(XLogBlockBcmParse* blockbcmrec, uint64 startblock, int count, int status);
+void XLogRecSetNewCuState(XLogBlockNewCuParse* blockcudata, char* main_data, uint32 main_data_len);
+void XLogRecSetInvalidMsgState(XLogBlockInvalidParse* blockinvalid, TransactionId cutoffxid);
+void XLogRecSetIncompleteMsgState(XLogBlockIncompleteParse* blockincomplete, uint16 action, bool issplit, bool isroot,
+    BlockNumber downblk, BlockNumber leftblk, BlockNumber rightblk);
+void XLogRecSetPinVacuumState(XLogBlockVacuumPinParse* blockvacuum, BlockNumber lastblknum);
+void XLogRecSetSegFullSyncState(XLogBlockSegFullSyncParse *state, void *childState);
+void XLogRecSetSegNewPageInfo(XLogBlockSegNewPage *state, char *mainData, Size len);
+void XLogRecSetAuxiBlkNumState(XLogBlockDataParse* blockdatarec, BlockNumber auxilaryblkn1, BlockNumber auxilaryblkn2);
+void XLogRecSetBlockDataStateContent(XLogReaderState *record, uint32 blockid, XLogBlockDataParse *blockdatarec);
+void XLogRecSetBlockDataState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordblockstate,
+    XLogBlockParseEnum type = BLOCK_DATA_MAIN_DATA_TYPE);
+extern char* XLogBlockDataGetBlockData(XLogBlockDataParse* datadecode, Size* len);
+void Heap2RedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+extern void HeapRedoDataBlock(
+    XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+void SegPageRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo);
+extern void xlog_redo_data_block(
+    XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+extern void XLogRecSetBlockDdlState(XLogBlockDdlParse* blockddlstate, uint32 blockddltype, char *mainData,
+    int rels = 1);
+XLogRedoAction XLogCheckBlockDataRedoAction(XLogBlockDataParse* datadecode, RedoBufferInfo* bufferinfo);
+
+void BtreeRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+void Btree2RedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+
+/* UBTree */
+extern void UBTreeRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+extern void UBTree2RedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec,
+    RedoBufferInfo *bufferinfo);
+
+XLogRecParseState* XactXlogCsnlogParseToBlock(XLogReaderState* record, uint32* blocknum, TransactionId xid,
+    int nsubxids, TransactionId* subxids, CommitSeqNo csn, XLogRecParseState* recordstatehead);
+extern void XLogRecSetVmBlockState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordblockstate);
+extern void XLogRecSetUHeapUndoBlockState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordundostate);
+extern void XLogRecSetUndoBlockState(XLogReaderState* record, uint32 blockid, XLogRecParseState* recordundostate);
+extern void XLogRecSetRollbackFinishBlockState(XLogReaderState *record, uint32 blockid,
+    XLogRecParseState *recordundostate);
+extern bool DoLsnCheck(const RedoBufferInfo* bufferinfo, bool willInit, XLogRecPtr lastLsn,
+    const XLogPhyBlock *pblk, bool *needRepair);
+char* XLogBlockDataGetMainData(XLogBlockDataParse* datadecode, Size* len);
+void HeapRedoVmBlock(XLogBlockHead* blockhead, XLogBlockVmParse* blockvmrec, RedoBufferInfo* bufferinfo);
+void Heap2RedoVmBlock(XLogBlockHead* blockhead, XLogBlockVmParse* blockvmrec, RedoBufferInfo* bufferinfo);
+XLogRecParseState* xlog_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+XLogRecParseState* smgr_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+XLogRecParseState* segpage_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+void ProcSegPageCommonRedo(XLogRecParseState *parseState);
+void ProcSegPageJustFreeChildState(XLogRecParseState *parseState);
+XLogRecParseState* XactXlogClogParseToBlock(XLogReaderState* record, XLogRecParseState* recordstatehead,
+    uint32* blocknum, TransactionId xid, int nsubxids, TransactionId* subxids, CLogXidStatus status);
+XLogRecParseState* xact_xlog_commit_parse_to_block(XLogReaderState* record, XLogRecParseState* recordstatehead,
+    uint32* blocknum, TransactionId maxxid, CommitSeqNo maxseqnum);
+void visibilitymap_clear_buffer(RedoBufferInfo* bufferinfo, BlockNumber heapBlk);
+XLogRecParseState* xact_xlog_abort_parse_to_block(XLogReaderState* record, XLogRecParseState* recordstatehead,
+    uint32* blocknum, TransactionId maxxid, CommitSeqNo maxseqnum);
+XLogRecParseState* xact_xlog_prepare_parse_to_block(
+    XLogReaderState* record, XLogRecParseState* recordstatehead, uint32* blocknum, TransactionId maxxid);
+XLogRecParseState* xact_xlog_parse_to_block(XLogReaderState* record, uint32* blocknum);
+XLogRecParseState* ClogRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+
+XLogRecParseState* DbaseRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+
+XLogRecParseState* Heap2RedoParseIoBlock(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* HeapRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* BtreeRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+/* UBTree */
+extern XLogRecParseState* UBTreeRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* UBTree2RedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+
+extern XLogRecParseState* Heap3RedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+
+extern Size SalEncodeXLogBlock(void* recordblockstate, byte* buffer, void* sliceinfo);
+
+extern XLogRecParseState* XLogParseToBlockForDfv(XLogReaderState* record, uint32* blocknum);
+extern Size getBlockSize(XLogRecParseState* recordblockstate);
+extern XLogRecParseState* GistRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* GinRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+
+extern void GistRedoClearFollowRightOperatorPage(RedoBufferInfo* buffer);
+extern void GistRedoPageUpdateOperatorPage(RedoBufferInfo* buffer, void* recorddata, void* blkdata, Size datalen);
+extern void GistRedoPageSplitOperatorPage(
+    RedoBufferInfo* buffer, void* recorddata, void* data, Size datalen, bool Markflag, BlockNumber rightlink);
+extern void GistRedoCreateIndexOperatorPage(RedoBufferInfo* buffer);
+
+extern void GinRedoCreateIndexOperatorMetaPage(RedoBufferInfo* MetaBuffer);
+extern void GinRedoCreateIndexOperatorRootPage(RedoBufferInfo* RootBuffer);
+extern void GinRedoCreatePTreeOperatorPage(RedoBufferInfo* buffer, void* recordData);
+extern void GinRedoClearIncompleteSplitOperatorPage(RedoBufferInfo* buffer);
+extern void GinRedoVacuumDataOperatorLeafPage(RedoBufferInfo* buffer, void* recorddata);
+extern void GinRedoDeletePageOperatorCurPage(RedoBufferInfo* dbuffer);
+extern void GinRedoDeletePageOperatorParentPage(RedoBufferInfo* pbuffer, void* recorddata);
+extern void GinRedoDeletePageOperatorLeftPage(RedoBufferInfo* lbuffer, void* recorddata);
+extern void GinRedoUpdateOperatorMetapage(RedoBufferInfo* metabuffer, void* recorddata);
+extern void GinRedoUpdateOperatorTailPage(RedoBufferInfo* buffer, void* payload, Size totaltupsize, int32 ntuples);
+extern void GinRedoInsertListPageOperatorPage(
+    RedoBufferInfo* buffer, void* recorddata, void* payload, Size totaltupsize);
+extern void GinRedoUpdateAddNewTail(RedoBufferInfo* buffer, BlockNumber newRightlink);
+extern void GinRedoInsertData(RedoBufferInfo* buffer, bool isLeaf, BlockNumber rightblkno, void* rdata);
+extern void GinRedoInsertEntry(RedoBufferInfo* buffer, bool isLeaf, BlockNumber rightblkno, void* rdata);
+
+extern void GinRedoDeleteListPagesOperatorPage(RedoBufferInfo* metabuffer, const void* recorddata);
+extern void GinRedoDeleteListPagesMarkDelete(RedoBufferInfo* buffer);
+
+extern void spgRedoCreateIndexOperatorMetaPage(RedoBufferInfo* buffer);
+extern void spgRedoCreateIndexOperatorRootPage(RedoBufferInfo* buffer);
+extern void spgRedoCreateIndexOperatorLeafPage(RedoBufferInfo* buffer);
+extern void spgRedoAddLeafOperatorPage(RedoBufferInfo* bufferinfo, void* recorddata);
+extern void spgRedoAddLeafOperatorParent(RedoBufferInfo* bufferinfo, void* recorddata, BlockNumber blknoLeaf);
+extern void spgRedoMoveLeafsOpratorDstPage(RedoBufferInfo* buffer, void* recorddata, void* insertdata, void* tupledata);
+extern void spgRedoMoveLeafsOpratorSrcPage(
+    RedoBufferInfo* buffer, void* recorddata, void* insertdata, void* deletedata, BlockNumber blknoDst, int nInsert);
+extern void spgRedoMoveLeafsOpratorParentPage(
+    RedoBufferInfo* buffer, void* recorddata, void* insertdata, BlockNumber blknoDst, int nInsert);
+extern void spgRedoAddNodeUpdateSrcPage(RedoBufferInfo* buffer, void* recorddata, void* tuple, void* tupleheader);
+extern void spgRedoAddNodeOperatorSrcPage(RedoBufferInfo* buffer, void* recorddata, BlockNumber blknoNew);
+extern void spgRedoAddNodeOperatorDestPage(
+    RedoBufferInfo* buffer, void* recorddata, void* tuple, void* tupleheader, BlockNumber blknoNew);
+extern void spgRedoAddNodeOperatorParentPage(RedoBufferInfo* buffer, void* recorddata, BlockNumber blknoNew);
+extern void spgRedoSplitTupleOperatorDestPage(RedoBufferInfo* buffer, void* recorddata, void* tuple);
+extern void spgRedoSplitTupleOperatorSrcPage(RedoBufferInfo* buffer, void* recorddata, void* pretuple, void* posttuple);
+extern void spgRedoPickSplitRestoreLeafTuples(
+    RedoBufferInfo* buffer, void* recorddata, bool destflag, void* pageselect, void* insertdata);
+extern void spgRedoPickSplitOperatorSrcPage(RedoBufferInfo* srcBuffer, void* recorddata, void* deleteoffset,
+    BlockNumber blknoInner, void* pageselect, void* insertdata);
+extern void spgRedoPickSplitOperatorDestPage(
+    RedoBufferInfo* destBuffer, void* recorddata, void* pageselect, void* insertdata);
+extern void spgRedoPickSplitOperatorInnerPage(
+    RedoBufferInfo* innerBuffer, void* recorddata, void* tuple, void* tupleheader, BlockNumber blknoInner);
+extern void spgRedoPickSplitOperatorParentPage(RedoBufferInfo* parentBuffer, void* recorddata, BlockNumber blknoInner);
+extern void spgRedoVacuumLeafOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+extern void spgRedoVacuumRootOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+extern void spgRedoVacuumRedirectOperatorPage(RedoBufferInfo* buffer, void* recorddata);
+
+extern XLogRecParseState* SpgRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+
+extern void seqRedoOperatorPage(RedoBufferInfo* buffer, void* itmedata, Size itemsz);
+extern void seq_redo_data_block(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+
+extern void Heap3RedoDataBlock(
+    XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+
+extern XLogRecParseState* xact_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+
+extern bool XLogBlockRedoForExtremeRTO(XLogRecParseState* redoblocktate, RedoBufferInfo *bufferinfo,
+    bool notfound, RedoTimeCost &readBufCost, RedoTimeCost &redoCost);
+void XLogBlockParseStateRelease_debug(XLogRecParseState* recordstate, const char *func, uint32 line);
+#define XLogBlockParseStateRelease(recordstate) XLogBlockParseStateRelease_debug(recordstate, __FUNCTION__, __LINE__)
+#ifdef USE_ASSERT_CHECKING
+extern void DoRecordCheck(XLogRecParseState *recordstate, XLogRecPtr pageLsn, bool replayed);
+#endif
+extern XLogRecParseState* XLogParseBufferCopy(XLogRecParseState *srcState);
+extern XLogRecParseState* XLogParseToBlockForExtermeRTO(XLogReaderState* record, uint32* blocknum);
+extern XLogRedoAction XLogReadBufferForRedoBlockExtend(RedoBufferTag *redoblock, ReadBufferMode mode,
+    bool get_cleanup_lock, RedoBufferInfo *redobufferinfo,
+    XLogRecPtr xloglsn, XLogRecPtr last_lsn, bool willinit,
+    ReadBufferMethod readmethod, bool tde = false);
+extern XLogRecParseState* tblspc_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* relmap_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* HashRedoParseToBlock(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* seq_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* slot_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* barrier_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+extern XLogRecParseState* multixact_redo_parse_to_block(XLogReaderState* record, uint32* blocknum);
+extern void ExtremeRtoFlushBuffer(RedoBufferInfo *bufferinfo, bool updateFsm);
+extern void XLogForgetDDLRedo(XLogRecParseState* redoblockstate);
+void XLogDropSpaceShrink(XLogRecParseState *redoblockstate);
+extern void SyncOneBufferForExtremRto(RedoBufferInfo *bufferinfo);
+extern void XLogBlockInitRedoBlockInfo(XLogBlockHead* blockhead, RedoBufferTag* blockinfo);
+extern void XLogBlockDdlDoSmgrAction(XLogBlockHead* blockhead, void* blockrecbody, RedoBufferInfo* bufferinfo);
+extern void XLogBlockSegDdlDoRealAction(XLogBlockHead* blockhead, void* blockrecbody, RedoBufferInfo* bufferinfo);
+extern void GinRedoDataBlock(XLogBlockHead* blockhead, XLogBlockDataParse* blockdatarec, RedoBufferInfo* bufferinfo);
+extern void GistRedoDataBlock(XLogBlockHead *blockhead, XLogBlockDataParse *blockdatarec, RedoBufferInfo *bufferinfo);
+extern bool IsCheckPoint(const XLogRecParseState *parseState);
+void redo_atomic_xlog_dispatch(uint8 opCode, RedoBufferInfo *redo_buf, const char *data);
+void seg_redo_new_page_copy_and_flush(BufferTag *tag, char *data, XLogRecPtr lsn);
+
+#endif
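This header is the heart of the per-block redo split used by extreme RTO: the `*ParseToBlock` functions break one decoded WAL record into a chain of `XLogRecParseState` entries (one per touched block), and the `*RedoDataBlock`/operator-page functions apply a single entry to a single buffer. A minimal sketch of how a redo worker might drive that split, assuming `nextrecord` is the chain link inside `XLogRecParseState` and that `RedoTimeCost` is default-constructible; this is illustrative, not the engine's actual loop:

```
static void ReplayOneRecord(XLogReaderState *record)
{
    uint32 blocknum = 0;
    /* Phase 1: one parse state per block touched by the record. */
    XLogRecParseState *head = XLogParseToBlockForExtermeRTO(record, &blocknum);

    for (XLogRecParseState *cur = head; cur != NULL;
         cur = (XLogRecParseState *)cur->nextrecord) { /* assumed link field */
        RedoBufferInfo buffer = {};
        RedoTimeCost readCost = {};
        RedoTimeCost redoCost = {};
        /* Phase 2: apply one block; the costs feed redo statistics. */
        (void)XLogBlockRedoForExtremeRTO(cur, &buffer, false, readCost, redoCost);
    }
    /* The release macro records __FUNCTION__/__LINE__, which helps chase leaks. */
    XLogBlockParseStateRelease(head);
}
```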
diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h
index 47de07d91..ee7cd3f48 100644
--- a/src/include/access/xlogreader.h
+++ b/src/include/access/xlogreader.h
@@ -39,13 +39,9 @@ extern XLogReaderState* XLogReaderAllocate(XLogPageReadCB pagereadfunc, void* pr
 /* Free an XLogReader */
 extern void XLogReaderFree(XLogReaderState* state);
 
-/* The function has the stronger ability to find the next xlog record with XLogReadRecord() deployed. */
-extern bool ValidateNextXLogRecordPtr(XLogReaderState* state, XLogRecPtr& cur_ptr, char** err_msg);
-/* Adjust the start ptr of XlogRecoed for XlogReadRecord */
-extern void AlignXlogPtrToNextPageIfNeeded(XLogRecPtr* recPtr);
 /* Read the next XLog record. Returns NULL on end-of-WAL or failure */
 extern struct XLogRecord* XLogReadRecord(
-    XLogReaderState* state, XLogRecPtr recptr, char** errormsg, bool readoldversion = false, bool doDecode = true);
+    XLogReaderState* state, XLogRecPtr recptr, char** errormsg, bool doDecode = true, char* xlog_path = NULL);
 
 extern bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileNode *rnode, ForkNumber *forknum,
     BlockNumber *blknum, XLogPhyBlock *pblk = NULL);
@@ -59,17 +55,17 @@ extern void XLogRecGetVMPhysicalBlock(const XLogReaderState *record, uint8 block
 /* Invalidate read state */
 extern void XLogReaderInvalReadState(XLogReaderState* state);
 
-extern XLogRecPtr XLogFindNextRecord(XLogReaderState* state, XLogRecPtr RecPtr, XLogRecPtr *endPtr = NULL);
+extern XLogRecPtr XLogFindNextRecord(XLogReaderState* state, XLogRecPtr RecPtr, XLogRecPtr *endPtr = NULL, char* xlog_path = NULL);
 extern XLogRecPtr FindMaxLSN(char* workingpath, char* returnmsg, int msg_len, pg_crc32* maxLsnCrc,
     uint32 *maxLsnLen = NULL, TimeLineID *returnTli = NULL);
 extern XLogRecPtr FindMinLSN(char *workingPath, char *returnMsg, int msgLen, pg_crc32 *minLsnCrc);
 extern void CloseXlogFile(void);
 extern int SimpleXLogPageRead(XLogReaderState* xlogreader, XLogRecPtr targetPagePtr, int reqLen,
-    XLogRecPtr targetRecPtr, char* readBuf, TimeLineID* pageTLI);
+    XLogRecPtr targetRecPtr, char* readBuf, TimeLineID* pageTLI, char* xlog_path = NULL);
 extern void CloseXlogFile(void);
 
 /* Functions for decoding an XLogRecord */
-extern bool DecodeXLogRecord(XLogReaderState* state, XLogRecord* record, char** errmsg, bool readoldversion);
+extern bool DecodeXLogRecord(XLogReaderState* state, XLogRecord* record, char** errmsg);
 
 #define XLogRecGetTotalLen(decoder) ((decoder)->decoded_record->xl_tot_len)
 #define XLogRecGetPrev(decoder) ((decoder)->decoded_record->xl_prev)
@@ -77,7 +73,7 @@ extern bool DecodeXLogRecord(XLogReaderState* state, XLogRecord* record, char**
 #define XLogRecGetTdeInfo(decoder) ((decoder)->isTde)
 #define XLogRecGetRmid(decoder) ((decoder)->decoded_record->xl_rmid)
 #define XLogRecGetXid(decoder) ((decoder)->decoded_record->xl_xid)
-#define XLogRecGetTerm(decoder) ((decoder)->decoded_record->xl_term)
+#define XLogRecGetTerm(decoder) ((decoder)->decoded_record->xl_term & XLOG_MASK_TERM)
 #define XLogRecGetBucketId(decoder) ((decoder)->decoded_record->xl_bucket_id - 1)
 #define XLogRecGetCrc(decoder) ((decoder)->decoded_record->xl_crc)
 #define XLogRecGetOrigin(decoder) ((decoder)->record_origin)
@@ -92,7 +88,7 @@ extern char* XLogRecGetBlockData(XLogReaderState* record, uint8 block_id, Size*
 extern bool allocate_recordbuf(XLogReaderState* state, uint32 reclength);
 extern bool XlogFileIsExisted(const char* workingPath, XLogRecPtr inputLsn, TimeLineID timeLine);
 extern void ResetDecoder(XLogReaderState* state);
-bool ValidXLogPageHeader(XLogReaderState* state, XLogRecPtr recptr, XLogPageHeader hdr, bool readoldversion);
+bool ValidXLogPageHeader(XLogReaderState* state, XLogRecPtr recptr, XLogPageHeader hdr);
 void report_invalid_record(XLogReaderState* state, const char* fmt, ...)
 /*
  * This extension allows gcc to check the format string for consistency with
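Two API changes stand out in this hunk: `readoldversion` is gone (pre-upgrade record formats are no longer decoded here), and several readers gain an optional `xlog_path`, letting callers point at a WAL directory other than the server's own. A sketch of a standalone scan under those signatures; the callback wiring and the `InvalidXLogRecPtr` continuation convention are assumptions carried over from the PostgreSQL-style reader:

```
static void ScanWal(XLogRecPtr startPtr, char *xlogDir)
{
    char *errormsg = NULL;
    /* SimpleXLogPageRead is declared above; assuming it is compatible
     * with XLogPageReadCB despite its extra defaulted xlog_path argument. */
    XLogReaderState *state = XLogReaderAllocate(&SimpleXLogPageRead, NULL);

    /* Align to a real record boundary first; xlog_path rides along. */
    XLogRecPtr ptr = XLogFindNextRecord(state, startPtr, NULL, xlogDir);
    while (!XLogRecPtrIsInvalid(ptr)) {
        XLogRecord *record = XLogReadRecord(state, ptr, &errormsg, true, xlogDir);
        if (record == NULL)
            break; /* end of WAL, or a broken record described by errormsg */
        /* ... inspect via XLogRecGetXid(state), XLogRecGetRmid(state) ... */
        ptr = InvalidXLogRecPtr; /* continue from the reader's own position */
    }
    XLogReaderFree(state);
}
```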
diff --git a/src/include/access/xlogrecord.h b/src/include/access/xlogrecord.h
index 47fd856a0..9d7a07d93 100644
--- a/src/include/access/xlogrecord.h
+++ b/src/include/access/xlogrecord.h
@@ -37,8 +37,7 @@
  */
 #define XLR_SPECIAL_REL_UPDATE 0x01
 #define XLR_BTREE_UPGRADE_FLAG 0x02
-/* If xlog record is the compress table creation */
-#define XLR_REL_COMPRESS 0X04
+
 
 #define XLR_IS_TOAST 0X08 /* If xlog record is from toast page */
 
@@ -85,7 +84,7 @@ typedef struct XLogRecordBlockHeader {
 #define BKID_HAS_TDE_PAGE (0x40)
 #define BKID_GET_BKID(id) (id & 0x3F)
 
-/* 
+/*
  * In segment-page storage, RelFileNode and block number are logic for XLog. Thus, we need record
  * physical location in xlog. This macro is used to check whether in such situation.
  */
diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h
index a67cd6bdc..4fc545abc 100644
--- a/src/include/access/xlogutils.h
+++ b/src/include/access/xlogutils.h
@@ -16,6 +16,7 @@
 
 #include "access/xlogreader.h"
 #include "storage/buf/bufmgr.h"
+#include "postmaster/pagerepair.h"
 
 /* Result codes for XLogReadBufferForRedo[Extended] */
 typedef enum {
@@ -31,6 +32,7 @@ typedef enum {
     NOT_PRESENT,
     NOT_INITIALIZED,
     LSN_CHECK_ERROR,
+    CRC_CHECK_ERROR,
    SEGPAGE_LSN_CHECK_ERROR,
 }InvalidPageType;
 
@@ -42,7 +44,6 @@ extern void XLogCheckInvalidPages(void);
 extern void XLogDropRelation(const RelFileNode& rnode, ForkNumber forknum);
 extern void XlogDropRowReation(RelFileNode rnode);
 extern void XLogDropDatabase(Oid dbid);
-extern void XLogTruncateRelation(XLogReaderState* record, const RelFileNode& rnode, ForkNumber forkNum, BlockNumber nblocks);
 extern void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks);
 extern void XLogTruncateSegmentSpace(RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks);
 extern void XLogDropSegmentSpace(Oid spcNode, Oid dbNode);
@@ -61,12 +62,12 @@ extern void FreeFakeRelcacheEntry(Relation fakerel);
 extern void log_invalid_page(const RelFileNode &node, ForkNumber forkno, BlockNumber blkno, InvalidPageType type,
     const XLogPhyBlock *pblk);
 extern int read_local_xlog_page(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr,
-    char* cur_page, TimeLineID* pageTLI);
+    char* cur_page, TimeLineID* pageTLI, char* xlog_path = NULL);
 extern void closeXLogRead();
 extern bool IsDataBaseDrop(XLogReaderState* record);
 extern bool IsTableSpaceDrop(XLogReaderState* record);
 extern bool IsTableSpaceCreate(XLogReaderState* record);
-extern bool IsBarrierCreate(XLogReaderState *record);
+extern bool IsBarrierRelated(XLogReaderState *record);
 extern bool IsDataBaseCreate(XLogReaderState* record);
 extern bool IsSegPageShrink(XLogReaderState *record);
 extern bool IsSegPageDropSpace(XLogReaderState *record);
@@ -83,4 +84,6 @@ bool ParseStateUseShareBuf();
 bool ParseStateUseLocalBuf();
 bool ParseStateWithoutCache();
 
+extern void forget_specified_invalid_pages(RepairBlockKey key);
+extern void forget_range_invalid_pages(void *pageinfo);
 #endif
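The additions to xlogutils.h tie the invalid-page table to the new page-repair machinery: a page that fails its CRC can now be remembered as `CRC_CHECK_ERROR` instead of aborting redo, and once repaired it is removed again so the end-of-recovery consistency check passes. A sketch of that life cycle, assuming `RepairBlockKey` identifies the block the same way `log_invalid_page()` does:

```
/* During redo: remember the damaged page and keep replaying. */
static void OnPageCrcMismatch(const RelFileNode &node, ForkNumber forknum, BlockNumber blkno)
{
    log_invalid_page(node, forknum, blkno, CRC_CHECK_ERROR, NULL);
}

/* After the repair path rewrites the page (e.g. re-fetched from the primary):
 * forget it, so XLogCheckInvalidPages() no longer treats it as fatal. */
static void OnPageRepaired(RepairBlockKey key)
{
    forget_specified_invalid_pages(key);
}
```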
diff --git a/src/include/alarm/alarm.h b/src/include/alarm/alarm.h
index 737067a88..c1528a2b2 100644
--- a/src/include/alarm/alarm.h
+++ b/src/include/alarm/alarm.h
@@ -86,6 +86,8 @@ typedef enum AlarmId {
     ALM_AI_DNReduceSyncList = 0x404F005F,
     ALM_AI_DNIncreaseSyncList = 0x404F0060,
     ALM_AI_PgxcNodeMismatch = 0x404F0061,
+    ALM_AI_StreamingDisasterRecoveryCnDisconnected = 0x404F0070,
+    ALM_AI_StreamingDisasterRecoveryDnDisconnected = 0x404F0071,
 
     ALM_AI_BUTT = 0x7FFFFFFFFFFFFFFF /*force compiler to decide AlarmId as uint64*/
 } AlarmId;
diff --git a/src/include/bin/elog.h b/src/include/bin/elog.h
index 7f8c03d1c..0ebae4c50 100644
--- a/src/include/bin/elog.h
+++ b/src/include/bin/elog.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ * Portions Copyright (c) 2021, openGauss Contributors
 *
 * openGauss is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
diff --git a/src/include/bulkload/utils.h b/src/include/bulkload/utils.h
index 65a68f3f8..6f22ed859 100644
--- a/src/include/bulkload/utils.h
+++ b/src/include/bulkload/utils.h
@@ -245,10 +245,17 @@ return -1
 #define READ_JSON_STRING(_field) \
     do { \
         tmpObj = cJSON_GetObjectItem(json, #_field); \
-        if (tmpObj) \
-            tmp->_field = (char*)cJSON_strdup((const unsigned char*)tmpObj->valuestring); \
-        else \
+        if (tmpObj) { \
+            cJSON *_tmpStringItem = cJSON_CreateString(tmpObj->valuestring); \
+            if (_tmpStringItem != NULL) { \
+                tmp->_field = _tmpStringItem->valuestring; \
+                cJSON_free(_tmpStringItem); /* shallow free */ \
+            } else { \
+                tmp->_field = NULL; \
+            } \
+        } else { \
            tmp->_field = NULL; \
+        } \
     } while (0)
 
 #define READ_JSON_BOOL(_field) \
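The rewritten READ_JSON_STRING no longer reaches for `cJSON_strdup`, which is internal to cJSON, and instead deep-copies through the public `cJSON_CreateString()`; `cJSON_free()` then releases only the node wrapper, deliberately leaving the duplicated `valuestring` alive for `tmp->_field` to own. Restated as a function for readability (a sketch against the stock cJSON API, not code from this patch):

```
static char* DupJsonString(const cJSON* json, const char* field)
{
    cJSON* obj = cJSON_GetObjectItem(json, field);
    if (obj == NULL)
        return NULL;
    cJSON* copy = cJSON_CreateString(obj->valuestring); /* deep-copies the string */
    if (copy == NULL)
        return NULL;   /* out of memory: mirror the macro's NULL result */
    char* result = copy->valuestring;
    cJSON_free(copy);  /* shallow free: drop the wrapper, keep the string */
    return result;
}
```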
diff --git a/src/include/catalog/catalog.h b/src/include/catalog/catalog.h
index 4785e4bed..bc5a82b02 100644
--- a/src/include/catalog/catalog.h
+++ b/src/include/catalog/catalog.h
@@ -56,6 +56,7 @@ extern bool IsSystemRelation(Relation relation);
 extern bool IsToastRelation(Relation relation);
 extern bool IsCatalogRelation(Relation relation);
 
+extern bool IsSysSchema(Oid namespaceId);
 extern bool IsSystemClass(Form_pg_class reltuple);
 extern bool IsToastClass(Form_pg_class reltuple);
 extern bool IsCatalogClass(Oid relid, Form_pg_class reltuple);
@@ -63,7 +64,6 @@ extern bool IsCatalogClass(Oid relid, Form_pg_class reltuple);
 extern bool IsSystemNamespace(Oid namespaceId);
 extern bool IsToastNamespace(Oid namespaceId);
 extern bool IsCStoreNamespace(Oid namespaceId);
-
 extern bool IsPerformanceNamespace(Oid namespaceId);
 extern bool IsSnapshotNamespace(Oid namespaceId);
 extern bool IsMonitorSpace(Oid namespaceId);
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 4968b448c..18c86015e 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -62,6 +62,6 @@
 
 #define NAILED_IN_CATALOG_NUM 8
 
-#define CATALOG_NUM 103
+#define CATALOG_NUM 105
 
 #endif
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index e1317fb68..5fc84cd22 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -122,9 +122,12 @@ struct ObjectAddresses {
  * is the role mentioned in a policy object. The referenced object must be a
  * pg_authid entry.
 *
- * (m) a SHARED_DEPENDENCY_MOT_TABLE entry means that the referenced object
+ * (e) a SHARED_DEPENDENCY_MOT_TABLE entry means that the referenced object
  * is the database holding FDW table. The dependent object must be a FDW table entry.
 *
+ * (f) a SHARED_DEPENDENCY_DBPRIV entry means that the referenced object is
+ * a role mentioned in the gs_db_privilege. The referenced object must be a pg_authid entry.
+ *
 * SHARED_DEPENDENCY_INVALID is a value used as a parameter in internal
 * routines, and is not valid in the catalog itself.
 */
@@ -134,6 +137,7 @@ typedef enum SharedDependencyType {
     SHARED_DEPENDENCY_ACL = 'a',
     SHARED_DEPENDENCY_RLSPOLICY = 'r',
     SHARED_DEPENDENCY_MOT_TABLE = 'm',
+    SHARED_DEPENDENCY_DBPRIV = 'd',
     SHARED_DEPENDENCY_INVALID = 0
 } SharedDependencyType;
@@ -198,6 +202,7 @@ typedef enum ObjectClass {
     OCLASS_GLOBAL_SETTING_ARGS, /* global setting args */
     OCLASS_COLUMN_SETTING_ARGS, /* column setting args */
     OCLASS_DEFACL,              /* pg_default_acl */
+    OCLASS_DB_PRIVILEGE,        /* gs_db_privilege */
     OCLASS_EXTENSION,           /* pg_extension */
     OCLASS_DATA_SOURCE,         /* data source */
     OCLASS_DIRECTORY,           /* pg_directory */
diff --git a/src/include/catalog/gs_db_privilege.h b/src/include/catalog/gs_db_privilege.h
new file mode 100644
index 000000000..6b3117240
--- /dev/null
+++ b/src/include/catalog/gs_db_privilege.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * -------------------------------------------------------------------------
+ *
+ * gs_db_privilege.h
+ *     definition of relation for database level privileges
+ *
+ * IDENTIFICATION
+ *     src/include/catalog/gs_db_privilege.h
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef GS_DB_PRIVILEGE_H
+#define GS_DB_PRIVILEGE_H
+
+#include "catalog/genbki.h"
+#include "nodes/parsenodes.h"
+
+#define DbPrivilegeId 5566
+#define DbPrivilege_Rowtype_Id 5567
+
+CATALOG(gs_db_privilege,5566) BKI_SCHEMA_MACRO
+{
+    Oid roleid;
+#ifdef CATALOG_VARLEN
+    text privilege_type;
+#endif
+    bool admin_option;
+} FormData_gs_db_privilege;
+
+typedef FormData_gs_db_privilege *Form_gs_db_privilege;
+
+#define Natts_gs_db_privilege 3
+#define Anum_gs_db_privilege_roleid 1
+#define Anum_gs_db_privilege_privilege_type 2
+#define Anum_gs_db_privilege_admin_option 3
+
+extern void ExecuteGrantDbStmt(GrantDbStmt* stmt);
+extern bool HasSpecAnyPriv(Oid userId, const char* priv, bool isAdminOption);
+extern bool HasOneOfAnyPriv(Oid roleId);
+extern Datum has_any_privilege(PG_FUNCTION_ARGS);
+extern void DropDbPrivByOid(Oid roleID);
+
+#define CREATE_ANY_TABLE "create any table"
+#define ALTER_ANY_TABLE "alter any table"
+#define DROP_ANY_TABLE "drop any table"
+#define SELECT_ANY_TABLE "select any table"
+#define INSERT_ANY_TABLE "insert any table"
+#define UPDATE_ANY_TABLE "update any table"
+#define DELETE_ANY_TABLE "delete any table"
+#define CREATE_ANY_FUNCTION "create any function"
+#define EXECUTE_ANY_FUNCTION "execute any function"
+#define CREATE_ANY_PACKAGE "create any package"
+#define EXECUTE_ANY_PACKAGE "execute any package"
+#define CREATE_ANY_TYPE "create any type"
+#define CREATE_ANY_SEQUENCE "create any sequence"
+#define CREATE_ANY_INDEX "create any index"
+
+#endif /* GS_DB_PRIVILEGE_H */
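gs_db_privilege stores one row per (role, ANY-privilege) pair, with `admin_option` mirroring WITH ADMIN OPTION; the string macros above are the only accepted spellings of `privilege_type`. A sketch of the check a DDL path could layer on top of the usual ACL test — the surrounding error handling here is illustrative, not the patch's actual call site:

```
static void CheckCanCreateTable(Oid userId, Oid namespaceId)
{
    /* Ordinary schema ACL first (standard aclchk entry point). */
    if (pg_namespace_aclcheck(namespaceId, userId, ACL_CREATE) == ACLCHECK_OK)
        return;
    /* Fall back to the database-level privilege; `false` asks for the
     * privilege itself rather than its ADMIN OPTION. */
    if (HasSpecAnyPriv(userId, CREATE_ANY_TABLE, false))
        return;
    ereport(ERROR,
        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
         errmsg("permission denied to create table")));
}
```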
diff --git a/src/include/catalog/gs_encrypted_proc.h b/src/include/catalog/gs_encrypted_proc.h
index 9b1d52324..49ee1eaa3 100755
--- a/src/include/catalog/gs_encrypted_proc.h
+++ b/src/include/catalog/gs_encrypted_proc.h
@@ -38,11 +38,11 @@ CATALOG(gs_encrypted_proc,9750) BKI_SCHEMA_MACRO
 {
     Oid func_id;                /* function oid */
     int4 prorettype_orig;       /* OID of result type */
+    timestamp last_change;      /* last change of this procedure */
     oidvector proargcachedcol;  /* colums settings oid (excludes OUT params) */
 
 #ifdef CATALOG_VARLEN
     Oid proallargtypes_orig[1]; /* all param types (NULL if IN only) */
-    timestamp last_change;      /* last change of this procedure */
 #endif
 } FormData_gs_encrypted_proc;
 
@@ -52,8 +52,8 @@ typedef FormData_gs_encrypted_proc *Form_gs_encrypted_proc;
 
 #define Anum_gs_encrypted_proc_func_id 1
 #define Anum_gs_encrypted_proc_prorettype_orig 2
-#define Anum_gs_encrypted_proc_proargcachedcol 3
-#define Anum_gs_encrypted_proc_proallargtypes_orig 4
-#define Anum_gs_encrypted_proc_last_change 5
+#define Anum_gs_encrypted_proc_last_change 3
+#define Anum_gs_encrypted_proc_proargcachedcol 4
+#define Anum_gs_encrypted_proc_proallargtypes_orig 5
 
 #endif /* GS_ENCRYPTED_PROC_H */
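The gs_encrypted_proc reorder is not cosmetic: in a genbki catalog, only attributes in front of the first varlena column are addressable through the `Form_...` struct overlay, so moving the fixed-width `last_change` out of the `CATALOG_VARLEN` block (and renumbering the `Anum_` constants to match) makes it readable without a null-aware fetch. A sketch, where the syscache id `GSCLPROCOID` and the `Timestamp` field mapping are placeholders/assumptions:

```
static Timestamp EncryptedProcLastChange(Oid funcId)
{
    /* GSCLPROCOID is a placeholder syscache id for gs_encrypted_proc. */
    HeapTuple tup = SearchSysCache1(GSCLPROCOID, ObjectIdGetDatum(funcId));
    if (!HeapTupleIsValid(tup))
        return 0;
    Form_gs_encrypted_proc form = (Form_gs_encrypted_proc)GETSTRUCT(tup);
    /* Safe now that last_change lives in the fixed-width prefix; the
     * varlena proallargtypes_orig still needs SysCacheGetAttr(). */
    Timestamp lastChange = form->last_change;
    ReleaseSysCache(tup);
    return lastChange;
}
```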
diff --git a/src/include/catalog/gs_job_argument.h b/src/include/catalog/gs_job_argument.h
index af2e5106b..1e38d4d93 100644
--- a/src/include/catalog/gs_job_argument.h
+++ b/src/include/catalog/gs_job_argument.h
@@ -84,4 +84,4 @@ List *search_by_sysscan_1(Relation rel, ScanKeyInfo *scan_key_info);
 List *search_by_sysscan_2(Relation rel, ScanKeyInfo *scan_key_info1, ScanKeyInfo *scan_key_info2);
 
-#endif /* GS_JOB_ARGUMENT_H */
+#endif /* GS_JOB_ARGUMENT_H */
\ No newline at end of file
diff --git a/src/include/catalog/gs_job_attribute.h b/src/include/catalog/gs_job_attribute.h
index d10121fbc..34a738c37 100644
--- a/src/include/catalog/gs_job_attribute.h
+++ b/src/include/catalog/gs_job_attribute.h
@@ -279,6 +279,8 @@ extern void batch_lookup_job_attribute(JobAttribute *attributes, int n);
 extern void batch_lookup_program_argument(Datum job_name, JobArgument *arguments, int num_of_args);
 extern List *search_related_attribute(Relation gs_job_attribute_rel, Datum attribute_name, Datum attribute_value);
 extern HeapTuple search_from_pg_job(Relation pg_job_rel, Datum job_name);
+extern int4 get_job_id_from_pg_job(Datum job_name);
+HeapTuple search_from_pg_job_proc_no_exception(Relation rel, Datum job_name);
 
 /* Tools */
 extern void enable_single_force(Datum object_name, Datum enable_value, bool force);
@@ -304,4 +306,4 @@ extern bool execute_backend_scheduler_job(Datum job_name, StringInfoData *buf);
 /* default credential */
 #define DEFAULT_CREDENTIAL_NAME "db_credential"
 
-#endif /* GS_JOB_ATTRIBUTE_H */
+#endif /* GS_JOB_ATTRIBUTE_H */
\ No newline at end of file
diff --git a/src/include/catalog/gs_matview.h b/src/include/catalog/gs_matview.h
index c81812c5c..72654de3f 100644
--- a/src/include/catalog/gs_matview.h
+++ b/src/include/catalog/gs_matview.h
@@ -84,5 +84,6 @@ extern bool CheckPermissionForBasetable(const RangeTblEntry *rte);
 extern void CheckRefreshMatview(Relation matviewRel, bool isIncremental);
 extern void acquire_mativew_tables_lock(Query *query, bool incremental);
 extern bool CheckMatviewQuals(Query *query);
+extern Oid FindRoleid(Oid relid);
 
 #endif /* GS_MATVIEW_H */
diff --git a/src/include/catalog/gs_package.h b/src/include/catalog/gs_package.h
index 159cb4195..2441a1a3e 100644
--- a/src/include/catalog/gs_package.h
+++ b/src/include/catalog/gs_package.h
@@ -27,6 +27,7 @@
 #define GS_PACKAGE_H
 
 #include "catalog/genbki.h"
+#include "utils/plpgsql.h"
 
 /* ----------------------------------------------------------------
  *        gs_package definition.
@@ -50,9 +51,25 @@ extern NameData* GetPackageName(Oid packageOid);
 extern Oid PackageNameListGetOid(List* pkgnameList, bool missing_ok=false);
 extern Oid GetPackageNamespace(Oid packageOid);
 extern bool IsExistPackageName(const char* pkgname);
-extern void BuildSessionPackageRuntime(uint64 sessionId, uint64 parentSessionId);
+extern void BuildSessionPackageRuntimeForAutoSession(uint64 sessionId, uint64 parentSessionId,
+    PLpgSQL_execstate* estate = NULL, PLpgSQL_function* func = NULL);
 extern void initAutonomousPkgValue(PLpgSQL_package* targetPkg, uint64 sessionId);
-extern void processAutonmSessionPkgs(PLpgSQL_function* func);
+extern void initAutoSessionPkgsValue(uint64 sessionId);
+extern void SetFuncInfoValue(List* SessionFuncInfo, PLpgSQL_execstate* estate);
+extern void processAutonmSessionPkgsInException(PLpgSQL_function* func);
+extern void initAutoSessionFuncInfoValue(uint64 sessionId, PLpgSQL_execstate* estate);
+extern List *processAutonmSessionPkgs(PLpgSQL_function* func, PLpgSQL_execstate* estate = NULL,
+    bool isAutonm = false);
+extern Portal BuildHoldPortalFromAutoSession();
+extern void restoreAutonmSessionCursors(PLpgSQL_execstate* estate, PLpgSQL_row* row);
+extern void ResetAutoPortalConext(Portal portal);
+extern void BuildSessionPackageRuntimeForParentSession(uint64 sessionId, PLpgSQL_execstate* estate);
+enum FunctionErrorType {FunctionDuplicate, FunctionUndefined, FuncitonDefineError, FunctionReturnTypeError};
+#ifndef ENABLE_MULTIPLE_NODES
+extern Oid GetOldTupleOid(const char* procedureName, oidvector* parameterTypes, Oid procNamespace,
+    Oid propackageid, Datum* values, Datum parameterModes);
+bool isSameArgList(CreateFunctionStmt* stmt1, CreateFunctionStmt* stmt2);
+#endif
 CATALOG(gs_package,7815) BKI_BOOTSTRAP BKI_ROWTYPE_OID(9745) BKI_SCHEMA_MACRO
 {
     Oid pkgnamespace; /*package name space*/
diff --git a/src/include/catalog/heap.h b/src/include/catalog/heap.h
index f4f8e727c..e86c66914 100644
--- a/src/include/catalog/heap.h
+++ b/src/include/catalog/heap.h
@@ -80,14 +80,12 @@ extern Relation heap_create(const char *relname,
     bool mapped_relation,
     bool allow_system_table_mods,
     int8 row_compress,
-    Datum reloptions,
     Oid ownerid,
     bool skip_create_storage,
     TableAmType tam_type,
     int8 relindexsplit = 0,
     StorageType storage_type = HEAP_DISK,
-    bool newcbi = false,
-    Oid accessMethodObjectId = 0);
+    bool newcbi = false);
 
 extern bool heap_is_matview_init_state(Relation rel);
 
@@ -100,9 +98,7 @@ heapCreatePartition(const char* part_name,
     Oid bucketOid,
     Oid ownerid,
     StorageType storage_type,
-    bool newcbi = false,
-    Datum reloptions = Datum(0));
-
+    bool newcbi = false);
 
 extern Oid heap_create_with_catalog(const char *relname,
     Oid relnamespace,
@@ -124,7 +120,7 @@ extern Oid heap_create_with_catalog(const char *relname,
     bool use_user_acl,
     bool allow_system_table_mods,
     PartitionState *partTableState,
-    int8 row_compress, 
+    int8 row_compress,
     HashBucketInfo *bucketinfo,
     bool record_dependce = true,
     List* ceLst = NULL,
@@ -153,6 +149,12 @@ extern Oid HeapAddListPartition(Relation pgPartRel, Oid partTableOid, Oid partT
 extern Oid HeapAddHashPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespace, Oid bucketOid,
     HashPartitionDefState *newPartDef, Oid ownerid, Datum reloptions, const bool* isTimestamptz,
     StorageType storage_type, int2vector* subpartition_key = NULL, bool isSubPartition = false);
+extern Node *MakeDefaultSubpartition(PartitionState *partitionState, Node *partitionDefState);
+extern List *addNewSubPartitionTuplesForPartition(Relation pgPartRel, Oid partTableOid, Oid partTablespace,
+    Oid bucketOid, Oid ownerid, Datum reloptions, const bool *isTimestamptz, StorageType storage_type,
+    PartitionState *partitionState, Node *partitionDefState, LOCKMODE partLockMode);
+
+extern Oid GetPartTablespaceOidForSubpartition(Oid reltablespace, const char* partTablespacename);
 extern void heapDropPartitionIndex(Relation parentIndex, Oid partIndexId);
 extern void addNewPartitionTuple(Relation pg_part_desc, Partition new_part_desc, int2vector* pkey,
     oidvector *intablespace,
@@ -187,7 +189,7 @@ extern void RemoveAttrDefaultById(Oid attrdefId);
 template 
 extern void RemoveStatistics(Oid relid, AttrNumber attnum);
 
-extern Form_pg_attribute SystemAttributeDefinition(AttrNumber attno, bool relhasoids, bool relhasbucket);
+extern Form_pg_attribute SystemAttributeDefinition(AttrNumber attno, bool relhasoids, bool relhasbucket, bool relhasuids);
 extern Form_pg_attribute SystemAttributeByName(const char *attname, bool relhasoids);
 extern int GetSysAttLength(bool hasBucketAttr = true);
 
@@ -198,7 +200,7 @@ extern void CheckAttributeType(const char *attname, Oid atttypid, Oid attcollati
 #ifdef PGXC
 /* Functions related to distribution data of relations */
 extern void AddRelationDistribution(const char *relname, Oid relid, DistributeBy *distributeby,
-    PGXCSubCluster *subcluster, List *parentOids, TupleDesc descriptor, bool isinstallationgroup, 
+    PGXCSubCluster *subcluster, List *parentOids, TupleDesc descriptor, bool isinstallationgroup,
     bool isbucket = false, int bucketmaplen = 0);
 extern void GetRelationDistributionItems(Oid relid, DistributeBy *distributeby, TupleDesc descriptor,
     char *locatortype, int *hashalgorithm, int *hashbuckets, AttrNumber *attnum);
@@ -238,4 +240,5 @@ extern Oid AddNewIntervalPartition(Relation rel, void* insertTuple);
 
 extern int GetIndexKeyAttsByTuple(Relation relation, HeapTuple indexTuple);
 
+extern void AddOrDropUidsAttr(Oid relOid, bool oldRelHasUids, bool newRelHasUids);
 #endif /* HEAP_H */
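heap.h wires the hidden uid column into relation management: `SystemAttributeDefinition()` now has to know whether a relation carries uids, and `AddOrDropUidsAttr()` reconciles the pg_attribute entry when the option flips. A sketch of the toggle path, assuming the caller has already parsed the old and new reloption values:

```
static void ApplyHasUidsOption(Oid relOid, bool oldHasUids, bool newHasUids)
{
    if (oldHasUids == newHasUids)
        return; /* nothing to reconcile */
    /* Adds or drops the uid system column and keeps the gs_uid
     * backup counter (introduced later in this patch) in step. */
    AddOrDropUidsAttr(relOid, oldHasUids, newHasUids);
}
```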
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index 72d897f88..706c96a3c 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -20,7 +20,6 @@
 #include "utils/tuplesort.h"
 
 #define DEFAULT_INDEX_TYPE "btree"
-#define DEFAULT_HASH_INDEX_TYPE "hash"
 #define DEFAULT_CSTORE_INDEX_TYPE "psort"
 #define DEFAULT_GIST_INDEX_TYPE "gist"
 #define CSTORE_BTREE_INDEX_TYPE "cbtree"
@@ -183,12 +182,13 @@ extern void index_set_state_flags(Oid indexId, IndexStateFlagsAction action);
 extern void reindex_indexpart_internal(Relation heapRelation,
     Relation iRel,
     IndexInfo* indexInfo,
-    Oid indexPartId);
+    Oid indexPartId,
+    void *baseDesc);
 extern void reindex_index(Oid indexId, Oid indexPartId,
     bool skip_constraint_checks, AdaptMem *memInfo,
     bool dbWide,
     void *baseDesc = NULL);
-extern void ReindexGlobalIndexInternal(Relation heapRelation, Relation iRel, IndexInfo* indexInfo);
+extern void ReindexGlobalIndexInternal(Relation heapRelation, Relation iRel, IndexInfo* indexInfo, void* baseDesc);
 
 /* Flag bits for ReindexRelation(): */
 #define REINDEX_REL_PROCESS_TOAST 0x01
@@ -238,7 +238,7 @@ extern void AddGPIForPartition(Oid partTableOid, Oid partOid);
 extern void AddGPIForSubPartition(Oid partTableOid, Oid partOid, Oid subPartOid);
 void AddCBIForPartition(Relation partTableRel, Relation tempTableRel, const List* indexRelList,
     const List* indexDestOidList);
-extern void DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid);
+extern bool DeleteGPITuplesForPartition(Oid partTableOid, Oid partOid);
 extern void DeleteGPITuplesForSubPartition(Oid partTableOid, Oid partOid, Oid subPartOid);
 extern void mergeBTreeIndexes(List* mergingBtreeIndexes, List* srcPartMergeOffset, int2 bktId);
 extern bool RecheckIndexTuple(IndexScanDesc scan, TupleTableSlot *slot);
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index 73154d8ed..6a8236549 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -100,6 +100,14 @@ DECLARE_UNIQUE_INDEX(pg_auth_members_role_member_index, 2694, on pg_auth_members
 DECLARE_UNIQUE_INDEX(pg_auth_members_member_role_index, 2695, on pg_auth_members using btree(member oid_ops, roleid oid_ops));
 #define AuthMemMemRoleIndexId 2695
 
+/* Add index for gs_db_privilege */
+DECLARE_UNIQUE_INDEX(gs_db_privilege_oid_index, 5568, on gs_db_privilege using btree(oid oid_ops));
+#define DbPrivilegeOidIndexId 5568
+DECLARE_INDEX(gs_db_privilege_roleid_index, 5569, on gs_db_privilege using btree(roleid oid_ops));
+#define DbPrivilegeRoleidIndexId 5569
+DECLARE_UNIQUE_INDEX(gs_db_privilege_roleid_privilege_type_index, 5570, on gs_db_privilege using btree(roleid oid_ops, privilege_type text_ops));
+#define DbPrivilegeRoleidPrivilegeTypeIndexId 5570
+
 DECLARE_UNIQUE_INDEX(pg_cast_oid_index, 2660, on pg_cast using btree(oid oid_ops));
 #define CastOidIndexId 2660
 DECLARE_UNIQUE_INDEX(pg_cast_source_target_index, 2661, on pg_cast using btree(castsource oid_ops, casttarget oid_ops));
@@ -217,7 +225,7 @@ DECLARE_UNIQUE_INDEX(pg_proc_oid_index, 2690, on pg_proc using btree(oid oid_ops
 DECLARE_INDEX(pg_proc_proname_args_nsp_index, 2691, on pg_proc using btree(proname name_ops, proargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops));
 #define ProcedureNameArgsNspIndexId 2691
 
-DECLARE_UNIQUE_INDEX(pg_proc_proname_all_args_nsp_index, 9666, on pg_proc using btree(proname name_ops, allargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops));
+DECLARE_INDEX(pg_proc_proname_all_args_nsp_index, 9666, on pg_proc using btree(proname name_ops, allargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops));
 #define ProcedureNameAllArgsNspIndexId 9666
 
 DECLARE_INDEX(pg_proc_proname_args_nsp_new_index, 9378, on pg_proc using btree(proname name_ops, proargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops));
@@ -407,6 +415,10 @@ DECLARE_UNIQUE_INDEX(pg_hashbucket_oid_index, 3492, on pg_hashbucket using btree
 DECLARE_INDEX(pg_hashbucket_bid_index,3493, on pg_hashbucket using btree(bucketid oid_ops, bucketcnt int4_ops, bucketmapsize int4_ops));
 #define HashBucketBidIndexId 3493
 
+/* Add index of table oid for gs_uid */
+DECLARE_UNIQUE_INDEX(gs_uid_relid_index, 3499, on gs_uid using btree(relid oid_ops));
+#define UidRelidIndexId 3499
+
 /* Add index of table oid for pg_job, pg_job_proc */
 DECLARE_UNIQUE_INDEX(pg_job_oid_index, 3453, on pg_job using btree(oid oid_ops));
 #define PgJobOidIndexId 3453
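Three indexes back the new privilege catalog: by oid, by roleid, and a unique (roleid, privilege_type) pair, which is what guarantees a role holds each ANY privilege at most once. Enumerating a role's rows through the roleid index might look like this sketch (standard systable-scan primitives; error handling omitted):

```
static void ForEachDbPrivilege(Oid roleId)
{
    Relation rel = heap_open(DbPrivilegeId, AccessShareLock);
    ScanKeyData key;
    ScanKeyInit(&key, Anum_gs_db_privilege_roleid,
                BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(roleId));
    SysScanDesc scan = systable_beginscan(rel, DbPrivilegeRoleidIndexId, true, NULL, 1, &key);
    HeapTuple tup = NULL;
    while (HeapTupleIsValid(tup = systable_getnext(scan))) {
        /* privilege_type is varlena: read it with heap_getattr(),
         * not through the Form_gs_db_privilege overlay. */
    }
    systable_endscan(scan);
    heap_close(rel, AccessShareLock);
}
```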
Each function/operator is identified @@ -43,6 +45,7 @@ typedef struct _FuncCandidateList int ndargs; /* number of defaulted args */ int *argnumbers; /* args' positional indexes, if named call */ Oid refSynOid; /* referenced synonym's OID if mapping successfully, and drop it when view decoupling */ + int allArgNum; /* all param num including in/out/inout */ Oid args[FLEXIBLE_ARRAY_MEMBER]; /* arg types --- VARIABLE LENGTH ARRAY */ } *FuncCandidateList; /* VARIABLE LENGTH STRUCT */ @@ -110,9 +113,12 @@ extern Oid RangeVarGetRelidExtended(const RangeVar *relation, StringInfo detailInfo = NULL, Oid *refSynOid = NULL); extern Oid RangeVarGetCreationNamespace(const RangeVar *newRelation); +extern bool CheckRelationCreateAnyPrivilege(Oid userId, char relkind); +extern bool CheckCreatePrivilegeInNamespace(Oid namespaceId, Oid roleId, const char* anyPrivilege); extern Oid RangeVarGetAndCheckCreationNamespace(RangeVar *newRelation, LOCKMODE lockmode, - Oid *existing_relation_id); + Oid *existing_relation_id, + char relkind); extern void RangeVarAdjustRelationPersistence(RangeVar *newRelation, Oid nspid); extern Oid RelnameGetRelid(const char *relname, StringInfo detailInfo = NULL); extern char* RelnameGetRelidExtended(const char *relname, Oid *relOid, Oid *refSynOid = NULL, @@ -216,7 +222,7 @@ extern Oid get_my_temp_schema(); extern void FetchDefaultArgumentPos(int **defpos, int2vector *adefpos, const char *argmodes, int pronallargs); -extern Oid GetUserIdFromNspId(Oid nspid, bool is_securityadmin = false); +extern Oid GetUserIdFromNspId(Oid nspid, bool is_securityadmin = false, bool anyPriv = false); extern void SetTempNamespace(Node *stmt, Oid namespaceOid); extern void setTempToastNspName(); @@ -230,5 +236,9 @@ extern bool IsPackageFunction(List* funcname); extern void recomputeNamespacePath(StringInfo error_info = NULL); extern KeyCandidateList GlobalSettingGetCandidates(const List *names, bool); extern KeyCandidateList CeknameGetCandidates(const List *names, bool); + +extern bool isTableofType(Oid typeOid, Oid* base_oid, Oid* indexbyType); +extern bool isTableofIndexbyType(Oid typeOid); +extern bool IsPlpgsqlLanguageOid(Oid langoid); #endif // !FRONTEND #endif /* NAMESPACE_H */ diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h index f8cf1c85d..ead640dda 100644 --- a/src/include/catalog/pg_attribute.h +++ b/src/include/catalog/pg_attribute.h @@ -34,7 +34,7 @@ #define AttributeRelation_Rowtype_Id 75 #define IsAttributeRelation(rel) (RelationGetRelid(rel) == AttributeRelationId) -#define IsAttributeCache(cache) ((cache)->cc_reloid == AttributeRelationId) +#define IsAttributeCache(reloid) ((reloid) == AttributeRelationId) CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BKI_SCHEMA_MACRO { diff --git a/src/include/catalog/pg_cast.h b/src/include/catalog/pg_cast.h index bca1ccd75..0e0af1e9d 100644 --- a/src/include/catalog/pg_cast.h +++ b/src/include/catalog/pg_cast.h @@ -100,71 +100,71 @@ typedef enum CoercionMethod * int2->int4->int8->int16->numeric->float4->float8, while casts in the * reverse direction are assignment-only. 
*/ -DATA(insert ( 23 5545 5526 i f _null_)); -DATA(insert ( 5545 16 5533 i f _null_)); -DATA(insert ( 16 5545 5534 i f _null_)); -DATA(insert ( 5545 26 5525 i f _null_)); -DATA(insert ( 5545 24 5525 i f _null_)); -DATA(insert ( 5545 2202 5525 i f _null_)); -DATA(insert ( 5545 2203 5525 i f _null_)); -DATA(insert ( 5545 2204 5525 i f _null_)); -DATA(insert ( 5545 2205 5525 i f _null_)); -DATA(insert ( 5545 2206 5525 i f _null_)); -DATA(insert ( 5545 3734 5525 i f _null_)); -DATA(insert ( 5545 3769 5525 i f _null_)); -DATA(insert ( 5545 21 5523 i f _null_)); -DATA(insert ( 5545 23 5525 i f _null_)); -DATA(insert ( 5545 20 5527 i f _null_)); -DATA(insert ( 5545 1700 5521 i f _null_)); -DATA(insert ( 5545 700 5529 i f _null_)); -DATA(insert ( 5545 701 5531 i f _null_)); -DATA(insert ( 20 5545 5528 a f _null_)); -DATA(insert ( 21 5545 5524 i f _null_)); -DATA(insert ( 700 5545 5530 i f _null_)); -DATA(insert ( 701 5545 5532 i f _null_)); -DATA(insert ( 1700 5545 5522 i f _null_)); -DATA(insert ( 20 21 714 a f _null_)); -DATA(insert ( 20 23 480 a f _null_)); -DATA(insert ( 20 700 652 i f _null_)); -DATA(insert ( 20 701 482 i f _null_)); -DATA(insert ( 20 1700 1781 i f _null_)); -DATA(insert ( 21 20 754 i f _null_)); -DATA(insert ( 21 23 313 i f _null_)); -DATA(insert ( 21 700 236 i f _null_)); -DATA(insert ( 21 701 235 i f _null_)); -DATA(insert ( 21 1700 1782 i f _null_)); -DATA(insert ( 23 20 481 i f _null_)); -DATA(insert ( 23 21 314 i f _null_)); -DATA(insert ( 23 700 318 i f _null_)); -DATA(insert ( 23 701 316 i f _null_)); -DATA(insert ( 23 1700 1740 i f _null_)); -DATA(insert ( 700 20 653 i f _null_)); -DATA(insert ( 700 21 238 i f _null_)); -DATA(insert ( 700 23 319 i f _null_)); -DATA(insert ( 700 701 311 i f _null_)); -DATA(insert ( 700 1700 1742 i f _null_)); -DATA(insert ( 701 20 483 i f _null_)); -DATA(insert ( 701 21 237 i f _null_)); -DATA(insert ( 701 23 317 i f _null_)); -DATA(insert ( 701 700 312 i f _null_)); -DATA(insert ( 5545 34 6405 i f _null_)); -DATA(insert ( 34 5545 6406 a f _null_)); -DATA(insert ( 21 34 6407 i f _null_)); -DATA(insert ( 34 21 6408 a f _null_)); -DATA(insert ( 23 34 6409 i f _null_)); -DATA(insert ( 34 23 6410 a f _null_)); -DATA(insert ( 20 34 6411 i f _null_)); -DATA(insert ( 34 20 6412 a f _null_)); -DATA(insert ( 701 34 6413 i f _null_)); -DATA(insert ( 34 701 6414 i f _null_)); -DATA(insert ( 700 34 6415 i f _null_)); -DATA(insert ( 34 700 6416 i f _null_)); -DATA(insert ( 26 34 6417 i f _null_)); -DATA(insert ( 34 26 6418 i f _null_)); -DATA(insert ( 16 34 6419 i f _null_)); -DATA(insert ( 34 16 6420 i f _null_)); -DATA(insert ( 1700 34 6421 i f _null_)); -DATA(insert ( 34 1700 6422 i f _null_)); +DATA(insert ( 23 5545 5526 i f _null_)); +DATA(insert ( 5545 16 5533 i f _null_)); +DATA(insert ( 16 5545 5534 i f _null_)); +DATA(insert ( 5545 26 5525 i f _null_)); +DATA(insert ( 5545 24 5525 i f _null_)); +DATA(insert ( 5545 2202 5525 i f _null_)); +DATA(insert ( 5545 2203 5525 i f _null_)); +DATA(insert ( 5545 2204 5525 i f _null_)); +DATA(insert ( 5545 2205 5525 i f _null_)); +DATA(insert ( 5545 2206 5525 i f _null_)); +DATA(insert ( 5545 3734 5525 i f _null_)); +DATA(insert ( 5545 3769 5525 i f _null_)); +DATA(insert ( 5545 21 5523 i f _null_)); +DATA(insert ( 5545 23 5525 i f _null_)); +DATA(insert ( 5545 20 5527 i f _null_)); +DATA(insert ( 5545 1700 5521 i f _null_)); +DATA(insert ( 5545 700 5529 i f _null_)); +DATA(insert ( 5545 701 5531 i f _null_)); +DATA(insert ( 20 5545 5528 a f _null_)); +DATA(insert ( 21 5545 5524 i f _null_)); 
+DATA(insert ( 700 5545 5530 i f _null_)); +DATA(insert ( 701 5545 5532 i f _null_)); +DATA(insert ( 1700 5545 5522 i f _null_)); +DATA(insert ( 20 21 714 a f _null_)); +DATA(insert ( 20 23 480 a f _null_)); +DATA(insert ( 20 700 652 i f _null_)); +DATA(insert ( 20 701 482 i f _null_)); +DATA(insert ( 20 1700 1781 i f _null_)); +DATA(insert ( 21 20 754 i f _null_)); +DATA(insert ( 21 23 313 i f _null_)); +DATA(insert ( 21 700 236 i f _null_)); +DATA(insert ( 21 701 235 i f _null_)); +DATA(insert ( 21 1700 1782 i f _null_)); +DATA(insert ( 23 20 481 i f _null_)); +DATA(insert ( 23 21 314 i f _null_)); +DATA(insert ( 23 700 318 i f _null_)); +DATA(insert ( 23 701 316 i f _null_)); +DATA(insert ( 23 1700 1740 i f _null_)); +DATA(insert ( 700 20 653 i f _null_)); +DATA(insert ( 700 21 238 i f _null_)); +DATA(insert ( 700 23 319 i f _null_)); +DATA(insert ( 700 701 311 i f _null_)); +DATA(insert ( 700 1700 1742 i f _null_)); +DATA(insert ( 701 20 483 i f _null_)); +DATA(insert ( 701 21 237 i f _null_)); +DATA(insert ( 701 23 317 i f _null_)); +DATA(insert ( 701 700 312 i f _null_)); +DATA(insert ( 5545 34 6405 i f _null_)); +DATA(insert ( 34 5545 6406 a f _null_)); +DATA(insert ( 21 34 6407 i f _null_)); +DATA(insert ( 34 21 6408 a f _null_)); +DATA(insert ( 23 34 6409 i f _null_)); +DATA(insert ( 34 23 6410 a f _null_)); +DATA(insert ( 20 34 6411 i f _null_)); +DATA(insert ( 34 20 6412 a f _null_)); +DATA(insert ( 701 34 6413 i f _null_)); +DATA(insert ( 34 701 6414 i f _null_)); +DATA(insert ( 700 34 6415 i f _null_)); +DATA(insert ( 34 700 6416 i f _null_)); +DATA(insert ( 26 34 6417 i f _null_)); +DATA(insert ( 34 26 6418 i f _null_)); +DATA(insert ( 16 34 6419 i f _null_)); +DATA(insert ( 34 16 6420 i f _null_)); +DATA(insert ( 1700 34 6421 i f _null_)); +DATA(insert ( 34 1700 6422 i f _null_)); /* * convert float8 to numeric implicit(not assignment-only) @@ -190,11 +190,12 @@ DATA(insert ( 16 23 2558 i f _null_)); DATA(insert ( 21 16 3180 i f _null_)); DATA(insert ( 16 21 3181 i f _null_)); /*Bool <-->INT8*/ + DATA(insert ( 20 16 3177 i f _null_)); DATA(insert ( 16 20 3178 i f _null_)); /* Bool <--> Numeric*/ -DATA(insert ( 1700 16 6434 i f _null_)); -DATA(insert ( 16 1700 6433 i f _null_)); +DATA(insert ( 1700 16 6434 i f _null_)); +DATA(insert ( 16 1700 6433 i f _null_)); /* int4 ->bpchar */ diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h index cc8222860..f03129d24 100644 --- a/src/include/catalog/pg_control.h +++ b/src/include/catalog/pg_control.h @@ -79,23 +79,6 @@ typedef struct CheckPointUndo /* you can add more attributes here */ } CheckPointUndo; -typedef struct CheckPointOld { - XLogRecPtrOld redo; /* next RecPtr available when we began to - * create CheckPoint (i.e. 
REDO start point) */ - TimeLineID ThisTimeLineID; /* current TLI */ - bool fullPageWrites; /* current full_page_writes */ - uint32 nextXidEpoch; /* higher-order bits of nextXid */ - ShortTransactionId nextXid; /* next free XID */ - Oid nextOid; /* next free OID */ - ShortTransactionId nextMulti; /* next free MultiXactId */ - ShortTransactionId nextMultiOffset; /* next free MultiXact offset */ - ShortTransactionId oldestXid; /* cluster-wide minimum datfrozenxid */ - Oid oldestXidDB; /* database with minimum datfrozenxid */ - pg_time_t time; /* time stamp of checkpoint */ - ShortTransactionId oldestActiveXid; -} CheckPointOld; - - /* XLOG info values for XLOG rmgr */ #define XLOG_CHECKPOINT_SHUTDOWN 0x00 @@ -255,40 +238,6 @@ typedef struct ControlFileData { pg_crc32c crc; } ControlFileData; -/*This struct is used to verify the checksum of old version control file*/ -typedef struct ControlFileDataOld { - uint64 system_identifier; - uint32 pg_control_version; - uint32 catalog_version_no; - uint32 timeline; - DBState state; - pg_time_t time; - XLogRecPtrOld checkPoint; - XLogRecPtrOld prevCheckPoint; - CheckPointOld checkPointCopy; - XLogRecPtrOld minRecoveryPoint; - XLogRecPtrOld backupStartPoint; - XLogRecPtrOld backupEndPoint; - bool backupEndRequired; - int wal_level; - int MaxConnections; - int max_prepared_xacts; - int max_locks_per_xact; - uint32 maxAlign; - double floatFormat; - uint32 blcksz; - uint32 relseg_size; - uint32 xlog_blcksz; - uint32 xlog_seg_size; - uint32 nameDataLen; - uint32 indexMaxKeys; - uint32 toast_max_chunk_size; - bool enableIntTimes; - bool float4ByVal; - bool float8ByVal; - pg_crc32 crc; -}ControlFileDataOld; - typedef struct LsnXlogFlushData { XLogRecPtr localLsnFlushPoint; /* latest flush buffer's lsn postion on primary*/ XLogRecPtr peerXlogFlushPoint; /* flush xlog postion on standby */ diff --git a/src/include/catalog/pg_job.h b/src/include/catalog/pg_job.h index 36e97497e..3e2d07b7f 100644 --- a/src/include/catalog/pg_job.h +++ b/src/include/catalog/pg_job.h @@ -167,6 +167,7 @@ extern void RemoveJobById(Oid objectId); extern void check_job_permission(HeapTuple tuple, bool check_running = true); extern int jobid_alloc(uint16* pusJobId, int64 job_max_number = JOBID_MAX_NUMBER); extern void update_pg_job_dbname(Oid jobid, const char* dbname); +extern void update_pg_job_username(Oid jobid, const char* username); extern char* get_real_search_schema(); extern void check_interval_valid(int4 job_id, Relation rel, Datum interval); extern void check_job_status(Datum job_status); diff --git a/src/include/catalog/pg_partition_fn.h b/src/include/catalog/pg_partition_fn.h index abb6a554a..92562ebc6 100644 --- a/src/include/catalog/pg_partition_fn.h +++ b/src/include/catalog/pg_partition_fn.h @@ -82,6 +82,7 @@ extern bool isPartitionedObject(Oid relid, char relkind, bool missing_ok); extern bool isPartitionObject(Oid partid, char partkind, bool missing_ok); extern Oid getPartitionIndexOid(Oid indexid, Oid partitionid); extern Oid getPartitionIndexTblspcOid(Oid indexid, Oid partitionid); +extern char* getPartitionIndexName(Oid indexid, Oid partitionid); extern Oid indexPartGetHeapPart(Oid indexPart, bool missing_ok); extern Oid searchPartitionIndexOid(Oid partitionedIndexid, List *pindex); extern List *getPartitionObjectIdList(Oid relid, char relkind); @@ -97,20 +98,27 @@ extern Oid partitionNameGetPartitionOid (Oid partitionedTableOid, Oid *partOidForSubPart = NULL); extern Oid partitionValuesGetPartitionOid(Relation rel, List *partKeyValueList, LOCKMODE lockMode, bool 
topClosed, bool missingOk, bool noWait); +extern Oid subpartitionValuesGetSubpartitionOid(Relation rel, List *partKeyValueList, List *subpartKeyValueList, + LOCKMODE lockMode, bool topClosed, bool missingOk, bool noWait, Oid *partOidForSubPart); extern List *searchPartitionIndexesByblid(Oid blid); extern List *searchPgPartitionByParentId(char parttype, Oid parentId); extern List* searchPgSubPartitionByParentId(char parttype, List *parentOids); extern void freePartList(List *l); extern void freeSubPartList(List* plist); extern HeapTuple searchPgPartitionByParentIdCopy(char parttype, Oid parentId); +extern Oid GetBaseRelOidOfParition(Relation relation); extern List* relationGetPartitionOidList(Relation rel); extern List* RelationGetSubPartitionOidList(Relation rel, LOCKMODE lockmode = AccessShareLock); extern List* RelationGetSubPartitionOidListList(Relation rel); extern List* relationGetPartitionList(Relation relation, LOCKMODE lockmode); +extern List* RelationGetPartitionNameList(Relation relation); +extern void RelationGetSubpartitionInfo(Relation relation, char *subparttype, List **subpartKeyPosList, + int2vector **subpartitionKey); extern List* indexGetPartitionOidList(Relation indexRelation); extern List* indexGetPartitionList(Relation indexRelation, LOCKMODE lockmode); extern Relation SubPartitionGetRelation(Relation heap, Partition indexpart, LOCKMODE lockmode); +extern Partition SubPartitionOidGetPartition(Relation rel, Oid subPartOid, LOCKMODE lockmode); extern Relation SubPartitionOidGetParentRelation(Relation rel, Oid subPartOid, LOCKMODE lockmode); extern List* RelationGetSubPartitionList(Relation relation, LOCKMODE lockmode); extern void releasePartitionList(Relation relation, List** partList, LOCKMODE lockmode, bool validCheck = true); diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index a5137fab2..7e93375fe 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -35,7 +35,7 @@ #define ProcedureRelation_Rowtype_Id 81 #define IsProcRelation(rel) (RelationGetRelid(rel) == ProcedureRelationId) -#define IsProcCache(cache) ((cache)->cc_reloid == ProcedureRelationId) +#define IsProcCache(reloid) ((reloid) == ProcedureRelationId) CATALOG(pg_proc,1255) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81) BKI_SCHEMA_MACRO { @@ -422,6 +422,8 @@ typedef FormData_pg_proc *Form_pg_proc; #define DB4AI_PREDICT_BY_FLOAT8_OID 7106 #define DB4AI_PREDICT_BY_NUMERIC_OID 7107 #define DB4AI_PREDICT_BY_TEXT_OID 7108 +#define DB4AI_PREDICT_BY_FLOAT8ARRAY_OID 7109 +#define DB4AI_EXPLAIN_MODEL_OID 7110 #define JSONAGGFUNCOID 3124 #define JSONOBJECTAGGFUNCOID 3403 @@ -469,6 +471,7 @@ typedef FormData_pg_proc *Form_pg_proc; #define OID_REGEXP_SPLIT_TO_TABLE 2765 #define OID_REGEXP_SPLIT_TO_TABLE_NO_FLAG 2766 +#define OID_ARRAY_UNNEST 2331 /* cast functions oid */ #define INT4TOCHARFUNCOID 78 diff --git a/src/include/catalog/pg_proc_fn.h b/src/include/catalog/pg_proc_fn.h index a24916b01..969db0095 100644 --- a/src/include/catalog/pg_proc_fn.h +++ b/src/include/catalog/pg_proc_fn.h @@ -52,12 +52,18 @@ extern Oid ProcedureCreate(const char *procedureName, extern bool function_parse_error_transpose(const char *prosrc); -extern bool isSameArgList(CreateFunctionStmt* stmt1, CreateFunctionStmt* stmt2); - extern bool isSameParameterList(List* parameterList1, List* parameterList2); extern char* getFuncName(List* funcNameList); -extern oidvector* MakeMd5HashArgTypes(oidvector* paramterTypes); +#ifndef ENABLE_MULTIPLE_NODES +extern char* ConvertArgModesToString(Datum 
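The new `subpartitionValuesGetSubpartitionOid()` extends value-based routing one level down: it resolves both the enclosing level-1 partition and the level-2 subpartition in a single call. A sketch of an insert-side caller, where building the two key-value lists from the tuple is elided:

```
static Oid RouteToSubpartition(Relation rel, List *partKeyValues, List *subpartKeyValues)
{
    Oid partOid = InvalidOid;
    Oid subpartOid = subpartitionValuesGetSubpartitionOid(rel,
        partKeyValues, subpartKeyValues,
        RowExclusiveLock, /* lockMode: about to insert */
        true,             /* topClosed */
        false,            /* missingOk: error out if no subpartition matches */
        false,            /* noWait */
        &partOid);        /* also reports the enclosing level-1 partition */
    return subpartOid;
}
```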
proArgModes); +extern bool IsProArgModesEqual(Datum argModes1, Datum argModes2); +extern bool IsProArgModesEqualByTuple(HeapTuple tup, TupleDesc desc, oidvector* argModes); +extern oidvector* ConvertArgModesToMd5Vector(Datum proArgModes); +extern oidvector* MergeOidVector(oidvector* allArgTypes, oidvector* argModes); +#endif + +extern oidvector* MakeMd5HashOids(oidvector* paramterTypes); extern oidvector* ProcedureGetArgTypes(HeapTuple tuple); diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h index 323d87861..12d1c3a56 100644 --- a/src/include/catalog/pg_subscription.h +++ b/src/include/catalog/pg_subscription.h @@ -86,6 +86,6 @@ extern Oid get_subscription_oid(const char *subname, bool missing_ok); extern char *get_subscription_name(Oid subid, bool missing_ok); extern int CountDBSubscriptions(Oid dbid); - +extern char *DecryptConninfo(char *encryptConninfo); #endif /* PG_SUBSCRIPTION_H */ diff --git a/src/include/catalog/pg_synonym.h b/src/include/catalog/pg_synonym.h index 43aab6fbe..17b8c229f 100644 --- a/src/include/catalog/pg_synonym.h +++ b/src/include/catalog/pg_synonym.h @@ -93,6 +93,7 @@ extern char* CheckReferencedObject(Oid relOid, RangeVar *objVar, const char *syn extern Oid GetSynonymOid(const char *synName, Oid synNamespace, bool missing); extern char* GetQualifiedSynonymName(Oid synOid, bool qualified = true); extern void GetSynonymAndSchemaName(Oid synOid, char **synName_p, char **synSchema_p); +extern void AlterSynonymOwnerByOid(Oid synonymOid, Oid newOwnerId); #endif /* PG_SYNONYM_H */ diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h index 7f7adbf7c..ae313ca88 100644 --- a/src/include/catalog/pg_type.h +++ b/src/include/catalog/pg_type.h @@ -832,8 +832,9 @@ DATA(insert OID = 4407 ( _TdigestData PGNSP PGUID -1 f b A f t \054 0 4406 0 ar #define TYPCATEGORY_USER 'U' #define TYPCATEGORY_BITSTRING 'V' /* er ... "varbit"? */ #define TYPCATEGORY_UNKNOWN 'X' -#define TYPCATEGORY_TABLEOF 'O' /* table of type, index by int */ +#define TYPCATEGORY_TABLEOF 'O' /* table of type */ #define TYPCATEGORY_TABLEOF_VARCHAR 'Q' /* table of type, index by varchar */ +#define TYPCATEGORY_TABLEOF_INTEGER 'F' /* table of type, index by integer */ /* Is a type OID a polymorphic pseudotype? (Beware of multiple evaluation) */ #define IsPolymorphicType(typid) \ diff --git a/src/include/catalog/pg_uid.h b/src/include/catalog/pg_uid.h new file mode 100644 index 000000000..35827e105 --- /dev/null +++ b/src/include/catalog/pg_uid.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2021 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * pg_uid.h + * + * + * IDENTIFICATION + * src/include/catalog/pg_uid.h + * + * --------------------------------------------------------------------------------------- + */ + + +#ifndef PG_UID_H +#define PG_UID_H +#include "catalog/genbki.h" + +#define UidRelationId 8666 +#define UidRelationId_Rowtype_Id (8667) + +#define int8 int64 + +CATALOG(gs_uid,8666) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO +{ + Oid relid; + int8 uid_backup; +} FormData_gs_uid; + +#undef int8 + +typedef FormData_gs_uid* Form_gs_uid; + +#define Natts_gs_uid 2 +#define Anum_gs_uid_relid 1 +#define Anum_gs_uid_backup 2 +#endif /* PG_UID_H */ + diff --git a/src/include/catalog/pg_uid_fn.h b/src/include/catalog/pg_uid_fn.h new file mode 100644 index 000000000..cd9dd1e8c --- /dev/null +++ b/src/include/catalog/pg_uid_fn.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * pg_uid_fn.h + * + * + * IDENTIFICATION + * src/include/catalog/pg_uid_fn.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef PG_UID_FN_H +#define PG_UID_FN_H + +#define BACKUP_NO_START (0) +#define BACKUP_IN_PROGRESS (1) + +#ifdef USE_ASSERT_CHECKING +#define UID_RESTORE_DURATION (5) +#else +#define UID_RESTORE_DURATION (2000000) +#endif + +typedef struct UidHashKey { + Oid dbOid; /* database */ + Oid relOid; /* relation */ +} UidHashKey; + +typedef struct UidHashValue { + /* key field */ + UidHashKey key; + pg_atomic_uint64 currentUid; + pg_atomic_uint64 backupUidRange; + pg_atomic_uint32 backUpState; +} UidHashValue; + +extern void DeleteDatabaseUidEntry(Oid dbOid); +extern void DeleteUidEntry(Oid relid); +extern void InsertUidEntry(Oid relid); +extern void UpdateUidEntry(Oid dbOid, Oid relOid, uint64 backupUid); +extern void InitUidCache(void); +extern uint64 GetNewUidForTuple(Relation relation); +extern void BuildUidHashCache(Oid dbOid, Oid relOid); + +#endif + diff --git a/src/include/catalog/storage_xlog.h b/src/include/catalog/storage_xlog.h index 19dbf5ff5..87e4541fd 100644 --- a/src/include/catalog/storage_xlog.h +++ b/src/include/catalog/storage_xlog.h @@ -38,27 +38,17 @@ typedef struct xl_smgr_create { ForkNumber forkNum; } xl_smgr_create; -typedef struct xl_smgr_create_compress { - xl_smgr_create xlrec; - uint2 pageCompressOpts; -} xl_smgr_create_compress; - typedef struct xl_smgr_truncate { BlockNumber blkno; RelFileNodeOld rnode; } xl_smgr_truncate; -typedef struct xl_smgr_truncate_compress { - xl_smgr_truncate xlrec; - uint2 pageCompressOpts; -} xl_smgr_truncate_compress; - - - extern void log_smgrcreate(RelFileNode *rnode, ForkNumber forkNum); extern void smgr_redo(XLogReaderState *record); extern void smgr_desc(StringInfo buf, XLogReaderState *record); +extern const char* smgr_type_name(uint8 subtype); + extern void smgr_redo_create(RelFileNode rnode, ForkNumber forkNum, char *data); 
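/*
 * [Editorial sketch] The gs_uid catalog and pg_uid_fn.h introduced above form a
 * per-relation unique-id allocator: a shared hash cache keyed by (dbOid, relOid)
 * whose UidHashValue entry holds the live counter (currentUid), the highest value
 * persisted to gs_uid.uid_backup (backupUidRange), and an atomic backup-state flag
 * (BACKUP_NO_START / BACKUP_IN_PROGRESS). A minimal caller-side sketch follows;
 * AssignTupleUid and its dbOid parameter are illustrative assumptions, not part of
 * this patch -- only the pg_uid_fn.h calls are taken from the declarations above.
 */
#include "catalog/pg_uid_fn.h"
#include "utils/rel.h"

static uint64 AssignTupleUid(Relation rel, Oid dbOid)
{
    /* Warm the shared (dbOid, relOid) cache entry; assumed safe to call
     * repeatedly, since the header exposes no separate existence probe. */
    BuildUidHashCache(dbOid, RelationGetRelid(rel));

    /* Atomically draw the next UID from UidHashValue.currentUid; the
     * backupUidRange/backUpState fields suggest a fresh range is persisted
     * to gs_uid.uid_backup before the cached range runs out. */
    return GetNewUidForTuple(rel);
}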
 extern void xlog_block_smgr_redo_truncate(RelFileNode rnode, BlockNumber blkno, XLogRecPtr lsn);
@@ -100,6 +90,7 @@ extern void log_move_segment_buckets(xl_seg_bktentry_tag_t *mapentry, uint32 nen
 extern void log_move_segment_redisinfo(SegRedisInfo *dredis, SegRedisInfo *sredis, Buffer dbuffer, Buffer sbuffer);
 extern void segpage_smgr_redo(XLogReaderState *record);
 extern void segpage_smgr_desc(StringInfo buf, XLogReaderState *record);
+extern const char* segpage_smgr_type_name(uint8 subtype);
 
 typedef struct XLogDataSpaceAllocateExtent {
     uint32 hwm;
@@ -162,6 +153,7 @@ typedef struct XLogMoveExtent {
 } XLogMoveExtent;
 
 struct HTAB* redo_create_remain_segs_htbl();
+extern void move_extent_flush_buffer(XLogMoveExtent *xlog_data);
 
 #endif /* STORAGE_XLOG_H */
diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_602.sql b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_602.sql
new file mode 100644
index 000000000..eb97f7697
--- /dev/null
+++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback-post_catalog_maindb_92_602.sql
@@ -0,0 +1,2394 @@
+
+DO
+$do$
+DECLARE
+query_str text;
+type array_t is varray(10) of varchar2(50);
+rel_array array_t := array[
+    'pg_catalog.pg_proc',
+    'pg_catalog.pg_type',
+    'pg_catalog.pg_attrdef',
+    'pg_catalog.pg_constraint',
+    'pg_catalog.pg_rewrite',
+    'pg_catalog.pg_rewrite',
+    'pg_catalog.pg_trigger',
+    'pg_catalog.pg_rlspolicy'
+];
+att_array array_t := array[
+    'proargdefaults',
+    'typdefaultbin',
+    'adbin',
+    'conbin',
+    'ev_qual',
+    'ev_action',
+    'tgqual',
+    'polqual'
+];
+ans boolean;
+old_version boolean;
+has_version_proc boolean;
+need_upgrade boolean;
+BEGIN
+    FOR ans in select case when count(*) = 1 then true else false end as ans from (select 1 from pg_catalog.pg_proc where proname = 'large_seq_rollback_ntree' limit 1) LOOP
+        IF ans = true then
+            need_upgrade = false;
+            select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc;
+            IF has_version_proc = true then
+                select working_version_num < 92455 as old_version from working_version_num() into old_version;
+                IF old_version = true then
+                    raise info 'Processing sequence APIs';
+                    FOR i IN 1..rel_array.count LOOP
+                        raise info '%.%',rel_array[i],att_array[i];
+                        query_str := 'UPDATE ' || rel_array[i] || ' SET ' || att_array[i] || ' = large_seq_rollback_ntree(' || att_array[i] || ' ) WHERE ' || att_array[i] || ' LIKE ''%:funcid 1574 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 1575 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 2559 :%'';';
+                        EXECUTE query_str;
+                    END LOOP;
+                END IF;
+            END IF;
+        END IF;
+    END LOOP;
+END
+$do$;
+
+DROP FUNCTION IF EXISTS pg_catalog.large_seq_upgrade_ntree(pg_node_tree);
+DROP FUNCTION IF EXISTS pg_catalog.large_seq_rollback_ntree(pg_node_tree);
+DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade;
+
+DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade;
+
+DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade;
+DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status() cascade;
+DROP FUNCTION IF EXISTS pg_catalog.gs_index_advise(cstring);
+SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,4888;
+CREATE OR REPLACE FUNCTION pg_catalog.gs_index_advise(sql_string cstring, OUT schema text, OUT "table" text, OUT "column" text)
+    RETURNS SETOF record
+    LANGUAGE internal
+    STABLE NOT FENCED NOT
SHIPPABLE +AS $function$gs_index_advise$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DROP FUNCTION IF EXISTS pg_catalog.gs_parse_page_bypath(text, bigint, text, boolean) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_lsn(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_xid(xid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_tablepath(text, bigint, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_parsepage_tablepath(text, bigint, text, boolean) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.local_xlog_redo_statics() cascade; +DROP FUNCTION IF EXISTS pg_catalog.local_redo_time_count() cascade; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM 
pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_wlm_session_iostat_info(integer) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5014; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_wlm_session_iostat_info(IN unuseattr integer, OUT threadid bigint, OUT maxcurr_iops integer, OUT mincurr_iops integer, OUT maxpeak_iops integer, OUT minpeak_iops integer, OUT iops_limits integer, OUT io_priority integer, OUT curr_io_limits integer) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'pg_stat_get_wlm_session_iostat_info'; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp 
with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW 
dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + 
T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END 
AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE 
DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, 
text, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int, text) CASCADE; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS pg_catalog.gs_get_active_archiving_standby(); + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_get_warning_for_xlog_force_recycle(); + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); + end if; +END$$; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_standby_cluster_barrier_status() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_set_standby_cluster_target_barrier_id() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_query_standby_cluster_barrier_id_exist() cascade; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_streaming_hadr_rto_and_rpo_stat CASCADE; + end if; + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_local_rto_and_rpo_stat(); + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_remote_rto_and_rpo_stat(); +END$$; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 5077; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_local_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT current_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_local_rto_and_rpo_stat'; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 5078; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_remote_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT current_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_remote_rto_and_rpo_stat'; +CREATE OR REPLACE VIEW DBE_PERF.global_streaming_hadr_rto_and_rpo_stat AS + SELECT hadr_sender_node_name, hadr_receiver_node_name, current_rto, target_rto, current_rpo, target_rpo, current_sleep_time +FROM pg_catalog.gs_hadr_local_rto_and_rpo_stat(); +REVOKE ALL on DBE_PERF.global_streaming_hadr_rto_and_rpo_stat FROM PUBLIC; +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT ALL ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ +GRANT SELECT ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP VIEW IF EXISTS pg_catalog.gs_gsc_memory_detail 
cascade; +DROP VIEW IF EXISTS pg_catalog.gs_lsc_memory_detail cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_table_detail() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_catalog_detail() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_dbstat_info() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_clean() cascade;DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint, OUT trace_id text) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S 
+ ON (C.pid = S.sessionid); + +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_wlm_session_iostat_info(integer) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5014; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_wlm_session_iostat_info(IN unuseattr integer, OUT threadid bigint, OUT maxcurr_iops integer, OUT mincurr_iops integer, OUT maxpeak_iops integer, OUT minpeak_iops integer, OUT iops_limits integer, OUT io_priority integer, OUT curr_io_limits integer) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'pg_stat_get_wlm_session_iostat_info'; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint, OUT trace_id text) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint + ) + RETURNS setof record LANGUAGE INTERNAL 
VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text, out unique_sql_id bigint) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + unique_sql_id := row_data.unique_sql_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname 
AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = 
T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = 
T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || 
';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; + +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_history cascade; + end if; +END$DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched 
bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + +DO $DO$ +DECLARE + ans boolean; + username text; + querystr text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + CREATE VIEW DBE_PERF.statement_history AS + select * from pg_catalog.statement_history; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''''''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := 
row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT 
net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''' and (extract(epoch from (finish_time - start_time)) * 1000000) >= slow_sql_threshold '''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + DROP FUNCTION IF EXISTS pg_catalog.statement_detail_decode() CASCADE; + set local inplace_upgrade_next_system_object_oids = IUO_PROC, 5732; + CREATE OR REPLACE FUNCTION 
pg_catalog.statement_detail_decode + ( IN text, + IN text, + IN boolean) + RETURNS text LANGUAGE INTERNAL NOT FENCED as 'statement_detail_decode'; + + SELECT SESSION_USER INTO username; + IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN + querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + end if; +END$DO$; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5255; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_shared_memctx_detail( +IN context_name cstring, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_shared_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5256; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_thread_memctx_detail( +IN threadid int8, +IN context_name cstring, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_thread_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5254; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_session_memctx_detail( +IN context_name cstring, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_session_memctx_detail';
DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_set() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_init() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_clear() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_status() CASCADE;
DROP FUNCTION IF EXISTS pg_catalog.gs_verify_data_file(bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_file(Oid, text, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_verify_and_tryrepair_page(text, oid, boolean, boolean) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_page(text, oid, bool, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.local_bad_block_info() CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.local_clear_bad_block_info() CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_from_remote(oid, oid, oid, integer, integer, integer, xid, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, integer, integer, xid, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +-- deleting system view +DROP VIEW IF EXISTS pg_catalog.pg_publication_tables; +DROP VIEW IF EXISTS pg_catalog.pg_stat_subscription; +DROP VIEW IF EXISTS pg_catalog.pg_replication_origin_status; + +-- deleting function pg_replication_origin_create +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_create(IN node_name text, OUT replication_origin_oid oid) CASCADE; + +-- deleting function pg_replication_origin_drop +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_drop(IN node_name text, OUT
replication_origin_oid oid) CASCADE; + +-- deleting function pg_replication_origin_oid +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_oid(IN node_name text, OUT replication_origin_oid oid) CASCADE; + +-- deleting function pg_replication_origin_session_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_setup(IN node_name text) CASCADE; + +-- deleting function pg_replication_origin_session_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_reset() CASCADE; + +-- deleting function pg_replication_origin_session_is_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_is_setup() CASCADE; + +-- deleting function pg_replication_origin_session_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_progress(IN flush boolean) CASCADE; + +-- deleting function pg_replication_origin_xact_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_setup(IN origin_lsn text, IN origin_timestamp timestamp with time zone) CASCADE; + +-- deleting function pg_replication_origin_xact_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_reset() CASCADE; + +-- deleting function pg_replication_origin_advance +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_advance(IN node_name text, IN lsn text) CASCADE; + +-- deleting function pg_replication_origin_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_progress(IN node_name text, IN flush boolean) CASCADE; + +-- deleting function pg_show_replication_origin_status +DROP FUNCTION IF EXISTS pg_catalog.pg_show_replication_origin_status(OUT local_id oid, OUT external_id text, OUT remote_lsn text, OUT local_lsn text) CASCADE; + +-- deleting function pg_get_publication_tables +DROP FUNCTION IF EXISTS pg_catalog.pg_get_publication_tables(IN pubname text, OUT relid oid) CASCADE; + +-- deleting function pg_stat_get_subscription +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_subscription(IN subid oid, OUT subid oid, OUT pid integer, OUT received_lsn text, OUT last_msg_send_time timestamp with time zone, OUT last_msg_receipt_time timestamp with time zone, OUT latest_end_lsn text, OUT latest_end_time timestamp with time zone) CASCADE; + +-- deleting system table pg_subscription + +DROP INDEX IF EXISTS pg_catalog.pg_subscription_oid_index; +DROP INDEX IF EXISTS pg_catalog.pg_subscription_subname_index; +DROP TYPE IF EXISTS pg_catalog.pg_subscription; +DROP TABLE IF EXISTS pg_catalog.pg_subscription; + +-- deleting system table pg_replication_origin + +DROP INDEX IF EXISTS pg_catalog.pg_replication_origin_roident_index; +DROP INDEX IF EXISTS pg_catalog.pg_replication_origin_roname_index; +DROP TYPE IF EXISTS pg_catalog.pg_replication_origin; +DROP TABLE IF EXISTS pg_catalog.pg_replication_origin;
DROP FUNCTION IF EXISTS pg_catalog.gs_explain_model(text) cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8_array(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_bool(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float4(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int32(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int64(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_numeric(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS
pg_catalog.db4ai_predict_by_text(text, VARIADIC "any") cascade; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4581; + CREATE FUNCTION pg_catalog.gs_pitr_clean_history_global_barriers + ( + IN stop_barrier_timestamp pg_catalog.timestamptz, + OUT oldest_barrier_record pg_catalog.text + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_clean_history_global_barriers'; + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4580; + CREATE FUNCTION pg_catalog.gs_pitr_archive_slot_force_advance + ( + IN stop_barrier_timestamp pg_catalog.timestamptz, + OUT archive_restart_lsn pg_catalog.text + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_archive_slot_force_advance'; + + end if; +END$$; +DO +$do$ + DECLARE + v5r1c20_and_later_version boolean; + has_version_proc boolean; + need_upgrade boolean; + BEGIN + need_upgrade = false; + select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + IF has_version_proc = true then + select working_version_num >= 92584 as v5r1c20_and_later_version from working_version_num() into v5r1c20_and_later_version; + IF v5r1c20_and_later_version = true then + need_upgrade = true; + end IF; + END IF; + IF need_upgrade = true then + +comment on function PG_CATALOG.regexp_count(text, text) is ''; +comment on function PG_CATALOG.regexp_count(text, text, integer) is ''; +comment on function PG_CATALOG.regexp_count(text, text, integer, text) is ''; +comment on function PG_CATALOG.regexp_instr(text, text) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer, text) is ''; +comment on function PG_CATALOG.lpad(text, integer, text) is ''; +comment on function PG_CATALOG.rpad(text, integer, text) is ''; +comment on function PG_CATALOG.regexp_replace(text, text) is ''; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer) is ''; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer) is ''; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer, text) is ''; +comment on function PG_CATALOG.line_in(cstring) is ''; +comment on function PG_CATALOG.regexp_substr(text, text, integer) is ''; +comment on function PG_CATALOG.regexp_substr(text, text, integer, integer) is ''; +comment on function PG_CATALOG.regexp_substr(text, text, integer, integer, text) is ''; +comment on function PG_CATALOG.pg_stat_get_activity(bigint) is ''; +comment on function PG_CATALOG.to_char(timestamp without time zone, text) is ''; +comment on function PG_CATALOG.pg_replication_origin_advance(text, text) is ''; +comment on function PG_CATALOG.pg_replication_origin_create(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_drop(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_oid(text) is ''; +comment on function 
PG_CATALOG.pg_replication_origin_progress(text, boolean) is ''; +comment on function PG_CATALOG.pg_replication_origin_session_is_setup() is ''; +comment on function PG_CATALOG.pg_replication_origin_session_progress(boolean) is ''; +comment on function PG_CATALOG.pg_replication_origin_session_reset() is ''; +comment on function PG_CATALOG.pg_replication_origin_session_setup(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_xact_reset() is ''; +comment on function PG_CATALOG.pg_replication_origin_xact_setup(text, timestamp with time zone) is ''; +comment on function PG_CATALOG.pg_show_replication_origin_status() is ''; +comment on function PG_CATALOG.pg_get_publication_tables(text) is ''; +comment on function PG_CATALOG.pg_stat_get_subscription(oid) is ''; +comment on function PG_CATALOG.xpath(text, xml, text[]) is ''; +comment on function PG_CATALOG.xpath(text, xml) is ''; +comment on function PG_CATALOG.xpath_exists(text, xml, text[]) is ''; +comment on function PG_CATALOG.json_array_element_text(json, integer) is ''; +comment on function PG_CATALOG.json_extract_path_op(json, text[]) is ''; +comment on function PG_CATALOG.json_extract_path_text_op(json, text[]) is ''; +comment on function PG_CATALOG.jsonb_extract_path_op(jsonb, text[]) is ''; +comment on function PG_CATALOG.json_object_field(json, text) is ''; +comment on function PG_CATALOG.jsonb_array_element(jsonb, integer) is ''; +comment on function PG_CATALOG.jsonb_array_element_text(jsonb, integer) is ''; +comment on function PG_CATALOG.jsonb_contains(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_eq(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_exists(jsonb, text) is ''; +comment on function PG_CATALOG.jsonb_exists_all(jsonb, text[]) is ''; +comment on function PG_CATALOG.jsonb_exists_any(jsonb, text[]) is ''; +comment on function PG_CATALOG.jsonb_extract_path_text_op(jsonb, text[]) is ''; +comment on function PG_CATALOG.jsonb_ge(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_gt(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_le(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_ne(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_object_field(jsonb, text) is ''; +comment on function PG_CATALOG.jsonb_object_field_text(jsonb, text) is ''; +comment on function PG_CATALOG.json_object_field_text(json, text) is ''; +comment on function PG_CATALOG.json_array_element(json, integer) is ''; +comment on function PG_CATALOG.jsonb_lt(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_contained(jsonb, jsonb) is ''; +comment on function PG_CATALOG.has_any_privilege(name, text) is ''; +comment on function PG_CATALOG.int16eq(int16, int16) is ''; +comment on function PG_CATALOG.int16ne(int16, int16) is ''; +comment on function PG_CATALOG.int16lt(int16, int16) is ''; +comment on function PG_CATALOG.int16le(int16, int16) is ''; +comment on function PG_CATALOG.int16gt(int16, int16) is ''; +comment on function PG_CATALOG.int16ge(int16, int16) is ''; +comment on function PG_CATALOG.int16pl(int16, int16) is ''; +comment on function PG_CATALOG.int16mi(int16, int16) is ''; +comment on function PG_CATALOG.int16mul(int16, int16) is ''; +comment on function PG_CATALOG.int16div(int16, int16) is ''; +comment on function PG_CATALOG.array_varchar_first(anyarray) is ''; +comment on function PG_CATALOG.array_varchar_last(anyarray) is ''; +comment on function PG_CATALOG.array_integer_first(anyarray) is ''; +comment on
function PG_CATALOG.array_integer_last(anyarray) is ''; +comment on function PG_CATALOG.array_indexby_length(anyarray, integer) is ''; +comment on function PG_CATALOG.gs_index_verify() is ''; +comment on function PG_CATALOG.gs_index_recycle_queue() is ''; + END IF; + END +$do$; +DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages(boolean); +DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages_pid(boolean); + +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages(in flag boolean) returns table (username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text) AUTHID DEFINER +AS $$ +DECLARE +user_id text; +user_name text; +db_name text; +SQL_STMT VARCHAR2(500); +fail_cursor REFCURSOR; +success_cursor REFCURSOR; +BEGIN + SELECT text(oid) FROM pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT SESSION_USER INTO user_name; + SELECT CURRENT_DATABASE() INTO db_name; + IF flag = true THEN + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN success_cursor FOR EXECUTE SQL_STMT; + --search bottom up for all the success login info + FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo; + IF FOUND THEN + return next; + END IF; + CLOSE success_cursor; + ELSE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo; + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED; + +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages_pid(flag boolean) + RETURNS TABLE(username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text, backendid bigint) AUTHID DEFINER +AS $$ +DECLARE +user_id text; +user_name text; +db_name text; +SQL_STMT VARCHAR2(500); +fail_cursor REFCURSOR; +success_cursor REFCURSOR; +mybackendid bigint; +curSessionFound boolean; +BEGIN + SELECT text(oid) FROM pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT SESSION_USER INTO user_name; + SELECT CURRENT_DATABASE() INTO db_name; + SELECT pg_backend_pid() INTO mybackendid; + curSessionFound = false; + IF flag = true THEN + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN success_cursor
FOR EXECUTE SQL_STMT; + --search bottom up for all the success login info + FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + IF FOUND THEN + return next; + END IF; + END IF; + ELSE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid AND mytype = 'login_success' THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid ; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + END IF; --curSessionFound + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED;
DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, oid, oid, integer) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4770; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_segment_block_from_remote$function$; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, boolean, integer) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4767; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, boolean) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_block_from_remote$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.array_remove(anyarray, anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.array_replace(anyarray, anyelement, anyelement) CASCADE; +DROP AGGREGATE IF EXISTS pg_catalog.last(anyelement) CASCADE; +DROP AGGREGATE IF EXISTS
pg_catalog.first(anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.last_transition(anyelement, anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.first_transition(anyelement, anyelement) CASCADE; + + + +DROP AGGREGATE IF EXISTS pg_catalog.max(inet) CASCADE; +DROP AGGREGATE IF EXISTS pg_catalog.min(inet) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.network_larger(inet, inet) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.network_smaller(inet, inet) CASCADE; + + +DROP FUNCTION IF EXISTS pg_catalog.pg_buffercache_pages() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4130; +CREATE FUNCTION pg_catalog.pg_buffercache_pages( + OUT bufferid integer, + OUT relfilenode oid, + OUT bucketid smallint, + OUT storage_type bigint, + OUT reltablespace oid, + OUT reldatabase oid, + OUT relforknumber integer, + OUT relblocknumber oid, + OUT isdirty boolean, + OUT usage_count smallint) +RETURNS SETOF record +LANGUAGE internal +STABLE NOT FENCED NOT SHIPPABLE ROWS 100 +AS 'pg_buffercache_pages'; + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select tablename from PG_TABLES where tablename='statement_history' and schemaname='pg_catalog' limit 1) into ans; + if ans = true then + TRUNCATE TABLE pg_catalog.statement_history; + DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; + create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + end if; +END$DO$;
DROP FUNCTION IF EXISTS pg_catalog.pg_create_physical_replication_slot_extern(name, boolean, text) cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_create_physical_replication_slot_extern(name, boolean, text, boolean) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3790; +CREATE OR REPLACE FUNCTION pg_catalog.pg_create_physical_replication_slot_extern(slotname name, dummy_standby boolean, extra_content text, OUT slotname text, OUT xlog_position text) + RETURNS record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$pg_create_physical_replication_slot_extern$function$; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int, text) CASCADE; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_602.sql
b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_602.sql new file mode 100644 index 000000000..a9f9b5672 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_maindb/rollback_catalog_maindb_92_602.sql @@ -0,0 +1,814 @@ +declare + has_version_proc boolean; + have_column boolean; +begin + select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + if has_version_proc = true then + select working_version_num >= 92458 as have_column from working_version_num() into have_column; + end if; + + if have_column = false then + DROP INDEX IF EXISTS pg_catalog.pg_proc_proname_all_args_nsp_index; + else + DROP INDEX IF EXISTS pg_catalog.pg_proc_proname_all_args_nsp_index; + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9666; + CREATE UNIQUE INDEX pg_catalog.pg_proc_proname_all_args_nsp_index on pg_catalog.pg_proc USING BTREE(proname name_ops, allargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops); + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + REINDEX INDEX pg_catalog.pg_proc_proname_all_args_nsp_index; + end if; +end; +DROP INDEX IF EXISTS pg_catalog.gs_uid_relid_index; +DROP TYPE IF EXISTS pg_catalog.gs_uid; +DROP TABLE IF EXISTS pg_catalog.gs_uid; +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_wal_entrytable(int8); +DROP FUNCTION IF EXISTS pg_catalog.gs_walwriter_flush_position(); +DROP FUNCTION IF EXISTS pg_catalog.gs_walwriter_flush_stat(int4); +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo();
--drop system function has_any_privilege(user, privilege) +DROP FUNCTION IF EXISTS pg_catalog.has_any_privilege(name, text); + +--drop system view gs_db_privileges +DROP VIEW IF EXISTS pg_catalog.gs_db_privileges; + +--drop indexes on system relation gs_db_privilege +DROP INDEX IF EXISTS gs_db_privilege_oid_index; +DROP INDEX IF EXISTS gs_db_privilege_roleid_index; +DROP INDEX IF EXISTS gs_db_privilege_roleid_privilege_type_index; + +--drop type gs_db_privilege +DROP TYPE IF EXISTS pg_catalog.gs_db_privilege; + +--drop system relation gs_db_privilege +DROP TABLE IF EXISTS pg_catalog.gs_db_privilege; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_record(int8); +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta(int4, int4, int4); +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot(int4, int4); +DROP FUNCTION IF EXISTS pg_catalog.gs_index_verify(oid, oid); +DROP FUNCTION IF EXISTS pg_catalog.gs_index_recycle_queue(oid, oid, oid);
DROP FUNCTION IF EXISTS pg_catalog.pg_logical_get_area_changes() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +do $$DECLARE +ans boolean; +func boolean; +user_name text; +query_str text; +BEGIN + + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + select case when count(*)=1 then true else false end as func from (select * from pg_proc where proname='local_double_write_stat' limit 1) into func; + DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); + DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); + DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; + if func = true then + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; + CREATE FUNCTION pg_catalog.local_double_write_stat + ( + OUT node_name pg_catalog.text,
+ OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; + CREATE FUNCTION pg_catalog.remote_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + + CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + + REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + + GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + end if; + end if; +END$$; +-- deleting system table pg_publication + +DROP INDEX IF EXISTS pg_catalog.pg_publication_oid_index; +DROP INDEX IF EXISTS pg_catalog.pg_publication_pubname_index; +DROP TYPE IF EXISTS pg_catalog.pg_publication; +DROP TABLE IF EXISTS pg_catalog.pg_publication; + +-- deleting system table pg_publication_rel + +DROP INDEX IF EXISTS pg_catalog.pg_publication_rel_oid_index; +DROP INDEX IF EXISTS pg_catalog.pg_publication_rel_map_index; +DROP TYPE IF EXISTS pg_catalog.pg_publication_rel; +DROP TABLE IF EXISTS pg_catalog.pg_publication_rel;
do $$DECLARE +ans boolean; +func boolean; +user_name text; +query_str text; +has_version_proc boolean; +no_file_id boolean; +BEGIN + no_file_id = true; + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + select case when count(*)=1 then true else false end as func from (select * from pg_proc where proname='local_double_write_stat' limit 1) into func; + select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + if has_version_proc = true then + select working_version_num < 92568 as no_file_id from working_version_num() into no_file_id; + end if; + + DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); + DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); + DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; + if func = true then + if no_file_id = true then + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384;
CREATE FUNCTION pg_catalog.local_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; + CREATE FUNCTION pg_catalog.remote_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + + CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + else + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; + CREATE FUNCTION pg_catalog.local_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; + CREATE FUNCTION pg_catalog.remote_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + + CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, file_id, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + end if; + + REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + + GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; + SET LOCAL 
inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + end if; + end if; +END$$; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select * from pg_tables where tablename = 'snap_global_double_write_status' and schemaname = 'snapshot' limit 1) into ans; + if ans = true then + alter table snapshot.snap_global_double_write_status + DROP COLUMN IF EXISTS snap_file_id; + end if; +END$DO$;
SET search_path TO information_schema; + +-- element_types is generated by data_type_privileges +DROP VIEW IF EXISTS information_schema.element_types CASCADE; + +-- data_type_privileges is generated by columns +DROP VIEW IF EXISTS information_schema.data_type_privileges CASCADE; +-- role_column_grants is generated by column_privileges +DROP VIEW IF EXISTS information_schema.role_column_grants CASCADE; +-- role_table_grants is generated by table_privileges +DROP VIEW IF EXISTS information_schema.role_table_grants CASCADE; + +-- other views need upgrade for matview +DROP VIEW IF EXISTS information_schema.column_domain_usage CASCADE; +DROP VIEW IF EXISTS information_schema.column_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.column_udt_usage CASCADE; +DROP VIEW IF EXISTS information_schema.columns CASCADE; +DROP VIEW IF EXISTS information_schema.table_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.tables CASCADE; +DROP VIEW IF EXISTS information_schema.view_column_usage CASCADE; +DROP VIEW IF EXISTS information_schema.view_table_usage CASCADE; + +CREATE VIEW information_schema.column_domain_usage AS + SELECT CAST(current_database() AS sql_identifier) AS domain_catalog, + CAST(nt.nspname AS sql_identifier) AS domain_schema, + CAST(t.typname AS sql_identifier) AS domain_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_type t, pg_namespace nt, pg_class c, pg_namespace nc, + pg_attribute a + + WHERE t.typnamespace = nt.oid + AND c.relnamespace = nc.oid + AND a.attrelid = c.oid + AND a.atttypid = t.oid + AND t.typtype = 'd' + AND c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND a.attnum > 0 + AND NOT a.attisdropped + AND pg_has_role(t.typowner, 'USAGE'); + +CREATE VIEW information_schema.column_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(x.relname AS sql_identifier) AS table_name, + CAST(x.attname AS sql_identifier) AS column_name, + CAST(x.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(x.grantee, x.relowner, 'USAGE') + OR x.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable + + FROM ( + SELECT pr_c.grantor, + pr_c.grantee, + attname, + relname, + relnamespace, + pr_c.prtype, + pr_c.grantable, + pr_c.relowner + FROM (SELECT oid, relname, relnamespace, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* + FROM pg_class + WHERE relkind IN ('r', 'm', 'v', 'f') + ) pr_c (oid, relname, relnamespace, relowner, grantor, grantee, prtype, grantable), + pg_attribute a + WHERE a.attrelid = pr_c.oid + AND a.attnum > 0 + AND NOT a.attisdropped + UNION
SELECT pr_a.grantor, + pr_a.grantee, + attname, + relname, + relnamespace, + pr_a.prtype, + pr_a.grantable, + c.relowner + FROM (SELECT attrelid, attname, (aclexplode(coalesce(attacl, acldefault('c', relowner)))).* + FROM pg_attribute a JOIN pg_class cc ON (a.attrelid = cc.oid) + WHERE attnum > 0 + AND NOT attisdropped + ) pr_a (attrelid, attname, grantor, grantee, prtype, grantable), + pg_class c + WHERE pr_a.attrelid = c.oid + AND relkind IN ('r', 'm', 'v', 'f') + ) x, + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE x.relnamespace = nc.oid + AND x.grantee = grantee.oid + AND x.grantor = u_grantor.oid + AND x.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'REFERENCES', 'COMMENT') + AND (x.relname not like 'mlog_%' AND x.relname not like 'matviewmap_%') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.column_udt_usage AS + SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_attribute a, pg_class c, pg_namespace nc, + (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + + WHERE a.attrelid = c.oid + AND a.atttypid = t.oid + AND nc.oid = c.relnamespace + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND pg_has_role(coalesce(bt.typowner, t.typowner), 'USAGE'); + +CREATE VIEW information_schema.columns AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name, + CAST(a.attnum AS cardinal_number) AS ordinal_position, + CAST(pg_get_expr(ad.adbin, ad.adrelid) AS character_data) AS column_default, + CAST(CASE WHEN a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) THEN 'NO' ELSE 'YES' END + AS yes_or_no) + AS is_nullable, + + CAST( + CASE WHEN t.typtype = 'd' THEN + CASE WHEN bt.typelem <> 0 AND bt.typlen = -1 THEN 'ARRAY' + WHEN nbt.nspname = 'pg_catalog' THEN format_type(t.typbasetype, null) + ELSE 'USER-DEFINED' END + ELSE + CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' + WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, null) + ELSE 'USER-DEFINED' END + END + AS character_data) + AS data_type, + + CAST( + _pg_char_max_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_maximum_length, + + CAST( + _pg_char_octet_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_octet_length, + + CAST( + _pg_numeric_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision, + + CAST( + _pg_numeric_precision_radix(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision_radix, + + CAST( + _pg_numeric_scale(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS 
cardinal_number) + AS numeric_scale, + + CAST( + _pg_datetime_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS datetime_precision, + + CAST( + _pg_interval_type(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS character_data) + AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + + CAST(CASE WHEN t.typtype = 'd' THEN current_database() ELSE null END + AS sql_identifier) AS domain_catalog, + CAST(CASE WHEN t.typtype = 'd' THEN nt.nspname ELSE null END + AS sql_identifier) AS domain_schema, + CAST(CASE WHEN t.typtype = 'd' THEN t.typname ELSE null END + AS sql_identifier) AS domain_name, + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST(a.attnum AS sql_identifier) AS dtd_identifier, + CAST('NO' AS yes_or_no) AS is_self_referencing, + + CAST('NO' AS yes_or_no) AS is_identity, + CAST(null AS character_data) AS identity_generation, + CAST(null AS character_data) AS identity_start, + CAST(null AS character_data) AS identity_increment, + CAST(null AS character_data) AS identity_maximum, + CAST(null AS character_data) AS identity_minimum, + CAST(null AS yes_or_no) AS identity_cycle, + + CAST('NEVER' AS character_data) AS is_generated, + CAST(null AS character_data) AS generation_expression, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '2' AND is_instead) + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '4' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_updatable + + FROM (pg_attribute a LEFT JOIN pg_attrdef ad ON attrelid = adrelid AND attnum = adnum) + JOIN (pg_class c JOIN pg_namespace nc ON (c.relnamespace = nc.oid)) ON a.attrelid = c.oid + JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON a.atttypid = t.oid + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON a.attcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE (NOT pg_is_other_temp_schema(nc.oid)) + + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + + AND (pg_has_role(c.relowner, 'USAGE') + OR has_column_privilege(c.oid, a.attnum, + 'SELECT, INSERT, UPDATE, REFERENCES')); + +CREATE VIEW information_schema.table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS 
sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'm', 'v') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND (c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + OR c.prtype IN ('ALTER', 'DROP', 'COMMENT', 'INDEX', 'VACUUM') + ) + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.tables AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + + CAST( + CASE WHEN nc.oid = pg_my_temp_schema() THEN 'LOCAL TEMPORARY' + WHEN c.relkind = 'r' THEN 'BASE TABLE' + WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW' + WHEN c.relkind = 'v' THEN 'VIEW' + WHEN c.relkind = 'f' THEN 'FOREIGN TABLE' + ELSE null END + AS character_data) AS table_type, + + CAST(null AS sql_identifier) AS self_referencing_column_name, + CAST(null AS character_data) AS reference_generation, + + CAST(CASE WHEN t.typname IS NOT NULL THEN current_database() ELSE null END AS sql_identifier) AS user_defined_type_catalog, + CAST(nt.nspname AS sql_identifier) AS user_defined_type_schema, + CAST(t.typname AS sql_identifier) AS user_defined_type_name, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '3' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_insertable_into, + + CAST(CASE WHEN t.typname IS NOT NULL THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_typed, + CAST(null AS character_data) AS commit_action + + FROM pg_namespace nc JOIN pg_class c ON (nc.oid = c.relnamespace) + LEFT JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON (c.reloftype = t.oid) + + WHERE c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND (NOT pg_is_other_temp_schema(nc.oid)) + AND (pg_has_role(c.relowner, 'USAGE') + OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); + +CREATE VIEW information_schema.view_column_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + 
pg_depend dt, pg_class t, pg_namespace nt, + pg_attribute a + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog_%' AND t.relname not like 'matviewmap_%') + AND t.oid = a.attrelid + AND dt.refobjsubid = a.attnum + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.view_table_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog_%' AND t.relname not like 'matviewmap_%') + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.data_type_privileges AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(x.objschema AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS dtd_identifier + + FROM + ( + SELECT udt_schema, udt_name, 'USER-DEFINED TYPE'::text, dtd_identifier FROM attributes + UNION ALL + SELECT table_schema, table_name, 'TABLE'::text, dtd_identifier FROM columns + UNION ALL + SELECT domain_schema, domain_name, 'DOMAIN'::text, dtd_identifier FROM domains + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM parameters + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM routines + ) AS x (objschema, objname, objtype, objdtdid); + +CREATE VIEW information_schema.role_column_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + column_name, + privilege_type, + is_grantable + FROM column_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.role_table_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + privilege_type, + is_grantable, + with_hierarchy + FROM table_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.element_types AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(n.nspname AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS 
character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS collection_type_identifier, + CAST( + CASE WHEN nbt.nspname = 'pg_catalog' THEN format_type(bt.oid, null) + ELSE 'USER-DEFINED' END AS character_data) AS data_type, + + CAST(null AS cardinal_number) AS character_maximum_length, + CAST(null AS cardinal_number) AS character_octet_length, + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + CAST(null AS cardinal_number) AS numeric_precision, + CAST(null AS cardinal_number) AS numeric_precision_radix, + CAST(null AS cardinal_number) AS numeric_scale, + CAST(null AS cardinal_number) AS datetime_precision, + CAST(null AS character_data) AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(nbt.nspname AS sql_identifier) AS udt_schema, + CAST(bt.typname AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST('a' || CAST(x.objdtdid AS text) AS sql_identifier) AS dtd_identifier + + FROM pg_namespace n, pg_type at, pg_namespace nbt, pg_type bt, + ( + /* columns, attributes */ + SELECT c.relnamespace, CAST(c.relname AS sql_identifier), + CASE WHEN c.relkind = 'c' THEN 'USER-DEFINED TYPE'::text ELSE 'TABLE'::text END, + a.attnum, a.atttypid, a.attcollation + FROM pg_class c, pg_attribute a + WHERE c.oid = a.attrelid + AND c.relkind IN ('r', 'm', 'v', 'f', 'c') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND attnum > 0 AND NOT attisdropped + + UNION ALL + + /* domains */ + SELECT t.typnamespace, CAST(t.typname AS sql_identifier), + 'DOMAIN'::text, 1, t.typbasetype, t.typcollation + FROM pg_type t + WHERE t.typtype = 'd' + + UNION ALL + + /* parameters */ + SELECT pronamespace, CAST(proname || '_' || CAST(oid AS text) AS sql_identifier), + 'ROUTINE'::text, (ss.x).n, (ss.x).x, 0 + FROM (SELECT p.pronamespace, p.proname, p.oid, + _pg_expandarray(coalesce(p.proallargtypes, p.proargtypes::oid[])) AS x + FROM pg_proc p) AS ss + + UNION ALL + + /* result types */ + SELECT p.pronamespace, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier), + 'ROUTINE'::text, 0, p.prorettype, 0 + FROM pg_proc p + + ) AS x (objschema, objname, objtype, objdtdid, objtypeid, objcollation) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON x.objcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE n.oid = x.objschema + AND at.oid = x.objtypeid + AND (at.typelem <> 0 AND at.typlen = -1) + AND at.typelem = bt.oid + AND nbt.oid = bt.typnamespace + + AND (n.nspname, x.objname, x.objtype, CAST(x.objdtdid AS sql_identifier)) IN + ( SELECT object_schema, object_name, object_type, dtd_identifier + FROM data_type_privileges ); + +do $$DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON 
information_schema.element_types TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.data_type_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_column_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_table_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_domain_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_udt_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.columns TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.table_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.tables TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_column_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_table_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END$$; + +GRANT SELECT ON information_schema.element_types TO PUBLIC; +GRANT SELECT ON information_schema.data_type_privileges TO PUBLIC; +GRANT SELECT ON information_schema.role_column_grants TO PUBLIC; +GRANT SELECT ON information_schema.role_table_grants TO PUBLIC; +GRANT SELECT ON information_schema.column_domain_usage TO PUBLIC; +GRANT SELECT ON information_schema.column_privileges TO PUBLIC; +GRANT SELECT ON information_schema.column_udt_usage TO PUBLIC; +GRANT SELECT ON information_schema.columns TO PUBLIC; +GRANT SELECT ON information_schema.table_privileges TO PUBLIC; +GRANT SELECT ON information_schema.tables TO PUBLIC; +GRANT SELECT ON information_schema.view_column_usage TO PUBLIC; +GRANT SELECT ON information_schema.view_table_usage TO PUBLIC; + +RESET search_path; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_602.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_602.sql new file mode 100644 index 000000000..7e2f4ddd7 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback-post_catalog_otherdb_92_602.sql @@ -0,0 +1,2391 @@ + +DO +$do$ +DECLARE +query_str text; +type array_t is varray(10) of varchar2(50); +rel_array array_t := array[ + 'pg_catalog.pg_proc', + 
'pg_catalog.pg_type', + 'pg_catalog.pg_attrdef', + 'pg_catalog.pg_constraint', + 'pg_catalog.pg_rewrite', + 'pg_catalog.pg_rewrite', + 'pg_catalog.pg_trigger', + 'pg_catalog.pg_rlspolicy' +]; +att_array array_t := array[ + 'proargdefaults', + 'typdefaultbin', + 'adbin', + 'conbin', + 'ev_qual', + 'ev_action', + 'tgqual', + 'polqual' +]; +ans boolean; +old_version boolean; +has_version_proc boolean; +need_upgrade boolean; +BEGIN + FOR ans in select case when count(*) = 1 then true else false end as ans from (select 1 from pg_catalog.pg_proc where proname = 'large_seq_rollback_ntree' limit 1) LOOP + IF ans = true then + need_upgrade = false; + select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + IF has_version_proc = true then + select working_version_num < 92455 as old_version from working_version_num() into old_version; + IF old_version = true then + raise info 'Processing sequence APIs'; + FOR i IN 1..rel_array.count LOOP + raise info '%.%',rel_array[i],att_array[i]; + query_str := 'UPDATE ' || rel_array[i] || ' SET ' || att_array[i] || ' = large_seq_rollback_ntree(' || att_array[i] || ' ) WHERE ' || att_array[i] || ' LIKE ''%:funcid 1574 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 1575 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 2559 :%'';'; + EXECUTE query_str; + END LOOP; + END IF; + END IF; + END IF; + END LOOP; +END +$do$; + +DROP FUNCTION IF EXISTS pg_catalog.large_seq_upgrade_ntree(pg_node_tree); +DROP FUNCTION IF EXISTS pg_catalog.large_seq_rollback_ntree(pg_node_tree); +DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_index_advise(cstring); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4888; +CREATE OR REPLACE FUNCTION pg_catalog.gs_index_advise(sql_string cstring, OUT schema text, OUT "table" text, OUT "column" text) + RETURNS SETOF record + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE +AS $function$gs_index_advise$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DROP FUNCTION IF EXISTS pg_catalog.gs_parse_page_bypath(text, bigint, text, boolean) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_lsn(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_xid(xid) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_tablepath(text, bigint, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_parsepage_tablepath(text, bigint, text, boolean) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.local_xlog_redo_statics() cascade; +DROP FUNCTION IF EXISTS pg_catalog.local_redo_time_count() cascade; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time
zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_wlm_session_iostat_info(integer) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5014; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_wlm_session_iostat_info(IN unuseattr integer, OUT threadid bigint, OUT maxcurr_iops integer, OUT mincurr_iops integer, OUT maxpeak_iops integer, OUT minpeak_iops integer, OUT iops_limits integer, OUT io_priority integer, OUT curr_io_limits integer) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'pg_stat_get_wlm_session_iostat_info'; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS 
DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out 
query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + 
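+      -- For each node recorded in dbe_perf.node_name, re-read the local
+      -- replication_stat view and tag every returned row with that node name.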
--Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + backend_start := row_data.backend_start; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S,
pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + 
T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || 
quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int, text) CASCADE; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS pg_catalog.gs_get_active_archiving_standby(); + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_get_warning_for_xlog_force_recycle(); + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); + end if; +END$$; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_standby_cluster_barrier_status() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_set_standby_cluster_target_barrier_id() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_query_standby_cluster_barrier_id_exist() cascade; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = 
true then + DROP VIEW IF EXISTS DBE_PERF.global_streaming_hadr_rto_and_rpo_stat CASCADE; + end if; + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_local_rto_and_rpo_stat(); + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_remote_rto_and_rpo_stat(); +END$$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5077; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_local_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT current_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_local_rto_and_rpo_stat'; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5078; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_remote_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT current_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_remote_rto_and_rpo_stat'; +CREATE OR REPLACE VIEW DBE_PERF.global_streaming_hadr_rto_and_rpo_stat AS + SELECT hadr_sender_node_name, hadr_receiver_node_name, current_rto, target_rto, current_rpo, target_rpo, current_sleep_time +FROM pg_catalog.gs_hadr_local_rto_and_rpo_stat(); +REVOKE ALL on DBE_PERF.global_streaming_hadr_rto_and_rpo_stat FROM PUBLIC; +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT ALL ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ +GRANT SELECT ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP VIEW IF EXISTS pg_catalog.gs_gsc_memory_detail cascade; +DROP VIEW IF EXISTS pg_catalog.gs_lsc_memory_detail cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_table_detail() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_catalog_detail() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_dbstat_info() cascade; +DROP FUNCTION IF EXISTS pg_catalog.gs_gsc_clean() cascade; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint, OUT trace_id text) cascade; + +SET LOCAL
inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_wlm_session_iostat_info(integer) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5014; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_wlm_session_iostat_info(IN unuseattr integer, OUT threadid bigint, OUT maxcurr_iops integer, OUT mincurr_iops integer, OUT maxpeak_iops integer, OUT minpeak_iops integer, OUT iops_limits integer, OUT io_priority integer, OUT curr_io_limits integer) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'pg_stat_get_wlm_session_iostat_info'; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; 
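+        -- The helper function and base view are dropped as well; both are
+        -- recreated below on top of the rebuilt pg_stat_get_activity().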
+ DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint, OUT trace_id text) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text, out unique_sql_id bigint) + RETURNS setof record + AS $$ + DECLARE + 
row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + unique_sql_id := row_data.unique_sql_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM 
dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + backend_start := row_data.backend_start; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() +
RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + 
T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT 
INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; + +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_history cascade; + end if; +END$DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + +DO $DO$ +DECLARE + ans boolean; + username text; + querystr text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + CREATE VIEW DBE_PERF.statement_history AS + select * 
from pg_catalog.statement_history; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''''''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := 
row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''' and (extract(epoch from (finish_time - start_time)) * 1000000) >= slow_sql_threshold '''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := 
row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + DROP FUNCTION IF EXISTS pg_catalog.statement_detail_decode() CASCADE; + set local inplace_upgrade_next_system_object_oids = IUO_PROC, 5732; + CREATE OR REPLACE FUNCTION pg_catalog.statement_detail_decode + ( IN text, + IN text, + IN boolean) + RETURNS text LANGUAGE INTERNAL NOT FENCED as 'statement_detail_decode'; + + SELECT SESSION_USER INTO username; + IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN + querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE DBE_PERF.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT ALL ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + end if; +END$DO$; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5255; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_shared_memctx_detail( +IN context_name cstring, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_shared_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5256; 
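
The `SET LOCAL` just above pins OID 5256 for the `gs_get_thread_memctx_detail` recreation that follows: throughout these scripts, each rebuilt built-in is dropped, its reserved OID is pinned through the `inplace_upgrade_next_system_object_oids` GUC, and the function is recreated as `LANGUAGE INTERNAL` so it binds back to the server's C implementation under a stable catalog OID. A minimal sketch of the idiom, using a hypothetical internal function `my_internal_func` and a placeholder OID (both illustrative only, not part of the patch):

```
-- Sketch of the drop/pin/recreate idiom (hypothetical name and OID).
DROP FUNCTION IF EXISTS pg_catalog.my_internal_func() CASCADE;
-- Pin the OID the next CREATE will consume, so the rebuilt built-in
-- keeps a stable, reserved OID instead of a fresh allocation.
SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9999;
CREATE OR REPLACE FUNCTION pg_catalog.my_internal_func(
IN context_name cstring,
OUT file text,
OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'my_internal_func';
```
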
+CREATE OR REPLACE FUNCTION pg_catalog.gs_get_thread_memctx_detail( +IN threadid int8, +IN context_name cstring, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_thread_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5254; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_session_memctx_detail( +IN context_name cstring, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_session_memctx_detail'; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_set() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_init() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_clear() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_status() CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_verify_data_file(bool) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_file(Oid, text, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_verify_and_tryrepair_page(text, oid, boolean, boolean) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_page(text, oid, bool, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.local_bad_block_info() CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.local_clear_bad_block_info() CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_from_remote(oid, oid, oid, integer, integer, integer, xid, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, integer, integer, xid, integer) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +-- deleting system view +DROP VIEW IF EXISTS pg_catalog.pg_publication_tables; +DROP VIEW IF EXISTS pg_catalog.pg_stat_subscription; +DROP VIEW IF EXISTS pg_catalog.pg_replication_origin_status; + +-- deleting function pg_replication_origin_create +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_create(IN node_name text, OUT replication_origin_oid oid) CASCADE; + +-- deleting function pg_replication_origin_drop +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_drop(IN node_name text, OUT replication_origin_oid oid) CASCADE; + +-- deleting function pg_replication_origin_oid +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_oid(IN node_name text, OUT replication_origin_oid oid) CASCADE; + +-- deleting function pg_replication_origin_session_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_setup(IN node_name text) CASCADE; + +-- deleting function pg_replication_origin_session_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_reset() CASCADE; + +-- deleting function pg_replication_origin_session_is_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_is_setup() CASCADE; + +-- deleting function pg_replication_origin_session_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_progress(IN flush boolean) CASCADE; + +-- deleting function pg_replication_origin_xact_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_setup(IN origin_lsn text, IN origin_timestamp timestamp with time zone) CASCADE; + +-- deleting function pg_replication_origin_xact_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_reset() CASCADE; + +-- deleting function pg_replication_origin_advance +DROP FUNCTION IF EXISTS 
pg_catalog.pg_replication_origin_advance(IN node_name text, IN lsn text) CASCADE; + +-- deleting function pg_replication_origin_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_progress(IN node_name text, IN flush boolean) CASCADE; + +-- deleting function pg_show_replication_origin_status +DROP FUNCTION IF EXISTS pg_catalog.pg_show_replication_origin_status(OUT local_id oid, OUT external_id text, OUT remote_lsn text, OUT local_lsn text) CASCADE; + +-- deleting function pg_get_publication_tables +DROP FUNCTION IF EXISTS pg_catalog.pg_get_publication_tables(IN pubname text, OUT relid oid) CASCADE; + +-- deleting function pg_stat_get_subscription +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_subscription(IN subid oid, OUT subid oid, OUT pid integer, OUT received_lsn text, OUT last_msg_send_time timestamp with time zone, OUT last_msg_receipt_time timestamp with time zone, OUT latest_end_lsn text, OUT latest_end_time timestamp with time zone) CASCADE; + +-- deleting system table pg_subscription + +DROP INDEX IF EXISTS pg_catalog.pg_subscription_oid_index; +DROP INDEX IF EXISTS pg_catalog.pg_subscription_subname_index; +DROP TYPE IF EXISTS pg_catalog.pg_subscription; +DROP TABLE IF EXISTS pg_catalog.pg_subscription; + +-- deleting system table pg_replication_origin + +DROP INDEX IF EXISTS pg_catalog.pg_replication_origin_roident_index; +DROP INDEX IF EXISTS pg_catalog.pg_replication_origin_roname_index; +DROP TYPE IF EXISTS pg_catalog.pg_replication_origin; +DROP TABLE IF EXISTS pg_catalog.pg_replication_origin; +DROP FUNCTION IF EXISTS pg_catalog.gs_explain_model(text) cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8_array(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_bool(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float4(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int32(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int64(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_numeric(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_text(text, VARIADIC "any") cascade; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4581; + CREATE FUNCTION pg_catalog.gs_pitr_clean_history_global_barriers + ( + IN stop_barrier_timestamp pg_catalog.timestamptz, + OUT oldest_barrier_record pg_catalog.text + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_clean_history_global_barriers'; + DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4580; + CREATE FUNCTION pg_catalog.gs_pitr_archive_slot_force_advance + ( + IN stop_barrier_timestamp pg_catalog.timestamptz, + OUT archive_restart_lsn pg_catalog.text + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_archive_slot_force_advance'; + + end if; +END$$; +DO +$do$ + DECLARE + v5r1c20_and_later_version boolean; + has_version_proc boolean; + need_upgrade boolean; + BEGIN + need_upgrade = false; + select case when count(*)=1 then true 
else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + IF has_version_proc = true then + select working_version_num >= 92584 as v5r1c20_and_later_version from working_version_num() into v5r1c20_and_later_version; + IF v5r1c20_and_later_version = true then + need_upgrade = true; + end IF; + END IF; + IF need_upgrade = true then + +comment on function PG_CATALOG.regexp_count(text, text) is ''; +comment on function PG_CATALOG.regexp_count(text, text, integer) is ''; +comment on function PG_CATALOG.regexp_count(text, text, integer, text) is ''; +comment on function PG_CATALOG.regexp_instr(text, text) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer) is ''; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer, text) is ''; +comment on function PG_CATALOG.lpad(text, integer, text) is ''; +comment on function PG_CATALOG.rpad(text, integer, text) is ''; +comment on function PG_CATALOG.regexp_replace(text, text) is ''; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer) is ''; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer) is ''; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer, text) is ''; +comment on function PG_CATALOG.line_in(cstring) is ''; +comment on function PG_CATALOG.regexp_substr(text, text, integer) is ''; +comment on function PG_CATALOG.regexp_substr(text, text, integer, integer) is ''; +comment on function PG_CATALOG.regexp_substr(text, text, integer, integer, text) is ''; +comment on function PG_CATALOG.pg_stat_get_activity(bigint) is ''; +comment on function PG_CATALOG.to_char(timestamp without time zone, text) is ''; +comment on function PG_CATALOG.pg_replication_origin_advance(text, text) is ''; +comment on function PG_CATALOG.pg_replication_origin_create(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_drop(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_oid(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_progress(text, boolean) is ''; +comment on function PG_CATALOG.pg_replication_origin_session_is_setup() is ''; +comment on function PG_CATALOG.pg_replication_origin_session_progress(boolean) is ''; +comment on function PG_CATALOG.pg_replication_origin_session_reset() is ''; +comment on function PG_CATALOG.pg_replication_origin_session_setup(text) is ''; +comment on function PG_CATALOG.pg_replication_origin_xact_reset() is ''; +comment on function PG_CATALOG.pg_replication_origin_xact_setup(text, timestamp with time zone) is ''; +comment on function PG_CATALOG.pg_show_replication_origin_status() is ''; +comment on function PG_CATALOG.pg_get_publication_tables(text) is ''; +comment on function PG_CATALOG.pg_stat_get_subscription(oid) is ''; +comment on function PG_CATALOG.xpath(text, xml, text[]) is ''; +comment on function PG_CATALOG.xpath(text, xml) is ''; +comment on function PG_CATALOG.xpath_exists(text, xml, text[]) is ''; +comment on function PG_CATALOG.json_array_element_text(json, integer) is ''; +comment on function PG_CATALOG.json_extract_path_op(json, text[]) is ''; +comment on function PG_CATALOG.json_extract_path_text_op(json, text[]) is ''; +comment on function PG_CATALOG.jsonb_extract_path_op(jsonb, text[]) is ''; 
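
The run of `COMMENT ON FUNCTION ... IS ''` statements above, which continues below, clears stale descriptions on built-ins that this upgrade rebuilds; the enclosing DO block only executes it when `working_version_num()` reports at least 92584, i.e. a V5R1C20-or-later source cluster. Under stock PostgreSQL semantics, `COMMENT ... IS ''` behaves like `IS NULL` and removes the `pg_description` entry, so a hypothetical post-upgrade spot check (illustrative only, not part of the patch) would expect NULL:

```
-- Expect func_comment IS NULL for functions covered by the reset run.
SELECT p.proname, obj_description(p.oid, 'pg_proc') AS func_comment
FROM pg_proc p
JOIN pg_namespace n ON n.oid = p.pronamespace
WHERE p.proname = 'regexp_count' AND n.nspname = 'pg_catalog';
```
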
+comment on function PG_CATALOG.json_object_field(json, text) is ''; +comment on function PG_CATALOG.jsonb_array_element(jsonb, integer) is ''; +comment on function PG_CATALOG.jsonb_array_element_text(jsonb, integer) is ''; +comment on function PG_CATALOG.jsonb_contains(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_eq(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_exists(jsonb, text) is ''; +comment on function PG_CATALOG.jsonb_exists_all(jsonb, text[]) is ''; +comment on function PG_CATALOG.jsonb_exists_any(jsonb, text[]) is ''; +comment on function PG_CATALOG.jsonb_extract_path_text_op(jsonb, text[]) is ''; +comment on function PG_CATALOG.jsonb_ge(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_gt(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_le(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_ne(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_object_field(jsonb, text) is ''; +comment on function PG_CATALOG.jsonb_object_field_text(jsonb, text) is ''; +comment on function PG_CATALOG.json_object_field_text(json, text) is ''; +comment on function PG_CATALOG.json_array_element(json, integer) is ''; +comment on function PG_CATALOG.jsonb_lt(jsonb, jsonb) is ''; +comment on function PG_CATALOG.jsonb_contained(jsonb, jsonb) is ''; +comment on function PG_CATALOG.has_any_privilege(name, text) is ''; +comment on function PG_CATALOG.int16eq(int16, int16) is ''; +comment on function PG_CATALOG.int16ne(int16, int16) is ''; +comment on function PG_CATALOG.int16lt(int16, int16) is ''; +comment on function PG_CATALOG.int16le(int16, int16) is ''; +comment on function PG_CATALOG.int16gt(int16, int16) is ''; +comment on function PG_CATALOG.int16ge(int16, int16) is ''; +comment on function PG_CATALOG.int16pl(int16, int16) is ''; +comment on function PG_CATALOG.int16eq(int16, int16) is ''; +comment on function PG_CATALOG.int16mi(int16, int16) is ''; +comment on function PG_CATALOG.int16mul(int16, int16) is ''; +comment on function PG_CATALOG.int16div(int16, int16) is ''; +comment on function PG_CATALOG.array_varchar_first(anyarray) is ''; +comment on function PG_CATALOG.array_varchar_last(anyarray) is ''; +comment on function PG_CATALOG.array_integer_first(anyarray) is ''; +comment on function PG_CATALOG.array_integer_last(anyarray) is ''; +comment on function PG_CATALOG.array_indexby_length(anyarray, integer) is ''; +comment on function PG_CATALOG.gs_index_verify() is ''; +comment on function PG_CATALOG.gs_index_recycle_queue() is ''; +comment on function PG_CATALOG.int16div(int16, int16) is ''; + END IF; + END +$do$; +DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages(boolean); +DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages_pid(boolean); + +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages(in flag boolean) returns table (username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text) AUTHID DEFINER +AS $$ +DECLARE +user_id text; +user_name text; +db_name text; +SQL_STMT VARCHAR2(500); +fail_cursor REFCURSOR; +success_cursor REFCURSOR; +BEGIN + SELECT text(oid) FROM pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT SESSION_USER INTO user_name; + SELECT CURRENT_DATABASE() INTO db_name; + IF flag = true THEN + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || 
' AND userid =' || quote_literal(user_id) || ';'; + OPEN success_cursor FOR EXECUTE SQL_STMT; + --search bottom up for all the success login info + FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo; + IF FOUND THEN + return next; + END IF; + CLOSE success_cursor; + ELSE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo; + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED; + +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages_pid(flag boolean) + RETURNS TABLE(username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text, backendid bigint) AUTHID DEFINER +AS $$ +DECLARE +user_id text; +user_name text; +db_name text; +SQL_STMT VARCHAR2(500); +fail_cursor REFCURSOR; +success_cursor REFCURSOR; +mybackendid bigint; +curSessionFound boolean; +BEGIN + SELECT text(oid) FROM pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT SESSION_USER INTO user_name; + SELECT CURRENT_DATABASE() INTO db_name; + SELECT pg_backend_pid() INTO mybackendid; + curSessionFound = false; + IF flag = true THEN + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN success_cursor FOR EXECUTE SQL_STMT; + --search bottom up for all the success login info + FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + IF FOUND THEN + return next; + END IF; + END IF; + ELSE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid AND mytype = 'login_success' THEN 
+ --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid ; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + END IF; --curSessionFound + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, oid, oid, integer) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4770; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_segment_block_from_remote$function$; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, boolean, integer) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4767; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, boolean) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_block_from_remote$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.array_remove(anyarray, anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.array_replace(anyarray, anyelement, anyelement) CASCADE; +DROP AGGREGATE IF EXISTS pg_catalog.last(anyelement) CASCADE; +DROP AGGREGATE IF EXISTS pg_catalog.first(anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.last_transition(anyelement, anyelement) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.first_transition(anyelement, anyelement) CASCADE; + + +DROP AGGREGATE IF EXISTS pg_catalog.max(inet) CASCADE; +DROP AGGREGATE IF EXISTS pg_catalog.min(inet) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.network_larger(inet, inet) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.network_smaller(inet, inet) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.pg_buffercache_pages() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4130; +CREATE FUNCTION pg_catalog.pg_buffercache_pages( + OUT bufferid integer, + OUT relfilenode oid, + OUT bucketid smallint, + OUT storage_type bigint, + OUT reltablespace oid, + OUT reldatabase oid, + OUT relforknumber integer, + OUT relblocknumber oid, + OUT isdirty boolean, + OUT usage_count smallint) +RETURNS SETOF record +LANGUAGE internal +STABLE NOT FENCED NOT SHIPPABLE ROWS 100 +AS 'pg_buffercache_pages'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select tablename from PG_TABLES where tablename='statement_history' 
and schemaname='pg_catalog' limit 1) into ans; + if ans = true then + TRUNCATE TABLE pg_catalog.statement_history; + DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; + create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + end if; +END$DO$; +DROP FUNCTION IF EXISTS pg_catalog.pg_create_physical_replication_slot_extern(name, boolean, text) cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_create_physical_replication_slot_extern(name, boolean, text, boolean) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3790; +CREATE OR REPLACE FUNCTION pg_catalog.pg_create_physical_replication_slot_extern(slotname name, dummy_standby boolean, extra_content text, OUT slotname text, OUT xlog_position text) + RETURNS record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$pg_create_physical_replication_slot_extern$function$; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int, text) CASCADE; diff --git a/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_602.sql b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_602.sql new file mode 100644 index 000000000..a9f9b5672 --- /dev/null +++ b/src/include/catalog/upgrade_sql/rollback_catalog_otherdb/rollback_catalog_otherdb_92_602.sql @@ -0,0 +1,814 @@ +declare + has_version_proc boolean; + have_column boolean; +begin + select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + if has_version_proc = true then + select working_version_num >= 92458 as have_column from working_version_num() into have_column; + end if; + + if have_column = false then + DROP INDEX IF EXISTS pg_catalog.pg_proc_proname_all_args_nsp_index; + else + DROP INDEX IF EXISTS pg_catalog.pg_proc_proname_all_args_nsp_index; + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9666; + CREATE UNIQUE INDEX pg_catalog.pg_proc_proname_all_args_nsp_index on pg_catalog.pg_proc USING BTREE(proname name_ops, allargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops); + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + REINDEX INDEX pg_catalog.pg_proc_proname_all_args_nsp_index; + end if; +end; +DROP INDEX 
IF EXISTS pg_catalog.gs_uid_relid_index; +DROP TYPE IF EXISTS pg_catalog.gs_uid; +DROP TABLE IF EXISTS pg_catalog.gs_uid; +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_wal_entrytable(int8); +DROP FUNCTION IF EXISTS pg_catalog.gs_walwriter_flush_position(); +DROP FUNCTION IF EXISTS pg_catalog.gs_walwriter_flush_stat(int4); +DROP FUNCTION IF EXISTS pg_catalog.gs_stat_undo(); +--drop system function has_any_privilege(user, privilege) +DROP FUNCTION IF EXISTS pg_catalog.has_any_privilege(name, text); + +--drop system view gs_db_privileges +DROP VIEW IF EXISTS pg_catalog.gs_db_privileges; + +--drop indexes on system relation gs_db_privilege +DROP INDEX IF EXISTS gs_db_privilege_oid_index; +DROP INDEX IF EXISTS gs_db_privilege_roleid_index; +DROP INDEX IF EXISTS gs_db_privilege_roleid_privilege_type_index; + +--drop type gs_db_privilege +DROP TYPE IF EXISTS pg_catalog.gs_db_privilege; + +--drop system relation gs_db_privilege +DROP TABLE IF EXISTS pg_catalog.gs_db_privilege; +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_record(int8); +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta(int4, int4, int4); +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot(int4, int4); +DROP FUNCTION IF EXISTS pg_catalog.gs_index_verify(oid, oid); +DROP FUNCTION IF EXISTS pg_catalog.gs_index_recycle_queue(oid, oid, oid); +DROP FUNCTION IF EXISTS pg_catalog.pg_logical_get_area_changes() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +do $$DECLARE +ans boolean; +func boolean; +user_name text; +query_str text; +BEGIN + + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + select case when count(*)=1 then true else false end as func from (select * from pg_proc where proname='local_double_write_stat' limit 1) into func; + DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); + DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); + DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; + if func = true then + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; + CREATE FUNCTION pg_catalog.local_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; + CREATE FUNCTION pg_catalog.remote_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + + CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, 
high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + + REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + + GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + end if; + end if; +END$$; +-- deleting system table pg_publication + +DROP INDEX IF EXISTS pg_catalog.pg_publication_oid_index; +DROP INDEX IF EXISTS pg_catalog.pg_publication_pubname_index; +DROP TYPE IF EXISTS pg_catalog.pg_publication; +DROP TABLE IF EXISTS pg_catalog.pg_publication; + +-- deleting system table pg_publication_rel + +DROP INDEX IF EXISTS pg_catalog.pg_publication_rel_oid_index; +DROP INDEX IF EXISTS pg_catalog.pg_publication_rel_map_index; +DROP TYPE IF EXISTS pg_catalog.pg_publication_rel; +DROP TABLE IF EXISTS pg_catalog.pg_publication_rel; +do $$DECLARE +ans boolean; +func boolean; +user_name text; +query_str text; +has_version_proc boolean; +no_file_id boolean; +BEGIN + no_file_id = true; + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + select case when count(*)=1 then true else false end as func from (select * from pg_proc where proname='local_double_write_stat' limit 1) into func; + select case when count(*)=1 then true else false end as has_version_proc from (select * from pg_proc where proname = 'working_version_num' limit 1) into has_version_proc; + if has_version_proc = true then + select working_version_num < 92568 as no_file_id from working_version_num() into no_file_id; + end if; + + DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); + DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); + DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; + if func = true then + if no_file_id = true then + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; + CREATE FUNCTION pg_catalog.local_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; + CREATE FUNCTION pg_catalog.remote_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + + CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, 
curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + else + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; + CREATE FUNCTION pg_catalog.local_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + + SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; + CREATE FUNCTION pg_catalog.remote_double_write_stat + ( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 + ) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + + CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, file_id, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + end if; + + REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + + GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + end if; + end if; +END$$; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select * from pg_tables where tablename = 'snap_global_double_write_status' and schemaname = 'snapshot' limit 1) into ans; + if ans = true then + alter table snapshot.snap_global_double_write_status + DROP COLUMN IF EXISTS snap_file_id; + end if; +END$DO$; +SET search_path TO information_schema; + +-- element_types is generated by data_type_privileges +DROP VIEW IF EXISTS information_schema.element_types CASCADE; + +-- data_type_privileges is generated by columns +DROP VIEW IF EXISTS information_schema.data_type_privileges CASCADE; +-- role_column_grants is generated by column_privileges +DROP VIEW IF EXISTS information_schema.role_column_grants CASCADE; +-- role_table_grants is generated by table_privileges +DROP VIEW IF EXISTS information_schema.role_table_grants CASCADE; + +-- other views need upgrade for matview +DROP VIEW IF EXISTS information_schema.column_domain_usage CASCADE; +DROP VIEW IF EXISTS information_schema.column_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.column_udt_usage CASCADE; +DROP VIEW IF EXISTS 
information_schema.columns CASCADE; +DROP VIEW IF EXISTS information_schema.table_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.tables CASCADE; +DROP VIEW IF EXISTS information_schema.view_column_usage CASCADE; +DROP VIEW IF EXISTS information_schema.view_table_usage CASCADE; + +CREATE VIEW information_schema.column_domain_usage AS + SELECT CAST(current_database() AS sql_identifier) AS domain_catalog, + CAST(nt.nspname AS sql_identifier) AS domain_schema, + CAST(t.typname AS sql_identifier) AS domain_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_type t, pg_namespace nt, pg_class c, pg_namespace nc, + pg_attribute a + + WHERE t.typnamespace = nt.oid + AND c.relnamespace = nc.oid + AND a.attrelid = c.oid + AND a.atttypid = t.oid + AND t.typtype = 'd' + AND c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND a.attnum > 0 + AND NOT a.attisdropped + AND pg_has_role(t.typowner, 'USAGE'); + +CREATE VIEW information_schema.column_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(x.relname AS sql_identifier) AS table_name, + CAST(x.attname AS sql_identifier) AS column_name, + CAST(x.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(x.grantee, x.relowner, 'USAGE') + OR x.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable + + FROM ( + SELECT pr_c.grantor, + pr_c.grantee, + attname, + relname, + relnamespace, + pr_c.prtype, + pr_c.grantable, + pr_c.relowner + FROM (SELECT oid, relname, relnamespace, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* + FROM pg_class + WHERE relkind IN ('r', 'm', 'v', 'f') + ) pr_c (oid, relname, relnamespace, relowner, grantor, grantee, prtype, grantable), + pg_attribute a + WHERE a.attrelid = pr_c.oid + AND a.attnum > 0 + AND NOT a.attisdropped + UNION + SELECT pr_a.grantor, + pr_a.grantee, + attname, + relname, + relnamespace, + pr_a.prtype, + pr_a.grantable, + c.relowner + FROM (SELECT attrelid, attname, (aclexplode(coalesce(attacl, acldefault('c', relowner)))).* + FROM pg_attribute a JOIN pg_class cc ON (a.attrelid = cc.oid) + WHERE attnum > 0 + AND NOT attisdropped + ) pr_a (attrelid, attname, grantor, grantee, prtype, grantable), + pg_class c + WHERE pr_a.attrelid = c.oid + AND relkind IN ('r', 'm', 'v', 'f') + ) x, + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE x.relnamespace = nc.oid + AND x.grantee = grantee.oid + AND x.grantor = u_grantor.oid + AND x.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'REFERENCES', 'COMMENT') + AND (x.relname not like 'mlog_%' AND x.relname not like 'matviewmap_%') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.column_udt_usage AS + SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + 
CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_attribute a, pg_class c, pg_namespace nc, + (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + + WHERE a.attrelid = c.oid + AND a.atttypid = t.oid + AND nc.oid = c.relnamespace + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND pg_has_role(coalesce(bt.typowner, t.typowner), 'USAGE'); + +CREATE VIEW information_schema.columns AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name, + CAST(a.attnum AS cardinal_number) AS ordinal_position, + CAST(pg_get_expr(ad.adbin, ad.adrelid) AS character_data) AS column_default, + CAST(CASE WHEN a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) THEN 'NO' ELSE 'YES' END + AS yes_or_no) + AS is_nullable, + + CAST( + CASE WHEN t.typtype = 'd' THEN + CASE WHEN bt.typelem <> 0 AND bt.typlen = -1 THEN 'ARRAY' + WHEN nbt.nspname = 'pg_catalog' THEN format_type(t.typbasetype, null) + ELSE 'USER-DEFINED' END + ELSE + CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' + WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, null) + ELSE 'USER-DEFINED' END + END + AS character_data) + AS data_type, + + CAST( + _pg_char_max_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_maximum_length, + + CAST( + _pg_char_octet_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_octet_length, + + CAST( + _pg_numeric_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision, + + CAST( + _pg_numeric_precision_radix(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision_radix, + + CAST( + _pg_numeric_scale(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_scale, + + CAST( + _pg_datetime_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS datetime_precision, + + CAST( + _pg_interval_type(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS character_data) + AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + + CAST(CASE WHEN t.typtype = 'd' THEN current_database() ELSE null END + AS sql_identifier) AS domain_catalog, + CAST(CASE WHEN t.typtype = 'd' THEN nt.nspname ELSE null END + AS sql_identifier) AS domain_schema, + CAST(CASE WHEN t.typtype = 'd' THEN t.typname ELSE null END + AS sql_identifier) AS domain_name, + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + + 
CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST(a.attnum AS sql_identifier) AS dtd_identifier, + CAST('NO' AS yes_or_no) AS is_self_referencing, + + CAST('NO' AS yes_or_no) AS is_identity, + CAST(null AS character_data) AS identity_generation, + CAST(null AS character_data) AS identity_start, + CAST(null AS character_data) AS identity_increment, + CAST(null AS character_data) AS identity_maximum, + CAST(null AS character_data) AS identity_minimum, + CAST(null AS yes_or_no) AS identity_cycle, + + CAST('NEVER' AS character_data) AS is_generated, + CAST(null AS character_data) AS generation_expression, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '2' AND is_instead) + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '4' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_updatable + + FROM (pg_attribute a LEFT JOIN pg_attrdef ad ON attrelid = adrelid AND attnum = adnum) + JOIN (pg_class c JOIN pg_namespace nc ON (c.relnamespace = nc.oid)) ON a.attrelid = c.oid + JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON a.atttypid = t.oid + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON a.attcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE (NOT pg_is_other_temp_schema(nc.oid)) + + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + + AND (pg_has_role(c.relowner, 'USAGE') + OR has_column_privilege(c.oid, a.attnum, + 'SELECT, INSERT, UPDATE, REFERENCES')); + +CREATE VIEW information_schema.table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'm', 'v') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND (c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + OR c.prtype IN ('ALTER', 'DROP', 'COMMENT', 'INDEX', 'VACUUM') + ) + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW 
information_schema.tables AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + + CAST( + CASE WHEN nc.oid = pg_my_temp_schema() THEN 'LOCAL TEMPORARY' + WHEN c.relkind = 'r' THEN 'BASE TABLE' + WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW' + WHEN c.relkind = 'v' THEN 'VIEW' + WHEN c.relkind = 'f' THEN 'FOREIGN TABLE' + ELSE null END + AS character_data) AS table_type, + + CAST(null AS sql_identifier) AS self_referencing_column_name, + CAST(null AS character_data) AS reference_generation, + + CAST(CASE WHEN t.typname IS NOT NULL THEN current_database() ELSE null END AS sql_identifier) AS user_defined_type_catalog, + CAST(nt.nspname AS sql_identifier) AS user_defined_type_schema, + CAST(t.typname AS sql_identifier) AS user_defined_type_name, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '3' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_insertable_into, + + CAST(CASE WHEN t.typname IS NOT NULL THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_typed, + CAST(null AS character_data) AS commit_action + + FROM pg_namespace nc JOIN pg_class c ON (nc.oid = c.relnamespace) + LEFT JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON (c.reloftype = t.oid) + + WHERE c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND (NOT pg_is_other_temp_schema(nc.oid)) + AND (pg_has_role(c.relowner, 'USAGE') + OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); + +CREATE VIEW information_schema.view_column_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt, + pg_attribute a + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog_%' AND t.relname not like 'matviewmap_%') + AND t.oid = a.attrelid + AND dt.refobjsubid = a.attnum + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.view_table_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' 
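+ -- Note: as in the other information_schema views recreated above, the
+ -- predicates below also exclude the mlog_%/matviewmap_% helper tables that
+ -- openGauss maintains internally for materialized views.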
+ AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog_%' AND t.relname not like 'matviewmap_%') + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.data_type_privileges AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(x.objschema AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS dtd_identifier + + FROM + ( + SELECT udt_schema, udt_name, 'USER-DEFINED TYPE'::text, dtd_identifier FROM attributes + UNION ALL + SELECT table_schema, table_name, 'TABLE'::text, dtd_identifier FROM columns + UNION ALL + SELECT domain_schema, domain_name, 'DOMAIN'::text, dtd_identifier FROM domains + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM parameters + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM routines + ) AS x (objschema, objname, objtype, objdtdid); + +CREATE VIEW information_schema.role_column_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + column_name, + privilege_type, + is_grantable + FROM column_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.role_table_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + privilege_type, + is_grantable, + with_hierarchy + FROM table_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.element_types AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(n.nspname AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS collection_type_identifier, + CAST( + CASE WHEN nbt.nspname = 'pg_catalog' THEN format_type(bt.oid, null) + ELSE 'USER-DEFINED' END AS character_data) AS data_type, + + CAST(null AS cardinal_number) AS character_maximum_length, + CAST(null AS cardinal_number) AS character_octet_length, + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + CAST(null AS cardinal_number) AS numeric_precision, + CAST(null AS cardinal_number) AS numeric_precision_radix, + CAST(null AS cardinal_number) AS numeric_scale, + CAST(null AS cardinal_number) AS datetime_precision, + CAST(null AS character_data) AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(nbt.nspname AS 
sql_identifier) AS udt_schema, + CAST(bt.typname AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST('a' || CAST(x.objdtdid AS text) AS sql_identifier) AS dtd_identifier + + FROM pg_namespace n, pg_type at, pg_namespace nbt, pg_type bt, + ( + /* columns, attributes */ + SELECT c.relnamespace, CAST(c.relname AS sql_identifier), + CASE WHEN c.relkind = 'c' THEN 'USER-DEFINED TYPE'::text ELSE 'TABLE'::text END, + a.attnum, a.atttypid, a.attcollation + FROM pg_class c, pg_attribute a + WHERE c.oid = a.attrelid + AND c.relkind IN ('r', 'm', 'v', 'f', 'c') + AND (c.relname not like 'mlog_%' AND c.relname not like 'matviewmap_%') + AND attnum > 0 AND NOT attisdropped + + UNION ALL + + /* domains */ + SELECT t.typnamespace, CAST(t.typname AS sql_identifier), + 'DOMAIN'::text, 1, t.typbasetype, t.typcollation + FROM pg_type t + WHERE t.typtype = 'd' + + UNION ALL + + /* parameters */ + SELECT pronamespace, CAST(proname || '_' || CAST(oid AS text) AS sql_identifier), + 'ROUTINE'::text, (ss.x).n, (ss.x).x, 0 + FROM (SELECT p.pronamespace, p.proname, p.oid, + _pg_expandarray(coalesce(p.proallargtypes, p.proargtypes::oid[])) AS x + FROM pg_proc p) AS ss + + UNION ALL + + /* result types */ + SELECT p.pronamespace, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier), + 'ROUTINE'::text, 0, p.prorettype, 0 + FROM pg_proc p + + ) AS x (objschema, objname, objtype, objdtdid, objtypeid, objcollation) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON x.objcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE n.oid = x.objschema + AND at.oid = x.objtypeid + AND (at.typelem <> 0 AND at.typlen = -1) + AND at.typelem = bt.oid + AND nbt.oid = bt.typnamespace + + AND (n.nspname, x.objname, x.objtype, CAST(x.objdtdid AS sql_identifier)) IN + ( SELECT object_schema, object_name, object_type, dtd_identifier + FROM data_type_privileges ); + +do $$DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.element_types TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.data_type_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_column_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_table_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_domain_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_udt_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE 
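+ -- EXECUTE IMMEDIATE is the Oracle-compatible spelling of dynamic SQL in
+ -- openGauss PL/pgSQL; each query_str built here grants full privileges on one
+ -- recreated information_schema view to the session user running the upgrade.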
IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.columns TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.table_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.tables TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_column_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_table_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END$$; + +GRANT SELECT ON information_schema.element_types TO PUBLIC; +GRANT SELECT ON information_schema.data_type_privileges TO PUBLIC; +GRANT SELECT ON information_schema.role_column_grants TO PUBLIC; +GRANT SELECT ON information_schema.role_table_grants TO PUBLIC; +GRANT SELECT ON information_schema.column_domain_usage TO PUBLIC; +GRANT SELECT ON information_schema.column_privileges TO PUBLIC; +GRANT SELECT ON information_schema.column_udt_usage TO PUBLIC; +GRANT SELECT ON information_schema.columns TO PUBLIC; +GRANT SELECT ON information_schema.table_privileges TO PUBLIC; +GRANT SELECT ON information_schema.tables TO PUBLIC; +GRANT SELECT ON information_schema.view_column_usage TO PUBLIC; +GRANT SELECT ON information_schema.view_table_usage TO PUBLIC; + +RESET search_path; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_602.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_602.sql new file mode 100644 index 000000000..3513c8b28 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade-post_catalog_maindb_92_602.sql @@ -0,0 +1,3662 @@ +DROP FUNCTION IF EXISTS pg_catalog.large_seq_rollback_ntree(pg_node_tree); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6016; +CREATE OR REPLACE FUNCTION pg_catalog.large_seq_rollback_ntree(pg_node_tree) + RETURNS pg_node_tree + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$large_sequence_rollback_node_tree$function$; + +DROP FUNCTION IF EXISTS pg_catalog.large_seq_upgrade_ntree(pg_node_tree); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6017; +CREATE OR REPLACE FUNCTION pg_catalog.large_seq_upgrade_ntree(pg_node_tree) + RETURNS pg_node_tree + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$large_sequence_upgrade_node_tree$function$; + +DO +$do$ +DECLARE +query_str text; +type array_t is varray(10) of varchar2(50); +rel_array array_t := array[ + 'pg_catalog.pg_proc', + 'pg_catalog.pg_type', + 'pg_catalog.pg_attrdef', + 'pg_catalog.pg_constraint', + 'pg_catalog.pg_rewrite', + 'pg_catalog.pg_rewrite', + 'pg_catalog.pg_trigger', + 'pg_catalog.pg_rlspolicy' +]; +att_array array_t := array[ + 'proargdefaults', + 'typdefaultbin', + 'adbin', + 'conbin', + 'ev_qual', + 'ev_action', + 'tgqual', + 'polqual' +]; +BEGIN + raise info 'Processing sequence APIs'; + FOR i IN 1..rel_array.count LOOP + raise info '%.%',rel_array[i],att_array[i]; + query_str := 'UPDATE ' || rel_array[i] || ' SET ' || att_array[i] || ' 
= large_seq_upgrade_ntree(' || att_array[i] || ' ) WHERE ' || att_array[i] || ' LIKE ''%:funcid 1574 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 1575 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 2559 :%'';'; + EXECUTE query_str; + END LOOP; +END +$do$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP INDEX IF EXISTS pg_catalog.pg_proc_proname_all_args_nsp_index; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9666; +CREATE INDEX pg_catalog.pg_proc_proname_all_args_nsp_index on pg_catalog.pg_proc USING BTREE(proname name_ops, allargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +REINDEX INDEX pg_catalog.pg_proc_proname_all_args_nsp_index; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5255; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_shared_memctx_detail( +text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_shared_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5256; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_thread_memctx_detail( +int8, +text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_thread_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5254; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_session_memctx_detail( +text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_session_memctx_detail';DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9377; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text) + RETURNS SETOF RECORD + LANGUAGE internal +AS $function$gs_get_parallel_decode_status$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DROP FUNCTION IF EXISTS pg_catalog.gs_index_advise(cstring); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,4888; +CREATE OR REPLACE FUNCTION pg_catalog.gs_index_advise(sql_string cstring, OUT schema text, OUT "table" text, OUT "column" text, OUT indextype text) + RETURNS SETOF record + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE ROWS 100 +AS $function$gs_index_advise$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +-- gs_parse_page_bypath +DROP FUNCTION IF EXISTS pg_catalog.gs_parse_page_bypath(text, bigint, text, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2620; +CREATE OR REPLACE FUNCTION pg_catalog.gs_parse_page_bypath(path text, blocknum bigint, relation_type text, read_memory boolean, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_parse_page_bypath$function$; +comment on function PG_CATALOG.gs_parse_page_bypath(path text, blocknum bigint, relation_type text, read_memory boolean) is 'parse data page to output file based on given 
filepath'; + +-- gs_xlogdump_lsn +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_lsn(text, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2619; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_lsn(start_lsn text, end_lsn text, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_lsn$function$; +comment on function PG_CATALOG.gs_xlogdump_lsn(start_lsn text, end_lsn text) is 'dump xlog records to output file based on the given start_lsn and end_lsn'; + +-- gs_xlogdump_xid +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_xid(xid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2617; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_xid(c_xid xid, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_xid$function$; +comment on function PG_CATALOG.gs_xlogdump_xid(c_xid xid) is 'dump xlog records to output file based on the given xid'; + +-- gs_xlogdump_tablepath +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_tablepath(text, bigint, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2616; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_tablepath(path text, blocknum bigint, relation_type text, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_tablepath$function$; +comment on function PG_CATALOG.gs_xlogdump_tablepath(path text, blocknum bigint, relation_type text) is 'dump xlog records to output file based on given filepath'; + +-- gs_xlogdump_parsepage_tablepath +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_parsepage_tablepath(text, bigint, text, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2618; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_parsepage_tablepath(path text, blocknum bigint, relation_type text, read_memory boolean, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_parsepage_tablepath$function$; +comment on function PG_CATALOG.gs_xlogdump_parsepage_tablepath(path text, blocknum bigint, relation_type text, read_memory boolean) is 'parse data page to output file based on given filepath'; +DROP FUNCTION IF EXISTS pg_catalog.local_xlog_redo_statics(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4390; +CREATE FUNCTION pg_catalog.local_xlog_redo_statics +( +OUT xlog_type pg_catalog.text, +OUT rmid pg_catalog.int4, +OUT info pg_catalog.int4, +OUT num pg_catalog.int8, +OUT extra pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_xlog_redo_statics'; + +DROP FUNCTION IF EXISTS pg_catalog.local_redo_time_count(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4391; +CREATE FUNCTION pg_catalog.local_redo_time_count +( +OUT thread_name pg_catalog.text, +OUT step1_total pg_catalog.int8, +OUT step1_count pg_catalog.int8, +OUT step2_total pg_catalog.int8, +OUT step2_count pg_catalog.int8, +OUT step3_total pg_catalog.int8, +OUT step3_count pg_catalog.int8, +OUT step4_total pg_catalog.int8, +OUT step4_count pg_catalog.int8, +OUT step5_total pg_catalog.int8, +OUT step5_count pg_catalog.int8, +OUT step6_total pg_catalog.int8, +OUT step6_count pg_catalog.int8, +OUT step7_total pg_catalog.int8, +OUT step7_count pg_catalog.int8, +OUT step8_total pg_catalog.int8, +OUT step8_count pg_catalog.int8, +OUT step9_total pg_catalog.int8, 
+OUT step9_count pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_redo_time_count'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace 
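+ -- guard: the DBE_PERF objects below are recreated only when the dbe_perf schema exists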
where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out 
datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text, out unique_sql_id bigint) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + unique_sql_id := row_data.unique_sql_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT 
receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + backend_start := row_data.backend_start; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + 
T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + 
S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE 
DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; +-- regexp_count +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 385; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_count(text, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$regexp_count_noopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 386; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_count(text, text, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_count_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 387; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_count(text, text, int, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_count_matchopt$function$; + +-- regexp_instr +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 630; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_noopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 631; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 632; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_occurren$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 633; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int, int, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_returnopt$function$; + +SET LOCAL 
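+-- Each SET LOCAL inplace_upgrade_next_system_object_oids pins the fixed OID that
+-- the immediately following CREATE receives, so that system objects keep stable
+-- OIDs across upgraded and freshly installed instances.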
inplace_upgrade_next_system_object_oids = IUO_PROC, 634; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int, int, int, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_matchopt$function$; + +-- regexp_replace +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1116; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_noopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1117; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text, text, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1118; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text, text, int, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_occur$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1119; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text, text, int, int, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_matchopt$function$; + +-- regexp_substr +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1566; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_substr(text, text, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_substr_with_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1567; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_substr(text, text, int, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_substr_with_occur$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1568; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_substr(text, text, int, int, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_substr_with_opt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; +CREATE FUNCTION pg_catalog.local_double_write_stat +( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + +DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; +CREATE FUNCTION pg_catalog.remote_double_write_stat +( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, 
+ OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + +DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; +CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, file_id, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + +REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ + +GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_active_archiving_standby(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4579; +CREATE FUNCTION pg_catalog.gs_get_active_archiving_standby +( +OUT standby_name pg_catalog.text, +OUT archive_location pg_catalog.text, +OUT archived_file_num pg_catalog.int4 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_get_active_archiving_standby'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_get_warning_for_xlog_force_recycle(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4582; +CREATE FUNCTION pg_catalog.gs_pitr_get_warning_for_xlog_force_recycle +( +OUT xlog_force_recycled pg_catalog.bool +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_get_warning_for_xlog_force_recycle'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4581; +CREATE FUNCTION pg_catalog.gs_pitr_clean_history_global_barriers +( +IN stop_barrier_timestamp pg_catalog.timestamptz, +OUT oldest_barrier_record pg_catalog.text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_clean_history_global_barriers'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4580; +CREATE FUNCTION pg_catalog.gs_pitr_archive_slot_force_advance +( +IN stop_barrier_timestamp pg_catalog.timestamptz, +OUT archive_restart_lsn pg_catalog.text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_archive_slot_force_advance'; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_standby_cluster_barrier_status() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9039; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_standby_cluster_barrier_status +( OUT barrier_id text, + OUT barrier_lsn text, + OUT recovery_id text, + OUT target_id text) +RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_get_standby_cluster_barrier_status'; +DROP FUNCTION IF EXISTS pg_catalog.gs_set_standby_cluster_target_barrier_id() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9037; +CREATE OR REPLACE FUNCTION pg_catalog.gs_set_standby_cluster_target_barrier_id +( IN barrier_id text, + OUT target_id text) +RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_set_standby_cluster_target_barrier_id'; +DROP FUNCTION IF EXISTS pg_catalog.gs_query_standby_cluster_barrier_id_exist() cascade; +SET LOCAL 
inplace_upgrade_next_system_object_oids = IUO_PROC, 9038; +CREATE OR REPLACE FUNCTION pg_catalog.gs_query_standby_cluster_barrier_id_exist +( IN barrier_id text, + OUT target_id bool) +RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_query_standby_cluster_barrier_id_exist'; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_streaming_hadr_rto_and_rpo_stat CASCADE; + end if; + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_local_rto_and_rpo_stat(); + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_remote_rto_and_rpo_stat(); +END$$; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 5077; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_local_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT rto_sleep_time pg_catalog.int8, +OUT rpo_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_local_rto_and_rpo_stat'; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 5078; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_remote_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT rto_sleep_time pg_catalog.int8, +OUT rpo_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_remote_rto_and_rpo_stat'; +CREATE OR REPLACE VIEW DBE_PERF.global_streaming_hadr_rto_and_rpo_stat AS + SELECT hadr_sender_node_name, hadr_receiver_node_name, current_rto, target_rto, current_rpo, target_rpo, rto_sleep_time, rpo_sleep_time +FROM pg_catalog.gs_hadr_local_rto_and_rpo_stat(); +REVOKE ALL on DBE_PERF.global_streaming_hadr_rto_and_rpo_stat FROM PUBLIC; +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT ALL ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ +GRANT SELECT ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +CREATE OR REPLACE VIEW pg_catalog.gs_gsc_memory_detail AS + SELECT db_id, sum(totalsize) AS totalsize, sum(freesize) AS freesize, sum(usedsize) AS usedsize + FROM ( + SELECT + CASE WHEN contextname like '%GlobalSysDBCacheEntryMemCxt%' THEN substring(contextname, 29) + ELSE substring(parent, 29) END AS db_id, + totalsize, + freesize, + usedsize + FROM pg_shared_memory_detail() + WHERE contextname LIKE '%GlobalSysDBCacheEntryMemCxt%' OR parent LIKE '%GlobalSysDBCacheEntryMemCxt%' + )a + GROUP BY db_id; +GRANT SELECT ON TABLE pg_catalog.gs_gsc_memory_detail TO PUBLIC; +CREATE OR REPLACE VIEW pg_catalog.gs_lsc_memory_detail AS +SELECT * FROM pv_thread_memory_detail() WHERE contextname LIKE '%LocalSysCache%' OR parent LIKE '%LocalSysCache%'; +GRANT 
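+-- gs_gsc_memory_detail above aggregates global-syscache (GSC) memory usage per
+-- database, while gs_lsc_memory_detail filters pv_thread_memory_detail() down to
+-- the per-thread LocalSysCache contexts.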
SELECT ON TABLE pg_catalog.gs_lsc_memory_detail TO PUBLIC; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9123; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_table_detail(database_oid bigint DEFAULT NULL::bigint, rel_oid bigint DEFAULT NULL::bigint, OUT database_oid oid, OUT database_name text, OUT reloid oid, OUT relname text, OUT relnamespace oid, OUT reltype oid, OUT reloftype oid, OUT relowner oid, OUT relam oid, OUT relfilenode oid, OUT reltablespace oid, OUT relhasindex boolean, OUT relisshared boolean, OUT relkind "char", OUT relnatts smallint, OUT relhasoids boolean, OUT relhaspkey boolean, OUT parttype "char", OUT tdhasuids boolean, OUT attnames text, OUT extinfo text) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$gs_gsc_table_detail$function$; + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9122; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_catalog_detail(database_id bigint DEFAULT NULL::bigint, rel_id bigint DEFAULT NULL::bigint, OUT database_id bigint, OUT database_name text, OUT rel_id bigint, OUT rel_name text, OUT cache_id bigint, OUT self text, OUT ctid text, OUT infomask bigint, OUT infomask2 bigint, OUT hash_value bigint, OUT refcount bigint) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$gs_gsc_catalog_detail$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9121; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_dbstat_info(database_id bigint DEFAULT NULL::bigint, OUT database_id bigint, OUT database_name text, OUT tup_searches bigint, OUT tup_hits bigint, OUT tup_miss bigint, OUT tup_count bigint, OUT tup_dead bigint, OUT tup_memory bigint, OUT rel_searches bigint, OUT rel_hits bigint, OUT rel_miss bigint, OUT rel_count bigint, OUT rel_dead bigint, OUT rel_memory bigint, OUT part_searches bigint, OUT part_hits bigint, OUT part_miss bigint, OUT part_count bigint, OUT part_dead bigint, OUT part_memory bigint, OUT total_memory bigint, OUT swapout_count bigint, OUT refcount bigint) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE ROWS 100 +AS $function$gs_gsc_dbstat_info$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9120; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_clean(database_id bigint DEFAULT NULL::bigint) + RETURNS boolean + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$gs_gsc_clean$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION 
pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint, + OUT trace_id text +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info, + S.unique_sql_id, + S.trace_id + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS 
pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint, + OUT trace_id text + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.unique_sql_id, + S.trace_id + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text, out unique_sql_id bigint, out trace_id text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := 
row_data.usename;
+          application_name := row_data.application_name;
+          client_addr := row_data.client_addr;
+          client_hostname := row_data.client_hostname;
+          client_port := row_data.client_port;
+          backend_start := row_data.backend_start;
+          xact_start := row_data.xact_start;
+          query_start := row_data.query_start;
+          state_change := row_data.state_change;
+          waiting := row_data.waiting;
+          enqueue := row_data.enqueue;
+          state := row_data.state;
+          resource_pool := row_data.resource_pool;
+          query_id := row_data.query_id;
+          query := row_data.query;
+          unique_sql_id := row_data.unique_sql_id;
+          trace_id := row_data.trace_id;
+          return next;
+        END LOOP;
+      END LOOP;
+      return;
+    END; $$
+    LANGUAGE 'plpgsql' NOT FENCED;
+
+
+  CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS
+    SELECT * FROM DBE_PERF.get_global_session_stat_activity();
+
+  CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS
+    SELECT t.*
+    FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t
+    WHERE s.query_id = t.queryid;
+
+  CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime()
+    RETURNS setof dbe_perf.operator_runtime
+    AS $$
+    DECLARE
+      row_data dbe_perf.operator_runtime%rowtype;
+      row_name record;
+      query_str text;
+      query_str_nodes text;
+    BEGIN
+      query_str_nodes := 'select * from dbe_perf.node_name';
+      FOR row_name IN EXECUTE(query_str_nodes) LOOP
+        query_str := 'SELECT * FROM dbe_perf.operator_runtime';
+        FOR row_data IN EXECUTE(query_str) LOOP
+          return next row_data;
+        END LOOP;
+      END LOOP;
+      return;
+    END; $$
+    LANGUAGE 'plpgsql' NOT FENCED;
+
+  CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS
+    SELECT * FROM dbe_perf.get_global_operator_runtime();
+
+
+  CREATE OR REPLACE VIEW dbe_perf.replication_stat AS
+    SELECT
+      S.pid,
+      S.usesysid,
+      U.rolname AS usename,
+      S.application_name,
+      S.client_addr,
+      S.client_hostname,
+      S.client_port,
+      S.backend_start,
+      W.state,
+      W.sender_sent_location,
+      W.receiver_write_location,
+      W.receiver_flush_location,
+      W.receiver_replay_location,
+      W.sync_priority,
+      W.sync_state
+    FROM pg_stat_get_activity(NULL) AS S, pg_authid U,
+         pg_stat_get_wal_senders() AS W
+    WHERE S.usesysid = U.oid AND
+          S.pid = W.pid;
+
+  CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat
+    (OUT node_name name,
+     OUT pid bigint,
+     OUT usesysid oid,
+     OUT usename name,
+     OUT application_name text,
+     OUT client_addr inet,
+     OUT client_hostname text,
+     OUT client_port integer,
+     OUT backend_start timestamp with time zone,
+     OUT state text,
+     OUT sender_sent_location text,
+     OUT receiver_write_location text,
+     OUT receiver_flush_location text,
+     OUT receiver_replay_location text,
+     OUT sync_priority integer,
+     OUT sync_state text)
+    RETURNS setof record
+    AS $$
+    DECLARE
+      row_data dbe_perf.replication_stat%rowtype;
+      row_name record;
+      query_str text;
+      query_str_nodes text;
+    BEGIN
+      -- Get all the node names
+      query_str_nodes := 'select * from dbe_perf.node_name';
+      FOR row_name IN EXECUTE(query_str_nodes) LOOP
+        query_str := 'SELECT * FROM dbe_perf.replication_stat';
+        FOR row_data IN EXECUTE(query_str) LOOP
+          node_name := row_name.node_name;
+          pid := row_data.pid;
+          usesysid := row_data.usesysid;
+          usename := row_data.usename;
+          application_name := row_data.application_name;
+          client_addr := row_data.client_addr;
+          client_hostname := row_data.client_hostname;
+          client_port := row_data.client_port;
+          backend_start := row_data.backend_start;
+          state := row_data.state;
+          sender_sent_location := row_data.sender_sent_location;
+          receiver_write_location := row_data.receiver_write_location;
+          receiver_flush_location := 
row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT 
FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + 
W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE 
DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; + +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_history cascade; + end if; +END$DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean, + trace_id text +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + +DO $DO$ +DECLARE + ans boolean; + username text; + querystr text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + CREATE VIEW DBE_PERF.statement_history AS + select * from pg_catalog.statement_history; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT 
query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''''''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := 
row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''' and (extract(epoch from (finish_time - start_time)) * 1000000) >= slow_sql_threshold '''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + 
transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + DROP FUNCTION IF EXISTS pg_catalog.statement_detail_decode() CASCADE; + set local inplace_upgrade_next_system_object_oids = IUO_PROC, 5732; + CREATE OR REPLACE FUNCTION pg_catalog.statement_detail_decode + ( IN text, + IN text, + IN boolean) + RETURNS text LANGUAGE INTERNAL NOT FENCED as 'statement_detail_decode'; + + SELECT SESSION_USER INTO username; + IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN + querystr := 'REVOKE ALL ON TABLE dbe_perf.statement_history FROM ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'REVOKE ALL ON TABLE pg_catalog.statement_history FROM ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'REVOKE SELECT on table dbe_perf.statement_history FROM public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE dbe_perf.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + end if; +END$DO$;DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5255; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_shared_memctx_detail( +IN context_name text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_shared_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5256; 
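+-- Usage sketch (comment only) for the gs_get_*_memctx_detail functions in this
+-- block: each takes a memory context name and returns the (file, line, size)
+-- allocation records of that context. 'SomeSharedContext' is an illustrative
+-- placeholder, not a guaranteed context name; real names can be listed via
+-- pg_shared_memory_detail() or pv_thread_memory_detail().
+--   SELECT file, line, sum(size) AS total_size
+--   FROM pg_catalog.gs_get_shared_memctx_detail('SomeSharedContext')
+--   GROUP BY file, line
+--   ORDER BY total_size DESC;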
+CREATE OR REPLACE FUNCTION pg_catalog.gs_get_thread_memctx_detail( +IN threadid int8, +IN context_name text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_thread_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5254; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_session_memctx_detail( +IN context_name text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_session_memctx_detail';DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_set() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3268; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_set +(text, OUT set_ok boolean) +RETURNS SETOF boolean LANGUAGE INTERNAL ROWS 1 STRICT as 'pgxc_disaster_read_set'; + +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_init() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3269; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_init +(OUT init_ok boolean) +RETURNS SETOF boolean LANGUAGE INTERNAL ROWS 1 STRICT as 'pgxc_disaster_read_init'; + +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_clear() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3271; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_clear +(OUT clear_ok boolean) +RETURNS SETOF boolean LANGUAGE INTERNAL ROWS 1 STRICT as 'pgxc_disaster_read_clear'; + +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_status() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3273; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_status +( +OUT node_oid pg_catalog.oid, +OUT node_type pg_catalog.text, +OUT host pg_catalog.text, +OUT port pg_catalog.int4, +OUT host1 pg_catalog.text, +OUT port1 pg_catalog.int4, +OUT xlogMaxCSN pg_catalog.int8, +OUT consistency_point_csn pg_catalog.int8 +) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE ROWS 100 COST 100 STRICT as 'pgxc_disaster_read_status';DROP FUNCTION IF EXISTS pg_catalog.gs_verify_data_file(bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4571; +CREATE OR REPLACE FUNCTION pg_catalog.gs_verify_data_file(verify_segment boolean DEFAULT false, OUT node_name text, OUT rel_oid oid, OUT rel_name text, OUT miss_file_path text) + RETURNS SETOF record + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_verify_data_file$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_file(Oid, text, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4771; +CREATE OR REPLACE FUNCTION pg_catalog.gs_repair_file(tableoid oid, path text, timeout integer) + RETURNS SETOF boolean + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_repair_file$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_verify_and_tryrepair_page(text, oid, boolean, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4569; +CREATE OR REPLACE FUNCTION pg_catalog.gs_verify_and_tryrepair_page(path text, blocknum oid, verify_mem boolean, is_segment boolean, OUT node_name text, OUT path text, OUT blocknum oid, OUT disk_page_res text, OUT mem_page_res text, OUT is_repair boolean) + RETURNS SETOF record + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_verify_and_tryrepair_page$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_page(text, oid, bool, integer) CASCADE; +SET LOCAL 
inplace_upgrade_next_system_object_oids = IUO_PROC, 4570; +CREATE OR REPLACE FUNCTION pg_catalog.gs_repair_page(path text, blocknum oid, is_segment boolean, timeout integer, OUT result boolean) + RETURNS SETOF boolean + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_repair_page$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.local_bad_block_info() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4567; +CREATE OR REPLACE FUNCTION pg_catalog.local_bad_block_info(OUT node_name text, OUT spc_node oid, OUT db_node oid, OUT rel_node oid, OUT bucket_node integer, OUT fork_num integer, OUT block_num integer, OUT file_path text, OUT check_time timestamp with time zone, OUT repair_time timestamp with time zone) + RETURNS SETOF record + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE +AS $function$local_bad_block_info$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.local_clear_bad_block_info() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4568; +CREATE OR REPLACE FUNCTION pg_catalog.local_clear_bad_block_info(OUT result boolean) + RETURNS SETOF boolean + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE +AS $function$local_clear_bad_block_info$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_from_remote(oid, oid, oid, integer, integer, integer, xid, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4768; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_file_from_remote(oid, oid, oid, integer, integer, integer, xid, integer, OUT bytea, OUT xid) + RETURNS record + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_file_from_remote$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, integer, integer, xid, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4769; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, integer, integer, xid, integer, OUT bigint) + RETURNS bigint + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_file_size_from_remote$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4770; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_segment_block_from_remote$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +-- adding system table pg_subscription + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 6126, 6128, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_subscription +( + subdbid oid NOCOMPRESS NOT NULL, + subname name NOCOMPRESS, + subowner oid NOCOMPRESS, + subenabled bool NOCOMPRESS, + subconninfo text NOCOMPRESS, + subslotname name NOCOMPRESS, + subsynccommit text NOCOMPRESS, + subpublications text[] NOCOMPRESS +) WITH OIDS TABLESPACE pg_global; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 0, 0, 0, 6124; +CREATE UNIQUE INDEX pg_subscription_oid_index ON pg_catalog.pg_subscription USING BTREE(oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 0, 0, 0, 6125; 
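+-- The (subdbid, subname) unique index created next enforces that a
+-- subscription name is unique per database rather than cluster-wide, and it
+-- backs name-based lookups such as the following sketch ('my_sub' is a
+-- hypothetical subscription name):
+--   SELECT oid, subenabled, subconninfo, subpublications
+--   FROM pg_catalog.pg_subscription
+--   WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database
+--                    WHERE datname = current_database())
+--     AND subname = 'my_sub';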
+CREATE UNIQUE INDEX pg_subscription_subname_index ON pg_catalog.pg_subscription USING BTREE(subdbid, subname); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE pg_catalog.pg_subscription TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ + +-- adding system table pg_replication_origin + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 6134, 6143, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_replication_origin +( + roident oid NOCOMPRESS NOT NULL, + roname text NOCOMPRESS +) TABLESPACE pg_global; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 0, 0, 0, 6136; +CREATE UNIQUE INDEX pg_replication_origin_roident_index ON pg_catalog.pg_replication_origin USING BTREE(roident OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, true, 0, 0, 0, 6137; +CREATE UNIQUE INDEX pg_replication_origin_roname_index ON pg_catalog.pg_replication_origin USING BTREE(roname TEXT_PATTERN_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +GRANT SELECT ON TABLE pg_catalog.pg_replication_origin TO PUBLIC; + +-- adding function pg_replication_origin_create +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_create(IN node_name text, OUT replication_origin_oid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2635; +CREATE FUNCTION pg_catalog.pg_replication_origin_create(IN node_name text, OUT replication_origin_oid oid) RETURNS oid LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_create'; + +-- adding function pg_replication_origin_drop +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_drop(IN node_name text, OUT replication_origin_oid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2636; +CREATE FUNCTION pg_catalog.pg_replication_origin_drop(IN node_name text, OUT replication_origin_oid oid) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_drop'; + +-- adding function pg_replication_origin_oid +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_oid(IN node_name text, OUT replication_origin_oid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2637; +CREATE FUNCTION pg_catalog.pg_replication_origin_oid(IN node_name text, OUT replication_origin_oid oid) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_oid'; + +-- adding function pg_replication_origin_session_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_setup(IN node_name text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2751; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_setup(IN node_name text) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_setup'; + +-- adding function pg_replication_origin_session_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_reset() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2750; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_reset() RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_reset'; + +-- adding function pg_replication_origin_session_is_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_is_setup() CASCADE; +SET 
LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2639; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_is_setup() RETURNS boolean LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_is_setup'; + +-- adding function pg_replication_origin_session_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_progress(IN flush boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2640; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_progress(IN flush boolean) RETURNS record LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_progress'; + +-- adding function pg_replication_origin_xact_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_setup(IN origin_lsn text, IN origin_timestamp timestamp with time zone) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2799; +CREATE FUNCTION pg_catalog.pg_replication_origin_xact_setup(IN origin_lsn text, IN origin_timestamp timestamp with time zone) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_xact_setup'; + +-- adding function pg_replication_origin_xact_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_reset() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2752; +CREATE FUNCTION pg_catalog.pg_replication_origin_xact_reset() RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_xact_reset'; + +-- adding function pg_replication_origin_advance +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_advance(IN node_name text, IN lsn text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2634; +CREATE FUNCTION pg_catalog.pg_replication_origin_advance(IN node_name text, IN lsn text) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_advance'; + +-- adding function pg_replication_origin_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_progress(IN node_name text, IN flush boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2638; +CREATE FUNCTION pg_catalog.pg_replication_origin_progress(IN node_name text, IN flush boolean) RETURNS record LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_progress'; + +-- adding function pg_show_replication_origin_status +DROP FUNCTION IF EXISTS pg_catalog.pg_show_replication_origin_status(OUT local_id oid, OUT external_id text, OUT remote_lsn text, OUT local_lsn text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2800; +CREATE FUNCTION pg_catalog.pg_show_replication_origin_status(OUT local_id oid, OUT external_id text, OUT remote_lsn text, OUT local_lsn text) RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 100 AS 'pg_show_replication_origin_status'; + +-- adding function pg_get_publication_tables +DROP FUNCTION IF EXISTS pg_catalog.pg_get_publication_tables(IN pubname text, OUT relid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2801; +CREATE FUNCTION pg_catalog.pg_get_publication_tables(IN pubname text, OUT relid oid) RETURNS SETOF oid LANGUAGE INTERNAL STABLE STRICT AS 'pg_get_publication_tables'; + +-- adding function pg_stat_get_subscription +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_subscription(IN subid oid, OUT subid oid, OUT pid integer, OUT received_lsn text, OUT last_msg_send_time timestamp with time zone, OUT last_msg_receipt_time timestamp with time zone, OUT latest_end_lsn text, OUT latest_end_time timestamp with time zone) CASCADE; +SET LOCAL 
inplace_upgrade_next_system_object_oids = IUO_PROC, 2802; +CREATE FUNCTION pg_catalog.pg_stat_get_subscription(IN subid oid, OUT subid oid, OUT pid integer, OUT received_lsn text, OUT last_msg_send_time timestamp with time zone, OUT last_msg_receipt_time timestamp with time zone, OUT latest_end_lsn text, OUT latest_end_time timestamp with time zone) RETURNS record LANGUAGE INTERNAL STABLE AS 'pg_stat_get_subscription'; + +-- adding system view +DROP VIEW IF EXISTS pg_catalog.pg_publication_tables CASCADE; +CREATE VIEW pg_catalog.pg_publication_tables AS + SELECT + P.pubname AS pubname, + N.nspname AS schemaname, + C.relname AS tablename + FROM pg_publication P, pg_class C + JOIN pg_namespace N ON (N.oid = C.relnamespace) + WHERE C.oid IN (SELECT relid FROM pg_get_publication_tables(P.pubname)); + +DROP VIEW IF EXISTS pg_catalog.pg_stat_subscription CASCADE; +CREATE VIEW pg_catalog.pg_stat_subscription AS + SELECT + su.oid AS subid, + su.subname, + st.pid, + st.received_lsn, + st.last_msg_send_time, + st.last_msg_receipt_time, + st.latest_end_lsn, + st.latest_end_time + FROM pg_subscription su + LEFT JOIN pg_stat_get_subscription(NULL) st + ON (st.subid = su.oid); + +DROP VIEW IF EXISTS pg_catalog.pg_replication_origin_status CASCADE; +CREATE VIEW pg_catalog.pg_replication_origin_status AS + SELECT * + FROM pg_show_replication_origin_status(); + +REVOKE ALL ON pg_catalog.pg_replication_origin_status FROM public;DROP FUNCTION IF EXISTS pg_catalog.gs_explain_model(text) cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8_array(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_bool(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float4(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int32(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int64(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_numeric(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_text(text, VARIADIC "any") cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7101; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_bool(text, VARIADIC "any") + RETURNS boolean + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_bool$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7105; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_float4(text, VARIADIC "any") + RETURNS real + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_float4$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7106; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_float8(text, VARIADIC "any") + RETURNS double precision + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_float8$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7102; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_int32(text, VARIADIC "any") + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_int32$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7103; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_int64(text, VARIADIC "any") + RETURNS bigint + LANGUAGE internal + IMMUTABLE 
NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_int64$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7107; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_numeric(text, VARIADIC "any") + RETURNS numeric + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_numeric$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7108; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_text(text, VARIADIC "any") + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_text$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7109; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_float8_array(text, VARIADIC "any") + RETURNS double precision[] + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_float8_array$function$; + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7110; +CREATE OR REPLACE FUNCTION pg_catalog.gs_explain_model(text) + RETURNS text + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_explain_model$function$; +comment on function PG_CATALOG.gs_explain_model(text) is 'explain machine learning model'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4581; +CREATE FUNCTION pg_catalog.gs_pitr_clean_history_global_barriers +( +IN stop_barrier_timestamp cstring, +OUT oldest_barrier_record text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_clean_history_global_barriers'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4580; +CREATE FUNCTION pg_catalog.gs_pitr_archive_slot_force_advance +( +IN stop_barrier_timestamp cstring, +OUT archive_restart_lsn text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_archive_slot_force_advance'; +DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; +CREATE FUNCTION pg_catalog.local_double_write_stat +( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8, + OUT file_id pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + +DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; +CREATE FUNCTION pg_catalog.remote_double_write_stat +( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8, + OUT file_id pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 
'remote_double_write_stat'; + +DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; +CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages, file_id + FROM pg_catalog.local_double_write_stat(); + +REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ + +GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select * from pg_tables where tablename = 'snap_global_double_write_status' and schemaname = 'snapshot' limit 1) into ans; + if ans = true then + alter table snapshot.snap_global_double_write_status + ADD COLUMN snap_file_id int8; + end if; +END$DO$; +comment on function PG_CATALOG.regexp_count(text, text) is 'find match(es) count for regexp'; +comment on function PG_CATALOG.regexp_count(text, text, integer) is 'find match(es) count for regexp'; +comment on function PG_CATALOG.regexp_count(text, text, integer, text) is 'find match(es) count for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer, text) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.lpad(text, integer, text) is 'left-pad string to length'; +comment on function PG_CATALOG.rpad(text, integer, text) is 'right-pad string to length'; +comment on function PG_CATALOG.regexp_replace(text, text) is 'replace text using regexp'; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer) is 'replace text using regexp'; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer) is 'replace text using regexp'; +comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer, text) is 'replace text using regexp'; +comment on function PG_CATALOG.line_in(cstring) is 'I/O'; +comment on function PG_CATALOG.regexp_substr(text, text, integer) is 'extract text matching regular expression'; +comment on function PG_CATALOG.regexp_substr(text, text, integer, integer) is 'extract text matching regular expression'; +comment on function PG_CATALOG.regexp_substr(text, text, integer, integer, text) is 'extract text matching regular expression'; +comment on function PG_CATALOG.pg_stat_get_activity(bigint) is 'statistics: information about currently active backends'; +comment on function PG_CATALOG.to_char(timestamp without time zone, text) is 'format timestamp to text'; +comment on function PG_CATALOG.pg_replication_origin_advance(text, text) is 'advance replication 
identifier to specific location'; +comment on function PG_CATALOG.pg_replication_origin_create(text) is 'create a replication origin'; +comment on function PG_CATALOG.pg_replication_origin_drop(text) is 'drop replication origin identified by its name'; +comment on function PG_CATALOG.pg_replication_origin_oid(text) is 'translate the replication origin\''s name to its id'; +comment on function PG_CATALOG.pg_replication_origin_progress(text, boolean) is 'get an individual replication origin\''s replication progress'; +comment on function PG_CATALOG.pg_replication_origin_session_is_setup() is 'is a replication origin configured in this session'; +comment on function PG_CATALOG.pg_replication_origin_session_progress(boolean) is 'get the replication progress of the current session'; +comment on function PG_CATALOG.pg_replication_origin_session_reset() is 'teardown configured replication progress tracking'; +comment on function PG_CATALOG.pg_replication_origin_session_setup(text) is 'configure session to maintain replication progress tracking for the passed in origin'; +comment on function PG_CATALOG.pg_replication_origin_xact_reset() is 'reset the transaction\''s origin lsn and timestamp'; +comment on function PG_CATALOG.pg_replication_origin_xact_setup(text, timestamp with time zone) is 'setup the transaction\''s origin lsn and timestamp'; +comment on function PG_CATALOG.pg_show_replication_origin_status() is 'get progress for all replication origins'; +comment on function PG_CATALOG.pg_get_publication_tables(text) is 'get OIDs of tables in a publication'; +comment on function PG_CATALOG.pg_stat_get_subscription(oid) is 'statistics: information about subscription'; +comment on function PG_CATALOG.xpath(text, xml, text[]) is 'evaluate XPath expression, with namespaces support'; +comment on function PG_CATALOG.xpath(text, xml) is 'evaluate XPath expression'; +comment on function PG_CATALOG.xpath_exists(text, xml, text[]) is 'test XML value against XPath expression, with namespace support'; +comment on function PG_CATALOG.json_array_element_text(json, integer) is 'implementation of ->> operator'; +comment on function PG_CATALOG.json_extract_path_op(json, text[]) is 'implementation of #> operator'; +comment on function PG_CATALOG.json_extract_path_text_op(json, text[]) is 'implementation of #>> operator'; +comment on function PG_CATALOG.jsonb_extract_path_op(jsonb, text[]) is 'implementation of #> operator'; +comment on function PG_CATALOG.json_object_field(json, text) is 'implementation of -> operator'; +comment on function PG_CATALOG.jsonb_array_element(jsonb, integer) is 'implementation of -> operator'; +comment on function PG_CATALOG.jsonb_array_element_text(jsonb, integer) is 'implementation of ->> operator'; +comment on function PG_CATALOG.jsonb_contains(jsonb, jsonb) is 'implementation of @> operator'; +comment on function PG_CATALOG.jsonb_eq(jsonb, jsonb) is 'implementation of = operator'; +comment on function PG_CATALOG.jsonb_exists(jsonb, text) is 'implementation of ? 
operator'; +comment on function PG_CATALOG.jsonb_exists_all(jsonb, text[]) is 'implementation of ?& operator'; +comment on function PG_CATALOG.jsonb_exists_any(jsonb, text[]) is 'implementation of ?| operator'; +comment on function PG_CATALOG.jsonb_extract_path_text_op(jsonb, text[]) is 'implementation of #>> operator'; +comment on function PG_CATALOG.jsonb_ge(jsonb, jsonb) is 'implementation of >= operator'; +comment on function PG_CATALOG.jsonb_gt(jsonb, jsonb) is 'implementation of > operator'; +comment on function PG_CATALOG.jsonb_le(jsonb, jsonb) is 'implementation of <= operator'; +comment on function PG_CATALOG.jsonb_ne(jsonb, jsonb) is 'implementation of <> operator'; +comment on function PG_CATALOG.jsonb_object_field(jsonb, text) is 'implementation of -> operator'; +comment on function PG_CATALOG.jsonb_object_field_text(jsonb, text) is 'implementation of ->> operator'; +comment on function PG_CATALOG.json_object_field_text(json, text) is 'implementation of ->> operator'; +comment on function PG_CATALOG.json_array_element(json, integer) is 'implementation of -> operator'; +comment on function PG_CATALOG.jsonb_lt(jsonb, jsonb) is 'implementation of < operator'; +comment on function PG_CATALOG.jsonb_contained(jsonb, jsonb) is 'implementation of <@ operator'; +comment on function PG_CATALOG.has_any_privilege(name, text) is 'current user privilege on database level'; +comment on function PG_CATALOG.int16eq(int16, int16) is 'implementation of = operator'; +comment on function PG_CATALOG.int16ne(int16, int16) is 'implementation of <> operator'; +comment on function PG_CATALOG.int16lt(int16, int16) is 'implementation of < operator'; +comment on function PG_CATALOG.int16le(int16, int16) is 'implementation of <= operator'; +comment on function PG_CATALOG.int16gt(int16, int16) is 'implementation of > operator'; +comment on function PG_CATALOG.int16ge(int16, int16) is 'implementation of >= operator'; +comment on function PG_CATALOG.int16pl(int16, int16) is 'implementation of + operator'; +comment on function PG_CATALOG.int16mi(int16, int16) is 'implementation of - operator'; +comment on function PG_CATALOG.int16mul(int16, int16) is 'implementation of * operator'; +comment on function PG_CATALOG.int16div(int16, int16) is 'implementation of / operator'; +comment on function PG_CATALOG.array_varchar_first(anyarray) is 'array_varchar_first'; +comment on function PG_CATALOG.array_varchar_last(anyarray) is 'array_varchar_last'; +comment on function PG_CATALOG.array_integer_first(anyarray) is 'array_integer_first'; +comment on function PG_CATALOG.array_integer_last(anyarray) is 'array_integer_last'; +comment on function PG_CATALOG.array_indexby_length(anyarray, integer) is 'array index by length'; +comment on function PG_CATALOG.gs_index_verify() is 'verify index'; +comment on function PG_CATALOG.gs_index_recycle_queue() is 'parse index recycle queue'; +DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages(boolean); +DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages_pid(boolean); + +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages(in flag boolean) returns table (username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text) AUTHID DEFINER +AS $$ +DECLARE +user_id text; +user_name text; +db_name text; +SQL_STMT VARCHAR2(500); +fail_cursor REFCURSOR; 
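+-- The two cursors scan pg_query_audit records from newest to oldest: +-- success_cursor locates the previous successful login, while fail_cursor collects +-- login_failed entries until it reaches the prior login_success.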
+success_cursor REFCURSOR; +BEGIN + SELECT text(oid) FROM pg_catalog.pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT SESSION_USER INTO user_name; + SELECT pg_catalog.CURRENT_DATABASE() INTO db_name; + IF flag = true THEN + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN success_cursor FOR EXECUTE SQL_STMT; + --search bottom up for all the success login info + FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo; + IF FOUND THEN + return next; + END IF; + CLOSE success_cursor; + ELSE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo; + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED; + +CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages_pid(flag boolean) + RETURNS TABLE(username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text, backendid bigint) AUTHID DEFINER +AS $$ +DECLARE +user_id text; +user_name text; +db_name text; +SQL_STMT VARCHAR2(500); +fail_cursor REFCURSOR; +success_cursor REFCURSOR; +mybackendid bigint; +curSessionFound boolean; +BEGIN + SELECT text(oid) FROM pg_catalog.pg_authid WHERE rolname=SESSION_USER INTO user_id; + SELECT SESSION_USER INTO user_name; + SELECT pg_catalog.CURRENT_DATABASE() INTO db_name; + SELECT pg_catalog.pg_backend_pid() INTO mybackendid; + curSessionFound = false; + IF flag = true THEN + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN success_cursor FOR EXECUTE SQL_STMT; + --search bottom up for all the success login info + FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + IF FOUND THEN + return next; + END IF; + END IF; + ELSE + SQL_STMT := 'SELECT 
username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid AND mytype = 'login_success' THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid ; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + END IF; --curSessionFound + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, oid, oid, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4770; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_segment_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, oid, oid, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_segment_block_from_remote$function$; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, boolean) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, boolean, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4767; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, boolean, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_block_from_remote$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DROP FUNCTION IF EXISTS pg_catalog.array_remove(anyarray, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6555; +CREATE FUNCTION pg_catalog.array_remove ( + anyarray, anyelement +) RETURNS anyarray LANGUAGE INTERNAL IMMUTABLE as 'array_remove'; + +DROP FUNCTION IF EXISTS pg_catalog.array_replace(anyarray, anyelement, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6556; +CREATE FUNCTION pg_catalog.array_replace ( + anyarray, anyelement, anyelement +) RETURNS anyarray LANGUAGE INTERNAL IMMUTABLE as 'array_replace'; + + +DROP FUNCTION IF EXISTS pg_catalog.first_transition(anyelement, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6558; +CREATE FUNCTION pg_catalog.first_transition ( +anyelement, anyelement +) RETURNS anyelement LANGUAGE INTERNAL IMMUTABLE STRICT as 'first_transition'; + +DROP FUNCTION IF EXISTS pg_catalog.last_transition(anyelement, 
anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6559; +CREATE FUNCTION pg_catalog.last_transition ( +anyelement, anyelement +) RETURNS anyelement LANGUAGE INTERNAL IMMUTABLE STRICT as 'last_transition'; + +DROP aggregate IF EXISTS pg_catalog.first(anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6560; +create aggregate first(anyelement) ( + sfunc = first_transition, + stype = anyelement +); + +DROP aggregate IF EXISTS pg_catalog.last(anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6561; +create aggregate last(anyelement) ( + sfunc = last_transition, + stype = anyelement +); + + + +DROP FUNCTION IF EXISTS pg_catalog.network_larger(inet, inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6666; +CREATE FUNCTION pg_catalog.network_larger ( +inet, inet +) RETURNS inet LANGUAGE INTERNAL STRICT as 'network_larger'; + +DROP FUNCTION IF EXISTS pg_catalog.network_smaller(inet, inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6667; +CREATE FUNCTION pg_catalog.network_smaller ( +inet, inet +) RETURNS inet LANGUAGE INTERNAL STRICT as 'network_smaller'; + + +DROP aggregate IF EXISTS pg_catalog.max(inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6668; +create aggregate max(inet) ( + sfunc = network_larger, + stype = inet +); + +DROP aggregate IF EXISTS pg_catalog.min(inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6669; +create aggregate min(inet) ( + sfunc = network_smaller, + stype = inet +); + +DROP FUNCTION IF EXISTS pg_catalog.pg_buffercache_pages() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4130; +CREATE FUNCTION pg_catalog.pg_buffercache_pages( + OUT bufferid integer, + OUT relfilenode oid, + OUT bucketid integer, + OUT storage_type bigint, + OUT reltablespace oid, + OUT reldatabase oid, + OUT relforknumber integer, + OUT relblocknumber oid, + OUT isdirty boolean, + OUT isvalid boolean, + OUT usage_count smallint, + OUT pinning_backends integer) +RETURNS SETOF record +LANGUAGE internal +STABLE NOT FENCED NOT SHIPPABLE ROWS 100 +AS 'pg_buffercache_pages'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select tablename from PG_TABLES where tablename='statement_history' and schemaname='pg_catalog' limit 1) into ans; + if ans = true then + TRUNCATE TABLE pg_catalog.statement_history; + DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; + create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + end if; +END$DO$; +DROP FUNCTION IF EXISTS pg_catalog.pg_create_physical_replication_slot_extern(name, boolean, text) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3790; +CREATE OR REPLACE FUNCTION pg_catalog.pg_create_physical_replication_slot_extern(slotname name, dummy_standby boolean, extra_content text, need_recycle_xlog boolean, OUT slotname text, OUT xlog_position text) + RETURNS record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$pg_create_physical_replication_slot_extern$function$; +SET search_path TO information_schema; + +-- element_types is generated by data_type_privileges +DROP VIEW IF EXISTS information_schema.element_types CASCADE; + +-- data_type_privileges is generated by columns 
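+-- The dependent views are dropped first (and the base views with CASCADE below) +-- so that each one can be recreated against the rebuilt definitions.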
+DROP VIEW IF EXISTS information_schema.data_type_privileges CASCADE; +-- role_column_grants is generated by column_privileges +DROP VIEW IF EXISTS information_schema.role_column_grants CASCADE; +-- role_table_grants is generated by table_privileges +DROP VIEW IF EXISTS information_schema.role_table_grants CASCADE; + +-- other views need upgrade for matview +DROP VIEW IF EXISTS information_schema.column_domain_usage CASCADE; +DROP VIEW IF EXISTS information_schema.column_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.column_udt_usage CASCADE; +DROP VIEW IF EXISTS information_schema.columns CASCADE; +DROP VIEW IF EXISTS information_schema.table_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.tables CASCADE; +DROP VIEW IF EXISTS information_schema.view_column_usage CASCADE; +DROP VIEW IF EXISTS information_schema.view_table_usage CASCADE; + +CREATE VIEW information_schema.column_domain_usage AS + SELECT CAST(current_database() AS sql_identifier) AS domain_catalog, + CAST(nt.nspname AS sql_identifier) AS domain_schema, + CAST(t.typname AS sql_identifier) AS domain_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_type t, pg_namespace nt, pg_class c, pg_namespace nc, + pg_attribute a + + WHERE t.typnamespace = nt.oid + AND c.relnamespace = nc.oid + AND a.attrelid = c.oid + AND a.atttypid = t.oid + AND t.typtype = 'd' + AND c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND a.attnum > 0 + AND NOT a.attisdropped + AND pg_has_role(t.typowner, 'USAGE'); + +CREATE VIEW information_schema.column_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(x.relname AS sql_identifier) AS table_name, + CAST(x.attname AS sql_identifier) AS column_name, + CAST(x.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(x.grantee, x.relowner, 'USAGE') + OR x.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable + + FROM ( + SELECT pr_c.grantor, + pr_c.grantee, + attname, + relname, + relnamespace, + pr_c.prtype, + pr_c.grantable, + pr_c.relowner + FROM (SELECT oid, relname, relnamespace, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* + FROM pg_class + WHERE relkind IN ('r', 'm', 'v', 'f') + ) pr_c (oid, relname, relnamespace, relowner, grantor, grantee, prtype, grantable), + pg_attribute a + WHERE a.attrelid = pr_c.oid + AND a.attnum > 0 + AND NOT a.attisdropped + UNION + SELECT pr_a.grantor, + pr_a.grantee, + attname, + relname, + relnamespace, + pr_a.prtype, + pr_a.grantable, + c.relowner + FROM (SELECT attrelid, attname, (aclexplode(coalesce(attacl, acldefault('c', relowner)))).* + FROM pg_attribute a JOIN pg_class cc ON (a.attrelid = cc.oid) + WHERE attnum > 0 + AND NOT attisdropped + ) pr_a (attrelid, attname, grantor, grantee, prtype, grantable), + pg_class c + WHERE pr_a.attrelid = c.oid + AND relkind IN ('r', 'm', 'v', 'f') + ) x, + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE x.relnamespace = nc.oid + AND x.grantee = 
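+-- (grantee oid 0 stands for PUBLIC; it is synthesized by the UNION ALL above)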
grantee.oid + AND x.grantor = u_grantor.oid + AND x.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'REFERENCES', 'COMMENT') + AND (x.relname not like 'mlog\_%' AND x.relname not like 'matviewmap\_%') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.column_udt_usage AS + SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_attribute a, pg_class c, pg_namespace nc, + (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + + WHERE a.attrelid = c.oid + AND a.atttypid = t.oid + AND nc.oid = c.relnamespace + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND pg_has_role(coalesce(bt.typowner, t.typowner), 'USAGE'); + +CREATE VIEW information_schema.columns AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name, + CAST(a.attnum AS cardinal_number) AS ordinal_position, + CAST(CASE WHEN ad.adgencol <> 's' THEN pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS column_default, + CAST(CASE WHEN a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) THEN 'NO' ELSE 'YES' END + AS yes_or_no) + AS is_nullable, + + CAST( + CASE WHEN t.typtype = 'd' THEN + CASE WHEN bt.typelem <> 0 AND bt.typlen = -1 THEN 'ARRAY' + WHEN nbt.nspname = 'pg_catalog' THEN format_type(t.typbasetype, null) + ELSE 'USER-DEFINED' END + ELSE + CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' + WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, null) + ELSE 'USER-DEFINED' END + END + AS character_data) + AS data_type, + + CAST( + _pg_char_max_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_maximum_length, + + CAST( + _pg_char_octet_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_octet_length, + + CAST( + _pg_numeric_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision, + + CAST( + _pg_numeric_precision_radix(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision_radix, + + CAST( + _pg_numeric_scale(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_scale, + + CAST( + _pg_datetime_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS datetime_precision, + + CAST( + _pg_interval_type(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS character_data) + AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS 
sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + + CAST(CASE WHEN t.typtype = 'd' THEN current_database() ELSE null END + AS sql_identifier) AS domain_catalog, + CAST(CASE WHEN t.typtype = 'd' THEN nt.nspname ELSE null END + AS sql_identifier) AS domain_schema, + CAST(CASE WHEN t.typtype = 'd' THEN t.typname ELSE null END + AS sql_identifier) AS domain_name, + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST(a.attnum AS sql_identifier) AS dtd_identifier, + CAST('NO' AS yes_or_no) AS is_self_referencing, + + CAST('NO' AS yes_or_no) AS is_identity, + CAST(null AS character_data) AS identity_generation, + CAST(null AS character_data) AS identity_start, + CAST(null AS character_data) AS identity_increment, + CAST(null AS character_data) AS identity_maximum, + CAST(null AS character_data) AS identity_minimum, + CAST(null AS yes_or_no) AS identity_cycle, + + CAST(CASE WHEN ad.adgencol = 's' THEN 'ALWAYS' ELSE 'NEVER' END AS character_data) AS is_generated, + CAST(CASE WHEN ad.adgencol = 's' THEN pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS generation_expression, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '2' AND is_instead) + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '4' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_updatable + + FROM (pg_attribute a LEFT JOIN pg_attrdef ad ON attrelid = adrelid AND attnum = adnum) + JOIN (pg_class c JOIN pg_namespace nc ON (c.relnamespace = nc.oid)) ON a.attrelid = c.oid + JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON a.atttypid = t.oid + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON a.attcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE (NOT pg_is_other_temp_schema(nc.oid)) + + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + + AND (pg_has_role(c.relowner, 'USAGE') + OR has_column_privilege(c.oid, a.attnum, + 'SELECT, INSERT, UPDATE, REFERENCES')); + +CREATE VIEW information_schema.table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, 
relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'm', 'v') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND (c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + OR c.prtype IN ('ALTER', 'DROP', 'COMMENT', 'INDEX', 'VACUUM') + ) + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.tables AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + + CAST( + CASE WHEN nc.oid = pg_my_temp_schema() THEN 'LOCAL TEMPORARY' + WHEN c.relkind = 'r' THEN 'BASE TABLE' + WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW' + WHEN c.relkind = 'v' THEN 'VIEW' + WHEN c.relkind = 'f' THEN 'FOREIGN TABLE' + ELSE null END + AS character_data) AS table_type, + + CAST(null AS sql_identifier) AS self_referencing_column_name, + CAST(null AS character_data) AS reference_generation, + + CAST(CASE WHEN t.typname IS NOT NULL THEN current_database() ELSE null END AS sql_identifier) AS user_defined_type_catalog, + CAST(nt.nspname AS sql_identifier) AS user_defined_type_schema, + CAST(t.typname AS sql_identifier) AS user_defined_type_name, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '3' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_insertable_into, + + CAST(CASE WHEN t.typname IS NOT NULL THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_typed, + CAST(null AS character_data) AS commit_action + + FROM pg_namespace nc JOIN pg_class c ON (nc.oid = c.relnamespace) + LEFT JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON (c.reloftype = t.oid) + + WHERE c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND (NOT pg_is_other_temp_schema(nc.oid)) + AND (pg_has_role(c.relowner, 'USAGE') + OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); + +CREATE VIEW information_schema.view_column_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt, + pg_attribute a + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 
'v', 'f') + AND (t.relname not like 'mlog\_%' AND t.relname not like 'matviewmap\_%') + AND t.oid = a.attrelid + AND dt.refobjsubid = a.attnum + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.view_table_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog\_%' AND t.relname not like 'matviewmap\_%') + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.data_type_privileges AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(x.objschema AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS dtd_identifier + + FROM + ( + SELECT udt_schema, udt_name, 'USER-DEFINED TYPE'::text, dtd_identifier FROM attributes + UNION ALL + SELECT table_schema, table_name, 'TABLE'::text, dtd_identifier FROM columns + UNION ALL + SELECT domain_schema, domain_name, 'DOMAIN'::text, dtd_identifier FROM domains + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM parameters + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM routines + ) AS x (objschema, objname, objtype, objdtdid); + +CREATE VIEW information_schema.role_column_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + column_name, + privilege_type, + is_grantable + FROM column_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.role_table_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + privilege_type, + is_grantable, + with_hierarchy + FROM table_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.element_types AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(n.nspname AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS collection_type_identifier, + CAST( + CASE WHEN nbt.nspname = 'pg_catalog' THEN format_type(bt.oid, null) + ELSE 'USER-DEFINED' END AS character_data) AS data_type, + + CAST(null AS cardinal_number) AS character_maximum_length, + CAST(null AS cardinal_number) AS character_octet_length, + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS 
character_set_name, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + CAST(null AS cardinal_number) AS numeric_precision, + CAST(null AS cardinal_number) AS numeric_precision_radix, + CAST(null AS cardinal_number) AS numeric_scale, + CAST(null AS cardinal_number) AS datetime_precision, + CAST(null AS character_data) AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(nbt.nspname AS sql_identifier) AS udt_schema, + CAST(bt.typname AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST('a' || CAST(x.objdtdid AS text) AS sql_identifier) AS dtd_identifier + + FROM pg_namespace n, pg_type at, pg_namespace nbt, pg_type bt, + ( + /* columns, attributes */ + SELECT c.relnamespace, CAST(c.relname AS sql_identifier), + CASE WHEN c.relkind = 'c' THEN 'USER-DEFINED TYPE'::text ELSE 'TABLE'::text END, + a.attnum, a.atttypid, a.attcollation + FROM pg_class c, pg_attribute a + WHERE c.oid = a.attrelid + AND c.relkind IN ('r', 'm', 'v', 'f', 'c') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND attnum > 0 AND NOT attisdropped + + UNION ALL + + /* domains */ + SELECT t.typnamespace, CAST(t.typname AS sql_identifier), + 'DOMAIN'::text, 1, t.typbasetype, t.typcollation + FROM pg_type t + WHERE t.typtype = 'd' + + UNION ALL + + /* parameters */ + SELECT pronamespace, CAST(proname || '_' || CAST(oid AS text) AS sql_identifier), + 'ROUTINE'::text, (ss.x).n, (ss.x).x, 0 + FROM (SELECT p.pronamespace, p.proname, p.oid, + _pg_expandarray(coalesce(p.proallargtypes, p.proargtypes::oid[])) AS x + FROM pg_proc p) AS ss + + UNION ALL + + /* result types */ + SELECT p.pronamespace, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier), + 'ROUTINE'::text, 0, p.prorettype, 0 + FROM pg_proc p + + ) AS x (objschema, objname, objtype, objdtdid, objtypeid, objcollation) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON x.objcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE n.oid = x.objschema + AND at.oid = x.objtypeid + AND (at.typelem <> 0 AND at.typlen = -1) + AND at.typelem = bt.oid + AND nbt.oid = bt.typnamespace + + AND (n.nspname, x.objname, x.objtype, CAST(x.objdtdid AS sql_identifier)) IN + ( SELECT object_schema, object_name, object_type, dtd_identifier + FROM data_type_privileges ); + +do $$DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.element_types TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.data_type_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_column_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 
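+-- DROP/CREATE reset the view ACLs, so full privileges are granted back to the +-- session user here; PUBLIC regains SELECT via the GRANT statements below.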
'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_table_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_domain_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_udt_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.columns TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.table_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.tables TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_column_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_table_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END$$; + +GRANT SELECT ON information_schema.element_types TO PUBLIC; +GRANT SELECT ON information_schema.data_type_privileges TO PUBLIC; +GRANT SELECT ON information_schema.role_column_grants TO PUBLIC; +GRANT SELECT ON information_schema.role_table_grants TO PUBLIC; +GRANT SELECT ON information_schema.column_domain_usage TO PUBLIC; +GRANT SELECT ON information_schema.column_privileges TO PUBLIC; +GRANT SELECT ON information_schema.column_udt_usage TO PUBLIC; +GRANT SELECT ON information_schema.columns TO PUBLIC; +GRANT SELECT ON information_schema.table_privileges TO PUBLIC; +GRANT SELECT ON information_schema.tables TO PUBLIC; +GRANT SELECT ON information_schema.view_column_usage TO PUBLIC; +GRANT SELECT ON information_schema.view_table_usage TO PUBLIC; + +RESET search_path; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int) CASCADE; +DROP 
FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int, text) CASCADE; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_602.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_602.sql new file mode 100644 index 000000000..420555f6b --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_maindb/upgrade_catalog_maindb_92_602.sql @@ -0,0 +1,158 @@ +--create gs_uid +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 8666, 8667, 0, 0; +CREATE TABLE IF NOT EXISTS pg_catalog.gs_uid +( + relid OID NOCOMPRESS NOT NULL, + uid_backup bigint NOCOMPRESS NOT NULL +); +GRANT SELECT ON pg_catalog.gs_uid TO public; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3499; +CREATE UNIQUE INDEX gs_uid_relid_index ON pg_catalog.gs_uid USING BTREE(relid OID_OPS); + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +/*------ add sys function gs_stat_wal_entrytable ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2861; +CREATE FUNCTION pg_catalog.gs_stat_wal_entrytable(int8, OUT idx xid, OUT endlsn xid, OUT lrc int, OUT status xid32) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_stat_wal_entrytable'; + +/*------ add sys function gs_walwriter_flush_position ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2862; +CREATE FUNCTION pg_catalog.gs_walwriter_flush_position(out last_flush_status_entry int, out last_scanned_lrc int, out curr_lrc int, out curr_byte_pos xid, out prev_byte_size xid32, out flush_result xid, out send_result xid, out shm_rqst_write_pos xid, out shm_rqst_flush_pos xid, out shm_result_write_pos xid, out shm_result_flush_pos xid, out curr_time timestamptz) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_walwriter_flush_position'; + +/*------ add sys function gs_walwriter_flush_stat ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2863; +CREATE FUNCTION pg_catalog.gs_walwriter_flush_stat(int4, out write_times xid, out sync_times xid, out total_xlog_sync_bytes xid, out total_actual_xlog_sync_bytes xid, out avg_write_bytes xid32, out avg_actual_write_bytes xid32, out avg_sync_bytes xid32, out avg_actual_sync_bytes xid32, out total_write_time xid, out total_sync_time xid, out avg_write_time xid32, out avg_sync_time xid32, out curr_init_xlog_segno xid, out curr_open_xlog_segno xid, out last_reset_time timestamptz, out curr_time timestamptz) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_walwriter_flush_stat'; + +/*------ add sys function gs_stat_undo ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; +CREATE FUNCTION pg_catalog.gs_stat_undo(OUT curr_used_zone_count int4, OUT top_used_zones text, OUT curr_used_undo_size int4, +OUT undo_threshold int4, OUT oldest_xid_in_undo oid, OUT oldest_xmin oid, OUT total_undo_chain_len oid, OUT max_undo_chain_len oid, +OUT create_undo_file_count int4, OUT discard_undo_file_count int4) +RETURNS record LANGUAGE INTERNAL as 'gs_stat_undo'; +--create system relation gs_db_privilege and its indexes +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 5566, 5567, 0, 0; +CREATE TABLE pg_catalog.gs_db_privilege +( + roleid Oid NOCOMPRESS not null, + privilege_type text NOCOMPRESS, + admin_option boolean NOCOMPRESS not null +) WITH OIDS TABLESPACE pg_default; + +SET LOCAL 
inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 5568; +CREATE UNIQUE INDEX gs_db_privilege_oid_index ON pg_catalog.gs_db_privilege USING BTREE(oid oid_ops); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 5569; +CREATE INDEX gs_db_privilege_roleid_index ON pg_catalog.gs_db_privilege USING BTREE(roleid oid_ops); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 5570; +CREATE UNIQUE INDEX gs_db_privilege_roleid_privilege_type_index ON pg_catalog.gs_db_privilege + USING BTREE(roleid oid_ops, privilege_type text_ops); + +GRANT SELECT ON pg_catalog.gs_db_privilege TO PUBLIC; + +--create system function has_any_privilege(user, privilege) +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5571; +CREATE OR REPLACE FUNCTION pg_catalog.has_any_privilege(name, text) RETURNS boolean + LANGUAGE INTERNAL STRICT STABLE + AS 'has_any_privilege'; + +--create system view gs_db_privileges +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +CREATE VIEW pg_catalog.gs_db_privileges AS + SELECT + pg_catalog.pg_get_userbyid(roleid) AS rolename, + privilege_type AS privilege_type, + CASE + WHEN admin_option THEN + 'yes' + ELSE + 'no' + END AS admin_option + FROM pg_catalog.gs_db_privilege; + +GRANT SELECT ON pg_catalog.gs_db_privileges TO PUBLIC; +/*------ add sys function gs_undo_record ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_record(int8, OUT undoptr oid, OUT xid oid, OUT cid text, +OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text, +OUT prevurp text, OUT payloadlen text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4439; +CREATE FUNCTION pg_catalog.gs_undo_record(int8, OUT undoptr oid, OUT xid oid, OUT cid text, +OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text, +OUT prevurp text, OUT payloadlen text) +RETURNS record LANGUAGE INTERNAL as 'gs_undo_record'; + +/*------ add sys function gs_undo_meta ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta(int4, int4, int4, OUT zoneId oid, OUT persistType oid, OUT insertptr text, OUT discard text, OUT endptr text, OUT used text, OUT lsn text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4430; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta(int4, int4, int4, OUT zoneId oid, OUT persistType oid, OUT insertptr text, OUT discard text, OUT endptr text, OUT used text, OUT lsn text, OUT pid oid) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta'; + +/* add sys function gs_undo_translot */ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot(int4, int4, OUT grpId oid, OUT xactId text, OUT startUndoPtr text, OUT endUndoPtr text, OUT lsn text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4431; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot(int4, int4, OUT grpId oid, OUT xactId text, OUT startUndoPtr text, OUT endUndoPtr text, OUT lsn text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot'; + +/*------ add sys function gs_index_verify ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_index_verify(oid, oid, OUT ptype text, OUT blkno oid, OUT status text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9150; +CREATE FUNCTION pg_catalog.gs_index_verify(oid, oid, OUT ptype text, OUT blkno oid, OUT status text) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_index_verify'; 
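+-- Illustrative invocation only, not part of the upgrade script: assuming the two +-- IN arguments are the index oid and a block number, the new function could be +-- called as, e.g.: +-- SELECT * FROM pg_catalog.gs_index_verify(:index_oid, 0);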
+/*------ add sys function gs_index_recycle_queue ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_index_recycle_queue(oid, oid, oid, OUT rblkno oid, OUT item_offset oid, OUT xid text, OUT dblkno oid, OUT prev oid, OUT next oid); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9151; +CREATE FUNCTION pg_catalog.gs_index_recycle_queue(oid, oid, oid, OUT rblkno oid, OUT item_offset oid, OUT xid text, OUT dblkno oid, OUT prev oid, OUT next oid) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_index_recycle_queue'; +DROP FUNCTION IF EXISTS pg_catalog.pg_logical_get_area_changes() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4978; +CREATE OR REPLACE FUNCTION pg_catalog.pg_logical_get_area_changes(start_lsn text, upto_lsn text, upto_nchanges integer, plugin name DEFAULT '{}'::text[], xlog_path text, VARIADIC options text[], OUT location text, OUT xid xid, OUT data text) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE COST 1000 +AS $function$pg_logical_get_area_changes$function$; +-- adding system table pg_publication + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 6130, 6141, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_publication +( + pubname name NOCOMPRESS, + pubowner oid NOCOMPRESS, + puballtables bool NOCOMPRESS, + pubinsert bool NOCOMPRESS, + pubupdate bool NOCOMPRESS, + pubdelete bool NOCOMPRESS +) WITH OIDS; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6120; +CREATE UNIQUE INDEX pg_publication_oid_index ON pg_catalog.pg_publication USING BTREE(oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6121; +CREATE UNIQUE INDEX pg_publication_pubname_index ON pg_catalog.pg_publication USING BTREE(pubname); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +GRANT SELECT ON TABLE pg_catalog.pg_publication TO PUBLIC; + +-- adding system table pg_publication_rel + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 6132, 6142, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_publication_rel +( + prpubid oid NOCOMPRESS NOT NULL, + prrelid oid NOCOMPRESS NOT NULL +) WITH OIDS; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6122; +CREATE UNIQUE INDEX pg_publication_rel_oid_index ON pg_catalog.pg_publication_rel USING BTREE(oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6123; +CREATE UNIQUE INDEX pg_publication_rel_map_index ON pg_catalog.pg_publication_rel USING BTREE(prrelid, prpubid); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +GRANT SELECT ON TABLE pg_catalog.pg_publication_rel TO PUBLIC; \ No newline at end of file diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_602.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_602.sql new file mode 100644 index 000000000..a06e71640 --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade-post_catalog_otherdb_92_602.sql @@ -0,0 +1,3664 @@ +DROP FUNCTION IF EXISTS pg_catalog.large_seq_rollback_ntree(pg_node_tree); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6016; +CREATE OR REPLACE FUNCTION pg_catalog.large_seq_rollback_ntree(pg_node_tree) + RETURNS pg_node_tree + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED 
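+-- large_seq_rollback_ntree and its counterpart large_seq_upgrade_ntree convert +-- stored expression node trees between the old and the large-sequence formats; +-- the DO block below applies the upgrade direction to every affected catalog column.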
NOT SHIPPABLE +AS $function$large_sequence_rollback_node_tree$function$; + +DROP FUNCTION IF EXISTS pg_catalog.large_seq_upgrade_ntree(pg_node_tree); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 6017; +CREATE OR REPLACE FUNCTION pg_catalog.large_seq_upgrade_ntree(pg_node_tree) + RETURNS pg_node_tree + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$large_sequence_upgrade_node_tree$function$; + +DO +$do$ +DECLARE +query_str text; +type array_t is varray(10) of varchar2(50); +rel_array array_t := array[ + 'pg_catalog.pg_proc', + 'pg_catalog.pg_type', + 'pg_catalog.pg_attrdef', + 'pg_catalog.pg_constraint', + 'pg_catalog.pg_rewrite', + 'pg_catalog.pg_rewrite', + 'pg_catalog.pg_trigger', + 'pg_catalog.pg_rlspolicy' +]; +att_array array_t := array[ + 'proargdefaults', + 'typdefaultbin', + 'adbin', + 'conbin', + 'ev_qual', + 'ev_action', + 'tgqual', + 'polqual' +]; +BEGIN + raise info 'Processing sequence APIs'; + FOR i IN 1..rel_array.count LOOP + raise info '%.%',rel_array[i],att_array[i]; + query_str := 'UPDATE ' || rel_array[i] || ' SET ' || att_array[i] || ' = large_seq_upgrade_ntree(' || att_array[i] || ' ) WHERE ' || att_array[i] || ' LIKE ''%:funcid 1574 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 1575 :%'' OR ' || att_array[i] || ' LIKE ''%:funcid 2559 :%'';'; + EXECUTE query_str; + END LOOP; +END +$do$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP INDEX IF EXISTS pg_catalog.pg_proc_proname_all_args_nsp_index; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 9666; +CREATE INDEX pg_catalog.pg_proc_proname_all_args_nsp_index on pg_catalog.pg_proc USING BTREE(proname name_ops, allargtypes oidvector_ops, pronamespace oid_ops, propackageid oid_ops); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +REINDEX INDEX pg_catalog.pg_proc_proname_all_args_nsp_index; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5255; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_shared_memctx_detail( +text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_shared_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5256; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_thread_memctx_detail( +int8, +text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_thread_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5254; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_session_memctx_detail( +text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_session_memctx_detail'; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_parallel_decode_status() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9377; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_parallel_decode_status(OUT slot_name text, OUT parallel_decode_num int4, OUT read_change_queue_length text, OUT decode_change_queue_length text) + RETURNS SETOF RECORD + LANGUAGE internal +AS $function$gs_get_parallel_decode_status$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DROP FUNCTION IF 
EXISTS pg_catalog.gs_index_advise(cstring); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC,4888; +CREATE OR REPLACE FUNCTION pg_catalog.gs_index_advise(sql_string cstring, OUT schema text, OUT "table" text, OUT "column" text, OUT indextype text) + RETURNS SETOF record + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE ROWS 100 +AS $function$gs_index_advise$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +-- gs_parse_page_bypath +DROP FUNCTION IF EXISTS pg_catalog.gs_parse_page_bypath(text, bigint, text, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2620; +CREATE OR REPLACE FUNCTION pg_catalog.gs_parse_page_bypath(path text, blocknum bigint, relation_type text, read_memory boolean, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_parse_page_bypath$function$; +comment on function PG_CATALOG.gs_parse_page_bypath(path text, blocknum bigint, relation_type text, read_memory boolean) is 'parse data page to output file based on given filepath'; + +-- gs_xlogdump_lsn +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_lsn(text, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2619; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_lsn(start_lsn text, end_lsn text, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_lsn$function$; +comment on function PG_CATALOG.gs_xlogdump_lsn(start_lsn text, end_lsn text) is 'dump xlog records to output file based on the given start_lsn and end_lsn'; + +-- gs_xlogdump_xid +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_xid(xid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2617; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_xid(c_xid xid, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_xid$function$; +comment on function PG_CATALOG.gs_xlogdump_xid(c_xid xid) is 'dump xlog records to output file based on the given xid'; + +-- gs_xlogdump_tablepath +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_tablepath(text, bigint, text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2616; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_tablepath(path text, blocknum bigint, relation_type text, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_tablepath$function$; +comment on function PG_CATALOG.gs_xlogdump_tablepath(path text, blocknum bigint, relation_type text) is 'dump xlog records to output file based on given filepath'; + +-- gs_xlogdump_parsepage_tablepath +DROP FUNCTION IF EXISTS pg_catalog.gs_xlogdump_parsepage_tablepath(text, bigint, text, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 2618; +CREATE OR REPLACE FUNCTION pg_catalog.gs_xlogdump_parsepage_tablepath(path text, blocknum bigint, relation_type text, read_memory boolean, OUT output_filepath text) + RETURNS text + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_xlogdump_parsepage_tablepath$function$; +comment on function PG_CATALOG.gs_xlogdump_parsepage_tablepath(path text, blocknum bigint, relation_type text, read_memory boolean) is 'parse data page to output file based on given filepath'; +DROP FUNCTION IF EXISTS pg_catalog.local_xlog_redo_statics(); +SET LOCAL 
inplace_upgrade_next_system_object_oids=IUO_PROC, 4390; +CREATE FUNCTION pg_catalog.local_xlog_redo_statics +( +OUT xlog_type pg_catalog.text, +OUT rmid pg_catalog.int4, +OUT info pg_catalog.int4, +OUT num pg_catalog.int8, +OUT extra pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_xlog_redo_statics'; + +DROP FUNCTION IF EXISTS pg_catalog.local_redo_time_count(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4391; +CREATE FUNCTION pg_catalog.local_redo_time_count +( +OUT thread_name pg_catalog.text, +OUT step1_total pg_catalog.int8, +OUT step1_count pg_catalog.int8, +OUT step2_total pg_catalog.int8, +OUT step2_count pg_catalog.int8, +OUT step3_total pg_catalog.int8, +OUT step3_count pg_catalog.int8, +OUT step4_total pg_catalog.int8, +OUT step4_count pg_catalog.int8, +OUT step5_total pg_catalog.int8, +OUT step5_count pg_catalog.int8, +OUT step6_total pg_catalog.int8, +OUT step6_count pg_catalog.int8, +OUT step7_total pg_catalog.int8, +OUT step7_count pg_catalog.int8, +OUT step8_total pg_catalog.int8, +OUT step8_count pg_catalog.int8, +OUT step9_total pg_catalog.int8, +OUT step9_count pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_redo_time_count'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0;DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info, + 
S.unique_sql_id + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT 
query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.unique_sql_id + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text, out unique_sql_id bigint) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + unique_sql_id := row_data.unique_sql_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; 
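+        -- (Illustrative note: the outer loop re-runs this query once per entry in
+        -- dbe_perf.node_name; each fetched row is handed back through RETURN NEXT,
+        -- which is what the dbe_perf.global_operator_runtime view defined below exposes.)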
+ FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + 
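+  -- session_cpu_runtime above and session_memory_runtime below share one shape:
+  -- pg_stat_activity_ng joined to pg_stat_get_wlm_realtime_session_info(NULL) on
+  -- S.pid = T.threadid, giving one row per WLM-tracked active session. A minimal
+  -- illustrative query against such a view (not part of the upgrade itself):
+  --   SELECT pid, start_time, total_cpu_time
+  --   FROM dbe_perf.session_cpu_runtime
+  --   ORDER BY total_cpu_time DESC LIMIT 10;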
CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN 
T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT 
INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; +-- regexp_count +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 385; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_count(text, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$regexp_count_noopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 386; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_count(text, text, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_count_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 387; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_count(text, text, int, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT 
SHIPPABLE +AS $function$regexp_count_matchopt$function$; + +-- regexp_instr +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 630; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_noopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 631; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 632; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_occurren$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 633; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int, int, int) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_returnopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 634; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_instr(text, text, int, int, int, text) + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_instr_matchopt$function$; + +-- regexp_replace +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1116; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_noopt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1117; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text, text, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1118; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text, text, int, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_occur$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1119; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_replace(text, text, text, int, int, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_replace_matchopt$function$; + +-- regexp_substr +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1566; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_substr(text, text, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_substr_with_position$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1567; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_substr(text, text, int, int) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_substr_with_occur$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 1568; +CREATE OR REPLACE FUNCTION pg_catalog.regexp_substr(text, text, int, int, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$regexp_substr_with_opt$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; 
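+-- The DROP / SET LOCAL / CREATE sequence around this point is the pattern used
+-- throughout this script: the inplace_upgrade_next_system_object_oids GUC appears
+-- to pin the OID that the very next CREATE consumes (here IUO_PROC, 4384 is reserved
+-- for local_double_write_stat below), so rebuilt system objects keep stable,
+-- hard-coded OIDs across upgraded installations.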
+CREATE FUNCTION pg_catalog.local_double_write_stat +( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + +DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; +CREATE FUNCTION pg_catalog.remote_double_write_stat +( + OUT node_name pg_catalog.text, + OUT file_id pg_catalog.int8, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + +DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; +CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, file_id, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages + FROM pg_catalog.local_double_write_stat(); + +REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ + +GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_active_archiving_standby(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4579; +CREATE FUNCTION pg_catalog.gs_get_active_archiving_standby +( +OUT standby_name pg_catalog.text, +OUT archive_location pg_catalog.text, +OUT archived_file_num pg_catalog.int4 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_get_active_archiving_standby'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_get_warning_for_xlog_force_recycle(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4582; +CREATE FUNCTION pg_catalog.gs_pitr_get_warning_for_xlog_force_recycle +( +OUT xlog_force_recycled pg_catalog.bool +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_get_warning_for_xlog_force_recycle'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4581; +CREATE FUNCTION pg_catalog.gs_pitr_clean_history_global_barriers +( +IN stop_barrier_timestamp pg_catalog.timestamptz, +OUT oldest_barrier_record pg_catalog.text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_clean_history_global_barriers'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4580; +CREATE FUNCTION 
pg_catalog.gs_pitr_archive_slot_force_advance +( +IN stop_barrier_timestamp pg_catalog.timestamptz, +OUT archive_restart_lsn pg_catalog.text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_archive_slot_force_advance'; +DROP FUNCTION IF EXISTS pg_catalog.gs_get_standby_cluster_barrier_status() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9039; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_standby_cluster_barrier_status +( OUT barrier_id text, + OUT barrier_lsn text, + OUT recovery_id text, + OUT target_id text) +RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_get_standby_cluster_barrier_status'; +DROP FUNCTION IF EXISTS pg_catalog.gs_set_standby_cluster_target_barrier_id() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9037; +CREATE OR REPLACE FUNCTION pg_catalog.gs_set_standby_cluster_target_barrier_id +( IN barrier_id text, + OUT target_id text) +RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_set_standby_cluster_target_barrier_id'; +DROP FUNCTION IF EXISTS pg_catalog.gs_query_standby_cluster_barrier_id_exist() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9038; +CREATE OR REPLACE FUNCTION pg_catalog.gs_query_standby_cluster_barrier_id_exist +( IN barrier_id text, + OUT target_id bool) +RETURNS SETOF record LANGUAGE INTERNAL ROWS 1 STRICT as 'gs_query_standby_cluster_barrier_id_exist'; +do $$DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_streaming_hadr_rto_and_rpo_stat CASCADE; + end if; + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_local_rto_and_rpo_stat(); + DROP FUNCTION IF EXISTS pg_catalog.gs_hadr_remote_rto_and_rpo_stat(); +END$$; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 5077; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_local_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT rto_sleep_time pg_catalog.int8, +OUT rpo_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_local_rto_and_rpo_stat'; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 5078; +CREATE OR REPLACE FUNCTION pg_catalog.gs_hadr_remote_rto_and_rpo_stat +( +OUT hadr_sender_node_name pg_catalog.text, +OUT hadr_receiver_node_name pg_catalog.text, +OUT source_ip pg_catalog.text, +OUT source_port pg_catalog.int4, +OUT dest_ip pg_catalog.text, +OUT dest_port pg_catalog.int4, +OUT current_rto pg_catalog.int8, +OUT target_rto pg_catalog.int8, +OUT current_rpo pg_catalog.int8, +OUT target_rpo pg_catalog.int8, +OUT rto_sleep_time pg_catalog.int8, +OUT rpo_sleep_time pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_hadr_remote_rto_and_rpo_stat'; +CREATE OR REPLACE VIEW DBE_PERF.global_streaming_hadr_rto_and_rpo_stat AS + SELECT hadr_sender_node_name, hadr_receiver_node_name, current_rto, target_rto, current_rpo, target_rpo, rto_sleep_time, rpo_sleep_time +FROM pg_catalog.gs_hadr_local_rto_and_rpo_stat(); +REVOKE ALL on DBE_PERF.global_streaming_hadr_rto_and_rpo_stat FROM PUBLIC; +DECLARE + user_name text; + query_str text; 
+BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT ALL ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ +GRANT SELECT ON TABLE DBE_PERF.global_streaming_hadr_rto_and_rpo_stat TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +CREATE OR REPLACE VIEW pg_catalog.gs_gsc_memory_detail AS + SELECT db_id, sum(totalsize) AS totalsize, sum(freesize) AS freesize, sum(usedsize) AS usedsize + FROM ( + SELECT + CASE WHEN contextname like '%GlobalSysDBCacheEntryMemCxt%' THEN substring(contextname, 29) + ELSE substring(parent, 29) END AS db_id, + totalsize, + freesize, + usedsize + FROM pg_shared_memory_detail() + WHERE contextname LIKE '%GlobalSysDBCacheEntryMemCxt%' OR parent LIKE '%GlobalSysDBCacheEntryMemCxt%' + )a + GROUP BY db_id; +GRANT SELECT ON TABLE pg_catalog.gs_gsc_memory_detail TO PUBLIC; +CREATE OR REPLACE VIEW pg_catalog.gs_lsc_memory_detail AS +SELECT * FROM pv_thread_memory_detail() WHERE contextname LIKE '%LocalSysCache%' OR parent LIKE '%LocalSysCache%'; +GRANT SELECT ON TABLE pg_catalog.gs_lsc_memory_detail TO PUBLIC; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9123; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_table_detail(database_oid bigint DEFAULT NULL::bigint, rel_oid bigint DEFAULT NULL::bigint, OUT database_oid oid, OUT database_name text, OUT reloid oid, OUT relname text, OUT relnamespace oid, OUT reltype oid, OUT reloftype oid, OUT relowner oid, OUT relam oid, OUT relfilenode oid, OUT reltablespace oid, OUT relhasindex boolean, OUT relisshared boolean, OUT relkind "char", OUT relnatts smallint, OUT relhasoids boolean, OUT relhaspkey boolean, OUT parttype "char", OUT tdhasuids boolean, OUT attnames text, OUT extinfo text) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$gs_gsc_table_detail$function$; + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9122; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_catalog_detail(database_id bigint DEFAULT NULL::bigint, rel_id bigint DEFAULT NULL::bigint, OUT database_id bigint, OUT database_name text, OUT rel_id bigint, OUT rel_name text, OUT cache_id bigint, OUT self text, OUT ctid text, OUT infomask bigint, OUT infomask2 bigint, OUT hash_value bigint, OUT refcount bigint) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$gs_gsc_catalog_detail$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9121; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_dbstat_info(database_id bigint DEFAULT NULL::bigint, OUT database_id bigint, OUT database_name text, OUT tup_searches bigint, OUT tup_hits bigint, OUT tup_miss bigint, OUT tup_count bigint, OUT tup_dead bigint, OUT tup_memory bigint, OUT rel_searches bigint, OUT rel_hits bigint, OUT rel_miss bigint, OUT rel_count bigint, OUT rel_dead bigint, OUT rel_memory bigint, OUT part_searches bigint, OUT part_hits bigint, OUT part_miss bigint, OUT part_count bigint, OUT part_dead bigint, OUT part_memory bigint, OUT total_memory bigint, OUT swapout_count bigint, OUT refcount bigint) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE ROWS 100 +AS $function$gs_gsc_dbstat_info$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9120; +CREATE OR REPLACE FUNCTION pg_catalog.gs_gsc_clean(database_id bigint DEFAULT NULL::bigint) + RETURNS boolean + LANGUAGE internal + NOT FENCED NOT SHIPPABLE 
+AS $function$gs_gsc_clean$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_ec_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.gs_wlm_operator_statistics CASCADE; +DROP VIEW IF EXISTS pg_catalog.pg_get_invalid_backends CASCADE; + +DROP VIEW IF EXISTS pg_catalog.pg_stat_activity cascade; +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity_with_conninfo(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT connection_info text, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4212; +CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity_with_conninfo +( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT connection_info text, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint, + OUT trace_id text +) +RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity_with_conninfo'; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.connection_info, + S.unique_sql_id, + S.trace_id + FROM pg_database D, pg_stat_get_activity_with_conninfo(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_ec_operator_statistics AS +SELECT + t.queryid, + t.plan_node_id, + t.start_time, + t.ec_status, + t.ec_execute_datanode, + t.ec_dsn, + t.ec_username, + t.ec_query, + t.ec_libodbc_type, + t.ec_fetch_count +FROM pg_catalog.pg_stat_activity AS s, pg_catalog.pg_stat_get_wlm_realtime_ec_operator_info(NULL) as t +where s.query_id = t.queryid and t.ec_operator > 0; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_operator_statistics AS +SELECT t.* +FROM pg_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t +where s.query_id = t.queryid; + +CREATE OR REPLACE VIEW pg_catalog.pg_get_invalid_backends AS + SELECT + C.pid, + C.node_name, + S.datname AS dbname, + S.backend_start, + S.query + FROM pg_pool_validate(false, ' ') AS C LEFT JOIN pg_stat_activity AS S + ON (C.pid = S.sessionid); + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where 
nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP VIEW IF EXISTS DBE_PERF.global_session_stat_activity cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_session_stat_activity() cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_operator_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_operator_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.operator_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.session_stat_activity cascade; + + DROP VIEW IF EXISTS DBE_PERF.global_replication_stat cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_replication_stat() cascade; + DROP VIEW IF EXISTS DBE_PERF.replication_stat cascade; + + + DROP VIEW IF EXISTS DBE_PERF.session_cpu_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.session_memory_runtime cascade; + DROP VIEW IF EXISTS DBE_PERF.global_statement_complex_runtime cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_statement_complex_runtime() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_complex_runtime cascade; + + DROP VIEW IF EXISTS DBE_PERF.statement_iostat_complex_runtime cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_memory_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.pg_session_iostat cascade; + DROP VIEW IF EXISTS pg_catalog.gs_session_cpu_statistics cascade; + DROP VIEW IF EXISTS pg_catalog.gs_wlm_session_statistics cascade; + + DROP VIEW IF EXISTS pg_catalog.pg_stat_activity_ng cascade; + DROP VIEW IF EXISTS pg_catalog.pg_stat_replication cascade; + + DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_activity(IN pid bigint, OUT datid oid, OUT pid bigint, OUT sessionid bigint, OUT usesysid oid, OUT application_name text, OUT state text, OUT query text, OUT waiting boolean, OUT xact_start timestamp with time zone, OUT query_start timestamp with time zone, OUT backend_start timestamp with time zone, OUT state_change timestamp with time zone, OUT client_addr inet, OUT client_hostname text, OUT client_port integer, OUT enqueue text, OUT query_id bigint, OUT srespool name, OUT global_sessionid text, OUT unique_sql_id bigint) cascade; + + SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2022; + CREATE OR REPLACE FUNCTION pg_catalog.pg_stat_get_activity + ( + IN pid bigint, + OUT datid oid, + OUT pid bigint, + OUT sessionid bigint, + OUT usesysid oid, + OUT application_name text, + OUT state text, + OUT query text, + OUT waiting boolean, + OUT xact_start timestamp with time zone, + OUT query_start timestamp with time zone, + OUT backend_start timestamp with time zone, + OUT state_change timestamp with time zone, + OUT client_addr inet, + OUT client_hostname text, + OUT client_port integer, + OUT enqueue text, + OUT query_id bigint, + OUT srespool name, + OUT global_sessionid text, + OUT unique_sql_id bigint, + OUT trace_id text + ) + RETURNS setof record LANGUAGE INTERNAL VOLATILE NOT FENCED as 'pg_stat_get_activity'; + + CREATE OR REPLACE VIEW dbe_perf.session_stat_activity AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + S.unique_sql_id, + S.trace_id + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid; + + + CREATE OR REPLACE FUNCTION 
dbe_perf.get_global_session_stat_activity + (out coorname text, out datid oid, out datname text, out pid bigint, + out usesysid oid, out usename text, out application_name text, out client_addr inet, + out client_hostname text, out client_port integer, out backend_start timestamptz, + out xact_start timestamptz, out query_start timestamptz, out state_change timestamptz, + out waiting boolean, out enqueue text, out state text, out resource_pool name, + out query_id bigint, out query text, out unique_sql_id bigint, out trace_id text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.session_stat_activity%rowtype; + coor_name record; + fet_active text; + fetch_coor text; + BEGIN + --Get all cn node names + fetch_coor := 'select * from dbe_perf.node_name'; + FOR coor_name IN EXECUTE(fetch_coor) LOOP + coorname := coor_name.node_name; + fet_active := 'SELECT * FROM dbe_perf.session_stat_activity'; + FOR row_data IN EXECUTE(fet_active) LOOP + coorname := coorname; + datid :=row_data.datid; + datname := row_data.datname; + pid := row_data.pid; + usesysid :=row_data.usesysid; + usename := row_data.usename; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_hostname :=row_data.client_hostname; + client_port :=row_data.client_port; + backend_start := row_data.backend_start; + xact_start := row_data.xact_start; + query_start := row_data.query_start; + state_change := row_data.state_change; + waiting := row_data.waiting; + enqueue := row_data.enqueue; + state := row_data.state; + resource_pool :=row_data.resource_pool; + query_id :=row_data.query_id; + query := row_data.query; + unique_sql_id := row_data.unique_sql_id; + trace_id := row_data.trace_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + + CREATE OR REPLACE VIEW DBE_PERF.global_session_stat_activity AS + SELECT * FROM DBE_PERF.get_global_session_stat_activity(); + + CREATE OR REPLACE VIEW dbe_perf.operator_runtime AS + SELECT t.* + FROM dbe_perf.session_stat_activity AS s, pg_stat_get_wlm_realtime_operator_info(NULL) as t + WHERE s.query_id = t.queryid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_operator_runtime() + RETURNS setof dbe_perf.operator_runtime + AS $$ + DECLARE + row_data dbe_perf.operator_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.operator_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_operator_runtime AS + SELECT * FROM dbe_perf.get_global_operator_runtime(); + + + CREATE OR REPLACE VIEW dbe_perf.replication_stat AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_replication_stat + (OUT node_name name, + OUT pid bigint, + OUT usesysid oid, + OUT usename name, + OUT application_name text, + OUT client_addr inet, + OUT client_hostname text, + OUT 
client_port integer, + OUT backend_start timestamp with time zone, + OUT state text, + OUT sender_sent_location text, + OUT receiver_write_location text, + OUT receiver_flush_location text, + OUT receiver_replay_location text, + OUT sync_priority integer, + OUT sync_state text) + RETURNS setof record + AS $$ + DECLARE + row_data dbe_perf.replication_stat%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + --Get all the node names + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.replication_stat'; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := row_name.node_name; + pid := row_data.pid; + usesysid := row_data.usesysid; + usename := row_data.usename; + client_addr := row_data.client_addr; + client_hostname := row_data.client_hostname; + client_port := row_data.client_port; + state := row_data.state; + sender_sent_location := row_data.sender_sent_location; + receiver_write_location := row_data.receiver_write_location; + receiver_flush_location := row_data.receiver_flush_location; + receiver_replay_location := row_data.receiver_replay_location; + sync_priority := row_data.sync_priority; + sync_state := row_data.sync_state; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_replication_stat AS + SELECT * FROM dbe_perf.get_global_replication_stat(); + + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_activity_ng AS + SELECT + S.datid AS datid, + D.datname AS datname, + S.pid, + S.sessionid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + S.xact_start, + S.query_start, + S.state_change, + S.waiting, + S.enqueue, + S.state, + CASE + WHEN S.srespool = 'unknown' THEN (U.rolrespool) :: name + ELSE S.srespool + END AS resource_pool, + S.query_id, + S.query, + N.node_group + FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_stat_get_activity_ng(NULL) AS N, pg_authid U + WHERE S.datid = D.oid AND + S.usesysid = U.oid AND + S.sessionid = N.sessionid; + + + CREATE OR REPLACE VIEW dbe_perf.session_cpu_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.session_memory_runtime AS + SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE VIEW dbe_perf.statement_complex_runtime AS + SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, 
+ T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T + WHERE S.pid = T.threadid; + + + CREATE OR REPLACE FUNCTION dbe_perf.get_global_statement_complex_runtime() + RETURNS setof dbe_perf.statement_complex_runtime + AS $$ + DECLARE + row_data dbe_perf.statement_complex_runtime%rowtype; + row_name record; + query_str text; + query_str_nodes text; + BEGIN + query_str_nodes := 'select * from dbe_perf.node_name'; + FOR row_name IN EXECUTE(query_str_nodes) LOOP + query_str := 'SELECT * FROM dbe_perf.statement_complex_runtime'; + FOR row_data IN EXECUTE(query_str) LOOP + return next row_data; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE VIEW dbe_perf.global_statement_complex_runtime AS + SELECT * FROM dbe_perf.get_global_statement_complex_runtime(); + + CREATE OR REPLACE VIEW dbe_perf.statement_iostat_complex_runtime AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 20 THEN 'Low'::text + WHEN T.io_priority = 50 THEN 'Medium'::text + WHEN T.io_priority = 80 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits + FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T + WHERE S.pid = T.threadid; + + end if; +END$DO$; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_memory_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_peak_memory, + T.max_peak_memory, + T.spill_info, + S.query, + S.node_group, + T.top_mem_dn +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_session_iostat AS + SELECT + S.query_id, + T.mincurr_iops as mincurriops, + T.maxcurr_iops as maxcurriops, + T.minpeak_iops as minpeakiops, + T.maxpeak_iops as maxpeakiops, + T.iops_limits as io_limits, + CASE WHEN T.io_priority = 0 THEN 'None'::text + WHEN T.io_priority = 10 THEN 'Low'::text + WHEN T.io_priority = 20 THEN 'Medium'::text + WHEN T.io_priority = 50 THEN 'High'::text END AS io_priority, + S.query, + S.node_group, + T.curr_io_limits as curr_io_limits +FROM pg_catalog.pg_stat_activity_ng AS S, pg_catalog.pg_stat_get_wlm_session_iostat_info(0) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_session_cpu_statistics AS +SELECT + S.datid AS datid, + S.usename, + S.pid, + S.query_start AS start_time, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + S.query, + S.node_group, + T.top_cpu_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.gs_wlm_session_statistics AS +SELECT + S.datid AS datid, + S.datname AS dbname, + T.schemaname, + T.nodename, + S.usename AS username, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + T.query_band, + S.pid, + 
S.sessionid, + T.block_time, + S.query_start AS start_time, + T.duration, + T.estimate_total_time, + T.estimate_left_time, + S.enqueue, + S.resource_pool, + T.control_group, + T.estimate_memory, + T.min_peak_memory, + T.max_peak_memory, + T.average_peak_memory, + T.memory_skew_percent, + T.spill_info, + T.min_spill_size, + T.max_spill_size, + T.average_spill_size, + T.spill_skew_percent, + T.min_dn_time, + T.max_dn_time, + T.average_dn_time, + T.dntime_skew_percent, + T.min_cpu_time, + T.max_cpu_time, + T.total_cpu_time, + T.cpu_skew_percent, + T.min_peak_iops, + T.max_peak_iops, + T.average_peak_iops, + T.iops_skew_percent, + T.warning, + S.query_id AS queryid, + T.query, + T.query_plan, + S.node_group, + T.top_cpu_dn, + T.top_mem_dn +FROM pg_stat_activity_ng AS S, pg_stat_get_wlm_realtime_session_info(NULL) AS T +WHERE S.pid = T.threadid; + +CREATE OR REPLACE VIEW pg_catalog.pg_stat_replication AS + SELECT + S.pid, + S.usesysid, + U.rolname AS usename, + S.application_name, + S.client_addr, + S.client_hostname, + S.client_port, + S.backend_start, + W.state, + W.sender_sent_location, + W.receiver_write_location, + W.receiver_flush_location, + W.receiver_replay_location, + W.sync_priority, + W.sync_state + FROM pg_stat_get_activity(NULL) AS S, pg_authid U, + pg_stat_get_wal_senders() AS W + WHERE S.usesysid = U.oid AND + S.pid = W.pid; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DO $DO$ +DECLARE + ans boolean; + user_name text; + query_str text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_session_stat_activity TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_session_stat_activity TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_operator_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_operator_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.replication_stat TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_replication_stat TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_replication_stat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity_ng TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_cpu_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE 
DBE_PERF.session_cpu_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.session_memory_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.session_memory_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.global_statement_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.global_statement_complex_runtime TO PUBLIC; + + query_str := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE DBE_PERF.statement_iostat_complex_runtime TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + GRANT SELECT ON TABLE DBE_PERF.statement_iostat_complex_runtime TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_memory_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_session_iostat TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_session_cpu_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_session_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_replication TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_stat_activity TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_ec_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.gs_wlm_operator_statistics TO PUBLIC; + + GRANT SELECT ON TABLE pg_catalog.pg_get_invalid_backends TO PUBLIC; + end if; +END$DO$; + +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + DROP FUNCTION IF EXISTS DBE_PERF.get_global_full_sql_by_timestamp() cascade; + DROP FUNCTION IF EXISTS DBE_PERF.get_global_slow_sql_by_timestamp() cascade; + DROP VIEW IF EXISTS DBE_PERF.statement_history cascade; + end if; +END$DO$; + +DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; +DROP TABLE IF EXISTS pg_catalog.statement_history cascade; + +CREATE unlogged table IF NOT EXISTS pg_catalog.statement_history( + db_name name, + schema_name name, + origin_node integer, + user_name name, + application_name text, + client_addr text, + client_port integer, + unique_query_id bigint, + debug_query_id bigint, + query text, + start_time timestamp with time zone, + finish_time timestamp with time zone, + slow_sql_threshold bigint, + transaction_id bigint, + thread_id bigint, + session_id bigint, + n_soft_parse bigint, + n_hard_parse bigint, + query_plan text, + n_returned_rows bigint, + n_tuples_fetched bigint, + n_tuples_returned bigint, + n_tuples_inserted bigint, + n_tuples_updated bigint, + n_tuples_deleted bigint, + n_blocks_fetched bigint, + n_blocks_hit bigint, + db_time bigint, + cpu_time bigint, + execution_time bigint, + parse_time bigint, + plan_time bigint, + rewrite_time bigint, + pl_execution_time bigint, + pl_compilation_time bigint, + data_io_time bigint, + net_send_info text, + net_recv_info text, + net_stream_send_info text, + net_stream_recv_info text, + lock_count bigint, + lock_time bigint, + lock_wait_count bigint, + lock_wait_time bigint, + lock_max_count bigint, + lwlock_count bigint, + lwlock_wait_count 
bigint, + lwlock_time bigint, + lwlock_wait_time bigint, + details bytea, + is_slow_sql boolean, + trace_id text +); +REVOKE ALL on table pg_catalog.statement_history FROM public; +create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + +DO $DO$ +DECLARE + ans boolean; + username text; + querystr text; +BEGIN + select case when count(*)=1 then true else false end as ans from (select nspname from pg_namespace where nspname='dbe_perf' limit 1) into ans; + if ans = true then + CREATE VIEW DBE_PERF.statement_history AS + select * from pg_catalog.statement_history; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_full_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) + node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''''''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := 
row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + CREATE OR REPLACE FUNCTION DBE_PERF.get_global_slow_sql_by_timestamp + (in start_timestamp timestamp with time zone, + in end_timestamp timestamp with time zone, + OUT node_name name, + OUT db_name name, + OUT schema_name name, + OUT origin_node integer, + OUT user_name name, + OUT application_name text, + OUT client_addr text, + OUT client_port integer, + OUT unique_query_id bigint, + OUT debug_query_id bigint, + OUT query text, + OUT start_time timestamp with time zone, + OUT finish_time timestamp with time zone, + OUT slow_sql_threshold bigint, + OUT transaction_id bigint, + OUT thread_id bigint, + OUT session_id bigint, + OUT n_soft_parse bigint, + OUT n_hard_parse bigint, + OUT query_plan text, + OUT n_returned_rows bigint, + OUT n_tuples_fetched bigint, + OUT n_tuples_returned bigint, + OUT n_tuples_inserted bigint, + OUT n_tuples_updated bigint, + OUT n_tuples_deleted bigint, + OUT n_blocks_fetched bigint, + OUT n_blocks_hit bigint, + OUT db_time bigint, + OUT cpu_time bigint, + OUT execution_time bigint, + OUT parse_time bigint, + OUT plan_time bigint, + OUT rewrite_time bigint, + OUT pl_execution_time bigint, + OUT pl_compilation_time bigint, + OUT data_io_time bigint, + OUT net_send_info text, + OUT net_recv_info text, + OUT net_stream_send_info text, + OUT net_stream_recv_info text, + OUT lock_count bigint, + OUT lock_time bigint, + OUT lock_wait_count bigint, + OUT lock_wait_time bigint, + OUT lock_max_count bigint, + OUT lwlock_count bigint, + OUT lwlock_wait_count bigint, + OUT lwlock_time bigint, + OUT lwlock_wait_time bigint, + OUT details bytea, + OUT is_slow_sql boolean, + OUT trace_id text) + RETURNS setof record + AS $$ + DECLARE + row_data pg_catalog.statement_history%rowtype; + row_name record; + query_str text; + -- node name + node_names name[]; + each_node_name name; + BEGIN + -- Get all node names(CN + master DN) 
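+      -- (Quoting note: the statement below is assembled as a string for
+      -- EXECUTE DIRECT, so quotes nest two levels deep. Each doubled quote in
+      -- this function's source yields one quote in the generated command, and
+      -- each run of four quotes yields the doubled quote that wraps the
+      -- timestamp literals spliced in from $1 and $2 inside the remote SELECT.)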
+ node_names := ARRAY(SELECT pgxc_node.node_name FROM pgxc_node WHERE (node_type = 'C' or node_type = 'D') AND nodeis_active = true); + FOREACH each_node_name IN ARRAY node_names + LOOP + query_str := 'EXECUTE DIRECT ON (' || each_node_name || ') ''SELECT * FROM DBE_PERF.statement_history where start_time >= ''''' ||$1|| ''''' and start_time <= ''''' || $2 || ''''' and (extract(epoch from (finish_time - start_time)) * 1000000) >= slow_sql_threshold '''; + FOR row_data IN EXECUTE(query_str) LOOP + node_name := each_node_name; + db_name := row_data.db_name; + schema_name := row_data.schema_name; + origin_node := row_data.origin_node; + user_name := row_data.user_name; + application_name := row_data.application_name; + client_addr := row_data.client_addr; + client_port := row_data.client_port; + unique_query_id := row_data.unique_query_id; + debug_query_id := row_data.debug_query_id; + query := row_data.query; + start_time := row_data.start_time; + finish_time := row_data.finish_time; + slow_sql_threshold := row_data.slow_sql_threshold; + transaction_id := row_data.transaction_id; + thread_id := row_data.thread_id; + session_id := row_data.session_id; + n_soft_parse := row_data.n_soft_parse; + n_hard_parse := row_data.n_hard_parse; + query_plan := row_data.query_plan; + n_returned_rows := row_data.n_returned_rows; + n_tuples_fetched := row_data.n_tuples_fetched; + n_tuples_returned := row_data.n_tuples_returned; + n_tuples_inserted := row_data.n_tuples_inserted; + n_tuples_updated := row_data.n_tuples_updated; + n_tuples_deleted := row_data.n_tuples_deleted; + n_blocks_fetched := row_data.n_blocks_fetched; + n_blocks_hit := row_data.n_blocks_hit; + db_time := row_data.db_time; + cpu_time := row_data.cpu_time; + execution_time := row_data.execution_time; + parse_time := row_data.parse_time; + plan_time := row_data.plan_time; + rewrite_time := row_data.rewrite_time; + pl_execution_time := row_data.pl_execution_time; + pl_compilation_time := row_data.pl_compilation_time; + data_io_time := row_data.data_io_time; + net_send_info := row_data.net_send_info; + net_recv_info := row_data.net_recv_info; + net_stream_send_info := row_data.net_stream_send_info; + net_stream_recv_info := row_data.net_stream_recv_info; + lock_count := row_data.lock_count; + lock_time := row_data.lock_time; + lock_wait_count := row_data.lock_wait_count; + lock_wait_time := row_data.lock_wait_time; + lock_max_count := row_data.lock_max_count; + lwlock_count := row_data.lwlock_count; + lwlock_wait_count := row_data.lwlock_wait_count; + lwlock_time := row_data.lwlock_time; + lwlock_wait_time := row_data.lwlock_wait_time; + details := row_data.details; + is_slow_sql := row_data.is_slow_sql; + trace_id := row_data.trace_id; + return next; + END LOOP; + END LOOP; + return; + END; $$ + LANGUAGE 'plpgsql' NOT FENCED; + + DROP FUNCTION IF EXISTS pg_catalog.statement_detail_decode() CASCADE; + set local inplace_upgrade_next_system_object_oids = IUO_PROC, 5732; + CREATE OR REPLACE FUNCTION pg_catalog.statement_detail_decode + ( IN text, + IN text, + IN boolean) + RETURNS text LANGUAGE INTERNAL NOT FENCED as 'statement_detail_decode'; + + SELECT SESSION_USER INTO username; + IF EXISTS (SELECT oid FROM pg_catalog.pg_class WHERE relname='statement_history') THEN + querystr := 'REVOKE ALL ON TABLE dbe_perf.statement_history FROM ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'REVOKE ALL ON TABLE pg_catalog.statement_history FROM ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 
'REVOKE SELECT on table dbe_perf.statement_history FROM public;'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE dbe_perf.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + querystr := 'GRANT INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER ON TABLE pg_catalog.statement_history TO ' || quote_ident(username) || ';'; + EXECUTE IMMEDIATE querystr; + GRANT SELECT ON TABLE DBE_PERF.statement_history TO PUBLIC; + END IF; + end if; +END$DO$;DROP FUNCTION IF EXISTS pg_catalog.gs_get_shared_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5255; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_shared_memctx_detail( +IN context_name text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_shared_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_thread_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5256; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_thread_memctx_detail( +IN threadid int8, +IN context_name text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_thread_memctx_detail'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_get_session_memctx_detail() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5254; +CREATE OR REPLACE FUNCTION pg_catalog.gs_get_session_memctx_detail( +IN context_name text, +OUT file text, +OUT line int8, +OUT size int8) RETURNS SETOF RECORD LANGUAGE INTERNAL as 'gs_get_session_memctx_detail';DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_set() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3268; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_set +(text, OUT set_ok boolean) +RETURNS SETOF boolean LANGUAGE INTERNAL ROWS 1 STRICT as 'pgxc_disaster_read_set'; + +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_init() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3269; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_init +(OUT init_ok boolean) +RETURNS SETOF boolean LANGUAGE INTERNAL ROWS 1 STRICT as 'pgxc_disaster_read_init'; + +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_clear() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3271; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_clear +(OUT clear_ok boolean) +RETURNS SETOF boolean LANGUAGE INTERNAL ROWS 1 STRICT as 'pgxc_disaster_read_clear'; + +DROP FUNCTION IF EXISTS pg_catalog.pgxc_disaster_read_status() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 3273; +CREATE FUNCTION pg_catalog.pgxc_disaster_read_status +( +OUT node_oid pg_catalog.oid, +OUT node_type pg_catalog.text, +OUT host pg_catalog.text, +OUT port pg_catalog.int4, +OUT host1 pg_catalog.text, +OUT port1 pg_catalog.int4, +OUT xlogMaxCSN pg_catalog.int8, +OUT consistency_point_csn pg_catalog.int8 +) +RETURNS SETOF record LANGUAGE INTERNAL VOLATILE ROWS 100 COST 100 STRICT as 'pgxc_disaster_read_status';DROP FUNCTION IF EXISTS pg_catalog.gs_verify_data_file(bool) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4571; +CREATE OR REPLACE FUNCTION pg_catalog.gs_verify_data_file(verify_segment boolean DEFAULT false, OUT node_name text, OUT rel_oid oid, OUT rel_name text, OUT miss_file_path text) + RETURNS SETOF record + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_verify_data_file$function$; + + 
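+-- The routines recreated in this block form one repair workflow:
+-- gs_verify_data_file above reports data files a node believes are missing,
+-- and the gs_repair_file/gs_repair_page functions below fetch replacements
+-- from a healthy copy. Kept as a comment so the upgrade script stays inert,
+-- an illustrative probe of the entry point (assuming a running cluster and
+-- maintenance privileges) would be:
+--   SELECT node_name, rel_oid, rel_name, miss_file_path
+--   FROM pg_catalog.gs_verify_data_file(false);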
+DROP FUNCTION IF EXISTS pg_catalog.gs_repair_file(Oid, text, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4771; +CREATE OR REPLACE FUNCTION pg_catalog.gs_repair_file(tableoid oid, path text, timeout integer) + RETURNS SETOF boolean + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_repair_file$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_verify_and_tryrepair_page(text, oid, boolean, boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4569; +CREATE OR REPLACE FUNCTION pg_catalog.gs_verify_and_tryrepair_page(path text, blocknum oid, verify_mem boolean, is_segment boolean, OUT node_name text, OUT path text, OUT blocknum oid, OUT disk_page_res text, OUT mem_page_res text, OUT is_repair boolean) + RETURNS SETOF record + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_verify_and_tryrepair_page$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_repair_page(text, oid, bool, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4570; +CREATE OR REPLACE FUNCTION pg_catalog.gs_repair_page(path text, blocknum oid, is_segment boolean, timeout integer, OUT result boolean) + RETURNS SETOF boolean + LANGUAGE internal + STABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_repair_page$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.local_bad_block_info() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4567; +CREATE OR REPLACE FUNCTION pg_catalog.local_bad_block_info(OUT node_name text, OUT spc_node oid, OUT db_node oid, OUT rel_node oid, OUT bucket_node integer, OUT fork_num integer, OUT block_num integer, OUT file_path text, OUT check_time timestamp with time zone, OUT repair_time timestamp with time zone) + RETURNS SETOF record + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE +AS $function$local_bad_block_info$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.local_clear_bad_block_info() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4568; +CREATE OR REPLACE FUNCTION pg_catalog.local_clear_bad_block_info(OUT result boolean) + RETURNS SETOF boolean + LANGUAGE internal + STABLE NOT FENCED NOT SHIPPABLE +AS $function$local_clear_bad_block_info$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_from_remote(oid, oid, oid, integer, integer, integer, xid, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4768; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_file_from_remote(oid, oid, oid, integer, integer, integer, xid, integer, OUT bytea, OUT xid) + RETURNS record + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_file_from_remote$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, integer, integer, xid, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4769; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_file_size_from_remote(oid, oid, oid, integer, integer, xid, integer, OUT bigint) + RETURNS bigint + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_file_size_from_remote$function$; + + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4770; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, 
integer, xid, integer, xid, integer, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_segment_block_from_remote$function$; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +-- adding system table pg_subscription + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, false, 6126, 6128, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_subscription +( + subdbid oid NOCOMPRESS NOT NULL, + subname name NOCOMPRESS, + subowner oid NOCOMPRESS, + subenabled bool NOCOMPRESS, + subconninfo text NOCOMPRESS, + subslotname name NOCOMPRESS, + subsynccommit text NOCOMPRESS, + subpublications text[] NOCOMPRESS +) WITH OIDS TABLESPACE pg_global; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, false, 0, 0, 0, 6124; +CREATE UNIQUE INDEX pg_subscription_oid_index ON pg_catalog.pg_subscription USING BTREE(oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, false, 0, 0, 0, 6125; +CREATE UNIQUE INDEX pg_subscription_subname_index ON pg_catalog.pg_subscription USING BTREE(subdbid, subname); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE pg_catalog.pg_subscription TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ + +-- adding system table pg_replication_origin + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, false, 6134, 6143, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_replication_origin +( + roident oid NOCOMPRESS NOT NULL, + roname text NOCOMPRESS +) TABLESPACE pg_global; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, false, 0, 0, 0, 6136; +CREATE UNIQUE INDEX pg_replication_origin_roident_index ON pg_catalog.pg_replication_origin USING BTREE(roident OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, true, false, 0, 0, 0, 6137; +CREATE UNIQUE INDEX pg_replication_origin_roname_index ON pg_catalog.pg_replication_origin USING BTREE(roname TEXT_PATTERN_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +GRANT SELECT ON TABLE pg_catalog.pg_replication_origin TO PUBLIC; + +-- adding function pg_replication_origin_create +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_create(IN node_name text, OUT replication_origin_oid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2635; +CREATE FUNCTION pg_catalog.pg_replication_origin_create(IN node_name text, OUT replication_origin_oid oid) RETURNS oid LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_create'; + +-- adding function pg_replication_origin_drop +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_drop(IN node_name text, OUT replication_origin_oid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2636; +CREATE FUNCTION pg_catalog.pg_replication_origin_drop(IN node_name text, OUT replication_origin_oid oid) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_drop'; + +-- adding function pg_replication_origin_oid +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_oid(IN node_name text, OUT replication_origin_oid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2637; +CREATE FUNCTION 
pg_catalog.pg_replication_origin_oid(IN node_name text, OUT replication_origin_oid oid) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_oid'; + +-- adding function pg_replication_origin_session_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_setup(IN node_name text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2751; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_setup(IN node_name text) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_setup'; + +-- adding function pg_replication_origin_session_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_reset() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2750; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_reset() RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_reset'; + +-- adding function pg_replication_origin_session_is_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_is_setup() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2639; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_is_setup() RETURNS boolean LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_is_setup'; + +-- adding function pg_replication_origin_session_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_session_progress(IN flush boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2640; +CREATE FUNCTION pg_catalog.pg_replication_origin_session_progress(IN flush boolean) RETURNS record LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_session_progress'; + +-- adding function pg_replication_origin_xact_setup +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_setup(IN origin_lsn text, IN origin_timestamp timestamp with time zone) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2799; +CREATE FUNCTION pg_catalog.pg_replication_origin_xact_setup(IN origin_lsn text, IN origin_timestamp timestamp with time zone) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_xact_setup'; + +-- adding function pg_replication_origin_xact_reset +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_xact_reset() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2752; +CREATE FUNCTION pg_catalog.pg_replication_origin_xact_reset() RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_xact_reset'; + +-- adding function pg_replication_origin_advance +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_advance(IN node_name text, IN lsn text) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2634; +CREATE FUNCTION pg_catalog.pg_replication_origin_advance(IN node_name text, IN lsn text) RETURNS void LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_advance'; + +-- adding function pg_replication_origin_progress +DROP FUNCTION IF EXISTS pg_catalog.pg_replication_origin_progress(IN node_name text, IN flush boolean) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2638; +CREATE FUNCTION pg_catalog.pg_replication_origin_progress(IN node_name text, IN flush boolean) RETURNS record LANGUAGE INTERNAL STRICT AS 'pg_replication_origin_progress'; + +-- adding function pg_show_replication_origin_status +DROP FUNCTION IF EXISTS pg_catalog.pg_show_replication_origin_status(OUT local_id oid, OUT external_id text, OUT remote_lsn text, OUT local_lsn text) CASCADE; +SET LOCAL 
inplace_upgrade_next_system_object_oids = IUO_PROC, 2800; +CREATE FUNCTION pg_catalog.pg_show_replication_origin_status(OUT local_id oid, OUT external_id text, OUT remote_lsn text, OUT local_lsn text) RETURNS SETOF record LANGUAGE INTERNAL STABLE ROWS 100 AS 'pg_show_replication_origin_status'; + +-- adding function pg_get_publication_tables +DROP FUNCTION IF EXISTS pg_catalog.pg_get_publication_tables(IN pubname text, OUT relid oid) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2801; +CREATE FUNCTION pg_catalog.pg_get_publication_tables(IN pubname text, OUT relid oid) RETURNS SETOF oid LANGUAGE INTERNAL STABLE STRICT AS 'pg_get_publication_tables'; + +-- adding function pg_stat_get_subscription +DROP FUNCTION IF EXISTS pg_catalog.pg_stat_get_subscription(IN subid oid, OUT subid oid, OUT pid integer, OUT received_lsn text, OUT last_msg_send_time timestamp with time zone, OUT last_msg_receipt_time timestamp with time zone, OUT latest_end_lsn text, OUT latest_end_time timestamp with time zone) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2802; +CREATE FUNCTION pg_catalog.pg_stat_get_subscription(IN subid oid, OUT subid oid, OUT pid integer, OUT received_lsn text, OUT last_msg_send_time timestamp with time zone, OUT last_msg_receipt_time timestamp with time zone, OUT latest_end_lsn text, OUT latest_end_time timestamp with time zone) RETURNS record LANGUAGE INTERNAL STABLE AS 'pg_stat_get_subscription'; + +-- adding system view +DROP VIEW IF EXISTS pg_catalog.pg_publication_tables CASCADE; +CREATE VIEW pg_catalog.pg_publication_tables AS + SELECT + P.pubname AS pubname, + N.nspname AS schemaname, + C.relname AS tablename + FROM pg_publication P, pg_class C + JOIN pg_namespace N ON (N.oid = C.relnamespace) + WHERE C.oid IN (SELECT relid FROM pg_get_publication_tables(P.pubname)); + +DROP VIEW IF EXISTS pg_catalog.pg_stat_subscription CASCADE; +CREATE VIEW pg_catalog.pg_stat_subscription AS + SELECT + su.oid AS subid, + su.subname, + st.pid, + st.received_lsn, + st.last_msg_send_time, + st.last_msg_receipt_time, + st.latest_end_lsn, + st.latest_end_time + FROM pg_subscription su + LEFT JOIN pg_stat_get_subscription(NULL) st + ON (st.subid = su.oid); + +DROP VIEW IF EXISTS pg_catalog.pg_replication_origin_status CASCADE; +CREATE VIEW pg_catalog.pg_replication_origin_status AS + SELECT * + FROM pg_show_replication_origin_status(); + +REVOKE ALL ON pg_catalog.pg_replication_origin_status FROM public;DROP FUNCTION IF EXISTS pg_catalog.gs_explain_model(text) cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8_array(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_bool(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float4(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_float8(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int32(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_int64(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_numeric(text, VARIADIC "any") cascade; +DROP FUNCTION IF EXISTS pg_catalog.db4ai_predict_by_text(text, VARIADIC "any") cascade; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7101; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_bool(text, VARIADIC "any") + RETURNS boolean + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS 
$function$db4ai_predict_by_bool$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7105; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_float4(text, VARIADIC "any") + RETURNS real + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_float4$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7106; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_float8(text, VARIADIC "any") + RETURNS double precision + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_float8$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7102; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_int32(text, VARIADIC "any") + RETURNS integer + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_int32$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7103; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_int64(text, VARIADIC "any") + RETURNS bigint + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_int64$function$; + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7107; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_numeric(text, VARIADIC "any") + RETURNS numeric + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_numeric$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7108; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_text(text, VARIADIC "any") + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_text$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7109; +CREATE OR REPLACE FUNCTION pg_catalog.db4ai_predict_by_float8_array(text, VARIADIC "any") + RETURNS double precision[] + LANGUAGE internal + IMMUTABLE NOT FENCED NOT SHIPPABLE +AS $function$db4ai_predict_by_float8_array$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 7110; +CREATE OR REPLACE FUNCTION pg_catalog.gs_explain_model(text) + RETURNS text + LANGUAGE internal + IMMUTABLE STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_explain_model$function$; +comment on function PG_CATALOG.gs_explain_model(text) is 'explain machine learning model'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_clean_history_global_barriers(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4581; +CREATE FUNCTION pg_catalog.gs_pitr_clean_history_global_barriers +( +IN stop_barrier_timestamp cstring, +OUT oldest_barrier_record text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_clean_history_global_barriers'; + +DROP FUNCTION IF EXISTS pg_catalog.gs_pitr_archive_slot_force_advance(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4580; +CREATE FUNCTION pg_catalog.gs_pitr_archive_slot_force_advance +( +IN stop_barrier_timestamp cstring, +OUT archive_restart_lsn text +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'gs_pitr_archive_slot_force_advance'; +DROP FUNCTION IF EXISTS pg_catalog.local_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4384; +CREATE FUNCTION pg_catalog.local_double_write_stat +( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num 
pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8, + OUT file_id pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'local_double_write_stat'; + +DROP FUNCTION IF EXISTS pg_catalog.remote_double_write_stat(); +SET LOCAL inplace_upgrade_next_system_object_oids=IUO_PROC, 4385; +CREATE FUNCTION pg_catalog.remote_double_write_stat +( + OUT node_name pg_catalog.text, + OUT curr_dwn pg_catalog.int8, + OUT curr_start_page pg_catalog.int8, + OUT file_trunc_num pg_catalog.int8, + OUT file_reset_num pg_catalog.int8, + OUT total_writes pg_catalog.int8, + OUT low_threshold_writes pg_catalog.int8, + OUT high_threshold_writes pg_catalog.int8, + OUT total_pages pg_catalog.int8, + OUT low_threshold_pages pg_catalog.int8, + OUT high_threshold_pages pg_catalog.int8, + OUT file_id pg_catalog.int8 +) RETURNS SETOF record LANGUAGE INTERNAL STABLE as 'remote_double_write_stat'; + +DROP VIEW IF EXISTS DBE_PERF.global_double_write_status CASCADE; +CREATE OR REPLACE VIEW dbe_perf.global_double_write_status AS + SELECT node_name, curr_dwn, curr_start_page, file_trunc_num, file_reset_num, + total_writes, low_threshold_writes, high_threshold_writes, + total_pages, low_threshold_pages, high_threshold_pages, file_id + FROM pg_catalog.local_double_write_stat(); + +REVOKE ALL on DBE_PERF.global_double_write_status FROM PUBLIC; + +DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE DBE_PERF.global_double_write_status TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END; +/ + +GRANT SELECT ON TABLE DBE_PERF.global_double_write_status TO PUBLIC; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE +ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select * from pg_tables where tablename = 'snap_global_double_write_status' and schemaname = 'snapshot' limit 1) into ans; + if ans = true then + alter table snapshot.snap_global_double_write_status + ADD COLUMN snap_file_id int8; + end if; +END$DO$; +comment on function PG_CATALOG.regexp_count(text, text) is 'find match(es) count for regexp'; +comment on function PG_CATALOG.regexp_count(text, text, integer) is 'find match(es) count for regexp'; +comment on function PG_CATALOG.regexp_count(text, text, integer, text) is 'find match(es) count for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.regexp_instr(text, text, integer, integer, integer, text) is 'find match(es) position for regexp'; +comment on function PG_CATALOG.lpad(text, integer, text) is 'left-pad string to length'; +comment on function PG_CATALOG.rpad(text, integer, text) is 'right-pad string to length'; +comment on function PG_CATALOG.regexp_replace(text, text) is 'replace text using regexp'; +comment on function 
PG_CATALOG.regexp_replace(text, text, text, integer) is 'replace text using regexp';
+comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer) is 'replace text using regexp';
+comment on function PG_CATALOG.regexp_replace(text, text, text, integer, integer, text) is 'replace text using regexp';
+comment on function PG_CATALOG.line_in(cstring) is 'I/O';
+comment on function PG_CATALOG.regexp_substr(text, text, integer) is 'extract text matching regular expression';
+comment on function PG_CATALOG.regexp_substr(text, text, integer, integer) is 'extract text matching regular expression';
+comment on function PG_CATALOG.regexp_substr(text, text, integer, integer, text) is 'extract text matching regular expression';
+comment on function PG_CATALOG.pg_stat_get_activity(bigint) is 'statistics: information about currently active backends';
+comment on function PG_CATALOG.to_char(timestamp without time zone, text) is 'format timestamp to text';
+comment on function PG_CATALOG.pg_replication_origin_advance(text, text) is 'advance replication identifier to specific location';
+comment on function PG_CATALOG.pg_replication_origin_create(text) is 'create a replication origin';
+comment on function PG_CATALOG.pg_replication_origin_drop(text) is 'drop replication origin identified by its name';
+comment on function PG_CATALOG.pg_replication_origin_oid(text) is 'translate the replication origin''s name to its id';
+comment on function PG_CATALOG.pg_replication_origin_progress(text, boolean) is 'get an individual replication origin''s replication progress';
+comment on function PG_CATALOG.pg_replication_origin_session_is_setup() is 'is a replication origin configured in this session';
+comment on function PG_CATALOG.pg_replication_origin_session_progress(boolean) is 'get the replication progress of the current session';
+comment on function PG_CATALOG.pg_replication_origin_session_reset() is 'teardown configured replication progress tracking';
+comment on function PG_CATALOG.pg_replication_origin_session_setup(text) is 'configure session to maintain replication progress tracking for the passed in origin';
+comment on function PG_CATALOG.pg_replication_origin_xact_reset() is 'reset the transaction''s origin lsn and timestamp';
+comment on function PG_CATALOG.pg_replication_origin_xact_setup(text, timestamp with time zone) is 'setup the transaction''s origin lsn and timestamp';
+comment on function PG_CATALOG.pg_show_replication_origin_status() is 'get progress for all replication origins';
+comment on function PG_CATALOG.pg_get_publication_tables(text) is 'get OIDs of tables in a publication';
+comment on function PG_CATALOG.pg_stat_get_subscription(oid) is 'statistics: information about subscription';
+comment on function PG_CATALOG.xpath(text, xml, text[]) is 'evaluate XPath expression, with namespaces support';
+comment on function PG_CATALOG.xpath(text, xml) is 'evaluate XPath expression';
+comment on function PG_CATALOG.xpath_exists(text, xml, text[]) is 'test XML value against XPath expression, with namespace support';
+comment on function PG_CATALOG.json_array_element_text(json, integer) is 'implementation of ->> operator';
+comment on function PG_CATALOG.json_extract_path_op(json, text[]) is 'implementation of #> operator';
+comment on function PG_CATALOG.json_extract_path_text_op(json, text[]) is 'implementation of #>> operator';
+comment on function PG_CATALOG.jsonb_extract_path_op(jsonb, text[]) is 'implementation of #> operator';
+comment on function PG_CATALOG.json_object_field(json, text) is 'implementation of -> operator';
+comment on function PG_CATALOG.jsonb_array_element(jsonb, integer) is 'implementation of -> operator';
+comment on function PG_CATALOG.jsonb_array_element_text(jsonb, integer) is 'implementation of ->> operator';
+comment on function PG_CATALOG.jsonb_contains(jsonb, jsonb) is 'implementation of @> operator';
+comment on function PG_CATALOG.jsonb_eq(jsonb, jsonb) is 'implementation of = operator';
+comment on function PG_CATALOG.jsonb_exists(jsonb, text) is 'implementation of ? operator';
+comment on function PG_CATALOG.jsonb_exists_all(jsonb, text[]) is 'implementation of ?& operator';
+comment on function PG_CATALOG.jsonb_exists_any(jsonb, text[]) is 'implementation of ?| operator';
+comment on function PG_CATALOG.jsonb_extract_path_text_op(jsonb, text[]) is 'implementation of #>> operator';
+comment on function PG_CATALOG.jsonb_ge(jsonb, jsonb) is 'implementation of >= operator';
+comment on function PG_CATALOG.jsonb_gt(jsonb, jsonb) is 'implementation of > operator';
+comment on function PG_CATALOG.jsonb_le(jsonb, jsonb) is 'implementation of <= operator';
+comment on function PG_CATALOG.jsonb_ne(jsonb, jsonb) is 'implementation of <> operator';
+comment on function PG_CATALOG.jsonb_object_field(jsonb, text) is 'implementation of -> operator';
+comment on function PG_CATALOG.jsonb_object_field_text(jsonb, text) is 'implementation of ->> operator';
+comment on function PG_CATALOG.json_object_field_text(json, text) is 'implementation of ->> operator';
+comment on function PG_CATALOG.json_array_element(json, integer) is 'implementation of -> operator';
+comment on function PG_CATALOG.jsonb_lt(jsonb, jsonb) is 'implementation of < operator';
+comment on function PG_CATALOG.jsonb_contained(jsonb, jsonb) is 'implementation of <@ operator';
+comment on function PG_CATALOG.has_any_privilege(name, text) is 'current user privilege on database level';
+comment on function PG_CATALOG.int16eq(int16, int16) is 'implementation of = operator';
+comment on function PG_CATALOG.int16ne(int16, int16) is 'implementation of <> operator';
+comment on function PG_CATALOG.int16lt(int16, int16) is 'implementation of < operator';
+comment on function PG_CATALOG.int16le(int16, int16) is 'implementation of <= operator';
+comment on function PG_CATALOG.int16gt(int16, int16) is 'implementation of > operator';
+comment on function PG_CATALOG.int16ge(int16, int16) is 'implementation of >= operator';
+comment on function PG_CATALOG.int16pl(int16, int16) is 'implementation of + operator';
+comment on function PG_CATALOG.int16mi(int16, int16) is 'implementation of - operator';
+comment on function PG_CATALOG.int16mul(int16, int16) is 'implementation of * operator';
+comment on function PG_CATALOG.int16div(int16, int16) is 'implementation of / operator';
+comment on function PG_CATALOG.array_varchar_first(anyarray) is 'array_varchar_first';
+comment on function PG_CATALOG.array_varchar_last(anyarray) is 'array_varchar_last';
+comment on function PG_CATALOG.array_integer_first(anyarray) is 'array_integer_first';
+comment on function PG_CATALOG.array_integer_last(anyarray) is 'array_integer_last';
+comment on function PG_CATALOG.array_indexby_length(anyarray, integer) is 'array index by length';
+comment on function PG_CATALOG.gs_index_verify() is 'verify index';
+comment on function PG_CATALOG.gs_index_recycle_queue() is 'display index recycle queue';
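+-- Illustrative only: once this script has run, any description set above can
+-- be read back through the standard catalog helper, for example:
+--   SELECT p.proname, obj_description(p.oid, 'pg_proc') AS descr
+--   FROM pg_proc p WHERE p.proname = 'int16div';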
+DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages(boolean);
+DROP FUNCTION IF EXISTS pg_catalog.login_audit_messages_pid(boolean);
+
+CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages(in flag boolean) returns table (username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text) AUTHID DEFINER
+AS $$
+DECLARE
+user_id text;
+user_name text;
+db_name text;
+SQL_STMT VARCHAR2(500);
+fail_cursor REFCURSOR;
+success_cursor REFCURSOR;
+BEGIN
+    SELECT text(oid) FROM pg_catalog.pg_authid WHERE rolname=SESSION_USER INTO user_id;
+    SELECT SESSION_USER INTO user_name;
+    SELECT pg_catalog.CURRENT_DATABASE() INTO db_name;
+    IF flag = true THEN
+        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
+                    type IN (''login_success'') AND username =' || quote_literal(user_name) ||
+                    ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
+        OPEN success_cursor FOR EXECUTE SQL_STMT;
+        --search bottom up for all the success login info
+        FETCH LAST FROM success_cursor into username, database, logintime, mytype, result, client_conninfo;
+        FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo;
+        IF FOUND THEN
+            return next;
+        END IF;
+        CLOSE success_cursor;
+    ELSE
+        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
+                    type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) ||
+                    ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
+        OPEN fail_cursor FOR EXECUTE SQL_STMT;
+        --search bottom up
+        FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo;
+        LOOP
+            FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo;
+            EXIT WHEN NOT FOUND;
+            IF mytype = 'login_failed' THEN
+                return next;
+            ELSE
+                -- must be login_success
+                EXIT;
+            END IF;
+        END LOOP;
+        CLOSE fail_cursor;
+    END IF;
+END; $$
+LANGUAGE plpgsql NOT FENCED;
+
+CREATE OR REPLACE FUNCTION pg_catalog.login_audit_messages_pid(flag boolean)
+ RETURNS TABLE(username text, database text, logintime timestamp with time zone, mytype text, result text, client_conninfo text, backendid bigint) AUTHID DEFINER
+AS $$
+DECLARE
+user_id text;
+user_name text;
+db_name text;
+SQL_STMT VARCHAR2(500);
+fail_cursor REFCURSOR;
+success_cursor REFCURSOR;
+mybackendid bigint;
+curSessionFound boolean;
+BEGIN
+    SELECT text(oid) FROM pg_catalog.pg_authid WHERE rolname=SESSION_USER INTO user_id;
+    SELECT SESSION_USER INTO user_name;
+    SELECT pg_catalog.CURRENT_DATABASE() INTO db_name;
+    SELECT pg_catalog.pg_backend_pid() INTO mybackendid;
+    curSessionFound = false;
+    IF flag = true THEN
+        SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE
+                    type IN (''login_success'') AND username =' || quote_literal(user_name) ||
+                    ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';';
+        OPEN success_cursor FOR EXECUTE SQL_STMT;
+        --search bottom up for all the success login info
+        FETCH LAST FROM success_cursor into username, database, logintime, mytype, result,
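+        -- (Audit records are scanned newest-first: FETCH LAST positions the
+        -- cursor on the most recent row, and FETCH BACKWARD then steps toward
+        -- older rows until this session's own login entry is found.)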
client_conninfo, backendid; + LOOP + IF backendid = mybackendid THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + FETCH BACKWARD FROM success_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + IF FOUND THEN + return next; + END IF; + END IF; + ELSE + SQL_STMT := 'SELECT username,database,time,type,result,client_conninfo, split_part(thread_id,''@'',1) backendid FROM pg_catalog.pg_query_audit(''1970-1-1'',''9999-12-31'') WHERE + type IN (''login_success'', ''login_failed'') AND username =' || quote_literal(user_name) || + ' AND database =' || quote_literal(db_name) || ' AND userid =' || quote_literal(user_id) || ';'; + OPEN fail_cursor FOR EXECUTE SQL_STMT; + --search bottom up + FETCH LAST FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + LOOP + IF backendid = mybackendid AND mytype = 'login_success' THEN + --found the login info for the current session + curSessionFound = true; + EXIT; + END IF; + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid; + EXIT WHEN NOT FOUND; + END LOOP; + IF curSessionFound THEN + LOOP + FETCH BACKWARD FROM fail_cursor into username, database, logintime, mytype, result, client_conninfo, backendid ; + EXIT WHEN NOT FOUND; + IF mytype = 'login_failed' THEN + return next; + ELSE + -- must be login_success + EXIT; + END IF; + END LOOP; + END IF; --curSessionFound + CLOSE fail_cursor; + END IF; +END; $$ +LANGUAGE plpgsql NOT FENCED;DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, integer, integer) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_segment_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, oid, oid, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4770; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_segment_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, oid, oid, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_segment_block_from_remote$function$; + +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(integer, integer, integer, smallint, integer, xid, integer, xid, boolean) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, boolean, integer) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4767; +CREATE OR REPLACE FUNCTION pg_catalog.gs_read_block_from_remote(oid, oid, oid, smallint, integer, xid, integer, xid, boolean, integer) + RETURNS bytea + LANGUAGE internal + STRICT NOT FENCED NOT SHIPPABLE +AS $function$gs_read_block_from_remote$function$; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DROP FUNCTION IF EXISTS pg_catalog.array_remove(anyarray, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6555; +CREATE FUNCTION pg_catalog.array_remove ( + anyarray, anyelement +) RETURNS anyarray LANGUAGE INTERNAL IMMUTABLE as 'array_remove'; + +DROP FUNCTION IF EXISTS pg_catalog.array_replace(anyarray, anyelement, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = 
IUO_PROC, 6556; +CREATE FUNCTION pg_catalog.array_replace ( + anyarray, anyelement, anyelement +) RETURNS anyarray LANGUAGE INTERNAL IMMUTABLE as 'array_replace'; + + +DROP FUNCTION IF EXISTS pg_catalog.first_transition(anyelement, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6558; +CREATE FUNCTION pg_catalog.first_transition ( +anyelement, anyelement +) RETURNS anyelement LANGUAGE INTERNAL IMMUTABLE STRICT as 'first_transition'; + +DROP FUNCTION IF EXISTS pg_catalog.last_transition(anyelement, anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6559; +CREATE FUNCTION pg_catalog.last_transition ( +anyelement, anyelement +) RETURNS anyelement LANGUAGE INTERNAL IMMUTABLE STRICT as 'last_transition'; + +DROP aggregate IF EXISTS pg_catalog.first(anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6560; +create aggregate first(anyelement) ( + sfunc = first_transition, + stype = anyelement +); + +DROP aggregate IF EXISTS pg_catalog.last(anyelement) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6561; +create aggregate last(anyelement) ( + sfunc = last_transition, + stype = anyelement +); + + + +DROP FUNCTION IF EXISTS pg_catalog.network_larger(inet, inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6666; +CREATE FUNCTION pg_catalog.network_larger ( +inet, inet +) RETURNS inet LANGUAGE INTERNAL STRICT as 'network_larger'; + +DROP FUNCTION IF EXISTS pg_catalog.network_smaller(inet, inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6667; +CREATE FUNCTION pg_catalog.network_smaller ( +inet, inet +) RETURNS inet LANGUAGE INTERNAL STRICT as 'network_smaller'; + + +DROP aggregate IF EXISTS pg_catalog.max(inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6668; +create aggregate max(inet) ( + sfunc = network_larger, + stype = inet +); + +DROP aggregate IF EXISTS pg_catalog.min(inet) CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 6669; +create aggregate min(inet) ( + sfunc = network_smaller, + stype = inet +); + +DROP FUNCTION IF EXISTS pg_catalog.pg_buffercache_pages() CASCADE; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4130; +CREATE FUNCTION pg_catalog.pg_buffercache_pages( + OUT bufferid integer, + OUT relfilenode oid, + OUT bucketid integer, + OUT storage_type bigint, + OUT reltablespace oid, + OUT reldatabase oid, + OUT relforknumber integer, + OUT relblocknumber oid, + OUT isdirty boolean, + OUT isvalid boolean, + OUT usage_count smallint, + OUT pinning_backends integer) +RETURNS SETOF record +LANGUAGE internal +STABLE NOT FENCED NOT SHIPPABLE ROWS 100 +AS 'pg_buffercache_pages'; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +DO $DO$ +DECLARE + ans boolean; +BEGIN + select case when count(*)=1 then true else false end as ans from (select tablename from PG_TABLES where tablename='statement_history' and schemaname='pg_catalog' limit 1) into ans; + if ans = true then + TRUNCATE TABLE pg_catalog.statement_history; + DROP INDEX IF EXISTS pg_catalog.statement_history_time_idx; + create index pg_catalog.statement_history_time_idx on pg_catalog.statement_history USING btree (start_time, is_slow_sql); + end if; +END$DO$;DROP FUNCTION IF EXISTS pg_catalog.pg_create_physical_replication_slot_extern(name, boolean, text) cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 3790; +CREATE OR 
REPLACE FUNCTION pg_catalog.pg_create_physical_replication_slot_extern(slotname name, dummy_standby boolean, extra_content text, need_recycle_xlog boolean, OUT slotname text, OUT xlog_position text) + RETURNS record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE +AS $function$pg_create_physical_replication_slot_extern$function$; +SET search_path TO information_schema; + +-- element_types is generated by data_type_privileges +DROP VIEW IF EXISTS information_schema.element_types CASCADE; + +-- data_type_privileges is generated by columns +DROP VIEW IF EXISTS information_schema.data_type_privileges CASCADE; +-- role_column_grants is generated by column_privileges +DROP VIEW IF EXISTS information_schema.role_column_grants CASCADE; +-- role_table_grants is generated by table_privileges +DROP VIEW IF EXISTS information_schema.role_table_grants CASCADE; + +-- other views need upgrade for matview +DROP VIEW IF EXISTS information_schema.column_domain_usage CASCADE; +DROP VIEW IF EXISTS information_schema.column_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.column_udt_usage CASCADE; +DROP VIEW IF EXISTS information_schema.columns CASCADE; +DROP VIEW IF EXISTS information_schema.table_privileges CASCADE; +DROP VIEW IF EXISTS information_schema.tables CASCADE; +DROP VIEW IF EXISTS information_schema.view_column_usage CASCADE; +DROP VIEW IF EXISTS information_schema.view_table_usage CASCADE; + +CREATE VIEW information_schema.column_domain_usage AS + SELECT CAST(current_database() AS sql_identifier) AS domain_catalog, + CAST(nt.nspname AS sql_identifier) AS domain_schema, + CAST(t.typname AS sql_identifier) AS domain_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_type t, pg_namespace nt, pg_class c, pg_namespace nc, + pg_attribute a + + WHERE t.typnamespace = nt.oid + AND c.relnamespace = nc.oid + AND a.attrelid = c.oid + AND a.atttypid = t.oid + AND t.typtype = 'd' + AND c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND a.attnum > 0 + AND NOT a.attisdropped + AND pg_has_role(t.typowner, 'USAGE'); + +CREATE VIEW information_schema.column_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(x.relname AS sql_identifier) AS table_name, + CAST(x.attname AS sql_identifier) AS column_name, + CAST(x.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(x.grantee, x.relowner, 'USAGE') + OR x.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable + + FROM ( + SELECT pr_c.grantor, + pr_c.grantee, + attname, + relname, + relnamespace, + pr_c.prtype, + pr_c.grantable, + pr_c.relowner + FROM (SELECT oid, relname, relnamespace, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* + FROM pg_class + WHERE relkind IN ('r', 'm', 'v', 'f') + ) pr_c (oid, relname, relnamespace, relowner, grantor, grantee, prtype, grantable), + pg_attribute a + WHERE a.attrelid = pr_c.oid + AND a.attnum > 0 + AND NOT a.attisdropped + UNION + SELECT pr_a.grantor, + pr_a.grantee, + attname, + relname, + relnamespace, + pr_a.prtype, + pr_a.grantable, + c.relowner + FROM (SELECT 
attrelid, attname, (aclexplode(coalesce(attacl, acldefault('c', relowner)))).* + FROM pg_attribute a JOIN pg_class cc ON (a.attrelid = cc.oid) + WHERE attnum > 0 + AND NOT attisdropped + ) pr_a (attrelid, attname, grantor, grantee, prtype, grantable), + pg_class c + WHERE pr_a.attrelid = c.oid + AND relkind IN ('r', 'm', 'v', 'f') + ) x, + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE x.relnamespace = nc.oid + AND x.grantee = grantee.oid + AND x.grantor = u_grantor.oid + AND x.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'REFERENCES', 'COMMENT') + AND (x.relname not like 'mlog\_%' AND x.relname not like 'matviewmap\_%') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.column_udt_usage AS + SELECT CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_attribute a, pg_class c, pg_namespace nc, + (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + + WHERE a.attrelid = c.oid + AND a.atttypid = t.oid + AND nc.oid = c.relnamespace + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND pg_has_role(coalesce(bt.typowner, t.typowner), 'USAGE'); + +CREATE VIEW information_schema.columns AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name, + CAST(a.attnum AS cardinal_number) AS ordinal_position, + CAST(CASE WHEN ad.adgencol <> 's' THEN pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS column_default, + CAST(CASE WHEN a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) THEN 'NO' ELSE 'YES' END + AS yes_or_no) + AS is_nullable, + + CAST( + CASE WHEN t.typtype = 'd' THEN + CASE WHEN bt.typelem <> 0 AND bt.typlen = -1 THEN 'ARRAY' + WHEN nbt.nspname = 'pg_catalog' THEN format_type(t.typbasetype, null) + ELSE 'USER-DEFINED' END + ELSE + CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' + WHEN nt.nspname = 'pg_catalog' THEN format_type(a.atttypid, null) + ELSE 'USER-DEFINED' END + END + AS character_data) + AS data_type, + + CAST( + _pg_char_max_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_maximum_length, + + CAST( + _pg_char_octet_length(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS character_octet_length, + + CAST( + _pg_numeric_precision(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision, + + CAST( + _pg_numeric_precision_radix(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_precision_radix, + + CAST( + _pg_numeric_scale(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS cardinal_number) + AS numeric_scale, + + CAST( + _pg_datetime_precision(_pg_truetypid(a, t), 
_pg_truetypmod(a, t)) + AS cardinal_number) + AS datetime_precision, + + CAST( + _pg_interval_type(_pg_truetypid(a, t), _pg_truetypmod(a, t)) + AS character_data) + AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + + CAST(CASE WHEN t.typtype = 'd' THEN current_database() ELSE null END + AS sql_identifier) AS domain_catalog, + CAST(CASE WHEN t.typtype = 'd' THEN nt.nspname ELSE null END + AS sql_identifier) AS domain_schema, + CAST(CASE WHEN t.typtype = 'd' THEN t.typname ELSE null END + AS sql_identifier) AS domain_name, + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(coalesce(nbt.nspname, nt.nspname) AS sql_identifier) AS udt_schema, + CAST(coalesce(bt.typname, t.typname) AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST(a.attnum AS sql_identifier) AS dtd_identifier, + CAST('NO' AS yes_or_no) AS is_self_referencing, + + CAST('NO' AS yes_or_no) AS is_identity, + CAST(null AS character_data) AS identity_generation, + CAST(null AS character_data) AS identity_start, + CAST(null AS character_data) AS identity_increment, + CAST(null AS character_data) AS identity_maximum, + CAST(null AS character_data) AS identity_minimum, + CAST(null AS yes_or_no) AS identity_cycle, + + CAST(CASE WHEN ad.adgencol = 's' THEN 'ALWAYS' ELSE 'NEVER' END AS character_data) AS is_generated, + CAST(CASE WHEN ad.adgencol = 's' THEN pg_get_expr(ad.adbin, ad.adrelid) END AS character_data) AS generation_expression, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '2' AND is_instead) + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '4' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_updatable + + FROM (pg_attribute a LEFT JOIN pg_attrdef ad ON attrelid = adrelid AND attnum = adnum) + JOIN (pg_class c JOIN pg_namespace nc ON (c.relnamespace = nc.oid)) ON a.attrelid = c.oid + JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON a.atttypid = t.oid + LEFT JOIN (pg_type bt JOIN pg_namespace nbt ON (bt.typnamespace = nbt.oid)) + ON (t.typtype = 'd' AND t.typbasetype = bt.oid) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON a.attcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE (NOT pg_is_other_temp_schema(nc.oid)) + + AND a.attnum > 0 AND NOT a.attisdropped AND c.relkind in ('r', 'm', 'v', 'f') + + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + + AND (pg_has_role(c.relowner, 'USAGE') + OR has_column_privilege(c.oid, a.attnum, + 'SELECT, INSERT, UPDATE, REFERENCES')); + +CREATE VIEW information_schema.table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + 
CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'm', 'v') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND (c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + OR c.prtype IN ('ALTER', 'DROP', 'COMMENT', 'INDEX', 'VACUUM') + ) + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + +CREATE VIEW information_schema.tables AS + SELECT CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + + CAST( + CASE WHEN nc.oid = pg_my_temp_schema() THEN 'LOCAL TEMPORARY' + WHEN c.relkind = 'r' THEN 'BASE TABLE' + WHEN c.relkind = 'm' THEN 'MATERIALIZED VIEW' + WHEN c.relkind = 'v' THEN 'VIEW' + WHEN c.relkind = 'f' THEN 'FOREIGN TABLE' + ELSE null END + AS character_data) AS table_type, + + CAST(null AS sql_identifier) AS self_referencing_column_name, + CAST(null AS character_data) AS reference_generation, + + CAST(CASE WHEN t.typname IS NOT NULL THEN current_database() ELSE null END AS sql_identifier) AS user_defined_type_catalog, + CAST(nt.nspname AS sql_identifier) AS user_defined_type_schema, + CAST(t.typname AS sql_identifier) AS user_defined_type_name, + + CAST(CASE WHEN c.relkind = 'r' + OR (c.relkind = 'v' + AND EXISTS (SELECT 1 FROM pg_rewrite WHERE ev_class = c.oid AND ev_type = '3' AND is_instead)) + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_insertable_into, + + CAST(CASE WHEN t.typname IS NOT NULL THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_typed, + CAST(null AS character_data) AS commit_action + + FROM pg_namespace nc JOIN pg_class c ON (nc.oid = c.relnamespace) + LEFT JOIN (pg_type t JOIN pg_namespace nt ON (t.typnamespace = nt.oid)) ON (c.reloftype = t.oid) + + WHERE c.relkind IN ('r', 'm', 'v', 'f') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND (NOT pg_is_other_temp_schema(nc.oid)) + AND (pg_has_role(c.relowner, 'USAGE') + OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER') + OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES') ); + +CREATE VIEW information_schema.view_column_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name, + CAST(a.attname AS sql_identifier) AS column_name + + FROM pg_namespace nv, pg_class v, 
pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt, + pg_attribute a + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog\_%' AND t.relname not like 'matviewmap\_%') + AND t.oid = a.attrelid + AND dt.refobjsubid = a.attnum + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.view_table_usage AS + SELECT DISTINCT + CAST(current_database() AS sql_identifier) AS view_catalog, + CAST(nv.nspname AS sql_identifier) AS view_schema, + CAST(v.relname AS sql_identifier) AS view_name, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nt.nspname AS sql_identifier) AS table_schema, + CAST(t.relname AS sql_identifier) AS table_name + + FROM pg_namespace nv, pg_class v, pg_depend dv, + pg_depend dt, pg_class t, pg_namespace nt + + WHERE nv.oid = v.relnamespace + AND v.relkind = 'v' + AND v.oid = dv.refobjid + AND dv.refclassid = 'pg_catalog.pg_class'::regclass + AND dv.classid = 'pg_catalog.pg_rewrite'::regclass + AND dv.deptype = 'i' + AND dv.objid = dt.objid + AND dv.refobjid <> dt.refobjid + AND dt.classid = 'pg_catalog.pg_rewrite'::regclass + AND dt.refclassid = 'pg_catalog.pg_class'::regclass + AND dt.refobjid = t.oid + AND t.relnamespace = nt.oid + AND t.relkind IN ('r', 'm', 'v', 'f') + AND (t.relname not like 'mlog\_%' AND t.relname not like 'matviewmap\_%') + AND pg_has_role(t.relowner, 'USAGE'); + +CREATE VIEW information_schema.data_type_privileges AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(x.objschema AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, + CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS dtd_identifier + + FROM + ( + SELECT udt_schema, udt_name, 'USER-DEFINED TYPE'::text, dtd_identifier FROM attributes + UNION ALL + SELECT table_schema, table_name, 'TABLE'::text, dtd_identifier FROM columns + UNION ALL + SELECT domain_schema, domain_name, 'DOMAIN'::text, dtd_identifier FROM domains + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM parameters + UNION ALL + SELECT specific_schema, specific_name, 'ROUTINE'::text, dtd_identifier FROM routines + ) AS x (objschema, objname, objtype, objdtdid); + +CREATE VIEW information_schema.role_column_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + column_name, + privilege_type, + is_grantable + FROM column_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.role_table_grants AS + SELECT grantor, + grantee, + table_catalog, + table_schema, + table_name, + privilege_type, + is_grantable, + with_hierarchy + FROM table_privileges + WHERE grantor IN (SELECT role_name FROM enabled_roles) + OR grantee IN (SELECT role_name FROM enabled_roles); + +CREATE VIEW information_schema.element_types AS + SELECT CAST(current_database() AS sql_identifier) AS object_catalog, + CAST(n.nspname AS sql_identifier) AS object_schema, + CAST(x.objname AS sql_identifier) AS object_name, 
+ CAST(x.objtype AS character_data) AS object_type, + CAST(x.objdtdid AS sql_identifier) AS collection_type_identifier, + CAST( + CASE WHEN nbt.nspname = 'pg_catalog' THEN format_type(bt.oid, null) + ELSE 'USER-DEFINED' END AS character_data) AS data_type, + + CAST(null AS cardinal_number) AS character_maximum_length, + CAST(null AS cardinal_number) AS character_octet_length, + CAST(null AS sql_identifier) AS character_set_catalog, + CAST(null AS sql_identifier) AS character_set_schema, + CAST(null AS sql_identifier) AS character_set_name, + CAST(CASE WHEN nco.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS collation_catalog, + CAST(nco.nspname AS sql_identifier) AS collation_schema, + CAST(co.collname AS sql_identifier) AS collation_name, + CAST(null AS cardinal_number) AS numeric_precision, + CAST(null AS cardinal_number) AS numeric_precision_radix, + CAST(null AS cardinal_number) AS numeric_scale, + CAST(null AS cardinal_number) AS datetime_precision, + CAST(null AS character_data) AS interval_type, + CAST(null AS cardinal_number) AS interval_precision, + + CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard + + CAST(current_database() AS sql_identifier) AS udt_catalog, + CAST(nbt.nspname AS sql_identifier) AS udt_schema, + CAST(bt.typname AS sql_identifier) AS udt_name, + + CAST(null AS sql_identifier) AS scope_catalog, + CAST(null AS sql_identifier) AS scope_schema, + CAST(null AS sql_identifier) AS scope_name, + + CAST(null AS cardinal_number) AS maximum_cardinality, + CAST('a' || CAST(x.objdtdid AS text) AS sql_identifier) AS dtd_identifier + + FROM pg_namespace n, pg_type at, pg_namespace nbt, pg_type bt, + ( + /* columns, attributes */ + SELECT c.relnamespace, CAST(c.relname AS sql_identifier), + CASE WHEN c.relkind = 'c' THEN 'USER-DEFINED TYPE'::text ELSE 'TABLE'::text END, + a.attnum, a.atttypid, a.attcollation + FROM pg_class c, pg_attribute a + WHERE c.oid = a.attrelid + AND c.relkind IN ('r', 'm', 'v', 'f', 'c') + AND (c.relname not like 'mlog\_%' AND c.relname not like 'matviewmap\_%') + AND attnum > 0 AND NOT attisdropped + + UNION ALL + + /* domains */ + SELECT t.typnamespace, CAST(t.typname AS sql_identifier), + 'DOMAIN'::text, 1, t.typbasetype, t.typcollation + FROM pg_type t + WHERE t.typtype = 'd' + + UNION ALL + + /* parameters */ + SELECT pronamespace, CAST(proname || '_' || CAST(oid AS text) AS sql_identifier), + 'ROUTINE'::text, (ss.x).n, (ss.x).x, 0 + FROM (SELECT p.pronamespace, p.proname, p.oid, + _pg_expandarray(coalesce(p.proallargtypes, p.proargtypes::oid[])) AS x + FROM pg_proc p) AS ss + + UNION ALL + + /* result types */ + SELECT p.pronamespace, CAST(p.proname || '_' || CAST(p.oid AS text) AS sql_identifier), + 'ROUTINE'::text, 0, p.prorettype, 0 + FROM pg_proc p + + ) AS x (objschema, objname, objtype, objdtdid, objtypeid, objcollation) + LEFT JOIN (pg_collation co JOIN pg_namespace nco ON (co.collnamespace = nco.oid)) + ON x.objcollation = co.oid AND (nco.nspname, co.collname) <> ('pg_catalog', 'default') + + WHERE n.oid = x.objschema + AND at.oid = x.objtypeid + AND (at.typelem <> 0 AND at.typlen = -1) + AND at.typelem = bt.oid + AND nbt.oid = bt.typnamespace + + AND (n.nspname, x.objname, x.objtype, CAST(x.objdtdid AS sql_identifier)) IN + ( SELECT object_schema, object_name, object_type, dtd_identifier + FROM data_type_privileges ); + +do $$DECLARE + user_name text; + query_str text; +BEGIN + SELECT SESSION_USER INTO user_name; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, 
REFERENCES, TRIGGER ON information_schema.element_types TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.data_type_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_column_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.role_table_grants TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_domain_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.column_udt_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.columns TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.table_privileges TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.tables TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_column_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; + query_str := 'GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON information_schema.view_table_usage TO ' || quote_ident(user_name) || ';'; + EXECUTE IMMEDIATE query_str; +END$$; + +GRANT SELECT ON information_schema.element_types TO PUBLIC; +GRANT SELECT ON information_schema.data_type_privileges TO PUBLIC; +GRANT SELECT ON information_schema.role_column_grants TO PUBLIC; +GRANT SELECT ON information_schema.role_table_grants TO PUBLIC; +GRANT SELECT ON information_schema.column_domain_usage TO PUBLIC; +GRANT SELECT ON information_schema.column_privileges TO PUBLIC; +GRANT SELECT ON information_schema.column_udt_usage TO PUBLIC; +GRANT SELECT ON information_schema.columns TO PUBLIC; +GRANT SELECT ON information_schema.table_privileges TO PUBLIC; +GRANT SELECT ON information_schema.tables TO PUBLIC; +GRANT SELECT ON information_schema.view_column_usage TO PUBLIC; +GRANT SELECT ON information_schema.view_table_usage TO PUBLIC; + +RESET search_path; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_count(text, text, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, 
int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_instr(text, text, int, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_replace(text, text, text, int, int, text) CASCADE; + +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.regexp_substr(text, text, int, int, text) CASCADE; diff --git a/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_602.sql b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_602.sql new file mode 100644 index 000000000..420555f6b --- /dev/null +++ b/src/include/catalog/upgrade_sql/upgrade_catalog_otherdb/upgrade_catalog_otherdb_92_602.sql @@ -0,0 +1,158 @@ +--create gs_uid +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 8666, 8667, 0, 0; +CREATE TABLE IF NOT EXISTS pg_catalog.gs_uid +( + relid OID NOCOMPRESS NOT NULL, + uid_backup bigint NOCOMPRESS NOT NULL +); +GRANT SELECT ON pg_catalog.gs_uid TO public; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 3499; +CREATE UNIQUE INDEX gs_uid_relid_index ON pg_catalog.gs_uid USING BTREE(relid OID_OPS); + + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +/*------ add sys function gs_stat_wal_entrytable ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2861; +CREATE FUNCTION pg_catalog.gs_stat_wal_entrytable(int8, OUT idx xid, OUT endlsn xid, OUT lrc int, OUT status xid32) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_stat_wal_entrytable'; + +/*------ add sys function gs_walwriter_flush_position ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2862; +CREATE FUNCTION pg_catalog.gs_walwriter_flush_position(out last_flush_status_entry int, out last_scanned_lrc int, out curr_lrc int, out curr_byte_pos xid, out prev_byte_size xid32, out flush_result xid, out send_result xid, out shm_rqst_write_pos xid, out shm_rqst_flush_pos xid, out shm_result_write_pos xid, out shm_result_flush_pos xid, out curr_time timestamptz) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_walwriter_flush_position'; + +/*------ add sys function gs_walwriter_flush_stat ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 2863; +CREATE FUNCTION pg_catalog.gs_walwriter_flush_stat(int4, out write_times xid, out sync_times xid, out total_xlog_sync_bytes xid, out total_actual_xlog_sync_bytes xid, out avg_write_bytes xid32, out avg_actual_write_bytes xid32, out avg_sync_bytes xid32, out avg_actual_sync_bytes xid32, out total_write_time xid, out total_sync_time xid, out avg_write_time xid32, out avg_sync_time xid32, out curr_init_xlog_segno xid, out curr_open_xlog_segno xid, out last_reset_time timestamptz, out curr_time timestamptz) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_walwriter_flush_stat'; +
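The three WAL monitoring functions registered above are ordinary set-returning internal functions, so once this upgrade script has been applied they can be queried like any catalog function. A minimal usage sketch follows; the integer arguments (an entry count for gs_stat_wal_entrytable, a statistics mode for gs_walwriter_flush_stat) are illustrative assumptions, not values documented in this patch.

-- Hedged usage sketch; argument semantics are assumed, not taken from this patch.
SELECT * FROM pg_catalog.gs_stat_wal_entrytable(100);   -- inspect WAL insert status entries (100 is a hypothetical count)
SELECT * FROM pg_catalog.gs_walwriter_flush_position(); -- current WAL writer flush positions
SELECT * FROM pg_catalog.gs_walwriter_flush_stat(1);    -- WAL write/sync statistics; mode 1 is a hypothetical value

+/*------ add sys function gs_stat_undo ------*/ +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4434; +CREATE FUNCTION pg_catalog.gs_stat_undo(OUT curr_used_zone_count int4, OUT top_used_zones text, OUT curr_used_undo_size int4, +OUT undo_threshold int4, OUT oldest_xid_in_undo oid, OUT oldest_xmin oid, 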
OUT total_undo_chain_len oid, OUT max_undo_chain_len oid, +OUT create_undo_file_count int4, OUT discard_undo_file_count int4) +RETURNS record LANGUAGE INTERNAL as 'gs_stat_undo';--create system relation gs_db_privilege and its indexes +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 5566, 5567, 0, 0; +CREATE TABLE pg_catalog.gs_db_privilege +( + roleid Oid NOCOMPRESS not null, + privilege_type text NOCOMPRESS, + admin_option boolean NOCOMPRESS not null +) WITH OIDS TABLESPACE pg_default; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 5568; +CREATE UNIQUE INDEX gs_db_privilege_oid_index ON pg_catalog.gs_db_privilege USING BTREE(oid oid_ops); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 5569; +CREATE INDEX gs_db_privilege_roleid_index ON pg_catalog.gs_db_privilege USING BTREE(roleid oid_ops); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 5570; +CREATE UNIQUE INDEX gs_db_privilege_roleid_privilege_type_index ON pg_catalog.gs_db_privilege + USING BTREE(roleid oid_ops, privilege_type text_ops); + +GRANT SELECT ON pg_catalog.gs_db_privilege TO PUBLIC; + +--create system function has_any_privilege(user, privilege) +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 5571; +CREATE OR REPLACE FUNCTION pg_catalog.has_any_privilege(name, text) RETURNS boolean + LANGUAGE INTERNAL STRICT STABLE + AS 'has_any_privilege'; + +--create system view gs_db_privileges +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; +CREATE VIEW pg_catalog.gs_db_privileges AS + SELECT + pg_catalog.pg_get_userbyid(roleid) AS rolename, + privilege_type AS privilege_type, + CASE + WHEN admin_option THEN + 'yes' + ELSE + 'no' + END AS admin_option + FROM pg_catalog.gs_db_privilege; + +GRANT SELECT ON pg_catalog.gs_db_privileges TO PUBLIC; +/*------ add sys function gs_undo_record ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_record(int8, OUT undoptr oid, OUT xid oid, OUT cid text, +OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text, +OUT prevurp text, OUT payloadlen text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4439; +CREATE FUNCTION pg_catalog.gs_undo_record(int8, OUT undoptr oid, OUT xid oid, OUT cid text, +OUT reloid text, OUT relfilenode text, OUT utype text, OUT blkprev text, OUT blockno text, OUT uoffset text, +OUT prevurp text, OUT payloadlen text) +RETURNS record LANGUAGE INTERNAL as 'gs_undo_record'; + +/*------ add sys function gs_undo_meta ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_meta(int4, int4, int4, OUT zoneId oid, OUT persistType oid, OUT insertptr text, OUT discard text, OUT endptr text, OUT used text, OUT lsn text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4430; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_meta(int4, int4, int4, OUT zoneId oid, OUT persistType oid, OUT insertptr text, OUT discard text, OUT endptr text, OUT used text, OUT lsn text, OUT pid oid) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_meta'; +
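With the gs_db_privilege catalog, has_any_privilege(), and the gs_db_privileges view now in place, database-level privileges can be inspected directly from SQL. A hedged sketch follows; the role name 'alice' and the privilege string 'CREATE ANY TABLE' are hypothetical examples, not values taken from this patch.

-- Hedged sketch: probe and list database-level privileges via the new objects.
SELECT pg_catalog.has_any_privilege('alice', 'CREATE ANY TABLE');  -- hypothetical role/privilege
SELECT rolename, privilege_type, admin_option
  FROM pg_catalog.gs_db_privileges
 WHERE rolename = 'alice';

+/* add sys function gs_undo_translot */ +DROP FUNCTION IF EXISTS pg_catalog.gs_undo_translot(int4, int4, OUT grpId oid, OUT xactId text, OUT startUndoPtr text, OUT endUndoPtr text, OUT lsn text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4431; +CREATE OR REPLACE FUNCTION pg_catalog.gs_undo_translot(int4, int4, OUT grpId oid, OUT xactId text, OUT 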
startUndoPtr text, OUT endUndoPtr text, OUT lsn text, OUT slot_states oid) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_undo_translot'; + +/*------ add sys function gs_index_verify ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_index_verify(oid, oid, OUT ptype text, OUT blkno oid, OUT status text); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9150; +CREATE FUNCTION pg_catalog.gs_index_verify(oid, oid, OUT ptype text, OUT blkno oid, OUT status text) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_index_verify'; + +/*------ add sys function gs_index_recycle_queue ------*/ +DROP FUNCTION IF EXISTS pg_catalog.gs_index_recycle_queue(oid, oid, oid, OUT rblkno oid, OUT item_offset oid, OUT xid text, OUT dblkno oid, OUT prev oid, OUT next oid); +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 9151; +CREATE FUNCTION pg_catalog.gs_index_recycle_queue(oid, oid, oid, OUT rblkno oid, OUT item_offset oid, OUT xid text, OUT dblkno oid, OUT prev oid, OUT next oid) +RETURNS SETOF record LANGUAGE INTERNAL as 'gs_index_recycle_queue';DROP FUNCTION IF EXISTS pg_catalog.pg_logical_get_area_changes() cascade; +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_PROC, 4978; +CREATE OR REPLACE FUNCTION pg_catalog.pg_logical_get_area_changes(start_lsn text, upto_lsn text, upto_nchanges integer, plugin name DEFAULT '{}'::text[], xlog_path text, VARIADIC options text[], OUT location text, OUT xid xid, OUT data text) + RETURNS SETOF record + LANGUAGE internal + NOT FENCED NOT SHIPPABLE COST 1000 +AS $function$pg_logical_get_area_changes$function$; +-- adding system table pg_publication + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 6130, 6141, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_publication +( + pubname name NOCOMPRESS, + pubowner oid NOCOMPRESS, + puballtables bool NOCOMPRESS, + pubinsert bool NOCOMPRESS, + pubupdate bool NOCOMPRESS, + pubdelete bool NOCOMPRESS +) WITH OIDS; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6120; +CREATE UNIQUE INDEX pg_publication_oid_index ON pg_catalog.pg_publication USING BTREE(oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6121; +CREATE UNIQUE INDEX pg_publication_pubname_index ON pg_catalog.pg_publication USING BTREE(pubname); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +GRANT SELECT ON TABLE pg_catalog.pg_publication TO PUBLIC; + +-- adding system table pg_publication_rel + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 6132, 6142, 0, 0; + +CREATE TABLE IF NOT EXISTS pg_catalog.pg_publication_rel +( + prpubid oid NOCOMPRESS NOT NULL, + prrelid oid NOCOMPRESS NOT NULL +) WITH OIDS; + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6122; +CREATE UNIQUE INDEX pg_publication_rel_oid_index ON pg_catalog.pg_publication_rel USING BTREE(oid OID_OPS); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 6123; +CREATE UNIQUE INDEX pg_publication_rel_map_index ON pg_catalog.pg_publication_rel USING BTREE(prrelid, prpubid); + +SET LOCAL inplace_upgrade_next_system_object_oids = IUO_CATALOG, false, true, 0, 0, 0, 0; + +GRANT SELECT ON TABLE pg_catalog.pg_publication_rel TO PUBLIC; \ No newline at end of file
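In pg_publication_rel, prpubid references the owning pg_publication row's OID and prrelid the published relation, and the unique (prrelid, prpubid) index guarantees at most one membership row per table per publication. The following hedged sketch lists membership by joining the two new catalogs; the join itself is implied by the column definitions above rather than spelled out in the patch.

-- Hedged sketch: list which tables belong to which publication.
SELECT p.pubname, pr.prrelid::regclass AS published_table
  FROM pg_catalog.pg_publication p
  JOIN pg_catalog.pg_publication_rel pr ON pr.prpubid = p.oid
 ORDER BY p.pubname;

diff --git a/src/include/client_logic/client_logic_enums.h b/src/include/client_logic/client_logic_enums.h index 2b00138d6..03b124c02 100644 --- 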
a/src/include/client_logic/client_logic_enums.h +++ b/src/include/client_logic/client_logic_enums.h @@ -76,7 +76,7 @@ inline CmkKeyStore get_key_store_from_string(const char *key_store) if (key_store == NULL || strlen(key_store) == 0) { return CmkKeyStore::INVALID_KEYSTORE; } -#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS))) +#if ((defined(ENABLE_MULTIPLE_NODES)) || (defined(ENABLE_PRIVATEGAUSS) && (!defined(ENABLE_LITE_MODE)))) if (strcasecmp(key_store, "gs_ktool") == 0) { return CmkKeyStore::GS_KTOOL; } diff --git a/src/include/client_logic/client_logic_proc.h b/src/include/client_logic/client_logic_proc.h index ebca803f6..0a9db2567 100644 --- a/src/include/client_logic/client_logic_proc.h +++ b/src/include/client_logic/client_logic_proc.h @@ -26,10 +26,14 @@ #include "stdint.h" #include "datatypes.h" -extern void add_rettype_orig(const Oid func_id, const Oid ret_type, const Oid res_type); -extern void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_types, const int tup_natts); -extern void record_proc_depend(const Oid func_id, const Oid gs_encrypted_proc_id); -extern void verify_rettype_for_out_param(const Oid func_id); -extern void delete_proc_client_info(Oid func_id); +#include "postgres_ext.h" +#include "access/htup.h" -#endif \ No newline at end of file +void add_rettype_orig(const Oid func_id, const Oid ret_type, const Oid res_type); +void add_allargtypes_orig(const Oid func_id, Datum* all_types_orig, Datum* all_types, const int tup_natts, const Oid relid = InvalidOid); +void record_proc_depend(const Oid func_id, const Oid gs_encrypted_proc_id); +void verify_rettype_for_out_param(const Oid func_id); +void delete_proc_client_info(HeapTuple); +void delete_proc_client_info(Oid func_id); + +#endif diff --git a/src/include/client_logic/cstrings_map.h b/src/include/client_logic/cstrings_map.h index abf70879b..b5f487579 100644 --- a/src/include/client_logic/cstrings_map.h +++ b/src/include/client_logic/cstrings_map.h @@ -43,7 +43,7 @@ public: CStringsMap &operator = (const CStringsMap &other); void init(const CStringsMap &other); ~CStringsMap(); - void set(const char *key, const char *value, size_t valsize = 0); + void set(const char *key, const char *value, size_t valsize = SIZE_MAX); void clear(); const char *find(const char *key) const; const char *find(const char *key, size_t *size) const; diff --git a/src/include/cm/cm_cgroup.h b/src/include/cm/cm_cgroup.h new file mode 100644 index 000000000..690ab80b9 --- /dev/null +++ b/src/include/cm/cm_cgroup.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * cm_cgroup.h + * + * + * + * IDENTIFICATION + * src/include/cm/cm_cgroup.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef CM_CGROUP_H +#define CM_CGROUP_H +#ifdef ENABLE_MULTIPLE_NODES +/* get the cm cgroup relpath and initialize cgroup. 
+ * Please note, caller should free the return value. + */ +extern char* gscgroup_cm_init(); + +/* make the current thread attach to cm cgroup */ +extern void gscgroup_cm_attach_task(const char* relpath); +extern void gscgroup_cm_attach_task_pid(const char* relpath, pid_t tid); +#endif +#endif \ No newline at end of file diff --git a/src/include/cm/cm_misc.h b/src/include/cm/cm_misc.h new file mode 100644 index 000000000..a3fca3e7f --- /dev/null +++ b/src/include/cm/cm_misc.h @@ -0,0 +1,209 @@ +/* --------------------------------------------------------------------------------------- + * + * cm_misc.h + * Declarations/definitions of miscellaneous utilities for the cluster manager (CM). + * + * These helpers cover string/enum mapping tables, environment and input + * validation, lock-file creation and removal, and other small utilities + * shared by the CM components. + * + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group + * + * + * IDENTIFICATION + * src/include/cm/cm_misc.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef CM_MISC_H +#define CM_MISC_H + +#include "utils/syscall_lock.h" +#include "cm/etcdapi.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct instance_not_exist_reason_string { + const char* level_string; + int level_val; +} instance_not_exist_reason_string; + +typedef struct log_level_string { + const char* level_string; + int level_val; +} log_level_string; + +typedef struct instance_datanode_build_reason_string { + const char* reason_string; + int reason_val; +} instance_datanode_build_reason_string; + +typedef struct instacne_type_string { + const char* type_string; + int type_val; +} instacne_type_string; + +typedef struct gtm_con_string { + const char* con_string; + int con_val; +} gtm_con_string; + +typedef struct instance_coordinator_active_status_string { + const char* active_status_string; + int active_status_val; +} instance_coordinator_active_status_string; + +typedef struct instance_datanode_lockmode_string { + const char* lockmode_string; + uint32 lockmode_val; +} instance_datanode_lockmode_string; + +typedef struct instacne_datanode_role_string { + const char* role_string; + uint32 role_val; +} instacne_datanode_role_string; + +typedef struct instacne_datanode_dbstate_string { + const char* dbstate_string; + int dbstate_val; +} instacne_datanode_dbstate_string; + +typedef struct instacne_datanode_wal_send_state_string { + const char* wal_send_state_string; + int wal_send_state_val; +} instacne_datanode_wal_send_state_string; + +typedef struct instacne_datanode_sync_state_string { + const char* wal_sync_state_string; + int wal_sync_state_val; +} instacne_datanode_sync_state_string; + +typedef struct cluster_state_string { + const char* cluster_state_string; + int cluster_state_val; +} cluster_state_string; + +typedef struct cluster_msg_string { + const char* cluster_msg_str; + int cluster_msg_val; +} cluster_msg_string; + +typedef struct ObsBackupStatusMapString_t { + const char *obsStatusStr; + int backupStatus; +} ObsBackupStatusMapString; + +typedef struct server_role_string { + int role_val; + const char* role_string; +} server_role_string; + +#define LOCK_FILE_LINE_PID 1 +#define LOCK_FILE_LINE_DATA_DIR 2 +#define LOCK_FILE_LINE_START_TIME 3 +#define LOCK_FILE_LINE_PORT 4 +#define LOCK_FILE_LINE_SOCKET_DIR 5 +#define 
LOCK_FILE_LINE_LISTEN_ADDR 6 +#define LOCK_FILE_LINE_SHMEM_KEY 7 + +#ifndef ERROR_LIMIT_LEN +#define ERROR_LIMIT_LEN 256 +#endif + +/** + * @def SHELL_RETURN_CODE + * @brief Get the shell command return code. + * @return Return the shell command return code. + */ +#define SHELL_RETURN_CODE(systemReturn) \ + (systemReturn > 0 ? static_cast<int>(static_cast<uint32>(systemReturn) >> 8) : systemReturn) + +extern char** readfile(const char* path); +extern void freefile(char** lines); +extern int log_level_string_to_int(const char* log_level); +extern int datanode_rebuild_reason_string_to_int(const char* reason); +extern const char* DcfRoleToString(int role); +extern const char* instance_not_exist_reason_to_string(int reason); +extern int datanode_lockmode_string_to_int(const char* lockmode); +extern int datanode_role_string_to_int(const char* role); +extern int datanode_dbstate_string_to_int(const char* dbstate); +extern int datanode_wal_send_state_string_to_int(const char* dbstate); +extern int datanode_wal_sync_state_string_to_int(const char* dbstate); +extern const char* log_level_int_to_string(int log_level); +extern const char* cluster_state_int_to_string(int cluster_state); +extern const char* cluster_msg_int_to_string(int cluster_msg); +extern int32 ObsStatusStr2Int(const char *statusStr); +extern const char* datanode_wal_sync_state_int_to_string(int dbstate); +extern const char* datanode_wal_send_state_int_to_string(int dbstate); + +extern const char* datanode_dbstate_int_to_string(int dbstate); +extern const char* type_int_to_string(int type); +const char* gtm_con_int_to_string(int con); +extern const char* datanode_role_int_to_string(int role); +extern const char* datanode_static_role_int_to_string(uint32 role); +extern const char* datanode_rebuild_reason_int_to_string(int reason); +extern const char* server_role_to_string(int role, bool is_pending); +extern const char* etcd_role_to_string(int role); +extern const char* kerberos_status_to_string(int role); + +extern void cm_sleep(unsigned int sec); +extern void cm_usleep(unsigned int usec); + +extern uint32 get_healthy_etcd_node_count(EtcdTlsAuthPath* tlsPath, int programType); + +extern void check_input_for_security(const char* input); +extern void check_env_value(const char* input_env_value); + +extern void print_environ(void); + +/** + * @brief + * Creates a lock file for a process with a specified PID. + * + * @note + * When pid is set to -1, the specified process is the current process. + * + * @param [in] filename + * The name of the lockfile to create. + * @param [in] data_path + * The data path of the instance. + * @param [in] pid + * The pid of the process. + * + * @return + * - 0 Create successfully. + * - -1 Create failure. + */ +extern int create_lock_file(const char* filename, const char* data_path, const pid_t pid = -1); + + +/** + * @brief + * Delete pid file. + * + * @param [in] filename + * The pid file to be deleted. + * + * @return + * void. 
+ */ +extern void delete_lock_file(const char *filename); + +extern void cm_pthread_rw_lock(pthread_rwlock_t* rwlock); +extern void cm_pthread_rw_unlock(pthread_rwlock_t* rwlock); + +extern int cm_getenv( + const char* env_var, char* output_env_value, uint32 env_value_len, syscalllock cmLock, int elevel = -1); + +extern int CmExecuteCmd(const char* command, struct timeval timeout); +extern int CmInitMasks(const int* ListenSocket, fd_set* rmask); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/include/cm/cm_msg.h b/src/include/cm/cm_msg.h new file mode 100644 index 000000000..c746a6dee --- /dev/null +++ b/src/include/cm/cm_msg.h @@ -0,0 +1,1652 @@ +/* --------------------------------------------------------------------------------------- + * + * cm_msg.h + * + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group + * + * IDENTIFICATION + * src/include/cm/cm_msg.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef CM_MSG_H +#define CM_MSG_H + +#include "replication/replicainternal.h" +#include "access/xlogdefs.h" +#include "access/redo_statistic_msg.h" +#include "common/config/cm_config.h" +#include <string> +#include <vector> + +#define CM_MAX_SENDER_NUM 2 +#define LOGIC_CLUSTER_NUMBER (32 + 1) // max 32 logic + 1 elastic group +#define CM_LOGIC_CLUSTER_NAME_LEN 64 +#define CM_MSG_ERR_INFORMATION_LENGTH 1024 +#ifndef MAX_INT32 +#define MAX_INT32 (2147483600) +#endif +#define CN_INFO_NUM 8 +#define RESERVE_NUM 160 +#define RESERVE_NUM_USED 4 +#define MAX_SYNC_STANDBY_LIST 1024 +#define REMAIN_LEN 20 + +using std::string; +using std::vector; + +const uint32 g_barrierSlotVersion = 92380; +const uint32 g_hadrKeyCn = 92381; + +const int32 FAILED_SYNC_DATA = 0; +const int32 SUCCESS_SYNC_DATA = 1; + +/* + * Symbols in the following enum are used in cluster_msg_map_string defined in cm_misc.cpp. + * Modifications to the following enum should be reflected in cluster_msg_map_string as well. + */ +typedef enum CM_MessageType { + MSG_CTL_CM_SWITCHOVER = 0, + MSG_CTL_CM_BUILD = 1, + MSG_CTL_CM_SYNC = 2, + MSG_CTL_CM_QUERY = 3, + MSG_CTL_CM_NOTIFY = 4, + MSG_CTL_CM_BUTT = 5, + MSG_CM_CTL_DATA_BEGIN = 6, + MSG_CM_CTL_DATA = 7, + MSG_CM_CTL_NODE_END = 8, + MSG_CM_CTL_DATA_END = 9, + MSG_CM_CTL_COMMAND_ACK = 10, + + MSG_CM_AGENT_SWITCHOVER = 11, + MSG_CM_AGENT_FAILOVER = 12, + MSG_CM_AGENT_BUILD = 13, + MSG_CM_AGENT_SYNC = 14, + MSG_CM_AGENT_NOTIFY = 15, + MSG_CM_AGENT_NOTIFY_CN = 16, + MSG_AGENT_CM_NOTIFY_CN_FEEDBACK = 17, + MSG_CM_AGENT_CANCEL_SESSION = 18, + MSG_CM_AGENT_RESTART = 19, + MSG_CM_AGENT_RESTART_BY_MODE = 20, + MSG_CM_AGENT_REP_SYNC = 21, + MSG_CM_AGENT_REP_ASYNC = 22, + MSG_CM_AGENT_REP_MOST_AVAILABLE = 23, + MSG_CM_AGENT_BUTT = 24, + + MSG_AGENT_CM_DATA_INSTANCE_REPORT_STATUS = 25, + MSG_AGENT_CM_COORDINATE_INSTANCE_STATUS = 26, + MSG_AGENT_CM_GTM_INSTANCE_STATUS = 27, + MSG_AGENT_CM_BUTT = 28, + + /**************** =====CAUTION===== ****************: + If you want to add a new MessageType, you should add it at the end. + It's forbidden to insert a new MessageType in the middle, as it would change the other MessageType values. 
+ The MessageType is transferred between cm_agent and cm_server on different hosts. + You should ensure the type values are identical and compatible between old and new versions */ + + MSG_CM_CM_VOTE = 29, + MSG_CM_CM_BROADCAST = 30, + MSG_CM_CM_NOTIFY = 31, + MSG_CM_CM_SWITCHOVER = 32, + MSG_CM_CM_FAILOVER = 33, + MSG_CM_CM_SYNC = 34, + MSG_CM_CM_SWITCHOVER_ACK = 35, + MSG_CM_CM_FAILOVER_ACK = 36, + MSG_CM_CM_ROLE_CHANGE_NOTIFY = 37, + MSG_CM_CM_REPORT_SYNC = 38, + + MSG_AGENT_CM_HEARTBEAT = 39, + MSG_CM_AGENT_HEARTBEAT = 40, + MSG_CTL_CM_SET = 41, + MSG_CTL_CM_SWITCHOVER_ALL = 42, + MSG_CM_CTL_SWITCHOVER_ALL_ACK = 43, + MSG_CTL_CM_BALANCE_CHECK = 44, + MSG_CM_CTL_BALANCE_CHECK_ACK = 45, + MSG_CTL_CM_BALANCE_RESULT = 46, + MSG_CM_CTL_BALANCE_RESULT_ACK = 47, + MSG_CTL_CM_QUERY_CMSERVER = 48, + MSG_CM_CTL_CMSERVER = 49, + + MSG_TYPE_BUTT = 50, + MSG_CM_AGENT_NOTIFY_CN_CENTRAL_NODE = 51, + MSG_CM_AGENT_DROP_CN = 52, + MSG_CM_AGENT_DROPPED_CN = 53, + MSG_AGENT_CM_FENCED_UDF_INSTANCE_STATUS = 54, + MSG_CTL_CM_SWITCHOVER_FULL = 55, /* inform cm agent to do switchover -A */ + MSG_CM_CTL_SWITCHOVER_FULL_ACK = 56, /* inform cm ctl that cm server is doing switchover -A */ + MSG_CM_CTL_SWITCHOVER_FULL_DENIED = 57, /* inform cm ctl that switchover -A is denied by cm server */ + MSG_CTL_CM_SWITCHOVER_FULL_CHECK = 58, /* cm ctl informs cm server to check if switchover -A is done */ + MSG_CM_CTL_SWITCHOVER_FULL_CHECK_ACK = 59, /* inform cm ctl that switchover -A is done */ + MSG_CTL_CM_SWITCHOVER_FULL_TIMEOUT = 60, /* cm ctl informs cm server that switchover -A timed out */ + MSG_CM_CTL_SWITCHOVER_FULL_TIMEOUT_ACK = 61, /* inform cm ctl that switchover -A stopped */ + + MSG_CTL_CM_SETMODE = 62, /* new mode */ + MSG_CM_CTL_SETMODE_ACK = 63, + + MSG_CTL_CM_SWITCHOVER_AZ = 64, /* inform cm agent to do switchover -zazName */ + MSG_CM_CTL_SWITCHOVER_AZ_ACK = 65, /* inform cm ctl that cm server is doing switchover -zazName */ + MSG_CM_CTL_SWITCHOVER_AZ_DENIED = 66, /* inform cm ctl that switchover -zazName is denied by cm server */ + MSG_CTL_CM_SWITCHOVER_AZ_CHECK = 67, /* cm ctl informs cm server to check if switchover -zazName is done */ + MSG_CM_CTL_SWITCHOVER_AZ_CHECK_ACK = 68, /* inform cm ctl that switchover -zazName is done */ + MSG_CTL_CM_SWITCHOVER_AZ_TIMEOUT = 69, /* cm ctl informs cm server that switchover -zazName timed out */ + MSG_CM_CTL_SWITCHOVER_AZ_TIMEOUT_ACK = 70, /* inform cm ctl that switchover -zazName stopped */ + + MSG_CM_CTL_SET_ACK = 71, + MSG_CTL_CM_GET = 72, + MSG_CM_CTL_GET_ACK = 73, + MSG_CM_AGENT_GS_GUC = 74, + MSG_AGENT_CM_GS_GUC_ACK = 75, + MSG_CM_CTL_SWITCHOVER_INCOMPLETE_ACK = 76, + MSG_CM_CM_TIMELINE = 77, /* when restarting the cluster, cmserver primary and standby timeline */ + MSG_CM_BUILD_DOING = 78, + MSG_AGENT_CM_ETCD_CURRENT_TIME = 79, /* etcd clock monitoring message */ + MSG_CM_QUERY_INSTANCE_STATUS = 80, + MSG_CM_SERVER_TO_AGENT_CONN_CHECK = 81, + MSG_CTL_CM_GET_DATANODE_RELATION = 82, /* deprecated since quick switchover was removed */ + MSG_CM_BUILD_DOWN = 83, + MSG_CTL_CM_HOTPATCH = 84, + MSG_CM_SERVER_REPAIR_CN_ACK = 85, + MSG_CTL_CM_DISABLE_CN = 86, + MSG_CTL_CM_DISABLE_CN_ACK = 87, + MSG_CM_AGENT_LOCK_NO_PRIMARY = 88, + MSG_CM_AGENT_LOCK_CHOSEN_PRIMARY = 89, + MSG_CM_AGENT_UNLOCK = 90, + MSG_CTL_CM_STOP_ARBITRATION = 91, + MSG_CTL_CM_FINISH_REDO = 92, + MSG_CM_CTL_FINISH_REDO_ACK = 93, + MSG_CM_AGENT_FINISH_REDO = 94, + MSG_CTL_CM_FINISH_REDO_CHECK = 95, + MSG_CM_CTL_FINISH_REDO_CHECK_ACK = 96, + MSG_AGENT_CM_KERBEROS_STATUS = 97, + MSG_CTL_CM_QUERY_KERBEROS = 98, + 
MSG_CTL_CM_QUERY_KERBEROS_ACK = 99, + MSG_AGENT_CM_DISKUSAGE_STATUS = 100, + MSG_CM_AGENT_OBS_DELETE_XLOG = 101, + MSG_CM_AGENT_DROP_CN_OBS_XLOG = 102, + MSG_AGENT_CM_DATANODE_INSTANCE_BARRIER = 103, + MSG_AGENT_CM_COORDINATE_INSTANCE_BARRIER = 104, + MSG_CTL_CM_GLOBAL_BARRIER_QUERY = 105, + MSG_CM_CTL_GLOBAL_BARRIER_DATA = 106, + MSG_CM_CTL_GLOBAL_BARRIER_DATA_BEGIN = 107, + MSG_CM_CTL_BARRIER_DATA_END = 108, + MSG_CM_CTL_BACKUP_OPEN = 109, + MSG_CM_AGENT_DN_SYNC_LIST = 110, + MSG_AGENT_CM_DN_SYNC_LIST = 111, + MSG_CTL_CM_SWITCHOVER_FAST = 112, + MSG_CM_AGENT_SWITCHOVER_FAST = 113, + MSG_CTL_CM_RELOAD = 114, + MSG_CM_CTL_RELOAD_ACK = 115, + MSG_CM_CTL_INVALID_COMMAND_ACK = 116, + MSG_AGENT_CM_CN_OBS_STATUS = 117, + MSG_CM_AGENT_NOTIFY_CN_RECOVER = 118, + MSG_CM_AGENT_FULL_BACKUP_CN_OBS = 119, + MSG_AGENT_CM_BACKUP_STATUS_ACK = 120, + MSG_CM_AGENT_REFRESH_OBS_DEL_TEXT = 121, + MSG_AGENT_CM_INSTANCE_BARRIER_NEW = 122, + MSG_CTL_CM_GLOBAL_BARRIER_QUERY_NEW = 123, + MSG_CM_CTL_GLOBAL_BARRIER_DATA_BEGIN_NEW = 124, + MSG_CM_AGENT_DATANODE_INSTANCE_BARRIER = 125, + MSG_CM_AGENT_COORDINATE_INSTANCE_BARRIER = 126, +} CM_MessageType; + +#define UNDEFINED_LOCKMODE 0 +#define POLLING_CONNECTION 1 +#define SPECIFY_CONNECTION 2 +#define PROHIBIT_CONNECTION 3 + +#define INSTANCE_ROLE_INIT 0 +#define INSTANCE_ROLE_PRIMARY 1 +#define INSTANCE_ROLE_STANDBY 2 +#define INSTANCE_ROLE_PENDING 3 +#define INSTANCE_ROLE_NORMAL 4 +#define INSTANCE_ROLE_UNKNOWN 5 +#define INSTANCE_ROLE_DUMMY_STANDBY 6 +#define INSTANCE_ROLE_DELETED 7 +#define INSTANCE_ROLE_DELETING 8 +#define INSTANCE_ROLE_READONLY 9 +#define INSTANCE_ROLE_OFFLINE 10 +#define INSTANCE_ROLE_MAIN_STANDBY 11 +#define INSTANCE_ROLE_CASCADE_STANDBY 12 + +#define INSTANCE_ROLE_FIRST_INIT 1 +#define INSTANCE_ROLE_HAVE_INIT 2 + +#define INSTANCE_DATA_REPLICATION_SYNC 1 +#define INSTANCE_DATA_REPLICATION_ASYNC 2 +#define INSTANCE_DATA_REPLICATION_MOST_AVAILABLE 3 +#define INSTANCE_DATA_REPLICATION_POTENTIAL_SYNC 4 +#define INSTANCE_DATA_REPLICATION_QUORUM 5 +#define INSTANCE_DATA_REPLICATION_UNKONWN 6 + +#define INSTANCE_TYPE_GTM 1 +#define INSTANCE_TYPE_DATANODE 2 +#define INSTANCE_TYPE_COORDINATE 3 +#define INSTANCE_TYPE_FENCED_UDF 4 +#define INSTANCE_TYPE_UNKNOWN 5 + +#define INSTANCE_WALSNDSTATE_STARTUP 0 +#define INSTANCE_WALSNDSTATE_BACKUP 1 +#define INSTANCE_WALSNDSTATE_CATCHUP 2 +#define INSTANCE_WALSNDSTATE_STREAMING 3 +#define INSTANCE_WALSNDSTATE_DUMPLOG 4 +const int INSTANCE_WALSNDSTATE_NORMAL = 5; +const int INSTANCE_WALSNDSTATE_UNKNOWN = 6; + +#define CON_OK 0 +#define CON_BAD 1 +#define CON_STARTED 2 +#define CON_MADE 3 +#define CON_AWAITING_RESPONSE 4 +#define CON_AUTH_OK 5 +#define CON_SETEN 6 +#define CON_SSL_STARTUP 7 +#define CON_NEEDED 8 +#define CON_UNKNOWN 9 +#define CON_MANUAL_STOPPED 10 +#define CON_DISK_DEMAGED 11 +#define CON_PORT_USED 12 +#define CON_NIC_DOWN 13 +#define CON_GTM_STARTING 14 + +#define CM_SERVER_UNKNOWN 0 +#define CM_SERVER_PRIMARY 1 +#define CM_SERVER_STANDBY 2 +#define CM_SERVER_INIT 3 +#define CM_SERVER_DOWN 4 + +#define CM_ETCD_UNKNOWN 0 +#define CM_ETCD_FOLLOWER 1 +#define CM_ETCD_LEADER 2 +#define CM_ETCD_DOWN 3 + +#define SWITCHOVER_UNKNOWN 0 +#define SWITCHOVER_FAIL 1 +#define SWITCHOVER_SUCCESS 2 +#define SWITCHOVER_EXECING 3 +#define SWITCHOVER_PARTLY_SUCCESS 4 +#define SWITCHOVER_ABNORMAL 5 +#define INVALID_COMMAND 6 + + +#define UNKNOWN_BAD_REASON 0 +#define PORT_BAD_REASON 1 +#define NIC_BAD_REASON 2 +#define DISC_BAD_REASON 3 +#define STOPPED_REASON 4 +#define CN_DELETED_REASON 5 + +#define 
KERBEROS_STATUS_UNKNOWN 0 +#define KERBEROS_STATUS_NORMAL 1 +#define KERBEROS_STATUS_ABNORMAL 2 +#define KERBEROS_STATUS_DOWN 3 + +#define HOST_LENGTH 32 +#define BARRIERLEN 40 +#define MAX_SLOT_NAME_LEN 64 +#define MAX_BARRIER_SLOT_COUNT 5 +// the length of cm_server sync msg is 9744, msg type is MSG_CM_CM_REPORT_SYNC +#define CM_MSG_MAX_LENGTH (12288 * 2) + +#define CMAGENT_NO_CCN "NoCentralNode" + +#define OBS_DEL_VERSION_V1 (1) +#define DEL_TEXT_HEADER_LEN_V1 (10) // version(4->V%3d) + delCount(4->C%3d) + '\n' + '\0' +#define CN_BUILD_TASK_ID_MAX_LEN (21) // cnId(4) + cmsId(4) + time(12->yyMMddHH24mmss) + 1 +#define MAX_OBS_CN_COUNT (64) +#define MAX_OBS_DEL_TEXT_LEN (CN_BUILD_TASK_ID_MAX_LEN * MAX_OBS_CN_COUNT + DEL_TEXT_HEADER_LEN_V1) + +extern int g_gtm_phony_dead_times; +extern int g_dn_phony_dead_times[CM_MAX_DATANODE_PER_NODE]; +extern int g_cn_phony_dead_times; + +typedef enum {DN, CN} GetinstanceType; + +typedef struct DatanodeSyncList { + int count; + uint32 dnSyncList[CM_PRIMARY_STANDBY_NUM]; + int syncStandbyNum; + // remain + int remain; + char remainStr[DN_SYNC_LEN]; +} DatanodeSyncList; + +typedef struct cm_msg_type { + int msg_type; +} cm_msg_type; + +typedef struct cm_switchover_incomplete_msg { + int msg_type; + char errMsg[CM_MSG_ERR_INFORMATION_LENGTH]; +} cm_switchover_incomplete_msg; + +typedef struct cm_redo_stats { + int is_by_query; + uint64 redo_replayed_speed; + XLogRecPtr standby_last_replayed_read_Ptr; +} cm_redo_stats; + +typedef struct ctl_to_cm_stop_arbitration { + int msg_type; +} ctl_to_cm_stop_arbitration; + +typedef struct ctl_to_cm_switchover { + int msg_type; + char azName[CM_AZ_NAME]; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} ctl_to_cm_switchover; + +typedef struct ctl_to_cm_failover { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} ctl_to_cm_failover; + +typedef struct cm_to_ctl_finish_redo_check_ack { + int msg_type; + int finish_redo_count; +} cm_to_ctl_finish_redo_check_ack; + +typedef struct ctl_to_cm_finish_redo { + int msg_type; +} ctl_to_cm_finish_redo; + +#define CM_CTL_UNFORCE_BUILD 0 +#define CM_CTL_FORCE_BUILD 1 + +typedef struct ctl_to_cm_build { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; + int force_build; + int full_build; +} ctl_to_cm_build; +typedef struct ctl_to_cm_global_barrier_query { + int msg_type; +}ctl_to_cm_global_barrier_query; + +typedef struct ctl_to_cm_query { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; + int detail; + int relation; +} ctl_to_cm_query; + +#define NOTIFY_MSG_RESERVED (32) + +typedef struct Cm2AgentNotifyCnRecoverByObs_t { + int msg_type; + uint32 instanceId; + bool changeKeyCn; + uint32 syncCnId; + char slotName[MAX_SLOT_NAME_LEN]; +} Cm2AgentNotifyCnRecoverByObs; + +typedef struct Cm2AgentBackupCn2Obs_t { + int msg_type; + uint32 instanceId; + char slotName[MAX_SLOT_NAME_LEN]; + char taskIdStr[CN_BUILD_TASK_ID_MAX_LEN]; +} Cm2AgentBackupCn2Obs; + +typedef struct Agent2CMBackupStatusAck_t { + int msg_type; + uint32 node; + uint32 instanceId; + char slotName[MAX_SLOT_NAME_LEN]; + char taskIdStr[CN_BUILD_TASK_ID_MAX_LEN]; + int32 status; +} Agent2CMBackupStatusAck; + +typedef struct Cm2AgentRefreshObsDelText_t { + int msg_type; + uint32 instanceId; + char slotName[MAX_SLOT_NAME_LEN]; + char obsDelCnText[MAX_OBS_DEL_TEXT_LEN]; +} Cm2AgentRefreshObsDelText; + +typedef struct ctl_to_cm_notify { + CM_MessageType msg_type; + 
ctlToCmNotifyDetail detail; +} ctl_to_cm_notify; + +typedef struct ctl_to_cm_disable_cn { + int msg_type; + uint32 instanceId; + int wait_seconds; +} ctl_to_cm_disable_cn; + +typedef struct ctl_to_cm_disable_cn_ack { + int msg_type; + bool disable_ok; + char errMsg[CM_MSG_ERR_INFORMATION_LENGTH]; +} ctl_to_cm_disable_cn_ack; + +typedef enum arbitration_mode { + UNKNOWN_ARBITRATION = 0, + MAJORITY_ARBITRATION = 1, + MINORITY_ARBITRATION = 2 +} arbitration_mode; + +typedef enum cm_start_mode { + UNKNOWN_START = 0, + MAJORITY_START = 1, + MINORITY_START = 2, + OTHER_MINORITY_START = 3 +} cm_start_mode; + +typedef enum switchover_az_mode { + UNKNOWN_SWITCHOVER_AZ = 0, + NON_AUTOSWITCHOVER_AZ = 1, + AUTOSWITCHOVER_AZ = 2 +} switchover_az_mode; + +typedef enum logic_cluster_restart_mode { + UNKNOWN_LOGIC_CLUSTER_RESTART = 0, + INITIAL_LOGIC_CLUSTER_RESTART = 1, + MODIFY_LOGIC_CLUSTER_RESTART = 2 +} logic_cluster_restart_mode; + +typedef enum cluster_mode { + INVALID_CLUSTER_MODE = 0, + ONE_MASTER_1_SLAVE, + ONE_MASTER_2_SLAVE, + ONE_MASTER_3_SLAVE, + ONE_MASTER_4_SLAVE, + ONE_MASTER_5_SLAVE +} cluster_mode; + +typedef enum synchronous_standby_mode { + AnyFirstNo = 0, /* don't have */ + AnyAz1, /* ANY 1(az1) */ + FirstAz1, /* FIRST 1(az1) */ + AnyAz2, /* ANY 1(az2) */ + FirstAz2, /* FIRST 1(az2) */ + Any2Az1Az2, /* ANY 2(az1,az2) */ + First2Az1Az2, /* FIRST 2(az1,az2) */ + Any3Az1Az2, /* ANY 3(az1, az2) */ + First3Az1Az2 /* FIRST 3(az1, az2) */ +} synchronous_standby_mode; + +typedef enum { + CLUSTER_PRIMARY = 0, + CLUSTER_OBS_STANDBY = 1, + CLUSTER_STREAMING_STANDBY = 2 +} ClusterRole; + +typedef enum { + DISASTER_RECOVERY_NULL = 0, + DISASTER_RECOVERY_OBS = 1, + DISASTER_RECOVERY_STREAMING = 2 +} DisasterRecoveryType; + +typedef enum { + INSTALL_TYPE_DEFAULT = 0, + INSTALL_TYPE_SHARE_STORAGE = 1, + INSTALL_TYPE_STREAMING = 2 +}ClusterInstallType; + +typedef struct ctl_to_cm_set { + int msg_type; + int log_level; + uint32 logic_cluster_delay; + arbitration_mode cm_arbitration_mode; + switchover_az_mode cm_switchover_az_mode; + logic_cluster_restart_mode cm_logic_cluster_restart_mode; +} ctl_to_cm_set, cm_to_ctl_get; + +typedef struct cm_to_agent_switchover { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; + int role; + uint32 term; +} cm_to_agent_switchover; + +typedef struct cm_to_agent_failover { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; + uint32 term; +} cm_to_agent_failover; + +typedef struct cm_to_agent_build { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; + int role; + int full_build; + uint32 term; +} cm_to_agent_build; + +typedef struct cm_to_agent_lock1 { + int msg_type; + uint32 node; + uint32 instanceId; +} cm_to_agent_lock1; + +typedef struct cm_to_agent_obs_delete_xlog { + int msg_type; + uint32 node; + uint32 instanceId; + uint64 lsn; +} cm_to_agent_obs_delete_xlog; + +typedef struct cm_to_agent_lock2 { + int msg_type; + uint32 node; + uint32 instanceId; + char disconn_host[HOST_LENGTH]; + uint32 disconn_port; +} cm_to_agent_lock2; + +typedef struct cm_to_agent_unlock { + int msg_type; + uint32 node; + uint32 instanceId; +} cm_to_agent_unlock; + +typedef struct cm_to_agent_finish_redo { + int msg_type; + uint32 node; + uint32 instanceId; + bool is_finish_redo_cmd_sent; +} cm_to_agent_finish_redo; + +typedef struct cm_to_agent_gs_guc { + int msg_type; + uint32 node; + uint32 instanceId; + synchronous_standby_mode type; +} cm_to_agent_gs_guc; 
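Every request and reply declared in this header starts with an `int msg_type` field carrying a `CM_MessageType` value, which is what lets a receiver decode the payload. A minimal illustrative sketch (not part of the patch; it assumes the declarations above are visible through this header):

```cpp
#include <cstring>

/* Peek at the common msg_type header, then cast to the concrete struct.
 * Message and enum names are taken from the declarations above. */
static void DispatchCmMessage(const char *buf, size_t len)
{
    cm_msg_type header;
    if (len < sizeof(header)) {
        return; /* truncated message, ignore */
    }
    std::memcpy(&header, buf, sizeof(header));
    switch (header.msg_type) {
        case MSG_CTL_CM_SWITCHOVER_FAST:
            /* cast buf to ctl_to_cm_switchover and run the fast switchover */
            break;
        case MSG_CM_CTL_INVALID_COMMAND_ACK:
            /* the command was rejected by cm_server */
            break;
        default:
            break;
    }
}
```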
+ +typedef struct agent_to_cm_gs_guc_feedback { + int msg_type; + uint32 node; + uint32 instanceId; /* node of this agent */ + synchronous_standby_mode type; + bool status; /* gs guc command exec status */ +} agent_to_cm_gs_guc_feedback; + +typedef struct CmToAgentGsGucSyncList { + int msgType; + uint32 node; + uint32 instanceId; + uint32 groupIndex; + DatanodeSyncList dnSyncList; + int instanceNum; + // remain + int remain[REMAIN_LEN]; +} CmToAgentGsGucSyncList; + +typedef struct cm_to_agent_notify { + int msg_type; + uint32 node; + uint32 instanceId; + int role; + uint32 term; +} cm_to_agent_notify; + +/* + * msg struct used for cmserver to cmagent + * including the count of datanodes promoted to primary and the datanode instanceId list. + */ +typedef struct cm_to_agent_notify_cn { + int msg_type; + uint32 node; /* node of this coordinator */ + uint32 instanceId; /* coordinator instance id */ + int datanodeCount; /* current count of datanodes promoted to primary */ + uint32 coordinatorId; + int notifyCount; + /* datanode instance id array */ + uint32 datanodeId[FLEXIBLE_ARRAY_MEMBER]; /* VARIABLE LENGTH ARRAY */ +} cm_to_agent_notify_cn; + +/* + * msg struct used for cmserver to cmagent + * including the count of datanodes promoted to primary and the datanode instanceId list. + */ +typedef struct cm_to_agent_drop_cn { + int msg_type; + uint32 node; /* node of this coordinator */ + uint32 instanceId; /* coordinator instance id */ + uint32 coordinatorId; + int role; + bool delay_repair; +} cm_to_agent_drop_cn; + +typedef struct cm_to_agent_notify_cn_central_node { + int msg_type; + uint32 node; /* node of this coordinator */ + uint32 instanceId; /* coordinator instance id */ + char cnodename[NAMEDATALEN]; /* central node id */ + char nodename[NAMEDATALEN]; +} cm_to_agent_notify_cn_central_node; + +/* + * msg struct used for cmserver to cmagent + * including the count of datanodes promoted to primary and the datanode instanceId list. + */ +typedef struct cm_to_agent_cancel_session { + int msg_type; + uint32 node; /* node of this coordinator */ + uint32 instanceId; /* coordinator instance id */ +} cm_to_agent_cancel_session; + +/* + * msg struct used for cmagent to cmserver + * feedback msg for notify cn. 
+ */ +typedef struct agent_to_cm_notify_cn_feedback { + int msg_type; + uint32 node; /* node of this coordinator */ + uint32 instanceId; /* coordinator instance id */ + bool status; /* notify command exec status */ + int notifyCount; +} agent_to_cm_notify_cn_feedback; + +typedef struct BackupInfo_t { + uint32 localKeyCnId; + uint32 obsKeyCnId; + char slotName[MAX_SLOT_NAME_LEN]; + char obsDelCnText[MAX_OBS_DEL_TEXT_LEN]; +} BackupInfo; + +typedef struct Agent2CmBackupInfoRep_t { + int msg_type; + uint32 instanceId; /* coordinator instance id */ + uint32 slotCount; + BackupInfo backupInfos[MAX_BARRIER_SLOT_COUNT]; +} Agent2CmBackupInfoRep; + +typedef struct cm_to_agent_restart { + int msg_type; + uint32 node; + uint32 instanceId; +} cm_to_agent_restart; + +typedef struct cm_to_agent_restart_by_mode { + int msg_type; + uint32 node; + uint32 instanceId; + int role_old; + int role_new; +} cm_to_agent_restart_by_mode; + +typedef struct cm_to_agent_rep_sync { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int sync_mode; +} cm_to_agent_rep_sync; + +typedef struct cm_to_agent_rep_async { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int sync_mode; +} cm_to_agent_rep_async; + +typedef struct cm_to_agent_rep_most_available { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int sync_mode; +} cm_to_agent_rep_most_available; + +typedef struct cm_instance_central_node { + pthread_rwlock_t rw_lock; + pthread_mutex_t mt_lock; + uint32 instanceId; + uint32 node; + uint32 recover; + uint32 isCentral; + uint32 nodecount; + char nodename[NAMEDATALEN]; + char cnodename[NAMEDATALEN]; + char* failnodes; + cm_to_agent_notify_cn_central_node notify; +} cm_instance_central_node; + +typedef struct cm_instance_central_node_msg { + pthread_rwlock_t rw_lock; + cm_to_agent_notify_cn_central_node notify; +} cm_instance_central_node_msg; + +#define MAX_LENGTH_HP_CMD (9) +#define MAX_LENGTH_HP_PATH (256) +#define MAX_LENGTH_HP_RETURN_MSG (1024) + +typedef struct cm_hotpatch_msg { + int msg_type; + char command[MAX_LENGTH_HP_CMD]; + char path[MAX_LENGTH_HP_PATH]; +} cm_hotpatch_msg; + +typedef struct cm_hotpatch_ret_msg { + char msg[MAX_LENGTH_HP_RETURN_MSG]; +} cm_hotpatch_ret_msg; + +typedef struct cm_to_agent_barrier_info { + int msg_type; + uint32 node; + uint32 instanceId; + char queryBarrier[BARRIERLEN]; + char targetBarrier[BARRIERLEN]; +} cm_to_agent_barrier_info; + +#define IP_LEN 64 +#define MAX_REPL_CONNINFO_LEN 256 +#define MAX_REBUILD_REASON_LEN 256 + +const int cn_active_unknown = 0; +const int cn_active = 1; +const int cn_inactive = 2; + +#define INSTANCE_HA_STATE_UNKONWN 0 +#define INSTANCE_HA_STATE_NORMAL 1 +#define INSTANCE_HA_STATE_NEED_REPAIR 2 +#define INSTANCE_HA_STATE_STARTING 3 +#define INSTANCE_HA_STATE_WAITING 4 +#define INSTANCE_HA_STATE_DEMOTING 5 +#define INSTANCE_HA_STATE_PROMOTING 6 +#define INSTANCE_HA_STATE_BUILDING 7 +#define INSTANCE_HA_STATE_CATCH_UP 8 +#define INSTANCE_HA_STATE_COREDUMP 9 +#define INSTANCE_HA_STATE_MANUAL_STOPPED 10 +#define INSTANCE_HA_STATE_DISK_DAMAGED 11 +#define INSTANCE_HA_STATE_PORT_USED 12 +#define INSTANCE_HA_STATE_BUILD_FAILED 13 +#define INSTANCE_HA_STATE_HEARTBEAT_TIMEOUT 14 +#define INSTANCE_HA_STATE_NIC_DOWN 15 +#define INSTANCE_HA_STATE_READ_ONLY 16 + +#define INSTANCE_HA_DATANODE_BUILD_REASON_NORMAL 0 +#define INSTANCE_HA_DATANODE_BUILD_REASON_WALSEGMENT_REMOVED 1 +#define INSTANCE_HA_DATANODE_BUILD_REASON_DISCONNECT 2 +#define 
INSTANCE_HA_DATANODE_BUILD_REASON_VERSION_NOT_MATCHED 3 +#define INSTANCE_HA_DATANODE_BUILD_REASON_MODE_NOT_MATCHED 4 +#define INSTANCE_HA_DATANODE_BUILD_REASON_SYSTEMID_NOT_MATCHED 5 +#define INSTANCE_HA_DATANODE_BUILD_REASON_TIMELINE_NOT_MATCHED 6 +#define INSTANCE_HA_DATANODE_BUILD_REASON_UNKNOWN 7 +#define INSTANCE_HA_DATANODE_BUILD_REASON_USER_PASSWD_INVALID 8 +#define INSTANCE_HA_DATANODE_BUILD_REASON_CONNECTING 9 +#define INSTANCE_HA_DATANODE_BUILD_REASON_DCF_LOG_LOSS 10 + +#define UNKNOWN_LEVEL 0 + +typedef uint64 XLogRecPtr; + +typedef enum CM_DCF_ROLE { + DCF_ROLE_UNKNOWN = 0, + DCF_ROLE_LEADER, + DCF_ROLE_FOLLOWER, + DCF_ROLE_PASSIVE, + DCF_ROLE_LOGGER, + DCF_ROLE_PRE_CANDIDATE, + DCF_ROLE_CANDIDATE, + DCF_ROLE_CEIL, +} DCF_ROLE; + +typedef struct agent_to_cm_coordinate_barrier_status_report { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + uint64 ckpt_redo_point; + char global_barrierId[BARRIERLEN]; + char global_achive_barrierId[BARRIERLEN]; + char barrierID [BARRIERLEN]; + char query_barrierId[BARRIERLEN]; + uint64 barrierLSN; + uint64 archive_LSN; + uint64 flush_LSN; + bool is_barrier_exist; +}agent_to_cm_coordinate_barrier_status_report; + +typedef struct GlobalBarrierItem_t { + char slotname[MAX_SLOT_NAME_LEN]; + char globalBarrierId[BARRIERLEN]; + char globalAchiveBarrierId[BARRIERLEN]; +} GlobalBarrierItem; + +typedef struct GlobalBarrierStatus_t { + int slotCount; + GlobalBarrierItem globalBarriers[MAX_BARRIER_SLOT_COUNT]; +} GlobalBarrierStatus; + +typedef struct LocalBarrierStatus_t { + uint64 ckptRedoPoint; + uint64 barrierLSN; + uint64 archiveLSN; + uint64 flushLSN; + char barrierID[BARRIERLEN]; +} LocalBarrierStatus; + +typedef struct Agent2CmBarrierStatusReport_t { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + LocalBarrierStatus localStatus; + GlobalBarrierStatus globalStatus; +} Agent2CmBarrierStatusReport; + +typedef struct agent_to_cm_datanode_barrier_status_report { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + uint64 ckpt_redo_point; + char barrierID [BARRIERLEN]; + uint64 barrierLSN; + uint64 archive_LSN; + uint64 flush_LSN; +}agent_to_cm_datanode_barrier_status_report; + +typedef struct cm_local_replconninfo { + int local_role; + int static_connections; + int db_state; + XLogRecPtr last_flush_lsn; + int buildReason; + uint32 term; + uint32 disconn_mode; + char disconn_host[HOST_LENGTH]; + uint32 disconn_port; + char local_host[HOST_LENGTH]; + uint32 local_port; + bool redo_finished; +} cm_local_replconninfo; + +typedef struct cm_sender_replconninfo { + pid_t sender_pid; + int local_role; + int peer_role; + int peer_state; + int state; + XLogRecPtr sender_sent_location; + XLogRecPtr sender_write_location; + XLogRecPtr sender_flush_location; + XLogRecPtr sender_replay_location; + XLogRecPtr receiver_received_location; + XLogRecPtr receiver_write_location; + XLogRecPtr receiver_flush_location; + XLogRecPtr receiver_replay_location; + int sync_percent; + int sync_state; + int sync_priority; +} cm_sender_replconninfo; + +typedef struct cm_receiver_replconninfo { + pid_t receiver_pid; + int local_role; + int peer_role; + int peer_state; + int state; + XLogRecPtr sender_sent_location; + XLogRecPtr sender_write_location; + XLogRecPtr sender_flush_location; + XLogRecPtr sender_replay_location; + XLogRecPtr receiver_received_location; + XLogRecPtr receiver_write_location; + XLogRecPtr receiver_flush_location; + XLogRecPtr receiver_replay_location; + int sync_percent; +} 
cm_receiver_replconninfo; + +typedef struct cm_gtm_replconninfo { + int local_role; + int connect_status; + TransactionId xid; + uint64 send_msg_count; + uint64 receive_msg_count; + int sync_mode; +} cm_gtm_replconninfo; + +typedef struct cm_coordinate_replconninfo { + int status; + int db_state; +} cm_coordinate_replconninfo; + +typedef enum cm_coordinate_group_mode { + GROUP_MODE_INIT, + GROUP_MODE_NORMAL, + GROUP_MODE_PENDING, + GROUP_MODE_BUTT +} cm_coordinate_group_mode; + +#define AGENT_TO_INSTANCE_CONNECTION_BAD 0 +#define AGENT_TO_INSTANCE_CONNECTION_OK 1 + +#define INSTANCE_PROCESS_DIED 0 +#define INSTANCE_PROCESS_RUNNING 1 + +const int max_cn_node_num_for_old_version = 16; + +typedef struct cluster_cn_info { + uint32 cn_Id; + uint32 cn_active; + bool cn_connect; + bool drop_success; +} cluster_cn_info; + +typedef struct agent_to_cm_coordinate_status_report_old { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + int connectStatus; + int processStatus; + int isCentral; + char nodename[NAMEDATALEN]; + char logicClusterName[CM_LOGIC_CLUSTER_NAME_LEN]; + char cnodename[NAMEDATALEN]; + cm_coordinate_replconninfo status; + cm_coordinate_group_mode group_mode; + bool cleanDropCnFlag; + bool isCnDnDisconnected; + cluster_cn_info cn_active_info[max_cn_node_num_for_old_version]; + int cn_restart_counts; + int phony_dead_times; +} agent_to_cm_coordinate_status_report_old; + +typedef struct agent_to_cm_coordinate_status_report { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + int connectStatus; + int processStatus; + int isCentral; + char nodename[NAMEDATALEN]; + char logicClusterName[CM_LOGIC_CLUSTER_NAME_LEN]; + char cnodename[NAMEDATALEN]; + cm_coordinate_replconninfo status; + cm_coordinate_group_mode group_mode; + bool cleanDropCnFlag; + bool isCnDnDisconnected; + uint32 cn_active_info[CN_INFO_NUM]; + int buildReason; + char resevered[RESERVE_NUM - RESERVE_NUM_USED]; + int cn_restart_counts; + int phony_dead_times; +} agent_to_cm_coordinate_status_report; + +typedef struct agent_to_cm_coordinate_status_report_v1 { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + int connectStatus; + int processStatus; + int isCentral; + char nodename[NAMEDATALEN]; + char logicClusterName[CM_LOGIC_CLUSTER_NAME_LEN]; + char cnodename[NAMEDATALEN]; + cm_coordinate_replconninfo status; + cm_coordinate_group_mode group_mode; + bool cleanDropCnFlag; + bool isCnDnDisconnected; + uint32 cn_active_info[CN_INFO_NUM]; + int buildReason; + int cn_dn_disconnect_times; + char resevered[RESERVE_NUM - (2 * RESERVE_NUM_USED)]; + int cn_restart_counts; + int phony_dead_times; +} agent_to_cm_coordinate_status_report_v1; + +typedef struct agent_to_cm_fenced_UDF_status_report { + int msg_type; + uint32 nodeid; + int status; +} agent_to_cm_fenced_UDF_status_report; + +typedef struct agent_to_cm_datanode_status_report { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + int connectStatus; + int processStatus; + cm_local_replconninfo local_status; + BuildState build_info; + cm_sender_replconninfo sender_status[CM_MAX_SENDER_NUM]; + cm_receiver_replconninfo receive_status; + RedoStatsData parallel_redo_status; + cm_redo_stats local_redo_stats; + int dn_restart_counts; + int phony_dead_times; + int dn_restart_counts_in_hour; +} agent_to_cm_datanode_status_report; + +typedef struct AgentToCmserverDnSyncList { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + char dnSynLists[DN_SYNC_LEN]; + // remain + int 
remain[REMAIN_LEN]; + char remainStr[DN_SYNC_LEN]; +} AgentToCmserverDnSyncList; + +typedef struct agent_to_cm_gtm_status_report { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + int connectStatus; + int processStatus; + cm_gtm_replconninfo status; + int phony_dead_times; +} agent_to_cm_gtm_status_report; + +typedef struct agent_to_cm_current_time_report { + int msg_type; + uint32 nodeid; + long int etcd_time; +} agent_to_cm_current_time_report; + +typedef struct AgentToCMS_DiskUsageStatusReport { + int msgType; + uint32 instanceId; + uint32 dataPathUsage; + uint32 logPathUsage; +} AgentToCMS_DiskUsageStatusReport; + +typedef struct agent_to_cm_heartbeat { + int msg_type; + uint32 node; + uint32 instanceId; + int instanceType; + int cluster_status_request; +} agent_to_cm_heartbeat; + +typedef struct DnStatus_t { + CM_MessageType barrierMsgType; + agent_to_cm_datanode_status_report reportMsg; + union { + agent_to_cm_coordinate_barrier_status_report barrierMsg; + Agent2CmBarrierStatusReport barrierMsgNew; + }; +} DnStatus; + +typedef struct DnSyncListInfo_t { + pthread_rwlock_t lk_lock; + AgentToCmserverDnSyncList dnSyncListMsg; +} DnSyncListInfo; + +typedef struct CnStatus_t { + CM_MessageType barrierMsgType; + agent_to_cm_coordinate_status_report reportMsg; + Agent2CmBackupInfoRep backupMsg; + union { + agent_to_cm_coordinate_barrier_status_report barrierMsg; + Agent2CmBarrierStatusReport barrierMsgNew; + }; +} CnStatus; + +typedef struct coordinate_status_info { + pthread_rwlock_t lk_lock; + CnStatus cnStatus; +} coordinate_status_info; + +typedef struct datanode_status_info { + pthread_rwlock_t lk_lock; + DnStatus dnStatus; +} datanode_status_info; + +typedef struct gtm_status_info { + pthread_rwlock_t lk_lock; + agent_to_cm_gtm_status_report report_msg; +} gtm_status_info; + +typedef struct cm_to_agent_heartbeat { + int msg_type; + uint32 node; + int type; + int cluster_status; + uint32 healthCoorId; +} cm_to_agent_heartbeat; + +typedef struct cm_to_cm_vote { + int msg_type; + uint32 node; + uint32 instanceId; + int role; +} cm_to_cm_vote; + +typedef struct cm_to_cm_timeline { + int msg_type; + uint32 node; + uint32 instanceId; + long timeline; +} cm_to_cm_timeline; + +typedef struct cm_to_cm_broadcast { + int msg_type; + uint32 node; + uint32 instanceId; + int role; +} cm_to_cm_broadcast; + +typedef struct cm_to_cm_notify { + int msg_type; + int role; +} cm_to_cm_notify; + +typedef struct cm_to_cm_switchover { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} cm_to_cm_switchover; + +typedef struct cm_to_cm_switchover_ack { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} cm_to_cm_switchover_ack; + +typedef struct cm_to_cm_failover { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} cm_to_cm_failover; + +typedef struct cm_to_cm_failover_ack { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} cm_to_cm_failover_ack; + +typedef struct cm_to_cm_sync { + int msg_type; + int role; +} cm_to_cm_sync; + +typedef struct cm_instance_command_status { + int command_status; + int command_send_status; + int command_send_times; + int command_send_num; + int pengding_command; + int time_out; + int role_changed; + volatile int heat_beat; + int arbitrate_delay_time_out; + int arbitrate_delay_set; + int local_arbitrate_delay_role; + int peerl_arbitrate_delay_role; + int full_build; + int 
notifyCnCount; + volatile int keep_heartbeat_timeout; + int sync_mode; + int maxSendTimes; +} cm_instance_command_status; + +// need to keep consistent with cm_to_ctl_instance_datanode_status +typedef struct cm_instance_datanode_report_status { + cm_local_replconninfo local_status; + int sender_count; + BuildState build_info; + cm_sender_replconninfo sender_status[CM_MAX_SENDER_NUM]; + cm_receiver_replconninfo receive_status; + RedoStatsData parallel_redo_status; + cm_redo_stats local_redo_stats; + synchronous_standby_mode sync_standby_mode; + int send_gs_guc_time; + int dn_restart_counts; + bool arbitrateFlag; + int failoverStep; + int failoverTimeout; + int phony_dead_times; + int phony_dead_interval; + int dn_restart_counts_in_hour; + bool is_finish_redo_cmd_sent; + uint64 ckpt_redo_point; + char barrierID[BARRIERLEN]; + char query_barrierId[BARRIERLEN]; + uint64 barrierLSN; + uint64 archive_LSN; + uint64 flush_LSN; + DatanodeSyncList dnSyncList; + int32 syncDone; + uint32 arbiTime; + uint32 sendFailoverTimes; + bool is_barrier_exist; +} cm_instance_datanode_report_status; + +typedef struct cm_instance_gtm_report_status { + cm_gtm_replconninfo local_status; + int phony_dead_times; + int phony_dead_interval; +} cm_instance_gtm_report_status; + +/* + * each coordinator manages a list of datanode notify status + */ +typedef struct cm_notify_msg_status { + uint32* datanode_instance; + uint32* datanode_index; + bool* notify_status; + bool* have_notified; + bool* have_dropped; + bool have_canceled; + uint32 gtmIdBroadCast; +} cm_notify_msg_status; + +#define OBS_BACKUP_INIT (0) // not start +#define OBS_BACKUP_PROCESSING (1) +#define OBS_BACKUP_COMPLETED (2) +#define OBS_BACKUP_FAILED (3) +#define OBS_BACKUP_UNKNOWN (4) // conn failed, can't get status; do nothing until it changes to another state + +typedef struct cm_instance_coordinate_report_status { + cm_coordinate_replconninfo status; + int isdown; + int clean; + uint32 exec_drop_instanceId; + cm_coordinate_group_mode group_mode; + cm_notify_msg_status notify_msg; + char logicClusterName[CM_LOGIC_CLUSTER_NAME_LEN]; + uint32 cn_restart_counts; + int phony_dead_times; + int phony_dead_interval; + bool delay_repair; + bool isCnDnDisconnected; + int auto_delete_delay_time; + int disable_time_out; + int cma_fault_timeout_to_killcn; + + char barrierID [BARRIERLEN]; + char query_barrierId[BARRIERLEN]; + uint64 barrierLSN; + uint64 archive_LSN; + uint64 flush_LSN; + uint64 ckpt_redo_point; + bool is_barrier_exist; + int buildReason; +} cm_instance_coordinate_report_status; + +typedef struct cm_instance_arbitrate_status { + int sync_mode; + bool restarting; + int promoting_timeout; +} cm_instance_arbitrate_status; + +#define MAX_CM_TO_CM_REPORT_SYNC_COUNT_PER_CYCLE 5 +typedef struct cm_to_cm_report_sync { + int msg_type; + uint32 node[CM_PRIMARY_STANDBY_NUM]; + uint32 instanceId[CM_PRIMARY_STANDBY_NUM]; + int instance_type[CM_PRIMARY_STANDBY_NUM]; + cm_instance_command_status command_member[CM_PRIMARY_STANDBY_NUM]; + cm_instance_datanode_report_status data_node_member[CM_PRIMARY_STANDBY_NUM]; + cm_instance_gtm_report_status gtm_member[CM_PRIMARY_STANDBY_NUM]; + cm_instance_coordinate_report_status coordinatemember; + cm_instance_arbitrate_status arbitrate_status_member[CM_PRIMARY_STANDBY_NUM]; +} cm_to_cm_report_sync; + +typedef struct cm_instance_role_status { + // available zone information + char azName[CM_AZ_NAME]; + uint32 azPriority; + + uint32 node; + uint32 instanceId; + int instanceType; + int role; + int dataReplicationMode; + int instanceRoleInit; +} cm_instance_role_status;
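For readability, a hypothetical helper (not in the patch) that maps the `role` field of `cm_instance_role_status` to the INSTANCE_ROLE_* values defined earlier in this header:

```cpp
/* Illustrative only: render an instance role for logs or cm_ctl output. */
static const char *InstanceRoleName(int role)
{
    switch (role) {
        case INSTANCE_ROLE_PRIMARY:         return "Primary";
        case INSTANCE_ROLE_STANDBY:         return "Standby";
        case INSTANCE_ROLE_PENDING:         return "Pending";
        case INSTANCE_ROLE_MAIN_STANDBY:    return "Main Standby";
        case INSTANCE_ROLE_CASCADE_STANDBY: return "Cascade Standby";
        default:                            return "Unknown";
    }
}
```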
+ +typedef struct cm_instance_role_status_0 { + uint32 node; + uint32 instanceId; + int instanceType; + int role; + int dataReplicationMode; + int instanceRoleInit; +} cm_instance_role_status_0; + +#define CM_PRIMARY_STANDBY_MAX_NUM 8 // support 1 primary and [1, 7] standbys +#define CM_PRIMARY_STANDBY_MAX_NUM_0 3 // support master standby dummy + +typedef struct cm_instance_role_group_0 { + int count; + cm_instance_role_status_0 instanceMember[CM_PRIMARY_STANDBY_MAX_NUM_0]; +} cm_instance_role_group_0; + +typedef struct cm_instance_role_group { + int count; + cm_instance_role_status instanceMember[CM_PRIMARY_STANDBY_MAX_NUM]; +} cm_instance_role_group; + +typedef struct cm_to_cm_role_change_notify { + int msg_type; + cm_instance_role_group role_change; +} cm_to_cm_role_change_notify; + +typedef struct ctl_to_cm_datanode_relation_info { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int wait_seconds; +} ctl_to_cm_datanode_relation_info; + +typedef struct cm_to_ctl_get_datanode_relation_ack { + int command_result; + int member_index; + cm_instance_role_status instanceMember[CM_PRIMARY_STANDBY_MAX_NUM]; + cm_instance_gtm_report_status gtm_member[CM_PRIMARY_STANDBY_NUM]; + cm_instance_datanode_report_status data_node_member[CM_PRIMARY_STANDBY_MAX_NUM]; +} cm_to_ctl_get_datanode_relation_ack; + +// need to keep consistent with the struct cm_instance_datanode_report_status +typedef struct cm_to_ctl_instance_datanode_status { + cm_local_replconninfo local_status; + int sender_count; + BuildState build_info; + cm_sender_replconninfo sender_status[CM_MAX_SENDER_NUM]; + cm_receiver_replconninfo receive_status; + RedoStatsData parallel_redo_status; + cm_redo_stats local_redo_stats; + synchronous_standby_mode sync_standby_mode; + int send_gs_guc_time; +} cm_to_ctl_instance_datanode_status; + +typedef struct cm_to_ctl_instance_gtm_status { + cm_gtm_replconninfo local_status; +} cm_to_ctl_instance_gtm_status; + +typedef struct cm_to_ctl_instance_coordinate_status { + int status; + cm_coordinate_group_mode group_mode; + /* no notify map in ctl */ +} cm_to_ctl_instance_coordinate_status; + +typedef struct cm_to_ctl_instance_status { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int member_index; + int is_central; + int fenced_UDF_status; + cm_to_ctl_instance_datanode_status data_node_member; + cm_to_ctl_instance_gtm_status gtm_member; + cm_to_ctl_instance_coordinate_status coordinatemember; +} cm_to_ctl_instance_status; + +typedef struct cm_to_ctl_instance_barrier_info { + int msg_type; + uint32 node; + uint32 instanceId; + int instance_type; + int member_index; + uint64 ckpt_redo_point; + char barrierID [BARRIERLEN]; + uint64 barrierLSN; + uint64 archive_LSN; + uint64 flush_LSN; +} cm_to_ctl_instance_barrier_info; + +typedef struct cm_to_ctl_central_node_status { + uint32 instanceId; + int node_index; + int status; +} cm_to_ctl_central_node_status; + +#define CM_CAN_PRCESS_COMMAND 0 +#define CM_ANOTHER_COMMAND_RUNNING 1 +#define CM_INVALID_COMMAND 2 +#define CM_DN_NORMAL_STATE 3 + +typedef struct cm_to_ctl_command_ack { + int msg_type; + int command_result; + uint32 node; + uint32 instanceId; + int instance_type; + int command_status; + int pengding_command; + int time_out; +} cm_to_ctl_command_ack; + +typedef struct cm_to_ctl_balance_check_ack { + int msg_type; + int switchoverDone; +} cm_to_ctl_balance_check_ack; + +typedef struct cm_to_ctl_switchover_full_check_ack { + int msg_type; + int 
switchoverDone; +} cm_to_ctl_switchover_full_check_ack, cm_to_ctl_switchover_az_check_ack; + +#define MAX_INSTANCES_LEN 512 +typedef struct cm_to_ctl_balance_result { + int msg_type; + int imbalanceCount; + uint32 instances[MAX_INSTANCES_LEN]; +} cm_to_ctl_balance_result; + +#define CM_STATUS_STARTING 0 +#define CM_STATUS_PENDING 1 +#define CM_STATUS_NORMAL 2 +#define CM_STATUS_NEED_REPAIR 3 +#define CM_STATUS_DEGRADE 4 +#define CM_STATUS_UNKNOWN 5 +#define CM_STATUS_NORMAL_WITH_CN_DELETED 6 +#define CM_STATUS_UNKNOWN_WITH_BINARY_DAMAGED (7) + +typedef struct cm_to_ctl_cluster_status { + int msg_type; + int cluster_status; + bool is_all_group_mode_pending; + int switchedCount; + int node_id; + bool inReloading; +} cm_to_ctl_cluster_status; + +typedef struct cm_to_ctl_cluster_global_barrier_info { + int msg_type; + char global_barrierId[BARRIERLEN]; + char global_achive_barrierId[BARRIERLEN]; + char globalRecoveryBarrierId[BARRIERLEN]; +} cm_to_ctl_cluster_global_barrier_info; + +typedef struct cm2CtlGlobalBarrierNew_t { + int msg_type; + char globalRecoveryBarrierId[BARRIERLEN]; + GlobalBarrierStatus globalStatus; +} cm2CtlGlobalBarrierNew; + +typedef struct cm_to_ctl_logic_cluster_status { + int msg_type; + int cluster_status; + bool is_all_group_mode_pending; + int switchedCount; + bool inReloading; + + int logic_cluster_status[LOGIC_CLUSTER_NUMBER]; + bool logic_is_all_group_mode_pending[LOGIC_CLUSTER_NUMBER]; + int logic_switchedCount[LOGIC_CLUSTER_NUMBER]; +} cm_to_ctl_logic_cluster_status; + +typedef struct cm_to_ctl_cmserver_status { + int msg_type; + int local_role; + bool is_pending; +} cm_to_ctl_cmserver_status; + +typedef struct cm_query_instance_status { + int msg_type; + uint32 nodeId; + uint32 instanceType; // only for etcd and cmserver + uint32 msg_step; + uint32 status; + bool pending; +} cm_query_instance_status; + +typedef struct etcd_status_info { + pthread_rwlock_t lk_lock; + cm_query_instance_status report_msg; +} etcd_status_info; + +/* kerberos information */ +#define ENV_MAX 100 +#define ENVLUE_NUM 3 +#define MAX_BUFF 1024 +#define MAXLEN 20 +#define KERBEROS_NUM 2 + +typedef struct agent_to_cm_kerberos_status_report { + int msg_type; + uint32 node; + char kerberos_ip[CM_IP_LENGTH]; + uint32 port; + uint32 status; + char role[MAXLEN]; + char nodeName[CM_NODE_NAME]; +} agent_to_cm_kerberos_status_report; + +typedef struct kerberos_status_info { + pthread_rwlock_t lk_lock; + agent_to_cm_kerberos_status_report report_msg; +} kerberos_status_info; + +typedef struct cm_to_ctl_kerberos_status_query { + int msg_type; + uint32 heartbeat[KERBEROS_NUM]; + uint32 node[KERBEROS_NUM]; + char kerberos_ip[KERBEROS_NUM][CM_IP_LENGTH]; + uint32 port[KERBEROS_NUM]; + uint32 status[KERBEROS_NUM]; + char role[KERBEROS_NUM][MAXLEN]; + char nodeName[KERBEROS_NUM][CM_NODE_NAME]; +} cm_to_ctl_kerberos_status_query; + +typedef struct kerberos_group_report_status { + pthread_rwlock_t lk_lock; + cm_to_ctl_kerberos_status_query kerberos_status; +} kerberos_group_report_status; + + +typedef uint32 ShortTransactionId; + +/* ---------------- + * Special transaction ID values + * + * BootstrapTransactionId is the XID for "bootstrap" operations, and + * FrozenTransactionId is used for very old tuples. Both should + * always be considered valid. + * + * FirstNormalTransactionId is the first "normal" transaction id. + * Note: if you need to change it, you must change pg_class.h as well. 
 + * ---------------- + */ +#define InvalidTransactionId ((TransactionId)0) +#define BootstrapTransactionId ((TransactionId)1) +#define FrozenTransactionId ((TransactionId)2) +#define FirstNormalTransactionId ((TransactionId)3) +#define MaxTransactionId ((TransactionId)0xFFFFFFFF) + +/* ---------------- + * transaction ID manipulation macros + * ---------------- + */ +#define TransactionIdIsValid(xid) ((xid) != InvalidTransactionId) +#define TransactionIdIsNormal(xid) ((xid) >= FirstNormalTransactionId) +#define TransactionIdEquals(id1, id2) ((id1) == (id2)) +#define TransactionIdStore(xid, dest) (*(dest) = (xid)) +#define StoreInvalidTransactionId(dest) (*(dest) = InvalidTransactionId) + +/* + * Macros for comparing XLogRecPtrs + * + * Beware of passing expressions with side-effects to these macros, + * since the arguments may be evaluated multiple times. + */ +#define XLByteLT(a, b) ((a) < (b)) +#define XLByteLE(a, b) ((a) <= (b)) +#define XLByteEQ(a, b) ((a) == (b)) + +#define InvalidTerm (0) +#define FirstTerm (1) +#define TermIsInvalid(term) ((term) == InvalidTerm) + +#define XLByteLT_W_TERM(a_term, a_logptr, b_term, b_logptr) \ + (((a_term) < (b_term)) || (((a_term) == (b_term)) && ((a_logptr) < (b_logptr)))) +#define XLByteLE_W_TERM(a_term, a_logptr, b_term, b_logptr) \ + (((a_term) < (b_term)) || (((a_term) == (b_term)) && ((a_logptr) <= (b_logptr)))) +#define XLByteEQ_W_TERM(a_term, a_logptr, b_term, b_logptr) (((a_term) == (b_term)) && ((a_logptr) == (b_logptr))) +#define XLByteWE_W_TERM(a_term, a_logptr, b_term, b_logptr) \ + (((a_term) > (b_term)) || (((a_term) == (b_term)) && ((a_logptr) > (b_logptr)))) + +#define CM_RESULT_COMM_ERROR (-2) /* Communication error */ +#define CM_RESULT_ERROR (-1) +#define CM_RESULT_OK (0) +/* + * This error is used in the case where the allocated buffer is not large + * enough to store the errors. It may happen if an allocation failed, + * so its status is considered unknown. 
 + */ +#define CM_RESULT_UNKNOWN (1) + +typedef struct ResultDataPacked { + char pad[CM_MSG_MAX_LENGTH]; +} ResultDataPacked; + +typedef union CM_ResultData { + ResultDataPacked packed; +} CM_ResultData; + +typedef struct CM_Result { + int gr_msglen; + int gr_status; + int gr_type; + CM_ResultData gr_resdata; +} CM_Result; + +extern int query_gtm_status_wrapper(const char pid_path[MAXPGPATH], agent_to_cm_gtm_status_report& agent_to_cm_gtm); +extern int query_gtm_status_for_phony_dead(const char pid_path[MAXPGPATH]); + +typedef struct CtlToCMReload { + int msgType; +} CtlToCMReload; +typedef struct CMToCtlReloadAck { + int msgType; + bool reloadOk; +} CMToCtlReloadAck; + +#endif diff --git a/src/include/codegen/gscodegen.h b/src/include/codegen/gscodegen.h index 92931b15b..6bee38367 100644 --- a/src/include/codegen/gscodegen.h +++ b/src/include/codegen/gscodegen.h @@ -32,6 +32,10 @@ #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif + +#include "pg_config.h" + +#ifdef ENABLE_LLVM_COMPILE #include "llvm/IR/Verifier.h" #include "llvm/ExecutionEngine/MCJIT.h" #include "llvm/ExecutionEngine/ObjectCache.h" @@ -52,6 +56,7 @@ #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_os_ostream.h" +#endif #undef __STDC_LIMIT_MACROS #include "c.h" @@ -148,6 +153,7 @@ #define pos_atom_data 0 #define pos_atom_nullflag 1 +#ifdef ENABLE_LLVM_COMPILE /* The whole intrinsic methods are listed in include/llvm/IR/IntrinsicEnums.inc */ #if LLVM_MAJOR_VERSION == 10 const int llvm_prefetch = 217; @@ -159,6 +165,11 @@ const int llvm_prefetch = 225; const int llvm_sadd_with_overflow = 239; const int llvm_smul_with_overflow = 247; const int llvm_ssub_with_overflow = 252; +#elif LLVM_MAJOR_VERSION == 12 +const int llvm_prefetch = 225; +const int llvm_sadd_with_overflow = 240; +const int llvm_smul_with_overflow = 250; +const int llvm_ssub_with_overflow = 256; #else #error Un-supported LLVM version. #endif @@ -189,6 +200,7 @@ class IRBuilder; class IRBuilderDefaultInserter; } // namespace llvm +#endif namespace dorado { @@ -204,6 +216,7 @@ bool canInitCodegenInvironment(); */ bool canInitThreadCodeGen(); +#ifdef ENABLE_LLVM_COMPILE class GsCodeGen : public BaseObject { public: void initialize(); @@ -597,6 +610,7 @@ private: /* Records the c-function calls in codegen IR function of expression tree */ List* m_cfunction_calls; }; +#endif /* * Macros used to define the variables diff --git a/src/include/codegen/vecexprcodegen.h b/src/include/codegen/vecexprcodegen.h index 1994f6896..b0ddfac6a 100644 --- a/src/include/codegen/vecexprcodegen.h +++ b/src/include/codegen/vecexprcodegen.h @@ -33,6 +33,7 @@ #include "codegen/gscodegen.h" namespace dorado { +#ifdef ENABLE_LLVM_COMPILE /* * @Description : Arguments used for Vectorized Expression CodeGen Engine. 
*/ @@ -464,5 +465,6 @@ public: */ static llvm::Value* MemCxtSwitToCodeGen(GsCodeGen::LlvmBuilder* ptrbuilder, llvm::Value* context); }; +#endif } // namespace dorado #endif diff --git a/src/include/codegen/vechashaggcodegen.h b/src/include/codegen/vechashaggcodegen.h index 18d94ed00..131a4d57d 100644 --- a/src/include/codegen/vechashaggcodegen.h +++ b/src/include/codegen/vechashaggcodegen.h @@ -36,6 +36,7 @@ namespace dorado { /* * VecHashAggCodeGen class implements specific optimization by using LLVM */ +#ifdef ENABLE_LLVM_COMPILE class VecHashAggCodeGen : public BaseObject { public: /* @@ -225,6 +226,7 @@ public: */ static void WrapResetEContextCodeGen(GsCodeGen::LlvmBuilder* ptrbuilder, llvm::Value* econtext); }; +#endif } // namespace dorado #endif diff --git a/src/include/codegen/vechashjoincodegen.h b/src/include/codegen/vechashjoincodegen.h index a30836724..01597ee2a 100644 --- a/src/include/codegen/vechashjoincodegen.h +++ b/src/include/codegen/vechashjoincodegen.h @@ -38,6 +38,7 @@ namespace dorado { /* * VecHashJoinCodeGen class implements specific optimization by using LLVM */ +#ifdef ENABLE_LLVM_COMPILE class VecHashJoinCodeGen : public BaseObject { public: /* @@ -168,5 +169,6 @@ public: */ static llvm::Function* HashJoinCodeGen_bf_includeLong(VecHashJoinState* node); }; +#endif } // namespace dorado #endif diff --git a/src/include/codegen/vecsortcodegen.h b/src/include/codegen/vecsortcodegen.h index 55197c804..3e1c2e3bb 100644 --- a/src/include/codegen/vecsortcodegen.h +++ b/src/include/codegen/vecsortcodegen.h @@ -39,6 +39,7 @@ namespace dorado { /* * VecSortCodeGen class implements specific optimization by using LLVM */ +#ifdef ENABLE_LLVM_COMPILE class VecSortCodeGen : public BaseObject { public: /* @@ -151,5 +152,6 @@ public: */ static llvm::Function* SortAggTexteqCodeGen(); }; +#endif } // namespace dorado #endif diff --git a/src/include/commands/cluster.h b/src/include/commands/cluster.h index 7bf9525a8..a7173de19 100644 --- a/src/include/commands/cluster.h +++ b/src/include/commands/cluster.h @@ -27,7 +27,7 @@ extern void mark_index_clustered(Relation rel, Oid indexOid); extern Oid make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, int lockMode = AccessExclusiveLock); extern Oid makePartitionNewHeap(Relation partitionedTableRel, TupleDesc partTabHeapDesc, Datum partTabRelOptions, - Oid oldPartOid, Oid partToastOid, Oid NewTableSpace, bool isCStore = false); + Oid oldPartOid, Oid partToastOid, Oid NewTableSpace, bool isCStore = false, Oid subpartFilenode = InvalidOid); extern double copy_heap_data_internal(Relation OldHeap, Relation OldIndex, Relation NewHeap, TransactionId OldestXmin, TransactionId FreezeXid, bool verbose, bool use_sort, AdaptMem* memUsage); extern double CopyUHeapDataInternal(Relation oldHeap, Relation oldIndex, Relation newHeap, TransactionId oldestXmin, diff --git a/src/include/commands/dbcommands.h b/src/include/commands/dbcommands.h index f025e8db1..532f57429 100644 --- a/src/include/commands/dbcommands.h +++ b/src/include/commands/dbcommands.h @@ -63,7 +63,8 @@ extern bool have_createdb_privilege(void); extern void dbase_redo(XLogReaderState* rptr); extern void dbase_desc(StringInfo buf, XLogReaderState* record); -extern void xlog_db_drop(Oid dbId, Oid tbSpcId); +extern const char* dbase_type_name(uint8 subtype); +extern void xlog_db_drop(XLogRecPtr lsn, Oid dbId, Oid tbSpcId); extern void xlog_db_create(Oid dstDbId, Oid dstTbSpcId, Oid srcDbId, Oid srcTbSpcId); extern void check_encoding_locale_matches(int encoding, const char* collate, const char* 
ctype); diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h index 20e16e872..ba3162552 100644 --- a/src/include/commands/defrem.h +++ b/src/include/commands/defrem.h @@ -40,6 +40,11 @@ extern void ComputeIndexAttrs(IndexInfo* indexInfo, Oid* typeOidP, Oid* collatio Oid accessMethodId, bool amcanorder, bool isconstraint); extern List* ChooseIndexColumnNames(const List* indexElems); +#ifdef ENABLE_MULTIPLE_NODES +extern void mark_indisvalid_local(char* schname, char* idxname); +extern void mark_indisvalid_all_cns(char* schname, char* idxname); +#endif + /* commands/functioncmds.c */ extern bool PrepareCFunctionLibrary(HeapTuple tup); extern void InsertIntoPendingLibraryDelete(const char* filename, bool atCommit); @@ -49,12 +54,12 @@ extern void CreateFunction(CreateFunctionStmt* stmt, const char* queryString, Oi extern void RemoveFunctionById(Oid funcOid); extern void remove_encrypted_proc_by_id(Oid funcOid); extern void RemovePackageById(Oid pkgOid, bool isBody = false); -extern void dropFunctionByPackageOid(Oid package_oid); +extern void DeleteFunctionByPackageOid(Oid package_oid); extern void SetFunctionReturnType(Oid funcOid, Oid newRetType); extern void SetFunctionArgType(Oid funcOid, int argIndex, Oid newArgType); extern void RenameFunction(List* name, List* argtypes, const char* newname); extern void AlterFunctionOwner(List* name, List* argtypes, Oid newOwnerId); -extern void AlterFunctionOwner_oid(Oid procOid, Oid newOwnerId); +extern void AlterFunctionOwner_oid(Oid procOid, Oid newOwnerId, bool byPackage = false); extern bool IsFunctionTemp(AlterFunctionStmt* stmt); extern void AlterFunction(AlterFunctionStmt* stmt); extern void CreateCast(CreateCastStmt* stmt); @@ -67,6 +72,8 @@ extern Oid get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok); /* commands/operatorcmds.c */ extern void CreatePackageCommand(CreatePackageStmt* parsetree, const char* queryString); extern void CreatePackageBodyCommand(CreatePackageBodyStmt* parsetree, const char* queryString); +extern void AlterPackageOwner(List* name, Oid newOwnerId); +extern void AlterFunctionOwnerByPkg(Oid package_oid, Oid newOwnerId); extern void DefineOperator(List* names, List* parameters); extern void RemoveOperatorById(Oid operOid); diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h index 114a7901e..0ec470412 100644 --- a/src/include/commands/explain.h +++ b/src/include/commands/explain.h @@ -151,7 +151,7 @@ typedef struct PlanTableEntry { #define V_PLAN_TABLE "plan_table" /* plan_table column length for explain plan. */ -#define PLANTABLECOLNUM 11 +#define PLANTABLECOLNUM 13 #define SESSIONIDLEN 32 #define STMTIDLEN 31 /* the max statement_id length is 30 bytes. */ #define OPERATIONLEN 31 @@ -172,6 +172,8 @@ typedef struct PlanTableData { char object_type[OBJECTLEN]; /* object type. */ char object_owner[NAMEDATALEN]; /* object schema */ StringInfo projection; /* output targetlist of the node */ + double cost; /* cost of operation */ + double cardinality; /* cardinality of operation */ } PlanTableData; typedef enum PlanTableCol { @@ -189,7 +191,9 @@ typedef enum PlanTableCol { PT_OBJECT_TYPE, PT_OBJECT_OWNER, - PT_PROJECTION + PT_PROJECTION, + PT_COST, + PT_CARDINALITY } PlanTableCol; /* Store all node tuples of one plan. */ @@ -260,6 +264,9 @@ public: /* Set projection for one plan node. */ void set_plan_table_projection(int plan_node_id, List* tlist); + /* Set plan cost and cardinality. 
*/ + void set_plan_table_cost_card(int plan_node_id, double plan_cost, double plan_cardinality); + + /* Call heap_insert to insert all node tuples of the plan into table. */ void insert_plan_table_tuple(); @@ -435,6 +442,7 @@ typedef struct DN_RunInfo { typedef struct ExplainState { StringInfo str; /* output buffer */ + StringInfo post_str; /* output buffer after plan tree */ /* options */ bool plan; /* do not print plan */ bool verbose; /* be verbose */ @@ -496,6 +504,7 @@ extern void ExplainEndOutput(ExplainState* es); extern void ExplainSeparatePlans(ExplainState* es); extern void ExplainPropertyList(const char* qlabel, List* data, ExplainState* es); +extern void ExplainPropertyListPostPlanTree(const char* qlabel, List* data, ExplainState* es); extern void ExplainPropertyText(const char* qlabel, const char* value, ExplainState* es); extern void ExplainPropertyInteger(const char* qlabel, int value, ExplainState* es); extern void ExplainPropertyLong(const char* qlabel, long value, ExplainState* es); diff --git a/src/include/commands/extension.h b/src/include/commands/extension.h index 4a86bfe8f..5516ecdcb 100644 --- a/src/include/commands/extension.h +++ b/src/include/commands/extension.h @@ -43,10 +43,10 @@ extern void AlterExtensionNamespace(List* names, const char* newschema); extern void AlterExtensionOwner_oid(Oid extensionOid, Oid newOwnerId); -extern void RepallocSessionVarsArrayIfNecessary(); - /* Return true if the extension is supported. */ extern bool CheckExtensionValid(const char *extentName); extern bool CheckExtensionSqlValid(char *queryString); +extern void RepallocSessionVarsArrayIfNecessary(); + #endif /* EXTENSION_H */ diff --git a/src/include/commands/prepare.h b/src/include/commands/prepare.h index e5362a73f..7898a4b06 100644 --- a/src/include/commands/prepare.h +++ b/src/include/commands/prepare.h @@ -51,7 +51,7 @@ extern bool HaveActiveCoordinatorPreparedStatement(const char* stmt_name); #ifdef PGXC extern DatanodeStatement* FetchDatanodeStatement(const char* stmt_name, bool throwError); -extern bool ActivateDatanodeStatementOnNode(const char* stmt_name, int noid); +extern bool ActivateDatanodeStatementOnNode(const char* stmt_name, int nodeIdx); extern void DeActiveAllDataNodeStatements(void); extern bool HaveActiveDatanodeStatements(void); extern void DropDatanodeStatement(const char* stmt_name); diff --git a/src/include/commands/sequence.h b/src/include/commands/sequence.h index 22dc49e40..8d230ac1d 100644 --- a/src/include/commands/sequence.h +++ b/src/include/commands/sequence.h @@ -169,6 +169,7 @@ extern void ResetSequence(Oid seq_relid); extern void seq_redo(XLogReaderState* rptr); extern void seq_desc(StringInfo buf, XLogReaderState* record); +extern const char* seq_type_name(uint8 subtype); extern GTM_UUID get_uuid_from_rel(Relation rel); extern void lockNextvalOnCn(Oid relid); diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index 6c1c7bfb4..4efd285d2 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ -128,6 +128,7 @@ extern bool checkRelationLocalIndexesUsable(Relation relation); extern List* GetPartitionkeyPos(List* partitionkeys, List* schema); extern void ComparePartitionValue(List* pos, Form_pg_attribute* attrs, List *partitionList, bool isPartition = true); +extern void CompareListValue(const List* pos, Form_pg_attribute* attrs, List *partitionList); extern void clearAttrInitDefVal(Oid relid); extern void AlterDfsCreateTables(Oid relOid, Datum toast_options, CreateStmt* mainTblStmt); 
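The explain.h hunk above grows PLANTABLECOLNUM from 11 to 13 and appends PT_COST and PT_CARDINALITY, so EXPLAIN PLAN can now persist optimizer estimates per node. A hypothetical call site (the owning class is not shown in this excerpt; `PlanTableMgr` and `RecordNodeEstimates` are illustrative names only):

```cpp
/* "PlanTableMgr" is a stand-in for the (unnamed in this excerpt) class that
 * declares set_plan_table_cost_card(); the wrapper itself is illustrative. */
static void RecordNodeEstimates(PlanTableMgr *planTable, int plan_node_id, const Plan *node)
{
    /* Fills the new PT_COST and PT_CARDINALITY plan_table columns
     * (PLANTABLECOLNUM grew from 11 to 13 in this patch). */
    planTable->set_plan_table_cost_card(plan_node_id, node->total_cost, node->plan_rows);
}
```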
@@ -146,7 +147,8 @@ extern Node* GetTargetValue(Form_pg_attribute attrs, Const* src, bool isinterval extern void ATExecEnableDisableRls(Relation rel, RelationRlsStatus changeType, LOCKMODE lockmode); extern bool isQueryUsingTempRelation(Query *query); extern void addToastTableForNewPartition(Relation relation, Oid newPartId, bool isForSubpartition = false); -extern void fastDropPartition(Relation rel, Oid partOid, const char* stmt, Oid intervalPartOid = InvalidOid); +extern void fastDropPartition(Relation rel, Oid partOid, const char *stmt, Oid intervalPartOid = InvalidOid, + bool sendInvalid = true); extern void ExecutePurge(PurgeStmt* stmt); extern void ExecuteTimeCapsule(TimeCapsuleStmt* stmt); extern void truncate_check_rel(Relation rel); diff --git a/src/include/commands/tablespace.h b/src/include/commands/tablespace.h index 561cb4630..c3bdcf257 100644 --- a/src/include/commands/tablespace.h +++ b/src/include/commands/tablespace.h @@ -137,6 +137,7 @@ extern void check_create_dir(char* location); extern void tblspc_redo(XLogReaderState* rptr); extern void tblspc_desc(StringInfo buf, XLogReaderState* record); +extern const char* tblspc_type_name(uint8 subtype); extern uint64 pg_cal_tablespace_size_oid(Oid tblspcOid); extern Oid ConvertToPgclassRelTablespaceOid(Oid tblspc); extern Oid ConvertToRelfilenodeTblspcOid(Oid tblspc); diff --git a/src/include/commands/typecmds.h b/src/include/commands/typecmds.h index b64b15a77..a5a43b96d 100644 --- a/src/include/commands/typecmds.h +++ b/src/include/commands/typecmds.h @@ -47,5 +47,7 @@ extern void AlterTypeNamespace(List* names, const char* newschema, ObjectType ob extern Oid AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses* objsMoved); extern Oid AlterTypeNamespaceInternal( Oid typeOid, Oid nspOid, bool isImplicitArray, bool errorOnTableType, ObjectAddresses* objsMoved); +extern void AlterTypeOwnerByPkg(Oid pkgOid, Oid newOwnerId); +extern void AlterTypeOwnerByFunc(Oid funcOid, Oid newOwnerId); #endif /* TYPECMDS_H */ diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index a3cc903b5..b3370295c 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -494,11 +494,8 @@ extern void lazy_vacuum_index(Relation indrel, BufferAccessStrategy vacStrategy); extern IndexBulkDeleteResult* lazy_cleanup_index( Relation indrel, IndexBulkDeleteResult* stats, LVRelStats* vacrelstats, BufferAccessStrategy vac_strategy); -extern bool ShouldAttemptTruncation(const LVRelStats *vacrelstats); extern void lazy_record_dead_tuple(LVRelStats *vacrelstats, ItemPointer itemptr); -extern BlockNumber CountNondeletablePages(Relation onerel, - LVRelStats *vacrelstats); extern void vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats); extern void CBIOpenLocalCrossbucketIndex(Relation onerel, LOCKMODE lockmode, int* nindexes, Relation** iRel); diff --git a/src/include/commands/verify.h b/src/include/commands/verify.h index ccd3f944d..997fd1079 100644 --- a/src/include/commands/verify.h +++ b/src/include/commands/verify.h @@ -47,10 +47,88 @@ #include "storage/lock/lock.h" #include "storage/remote_read.h" #include "utils/relcache.h" +#include "postmaster/pagerepair.h" +#include "storage/copydir.h" +#include "utils/inval.h" +#include "utils/timestamp.h" +#include "storage/lock/lwlock.h" +#include "utils/hsearch.h" +#include "c.h" +#include "storage/buf/bufmgr.h" +#include "storage/buf/block.h" +#include "storage/smgr/smgr.h" +#include "storage/checksum.h" +#include "storage/smgr/segment.h" +#include 
"fmgr.h" +#include "funcapi.h" +#include "storage/remote_adapter.h" +#include "utils/relmapper.h" +#include "utils/relcache.h" +#include "access/sysattr.h" +#include "pgstat.h" +#include "access/double_write.h" +#include "miscadmin.h" + +#define FAIL_RETRY_MAX_NUM 5 +#define REGR_MCR_SIZE_1MB 1048576 +#define REGR_MCR_SIZE_1GB 1073741824 extern void DoGlobalVerifyMppTable(VacuumStmt* stmt, const char* queryString, bool sentToRemote); extern void DoGlobalVerifyDatabase(VacuumStmt* stmt, const char* queryString, bool sentToRemote); extern void DoVerifyTableOtherNode(VacuumStmt* stmt, bool sentToRemote); extern void VerifyAbortBufferIO(void); +extern bool isCLogOrCsnLogPath(char* path); +extern bool gsRepairCsnOrCLog(char* path, int timeout); +extern bool isSegmentPath(char* path, uint32* relfileNode); +extern void getRelfiNodeAndSegno(char* str, const char *delim, char **relfileNodeAndSegno); +extern bool gsRepairFile(Oid tableOid, char* path, int timeout); +#define BAD_BLOCK_NAME_LEN = 128; + +const int ERR_MSG_LEN = 512; + +typedef struct BadFileItem { + Oid reloid; + NameData relname; + char relfilepath[MAX_PATH]; +} BadFileItem; + +extern void addGlobalRepairBadBlockStat(const RelFileNodeBackend &rnode, ForkNumber forknum, + BlockNumber blocknum); +extern void UpdateRepairTime(const RelFileNode &rnode, ForkNumber forknum, BlockNumber blocknum); +extern void initRepairBadBlockStat(); +extern void verifyAndTryRepairPage(char* path, int blockNum, bool verify_mem, bool is_segment); +extern void PrepForRead(char* path, int64 blocknum, bool is_segment, RelFileNode *relnode); +extern Buffer PageIsInMemory(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum); +extern void resetRepairBadBlockStat(); +extern bool repairPage(char* path, uint blocknum, bool is_segment, int timeout); +extern bool tryRepairPage(int blocknum, bool is_segment, RelFileNode *relnode, int timeout); +extern char* relSegmentDir(RelFileNode rnode, ForkNumber forknum); +extern List* getSegmentMainFilesPath(char* segmentDir, char split, int num); +extern List* appendIfNot(List* targetList, Oid datum); +extern uint32 getSegmentFileHighWater(char* path); +extern int getIntLength(uint32 intValue); +extern List* getPartitionBadFiles(Relation tableRel, List* badFileItems, Oid relOid); +extern int getMaxSegno(RelFileNode* prnode); +extern List* getTableBadFiles(List* badFileItems, Oid relOid, Form_pg_class classForm, Relation tableRel); +extern List* getSegmentBadFiles(List* spcList, List* badFileItems); +extern List* getSegnoBadFiles(char* path, int maxSegno, Oid relOid, char* tabName, List* badFileItems); +extern List* appendBadFileItems(List* badFileItems, Oid relOid, char* tabName, char* path); +extern List* getNonSegmentBadFiles(List* badFileItems, Oid relOid, Form_pg_class classForm, Relation tableRel); +extern void gs_verify_page_by_disk(SMgrRelation smgr, ForkNumber forkNum, int blockNum, char* disk_page_res); +extern void splicMemPageMsg(bool isPageValid, bool isDirty, char* mem_page_res); +extern bool isNeedRepairPageByMem(char* disk_page_res, int blockNum, char* mem_page_res, + XLogPhyBlock *pblk, RelFileNode relnode); +extern int CheckAndRenameFile(char* path); +extern void BatchClearBadBlock(const RelFileNode rnode, ForkNumber forknum, BlockNumber startblkno); +extern void df_close_all_file(RepairFileKey key, int32 max_sliceno); + +Datum local_bad_block_info(PG_FUNCTION_ARGS); +Datum local_clear_bad_block_info(PG_FUNCTION_ARGS); +Datum gs_repair_page(PG_FUNCTION_ARGS); +Datum 
diff --git a/src/include/communication/commproxy_interface.h b/src/include/communication/commproxy_interface.h index 12a3f569e..5a3aa0484 100644 --- a/src/include/communication/commproxy_interface.h +++ b/src/include/communication/commproxy_interface.h @@ -164,7 +164,9 @@ extern ErrorLevel min_debug_level; #define MAX_CONTINUOUS_COUNT 10 #define INIT_TX_ALLOC_BUFF_NUM 5 +#ifndef WITH_OPENEULER_OS extern int gettimeofday(struct timeval* tp, struct timezone* tzp); +#endif //extern THR_LOCAL knl_thrd_context t_thrd; #define COMM_ELOG(elevel, format, ...) \ diff --git a/src/include/db4ai/aifuncs.h b/src/include/db4ai/aifuncs.h index 4077b6f99..0d0c14473 100644 --- a/src/include/db4ai/aifuncs.h +++ b/src/include/db4ai/aifuncs.h @@ -16,7 +16,7 @@ * plannodes.h * * IDENTIFICATION - * src/include/dbmind/db4ai/nodes/aifuncs.h + * src/include/db4ai/aifuncs.h * * --------------------------------------------------------------------------------------- */ @@ -24,42 +24,146 @@ #ifndef DB4AI_AIFUNCS_H #define DB4AI_AIFUNCS_H +#include "db4ai/hyperparameter_validation.h" +#include "db4ai/kmeans.h" +#include "db4ai/xgboost.h" #include "nodes/plannodes.h" -inline const char* algorithm_ml_to_string(AlgorithmML x) +#define ARRAY_LENGTH(x) sizeof(x) / sizeof((x)[0]) +#define ASSERT_ELEMENTS_ENUM_TO_STR(strings, max_enum) \ + static_assert((ARRAY_LENGTH(strings)-1) == max_enum, "Mismatch string to enum conversion. Check missing string values"); + + +// Generic function to convert an enum to a string value +template <const char *values[], uint32_t values_size, typename Enum> +const char *enum_to_string(Enum x) { - switch(x) { - case LOGISTIC_REGRESSION: return "logistic_regression"; - case SVM_CLASSIFICATION: return "svm_classification"; - case LINEAR_REGRESSION: return "linear_regression"; - case KMEANS: return "kmeans"; - case INVALID_ALGORITHM_ML: - default: return "INVALID_ALGORITHM_ML"; - } + if (static_cast<uint32_t>(x) > values_size) return NULL; + return values[static_cast<uint32_t>(x)]; } -inline AlgorithmML get_algorithm_ml(const char *str) +// Generic function to get the string value for an enumeration. Caller can define the +// error behavior when the string does not match. Setting error_on_mismatch stops the execution with +// an ERROR. Otherwise, the last value of the enum is returned +template <const char *values[], uint32_t values_size, typename Enum, bool include_last_on_error> +Enum string_to_enum(const char *str, const char *error_msg = NULL, bool error_on_mismatch = true) { - if (0 == strcmp(str, "logistic_regression")) { - return LOGISTIC_REGRESSION; - } else if (0 == strcmp(str, "svm_classification")) { - return SVM_CLASSIFICATION; - } else if (0 == strcmp(str, "linear_regression")) { - return LINEAR_REGRESSION; - } else if (0 == strcmp(str, "kmeans")) { - return KMEANS; - } else { - return INVALID_ALGORITHM_ML; + for (uint i = 0; i < values_size; i++) { + if (0 == strcmp(values[i], str)) { + return static_cast<Enum>(i); + } } + if (error_on_mismatch) { + StringInfo s = makeStringInfo(); + int32_t printable_values = include_last_on_error ? values_size : values_size - 1; + for (int32_t i = 0; i < printable_values; i++) { + if (i != 0) appendStringInfoString(s, ", "); + appendStringInfoString(s, enum_to_string<values, values_size>(static_cast<Enum>(i))); + } + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("%s. Valid values are: %s", error_msg == NULL ? "Error" : error_msg, s->data))); + } + return static_cast<Enum>(values_size - 1); } -inline bool is_supervised(AlgorithmML algorithm) +/////////////////////////////////////////////////////////////////////////////// + +extern const char *algorithm_ml_str[]; +extern const int32_t algorithm_ml_str_size; + +const char *algorithm_ml_to_string(AlgorithmML x); +AlgorithmML get_algorithm_ml(const char *str); + +/////////////////////////////////////////////////////////////////////////////// + +extern const char *prediction_type_str[]; +extern const int32_t prediction_type_str_size; + +const char* prediction_type_to_string(PredictionType x); + +/////////////////////////////////////////////////////////////////////////////// + +extern const char *kmeans_distance_functions_str[]; +extern const int32_t kmeans_distance_functions_str_size; + +const char *kmeans_distance_to_string(DistanceFunction x); +DistanceFunction get_kmeans_distance(const char *str); + +inline const char *kmeans_distance_function_getter(void *x) { - if (algorithm == KMEANS) { - return false; - } else { - return true; - } + return kmeans_distance_to_string(*static_cast<DistanceFunction *>(x)); } -#endif // DB4AI_AIFUNCS_H + +inline void kmeans_distance_function_setter(const char *str, void *x) +{ + *static_cast<DistanceFunction *>(x) = get_kmeans_distance(str); +} + +/////////////////////////////////////////////////////////////////////////////// + +extern const char *kmeans_seeding_str[]; +extern const int32_t kmeans_seeding_str_size; + +const char *kmean_seeding_to_string(SeedingFunction x); +SeedingFunction get_kmeans_seeding(const char *str); + +inline const char *kmeans_seeding_getter(void *x) +{ + return kmean_seeding_to_string(*static_cast<SeedingFunction *>(x)); +} + +inline void kmeans_seeding_setter(const char *str, void *x) +{ + *static_cast<SeedingFunction *>(x) = get_kmeans_seeding(str); +} + + +extern const char *metric_ml_str[]; +extern const int32_t metric_ml_str_size; + +const char *metric_ml_to_string(MetricML x); +MetricML get_metric_ml(const char *str); + + +inline bool metric_ml_is_maximize(MetricML metric) +{ + switch (metric) { + case METRIC_ML_ACCURACY: + case METRIC_ML_F1: + case METRIC_ML_PRECISION: + case METRIC_ML_RECALL: + case METRIC_ML_AUC: + case METRIC_ML_AUC_PR: + case METRIC_ML_MAP: + return true; + + case METRIC_ML_LOSS: + case METRIC_ML_MSE: + case METRIC_ML_DISTANCE_L1: + case METRIC_ML_DISTANCE_L2: + case METRIC_ML_DISTANCE_L2_SQUARED: + case METRIC_ML_DISTANCE_L_INF: + case METRIC_ML_RMSE: + case METRIC_ML_RMSLE: + case METRIC_ML_MAE: + return false; + default: + break; + } + Assert(false); + return false; +} + + +inline double metric_ml_worst_score(MetricML metric) +{ + return metric_ml_is_maximize(metric) ? -DBL_MAX : DBL_MAX; +} + + +/////////////////////////////////////////////////////////////////////////////// + +bool is_supervised(AlgorithmML algorithm); + +#endif // DB4AI_AIFUNCS_H
* --------------------------------------------------------------------------------------- * - command.h + create_model.h * * IDENTIFICATION - * src/include/dbmind/db4ai/commands/create_model.h + * src/include/db4ai/create_model.h * * --------------------------------------------------------------------------------------- */ @@ -33,15 +33,22 @@ #include "tcop/dest.h" struct Model; +struct QueryDesc; struct DestReceiverTrainModel { DestReceiver dest; - Model *model; + MemoryContext memcxt; + AlgorithmML algorithm; + const char* model_name; + const char* sql; + List* hyperparameters; // List of Hyperparameters List *targetlist; // for gradient descent + bool save_model; // Set to automatically save the model into the model warehouse }; -void configure_dest_receiver_train_model(DestReceiverTrainModel *dest, AlgorithmML algorithm, const char *model_name, - const char *sql); + +void configure_dest_receiver_train_model(DestReceiverTrainModel *dest, MemoryContext context, AlgorithmML algorithm, + const char* model_name, const char* sql, bool automatic_save); // Create a DestReceiver object for training model operators DestReceiver *CreateTrainModelDestReceiver(); @@ -49,9 +56,12 @@ DestReceiver *CreateTrainModelDestReceiver(); // Rewrite a create model query, and plan the query. This method is used in query execution // and for explain statements PlannedStmt *plan_create_model(CreateModelStmt *stmt, const char *query_string, ParamListInfo params, - DestReceiver *dest); + DestReceiver *dest, MemoryContext cxt); // Call executor void exec_create_model(CreateModelStmt *stmt, const char *queryString, ParamListInfo params, char *completionTag); +// Execute a query plan for create model +void exec_create_model_planned(QueryDesc *queryDesc, char *completionTag); + #endif
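A sketch of how the reworked receiver API above fits together (illustrative only: it assumes the DestReceiver returned by the factory may be cast back to DestReceiverTrainModel, and the model name, algorithm, and save flag are placeholder values, not defaults mandated by this patch):

```
#include "db4ai/create_model.h"

// Hypothetical caller: allocate the receiver, bind it to an algorithm,
// model name and SQL text, then plan the CREATE MODEL statement.
static PlannedStmt *plan_training(CreateModelStmt *stmt, const char *query_string,
                                  ParamListInfo params)
{
    DestReceiverTrainModel *dest =
        (DestReceiverTrainModel *)CreateTrainModelDestReceiver();
    // memory context, algorithm, and automatic-save flag are now explicit
    configure_dest_receiver_train_model(dest, CurrentMemoryContext,
                                        LOGISTIC_REGRESSION, "demo_model",
                                        query_string, true /* automatic_save */);
    return plan_create_model(stmt, query_string, params,
                             (DestReceiver *)dest, CurrentMemoryContext);
}
```

Execution of the returned plan would then proceed through exec_create_model_planned(), per the declaration above.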
diff --git a/src/include/db4ai/db4ai_api.h b/src/include/db4ai/db4ai_api.h new file mode 100644 index 000000000..5402838e9 --- /dev/null +++ b/src/include/db4ai/db4ai_api.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + *--------------------------------------------------------------------------------------- + * + * db4ai_api.h + * + * IDENTIFICATION + * src/include/db4ai/db4ai_api.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef DB4AI_API_H +#define DB4AI_API_H + +#include "nodes/execnodes.h" +#include "commands/explain.h" +#include "db4ai/hyperparameter_validation.h" +#include "db4ai/predict_by.h" + +// generic flags, can be OR'ed +enum { + ALGORITHM_ML_DEFAULT = 0x00000000, // default is supervised / binary + ALGORITHM_ML_UNSUPERVISED = 0x00000001, + ALGORITHM_ML_RESCANS_DATA = 0x00000002, // set when algorithm reads the subplan more than once + // for supervised + ALGORITHM_ML_TARGET_MULTICLASS = 0x00010000, // categorical with more than two categories + ALGORITHM_ML_TARGET_CONTINUOUS = 0x00020000, // not categorical (and not binary) +}; + +// this is just a base structure to be extended by the ML algorithms +typedef struct ModelHyperparameters { +} ModelHyperparameters; + +/* + * Generic ML API for training & prediction. + * + * Algorithms following this interface may be used in two different scenarios: + * - training embedded into a query plan generated from a SQL statement, where + * the tuples are obtained transparently from the query subplan + * - direct training in C, where the tuples are provided by the user + * + * All callbacks receive a pointer to the AlgorithmAPI where they belong + * in order to be able to call other callbacks automatically or to extend + * the API for specific purposes, such as multiple ML algorithms implemented + * by a single API (e.g. gradient descent). + */ +typedef struct AlgorithmAPI { + // algorithm description + AlgorithmML algorithm; + const char* name; + int flags; // OR'ed ALGORITHM_ML_xxx flags + + ////////////////////////////////////////////////////////////////////// + // callbacks used to populate the model warehouse when the database is created + + /* + * Returns the metrics that can be computed by this algorithm + */ + MetricML* (*get_metrics)( + struct AlgorithmAPI *self, + int *num_metrics // output, number of metrics returned + ); + + /* Returns the definition of the supported hyperparameters. + * + * The offsets of the fields must correspond to the structure + * returned by the callback + */ + const HyperparameterDefinition* (*get_hyperparameters_definitions)( + struct AlgorithmAPI *self, + int *num_hyperp // output, number of definitions returned + ); + + ////////////////////////////////////////////////////////////////////// + // callbacks used by CREATE MODEL interface + + /* + * Creates a structure to store the hyperparameters + */ + ModelHyperparameters* (*make_hyperparameters)( + struct AlgorithmAPI *self + ); + + /* + * Updates the input hyperparameters with extra computations or initializations (e.g. seed). + * + * This callback is optional; a NULL pointer disables it + */ + void (*update_hyperparameters)( + struct AlgorithmAPI *self, + ModelHyperparameters *hyperparameters + ); + + ////////////////////////////////////////////////////////////////////// + // callbacks used by the query runtime operator + + /* + * Creates and initializes a new training state. + * + * This training state has to be completely agnostic of query subplans + * or executor nodes because it will be used also for direct training. 
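+ * + * Typical call order (see run() and end() below): create() builds the + * state, run() consumes tuples through the fetch callback and fills the + * output models, and end() releases algorithm resources; the state + * itself is freed by the caller.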
+ */ + TrainModelState* (*create)( + struct AlgorithmAPI *self, + const TrainModel* configuration + ); + + /* + * Trains all configurations at once and returns all trained models + * in the same order as the hyperparameter configurations. + * + * Each model has a status field that denotes if it has terminated + * successfully or not. + * + * Tuples are fetched using the callback provided into the state, + * which loads the tuple data into the state itself. For supervised + * algorithms, the target (label) column is always the first. + * + * Multiple iterations of the data are possible by requesting a + * rescan to the tuple fetch callback. + */ + void (*run)( + struct AlgorithmAPI *self, + TrainModelState *state, + Model ** // output, returns all result models + ); + + /* + * Ends the training. The state is freed by the caller. + */ + void (*end)( + struct AlgorithmAPI *self, + TrainModelState *state + ); + + ////////////////////////////////////////////////////////////////////// + // used by PREDICT BY + + /* + * Prepares a model predictor using a trained model + */ + ModelPredictor (*prepare_predict)( + struct AlgorithmAPI *self, + const SerializedModel *model, + Oid return_type + ); + + /* + * Predicts using a trained model. + * + * The input tuple to use for prediction must have the same number + * of attributes and in the same order as those used for training + * except the target in the case of supervised. + * + * The predicted value is of the result type specified into the model. + */ + Datum (*predict)( + struct AlgorithmAPI *self, + ModelPredictor predictor, + Datum *values, + bool *isnull, + Oid *types, + int num_fields + ); + + ////////////////////////////////////////////////////////////////////// + // serialization + + /* + * Deserializes a model and returns a list of TrainingInfo + */ + List* (*explain)( + struct AlgorithmAPI *self, + const SerializedModel *model, + Oid return_type + ); + +} AlgorithmAPI; + +// return the API for a given algorithm +AlgorithmAPI *get_algorithm_api(AlgorithmML algorithm); + +// direct ML interface, use only these methods and never direct calls to the API +Model* model_fit(const char* name, + AlgorithmML algorithm, + // optional hyperparameter values provided by the caller + const Hyperparameter *hyperparameters, + int num_hyperparameters, + // tuple description + Oid *typid, + bool *typbyval, + int16 *typlen, + int num_columns, + // callbacks to fetch data + callback_ml_fetch fetch, + callback_ml_rescan rescan, + void* callback_data // optional, user defined structure for fetch/rescan + ); + +// saves a trained model into the model warehouse +void model_store(const Model *model); + +// returns a trained model stored into the model warehouse +const Model *model_load(const char *model_name); + +// prepares a predictor for a trained model +ModelPredictor model_prepare_predict(const Model* model); + +// predicts using a trained model +Datum model_predict(ModelPredictor predictor, + Datum *values, + bool *isnull, + Oid *typid, + int num_columns + ); + +#endif // DB4AI_API_H + + 
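A hedged sketch of the direct training path declared above (server-side code; the fetch/rescan callbacks are assumed to be supplied by the caller with the callback_ml_fetch/callback_ml_rescan signatures this header references, and passing NULL hyperparameters to fall back to defaults is an assumption, not documented behavior):

```
#include "db4ai/db4ai_api.h"

// Illustrative: train logistic regression on (target, feature) float8 pairs,
// persist it into the model warehouse, then score a single feature value.
static Datum train_and_score(callback_ml_fetch fetch, callback_ml_rescan rescan,
                             void *callback_data, Datum feature, bool feature_null)
{
    Oid typid[2]     = {FLOAT8OID, FLOAT8OID}; // target column first
    bool typbyval[2] = {true, true};
    int16 typlen[2]  = {sizeof(float8), sizeof(float8)};

    Model *model = model_fit("demo_model", LOGISTIC_REGRESSION,
                             NULL, 0, /* assumed: defaults when none given */
                             typid, typbyval, typlen, 2,
                             fetch, rescan, callback_data);
    model_store(model); // persist into the model warehouse

    ModelPredictor pred = model_prepare_predict(model);
    Oid ftypid[1] = {FLOAT8OID};
    // prediction takes the feature columns only, in training order
    return model_predict(pred, &feature, &feature_null, ftypid, 1);
}
```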
diff --git a/src/include/db4ai/db4ai_common.h b/src/include/db4ai/db4ai_common.h new file mode 100644 index 000000000..aad1a8eaa --- /dev/null +++ b/src/include/db4ai/db4ai_common.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + *--------------------------------------------------------------------------------------- + * db4ai_common.h + * + * IDENTIFICATION + * src/include/db4ai/db4ai_common.h +*--------------------------------------------------------------------------------------- + */ + +#ifndef DB4AI_COMMON_H +#define DB4AI_COMMON_H + +#include "utils/builtins.h" +#include "utils/timestamp.h" + + +uint64_t time_diff(struct timespec *time_p1, struct timespec *time_p2); +double interval_to_sec(double time_interval); +double interval_to_msec(double time_interval); + +Datum float8_get_datum(Oid type, float8 value); +float8 datum_get_float8(Oid type, Datum datum); +int32 datum_get_int(Oid type, Datum datum); + +Datum string_to_datum(const char *str, Oid datatype); + +void check_hyper_bounds(unsigned int num_x, unsigned int num_y, const char *hyper); + +#endif /* DB4AI_COMMON_H */ diff --git a/src/include/db4ai/db4ai_cpu.h b/src/include/db4ai/db4ai_cpu.h index f33805272..57b61243f 100644 --- a/src/include/db4ai/db4ai_cpu.h +++ b/src/include/db4ai/db4ai_cpu.h @@ -12,11 +12,10 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. --------------------------------------------------------------------------------------- -mrc_cpu.h - Code to provide micro-optimizations +db4ai_cpu.h IDENTIFICATION - src/include/dbmind/db4ai/executor/mrc_cpu.h + src/include/db4ai/db4ai_cpu.h --------------------------------------------------------------------------------------- **/ diff --git a/src/include/db4ai/distance_functions.h b/src/include/db4ai/distance_functions.h index 8a8503600..1065b938d 100644 --- a/src/include/db4ai/distance_functions.h +++ b/src/include/db4ai/distance_functions.h @@ -17,7 +17,7 @@ distance_functions.h Current set of distance functions that can be used (for k-means for example) IDENTIFICATION - src/include/dbmind/db4ai/executor/distance_functions.h + src/include/db4ai/distance_functions.h --------------------------------------------------------------------------------------- **/ diff --git a/src/include/db4ai/explain_model.h b/src/include/db4ai/explain_model.h new file mode 100644 index 000000000..0a0118d19 --- /dev/null +++ b/src/include/db4ai/explain_model.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2022 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + *--------------------------------------------------------------------------------------- + * + * explain_model.h + * + * IDENTIFICATION + * src/include/db4ai/explain_model.h + * + *--------------------------------------------------------------------------------------- + */ + +#ifndef DB4AI_EXPLAIN_MODEL_H +#define DB4AI_EXPLAIN_MODEL_H +#include "postgres.h" +#include "fmgr/fmgr_comp.h" + +extern char* str_tolower(const char* buff, size_t nbytes, Oid collid); +text* ExecExplainModel(char* model_name); +Datum db4ai_explain_model(PG_FUNCTION_ARGS); +#endif diff --git a/src/include/db4ai/fp_ops.h b/src/include/db4ai/fp_ops.h index e799d430e..640145b1b 100644 --- a/src/include/db4ai/fp_ops.h +++ b/src/include/db4ai/fp_ops.h @@ -17,7 +17,7 @@ fp_ops.h Robust floating point operations IDENTIFICATION - src/include/dbmind/db4ai/executor/fp_ops.h + src/include/db4ai/fp_ops.h --------------------------------------------------------------------------------------- **/ @@ -25,6 +25,8 @@ IDENTIFICATION #ifndef DB4AI_FP_OPS_H #define DB4AI_FP_OPS_H +#include <float.h> + /* * High precision sum: a + b = *sum + *e */ @@ -50,4 +52,32 @@ extern void square(double a, double* square, double* e); */ extern void twoDiv(double a, double b, double* div, double* e); +/* + * to keep running statistics on each cluster being constructed + */ +class IncrementalStatistics { + uint64_t population = 0; + double max_value = DBL_MIN; + double min_value = DBL_MAX; + double total = 0.; + double s = 0; + +public: + + IncrementalStatistics operator+(IncrementalStatistics const& rhs) const; + IncrementalStatistics operator-(IncrementalStatistics const& rhs) const; + IncrementalStatistics& operator+=(IncrementalStatistics const& rhs); + IncrementalStatistics& operator-=(IncrementalStatistics const& rhs); + + double getMin() const; + double getMax() const; + double getTotal() const; + uint64_t getPopulation() const; + void setTotal(double); + double getEmpiricalMean() const; + double getEmpiricalVariance() const; + double getEmpiricalStdDev() const; + bool reset(); +}; + #endif //DB4AI_FP_OPS_H
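IncrementalStatistics above keeps per-cluster running aggregates (population, extrema, total, and a second-order term) behind getEmpiricalMean()/getEmpiricalVariance(). For reference, the standard single-pass technique for mean and variance is Welford's algorithm; the sketch below is an independent, self-contained illustration of that idea, not the class's actual implementation:

```
#include <cmath>
#include <cstdint>
#include <cstdio>

struct RunningStats {
    uint64_t n = 0;
    double mean = 0.0;
    double m2 = 0.0; // running sum of squared deviations from the mean

    void add(double x)
    {
        n++;
        double delta = x - mean;
        mean += delta / n;
        m2 += delta * (x - mean); // uses the updated mean (Welford)
    }
    double variance() const { return n > 1 ? m2 / n : 0.0; } // population variance
    double stddev() const { return std::sqrt(variance()); }
};

int main()
{
    RunningStats s;
    const double xs[] = {2, 4, 4, 4, 5, 5, 7, 9};
    for (double x : xs)
        s.add(x);
    printf("mean=%g stddev=%g\n", s.mean, s.stddev()); // mean=5 stddev=2
    return 0;
}
```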
diff --git a/src/include/db4ai/gd.h b/src/include/db4ai/gd.h index 70fa8705b..0e80274fe 100644 --- a/src/include/db4ai/gd.h +++ b/src/include/db4ai/gd.h @@ -25,7 +25,11 @@ #define GD_H #include "nodes/execnodes.h" +#include "db4ai/matrix.h" #include "db4ai/predict_by.h" +#include "db4ai_common.h" +#include "db4ai/db4ai_api.h" +#include "db4ai/aifuncs.h" // returns the current time in microseconds inline uint64_t @@ -35,101 +39,134 @@ gd_get_clock_usecs() { return tv.tv_sec * 1000000ULL + tv.tv_usec; } -enum { - GD_DEPENDENT_VAR_CONTINUOUS = 0x00000000, - GD_DEPENDENT_VAR_BINARY = 0x00000001, - GD_DEPENDENT_VAR_CATEGORICAL = 0x00000002, -}; +struct TrainModel; -enum { - METRIC_ACCURACY = 0x0001, // (tp + tn) / n - METRIC_F1 = 0x0002, // 2 * (precision * recall) / (precision + recall) - METRIC_PRECISION = 0x0004, // tp / (tp + fp) - METRIC_RECALL = 0x0008, // tp / (tp + fn) - METRIC_LOSS = 0x0010, // defined by each algorithm - METRIC_MSE = 0x0020, // sum((y-y')^2)) / n -}; +/////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////// -char* gd_get_metric_name(int metric); +// GD node: used to train models using Gradient Descent struct GradientDescent; +struct OptimizerGD; +struct ShuffleGD; +struct HyperparametersGD; +struct KernelTransformer; -typedef void (*f_gd_gradients)(const GradientDescent* gd_node, const Matrix* features, const Matrix* dep_var, - Matrix* weights, Matrix* gradients); -typedef double (*f_gd_test)(const GradientDescent* gd_node, const Matrix* features, const Matrix* dep_var, - const Matrix* weights, Scores* scores); -typedef gd_float (*f_gd_predict)(const Matrix* features, const Matrix* weights); +typedef struct GradientDescentState { + TrainModelState tms; + + // solvers + OptimizerGD *optimizer; + ShuffleGD *shuffle; + KernelTransformer *kernel; + Matrix aux_input; // only for non-linear kernels + + // tuple information + bool reuse_tuple; // only after initialization + int n_features; // number of features + Oid target_typid; + + // dependent var categories + int num_classes; + int allocated_classes; + Datum *value_classes; + + // training state + bool init; // training has started, not just for explain + Matrix weights; + double learning_rate; + int n_iterations; + int usecs; // execution time + int processed; // tuples + int discarded; + float loss; + Scores scores; +} GradientDescentState; + -typedef struct GradientDescentAlgorithm { - const char* name; - int flags; - int metrics; +typedef struct GradientsConfig { + const HyperparametersGD *hyperp; + const Matrix* features; + Matrix* weights; + Matrix* gradients; +} GradientsConfig; + +typedef struct GradientsConfigGD { + GradientsConfig hdr; + const Matrix* dep_var; +} GradientsConfigGD; + +typedef struct GradientsConfigPCA { + GradientsConfig hdr; + IncrementalStatistics *eigenvalues_stats = nullptr; + double batch_error = 0.; + Matrix *dot_products = nullptr; +} GradientsConfigPCA; + +typedef struct GradientDescent { + AlgorithmAPI algo; + bool add_bias; + // default values + Oid def_ret_typid; + float8 def_feature; // values for binary algorithms // e.g. (0,1) for logistic regression or (-1,1) for svm classifier - gd_float min_class; - gd_float max_class; - // callbacks for hooks - f_gd_gradients gradients_callback; - f_gd_test test_callback; - f_gd_predict predict_callback; -} GradientDescentAlgorithm; + float8 min_class; + float8 max_class; + // callbacks + bool (*scan)(const Matrix* features, const Matrix* dep_var); // return true to abort + OptimizerGD* (*prepare_optimizer)(const GradientDescentState *gd_state, HyperparametersGD *hyperp); + KernelTransformer* (*prepare_kernel)(int features, HyperparametersGD *hyperp); + void* (*start_iteration)(GradientDescentState* gd_state, int iter); + void (*end_iteration)(void* data); + void (*compute_gradients)(GradientsConfig *cfg); + double (*test)(const GradientDescentState* gd_state, const Matrix* features, const Matrix* dep_var, + const Matrix* weights, Scores* scores); + Datum (*predict)(const Matrix* features, const Matrix* weights, + Oid return_type, void *extra_data, bool max_binary, bool* categorize); + void* (*get_extra_data)(GradientDescentState* gd_state, int *size); +} GradientDescent; -GradientDescentAlgorithm* gd_get_algorithm(AlgorithmML algorithm); +#define GD_SVM_CLASSIFICATION_NAME "svm_classification" +#define GD_LOGISTIC_REGRESSION_NAME "logistic_regression" -inline bool dep_var_is_continuous(const GradientDescentAlgorithm* algo) { - return ((algo->flags & (GD_DEPENDENT_VAR_BINARY | GD_DEPENDENT_VAR_CATEGORICAL)) == 0); +extern GradientDescent gd_logistic_regression; +extern GradientDescent gd_svm_classification; +extern GradientDescent gd_linear_regression; +extern GradientDescent gd_pca; +extern GradientDescent gd_multiclass; + +GradientDescent* gd_get_algorithm(AlgorithmML algorithm); + +inline bool gd_is_supervised(const GradientDescent* gd) { + return ((gd->algo.flags & ALGORITHM_ML_UNSUPERVISED) 
== 0); } -inline bool dep_var_is_binary(const GradientDescentAlgorithm* algo) { - return ((algo->flags & GD_DEPENDENT_VAR_BINARY) != 0); +inline bool gd_dep_var_is_continuous(const GradientDescent* gd) { + return ((gd->algo.flags & ALGORITHM_ML_TARGET_CONTINUOUS) != 0); } -gd_float gd_datum_get_float(Oid type, Datum datum); -Datum gd_float_get_datum(Oid type, gd_float value); - -inline Oid -get_atttypid(GradientDescentState* gd_state, int col) { - return gd_state->tupdesc->attrs[col]->atttypid; +inline bool gd_dep_var_is_binary(const GradientDescent* gd) { + return ((gd->algo.flags & (ALGORITHM_ML_TARGET_MULTICLASS | ALGORITHM_ML_TARGET_CONTINUOUS)) == 0); } -inline bool -get_attbyval(GradientDescentState* gd_state, int col) { - return gd_state->tupdesc->attrs[col]->attbyval; -} +void gd_copy_pg_array_data(float8 *dest, Datum const source, int32_t const num_entries); -inline int -get_attlen(GradientDescentState* gd_state, int col) { - return gd_state->tupdesc->attrs[col]->attlen; -} +void gd_multiclass_target(GradientDescent *algorithm, int target, float8 *dest, const float8 *src, int count); -inline int -get_atttypmod(GradientDescentState* gd_state, int col) { - return gd_state->tupdesc->attrs[col]->atttypmod; -} - -inline int -get_natts(GradientDescentState* gd_state) { - return gd_state->tupdesc->natts; -} - -struct Model; - -ModelPredictor gd_predict_prepare(const Model* model); -Datum gd_predict(ModelPredictor pred, Datum* values, bool* isnull, Oid* types, int values_size); - -inline const GradientDescent* gd_get_node(const GradientDescentState* gd_state) { - return (const GradientDescent*)gd_state->ss.ps.plan; -} - -const char* gd_get_expr_name(GradientDescentExprField field); -Datum ExecEvalGradientDescent(GradientDescentExprState* mlstate, ExprContext* econtext, - bool* isNull, ExprDoneCond* isDone); - //////////////////////////////////////////////////////////////////////// // optimizers +// GD optimizers +typedef enum { + OPTIMIZER_GD = 0, // simple mini-batch + OPTIMIZER_NGD, // normalized gradient descent + INVALID_OPTIMIZER, +} OptimizerML; + typedef struct OptimizerGD { - // shared members, initialized by the caller and not by the specialization + // shared members + const GradientDescentState *gd_state; + const HyperparametersGD *hyperp; Matrix weights; Matrix gradients; @@ -138,26 +175,201 @@ typedef struct OptimizerGD { void (*update_batch)(OptimizerGD* optimizer, const Matrix* features, const Matrix* dep_var); void (*end_iteration)(OptimizerGD* optimizer); void (*release)(OptimizerGD* optimizer); + void (*finalize)(OptimizerGD *optimizer); } OptimizerGD; -OptimizerGD* gd_init_optimizer_gd(const GradientDescentState* gd_state); -OptimizerGD* gd_init_optimizer_ngd(const GradientDescentState* gd_state); +typedef struct OptimizerOVA { + OptimizerGD opt; + OptimizerGD *parent; + GradientDescent *algorithm; +} OptimizerOVA; + +OptimizerGD* gd_init_optimizer_gd(const GradientDescentState* gd_state, HyperparametersGD *hyperp); +OptimizerGD* gd_init_optimizer_ngd(const GradientDescentState* gd_state, HyperparametersGD *hyperp); +OptimizerGD* gd_init_optimizer_pca(const GradientDescentState* gd_state, HyperparametersGD *hyperp); +OptimizerGD* gd_init_optimizer_ova(const GradientDescentState* gd_state, HyperparametersGD *hyperp); +OptimizerGD* gd_init_optimizer(const GradientDescentState *gd_state, HyperparametersGD *hyperp); const char* gd_get_optimizer_name(OptimizerML optimizer); +const char *optimizer_ml_to_string(OptimizerML optimizer_ml); +OptimizerML get_optimizer_ml(const 
char *str); + +inline const char *optimizer_ml_getter(void *optimizer_ml) +{ + return optimizer_ml_to_string(*static_cast<OptimizerML *>(optimizer_ml)); +} + +inline void optimizer_ml_setter(const char *str, void *optimizer_ml) +{ + *static_cast<OptimizerML *>(optimizer_ml) = get_optimizer_ml(str); +} + //////////////////////////////////////////////////////////////////////////// // shuffle typedef struct ShuffleGD { OptimizerGD* optimizer; + bool supervised; + int num_batches; void (*start_iteration)(ShuffleGD* shuffle); Matrix* (*get)(ShuffleGD* shuffle, Matrix** dep_var); void (*unget)(ShuffleGD* shuffle, int tuples); void (*end_iteration)(ShuffleGD* shuffle); void (*release)(ShuffleGD* shuffle); + bool (*has_snapshot)(ShuffleGD* shuffle); } ShuffleGD; -ShuffleGD* gd_init_shuffle_cache(const GradientDescentState* gd_state); +ShuffleGD* gd_init_shuffle_cache(const GradientDescentState* gd_state, HyperparametersGD *hyperp); +////////////////////////////////////////////////////////////////////////// + +typedef enum { + SVM_KERNEL_LINEAR, // default + SVM_KERNEL_GAUSSIAN, + SVM_KERNEL_POLYNOMIAL, + SVM_NUM_KERNELS +} KernelSVM; + +typedef struct HyperparametersGD { + ModelHyperparameters mhp; + + // generic hyperparameters + OptimizerML optimizer; // default GD/mini-batch + int max_seconds; // 0 to disable + bool verbose; + int max_iterations; // maximum number of iterations + int batch_size; + double learning_rate; + double decay; // (0:1], learning rate decay + double tolerance; // [0:1], 0 means to run all iterations + int seed; // [0:N], random seed + + // for SVM + double lambda; // regularization strength + KernelSVM kernel; // default linear (0) + double gamma; // for gaussian kernel + int degree; // [2:9] for polynomial kernel + double coef0; // [0:N] for polynomial kernel + int components; // for gaussian and polynomial kernels, default 0 => Max(128, 2 * num_features) + + // for PCA (for now) + int number_dimensions; // dimension of the sub-space to be computed + + // for multiclass + AlgorithmML classifier; +} HyperparametersGD; + +#define GD_HYPERPARAMETERS_SUPERVISED \ + HYPERPARAMETER_INT4("batch_size", 1000, 1, true, INT32_MAX, true, \ + HyperparametersGD, batch_size, \ + HP_AUTOML_INT(1, 10000, 4, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_FLOAT8("decay", 0.95, 0.0, false, DBL_MAX, true, \ + HyperparametersGD, decay, \ + HP_AUTOML_FLOAT(1E-6, 1E3, 9, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_FLOAT8("learning_rate", 0.8, 0.0, false, DBL_MAX, true, \ + HyperparametersGD, learning_rate, \ + HP_AUTOML_FLOAT(1E-6, 1E3, 9, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_INT4("max_iterations", 100, 1, true, INT32_MAX, true, \ + HyperparametersGD, max_iterations, \ + HP_AUTOML_INT(1, 100, 10, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_INT4("max_seconds", 0, 0, true, INT32_MAX, true, \ + HyperparametersGD, max_seconds, \ + HP_NO_AUTOML()), \ + HYPERPARAMETER_ENUM("optimizer", "gd", gd_optimizer_ml, GD_NUM_OPTIMIZERS, \ + optimizer_ml_getter, optimizer_ml_setter, \ + HyperparametersGD, optimizer, \ + HP_AUTOML_ENUM()), \ + HYPERPARAMETER_FLOAT8("tolerance", 0.0005, 0.0, false, DBL_MAX, true, \ + HyperparametersGD, tolerance, \ + HP_AUTOML_FLOAT(1E-6, 1E3, 9, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_INT4("seed", 0, 0, true, INT32_MAX, true, \ + HyperparametersGD, seed, \ + HP_AUTOML_INT(1, INT32_MAX, 1, ProbabilityDistribution::UNIFORM_RANGE)), \ + HYPERPARAMETER_BOOL("verbose", false, \ + HyperparametersGD, verbose, \ + HP_NO_AUTOML()), + +extern 
const char* svm_kernel_str[SVM_NUM_KERNELS]; +const char *svm_kernel_getter(void *kernel); +void svm_kernel_setter(const char *str, void *kernel); + +#define GD_HYPERPARAMETERS_SVM_CLASSIFICATION \ + GD_HYPERPARAMETERS_SUPERVISED \ + HYPERPARAMETER_FLOAT8("lambda", 0.01, 0.0, false, DBL_MAX, true, \ + HyperparametersGD, lambda, \ + HP_AUTOML_FLOAT(1E-6, 1E3, 9, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_ENUM("kernel", "linear", \ + svm_kernel_str, SVM_NUM_KERNELS, \ + svm_kernel_getter, svm_kernel_setter, \ + HyperparametersGD, kernel, \ + HP_AUTOML_ENUM()), \ + HYPERPARAMETER_INT4("components", 0, 0, true, INT32_MAX, true, \ + HyperparametersGD, components, \ + HP_AUTOML_INT(0, INT32_MAX, 1, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_FLOAT8("gamma", 0.5, 0.0, false, DBL_MAX, true, \ + HyperparametersGD, gamma, \ + HP_AUTOML_FLOAT(1E-3, 1E3, 9, ProbabilityDistribution::LOG_RANGE)), \ + HYPERPARAMETER_INT4("degree", 2, 2, true, 9, true, \ + HyperparametersGD, degree, \ + HP_AUTOML_INT(2, 9, 1, ProbabilityDistribution::UNIFORM_RANGE)), \ + HYPERPARAMETER_FLOAT8("coef0", 1.0, 0.0, true, DBL_MAX, true, \ + HyperparametersGD, coef0, \ + HP_AUTOML_FLOAT(1E-2, 1E1, 9, ProbabilityDistribution::LOG_RANGE)), + +////////////////////////////////////////////////////////////////////////// +// serialization + +typedef struct GradientDescentModelV01 { + HyperparametersGD hyperparameters; + int input; // number of input columns + int features; // may be different from columns due to bias, kernel, ... + int dimensions[2]; // weights + int categories_size; + // the following come in binary format + // - float8 weights[rows * columns] + // - uint8_t categories[categories_size] + // - uint8_t extra_data[] -- for prediction +} GradientDescentModelV01; + +////////////////////////////////////////////////////////////////////////// +// shared by multiple algorithms + +#define GD_NUM_OPTIMIZERS 2 +extern const char* gd_optimizer_ml[GD_NUM_OPTIMIZERS]; + +TrainModelState* gd_create(AlgorithmAPI *self, const TrainModel *pnode); +void gd_run(AlgorithmAPI *self, TrainModelState* pstate, Model **models); +void gd_end(AlgorithmAPI *self, TrainModelState* pstate); + +ModelHyperparameters* gd_make_hyperparameters(AlgorithmAPI *self); +void gd_update_hyperparameters(AlgorithmAPI *self, ModelHyperparameters* hyperp); + +const HyperparameterDefinition* gd_get_hyperparameters_regression(AlgorithmAPI *self, int *definitions_size); + +MetricML *gd_metrics_accuracy(AlgorithmAPI *self, int *num_metrics); +MetricML *gd_metrics_mse(AlgorithmAPI *self, int *num_metrics); +MetricML *gd_metrics_loss(AlgorithmAPI *self, int *num_metrics); + +ModelPredictor gd_predict_prepare(AlgorithmAPI *self, const SerializedModel* model, Oid return_type); +Datum gd_predict(AlgorithmAPI *self, ModelPredictor pred, Datum* values, bool* isnull, Oid* types, int values_size); + +typedef struct SerializedModelGD { + GradientDescent *algorithm; + HyperparametersGD hyperparameters; + int ncategories; + int input; // number of input columns, may be different from number of features + Oid return_type; + Matrix weights; + Matrix features; + Datum *categories; + void *extra_data; + KernelTransformer *kernel; + Matrix aux_input; // for kernels +} SerializedModelGD; + +void gd_deserialize(GradientDescent *gd, const SerializedModel *model, Oid return_type, SerializedModelGD *gdm); + +List* gd_explain(struct AlgorithmAPI *self, const SerializedModel *model, Oid return_type); #endif /* GD_H */ diff --git 
a/src/include/db4ai/hyperparameter_validation.h b/src/include/db4ai/hyperparameter_validation.h index 57b1aacca..e995ba346 100644 --- a/src/include/db4ai/hyperparameter_validation.h +++ b/src/include/db4ai/hyperparameter_validation.h @@ -31,95 +31,217 @@ #include "nodes/pg_list.h" #include "utils/builtins.h" - #include -struct HyperparameterDefinition { - const char* name; - Datum default_value; +// Probability distributions for HPO +enum class ProbabilityDistribution { + UNIFORM_RANGE, + LOG_RANGE, + INVALID_DISTRIBUTION // internal, for checking +}; + +struct HyperparameterAutoML{ + bool enable; // Set if AutoML can tune the hyperparameter + ProbabilityDistribution distribution; + int32_t steps; // Steps considered for AutoML + Datum min_value_automl; // Minimum value for AutoML (inclusive) + Datum max_value_automl; // Max value for AutoML (exclusive) +}; + +struct HyperparameterValidation { Datum min_value; Datum max_value; - const char** valid_values; - void (* enum_setter)(const char* s, void* enum_addr); - Oid type; + bool min_inclusive; + bool max_inclusive; + const char **valid_values; int32_t valid_values_size; + const char* (* enum_getter)(void *enum_addr); + void (* enum_setter)(const char *s, void *enum_addr); +}; + + +struct HyperparameterDefinition { + const char *name; + Oid type; + Datum default_value; + HyperparameterValidation validation; + HyperparameterAutoML automl; int32_t offset; bool min_inclusive; bool max_inclusive; }; -struct HyperparameterValidation { - void* min_value; - bool min_inclusive; - void* max_value; - bool max_inclusive; - const char** valid_values; - int32_t valid_values_size; + +struct AlgorithmConfiguration{ + AlgorithmML algorithm; + HyperparameterDefinition* hyperparameters; + int32_t hyperparameters_size; + MetricML* available_metrics; + int32_t available_metrics_size; + bool is_supervised; }; +// Prepare the set of hyperparameters of the model +List *prepare_model_hyperparameters(const HyperparameterDefinition definitions[], int32_t definitions_size, + void* hyperparameter_struct, MemoryContext memcxt); + +// Configure the hyperparameters of an algorithm or automl, using a list of VariableSetStmt +void configure_hyperparameters_vset(const HyperparameterDefinition definitions[], + int32_t definitions_size, List *hyperparameters, void *configuration); + +// Configure the hyperparameters of an algorithm or automl, using a list of Hyperparameter +void configure_hyperparameters(const HyperparameterDefinition definitions[], + int32_t definitions_size, const Hyperparameter *hyperparameters, int nhyperp, void *configuration); + +// Configure the hyperparameters of an algorithm or automl, using a list of Hyperparameter from the model warehouse +void configure_hyperparameters_modelw(const HyperparameterDefinition definitions[], + int32_t definitions_size, List *hyperparameters, void *configuration); + +// Extract the value for a hyperparameter from a VariableSetStmt +Datum extract_datum_from_variable_set_stmt(VariableSetStmt* stmt, const HyperparameterDefinition* definition); + +// Return a hyperparameter with the given name from the hyperparameter list. 
Returns null if none is available +const HyperparameterDefinition* find_hyperparameter_definition(const HyperparameterDefinition definitions[], + int32_t definitions_size, const char* hyperparameter_name); + +// Get the hyperparameter definitions for one specific algorithm +const HyperparameterDefinition* get_hyperparameter_definitions(AlgorithmML algorithm, + int32_t *result_size); + +// Get the configuration parameters for an architecture +AlgorithmConfiguration* get_algorithm_configuration(AlgorithmML algorithm); + +// Initialize a hyperparameter struct with the default values +void init_hyperparameters_with_defaults(const HyperparameterDefinition definitions[], + int32_t definitions_size, void* hyperparameter_struct); + +// Print the list of hyperparameters +void print_hyperparameters(int level, List* /**/ hyperparameters); + // changes the final value of a hyperparameter into the model warehouse output -void update_model_hyperparameter(Model* model, const char* name, Oid type, Datum value); +void update_model_hyperparameter(MemoryContext memcxt, List *hyperparameters, const char* name, Oid type, Datum value); -void configure_hyperparameters(AlgorithmML algorithm, - List* hyperparameters, Model* model, void* hyperparameter_struct); +//////////////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////////////// -// Set int hyperparameter -void set_hyperparameter_value(const char* name, int* hyperparameter, - Value* value, VariableSetKind kind, int default_value, Model* model, - HyperparameterValidation* validation); +// AutoML Configuration for a hyperparameter +#define HP_AUTOML(automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution +#define HP_AUTOML_INT(min_value_automl, max_value_automl, steps, distribution) \ + HP_AUTOML(true, min_value_automl, max_value_automl, steps, distribution) +#define HP_AUTOML_FLOAT(min_value_automl, max_value_automl, steps, distribution) \ + HP_AUTOML(true, min_value_automl, max_value_automl, steps, distribution) +#define HP_AUTOML_BOOL() HP_AUTOML(true, 0, 0, 0, ProbabilityDistribution::UNIFORM_RANGE) +#define HP_AUTOML_ENUM() HP_AUTOML(true, 0, 0, 0, ProbabilityDistribution::UNIFORM_RANGE) +#define HP_NO_AUTOML() HP_AUTOML(false, 0, 0, 0, ProbabilityDistribution::INVALID_DISTRIBUTION) -// Set double hyperparameter -void set_hyperparameter_value(const char* name, double* hyperparameter, Value* value, - VariableSetKind kind, double default_value, Model* model, - HyperparameterValidation* validation); - - -// Set string hyperparameter (no const) -void set_hyperparameter_value(const char* name, char** hyperparameter, Value* value, - VariableSetKind kind, char* default_value, Model* model, - HyperparameterValidation* validation); - - -// Set boolean hyperparameter -void set_hyperparameter_value(const char* name, bool* hyperparameter, - Value* value, VariableSetKind kind, bool default_value, Model* model, - HyperparameterValidation* validation); - - -// General purpouse method to set the hyperparameters -// Locate the hyperparameter in the list by name and set it to the selected value -// Return the index in the list of the hyperparameters. 
If not found return -1 -template <typename T> -int set_hyperparameter(const char* name, T* hyperparameter, List* hyperparameters, T default_value, Model* model, - HyperparameterValidation* validation) { - int result = 0; - foreach_cell(it, hyperparameters) { - VariableSetStmt* current = lfirst_node(VariableSetStmt, it); - - if (strcmp(current->name, name) == 0) { - if (list_length(current->args) > 1) { - ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Hyperparameter %s cannot be a list", current->name))); - } - - Value* value = NULL; - if (current->args != NULL) { - A_Const* aconst = NULL; - aconst = linitial_node(A_Const, current->args); - value = &aconst->val; - } - - set_hyperparameter_value(name, hyperparameter, value, current->kind, default_value, model, validation); - return result; - } - result++; +// Definitions of hyperparameters. The following macros have an indirection level to allow +// definitions using other macros such as HP_AUTOML +#define HYPERPARAMETER_BOOL_INTERNAL(name, default_value, \ + struct_name, attribute, \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + { \ + name, BOOLOID, BoolGetDatum(default_value), \ + {PointerGetDatum(NULL), PointerGetDatum(NULL), false, false, \ + NULL, 0, NULL, NULL}, \ + {automl_tuning, distribution, steps, min_value_automl, max_value_automl}, \ + offsetof(struct_name, attribute) \ } - - // If not set by user, set the default value - set_hyperparameter_value(name, hyperparameter, NULL, VAR_SET_DEFAULT, default_value, model, validation); - return -1; -} + +#define HYPERPARAMETER_BOOL(name, default_value, struct_name, attribute, automl) \ + HYPERPARAMETER_BOOL_INTERNAL(name, default_value, struct_name, attribute, automl) + +/////////////////////////////////////////////////////////////////////////////// + +#define HYPERPARAMETER_ENUM_INTERNAL(name, default_value, \ + enum_values, enum_values_size, enum_getter, enum_setter, struct_name, attribute, \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + { \ + name, ANYENUMOID, CStringGetDatum(default_value), \ + {PointerGetDatum(NULL), PointerGetDatum(NULL), false, false, \ + enum_values, enum_values_size, enum_getter, enum_setter}, \ + {automl_tuning, distribution, steps, min_value_automl, max_value_automl}, \ + offsetof(struct_name, attribute) \ + } + +#define HYPERPARAMETER_ENUM(name, default_value, enum_values, enum_values_size, enum_getter, enum_setter, \ + struct_name, attribute, automl) \ + HYPERPARAMETER_ENUM_INTERNAL(name, default_value, enum_values, enum_values_size, enum_getter, enum_setter, \ + struct_name, attribute, automl) + +/////////////////////////////////////////////////////////////////////////////// + +#define HYPERPARAMETER_INT4_INTERNAL(name, default_value, \ + min, min_inclusive, max, max_inclusive, struct_name, attribute, \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + { \ + name, INT4OID, Int32GetDatum(default_value), \ + {Int32GetDatum(min), Int32GetDatum(max), min_inclusive, max_inclusive, \ + NULL, 0, NULL, NULL}, \ + {automl_tuning, distribution, steps, Int32GetDatum(min_value_automl), Int32GetDatum(max_value_automl)}, \ + offsetof(struct_name, attribute) \ + } + +#define HYPERPARAMETER_INT4(name, default_value, min, min_inclusive, max, max_inclusive, \ + struct_name, attribute, automl) \ + HYPERPARAMETER_INT4_INTERNAL(name, default_value, min, min_inclusive, max, max_inclusive, \ + struct_name, attribute, automl) + 
+/////////////////////////////////////////////////////////////////////////////// + +#define HYPERPARAMETER_INT8_INTERNAL(name, default_value, \ + min, min_inclusive, max, max_inclusive, struct_name, attribute, \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + { \ + name, INT8OID, Int64GetDatum(default_value), \ + {Int64GetDatum(min), Int64GetDatum(max), min_inclusive, max_inclusive, \ + NULL, 0, NULL, NULL}, \ + {automl_tuning, distribution, steps, Int64GetDatum(min_value_automl), Int64GetDatum(max_value_automl)}, \ + offsetof(struct_name, attribute) \ + } + +#define HYPERPARAMETER_INT8(name, default_value, min, min_inclusive, max, max_inclusive, \ + struct_name, attribute, automl) \ + HYPERPARAMETER_INT8_INTERNAL(name, default_value, min, min_inclusive, max, max_inclusive, \ + struct_name, attribute, automl) + +/////////////////////////////////////////////////////////////////////////////// + +#define HYPERPARAMETER_FLOAT8_INTERNAL(name, default_value, \ + min, min_inclusive, max, max_inclusive, struct_name, attribute, \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + { \ + name, FLOAT8OID, Float8GetDatum(default_value), \ + {Float8GetDatum(min), Float8GetDatum(max), min_inclusive, max_inclusive, \ + NULL, 0, NULL, NULL}, \ + {automl_tuning, distribution, steps, Float8GetDatum(min_value_automl), Float8GetDatum(max_value_automl)}, \ + offsetof(struct_name, attribute) \ + } + +#define HYPERPARAMETER_FLOAT8(name, default_value, min, min_inclusive, max, max_inclusive, \ + struct_name, attribute, automl) \ + HYPERPARAMETER_FLOAT8_INTERNAL(name, default_value, min, min_inclusive, max, max_inclusive, \ + struct_name, attribute, automl) + +/////////////////////////////////////////////////////////////////////////////// + +#define HYPERPARAMETER_STRING_INTERNAL(name, default_value, \ + str_values, str_values_size, struct_name, attribute, \ + automl_tuning, min_value_automl, max_value_automl, steps, distribution) \ + { \ + name, CSTRINGOID, CStringGetDatum(default_value), \ + {PointerGetDatum(NULL), PointerGetDatum(NULL), false, false, \ + str_values, str_values_size, NULL, NULL}, \ + {automl_tuning, distribution, steps, min_value_automl, max_value_automl}, \ + offsetof(struct_name, attribute) \ + } + +#define HYPERPARAMETER_STRING(name, default_value, str_values, str_values_size, \ + struct_name, attribute, automl) \ + HYPERPARAMETER_STRING_INTERNAL(name, default_value, str_values, str_values_size, \ + struct_name, attribute, automl) #endif
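To show how the definition macros above are meant to be combined (the struct and table below are invented for illustration; real tables such as GD_HYPERPARAMETERS_SUPERVISED in gd.h follow the same shape), each entry binds a name, type, default, bounds, and AutoML search space to a field offset in the algorithm's hyperparameter struct:

```
#include "db4ai/db4ai_api.h" // ModelHyperparameters, HyperparameterDefinition

// Hypothetical algorithm hyperparameters; the base struct comes first so
// the offsets recorded by the macros resolve against this layout.
typedef struct HyperparametersDemo {
    ModelHyperparameters mhp;
    int max_iterations;
    double learning_rate;
    bool verbose;
} HyperparametersDemo;

static const HyperparameterDefinition demo_hyperparameter_definitions[] = {
    HYPERPARAMETER_INT4("max_iterations", 100, 1, true, INT32_MAX, true,
                        HyperparametersDemo, max_iterations,
                        HP_AUTOML_INT(1, 1000, 10, ProbabilityDistribution::LOG_RANGE)),
    HYPERPARAMETER_FLOAT8("learning_rate", 0.1, 0.0, false, DBL_MAX, true,
                          HyperparametersDemo, learning_rate,
                          HP_AUTOML_FLOAT(1E-6, 1.0, 9, ProbabilityDistribution::LOG_RANGE)),
    HYPERPARAMETER_BOOL("verbose", false,
                        HyperparametersDemo, verbose,
                        HP_NO_AUTOML()), // excluded from AutoML tuning
};
```

init_hyperparameters_with_defaults() can then populate a HyperparametersDemo with the declared defaults before user-supplied overrides are applied.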
diff --git a/src/include/db4ai/kernel.h b/src/include/db4ai/kernel.h new file mode 100644 index 000000000..ef210f113 --- /dev/null +++ b/src/include/db4ai/kernel.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + *--------------------------------------------------------------------------------------- + * + * kernel.h + * + * IDENTIFICATION + * src/include/db4ai/kernel.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef _KERNEL_H_ +#define _KERNEL_H_ + +#include "db4ai/matrix.h" + +typedef struct KernelTransformer { + int coefficients; + void (*release)(struct KernelTransformer *kernel); + void (*transform)(struct KernelTransformer *kernel, const Matrix *input, Matrix *output); +} KernelTransformer; + +typedef struct KernelGaussian { + KernelTransformer km; + Matrix weights; + Matrix offsets; +} KernelGaussian; + +typedef struct KernelPolynomial { + KernelTransformer km; + int *components; + Matrix weights; + Matrix coefs; +} KernelPolynomial; + +void kernel_init_gaussian(KernelGaussian *kernel, int features, int components, double gamma, int seed); +void kernel_init_polynomial(KernelPolynomial *kernel, int features, int components, int degree, double coef0, int seed); + +#endif // _KERNEL_H_ + + diff --git a/src/include/db4ai/kmeans.h b/src/include/db4ai/kmeans.h new file mode 100644 index 000000000..8aa0eb768 --- /dev/null +++ b/src/include/db4ai/kmeans.h @@ -0,0 +1,84 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +--------------------------------------------------------------------------------------- + +kmeans.h + Public k-means interface + +IDENTIFICATION + src/include/db4ai/kmeans.h + +--------------------------------------------------------------------------------------- +**/ + +#ifndef DB4AI_KMEANS_H +#define DB4AI_KMEANS_H + +#include "db4ai/db4ai_api.h" +#include "db4ai/fp_ops.h" + +/* + * current available distance functions + */ +typedef enum : uint32_t { + KMEANS_L1 = 0U, + KMEANS_L2, + KMEANS_L2_SQUARED, + KMEANS_LINF +} DistanceFunction; + +/* + * current available seeding method + */ +typedef enum : uint32_t { + KMEANS_RANDOM_SEED = 0U, + KMEANS_BB +} SeedingFunction; + +/* + * Verbosity level + */ +typedef enum : uint32_t { + NO_OUTPUT = 0U, + FASTCHECK_OUTPUT, + VERBOSE_OUTPUT +} Verbosity; + +/* + * the k-means node (re-packaged) + */ +typedef struct KMeans { + AlgorithmAPI algo; +} KMeans; + +/* + * internal representation of a centroid + */ +typedef struct Centroid { + IncrementalStatistics statistics; + ArrayType* coordinates = nullptr; + uint32_t id = 0U; +} Centroid; + +/* + * internal representation of a point (not a centroid) + */ +typedef struct GSPoint { + ArrayType* pg_coordinates = nullptr; + uint32_t weight = 1U; + uint32_t id = 0ULL; + double distance_to_closest_centroid = DBL_MAX; + bool should_free = false; +} GSPoint; + +#endif //DB4AI_KMEANS_H diff --git a/src/include/db4ai/matrix.h b/src/include/db4ai/matrix.h index d2fb54b4c..26e57ba35 100644 --- a/src/include/db4ai/matrix.h +++ b/src/include/db4ai/matrix.h @@ -16,7 +16,7 @@ * matrix.h * * IDENTIFICATION - * src/include/dbmind/db4ai/executor/gd/matrix.h + * src/include/db4ai/matrix.h * * --------------------------------------------------------------------------------------- */ @@ -33,15 
+33,13 @@ #define MATRIX_CACHE 16 -typedef float4 gd_float; - typedef struct Matrix { int rows; int columns; bool transposed; int allocated; - gd_float *data; - gd_float cache[MATRIX_CACHE]; + float8 *data; + float8 cache[MATRIX_CACHE]; } Matrix; int matrix_expected_size(int rows, int columns = 1); @@ -55,15 +53,53 @@ void matrix_init_clone(Matrix *matrix, const Matrix *src); // initializes with a transpose of a matrix, it is a virtual operation void matrix_init_transpose(Matrix *matrix, const Matrix *src); +// initializes a matrix with the MacLaurin coefficients from 0 to a certain degree +void matrix_init_maclaurin_coefs(Matrix *matrix, int degree, float8 coef0); + +// initializes a vector with random values following a gaussian distribution +void matrix_init_random_gaussian(Matrix *matrix, int rows, int columns, float8 mu, float8 sigma, int seed); + +// initializes a vector with random values following a uniform distribution +void matrix_init_random_uniform(Matrix *matrix, int rows, int columns, float8 min, float8 max, int seed); + +// initializes a vector with random values following a bernoulli distribution (yes/no with some probability) +void matrix_init_random_bernoulli(Matrix *matrix, int rows, int columns, float8 p, float8 min, float8 max, int seed); + +// initializes the matrices required for a Gaussian kernel mapping +void matrix_init_kernel_gaussian(int features, int components, float8 gamma, int seed, Matrix *weights, Matrix *offsets); + +// transforms a vector to a linear space using a gaussian kernel +void matrix_transform_kernel_gaussian(const Matrix *input, const Matrix *weights, const Matrix *offsets, Matrix *output); + +// initializes the matrices and components required for a polynomial kernel mapping +// returns a vector of components and the two transformation matrices +int *matrix_init_kernel_polynomial(int features, int components, int degree, float8 coef0, int seed, Matrix *weights, Matrix *coefs); + +// transforms a vector to a linear space using a polynomial kernel +void matrix_transform_kernel_polynomial(const Matrix *input, int ncomponents, int *components, + const Matrix *weights, const Matrix *coefficients, Matrix *output); + // releases the memory of a matrix and makes it empty void matrix_release(Matrix *matrix); -// fiils a matrix with zeroes +// replaces temporarily the pointer to the matrix data +float8* matrix_swap_data(Matrix *matrix, float8 *new_data); + +// copies the shape and data from another matrix +void matrix_copy(Matrix *matrix, const Matrix *src); + +// fills a matrix with zeroes void matrix_zeroes(Matrix *matrix); +// fills a matrix with ones +void matrix_ones(Matrix *matrix); + // changes the shape of a matrix // only number of rows will be done later -void matrix_resize(Matrix *matrix, int rows, int columns); +void matrix_resize(Matrix *matrix, int rows, int columns = 1); + +// adds a vector to each row of a matrix +void matrix_add_vector(Matrix *matrix, const Matrix *vector); // multiplies a matrix by a vector, row by row void matrix_mult_vector(const Matrix *matrix, const Matrix *vector, Matrix *result); @@ -72,14 +108,20 @@ void matrix_mult_vector(const Matrix *matrix, const Matrix *vector, Matrix *resu void matrix_mult_entrywise(Matrix *m1, const Matrix *m2); // multiplies a matrix by a scalar, coefficient by coefficient -void matrix_mult_scalar(Matrix *matrix, gd_float factor); +void matrix_mult_scalar(Matrix *matrix, float8 factor); // divides a matrix by a scalar, coefficient by coefficient -void matrix_divide(Matrix *matrix, 
gd_float factor); +void matrix_divide(Matrix *matrix, float8 factor); // adds a matrix by another matrix, coefficient by coefficient void matrix_add(Matrix *m1, const Matrix *m2); +// matrix multiplication +void matrix_mult(const Matrix *matrix1, const Matrix *matrix2, Matrix *result); + +// adds a matrix by the product of another matrix with a scalar, coefficient by coefficient +void matrix_mult_scalar_add(Matrix *m1, const Matrix *m2, const float8 factor); + // subtracts a matrix by another matrix, coefficient by coefficient void matrix_subtract(Matrix *m1, const Matrix *m2); @@ -98,17 +140,26 @@ void matrix_log(Matrix *matrix); // computes the natural logarithm log(1+c) of all coefficients void matrix_log1p(Matrix *matrix); +// computes the cosine of all coefficients +void matrix_cos(Matrix *matrix); + // negates all coefficients (-c) void matrix_negate(Matrix *matrix); // complements all coefficients (1-c) void matrix_complement(Matrix *matrix); -// make sure all coeeficients are c>=0 +// makes sure all coefficients are c>=0 void matrix_positive(Matrix *matrix); -// return the sum of all coefficients -gd_float matrix_get_sum(Matrix *matrix); +// returns the sum of all coefficients +float8 matrix_get_sum(const Matrix *matrix); + +// returns the average of all coefficients +float8 matrix_get_average(Matrix *matrix); + +// returns the standard deviation of all coefficients +float8 matrix_get_stdev(Matrix *matrix); // scales a matrix row by row, using two vectors ( N & D) // where each coefficient is scaled c'=(c-N)/D @@ -117,14 +168,14 @@ gd_float matrix_get_sum(Matrix *matrix); // computes the dot product of two vectors -gd_float matrix_dot(const Matrix *v1, const Matrix *v2); +float8 matrix_dot(const Matrix *v1, const Matrix *v2); // converts all coefficients to binary values w.r.t. 
a threshold // low: v<threshold, high: v>=threshold -void matrix_binary(Matrix *matrix, gd_float threshold, gd_float low, gd_float high); +void matrix_binary(Matrix *matrix, float8 threshold, float8 low, float8 high); // compares two binary vectors -void matrix_relevance(const Matrix *v1, const Matrix *v2, Scores *scores, gd_float positive); +void matrix_relevance(const Matrix *v1, const Matrix *v2, Scores *scores, float8 positive); // prints to a buffer void matrix_print(const Matrix *matrix, StringInfo buf, bool full); @@ -132,6 +183,8 @@ void matrix_print(const Matrix *matrix, StringInfo buf, bool full); // prints into the log void elog_matrix(int elevel, const char *msg, const Matrix *matrix); +void matrix_gram_schmidt(Matrix *matrix, int32_t const num_vectors); + // /////////////////////////////////////////////////////////////////////////// // inline @@ -143,7 +196,7 @@ inline int matrix_expected_size(int rows, int columns) { if (cells <= MATRIX_CACHE) cells = 0; // cached, no extra memory - return sizeof(Matrix) + cells * sizeof(gd_float); + return sizeof(Matrix) + cells * sizeof(float8); } inline void matrix_init(Matrix *matrix, int rows, int columns) @@ -157,10 +210,10 @@ inline void matrix_init(Matrix *matrix, int rows, int columns) matrix->allocated = rows * columns; if (matrix->allocated <= MATRIX_CACHE) { matrix->data = matrix->cache; - errno_t rc = memset_s(matrix->data, MATRIX_CACHE * sizeof(gd_float), 0, matrix->allocated * sizeof(gd_float)); + errno_t rc = memset_s(matrix->data, MATRIX_CACHE * sizeof(float8), 0, matrix->allocated * sizeof(float8)); securec_check(rc, "", ""); } else - matrix->data = (gd_float *)palloc0(matrix->allocated * sizeof(gd_float)); + matrix->data = (float8 *)palloc0(matrix->allocated * sizeof(float8)); } inline void matrix_init_clone(Matrix *matrix, const Matrix *src) @@ -169,8 +222,8 @@ inline void matrix_init_clone(Matrix *matrix, const Matrix *src) Assert(src != nullptr); Assert(!src->transposed); matrix_init(matrix, src->rows, src->columns); - size_t bytes = src->rows * src->columns * sizeof(gd_float); - errno_t rc = memcpy_s(matrix->data, bytes, src->data, bytes); + size_t bytes = src->rows * src->columns * sizeof(float8); + errno_t rc = memcpy_s(matrix->data, matrix->allocated * sizeof(float8), src->data, bytes); securec_check(rc, "", ""); } @@ -180,13 +233,46 @@ inline void matrix_init_transpose(Matrix *matrix, const Matrix *src) Assert(matrix != nullptr); Assert(src != nullptr); Assert(!src->transposed); - matrix->transposed = true; + // it is not necessary to mark vectors as transposed + matrix->transposed = (src->columns > 1); matrix->rows = src->columns; matrix->columns = src->rows; matrix->allocated = 0; matrix->data = src->data; } +static int nCr(int n, int r) { + // nCr = n! / (r! (n-r)!) 
+ Assert(n >= r); + if (n == r) + return 1; + + r = Min(r, n-r); + + int nume = 1; + for (int i = n-r+1; i <= n; i++) + nume *= i; + + int deno = 1; + for (int i = 2; i <= r; i++) + deno *= i; + + return nume / deno; +} + +inline void matrix_init_maclaurin_coefs(Matrix *matrix, int degree, float8 coef0) +{ + matrix_init(matrix, degree + 1); + float8 *pcoefs = matrix->data; + for (int k = 0; k <= degree; k++) { + float8 coef = 0; + if (coef0 > 0) + coef = nCr(degree, k) * pow(coef0, degree - k); + + *pcoefs++ = sqrt(coef * (1 << (k + 1))); + } +} + inline void matrix_release(Matrix *matrix) { Assert(matrix != nullptr); @@ -202,15 +288,59 @@ inline void matrix_release(Matrix *matrix) matrix->columns = 0; } +inline float8* matrix_swap_data(Matrix *matrix, float8 *new_data) { + Assert(matrix != nullptr); + Assert(!matrix->transposed); + Assert(new_data != nullptr); + float8* current = matrix->data; + matrix->data = new_data; + return current; +} + +inline void matrix_copy(Matrix *matrix, const Matrix *src) +{ + Assert(matrix != nullptr); + Assert(src != nullptr); + Assert(!src->transposed); + + int count = src->rows * src->columns; + if (count > matrix->allocated) { + // resize + matrix->allocated = count; + if (matrix->allocated > MATRIX_CACHE) { + // realloc + if (matrix->data != matrix->cache) + pfree(matrix->data); + + matrix->data = (float8 *)palloc0(matrix->allocated * sizeof(float8)); + } + } + + matrix->rows = src->rows; + matrix->columns = src->columns; + errno_t rc = memcpy_s(matrix->data, matrix->allocated * sizeof(float8), + src->data, sizeof(float8) * count); + securec_check(rc, "", ""); +} + inline void matrix_zeroes(Matrix *matrix) { Assert(matrix != nullptr); Assert(!matrix->transposed); - errno_t rc = memset_s(matrix->data, sizeof(gd_float) * matrix->allocated, 0, - sizeof(gd_float) * matrix->rows * matrix->columns); + errno_t rc = memset_s(matrix->data, sizeof(float8) * matrix->allocated, 0, + sizeof(float8) * matrix->rows * matrix->columns); securec_check(rc, "", ""); } +inline void matrix_ones(Matrix *matrix) +{ + Assert(matrix != nullptr); + size_t count = matrix->rows * matrix->columns; + float8 *pd = matrix->data; + while (count-- > 0) + *pd++ = 1.0; +} + inline void matrix_resize(Matrix *matrix, int rows, int columns) { Assert(matrix != nullptr); @@ -232,6 +362,26 @@ inline void matrix_resize(Matrix *matrix, int rows, int columns) } } +inline void matrix_add_vector(Matrix *matrix, const Matrix *vector) +{ + Assert(matrix != nullptr); + Assert(!matrix->transposed); + Assert(vector != nullptr); + Assert(!vector->transposed); + Assert(vector->columns == 1); + Assert(matrix->columns == vector->rows); + + float8 *pm = matrix->data; + for (int r = 0; r < matrix->rows; r++) { + const float8 *pv = vector->data; + size_t count = matrix->columns; + while (count-- > 0) { + *pm += *pv++; + pm++; + } + } +} + inline void matrix_mult_vector(const Matrix *matrix, const Matrix *vector, Matrix *result) { Assert(matrix != nullptr); @@ -245,12 +395,12 @@ inline void matrix_mult_vector(const Matrix *matrix, const Matrix *vector, Matri Assert(matrix->columns == vector->rows); if (matrix->transposed) { - gd_float *pd = result->data; + float8 *pd = result->data; // loop assumes that the data has not been physically transposed for (int r = 0; r < matrix->rows; r++) { - const gd_float *pm = matrix->data + r; - const gd_float *pv = vector->data; - gd_float x = 0.0; + const float8 *pm = matrix->data + r; + const float8 *pv = vector->data; + float8 x = 0.0; for (int c = 0; c < matrix->columns; 
c++) { x += *pm * *pv++; pm += matrix->rows; @@ -258,12 +408,12 @@ inline void matrix_mult_vector(const Matrix *matrix, const Matrix *vector, Matri *pd++ = x; } } else { - const gd_float *pm = matrix->data; - gd_float *pd = result->data; + const float8 *pm = matrix->data; + float8 *pd = result->data; for (int r = 0; r < matrix->rows; r++) { - const gd_float *pv = vector->data; + const float8 *pv = vector->data; size_t count = matrix->columns; - gd_float x = 0.0; + float8 x = 0.0; while (count-- > 0) x += *pv++ * *pm++; *pd++ = x; @@ -281,27 +431,27 @@ inline void matrix_mult_entrywise(Matrix *m1, const Matrix *m2) Assert(m1->columns == m2->columns); size_t count = m1->rows * m1->columns; - gd_float *pd = m1->data; - const gd_float *ps = m2->data; + float8 *pd = m1->data; + const float8 *ps = m2->data; while (count-- > 0) *pd++ *= *ps++; } -inline void matrix_mult_scalar(Matrix *matrix, gd_float factor) +inline void matrix_mult_scalar(Matrix *matrix, float8 factor) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) *pd++ *= factor; } -inline void matrix_divide(Matrix *matrix, gd_float factor) +inline void matrix_divide(Matrix *matrix, float8 factor) { Assert(matrix != nullptr); Assert(factor != 0.0); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) *pd++ /= factor; } @@ -315,12 +465,27 @@ inline void matrix_add(Matrix *m1, const Matrix *m2) Assert(m1->rows == m2->rows); Assert(m1->columns == m2->columns); size_t count = m1->rows * m1->columns; - gd_float *p1 = m1->data; - const gd_float *p2 = m2->data; + float8 *p1 = m1->data; + const float8 *p2 = m2->data; while (count-- > 0) *p1++ += *p2++; } +inline void matrix_mult_scalar_add(Matrix *m1, const Matrix *m2, const float8 factor) +{ + Assert(m1 != nullptr); + Assert(!m1->transposed); + Assert(m2 != nullptr); + Assert(!m2->transposed); + Assert(m1->rows == m2->rows); + Assert(m1->columns == m2->columns); + size_t count = m1->rows * m1->columns; + float8 *p1 = m1->data; + const float8 *p2 = m2->data; + while (count-- > 0) + *p1++ += factor * *p2++; +} + inline void matrix_subtract(Matrix *m1, const Matrix *m2) { Assert(m1 != nullptr); @@ -330,13 +495,13 @@ inline void matrix_subtract(Matrix *m1, const Matrix *m2) Assert(m1->rows == m2->rows); Assert(m1->columns == m2->columns); size_t count = m1->rows * m1->columns; - gd_float *p1 = m1->data; - const gd_float *p2 = m2->data; + float8 *p1 = m1->data; + const float8 *p2 = m2->data; while (count-- > 0) *p1++ -= *p2++; } -inline gd_float matrix_dot(const Matrix *v1, const Matrix *v2) +inline float8 matrix_dot(const Matrix *v1, const Matrix *v2) { Assert(v1 != nullptr); Assert(!v1->transposed); @@ -347,9 +512,9 @@ inline gd_float matrix_dot(const Matrix *v1, const Matrix *v2) Assert(v2->columns == 1); size_t count = v1->rows; - const gd_float *p1 = v1->data; - const gd_float *p2 = v2->data; - gd_float result = 0; + const float8 *p1 = v1->data; + const float8 *p2 = v2->data; + float8 result = 0; while (count-- > 0) result += *p1++ * *p2++; @@ -360,7 +525,7 @@ inline void matrix_square(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { *pd *= *pd; pd++; @@ -371,20 +536,31 @@ inline void matrix_square_root(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - 
gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { *pd = sqrt(*pd); pd++; } } +inline void matrix_cos(Matrix *matrix) +{ + Assert(matrix != nullptr); + size_t count = matrix->rows * matrix->columns; + float8 *pd = matrix->data; + while (count-- > 0) { + *pd = cos(*pd); + pd++; + } +} + inline void matrix_sigmoid(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { - gd_float c = *pd; + float8 c = *pd; *pd++ = 1.0 / (1.0 + exp(-c)); } } @@ -393,9 +569,9 @@ inline void matrix_log(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { - gd_float v = *pd; + float8 v = *pd; *pd++ = log(v); } } @@ -404,9 +580,9 @@ inline void matrix_log1p(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { - gd_float v = *pd + 1; + float8 v = *pd + 1; *pd++ = log(v); } } @@ -415,9 +591,9 @@ inline void matrix_negate(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { - gd_float v = *pd; + float8 v = *pd; *pd++ = -v; } } @@ -426,9 +602,9 @@ inline void matrix_complement(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { - gd_float v = 1.0 - *pd; + float8 v = 1.0 - *pd; *pd++ = v; } } @@ -437,26 +613,55 @@ inline void matrix_positive(Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *pd = matrix->data; + float8 *pd = matrix->data; while (count-- > 0) { - gd_float v = *pd; + float8 v = *pd; if (v < 0.0) *pd = 0.0; pd++; } } -inline gd_float matrix_get_sum(Matrix *matrix) +inline float8 matrix_get_sum(const Matrix *matrix) { Assert(matrix != nullptr); size_t count = matrix->rows * matrix->columns; - gd_float *ps = matrix->data; - gd_float s = 0.0; + float8 *ps = matrix->data; + float8 s = 0.0; while (count-- > 0) s += *ps++; return s; } +inline float8 matrix_get_average(Matrix *matrix) +{ + Assert(matrix != nullptr); + int total = matrix->rows * matrix->columns; + int count = total; + float8 *ps = matrix->data; + float8 s = 0.0; + while (count-- > 0) + s += *ps++; + return s / total; +} + +inline float8 matrix_get_stdev(Matrix *matrix) +{ + Assert(matrix != nullptr); + int total = matrix->rows * matrix->columns; + int count = total; + float8 *ps = matrix->data; + float8 s = 0.0; + float8 sq = 0.0; + while (count-- > 0) { + float8 v = *ps++; + s += v; + sq += v * v; + } + s /= total; + return sqrt((sq / total) - (s * s)); +} + inline void matrix_scale(Matrix *matrix, const Matrix *m_n, const Matrix *m_d) { Assert(matrix != nullptr); @@ -469,10 +674,10 @@ inline void matrix_scale(Matrix *matrix, const Matrix *m_n, const Matrix *m_d) Assert(!m_d->transposed); Assert(matrix->columns == m_d->rows); Assert(m_d->columns == 1); - gd_float *pd = matrix->data; + float8 *pd = matrix->data; for (int r = 0; r < matrix->rows; r++) { - const gd_float *p1 = m_n->data; - const gd_float *p2 = m_d->data; + const float8 *p1 = m_n->data; + const float8 *p2 = m_d->data; for (int c = 0; c < matrix->columns; c++) { *pd = (*pd - *p1++) / *p2++; pd++; @@ -480,18 
+685,18 @@ inline void matrix_scale(Matrix *matrix, const Matrix *m_n, const Matrix *m_d)
     }
 }
 
-inline void matrix_binary(Matrix *matrix, gd_float threshold, gd_float low, gd_float high)
+inline void matrix_binary(Matrix *matrix, float8 threshold, float8 low, float8 high)
 {
     Assert(matrix != nullptr);
     size_t count = matrix->rows * matrix->columns;
-    gd_float *pd = matrix->data;
+    float8 *pd = matrix->data;
     while (count-- > 0) {
-        gd_float v = *pd;
+        float8 v = *pd;
         *pd++ = (v < threshold ? low : high);
     }
 }
 
-inline void matrix_relevance(const Matrix *v1, const Matrix *v2, Scores *scores, gd_float positive)
+inline void matrix_relevance(const Matrix *v1, const Matrix *v2, Scores *scores, float8 positive)
 {
     Assert(v1 != nullptr);
     Assert(!v1->transposed);
@@ -502,11 +707,13 @@ inline void matrix_relevance(const Matrix *v1, const Matrix *v2, Scores *scores,
     Assert(v2->columns == 1);
 
     size_t count = v1->rows;
-    const gd_float *p1 = v1->data;
-    const gd_float *p2 = v2->data;
+    scores->count += count;
+
+    const float8 *p1 = v1->data;
+    const float8 *p2 = v2->data;
     while (count-- > 0) {
-        gd_float x = *p1++;
-        gd_float y = *p2++;
+        float8 x = *p1++;
+        float8 y = *p2++;
         if (x == positive) {
             // positive
             if (y == positive)
@@ -520,7 +727,76 @@ inline void matrix_relevance(const Matrix *v1, const Matrix *v2, Scores *scores,
             else
                 scores->fn++;
         }
-        scores->count++;
+    }
 }
+
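+/*
+ * in-place Gram-Schmidt over num_vectors column vectors laid out back to back
+ * (vector v starts at matrix->data + v * matrix->rows); for instance, with
+ * dimension 2 and inputs v0 = (1, 1), v1 = (0, 1):
+ *   v0 is normalized to (1, 1) / sqrt(2)
+ *   v1 becomes v1 - (v1 . v0) * v0 = (-1/2, 1/2), normalized to (-1, 1) / sqrt(2)
+ * leaving the non-zero vectors orthonormal while spanning the same space
+ */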
+inline void matrix_gram_schmidt(Matrix *matrix, int32_t const num_vectors)
+{
+    int32_t const dimension = matrix->rows;
+    Matrix done_vector;
+    Matrix current_vector;
+    done_vector.rows = current_vector.rows = dimension;
+    done_vector.columns = current_vector.columns = 1;
+    done_vector.allocated = current_vector.allocated = dimension;
+    done_vector.transposed = current_vector.transposed = false;
+    float8 projection = 0.;
+    float8 squared_magnitude = 0.;
+    float8 magnitude = 0.;
+    int32_t first_non_zero = -1;
+
+    /*
+     * we first find the very first eigenvector with non-zero magnitude
+     * and normalize it (the order of the test in the loop matters)
+     */
+    while ((magnitude == 0.) && (++first_non_zero < num_vectors)) {
+        current_vector.data = matrix->data + (first_non_zero * dimension);
+        squared_magnitude = matrix_dot(&current_vector, &current_vector);
+        if (squared_magnitude == 0.)
+            continue;
+        magnitude = std::sqrt(squared_magnitude);
+        matrix_mult_scalar(&current_vector, 1. / magnitude);
+    }
+
+    /*
+     * no valid vector found :(
+     */
+    if (unlikely(first_non_zero == num_vectors))
+        ereport(ERROR, (errmodule(MOD_DB4AI),
+                errmsg("Gram-Schmidt: No vector of non-zero magnitude found")));
+
+    /*
+     * we can indeed orthonormalize at least one vector, let's go
+     */
+    for (int32_t cv = first_non_zero + 1; cv < num_vectors; ++cv) {
+        current_vector.data = matrix->data + (cv * dimension);
+        // we go thru previously orthonormalized vectors to produce the next one
+        for (int32_t dv = first_non_zero; dv < cv; ++dv) {
+            done_vector.data = matrix->data + (dv * dimension);
+
+            projection = matrix_dot(&current_vector, &done_vector);
+
+            /*
+             * no shadow is cast, and thus the vectors are perpendicular
+             * and we can continue with the next vector in the upper loop
+             */
+            if (projection == 0.) {
+                dv = cv;
+                continue;
+            }
+
+            matrix_mult_scalar(&done_vector, projection);
+            matrix_subtract(&current_vector, &done_vector);
+            /*
+             * for the time being we are using no extra space and thus we have
+             * to normalize again a previously-ready vector
+             */
+            squared_magnitude = matrix_dot(&done_vector, &done_vector);
+            magnitude = std::sqrt(squared_magnitude);
+            matrix_mult_scalar(&done_vector, 1. / magnitude);
+        }
+        squared_magnitude = matrix_dot(&current_vector, &current_vector);
+        magnitude = std::sqrt(squared_magnitude);
+        matrix_mult_scalar(&current_vector, 1. / magnitude);
+    }
+}
diff --git a/src/include/db4ai/model_warehouse.h b/src/include/db4ai/model_warehouse.h
index 17969ac67..58b6c92f8 100644
--- a/src/include/db4ai/model_warehouse.h
+++ b/src/include/db4ai/model_warehouse.h
@@ -16,7 +16,7 @@
  * command.h
  *
  * IDENTIFICATION
- *        src/gausskernel/catalog/model_warehouse.h
+ *        src/include/db4ai/model_warehouse.h
  *
  * ---------------------------------------------------------------------------------------
  */
@@ -39,6 +39,8 @@ struct TrainingInfo{
     const char* name;
     Oid type;
     Datum value;
+    bool open_group;
+    bool close_group;
 };
 
 struct TrainingScore{
@@ -46,23 +48,69 @@ struct TrainingScore{
     double value;
 };
 
+// The model structure is mapped to the gs_model_warehouse table that contains:
+// - model_name : unique model name provided by the user
+// - model_owner : user id of model owner (automatic)
+// - create_time : timestamp when model is stored (automatic)
+// - processedtuples : number of tuples processed
+// - discardedtuples : number of tuples discarded
+// - pre_process_time : preprocessing time in seconds
+// - exec_time : training time in seconds
+// - iterations : number of iterations
+// - outputtype : oid type of prediction
+// - modeltype : algorithm name
+// - query : CREATE MODEL SQL statement
+// - modeldata : optional binary data of the trained model
+// - weight : array of weights for gradient descent, DEPRECATED
+// - hyperparametersnames : hyperparameters
+// - hyperparametersvalues
+// - hyperparametersoids
+// - coefnames : extra training info, DEPRECATED
+// - coefvalues : DEPRECATED
+// - coefoids : DEPRECATED
+// - trainingscorename : scores, DEPRECATED
+// - trainingscorevalue : DEPRECATED
+// - model_describe : extra description, DEPRECATED
+
+// Serialized model with content only known by the algorithm
+typedef enum SerializedModelVersion {
+    DB4AI_MODEL_UNDEFINED = -1,
+    DB4AI_MODEL_V00 = 0,
+    DB4AI_MODEL_V01 = 1,
+    DB4AI_MODEL_INVALID = 2 // and above
+} SerializedModelVersion;
+
+typedef struct SerializedModel {
+    SerializedModelVersion version;
+    void *raw_data;
+    Size size;
+} SerializedModel;
+
 // Base class for models
 struct Model{
+    // header, filled by the caller
+    MemoryContext memory_context; // Memory context to allocate all Model fields
     AlgorithmML algorithm;
     const char* model_name;
     const char* sql;
-    double exec_time_secs;
-    double pre_time_secs;
+    List* hyperparameters; // List of Hyperparameters
+    // model data filled by the algorithm
+    int status; // ERRCODE_SUCCESSFUL_COMPLETION (0) or the error code
+    Oid return_type; // Return type of the model for prediction
+    double pre_time_secs; // preprocessing time
+    double exec_time_secs; // total training time
     int64_t processed_tuples;
     int64_t discarded_tuples;
-    List* train_info; // List of TrainingInfo
-    List* hyperparameters; // List of Hyperparamters
-    List* scores; // List of TrainingScore
-    Oid return_type; // Return type of the model
     int32_t num_actual_iterations;
+    List* scores; // List of TrainingScore
+
SerializedModel data; // private model data + + // TODO_DB4AI_API: DEPRECATED + List* train_info; // List of TrainingInfo + Datum weights; // optional, float[] + Datum model_data; // optional, varlena (void*) }; -// Used by all GradientDescent variants struct ModelGradientDescent{ Model model; Datum weights; // Float[] @@ -70,7 +118,6 @@ struct ModelGradientDescent{ Datum categories; // only for categorical, an array of return_type[ncategories] }; -// Used by K-Means models typedef struct WHCentroid { double objective_function = DBL_MAX; double avg_distance_to_centroid = DBL_MAX; @@ -95,18 +142,13 @@ struct ModelKMeans { WHCentroid* centroids = nullptr; }; -// Used by XGBoost -struct ModelBinary { - Model model; - uint64_t model_len; - Datum model_data; // varlena (void*) -}; - - // Store the model in the catalog tables void store_model(const Model* model); // Get the model from the catalog tables -Model* get_model(const char* model_name, bool only_model); +const Model* get_model(const char* model_name, bool only_model); + +// Dump model to log +void elog_model(int level, const Model *model); #endif diff --git a/src/include/db4ai/predict_by.h b/src/include/db4ai/predict_by.h index 4a59404ff..ee689eddb 100644 --- a/src/include/db4ai/predict_by.h +++ b/src/include/db4ai/predict_by.h @@ -34,16 +34,19 @@ typedef void* ModelPredictor; // Deserialized version of the model that can compute efficiently predictions +struct AlgorithmAPI; struct PredictorInterface { - ModelPredictor (*prepare) (const Model* model); - Datum (*predict) (ModelPredictor pred, Datum* values, bool* isnull, Oid* types, int values_size); + ModelPredictor (*prepare) (AlgorithmAPI*, const Model* model); + Datum (*predict) (AlgorithmAPI*, ModelPredictor pred, Datum* values, bool* isnull, Oid* types, int values_size); }; Datum db4ai_predict_by_bool(PG_FUNCTION_ARGS); +Datum db4ai_predict_by_bytea(PG_FUNCTION_ARGS); Datum db4ai_predict_by_int32(PG_FUNCTION_ARGS); Datum db4ai_predict_by_int64(PG_FUNCTION_ARGS); Datum db4ai_predict_by_float4(PG_FUNCTION_ARGS); Datum db4ai_predict_by_float8(PG_FUNCTION_ARGS); +Datum db4ai_predict_by_float8_array(PG_FUNCTION_ARGS); Datum db4ai_predict_by_numeric(PG_FUNCTION_ARGS); Datum db4ai_predict_by_text(PG_FUNCTION_ARGS); diff --git a/src/include/db4ai/scores.h b/src/include/db4ai/scores.h index d6f1d4adf..54d6e6372 100644 --- a/src/include/db4ai/scores.h +++ b/src/include/db4ai/scores.h @@ -16,7 +16,7 @@ * scores.h * * IDENTIFICATION - * src/include/dbmind/db4ai/executor/gd/scores.h + * src/include/db4ai/scores.h * * --------------------------------------------------------------------------------------- */ @@ -44,8 +44,13 @@ inline void scores_init(Scores *scores) } // (tp + tn) / n -inline double get_accuracy(const Scores *scores) +inline double get_accuracy(const Scores *scores, bool *has) { + if (scores->count == 0) { + *has = false; + return 0; + } + *has = true; return (scores->tp + scores->tn) / (double)scores->count; } diff --git a/src/include/db4ai/xgboost.h b/src/include/db4ai/xgboost.h new file mode 100644 index 000000000..3d6e36ac5 --- /dev/null +++ b/src/include/db4ai/xgboost.h @@ -0,0 +1,37 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. 
+You may obtain a copy of Mulan PSL v2 at:
+
+         http://license.coscl.org.cn/MulanPSL2
+
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+---------------------------------------------------------------------------------------
+
+xgboost.h
+        Public xgboost interface
+
+IDENTIFICATION
+        src/include/db4ai/xgboost.h
+
+---------------------------------------------------------------------------------------
+**/
+
+#ifndef DB4AI_XGBOOST_H
+#define DB4AI_XGBOOST_H
+
+#include "db4ai/db4ai_api.h"
+
+#define XG_TARGET_COLUMN 0
+/*
+ * the XGBoost node (re-packaged)
+ */
+typedef struct XGBoost {
+    AlgorithmAPI algo;
+} XGBoost;
+
+#endif
\ No newline at end of file
diff --git a/src/include/dbmind/hypopg_index.h b/src/include/dbmind/hypopg_index.h
index f52fa0974..bd507d479 100644
--- a/src/include/dbmind/hypopg_index.h
+++ b/src/include/dbmind/hypopg_index.h
@@ -68,6 +68,8 @@ typedef struct hypoIndex {
     bool amcanunique;        /* does AM support UNIQUE indexes? */
     bool amcanmulticol;      /* does AM support multi-column indexes? */
 
+    bool isGlobal;           /* true if index is global partition index */
+    bool ispartitionedindex; /* it is a partitioned index */
     /* store some informations usually saved in catalogs */
     List *options;           /* WITH clause options: a list of DefElem */
     bool amcanorder;         /* does AM support order by column value? */
diff --git a/src/include/distributelayer/streamProducer.h b/src/include/distributelayer/streamProducer.h
index 07a5f4373..d789d8a44 100644
--- a/src/include/distributelayer/streamProducer.h
+++ b/src/include/distributelayer/streamProducer.h
@@ -272,6 +272,8 @@ public:
     }
 
     void setUniqueSQLKey(uint64 unique_sql_id, Oid unique_user_id, uint32 unique_cn_id);
+    void setGlobalSessionId(GlobalSessionId* globalSessionId);
+    void getGlobalSessionId(GlobalSessionId* globalSessionId);
 
     /* The plan the producer thread will run.
*/ PlannedStmt* m_plan; @@ -503,6 +505,9 @@ private: uint64 m_uniqueSQLId; Oid m_uniqueSQLUserId; uint32 m_uniqueSQLCNId; + + /* global session id */ + GlobalSessionId m_globalSessionId; }; extern THR_LOCAL StreamProducer* streamProducer; diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 2f1ad0f1a..00a4eb98e 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -276,7 +276,10 @@ extern TupleTableSlot* ExecProject(ProjectionInfo* projInfo, ExprDoneCond* isDon extern TupleTableSlot* ExecScan(ScanState* node, ExecScanAccessMtd accessMtd, ExecScanRecheckMtd recheckMtd); extern void ExecAssignScanProjectionInfo(ScanState* node); extern void ExecScanReScan(ScanState* node); -extern HTAB* ExecEvalParamExternTableOfIndex(ExprState* exprstate, ExprContext* econtext, Oid* tableOfIndexType, bool *isnestedtable); +extern void initExecTableOfIndexInfo(ExecTableOfIndexInfo* execTableOfIndexInfo, ExprContext* econtext); +extern bool ExecEvalParamExternTableOfIndexById(ExecTableOfIndexInfo* execTableOfIndexInfo); +extern void ExecEvalParamExternTableOfIndex(Node* node, ExecTableOfIndexInfo* execTableOfIndexInfo); +extern bool is_external_clob(Oid type_oid, bool is_null, Datum value); /* * prototypes from functions in execTuples.c @@ -331,7 +334,7 @@ extern void end_tup_output(TupOutputState* tstate); /* * prototypes from functions in execUtils.c */ -extern EState* CreateExecutorState(void); +extern EState* CreateExecutorState(MemoryContext saveCxt = NULL); extern void FreeExecutorState(EState* estate); extern ExprContext* CreateExprContext(EState* estate); extern ExprContext* CreateStandaloneExprContext(void); @@ -421,6 +424,13 @@ extern int PthreadMutexLock(ResourceOwner owner, pthread_mutex_t* mutex, bool tr extern int PthreadMutexTryLock(ResourceOwner owner, pthread_mutex_t* mutex, bool trace = true); extern int PthreadMutexUnlock(ResourceOwner owner, pthread_mutex_t* mutex, bool trace = true); +extern int PthreadRWlockTryRdlock(ResourceOwner owner, pthread_rwlock_t* rwlock); +extern void PthreadRWlockRdlock(ResourceOwner owner, pthread_rwlock_t* rwlock); +extern int PthreadRWlockTryWrlock(ResourceOwner owner, pthread_rwlock_t* rwlock); +extern void PthreadRWlockWrlock(ResourceOwner owner, pthread_rwlock_t* rwlock); +extern void PthreadRWlockUnlock(ResourceOwner owner, pthread_rwlock_t* rwlock); +extern void PthreadRwLockInit(pthread_rwlock_t* rwlock, pthread_rwlockattr_t *attr); + extern bool executorEarlyStop(); extern void ExecEarlyFree(PlanState* node); extern void ExecEarlyFreeBody(PlanState* node); diff --git a/src/include/executor/instrument.h b/src/include/executor/instrument.h index 32805acf0..e88de69b6 100644 --- a/src/include/executor/instrument.h +++ b/src/include/executor/instrument.h @@ -1022,11 +1022,20 @@ typedef struct size_info { int64 ec_fetch_count; } size_info; +typedef struct IterationStats { + int totalIters; + struct timeval currentStartTime; + int levelBuf[SW_LOG_ROWS_FULL]; + int64 rowCountBuf[SW_LOG_ROWS_FULL]; + struct timeval startTimeBuf[SW_LOG_ROWS_FULL]; + struct timeval endTimeBuf[SW_LOG_ROWS_FULL]; +} IterationStats; + extern OperatorProfileTable g_operator_table; extern Instrumentation* InstrAlloc(int n, int instrument_options); extern void InstrStartNode(Instrumentation* instr); -extern void InstrStopNode(Instrumentation* instr, double nTuples); +extern void InstrStopNode(Instrumentation* instr, double nTuples, bool containMemory = true); extern void InstrEndLoop(Instrumentation* instr); extern void 
StreamEndLoop(StreamTime* instr); extern void AddControlMemoryContext(Instrumentation* instr, MemoryContext context); diff --git a/src/include/executor/node/nodeCtescan.h b/src/include/executor/node/nodeCtescan.h index 4c5bd0d8c..2ea2a6ad5 100644 --- a/src/include/executor/node/nodeCtescan.h +++ b/src/include/executor/node/nodeCtescan.h @@ -55,4 +55,8 @@ extern bool CheckCycleExeception(StartWithOpState *node, TupleTableSlot *slot); extern int SibglingsKeyCmp(Datum x, Datum y, SortSupport ssup); extern int SibglingsKeyCmpFast(Datum x, Datum y, SortSupport ssup); +extern void markSWLevelBegin(StartWithOpState *node); +extern void markSWLevelEnd(StartWithOpState *node, int64 rowCount); +extern TupleTableSlot* GetStartWithSlot(RecursiveUnionState* node, TupleTableSlot* slot); +extern bool ExecStartWithRowLevelQual(RecursiveUnionState* node, TupleTableSlot* dstSlot); #endif /* NODECTESCAN_H */ diff --git a/src/include/executor/node/nodeGD.h b/src/include/executor/node/nodeGD.h deleted file mode 100644 index 16de0c13d..000000000 --- a/src/include/executor/node/nodeGD.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - *--------------------------------------------------------------------------------------- - * - * nodeGD.h - * - * IDENTIFICATION - * src/include/executor/nodeGD.h - * - * --------------------------------------------------------------------------------------- - */ - -#ifndef NODE_GD_H -#define NODE_GD_H - -#include "nodes/execnodes.h" - -extern GradientDescentState* ExecInitGradientDescent(GradientDescent* node, EState* estate, int eflags); -extern TupleTableSlot* ExecGradientDescent(GradientDescentState* state); -extern void ExecEndGradientDescent(GradientDescentState* state); - -typedef void (*GradientDescentHook_iteration)(GradientDescentState* state); -extern GradientDescentHook_iteration gdhook_iteration; - -List* makeGradientDescentExpr(AlgorithmML algorithm, List* list, int field); - -#endif /* NODE_GD_H */ diff --git a/src/include/executor/node/nodeModifyTable.h b/src/include/executor/node/nodeModifyTable.h index 6c2a9ac88..99098b5e6 100644 --- a/src/include/executor/node/nodeModifyTable.h +++ b/src/include/executor/node/nodeModifyTable.h @@ -30,6 +30,7 @@ extern void RecordDeletedTuple(Oid relid, int2 bucketid, const ItemPointer tuple /* May move all resizing declaration to appropriate postion sometime. 
*/ extern bool RelationInClusterResizing(const Relation rel); extern bool RelationInClusterResizingReadOnly(const Relation rel); +extern bool RelationInClusterResizingEndCatchup(const Relation rel); extern bool CheckRangeVarInRedistribution(const RangeVar* range_var); extern bool RelationIsDeleteDeltaTable(char* delete_delta_name); extern Relation GetAndOpenDeleteDeltaRel(const Relation rel, LOCKMODE lockmode, bool isMultiCatchup); @@ -39,6 +40,7 @@ extern List* eval_ctid_funcs(Relation rel, List* original_quals, RangeScanInRedi extern char* nodeTagToString(NodeTag type); extern bool ClusterResizingInProgress(); extern void RelationGetNewTableName(Relation rel, char* newtable_name); +extern bool RelationInClusterResizingWriteErrorMode(const Relation rel); extern TupleTableSlot* ExecDelete(ItemPointer tupleid, Oid deletePartitionOid, int2 bucketid, HeapTupleHeader oldtuple, TupleTableSlot* planSlot, EPQState* epqstate, ModifyTableState* node, bool canSetTag); diff --git a/src/include/executor/node/nodePartIterator.h b/src/include/executor/node/nodePartIterator.h index 5cc85d310..bef9bca26 100644 --- a/src/include/executor/node/nodePartIterator.h +++ b/src/include/executor/node/nodePartIterator.h @@ -30,6 +30,7 @@ #include "nodes/execnodes.h" +extern void SetPartitionIteratorParamter(PartIteratorState* node, List* subPartLengthList); extern PartIteratorState* ExecInitPartIterator(PartIterator* node, EState* estate, int eflags); extern TupleTableSlot* ExecPartIterator(PartIteratorState* node); extern void ExecEndPartIterator(PartIteratorState* node); diff --git a/src/include/executor/node/nodeSeqscan.h b/src/include/executor/node/nodeSeqscan.h index 30644b2ff..1a5ab3f4e 100644 --- a/src/include/executor/node/nodeSeqscan.h +++ b/src/include/executor/node/nodeSeqscan.h @@ -24,4 +24,6 @@ extern void ExecSeqRestrPos(SeqScanState* node); extern void ExecReScanSeqScan(SeqScanState* node); extern void InitScanRelation(SeqScanState* node, EState* estate, int eflags); extern RangeScanInRedis reset_scan_qual(Relation currHeapRel, ScanState *node, bool isRangeScanInRedis = false); + +extern ExprState *ExecInitVecExpr(Expr *node, PlanState *parent); #endif /* NODESEQSCAN_H */ diff --git a/src/include/executor/node/nodeTrainModel.h b/src/include/executor/node/nodeTrainModel.h new file mode 100644 index 000000000..978aa121b --- /dev/null +++ b/src/include/executor/node/nodeTrainModel.h @@ -0,0 +1,33 @@ +/* +* Copyright (c) 2020 Huawei Technologies Co.,Ltd. +* +* openGauss is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*---------------------------------------------------------------------------------------
+*
+* nodeTrainModel.h
+*
+* IDENTIFICATION
+*        src/include/executor/node/nodeTrainModel.h
+*
+* ---------------------------------------------------------------------------------------
+*/
+
+#ifndef NODE_TRAIN_MODEL_H
+#define NODE_TRAIN_MODEL_H
+
+#include "nodes/execnodes.h"
+
+extern TrainModelState* ExecInitTrainModel(TrainModel* node, EState* estate, int eflags);
+extern TupleTableSlot* ExecTrainModel(TrainModelState* state);
+extern void ExecEndTrainModel(TrainModelState* state);
+
+#endif /* NODE_TRAIN_MODEL_H */
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index 38311af20..a55ee877a 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -5,8 +5,8 @@
  *
  * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
- * Portions Copyright (c) 2021, openGauss Contributors
  *
+ * Portions Copyright (c) 2021, openGauss Contributors
  * src/include/executor/spi.h
  *
  * -------------------------------------------------------------------------
@@ -134,7 +134,7 @@ extern void SPI_cursor_move(Portal portal, bool forward, long count);
 extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, long count);
 extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, long count);
 extern void SPI_cursor_close(Portal portal);
-extern void SPI_start_transaction(void);
+extern void SPI_start_transaction(List* transactionHead);
 extern void SPI_stp_transaction_check(bool read_only, bool savepoint = false);
 extern void SPI_commit();
 extern void SPI_rollback();
diff --git a/src/include/executor/spiDbesql.h b/src/include/executor/spiDbesql.h
index c232831f1..9b328abc5 100644
--- a/src/include/executor/spiDbesql.h
+++ b/src/include/executor/spiDbesql.h
@@ -25,7 +25,7 @@ typedef struct SPIDescColumns {
 } SPIDescColumns;
 
 extern void SpiDescribeColumnsCallback(CommandDest dest, const char *src, ArrayType** resDescribe,
-    MemoryContext memctx);
+    MemoryContext memctx, ParserSetupHook parserSetup = NULL, void *parserSetupArg = NULL);
 extern void spi_exec_bind_with_callback(CommandDest dest, const char *src, bool read_only, long tcount,
     bool direct_call, void (*callbackFn)(void *), void *clientData, int nargs, Oid *argtypes, Datum *Values,
     ParserSetupHook parserSetup, void *parserSetupArg, const char *nulls = NULL);
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index 27654be90..5b7e9ef86 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -5,8 +5,8 @@
  *
  * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
- * Portions Copyright (c) 2021, openGauss Contributors
  *
+ * Portions Copyright (c) 2021, openGauss Contributors
  * src/include/executor/spi_priv.h
  *
  * -------------------------------------------------------------------------
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 697ed9b9b..48b4850fd 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -135,6 +135,7 @@ typedef struct TupleTableSlot {
     int tts_nvalid;    /* # of valid values in tts_values */
     Datum* tts_values; /* current per-attribute values */
     bool* tts_isnull;  /* current per-attribute isnull flags */
+    Datum* tts_lobPointers;
     MinimalTuple tts_mintuple; /* minimal tuple, or NULL if none */
     HeapTupleData
tts_minhdr; /* workspace for minimal-tuple-only case */ long tts_off; /* saved state for slot_deform_tuple */ @@ -157,6 +158,8 @@ extern TupleTableSlot* MakeSingleTupleTableSlot(TupleDesc tupdesc, bool allocSlo extern void ExecDropSingleTupleTableSlot(TupleTableSlot* slot); extern void ExecSetSlotDescriptor(TupleTableSlot* slot, TupleDesc tupdesc); extern TupleTableSlot* ExecStoreTuple(Tuple tuple, TupleTableSlot* slot, Buffer buffer, bool shouldFree); +extern TupleTableSlot *ExecStoreTupleBatch(HeapTuple tuple, TupleTableSlot *slot, + Buffer buffer, bool shouldFree, int rownum); extern TupleTableSlot* ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot* slot, bool shouldFree); #ifdef PGXC @@ -183,11 +186,13 @@ extern MinimalTuple heap_slot_copy_minimal_tuple(TupleTableSlot *slot); extern void heap_slot_store_minimal_tuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree); extern HeapTuple heap_slot_get_heap_tuple (TupleTableSlot* slot); extern HeapTuple heap_slot_copy_heap_tuple (TupleTableSlot *slot); -extern void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer buffer, bool should_free); +extern void heap_slot_store_heap_tuple(HeapTuple tuple, TupleTableSlot* slot, Buffer buffer, bool shouldFree, bool batchMode); extern Datum heap_slot_getattr(TupleTableSlot* slot, int attnum, bool* isnull, bool need_transform_anyarray = false); extern void heap_slot_getallattrs(TupleTableSlot* slot, bool need_transform_anyarray = false); +extern void slot_getallattrsfast(TupleTableSlot *slot, int maxIdx); extern void heap_slot_getsomeattrs(TupleTableSlot* slot, int attnum); extern bool heap_slot_attisnull(TupleTableSlot* slot, int attnum); +extern void heap_slot_formbatch(TupleTableSlot* slot, struct VectorBatch* batch, int cur_rows, int attnum); #endif /* !FRONTEND_PARSER */ #endif /* TUPTABLE_H */ diff --git a/src/include/fmgr.h b/src/include/fmgr.h index 48cdd9c77..89d9b582a 100644 --- a/src/include/fmgr.h +++ b/src/include/fmgr.h @@ -162,7 +162,7 @@ typedef struct FunctionCallInfoData { Datum prealloc_arg[FUNC_PREALLOCED_ARGS]; /* prealloced arguments.*/ bool prealloc_argnull[FUNC_PREALLOCED_ARGS]; /* prealloced argument null flags.*/ Oid prealloc_argTypes[FUNC_PREALLOCED_ARGS]; /* prealloced argument type */ - ScalarVector* argVector; /*Scalar Vector */ + ScalarVector* argVector; /* Scalar Vector */ RefcusorInfoData refcursor_data; UDFInfoType udfInfo; StartWithFuncEvalInfo swinfo; diff --git a/src/include/fmgr/fmgr_comp.h b/src/include/fmgr/fmgr_comp.h index 52b06061d..f44c3daac 100644 --- a/src/include/fmgr/fmgr_comp.h +++ b/src/include/fmgr/fmgr_comp.h @@ -78,13 +78,13 @@ */ extern struct varlena* pg_detoast_datum(struct varlena* datum); extern struct varlena* pg_detoast_datum_copy(struct varlena* datum); -extern struct varlena* pg_detoast_datum_slice(struct varlena* datum, int32 first, int32 count); +extern struct varlena* pg_detoast_datum_slice(struct varlena* datum, int64 first, int32 count); extern struct varlena* pg_detoast_datum_packed(struct varlena* datum); #define PG_DETOAST_DATUM(datum) pg_detoast_datum((struct varlena*)DatumGetPointer(datum)) #define PG_DETOAST_DATUM_COPY(datum) pg_detoast_datum_copy((struct varlena*)DatumGetPointer(datum)) #define PG_DETOAST_DATUM_SLICE(datum, f, c) \ - pg_detoast_datum_slice((struct varlena*)DatumGetPointer(datum), (int32)(f), (int32)(c)) + pg_detoast_datum_slice((struct varlena*)DatumGetPointer(datum), (int64)(f), (int32)(c)) /* WARNING -- unaligned pointer */ #define PG_DETOAST_DATUM_PACKED(datum) 
pg_detoast_datum_packed((struct varlena*)DatumGetPointer(datum)) diff --git a/src/include/funcapi.h b/src/include/funcapi.h index f9832e82a..d11278652 100644 --- a/src/include/funcapi.h +++ b/src/include/funcapi.h @@ -156,6 +156,7 @@ extern TypeFuncClass get_call_result_type(FunctionCallInfo fcinfo, Oid* resultTy extern TypeFuncClass get_expr_result_type(Node* expr, Oid* resultTypeId, TupleDesc* resultTupleDesc, int4* resultTypeId_orig = NULL); extern TypeFuncClass get_func_result_type(Oid functionId, Oid* resultTypeId, TupleDesc* resultTupleDesc); +extern void construct_func_param_desc(Oid funcid, TypeFuncClass* typclass, TupleDesc* tupdesc, Oid* resultTypeId); extern bool resolve_polymorphic_argtypes(int numargs, Oid* argtypes, const char* argmodes, Node* call_expr); @@ -168,6 +169,7 @@ extern char* get_func_result_name(Oid functionId); extern TupleDesc build_function_result_tupdesc_d(Datum proallargtypes, Datum proargmodes, Datum proargnames, Datum funcid); extern TupleDesc build_function_result_tupdesc_t(HeapTuple procTuple); +extern TupleDesc get_func_param_desc(HeapTuple tp, Oid resultTypeId, int* return_out_args_num = NULL); /* ---------- * Support to ease writing functions returning composite types @@ -261,6 +263,7 @@ extern TupleTableSlot* TupleDescGetSlot(TupleDesc tupdesc); extern FuncCallContext* init_MultiFuncCall(PG_FUNCTION_ARGS); extern FuncCallContext* per_MultiFuncCall(PG_FUNCTION_ARGS); extern void end_MultiFuncCall(PG_FUNCTION_ARGS, FuncCallContext* funcctx); +extern bool is_function_with_plpgsql_language_and_outparam(Oid funcid); #define SRF_IS_FIRSTCALL() (fcinfo->flinfo->fn_extra == NULL) diff --git a/src/include/gs_thread.h b/src/include/gs_thread.h index bb1b634dc..bea42c903 100755 --- a/src/include/gs_thread.h +++ b/src/include/gs_thread.h @@ -100,12 +100,15 @@ typedef enum knl_thread_role { DATARECWRITER, CBMWRITER, PAGEWRITER_THREAD, + PAGEREPAIR_THREAD, HEARTBEAT, COMM_SENDERFLOWER, COMM_RECEIVERFLOWER, COMM_RECEIVER, COMM_AUXILIARY, COMM_POOLER_CLEAN, + LOGICAL_READ_RECORD, + PARALLEL_DECODE, UNDO_RECYCLER, UNDO_LAUNCHER, @@ -118,6 +121,7 @@ typedef enum knl_thread_role { SHARE_STORAGE_XLOG_COPYER, APPLY_LAUNCHER, APPLY_WORKER, + BARRIER_PREPARSE, TS_COMPACTION, TS_COMPACTION_CONSUMER, TS_COMPACTION_AUXILIAY, @@ -136,6 +140,7 @@ typedef enum knl_thread_role { REDISTRIBUTION_WORKER, WAL_NORMAL_SENDER, WAL_HADR_SENDER, /* A cross cluster wal sender to hadr cluster main standby */ + WAL_HADR_CN_SENDER, /* A cross cluster wal sender to hadr cluster coordinator standby */ WAL_SHARE_STORE_SENDER, /* A cross cluster wal sender to share storage cluster standby */ WAL_STANDBY_SENDER, /* Am I cascading WAL to another standby ? 
*/ WAL_DB_SENDER, diff --git a/src/include/gs_threadlocal.h b/src/include/gs_threadlocal.h index 4f8dfa82e..40ae7851d 100644 --- a/src/include/gs_threadlocal.h +++ b/src/include/gs_threadlocal.h @@ -15,7 +15,6 @@ #ifndef GS_THREADLOCAL_H_ #define GS_THREADLOCAL_H_ -/*PClint*/ #ifdef PC_LINT #define THR_LOCAL #endif diff --git a/src/include/gtm/gtm.h b/src/include/gtm/gtm.h index e660789d3..27e8fd14f 100644 --- a/src/include/gtm/gtm.h +++ b/src/include/gtm/gtm.h @@ -184,6 +184,9 @@ GTM_ThreadInfo* GTM_GetThreadInfo(GTM_ThreadID thrid); extern void DestroyConnectControlTable(void); extern void RebuildConnectControlTable(void); +extern uint64 GTM_ConPointTblFind(const char* cnName); +extern void GTM_ConPointTblInsert(const char* cnName, uint64 consistencyPoint); + /* * pthread keys to get thread specific information */ diff --git a/src/include/gtm/gtm_c.h b/src/include/gtm/gtm_c.h index ad65f9ca5..cb8efee66 100644 --- a/src/include/gtm/gtm_c.h +++ b/src/include/gtm/gtm_c.h @@ -35,6 +35,10 @@ #include "c.h" #include "cm/etcdapi.h" +#define DR_MAX_NODE_NUM (1024 * 3) +/* disaster cluster info form: "num_cn num_slice num_one_slice (slice_num host host1 port slice_name)..." */ +#define MAX_DISASTER_INFO_LEN (DR_MAX_NODE_NUM * 90 + 15) + typedef uint64 GlobalTransactionId; /* 64-bit global transaction ids */ typedef int16 GTMProxy_ConnID; typedef uint32 GTM_StrLen; diff --git a/src/include/gtm/gtm_client.h b/src/include/gtm/gtm_client.h index 0fd29b8fa..bb9e2d981 100644 --- a/src/include/gtm/gtm_client.h +++ b/src/include/gtm/gtm_client.h @@ -155,6 +155,13 @@ typedef union GTM_ResultData { int32 grd_delete_resource_pool_result; /*MSG_WLM_RESOURCEPOOL_DELETE*/ int32 grd_init_resource_pool_result; /*MSG_WLM_RESOURCEPOOL_INIT*/ + int32 grd_set_disaster_cluster_result; + int32 grd_del_disaster_cluster_result; + struct { + int32 length; + char* result; + } grd_get_disaster_cluster_result; + struct { int32 length; char* result; @@ -221,6 +228,10 @@ int set_gtm_vaccum_flag(GTM_Conn* conn, bool is_vaccum, GTM_TransactionKey txnKe GTM_TransactionKey begin_transaction(GTM_Conn* conn, GTM_IsolationLevel isolevel, GTM_Timestamp* timestamp); GlobalTransactionId begin_get_gxid(GTM_Conn* conn, GTM_TransactionKey txn, bool is_sub_xact, GTMClientErrCode* err); +bool begin_set_disaster_cluster(GTM_Conn* conn, char* disasterCluster, GTMClientErrCode* err); +bool begin_get_disaster_cluster(GTM_Conn* conn, char** disasterCluster, GTMClientErrCode* err); +bool begin_del_disaster_cluster(GTM_Conn* conn, GTMClientErrCode* err); + GlobalTransactionId begin_transaction_gxid(GTM_Conn* conn, GTM_IsolationLevel isolevel, GTM_Timestamp* timestamp); int bkup_gtm_control_file_gxid(GTM_Conn* conn, GlobalTransactionId gxid); @@ -246,12 +257,17 @@ int bkup_prepare_transaction(GTM_Conn* conn, GlobalTransactionId gxid); int get_gid_data(GTM_Conn* conn, GTM_IsolationLevel isolevel, const char* gid, GlobalTransactionId* gxid, GlobalTransactionId* prepared_gxid, char** nodestring); +bool set_consistency_point_csn(GTM_Conn* conn, CommitSeqNo consistencyPointCSN, GTMClientErrCode* err); +bool set_consistency_point(GTM_Conn* conn, CommitSeqNo consistencyPointCSN, const char* cnName, GTMClientErrCode* err); + /* * Snapshot Management API */ GTM_SnapshotData *get_snapshot(GTM_Conn *conn, GTM_TransactionKey txnKey, GlobalTransactionId gxid, bool canbe_grouped, bool is_vacuum); GTM_SnapshotData *get_snapshot_gtm_lite(GTM_Conn *conn); +GTM_SnapshotData *get_snapshot_gtm_dr(GTM_Conn *conn); +GTM_SnapshotData *get_snapshot_gtm_disaster(GTM_Conn 
*conn, const char* cnName); GTMLite_StatusData *get_status_gtm_lite(GTM_Conn *conn); @@ -333,5 +349,4 @@ int InitResourcePool(GTM_Conn* conn, int rp_count, int buf_len, char* buf); /* Set parameter API */ void set_gtm_client_rw_timeout(int timeout); extern void process_for_gtm_connection_failed(GTM_Conn* conn); -extern THR_LOCAL bool need_reset_xmin; #endif diff --git a/src/include/gtm/gtm_msg.h b/src/include/gtm/gtm_msg.h index 466a65c5c..7b1d32bae 100644 --- a/src/include/gtm/gtm_msg.h +++ b/src/include/gtm/gtm_msg.h @@ -127,6 +127,12 @@ typedef enum GTM_MessageType { MSG_TXN_GET_NEXT_CSN_LITE, /* gtm-lite commit csn */ MSG_SNAPSHOT_GET_LITE, /* Get a GTMLite global snapshot */ MSG_GET_GTM_LITE_STATUS, /* Get the status of gtm lite */ + + MSG_SET_DISASTER_CLUSTER, + MSG_GET_DISASTER_CLUSTER, + MSG_DEL_DISASTER_CLUSTER, + MSG_SNAPSHOT_GET_DR, + MSG_SET_CONSISTENCY_POINT, /* * Must be at the end */ @@ -202,6 +208,11 @@ typedef enum GTM_ResultType { GTM_HOTPATCH_RESULT, SNAPSHOT_GET_RESULT_LITE, GTM_LITE_STATUS_RESULT, + DISASTER_CLUSTER_SET_RESULT, + DISASTER_CLUSTER_GET_RESULT, + DISASTER_CLUSTER_DEL_RESULT, + SNAPSHOT_GET_RESULT_DR, + TXN_SET_CONSISTENCY_POINT_RESULT, RESULT_TYPE_COUNT } GTM_ResultType; diff --git a/src/include/gtm/gtm_txn.h b/src/include/gtm/gtm_txn.h index b03187485..1bf74d3e7 100644 --- a/src/include/gtm/gtm_txn.h +++ b/src/include/gtm/gtm_txn.h @@ -102,6 +102,10 @@ extern void GlobalTransactionIdAbort(GlobalTransactionId transactionId); extern void GTM_CalculateLatestSnapshot(bool calc_xmin); +extern bool GTM_SetDisasterClusterToEtcd(const char* mainCluster); +extern char* GTM_GetDisasterClusterFromEtcd(); +extern bool GTM_DelDisasterClusterFromEtcd(); + /* in transam/varsup.c */ extern bool GTM_SetDoVacuum(GTM_TransactionHandle handle); extern GlobalTransactionId GTM_GetGlobalTransactionId(GTM_TransactionHandle handle, bool is_sub_xact); @@ -189,6 +193,8 @@ typedef struct GTM_Transactions { volatile GlobalTransactionId gt_backedUpXid; /* backed up, restoration point */ volatile uint64 gt_csn; /* current commit sequence number */ + volatile uint64 gt_consistencyPointCsn; + volatile uint64 gt_baseConsistencyPoint; GlobalTransactionId gt_oldestXid; /* cluster-wide minimum datfrozenxid */ /* @@ -293,6 +299,7 @@ void ProcessGXIDListCommand(Port* myport, StringInfo message); void ProcessGetNextGXIDTransactionCommand(Port* myport, StringInfo message); void ProcessGetNextCSNTransactionCommand(Port* myport, StringInfo message); void ProcessGetNextCSNTransactionLiteCommand(Port* myport, StringInfo message); +void ProcessSetConsistencyPointTransactionCommand(Port* myport, StringInfo message); void ProcessGetGlobalXminTransactionCommand(Port* myport, StringInfo message); void ProcessGetTimelineTransactionCommand(Port* myport, StringInfo message); @@ -339,6 +346,9 @@ bool GTM_SyncTimelineToStandby(GTM_Timeline timeline); bool GTM_SetTimelineToEtcd(GTM_Timeline timeline); bool GTM_GetTimelineFromEtcd(GTM_Timeline& timeline); +bool GTM_SetConsistencyPointToEtcd(uint64 cp_csn); +bool GTM_GetConsistencyPointFromEtcd(); + void ProcessWorkloadManagerInitCommand(Port* myport, StringInfo message, bool is_backup); void ProcessWorkloadManagerReserveMemCommand(Port* myport, StringInfo message, bool is_backup); void ProcessWorkloadManagerReleaseMemCommand(Port* myport, StringInfo message, bool is_backup); @@ -355,7 +365,11 @@ void ProcessWorkloadManagerDeleteResourcePoolCommand(Port* myport, StringInfo me */ void ProcessGetSnapshotCommand(Port* myport, StringInfo message, bool get_gxid); 
void ProcessGetSnapshotLiteCommand(Port* myport, StringInfo message); +void ProcessGetSnapshotDRCommand(Port* myport, StringInfo message); void ProcessGetSnapshotStatusCommand(Port* myport, StringInfo message); void ProcessGetGTMLiteStatusCommand(Port* myport, StringInfo message); void GTM_FreeSnapshotData(GTM_Snapshot snapshot); +void ProcessSetDisasterClusterCommand(Port *myport, GTM_MessageType mtype, StringInfo message); +void ProcessGetDisasterClusterCommand(Port *myport, GTM_MessageType mtype, StringInfo message); +void ProcessDelDisasterClusterCommand(Port *myport, GTM_MessageType mtype, StringInfo message); #endif diff --git a/src/include/gtm/utils/elog.h b/src/include/gtm/utils/elog.h index 7d4b1cec7..2ddfd3aa8 100644 --- a/src/include/gtm/utils/elog.h +++ b/src/include/gtm/utils/elog.h @@ -219,7 +219,7 @@ extern int errhint(const char* fmt, ...) } while (0) #else #define elog elog_start(__FILE__, __LINE__, PG_FUNCNAME_MACRO), elog_finish -#endif /* PCLINT_CHECK */ +#endif extern void elog_start(const char* filename, int lineno, const char* funcname); extern void elog_finish(int elevel, const char* fmt, ...) diff --git a/src/include/gtm/utils/libpq-int.h b/src/include/gtm/utils/libpq-int.h index f02b17568..33186d54d 100644 --- a/src/include/gtm/utils/libpq-int.h +++ b/src/include/gtm/utils/libpq-int.h @@ -34,6 +34,10 @@ #else #include #endif + +#ifdef __cplusplus +extern "C" { +#endif /* * GTM_Conn stores all the state data associated with a single connection * to a backend. @@ -154,4 +158,8 @@ int gtmpqPutMsgBytes(const void* buf, size_t len, GTM_Conn* conn); #define SOCK_ERRNO errno #define SOCK_ERRNO_SET(e) (errno = (e)) +#ifdef __cplusplus +} +#endif + #endif /* LIBPQ_INT_H */ diff --git a/src/include/instruments/instr_statement.h b/src/include/instruments/instr_statement.h index 3bc0fca6d..4f7368e0b 100644 --- a/src/include/instruments/instr_statement.h +++ b/src/include/instruments/instr_statement.h @@ -165,6 +165,7 @@ typedef struct StatementStatContext { uint64 unique_query_id; /* from knl_u_unique_sql_context's unique_sql_id */ uint64 debug_query_id; /* from knl_session_context's debug_query_id */ uint32 unique_sql_cn_id; /* from knl_session_context's unique_sql_cn_id */ + char trace_id[MAX_TRACE_ID_SIZE]; /* from knl_session_context's trace_id */ char* query; /* from PgBackendStatus's st_activity or knl_u_unique_sql_context's curr_single_unique_sql */ TimestampTz start_time; /* from PgBackendStatus's st_activity_start_timestamp */ @@ -206,6 +207,7 @@ extern void instr_stmt_report_txid(uint64 txid); extern void instr_stmt_report_query(uint64 unique_query_id); extern void instr_stmt_report_query_plan(QueryDesc *queryDesc); extern void instr_stmt_report_debug_query_id(uint64 debug_query_id); +extern void instr_stmt_report_trace_id(char *trace_id); extern void instr_stmt_report_start_time(); extern void instr_stmt_report_finish_time(); extern bool instr_stmt_need_track_plan(); diff --git a/src/include/instruments/instr_unique_sql.h b/src/include/instruments/instr_unique_sql.h index 9b58a5de0..b20478283 100644 --- a/src/include/instruments/instr_unique_sql.h +++ b/src/include/instruments/instr_unique_sql.h @@ -189,6 +189,9 @@ extern int GetUniqueSQLTrackType(); #define START_TRX_UNIQUE_SQL_ID 2718638560 +#define PUSH_SKIP_UNIQUE_SQL_HOOK() u_sess->unique_sql_cxt.skipUniqueSQLCount++; +#define POP_SKIP_UNIQUE_SQL_HOOK() u_sess->unique_sql_cxt.skipUniqueSQLCount--; + void InitUniqueSQL(); void UpdateUniqueSQLStat(Query* query, const char* sql, int64 elapse_start_time, 
PgStat_TableCounts* agg_table_count = NULL, UniqueSQLStat* sql_stat = NULL); diff --git a/src/include/iprange/iprange.h b/src/include/iprange/iprange.h index 907556e6f..ea2eb9b45 100644 --- a/src/include/iprange/iprange.h +++ b/src/include/iprange/iprange.h @@ -165,7 +165,6 @@ public: bool empty() const; const std::string& get_err_str() { return m_err_str; } - void print_ranges(); std::string ip_to_str(const IPV6 *ip) const; bool str_to_ip(const char* ip_str, IPV6 *ip); private: @@ -184,7 +183,6 @@ private: bool mask_range(Range *range, unsigned short cidr); void handle_remove_intersection(Ranges_t *new_ranges, const Range *remove_range, Range *exist_range); bool handle_add_intersection(Range *new_range, const Range *exist_range); - void print_range(const Range *range); void copy_without_spaces(char buf[], size_t buf_len, const char *original, size_t original_len) const; void net_ipv6_to_host_order(IPV6 *ip, const struct sockaddr_in6 *sa) const; void net_ipv4_to_host_order(IPV6 *ip, const struct in_addr *addr) const; diff --git a/src/include/knl/knl_guc/knl_instance_attr_common.h b/src/include/knl/knl_guc/knl_instance_attr_common.h index da51c6e89..df0401f54 100644 --- a/src/include/knl/knl_guc/knl_instance_attr_common.h +++ b/src/include/knl/knl_guc/knl_instance_attr_common.h @@ -51,6 +51,7 @@ typedef struct knl_instance_attr_common { bool enable_thread_pool; bool enable_ffic_log; bool enable_global_plancache; + bool enable_global_syscache; int max_files_per_process; int pgstat_track_activity_query_size; int GtmHostPortArray[MAX_GTM_HOST_NUM]; @@ -70,6 +71,7 @@ typedef struct knl_instance_attr_common { char* PGXCNodeName; char* transparent_encrypt_kms_url; char* thread_pool_attr; + char* thread_pool_stream_attr; char* comm_proxy_attr; char* numa_distribute_mode; @@ -98,6 +100,7 @@ typedef struct knl_instance_attr_common { bool enable_auto_clean_unique_sql; #endif int cluster_run_mode; + int stream_cluster_run_mode; } knl_instance_attr_common; #endif /* SRC_INCLUDE_KNL_KNL_INSTANCE_ATTR_COMMON_H_ */ diff --git a/src/include/knl/knl_guc/knl_instance_attr_memory.h b/src/include/knl/knl_guc/knl_instance_attr_memory.h index d262925f2..1c4d2565f 100644 --- a/src/include/knl/knl_guc/knl_instance_attr_memory.h +++ b/src/include/knl/knl_guc/knl_instance_attr_memory.h @@ -46,6 +46,8 @@ typedef struct knl_instance_attr_memory { int memorypool_size; int max_process_memory; int local_syscache_threshold; + bool enable_memory_context_check_debug; + int global_syscache_threshold; } knl_instance_attr_memory; #endif /* SRC_INCLUDE_KNL_KNL_INSTANCE_ATTR_MEMORY_H_ */ diff --git a/src/include/knl/knl_guc/knl_instance_attr_security.h b/src/include/knl/knl_guc/knl_instance_attr_security.h index ac8357a95..89b8c731e 100644 --- a/src/include/knl/knl_guc/knl_instance_attr_security.h +++ b/src/include/knl/knl_guc/knl_instance_attr_security.h @@ -56,6 +56,7 @@ typedef struct knl_instance_attr_security { char* transparent_encrypt_kms_region; bool use_elastic_search; char* elastic_search_ip_addr; + int audit_thread_num; } knl_instance_attr_security; #endif /* SRC_INCLUDE_KNL_KNL_INSTANCE_ATTR_SECURITY_H_ */ diff --git a/src/include/knl/knl_guc/knl_instance_attr_storage.h b/src/include/knl/knl_guc/knl_instance_attr_storage.h index f8c45bf72..d52797c9b 100755 --- a/src/include/knl/knl_guc/knl_instance_attr_storage.h +++ b/src/include/knl/knl_guc/knl_instance_attr_storage.h @@ -46,6 +46,7 @@ enum LWLOCK_PARTITION_ID { CSNLOG_PART = 1, LOG2_LOCKTABLE_PART = 2, TWOPHASE_PART = 3, + FASTPATH_PART = 4, LWLOCK_PART_KIND }; @@ 
-94,6 +95,9 @@ typedef struct knl_instance_attr_storage { bool enable_delta_store; bool enableWalLsnCheck; bool gucMostAvailableSync; + bool enable_ustore; + bool auto_csn_barrier; + bool enable_wal_shipping_compression; int WalReceiverBufSize; int DataQueueBufSize; int NBuffers; @@ -120,17 +124,16 @@ typedef struct knl_instance_attr_storage { int recovery_undo_workers; int recovery_redo_workers_per_paser_worker; int pagewriter_thread_num; + int dw_file_num; + int dw_file_size; int real_recovery_parallelism; int batch_redo_num; int remote_read_mode; int advance_xlog_file_num; int gtm_option; int max_undo_workers; - int auto_csn_barrier; int enable_update_max_page_flush_lsn; int max_keep_log_seg; - int undo_space_limit_size; - int undo_limit_size_transaction; int max_size_for_xlog_receiver; #ifdef EXTREME_RTO_DEBUG_AB int extreme_rto_ab_pos; @@ -148,7 +151,10 @@ typedef struct knl_instance_attr_storage { int64 xlog_file_size; char* xlog_file_path; char* xlog_lock_file_path; + int wal_flush_timeout; + int wal_flush_delay; int max_logical_replication_workers; + char *redo_bind_cpu_attr; } knl_instance_attr_storage; #endif /* SRC_INCLUDE_KNL_KNL_INSTANCE_ATTR_STORAGE_H_ */ diff --git a/src/include/knl/knl_guc/knl_session_attr_common.h b/src/include/knl/knl_guc/knl_session_attr_common.h index 5cd8edab0..461581e3b 100644 --- a/src/include/knl/knl_guc/knl_session_attr_common.h +++ b/src/include/knl/knl_guc/knl_session_attr_common.h @@ -91,9 +91,6 @@ typedef struct knl_session_attr_common { int Log_RotationSize; int max_function_args; int max_user_defined_exception; - int pset_lob_length; - char* pset_num_format; - int pset_num_width; int tcp_keepalives_idle; int tcp_keepalives_interval; int tcp_keepalives_count; @@ -222,9 +219,9 @@ typedef struct knl_session_attr_common { char* node_name; #ifndef ENABLE_MULTIPLE_NODES bool plsql_show_all_error; +#endif uint32 extension_session_vars_array_size; void** extension_session_vars_array; -#endif } knl_session_attr_common; #endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_COMMON_H_ */ diff --git a/src/include/knl/knl_guc/knl_session_attr_security.h b/src/include/knl/knl_guc/knl_session_attr_security.h index 064a682fe..3e2aa3b91 100644 --- a/src/include/knl/knl_guc/knl_session_attr_security.h +++ b/src/include/knl/knl_guc/knl_session_attr_security.h @@ -87,6 +87,7 @@ typedef struct knl_session_attr_security { char* pg_krb_srvnam; char* tde_cmk_id; bool Enable_Security_Policy; + int audit_xid_info; } knl_session_attr_security; #endif /* SRC_INCLUDE_KNL_KNL_SESSION_ATTR_SECURITY_H_ */ diff --git a/src/include/knl/knl_guc/knl_session_attr_sql.h b/src/include/knl/knl_guc/knl_session_attr_sql.h index 1b9892e2a..89e055eef 100644 --- a/src/include/knl/knl_guc/knl_session_attr_sql.h +++ b/src/include/knl/knl_guc/knl_session_attr_sql.h @@ -230,8 +230,11 @@ typedef struct knl_session_attr_sql { char* db4ai_snapshot_version_delimiter; char* db4ai_snapshot_version_separator; int pldebugger_timeout; - bool for_print_tuple; - bool numeric_out_for_format; + +#ifndef ENABLE_MULTIPLE_NODES + bool uppercase_attribute_name; +#endif + int vectorEngineStrategy; #ifndef ENABLE_MULTIPLE_NODES bool enable_custom_parser; #endif diff --git a/src/include/knl/knl_guc/knl_session_attr_storage.h b/src/include/knl/knl_guc/knl_session_attr_storage.h index ef65905ab..ed7748d0e 100755 --- a/src/include/knl/knl_guc/knl_session_attr_storage.h +++ b/src/include/knl/knl_guc/knl_session_attr_storage.h @@ -43,6 +43,8 @@ typedef struct knl_session_attr_dcf { /* parameters can be reloaded while DCF is 
running */ int dcf_election_timeout; + int dcf_auto_elc_priority_en; + int dcf_election_switch_threshold; int dcf_run_mode; char* dcf_log_level; int dcf_max_log_file_size; @@ -208,6 +210,7 @@ typedef struct knl_session_attr_storage { bool enable_twophase_commit; int ustats_tracker_naptime; int umax_search_length_for_prune; + int archive_interval; /* * xlog keep for all standbys even through they are not connect and donnot created replslot. @@ -216,6 +219,7 @@ typedef struct knl_session_attr_storage { int max_size_for_xlog_prune; int defer_csn_cleanup_time; + bool enable_defer_calculate_snapshot; bool enable_hashbucket; bool enable_segment; @@ -223,8 +227,13 @@ typedef struct knl_session_attr_storage { int max_active_gtt; int vacuum_gtt_defer_check_age; + /* for undo */ + int undo_space_limit_size; + int undo_limit_size_transaction; + bool enable_recyclebin; - int recyclebin_retention; + int recyclebin_retention_time; + int undo_retention_time; /* * !!!!!!!!!!! Be Carefull !!!!!!!!!!! * Make sure to use the same value in UHeapCalcTupleDataSize and UheapFillDiskTuple when creating a tuple. diff --git a/src/include/knl/knl_instance.h b/src/include/knl/knl_instance.h index fabfaab66..fe6a28d4a 100755 --- a/src/include/knl/knl_instance.h +++ b/src/include/knl/knl_instance.h @@ -40,6 +40,7 @@ #include "nodes/pg_list.h" #include "storage/lock/s_lock.h" #include "access/double_write_basic.h" +#include "utils/knl_globalsysdbcache.h" #include "utils/palloc.h" #include "replication/replicainternal.h" #include "storage/latch.h" @@ -74,6 +75,19 @@ const int NUM_PERCENTILE_COUNT = 2; const int INIT_NUMA_ALLOC_COUNT = 32; const int HOTKEY_ABANDON_LENGTH = 100; const int MAX_GLOBAL_CACHEMEM_NUM = 128; +const int MAX_GLOBAL_PRC_NUM = 32; +const int MAX_AUDIT_NUM = 48; + +const uint32 PARALLEL_DECODE_WORKER_INVALID = 0; +const uint32 PARALLEL_DECODE_WORKER_START = 1; +const uint32 PARALLEL_DECODE_WORKER_READY = 2; +const uint32 PARALLEL_DECODE_WORKER_EXIT = 3; +/* Maximum number of max parallel decode threads */ +#define MAX_PARALLEL_DECODE_NUM 20 + +/* Maximum number of max replication slots */ +#define MAX_REPLICATION_SLOT_NUM 100 + #ifndef ENABLE_MULTIPLE_NODES const int DB_CMPT_MAX = 4; #endif @@ -116,6 +130,7 @@ typedef struct knl_g_cache_context { MemoryContext global_cache_mem; MemoryContext global_plancache_mem[MAX_GLOBAL_CACHEMEM_NUM]; + MemoryContext global_prc_mem[MAX_GLOBAL_PRC_NUM]; } knl_g_cache_context; typedef struct knl_g_cost_context { @@ -138,6 +153,7 @@ typedef struct knl_g_pid_context { ThreadId BgWriterPID; ThreadId SpBgWriterPID; ThreadId* PageWriterPID; + ThreadId PageRepairPID; ThreadId CheckpointerPID; ThreadId WalWriterPID; ThreadId WalWriterAuxiliaryPID; @@ -150,7 +166,7 @@ typedef struct knl_g_pid_context { ThreadId PgArchPID; ThreadId PgStatPID; ThreadId PercentilePID; - ThreadId PgAuditPID; + ThreadId *PgAuditPID; ThreadId SysLoggerPID; ThreadId CatchupPID; ThreadId WLMCollectPID; @@ -181,6 +197,9 @@ typedef struct knl_g_pid_context { ThreadId TsCompactionPID; ThreadId TsCompactionAuxiliaryPID; ThreadId sharedStorageXlogCopyThreadPID; + ThreadId LogicalReadWorkerPID; + ThreadId LogicalDecoderWorkerPID; + ThreadId BarrierPreParsePID; ThreadId ApplyLauncerPID; } knl_g_pid_context; @@ -430,11 +449,35 @@ typedef struct ckpt_view_struct { } ckpt_view_struct; const int TWO_UINT64_SLOT = 2; -/*checkpoint*/ + +typedef struct knl_g_audit_context { + MemoryContext global_audit_context; + /* audit pipes */ + int *sys_audit_pipes; + + /* audit logs */ + char index_file_path[MAXPGPATH]; + 
LWLock *index_file_lock; + struct AuditIndexTable* audit_indextbl; + struct AuditIndexTableOld* audit_indextbl_old; + uint64 pgaudit_totalspace; + + /* audit thread */ + volatile uint32 current_audit_index; + volatile int thread_num; + volatile uint32 audit_coru_fnum[MAX_AUDIT_NUM]; +} knl_g_audit_context; + +/* incremental checkpoint */ typedef struct knl_g_ckpt_context { uint64 dirty_page_queue_reclsn; uint64 dirty_page_queue_tail; + /* dirty pages applied to flush */ + volatile uint32 prepared; + pg_atomic_uint32 CkptBufferIdsTail; + pg_atomic_uint32 CkptBufferIdsCompletedPages; + volatile uint32 CkptBufferIdsFlushPages; CkptSortItem* CkptBufferIds; /* dirty_page_queue store dirty buffer, buf_id + 1, 0 is invalid */ @@ -479,6 +522,8 @@ typedef struct knl_g_ckpt_context { struct CheckpointCallbackItem* ckptCallback; #endif + struct IncreCkptSyncShmemStruct* incre_ckpt_sync_shmem; + uint64 pad[TWO_UINT64_SLOT]; } knl_g_ckpt_context; @@ -490,20 +535,49 @@ typedef struct recovery_dw_buf { bool *single_flush_state; } recovery_dw_buf; -typedef struct knl_g_dw_context { +typedef struct dw_batch_file_context{ + int id; + volatile int fd; - volatile uint32 closed; uint16 flush_page; /* total number of flushed pages before truncate or reset */ - MemoryContext mem_cxt; char* unaligned_buf; char* buf; - struct LWLock* flush_lock; /* single flush: first version pos lock, batch flush: pos lock */ + struct LWLock* flush_lock; dw_file_head_t* file_head; volatile uint32 write_pos; /* the copied pages in buffer, updated when mark page */ + uint64 file_size; + + dw_stat_info_batch batch_stat_info; + + char file_name[PATH_MAX]; +}dw_batch_file_context; + +typedef struct knl_g_dw_context { + volatile int fd; + volatile uint32 closed; + + volatile bool old_batch_version; + volatile int recovery_dw_file_num; + volatile int recovery_dw_file_size; + + dw_batch_meta_file batch_meta_file; + dw_batch_file_context *batch_file_cxts; + + MemoryContext mem_cxt; + volatile uint32 dw_version; + uint16 flush_page; + + char* unaligned_buf; + char* buf; + + struct LWLock* flush_lock; + dw_file_head_t* file_head; + volatile uint32 write_pos; + /* second version flush, only single flush */ struct LWLock* second_flush_lock; /* single flush: second version pos lock */ struct LWLock* second_buftag_lock; /* single flush: second version bufferTag page lock */ @@ -519,7 +593,6 @@ typedef struct knl_g_dw_context { recovery_dw_buf recovery_buf; /* recovery the c20 version dw file */ /* dw view information */ - dw_stat_info_batch batch_stat_info; dw_stat_info_single single_stat_info; } knl_g_dw_context; @@ -531,6 +604,17 @@ typedef struct knl_g_bgwriter_context { Latch *invalid_buf_proc_latch; }knl_g_bgwriter_context; +typedef struct knl_g_repair_context { + ThreadId startup_tid; + Latch *repair_proc_latch; + HTAB *page_repair_hashtbl; /* standby store page repair info */ + LWLock *page_repair_hashtbl_lock; + HTAB *file_repair_hashtbl; /* standby store file repair info */ + LWLock *file_repair_hashtbl_lock; + HTAB *global_repair_bad_block_stat; /* local_bad_block_info use this */ + bool support_repair; +} knl_g_repair_context; + typedef struct { ThreadId threadId; uint32 threadState; @@ -561,6 +645,39 @@ typedef enum{ EXTREME_REDO, }RedoType; +extern struct ReorderBufferChange* change; + +enum knl_parallel_decode_state { + DECODE_INIT = 0, + DECODE_STARTING_BEGIN, + DECODE_STARTING_END, + DECODE_IN_PROGRESS, + DECODE_DONE, +}; + +typedef struct { + ThreadId threadId; + uint32 threadState; +} ParallelDecodeWorkerStatus; + +typedef 
struct { + ThreadId threadId; + uint32 threadState; +} ParallelReaderWorkerStatusTye; + +typedef struct knl_g_parallel_decode_context { + MemoryContext parallelDecodeCtx; + int state; + slock_t rwlock; + slock_t destroy_lock; /* redo worker destroy lock */ + char* dbUser; + char* dbName; + int totalNum; + volatile ParallelDecodeWorkerStatus ParallelDecodeWorkerStatusList[MAX_PARALLEL_DECODE_NUM]; + volatile ParallelReaderWorkerStatusTye ParallelReaderWorkerStatus; + gs_thread_t tid[MAX_PARALLEL_DECODE_NUM]; + Oid relationOid; +} knl_g_parallel_decode_context; typedef struct knl_g_parallel_redo_context { RedoType redoType; @@ -574,12 +691,15 @@ typedef struct knl_g_parallel_redo_context { int pre_enable_switch; slock_t destroy_lock; /* redo worker destroy lock */ pg_atomic_uint64 max_page_flush_lsn[NUM_MAX_PAGE_FLUSH_LSN_PARTITIONS]; + pg_atomic_uint64 last_replayed_conflict_csn; pg_atomic_uint32 permitFinishRedo; pg_atomic_uint32 hotStdby; volatile XLogRecPtr newestCheckpointLoc; volatile CheckPoint newestCheckpoint; char* unali_buf; /* unaligned_buf */ char* ali_buf; + XLogRedoNumStatics xlogStatics[RM_NEXT_ID][MAX_XLOG_INFO_NUM]; + RedoCpuBindControl redoCpuBindcontrl; } knl_g_parallel_redo_context; typedef struct knl_g_heartbeat_context { @@ -596,6 +716,13 @@ typedef struct knl_g_barrier_creator_context { bool is_barrier_creator; } knl_g_barrier_creator_context; +typedef struct knl_g_csn_barrier_context { + struct HTAB* barrier_hash_table; + LWLock* barrier_hashtbl_lock; + char stopBarrierId[MAX_BARRIER_ID_LENGTH]; + MemoryContext barrier_context; +} knl_g_csn_barrier_context; + typedef struct knl_g_comm_context { /* function point, for wake up consumer in executor */ wakeup_hook_type gs_wakeup_consumer; @@ -643,6 +770,7 @@ typedef struct knl_g_comm_context { knl_g_counters_context counters_cxt; knl_g_tests_context tests_cxt; knl_g_pollers_context pollers_cxt; + knl_g_parallel_decode_context pdecode_cxt[MAX_REPLICATION_SLOT_NUM]; knl_g_reqcheck_context reqcheck_cxt; knl_g_mctcp_context mctcp_cxt; knl_g_commutil_context commutil_cxt; @@ -653,6 +781,9 @@ typedef struct knl_g_comm_context { HTAB* usedDnSpace; uint32 current_gsrewind_count; + bool request_disaster_cluster; + bool isNeedChangeRole; + long lastArchiveRcvTime; #ifdef USE_SSL libcomm_sslinfo* libcomm_data_port_list; @@ -666,6 +797,12 @@ typedef struct knl_g_comm_logic_context { FdCollection *comm_fd_collection; } knl_g_comm_logic_context; +/* g_instance struct for "Pooler" */ +typedef struct knl_g_pooler_context { + /* Record global connection status for function 'comm_check_connection_status' */ + struct GlobalConnStatus* globalConnStatus; +} knl_g_pooler_context; + typedef struct knl_g_libpq_context { /* Workaround for Solaris 2.6 brokenness */ char* pam_passwd; @@ -717,14 +854,25 @@ typedef struct knl_g_xlog_context { } knl_g_xlog_context; typedef struct knl_g_undo_context { - void *uZones[UNDO_ZONE_COUNT]; + void **uZones; uint32 undoTotalSize; uint32 undoMetaSize; - uint32 uZoneCount; + volatile uint32 uZoneCount; Bitmapset *uZoneBitmap[UNDO_PERSISTENCE_LEVELS]; MemoryContext undoContext; + int64 undoChainTotalSize; + int64 maxChainSize; + uint32 undo_chain_visited_count; + uint32 undoCountThreshold; + TransactionId oldestFrozenXid; + /* Oldest transaction id which is having undo. 
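A parallel decode worker advertises its lifecycle through the `PARALLEL_DECODE_WORKER_INVALID/START/READY/EXIT` values published in `ParallelDecodeWorkerStatusList[].threadState`, and the launcher polls those slots before decoding begins. A minimal sketch of that handshake, modeled with `std::atomic` rather than the kernel's volatile fields and spinlocks; `WaitAllReady` is an illustrative helper, not a function from the patch:

```cpp
#include <atomic>
#include <cstdint>
#include <thread>
#include <vector>

enum WorkerState : std::uint32_t {
    WORKER_INVALID = 0, /* mirrors PARALLEL_DECODE_WORKER_INVALID */
    WORKER_START   = 1,
    WORKER_READY   = 2,
    WORKER_EXIT    = 3,
};

struct WorkerStatus {
    std::atomic<std::uint32_t> state{WORKER_INVALID};
};

/* Launcher side: spin until every started worker reports READY. */
static bool WaitAllReady(std::vector<WorkerStatus>& list) {
    for (auto& w : list) {
        while (w.state.load() == WORKER_START) {
            std::this_thread::yield();
        }
        if (w.state.load() != WORKER_READY) {
            return false; /* a worker exited during startup */
        }
    }
    return true;
}

int main() {
    std::vector<WorkerStatus> status(4);
    std::vector<std::thread> workers;
    for (auto& s : status) {
        s.state.store(WORKER_START);
        workers.emplace_back([&s] { s.state.store(WORKER_READY); });
    }
    bool ok = WaitAllReady(status);
    for (auto& t : workers) {
        t.join();
    }
    return ok ? 0 : 1;
}
```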
*/ + pg_atomic_uint64 oldestXidInUndo; } knl_g_undo_context; +typedef struct knl_g_flashback_context { + TransactionId oldestXminInFlashback; +} knl_g_flashback_context; + struct NumaMemAllocInfo { void* numaAddr; /* Start address returned from numa_alloc_xxx */ size_t length; @@ -761,9 +909,11 @@ typedef struct WalInsertStatusEntry WALInsertStatusEntry; typedef struct WALFlushWaitLockPadded WALFlushWaitLockPadded; typedef struct WALBufferInitWaitLockPadded WALBufferInitWaitLockPadded; typedef struct WALInitSegLockPadded WALInitSegLockPadded; +typedef struct XlogFlushStats XlogFlushStatistics; typedef struct knl_g_conn_context { volatile int CurConnCount; - volatile int CurCMAConnCount; + volatile int CurCMAConnCount; /* Connection count of cm_agent after initialize, using for connection limit */ + volatile uint32 CurCMAProcCount; /* Proc count of cm_agent connections, indicate proc unsing condition */ slock_t ConnCountLock; } knl_g_conn_context; @@ -790,6 +940,9 @@ typedef struct knl_g_wal_context { volatile int lastLRCFlushed; int num_locks_in_group; DemoteMode upgradeSwitchMode; + uint64 totalXlogIterBytes; + uint64 totalXlogIterTimes; + XlogFlushStatistics* xlogFlushStats; } knl_g_wal_context; typedef struct GlobalSeqInfoHashBucket { @@ -809,6 +962,7 @@ typedef struct knl_g_archive_context { struct ArchiveBarrierLsnInfo *barrier_lsn_info; slock_t barrier_lock; int max_node_cnt; + int chosen_walsender_index; bool in_switchover; bool in_service_truncate; } knl_g_archive_context; @@ -904,6 +1058,11 @@ typedef struct knl_g_archive_thread_info { typedef struct knl_g_roach_context { bool isRoachRestore; + char* targetTimeInPITR; + char* globalBarrierRecordForPITR; + bool isXLogForceRecycled; + bool isGtmFreeCsn; + char* targetRestoreTimeFromMedia; } knl_g_roach_context; /* Added for streaming disaster recovery */ @@ -912,9 +1071,23 @@ typedef struct knl_g_streaming_dr_context { bool isInSwitchover; bool isInteractionCompleted; XLogRecPtr switchoverBarrierLsn; - TimestampTz lastRequestTimestamp; + int64 rpoSleepTime; + int64 rpoBalanceSleepTime; + char currentBarrierId[MAX_BARRIER_ID_LENGTH]; + char targetBarrierId[MAX_BARRIER_ID_LENGTH]; + slock_t mutex; /* locks shared variables shown above */ } knl_g_streaming_dr_context; +typedef struct knl_g_startup_context { + uint32 remoteReadPageNum; + HTAB *badPageHashTbl; + char page[BLCKSZ]; + XLogReaderState *current_record; + volatile uint32 BadFileNum; + ThreadId startup_tid; + XLogRecPtr suspend_lsn; +}knl_g_startup_context; + typedef struct knl_instance_context { knl_virtual_role role; volatile int status; @@ -971,7 +1144,6 @@ typedef struct knl_instance_context { /* load ir file count for each session */ long codegen_IRload_process_count; uint64 global_session_seq; - char stopBarrierId[MAX_BARRIER_ID_LENGTH]; struct HTAB* vec_func_hash; @@ -992,12 +1164,14 @@ typedef struct knl_instance_context { knl_g_cost_context cost_cxt; knl_g_comm_context comm_cxt; knl_g_comm_logic_context comm_logic_cxt; + knl_g_pooler_context pooler_cxt; knl_g_conn_context conn_cxt; knl_g_libpq_context libpq_cxt; struct knl_g_wlm_context* wlm_cxt; knl_g_ckpt_context ckpt_cxt; knl_g_ckpt_context* ckpt_cxt_ctl; knl_g_bgwriter_context bgwriter_cxt; + knl_g_repair_context repair_cxt; struct knl_g_dw_context dw_batch_cxt; struct knl_g_dw_context dw_single_cxt; knl_g_shmem_context shmem_cxt; @@ -1009,6 +1183,7 @@ typedef struct knl_instance_context { knl_g_numa_context numa_cxt; knl_g_undo_context undo_cxt; + knl_g_flashback_context flashback_cxt; #ifdef ENABLE_MOT 
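`CurCMAProcCount` joins the existing `CurConnCount`/`CurCMAConnCount` fields of `knl_g_conn_context`, which are updated under `ConnCountLock` so cm_agent connections can be capped without starving regular sessions. A compact sketch of that admission pattern; the quota value and the `TryAdmitCMAgent` helper are illustrative assumptions, and a `std::mutex` stands in for the kernel's `slock_t`:

```cpp
#include <mutex>

struct ConnCounters {
    int curConnCount = 0;     /* all client connections */
    int curCMAConnCount = 0;  /* cm_agent connections after initialize */
    std::mutex connCountLock; /* stands in for the slock_t ConnCountLock */
};

/* Admit a cm_agent connection only while it stays under its quota,
 * so agent traffic cannot exhaust the connection limit. */
static bool TryAdmitCMAgent(ConnCounters& c, int cmaLimit) {
    std::lock_guard<std::mutex> guard(c.connCountLock);
    if (c.curCMAConnCount >= cmaLimit) {
        return false; /* over quota: reject before backend startup */
    }
    ++c.curCMAConnCount;
    ++c.curConnCount;
    return true;
}

int main() {
    ConnCounters counters;
    bool first = TryAdmitCMAgent(counters, 1);  /* admitted */
    bool second = TryAdmitCMAgent(counters, 1); /* rejected */
    return (first && !second) ? 0 : 1;
}
```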
knl_g_mot_context mot_cxt; @@ -1019,10 +1194,11 @@ typedef struct knl_instance_context { knl_g_streaming_context streaming_cxt; knl_g_csnminsync_context csnminsync_cxt; knl_g_barrier_creator_context barrier_creator_cxt; + knl_g_csn_barrier_context csn_barrier_cxt; + GlobalSysDBCache global_sysdbcache; knl_g_archive_context archive_obs_cxt; knl_g_archive_thread_info archive_thread_info; struct HTAB* ngroup_hash_table; - struct HTAB* mmapCache; knl_g_hypo_context hypo_cxt; knl_g_segment_context segment_cxt; @@ -1032,12 +1208,14 @@ typedef struct knl_instance_context { knl_g_roach_context roach_cxt; knl_g_streaming_dr_context streaming_dr_cxt; struct PLGlobalPackageRuntimeCache* global_session_pkg; - pg_atomic_uint32 extensionNum; + knl_g_startup_context startup_cxt; #ifndef ENABLE_MULTIPLE_NODES void *raw_parser_hook[DB_CMPT_MAX]; void *plsql_parser_hook[DB_CMPT_MAX]; #endif + pg_atomic_uint32 extensionNum; + knl_g_audit_context audit_cxt; } knl_instance_context; extern long random(); @@ -1053,10 +1231,8 @@ extern void add_numa_alloc_info(void* numaAddr, size_t length); #define GTM_FREE_MODE (g_instance.attr.attr_storage.enable_gtm_free || \ g_instance.attr.attr_storage.gtm_option == GTMOPTION_GTMFREE) -#define GTM_MODE (!g_instance.attr.attr_storage.enable_gtm_free && \ - (g_instance.attr.attr_storage.gtm_option == GTMOPTION_GTM)) -#define GTM_LITE_MODE (!g_instance.attr.attr_storage.enable_gtm_free && \ - g_instance.attr.attr_storage.gtm_option == GTMOPTION_GTMLITE) +#define GTM_MODE (false) +#define GTM_LITE_MODE (!(GTM_FREE_MODE)) #define REDO_FINISH_STATUS_LOCAL 0x00000001 #define REDO_FINISH_STATUS_CM 0x00000002 @@ -1067,6 +1243,9 @@ extern void add_numa_alloc_info(void* numaAddr, size_t length); #define GLOBAL_PLANCACHE_MEMCONTEXT \ (g_instance.cache_cxt.global_plancache_mem[u_sess->session_id % MAX_GLOBAL_CACHEMEM_NUM]) +#define GLOBAL_PRC_MEMCONTEXT \ + (g_instance.cache_cxt.global_prc_mem[u_sess->session_id % MAX_GLOBAL_PRC_NUM]) + #define DISABLE_MULTI_NODES_GPI (u_sess->attr.attr_storage.default_index_kind == DEFAULT_INDEX_KIND_NONE) #define DEFAULT_CREATE_LOCAL_INDEX (u_sess->attr.attr_storage.default_index_kind == DEFAULT_INDEX_KIND_LOCAL) #define DEFAULT_CREATE_GLOBAL_INDEX (u_sess->attr.attr_storage.default_index_kind == DEFAULT_INDEX_KIND_GLOBAL) diff --git a/src/include/knl/knl_session.h b/src/include/knl/knl_session.h index 0605ace02..636706693 100644 --- a/src/include/knl/knl_session.h +++ b/src/include/knl/knl_session.h @@ -65,6 +65,7 @@ #include "storage/shmem.h" #include "utils/palloc.h" #include "utils/memgroup.h" +#include "storage/lock/lock.h" typedef void (*pg_on_exit_callback)(int code, Datum arg); @@ -190,7 +191,7 @@ typedef struct knl_u_sig_context { * like got_PoolReload, but just for the compute pool. * see CPmonitor_MainLoop for more details. 
 */
-    volatile sig_atomic_t got_PoolReload;
+    volatile sig_atomic_t got_pool_reload;
     volatile sig_atomic_t cp_PoolReload;
 } knl_u_sig_context;
@@ -199,6 +200,8 @@ class AutonomousSession;
 typedef struct TableOfIndexPass {
     Oid tableOfIndexType = InvalidOid;
     HTAB* tableOfIndex = NULL;
+    int tableOfNestLayer = -1;    /* number of layers of this tablevar */
+    int tableOfGetNestLayer = -1; /* number of layers of this tablevar that need to be fetched */
 } TableOfIndexPass;

 typedef struct knl_u_SPI_context {
@@ -249,6 +252,8 @@ typedef struct knl_u_SPI_context {
     struct CachedPlan* cur_spi_cplan;
     /* save table's index into session, for pass into function */
     struct TableOfIndexPass* cur_tableof_index;
+
+    bool has_stream_in_cursor_or_forloop_sql;
 } knl_u_SPI_context;

 typedef struct knl_u_index_context {
@@ -566,6 +571,8 @@ typedef struct knl_u_utils_context {

     TransactionId RecentXmin;

+    TransactionId RecentDataXmin;
+
     TransactionId RecentGlobalXmin;

     TransactionId RecentGlobalDataXmin;
@@ -623,10 +630,6 @@ typedef struct knl_u_utils_context {

     bool enable_memory_context_control;

     syscalllock deleMemContextMutex;
-
-    struct _DestReceiver* donothingDR;
-    struct _DestReceiver* debugtupDR;
-    struct _DestReceiver* spi_printtupDR;
 } knl_u_utils_context;

 typedef struct knl_u_security_context {
@@ -1398,6 +1401,7 @@ typedef struct knl_u_xact_context {
     List *sendSeqSchmaName;
     List *sendSeqName;
     List *send_result;
+    Oid ActiveLobRelid;
 } knl_u_xact_context;

 typedef struct PLpgSQL_compile_context {
@@ -1415,6 +1419,7 @@ typedef struct PLpgSQL_compile_context {
     struct PLpgSQL_datum** plpgsql_Datums;
     struct PLpgSQL_function* plpgsql_curr_compile;
+    bool* datum_need_free; /* need to free datum when freeing function/package memory? */

     bool plpgsql_DumpExecTree;
     bool plpgsql_pkg_DumpExecTree;
@@ -1498,6 +1503,7 @@ typedef struct knl_u_plpgsql_context {
     /* pl_exec.cpp */
     struct EState* simple_eval_estate;
+    struct ResourceOwnerData* shared_simple_eval_resowner;
     struct SimpleEcontextStackEntry* simple_econtext_stack;
     struct PLpgSQL_pkg_execstate* pkg_execstate;
@@ -1544,6 +1550,10 @@ typedef struct knl_u_plpgsql_context {
     /* xact context still attached by SPI while transaction is terminated. */
     void *spi_xact_context;
+
+    /* store the reserved subxact resowner's scope temporarily during finishing. */
+    int64 minSubxactStackId;
+
     int package_as_line;
     int package_first_line;
     int procedure_start_line;
@@ -1555,9 +1565,27 @@
     bool isCreateFunction;
     bool need_pkg_dependencies;
     List* pkg_dependencies;
-    struct SessionPackageRuntime* auto_parent_session_pkgs;
-    bool not_found_parent_session_pkgs;
-    char* sourceText;
+    List* func_tableof_index;
+    TupleDesc pass_func_tupdesc; /* save tupdesc for an inner function to pass its out param to the outer function's value */
+
+    int portal_depth; /* portal depth of current thread */
+
+    /* ----------
+     * variables for autonomous procedure out ref cursor params and referenced package variables
+     * ----------
+     */
+    struct SessionPackageRuntime* auto_parent_session_pkgs; /* package values from the parent session */
+    bool not_found_parent_session_pkgs; /* set when parent session package values were not found */
+    List* storedPortals; /* returned portal data from the auto session */
+    List* portalContext; /* portal context from the parent session */
+    bool call_after_auto; /* called after an autonomous transaction procedure?
*/ + uint64 parent_session_id; + ThreadId parent_thread_id; + MemoryContext parent_context; /* parent_context from parent session */ + Oid ActiveLobToastOid; + struct ExceptionContext* cur_exception_cxt; + bool pragma_autonomous; /* save autonomous flag */ + char* debug_query_string; } knl_u_plpgsql_context; //this is used to define functions in package @@ -2084,6 +2112,14 @@ typedef struct knl_u_unique_sql_context { bool is_top_unique_sql; bool need_update_calls; + /* + * For example, the execute direct on statement enters the unique SQL hook + * for multiple times in the parse_analyze function. + * The sub-statements of the statement do not need to invoke the hook. + * Instead, the complete statement needs to generate the unique SQL ID. + */ + uint64 skipUniqueSQLCount; + /* sort and hash instrment states */ struct unique_sql_sorthash_instr* unique_sql_sort_instr; struct unique_sql_sorthash_instr* unique_sql_hash_instr; @@ -2170,7 +2206,7 @@ typedef struct knl_u_user_login_context { #define MAXINVALMSGS 32 typedef struct knl_u_inval_context { - int32 deepthInAcceptInvalidationMessage; + int32 DeepthInAcceptInvalidationMessage; struct TransInvalidationInfo* transInvalInfo; @@ -2192,7 +2228,9 @@ typedef struct knl_u_inval_context { int partcache_callback_count; - uint64 SharedInvalidMessageCounter; + uint64 SIMCounter; /* SharedInvalidMessageCounter, there are two counter, both on sess and thrd, + u_sess->inval_cxt.SIMCounter; + if (EnableLocalSysCache()) {t_thrd.lsc_cxt.lsc->inval_cxt.SIMCounter;} */ volatile sig_atomic_t catchupInterruptPending; @@ -2362,6 +2400,7 @@ typedef struct knl_u_pgxc_context { /* Current size of dn_handles and co_handles */ int NumDataNodes; int NumCoords; + int NumTotalDataNodes; /* includes all DN in disaster cluster*/ int NumStandbyDataNodes; /* Number of connections held */ @@ -2375,6 +2414,9 @@ typedef struct knl_u_pgxc_context { Oid primary_data_node; int num_preferred_data_nodes; Oid preferred_data_node[MAX_PREFERRED_NODES]; + int* disasterReadArray; /* array to save dn node index for disaster read */ + bool DisasterReadArrayInit; + /* * Datanode handles saved in session memory context * when PostgresMain is launched. 
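The comment on `skipUniqueSQLCount` describes a depth counter: `EXECUTE DIRECT ON` drives the unique SQL hook through `parse_analyze` several times, and only the outermost, complete statement should generate the unique SQL id. A sketch of that suppression idea; `UniqueSqlSkipGuard` and `UniqueSqlHook` are hypothetical names, not the kernel's:

```cpp
#include <cstdint>
#include <iostream>

static std::uint64_t skipUniqueSQLCount = 0; /* session-local depth counter */

/* RAII guard: sub-statements analyzed while the guard is alive are skipped. */
struct UniqueSqlSkipGuard {
    UniqueSqlSkipGuard() { ++skipUniqueSQLCount; }
    ~UniqueSqlSkipGuard() { --skipUniqueSQLCount; }
};

static void UniqueSqlHook(const char* stmt) {
    if (skipUniqueSQLCount > 0) {
        return; /* inner invocation: the outer statement owns the id */
    }
    std::cout << "unique sql id generated for: " << stmt << '\n';
}

int main() {
    UniqueSqlHook("EXECUTE DIRECT ON (dn1) 'select 1'"); /* outer: counted */
    {
        UniqueSqlSkipGuard guard;  /* entering sub-statement analysis */
        UniqueSqlHook("select 1"); /* inner: suppressed */
    }
    return 0;
}
```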
@@ -2388,6 +2430,9 @@ typedef struct knl_u_pgxc_context {
      */
     struct pgxc_node_handle* co_handles;

+    uint32 num_skipnodes;
+    void* skipnodes;
+
     struct RemoteXactState* remoteXactState;

     int PGXCNodeId;
@@ -2412,6 +2457,7 @@
     bool PoolerResendParams;
     struct PGXCNodeConnectionInfo* PoolerConnectionInfo;
     struct PoolAgent* poolHandle;
+    bool ConsistencyPointUpdating;

     List* connection_cache;
     List* connection_cache_handle;
@@ -2543,16 +2589,11 @@ typedef struct sess_orient{
 }sess_orient;

 struct SessionInfo;
-typedef struct GlobalSessionId {
-    uint64 sessionId; /* Increasing sequence num */
-    uint32 nodeId;    /* the number of the send node */
-    /* Used to identify the latest global sessionid during pooler reuse */
-    uint64 seq;
-} GlobalSessionId;
-typedef struct knl_u_hook_context {
-    void *analyzerRoutineHook;
-} knl_u_hook_context;
+#define MAX_TRACE_ID_SIZE 33
+typedef struct knl_u_trace_context {
+    char trace_id[MAX_TRACE_ID_SIZE];
+} knl_u_trace_context;

 struct ReplicationState;
 struct ReplicationStateShmStruct;
@@ -2571,15 +2612,33 @@ typedef struct knl_u_rep_origin_context {
     ReplicationStateShmStruct *repStatesShm;
 } knl_u_rep_origin_context;

+/* Records the start and end time of session initialization for a client connection */
+typedef struct knl_u_clientConnTime_context {
+    instr_time connStartTime;
+    instr_time connEndTime;
+
+    /* Flag indicating whether this session is still in the initial (client connect) process */
+    bool checkOnlyInConnProcess;
+} knl_u_clientConnTime_context;
+
+typedef struct knl_u_hook_context {
+    void *analyzerRoutineHook;
+} knl_u_hook_context;
+
 typedef struct knl_session_context {
     volatile knl_session_status status;
+    /* used for threadworker, elem in m_readySessionList */
     Dlelem elem;
+    /* used for threadworker && gsc, elems in m_session_bucket;
+     * this variable is used for syscache hits */
+    Dlelem elem2;

     ThreadId attachPid;

     MemoryContext top_mem_cxt;
     MemoryContext cache_mem_cxt;
     MemoryContext top_transaction_mem_cxt;
+    MemoryContext dbesql_mem_cxt;
     MemoryContext self_mem_cxt;
     MemoryContext top_portal_cxt;
     MemoryContext probackup_context;
@@ -2589,6 +2648,7 @@ typedef struct knl_session_context {
     int session_ctr_index;
     uint64 session_id;
     GlobalSessionId globalSessionId;
+    knl_u_trace_context trace_cxt;
     uint64 debug_query_id;
     List* ts_cached_queryid;
@@ -2685,8 +2745,16 @@ typedef struct knl_session_context {
     instr_time last_access_time;

-    knl_u_hook_context hook_cxt;
     knl_u_rep_origin_context reporigin_cxt;
+
+    /*
+     * Context which records the time taken to establish a client connection.
+     * Timing starts when the incoming request arrives (poll() waking the server to accept())
+     * and ends when the server returns its first message on the client fd.
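`knl_u_clientConnTime_context` keeps an `instr_time` pair so the elapsed setup cost can be computed once the session is up. A self-contained sketch of the same sampling flow, using `std::chrono` as a stand-in; in-tree code would use the `INSTR_TIME_*` macros from portability/instr_time.h instead:

```cpp
#include <chrono>
#include <iostream>

/* Stand-in for the instr_time pair kept in knl_u_clientConnTime_context. */
struct ClientConnTime {
    std::chrono::steady_clock::time_point connStartTime;
    std::chrono::steady_clock::time_point connEndTime;
    bool checkOnlyInConnProcess = false;
};

int main() {
    ClientConnTime t;
    t.checkOnlyInConnProcess = true; /* connection setup begins */
    t.connStartTime = std::chrono::steady_clock::now();

    /* ... accept(), authentication, session initialization ... */

    t.connEndTime = std::chrono::steady_clock::now();
    t.checkOnlyInConnProcess = false; /* setup finished */

    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
        t.connEndTime - t.connStartTime).count();
    std::cout << "connection established in " << us << " us\n";
    return 0;
}
```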
+ */ + struct knl_u_clientConnTime_context clientConnTime_cxt; + + knl_u_hook_context hook_cxt; } knl_session_context; enum stp_xact_err_type { @@ -2697,10 +2765,13 @@ enum stp_xact_err_type { STP_XACT_AFTER_TRIGGER_BEGIN, STP_XACT_PACKAGE_INSTANTIATION, STP_XACT_IN_TRIGGER, - STP_XACT_COMPL_SQL + STP_XACT_IMMUTABLE, + STP_XACT_COMPL_SQL, + STP_XACT_TOO_MANY_PORTAL }; - +extern void knl_u_inval_init(knl_u_inval_context* inval_cxt); +extern void knl_u_relmap_init(knl_u_relmap_context* relmap_cxt); extern void knl_session_init(knl_session_context* sess_cxt); extern void knl_u_executor_init(knl_u_executor_context* exec_cxt); extern knl_session_context* create_session_context(MemoryContext parent, uint64 id); @@ -2743,7 +2814,6 @@ inline void stp_reset_opt_values() u_sess->SPI_cxt.is_proconfig_set = false; u_sess->SPI_cxt.portal_stp_exception_counter = 0; u_sess->plsql_cxt.stp_savepoint_cnt = 0; - u_sess->plsql_cxt.nextStackEntryId = 0; } inline void stp_reset_xact_state_and_err_msg(bool savedisAllowCommitRollback, bool needResetErrMsg) diff --git a/src/include/knl/knl_thread.h b/src/include/knl/knl_thread.h index 0b956ed61..58e5847c2 100644 --- a/src/include/knl/knl_thread.h +++ b/src/include/knl/knl_thread.h @@ -1,8 +1,8 @@ /* + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd. * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * --------------------------------------------------------------------------------------- * @@ -53,6 +53,7 @@ #include "knl/knl_session.h" #include "nodes/pg_list.h" #include "storage/lock/s_lock.h" +#include "utils/knl_localsysdbcache.h" #include "utils/palloc.h" #include "storage/latch.h" #include "portability/instr_time.h" @@ -74,9 +75,11 @@ #include "port/pg_crc32c.h" #define MAX_PATH_LEN 1024 -#define RESERVE_SIZE 49 +#define RESERVE_SIZE 52 +#define PARTKEY_VALUE_MAXNUM 64 typedef struct ResourceOwnerData* ResourceOwner; +typedef struct logicalLog logicalLog; typedef struct knl_t_codegen_context { void* thr_codegen_obj; @@ -164,6 +167,60 @@ typedef struct xllist { uint32 total_len; /* total data bytes in chain */ } xllist; +typedef struct { + volatile uint64 totalDuration; + volatile uint64 counter; + volatile uint64 startTime; +}RedoTimeCost; + +typedef enum { + TIME_COST_STEP_1 = 0, + TIME_COST_STEP_2, + TIME_COST_STEP_3, + TIME_COST_STEP_4, + TIME_COST_STEP_5, + TIME_COST_STEP_6, + TIME_COST_STEP_7, + TIME_COST_STEP_8, + TIME_COST_STEP_9, + TIME_COST_NUM, +} TimeCostPosition; + +/* +for extreme rto +thread step1 step2 step3 step4 + step5 step6 step7 step8 +redo batch get a record redo record(total) update stanbystate parse xlog + dispatch to redo manager null null null +redo manager get a record redo record(total) proc page xlog redo ddl + dispatch to redo worker null null null +redo worker get a record redo record(total) redo page xlog(total) read xlog page + redo page xlog redi other xlog fsm update full sync wait +trxn mamanger get a record redo record(total) update flush lsn wait sync work + dispatch to trxn worker global lsn update null null +trxn worker get a record redo record(total) redo xlog update thread lsn + full sync wait null null null +read worker get xlog page(total) read xlog page change segment null + null null null null +read page worker get a record make lsn forwarder get new item put to dispatch thread + update thread lsn crc 
check null null +startup get a record check stop delay redo dispatch(total) + decode null null null + +for parallel redo +thread step1 step2 step3 step4 step5 + step6 step7 step8 step9 +page redo get a record redo record(total) update stanbystate redo undo log redo share trxn log + redo sync trxn log redo single log redo all workers log redo multi workers log +startup read a record check stop delay redo dispatch(total) trxn apply + force apply wait null null null +*/ + +typedef struct RedoWorkerTimeCountsInfo { + char *worker_name; + RedoTimeCost *time_cost; +}RedoWorkerTimeCountsInfo; + typedef struct knl_t_xact_context { /* var in transam.cpp */ typedef uint64 CommitSeqNo; @@ -295,12 +352,6 @@ typedef struct knl_t_xact_context { bool XactPrepareSent; bool AlterCoordinatorStmt; - /* white-box check TransactionID, when there is no 2pc - * the thread local variable save the executor cn commit(abort) xid - * compare with the remote-commit xid in other CNs and DNs - */ - TransactionId XactXidStoreForCheck; - TransactionId reserved_nextxid_check; /* * Some commands want to force synchronous commit. */ @@ -311,7 +362,6 @@ typedef struct knl_t_xact_context { * when we've run out of memory. */ MemoryContext TransactionAbortContext; - struct GTMCallbackItem* GTM_callbacks; struct GTMCallbackItem* Seq_callbacks; LocalTransactionId lxid; @@ -355,10 +405,16 @@ typedef struct knl_t_xact_context { int PGXCGroupOid; int PGXCNodeId; bool inheritFileNode; + bool enable_lock_cancel; #endif + TransactionId XactXidStoreForCheck; + Oid ActiveLobRelid; + bool isSelectInto; } knl_t_xact_context; +typedef struct RepairBlockKey RepairBlockKey; typedef void (*RedoInterruptCallBackFunc)(void); +typedef void (*RedoPageRepairCallBackFunc)(RepairBlockKey key, XLogPhyBlock pblk); typedef struct knl_t_xlog_context { #define MAXFNAMELEN 64 @@ -585,9 +641,6 @@ typedef struct knl_t_xlog_context { * record from and failed. */ unsigned int failedSources; /* OR of XLOG_FROM_* codes */ - - bool readfrombuffer; - /* * These variables track when we last obtained some WAL data to process, * and where we got it from. (XLogReceiptSource is initially the same as @@ -694,7 +747,6 @@ typedef struct knl_t_xlog_context { MemoryContext gist_opCtx; MemoryContext gin_opCtx; - bool redo_oldversion_xlog; /* * Statistics for current checkpoint are collected in this global struct. * Because only the checkpointer or a stand-alone backend can perform @@ -712,17 +764,18 @@ typedef struct knl_t_xlog_context { XLogRecPtr max_page_flush_lsn; bool permit_finish_redo; -#ifndef ENABLE_MULTIPLE_NODES /* redo RM_STANDBY_ID record committing csn's transaction id */ List* committing_csn_list; -#endif + RedoInterruptCallBackFunc redoInterruptCallBackFunc; + RedoPageRepairCallBackFunc redoPageRepairCallBackFunc; void *xlog_atomic_op; /* Record current xlog lsn to avoid pass parameter to underlying functions level-to-level */ XLogRecPtr current_redo_xlog_lsn; /* for switchover failed when load xlog record invalid retry count */ int currentRetryTimes; + RedoTimeCost timeCost[TIME_COST_NUM]; } knl_t_xlog_context; typedef struct knl_t_dfs_context { @@ -1161,9 +1214,6 @@ typedef struct knl_t_format_context { typedef struct knl_t_audit_context { bool Audit_delete; - /* - + * Flags set by interrupt handlers for later service in the main loop. 
- + */ /* for only sessionid needed by SDBSS */ TimestampTz user_login_time; volatile sig_atomic_t need_exit; @@ -1180,6 +1230,8 @@ typedef struct knl_t_audit_context { time_t last_pgaudit_start_time; struct AuditIndexTable* audit_indextbl; char pgaudit_filepath[MAXPGPATH]; + + int cur_thread_idx; #define NBUFFER_LISTS 256 List* buffer_lists[NBUFFER_LISTS]; } knl_stat_context; @@ -1228,6 +1280,7 @@ typedef struct knl_t_arch_context { XLogRecPtr pitr_task_last_lsn; TimestampTz arch_start_timestamp; + XLogRecPtr arch_start_lsn; /* millsecond */ int task_wait_interval; int sync_walsender_idx; @@ -1250,6 +1303,7 @@ typedef struct knl_t_barrier_arch_context { volatile sig_atomic_t wakened; char* slot_name; char barrierName[MAX_BARRIER_ID_LENGTH]; + XLogRecPtr lastArchiveLoc; }knl_t_barrier_arch_context; @@ -1512,12 +1566,21 @@ typedef struct knl_t_bgwriter_context { typedef struct knl_t_pagewriter_context { volatile sig_atomic_t got_SIGHUP; volatile sig_atomic_t shutdown_requested; + volatile sig_atomic_t sync_requested; int page_writer_after; int pagewriter_id; uint64 next_flush_time; uint64 next_scan_time; } knl_t_pagewriter_context; +typedef struct knl_t_pagerepair_context { + volatile sig_atomic_t got_SIGHUP; + volatile sig_atomic_t shutdown_requested; + volatile sig_atomic_t page_repair_requested; + volatile sig_atomic_t file_repair_requested; +} knl_t_pagerepair_context; + + typedef struct knl_t_sharestoragexlogcopyer_context_ { volatile sig_atomic_t got_SIGHUP; volatile sig_atomic_t shutdown_requested; @@ -1719,14 +1782,12 @@ typedef struct knl_t_utils_context { */ int ContextUsedCount; struct PartitionIdentifier* partId; -#define RANGE_PARTKEYMAXNUM 4 - struct Const* valueItemArr[RANGE_PARTKEYMAXNUM]; + struct Const* valueItemArr[PARTKEY_VALUE_MAXNUM + 1]; struct ResourceOwnerData* TopResourceOwner; struct ResourceOwnerData* CurrentResourceOwner; struct ResourceOwnerData* STPSavedResourceOwner; struct ResourceOwnerData* CurTransactionResourceOwner; struct ResourceOwnerData* TopTransactionResourceOwner; - struct ResourceReleaseCallbackItem* ResourceRelease_callbacks; bool SortColumnOptimize; struct RelationData* pRelatedRel; @@ -1786,12 +1847,14 @@ typedef struct knl_t_pgxc_context { int* shmemNumCoordsInCluster; int* shmemNumDataNodes; int* shmemNumDataStandbyNodes; + int* shmemNumSkipNodes; /* Shared memory tables of node definitions */ struct NodeDefinition* coDefs; struct NodeDefinition* coDefsInCluster; struct NodeDefinition* dnDefs; struct NodeDefinition* dnStandbyDefs; + struct SkipNodeDefinition* skipNodes; /* pgxcnode.cpp */ struct PGXCNodeNetCtlLayer* pgxc_net_ctl; @@ -1850,6 +1913,7 @@ typedef struct { volatile sig_atomic_t shutdown_requested; volatile sig_atomic_t got_SIGHUP; volatile sig_atomic_t sleep_long; + volatile sig_atomic_t check_repair; } knl_t_page_redo_context; typedef struct knl_t_startup_context { @@ -1868,6 +1932,7 @@ typedef struct knl_t_startup_context { * that it's safe to just proc_exit. 
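The new `knl_t_pagerepair_context` follows the same convention as the pagewriter and startup contexts: signal handlers only set a `volatile sig_atomic_t` flag, and the thread's main loop polls and clears it. A minimal POSIX-style sketch of that loop shape; the handler names and loop body are illustrative:

```cpp
#include <csignal>
#include <cstdio>

/* Flags set by signal handlers, serviced later in the main loop. */
static volatile sig_atomic_t got_SIGHUP = 0;
static volatile sig_atomic_t shutdown_requested = 0;

extern "C" void SigHupHandler(int) { got_SIGHUP = 1; }
extern "C" void ShutdownHandler(int) { shutdown_requested = 1; }

int main() {
    std::signal(SIGHUP, SigHupHandler);
    std::signal(SIGTERM, ShutdownHandler);

    std::raise(SIGTERM); /* simulate a shutdown request */

    while (!shutdown_requested) {
        if (got_SIGHUP) {
            got_SIGHUP = 0;
            /* reload configuration here */
        }
        /* ... do one unit of page-repair work ... */
    }
    std::puts("clean shutdown");
    return 0;
}
```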
*/ volatile sig_atomic_t in_restore_command; + volatile sig_atomic_t check_repair; struct notifysignaldata* NotifySigState; } knl_t_startup_context; @@ -2081,6 +2146,7 @@ typedef struct knl_t_libwalreceiver_context { char* recvBuf; char* shared_storage_buf; char* shared_storage_read_buf; + char* decompressBuf; XLogReaderState* xlogreader; LibpqrcvConnectParam connect_param; } knl_t_libwalreceiver_context; @@ -2302,28 +2368,39 @@ typedef struct knl_t_walsender_context { /* for config_file */ char gucconf_file[MAXPGPATH]; char gucconf_lock_file[MAXPGPATH]; + char slotname[NAMEDATALEN]; /* the dummy data reader fd for the wal streaming */ FILE* ws_dummy_data_read_file_fd; uint32 ws_dummy_data_read_file_num; /* Missing CU checking stuff */ struct cbmarray* CheckCUArray; struct LogicalDecodingContext* logical_decoding_ctx; + struct ParallelLogicalDecodingContext* parallel_logical_decoding_ctx; XLogRecPtr logical_startptr; int remotePort; /* Have we caught up with primary? */ bool walSndCaughtUp; + int LogicalSlot; /* Notify primary to advance logical replication slot. */ struct pg_conn* advancePrimaryConn; /* Timestamp of the last check-timeout time in WalSndCheckTimeOut. */ TimestampTz last_check_timeout_timestamp; + + /* Read data from WAL into xlogReadBuf, then compress it to compressBuf */ + char *xlogReadBuf; + char *compressBuf; + /* flag set in WalSndCheckTimeout */ bool isWalSndSendTimeoutMessage; int datafd; int ep_fd; + logicalLog* restoreLogicalLogHead; /* is obs backup, in this mode, skip backup replication slot */ bool is_obsmode; + bool standbyConnection; + bool cancelLogCtl; } knl_t_walsender_context; typedef struct knl_t_walreceiverfuncs_context { @@ -2362,9 +2439,31 @@ typedef struct knl_t_logical_context { uint64 sendSegNo; uint32 sendOff; bool ExportInProgress; + bool IsAreaDecode; ResourceOwner SavedResourceOwnerDuringExport; } knl_t_logical_context; +extern struct ParallelDecodeWorker** parallelDecodeWorker; + +typedef struct knl_t_parallel_decode_worker_context { + volatile sig_atomic_t shutdown_requested; + volatile sig_atomic_t got_SIGHUP; + volatile sig_atomic_t sleep_long; + int slotId; + int parallelDecodeId; +} knl_t_parallel_decode_worker_context; + +typedef struct knl_t_logical_read_worker_context { + volatile sig_atomic_t shutdown_requested; + volatile sig_atomic_t got_SIGHUP; + volatile sig_atomic_t sleep_long; + volatile sig_atomic_t got_SIGTERM; + MemoryContext ReadWorkerCxt; + ParallelDecodeWorker** parallelDecodeWorkers; + int slotId; + int totalWorkerCount; +} knl_t_logical_read_worker_context; + typedef struct knl_t_dataqueue_context { struct DataQueueData* DataSenderQueue; struct DataQueueData* DataWriterQueue; @@ -2594,6 +2693,7 @@ typedef struct knl_t_storage_context { char* pageCopy; char* segPageCopy; + bool isSwitchoverLockHolder; int num_held_lwlocks; struct LWLockHandle* held_lwlocks; int lock_addin_request; @@ -2624,6 +2724,9 @@ typedef struct knl_t_storage_context { struct HTAB* DataFileIdCache; /* Thread shared Seg Spc cache */ struct HTAB* SegSpcCache; + + struct HTAB* uidHashCache; + struct HTAB* DisasterCache; /* * Maximum number of file descriptors to open for either VFD entries or * AllocateFile/AllocateDir/OpenTransientFile operations. 
This is initialized @@ -2638,6 +2741,8 @@ typedef struct knl_t_storage_context { /* reserve `1000' for thread-private file id */ int max_userdatafiles; + int timeoutRemoteOpera; + } knl_t_storage_context; typedef struct knl_t_port_context { @@ -2659,6 +2764,15 @@ typedef struct knl_t_tsearch_context { int ntres; } knl_t_tsearch_context; +typedef enum { + NO_CHANGE = 0, + OLD_REPL_CHANGE_IP_OR_PORT, + ADD_REPL_CONN_INFO_WITH_OLD_LOCAL_IP_PORT, + ADD_REPL_CONN_INFO_WITH_NEW_LOCAL_IP_PORT, + ADD_DISASTER_RECOVERY_INFO +} ReplConnInfoChangeType; + + typedef struct knl_t_postmaster_context { /* Notice: the value is same sa GUC_MAX_REPLNODE_NUM */ #define MAX_REPLNODE_NUM 9 @@ -2682,7 +2796,7 @@ typedef struct knl_t_postmaster_context { * or standby on secondary. */ struct replconninfo* ReplConnArray[DOUBLE_MAX_REPLNODE_NUM + 1]; - bool ReplConnChanged[DOUBLE_MAX_REPLNODE_NUM + 1]; + int ReplConnChangeType[DOUBLE_MAX_REPLNODE_NUM + 1]; struct replconninfo* CrossClusterReplConnArray[MAX_REPLNODE_NUM]; bool CrossClusterReplConnChanged[MAX_REPLNODE_NUM]; struct hashmemdata* HaShmData; @@ -2764,6 +2878,7 @@ typedef struct knl_t_buf_context { char show_tcp_keepalives_count_buf[16]; char show_unix_socket_permissions_buf[8]; } knl_t_buf_context; +#define SQL_STATE_BUF_LEN 12 typedef struct knl_t_bootstrap_context { #define MAXATTR 40 @@ -2913,6 +3028,11 @@ typedef struct knl_t_bgworker_context { uint64 bgworkerId; } knl_t_bgworker_context; +typedef struct knl_t_index_advisor_context { + List* stmt_table_list; + List* stmt_target_list; +} +knl_t_index_advisor_context; #ifdef ENABLE_MOT /* MOT thread attributes */ @@ -2964,8 +3084,18 @@ typedef struct knl_t_mot_context { typedef struct knl_t_barrier_creator_context { volatile sig_atomic_t got_SIGHUP; volatile sig_atomic_t shutdown_requested; + bool is_first_barrier; + struct BarrierUpdateLastTimeInfo* barrier_update_last_time_info; + List* archive_slot_names; + uint64 first_cn_timeline; } knl_t_barrier_creator_context; +typedef struct knl_t_barrier_preparse_context { + volatile sig_atomic_t got_SIGHUP; + volatile sig_atomic_t shutdown_requested; +} knl_t_barrier_preparse_context; + + // the length of t_thrd.proxy_cxt.identifier #define IDENTIFIER_LENGTH 64 typedef struct knl_t_proxy_context { @@ -2975,12 +3105,13 @@ typedef struct knl_t_proxy_context { #define DCF_MAX_NODES 10 /* For log ctrl. Willing let standby flush and apply log under RTO seconds */ typedef struct DCFLogCtrlData { + int64 prev_sleep_time; int64 sleep_time; int64 balance_sleep_time; int64 prev_RTO; int64 current_RTO; uint64 sleep_count; - int64 sleep_count_limit; + uint64 sleep_count_limit; XLogRecPtr prev_flush; XLogRecPtr prev_apply; TimestampTz prev_reply_time; @@ -3064,6 +3195,12 @@ typedef struct knl_t_dcf_context { DcfContextInfo* dcfCtxInfo; } knl_t_dcf_context; +typedef struct knl_t_lsc_context { + LocalSysDBCache *lsc; + bool enable_lsc; + FetTupleFrom FetchTupleFromCatCList; +}knl_t_lsc_context; + /* replication apply launcher, for subscription */ typedef struct knl_t_apply_launcher_context { /* Flags set by signal handlers */ @@ -3111,6 +3248,8 @@ typedef struct knl_thrd_context { struct PGPROC* proc; struct PGXACT* pgxact; struct Backend* bn; + int child_slot; + bool is_inited; /* is new thread get new backend? 
*/ // we need to have a fake session to do some initialize knl_session_context* fake_session; @@ -3118,6 +3257,7 @@ typedef struct knl_thrd_context { MemoryContext top_mem_cxt; MemoryContextGroup* mcxt_group; + knl_t_lsc_context lsc_cxt; /* variables to support comm proxy */ CommSocketOption comm_sock_option; @@ -3136,7 +3276,9 @@ typedef struct knl_thrd_context { knl_t_bgwriter_context bgwriter_cxt; knl_t_bootstrap_context bootstrap_cxt; knl_t_pagewriter_context pagewriter_cxt; + knl_t_pagerepair_context pagerepair_cxt; knl_t_sharestoragexlogcopyer_context sharestoragexlogcopyer_cxt; + knl_t_barrier_preparse_context barrier_preparse_cxt; knl_t_buf_context buf_cxt; knl_t_bulkload_context bulk_cxt; knl_t_cbm_context cbm_cxt; @@ -3203,6 +3345,8 @@ typedef struct knl_thrd_context { knl_t_percentile_context percentile_cxt; knl_t_perf_snap_context perf_snap_cxt; knl_t_page_redo_context page_redo_cxt; + knl_t_parallel_decode_worker_context parallel_decode_cxt; + knl_t_logical_read_worker_context logicalreadworker_cxt; knl_t_heartbeat_context heartbeat_cxt; knl_t_security_policy_context security_policy_cxt; knl_t_security_ledger_context security_ledger_cxt; @@ -3233,6 +3377,7 @@ typedef struct knl_thrd_context { knl_t_proxy_context proxy_cxt; knl_t_dcf_context dcf_cxt; knl_t_bgworker_context bgworker_cxt; + knl_t_index_advisor_context index_advisor_cxt; knl_t_apply_launcher_context applylauncher_cxt; knl_t_apply_worker_context applyworker_cxt; knl_t_publication_context publication_cxt; @@ -3242,6 +3387,7 @@ typedef struct knl_thrd_context { extern void knl_thread_mot_init(); #endif +extern void knl_t_syscache_init(); extern void knl_thread_init(knl_thread_role role); extern THR_LOCAL knl_thrd_context t_thrd; @@ -3262,5 +3408,7 @@ inline bool StreamTopConsumerAmI() RedoInterruptCallBackFunc RegisterRedoInterruptCallBack(RedoInterruptCallBackFunc func); void RedoInterruptCallBack(); +RedoPageRepairCallBackFunc RegisterRedoPageRepairCallBack(RedoPageRepairCallBackFunc func); +void RedoPageRepairCallBack(RepairBlockKey key, XLogPhyBlock pblk); #endif /* SRC_INCLUDE_KNL_KNL_THRD_H_ */ diff --git a/src/include/lib/dllist.h b/src/include/lib/dllist.h index 985b3ff65..0c9850442 100644 --- a/src/include/lib/dllist.h +++ b/src/include/lib/dllist.h @@ -62,10 +62,16 @@ class DllistWithLock : public BaseObject { public: DllistWithLock(); ~DllistWithLock(); - void Remove(Dlelem* e); + void Remove(Dlelem* e) + { + (void)RemoveConfirm(e); + } + bool RemoveConfirm(Dlelem* e); void AddHead(Dlelem* e); void AddTail(Dlelem* e); Dlelem* RemoveHead(); + Dlelem* RemoveHeadNoLock(); + Dlelem* RemoveTail(); bool IsEmpty(); Dlelem* GetHead(); void GetLock(); diff --git a/src/include/libcomm/libcomm.h b/src/include/libcomm/libcomm.h index d9f12a4bb..15e082551 100644 --- a/src/include/libcomm/libcomm.h +++ b/src/include/libcomm/libcomm.h @@ -127,6 +127,7 @@ #define MAX_CN_NODE_NUM 1024 #define MAX_CN_DN_NODE_NUM (MAX_DN_NODE_NUM + MAX_CN_NODE_NUM) //(MaxCoords+MaxDataNodes) #define MIN_CN_DN_NODE_NUM (1 + 1) //(1 CN + 1 DN) +#define DOUBLE_NAMEDATALEN 128 #define SEC_TO_MICRO_SEC 1000 diff --git a/src/include/libpq/auth.h b/src/include/libpq/auth.h index 876a43af8..a4da81ab9 100644 --- a/src/include/libpq/auth.h +++ b/src/include/libpq/auth.h @@ -15,20 +15,22 @@ #define AUTH_H #include "libpq/libpq-be.h" +#ifdef ENABLE_GSS #include "gssapi/gssapi.h" #include "gssapi/gssapi_krb5.h" - +#endif #define INITIAL_USER_ID 10 #define POSTFIX_LENGTH 8 /* The struct for gss kerberos authentication. 
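In the `DllistWithLock` change above, `Remove` becomes an inline wrapper over the new `RemoveConfirm`, whose boolean result tells a caller whether the element was still linked, which matters when another thread may already have claimed it via `RemoveHead`. A simplified sketch of the delegation, assuming a mutex-guarded `std::list` in place of the real lock-protected Dllist:

```cpp
#include <list>
#include <mutex>

struct Dlelem { int payload = 0; };

class DllistWithLock {
public:
    void AddTail(Dlelem* e) {
        std::lock_guard<std::mutex> g(m_lock);
        m_list.push_back(e);
    }
    /* Returns true only if e was still on the list; a concurrent
     * RemoveHead may have unlinked it first. */
    bool RemoveConfirm(Dlelem* e) {
        std::lock_guard<std::mutex> g(m_lock);
        for (auto it = m_list.begin(); it != m_list.end(); ++it) {
            if (*it == e) {
                m_list.erase(it);
                return true;
            }
        }
        return false;
    }
    /* Old interface, kept as a wrapper that ignores the outcome. */
    void Remove(Dlelem* e) { (void)RemoveConfirm(e); }

private:
    std::mutex m_lock;
    std::list<Dlelem*> m_list;
};

int main() {
    DllistWithLock list;
    Dlelem e;
    list.AddTail(&e);
    bool owned = list.RemoveConfirm(&e); /* true: we unlinked it */
    bool again = list.RemoveConfirm(&e); /* false: already gone */
    return (owned && !again) ? 0 : 1;
}
```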
*/ typedef struct GssConn { int sock; - +#ifdef ENABLE_GSS gss_ctx_id_t gctx; /* GSS context */ gss_name_t gtarg_nam; /* GSS target name */ gss_buffer_desc ginbuf; /* GSS input token */ gss_buffer_desc goutbuf; /* GSS output token */ +#endif } GssConn; extern char* pg_krb_server_hostname; diff --git a/src/include/libpq/cl_state.h b/src/include/libpq/cl_state.h index 8af0d3a62..a44ad035a 100644 --- a/src/include/libpq/cl_state.h +++ b/src/include/libpq/cl_state.h @@ -37,6 +37,7 @@ #include "client_logic_cache/icached_column_manager.h" #include "client_logic_common/client_logic_utils.h" #include "client_logic_data_fetcher/data_fetcher_manager.h" + #include "client_logic/client_logic_enums.h" typedef struct pg_conn PGconn; @@ -83,6 +84,7 @@ public: PreparedStatementsList *pendingStatements; char lastStmtName[NAMEDATALEN]; + ObjectFqdn *droppedSchemas; size_t droppedSchemas_size; size_t droppedSchemas_allocated; @@ -92,7 +94,6 @@ public: size_t droppedColumnSettings_allocated; ExecStatusType m_lastResultStatus; bool isInvalidOperationOnColumn; - bool should_refresh_function; bool isDuringRefreshCacheOnError; bool is_external_err; CacheRefreshType cacheRefreshType; @@ -102,8 +103,8 @@ public: GucParams gucParams; GucParams tmpGucParams; updateGucValues val_to_update; - RawValuesList *rawValuesForReplace; - RawValuesList raw_values_for_post_query; + RawValuesList *rawValuesForReplace; /* helper list for replacing raw values in query string on text mode */ + RawValuesList raw_values_for_post_query; /* list of all raw values in the query for replacing in error response */ ICachedColumnManager* m_cached_column_manager; char **called_functions_list; size_t called_functions_list_size; diff --git a/src/include/libpq/libpq-fe.h b/src/include/libpq/libpq-fe.h index 384cd9f30..428c20dcb 100644 --- a/src/include/libpq/libpq-fe.h +++ b/src/include/libpq/libpq-fe.h @@ -95,7 +95,11 @@ typedef enum { * backend startup. */ CONNECTION_SETENV, /* Negotiating environment. */ CONNECTION_SSL_STARTUP, /* Negotiating SSL. */ - CONNECTION_NEEDED /* Internal state: connect() needed */ + CONNECTION_NEEDED, /* Internal state: connect() needed */ + CONNECTION_CHECK_WRITABLE, /* Checking if session is read-write. */ + CONNECTION_CONSUME, /* Consuming any extra messages. */ + CONNECTION_CHECK_TARGET, /* Checking target server properties. */ + CONNECTION_CHECK_STANDBY /* Checking if server is in standby mode. */ } ConnStatusType; typedef enum { diff --git a/src/include/libpq/libpq-int.h b/src/include/libpq/libpq-int.h index 970c71ab1..58ef9cc5e 100644 --- a/src/include/libpq/libpq-int.h +++ b/src/include/libpq/libpq-int.h @@ -287,6 +287,51 @@ typedef struct pgDataValue { const char* value; /* data value, without zero-termination */ } PGdataValue; +typedef enum pg_conn_host_type +{ + CHT_HOST_NAME, + CHT_HOST_ADDRESS, + CHT_UNIX_SOCKET +} pg_conn_host_type; + +/* + * pg_conn_host stores all information about one of possibly several hosts + * mentioned in the connection string. Derived by splitting the pghost + * on the comma character and then parsing each segment. + */ +typedef struct pg_conn_host +{ + pg_conn_host_type type; /* type of host */ + char *hostaddr; /* host numeric IP address */ + char* host; /* host name or address, or socket path */ + char* port; /* port number for this host; if not NULL, + * overrrides the PGConn's pgport */ + char* password; /* password for this host, read from the + * password file. only set if the PGconn's + * pgpass field is NULL. 
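The `connhost[]`/`nconnhost` fields and `target_server_type` let libpq walk a comma-separated host list until a server matches the requested session properties. A usage sketch against the standard libpq API; the host names and dbname are placeholders:

```cpp
#include <libpq-fe.h>
#include <cstdio>

int main() {
    /* Try node1 first, then node2; accept only a writable server. */
    PGconn* conn = PQconnectdb(
        "host=node1,node2 port=5432,5432 dbname=postgres "
        "target_session_attrs=read-write");

    if (PQstatus(conn) != CONNECTION_OK) {
        std::fprintf(stderr, "no writable host: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }
    std::printf("connected to host %s\n", PQhost(conn));
    PQfinish(conn);
    return 0;
}
```

If no listed host satisfies `target_session_attrs=read-write`, `PQstatus` reports `CONNECTION_BAD` and the error message describes the last failure.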
*/ +} pg_conn_host; + +/* Target server type (decoded value of target_session_attrs) */ +typedef enum +{ + SERVER_TYPE_ANY = 0, /* Any server (default) */ + SERVER_TYPE_READ_WRITE, /* Read-write server */ + SERVER_TYPE_READ_ONLY, /* Read-only server */ + SERVER_TYPE_PRIMARY, /* Primary server */ + SERVER_TYPE_STANDBY, /* Standby server */ + SERVER_TYPE_PREFER_STANDBY, /* Prefer standby server */ + SERVER_TYPE_PREFER_STANDBY_PASS2 /* second pass - behaves same as ANY */ +} PGTargetServerType; + +/* Boolean value plus a not-known state, for GUCs we might have to fetch */ +typedef enum +{ + PG_BOOL_UNKNOWN = 0, /* Currently unknown */ + PG_BOOL_YES, /* Yes (true) */ + PG_BOOL_NO /* No (false) */ +} PGTernaryBool; + + /* * PGconn stores all the state data associated with a single connection * to a backend. @@ -335,6 +380,12 @@ struct pg_conn { char* krbsrvname; /* Kerberos service name */ #endif + char* target_session_attrs; /* Type of connection to make + * Possible values any, read-write. */ + PGTargetServerType target_server_type; /* desired session properties */ + PGTernaryBool default_transaction_read_only; /* default_transaction_read_only */ + PGTernaryBool in_hot_standby; /* in_hot_standby */ + /* Optional file to write trace info to */ FILE* Pfdebug; @@ -363,6 +414,11 @@ struct pg_conn { PGnotify* notifyHead; /* oldest unreported Notify msg */ PGnotify* notifyTail; /* newest unreported Notify msg */ + /* Support for multiple hosts in connection string */ + int nconnhost; /* # of possible hosts */ + int whichhost; /* host we're currently considering */ + pg_conn_host *connhost; /* details about each possible host */ + /* Connection data */ int sock; /* Unix FD for socket, -1 if not connected */ SockAddr laddr; /* Local address */ diff --git a/src/include/libpq/pqcomm.h b/src/include/libpq/pqcomm.h index caae193a7..b1e2ee250 100644 --- a/src/include/libpq/pqcomm.h +++ b/src/include/libpq/pqcomm.h @@ -144,6 +144,9 @@ extern const unsigned short protoVersionList[][2]; #define PG_PROTOCOL_LATEST PG_PROTOCOL(3, 51) #define PG_PROTOCOL_GAUSS_BASE 50 +/* Buffer length of error message for connection fail information */ +#define INITIAL_EXPBUFFER_SIZE 256 + typedef uint32 ProtocolVersion; /* FE/BE protocol version number */ typedef ProtocolVersion MsgType; @@ -199,8 +202,11 @@ typedef struct StartupPacket { #define AUTH_REQ_SHA256 10 /* sha256 password */ #define AUTH_REQ_MD5_SHA256 11 /* md5_auth_sha256_stored password */ -#define AUTH_REQ_IAM 12 /* iam token authenication */ +#ifdef ENABLE_LITE_MODE +#define AUTH_REQ_SHA256_RFC 12 /* sha256 auth for RFC5802 */ +#endif #define AUTH_REQ_SM3 13 /* sm3 password */ +#define AUTH_REQ_IAM 14 /* iam token authenication */ typedef uint32 AuthRequest; diff --git a/src/include/libpq/pqformat.h b/src/include/libpq/pqformat.h index ea50ef50a..0619b74cf 100644 --- a/src/include/libpq/pqformat.h +++ b/src/include/libpq/pqformat.h @@ -5,8 +5,8 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/libpq/pqformat.h * * ------------------------------------------------------------------------- @@ -211,7 +211,10 @@ static inline void pq_sendint16(StringInfo buf, uint16 i) } /* append a binary [u]int32 to a StringInfo buffer */ -static inline void pq_sendint32(StringInfo buf, uint32 i) +#ifndef ENABLE_UT +static +#endif + inline void 
pq_sendint32(StringInfo buf, uint32 i) { enlargeStringInfo(buf, sizeof(uint32)); pq_writeint32(buf, i); diff --git a/src/include/libpq/sha2.h b/src/include/libpq/sha2.h index 95eb6ace0..403e4a9d7 100644 --- a/src/include/libpq/sha2.h +++ b/src/include/libpq/sha2.h @@ -51,6 +51,9 @@ /*** SHA-256 Context Structures *******************************/ #define K_LENGTH 32 #define ITERATION_COUNT 10000 +#ifdef ENABLE_LITE_MODE +#define ITERATION_COUNT_V1 2048 +#endif #define CLIENT_STRING_LENGTH 11 #define SEVER_STRING_LENGTH 10 #define SEVER_STRING_LENGTH_SM3 11 @@ -99,7 +102,10 @@ #define SM3_PASSWORD 3 #define ERROR_PASSWORD 4 #define BAD_MEM_ADDR 5 -#define COMBINED_PASSWORD 6 +#ifdef ENABLE_LITE_MODE +#define SHA256_PASSWORD_RFC 6 +#endif +#define COMBINED_PASSWORD 7 typedef struct _SHA256_CTX2 { uint32 state[8]; @@ -114,6 +120,9 @@ void SHA256_Final2(uint8[SHA256_DIGEST_LENGTH], SHA256_CTX2*); /* Use the old iteration ITERATION_COUNT as the default iteraion count. */ extern bool pg_sha256_encrypt(const char* passwd, const char* salt_s, size_t salt_len, char* buf, char* client_key_buf, int iteration_count = ITERATION_COUNT); +#ifdef ENABLE_LITE_MODE +extern bool pg_sha256_encrypt_v1(const char* passwd, const char* salt_s, size_t salt_len, char* buf, char* client_key_buf); +#endif extern int XOR_between_password(const char* password1, const char* password2, char* r, int length); extern void sha_hex_to_bytes32(char* s, const char b[64]); extern void sha_hex_to_bytes4(char* s, const char b[8]); diff --git a/src/include/lite/memory_lite.h b/src/include/lite/memory_lite.h new file mode 100644 index 000000000..430b709d2 --- /dev/null +++ b/src/include/lite/memory_lite.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * memory_lite.h + * + * + * + * IDENTIFICATION + * src/include/lite/memory_lite.h + * + * --------------------------------------------------------------------------------------- + */ + + +#ifndef MEMORY_LITE_H +#define MEMORY_LITE_H + +#ifdef ENABLE_LITE_MODE +#define PARTITION_OID_INDEX_ID_NBUCKETS 512 +#define PARTITION_PART_OID_INDEX_ID_NBUCKETS 512 +#define PARTITION_INDEX_TABLE_ID_PARENT_OID_INDEX_ID_NBUCKETS 512 +#define PG_JOB_ID_INDEX_ID_NBUCKETS 4 +#define PG_OBJECT_INDEX_NBUCKETS 4 +#define PGXC_CLASS_PGXC_REL_ID_INDEX_ID_NBUCKETS 4 +#define PGXC_GROUP_GROUP_NAME_INDEX_ID_NBUCKETS 4 +#define PGXC_GROUP_OID_INDEX_ID_NBUCKETS 4 +#define PGXC_NODE_NODE_NAME_INDEX_ID_NBUCKETS 4 +#define PGXC_NODE_OID_INDEX_ID_NBUCKETS 4 +#define PGXC_NODE_NODE_ID_INDEX_ID_NBUCKETS 4 +#define WORKLOAD_GROUP_GROUP_NAME_INDEX_ID_NBUCKETS 4 +#define WORKLOAD_GROUP_OID_INDEX_ID_NBUCKETS 4 +#define PGXC_SLICE_INDEX_ID_NBUCKETS 4 +#define STATISTIC_RELID_KIND_ATTNUM_INH_INDEX_ID_NBUCKETS 4 +#define STATISTIC_EXT_RELID_KIND_INH_KEY_INDEX_ID_NBUCKETS 4 +#define PACKAGE_OID_INDEX_ID_NBUCKETS 4 +#define PACKAGE_NAME_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_DEFRELID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_ID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_LOOKUP_ID_XID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_MATRELID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_OID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_RELID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CONT_QUERY_SCHEMA_CHANGE_INDEX_ID_NBUCKETS 4 +#define STREAMING_STREAM_OID_INDEX_ID_NBUCKETS 4 +#define STREAMING_STREAM_RELID_INDEX_ID_NBUCKETS 4 +#define STREAMING_REAPER_STATUS_OID_INDEX_ID_NBUCKETS 4 +#define STREAMING_CQ_REAPER_STATUS_OID_INDEX_ID_NBUCKETS 4 + +#else + +#define PARTITION_OID_INDEX_ID_NBUCKETS 1024 +#define PARTITION_PART_OID_INDEX_ID_NBUCKETS 1024 +#define PARTITION_INDEX_TABLE_ID_PARENT_OID_INDEX_ID_NBUCKETS 1024 +#define PG_JOB_ID_INDEX_ID_NBUCKETS 2048 +#define PG_OBJECT_INDEX_NBUCKETS 2048 +#define PGXC_CLASS_PGXC_REL_ID_INDEX_ID_NBUCKETS 1024 +#define PGXC_GROUP_GROUP_NAME_INDEX_ID_NBUCKETS 256 +#define PGXC_GROUP_OID_INDEX_ID_NBUCKETS 256 +#define PGXC_NODE_NODE_NAME_INDEX_ID_NBUCKETS 256 +#define PGXC_NODE_OID_INDEX_ID_NBUCKETS 256 +#define PGXC_NODE_NODE_ID_INDEX_ID_NBUCKETS 256 +#define WORKLOAD_GROUP_GROUP_NAME_INDEX_ID_NBUCKETS 256 +#define WORKLOAD_GROUP_OID_INDEX_ID_NBUCKETS 256 +#define PGXC_SLICE_INDEX_ID_NBUCKETS 1024 +#define STATISTIC_RELID_KIND_ATTNUM_INH_INDEX_ID_NBUCKETS 1024 +#define STATISTIC_EXT_RELID_KIND_INH_KEY_INDEX_ID_NBUCKETS 1024 +#define PACKAGE_OID_INDEX_ID_NBUCKETS 2048 +#define PACKAGE_NAME_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_DEFRELID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_ID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_LOOKUP_ID_XID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_MATRELID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_OID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_RELID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CONT_QUERY_SCHEMA_CHANGE_INDEX_ID_NBUCKETS 2048 +#define STREAMING_STREAM_OID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_STREAM_RELID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_REAPER_STATUS_OID_INDEX_ID_NBUCKETS 2048 +#define STREAMING_CQ_REAPER_STATUS_OID_INDEX_ID_NBUCKETS 2048 +#endif + + +#endif /* MEMORY_LITE_H */ diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h 
index dafd44d21..206f1abf4 100644 --- a/src/include/mb/pg_wchar.h +++ b/src/include/mb/pg_wchar.h @@ -442,6 +442,7 @@ extern int pg_mic_mblen(const unsigned char* mbstr); extern int pg_mbstrlen(const char* mbstr); extern int pg_mbstrlen_with_len(const char* mbstr, int len); extern int pg_mbstrlen_with_len_eml(const char* mbstr, int len, int eml); +extern int pg_mbstrlen_with_len_toast(const char* mbstr, int* len); extern int pg_mbcliplen(const char* mbstr, int len, int limit); extern int pg_encoding_mbcliplen(int encoding, const char* mbstr, int len, int limit); extern int pg_mbcharcliplen(const char* mbstr, int len, int imit); diff --git a/src/include/memory_func.h b/src/include/memory_func.h new file mode 100644 index 000000000..aa9290c78 --- /dev/null +++ b/src/include/memory_func.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * src/include/memory_func.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef MEMORY_FUNC_H +#define MEMORY_FUNC_H + +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "catalog/catalog.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_tablespace.h" +#include "catalog/pg_type.h" +#include "funcapi.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/acl.h" + +#ifdef MEMORY_CONTEXT_TRACK +extern void GetAllocBlockInfo(AllocSet set, StringInfoData* buf); +extern void GetAsanBlockInfo(AsanSet set, StringInfoData* buf); +void gs_recursive_unshared_memory_context(const MemoryContext context, + const char* ctx_name, StringInfoData* buf); +#endif + +Datum gs_get_shared_memctx_detail(PG_FUNCTION_ARGS); +Datum gs_get_session_memctx_detail(PG_FUNCTION_ARGS); +Datum gs_get_thread_memctx_detail(PG_FUNCTION_ARGS); + +extern void check_stack_depth(void); + +#endif /* MEMORY_FUNC_H */ diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 5a4a769e9..3a5c59a42 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -12,6 +12,7 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors * * src/include/miscadmin.h * @@ -39,6 +40,8 @@ extern const uint32 GRAND_VERSION_NUM; +extern const uint32 PREDPUSH_SAME_LEVEL_VERSION_NUM; +extern const uint32 UPSERT_WHERE_VERSION_NUM; extern const uint32 FUNC_PARAM_COL_VERSION_NUM; extern const uint32 SUBPARTITION_VERSION_NUM; extern const uint32 COMMENT_PROC_VERSION_NUM; @@ -62,12 +65,13 @@ extern const uint32 PRIVS_VERSION_NUM; extern const uint32 ML_OPT_MODEL_VERSION_NUM; extern const uint32 RANGE_LIST_DISTRIBUTION_VERSION_NUM; extern const uint32 FIX_SQL_ADD_RELATION_REF_COUNT; -extern const uint32 INPLACE_UPDATE_WERSION_NUM; +extern const uint32 INPLACE_UPDATE_VERSION_NUM; extern const uint32 GENERATED_COL_VERSION_NUM; extern const uint32 
SEGMENT_PAGE_VERSION_NUM; extern const uint32 DECODE_ABORT_VERSION_NUM; extern const uint32 COPY_TRANSFORM_VERSION_NUM; extern const uint32 TDE_VERSION_NUM; +extern const uint32 PARALLEL_DECODE_VERSION_NUM; extern const uint32 V5R1C20_BACKEND_VERSION_NUM; extern const uint32 V5R2C00_START_VERSION_NUM; extern const uint32 V5R2C00_BACKEND_VERSION_NUM; @@ -75,13 +79,18 @@ extern const uint32 TWOPHASE_FILE_VERSION; extern const uint32 CLIENT_ENCRYPTION_PROC_VERSION_NUM; extern const uint32 PRIVS_DIRECTORY_VERSION_NUM; extern const uint32 COMMENT_RECORD_PARAM_VERSION_NUM; -extern const uint32 PUBLICATION_VERSION_NUM; extern const uint32 ENHANCED_TUPLE_LOCK_VERSION_NUM; +extern const uint32 HASUID_VERSION_NUM; +extern const uint32 CREATE_INDEX_CONCURRENTLY_DIST_VERSION_NUM; +extern const uint32 WAIT_N_TUPLE_LOCK_VERSION_NUM; +extern const uint32 DISASTER_READ_VERSION_NUM; +extern const uint32 SUPPORT_DATA_REPAIR; +extern const uint32 SCAN_BATCH_MODE_VERSION_NUM; +extern const uint32 PUBLICATION_VERSION_NUM; +extern const uint32 ANALYZER_HOOK_VERSION_NUM; extern void register_backend_version(uint32 backend_version); extern bool contain_backend_version(uint32 version_number); -extern const uint32 SUPPORT_HASH_XLOG_VERSION_NUM; -extern const uint32 ANALYZER_HOOK_VERSION_NUM; #define INPLACE_UPGRADE_PRECOMMIT_VERSION 1 @@ -101,7 +110,15 @@ extern const uint32 ANALYZER_HOOK_VERSION_NUM; #define OPT_SECURITY_DEFINER 8192 #define OPT_SKIP_GS_SOURCE 16384 #define OPT_PROC_OUTPARAM_OVERRIDE 32768 -#define OPT_MAX 16 +#define OPT_ALLOW_PROCEDURE_COMPILE_CHECK 65536 +#define OPT_IMPLICIT_FOR_LOOP_VARIABLE 131072 +#define OPT_AFORMAT_NULL_TEST 262144 +#define OPT_AFORMAT_REGEX_MATCH 524288 +#define OPT_ROWNUM_TYPE_COMPAT 1048576 +#define OPT_COMPAT_CURSOR 2097152 +#define OPT_CHAR_COERCE_COMPAT 4194304 +#define OPT_MAX 23 + #define DISPLAY_LEADING_ZERO (u_sess->utils_cxt.behavior_compat_flags & OPT_DISPLAY_LEADING_ZERO) #define END_MONTH_CALCULATE (u_sess->utils_cxt.behavior_compat_flags & OPT_END_MONTH_CALCULATE) @@ -122,6 +139,14 @@ extern const uint32 ANALYZER_HOOK_VERSION_NUM; #define PLSQL_SECURITY_DEFINER (u_sess->utils_cxt.behavior_compat_flags & OPT_SECURITY_DEFINER) #define SKIP_GS_SOURCE (u_sess->utils_cxt.behavior_compat_flags & OPT_SKIP_GS_SOURCE) #define PROC_OUTPARAM_OVERRIDE (u_sess->utils_cxt.behavior_compat_flags & OPT_PROC_OUTPARAM_OVERRIDE) +#define ALLOW_PROCEDURE_COMPILE_CHECK (u_sess->utils_cxt.behavior_compat_flags & OPT_ALLOW_PROCEDURE_COMPILE_CHECK) +#define IMPLICIT_FOR_LOOP_VARIABLE (u_sess->utils_cxt.behavior_compat_flags & OPT_IMPLICIT_FOR_LOOP_VARIABLE) +#define AFORMAT_NULL_TEST (u_sess->utils_cxt.behavior_compat_flags & OPT_AFORMAT_NULL_TEST) +#define AFORMAT_REGEX_MATCH (u_sess->utils_cxt.behavior_compat_flags & OPT_AFORMAT_REGEX_MATCH) +#define ROWNUM_TYPE_COMPAT (u_sess->utils_cxt.behavior_compat_flags & OPT_ROWNUM_TYPE_COMPAT) +#define COMPAT_CURSOR (u_sess->utils_cxt.behavior_compat_flags & OPT_COMPAT_CURSOR) +#define CHAR_COERCE_COMPAT (u_sess->utils_cxt.behavior_compat_flags & OPT_CHAR_COERCE_COMPAT) + /* define database compatibility Attribute */ typedef struct { int flag; @@ -174,6 +199,16 @@ extern void ProcessInterrupts(void); } \ } while (0) +#define PREVENT_POOL_VALIDATE_SIGUSR2() \ + do { \ + if (t_thrd.int_cxt.PoolValidateCancelPending && IS_PGXC_COORDINATOR) { \ + g_pq_interrupt_happened = false; \ + t_thrd.int_cxt.ProcDiePending = false; \ + t_thrd.int_cxt.QueryCancelPending = false; \ + t_thrd.int_cxt.PoolValidateCancelPending = false; \ + } \ + } while (0) 
\ + #define START_CRIT_SECTION() (t_thrd.int_cxt.CritSectionCount++) #define END_CRIT_SECTION() \ @@ -392,6 +427,7 @@ typedef enum { StartupProcess, BgWriterProcess, SpBgWriterProcess, + PageRepairProcess, CheckpointerProcess, WalWriterProcess, WalWriterAuxiliaryProcess, @@ -416,6 +452,7 @@ typedef enum { TsCompactionProcess, TsCompactionAuxiliaryProcess, XlogCopyBackendProcess, + BarrierPreParseBackendProcess, NUM_SINGLE_AUX_PROC, /* Sentry for auxiliary type with single thread. */ /* @@ -428,6 +465,8 @@ typedef enum { TpoolListenerProcess, TsCompactionConsumerProcess, CsnMinSyncProcess, + ParallelDecodeProcess, + LogicalReadRecord, NUM_AUXPROCTYPES /* Must be last! */ } AuxProcType; @@ -450,6 +489,8 @@ typedef enum { #define AmCBMWriterProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == CBMWriterProcess) #define AmRemoteServiceProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == RemoteServiceProcess) #define AmPageWriterProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == PageWriterProcess) +#define AmPageWriterMainProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == PageWriterProcess && \ + t_thrd.pagewriter_cxt.pagewriter_id == 0) #define AmHeartbeatProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == HeartbeatProcess) #define AmTsCompactionProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == TsCompactionProcess) #define AmTsCompactionConsumerProcess() (t_thrd.bootstrap_cxt.MyAuxProcType == TsCompactionConsumerProcess) @@ -458,7 +499,6 @@ typedef enum { - /***************************************************************************** * pinit.h -- * * openGauss initialization and cleanup definitions. * diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 72d8c4b4a..0e4623294 100755 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -4,9 +4,9 @@ * definitions for executor state nodes * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/nodes/execnodes.h * @@ -304,6 +304,7 @@ typedef struct ProjectionInfo { TupleTableSlot* pi_slot; ExprDoneCond* pi_itemIsDone; bool pi_directMap; + bool pi_topPlan; /* whether this is the outermost query */ int pi_numSimpleVars; int* pi_varSlotOffsets; int* pi_varNumbers; @@ -658,6 +659,7 @@ typedef struct ExecRowMark { Index rowmarkId; /* unique identifier for resjunk columns */ RowMarkType markType; /* see enum in nodes/plannodes.h */ bool noWait; /* NOWAIT option */ + int waitSec; /* WAIT time Sec */ ItemPointerData curCtid; /* ctid of currently locked tuple, if any */ int numAttrs; /* number of attributes in subplan */ } ExecRowMark; @@ -1346,11 +1348,12 @@ typedef struct MergeActionState { */ typedef struct UpsertState { - NodeTag type; - UpsertAction us_action; /* Flags showing DUPLICATE UPDATE NOTHING or SOMETHING */ - TupleTableSlot *us_existing; /* slot to store existing target tuple in */ - List *us_excludedtlist; /* the excluded pseudo relation's tlist */ - TupleTableSlot *us_updateproj; /* slot to update */ + NodeTag type; + UpsertAction us_action; /* Flags showing DUPLICATE UPDATE NOTHING or SOMETHING */ + TupleTableSlot *us_existing; /* slot to store existing target tuple in */ + List *us_excludedtlist; /* the excluded pseudo relation's tlist */ + TupleTableSlot *us_updateproj; /* slot to update */ + List *us_updateWhere; /* state for the upsert where clause */ } UpsertState; /* ---------------- @@ 
-1519,6 +1522,8 @@ typedef struct StartWithOpState */ TargetEntry *sw_pseudoCols[PSEUDO_COLUMN_NUM]; + IterationStats iterStats; + /* variables to help calculate pseudo return columns */ TupleTableSlot *sw_workingSlot; /* A dedicated slot to hold tuple-2-tuple conversion inside of StartWithOp node, @@ -1599,6 +1604,28 @@ typedef struct SampleScanParams { void* tsm_state; /* tablesample method can keep state here */ } SampleScanParams; +/* ---------------------------------------------------------------- +* Batch Scan Information +* ---------------------------------------------------------------- +*/ +struct ScanBatchResult { + int rows; /* number of rows in the current page. */ + TupleTableSlot** scanTupleSlotInBatch; /* array size of BatchMaxSize, stores tuples scanned in a page */ +}; + +struct ScanBatchState { + VectorBatch* pCurrentBatch; /* for output in batch */ + VectorBatch* pScanBatch; /* batch formed from tuples */ + int scanTupleSlotMaxNum; /* max number of tuples that can be scanned at once */ + int colNum; + int *colId; /* for qual and project, only save the used cols. */ + int maxcolId; + bool *nullflag; /* indicate whether the batch has null values, for performance */ + bool *lateRead; /* for project */ + bool scanfinished; /* the previous call returned rows, but this partition's pages have all been read */ + ScanBatchResult scanBatch; +}; + /* ---------------------------------------------------------------- * Scan State Information * ---------------------------------------------------------------- @@ -1626,6 +1653,7 @@ struct SeqScanAccessor; */ typedef TupleTableSlot *(*ExecScanAccessMtd) (ScanState *node); typedef bool(*ExecScanRecheckMtd) (ScanState *node, TupleTableSlot *slot); +typedef void (*SeqScanGetNextMtd)(TableScanDesc scan, TupleTableSlot* slot, ScanDirection direction); typedef struct ScanState { PlanState ps; /* its first field is NodeTag */ @@ -1651,7 +1679,10 @@ typedef struct ScanState { RangeScanInRedis rangeScanInRedis; /* if it is a range scan in redistribution time */ bool isSampleScan; /* identify whether it is a table sample scan or not. */ SampleScanParams sampleScanInfo; /* TABLESAMPLE params include type/seed/repeatable. 
*/ + SeqScanGetNextMtd fillNextSlotFunc; ExecScanAccessMtd ScanNextMtd; + bool scanBatchMode; + ScanBatchState* scanBatchState; } ScanState; /* @@ -2574,133 +2605,38 @@ inline bool BitmapNodeNeedSwitchPartRel(BitmapHeapScanState* node) return tbm_is_global(node->tbm) && GPIScanCheckPartOid(node->gpi_scan, node->tbmres->partitionOid); } -/* - * DB4AI GD node: used for train models using Gradient Descent - */ +// DB4AI state node -struct GradientDescentAlgorithm; -struct GradientDescentExpr; -struct OptimizerGD; -struct ShuffleGD; +struct AlgorithmAPI; +struct TrainModelState; -typedef struct GradientDescentState { - ScanState ss; /* its first field is NodeTag */ - GradientDescentAlgorithm* algorithm; - OptimizerGD* optimizer; - ShuffleGD* shuffle; - - // tuple description - TupleDesc tupdesc; - int n_features; // number of features +typedef struct ModelTuple { + Datum *values; // attributes value + bool *isnull; // whether an attribute is null + Oid *typid; // type of an attribute + bool *typbyval; // attribute is passed by value or by reference + int16 *typlen; // the length of an attribute + int ncolumns; // number of attributes +} ModelTuple; - // dependant var binary values - int num_classes; - Datum binary_classes[2]; +// returns the next data row, or nullptr when iteration has finished +typedef bool (*callback_ml_fetch)(void *callback_data, ModelTuple *tuple); - // training state - bool done; // when finished - Matrix weights; - double learning_rate; - int n_iterations; - int usecs; // execution time - int processed; // tuples - int discarded; - float loss; - Scores scores; -} GradientDescentState; - - -typedef struct GradientDescentExprState { - ExprState xprstate; - PlanState* ps; - GradientDescentExpr* xpr; -} GradientDescentExprState; - -/* - * DB4AI k-means node - */ - -/* - * to keep running statistics on each cluster being constructed - */ -class IncrementalStatistics { - uint64_t population = 0; - double max_value = 0.; - double min_value = DBL_MAX; - double total = 0.; - double s = 0; - -public: - - IncrementalStatistics operator+(IncrementalStatistics const& rhs) const; - IncrementalStatistics operator-(IncrementalStatistics const& rhs) const; - IncrementalStatistics& operator+=(IncrementalStatistics const& rhs); - IncrementalStatistics& operator-=(IncrementalStatistics const& rhs); - - double getMin() const; - void setMin(double); - double getMax() const; - void setMax(double); - double getTotal() const; - void setTotal(double); - uint64_t getPopulation() const; - void setPopulation(uint64_t); - double getEmpiricalMean() const; - double getEmpiricalVariance() const; - double getEmpiricalStdDev() const; - bool reset(); -}; - -/* - * internal representation of a centroid - */ -typedef struct Centroid { - IncrementalStatistics statistics; - ArrayType* coordinates = nullptr; - uint32_t id = 0U; -} Centroid; - -/* - * current state of the algorithm - */ -typedef struct KMeansStateDescription { - Centroid* centroids[2] = {nullptr}; - ArrayType* bbox_min = nullptr; - ArrayType* bbox_max = nullptr; - - double (* distance)(double const*, double const*, uint32_t const dimension) = nullptr; - - double execution_time = 0.; - double seeding_time = 0.; - IncrementalStatistics solution_statistics[2]; - uint64_t num_good_points = 0UL; - uint64_t num_dead_points = 0UL; - uint32_t current_iteration = 0U; - uint32_t current_centroid = 0U; - uint32_t dimension = 0U; - uint32_t num_centroids = 0U; - uint32_t actual_num_iterations = 0U; -} KMeansStateDescription; - -/* - * current state of the 
k-means node - */ -typedef struct KMeansState { - ScanState sst; - KMeansStateDescription description; - bool done = false; -} KMeansState; - -/* - * internal representation of a point (not a centroid) - */ -typedef struct GSPoint { - ArrayType* pg_coordinates = nullptr; - uint32_t weight = 1U; - uint32_t id = 0ULL; - double distance_to_closest_centroid = DBL_MAX; - bool should_free = false; -} GSPoint; +// restarts the iteration of the dataset without fetching any tuple at all +typedef void (*callback_ml_rescan)(void *callback_data); +typedef struct TrainModelState { + ScanState ss; /* its first field is NodeTag */ + const TrainModel *config; + AlgorithmAPI *algorithm; + int finished; // number of configurations still running + // row data + ModelTuple tuple; // data as well as metadata + bool row_allocated; + // utility functions + callback_ml_fetch fetch; + callback_ml_rescan rescan; + void *callback_data; // for direct ML, used by the fetch callback +} TrainModelState; #endif /* EXECNODES_H */ diff --git a/src/include/nodes/memnodes.h b/src/include/nodes/memnodes.h index d7564f118..3413b6dca 100644 --- a/src/include/nodes/memnodes.h +++ b/src/include/nodes/memnodes.h @@ -178,6 +178,32 @@ typedef struct AllocBlockData { #endif } AllocBlockData; +/* + * AllocChunk + * The prefix of each piece of memory in an AllocBlock + * + * NB: this MUST match StandardChunkHeader as defined by utils/memutils.h. + */ +typedef struct AllocChunkData { + /* aset is the owning aset if allocated, or the freelist link if free */ + void* aset; + /* size is always the size of the usable space in the chunk */ + Size size; +#ifdef MEMORY_CONTEXT_CHECKING + /* when debugging memory usage, also store actual requested size */ + /* this is zero in a free chunk */ + Size requested_size; +#endif +#ifdef MEMORY_CONTEXT_TRACK + const char* file; /* __FILE__ of palloc/palloc0 call */ + int line; /* __LINE__ of palloc/palloc0 call */ +#endif +#ifdef MEMORY_CONTEXT_CHECKING + uint32 prenum; /* prefix magic number */ +#endif +} AllocChunkData; + +#define ALLOC_CHUNKHDRSZ MAXALIGN(sizeof(AllocChunkData)) #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData)) /* AsanSetContext is our asan implementation of MemoryContext. 
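ALLOC_CHUNKHDRSZ is the MAXALIGN'ed size of the AllocChunkData header defined above; the allocator hands out the address just past the header and recovers the header by stepping back over it, which is also how the new MEMORY_CONTEXT_TRACK file/line fields become reachable from a bare pointer. A sketch of the conventional aset.c pointer arithmetic, for orientation (the typedefs follow the usual aset.c naming and are not added by this hunk):

```
typedef AllocChunkData* AllocChunk;  /* conventional aset.c typedef */
typedef void* AllocPointer;

/* user-visible pointer <-> chunk header */
#define AllocPointerGetChunk(ptr) ((AllocChunk)(((char*)(ptr)) - ALLOC_CHUNKHDRSZ))
#define AllocChunkGetPointer(chk) ((AllocPointer)(((char*)(chk)) + ALLOC_CHUNKHDRSZ))
```

With MEMORY_CONTEXT_TRACK defined, dump paths can then report chunk->file and chunk->line per allocation, which the gs_get_*_memctx_detail functions declared in memory_func.h appear to build on.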
* Note: diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 9049f0880..95f1b3c30 100755 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -7,8 +7,8 @@ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/nodes/nodes.h * * ------------------------------------------------------------------------- @@ -104,6 +104,8 @@ typedef enum NodeTag { T_CreateResourcePoolStmt, T_AlterResourcePoolStmt, T_DropResourcePoolStmt, + T_AlterGlobalConfigStmt, + T_DropGlobalConfigStmt, T_CreateWorkloadGroupStmt, T_AlterWorkloadGroupStmt, T_DropWorkloadGroupStmt, @@ -140,6 +142,7 @@ typedef enum NodeTag { */ T_PlanState = 200, T_ResultState, + T_VecToRowState, T_MergeActionState, T_ModifyTableState, T_DistInsertSelectState, @@ -181,6 +184,7 @@ typedef enum NodeTag { #ifdef PGXC T_RemoteQueryState, #endif + T_TrainModelState, T_StreamState, /* @@ -288,6 +292,7 @@ typedef enum NodeTag { T_RangePartitionindexDefState, T_SplitPartitionState, T_AddPartitionState, + T_AddSubPartitionState, T_RangePartitionStartEndDefState, T_RownumState, T_ListPartitionDefState, @@ -390,6 +395,7 @@ typedef enum NodeTag { T_SetOperationStmt, T_GrantStmt, T_GrantRoleStmt, + T_GrantDbStmt, T_AlterDefaultPrivilegesStmt, T_ClosePortalStmt, T_ClusterStmt, @@ -539,6 +545,7 @@ typedef enum NodeTag { T_PrivGrantee, T_FuncWithArgs, T_AccessPriv, + T_DbPriv, T_CreateOpClassItem, T_TableLikeClause, T_FunctionParameter, @@ -658,10 +665,9 @@ typedef enum NodeTag { /* * Vectorized Execution Nodes */ - T_VecToRowState = 2000, // this must put first for vector engine runtime state - T_VecStartState, + T_VecStartState = 2001, T_RowToVecState, T_VecAggState, @@ -724,6 +730,7 @@ typedef enum NodeTag { T_ScanMethodHint, T_MultiNodeHint, T_PredpushHint, + T_PredpushSameLevelHint, T_SkewHint, T_RewriteHint, T_GatherHint, @@ -765,12 +772,8 @@ typedef enum NodeTag { // DB4AI T_CreateModelStmt = 5000, T_PredictByFunction, - T_GradientDescent, - T_GradientDescentState, - T_GradientDescentExpr, - T_GradientDescentExprState, - T_KMeans, - T_KMeansState, + T_TrainModel, + T_ExplainModelStmt, // End DB4AI /* Plpgsql */ diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h index c84b26359..f0a8f89a5 100644 --- a/src/include/nodes/params.h +++ b/src/include/nodes/params.h @@ -61,15 +61,21 @@ typedef struct Cursor_Data { } Cursor_Data; typedef struct HTAB HTAB; + +typedef struct TableOfInfo { + bool isnestedtable = false; + int tableOfLayers = 0; + Oid tableOfIndexType = InvalidOid; /* type Oid of table of */ + HTAB* tableOfIndex = NULL; /* mapping of table of index */ +} TableOfInfo; + typedef struct ParamExternData { Datum value; /* parameter value */ bool isnull; /* is it NULL? 
*/ uint16 pflags; /* flag bits, see above */ Oid ptype; /* parameter's datatype, or 0 */ Cursor_Data cursor_data; - bool isnestedtable = false; - Oid tableOfIndexType = InvalidOid; /* type Oid of table of */ - HTAB* tableOfIndex = NULL; /* mapping of table of index */ + TableOfInfo* tabInfo = NULL; } ParamExternData; typedef struct ParamListInfoData* ParamListInfo; diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index df582d14a..1e588ea59 100755 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -362,6 +362,8 @@ typedef struct RangeTblEntry { /* For sublink in targetlist pull up */ bool sublink_pull_up; /* mark the subquery is sublink pulled up */ Bitmapset *extraUpdatedCols; /* generated columns being updated */ + bool pulled_from_subquery; /* mark whether it is pulled-up from subquery to the current level, for upsert remote + query deparse */ } RangeTblEntry; /* @@ -477,6 +479,7 @@ typedef struct RowMarkClause { Index rti; /* range table index of target relation */ bool forUpdate; /* for compatibility, we reserve this filed but don't use it */ bool noWait; /* NOWAIT option */ + int waitSec; /* WAIT time Sec */ bool pushedDown; /* pushed down from higher query level? */ LockClauseStrength strength; } RowMarkClause; @@ -667,6 +670,23 @@ typedef struct GrantRoleStmt { DropBehavior behavior; /* drop behavior (for REVOKE) */ } GrantRoleStmt; +/* ---------------------- + * Grant/Revoke Database Privilege Statement + * ---------------------- + */ +typedef struct GrantDbStmt { + NodeTag type; + bool is_grant; /* true = GRANT, false = REVOKE */ + bool admin_opt; /* with admin option */ + List* privileges; /* list of DbPriv nodes */ + List* grantees; /* list of PrivGrantee nodes */ +} GrantDbStmt; + +typedef struct DbPriv { + NodeTag type; + char* db_priv_name; /* string name of sys privilege */ +} DbPriv; + /* ---------------------- * Alter Default Privileges Statement * ---------------------- @@ -1698,7 +1718,7 @@ typedef struct VacuumStmt { */ typedef struct BarrierStmt { NodeTag type; - const char* id; /* User supplied barrier id, if any */ + char* id; /* User supplied barrier id, if any */ } BarrierStmt; /* @@ -1975,6 +1995,16 @@ typedef struct DropResourcePoolStmt { char* pool_name; } DropResourcePoolStmt; +typedef struct AlterGlobalConfigStmt { + NodeTag type; + List* options; +} AlterGlobalConfigStmt; + +typedef struct DropGlobalConfigStmt { + NodeTag type; + List* options; +} DropGlobalConfigStmt; + /* * ---------------------- * Create Workload Group statement @@ -2096,6 +2126,8 @@ typedef struct LockStmt { List* relations; /* relations to lock */ int mode; /* lock mode */ bool nowait; /* no wait mode */ + bool cancelable; /* send term to lock holder */ + int waitSec; /* WAIT time Sec */ } LockStmt; /* ---------------------- diff --git a/src/include/nodes/parsenodes_common.h b/src/include/nodes/parsenodes_common.h index f60e64e78..8e79f0381 100644 --- a/src/include/nodes/parsenodes_common.h +++ b/src/include/nodes/parsenodes_common.h @@ -98,10 +98,10 @@ typedef enum ObjectType { OBJECT_DIRECTORY, OBJECT_GLOBAL_SETTING, OBJECT_COLUMN_SETTING, - OBJECT_PUBLICATION, - OBJECT_PUBLICATION_NAMESPACE, - OBJECT_PUBLICATION_REL, - OBJECT_SUBSCRIPTION + OBJECT_PUBLICATION, + OBJECT_PUBLICATION_NAMESPACE, + OBJECT_PUBLICATION_REL, + OBJECT_SUBSCRIPTION } ObjectType; #define OBJECT_IS_SEQUENCE(obj) \ @@ -416,7 +416,8 @@ typedef struct HintState { List* skew_hint; /* skew hint list */ List* hint_warning; /* hint warning list */ bool 
multi_node_hint; /* multinode hint */ - List* predpush_hint; + List* predpush_hint; /* predpush hint */ + List* predpush_same_level_hint; /* predpush same level hint */ List* rewrite_hint; /* rewrite hint list */ List* gather_hint; /* gather hint */ List* set_hint; /* query-level guc hint */ @@ -437,6 +438,7 @@ typedef struct HintState { typedef struct UpsertClause { NodeTag type; List *targetList; + Node *whereClause; int location; } UpsertClause; @@ -596,7 +598,7 @@ typedef struct StartWithClause { bool priorDirection; bool nocycle; - /* extension opetions */ + /* extension options */ bool opt; } StartWithClause; @@ -732,6 +734,7 @@ typedef enum AlterTableType { AT_AddColumnRecurse, /* internal to commands/tablecmds.c */ AT_AddColumnToView, /* implicitly via CREATE OR REPLACE VIEW */ AT_AddPartition, + AT_AddSubPartition, AT_ColumnDefault, /* alter column default */ AT_DropNotNull, /* alter column drop not null */ AT_SetNotNull, /* alter column set not null */ @@ -744,6 +747,7 @@ typedef enum AlterTableType { AT_DropColumn, /* drop column */ AT_DropColumnRecurse, /* internal to commands/tablecmds.c */ AT_DropPartition, + AT_DropSubPartition, AT_AddIndex, /* add index */ AT_ReAddIndex, /* internal to commands/tablecmds.c */ AT_AddConstraint, /* add constraint */ @@ -1049,6 +1053,7 @@ typedef struct RangePartitionindexDefState { NodeTag type; char* name; char* tablespace; + List *sublist; } RangePartitionindexDefState; /* * @@ -1086,6 +1091,7 @@ typedef struct PartitionState { List *partitionList; /* list of partition definition */ RowMovementValue rowMovement; /* default: for colum-stored table means true, for row-stored means false */ PartitionState *subPartitionState; + List *partitionNameList; /* existing partitionNameList for add partition */ } PartitionState; typedef struct AddPartitionState { /* ALTER TABLE ADD PARTITION */ @@ -1094,6 +1100,12 @@ typedef struct AddPartitionState { /* ALTER TABLE ADD PARTITION */ bool isStartEnd; } AddPartitionState; +typedef struct AddSubPartitionState { /* ALTER TABLE MODIFY PARTITION ADD SUBPARTITION */ + NodeTag type; + const char* partitionName; + List *subPartitionList; +} AddSubPartitionState; + typedef enum SplitPartitionType { RANGEPARTITIION, /* not used */ LISTPARTITIION, /* not support */ @@ -1625,6 +1637,7 @@ typedef struct LockingClause { bool forUpdate; /* for compatibility, we reserve this field but don't use it */ bool noWait; /* NOWAIT option */ LockClauseStrength strength; + int waitSec; /* WAIT time Sec */ } LockingClause; /* @@ -1920,6 +1933,7 @@ typedef struct Query { * do some expression processing. * Please refer to subquery_planner. 
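The waitSec field threaded through this patch (LockingClause here, plus RowMarkClause, ExecRowMark, PlanRowMark, and LockStmt) carries the new WAIT n grammar option next to the existing NOWAIT flag, giving three lock-wait behaviors. A minimal sketch of the policy the two fields imply; the enum and helper are hypothetical, for illustration only:

```
typedef enum { LOCK_WAIT_BLOCK, LOCK_WAIT_ERROR, LOCK_WAIT_SECONDS } WaitPolicySketch;

static WaitPolicySketch LockWaitPolicySketch(bool noWait, int waitSec)
{
    if (noWait) {
        return LOCK_WAIT_ERROR;     /* NOWAIT: error out immediately if locked */
    }
    if (waitSec > 0) {
        return LOCK_WAIT_SECONDS;   /* WAIT n: give up after waitSec seconds */
    }
    return LOCK_WAIT_BLOCK;         /* default: block until the lock is granted */
}
```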
*/ + bool is_from_inlist2join_rewrite; /* true if the query is created when applying inlist2join optimization */ uint64 uniqueSQLId; /* used by unique sql id */ #ifndef ENABLE_MULTIPLE_NODES char* unique_sql_text; /* used by unique sql plain text */ @@ -2152,55 +2166,46 @@ typedef struct PredictByFunction{ // DB4AI int model_args_location; // Only for parser } PredictByFunction; - -typedef struct CreatePublicationStmt -{ - NodeTag type; - char *pubname; /* Name of of the publication */ - List *options; /* List of DefElem nodes */ - List *tables; /* Optional list of tables to add */ - bool for_all_tables; /* Special publication for all tables in db */ +typedef struct CreatePublicationStmt { + NodeTag type; + char *pubname; /* Name of the publication */ + List *options; /* List of DefElem nodes */ + List *tables; /* Optional list of tables to add */ + bool for_all_tables; /* Special publication for all tables in db */ } CreatePublicationStmt; -typedef struct AlterPublicationStmt -{ - NodeTag type; - char *pubname; /* Name of of the publication */ +typedef struct AlterPublicationStmt { + NodeTag type; + char *pubname; /* Name of the publication */ - /* parameters used for ALTER PUBLICATION ... WITH */ - List *options; /* List of DefElem nodes */ + /* parameters used for ALTER PUBLICATION ... WITH */ + List *options; /* List of DefElem nodes */ - /* parameters used for ALTER PUBLICATION ... ADD/DROP TABLE */ - List *tables; /* List of tables to add/drop */ - bool for_all_tables; /* Special publication for all tables in db */ - DefElemAction tableAction; /* What action to perform with the tables */ + /* parameters used for ALTER PUBLICATION ... ADD/DROP TABLE */ + List *tables; /* List of tables to add/drop */ + bool for_all_tables; /* Special publication for all tables in db */ + DefElemAction tableAction; /* What action to perform with the tables */ } AlterPublicationStmt; -typedef struct CreateSubscriptionStmt -{ - NodeTag type; - char *subname; /* Name of of the subscription */ - char *conninfo; /* Connection string to publisher */ - List *publication; /* One or more publication to subscribe to */ - List *options; /* List of DefElem nodes */ +typedef struct CreateSubscriptionStmt { + NodeTag type; + char *subname; /* Name of the subscription */ + char *conninfo; /* Connection string to publisher */ + List *publication; /* One or more publications to subscribe to */ + List *options; /* List of DefElem nodes */ } CreateSubscriptionStmt; -typedef struct AlterSubscriptionStmt -{ - NodeTag type; - char *subname; /* Name of of the subscription */ - List *options; /* List of DefElem nodes */ +typedef struct AlterSubscriptionStmt { + NodeTag type; + char *subname; /* Name of the subscription */ + List *options; /* List of DefElem nodes */ } AlterSubscriptionStmt; -typedef struct DropSubscriptionStmt -{ - NodeTag type; - char *subname; /* Name of of the subscription */ - bool missing_ok; /* Skip error if missing? */ - DropBehavior behavior; /* RESTRICT or CASCADE behavior */ +typedef struct DropSubscriptionStmt { + NodeTag type; + char *subname; /* Name of the subscription */ + bool missing_ok; /* Skip error if missing? 
*/ + DropBehavior behavior; /* RESTRICT or CASCADE behavior */ } DropSubscriptionStmt; - - - #endif /* PARSENODES_COMMONH */ diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 46667dea0..992a60ff8 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -4,9 +4,9 @@ * definitions for query plan nodes * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/nodes/plannodes.h * @@ -420,6 +420,7 @@ typedef struct ModifyTable { List* updateTlist; /* List of UPDATE target */ List* exclRelTlist; /* target list of the EXECLUDED pseudo relation */ Index exclRelRTIndex; /* RTI of the EXCLUDED pseudo relation */ + Node* upsertWhere; /* Qualifiers for upsert's update clause to check */ OpMemInfo mem_info; /* Memory info for modify node */ } ModifyTable; @@ -586,6 +587,8 @@ typedef struct Scan { /* Memory info for scan node, now it just used on indexscan, indexonlyscan, bitmapscan, dfsindexscan */ OpMemInfo mem_info; bool is_inplace; + bool scanBatchMode; + double tableRows; } Scan; /* ---------------- @@ -1096,6 +1099,7 @@ typedef struct HashJoin { bool rebuildHashTable; bool isSonicHash; OpMemInfo mem_info; /* Memory info for inner hash table */ + double joinRows; } HashJoin; /* ---------------- @@ -1379,6 +1383,7 @@ typedef struct PlanRowMark { Index rowmarkId; /* unique identifier for resjunk columns */ RowMarkType markType; /* see enum above */ bool noWait; /* NOWAIT option */ + int waitSec; /* WAIT time Sec */ bool isParent; /* true if this is a "dummy" parent entry */ int numAttrs; /* number of attributes in subplan */ Bitmapset* bms_nodeids; @@ -1475,107 +1480,15 @@ static inline bool IsJoinPlan(Node* node) * DB4AI */ - -// GD optimizers -typedef enum { - OPTIMIZER_GD, // simple mini-batch - OPTIMIZER_NGD, // normalized gradient descent -} OptimizerML; - -inline void optimizer_ml_setter(const char* str, void* optimizer_ml){ - OptimizerML* optimizer = (OptimizerML*) optimizer_ml; - if (strcmp(str, "gd") == 0) - *optimizer = OPTIMIZER_GD; - else if (strcmp(str, "ngd") == 0) - *optimizer = OPTIMIZER_NGD; - else - elog(ERROR, "Invalid optimizer. 
Current candidates are: gd (default), ngd"); - return; -} - -// Gradient Descent node -typedef struct GradientDescent { +// Training model node +struct ModelHyperparameters; +typedef struct TrainModel { Plan plan; AlgorithmML algorithm; - int targetcol; - - // generic hyperparameters - OptimizerML optimizer; // default GD/mini-batch - int max_seconds; // 0 to disable - bool verbose; - int max_iterations; // maximum number of iterations - int batch_size; - double learning_rate; - double decay; // (0:1], learning rate decay - double tolerance; // [0:1], 0 means to run all iterations - int seed; // [0:N], random seed - - // for SVM - double lambda; // regularization strength -} GradientDescent; - -/* - * DB4AI k-means - */ - -/* - * current available distance functions - */ -typedef enum : uint32_t { - KMEANS_L1 = 0U, - KMEANS_L2, - KMEANS_L2_SQUARED, - KMEANS_LINF -} DistanceFunction; - -/* - * current available seeding method - */ -typedef enum : uint32_t { - KMEANS_RANDOM_SEED = 0U, - KMEANS_BB -} SeedingFunction; - -/* - * Verbosity level - */ -typedef enum : uint32_t { - NO_OUTPUT = 0U, - FASTCHECK_OUTPUT, - VERBOSE_OUTPUT -} Verbosity; - -/* - * description of the k-means instance - */ -struct KMeansDescription { - char const* model_name = nullptr; - SeedingFunction seeding = KMEANS_RANDOM_SEED; - DistanceFunction distance = KMEANS_L2_SQUARED; - Verbosity verbosity = NO_OUTPUT; - uint32_t n_features = 0U; - uint32_t batch_size = 0U; -}; - -/* - * current hyper-parameters - */ -struct KMeansHyperParameters { - uint32_t num_centroids = 0U; - uint32_t num_iterations = 0U; - uint64_t external_seed = 0ULL; - double tolerance = 0.00001; -}; - -/* - * the actual k-means operator - */ -typedef struct KMeans { - Plan plan; - AlgorithmML algorithm; - KMeansDescription description; - KMeansHyperParameters parameters; -} KMeans; + int configurations; // 1..N configurations for HPO + const ModelHyperparameters **hyperparameters; // one for each configuration + MemoryContext cxt; // to store models +} TrainModel; #endif /* PLANNODES_H */ diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index 7fc1d71f3..c287c0a9c 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -136,6 +136,7 @@ typedef struct IntoClause { */ typedef struct Expr { NodeTag type; + double selec; } Expr; /* @@ -1419,7 +1420,8 @@ typedef struct UpsertExpr { /* DUPLICATE KEY UPDATE */ List* updateTlist; /* List of UPDATE TargetEntrys */ List* exclRelTlist; /* tlist of the 'EXCLUDED' pseudo relation */ - int exclRelIndex; /* RT index of 'EXCLUDED' relation */ + int exclRelIndex; /* RT index of 'EXCLUDED' relation */ + Node* upsertWhere; /* Qualifiers for upsert's update clause to check */ } UpsertExpr; /* @@ -1428,35 +1430,62 @@ typedef struct UpsertExpr { #define DB4AI_SNAPSHOT_VERSION_DELIMITER 1 #define DB4AI_SNAPSHOT_VERSION_SEPARATOR 2 +typedef enum MetricML{ + // classifier + METRIC_ML_ACCURACY, + METRIC_ML_F1, + METRIC_ML_PRECISION, + METRIC_ML_RECALL, + // General purpose + METRIC_ML_LOSS, + // regression + METRIC_ML_MSE, + // distance + METRIC_ML_DISTANCE_L1, + METRIC_ML_DISTANCE_L2, + METRIC_ML_DISTANCE_L2_SQUARED, + METRIC_ML_DISTANCE_L_INF, + // xgboost + METRIC_ML_AUC, // area under curve + METRIC_ML_AUC_PR, // area under PR curve + METRIC_ML_MAP, // mean avg. precision + METRIC_ML_RMSE, // root mean square err + METRIC_ML_RMSLE, // root mean square log err + METRIC_ML_MAE, // mean abs. 
err + METRIC_ML_INVALID, +} MetricML; + typedef enum { - LOGISTIC_REGRESSION, + TYPE_BOOL = 0, + TYPE_BYTEA, + TYPE_INT32, + TYPE_INT64, + TYPE_FLOAT32, + TYPE_FLOAT64, + TYPE_FLOAT64ARRAY, + TYPE_NUMERIC, + TYPE_TEXT, + TYPE_VARCHAR, + TYPE_INVALID_PREDICTION, +} PredictionType; + +typedef enum { + // algorithms implemented through the generic API + LOGISTIC_REGRESSION = 0, SVM_CLASSIFICATION, - KMEANS, LINEAR_REGRESSION, + PCA, + KMEANS, + XG_REG_LOGISTIC, + XG_BIN_LOGISTIC, + XG_REG_SQE, // regression with squared error + XG_REG_GAMMA, + MULTICLASS, + + // for internal use INVALID_ALGORITHM_ML, + + } AlgorithmML; -typedef enum GradientDescentExprField { - // generic - GD_EXPR_ALGORITHM, - GD_EXPR_OPTIMIZER, - GD_EXPR_RESULT_TYPE, - GD_EXPR_NUM_ITERATIONS, - GD_EXPR_EXEC_TIME_MSECS, - GD_EXPR_PROCESSED_TUPLES, - GD_EXPR_DISCARDED_TUPLES, - GD_EXPR_WEIGHTS, - GD_EXPR_CATEGORIES, - // scores - GD_EXPR_SCORE = 0x10000, // or-ed with the score id -} GradientDescentExprField; - -#define makeGradientDescentExprFieldScore(_SCORE) (GradientDescentExprField)((int)GD_EXPR_SCORE | _SCORE) - -typedef struct GradientDescentExpr { - Expr xpr; - GradientDescentExprField field; - Oid fieldtype; /* pg_type OID of the datatype */ -} GradientDescentExpr; - #endif /* PRIMNODES_H */ diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index 230753593..c177f38ee 100755 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -4,9 +4,9 @@ * Definitions for planner's internal data structures. * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/nodes/relation.h * @@ -1348,6 +1348,7 @@ typedef struct HashPath { List* path_hashclauses; /* join clauses used for hashing */ int num_batches; /* number of batches expected */ OpMemInfo mem_info; /* Mem info for hash table */ + double joinRows; } HashPath; #ifdef PGXC @@ -1619,9 +1620,6 @@ typedef struct RestrictInfo { /* cache space for hashclause processing; -1 if not yet set */ BucketSize left_bucketsize; /* avg bucketsize of left side */ BucketSize right_bucketsize; /* avg bucketsize of right side */ - - /* Is this restrict info converted from index matching process */ - bool converted; } RestrictInfo; /* diff --git a/src/include/opfusion/opfusion_util.h b/src/include/opfusion/opfusion_util.h index 322536c3a..0148d4f37 100644 --- a/src/include/opfusion/opfusion_util.h +++ b/src/include/opfusion/opfusion_util.h @@ -181,6 +181,7 @@ const Oid function_id[] = { 2089, /* convert int4 number to hex */ 2090, /* convert int8 number to hex */ 2617, /* ceiling */ + 3180, /* convert int2 to boolean */ 3192, /* convert int4 to bpchar */ 3207, /* convert text to timestamp without time zone */ 3811, /* convert int4 to money */ diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h index c9e9692fc..fa0103c76 100644 --- a/src/include/optimizer/clauses.h +++ b/src/include/optimizer/clauses.h @@ -4,9 +4,9 @@ * prototypes for clauses.c. 
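The function_id[] whitelist a few hunks above gates the opfusion bypass executor; adding OID 3180 keeps an int2-to-boolean coercion on the fast path instead of forcing a fallback to the full executor. A sketch of the membership test such a whitelist implies (the helper name is hypothetical; since the array is kept in ascending OID order, a binary search would do equally well):

```
static bool FuncIdInBypassWhitelistSketch(Oid funcid)
{
    const size_t n = sizeof(function_id) / sizeof(function_id[0]);
    for (size_t i = 0; i < n; i++) {
        if (function_id[i] == funcid) {
            return true;    /* expression stays eligible for the bypass path */
        }
    }
    return false;
}
```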
* * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/optimizer/clauses.h * @@ -22,6 +22,12 @@ #define is_opclause(clause) ((clause) != NULL && IsA(clause, OpExpr)) #define is_funcclause(clause) ((clause) != NULL && IsA(clause, FuncExpr)) +#ifdef ENABLE_MULTIPLE_NODES +#define AFORMAT_NULL_TEST_MODE false +#else +#define AFORMAT_NULL_TEST_MODE (u_sess->attr.attr_sql.sql_compatibility == A_FORMAT && AFORMAT_NULL_TEST) +#endif + typedef struct { int numWindowFuncs; /* total number of WindowFuncs found */ Index maxWinRef; /* windowFuncs[] is indexed 0 .. maxWinRef */ @@ -124,4 +130,6 @@ static inline void ExcludeRownumExpr(ParseState* pstate, Node* expr) extern List* get_quals_lists(Node *jtnode); +extern bool isTableofType(Oid typeOid, Oid* base_oid, Oid* indexbyType); + #endif /* CLAUSES_H */ diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h index 8f69117ac..38598fe60 100644 --- a/src/include/optimizer/cost.h +++ b/src/include/optimizer/cost.h @@ -302,6 +302,7 @@ private: void group_clauselist(List* clauses); void group_clauselist_groupby(List* varinfos); void init_candidate(es_candidate* es) const; + bool IsEsCandidateInEqClass(es_candidate *es, EquivalenceClass *ec); void load_eqsel_clause(RestrictInfo* clause); void load_eqjoinsel_clause(RestrictInfo* clause); Bitmapset* make_attnums_by_clause_map(es_candidate* es, Bitmapset* attnums, bool left) const; diff --git a/src/include/optimizer/nodegroups.h b/src/include/optimizer/nodegroups.h index 43ff1e775..177dff0fe 100644 --- a/src/include/optimizer/nodegroups.h +++ b/src/include/optimizer/nodegroups.h @@ -280,7 +280,7 @@ extern bool ng_is_distribute_key_valid(PlannerInfo* root, List* distribute_key, extern void ngroup_info_hash_create(); extern Bitmapset* ngroup_info_hash_search(Oid ngroup_oid); extern void ngroup_info_hash_insert(Oid ngroup_oid, Bitmapset * bms_node_ids); -extern void ngroup_info_hash_delete(Oid ngroup_oid); +extern void ngroup_info_hash_delete(Oid ngroup_oid, bool is_destory = false); extern void ngroup_info_hash_destory(void); diff --git a/src/include/optimizer/orclauses.h b/src/include/optimizer/orclauses.h deleted file mode 100644 index dd8d35a6a..000000000 --- a/src/include/optimizer/orclauses.h +++ /dev/null @@ -1,21 +0,0 @@ -/*------------------------------------------------------------------------- - * - * orclauses.h - * prototypes for orclauses.cpp. 
- * - * - * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/optimizer/orclauses.h - * - *------------------------------------------------------------------------- - */ -#ifndef ORCLAUSES_H -#define ORCLAUSES_H - -#include "nodes/relation.h" - -extern void extract_restriction_or_clauses(PlannerInfo *root); - -#endif /* ORCLAUSES_H */ diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h index 6a5ddc25b..5325f638a 100644 --- a/src/include/optimizer/paths.h +++ b/src/include/optimizer/paths.h @@ -70,7 +70,6 @@ extern void expand_indexqual_conditions( extern void check_partial_indexes(PlannerInfo* root, RelOptInfo* rel); extern Expr* adjust_rowcompare_for_index( RowCompareExpr* clause, IndexOptInfo* index, int indexcol, List** indexcolnos, bool* var_on_left_p); -extern int compute_parallel_worker(const RelOptInfo *rel, double heap_pages, int rel_maxworker); /* * Check index path whether use global partition index to scan */ diff --git a/src/include/optimizer/pgxcship.h b/src/include/optimizer/pgxcship.h index 0f7282cde..752571d5b 100644 --- a/src/include/optimizer/pgxcship.h +++ b/src/include/optimizer/pgxcship.h @@ -99,6 +99,9 @@ extern bool pgxc_find_nonshippable_row_trig( extern bool pgxc_query_contains_foreign_table(List* rtable); extern bool pgxc_check_dynamic_param(List* dynamicExpr, ParamListInfo params); +/* check junk tlist */ +extern bool check_replicated_junktlist(Query* subquery); + /* For online expansion, we need some user defined function to be shippable to DN */ extern bool redis_func_shippable(Oid funcid); #ifdef ENABLE_MULTIPLE_NODES diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h index 34435264c..e50b9da4a 100644 --- a/src/include/optimizer/plancat.h +++ b/src/include/optimizer/plancat.h @@ -4,9 +4,9 @@ * prototypes for plancat.c. 
* * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/optimizer/plancat.h * diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h index 78184b261..2aa846209 100644 --- a/src/include/optimizer/planner.h +++ b/src/include/optimizer/planner.h @@ -113,6 +113,21 @@ typedef struct RewriteVarMapping { bool need_fix; /* the var is needed to fix when create plan */ } RewriteVarMapping; +typedef struct VectorPlanContext { + bool containRowTable; + bool forceVectorEngine; + bool currentExprIsFilter; + Cost rowCost; + Cost vecCost; +} VectorPlanContext; + +typedef struct VectorExprContext { + double rows; + double lefttreeRows; + VectorPlanContext* planContext; + List* varList; +} VectorExprContext; + extern MemoryContext SwitchToPlannerTempMemCxt(PlannerInfo *root); extern MemoryContext ResetPlannerTempMemCxt(PlannerInfo *root, MemoryContext cxt); extern void fix_vars_plannode(PlannerInfo* root, Plan* plan); @@ -127,8 +142,9 @@ extern List* get_distributekey_from_tlist( PlannerInfo* root, List* tlist, List* groupcls, double rows, double* result_multiple, void* skew_info = NULL); extern Plan* try_vectorize_plan(Plan* top_plan, Query* parse, bool from_subplan, PlannerInfo* subroot = NULL); extern bool is_vector_scan(Plan* plan); +extern bool CheckColumnsSuportedByBatchMode(List* targetList, List *qual); -extern bool vector_engine_unsupport_expression_walker(Node* node); +extern bool vector_engine_unsupport_expression_walker(Node* node, VectorPlanContext* planContext = NULL); extern void adjust_all_pathkeys_by_agg_tlist(PlannerInfo* root, List* tlist, WindowLists* wflists); extern void get_multiple_from_exprlist(PlannerInfo* root, List* exprList, double rows, bool* useskewmultiple, @@ -189,6 +205,6 @@ extern List* get_plan_list(Plan* plan); extern RelOptInfo* build_alternative_rel(const RelOptInfo* origin, RTEKind rtekind); extern Plan* get_foreign_scan(Plan* plan); extern uint64 adjust_plsize(Oid relid, uint64 plan_width, uint64 pl_size, uint64* width); -extern int plan_create_index_workers(Oid tableOid); +extern bool check_stream_for_loop_fetch(Portal portal); #endif /* PLANNER_H */ diff --git a/src/include/optimizer/planswcb.h b/src/include/optimizer/planswcb.h index 984744947..4ace424ba 100644 --- a/src/include/optimizer/planswcb.h +++ b/src/include/optimizer/planswcb.h @@ -4,9 +4,9 @@ * prototypes for plan_startwith.cpp * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/optimizer/planswcb.h * @@ -111,4 +111,4 @@ extern StartWithCTEPseudoReturnColumns g_StartWithCTEPseudoReturnColumns[]; #define STARTWITH_PSEUDO_RETURN_ATTNUMS 4 -#endif /* PLANSWCB_H */ +#endif /* PLANSWCB_H */ \ No newline at end of file diff --git a/src/include/optimizer/prep.h b/src/include/optimizer/prep.h index 8998c9c4e..9e7f459eb 100755 --- a/src/include/optimizer/prep.h +++ b/src/include/optimizer/prep.h @@ -4,9 +4,9 @@ * prototypes for files in optimizer/prep/ * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * 
Portions Copyright (c) 2021, openGauss Contributors * * src/include/optimizer/prep.h * diff --git a/src/include/optimizer/streamplan.h b/src/include/optimizer/streamplan.h index 2920eb8aa..478da8448 100644 --- a/src/include/optimizer/streamplan.h +++ b/src/include/optimizer/streamplan.h @@ -130,6 +130,7 @@ extern ExecNodes* stream_merge_exec_nodes(Plan* lefttree, Plan* righttree, bool extern ExecNodes* get_all_data_nodes(char locatortype); extern void pushdown_execnodes(Plan* plan, ExecNodes* exec_nodes, bool add_node = false, bool only_nodelist = false); extern void stream_join_plan(PlannerInfo* root, Plan* join_plan, JoinPath* join_path); +extern void disaster_read_array_init(); extern NodeDefinition* get_all_datanodes_def(); extern List* distributeKeyIndex(PlannerInfo* root, List* distributed_keys, List* targetlist); extern List* make_groupcl_for_append(PlannerInfo* root, List* targetlist); diff --git a/src/include/optimizer/var.h b/src/include/optimizer/var.h index c0b8c9d3a..650d9e4a7 100644 --- a/src/include/optimizer/var.h +++ b/src/include/optimizer/var.h @@ -77,4 +77,5 @@ extern List* check_random_expr(Node* node); extern List* check_subplan_expr(Node* node, bool recurseSubPlan = false); extern bool check_varno(Node* qual, int varno, int varlevelsup); extern List* check_vartype(Node* node); +extern Node *LocateOpExprLeafVar(Node *node); #endif /* VAR_H */ diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h index bf93168b6..3fdfd660e 100644 --- a/src/include/parser/analyze.h +++ b/src/include/parser/analyze.h @@ -42,7 +42,8 @@ extern Query* transformStmt(ParseState* pstate, Node* parseTree, bool isFirstNod extern bool analyze_requires_snapshot(Node* parseTree); extern void CheckSelectLocking(Query* qry); -extern void applyLockingClause(Query* qry, Index rtindex, LockClauseStrength strength, bool noWait, bool pushedDown); +extern void applyLockingClause(Query* qry, Index rtindex, LockClauseStrength strength, bool noWait, bool pushedDown, + int waitSec); #ifdef ENABLE_MOT extern void CheckTablesStorageEngine(Query* qry, StorageEngineType* type); extern bool CheckMotIndexedColumnUpdate(Query* qry); @@ -99,4 +100,5 @@ extern void fixResTargetListWithTableNameRef(Relation rd, RangeVar* rel, List* c extern bool getOperatorPlusFlag(); + #endif /* ANALYZE_H */ diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index 97fa193fb..e915b4229 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -89,6 +89,7 @@ PG_KEYWORD("byteawithoutorderwithequal", BYTEAWITHOUTORDERWITHEQUAL, COL_NAME_KE PG_KEYWORD("cache", CACHE, UNRESERVED_KEYWORD) PG_KEYWORD("call", CALL, UNRESERVED_KEYWORD) PG_KEYWORD("called", CALLED, UNRESERVED_KEYWORD) +PG_KEYWORD("cancelable", CANCELABLE, UNRESERVED_KEYWORD) PG_KEYWORD("cascade", CASCADE, UNRESERVED_KEYWORD) PG_KEYWORD("cascaded", CASCADED, UNRESERVED_KEYWORD) PG_KEYWORD("case", CASE, RESERVED_KEYWORD) @@ -128,7 +129,7 @@ PG_KEYWORD("compress", COMPRESS, UNRESERVED_KEYWORD) PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("condition", CONDITION, UNRESERVED_KEYWORD) PG_KEYWORD("configuration", CONFIGURATION, UNRESERVED_KEYWORD) -PG_KEYWORD("connect", CONNECT, RESERVED_KEYWORD) +PG_KEYWORD("connect", CONNECT, UNRESERVED_KEYWORD) PG_KEYWORD("connection", CONNECTION, UNRESERVED_KEYWORD) PG_KEYWORD("constant", CONSTANT, UNRESERVED_KEYWORD) PG_KEYWORD("constraint", CONSTRAINT, RESERVED_KEYWORD) @@ -420,6 +421,7 @@ PG_KEYWORD("overlay", OVERLAY, COL_NAME_KEYWORD) PG_KEYWORD("owned", 
OWNED, UNRESERVED_KEYWORD) PG_KEYWORD("owner", OWNER, UNRESERVED_KEYWORD) PG_KEYWORD("package", PACKAGE, UNRESERVED_KEYWORD) +PG_KEYWORD("packages", PACKAGES, UNRESERVED_KEYWORD) PG_KEYWORD("parser", PARSER, UNRESERVED_KEYWORD) PG_KEYWORD("partial", PARTIAL, UNRESERVED_KEYWORD) PG_KEYWORD("partition", PARTITION, UNRESERVED_KEYWORD) @@ -448,7 +450,7 @@ PG_KEYWORD("prepare", PREPARE, UNRESERVED_KEYWORD) PG_KEYWORD("prepared", PREPARED, UNRESERVED_KEYWORD) PG_KEYWORD("preserve", PRESERVE, UNRESERVED_KEYWORD) PG_KEYWORD("primary", PRIMARY, RESERVED_KEYWORD) -PG_KEYWORD("prior", PRIOR, RESERVED_KEYWORD) +PG_KEYWORD("prior", PRIOR, UNRESERVED_KEYWORD) PG_KEYWORD("priorer", PRIORER, RESERVED_KEYWORD) PG_KEYWORD("private", PRIVATE, UNRESERVED_KEYWORD) PG_KEYWORD("privilege", PRIVILEGE, UNRESERVED_KEYWORD) @@ -531,7 +533,7 @@ PG_KEYWORD("share", SHARE, UNRESERVED_KEYWORD) PG_KEYWORD("shippable", SHIPPABLE, UNRESERVED_KEYWORD) PG_KEYWORD("show", SHOW, UNRESERVED_KEYWORD) PG_KEYWORD("shutdown", SHUTDOWN, UNRESERVED_KEYWORD) -PG_KEYWORD("siblings", SIBLINGS, RESERVED_KEYWORD) +PG_KEYWORD("siblings", SIBLINGS, UNRESERVED_KEYWORD) PG_KEYWORD("similar", SIMILAR, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("simple", SIMPLE, UNRESERVED_KEYWORD) PG_KEYWORD("size", SIZE, UNRESERVED_KEYWORD) @@ -548,7 +550,7 @@ PG_KEYWORD("spill", SPILL, UNRESERVED_KEYWORD) PG_KEYWORD("split", SPLIT, UNRESERVED_KEYWORD) PG_KEYWORD("stable", STABLE, UNRESERVED_KEYWORD) PG_KEYWORD("standalone", STANDALONE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("start", START, RESERVED_KEYWORD) +PG_KEYWORD("start", START, UNRESERVED_KEYWORD) PG_KEYWORD("statement", STATEMENT, UNRESERVED_KEYWORD) PG_KEYWORD("statement_id", STATEMENT_ID, UNRESERVED_KEYWORD) PG_KEYWORD("statistics", STATISTICS, UNRESERVED_KEYWORD) @@ -638,6 +640,7 @@ PG_KEYWORD("verify", VERIFY, RESERVED_KEYWORD) PG_KEYWORD("version", VERSION_P, UNRESERVED_KEYWORD) PG_KEYWORD("view", VIEW, UNRESERVED_KEYWORD) PG_KEYWORD("volatile", VOLATILE, UNRESERVED_KEYWORD) +PG_KEYWORD("wait", WAIT, UNRESERVED_KEYWORD) PG_KEYWORD("weak", WEAK, UNRESERVED_KEYWORD) PG_KEYWORD("when", WHEN, RESERVED_KEYWORD) PG_KEYWORD("where", WHERE, RESERVED_KEYWORD) diff --git a/src/include/parser/parse_coerce.h b/src/include/parser/parse_coerce.h index 0be89eb0c..4c807a150 100644 --- a/src/include/parser/parse_coerce.h +++ b/src/include/parser/parse_coerce.h @@ -6,6 +6,7 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2021, openGauss Contributors * * src/include/parser/parse_coerce.h * diff --git a/src/include/parser/parse_hint.h b/src/include/parser/parse_hint.h index b17496758..88a3c009b 100644 --- a/src/include/parser/parse_hint.h +++ b/src/include/parser/parse_hint.h @@ -50,6 +50,7 @@ #define HINT_TRUE "True" #define HINT_FALSE "False" #define HINT_PRED_PUSH "Predpush" +#define HINT_PRED_PUSH_SAME_LEVEL "Predpush_Same_Level" #define HINT_REWRITE "Rewrite_rule" #define HINT_GATHER "Gather" #define HINT_NO_EXPAND "No_expand" @@ -96,6 +97,7 @@ typedef enum HintKeyword { HINT_KEYWORD_INDEXONLYSCAN, HINT_KEYWORD_SKEW, HINT_KEYWORD_PREDPUSH, + HINT_KEYWORD_PREDPUSH_SAME_LEVEL, HINT_KEYWORD_REWRITE, HINT_KEYWORD_GATHER, HINT_KEYWORD_NO_EXPAND, @@ -238,6 +240,14 @@ typedef struct PredpushHint { Relids candidates; /* which one will be push down */ } PredpushHint; +typedef struct PredpushSameLevelHint { + Hint base; /* base hint */ + bool negative; + char *dest_name; + int dest_id; + Relids candidates; 
/* which one will be push down */ +} PredpushSameLevelHint; + /* Enable/disable rewrites with hint */ typedef struct RewriteHint { Hint base; /* base hint */ @@ -300,6 +310,7 @@ extern void HintStateDelete(HintState* hintState); extern bool permit_predpush(PlannerInfo *root); extern bool permit_from_rewrite_hint(PlannerInfo *root, unsigned int params); extern Relids predpush_candidates_same_level(PlannerInfo *root); +extern bool is_predpush_same_level_matched(PredpushSameLevelHint* hint, Relids relids, ParamPathInfo* ppi); extern bool permit_gather(PlannerInfo *root, GatherSource src = HINT_GATHER_GUC); extern GatherSource get_gather_hint_source(PlannerInfo *root); extern bool check_set_hint_in_white_list(const char* name); diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index decdcb8d9..89aeabfc6 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -165,6 +165,8 @@ struct ParseState { int p_next_resno; /* next targetlist resno to assign */ List* p_locking_clause; /* raw FOR UPDATE/FOR SHARE info */ Node* p_value_substitute; /* what to replace VALUE with, if any */ + + /* Flags telling about things found in the query: */ bool p_hasAggs; bool p_hasWindowFuncs; bool p_hasSubLinks; @@ -195,6 +197,7 @@ struct ParseState { PreParseColumnRefHook p_pre_columnref_hook; PostParseColumnRefHook p_post_columnref_hook; PreParseColumnRefHook p_bind_variable_columnref_hook; + PreParseColumnRefHook p_bind_describe_hook; ParseParamRefHook p_paramref_hook; CoerceParamHook p_coerce_param_hook; CreateProcOperatorHook p_create_proc_operator_hook; @@ -202,6 +205,8 @@ struct ParseState { void* p_ref_hook_state; /* common passthrough link for above */ void* p_cl_hook_state; /* cl related state - SQLFunctionParseInfoPtr */ List* p_target_list; + void* p_bind_hook_state; + void* p_describeco_hook_state; /* * star flag info @@ -246,6 +251,12 @@ struct ParseState { * in SelectStmt. */ + bool use_level; /* When selecting a column with the same name in an RTE list, whether to consider the + * priority of RTE. + * The priority refers to the index of RTE in the list. The smaller the index value, the + * higher the priority. 
+ */ + PlusJoinRTEInfo* p_plusjoin_rte_info; /* The RTE info while processing "(+)" */ }; diff --git a/src/include/parser/parse_relation.h b/src/include/parser/parse_relation.h index c4fdebc91..22cb4c97b 100644 --- a/src/include/parser/parse_relation.h +++ b/src/include/parser/parse_relation.h @@ -56,6 +56,9 @@ extern int attnameAttNum(Relation rd, const char* attname, bool sysColOK); extern Name attnumAttName(Relation rd, int attid); extern Oid attnumTypeId(Relation rd, int attid); extern Oid attnumCollationId(Relation rd, int attid); +extern Oid getPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); +extern Oid getSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel, + Oid *partOid); #ifdef PGXC extern int specialAttNum(const char* attname); diff --git a/src/include/parser/parse_type.h b/src/include/parser/parse_type.h index 827064f72..0762e3dd2 100644 --- a/src/include/parser/parse_type.h +++ b/src/include/parser/parse_type.h @@ -5,8 +5,8 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/parser/parse_type.h * * ------------------------------------------------------------------------- @@ -45,7 +45,8 @@ extern Oid typeTypeCollation(Type typ); extern Datum stringTypeDatum(Type tp, char* string, int32 atttypmod); extern Oid typeidTypeRelid(Oid type_id); -extern bool IsTypeSupportedByCStore(_in_ Oid typeOid, _in_ int32 typeMod); +extern bool IsTypeSupportedByCStore(_in_ Oid typeOid); +extern bool CheckTypeSupportRowToVec(List* targetlist, int errLevel); extern bool IsTypeSupportedByORCRelation(_in_ Oid typeOid); extern bool IsTypeSupportedByTsStore(_in_ int kvtype, _in_ Oid typeOid); extern bool IsTypeSupportedByUStore (_in_ Oid typeOid, _in_ int32 typeMod); diff --git a/src/include/parser/parse_utilcmd.h b/src/include/parser/parse_utilcmd.h index 6212f1004..5036c050d 100644 --- a/src/include/parser/parse_utilcmd.h +++ b/src/include/parser/parse_utilcmd.h @@ -7,8 +7,8 @@ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/parser/parse_utilcmd.h * * ------------------------------------------------------------------------- @@ -77,6 +77,8 @@ extern Oid generateClonedIndex(Relation source_idx, Relation source_relation, ch extern void checkPartitionName(List* partitionList, bool isPartition = true); extern void checkSubPartitionName(List* partitionList); extern List* GetPartitionNameList(List* partitionList); +extern char* GetPartitionDefStateName(Node *partitionDefState); +extern NodeTag GetPartitionStateType(char type); extern Oid searchSeqidFromExpr(Node* cooked_default); extern bool is_start_end_def_list(List* def_list); diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index 8f6280ce1..eddb9b8e6 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -64,6 +64,9 @@ /* Define to 1 if you want to generate gauss product as multiple nodes. (--enable-multiple-nodes) */ #undef ENABLE_MULTIPLE_NODES +/* Define to 1 if you want to generate gauss product as lite mode. 
(--enable-lite-mode) */ +#undef ENABLE_LITE_MODE + /* Define to 1 if you want MOT support. (--enable-mot). Supported only in single_node mode, not supported with (--enable-multiple-nodes) */ #undef ENABLE_MOT @@ -735,6 +738,10 @@ #undef LLVM_MAJOR_VERSION #undef LLVM_MINOR_VERSION +/* Flex Version */ +#undef FLEX_MAJOR_VERSION +#undef FLEX_MINOR_VERSION + /* Define to 1 to allow profiling output to be saved separately for each process. */ #undef PROFILE_PID_DIR @@ -917,3 +924,5 @@ code using `volatile' can become incorrect without. Disable with care. */ #undef volatile +/* Define to 1 if openGauss is integrated into the openEuler system. (--with-openeuler-os) */ +#undef WITH_OPENEULER_OS \ No newline at end of file diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h index eca07429b..1135b8f38 100644 --- a/src/include/pg_config_manual.h +++ b/src/include/pg_config_manual.h @@ -6,9 +6,9 @@ * for developers. If you edit any of these, be sure to do a *full* * rebuild (and an initdb if noted). * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/pg_config_manual.h * ------------------------------------------------------------------------ @@ -268,6 +268,23 @@ #define MEMORY_CONTEXT_CHECKING #endif +/* + * Define this to track memory allocation info. + * MEMORY_CONTEXT_CHECKING is the debug macro used uniformly by memcontext; it takes effect only in the debug + * version. MEMORY_CONTEXT_TRACK takes effect in both the debug and release versions. + * In the release version, if the ENABLE_LITE_MODE or __USE_NUMA macro is defined, MEMORY_CONTEXT_TRACK is disabled. + */ +#ifndef MEMORY_CONTEXT_CHECKING +#ifndef ENABLE_LITE_MODE +#ifndef __USE_NUMA +#define MEMORY_CONTEXT_TRACK +#endif +#endif +#else +#define MEMORY_CONTEXT_TRACK +#endif + + /* * Define this to cause palloc()'d memory to be filled with random data, to * facilitate catching code that depends on the contents of uninitialized @@ -304,6 +321,14 @@ */ #define TRACE_SORT 1 +/* + * Max iterations to trace at the beginning / end of a + * start-with-connect-by recursive query + */ +#define SW_LOG_ROWS_HALF 10 +/* Full Size = First SW_LOG_ROWS_HALF + Last SW_LOG_ROWS_HALF */ +#define SW_LOG_ROWS_FULL (SW_LOG_ROWS_HALF + SW_LOG_ROWS_HALF) + /* * Enable tracing of syncscan operations (see also the trace_syncscan GUC var). 
* define TRACE_SYNCSCAN diff --git a/src/include/pgaudit.h b/src/include/pgaudit.h index 54b32dc2c..76b7eb967 100644 --- a/src/include/pgaudit.h +++ b/src/include/pgaudit.h @@ -44,8 +44,15 @@ extern HANDLE sysauditPipe[2]; #endif extern ThreadId pgaudit_start(void); +extern void pgaudit_start_all(void); +extern void pgaudit_stop_all(void); extern void allow_immediate_pgaudit_restart(void); +// multi-thread audit +extern void audit_process_cxt_init(void); +extern void audit_process_cxt_exit(); +extern int audit_load_thread_index(void); + #ifdef EXEC_BACKEND extern void PgAuditorMain(); #endif @@ -98,8 +105,13 @@ typedef enum { AUDIT_POLICY_EVENT, MASKING_POLICY_EVENT, SECURITY_EVENT, - AUDIT_DDL_SEQUENCE, - AUDIT_DDL_KEY // ddl_sequence in struct AuditTypeDescs + AUDIT_DDL_SEQUENCE, // ddl_sequence in struct AuditTypeDescs + AUDIT_DDL_KEY, + AUDIT_DDL_PACKAGE, + AUDIT_DDL_MODEL, + AUDIT_DDL_GLOBALCONFIG, + AUDIT_DDL_PUBLICATION_SUBSCRIPTION, + AUDIT_DDL_FOREIGN_DATA_WRAPPER } AuditType; /* keep the same sequence with parameter audit_system_object */ @@ -124,19 +136,55 @@ typedef enum { DDL_DIRECTORY, DDL_SYNONYM, DDL_SEQUENCE, - DDL_KEY + DDL_KEY, + DDL_PACKAGE, + DDL_MODEL, + DDL_PUBLICATION_SUBSCRIPTION, + DDL_GLOBALCONFIG, + DDL_FOREIGN_DATA_WRAPPER } DDLType; +/* + * Brief : the string field number in audit record + * Description : + */ +typedef enum { + AUDIT_USER_ID = 0, + AUDIT_USER_NAME, + AUDIT_DATABASE_NAME, + AUDIT_CLIENT_CONNINFO, + AUDIT_OBJECT_NAME, + AUDIT_DETAIL_INFO, + AUDIT_NODENAME_INFO, + AUDIT_THREADID_INFO, + AUDIT_LOCALPORT_INFO, + AUDIT_REMOTEPORT_INFO +} AuditStringFieldNum; + +struct AuditElasticEvent { + const char* aDataType; + const char* aDataResult; + const char* auditUserId; + const char* auditUserName; + const char* auditDatabaseName; + const char* clientConnInfo; + const char* objectName; + const char* detailInfo; + const char* nodeNameInfo; + const char* threadIdInfo; + const char* localPortInfo; + const char* remotePortInfo; + long long eventTime; +}; + typedef enum { AUDIT_UNKNOWN = 0, AUDIT_OK, AUDIT_FAILED } AuditResult; - typedef enum { AUDIT_FUNC_QUERY = 0, AUDIT_FUNC_DELETE } AuditFuncType; - typedef enum { STD_AUDIT_TYPE = 0, UNIFIED_AUDIT_TYPE } AuditClassType; + extern void audit_report(AuditType type, AuditResult result, const char* object_name, const char* detail_info, AuditClassType ctype = STD_AUDIT_TYPE); - extern Datum pg_query_audit(PG_FUNCTION_ARGS); - extern Datum pg_delete_audit(PG_FUNCTION_ARGS); +extern bool pg_auditor_thread(ThreadId pid); /* define a macro about the return value of security function */ #define check_intval(errno, express, retval, file, line) \ diff --git a/src/include/pgstat.h b/src/include/pgstat.h index ede5ac8ba..bb92b5a66 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -37,6 +37,13 @@ /* Values for track_functions GUC variable --- order is significant! */ typedef enum TrackFunctionsLevel { TRACK_FUNC_OFF, TRACK_FUNC_PL, TRACK_FUNC_ALL } TrackFunctionsLevel; +/* ---------- + * Paths for the statistics files (relative to installation's $PGDATA). 
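+ * With the default layout these resolve to $PGDATA/global/pgstat.stat and
+ * its temporary sibling $PGDATA/global/pgstat.tmp (defined just below).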
+ * ---------- + */ +#define PGSTAT_STAT_PERMANENT_FILENAME "global/pgstat.stat" +#define PGSTAT_STAT_PERMANENT_TMPFILE "global/pgstat.tmp" + /* ---------- * The types of backend -> collector messages * ---------- @@ -131,7 +138,7 @@ typedef struct PgStat_TableCounts { #ifdef DEBUG_UHEAP -typedef int64 UHeapStat_Counter; +typedef uint64 UHeapStat_Counter; #define MAX_TYPE_GET_TRANSSLOT_FROM 8 #define MAX_TYPE_XID_IN_PROGRESS 7 @@ -872,7 +879,11 @@ typedef struct PgStat_MsgBadBlock { BadBlockHashEnt m_entry[PGSTAT_NUM_BADBLOCK_ENTRIES]; } PgStat_MsgBadBlock; +#ifndef ENABLE_LITE_MODE const int MAX_SQL_RT_INFO_COUNT = 100000; +#else +const int MAX_SQL_RT_INFO_COUNT = 10; +#endif typedef struct SqlRTInfo { uint64 UniqueSQLId; @@ -1165,6 +1176,11 @@ typedef enum WaitState { STATE_WAIT_DATASYNC, STATE_WAIT_DATASYNC_QUEUE, STATE_WAIT_FLUSH_DATA, + STATE_WAIT_RESERVE_TD, + STATE_WAIT_TD_ROLLBACK, + STATE_WAIT_TRANSACTION_ROLLBACK, + STATE_PRUNE_TABLE, + STATE_PRUNE_INDEX, STATE_STREAM_WAIT_CONNECT_NODES, STATE_STREAM_WAIT_PRODUCER_READY, STATE_STREAM_WAIT_THREAD_SYNC_QUIT, @@ -1172,6 +1188,7 @@ typedef enum WaitState { STATE_WAIT_ACTIVE_STATEMENT, STATE_WAIT_MEMORY, STATE_EXEC_SORT, + STATE_EXEC_SORT_FETCH_TUPLE, STATE_EXEC_SORT_WRITE_FILE, STATE_EXEC_MATERIAL, STATE_EXEC_MATERIAL_WRITE_FILE, @@ -1205,8 +1222,13 @@ typedef enum WaitState { STATE_GTM_SEQUENCE_SET_VAL, STATE_GTM_DROP_SEQUENCE, STATE_GTM_RENAME_SEQUENCE, + STATE_GTM_SET_DISASTER_CLUSTER, + STATE_GTM_GET_DISASTER_CLUSTER, + STATE_GTM_DEL_DISASTER_CLUSTER, STATE_WAIT_SYNC_CONSUMER_NEXT_STEP, STATE_WAIT_SYNC_PRODUCER_NEXT_STEP, + STATE_GTM_SET_CONSISTENCY_POINT, + STATE_WAIT_SYNC_BGWORKERS, STATE_WAIT_NUM // MUST be last, DO NOT use this value. } WaitState; @@ -1283,8 +1305,9 @@ typedef enum WaitEventIO { WAIT_EVENT_UNDO_FILE_PREFETCH, WAIT_EVENT_UNDO_FILE_READ, WAIT_EVENT_UNDO_FILE_WRITE, - WAIT_EVENT_UNDO_FILE_FLUSH, WAIT_EVENT_UNDO_FILE_SYNC, + WAIT_EVENT_UNDO_FILE_UNLINK, + WAIT_EVENT_UNDO_META_SYNC, WAIT_EVENT_WAL_BOOTSTRAP_SYNC, WAIT_EVENT_WAL_BOOTSTRAP_WRITE, WAIT_EVENT_WAL_COPY_READ, @@ -1313,8 +1336,6 @@ typedef enum WaitEventIO { WAIT_EVENT_OBS_READ, WAIT_EVENT_OBS_WRITE, WAIT_EVENT_LOGCTRL_SLEEP, - WAIT_EVENT_COMPRESS_ADDRESS_FILE_FLUSH, - WAIT_EVENT_COMPRESS_ADDRESS_FILE_SYNC, IO_EVENT_NUM = WAIT_EVENT_LOGCTRL_SLEEP - WAIT_EVENT_BUFFILE_READ + 1 // MUST be last, DO NOT use this value. 
} WaitEventIO; @@ -1577,6 +1598,7 @@ typedef struct PgBackendStatus { volatile uint64 st_block_sessionid; /* block session */ syscalllock statement_cxt_lock; /* mutex for statement context(between session and statement flush thread) */ void* statement_cxt; /* statement context of full sql */ + knl_u_trace_context trace_cxt; /* request trace id */ } PgBackendStatus; typedef struct PgBackendStatusNode { @@ -1728,6 +1750,7 @@ extern void pgstat_report_conninfo(const char* conninfo); extern void pgstat_report_xact_timestamp(TimestampTz tstamp); extern void pgstat_report_waiting_on_resource(WorkloadManagerEnqueueState waiting); extern void pgstat_report_queryid(uint64 queryid); +extern void pgstat_report_unique_sql_id(bool resetUniqueSql); extern void pgstat_report_global_session_id(GlobalSessionId globalSessionId); extern void pgstat_report_jobid(uint64 jobid); extern void pgstat_report_parent_sessionid(uint64 sessionid, uint32 level = 0); @@ -1735,6 +1758,7 @@ extern void pgstat_report_smpid(uint32 smpid); extern void pgstat_report_blocksid(void* waitLockThrd, uint64 blockSessionId); +extern void pgstat_report_trace_id(knl_u_trace_context *trace_cxt, bool is_report_trace_id = false); extern bool pgstat_get_waitlock(uint32 wait_event_info); extern const char* pgstat_get_wait_event(uint32 wait_event_info); extern const char* pgstat_get_backend_current_activity(ThreadId pid, bool checkUser); @@ -2720,6 +2744,7 @@ extern void pgstat_set_io_state(WorkloadManagerIOState iostate); extern void pgstat_set_stmt_tag(WorkloadManagerStmtTag stmttag); extern ThreadId* pgstat_get_user_io_entry(Oid userid, int* num); extern ThreadId* pgstat_get_stmttag_write_entry(int* num); +extern PgBackendStatusNode* pgstat_get_backend_status_by_appname(const char* appName, int* num); extern List* pgstat_get_user_backend_entry(Oid userid); extern void pgstat_reset_current_status(void); extern WaitInfo* read_current_instr_wait_info(void); @@ -2738,6 +2763,7 @@ extern TableDistributionInfo* get_rto_stat(TupleDesc tuple_desc); extern TableDistributionInfo* get_recovery_stat(TupleDesc tuple_desc); extern TableDistributionInfo* streaming_hadr_get_recovery_stat(TupleDesc tuple_desc); extern TableDistributionInfo* get_remote_node_xid_csn(TupleDesc tuple_desc); +extern TableDistributionInfo* get_remote_index_status(TupleDesc tuple_desc, const char *schname, const char *idxname); #define SessionMemoryArraySize (BackendStatusArray_size) @@ -2826,6 +2852,7 @@ extern void XLogStatShmemInit(void); extern Size XLogStatShmemSize(void); extern bool CheckUserExist(Oid userId, bool removeCount); +extern void FreeBackendStatusNodeMemory(PgBackendStatusNode* node); extern PgBackendStatusNode* gs_stat_read_current_status(uint32* maxCalls); extern uint32 gs_stat_read_current_status(Tuplestorestate *tupStore, TupleDesc tupDesc, FuncType insert, bool hasTID = false, ThreadId threadId = 0); @@ -2924,5 +2951,23 @@ typedef struct IoWaitStatGlobalInfo { int io_wait_list_len; } IoWaitStatGlobalInfo; +void pgstat_release_session_memory_entry(); + +#define MAX_PATH 256 + +typedef struct BadBlockKey { + RelFileNode relfilenode; + ForkNumber forknum; + uint32 blocknum; +} BadBlockKey; + +typedef struct BadBlockEntry { + BadBlockKey key; + char path[MAX_PATH]; + TimestampTz check_time; + TimestampTz repair_time; + XLogPhyBlock pblk; +} BadBlockEntry; + #endif /* PGSTAT_H */ diff --git a/src/include/pgxc/barrier.h b/src/include/pgxc/barrier.h index c408476fc..efaf30f5c 100755 --- a/src/include/pgxc/barrier.h +++ b/src/include/pgxc/barrier.h @@ -20,9 +20,11 
@@ #include "lib/stringinfo.h" #define CREATE_BARRIER_PREPARE 'P' +#define CREATE_SWITCHOVER_BARRIER_PREPARE 'O' #define CREATE_BARRIER_EXECUTE 'X' #define CREATE_SWITCHOVER_BARRIER_EXECUTE 'S' #define CREATE_BARRIER_END 'E' +#define CREATE_BARRIER_COMMIT 'C' #define CREATE_BARRIER_QUERY_ARCHIVE 'W' #define BARRIER_QUERY_ARCHIVE 'Q' @@ -42,43 +44,54 @@ #define HADR_IN_FAILOVER "hadr_promote" #define HADR_BARRIER_ID_HEAD "hadr" #define CSN_BARRIER_ID_HEAD "csn" +#define ROACH_BARRIER_ID_HEAD "gs_roach" #define HADR_KEY_CN_FILE "hadr_key_cn" #define HADR_DELETE_CN_FILE "hadr_delete_cn" #define HADR_SWITCHOVER_BARRIER_ID "hadr_switchover_000000000_0000000000000" +#define HADR_SWITCHOVER_BARRIER_TAIL "dr_switchover" #define BARRIER_LSN_FILE_LENGTH 17 #define BARRIER_CSN_FILE_LENGTH 39 #define MAX_BARRIER_ID_LENGTH 40 -#define BARRIER_ID_WITHOUT_TIMESTAMP_LEN 25 +#define BARRIER_ID_WITHOUT_TIMESTAMP_LEN 26 #define BARRIER_ID_TIMESTAMP_LEN 13 #define MAX_DEFAULT_LENGTH 255 #define WAIT_ARCHIVE_TIMEOUT 6000 #define MAX_BARRIER_SQL_LENGTH 60 #define BARRIER_LSN_LENGTH 30 #define MAX_BARRIER_PREFIX_LEHGTH 25 + #define XLOG_BARRIER_CREATE 0x00 +#define XLOG_BARRIER_COMMIT 0x10 +#define XLOG_BARRIER_SWITCHOVER 0x20 + #define IS_CSN_BARRIER(id) (strncmp(id, CSN_BARRIER_ID_HEAD, strlen(CSN_BARRIER_ID_HEAD)) == 0) +#define IS_HADR_BARRIER(id) (strncmp(id, HADR_BARRIER_ID_HEAD, strlen(HADR_BARRIER_ID_HEAD)) == 0) +#define IS_ROACH_BARRIER(id) (strncmp(id, ROACH_BARRIER_ID_HEAD, strlen(ROACH_BARRIER_ID_HEAD)) == 0) #define BARRIER_EQ(barrier1, barrier2) (strcmp((char *)barrier1, (char *)barrier2) == 0) #define BARRIER_GT(barrier1, barrier2) (strcmp((char *)barrier1, (char *)barrier2) > 0) #define BARRIER_LT(barrier1, barrier2) (strcmp((char *)barrier1, (char *)barrier2) < 0) +#define BARRIER_LE(barrier1, barrier2) (strcmp((char *)barrier1, (char *)barrier2) <= 0) +#define BARRIER_GE(barrier1, barrier2) (strcmp((char *)barrier1, (char *)barrier2) >= 0) -extern void ProcessCreateBarrierPrepare(const char* id); +extern void ProcessCreateBarrierPrepare(const char* id, bool isSwitchoverBarrier = false); extern void ProcessCreateBarrierEnd(const char* id); extern void ProcessCreateBarrierExecute(const char* id, bool isSwitchoverBarrier = false); +extern void ProcessCreateBarrierCommit(const char* id); -extern void RequestBarrier(const char* id, char* completionTag, bool isSwitchoverBarrier = false); +extern void CleanupBarrierLock(); +extern void RequestBarrier(char* id, char* completionTag, bool isSwitchoverBarrier = false); extern void barrier_redo(XLogReaderState* record); extern void barrier_desc(StringInfo buf, XLogReaderState* record); +extern const char* barrier_type_name(uint8 subtype); extern void DisasterRecoveryRequestBarrier(const char* id, bool isSwitchoverBarrier = false); extern void ProcessBarrierQueryArchive(char* id); -extern void ConnectETCD(); +extern bool is_barrier_pausable(const char* id); -extern void SendETCDLocalNewestBarrierInXlog(); - -extern void UpdateRedoBarrierTargetFromETCD(); #ifndef ENABLE_MULTIPLE_NODES extern void CreateHadrSwitchoverBarrier(); #endif +extern void UpdateXLogMaxCSN(CommitSeqNo xlogCSN); #endif diff --git a/src/include/pgxc/csnminsync.h b/src/include/pgxc/csnminsync.h index 180801875..bddab9c0b 100644 --- a/src/include/pgxc/csnminsync.h +++ b/src/include/pgxc/csnminsync.h @@ -27,5 +27,5 @@ extern void csnminsync_main(void); extern void csnminsync_thread_shutdown(void); - +extern bool csnminsync_is_first_cn_or_ccn(const char *node_name); #endif /* CSNMINSYNC_H 
*/ \ No newline at end of file diff --git a/src/include/pgxc/execRemote.h b/src/include/pgxc/execRemote.h index 570552906..5562f65e0 100644 --- a/src/include/pgxc/execRemote.h +++ b/src/include/pgxc/execRemote.h @@ -47,7 +47,7 @@ #define RESPONSE_PLANID_OK 6 #define RESPONSE_ANALYZE_ROWCNT 7 #define RESPONSE_SEQUENCE_OK 8 - +#define RESPONSE_MAXCSN_RECEIVED 9 #define REMOTE_CHECKMSG_LEN 8 /* it equals to the count of bytes added in AddCheckMessage when is_stream is false */ #define STREAM_CHECKMSG_LEN 20 /* it equals to the count of bytes added in AddCheckMessage when is_stream is true */ diff --git a/src/include/pgxc/locator.h b/src/include/pgxc/locator.h index 56e53328f..22712478e 100644 --- a/src/include/pgxc/locator.h +++ b/src/include/pgxc/locator.h @@ -210,6 +210,7 @@ typedef struct { /* Function for RelationLocInfo building and management */ extern void RelationBuildLocator(Relation rel); +extern void InitBuckets(RelationLocInfo* rel_loc_info, Relation relation); extern RelationLocInfo* GetRelationLocInfo(Oid relid); extern RelationLocInfo* TsGetRelationLocInfo(Oid relid); extern RelationLocInfo* GetRelationLocInfoDN(Oid relid); diff --git a/src/include/pgxc/nodemgr.h b/src/include/pgxc/nodemgr.h index 3a7692060..2faa0244b 100644 --- a/src/include/pgxc/nodemgr.h +++ b/src/include/pgxc/nodemgr.h @@ -44,6 +44,12 @@ typedef struct { NodeDefinition* nodesDefinition; /* all data nodes' defination */ } GlobalNodeDefinition; +typedef struct SkipNodeDefinition { + Oid nodeoid; + NameData nodename; + int slicenum; +} SkipNodeDefinition; + /* Connection statistics info */ typedef struct { Oid primaryNodeId; /* original primary nodeid */ @@ -64,6 +70,8 @@ extern Size NodeTablesShmemSize(void); extern void PgxcNodeListAndCount(void); extern void PgxcNodeGetOids(Oid** coOids, Oid** dnOids, int* num_coords, int* num_dns, bool update_preferred); +extern void PgxcNodeGetOidsForInit(Oid** coOids, Oid** dnOids, + int* num_coords, int* num_dns, int * num_primaries, bool update_preferred); extern void PgxcNodeGetStandbyOids(Oid** coOids, Oid** dnOids, int* numCoords, int* numDns, bool needInitPGXC); extern NodeDefinition* PgxcNodeGetDefinition(Oid node, bool checkStandbyNodes = false); extern void PgxcNodeAlter(AlterNodeStmt* stmt); @@ -98,4 +106,27 @@ extern void dn_info_hash_insert(Oid dn_oid, int row); extern void dn_info_hash_delete(Oid dn_oid); extern void dn_info_hash_destory(); +typedef struct { + char host[NAMEDATALEN]; + int port; +} DisasterAddr; + +typedef struct { + int slice; + int port; + char host[NAMEDATALEN]; + char host1[NAMEDATALEN]; + char node_name[NAMEDATALEN]; +} DisasterNode; + +typedef struct { + List* addrs; + DisasterNode* disaster_info; + int num_cn; + int num_dn; + int num_one_slice; +} DisasterReadContext; + +extern void UpdateConsistencyPoint(); +extern void UpdateCacheAndConsistencyPoint(DisasterReadContext* drContext); #endif /* NODEMGR_H */ diff --git a/src/include/pgxc/pgxc.h b/src/include/pgxc/pgxc.h index cfe548770..33eee922a 100644 --- a/src/include/pgxc/pgxc.h +++ b/src/include/pgxc/pgxc.h @@ -39,8 +39,8 @@ typedef enum { #define IS_PGXC_COORDINATOR (g_instance.role == VCOORDINATOR && !is_streaming_engine()) #define IS_PGXC_DATANODE (g_instance.role == VDATANODE || g_instance.role == VSINGLENODE || is_streaming_engine()) #else -#define IS_PGXC_COORDINATOR (g_instance.role == VCOORDINATOR) -#define IS_PGXC_DATANODE (g_instance.role == VDATANODE || g_instance.role == VSINGLENODE) +#define IS_PGXC_COORDINATOR false +#define IS_PGXC_DATANODE true #endif #define 
IS_SINGLE_NODE (g_instance.role == VSINGLENODE) #define REMOTE_CONN_TYPE u_sess->attr.attr_common.remoteConnType diff --git a/src/include/pgxc/pgxcnode.h b/src/include/pgxc/pgxcnode.h index ea13666e8..44e7e2620 100644 --- a/src/include/pgxc/pgxcnode.h +++ b/src/include/pgxc/pgxcnode.h @@ -231,6 +231,7 @@ extern void PGXCConnClean(int code, Datum arg); /* Look at information cached in node handles */ extern int PGXCNodeGetNodeId(Oid nodeoid, char node_type); +extern int PGXCNodeDRGetNodeId(Oid nodeoid); extern Oid PGXCNodeGetNodeOid(int nodeid, char node_type); extern int PGXCNodeGetNodeIdFromName(const char* node_name, char node_type); extern char* PGXCNodeGetNodeNameFromId(int nodeid, char node_type); diff --git a/src/include/pgxc/poolmgr.h b/src/include/pgxc/poolmgr.h index 11d5d0492..02d122ee4 100644 --- a/src/include/pgxc/poolmgr.h +++ b/src/include/pgxc/poolmgr.h @@ -202,14 +202,31 @@ typedef struct { int streamid; } PoolConnectionInfo; +/* + * Record global connection status for function 'comm_check_connection_status'. + * This struct in g_instance.pooler_cxt is accessed through the g_GlobalConnStatus macro. + */ +typedef struct GlobalConnStatus { + /* Record connection status */ + struct ConnectionStatus **connEntries; + + /* Numbers of coordinators and primary datanodes for connEntries */ + int totalEntriesCount; + + /* Lock of this struct */ + pthread_mutex_t connectionStatusLock; +} GlobalConnStatus; + /* * ConnectionStatus entry for pg_conn_status view */ -typedef struct { +typedef struct ConnectionStatus { + Oid remote_nodeoid; /* remote node oid */ char *remote_name; char *remote_host; int remote_port; - bool is_connected; + bool is_connected; /* connection status flag, recorded when creating the socket connection */ + bool no_error_occur; /* pooler connection status flag, recorded when creating the pooler connection */ int sock; } ConnectionStatus; @@ -406,6 +423,11 @@ extern void reload_user_name_pgoptions(PoolGeneralInfo* info, PoolAgent* agent, extern bool release_slot_to_nodepool(PGXCNodePool* nodePool, bool force_destroy, PGXCNodePoolSlot* slot); extern void free_pool_conn(PoolConnDef* conndef_for_validate); +extern void FillNodeConnectionStatus(ConnectionStatus *connsEntry, int entryCnt); +extern void RecreateGlobalConnEntries(); +extern void ResetPoolerConnectionStatus(); +extern void FlushPoolerConnectionStatus(Oid nodeOid); + /* The root memory context */ extern MemoryContext PoolerMemoryContext; @@ -428,6 +450,9 @@ extern PoolAgent** poolAgents; extern pthread_mutex_t g_poolAgentsLock; extern int MaxAgentCount; +/* GlobalConnStatus -- Record connection status in memory */ +#define g_GlobalConnStatus (g_instance.pooler_cxt.globalConnStatus) + #define PROTO_TCP 1 #define get_agent(handle) ((PoolAgent*)(handle)) #endif diff --git a/src/include/pgxc/remoteCombiner.h b/src/include/pgxc/remoteCombiner.h index 851157f06..436d7ba4f 100644 --- a/src/include/pgxc/remoteCombiner.h +++ b/src/include/pgxc/remoteCombiner.h @@ -117,6 +117,8 @@ typedef struct RemoteQueryState { RemoteDataRowData currentRow; /* next data ro to be wrapped into a tuple */ RowStoreManager row_store; /* buffer where rows are stored when connection should be cleaned for reuse by other RemoteQuery*/ + CommitSeqNo maxCSN; + bool hadrMainStandby; /* * To handle special case - if this RemoteQuery is feeding sorted data to * Sort plan and if the connection fetching data from the Datanode @@ -167,6 +169,7 @@ typedef struct RemoteQueryState { char* previousNodeName; /* previous DataNode that rowcount is different from current DataNode */ char* 
serializedPlan; /* the serialized plan tree */ ParallelFunctionState* parallel_function_state; + bool has_stream_for_loop; /* has a stream node in FOR-loop SQL, which may cause a hang. */ } RemoteQueryState; extern RemoteQueryState* CreateResponseCombiner(int node_count, CombineType combine_type); diff --git a/src/include/port.h b/src/include/port.h index a34463f01..f7c9cc21b 100644 --- a/src/include/port.h +++ b/src/include/port.h @@ -63,6 +63,7 @@ extern void get_man_path(const char* my_exec_path, char* ret_path); extern bool get_home_path(char* ret_path, size_t sz); extern void get_parent_directory(char* path); extern char* pg_strtolower(char* str); +extern char* pg_strtoupper(char* str); extern int mask_single_passwd(char* passwd); /* port/dirmod.c */ diff --git a/src/include/postgres.h b/src/include/postgres.h index ddacb9c0c..b0aebcdf6 100644 --- a/src/include/postgres.h +++ b/src/include/postgres.h @@ -7,10 +7,10 @@ * Client-side code should include postgres_fe.h instead. * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1995, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/postgres.h * @@ -117,6 +117,10 @@ #define BITS_PER_INT (BITS_PER_BYTE * sizeof(int)) #define STREAM_RESERVE_PROC_TIMES (16) + +/* For CLOB/BLOB larger than 1GB, the threshold size of the first chunk. */ +#define MAX_TOAST_CHUNK_SIZE 1073741771 + /* this struct is used to store connection info got from pool */ typedef struct { /* hostname of the connection */ @@ -155,6 +159,21 @@ typedef struct varatt_external { Oid va_toastrelid; /* RelID of TOAST table containing it */ } varatt_external; +typedef struct varatt_lob_external { + int64 va_rawsize; /* Original data size (includes header) */ + Oid va_valueid; /* Unique ID of value within TOAST table */ + Oid va_toastrelid; /* RelID of TOAST table containing it */ +} varatt_lob_external; + +typedef struct varatt_lob_pointer { + Oid relid; + int2 columid; + int2 bucketid; + uint16 bi_hi; + uint16 bi_lo; + uint16 ip_posid; +} varatt_lob_pointer; + /* * Out-of-line Datum thats stored in memory in contrast to varatt_external * pointers which points to data in an external toast relation. @@ -171,13 +190,19 @@ typedef struct varatt_indirect { * comes from the requirement for on-disk compatibility with the older * definitions of varattrib_1b_e where v_tag was named va_len_1be... */ -typedef enum vartag_external { VARTAG_INDIRECT = 1, VARTAG_BUCKET = 8, VARTAG_ONDISK = 18 } vartag_external; +typedef enum vartag_external { VARTAG_INDIRECT = 1, VARTAG_BUCKET = 8, VARTAG_ONDISK = 18, VARTAG_LOB = 28 } vartag_external; -#define VARTAG_SIZE(tag) \ +#define VARTAG_SIZE(tag) ((tag & 0x80) == 0x00 ? \ ((tag) == VARTAG_INDIRECT ? sizeof(varatt_indirect) : \ - (tag) == VARTAG_ONDISK ? sizeof(varatt_external) : \ - (tag) == VARTAG_BUCKET ? sizeof(varatt_external) + sizeof(int2) : \ - TrapMacro(true, "unknown vartag")) + ((tag) == VARTAG_ONDISK ? sizeof(varatt_external) : \ + ((tag) == VARTAG_BUCKET ? sizeof(varatt_external) + sizeof(int2) : \ + ((tag) == VARTAG_LOB ? sizeof(varatt_lob_pointer) : \ + TrapMacro(true, "unknown vartag"))))) : \ + ((tag & 0x7f) == VARTAG_INDIRECT ? sizeof(varatt_indirect) : \ + ((tag & 0x7f) == VARTAG_ONDISK ? sizeof(varatt_lob_external) : \ + ((tag & 0x7f) == VARTAG_BUCKET ? 
sizeof(varatt_lob_external) + sizeof(int2) : \ + ((tag & 0x7f) == VARTAG_LOB ? sizeof(varatt_lob_pointer) : \ + TrapMacro(true, "unknown vartag")))))) /* * These structs describe the header of a varlena object that may have been @@ -296,6 +321,8 @@ typedef enum { #define VARATT_IS_4B_C(PTR) ((((varattrib_1b*)(PTR))->va_header & 0xC0) == 0x40) #define VARATT_IS_1B(PTR) ((((varattrib_1b*)(PTR))->va_header & 0x80) == 0x80) #define VARATT_IS_1B_E(PTR) ((((varattrib_1b*)(PTR))->va_header) == 0x80) +#define VARATT_IS_HUGE_TOAST_POINTER(PTR) ((((varattrib_1b*)(PTR))->va_header) == 0x80 && \ + ((((varattrib_1b_e*)(PTR))->va_tag) & 0x01) == 0x01) #define VARATT_NOT_PAD_BYTE(PTR) (*((uint8*)(PTR)) != 0) /* VARSIZE_4B() should only be used on known-aligned data */ @@ -307,6 +334,8 @@ typedef enum { #define SET_VARSIZE_4B_C(PTR, len) (((varattrib_4b*)(PTR))->va_4byte.va_header = ((len)&0x3FFFFFFF) | 0x40000000) #define SET_VARSIZE_1B(PTR, len) (((varattrib_1b*)(PTR))->va_header = (len) | 0x80) #define SET_VARTAG_1B_E(PTR, tag) (((varattrib_1b_e*)(PTR))->va_header = 0x80, ((varattrib_1b_e*)(PTR))->va_tag = (tag)) +#define SET_HUGE_TOAST_POINTER_TAG(PTR, tag) (((varattrib_1b_e*)(PTR))->va_header = 0x80, \ + ((varattrib_1b_e*)(PTR))->va_tag = (tag) | 0x01) #else /* !WORDS_BIGENDIAN */ #define VARATT_IS_4B(PTR) ((((varattrib_1b*)(PTR))->va_header & 0x01) == 0x00) @@ -314,6 +343,8 @@ typedef enum { #define VARATT_IS_4B_C(PTR) ((((varattrib_1b*)(PTR))->va_header & 0x03) == 0x02) #define VARATT_IS_1B(PTR) ((((varattrib_1b*)(PTR))->va_header & 0x01) == 0x01) #define VARATT_IS_1B_E(PTR) ((((varattrib_1b*)(PTR))->va_header) == 0x01) +#define VARATT_IS_HUGE_TOAST_POINTER(PTR) ((((varattrib_1b*)(PTR))->va_header) == 0x01 && \ + ((((varattrib_1b_e*)(PTR))->va_tag) >> 7) == 0x01) #define VARATT_NOT_PAD_BYTE(PTR) (*((uint8*)(PTR)) != 0) /* VARSIZE_4B() should only be used on known-aligned data */ @@ -324,6 +355,8 @@ typedef enum { #define SET_VARSIZE_4B_C(PTR, len) (((varattrib_4b*)(PTR))->va_4byte.va_header = (((uint32)(len)) << 2) | 0x02) #define SET_VARSIZE_1B(PTR, len) (((varattrib_1b*)(PTR))->va_header = (((uint8)(len)) << 1) | 0x01) #define SET_VARTAG_1B_E(PTR, tag) (((varattrib_1b_e*)(PTR))->va_header = 0x01, ((varattrib_1b_e*)(PTR))->va_tag = (tag)) +#define SET_HUGE_TOAST_POINTER_TAG(PTR, tag) (((varattrib_1b_e*)(PTR))->va_header = 0x01, \ + ((varattrib_1b_e*)(PTR))->va_tag = (tag) | 0x80) #endif /* WORDS_BIGENDIAN */ #define VARHDRSZ_SHORT offsetof(varattrib_1b, va_data) @@ -375,6 +408,7 @@ typedef enum { #define VARATT_IS_EXTERNAL_INDIRECT(PTR) (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_INDIRECT) #define VARATT_IS_EXTERNAL_BUCKET(PTR) \ (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_BUCKET) +#define VARATT_IS_EXTERNAL_LOB(PTR) (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_LOB) #define VARATT_IS_EXTERNAL_ONDISK_B(PTR) \ (VARATT_IS_EXTERNAL_ONDISK(PTR) || VARATT_IS_EXTERNAL_BUCKET(PTR)) @@ -389,9 +423,9 @@ typedef enum { #define VARSIZE_ANY(PTR) \ (VARATT_IS_1B_E(PTR) ? VARSIZE_EXTERNAL(PTR) : (VARATT_IS_1B(PTR) ? VARSIZE_1B(PTR) : VARSIZE_4B(PTR))) -#define VARSIZE_ANY_EXHDR(PTR) \ - (VARATT_IS_1B_E(PTR) ? VARSIZE_EXTERNAL(PTR) - VARHDRSZ_EXTERNAL \ - : (VARATT_IS_1B(PTR) ? VARSIZE_1B(PTR) - VARHDRSZ_SHORT : VARSIZE_4B(PTR) - VARHDRSZ)) +#define VARSIZE_ANY_EXHDR(PTR) \ + (VARATT_IS_1B_E(PTR) ? VARSIZE_EXTERNAL(PTR) - VARHDRSZ_EXTERNAL : \ + (VARATT_IS_1B(PTR) ? 
VARSIZE_1B(PTR) - VARHDRSZ_SHORT : VARSIZE_4B(PTR) - VARHDRSZ)) /* caution: this will not work on an external or compressed-in-line Datum */ /* caution: this will return a possibly unaligned pointer */ @@ -933,17 +967,18 @@ extern void cJSON_internal_free(void* pointer); extern void InitThreadLocalWhenSessionExit(); extern void RemoveTempNamespace(); #ifndef ENABLE_MULTIPLE_NODES -#define CacheIsProcNameArgNsp(cache) ((cache)->id == PROCNAMEARGSNSP || (cache)->id == PROCALLARGS) +#define CacheIsProcNameArgNsp(cc_id) ((cc_id) == PROCNAMEARGSNSP || (cc_id) == PROCALLARGS) #else -#define CacheIsProcNameArgNsp(cache) ((cache)->id == PROCNAMEARGSNSP) +#define CacheIsProcNameArgNsp(cc_id) ((cc_id) == PROCNAMEARGSNSP) #endif -#define CacheIsProcOid(cache) ((cache)->id == PROCOID) +#define CacheIsProcOid(cc_id) ((cc_id) == PROCOID) #define IsBootingPgProc(rel) IsProcRelation(rel) #define BootUsingBuiltinFunc true extern int errdetail_abort(void); void log_disconnections(int code, Datum arg); +void cleanGPCPlanProcExit(int code, Datum arg); void ResetInterruptCxt(); diff --git a/src/include/postmaster/barrier_creator.h b/src/include/postmaster/barrier_creator.h index a8b7be4f3..8606d5588 100755 --- a/src/include/postmaster/barrier_creator.h +++ b/src/include/postmaster/barrier_creator.h @@ -27,18 +27,29 @@ #define BARRIER_CREATOR_H #define BARRIER_FILE "hadr_barrier_id" +#define START_AUTO_CSN_BARRIER (g_instance.attr.attr_storage.auto_csn_barrier) #define BARRIER_ARCH_INFO_LEN 100 +#define CSN_BARRIER_NAME "csn" typedef struct ArchiveBarrierLsnInfo { XLogRecPtr barrierLsn; Oid nodeoid; } ArchiveBarrierLsnInfo; +typedef struct BarrierUpdateLastTimeInfo { + long lastBarrierFileStartTimestamp; + char* archiveSlotName; +} BarrierUpdateLastTimeInfo; + extern void barrier_creator_main(void); extern void barrier_creator_thread_shutdown(void); -extern uint64_t GetObsBarrierIndex(const List *archiveSlotNames); +extern uint64_t GetObsBarrierIndex(const List *archiveSlotNames, long *last_barrier_time); extern void BarrierArchMain(knl_thread_arg* arg); extern bool IsFirstCn(void); +extern void GetCsnBarrierName(char* barrier_ret, bool isSwitchoverBarrier); +extern CommitSeqNo CsnBarrierNameGetCsn(const char* csnBarrier); +extern int64 CsnBarrierNameGetTimeStamp(const char* csnBarrier); +extern bool IsSwitchoverBarrier(const char *csnBarrier); #endif /* BARRIER_CREATOR_H */ diff --git a/src/include/postmaster/barrier_preparse.h b/src/include/postmaster/barrier_preparse.h new file mode 100644 index 000000000..d92b6d68a --- /dev/null +++ b/src/include/postmaster/barrier_preparse.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * barrier_preparse.h + * Pre-parse barrier records from the xlog stream and maintain the barrier ID cache, header file + * + * + * IDENTIFICATION + * src/include/postmaster/barrier_preparse.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef BARRIER_PREPARSE_H +#define BARRIER_PREPARSE_H + +#define INIBARRIERCACHESIZE 100 + +#define BarrierCacheInsertBarrierId(BARRIER) do { \ + CommitSeqNo *idhentry = NULL; \ + bool found = false; \ + idhentry = (CommitSeqNo *)hash_search(g_instance.csn_barrier_cxt.barrier_hash_table, (void *)(BARRIER), HASH_ENTER, &found); \ +} while (0) + +#define BarrierCacheDeleteBarrierId(BARRIER) do { \ + CommitSeqNo *idhentry = NULL; \ + idhentry = (CommitSeqNo *)hash_search(g_instance.csn_barrier_cxt.barrier_hash_table, (void *)(BARRIER), HASH_REMOVE, NULL); \ + if (idhentry == NULL) \ + ereport(WARNING, (errmsg("trying to delete a barrierID that does not exist"))); \ +} while (0) + +extern void BarrierPreParseMain(void); +extern void WakeUpBarrierPreParseBackend(void); +extern void SetBarrierPreParseLsn(XLogRecPtr startptr); + +#endif /* BARRIER_PREPARSE_H */ diff --git a/src/include/postmaster/bgwriter.h b/src/include/postmaster/bgwriter.h index b0233c490..af3f8ae1d 100644 --- a/src/include/postmaster/bgwriter.h +++ b/src/include/postmaster/bgwriter.h @@ -42,8 +42,8 @@ extern void CheckpointerMain(void); extern void RequestCheckpoint(int flags); extern void CheckpointWriteDelay(int flags, double progress); -extern bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type); -extern void AbsorbFsyncRequests(void); +extern bool CkptForwardSyncRequest(const FileTag *ftag, SyncRequestType type); +extern void CkptAbsorbFsyncRequests(void); extern Size CheckpointerShmemSize(void); extern void CheckpointerShmemInit(void); diff --git a/src/include/postmaster/pagerepair.h b/src/include/postmaster/pagerepair.h new file mode 100644 index 000000000..0443111b0 --- /dev/null +++ b/src/include/postmaster/pagerepair.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * pagerepair.h + * Data struct to store pagerepair thread variables. 
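+ * A corrupted page travels through RepairPageState below: it is registered
+ * as WAIT_REMOTE_READ when a CRC or LSN check fails, moves to WAIT_LSN_CHECK
+ * once the primary's copy has been fetched, and reaches WAIT_REPAIR when it
+ * may safely be written back.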
+ * + * + * IDENTIFICATION + * src/include/postmaster/pagerepair.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef _PAGEREPAIR_H +#define _PAGEREPAIR_H + +#include "access/xlog.h" +#include "access/xlog_basic.h" +#include "gs_thread.h" +#include "knl/knl_variable.h" +#include "miscadmin.h" +#include "storage/smgr/relfilenode.h" + +typedef uint64 XLogRecPtr; + +const int SEGLEN = 20; +const int MAX_REPAIR_PAGE_NUM = 1000; + +typedef enum { + CRC_CHECK_FAIL = 0, + LSN_CHECK_FAIL +} PageErrorType; + +typedef enum { + WAIT_REMOTE_READ = 0, /* the page needs to be read from the primary */ + WAIT_LSN_CHECK, /* the page has been read; wait for the lsn check */ + WAIT_REPAIR +} RepairPageState; + +typedef enum { + WAIT_FILE_CHECK_REPAIR = 0, /* waiting to determine whether file repair is required */ + WAIT_FILE_REMOTE_READ, /* the file needs to be read from the primary */ + WAIT_FILE_REPAIR, /* wait until the standby replay lsn is greater than the file lsn from the primary */ + WAIT_FILE_REPAIR_SEGMENT, + WAIT_RENAME, + WAIT_FOREGT_INVALID_PAGE, /* only segment files need the startup thread to handle forget-invalid-page again */ + WAIT_CLEAN /* the file does not exist on the primary either; wait for cleanup by some operation (DROP TABLE etc.) */ +} RepairFileState; + +typedef enum { + NEED_CONTINUE_CHECK = 0, /* need to check the next record */ + CHECK_SUCCESS, + CHECK_FAIL +} BlockLsnState; + +typedef struct RepairBlockKey { + RelFileNode relfilenode; + ForkNumber forknum; + BlockNumber blocknum; +} RepairBlockKey; + +typedef struct RepairBlockEntry { + RepairBlockKey key; + ThreadId recovery_tid; /* recovery thread id; when the page can be recovered, the recovery thread must be notified */ + PageErrorType error_type; /* crc error or lsn check error */ + RepairPageState page_state; /* current page state */ + XLogRecPtr page_old_lsn; /* for lsn check errors, the current page lsn */ + XLogRecPtr page_new_lsn; /* page lsn of the new page */ + XLogPhyBlock pblk; /* physical location for segment-page storage */ + char page_content[BLCKSZ]; /* new page from the primary */ +} RepairBlockEntry; + +typedef struct RepairFileKey { + RelFileNode relfilenode; + ForkNumber forknum; + BlockNumber segno; +} RepairFileKey; + +typedef struct RemoteReadFileKey { + RelFileNode relfilenode; + ForkNumber forknum; + BlockNumber blockstart; +} RemoteReadFileKey; + +typedef struct RepairFileEntry { + RepairFileKey key; + XLogRecPtr min_recovery_point; /* min_recovery_point recorded when the file was found missing */ + RepairFileState file_state; /* current file state */ + XLogRecPtr primary_file_lsn; /* lsn at which reading the file from the primary DN finished */ +} RepairFileEntry; + +extern void PageRepairMain(void); +extern void PageRepairHashTblInit(void); +extern void FileRepairHashTblInit(void); +extern void ClearPageRepairTheadMem(void); +extern bool CheckRepairPage(RepairBlockKey key, XLogRecPtr min_lsn, XLogRecPtr max_lsn, char *page); +extern bool BlockNodeMatch(RepairBlockKey key, XLogPhyBlock pblk, RelFileNode node, ForkNumber forknum, + BlockNumber minblkno, bool segment_shrink); +extern bool dbNodeandSpcNodeMatch(RelFileNode *rnode, Oid spcNode, Oid dbNode); +extern void BatchClearPageRepairHashTbl(Oid spcNode, Oid dbNode); +extern void ClearPageRepairHashTbl(const RelFileNode &node, ForkNumber forknum, BlockNumber minblkno, + bool segment_shrink); +extern void ClearSpecificsPageRepairHashTbl(RepairBlockKey key); +extern bool PushBadPageToRemoteHashTbl(RepairBlockKey key, PageErrorType error_type, XLogRecPtr old_lsn, + XLogPhyBlock pblk, ThreadId tid); +extern void 
BatchClearBadFileHashTbl(Oid spcNode, Oid dbNode); +extern void ClearBadFileHashTbl(const RelFileNode &node, ForkNumber forknum, uint32 segno); +extern void CheckNeedRenameFile(); +extern void CheckIsStopRecovery(void); +extern int CreateRepairFile(char *path); +extern int WriteRepairFile(int fd, char* path, char *buf, uint32 offset, uint32 size); +extern void CheckNeedRecordBadFile(RepairFileKey key, uint32 nblock, uint32 blocknum, + const XLogPhyBlock *pblk); +extern bool CheckFileRepairHashTbl(RelFileNode rnode, ForkNumber forknum, uint32 segno); +extern void df_clear_and_close_all_file(RepairFileKey key, int32 max_sliceno); +extern void df_open_all_file(RepairFileKey key, int32 max_sliceno); + +inline bool IsPrimaryClusterStandbyDN() +{ + load_server_mode(); + /* Standby DN or RecoveryInProgress DN of the primary cluster or a single cluster */ + if (g_instance.attr.attr_common.cluster_run_mode == RUN_MODE_PRIMARY && + g_instance.attr.attr_common.stream_cluster_run_mode == RUN_MODE_PRIMARY && + (!t_thrd.xlog_cxt.is_hadr_main_standby && !t_thrd.xlog_cxt.is_cascade_standby) && + (t_thrd.xlog_cxt.server_mode == STANDBY_MODE || t_thrd.xlog_cxt.server_mode == PENDING_MODE)) { + return true; + } + + return false; +} + +#define CheckVerionSupportRepair() (t_thrd.proc->workingVersionNum >= SUPPORT_DATA_REPAIR) + +#endif /* _PAGEREPAIR_H */ \ No newline at end of file diff --git a/src/include/postmaster/pagewriter.h b/src/include/postmaster/pagewriter.h index 78816347b..a807a9cec 100644 --- a/src/include/postmaster/pagewriter.h +++ b/src/include/postmaster/pagewriter.h @@ -12,11 +12,11 @@ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * --------------------------------------------------------------------------------------- - * + * * pagewriter.h * Data struct to store pagewriter thread variables. - * - * + * + * * IDENTIFICATION * src/include/postmaster/pagewriter.h * @@ -113,6 +113,22 @@ typedef struct RecoveryQueueState { LWLock *recovery_queue_lock; } RecoveryQueueState; +typedef struct { + SyncRequestType type; /* request type */ + FileTag ftag; /* file identifier */ +} CheckpointerRequest; + +typedef struct IncreCkptSyncShmemStruct { + ThreadId pagewritermain_pid; /* PID (0 if not started) */ + slock_t sync_lock; /* protects all the fsync_* fields */ + int64 fsync_start; + int64 fsync_done; + LWLock *sync_queue_lwlock; /* */ + int num_requests; /* current # of requests */ + int max_requests; /* allocated array size */ + CheckpointerRequest requests[1]; /* VARIABLE LENGTH ARRAY */ +} IncreCkptSyncShmemStruct; + /* * The slot location is pre-occupied. When the slot buffer is set, the state will set * to valid. 
when remove dirty page form queue, don't change the state, only when move @@ -151,4 +167,12 @@ extern bool seg_candidate_buf_pop(int *buf_id, int thread_id); extern void candidate_buf_init(void); extern uint32 get_curr_candidate_nums(bool segment); +extern void PgwrAbsorbFsyncRequests(void); +extern Size PageWriterShmemSize(void); +extern void PageWriterSyncShmemInit(void); +extern void RequestPgwrSync(void); +extern void PageWriterSync(void); +extern bool PgwrForwardSyncRequest(const FileTag *ftag, SyncRequestType type); +extern void PageWriterSyncWithAbsorption(void); + #endif /* _PAGEWRITER_H */ diff --git a/src/include/postmaster/postmaster.h b/src/include/postmaster/postmaster.h index fdede6d0c..8fff14d0b 100755 --- a/src/include/postmaster/postmaster.h +++ b/src/include/postmaster/postmaster.h @@ -29,6 +29,8 @@ extern bool FencedUDFMasterMode; #define FastShutdown 2 #define ImmediateShutdown 3 +const int ReaperLogBufSize = 1024; /* reaper function log buffer size */ + extern volatile int Shutdown; extern uint32 noProcLogicTid; @@ -153,7 +155,7 @@ extern int SubPostmasterMain(int argc, char* argv[]); #define MAX_BACKENDS 0x3FFFF extern void KillGraceThreads(void); -#define MAX_IPADDR_LEN 32 +#define MAX_IPADDR_LEN 64 #define MAX_PORT_LEN 6 #define MAX_LISTEN_ENTRY 64 #define MAX_IP_STR_LEN 64 diff --git a/src/include/postmaster/snapcapturer.h b/src/include/postmaster/snapcapturer.h index f6b584fe5..02541ff4a 100644 --- a/src/include/postmaster/snapcapturer.h +++ b/src/include/postmaster/snapcapturer.h @@ -53,6 +53,6 @@ extern ThreadId StartSnapWorkerCapturer(void); extern bool IsTxnSnapWorkerProcess(void); extern NON_EXEC_STATIC void TxnSnapWorkerMain(void); -#define ENABLE_TCAP_VERSION (u_sess->attr.attr_storage.version_retention_age > 0) +#define ENABLE_TCAP_VERSION (u_sess->attr.attr_storage.undo_retention_time > 0) #endif diff --git a/src/include/postmaster/startup.h b/src/include/postmaster/startup.h index 28bc8b40a..be2a0f7a6 100644 --- a/src/include/postmaster/startup.h +++ b/src/include/postmaster/startup.h @@ -12,7 +12,8 @@ #ifndef _STARTUP_H #define _STARTUP_H -typedef enum { NOTIFY_PRIMARY = 0, NOTIFY_STANDBY, NOTIFY_FAILOVER, NOTIFY_SWITCHOVER, NUM_NOTIFYS } NotifyReason; +typedef enum { NOTIFY_PRIMARY = 0, NOTIFY_STANDBY, NOTIFY_CASCADE_STANDBY, NOTIFY_FAILOVER, + NOTIFY_SWITCHOVER, NUM_NOTIFYS } NotifyReason; /* * Save the notify signal reason in the share memory. 
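The bgwriter.h and pagewriter.h hunks above split fsync-request forwarding in two: the checkpointer keeps its renamed CkptForwardSyncRequest()/CkptAbsorbFsyncRequests() pair, while incremental-checkpoint requests now go to the pagewriter main thread through the bounded requests[] array of IncreCkptSyncShmemStruct and are drained via PgwrAbsorbFsyncRequests() before PageWriterSync(). Below is a minimal sketch of the producer side only, assuming a hypothetical GetPgwrSyncShmem() accessor for the shared struct and that the array is guarded by sync_lock (the struct also carries sync_queue_lwlock, so the actual locking in the pagewriter source may differ):

/* Illustrative only; PgwrForwardSyncRequest() above is the real entry point. */
#include "postmaster/pagewriter.h" /* IncreCkptSyncShmemStruct, CheckpointerRequest */
#include "storage/spin.h"          /* SpinLockAcquire / SpinLockRelease */

extern IncreCkptSyncShmemStruct *GetPgwrSyncShmem(void); /* hypothetical accessor */

static bool SketchForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
{
    IncreCkptSyncShmemStruct *shmem = GetPgwrSyncShmem();
    bool queued = false;

    SpinLockAcquire(&shmem->sync_lock);
    if (shmem->num_requests < shmem->max_requests) {
        /* requests[] is declared with length 1 but sized at shmem-init time */
        CheckpointerRequest *req = &shmem->requests[shmem->num_requests++];
        req->ftag = *ftag; /* file identifier */
        req->type = type;  /* request type */
        queued = true;
    }
    SpinLockRelease(&shmem->sync_lock);

    /* false means the queue is full; the caller falls back to a direct fsync. */
    return queued;
}

On the consumer side, PgwrAbsorbFsyncRequests() presumably copies the array out under the same lock and resets num_requests before handing each FileTag to the sync machinery.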
diff --git a/src/include/replication/archive_walreceiver.h b/src/include/replication/archive_walreceiver.h index 89a214520..03b8407f1 100644 --- a/src/include/replication/archive_walreceiver.h +++ b/src/include/replication/archive_walreceiver.h @@ -48,23 +48,28 @@ extern void archive_disconnect(void); /* sizeof(uint32) + OBS_XLOG_SLICE_BLOCK_SIZE */ #define OBS_XLOG_SLICE_FILE_SIZE (OBS_XLOG_SLICE_BLOCK_SIZE + OBS_XLOG_SLICE_HEADER_SIZE) #define OBS_XLOG_SAVED_FILES_NUM 25600 /* 100G*1024*1024*1024/OBS_XLOG_SLICE_BLOCK_SIZE */ -#define IS_DISASTER_RECOVER_MODE \ +#define IS_OBS_DISASTER_RECOVER_MODE \ (t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && GetArchiveRecoverySlot()) -#define IS_CNDISASTER_RECOVER_MODE \ +#define IS_CN_OBS_DISASTER_RECOVER_MODE \ (IS_PGXC_COORDINATOR && GetArchiveRecoverySlot()) #define OBS_ARCHIVE_STATUS_FILE "obs_archive_start_end_record" - +#define ARCHIVE_GLOBAL_BARRIER_LIST_PATH "global_barrier_records" +#define FILE_TIME_INTERVAL 600000 extern int archive_replication_receive(XLogRecPtr startPtr, char **buffer, int *bufferLength, int timeout_ms, char* inner_buff); extern int ArchiveReplicationAchiver(const ArchiveXlogMessage *xlogInfo); extern void update_archive_start_end_location_file(XLogRecPtr endPtr, TimestampTz endTime); -extern int archive_replication_cleanup(XLogRecPtr recPtr, ArchiveConfig *archive_config = NULL); +extern int archive_replication_cleanup(XLogRecPtr recPtr, ArchiveConfig *archive_config = NULL, bool reverse = false); extern void update_recovery_barrier(); extern void update_stop_barrier(); extern int archive_replication_get_last_xlog(ArchiveXlogMessage *xloginfo, ArchiveConfig* archive_obs); extern bool ArchiveReplicationReadFile(const char* fileName, char* content, int contentLen, const char *slotName = NULL); extern char* get_local_key_cn(void); - +extern void UpdateGlobalBarrierListOnMedia(const char* id, const char* availableCNName); +extern void WriteGlobalBarrierListStartTimeOnMedia(long cur_time); +extern uint64 ReadBarrierTimelineRecordFromObs(const char* archiveSlotName); +extern char* DeleteStopBarrierRecordsOnMedia(long stopBarrierTimestamp, long endBarrierTimestamp = 0); +extern int GetArchiveXLogFileTotalNum(ArchiveConfig *archiverConfig, XLogRecPtr endLsn); #endif diff --git a/src/include/replication/dcf_flowcontrol.h b/src/include/replication/dcf_flowcontrol.h new file mode 100644 index 000000000..5a01a3efe --- /dev/null +++ b/src/include/replication/dcf_flowcontrol.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * dcf_flowcontrol.h + * + * IDENTIFICATION + * src/gausskernel/storage/replication/dcf/dcf_flowcontrol.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef _DCF_FLOWCONTROL_H +#define _DCF_FLOWCONTROL_H + +#include "c.h" +#include "cjson/cJSON.h" + +#define DCF_MAX_STREAM_INFO_LEN 2048 +#ifndef ENABLE_MULTIPLE_NODES + +/* + * Reply dcf message from standby (message type 'r'). + */ +#define DCF_STANDBY_NAME_SIZE 1024 +#define DCF_MAX_IP_LEN 64 + +typedef struct DCFStandbyReplyMessage { + char id[DCF_STANDBY_NAME_SIZE]; + /* + * The xlog locations that have been received, written, flushed, and applied by + * standby-side. These may be invalid if the standby-side is unable to or + * chooses not to report these. + */ + XLogRecPtr receive; + XLogRecPtr write; + XLogRecPtr flush; + XLogRecPtr apply; + XLogRecPtr applyRead; + + /* local role on walreceiver, they will be sent to walsender */ + ServerMode peer_role; + DbState peer_state; + + /* Sender's system clock at the time of transmission */ + TimestampTz sendTime; + + /* + * If replyRequested is set, the server should reply immediately to this + * message, to avoid a timeout disconnect. + */ + bool replyRequested; +} DCFStandbyReplyMessage; +extern void ResetDCFNodesInfo(void); +extern void DCFProcessStandbyReplyMessage(uint32 srcNodeID, const char* msg, uint32 msgSize); +extern bool ResetDCFNodeInfoWithNodeID(uint32 nodeID); +extern bool SetNodeInfoByNodeID(uint32 nodeID, DCFStandbyReplyMessage reply, int *nodeIndex); +extern bool GetNodeInfos(cJSON **nodeInfos); +extern bool GetDCFNodeInfo(const cJSON *nodeJsons, int nodeID, char *role, int roleLen, char *ip, int ipLen, int *port); +extern bool QueryLeaderNodeInfo(uint32* leaderID, char* leaderIP, uint32 ipLen, uint32 *leaderPort); +#endif +#endif diff --git a/src/include/replication/dcf_replication.h b/src/include/replication/dcf_replication.h index 8723b8cad..8821d8a19 100755 --- a/src/include/replication/dcf_replication.h +++ b/src/include/replication/dcf_replication.h @@ -27,17 +27,14 @@ #ifndef _DCF_REPLICATION_H #define _DCF_REPLICATION_H -#define DCF_MAX_STREAM_INFO_LEN 1024 - #ifndef ENABLE_MULTIPLE_NODES #include "dcf_interface.h" #include "cjson/cJSON.h" +#include "replication/dcf_flowcontrol.h" -#define DCF_MAX_IP_LEN 64 #define DCF_QUERY_IDLE 30000 /* 30ms RTT */ #define DCF_CHECK_CONF_IDLE 3600000; /* 1 hour */ -#define DCF_STANDBY_NAME_SIZE 1024 #define DCF_FOLLOWER_STR "FOLLOWER" #define DCF_UNIT_S 1000 /* 1000ms */ @@ -69,36 +66,6 @@ typedef enum { DCF_RUN_MODE_DISABLE = 2 } DcfRunModePerm; -/* - * Reply dcf message from standby (message type 'r'). - */ -typedef struct DCFStandbyReplyMessage { - char id[DCF_STANDBY_NAME_SIZE]; - /* - * The xlog locations that have been received, written, flushed, and applied by - * standby-side. These may be invalid if the standby-side is unable to or - * chooses not to report these. - */ - XLogRecPtr receive; - XLogRecPtr write; - XLogRecPtr flush; - XLogRecPtr apply; - XLogRecPtr applyRead; - - /* local role on walreceiver, they will be sent to walsender */ - ServerMode peer_role; - DbState peer_state; - - /* Sender's system clock at the time of transmission */ - TimestampTz sendTime; - - /* - * If replyRequested is set, the server should reply immediately to this - * message, to avoid a timeout disconnect. 
- */ - bool replyRequested; -} DCFStandbyReplyMessage; - extern Size DcfContextShmemSize(void); extern void DcfContextShmemInit(void); extern bool InitPaxosModule(); @@ -120,25 +87,11 @@ extern void StopPaxosModule(void); extern void DcfLogTruncate(void); extern bool QueryLeaderNodeInfo(uint32* leaderID, char* leaderIP = NULL, uint32 ipLen = 0, uint32 *leaderPort = NULL); extern void CheckConfigFile(bool after_build = false); -extern bool ResetDCFNodeInfoWithNodeID(uint32 nodeID); -extern void ResetDCFNodesInfo(void); -extern bool SetNodeInfoByNodeID(uint32 nodeID, DCFStandbyReplyMessage reply, int *nodeIndex); extern bool DCFSendMsg(uint32 streamID, uint32 destNodeID, const char* msg, uint32 msgSize); extern bool DCFSendXLogLocation(void); -extern int DCFLogCtrlCalculateCurrentRTO(const DCFStandbyReplyMessage *reply, DCFLogCtrlData* logCtrl); -extern void DCFLogCtrlCalculateSleepTime(DCFLogCtrlData *logCtrl); -extern void DCFLogCtrlCountSleepLimit(DCFLogCtrlData *logCtrl); -extern void SleepNodeReplication(int nodeIndex); -extern bool IsForceUpdate(TimestampTz preSendTime, TimestampTz curSendTime); extern void SetDcfNeedSyncConfig(void); extern bool DcfArchiveRoachForPitrMaster(XLogRecPtr targetLsn); extern void DcfSendArchiveXlogResponse(ArchiveTaskStatus *archive_task_status); -extern bool GetNodeInfos(cJSON **nodeInfos); -bool GetDCFNodeInfo(const cJSON *nodeJsons, int nodeID, char *role, int roleLen, char *ip, int ipLen, int *port); +extern bool IsDCFReadyOrDisabled(void); #endif - #endif - - - - diff --git a/src/include/replication/decode.h b/src/include/replication/decode.h index 5e10945a0..46df43172 100644 --- a/src/include/replication/decode.h +++ b/src/include/replication/decode.h @@ -17,7 +17,20 @@ #include "access/xlogreader.h" #include "replication/reorderbuffer.h" #include "replication/logical.h" +extern bool FilterByOrigin(LogicalDecodingContext *ctx, RepOriginId origin_id); +extern bool FilterByOrigin(ParallelLogicalDecodingContext *ctx, RepOriginId origin_id); void LogicalDecodingProcessRecord(LogicalDecodingContext* ctx, XLogReaderState* record); +void AreaLogicalDecodingProcessRecord(LogicalDecodingContext* ctx, XLogReaderState* record); +extern Pointer UGetMultiInsertXlrec(XLogReaderState *record, CommitSeqNo* curCSN); + +void DecodeXLogTuple(const char *data, Size len, ReorderBufferTupleBuf *tuple, bool isHeapTuple); +extern Pointer UGetXlrec(XLogReaderState * record); +size_t DecodeUndoMeta(const char* data); +bool FilterRecord(LogicalDecodingContext *ctx, XLogReaderState *r, uint8 flags, RelFileNode* rnode); +void UpdateUndoBody(Size* addLenPtr, uint8 flag); +void UpdateOldTupleCalc(bool isInplaceUpdate, XLogReaderState *r, char **tupleOld, Size *tuplelenOld); +extern void ParallelDecodeWorkerMain(void* point); +extern void LogicalReadWorkerMain(void* point); #endif diff --git a/src/include/replication/libpqwalreceiver.h b/src/include/replication/libpqwalreceiver.h index 4ce01f4ed..ba0ce4c25 100755 --- a/src/include/replication/libpqwalreceiver.h +++ b/src/include/replication/libpqwalreceiver.h @@ -48,7 +48,7 @@ extern void libpqrcv_disconnect(void); extern void HaSetRebuildRepInfoError(HaRebuildReason reason); extern void SetObsRebuildReason(HaRebuildReason reason); extern void libpqrcv_check_conninfo(const char *conninfo); -extern bool libpqrcv_command(const char *cmd, char **err); +extern bool libpqrcv_command(const char *cmd, char **err, int *sqlstate); extern void IdentifyRemoteSystem(bool checkRemote); extern void CreateRemoteReplicationSlot(XLogRecPtr 
startpoint, const char* slotname, bool isLogical); diff --git a/src/include/replication/logical.h b/src/include/replication/logical.h index e5167c920..5b0d34757 100644 --- a/src/include/replication/logical.h +++ b/src/include/replication/logical.h @@ -19,13 +19,37 @@ #include "access/xlog.h" #include "access/xlogreader.h" #include "replication/output_plugin.h" +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/extreme_rto/redo_item.h" +#include "nodes/pg_list.h" +#include "storage/proc.h" + + +#include "access/extreme_rto/posix_semaphore.h" +#include "access/extreme_rto/spsc_blocking_queue.h" +#include "access/parallel_recovery/redo_item.h" + +#include "nodes/replnodes.h" +#include "access/ustore/knl_utuple.h" +#include "replication/logical_queue.h" +#include "replication/parallel_reorderbuffer.h" + +/* The number of extra digits displayed for floating-point values in logical decoding */ +#define LOGICAL_DECODE_EXTRA_FLOAT_DIGITS 3 + +/* Maximum number of parallel decode threads */ +#define MAX_PARALLEL_DECODE_NUM 20 + +/* Maximum number of replication slots */ +#define MAX_REPLICATION_SLOT_NUM 100 -struct LogicalDecodingContext; typedef void (*LogicalOutputPluginWriterWrite)( struct LogicalDecodingContext* lr, XLogRecPtr Ptr, TransactionId xid, bool last_write); - typedef LogicalOutputPluginWriterWrite LogicalOutputPluginWriterPrepareWrite; +typedef struct logicalLog logicalLog; typedef struct LogicalDecodingContext { /* memory context this is all allocated in */ @@ -83,8 +107,190 @@ typedef struct LogicalDecodingContext { bool random_mode; } LogicalDecodingContext; -extern void CheckLogicalDecodingRequirements(Oid databaseId); +typedef struct chosenTable { + char *schema; /* NULL means any schema */ + char *table; /* NULL means any table */ +} chosenTable; +/* parallel decode callback signature */ +typedef void(*ParallelDecodeChangeCB)(Relation relation, ParallelReorderBufferChange* change, logicalLog *logChange, + ParallelLogicalDecodingContext* ctx, int slotId); + +typedef struct { + bool include_xids; + bool include_timestamp; + bool skip_empty_xacts; + bool xact_wrote_changes; + bool only_local; + char decode_style; /* 'j' stands for json while 't' stands for text */ + int parallel_decode_num; + int sending_batch; + ParallelDecodeChangeCB decode_change; + List *tableWhiteList; +} ParallelDecodeOption; + +typedef struct { + MemoryContext context; + ParallelDecodeOption pOptions; +} ParallelDecodingData; + +typedef struct { + MemoryContext context; + bool include_xids; + bool include_timestamp; + bool skip_empty_xacts; + bool xact_wrote_changes; + bool only_local; + List *tableWhiteList; +} PluginTestDecodingData; + +typedef struct ParallelLogicalDecodingContext { + /* memory context this is all allocated in */ + MemoryContext context; + + /* infrastructure pieces */ + XLogReaderState* reader; + ReplicationSlot* slot; + ParallelReorderBuffer* reorder; + /* + * Marks the logical decoding context as a fast-forward decoding one. + * Such a context has no plugin loaded, so most of the following + * properties are unused. + */ + bool fast_forward; + + ParallelOutputPluginCallbacks callbacks; + OutputPluginOptions options; + + /* + * User specified options + */ + List* output_plugin_options; + + /* + * Output buffer. + */ + StringInfo out; + + /* + * Private data pointer of the output plugin. + */ + void* output_plugin_private; + + /* + * Private data pointer for the data writer. + */ + void* output_writer_private; + + /* + * State for writing output. 
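+ * accept_writes and prepared_write gate the write callbacks; write_location
+ * and write_xid identify the record currently staged in 'out'.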
+ */ + bool accept_writes; + bool prepared_write; + XLogRecPtr write_location; + TransactionId write_xid; + + bool random_mode; + bool isParallel; +} ParallelLogicalDecodingContext; + +typedef struct ParallelDecodeWorker { + /* Worker id. */ + uint32 id; + /* Thread id */ + gs_thread_t tid; + int slotId; + /* To-be-replayed log-record-list queue. */ + LogicalQueue* changeQueue; + LogicalQueue* LogicalLogQueue; + logicalLog* freeGetLogicalLogHead; + MemoryContext oldCtx; + char dbUser[NAMEDATALEN]; + char dbName[NAMEDATALEN]; + char slotname[NAMEDATALEN]; + char decodeStyle; +} ParallelDecodeWorker; + +typedef struct ParallelDecodeReaderWorker { + /* Worker id. */ + uint32 id; + /* Thread id */ + ThreadId tid; + int slotId; + char dbUser[NAMEDATALEN]; + char dbName[NAMEDATALEN]; + char slotname[NAMEDATALEN]; + StartReplicationCmd *cmd; + + XLogRecPtr current_lsn; + XLogRecPtr restart_lsn; + XLogRecPtr candidate_oldest_xmin_lsn; + XLogRecPtr candidate_oldest_xmin; + XLogRecPtr flushLSN; + /* To-be-replayed log-record-list queue. */ + LogicalQueue* queue; + MemoryContext oldCtx; + ParallelDecodingData data; + slock_t rwlock; +} ParallelDecodeReaderWorker; + +typedef struct LogicalDispatcher { + MemoryContext oldCtx; + int decodeWorkerId; + ParallelDecodeWorker** decodeWorkers; /* Array of parallel decode workers. */ + ParallelDecodeOption pOptions; + + int totalWorkerCount; /* Number of parallel decode workers started. */ + ParallelDecodeReaderWorker* readWorker; /* Txn reader worker. */ + ParallelReorderBufferChange* freeChangeHead; /* Head of freed-item list. */ + ParallelReorderBufferChange* freeGetChangeHead; + + ReorderBufferTupleBuf* freeTupleHead; /* Head of freed-item list. */ + ReorderBufferTupleBuf* freeGetTupleHead; + + logicalLog* freeLogicalLogHead; /* Head of freed-item list. */ + char slotName[NAMEDATALEN]; + int32 pendingCount; /* Number of records pending. */ + int32 pendingMax; /* The max. pending count per batch. */ + int exitCode; /* Thread exit code. 
*/ + uint64 totalCostTime; + uint64 txnCostTime; /* txn cost time */ + uint64 pprCostTime; + uint64 sentPtr; + uint32 curChangeNum; + uint32 curTupleNum; + uint32 curLogNum; + uint64 num; + uint32* chosedWorkerIds; + uint32 chosedWorkerCount; + uint32 readyWorkerCnt; + int id; + TimestampTz decodeTime; + bool remainPatch; + bool checkpointNeedFullSync; + bool active; + bool firstLoop; + bool abnormal; + XLogRecPtr startpoint; + struct ReplicationSlot* MyReplicationSlot; +} LogicalDispatcher; + +#define QUEUE_RESULT_LEN 256 +typedef struct ParallelStatusData { + char slotName[NAMEDATALEN]; + int parallelDecodeNum; + char readQueueLen[QUEUE_RESULT_LEN]; + char decodeQueueLen[QUEUE_RESULT_LEN]; +} ParallelStatusData; + +extern LogicalDispatcher g_Logicaldispatcher[]; +extern bool firstCreateDispatcher; + +extern void CheckLogicalDecodingRequirements(Oid databaseId); +extern void ParallelReorderBufferQueueChange(ParallelReorderBuffer *rb, logicalLog *change, int slotId); +extern void ParallelReorderBufferForget(ParallelReorderBuffer *rb, int slotId, ParallelReorderBufferTXN *txn); +extern void ParallelReorderBufferCommit(ParallelReorderBuffer *rb, logicalLog *change, int slotId, + ParallelReorderBufferTXN *txn); extern LogicalDecodingContext* CreateInitDecodingContext(const char* plugin, List* output_plugin_options, bool need_full_snapshot, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write); @@ -94,6 +300,12 @@ extern LogicalDecodingContext* CreateInitDecodingContextInternal(char* plugin, L extern LogicalDecodingContext* CreateDecodingContext(XLogRecPtr start_lsn, List* output_plugin_options, bool fast_forward, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write); +extern LogicalDecodingContext* CreateDecodingContextForArea(XLogRecPtr start_lsn, const char* plugin,List* output_plugin_options, + bool fast_forward, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, + LogicalOutputPluginWriterWrite do_write); +extern ParallelLogicalDecodingContext *ParallelCreateDecodingContext(XLogRecPtr start_lsn, List *output_plugin_options, + bool fast_forward, XLogPageReadCB read_page, int slotId); + extern void DecodingContextFindStartpoint(LogicalDecodingContext* ctx); extern bool DecodingContextReady(LogicalDecodingContext* ctx); extern void FreeDecodingContext(LogicalDecodingContext* ctx); @@ -104,4 +316,18 @@ extern void LogicalConfirmReceivedLocation(XLogRecPtr lsn); extern bool filter_by_origin_cb_wrapper(LogicalDecodingContext* ctx, RepOriginId origin_id); extern void CloseLogicalAdvanceConnect(); extern void NotifyPrimaryAdvance(XLogRecPtr restart, XLogRecPtr flush); +extern void ParallelDecodeWorkerMain(void* point); +extern void LogicalReadWorkerMain(void* point); +extern void ParseProcessRecord(ParallelLogicalDecodingContext *ctx, XLogReaderState *record, ParallelDecodeReaderWorker + *worker); +extern void XLogSendPararllelLogical(); +extern int StartLogicalLogWorkers(char* dbUser, char* dbName, char* slotname, List *options, int parallelDecodeNum); +extern void CheckBooleanOption(DefElem *elem, bool *booleanOption, bool defaultValue); +extern int ParseParallelDecodeNumOnly(List *options); +extern bool CheckWhiteList(const List *whiteList, const char *schema, const char *table); +extern bool ParseStringToWhiteList(char *tableString, List **tableWhiteList); +extern void ParseWhiteList(List **whiteList, DefElem* elem); +extern void 
ParseDecodingOptionPlugin(ListCell* option, PluginTestDecodingData* data, OutputPluginOptions* opt); +extern ParallelStatusData *GetParallelDecodeStatus(uint32 *num); +extern void PrintLiteral(StringInfo s, Oid typid, char* outputstr); #endif diff --git a/src/include/replication/logical_parse.h b/src/include/replication/logical_parse.h new file mode 100644 index 000000000..2ca612be1 --- /dev/null +++ b/src/include/replication/logical_parse.h @@ -0,0 +1,68 @@ +/* --------------------------------------------------------------------------------------- + * + * logical_parse.h + * openGauss parallel decoding parse xlog. + * + * Copyright (c) 2012-2014, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/include/replication/logical_parse.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef LOGICAL_PARSE_H +#define LOGICAL_PARSE_H +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/heapam.h" +#include "access/transam.h" +#include "access/xact.h" +#include "access/xlog_internal.h" +#include "access/xlogreader.h" + +#include "storage/standby.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" + +#include "utils/memutils.h" +#include "utils/relfilenodemap.h" +#include "utils/atomic.h" +#include "cjson/cJSON.h" + +#include "catalog/pg_control.h" + +#include "replication/decode.h" +#include "replication/logical.h" +#include "replication/reorderbuffer.h" +#include "replication/snapbuild.h" +#include "replication/parallel_decode.h" +#include "replication/parallel_reorderbuffer.h" + +extern void ParallelDecodeChange(ParallelReorderBufferChange* change, ParallelLogicalDecodingContext* ctx, + ParallelDecodeWorker *worker); +extern void parallel_decode_change_to_json(Relation relation, ParallelReorderBufferChange* change, + logicalLog *logChange, ParallelLogicalDecodingContext* ctx, int slotId); +extern void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChange* change, + logicalLog *logChange, ParallelLogicalDecodingContext* ctx, int slotId); +extern void ParseProcessRecord(ParallelLogicalDecodingContext *ctx, XLogReaderState *record, + ParallelDecodeReaderWorker *worker); +extern void ParseHeapOp(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, ParallelDecodeReaderWorker *worker); +extern void ParseHeap2Op(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, + ParallelDecodeReaderWorker *worker); +extern void ParseAbortXlog(ParallelLogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, + TransactionId *sub_xids, int nsubxacts, ParallelDecodeReaderWorker *worker); +extern void ParseInsertXlog(ParallelLogicalDecodingContext *ctx, XLogRecordBuffer *buf, + ParallelDecodeReaderWorker *worker); +extern int GetDecodeParallelism(int slotId); +extern ParallelReorderBufferTXN *ParallelReorderBufferGetOldestTXN(ParallelReorderBuffer *rb); +extern logicalLog* GetLogicalLog(ParallelDecodeWorker *worker, int slotId = -1); +extern void FreeLogicalLog(logicalLog *logChange, int slotId); +extern void PutChangeQueue(int slotId, ParallelReorderBufferChange *change); +extern bool CheckToastTuple(ParallelReorderBufferChange *change, ParallelLogicalDecodingContext *ctx, + Relation relation, bool istoast); +extern void ToastTupleReplace(ParallelReorderBuffer *rb, Relation relation, + ParallelReorderBufferChange *change, Oid partationReltoastrelid, ParallelDecodeReaderWorker *worker, bool isHeap); +extern void ToastTupleAppendChunk(ParallelReorderBuffer *rb, Relation 
relation, + ParallelReorderBufferChange *change); +#endif diff --git a/src/include/replication/logical_queue.h b/src/include/replication/logical_queue.h new file mode 100644 index 000000000..8472b6221 --- /dev/null +++ b/src/include/replication/logical_queue.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * logical_queue.h + * + * + * + * IDENTIFICATION + * src/include/replication/logical_queue.h + * + * --------------------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "knl/knl_variable.h" +#define POWER_OF_TWO(x) (((x) & ((x)-1)) == 0) +#define COUNT(head, tail, mask) ((uint32)(((head) - (tail)) & (mask))) +#define SPACE(head, tail, mask) ((uint32)(((tail) - ((head) + 1)) & (mask))) + +const int QUEUE_CAPACITY_MIN_LIMIT = 2; + +typedef void (*CallBackFunc)(); +typedef struct LogicalQueue { + pg_atomic_uint32 writeHead; /* Array index for the next write. */ + pg_atomic_uint32 readTail; /* Array index for the next read. */ + uint32 capacity; /* Queue capacity, must be power of 2. */ + uint32 mask; /* Bit mask for computing index. */ + pg_atomic_uint32 maxUsage; + pg_atomic_uint64 totalCnt; + CallBackFunc callBackFunc; + void* buffer[1]; /* Queue buffer, the actual size is capacity. 
*/
+}LogicalQueue;
+
+LogicalQueue *LogicalQueueCreate(uint32 capacity, uint32 slotId, CallBackFunc func = NULL);
+
+bool LogicalQueuePut(LogicalQueue* queue, void* element);
+void* LogicalQueueTop(LogicalQueue* queue);
+void LogicalQueuePop(LogicalQueue* queue);
+
diff --git a/src/include/replication/logicalfuncs.h b/src/include/replication/logicalfuncs.h
index 67317f879..edf7dc909 100644
--- a/src/include/replication/logicalfuncs.h
+++ b/src/include/replication/logicalfuncs.h
@@ -15,9 +15,11 @@ #define LOGICALFUNCS_H
 
 #include "replication/logical.h"
+#include "libgen.h"
+
 extern int logical_read_local_xlog_page(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen,
-    XLogRecPtr targetRecPtr, char* cur_page, TimeLineID* pageTLI);
+    XLogRecPtr targetRecPtr, char* cur_page, TimeLineID* pageTLI, char* xlog_path = NULL);
 extern bool AssignLsn(XLogRecPtr* lsn_ptr, const char* input);
 
 extern Datum pg_logical_slot_get_changes(PG_FUNCTION_ARGS);
@@ -28,5 +30,8 @@ extern Datum pg_logical_peek_changes(PG_FUNCTION_ARGS);
 extern Datum gs_write_term_log(PG_FUNCTION_ARGS);
 extern void write_term_log(uint32 term);
 extern void check_permissions(bool for_backup = false);
+extern void CheckLogicalPremissions(Oid userId);
+extern Datum pg_logical_get_area_changes(PG_FUNCTION_ARGS);
+
 #endif
diff --git a/src/include/replication/logicalrelation.h b/src/include/replication/logicalrelation.h
index f4f49f8de..16bc9f6b0 100644
--- a/src/include/replication/logicalrelation.h
+++ b/src/include/replication/logicalrelation.h
@@ -16,32 +16,29 @@
 typedef struct LogicalRepRelMapEntry {
-    LogicalRepRelation remoterel; /* key is remoterel.remoteid */
+    LogicalRepRelation remoterel; /* key is remoterel.remoteid */
 
     /*
-     * Validity flag -- when false, revalidate all derived info at next
-     * logicalrep_rel_open. (While the localrel is open, we assume our lock
-     * on that rel ensures the info remains good.)
-     */
-    bool localrelvalid;
+     * Validity flag -- when false, revalidate all derived info at next
+     * logicalrep_rel_open. (While the localrel is open, we assume our lock
+     * on that rel ensures the info remains good.)
+     */
+    bool localrelvalid;
 
-    /* Mapping to local relation, filled as needed. */
-    Oid localreloid; /* local relation id */
-    Relation localrel; /* relcache entry (NULL when closed) */
-    AttrNumber *attrmap; /* map of local attributes to remote ones */
-    bool updatable; /* Can apply updates/detetes? */
+    /* Mapping to local relation, filled as needed. */
+    Oid localreloid; /* local relation id */
+    Relation localrel; /* relcache entry (NULL when closed) */
+    AttrNumber *attrmap; /* map of local attributes to remote ones */
+    bool updatable; /* Can apply updates/deletes? */
 
-    /* Sync state.
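LogicalQueue above is a single-producer/single-consumer ring buffer: POWER_OF_TWO enforces a power-of-two capacity so COUNT and SPACE can use the bit mask instead of a modulo, and SPACE keeps one slot free so a full queue never looks empty. A minimal standalone sketch of the same index arithmetic, assuming acquire/release ordering (all names illustrative):

```
#include <atomic>
#include <cstdint>

/* Illustrative SPSC ring buffer using LogicalQueue's index arithmetic:
 * capacity is a power of two, mask = capacity - 1, and SPACE() keeps one
 * slot free so a full queue is distinguishable from an empty one. */
struct SpscQueueSketch {
    static const uint32_t CAPACITY = 8;  /* must satisfy POWER_OF_TWO */
    static const uint32_t MASK = CAPACITY - 1;
    std::atomic<uint32_t> writeHead{0};  /* next slot to write */
    std::atomic<uint32_t> readTail{0};   /* next slot to read */
    void *buffer[CAPACITY];

    bool Put(void *element)              /* producer side, cf. LogicalQueuePut */
    {
        uint32_t head = writeHead.load(std::memory_order_relaxed);
        uint32_t tail = readTail.load(std::memory_order_acquire);
        if (((tail - (head + 1)) & MASK) == 0) { /* SPACE(head, tail, mask) == 0: full */
            return false;
        }
        buffer[head & MASK] = element;
        writeHead.store(head + 1, std::memory_order_release);
        return true;
    }

    void *Top()                          /* consumer side, cf. LogicalQueueTop */
    {
        uint32_t tail = readTail.load(std::memory_order_relaxed);
        uint32_t head = writeHead.load(std::memory_order_acquire);
        if (((head - tail) & MASK) == 0) {       /* COUNT(head, tail, mask) == 0: empty */
            return nullptr;
        }
        return buffer[tail & MASK];
    }

    void Pop()                           /* consumer side, cf. LogicalQueuePop */
    {
        readTail.fetch_add(1, std::memory_order_release);
    }
};
```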
*/ + char state; + XLogRecPtr statelsn; } LogicalRepRelMapEntry; extern void logicalrep_relmap_update(LogicalRepRelation *remoterel); - -extern LogicalRepRelMapEntry *logicalrep_rel_open(LogicalRepRelId remoteid, - LOCKMODE lockmode); -extern void logicalrep_rel_close(LogicalRepRelMapEntry *rel, - LOCKMODE lockmode); +extern LogicalRepRelMapEntry *logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode); +extern void logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode); #endif /* LOGICALRELATION_H */ diff --git a/src/include/replication/obswalreceiver.h b/src/include/replication/obswalreceiver.h deleted file mode 100755 index 98aa95f47..000000000 --- a/src/include/replication/obswalreceiver.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2020 Huawei Technologies Co.,Ltd. - * - * Description: openGauss is licensed under Mulan PSL v2. - * You can use this software according to the terms and conditions of the Mulan PSL v2. - * You may obtain a copy of Mulan PSL v2 at: - * - * http://license.coscl.org.cn/MulanPSL2 - * - * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, - * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, - * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. - * See the Mulan PSL v2 for more details. - * --------------------------------------------------------------------------------------- - * - * obswalreceiver.h - * obswalreceiver init for WalreceiverMain. - * - * - * IDENTIFICATION - * src/include/replication/obswalreceiver.h - * - * --------------------------------------------------------------------------------------- - */ -#ifndef OBSWALRECEIVER_H -#define OBSWALRECEIVER_H - -#include "postgres.h" -#include "access/xlogdefs.h" -#include "replication/walprotocol.h" -#include "replication/slot.h" - - -extern int32 pg_atoi(char* s, int size, int c); -extern int32 pg_strtoint32(const char* s); -/* Prototypes for interface functions */ - -extern bool obs_connect(char* conninfo, XLogRecPtr* startpoint, char* slotname, int channel_identifier); -extern bool obs_receive(int timeout, unsigned char* type, char** buffer, int* len); -extern void obs_send(const char *buffer, int nbytes); -extern void obs_disconnect(void); - -#define OBS_XLOG_FILENAME_LENGTH 1024 -#define OBS_XLOG_SLICE_NUM_MAX 0x3 -#define OBS_XLOG_SLICE_BLOCK_SIZE ((uint32)(4 * 1024 * 1024)) -#define OBS_XLOG_SLICE_HEADER_SIZE (sizeof(uint32)) -/* sizeof(uint32) + OBS_XLOG_SLICE_BLOCK_SIZE */ -#define OBS_XLOG_SLICE_FILE_SIZE (OBS_XLOG_SLICE_BLOCK_SIZE + OBS_XLOG_SLICE_HEADER_SIZE) -#define OBS_XLOG_SAVED_FILES_NUM 25600 /* 100G*1024*1024*1024/OBS_XLOG_SLICE_BLOCK_SIZE */ -#define IS_DISASTER_RECOVER_MODE \ - (t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && getObsRecoverySlot()) -#define IS_CNDISASTER_RECOVER_MODE \ - (IS_PGXC_COORDINATOR && getObsRecoverySlot()) - - -extern int obs_replication_receive(XLogRecPtr startPtr, char **buffer, - int *bufferLength, int timeout_ms, char* inner_buff); -extern int obs_replication_archive(const ArchiveXlogMessage *xlogInfo); -extern void obs_update_archive_start_end_location_file(XLogRecPtr endPtr, TimestampTz endTime); -extern int obs_replication_cleanup(XLogRecPtr recPtr, ObsArchiveConfig *obs_config = NULL); -extern void update_recovery_barrier(); -extern void update_stop_barrier(); -extern int obs_replication_get_last_xlog(ArchiveXlogMessage *xloginfo, ObsArchiveConfig* archive_obs); -extern bool obs_replication_read_file(const char* fileName, char* content, int contentLen, 
const char *slotName = NULL);
-extern char* get_local_key_cn(void);
-
-#endif
diff --git a/src/include/replication/origin.h b/src/include/replication/origin.h
index fa1d70410..0343c7679 100644
--- a/src/include/replication/origin.h
+++ b/src/include/replication/origin.h
@@ -90,6 +90,7 @@ extern void StartupReplicationOrigin(void);
 /* WAL logging */
 void replorigin_redo(XLogReaderState *record);
 void replorigin_desc(StringInfo buf, XLogReaderState *record);
+const char* replorigin_type_name(uint8 subtype);
 
 /* shared memory allocation */
 extern Size ReplicationOriginShmemSize(void);
diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h
index d369b3bb6..c3b4535ae 100644
--- a/src/include/replication/output_plugin.h
+++ b/src/include/replication/output_plugin.h
@@ -15,6 +15,7 @@ #define OUTPUT_PLUGIN_H
 
 #include "replication/reorderbuffer.h"
+#include "replication/parallel_reorderbuffer.h"
 
 struct LogicalDecodingContext;
 struct OutputPluginCallbacks;
@@ -36,6 +37,7 @@ typedef struct OutputPluginOptions {
  * when loading an output plugin shared library.
 */
 typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks* cb);
+typedef void (*ParallelLogicalOutputPluginInit)(struct ParallelOutputPluginCallbacks* cb);
 
 /*
 * Callback that gets called in a user-defined plugin. ctx->private_data can
@@ -58,11 +60,24 @@ typedef void (*LogicalDecodeBeginCB)(struct LogicalDecodingContext* ctx, Reorder
 typedef void (*LogicalDecodeChangeCB)(
     struct LogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ReorderBufferChange* change);
+typedef void (*ParallelLogicalDecodeChangeCB)(
+    struct ParallelLogicalDecodingContext* ctx, ReorderBufferTXN* txn, Relation relation, ParallelReorderBufferChange* change);
+
 /*
 * Called for every (explicit or implicit) COMMIT of a successful transaction.
 */
 typedef void (*LogicalDecodeCommitCB)(struct LogicalDecodingContext* ctx, ReorderBufferTXN* txn, XLogRecPtr commit_lsn);
+/*
+ * Called for every (explicit or implicit) ABORT of a transaction.
+ */
+typedef void (*LogicalDecodeAbortCB)(struct LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
+
+/*
+ * Called for every PREPARE of a two-phase transaction.
+ */
+typedef void (*LogicalDecodePrepareCB)(struct LogicalDecodingContext* ctx, ReorderBufferTXN* txn);
+
 /*
 * Called to shutdown an output plugin.
 */
@@ -81,10 +96,21 @@ typedef struct OutputPluginCallbacks {
     LogicalDecodeBeginCB begin_cb;
     LogicalDecodeChangeCB change_cb;
     LogicalDecodeCommitCB commit_cb;
+    LogicalDecodeAbortCB abort_cb;
+    LogicalDecodePrepareCB prepare_cb;
     LogicalDecodeShutdownCB shutdown_cb;
     LogicalDecodeFilterByOriginCB filter_by_origin_cb;
 } OutputPluginCallbacks;
+typedef struct ParallelOutputPluginCallbacks {
+    LogicalDecodeStartupCB startup_cb;
+    LogicalDecodeBeginCB begin_cb;
+    ParallelLogicalDecodeChangeCB change_cb;
+    LogicalDecodeCommitCB commit_cb;
+    LogicalDecodeShutdownCB shutdown_cb;
+    LogicalDecodeFilterByOriginCB filter_by_origin_cb;
+} ParallelOutputPluginCallbacks;
+
 extern void OutputPluginPrepareWrite(struct LogicalDecodingContext* ctx, bool last_write);
 extern void OutputPluginWrite(struct LogicalDecodingContext* ctx, bool last_write);
diff --git a/src/include/replication/parallel_decode.h b/src/include/replication/parallel_decode.h
new file mode 100644
index 000000000..34a92de55
--- /dev/null
+++ b/src/include/replication/parallel_decode.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
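ParallelLogicalOutputPluginInit above is the hook a plugin library exposes so the loader can fill in ParallelOutputPluginCallbacks. A sketch under stated assumptions: the demo_* stubs are hypothetical, and the startup/begin/shutdown/filter signatures follow the usual PostgreSQL output-plugin shapes, which this hunk does not show:

```
#include "replication/output_plugin.h"

/* Illustrative, empty callback stubs; only the struct fields come from the header above. */
static void demo_startup(struct LogicalDecodingContext *ctx, OutputPluginOptions *opt, bool is_init) {}
static void demo_begin(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn) {}
static void demo_change(struct ParallelLogicalDecodingContext *ctx, ReorderBufferTXN *txn,
    Relation relation, ParallelReorderBufferChange *change) {}
static void demo_commit(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn) {}
static void demo_shutdown(struct LogicalDecodingContext *ctx) {}
static bool demo_filter_origin(struct LogicalDecodingContext *ctx, RepOriginId origin_id)
{
    return false; /* do not filter anything out */
}

/* Hypothetical init hook matching ParallelLogicalOutputPluginInit. */
extern "C" void demo_parallel_plugin_init(struct ParallelOutputPluginCallbacks *cb)
{
    cb->startup_cb = demo_startup;
    cb->begin_cb = demo_begin;
    cb->change_cb = demo_change; /* a ParallelLogicalDecodeChangeCB */
    cb->commit_cb = demo_commit;
    cb->shutdown_cb = demo_shutdown;
    cb->filter_by_origin_cb = demo_filter_origin;
}
```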
+ * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * src/include/replication/parallel_decode.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef PARALLELDECODE_H +#define PARALLELDECODE_H + + +#include "postgres.h" +#include "knl/knl_variable.h" + +#include "access/heapam.h" +#include "access/transam.h" +#include "access/xact.h" +#include "access/xlog_internal.h" +#include "access/xlogreader.h" + +#include "storage/standby.h" + +#include "utils/memutils.h" +#include "utils/relfilenodemap.h" + +#include "replication/decode.h" +#include "replication/logical.h" +#include "replication/reorderbuffer.h" +#include "replication/snapbuild.h" +#include "replication/parallel_reorderbuffer.h" + +#include "catalog/pg_control.h" + +typedef struct XLogRecordBuffer { + XLogRecPtr origptr; + XLogRecPtr endptr; + XLogReaderState *record; + char *record_data; +} XLogRecordBuffer; + + +typedef enum { + NOT_DECODE_THREAD, + DECODE_THREAD_EXIT_NORMAL, + DECODE_THREAD_EXIT_ABNORMAL, +} DECODEExitStatus; + + +extern void ParallelDecodeChange(ParallelReorderBufferChange* change, ParallelLogicalDecodingContext* ctx, + ParallelDecodeWorker *worker); +extern void parallel_decode_change_to_json(Relation relation, ParallelReorderBufferChange* change, + logicalLog *logChange, ParallelLogicalDecodingContext* ctx, int slotId); +extern void parallel_decode_change_to_text(Relation relation, ParallelReorderBufferChange* change, + logicalLog *logChange, ParallelLogicalDecodingContext* ctx, int slotId); +extern void parallel_decode_change_to_bin(Relation relation, ParallelReorderBufferChange* change, + logicalLog *logChange, ParallelLogicalDecodingContext* ctx, int slotId); +extern int GetDecodeParallelism(int slotId); +extern ParallelReorderBufferTXN *ParallelReorderBufferGetOldestTXN(ParallelReorderBuffer *rb); +extern logicalLog* GetLogicalLog(ParallelDecodeWorker *worker); +extern void FreeLogicalLog(logicalLog *logChange, int slotId); +Snapshot GetLocalSnapshot(MemoryContext ctx); + +#endif diff --git a/src/include/replication/parallel_decode_worker.h b/src/include/replication/parallel_decode_worker.h new file mode 100644 index 000000000..5701e66e1 --- /dev/null +++ b/src/include/replication/parallel_decode_worker.h @@ -0,0 +1,60 @@ +/* --------------------------------------------------------------------------------------- + * + * parallel_decode_worker.h + * openGauss parallel decoding create worker threads. 
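ParallelDecodeOption.decode_style documents 'j' for json and 't' for text, and parallel_decode.h above also declares parallel_decode_change_to_bin(). A hypothetical dispatcher over the three emitters; mapping 'b' to the binary emitter and defaulting to text are assumptions:

```
#include "replication/parallel_decode.h"

/* Illustrative helper: pick the ParallelDecodeChangeCB for a decode style.
 * The three emitters are declared in parallel_decode.h with exactly the
 * ParallelDecodeChangeCB signature. */
static ParallelDecodeChangeCB ChooseDecodeChangeCB(char decodeStyle)
{
    switch (decodeStyle) {
        case 'j':
            return parallel_decode_change_to_json;
        case 'b': /* assumed binary style */
            return parallel_decode_change_to_bin;
        case 't':
        default:  /* assumed default */
            return parallel_decode_change_to_text;
    }
}
```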
+ * + * Copyright (c) 2012-2014, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/include/replication/parallel_decode_worker.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef PARALLEL_DECODEWORKER_H +#define PARALLEL_DECODEWORKER_H + +#include "postgres.h" +#include "knl/knl_variable.h" + +#include +#include + +#include "miscadmin.h" + +#include "access/rewriteheap.h" +#include "access/transam.h" +#include "access/tuptoaster.h" +#include "access/xact.h" + +#include "replication/logical.h" +#include "replication/reorderbuffer.h" +#include "replication/slot.h" +#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */ +#include "access/xlog_internal.h" + +#include "storage/smgr/fd.h" +#include "storage/sinval.h" + +#include "catalog/catalog.h" +#include "catalog/pg_namespace.h" +#include "lib/binaryheap.h" + +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/combocid.h" +#include "utils/memutils.h" +#include "utils/relcache.h" +#include "utils/relfilenodemap.h" +#include "knl/knl_thread.h" +#include "utils/postinit.h" +#include "utils/ps_status.h" +#include "storage/ipc.h" + + +extern ParallelDecodeWorker *CreateLogicalDecodeWorker(uint32 id, char* dbUser, char* dbName, char* slotname, uint32 slotId); +extern void SendSignalToDecodeWorker(int signal, int slotId); +extern void parallel_tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls); +extern void LogicalReadRecordMain(ParallelDecodeReaderWorker *worker); +extern int logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, + char *cur_page, TimeLineID *pageTLI, char* xlog_path); +#endif diff --git a/src/include/replication/parallel_reorderbuffer.h b/src/include/replication/parallel_reorderbuffer.h new file mode 100644 index 000000000..3a69b3b20 --- /dev/null +++ b/src/include/replication/parallel_reorderbuffer.h @@ -0,0 +1,377 @@ +/* --------------------------------------------------------------------------------------- + * + * parallel_reorderbuffer.h + * openGauss parallel decoding reorder buffer management. 
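CreateLogicalDecodeWorker() above creates one decode worker, and StartLogicalLogWorkers() takes a parallelDecodeNum, so a fan-out loop of roughly this shape is implied. A sketch in which the allocation and bookkeeping details are illustrative:

```
#include "replication/logical.h"
#include "replication/parallel_decode_worker.h"

/* Sketch of the per-slot worker fan-out; only CreateLogicalDecodeWorker()
 * and the LogicalDispatcher fields come from the headers above. */
static void LaunchDecodeWorkersSketch(LogicalDispatcher *dispatcher, char *dbUser,
                                      char *dbName, char *slotname, int slotId,
                                      int parallelDecodeNum)
{
    Assert(parallelDecodeNum > 0 && parallelDecodeNum <= MAX_PARALLEL_DECODE_NUM);
    dispatcher->decodeWorkers = (ParallelDecodeWorker **)palloc0(
        sizeof(ParallelDecodeWorker *) * parallelDecodeNum);
    for (uint32 i = 0; i < (uint32)parallelDecodeNum; i++) {
        dispatcher->decodeWorkers[i] =
            CreateLogicalDecodeWorker(i, dbUser, dbName, slotname, (uint32)slotId);
    }
    dispatcher->totalWorkerCount = parallelDecodeNum;
}
```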
+ * + * Copyright (c) 2012-2014, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/include/replication/parallel_reorderbuffer.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef PARALLEL_REORDERBUFFER_H +#define PARALLEL_REORDERBUFFER_H + +#include "lib/ilist.h" + +#include "storage/sinval.h" +#include "utils/hsearch.h" +#include "utils/rel.h" +#include "utils/snapshot.h" +#include "utils/timestamp.h" + +typedef struct ParallelReorderBuffer ParallelReorderBuffer; +typedef struct ParallelReorderBufferTXN ParallelReorderBufferTXN; +typedef struct ParallelReorderBufferChange ParallelReorderBufferChange; +typedef struct ParallelDecodeReaderWorker ParallelDecodeReaderWorker; + +#ifndef LOGICAL_LOG +#define LOGICAL_LOG +enum LogicalLogType { + LOGICAL_LOG_DML, + LOGICAL_LOG_COMMIT, + LOGICAL_LOG_ABORT, + LOGICAL_LOG_EMPTY, + LOGICAL_LOG_RUNNING_XACTS, + LOGICAL_LOG_CONFIRM_FLUSH, + LOGICAL_LOG_NEW_CID +}; + +typedef struct logicalLog { + StringInfo out; + XLogRecPtr lsn; + XLogRecPtr finalLsn; + XLogRecPtr endLsn; + TransactionId xid; + TransactionId oldestXmin; + dlist_node node; + LogicalLogType type; + CommitSeqNo csn; + int nsubxacts; + TransactionId *subXids; + TimestampTz commitTime; + logicalLog *freeNext; +} logicalLog; +#endif + +ParallelReorderBufferTXN *ParallelReorderBufferTXNByXid(ParallelReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, + XLogRecPtr lsn, bool create_as_top); +typedef void (*ParallelReorderBufferApplyChangeCB)( + ParallelReorderBuffer* rb, ReorderBufferTXN* txn, Relation relation, ParallelReorderBufferChange* change); + +enum ParallelReorderBufferChangeType { + PARALLEL_REORDER_BUFFER_CHANGE_INSERT, + PARALLEL_REORDER_BUFFER_CHANGE_UPDATE, + PARALLEL_REORDER_BUFFER_CHANGE_DELETE, + PARALLEL_REORDER_BUFFER_CHANGE_RUNNING_XACT, + PARALLEL_REORDER_BUFFER_CHANGE_COMMIT, + PARALLEL_REORDER_BUFFER_CHANGE_ABORT, + PARALLEL_REORDER_BUFFER_CHANGE_UINSERT, + PARALLEL_REORDER_BUFFER_CHANGE_UUPDATE, + PARALLEL_REORDER_BUFFER_CHANGE_UDELETE, + PARALLEL_REORDER_BUFFER_INVALIDATIONS_MESSAGE, + PARALLEL_REORDER_BUFFER_CHANGE_CONFIRM_FLUSH, + PARALLEL_REORDER_BUFFER_NEW_CID +}; + + +typedef struct ParallelReorderBufferTXNByIdEnt { + TransactionId xid; + ParallelReorderBufferTXN *txn; +} ParallelReorderBufferTXNByIdEnt; + +typedef struct ParallelReorderBufferIterTXNEntry { + XLogRecPtr lsn; + logicalLog *change; + ParallelReorderBufferTXN *txn; + int fd; + XLogSegNo segno; +} ParallelReorderBufferIterTXNEntry; + +typedef struct ParallelReorderBufferIterTXNState { + binaryheap *heap; + Size nr_txns; + dlist_head old_change; + ParallelReorderBufferIterTXNEntry entries[FLEXIBLE_ARRAY_MEMBER]; +} ParallelReorderBufferIterTXNState; +/* + * a single 'change', can be an insert (with one tuple), an update (old, new), + * or a delete (old). + * + * The same struct is also used internally for other purposes but that should + * never be visible outside reorderbuffer.c. + */ +typedef struct ParallelReorderBufferChange { + XLogRecPtr lsn; + XLogRecPtr finalLsn; + XLogRecPtr endLsn; + TransactionId xid; + TransactionId oldestXmin; + CommitSeqNo csn; + int ninvalidations; + SharedInvalidationMessage *invalidations; + enum ParallelReorderBufferChangeType action; + HTAB* toast_hash; + + /* + * Context data for the change, which part of the union is valid depends + * on action/action_internal. 
+ */ + union { + /* Old, new tuples when action == *_INSERT|UPDATE|DELETE */ + struct { + /* relation that has been changed */ + RelFileNode relnode; + /* no previously reassembled toast chunks are necessary anymore */ + bool clear_toast_afterwards; + + /* valid for DELETE || UPDATE */ + ReorderBufferTupleBuf* oldtuple; + /* valid for INSERT || UPDATE */ + ReorderBufferTupleBuf* newtuple; + CommitSeqNo snapshotcsn; + } tp; + struct { + char* ddl_opt; + char* ddl_type; + char* ddl_name; + } ddl_msg; + } data; + ParallelReorderBufferChange * freeNext; + int nsubxacts; + TransactionId * subXids; + dlist_node node; + TimestampTz commitTime; +} ParallelReorderBufferChange; + +typedef struct ParallelReorderBufferTXN { + /* + * The transactions transaction id, can be a toplevel or sub xid. + */ + TransactionId xid; + TransactionId oldestXid; + + /* did the TX have catalog changes */ + bool has_catalog_changes; + + /* Do we know this is a subxact? Xid of top-level txn if so */ + bool is_known_as_subxact; + + TransactionId toplevel_xid; + + /* + * LSN of the first data carrying, WAL record with knowledge about this + * xid. This is allowed to *not* be first record adorned with this xid, if + * the previous records aren't relevant for logical decoding. + */ + XLogRecPtr first_lsn; + + /* ---- + * LSN of the record that lead to this xact to be committed or + * aborted. This can be a + * * plain commit record + * * plain commit record, of a parent transaction + * * prepared transaction commit + * * plain abort record + * * prepared transaction abort + * * error during decoding + * ---- + */ + XLogRecPtr final_lsn; + + /* + * LSN pointing to the end of the commit record + 1. + */ + XLogRecPtr end_lsn; + + /* + * LSN of the last lsn at which snapshot information reside, so we can + * restart decoding from there and fully recover this transaction from + * WAL. + */ + XLogRecPtr restart_decoding_lsn; + + /* The csn of the transaction */ + CommitSeqNo csn; + + /* origin of the change that caused this transaction */ + RepOriginId origin_id; + + /* + * Commit time, only known when we read the actual commit record. + */ + TimestampTz commit_time; + + /* + * Base snapshot or NULL. + * The base snapshot is used to decode all changes until either this + * transaction modifies the catalog, or another catalog-modifying + * transaction commits. + */ + Snapshot base_snapshot; + XLogRecPtr base_snapshot_lsn; + dlist_node base_snapshot_node; /* link in txns_by_base_snapshot_lsn */ + + /* + * Has this transaction been spilled to disk? It's not always possible to + * deduce that fact by comparing nentries with nentries_mem, because + * e.g. subtransactions of a large transaction might get serialized + * together with the parent - if they're restored to memory they'd have + * nentries_mem == nentries. + */ + bool serialized; + + /* + * How many ReorderBufferChange's do we have in this txn. + * + * Changes in subtransactions are *not* included but tracked separately. + */ + uint64 nentries; + + /* + * How many of the above entries are stored in memory in contrast to being + * spilled to disk. + */ + uint64 nentries_mem; + + /* + * List of ReorderBufferChange structs, including new Snapshots and new + * CommandIds + */ + dlist_head changes; + + /* + * List of (relation, ctid) => (cmin, cmax) mappings for catalog tuples. + * Those are always assigned to the toplevel transaction. 
(Keep track of
+     * #entries to create a hash of the right size)
+     */
+    dlist_head tuplecids;
+    uint64 ntuplecids;
+
+    /*
+     * Hash containing (potentially partial) toast entries. NULL if no toast
+     * tuples have been found for the current change.
+     */
+    HTAB* toast_hash;
+
+    /*
+     * On-demand built hash for looking up the above values.
+     */
+    HTAB* tuplecid_hash;
+
+    /*
+     * non-hierarchical list of subtransactions that are *not* aborted. Only
+     * used in toplevel transactions.
+     */
+    dlist_head subtxns;
+    uint32 nsubtxns;
+
+    /*
+     * Stored cache invalidations. This is not a linked list because we get
+     * all the invalidations at once.
+     */
+    uint32 ninvalidations;
+    SharedInvalidationMessage* invalidations;
+
+    /* ---
+     * Position in one of three lists:
+     * * list of subtransactions if we are *known* to be subxact
+     * * list of toplevel xacts (can be an as-yet unknown subxact)
+     * * list of preallocated ReorderBufferTXNs
+     * ---
+     */
+    dlist_node node;
+
+} ParallelReorderBufferTXN;
+
+struct ParallelReorderBuffer {
+    /*
+     * xid => ReorderBufferTXN lookup table
+     */
+    HTAB* by_txn;
+
+    /*
+     * Transactions that could be a toplevel xact, ordered by LSN of the first
+     * record bearing that xid.
+     */
+    dlist_head toplevel_by_lsn;
+
+    /*
+     * Transactions and subtransactions that have a base snapshot, ordered by
+     * LSN of the record which caused us to first obtain the base snapshot.
+     * This is not the same as toplevel_by_lsn, because we only set the base
+     * snapshot on the first logical-decoding-relevant record (eg. heap
+     * writes), whereas the initial LSN could be set by other operations.
+     */
+    dlist_head txns_by_base_snapshot_lsn;
+
+    /*
+     * one-entry sized cache for by_txn. Very frequently the same txn gets
+     * looked up over and over again.
+     */
+    TransactionId by_txn_last_xid;
+    ParallelReorderBufferTXN* by_txn_last_txn;
+
+    /*
+     * Callbacks to be called when a transaction commits.
+     */
+    ReorderBufferBeginCB begin;
+    ParallelReorderBufferApplyChangeCB apply_change;
+    ReorderBufferCommitCB commit;
+
+    /*
+     * Pointer that will be passed untouched to the callbacks.
+     */
+    void* private_data;
+
+    /*
+     * Private memory context.
+     */
+    MemoryContext context;
+
+    /*
+     * Data structure slab cache.
+     *
+     * We allocate/deallocate some structures very frequently, to avoid bigger
+     * overhead we cache some unused ones here.
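The freeNext links and the slab-cache comment above imply a simple free-list protocol behind ParallelReorderBufferGetChange() and ParallelFreeChange(): freed entries are pushed onto a per-slot list and reused before new memory is allocated. A sketch with a hypothetical list head:

```
#include "postgres.h"
#include "replication/parallel_reorderbuffer.h"

/* Illustrative per-slot free list; the real code keeps these heads in the
 * dispatcher (freeChangeHead / freeGetChangeHead). */
static ParallelReorderBufferChange *freeChangeHeadSketch = NULL;

static void FreeChangeSketch(ParallelReorderBufferChange *change)
{
    change->freeNext = freeChangeHeadSketch; /* push onto the free list */
    freeChangeHeadSketch = change;
}

static ParallelReorderBufferChange *GetChangeSketch(void)
{
    if (freeChangeHeadSketch != NULL) {      /* reuse a cached entry first */
        ParallelReorderBufferChange *change = freeChangeHeadSketch;
        freeChangeHeadSketch = change->freeNext;
        return change;
    }
    return (ParallelReorderBufferChange *)palloc0(sizeof(ParallelReorderBufferChange));
}
```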
+ * + * The maximum number of cached entries is controlled by const variables + * ontop of reorderbuffer.c + */ + + + /* cached ReorderBufferChanges */ + dlist_head cached_changes; + Size nr_cached_changes; + + /* cached ReorderBufferTupleBufs */ + slist_head cached_tuplebufs; + Size nr_cached_tuplebufs; + TransactionId lastRunningXactOldestXmin; + + XLogRecPtr current_restart_decoding_lsn; + + /* buffer for disk<->memory conversions */ + char* outbuf; + Size outbufsize; +}; + +/* Disk serialization support datastructures */ +typedef struct ParallelReorderBufferDiskChange { + Size size; + logicalLog change; + /* data follows */ +} ParallelReorderBufferDiskChange; + +ParallelReorderBuffer* ParallelReorderBufferAllocate(int slotId); +extern void ParallelFreeTuple(ReorderBufferTupleBuf *tuple, int slotId); +extern void ParallelFreeChange(ParallelReorderBufferChange *change, int slotId); +extern ParallelReorderBufferChange* ParallelReorderBufferGetChange(ParallelReorderBuffer *rb, int slotId); +extern ReorderBufferTupleBuf *ParallelReorderBufferGetTupleBuf(ParallelReorderBuffer *rb, Size tuple_len, + ParallelDecodeReaderWorker *worker, bool isHeapTuple); +extern void ParallelReorderBufferToastReset(ParallelReorderBufferChange *change, int slotId); +extern void WalSndWriteDataHelper(StringInfo out, XLogRecPtr lsn, TransactionId xid, bool last_write); +extern void WalSndPrepareWriteHelper(StringInfo out, XLogRecPtr lsn, TransactionId xid, bool last_write); +const uint32 max_decode_cache_num = 1000000; +#endif diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 4759f4f85..5967e94ce 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -22,6 +22,7 @@ #include "utils/snapshot.h" #include "utils/timestamp.h" #include "access/ustore/knl_utuple.h" +#include "lib/binaryheap.h" /* an individual tuple, stored in one chunk of memory */ typedef struct ReorderBufferTupleBuf { @@ -32,7 +33,8 @@ typedef struct ReorderBufferTupleBuf { HeapTupleData tuple; /* pre-allocated size of tuple buffer, different from tuple size */ Size alloc_tuple_size; - + /* auxiliary pointer for caching freed tuples */ + ReorderBufferTupleBuf* freeNext; /* actual tuple data follows */ } ReorderBufferTupleBuf; /* pointer to the data stored in a TupleBuf */ @@ -46,7 +48,8 @@ typedef struct ReorderBufferUTupleBuf { UHeapTupleData tuple; /* pre-allocated size of tuple buffer, different from tuple size */ Size alloc_tuple_size; - + /* auxiliary pointer for caching freed tuples */ + ReorderBufferUTupleBuf* freeNext; /* actual tuple data follows */ } ReorderBufferUTupleBuf; #define ReorderBufferUTupleBufData(p) ((UHeapDiskTuple)MAXALIGN(((char*)p) + sizeof(ReorderBufferUTupleBuf))) @@ -102,6 +105,7 @@ typedef struct ReorderBufferChange { ReorderBufferTupleBuf* oldtuple; /* valid for INSERT || UPDATE */ ReorderBufferTupleBuf* newtuple; + CommitSeqNo snapshotcsn; } tp; /* Old, new utuples when action == UHEAP_INSERT|UPDATE|DELETE */ @@ -115,6 +119,7 @@ typedef struct ReorderBufferChange { ReorderBufferUTupleBuf* oldtuple; /* valid for INSERT || UPDATE */ ReorderBufferUTupleBuf* newtuple; + CommitSeqNo snapshotcsn; } utp; /* New snapshot, set when action == *_INTERNAL_SNAPSHOT */ @@ -299,6 +304,13 @@ typedef void (*ReorderBufferBeginCB)(ReorderBuffer* rb, ReorderBufferTXN* txn); /* commit callback signature */ typedef void (*ReorderBufferCommitCB)(ReorderBuffer* rb, ReorderBufferTXN* txn, XLogRecPtr commit_lsn); +/* abort callback signature */ 
+typedef void (*ReorderBufferAbortCB)(ReorderBuffer* rb, ReorderBufferTXN* txn); + +/* prepare callback signature */ +typedef void (*ReorderBufferPrepareCB)(ReorderBuffer* rb, ReorderBufferTXN* txn); + + struct ReorderBuffer { /* * xid => ReorderBufferTXN lookup table @@ -333,6 +345,8 @@ struct ReorderBuffer { ReorderBufferBeginCB begin; ReorderBufferApplyChangeCB apply_change; ReorderBufferCommitCB commit; + ReorderBufferAbortCB abort; + ReorderBufferPrepareCB prepare; /* * Pointer that will be passed untouched to the callbacks. @@ -365,6 +379,7 @@ struct ReorderBuffer { /* cached ReorderBufferTupleBufs */ slist_head cached_tuplebufs; Size nr_cached_tuplebufs; + TransactionId lastRunningXactOldestXmin; XLogRecPtr current_restart_decoding_lsn; @@ -373,6 +388,60 @@ struct ReorderBuffer { Size outbufsize; }; +/* entry for a hash table we use to map from xid to our transaction state */ +typedef struct ReorderBufferTXNByIdEnt { + TransactionId xid; + ReorderBufferTXN *txn; +} ReorderBufferTXNByIdEnt; + +/* data structures for (relfilenode, ctid) => (cmin, cmax) mapping */ +typedef struct ReorderBufferTupleCidKey { + RelFileNode relnode; + ItemPointerData tid; +} ReorderBufferTupleCidKey; + +typedef struct ReorderBufferTupleCidEnt { + ReorderBufferTupleCidKey key; + CommandId cmin; + CommandId cmax; + CommandId combocid; /* just for debugging */ +} ReorderBufferTupleCidEnt; + +/* k-way in-order change iteration support structures */ +typedef struct ReorderBufferIterTXNEntry { + XLogRecPtr lsn; + ReorderBufferChange *change; + ReorderBufferTXN *txn; + int fd; + XLogSegNo segno; +} ReorderBufferIterTXNEntry; + +typedef struct ReorderBufferIterTXNState { + binaryheap *heap; + Size nr_txns; + dlist_head old_change; + ReorderBufferIterTXNEntry entries[FLEXIBLE_ARRAY_MEMBER]; +} ReorderBufferIterTXNState; + +/* toast datastructures */ +typedef struct ReorderBufferToastEnt { + Oid chunk_id; /* toast_table.chunk_id */ + int32 last_chunk_seq; /* toast_table.chunk_seq of the last chunk we + * have seen */ + Size num_chunks; /* number of chunks we've already seen */ + Size size; /* combined size of chunks seen */ + dlist_head chunks; /* linked list of chunks */ + struct varlena *reconstructed; /* reconstructed varlena now pointed + * to in main tup */ +} ReorderBufferToastEnt; + +/* Disk serialization support datastructures */ +typedef struct ReorderBufferDiskChange { + Size size; + ReorderBufferChange change; + /* data follows */ +} ReorderBufferDiskChange; + ReorderBuffer* ReorderBufferAllocate(void); void ReorderBufferFree(ReorderBuffer*); diff --git a/src/include/replication/replicainternal.h b/src/include/replication/replicainternal.h index 492729476..46df4f0ce 100755 --- a/src/include/replication/replicainternal.h +++ b/src/include/replication/replicainternal.h @@ -20,6 +20,7 @@ #endif #define IP_LEN 64 +#define SSL_MODE_LEN 16 #define PG_PROTOCOL_VERSION "MPPDB" /* Notice: the value is same sa GUC_MAX_REPLNODE_NUM */ @@ -108,6 +109,7 @@ typedef struct gaussstate { uint64 term; BuildState build_info; HaRebuildReason ha_rebuild_reason; + int current_connect_idx; } GaussState; typedef struct newnodeinfo { @@ -137,6 +139,9 @@ typedef struct replconninfo { int remoteheartbeatport; bool isCascade; bool isCrossRegion; +#ifdef ENABLE_LITE_MODE + char sslmode[SSL_MODE_LEN]; +#endif } ReplConnInfo; /* @@ -150,6 +155,7 @@ typedef struct hashmemdata { bool is_cross_region; bool is_hadr_main_standby; int current_repl; + int prev_repl; int repl_list_num; int loop_find_times; slock_t mutex; diff --git 
a/src/include/replication/rto_statistic.h b/src/include/replication/rto_statistic.h index 9c57e861d..4018a566a 100644 --- a/src/include/replication/rto_statistic.h +++ b/src/include/replication/rto_statistic.h @@ -30,13 +30,11 @@ static const uint32 RTO_VIEW_NAME_SIZE = 32; static const uint32 RTO_VIEW_COL_SIZE = 2; -static const uint32 MAX_WAL_SENDER = 100; static const int32 DCF_MAX_NODE_NUM = 10; -static const uint32 RTO_INFO_BUFFER_SIZE = 2048 * (1 + MAX_WAL_SENDER); static const uint32 DCF_RTO_INFO_BUFFER_SIZE = 2048 * (1 + DCF_MAX_NODE_NUM); static const uint32 STANDBY_NAME_SIZE = 1024; static const uint32 RECOVERY_RTO_VIEW_COL = 9; -static const uint32 HADR_RTO_RPO_VIEW_COL = 11; +static const uint32 HADR_RTO_RPO_VIEW_COL = 12; typedef Datum (*GetViewDataFunc)(); @@ -68,11 +66,12 @@ typedef struct HadrRTOAndRPOData { int target_rto; int64 current_rpo; int target_rpo; - int64 current_sleep_time; + int64 rto_sleep_time; + int64 rpo_sleep_time; } HadrRTOAndRPOData; typedef struct knl_g_rto_context { - RTOStandbyData rto_standby_data[MAX_WAL_SENDER]; + RTOStandbyData* rto_standby_data; #ifndef ENABLE_MULTIPLE_NODES RTOStandbyData dcf_rto_standby_data[DCF_MAX_NODE_NUM]; #endif diff --git a/src/include/replication/shared_storage_walreceiver.h b/src/include/replication/shared_storage_walreceiver.h index 5b811d684..0ccf59bda 100644 --- a/src/include/replication/shared_storage_walreceiver.h +++ b/src/include/replication/shared_storage_walreceiver.h @@ -45,7 +45,12 @@ extern bool SharedStorageXlogReadCheck(XLogReaderState *xlogreader, XLogRecPtr r (t_thrd.postmaster_cxt.HaShmData->is_cascade_standby && \ g_instance.attr.attr_storage.xlog_file_path != 0) -#define IS_SHARED_STORAGE_STANBY_CLUSTER_MODE \ +#define IS_SHARED_STORAGE_STANDBY_CLUSTER \ + (g_instance.attr.attr_common.cluster_run_mode == RUN_MODE_STANDBY && \ + g_instance.attr.attr_storage.xlog_file_path != 0) + + +#define IS_SHARED_STORAGE_STANDBY_CLUSTER_STANDBY_MODE \ (t_thrd.xlog_cxt.server_mode == STANDBY_MODE && \ g_instance.attr.attr_common.cluster_run_mode == RUN_MODE_STANDBY && \ g_instance.attr.attr_storage.xlog_file_path != 0) diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h index e06fa48e4..f8bebb567 100755 --- a/src/include/replication/slot.h +++ b/src/include/replication/slot.h @@ -17,9 +17,11 @@ #include "fmgr.h" #include "access/xlog.h" +#include "replication/walprotocol.h" #include "storage/lock/lwlock.h" #include "storage/shmem.h" #include "storage/spin.h" + extern const uint32 EXTRA_SLOT_VERSION_NUM; #define ARCHIVE_PITR_PREFIX "pitr_" @@ -195,6 +197,12 @@ typedef struct ArchiveSlotConfig { #define INT32_HIGH_MASK 0xFF00 #define INT32_LOW_MASK 0x00FF +/* + * Interval in which standby snapshots are logged into the WAL stream, in + * milliseconds. 
+ */
+#define LOG_SNAPSHOT_INTERVAL_MS 15000
+
 /* we steal two bytes from persistency for upgrade */
 #define GET_SLOT_EXTRA_DATA_LENGTH(data) (((int)((data).persistency)) >> 16)
 #define SET_SLOT_EXTRA_DATA_LENGTH(data, length) ((data).persistency = (ReplicationSlotPersistency)((int)(((data).persistency) & INT32_LOW_MASK) | ((length) << 16)))
@@ -214,6 +222,7 @@ typedef struct ReplicationSlotState {
     XLogRecPtr max_required;
     XLogRecPtr quorum_min_required;
     XLogRecPtr min_tools_required;
+    XLogRecPtr min_archive_slot_required;
     bool exist_in_use;
 } ReplicationSlotState;
 /*
@@ -292,8 +301,9 @@ extern Datum pg_get_replication_slot_name(PG_FUNCTION_ARGS);
 /* slot redo */
 extern void slot_redo(XLogReaderState* record);
 extern void slot_desc(StringInfo buf, XLogReaderState* record);
+extern const char* slot_type_name(uint8 subtype);
 extern void redo_slot_advance(const ReplicationSlotPersistentData* slotInfo);
-extern void log_slot_advance(const ReplicationSlotPersistentData* slotInfo);
+extern void log_slot_advance(const ReplicationSlotPersistentData* slotInfo, char* extra_content = NULL);
 extern void log_slot_drop(const char* name);
 extern void LogCheckSlot();
 extern Size GetAllLogicalSlot(LogicalPersistentData*& LogicalSlot);
diff --git a/src/include/replication/snapbuild.h b/src/include/replication/snapbuild.h
index a29c79b3e..765542a38 100644
--- a/src/include/replication/snapbuild.h
+++ b/src/include/replication/snapbuild.h
@@ -51,6 +51,7 @@ typedef struct SnapBuild SnapBuild;
 
 /* forward declare so we don't have to include reorderbuffer.h */
 struct ReorderBuffer;
+struct ParallelReorderBuffer;
 
 /* forward declare so we don't have to include heapam_xlog.h */
 struct xl_heap_new_cid;
diff --git a/src/include/replication/syncrep.h b/src/include/replication/syncrep.h
index ef6dcd40e..26c78ecf7 100755
--- a/src/include/replication/syncrep.h
+++ b/src/include/replication/syncrep.h
@@ -17,9 +17,9 @@
 #include "utils/guc.h"
 #include "replication/replicainternal.h"
 
-#define SyncRepRequested() \
+#define SyncRepRequested() \
     (g_instance.attr.attr_storage.max_wal_senders > 0 && \
-    u_sess->attr.attr_storage.guc_synchronous_commit > SYNCHRONOUS_COMMIT_LOCAL_FLUSH)
+    u_sess->attr.attr_storage.guc_synchronous_commit > SYNCHRONOUS_COMMIT_LOCAL_FLUSH)
 
 /* SyncRepWaitMode */
 #define SYNC_REP_NO_WAIT -1
diff --git a/src/include/replication/walprotocol.h b/src/include/replication/walprotocol.h
index 0965e9e31..a2b07fec4 100755
--- a/src/include/replication/walprotocol.h
+++ b/src/include/replication/walprotocol.h
@@ -17,6 +17,7 @@
 #include "access/xlogdefs.h"
 #include "datatype/timestamp.h"
 #include "replication/replicainternal.h"
+#include "pgxc/barrier.h"
 
 #define XLOG_NAME_LENGTH 24
 /*
@@ -221,6 +222,14 @@ typedef struct StandbyReplyMessage {
     * message, to avoid a timeout disconnect.
     */
    bool replyRequested;
+
+    /* flag array
+     * 0x00000001 flag IS_PAUSE_BY_TARGET_BARRIER: recovery on the standby is paused at the target barrier
+     *
+     * 0x00000010 flag IS_CANCEL_LOG_CTRL
+     * If this flag is set, the walsender will cancel log flow control for this standby
+     */
+    uint32 replyFlags;
 } StandbyReplyMessage;
 
 /*
@@ -259,13 +268,33 @@ typedef struct StandbySwitchRequestMessage {
 * switchover request message in the streaming dr (message type ''). This is wrapped within
 * a CopyData message at the FE/BE protocol level.
 *
- * Note that the data length is not specified here.
+ *
 */
 typedef struct {
     /* The barrier LSN used by the streaming dr switchover this time */
     XLogRecPtr switchoverBarrierLsn;
 } HadrSwitchoverMessage;
+/*
+ * Reply message from hadr standby (message type 'R').
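The replyFlags bit array defined in StandbyReplyMessage above is tested with plain bit masks; STANDBY_IN_BARRIER_PAUSE in walsender.h below does exactly that for the first bit. A sketch of a walsender-side consumer (the function itself is illustrative):

```
#include "replication/walprotocol.h"
#include "replication/walreceiver.h"       /* IS_PAUSE_BY_TARGET_BARRIER, IS_CANCEL_LOG_CTRL */
#include "replication/walsender_private.h" /* WalSnd */

/* Hypothetical handler showing how the flag bits are read. */
static void HandleReplyFlagsSketch(const StandbyReplyMessage *reply, WalSnd *walsnd)
{
    walsnd->replyFlags = reply->replyFlags; /* mirror the standby's state */

    if ((reply->replyFlags & IS_PAUSE_BY_TARGET_BARRIER) != 0) {
        /* standby recovery is paused at the target barrier */
    }
    if ((reply->replyFlags & IS_CANCEL_LOG_CTRL) != 0) {
        /* standby asks the sender to stop log flow control (assumption) */
    }
}
```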
+ * + * Note that the data length is not specified here. + */ +typedef struct HadrReplyMessage { + /* The target barrier Id in standby cluster */ + char targetBarrierId[MAX_BARRIER_ID_LENGTH]; + /* receiver's system clock at the time of transmission */ + TimestampTz sendTime; + /* The target barrier LSN used by the streaming dr */ + XLogRecPtr targetBarrierLsn; + /* reserved fields */ + uint32 pad1; + uint32 pad2; + uint64 pad3; + uint64 pad4; +} HadrReplyMessage; + + /* * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ. * diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h index 14e48fd21..6d6df4177 100755 --- a/src/include/replication/walreceiver.h +++ b/src/include/replication/walreceiver.h @@ -28,7 +28,7 @@ #include "storage/latch.h" #include "storage/spin.h" #include "pgxc/barrier.h" - +#include "pgxc/pgxc.h" /* * MAXCONNINFO: maximum size of a connection string. @@ -38,9 +38,25 @@ #define MAXCONNINFO 1024 #define HIGHEST_PERCENT 100 #define STREAMING_START_PERCENT 90 +#define IS_PAUSE_BY_TARGET_BARRIER 0x00000001 +#define IS_CANCEL_LOG_CTRL 0x00000010 + +#ifdef ENABLE_MULTIPLE_NODES +#define AM_HADR_CN_WAL_RECEIVER (t_thrd.postmaster_cxt.HaShmData->is_cross_region && \ + t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && IS_PGXC_COORDINATOR) +#endif + #define AM_HADR_WAL_RECEIVER (t_thrd.postmaster_cxt.HaShmData->is_cross_region && \ t_thrd.postmaster_cxt.HaShmData->is_hadr_main_standby) +#define IS_DISASTER_RECOVER_MODE \ + (t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && \ + g_instance.attr.attr_common.stream_cluster_run_mode == RUN_MODE_STANDBY) + +#define IS_CN_DISASTER_RECOVER_MODE \ + (IS_PGXC_COORDINATOR && t_thrd.postmaster_cxt.HaShmData->current_mode == STANDBY_MODE && \ + g_instance.attr.attr_common.stream_cluster_run_mode == RUN_MODE_STANDBY) + #define DUMMY_STANDBY_DATADIR "base/dummy_standby" #define CHECK_MSG_SIZE(msglen, structType, errmsg)\ @@ -81,10 +97,8 @@ typedef struct WalRcvCtlBlock { XLogRecPtr writePtr; /* last byte + 1 written out in the standby */ XLogRecPtr flushPtr; /* last byte + 1 flushed in the standby */ XLogRecPtr walStart; - XLogRecPtr lastReadPtr; int64 walWriteOffset; int64 walFreeOffset; - int64 walReadOffset; bool walIsWriting; slock_t mutex; @@ -206,10 +220,13 @@ typedef struct WalRcvData { char recoveryStopBarrierId[MAX_BARRIER_ID_LENGTH]; char recoverySwitchoverBarrierId[MAX_BARRIER_ID_LENGTH]; char lastRecoveredBarrierId[MAX_BARRIER_ID_LENGTH]; + char lastReceivedBarrierId[MAX_BARRIER_ID_LENGTH]; XLogRecPtr lastRecoveredBarrierLSN; + XLogRecPtr lastReceivedBarrierLSN; XLogRecPtr lastSwitchoverBarrierLSN; XLogRecPtr targetSwitchoverBarrierLSN; bool isFirstTimeAccessStorage; + bool isPauseByTargetBarrier; Latch* obsArchLatch; struct ArchiveSlotConfig *archive_slot; uint32 rcvDoneFromShareStorage; @@ -221,7 +238,7 @@ typedef struct WalReceiverFunc { bool (*walrcv_receive)(int timeout, unsigned char* type, char** buffer, int* len); void (*walrcv_send)(const char *buffer, int nbytes); void (*walrcv_disconnect)(); - bool (*walrcv_command)(const char *cmd, char **err); + bool (*walrcv_command)(const char *cmd, char **err, int *sqlstate); void (*walrcv_identify_system)(); void (*walrcv_startstreaming)(const LibpqrcvConnectParam *options); void (*walrcv_create_slot)(const LibpqrcvConnectParam *options); @@ -277,6 +294,7 @@ extern void CloseWSDataFileOnDummyStandby(void); extern void InitWSDataNumOnDummyStandby(void); extern WalRcvCtlBlock* 
getCurrentWalRcvCtlBlock(void); + extern int walRcvWrite(WalRcvCtlBlock* walrcb); extern int WSWalRcvWrite(WalRcvCtlBlock* walrcb, char* buf, Size nbytes, XLogRecPtr start_ptr); extern void WalRcvXLogClose(void); @@ -301,11 +319,13 @@ extern void get_failover_host_conninfo_for_dummy(int *repl); extern void set_wal_rcv_write_rec_ptr(XLogRecPtr rec_ptr); extern void ha_set_rebuild_connerror(HaRebuildReason reason, WalRcvConnError connerror); extern void XLogWalRcvReceive(char *buf, Size nbytes, XLogRecPtr recptr); -extern void XLogWalRcvReceiveInBuf(char *buf, Size nbytes, XLogRecPtr recptr); extern void wal_get_ha_rebuild_reason(char *buildReason, ServerMode local_role, bool isRunning); extern bool HasBuildReason(); extern void GetMinLsnRecordsFromHadrCascadeStandby(void); +extern void XLogWalRecordsPreProcess(char **buf, Size *len, WalDataMessageHeader *msghdr); +extern int XLogDecompression(const char *buf, Size len, XLogRecPtr dataStart); void GetPasswordForHadrStreamingReplication(char user[], char password[]); +extern char* remove_ipv6_zone(char* addr_src, char* addr_dest, int len); static inline void WalRcvCtlAcquireExitLock(void) { diff --git a/src/include/replication/walsender.h b/src/include/replication/walsender.h index ca72089d9..a69cbbb0b 100644 --- a/src/include/replication/walsender.h +++ b/src/include/replication/walsender.h @@ -28,12 +28,17 @@ #define AM_WAL_NORMAL_SENDER (t_thrd.role == WAL_NORMAL_SENDER) #define AM_WAL_STANDBY_SENDER (t_thrd.role == WAL_STANDBY_SENDER) #define AM_WAL_DB_SENDER (t_thrd.role == WAL_DB_SENDER) +#define AM_PARALLEL_DECODE (t_thrd.role == PARALLEL_DECODE) +#define AM_LOGICAL_READ_RECORD (t_thrd.role == LOGICAL_READ_RECORD) #define AM_WAL_HADR_SENDER (t_thrd.role == WAL_HADR_SENDER) +#define AM_WAL_HADR_CN_SENDER (t_thrd.role == WAL_HADR_CN_SENDER) #define AM_WAL_SHARE_STORE_SENDER (t_thrd.role == WAL_SHARE_STORE_SENDER) #define AM_NOT_HADR_SENDER (AM_WAL_NORMAL_SENDER || AM_WAL_STANDBY_SENDER || AM_WAL_DB_SENDER) +#define AM_WAL_HADR_DNCN_SENDER (AM_WAL_HADR_SENDER || AM_WAL_HADR_CN_SENDER) #define AM_WAL_SENDER \ (AM_WAL_NORMAL_SENDER || AM_WAL_STANDBY_SENDER || AM_WAL_DB_SENDER || AM_WAL_HADR_SENDER || \ - AM_WAL_SHARE_STORE_SENDER) + AM_WAL_HADR_CN_SENDER || AM_WAL_SHARE_STORE_SENDER) +#define STANDBY_IN_BARRIER_PAUSE ((reply->replyFlags & IS_PAUSE_BY_TARGET_BARRIER) != 0) typedef struct WSXLogJustSendRegion { XLogRecPtr start_ptr; @@ -68,6 +73,9 @@ extern bool WalSndAllInProgress(int type); extern bool WalSndQuorumInProgress(int type); extern XLogSegNo WalGetSyncCountWindow(void); extern void add_archive_task_to_list(int archive_task_status_idx, WalSnd *walsnd); +extern void SendSignalToDecodeWorker(int signal, int slotId); +extern void SendSignalToReaderWorker(int signal, int slotId); +extern void XLogCompression(int *compressedSize, XLogRecPtr startPtr, Size nbytes); /* * Remember that we want to wakeup walsenders later diff --git a/src/include/replication/walsender_private.h b/src/include/replication/walsender_private.h index 8482cedfe..0941337ac 100644 --- a/src/include/replication/walsender_private.h +++ b/src/include/replication/walsender_private.h @@ -53,13 +53,17 @@ typedef struct LogCtrlData { int64 sleep_count_limit; XLogRecPtr prev_flush; XLogRecPtr prev_apply; + XLogRecPtr local_prev_flush; + TimestampTz prev_send_time; TimestampTz prev_reply_time; - uint64 pre_rate1; - uint64 pre_rate2; - uint64 pre_rpo_rate; + TimestampTz prev_calculate_time; /* Controls the flush_rate and apply_rate calculation interval. 
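The LogCtrlData fields below keep rates left-shifted by SHIFT_SPEED, a fixed-point trick that preserves fractional bytes-per-microsecond in integer math. SHIFT_SPEED's value is not visible in this hunk, so the sketch assumes 16:

```
#include "postgres.h"

static const int SHIFT_SPEED_SKETCH = 16; /* assumed; the real constant is defined elsewhere */

/* Illustrative rate calculation over one measurement period. */
static uint64 CalculateRateSketch(uint64 periodTotalBytes, int64 periodMicroseconds)
{
    if (periodMicroseconds <= 0) {
        return 0;
    }
    /* bytes per microsecond, scaled up by 2^SHIFT_SPEED to keep precision */
    return (periodTotalBytes << SHIFT_SPEED_SKETCH) / (uint64)periodMicroseconds;
}

/* A consumer shifts back down: bytesPerUs = flush_rate >> SHIFT_SPEED_SKETCH. */
```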
*/ + uint64 flush_rate; /* Recent average flush speed << SHIFT_SPEED. */ + uint64 apply_rate; /* Recent average apply speed << SHIFT_SPEED. */ + uint64 period_total_flush; /* Flush amount in a calculation period */ + uint64 period_total_apply; /* Apply amount in a calculation period */ + uint64 local_flush_rate; /* Local log generation speed << SHIFT_SPEED. */ int64 prev_RPO; int64 current_RPO; - TimestampTz prev_send_time; } LogCtrlData; /* @@ -78,6 +82,7 @@ typedef struct WalSnd { bool sendKeepalive; /* do we send keepalives on this connection? */ bool replSender; /* is the walsender a normal replication or building */ bool is_cross_cluster; /* is the walsender from another cluster? */ + bool isTermChanged; /* is the term changed? used in streaming dr cluster */ ServerMode peer_role; DbState peer_state; @@ -90,6 +95,8 @@ typedef struct WalSnd { XLogRecPtr write; XLogRecPtr flush; XLogRecPtr apply; + /* record standby reply message reply.replyFlags */ + uint32 replyFlags; /* if valid means all the required replication data already flushed on the standby */ XLogRecPtr data_flush; @@ -133,6 +140,9 @@ typedef struct WalSnd { * Time needed for synchronous per xlog while catching up. */ double catchupRate; + /* Whether the interaction between the active and standby clusters of the streaming disaster recovery switchover is complete */ + bool isInteractionCompleted; + TimestampTz lastRequestTimestamp; } WalSnd; extern THR_LOCAL WalSnd* MyWalSnd; @@ -185,7 +195,7 @@ typedef struct WalSndCtlData { bool sync_master_standalone; TimestampTz keep_sync_window_start; bool out_keep_sync_window; - + /* * The demotion of postmaster Also indicates that all the walsenders * should reject any demote requests if postmaster is doning domotion. diff --git a/src/include/service/remote_read_client.h b/src/include/service/remote_read_client.h index abf57305c..032e119ec 100755 --- a/src/include/service/remote_read_client.h +++ b/src/include/service/remote_read_client.h @@ -29,11 +29,20 @@ #include "c.h" #include "storage/remote_adapter.h" +#include "storage/smgr/relfilenode.h" +#include "postmaster/pagerepair.h" +#include "access/xlog_basic.h" +typedef uint64 XLogRecPtr; extern int RemoteGetCU(char* remote_address, uint32 spcnode, uint32 dbnode, uint32 relnode, int32 colid, uint64 offset, int32 size, uint64 lsn, char* cu_data); -extern int RemoteGetPage(char* remote_address, uint32 spcnode, uint32 dbnode, uint32 relnode, int2 bucketnode, uint2 opt, - int32 forknum, uint32 blocknum, uint32 blocksize, uint64 lsn, char* page_data); +extern int RemoteGetPage(char* remote_address, RepairBlockKey *key, uint32 blocksize, uint64 lsn, + char* page_data, const XLogPhyBlock *pblk, int timeout); + +extern int RemoteGetFile(char* remoteAddress, RemoteReadFileKey *key, uint64 lsn, uint32 size, char* pageData, + XLogRecPtr *remote_lsn, uint32 *remote_size, int timeout); +extern int RemoteGetFileSize(char* remoteAddress, RemoteReadFileKey *key, uint64 lsn, int64 *size, int timeout); + #endif /* REMOTE_READ_CLIENT_H */ diff --git a/src/include/storage/buf/buf_internals.h b/src/include/storage/buf/buf_internals.h index 7813dd107..26d4fba11 100644 --- a/src/include/storage/buf/buf_internals.h +++ b/src/include/storage/buf/buf_internals.h @@ -96,13 +96,6 @@ typedef struct buftag { BlockNumber blockNum; /* blknum relative to begin of reln */ } BufferTag; -typedef struct buftagnocompress { - RelFileNodeV2 rnode; - ForkNumber forkNum; - BlockNumber blockNum; /* blknum relative to begin of reln */ -} BufferTagSecondVer; - - typedef struct 
buftagnohbkt { RelFileNodeOld rnode; /* physical relation identifier */ ForkNumber forkNum; @@ -346,6 +339,6 @@ extern void DropRelFileNodeAllLocalBuffers(const RelFileNode& rnode); extern void AtEOXact_LocalBuffers(bool isCommit); extern void update_wait_lockid(LWLock* lock); extern char* PageDataEncryptForBuffer(Page page, BufferDesc *bufdesc, bool is_segbuf = false); -extern void FlushBuffer(void* buf, SMgrRelation reln, ReadBufferMethod flushmethod = WITH_NORMAL_CACHE); +extern void FlushBuffer(void* buf, SMgrRelation reln, ReadBufferMethod flushmethod = WITH_NORMAL_CACHE, bool skipFsync = false); extern void LocalBufferFlushAllBuffer(); #endif /* BUFMGR_INTERNALS_H */ diff --git a/src/include/storage/buf/bufmgr.h b/src/include/storage/buf/bufmgr.h index 0f02e92e0..526a96f61 100644 --- a/src/include/storage/buf/bufmgr.h +++ b/src/include/storage/buf/bufmgr.h @@ -20,6 +20,7 @@ #include "storage/buf/bufpage.h" #include "storage/smgr/relfilenode.h" #include "utils/relcache.h" +#include "postmaster/pagerepair.h" #define SEGMENT_BUFFER_NUM (g_instance.attr.attr_storage.NSegBuffers) // 1GB #define SegmentBufferStartID (g_instance.attr.attr_storage.NBuffers) @@ -29,6 +30,11 @@ #define IsSegmentBufferID(id) ((id) >= SegmentBufferStartID) #define SharedBufferNumber (SegmentBufferStartID) +#define USE_CKPT_THREAD_SYNC (!g_instance.attr.attr_storage.enableIncrementalCheckpoint || \ + IsBootstrapProcessingMode() || \ + pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) < 1) + + typedef void* Block; typedef struct PrivateRefCountEntry { @@ -45,7 +51,8 @@ typedef enum BufferAccessStrategyType { BAS_BULKREAD, /* Large read-only scan (hint bit updates are * ok) */ BAS_BULKWRITE, /* Large multi-block write (e.g. COPY IN) */ - BAS_VACUUM /* VACUUM */ + BAS_VACUUM, /* VACUUM */ + BAS_REPAIR /* repair file */ } BufferAccessStrategyType; /* Possible modes for ReadBufferExtended() */ @@ -245,7 +252,7 @@ extern Buffer ReadBufferWithoutRelcache(const RelFileNode &rnode, ForkNumber for extern Buffer ReadUndoBufferWithoutRelcache(const RelFileNode &rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, char relpersistence); extern Buffer ReadBufferForRemote(const RelFileNode &rnode, ForkNumber forkNum, BlockNumber blockNum, - ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit); + ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit, const XLogPhyBlock *pblk); extern void MarkBufferMetaFlag(Buffer bufid, bool flag); extern void ForgetBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum); extern void ReleaseBuffer(Buffer buffer); @@ -284,6 +291,12 @@ extern BlockNumber PartitionGetNumberOfBlocksInFork(Relation relation, Partition #define RelationGetNumberOfBlocks(reln) RelationGetNumberOfBlocksInFork(reln, MAIN_FORKNUM) +/* + * prototypes for functions in segbuffer.cpp + */ +extern bool SegPinBuffer(BufferDesc *buf); +extern void SegUnpinBuffer(BufferDesc *buf); + /* * PartitionGetNumberOfBlocks * Determines the current number of pages in the partition. 
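For orientation, here is a minimal sketch of how the remote-read declarations above might be driven from a file-repair path, assuming a size-then-fetch flow. The signatures of RemoteGetFileSize and RemoteGetFile are taken verbatim from the hunk above; the zero-on-success return convention and the caller-built RemoteReadFileKey are assumptions made only for this illustration.

```
#include "postgres.h"
#include "service/remote_read_client.h"

/* Sketch only: fetch one file from a remote instance, probing the size
 * first so the buffer can be allocated up front. Assumes both calls
 * return 0 on success, which is not guaranteed by the declarations. */
static char* FetchRemoteFileSketch(char* remoteAddress, RemoteReadFileKey* key,
                                   uint64 lsn, int timeout, uint32* outSize)
{
    int64 size = 0;
    if (RemoteGetFileSize(remoteAddress, key, lsn, &size, timeout) != 0 || size <= 0) {
        return NULL; /* peer unreachable or file missing */
    }

    char* buf = (char*)palloc((Size)size);
    XLogRecPtr remoteLsn = 0;
    uint32 remoteSize = 0;
    if (RemoteGetFile(remoteAddress, key, lsn, (uint32)size, buf,
                      &remoteLsn, &remoteSize, timeout) != 0) {
        pfree(buf);
        return NULL;
    }

    Assert(remoteSize <= (uint32)size);
    *outSize = remoteSize; /* may differ from the probed size if the file changed */
    return buf;            /* caller is responsible for pfree() */
}
```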
@@ -292,7 +305,8 @@ extern BlockNumber PartitionGetNumberOfBlocksInFork(Relation relation, Partition extern bool BufferIsPermanent(Buffer buffer); -extern void RemoteReadBlock(const RelFileNodeBackend& rnode, ForkNumber forkNum, BlockNumber blockNum, char* buf); +extern void RemoteReadBlock(const RelFileNodeBackend& rnode, ForkNumber forkNum, BlockNumber blockNum, + char* buf, const XLogPhyBlock *pblk, int timeout = 60); #ifdef NOT_USED extern void PrintPinnedBufs(void); @@ -311,7 +325,6 @@ extern bool ConditionalLockBuffer(Buffer buffer); extern void LockBufferForCleanup(Buffer buffer); extern bool ConditionalLockBufferForCleanup(Buffer buffer); extern bool ConditionalLockUHeapBufferForCleanup(Buffer buffer); -extern bool IsBufferCleanupOK(Buffer buffer); extern bool HoldingBufferPinThatDelaysRecovery(void); extern void AsyncUnpinBuffer(volatile void* bufHdr, bool forgetBuffer); extern void AsyncCompltrPinBuffer(volatile void* bufHdr); @@ -347,6 +360,10 @@ extern Buffer ReadBuffer_common_for_direct(RelFileNode rnode, char relpersistenc extern Buffer ReadBuffer_common_for_localbuf(RelFileNode rnode, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit); extern void DropRelFileNodeShareBuffers(RelFileNode node, ForkNumber forkNum, BlockNumber firstDelBlock); +extern void RangeForgetBuffer(RelFileNode node, ForkNumber forkNum, BlockNumber firstDelBlock, + BlockNumber endDelBlock); + +extern void DropSegRelNodeSharedBuffer(RelFileNode node, ForkNumber forkNum); extern int GetThreadBufferLeakNum(void); extern void flush_all_buffers(Relation rel, Oid db_id, HTAB *hashtbl = NULL); /* in localbuf.c */ @@ -356,5 +373,10 @@ extern bool InsertTdeInfoToCache(RelFileNode rnode, TdeInfo *tde_info); extern void RelationInsertTdeInfoToCache(Relation reln); extern void PartitionInsertTdeInfoToCache(Relation reln, Partition p); extern void wakeup_pagewriter_thread(); +extern int getDuplicateRequest(CheckpointerRequest *requests, int num_requests, bool *skip_slot); +extern void RemoteReadFile(RemoteReadFileKey *key, char *buf, uint32 size, int timeout, uint32* remote_size); +extern int64 RemoteReadFileSize(RemoteReadFileKey *key, int timeout); + +extern bool StartBufferIO(BufferDesc* buf, bool forInput); #endif diff --git a/src/include/storage/buf/bufpage.h b/src/include/storage/buf/bufpage.h index 2538496b8..f52a9dc7f 100644 --- a/src/include/storage/buf/bufpage.h +++ b/src/include/storage/buf/bufpage.h @@ -180,8 +180,6 @@ typedef HeapPageHeaderData* HeapPageHeader; #define GetPageHeaderSize(page) (PageIs8BXidHeapVersion(page) ? SizeOfHeapPageHeaderData : SizeOfPageHeaderData) #define SizeOfHeapPageUpgradeData MAXALIGN(offsetof(HeapPageHeaderData, pd_linp) - offsetof(PageHeaderData, pd_linp)) - -#define GET_ITEMID_BY_IDX(buf, i) ((ItemIdData *)(buf + GetPageHeaderSize(buf) + (i) * sizeof(ItemIdData))) #define PageXLogRecPtrGet(val) \ ((uint64) (val).xlogid << 32 | (val).xrecoff) @@ -228,7 +226,6 @@ typedef HeapPageHeaderData* HeapPageHeader; #define PG_UHEAP_PAGE_LAYOUT_VERSION 7 #define PG_HEAP_PAGE_LAYOUT_VERSION 6 #define PG_COMM_PAGE_LAYOUT_VERSION 5 -#define PG_PAGE_4B_LAYOUT_VERSION 4 /* ---------------------------------------------------------------- * page support macros @@ -269,6 +266,8 @@ typedef HeapPageHeaderData* HeapPageHeader; (PageIs8BXidHeapVersion(page) ? 
((ItemId)(&((HeapPageHeader)(page))->pd_linp[(offsetNumber)-1])) \ : ((ItemId)(&((PageHeader)(page))->pd_linp[(offsetNumber)-1]))) +#define HeapPageGetItemId(page, offsetNumber) ((ItemId)(&((HeapPageHeader)(page))->pd_linp[(offsetNumber)-1])) \ + /* * PageGetContents * To be used in case the page does not contain item pointers. @@ -306,10 +305,6 @@ typedef HeapPageHeaderData* HeapPageHeader; */ #define PageGetPageLayoutVersion(page) (((PageHeader)(page))->pd_pagesize_version & 0x00FF) -#define PageIs8BXidVersion(page) (PageGetPageLayoutVersion(page) == PG_COMM_PAGE_LAYOUT_VERSION) - -#define PageIs4BXidVersion(page) (PageGetPageLayoutVersion(page) == PG_PAGE_4B_LAYOUT_VERSION) - #define PageIs8BXidHeapVersion(page) (PageGetPageLayoutVersion(page) == PG_HEAP_PAGE_LAYOUT_VERSION) #define PageIsSegmentVersion(page) (PageGetPageLayoutVersion(page) == PG_SEGMENT_PAGE_LAYOUT_VERSION) @@ -344,14 +339,14 @@ typedef HeapPageHeaderData* HeapPageHeader; (AssertMacro(PageIsValid(page)), (char*)((char*)(page) + ((PageHeader)(page))->pd_special)) #define BTPageGetSpecial(page) \ - (PageIs8BXidVersion(page) \ - ? (AssertMacro(((PageHeader)page)->pd_special == BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData))), \ - (BTPageOpaque)((Pointer)page + BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData)))) \ - : NULL) +(\ + AssertMacro(((PageHeader)page)->pd_special == BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData))), \ + (BTPageOpaque)((Pointer)page + BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData))) \ +) #define HeapPageSetPruneXid(page, xid) \ (((PageHeader)(page))->pd_prune_xid = NormalTransactionIdToShort( \ - PageIs8BXidHeapVersion(page) ? ((HeapPageHeader)(page))->pd_xid_base : 0, (xid))) + ((HeapPageHeader)(page))->pd_xid_base, (xid))) #define PageSetPruneXid(page, xid) \ (((PageHeader)(page))->pd_prune_xid = NormalTransactionIdToShort( \ @@ -359,7 +354,7 @@ typedef HeapPageHeaderData* HeapPageHeader; #define HeapPageGetPruneXid(page) \ (ShortTransactionIdToNormal( \ - PageIs8BXidHeapVersion(page) ? 
((HeapPageHeader)(page))->pd_xid_base : 0, ((PageHeader)(page))->pd_prune_xid)) + ((HeapPageHeader)(page))->pd_xid_base, ((PageHeader)(page))->pd_prune_xid)) #define PageGetPruneXid(page) \ (ShortTransactionIdToNormal( \ @@ -408,7 +403,6 @@ inline OffsetNumber PageGetMaxOffsetNumber(char* pghr) #define PageSetLSNInternal(page, lsn) \ (((PageHeader)(page))->pd_lsn.xlogid = (uint32)((lsn) >> 32), ((PageHeader)(page))->pd_lsn.xrecoff = (uint32)(lsn)) -#ifndef FRONTEND inline void PageSetLSN(Page page, XLogRecPtr LSN, bool check = true) { if (check && XLByteLT(LSN, PageGetLSN(page))) { @@ -416,7 +410,6 @@ inline void PageSetLSN(Page page, XLogRecPtr LSN, bool check = true) } PageSetLSNInternal(page, LSN); } -#endif #define PageHasFreeLinePointers(page) (((PageHeader)(page))->pd_flags & PD_HAS_FREE_LINES) #define PageSetHasFreeLinePointers(page) (((PageHeader)(page))->pd_flags |= PD_HAS_FREE_LINES) @@ -502,11 +495,10 @@ extern OffsetNumber PageAddItem( Page page, Item item, Size size, OffsetNumber offsetNumber, bool overwrite, bool is_heap); extern Page PageGetTempPage(Page page); extern Page PageGetTempPageCopy(Page page); -extern Page PageGetTempPageCopySpecial(Page page, bool isbtree); +extern Page PageGetTempPageCopySpecial(Page page); extern void PageRestoreTempPage(Page tempPage, Page oldPage); extern void PageRepairFragmentation(Page page); extern Size PageGetFreeSpace(Page page); -extern Size PageGetFreeSpaceForMultipleTuples(Page page, int ntups); extern Size PageGetExactFreeSpace(Page page); extern Size PageGetHeapFreeSpace(Page page); extern void PageIndexTupleDelete(Page page, OffsetNumber offset); @@ -519,7 +511,6 @@ extern void PageDataDecryptIfNeed(Page page); extern char* PageSetChecksumCopy(Page page, BlockNumber blkno, bool is_segbuf = false); extern void PageSetChecksumInplace(Page page, BlockNumber blkno); -extern void PageLocalUpgrade(Page page); extern void DumpPageInfo(Page page, XLogRecPtr newLsn); extern void SegPageInit(Page page, Size pageSize); #endif /* BUFPAGE_H */ diff --git a/src/include/storage/dfs/dfscache_mgr.h b/src/include/storage/dfs/dfscache_mgr.h index c283ae047..d98a79ce1 100644 --- a/src/include/storage/dfs/dfscache_mgr.h +++ b/src/include/storage/dfs/dfscache_mgr.h @@ -26,8 +26,12 @@ #ifndef METACACHEMGR_H #define METACACHEMGR_H +#include "pg_config.h" + #include +#ifndef ENABLE_LITE_MODE #include "orc_proto.pb.h" +#endif #include "storage/cache_mgr.h" #include "storage/smgr/relfilenode.h" @@ -115,9 +119,11 @@ int CarbonMetaCacheGetBlockSize(CacheSlotId_t slotId); void MetaCacheSetBlockWithFileName(CacheSlotId_t slotId, const char* fileName); CacheSlotId_t MetaCacheAllocBlock( RelFileNodeOld* fileNode, int32 fileID, uint32 stripeOrBlocketID, uint32 columnID, bool& found, int type); +#ifndef ENABLE_LITE_MODE void OrcMetaCacheSetBlock(CacheSlotId_t slotId, uint64 footerStart, const orc::proto::PostScript* postScript, const orc::proto::Footer* fileFooter, const orc::proto::StripeFooter* stripeFooter, const orc::proto::RowIndex* rowIndex, const char* fileName, const char* dataDNA); +#endif void CarbonMetaCacheSetBlock(CacheSlotId_t slotId, uint64 headerSize, uint64 footerSize, unsigned char* fileHeader, unsigned char* fileFooter, const char* fileName, const char* dataDNA); @@ -138,9 +144,11 @@ public: void MetaBlockCompleteIO(int slotId); int GetOrcMetaBlockSize(CacheSlotId_t slotId); int GetCarbonMetaBlockSize(CacheSlotId_t slotId); +#ifndef ENABLE_LITE_MODE void SetOrcMetaBlockValue(CacheSlotId_t slotId, uint64 footerStart, const orc::proto::PostScript* 
postScript, const orc::proto::Footer* fileFooter, const orc::proto::StripeFooter* stripeFooter, const orc::proto::RowIndex* rowIndex, const char* fileName, const char* dataDNA); +#endif void SetCarbonMetaBlockValue(CacheSlotId_t slotId, uint64 headerSize, uint64 footerSize, unsigned char* fileHeader, unsigned char* fileFooter, const char* fileName, const char* dataDNA); diff --git a/src/include/storage/item/itemptr.h b/src/include/storage/item/itemptr.h index 331fa04a8..b3941073a 100644 --- a/src/include/storage/item/itemptr.h +++ b/src/include/storage/item/itemptr.h @@ -171,6 +171,8 @@ typedef ItemPointerData* ItemPointer; */ extern bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2); +/* for upgrade from an existing session: syscache holds builtin tuples, so AssertMacro(ItemPointerIsValid(pointer)) would fail */ +extern bool ItemPointerEqualsNoCheck(ItemPointer pointer1, ItemPointer pointer2); extern int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2); /* -------------------------------------------------------- diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h index 0fab300dc..3dd4b8919 100644 --- a/src/include/storage/lmgr.h +++ b/src/include/storage/lmgr.h @@ -37,6 +37,7 @@ extern void LockRelation(Relation relation, LOCKMODE lockmode); extern bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode); extern void UnlockRelation(Relation relation, LOCKMODE lockmode); extern bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode); +extern bool LockHasWaitersPartition(Relation relation, LOCKMODE lockmode); extern void LockRelationIdForSession(LockRelId* relid, LOCKMODE lockmode); extern void UnlockRelationIdForSession(LockRelId* relid, LOCKMODE lockmode); @@ -56,19 +57,20 @@ extern bool ConditionalLockPage(Relation relation, BlockNumber blkno, LOCKMODE l extern void UnlockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode); /* Lock a tuple (see heap_lock_tuple before assuming you understand this) */ -extern void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, bool allow_con_update = false); +extern void LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode, bool allow_con_update = false, int waitSec = 0); +extern void LockTupleUid(Relation relation, uint64 uid, LOCKMODE lockmode, bool allow_con_update, bool lockTuple); extern bool ConditionalLockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode); extern void UnlockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode); /* Lock an XID (used to wait for a transaction to finish) */ extern void XactLockTableInsert(TransactionId xid); extern void XactLockTableDelete(TransactionId xid); -extern void XactLockTableWait(TransactionId xid, bool allow_con_update = false); +extern void XactLockTableWait(TransactionId xid, bool allow_con_update = false, int waitSec = 0); extern bool ConditionalXactLockTableWait(TransactionId xid, bool waitparent = true, bool bCareNextxid = false); /* Lock a SubXID */ extern void SubXactLockTableInsert(SubTransactionId subxid); -extern void SubXactLockTableWait(TransactionId xid, SubTransactionId subxid); +extern void SubXactLockTableWait(TransactionId xid, SubTransactionId subxid, int waitSec = 0); extern bool ConditionalSubXactLockTableWait(TransactionId xid, SubTransactionId subxid); /* Lock a general object (other than a relation) of the current database */ diff --git a/src/include/storage/lock/lock.h b/src/include/storage/lock/lock.h index 6339093c3..3b6f3510b 100644 --- a/src/include/storage/lock/lock.h +++ 
b/src/include/storage/lock/lock.h @@ -18,7 +18,6 @@ #include "storage/lock/lwlock.h" #include "storage/shmem.h" #include "gs_thread.h" -#include "knl/knl_session.h" typedef struct PROC_QUEUE { SHM_QUEUE links; /* head of list of PGPROC objects */ @@ -179,6 +178,7 @@ typedef enum LockTagType { LOCKTAG_RELFILENODE, /* relfilenode */ LOCKTAG_SUBTRANSACTION, /* subtransaction (for waiting for subxact done) */ /* ID info for a transaction is its TransactionId + SubTransactionId */ + LOCKTAG_UID, LOCK_EVENT_NUM } LockTagType; @@ -253,6 +253,15 @@ typedef struct LOCKTAG { (locktag).locktag_type = LOCKTAG_TUPLE, \ (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD) +#define SET_LOCKTAG_UID(locktag, dboid, reloid, uidHighBits, uidLowBits) \ + ((locktag).locktag_field1 = (dboid), \ + (locktag).locktag_field2 = (reloid), \ + (locktag).locktag_field3 = (uidHighBits), \ + (locktag).locktag_field4 = (uidLowBits), \ + (locktag).locktag_field5 = 0, \ + (locktag).locktag_type = LOCKTAG_UID, \ + (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD) + #define SET_LOCKTAG_TRANSACTION(locktag, xid) \ ((locktag).locktag_field1 = (uint32)((xid)&0xFFFFFFFF), \ (locktag).locktag_field2 = (uint32)((xid) >> 32), \ @@ -470,6 +479,12 @@ typedef struct LOCALLOCK { #define LOCALLOCK_LOCKMETHOD(llock) ((llock).tag.lock.locktag_lockmethodid) +typedef struct GlobalSessionId { + uint64 sessionId; /* Increasing sequence num */ + uint32 nodeId; /* the number of the send node */ + /* Used to identify the latest global sessionid during pooler reuse */ + uint64 seq; +} GlobalSessionId; /* * These structures hold information passed from lmgr internals to the lock * listing user-level functions (in lockfuncs.c). @@ -547,6 +562,90 @@ typedef enum LockWaitPolicy { #define LockHashPartitionLockByProc(leader_pgproc) \ LockHashPartitionLock((leader_pgproc)->pgprocno) +/* Macros for manipulating proc->fpLockBits */ +#define FAST_PATH_BITS_PER_SLOT 3 +#define FAST_PATH_LOCKNUMBER_OFFSET 1 +#define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1) +#define FAST_PATH_GET_BITS(proc, n) (((proc)->fpLockBits[n / FP_LOCK_SLOTS_PER_LOCKBIT] \ + >> (FAST_PATH_BITS_PER_SLOT * (n % FP_LOCK_SLOTS_PER_LOCKBIT))) & FAST_PATH_MASK) +#define FAST_PATH_BIT_POSITION(n, l) \ + (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \ + AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \ + AssertMacro((n) < FP_LOCK_SLOTS_PER_LOCKBIT), \ + ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n))) + +#define FAST_PATH_SET_LOCKMODE(proc, n, l) \ + (proc)->fpLockBits[n / FP_LOCK_SLOTS_PER_LOCKBIT] |= \ + UINT64CONST(UINT64CONST(1) << FAST_PATH_BIT_POSITION((n % FP_LOCK_SLOTS_PER_LOCKBIT), l)) +#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \ + (proc)->fpLockBits[n / FP_LOCK_SLOTS_PER_LOCKBIT] &= \ + ~(UINT64CONST(UINT64CONST(1) << FAST_PATH_BIT_POSITION((n % FP_LOCK_SLOTS_PER_LOCKBIT), l))) +#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \ + ((proc)->fpLockBits[n / FP_LOCK_SLOTS_PER_LOCKBIT] & \ + (UINT64CONST(UINT64CONST(1) << FAST_PATH_BIT_POSITION((n % FP_LOCK_SLOTS_PER_LOCKBIT), l)))) + +#define PRINT_WAIT_LENTH (8 + 1) +#define CHECK_LOCKMETHODID(lockMethodId) \ + do { \ + if (unlikely((lockMethodId) == 0 || (lockMethodId) >= lengthof(LockMethods))) { \ + ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), \ + errmsg("unrecognized lock method: %hu", (lockMethodId)))); \ + } \ + } while (0) +#define CHECK_LOCKMODE(lockMode, lockMethodTable) \ + do { \ + if (unlikely((lockMode) <= 0 || (lockMode) > 
(lockMethodTable)->numLockModes)) { \ ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), \ errmsg("unrecognized lock mode: %d", (lockMode)))); \ } \ } while (0) + +/* + * The fast-path lock mechanism is concerned only with relation locks on + * unshared relations by backends bound to a database. The fast-path + * mechanism exists mostly to accelerate acquisition and release of locks + * that rarely conflict. Because ShareUpdateExclusiveLock is + * self-conflicting, it can't use the fast-path mechanism; but it also does + * not conflict with any of the locks that do, so we can ignore it completely. + */ +#define EligibleForRelationFastPath(locktag, mode) \ + ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \ + ((locktag)->locktag_type == LOCKTAG_RELATION || (locktag)->locktag_type == LOCKTAG_PARTITION) && \ + (mode) < ShareUpdateExclusiveLock) +#define ConflictsWithRelationFastPath(locktag, mode) \ + ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \ + ((locktag)->locktag_type == LOCKTAG_RELATION || (locktag)->locktag_type == LOCKTAG_PARTITION) && \ + (mode) > ShareUpdateExclusiveLock) + +/* + * To make the fast-path lock mechanism work, we must have some way of + * preventing the use of the fast-path when a conflicting lock might be + * present. We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS + * partitions, and maintain an integer count of the number of "strong" lockers + * in each partition. When any "strong" lockers are present (which is + * hopefully not very often), the fast-path mechanism can't be used, and we + * must fall back to the slower method of pushing matching locks directly + * into the main lock tables. + * + * The deadlock detector does not know anything about the fast path mechanism, + * so any locks that might be involved in a deadlock must be transferred from + * the fast-path queues to the main lock table. 
+ */ +#define FAST_PATH_STRONG_LOCK_HASH_BITS 10 +#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS (1 << FAST_PATH_STRONG_LOCK_HASH_BITS) +#define FastPathStrongLockHashPartition(hashcode) ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS) + +typedef struct FastPathStrongRelationLockData { + slock_t mutex; + uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]; +} FastPathStrongRelationLockData; + +/* Record that's written to 2PC state file when a lock is persisted */ +typedef struct TwoPhaseLockRecord { + LOCKTAG locktag; + LOCKMODE lockmode; +} TwoPhaseLockRecord; /* * function prototypes */ @@ -555,10 +654,10 @@ extern LockMethod GetLocksMethodTable(const LOCK *lock); extern uint32 LockTagHashCode(const LOCKTAG *locktag); extern bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2); extern LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, - bool allow_con_update = false); + bool allow_con_update = false, int waitSec = 0); extern bool LockIncrementIfExists(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock); extern LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, - bool report_memory_error, bool allow_con_update = false); + bool report_memory_error, bool allow_con_update = false, int waitSec = 0); extern void AbortStrongLockAcquire(void); extern bool LockRelease(const LOCKTAG* locktag, LOCKMODE lockmode, bool sessionLock); extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks); diff --git a/src/include/storage/lock/lwlock.h b/src/include/storage/lock/lwlock.h index 7eb802e5c..cd07da784 100644 --- a/src/include/storage/lock/lwlock.h +++ b/src/include/storage/lock/lwlock.h @@ -4,9 +4,9 @@ * Lightweight lock manager * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/storage/lock/lwlock.h * @@ -44,6 +44,7 @@ const struct LWLOCK_PARTITION_DESC LWLockPartInfo[] = { {"CSNLOG_PART", 512, 1, 512}, {"LOG2_LOCKTABLE_PART", 4, 4, 16}, /* lock table partition range is 2^4 to 2^16 */ {"TWOPHASE_PART", 1, 1, 64}, + {"FASTPATH_PART", 20, 20, 10000} }; /* @@ -106,8 +107,12 @@ const struct LWLOCK_PARTITION_DESC LWLockPartInfo[] = { /* Number of partitions of the ngroup info hash table */ #define NUM_NGROUP_INFO_PARTITIONS 256 +#ifndef ENABLE_LITE_MODE /* Number of partitions of the io state hashtable */ #define NUM_IO_STAT_PARTITIONS 128 +#else +#define NUM_IO_STAT_PARTITIONS 2 +#endif /* Number of partitions of the xid => procid hashtable */ #define NUM_PROCXACT_PARTITIONS 128 @@ -127,11 +132,15 @@ const struct LWLOCK_PARTITION_DESC LWLockPartInfo[] = { #define NUM_SESSION_ROLEID_PARTITIONS 128 #ifdef WIN32 -#define NUM_INDIVIDUAL_LWLOCKS 113 /* num should be same as lwlockname.txt */ +#define NUM_INDIVIDUAL_LWLOCKS 116 /* num should be same as lwlockname.txt */ #endif /* Number of partitions of the global package runtime state hashtable */ +#ifndef ENABLE_LITE_MODE #define NUM_GPRC_PARTITIONS 128 +#else +#define NUM_GPRC_PARTITIONS 2 +#endif /* * WARNING---Please keep the order of LWLockTrunkOffset and BuiltinTrancheIds consistent!!! 
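The bit layout behind the FAST_PATH_* macros added to lock.h above packs FAST_PATH_BITS_PER_SLOT (3) one-bit lock-mode flags per slot and FP_LOCK_SLOTS_PER_LOCKBIT (20) slots per uint64 word, so slot n lives in word n / 20 at bit offset 3 * (n % 20), and the 1-based mode l occupies bit (l - 1) within that group; the three eligible modes are the weak ones below ShareUpdateExclusiveLock (AccessShareLock, RowShareLock, RowExclusiveLock in the PostgreSQL numbering). A self-contained sketch of the same arithmetic, outside the kernel's types:

```
/* Standalone demo mirroring FAST_PATH_SET/CHECK/CLEAR_LOCKMODE above. */
#include <cassert>
#include <cstdint>

constexpr unsigned kBitsPerSlot = 3;   /* FAST_PATH_BITS_PER_SLOT */
constexpr unsigned kSlotsPerWord = 20; /* FP_LOCK_SLOTS_PER_LOCKBIT */

static inline void SetMode(uint64_t* words, unsigned n, unsigned mode)
{
    /* mode is 1-based, matching FAST_PATH_LOCKNUMBER_OFFSET */
    words[n / kSlotsPerWord] |= UINT64_C(1) << ((mode - 1) + kBitsPerSlot * (n % kSlotsPerWord));
}

static inline void ClearMode(uint64_t* words, unsigned n, unsigned mode)
{
    words[n / kSlotsPerWord] &= ~(UINT64_C(1) << ((mode - 1) + kBitsPerSlot * (n % kSlotsPerWord)));
}

static inline bool CheckMode(const uint64_t* words, unsigned n, unsigned mode)
{
    return ((words[n / kSlotsPerWord] >> ((mode - 1) + kBitsPerSlot * (n % kSlotsPerWord))) & 1) != 0;
}

int main()
{
    uint64_t words[2] = {0, 0};              /* enough for 40 fast-path slots */
    SetMode(words, 21, 3);                   /* slot 21 -> word 1, bits 3..5 */
    assert(CheckMode(words, 21, 3) && !CheckMode(words, 21, 1));
    ClearMode(words, 21, 3);
    assert(words[1] == 0);
    return 0;
}
```

With 20 slots this collapses to the old single-uint64 fpLockBits; larger settings (up to 10000 per the {"FASTPATH_PART", 20, 20, 10000} entry above) simply add words.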
@@ -245,7 +254,12 @@ enum BuiltinTrancheIds LWTRANCHE_SEGHEAD_PARTITION, LWTRANCHE_TWOPHASE_STATE, LWTRANCHE_ROLEID_PARTITION, + LWTRANCHE_PGWR_SYNC_QUEUE, + LWTRANCHE_BARRIER_TBL, + LWTRANCHE_PAGE_REPAIR, + LWTRANCHE_FILE_REPAIR, LWTRANCHE_REPLICATION_ORIGIN, + LWTRANCHE_AUDIT_INDEX_WAIT, /* * Each trancheId above should have a corresponding item in BuiltinTrancheNames; */ diff --git a/src/include/storage/mot/mot_xlog.h b/src/include/storage/mot/mot_xlog.h index c965f3d30..2b560ec0b 100644 --- a/src/include/storage/mot/mot_xlog.h +++ b/src/include/storage/mot/mot_xlog.h @@ -30,5 +30,6 @@ extern void MOTRedo(XLogReaderState* record); extern void MOTDesc(StringInfo buf, XLogReaderState* record); +extern const char* MOT_type_name(uint8 subtype); #endif /* MOT_XLOG_H */ diff --git a/src/include/storage/page_compression.h b/src/include/storage/page_compression.h deleted file mode 100644 index d8d4c6e26..000000000 --- a/src/include/storage/page_compression.h +++ /dev/null @@ -1,334 +0,0 @@ -/* - * page_compression.h - * internal declarations for page compression - * - * Copyright (c) 2020, PostgreSQL Global Development Group - * - * IDENTIFICATION - * src/include/storage/page_compression.h - */ - -#ifndef PAGE_COMPRESSION_H -#define PAGE_COMPRESSION_H - -#include - -#include "storage/buf/bufpage.h" -#include "datatype/timestamp.h" -#include "catalog/pg_class.h" -#include "utils/atomic.h" - -/* The page compression feature relies on native atomic operation support. - * On platforms that do not support native atomic operations, the members - * of pg_atomic_uint32 contain semaphore objects, which will affect the - * persistence of compressed page address files. - */ -#define SUPPORT_PAGE_COMPRESSION (sizeof(pg_atomic_uint32) == sizeof(uint32)) - -/* In order to avoid the inconsistency of address metadata data when the server - * is down, it is necessary to prevent the address metadata of one data block - * from crossing two storage device blocks. The block size of ordinary storage - * devices is a multiple of 512, so 512 is used as the block size of the - * compressed address file. - */ -#define COMPRESS_ADDR_BLCKSZ 512 - -/* COMPRESS_ALGORITHM_XXX must be the same as COMPRESS_TYPE_XXX */ -#define COMPRESS_ALGORITHM_PGLZ 1 -#define COMPRESS_ALGORITHM_ZSTD 2 - -constexpr uint32 COMPRESS_ADDRESS_FLUSH_CHUNKS = 5000; - -#define SUPPORT_COMPRESSED(relKind, relam) \ - ((relKind) == RELKIND_RELATION || ((relKind) == RELKIND_INDEX && (relam) == BTREE_AM_OID)) -#define REL_SUPPORT_COMPRESSED(relation) \ - ((relation->rd_rel->relkind) == RELKIND_RELATION || \ - ((relation->rd_rel->relkind) == RELKIND_INDEX && (relation->rd_rel->relam) == BTREE_AM_OID)) - -typedef uint32 pc_chunk_number_t; -const uint32 PAGE_COMPRESSION_VERSION = 92505; - -enum CompressedFileType { - COMPRESSED_TYPE_UNKNOWN, - COMPRESSED_TABLE_FILE, - COMPRESSED_TABLE_PCA_FILE, - COMPRESSED_TABLE_PCD_FILE -}; - -/* - * layout of files for Page Compress: - * - * 1. page compression address file(_pca) - * - PageCompressHeader - * - PageCompressAddr[] - * - * 2. 
page compression data file(_pcd) - * - PageCompressData[] - * - */ -typedef struct PageCompressHeader { - pg_atomic_uint32 nblocks; /* number of total blocks in this segment */ - pg_atomic_uint32 allocated_chunks; /* number of total allocated chunks in data area */ - uint16 chunk_size; /* size of each chunk, must be 1/2 1/4 or 1/8 of BLCKSZ */ - uint8 algorithm; /* compress algorithm, 1=pglz, 2=lz4 */ - pg_atomic_uint32 last_synced_nblocks; /* last synced nblocks */ - pg_atomic_uint32 last_synced_allocated_chunks; /* last synced allocated_chunks */ - pg_atomic_uint32 sync; - TimestampTz last_recovery_start_time; /* postmaster start time of last recovery */ -} PageCompressHeader; - -typedef struct PageCompressAddr { - uint32 checksum; - volatile uint8 nchunks; /* number of chunks for this block */ - volatile uint8 allocated_chunks; /* number of allocated chunks for this block */ - /* variable-length fields, 1 based chunk no array for this block, size of the array must be 2, 4 or 8 */ - pc_chunk_number_t chunknos[FLEXIBLE_ARRAY_MEMBER]; -} PageCompressAddr; - -struct ReadBlockChunksStruct { - PageCompressHeader* header; // header: pca file - char* pageBuffer; // pageBuffer: decompressed page - size_t pageBufferLen; - FILE* fp; // fp: table fp - int segmentNo; - char* fileName; // fileName: for error report -}; - -typedef struct PageCompressData { - char page_header[SizeOfPageHeaderData]; /* page header */ - uint32 size : 16; /* size of compressed data */ - uint32 byte_convert : 1; - uint32 diff_convert : 1; - uint32 unused : 14; - char data[FLEXIBLE_ARRAY_MEMBER]; /* compressed page, except for the page header */ -} PageCompressData; - - -typedef struct HeapPageCompressData { - char page_header[SizeOfHeapPageHeaderData]; /* page header */ - uint32 size : 16; /* size of compressed data */ - uint32 byte_convert : 1; - uint32 diff_convert : 1; - uint32 unused : 14; - char data[FLEXIBLE_ARRAY_MEMBER]; /* compressed page, except for the page header */ -} HeapPageCompressData; - -const uint4 CHUNK_SIZE_LIST[4] = {BLCKSZ / 2, BLCKSZ / 4, BLCKSZ / 8, BLCKSZ / 16}; -constexpr uint4 INDEX_OF_HALF_BLCKSZ = 0; -constexpr uint4 INDEX_OF_QUARTER_BLCKSZ = 1; -constexpr uint4 INDEX_OF_EIGHTH_BRICK_BLCKSZ = 2; -constexpr uint4 INDEX_OF_SIXTEENTHS_BLCKSZ = 3; -#define MAX_PREALLOC_CHUNKS 7 -#define PCA_SUFFIX "%s_pca" -#define PCD_SUFFIX "%s_pcd" - -#define SIZE_OF_PAGE_COMPRESS_HEADER_DATA sizeof(PageCompressHeader) -#define SIZE_OF_PAGE_COMPRESS_ADDR_HEADER_DATA offsetof(PageCompressAddr, chunknos) -#define SIZE_OF_PAGE_COMPRESS_DATA_HEADER_DATA(heapData) \ - ((heapData) ? 
offsetof(HeapPageCompressData, data) : offsetof(PageCompressData, data)) - -#define SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size) \ - (SIZE_OF_PAGE_COMPRESS_ADDR_HEADER_DATA + sizeof(pc_chunk_number_t) * (BLCKSZ / (chunk_size))) - -#define NUMBER_PAGE_COMPRESS_ADDR_PER_BLOCK(chunk_size) (COMPRESS_ADDR_BLCKSZ / SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size)) - -#define OFFSET_OF_PAGE_COMPRESS_ADDR(chunk_size, blockno) \ - (COMPRESS_ADDR_BLCKSZ * (1 + (blockno) / NUMBER_PAGE_COMPRESS_ADDR_PER_BLOCK(chunk_size)) + \ - SIZE_OF_PAGE_COMPRESS_ADDR(chunk_size) * ((blockno) % NUMBER_PAGE_COMPRESS_ADDR_PER_BLOCK(chunk_size))) - -#define GET_PAGE_COMPRESS_ADDR(pcbuffer, chunk_size, blockno) \ - (PageCompressAddr*)((char*)(pcbuffer) + OFFSET_OF_PAGE_COMPRESS_ADDR((chunk_size), (blockno) % RELSEG_SIZE)) - -#define SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunk_size) OFFSET_OF_PAGE_COMPRESS_ADDR((chunk_size), RELSEG_SIZE) - -#define OFFSET_OF_PAGE_COMPRESS_CHUNK(chunk_size, chunkno) ((chunk_size) * ((chunkno)-1)) - -/* Abnormal scenarios may cause holes in the space allocation of data files, - * causing data file expansion. Usually the holes are not too big, so the definition - * allows a maximum of 10,000 chunks for holes. If allocated_chunks exceeds this value, - * VACUUM FULL needs to be executed to reclaim space. - */ -#define MAX_CHUNK_NUMBER(chunk_size) ((uint32)(RELSEG_SIZE * (BLCKSZ / (chunk_size)) + 10000)) - -constexpr unsigned CMP_BYTE_CONVERT_LEN = 1; -constexpr unsigned CMP_DIFF_CONVERT_LEN = 1; -constexpr unsigned CMP_PRE_CHUNK_LEN = 3; -constexpr unsigned CMP_LEVEL_SYMBOL_LEN = 1; -constexpr unsigned CMP_LEVEL_LEN = 5; -constexpr unsigned CMP_ALGORITHM_LEN = 3; -constexpr unsigned CMP_CHUNK_SIZE_LEN = 2; - -constexpr unsigned CMP_BYTE_CONVERT_INDEX = 0; -constexpr unsigned CMP_DIFF_CONVERT_INDEX = 1; -constexpr unsigned CMP_PRE_CHUNK_INDEX = 2; -constexpr unsigned CMP_COMPRESS_LEVEL_SYMBOL = 3; -constexpr unsigned CMP_LEVEL_INDEX = 4; -constexpr unsigned CMP_ALGORITHM_INDEX = 5; -constexpr unsigned CMP_CHUNK_SIZE_INDEX = 6; - -struct CmpBitStruct { - unsigned int bitLen; - unsigned int mask; - unsigned int moveBit; -}; - -constexpr CmpBitStruct g_cmpBitStruct[] = {{CMP_BYTE_CONVERT_LEN, 0x01, 15}, - {CMP_DIFF_CONVERT_LEN, 0x01, 14}, - {CMP_PRE_CHUNK_LEN, 0x07, 11}, - {CMP_LEVEL_SYMBOL_LEN, 0x01, 10}, - {CMP_LEVEL_LEN, 0x1F, 5}, - {CMP_ALGORITHM_LEN, 0x07, 2}, - {CMP_CHUNK_SIZE_LEN, 0x03, 0}}; -/* RelFileCompressOption: Row-oriented table compress option */ -struct RelFileCompressOption { - unsigned byteConvert : g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].bitLen, /* need byte convert? */ - diffConvert : g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].bitLen, /* need diff convert processed? 
*/ - compressPreallocChunks : g_cmpBitStruct[CMP_PRE_CHUNK_INDEX] - .bitLen, /* prealloced chunks to store compressed data */ - compressLevelSymbol : g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL] - .bitLen, /* compress level symbol, true for positive and false for negative */ - compressLevel : g_cmpBitStruct[CMP_LEVEL_INDEX].bitLen, /* compress level */ - compressAlgorithm : g_cmpBitStruct[CMP_ALGORITHM_INDEX].bitLen, /* compress algorithm */ - compressChunkSize : g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].bitLen; /* chunk size of compressed data */ -}; - -inline void TransCompressOptions(const RelFileNode& node, RelFileCompressOption* opt) -{ - unsigned short compressOption = node.opt; - opt->compressChunkSize = compressOption & g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].bitLen; - opt->compressAlgorithm = compressOption & g_cmpBitStruct[CMP_ALGORITHM_INDEX].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_ALGORITHM_INDEX].bitLen; - opt->compressLevel = compressOption & g_cmpBitStruct[CMP_LEVEL_INDEX].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_LEVEL_INDEX].bitLen; - opt->compressLevelSymbol = compressOption & g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL].bitLen; - opt->compressPreallocChunks = compressOption & g_cmpBitStruct[CMP_PRE_CHUNK_INDEX].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_PRE_CHUNK_INDEX].bitLen; - opt->diffConvert = compressOption & g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].bitLen; - opt->byteConvert = compressOption & g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].mask; - compressOption = compressOption >> g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].bitLen; -} - -#define SET_COMPRESS_OPTION(node, byteConvert, diffConvert, preChunks, symbol, level, algorithm, chunkSize) \ - do { \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].bitLen; \ - (node).opt += (byteConvert)&g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].mask; \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].bitLen; \ - (node).opt += (diffConvert)&g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].mask; \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_PRE_CHUNK_INDEX].bitLen; \ - (node).opt += (preChunks)&g_cmpBitStruct[CMP_PRE_CHUNK_INDEX].mask; \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL].bitLen; \ - (node).opt += (symbol)&g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL].mask; \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_LEVEL_INDEX].bitLen; \ - (node).opt += (level)&g_cmpBitStruct[CMP_LEVEL_INDEX].mask; \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_ALGORITHM_INDEX].bitLen; \ - (node).opt += (algorithm)&g_cmpBitStruct[CMP_ALGORITHM_INDEX].mask; \ - (node).opt = (node).opt << g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].bitLen; \ - (node).opt += (chunkSize)&g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].mask; \ - } while (0) - -#define GET_ROW_COL_CONVERT(opt) \ - (((opt) >> g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].moveBit) & g_cmpBitStruct[CMP_BYTE_CONVERT_INDEX].mask) -#define GET_DIFF_CONVERT(opt) \ - (((opt) >> g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].moveBit) & g_cmpBitStruct[CMP_DIFF_CONVERT_INDEX].mask) -#define GET_COMPRESS_PRE_CHUNKS(opt) \ - (((opt) >> g_cmpBitStruct[CMP_PRE_CHUNK_INDEX].moveBit) & g_cmpBitStruct[CMP_PRE_CHUNK_INDEX].mask) -#define GET_COMPRESS_LEVEL_SYMBOL(opt) \ - (((opt) >> 
g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL].moveBit) & g_cmpBitStruct[CMP_COMPRESS_LEVEL_SYMBOL].mask) -#define GET_COMPRESS_LEVEL(opt) \ - (((opt) >> g_cmpBitStruct[CMP_LEVEL_INDEX].moveBit) & g_cmpBitStruct[CMP_LEVEL_INDEX].mask) -#define GET_COMPRESS_ALGORITHM(opt) \ - (((opt) >> g_cmpBitStruct[CMP_ALGORITHM_INDEX].moveBit) & g_cmpBitStruct[CMP_ALGORITHM_INDEX].mask) -#define GET_COMPRESS_CHUNK_SIZE(opt) \ - (((opt) >> g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].moveBit) & g_cmpBitStruct[CMP_CHUNK_SIZE_INDEX].mask) - -#define IS_COMPRESSED_MAINFORK(reln, forkNum) ((reln)->smgr_rnode.node.opt != 0 && (forkNum) == MAIN_FORKNUM) -#define IS_COMPRESSED_RNODE(rnode, forkNum) ((rnode).opt != 0 && (forkNum) == MAIN_FORKNUM) - -/* Compress function */ -template -extern int TemplateCompressPage(const char* src, char* dst, int dst_size, RelFileCompressOption option); - -template -extern int TemplateDecompressPage(const char* src, char* dst, uint8 algorithm); - -int CompressPageBufferBound(const char* page, uint8 algorithm); - -int CompressPage(const char* src, char* dst, int dst_size, RelFileCompressOption option); - -int DecompressPage(const char* src, char* dst, uint8 algorithm); - -/* Memory mapping function */ -extern PageCompressHeader* pc_mmap(int fd, int chunk_size, bool readonly); -extern PageCompressHeader* pc_mmap_real_size(int fd, int size, bool readonly); -extern int pc_munmap(PageCompressHeader * map); -extern int pc_msync(PageCompressHeader * map); - -/** - * format mainfork path name to compressed path - * @param dst destination buffer - * @param pathName uncompressed table name - * @param compressFileType pca or pcd - */ -extern void CopyCompressedPath(char dst[MAXPGPATH], const char* pathName, CompressedFileType compressFileType); - -/** - * @param pathName mainFork File path name - * @param relFileNode physically access, for validation - * @param forkNumber for validation - * @return size of mainFork - */ -extern int64 CalculateMainForkSize(char* pathName, RelFileNode* relFileNode, ForkNumber forkNumber); -extern int64 CalculateCompressMainForkSize(char* pathName, bool suppressedENOENT = false); - -extern uint16 ReadChunkSize(FILE *pcaFile, char* pcaFilePath, size_t len); - -/** - * read compressed chunks into dst, and decompressed page into pageBuffer - * @param dst destination - * @param destLen destination length - * @param blockNumber blockNumber - * @param ReadBlockChunksStruct other data needed - */ -size_t ReadAllChunkOfBlock(char *dst, size_t destLen, BlockNumber blockNumber, ReadBlockChunksStruct& rbStruct); -/** - * check if fileName is end with pca or pcd - * @param fileName fileName - * @return filetype - */ -CompressedFileType IsCompressedFile(char *fileName, size_t fileNameLen); - -int64 CalculateFileSize(char* pathName, size_t size, bool suppressedENOENT = false); -/** - * release mmap. 
print warning log if failed - * @param map mmap pointer - * @param fileName mmap filename, for loggging - */ -void ReleaseMap(PageCompressHeader* map, const char* fileName); - -/** - * convert chunk size to the index of CHUNK_SIZE_LIST - * @param compressedChunkSize {BLCKSZ / 2, BLCKSZ / 4, BLCKSZ / 8, BLCKSZ / 16} - * @param success success or not - * @return index of CHUNK_SIZE_LIST - */ -extern uint1 ConvertChunkSize(uint32 compressedChunkSize, bool* success); - -/** - * - * @param blockNumber block number - * @param pageCompressAddr addr of block - * @return checksum uint32 - */ -extern uint32 AddrChecksum32(BlockNumber blockNumber, const PageCompressAddr* pageCompressAddr, uint16 chunkSize); - -#ifndef FRONTEND -extern void CheckAndRepairCompressAddress(PageCompressHeader *pcMap, uint16 chunk_size, uint8 algorithm, const char *path); -PageCompressHeader* GetPageCompressHeader(void* vfd, uint16 chunkSize, const RelFileNodeForkNum &relFileNodeForkNum); -void UnReferenceAddrFile(void* vfd); -void RealInitialMMapLockArray(); -#endif - -#endif /* PAGE_COMPRESSION_H */ diff --git a/src/include/storage/page_compression_impl.h b/src/include/storage/page_compression_impl.h deleted file mode 100644 index 1c9f8bba2..000000000 --- a/src/include/storage/page_compression_impl.h +++ /dev/null @@ -1,721 +0,0 @@ -/* - * page_compression.h - * internal declarations for page compression - * - * Copyright (c) 2020, PostgreSQL Global Development Group - * - * IDENTIFICATION - * src/include/storage/page_compression_impl.h - */ - -#ifndef RC_INCLUDE_STORAGE_PAGE_COMPRESSION_IMPL_H -#define RC_INCLUDE_STORAGE_PAGE_COMPRESSION_IMPL_H - -#include -#include -#include -#include -#include -#include -#include - -#include "storage/page_compression.h" -#include "utils/pg_lzcompress.h" - -#include - -#define DEFAULT_ZSTD_COMPRESSION_LEVEL (1) -#define MIN_ZSTD_COMPRESSION_LEVEL ZSTD_minCLevel() -#define MAX_ZSTD_COMPRESSION_LEVEL ZSTD_maxCLevel() - -#define COMPRESS_DEFAULT_ERROR (-1) -#define COMPRESS_UNSUPPORTED_ERROR (-2) -#define GS_INVALID_ID16 (uint16)0xFFFF -#define MIN_DIFF_SIZE (64) -#define MIN_CONVERT_CNT (4) - -#ifndef USE_ASSERT_CHECKING -#define ASSERT(condition) -#else -#define ASSERT(condition) assert(condition) -#endif - - -#ifndef FRONTEND - -/** - * return data of page - * @param dst HeapPageCompressData or HeapPageCompressData - * @param heapPageData heapPageData or pagedata - * @return dst->data - */ -static inline char* GetPageCompressedData(char* dst, bool heapPageData) -{ - return heapPageData ? 
((HeapPageCompressData*)dst)->data : ((PageCompressData*)dst)->data; -} - -static inline void FreePointer(void* pointer) -{ - if (pointer != NULL) { - pfree(pointer); - } -} - -/*======================================================================================*/ -#define COMPRESS "" -void cprs_diff_convert_rows(char *buf, uint32 offset,uint16 min_row_len, uint16 real_row_cnt) { - uint16 row_cnt = real_row_cnt; - uint32 common_size = min_row_len; - uint8 *copy_begin = (uint8 *)(buf + offset); - uint16 i, j; - - for (i = 0; i < common_size; i++) { - for (j = row_cnt - 1; j > 0; j--) { - copy_begin[i * row_cnt + j] -= copy_begin[i * row_cnt + (j - 1)]; - } - } - return ; -} - -void cprs_diff_deconvert_rows(char *buf, uint32 offset, uint16 min_row_len, uint16 real_row_cnt) { - uint16 row_cnt = real_row_cnt; - uint32 common_size = min_row_len; - uint8 *copy_begin = (uint8 *)(buf + offset); - uint16 i, j; - - for (i = 0; i < common_size; i++) { - for (j = 1; j < row_cnt; j++) { - copy_begin[i * row_cnt + j] += copy_begin[i * row_cnt + (j - 1)]; - } - } - return ; -} - -void CompressConvertItemIds(char *buf, char *aux_buf) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - uint32 total_size = row_cnt * sizeof(ItemIdData); - char *copy_begin = buf + GetPageHeaderSize(page); - uint16 i, j, k; - - // clear aux_buf - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - k = 0; - for (i = 0; i < row_cnt; i++) { - for (j = 0; j < sizeof(ItemIdData); j++) { - aux_buf[j * row_cnt + i] = copy_begin[k++]; - } - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf, total_size); - securec_check(ret, "", ""); - return ; -} - - -void CompressConvertRows(char *buf, char *aux_buf, int16 *real_order, uint16 max_row_len, uint16 real_row_cnt) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = real_row_cnt; - uint32 total_size = page->pd_special - page->pd_upper; - char *copy_begin = buf + page->pd_upper; - char *row; - uint16 i, j, k, cur, up, row_size; - - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - k = 0; - for (i = 0; i < max_row_len; i++) { - for (j = 0; j < row_cnt; j++) { - up = (j == (row_cnt - 1)) ? page->pd_special : GET_ITEMID_BY_IDX(buf, (real_order[j + 1]))->lp_off; - cur = GET_ITEMID_BY_IDX(buf, (real_order[j]))->lp_off; - row_size = up - cur; - row = buf + cur; - if (i < row_size) { - aux_buf[k++] = row[i]; // this part is reshaped - } - } - } - - if (k != total_size) { - printf("ERROR!!! convert_rows_2 error...!!!\n"); - ASSERT(0); - return; - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf, total_size); - securec_check(ret, "", ""); - return ; -} - -// 1: as tuple_offset order, that means asc order. -// 2: store all itemid's idx. -// 3:maybe some itemid is not in order. 
-void CompressConvertItemRealOrder(char *buf, int16 *real_order, uint16 real_row_cnt) { - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - ItemIdData *begin = (ItemIdData *)(buf + GetPageHeaderSize(page)); - int16 *link_order = real_order + real_row_cnt; - - int16 i, head, curr, prev; - int16 end = -1; // invalid index - - head = end; - // very likely to seems that itemids stored by desc order, and ignore invalid itemid - for (i = 0; i < row_cnt; i++) { - if (!ItemIdIsNormal(begin + i)) { - continue; - } - - if (head == end) { // set the head idx, insert the first - link_order[i] = end; - head = i; - continue; - } - - if ((begin + i)->lp_off < (begin + head)->lp_off) { - link_order[i] = head; // update the head idx - head = i; - continue; - } - - prev = head; - curr = link_order[head]; - while ((curr != end) && ((begin + i)->lp_off > (begin + curr)->lp_off)) { - prev = curr; - curr = link_order[curr]; - } - - link_order[prev] = i; - link_order[i] = curr; - } - - // arrange the link to array - curr = head; - for (i = 0; i < real_row_cnt; i++) { - real_order[i] = curr; - curr = link_order[curr]; - } - - if (curr != end) { - printf("ERROR!!! pre_convert_real_order error...!!!\n"); - ASSERT(0); - return; - } - -} - -// maybe some itemid is not valid -uint16 HeapPageCalcRealRowCnt (char *buf) { - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 cnt = 0; - uint16 i; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - - for (i = 0; i < row_cnt; i++) { - if (ItemIdIsNormal(GET_ITEMID_BY_IDX(buf, i))) { - cnt++; - } - } - return cnt; -} - -// to find all row size are diffs in MIN_DIFF_SIZE byts. -bool CompressConvertCheck(char *buf, int16 **real_order, uint16 *max_row_len, uint16 *min_row_len, uint16 *real_row_cnt) { - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - int16 i, row_size; - ItemIdData *ptr = NULL; - uint16 up = page->pd_special; - uint16 min_size = GS_INVALID_ID16; - uint16 max_size = 0; - errno_t ret; - if (page->pd_lower < GetPageHeaderSize(page) || (page->pd_lower > page->pd_upper)) { - return false; - } - - uint16 normal_row_cnt = HeapPageCalcRealRowCnt(buf); - if (normal_row_cnt < MIN_CONVERT_CNT) { // no need convert - return false; - } - - // to store the real tuple order. - /* - --------------------------|-------------------------- - xxxxxxxxxxxxxxxxxxxxxxxxxx|xxxxxxxxxxxxxxxxxxxxxxxxxx - --------------------------|-------------------------- - */ - // the first part is real array order, and the second part is link. - *real_order = (int16 *)palloc(sizeof(uint16) * row_cnt * 2); - if (*real_order == NULL) { - printf("zfunc compress file"); - return false; - } - ret = memset_sp(*real_order, sizeof(uint16) * row_cnt * 2, 0, sizeof(uint16) * row_cnt * 2); - securec_check(ret, "", ""); - - // order the ItemIds by tuple_offset order. - CompressConvertItemRealOrder(buf, *real_order, normal_row_cnt); - - // do the check, to check all size of tuples. - for (i = normal_row_cnt - 1; i >= 0; i--) { - ptr = GET_ITEMID_BY_IDX(buf, ((*real_order)[i])); - - row_size = up - ptr->lp_off; - if (row_size < MIN_CONVERT_CNT * 2) { - return false; - } - - min_size = (row_size < min_size) ? row_size : min_size; - max_size = (row_size > max_size) ? 
row_size : max_size; - - if ((max_size - min_size) > MIN_DIFF_SIZE) { // no need convert - return false; - } - up = ptr->lp_off; - } - - // get the min row common size. - *max_row_len = max_size; - *min_row_len = min_size; - *real_row_cnt = normal_row_cnt; - return true; -} - -bool CompressConvertOnePage(char *buf, char *aux_buf, bool diff_convert) { - uint16 max_row_len = 0; - uint16 min_row_len = 0; - int16 *real_order = NULL; // itemids are not in order sometimes. we must find the real - uint16 real_row_cnt = 0; - if (!CompressConvertCheck(buf, &real_order, &max_row_len, &min_row_len, &real_row_cnt)) { - FreePointer((void*)real_order); - return false; - } - - CompressConvertRows(buf, aux_buf, real_order, max_row_len, real_row_cnt); - CompressConvertItemIds(buf, aux_buf); - - if (diff_convert) { - cprs_diff_convert_rows(buf, ((HeapPageHeaderData *)buf)->pd_upper, min_row_len, real_row_cnt); - cprs_diff_convert_rows(buf, GetPageHeaderSize(buf), sizeof(ItemIdData), - (((HeapPageHeaderData *)buf)->pd_lower - GetPageHeaderSize(buf)) / sizeof(ItemIdData)); - } - - FreePointer((void*)real_order); - return true; -} - -void CompressPagePrepareConvert(char *src, bool diff_convert, bool *real_ByteConvert) -{ - char *aux_buf = NULL; - errno_t rc; - - aux_buf = (char *)palloc(BLCKSZ); - if (aux_buf == NULL) { - // add log - return; - } - rc = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "", ""); - - // do convert - *real_ByteConvert = false; - if (CompressConvertOnePage(src, aux_buf, diff_convert)) { - *real_ByteConvert = true; - } - - FreePointer((void*)aux_buf); -} - -inline size_t CompressReservedLen(const char* page) -{ - auto length = offsetof(HeapPageCompressData, page_header) - offsetof(HeapPageCompressData, data); - return GetPageHeaderSize(page) + length; -} - -/** - * CompressPageBufferBound() - * -- Get the destination buffer boundary to compress one page. - * Return needed destination buffer size for compress one page or - * -1 for unrecognized compression algorithm - */ -int CompressPageBufferBound(const char* page, uint8 algorithm) -{ - switch (algorithm) { - case COMPRESS_ALGORITHM_PGLZ: - return BLCKSZ + 4; - case COMPRESS_ALGORITHM_ZSTD: - return ZSTD_compressBound(BLCKSZ - CompressReservedLen(page)); - default: - return -1; - } -} - -int CompressPage(const char* src, char* dst, int dst_size, RelFileCompressOption option) -{ - if (PageIs8BXidHeapVersion(src)) { - return TemplateCompressPage(src, dst, dst_size, option); - } else { - return TemplateCompressPage(src, dst, dst_size, option); - } -} - -int DecompressPage(const char* src, char* dst, uint8 algorithm) -{ - if (PageIs8BXidHeapVersion(src)) { - return TemplateDecompressPage(src, dst, algorithm); - } else { - return TemplateDecompressPage(src, dst, algorithm); - } -} - -inline size_t GetSizeOfHeadData(bool heapPageData) -{ - if (heapPageData) { - return SizeOfHeapPageHeaderData; - } else { - return SizeOfPageHeaderData; - } -} - -/** - * CompressPage() -- Compress one page. - * - * Only the parts other than the page header will be compressed. The - * compressed data is rounded by chunck_size, The insufficient part is - * filled with zero. Compression needs to be able to save at least one - * chunk of space, otherwise it fail. 
- * This function returen the size of compressed data or - * -1 for compression fail - * COMPRESS_UNSUPPORTED_ERROR for unrecognized compression algorithm - */ -template -int TemplateCompressPage(const char* src, char* dst, int dst_size, RelFileCompressOption option) -{ - int compressed_size; - int8 level = option.compressLevelSymbol ? option.compressLevel : -option.compressLevel; - size_t sizeOfHeaderData = GetSizeOfHeadData(heapPageData); - char* src_copy = NULL; - bool real_ByteConvert = false; - errno_t rc; - - if (option.byteConvert) { - // copy and maybe change it - src_copy = (char*)palloc(BLCKSZ); - if (src_copy == NULL) { - // add log - return -1; - } - rc = memcpy_s(src_copy, BLCKSZ, src, BLCKSZ); - securec_check(rc, "", ""); - CompressPagePrepareConvert(src_copy, option.diffConvert, &real_ByteConvert); /* preprocess convert src */ - } - - char* data = GetPageCompressedData(dst, heapPageData); - - switch (option.compressAlgorithm) { - case COMPRESS_ALGORITHM_PGLZ: - if (real_ByteConvert) { - compressed_size = lz_compress(src_copy + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, data); - } else { - compressed_size = lz_compress(src + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, data); - } - break; - case COMPRESS_ALGORITHM_ZSTD: { - if (level == 0 || level < MIN_ZSTD_COMPRESSION_LEVEL || level > MAX_ZSTD_COMPRESSION_LEVEL) { - level = DEFAULT_ZSTD_COMPRESSION_LEVEL; - } - - if (real_ByteConvert) { - compressed_size = - ZSTD_compress(data, dst_size, src_copy + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, level); - } else { - compressed_size = - ZSTD_compress(data, dst_size, src + sizeOfHeaderData, BLCKSZ - sizeOfHeaderData, level); - } - - if (ZSTD_isError(compressed_size)) { - FreePointer((void*)src_copy); - return -1; - } - break; - } - default: - FreePointer((void*)src_copy); - return COMPRESS_UNSUPPORTED_ERROR; - } - - if (compressed_size < 0) { - FreePointer((void*)src_copy); - return -1; - } - - if (heapPageData) { - HeapPageCompressData* pcdptr = ((HeapPageCompressData*)dst); - rc = memcpy_s(pcdptr->page_header, sizeOfHeaderData, src, sizeOfHeaderData); - securec_check(rc, "", ""); - pcdptr->size = compressed_size; - pcdptr->byte_convert = real_ByteConvert; - pcdptr->diff_convert = option.diffConvert; - } else { - PageCompressData* pcdptr = ((PageCompressData*)dst); - rc = memcpy_s(pcdptr->page_header, sizeOfHeaderData, src, sizeOfHeaderData); - securec_check(rc, "", ""); - pcdptr->size = compressed_size; - pcdptr->byte_convert = real_ByteConvert; - pcdptr->diff_convert = option.diffConvert; - } - - FreePointer((void*)src_copy); - return SIZE_OF_PAGE_COMPRESS_DATA_HEADER_DATA(heapPageData) + compressed_size; -} - -/*======================================================================================*/ -#define DECOMPRESS "" -void DecompressDeconvertRows(char *buf, char *aux_buf, int16 *real_order, uint16 max_row_len, uint16 real_row_cnt) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = real_row_cnt; - uint32 total_size = page->pd_special - page->pd_upper; - char *copy_begin = buf + page->pd_upper; - char *row; - uint16 i, j, k, cur, up, row_size; - - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - for (i = 0, k = 0; i < max_row_len; i++) { - for (j = 0; j < row_cnt; j++) { - up = (j == (row_cnt - 1)) ? 
page->pd_special : GET_ITEMID_BY_IDX(buf, (real_order[j + 1]))->lp_off; - cur = GET_ITEMID_BY_IDX(buf, (real_order[j]))->lp_off; - row_size = up - cur; - row = aux_buf + cur; - if (i < row_size) { - row[i] = copy_begin[k++]; // this part is reshaped - } - } - } - - if (k != total_size) { - printf("ERROR!!! pg_deconvert_rows error...!!!\n"); - ASSERT(0); - return; - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf + page->pd_upper, total_size); - securec_check(ret, "", ""); - return ; -} - -void DecompressDeconvertItemIds(char *buf, char *aux_buf) { - errno_t ret; - HeapPageHeaderData *page = (HeapPageHeaderData *)buf; - uint16 row_cnt = (page->pd_lower - GetPageHeaderSize(page)) / sizeof(ItemIdData); - uint32 total_size = row_cnt * sizeof(ItemIdData); - char* copy_begin = buf + GetPageHeaderSize(page); - uint16 i, j, k; - - // clear aux_buf - ret = memset_sp(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(ret, "", ""); - - for (i = 0, k = 0; i < sizeof(ItemIdData); i++) { - for (j = 0; j < row_cnt; j++) { - aux_buf[j * sizeof(ItemIdData) + i] = copy_begin[k++]; - } - } - - // cp aux_buf to page_buf - ret = memcpy_sp(copy_begin, total_size, aux_buf, total_size); - securec_check(ret, "", ""); - return ; -} - -void DecompressDeconvertOnePage(char *buf, char *aux_buf, bool diff_convert) { - uint16 max_row_len = 0; - uint16 min_row_len = 0; - int16 *real_order = NULL; // itemids are not in order sometimes. we must find the real - uint16 real_row_cnt = 0; - - if (diff_convert) { - cprs_diff_deconvert_rows(buf, GetPageHeaderSize(buf), sizeof(ItemIdData), - (((HeapPageHeaderData *)buf)->pd_lower - GetPageHeaderSize(buf)) / sizeof(ItemIdData)); - } - - // =======firstly, arrange the itemids. - DecompressDeconvertItemIds(buf, aux_buf); - - if (!CompressConvertCheck(buf, &real_order, &max_row_len, &min_row_len, &real_row_cnt)) { - FreePointer((void*)real_order); - ASSERT(0); - return ; - } - - // =======and last, the tuples - if (diff_convert) { - cprs_diff_deconvert_rows(buf, ((HeapPageHeaderData *)buf)->pd_upper, min_row_len, real_row_cnt); - } - DecompressDeconvertRows(buf, aux_buf, real_order, max_row_len, real_row_cnt); - FreePointer((void*)real_order); -} - -void DecompressPageDeconvert(char *src, bool diff_convert) -{ - char *aux_buf = NULL; - errno_t rc; - - aux_buf = (char *)palloc(BLCKSZ); - if (aux_buf == NULL) { - // add log - return; - } - rc = memset_s(aux_buf, BLCKSZ, 0, BLCKSZ); - securec_check(rc, "", ""); - - // do convert - DecompressDeconvertOnePage(src, aux_buf, diff_convert); - - FreePointer((void*)aux_buf); -} - -/** - * DecompressPage() -- Decompress one compressed page. - * return size of decompressed page which should be BLCKSZ or - * -1 for decompress error - * -2 for unrecognized compression algorithm - * - * note:The size of dst must be greater than or equal to BLCKSZ. 
- */ -template -int TemplateDecompressPage(const char* src, char* dst, uint8 algorithm) -{ - int decompressed_size; - char* data; - uint32 size; - bool byte_convert, diff_convert; - size_t headerSize = GetSizeOfHeadData(heapPageData); - int rc = memcpy_s(dst, headerSize, src, headerSize); - securec_check(rc, "", ""); - - if (heapPageData) { - data = ((HeapPageCompressData*)src)->data; - size = ((HeapPageCompressData*)src)->size; - byte_convert = ((HeapPageCompressData*)src)->byte_convert; - diff_convert = ((HeapPageCompressData*)src)->diff_convert; - } else { - data = ((PageCompressData*)src)->data; - size = ((PageCompressData*)src)->size; - byte_convert = ((PageCompressData*)src)->byte_convert; - diff_convert = ((PageCompressData*)src)->diff_convert; - } - - switch (algorithm) { - case COMPRESS_ALGORITHM_PGLZ: - decompressed_size = lz_decompress(data, size, dst + headerSize, BLCKSZ - headerSize, false); - break; - case COMPRESS_ALGORITHM_ZSTD: - decompressed_size = ZSTD_decompress(dst + headerSize, BLCKSZ - headerSize, data, size); - if (ZSTD_isError(decompressed_size)) { - return -1; - } - break; - default: - return COMPRESS_UNSUPPORTED_ERROR; - break; - } - - if (byte_convert) { - DecompressPageDeconvert(dst, diff_convert); - } - - return headerSize + decompressed_size; -} -#endif - -/** - * pc_mmap() -- create memory map for page compress file's address area. - * - */ -PageCompressHeader* pc_mmap(int fd, int chunk_size, bool readonly) -{ - int pc_memory_map_size = SIZE_OF_PAGE_COMPRESS_ADDR_FILE(chunk_size); - return pc_mmap_real_size(fd, pc_memory_map_size, readonly); -} - -/** - * pc_mmap_real_size() -- create memory map for page compress file's address area. - * - */ -extern PageCompressHeader* pc_mmap_real_size(int fd, int pc_memory_map_size, bool readonly) -{ - PageCompressHeader* map = NULL; - int file_size = lseek(fd, 0, SEEK_END); - if (file_size != pc_memory_map_size) { - if (ftruncate(fd, pc_memory_map_size) != 0) { - return (PageCompressHeader*) MAP_FAILED; - } - } - if (readonly) { - map = (PageCompressHeader*) mmap(NULL, pc_memory_map_size, PROT_READ, MAP_SHARED, fd, 0); - } else { - map = (PageCompressHeader*) mmap(NULL, pc_memory_map_size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0); - } - return map; -} - -/** - * pc_munmap() -- release memory map of page compress file. - * - */ -int pc_munmap(PageCompressHeader *map) -{ - return munmap(map, SIZE_OF_PAGE_COMPRESS_ADDR_FILE(map->chunk_size)); -} - -/** - * pc_msync() -- sync memory map of page compress file. 
- * - */ -int pc_msync(PageCompressHeader *map) -{ -#ifndef FRONTEND - if (!u_sess->attr.attr_storage.enableFsync) { - return 0; - } -#endif - return msync(map, SIZE_OF_PAGE_COMPRESS_ADDR_FILE(map->chunk_size), MS_SYNC); -} - - -uint32 AddrChecksum32(BlockNumber blockNumber, const PageCompressAddr* pageCompressAddr, uint16 chunkSize) -{ -#define UINT_LEN sizeof(uint32) - uint32 checkSum = 0; - char* addr = ((char*) pageCompressAddr) + UINT_LEN; - size_t len = SIZE_OF_PAGE_COMPRESS_ADDR(chunkSize) - UINT_LEN; - do { - if (len >= UINT_LEN) { - checkSum += *((uint32*) addr); - addr += UINT_LEN; - len -= UINT_LEN; - } else { - char finalNum[UINT_LEN] = {0}; - size_t i = 0; - for (; i < len; ++i) { - finalNum[i] = addr[i]; - } - checkSum += *((uint32*) finalNum); - len -= i; - } - } while (len); - return checkSum; -} - -#endif diff --git a/src/include/storage/pmsignal.h b/src/include/storage/pmsignal.h index bbb29a912..5fbead2d9 100644 --- a/src/include/storage/pmsignal.h +++ b/src/include/storage/pmsignal.h @@ -47,6 +47,8 @@ typedef enum { PMSIGNAL_START_UNDO_WORKER, /* start a new undo worker */ PMSIGNAL_START_RB_WORKER, /* start a rbworker */ PMSIGNAL_START_TXNSNAPWORKER, /* start a snapcaputure worker */ + PMSIGNAL_START_LOGICAL_READ_WORKER,/* start logical read worker */ + PMSIGNAL_START_PARALLEL_DECODE_WORKER,/* start parallel decoding worker */ PMSIGNAL_START_APPLY_WORKER, /* start a apply worker */ NUM_PMSIGNALS /* Must be last value of enum! */ } PMSignalReason; diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 79b98252e..906ea1411 100755 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -74,7 +74,17 @@ struct XidCache { * rather than the main lock table. This eases contention on the lock * manager LWLocks. See storage/lmgr/README for additional details. */ -#define FP_LOCK_SLOTS_PER_BACKEND 20 +#define FP_LOCK_SLOTS_PER_BACKEND ((uint32)g_instance.attr.attr_storage.num_internal_lock_partitions[FASTPATH_PART]) +#define FP_LOCK_SLOTS_PER_LOCKBIT 20 +#define FP_LOCKBIT_NUM (((FP_LOCK_SLOTS_PER_BACKEND - 1) / FP_LOCK_SLOTS_PER_LOCKBIT) + 1) +#define FAST_PATH_SET_LOCKBITS_ZERO(proc) \ + do { \ + for (uint32 _idx = 0; _idx < FP_LOCKBIT_NUM; _idx++) { \ + (proc)->fpLockBits[_idx] = 0; \ + } \ + } while (0) + + typedef struct FastPathTag { uint32 dbid; @@ -241,8 +251,8 @@ struct PGPROC { LWLock* backendLock; /* protects the fields below */ /* Lock manager data, recording fast-path locks taken by this backend. */ - uint64 fpLockBits; /* lock modes held for each fast-path slot */ - FastPathTag fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */ + uint64 *fpLockBits; /* lock modes held for each fast-path slot */ + FastPathTag *fpRelId; /* slots for rel oids */ bool fpVXIDLock; /* are we holding a fast-path VXID lock? */ LocalTransactionId fpLocalTransactionId; /* lxid for fast-path VXID * lock */ @@ -305,6 +315,7 @@ typedef struct PGXACT { * vacuum must not remove tuples deleted by * xid >= xmin ! 
*/ CommitSeqNo csn_min; /* local csn min */ + CommitSeqNo csn_dr; TransactionId next_xid; /* xid sent down from CN */ int nxids; /* use int replace uint8, avoid overflow when sub xids >= 256 */ uint8 vacuumFlags; /* vacuum-related flags, see above */ @@ -357,9 +368,12 @@ typedef struct PROC_HDR { Latch* walwriterauxiliaryLatch; /* Checkpointer process's latch */ Latch* checkpointerLatch; + /* pagewriter main process's latch */ + Latch* pgwrMainThreadLatch; /* BCMWriter process's latch */ Latch* cbmwriterLatch; volatile Latch* ShareStoragexlogCopyerLatch; + volatile Latch* BarrierPreParseLatch; /* Current shared estimate of appropriate spins_per_delay value */ int spins_per_delay; /* The proc of the Startup process, since not in ProcArray */ @@ -370,8 +384,6 @@ typedef struct PROC_HDR { #ifdef __aarch64__ char pad[PG_CACHE_LINE_SIZE - PROC_HDR_PAD_OFFSET]; #endif - /* Oldest transaction id which is having undo. */ - pg_atomic_uint64 oldestXidInUndo; } PROC_HDR; /* @@ -385,7 +397,12 @@ typedef struct PROC_HDR { * PGXC needs another slot for the pool manager process */ const int MAX_PAGE_WRITER_THREAD_NUM = 16; + +#ifndef ENABLE_LITE_MODE const int MAX_COMPACTION_THREAD_NUM = 100; +#else +const int MAX_COMPACTION_THREAD_NUM = 10; +#endif /* number of multi auxiliary threads. */ #define NUM_MULTI_AUX_PROC \ @@ -399,6 +416,8 @@ const int MAX_COMPACTION_THREAD_NUM = 100; /* max number of CMA's connections */ #define NUM_CMAGENT_PROCS (10) +/* buffer length of information when no free proc available for cm_agent */ +#define CONNINFOLEN (64) /* max number of DCF call back threads */ #define NUM_DCF_CALLBACK_PROCS \ @@ -419,6 +438,8 @@ const int MAX_COMPACTION_THREAD_NUM = 100; #define BackendStatusArray_size (MAX_BACKEND_SLOT + NUM_AUXILIARY_PROCS) +#define GSC_MAX_BACKEND_SLOT (g_instance.shmem_cxt.MaxBackends + MAX_SESSION_SLOT_COUNT) + extern AlarmCheckResult ConnectionOverloadChecker(Alarm* alarm, AlarmAdditionalParam* additionalParam); /* @@ -441,7 +462,7 @@ extern int GetUsedConnectionCount(void); extern int GetUsedInnerToolConnCount(void); extern void ProcQueueInit(PROC_QUEUE* queue); -extern int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_update); +extern int ProcSleep(LOCALLOCK* locallock, LockMethod lockMethodTable, bool allow_con_update, int waitSec); extern PGPROC* ProcWakeup(PGPROC* proc, int waitStatus); extern void ProcLockWakeup(LockMethod lockMethodTable, LOCK* lock, const PROCLOCK* proclock = NULL); extern void ProcBlockerUpdate(PGPROC *waiterProc, PROCLOCK *blockerProcLock, const char* lockMode, bool isLockHolder); @@ -471,14 +492,24 @@ extern ThreadId getThreadIdFromLogicThreadId(int logictid); extern int getLogicThreadIdFromThreadId(ThreadId tid); extern bool IsRedistributionWorkerProcess(void); +extern void PgStatCMAThreadStatus(); void CancelBlockedRedistWorker(LOCK* lock, LOCKMODE lockmode); extern void BecomeLockGroupLeader(void); extern void BecomeLockGroupMember(PGPROC *leader); + static inline bool TransactionIdOlderThanAllUndo(TransactionId xid) { - uint64 cutoff = pg_atomic_read_u64(&g_instance.proc_base->oldestXidInUndo); + uint64 cutoff = pg_atomic_read_u64(&g_instance.undo_cxt.oldestXidInUndo); return xid < cutoff; } +static inline bool TransactionIdOlderThanFrozenXid(TransactionId xid) +{ + uint64 cutoff = pg_atomic_read_u64(&g_instance.undo_cxt.oldestFrozenXid); + return xid < cutoff; +} + +extern int GetThreadPoolStreamProcNum(void); + #endif /* PROC_H */ diff --git a/src/include/storage/procarray.h 
b/src/include/storage/procarray.h index 4c137f4ad..2b8c2f314 100755 --- a/src/include/storage/procarray.h +++ b/src/include/storage/procarray.h @@ -78,11 +78,9 @@ extern TransactionId GetRecentGlobalXmin(void); extern TransactionId GetOldestXmin(Relation rel, bool bFixRecentGlobalXmin = false, bool bRecentGlobalXminNoCheck = false); extern TransactionId GetGlobalOldestXmin(void); -extern TransactionId GetOldestXminForUndo(void); +extern TransactionId GetOldestXminForUndo(TransactionId * recycleXmin); extern void CheckCurrentTimeline(GTM_Timeline timeline); extern TransactionId GetOldestActiveTransactionId(TransactionId *globalXmin); -extern void FixCurrentSnapshotByGxid(TransactionId gxid); -extern void CheckSnapshotIsValidException(Snapshot snapshot, const char* location); extern TransactionId GetOldestSafeDecodingTransactionId(bool catalogOnly); extern void CheckSnapshotTooOldException(Snapshot snapshot, const char* location); @@ -158,6 +156,7 @@ extern void SyncWaitXidEnd(TransactionId xid, Buffer buffer); extern CommitSeqNo calculate_local_csn_min(); extern void proc_cancel_invalid_gtm_lite_conn(); extern void forward_recent_global_xmin(void); +extern void UpdateXLogMaxCSN(CommitSeqNo xlogCSN); extern void UpdateCSNLogAtTransactionEND( TransactionId xid, int nsubxids, TransactionId* subXids, CommitSeqNo csn, bool isCommit); diff --git a/src/include/storage/remote_adapter.h b/src/include/storage/remote_adapter.h index db786a2a9..6023dd292 100755 --- a/src/include/storage/remote_adapter.h +++ b/src/include/storage/remote_adapter.h @@ -33,11 +33,20 @@ #include "c.h" #include "storage/remote_read.h" +#include "storage/smgr/relfilenode.h" +#include "access/xlogdefs.h" +#include "access/xlog_basic.h" +#include "storage/smgr/segment_internal.h" +#include "funcapi.h" -extern int StandbyReadCUforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int32 colid, uint64 offset, - int32 size, uint64 lsn, bytea** cudata); +extern int StandbyReadCUforPrimary(RepairBlockKey key, uint64 offset, int32 size, uint64 lsn, int timeout, + bytea** cudata); +extern int StandbyReadPageforPrimary(RepairBlockKey key, uint32 blocksize, uint64 lsn, bytea** pagedata, + int timeout, const XLogPhyBlock *pblk); -extern int StandbyReadPageforPrimary(uint32 spcnode, uint32 dbnode, uint32 relnode, int16 bucketnode, uint16 opt, int32 forknum, uint32 blocknum, - uint32 blocksize, uint64 lsn, bytea** pagedata); +extern int ReadFileSizeForRemote(RelFileNode rnode, int32 forknum, XLogRecPtr lsn, int64* res, int timeout); + +Datum gs_read_file_from_remote(PG_FUNCTION_ARGS); +Datum gs_read_file_size_from_remote(PG_FUNCTION_ARGS); #endif /* REMOTE_ADAPTER_H */ diff --git a/src/include/storage/remote_read.h b/src/include/storage/remote_read.h index 41437671d..d991f652d 100755 --- a/src/include/storage/remote_read.h +++ b/src/include/storage/remote_read.h @@ -47,10 +47,14 @@ typedef enum { #define REMOTE_READ_RPC_TIMEOUT 6 #define REMOTE_READ_BLCKSZ_NOT_SAME 7 #define REMOTE_READ_MEMCPY_ERROR 8 +#define REMOTE_READ_IP_NOT_EXIST 9 +#define REMOTE_READ_CONN_ERROR 10 + #define MAX_PATH_LEN 1024 +#define MAX_IPADDR_LEN 64 -#define MAX_IPADDR_LEN 32 +const int MAX_BATCH_READ_BLOCKNUM = 16 * 1024 * 1024 / BLCKSZ; /* 16MB file */ extern const char* RemoteReadErrMsg(int error_code); diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h index 5fc58d717..62e96e0d5 100644 --- a/src/include/storage/sinval.h +++ b/src/include/storage/sinval.h @@ -136,8 +136,9 @@ typedef union SharedInvalidationMessage { extern THR_LOCAL 
volatile sig_atomic_t catchupInterruptPending; extern void SendSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n); + extern void ReceiveSharedInvalidMessages( - void (*invalFunction)(SharedInvalidationMessage* msg), void (*resetFunction)(void)); + void (*invalFunction)(SharedInvalidationMessage* msg), void (*resetFunction)(void), bool worksession); /* signal handler for catchup events (PROCSIG_CATCHUP_INTERRUPT) */ extern void HandleCatchupInterrupt(void); @@ -152,5 +153,8 @@ extern void ProcessCatchupInterrupt(void); extern int xactGetCommittedInvalidationMessages(SharedInvalidationMessage** msgs, bool* RelcacheInitFileInval); extern void ProcessCommittedInvalidationMessages( SharedInvalidationMessage* msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid); -extern void LocalExecuteInvalidationMessage(SharedInvalidationMessage* msg); +extern void LocalExecuteThreadAndSessionInvalidationMessage(SharedInvalidationMessage* msg); +extern void LocalExecuteThreadInvalidationMessage(SharedInvalidationMessage* msg); +extern void LocalExecuteSessionInvalidationMessage(SharedInvalidationMessage* msg); +extern void GlobalExecuteSharedInvalidMessages(const SharedInvalidationMessage* msgs, int n); #endif /* SINVAL_H */ diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h index 64933ad01..f77f5bacc 100644 --- a/src/include/storage/sinvaladt.h +++ b/src/include/storage/sinvaladt.h @@ -35,7 +35,7 @@ extern void SharedInvalBackendInit(bool sendOnly, bool worksession); extern PGPROC* BackendIdGetProc(int backendID); extern void SIInsertDataEntries(const SharedInvalidationMessage* data, int n); -extern int SIGetDataEntries(SharedInvalidationMessage* data, int datasize); +extern int SIGetDataEntries(SharedInvalidationMessage* data, int datasize, bool worksession); extern void SICleanupQueue(bool callerHasWriteLock, int minFree); extern LocalTransactionId GetNextLocalTransactionId(void); diff --git a/src/include/storage/smgr/fd.h b/src/include/storage/smgr/fd.h index 9e11a9760..31dc4cce5 100644 --- a/src/include/storage/smgr/fd.h +++ b/src/include/storage/smgr/fd.h @@ -42,7 +42,6 @@ #include #include "utils/hsearch.h" #include "storage/smgr/relfilenode.h" -#include "storage/page_compression.h" #include "postmaster/aiocompleter.h" /* @@ -61,6 +60,7 @@ typedef struct DataFileIdCacheEntry { /* the following are setted in runtime */ int fd; int refcount; + int repaired_fd; } DataFileIdCacheEntry; enum FileExistStatus { FILE_EXIST, FILE_NOT_EXIST, FILE_NOT_REG }; @@ -182,10 +182,7 @@ extern int data_sync_elevel(int elevel); extern bool FdRefcntIsZero(SMgrRelation reln, ForkNumber forkNum); extern FileExistStatus CheckFileExists(const char* path); - -/* Page compression support routines */ -extern void SetupPageCompressMemoryMap(File file, RelFileNode node, const RelFileNodeForkNum& relFileNodeForkNum); -extern PageCompressHeader *GetPageCompressMemoryMap(File file, uint32 chunk_size); +extern bool repair_deleted_file_check(RelFileNodeForkNum fileNode, int fd); /* Filename components for OpenTemporaryFile */ // Note that this macro must be the same to macro in initdb.cpp diff --git a/src/include/storage/smgr/relfilenode.h b/src/include/storage/smgr/relfilenode.h index 7a67bae31..514e25a45 100644 --- a/src/include/storage/smgr/relfilenode.h +++ b/src/include/storage/smgr/relfilenode.h @@ -45,9 +45,6 @@ typedef int ForkNumber; #define VISIBILITYMAP_FORKNUM 2 #define BCM_FORKNUM 3 #define INIT_FORKNUM 4 -// used for data file cache, you can modify than as you like 
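Editor's note on the hunk at this point: together with the pcmap plumbing removed from fd.h above, the relfilenode.h change deletes the two page-compression fork numbers (the `#define`s follow immediately below); from their names, PCA is evidently the compression address fork and PCD the compression data fork. As a toy illustration of what a fork number does, it indexes a suffix table used when building a relation's file path. The suffix names below follow the PostgreSQL convention and are assumptions, not openGauss code:

```c
#include <stdio.h>

/* Toy model only: fork numbers index a suffix table used to build relation
 * file paths. Values mirror the #defines in the surrounding hunk; the suffix
 * strings are assumed, not copied from openGauss. */
typedef int ForkNumber;
enum { MAIN_FORKNUM, FSM_FORKNUM, VISIBILITYMAP_FORKNUM, BCM_FORKNUM, INIT_FORKNUM };

static const char *const forkNames[] = { "main", "fsm", "vm", "bcm", "init" };

static void print_path(unsigned dbOid, unsigned relOid, ForkNumber fork)
{
    if (fork == MAIN_FORKNUM)
        printf("base/%u/%u\n", dbOid, relOid);        /* main fork has no suffix */
    else
        printf("base/%u/%u_%s\n", dbOid, relOid, forkNames[fork]);
}

int main(void)
{
    print_path(16384, 24576, MAIN_FORKNUM); /* base/16384/24576 */
    print_path(16384, 24576, FSM_FORKNUM);  /* base/16384/24576_fsm */
    return 0;
}
```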
-#define PCA_FORKNUM 5 -#define PCD_FORKNUM 6 /* * NOTE: if you add a new fork, change MAX_FORKNUM below and update the @@ -97,20 +94,11 @@ typedef int ForkNumber; * should be safe as long as all the fields are of type Oid. */ typedef struct RelFileNode { - Oid spcNode; /* tablespace */ - Oid dbNode; /* database */ - Oid relNode; /* relation */ - int2 bucketNode; /* bucketid */ - uint2 opt; -} RelFileNode; - -typedef struct RelFileNodeV2 { Oid spcNode; /* tablespace */ Oid dbNode; /* database */ Oid relNode; /* relation */ int4 bucketNode; /* bucketid */ -} RelFileNodeV2; - +} RelFileNode; #define IsSegmentFileNode(rnode) ((rnode).bucketNode > InvalidBktId) #define IsHeapFileNode(rnode) (!IsSegmentFileNode(rnode)) @@ -142,20 +130,11 @@ typedef struct RelFileNodeOld (relFileNode).bucketNode = (bucketid); \ } while(0) -#define RelFileNodeV2Copy(relFileNodeV2, relFileNode) \ - do { \ - (relFileNodeV2).spcNode = (relFileNode).spcNode; \ - (relFileNodeV2).dbNode = (relFileNode).dbNode; \ - (relFileNodeV2).relNode = (relFileNode).relNode; \ - (relFileNodeV2).bucketNode = (relFileNode).bucketNode; \ - } while (0) - /*This struct used for remove duplicated file list where we scan part of BCM files*/ typedef struct RelFileNodeKey { RelFileNode relfilenode; /*relfilenode*/ int columnid; /*column for CU store*/ } RelFileNodeKey; - typedef struct RelFileNodeKeyEntry { RelFileNodeKey key; int number; /*Times the relfilenode occurence*/ diff --git a/src/include/storage/smgr/segment.h b/src/include/storage/smgr/segment.h index 142cd09f4..c234b630c 100644 --- a/src/include/storage/smgr/segment.h +++ b/src/include/storage/smgr/segment.h @@ -126,6 +126,7 @@ DecodedXLogBlockOp XLogAtomicDecodeBlockData(char *data, int len); /* * APIs used for segment store metadata. */ +BufferDesc *SegBufferAlloc(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr); Buffer ReadBufferFast(SegSpace *spc, RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode); void SegReleaseBuffer(Buffer buffer); void SegUnlockReleaseBuffer(Buffer buffer); @@ -184,6 +185,8 @@ const char* XlogGetRemainExtentTypeName(StatRemainExtentType remainExtentType); extern Oid get_database_oid_by_name(const char *dbname); extern Oid get_tablespace_oid_by_name(const char *tablespacename); -extern StorageType PartitionGetStorageType(Oid parentOid); +extern void redo_xlog_deal_alloc_seg(uint8 opCode, Buffer buffer, const char* data, int data_len, + TransactionId xid); +extern StorageType PartitionGetStorageType(Partition partition, Oid parentOid); #endif diff --git a/src/include/storage/smgr/segment_internal.h b/src/include/storage/smgr/segment_internal.h index 29f10089c..c5c263059 100644 --- a/src/include/storage/smgr/segment_internal.h +++ b/src/include/storage/smgr/segment_internal.h @@ -71,8 +71,9 @@ typedef struct SegPhysicalFile { const int DF_ARRAY_EXTEND_STEP = 4; const ssize_t DF_FILE_EXTEND_STEP_BLOCKS = RELSEG_SIZE / 8; -const ssize_t DF_FILE_EXTEND_STEP_SIZE = DF_FILE_EXTEND_STEP_BLOCKS * BLCKSZ; // 1GB -const ssize_t DF_FILE_SLICE_SIZE = RELSEG_SIZE * BLCKSZ; // 1GB +const ssize_t DF_FILE_EXTEND_STEP_SIZE = DF_FILE_EXTEND_STEP_BLOCKS * BLCKSZ; // 128MB +const ssize_t DF_FILE_SLICE_BLOCKS = RELSEG_SIZE; +const ssize_t DF_FILE_SLICE_SIZE = DF_FILE_SLICE_BLOCKS * BLCKSZ; // 1GB const ssize_t DF_FILE_MIN_BLOCKS = DF_FILE_EXTEND_STEP_BLOCKS; #define DF_OFFSET_TO_SLICENO(offset) (offset / DF_FILE_SLICE_SIZE) @@ -98,6 +99,7 @@ void df_fsync(SegLogicFile *sf); void df_unlink(SegLogicFile 
*sf); void df_create_file(SegLogicFile *sf, bool redo); void df_shrink(SegLogicFile *sf, BlockNumber target); +void df_flush_data(SegLogicFile *sf, BlockNumber blocknum, BlockNumber nblocks); /* * Data files status in the segment space; diff --git a/src/include/storage/smgr/smgr.h b/src/include/storage/smgr/smgr.h index 4b074a3eb..68b7a53e6 100644 --- a/src/include/storage/smgr/smgr.h +++ b/src/include/storage/smgr/smgr.h @@ -137,11 +137,13 @@ enum SMGR_READ_STATUS { extern void smgrinit(void); extern SMgrRelation smgropen(const RelFileNode& rnode, BackendId backend, int col = 0); +extern void smgrshutdown(int code, Datum arg); extern bool smgrexists(SMgrRelation reln, ForkNumber forknum, BlockNumber blockNum = InvalidBlockNumber); extern void smgrsetowner(SMgrRelation* owner, SMgrRelation reln); extern void smgrclearowner(SMgrRelation* owner, SMgrRelation reln); extern void smgrclose(SMgrRelation reln, BlockNumber blockNum = InvalidBlockNumber); extern void smgrcloseall(void); +extern void smgrcleanblocknumall(void); extern void smgrclosenode(const RelFileNodeBackend& rnode); extern void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo); extern void smgrdounlink(SMgrRelation reln, bool isRedo, BlockNumber blockNum = InvalidBlockNumber); @@ -186,9 +188,7 @@ extern void mdForgetDatabaseFsyncRequests(Oid dbid); /* md sync requests */ extern void ForgetDatabaseSyncRequests(Oid dbid); -extern void CheckPointSyncForDw(void); extern void CheckPointSyncWithAbsorption(void); -extern int64 CheckPointGetFsyncRequset(); /* md sync callbacks */ extern int SyncMdFile(const FileTag *ftag, char *path); extern int UnlinkMdFile(const FileTag *ftag, char *path); diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h index b1bc60afe..72324232b 100755 --- a/src/include/storage/standby.h +++ b/src/include/storage/standby.h @@ -56,10 +56,10 @@ extern bool standbyWillTouchStandbyLocks(XLogReaderState* record); #define XLOG_RUNNING_XACTS 0x10 #define XLOG_STANDBY_UNLOCK 0x20 #define XLOG_STANDBY_CSN 0x30 -#ifndef ENABLE_MULTIPLE_NODES + #define XLOG_STANDBY_CSN_COMMITTING 0x40 #define XLOG_STANDBY_CSN_ABORTED 0x50 -#endif + typedef struct xl_standby_locks { int nlocks; /* number of entries in locks array */ @@ -87,15 +87,15 @@ typedef struct xl_running_xacts { /* Recovery handlers for the Standby Rmgr (RM_STANDBY_ID) */ extern void standby_redo(XLogReaderState* record); extern void standby_desc(StringInfo buf, XLogReaderState* record); -#ifndef ENABLE_MULTIPLE_NODES +extern const char* standby_type_name(uint8 subtype); + extern void StandbyXlogStartup(void); extern void StandbyXlogCleanup(void); extern bool StandbySafeRestartpoint(void); extern bool RemoveCommittedCsnInfo(TransactionId xid); -extern void *XLogReleaseAdnGetCommittingCsnList(); +extern void RemoveAllCommittedCsnInfo(); +extern void *XLogReleaseAndGetCommittingCsnList(); extern void CleanUpMakeCommitAbort(List* committingCsnList); - -#endif typedef struct xl_running_xacts_old { int xcnt; /* # of xact ids in xids[] */ bool subxid_overflow; /* snapshot overflowed, subxids missing */ diff --git a/src/include/storage/tcap.h b/src/include/storage/tcap.h index a707313cd..e40c0f5c6 100644 --- a/src/include/storage/tcap.h +++ b/src/include/storage/tcap.h @@ -38,7 +38,7 @@ static inline bool TcapFeatureAvail() { - return t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_WERSION_NUM; + return t_thrd.proc->workingVersionNum >= INPLACE_UPDATE_VERSION_NUM; } static inline void TcapFeatureEnsure() @@ -48,7 +48,7 @@ static inline 
void TcapFeatureEnsure() (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Un-support feature"), errdetail("Only support timecapsule from version %u", - INPLACE_UPDATE_WERSION_NUM))); + INPLACE_UPDATE_VERSION_NUM))); } } @@ -68,6 +68,7 @@ extern void TvUheapDeleteDelta(Oid relid, Snapshot snap); extern void TvUheapInsertLost(Oid relid, Snapshot snap); extern void TvRestoreVersion(TimeCapsuleStmt *stmt); +extern TransactionId TvFetchSnpxminRecycle(TimestampTz tz); /* * Interfaces for Timecapsule `Recyclebin-based query, restore` @@ -79,16 +80,20 @@ typedef enum TrObjType { RB_OBJ_TOAST = 2, RB_OBJ_TOAST_INDEX = 3, RB_OBJ_SEQUENCE = 4, + RB_OBJ_PARTITION = 5, + RB_OBJ_GLOBAL_INDEX = 6, + RB_OBJ_MATVIEW = 7 } TrObjType; extern bool TrCheckRecyclebinDrop(const DropStmt *stmt, ObjectAddresses *objects); -extern void TrDrop(const ObjectAddresses *objects, DropBehavior behavior); +extern void TrDrop(const DropStmt* drop, const ObjectAddresses *objects, DropBehavior behavior); extern bool TrCheckRecyclebinTruncate(const TruncateStmt *stmt); extern void TrTruncate(const TruncateStmt *stmt); extern void TrRelationSetNewRelfilenode(Relation relation, TransactionId freezeXid, void *baseDesc); +extern void TrPartitionSetNewRelfilenode(Relation parent, Partition part, TransactionId freezeXid, void *baseDesc); -extern void TrPurgeObject(const RangeVar *purobj, TrObjType type); +extern void TrPurgeObject(RangeVar *purobj, TrObjType type); extern void TrPurgeTablespaceDML(int64 id); extern void TrPurgeTablespace(int64 id); extern void TrPurgeRecyclebin(int64 id); diff --git a/src/include/storage/tcap_impl.h b/src/include/storage/tcap_impl.h index 04be296dd..367d26330 100644 --- a/src/include/storage/tcap_impl.h +++ b/src/include/storage/tcap_impl.h @@ -53,7 +53,7 @@ typedef struct TrObjDesc { Oid dbid; Oid relid; char name[NAMEDATALEN]; - char originname[NAMEDATALEN]; + char originname[2 * NAMEDATALEN]; TrObjOperType operation; TrObjType type; int64 recyclecsn; @@ -87,9 +87,11 @@ extern void TrUpdateBaseid(const TrObjDesc *desc); extern Oid TrDescWrite(TrObjDesc *desc); extern void TrDescInit(Relation rel, TrObjDesc *desc, TrObjOperType operType, TrObjType objType, bool canpurge, bool isBaseObj = false); +extern void TrPartDescInit(Relation rel, Partition part, TrObjDesc *desc, TrObjOperType operType, + TrObjType objType, bool canpurge, bool isBaseObj = false); extern void TrFindAllRefObjs(Relation depRel, const ObjectAddress *subobj, ObjectAddresses *refobjs, bool ignoreObjSubId = false); -extern void TrOperFetch(const RangeVar *purobj, TrObjType objtype, - TrObjDesc *desc, TrOperMode operMode); +extern void TrOperFetch(const RangeVar *purobj, TrObjType objtype, TrObjDesc *desc, TrOperMode operMode); extern void TrOperPrep(TrObjDesc *desc, TrOperMode operMode); -extern void TrSwapRelfilenode(Relation rbRel, HeapTuple rbTup); +extern void TrSwapRelfilenode(Relation rbRel, HeapTuple rbTup, bool isPart); +extern bool TrFetchName(const char *rcyname, TrObjType type, TrObjDesc *desc, TrOperMode operMode); diff --git a/src/include/storage/vfd.h b/src/include/storage/vfd.h index 1135517da..33a0fdab4 100644 --- a/src/include/storage/vfd.h +++ b/src/include/storage/vfd.h @@ -17,7 +17,6 @@ #include #include "utils/resowner.h" -#include "storage/page_compression.h" #include "storage/smgr/relfilenode.h" typedef struct vfd { @@ -35,8 +34,6 @@ typedef struct vfd { int fileFlags; /* open(2) flags for (re)opening the file */ int fileMode; /* mode to pass to open(2) */ RelFileNodeForkNum fileNode; /* current logical file 
node */ - bool with_pcmap; /* is page compression relation */ - PageCompressHeader *pcmap; /* memory map of page compression address file */ } Vfd; #endif /* VFD_H */ diff --git a/src/include/tcop/autonomoustransaction.h b/src/include/tcop/autonomoustransaction.h index 86f1bbf54..eb3821d98 100644 --- a/src/include/tcop/autonomoustransaction.h +++ b/src/include/tcop/autonomoustransaction.h @@ -48,6 +48,7 @@ struct ATResult { bool withtuple; PQResult result; Datum ResTup; + bool resisnull; ATResult() : withtuple(false), result(RES_DEFAULT) {} ATResult(bool btuple, PQResult pqres) : withtuple(btuple), result(pqres) {} @@ -67,15 +68,19 @@ public: m_conn = NULL; m_res = NULL; current_attach_sessionid = 0; + saved_deadlock_timeout = 0; RefSessionCount(); } - Datum ExecSimpleQuery(const char* query, TupleDesc resultTupleDesc, int64 currentXid, bool isLockWait = false); + ATResult ExecSimpleQuery(const char* query, TupleDesc resultTupleDesc, int64 currentXid, bool isLockWait = false, + bool is_plpgsql_func_with_outparam = false); void DetachSession(void); void AttachSession(void); bool GetConnStatus(void); bool ReConnSession(void); + void SetDeadLockTimeOut(void); + void ReSetDeadLockTimeOut(void); public: uint64 current_attach_sessionid = 0; @@ -110,9 +115,11 @@ private: PGresult* m_res = NULL; static pg_atomic_uint32 m_sessioncnt; + + int saved_deadlock_timeout = 0; }; -ATResult HandlePGResult(PGconn* conn, PGresult* pgresult, TupleDesc resultTupleDesc); +ATResult HandlePGResult(PGconn* conn, PGresult* pgresult, TupleDesc resultTupleDesc, bool is_plpgsql_func_with_outparam); enum PLpgSQL_exectype { STMT_SQL, diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h index 2f58bfe3b..0d3fe0128 100644 --- a/src/include/tcop/dest.h +++ b/src/include/tcop/dest.h @@ -169,9 +169,5 @@ extern void NullCommand(CommandDest dest); extern void ReadyForQuery(CommandDest dest); extern void ReadyForQuery_noblock(CommandDest dest, int timeout); -extern void init_sess_dest(DestReceiver* initdonothingDR, - DestReceiver* initdebugtupDR, - DestReceiver* initspi_printtupDR); - #endif /* !FRONTEND_PARSER */ #endif /* DEST_H */ diff --git a/src/include/tcop/stmt_retry.h b/src/include/tcop/stmt_retry.h index 90ea5eca9..c010787e5 100644 --- a/src/include/tcop/stmt_retry.h +++ b/src/include/tcop/stmt_retry.h @@ -396,7 +396,7 @@ extern bool IsStmtNeedRetryByErrCode(const char* ecode_str, int errlevel); */ inline bool IsStmtNeedRetryByErrCode(int ecode, int errlevel) { - char* ecode_str = plpgsql_get_sqlstate(ecode); + const char* ecode_str = plpgsql_get_sqlstate(ecode); return IsStmtNeedRetryByErrCode(ecode_str, errlevel); } diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h index e91f8cb4c..60a99ee11 100644 --- a/src/include/tcop/tcopprot.h +++ b/src/include/tcop/tcopprot.h @@ -62,7 +62,10 @@ extern int PostgresMain(int argc, char* argv[], const char* dbname, const char* extern long get_stack_depth_rlimit(void); extern void ResetUsage(void); extern void ShowUsage(const char* title); + +#define PRINTF_DST_MAX 32 /* buffer size used for check_log_duration's first parameter - msec_str */ extern int check_log_duration(char* msec_str, bool was_logged); + extern void set_debug_options(int debug_flag, GucContext context, GucSource source); extern bool set_plan_disabling_options(const char* arg, GucContext context, GucSource source); extern const char* get_stats_option_name(const char* arg); diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h index 92983f459..a4a445486 100644 --- 
a/src/include/tcop/utility.h +++ b/src/include/tcop/utility.h @@ -4,9 +4,9 @@ * prototypes for utility.c. * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/tcop/utility.h * @@ -89,6 +89,7 @@ typedef enum { ARQ_TYPE_SAMPLE, /* include sample rows or sample table. */ } ANALYZE_RQTYPE; +extern bool pg_try_advisory_lock_for_redis(Relation rel); extern void pgxc_lock_for_utility_stmt(Node* parsetree, bool is_temp); extern void ExecUtilityStmtOnNodes(const char* queryString, ExecNodes* nodes, bool sentToRemote, bool force_autocommit, RemoteQueryExecType exec_type, bool is_temp, Node* parsetree = NULL); @@ -121,6 +122,8 @@ extern bool ObjectsInSameNodeGroup(List* objects, NodeTag stmttype); extern void EstIdxMemInfo( Relation rel, RangeVar* relation, UtilityDesc* desc, IndexInfo* info, const char* accessMethod); extern void AdjustIdxMemInfo(AdaptMem* operatorMem, UtilityDesc* desc); +extern void AlterGlobalConfig(AlterGlobalConfigStmt *stmt); +extern void DropGlobalConfig(DropGlobalConfigStmt *stmt); /* * @hdfs The struct HDFSTableAnalyze is used to store datanode work infomation diff --git a/src/include/tde_key_management/tde_key_storage.h b/src/include/tde_key_management/tde_key_storage.h index 8cfd87521..b46a19b08 100755 --- a/src/include/tde_key_management/tde_key_storage.h +++ b/src/include/tde_key_management/tde_key_storage.h @@ -45,14 +45,15 @@ public: bool insert_cache(TDECacheEntry* tde_cache_entry); char* search_cache(const char* dek_cipher); void cache_watch_dog(); + void clean_cache_entry_value(char* dek_plaintext); + void reset_dek_plaintext(char* dek_plaintext); private: TDEKeyStorage(); ~TDEKeyStorage(); static uint32 tde_cache_entry_hash_func(const void* key, Size keysize); static int tde_cache_entry_match_func(const void* key1, const void* key2, Size keySize); - void clean_cache_entry_value(char* dek_plaintext); - void reset_dek_plaintext(char* dek_plaintext); + private: MemoryContext tde_cache_mem; HTAB* tde_cache; diff --git a/src/include/threadpool/threadpool.h b/src/include/threadpool/threadpool.h index 34893b609..efc72b90d 100644 --- a/src/include/threadpool/threadpool.h +++ b/src/include/threadpool/threadpool.h @@ -48,6 +48,12 @@ #define MAX_STREAM_POOL_SIZE 16384 #define MAX_THREAD_POOL_GROUPS 64 #define CHAR_SIZE 512 +#define DEFAULT_THREAD_POOL_STREAM_PROC_RATIO 0.2 + +/* dop max is 8 */ +#define MAX_THREAD_POOL_STREAM_PROC_RATIO 8 + + extern ThreadPoolControler* g_threadPoolControler; diff --git a/src/include/threadpool/threadpool_controler.h b/src/include/threadpool/threadpool_controler.h index fbdbe7365..624ab07ee 100644 --- a/src/include/threadpool/threadpool_controler.h +++ b/src/include/threadpool/threadpool_controler.h @@ -33,7 +33,12 @@ class ThreadPoolSessControl; -enum CPUBindType { NO_CPU_BIND, ALL_CPU_BIND, NODE_BIND, CPU_BIND }; +enum CPUBindType { NO_CPU_BIND, ALL_CPU_BIND, NODE_BIND, CPU_BIND, NUMA_BIND }; + +typedef struct { + int numaId; + int cpuId; +}NumaCpuId; typedef struct CPUInfo { int totalCpuNum; @@ -47,6 +52,7 @@ typedef struct CPUInfo { bool* isBindCpuArr; bool* isBindNumaArr; bool* isMcsCpuArr; + bool* isBindCpuNumaArr; } CPUInfo; typedef struct ThreadPoolAttr { @@ -55,6 +61,14 @@ typedef struct ThreadPoolAttr { char* bindCpu; } ThreadPoolAttr; +typedef struct ThreadPoolStreamAttr { + int threadNum; + float procRatio; + 
int groupNum; + char* bindCpu; +} ThreadPoolStreamAttr; + + class ThreadPoolControler : public BaseObject { public: ThreadPoolSessControl* m_sessCtrl; @@ -92,24 +106,37 @@ public: { return m_threadPoolContext; } + + inline int GetStreamThreadNum() + { + return m_maxStreamPoolSize; + } + + inline float GetStreamProcRatio() + { + return m_streamProcRatio; + } void BindThreadToAllAvailCpu(ThreadId thread) const; void EnableAdjustPool(); - + static int ParseRangeStr(char* attr, bool* arr, int totalNum, char* bindtype); + static bool* GetMcsCpuInfo(int totalCpuNum); + static void GetCpuAndNumaNum(int32 *totalCpuNum, int32 *totalNumaNum); + static void GetActiveCpu(NumaCpuId *numaCpuIdList, int *num); + static void GetInstanceBind(cpu_set_t *cpuset); private: ThreadPoolGroup* FindThreadGroupWithLeastSession(); void ParseAttr(); + void ParseStreamAttr(); void ParseBindCpu(); - int ParseRangeStr(char* attr, bool* arr, int totalNum, char* bindtype); - void GetMcsCpuInfo(); void GetSysCpuInfo(); void InitCpuInfo(); - void GetCpuAndNumaNum(); bool IsActiveCpu(int cpuid, int numaid); void SetGroupAndThreadNum(); + void SetStreamInfo(); void ConstrainThreadNum(); - void GetInstanceBind(); bool CheckCpuBind() const; + bool CheckCpuNumaBind() const; private: MemoryContext m_threadPoolContext; @@ -117,10 +144,13 @@ private: ThreadPoolScheduler* m_scheduler; CPUInfo m_cpuInfo; ThreadPoolAttr m_attr; + ThreadPoolStreamAttr m_stream_attr; cpu_set_t m_cpuset; int m_groupNum; int m_threadNum; int m_maxPoolSize; + int m_maxStreamPoolSize; + float m_streamProcRatio; }; #endif /* THREAD_POOL_CONTROLER_H */ diff --git a/src/include/threadpool/threadpool_group.h b/src/include/threadpool/threadpool_group.h index 27b08003c..a097ec8ef 100644 --- a/src/include/threadpool/threadpool_group.h +++ b/src/include/threadpool/threadpool_group.h @@ -70,7 +70,7 @@ public: ThreadPoolListener* m_listener; ThreadPoolGroup(int maxWorkerNum, int expectWorkerNum, int maxStreamNum, - int groupId, int numaId, int cpuNum, int* cpuArr); + int groupId, int numaId, int cpuNum, int* cpuArr, bool enableBindCpuNuma); ~ThreadPoolGroup(); void Init(bool enableNumaDistribute); void InitWorkerSentry(); @@ -134,6 +134,7 @@ public: private: void AttachThreadToCPU(ThreadId thread, int cpu); void AttachThreadToNodeLevel(ThreadId thread) const; + void AttachThreadToCpuNuma(ThreadId thread); private: int m_maxWorkerNum; @@ -156,7 +157,9 @@ private: int m_groupCpuNum; int* m_groupCpuArr; bool m_enableNumaDistribute; + bool m_enableBindCpuNuma; cpu_set_t m_nodeCpuSet; /* for numa node distribution only */ + cpu_set_t m_CpuNumaSet; /* for numa node distribution only */ ThreadWorkerSentry* m_workers; MemoryContext m_context; diff --git a/src/include/threadpool/threadpool_listener.h b/src/include/threadpool/threadpool_listener.h index bc351568f..addfbfc0d 100644 --- a/src/include/threadpool/threadpool_listener.h +++ b/src/include/threadpool/threadpool_listener.h @@ -35,6 +35,7 @@ class ThreadPoolListener : public BaseObject { public: ThreadPoolGroup* m_group; volatile bool m_reaperAllSession; + bool m_getKilled; ThreadPoolListener(ThreadPoolGroup* group); ~ThreadPoolListener(); @@ -64,10 +65,26 @@ public: { m_tid = 0; } + +#ifdef ENABLE_LITE_MODE + inline bool IsBusy() + { + if (m_group->m_waitServeSessionCount == 0 && m_group->m_processTaskCount > 2) { + return false; + } else { + return true; + } + } +#endif private: void HandleConnEvent(int nevets); knl_session_context* GetSessionBaseOnEvent(struct epoll_event* ev); + Dlelem 
*GetFreeWorker(knl_session_context* session); void DispatchSession(knl_session_context* session); + Dlelem *GetReadySession(ThreadPoolWorker* worker); + Dlelem *GetSessFromReadySessionList(ThreadPoolWorker *worker); + void AddIdleSessionToTail(knl_session_context* session); + void AddIdleSessionToHead(knl_session_context* session); private: ThreadId m_tid; @@ -77,6 +94,14 @@ private: DllistWithLock* m_freeWorkerList; DllistWithLock* m_readySessionList; DllistWithLock* m_idleSessionList; + + // split session by dbid, put them into hashtable as a sessionlist + // key is dbid, and value is a sessionlist, who has same elements as m_readySessionList + int m_session_nbucket; + Dllist *m_session_bucket; // add rwlock + pthread_rwlock_t *m_session_rw_locks; + uint32 m_match_search; + const uint32 MATCH_SEARCH_THRESHOLD = 10; }; #endif /* THREAD_POOL_LISTENER_H */ diff --git a/src/include/threadpool/threadpool_scheduler.h b/src/include/threadpool/threadpool_scheduler.h index d8af09a1d..4760be6fd 100644 --- a/src/include/threadpool/threadpool_scheduler.h +++ b/src/include/threadpool/threadpool_scheduler.h @@ -49,7 +49,7 @@ public: MemoryContext m_gpcContext; bool m_getSIGHUP; volatile bool m_canAdjustPool; - + bool m_getKilled; private: void AdjustWorkerPool(int idx); void AdjustStreamPool(int idx); diff --git a/src/include/threadpool/threadpool_sessctl.h b/src/include/threadpool/threadpool_sessctl.h index 0e8e13dc5..943e246f0 100644 --- a/src/include/threadpool/threadpool_sessctl.h +++ b/src/include/threadpool/threadpool_sessctl.h @@ -62,6 +62,7 @@ public: void CheckSessionTimeout(); void CheckPermissionForSendSignal(knl_session_context* sess, sig_atomic_t* lock); void getSessionMemoryDetail(Tuplestorestate* tupStore, TupleDesc tupDesc, knl_sess_control** sess); + void getSessionMemoryContextInfo(const char* ctx_name, StringInfoData* buf, knl_sess_control** sess); knl_session_context* GetSessionByIdx(int idx); int FindCtrlIdxBySessId(uint64 id); TransactionId ListAllSessionGttFrozenxids(int maxSize, ThreadId *pids, TransactionId *xids, int *n); diff --git a/src/include/threadpool/threadpool_worker.h b/src/include/threadpool/threadpool_worker.h index 993f3bcc6..0a6564bb7 100644 --- a/src/include/threadpool/threadpool_worker.h +++ b/src/include/threadpool/threadpool_worker.h @@ -107,6 +107,11 @@ public: { m_currentSession = session; } + const inline knl_thrd_context *GetThreadContextPtr() + { + return m_thrd; + } + inline ThreadStatus GetthreadStatus() { @@ -144,6 +149,7 @@ private: ThreadPoolGroup* m_group; pthread_mutex_t* m_mutex; pthread_cond_t* m_cond; + knl_thrd_context *m_thrd; }; #endif /* THREAD_POOL_WORKER_H */ diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h index 696b91104..e8158a742 100644 --- a/src/include/utils/acl.h +++ b/src/include/utils/acl.h @@ -264,7 +264,7 @@ extern void aclitemsort(Acl* acl); extern bool aclequal(const Acl* left_acl, const Acl* right_acl); extern AclMode aclmask(const Acl* acl, Oid roleid, Oid ownerId, AclMode mask, AclMaskHow how); -extern AclMode aclmask_dbe_perf(const Acl* acl, Oid roleid, Oid ownerId, AclMode mask, AclMaskHow how); +extern AclMode aclmask_without_sysadmin(const Acl* acl, Oid roleid, Oid ownerId, AclMode mask, AclMaskHow how); extern int aclmembers(const Acl* acl, Oid** roleids); extern bool has_privs_of_role(Oid member, Oid role); @@ -276,8 +276,9 @@ extern Oid get_role_oid(const char* rolname, bool missing_ok); extern void select_best_grantor( Oid roleId, AclMode privileges, AclMode ddl_privileges, const Acl* acl, Oid 
ownerId, - Oid* grantorId, AclMode* grantOptions, AclMode* ddl_grantOptions, bool is_dbe_perf = false); - + Oid* grantorId, AclMode* grantOptions, AclMode* ddl_grantOptions, bool is_dbe_perf = false, + bool isPgCatalog = false); +extern List * roles_has_privs_of(Oid roleid); extern void initialize_acl(void); /* diff --git a/src/include/utils/aset.h b/src/include/utils/aset.h index 7fcef0a63..d30083ce1 100644 --- a/src/include/utils/aset.h +++ b/src/include/utils/aset.h @@ -221,12 +221,6 @@ public: template static int gs_posix_memalign(void** memptr, Size alignment, Size sz, bool needProtect); - - template - static bool gs_memprot_reserve(Size sz, bool needProtect); - - template - static void gs_memprot_release(Size sz); }; extern int alloc_trunk_size(int width); diff --git a/src/include/utils/be_module.h b/src/include/utils/be_module.h index 39203f24b..d3163ecbb 100755 --- a/src/include/utils/be_module.h +++ b/src/include/utils/be_module.h @@ -57,6 +57,7 @@ enum ModuleId { MOD_EXECUTOR, /* Row Executor */ MOD_OPFUSION, /* Bypass Opfusion */ MOD_GPC, /* Global plancache */ + MOD_GSC, /* Global syscache */ MOD_VEC_EXECUTOR, /* Vector Executor */ MOD_STREAM, /* Stream */ MOD_LLVM, /* LLVM */ @@ -134,6 +135,8 @@ enum ModuleId { MOD_SPI, MOD_NEST_COMPILE, MOD_RESOWNER, + MOD_LOGICAL_DECODE, /* logical decode */ + MOD_GPRC, /* global package runtime cache */ /* * Add your module id above. diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 2bfd91eb6..c315488c4 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -4,6 +4,7 @@ * Declarations for operations on built-in types. * * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2021, openGauss Contributors @@ -137,6 +138,7 @@ extern Datum has_cek_privilege_id_name(PG_FUNCTION_ARGS); extern Datum has_cek_privilege_id_id(PG_FUNCTION_ARGS); extern Datum has_cek_privilege_name(PG_FUNCTION_ARGS); extern Datum has_cek_privilege_id(PG_FUNCTION_ARGS); +extern Datum has_any_privilege(PG_FUNCTION_ARGS); /* bool.c */ extern Datum boolin(PG_FUNCTION_ARGS); @@ -591,7 +593,6 @@ extern Datum pg_read_file(PG_FUNCTION_ARGS); extern Datum pg_read_file_all(PG_FUNCTION_ARGS); extern Datum pg_read_binary_file(PG_FUNCTION_ARGS); extern Datum pg_read_binary_file_all(PG_FUNCTION_ARGS); -extern Datum pg_read_binary_file_blocks(PG_FUNCTION_ARGS); extern Datum pg_ls_dir(PG_FUNCTION_ARGS); extern Datum pg_stat_file_recursive(PG_FUNCTION_ARGS); @@ -702,7 +703,22 @@ extern Datum texticregexne(PG_FUNCTION_ARGS); extern Datum textregexsubstr(PG_FUNCTION_ARGS); extern Datum textregexreplace_noopt(PG_FUNCTION_ARGS); extern Datum textregexreplace(PG_FUNCTION_ARGS); +extern Datum regexp_replace_noopt(PG_FUNCTION_ARGS); +extern Datum regexp_replace_with_position(PG_FUNCTION_ARGS); +extern Datum regexp_replace_with_occur(PG_FUNCTION_ARGS); +extern Datum regexp_replace_with_opt(PG_FUNCTION_ARGS); extern Datum similar_escape(PG_FUNCTION_ARGS); +extern Datum regexp_count_noopt(PG_FUNCTION_ARGS); +extern Datum regexp_count_position(PG_FUNCTION_ARGS); +extern Datum regexp_count_matchopt(PG_FUNCTION_ARGS); +extern Datum regexp_instr_noopt(PG_FUNCTION_ARGS); +extern Datum regexp_instr_position(PG_FUNCTION_ARGS); +extern Datum regexp_instr_occurren(PG_FUNCTION_ARGS); +extern Datum regexp_instr_returnopt(PG_FUNCTION_ARGS); +extern Datum 
regexp_instr_matchopt(PG_FUNCTION_ARGS); +extern Datum regexp_substr_with_position(PG_FUNCTION_ARGS); +extern Datum regexp_substr_with_occurrence(PG_FUNCTION_ARGS); +extern Datum regexp_substr_with_opt(PG_FUNCTION_ARGS); extern Datum regexp_matches(PG_FUNCTION_ARGS); extern Datum regexp_matches_no_flags(PG_FUNCTION_ARGS); extern Datum regexp_split_to_table(PG_FUNCTION_ARGS); @@ -954,7 +970,7 @@ extern bool SplitIdentifierString(char* rawstring, char separator, List** nameli extern bool SplitIdentifierInteger(char* rawstring, char separator, List** namelist); extern Datum replace_text(PG_FUNCTION_ARGS); extern Datum replace_text_with_two_args(PG_FUNCTION_ARGS); -extern text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, bool glob); +extern text* replace_text_regexp(text* src_text, void* regexp, text* replace_text, int position, int occur); extern Datum split_text(PG_FUNCTION_ARGS); extern Datum text_to_array(PG_FUNCTION_ARGS); extern Datum array_to_text(PG_FUNCTION_ARGS); @@ -1459,6 +1475,10 @@ extern Datum pgxc_pool_connection_status(PG_FUNCTION_ARGS); extern Datum pg_pool_validate(PG_FUNCTION_ARGS); extern Datum pg_pool_ping(PG_FUNCTION_ARGS); extern Datum comm_check_connection_status(PG_FUNCTION_ARGS); +extern Datum pgxc_disaster_read_set(PG_FUNCTION_ARGS); +extern Datum pgxc_disaster_read_init(PG_FUNCTION_ARGS); +extern Datum pgxc_disaster_read_clear(PG_FUNCTION_ARGS); +extern Datum pgxc_disaster_read_status(PG_FUNCTION_ARGS); #endif /* comm_proxy.cpp */ @@ -1538,13 +1558,13 @@ extern void encryptOBS(char* srcplaintext, char destciphertext[], uint32 destcip extern void decryptOBS( const char* srcciphertext, char destplaintext[], uint32 destplainlength, const char* obskey = NULL); extern void encryptECString(char* src_plain_text, char* dest_cipher_text, - uint32 dest_cipher_length, KeyMode mode); + uint32 dest_cipher_length, int mode); extern bool decryptECString(const char* src_cipher_text, char* dest_plain_text, - uint32 dest_plain_length, KeyMode mode); + uint32 dest_plain_length, int mode); extern bool IsECEncryptedString(const char* src_cipher_text); extern void EncryptGenericOptions(List* options, const char** sensitiveOptionsArray, - int arrayLength, KeyMode mode); -extern void DecryptOptions(List *options, const char** sensitiveOptionsArray, int arrayLength, KeyMode mode); + int arrayLength, int mode); +extern void DecryptOptions(List *options, const char** sensitiveOptionsArray, int arrayLength, int mode); #define EC_CIPHER_TEXT_LENGTH 1024 @@ -1596,6 +1616,7 @@ extern Datum tdigest_in(PG_FUNCTION_ARGS); /* AI */ extern Datum db4ai_predict_by(PG_FUNCTION_ARGS); +extern Datum db4ai_explain_model(PG_FUNCTION_ARGS); extern Datum gs_index_advise(PG_FUNCTION_ARGS); extern Datum hypopg_create_index(PG_FUNCTION_ARGS); extern Datum hypopg_display_index(PG_FUNCTION_ARGS); @@ -1608,9 +1629,20 @@ extern Datum mot_global_memory_detail(PG_FUNCTION_ARGS); extern Datum mot_local_memory_detail(PG_FUNCTION_ARGS); extern Datum mot_session_memory_detail(PG_FUNCTION_ARGS); +/* UBtree index */ +Datum gs_index_verify(PG_FUNCTION_ARGS); +Datum gs_index_recycle_queue(PG_FUNCTION_ARGS); + /* undo meta */ extern Datum gs_undo_meta(PG_FUNCTION_ARGS); +extern Datum gs_stat_undo(PG_FUNCTION_ARGS); extern Datum gs_undo_translot(PG_FUNCTION_ARGS); +extern Datum gs_undo_record(PG_FUNCTION_ARGS); + +/* Xlog write/flush */ +extern Datum gs_stat_wal_entrytable(PG_FUNCTION_ARGS); +extern Datum gs_walwriter_flush_position(PG_FUNCTION_ARGS); +extern Datum 
gs_walwriter_flush_stat(PG_FUNCTION_ARGS); /* Ledger */ extern Datum get_dn_hist_relhash(PG_FUNCTION_ARGS); @@ -1626,6 +1658,15 @@ extern Datum gs_is_recycle_object(PG_FUNCTION_ARGS); extern Datum sys_connect_by_path(PG_FUNCTION_ARGS); extern Datum connect_by_root(PG_FUNCTION_ARGS); +/* Sequence update */ +Datum large_sequence_upgrade_node_tree(PG_FUNCTION_ARGS); +Datum large_sequence_rollback_node_tree(PG_FUNCTION_ARGS); + +/* Create Index Concurrently for Distribution */ +#ifdef ENABLE_MULTIPLE_NODES +extern Datum gs_mark_indisvalid(PG_FUNCTION_ARGS); +#endif + /* origin.cpp */ extern Datum pg_replication_origin_advance(PG_FUNCTION_ARGS); extern Datum pg_replication_origin_create(PG_FUNCTION_ARGS); @@ -1646,7 +1687,6 @@ extern Datum pg_get_publication_tables(PG_FUNCTION_ARGS); /* launcher.cpp */ extern Datum pg_stat_get_subscription(PG_FUNCTION_ARGS); - #endif /* !FRONTEND_PARSER */ #endif /* BUILTINS_H */ diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h index 6e3213a35..e2c9cb7b4 100644 --- a/src/include/utils/catcache.h +++ b/src/include/utils/catcache.h @@ -164,7 +164,9 @@ typedef struct catclist { short nkeys; /* number of lookup keys specified */ int n_members; /* number of member tuples */ CatCache* my_cache; /* link to owning catcache */ - CatCTup* members[FLEXIBLE_ARRAY_MEMBER]; /* members --- VARIABLE LENGTH ARRAY */ + CatCTup** systups; /* systups, link to CatCTup for pg; link to GlobalCatCTup for lsc + dont access this variable directly, + fetch element by call t_thrd.lsc_cxt.FetchTupleFromCatCList(catlist, i) instead */ } CatCList; /* VARIABLE LENGTH STRUCT */ typedef struct CatCacheHeader { @@ -202,4 +204,9 @@ extern void PrintCatCacheLeakWarning(HeapTuple tuple); extern void PrintCatCacheListLeakWarning(CatCList* list); extern void InsertBuiltinFuncDescInBootstrap(); extern void InsertBuiltinFuncInBootstrap(); + +#ifndef ENABLE_MULTIPLE_NODES +extern HeapTuple SearchSysCacheForProcAllArgs(Datum v1, Datum v2, Datum v3, Datum v4, Datum proArgModes); +#endif + #endif /* CATCACHE_H */ diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h index f1bd5342b..131945f5a 100644 --- a/src/include/utils/elog.h +++ b/src/include/utils/elog.h @@ -6,7 +6,6 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/utils/elog.h * @@ -124,6 +123,9 @@ #define ereport_domain(elevel, domain, rest) \ (errstart(elevel, __FILE__, __LINE__, PG_FUNCNAME_MACRO, domain) ? (errfinish rest) : (void)0) +extern THR_LOCAL int log_min_messages; +extern THR_LOCAL int client_min_messages; + #ifdef PC_LINT #define ereport(elevel, rest) \ do { \ @@ -132,8 +134,10 @@ } while (0) #else -#define ereport(elevel, rest) ereport_domain(elevel, TEXTDOMAIN, rest) -#endif /*PCLINT_CHECK*/ +#define ereport(elevel, rest) \ + (((elevel) > DEBUG1 || (elevel) < DEBUG5 || log_min_messages <= (elevel) || client_min_messages <= (elevel)) ? \ + ereport_domain(elevel, TEXTDOMAIN, rest) : (void)0) +#endif #define TEXTDOMAIN NULL @@ -259,7 +263,7 @@ extern int ignore_interrupt(bool ignore); } while (0) #else #define elog elog_start(__FILE__, __LINE__, PG_FUNCNAME_MACRO), elog_finish -#endif /* PCLINT_CHECK */ +#endif extern void elog_start(const char* filename, int lineno, const char* funcname); extern void elog_finish(int elevel, const char* fmt, ...) 
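Editor's note on the elog.h hunk above: the reworked `ereport()` macro now gates DEBUG1..DEBUG5 messages on `log_min_messages`/`client_min_messages` before `errstart()` is ever evaluated, so suppressed debug logging costs almost nothing, while non-debug levels always fall through to the full path. A standalone model of that gate; the level values follow the usual PostgreSQL ordering and are assumptions, not copied from elog.h:

```c
#include <stdio.h>

/* Illustrative severity values in the usual PostgreSQL order (assumed). */
enum { DEBUG5 = 10, DEBUG2 = 13, DEBUG1 = 14, WARNING = 19, ERROR = 20 };

static int log_min_messages = WARNING;
static int client_min_messages = WARNING;

/* Mirrors the new ereport() gate: anything outside the DEBUG1..DEBUG5 band
 * always reaches errstart(); debug messages are dropped early unless some
 * *_min_messages setting would accept them. */
static int would_report(int elevel)
{
    return elevel > DEBUG1 || elevel < DEBUG5 ||
           log_min_messages <= elevel || client_min_messages <= elevel;
}

int main(void)
{
    printf("%d\n", would_report(DEBUG2)); /* 0: filtered before errstart */
    printf("%d\n", would_report(ERROR));  /* 1: always evaluated */
    log_min_messages = DEBUG5;
    printf("%d\n", would_report(DEBUG2)); /* 1: now allowed through */
    return 0;
}
```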
@@ -424,7 +428,11 @@ typedef struct FormatCallStack { while (0) // ADIO means async direct io +#ifndef ENABLE_LITE_MODE #define ADIO_RUN() if (g_instance.attr.attr_storage.enable_adio_function) { +#else +#define ADIO_RUN() if (false) { +#endif #define ADIO_ELSE() \ } \ @@ -433,6 +441,7 @@ typedef struct FormatCallStack { #define ADIO_END() } + // BFIO means buffer io #define BFIO_RUN() if (!g_instance.attr.attr_storage.enable_adio_function) { @@ -540,8 +549,8 @@ typedef enum { /* Other exported functions */ extern void DebugFileOpen(void); -extern char* unpack_sql_state(int sql_state); -extern char *plpgsql_get_sqlstate(int sqlcode); +extern const char* unpack_sql_state(int sql_state); +extern const char *plpgsql_get_sqlstate(int sqlcode); extern bool in_error_recursion_trouble(void); #ifdef HAVE_SYSLOG @@ -610,6 +619,15 @@ extern void SimpleLogToServer(int elevel, bool silent, const char* fmt, ...) errdetail("The distributed capability is not supported currently."))); \ } while (0) +/* This Macro reports an error when touching on the lite mode */ +#define FEATURE_ON_LITE_MODE_NOT_SUPPORTED() \ + do { \ + ereport(ERROR, \ + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), \ + errmsg("Un-support feature"), \ + errdetail("The feature is not supported on lite mode currently."))); \ + } while (0) + #define IPC_PERFORMANCE_LOG_OUTPUT(errorMessage) \ do \ if (module_logging_is_on(MOD_COMM_IPC) && \ diff --git a/src/include/utils/globalplancore.h b/src/include/utils/globalplancore.h index aa1cd73dd..382548a04 100644 --- a/src/include/utils/globalplancore.h +++ b/src/include/utils/globalplancore.h @@ -32,7 +32,12 @@ #include "storage/sinval.h" #include "utils/plancache.h" +#ifndef ENABLE_LITE_MODE #define GPC_NUM_OF_BUCKETS (128) +#else +#define GPC_NUM_OF_BUCKETS (2) +#endif + #define GPC_HTAB_SIZE (128) #define GLOBALPLANCACHEKEY_MAGIC (953717831) #define CAS_SLEEP_DURATION (2) @@ -206,6 +211,7 @@ extern void GPCResetAll(); void GPCCleanDatanodeStatement(int dn_stmt_num, const char* stmt_name); void GPCReGplan(CachedPlanSource* plansource); void CNGPCCleanUpSession(); +void GPCCleanUpSessionSavedPlan(); List* CopyLocalStmt(const List* stmt_list, const MemoryContext parent_cxt, MemoryContext* plan_context); bool SPIParseEnableGPC(const Node *node); void CleanSessGPCPtr(knl_session_context* currentSession); diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index 9e0baa111..f808ca256 100755 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -326,7 +326,6 @@ extern bool check_asp_flush_mode(char** newval, void** extra, GucSource source); /* in access/transam/xlog.c */ extern bool check_wal_buffers(int* newval, void** extra, GucSource source); -extern bool check_wal_insert_status_entries(int* newval, void** extra, GucSource source); extern void assign_xlog_sync_method(int new_sync_method, void* extra); /* in tcop/stmt_retry.cpp */ @@ -373,11 +372,18 @@ typedef enum { CANONICAL_PATHKEY = 256, /* Use canonicalize pathkeys directly */ INDEX_COST_WITH_LEAF_PAGES_ONLY = 512, /* compute index cost with consideration of leaf-pages-only */ PARTITION_OPFUSION = 1024, /* Enable partition opfusion */ - SPI_DEBUG = 2048, - RESOWNER_DEBUG = 4096, - A_STYLE_COERCE = 8192 + A_STYLE_COERCE = 2048, + PLPGSQL_STREAM_FETCHALL = 4096, /* fetch all tuple when has stream sql under plpgsql's for-loop */ + PREDPUSH_SAME_LEVEL = 8192, /* predpush same level */ + PARTITION_FDW_ON = 16384 /* support create foreign table on partitioned table */ } sql_beta_param; +typedef enum { + OFF_VECTOR_ENGINE, + 
FORCE_VECTOR_ENGINE, + OPT_VECTOR_ENGINE +} TryVectorEngineStrategy; + #define ENABLE_PRED_PUSH(root) \ ((PRED_PUSH & (uint)u_sess->attr.attr_sql.rewrite_rule) && permit_predpush(root)) @@ -409,6 +415,9 @@ typedef struct { } ConfFileLock; #define PG_LOCKFILE_SIZE 1024 + +#define CONFIG_BAK_FILENAME "postgresql.conf.bak" + extern void* pg_malloc(size_t size); extern char* xstrdup(const char* s); @@ -418,6 +427,7 @@ extern int find_guc_option(char** optlines, const char* opt_name, int* name_offset, int* name_len, int* value_offset, int* value_len, bool ignore_case); extern void modify_guc_lines(char*** optlines, const char** opt_name, char** copy_from_line); +extern void modify_guc_one_line(char*** guc_optlines, const char* opt_name, const char* copy_from_line); extern ErrCode copy_guc_lines(char** copy_to_line, char** optlines, const char** opt_name); extern ErrCode copy_asyn_lines(char* path, char** copy_to_line, const char** opt_name); @@ -456,7 +466,6 @@ extern void set_qunit_case_number_hook(int newval, void* extra); #endif extern GucContext get_guc_context(); -extern void InitializeNumLwLockPartitions(void); extern bool check_double_parameter(double* newval, void** extra, GucSource source); extern bool CheckReplChannel(const char* ChannelInfo); diff --git a/src/include/utils/guc_sql.h b/src/include/utils/guc_sql.h index 5027e1cc9..d8d3d86dc 100644 --- a/src/include/utils/guc_sql.h +++ b/src/include/utils/guc_sql.h @@ -4,9 +4,9 @@ * External declarations pertaining to backend/utils/misc/guc-file.l * and backend/utils/misc/guc/guc_sql.cpp * + * Portions Copyright (c) 2021, openGauss Contributors * Copyright (c) 2000-2012, PostgreSQL Global Development Group * Written by Peter Eisentraut . - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/utils/guc_sql.h * -------------------------------------------------------------------- @@ -15,7 +15,5 @@ #define GUC_SQL_H extern void InitSqlConfigureNames(); -extern char* apply_num_width(double num); -extern char* apply_num_format(double num); #endif /* GUC_SQL_H */ diff --git a/src/include/utils/int16.h b/src/include/utils/int16.h index 690b7a9e8..4cac3a70d 100644 --- a/src/include/utils/int16.h +++ b/src/include/utils/int16.h @@ -66,4 +66,4 @@ extern Datum bool_int16(PG_FUNCTION_ARGS); extern Datum int16_numeric(PG_FUNCTION_ARGS); extern Datum numeric_int16(PG_FUNCTION_ARGS); -#endif /* INT16_H */ +#endif /* INT16_H */ \ No newline at end of file diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h index f0679093d..31b855b09 100644 --- a/src/include/utils/inval.h +++ b/src/include/utils/inval.h @@ -81,20 +81,24 @@ extern void CacheInvalidateRelmap(Oid databaseId); extern void CacheInvalidateHeapTupleInplace(Relation relation, HeapTuple tuple); -extern void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg); - -extern void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg); - -extern void CallSyscacheCallbacks(int cacheid, uint32 hashvalue); - extern void inval_twophase_postcommit(TransactionId xid, uint16 info, void* recdata, uint32 len); -extern void CacheRegisterPartcacheCallback(PartcacheCallbackFunction func, Datum arg); + extern void CacheInvalidatePartcache(Partition partition); extern void CacheInvalidatePartcacheByTuple(HeapTuple partitionTuple); extern void CacheInvalidatePartcacheByPartid(Oid partid); extern void InvalidateSystemCaches(void); +extern void CacheRegisterThreadSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg); 
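Editor's note on the inval.h hunk here: the single callback registry (`CacheRegisterSyscacheCallback` and friends, removed above) is split into parallel thread-level and session-level registries; the session-level registrations and the `Call*`/`Invalidate*` declarations continue directly below. A hedged migration sketch, meant to compile inside the server tree rather than standalone; `RELOID` and the PostgreSQL-style callback signature are assumptions:

```cpp
/* Usage sketch only, not part of the patch. */
#include "postgres.h"
#include "utils/inval.h"
#include "utils/syscache.h"

static void MyRelCallback(Datum arg, int cacheid, uint32 hashvalue)
{
    /* drop any cached state derived from pg_class rows here */
}

void RegisterMyInvalCallbacks(void)
{
    /* before this patch: CacheRegisterSyscacheCallback(RELOID, MyRelCallback, (Datum)0); */

    /* thread-lifetime caches register with the thread registry: */
    CacheRegisterThreadSyscacheCallback(RELOID, MyRelCallback, (Datum)0);

    /* session-lifetime caches register with the session registry instead
     * (declared just below): */
    CacheRegisterSessionSyscacheCallback(RELOID, MyRelCallback, (Datum)0);
}
```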
+extern void CacheRegisterThreadRelcacheCallback(RelcacheCallbackFunction func, Datum arg); +extern void CacheRegisterThreadPartcacheCallback(PartcacheCallbackFunction func, Datum arg); +extern void CacheRegisterSessionSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg); +extern void CacheRegisterSessionRelcacheCallback(RelcacheCallbackFunction func, Datum arg); +extern void CacheRegisterSessionPartcacheCallback(PartcacheCallbackFunction func, Datum arg); +extern void CallThreadSyscacheCallbacks(int cacheid, uint32 hashvalue); +extern void CallSessionSyscacheCallbacks(int cacheid, uint32 hashvalue); +extern void InvalidateSessionSystemCaches(void); +extern void InvalidateThreadSystemCaches(void); extern void CacheInvalidateRelcacheAll(void); #endif /* INVAL_H */ diff --git a/src/include/utils/knl_catcache.h b/src/include/utils/knl_catcache.h new file mode 100644 index 000000000..2b43a6405 --- /dev/null +++ b/src/include/utils/knl_catcache.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_catcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_catcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_CATCACHE_H +#define KNL_CATCACHE_H +#include "utils/catcache.h" +#include "utils/fmgrtab.h" +#include "utils/atomic.h" + +extern void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int *attnos, Datum *srckeys, Datum *dstkeys); +extern void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int *attnos, Datum *keys); +extern HeapTuple CreateHeapTuple4BuiltinFunc(const Builtin_func *func, TupleDesc desc); +extern HeapTuple SearchBuiltinProcCacheMiss(int cache_id, int nkeys, Datum* arguments); +extern HeapTuple SearchPgAttributeCacheMiss(int cache_id, TupleDesc cc_tupdesc, int nkeys, const Datum* arguments); +extern bool IndexScanOK(int cache_id); +extern bool CatalogCacheCompareTuple( + const CCFastEqualFN *cc_fastequal, int nkeys, const Datum* cachekeys, const Datum* searchkeys); +extern uint32 CatalogCacheComputeTupleHashValue( + int cc_id, int* cc_keyno, TupleDesc cc_tupdesc, CCHashFN *cc_hashfunc, Oid cc_reloid, int nkeys, HeapTuple tuple); +extern uint32 CatalogCacheComputeHashValue(CCHashFN *cc_hashfunc, int nkeys, Datum *arguments); +HeapTuple GetPgAttributeAttrTuple(TupleDesc tupleDesc, const Form_pg_attribute attr); +void GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc); +void SearchCatCacheCheck(); +#endif \ No newline at end of file diff --git a/src/include/utils/knl_globalbasedefcache.h b/src/include/utils/knl_globalbasedefcache.h new file mode 100644 index 000000000..92fd85348 --- /dev/null +++ b/src/include/utils/knl_globalbasedefcache.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * knl_globalbasedefcache.h
+ *
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/utils/knl_globalbasedefcache.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#ifndef KNL_GLOBALBASEDEFCACHE_H
+#define KNL_GLOBALBASEDEFCACHE_H
+#include "nodes/memnodes.h"
+#include "postgres.h"
+#include "utils/knl_globalsyscache_common.h"
+#include "utils/knl_globalsystupcache.h"
+#include "utils/palloc.h"
+
+/*
+ * GSC's base class, normally used as the super class for RelCache and PartCache:
+ * (1). class GlobalTabDefCache for "RelCache", see details in knl_globaltabdefcache.cpp
+ * (2). class GlobalPartDefCache for "PartCache", see details in knl_globalpartdefcache.cpp
+ */
+class GlobalBaseDefCache : public BaseObject {
+public:
+    GlobalBaseDefCache(Oid dbOid, bool isShared, struct GlobalSysDBCacheEntry *entry, char relkind);
+    virtual ~GlobalBaseDefCache() {}
+
+    /* simple getter/setter routines in GSC base class */
+
+    /*
+     * Return the number of "active elements" in the current GSC object (RelCache or
+     * PartCache, depending on the inheriting class)
+     */
+    inline uint64 GetActiveElementsNum()
+    {
+        return m_bucket_list.GetActiveElementCount();
+    }
+
+    /*
+     * Return the number of "dead elements" in the current GSC object (RelCache or
+     * PartCache, depending on the inheriting class)
+     */
+    inline uint64 GetDeadElementsNum()
+    {
+        return m_dead_entries.GetLength();
+    }
+
+    /*
+     * Return the object lock used while a rel is being inserted into the current GSC
+     */
+    inline pthread_rwlock_t *GetHashValueLock(uint32 hash_value)
+    {
+        Assert(m_oid_locks != NULL);
+        Index hash_index = HASH_INDEX(hash_value, m_nbuckets);
+        return m_oid_locks + hash_index;
+    }
+    template
+    void RemoveAllTailElements();
+
+protected:
+    /* base class initialization function */
+    void Init(int nbucket);
+    void InitHashTable();
+
+    /*
+     * Cache entry lookup related functions
+     *
+     * Note: Normally, besides objOid we also pass in hash_value/hash_index to avoid
+     * hashfunc recalculation; the oid here is the inheriting class's relOid or
+     * partRelOid.
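+     *
+     * A sketch of the expected lookup pattern in a subclass (local variable names are
+     * illustrative only):
+     *
+     *     GlobalBaseEntry *entry = SearchReadOnly(relOid, hash_value);
+     *     if (entry != NULL) {
+     *         ... use the cached definition ...
+     *         entry->Release();
+     *     }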
+ */
+
+    /* search an entry with the given objOid and hash_value */
+    GlobalBaseEntry *SearchReadOnly(Oid objOid, uint32 hash_value);
+
+    /* search an entry with the given objOid/hash_index */
+    GlobalBaseEntry *FindEntryWithIndex(Oid objOid, Index hash_index, int *location);
+
+    /* check whether an entry with the given objOid and hash_index exists */
+    bool EntryExist(Oid objOid, Index hash_index);
+
+    /* functions to handle message invalidation */
+    template void Invalidate(Oid dbOid, Oid objOid);
+    template void InvalidateRelationNodeListBy(bool (*IsInvalidEntry)(GlobalBaseEntry *));
+
+    /* functions to remove/free an elem from the GSC hashtable */
+    template void HandleDeadEntry(GlobalBaseEntry *entry); /* remove from hashtable */
+    template void FreeDeadEntrys();                        /* free elem */
+
+    /* functions to handle GSC memory swapout */
+    template void ResetCaches();
+    template void RemoveTailElements(Index hash_index);
+
+    /* functions to add/remove an elem to/from the GSC hashtable */
+    template void RemoveElemFromBucket(GlobalBaseEntry *base);
+    template void AddHeadToBucket(Index hash_index, GlobalBaseEntry *base);
+
+    /* GSC identifier fields */
+    Oid m_db_oid;
+
+    /* GSC status control fields */
+    bool m_is_shared;
+    bool m_is_inited;
+    char m_relkind; /* dev-debug only, not used in real processing so far */
+
+    volatile uint32 *m_is_swappingouts;
+
+    /* GSC statistic information, assigned from the GlobalSysCacheStat class */
+    volatile uint64 *m_searches;
+    volatile uint64 *m_hits;
+    volatile uint64 *m_newloads;
+    volatile uint64 m_base_space;
+
+    /* GSC container fields */
+    GlobalBucketList m_bucket_list; /* GSC hashtable holding the buckets/elements */
+    int m_nbuckets;                 /* GSC hashtable's bucket num, assigned in the constructor */
+    pthread_rwlock_t *m_obj_locks;  /* GSC internal bucket-level locks, an array of
+                                       length m_nbuckets */
+    DllistWithLock m_dead_entries;  /* list of elems removed from the current GSC */
+
+    pthread_rwlock_t *m_oid_locks;  /* locks for GSC objects (partRelOid or relOid), keeping the
+                                       xact commit thread from conflicting with other threads */
+
+    /* GSC other fields */
+    struct GlobalSysDBCacheEntry *m_db_entry; /* pointer to the global DB-level syscache entry */
+};
+
+#endif
\ No newline at end of file
diff --git a/src/include/utils/knl_globalbucketlist.h b/src/include/utils/knl_globalbucketlist.h
new file mode 100644
index 000000000..f395b8ed4
--- /dev/null
+++ b/src/include/utils/knl_globalbucketlist.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * knl_globalbucketlist.h
+ *
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/utils/knl_globalbucketlist.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#ifndef KNL_GLOBALBUCKETLIST_H
+#define KNL_GLOBALBUCKETLIST_H
+#include "lib/dllist.h"
+#include "access/htup.h"
+
+#define forloopactivebucketlist(bucket_elt, m_active_bucket_list) \
+    for ((bucket_elt) = DLGetHead((m_active_bucket_list)); (bucket_elt);)
+
+
+#define forloopbucket(elt, bucket_elt)                              \
+    BucketEntry *bucket_entry = (BucketEntry *)DLE_VAL(bucket_elt); \
+    bucket_elt = DLGetSucc(bucket_elt);                             \
+    for ((elt) = DLGetHead(&bucket_entry->cc_bucket); (elt);)
+
+/*
+ * base struct of GSC/LSC's bucket entries
+ */
+struct BucketEntry {
+    Dllist cc_bucket;
+    Dlelem elem;
+};
+
+/*
+ * A hashtable (key->bucket) that works as a container holding GSC elements in
+ * BucketEntry format
+ *
+ * Note: globally shared.
+ *
+ * Reminding: GlobalBucketList is expected to be renamed later (TODO).
+ */
+class GlobalBucketList : public BaseObject {
+    /* Dllist holding the active buckets, and its protecting lock */
+    Dllist m_active_bucket_list;
+    slock_t m_active_bucket_list_lock;
+
+    /*
+     * bucket information of the current GSC, with length m_nbuckets
+     *
+     * Normally, each element type could be:
+     *   GlobalSysDBCacheEntry: when used as GlobalSysDBCache::m_bucket_list
+     *   RelationData: when used as GlobalTabDefCache::m_bucket_list
+     *   PartitionData: when used as GlobalPartDefCache::m_bucket_list
+     */
+    BucketEntry *m_bucket_entry; /* array of buckets */
+    int m_nbuckets;              /* array length */
+
+    /* record the number of elements in the current GSC */
+    volatile uint64 m_elem_count;
+
+public:
+    GlobalBucketList()
+    {
+        m_nbuckets = 0;
+        m_bucket_entry = NULL;
+        m_elem_count = 0;
+    }
+
+    /* init function */
+    void Init(int nbuckets);
+
+    /*
+     * Global bucket list operation functions, add/remove etc.
+     */
+
+    /* add element */
+    void AddHeadToBucket(Index hash_index, Dlelem *elem);
+
+    /* remove element */
+    void RemoveElemFromBucket(Dlelem *elem);
+
+    /* get tail index */
+    Index GetTailBucketIndex();
+
+    inline Dllist *GetBucket(Index hash_index)
+    {
+        return &(m_bucket_entry[hash_index].cc_bucket);
+    }
+
+    inline uint64 GetActiveElementCount()
+    {
+        return m_elem_count;
+    }
+};
+
+#endif
\ No newline at end of file
diff --git a/src/include/utils/knl_globaldbstatmanager.h b/src/include/utils/knl_globaldbstatmanager.h
new file mode 100644
index 000000000..05627768c
--- /dev/null
+++ b/src/include/utils/knl_globaldbstatmanager.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * knl_globaldbstatmanager.h
+ *
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/utils/knl_globaldbstatmanager.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#ifndef KNL_GLOBALDBSTATMANAGER_H
+#define KNL_GLOBALDBSTATMANAGER_H
+#include "utils/knl_globalsyscache_common.h"
+#include "utils/atomic.h"
+#include "nodes/memnodes.h"
+#include "utils/knl_globalsystabcache.h"
+#include "utils/knl_globaltabdefcache.h"
+#include "utils/knl_globalpartdefcache.h"
+#include "utils/knl_globalrelmapcache.h"
+
+const Oid ALL_DB_OID = (Oid)-1;
+const Oid ALL_REL_OID = (Oid)-1;
+const Index ALL_DB_INDEX = (Index)-1;
+const Index INVALID_INDEX = (Index)-2;
+const int INVALID_LOCATION = (int)-2;
+
+/*
+ * Data struct describing global DB objects' statistic info, mainly supporting the GSC
+ * memory swap algorithm
+ */
+typedef struct GlobalSysCacheStat {
+    Oid db_oid;
+    Index hash_index;
+    pg_atomic_uint64 tup_searches;
+    pg_atomic_uint64 tup_hits;
+    pg_atomic_uint64 tup_newloads;
+    pg_atomic_uint64 rel_searches;
+    pg_atomic_uint64 rel_hits;
+    pg_atomic_uint64 rel_newloads;
+    pg_atomic_uint64 part_searches;
+    pg_atomic_uint64 part_hits;
+    pg_atomic_uint64 part_newloads;
+    uint64 swapout_count;
+} GlobalSysCacheStat;
+
+/*
+ * Data structure describing a catalog table's statistic info in the current DB; normally
+ * each instance describes one cached "heap tuple" of a catalog table of the current database
+ */
+typedef struct GlobalCatalogTupleStat {
+    /* database oid & name (in datum) for the current GSC catalog sys-tuple */
+    Oid db_oid;
+    Datum db_name_datum;
+
+    /* rel oid & name (in datum) for the current GSC catalog sys-tuple */
+    Oid rel_oid;
+    Datum rel_name_datum;
+
+    int cache_id;
+
+    /* catalog tuple header info */
+    ItemPointerData self;
+    ItemPointerData ctid;
+    uint16 infomask;
+    uint16 infomask2;
+    uint64 hash_value;
+
+    uint64 refcount;
+} GlobalCatalogTupleStat;
+
+struct GlobalCatalogTableStat {
+    Oid db_id;
+    Datum db_name;
+    Oid rel_id;
+    Form_pg_class rd_rel;
+    TupleDesc rd_att;
+};
+
+/*
+ * Global handler of each DB object's info, through which pointers to its underlying
+ * SysTabCache, TabDefCache, PartDefCache and RelMapCache handlers are available
+ *
+ * Stored as an element of GlobalBucketList (hashtable; TODO: this may change), globally visible
+ */
+typedef struct GlobalSysDBCacheEntry {
+    /*
+     * dbOid/dbName of the current database handler
+     */
+    Oid m_dbOid;
+    char *m_dbName;
+
+    /*
+     * flag indicating whether the current DB entry has been spilled out; if true, we need to
+     * fetch its newest version via GlobalSysDBCache::GetGSCEntry(dbOid, dbName)
+     */
+    bool m_isDead;
+
+    /* hash value of dbOid in GlobalBucketList */
+    uint32 m_hash_value;
+
+    /* bucket index in GlobalBucketList's internal buckets array */
+    Index m_hash_index;
+
+    /* number of memory contexts in the current DB info entry */
+    uint32 m_memcxt_nums;
+
+    /* index of the memory context, supporting GetRandomMemCxt()'s random fetch */
+    pg_atomic_uint32 m_memcxt_index;
+
+    /*
+     * pointers to the GSC handlers of the current DB: CatCache, RelCache, PartCache, RelMapCache
+     */
+    GlobalSysTabCache *m_systabCache;
+    GlobalTabDefCache *m_tabdefCache;
+    GlobalPartDefCache *m_partdefCache;
+    GlobalRelMapCache *m_relmapCache;
+
+    /* reference count, normally used in the GSC memory-spilling algorithm */
+    pg_atomic_uint64 m_refcount;
+
+    /* list element of the current DB entry in GlobalBucketList */
+    Dlelem m_cache_elem;
+
+    /* pointer to the GSC stats info of the current database */
+    
GlobalSysCacheStat *m_dbstat; + + /* a rough estimate of memory context's used space */ + pg_atomic_uint64 m_rough_used_space; + + /* array of memory context hold by current database */ + MemoryContext m_mem_cxt_groups[FLEXIBLE_ARRAY_MEMBER]; + + MemoryContext GetRandomMemCxt() + { + uint64 index = pg_atomic_fetch_add_u32(&m_memcxt_index, 1); + return m_mem_cxt_groups[(index & (m_memcxt_nums -1 ))]; + } + + uint64 GetDBTotalSpace() + { + Size total_space = 0; + for (uint32 i = 0; i < m_memcxt_nums; i++) { + total_space += ((AllocSet)m_mem_cxt_groups[i])->totalSpace; + } + return total_space + m_tabdefCache->GetSysCacheSpaceNum(); + } + + uint64 GetDBUsedSpace() + { + Size total_space = 0; + for (uint32 i = 0; i < m_memcxt_nums; i++) { + total_space += AllocSetContextUsedSpace((AllocSet)m_mem_cxt_groups[i]); + } + return total_space + m_tabdefCache->GetSysCacheSpaceNum(); + } + + void MemoryEstimateAdd(uint64 size); + void MemoryEstimateSub(uint64 size); + + template void ResetDBCache(); + void RemoveTailElements(); + + static void Free(GlobalSysDBCacheEntry *entry); + void Release(); +} GlobalSysDBCacheEntry; + +typedef struct GlobalDBStatManager { + int m_nbuckets; + int *m_dbstat_nbuckets; + List **m_dbstat_buckets; + + + uint32 m_max_backend_id; + GlobalSysDBCacheEntry **m_mydb_refs; // record all threads' ref + int *m_mydb_roles; + pthread_rwlock_t *m_backend_ref_lock; + + MemoryContext m_dbstat_memcxt; + + GlobalDBStatManager(); + + void InitDBStat(int nbuckets, MemoryContext top); + + void RecordSwapOutDBEntry(GlobalSysDBCacheEntry *entry); + void DropDB(Oid db_id, Index hash_index); + void GetDBStat(GlobalSysCacheStat *db_stat); + + bool IsDBUsedByProc(GlobalSysDBCacheEntry *entry); + void RepallocThreadEntryArray(Oid backend_id); + void ThreadHoldDB(GlobalSysDBCacheEntry *db); +} GlobalDBStatManager; + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_globalpartdefcache.h b/src/include/utils/knl_globalpartdefcache.h new file mode 100644 index 000000000..848f1a201 --- /dev/null +++ b/src/include/utils/knl_globalpartdefcache.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * ---------------------------------------------------------------------------------------
+ *
+ * knl_globalpartdefcache.h
+ *
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/utils/knl_globalpartdefcache.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#ifndef KNL_GLOBALPARTDEFCACHE_H
+#define KNL_GLOBALPARTDEFCACHE_H
+
+#include "utils/partcache.h"
+#include "utils/knl_globalbasedefcache.h"
+#include "utils/knl_globalsyscache_common.h"
+#include "nodes/memnodes.h"
+
+/*
+ * GlobalPartDefCache refers to PartCache; it is the cache for the pg_partition catalog.
+ * partOid is the oid of a subPartition (the key of pg_partition) of a partition table.
+ */
+class GlobalPartDefCache : public GlobalBaseDefCache {
+public:
+    GlobalPartDefCache(Oid dbOid, bool isShared, struct GlobalSysDBCacheEntry *entry);
+    ~GlobalPartDefCache() {}
+
+    void Init();
+    void Insert(Partition part, uint32 hash_value);
+
+    /*
+     * Note: partRelOid is the key of the pg_partition entry
+     */
+    GlobalPartitionEntry *SearchReadOnly(Oid partRelOid, uint32 hash_value)
+    {
+        GlobalBaseDefCache::FreeDeadEntrys();
+        GlobalPartitionEntry *entry =
+            (GlobalPartitionEntry *)GlobalBaseDefCache::SearchReadOnly(partRelOid, hash_value);
+        return entry;
+    }
+    template
+    void ResetPartCaches()
+    {
+        if (!m_is_inited) {
+            return;
+        }
+        GlobalBaseDefCache::ResetCaches();
+        GlobalBaseDefCache::FreeDeadEntrys();
+    }
+
+    inline void Invalidate(Oid dbOid, Oid partRelOid)
+    {
+        GlobalBaseDefCache::Invalidate(dbOid, partRelOid);
+    }
+
+    inline uint64 GetSysCacheSpaceNum()
+    {
+        return m_base_space;
+    }
+protected:
+    GlobalPartitionEntry *CreateEntry(Partition part);
+};
+
+#endif
diff --git a/src/include/utils/knl_globalrelmapcache.h b/src/include/utils/knl_globalrelmapcache.h
new file mode 100644
index 000000000..bb2d0744e
--- /dev/null
+++ b/src/include/utils/knl_globalrelmapcache.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * --------------------------------------------------------------------------------------- + * + * knl_globalrelmapcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_globalrelmapcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_GLOBALRELMAPCACHE_H +#define KNL_GLOBALRELMAPCACHE_H + +#include "postgres.h" +#include +#include "utils/relmapper.h" + +class GlobalRelMapCache : public BaseObject { +public: + GlobalRelMapCache(Oid dbOid, bool shared); + ~GlobalRelMapCache(); + + /* phase 1 and phase 2 init function */ + void Init(); + void InitPhase2(); + + /* relmap file handling function */ + void CopyInto(RelMapFile *rel_map); + void UpdateBy(RelMapFile *rel_map); + +private: + Oid m_dbOid; + volatile bool m_isInited; + bool m_isShared; + pthread_rwlock_t m_lock; + RelMapFile m_relmap; +}; + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_globalsyscache_common.h b/src/include/utils/knl_globalsyscache_common.h new file mode 100644 index 000000000..074a3a8c4 --- /dev/null +++ b/src/include/utils/knl_globalsyscache_common.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_globalsyscache_common.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_globalsyscache_common.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_GLOBALSYSCACHE_COMMON_H +#define KNL_GLOBALSYSCACHE_COMMON_H + +#include "catalog/pg_class.h" +#include "nodes/memnodes.h" +#include "utils/knl_globalbucketlist.h" + +/* + * Given a hash value and the size of the hash table, find the bucket + * in which the hash value belongs. Since the hash table must contain + * a power-of-2 number of elements, this is a simple bitmask. 
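+ *
+ * For example, with sz = 128 the mask is 127 (0x7f), so HASH_INDEX(h, 128)
+ * simply keeps the low 7 bits of h.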
+ */
+#define HASH_INDEX(h, sz) ((Index)((h) & ((sz)-1)))
+
+
+/*
+ * For a tuple that is uncommitted or being deleted, we cannot be sure whether it will be
+ * committed or aborted, so just store it in the local catcache and invalidate it by SI message
+ */
+inline bool CanTupleInertGSC(HeapTuple tuple)
+{
+    if (tuple->t_tableOid == InvalidOid) {
+        // this is a tuple built by heap_form_tuple
+        Assert(tuple->tupTableType == HEAP_TUPLE);
+        Assert(tuple->t_bucketId == InvalidBktId);
+        Assert(tuple->t_self.ip_blkid.bi_hi == InvalidBlockNumber >> 16);
+        Assert(tuple->t_self.ip_blkid.bi_lo == (InvalidBlockNumber & 0xffff));
+        Assert(tuple->t_self.ip_posid == InvalidOffsetNumber);
+        return true;
+    }
+    bool can_insert_into_gsc =
+        HeapTupleHeaderXminCommitted(tuple->t_data) && !TransactionIdIsValid(HeapTupleGetRawXmax(tuple));
+    return can_insert_into_gsc;
+}
+
+extern void TopnLruMoveToFront(Dlelem *e, Dllist *list, pthread_rwlock_t *lock, int location);
+
+extern bytea *CopyOption(bytea *options);
+extern TupleDesc CopyTupleDesc(TupleDesc tupdesc);
+Relation CopyRelationData(Relation newrel, Relation rel, MemoryContext rules_cxt, MemoryContext rls_cxt,
+    MemoryContext index_cxt);
+
+void AcquireGSCTableReadLock(bool *has_concurrent_lock, pthread_rwlock_t *concurrent_lock);
+void ReleaseGSCTableReadLock(bool *has_concurrent_lock, pthread_rwlock_t *concurrent_lock);
+
+enum GlobalObjDefEntry {GLOBAL_RELATION_ENTRY, GLOBAL_PARTITION_ENTRY};
+
+struct GlobalBaseEntry {
+    GlobalObjDefEntry type;
+    Oid oid;
+    volatile uint64 refcount;
+    Dlelem cache_elem;
+    void Release();
+    template
+    static void Free(GlobalBaseEntry *entry);
+
+    void FreeError()
+    {
+        if (type == GLOBAL_RELATION_ENTRY) {
+            Free(this);
+        } else {
+            Assert(type == GLOBAL_PARTITION_ENTRY);
+            Free(this);
+        }
+    }
+};
+struct GlobalRelationEntry : public GlobalBaseEntry {
+    Relation rel;
+    MemoryContext rel_mem_manager;
+};
+struct GlobalPartitionEntry : public GlobalBaseEntry {
+    Partition part;
+};
+
+void CopyPartitionData(Partition dest_partition, Partition src_partition);
+
+#ifdef CACHEDEBUG
+    #define GSC_CACHE1_elog(b) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b)))
+    #define GSC_CACHE2_elog(b, c) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b, c)))
+    #define GSC_CACHE3_elog(b, c, d) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b, c, d)))
+    #define GSC_CACHE4_elog(b, c, d, e) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b, c, d, e)))
+    #define GSC_CACHE5_elog(b, c, d, e, f) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b, c, d, e, f)))
+    #define GSC_CACHE6_elog(b, c, d, e, f, g) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b, c, d, e, f, g)))
+    #define GSC_CACHE7_elog(b, c, d, e, f, g, h) ereport(DEBUG2, (errmodule(MOD_GSC), errmsg(b, c, d, e, f, g, h)))
+#else
+    #define GSC_CACHE1_elog(b)
+    #define GSC_CACHE2_elog(b, c)
+    #define GSC_CACHE3_elog(b, c, d)
+    #define GSC_CACHE4_elog(b, c, d, e)
+    #define GSC_CACHE5_elog(b, c, d, e, f)
+    #define GSC_CACHE6_elog(b, c, d, e, f, g)
+    #define GSC_CACHE7_elog(b, c, d, e, f, g, h)
+#endif
+
+#ifdef ENABLE_LITE_MODE
+    const int LOCAL_INIT_RELCACHE_SIZE = 128;
+    const int LOCAL_INIT_PARTCACHE_SIZE = 128;
+    const int GLOBAL_INIT_RELCACHE_SIZE = LOCAL_INIT_RELCACHE_SIZE << 1;
+    const int GLOBAL_INIT_PARTCACHE_SIZE = LOCAL_INIT_PARTCACHE_SIZE << 1;
+    const int INIT_DB_SIZE = 64;
+    const int MinHashBucketSize = 4;
+    const uint64 GLOBAL_DB_MEMORY_MAX = 4 * 1024 * 1024;
+#else
+    const int LOCAL_INIT_RELCACHE_SIZE = 512;
+    const int LOCAL_INIT_PARTCACHE_SIZE = 512;
+    const int GLOBAL_INIT_RELCACHE_SIZE = LOCAL_INIT_RELCACHE_SIZE << 1;
+    
const int GLOBAL_INIT_PARTCACHE_SIZE = LOCAL_INIT_PARTCACHE_SIZE << 1; + const int INIT_DB_SIZE = 1024; + const int MinHashBucketSize = 32; + const uint64 GLOBAL_DB_MEMORY_MAX = 8 * 1024 * 1024; +#endif + +const uint64 GLOBAL_DB_MEMORY_MIN = 2 * 1024 * 1024; +const uint64 MemIncreEveryTrans = 1024 * 1024; +const int GLOBAL_BUCKET_DEFAULT_TOP_N = 3; +const int32 MAX_GSC_LIST_LENGTH = 2048; +const int32 MAX_LSC_LIST_LENGTH = 1024; +const int CHUNK_ALGIN_PAD = ALLOC_CHUNKHDRSZ + 8; + +inline uint64 GetSwapOutNum(bool blow_threshold, uint64 length) +{ + if (blow_threshold) { + return length >> 2; + } else { + return length >> 1; + } +} + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_globalsysdbcache.h b/src/include/utils/knl_globalsysdbcache.h new file mode 100644 index 000000000..05f194960 --- /dev/null +++ b/src/include/utils/knl_globalsysdbcache.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_globalsysdbcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_globalsysdbcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_GLOBALSYSDBCACHE_H +#define KNL_GLOBALSYSDBCACHE_H +#include "access/skey.h" +#include "utils/knl_globaldbstatmanager.h" +#include "utils/relcache.h" + + +enum DynamicGSCMemoryLevel { + DynamicGSCMemoryLow = 0, /* less than 80% */ + DynamicGSCMemoryHigh, /* less than 100% */ + DynamicGSCMemoryOver, /* less than 150% */ + DynamicGSCMemoryOutOfControl /* more than 150% */ +}; + +enum DynamicHashBucketStrategy { + DynamicHashBucketDefault, + DynamicHashBucketHalf, + DynamicHashBucketQuarter, + DynamicHashBucketEighth, + DynamicHashBucketMin +}; + +enum GscStatDetail { + GscStatDetailDBInfo, + GscStatDetailTable, + GscStatDetailTuple +}; + +extern bool atomic_compare_exchange_u32(volatile uint32* ptr, uint32 expected, uint32 newval); + +/* + * GlobalSysDBCache is DB instance level object, oversee all database object operations, + * each database plays as a "DB entry" where its global caching status, handlers to CatCache, + * RelCache/PartCache are available, more details see "struct GlobalSysDBCacheEntry" + */ +class GlobalSysDBCache : public BaseObject { +public: + GlobalSysDBCache(); + + /* + * Find temp GSC entry with given dbOid, there are three cases: + * case1: return NULL if dbOid does not index to any db object or Cache not loaded + * case2: return an entry can use normally + * + * Note: for the term "temp GSC entry", we use it for cache invalidation with desired + * dbOid and no need to create one if not found + */ + GlobalSysDBCacheEntry *FindTempGSCEntry(Oid dbOid); + void ReleaseTempGSCEntry(GlobalSysDBCacheEntry *entry); + + /* + * Find normal GSC entry with given dbOid, if DB is not loaded, do normal init and + * return to caller, the real content is build up by LSC access + */ + GlobalSysDBCacheEntry *GetGSCEntry(Oid 
dbOid, char *dbName);
+    void ReleaseGSCEntry(GlobalSysDBCacheEntry *entry);
+
+    /* Fetch the shared GSC entry */
+    GlobalSysDBCacheEntry *GetSharedGSCEntry();
+
+    /* Remove the GSC entry from m_bucket */
+    void DropDB(Oid dbOid, bool need_clear);
+
+    void Init(MemoryContext parent);
+
+    inline bool HashSearchSharedRelation(Oid relOid)
+    {
+        if (relOid > (Oid)FirstBootstrapObjectId) {
+            return false;
+        }
+        return m_rel_store_in_shared[relOid];
+    }
+
+    inline bool IsCritialForInitSysCache(Oid relOid)
+    {
+        if (relOid > (Oid)FirstNormalObjectId) {
+            return false;
+        }
+        return m_rel_for_init_syscache[relOid];
+    }
+
+    inline bool RelationHasSysCache(Oid relOid)
+    {
+        if (relOid > (Oid)FirstNormalObjectId) {
+            return false;
+        }
+        return m_syscache_relids[relOid];
+    }
+
+    void UpdateGSCConfig(int tmp_global_syscache_threshold)
+    {
+        Assert(tmp_global_syscache_threshold > 0);
+        if (m_global_syscache_threshold == tmp_global_syscache_threshold) {
+            return;
+        }
+        m_global_syscache_threshold = tmp_global_syscache_threshold;
+        REAL_GSC_MEMORY_SPACE = ((uint64)m_global_syscache_threshold) << 10;
+        SAFETY_GSC_MEMORY_SPACE = (uint64)(REAL_GSC_MEMORY_SPACE * 0.8);
+        MAX_GSC_MEMORY_SPACE = (uint64)(REAL_GSC_MEMORY_SPACE * 1.5);
+        EXPECT_MAX_DB_COUNT = REAL_GSC_MEMORY_SPACE / (GLOBAL_DB_MEMORY_MIN) + 1;
+#ifdef ENABLE_LITE_MODE
+        /* Since gsc_rough_used_space is an estimate of the memory, reduce the upper limit
+         * in lite mode; 1.5 * 0.7 = 1.05 is the effective upper limit. */
+        REAL_GSC_MEMORY_SPACE = REAL_GSC_MEMORY_SPACE * 0.7;
+        SAFETY_GSC_MEMORY_SPACE = SAFETY_GSC_MEMORY_SPACE * 0.7;
+        MAX_GSC_MEMORY_SPACE = MAX_GSC_MEMORY_SPACE * 0.7;
+#endif
+    }
+
+    inline bool StopInsertGSC()
+    {
+        return gsc_rough_used_space > REAL_GSC_MEMORY_SPACE;
+    }
+
+    inline bool MemoryUnderControl()
+    {
+        return gsc_rough_used_space < SAFETY_GSC_MEMORY_SPACE;
+    }
+
+    void Refresh(GlobalSysDBCacheEntry *entry);
+
+    List* GetGlobalDBStatDetail(Oid dbOid, Oid relOid, GscStatDetail stat_detail);
+    void Clean(Oid dbOid);
+    void InvalidAllRelations();
+
+    /* in standby mode, if wal_level is lower than hot_standby, then GSC is unusable */
+    bool hot_standby;
+    void RefreshHotStandby();
+    bool recovery_finished;
+
+    DynamicHashBucketStrategy dynamic_hash_bucket_strategy;
+
+    /** a rough estimate of the memory contexts' used space;
+     * it does not include the headers of the blocks belonging to the memcxt.
+     * For example: a 1GB memcxt with a block size of 1KB has 1,000,000 blocks; with a
+     * 64-byte block header, 64MB falls outside the estimate. */
+    pg_atomic_uint64 gsc_rough_used_space;
+    void GSCMemThresholdCheck();
+private:
+    void FreeDeadDBs();
+    void HandleDeadDB(GlobalSysDBCacheEntry *exist_db);
+    void SwapOutDBEntry(Index hash_index);
+    void SwapOutTailBucket();
+    void UpdateBucketSpace(Index hash_index);
+
+    void CalcDynamicHashBucketStrategy();
+    DynamicGSCMemoryLevel CalcDynamicGSCMemoryLevel(uint64 total_space);
+    void SwapOutGivenDBContent(Index hash_index, Oid db_id, DynamicGSCMemoryLevel mem_level);
+    void SwapoutGivenDBInstance(Index hash_index, Oid db_id);
+
+    GlobalSysDBCacheEntry *CreateGSCEntry(Oid dbOid, Index hash_index, char *dbName);
+    GlobalSysDBCacheEntry *CreateSharedGSCEntry();
+    GlobalSysDBCacheEntry *SearchGSCEntry(Oid dbOid, Index hash_index, char *dbName);
+    GlobalSysDBCacheEntry *FindGSCEntryWithoutLock(Oid dbOid, Index hash_index, int *location);
+
+    void InitRelStoreInSharedFlag();
+    void InitRelForInitSysCacheFlag();
+    void InitSysCacheRelIds();
+
+    void RemoveElemFromBucket(GlobalSysDBCacheEntry *entry);
+
+    void AddHeadToBucket(Index hash_index, GlobalSysDBCacheEntry *entry);
+
+    /* Flag to indicate if inited */
+    bool m_is_inited;
+
+    /* Field holding the shared-catalog GlobalSysDBCacheEntry */
+    GlobalSysDBCacheEntry *m_global_shared_db_entry;
+
+    /*
+     * A hash table (key: dbOid) holding all non-shared GlobalSysDBCacheEntry objects; for
+     * more details see class GlobalBucketList's intro
+     */
+    GlobalBucketList m_bucket_list;
+
+    /* Array holding each bucket lock and mem-usage, with length m_nbuckets */
+    pthread_rwlock_t *m_db_locks;
+    int m_nbuckets;
+
+    /* Fields holding the dbstat manager, used for memory spill out */
+    GlobalDBStatManager m_dbstat_manager;
+
+    /* dllist holding dead db objects, element type is GlobalSysDBCacheEntry */
+    DllistWithLock m_dead_dbs;
+
+    /* Global memory control fields */
+    MemoryContext m_global_sysdb_mem_cxt;
+
+    volatile uint32 m_is_memorychecking;
+    Index m_swapout_hash_index;
+
+    int m_global_syscache_threshold;
+
+    /* Fields attached to GUCs, not changed frequently */
+    uint64 REAL_GSC_MEMORY_SPACE;
+    uint64 SAFETY_GSC_MEMORY_SPACE;
+    uint64 MAX_GSC_MEMORY_SPACE;
+    uint64 EXPECT_MAX_DB_COUNT;
+
+    static const int FirstBootstrapObjectId = 10000;
+    static const int FirstNormalObjectId = 16384;
+    bool m_rel_store_in_shared[FirstBootstrapObjectId];
+    bool m_rel_for_init_syscache[FirstNormalObjectId];
+    bool m_syscache_relids[FirstNormalObjectId];
+
+};
+void NotifyGscRecoveryFinished();
+extern Datum gs_gsc_dbstat_info(PG_FUNCTION_ARGS);
+extern Datum gs_gsc_clean(PG_FUNCTION_ARGS);
+extern Datum gs_gsc_catalog_detail(PG_FUNCTION_ARGS);
+extern Datum gs_gsc_table_detail(PG_FUNCTION_ARGS);
+extern int ResizeHashBucket(int origin_nbucket, DynamicHashBucketStrategy strategy);
+#endif
\ No newline at end of file
diff --git a/src/include/utils/knl_globalsystabcache.h b/src/include/utils/knl_globalsystabcache.h
new file mode 100644
index 000000000..c3a54c623
--- /dev/null
+++ b/src/include/utils/knl_globalsystabcache.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * knl_globalsystabcache.h
+ *
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/utils/knl_globalsystabcache.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+#ifndef KNL_GLOBALSYSTABCACHE_H
+#define KNL_GLOBALSYSTABCACHE_H
+#include "access/skey.h"
+#include "utils/knl_globalsyscache_common.h"
+#include "utils/atomic.h"
+#include "utils/relcache.h"
+#include "utils/knl_globalsystupcache.h"
+#include "utils/rel.h"
+
+/*
+ * GlobalSysTabCache is a database-level object; by design it contains the table-level
+ * CatCaches of all non-shared catalogs. Note that shared catalogs are not considered
+ * part of the current database.
+ */
+class GlobalSysTabCache : public BaseObject {
+public:
+    /* Constructor and destructor */
+    GlobalSysTabCache(Oid dbOid, bool isShared, struct GlobalSysDBCacheEntry *dbEntry);
+    ~GlobalSysTabCache() {}
+
+    /* GlobalSysTabCache's export routines */
+    void Init();
+
+    /*
+     * Function to return the list of tuple stats for a given relOid; the tuple stat
+     * object type is "GlobalCatalogTupleStat"
+     */
+    List *GetCatalogTupleStats(Oid relOid);
+
+    /*
+     * Function to find the GlobalSysTupCache object with a given cache_id
+     */
+    GlobalSysTupCache *CacheIdGetGlobalSysTupCache(int cache_id);
+
+    /*
+     * Reset all underlying CatCaches: mark each systup as dead and put it on the
+     * dead_list; the memory free is handled by the next systup search, see the
+     * invocation of GlobalSysTupCache::FreeDeadCls() for details
+     */
+    template
+    void ResetCatCaches();
+
+    void RemoveAllTailElements();
+
+    /*
+     * Invalidate/Reset CatCaches with the given cache_id and hash_value; note that this
+     * only marks systups as "dead" and puts the dead systups on the "dead-list"
+     */
+    void InvalidTuples(int cache_id, uint32 hash_value, bool reset);
+
+    /*
+     * Return the number of "active elements" in the current CatCache
+     *
+     * Reminding: we may abstract fetching active/dead/space of every syscache object
+     * into an interface; let's do it later
+     */
+    inline uint64 GetActiveElementsNum()
+    {
+        return m_tup_count;
+    }
+
+    /* Return the number of "dead elements" in the current CatCache */
+    inline uint64 GetDeadElementsNum()
+    {
+        uint64 dead_count = 0;
+        for (int i = 0; i < SysCacheSize; i++) {
+            // shared table is separated from normal db
+            if (m_global_systupcaches[i] == NULL) {
+                continue;
+            }
+            dead_count += m_global_systupcaches[i]->GetDeadNum();
+        }
+        return dead_count;
+    }
+
+    /* Return the memory consumption of the current CatCache */
+    inline uint64 GetSysCacheSpaceNum()
+    {
+        return m_tup_space + (sizeof(GlobalCatCTup) + MAXIMUM_ALIGNOF) * m_tup_count;
+    }
+
+private:
+    /* oid of the current DB for this GlobalSysTabCache object, i.e. its identifier */
+    Oid m_dbOid;
+    volatile bool m_isInited;
+
+    /*
+     * Flag indicating whether this cache contains shared catalogs; true means the current
+     * GlobalSysTabCache is the special one that by design contains all shared catalog
+     * tables (instance level)
+     */
+    bool m_isShared;
+
+    /*
+     * array of GlobalSysTupCache (table level) and their control locks in the current
+     * GlobalSysTabCache (db level), with length SysCacheSize
+     */
+    GlobalSysTupCache **m_global_systupcaches;
+    pthread_rwlock_t *m_systab_locks;
+
+    struct GlobalSysDBCacheEntry *m_dbEntry;
+
+    /* stat info fields */
+    volatile uint64 m_tup_count;
+    volatile uint64 m_tup_space;
+};
+
+#endif
\ No newline at end of file
diff --git a/src/include/utils/knl_globalsystupcache.h b/src/include/utils/knl_globalsystupcache.h
new file mode 100644
index 000000000..662aca1b9
--- /dev/null
+++ b/src/include/utils/knl_globalsystupcache.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * knl_globalsystupcache.h
+ *
+ *
+ *
+ * IDENTIFICATION
+ *        src/include/utils/knl_globalsystupcache.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+/*
+ * Given a hash value and the size of the hash table, find the bucket
+ * in which the hash value belongs. Since the hash table must contain
+ * a power-of-2 number of elements, this is a simple bitmask.
+ */
+#ifndef KNL_GLOBALSYSTUPCACHE_H
+#define KNL_GLOBALSYSTUPCACHE_H
+#include "utils/atomic.h"
+#include "access/htup.h"
+#include "utils/knl_globalsyscache_common.h"
+#include "utils/relcache.h"
+#include "access/skey.h"
+#include "utils/syscache.h"
+#define CATCACHE_MAXKEYS 4
+
+#ifdef CACHEDEBUG
+#define CACHE1_elog(a, b) ereport(a, (errmsg(b)))
+#define CACHE2_elog(a, b, c) ereport(a, (errmsg(b, c)))
+#define CACHE3_elog(a, b, c, d) ereport(a, (errmsg(b, c, d)))
+#define CACHE4_elog(a, b, c, d, e) ereport(a, (errmsg(b, c, d, e)))
+#define CACHE5_elog(a, b, c, d, e, f) ereport(a, (errmsg(b, c, d, e, f)))
+#define CACHE6_elog(a, b, c, d, e, f, g) ereport(a, (errmsg(b, c, d, e, f, g)))
+#else
+#define CACHE1_elog(a, b)
+#define CACHE2_elog(a, b, c)
+#define CACHE3_elog(a, b, c, d)
+#define CACHE4_elog(a, b, c, d, e)
+#define CACHE5_elog(a, b, c, d, e, f)
+#define CACHE6_elog(a, b, c, d, e, f, g)
+#endif
+/* function computing a datum's hash */
+typedef uint32 (*CCHashFN)(Datum datum);
+
+/* function computing equality of two datums */
+typedef bool (*CCFastEqualFN)(Datum a, Datum b);
+
+/*
+ * Global SysCache object definitions; please understand them in terms of the CatCache design:
+ *   - GlobalSysTupCache -> CatCache
+ *     cc_bucket: an array of buckets with a Dllist as the array element
+ *     cc_list: a dllist with GlobalCatCList as its list element
+ *   - GlobalCatCTup -> CatCTup
+ *   - GlobalCatCList -> CatCList
+ */
+class GlobalSysTupCache;
+
+/*
+ * Refers to "CatCTup" in non-GSC mode; normally consider it a wrapper data structure
+ * around a catalog table's HeapTuple cached in GSC; most fields are similar to those
+ * declared in CatCTup
+ *
+ * Briefly, GlobalCatCTup describes a cached catalog tuple for GSC
+ */
+struct GlobalCatCTup {
+    int ct_magic; /* for identifying CatCTup entries */
+#define CT_MAGIC 0x57261502
+    bool canInsertGSC; /* ddl tuple? */
+    bool dead;         /* dead flag, swapout or invalid */
+    uint32 hash_value; /* hash value for this tuple's keys */
+
+    /* Pointer to the GlobalSysTupCache object it belongs to */
+    GlobalSysTupCache *my_cache;
+
+    /* number of active references */
+    pg_atomic_uint64 refcount;
+
+    /*
+     * Lookup keys for the entry. By-reference datums point into the tuple for
+     * positive cache entries
+     */
+    Datum keys[CATCACHE_MAXKEYS];
+
+    /*
+     * Each tuple in a cache is a member of a Dllist that stores the elements
+     * of its hash bucket.
+     */
+    Dlelem cache_elem; /* list member of per-bucket list */
+    HeapTupleData tuple; /* tuple management header */
+
+    void Release();
+};
+
+/*
+ * Refers to "CatCList" in non-GSC mode; normally consider it a wrapper data structure
+ * around catalog table HeapTuples cached in GSC; most fields are similar to those
+ * declared in CatCList
+ *
+ * Briefly, GlobalCatCList works as a partial search result list (of GlobalCatCTup) which
+ * satisfies the first N keys of a catalog index
+ */
+struct GlobalCatCList {
+    int cl_magic; /* for identifying CatCList entries */
+#define CL_MAGIC 0x52765103
+    uint32 hash_value;  /* hash value for lookup keys */
+    bool ordered;       /* members listed in index order? */
+    bool canInsertGSC;  /* contain ddl tuple? */
+    short nkeys;        /* number of lookup keys specified */
+    int n_members;      /* number of member tuples */
+    GlobalSysTupCache *my_cache; /* used to free when palloc fails */
+    pg_atomic_uint64 refcount;   /* number of active references */
+
+    /*
+     * Lookup keys for the entry, with the first nkeys elements being valid.
+     * All by-reference datums are separately allocated.
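+     *
+     * For example (hypothetical), a 2-key lookup against a 4-key cache stores the
+     * two searched datums in keys[0..1]; keys[2..3] remain unset.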
+     */
+    Datum keys[CATCACHE_MAXKEYS];
+    Dlelem cache_elem; /* list member of per-catcache list */
+    GlobalCatCTup *members[FLEXIBLE_ARRAY_MEMBER];
+
+    void Release();
+};
+
+/*
+ * A wrapper data structure keeping relation related information
+ *
+ * Reminding: we may remove it later, as it does not refer to a concrete module and exists
+ * just for parameter-passing convenience
+ */
+typedef struct CatTupRelInfoMsg {
+    const char *cc_relname;   /* name of relation the tuples come from */
+    TupleDesc cc_tupdesc;     /* tuple descriptor (copied from reldesc) */
+    CCHashFN cc_hashfunc[CATCACHE_MAXKEYS];       /* hash function for each key */
+    CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS]; /* fast equal function for each key */
+    Oid cc_reloid;            /* OID of relation the tuples come from */
+    Oid cc_indexoid;          /* OID of index matching cache keys */
+    // int cc_ntup;           /* # of tuples currently in this cache */
+    int cc_nkeys;             /* # of keys (1..CATCACHE_MAXKEYS) */
+    int cc_keyno[CATCACHE_MAXKEYS]; /* AttrNumber of each key */
+    bool cc_relisshared;      /* is relation shared across databases? */
+} CatTupRelInfoMsg;
+
+/*
+ * GSC CatCache search type
+ */
+enum FIND_TYPE {
+    SEARCH_TUPLE_SKIP,
+    SCAN_TUPLE_SKIP,
+    SCAN_LIST_SKIP,
+    PGATTR_LIST_SKIP,
+    PROC_LIST_SKIP
+};
+
+/*
+ * A wrapper data structure for the parameters of the GlobalCatCTup search
+ * routines, mostly for internal use
+ */
+typedef struct InsertCatTupInfo {
+    FIND_TYPE find_type;
+    Datum *arguments;
+    HeapTuple ntp;
+    uint32 hash_value;
+    Index hash_index;
+    int16 attnum;
+    CatalogRelationBuildParam *catalogDesc;
+
+    /*
+     * Flag to indicate the current thread holds GlobalSysTupCache::m_concurrent_lock
+     */
+    bool has_concurrent_lock;
+
+    /*
+     * Flag to indicate whether the returned GlobalCatCTup can be inserted into GSC; normally
+     * it is affected by has_concurrent_lock and is_exclusive
+     */
+    bool canInsertGSC; /* output param */
+
+    /*
+     * Flag to indicate whether to store the tuple globally or locally in GSC
+     */
+    bool is_exclusive;
+} InsertCatTupInfo;
+
+/*
+ * A wrapper data structure for the parameters of the GlobalCatCList search routines,
+ * mostly for internal use
+ */
+typedef struct InsertCatListInfo {
+    List *ctlist; /* for output fields */
+    uint32 hash_value;
+
+    /* partial search's arguments array and its length */
+    Datum *arguments; /* array */
+    int nkeys;        /* length */
+
+    /*
+     * Flag to indicate the current thread holds GlobalSysTupCache::m_concurrent_lock
+     */
+    bool has_concurrent_lock;
+
+    /*
+     * Flag to indicate whether the search result is ordered
+     */
+    bool ordered; /* for output fields */
+
+    /*
+     * Flag to indicate whether the returned GlobalCatCList can be inserted into GSC; normally
+     * it is affected by has_concurrent_lock, is_exclusive and GlobalCatCTup's canInsertGSC
+     */
+    bool canInsertGSC; /* output param */
+
+    /*
+     * Flag to indicate whether to store the tuple globally or locally in GSC
+     */
+    bool is_exclusive;
+} InsertCatListInfo;
+
+/*
+ * Refers to "CatCache" in non-GSC mode; normally consider it the cache manager for a catalog;
+ * a GlobalSysTupCache object refers to one catalog table
+ */
+class GlobalSysTupCache : public BaseObject {
+public:
+    GlobalSysTupCache(Oid dbOid, int cache_id, bool is_shared,
+        struct GlobalSysDBCacheEntry *entry);
+    void SetStatInfoPtr(volatile uint64* tup_count, volatile uint64 *tup_space);
+
+    /*
+     * Function to return statinfo for the CatCTups stored in cc_bucket
+     */
+    List *GetGlobalCatCTupStat();
+    void ReleaseGlobalCatCTup(GlobalCatCTup *ct);
+    void ReleaseGlobalCatCList(GlobalCatCList *cl);
+
+    /* common interface */
+    void HashValueInvalidate(uint32 hash_value);
+    template
+    void ResetCatalogCache();
+
+    void RemoveAllTailElements();
+
+    /*
+     * GlobalSysCache major search interfaces from the user side:
+     *  1. SearchTuple(hash_value, args)
+     *  2. SearchTupleFromFile(hash_value, args, is_exclusive)
+     *  3. SearchList(hash_value, nkeys, args)
+     *  4. SearchListFromFile(hash_value, nkeys, args, is_exclusive)
+     */
+    GlobalCatCTup *SearchTuple(uint32 hash_value, Datum *arguments)
+    {
+        return SearchTupleInternal(hash_value, arguments);
+    }
+
+    GlobalCatCTup *SearchTupleFromFile(uint32 hash_value, Datum *arguments, bool is_exclusive);
+
+    GlobalCatCList *SearchList(uint32 hash_value, int nkeys, Datum *arguments)
+    {
+        return SearchListInternal(hash_value, nkeys, arguments);
+    }
+
+    GlobalCatCList *SearchListFromFile(uint32 hash_value, int nkeys, Datum *arguments, bool is_exclusive);
+
+    /* simple inline functions */
+    inline const char *GetCCRelName()
+    {
+        return m_relinfo.cc_relname;
+    }
+
+    inline const bool CCRelIsShared()
+    {
+        return m_relinfo.cc_relisshared;
+    }
+
+    inline TupleDesc GetCCTupleDesc()
+    {
+        return m_relinfo.cc_tupdesc;
+    }
+
+    inline const CCFastEqualFN *GetCCFastEqual()
+    {
+        return m_relinfo.cc_fastequal;
+    }
+
+    inline const CCHashFN *GetCCHashFunc()
+    {
+        return m_relinfo.cc_hashfunc;
+    }
+
+    inline Oid GetCCRelOid()
+    {
+        return m_relinfo.cc_reloid;
+    }
+
+    void Init();
+    inline bool Inited()
+    {
+        return m_isInited;
+    }
+
+    inline uint64 GetDeadNum()
+    {
+        return m_dead_cts.GetLength();
+    }
+
+#ifndef ENABLE_MULTIPLE_NODES
+    GlobalCatCTup *SearchTupleWithArgModes(uint32 hash_value, Datum *arguments, oidvector* argModes);
+    GlobalCatCTup *SearchTupleFromFileWithArgModes(
+        uint32 hash_value, Datum *arguments, oidvector* argModes, bool is_disposable);
+    GlobalCatCTup *SearchTupleMissWithArgModes(InsertCatTupInfo *tup_info, oidvector* argModes);
+#endif
+
+    bool enable_rls;
+private:
+    /*
+     * Note: functions with the "Search" prefix do search + insert if the target tuple is
+     * not found; functions with the "Find" prefix do search only
+     */
+    GlobalCatCTup *SearchTupleInternal(uint32 hash_value, Datum *arguments);
+    GlobalCatCTup *SearchTupleMiss(InsertCatTupInfo *tup_info);
+    GlobalCatCTup *SearchMissFromProcAndAttribute(InsertCatTupInfo *tup_info);
+    /* used by SearchTupleInternal */
+    GlobalCatCTup *FindSearchKeyTupleFromCache(InsertCatTupInfo *tup_info, int *location);
+    /* used by SearchTupleMiss */
+    GlobalCatCTup *FindScanKeyTupleFromCache(InsertCatTupInfo *tup_info);
+    /* used by SearchBuiltinProcCacheList */
+    GlobalCatCTup *FindHashTupleFromCache(InsertCatTupInfo *tup_info);
+    /* used by SearchListInternal when scanning */
+    GlobalCatCTup *FindSameTupleFromCache(InsertCatTupInfo *tup_info);
+    /* used by SearchPgAttributeCacheList */
+    GlobalCatCTup *FindPgAttrTupleFromCache(InsertCatTupInfo *tup_info);
+    inline GlobalCatCTup *FindTupleFromCache(InsertCatTupInfo *tup_info)
+    {
+        Assert(tup_info->find_type != SEARCH_TUPLE_SKIP);
+        switch (tup_info->find_type) {
+            case SCAN_LIST_SKIP:
+                return FindSameTupleFromCache(tup_info);
+            case PGATTR_LIST_SKIP:
+                return FindPgAttrTupleFromCache(tup_info);
+            case PROC_LIST_SKIP:
+                return FindHashTupleFromCache(tup_info);
+            case SCAN_TUPLE_SKIP:
+                return FindScanKeyTupleFromCache(tup_info);
+            default:
+                Assert(false);
+                return NULL;
+        }
+        return NULL;
+    }
+
+    GlobalCatCList *SearchListInternal(uint32 hash_value, int nkeys, Datum *arguments);
+    GlobalCatCList *SearchListMiss(InsertCatListInfo *list_info);
+    GlobalCatCList 
*CreateCatCacheList(InsertCatListInfo *list_info); + GlobalCatCList *InsertListIntoCatCacheList(InsertCatListInfo *list_info, GlobalCatCList *cl); + GlobalCatCTup *InsertHeapTupleIntoGlobalCatCache(InsertCatTupInfo *tup_info); + GlobalCatCTup *InsertHeapTupleIntoCatCacheInSingle(InsertCatTupInfo *tup_info); + GlobalCatCTup *InsertHeapTupleIntoCatCacheInList(InsertCatTupInfo *tup_info); + GlobalCatCTup *InsertHeapTupleIntoLocalCatCache(InsertCatTupInfo *tup_info); + void ReleaseTempList(const List *ctlist); + void SearchPgAttributeCacheList(InsertCatListInfo *list_info); + void SearchBuiltinProcCacheList(InsertCatListInfo *list_info); + GlobalCatCList *FindListInternal(uint32 hash_value, int nkeys, Datum *arguments, int *location); + + void FreeDeadCts(); + void HandleDeadGlobalCatCTup(GlobalCatCTup *ct); + void RemoveTailTupleElements(Index hash_index); + void InvalidLSC(uint32 hash_value); + + void FreeDeadCls(); + void HandleDeadGlobalCatCList(GlobalCatCList *cl); + void RemoveTailListElements(); + + void FreeGlobalCatCList(GlobalCatCList *cl); + + /* when initdb, this func call first */ + void InitCacheInfo(Oid reloid, Oid indexoid, int nkeys, const int *key, int nbuckets); + void InitHashTable(); + /* when initdb, this func call second after init relcache */ + void InitRelationInfo(); + + uint32 GetGlobalCatCachehashValue(Datum v1, Datum v2, Datum v3, Datum v4); + void InsertBuiltinFuncInBootstrap(); + + void InitInsertCatTupInfo(InsertCatTupInfo *tup_info, HeapTuple ntp, Datum *arguments); + void CopyTupleIntoGlobalCatCTup(GlobalCatCTup *ct, HeapTuple ntp); + + Dllist *GetBucket(Index hash_index) + { + return &(cc_buckets[hash_index]); + } + + void AddHeadToCCList(GlobalCatCList *cl); + void RemoveElemFromCCList(GlobalCatCList *cl); + + void AddHeadToBucket(Index hash_index, GlobalCatCTup *ct); + void RemoveElemFromBucket(GlobalCatCTup *ct); + + /* Global cache identifier */ + Oid m_dbOid; + volatile bool m_isInited; + struct GlobalSysDBCacheEntry *m_dbEntry; + + int m_cache_id; + int cc_id; /* cache identifier --- see syscache.h */ + DllistWithLock m_dead_cts; + DllistWithLock m_dead_cls; + + CatTupRelInfoMsg m_relinfo; + ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for + * heap scans */ + + /* # of hash buckets in this cache */ + int cc_nbuckets; + Dllist *cc_buckets; /* same to CatCache::cc_bucket */ + Dllist cc_lists; /* same to CatCache::cc_list */ + + pthread_rwlock_t *m_bucket_rw_locks; // count of lock equal nbucket + pthread_rwlock_t *m_list_rw_lock; + + /* for pg_proc which has no concurrent lock, or for shared read, we need acquire a rdlock + * avoid a ddl when we load a tuple but not insert into gsc. + * this means when ddl, should acquire a wrlock before modify gsc. + * for ddl, first write tuple infomask, then acquire a wrlock and modify gsc */ + pthread_rwlock_t *m_concurrent_lock; + + volatile uint32 *m_is_tup_swappingouts; + volatile uint32 m_is_list_swappingout; + + volatile uint64 *m_tup_count; + volatile uint64 *m_tup_space; + volatile uint64 *m_searches; + volatile uint64 *m_hits; + volatile uint64 *m_newloads; +}; + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_globaltabdefcache.h b/src/include/utils/knl_globaltabdefcache.h new file mode 100644 index 000000000..0bb052ade --- /dev/null +++ b/src/include/utils/knl_globaltabdefcache.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_globaltabdefcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_globaltabdefcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_GLOBALTABDEFCACHE_H +#define KNL_GLOBALTABDEFCACHE_H +#include "utils/relcache.h" +#include "utils/knl_globalbasedefcache.h" +#include "utils/knl_globalsyscache_common.h" +#include "utils/relmapper.h" + +extern bool has_locator_info(GlobalBaseEntry *entry); +class GlobalTabDefCache : public GlobalBaseDefCache { +public: + GlobalTabDefCache(Oid dbOid, bool is_shared, struct GlobalSysDBCacheEntry *entry); + ~GlobalTabDefCache() {}; + + void Init(); + void Insert(Relation rel, uint32 hash_value); + + GlobalRelationEntry *SearchReadOnly(Oid relOid, uint32 hash_value) + { + GlobalBaseDefCache::FreeDeadEntrys(); + GlobalRelationEntry *entry = + (GlobalRelationEntry *)GlobalBaseDefCache::SearchReadOnly(relOid, hash_value); + return entry; + } + + template + inline void ResetRelCaches() + { + if (!m_is_inited) { + return; + } + GlobalBaseDefCache::ResetCaches(); + GlobalBaseDefCache::FreeDeadEntrys(); + } + + inline uint64 GetSysCacheSpaceNum() + { + return m_base_space; + } + + inline void Invalidate(Oid dbOid, Oid relOid) + { + GlobalBaseDefCache::Invalidate(dbOid, relOid); + } + + inline void InvalidateRelationNodeList() + { + GlobalBaseDefCache::InvalidateRelationNodeListBy(has_locator_info); + } + + TupleDesc GetPgClassDescriptor(); + + TupleDesc GetPgIndexDescriptor(); + + List *GetTableStats(Oid rel_oid); + +private: + + GlobalRelationEntry *CreateEntry(Relation rel); + + TupleDesc m_pgclassdesc; + TupleDesc m_pgindexdesc; + pthread_mutex_t *m_catalog_lock; +}; +#endif \ No newline at end of file diff --git a/src/include/utils/knl_localbasedefcache.h b/src/include/utils/knl_localbasedefcache.h new file mode 100644 index 000000000..14e443ddb --- /dev/null +++ b/src/include/utils/knl_localbasedefcache.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * knl_localbasedefcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localbasedefcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALBASEDEFCACHE_H +#define KNL_LOCALBASEDEFCACHE_H +#include "utils/knl_globalsysdbcache.h" +#include "utils/knl_globalsystupcache.h" +#include "utils/knl_localbucketlist.h" +#include "utils/knl_localsyscache_common.h" +#include "utils/rel.h" + +class LocalBaseDefCache : public BaseObject { +public: + LocalBaseDefCache() + { + m_nbuckets = 0; + m_db_id = InvalidOid; + } + + LocalBaseEntry *SearchEntryFromLocal(Oid oid, Index hash_index); + + void CreateDefBucket(size_t size); + + LocalBaseEntry *CreateEntry(Index hash_index, size_t size); + InvalidBaseEntry invalid_entries; + + template + void RemoveTailDefElements(); + +protected: + + int m_nbuckets; /* # of hash buckets in this cache */ + Oid m_db_id; + LocalBucketList m_bucket_list; +}; +#endif \ No newline at end of file diff --git a/src/include/utils/knl_localbucketlist.h b/src/include/utils/knl_localbucketlist.h new file mode 100644 index 000000000..8823b4408 --- /dev/null +++ b/src/include/utils/knl_localbucketlist.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * src/include/utils/knl_localbucketlist.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef KNL_LOCALBUCKETLIST_H +#define KNL_LOCALBUCKETLIST_H +#include "lib/dllist.h" +#include "utils/knl_globalbucketlist.h" +class LocalBucketList : public BaseObject { + int m_nbuckets; + BucketEntry *m_bucket_entry; + Dllist m_active_bucket_list; +public: + LocalBucketList() + { + m_nbuckets = 0; + m_bucket_entry = NULL; + } + void Init(int nbuckets) + { + DLInitList(&m_active_bucket_list); + m_bucket_entry = (BucketEntry *)palloc0(sizeof(BucketEntry) * nbuckets); + m_nbuckets = nbuckets; + for (int i = 0; i < nbuckets; i++) { + m_bucket_entry[i].elem.dle_val = &m_bucket_entry[i]; + } + } + + void ResetContent() + { + DLInitList(&m_active_bucket_list); + if (m_bucket_entry == NULL) { + return; + } + errno_t rc = memset_s(m_bucket_entry, sizeof(BucketEntry) * m_nbuckets, 0, sizeof(BucketEntry) * m_nbuckets); + securec_check(rc, "\0", "\0"); + for (int i = 0; i < m_nbuckets; i++) { + m_bucket_entry[i].elem.dle_val = &m_bucket_entry[i]; + } + } + + Dllist *GetActiveBucketList() + { + return &m_active_bucket_list; + } + + Dllist *GetBucket(Index hash_index) + { + return &(m_bucket_entry[hash_index].cc_bucket); + } + + /* + * @return INVALID_INDEX means there are no active bucket, no need weedout, otherwise return natural number + */ + Index GetTailBucketIndex() + { + Index tail_index = INVALID_INDEX; + if (!DLIsNIL(&m_active_bucket_list)) { + Dlelem *elt = DLGetTail(&m_active_bucket_list); + BucketEntry *bucket_entry = (BucketEntry *)DLE_VAL(elt); + tail_index = bucket_entry - m_bucket_entry; + } + return tail_index; + } + + void AddHeadToBucket(Index hash_index, Dlelem *elem) + { + Dllist *bucket = &(m_bucket_entry[hash_index].cc_bucket); + // inactive bucket + if (DLIsNIL(bucket)) { + DLAddHead(&m_active_bucket_list, &m_bucket_entry[hash_index].elem); + } + DLAddHead(bucket, elem); + } + + void RemoveElemFromBucket(Dlelem *elem) + { + Dllist *bucket = elem->dle_list; + DLRemove(elem); + // inactive bucket + if (DLIsNIL(bucket)) { + BucketEntry *bucket_obj = (BucketEntry *)(bucket); + Assert(&bucket_obj->cc_bucket == bucket); + Assert(DLGetListHdr(&bucket_obj->elem) == &m_active_bucket_list); + DLRemove(&bucket_obj->elem); + } + } + + void MoveBucketToHead(Index hash_index) + { + DLMoveToFront(&m_bucket_entry[hash_index].elem); + } + +}; +#endif diff --git a/src/include/utils/knl_localpartdefcache.h b/src/include/utils/knl_localpartdefcache.h new file mode 100644 index 000000000..a37b2f94e --- /dev/null +++ b/src/include/utils/knl_localpartdefcache.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
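LocalBucketList's trick is that only non-empty buckets sit on m_active_bucket_list, so weed-out can walk straight to the least-recently-used bucket without scanning empties; GetTailBucketIndex then recovers the bucket's index purely by pointer arithmetic, because each BucketEntry's list element points back at its owner. A standalone rendering of just that index-recovery step, with stand-in types:

```
#include <cstddef>
#include <cstdio>

/*
 * Each bucket embeds a list element whose payload points back at the
 * bucket itself, so a node taken from the active-bucket list can be
 * mapped back to its bucket index with plain pointer arithmetic.
 */
struct BucketEntry {
    void *dle_val;   /* stand-in for Dlelem::dle_val */
};

int main()
{
    const int nbuckets = 8;
    BucketEntry buckets[nbuckets];
    for (int i = 0; i < nbuckets; i++) {
        buckets[i].dle_val = &buckets[i];
    }

    /* Given only the payload (what DLE_VAL() yields for the list tail),
     * recover the bucket index the way GetTailBucketIndex() does. */
    BucketEntry *tail = (BucketEntry *)buckets[5].dle_val;
    ptrdiff_t tail_index = tail - buckets;
    printf("tail bucket index = %td\n", tail_index);   /* prints 5 */
    return 0;
}
```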
+ * --------------------------------------------------------------------------------------- + * + * knl_localpartdefcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localpartdefcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALPARTDEFCACHE_H +#define KNL_LOCALPARTDEFCACHE_H + +#include "utils/knl_localbasedefcache.h" +#include "utils/knl_globalpartdefcache.h" +#include "utils/relcache.h" + +class LocalPartDefCache : public LocalBaseDefCache { +public: + LocalPartDefCache(); + + void ResetInitFlag() + { + m_bucket_list.ResetContent(); + invalid_entries.ResetInitFlag(); + m_global_partdefcache = NULL; + part_cache_need_eoxact_work = false; + m_is_inited = false; + m_db_id = InvalidOid; + } + + void Init(); + void CreateDefBucket() + { + LocalBaseDefCache::CreateDefBucket(LOCAL_INIT_PARTCACHE_SIZE); + } + Partition SearchPartition(Oid part_id); + Partition SearchPartitionFromLocal(Oid part_id); + template + Partition SearchPartitionFromGlobalCopy(Oid part_id); + + void InsertPartitionIntoLocal(Partition part); + void RemovePartition(Partition part); + + void InvalidateGlobalPartition(Oid db_oid, Oid part_oid, bool is_commit); + void InvalidateAll(); + void AtEOXact_PartitionCache(bool isCommit); + void AtEOSubXact_PartitionCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid); + Partition PartitionIdGetPartition(Oid part_oid, StorageType storage_type); + +public: + bool part_cache_need_eoxact_work; + +private: + void InsertPartitionIntoGlobal(Partition part, uint32 hash_value); + void CreateLocalPartEntry(Partition part, Index hash_index); + void CopyLocalPartition(Partition dest, Partition src); + Partition RemovePartitionByOid(Oid part_id, Index hash_index); + LocalPartitionEntry *FindPartitionFromLocal(Oid part_id); + GlobalPartDefCache *m_global_partdefcache; + bool m_is_inited; +}; + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_localsyscache_common.h b/src/include/utils/knl_localsyscache_common.h new file mode 100644 index 000000000..55f58071a --- /dev/null +++ b/src/include/utils/knl_localsyscache_common.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
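SearchPartitionFromGlobalCopy above is declared as a template (its parameter list appears to have been stripped in this rendering; in comparable openGauss code the parameter is a compile-time bool flag, assumed here to be named insert_into_local). Making the flag a template parameter means each instantiation is compiled with the branch already resolved. A self-contained sketch of the pattern:

```
#include <cstdio>

/*
 * The flag is a template parameter, so the "don't cache locally" variant
 * carries no runtime test at all. insert_into_local is an assumed name.
 */
template <bool insert_into_local>
int SearchFromGlobalCopy(int oid)
{
    int copy = oid;   /* pretend this was copied out of the GSC */
    if (insert_into_local) {
        printf("inserting %d into the local cache\n", copy);
    }
    return copy;
}

int main()
{
    (void)SearchFromGlobalCopy<true>(42);    /* lookup that populates the LSC */
    (void)SearchFromGlobalCopy<false>(42);   /* transient lookup */
    return 0;
}
```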
+ * --------------------------------------------------------------------------------------- + * + * knl_localsyscache_common.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localsyscache_common.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALSYSCACHE_COMMON_H +#define KNL_LOCALSYSCACHE_COMMON_H +#include "utils/knl_globalsyscache_common.h" + +struct LocalBaseEntry { + Oid oid; + Dlelem cache_elem; + bool obj_is_nailed; +}; +struct LocalPartitionEntry : LocalBaseEntry { + Partition part; +}; +struct LocalRelationEntry : LocalBaseEntry { + Relation rel; +}; + +struct InvalidBaseEntry { + int count; + int size; + uint32 *invalid_values; + bool is_reset; + InvalidBaseEntry() + { + count = 0; + size = 0; + invalid_values = NULL; + is_reset = false; + } + /* base interface */ + void Init() + { + count = 0; + size = 32; + invalid_values = (uint32 *)palloc0(size * sizeof(uint32)); + } + void ResetInitFlag() + { + /* We don't reset invalid_values or size when the LSC is rebuilt: + * for the catcache they are reinitialized when the cat buckets are rebuilt, + * and for rel/part caches they are never reinitialized. */ + count = 0; + is_reset = false; + } + + void InsertInvalidValue(uint32 value) + { + if (ExistDefValue(value)) { + return; + } + if (count == size) { + size = size * 2; + invalid_values = (uint32 *)repalloc(invalid_values, size * sizeof(uint32)); + } + invalid_values[count] = value; + count++; + } + + bool ExistValue(uint32 value) + { + for (int i = 0; i < count; i++) { + if (invalid_values[i] == value) { + return true; + } + } + return false; + } + + /* rel/partcache */ + + bool ExistDefValue(uint32 value) + { + return ExistValue(value); + } + + void InsertInvalidDefValue(uint32 value) + { + InsertInvalidValue(value); + } + + /* catcache */ + void InsertInvalidTupleValue(uint32 value) + { + if (is_reset) { + return; + } + InsertInvalidValue(value); + } + + void ResetCatalog() + { + is_reset = true; + } + + bool ExistTuple(uint32 value) + { + if (is_reset) { + return true; + } + return ExistDefValue(value); + } + + bool ExistList() + { + if (is_reset || count > 0) { + return true; + } + return false; + } +}; + +void StreamTxnContextSaveInvalidMsg(void *stc); +void StreamTxnContextRestoreInvalidMsg(void *stc); +#define EnableLocalSysCache() t_thrd.lsc_cxt.enable_lsc +#endif \ No newline at end of file diff --git a/src/include/utils/knl_localsysdbcache.h b/src/include/utils/knl_localsysdbcache.h new file mode 100644 index 000000000..590fa74c1 --- /dev/null +++ b/src/include/utils/knl_localsysdbcache.h @@ -0,0 +1,250 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
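InvalidBaseEntry above is an append-only array of invalidated hash values with doubling growth, plus an is_reset escape hatch meaning "the whole catalog is invalid". A condensed standalone version of the same logic, with malloc/realloc standing in for palloc0/repalloc and the Def/Tuple variants folded into one pair of methods:

```
#include <cstdint>
#include <cstdlib>

/*
 * Condensed InvalidBaseEntry: an append-only set of invalidated hash
 * values with doubling growth; is_reset short-circuits everything to
 * "invalid".
 */
struct InvalidSet {
    int count = 0;
    int size = 0;
    uint32_t *values = nullptr;
    bool is_reset = false;

    void Insert(uint32_t v)
    {
        if (is_reset || Contains(v)) {
            return;                               /* already covered */
        }
        if (count == size) {
            size = (size == 0) ? 32 : size * 2;   /* doubling growth */
            values = (uint32_t *)realloc(values, size * sizeof(uint32_t));
        }
        values[count++] = v;
    }

    bool Contains(uint32_t v) const
    {
        if (is_reset) {
            return true;                          /* whole catalog invalidated */
        }
        for (int i = 0; i < count; i++) {
            if (values[i] == v) {
                return true;
            }
        }
        return false;
    }
};
```

This also explains ExistList() above: a cached list can only be trusted when nothing at all has been invalidated, hence is_reset || count > 0 forces a rebuild.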
+ * --------------------------------------------------------------------------------------- + * + * knl_localsysdbcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localsysdbcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALSYSDBCACHE_H +#define KNL_LOCALSYSDBCACHE_H +#include "utils/knl_localsystabcache.h" +#include "utils/knl_localtabdefcache.h" +#include "utils/knl_localpartdefcache.h" +#include "knl/knl_session.h" + +void ReLoadLSCWhenWaitMission(); + +void ReleaseAllGSCRdConcurrentLock(); + +void RememberRelSonMemCxtSpace(Relation rel); +void ForgetRelSonMemCxtSpace(Relation rel); + +bool CheckMyDatabaseMatch(); + +char *GetMyDatabasePath(); +Oid GetMyDatabaseId(); +Oid GetMyDatabaseTableSpace(); + +bool IsGotPoolReload(); +void ResetGotPoolReload(bool value); + +bool DeepthInAcceptInvalidationMessageNotZero(); +void ResetDeepthInAcceptInvalidationMessage(int value); + +void CloseLocalSysDBCache(); + +extern void CreateLocalSysDBCache(); + +extern MemoryContext LocalSharedCacheMemCxt(); +extern MemoryContext LocalMyDBCacheMemCxt(); +extern MemoryContext LocalGBucketMapMemCxt(); +extern MemoryContext LocalSmgrStorageMemoryCxt(); + +extern bool EnableGlobalSysCache(); + +knl_u_inval_context *GetInvalCxt(); +knl_u_relmap_context *GetRelMapCxt(); + +struct HTAB *GetTypeCacheHash(); +struct HTAB *GetTableSpaceCacheHash(); +struct HTAB *GetSMgrRelationHash(); + +struct vfd *GetVfdCache(); +struct vfd **GetVfdCachePtr(); +void SetVfdCache(vfd *value); +Size GetSizeVfdCache(); +Size *GetSizeVfdCachePtr(); +void SetSizeVfdCache(Size value); +int GetVfdNfile(); +void AddVfdNfile(int n); + +dlist_head *getUnownedReln(); + +extern void AtEOXact_SysDBCache(bool is_commit); + +extern void ReBuildLSC(); + +struct BadPtrObj : public BaseObject { + int nbadptr; + void **bad_ptr_lists; + int maxbadptr; + BadPtrObj() + { + ResetInitFlag(); + } + void ResetInitFlag() + { + nbadptr = 0; + bad_ptr_lists = NULL; + maxbadptr = 0; + } +}; + +const int MAX_GSC_READLOCK_COUNT = 16; +struct GSCRdLockInfo { + int count; + bool *has_concurrent_lock[MAX_GSC_READLOCK_COUNT]; + pthread_rwlock_t *concurrent_lock[MAX_GSC_READLOCK_COUNT]; +}; + +class LocalSysDBCache : public BaseObject { +public: + LocalSysDBCache(); + + GlobalSysTabCache *GetGlobalSysTabCache() + { + if (!is_inited) { + Init(); + } + return m_global_db->m_systabCache; + } + + GlobalTabDefCache *GetGlobalTabDefCache() + { + if (!is_inited) { + Init(); + } + return m_global_db->m_tabdefCache; + } + + GlobalPartDefCache *GetGlobalPartDefCache() + { + if (!is_inited) { + Init(); + } + return m_global_db->m_partdefCache; + } + + GlobalSysTabCache *GetSharedSysTabCache() + { + return m_shared_global_db->m_systabCache; + } + GlobalTabDefCache *GetSharedTabDefCache() + { + return m_shared_global_db->m_tabdefCache; + } + + struct GlobalSysDBCacheEntry *GetMyGlobalDBEntry() + { + return m_global_db; + } + + struct GlobalSysDBCacheEntry *GetSharedGlobalDBEntry() + { + return m_shared_global_db; + } + + void InitRelMapPhase2(); + void InitRelMapPhase3(); + void LoadRelMapFromGlobal(bool shared); + void InvalidateGlobalRelMap(bool shared, Oid db_id, RelMapFile* real_map); + + void LocalSysDBCacheReSet(); + void ClearSysCacheIfNecessary(Oid db_id, const char *db_name); + void CloseLocalSysDBCache(); + void CreateDBObject(); + void InitThreadDatabase(Oid db_id, const char *db_name, Oid db_tabspc); + void InitSessionDatabase(Oid db_id, const char *db_name, Oid db_tabspc); + 
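The GetGlobal*Cache() accessors earlier in this class all begin with an `if (!is_inited) Init();` test: the thread-local cache cannot bind to its database's shared GSC entry until the first real access, because the database is unknown when the thread starts. The lazy-attach shape, reduced to a standalone sketch (types and the acquire function are illustrative stand-ins):

```
/*
 * Lazy-attach shape of the GetGlobal*Cache() accessors: bind the
 * thread-local cache to the shared per-database entry on first use.
 */
struct SharedDbEntry {
    /* systab/tabdef/partdef caches would live here */
};

struct ThreadLocalCache {
    bool is_inited = false;
    SharedDbEntry *global_db = nullptr;

    SharedDbEntry *AttachOnce()
    {
        if (!is_inited) {            /* first accessor call pays the cost */
            global_db = AcquireSharedEntryForMyDatabase();
            is_inited = true;
        }
        return global_db;
    }

    static SharedDbEntry *AcquireSharedEntryForMyDatabase()
    {
        static SharedDbEntry entry;  /* stand-in for the shared hash lookup */
        return &entry;
    }
};
```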
void InitDatabasePath(const char *db_path); + + bool LocalSysDBCacheNeedSwapOut(); + void SetThreadDefExclusive(bool is_exclusive); + bool GetThreadDefExclusive() + { + return m_is_def_exclusive; + } + + void LocalSysDBCacheReBuild(); + bool LocalSysDBCacheNeedReBuild(); + void LocalSysDBCacheReleaseGlobalReSource(bool is_commit); + + LocalSysTabCache systabcache; + LocalTabDefCache tabdefcache; + LocalPartDefCache partdefcache; + + Oid my_database_id; + char my_database_name[NAMEDATALEN]; + char *my_database_path; + Oid my_database_tablespace; + + knl_u_inval_context inval_cxt; + knl_u_relmap_context relmap_cxt; + /* Hash table for information about each tablespace */ + struct HTAB *TableSpaceCacheHash; + struct HTAB *TypeCacheHash; + struct HTAB *SMgrRelationHash; + + struct vfd *VfdCache; + Size SizeVfdCache; + /* Number of file descriptors known to be in use by VFD entries. */ + int nfile; + dlist_head unowned_reln; + + MemoryContext lsc_top_memcxt; + MemoryContext lsc_share_memcxt; + MemoryContext lsc_mydb_memcxt; + /* marks whether the syscache has been loaded */ + bool is_inited; + + bool recovery_finished; + + /* used to query the LSC/GSC outside of a transaction */ + struct ResourceOwnerData *local_sysdb_resowner; + /* used to record multiple pallocs; unused for now */ + BadPtrObj bad_ptr_obj; + /* LSC close flag; never query the LSC when is_closed == true */ + bool is_closed; + /* pgxc pool reload flag; flush the relcache's locatorinfo->nodelist */ + bool got_pool_reload; + /* record other palloc'd space on the LSC */ + int64 other_space; + /* record the rel's index and rule contexts */ + int64 rel_index_rule_space; + /* record the abort count; aborts may leak memory, since fmgr_info_cxt, + * rule_cxt, rls_cxt and index_cxt have no resource owner to prevent leaks */ + uint64 abort_count; + /* record concurrent read locks held on the syscache */ + GSCRdLockInfo rdlock_info; +private: + void Init(); + void CreateCatBucket(); + void LocalSysDBCacheClearMyDB(Oid db_id, const char *db_name); + bool LocalSysDBCacheNeedClearMyDB(Oid db_id, const char *db_name); + + void LocalSysDBCacheReleaseCritialReSource(bool include_shared); + void SetDatabaseName(const char *db_name); + + struct GlobalSysDBCacheEntry *m_global_db; + struct GlobalSysDBCacheEntry *m_shared_global_db; + bool is_lsc_catbucket_created; + /* marks this thread as a special thread (such as a stream worker or bgworker), + * or that it has started a special thread */ + bool m_is_def_exclusive; +}; +extern void AppendBadPtr(void *elem); +extern void RemoveBadPtr(void *elem); + +#define LOCAL_SYSDB_RESOWNER \ + (unlikely(t_thrd.utils_cxt.CurrentResourceOwner == NULL) ? \ + (AssertMacro(!IsTransactionOrTransactionBlock()), t_thrd.lsc_cxt.lsc->local_sysdb_resowner) \ + : t_thrd.utils_cxt.CurrentResourceOwner) + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_localsystabcache.h b/src/include/utils/knl_localsystabcache.h new file mode 100644 index 000000000..5acca8a3a --- /dev/null +++ b/src/include/utils/knl_localsystabcache.h @@ -0,0 +1,275 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_localsystabcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localsystabcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALSYSTABCACHE_H +#define KNL_LOCALSYSTABCACHE_H +#include "utils/catcache.h" +#include "utils/inval.h" +#include "utils/knl_localsystupcache.h" + +using FetTupleFrom = HeapTuple (*)(CatCList *, int); + +class LocalSysTabCache : public BaseObject { +public: + /* common interface */ + LocalSysTabCache() + { + m_is_inited = false; + local_systupcaches = NULL; + need_eoxact_work = false; + } + + void ResetInitFlag(bool include_shared) + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + if (!include_shared && local_systupcaches[cache_id]->GetCCRelIsShared()) { + continue; + } + local_systupcaches[cache_id]->ResetInitFlag(); + } + m_is_inited = false; + } + + void AtEOXact_CatCache(bool isCommit) + { + if (!need_eoxact_work) { + return; + } + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + local_systupcaches[cache_id]->AtEOXact_CatCache(isCommit); + } + need_eoxact_work = false; + } + + /* systup may be shared table cache. not cleaned when cleardb */ + MemoryContext GetLocalTupCacheMemoryCxt(Oid cache_id) + { + return local_systupcaches[cache_id]->GetMemoryCxt(); + } + + // call when rebuild db + void ReleaseGlobalRefcount(bool include_shared) + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + if (!include_shared && local_systupcaches[cache_id]->GetCCRelIsShared()) { + continue; + } + local_systupcaches[cache_id]->ReleaseGlobalRefcount(); + } + } + + void ResetCatalogCaches() + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + local_systupcaches[cache_id]->ResetCatalogCache(); + } + } + + void CatalogCacheFlushCatalogLocal(Oid rel_oid) + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + Assert(cacheinfo[cache_id].reloid == local_systupcaches[cache_id]->GetCCRelOid() && m_is_inited); + if (cacheinfo[cache_id].reloid == rel_oid) { + /* Yes, so flush all its contents */ + local_systupcaches[cache_id]->ResetCatalogCache(); + /* Tell inval.c to call syscache callbacks for this cache */ + /* sessionsyscachecallback called by SessionCatCacheCallBack */ + CallThreadSyscacheCallbacks(cache_id, 0); + } + } + } + + void CatalogCacheFlushCatalogGlobal(Oid db_id, Oid rel_oid, bool is_commit) + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + Assert(cacheinfo[cache_id].reloid == local_systupcaches[cache_id]->GetCCRelOid()); + if (cacheinfo[cache_id].reloid == rel_oid) { + local_systupcaches[cache_id]->ResetGlobal(db_id, is_commit); + } + } + } + + void CatCacheCallBack(Oid rel_oid) + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + Assert(cacheinfo[cache_id].reloid == local_systupcaches[cache_id]->GetCCRelOid() && m_is_inited); + if (cacheinfo[cache_id].reloid == rel_oid) { + CallThreadSyscacheCallbacks(cache_id, 0); + } + } + } + + void SessionCatCacheCallBack(Oid rel_oid) + { + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + Assert(cacheinfo[cache_id].reloid == local_systupcaches[cache_id]->GetCCRelOid() && m_is_inited); + if (cacheinfo[cache_id].reloid == rel_oid) { + CallSessionSyscacheCallbacks(cache_id, 0); + } + } + } + + void CacheIdHashValueInvalidateLocal(int cache_id, uint32 hash_value) + { + 
local_systupcaches[cache_id]->HashValueInvalidateLocal(hash_value); + } + + void CacheIdHashValueInvalidateGlobal(Oid db_id, int cache_id, uint32 hash_value, bool is_commit) + { + local_systupcaches[cache_id]->HashValueInvalidateGlobal(db_id, hash_value, is_commit); + } + + void PrepareToInvalidateCacheTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple, + void (*function)(int, uint32, Oid)) + { + Oid reloid; + CACHE1_elog(DEBUG2, "RelationHeapTupleInvalidate: called"); + /* + * sanity checks + */ + Assert(RelationIsValid(relation)); + Assert(HeapTupleIsValid(tuple)); + reloid = RelationGetRelid(relation); + /* ---------------- + * for each cache + * if the cache contains tuples from the specified relation + * compute the tuple's hash value(s) in this cache, + * and call the GlobalCatalogCacheIdInvalidate. + * ---------------- + */ + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + Assert(cacheinfo[cache_id].reloid == local_systupcaches[cache_id]->GetCCRelOid() && m_is_inited); + if (cacheinfo[cache_id].reloid != reloid) { + continue; + } + local_systupcaches[cache_id]->PrepareToInvalidateCacheTuple(tuple, newtuple, function); + } + need_eoxact_work = true; + } + + void CreateObject(); + void CreateCatBuckets(); + void Init() + { + if (m_is_inited) { + return; + } + for (int cache_id = 0; cache_id < SysCacheSize; cache_id++) { + local_systupcaches[cache_id]->Init(); + } + m_is_inited = true; + } + + /* + * GetCatCacheHashValue + * + * Compute the hash value for a given set of search keys. + * + * The reason for exposing this as part of the API is that the hash value is + * exposed in cache invalidation operations, so there are places outside the + * LocalCatCache code that need to be able to compute the hash values. + */ + uint32 GetCatCacheHashValue(int cache_id, Datum v1, Datum v2, Datum v3, Datum v4) + { + return local_systupcaches[cache_id]->GetCatCacheHashValue(v1, v2, v3, v4); + } + const TupleDesc GetCCTupleDesc(int cache_id) + { + return local_systupcaches[cache_id]->GetCCTupleDesc(); + } + + const LocalSysTupCache *GetLocalSysTupCache(int cache_id) + { + return local_systupcaches[cache_id]; + } + /* search interface */ + HeapTuple SearchTuple(int cache_id, Datum v1, Datum v2, Datum v3, Datum v4, int level = DEBUG2) + { + return SearchTupleN(cache_id, local_systupcaches[cache_id]->GetCCNKeys(), v1, v2, v3, v4, level); + } + + /* + * SearchTupleN() are SearchTuple() versions for a specific number of + * arguments. The compiler can inline the body and unroll loops, making them a + * bit faster than SearchTuple(). + */ + HeapTuple SearchTuple1(int cache_id, Datum v1) + { + return SearchTupleN(cache_id, 1, v1, 0, 0, 0); + } + + HeapTuple SearchTuple2(int cache_id, Datum v1, Datum v2) + { + return SearchTupleN(cache_id, 2, v1, v2, 0, 0); + } + HeapTuple SearchTuple3(int cache_id, Datum v1, Datum v2, Datum v3) + { + return SearchTupleN(cache_id, 3, v1, v2, v3, 0); + } + HeapTuple SearchTuple4(int cache_id, Datum v1, Datum v2, Datum v3, Datum v4) + { + return SearchTupleN(cache_id, 4, v1, v2, v3, v4); + } + +#ifndef ENABLE_MULTIPLE_NODES + /* + * Specific SearchTuple Function to support ProcedureCreate! 
+ */ + HeapTuple SearchTupleForProcAllArgs(Datum v1, Datum v2, Datum v3, Datum v4, Datum proArgModes) + { + LocalCatCTup *ct = local_systupcaches[PROCALLARGS]->SearchLocalCatCTupleForProcAllArgs( + v1, v2, v3, v4, proArgModes); + if (ct == NULL) { + return NULL; + } + return &ct->global_ct->tuple; + } +#endif + + CatCList *SearchCatCList(int cache_id, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level = DEBUG2) + { + Assert(m_is_inited); + LocalCatCList *tuples = local_systupcaches[cache_id]->SearchLocalCatCList(nkeys, v1, v2, v3, v4, level); + CatCList *cl = (CatCList *)tuples; + return cl; + } + + /* catcache management struct */ + LocalSysTupCache **local_systupcaches; +private: + HeapTuple SearchTupleN(int cache_id, int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level = DEBUG2) + { + Assert(m_is_inited); + LocalCatCTup *ct = local_systupcaches[cache_id]->SearchLocalCatCTuple(nkeys, v1, v2, v3, v4, level); + if (ct == NULL) { + return NULL; + } + return &ct->global_ct->tuple; + } + + bool m_is_inited; + bool need_eoxact_work; +}; +#endif \ No newline at end of file diff --git a/src/include/utils/knl_localsystupcache.h b/src/include/utils/knl_localsystupcache.h new file mode 100644 index 000000000..f176a2cf9 --- /dev/null +++ b/src/include/utils/knl_localsystupcache.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_localsystupcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localsystupcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALSYSTUPCACHE_H +#define KNL_LOCALSYSTUPCACHE_H + +#include "utils/knl_globalsysdbcache.h" +#include "utils/knl_globalsystabcache.h" +#include "utils/knl_globalsystupcache.h" +#include "utils/knl_localsyscache_common.h" +#include "utils/syscache.h" + +class LocalSysTupCache; +struct LocalCatCTup { + int ct_magic; /* for identifying CatCTup entries */ +#define CT_MAGIC 0x57261502 + uint32 hash_value; /* hash value for this tuple's keys */ + /* + * Each tuple in a cache is a member of a Dllist that stores the elements + * of its hash bucket. We keep each Dllist in LRU order to speed repeated + * lookups. + */ + Dlelem cache_elem; /* list member of per-bucket list */ + Datum keys[CATCACHE_MAXKEYS]; + /* + * A tuple marked "dead" must not be returned by subsequent searches. + * However, it won't be physically deleted from the cache until its + * refcount goes to zero. + * + * A negative cache entry is an assertion that there is no tuple matching + * a particular key. This is just as useful as a normal entry so far as + * avoiding catalog searches is concerned. Management of positive and + * negative entries is identical. 
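The comment above ends on the key point: a negative entry (in this code, a LocalCatCTup whose global_ct is NULL, as the field just below shows) memoizes "no tuple matches this key", so repeated failed probes skip the catalog scan entirely. A toy standalone illustration of the three outcomes a probe can have:

```
/*
 * payload == nullptr plays the role of global_ct == NULL: a negative
 * entry recording that no matching tuple exists.
 */
struct CachedTuple {
    const char *payload;
};

const char *Lookup(CachedTuple *hit, int *catalog_scans)
{
    if (hit == nullptr) {
        /* genuine miss: pay for a catalog scan, then cache the outcome,
         * positive or negative, so the next probe avoids this branch */
        (*catalog_scans)++;
        return nullptr;
    }
    if (hit->payload == nullptr) {
        return nullptr;      /* negative hit: absence is known, no scan */
    }
    return hit->payload;     /* positive hit */
}
```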
+ */ + int refcount; /* number of active references */ + GlobalCatCTup *global_ct; /* NULL means negative tuple */ + + void Release() + { + Assert(ct_magic == CT_MAGIC); + Assert(refcount > 0); + Assert(global_ct != NULL); + refcount--; + } +}; + +struct LocalCatCList : CatCList { + GlobalCatCList *global_cl; + void Release() + { + /* Safety checks to ensure we were handed a cache entry */ + Assert(cl_magic == CL_MAGIC); + Assert(refcount > 0); + refcount--; + } +}; + +class LocalSysTupCache : public BaseObject { +public: + /* common interface */ + LocalSysTupCache(int cache_id); + void ResetInitFlag(); + void AtEOXact_CatCache(bool isCommit) + { + invalid_entries.ResetInitFlag(); + } + int GetCCNKeys() const + { + return m_relinfo.cc_nkeys; + } + int GetCCKeyNo(int index) const + { + return m_relinfo.cc_keyno[index]; + } + Oid GetCCRelOid() const + { + return m_relinfo.cc_reloid; + } + Oid GetCCIndexOid() const + { + return m_relinfo.cc_indexoid; + } + bool GetCCRelIsShared() + { + return m_relinfo.cc_relisshared; + } + const MemoryContext GetMemoryCxt() + { + InitPhase2(); + return m_local_mem_cxt; + } + + const TupleDesc GetCCTupleDesc() + { + InitPhase2(); + return m_relinfo.cc_tupdesc; + } + /* init funcs */ + void CreateCatBucket(); + void Init() + { + Assert(!m_is_inited || m_relinfo.cc_relisshared); + m_is_inited = true; + } + + uint32 GetCatCacheHashValue(Datum v1, Datum v2, Datum v3, Datum v4); + /* search interface */ + LocalCatCTup *SearchLocalCatCTuple(int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level = DEBUG2) + { + InitPhase2(); + return SearchTupleInternal(nkeys, v1, v2, v3, v4, level); + } + +#ifndef ENABLE_MULTIPLE_NODES + /* + * Specific SearchLocalCatCTuple Function to support ProcedureCreate! + */ + LocalCatCTup *SearchTupleFromGlobalForProcAllArgs( + Datum *arguments, uint32 hash_value, Index hash_index, oidvector* argModes); + LocalCatCTup *SearchLocalCatCTupleForProcAllArgs(Datum v1, Datum v2, Datum v3, Datum v4, Datum proArgModes); +#endif + + LocalCatCList *SearchLocalCatCList(int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level = DEBUG2) + { + InitPhase2(); + return SearchListInternal(nkeys, v1, v2, v3, v4, level); + } + + void ReleaseGlobalRefcount(); + template + void FlushGlobalByInvalidMsg(Oid db_id, uint32 hash_value); + void ResetCatalogCache(); + void ResetGlobal(Oid db_id, bool is_commit) + { + if (!is_commit) { + invalid_entries.ResetCatalog(); + return; + } + FlushGlobalByInvalidMsg(db_id, 0); + } + void HashValueInvalidateLocal(uint32 hash_value); + void HashValueInvalidateGlobal(Oid db_id, uint32 hash_value, bool is_commit) + { + if (!is_commit) { + invalid_entries.InsertInvalidDefValue(hash_value); + return; + } + FlushGlobalByInvalidMsg(db_id, hash_value); + } + void PrepareToInvalidateCacheTuple(HeapTuple tuple, HeapTuple newtuple, void (*function)(int, uint32, Oid)); + InvalidBaseEntry invalid_entries; +private: + /* + * SearchTupleInternal + * + * This call searches a system cache for a tuple, opening the relation + * if necessary (on the first access to a particular cache). + * + * The result is NULL if not found, or a pointer to a HeapTuple in + * the cache. The caller must not modify the tuple, and must call + * ReleaseTuple() when done with it. + * + * The search key values should be expressed as Datums of the key columns' + * datatype(s). (Pass zeroes for any unused parameters.) 
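Every successful search must be paired with the Release() shown above to drop the refcount; forgetting the pair keeps the entry pinned past invalidation. The source does this manually, but purely as an illustration (this guard is not part of the patch), an RAII wrapper enforcing the pairing could look like:

```
/*
 * Illustrative RAII guard: releases the entry when the scope ends, so
 * no code path can leak the refcount.
 */
template <typename T>
class CacheRef {
public:
    explicit CacheRef(T *entry) : m_entry(entry) {}
    ~CacheRef()
    {
        if (m_entry != nullptr) {
            m_entry->Release();
        }
    }
    CacheRef(const CacheRef &) = delete;
    CacheRef &operator=(const CacheRef &) = delete;
    T *operator->() const { return m_entry; }
    explicit operator bool() const { return m_entry != nullptr; }
private:
    T *m_entry;
};
```

Usage would be along the lines of `CacheRef<LocalCatCTup> ct(cache->SearchLocalCatCTuple(...));`, with Release() then firing automatically at scope exit, including on error paths.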
As a special + * exception, the passed-in key for a NAME column can be just a C string; + * the caller need not go to the trouble of converting it to a fully + * null-padded NAME. + */ + LocalCatCTup *SearchTupleInternal(int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level); + LocalCatCTup *SearchTupleFromGlobal(Datum *arguments, uint32 hash_value, Index hash_index, int level); + LocalCatCList *SearchListInternal(int nkeys, Datum v1, Datum v2, Datum v3, Datum v4, int level); + LocalCatCList *SearchListFromGlobal(int nkeys, Datum *arguments, uint32 hash_value, int level); + + void FreeLocalCatCList(LocalCatCList *cl); + void HandleDeadLocalCatCList(LocalCatCList *cl); + void FreeDeadCls(); + + void FreeLocalCatCTup(LocalCatCTup *ct); + void HandleDeadLocalCatCTup(LocalCatCTup *ct); + void FreeDeadCts(); + + LocalCatCTup *CreateLocalCatCTup(GlobalCatCTup *global_ct, Datum *arguments, uint32 hash_value, Index hash_index); + + void InitPhase2() + { + if (unlikely(!m_is_inited_phase2)) { + InitPhase2Impl(); + } + if (unlikely(m_global_systupcache->enable_rls)) { + FlushRlsUserImpl(); + } + } + + void FlushRlsUserImpl(); + void InitPhase2Impl(); + + void RemoveTailTupleElements(Index hash_index); + void RemoveTailListElements(); + + Dllist *GetBucket(Index hash_index) + { + return &(cc_buckets[hash_index]); + } + + bool m_is_inited; + bool m_is_inited_phase2; + Oid m_rls_user; + Dllist m_dead_cts; + Dllist m_dead_cls; + + /* standard memory context, manage memory only used by current thread */ + MemoryContext m_local_mem_cxt; + + /* catcache manage struct */ + GlobalSysTupCache *m_global_systupcache; + + Oid m_db_id; + Oid m_cache_id; /* equal cc_id */ + int cc_id; /* cache identifier --- see syscache.h */ + CatTupRelInfoMsg m_relinfo; + int cc_nbuckets; /* # of hash buckets in this cache */ + Dllist cc_lists; /* list of CatCList structs */ + Dllist *cc_buckets; + long cc_searches; /* total # searches against this cache */ + long cc_hits; /* # of matches against existing entry */ + long cc_neg_hits; /* # of matches against negative entry */ + long cc_newloads; /* # of successful loads of new entry */ + /* + * cc_searches - (cc_hits + cc_neg_hits + cc_newloads) is number of failed + * searches, each of which will result in loading a negative entry + */ + long cc_invals; /* # of entries invalidated from cache */ + long cc_lsearches; /* total # list-searches */ + long cc_lhits; /* # of matches against existing lists */ +}; + +extern bool CheckPrivilegeOfTuple(HeapTuple ct); +#endif diff --git a/src/include/utils/knl_localtabdefcache.h b/src/include/utils/knl_localtabdefcache.h new file mode 100644 index 000000000..ca86d33e1 --- /dev/null +++ b/src/include/utils/knl_localtabdefcache.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * knl_localtabdefcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_localtabdefcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_LOCALTABDEFCACHE_H +#define KNL_LOCALTABDEFCACHE_H + +#include "utils/knl_globaltabdefcache.h" +#include "utils/knl_localbasedefcache.h" +#include "utils/rel.h" + +class LocalTabDefCache : public LocalBaseDefCache { +public: + LocalTabDefCache(); + void ResetInitFlag(); + Relation SearchRelation(Oid rel_id); + Relation SearchRelationFromLocal(Oid rel_id); + template + Relation SearchRelationFromGlobalCopy(Oid rel_id); + void InsertRelationIntoLocal(Relation rel); + void RemoveRelation(Relation rel); + void CreateDefBucket() + { + LocalBaseDefCache::CreateDefBucket(LOCAL_INIT_RELCACHE_SIZE); + } + void Init(); + void InitPhase2(); + void InitPhase3(); + + void InvalidateRelationNodeList(); + void InvalidateGlobalRelationNodeList() + { + m_global_tabdefcache->InvalidateRelationNodeList(); + } + void InvalidateRelationAll(); + void InvalidateRelationBucketsAll(); + void InvalidateGlobalRelation(Oid db_id, Oid rel_oid, bool is_commit); + /* Free all tupleDescs remembered in RememberToFreeTupleDescAtEOX in a batch when a transaction ends */ + void AtEOXact_FreeTupleDesc(); + void AtEOXact_RelationCache(bool isCommit); + void AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid); + /* Remember old tupleDescs when processing invalid messages */ + void RememberToFreeTupleDescAtEOX(TupleDesc td); + TupleDesc GetPgClassDescriptor() + { + if (m_pgclassdesc == NULL) { + m_pgclassdesc = m_global_tabdefcache->GetPgClassDescriptor(); + } + return m_pgclassdesc; + } + TupleDesc GetPgIndexDescriptor() + { + if (m_pgindexdesc == NULL) { + m_pgindexdesc = m_global_tabdefcache->GetPgIndexDescriptor(); + } + return m_pgindexdesc; + } + Relation RelationIdGetRelation(Oid rel_oid); + +private: + + void InsertRelationIntoGlobal(Relation rel, uint32 hash_value); + void CreateLocalRelEntry(Relation rel, Index hash_index); + Relation RemoveRelationByOid(Oid rel_id, Index hash_index); + void CopyLocalRelation(Relation dest, Relation src); + void LoadCriticalIndex(Oid indexoid, Oid heapoid); + void FormrDesc(const char *relationName, Oid relationReltype, bool is_shared, bool hasoids, int natts, + const struct FormData_pg_attribute *attrs); + +public: + /* + * This flag is false until we have hold the CriticalCacheBuildLock + */ + bool needNewLocalCacheFile; + + /* + * This flag is false until we have prepared the critical relcache entries + * that are needed to do indexscans on the tables read by relcache building. + * Should be used only by relcache.c and catcache.c + */ + bool criticalRelcachesBuilt; + + /* + * This flag is false until we have prepared the critical relcache entries + * for shared catalogs (which are the tables needed for login). + * Should be used only by relcache.c and postinit.c + */ + bool criticalSharedRelcachesBuilt; + + /* + * This counter counts relcache inval events received since backend startup + * (but only for rels that are actually in cache). Presently, we use it only + * to detect whether data about to be written by write_relcache_init_file() + * might already be obsolete. + */ + long relcacheInvalsReceived; + + /* + * This list remembers the OIDs of the non-shared relations cached in the + * database's local relcache init file. 
Note that there is no corresponding + * list for the shared relcache init file, for reasons explained in the + * comments for RelationCacheInitFileRemove. + */ + List *initFileRelationIds; + + bool need_eoxact_work; + + struct tupleDesc *m_pgclassdesc; + struct tupleDesc *m_pgindexdesc; + + /* + * BucketMap cache, consisting of a list of BucketMapCache elements. + * The location information of every relcache entry actually points + * into these list members. + * Attention: we need to invalidate bucket map caches when accepting + * SI messages of tuples in PGXC_GROUP or SI reset messages! + */ + List *g_bucketmap_cache; + uint32 max_bucket_map_size; + + struct tupleDesc **EOXactTupleDescArray; + int NextEOXactTupleDescNum; + int EOXactTupleDescArrayLen; +private: + bool m_is_inited; + bool m_is_inited_phase2; + bool m_is_inited_phase3; + + GlobalTabDefCache *m_global_tabdefcache; + GlobalTabDefCache *m_global_shared_tabdefcache; +}; + +#endif \ No newline at end of file diff --git a/src/include/utils/knl_partcache.h b/src/include/utils/knl_partcache.h new file mode 100644 index 000000000..a8324ca77 --- /dev/null +++ b/src/include/utils/knl_partcache.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * knl_partcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_partcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_PARTCACHE_H +#define KNL_PARTCACHE_H +#include "catalog/pg_partition.h" +#include "utils/knl_localpartdefcache.h" +#include "utils/knl_localsysdbcache.h" +#include "knl/knl_thread.h" +#include "utils/relcache.h" +#include "utils/snapshot.h" + +/* part 1: macro definitions, global variables, and typedefs */ +typedef struct partidcacheent { + Oid partoid; + Partition partdesc; +} PartIdCacheEnt; + +/* part 2: functions used only in this gsc source file */ +extern void PartitionDestroyPartition(Partition partition); +extern void PartitionClearPartition(Partition partition, bool rebuild); +extern void PartitionInitPhysicalAddr(Partition partition); +extern void PartitionReloadIndexInfo(Partition part); +extern Partition PartitionBuildDesc(Oid targetPartId, StorageType storage_type, bool insertIt); + + +#define PartitionIdCacheInsertIntoLocal(PARTITION) \ + do { \ + if (EnableLocalSysCache()) { \ + t_thrd.lsc_cxt.lsc->partdefcache.InsertPartitionIntoLocal((PARTITION)); \ + } else { \ + PartIdCacheEnt *idhentry; \ + bool found = true; \ + idhentry = (PartIdCacheEnt *)hash_search( \ + u_sess->cache_cxt.PartitionIdCache, (void *)&((PARTITION)->pd_id), HASH_ENTER, &found); \ + /* used to give notice if found -- now just keep quiet */ \ + idhentry->partdesc = PARTITION; \ + } \ + } while (0) + +#define PartitionIdCacheLookup(ID, PARTITION) \ + do { \ + if (EnableLocalSysCache()) { \ + (PARTITION) = t_thrd.lsc_cxt.lsc->partdefcache.SearchPartition((ID)); \ + } else { \ + PartIdCacheEnt 
*hentry; \ + hentry = \ + (PartIdCacheEnt *)hash_search(u_sess->cache_cxt.PartitionIdCache, (void *)&(ID), HASH_FIND, NULL); \ + if (hentry != NULL) \ + (PARTITION) = hentry->partdesc; \ + else \ + (PARTITION) = NULL; \ + } \ + } while (0) + +#define PartitionIdCacheDeleteLocal(PARTITION) \ + do { \ + if (EnableLocalSysCache()) { \ + t_thrd.lsc_cxt.lsc->partdefcache.RemovePartition((PARTITION)); \ + } else { \ + PartIdCacheEnt *idhentry; \ + idhentry = (PartIdCacheEnt *)hash_search( \ + u_sess->cache_cxt.PartitionIdCache, (void *)&((PARTITION)->pd_id), HASH_REMOVE, NULL); \ + if (idhentry == NULL) \ + ereport(WARNING, \ + (errcode(ERRCODE_UNDEFINED_TABLE), \ + errmsg("trying to delete a rd_id partdesc that does not exist"))); \ + } \ + } while (0) + +#define PartitionIdCacheLookupOnlyLocal(ID, PARTITION) \ + do { \ + if (EnableLocalSysCache()) { \ + (PARTITION) = t_thrd.lsc_cxt.lsc->partdefcache.SearchPartitionFromLocal((ID)); \ + } else { \ + PartIdCacheEnt *hentry; \ + hentry = \ + (PartIdCacheEnt *)hash_search(u_sess->cache_cxt.PartitionIdCache, (void *)&(ID), HASH_FIND, NULL); \ + if (hentry != NULL) \ + (PARTITION) = hentry->partdesc; \ + else \ + (PARTITION) = NULL; \ + } \ + } while (0) + +inline bool PartCacheNeedEoxactWork() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->partdefcache.part_cache_need_eoxact_work; + } else { + return u_sess->cache_cxt.part_cache_need_eoxact_work; + } +} + +inline void SetPartCacheNeedEoxactWork(bool value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->partdefcache.part_cache_need_eoxact_work = value; + } else { + u_sess->cache_cxt.part_cache_need_eoxact_work = value; + } +} +#endif \ No newline at end of file diff --git a/src/include/utils/knl_relcache.h b/src/include/utils/knl_relcache.h new file mode 100644 index 000000000..cf269e9ed --- /dev/null +++ b/src/include/utils/knl_relcache.h @@ -0,0 +1,292 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
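All of the PartitionIdCache* macros above, and the inline helpers that follow throughout these headers, share one dispatch shape: test EnableLocalSysCache() once and route to either the thread-local cache or the legacy per-session hash table. Stripped of the Dllist and hash_search details, the shape is:

```
#include <unordered_map>

struct Part {
    int id;
};

/* Per-thread path (GSC enabled) and legacy per-session path. */
static thread_local std::unordered_map<int, Part *> lsc_parts;
static std::unordered_map<int, Part *> session_parts;

static bool EnableLocalSysCacheStub()   /* stand-in for the real GUC test */
{
    return true;
}

Part *PartitionIdLookup(int id)
{
    auto &table = EnableLocalSysCacheStub() ? lsc_parts : session_parts;
    auto it = table.find(id);
    return (it == table.end()) ? nullptr : it->second;
}
```

Keeping both paths behind a single predicate is what lets the GSC feature be toggled without touching every call site.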
+ * --------------------------------------------------------------------------------------- + * + * knl_relcache.h + * + * + * + * IDENTIFICATION + * src/include/utils/knl_relcache.h + * + * --------------------------------------------------------------------------------------- + */ + +#ifndef KNL_RELCACHE_H +#define KNL_RELCACHE_H +#include "postgres.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_class.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/pg_database.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_auth_members.h" +#include "catalog/pg_user_status.h" +/* + * hardcoded tuple descriptors, contents generated by genbki.pl + */ +extern const FormData_pg_attribute Desc_pg_class[Natts_pg_class]; +extern const FormData_pg_attribute Desc_pg_attribute[Natts_pg_attribute]; +extern const FormData_pg_attribute Desc_pg_proc[Natts_pg_proc]; +extern const FormData_pg_attribute Desc_pg_type[Natts_pg_type]; +extern const FormData_pg_attribute Desc_pg_database[Natts_pg_database]; +extern const FormData_pg_attribute Desc_pg_authid[Natts_pg_authid]; +extern const FormData_pg_attribute Desc_pg_auth_members[Natts_pg_auth_members]; +extern const FormData_pg_attribute Desc_pg_index[Natts_pg_index]; +extern const FormData_pg_attribute Desc_pg_user_status[Natts_pg_user_status]; + +/* non-export function prototypes */ +extern void RelationDestroyRelation(Relation relation, bool remember_tupdesc); +extern void RelationClearRelation(Relation relation, bool rebuild); +extern void RelationReloadIndexInfo(Relation relation); +extern Relation RelationBuildDesc(Oid targetRelId, bool insertIt, bool buildkey = true); + +extern void formrdesc(const char *relationName, Oid relationReltype, bool isshared, bool hasoids, int natts, + const FormData_pg_attribute *attrs); + +extern void RelationInitPhysicalAddr(Relation relation); +extern Relation load_critical_index(Oid indexoid, Oid heapoid); + +extern TupleDesc GetPgClassDescriptor(void); +extern TupleDesc GetPgIndexDescriptor(void); + +extern void SetBackendId(Relation relation); +extern void RelationBuildRuleLock(Relation relation); +extern void RelationCacheInvalidOid(Relation relation); + +#define RelationIdCacheInsertIntoLocal(RELATION) \ + do { \ + if (EnableLocalSysCache()) { \ + t_thrd.lsc_cxt.lsc->tabdefcache.InsertRelationIntoLocal((RELATION)); \ + } else { \ + RelIdCacheEnt *idhentry = NULL; \ + bool found = false; \ + idhentry = (RelIdCacheEnt *)hash_search(u_sess->relcache_cxt.RelationIdCache, \ + (void *)&((RELATION)->rd_id), HASH_ENTER, &found); \ + /* used to give notice if found -- now just keep quiet */ \ + idhentry->reldesc = RELATION; \ + } \ + } while (0) + +#define RelationIdCacheLookup(ID, RELATION) \ + do { \ + if (EnableLocalSysCache()) { \ + (RELATION) = t_thrd.lsc_cxt.lsc->tabdefcache.SearchRelation((ID)); \ + } else { \ + RelIdCacheEnt *hentry = NULL; \ + hentry = \ + (RelIdCacheEnt *)hash_search(u_sess->relcache_cxt.RelationIdCache, (void *)&(ID), HASH_FIND, NULL); \ + if (hentry != NULL) \ + (RELATION) = hentry->reldesc; \ + else \ + (RELATION) = NULL; \ + } \ + } while (0) + +#define RelationIdCacheLookupOnlyLocal(ID, RELATION) \ + do { \ + if (EnableLocalSysCache()) { \ + (RELATION) = t_thrd.lsc_cxt.lsc->tabdefcache.SearchRelationFromLocal((ID)); \ + } else { \ + RelIdCacheEnt *hentry = NULL; \ + hentry = \ + (RelIdCacheEnt *)hash_search(u_sess->relcache_cxt.RelationIdCache, (void *)&(ID), HASH_FIND, NULL); \ + if (hentry != NULL) \ + (RELATION) = hentry->reldesc; \ + else \ + 
(RELATION) = NULL; \ + } \ + } while (0) + +#define RelationCacheDeleteLocal(RELATION) \ + do { \ + if (EnableLocalSysCache()) { \ + (void)t_thrd.lsc_cxt.lsc->tabdefcache.RemoveRelation(RELATION); \ + } else { \ + RelIdCacheEnt *idhentry; \ + idhentry = (RelIdCacheEnt *)hash_search(u_sess->relcache_cxt.RelationIdCache, \ + (void *)&((RELATION)->rd_id), HASH_REMOVE, NULL); \ + if (idhentry == NULL) \ + ereport(WARNING, (errmsg("trying to delete a rd_id reldesc that does not exist"))); \ + } \ + } while (0) + + +inline TupleDesc GetLSCPgClassDescriptor() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.GetPgClassDescriptor(); + } else { + return GetPgClassDescriptor(); + } +} + +inline TupleDesc GetLSCPgIndexDescriptor() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.GetPgIndexDescriptor(); + } else { + return GetPgIndexDescriptor(); + } +} + +inline bool LocalRelCacheCriticalRelcachesBuilt() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.criticalRelcachesBuilt; + } else { + return u_sess->relcache_cxt.criticalRelcachesBuilt; + } +} +inline void SetLocalRelCacheCriticalRelcachesBuilt(bool value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.criticalRelcachesBuilt = value; + } else { + u_sess->relcache_cxt.criticalRelcachesBuilt = value; + } +} + +inline bool LocalRelCacheCriticalSharedRelcachesBuilt() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.criticalSharedRelcachesBuilt; + } else { + return u_sess->relcache_cxt.criticalSharedRelcachesBuilt; + } +} +inline void SetLocalRelCacheCriticalSharedRelcachesBuilt(bool value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.criticalSharedRelcachesBuilt = value; + } else { + u_sess->relcache_cxt.criticalSharedRelcachesBuilt = value; + } +} + +inline long LocalRelCacheInvalsReceived() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.relcacheInvalsReceived; + } else { + return u_sess->relcache_cxt.relcacheInvalsReceived; + } +} + +inline void AddLocalRelCacheInvalsReceived(int value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.relcacheInvalsReceived += value; + } else { + u_sess->relcache_cxt.relcacheInvalsReceived += value; + } +} + +inline bool LocalRelCacheNeedEOXactWork() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.need_eoxact_work; + } else { + return u_sess->relcache_cxt.need_eoxact_work; + } +} + +inline void SetLocalRelCacheNeedEOXactWork(bool value) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.need_eoxact_work = value; + } else { + u_sess->relcache_cxt.need_eoxact_work = value; + } +} + +inline List *LocalRelCacheInitFileRelationIds() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.initFileRelationIds; + } else { + return u_sess->relcache_cxt.initFileRelationIds; + } +} + +inline void LconsLocalRelCacheInitFileRelationIds(Relation rel) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.initFileRelationIds = + lcons_oid(RelationGetRelid(rel), t_thrd.lsc_cxt.lsc->tabdefcache.initFileRelationIds); + } else { + u_sess->relcache_cxt.initFileRelationIds = + lcons_oid(RelationGetRelid(rel), u_sess->relcache_cxt.initFileRelationIds); + } +} + +inline void ClearLocalRelCacheInitFileRelationIds() +{ + if (EnableLocalSysCache()) { + Assert(t_thrd.lsc_cxt.lsc->tabdefcache.initFileRelationIds == NIL); + t_thrd.lsc_cxt.lsc->tabdefcache.initFileRelationIds = 
NIL; + } else { + u_sess->relcache_cxt.initFileRelationIds = NIL; + } +} + +inline List *LocalRelCacheGBucketMapCache() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.g_bucketmap_cache; + } else { + return u_sess->relcache_cxt.g_bucketmap_cache; + } +} + +inline void AppendLocalRelCacheGBucketMapCache(ListCell *cell) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.g_bucketmap_cache = + lappend(t_thrd.lsc_cxt.lsc->tabdefcache.g_bucketmap_cache, cell); + } else { + u_sess->relcache_cxt.g_bucketmap_cache = lappend(u_sess->relcache_cxt.g_bucketmap_cache, cell); + } +} + +inline void DeteleLocalRelCacheGBucketMapCache(ListCell *cell, ListCell *prev) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.g_bucketmap_cache = + list_delete_cell(t_thrd.lsc_cxt.lsc->tabdefcache.g_bucketmap_cache, cell, prev); + } else { + u_sess->relcache_cxt.g_bucketmap_cache = list_delete_cell(u_sess->relcache_cxt.g_bucketmap_cache, cell, prev); + } +} + +inline uint32 LocalRelCacheMaxBucketMapSize() +{ + if (EnableLocalSysCache()) { + return t_thrd.lsc_cxt.lsc->tabdefcache.max_bucket_map_size; + } else { + return u_sess->relcache_cxt.max_bucket_map_size; + } +} + +inline void EnlargeLocalRelCacheMaxBucketMapSize(double ratio) +{ + if (EnableLocalSysCache()) { + t_thrd.lsc_cxt.lsc->tabdefcache.max_bucket_map_size *= ratio; + } else { + u_sess->relcache_cxt.max_bucket_map_size *= ratio; + } +} + +#endif \ No newline at end of file diff --git a/src/include/utils/memprot.h b/src/include/utils/memprot.h index 499364a36..0e4e09f83 100644 --- a/src/include/utils/memprot.h +++ b/src/include/utils/memprot.h @@ -101,12 +101,6 @@ extern void gs_memprot_reset_beyondchunk(void); #define GS_MEMPROT_SHARED_MALLOC(sz) MemoryProtectFunctions::gs_memprot_malloc(sz) #define GS_MEMPROT_SHARED_FREE(ptr, sz) MemoryProtectFunctions::gs_memprot_free(ptr, sz) -/** - * reserve or release memory for compressed table - */ -extern bool compressed_mem_reserve(Size sz, bool protect); -extern void compressed_mem_release(Size sz); - extern int getSessionMemoryUsageMB(); #endif diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h index 2e6e231c8..f8ef78a88 100644 --- a/src/include/utils/memutils.h +++ b/src/include/utils/memutils.h @@ -71,6 +71,8 @@ typedef struct StandardChunkHeader { #ifdef MEMORY_CONTEXT_CHECKING /* when debugging memory usage, also store actual requested size */ Size requested_size; +#endif +#ifdef MEMORY_CONTEXT_TRACK const char* file; int line; #endif diff --git a/src/include/utils/numeric.h b/src/include/utils/numeric.h index 715dc3ca5..c4be2550d 100644 --- a/src/include/utils/numeric.h +++ b/src/include/utils/numeric.h @@ -235,7 +235,7 @@ extern int convert_int128_to_short_numeric_byscale( _out_ char* outBuf, _in_ int128 v, _in_ int32 typmod, _in_ int32 vscale); extern Datum convert_short_numeric_to_int64(_in_ Numeric inNum, _out_ bool* outSuccess); extern Datum convert_short_numeric_to_int128(_in_ Numeric inNum, _out_ bool* outSuccess); -extern Datum try_convert_numeric_normal_to_fast(Datum value); +extern Datum try_convert_numeric_normal_to_fast(Datum value, ScalarVector *arr = NULL); extern int64 convert_short_numeric_to_int64_byscale(_in_ Numeric n, _in_ int scale); extern void convert_short_numeric_to_int128_byscale(_in_ Numeric n, _in_ int scale, _out_ int128& result); extern int32 get_ndigit_from_numeric(_in_ Numeric num); @@ -300,7 +300,9 @@ typedef struct NumericVar { #define init_var(v) MemSetAligned(v, 0, sizeof(NumericVar)) 
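The memutils.h hunk above splits allocation-site tracking out of MEMORY_CONTEXT_CHECKING: the file/line fields now live behind their own MEMORY_CONTEXT_TRACK guard (and, in the palloc.h hunk that follows, MemoryContextAlloc keeps forwarding __FILE__/__LINE__ even in non-checking builds). A minimal rendering of the resulting chunk-header layout, under the assumption that the build defines the tracking macro:

```
#include <cstddef>

#define MEMORY_CONTEXT_TRACK 1   /* assume a tracking build for this sketch */

struct ChunkHeader {
    size_t size;                 /* always present */
#ifdef MEMORY_CONTEXT_CHECKING
    size_t requested_size;       /* only in full checking builds */
#endif
#ifdef MEMORY_CONTEXT_TRACK
    const char *file;            /* allocation site, one record per chunk */
    int line;
#endif
};

void RecordAllocSite(ChunkHeader *h, const char *file, int line)
{
#ifdef MEMORY_CONTEXT_TRACK
    h->file = file;              /* callers pass __FILE__ / __LINE__ */
    h->line = line;
#endif
}
```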
Numeric makeNumeric(NumericVar* var); extern Numeric make_result(NumericVar *var); +extern void init_var_from_num(Numeric num, NumericVar* dest); extern void free_var(NumericVar *var); +extern bool numericvar_to_int64(const NumericVar* var, int64* result); extern void int64_to_numericvar(int64 val, NumericVar *var); extern void add_var(NumericVar *var1, NumericVar *var2, NumericVar *result); extern char *numeric_normalize(Numeric num); diff --git a/src/include/utils/numeric_gs.h b/src/include/utils/numeric_gs.h index 8d9197a9f..0882d7a9b 100644 --- a/src/include/utils/numeric_gs.h +++ b/src/include/utils/numeric_gs.h @@ -16,6 +16,7 @@ #define _PG_NUMERIC_GS_H_ #include "fmgr.h" +#include "vecexecutor/vectorbatch.h" #define NUMERIC_HDRSZ (VARHDRSZ + sizeof(uint16) + sizeof(int16)) #define NUMERIC_HDRSZ_SHORT (VARHDRSZ + sizeof(uint16)) @@ -159,10 +160,14 @@ * @IN value: the value of bi64 * @IN scale: the scale of bi64 */ -inline Datum makeNumeric64(int64 value, uint8 scale) +inline Datum makeNumeric64(int64 value, uint8 scale, ScalarVector *arr = NULL) { - Numeric result; - result = (Numeric)palloc(NUMERIC_64SZ); + Numeric result = NULL; + if (arr == NULL) { + result = (Numeric)palloc(NUMERIC_64SZ); + } else { + result = (Numeric)arr->m_buf->Allocate(NUMERIC_64SZ); + } SET_VARSIZE(result, NUMERIC_64SZ); result->choice.n_header = NUMERIC_64 + scale; *((int64*)(result->choice.n_bi.n_data)) = value; @@ -174,10 +179,14 @@ inline Datum makeNumeric64(int64 value, uint8 scale) * @IN value: the value of bi128 * @IN scale: the scale of bi128 */ -inline Datum makeNumeric128(int128 value, uint8 scale) +inline Datum makeNumeric128(int128 value, uint8 scale, ScalarVector *arr = NULL) { Numeric result; - result = (Numeric)palloc(NUMERIC_128SZ); + if (arr == NULL) { + result = (Numeric)palloc(NUMERIC_128SZ); + } else { + result = (Numeric)arr->m_buf->Allocate(NUMERIC_128SZ); + } SET_VARSIZE(result, NUMERIC_128SZ); result->choice.n_header = NUMERIC_128 + scale; errno_t rc = EOK; diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h index 74a5fe480..a09420797 100644 --- a/src/include/utils/palloc.h +++ b/src/include/utils/palloc.h @@ -64,7 +64,11 @@ extern THR_LOCAL PGDLLIMPORT MemoryContext TopMemoryContext; #define DISABLE_MEMORY_PROTECT() (t_thrd.utils_cxt.memNeedProtect = false) /* Definition for the unchanged interfaces */ +#ifndef MEMORY_CONTEXT_CHECKING +#define MemoryContextAlloc(context, size) MemoryAllocFromContext(context, size, __FILE__, __LINE__) +#else #define MemoryContextAlloc(context, size) MemoryContextAllocDebug(context, size, __FILE__, __LINE__) +#endif #define MemoryContextAllocZero(context, size) MemoryContextAllocZeroDebug(context, size, __FILE__, __LINE__) #define MemoryContextAllocZeroAligned(context, size) \ MemoryContextAllocZeroAlignedDebug(context, size, __FILE__, __LINE__) @@ -80,6 +84,7 @@ extern THR_LOCAL PGDLLIMPORT MemoryContext TopMemoryContext; /* * Fundamental memory-allocation operations (more are in utils/memutils.h) */ +extern void* MemoryAllocFromContext(MemoryContext context, Size size, const char* file, int line); extern void* MemoryContextAllocDebug(MemoryContext context, Size size, const char* file, int line); extern void* MemoryContextAllocHugeDebug(MemoryContext context, Size size, const char* file, int line); extern void* repallocHugeDebug(void* pointer, Size size, const char* file, int line); diff --git a/src/include/utils/partcache.h b/src/include/utils/partcache.h index 8010063e6..f62291c98 100644 --- a/src/include/utils/partcache.h +++ 
b/src/include/utils/partcache.h @@ -68,7 +68,7 @@ extern void PartitionCacheInitializePhase3(void); * Routine to create a partcache entry for an about-to-be-created relation */ Partition PartitionBuildLocalPartition(const char *relname, Oid partid, Oid partfilenode, Oid parttablespace, - StorageType storage_type, Datum reloptions); + StorageType storage_type); /* * Routines for backend startup */ @@ -82,6 +82,7 @@ extern void PartitionCacheInvalidate(void); extern void PartitionCloseSmgrByOid(Oid partitionId); extern void AtEOXact_PartitionCache(bool isCommit); extern void AtEOSubXact_PartitionCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid); +extern void UpdatePartrelPointer(Relation partrel, Relation rel, Partition part); extern Relation partitionGetRelation(Relation rel, Partition part); void releaseDummyRelation(Relation* relation); @@ -105,5 +106,6 @@ extern void PartitionSetEnabledClean( extern void PartitionSetAllEnabledClean(Oid parentOid); extern void PartitionGetAllInvisibleParts(Oid parentOid, OidRBTree** invisibleParts); extern bool PartitionMetadataDisabledClean(Relation pgPartition); +extern void UpdateWaitCleanGpiRelOptions(Relation pgPartition, HeapTuple partTuple, bool enable, bool inplace); #endif /* RELCACHE_H */ diff --git a/src/include/utils/partitionkey.h b/src/include/utils/partitionkey.h index 3ede0a6ce..f118f87ef 100644 --- a/src/include/utils/partitionkey.h +++ b/src/include/utils/partitionkey.h @@ -60,6 +60,10 @@ extern List* untransformPartitionBoundary(Datum options); */ extern void CheckValuePartitionKeyType(Form_pg_attribute* attrs, List* pos); +extern Oid getPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel); +extern Oid GetSubPartitionOidForRTE(RangeTblEntry *rte, RangeVar *relation, ParseState *pstate, Relation rel, + Oid *partOid); + #define partitonKeyCompareForRouting(value1, value2, len, compare) \ do { \ uint32 i = 0; \ diff --git a/src/include/utils/partitionmap.h b/src/include/utils/partitionmap.h index c32581188..814a3c8c3 100644 --- a/src/include/utils/partitionmap.h +++ b/src/include/utils/partitionmap.h @@ -135,4 +135,7 @@ extern bool partitionHasToast(Oid partOid); extern void constCompare(Const* value1, Const* value2, int& compare); +extern struct ListPartElement* CopyListElements(ListPartElement* src, int elementNum); +extern struct HashPartElement* CopyHashElements(HashPartElement* src, int elementNum, int partkeyNum); + #endif /* PARTITIONMAP_H_ */ diff --git a/src/include/utils/pg_lzcompress.h b/src/include/utils/pg_lzcompress.h index bb433333a..77fd1e306 100644 --- a/src/include/utils/pg_lzcompress.h +++ b/src/include/utils/pg_lzcompress.h @@ -127,8 +127,4 @@ extern const PGLZ_Strategy* const PGLZ_strategy_always; extern bool pglz_compress(const char* source, int32 slen, PGLZ_Header* dest, const PGLZ_Strategy* strategy); extern void pglz_decompress(const PGLZ_Header* source, char* dest); -extern int32 lz_compress(const char* source, int32 slen, char* dest); - -extern int32 lz_decompress(const char* source, int32 slen, char* dest, int32 rawsize, bool check_complete); - #endif /* _PG_LZCOMPRESS_H_ */ diff --git a/src/include/utils/pl_global_package_runtime_cache.h b/src/include/utils/pl_global_package_runtime_cache.h index e7bfb08e3..1610faf45 100644 --- a/src/include/utils/pl_global_package_runtime_cache.h +++ b/src/include/utils/pl_global_package_runtime_cache.h @@ -1,8 +1,8 @@ /* * Portions Copyright (c) 2021 Huawei Technologies Co.,Ltd. 
* Portions Copyright (c) 2002-2007, PostgreSQL Global Development Group - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * openGauss is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: @@ -39,25 +39,50 @@ typedef struct SessionPackageRuntime { List *runtimes; MemoryContext context; + List* portalContext; + List* portalData; + List* funcValInfo; } SessionPackageRuntime; typedef struct GPRCValue { + uint64 sessionId; SessionPackageRuntime* sessPkgRuntime; } GPRCValue; class PLGlobalPackageRuntimeCache : public BaseObject { public: - PLGlobalPackageRuntimeCache(); - ~PLGlobalPackageRuntimeCache(); - void init(); - bool add(uint64 sessionId, SessionPackageRuntime* runtime); - SessionPackageRuntime* fetch(uint64 sessionId); - bool remove(uint64 sessionId); + bool Add(uint64 sessionId, SessionPackageRuntime* runtime); + SessionPackageRuntime* Fetch(uint64 sessionId); + bool Remove(uint64 sessionId); + + static PLGlobalPackageRuntimeCache* Instance() + { + static PLGlobalPackageRuntimeCache runtimeCache; + if (!inited) { + runtimeCache.Init(); + inited = true; + } + return &runtimeCache; + } + private: - GPRCHashCtl *hashArray; - + + PLGlobalPackageRuntimeCache() { + }; + + ~PLGlobalPackageRuntimeCache() { + }; + + void Init(); + GPRCHashCtl *hashArray; + static volatile bool inited; }; +List* CopyPortalDatas(SessionPackageRuntime *runtime); +List* CopyPortalContexts(List *portalContexts); +List* CopyFuncInfoDatas(SessionPackageRuntime *runtime); + + #endif diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h index 1968c1ff0..01c28d8b2 100644 --- a/src/include/utils/plancache.h +++ b/src/include/utils/plancache.h @@ -7,8 +7,8 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/utils/plancache.h * * ------------------------------------------------------------------------- @@ -444,5 +444,6 @@ extern void PlanCacheSysCallback(Datum arg, int cacheid, uint32 hashvalue); extern bool IsStreamSupport(); extern void AcquirePlannerLocks(List* stmt_list, bool acquire); extern void AcquireExecutorLocks(List* stmt_list, bool acquire); - +extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, CachedPlan *plan, ResourceOwner owner); +extern bool CachedPlanIsSimplyValid(CachedPlanSource *plansource, CachedPlan *plan, ResourceOwner owner); #endif /* PLANCACHE_H */ diff --git a/src/include/utils/plpgsql.h b/src/include/utils/plpgsql.h index a409b66e0..0c9b3b429 100644 --- a/src/include/utils/plpgsql.h +++ b/src/include/utils/plpgsql.h @@ -5,8 +5,8 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * IDENTIFICATION * src/include/utils/plpgsql.h * @@ -52,6 +52,8 @@ enum { COMPILIE_PKG_FUNC, /* compile anonymous block in package */ COMPILIE_PKG_ANON_BLOCK, + /* compile anonymous block's func in package */ + COMPILIE_PKG_ANON_BLOCK_FUNC, /* compile anonymous block */ COMPILIE_ANON_BLOCK, NONE_STATUS @@ -283,8 +285,20 @@ typedef struct PLpgSQL_expr { /* SQL Query to plan and
execute */ Expr* expr_simple_expr; /* NULL means not a simple expr */ int expr_simple_generation; /* plancache generation we checked */ Oid expr_simple_type; /* result type Oid, if simple */ + int32 expr_simple_typmod; /* result typmod, if simple */ + bool expr_simple_mutable; /* true if simple expr is mutable */ bool expr_simple_need_snapshot; /* true means need snapshot */ + /* + * If the expression was ever determined to be simple, we remember its + * CachedPlanSource and CachedPlan here. If expr_simple_plan_lxid matches + * current LXID, then we hold a refcount on expr_simple_plan in the + * current transaction. Otherwise we need to get one before re-using it. + */ + CachedPlanSource *expr_simple_plansource; /* extracted from "plan" */ + CachedPlan *expr_simple_plan; /* extracted from "plan" */ + LocalTransactionId expr_simple_plan_lxid; + /* * if expr is simple AND prepared in current transaction, * expr_simple_state and expr_simple_in_use are valid. Test validity by @@ -296,8 +310,16 @@ typedef struct PLpgSQL_expr { /* SQL Query to plan and execute */ LocalTransactionId expr_simple_lxid; bool isouttype; /* the parameter will output */ bool is_funccall; + int out_param_dno; /* if expr is a func call and the param is separate from return values */ uint32 idx; bool is_cachedplan_shared; + /* if the expr has a table of index var, we should find its param. */ + bool is_have_tableof_index_var; + int tableof_var_dno; + /* if the expr has a table of index function, we should find its args->param. */ + bool is_have_tableof_index_func; + /* dno may be 0, so an extra variable is needed */ + int tableof_func_dno; } PLpgSQL_expr; typedef struct { /* openGauss data type */ @@ -325,6 +347,12 @@ typedef struct { /* openGauss data type */ Oid cursorCompositeOid = InvalidOid; } PLpgSQL_type; +typedef struct { + Oid userId; + int secContext; + int level; +} transactionNode; + typedef struct PLpgSQL_var { /* Scalar variable */ int dtype; int dno; @@ -348,11 +376,14 @@ typedef struct PLpgSQL_var { /* Scalar variable */ bool freeval; bool is_cursor_var; /* variable is a refcursor */ bool is_cursor_open; /* mark var's isopen state; the isopen option should always be not null */ + bool cursor_closed; /* if the var is a cursor, mark whether it was closed by CLOSE */ List* pkg_name = NULL; PLpgSQL_package* pkg = NULL; Oid tableOfIndexType = InvalidOid; /* type Oid of table of */ + bool isIndexByTblOf; struct PLpgSQL_var* nest_table; /* origin nest table type, copy from it when add new nest table */ HTAB* tableOfIndex = NULL; /* mapping of table of index */ + int nest_layers = 0; } PLpgSQL_var; typedef struct { /* Row variable */ @@ -376,7 +407,7 @@ typedef struct { /* Row variable */ */ int nfields; char** fieldnames; - int* varnos; + int* varnos; /* only used for a non-package var; a package var is in row->pkg->datums */ int customErrorCode; /* only for exception variable. */ int intoplaceholders; /* number of placeholders, for anonymous block in dynamic stmt */ @@ -1004,6 +1035,7 @@ typedef struct PLpgSQL_function { /* Complete compiled function */ int ndatums; PLpgSQL_datum** datums; + bool* datum_need_free; /* need to free datum when freeing function memory?
*/ PLpgSQL_stmt_block* action; List* goto_labels; @@ -1018,6 +1050,9 @@ typedef struct PLpgSQL_function { /* Complete compiled function */ /* pl debugger ptr */ struct DebugInfo* debug; struct PLpgSQL_nsitem* ns_top; + + bool is_autonomous; + bool is_plpgsql_func_with_outparam; } PLpgSQL_function; class AutonomousSession; @@ -1027,6 +1062,9 @@ typedef struct PLpgSQL_execstate { /* Runtime execution data */ Datum retval; bool retisnull; Oid rettype; /* type of current retval */ + Datum paramval; + bool paramisnull; + Oid paramtype; Oid fn_rettype; /* info about declared function rettype */ bool retistuple; @@ -1035,6 +1073,7 @@ typedef struct PLpgSQL_execstate { /* Runtime execution data */ bool readonly_func; TupleDesc rettupdesc; + TupleDesc paramtupdesc; char* exitlabel; /* the "target" label of the current EXIT or * CONTINUE stmt, if any */ ErrorData* cur_error; /* current exception handler's error */ @@ -1065,6 +1104,12 @@ typedef struct PLpgSQL_execstate { /* Runtime execution data */ int datums_alloc; PLpgSQL_datum** datums; + /* EState and resowner to use for "simple" expression evaluation */ + EState *simple_eval_estate; + ResourceOwner simple_eval_resowner; + + ParamListInfo paramLI; + /* temporary state for results from evaluation of query or expr */ SPITupleTable* eval_tuptable; uint32 eval_processed; @@ -1088,6 +1133,7 @@ typedef struct PLpgSQL_execstate { /* Runtime execution data */ int64 stack_entry_start; /* ExprContext's starting number for eval simple expression */ Oid curr_nested_table_type; + bool is_exception; } PLpgSQL_execstate; typedef struct PLpgSQL_pkg_execstate { /* Runtime execution data */ @@ -1104,7 +1150,11 @@ typedef struct PLpgSQL_pkg_execstate { /* Runtime execution data */ struct PLpgSQL_nsitem* private_ns; } PLpgSQL_pkg_execstate; - +typedef struct PLpgSQL_func_tableof_index { + int varno; + Oid tableOfIndexType; + HTAB* tableOfIndex; +} PLpgSQL_func_tableof_index; /* * A PLpgSQL_plugin structure represents an instrumentation plugin. @@ -1220,12 +1270,14 @@ typedef struct PLpgSQL_package { /* Complete compiled package */ PLpgSQL_resolve_option resolve_option; int ndatums; int public_ndatums; + int datums_alloc; PLpgSQL_datum** datums; + bool* datum_need_free; /* need to free datum when freeing package memory?
*/ int n_initvars; int* initvarnos; List* invalItems; /* other dependencies, like other pkg's type or variable */ - int use_count; /* count for other func or pkg use */ + unsigned long use_count; /* count for other func use */ Cursor_Data* cursor_return_data; char* plpgsql_error_funcname; @@ -1233,6 +1285,17 @@ typedef struct PLpgSQL_package { /* Complete compiled package */ Oid namespaceOid; bool isInit; } PLpgSQL_package; + +typedef struct ExecTableOfIndexInfo { + ExprContext* econtext; + HTAB* tableOfIndex; + Oid tableOfIndexType; + bool isnestedtable; + int tableOfLayers; + int paramid; + Oid paramtype; +} ExecTableOfIndexInfo; + /********************************************************************** * Pl debugger **********************************************************************/ @@ -1471,11 +1534,13 @@ typedef struct plpgsql_hashent { } plpgsql_HashEnt; extern PLpgSQL_function* plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator); -extern void delete_function(PLpgSQL_function* func); +extern void delete_function(PLpgSQL_function* func, bool fromPackage = false); extern PLpgSQL_function* plpgsql_compile_nohashkey(FunctionCallInfo fcinfo); /* parse trigger func */ extern PLpgSQL_function* plpgsql_compile_inline(char* proc_source); extern void plpgsql_parser_setup(struct ParseState* pstate, PLpgSQL_expr* expr); extern void plpgsql_parser_setup_bind(struct ParseState* pstate, List** expr); +extern void plpgsql_parser_setup_describe(struct ParseState* pstate, List** expr); + extern bool plpgsql_parse_word(char* word1, const char* yytxt, PLwdatum* wdatum, PLword* word, int* tok_flag); extern bool plpgsql_parse_dblword(char* word1, char* word2, PLwdatum* wdatum, PLcword* cword, int* nsflag); extern bool plpgsql_parse_tripword(char* word1, char* word2, char* word3, PLwdatum* wdatum, @@ -1489,9 +1554,9 @@ extern PLpgSQL_type* plpgsql_parse_cwordrowtype(List* idents); extern PLpgSQL_type* plpgsql_build_datatype(Oid typeOid, int32 typmod, Oid collation); extern PLpgSQL_type* build_datatype(HeapTuple type_tup, int32 typmod, Oid collation); extern PLpgSQL_type* plpgsql_build_nested_datatype(); -extern char *plpgsql_code_int2cstring(const int sqlcode); -extern int plpgsql_code_cstring2int(const char *codename); -extern void plpgsql_set_variable(const char* varname, const int value); +extern const char *plpgsql_code_int2cstring(int sqlcode); +extern const int plpgsql_code_cstring2int(const char *codename); +extern void plpgsql_set_variable(const char* varname, int value); extern PLpgSQL_variable* plpgsql_build_variable(const char* refname, int lineno, PLpgSQL_type* dtype, bool add2namespace, bool isImplicit = false, const char* varname = NULL, knl_pl_body_type plType = PL_BODY_FUNCTION); PLpgSQL_variable* plpgsql_build_varrayType(const char* refname, int lineno, PLpgSQL_type* dtype, bool add2namespace); @@ -1507,7 +1572,8 @@ extern PLpgSQL_row* build_row_from_tuple_desc(const char* rowname, int lineno, T extern PLpgSQL_row* build_row_from_rec_type(const char* rowname, int lineno, PLpgSQL_rec_type* type); extern bool plpgsql_check_colocate(Query* query, RangeTblEntry* rte, void* plpgsql_func); extern void plpgsql_HashTableDeleteAll(); -extern void plpgsql_HashTableDeleteAndCheckFunc(int cacheId, Oid objId); +extern void plpgsql_hashtable_delete_and_check_invalid_item(int classId, Oid objId); +extern void delete_package_and_check_invalid_item(Oid pkgOid); extern void plpgsql_HashTableDelete(PLpgSQL_function* func); extern bool plpgsql_get_current_value_stp_with_exception(); extern void 
plpgsql_restore_current_value_stp_with_exception(bool saved_current_stp_with_exception); @@ -1537,6 +1603,8 @@ extern "C" Datum plpgsql_inline_handler(PG_FUNCTION_ARGS); extern "C" Datum plpgsql_validator(PG_FUNCTION_ARGS); extern "C" PLpgSQL_package* plpgsql_package_validator(Oid packageOid, bool isSpec, bool isCreate=false); extern void record_pkg_function_dependency(PLpgSQL_package* pkg, List** invalItems, Oid funcid, Oid pkgid); +extern void DecreasePackageUseCount(PLpgSQL_function* func); +extern void AddPackageUseCount(PLpgSQL_function* func); /* --- --- --- * Functions in plsql_packages.c @@ -1558,18 +1626,20 @@ extern THR_LOCAL PLpgSQL_execstate* plpgsql_estate; #define BULK_COLLECT_MAX ((Size)0x3FFFFFF) /* maximum number of rows that can be bulk collected (by 3FFFFFFF/16) */ extern Datum plpgsql_exec_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, bool dynexec_anonymous_block); - extern Datum plpgsql_exec_autonm_function(PLpgSQL_function* func, FunctionCallInfo fcinfo, char* source_text); - extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function* func, TriggerData* trigdata); extern void plpgsql_xact_cb(XactEvent event, void* arg); extern void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid, void* arg); + extern Oid exec_get_datum_type(PLpgSQL_execstate* estate, PLpgSQL_datum* datum); extern void exec_get_datum_type_info(PLpgSQL_execstate* estate, PLpgSQL_datum* datum, Oid* typid, int32* typmod, Oid* collation, Oid* tableOfIndexType, PLpgSQL_function* func = NULL); extern Datum exec_simple_cast_datum( PLpgSQL_execstate* estate, Datum value, Oid valtype, Oid reqtype, int32 reqtypmod, bool isnull); extern void ResetCursorOption(Portal portal, bool reset); +#ifndef ENABLE_MULTIPLE_NODES +extern void ResetCursorAtrribute(Portal portal); +#endif extern void exec_assign_value(PLpgSQL_execstate *estate, PLpgSQL_datum *target, Datum value, Oid valtype, bool *isNull, HTAB* tableOfIndex = NULL); @@ -1588,6 +1658,10 @@ void exec_assign_expr(PLpgSQL_execstate* estate, PLpgSQL_datum* target, PLpgSQL_ extern int getTableOfIndexByDatumValue(TableOfIndexKey key, HTAB* tableOfIndex, PLpgSQL_var** node = NULL); extern Datum fillNestedTableArray(ArrayType* arrayval, Oid parenttypoid, Oid elemtypoid, int value, int idx); extern int plpgsql_estate_adddatum(PLpgSQL_execstate* estate, PLpgSQL_datum* newm); +extern void CheckCurrCompileDependOnPackage(Oid pkgOid); +#ifndef ENABLE_MULTIPLE_NODES +extern void estate_cursor_set(FormatCallStack* plcallstack); +#endif /* ---------- * Functions for namespace handling in pl_funcs.c @@ -1612,7 +1686,7 @@ extern PLpgSQL_nsitem* plpgsql_ns_lookup_label(PLpgSQL_nsitem* ns_cur, const cha */ extern const char* plpgsql_stmt_typename(PLpgSQL_stmt* stmt); extern const char* plpgsql_getdiag_kindname(int kind); -extern void plpgsql_free_function_memory(PLpgSQL_function* func); +extern void plpgsql_free_function_memory(PLpgSQL_function* func, bool fromPackage = false); extern void plpgsql_free_package_memory(PLpgSQL_package* pkg); extern void plpgsql_dumptree(PLpgSQL_function* func); extern bool plpgsql_is_trigger_shippable(PLpgSQL_function* func); @@ -1671,6 +1745,64 @@ typedef struct PackageRuntimeState { int size; } PackageRuntimeState; +typedef struct AutoSessionFuncValInfo { + bool found; + int sql_cursor_found; + int sql_notfound; + bool sql_isopen; + int sql_rowcount; + int sqlcode; + bool sqlcode_isnull; +} AutoSessionFuncValInfo; + +typedef struct AutoSessionPortalData { + int outParamIndex; + PortalStrategy
strategy; + int cursorOptions; + const char* sourceText; + const char* commandTag; + bool atStart; + bool atEnd; + bool posOverflow; + long portalPos; + Tuplestorestate* holdStore; + MemoryContext holdContext; + TupleDesc tupDesc; + bool is_open; + bool found; + bool not_found; + int row_count; + bool null_open; + bool null_fetch; +} AutoSessionPortalData; + +typedef enum { /* PLpgSQL_PortalContextState */ + CONTEXT_NEW, /* Initial state, new */ + CONTEXT_USED, /* already used by one portal */ +} PLpgSQL_PortalContextState; + +typedef struct AutoSessionPortalContextData { + PLpgSQL_PortalContextState status; + MemoryContext portalHoldContext; +} AutoSessionPortalContextData; + +/* Context for exception block */ +typedef struct ExceptionContext { + MemoryContext oldMemCxt; /* CurrentMemoryContext saved at exception's entry */ + ResourceOwner oldResOwner; /* CurrentResourceOwner saved at exception's entry */ + TransactionId oldTransactionId; /* top transaction id saved at exception entry */ + SubTransactionId subXid; /* exception subtransaction's id */ + + int curExceptionCounter; /* serial number for this exception block */ + bool hasReleased; /* whether the exception subtransaction has been released. */ + + ErrorData* cur_edata; /* ErrorData captured by this exception block. */ + ErrorData* old_edata; /* saved ErrorData before this Exception block. */ + + int spi_connected; /* SPI connected level before exception. */ + int64 stackId; /* the start stack Id before entering the exception block. */ +} ExceptionContext; + /* Quick access array state */ #define IS_ARRAY_STATE(state_list, state) ((state_list && u_sess->attr.attr_sql.sql_compatibility == A_FORMAT) ? \ (linitial_int(state_list) == state) : false) @@ -1688,7 +1820,7 @@ extern void pl_validate_stmt_block(PLpgSQL_stmt_block *block, PLpgSQL_function* SPIPlanPtr* plan, List** dynexec_list); extern void pl_validate_stmt_block_in_subtransaction(PLpgSQL_stmt_block* block, PLpgSQL_function* func, SQLFunctionParseInfoPtr pinfo, SPIPlanPtr* plan, List** dynexec_list); -extern TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect); +extern TupleDesc getCursorTupleDesc(PLpgSQL_expr* expr, bool isOnlySelect, bool isOnlyParse = false); extern int CompileStatusSwtichTo(int newCompileStatus); extern void checkCompileMemoryContext(MemoryContext cxt); @@ -1703,6 +1835,15 @@ extern PLpgSQL_datum* GetPackageDatum(List* name, bool* isSamePackage = NULL); extern bool pushed_bulk_exception(); extern bool CheckElementParsetreeTag(Node* parsetree); +extern Datum transVaratt1BTo4B(Datum value); extern PLpgSQL_datum* deepCopyPlpgsqlDatum(PLpgSQL_datum* datum); extern PLpgSQL_var* copyPlpgsqlVar(PLpgSQL_var* src); +extern void assign_text_var(PLpgSQL_var* var, const char* str); +extern MemoryContext GetAvailableHoldContext(List* PortalContextList); + +extern void stp_reset_xact(); +extern void stp_reset_stmt(); +extern void stp_reserve_subxact_resowner(ResourceOwner resowner); +extern void stp_cleanup_subxact_resowner(int64 minStackId); + #endif /* PLPGSQL_H */ diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h index 296e40543..966b10fc0 100644 --- a/src/include/utils/portal.h +++ b/src/include/utils/portal.h @@ -36,9 +36,9 @@ * to look like NO SCROLL cursors.
* * + * Portions Copyright (c) 2021, openGauss Contributors * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * * src/include/utils/portal.h * @@ -221,6 +221,7 @@ typedef struct PortalData { bool atEnd; bool posOverflow; long portalPos; + bool hasStreamForPlpgsql; /* true if plpgsql's portal has a stream that may cause a hang in a for-loop */ /* Presentation data, primarily used by the pg_cursors system view */ TimestampTz creation_time; /* time at which this portal was defined */ @@ -234,6 +235,8 @@ typedef struct PortalData { bool is_from_spi; #ifndef ENABLE_MULTIPLE_NODES PortalStream streamInfo; + bool isAutoOutParam; /* is autonomous transaction procedure out param? */ + bool isPkgCur; /* cursor variable is a package variable? */ #endif } PortalData; @@ -267,7 +270,7 @@ extern void UnpinPortal(Portal portal); extern void MarkPortalActive(Portal portal); extern void MarkPortalDone(Portal portal); extern void MarkPortalFailed(Portal portal); -extern void PortalDrop(Portal portal, bool isTopCommit, bool isInCreate = false); +extern void PortalDrop(Portal portal, bool isTopCommit); extern Portal GetPortalByName(const char* name); extern void PortalDefineQuery(Portal portal, const char* prepStmtName, const char* sourceText, const char* commandTag, List* stmts, CachedPlan* cplan); @@ -275,6 +278,7 @@ extern Node* PortalListGetPrimaryStmt(List* stmts); extern void PortalCreateHoldStore(Portal portal); extern void PortalHashTableDeleteAll(void); extern bool ThereAreNoReadyPortals(void); -extern void ResetPortalCursor(SubTransactionId mySubid, Oid funOid, int funUseCount); +extern void ResetPortalCursor(SubTransactionId mySubid, Oid funOid, int funUseCount, bool reset = true); extern void HoldPinnedPortals(void); +extern void HoldPortal(Portal portal); #endif /* PORTAL_H */ diff --git a/src/include/utils/postinit.h b/src/include/utils/postinit.h index 5ecf7e52f..948d65ab9 100644 --- a/src/include/utils/postinit.h +++ b/src/include/utils/postinit.h @@ -98,6 +98,8 @@ public: void InitWAL(); + void InitParallelDecode(); + void InitSession(); void InitStreamingBackend(); @@ -116,6 +118,8 @@ public: void InitFencedSysCache(); + void InitLoadLocalSysCache(Oid db_oid, const char *db_name); + void InitApplyLauncher(); void InitApplyWorker(); diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index d746ff858..c94e185a6 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -7,8 +7,8 @@ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/utils/rel.h * * ------------------------------------------------------------------------- @@ -21,7 +21,6 @@ #include "catalog/pg_class.h" #include "catalog/pg_index.h" - #include "fmgr.h" #include "nodes/bitmapset.h" #include "nodes/nodes.h" @@ -37,7 +36,6 @@ #include "utils/reltrigger.h" #include "utils/partitionmap.h" #include "catalog/pg_hashbucket_fn.h" -#include "catalog/pg_publication.h" #ifndef HDFS @@ -96,16 +94,6 @@ typedef struct RelationBucketKey Oid *bucketKeyType; /*the data type of partition key*/ }RelationBucketKey; -/* page compress related reloptions.
*/ -typedef struct PageCompressOpts { - int compressType; /* compress algorithm */ - int compressLevel; /* compress level */ - uint32 compressChunkSize; /* chunk size of compressed data */ - uint32 compressPreallocChunks; /* prealloced chunks to store compressed data */ - bool compressByteConvert; /* byte row-coll-convert */ - bool compressDiffConvert; /* make difference convert */ -} PageCompressOpts; - /* describe commit sequence number of object in pg_object */ typedef struct ObjectCSN { @@ -125,7 +113,6 @@ typedef struct PgObjectOption { */ typedef struct RelationData { - RelFileNode rd_node; /* relation physical identifier */ /* use "struct" here to avoid needing to include smgr.h: */ struct SMgrRelationData* rd_smgr; /* cached file handle, or NULL */ @@ -165,16 +152,16 @@ typedef struct RelationData { /* data managed by RelationGetIndexList: */ List* rd_indexlist; /* list of OIDs of indexes on relation */ Oid rd_oidindex; /* OID of unique index on OID, if any */ - Oid rd_pkindex; /* OID of primary key, if any */ + Oid rd_pkindex; /* OID of primary key, if any */ Oid rd_refSynOid; /* OID of referenced synonym Oid, if mapping indeed. */ /* data managed by RelationGetIndexAttrBitmap: */ Bitmapset* rd_indexattr; /* identifies columns used in indexes */ - Bitmapset* rd_pkattr; /* cols included in primary key */ Bitmapset* rd_keyattr; /* cols that can be ref'd by foreign keys */ + Bitmapset* rd_pkattr; /* cols included in primary key */ Bitmapset* rd_idattr; /* included in replica identity index */ - PublicationActions* rd_pubactions; /* publication actions */ + void* rd_pubactions; /* publication actions, PublicationActions */ /* * The index chosen as the relation's replication identity or @@ -257,6 +244,7 @@ typedef struct RelationData { CommitSeqNo rd_changecsn; /* the commit sequence number when the old version expires */ CommitSeqNo rd_createcsn; /* the commit sequence number when the object was created */ + CommitSeqNo xmin_csn; /* the commit sequence number when the xmin of the tuple committed */ /* bucket key info, indicating which keys are used to compute hash value */ int rd_bucketmapsize; /* Size of bucket map */ @@ -292,6 +280,10 @@ typedef struct RelationData { Oid rd_mlogoid; /* Is under the context of creating crossbucket index? */ bool newcbi; + + bool is_compressed; + /* used only for gsc, keep it preserved if you modify the rel, otherwise set it null */ + struct LocalRelationEntry *entry; } RelationData; /* @@ -323,16 +315,11 @@ typedef enum RedisRelAction { REDIS_REL_NORMAL, REDIS_REL_APPEND, REDIS_REL_READ_ONLY, + REDIS_REL_END_CATCHUP, REDIS_REL_DESTINATION, REDIS_REL_RESET_CTID } RedisHtlAction; -/* PageCompressOpts->compressType values */ -typedef enum CompressTypeOption { - COMPRESS_TYPE_NONE = 0, COMPRESS_TYPE_PGLZ = 1, COMPRESS_TYPE_ZSTD = 2 -} CompressTypeOption; - - typedef struct StdRdOptions { int32 vl_len_; /* varlena header (do not touch directly!)
*/ int fillfactor; /* page fill factor in percent (0..100) */ @@ -359,8 +346,11 @@ typedef struct StdRdOptions { char* wait_clean_cbi; int bucketcnt; /* number of bucket counts */ int parallel_workers; /* max number of parallel workers */ + bool hasuids; /* enable uids for this relation */ /* info for redistribution */ Oid rel_cn_oid; + int exec_step; + int64 create_time; RedisHtlAction append_mode_internal; int initTd; @@ -398,7 +388,6 @@ typedef struct StdRdOptions { char* encrypt_algo; bool enable_tde; /* switch flag for table-level TDE encryption */ bool on_commit_delete_rows; /* global temp table */ - PageCompressOpts compress; /* page compress related reloptions. */ } StdRdOptions; #define HEAP_MIN_FILLFACTOR 10 @@ -590,7 +579,7 @@ typedef struct StdRdOptions { * Returns the rel's frozenxid64. */ extern TransactionId RelationGetRelFrozenxid64(Relation r); - +extern TransactionId PartGetRelFrozenxid64(Partition part); /* * RelationGetRelFileNode * Returns the rel's relfilenode. @@ -804,6 +793,6 @@ extern void RelationDecrementReferenceCount(Oid relationId); extern void GetTdeInfoFromRel(Relation rel, TdeInfo *tde_info); extern char RelationGetRelReplident(Relation r); -extern void SetupPageCompressForRelation(RelFileNode* node, PageCompressOpts* compressOpts, const char* name); + #endif /* REL_H */ diff --git a/src/include/utils/rel_gs.h b/src/include/utils/rel_gs.h index 8e3082ec9..4df58a9a2 100644 --- a/src/include/utils/rel_gs.h +++ b/src/include/utils/rel_gs.h @@ -103,6 +103,8 @@ typedef struct PartitionData { bool newcbi; Relation partrel; /* a temporary relation generated by the partition itself and its parent relation */ PartitionMap* partMap; /* For Level-1 partition of subpartition table */ + struct LocalPartitionEntry *entry; + CommitSeqNo xmin_csn; /* the commit sequence number when the xmin of the tuple committed */ } PartitionData; typedef struct AttrMetaData { @@ -256,7 +258,7 @@ static inline TableAmType get_tableam_from_reloptions(bytea* reloptions, char re * Returns the relations TableAmType */ #define RelationGetIndexsplitMethod(_reloptions) \ - StdRdOptionsGetStringData(_reloptions, indexsplit, INDEXSPLIT_OPT_DEFAULT) + StdRdOptionsGetStringData(_reloptions, indexsplit, INDEXSPLIT_OPT_INSERTPT) #define RelationIsIndexsplitMethodDefault(_reloptions) \ pg_strcasecmp(RelationGetIndexsplitMethod(_reloptions), INDEXSPLIT_OPT_DEFAULT) == 0 @@ -654,11 +656,6 @@ extern void PartitionDecrementReferenceCount(Partition part); ((PARTTYPE_VALUE_PARTITIONED_RELATION == (relation)->rd_rel->parttype) && \ (RELKIND_RELATION == (relation)->rd_rel->relkind)) -#define HEAP_IS_PARTITIONED(relation) \ - ((PARTTYPE_PARTITIONED_RELATION == (relation)->rd_rel->parttype || \ - PARTTYPE_VALUE_PARTITIONED_RELATION == (relation)->rd_rel->parttype) && \ - (RELKIND_RELATION == (relation)->rd_rel->relkind || RELKIND_INDEX == (relation)->rd_rel->relkind)) - /* * type bucketOid bucketKey meaning * N INV INV relation has no bucket @@ -686,16 +683,28 @@ extern void PartitionDecrementReferenceCount(Partition part); !IsCreatingCrossBucketIndex(relation)) /* type: SK && Non-part*/ +#ifdef ENABLE_MULTIPLE_NODES #define RELATION_CREATE_BUCKET(relation) \ (RelationIsNonpartitioned(relation) && \ RELATION_OWN_BUCKET(relation) && \ !RelationIsCrossBucketIndex(relation) && \ !IsCreatingCrossBucketIndex(relation)) +#else +#define RELATION_CREATE_BUCKET(relation) false +#endif #define RELATION_OWN_BUCKETKEY_COMMON(relation) (RELATION_OWN_BUCKET(relation) && RelationIsPartitioned(relation)) #define
RELATION_CREATE_BUCKET_COMMON(relation) (RelationIsNonpartitioned(relation) && RELATION_OWN_BUCKET(relation)) +/* if relation has uids */ +#define RELATION_HAS_UIDS(relation) \ + ((((relation)->rd_rel->relkind == RELKIND_RELATION) && ((relation)->rd_options)) ? \ + ((StdRdOptions*)(relation)->rd_options)->hasuids : false) + +#define StdRdOptionsHasUids(options, relkind) \ + ((((relkind) == RELKIND_RELATION) && options) ? ((StdRdOptions*)options)->hasuids : false) + #define RELATION_IS_DELTA(relation) \ (IsCStoreNamespace(RelationGetNamespace(relation)) && \ pg_strncasecmp(RelationGetRelationName(relation), "pg_delta", strlen("pg_delta")) == 0) @@ -728,6 +737,9 @@ static inline bool IsCompressedByCmprsInPgclass(const RelCompressType cmprInPgcl #define RelationInRedistributeReadOnly(relation) \ (REDIS_REL_READ_ONLY == (RelationGetAppendMode(relation)) ? true : false) +#define RelationInRedistributeEndCatchup(relation) \ + (REDIS_REL_END_CATCHUP == (RelationGetAppendMode(relation)) ? true : false) + #define RelationIsRedistributeDest(relation) \ (REDIS_REL_DESTINATION == (RelationGetAppendMode(relation)) ? true : false) diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h index 652b9e32c..2744871d0 100644 --- a/src/include/utils/relcache.h +++ b/src/include/utils/relcache.h @@ -113,8 +113,7 @@ extern void RelationCacheInitializePhase3(void); */ extern Relation RelationBuildLocalRelation(const char* relname, Oid relnamespace, TupleDesc tupDesc, Oid relid, Oid relfilenode, Oid reltablespace, bool shared_relation, bool mapped_relation, char relpersistence, char relkind, - int8 row_compress, Datum reloptions, TableAmType tam_type, int8 relindexsplit = 0, StorageType storage_type = HEAP_DISK, - Oid accessMethodObjectId = 0); + int8 row_compress, TableAmType tam_type, int8 relindexsplit = 0, StorageType storage_type = HEAP_DISK); /* * Routine to manage assignment of new relfilenode to a relation @@ -124,7 +123,11 @@ extern void DeltaTableSetNewRelfilenode(Oid relid, TransactionId freezeXid, bool extern void RelationSetNewRelfilenode(Relation relation, TransactionId freezeXid, MultiXactId minmulti, bool isDfsTruncate = false); extern RelFileNodeBackend CreateNewRelfilenode(Relation relation, TransactionId freezeXid); +extern RelFileNodeBackend CreateNewRelfilenodePart(Relation parent, Partition part); + extern void UpdatePgclass(Relation relation, TransactionId freezeXid, const RelFileNodeBackend *rnode); +extern void UpdatePartition(Relation parent, Partition part, TransactionId freezeXid, const RelFileNodeBackend *newrnode); + /* * Routines for flushing/rebuilding relcache entries in various scenarios */ @@ -142,6 +145,7 @@ extern Oid RelationGetBucketOid(Relation relation); extern void AtEOXact_RelationCache(bool isCommit); extern void AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid); +extern void InvalidateRelationNodeList(); /* * Routines to help manage rebuilding of relcache init files */ diff --git a/src/include/utils/relfilenodemap.h b/src/include/utils/relfilenodemap.h index b9eedda92..fd9455bd7 100644 --- a/src/include/utils/relfilenodemap.h +++ b/src/include/utils/relfilenodemap.h @@ -19,5 +19,6 @@ extern Oid PartitionRelidByRelfilenode(Oid reltablespace, Oid relfilenode, Oid & Oid *partitionOid, bool segment); extern Oid UHeapRelidByRelfilenode(Oid reltablespace, Oid relfilenode); extern Oid UHeapPartitionRelidByRelfilenode(Oid reltablespace, Oid relfilenode, Oid& partationReltoastrelid); - +extern void 
RelfilenodeMapInvalidateCallback(Datum arg, Oid relid); +extern void UHeapRelfilenodeMapInvalidateCallback(Datum arg, Oid relid); #endif /* RELFILENODEMAP_H */ diff --git a/src/include/utils/relmapper.h b/src/include/utils/relmapper.h index f77182567..aa2ca4683 100644 --- a/src/include/utils/relmapper.h +++ b/src/include/utils/relmapper.h @@ -106,5 +106,7 @@ extern void RelationMapInitializePhase3(void); extern void relmap_redo(XLogReaderState* record); extern void relmap_desc(StringInfo buf, XLogReaderState* record); +extern const char* relmap_type_name(uint8 subtype); +extern void load_relmap_file(bool shared, RelMapFile *map); #endif /* RELMAPPER_H */ diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h index 2c1338965..14c8fa26e 100755 --- a/src/include/utils/resowner.h +++ b/src/include/utils/resowner.h @@ -11,8 +11,8 @@ * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/utils/resowner.h * * ------------------------------------------------------------------------- @@ -22,11 +22,12 @@ #include "storage/smgr/fd.h" #include "utils/catcache.h" +#include "utils/knl_globalsystupcache.h" +#include "utils/knl_localsystupcache.h" #include "utils/plancache.h" #include "utils/snapshot.h" #include "utils/partcache.h" #include "storage/cucache_mgr.h" - /* * ResourceOwner objects are an opaque data structure known only within * resowner.c. @@ -68,8 +69,6 @@ extern ResourceOwner ResourceOwnerGetNextChild(ResourceOwner owner); extern const char * ResourceOwnerGetName(ResourceOwner owner); extern ResourceOwner ResourceOwnerGetFirstChild(ResourceOwner owner); extern void ResourceOwnerNewParent(ResourceOwner owner, ResourceOwner newparent); -extern void RegisterResourceReleaseCallback(ResourceReleaseCallback callback, void* arg); -extern void UnregisterResourceReleaseCallback(ResourceReleaseCallback callback, const void* arg); /* support for buffer refcount management */ extern void ResourceOwnerEnlargeBuffers(ResourceOwner owner); @@ -167,19 +166,45 @@ extern void PrintGMemContextLeakWarning(MemoryContext memcontext); extern void ResourceOwnerMarkInvalid(ResourceOwner owner); extern bool ResourceOwnerIsValid(ResourceOwner owner); +extern void ResourceOwnerReleaseAllPlanCacheRefs(ResourceOwner owner); -inline void resowner_record_log(const char* action, const char* filename, int lineno, const char* funcname, - ResourceOwner owner) -{ - if (u_sess->attr.attr_common.log_min_messages >= DEBUG3 && module_logging_is_on(MOD_RESOWNER)) { - ereport(DEBUG3, (errmodule(MOD_RESOWNER), errcode(ERRCODE_LOG), - errmsg("RESOWNER(Action:%s, Location %s,%d, Funcname:%s): owner: %p", - action, filename, lineno, funcname, owner))); - } -} - -#define RESOWNER_LOG(action, owner) \ - (resowner_record_log(action, __FILE__, __LINE__, __func__, owner)) - +extern void PrintPthreadRWlockLeakWarning(pthread_rwlock_t* pRWlock); +/* support for local catcache tuple/list ref management */ +extern void ResourceOwnerEnlargeLocalCatCList(ResourceOwner owner); +extern void ResourceOwnerRememberLocalCatCList(ResourceOwner owner, LocalCatCList* list); +extern void ResourceOwnerForgetLocalCatCList(ResourceOwner owner, LocalCatCList* list); +extern void ResourceOwnerEnlargeLocalCatCTup(ResourceOwner owner); +extern void ResourceOwnerRememberLocalCatCTup(ResourceOwner owner, LocalCatCTup* tuple); +extern 
LocalCatCTup* ResourceOwnerForgetLocalCatCTup(ResourceOwner owner, HeapTuple tuple); +/* support for global catcache tuple/list ref management */ +extern void ResourceOwnerEnlargePthreadRWlock(ResourceOwner owner); +extern void ResourceOwnerForgetPthreadRWlock(ResourceOwner owner, pthread_rwlock_t* pRWlock); +extern void ResourceOwnerRememberPthreadRWlock(ResourceOwner owner, pthread_rwlock_t* pRWlock); +extern void ResourceOwnerEnlargeGlobalCatCTup(ResourceOwner owner); +extern void ResourceOwnerRememberGlobalCatCTup(ResourceOwner owner, GlobalCatCTup* tuple); +extern void ResourceOwnerForgetGlobalCatCTup(ResourceOwner owner, GlobalCatCTup* tuple); +extern void ResourceOwnerEnlargeGlobalCatCList(ResourceOwner owner); +extern void ResourceOwnerRememberGlobalCatCList(ResourceOwner owner, GlobalCatCList* list); +extern void ResourceOwnerForgetGlobalCatCList(ResourceOwner owner, GlobalCatCList* list); +extern void ResourceOwnerEnlargeGlobalBaseEntry(ResourceOwner owner); +extern void ResourceOwnerRememberGlobalBaseEntry(ResourceOwner owner, GlobalBaseEntry* entry); +extern void ResourceOwnerForgetGlobalBaseEntry(ResourceOwner owner, GlobalBaseEntry* entry); +extern void ResourceOwnerEnlargeGlobalDBEntry(ResourceOwner owner); +extern void ResourceOwnerRememberGlobalDBEntry(ResourceOwner owner, GlobalSysDBCacheEntry* entry); +extern void ResourceOwnerForgetGlobalDBEntry(ResourceOwner owner, GlobalSysDBCacheEntry* entry); +extern void ResourceOwnerEnlargeGlobalIsExclusive(ResourceOwner owner); +extern void ResourceOwnerRememberGlobalIsExclusive(ResourceOwner owner, volatile uint32 *isexclusive); +extern void ResourceOwnerForgetGlobalIsExclusive(ResourceOwner owner, volatile uint32 *isexclusive); +extern void ResourceOwnerReleaseRWLock(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseLocalCatCTup(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseLocalCatCList(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseRelationRef(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleasePartitionRef(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseGlobalCatCTup(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseGlobalCatCList(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseGlobalBaseEntry(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseGlobalDBEntry(ResourceOwner owner, bool isCommit); +extern void ResourceOwnerReleaseGlobalIsExclusive(ResourceOwner owner, bool isCommit); +extern bool CurrentResourceOwnerIsEmpty(ResourceOwner owner); #endif /* RESOWNER_H */ diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h index d3f0a8afd..122f03678 100644 --- a/src/include/utils/snapmgr.h +++ b/src/include/utils/snapmgr.h @@ -42,7 +42,12 @@ extern THR_LOCAL PGDLLIMPORT SnapshotData SnapshotNowNoSyncData; extern bool XidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdStatus *hintstatus, Buffer buffer, bool *sync); +extern bool UHeapXidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdStatus *hintstatus, + Buffer buffer, bool *sync); +extern bool XidVisibleInDecodeSnapshot(TransactionId xid, Snapshot snapshot, + TransactionIdStatus* hintstatus, Buffer buffer); extern bool CommittedXidVisibleInSnapshot(TransactionId xid, Snapshot snapshot, Buffer buffer); +extern bool CommittedXidVisibleInDecodeSnapshot(TransactionId xid, Snapshot snapshot, Buffer buffer); extern bool 
IsXidVisibleInGtmLiteLocalSnapshot(TransactionId xid, Snapshot snapshot, TransactionIdStatus hint_status, bool xmin_equal_xmax, Buffer buffer, bool *sync); /* @@ -60,7 +65,7 @@ extern bool IsXidVisibleInGtmLiteLocalSnapshot(TransactionId xid, Snapshot snaps /* This macro encodes the knowledge of which snapshots are MVCC-safe */ #define IsMVCCSnapshot(snapshot) \ ((((snapshot)->satisfies) == SNAPSHOT_MVCC) || (((snapshot)->satisfies) == SNAPSHOT_HISTORIC_MVCC) || \ - IsVersionMVCCSnapshot(snapshot)) + (((snapshot)->satisfies) == SNAPSHOT_DECODE_MVCC) || IsVersionMVCCSnapshot(snapshot)) extern Snapshot GetTransactionSnapshot(bool force_local_snapshot = false); extern Snapshot GetLatestSnapshot(void); diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h index 7f97cdbc9..bb60e4301 100644 --- a/src/include/utils/snapshot.h +++ b/src/include/utils/snapshot.h @@ -168,6 +168,11 @@ typedef enum SnapshotSatisfiesMethod { * contents in the context of logical decoding). */ SNAPSHOT_HISTORIC_MVCC, + /* + * Whether a tuple is visible is decided by CSN, + * which is used in parallel decoding. + */ + SNAPSHOT_DECODE_MVCC } SnapshotSatisfiesMethod; typedef struct SnapshotData* Snapshot; diff --git a/src/include/utils/spccache.h b/src/include/utils/spccache.h index b9c81ebbe..327efd9f3 100644 --- a/src/include/utils/spccache.h +++ b/src/include/utils/spccache.h @@ -13,6 +13,7 @@ #ifndef SPCCACHE_H #define SPCCACHE_H -void get_tablespace_page_costs(Oid spcid, float8* spc_random_page_cost, float8* spc_seq_page_cost); +extern void get_tablespace_page_costs(Oid spcid, float8* spc_random_page_cost, float8* spc_seq_page_cost); +extern void InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue); #endif /* SPCCACHE_H */ diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h index db29af8e5..a1b45496a 100644 --- a/src/include/utils/syscache.h +++ b/src/include/utils/syscache.h @@ -9,8 +9,8 @@ * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2010-2012 Postgres-XC Development Group - * Portions Copyright (c) 2021, openGauss Contributors * + * Portions Copyright (c) 2021, openGauss Contributors * src/include/utils/syscache.h * * ------------------------------------------------------------------------- @@ -111,6 +111,8 @@ enum SysCacheIdentifier { PGXCAPPWGMAPPINGOID, PGXCSLICERELID, #endif + SUBSCRIPTIONNAME, + SUBSCRIPTIONOID, PROCNAMEARGSNSP, #ifndef ENABLE_MULTIPLE_NODES PROCALLARGS, @@ -132,6 +134,7 @@ enum SysCacheIdentifier { STREAMOID, STREAMRELID, REAPERCQOID, + PUBLICATIONRELMAP, SYNOID, SYNONYMNAMENSP, TABLESPACEOID, @@ -155,12 +158,19 @@ enum SysCacheIdentifier { PKGNAMENSP, PUBLICATIONNAME, PUBLICATIONOID, - PUBLICATIONREL, - PUBLICATIONRELMAP, - SUBSCRIPTIONNAME, - SUBSCRIPTIONOID + UIDRELID, + DBPRIVOID, + DBPRIVROLE, + DBPRIVROLEPRIV }; - +struct cachedesc { + Oid reloid; /* OID of the relation being cached */ + Oid indoid; /* OID of index relation for this cache */ + int nkeys; /* # of keys needed for cache lookup */ + int key[CATCACHE_MAXKEYS]; /* attribute numbers of key attrs */ + int nbuckets; /* number of hash buckets for this cache */ +}; +extern const cachedesc cacheinfo[]; extern int SysCacheSize; extern void InitCatalogCache(void); @@ -177,10 +187,12 @@ extern HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3 extern HeapTuple SearchSysCache4(int cacheId, Datum key1, Datum key2, Datum key3, Datum 
key4); extern void ReleaseSysCache(HeapTuple tuple); +extern void ReleaseSysCacheList(catclist *cl); /* convenience routines */ extern HeapTuple SearchSysCacheCopy(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4, int level = DEBUG2); extern bool SearchSysCacheExists(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4); + extern Oid GetSysCacheOid(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4); extern HeapTuple SearchSysCacheAttName(Oid relid, const char* attname); @@ -194,6 +206,10 @@ extern uint32 GetSysCacheHashValue(int cacheId, Datum key1, Datum key2, Datum ke /* list-search interface. Users of this must import catcache.h too */ extern struct catclist* SearchSysCacheList(int cacheId, int nkeys, Datum key1, Datum key2, Datum key3, Datum key4); +#ifndef ENABLE_MULTIPLE_NODES +extern bool SearchSysCacheExistsForProcAllArgs(Datum key1, Datum key2, Datum key3, Datum key4, Datum proArgModes); +#endif + /* * The use of the macros below rather than direct calls to the corresponding * functions is encouraged, as it insulates the caller from changes in the @@ -227,6 +243,4 @@ extern struct catclist* SearchSysCacheList(int cacheId, int nkeys, Datum key1, D #define SearchSysCacheList3(cacheId, key1, key2, key3) SearchSysCacheList(cacheId, 3, key1, key2, key3, 0) #define SearchSysCacheList4(cacheId, key1, key2, key3, key4) SearchSysCacheList(cacheId, 4, key1, key2, key3, key4) -#define ReleaseSysCacheList(x) ReleaseCatCacheList(x) - #endif /* SYSCACHE_H */ diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index 7a4be48e2..611685900 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -129,8 +129,7 @@ extern Tuplesortstate* tuplesort_begin_cluster( extern Tuplesortstate* tuplesort_begin_index_btree( Relation indexRel, bool enforceUnique, int workMem, SortCoordinate coordinate, bool randomAccess, int maxMem); extern Tuplesortstate* tuplesort_begin_index_hash( - Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, - int workMem, bool randomAccess, int maxMem); + Relation indexRel, uint32 hash_mask, int workMem, bool randomAccess, int maxMem); extern Tuplesortstate* tuplesort_begin_datum( Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, bool randomAccess); #ifdef PGXC @@ -144,8 +143,7 @@ extern void tuplesort_set_siblings(Tuplesortstate* state, const int numKeys, con extern void tuplesort_puttupleslot(Tuplesortstate* state, TupleTableSlot* slot); extern void TuplesortPutheaptuple(Tuplesortstate* state, HeapTuple tup); extern void tuplesort_putindextuplevalues( - Tuplesortstate* state, Relation rel, ItemPointer self, Datum* values, - const bool* isnull, IndexTransInfo* transInfo = NULL); + Tuplesortstate* state, Relation rel, ItemPointer self, Datum* values, const bool* isnull); extern void tuplesort_putdatum(Tuplesortstate* state, Datum val, bool isNull); extern void tuplesort_performsort(Tuplesortstate* state); diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h index 1496ca04d..64483b48d 100644 --- a/src/include/utils/typcache.h +++ b/src/include/utils/typcache.h @@ -118,4 +118,5 @@ extern void assign_record_type_typmod(TupleDesc tupDesc); extern int compare_values_of_enum(TypeCacheEntry* tcache, Oid arg1, Oid arg2); +extern void TypeCacheRelCallback(Datum arg, Oid relid); #endif /* TYPCACHE_H */ diff --git a/src/include/utils/utesteventutil.h b/src/include/utils/utesteventutil.h new file mode 100644 index 
000000000..a8603d98d --- /dev/null +++ b/src/include/utils/utesteventutil.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * src/include/utils/utesteventutil.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef __UTEST_EVENT_UTIL_H__ +#define __UTEST_EVENT_UTIL_H__ +#include "c.h" // necessary for ENABLE_UT definition + +#ifdef ENABLE_UT + +#include "access/xlogproc.h" + +enum EnumTestEventType { + // for extreme_rto + UTEST_EVENT_RTO_START = 0, + UTEST_EVENT_RTO_TRXNMGR_DISTRIBUTE_ITEMS, + UTEST_EVENT_RTO_DISPATCH_REDO_RECORD_TO_FILE, + UTEST_EVENT_RTO_PAGEMGR_REDO_BEFORE_DISTRIBUTE_ITEMS, + UTEST_EVENT_RTO_PAGEMGR_REDO_AFTER_DISTRIBUTE_ITEMS, +}; + +extern void TestXLogRecParseStateEventProbe(EnumTestEventType eventType, + const char* sourceName, const XLogRecParseState* parseState); +extern void TestXLogReaderProbe(EnumTestEventType eventType, + const char* sourceName, const XLogReaderState* readerState); + +#endif +#endif + diff --git a/src/include/vecexecutor/vecexecutor.h b/src/include/vecexecutor/vecexecutor.h index ec6803def..8c6e776d4 100644 --- a/src/include/vecexecutor/vecexecutor.h +++ b/src/include/vecexecutor/vecexecutor.h @@ -43,7 +43,8 @@ } extern VectorBatch* VectorEngine(PlanState* node); -extern VectorBatch* ExecVecProject(ProjectionInfo* projInfo, bool selReSet = true, ExprDoneCond* isDone = NULL); +extern VectorBatch *ExecVecProject(ProjectionInfo *projInfo, bool selReSet = true, + ExprDoneCond *isDone = NULL); extern ExprState* ExecInitVecExpr(Expr* node, PlanState* parent); extern ScalarVector* ExecVecQual(List* qual, ExprContext* econtext, bool resultForNull, bool isReset = true); diff --git a/src/include/vecexecutor/vecnoderowtovector.h b/src/include/vecexecutor/vecnoderowtovector.h index a7fcf8683..546dc0d62 100644 --- a/src/include/vecexecutor/vecnoderowtovector.h +++ b/src/include/vecexecutor/vecnoderowtovector.h @@ -29,6 +29,7 @@ #include "vecexecutor/vecnodes.h" #include "vecexecutor/vectorbatch.h" +extern struct varlena *DetoastDatumBatch(struct varlena* datum, ScalarVector* arr); extern RowToVecState* ExecInitRowToVec(RowToVec* node, EState* estate, int eflags); extern VectorBatch* ExecRowToVec(RowToVecState* node); extern void ExecEndRowToVec(RowToVecState* node); diff --git a/src/include/vecexecutor/vecnodes.h b/src/include/vecexecutor/vecnodes.h index 86b0538fa..cc6330227 100644 --- a/src/include/vecexecutor/vecnodes.h +++ b/src/include/vecexecutor/vecnodes.h @@ -185,6 +185,7 @@ typedef struct RowToVecState { bool m_fNoMoreRows; // does it has more rows to output VectorBatch* m_pCurrentBatch; // current active batch in outputing + bool m_batchMode; } RowToVecState; typedef struct VecResultState : public ResultState { diff --git a/src/include/vecexecutor/vectorbatch.h b/src/include/vecexecutor/vectorbatch.h index 55531988c..2341b5351 100644 --- 
a/src/include/vecexecutor/vectorbatch.h +++ b/src/include/vecexecutor/vectorbatch.h @@ -27,7 +27,6 @@ #define VECTORBATCH_H_ #include "postgres.h" -#include "knl/knl_variable.h" #include "access/tupdesc.h" #include "lib/stringinfo.h" #include "catalog/pg_type.h" @@ -99,12 +98,6 @@ inline bool COL_IS_ENCODE(int typeId) } } -template -bool COL_IS_ENCODE_T() -{ - return COL_IS_ENCODE(typeId); -} - #define BOTH_NOT_NULL(flag1, flag2) (likely(NOT_NULL((flag1) | (flag2)))) #define IS_NULL(flag) (unlikely(((flag)&V_NULL_MASK) == V_NULL_MASK)) #define NOT_NULL(flag) ((((unsigned int)flag) & V_NULL_MASK) == V_NOTNULL_MASK) @@ -432,7 +425,7 @@ public: // Pack the batch // - void Pack(const bool* sel); + void Pack(const bool *sel); /* Optimzed Pack function */ void OptimizePack(const bool* sel, List* CopyVars); @@ -454,7 +447,7 @@ public: public: /* Pack template function. */ template - void PackT(_in_ const bool* sel); + void PackT(const bool* sel); /* Optimize template function. */ template @@ -510,7 +503,7 @@ inline ScalarValue ScalarVector::DatumToScalarT(Datum datumVal, bool isNull) DBG_ASSERT(datumType != InvalidOid); if (!isNull) { - if (COL_IS_ENCODE_T()) { + if (COL_IS_ENCODE(datumType)) { switch (datumType) { case MACADDROID: val = DatumFixLenToScalar(datumVal, 6); diff --git a/src/lib/CMakeLists.txt b/src/lib/CMakeLists.txt index a3960cb5f..d30c9a68f 100755 --- a/src/lib/CMakeLists.txt +++ b/src/lib/CMakeLists.txt @@ -14,6 +14,8 @@ set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/build_query ${CMAKE_CURRENT_SOURCE_DIR}/config ${CMAKE_CURRENT_SOURCE_DIR}/pgcommon + ${CMAKE_CURRENT_SOURCE_DIR}/cm_common + ${CMAKE_CURRENT_SOURCE_DIR}/cm_communication ) add_subdirectory(gstrace) @@ -23,4 +25,6 @@ add_subdirectory(alarm) add_subdirectory(build_query) add_subdirectory(config) add_subdirectory(pgcommon) +add_subdirectory(cm_common) +add_subdirectory(cm_communication) diff --git a/src/lib/Makefile b/src/lib/Makefile index b003a0cce..8f0c8021b 100644 --- a/src/lib/Makefile +++ b/src/lib/Makefile @@ -26,7 +26,7 @@ subdir = src/lib top_builddir = ../.. include $(top_builddir)/src/Makefile.global -SUBDIRS = build_query config pgcommon alarm gstrace hotpatch +SUBDIRS = build_query config pgcommon alarm gstrace hotpatch cm_common cm_communication $(recurse) diff --git a/src/lib/alarm/CMakeLists.txt b/src/lib/alarm/CMakeLists.txt index 76e6f6719..f025138d8 100755 --- a/src/lib/alarm/CMakeLists.txt +++ b/src/lib/alarm/CMakeLists.txt @@ -7,7 +7,7 @@ set(TGT_alarmclient_INC ) set(alarmclient_DEF_OPTIONS ${MACRO_OPTIONS}) -set(alarmclient_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} -g3 -ggdb3 -lsecurec ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -O3) +set(alarmclient_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} -g3 -ggdb3 -l${SECURE_C_CHECK} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -O3) list(REMOVE_ITEM alarmclient_COMPILE_OPTIONS -O2) add_static_libtarget(alarmclient TGT_alarmclient_SRC TGT_alarmclient_INC "${alarmclient_DEF_OPTIONS}" "${alarmclient_COMPILE_OPTIONS}") target_link_directories(alarmclient_static PRIVATE ${SECURE_LIB_PATH}) diff --git a/src/lib/alarm/Makefile b/src/lib/alarm/Makefile index 5b62dc02e..74f6635a2 100644 --- a/src/lib/alarm/Makefile +++ b/src/lib/alarm/Makefile @@ -24,7 +24,7 @@ top_builddir=../../.. include $(top_builddir)/src/Makefile.global subdir = src/alarm/client -override CPPFLAGS := -I. 
-I../common/ -I$(top_srcdir)/../Platform/N9000_RPC_Lib/ $(CPPFLAGS) -g3 -ggdb3 -O3 -lsecurec +override CPPFLAGS := -I. -I../common/ -I$(top_srcdir)/../Platform/N9000_RPC_Lib/ $(CPPFLAGS) -g3 -ggdb3 -O3 -l$(SECURE_C_CHECK) LDFLAGS += $(top_srcdir)/../Platform/N9000_RPC_Lib/ include $(top_srcdir)/src/gausskernel/common.mk diff --git a/src/lib/alarm/alarm.cpp b/src/lib/alarm/alarm.cpp index db827bff3..c7f11aa0b 100644 --- a/src/lib/alarm/alarm.cpp +++ b/src/lib/alarm/alarm.cpp @@ -55,6 +55,8 @@ static char LogicClusterName[CLUSTER_NAME_LEN] = {0}; char* Alarm_component = NULL; THR_LOCAL int AlarmReportInterval = 10; +static int IP_LEN = 64; /* default ip len */ +static int AF_INET6_MAX_BITS = 128; /* ip mask bit */ // if report alarm succeed(component), return 0 #define ALARM_REPORT_SUCCEED 0 // if report alarm suppress(component), return 2 @@ -383,18 +385,33 @@ static void GetHostIP(const char* myHostName, char* myHostIP, unsigned int myHos { struct hostent* hp; errno_t rc = 0; + char* ipstr = NULL; + char ipv6[IP_LEN] = {0}; + char* result = NULL; hp = gethostbyname(myHostName); - if (NULL == hp) { - AlarmLog(ALM_LOG, "GET host IP by name failed.\n"); - } else { - char* ipstr = inet_ntoa(*((struct in_addr*)hp->h_addr)); - size_t len = (strlen(ipstr) < (myHostIPLen - 1)) ? strlen(ipstr) : (myHostIPLen - 1); - rc = memcpy_s(myHostIP, myHostIPLen, ipstr, len); - securec_check_c(rc, "\0", "\0"); - myHostIP[len] = '\0'; - AlarmLog(ALM_LOG, "Host IP: %s \n", myHostIP); + if (hp == NULL) { + hp = gethostbyname2(myHostName, AF_INET6); + if (hp == NULL) { + AlarmLog(ALM_LOG, "GET host IP by name failed.\n"); + return; + } } + + if (hp->h_addrtype == AF_INET) { + ipstr = inet_ntoa(*((struct in_addr*)hp->h_addr)); + } else if (hp->h_addrtype == AF_INET6) { + result = inet_net_ntop(AF_INET6, ((struct in6_addr*)hp->h_addr), AF_INET6_MAX_BITS, ipv6, IP_LEN); + if (result == NULL) { + AlarmLog(ALM_LOG, "inet_net_ntop failed, error: %d.\n", EAFNOSUPPORT); + } + ipstr = ipv6; + } + size_t len = (strlen(ipstr) < (myHostIPLen - 1)) ? strlen(ipstr) : (myHostIPLen - 1); + rc = memcpy_s(myHostIP, myHostIPLen, ipstr, len); + securec_check_c(rc, "\0", "\0"); + myHostIP[len] = '\0'; + AlarmLog(ALM_LOG, "Host IP: %s \n", myHostIP); } static void GetClusterName(char* clusterName, unsigned int clusterNameLen) diff --git a/src/lib/alarm/alarmItem.conf b/src/lib/alarm/alarmItem.conf index befb8f1a2..081b18daf 100755 --- a/src/lib/alarm/alarmItem.conf +++ b/src/lib/alarm/alarmItem.conf @@ -52,6 +52,9 @@ 1078919263 DNReduceSyncList DNִн Datanode %s reduce sync list. ݿʵ%sִн minor 1078919264 DNIncreaseSyncList DNִ Datanode %s increase sync list. ݿʵ%sִ minor 1078919265 PgxcNodeMismatch pgxc_nodeһ Coordinator %s pgxc_node mismatch. CNʵ%s pgxc_nodeһ critical +1078919280 StreamingDisasterRecoveryCnDisconnected ֱȺcn In streaming standby cluster,cn is disconnected from cn %s of the primary cluster. ֱȺCNʵȺCNʵ%s critical +1078919281 StreamingDisasterRecoveryDnDisconnected ֱȺdn In streaming standby cluster,dn %s is disconnected from Corresponding Shard dn of the primary cluster. ֱȺDNʵ%sȺӦƬDNʵ critical + #used for om_monitor alarm_component = /opt/huawei/snas/bin/snas_cm_cmd diff --git a/src/lib/cm_common/CMakeLists.txt b/src/lib/cm_common/CMakeLists.txt new file mode 100644 index 000000000..c32c039d1 --- /dev/null +++ b/src/lib/cm_common/CMakeLists.txt @@ -0,0 +1,35 @@ +#This is the main CMAKE for build all components. 
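+# It first generates cm_errcodes.h from cm_errcodes.txt (via generate-cm-errcodes.pl),
+# then builds the CM common sources twice: as a shared library (libcmcommon.so,
+# linked against libcgroup) and as a static archive (libcmcommon.a).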
+# libcmcommon.a
+execute_process(
+    COMMAND perl generate-cm-errcodes.pl cm_errcodes.txt
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+    OUTPUT_FILE cm_errcodes.h
+)
+execute_process(COMMAND ln -fs ${CMAKE_CURRENT_SOURCE_DIR}/cm_errcodes.h ${PROJECT_SRC_DIR}/include/cm/cm_errcodes.h)
+
+AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_cmcommon_SRC)
+
+set(TGT_cmcommon_INC
+    ${PROJECT_SRC_DIR}/include/cm
+    ${PROJECT_SRC_DIR}/common/interfaces/libpq
+    ${PROJECT_TRUNK_DIR}/distribute/cm/cm_etcdapi
+    ${PROJECT_TRUNK_DIR}/distribute/include
+    ${LIBCGROUP_INCLUDE_PATH}
+)
+
+set(cmcommon_DEF_OPTIONS ${MACRO_OPTIONS})
+set(cmcommon_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-strong)
+list(REMOVE_ITEM cmcommon_COMPILE_OPTIONS -fstack-protector)
+set(cmcommon_LINK_OPTIONS ${LIB_LINK_OPTIONS})
+list(REMOVE_ITEM cmcommon_LINK_OPTIONS -pthread)
+add_shared_libtarget(cmcommon TGT_cmcommon_SRC TGT_cmcommon_INC "" "${cmcommon_COMPILE_OPTIONS}" "${cmcommon_LINK_OPTIONS}")
+target_link_libraries(cmcommon PRIVATE -lcgroup)
+target_link_directories(cmcommon PUBLIC
+    ${LIBCGROUP_LIB_PATH}
+)
+set_target_properties(cmcommon PROPERTIES VERSION 2)
+install(TARGETS cmcommon DESTINATION lib)
+
+add_static_libtarget(cmcommon TGT_cmcommon_SRC TGT_cmcommon_INC "${cmcommon_DEF_OPTIONS}" "${cmcommon_COMPILE_OPTIONS}")
+install(TARGETS cmcommon_static ARCHIVE DESTINATION lib)
+
diff --git a/src/lib/cm_common/Makefile b/src/lib/cm_common/Makefile
new file mode 100644
index 000000000..1d78f721b
--- /dev/null
+++ b/src/lib/cm_common/Makefile
@@ -0,0 +1,76 @@
+#----------------------------------------------------------------------------
+#
+# openGauss CM common makefile
+#
+# Copyright(c) 2010-2012 Postgres-XC Development Group
+#
+# src/lib/cm_common/Makefile
+#
+#-----------------------------------------------------------------------------
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+subdir = src/lib/cm_common
+
+VERSION = 2
+
+override CFLAGS += -fstack-protector-strong -Wl,-z,relro,-z,now
+override CPPFLAGS := -I.
-I$(libpq_srcdir) -I$(LIBCGROUP_INCLUDE_PATH) $(CPPFLAGS) -L$(LIBCGROUP_LIB_PATH) -lcgroup +LIBS += $(PTHREAD_LIBS) + +override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC +override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC +override CPPFLAGS += -I$(top_builddir)/src/include/cm -I$(top_builddir)/../distribute/include/ -I$(top_builddir)/../distribute/include/cm -L$(top_builddir)/../distribute/cm/cm_etcdapi + +include $(top_srcdir)/src/gausskernel/common.mk + +ifneq "$(MAKECMDGOALS)" "clean" + ifneq "$(MAKECMDGOALS)" "distclean" + ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1" + -include $(DEPEND) + endif + endif +endif +OBJS = cm_elog.o be_module.o cm_stringinfo.o cm_misc.o cm_cgroup.o cm_path.o + +ETCDAPI=$(top_builddir)/../distribute/cm/cm_etcdapi/libetcdapi.so + +.NOTPARALLEL: +all: cm_errcodes.h libcmcommon.a libcmcommon.so + +cm_errcodes.h: cm_errcodes.txt + $(PERL) generate-cm-errcodes.pl $< > $@ + rm -rf $(top_builddir)/src/include/cm/cm_errcodes.h + $(LN_S) $(top_builddir)/src/lib/cm_common/cm_errcodes.h $(top_builddir)/src/include/cm/cm_errcodes.h + +libcmcommon.so: + $(CC) -fPIC -shared $(CFLAGS) $(CPPFLAGS) cm_elog.cpp cm_stringinfo.cpp cm_misc.cpp cm_cgroup.cpp cm_path.cpp be_module.cpp -o libcmcommon.so.$(VERSION) + rm -f libcmcommon.so && \ + ln -sf libcmcommon.so.$(VERSION) libcmcommon.so + +libcmcommon.a: $(OBJS) + $(AR) $(AROPT) $@ $^ + +clean: + rm -f $(OBJS) libcmcommon.a libcmcommon.so libcmcommon.so.$(VERSION) *.depend + rm -rf cm_errcodes.h + rm -rf $(top_builddir)/src/include/cm/cm_errcodes.h + +install: all installdirs + $(INSTALL_STLIB) libcmcommon.a '$(DESTDIR)$(libdir)/libcmcommon.a' + $(INSTALL_STLIB) libcmcommon.so.$(VERSION) '$(DESTDIR)$(libdir)/libcmcommon.so.$(VERSION)' + cd '$(DESTDIR)$(libdir)' && \ + rm -f libcmcommon.so && \ + ln -sf libcmcommon.so.$(VERSION) libcmcommon.so + +installdirs: + $(MKDIR_P) '$(DESTDIR)$(libdir)' + +uninstall: + rm -f '$(DESTDIR)$(libdir)/libcmcommon.a' + +$(top_builddir)/../distribute/cm/cm_etcdapi/libetcdapi.so: + $(MAKE) -C $(top_builddir)/../distribute/cm/cm_etcdapi libetcdapi.so + +distclean: clean + +maintainer-clean: distclean diff --git a/src/lib/cm_common/be_module.cpp b/src/lib/cm_common/be_module.cpp new file mode 100644 index 000000000..d97afdbe9 --- /dev/null +++ b/src/lib/cm_common/be_module.cpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * -------------------------------------------------------------------------
+ *
+ * be_module.cpp
+ *
+ * IDENTIFICATION
+ *    src/lib/cm_common/be_module.cpp
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "cm/be_module.h"
+
+const module_data module_map[] = {
+    {MOD_ALL, "ALL"},
+    /* add your module name following */
+    /* add your module name above */
+    {MOD_CMA, "CMAGENT"},
+    {MOD_CMS, "CMSERVER"},
+    {MOD_CMCTL, "CMCTL"},
+    {MOD_OMMONITER, "OMMONITER"},
+    {MOD_MAX, "BACKEND"}
+};
diff --git a/src/lib/cm_common/cm_cgroup.cpp b/src/lib/cm_common/cm_cgroup.cpp
new file mode 100644
index 000000000..dd3ce91b8
--- /dev/null
+++ b/src/lib/cm_common/cm_cgroup.cpp
@@ -0,0 +1,233 @@
+/**
+ * @file cm_cgroup.cpp
+ * @brief cgroup lookup and task-attach helpers for the CM processes
+ * @author xxx
+ * @version 1.0
+ * @date 2020-08-06
+ *
+ * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2011-2020. All rights reserved.
+ *
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "cm/elog.h"
+#include "cm/cm_cgroup.h"
+#include "securec.h"
+
+#define GSCGROUP_CM "CM"
+#define GPNAME_PATH_LEN 1024
+
+static struct cgroup* gscgroup_get_cgroup(const char* relpath);
+
+typedef int errno_t;
+
+/*
+ * function name: gscgroup_cm_init
+ * description  : returns the relative path of the CM cgroup when
+ *                initialization succeeds, or NULL otherwise.
+ * Note         : Please note, the caller must free the return value.
+ */
+
+char* gscgroup_cm_init()
+{
+    int ret = 0;
+    errno_t rc;
+
+    struct mntent* ent = NULL;
+    char* mntent_buffer = NULL;
+    struct mntent temp_ent = {0};
+
+    char abspath[GPNAME_PATH_LEN] = {0};
+    char* relpath = NULL;
+    size_t mnt_dir_len = 0;
+    size_t relpath_len;
+
+    struct passwd* pw = NULL;
+    struct stat buf = {0};
+
+    write_runlog(LOG, "starting: gscgroup_cm_init().\n");
+    ret = cgroup_init();
+    if (ret) {
+        write_runlog(WARNING, "Cgroup initialization failed: %s.\n", cgroup_strerror(ret));
+        return NULL;
+    }
+
+    write_runlog(LOG, "finished: cgroup_init().\n");
+    /* open '/proc/mounts' to load mount points */
+    FILE* proc_mount = fopen("/proc/mounts", "re");
+    if (proc_mount == NULL) {
+        goto error;
+    }
+
+    /* The buffer is too big, so it cannot be stored on the stack. */
+    mntent_buffer = (char*)malloc(4 * FILENAME_MAX);
+    if (mntent_buffer == NULL) {
+        write_runlog(ERROR, "Failed to allocate memory: Out of memory. RequestSize=%d.\n", 4 * FILENAME_MAX);
+        goto error;
+    }
+
+    while ((ent = getmntent_r(proc_mount, &temp_ent, mntent_buffer, 4 * FILENAME_MAX)) != NULL) {
+        if (strcmp(ent->mnt_type, "cgroup") != 0 || strstr(ent->mnt_opts, "cpu") == NULL) {
+            continue;
+        } else {
+            /* get the cm cgroup abspath */
+            pw = getpwuid(getuid());
+            if (pw == NULL) {
+                goto error;
+            }
+
+            mnt_dir_len = strlen(ent->mnt_dir);
+            rc = sprintf_s(abspath, GPNAME_PATH_LEN, "%s/%s:%s", ent->mnt_dir, GSCGROUP_CM, pw->pw_name);
+            if (rc == -1) {
+                goto error;
+            }
+
+            break;
+        }
+    }
+
+    if (stat(abspath, &buf) != 0) {
+        write_runlog(WARNING, "can not get cm cgroup dir path at %s.\n", abspath);
+        goto error;
+    }
+
+    relpath_len = strlen(abspath) - mnt_dir_len;
+    relpath = (char*)malloc(relpath_len);
+    if (relpath == NULL) {
+        goto error;
+    }
+
+    /* abspath looks like '<mnt_dir>/CM:<user>', so skip mnt_dir_len plus 1
+     * for the '/' separator to obtain the relative path.
+     */
+    rc = strcpy_s(relpath, relpath_len, abspath + mnt_dir_len + 1);
+    if (rc == -1) {
+        write_runlog(WARNING, "can not strcpy cgroup relpath from abspath. 
abspath is %s.\n", abspath); + goto error; + } + if (proc_mount != NULL) { + fclose(proc_mount); + } + if (mntent_buffer != NULL) { + free(mntent_buffer); + } + + write_runlog(LOG, "get cm cgroup relpath succeed, this path is %s.\n", relpath); + return relpath; + +error: + if (proc_mount != NULL) { + fclose(proc_mount); + } + if (relpath != NULL) { + free(relpath); + } + if (mntent_buffer != NULL) { + free(mntent_buffer); + } + + write_runlog(WARNING, "CM cgroup initilization failed.\n"); + return NULL; +} + +/* + * function name: gscgroup_cm_attach_task + * description : attach cm process into a specified cgroup. + */ +void gscgroup_cm_attach_task(const char* relpath) +{ + int ret; + struct cgroup* cg = NULL; + + /* get the cgroup structure */ + cg = gscgroup_get_cgroup(relpath); + + if (NULL == cg) { + write_runlog(WARNING, "can not get cgroup from relpath, relpath is %s.\n", relpath); + return; + } else { + /* attach current thread into the cgroup */ + ret = cgroup_attach_task(cg); + if (ret != 0) { + write_runlog(WARNING, + "Cgroup failed to attach " + "into \"%s\" group: %s(%d).\n", + relpath, + cgroup_strerror(ret), + ret); + } + } + + cgroup_free(&cg); +} + +/* + * function name: gscgroup_cm_attach_task_pid + * description : attach a process into a specified cgroup. + */ +void gscgroup_cm_attach_task_pid(const char* relpath, pid_t tid) +{ + int ret; + struct cgroup* cg = NULL; + + /* get the cgroup structure */ + cg = gscgroup_get_cgroup(relpath); + + if (NULL == cg) { + write_runlog(WARNING, "can not get cgroup from relpath, relpath is %s.\n", relpath); + } else { + /* attach current thread into the cgroup */ + ret = cgroup_attach_task_pid(cg, tid); + if (ret != 0) { + write_runlog(WARNING, + "Cgroup failed to attach " + "into \"%s\" group: %s(%d).\n", + relpath, + cgroup_strerror(ret), + ret); + } + } + + cgroup_free(&cg); +} + +/* + * function name: gscgroup_get_cgroup + * description : retrieve cgroup structure based on relative path + * arguments : + * relpath: the relative path of cgroup + * return value : + * NULL: abnormal + * other: normal + * + */ +static struct cgroup* gscgroup_get_cgroup(const char* relpath) +{ + int ret = 0; + struct cgroup* cg = NULL; + + /* allocate new cgroup structure */ + cg = cgroup_new_cgroup(relpath); + if (cg == NULL) { + write_runlog(WARNING, "Cgroup %s failed to call cgroup_new_cgroup.\n", relpath); + return NULL; + } + + /* get all information regarding the cgroup from kernel */ + ret = cgroup_get_cgroup(cg); + if (ret != 0) { + write_runlog(WARNING, "Cgroup get_cgroup %s information: %s(%d).\n", relpath, cgroup_strerror(ret), ret); + cgroup_free(&cg); + return NULL; + } + + return cg; +} diff --git a/src/lib/cm_common/cm_elog.cpp b/src/lib/cm_common/cm_elog.cpp new file mode 100644 index 000000000..776d57b26 --- /dev/null +++ b/src/lib/cm_common/cm_elog.cpp @@ -0,0 +1,1783 @@ +/** + * @file cm_elog.cpp + * @brief error logging and reporting + * @author xxx + * @version 1.0 + * @date 2020-08-06 + * + * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2011-2020. All rights reserved. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include "cm/path.h" + +#include "cm/cm_c.h" +#include "cm/stringinfo.h" +#include "cm/elog.h" +#include "alarm/alarm.h" +#include "cm/cm_misc.h" +#include "cm/be_module.h" + +#include +#if !defined(WIN32) +#include +#define gettid() syscall(__NR_gettid) +#else +/* windows. 
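there is no gettid() equivalent defined in this branch, so the tid field in log prefixes relies on the Linux syscall wrapper above. 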
*/
+#endif
+#include
+
+#undef _
+#define _(x) x
+
+/*
+ * We really want line-buffered mode for logfile output, but Windows does
+ * not have it, and interprets _IOLBF as _IOFBF (bozos). So use _IONBF
+ * instead on Windows.
+ */
+#ifdef WIN32
+#define LBF_MODE _IONBF
+#else
+#define LBF_MODE _IOLBF
+#endif
+
+int log_destion_choice = LOG_DESTION_FILE;
+
+/* declare the global variables of the alarm module. */
+int g_alarmReportInterval;
+char g_alarmComponentPath[MAXPGPATH];
+int g_alarmReportMaxCount;
+
+char sys_log_path[MAX_PATH_LEN] = {0}; /* default cmData/cm_server or cmData/cm_agent. */
+char cm_krb_server_keyfile[MAX_PATH_LEN] = {0};
+int Log_RotationSize = 16 * 1024 * 1024L;
+pthread_rwlock_t syslog_write_lock;
+pthread_rwlock_t dotCount_lock;
+static bool dotCountNotZero = false;
+
+FILE* syslogFile = NULL;
+const char* prefix_name = NULL;
+
+char curLogFileName[MAXPGPATH] = {0};
+volatile int log_min_messages = WARNING;
+volatile bool incremental_build = true;
+volatile bool security_mode = false;
+volatile int maxLogFileSize = 16 * 1024 * 1024;
+volatile bool logInitFlag = false;
+/* undocumentedVersion:
+ * It's for in-place upgrading. This variable means which version we are
+ * upgrading from. Zero means we are not upgrading.
+ */
+volatile uint32 undocumentedVersion = 0;
+bool log_file_set = false;
+/* unify log style */
+THR_LOCAL const char* thread_name = NULL;
+
+FILE* logfile_open(const char* filename, const char* mode);
+static void get_alarm_report_interval(const char* conf);
+static void TrimPathDoubleEndQuotes(char* path);
+
+#define BUF_LEN 1024
+#define COUNTSTR_LEN 128
+#define MSBUF_LENGTH 8
+#define FORMATTED_TS_LEN 128
+
+static THR_LOCAL char errbuf_errdetail[EREPORT_BUF_LEN];
+static THR_LOCAL char errbuf_errcode[EREPORT_BUF_LEN];
+static THR_LOCAL char errbuf_errmodule[EREPORT_BUF_LEN];
+static THR_LOCAL char errbuf_errmsg[EREPORT_BUF_LEN];
+static THR_LOCAL char errbuf_errcause[EREPORT_BUF_LEN];
+static THR_LOCAL char errbuf_erraction[EREPORT_BUF_LEN];
+
+static THR_LOCAL char formatted_log_time[FORMATTED_TS_LEN];
+
+/**
+ * @brief When a parent process opens a file, the child processes will inherit the
+ * file handle of the parent process. If the file is deleted and the child processes
+ * are still running, the file handle will not be freed and take up disk space.
+ * We set the FD_CLOEXEC flag to the file, so that the child processes don't inherit
+ * the file handle of the parent process, and do not cause handle leak.
+ *
+ * @param fp open file object
+ * @return int 0 means successfully set the flag. 
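+ * A negative value means the fcntl() call failed and the flag was not set.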
+ */ +int SetFdCloseExecFlag(FILE* fp) +{ + int fd = fileno(fp); + int flags = fcntl(fd, F_GETFD); + if (flags < 0) { + (void)printf("fcntl get flags failed.\n"); + return flags; + } + flags |= FD_CLOEXEC; + int ret = fcntl(fd, F_SETFD, flags); + if (ret == -1) { + (void)printf("fcntl set flags failed.\n"); + } + + return ret; +} + +void AlarmLogImplementation(int level, const char* prefix, const char* logtext) +{ + switch (level) { + case ALM_DEBUG: + write_runlog(LOG, "%s%s\n", prefix, logtext); + break; + case ALM_LOG: + write_runlog(LOG, "%s%s\n", prefix, logtext); + break; + default: + break; + } +} + +/* + * setup formatted_log_time, for consistent times between CSV and regular logs + */ +static void setup_formatted_log_time(void) +{ + struct timeval tv = {0}; + time_t stamp_time; + char msbuf[MSBUF_LENGTH]; + struct tm timeinfo = {0}; + int rc; + errno_t rcs; + + (void)gettimeofday(&tv, NULL); + stamp_time = (time_t)tv.tv_sec; + (void)localtime_r(&stamp_time, &timeinfo); + + (void)strftime(formatted_log_time, + FORMATTED_TS_LEN, + /* leave room for milliseconds... */ + "%Y-%m-%d %H:%M:%S %Z", + &timeinfo); + + /* 'paste' milliseconds into place... */ + rc = sprintf_s(msbuf, MSBUF_LENGTH, ".%03d", (int)(tv.tv_usec / 1000)); + securec_check_ss_c(rc, "\0", "\0"); + rcs = strncpy_s(formatted_log_time + 19, FORMATTED_TS_LEN - 19, msbuf, 4); + securec_check_c(rcs, "\0", "\0"); +} + +void add_log_prefix(int elevel, char* str) +{ + char errbuf_tmp[BUF_LEN * 3] = {0}; + errno_t rc; + int rcs; + + setup_formatted_log_time(); + + /* unify log style */ + if (thread_name == NULL) { + thread_name = ""; + } + rcs = snprintf_s(errbuf_tmp, + sizeof(errbuf_tmp), + sizeof(errbuf_tmp) - 1, + "%s tid=%ld %s %s: ", + formatted_log_time, + gettid(), + thread_name, + log_level_int_to_string(elevel)); + securec_check_intval(rcs, ); + /* max message length less than 2048. */ + rc = strncat_s(errbuf_tmp, BUF_LEN * 3, str, BUF_LEN * 3 - strlen(errbuf_tmp)); + + securec_check_c(rc, "\0", "\0"); + rc = memcpy_s(str, BUF_LEN * 2, errbuf_tmp, BUF_LEN * 2 - 1); + securec_check_c(rc, "\0", "\0"); + str[BUF_LEN * 2 - 1] = '\0'; +} + +/* + * is_log_level_output -- is elevel logically >= log_min_level? + * + * We use this for tests that should consider LOG to sort out-of-order, + * between ERROR and FATAL. Generally this is the right thing for testing + * whether a message should go to the postmaster log, whereas a simple >= + * test is correct for testing whether the message should go to the client. + */ +static bool is_log_level_output(int elevel, int log_min_level) +{ + if (elevel == LOG) { + if (log_min_level == LOG || log_min_level <= ERROR) { + return true; + } + } else if (log_min_level == LOG) { + /* elevel not equal to LOG */ + if (elevel >= FATAL) + return true; + } else if (elevel >= log_min_level) { + /* Neither is LOG */ + return true; + } + + return false; +} + +/* + * Write errors to stderr (or by equal means when stderr is + * not available). + */ +void write_runlog(int elevel, const char* fmt, ...) +{ + va_list ap; + va_list bp; + char errbuf[2048] = {0}; + char fmtBuffer[2048] = {0}; + int count = 0; + int ret = 0; + bool output_to_server = false; + + /* Get whether the record will be logged into the file. */ + output_to_server = is_log_level_output(elevel, log_min_messages); + if (!output_to_server) { + return; + } + + /* Obtaining international texts. 
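With the identity _() macro defined at the top of this file this is a no-op; in an NLS-enabled build it would look up a translated format string. 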
*/ + fmt = _(fmt); + + va_start(ap, fmt); + + if (prefix_name != NULL && strcmp(prefix_name, "cm_ctl") == 0) { + /* Skip the wait dot log and the line break log. */ + if (strcmp(fmt, ".") == 0) { + (void)pthread_rwlock_wrlock(&dotCount_lock); + dotCountNotZero = true; + (void)pthread_rwlock_unlock(&dotCount_lock); + (void)vfprintf(stdout, fmt, ap); + (void)fflush(stdout); + va_end(ap); + return; + } + + /** + * Log the record to std error. + * 1. The log level is greater than the level "LOG", and the process name is "cm_ctl". + * 2. The log file path was not initialized. + */ + if (elevel >= LOG || sys_log_path[0] == '\0') { + if (dotCountNotZero == true) { + fprintf(stdout, "\n"); + (void)pthread_rwlock_wrlock(&dotCount_lock); + dotCountNotZero = false; + (void)pthread_rwlock_unlock(&dotCount_lock); + } + + /* Get the print out format. */ + ret = snprintf_s(fmtBuffer, sizeof(fmtBuffer), sizeof(fmtBuffer) - 1, "%s: %s", prefix_name, fmt); + securec_check_ss_c(ret, "\0", "\0"); + va_copy(bp, ap); + (void)vfprintf(stdout, fmtBuffer, bp); + (void)fflush(stdout); + va_end(bp); + } + } + + /* Format the log record. */ + count = vsnprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, fmt, ap); + va_end(ap); + + switch (log_destion_choice) { + case LOG_DESTION_FILE: + add_log_prefix(elevel, errbuf); + write_log_file(errbuf, count); + break; + + default: + break; + } +} + +int add_message_string(char* errmsg_tmp, char* errdetail_tmp, char* errmodule_tmp, char* errcode_tmp, const char* fmt) +{ + int rcs = 0; + char *p = NULL; + char errbuf_tmp[BUF_LEN] = {0}; + + rcs = snprintf_s(errbuf_tmp, sizeof(errbuf_tmp), sizeof(errbuf_tmp) - 1, "%s", fmt); + securec_check_intval(rcs, ); + if ((p = strstr(errbuf_tmp, "[ERRMSG]:")) != NULL) { + rcs = snprintf_s(errmsg_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt + strlen("[ERRMSG]:")); + } else if ((p = strstr(errbuf_tmp, "[ERRDETAIL]:")) != NULL) { + rcs = snprintf_s(errdetail_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt); + } else if ((p = strstr(errbuf_tmp, "[ERRMODULE]:")) != NULL) { + rcs = snprintf_s(errmodule_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt + strlen("[ERRMODULE]:")); + } else if ((p = strstr(errbuf_tmp, "[ERRCODE]:")) != NULL) { + rcs = snprintf_s(errcode_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt + strlen("[ERRCODE]:")); + } + securec_check_intval(rcs, ); + return 0; +} + +int add_message_string(char* errmsg_tmp, char* errdetail_tmp, char* errmodule_tmp, char* errcode_tmp, + char* errcause_tmp, char* erraction_tmp, const char* fmt) +{ + int rcs = 0; + char *p = NULL; + char errbuf_tmp[BUF_LEN] = {0}; + + rcs = snprintf_s(errbuf_tmp, sizeof(errbuf_tmp), sizeof(errbuf_tmp) - 1, "%s", fmt); + securec_check_intval(rcs, ); + if ((p = strstr(errbuf_tmp, "[ERRMSG]:")) != NULL) { + rcs = snprintf_s(errmsg_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt + strlen("[ERRMSG]:")); + } else if ((p = strstr(errbuf_tmp, "[ERRDETAIL]:")) != NULL) { + rcs = snprintf_s(errdetail_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt); + } else if ((p = strstr(errbuf_tmp, "[ERRMODULE]:")) != NULL) { + rcs = snprintf_s(errmodule_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt + strlen("[ERRMODULE]:")); + } else if ((p = strstr(errbuf_tmp, "[ERRCODE]:")) != NULL) { + rcs = snprintf_s(errcode_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt + strlen("[ERRCODE]:")); + } else if ((p = strstr(errbuf_tmp, "[ERRCAUSE]:")) != NULL) { + rcs = snprintf_s(errcause_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt); + } else if ((p = strstr(errbuf_tmp, "[ERRACTION]:")) != NULL) { + rcs = snprintf_s(erraction_tmp, BUF_LEN, BUF_LEN - 1, "%s", fmt); + } + 
securec_check_intval(rcs, ); + return 0; +} + + +void add_log_prefix2(int elevel, const char* errmodule_tmp, const char* errcode_tmp, char* str) +{ + char errbuf_tmp[BUF_LEN * 3] = {0}; + errno_t rc; + int rcs; + + setup_formatted_log_time(); + + /* unify log style */ + if (thread_name == NULL) { + thread_name = ""; + } + if (errmodule_tmp[0] && errcode_tmp[0]) { + rcs = snprintf_s(errbuf_tmp, + sizeof(errbuf_tmp), + sizeof(errbuf_tmp) - 1, + "%s tid=%ld %s [%s] %s %s: ", + formatted_log_time, + gettid(), + thread_name, + errmodule_tmp, + errcode_tmp, + log_level_int_to_string(elevel)); + } else { + rcs = snprintf_s(errbuf_tmp, + sizeof(errbuf_tmp), + sizeof(errbuf_tmp) - 1, + "%s tid=%ld %s %s: ", + formatted_log_time, + gettid(), + thread_name, + log_level_int_to_string(elevel)); + } + securec_check_intval(rcs, ); + /* max message length less than 2048. */ + rc = strncat_s(errbuf_tmp, BUF_LEN * 3, str, BUF_LEN * 3 - strlen(errbuf_tmp)); + + securec_check_c(rc, "\0", "\0"); + rc = memcpy_s(str, BUF_LEN * 2, errbuf_tmp, BUF_LEN * 2 - 1); + securec_check_c(rc, "\0", "\0"); + str[BUF_LEN * 2 - 1] = '\0'; +} + +/* + * Write errors to stderr (or by equal means when stderr is + * not available). + */ +void write_runlog3(int elevel, const char* errmodule_tmp, const char* errcode_tmp, const char* fmt, ...) +{ + va_list ap; + va_list bp; + char errbuf[2048] = {0}; + char fmtBuffer[2048] = {0}; + int count = 0; + int ret = 0; + bool output_to_server = false; + + /* Get whether the record will be logged into the file. */ + output_to_server = is_log_level_output(elevel, log_min_messages); + if (!output_to_server) { + return; + } + + /* Obtaining international texts. */ + fmt = _(fmt); + + va_start(ap, fmt); + + if (prefix_name != NULL && strcmp(prefix_name, "cm_ctl") == 0) { + /* Skip the wait dot log and the line break log. */ + if (strcmp(fmt, ".") == 0) { + (void)pthread_rwlock_wrlock(&dotCount_lock); + dotCountNotZero = true; + (void)pthread_rwlock_unlock(&dotCount_lock); + (void)vfprintf(stdout, fmt, ap); + (void)fflush(stdout); + va_end(ap); + return; + } + + /** + * Log the record to std error. + * 1. The log level is greater than the level "LOG", and the process name is "cm_ctl". + * 2. The log file path was not initialized. + */ + if (elevel >= LOG || sys_log_path[0] == '\0') { + if (dotCountNotZero == true) { + fprintf(stdout, "\n"); + (void)pthread_rwlock_wrlock(&dotCount_lock); + dotCountNotZero = false; + (void)pthread_rwlock_unlock(&dotCount_lock); + } + + /* Get the print out format. */ + ret = snprintf_s(fmtBuffer, sizeof(fmtBuffer), sizeof(fmtBuffer) - 1, "%s: %s", prefix_name, fmt); + securec_check_intval(ret, ); + va_copy(bp, ap); + (void)vfprintf(stdout, fmtBuffer, bp); + (void)fflush(stdout); + va_end(bp); + } + } + + /* Format the log record. */ + count = vsnprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, fmt, ap); + securec_check_intval(count, ); + va_end(ap); + + switch (log_destion_choice) { + case LOG_DESTION_FILE: + add_log_prefix2(elevel, errmodule_tmp, errcode_tmp, errbuf); + write_log_file(errbuf, count); + break; + + default: + break; + } +} + +/* + * Open a new logfile with proper permissions and buffering options. + * + * If allow_errors is true, we just log any open failure and return NULL + * (with errno still correct for the fopen failure). + * Otherwise, errors are treated as fatal. 
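+ * (Note: this CM variant takes no allow_errors argument; an open failure is
+ * printed and NULL is returned, as the code below shows.)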
+ */ +FILE* logfile_open(const char* log_path, const char* mode) +{ + FILE* fh = NULL; + mode_t oumask; + char log_file_name[MAXPGPATH] = {0}; + char log_temp_name[MAXPGPATH] = {0}; + char log_create_time[LOG_MAX_TIMELEN] = {0}; + DIR* dir = NULL; + struct dirent* de = NULL; + bool is_exist = false; + pg_time_t current_time; + struct tm* systm = NULL; + /* check validity of current log file name */ + char* name_ptr = NULL; + errno_t rc = 0; + int ret = 0; + + if (log_path == NULL) { + (void)printf("logfile_open,log file path is null.\n"); + return NULL; + } + + /* + * Note we do not let Log_file_mode disable IWUSR, + * since we certainly want to be able to write the files ourselves. + */ + oumask = umask((mode_t)((~(mode_t)(S_IRUSR | S_IWUSR | S_IXUSR)) & (S_IRWXU | S_IRWXG | S_IRWXO))); + + /* find current log file. */ + if ((dir = opendir(log_path)) == NULL) { + printf(_("%s: opendir %s failed! \n"), prefix_name, log_path); + return NULL; + } + while ((de = readdir(dir)) != NULL) { + /* exist current log file. */ + if (strstr(de->d_name, prefix_name) != NULL) { + name_ptr = strstr(de->d_name, "-current.log"); + if (name_ptr != NULL) { + name_ptr += strlen("-current.log"); + if ((*name_ptr) == '\0') { + is_exist = true; + break; + } + } + } + } + + rc = memset_s(log_file_name, MAXPGPATH, 0, MAXPGPATH); + securec_check_errno(rc, ); + if (!is_exist) { + /* create current log file name. */ + current_time = time(NULL); + systm = localtime(¤t_time); + if (systm != NULL) { + (void)strftime(log_create_time, LOG_MAX_TIMELEN, "-%Y-%m-%d_%H%M%S", systm); + } + ret = + snprintf_s(log_temp_name, MAXPGPATH, MAXPGPATH - 1, "%s%s%s", prefix_name, log_create_time, curLogFileMark); + securec_check_intval(ret, ); + ret = snprintf_s(log_file_name, MAXPGPATH, MAXPGPATH - 1, "%s/%s", log_path, log_temp_name); + securec_check_intval(ret, ); + } else { + /* if log file exist, get its file name. */ + ret = snprintf_s(log_file_name, MAXPGPATH, MAXPGPATH - 1, "%s/%s", log_path, de->d_name); + securec_check_intval(ret, ); + } + (void)closedir(dir); + fh = fopen(log_file_name, mode); + + (void)umask(oumask); + + if (fh != NULL) { + (void)setvbuf(fh, NULL, LBF_MODE, 0); + +#ifdef WIN32 + /* use CRLF line endings on Windows */ + _setmode(_fileno(fh), _O_TEXT); +#endif + /* + * when parent process(cm_agent) open the cm_agent_xxx.log, the child processes(cn\dn\gtm\cm_server) + * inherit the file handle of the parent process. If the file is deleted and the child processes + * are still running, the file handle will not be freed, it will take up disk space, so we set + * the FD_CLOEXEC flag to the file, so that the child processes don't inherit the file handle of the + * parent process. 
+ */ + if (SetFdCloseExecFlag(fh) == -1) { + (void)printf("set file flag failed, filename:%s, errmsg: %s.\n", log_file_name, strerror(errno)); + } + } else { + int save_errno = errno; + + (void)printf("logfile_open could not open log file:%s %s.\n", log_file_name, strerror(errno)); + errno = save_errno; + } + + /* store current log file name */ + rc = memset_s(curLogFileName, MAXPGPATH, 0, MAXPGPATH); + securec_check_errno(rc, ); + rc = strncpy_s(curLogFileName, MAXPGPATH, log_file_name, strlen(log_file_name)); + securec_check_errno(rc, ); + + return fh; +} + +int logfile_init() +{ + int rc; + errno_t rcs; + + rc = pthread_rwlock_init(&syslog_write_lock, NULL); + if (rc != 0) { + fprintf(stderr, "logfile_init lock failed.exit\n"); + exit(1); + } + rc = pthread_rwlock_init(&dotCount_lock, NULL); + if (rc != 0) { + fprintf(stderr, "logfile_init dot_count_lock failed.exit\n"); + exit(1); + } + rcs = memset_s(sys_log_path, MAX_PATH_LEN, 0, MAX_PATH_LEN); + securec_check_c(rcs, "\0", "\0"); + + return 0; +} + +int is_comment_line(const char* str) +{ + size_t ii = 0; + + if (str == NULL) { + printf("bad config file line\n"); + exit(1); + } + + /* skip blank */ + for (;;) { + if (*(str + ii) == ' ') { + ii++; /* skip blank */ + } else { + break; + } + } + + if (*(str + ii) == '#') { + return 1; /* comment line */ + } + + return 0; /* not comment line */ +} + +int get_authentication_type(const char* config_file) +{ + char buf[BUF_LEN]; + FILE* fd = NULL; + int type = CM_AUTH_TRUST; + + if (config_file == NULL) { + return CM_AUTH_TRUST; /* default level */ + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + char errBuffer[ERROR_LIMIT_LEN]; + printf("can not open config file: %s %s\n", config_file, pqStrerror(errno, errBuffer, ERROR_LIMIT_LEN)); + exit(1); + } + + while (!feof(fd)) { + errno_t rc; + rc = memset_s(buf, BUF_LEN, 0, BUF_LEN); + securec_check_c(rc, "\0", "\0"); + (void)fgets(buf, BUF_LEN, fd); + + if (is_comment_line(buf) == 1) { + continue; /* skip # comment */ + } + + if (strstr(buf, "cm_auth_method") != NULL) { + /* check all lines */ + if (strstr(buf, "trust") != NULL) { + type = CM_AUTH_TRUST; + } + + if (strstr(buf, "gss") != NULL) { + type = CM_AUTH_GSS; + } + } + } + + fclose(fd); + return type; +} + +/* trim successive characters on both ends */ +static char* TrimToken(char* src, const char& delim) +{ + char* s = 0; + char* e = 0; + char* c = 0; + + for (c = src; (c != NULL) && *c; ++c) { + if (*c == delim) { + if (e == NULL) { + e = c; + } + } else { + if (s == NULL) { + s = c; + } + e = NULL; + } + } + + if (s == NULL) { + s = src; + } + + if (e != NULL) { + *e = 0; + } + + return s; +} + +static void TrimPathDoubleEndQuotes(char* path) +{ + int pathLen = strlen(path); + + /* make sure buf[MAXPGPATH] can copy the whole path, last '\0' included */ + if (pathLen > MAXPGPATH - 1) { + return; + } + + char* pathTrimed = NULL; + pathTrimed = TrimToken(path, '\''); + pathTrimed = TrimToken(pathTrimed, '\"'); + + char buf[MAXPGPATH] = {0}; + errno_t rc = 0; + + rc = strncpy_s(buf, MAXPGPATH, pathTrimed, strlen(pathTrimed)); + securec_check_errno(rc, ); + + rc = strncpy_s(path, pathLen + 1, buf, strlen(buf)); + securec_check_errno(rc, ); +} + +void get_krb_server_keyfile(const char* config_file) +{ + char buf[MAXPGPATH]; + FILE* fd = NULL; + + int ii = 0; + + char* subStr = NULL; + char* subStr1 = NULL; + char* subStr2 = NULL; + char* subStr3 = NULL; + + char* saveptr1 = NULL; + char* saveptr2 = NULL; + char* saveptr3 = NULL; + errno_t rc = 0; + + if (config_file == NULL) { + 
return;
+    } else {
+        logInitFlag = true;
+    }
+
+    fd = fopen(config_file, "r");
+    if (fd == NULL) {
+        printf("get_krb_server_keyfile confDir error\n");
+        exit(1);
+    }
+
+    while (!feof(fd)) {
+        rc = memset_s(buf, MAXPGPATH, 0, MAXPGPATH);
+        securec_check_errno(rc, );
+
+        (void)fgets(buf, MAXPGPATH, fd);
+        buf[MAXPGPATH - 1] = 0;
+
+        if (is_comment_line(buf) == 1) {
+            continue; /* skip # comment */
+        }
+
+        subStr = strstr(buf, "cm_krb_server_keyfile");
+        if (subStr == NULL) {
+            continue;
+        }
+
+        subStr = strstr(subStr + 7, "=");
+        if (subStr == NULL) {
+            continue;
+        }
+
+        /* skip the line if '=' is the last char */
+        if (*(subStr + 1) == '\0') {
+            continue;
+        }
+
+        /* skip blank */
+        ii = 1;
+        for (;;) {
+            if (*(subStr + ii) == ' ') {
+                ii++; /* skip blank */
+            } else {
+                break;
+            }
+        }
+        subStr = subStr + ii;
+
+        /* begin: strip trailing blanks, line breaks and quotes */
+        subStr1 = strtok_r(subStr, " ", &saveptr1);
+        if (subStr1 == NULL) {
+            continue;
+        }
+
+        subStr2 = strtok_r(subStr1, "\n", &saveptr2);
+        if (subStr2 == NULL) {
+            continue;
+        }
+
+        subStr3 = strtok_r(subStr2, "\r", &saveptr3);
+        if (subStr3 == NULL) {
+            continue;
+        }
+        if (subStr3[0] == '\'') {
+            subStr3 = subStr3 + 1;
+        }
+        if (subStr3[strlen(subStr3) - 1] == '\'') {
+            subStr3[strlen(subStr3) - 1] = '\0';
+        }
+        if (strlen(subStr3) > 0) {
+            rc = memcpy_s(cm_krb_server_keyfile, sizeof(cm_krb_server_keyfile), subStr3, strlen(subStr3) + 1);
+            securec_check_errno(rc, );
+        }
+    }
+
+    fclose(fd);
+
+    TrimPathDoubleEndQuotes(cm_krb_server_keyfile);
+
+    return; /* if the item is absent, the compiled-in default is kept */
+}
+
+void GetStringFromConf(const char* configFile, char* itemValue, size_t itemValueLength, const char* itemName)
+{
+    char buf[MAXPGPATH];
+    FILE* fd = NULL;
+
+    int ii = 0;
+
+    char* subStr = NULL;
+    char* subStr1 = NULL;
+    char* subStr2 = NULL;
+    char* subStr3 = NULL;
+
+    char* saveptr1 = NULL;
+    char* saveptr2 = NULL;
+    char* saveptr3 = NULL;
+    errno_t rc = 0;
+
+    if (configFile == NULL) {
+        return;
+    } else {
+        logInitFlag = true;
+    }
+
+    fd = fopen(configFile, "r");
+    if (fd == NULL) {
+        printf("%s confDir error\n", itemName);
+        exit(1);
+    }
+
+    while (!feof(fd)) {
+        rc = memset_s(buf, MAXPGPATH, 0, MAXPGPATH);
+        securec_check_errno(rc, );
+
+        (void)fgets(buf, MAXPGPATH, fd);
+        buf[MAXPGPATH - 1] = 0;
+
+        if (is_comment_line(buf) == 1) {
+            continue; /* skip # comment */
+        }
+
+        subStr = strstr(buf, itemName);
+        if (subStr == NULL) {
+            continue;
+        }
+
+        subStr = strstr(subStr + strlen(itemName), "=");
+        if (subStr == NULL) {
+            continue;
+        }
+
+        if (*(subStr + 1) == '\0') {
+            continue; /* '=' is the last char */
+        }
+
+        /* skip blank */
+        ii = 1;
+        for (;;) {
+            if (*(subStr + ii) == ' ') {
+                ii++; /* skip blank */
+            } else {
+                break;
+            }
+        }
+        subStr = subStr + ii;
+
+        /* begin: strip trailing blanks, line breaks and quotes */
+        subStr1 = strtok_r(subStr, " ", &saveptr1);
+        if (subStr1 == NULL) {
+            continue;
+        }
+
+        subStr2 = strtok_r(subStr1, "\n", &saveptr2);
+        if (subStr2 == NULL) {
+            continue;
+        }
+
+        subStr3 = strtok_r(subStr2, "\r", &saveptr3);
+        if (subStr3 == NULL) {
+            continue;
+        }
+        if (subStr3[0] == '\'') {
+            subStr3 = subStr3 + 1;
+        }
+        if (subStr3[strlen(subStr3) - 1] == '\'') {
+            subStr3[strlen(subStr3) - 1] = '\0';
+        }
+        if (strlen(subStr3) > 0) {
+            rc = memcpy_s(itemValue, itemValueLength, subStr3, strlen(subStr3) + 1);
+            securec_check_errno(rc, );
+        } else {
+            write_runlog(ERROR, "invalid value for parameter \" %s \" in %s.\n", itemName, configFile);
+        }
+    }
+
+    fclose(fd);
+
+    return; /* if the item is absent, the caller's default is kept */
+}
+
+/* used for cm_agent and cm_server */
+/* g_currentNode->cmDataPath --> confDir */
+void get_log_level(const char* 
config_file) +{ + char buf[BUF_LEN]; + FILE* fd = NULL; + + if (config_file == NULL) { + return; + } else { + logInitFlag = true; + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + char errBuffer[ERROR_LIMIT_LEN]; + printf("can not open config file: %s %s\n", config_file, pqStrerror(errno, errBuffer, ERROR_LIMIT_LEN)); + exit(1); + } + + while (!feof(fd)) { + errno_t rc; + rc = memset_s(buf, BUF_LEN, 0, BUF_LEN); + securec_check_c(rc, "\0", "\0"); + (void)fgets(buf, BUF_LEN, fd); + + if (is_comment_line(buf) == 1) { + continue; /* skip # comment */ + } + + if (strstr(buf, "log_min_messages") != NULL) { + /* check all lines */ + if (strcasestr(buf, "DEBUG5") != NULL) { + log_min_messages = DEBUG5; + break; + } + + if (strcasestr(buf, "DEBUG1") != NULL) { + log_min_messages = DEBUG1; + break; + } + + if (strcasestr(buf, "WARNING") != NULL) { + log_min_messages = WARNING; + break; + } + + if (strcasestr(buf, "ERROR") != NULL) { + log_min_messages = ERROR; + break; + } + + if (strcasestr(buf, "FATAL") != NULL) { + log_min_messages = FATAL; + break; + } + + if (strcasestr(buf, "LOG") != NULL) { + log_min_messages = LOG; + break; + } + } + } + + fclose(fd); + return; /* default value warning */ +} + +/* used for cm_agent */ +void get_build_mode(const char* config_file) +{ + char buf[BUF_LEN]; + FILE* fd = NULL; + + if (config_file == NULL) { + return; + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + char errBuffer[ERROR_LIMIT_LEN]; + printf("can not open config file: %s %s\n", config_file, pqStrerror(errno, errBuffer, ERROR_LIMIT_LEN)); + exit(1); + } + + while (!feof(fd)) { + errno_t rc; + rc = memset_s(buf, BUF_LEN, 0, BUF_LEN); + securec_check_c(rc, "\0", "\0"); + (void)fgets(buf, BUF_LEN, fd); + + /* skip # comment */ + if (is_comment_line(buf) == 1) { + continue; + } + + /* check all lines */ + if (strstr(buf, "incremental_build") != NULL) { + if (strstr(buf, "on") != NULL) { + incremental_build = true; + } else if (strstr(buf, "off") != NULL) { + incremental_build = false; + } else { + incremental_build = true; + write_runlog(FATAL, "invalid value for parameter \"incremental_build\" in %s.\n", config_file); + } + } + } + + fclose(fd); + return; +} + +/* used for cm_agent and cm_server */ +void get_log_file_size(const char* config_file) +{ + char buf[BUF_LEN]; + FILE* fd = NULL; + + if (config_file == NULL) { + return; /* default size */ + } else { + logInitFlag = true; + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + printf("get_log_file_size error\n"); + exit(1); + } + + while (!feof(fd)) { + errno_t rc; + rc = memset_s(buf, BUF_LEN, 0, BUF_LEN); + securec_check_c(rc, "\0", "\0"); + (void)fgets(buf, BUF_LEN, fd); + + if (is_comment_line(buf) == 1) { + continue; /* skip # comment */ + } + + if (strstr(buf, "log_file_size") != NULL) { + /* only check the first line */ + char* subStr = NULL; + char countStr[COUNTSTR_LEN] = {0}; + int ii = 0; + int jj = 0; + + subStr = strchr(buf, '='); + if (subStr != NULL) { + /* find = */ + ii = 1; /* 1 is = */ + + /* skip blank */ + for (;;) { + if (*(subStr + ii) == ' ') { + ii++; /* skip blank */ + } else if (*(subStr + ii) >= '0' && *(subStr + ii) <= '9') { + break; /* number find.break */ + } else { + /* invalid character. */ + goto out; + } + } + + while (*(subStr + ii) >= '0' && *(subStr + ii) <= '9') { + /* end when no more number. 
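copying stops at the first non-digit character. 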
*/ + if (jj > (int)sizeof(countStr) - 2) { + printf("too large log file size.\n"); + exit(1); + } else { + countStr[jj] = *(subStr + ii); + } + + ii++; + jj++; + } + countStr[jj] = 0; /* jj maybe have added itself.terminate string. */ + + if (countStr[0] != 0) { + maxLogFileSize = atoi(countStr) * 1024 * 1024; /* byte */ + } else { + write_runlog(ERROR, "invalid value for parameter \"log_file_size\" in %s.\n", config_file); + } + } + } + } + +out: + fclose(fd); + return; /* default value is warning */ +} + +int get_cm_thread_count(const char* config_file) +{ +#define DEFAULT_THREAD_NUM 5 + + char buf[BUF_LEN]; + FILE* fd = NULL; + int thread_count = DEFAULT_THREAD_NUM; + errno_t rc = 0; + + if (config_file == NULL) { + printf("no cmserver config file! exit.\n"); + exit(1); + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + printf("open cmserver config file :%s ,error:%m\n", config_file); + exit(1); + } + + while (!feof(fd)) { + rc = memset_s(buf, sizeof(buf), 0, sizeof(buf)); + securec_check_errno(rc, ); + (void)fgets(buf, BUF_LEN, fd); + + if (is_comment_line(buf) == 1) { + continue; /* skip # comment */ + } + + if (strstr(buf, "thread_count") != NULL) { + /* only check the first line */ + char* subStr = NULL; + char countStr[COUNTSTR_LEN] = {0}; + int ii = 0; + int jj = 0; + + subStr = strchr(buf, '='); + /* find = */ + if (subStr != NULL) { + ii = 1; + + /* skip blank */ + for (;;) { + if (*(subStr + ii) == ' ') { + ii++; /* skip blank */ + } else if (*(subStr + ii) >= '0' && *(subStr + ii) <= '9') { + /* number find.break */ + break; + } else { + /* invalid character. */ + goto out; + } + } + + /* end when no number */ + while (*(subStr + ii) >= '0' && *(subStr + ii) <= '9') { + if (jj > (int)sizeof(countStr) - 2) { + printf("too large thread count.\n"); + exit(1); + } else { + countStr[jj] = *(subStr + ii); + } + + ii++; + jj++; + } + countStr[jj] = 0; /* jj maybe have added itself.terminate string. 
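That is, jj now indexes one past the last digit copied, so this writes the terminating NUL. 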
*/
+
+                if (countStr[0] != 0) {
+                    thread_count = atoi(countStr);
+
+                    if (thread_count < 2 || thread_count > 1000) {
+                        printf("invalid thread count %d, range [2 - 1000].\n", thread_count);
+                        exit(1);
+                    }
+                } else {
+                    thread_count = DEFAULT_THREAD_NUM;
+                }
+            }
+        }
+    }
+
+out:
+    fclose(fd);
+    return thread_count;
+}
+
+/*
+ * @Description: get value of parameter from configuration file
+ *
+ * @in config_file: configuration file path
+ * @in key: name of parameter
+ * @in defaultValue: default value of parameter
+ *
+ * @out: value of parameter
+ */
+int get_int_value_from_config(const char* config_file, const char* key, int defaultValue)
+{
+    int64 i64 = get_int64_value_from_config(config_file, key, defaultValue);
+    if (i64 > INT_MAX) {
+        return defaultValue;
+    } else if (i64 < INT_MIN) {
+        return defaultValue;
+    }
+
+    return (int)i64;
+}
+
+/*
+ * @Description: get value of parameter from configuration file
+ *
+ * @in config_file: configuration file path
+ * @in key: name of parameter
+ * @in defaultValue: default value of parameter
+ *
+ * @out: value of parameter
+ */
+uint32 get_uint32_value_from_config(const char* config_file, const char* key, uint32 defaultValue)
+{
+    int64 i64 = get_int64_value_from_config(config_file, key, defaultValue);
+    if (i64 > UINT_MAX) {
+        return defaultValue;
+    } else if (i64 < 0) {
+        return defaultValue;
+    }
+
+    return (uint32)i64;
+}
+
+/*
+ * @Description: get value of parameter from configuration file
+ *
+ * @in config_file: configuration file path
+ * @in key: name of parameter
+ * @in defaultValue: default value of parameter
+ *
+ * @out: value of parameter
+ */
+int64 get_int64_value_from_config(const char* config_file, const char* key, int64 defaultValue)
+{
+    char buf[BUF_LEN];
+    FILE* fd = NULL;
+    int64 int64Value = defaultValue;
+    errno_t rc = 0;
+
+    Assert(key);
+    if (config_file == NULL) {
+        printf("no config file! exit.\n");
+        exit(1);
+    }
+
+    fd = fopen(config_file, "r");
+    if (fd == NULL) {
+        char errBuffer[ERROR_LIMIT_LEN];
+        printf("open config file failed:%s ,error:%s\n", config_file, pqStrerror(errno, errBuffer, ERROR_LIMIT_LEN));
+        exit(1);
+    }
+
+    while (!feof(fd)) {
+        rc = memset_s(buf, sizeof(buf), 0, sizeof(buf));
+        securec_check_errno(rc, );
+        (void)fgets(buf, BUF_LEN, fd);
+
+        if (is_comment_line(buf) == 1) {
+            continue; /* skip # comment */
+        }
+
+        if (strstr(buf, key) != NULL) {
+            /* only check the first line */
+            char* subStr = NULL;
+            char countStr[COUNTSTR_LEN] = {0};
+            int ii = 0;
+            int jj = 0;
+
+            subStr = strchr(buf, '=');
+            if (subStr != NULL) {
+                /* found '=' */
+                ii = 1;
+
+                /* skip blank */
+                while (1) {
+                    if (*(subStr + ii) == ' ') {
+                        ii++; /* skip blank */
+                    } else if (isdigit(*(subStr + ii))) {
+                        /* found a digit; break */
+                        break;
+                    } else {
+                        /* invalid character. */
+                        goto out;
+                    }
+                }
+
+                while (isdigit(*(subStr + ii))) {
+                    /* stop when there are no more digits */
+                    if (jj >= COUNTSTR_LEN - 1) {
+                        write_runlog(ERROR, "length is not enough for countStr\n");
+                        goto out;
+                    }
+                    countStr[jj] = *(subStr + ii);
+
+                    ii++;
+                    jj++;
+                }
+                countStr[jj] = 0; /* jj indexes one past the last digit; terminate the string. 
*/ + + if (countStr[0] != 0) { + int64Value = strtoll(countStr, NULL, 10); + } + break; + } + } + } + +out: + fclose(fd); + return int64Value; +} + +#define ALARM_REPORT_INTERVAL "alarm_report_interval" +#define ALARM_REPORT_INTERVAL_DEFAULT 10 + +#define ALARM_REPORT_MAX_COUNT "alarm_report_max_count" +#define ALARM_REPORT_MAX_COUNT_DEFAULT 5 + +/* trim blank characters on both ends */ +char* trim(char* src) +{ + char* s = 0; + char* e = 0; + char* c = 0; + + for (c = src; (c != NULL) && *c; ++c) { + if (isspace(*c)) { + if (e == NULL) { + e = c; + } + } else { + if (s == NULL) { + s = c; + } + e = 0; + } + } + if (s == NULL) { + s = src; + } + if (e != NULL) { + *e = 0; + } + + return s; +} + +/* Check this line is comment line or not, which is in cm_server.conf file */ +static bool is_comment_entity(char* str_line) +{ + char* src = NULL; + if (str_line == NULL || strlen(str_line) < 1) { + return false; + } + src = str_line; + src = trim(src); + if (src == NULL || strlen(src) < 1) { + return true; + } + if (*src == '#') { + return true; + } + + return false; +} + +int is_digit_string(char* str) +{ +#define isDigital(_ch) (((_ch) >= '0') && ((_ch) <= '9')) + + int i = 0; + int len = -1; + char* p = NULL; + if (str == nullptr) { + return 0; + } + if ((len = strlen(str)) <= 0) { + return 0; + } + p = str; + for (i = 0; i < len; i++) { + if (!isDigital(p[i])) { + return 0; + } + } + return 1; +} +static void get_alarm_parameters(const char* config_file) +{ + char buf[BUF_LEN] = {0}; + FILE* fd = NULL; + char* index1 = NULL; + char* index2 = NULL; + char* src = NULL; + char* key = NULL; + char* value = NULL; + errno_t rc = 0; + + if (config_file == NULL) { + return; + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + return; + } + + while (!feof(fd)) { + rc = memset_s(buf, BUF_LEN, 0, BUF_LEN); + securec_check_c(rc, "\0", "\0"); + (void)fgets(buf, BUF_LEN, fd); + + if (is_comment_entity(buf) == true) { + continue; + } + index1 = strchr(buf, '#'); + if (index1 != NULL) { + *index1 = '\0'; + } + index2 = strchr(buf, '='); + if (index2 == NULL) { + continue; + } + src = buf; + src = trim(src); + index2 = strchr(src, '='); + key = src; + /* jump to the beginning of recorded values */ + value = index2 + 1; + + key = trim(key); + value = trim(value); + if (strncmp(key, ALARM_REPORT_INTERVAL, strlen(ALARM_REPORT_INTERVAL)) == 0) { + if (is_digit_string(value)) { + g_alarmReportInterval = atoi(value); + if (g_alarmReportInterval == -1) { + g_alarmReportInterval = ALARM_REPORT_INTERVAL_DEFAULT; + } + } + break; + } + } + fclose(fd); +} + +static void get_alarm_report_max_count(const char* config_file) +{ + char buf[BUF_LEN] = {0}; + FILE* fd = NULL; + char* index1 = NULL; + char* index2 = NULL; + char* src = NULL; + char* key = NULL; + char* value = NULL; + errno_t rc = 0; + + if (config_file == NULL) { + return; + } + + fd = fopen(config_file, "r"); + if (fd == NULL) { + return; + } + + while (!feof(fd)) { + rc = memset_s(buf, BUF_LEN, 0, BUF_LEN); + securec_check_c(rc, "\0", "\0"); + (void)fgets(buf, BUF_LEN, fd); + + if (is_comment_entity(buf)) { + continue; + } + index1 = strchr(buf, '#'); + if (index1 != NULL) { + *index1 = '\0'; + } + index2 = strchr(buf, '='); + if (index2 == NULL) { + continue; + } + src = buf; + src = trim(src); + index2 = strchr(src, '='); + key = src; + /* jump to the beginning of recorded values */ + value = index2 + 1; + + key = trim(key); + value = trim(value); + if (strncmp(key, ALARM_REPORT_MAX_COUNT, strlen(ALARM_REPORT_MAX_COUNT)) == 0) { + if 
(is_digit_string(value)) { + g_alarmReportMaxCount = atoi(value); + if (g_alarmReportMaxCount == -1) { + g_alarmReportMaxCount = ALARM_REPORT_MAX_COUNT_DEFAULT; + } + } + break; + } + } + fclose(fd); +} + +/* + * This function is for reading cm_server.conf parameters, which have been applied at server side. + * In cm_server this function is ugly, it should be rewritten at new version. + */ +static void get_alarm_report_interval(const char* conf) +{ + get_alarm_parameters(conf); +} + +void get_log_paramter(const char* confDir) +{ + get_log_level(confDir); + get_log_file_size(confDir); + GetStringFromConf(confDir, sys_log_path, sizeof(sys_log_path), "log_dir"); + GetStringFromConf(confDir, g_alarmComponentPath, sizeof(g_alarmComponentPath), "alarm_component"); + get_alarm_report_interval(confDir); + get_alarm_report_max_count(confDir); +} + +/* + * @GaussDB@ + * Brief : close the current file, and open the next file + * Description : + * Notes : + */ +void switchLogFile(void) +{ + char log_new_name[MAXPGPATH] = {0}; + mode_t oumask; + char current_localtime[LOG_MAX_TIMELEN] = {0}; + pg_time_t current_time; + struct tm* systm; + + int len_log_cur_name = 0; + int len_suffix_name = 0; + int len_log_new_name = 0; + int ret = 0; + errno_t rc = 0; + + current_time = time(NULL); + + systm = localtime(¤t_time); + + if (systm != nullptr) { + (void)strftime(current_localtime, LOG_MAX_TIMELEN, "-%Y-%m-%d_%H%M%S", systm); + } + + /* close the current file */ + if (syslogFile != NULL) { + fclose(syslogFile); + syslogFile = NULL; + } + + /* renamed the current file without Mark */ + len_log_cur_name = strlen(curLogFileName); + len_suffix_name = strlen(curLogFileMark); + len_log_new_name = len_log_cur_name - len_suffix_name; + + rc = strncpy_s(log_new_name, MAXPGPATH, curLogFileName, len_log_new_name); + securec_check_errno(rc, ); + rc = strncat_s(log_new_name, MAXPGPATH, ".log", strlen(".log")); + securec_check_errno(rc, ); + ret = rename(curLogFileName, log_new_name); + if (ret != 0) { + printf(_("%s: rename log file %s failed! \n"), prefix_name, curLogFileName); + return; + } + + /* new current file name */ + rc = memset_s(curLogFileName, MAXPGPATH, 0, MAXPGPATH); + securec_check_errno(rc, ); + ret = snprintf_s(curLogFileName, + MAXPGPATH, + MAXPGPATH - 1, + "%s/%s%s%s", + sys_log_path, + prefix_name, + current_localtime, + curLogFileMark); + securec_check_intval(ret, ); + + oumask = umask((mode_t)((~(mode_t)(S_IRUSR | S_IWUSR | S_IXUSR)) & (S_IRWXU | S_IRWXG | S_IRWXO))); + + syslogFile = fopen(curLogFileName, "a"); + + (void)umask(oumask); + + if (syslogFile == NULL) { + (void)printf("switchLogFile,switch new log file failed %s\n", strerror(errno)); + } else { + if (SetFdCloseExecFlag(syslogFile) == -1) { + (void)printf("set file flag failed, filename:%s, errmsg: %s.\n", curLogFileName, strerror(errno)); + } + } +} + +/* + * @GaussDB@ + * Brief: + * Description: write info to the files + * Notes: if the current file size is full, switch to the next + */ +void write_log_file(const char* buffer, int count) +{ + int rc = 0; + + (void)pthread_rwlock_wrlock(&syslog_write_lock); + + if (syslogFile == NULL) { + /* maybe syslogFile no init. 
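write_log_file() can be called before the log file has been opened, so open it lazily here. 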
*/ + syslogFile = logfile_open(sys_log_path, "a"); + } + if (syslogFile != NULL) { + count = strlen(buffer); + + /* switch to the next file when current file full */ + if ((ftell(syslogFile) + count) > (maxLogFileSize)) { + switchLogFile(); + } + + if (syslogFile != NULL) { + rc = fwrite(buffer, 1, count, syslogFile); + if (rc != count) { + printf("could not write to log file: %s %m\n", curLogFileName); + } + } else { + printf("write_log_file could not open log file %s : %m\n", curLogFileName); + } + } else { + printf("write_log_file,log file is null now:%s\n", buffer); + } + + (void)pthread_rwlock_unlock(&syslog_write_lock); +} + +char *errmsg(const char* fmt, ...) +{ + va_list ap; + int count = 0; + int rcs; + errno_t rc; + char errbuf[BUF_LEN] = {0}; + fmt = _(fmt); + va_start(ap, fmt); + rc = memset_s(errbuf_errmsg, EREPORT_BUF_LEN, 0, EREPORT_BUF_LEN); + securec_check_c(rc, "\0", "\0"); + count = vsnprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, fmt, ap); + securec_check_intval(count, ); + va_end(ap); + + rcs = snprintf_s(errbuf_errmsg, EREPORT_BUF_LEN, EREPORT_BUF_LEN - 1, "%s", "[ERRMSG]:"); + securec_check_intval(rcs, ); + rc = memcpy_s(errbuf_errmsg + strlen(errbuf_errmsg), BUF_LEN - strlen(errbuf_errmsg), + errbuf, BUF_LEN - strlen(errbuf_errmsg) - 1); + securec_check_errno(rc, (void)rc); + return errbuf_errmsg; +} + +char* errdetail(const char* fmt, ...) +{ + va_list ap; + int count = 0; + int rcs; + errno_t rc; + char errbuf[BUF_LEN] = {0}; + fmt = _(fmt); + va_start(ap, fmt); + rc = memset_s(errbuf_errdetail, EREPORT_BUF_LEN, 0, EREPORT_BUF_LEN); + securec_check_c(rc, "\0", "\0"); + count = vsnprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, fmt, ap); + securec_check_intval(count, ); + va_end(ap); + rcs = snprintf_s(errbuf_errdetail, EREPORT_BUF_LEN, + EREPORT_BUF_LEN - 1, "%s", "[ERRDETAIL]:"); + securec_check_intval(rcs, ); + rc = memcpy_s(errbuf_errdetail + strlen(errbuf_errdetail), BUF_LEN - strlen(errbuf_errdetail), + errbuf, BUF_LEN - strlen(errbuf_errdetail) - 1); + securec_check_errno(rc, (void)rc); + return errbuf_errdetail; +} + +char* errcode(int sql_state) +{ + int i; + int rcs; + errno_t rc; + char buf[6] = {0}; + rc = memset_s(errbuf_errcode, EREPORT_BUF_LEN, 0, EREPORT_BUF_LEN); + securec_check_c(rc, "\0", "\0"); + /* the length of sql code is 5 */ + for (i = 0; i < 5; i++) { + buf[i] = PGUNSIXBIT(sql_state); + sql_state >>= 6; + } + buf[i] = '\0'; + rcs = snprintf_s(errbuf_errcode, EREPORT_BUF_LEN, EREPORT_BUF_LEN - 1, "%s%s", "[ERRCODE]:", buf); + securec_check_intval(rcs, ); + return errbuf_errcode; +} + +char* errcause(const char* fmt, ...) +{ + va_list ap; + int count = 0; + int rcs; + errno_t rc; + char errbuf[BUF_LEN] = {0}; + fmt = _(fmt); + va_start(ap, fmt); + rc = memset_s(errbuf_errcause, EREPORT_BUF_LEN, 0, EREPORT_BUF_LEN); + securec_check_c(rc, "\0", "\0"); + count = vsnprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, fmt, ap); + securec_check_intval(count, ); + va_end(ap); + rcs = snprintf_s(errbuf_errcause, EREPORT_BUF_LEN, + EREPORT_BUF_LEN - 1, "%s", "[ERRCAUSE]:"); + securec_check_intval(rcs, ); + rc = memcpy_s(errbuf_errcause + strlen(errbuf_errcause), BUF_LEN - strlen(errbuf_errcause), + errbuf, BUF_LEN - strlen(errbuf_errcause) - 1); + securec_check_errno(rc, (void)rc); + return errbuf_errcause; +} + +char* erraction(const char* fmt, ...) 
+{ + va_list ap; + int count = 0; + int rcs; + errno_t rc; + char errbuf[BUF_LEN] = {0}; + fmt = _(fmt); + va_start(ap, fmt); + rc = memset_s(errbuf_erraction, EREPORT_BUF_LEN, 0, EREPORT_BUF_LEN); + securec_check_c(rc, "\0", "\0"); + count = vsnprintf_s(errbuf, sizeof(errbuf), sizeof(errbuf) - 1, fmt, ap); + securec_check_intval(count, ); + va_end(ap); + rcs = snprintf_s(errbuf_erraction, EREPORT_BUF_LEN, + EREPORT_BUF_LEN - 1, "%s", "[ERRACTION]:"); + securec_check_intval(rcs, ); + rc = memcpy_s(errbuf_erraction + strlen(errbuf_erraction), BUF_LEN - strlen(errbuf_erraction), + errbuf, BUF_LEN - strlen(errbuf_erraction) - 1); + securec_check_errno(rc, (void)rc); + return errbuf_erraction; +} + + +char* errmodule(ModuleId id) +{ + errno_t rc = memset_s(errbuf_errmodule, EREPORT_BUF_LEN, 0, EREPORT_BUF_LEN); + securec_check_c(rc, "\0", "\0"); + int rcs = snprintf_s(errbuf_errmodule, EREPORT_BUF_LEN - 1, + EREPORT_BUF_LEN - 1, "%s", "[ERRMODULE]:"); + securec_check_intval(rcs, (void)rcs); + rcs = snprintf_s(errbuf_errmodule + strlen(errbuf_errmodule), + EREPORT_BUF_LEN - strlen(errbuf_errmodule), + EREPORT_BUF_LEN - strlen(errbuf_errmodule) - 1, "%s", + get_valid_module_name(id)); + securec_check_intval(rcs, (void)rcs); + return errbuf_errmodule; +} diff --git a/src/lib/cm_common/cm_errcodes.txt b/src/lib/cm_common/cm_errcodes.txt new file mode 100644 index 000000000..d42e579b6 --- /dev/null +++ b/src/lib/cm_common/cm_errcodes.txt @@ -0,0 +1,88 @@ +# +# @copyright Copyright (c) Huawei Technologies Co., Ltd. 2011-2020. All rights reserved. +# +# +# +# cm_errcodes.txt +# CM error codes +# +# The files generated from this one are: +# +# src/include/utils/cm_errcodes.h +# macros defining errcode constants to be used in the rest of the source +# +# The format of this file is one error code per line, with the following +# whitespace-separated fields: +# +# sqlstate E/W/S errcode_macro_name spec_name +# +# where sqlstate is a five-character string following the SQLSTATE conventions, +# the second field indicates if the code means an error, a warning or success, +# errcode_macro_name is the C macro name starting with ERRCODE that will be put +# in errcodes.h, and spec_name is a lowercase, underscore-separated name that +# will be used as the PL/pgSQL condition name and will also be included in the +# SGML list. The last field is optional, if not present the PL/pgSQL condition +# and the SGML entry will not be generated. +# +# Empty lines and lines starting with a hash are comments. +# +# There are also special lines in the format of: +# +# Section: section description +# +# that is, lines starting with the string "Section:". They are used to delimit +# error classes as defined in the SQL spec, and are necessary for SGML output. +# +# +# SQLSTATE codes for errors. +# +# The SQL99 code set is rather impoverished, especially in the area of +# syntactical and semantic errors. We have borrowed codes from IBM's DB2 +# and invented our own codes to develop a useful code set. +# +# When adding a new code, make sure it is placed in the most appropriate +# class (the first two characters of the code value identify the class). +# The listing is organized by class to make this prominent. +# +# Each class should have a generic '000' subclass. However, +# the generic '000' subclass code should be used for an error only +# when there is not a more-specific subclass code defined. +# +# The SQL spec requires that all the elements of a SQLSTATE code be +# either digits or upper-case ASCII characters. 
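+#
+# For example, the entry
+#
+#     c0002    E    ERRCODE_CONNECTION_FAILURE    connection_failure
+#
+# is expected to produce an ERRCODE_CONNECTION_FAILURE constant for SQLSTATE
+# 'c0002' in cm_errcodes.h (the exact macro shape is up to
+# generate-cm-errcodes.pl, which is not shown in this patch).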
+# Classes that begin with 0-4 or A-H are defined by the
+# standard. Within such a class, subclass values defined by the
+# standard must begin with 0-4 or A-H. To define a new error code,
+# ensure that it is either in an "implementation-defined class" (it
+# begins with 5-9 or I-Z), or its subclass falls outside the range of
+# error codes that could be present in future versions of the
+# standard (i.e. the subclass value begins with 5-9 or I-Z).
+#
+# The convention is that new error codes defined by PostgreSQL in a
+# class defined by the standard have a subclass value that begins
+# with 'P'. In addition, error codes defined by PostgreSQL clients
+# (such as ecpg) have a class value that begins with 'Y'.
+
+Section: Class c0 - Connection Exception
+c0000 E ERRCODE_CONNECTION_EXCEPTION connection_exception
+c0001 E ERRCODE_CONNECTION_DOES_NOT_EXIST connection_does_not_exist
+c0002 E ERRCODE_CONNECTION_FAILURE connection_failure
+
+Section: Class c1 - Environment Exception
+c1000 E ERRCODE_OUT_OF_MEMORY out_of_memory
+
+Section: Class c2 - File Exception
+c2000 E ERRCODE_OPEN_FILE_FAILURE open_file_failure
+c2001 E ERRCODE_READ_FILE_FAILURE read_file_failure
+c2002 E ERRCODE_CONFIG_FILE_FAILURE config_file_failure
+
+Section: Class c3 - Parameter Exception
+c3000 E ERRCODE_PARAMETER_FAILURE parameter_failure
+c3001 E ERRCODE_ENVIRONMENT_VARIABLE_FAILURE environment_variable_failure
+
+Section: Class c4 - Etcd Exception
+c4000 E ERRCODE_ETCD_OPEN_FAILURE etcd_open_failure
+
+Section: Class c5 - Internal Exception
+c5000 E ERRCODE_INTERNAL_ERROR internal_error
diff --git a/src/lib/cm_common/cm_misc.cpp b/src/lib/cm_common/cm_misc.cpp
new file mode 100644
index 000000000..27ad4ea90
--- /dev/null
+++ b/src/lib/cm_common/cm_misc.cpp
@@ -0,0 +1,1302 @@
+/**
+ * @file cm_misc.cpp
+ * @brief
+ * @author xxx
+ * @version 1.0
+ * @date 2020-08-06
+ *
+ * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2011-2020. All rights reserved.
+ *
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "cm/elog.h"
+#include "cm/cm_c.h"
+#include "cm/stringinfo.h"
+#include "cm/cm_msg.h"
+#include "common/config/cm_config.h"
+#include "cm/etcdapi.h"
+#include "cm/cm_misc.h"
+
+/*
+ * An ssh connection does not exit automatically when the network is faulty,
+ * which can leave cm_ctl hanging for several hours, so we add the
+ * following timeout options for ssh.
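+ *
+ * With the values below, PSSH_TIMEOUT_OPTION expands to:
+ *   " -t 60 -O ConnectTimeout=5 -O ConnectionAttempts=3
+ *     -O ServerAliveInterval=15 -O ServerAliveCountMax=3 "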
+ */ +#define SSH_CONNECT_TIMEOUT "5" +#define SSH_CONNECT_ATTEMPTS "3" +#define SSH_SERVER_ALIVE_INTERVAL "15" +#define SSH_SERVER_ALIVE_COUNT_MAX "3" +#define PSSH_TIMEOUT_OPTION \ + " -t 60 -O ConnectTimeout=" SSH_CONNECT_TIMEOUT " -O ConnectionAttempts=" SSH_CONNECT_ATTEMPTS \ + " -O ServerAliveInterval=" SSH_SERVER_ALIVE_INTERVAL " -O ServerAliveCountMax=" SSH_SERVER_ALIVE_COUNT_MAX " " + +const int MAXLISTEN = 64; + +uint32 g_health_etcd_index[CM_NODE_MAXNUM] = {0}; +bool g_health_etcd_flag = false; +uint32 g_health_etcd_count = 0; +pthread_rwlock_t g_health_etcd_rwlock = PTHREAD_RWLOCK_INITIALIZER; + +/* + * @@GaussDB@@ + * Brief : void *pg_malloc(size_t size) + * Description : malloc space + * Notes : + */ +static void* pg_malloc(size_t size) +{ + void* result = NULL; + + /* Avoid unportable behavior of malloc(0) */ + if (size == 0) { + write_runlog(ERROR, "malloc 0.\n"); + exit(1); + } + + result = (void*)malloc(size); + if (result == NULL) { + write_runlog(ERROR, "malloc failed, out of memory.\n"); + exit(1); + } + return result; +} + +char** readfile(const char* path) +{ + int fd = 0; + int nlines; + char** result; + char* buffer = NULL; + char* linebegin = NULL; + int i; + int n; + int len; + struct stat statbuf = {0}; + + /* + * Slurp the file into memory. + * + * The file can change concurrently, so we read the whole file into memory + * with a single read() call. That's not guaranteed to get an atomic + * snapshot, but in practice, for a small file, it's close enough for the + * current use. + */ + fd = open(path, O_RDONLY | PG_BINARY | O_CLOEXEC, 0); + if (fd < 0) { + return NULL; + } + if (fstat(fd, &statbuf) < 0) { + close(fd); + return NULL; + } + if (statbuf.st_size == 0) { + /* empty file */ + close(fd); + result = (char**)pg_malloc(sizeof(char*)); + *result = NULL; + return result; + } + buffer = (char*)pg_malloc(statbuf.st_size + 1); + + len = read(fd, buffer, statbuf.st_size + 1); + close(fd); + if (len != statbuf.st_size) { + /* oops, the file size changed between fstat and read */ + FREE_AND_RESET(buffer); + return NULL; + } + + /* + * Count newlines. We expect there to be a newline after each full line, + * including one at the end of file. If there isn't a newline at the end, + * any characters after the last newline will be ignored. 
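+     *
+     * Each line in the returned array keeps its trailing newline, so a file
+     * containing "a\nb\n" yields the array {"a\n", "b\n", NULL}.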
+ */ + nlines = 0; + for (i = 0; i < len; i++) { + if (buffer[i] == '\n') { + nlines++; + } + } + + /* set up the result buffer */ + result = (char**)pg_malloc((nlines + 1) * sizeof(char*)); + + /* now split the buffer into lines */ + linebegin = buffer; + n = 0; + for (i = 0; i < len; i++) { + if (buffer[i] == '\n') { + int slen = &buffer[i] - linebegin + 1; + char* linebuf = (char*)pg_malloc(slen + 1); + errno_t rc; + + rc = memcpy_s(linebuf, slen + 1, linebegin, slen); + securec_check_c(rc, linebuf, "\0"); + linebuf[slen] = '\0'; + result[n++] = linebuf; + linebegin = &buffer[i + 1]; + } + } + result[n] = NULL; + + FREE_AND_RESET(buffer); + + return result; +} + +void freefile(char** lines) +{ + char** line = NULL; + if (lines == nullptr) { + return; + } + line = lines; + while (*line != NULL) { + FREE_AND_RESET(*line); + line++; + } + free(lines); +} + +log_level_string log_level_map_string[] = { + + {"DEBUG5", DEBUG5}, + {"DEBUG1", DEBUG1}, + {"WARNING", WARNING}, + {"LOG", LOG}, + {"ERROR", ERROR}, + {"FATAL", FATAL}, + {NULL, UNKNOWN_LEVEL} + +}; + +int log_level_string_to_int(const char* log_level) +{ + int i; + for (i = 0; log_level_map_string[i].level_string != NULL; i++) { + if (strcasecmp(log_level_map_string[i].level_string, log_level) == 0) { + return log_level_map_string[i].level_val; + } + } + return UNKNOWN_LEVEL; +} + +const char* log_level_int_to_string(int log_level) +{ + int i; + for (i = 0; log_level_map_string[i].level_string != NULL; i++) { + if (log_level_map_string[i].level_val == log_level) { + return log_level_map_string[i].level_string; + } + } + return "Unknown"; +} + +const char* DcfRoleToString(int role) +{ + switch (role) { + case DCF_ROLE_LEADER: + return "LEADER"; + case DCF_ROLE_FOLLOWER: + return "FOLLOWER"; + case DCF_ROLE_LOGGER: + return "LOGGER"; + case DCF_ROLE_PASSIVE: + return "PASSIVE"; + case DCF_ROLE_PRE_CANDIDATE: + return "PRE_CANDIDATE"; + case DCF_ROLE_CANDIDATE: + return "CANDIDATE"; + default: + return "UNKNOWN"; + } + + return "UNKNOWN"; +} + +instance_datanode_build_reason_string datanode_build_reason_map_string[] = { + + {"Normal", INSTANCE_HA_DATANODE_BUILD_REASON_NORMAL}, + {"WAL segment removed", INSTANCE_HA_DATANODE_BUILD_REASON_WALSEGMENT_REMOVED}, + {"Disconnected", INSTANCE_HA_DATANODE_BUILD_REASON_DISCONNECT}, + {"Version not matched", INSTANCE_HA_DATANODE_BUILD_REASON_VERSION_NOT_MATCHED}, + {"Mode not matched", INSTANCE_HA_DATANODE_BUILD_REASON_MODE_NOT_MATCHED}, + {"System id not matched", INSTANCE_HA_DATANODE_BUILD_REASON_SYSTEMID_NOT_MATCHED}, + {"Timeline not matched", INSTANCE_HA_DATANODE_BUILD_REASON_TIMELINE_NOT_MATCHED}, + {"DCF log loss", INSTANCE_HA_DATANODE_BUILD_REASON_DCF_LOG_LOSS}, + {"Unknown", INSTANCE_HA_DATANODE_BUILD_REASON_UNKNOWN}, + {"User/Password invalid", INSTANCE_HA_DATANODE_BUILD_REASON_USER_PASSWD_INVALID}, + {"Connecting", INSTANCE_HA_DATANODE_BUILD_REASON_CONNECTING}, + {NULL, INSTANCE_HA_DATANODE_BUILD_REASON_UNKNOWN} + +}; + +int datanode_rebuild_reason_string_to_int(const char* reason) +{ + int i; + + for (i = 0; datanode_build_reason_map_string[i].reason_string != NULL; i++) { + if (strstr(reason, datanode_build_reason_map_string[i].reason_string) != NULL) { + return datanode_build_reason_map_string[i].reason_val; + } + } + + return INSTANCE_HA_DATANODE_BUILD_REASON_UNKNOWN; +} + +const char* datanode_rebuild_reason_int_to_string(int reason) +{ + int i; + + for (i = 0; datanode_build_reason_map_string[i].reason_string != NULL; i++) { + if (datanode_build_reason_map_string[i].reason_val == 
reason) { + return datanode_build_reason_map_string[i].reason_string; + } + } + return "Unknown"; +} + +instacne_type_string type_map_string[] = { + + {"GTM", INSTANCE_TYPE_GTM}, + {"Datanode", INSTANCE_TYPE_DATANODE}, + {"Coordinator", INSTANCE_TYPE_COORDINATE}, + {"Fenced UDF", INSTANCE_TYPE_FENCED_UDF}, + {NULL, INSTANCE_TYPE_UNKNOWN}}; + +const char* type_int_to_string(int type) +{ + int i; + for (i = 0; type_map_string[i].type_string != NULL; i++) { + if (type_map_string[i].type_val == type) { + return type_map_string[i].type_string; + } + } + return "Unknown"; +} + +gtm_con_string gtm_con_map_string[] = {{"Connection ok", CON_OK}, + {"Connection bad", CON_BAD}, + {"Connection started", CON_STARTED}, + {"Connection made", CON_MADE}, + {"Connection awaiting response", CON_AWAITING_RESPONSE}, + {"Connection authentication ok", CON_AUTH_OK}, + {"Connection prepare environment", CON_SETEN}, + {"Connection prepare SSL", CON_SSL_STARTUP}, + {"Connection needed", CON_NEEDED}, + {"Unknown", CON_UNKNOWN}, + {"Manually stopped", CON_MANUAL_STOPPED}, + {"Disk damaged", CON_DISK_DEMAGED}, + {"Port conflicting", CON_PORT_USED}, + {"Nic down", CON_NIC_DOWN}, + {"Starting", CON_GTM_STARTING}, + {NULL, CON_UNKNOWN}}; + +const char* gtm_con_int_to_string(int con) +{ + int i; + for (i = 0; gtm_con_map_string[i].con_string != NULL; i++) { + if (gtm_con_map_string[i].con_val == con) { + return gtm_con_map_string[i].con_string; + } + } + return "Unknown"; +} + +server_role_string server_role_string_map[] = {{CM_SERVER_UNKNOWN, "UNKNOWN"}, + {CM_SERVER_PRIMARY, "Primary"}, + {CM_SERVER_STANDBY, "Standby"}, + {CM_SERVER_INIT, "Init"}, + {CM_SERVER_DOWN, "Down"}}; + + +server_role_string etcd_role_string_map[] = {{CM_ETCD_UNKNOWN, "UNKNOWN"}, + {CM_ETCD_FOLLOWER, "StateFollower"}, + {CM_ETCD_LEADER, "StateLeader"}, + {CM_ETCD_DOWN, "Down"}}; + +server_role_string kerberos_role_string_map[] = {{KERBEROS_STATUS_UNKNOWN, "UNKNOWN"}, + {KERBEROS_STATUS_NORMAL, "Normal"}, + {KERBEROS_STATUS_ABNORMAL, "Abnormal"}, + {KERBEROS_STATUS_DOWN, "Down"}}; + +const char* etcd_role_to_string(int role) +{ + if (role <= CM_ETCD_UNKNOWN || role > CM_ETCD_DOWN) { + return etcd_role_string_map[CM_ETCD_UNKNOWN].role_string; + } else { + return etcd_role_string_map[role].role_string; + } +} + +const char* server_role_to_string(int role, bool is_pending) +{ + if (role <= CM_SERVER_UNKNOWN || role >= CM_SERVER_INIT) { + return "Unknown"; + } else { + if (CM_SERVER_PRIMARY == role && is_pending) { + return "Pending"; + } else { + return server_role_string_map[role].role_string; + } + } +} + +instance_datanode_lockmode_string g_datanode_lockmode_map_string[] = {{"polling_connection", POLLING_CONNECTION}, + {"specify_connection", SPECIFY_CONNECTION}, + {"prohibit_connection", PROHIBIT_CONNECTION}, + {NULL, UNDEFINED_LOCKMODE}}; + +int datanode_lockmode_string_to_int(const char* lockmode) +{ + int i; + if (lockmode == NULL || strlen(lockmode) == 0) { + write_runlog(ERROR, "datanode_lockmode_string_to_int failed, input string role is: NULL\n"); + return UNDEFINED_LOCKMODE; + } else { + for (i = 0; g_datanode_lockmode_map_string[i].lockmode_string != NULL; i++) { + if (strncmp(g_datanode_lockmode_map_string[i].lockmode_string, lockmode, strlen(lockmode)) == 0) { + return g_datanode_lockmode_map_string[i].lockmode_val; + } + } + } + write_runlog(ERROR, "datanode_lockmode_string_to_int failed, input lockmode is: (%s)\n", lockmode); + return UNDEFINED_LOCKMODE; +} + +instacne_datanode_role_string datanode_role_map_string[] = { + + 
{"Primary", INSTANCE_ROLE_PRIMARY}, + {"Standby", INSTANCE_ROLE_STANDBY}, + {"Pending", INSTANCE_ROLE_PENDING}, + {"Normal", INSTANCE_ROLE_NORMAL}, + {"Down", INSTANCE_ROLE_UNKNOWN}, + {"Secondary", INSTANCE_ROLE_DUMMY_STANDBY}, + {"Deleted", INSTANCE_ROLE_DELETED}, + {"ReadOnly", INSTANCE_ROLE_READONLY}, + {"Offline", INSTANCE_ROLE_OFFLINE}, + {"Main Standby", INSTANCE_ROLE_MAIN_STANDBY}, + {"Cascade Standby", INSTANCE_ROLE_CASCADE_STANDBY}, + {NULL, INSTANCE_ROLE_UNKNOWN}}; + +int datanode_role_string_to_int(const char* role) +{ + int i; + if (NULL == role) { + write_runlog(ERROR, "datanode_role_string_to_int failed, input string role is: NULL\n"); + return INSTANCE_ROLE_UNKNOWN; + } + for (i = 0; datanode_role_map_string[i].role_string != NULL; i++) { + if (strcmp(datanode_role_map_string[i].role_string, role) == 0) { + return datanode_role_map_string[i].role_val; + } + } + write_runlog(ERROR, "datanode_role_string_to_int failed, input string role is: (%s)\n", role); + return INSTANCE_ROLE_UNKNOWN; +} + +const char* datanode_role_int_to_string(int role) +{ + int i; + for (i = 0; datanode_role_map_string[i].role_string != NULL; i++) { + if ((int)datanode_role_map_string[i].role_val == role) { + return datanode_role_map_string[i].role_string; + } + } + return "Unknown"; +} + + +instacne_datanode_role_string datanode_static_role_map_string[] = { + {"P", PRIMARY_DN}, {"S", STANDBY_DN}, {"R", DUMMY_STANDBY_DN}, {NULL, INSTANCE_ROLE_NORMAL}}; + +const char* datanode_static_role_int_to_string(uint32 role) +{ + int i; + for (i = 0; datanode_static_role_map_string[i].role_string != NULL; i++) { + if (datanode_static_role_map_string[i].role_val == role) { + return datanode_static_role_map_string[i].role_string; + } + } + return "Unknown"; +} + +instacne_datanode_dbstate_string datanode_dbstate_map_string[] = {{"Unknown", INSTANCE_HA_STATE_UNKONWN}, + {"Normal", INSTANCE_HA_STATE_NORMAL}, + {"Need repair", INSTANCE_HA_STATE_NEED_REPAIR}, + {"Starting", INSTANCE_HA_STATE_STARTING}, + {"Wait promoting", INSTANCE_HA_STATE_WAITING}, + {"Demoting", INSTANCE_HA_STATE_DEMOTING}, + {"Promoting", INSTANCE_HA_STATE_PROMOTING}, + {"Building", INSTANCE_HA_STATE_BUILDING}, + {"Manually stopped", INSTANCE_HA_STATE_MANUAL_STOPPED}, + {"Disk damaged", INSTANCE_HA_STATE_DISK_DAMAGED}, + {"Port conflicting", INSTANCE_HA_STATE_PORT_USED}, + {"Build failed", INSTANCE_HA_STATE_BUILD_FAILED}, + {"Catchup", INSTANCE_HA_STATE_CATCH_UP}, + {"CoreDump", INSTANCE_HA_STATE_COREDUMP}, + {"ReadOnly", INSTANCE_HA_STATE_READ_ONLY}, + {NULL, INSTANCE_ROLE_NORMAL}}; + +int datanode_dbstate_string_to_int(const char* dbstate) +{ + int i; + if (NULL == dbstate) { + write_runlog(ERROR, "datanode_dbstate_string_to_int failed, input string dbstate is: NULL\n"); + return INSTANCE_HA_STATE_UNKONWN; + } + for (i = 0; datanode_dbstate_map_string[i].dbstate_string != NULL; i++) { + if (strcmp(datanode_dbstate_map_string[i].dbstate_string, dbstate) == 0) { + return datanode_dbstate_map_string[i].dbstate_val; + } + } + write_runlog(ERROR, "datanode_dbstate_string_to_int failed, input string dbstate is: (%s)\n", dbstate); + return INSTANCE_HA_STATE_UNKONWN; +} + +const char* datanode_dbstate_int_to_string(int dbstate) +{ + int i; + for (i = 0; datanode_dbstate_map_string[i].dbstate_string != NULL; i++) { + if (datanode_dbstate_map_string[i].dbstate_val == dbstate) { + return datanode_dbstate_map_string[i].dbstate_string; + } + } + return "Unknown"; +} + +instacne_datanode_wal_send_state_string datanode_wal_send_state_map_string[] = { + 
{"Startup", INSTANCE_WALSNDSTATE_STARTUP}, + {"Backup", INSTANCE_WALSNDSTATE_BACKUP}, + {"Catchup", INSTANCE_WALSNDSTATE_CATCHUP}, + {"Streaming", INSTANCE_WALSNDSTATE_STREAMING}, + {"Dump syslog", INSTANCE_WALSNDSTATE_DUMPLOG}, + {"Normal", INSTANCE_WALSNDSTATE_NORMAL}, + {"Unknown", INSTANCE_WALSNDSTATE_UNKNOWN}, + {NULL, INSTANCE_WALSNDSTATE_UNKNOWN}}; + +int datanode_wal_send_state_string_to_int(const char* dbstate) +{ + int i; + if (NULL == dbstate) { + write_runlog(ERROR, "datanode_wal_send_state_string_to_int failed, input string dbstate is: NULL\n"); + return INSTANCE_WALSNDSTATE_UNKNOWN; + } + for (i = 0; datanode_wal_send_state_map_string[i].wal_send_state_string != NULL; i++) { + if (strcmp(datanode_wal_send_state_map_string[i].wal_send_state_string, dbstate) == 0) { + return datanode_wal_send_state_map_string[i].wal_send_state_val; + } + } + write_runlog(ERROR, "datanode_wal_send_state_string_to_int failed, input string dbstate is: (%s)\n", dbstate); + return INSTANCE_WALSNDSTATE_UNKNOWN; +} + +const char* datanode_wal_send_state_int_to_string(int dbstate) +{ + int i; + for (i = 0; datanode_wal_send_state_map_string[i].wal_send_state_string != NULL; i++) { + if (datanode_wal_send_state_map_string[i].wal_send_state_val == dbstate) { + return datanode_wal_send_state_map_string[i].wal_send_state_string; + } + } + return "Unknown"; +} + +instacne_datanode_sync_state_string datanode_wal_sync_state_map_string[] = {{"Async", INSTANCE_DATA_REPLICATION_ASYNC}, + {"Sync", INSTANCE_DATA_REPLICATION_SYNC}, + {"Most available", INSTANCE_DATA_REPLICATION_MOST_AVAILABLE}, + {"Potential", INSTANCE_DATA_REPLICATION_POTENTIAL_SYNC}, + {"Quorum", INSTANCE_DATA_REPLICATION_QUORUM}, + {NULL, INSTANCE_DATA_REPLICATION_UNKONWN}}; + +int datanode_wal_sync_state_string_to_int(const char* dbstate) +{ + int i; + if (NULL == dbstate) { + write_runlog(ERROR, "datanode_wal_sync_state_string_to_int failed, input string dbstate is: NULL\n"); + return INSTANCE_DATA_REPLICATION_UNKONWN; + } + for (i = 0; datanode_wal_sync_state_map_string[i].wal_sync_state_string != NULL; i++) { + if (strcmp(datanode_wal_sync_state_map_string[i].wal_sync_state_string, dbstate) == 0) { + return datanode_wal_sync_state_map_string[i].wal_sync_state_val; + } + } + write_runlog(ERROR, "datanode_wal_sync_state_string_to_int failed, input string dbstate is: (%s)\n", dbstate); + return INSTANCE_DATA_REPLICATION_UNKONWN; +} + +const char* datanode_wal_sync_state_int_to_string(int dbstate) +{ + int i; + for (i = 0; datanode_wal_sync_state_map_string[i].wal_sync_state_string != NULL; i++) { + if (datanode_wal_sync_state_map_string[i].wal_sync_state_val == dbstate) { + return datanode_wal_sync_state_map_string[i].wal_sync_state_string; + } + } + return "Unknown"; +} + +cluster_state_string cluster_state_map_string[] = { + {"Starting", CM_STATUS_STARTING}, + {"Redistributing", CM_STATUS_PENDING}, + {"Normal", CM_STATUS_NORMAL}, + {"Unavailable", CM_STATUS_NEED_REPAIR}, + {"Degraded", CM_STATUS_DEGRADE}, + {"Unknown", CM_STATUS_UNKNOWN}, + {"NormalCNDeleted", CM_STATUS_NORMAL_WITH_CN_DELETED}, + {NULL, CM_STATUS_UNKNOWN}, +}; + +const char* cluster_state_int_to_string(int cluster_state) +{ + int i; + for (i = 0; cluster_state_map_string[i].cluster_state_string != NULL; i++) { + if (cluster_state_map_string[i].cluster_state_val == cluster_state) { + return cluster_state_map_string[i].cluster_state_string; + } + } + return "Unknown"; +} + +/* this map should be sync with CM_MessageType in cm_msg.h file. 
*/ +cluster_msg_string cluster_msg_map_string[] = { + + {"MSG_CTL_CM_SWITCHOVER", MSG_CTL_CM_SWITCHOVER}, + {"MSG_CTL_CM_BUILD", MSG_CTL_CM_BUILD}, + {"MSG_CTL_CM_SYNC", MSG_CTL_CM_SYNC}, + {"MSG_CTL_CM_QUERY", MSG_CTL_CM_QUERY}, + {"MSG_CTL_CM_NOTIFY", MSG_CTL_CM_NOTIFY}, + {"MSG_CTL_CM_BUTT", MSG_CTL_CM_BUTT}, + {"MSG_CM_CTL_DATA_BEGIN", MSG_CM_CTL_DATA_BEGIN}, + {"MSG_CM_CTL_DATA", MSG_CM_CTL_DATA}, + {"MSG_CM_CTL_NODE_END", MSG_CM_CTL_NODE_END}, + {"MSG_CM_CTL_DATA_END", MSG_CM_CTL_DATA_END}, + {"MSG_CM_CTL_COMMAND_ACK", MSG_CM_CTL_COMMAND_ACK}, + + {"MSG_CM_AGENT_SWITCHOVER", MSG_CM_AGENT_SWITCHOVER}, + {"MSG_CM_AGENT_FAILOVER", MSG_CM_AGENT_FAILOVER}, + {"MSG_CM_AGENT_BUILD", MSG_CM_AGENT_BUILD}, + {"MSG_CM_AGENT_SYNC", MSG_CM_AGENT_SYNC}, + {"MSG_CM_AGENT_NOTIFY", MSG_CM_AGENT_NOTIFY}, + {"MSG_CM_AGENT_NOTIFY_CN", MSG_CM_AGENT_NOTIFY_CN}, + {"MSG_CM_AGENT_NOTIFY_CN_CENTRAL_NODE", MSG_CM_AGENT_NOTIFY_CN_CENTRAL_NODE}, + {"MSG_AGENT_CM_NOTIFY_CN_FEEDBACK", MSG_AGENT_CM_NOTIFY_CN_FEEDBACK}, + {"MSG_CM_AGENT_DROP_CN", MSG_CM_AGENT_DROP_CN}, + {"MSG_CM_AGENT_CANCEL_SESSION", MSG_CM_AGENT_CANCEL_SESSION}, + {"MSG_CM_AGENT_DROPPED_CN", MSG_CM_AGENT_DROPPED_CN}, + {"MSG_CM_AGENT_RESTART", MSG_CM_AGENT_RESTART}, + {"MSG_CM_AGENT_RESTART_BY_MODE", MSG_CM_AGENT_RESTART_BY_MODE}, + {"MSG_CM_AGENT_REP_SYNC", MSG_CM_AGENT_REP_SYNC}, + {"MSG_CM_AGENT_REP_ASYNC", MSG_CM_AGENT_REP_ASYNC}, + {"MSG_CM_AGENT_REP_MOST_AVAILABLE", MSG_CM_AGENT_REP_MOST_AVAILABLE}, + {"MSG_CM_AGENT_BUTT", MSG_CM_AGENT_BUTT}, + + {"MSG_AGENT_CM_DATA_INSTANCE_REPORT_STATUS", MSG_AGENT_CM_DATA_INSTANCE_REPORT_STATUS}, + {"MSG_AGENT_CM_COORDINATE_INSTANCE_STATUS", MSG_AGENT_CM_COORDINATE_INSTANCE_STATUS}, + {"MSG_AGENT_CM_GTM_INSTANCE_STATUS", MSG_AGENT_CM_GTM_INSTANCE_STATUS}, + {"MSG_AGENT_CM_FENCED_UDF_INSTANCE_STATUS", MSG_AGENT_CM_FENCED_UDF_INSTANCE_STATUS}, + {"MSG_AGENT_CM_BUTT", MSG_AGENT_CM_BUTT}, + + {"MSG_CM_CM_VOTE", MSG_CM_CM_VOTE}, + {"MSG_CM_CM_BROADCAST", MSG_CM_CM_BROADCAST}, + {"MSG_CM_CM_NOTIFY", MSG_CM_CM_NOTIFY}, + {"MSG_CM_CM_SWITCHOVER", MSG_CM_CM_SWITCHOVER}, + {"MSG_CM_CM_FAILOVER", MSG_CM_CM_FAILOVER}, + {"MSG_CM_CM_SYNC", MSG_CM_CM_SYNC}, + {"MSG_CM_CM_SWITCHOVER_ACK", MSG_CM_CM_SWITCHOVER_ACK}, + {"MSG_CM_CM_FAILOVER_ACK", MSG_CM_CM_FAILOVER_ACK}, + {"MSG_CM_CM_ROLE_CHANGE_NOTIFY", MSG_CM_CM_ROLE_CHANGE_NOTIFY}, + {"MSG_CM_CM_REPORT_SYNC", MSG_CM_CM_REPORT_SYNC}, + + {"MSG_AGENT_CM_HEARTBEAT", MSG_AGENT_CM_HEARTBEAT}, + {"MSG_CM_AGENT_HEARTBEAT", MSG_CM_AGENT_HEARTBEAT}, + {"MSG_CTL_CM_SET", MSG_CTL_CM_SET}, + {"MSG_CTL_CM_SWITCHOVER_ALL", MSG_CTL_CM_SWITCHOVER_ALL}, + {"MSG_CM_CTL_SWITCHOVER_ALL_ACK", MSG_CM_CTL_SWITCHOVER_ALL_ACK}, + {"MSG_CTL_CM_BALANCE_CHECK", MSG_CTL_CM_BALANCE_CHECK}, + {"MSG_CM_CTL_BALANCE_CHECK_ACK", MSG_CM_CTL_BALANCE_CHECK_ACK}, + {"MSG_CTL_CM_BALANCE_RESULT", MSG_CTL_CM_BALANCE_RESULT}, + {"MSG_CM_CTL_BALANCE_RESULT_ACK", MSG_CM_CTL_BALANCE_RESULT_ACK}, + {"MSG_CTL_CM_QUERY_CMSERVER", MSG_CTL_CM_QUERY_CMSERVER}, + {"MSG_CM_CTL_CMSERVER", MSG_CM_CTL_CMSERVER}, + {"MSG_TYPE_BUTT", MSG_TYPE_BUTT}, + {"MSG_CTL_CM_SWITCHOVER_FULL", MSG_CTL_CM_SWITCHOVER_FULL}, + {"MSG_CM_CTL_SWITCHOVER_FULL_ACK", MSG_CM_CTL_SWITCHOVER_FULL_ACK}, + {"MSG_CM_CTL_SWITCHOVER_FULL_DENIED", MSG_CM_CTL_SWITCHOVER_FULL_DENIED}, + {"MSG_CTL_CM_SWITCHOVER_FULL_CHECK", MSG_CTL_CM_SWITCHOVER_FULL_CHECK}, + {"MSG_CM_CTL_SWITCHOVER_FULL_CHECK_ACK", MSG_CM_CTL_SWITCHOVER_FULL_CHECK_ACK}, + {"MSG_CTL_CM_SWITCHOVER_FULL_TIMEOUT", MSG_CTL_CM_SWITCHOVER_FULL_TIMEOUT}, + 
{"MSG_CM_CTL_SWITCHOVER_FULL_TIMEOUT_ACK", MSG_CM_CTL_SWITCHOVER_FULL_TIMEOUT_ACK}, + {"MSG_CTL_CM_SETMODE", MSG_CTL_CM_SETMODE}, + {"MSG_CM_CTL_SETMODE_ACK", MSG_CM_CTL_SETMODE_ACK}, + + {"MSG_CTL_CM_SWITCHOVER_AZ", MSG_CTL_CM_SWITCHOVER_AZ}, + {"MSG_CM_CTL_SWITCHOVER_AZ_ACK", MSG_CM_CTL_SWITCHOVER_AZ_ACK}, + {"MSG_CM_CTL_SWITCHOVER_AZ_DENIED", MSG_CM_CTL_SWITCHOVER_AZ_DENIED}, + {"MSG_CTL_CM_SWITCHOVER_AZ_CHECK", MSG_CTL_CM_SWITCHOVER_AZ_CHECK}, + {"MSG_CM_CTL_SWITCHOVER_AZ_CHECK_ACK", MSG_CM_CTL_SWITCHOVER_AZ_CHECK_ACK}, + {"MSG_CTL_CM_SWITCHOVER_AZ_TIMEOUT", MSG_CTL_CM_SWITCHOVER_AZ_TIMEOUT}, + {"MSG_CM_CTL_SWITCHOVER_AZ_TIMEOUT_ACK", MSG_CM_CTL_SWITCHOVER_AZ_TIMEOUT_ACK}, + + {"MSG_CM_CTL_SET_ACK", MSG_CM_CTL_SET_ACK}, + {"MSG_CTL_CM_GET", MSG_CTL_CM_GET}, + {"MSG_CM_CTL_GET_ACK", MSG_CM_CTL_GET_ACK}, + + {"MSG_CM_AGENT_GS_GUC", MSG_CM_AGENT_GS_GUC}, + {"MSG_AGENT_CM_GS_GUC_ACK", MSG_AGENT_CM_GS_GUC_ACK}, + {"MSG_CM_CTL_SWITCHOVER_INCOMPLETE_ACK", MSG_CM_CTL_SWITCHOVER_INCOMPLETE_ACK}, + {"MSG_CM_CM_TIMELINE", MSG_CM_CM_TIMELINE}, + {"MSG_CM_BUILD_DOING", MSG_CM_BUILD_DOING}, + {"MSG_AGENT_CM_ETCD_CURRENT_TIME", MSG_AGENT_CM_ETCD_CURRENT_TIME}, + {"MSG_CM_QUERY_INSTANCE_STATUS", MSG_CM_QUERY_INSTANCE_STATUS}, + {"MSG_CM_SERVER_TO_AGENT_CONN_CHECK", MSG_CM_SERVER_TO_AGENT_CONN_CHECK}, + {"MSG_CTL_CM_GET_DATANODE_RELATION", MSG_CTL_CM_GET_DATANODE_RELATION}, + {"MSG_CM_BUILD_DOWN", MSG_CM_BUILD_DOWN}, + {"MSG_CM_SERVER_REPAIR_CN_ACK", MSG_CM_SERVER_REPAIR_CN_ACK}, + {"MSG_CTL_CM_SETMODE", MSG_CTL_CM_DISABLE_CN}, + {"MSG_CM_CTL_SETMODE_ACK", MSG_CTL_CM_DISABLE_CN_ACK}, + {"MSG_CM_AGENT_LOCK_NO_PRIMARY", MSG_CM_AGENT_LOCK_NO_PRIMARY}, + {"MSG_CM_AGENT_LOCK_CHOSEN_PRIMARY", MSG_CM_AGENT_LOCK_CHOSEN_PRIMARY}, + {"MSG_CM_AGENT_UNLOCK", MSG_CM_AGENT_UNLOCK}, + {"MSG_CTL_CM_STOP_ARBITRATION", MSG_CTL_CM_STOP_ARBITRATION}, + {"MSG_CTL_CM_FINISH_REDO", MSG_CTL_CM_FINISH_REDO}, + {"MSG_CM_CTL_FINISH_REDO_ACK", MSG_CM_CTL_FINISH_REDO_ACK}, + {"MSG_CM_AGENT_FINISH_REDO", MSG_CM_AGENT_FINISH_REDO}, + {"MSG_CTL_CM_FINISH_REDO_CHECK", MSG_CTL_CM_FINISH_REDO_CHECK}, + {"MSG_CM_CTL_FINISH_REDO_CHECK_ACK", MSG_CM_CTL_FINISH_REDO_CHECK_ACK}, + {"MSG_AGENT_CM_KERBEROS_STATUS", MSG_AGENT_CM_KERBEROS_STATUS}, + {"MSG_CTL_CM_QUERY_KERBEROS", MSG_CTL_CM_QUERY_KERBEROS}, + {"MSG_CTL_CM_QUERY_KERBEROS_ACK", MSG_CTL_CM_QUERY_KERBEROS_ACK}, + {"MSG_AGENT_CM_DISKUSAGE_STATUS", MSG_AGENT_CM_DISKUSAGE_STATUS}, + {"MSG_CM_AGENT_OBS_DELETE_XLOG", MSG_CM_AGENT_OBS_DELETE_XLOG}, + {"MSG_CM_AGENT_DROP_CN_OBS_XLOG", MSG_CM_AGENT_DROP_CN_OBS_XLOG}, + {"MSG_AGENT_CM_DATANODE_INSTANCE_BARRIER", MSG_AGENT_CM_DATANODE_INSTANCE_BARRIER}, + {"MSG_CTL_CM_GLOBAL_BARRIER_QUERY", MSG_CTL_CM_GLOBAL_BARRIER_QUERY}, + {"MSG_AGENT_CM_COORDINATE_INSTANCE_BARRIER", MSG_AGENT_CM_COORDINATE_INSTANCE_BARRIER}, + {"MSG_CM_CTL_GLOBAL_BARRIER_DATA_BEGIN", MSG_CM_CTL_GLOBAL_BARRIER_DATA_BEGIN}, + {"MSG_CM_CTL_GLOBAL_BARRIER_DATA", MSG_CM_CTL_GLOBAL_BARRIER_DATA}, + {"MSG_CM_CTL_BARRIER_DATA_END", MSG_CM_CTL_BARRIER_DATA_END}, + {"MSG_CM_CTL_BACKUP_OPEN", MSG_CM_CTL_BACKUP_OPEN}, + {"MSG_CM_AGENT_DN_SYNC_LIST", MSG_CM_AGENT_DN_SYNC_LIST}, + {"MSG_AGENT_CM_DN_SYNC_LIST", MSG_AGENT_CM_DN_SYNC_LIST}, + {"MSG_CTL_CM_SWITCHOVER_FAST", MSG_CTL_CM_SWITCHOVER_FAST}, + {"MSG_CM_AGENT_SWITCHOVER_FAST", MSG_CM_AGENT_SWITCHOVER_FAST}, + {"MSG_CTL_CM_RELOAD", MSG_CTL_CM_RELOAD}, + {"MSG_CM_CTL_RELOAD_ACK", MSG_CM_CTL_RELOAD_ACK}, + {"MSG_CM_CTL_INVALID_COMMAND_ACK", MSG_CM_CTL_INVALID_COMMAND_ACK}, + {"MSG_AGENT_CM_CN_OBS_STATUS", MSG_AGENT_CM_CN_OBS_STATUS}, 
+ {"MSG_CM_AGENT_NOTIFY_CN_RECOVER", MSG_CM_AGENT_NOTIFY_CN_RECOVER}, + {"MSG_CM_AGENT_FULL_BACKUP_CN_OBS", MSG_CM_AGENT_FULL_BACKUP_CN_OBS}, + {"MSG_AGENT_CM_BACKUP_STATUS_ACK", MSG_AGENT_CM_BACKUP_STATUS_ACK}, + {"MSG_CM_AGENT_REFRESH_OBS_DEL_TEXT", MSG_CM_AGENT_REFRESH_OBS_DEL_TEXT}, + {"MSG_AGENT_CM_INSTANCE_BARRIER_NEW", MSG_AGENT_CM_INSTANCE_BARRIER_NEW}, + {"MSG_CTL_CM_GLOBAL_BARRIER_QUERY_NEW", MSG_CTL_CM_GLOBAL_BARRIER_QUERY_NEW}, + {"MSG_CM_CTL_GLOBAL_BARRIER_DATA_BEGIN_NEW", MSG_CM_CTL_GLOBAL_BARRIER_DATA_BEGIN_NEW}, + {"MSG_CM_AGENT_DATANODE_INSTANCE_BARRIER", MSG_CM_AGENT_DATANODE_INSTANCE_BARRIER}, + {"MSG_CM_AGENT_COORDINATE_INSTANCE_BARRIER", MSG_CM_AGENT_COORDINATE_INSTANCE_BARRIER}, + {NULL, MSG_TYPE_BUTT}, +}; + +static ObsBackupStatusMapString g_obsBackupMapping[] = { + {"build start", OBS_BACKUP_PROCESSING}, + {"build failed", OBS_BACKUP_FAILED}, + {"build done", OBS_BACKUP_COMPLETED}, + {NULL, OBS_BACKUP_UNKNOWN}, +}; + +int32 ObsStatusStr2Int(const char *statusStr) +{ + for (uint32 i = 0; g_obsBackupMapping[i].obsStatusStr != NULL; i++) { + if (strcmp(g_obsBackupMapping[i].obsStatusStr, statusStr) == 0) { + return g_obsBackupMapping[i].backupStatus; + } + } + + write_runlog(ERROR, "ObsStatusStr2Int failed, input status is: (%s)\n", statusStr); + return OBS_BACKUP_UNKNOWN; +} + +const char* cluster_msg_int_to_string(int cluster_msg) +{ + int i = 0; + for (i = 0; cluster_msg_map_string[i].cluster_msg_str != NULL; i++) { + if (cluster_msg_map_string[i].cluster_msg_val == cluster_msg) { + return cluster_msg_map_string[i].cluster_msg_str; + } + } + write_runlog(ERROR, "cluster_msg_int_to_string failed, input int cluster_msg is: (%d)\n", cluster_msg); + return "Unknown message type"; +} + +instance_not_exist_reason_string instance_not_exist_reason[] = { + {"unknown", UNKNOWN_BAD_REASON}, + {"check port fail", PORT_BAD_REASON}, + {"nic not up", NIC_BAD_REASON}, + {"data path disc writable test failed", DISC_BAD_REASON}, + {"stopped by users", STOPPED_REASON}, + {"cn deleted, please repair quickly", CN_DELETED_REASON}, + {NULL, MSG_TYPE_BUTT}, +}; + +const char* instance_not_exist_reason_to_string(int reason) +{ + int i = 0; + for (i = 0; instance_not_exist_reason[i].level_string != NULL; i++) { + if (instance_not_exist_reason[i].level_val == reason) { + return instance_not_exist_reason[i].level_string; + } + } + return "unknown"; +} + +static void cm_init_block_sig(sigset_t* sleep_block_sig) +{ +#ifdef SIGTRAP + (void)sigdelset(sleep_block_sig, SIGTRAP); +#endif +#ifdef SIGABRT + (void)sigdelset(sleep_block_sig, SIGABRT); +#endif +#ifdef SIGILL + (void)sigdelset(sleep_block_sig, SIGILL); +#endif +#ifdef SIGFPE + (void)sigdelset(sleep_block_sig, SIGFPE); +#endif +#ifdef SIGSEGV + (void)sigdelset(sleep_block_sig, SIGSEGV); +#endif +#ifdef SIGBUS + (void)sigdelset(sleep_block_sig, SIGBUS); +#endif +#ifdef SIGSYS + (void)sigdelset(sleep_block_sig, SIGSYS); +#endif +} + +void cm_sleep(unsigned int sec) +{ + sigset_t sleep_block_sig; + sigset_t old_sig; + (void)sigfillset(&sleep_block_sig); + + cm_init_block_sig(&sleep_block_sig); + + (void)sigprocmask(SIG_SETMASK, &sleep_block_sig, &old_sig); + + (void)sleep(sec); + + (void)sigprocmask(SIG_SETMASK, &old_sig, NULL); +} + +void cm_usleep(unsigned int usec) +{ + sigset_t sleep_block_sig; + sigset_t old_sig; + (void)sigfillset(&sleep_block_sig); + + cm_init_block_sig(&sleep_block_sig); + + (void)sigprocmask(SIG_SETMASK, &sleep_block_sig, &old_sig); + + (void)usleep(usec); + + (void)sigprocmask(SIG_SETMASK, &old_sig, NULL); +} + 
+uint32 get_healthy_etcd_node_count(EtcdTlsAuthPath* tlsPath, int programType) +{ + uint32 i; + uint32 health_count = 0; + uint32 unhealth_count = 0; + bool findUnhealth = true; + int logLevel = (programType == CM_CTL) ? DEBUG1 : ERROR; + + char* health = (char*)malloc(ETCD_STATE_LEN * sizeof(char)); + if (health == NULL) { + write_runlog(logLevel, "malloc memory failed! size = %d\n", ETCD_STATE_LEN); + exit(1); + } + errno_t rc = memset_s(health, ETCD_STATE_LEN, 0, ETCD_STATE_LEN); + securec_check_errno(rc, ); + + if (g_health_etcd_flag) { + for (i = 0; i < g_health_etcd_count; i++) { + uint32 etcd_index = g_health_etcd_index[i]; + if (etcd_index >= g_node_num || !g_node[etcd_index].etcd) { + break; + } + + int serverLen = 2; + EtcdServerSocket server[serverLen]; + server[0].host = g_node[etcd_index].etcdClientListenIPs[0]; + server[0].port = g_node[etcd_index].etcdClientListenPort; + server[1].host = NULL; + EtcdSession sess = 0; + int etcd_cluster_result = ETCD_OK; + if (etcd_open(&sess, server, tlsPath, ETCD_DEFAULT_TIMEOUT) != 0) { + const char* err_now = get_last_error(); + write_runlog(logLevel, "open etcd server %s failed: %s.\n", server[0].host, err_now); + break; + } + + etcd_cluster_result = etcd_cluster_health(sess, g_node[etcd_index].etcdName, health, ETCD_STATE_LEN); + if (etcd_close(sess) != 0) { + /* Only print error info */ + const char* err = get_last_error(); + write_runlog(WARNING, "etcd_close failed,%s\n", err); + } + + if (etcd_cluster_result == 0) { + if (0 == strcmp(health, "healthy")) { + health_count++; + } else { + break; + } + } else { + const char* err_now = get_last_error(); + write_runlog(logLevel, "etcd get all node health failed: %s.\n", err_now); + break; + } + + if (health_count > g_etcd_num / 2) { + FREE_AND_RESET(health); + return health_count; + } + } + } + health_count = 0; + (void)pthread_rwlock_wrlock(&g_health_etcd_rwlock); + g_health_etcd_count = 0; + for (i = 0; i < g_node_num; i++) { + if (g_node[i].etcd) { + EtcdServerSocket server[2]; + server[0].host = g_node[i].etcdClientListenIPs[0]; + server[0].port = g_node[i].etcdClientListenPort; + server[1].host = NULL; + EtcdSession sess = 0; + int etcd_cluster_result = ETCD_OK; + if (etcd_open(&sess, server, tlsPath, ETCD_DEFAULT_TIMEOUT) != 0) { + const char* err_now = get_last_error(); + write_runlog(logLevel, "open etcd server %s failed: %s.\n", server[0].host, err_now); + continue; + } + etcd_cluster_result = etcd_cluster_health(sess, g_node[i].etcdName, health, ETCD_STATE_LEN); + if (etcd_cluster_result == 0) { + if (0 == strcmp(health, "healthy")) { + g_health_etcd_index[health_count] = i; + health_count++; + } else { + unhealth_count++; + } + } else { + unhealth_count++; + const char* err_now = get_last_error(); + write_runlog(logLevel, "etcd get node %s health state failed: %s.\n", g_node[i].etcdName, err_now); + } + + if (etcd_close(sess) != 0) { + /* Only print error info */ + const char* err = get_last_error(); + write_runlog(WARNING, "etcd_close failed,%s\n", err); + } + + if (health_count > g_etcd_num / 2) { + findUnhealth = false; + break; + } + + if (unhealth_count > g_etcd_num / 2) { + break; + } + } + } + if (findUnhealth) { + g_health_etcd_flag = false; + } else { + g_health_etcd_flag = true; + g_health_etcd_count = health_count; + } + (void)pthread_rwlock_unlock(&g_health_etcd_rwlock); + + FREE_AND_RESET(health); + return health_count; +} + +void check_input_for_security(const char* input) +{ + char* danger_token[] = {"|", ";", "&", "$", "<", ">", "`", "\\", "!", "\n", NULL}; + int 
i = 0; + for (i = 0; danger_token[i] != NULL; i++) { + if (strstr(input, danger_token[i]) != NULL) { + printf("invalid token \"%s\" in string: %s.", danger_token[i], input); + exit(1); + } + } +} + +/* CAUTION: the env value MPPDB_ENV_SEPARATE_PATH does not exist in some system */ +int cm_getenv(const char* env_var, char* output_env_value, uint32 env_value_len, syscalllock cmLock, int elevel) +{ + char* env_value = NULL; + elevel = (elevel == -1) ? ERROR : elevel; + + if (env_var == NULL) { + write_runlog(elevel, "cm_getenv: invalid env_var !\n"); + return -1; + } + + (void)syscalllockAcquire(&cmLock); + env_value = getenv(env_var); + + if (env_value == NULL || env_value[0] == '\0') { + + if (strcmp(env_var, "MPPDB_KRB5_FILE_PATH") == 0 || + strcmp(env_var, "KRB_HOME") == 0 || + strcmp(env_var, "MPPDB_ENV_SEPARATE_PATH") == 0) { + + /* MPPDB_KRB5_FILE_PATH, KRB_HOME, MPPDB_ENV_SEPARATE_PATH is not necessary, + and do not print failed to get environment log */ + (void)syscalllockRelease(&cmLock); + return -1; + } else { + write_runlog(elevel, + "cm_getenv: failed to get environment variable:%s. Please check and make sure it is configured!\n", + env_var); + } + (void)syscalllockRelease(&cmLock); + return -1; + } + check_env_value(env_value); + + int rc = strcpy_s(output_env_value, env_value_len, env_value); + if (rc != EOK) { + write_runlog(elevel, + "cm_getenv: failed to get environment variable %s , variable length:%lu.\n", + env_var, + strlen(env_value)); + (void)syscalllockRelease(&cmLock); + return -1; + } + + (void)syscalllockRelease(&cmLock); + return EOK; +} + +void check_env_value(const char* input_env_value) +{ + const char* danger_character_list[] = {"|", + ";", + "&", + "$", + "<", + ">", + "`", + "\\", + "'", + "\"", + "{", + "}", + "(", + ")", + "[", + "]", + "~", + "*", + "?", + "!", + "\n", + NULL}; + int i = 0; + + for (i = 0; danger_character_list[i] != NULL; i++) { + if (strstr(input_env_value, danger_character_list[i]) != NULL) { + fprintf( + stderr, "invalid token \"%s\" in input_env_value: (%s)\n", danger_character_list[i], input_env_value); + exit(1); + } + } +} + +void print_environ(void) +{ + int i; + + write_runlog(LOG, "begin printing environment variables.\n"); + for (i = 0; environ[i] != NULL; i++) { + write_runlog(LOG, "%s\n", environ[i]); + } + write_runlog(LOG, "end printing environment variables\n"); +} + +void cm_pthread_rw_lock(pthread_rwlock_t* rwlock) +{ + int ret = pthread_rwlock_wrlock(rwlock); + if (ret != 0) { + write_runlog(ERROR, "pthread_rwlock_wrlock failed.\n"); + exit(1); + } +} + +void cm_pthread_rw_unlock(pthread_rwlock_t* rwlock) +{ + int ret = pthread_rwlock_unlock(rwlock); + if (ret != 0) { + write_runlog(ERROR, "pthread_rwlock_unlock failed.\n"); + exit(1); + } +} + +/** + * @brief Creates a lock file for a process with a specified PID. + * + * @note When the parameter "pid" is set to -1, the specified process is the current process. + * @param filename The name of the lockfile to create. + * @param data_path The data path of the instance. + * @param pid The pid of the process. + * @return 0 Create successfully, -1 Create failure. + */ +int create_lock_file( + const char* filename, + const char* data_path, + const pid_t pid) +{ + int fd; + char buffer[MAXPGPATH + 100] = { 0 }; + const pid_t my_pid = (pid >= 0) ? pid : getpid(); + int try_times = 0; + + do + { + /* The maximum number of attempts is 3. 
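+         * Each pass tries to create the file with O_CREAT | O_EXCL; when the
+         * file already exists, the recorded PID is read back and probed with
+         * kill(pid, 0) (a warning is logged if it still seems alive), and the
+         * old file is unlinked before retrying.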
+         */
+        if (try_times++ > 3) {
+            write_runlog(ERROR, "could not create lock file: filename=\"%s\", error_no=%d.\n", filename, errno);
+            return -1;
+        }
+
+        /* Attempt to create the specified PID file. */
+        fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+        if (fd >= 0) {
+            break;
+        }
+
+        /* If the creation fails, try to open the existing pid file. */
+        fd = open(filename, O_RDONLY | O_CLOEXEC, 0600);
+        if (fd < 0) {
+            write_runlog(ERROR, "could not open lock file: filename=\"%s\", error_no=%d.\n", filename, errno);
+            return EEXIST;
+        }
+
+        /* If the file is opened successfully, try to read its content. */
+        int len = read(fd, buffer, sizeof(buffer) - 1);
+        (void)close(fd);
+        if (len < 0 || len >= (MAXPGPATH + 100)) {
+            write_runlog(ERROR, "could not read lock file: filename=\"%s\", error_no=%d.\n", filename, errno);
+            return EEXIST;
+        }
+
+        /* Obtain the PID recorded in the lock file. */
+        const pid_t other_pid = static_cast<pid_t>(atoi(buffer));
+        if (other_pid <= 0) {
+            write_runlog(ERROR,
+                "bogus data in lock file: filename=\"%s\", buffer=\"%s\", error_no=%d.\n",
+                filename, buffer, errno);
+            return EEXIST;
+        }
+
+        /* The recorded PID belongs to neither this process nor its parent. */
+        if (other_pid != my_pid
+#ifndef WIN32
+            && other_pid != getppid()
+#endif
+        ) {
+            /* Probe the recorded PID with a null signal to see if it is alive. */
+            if (kill(other_pid, 0) == 0 || (errno != ESRCH && errno != EPERM)) {
+                write_runlog(WARNING,
+                    "lock file \"%s\" exists. Is another instance (PID %d) running in data directory \"%s\"?\n",
+                    filename, (int)(other_pid), data_path);
+            }
+        }
+
+        /* Attempt to delete the specified PID file. */
+        if (unlink(filename) < 0) {
+            write_runlog(ERROR,
+                "could not remove old lock file \"%s\". The file seems accidentally"
+                " left over, but it could not be removed. Please remove the file by hand and try again: errno=%d.\n",
+                filename, errno);
+            return -1;
+        }
+    } while (true);
+
+    int rc = snprintf_s(buffer, sizeof(buffer), sizeof(buffer) - 1, "%d\n%s\n%d\n", (int)(my_pid), data_path, 0);
+    securec_check_intval(rc, );
+
+    /* Write the PID information. */
+    errno = 0;
+    if (write(fd, buffer, strlen(buffer)) != (int)(strlen(buffer))) {
+        write_runlog(ERROR, "could not write lock file: filename=\"%s\", error_no=%d.\n", filename, errno);
+
+        close(fd);
+        unlink(filename);
+        return EEXIST;
+    }
+
+    /* Close the pid file. */
+    if (close(fd)) {
+        write_runlog(FATAL, "could not write lock file: filename=\"%s\", error_no=%d.\n", filename, errno);
+
+        unlink(filename);
+        return -1;
+    }
+
+    return 0;
+}
+
+/**
+ * @brief Delete pid file.
+ *
+ * @param filename The pid file to be deleted.
+ */
+void delete_lock_file(const char* filename)
+{
+    struct stat stat_buf = {0};
+
+    /* Check whether the pid file exists. */
+    if (stat(filename, &stat_buf) != 0) {
+        return;
+    }
+
+    /* Delete the PID file.
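+     * A missing file is not treated as an error; a failed unlink is only
+     * logged (at FATAL level), since there is no status to return.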
*/ + if (unlink(filename) < 0) { + write_runlog(FATAL, "could not remove old lock file \"%s\"", filename); + } +} + +/* kerberos status to string */ +const char* kerberos_status_to_string(int role) +{ + if (role <= KERBEROS_STATUS_UNKNOWN || role > KERBEROS_STATUS_DOWN) { + return kerberos_role_string_map[KERBEROS_STATUS_UNKNOWN].role_string; + } else { + return kerberos_role_string_map[role].role_string; + } +} + +static void SigAlarmHandler(int arg) +{ + ; +} + +int CmExecuteCmd(const char* command, struct timeval timeout) +{ +#ifndef WIN32 + pid_t pid; + pid_t child = 0; + struct sigaction intact = {}; + struct sigaction quitact = {}; + sigset_t newsigblock, oldsigblock; + struct itimerval write_timeout; + errno_t rc; + + if (command == NULL) { + write_runlog(ERROR, "ExecuteCmd invalid command.\n"); + return 1; + } + /* + * Ignore SIGINT and SIGQUIT, block SIGCHLD. Remember to save existing + * signal dispositions. + */ + struct sigaction ign = {}; + rc = memset_s(&ign, sizeof(struct sigaction), 0, sizeof(struct sigaction)); + securec_check_errno(rc, (void)rc); + ign.sa_handler = SIG_IGN; + (void)sigemptyset(&ign.sa_mask); + ign.sa_flags = 0; + (void)sigaction(SIGINT, &ign, &intact); + (void)sigaction(SIGQUIT, &ign, &quitact); + (void)sigemptyset(&newsigblock); + (void)sigaddset(&newsigblock, SIGCHLD); + (void)sigprocmask(SIG_BLOCK, &newsigblock, &oldsigblock); + + switch (pid = fork()) { + case -1: /* error */ + break; + case 0: /* child */ + + /* + * Restore original signal dispositions and exec the command. + */ + (void)sigaction(SIGINT, &intact, NULL); + (void)sigaction(SIGQUIT, &quitact, NULL); + (void)sigprocmask(SIG_SETMASK, &oldsigblock, NULL); + (void)execl("/bin/sh", "sh", "-c", command, (char*)0); + _exit(127); + break; + default: + /* wait the child process end ,if timeout then kill the child process force */ + write_runlog(LOG, "ExecuteCmd: %s, pid:%d. start!\n", command, pid); + write_timeout.it_value.tv_sec = timeout.tv_sec; + write_timeout.it_value.tv_usec = timeout.tv_usec; + write_timeout.it_interval.tv_sec = 0; + write_timeout.it_interval.tv_usec = 0; + child = pid; + (void)setitimer(ITIMER_REAL, &write_timeout, NULL); + (void)signal(SIGALRM, SigAlarmHandler); + if (pid != waitpid(pid, NULL, 0)) { + /* kill child process */ + (void)kill(child, SIGKILL); + pid = -1; + /* avoid the zombie process */ + (void)wait(NULL); + } + write_runlog(LOG, "ExecuteCmd: %s, pid:%d. end!\n", command, pid); + (void)signal(SIGALRM, SIG_IGN); + break; + } + (void)sigaction(SIGINT, &intact, NULL); + (void)sigaction(SIGQUIT, &quitact, NULL); + (void)sigprocmask(SIG_SETMASK, &oldsigblock, NULL); + if (pid == -1) { + write_runlog(ERROR, "ExecuteCmd: %s, failed errno:%d.\n", command, errno); + } + return ((pid == -1) ? -1 : 0); +#else + return -1; +#endif +} + +int CmInitMasks(const int* ListenSocket, fd_set* rmask) +{ + int maxsock = -1; + int i; + + FD_ZERO(rmask); + + for (i = 0; i < MAXLISTEN; i++) { + int fd = ListenSocket[i]; + + if (fd == -1) { + break; + } + FD_SET(fd, rmask); + if (fd > maxsock) { + maxsock = fd; + } + } + + return maxsock + 1; +} diff --git a/src/lib/cm_common/cm_path.cpp b/src/lib/cm_common/cm_path.cpp new file mode 100644 index 000000000..af95429af --- /dev/null +++ b/src/lib/cm_common/cm_path.cpp @@ -0,0 +1,60 @@ +/** + * @file cm_path.cpp + * @brief + * @author xxx + * @version 1.0 + * @date 2020-08-06 + * + * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2011-2020. All rights reserved. 
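+ *
+ * @note trim_directory() strips the last path component, e.g.
+ *       "/a/b/c" becomes "/a/b" and "/a" becomes "/".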
+ * + */ + +#include +#include +#include +#include +#include + +#include "cm/cm_c.h" + +#define IS_DIR_SEP_GTM(ch) ((ch) == '/' || (ch) == '\\') + +#define skip_drive(path) (path) + +void trim_directory(char* path); + +/** + * @brief Trim trailing directory from path, that is, remove any trailing slashes, + * the last pathname component, and the slash just ahead of it --- but never + * remove a leading slash. + * + * @param path Check the path validity. + */ +void trim_directory(char* path) +{ + char* p = NULL; + + path = skip_drive(path); + + if (path[0] == '\0') { + return; + } + + /* back up over trailing slash(es) */ + for (p = path + strlen(path) - 1; IS_DIR_SEP_GTM(*p) && p > path; p--) { + ; + } + /* back up over directory name */ + for (; !IS_DIR_SEP_GTM(*p) && p > path; p--) { + ; + } + /* if multiple slashes before directory name, remove 'em all */ + for (; p > path && IS_DIR_SEP_GTM(*(p - 1)); p--) { + ; + } + /* don't erase a leading slash */ + if (p == path && IS_DIR_SEP_GTM(*p)) { + p++; + } + *p = '\0'; +} diff --git a/src/lib/cm_common/cm_stringinfo.cpp b/src/lib/cm_common/cm_stringinfo.cpp new file mode 100644 index 000000000..0b1140e80 --- /dev/null +++ b/src/lib/cm_common/cm_stringinfo.cpp @@ -0,0 +1,377 @@ +/** + * @file cm_stringinfo.cpp + * @brief StringInfo provides an indefinitely-extensible string data type. + * It can be used to buffer either ordinary C strings (null-terminated text) + * or arbitrary binary data. All storage is allocated with palloc(). + * @author xxx + * @version 1.0 + * @date 2020-08-06 + * + * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2011-2020. All rights reserved. + * + */ +#include "cm/cm_c.h" +#include "cm/stringinfo.h" +#include "cm/elog.h" + +/* + * makeStringInfo + * + * Create an empty 'StringInfoData' & return a pointer to it. + */ +CM_StringInfo CM_makeStringInfo(void) +{ + CM_StringInfo res; + + res = (CM_StringInfo)malloc(sizeof(CM_StringInfoData)); + if (res == NULL) { + write_runlog(ERROR, "malloc CM_StringInfo failed, out of memory.\n"); + exit(1); + } + + CM_initStringInfo(res); + + return res; +} + +/* + * makeStringInfo + * + * Create an empty 'StringInfoData' & return a pointer to it. + */ +void CM_destroyStringInfo(CM_StringInfo str) +{ + if (str != NULL) { + if (str->maxlen > 0) { + FREE_AND_RESET(str->data); + } + free(str); + } + return; +} + +/* + * makeStringInfo + * + * Create an empty 'StringInfoData' & return a pointer to it. + */ +void CM_freeStringInfo(CM_StringInfo str) +{ + if (str->maxlen > 0) { + FREE_AND_RESET(str->data); + } + return; +} + +/* + * dupStringInfo + * + * Get new StringInfo and copy the original to it. + */ +CM_StringInfo CM_dupStringInfo(CM_StringInfo orig) +{ + CM_StringInfo newvar; + + newvar = CM_makeStringInfo(); + if (newvar == NULL) { + return (newvar); + } + + if (orig->len > 0) { + CM_appendBinaryStringInfo(newvar, orig->data, orig->len); + newvar->cursor = orig->cursor; + } + return (newvar); +} + +/* + * copyStringInfo + * Deep copy: Data part is copied too. Cursor of the destination is + * initialized to zero. + */ +void CM_copyStringInfo(CM_StringInfo to, CM_StringInfo from) +{ + CM_resetStringInfo(to); + CM_appendBinaryStringInfo(to, from->data, from->len); + return; +} + +/* + * initStringInfo + * + * Initialize a StringInfoData struct (with previously undefined contents) + * to describe an empty string. 
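+ *
+ * The buffer starts at a default 1024 bytes and grows by doubling in
+ * CM_enlargeStringInfo(), so repeated appends have amortized O(1) cost.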
+ */ +void CM_initStringInfo(CM_StringInfo str) +{ + int size = 1024; /* initial default buffer size */ + + str->data = (char*)malloc(size); + if (str->data == NULL) { + write_runlog(ERROR, "malloc CM_StringInfo->data failed, out of memory.\n"); + exit(1); + } + str->maxlen = size; + CM_resetStringInfo(str); +} + +/* + * resetStringInfo + * + * Reset the StringInfo: the data buffer remains valid, but its + * previous content, if any, is cleared. + */ +void CM_resetStringInfo(CM_StringInfo str) +{ + if (str == NULL) { + return; + } + + str->data[0] = '\0'; + str->len = 0; + str->cursor = 0; + str->qtype = 0; + str->msglen = 0; +} + +/* + * appendStringInfo + * + * Format text data under the control of fmt (an sprintf-style format string) + * and append it to whatever is already in str. More space is allocated + * to str if necessary. This is sort of like a combination of sprintf and + * strcat. + */ +void CM_appendStringInfo(CM_StringInfo str, const char* fmt, ...) +{ + for (;;) { + va_list args; + bool success = false; + + /* Try to format the data. */ + va_start(args, fmt); + success = CM_appendStringInfoVA(str, fmt, args); + va_end(args); + + if (success) { + break; + } + + /* Double the buffer size and try again. */ + (void)CM_enlargeStringInfo(str, str->maxlen); + } +} + +/* + * appendStringInfoVA + * + * Attempt to format text data under the control of fmt (an sprintf-style + * format string) and append it to whatever is already in str. If successful + * return true; if not (because there's not enough space), return false + * without modifying str. Typically the caller would enlarge str and retry + * on false return --- see appendStringInfo for standard usage pattern. + * + * XXX This API is ugly, but there seems no alternative given the C spec's + * restrictions on what can portably be done with va_list arguments: you have + * to redo va_start before you can rescan the argument list, and we can't do + * that from here. + */ +bool CM_appendStringInfoVA(CM_StringInfo str, const char* fmt, va_list args) +{ + int avail, nprinted; + + /* + * If there's hardly any space, don't bother trying, just fail to make the + * caller enlarge the buffer first. + */ + avail = str->maxlen - str->len - 1; + if (avail < 16) { + return false; + } + + /* + * Assert check here is to catch buggy vsnprintf that overruns the + * specified buffer length. Solaris 7 in 64-bit mode is an example of a + * platform with such a bug. + */ +#ifdef USE_ASSERT_CHECKING + str->data[str->maxlen - 1] = '\0'; +#endif + + nprinted = vsnprintf_s(str->data + str->len, str->maxlen - str->len, avail, fmt, args); + + /* + * Note: some versions of vsnprintf return the number of chars actually + * stored, but at least one returns -1 on failure. Be conservative about + * believing whether the print worked. + */ + if (nprinted >= 0 && nprinted < avail - 1) { + /* Success. Note nprinted does not include trailing null. */ + str->len += nprinted; + return true; + } + + /* Restore the trailing null so that str is unmodified. */ + str->data[str->len] = '\0'; + return false; +} + +/* + * appendStringInfoString + * + * Append a null-terminated string to str. + * Like appendStringInfo(str, "%s", s) but faster. + */ +void CM_appendStringInfoString(CM_StringInfo str, const char* s) +{ + CM_appendBinaryStringInfo(str, s, strlen(s)); +} + +/* + * appendStringInfoChar + * + * Append a single byte to str. + * Like appendStringInfo(str, "%c", ch) but much faster. 
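+ *
+ * The buffer is enlarged first when len + 1 would reach maxlen, and the
+ * data always stays null-terminated.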
+ */ +void CM_appendStringInfoChar(CM_StringInfo str, char ch) +{ + /* Make more room if needed */ + if (str->len + 1 >= str->maxlen) { + (void)CM_enlargeStringInfo(str, 1); + } + + /* OK, append the character */ + str->data[str->len] = ch; + str->len++; + str->data[str->len] = '\0'; +} + +/* + * appendBinaryStringInfo + * + * Append arbitrary binary data to a StringInfo, allocating more space + * if necessary. + */ +void CM_appendBinaryStringInfo(CM_StringInfo str, const char* data, int datalen) +{ + errno_t rc; + + /* Make more room if needed */ + (void)CM_enlargeStringInfo(str, datalen); + + /* OK, append the data */ + rc = memcpy_s(str->data + str->len, str->maxlen - str->len, data, datalen); + securec_check_c(rc, "\0", "\0"); + str->len += datalen; + + /* + * Keep a trailing null in place, even though it's probably useless for + * binary data... + */ + str->data[str->len] = '\0'; +} + +/* + * enlargeStringInfo + * + * Make sure there is enough space for 'needed' more bytes + * ('needed' does not include the terminating null). + * + * External callers usually need not concern themselves with this, since + * all stringinfo.c routines do it automatically. However, if a caller + * knows that a StringInfo will eventually become X bytes large, it + * can save some palloc overhead by enlarging the buffer before starting + * to store data in it. + * + * NB: because we use repalloc() to enlarge the buffer, the string buffer + * will remain allocated in the same memory context that was current when + * initStringInfo was called, even if another context is now current. + * This is the desired and indeed critical behavior! + */ +int CM_enlargeStringInfo(CM_StringInfo str, int needed) +{ + int newlen; + char* newdata = NULL; + + /* + * Guard against out-of-range "needed" values. Without this, we can get + * an overflow or infinite loop in the following. + */ + if (needed < 0) /* should not happen */ + { + write_runlog(ERROR, "invalid string enlargement request size: %d\n", needed); + return -1; + } + + if (((Size)needed) >= (CM_MaxAllocSize - (Size)str->len)) { + write_runlog(ERROR, + "out of memory !Cannot enlarge string buffer containing %d bytes by %d more bytes.\n", + str->len, + needed); + return -1; + } + + needed += str->len + 1; /* total space required now */ + + /* Because of the above test, we now have needed <= MaxAllocSize */ + + if (needed <= str->maxlen) { + return 0; /* got enough space already */ + } + + /* + * We don't want to allocate just a little more space with each append; + * for efficiency, double the buffer size each time it overflows. + * Actually, we might need to more than double it if 'needed' is big... + */ + newlen = 2 * str->maxlen; + while (needed > newlen) { + newlen = 2 * newlen; + } + + /* + * Clamp to MaxAllocSize in case we went past it. Note we are assuming + * here that MaxAllocSize <= INT_MAX/2, else the above loop could + * overflow. We will still have newlen >= needed. 
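+     *
+     * Example: with maxlen = 1024 and an empty buffer, a request for 3000
+     * more bytes doubles 1024 -> 2048 -> 4096, so 4096 bytes are allocated.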
+     */
+    if (newlen > (int)CM_MaxAllocSize) {
+        newlen = (int)CM_MaxAllocSize;
+    }
+
+    newdata = (char*)malloc(newlen);
+    if (newdata != NULL) {
+        if (str->data != NULL) {
+            errno_t rc;
+            rc = memcpy_s(newdata, newlen, str->data, str->maxlen);
+            securec_check_c(rc, "\0", "\0");
+            FREE_AND_RESET(str->data);
+        }
+        str->data = newdata;
+        str->maxlen = newlen;
+    } else {
+        if (str->data != NULL) {
+            FREE_AND_RESET(str->data);
+            str->maxlen = 0;
+        }
+    }
+    return 0;
+}
+
+int CM_is_str_all_digit(const char* name)
+{
+    int size = 0;
+    int i = 0;
+
+    if (name == NULL) {
+        write_runlog(ERROR, "CM_is_str_all_digit input null\n");
+        return -1;
+    }
+
+    size = strlen(name);
+    for (i = 0; i < size; i++) {
+        if (name[i] < '0' || name[i] > '9') {
+            return -1;
+        }
+    }
+    return 0;
+}
\ No newline at end of file
diff --git a/src/lib/cm_common/generate-cm-errcodes.pl b/src/lib/cm_common/generate-cm-errcodes.pl
new file mode 100644
index 000000000..910d0c000
--- /dev/null
+++ b/src/lib/cm_common/generate-cm-errcodes.pl
@@ -0,0 +1,46 @@
+#!/usr/bin/perl
+#
+# Generate the cm_errcodes.h header from cm_errcodes.txt
+# Copyright (c) 2000-2012, PostgreSQL Global Development Group
+
+use warnings;
+use strict;
+
+print
+  "/* autogenerated from src/lib/cm_common/cm_errcodes.txt, do not edit */\n";
+print "/* there is deliberately not an #ifndef ERRCODES_H here */\n";
+
+open my $cm_errcodes, $ARGV[0] or die;
+
+while (<$cm_errcodes>)
+{
+	chomp;
+
+	# Skip comments
+	next if /^#/;
+	next if /^\s*$/;
+
+	# Emit a comment for each section header
+	if (/^Section:(.*)/)
+	{
+		my $header = $1;
+		$header =~ s/^\s+//;
+		print "\n/* $header */\n";
+		next;
+	}
+
+	die "unable to parse cm_errcodes.txt"
+	  unless /^([^\s]{5})\s+[EWS]\s+([^\s]+)/;
+
+	(my $sqlstate, my $errcode_macro) = ($1, $2);
+
+	# Split the sqlstate letters
+	$sqlstate = join ",", split "", $sqlstate;
+
+	# And quote them
+	$sqlstate =~ s/([^,])/'$1'/g;
+
+	print "#define $errcode_macro MAKE_SQLSTATE($sqlstate)\n";
+}
+
+close $cm_errcodes;
diff --git a/src/lib/cm_communication/CMakeLists.txt b/src/lib/cm_communication/CMakeLists.txt
new file mode 100644
index 000000000..a4a981f2c
--- /dev/null
+++ b/src/lib/cm_communication/CMakeLists.txt
@@ -0,0 +1,13 @@
+# This is the main CMake file for building all components.
+
+SET(CMAKE_VERBOSE_MAKEFILE ON)  # show the full raw compile commands
+SET(CMAKE_RULE_MESSAGES OFF)    # reduce unnecessary output
+SET(CMAKE_SKIP_RPATH TRUE)      # TRUE means RPATH is ignored during build and install
+
+set(CMAKE_MODULE_PATH
+    ${CMAKE_CURRENT_SOURCE_DIR}/cm_feconnect
+    ${CMAKE_CURRENT_SOURCE_DIR}/cm_libpq
+)
+
+add_subdirectory(cm_feconnect)
+add_subdirectory(cm_libpq)
\ No newline at end of file
diff --git a/src/lib/cm_communication/Makefile b/src/lib/cm_communication/Makefile
new file mode 100644
index 000000000..5f82e2035
--- /dev/null
+++ b/src/lib/cm_communication/Makefile
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------------------
+#
+# MPPDB CM makefile
+#
+#
+# distribute/cm_communication/Makefile
+#
+#-----------------------------------------------------------------------------
+PGFILEDESC = "cm - Global Transaction Manager for openGauss"
+subdir = src/lib/cm_communication
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+SUBDIRS = cm_feconnect cm_libpq
+
+# Suppress parallel build to avoid dependency problems between the subdirectories.
+.NOTPARALLEL:
+
+$(recurse)
diff --git a/src/lib/cm_communication/cm_feconnect/CMakeLists.txt b/src/lib/cm_communication/cm_feconnect/CMakeLists.txt
new file mode 100644
index 000000000..7066a1679
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/CMakeLists.txt
@@ -0,0 +1,24 @@
+# This is the main CMake file for building all components.
+# libcmclient.a
+AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_cmclient_SRC)
+set(TGT_cmclient_INC
+    ${PROJECT_SRC_DIR}/include/cm
+    ${PROJECT_SRC_DIR}/common/interfaces/libpq
+    ${PROJECT_TRUNK_DIR}/distribute/cm/cm_etcdapi
+    ${LIBCGROUP_INCLUDE_PATH}
+    ${KERBEROS_INCLUDE_PATH}
+)
+
+set(cmclient_DEF_OPTIONS ${MACRO_OPTIONS})
+set(cmclient_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-strong)
+list(REMOVE_ITEM cmclient_COMPILE_OPTIONS -fstack-protector)
+add_static_libtarget(cmclient TGT_cmclient_SRC TGT_cmclient_INC "${cmclient_DEF_OPTIONS}" "${cmclient_COMPILE_OPTIONS}")
+install(TARGETS cmclient_static ARCHIVE DESTINATION lib)
+
+set(cmclient_LINK_OPTIONS ${LIB_LINK_OPTIONS})
+list(REMOVE_ITEM cmclient_LINK_OPTIONS -pthread)
+
+add_shared_libtarget(cmclient TGT_cmclient_SRC TGT_cmclient_INC "${cmclient_DEF_OPTIONS}" "${cmclient_COMPILE_OPTIONS}" "${cmclient_LINK_OPTIONS}")
+set_target_properties(cmclient PROPERTIES VERSION 1)
+
+install(TARGETS cmclient LIBRARY DESTINATION lib)
diff --git a/src/lib/cm_communication/cm_feconnect/Makefile b/src/lib/cm_communication/cm_feconnect/Makefile
new file mode 100644
index 000000000..5a23dad77
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/Makefile
@@ -0,0 +1,61 @@
+#----------------------------------------------------------------------------
+#
+# MPPDB CM feconnect makefile
+#
+#
+# distribute/cm/cm_communication/cm_feconnect/Makefile
+#
+#-----------------------------------------------------------------------------
+top_builddir = ../../../../
+include $(top_builddir)/src/Makefile.global
+subdir = src/lib/cm_communication/cm_feconnect
+
+VERSION = 1
+
+override CFLAGS += -fstack-protector-strong -Wl,-z,relro,-z,now
+override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS)
+LIBS += $(PTHREAD_LIBS)
+
+override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC
+override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC
+
+include $(top_srcdir)/src/gausskernel/common.mk
+
+ifneq "$(MAKECMDGOALS)" "clean"
+  ifneq "$(MAKECMDGOALS)" "distclean"
+    ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1"
+      -include $(DEPEND)
+    endif
+  endif
+endif
+OBJS = fe-misc.o fe-connect.o fe-protocol.o ip.o pqexpbuffer.o
+
+all: libcmclient.a libcmclient.so
+
+libcmclient.so:
+	$(CC) -fPIC -shared $(CFLAGS) $(CPPFLAGS) fe-misc.cpp fe-connect.cpp fe-protocol.cpp ip.cpp pqexpbuffer.cpp -o libcmclient.so.$(VERSION)
+	rm -f libcmclient.so && \
+	ln -sf libcmclient.so.$(VERSION) libcmclient.so
+
+libcmclient.a: $(OBJS)
+	$(AR) $(AROPT) $@ $^
+
+install: all installdirs
+	$(INSTALL_STLIB) libcmclient.so.$(VERSION) '$(DESTDIR)$(libdir)/libcmclient.so.$(VERSION)'
+	cd '$(DESTDIR)$(libdir)' && \
+	rm -f libcmclient.so && \
+	ln -sf libcmclient.so.$(VERSION) libcmclient.so
+
+installdirs:
+	$(MKDIR_P) '$(DESTDIR)$(libdir)'
+
+utcmclient:
+	$(CC) -fPIC -shared $(CFLAGS) $(CPPFLAGS) fe-misc.o pqexpbuffer.o -L$(SECURE_LIB_PATH) -l$(SECURE_C_CHECK) -o libutcmclient.so
+	mv libutcmclient.so $(top_builddir)/../distribute/test/ut/lib/
+
+clean:
+	rm -f $(OBJS) libcmclient.a libcmclient.so libcmclient.so.$(VERSION) *.depend
+
+distclean: clean
+
+maintainer-clean: distclean
diff --git a/src/lib/cm_communication/cm_feconnect/fe-connect.cpp b/src/lib/cm_communication/cm_feconnect/fe-connect.cpp
new file mode 100644
index 000000000..c229f653c
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/fe-connect.cpp
@@ -0,0 +1,1578 @@
+/* -------------------------------------------------------------------------
+ *
+ * fe-connect.cpp
+ *	  functions related to setting up a connection to the backend
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ *
+ * IDENTIFICATION
+ *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.371 2008/12/15 10:28:21 mha Exp $
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <ctype.h>
+#include <time.h>
+
+#include "gssapi/gssapi_krb5.h"
+#include "cm/libpq-fe.h"
+#include "cm/libpq-int.h"
+#include "cm/cm_c.h"
+#include "cm/cm_ip.h"
+#include "cm/cm_msg.h"
+#include "cm/ip.h"
+#include "cm/elog.h"
+
+/*
+ * fall back options if they are not specified by arguments or defined
+ * by environment variables.
+ */
+#define DefaultHost "localhost"
+
+/* ----------
+ * Definition of the conninfo parameters and their fallback resources.
+ *
+ * CMPQconninfoOptions[] is a constant static array that we use to initialize
+ * a dynamically allocated working copy. All the "val" fields in
+ * CMPQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
+ * fields point to malloc'd strings that should be freed when the working
+ * array is freed (see CMPQconninfoFree).
+ * ----------
+ */
+static const CMPQconninfoOption CMPQconninfoOptions[] = {{"connect_timeout", NULL},
+    {"host", NULL},
+    {"hostaddr", NULL},
+    {"port", NULL},
+    {"localhost", NULL},
+    {"localport", NULL},
+    {"node_id", NULL},
+    {"node_name", NULL},
+    {"remote_type", NULL},
+    {"postmaster", NULL},
+    {"user", NULL},
+    /* Terminating entry --- MUST BE LAST */
+    {NULL, NULL}};
+
+static bool connectOptions1(CM_Conn* conn, const char* conninfo);
+static int connectCMStart(CM_Conn* conn);
+static int connectCMComplete(CM_Conn* conn);
+static CM_Conn* makeEmptyCM_Conn(void);
+static void freeCM_Conn(CM_Conn* conn);
+static void closeCM_Conn(CM_Conn* conn);
+static CMPQconninfoOption* conninfo_parse(const char* conninfo, PQExpBuffer errorMessage, bool use_defaults);
+static char* conninfo_getval(CMPQconninfoOption* connOptions, const char* keyword);
+static int CMGssContinue(CM_Conn* conn);
+static int CMGssStartup(CM_Conn* conn);
+static char* gs_getenv_with_check(const char* envKey, CM_Conn* conn);
+bool pg_fe_set_noblock(pgsocket sock)
+{
+#if !defined(WIN32)
+    return (fcntl(sock, F_SETFL, O_NONBLOCK) != -1);
+#else
+    unsigned long ioctlsocket_ret = 1;
+
+    /* Returns non-0 on failure, while fcntl() returns -1 on failure */
+    return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
+#endif
+}
+
+CM_Conn* PQconnectCM(const char* conninfo)
+{
+    CM_Conn* conn = PQconnectCMStart(conninfo);
+
+    if ((conn != NULL) && conn->status != CONNECTION_BAD) {
+        (void)connectCMComplete(conn);
+    } else if (conn != NULL) {
+        closeCM_Conn(conn);
+        freeCM_Conn(conn);
+        conn = NULL;
+    }
+
+    return conn;
+}
+
+/*
+ * PQconnectCMStart
+ *
+ * Returns a CM_Conn*. If NULL is returned, a malloc error has occurred, and
+ * you should not attempt to proceed with this connection.
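+ * (A NULL result means only that the CM_Conn structure itself could not
+ * be allocated, so no error message is available in that case.)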
If the status + * field of the connection returned is CONNECTION_BAD, an error has + * occurred. In this case you should call CMPQfinish on the result, (perhaps + * inspecting the error message first). Other fields of the structure may not + * be valid if that occurs. If the status field is not CONNECTION_BAD, then + * this stage has succeeded - call CMPQconnectPoll, using select(2) to see when + * this is necessary. + * + * See CMPQconnectPoll for more info. + */ +CM_Conn* PQconnectCMStart(const char* conninfo) +{ + CM_Conn* conn = NULL; + + /* + * Allocate memory for the conn structure + */ + conn = makeEmptyCM_Conn(); + if (conn == NULL) { + return NULL; + } + + /* + * Parse the conninfo string + */ + if (!connectOptions1(conn, conninfo)) { + return conn; + } + + /* + * Connect to the database + */ + if (!connectCMStart(conn)) { + /* Just in case we failed to set it in connectCMStart */ + conn->status = CONNECTION_BAD; + } + + return conn; +} + +/* + * connectOptions1 + * + * Internal subroutine to set up connection parameters given an already- + * created CM_Conn and a conninfo string. + * + * Returns true if OK, false if trouble (in which case errorMessage is set + * and so is conn->status). + */ +static bool connectOptions1(CM_Conn* conn, const char* conninfo) +{ + CMPQconninfoOption* connOptions = NULL; + char* tmp = NULL; + + /* + * Parse the conninfo string + */ + connOptions = conninfo_parse(conninfo, &conn->errorMessage, true); + if (connOptions == NULL) { + conn->status = CONNECTION_BAD; + /* errorMessage is already set */ + return false; + } + + /* + * Move option values into conn structure + * + * XXX: probably worth checking strdup() return value here... + */ + tmp = conninfo_getval(connOptions, "hostaddr"); + conn->pghostaddr = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "host"); + conn->pghost = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "port"); + conn->pgport = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "localhost"); + conn->pglocalhost = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "localport"); + conn->pglocalport = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "connect_timeout"); + conn->connect_timeout = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "user"); + conn->pguser = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "node_id"); + conn->node_id = tmp != NULL ? atoi(tmp) : 0; + tmp = conninfo_getval(connOptions, "node_name"); + conn->gc_node_name = tmp != NULL ? strdup(tmp) : NULL; + tmp = conninfo_getval(connOptions, "postmaster"); + conn->is_postmaster = tmp != NULL ? atoi(tmp) : 0; + tmp = conninfo_getval(connOptions, "remote_type"); + conn->remote_type = tmp != NULL ? atoi(tmp) : CM_NODE_DEFAULT; + + /* + * Free the option info - all is in conn now + */ + CMPQconninfoFree(connOptions); + connOptions = NULL; + + return true; +} + +/* ---------- + * connectNoDelay - + * Sets the TCP_NODELAY socket option. + * Returns 1 if successful, 0 if not. + * ---------- + */ +static int connectNoDelay(CM_Conn* conn) +{ +#ifdef TCP_NODELAY + int on = 1; + + if (setsockopt(conn->sock, IPPROTO_TCP, TCP_NODELAY, (char*)&on, sizeof(on)) < 0) { + appendCMPQExpBuffer(&conn->errorMessage, "could not set socket to TCP no delay mode: \n"); + return 0; + } +#endif + + return 1; +} + +/* ---------- + * connectFailureMessage - + * create a friendly error message on connection failure. 
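+ *		The message names the host (or hostaddr) and port we were trying
+ *		to reach, falling back to "???" when neither host field was set.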
+ * ---------- + */ +static void connectFailureMessage(CM_Conn* conn) +{ + appendCMPQExpBuffer(&conn->errorMessage, + "could not connect to server: \n" + "\tIs the server running on host \"%s\" and accepting\n" + "\tTCP/IP connections on port %s?\n", + conn->pghostaddr != NULL ? conn->pghostaddr : (conn->pghost != NULL ? conn->pghost : "???"), + conn->pgport); +} + +/* ---------- + * connectCMStart - + * Begin the process of making a connection to the backend. + * + * Returns 1 if successful, 0 if not. + * ---------- + */ +static int connectCMStart(CM_Conn* conn) +{ + int portnum = 0; + char portstr[128]; + struct addrinfo* addrs = NULL; + struct addrinfo hint = {0}; + const char* node = NULL; + int ret; + errno_t rc = 0; + + if (conn == NULL) { + return 0; + } + + /* Ensure our buffers are empty */ + conn->inStart = conn->inCursor = conn->inEnd = 0; + conn->outCount = 0; + + /* + * Determine the parameters to pass to CM_getaddrinfo_all. + */ + + /* Initialize hint structure */ + rc = memset_s(&hint, sizeof(hint), 0, sizeof(hint)); + securec_check_errno(rc, ); + hint.ai_socktype = SOCK_STREAM; + hint.ai_family = AF_UNSPEC; + + /* Set up port number as a string */ + if (conn->pgport != NULL && conn->pgport[0] != '\0') { + portnum = atoi(conn->pgport); + } + rc = snprintf_s(portstr, sizeof(portstr), sizeof(portstr) - 1, "%d", portnum); + securec_check_ss_c(rc, "\0", "\0"); + + if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0') { + /* Using pghostaddr avoids a hostname lookup */ + node = conn->pghostaddr; + hint.ai_family = AF_UNSPEC; + hint.ai_flags = AI_NUMERICHOST; + } else if (conn->pghost != NULL && conn->pghost[0] != '\0') { + /* Using pghost, so we have to look-up the hostname */ + node = conn->pghost; + hint.ai_family = AF_UNSPEC; + } else { + /* Without Unix sockets, default to localhost instead */ + node = "localhost"; + hint.ai_family = AF_UNSPEC; + } + + /* Use CM_getaddrinfo_all() to resolve the address */ + ret = cm_getaddrinfo_all(node, portstr, &hint, &addrs); + if (ret || (addrs == NULL)) { + if (node != NULL) { + appendCMPQExpBuffer( + &conn->errorMessage, "could not translate host name \"%s\" to address: %s\n", node, gai_strerror(ret)); + } else { + appendCMPQExpBuffer(&conn->errorMessage, + "could not translate Unix-domain socket path \"%s\" to address: %s\n", + portstr, + gai_strerror(ret)); + } + if (addrs != NULL) { + cm_freeaddrinfo_all(hint.ai_family, addrs); + } + goto connect_errReturn; + } + + /* + * Set up to try to connect, with protocol 3.0 as the first attempt. + */ + conn->addrlist = addrs; + conn->addr_cur = addrs; + conn->addrlist_family = hint.ai_family; + conn->status = CONNECTION_NEEDED; + + /* + * The code for processing CONNECTION_NEEDED state is in CMPQconnectPoll(), + * so that it can easily be re-executed if needed again during the + * asynchronous startup process. However, we must run it once here, + * because callers expect a success return from this routine to mean that + * we are in PGRES_POLLING_WRITING connection state. + */ + if (CMPQconnectPoll(conn) == PGRES_POLLING_WRITING) { + return 1; + } + +connect_errReturn: + if (conn->sock >= 0) { + close(conn->sock); + conn->sock = -1; + } + conn->status = CONNECTION_BAD; + return 0; +} + +/* + * connectCMComplete + * + * Block and complete a connection. + * + * Returns 1 on success, 0 on failure. 
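+ * On failure the status is left as CONNECTION_BAD and errorMessage says
+ * why; on success the status has been advanced to CONNECTION_OK.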
+ */
+static int connectCMComplete(CM_Conn* conn)
+{
+    CMPostgresPollingStatusType flag = PGRES_POLLING_WRITING;
+    time_t finish_time = ((time_t)-1);
+
+    if (conn == NULL || conn->status == CONNECTION_BAD) {
+        return 0;
+    }
+
+    /*
+     * Set up a time limit, if connect_timeout isn't zero.
+     */
+    if (conn->connect_timeout != NULL) {
+        int timeout = atoi(conn->connect_timeout);
+
+        if (timeout > 0) {
+            /*
+             * Rounding could cause connection to fail; need at least 2 secs
+             */
+            if (timeout < 2) {
+                timeout = 2;
+            }
+            /* calculate the finish time based on start + timeout */
+            finish_time = time(NULL) + timeout;
+        }
+    }
+
+    for (;;) {
+        /*
+         * Wait, if necessary. Note that the initial state (just after
+         * PQconnectCMStart) is to wait for the socket to select for writing.
+         */
+        switch (flag) {
+            case PGRES_POLLING_OK:
+                /* Reset stored error messages since we now have a working connection */
+                resetCMPQExpBuffer(&conn->errorMessage);
+                return 1; /* success! */
+
+            case PGRES_POLLING_READING:
+                if (cmpqWaitTimed(1, 0, conn, finish_time)) {
+                    conn->status = CONNECTION_BAD;
+                    return 0;
+                }
+                break;
+
+            case PGRES_POLLING_WRITING:
+                if (cmpqWaitTimed(0, 1, conn, finish_time)) {
+                    conn->status = CONNECTION_BAD;
+                    return 0;
+                }
+                break;
+
+            default:
+                /* Just in case we failed to set it in CMPQconnectPoll */
+                conn->status = CONNECTION_BAD;
+                return 0;
+        }
+
+        /*
+         * Now try to advance the state machine.
+         */
+        flag = CMPQconnectPoll(conn);
+    }
+}
+
+/* ----------------
+ * CMPQconnectPoll
+ *
+ * Poll an asynchronous connection.
+ *
+ * Returns a CMPostgresPollingStatusType.
+ * Before calling this function, use select(2) to determine when data
+ * has arrived.
+ *
+ * You must call CMPQfinish whether or not this fails.
+ */
+CMPostgresPollingStatusType CMPQconnectPoll(CM_Conn* conn)
+{
+    errno_t rc = EOK;
+
+    if (conn == NULL) {
+        return PGRES_POLLING_FAILED;
+    }
+
+    /* Get the new data */
+    switch (conn->status) {
+        /*
+         * We really shouldn't have been polled in these two cases, but we
+         * can handle it.
+         */
+        case CONNECTION_BAD:
+            return PGRES_POLLING_FAILED;
+        case CONNECTION_OK:
+            return PGRES_POLLING_OK;
+
+        /* These are reading states */
+        case CONNECTION_AWAITING_RESPONSE:
+        case CONNECTION_AUTH_OK: {
+            /* Load waiting data */
+            int flushResult;
+            /*
+             * If data remains unsent, send it. Else we might be waiting for the
+             * result of a command the backend hasn't even got yet.
+             */
+            while ((flushResult = cmpqFlush(conn)) > 0) {
+                if (cmpqWait(false, true, conn)) {
+                    flushResult = -1;
+                    break;
+                }
+            }
+
+            int n = cmpqReadData(conn);
+
+            if (n < 0) {
+                goto error_return;
+            }
+            if (n == 0) {
+                return PGRES_POLLING_READING;
+            }
+
+            break;
+        }
+
+        /* These are writing states, so we just proceed. */
+        case CONNECTION_STARTED:
+        case CONNECTION_MADE:
+            break;
+
+        case CONNECTION_NEEDED:
+            break;
+
+        default:
+            appendCMPQExpBuffer(&conn->errorMessage,
+                "invalid connection state, "
+                "probably indicative of memory corruption\n");
+            goto error_return;
+    }
+
+keep_going: /* We will come back to here until there is
+             * nothing left to do. */
+    switch (conn->status) {
+        case CONNECTION_NEEDED: {
+            /*
+             * Try to initiate a connection to one of the addresses
+             * returned by cm_getaddrinfo_all(). conn->addr_cur is the
+             * next one to try. We fail when we run out of addresses
+             * (reporting the error returned for the *last* alternative,
+             * which may not be what users expect :-().
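+             * For example, if a host name resolved to one IPv6 and one
+             * IPv4 address and both attempts failed, only the IPv4 error
+             * would be reported.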
+             */
+            while (conn->addr_cur != NULL) {
+                struct addrinfo* addr_cur = conn->addr_cur;
+
+                /* Remember current address for possible error msg */
+                rc = memcpy_s(&conn->raddr.addr, sizeof(conn->raddr.addr), addr_cur->ai_addr, addr_cur->ai_addrlen);
+                securec_check_c(rc, "\0", "\0");
+                conn->raddr.salen = addr_cur->ai_addrlen;
+
+                /* Open a socket */
+                conn->sock = socket(addr_cur->ai_family, SOCK_STREAM, 0);
+                if (conn->sock < 0) {
+                    /*
+                     * ignore socket() failure if we have more addresses
+                     * to try
+                     */
+                    if (addr_cur->ai_next != NULL) {
+                        conn->addr_cur = addr_cur->ai_next;
+                        continue;
+                    }
+                    appendCMPQExpBuffer(&conn->errorMessage, "could not create socket: \n");
+                    break;
+                }
+
+                if (conn->pglocalhost == NULL) {
+                    appendCMPQExpBuffer(&conn->errorMessage, "could not find localhost, localhost is null \n");
+                    break;
+                }
+
+                struct sockaddr_in localaddr;
+
+                rc = memset_s(&localaddr, sizeof(sockaddr_in), 0, sizeof(sockaddr_in));
+                securec_check_errno(rc, );
+                localaddr.sin_family = AF_INET;
+                localaddr.sin_addr.s_addr = inet_addr(conn->pglocalhost);
+                /* Any local port will do. */
+                localaddr.sin_port = 0;
+
+                rc = bind(conn->sock, (struct sockaddr*)&localaddr, sizeof(localaddr));
+                if (rc != 0) {
+                    appendCMPQExpBuffer(
+                        &conn->errorMessage, "could not bind localhost:%s, result is %d \n", conn->pglocalhost, rc);
+                    break;
+                }
+
+#ifdef F_SETFD
+                if (fcntl(conn->sock, F_SETFD, FD_CLOEXEC) == -1) {
+                    appendCMPQExpBuffer(&conn->errorMessage, "could not set socket(FD_CLOEXEC): %d\n", SOCK_ERRNO);
+                    closesocket(conn->sock);
+                    conn->sock = -1;
+                    conn->addr_cur = addr_cur->ai_next;
+                    continue;
+                }
+#endif /* F_SETFD */
+
+                /*
+                 * Random port reuse needs SO_REUSEADDR turned on.
+                 * It must not rely on the bind interface, because a socket
+                 * bound through bind() owns its random port privately.
+                 * SO_REUSEPORT solves this problem since kernel 3.9.
+                 */
+                if (!IS_AF_UNIX(addr_cur->ai_family)) {
+                    int on = 1;
+
+                    if ((setsockopt(conn->sock, SOL_SOCKET, SO_REUSEADDR, (char*)&on, sizeof(on))) == -1) {
+                        appendCMPQExpBuffer(&conn->errorMessage, "setsockopt(SO_REUSEADDR) failed: %d\n", SOCK_ERRNO);
+                        closesocket(conn->sock);
+                        conn->sock = -1;
+                        conn->addr_cur = addr_cur->ai_next;
+                        continue;
+                    }
+                }
+
+                /*
+                 * Select socket options: no delay of outgoing data for
+                 * TCP sockets, nonblock mode, close-on-exec. Fail if any
+                 * of this fails.
+                 */
+                if (!IS_AF_UNIX(addr_cur->ai_family)) {
+                    if (!connectNoDelay(conn)) {
+                        close(conn->sock);
+                        conn->sock = -1;
+                        conn->addr_cur = addr_cur->ai_next;
+                        continue;
+                    }
+                }
+
+                if (
+#ifndef WIN32
+                    !IS_AF_UNIX(addr_cur->ai_family) &&
+#endif
+                    !pg_fe_set_noblock(conn->sock)) {
+                    appendCMPQExpBuffer(
+                        &conn->errorMessage, "could not set socket to non-blocking mode: %d\n", SOCK_ERRNO);
+                    close(conn->sock);
+                    conn->sock = -1;
+                    conn->addr_cur = addr_cur->ai_next;
+                    continue;
+                }
+
+                /*
+                 * Start/make connection. This should not block, since we
+                 * are in nonblock mode. If it does, well, too bad.
+                 */
+                if (connect(conn->sock, addr_cur->ai_addr, addr_cur->ai_addrlen) < 0) {
+                    if (SOCK_ERRNO == EINPROGRESS || SOCK_ERRNO == EWOULDBLOCK || SOCK_ERRNO == EINTR ||
+                        SOCK_ERRNO == 0) {
+                        /*
+                         * This is fine - we're in non-blocking mode, and
+                         * the connection is in progress. Tell caller to
+                         * wait for write-ready on socket.
+                         */
+                        conn->status = CONNECTION_STARTED;
+                        return PGRES_POLLING_WRITING;
+                    }
+                    /* otherwise, trouble */
+                } else {
+                    /*
+                     * Hm, we're connected already --- seems the "nonblock
+                     * connection" wasn't. Advance the state machine and
+                     * go do the next stuff.
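+                     * (This is typically seen on loopback connections.)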
+ */ + conn->status = CONNECTION_STARTED; + goto keep_going; + } + + /* + * This connection failed --- set up error report, then + * close socket (do it this way in case close() affects + * the value of errno...). We will ignore the connect() + * failure and keep going if there are more addresses. + */ + connectFailureMessage(conn); + if (conn->sock >= 0) { + close(conn->sock); + conn->sock = -1; + } + + /* + * Try the next address, if any. + */ + conn->addr_cur = addr_cur->ai_next; + } /* loop over addresses */ + + /* + * Ooops, no more addresses. An appropriate error message is + * already set up, so just set the right status. + */ + goto error_return; + } + + case CONNECTION_STARTED: { + int optval; + size_t optlen = sizeof(optval); + + /* + * Write ready, since we've made it here, so the connection + * has been made ... or has failed. + */ + + /* + * Now check (using getsockopt) that there is not an error + * state waiting for us on the socket. + */ + + if (getsockopt(conn->sock, SOL_SOCKET, SO_ERROR, (char*)&optval, (socklen_t*)&optlen) == -1) { + appendCMPQExpBuffer(&conn->errorMessage, libpq_gettext("could not get socket error status: \n")); + goto error_return; + } else if (optval != 0) { + /* + * When using a nonblocking connect, we will typically see + * connect failures at this point, so provide a friendly + * error message. + */ + connectFailureMessage(conn); + + /* + * If more addresses remain, keep trying, just as in the + * case where connect() returned failure immediately. + */ + if (conn->addr_cur->ai_next != NULL) { + if (conn->sock >= 0) { + close(conn->sock); + conn->sock = -1; + } + conn->addr_cur = conn->addr_cur->ai_next; + conn->status = CONNECTION_NEEDED; + goto keep_going; + } + goto error_return; + } + + /* Fill in the client address */ + conn->laddr.salen = sizeof(conn->laddr.addr); + if (getsockname(conn->sock, (struct sockaddr*)&conn->laddr.addr, (socklen_t*)&conn->laddr.salen) < 0) { + appendCMPQExpBuffer(&conn->errorMessage, "could not get client address from socket:\n"); + goto error_return; + } + + /* + * Make sure we can write before advancing to next step. + */ + conn->status = CONNECTION_MADE; + return PGRES_POLLING_WRITING; + } + + case CONNECTION_MADE: { + CM_StartupPacket* sp = (CM_StartupPacket*)malloc(sizeof(CM_StartupPacket)); + if (sp == NULL) { + appendCMPQExpBuffer(&conn->errorMessage, "malloc failed, size: %ld \n", sizeof(CM_StartupPacket)); + goto error_return; + } + int packetlen = sizeof(CM_StartupPacket); + + rc = memset_s(sp, sizeof(CM_StartupPacket), 0, sizeof(CM_StartupPacket)); + securec_check_errno(rc, ); + + if (conn->pguser != NULL) { + rc = strncpy_s(sp->sp_user, SP_USER, conn->pguser, SP_USER - 1); + securec_check_errno(rc, ); + sp->sp_user[SP_USER - 1] = '\0'; + } + + if (conn->pglocalhost != NULL) { + rc = strncpy_s(sp->sp_host, SP_HOST, conn->pglocalhost, SP_HOST - 1); + securec_check_errno(rc, ); + sp->sp_host[SP_HOST - 1] = '\0'; + } + + /* + * Build a startup packet. We tell the CM server/proxy our + * PGXC Node name and whether we are a proxy or not. + * + * When the connection is made from the proxy, we let the CM + * server know about it so that some special headers are + * handled correctly by the server. + */ + rc = strncpy_s(sp->sp_node_name, SP_NODE_NAME, conn->gc_node_name, SP_NODE_NAME - 1); + securec_check_errno(rc, ); + sp->sp_node_name[SP_NODE_NAME - 1] = '\0'; + sp->sp_remotetype = conn->remote_type; + sp->node_id = conn->node_id; + sp->sp_ispostmaster = conn->is_postmaster; + + /* + * Send the startup packet. 
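+             * The packet is the fixed-size CM_StartupPacket filled in above;
+             * on the wire CMPQPacketSend frames it as the type byte 'A'
+             * followed by a 4-byte length word (network order, counting
+             * itself) and then the packet body.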
+ *
+ * Theoretically, this could block, but it really shouldn't
+ * since we only got here if the socket is write-ready.
+ */
+            if (CMPQPacketSend(conn, 'A', (char*)sp, packetlen) != STATUS_OK) {
+                appendCMPQExpBuffer(&conn->errorMessage, "could not send startup packet: \n");
+                free(sp);
+                goto error_return;
+            }
+
+            conn->status = CONNECTION_AWAITING_RESPONSE;
+
+            /* Clean up startup packet */
+            free(sp);
+
+            return PGRES_POLLING_READING;
+        }
+
+        /*
+         * Handle authentication exchange: wait for postmaster messages
+         * and respond as necessary.
+         */
+        case CONNECTION_AWAITING_RESPONSE: {
+            char beresp;
+            int msgLength;
+            int avail;
+
+            /*
+             * Scan the message from current point (note that if we find
+             * the message is incomplete, we will return without advancing
+             * inStart, and resume here next time).
+             */
+            conn->inCursor = conn->inStart;
+
+            /* Read type byte */
+            if (cmpqGetc(&beresp, conn)) {
+                /* We'll come back when there is more data */
+                return PGRES_POLLING_READING;
+            }
+
+            /*
+             * Validate message type: we expect one of a default request
+             * without authentication, an error, or a Kerberos authentication
+             * request here. Anything else probably means it's not CM on the
+             * other end at all.
+             */
+            if (!(beresp == 'R' || beresp == 'E' || beresp == 'P')) {
+                appendCMPQExpBuffer(&conn->errorMessage,
+                    "expected authentication request from "
+                    "server, but received %c\n",
+                    beresp);
+                goto error_return;
+            }
+
+            /* Read message length word */
+            if (cmpqGetInt(&msgLength, 4, conn)) {
+                /* We'll come back when there is more data */
+                return PGRES_POLLING_READING;
+            }
+            /*
+             * Try to validate message length before using it.
+             * Authentication requests can't be very large, although GSS
+             * auth requests may not be that small. Errors can be a
+             * little larger, but not huge. If we see a large apparent
+             * length in an error, it means we're really talking to a
+             * pre-3.0-protocol server; cope.
+             */
+            if (beresp == 'R' && (msgLength < 4 || msgLength > 2000)) {
+                appendCMPQExpBuffer(&conn->errorMessage,
+                    libpq_gettext("expected authentication request from "
+                                  "server, but received %c\n"),
+                    beresp);
+                goto error_return;
+            }
+
+            /* Handle errors. */
+            if (beresp == 'E') {
+                if (cmpqGets_append(&conn->errorMessage, conn)) {
+                    /* We'll come back when there is more data */
+                    return PGRES_POLLING_READING;
+                }
+                /* OK, we read the message; mark data consumed */
+                conn->inStart = conn->inCursor;
+                goto error_return;
+            }
+
+            msgLength -= 4;
+            if (msgLength <= 0) {
+                goto error_return;
+            }
+            if (beresp == 'P') {
+                int llen = msgLength;
+                conn->gss_inbuf.length = llen;
+                FREE_AND_RESET(conn->gss_inbuf.value);
+                conn->gss_inbuf.value = malloc(llen);
+                if (conn->gss_inbuf.value == NULL) {
+                    appendCMPQExpBuffer(&conn->errorMessage,
+                        libpq_gettext("failed to allocate the gss_inbuf memory:"
+                                      "out of memory: request_size=%d.\n"),
+                        llen);
+                    goto error_return;
+                }
+                cmpqGetnchar((char*)conn->gss_inbuf.value, llen, conn);
+                /* OK, we successfully read the message; mark data consumed */
+                conn->inStart = conn->inCursor;
+                rc = CMGssContinue(conn);
+                if (rc != STATUS_OK) {
+                    FREE_AND_RESET(conn->gss_inbuf.value);
+                    goto error_return;
+                }
+                goto keep_going;
+            }
+
+            avail = conn->inEnd - conn->inCursor;
+            if (avail < msgLength) {
+                /*
+                 * Before returning, try to enlarge the input buffer if
+                 * needed to hold the whole message; see notes in
+                 * pqParseInput3.
+ */ + if (cmpqCheckInBufferSpace((size_t)(conn->inCursor + msgLength), conn)) + goto error_return; + /* We'll come back when there is more data */ + return PGRES_POLLING_READING; + } + + /* Get the type of request. */ + int areq = 0; + if (cmpqGetInt(&areq, 4, conn)) { + /* We'll come back when there are more data */ + return PGRES_POLLING_READING; + } + + if (areq == CM_AUTH_REQ_OK) { + /* OK, we successfully read the message; mark data consumed */ + conn->inStart = conn->inCursor; + /* We are done with authentication exchange */ + conn->status = CONNECTION_AUTH_OK; + /* Look to see if we have more data yet. */ + goto keep_going; + } else if (areq == CM_AUTH_REQ_GSS) { + /* OK, we successfully read the message; mark data consumed */ + resetCMPQExpBuffer(&conn->errorMessage); + conn->inStart = conn->inCursor; + rc = CMGssStartup(conn); + if (rc != STATUS_OK) + goto error_return; + goto keep_going; + } else if (areq == CM_AUTH_REQ_GSS_CONT) { + int llen = msgLength - 4; + if (llen <= 0) { + goto error_return; + } + conn->gss_inbuf.length = llen; + FREE_AND_RESET(conn->gss_inbuf.value); + conn->gss_inbuf.value = malloc(llen); + if (conn->gss_inbuf.value == NULL) { + appendCMPQExpBuffer(&conn->errorMessage, + libpq_gettext("failed to allocate memory for gss_inbuf:" + "out of memory: request size=%d.\n"), + llen); + goto error_return; + } + cmpqGetnchar((char*)conn->gss_inbuf.value, llen, conn); + /* OK, we successfully read the message; mark data consumed */ + conn->inStart = conn->inCursor; + rc = CMGssContinue(conn); + if (rc != STATUS_OK) { + FREE_AND_RESET(conn->gss_inbuf.value); + goto error_return; + } + cmpqFlush(conn); + goto keep_going; + } else { + goto error_return; + } + } + + case CONNECTION_AUTH_OK: { + /* We can release the address list now. */ + cm_freeaddrinfo_all(conn->addrlist_family, conn->addrlist); + conn->addrlist = NULL; + conn->addr_cur = NULL; + + /* Otherwise, we are open for business! */ + conn->status = CONNECTION_OK; + return PGRES_POLLING_OK; + } + + default: + appendCMPQExpBuffer(&conn->errorMessage, + "invalid connection state %c, " + "probably indicative of memory corruption\n", + conn->status); + goto error_return; + } + + /* Unreachable */ + +error_return: + /* + * We used to close the socket at this point, but that makes it awkward + * for those above us if they wish to remove this socket from their own + * records (an fd_set for example). We'll just have this socket closed + * when CMPQfinish is called (which is compulsory even after an error, since + * the connection structure must be freed). + */ + conn->status = CONNECTION_BAD; + return PGRES_POLLING_FAILED; +} + +/* + * makeEmptyCM_Conn + * - create a CM_Conn data structure with (as yet) no interesting data + */ +static CM_Conn* makeEmptyCM_Conn(void) +{ + CM_Conn* conn = NULL; + errno_t rc = 0; + + conn = (CM_Conn*)malloc(sizeof(CM_Conn)); + if (conn == NULL) { + write_runlog(DEBUG1, "[conn abnormal] Out of memory for CmServer_conn!\n"); + return conn; + } + + /* Zero all pointers and booleans */ + rc = memset_s(conn, sizeof(CM_Conn), 0, sizeof(CM_Conn)); + securec_check_errno(rc, FREE_AND_RESET(conn)); + + conn->status = CONNECTION_BAD; + conn->result = NULL; + + /* + * We try to send at least 8K at a time, which is the usual size of pipe + * buffers on Unix systems. That way, when we are sending a large amount + * of data, we avoid incurring extra kernel context swaps for partial + * bufferloads. The output buffer is initially made 16K in size, and we + * try to dump it after accumulating 8K. 
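+     * (cmpqPutMsgEnd implements the dump: once outCount reaches 8K it
+     * sends as many whole 8K blocks as it has buffered.)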
+ * + * With the same goal of minimizing context swaps, the input buffer will + * be enlarged anytime it has less than 8K free, so we initially allocate + * twice that. + */ + conn->inBufSize = 16 * 1024; + conn->inBuffer = (char*)malloc(conn->inBufSize); + conn->outBufSize = 16 * 1024; + conn->outBuffer = (char*)malloc(conn->outBufSize); + initCMPQExpBuffer(&conn->errorMessage); + initCMPQExpBuffer(&conn->workBuffer); + + if (conn->inBuffer == NULL || conn->outBuffer == NULL || PQExpBufferBroken(&conn->errorMessage) || + PQExpBufferBroken(&conn->workBuffer)) { + /* out of memory already :-( */ + write_runlog(LOG, "[conn abnormal] Out of memory for inBuffer and outBuffer!\n"); + freeCM_Conn(conn); + conn = NULL; + } + + return conn; +} + +/* + * freeCM_Conn + * - free an idle (closed) CM_Conn data structure + * + * NOTE: this should not overlap any functionality with closeCM_Conn(). + * Clearing/resetting of transient state belongs there; what we do here is + * release data that is to be held for the life of the CM_Conn structure. + * If a value ought to be cleared/freed during PQreset(), do it there not here. + */ +static void freeCM_Conn(CM_Conn* conn) +{ + FREE_AND_RESET(conn->pghost); + FREE_AND_RESET(conn->pghostaddr); + FREE_AND_RESET(conn->pgport); + FREE_AND_RESET(conn->pglocalhost); + FREE_AND_RESET(conn->pglocalport); + FREE_AND_RESET(conn->pguser); + FREE_AND_RESET(conn->connect_timeout); + FREE_AND_RESET(conn->gc_node_name); + FREE_AND_RESET(conn->inBuffer); + FREE_AND_RESET(conn->outBuffer); + FREE_AND_RESET(conn->result); + termCMPQExpBuffer(&conn->errorMessage); + termCMPQExpBuffer(&conn->workBuffer); + + OM_uint32 lmin_s = 0; + gss_release_name(&lmin_s, &conn->gss_targ_nam); + + free(conn); +} + +/* + * closeCM_Conn + * - properly close a connection to the backend + * + * This should reset or release all transient state, but NOT the connection + * parameters. On exit, the CM_Conn should be in condition to start a fresh + * connection with the same parameters (see PQreset()). + */ +static void closeCM_Conn(CM_Conn* conn) +{ + /* + * Note that the protocol doesn't allow us to send Terminate messages + * during the startup phase. + */ + if (conn->sock >= 0 && conn->status == CONNECTION_OK) { + /* + * Try to send "close connection" message to backend. Ignore any + * error. + * + * Force length word for backends may try to read that in a generic + * code + */ + cmpqPutMsgStart('X', true, conn); + cmpqPutMsgEnd(conn); + cmpqFlush(conn); + } + + /* + * Close the connection, reset all transient state, flush I/O buffers. + */ + if (conn->sock >= 0) { + close(conn->sock); + } + conn->sock = -1; + conn->status = CONNECTION_BAD; /* Well, not really _bad_ - just + * absent */ + cm_freeaddrinfo_all(conn->addrlist_family, conn->addrlist); + conn->addrlist = NULL; + conn->addr_cur = NULL; + conn->inStart = conn->inCursor = conn->inEnd = 0; + conn->outCount = 0; +} + +/* + * CMPQfinish: properly close a connection to the backend. Also frees + * the CM_Conn data structure so it shouldn't be re-used after this. + */ +void CMPQfinish(CM_Conn* conn) +{ + if (conn != NULL) { + closeCM_Conn(conn); + freeCM_Conn(conn); + conn = NULL; + } +} + +/* + * pqPacketSend() -- convenience routine to send a message to server. + * + * pack_type: the single-byte message type code. (Pass zero for startup + * packets, which have no message type code.) + * + * buf, buf_len: contents of message. The given length includes only what + * is in buf; the message type and message length fields are added here. 
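+ *
+ * For example, CMGssContinue sends its GSS token with
+ *	   CMPQPacketSend(conn, 'p', conn->gss_outbuf.value, conn->gss_outbuf.length);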
+ *
+ * RETURNS: STATUS_ERROR if the write fails, STATUS_OK otherwise.
+ * SIDE_EFFECTS: may block.
+ *
+ * Note: all messages sent with this routine have a length word, whether
+ * it's protocol 2.0 or 3.0.
+ */
+int CMPQPacketSend(CM_Conn* conn, char pack_type, const void* buf, size_t buf_len)
+{
+    int ret = 0;
+    if (conn == NULL) {
+        write_runlog(ERROR, "CMPQPacketSend failed: conn is null");
+        return STATUS_ERROR;
+    }
+
+    /* Start the message. */
+    if (cmpqPutMsgStart(pack_type, true, conn)) {
+        write_runlog(ERROR, "Start the message failed");
+        return STATUS_ERROR;
+    }
+
+    /* Send the message body. */
+    if (cmpqPutnchar((const char*)buf, buf_len, conn)) {
+        write_runlog(ERROR, "Send the message body failed");
+        return STATUS_ERROR;
+    }
+    /* Finish the message. */
+    ret = cmpqPutMsgEnd(conn);
+    if (ret < 0) {
+        write_runlog(LOG, "cmpqPutMsgEnd failed ret=%d\n", ret);
+        return STATUS_ERROR;
+    }
+
+    /* Flush to ensure backend gets it. */
+    ret = cmpqFlush(conn);
+    if (ret < 0) {
+        write_runlog(LOG, "cmpqFlush failed ret=%d\n", ret);
+        return STATUS_ERROR;
+    }
+
+    return STATUS_OK;
+}
+
+/**
+ * @brief Conninfo parser routine. Defaults are supplied (from a service file, environment variables, etc)
+ * for unspecified options, but only if use_defaults is TRUE.
+ *
+ * @return CMPQconninfoOption* If successful, a malloc'd CMPQconninfoOption array is returned.
+ * If not successful, NULL is returned and an error message is left in errorMessage.
+ */
+static CMPQconninfoOption* conninfo_parse(const char* conninfo, PQExpBuffer errorMessage, bool use_defaults)
+{
+    char* pname = NULL;
+    char* pval = NULL;
+    char* buf = NULL;
+    char* cp = NULL;
+    char* cp2 = NULL;
+    CMPQconninfoOption* options = NULL;
+    CMPQconninfoOption* option = NULL;
+    errno_t rc;
+
+    /* Make a working copy of CMPQconninfoOptions */
+    options = (CMPQconninfoOption*)malloc(sizeof(CMPQconninfoOptions));
+    if (options == NULL) {
+        printfCMPQExpBuffer(errorMessage, libpq_gettext("out of memory\n"));
+        return NULL;
+    }
+    rc = memcpy_s(options, sizeof(CMPQconninfoOptions), CMPQconninfoOptions, sizeof(CMPQconninfoOptions));
+    securec_check_c(rc, "\0", "\0");
+
+    /* Need a modifiable copy of the input string */
+    if ((buf = strdup(conninfo)) == NULL) {
+        printfCMPQExpBuffer(errorMessage, libpq_gettext("out of memory\n"));
+        CMPQconninfoFree(options);
+        options = NULL;
+        return NULL;
+    }
+    cp = buf;
+
+    while (*cp) {
+        /* Skip blanks before the parameter name */
+        if (isspace((unsigned char)*cp)) {
+            cp++;
+            continue;
+        }
+
+        /* Get the parameter name */
+        pname = cp;
+        while (*cp) {
+            if (*cp == '=') {
+                break;
+            }
+            if (isspace((unsigned char)*cp)) {
+                *cp++ = '\0';
+                while (*cp) {
+                    if (!isspace((unsigned char)*cp)) {
+                        break;
+                    }
+                    cp++;
+                }
+                break;
+            }
+            cp++;
+        }
+
+        /* Check that there is a following '=' */
+        if (*cp != '=') {
+            printfCMPQExpBuffer(
+                errorMessage, libpq_gettext("missing \"=\" after \"%s\" in connection info string\n"), pname);
+            CMPQconninfoFree(options);
+            options = NULL;
+
+            FREE_AND_RESET(buf);
+            return NULL;
+        }
+        *cp++ = '\0';
+
+        /* Skip blanks after the '=' */
+        while (*cp) {
+            if (!isspace((unsigned char)*cp)) {
+                break;
+            }
+            cp++;
+        }
+
+        /* Get the parameter value */
+        pval = cp;
+
+        if (*cp != '\'') {
+            cp2 = pval;
+            while (*cp) {
+                if (isspace((unsigned char)*cp)) {
+                    *cp++ = '\0';
+                    break;
+                }
+                if (*cp == '\\') {
+                    cp++;
+                    if (*cp != '\0') {
+                        *cp2++ = *cp++;
+                    }
+                } else {
+                    *cp2++ = *cp++;
+                }
+            }
+            *cp2 = '\0';
+        } else {
+            cp2 = pval;
+            cp++;
+            for (;;) {
+                if (*cp == '\0') {
+                    
printfCMPQExpBuffer(
+                        errorMessage, libpq_gettext("unterminated quoted string in connection info string\n"));
+                    CMPQconninfoFree(options);
+                    options = NULL;
+
+                    FREE_AND_RESET(buf);
+                    return NULL;
+                }
+                if (*cp == '\\') {
+                    cp++;
+                    if (*cp != '\0') {
+                        *cp2++ = *cp++;
+                    }
+                    continue;
+                }
+                if (*cp == '\'') {
+                    *cp2 = '\0';
+                    cp++;
+                    break;
+                }
+                *cp2++ = *cp++;
+            }
+        }
+
+        /*
+         * Now we have the name and the value. Search for the param record.
+         */
+        for (option = options; option->keyword != NULL; option++) {
+            if (strcmp(option->keyword, pname) == 0) {
+                break;
+            }
+        }
+        if (option->keyword == NULL) {
+            printfCMPQExpBuffer(errorMessage, libpq_gettext("invalid connection option \"%s\"\n"), pname);
+            CMPQconninfoFree(options);
+            options = NULL;
+
+            FREE_AND_RESET(buf);
+            return NULL;
+        }
+
+        /*
+         * Store the value
+         */
+        FREE_AND_RESET(option->val);
+        option->val = strdup(pval);
+        if (option->val == NULL) {
+            printfCMPQExpBuffer(errorMessage, libpq_gettext("out of memory\n"));
+            CMPQconninfoFree(options);
+            options = NULL;
+
+            FREE_AND_RESET(buf);
+            return NULL;
+        }
+    }
+
+    /* Done with the modifiable input string */
+    FREE_AND_RESET(buf);
+
+    return options;
+}
+
+static char* conninfo_getval(CMPQconninfoOption* connOptions, const char* keyword)
+{
+    CMPQconninfoOption* option = NULL;
+
+    for (option = connOptions; option->keyword != NULL; option++) {
+        if (strcmp(option->keyword, keyword) == 0) {
+            return option->val;
+        }
+    }
+
+    return NULL;
+}
+
+void CMPQconninfoFree(CMPQconninfoOption* connOptions)
+{
+    CMPQconninfoOption* option = NULL;
+
+    if (connOptions == NULL) {
+        return;
+    }
+
+    for (option = connOptions; option->keyword != NULL; option++) {
+        FREE_AND_RESET(option->val);
+    }
+    free(connOptions);
+}
+
+CMConnStatusType CMPQstatus(const CM_Conn* conn)
+{
+    if (conn == NULL) {
+        return CONNECTION_BAD;
+    }
+    return conn->status;
+}
+
+char* CMPQerrorMessage(const CM_Conn* conn)
+{
+    if (conn == NULL) {
+        return libpq_gettext("connection pointer is NULL\n");
+    }
+
+    return conn->errorMessage.data;
+}
+
+/*
+ * Continue GSS authentication with next token as needed.
+ */
+static int CMGssContinue(CM_Conn* conn)
+{
+    OM_uint32 maj_stat = 0;
+    OM_uint32 min_stat = 0;
+    OM_uint32 lmin_s = 0;
+    char* krbconfig = NULL;
+    int retry_count = 0;
+
+retry_init:
+    /*
+     * This function is used for internal and external connections, do not add lock here.
+     * If gss init failed, retry 10 times.
+     * Clean the config cache and ticket cache set by hadoop remote read.
+     */
+#ifndef ENABLE_LITE_MODE
+    krb5_clean_cache_profile_path();
+#endif
+
+    /* Krb5 config file priority: setpath > env(MPPDB_KRB5_FILE_PATH) > default(/etc/krb5.conf). */
+    krbconfig = gs_getenv_with_check("MPPDB_KRB5_FILE_PATH", conn);
+    if (krbconfig == NULL) {
+        appendCMPQExpBuffer(&conn->errorMessage, "get env MPPDB_KRB5_FILE_PATH failed.\n");
+        return STATUS_ERROR;
+    }
+#ifndef ENABLE_LITE_MODE
+    (void)krb5_set_profile_path(krbconfig);
+#endif
+
+    /*
+     * The first time we come here (with no ticket cache), gss_init_sec_context will send TGS_REQ
+     * to the kerberos server to get a ticket and then cache it in the default_ccache_name configured
+     * in MPPDB_KRB5_FILE_PATH.
+     */
+    maj_stat = gss_init_sec_context(&min_stat,
+        GSS_C_NO_CREDENTIAL,
+        &conn->gss_ctx,
+        conn->gss_targ_nam,
+        GSS_C_NO_OID,
+        GSS_C_MUTUAL_FLAG,
+        0,
+        GSS_C_NO_CHANNEL_BINDINGS,
+        (conn->gss_ctx == GSS_C_NO_CONTEXT) ? 
GSS_C_NO_BUFFER : &conn->gss_inbuf, + NULL, + &conn->gss_outbuf, + NULL, + NULL); + + if (conn->gss_outbuf.length != 0) { + /* + * GSS generated data to send to the server. We don't care if it's the + * first or subsequent packet, just send the same kind of password + * packet. + */ + if (CMPQPacketSend(conn, 'p', conn->gss_outbuf.value, conn->gss_outbuf.length) != STATUS_OK) { + printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("Send p type packet failed\n")); + (void)gss_release_buffer(&lmin_s, &conn->gss_outbuf); + if (conn->gss_ctx != NULL) { + (void)gss_delete_sec_context(&lmin_s, &conn->gss_ctx, GSS_C_NO_BUFFER); + } + if (conn->gss_inbuf.value != NULL) { + FREE_AND_RESET(conn->gss_inbuf.value); + conn->gss_inbuf.length = 0; + } + return STATUS_ERROR; + } + conn->status = CONNECTION_AWAITING_RESPONSE; + } + if (conn->gss_inbuf.value != NULL) { + FREE_AND_RESET(conn->gss_inbuf.value); + conn->gss_inbuf.length = 0; + } + (void)gss_release_buffer(&lmin_s, &conn->gss_outbuf); + + if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED) { + OM_uint32 qp_min_s = 0; + OM_uint32 qp_msg_ctx = 0; + gss_buffer_desc qp_msg; + gss_display_status(&qp_min_s, maj_stat, GSS_C_GSS_CODE, GSS_C_NO_OID, &qp_msg_ctx, &qp_msg); + fprintf(stderr, "gss failed: %s\n", (char*)qp_msg.value); + gss_release_buffer(&qp_min_s, &qp_msg); + gss_display_status(&qp_min_s, min_stat, GSS_C_MECH_CODE, GSS_C_NO_OID, &qp_msg_ctx, &qp_msg); + fprintf(stderr, "gss failed: %s\n", (char*)qp_msg.value); + gss_release_buffer(&qp_min_s, &qp_msg); + + /* Retry 10 times for init context responding to scenarios such as cache renewed by kinit. */ + if (retry_count < 10) { + (void)usleep(1000); + retry_count++; + goto retry_init; + } + + gss_release_name(&lmin_s, &conn->gss_targ_nam); + if (conn->gss_ctx != NULL) { + gss_delete_sec_context(&lmin_s, &conn->gss_ctx, GSS_C_NO_BUFFER); + } + + printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("GSSAPI continuation error, more than 10 times\n")); + return STATUS_ERROR; + } + + if (maj_stat == GSS_S_COMPLETE) { + gss_release_name(&lmin_s, &conn->gss_targ_nam); + } + if (conn->gss_ctx != NULL) { + gss_delete_sec_context(&lmin_s, &conn->gss_ctx, GSS_C_NO_BUFFER); + } + + return STATUS_OK; +} + +/* + * Send initial GSS authentication token + */ +static int CMGssStartup(CM_Conn* conn) +{ + OM_uint32 maj_stat = 0; + OM_uint32 min_stat = 0; + int maxlen = -1; + gss_buffer_desc temp_gbuf; + char* krbsrvname = NULL; + char* krbhostname = NULL; + errno_t rc = EOK; + + if (!((conn->pghost != NULL) && conn->pghost[0] != '\0')) { + printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("host name must be specified\n")); + return STATUS_ERROR; + } + + if (conn->gss_ctx != NULL) { + printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("duplicate GSS authentication request\n")); + return STATUS_ERROR; + } + + /* + * Import service principal name so the proper ticket can be acquired by + * the GSSAPI system. The PGKRBSRVNAME and KRBHOSTNAME is from + * the principal. 
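+     * For example (illustrative values only), PGKRBSRVNAME=postgres and
+     * KRBHOSTNAME=node1 produce the imported name "postgres/node1".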
+     */
+    krbsrvname = gs_getenv_with_check("PGKRBSRVNAME", conn);
+    if (krbsrvname == NULL) {
+        printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("The environment PGKRBSRVNAME is null.\n"));
+        return STATUS_ERROR;
+    }
+
+    krbhostname = gs_getenv_with_check("KRBHOSTNAME", conn);
+    if (krbhostname == NULL) {
+        printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("The environment KRBHOSTNAME is null.\n"));
+        return STATUS_ERROR;
+    }
+
+    if ((MAX_INT32 - strlen(krbhostname)) < (strlen(krbsrvname) + 2)) {
+        return STATUS_ERROR;
+    }
+    maxlen = strlen(krbhostname) + strlen(krbsrvname) + 2;
+    temp_gbuf.value = (char*)malloc(maxlen);
+    if (temp_gbuf.value == NULL) {
+        printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory, remote datanode.\n"));
+        return STATUS_ERROR;
+    }
+
+    rc = snprintf_s((char*)temp_gbuf.value, maxlen, maxlen - 1, "%s/%s", krbsrvname, krbhostname);
+    securec_check_ss_c(rc, "", "");
+    temp_gbuf.length = strlen((char*)temp_gbuf.value);
+
+    maj_stat = gss_import_name(&min_stat, &temp_gbuf, (gss_OID)GSS_KRB5_NT_PRINCIPAL_NAME, &conn->gss_targ_nam);
+    FREE_AND_RESET(temp_gbuf.value);
+    if (maj_stat != GSS_S_COMPLETE) {
+        printfCMPQExpBuffer(&conn->errorMessage, libpq_gettext("GSSAPI name import error.\n"));
+        return STATUS_ERROR;
+    }
+
+    /*
+     * Initial packet is the same as a continuation packet with no initial
+     * context.
+     */
+    conn->gss_ctx = GSS_C_NO_CONTEXT;
+
+    return CMGssContinue(conn);
+}
+
+static void check_backend_env(const char* input_env_value, CM_Conn* conn)
+{
+    const int MAXENVLEN = 1024;
+    const char* danger_character_list[] = {";", "`", "\\", "'", "\"", ">", "<", "$", "&", "|", "!", "\n", NULL};
+    int i = 0;
+
+    if (input_env_value == NULL || strlen(input_env_value) >= MAXENVLEN) {
+        appendCMPQExpBuffer(&conn->errorMessage, "wrong env value.\n");
+        return;
+    }
+
+    for (i = 0; danger_character_list[i] != NULL; i++) {
+        if (strstr((const char*)input_env_value, danger_character_list[i])) {
+            appendCMPQExpBuffer(&conn->errorMessage,
+                "env_value(%s) contains invalid symbol(%s).\n",
+                input_env_value,
+                danger_character_list[i]);
+        }
+    }
+}
+
+static char* gs_getenv_with_check(const char* envKey, CM_Conn* conn)
+{
+    char* result = NULL;
+    char* envValue = gs_getenv_r(envKey);
+
+    if (envValue != NULL) {
+        check_backend_env(envValue, conn);
+        result = envValue;
+    }
+
+    return result;
+}
diff --git a/src/lib/cm_communication/cm_feconnect/fe-misc.cpp b/src/lib/cm_communication/cm_feconnect/fe-misc.cpp
new file mode 100644
index 000000000..03df232a2
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/fe-misc.cpp
@@ -0,0 +1,956 @@
+/* -------------------------------------------------------------------------
+ *
+ * FILE
+ *	  fe-misc.cpp
+ *
+ * DESCRIPTION
+ *	  miscellaneous useful functions
+ *
+ * The communication routines here are analogous to the ones in
+ * backend/libpq/pqcomm.c and backend/libpq/pqcomprim.c, but operate
+ * in the considerably different environment of the frontend libpq.
+ * In particular, we work with a bare nonblock-mode socket, rather than
+ * a stdio stream, so that we can avoid unwanted blocking of the application.
+ *
+ * XXX: MOVE DEBUG PRINTOUT TO HIGHER LEVEL. As is, block and restart
+ * will cause repeat printouts.
+ *
+ * We must speak the same transmitted data representations as the backend
+ * routines.
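+ * In practice that means 2- and 4-byte integers travel in network byte
+ * order; see cmpqGetInt below.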
+ *
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ * IDENTIFICATION
+ *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.137 2008/12/11 07:34:09 petere Exp $
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "cm/cm_c.h"
+
+#include <time.h>
+#include <sys/time.h>
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <sys/socket.h>
+#include <sys/select.h>
+#include <poll.h>
+
+#include "cm/libpq-fe.h"
+#include "cm/libpq-int.h"
+
+static int cmpqPutMsgBytes(const void* buf, size_t len, CM_Conn* conn);
+static int cmpqSendSome(CM_Conn* conn, int len);
+static int cmpqSocketCheck(CM_Conn* conn, int forRead, int forWrite, time_t end_time);
+static int cmpqSocketPoll(int sock, int forRead, int forWrite, time_t end_time);
+
+#define BYTES2 2
+#define BYTES4 4
+
+/*
+ * cmpqGetc: get 1 character from the connection
+ *
+ * All these routines return 0 on success, EOF on error.
+ * Note that for the Get routines, EOF only means there is not enough
+ * data in the buffer, not that there is necessarily a hard error.
+ */
+int cmpqGetc(char* result, CM_Conn* conn)
+{
+    if (conn->inCursor >= conn->inEnd) {
+        return EOF;
+    }
+
+    *result = conn->inBuffer[conn->inCursor++];
+
+    if (conn->Pfdebug != NULL) {
+        fprintf(conn->Pfdebug, "From backend> %c\n", *result);
+    }
+
+    return 0;
+}
+
+/*
+ * cmpqGets[_append]:
+ * get a null-terminated string from the connection,
+ * and store it in an expansible PQExpBuffer.
+ * If we run out of memory, all of the string is still read,
+ * but the excess characters are silently discarded.
+ */
+static int cmpqGets_internal(PQExpBuffer buf, CM_Conn* conn, bool resetbuffer)
+{
+    /* Copy conn data to locals for faster search loop */
+    char* inBuffer = conn->inBuffer;
+    int inCursor = conn->inCursor;
+    int inEnd = conn->inEnd;
+    int slen;
+
+    while (inCursor < inEnd && inBuffer[inCursor]) {
+        inCursor++;
+    }
+
+    if (inCursor >= inEnd) {
+        return EOF;
+    }
+
+    slen = inCursor - conn->inCursor;
+
+    if (resetbuffer) {
+        resetCMPQExpBuffer(buf);
+    }
+
+    appendBinaryCMPQExpBuffer(buf, inBuffer + conn->inCursor, slen);
+
+    inCursor++;
+    conn->inCursor = inCursor;
+
+    if (conn->Pfdebug != NULL) {
+        fprintf(conn->Pfdebug, "From backend> \"%s\"\n", buf->data);
+    }
+
+    return 0;
+}
+
+int cmpqGets(PQExpBuffer buf, CM_Conn* conn)
+{
+    return cmpqGets_internal(buf, conn, true);
+}
+
+int cmpqGets_append(PQExpBuffer buf, CM_Conn* conn)
+{
+    return cmpqGets_internal(buf, conn, false);
+}
+
+/*
+ * cmpqPutnchar:
+ *	write exactly len bytes to the current message
+ */
+int cmpqPutnchar(const char* s, size_t len, CM_Conn* conn)
+{
+    if (cmpqPutMsgBytes(s, len, conn)) {
+        return EOF;
+    }
+
+    if (conn->Pfdebug != NULL) {
+        fprintf(conn->Pfdebug, "To backend> %.*s\n", (int)len, s);
+    }
+
+    return 0;
+}
+
+/*
+ * cmpqGetnchar:
+ *	get a string of exactly len bytes in buffer s, no null termination
+ */
+int cmpqGetnchar(char* s, size_t len, CM_Conn* conn)
+{
+    errno_t rc = EOK;
+    if (len > (size_t)(conn->inEnd - conn->inCursor)) {
+        return EOF;
+    }
+
+    rc = memcpy_s(s, len, conn->inBuffer + conn->inCursor, len);
+    securec_check_c(rc, "", "");
+    /* no terminating null */
+
+    conn->inCursor += len;
+
+    if (conn->Pfdebug != NULL) {
+        fprintf(conn->Pfdebug, "From backend (%lu)> %.*s\n", (unsigned long)len, (int)len, s);
+    }
+
+    return 0;
+}
+
+/*
+ * cmpqGetInt
+ *	read a 2 or 4 byte integer and convert from network byte order
+ * 
to local byte order + */ +int cmpqGetInt(int* result, size_t bytes, CM_Conn* conn) +{ + uint16 tmp2; + uint32 tmp4; + errno_t rc; + + switch (bytes) { + case 2: + if (conn->inCursor + BYTES2 > conn->inEnd) { + return EOF; + } + rc = memcpy_s(&tmp2, BYTES2, conn->inBuffer + conn->inCursor, BYTES2); + securec_check_c(rc, "\0", "\0"); + conn->inCursor += BYTES2; + *result = (int)ntohs(tmp2); + break; + case 4: + if (conn->inCursor + BYTES4 > conn->inEnd) { + return EOF; + } + rc = memcpy_s(&tmp4, BYTES4, conn->inBuffer + conn->inCursor, BYTES4); + securec_check_c(rc, "\0", "\0"); + conn->inCursor += BYTES4; + *result = (int)ntohl(tmp4); + break; + default: + if (conn->Pfdebug != NULL) { + fprintf(conn->Pfdebug, "Integer size of (%lu) bytes not supported", bytes); + } + return EOF; + } + + if (conn->Pfdebug != NULL) { + fprintf(conn->Pfdebug, "From backend (#%lu)> %d\n", (unsigned long)bytes, *result); + } + + return 0; +} + +bool cmConnSetting(CM_Conn* conn, size_t bytes_needed, bool multi) +{ + const int sizeDouble = 2; + const int size8K = 8192; + int newsize = conn->outBufSize; + char* newbuf = NULL; + + if (multi) { + do { + newsize *= sizeDouble; + } while (newsize > 0 && bytes_needed > (size_t)newsize); + } else { + do { + newsize += size8K; + } while (newsize > 0 && bytes_needed > (size_t)newsize); + } + + if (newsize > 0 && bytes_needed <= (size_t)newsize) { + newbuf = (char*)malloc(newsize); + if (newbuf != NULL) { + /* realloc succeeded */ + if (conn->outBuffer != NULL) { + errno_t rc; + rc = memcpy_s(newbuf, newsize, conn->outBuffer, conn->outBufSize); + securec_check_c(rc, "\0", "\0"); + FREE_AND_RESET(conn->outBuffer); + } + conn->outBuffer = newbuf; + conn->outBufSize = newsize; + return true; + } + } + + return false; +} + +/* + * Make sure conn's output buffer can hold bytes_needed bytes (caller must + * include already-stored data into the value!) + * + * Returns 0 on success, EOF if failed to enlarge buffer + */ +int cmpqCheckOutBufferSpace(size_t bytes_needed, CM_Conn* conn) +{ + if (bytes_needed <= (size_t)(conn->outBufSize)) { + return 0; + } + + /* + * If we need to enlarge the buffer, we first try to double it in size; if + * that doesn't work, enlarge in multiples of 8K. This avoids thrashing + * the malloc pool by repeated small enlargements. + * + * Note: tests for newsize > 0 are to catch integer overflow. + */ + if (cmConnSetting(conn, bytes_needed, true) || cmConnSetting(conn, bytes_needed, false)) { + return 0; + } + + /* realloc failed. Probably out of memory */ + printfCMPQExpBuffer(&conn->errorMessage, "cannot allocate memory for output buffer\n"); + return EOF; +} + +/* + * Make sure conn's input buffer can hold bytes_needed bytes (caller must + * include already-stored data into the value!) 
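+ * The strategy mirrors cmpqCheckOutBufferSpace: first try doubling the
+ * buffer, then fall back to growing it in 8K increments.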
+ * + * Returns 0 on success, EOF if failed to enlarge buffer + */ +int cmpqCheckInBufferSpace(size_t bytes_needed, CM_Conn* conn) +{ + int newsize = conn->inBufSize; + char* newbuf = NULL; + + if (bytes_needed <= (size_t)newsize) { + return 0; + } + + do { + newsize *= 2; + } while (newsize > 0 && bytes_needed > (size_t)newsize); + + if (newsize > 0 && bytes_needed <= (size_t)newsize) { + newbuf = (char*)malloc(newsize); + if (newbuf != NULL) { + /* realloc succeeded */ + if (conn->inBuffer != NULL) { + errno_t rc; + rc = memcpy_s(newbuf, newsize, conn->inBuffer, conn->inBufSize); + securec_check_c(rc, "\0", "\0"); + FREE_AND_RESET(conn->inBuffer); + } + conn->inBuffer = newbuf; + conn->inBufSize = newsize; + return 0; + } + } + + newsize = conn->inBufSize; + do { + newsize += 8192; + } while (newsize > 0 && bytes_needed > (size_t)newsize); + + if (newsize > 0 && bytes_needed <= (size_t)newsize) { + newbuf = (char*)malloc(newsize); + if (newbuf != NULL) { + /* realloc succeeded */ + if (conn->inBuffer != NULL) { + errno_t rc; + rc = memcpy_s(newbuf, newsize, conn->inBuffer, conn->inBufSize); + securec_check_c(rc, "\0", "\0"); + FREE_AND_RESET(conn->inBuffer); + } + conn->inBuffer = newbuf; + conn->inBufSize = newsize; + return 0; + } + } + + /* realloc failed. Probably out of memory */ + printfCMPQExpBuffer(&conn->errorMessage, "cannot allocate memory for input buffer\n"); + return EOF; +} + +/* + * cmpqPutMsgStart: begin construction of a message to the server + * + * msg_type is the message type byte, or 0 for a message without type byte + * (only startup messages have no type byte) + * + * force_len forces the message to have a length word; otherwise, we add + * a length word if protocol 3. + * + * Returns 0 on success, EOF on error + * + * The idea here is that we construct the message in conn->outBuffer, + * beginning just past any data already in outBuffer (ie, at + * outBuffer+outCount). We enlarge the buffer as needed to hold the message. + * When the message is complete, we fill in the length word (if needed) and + * then advance outCount past the message, making it eligible to send. + * + * The state variable conn->outMsgStart points to the incomplete message's + * length word: it is either outCount or outCount+1 depending on whether + * there is a type byte. If we are sending a message without length word + * (pre protocol 3.0 only), then outMsgStart is -1. The state variable + * conn->outMsgEnd is the end of the data collected so far. + */ +int cmpqPutMsgStart(char msg_type, bool force_len, CM_Conn* conn) +{ + int lenPos; + int endPos; + + /* allow room for message type byte */ + if (msg_type) { + endPos = conn->outCount + 1; + } else { + endPos = conn->outCount; + } + + /* do we want a length word? */ + if (force_len) { + lenPos = endPos; + /* allow room for message length */ + endPos += 4; + } else { + lenPos = -1; + } + + /* make sure there is room for message header */ + if (cmpqCheckOutBufferSpace((size_t)endPos, conn)) { + return EOF; + } + /* okay, save the message type byte if any */ + if (msg_type) { + conn->outBuffer[conn->outCount] = msg_type; + } + /* set up the message pointers */ + conn->outMsgStart = lenPos; + conn->outMsgEnd = endPos; + /* length word, if needed, will be filled in by cmpqPutMsgEnd */ + + if (conn->Pfdebug != NULL) { + fprintf(conn->Pfdebug, "To backend> Msg %c\n", msg_type ? 
msg_type : ' '); + } + + return 0; +} + +/* + * cmpqPutMsgBytes: add bytes to a partially-constructed message + * + * Returns 0 on success, EOF on error + */ +static int cmpqPutMsgBytes(const void* buf, size_t len, CM_Conn* conn) +{ + errno_t rc; + + /* make sure there is room for it */ + if (cmpqCheckOutBufferSpace((size_t)conn->outMsgEnd + len, conn)) { + return EOF; + } + /* okay, save the data */ + rc = memcpy_s(conn->outBuffer + conn->outMsgEnd, conn->outBufSize - conn->outMsgEnd, buf, len); + securec_check_c(rc, "\0", "\0"); + conn->outMsgEnd += len; + /* no Pfdebug call here, caller should do it */ + return 0; +} + +/* + * cmpqPutMsgEnd: finish constructing a message and possibly send it + * + * Returns 0 on success, EOF on error + * + * We don't actually send anything here unless we've accumulated at least + * 8K worth of data (the typical size of a pipe buffer on Unix systems). + * This avoids sending small partial packets. The caller must use cmpqFlush + * when it's important to flush all the data out to the server. + */ +int cmpqPutMsgEnd(CM_Conn* conn) +{ + if (conn->Pfdebug != NULL) { + fprintf(conn->Pfdebug, "To backend> Msg complete, length %d\n", conn->outMsgEnd - conn->outCount); + } + + /* Fill in length word if needed */ + if (conn->outMsgStart >= 0) { + uint32 msgLen = conn->outMsgEnd - conn->outMsgStart; + errno_t rc; + + msgLen = htonl(msgLen); + rc = memcpy_s(conn->outBuffer + conn->outMsgStart, conn->outBufSize - conn->outMsgStart, &msgLen, 4); + securec_check_c(rc, "\0", "\0"); + } + + /* Make message eligible to send */ + conn->outCount = conn->outMsgEnd; + + if (conn->outCount >= 8192) { + int toSend = conn->outCount - (conn->outCount % 8192); + + if (cmpqSendSome(conn, toSend) < 0) { + return EOF; + } + /* in nonblock mode, don't complain if unable to send it all */ + } + + return 0; +} + +/* ---------- + * cmpqReadData: read more data, if any is available + * Possible return values: + * 1: successfully loaded at least one more byte + * 0: no data is presently available, but no error detected + * -1: error detected (including EOF = connection closure); + * conn->errorMessage set + * NOTE: callers must not assume that pointers or indexes into conn->inBuffer + * remain valid across this call! + * ---------- + */ +int cmpqReadData(CM_Conn* conn) +{ + int someread = 0; + int nread; + + if (conn->sock < 0) { + printfCMPQExpBuffer(&conn->errorMessage, "connection not open\n"); + return TCP_SOCKET_ERROR_EPIPE; + } + + /* Left-justify any data in the buffer to make room */ + if (conn->inStart < conn->inEnd) { + if (conn->inStart > 0) { + errno_t rc; + + rc = + memmove_s(conn->inBuffer, conn->inBufSize, conn->inBuffer + conn->inStart, conn->inEnd - conn->inStart); + securec_check_c(rc, "\0", "\0"); + conn->inEnd -= conn->inStart; + conn->inCursor -= conn->inStart; + conn->inStart = 0; + } + } else { + /* buffer is logically empty, reset it */ + conn->inStart = conn->inCursor = conn->inEnd = 0; + } + + /* + * If the buffer is fairly full, enlarge it. We need to be able to enlarge + * the buffer in case a single message exceeds the initial buffer size. We + * enlarge before filling the buffer entirely so as to avoid asking the + * kernel for a partial packet. The magic constant here should be large + * enough for a TCP packet or Unix pipe bufferload. 8K is the usual pipe + * buffer size, so... 
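+     * we enlarge whenever less than 8K of the input buffer remains free.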
+     */
+    if (conn->inBufSize - conn->inEnd < 8192) {
+        if (cmpqCheckInBufferSpace(conn->inEnd + (size_t)8192, conn)) {
+            /*
+             * We don't insist that the enlarge worked, but we need some room
+             */
+            if (conn->inBufSize - conn->inEnd < 100)
+                return TCP_SOCKET_ERROR_EPIPE; /* errorMessage already set */
+        }
+    }
+
+    /* OK, try to read some data */
+retry3:
+    nread = recv(conn->sock, conn->inBuffer + conn->inEnd, conn->inBufSize - conn->inEnd, MSG_DONTWAIT);
+    conn->last_call = CM_LastCall_RECV;
+    if (nread < 0) {
+        conn->last_errno = SOCK_ERRNO;
+
+        if (SOCK_ERRNO == EINTR) {
+            goto retry3;
+        }
+        /* Some systems return EAGAIN/EWOULDBLOCK for no data */
+#ifdef EAGAIN
+        if (SOCK_ERRNO == EAGAIN) {
+            return someread;
+        }
+#endif
+#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
+        if (SOCK_ERRNO == EWOULDBLOCK) {
+            return someread;
+        }
+#endif
+        /* We might get ECONNRESET here if using TCP and backend died */
+#ifdef ECONNRESET
+        if (SOCK_ERRNO == ECONNRESET) {
+            goto definitelyFailed;
+        }
+#endif
+        printfCMPQExpBuffer(&conn->errorMessage, "could not receive data from server:\n");
+        return TCP_SOCKET_ERROR_EPIPE;
+    } else {
+        conn->last_errno = 0;
+    }
+
+    if (nread > 0) {
+        conn->inEnd += nread;
+
+        /*
+         * Hack to deal with the fact that some kernels will only give us back
+         * 1 packet per recv() call, even if we asked for more and there is
+         * more available. If it looks like we are reading a long message,
+         * loop back to recv() again immediately, until we run out of data or
+         * buffer space. Without this, the block-and-restart behavior of
+         * libpq's higher levels leads to O(N^2) performance on long messages.
+         *
+         * Since we left-justified the data above, conn->inEnd gives the
+         * amount of data already read in the current message. We consider
+         * the message "long" once we have acquired 32k ...
+         */
+#ifdef NOT_USED
+        if (conn->inEnd > 32768 && (conn->inBufSize - conn->inEnd) >= 8192) {
+            someread = 1;
+            goto retry3;
+        }
+#endif
+        return 1;
+    }
+
+    if (someread) {
+        return 1; /* got a zero read after successful tries */
+    }
+
+    /*
+     * A return value of 0 could mean just that no data is now available, or
+     * it could mean EOF --- that is, the server has closed the connection.
+     * Since we have the socket in nonblock mode, the only way to tell the
+     * difference is to see if select() is saying that the file is ready.
+     * Grumble. Fortunately, we don't expect this path to be taken much,
+     * since in normal practice we should not be trying to read data unless
+     * the file selected for reading already.
+     *
+     * In SSL mode it's even worse: SSL_read() could say WANT_READ and then
+     * data could arrive before we make the cmpqReadReady() test. So we must
+     * play dumb and assume there is more data, relying on the SSL layer to
+     * detect true EOF.
+     */
+
+    switch (cmpqReadReady(conn)) {
+        case 0:
+            /* definitely no data available */
+            return 0;
+        case 1:
+            /* ready for read */
+            break;
+        default:
+            goto definitelyFailed;
+    }
+
+    /*
+     * Still not sure that it's EOF, because some data could have just
+     * arrived.
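+     * (The second MSG_DONTWAIT recv() below disambiguates the race: a
+     * positive byte count means data really did arrive after the select(),
+     * while another zero-length read confirms an orderly shutdown.)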
+     */
+retry4:
+    nread = recv(conn->sock, conn->inBuffer + conn->inEnd, conn->inBufSize - conn->inEnd, MSG_DONTWAIT);
+    conn->last_call = CM_LastCall_RECV;
+    if (nread < 0) {
+        conn->last_errno = SOCK_ERRNO;
+        if (SOCK_ERRNO == EINTR) {
+            goto retry4;
+        }
+        /* Some systems return EAGAIN/EWOULDBLOCK for no data */
+#ifdef EAGAIN
+        if (SOCK_ERRNO == EAGAIN) {
+            return 0;
+        }
+#endif
+#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
+        if (SOCK_ERRNO == EWOULDBLOCK) {
+            return 0;
+        }
+#endif
+        /* We might get ECONNRESET here if using TCP and backend died */
+#ifdef ECONNRESET
+        if (SOCK_ERRNO == ECONNRESET) {
+            goto definitelyFailed;
+        }
+#endif
+        printfCMPQExpBuffer(&conn->errorMessage, "could not receive data from server: \n");
+        return -1;
+    } else {
+        conn->last_errno = 0;
+    }
+    if (nread > 0) {
+        conn->inEnd += nread;
+        return 1;
+    }
+
+    /*
+     * OK, we are getting a zero read even though select() says ready. This
+     * means the connection has been closed. Cope.
+     */
+definitelyFailed:
+    printfCMPQExpBuffer(&conn->errorMessage,
+        "server closed the connection unexpectedly\n"
+        "\tThis probably means the server terminated abnormally\n"
+        "\tbefore or while processing the request.\n");
+    conn->status = CONNECTION_BAD; /* No more connection to backend */
+    close(conn->sock);
+    conn->sock = -1;
+
+    return TCP_SOCKET_ERROR_EPIPE;
+}
+
+/*
+ * cmpqSendSome: send data waiting in the output buffer.
+ *
+ * len is how much to try to send (typically equal to outCount, but may
+ * be less).
+ *
+ * Return 0 on success, -1 on failure and 1 when not all data could be sent
+ * because the socket would block and the connection is non-blocking.
+ */
+static int cmpqSendSome(CM_Conn* conn, int len)
+{
+    char* ptr = conn->outBuffer;
+    int remaining = conn->outCount;
+    int result = 0;
+
+    if (conn->sock < 0) {
+        printfCMPQExpBuffer(&conn->errorMessage, "connection not open\n");
+        return TCP_SOCKET_ERROR_EPIPE;
+    }
+
+    /* while there's still data to send */
+    while (len > 0) {
+        int sent;
+
+        sent = send(conn->sock, ptr, len, MSG_DONTWAIT);
+        conn->last_call = CM_LastCall_SEND;
+
+        if (sent < 0) {
+            conn->last_errno = SOCK_ERRNO;
+            /*
+             * Anything except EAGAIN/EWOULDBLOCK/EINTR is trouble. If it's
+             * EPIPE or ECONNRESET, assume we've lost the backend connection
+             * permanently.
+             */
+            switch (SOCK_ERRNO) {
+#ifdef EAGAIN
+                case EAGAIN:
+                    break;
+#endif
+#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN))
+                case EWOULDBLOCK:
+                    break;
+#endif
+                case EINTR:
+                    continue;
+
+                case EPIPE:
+#ifdef ECONNRESET
+                case ECONNRESET:
+#endif
+                    printfCMPQExpBuffer(&conn->errorMessage,
+                        "server closed the connection unexpectedly\n"
+                        "\tThis probably means the server terminated abnormally\n"
+                        "\tbefore or while processing the request. errno:%d\n",
+                        SOCK_ERRNO);
+
+                    /*
+                     * We used to close the socket here, but that's a bad idea
+                     * since there might be unread data waiting (typically, a
+                     * NOTICE message from the backend telling us it's
+                     * committing hara-kiri...). Leave the socket open until
+                     * cmpqReadData finds no more data can be read. But abandon
+                     * attempt to send data.
+                     */
+                    conn->outCount = 0;
+                    return TCP_SOCKET_ERROR_EPIPE;
+
+                default:
+                    printfCMPQExpBuffer(&conn->errorMessage, "could not send data to server: \n");
+                    /* We don't assume it's a fatal error... */
+                    conn->outCount = 0;
+                    return TCP_SOCKET_ERROR_EPIPE;
+            }
+        } else {
+            ptr += sent;
+            len -= sent;
+            remaining -= sent;
+            conn->last_errno = 0;
+        }
+
+        if (len > 0) {
+            /*
+             * We didn't send it all, wait till we can send more.
+             *
+             * If the connection is in non-blocking mode we don't wait, but
+             * return 1 to indicate that data is still pending.
+             */
+            result = 1;
+            break;
+        }
+    }
+
+    /* shift the remaining contents of the buffer */
+    if (remaining > 0) {
+        errno_t rc;
+
+        rc = memmove_s(conn->outBuffer, conn->outBufSize, ptr, remaining);
+        securec_check_c(rc, "\0", "\0");
+    }
+    conn->outCount = remaining;
+
+    return result;
+}
+
+/*
+ * cmpqFlush: send any data waiting in the output buffer
+ *
+ * Return 0 on success, -1 on failure and 1 when not all data could be sent
+ * because the socket would block and the connection is non-blocking.
+ */
+int cmpqFlush(CM_Conn* conn)
+{
+    if (conn->Pfdebug != NULL) {
+        fflush(conn->Pfdebug);
+    }
+
+    if (conn->outCount > 0) {
+        return cmpqSendSome(conn, conn->outCount);
+    }
+
+    return 0;
+}
+
+/*
+ * cmpqWait: wait until we can read or write the connection socket
+ *
+ * JAB: If SSL enabled and used and forRead, buffered bytes short-circuit the
+ * call to select().
+ *
+ * We also stop waiting and return if the kernel flags an exception condition
+ * on the socket. The actual error condition will be detected and reported
+ * when the caller tries to read or write the socket.
+ */
+int cmpqWait(int forRead, int forWrite, CM_Conn* conn)
+{
+    return cmpqWaitTimed(forRead, forWrite, conn, time(NULL) + 5);
+}
+
+/*
+ * cmpqWaitTimed: wait, but not past finish_time.
+ *
+ * If finish_time is exceeded then we return failure (EOF). This is like
+ * the response for a kernel exception because we don't want the caller
+ * to try to read/write in that case.
+ *
+ * finish_time = ((time_t) -1) disables the wait limit.
+ */
+int cmpqWaitTimed(int forRead, int forWrite, CM_Conn* conn, time_t finish_time)
+{
+    int result;
+
+    result = cmpqSocketCheck(conn, forRead, forWrite, finish_time);
+
+    if (result < 0) {
+        return EOF; /* errorMessage is already set */
+    }
+
+    if (result == 0) {
+        printfCMPQExpBuffer(&conn->errorMessage, "timeout expired\n");
+        return EOF;
+    }
+
+    return 0;
+}
+
+/*
+ * cmpqReadReady: is select() saying the file is ready to read?
+ * Returns -1 on failure, 0 if not ready, 1 if ready.
+ */
+int cmpqReadReady(CM_Conn* conn)
+{
+    return cmpqSocketCheck(conn, 1, 0, (time_t)0);
+}
+
+/*
+ * Checks a socket, using poll or select, for data to be read, written,
+ * or both. Returns >0 if one or more conditions are met, 0 if it timed
+ * out, -1 if an error occurred.
+ *
+ * If SSL is in use, the SSL buffer is checked prior to checking the socket
+ * for read data directly.
+ */
+static int cmpqSocketCheck(CM_Conn* conn, int forRead, int forWrite, time_t end_time)
+{
+    int result;
+
+    if (conn == NULL) {
+        return TCP_SOCKET_ERROR_EPIPE;
+    }
+    if (conn->sock < 0) {
+        printfCMPQExpBuffer(&conn->errorMessage, "socket not open\n");
+        return TCP_SOCKET_ERROR_EPIPE;
+    }
+
+    /* We will retry as long as we get EINTR */
+    do
+        result = cmpqSocketPoll(conn->sock, forRead, forWrite, end_time);
+    while (result < 0 && SOCK_ERRNO == EINTR);
+
+    if (result < 0) {
+        printfCMPQExpBuffer(&conn->errorMessage, "select() failed: \n");
+    }
+
+    return result;
+}
+
+/*
+ * Check a file descriptor for read and/or write data, possibly waiting.
+ * If neither forRead nor forWrite are set, immediately return a timeout
+ * condition (without waiting).
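+ *
+ * For example, cmpqReadReady() above performs an immediate, non-blocking
+ * probe by calling cmpqSocketCheck(conn, 1, 0, (time_t)0), which arrives
+ * here with forRead = 1, forWrite = 0 and end_time = 0.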
+ * Return >0 if condition is met, 0 if a timeout occurred, -1 if an error
+ * or interrupt occurred.
+ *
+ * Timeout is infinite if end_time is -1. Timeout is immediate (no blocking)
+ * if end_time is 0 (or indeed, any time before now).
+ */
+static int cmpqSocketPoll(int sock, int forRead, int forWrite, time_t end_time)
+{
+
+#if 1
+    /* We use poll(2) if available, otherwise select(2) */
+#ifdef HAVE_POLL
+    struct pollfd input_fd;
+    int timeout_ms;
+
+    if (!forRead && !forWrite) {
+        return 0;
+    }
+
+    input_fd.fd = sock;
+    input_fd.events = POLLERR;
+    input_fd.revents = 0;
+
+    if (forRead) {
+        input_fd.events |= POLLIN;
+    }
+    if (forWrite) {
+        input_fd.events |= POLLOUT;
+    }
+
+    /* Compute appropriate timeout interval */
+    if (end_time == ((time_t)-1)) {
+        timeout_ms = -1;
+    } else {
+        time_t now = time(NULL);
+
+        if (end_time > now) {
+            timeout_ms = (end_time - now) * 1000;
+        } else {
+            timeout_ms = 0;
+        }
+    }
+
+    return poll(&input_fd, 1, timeout_ms);
+#else /* !HAVE_POLL */
+
+    fd_set input_mask;
+    fd_set output_mask;
+    fd_set except_mask;
+    struct timeval timeout;
+    struct timeval* ptr_timeout = NULL;
+
+    if (!forRead && !forWrite) {
+        return 0;
+    }
+
+    FD_ZERO(&input_mask);
+    FD_ZERO(&output_mask);
+    FD_ZERO(&except_mask);
+    if (forRead)
+        FD_SET(sock, &input_mask);
+    if (forWrite)
+        FD_SET(sock, &output_mask);
+    FD_SET(sock, &except_mask);
+
+    /* Compute appropriate timeout interval */
+    if (end_time == ((time_t)-1)) {
+        ptr_timeout = NULL;
+    } else {
+        time_t now = time(NULL);
+
+        if (end_time > now) {
+            timeout.tv_sec = end_time - now;
+        } else {
+            timeout.tv_sec = 0;
+        }
+        timeout.tv_usec = 0;
+        ptr_timeout = &timeout;
+    }
+
+    return select(sock + 1, &input_mask, &output_mask, &except_mask, ptr_timeout);
+#endif /* HAVE_POLL */
+#endif
+}
diff --git a/src/lib/cm_communication/cm_feconnect/fe-protocol.cpp b/src/lib/cm_communication/cm_feconnect/fe-protocol.cpp
new file mode 100644
index 000000000..2fe5cdcfc
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/fe-protocol.cpp
@@ -0,0 +1,253 @@
+/* -------------------------------------------------------------------------
+ *
+ * fe-protocol3.c
+ *    functions that are specific to frontend/backend protocol version 3
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ *
+ * IDENTIFICATION
+ *    $PostgreSQL$
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "cm/cm_c.h"
+
+#include
+#include
+
+#include "cm/libpq-fe.h"
+#include "cm/libpq-int.h"
+
+#include
+#include
+
+/*
+ * This macro lists the backend message types that could be "long" (more
+ * than a couple of kilobytes).
+ */
+#define VALID_LONG_MESSAGE_TYPE(id) ((id) == 'S' || (id) == 'E')
+
+static void handleSyncLoss(CM_Conn* conn, char id, int msgLength);
+static CM_Result* pqParseInput(CM_Conn* conn);
+static int cmpqParseSuccess(CM_Conn* conn, CM_Result* result);
+/*
+ * parseInput: if appropriate, parse input data from backend
+ * until input is exhausted or a stopping state is reached.
+ * Note that this function will NOT attempt to read more data from the backend.
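+ *
+ * As a concrete, illustrative example of the wire format handled here: a
+ * command-complete message with an 8-byte body arrives as the type byte
+ * 'S', a 4-byte network-order length word of 12 (the word counts itself
+ * but not the type byte), and then the 8 body bytes, so gr_msglen is
+ * stored as msgLength - 4 = 8.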
+ */
+static CM_Result* pqParseInput(CM_Conn* conn)
+{
+    char id;
+    int msgLength;
+    int avail;
+    CM_Result* result = NULL;
+
+    if (conn->result == NULL) {
+        errno_t rc;
+
+        conn->result = (CM_Result*)malloc(sizeof(CM_Result));
+        if (conn->result == NULL)
+            return NULL;
+        rc = memset_s(conn->result, sizeof(CM_Result), 0, sizeof(CM_Result));
+        securec_check_c(rc, (char*)conn->result, "\0");
+    }
+
+    result = conn->result;
+
+    /*
+     * Try to read a message. First get the type code and length. Return
+     * if not enough data.
+     */
+    conn->inCursor = conn->inStart;
+    if (cmpqGetc(&id, conn))
+        return NULL;
+    if (cmpqGetInt(&msgLength, 4, conn))
+        return NULL;
+
+    /*
+     * Try to validate message type/length here. A length less than 4 is
+     * definitely broken. Large lengths should only be believed for a few
+     * message types.
+     */
+    if (msgLength < 4) {
+        handleSyncLoss(conn, id, msgLength);
+        return NULL;
+    }
+    if (msgLength > 30000 && !VALID_LONG_MESSAGE_TYPE(id)) {
+        handleSyncLoss(conn, id, msgLength);
+        return NULL;
+    }
+
+    /*
+     * Can't process if message body isn't all here yet.
+     */
+    conn->result->gr_msglen = msgLength -= 4;
+    avail = conn->inEnd - conn->inCursor;
+    if (avail < msgLength) {
+        /*
+         * Before returning, enlarge the input buffer if needed to hold
+         * the whole message. This is better than leaving it to
+         * cmpqReadData because we can avoid multiple cycles of realloc()
+         * when the message is large; also, we can implement a reasonable
+         * recovery strategy if we are unable to make the buffer big
+         * enough.
+         */
+        if (cmpqCheckInBufferSpace((size_t)(conn->inCursor + msgLength), conn)) {
+            /*
+             * XXX add some better recovery code... plan is to skip over
+             * the message using its length, then report an error. For the
+             * moment, just treat this like loss of sync (which indeed it
+             * might be!)
+             */
+            handleSyncLoss(conn, id, msgLength);
+        }
+        return NULL;
+    }
+
+    /* switch on protocol character */
+    switch (id) {
+        case 'S': /* command complete */
+            if (cmpqParseSuccess(conn, result))
+                return NULL;
+            break;
+
+        case 'E': /* error return */
+            if (cmpqGetError(conn, result))
+                return NULL;
+            result->gr_status = CM_RESULT_ERROR;
+            break;
+        default:
+            printfCMPQExpBuffer(
+                &conn->errorMessage, "unexpected response from server; first received character was \"%c\"\n", id);
+            conn->inCursor += msgLength;
+            break;
+    }
+    /* Successfully consumed this message */
+    if (conn->inCursor == conn->inStart + 5 + msgLength) {
+        /* Normal case: parsing agrees with specified length */
+        conn->inStart = conn->inCursor;
+    } else {
+        /* Trouble --- report it */
+        printfCMPQExpBuffer(
+            &conn->errorMessage, "message contents do not agree with length in message type \"%c\"\n", id);
+        /* trust the specified message length as what to skip */
+        conn->inStart += 5 + msgLength;
+    }
+
+    return result;
+}
+
+/*
+ * handleSyncLoss: clean up after loss of message-boundary sync
+ *
+ * There isn't really a lot we can do here except abandon the connection.
+ */
+static void handleSyncLoss(CM_Conn* conn, char id, int msgLength)
+{
+    printfCMPQExpBuffer(
+        &conn->errorMessage, "lost synchronization with server: got message type \"%c\", length %d\n", id, msgLength);
+    close(conn->sock);
+    conn->sock = -1;
+    conn->status = CONNECTION_BAD; /* No more connection to backend */
+}
+
+/*
+ * Attempt to read an Error or Notice response message.
+ * This is possible in several places, so we break it out as a subroutine.
+ * Entry: 'E' message type and length have already been consumed.
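+ *        (The body is a sequence of (field-id byte, null-terminated string)
+ *        pairs, ended by a lone zero byte where the next field id would be;
+ *        the strings are read into conn->errorMessage via cmpqGets().)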
+ * Exit: returns 0 if successfully consumed message.
+ *       returns EOF if not enough data.
+ */
+int cmpqGetError(CM_Conn* conn, CM_Result* result)
+{
+    char id;
+
+    /*
+     * Read the fields and save into res.
+     */
+    for (;;) {
+        if (cmpqGetc(&id, conn)) {
+            goto fail;
+        }
+        if (id == '\0') {
+            break;
+        }
+        if (cmpqGets(&conn->errorMessage, conn)) {
+            goto fail;
+        }
+    }
+    return 0;
+
+fail:
+    return EOF;
+}
+
+/*
+ * cmpqGetResult
+ *    Get the next CM_Result produced. Returns NULL if no
+ *    query work remains or an error has occurred (e.g. out of
+ *    memory).
+ */
+
+CM_Result* cmpqGetResult(CM_Conn* conn)
+{
+    CM_Result* res = NULL;
+
+    if (conn == NULL)
+        return NULL;
+
+    /* Parse any available data, if our state permits. */
+    while ((res = pqParseInput(conn)) == NULL) {
+        int flushResult;
+
+        /*
+         * If data remains unsent, send it. Else we might be waiting for the
+         * result of a command the backend hasn't even got yet.
+         */
+        while ((flushResult = cmpqFlush(conn)) > 0) {
+            if (cmpqWait(false, true, conn)) {
+                flushResult = -1;
+                break;
+            }
+        }
+
+        /* Wait for some more data, and load it. */
+        if (flushResult || cmpqReadData(conn) <= 0) {
+            /*
+             * conn->errorMessage has been set by cmpqWait or cmpqReadData.
+             */
+            return NULL;
+        }
+    }
+
+    return res;
+}
+
+/*
+ * Copy the message payload into the result. Returns result->gr_status,
+ * which is CM_RESULT_OK when the payload was consumed successfully.
+ */
+static int cmpqParseSuccess(CM_Conn* conn, CM_Result* result)
+{
+    errno_t rc;
+
+    result->gr_status = CM_RESULT_OK;
+    rc = memcpy_s(&(result->gr_resdata), CM_MSG_MAX_LENGTH, conn->inBuffer + conn->inCursor, result->gr_msglen);
+    securec_check_c(rc, "\0", "\0");
+    return (result->gr_status);
+}
+
+void cmpqResetResultData(CM_Result* result)
+{
+    if (NULL != result) {
+        result->gr_msglen = 0;
+        result->gr_status = 0;
+        result->gr_type = 0;
+    }
+}
diff --git a/src/lib/cm_communication/cm_feconnect/ip.cpp b/src/lib/cm_communication/cm_feconnect/ip.cpp
new file mode 100644
index 000000000..614eb5a50
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/ip.cpp
@@ -0,0 +1,63 @@
+/* -------------------------------------------------------------------------
+ *
+ * ip.c
+ *    IPv6-aware network access.
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ *
+ * IDENTIFICATION
+ *    $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.43 2009/01/01 17:23:42 momjian Exp $
+ *
+ * This file and the IPV6 implementation were initially provided by
+ * Nigel Kukard , Linux Based Systems Design
+ * http://www.lbsd.net.
+ *
+ * -------------------------------------------------------------------------
+ */
+
+/* This is intended to be used in both frontend and backend, so use c.h */
+
+#include
+#ifdef HAVE_NETINET_TCP_H
+#include
+#endif
+
+#include "cm/cm_ip.h"
+
+/*
+ * cm_getaddrinfo_all - get address info for Unix, IPv4 and IPv6 sockets
+ */
+int cm_getaddrinfo_all(
+    const char* hostname, const char* servname, const struct addrinfo* hintp, struct addrinfo** result)
+{
+    int rc;
+
+    /* not all versions of getaddrinfo() zero *result on failure */
+    *result = NULL;
+
+    /* NULL has special meaning to getaddrinfo(). */
+    rc = getaddrinfo(((hostname == NULL) || hostname[0] == '\0') ?
+        NULL : hostname, servname, hintp, result);
+
+    return rc;
+}
+
+/*
+ * cm_freeaddrinfo_all - free addrinfo structures for IPv4, IPv6, or Unix
+ *
+ * Note: the ai_family field of the original hint structure must be passed
+ * so that we can tell whether the addrinfo struct was built by the system's
+ * getaddrinfo() routine or our own getaddrinfo_unix() routine. Some versions
+ * of getaddrinfo() might be willing to return AF_UNIX addresses, so it's
+ * not safe to look at ai_family in the addrinfo itself.
+ */
+void cm_freeaddrinfo_all(int hint_ai_family, struct addrinfo* ai)
+{
+    /* struct was built by getaddrinfo() */
+    if (ai != NULL)
+        freeaddrinfo(ai);
+}
diff --git a/src/lib/cm_communication/cm_feconnect/pqexpbuffer.cpp b/src/lib/cm_communication/cm_feconnect/pqexpbuffer.cpp
new file mode 100644
index 000000000..30a64a9a1
--- /dev/null
+++ b/src/lib/cm_communication/cm_feconnect/pqexpbuffer.cpp
@@ -0,0 +1,301 @@
+/* -------------------------------------------------------------------------
+ *
+ * pqexpbuffer.c
+ *
+ * PQExpBuffer provides an indefinitely-extensible string data type.
+ * It can be used to buffer either ordinary C strings (null-terminated text)
+ * or arbitrary binary data. All storage is allocated with malloc().
+ *
+ * This module is essentially the same as the backend's StringInfo data type,
+ * but it is intended for use in frontend libpq and client applications.
+ * Thus, it does not rely on palloc() nor elog().
+ *
+ * It does rely on vsnprintf(); if configure finds that libc doesn't provide
+ * a usable vsnprintf(), then a copy of our own implementation of it will
+ * be linked into libpq.
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ * $PostgreSQL: pgsql/src/interfaces/libpq/pqexpbuffer.c,v 1.25 2008/11/26 00:26:23 tgl Exp $
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "cm/cm_c.h"
+
+#include
+
+#include "cm/pqexpbuffer.h"
+
+/* All "broken" PQExpBuffers point to this string. */
+static const char oom_buffer[1] = "";
+
+/*
+ * markPQExpBufferBroken
+ *
+ * Put a PQExpBuffer in "broken" state if it isn't already.
+ */
+static void markPQExpBufferBroken(PQExpBuffer str)
+{
+    if (str->data != oom_buffer) {
+        FREE_AND_RESET(str->data);
+    }
+    /*
+     * Casting away const here is a bit ugly, but it seems preferable to
+     * not marking oom_buffer const. We want to do that to encourage the
+     * compiler to put oom_buffer in read-only storage, so that anyone who
+     * tries to scribble on a broken PQExpBuffer will get a failure.
+     */
+    str->data = (char*)oom_buffer;
+    str->len = 0;
+    str->maxlen = 0;
+}
+
+/*
+ * initCMPQExpBuffer
+ *
+ * Initialize a PQExpBufferData struct (with previously undefined contents)
+ * to describe an empty string.
+ */
+void initCMPQExpBuffer(PQExpBuffer str)
+{
+    str->data = (char*)malloc(INITIAL_EXPBUFFER_SIZE);
+    if (str->data == NULL) {
+        str->data = (char*)oom_buffer; /* see comment above */
+        str->maxlen = 0;
+        str->len = 0;
+    } else {
+        str->maxlen = INITIAL_EXPBUFFER_SIZE;
+        str->len = 0;
+        str->data[0] = '\0';
+    }
+}
+
+/*
+ * termCMPQExpBuffer(str)
+ *    free()s the data buffer but not the PQExpBufferData itself.
+ *    This is the inverse of initCMPQExpBuffer().
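+ *
+ *    A minimal usage sketch (the caller, write_log and node_id below are
+ *    hypothetical, not part of this module):
+ *
+ *        PQExpBufferData buf;
+ *
+ *        initCMPQExpBuffer(&buf);
+ *        appendCMPQExpBuffer(&buf, "node %d", node_id);
+ *        if (!PQExpBufferBroken(&buf))
+ *            write_log(buf.data);
+ *        termCMPQExpBuffer(&buf);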
+ */
+void termCMPQExpBuffer(PQExpBuffer str)
+{
+    if (str->data != oom_buffer) {
+        FREE_AND_RESET(str->data);
+    }
+
+    /* just for luck, make the buffer validly empty. */
+    str->data = (char*)oom_buffer; /* see comment above */
+    str->maxlen = 0;
+    str->len = 0;
+}
+
+/*
+ * resetCMPQExpBuffer
+ *    Reset a PQExpBuffer to empty
+ *
+ * Note: if possible, a "broken" PQExpBuffer is returned to normal.
+ */
+void resetCMPQExpBuffer(PQExpBuffer str)
+{
+    if (str != NULL) {
+        if ((str->data != NULL) && str->data != oom_buffer) {
+            str->len = 0;
+            str->data[0] = '\0';
+        } else {
+            /* try to reinitialize to valid state */
+            initCMPQExpBuffer(str);
+        }
+    }
+}
+
+/*
+ * enlargeCMPQExpBuffer
+ * Make sure there is enough space for 'needed' more bytes in the buffer
+ * ('needed' does not include the terminating null).
+ *
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * the buffer is left in "broken" state.)
+ */
+int enlargeCMPQExpBuffer(PQExpBuffer str, size_t needed)
+{
+    size_t newlen;
+    char* newdata = NULL;
+
+    if (PQExpBufferBroken(str))
+        return 0; /* already failed */
+
+    /*
+     * Guard against ridiculous "needed" values, which can occur if we're fed
+     * bogus data. Without this, we can get an overflow or infinite loop in
+     * the following.
+     */
+    if (needed >= ((size_t)INT_MAX - str->len)) {
+        markPQExpBufferBroken(str);
+        return 0;
+    }
+
+    needed += str->len + 1; /* total space required now */
+
+    /* Because of the above test, we now have needed <= INT_MAX */
+    if (needed <= str->maxlen) {
+        return 1; /* got enough space already */
+    }
+
+    /*
+     * We don't want to allocate just a little more space with each append;
+     * for efficiency, double the buffer size each time it overflows.
+     * Actually, we might need to more than double it if 'needed' is big...
+     */
+    newlen = (str->maxlen > 0) ? (2 * str->maxlen) : 64;
+    while (needed > newlen) {
+        newlen = 2 * newlen;
+    }
+
+    /*
+     * Clamp to INT_MAX in case we went past it. Note we are assuming here
+     * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
+     * will still have newlen >= needed.
+     */
+    if (newlen > (size_t)INT_MAX) {
+        newlen = (size_t)INT_MAX;
+    }
+
+    newdata = (char*)malloc(newlen);
+    if (newdata != NULL) {
+        if (str->data != NULL) {
+            errno_t rc;
+            rc = memcpy_s(newdata, newlen, str->data, str->maxlen);
+            securec_check_c(rc, "\0", "\0");
+            FREE_AND_RESET(str->data);
+        }
+        str->data = newdata;
+        str->maxlen = newlen;
+        return 1;
+    }
+
+    markPQExpBufferBroken(str);
+    return 0;
+}
+
+/*
+ * printfCMPQExpBuffer
+ * Format text data under the control of fmt (an sprintf-like format string)
+ * and insert it into str. More space is allocated to str if necessary.
+ * This is a convenience routine that does the same thing as
+ * resetCMPQExpBuffer() followed by appendCMPQExpBuffer().
+ */
+void printfCMPQExpBuffer(PQExpBuffer str, const char* fmt, ...)
+{
+    va_list args;
+    size_t avail;
+    int nprinted;
+
+    resetCMPQExpBuffer(str);
+
+    if (PQExpBufferBroken(str)) {
+        return; /* already failed */
+    }
+
+    for (;;) {
+        /*
+         * Try to format the given string into the available space; but if
+         * there's hardly any space, don't bother trying, just fall through to
+         * enlarge the buffer first.
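+         * (For instance, with hypothetical values maxlen = 256 and
+         * len = 250 the test 256 > 250 + 16 fails, so we enlarge before
+         * calling vsnprintf_s.)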
+         */
+        if (str->maxlen > str->len + 16) {
+            avail = str->maxlen - str->len - 1;
+            va_start(args, fmt);
+            nprinted = vsnprintf_s(str->data + str->len, str->maxlen - str->len, avail, fmt, args);
+            va_end(args);
+
+            /*
+             * Note: some versions of vsnprintf return the number of chars
+             * actually stored, but at least one returns -1 on failure. Be
+             * conservative about believing whether the print worked.
+             */
+            if (nprinted >= 0 && nprinted < (int)avail - 1) {
+                /* Success. Note nprinted does not include trailing null. */
+                str->len += nprinted;
+                break;
+            }
+        }
+        /* Double the buffer size and try again. */
+        if (!enlargeCMPQExpBuffer(str, str->maxlen))
+            return; /* oops, out of memory */
+    }
+}
+
+/*
+ * appendCMPQExpBuffer
+ *
+ * Format text data under the control of fmt (an sprintf-like format string)
+ * and append it to whatever is already in str. More space is allocated
+ * to str if necessary. This is sort of like a combination of sprintf and
+ * strcat.
+ */
+void appendCMPQExpBuffer(PQExpBuffer str, const char* fmt, ...)
+{
+    va_list args;
+    size_t avail;
+    int nprinted;
+
+    if (PQExpBufferBroken(str)) {
+        return; /* already failed */
+    }
+
+    for (;;) {
+        /*
+         * Try to format the given string into the available space; but if
+         * there's hardly any space, don't bother trying, just fall through to
+         * enlarge the buffer first.
+         */
+        if (str->maxlen > str->len + 16) {
+            avail = str->maxlen - str->len - 1;
+            va_start(args, fmt);
+            nprinted = vsnprintf_s(str->data + str->len, str->maxlen - str->len, avail, fmt, args);
+            va_end(args);
+
+            /*
+             * Note: some versions of vsnprintf return the number of chars
+             * actually stored, but at least one returns -1 on failure. Be
+             * conservative about believing whether the print worked.
+             */
+            if (nprinted >= 0 && nprinted < (int)avail - 1) {
+                /* Success. Note nprinted does not include trailing null. */
+                str->len += nprinted;
+                break;
+            }
+        }
+        /* Double the buffer size and try again. */
+        if (!enlargeCMPQExpBuffer(str, str->maxlen)) {
+            return; /* oops, out of memory */
+        }
+    }
+}
+
+/*
+ * appendBinaryCMPQExpBuffer
+ *
+ * Append arbitrary binary data to a PQExpBuffer, allocating more space
+ * if necessary.
+ */
+void appendBinaryCMPQExpBuffer(PQExpBuffer str, const char* data, size_t datalen)
+{
+    errno_t rc;
+    /* Make more room if needed */
+    if (!enlargeCMPQExpBuffer(str, datalen)) {
+        return;
+    }
+
+    /* OK, append the data */
+    rc = memcpy_s(str->data + str->len, str->maxlen - str->len, data, datalen);
+    securec_check_c(rc, "\0", "\0");
+    str->len += datalen;
+
+    /*
+     * Keep a trailing null in place, even though it's probably useless for
+     * binary data...
+     */
+    str->data[str->len] = '\0';
+}
diff --git a/src/lib/cm_communication/cm_libpq/CMakeLists.txt b/src/lib/cm_communication/cm_libpq/CMakeLists.txt
new file mode 100644
index 000000000..72d5dd296
--- /dev/null
+++ b/src/lib/cm_communication/cm_libpq/CMakeLists.txt
@@ -0,0 +1,23 @@
+# This is the main CMake file for building all components.
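+#
+# Both a static archive (libcmpq.a) and a versioned shared object
+# (libcmpq.so.1) are built from the same sources below; a consumer would
+# typically link with -lcmpq (an illustrative note, not part of the build).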
+# libcmpq.a
+AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} TGT_cmpq_SRC)
+set(TGT_cmpq_INC
+    ${PROJECT_SRC_DIR}/include/cm
+    ${PROJECT_SRC_DIR}/common/interfaces/libpq
+    ${PROJECT_TRUNK_DIR}/distribute/cm/cm_etcdapi
+    ${LIBCGROUP_INCLUDE_PATH}
+    ${KERBEROS_INCLUDE_PATH}
+)
+
+set(cmpq_DEF_OPTIONS ${MACRO_OPTIONS})
+set(cmpq_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS} -fstack-protector-strong)
+list(REMOVE_ITEM cmpq_COMPILE_OPTIONS -fstack-protector)
+add_static_libtarget(cmpq TGT_cmpq_SRC TGT_cmpq_INC "${cmpq_DEF_OPTIONS}" "${cmpq_COMPILE_OPTIONS}")
+
+set(cmpq_LINK_OPTIONS ${LIB_LINK_OPTIONS})
+list(REMOVE_ITEM cmpq_LINK_OPTIONS -pthread)
+add_shared_libtarget(cmpq TGT_cmpq_SRC TGT_cmpq_INC "${cmpq_DEF_OPTIONS}" "${cmpq_COMPILE_OPTIONS}" "${cmpq_LINK_OPTIONS}")
+set_target_properties(cmpq PROPERTIES VERSION 1)
+
+install(TARGETS cmpq_static ARCHIVE DESTINATION lib)
+install(TARGETS cmpq DESTINATION lib)
diff --git a/src/lib/cm_communication/cm_libpq/Makefile b/src/lib/cm_communication/cm_libpq/Makefile
new file mode 100644
index 000000000..e3d729f89
--- /dev/null
+++ b/src/lib/cm_communication/cm_libpq/Makefile
@@ -0,0 +1,64 @@
+#----------------------------------------------------------------------------
+#
+# MPPDB CM libpq makefile
+#
+#
+# distribute/cm/cm_communication/cm_libpq/Makefile
+#
+#-----------------------------------------------------------------------------
+top_builddir = ../../../../
+include $(top_builddir)/src/Makefile.global
+subdir = src/lib/cm_communication/cm_libpq
+
+VERSION = 1
+
+include $(top_srcdir)/src/gausskernel/common.mk
+
+override CFLAGS += -fstack-protector-strong -Wl,-z,relro,-z,now
+
+ifneq "$(MAKECMDGOALS)" "clean"
+  ifneq "$(MAKECMDGOALS)" "distclean"
+    ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1"
+      -include $(DEPEND)
+    endif
+  endif
+endif
+
+override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC
+override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC
+
+OBJS = ip.o pqcomm.o pqformat.o strlcpy.o pqsignal.o
+
+.NOTPARALLEL:
+all: libcmpq.a libcmpq.so
+
+libcmpq.so:
+	$(CC) -fPIC -shared $(CFLAGS) $(CPPFLAGS) ip.cpp pqcomm.cpp pqformat.cpp strlcpy.cpp pqsignal.cpp -o libcmpq.so.$(VERSION)
+	rm -f libcmpq.so && \
+	ln -s libcmpq.so.$(VERSION) libcmpq.so
+
+libcmpq.a: $(OBJS)
+	$(AR) $(AROPT) $@ $^
+
+install: all installdirs
+	$(INSTALL_STLIB) libcmpq.a '$(DESTDIR)$(libdir)/libcmpq.a'
+	$(INSTALL_STLIB) libcmpq.so.$(VERSION) '$(DESTDIR)$(libdir)/libcmpq.so.$(VERSION)'
+	cd '$(DESTDIR)$(libdir)' && \
+	rm -f libcmpq.so && \
+	ln -s libcmpq.so.$(VERSION) libcmpq.so
+	$(INSTALL_DATA) $(top_builddir)/src/include/cm/libpq-fe.h '$(DESTDIR)$(includedir)/cm-libpq-fe.h'
+
+installdirs:
+	$(MKDIR_P) '$(DESTDIR)$(libdir)'
+	$(MKDIR_P) '$(DESTDIR)$(includedir)'
+
+uninstall:
+	rm -f '$(DESTDIR)$(libdir)/libcmpq.a'
+	rm -f '$(DESTDIR)$(includedir)/cm-libpq-fe.h'
+
+clean:
+	rm -f $(OBJS) libcmpq.a libcmpq.so libcmpq.so.$(VERSION) *.depend
+
+distclean: clean
+
+maintainer-clean: distclean
diff --git a/src/lib/cm_communication/cm_libpq/ip.cpp b/src/lib/cm_communication/cm_libpq/ip.cpp
new file mode 100644
index 000000000..6d1f96989
--- /dev/null
+++ b/src/lib/cm_communication/cm_libpq/ip.cpp
@@ -0,0 +1,59 @@
+/* -------------------------------------------------------------------------
+ *
+ * ip.c
+ *    IPv6-aware network access.
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ *
+ * IDENTIFICATION
+ *    $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.43 2009/01/01 17:23:42 momjian Exp $
+ *
+ * This file and the IPV6 implementation were initially provided by
+ * Nigel Kukard , Linux Based Systems Design
+ * http://www.lbsd.net.
+ *
+ * -------------------------------------------------------------------------
+ */
+
+/* This is intended to be used in both frontend and backend, so use c.h */
+#ifdef HAVE_NETINET_TCP_H
+#include
+#endif
+#include "cm/cm_c.h"
+
+/*
+ * cmpg_getaddrinfo_all - get address info for Unix, IPv4 and IPv6 sockets
+ */
+int cmpg_getaddrinfo_all(
+    const char* hostname, const char* servname, const struct addrinfo* hintp, struct addrinfo** result)
+{
+    int rc;
+
+    /* not all versions of getaddrinfo() zero *result on failure */
+    *result = NULL;
+
+    /* NULL has special meaning to getaddrinfo(). */
+    rc = getaddrinfo(((hostname == NULL) || hostname[0] == '\0') ? NULL : hostname, servname, hintp, result);
+
+    return rc;
+}
+
+/*
+ * cmpg_freeaddrinfo_all - free addrinfo structures for IPv4, IPv6, or Unix
+ *
+ * Note: the ai_family field of the original hint structure must be passed
+ * so that we can tell whether the addrinfo struct was built by the system's
+ * getaddrinfo() routine or our own getaddrinfo_unix() routine. Some versions
+ * of getaddrinfo() might be willing to return AF_UNIX addresses, so it's
+ * not safe to look at ai_family in the addrinfo itself.
+ */
+void cmpg_freeaddrinfo_all(struct addrinfo* ai)
+{
+    /* struct was built by getaddrinfo() */
+    if (ai != NULL) {
+        freeaddrinfo(ai);
+    }
+}
diff --git a/src/lib/cm_communication/cm_libpq/pqcomm.cpp b/src/lib/cm_communication/cm_libpq/pqcomm.cpp
new file mode 100644
index 000000000..1fbd2e732
--- /dev/null
+++ b/src/lib/cm_communication/cm_libpq/pqcomm.cpp
@@ -0,0 +1,1038 @@
+/* -------------------------------------------------------------------------
+ *
+ * pqcomm.c
+ *    Communication functions between the Frontend and the Backend
+ *
+ * These routines handle the low-level details of communication between
+ * frontend and backend. They just shove data across the communication
+ * channel, and are ignorant of the semantics of the data --- or would be,
+ * except for major brain damage in the design of the old COPY OUT protocol.
+ * Unfortunately, COPY OUT was designed to commandeer the communication
+ * channel (it just transfers data without wrapping it into messages).
+ * No other messages can be sent while COPY OUT is in progress; and if the
+ * copy is aborted by an ereport(ERROR), we need to close out the copy so that
+ * the frontend gets back into sync. Therefore, these routines have to be
+ * aware of COPY OUT state. (New COPY-OUT is message-based and does *not*
+ * set the DoingCopyOut flag.)
+ *
+ * NOTE: generally, it's a bad idea to emit outgoing messages directly with
+ * pq_putbytes(), especially if the message would require multiple calls
+ * to send. Instead, use the routines in pqformat.c to construct the message
+ * in a buffer and then emit it in one call to pq_putmessage. This ensures
+ * that the channel will not be clogged by an incomplete message if execution
+ * is aborted by ereport(ERROR) partway through the message. The only
+ * non-libpq code that should call pq_putbytes directly is old-style COPY OUT.
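+ *
+ * For example (illustrative only, using a hypothetical body buffer), a
+ * well-behaved sender builds the whole body first and then emits it in a
+ * single call:
+ *
+ *     pq_putmessage(myport, 'S', body.data, body.len);
+ *     (void) pq_flush(myport);
+ *
+ * rather than issuing several separate partial writes.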
+ *
+ * At one time, libpq was shared between frontend and backend, but now
+ * the backend's "backend/libpq" is quite separate from "interfaces/libpq".
+ * All that remains is similarities of names to trap the unwary...
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.198 2008/01/01 19:45:49 momjian Exp $
+ *
+ * -------------------------------------------------------------------------
+ */
+
+/* ------------------------
+ * INTERFACE ROUTINES
+ *
+ * setup/teardown:
+ *    StreamServerPort - Open postmaster's server port
+ *    StreamConnection - Create new connection with client
+ *    StreamClose      - Close a client/backend connection
+ *    TouchSocketFile  - Protect socket file against /tmp cleaners
+ *    pq_init          - initialize libpq at backend startup
+ *    pq_comm_reset    - reset libpq during error recovery
+ *    pq_close         - shutdown libpq at backend exit
+ *
+ * low-level I/O:
+ *    pq_getbytes      - get a known number of bytes from connection
+ *    pq_getstring     - get a null terminated string from connection
+ *    pq_getmessage    - get a message with length word from connection
+ *    pq_getbyte       - get next byte from connection
+ *    pq_peekbyte      - peek at next byte from connection
+ *    pq_putbytes      - send bytes to connection (not flushed until pq_flush)
+ *    pq_flush         - flush pending output
+ *
+ * message-level I/O (and old-style-COPY-OUT cruft):
+ *    pq_putmessage    - send a normal message (suppressed in COPY OUT mode)
+ *    pq_startcopyout  - inform libpq that a COPY OUT transfer is beginning
+ *    pq_endcopyout    - end a COPY OUT transfer
+ *
+ * ------------------------
+ */
+
+#include "pg_config.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#ifdef HAVE_NETINET_TCP_H
+#include
+#endif
+#include
+#ifdef HAVE_UTIME_H
+#include
+#endif
+#include "cm/cm_c.h"
+#include "cm/ip.h"
+#include "cm/libpq.h"
+#include "cm/libpq-be.h"
+#include "cm/elog.h"
+
+#define MAXGTMPATH 256
+
+#ifndef NO_SOCKET
+const int NO_SOCKET = -1;
+#endif
+
+extern int tcp_keepalives_idle;
+extern int tcp_keepalives_interval;
+extern int tcp_keepalives_count;
+
+/*
+ * Buffers for low-level I/O
+ */
+
+/* Internal functions */
+static int internal_putbytes(Port* myport, const char* s, size_t len);
+static int internal_flush(Port* myport);
+
+/*
+ * Streams -- wrapper around Unix socket system calls
+ *
+ *
+ * Stream functions are used for vanilla TCP connection protocol.
+ */
+
+/*
+ * StreamServerPort -- open a "listening" port to accept connections.
+ *
+ * Successfully opened sockets are added to the ListenSocket[] array,
+ * at the first position that isn't -1.
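+ *
+ * A typical (hypothetical) call binds every address of the local host:
+ *
+ *     int listen_socks[MAXLISTEN];    /* each slot pre-set to -1 */
+ *
+ *     if (StreamServerPort(AF_UNSPEC, NULL, 5000, listen_socks, MAXLISTEN) != STATUS_OK)
+ *         ... handle the failure ...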
+ *
+ * RETURNS: STATUS_OK or STATUS_ERROR
+ */
+
+int StreamServerPort(int family, char* hostName, unsigned short portNumber, int ListenSocket[], int MaxListen)
+{
+    int fd = 0;
+    int err;
+    int maxconn;
+    int ret;
+    char portNumberStr[32];
+    const char* familyDesc = NULL;
+    char familyDescBuf[64];
+    char* service = NULL;
+    struct addrinfo* addrs = NULL;
+    struct addrinfo* addr = NULL;
+    struct addrinfo hint;
+    int listen_index = 0;
+    int added = 0;
+    errno_t rc = 0;
+
+#if !defined(WIN32) || defined(IPV6_V6ONLY)
+    int one = 1;
+#endif
+
+    /* Initialize hint structure */
+    rc = memset_s(&hint, sizeof(hint), 0, sizeof(hint));
+    securec_check_errno(rc, );
+
+    hint.ai_family = family;
+    hint.ai_flags = AI_PASSIVE;
+    hint.ai_socktype = SOCK_STREAM;
+
+    {
+        rc = snprintf_s(portNumberStr, sizeof(portNumberStr), sizeof(portNumberStr) - 1, "%d", portNumber);
+        securec_check_intval(rc, );
+        service = portNumberStr;
+    }
+
+    ret = cmpg_getaddrinfo_all(hostName, service, &hint, &addrs);
+    if (ret || (addrs == NULL)) {
+        if (hostName != NULL) {
+            write_runlog(LOG,
+                "could not translate host name \"%s\", service \"%s\" to address: %s",
+                hostName,
+                service,
+                gai_strerror(ret));
+        } else {
+            write_runlog(LOG, "could not translate service \"%s\" to address: %s", service, gai_strerror(ret));
+        }
+        if (addrs != NULL) {
+            cmpg_freeaddrinfo_all(addrs);
+        }
+        return STATUS_ERROR;
+    }
+
+    for (addr = addrs; addr != NULL; addr = addr->ai_next) {
+        if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family)) {
+            /*
+             * Only set up a unix domain socket when they really asked for it.
+             * The service/port is different in that case.
+             */
+            continue;
+        }
+
+        /* See if there is still room to add 1 more socket. */
+        for (; listen_index < MaxListen; listen_index++) {
+            if (ListenSocket[listen_index] == -1) {
+                break;
+            }
+        }
+        if (listen_index >= MaxListen) {
+            write_runlog(LOG, "could not bind to all requested addresses: MAXLISTEN (%d) exceeded\n", MaxListen);
+            break;
+        }
+
+        /* set up family name for possible error messages */
+        switch (addr->ai_family) {
+            case AF_INET:
+                familyDesc = "IPv4";
+                break;
+#ifdef HAVE_IPV6
+            case AF_INET6:
+                familyDesc = "IPv6";
+                break;
+#endif
+            default:
+                rc = snprintf_s(familyDescBuf,
+                    sizeof(familyDescBuf),
+                    sizeof(familyDescBuf) - 1,
+                    "unrecognized address family %d",
+                    addr->ai_family);
+                securec_check_intval(rc, );
+                familyDesc = familyDescBuf;
+                break;
+        }
+
+        if ((fd = socket(addr->ai_family, SOCK_STREAM, 0)) < 0) {
+            write_runlog(LOG, "could not create %s socket: \n", familyDesc);
+            continue;
+        }
+
+#ifndef WIN32
+
+        /*
+         * Without the SO_REUSEADDR flag, a new postmaster can't be started
+         * right away after a stop or crash, giving "address already in use"
+         * error on TCP ports.
+         *
+         * On win32, however, this behavior only happens if the
+         * SO_EXCLUSIVEADDRUSE flag is set. With SO_REUSEADDR, win32 allows
+         * multiple servers to listen on the same address, resulting in
+         * unpredictable behavior. With no flags at all, win32 behaves as Unix
+         * with SO_REUSEADDR.
+         */
+        if (!IS_AF_UNIX(addr->ai_family)) {
+            if ((setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char*)&one, sizeof(one))) == -1) {
+                write_runlog(LOG, "setsockopt(SO_REUSEADDR) failed: \n");
+                close(fd);
+                continue;
+            }
+        }
+#endif
+
+#ifdef IPV6_V6ONLY
+        if (addr->ai_family == AF_INET6) {
+            if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char*)&one, sizeof(one)) == -1) {
+                write_runlog(LOG, "setsockopt(IPV6_V6ONLY) failed: \n");
+                close(fd);
+                continue;
+            }
+        }
+#endif
+
+        /*
+         * Note: This might fail on some OS's, like Linux older than
+         * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
+         * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+         * connections.
+         */
+        err = bind(fd, addr->ai_addr, addr->ai_addrlen);
+        if (err < 0) {
+            write_runlog(LOG,
+                "could not bind %s socket: Is another instance already running on port %d? If not, wait a few seconds "
+                "and retry.\n",
+                familyDesc,
+                (int)portNumber);
+
+            close(fd);
+            continue;
+        }
+
+#define CM_MAX_CONNECTIONS 1024
+
+        /*
+         * Select appropriate accept-queue length limit. PG_SOMAXCONN is only
+         * intended to provide a clamp on the request on platforms where an
+         * overly large request provokes a kernel error (are there any?).
+         */
+        maxconn = CM_MAX_CONNECTIONS * 2;
+
+        err = listen(fd, maxconn);
+        if (err < 0) {
+            write_runlog(LOG, "could not listen on %s socket: \n", familyDesc);
+            close(fd);
+            continue;
+        }
+        ListenSocket[listen_index] = fd;
+        added++;
+    }
+
+    cmpg_freeaddrinfo_all(addrs);
+
+    if (!added) {
+        return STATUS_ERROR;
+    }
+
+    return STATUS_OK;
+}
+
+int SetSocketNoBlock(int isocketId)
+{
+    int iFlag = 0;
+    int ret = 0;
+    uint32 uFlag = 0;
+
+    iFlag = fcntl(isocketId, F_GETFL, 0);
+    if (iFlag < 0) {
+        write_runlog(LOG,
+            "failed to get socket flags (socketId = %d, errno = %d, errinfo = %s).",
+            isocketId,
+            errno,
+            strerror(errno));
+        return STATUS_ERROR;
+    }
+
+    uFlag = (uint32)iFlag;
+    uFlag |= O_NONBLOCK;
+
+    ret = fcntl(isocketId, F_SETFL, uFlag);
+    if (ret < 0) {
+        write_runlog(LOG,
+            "failed to set socket non-blocking (socketId = %d, errno = %d, errinfo = %s).",
+            isocketId,
+            errno,
+            strerror(errno));
+        return STATUS_ERROR;
+    }
+
+    return STATUS_OK;
+}
+
+/*
+ * StreamConnection -- create a new connection with client using
+ *    server port. Set port->sock to the FD of the new connection.
+ *
+ * ASSUME: that this doesn't need to be non-blocking because
+ *    the Postmaster uses select() to tell when the server master
+ *    socket is ready for accept().
+ *
+ * RETURNS: STATUS_OK or STATUS_ERROR
+ */
+int StreamConnection(int server_fd, Port* port)
+{
+    /* accept connection and fill in the client (remote) address */
+    port->raddr.salen = sizeof(port->raddr.addr);
+    if ((port->sock = accept(server_fd, (struct sockaddr*)&port->raddr.addr, (socklen_t*)&port->raddr.salen)) < 0) {
+        write_runlog(LOG, "could not accept new connection: \n");
+
+        /*
+         * If accept() fails then postmaster.c will still see the server
+         * socket as read-ready, and will immediately try again. To avoid
+         * uselessly sucking lots of CPU, delay a bit before trying again.
+         * (The most likely reason for failure is being out of kernel file
+         * table slots; we can do little except hope some will get freed up.)
+         */
+        return STATUS_ERROR;
+    }
+
+#ifdef SCO_ACCEPT_BUG
+
+    /*
+     * UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
+     * shouldn't hurt to catch it for all versions of those platforms.
+     */
+    if (port->raddr.addr.ss_family == 0) {
+        port->raddr.addr.ss_family = AF_UNIX;
+    }
+#endif
+
+    /* fill in the server (local) address */
+    port->laddr.salen = sizeof(port->laddr.addr);
+    if (getsockname(port->sock, (struct sockaddr*)&port->laddr.addr, (socklen_t*)&port->laddr.salen) < 0) {
+        write_runlog(LOG, "getsockname() failed !\n");
+        return STATUS_ERROR;
+    }
+
+    /* select NODELAY and KEEPALIVE options if it's a TCP connection */
+    if (!IS_AF_UNIX(port->laddr.addr.ss_family)) {
+        int on;
+
+#ifdef TCP_NODELAY
+        on = 1;
+        if (setsockopt(port->sock, IPPROTO_TCP, TCP_NODELAY, (char*)&on, sizeof(on)) < 0) {
+            write_runlog(LOG, "setsockopt(TCP_NODELAY) failed\n");
+            return STATUS_ERROR;
+        }
+#endif
+        on = 1;
+        if (setsockopt(port->sock, SOL_SOCKET, SO_KEEPALIVE, (char*)&on, sizeof(on)) < 0) {
+            write_runlog(LOG, "setsockopt(SO_KEEPALIVE) failed\n");
+            return STATUS_ERROR;
+        }
+
+        on = SetSocketNoBlock(port->sock);
+        if (STATUS_OK != on) {
+            write_runlog(LOG, "SetSocketNoBlock failed\n");
+            return STATUS_ERROR;
+        }
+        /*
+         * Also apply the current keepalive parameters. If we fail to set a
+         * parameter, don't error out, because these aren't universally
+         * supported. (Note: you might think we need to reset the GUC
+         * variables to 0 in such a case, but it's not necessary because the
+         * show hooks for these variables report the truth anyway.)
+         */
+        (void)pq_setkeepalivesidle(tcp_keepalives_idle, port);
+        (void)pq_setkeepalivesinterval(tcp_keepalives_interval, port);
+        (void)pq_setkeepalivescount(tcp_keepalives_count, port);
+    }
+
+    return STATUS_OK;
+}
+
+/*
+ * StreamClose -- close a client/backend connection
+ *
+ * NOTE: this is NOT used to terminate a session; it is just used to release
+ * the file descriptor in a process that should no longer have the socket
+ * open. (For example, the postmaster calls this after passing ownership
+ * of the connection to a child process.) It is expected that someone else
+ * still has the socket open. So, we only want to close the descriptor,
+ * we do NOT want to send anything to the far end.
+ */
+void StreamClose(int sock)
+{
+    close(sock);
+}
+
+/* --------------------------------
+ * Low-level I/O routines begin here.
+ *
+ * These routines communicate with a frontend client across a connection
+ * already established by the preceding routines.
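+ *
+ * The receive side is a sliding window over PqRecvBuffer: bytes in
+ * [PqRecvPointer, PqRecvLength) are buffered but not yet consumed. For
+ * example (hypothetical numbers), after 5 of a 12-byte bufferload have
+ * been read, PqRecvPointer is 5 and PqRecvLength is 12, and pq_recvbuf()
+ * left-justifies the 7 unread bytes before asking the kernel for more.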
+ * --------------------------------
+ */
+
+/* --------------------------------
+ * pq_recvbuf - load some bytes into the input buffer
+ *
+ * returns 0 if OK, EOF if trouble
+ * --------------------------------
+ */
+static int pq_recvbuf(Port* myport)
+{
+    errno_t rc;
+    if (myport->PqRecvPointer > 0) {
+        if (myport->PqRecvLength > myport->PqRecvPointer) {
+            /* still some unread data, left-justify it in the buffer */
+            rc = memmove_s(myport->PqRecvBuffer,
+                myport->PqRecvLength,
+                myport->PqRecvBuffer + myport->PqRecvPointer,
+                myport->PqRecvLength - myport->PqRecvPointer);
+            securec_check_errno(rc, );
+            myport->PqRecvLength -= myport->PqRecvPointer;
+            myport->PqRecvPointer = 0;
+        } else {
+            myport->PqRecvLength = myport->PqRecvPointer = 0;
+        }
+    }
+
+    /* Can fill buffer from myport->PqRecvLength and upwards */
+    for (;;) {
+        int r;
+
+        r = recv(myport->sock,
+            myport->PqRecvBuffer + myport->PqRecvLength,
+            PQ_BUFFER_SIZE - myport->PqRecvLength,
+            MSG_DONTWAIT);
+        myport->last_call = CM_LastCall_RECV;
+
+        if (r < 0) {
+            myport->last_errno = errno;
+            if (errno == EINTR)
+                continue; /* Ok if interrupted */
+
+            if (EPIPE == errno) {
+                return TCP_SOCKET_ERROR_EPIPE;
+            }
+
+            /*
+             * The socket's file descriptor is marked O_NONBLOCK and no data is
+             * waiting to be received; or MSG_OOB is set and no out-of-band data
+             * is available and either the socket's file descriptor is marked
+             * O_NONBLOCK or the socket does not support blocking to await
+             * out-of-band data.
+             */
+            if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
+                return TCP_SOCKET_ERROR_NO_MESSAGE;
+            }
+
+            /*
+             * Careful: an ereport() that tries to write to the client would
+             * cause recursion to here, leading to stack overflow and core
+             * dump! This message must go *only* to the postmaster log.
+             */
+            write_runlog(ERROR, "could not receive data from client: err=%d\n", errno);
+            return TCP_SOCKET_ERROR_EPIPE;
+        } else {
+            myport->last_errno = 0;
+        }
+        if (r == 0) {
+            /*
+             * EOF detected. We used to write a log message here, but it's
+             * better to expect the ultimate caller to do that.
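+             * (Callers can distinguish this EOF return from
+             * TCP_SOCKET_ERROR_NO_MESSAGE, which merely means the
+             * non-blocking recv() found nothing to read yet.)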
+             */
+            return EOF;
+        }
+        /* r contains number of bytes read, so just incr length */
+        myport->PqRecvLength += r;
+        return 0;
+    }
+}
+
+/* --------------------------------
+ * pq_getbyte - get a single byte from connection, or return EOF
+ * --------------------------------
+ */
+int pq_getbyte(Port* myport)
+{
+    int ret;
+    while (myport->PqRecvPointer >= myport->PqRecvLength) {
+        ret = pq_recvbuf(myport);
+        if (0 == ret) {
+            continue;
+        } else {
+            return ret; /* Failed to recv data */
+        }
+    }
+    return (unsigned char)myport->PqRecvBuffer[myport->PqRecvPointer++];
+}
+
+/* --------------------------------
+ * pq_getbytes - get a known number of bytes from connection
+ *
+ * returns 0 if OK, EOF if trouble
+ * --------------------------------
+ */
+int pq_getbytes(Port* myport, char* s, size_t len, size_t* recvlen)
+{
+    size_t amount;
+    *recvlen = 0;
+    int ret;
+    errno_t rc;
+
+    while (len > 0) {
+        while (myport->PqRecvPointer >= myport->PqRecvLength) {
+            ret = pq_recvbuf(myport);
+            if (0 != ret && TCP_SOCKET_ERROR_NO_MESSAGE != ret) {
+                return ret; /* Failed to recv data */
+            }
+        }
+        amount = myport->PqRecvLength - myport->PqRecvPointer;
+        if (amount > len) {
+            amount = len;
+        }
+        rc = memcpy_s(s, len, myport->PqRecvBuffer + myport->PqRecvPointer, amount);
+        securec_check_errno(rc, );
+        myport->PqRecvPointer += amount;
+        s += amount;
+        len -= amount;
+        *recvlen += amount;
+    }
+    return 0;
+}
+
+#ifdef NOT_USED
+/* --------------------------------
+ * pq_discardbytes - throw away a known number of bytes
+ *
+ * same as pq_getbytes except we do not copy the data to anyplace.
+ * this is used for resynchronizing after read errors.
+ *
+ * returns 0 if OK, EOF if trouble
+ * --------------------------------
+ */
+static int pq_discardbytes(Port* myport, size_t len)
+{
+    size_t amount;
+    int ret;
+
+    while (len > 0) {
+        while (myport->PqRecvPointer >= myport->PqRecvLength) {
+            ret = pq_recvbuf(myport);
+            if (0 == ret) {
+                /* If nothing in buffer, then recv some */
+                continue;
+            } else {
+                return ret; /* Failed to recv data */
+            }
+        }
+        amount = myport->PqRecvLength - myport->PqRecvPointer;
+        if (amount > len) {
+            amount = len;
+        }
+        myport->PqRecvPointer += amount;
+        len -= amount;
+    }
+    return 0;
+}
+#endif /* NOT_USED */
+
+/* --------------------------------
+ * pq_getmessage - get a message with length word from connection
+ *
+ * The return value is placed in an expansible StringInfo, which has
+ * already been initialized by the caller.
+ * Only the message body is placed in the StringInfo; the length word
+ * is removed. Also, s->cursor is initialized to zero for convenience
+ * in scanning the message contents.
+ *
+ * If maxlen is not zero, it is an upper limit on the length of the
+ * message we are willing to accept. We abort the connection (by
+ * returning EOF) if client tries to send more than that.
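+ *
+ * Illustrative example: for a message whose length word is 0x0000000C,
+ * s->msglen becomes 12 - 4 = 8 and the call returns 0 once all 8 body
+ * bytes are in s->data; a nonzero s->msglen doubles as a "length already
+ * parsed" flag, letting a repeated call skip the length word.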
+ *
+ * returns 0 if OK, EOF if trouble
+ * --------------------------------
+ */
+int pq_getmessage(Port* myport, CM_StringInfo s, int maxlen)
+{
+    int32 len = 0;
+    size_t recvlen = 0;
+    int ret = 0;
+
+    /* Read message length word */
+    if (0 == s->msglen) {
+        ret = pq_getbytes(myport, (char*)&len, (size_t)4, &recvlen);
+        if (ret != 0) {
+            if (EOF == ret) {
+                write_runlog(ERROR, "unexpected EOF within message length word\n");
+            }
+            return ret;
+        }
+
+        len = ntohl(len);
+
+        if (len < 4 || (maxlen > 0 && len > maxlen)) {
+            write_runlog(ERROR, "invalid message length");
+            return EOF;
+        }
+
+        len -= 4; /* discount length itself */
+        s->msglen = len;
+        if (len > 0) {
+            /*
+             * Allocate space for message. If we run out of room (ridiculously
+             * large message), we will elog(ERROR), but we want to discard the
+             * message body so as not to lose communication sync.
+             */
+            if (0 != CM_enlargeStringInfo(s, len)) {
+                return EOF;
+            }
+        }
+    }
+
+    if (s->msglen > s->len) {
+        /* And grab the message */
+        ret = pq_getbytes(myport, s->data + s->len, s->msglen - s->len, &(recvlen));
+        if (ret != 0) {
+            write_runlog(ERROR, "incomplete message from client, ret=%d\n", ret);
+            return ret;
+        }
+        s->len = s->len + recvlen;
+        /* Place a trailing null per StringInfo convention */
+        s->data[s->len] = '\0';
+    }
+
+    return 0;
+}
+
+static int internal_putbytes(Port* myport, const char* s, size_t len)
+{
+    size_t amount;
+    int ret;
+    errno_t rc;
+
+    while (len > 0) {
+        /* If buffer is full, then flush it out */
+        if (myport->PqSendPointer >= PQ_BUFFER_SIZE) {
+            ret = internal_flush(myport);
+            if (0 != ret) {
+                return ret;
+            }
+        }
+        amount = PQ_BUFFER_SIZE - myport->PqSendPointer;
+        if (amount > len) {
+            amount = len;
+        }
+        rc = memcpy_s(myport->PqSendBuffer + myport->PqSendPointer, PQ_BUFFER_SIZE - myport->PqSendPointer, s, amount);
+        securec_check_errno(rc, );
+        myport->PqSendPointer += amount;
+        s += amount;
+        len -= amount;
+    }
+    return 0;
+}
+
+/* --------------------------------
+ * pq_flush - flush pending output
+ *
+ * returns 0 if OK, EOF if trouble
+ * --------------------------------
+ */
+int pq_flush(Port* myport)
+{
+    int res;
+
+    /* No-op if reentrant call */
+    res = internal_flush(myport);
+    return res;
+}
+
+static int internal_flush(Port* myport)
+{
+    static THR_LOCAL int last_reported_send_errno = 0;
+
+    char* bufptr = myport->PqSendBuffer;
+    char* bufend = myport->PqSendBuffer + myport->PqSendPointer;
+
+    while (bufptr < bufend) {
+        int r;
+    resend:
+        errno = 0;
+        r = send(myport->sock, bufptr, bufend - bufptr, MSG_DONTWAIT);
+        myport->last_call = CM_LastCall_SEND;
+        if (r <= 0) {
+            myport->last_errno = errno;
+            if (errno == EINTR) {
+                continue; /* Ok if we were interrupted */
+            }
+
+            if (EPIPE == errno) {
+                return TCP_SOCKET_ERROR_EPIPE;
+            }
+
+            if (errno == EAGAIN || errno == EWOULDBLOCK) {
+                goto resend;
+            }
+
+            /*
+             * Careful: an ereport() that tries to write to the client would
+             * cause recursion to here, leading to stack overflow and core
+             * dump! This message must go *only* to the postmaster log.
+             *
+             * If a client disconnects while we're in the midst of output, we
+             * might write quite a bit of data before we get to a safe query
+             * abort point. So, suppress duplicate log messages.
+             */
+            if (errno != last_reported_send_errno) {
+                last_reported_send_errno = errno;
+                write_runlog(ERROR, "could not send data to client: \n");
+            }
+
+            /*
+             * We drop the buffered data anyway so that processing can
+             * continue, even though we'll probably quit soon.
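+             * (EAGAIN/EWOULDBLOCK never reaches this point: the resend
+             * label above simply retries, so internal_flush effectively
+             * blocks until the kernel accepts the data or a hard error
+             * occurs.)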
+             */
+            myport->PqSendPointer = 0;
+            return EOF;
+        } else {
+            myport->last_errno = 0;
+        }
+
+        last_reported_send_errno = 0; /* reset after any successful send */
+        bufptr += r;
+    }
+
+    myport->PqSendPointer = 0;
+    return 0;
+}
+
+/* --------------------------------
+ * Message-level I/O routines begin here.
+ *
+ * These routines understand about the old-style COPY OUT protocol.
+ * --------------------------------
+ */
+
+/* --------------------------------
+ * pq_putmessage - send a normal message (suppressed in COPY OUT mode)
+ *
+ * If msgtype is not '\0', it is a message type code to place before
+ * the message body. If msgtype is '\0', then the message has no type
+ * code (this is only valid in pre-3.0 protocols).
+ *
+ * len is the length of the message body data at *s. In protocol 3.0
+ * and later, a message length word (equal to len+4 because it counts
+ * itself too) is inserted by this routine.
+ *
+ * All normal messages are suppressed while old-style COPY OUT is in
+ * progress. (In practice only a few notice messages might get emitted
+ * then; dropping them is annoying, but at least they will still appear
+ * in the postmaster log.)
+ *
+ * We also suppress messages generated while pqcomm.c is busy. This
+ * avoids any possibility of messages being inserted within other
+ * messages. The only known trouble case arises if SIGQUIT occurs
+ * during a pqcomm.c routine --- quickdie() will try to send a warning
+ * message, and the most reasonable approach seems to be to drop it.
+ *
+ * returns 0 if OK, EOF if trouble
+ * --------------------------------
+ */
+int pq_putmessage(Port* myport, char msgtype, const char* s, size_t len)
+{
+    uint32 n32;
+    int ret;
+    if (msgtype) {
+        ret = internal_putbytes(myport, &msgtype, 1);
+        if (ret != 0) {
+            return ret;
+        }
+    }
+
+    n32 = htonl((uint32)(len + 4));
+    ret = internal_putbytes(myport, (char*)&n32, 4);
+    if (ret != 0) {
+        return ret;
+    }
+
+    ret = internal_putbytes(myport, s, len);
+    if (ret != 0) {
+        return ret;
+    }
+
+    return 0;
+}
+
+/*
+ * Support for TCP Keepalive parameters
+ */
+
+int pq_getkeepalivesidle(Port* port)
+{
+#ifdef TCP_KEEPIDLE
+    if (port == NULL || IS_AF_UNIX(port->laddr.addr.ss_family)) {
+        return 0;
+    }
+
+    if (port->keepalives_idle != 0) {
+        return port->keepalives_idle;
+    }
+
+    if (port->default_keepalives_idle == 0) {
+        ACCEPT_TYPE_ARG3 size = sizeof(port->default_keepalives_idle);
+
+        if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPIDLE, (char*)&port->default_keepalives_idle, &size) < 0) {
+            write_runlog(LOG, "getsockopt(TCP_KEEPIDLE) failed:\n");
+            port->default_keepalives_idle = -1; /* don't know */
+        }
+    }
+
+    return port->default_keepalives_idle;
+#else
+    return 0;
+#endif
+}
+
+int pq_setkeepalivesidle(int idle, Port* port)
+{
+    if (port == NULL || IS_AF_UNIX(port->laddr.addr.ss_family)) {
+        return STATUS_OK;
+    }
+
+#ifdef TCP_KEEPIDLE
+    if (idle == port->keepalives_idle) {
+        return STATUS_OK;
+    }
+
+    if (port->default_keepalives_idle <= 0) {
+        if (pq_getkeepalivesidle(port) < 0) {
+            if (idle == 0) {
+                return STATUS_OK; /* default is set but unknown */
+            } else {
+                return STATUS_ERROR;
+            }
+        }
+    }
+
+    if (idle == 0) {
+        idle = port->default_keepalives_idle;
+    }
+
+    if (setsockopt(port->sock, IPPROTO_TCP, TCP_KEEPIDLE, (char*)&idle, sizeof(idle)) < 0) {
+        write_runlog(LOG, "setsockopt(TCP_KEEPIDLE) failed:\n");
+        return STATUS_ERROR;
+    }
+
+    port->keepalives_idle = idle;
+#else
+    if (idle != 0) {
+        write_runlog(LOG, "setsockopt(TCP_KEEPIDLE) not supported\n");
supported"); + return STATUS_ERROR; + } +#endif + + return STATUS_OK; +} + +int pq_getkeepalivesinterval(Port* port) +{ +#ifdef TCP_KEEPINTVL + if (port == NULL || IS_AF_UNIX(port->laddr.addr.ss_family)) { + return 0; + } + + if (port->keepalives_interval != 0) { + return port->keepalives_interval; + } + + if (port->default_keepalives_interval == 0) { + ACCEPT_TYPE_ARG3 size = sizeof(port->default_keepalives_interval); + + if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPINTVL, (char*)&port->default_keepalives_interval, &size) < 0) { + write_runlog(LOG, "getsockopt(TCP_KEEPINTVL) failed:\n"); + port->default_keepalives_interval = -1; /* don't know */ + } + } + + return port->default_keepalives_interval; +#else + return 0; +#endif +} + +int pq_setkeepalivesinterval(int interval, Port* port) +{ + if (port == NULL || IS_AF_UNIX(port->laddr.addr.ss_family)) { + return STATUS_OK; + } + +#ifdef TCP_KEEPINTVL + if (interval == port->keepalives_interval) { + return STATUS_OK; + } + + if (port->default_keepalives_interval <= 0) { + if (pq_getkeepalivesinterval(port) < 0) { + if (interval == 0) { + return STATUS_OK; /* default is set but unknown */ + } else { + return STATUS_ERROR; + } + } + } + + if (interval == 0) { + interval = port->default_keepalives_interval; + } + + if (setsockopt(port->sock, IPPROTO_TCP, TCP_KEEPINTVL, (char*)&interval, sizeof(interval)) < 0) { + write_runlog(LOG, "setsockopt(TCP_KEEPINTVL) failed: \n"); + return STATUS_ERROR; + } + + port->keepalives_interval = interval; +#else + if (interval != 0) { + write_runlog(LOG, "setsockopt(TCP_KEEPINTVL) not supported\n"); + return STATUS_ERROR; + } +#endif + + return STATUS_OK; +} + +int pq_getkeepalivescount(Port* port) +{ +#ifdef TCP_KEEPCNT + if (port == NULL || IS_AF_UNIX(port->laddr.addr.ss_family)) { + return 0; + } + + if (port->keepalives_count != 0) { + return port->keepalives_count; + } + + if (port->default_keepalives_count == 0) { + ACCEPT_TYPE_ARG3 size = sizeof(port->default_keepalives_count); + + if (getsockopt(port->sock, IPPROTO_TCP, TCP_KEEPCNT, (char*)&port->default_keepalives_count, &size) < 0) { + write_runlog(LOG, "getsockopt(TCP_KEEPCNT) failed: \n"); + port->default_keepalives_count = -1; /* don't know */ + } + } + + return port->default_keepalives_count; +#else + return 0; +#endif +} + +int pq_setkeepalivescount(int count, Port* port) +{ + if (port == NULL || IS_AF_UNIX(port->laddr.addr.ss_family)) { + return STATUS_OK; + } + +#ifdef TCP_KEEPCNT + if (count == port->keepalives_count) { + return STATUS_OK; + } + + if (port->default_keepalives_count <= 0) { + if (pq_getkeepalivescount(port) < 0) { + if (count == 0) { + return STATUS_OK; /* default is set but unknown */ + } else { + return STATUS_ERROR; + } + } + } + + if (count == 0) { + count = port->default_keepalives_count; + } + + if (port->sock != NO_SOCKET) { + if (setsockopt(port->sock, IPPROTO_TCP, TCP_KEEPCNT, (char*)&count, sizeof(count)) < 0) { + write_runlog(LOG, "setsockopt(TCP_KEEPCNT) failed: \n"); + return STATUS_ERROR; + } + + port->keepalives_count = count; + } + +#else + if (count != 0) { + write_runlog(LOG, "setsockopt(TCP_KEEPCNT) not supported\n"); + return STATUS_ERROR; + } +#endif + + return STATUS_OK; +} diff --git a/src/lib/cm_communication/cm_libpq/pqformat.cpp b/src/lib/cm_communication/cm_libpq/pqformat.cpp new file mode 100644 index 000000000..265bb3e45 --- /dev/null +++ b/src/lib/cm_communication/cm_libpq/pqformat.cpp @@ -0,0 +1,154 @@ +/* ------------------------------------------------------------------------- + * + * 
pqformat.c + * Routines for formatting and parsing frontend/backend messages + * + * Outgoing messages are built up in a StringInfo buffer (which is expansible) + * and then sent in a single call to pq_putmessage. This module provides data + * formatting/conversion routines that are needed to produce valid messages. + * Note in particular the distinction between "raw data" and "text"; raw data + * is message protocol characters and binary values that are not subject to + * character set conversion, while text is converted by character encoding + * rules. + * + * Incoming messages are similarly read into a StringInfo buffer, via + * pq_getmessage, and then parsed and converted from that using the routines + * in this module. + * + * These same routines support reading and writing of external binary formats + * (typsend/typreceive routines). The conversion routines for individual + * data types are exactly the same, only initialization and completion + * are different. + * + * + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group + * + * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.48 2009/01/01 17:23:42 momjian Exp $ + * + * ------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * Message assembly and output: + * pq_beginmessage - initialize StringInfo buffer + * pq_sendbyte - append a raw byte to a StringInfo buffer + * pq_sendint - append a binary integer to a StringInfo buffer + * pq_sendint64 - append a binary 8-byte int to a StringInfo buffer + * pq_sendfloat4 - append a float4 to a StringInfo buffer + * pq_sendfloat8 - append a float8 to a StringInfo buffer + * pq_sendbytes - append raw data to a StringInfo buffer + * pq_sendcountedtext - append a counted text string (with character set conversion) + * pq_sendtext - append a text string (with conversion) + * pq_sendstring - append a null-terminated text string (with conversion) + * pq_send_ascii_string - append a null-terminated text string (without conversion) + * pq_endmessage - send the completed message to the frontend + * Note: it is also possible to append data to the StringInfo buffer using + * the regular StringInfo routines, but this is discouraged since required + * character set conversion may not occur. 
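+ *
+ * For example, a caller would typically assemble and ship a message along
+ * these lines (a sketch only; the CM variants pass an explicit Port, and
+ * exact signatures may differ):
+ *
+ *		pq_beginmessage(&buf, 'S');
+ *		pq_sendint(&buf, status, 4);
+ *		pq_endmessage(myport, &buf);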
+ *
+ * typsend support (construct a bytea value containing external binary data):
+ *		pq_begintypsend - initialize StringInfo buffer
+ *		pq_endtypsend	- return the completed string as a "bytea*"
+ *
+ * Special-case message output:
+ *		pq_puttextmessage - generate a character set-converted message in one step
+ *		pq_putemptymessage - convenience routine for message with empty body
+ *
+ * Message parsing after input:
+ *		pq_getmsgbyte	- get a raw byte from a message buffer
+ *		pq_getmsgint	- get a binary integer from a message buffer
+ *		pq_getmsgint64	- get a binary 8-byte int from a message buffer
+ *		pq_getmsgfloat4 - get a float4 from a message buffer
+ *		pq_getmsgfloat8 - get a float8 from a message buffer
+ *		pq_getmsgbytes	- get raw data from a message buffer
+ *		pq_copymsgbytes - copy raw data from a message buffer
+ *		pq_getmsgtext	- get a counted text string (with conversion)
+ *		pq_getmsgstring - get a null-terminated text string (with conversion)
+ *		pq_getmsgend	- verify message fully consumed
+ *		pq_getmsgunreadlen - get length of the unread data in the message buffer
+ */
+
+#include
+#include
+#include
+#include "cm/libpq.h"
+#include "cm/pqformat.h"
+#include "cm/elog.h"
+
+/* --------------------------------
+ * pq_get_msg_type - peek at the data at the current read position
+ *
+ * Returns a pointer into the message buffer at the current cursor
+ * without advancing it; datalen is only validated against the
+ * unread length.
+ * --------------------------------
+ */
+const char* pq_get_msg_type(CM_StringInfo msg, int datalen)
+{
+    const char* result = NULL;
+
+    if (datalen < 0 || datalen > (msg->len - msg->cursor)) {
+        write_runlog(ERROR,
+            "pq_get_msg_type: insufficient data left in message, datalen=%d, msg->len=%d, msg->cursor=%d.\n",
+            datalen,
+            msg->len,
+            msg->cursor);
+        return NULL;
+    }
+
+    result = &msg->data[msg->cursor];
+    return result;
+}
+
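These read-side helpers pair with pq_getmessage() in pqcomm.cpp: the caller drains one length-prefixed message into a CM_StringInfo and then consumes it through the cursor-based getters. A minimal consumer sketch (the CM_initStringInfo helper and the two-field message layout are assumptions for illustration; error handling elided):

```
/* Sketch: read one message, then consume a 4-byte field plus raw payload.
 * On a non-blocking socket pq_getmessage() may have to be retried until
 * the whole body has arrived. */
CM_StringInfoData input;                    /* assumed struct spelling */
CM_initStringInfo(&input);                  /* hypothetical init helper */
if (pq_getmessage(myport, &input, 0) == 0) {    /* maxlen 0: no length cap */
    const char* head = pq_getmsgbytes(&input, 4);   /* advances the cursor */
    if (head != NULL) {
        uint32 netval;
        errno_t rc = memcpy_s(&netval, sizeof(netval), head, 4); /* data may be unaligned */
        securec_check_errno(rc, );
        uint32 value = ntohl(netval);
        const char* payload = pq_getmsgbytes(&input, input.msglen - 4);
        /* ... interpret value and payload ... */
    }
}
```

+/* --------------------------------
+ * pq_getmsgbytes - get raw data from a message buffer
+ *
+ * Returns a pointer directly into the message buffer; note this
+ * may not have any particular alignment.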
+ * --------------------------------
+ */
+const char* pq_getmsgbytes(CM_StringInfo msg, int datalen)
+{
+    const char* result = NULL;
+    errno_t rc = 0;
+    const int printMsgLen = 101;
+    char dataLog[printMsgLen] = {0};
+    if (datalen < 0 || datalen > (msg->len - msg->cursor)) {
+        write_runlog(ERROR,
+            "pq_getmsgbytes: insufficient data left in message, "
+            "datalen=%d, msg->len=%d, msg->maxlen=%d, msg->cursor=%d,"
+            " msg->qtype=%d, msg->msglen=%d.\n",
+            datalen,
+            msg->len,
+            msg->maxlen,
+            msg->cursor,
+            msg->qtype,
+            msg->msglen);
+        if (msg->len < printMsgLen) {
+            rc = memcpy_s(dataLog, printMsgLen, msg->data, msg->len);
+            securec_check_errno(rc, );
+            write_runlog(ERROR, "pq_getmsgbytes: msg->data=%s.\n", dataLog);
+        }
+        return NULL;
+    }
+
+    result = &msg->data[msg->cursor];
+    msg->cursor += datalen;
+    return result;
+}
+
+const char* pq_getmsgbytes(CM_Result* msg, int datalen)
+{
+    const char* result = NULL;
+    if (datalen < 0 || datalen > msg->gr_msglen) {
+        write_runlog(ERROR,
+            "pq_getmsgbytes: insufficient data left in message, "
+            "datalen=%d, res->gr_msglen=%d.\n",
+            datalen,
+            msg->gr_msglen);
+        return NULL;
+    }
+
+    result = (char*)&(msg->gr_resdata);
+    return result;
+}
diff --git a/src/lib/cm_communication/cm_libpq/pqsignal.cpp b/src/lib/cm_communication/cm_libpq/pqsignal.cpp
new file mode 100644
index 000000000..243cf9b24
--- /dev/null
+++ b/src/lib/cm_communication/cm_libpq/pqsignal.cpp
@@ -0,0 +1,162 @@
+/* -------------------------------------------------------------------------
+ *
+ * pqsignal.c
+ *	  reliable BSD-style signal(2) routine stolen from RWW who stole it
+ *	  from Stevens...
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 2010-2012 Postgres-XC Development Group
+ *
+ *
+ * IDENTIFICATION
+ *	  $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.44 2008/01/01 19:45:49 momjian Exp $
+ *
+ * NOTES
+ *	This shouldn't be in libpq, but the monitor and some other
+ *	things need it...
+ *
+ *	A NOTE ABOUT SIGNAL HANDLING ACROSS THE VARIOUS PLATFORMS.
+ *
+ *	pg_config.h defines the macro HAVE_POSIX_SIGNALS for some platforms and
+ *	not for others. This file and pqsignal.h use that macro to decide
+ *	how to handle signalling.
+ *
+ *	signal(2) handling - this is here because it affects some of
+ *	the frontend commands as well as the backend server.
+ *
+ *	Ultrix and SunOS provide BSD signal(2) semantics by default.
+ *
+ *	SVID2 and POSIX signal(2) semantics differ from BSD signal(2)
+ *	semantics. We can use the POSIX sigaction(2) on systems that
+ *	allow us to request restartable signals (SA_RESTART).
+ *
+ *	Some systems don't allow restartable signals at all unless we
+ *	link to a special BSD library.
+ *
+ *	We devoutly hope that there aren't any systems that provide
+ *	neither POSIX signals nor BSD signals. The alternative
+ *	is to do signal-handler reinstallation, which doesn't work well
+ *	at all.
+ * ------------------------------------------------------------------------ */
+#include
+#include "cm/cm_c.h"
+#include "cm/pqsignal.h"
+
+sigset_t unblock_sig, block_sig;
+
+/*
+ * Initialize BlockSig, UnBlockSig, and AuthBlockSig.
+ *
+ * BlockSig is the set of signals to block when we are trying to block
+ * signals. This includes all signals we normally expect to get, but NOT
+ * signals that should never be turned off.
+ * + * AuthBlockSig is the set of signals to block during authentication; + * it's essentially BlockSig minus SIGTERM, SIGQUIT, SIGALRM. + * + * UnBlockSig is the set of signals to block when we don't want to block + * signals (is this ever nonzero??) + */ +void init_signal_mask(void) +{ +#ifdef HAVE_SIGPROCMASK + + sigemptyset(&unblock_sig); + /* First set all signals, then clear some. */ + sigfillset(&block_sig); + + /* + * Unmark those signals that should never be blocked. Some of these signal + * names don't exist on all platforms. Most do, but might as well ifdef + * them all for consistency... + */ +#ifdef SIGTRAP + (void)sigdelset(&block_sig, SIGTRAP); +#endif +#ifdef SIGABRT + (void)sigdelset(&block_sig, SIGABRT); +#endif +#ifdef SIGILL + (void)sigdelset(&block_sig, SIGILL); +#endif +#ifdef SIGFPE + (void)sigdelset(&block_sig, SIGFPE); +#endif +#ifdef SIGSEGV + (void)sigdelset(&block_sig, SIGSEGV); +#endif +#ifdef SIGBUS + (void)sigdelset(&block_sig, SIGBUS); +#endif +#ifdef SIGSYS + (void)sigdelset(&block_sig, SIGSYS); +#endif +#ifdef SIGCONT + (void)sigdelset(&block_sig, SIGCONT); +#endif +#ifdef SIGQUIT + (void)sigdelset(&block_sig, SIGQUIT); +#endif +#ifdef SIGTERM + (void)sigdelset(&block_sig, SIGTERM); +#endif +#ifdef SIGALRM + (void)sigdelset(&block_sig, SIGALRM); +#endif +#ifdef SIGCHLD + (void)sigdelset(&block_sig, SIGCHLD); +#endif +#ifdef SIGINT + (void)sigdelset(&block_sig, SIGINT); +#endif +#ifdef SIGUSR1 + (void)sigdelset(&block_sig, SIGUSR1); +#endif +#ifdef SIGUSR2 + (void)sigdelset(&block_sig, SIGUSR2); +#endif +#ifdef SIGHUP + (void)sigdelset(&block_sig, SIGHUP); +#endif + +#else + /* Set the signals we want. */ + block_sig = sigmask(SIGQUIT) | sigmask(SIGTERM) | sigmask(SIGALRM) | + /* common signals between two */ + sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGUSR1) | sigmask(SIGUSR2) | sigmask(SIGWINCH) | + sigmask(SIGFPE); +#endif +} + +/* Win32 signal handling is in backend/port/win32/signal.c */ +#ifndef WIN32 +sigfunc setup_signal_handle(int signo, sigfunc func) +{ +#if !defined(HAVE_POSIX_SIGNALS) + return signal(signo, func); +#else + struct sigaction act, oact; + + act.sa_handler = func; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_ONSTACK; + + if (signo != SIGALRM) + act.sa_flags |= SA_RESTART; + +#ifdef SA_NOCLDSTOP + + if (signo == SIGCHLD) + act.sa_flags |= SA_NOCLDSTOP; + +#endif + + if (sigaction(signo, &act, &oact) < 0) + return SIG_ERR; + + return oact.sa_handler; +#endif /* !HAVE_POSIX_SIGNALS */ +} + +#endif /* WIN32 */ diff --git a/src/lib/cm_communication/cm_libpq/strlcpy.cpp b/src/lib/cm_communication/cm_libpq/strlcpy.cpp new file mode 100644 index 000000000..8cc4321c0 --- /dev/null +++ b/src/lib/cm_communication/cm_libpq/strlcpy.cpp @@ -0,0 +1,69 @@ +/* ------------------------------------------------------------------------- + * + * strlcpy.c + * strncpy done right + * + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2012 Postgres-XC Development Group + * + * + * IDENTIFICATION + * $PostgreSQL: pgsql/src/port/strlcpy.c,v 1.5 2008/01/01 19:46:00 momjian Exp $ + * + * This file was taken from OpenBSD and is used on platforms that don't + * provide strlcpy(). The OpenBSD copyright terms follow. + * ------------------------------------------------------------------------- + */ + +/* $OpenBSD: strlcpy.c,v 1.11 2006/05/05 15:27:38 millert Exp $ */ + +/* + * Copyright (c) 1998 Todd C. 
Miller + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "cm/cm_c.h" +/* + * Copy src to string dst of size siz. At most siz-1 characters + * will be copied. Always NUL terminates (unless siz == 0). + * Returns strlen(src); if retval >= siz, truncation occurred. + * Function creation history: http://www.gratisoft.us/todd/papers/strlcpy.html + */ +size_t strlcpy(char* dst, const char* src, size_t siz) +{ + char* d = dst; + const char* s = src; + size_t n = siz; + + /* Copy as many bytes as will fit */ + if (n != 0) { + while (--n != 0) { + if ((*d++ = *s++) == '\0') { + break; + } + } + } + + /* Not enough room in dst, add NUL and traverse rest of src */ + if (n == 0) { + if (siz != 0) + *d = '\0'; /* NUL-terminate dst */ + while (*s++) { + continue; + } + + } + + return (s - src - 1); /* count does not include NUL */ +} diff --git a/src/lib/config/Makefile b/src/lib/config/Makefile index 0bc1a470d..67c4c4132 100644 --- a/src/lib/config/Makefile +++ b/src/lib/config/Makefile @@ -53,7 +53,7 @@ libconfig.a: $(OBJS) $(AR) $(AROPT) $@ $^ utconfig: - $(CC) -fPIC -shared $(CFLAGS) $(CPPFLAGS) cm_config.cpp -L$(SECURE_LIB_PATH) -lsecurec -o libutconfig.so + $(CC) -fPIC -shared $(CFLAGS) $(CPPFLAGS) cm_config.cpp -L$(SECURE_LIB_PATH) -l$(SECURE_C_CHECK) -o libutconfig.so mv libutconfig.so $(top_builddir)/../distribute/test/ut/lib/ install: all installdirs diff --git a/src/lib/elog/CMakeLists.txt b/src/lib/elog/CMakeLists.txt index f5d342029..b5bdd0f54 100755 --- a/src/lib/elog/CMakeLists.txt +++ b/src/lib/elog/CMakeLists.txt @@ -6,6 +6,6 @@ set(TGT_elog_INC ) set(elog_DEF_OPTIONS ${MACRO_OPTIONS}) -set(elog_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) +set(elog_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${LIB_SECURE_OPTIONS} ${CHECK_OPTIONS}) add_static_libtarget(elog TGT_elog_SRC TGT_elog_INC "${elog_DEF_OPTIONS}" "${elog_COMPILE_OPTIONS}") diff --git a/src/lib/gstrace/CMakeLists.txt b/src/lib/gstrace/CMakeLists.txt index fe2e166b6..4551e767c 100755 --- a/src/lib/gstrace/CMakeLists.txt +++ b/src/lib/gstrace/CMakeLists.txt @@ -16,7 +16,10 @@ set(TGT_trace_INC set(trace_DEF_OPTIONS ${MACRO_OPTIONS}) set(trace_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(trace_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(trace_LINK_LIBS libgstrace.a -lcgroup -lgssapi_krb5_gauss -lkrb5_gauss -lk5crypto_gauss -lcom_err_gauss -lkrb5support_gauss libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt -lz -lminiunz) +set(trace_LINK_LIBS libgstrace.a -lcgroup libpgport.a -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz) +if(NOT "${ENABLE_LITE_MODE}" STREQUAL "ON") + list(APPEND 
trace_LINK_LIBS -lgssapi_krb5_gauss -lkrb5_gauss -lk5crypto_gauss -lcom_err_gauss -lkrb5support_gauss) +endif() add_bintarget(gstrace TGT_trace_SRC TGT_trace_INC "${trace_DEF_OPTIONS}" "${trace_COMPILE_OPTIONS}" "${trace_LINK_OPTIONS}" "${trace_LINK_LIBS}") add_dependencies(gstrace gstrace_static pgport_static) target_link_directories(gstrace PUBLIC diff --git a/src/lib/gstrace/common/Makefile b/src/lib/gstrace/common/Makefile index fcffca5bf..47bfb415f 100644 --- a/src/lib/gstrace/common/Makefile +++ b/src/lib/gstrace/common/Makefile @@ -36,7 +36,7 @@ ifneq "$(MAKECMDGOALS)" "clean" endif OBJS = $(top_builddir)/src/common/port/libpgport.a gstrace_infra.o -LIBS += -lsecurec +LIBS += -l$(SECURE_C_CHECK) override CXXFLAGS := -I$(top_builddir)/src/include $(CXXFLAGS) -DHAVE_UNSETENV all: libgstrace.a diff --git a/src/lib/gstrace/common/gstrace_infra.cpp b/src/lib/gstrace/common/gstrace_infra.cpp index 993ff8cb2..283e2f854 100644 --- a/src/lib/gstrace/common/gstrace_infra.cpp +++ b/src/lib/gstrace/common/gstrace_infra.cpp @@ -383,7 +383,7 @@ static bool checkProcess(int port) if (fp == NULL) { printf("popen failed. could not query database process.\n"); return procexist; - } + } while (fgets(buf, sizeof(buf), fp) != NULL) { procexist = true; break; diff --git a/src/lib/gstrace/tool/Makefile b/src/lib/gstrace/tool/Makefile index 6345fa361..73540c879 100644 --- a/src/lib/gstrace/tool/Makefile +++ b/src/lib/gstrace/tool/Makefile @@ -28,7 +28,10 @@ loginfo="hello test" $(info, $(LIBS)) ifneq ($(PORTNAME), win32) - override CFLAGS += $(PTHREAD_CFLAGS) -L$(LIBOPENSSL_LIB_PATH) -lssl -lcrypto -L$(LIBCGROUP_LIB_PATH) -lcgroup -lgssapi_krb5_gauss -lkrb5_gauss -lk5crypto_gauss -lcom_err_gauss -lkrb5support_gauss + override CFLAGS += $(PTHREAD_CFLAGS) -L$(LIBOPENSSL_LIB_PATH) -lssl -lcrypto -L$(LIBCGROUP_LIB_PATH) -lcgroup + ifeq ($(enable_lite_mode), no) + override CFLAGS += -lgssapi_krb5_gauss -lkrb5_gauss -lk5crypto_gauss -lcom_err_gauss -lkrb5support_gauss + endif override CPPFLAGS := -I$(LIBETCDAPI_INCLUDE_PATH) -I$(LIBCGROUP_INCLUDE_PATH) $(CPPFLAGS) endif diff --git a/src/makefiles/pgxs.mk b/src/makefiles/pgxs.mk index 2994043d9..5ef81f0db 100644 --- a/src/makefiles/pgxs.mk +++ b/src/makefiles/pgxs.mk @@ -55,11 +55,45 @@ $(error pgxs error: makefile variable PGXS or NO_PGXS must be set) endif endif - ifdef PGXS # We assume that we are in src/makefiles/, so top is ... top_builddir := $(dir $(PGXS))../.. + +ifneq (,$(wildcard $(top_builddir)/src/Makefile.global)) include $(top_builddir)/src/Makefile.global +else #cmake build no Makefile.global +PORTNAME= linux +enable_shared=yes +CC=g++ +GCC=yes +C=gcc +host_cpu=@HOST_CPU@ +MKDIR_P = /usr/bin/mkdir -p +LN_S = ln -s +DLSUFFIX = .so + +bindir := $(shell $(PG_CONFIG) --bindir) +sysconfdir := $(shell $(PG_CONFIG) --sysconfdir) +pkgincludedir := $(shell $(PG_CONFIG) --pkgincludedir) +datadir := $(shell $(PG_CONFIG) --sharedir) +pkglibdir := $(shell $(PG_CONFIG) --pkglibdir) +localedir := $(shell $(PG_CONFIG) --localedir) + +includedir_server = $(pkgincludedir)/server +includedir_internal = $(pkgincludedir)/internal + +# Installation. 
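+# These defaults mirror what src/Makefile.global normally provides. A
+# hypothetical extension install rule driven by them could look like:
+#
+#   install: all
+#           $(INSTALL_SHLIB) my_ext$(DLSUFFIX) '$(pkglibdir)/my_ext$(DLSUFFIX)'
+#           $(INSTALL_DATA) my_ext.control '$(datadir)/extension/'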
+INSTALL = $(SHELL) $(top_srcdir)/config/install-sh -c +INSTALL_SCRIPT_MODE = 755 +INSTALL_DATA_MODE = 644 +INSTALL_SCRIPT = $(INSTALL) -m $(INSTALL_SCRIPT_MODE) +INSTALL_DATA = $(INSTALL) -m $(INSTALL_DATA_MODE) +INSTALL_SHLIB = $(INSTALL_SHLIB_ENV) $(INSTALL) $(INSTALL_SHLIB_OPTS) $(INSTALL_STRIP_FLAG) +# Override in Makefile.port if necessary +INSTALL_SHLIB_OPTS = -m 755 + +override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) -I$(top_builddir)/src/lib/gstrace $(CPPFLAGS) +endif top_srcdir = $(top_builddir) srcdir = . @@ -74,8 +108,13 @@ endif ifeq ($(FLEX),) FLEX = flex endif -endif +else # not PGXS +override CPPFLAGS := -I$(top_srcdir)/src/include -I$(top_builddir)/src/lib/gstrace -D_GNU_SOURCE $(CPPFLAGS) +ifdef VPATH +override CPPFLAGS := -I$(top_builddir)/src/include -I$(top_builddir)/src/lib/gstrace $(CPPFLAGS) +endif +endif override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt index b2c12dc25..1ff925c7c 100755 --- a/src/test/CMakeLists.txt +++ b/src/test/CMakeLists.txt @@ -4,11 +4,21 @@ set(CMAKE_VERBOSE_MAKEFILE ON) set(CMAKE_RULE_MESSAGES OFF) set(CMAKE_SKIP_RPATH TRUE) -set(CMAKE_MODULE_PATH - ${CMAKE_CURRENT_SOURCE_DIR}/isolation -) +set(CMAKE_MODULE_PATH + ${CMAKE_CURRENT_SOURCE_DIR}/isolation + ) add_subdirectory(isolation) -if("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") +if ("${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") add_subdirectory(regress) -endif() +endif () + +if ("${ENABLE_UT}" STREQUAL "ON" AND "${ENABLE_MULTIPLE_NODES}" STREQUAL "OFF") + add_subdirectory(ut) + install( + DIRECTORY ${CMAKE_BINARY_DIR}/ut_bin/ DESTINATION ut_bin + PATTERN "ut_bin/*" + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + ) +endif () + diff --git a/src/test/grayscale_upgrade/upgradeCheck.py b/src/test/grayscale_upgrade/upgradeCheck.py new file mode 100644 index 000000000..48b5354c5 --- /dev/null +++ b/src/test/grayscale_upgrade/upgradeCheck.py @@ -0,0 +1,493 @@ +#!/usr/bin/env python +# +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# --------------------------------------------------------------------------------------- +# +# IDENTIFICATION +# src/test/grayscale_upgrade/upgradeCheck.py +# +# --------------------------------------------------------------------------------------- + +import getopt, sys, os +import shutil +import time +import string +import commands +import getpass +import datetime +from multiprocessing.dummy import Pool as ThreadPool + +g_base_port = 25632 +g_gtm_port = 25686 +g_pooler_base_port = 25832 +g_file_node_info = "cndn.cnf" +bin_path = "" +g_valgrind = "" +fastcheck_conf_file = "src/distribute/test/regress/make_fastcheck_postgresql.conf" +startTime = "" +endTime = "" +(INIT_STATUS_AND_EXEC_PRE_SCRIPT, + EXEC_PRE_ROLLBACK_SCRIPT, + RESERVED_VALUE, + EXEC_POST_ROLLBACK_SCRIPT, + EXEC_POST_SCRIPT + ) = list(range(1, 6)) + +class Pterodb(): + def __init__(self, coordinator_num = 3, data_node_num = 12, data_dir = "src/distribute/test/regress/tmp_check/", base_port = 25632, part_number = 0, upgrade_from = 92, gsql_dir = "gsql"): + self.coordinator_num = coordinator_num + self.data_node_num = data_node_num + self.base_port = base_port + self.gtm_port = base_port + 2*coordinator_num + 4*data_node_num + self.dname_prefix = "datanode" + self.data_dir = data_dir + self.new_version_number = 0 + self.base_versioni_number = 0 + self.part_number = part_number + script_path = os.path.abspath(os.path.dirname(__file__)) + self.script_path = script_path + self.upgrade_from = float(upgrade_from)/1000.0 + self.gsql_dir = gsql_dir + self.run_type = 0 + global upgrade_catalog_maindb_path_check, upgrade_catalog_maindb_sql_check, \ + upgrade_catalog_otherdb_path_check, upgrade_catalog_otherdb_sql_check, \ + rollback_catalog_maindb_path_check, rollback_catalog_maindb_sql_check, \ + rollback_catalog_otherdb_path_check, rollback_catalog_otherdb_sql_check, \ + upgrade_post_catalog_maindb_sql_check, upgrade_post_catalog_otherdb_sql_check, \ + rollback_post_catalog_maindb_sql_check, rollback_post_catalog_otherdb_sql_check, \ + check_upgrade_path, check_upgrade_sql, \ + exec_sql_log, upgradeCheckLog, \ + maindb, otherdbs, private_dict, startTime, endTime, \ + INIT_STATUS_AND_EXEC_PRE_SCRIPT, EXEC_PRE_ROLLBACK_SCRIPT, RESERVED_VALUE, EXEC_POST_ROLLBACK_SCRIPT, EXEC_POST_SCRIPT + + # Upgrade Maindb Script Path + private_upgrade_catalog_maindb_path_check = script_path + "/../../../../privategauss/include/catalog/upgrade_sql/upgrade_catalog_maindb/" + upgrade_catalog_maindb_path_check = script_path + "/../../include/catalog/upgrade_sql/upgrade_catalog_maindb/" + + # Upgrade Maindb Exec Script Type + upgrade_catalog_maindb_sql_check = script_path + "/sql/upgrade_catalog_maindb_tmp.sql" + upgrade_post_catalog_maindb_sql_check = script_path + "/sql/upgrade-post_catalog_maindb_tmp.sql" + + # Upgrade Otherdb Script Path + private_upgrade_catalog_otherdb_path_check = script_path + "/../../../../privategauss/include/catalog/upgrade_sql/upgrade_catalog_otherdb/" + upgrade_catalog_otherdb_path_check = script_path + "/../../include/catalog/upgrade_sql/upgrade_catalog_otherdb/" + + # Upgrade Otherdb Exec Script Type + upgrade_catalog_otherdb_sql_check = script_path + "/sql/upgrade_catalog_otherdb_tmp.sql" + upgrade_post_catalog_otherdb_sql_check = script_path + "/sql/upgrade-post_catalog_otherdb_tmp.sql" + + # Rollback Maindb Script Path + private_rollback_catalog_maindb_path_check = script_path + "/../../../../privategauss/include/catalog/upgrade_sql/rollback_catalog_maindb/" + rollback_catalog_maindb_path_check = script_path + 
"/../../include/catalog/upgrade_sql/rollback_catalog_maindb/" + + # Rollback Maindb Exec Script Type + rollback_catalog_maindb_sql_check = script_path + "/sql/rollback_catalog_maindb_tmp.sql" + rollback_post_catalog_maindb_sql_check = script_path + "/sql/rollback-post_catalog_maindb_tmp.sql" + + # Rollback Otherdb Script Path + private_rollback_catalog_otherdb_path_check = script_path + "/../../../../privategauss/include/catalog/upgrade_sql/rollback_catalog_otherdb/" + rollback_catalog_otherdb_path_check = script_path + "/../../include/catalog/upgrade_sql/rollback_catalog_otherdb/" + + # Rollback Otherdb Exec Script Type + rollback_catalog_otherdb_sql_check = script_path + "/sql/rollback_catalog_otherdb_tmp.sql" + rollback_post_catalog_otherdb_sql_check = script_path + "/sql/rollback-post_catalog_otherdb_tmp.sql" + + # Check Script Path + check_upgrade_path = script_path + "/../../include/catalog/upgrade_sql/check_upgrade/" + + # Check Exec Script Type + check_upgrade_sql = script_path + "/sql/check_upgrade_tmp.sql" + exec_sql_log = script_path + "/sql/execSql.log" + upgradeCheckLog = script_path + "/upgradeCheck.log" + maindb = "postgres" + otherdbs = ['template0', 'template1', 'test_td'] + private_dict = {upgrade_catalog_maindb_path_check: private_upgrade_catalog_maindb_path_check, + upgrade_catalog_otherdb_path_check: private_upgrade_catalog_otherdb_path_check, + rollback_catalog_maindb_path_check: private_rollback_catalog_maindb_path_check, + rollback_catalog_otherdb_path_check: private_rollback_catalog_otherdb_path_check} + startTime, endTime = datetime.datetime.now(), datetime.datetime.now() + + def writeLogFile(self, msg): + """ + write log file durng upgrade check + """ + try: + fp = None + fp = open(upgradeCheckLog, 'a') + recordMsg = "[{0}]:{1}".format(datetime.datetime.now(), msg) + fp.write(recordMsg + "\n") + if(fp): + fp.close() + except Exception, e: + if(fp): + fp.close() + raise Exception(str(e)) + + def cleanup(self): + """ + clean up temporary check files that may exist last time + """ + cmd = "rm -rf %s/sql && rm -f %s && mkdir %s/sql" % (self.script_path, upgradeCheckLog, self.script_path) + self.writeLogFile("Cmd for cleanup is: %s" % cmd) + (status, output) = commands.getstatusoutput(cmd) + if(status != 0): + raise Exception("Failed to clean up temproray check files from last time.\nCmd:%s\nOutput:%s" % (cmd, output)) + + def prepareUpCheckSqlFile(self): + """ + prepare 5 files: upgrade_catalog_maindb_tmp.sql,upgrade_catalog_otherdb_tmp.sql,check_upgrade_tmp.sql + rollback_catalog_maindb_tmp.sql,rollback_catalog_otherdb_tmp.sql + """ + try: + self.preparedSqlFile("upgrade", "maindb", upgrade_catalog_maindb_path_check, upgrade_catalog_maindb_sql_check) + self.preparedSqlFile("upgrade-post", "maindb", upgrade_catalog_maindb_path_check, upgrade_post_catalog_maindb_sql_check) + self.preparedSqlFile("upgrade", "otherdb", upgrade_catalog_otherdb_path_check, upgrade_catalog_otherdb_sql_check) + self.preparedSqlFile("upgrade-post", "otherdb", upgrade_catalog_otherdb_path_check, upgrade_post_catalog_otherdb_sql_check) + self.preparedSqlFile("rollback", "maindb", rollback_catalog_maindb_path_check, rollback_catalog_maindb_sql_check) + self.preparedSqlFile("rollback-post", "maindb", rollback_catalog_maindb_path_check, rollback_post_catalog_maindb_sql_check) + self.preparedSqlFile("rollback", "otherdb", rollback_catalog_otherdb_path_check, rollback_catalog_otherdb_sql_check) + self.preparedSqlFile("rollback-post", "otherdb", rollback_catalog_otherdb_path_check, 
+            self.preparedSqlFile("check", "", check_upgrade_path, check_upgrade_sql)
+
+            self.writeLogFile("Successfully prepared check upgrade sql files.")
+        except Exception, e:
+            raise Exception("Prepare upgrade check sql file failed. ERROR: %s" % str(e))
+
+    def preparedSqlFile(self, scriptType, dbType, scriptDir, sqlAll):
+        # filter all upgrade sql files and merge them into the result file
+        try:
+            if "check" in scriptType:
+                self.writeLogFile("Preparing check_upgrade_tmp.sql")
+            else:
+                self.writeLogFile("Preparing %s_catalog_%s_tmp.sql" % (scriptType, dbType))
+            # filter all upgrade sql files
+            validFileList = self.spliceSqlFile(scriptDir, scriptType)
+            # sort the scripts
+            if "rollback" in scriptType:
+                validFileList.sort(reverse=True)
+            else:
+                validFileList.sort()
+            # merge into one result file
+            if "check" in scriptType:
+                self.writeLogFile("check_upgrade_valid_fileList: %s" % validFileList)
+            else:
+                self.writeLogFile("%s_catalog_%s_valid_fileList: %s" % (scriptType, dbType, validFileList))
+            self.writeSqlFile(sqlAll, validFileList, scriptDir)
+        except Exception, e:
+            raise Exception("Prepare sql file failed. ERROR: %s" % str(e))
+
+    def getNewVersionNum(self):
+        """
+        Obtain the version number from the globals.cpp file.
+        :return:
+        """
+        flagStr = "const uint32 GRAND_VERSION_NUM"
+        globalsPath = os.path.abspath(os.path.dirname(__file__) + "/../../common/backend/utils/init/globals.cpp")
+        if not os.path.isfile(globalsPath):
+            errMsg = "The file {0} cannot be found".format(globalsPath)
+            raise Exception(errMsg)
+        allLines = []
+        with open(globalsPath, 'r') as fp:
+            allLines = fp.readlines()
+        for line in allLines:
+            if flagStr in line:
+                result = line.split("=")[1].split(";")[0].strip(" ")
+                return float(result)/1000.0
+        errMsg = "The '{0}' cannot be found in the '{1}' file. " \
+                 "Change the version number in '{0} = 92xxx;' format.".format(flagStr, globalsPath)
+        raise Exception(errMsg)
+
+
+    def spliceSqlFile(self, fileDir, scriptType="_"):
+        try:
+            NewVersionNum = self.getNewVersionNum()
+            BaseVersionNum = self.upgrade_from
+            fileAllList = os.listdir(fileDir)
+            privateFileAllList = []
+            keyElement = []
+            if fileDir != check_upgrade_path:
+                privateFileAllList = os.listdir(private_dict[fileDir])
+                commonScriptList = list(set(fileAllList) & set(privateFileAllList))
+                commonScriptList = [script for script in commonScriptList if "407" not in script]
+                commonScriptList = [script for script in commonScriptList if "445" not in script]
+                commonScriptList = [script for script in commonScriptList if "467" not in script]
+                keyElement = fileDir.split('/')[-2].split('_')
+                if commonScriptList:
+                    errMsg = "OpenGauss and privategauss contain scripts of the same version number. " \
+                             "Please change the version number, " \
+                             "the scripts {0} in openGauss and privateGauss cannot use the same version number.".format(commonScriptList)
+                    self.writeLogFile(errMsg)
+                    raise Exception("The script name does not meet the specifications. Error: {0}".format(errMsg))
+
+            allList = fileAllList + privateFileAllList
+
+            for name in allList:
+                for key in keyElement:
+                    if key not in name:
+                        errMsg = "The script {0} name does not meet the specifications; it needs to contain {1}".format(name, key)
+                        self.writeLogFile(errMsg)
+                        raise Exception(errMsg)
+
+            result = []
+            if len(allList) != 0:
+                for each_sql_file in allList:
+                    if not os.path.isfile("%s/%s" % (fileDir, each_sql_file)) and \
+                            not os.path.isfile("%s/%s" % (private_dict[fileDir], each_sql_file)):
+                        errMsg = "cannot find the file {0}".format(each_sql_file)
+                        raise Exception(errMsg)
+                    prefix = each_sql_file.split('.')[0]
+                    resList = prefix.split('_')
+                    if(len(resList) != 5) or scriptType not in resList:
+                        continue
+                    file_num = "%s.%s" % (resList[3], resList[4])
+                    if float(file_num) <= 92:
+                        continue
+                    if BaseVersionNum < float(file_num) <= NewVersionNum:
+                        result.append(each_sql_file)
+        except Exception, e:
+            raise Exception("Splice sql file failed. ERROR: %s" % str(e))
+        return result
+
+    def writeSqlFile(self, fileName, fileList, fileDir):
+        file = open(fileName, 'w')
+        file.write("START TRANSACTION;")
+        file.write(os.linesep)
+        file.write("SET IsInplaceUpgrade = on;")
+        file.write(os.linesep)
+        self.writeLogFile("fileDir is {0}, the list of files being written is {1}".format(fileDir, fileList))
+        for each_file in fileList:
+            if os.path.isfile("%s/%s" % (fileDir, each_file)):
+                each_file_with_path = "%s/%s" % (fileDir, each_file)
+            elif os.path.isfile("%s/%s" % (private_dict[fileDir], each_file)):
+                each_file_with_path = "%s/%s" % (private_dict[fileDir], each_file)
+            else:
+                errMsg = "cannot find the file {0}".format(each_file)
+                raise Exception(errMsg)
+            self.writeLogFile("handling file: %s" % each_file_with_path)
+            for txt in open(each_file_with_path, 'r'):
+                file.write(txt)
+            file.write(os.linesep)
+        file.write("COMMIT;")
+        file.write(os.linesep)
+        file.close()
+        self.writeLogFile("Completed file {0} with the list: {1}".format(fileName, fileList))
+
+    def checkSqlResult(self, Type = "upgrade"):
+        cmd = "grep ERROR " + exec_sql_log
+        (status, output) = commands.getstatusoutput(cmd)
+        if(output.find("ERROR") != -1):
+            raise Exception("Failed to execute catalog %s" % Type)
+        cmd = "grep PANIC " + exec_sql_log
+        (status, output) = commands.getstatusoutput(cmd)
+        if(output.find("PANIC") != -1):
+            raise Exception("Failed to execute catalog %s" % Type)
+        cmd = "grep FATAL " + exec_sql_log
+        (status, output) = commands.getstatusoutput(cmd)
+        if(output.find("FATAL") != -1):
+            raise Exception("Failed to execute catalog %s" % Type)
+
+    def upgrade_one_database(self, db_name):
+        """
+        Run the merged upgrade sql file on one database.
+        """
+        try:
+            if db_name == "postgres":
+                if self.run_type == EXEC_POST_SCRIPT:
+                    upgrade_catalog_file = upgrade_post_catalog_maindb_sql_check
+                else:
+                    upgrade_catalog_file = upgrade_catalog_maindb_sql_check
+            else:
+                if self.run_type == EXEC_POST_SCRIPT:
+                    upgrade_catalog_file = upgrade_post_catalog_otherdb_sql_check
+                else:
+                    upgrade_catalog_file = upgrade_catalog_otherdb_sql_check
+            cmd = self.gsql_dir + " -X -q -a -d " + db_name + " -p " + str(self.base_port) + " -f " + upgrade_catalog_file + " >> " + exec_sql_log + " 2>&1 "
+            (status, output) = commands.getstatusoutput(cmd)
+            self.writeLogFile("Cmd is {0}, output is {1}".format(cmd, output))
+            if(status != 0):
+                raise Exception("Failed to upgrade catalogs!")
+        except Exception, e:
+            raise Exception(str(e))
+
+    def rollback_one_database(self, db_name):
+        """
+        Run the merged rollback sql file on one database.
+        """
+        try:
+            if db_name == "postgres":
+                if self.run_type >= EXEC_POST_ROLLBACK_SCRIPT:
rollback_catalog_file = rollback_post_catalog_maindb_sql_check + else: + rollback_catalog_file = rollback_catalog_maindb_sql_check + else: + if self.run_type >= EXEC_POST_ROLLBACK_SCRIPT: + rollback_catalog_file = rollback_post_catalog_otherdb_sql_check + else: + rollback_catalog_file = rollback_catalog_otherdb_sql_check + cmd = self.gsql_dir + " -X -q -a -d " + db_name + " -p " + str(self.base_port) + " -f " + rollback_catalog_file + " >> " + exec_sql_log + " 2>&1 " + (status, output) = commands.getstatusoutput(cmd) + self.writeLogFile("Cmd is {0}, output is {1}".format(cmd, output)) + if(status != 0): + raise Exception("Failed to rollback catalogs!") + except Exception, e: + raise Exception(str(e)) + + def execSqlFile(self, scriptType): + self.rollback_one_database(maindb) + pool = ThreadPool(1) + pool.map(self.rollback_one_database, otherdbs) + pool.close() + pool.join() + self.checkSqlResult("rollback%s" % scriptType) + + if self.run_type in [EXEC_PRE_ROLLBACK_SCRIPT, EXEC_POST_ROLLBACK_SCRIPT]: + return + + self.upgrade_one_database(maindb) + pool = ThreadPool(1) + pool.map(self.upgrade_one_database, otherdbs) + pool.close() + pool.join() + self.checkSqlResult("upgrade%s" % scriptType) + + + def executeSQL(self): + try: + cmd = "" + if self.run_type < EXEC_POST_ROLLBACK_SCRIPT: + testSql = self.gsql_dir + " -X -q -a -d postgres -p " + str(self.base_port) + " -c \"select datname from pg_database;\"" + (status, output) = commands.getstatusoutput(testSql) + if(status != 0): + raise Exception("Failed to check test_td database!") + if "test_td" not in output: + cmd = cmd + self.gsql_dir + " -X -q -a -d postgres -p " + str(self.base_port) + " -c \"create database test_td DBCOMPATIBILITY 'C';\"" + " >> " + exec_sql_log + " 2>&1" + (status, output) = commands.getstatusoutput(cmd) + if(status != 0): + raise Exception("Failed to create database!") + + if self.run_type >= EXEC_POST_ROLLBACK_SCRIPT: + self.execSqlFile("-post") + else: + self.execSqlFile("") + + except Exception, e: + raise Exception("ERROR: %s\nPlease refer to %s for details" % (str(e), exec_sql_log)) + + def executeSimpleTest(self): + cmd = "gsql -X -q -a -d postgres -p " + str(self.base_port) + " -c \"create table upcheck_tmp_table(a int); drop table upcheck_tmp_table;\""+ " >> " + exec_sql_log + " 2>&1 " + (status, output) = commands.getstatusoutput(cmd) + if(status != 0): + raise Exception("Simple test failed before make full-load fast check!") + self.checkSqlResult("upgrade simple test") + + def run(self): + try: + if self.run_type == INIT_STATUS_AND_EXEC_PRE_SCRIPT: + + self.cleanup() + + self.prepareUpCheckSqlFile() + + self.executeSQL() + + except Exception, e: + print str(e) + exit(1) + +def usage(): + print "------------------------------------------------------" + print "python pgxc.py\n -c coor_num -d datanode_num\n -s means start\n -o means stop" + print " -g means memcheck" + print " -D data directory\n" + print "------------------------------------------------------" + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], "hD:c:d:ukovgp:n:f:s:r:p", ["help", "data_dir=", "rollback", "post"]) + except getopt.GetoptError, err: + # print help information and exit: + print str(err) # will print something like "option -a not recognized" + # usage() + sys.exit(2) + + coordinator_num = 3 + datanode_num = 12 + base_port = 25632 + part_number = 0 + upgrade_from = 0 + data_dir = "src/distribute/test/regress/tmp_check/" + gsql_dir = "gsql" + global g_valgrind; + global g_file_node_info; + + #1 start; 2 stop; 3 ; 4 
again exec; 5 post script + run_type = 0 + + for o, a in opts: + if o == "-v": + verbose = True + elif o in ("-h", "--help"): + usage() + sys.exit() + elif o in ("-D", "data_dir"): + data_dir = a + elif o in ("-s", "--gsql-dir"): + gsql_dir = a + elif o in ("-c", "--coordinator"): + coordinator_num = int(a) + elif o in ("-d", "--datanode"): + datanode_num = int(a) + elif o in ("-p", "--port"): + base_port = int(a) + elif o in ("-f", "--from"): + upgrade_from = int(a) + elif o in ("-n", "--number"): + part_number = int(a) + elif o in ("-u", "--startup"): + # The fastcheck invokes the upgradeCheck script for the first time. and execute the pre script. + run_type = INIT_STATUS_AND_EXEC_PRE_SCRIPT + elif o in ("-k", "--startcheck"): + # Reserved Value + run_type = RESERVED_VALUE + elif o in ("-o", "--pre_rollback"): + # Executing the Pre-Rollback Script + run_type = EXEC_PRE_ROLLBACK_SCRIPT + elif o in ("-r", "--rollback"): + # Executing the Post-Rollback Script + run_type = EXEC_POST_ROLLBACK_SCRIPT + elif o in ("--post"): + # Executing the Post Script + run_type = EXEC_POST_SCRIPT + elif o in ("-g", "--memcheck"): + g_valgrind = "valgrind --tool=memcheck --leak-check=full --log-file=memcheck.log " + #g_valgrind = "valgrind --tool=massif --time-unit=B --detailed-freq=1 --massif-out-file=mass.out " + else: + assert False, "unhandled option" + + if((coordinator_num == 0 or datanode_num == 0) and run_type == 0): + usage() + sys.exit() + + g_file_node_info = data_dir + "/" + g_file_node_info + ptdb = Pterodb(coordinator_num,datanode_num, data_dir, base_port, part_number, upgrade_from, gsql_dir) + ptdb.run_type = run_type + ptdb.run() + endTime = datetime.datetime.now() + if (run_type not in [EXEC_PRE_ROLLBACK_SCRIPT, EXEC_POST_ROLLBACK_SCRIPT]): + print "Make upgrade check successfully. Total time {0}s\n".format((endTime-startTime).seconds) + else: + print "Make rollback upgrade check successfully. 
Total time {0}s\n".format((endTime - startTime).seconds) + exit(0) + + +if __name__ == "__main__": + main() diff --git a/src/test/ha/data/parallel_decode_data b/src/test/ha/data/parallel_decode_data new file mode 100644 index 000000000..20c987501 --- /dev/null +++ b/src/test/ha/data/parallel_decode_data @@ -0,0 +1,3 @@ +10 abc +20 cde +30 efg diff --git a/src/test/ha/data/parallel_decode_xact.sql b/src/test/ha/data/parallel_decode_xact.sql new file mode 100644 index 000000000..7654f6af3 --- /dev/null +++ b/src/test/ha/data/parallel_decode_xact.sql @@ -0,0 +1,125 @@ +drop table if exists t1_decode; +drop table if exists t2_decode; +drop table if exists t3_decode; +drop table if exists t4_decode; +drop table if exists t5_decode; + +create table t1_decode(a int, b text); +alter table t1_decode replica identity full; +insert into t1_decode values(1,'abc'); +update t1_decode set b = 'cde' where a = 1; +delete from t1_decode; +insert into t1_decode select 1, string_agg(g.i::text,'') from generate_series(1,2000) g(i); +insert into t1_decode values(generate_series(1,5000),'ab'); +begin; +insert into t1_decode values(10,'abc'); +savepoint a; +insert into t1_decode values(20,'abc'); +rollback to savepoint a; +insert into t1_decode values(30,'abc'); +commit; +begin; +insert into t1_decode values(11,'a'); +savepoint a; +insert into t1_decode values(22,'a'); +rollback to savepoint a; +insert into t1_decode values(33,'a'); +rollback; +begin; +insert into t1_decode values(generate_series(1,5000),'a'); +savepoint a; +insert into t1_decode values(generate_series(1,5000),'b'); +rollback to savepoint a; +insert into t1_decode values(generate_series(1,5000),'c'); +commit; + +create table t2_decode(a int, b text) with(storage_type = ustore); +insert into t2_decode values(1,'abc'); +update t2_decode set b = 'cde' where a = 1; +delete from t2_decode; +insert into t2_decode select 1, string_agg(g.i::text,'') from generate_series(1,2000) g(i); +insert into t2_decode values(generate_series(1,5000),'ab'); +begin; +insert into t2_decode values(10,'abc'); +savepoint a; +insert into t2_decode values(20,'abc'); +rollback to savepoint a; +insert into t2_decode values(30,'abc'); +commit; +begin; +insert into t2_decode values(11,'a'); +savepoint a; +insert into t2_decode values(22,'a'); +rollback to savepoint a; +insert into t2_decode values(33,'a'); +rollback; +begin; +insert into t2_decode values(generate_series(1,5000),'a'); +savepoint a; +insert into t2_decode values(generate_series(1,5000),'b'); +rollback to savepoint a; +insert into t2_decode values(generate_series(1,5000),'c'); +commit; + + +create table t3_decode(a int, b text); +alter table t3_decode replica identity full; +insert into t3_decode values(1,'abc'); +update t3_decode set b = 'cde' where a = 1; +delete from t3_decode; +insert into t3_decode select 1, string_agg(g.i::text,'') from generate_series(1,2000) g(i); +insert into t3_decode values(generate_series(1,5000),'ab'); +begin; +insert into t3_decode values(10,'abc'); +savepoint a; +insert into t3_decode values(20,'abc'); +rollback to savepoint a; +insert into t3_decode values(30,'abc'); +commit; +begin; +insert into t3_decode values(11,'a'); +savepoint a; +insert into t3_decode values(22,'a'); +rollback to savepoint a; +insert into t3_decode values(33,'a'); +rollback; +begin; +insert into t3_decode values(generate_series(1,5000),'a'); +savepoint a; +insert into t3_decode values(generate_series(1,5000),'b'); +rollback to savepoint a; +insert into t3_decode values(generate_series(1,5000),'c'); +commit; + 
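The heap tables above (t1_decode, t3_decode) set `replica identity full` so that decoded UPDATE/DELETE records carry the old tuple values; t2_decode and t4_decode run the same workload against ustore tables. The decoded stream itself is consumed by the pg_recvlogical sessions in the HA script further below, but for ad-hoc inspection the changes queued on a slot can also be peeked over SQL (a sketch, assuming the PostgreSQL-style slot function is available in this build):

```
-- Peek at (without consuming) the next ten decoded changes on slot1.
select data from pg_logical_slot_peek_changes('slot1', NULL, 10);
```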
+ +create table t4_decode(a int, b text) with(storage_type = ustore); +insert into t4_decode values(1,'abc'); +update t4_decode set b = 'cde' where a = 1; +delete from t4_decode; +insert into t4_decode select 1, string_agg(g.i::text,'') from generate_series(1,2000) g(i); +insert into t4_decode values(generate_series(1,5000),'ab'); +begin; +insert into t4_decode values(10,'abc'); +savepoint a; +insert into t4_decode values(20,'abc'); +rollback to savepoint a; +insert into t4_decode values(30,'abc'); +commit; +begin; +insert into t4_decode values(11,'a'); +savepoint a; +insert into t4_decode values(22,'a'); +rollback to savepoint a; +insert into t4_decode values(33,'a'); +rollback; +begin; +insert into t4_decode values(generate_series(1,5000),'a'); +savepoint a; +insert into t4_decode values(generate_series(1,5000),'b'); +rollback to savepoint a; +insert into t4_decode values(generate_series(1,5000),'c'); +commit; + +create table t5_decode(a int, b bool, c bit, d text); +insert into t5_decode values(1, true, B'1', '你好'); +insert into t5_decode values(2); diff --git a/src/test/ha/ha_schedule_multi_single b/src/test/ha/ha_schedule_multi_single index 16bde853c..dbeb4470a 100644 --- a/src/test/ha/ha_schedule_multi_single +++ b/src/test/ha/ha_schedule_multi_single @@ -11,4 +11,3 @@ multi_standby_single/params multi_standby_single/xlog_redo_apply_delay #multi_standby_single/most_available multi_standby_single/failover_with_data -multi_standby_single/hash_index \ No newline at end of file diff --git a/src/test/ha/standby_env.sh b/src/test/ha/standby_env.sh index e64eab58f..c95084006 100644 --- a/src/test/ha/standby_env.sh +++ b/src/test/ha/standby_env.sh @@ -2,7 +2,7 @@ #some enviroment vars export g_base_port=8888 -#export prefix=${PREFIX_HOME} +export prefix=${PREFIX_HOME} export g_pooler_base_port=`expr $g_base_port \+ 410` export g_base_standby_port=`expr $g_base_port \+ 400` export install_path="$prefix" diff --git a/src/test/ha/testcase/data_replication_single/datareplica_failover_consistency.sh b/src/test/ha/testcase/data_replication_single/datareplica_failover_consistency.sh index 42fa908a7..4b2176428 100644 --- a/src/test/ha/testcase/data_replication_single/datareplica_failover_consistency.sh +++ b/src/test/ha/testcase/data_replication_single/datareplica_failover_consistency.sh @@ -1,5 +1,4 @@ #!/bin/sh -#this test is based on the bug DTS2015042205639 #If there is no data changed after the restart checkpoint LSN, and during failover #we received the data of that page @@ -62,4 +61,4 @@ gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists mpp_test2;" } test_1 -tear_down \ No newline at end of file +tear_down diff --git a/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh b/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh index c0165b64e..2e199ca07 100644 --- a/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh +++ b/src/test/ha/testcase/decode_single/logical_decoding_on_standby.sh @@ -11,22 +11,43 @@ function test_1() kill_cluster gs_guc set -Z datanode -D $primary_data_dir -c "wal_level = logical" gs_guc set -Z datanode -D $primary_data_dir -c "enable_slot_log = on" + gs_guc set -Z datanode -D $primary_data_dir -c "undo_zone_count = 16384" gs_guc set -Z datanode -D $primary_data_dir -h "local replication $USER trust" gs_guc set -Z datanode -D $standby_data_dir -c "wal_level = logical" gs_guc set -Z datanode -D $standby_data_dir -c "hot_standby = on" + gs_guc set -Z datanode -D $standby_data_dir -c "undo_zone_count = 16384" gs_guc set -Z 
datanode -D $standby_data_dir -h "local replication $USER trust" + gs_guc set -Z datanode -D $standby2_data_dir -c "undo_zone_count = 16384" + gs_guc set -Z datanode -D $standby3_data_dir -c "undo_zone_count = 16384" + gs_guc set -Z datanode -D $standby4_data_dir -c "undo_zone_count = 16384" start_cluster - #create table - gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists decode_test; CREATE TABLE decode_test(id INT,name VARCHAR(15) NOT NULL);" - echo "drop table success" - - #create logical replication slot - pg_recvlogical -d $db -p $dn1_primary_port -S test --create + #create logical replication slots + pg_recvlogical -d $db -p $dn1_primary_port -S slot1 --create if [ $? -eq 0 ]; then - echo "create replication slot success" + echo "create replication slot slot1 success" else - echo "$failed_keyword: create replication slot failed." + echo "$failed_keyword: create replication slot slot1 failed." + exit 1 + fi + + sleep 1 + + pg_recvlogical -d $db -p $dn1_primary_port -S slot2 --create + if [ $? -eq 0 ]; then + echo "create replication slot slot2 success" + else + echo "$failed_keyword: create replication slot slot2 failed." + exit 1 + fi + + sleep 1 + + pg_recvlogical -d $db -p $dn1_primary_port -S slot3 --create + if [ $? -eq 0 ]; then + echo "create replication slot3 success" + else + echo "$failed_keyword: create replication slot slot3 failed." exit 1 fi @@ -34,39 +55,70 @@ function test_1() #start logical decoding on standby echo "begin to decode" - nohup pg_recvlogical -d $db -p $dn1_standby_port -S test --start -f - & + nohup pg_recvlogical -d $db -p $dn1_standby_port -o include-xids=false -o include-timestamp=true -o skip-empty-xacts=true -o only-local=true -o white-table-list='public.*' -o parallel-decode-num=5 -o standby-connection=false -o decode-style='j' -S slot1 --start -s 2 -f $scripts_dir/data/test1.log & if [ $? -eq 0 ]; then - echo "logical decoding on standby success" + echo "parallel decoding with type \'j\' start on standby success" else - echo "$failed_keyword: logical decoding on standby failed." + echo "$failed_keyword: parallel decoding with type \'j\' start on standby failed." exit 1 fi - #insert - gsql -d $db -p $dn1_primary_port -c "insert into decode_test values(1, 'gaussdb');" - gsql -d $db -p $dn1_primary_port -c "insert into decode_test values(2, 'opengauss');" - sleep 30 -} + nohup pg_recvlogical -d $db -p $dn1_standby_port -o parallel-decode-num=5 -o standby-connection=true -o decode-style='t' -o white-table-list='public.t4_decode,*.t1_decode' -S slot2 --start -s 2 -f $scripts_dir/data/test2.log & + if [ $? -eq 0 ]; then + echo "parallel decoding with type \'t\' start on standby success" + else + echo "$failed_keyword: parallel decoding with type \'t\' start on standby failed." + exit 1 + fi -function tear_down() -{ - set_default - sleep 3 + nohup pg_recvlogical -d $db -p $dn1_standby_port -o parallel-decode-num=5 -o standby-connection=true -o decode-style='b' -o white-table-list='public.t2_decode,public.t3_decode' -S slot3 --start -s 2 -f $scripts_dir/data/test3.log & + if [ $? -eq 0 ]; then + echo "parallel decoding with type \'b\' start on standby success" + else + echo "$failed_keyword: parallel decoding with type \'b\' start on standby failed." 
+ exit 1 + fi + + #run sql for parallel decoding + gsql -d $db -p $dn1_primary_port -f $scripts_dir/data/parallel_decode_xact.sql + gsql -d $db -p $dn1_primary_port -c "copy t1_decode from '$scripts_dir/data/parallel_decode_data';" + gsql -d $db -p $dn1_primary_port -c "copy t2_decode from '$scripts_dir/data/parallel_decode_data';" + gsql -d $db -p $dn1_primary_port -c "copy t3_decode from '$scripts_dir/data/parallel_decode_data';" + gsql -d $db -p $dn1_primary_port -c "copy t4_decode from '$scripts_dir/data/parallel_decode_data';" + gsql -d $db -p $dn1_standby_port -c "select * from gs_get_parallel_decode_status();" + sleep 60 #kill pg_recvlogical ps -ef | grep pg_recvlogical | grep -v grep | awk '{print $2}' | xargs kill -9 #drop table - gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists decode_test;" + gsql -d $db -p $dn1_primary_port -c "DROP TABLE IF EXISTS t1_decode; DROP TABLE IF EXISTS t2_decode; DROP TABLE IF EXISTS t3_decode; DROP TABLE IF EXISTS t4_decode; DROP TABLE IF EXISTS t5_decode;" #drop logical replication slot - pg_recvlogical -d $db -p $dn1_primary_port -S test --drop + pg_recvlogical -d $db -p $dn1_primary_port -S slot1 --drop if [ $? -eq 0 ]; then echo "drop replication slot success" else echo "$failed_keyword: drop replication slot failed." exit 1 fi + pg_recvlogical -d $db -p $dn1_primary_port -S slot2 --drop + if [ $? -eq 0 ]; then + echo "drop replication slot success" + else + echo "$failed_keyword: drop replication slot failed." + exit 1 + fi + pg_recvlogical -d $db -p $dn1_primary_port -S slot3 --drop + if [ $? -eq 0 ]; then + echo "drop replication slot success" + else + echo "$failed_keyword: drop replication slot failed." + exit 1 + fi + + rm $scripts_dir/data/test1.log + rm $scripts_dir/data/test2.log + rm $scripts_dir/data/test3.log } test_1 -tear_down diff --git a/src/test/ha/testcase/multi_standby_single/hash_index.sh b/src/test/ha/testcase/multi_standby_single/hash_index.sh deleted file mode 100644 index 863ed7d20..000000000 --- a/src/test/ha/testcase/multi_standby_single/hash_index.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/sh - -# hash index xlog -# 1. parallel recovery mode -# 2. 
extreme rto mode - -source ./util.sh - -function hash_index_test() -{ - db_name=$1 - echo "begin test hash index in database $db_name" - - gsql -d $db -p $dn1_primary_port -c "create database $db_name;" - - gsql -d $db_name -p $dn1_primary_port -c "create table hash_table_1 (id int, num int, sex varchar default 'male');" - gsql -d $db_name -p $dn1_primary_port -c "create index hash_t1_id1 on hash_table_1 using hash (id);" - gsql -d $db_name -p $dn1_primary_port -c "insert into hash_table_1 select random()*10, random()*10, 'XXX' from generate_series(1,5000);" - gsql -d $db_name -p $dn1_primary_port -c "delete from hash_table_1 where id = 7 and num = 1;" - gsql -d $db_name -p $dn1_primary_port -c "insert into hash_table_1 select 7, random()*3, 'XXX' from generate_series(1,500);" - gsql -d $db_name -p $dn1_primary_port -c "delete from hash_table_1 where id = 5;" - gsql -d $db_name -p $dn1_primary_port -c "vacuum hash_table_1;" - gsql -d $db_name -p $dn1_primary_port -c "insert into hash_table_1 select random()*50, random()*3, 'XXX' from generate_series(1,50000);" - gsql -d $db_name -p $dn1_primary_port -c "delete from hash_table_1 where num = 2;" - gsql -d $db_name -p $dn1_primary_port -c "vacuum hash_table_1;" - - gsql -d $db_name -p $dn1_primary_port -c "create table hash_table_2(id int, name varchar, sex varchar default 'male');" - gsql -d $db_name -p $dn1_primary_port -c "insert into hash_table_2 select random()*100, 'XXX', 'XXX' from generate_series(1,50000);" - gsql -d $db_name -p $dn1_primary_port -c "create or replace procedure hash_proc_9(sid in integer) -is -begin -set enable_indexscan = on; -set enable_bitmapscan = off; -delete from hash_table_9 where id = sid; -perform * from hash_table_9 where id = sid; -insert into hash_table_9 select sid, random() * 10, 'xxx' from generate_series(1,5000); -end; -/" - gsql -d $db_name -p $dn1_primary_port -c "call hash_proc_9(1);" - gsql -d $db_name -p $dn1_primary_port -c "call hash_proc_9(1);" - gsql -d $db_name -p $dn1_primary_port -c "call hash_proc_9(1);" - gsql -d $db_name -p $dn1_primary_port -c "call hash_proc_9(1);" - - sleep 3; - - gsql -d $db -p $dn1_primary_port -c "drop database $db_name;" -} - -function test_1() -{ - set_default - gs_guc set -Z datanode -D $primary_data_dir -c "autovacuum = off" - - # parallel recovery - echo "begin to kill primary" - kill_cluster - echo "begin to set parallel recovery param" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_max_workers = 2" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_parse_workers = 0" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_redo_workers = 0" - gs_guc set -Z datanode -D $primary_data_dir -c "hot_standby = on" - start_cluster - echo "start cluter success!" - hash_index_test "hash_db_1" - echo "begin to query primary" - query_primary - echo "begin to query standby" - query_standby - - # extreme rto - echo "begin to kill primary" - kill_cluster - echo "begin to set extreme rto param" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_max_workers = 0" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_parse_workers = 2" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_redo_workers = 1" - gs_guc set -Z datanode -D $primary_data_dir -c "hot_standby = off" - start_cluster - echo "start cluter success!" 
- hash_index_test "hash_db_2" - echo "begin to query primary" - query_primary - echo "begin to query standby" - query_standby -} - -function tear_down() -{ - sleep 1 - set_default - kill_cluster - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_max_workers = 4" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_parse_workers = 1" - gs_guc set -Z datanode -D $primary_data_dir -c "recovery_redo_workers = 1" - gs_guc set -Z datanode -D $primary_data_dir -c "hot_standby = on" - start_cluster -} - -test_1 -tear_down \ No newline at end of file diff --git a/src/test/ha/testcase/multi_standby_single/logical_decoding_on_standby.sh b/src/test/ha/testcase/multi_standby_single/logical_decoding_on_standby.sh deleted file mode 100644 index c0165b64e..000000000 --- a/src/test/ha/testcase/multi_standby_single/logical_decoding_on_standby.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/sh -# logical decoding on standby - -source ./util.sh - -function test_1() -{ - set_default - - echo "set logical decoding parameters" - kill_cluster - gs_guc set -Z datanode -D $primary_data_dir -c "wal_level = logical" - gs_guc set -Z datanode -D $primary_data_dir -c "enable_slot_log = on" - gs_guc set -Z datanode -D $primary_data_dir -h "local replication $USER trust" - gs_guc set -Z datanode -D $standby_data_dir -c "wal_level = logical" - gs_guc set -Z datanode -D $standby_data_dir -c "hot_standby = on" - gs_guc set -Z datanode -D $standby_data_dir -h "local replication $USER trust" - start_cluster - - #create table - gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists decode_test; CREATE TABLE decode_test(id INT,name VARCHAR(15) NOT NULL);" - echo "drop table success" - - #create logical replication slot - pg_recvlogical -d $db -p $dn1_primary_port -S test --create - if [ $? -eq 0 ]; then - echo "create replication slot success" - else - echo "$failed_keyword: create replication slot failed." - exit 1 - fi - - sleep 1 - - #start logical decoding on standby - echo "begin to decode" - nohup pg_recvlogical -d $db -p $dn1_standby_port -S test --start -f - & - if [ $? -eq 0 ]; then - echo "logical decoding on standby success" - else - echo "$failed_keyword: logical decoding on standby failed." - exit 1 - fi - - #insert - gsql -d $db -p $dn1_primary_port -c "insert into decode_test values(1, 'gaussdb');" - gsql -d $db -p $dn1_primary_port -c "insert into decode_test values(2, 'opengauss');" - sleep 30 -} - -function tear_down() -{ - set_default - sleep 3 - #kill pg_recvlogical - ps -ef | grep pg_recvlogical | grep -v grep | awk '{print $2}' | xargs kill -9 - - #drop table - gsql -d $db -p $dn1_primary_port -c "DROP TABLE if exists decode_test;" - - #drop logical replication slot - pg_recvlogical -d $db -p $dn1_primary_port -S test --drop - if [ $? -eq 0 ]; then - echo "drop replication slot success" - else - echo "$failed_keyword: drop replication slot failed." - exit 1 - fi -} - -test_1 -tear_down diff --git a/src/test/ha/testcase/multi_standby_single/standby_full_build.sh b/src/test/ha/testcase/multi_standby_single/standby_full_build.sh new file mode 100644 index 000000000..b3f6e9100 --- /dev/null +++ b/src/test/ha/testcase/multi_standby_single/standby_full_build.sh @@ -0,0 +1,30 @@ +#!/bin/sh +# standby3 requests a full build from standby2. +source ./util.sh + +function test_1() +{ + set_default + kill_cluster + start_cluster + + echo "start cluster success!" + inc_build_pattern="waiting for server to start..." 
+ echo "standby2_build_standby3" + build_result=`gs_ctl build -b standby_full -D ${standby3_data_dir} -C "localhost=127.0.0.1 localport=$standby3_port remotehost=127.0.0.1 remoteport=$standby2_port"` + if [[ $build_result =~ $inc_build_pattern ]] + then + echo "standby build success" + else + echo "standby build $failed_keyword" + fi +} + +function tear_down() { + sleep 1 + kill_cluster + start_cluster +} + +test_1 +tear_down diff --git a/src/test/ipv6/GNUmakefile b/src/test/ipv6/GNUmakefile new file mode 100644 index 000000000..a6782447a --- /dev/null +++ b/src/test/ipv6/GNUmakefile @@ -0,0 +1,48 @@ +#------------------------------------------------------------------------- +# +# GNUmakefile-- +# Makefile for src/test/ipv6 (the regression tests) +# +# Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/test/ipv6/GNUmakefile +# +#------------------------------------------------------------------------- + +subdir = src/test/ipv6 +top_builddir = ../../.. +include $(top_builddir)/src/Makefile.global + +# where to find psql for testing an existing installation +PSQLDIR = $(bindir) + +## +## Run tests +## + +REGRESS_OPTS = --dlpath=. $(EXTRA_REGRESS_OPTS) +REG_CONF = --regconf=regress.conf + +ipv6check_all: all + export prefix=$(prefix) && sh $(CURDIR)/check_ipv6.sh ipv6_all + +ipv6check_normal: all + export prefix=$(prefix) && sh $(CURDIR)/check_ipv6.sh normal + +ipv6check_primary_standby: all + export prefix=$(prefix) && sh $(CURDIR)/check_ipv6.sh primary_standby + +ipv6check_muti_standby: all + export prefix=$(prefix) && sh $(CURDIR)/check_ipv6.sh muti_standby + +ipv6check_casecade_standby: all + export prefix=$(prefix) && sh $(CURDIR)/check_ipv6.sh casecade_standby + +## +## Clean up +## + +# things created by various check targets +clean distclean maintainer-clean: + rm -rf $(pg_regress_clean_files) diff --git a/src/test/ipv6/Ipv6Test.java b/src/test/ipv6/Ipv6Test.java new file mode 100644 index 000000000..dda1eb981 --- /dev/null +++ b/src/test/ipv6/Ipv6Test.java @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * src/test/ipv6/Ipv6Test.java + * + * --------------------------------------------------------------------------------------- + */ +import java.io.FileInputStream; +import java.io.IOException; +import java.sql.*; +import java.util.*; +import java.text.SimpleDateFormat; +import org.postgresql.util.*; + +public +class Ipv6Test { +public + static Connection GetConnection(String port,String host) + { + String urls = "jdbc:postgresql://" + host + ":" + port + "/postgres?prepareThreshold=0&loggerLevel=off"; + String driver = "org.postgresql.Driver"; + + Properties urlProps = new Properties(); + urlProps.setProperty("user", "ipv6_tmp"); + urlProps.setProperty("password", "Gauss@123"); + + Connection conn = null; + try { + Class.forName(driver).newInstance(); + conn = DriverManager.getConnection(urls, urlProps); + System.out.println("Connection succeed!"); + } catch (Exception exception) { + exception.printStackTrace(); + return null; + } + + return conn; + }; + + +public + static void CreateTable(Connection conn) + { + Statement stmt = null; + try { + stmt = conn.createStatement(); + + int drc = stmt.executeUpdate("drop table if exists ipv6_test ;"); + + int rc = stmt.executeUpdate("create table ipv6_test(id int, class int, name text, score float);"); + + + stmt.close(); + } catch (SQLException exception) { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException exception1) { + exception1.printStackTrace(); + } + } + exception.printStackTrace(); + } + } + + +public + static void main(String[] args) + { + String PORT = args[0]; + String HOST = args[1]; + Connection conn = GetConnection(PORT,HOST); + + if (conn == null) { + System.out.println("connection failed"); + return; + } + CreateTable(conn); + + try { + conn.close(); + System.out.println("close connection"); + } catch (SQLException exception) { + exception.printStackTrace(); + } + } +} diff --git a/src/test/ipv6/Makefile b/src/test/ipv6/Makefile new file mode 100644 index 000000000..46d25a170 --- /dev/null +++ b/src/test/ipv6/Makefile @@ -0,0 +1,12 @@ +# The openGauss make files exploit features of GNU make that other makes +# do not have. Because it is a common mistake for users to try to build +# openGauss with a different make, we have this make file that does nothing +# but tell the user to use GNU make. + +# If the user were using GNU make now, this file would not get used because +# GNU make uses a make file named "GNUmakefile" in preference to "Makefile" +# if it exists. openGauss is shipped with a "GNUmakefile". + +all ipv6check: + @echo "You must use GNU make to use Postgres. It may be installed" + @echo "on your system with the name 'gmake'." 
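The plain Makefile above only redirects users to GNU make; the GNUmakefile earlier in this patch does the real work, mapping each `ipv6check_*` target onto one mode of check_ipv6.sh. A minimal invocation sketch, assuming GNU make is available as `make` and that `prefix` points at an installed openGauss tree (the /opt path below is an illustrative value, not taken from this patch):

```
# drive the suite through the GNUmakefile targets
cd src/test/ipv6
make ipv6check_normal           # runs: sh check_ipv6.sh normal
make ipv6check_muti_standby     # runs: sh check_ipv6.sh muti_standby

# or call the script directly against an installed tree
export prefix=/opt/opengauss    # illustrative install prefix
sh check_ipv6.sh ipv6_all
```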
diff --git a/src/test/ipv6/check_ipv6.sh b/src/test/ipv6/check_ipv6.sh new file mode 100644 index 000000000..b4fe8c06e --- /dev/null +++ b/src/test/ipv6/check_ipv6.sh @@ -0,0 +1,401 @@ +#!/bin/sh +# run all the test case of ipv6 + +source ./ipv6_env.sh +test -f regression.diffs.ipv6check && rm regression.diffs.ipv6check +rm -rf results +mkdir -p results + +function test_1() +{ + testcase_name="single" + rm -rf ./results/result_${testcase_name}.log + node_num_all=1 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the database...\n" + start_normal >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log + gsql_test >> ./results/result_${testcase_name}.log + jdbc_test >> ./results/result_${testcase_name}.log + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function test_2() +{ + testcase_name="primary_standby" + rm -rf ./results/result_${testcase_name}.log + node_num_all=2 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_standby_startup >> ./results/result_${testcase_name}.log >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "check result...\n" + check_instance_primary_standby >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log + gsql_test >> ./results/result_${testcase_name}.log + gsql_standby_test >> ./results/result_${testcase_name}.log + jdbc_test >> ./results/result_${testcase_name}.log + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... 
FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function test_3() +{ + testcase_name="muti_standby" + rm -rf ./results/result_${testcase_name}.log + node_num_all=3 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the muti standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode3 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_multi_standby_startup $node_num_all >> ./results/result_${testcase_name}.log + + printf "check result...\n" + check_instance_multi_standby >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log + gsql_test >> ./results/result_${testcase_name}.log + gsql_standby_test >> ./results/result_${testcase_name}.log + jdbc_test >> ./results/result_${testcase_name}.log + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function test_4() +{ + testcase_name="casecade_standby" + rm -rf ./results/result_${testcase_name}.log + node_num_all=3 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_standby_startup >> ./results/result_${testcase_name}.log + + printf "build the casecade standby...\n" + $bin_dir/gs_ctl build -Z single_node -M cascade_standby -D $data_dir/datanode3 -b standby_full >> ./results/tmp_${testcase_name}.log 2>&1 + check_casecade_standby_startup >> ./results/result_${testcase_name}.log + + printf "check result...\n" + check_instance_casecade_standby >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log + gsql_test >> ./results/result_${testcase_name}.log + gsql_standby_test >> ./results/result_${testcase_name}.log + jdbc_test >> ./results/result_${testcase_name}.log + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... 
FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function test_5() +{ + testcase_name="listen_thread_pool" + rm -rf ./results/result_${testcase_name}.log + node_num_all=3 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "set listen address and enalble thread pool...\n" + set_node_conf 1 >> ./results/result_${testcase_name}.log + set_node_conf 2 >> ./results/result_${testcase_name}.log + set_node_conf 3 >> ./results/result_${testcase_name}.log + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the muti standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode3 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_multi_standby_startup $node_num_all >> ./results/result_${testcase_name}.log + + printf "check result...\n" + check_instance_multi_standby >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log + gsql_test >> ./results/result_${testcase_name}.log + gsql_standby_test >> ./results/result_${testcase_name}.log + jdbc_test >> ./results/result_${testcase_name}.log + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... 
FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function test_6() +{ + testcase_name="check_disable_conn" + rm -rf ./results/result_${testcase_name}.log + node_num_all=3 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "set listen address...\n" + set_listen_address 1 + set_listen_address 2 + set_listen_address 3 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the muti standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode3 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_multi_standby_startup $node_num_all >> ./results/result_${testcase_name}.log + + printf "check result...\n" + check_instance_multi_standby >> ./results/result_${testcase_name}.log + + #local port standby + port=$(($g_base_standby_port + 4)) + if [ $( $bin_dir/gsql -d $db -p $dn1_standby_port -c "select read_disable_conn_file();" | grep $port | wc -l ) -eq 0 ]; then + echo "$failed_keyword when $testcase_name" >> ./results/result_${testcase_name}.log + fi + + #set disable_conn = remote port ( master port) + port2=$(($g_base_standby_port + 1)) + $bin_dir/gsql -d $db -p $dn1_standby_port -c "select disable_conn('specify_connection','"$g_local_ip"','"$port2"');" + if [ $( $bin_dir/gsql -d $db -p $dn1_standby_port -c "select read_disable_conn_file();" | grep $port2 | wc -l ) -eq 0 ]; then + echo "$failed_keyword when $testcase_name" >> ./results/result_${testcase_name}.log + fi + #check standby is connected to primary + sleep 2 + check_instance_multi_standby >> ./results/result_${testcase_name}.log + + #set disable_conn != remote port + port2=$(($g_base_standby_port + 13)) + $bin_dir/gsql -d $db -p $dn1_standby_port -c "select disable_conn('specify_connection','"$g_local_ip"','"$port2"');" + if [ $( $bin_dir/gsql -d $db -p $dn1_standby_port -c "select read_disable_conn_file();" | grep $port2 | wc -l ) -eq 0 ]; then + echo "$failed_keyword when $testcase_name" >> ./results/result_${testcase_name}.log + fi + #check standby is disconnected from primary + sleep 2 + if [ $(query_standby | grep -E $setup_keyword | wc -l) -eq 1 ]; then + echo "$failed_keyword when $testcase_name" >> ./results/result_${testcase_name}.log + fi + + #set disable_conn = remote port ( master port) again + port=$(($g_base_standby_port + 1)) + $bin_dir/gsql -d $db -p $dn1_standby_port -c "select disable_conn('specify_connection','"$g_local_ip"','"$port"');" + sleep 5 + #check standby is connected to primary + check_instance_multi_standby >> ./results/result_${testcase_name}.log + + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... 
FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function test_7() +{ + testcase_name="change_replconninfo" + rm -rf ./results/result_${testcase_name}.log + node_num_all=2 + + #stop all exists database + kill_all 3 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_standby_startup >> ./results/result_${testcase_name}.log >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "check result...\n" + check_instance_primary_standby >> ./results/result_${testcase_name}.log + + primary_ha_port=$(($g_base_standby_port + 1)) + tmp_primary_ha_port=$(($primary_ha_port - 1)) + sed -i "s/$primary_ha_port/$tmp_primary_ha_port/g" ${primary_data_dir}/postgresql.conf + sed -i "s/$primary_ha_port/$tmp_primary_ha_port/g" ${standby_data_dir}/postgresql.conf + $bin_dir/gs_ctl reload -D ${primary_data_dir} + $bin_dir/gs_ctl reload -D ${standby_data_dir} + sleep 5 + + printf "check ha result...\n" + check_instance_primary_standby >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log + gsql_test >> ./results/result_${testcase_name}.log + gsql_standby_test >> ./results/result_${testcase_name}.log + jdbc_test >> ./results/result_${testcase_name}.log + + if [ $( grep "$failed_keyword" ./results/result_${testcase_name}.log | wc -l ) -eq 0 ]; then + printf "===================================\n" + printf "%s tests passed.\n" ${testcase_name} + printf "===================================\n\n" + else + echo "${testcase_name} tests .... FAILED" >> regression.diffs.ipv6check + printf "===================================\n" + printf "%s tests .... FAILED.\n" ${testcase_name} + printf "===================================\n\n" + fi +} + +function tear_down() +{ + node_num_all=4 + + #stop all exists database + kill_all $node_num_all +} +server_type=$1 + +case "$server_type" in + ipv6_all) + test_1 + test_2 + test_3 + test_4 + #test_5 + test_6 + test_7 + ;; + normal) + test_1 + ;; + primary_standby) + test_2 + test_7 + ;; + muti_standby) + test_3 + #test_5 + test_6 + ;; + casecade_standby) + test_4 + ;; + *) + echo "Internal Error: server_type option processing error: $server_type" + echo "please input right paramenter values ipv6_all, normal, primary_standby, muti_standby or casecade_standby" + exit 1 +esac + +#tear_down + +printf "===================================\n" +printf "all tests finished.\n" +printf "===================================\n" + + + diff --git a/src/test/ipv6/create_server.py b/src/test/ipv6/create_server.py new file mode 100644 index 000000000..b7efd356e --- /dev/null +++ b/src/test/ipv6/create_server.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# --------------------------------------------------------------------------------------- +# +# IDENTIFICATION +# src/test/ipv6/create_server.py +# +# --------------------------------------------------------------------------------------- + +import getopt, sys, os +import shutil +import time +import string + +g_base_port = int(os.environ.get("g_base_port")) +g_pooler_base_port = int(os.environ.get("g_pooler_base_port")) +g_base_standby_port = int(os.environ.get("g_base_standby_port")) +install_path = os.environ.get("install_path") +g_data_path = os.environ.get("g_data_path") +g_local_ip = os.environ.get("g_local_ip") +g_valgrind = "" +g_passwd = "Gauss@123" +g_trace_compress = False + + +class Pterodb(): + def __init__(self, data_node_num, data_dir): + self.data_node_num = data_node_num + self.data_dir = data_dir + self.dname_prefix = "datanode" + print self.data_dir + self.ha_port_arr = [0 for i in range(data_node_num+1)] + self.service_port_arr = [0 for i in range(data_node_num+1)] + self.heartbeat_port_arr = [0 for i in range(data_node_num+1)] + + def init_env(self): + if(os.path.exists(self.data_dir) == False): + os.mkdir(self.data_dir) + else: + shutil.rmtree(self.data_dir) + os.mkdir(self.data_dir) + print "rm dir ok" + + #generate port array + self.__generate_port() + + for i in range(1,self.data_node_num + 1): + datanode_cmd_init = install_path + "/bin/gs_initdb -D " + self.data_dir + "/" + self.dname_prefix + str(i) + " --nodename=" + self.dname_prefix + str(i) + " -w " + g_passwd + print datanode_cmd_init + os.system(datanode_cmd_init) + + conf_file = self.data_dir + "/" + self.dname_prefix + str(i) + "/postgresql.conf" + self.__modify_conf_port(conf_file,i) + self.__turn_on_pg_log(conf_file) + if(self.data_node_num > 1): + self.__modify_conf_standby(conf_file,i) + + self.__modify_conf_application_name(conf_file, "dn_p" + str(i)) + + def __generate_port(self): + port = g_base_standby_port + for i in range(0,self.data_node_num): + self.ha_port_arr[i] = port + 1; + self.service_port_arr[i] = port + 2; + self.heartbeat_port_arr[i] = port + 3; + port = port + 3; + + def __modify_conf_standby(self, conf_file, n): + j = 1 + + file_handler = open(conf_file,"a") + for i in range(1,self.data_node_num + 1): + if(i != n): + #repl + string = "replconninfo%d = 'localhost=%s localport=%d localheartbeatport=%d localservice=%d remotehost=%s remoteport=%d remoteheartbeatport=%d remoteservice=%d'\n" % \ + (j, g_local_ip, self.ha_port_arr[n-1], self.heartbeat_port_arr[n-1], self.service_port_arr[n-1], g_local_ip, self.ha_port_arr[i-1], self.heartbeat_port_arr[i-1], self.service_port_arr[i-1]) + print string + file_handler.write(string) + j = j + 1 + + file_handler.close() + + def __modify_conf_application_name(self, conf_file, name): + file_handler = open(conf_file,"a") + string = "application_name = '" + name + "'" + "\n" + file_handler.write(string) + file_handler.close() + + def __modify_conf_port(self, conf_file, n): + file_handler = open(conf_file,"a") + + string = "listen_addresses = '*'"+ "\n" + file_handler.write(string) + + port = g_base_port + 3 * n + string = "port = " + str(port) + "\n" + file_handler.write(string) + + file_handler.close() + + def 
__turn_on_pg_log(self, conf_file): + file_handler = open(conf_file,"a") + pglog_conf = "logging_collector = on \n" + pglog_conf = pglog_conf + "log_directory = 'pg_log' \n" + pglog_conf = pglog_conf + "log_line_prefix = '%m %c %d %p %a %x %e ' \n" + pglog_conf = pglog_conf + "enable_data_replicate = off \n" + pglog_conf = pglog_conf + "replication_type = 1 \n" + file_handler.write(pglog_conf) + file_handler.close() + + def __switch_trace_cmpr(self): + for i in range(1, self.data_node_num+1): + conf_file = self.data_dir + "/" + self.dname_prefix + str(i) + "/postgresql.conf" + file_handler = open(conf_file,"a") + if g_trace_compress: + pglog_conf = "log_line_prefix = '' \n" + pglog_conf = pglog_conf + "log_min_messages = info \n" + file_handler.write(pglog_conf) + else: + pglog_conf = "log_line_prefix = '%m %c %d %p %a %x %e ' \n" + pglog_conf = pglog_conf + "log_min_messages = warning \n" + file_handler.write(pglog_conf) + file_handler.close() + + def __create_default_db(self): + # connect to primary DN to create db + cmd = install_path + "/bin/gsql -p " + str(g_base_port + 3) + " postgres -c 'create database test'" + os.system(cmd) + + def __rm_pid_file(self): + cmd = "rm -rf " + # dn + for i in range(1,self.data_node_num+2): + rm_cmd = cmd + self.data_dir + "/" + self.dname_prefix + str(i) + "/postmaster.pid" + print rm_cmd + os.system(rm_cmd) + + def run(self): + self.init_env() + print "create ok" + +def usage(): + print "------------------------------------------------------" + print "python create_server.py\n" + print " -t trace compression log" + print " -g means memcheck" + print " -D data directory" + print "------------------------------------------------------" + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], "hrD:c:d:t:sovg", ["help", "data_dir=", "regress="]) + except getopt.GetoptError, err: + # print help information and exit: + print str(err) # will print something like "option -a not recognized" + # usage() + sys.exit(2) + + datanode_num = 0 + global g_valgrind; + global g_trace_compress; + + data_dir = g_data_path + #1 start + #2 stop + + for o, a in opts: + if o == "-v": + verbose = True + elif o in ("-h", "--help"): + usage() + sys.exit() + elif o in ("-D", "data_dir"): + data_dir = a + elif o in ("-d", "--datanode"): + datanode_num = int(a) + elif o in ("-g", "--memcheck"): + g_valgrind = "valgrind --tool=memcheck --leak-check=full --log-file=memcheck.log " + elif o in ("-t", "--trace"): + if 'on' == a: + g_trace_compress = True + else: + g_trace_compress = False + run_type = 3 + print g_trace_compress + else: + assert False, "unhandled option" + + if(datanode_num == 0): + usage() + sys.exit() + + ptdb = Pterodb(datanode_num, data_dir) + ptdb.init_env() + + +if __name__ == "__main__": + main() diff --git a/src/test/ipv6/ipv6_env.sh b/src/test/ipv6/ipv6_env.sh new file mode 100644 index 000000000..da33a7684 --- /dev/null +++ b/src/test/ipv6/ipv6_env.sh @@ -0,0 +1,773 @@ +#!/bin/sh +#some enviroment vars +export g_base_port=7777 + +export g_pooler_base_port=`expr $g_base_port \+ 410` +export g_base_standby_port=`expr $g_base_port \+ 400` +export install_path="$prefix" +export GAUSSHOME="$prefix" +export LD_LIBRARY_PATH=$prefix/lib:$LD_LIBRARY_PATH +export PATH="$prefix/bin":$PATH +export g_data_path="$install_path/ipv6_data" + +export g_local_ip="::1" + +root_dir=$(dirname $(pwd)) +export jar_path=$root_dir/regress/jdbc_test/gsjdbc400.jar + +db=postgres +scripts_dir=`pwd` +username=`whoami` +data_dir=$g_data_path +install_dir=$install_path 
+bin_dir="$install_dir/bin" +passwd="Gauss@123" + +dn1_primary_port=`expr $g_base_port \+ 3` +dn1_standby_port=`expr $g_base_port \+ 6` +standby2_port=`expr $g_base_port \+ 9` +standby3_port=`expr $g_base_port \+ 12` +standby4_port=`expr $g_base_port \+ 15` +casecade_standby_port=`expr $g_base_port \+ 9` +dn_temp_port=`expr $g_base_port \+ 21` +dn1_normal_port=`expr $g_base_port \+ 3` + +primary_data_dir="$data_dir/datanode1" +standby_data_dir="$data_dir/datanode2" +standby2_data_dir="$data_dir/datanode3" +standby3_data_dir="$data_dir/datanode4" +standby4_data_dir="$data_dir/datanode5" +casecade_standby_data_dir="$data_dir/datanode3" +normal_data_dir="$data_dir/datanode1" + +failed_keyword="testcase_failed" +startup_keyword="Normal|repair" +startup_keyword1="starting" +setup_keyword="peer_role" +primary_setup_keyword="Standby" +standby_setup_keyword="Primary" +casecade_standby_setup_keyword="Cascade Standby" +walkeepsegment_keyword="removed" +create_table_keyword="TABLE" +build_keyword="completed" +building_keyword="Building" +buildfailed_keyword="failed" + +function query_node() +{ +node_num=$1 +echo query node $node_num +if [[ $node_num -lt 1 ]] ; then + echo "node_num must great than 0" + return 0 +fi + +$bin_dir/gs_ctl query -D $data_dir/datanode$node_num +} + +function query_primary() +{ +echo query primary +$bin_dir/gs_ctl query -D $primary_data_dir +} + +function query_normal() +{ +echo query normal +$bin_dir/gs_ctl query -D $primary_data_dir +} + +function query_multi_standby() +{ + node_num=$1 + if [[ $node_num -lt 2 ]] ; then + echo "node_num must great than 1" + return 0 + fi + echo query multi standby + for((i=2; i<$node_num+1; i++));do + node_dir=$data_dir/datanode$i + $bin_dir/gs_ctl query -D $node_dir + done +} + +function query_casecade_standby() +{ + echo query casecade standby + $bin_dir/gs_ctl query -D $casecade_standby_data_dir +} + +function query_standby() +{ +echo query standby +$bin_dir/gs_ctl query -D $standby_data_dir +} + +function query_standby2() +{ + echo query standby2 + $bin_dir/gs_ctl query -D $standby2_data_dir + +} + +function query_standby3() +{ + echo query standby3 + $bin_dir/gs_ctl query -D $standby3_data_dir + +} + +function check_primary_startup() +{ +echo checking primary startup +for i in $(seq 1 30) +do + if [ $(query_primary | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_primary | grep $startup_keyword1 | wc -l) -gt 0 ]; then + sleep 2 + else + return 0 + fi +done +echo "$failed_keyword when check_primary_startup" +return 1 +} + +function check_normal_startup() +{ +echo checking normal startup +for i in $(seq 1 30) +do + if [ $(query_normal | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_normal | grep $startup_keyword1 | wc -l) -gt 0 ]; then + sleep 2 + else + return 0 + fi +done +echo "$failed_keyword when check_normal_startup" +return 1 +} + +function check_standby_startup() +{ +echo checking standby startup +for i in $(seq 1 30) +do + if [ $(query_standby | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_standby | grep $startup_keyword1 | wc -l) -gt 0 ]; then + sleep 2 + else + return 0 + fi +done +echo "$failed_keyword when check_standby_startup" +return 1 +} + +function check_casecade_standby_startup() +{ +echo checking casecade standby startup +for i in $(seq 1 30) +do + if [ $(query_casecade_standby | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_casecade_standby | grep $startup_keyword1 | wc -l) -gt 0 ]; then + sleep 2 + else + return 0 + fi +done +echo "$failed_keyword when check_casecade_standby_startup" +return 1 +} 
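Every check_* probe above, and the multi-standby variants that follow, repeats the same idiom: poll a gs_ctl query predicate up to 30 times at two-second intervals, return on the first hit, and print $failed_keyword on timeout. The shared shape, as a sketch only (wait_until and its argument are illustrative names, not helpers this patch defines):

```
# illustrative helper, not part of ipv6_env.sh
function wait_until()
{
    predicate="$1"    # a command that exits 0 once the desired state is reached
    for i in $(seq 1 30)
    do
        if eval "$predicate"; then
            return 0
        fi
        sleep 2
    done
    echo "$failed_keyword when wait_until: $predicate"
    return 1
}

# e.g. check_standby_setup could then be phrased as:
# wait_until '[ $(query_standby | grep -E $standby_setup_keyword | wc -l) -eq 1 ]'
```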
+ +function check_multi_standby_startup() +{ + node_num=$1 + if [[ $node_num -lt 2 ]] ; then + echo "node_num must great than 1" + return 0 + fi + echo checking standby startup + for i in $(seq 1 30) + do + if [ $(query_multi_standby $node_num | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_multi_standby $node_num | grep $startup_keyword1 | wc -l) -gt 0 ]; then + sleep 2 + else + return 0 + fi + done + echo "$failed_keyword when check_multi_standby_startup" + return 1 +} + +function check_primary_setup() +{ +echo checking primary setup +for i in $(seq 1 30) +do + if [ $(query_primary | grep -E $setup_keyword | wc -l) -eq 1 ]; then + return 0 + else + sleep 2 + fi +done + +echo "$failed_keyword when check_primary_setup" +return 1 +} + +function check_replication_setup_for_primary() +{ +echo checking replication setup +for i in $(seq 1 30) +do + if [ $(query_primary | grep -E $setup_keyword | wc -l) -ge 1 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_replication_setup_for_primary" +return 1 +} + +function check_primary_setup_for_multi_standby() +{ +echo checking primary setup for multi-standby mode +for i in $(seq 1 30) +do + if [ $(query_primary | grep -E $setup_keyword | wc -l) -ge 2 ]; then + return 0 + else + sleep 2 + fi +done + +echo "$failed_keyword when check_primary_setup_for_multi_standby" +return 1 +} + +function check_standby_replication_setup() +{ +echo checking replication setup +for i in $(seq 1 30) +do + if [ $(query_standby | grep -E $setup_keyword | wc -l) -eq 1 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_standby_replication_setup" +return 1 +} + +function check_standby_as_primary_setup() +{ +echo checking replication setup +for i in $(seq 1 30) +do + if [ $(query_standby | grep -E $setup_keyword | wc -l) -eq 2 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_standby_as_primary_setup" +return 1 +} + + +function check_casecade_setup() +{ +echo checking casecade standby setup +for i in $(seq 1 30) +do + if [ $(query_casecade_standby | grep -E $setup_keyword | wc -l) -eq 1 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_casecade_setup" +return 1 +} + +function check_standby_replication_setup_for_casecade() +{ +echo checking replication setup for casecade +for i in $(seq 1 30) +do + if [ $(query_standby | grep -E $setup_keyword | wc -l) -ge 2 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_replication_setup_for_casecade" +return 1 +} + + +function check_standby_setup() +{ +echo checking standby setup +for i in $(seq 1 30) +do + if [ $(query_standby | grep -E $standby_setup_keyword | wc -l) -eq 1 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_standby_setup" +return 1 +} + +function check_standby2_setup() +{ +echo checking standby2 setup +for i in $(seq 1 30) +do + if [ $(query_standby2 | grep -E $standby_setup_keyword | wc -l) -eq 1 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_standby2_setup" +return 1 +} + +function check_walkeepsegment() +{ +echo checking wal keep segment +for i in $(seq 1 30) +do + if [ $(query_standby | grep -E $walkeepsegment_keyword | wc -l) -eq 1 ]; then + return 0 + else + sleep 2 + fi +done +echo "$failed_keyword when check_walkeepsegment" +return 1 +} + +function wait_catchup_finish() +{ +echo wait catchup finish +while [ $($bin_dir/gsql -d $db -p $dn1_primary_port -c "select * from pg_get_senders_catchup_time;" 
| grep "Catchup" | wc -l) -gt 0 ] +do + sleep 1 +done +} + +function wait_primarycatchup_finish() +{ +echo wait catchup finish +while [ $($bin_dir/gsql -d $db -p $dn1_standby_port -c "select * from pg_get_senders_catchup_time;" | grep "Catchup" | wc -l) -gt 0 ] +do + sleep 1 +done +} + +function kill_all() +{ + node_num=$1 + if [[ $node_num -lt 1 ]] ; then + echo "node_num must great than 0" + return 0 + fi + echo "kill all" + for((i=1; i<$node_num+1; i++));do + node_dir=$data_dir/datanode$i + ps -ef | grep -w $node_dir | grep -v grep | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1 + sleep 1 + done +} + +function kill_normal() +{ +echo "kill primary" +ps -ef | grep $USER | grep -w $normal_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 +sleep 1 +} + +function kill_primary() +{ +echo "kill primary" +ps -ef | grep $USER | grep -w $primary_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 +sleep 1 +} + +function kill_standby() +{ +echo "kill standby" +ps -ef | grep $USER | grep -w $standby_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 +sleep 1 +} + +function kill_multi_standy() +{ + node_num=$1 + if [[ $node_num -lt 2 ]] ; then + echo "node_num must great than 1" + return 0 + fi + echo kill multi standy + for((i=2; i<$node_num+1; i++));do + standby_dir=$data_dir/datanode$i + echo "kill standby $standby_dir" + ps -ef | grep -w $standby_dir | grep -v grep | awk '{print $2}' | xargs kill -9 + sleep 1 + done +} + +function kill_dummystandby() +{ +echo "kill dummystandby" +ps -ef | grep $USER | grep -w $dummystandby_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 +sleep 1 +} + +function kill_standby2() +{ + echo "kill standby2" + ps -ef | grep -w $standby2_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 + sleep 1 +} + +function kill_standby3() +{ + echo "kill standby3" + ps -ef | grep -w $standby3_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 + sleep 1 +} + +function kill_standby4() +{ + echo "kill standby4" + ps -ef | grep -w $standby4_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9 + sleep 1 +} + +function stop_normal() +{ +echo "stop normal" +$bin_dir/gs_ctl stop -D $normal_data_dir -m fast >> ./results/gs_ctl.log 2>&1 +sleep 1 +} + +function stop_primary() +{ +echo "stop primary" +$bin_dir/gs_ctl stop -D $primary_data_dir -m fast >> ./results/gs_ctl.log 2>&1 +sleep 1 +} + +function stop_multi_standby() +{ + node_num=$1 + if [[ $node_num -lt 2 ]] ; then + echo "node_num must great than 1" + return 0 + fi + + echo "stop all standby" + for((i=2; i<$node_num+1; i++));do + standby_dir=$data_dir/datanode$i + $bin_dir/gs_ctl stop -D $standby_dir -m fast >> ./results/gs_ctl.log 2>&1 + sleep 1 + done + +} + +function stop_standby() +{ +echo "stop standby" +$bin_dir/gs_ctl stop -D $standby_data_dir -m fast >> ./results/gs_ctl.log 2>&1 +sleep 1 +} + + +function stop_standby2() +{ +echo "stop standby" +$bin_dir/gs_ctl stop -D $standby2_data_dir -m fast >> ./results/gs_ctl.log 2>&1 +sleep 1 +} + +function stop_standby3() +{ +echo "stop standby" +$bin_dir/gs_ctl stop -D $standby3_data_dir -m fast >> ./results/gs_ctl.log 2>&1 +sleep 1 +} + +function stop_standby4() +{ +echo "stop standby" +$bin_dir/gs_ctl stop -D $standby4_data_dir -m fast >> ./results/gs_ctl.log 2>&1 +sleep 1 +} + +function start_normal() +{ +echo "start normal" +$bin_dir/gs_guc set -D ${normal_data_dir} -c "most_available_sync = on" >> ./results/gaussdb.log 2>&1 +$bin_dir/gaussdb --single_node -M normal -p $dn1_normal_port -D $normal_data_dir >> 
./results/gaussdb.log 2>&1 & +check_normal_startup +} + +function start_primary() +{ +echo "start primary" +$bin_dir/gaussdb --single_node -M primary -p $dn1_primary_port -D $primary_data_dir >> ./results/gaussdb.log 2>&1 & +check_primary_startup +} + +function start_multi_standby() +{ + node_num=$1 + if [[ $node_num -lt 2 ]] ; then + echo "node_num must great than 1" + return 0 + fi + + echo "start all standby" + for((i=2; i<$node_num+1; i++));do + standby_dir=$data_dir/datanode$i + echo $standby_dir + port=$(($g_base_port + i*3)) + echo $port + $bin_dir/gaussdb --single_node -M standby -p $port -D $standby_dir >> ./results/gaussdb.log 2>&1 & + done + check_multi_standby_startup $node_num +} + +function start_casecade_standby() +{ +echo "start standby" +$bin_dir/gaussdb --single_node -M cascade_standby -p $casecade_standby_port -D $casecade_standby_data_dir >> ./results/gaussdb.log 2>&1 & +check_casecade_standby_startup +} + +function start_standby() +{ +echo "start standby" +$bin_dir/gaussdb --single_node -M standby -p $dn1_standby_port -D $standby_data_dir >> ./results/gaussdb.log 2>&1 & +check_standby_startup +} + + +function start_standby2() +{ + echo "start standby2" + $bin_dir/gaussdb --single_node -M standby -p $standby2_port -D $standby2_data_dir >> ./results/gaussdb.log 2>&1 & + sleep 2 +} + +function start_standby3() +{ + echo "start standby3" + $bin_dir/gaussdb --single_node -M standby -p $standby3_port -D $standby3_data_dir >> ./results/gaussdb.log 2>&1 & + sleep 2 +} + +function start_standby4() +{ + echo "start standby4" + $bin_dir/gaussdb --single_node -M standby -p $standby4_port -D $standby4_data_dir >> ./results/gaussdb.log 2>&1 & + sleep 2 +} + +function start_primary_as_standby() +{ +echo "start primary as standby" +$bin_dir/gaussdb --single_node -M standby -p $dn1_primary_port -D $primary_data_dir >> ./results/gaussdb.log 2>&1 & +check_primary_startup +} + +function start_standby_as_primary() +{ +echo "start standby as primary" +$bin_dir/gaussdb --single_node -M primary -p $dn1_standby_port -D $standby_data_dir >> ./results/gaussdb.log 2>&1 & +check_standby_startup +} + +function start_primary_as_pending() +{ +echo "start primary as pending" +$bin_dir/gaussdb --single_node -M pending -p $dn1_primary_port -D $primary_data_dir >> ./results/gaussdb.log 2>&1 & +check_primary_startup +} + +function start_standby_as_pending() +{ +echo "start standby as pending" +$bin_dir/gaussdb --single_node -M pending -p $dn1_standby_port -D $standby_data_dir >> ./results/gaussdb.log 2>&1 & +check_standby_startup +} + +function notify_primary_as_primary() +{ +echo "notify primary as primary" +$bin_dir/gs_ctl notify -M primary -D $primary_data_dir +} + +function notify_primary_as_standby() +{ +echo "notify primary as standby" +$bin_dir/gs_ctl notify -M standby -D $primary_data_dir +} + +function notify_standby_as_primary() +{ +echo "notify standby as primary" +$bin_dir/gs_ctl notify -M primary -D $standby_data_dir +} + +function notify_standby_as_standby() +{ +echo "notify standby as standby" +$bin_dir/gs_ctl notify -M standby -D $standby_data_dir +} + +# for primary-standby +function check_instance_primary_standby(){ +echo query datanode1 +check_primary_setup + +echo query datanode1_standby +check_standby_replication_setup +} + +# for multi-standby +function check_instance_multi_standby(){ +echo query datanode1 +check_primary_setup_for_multi_standby + +echo query datanode_standby +check_standby_setup +check_standby2_setup + +} + +# for casecade-standby +function 
check_instance_casecade_standby(){ +echo query datanode1 +check_replication_setup_for_primary + +echo query datanode1_standby +check_casecade_setup +check_standby_replication_setup_for_casecade +} + +function gsql_test(){ +echo gsql ipv6 connect +if [ $($bin_dir/gsql -d $db -p $dn1_primary_port -h ${g_local_ip} -U ipv6_tmp -W ${passwd} -c "create table ipv6_test(i int);" | grep "CREATE TABLE" | wc -l) -gt 0 ] +then + return 0 +else + echo "$failed_keyword when gsql_test" +fi +} + +function create_test_user(){ +echo create test user +if [ $($bin_dir/gsql -d $db -p $dn1_primary_port -c "create user ipv6_tmp with login sysadmin password '"$passwd"';" | grep "CREATE ROLE" | wc -l) -gt 0 ] +then + return 0 +else + echo "$failed_keyword when create_test_user" +fi +} + +function gsql_standby_test(){ +echo gsql ipv6 standby connect +if [ $($bin_dir/gsql -d $db -p $dn1_standby_port -h ${g_local_ip} -U ipv6_tmp -W ${passwd} -c "\dt;" | grep "ipv6_test" | wc -l) -gt 0 ] +then + return 0 +else + echo "$failed_keyword when gsql_standby_test" +fi +} + +function jdbc_test(){ +echo jdbc ipv6 connect +javac -cp $CLASSPATH:$jar_path Ipv6Test.java +if [ $(java -cp $CLASSPATH:$jar_path Ipv6Test $dn1_primary_port $g_local_ip | grep "Connection succeed!" | wc -l) -gt 0 ] +then + return 0 +else + echo "$failed_keyword when jdbc_test" +fi +} + +function set_listen_address(){ +echo set node listen address +node_num=$1 +if [[ $node_num -le 0 ]] ; then + echo "node_num must great than 0" + return 0 +fi + +dir=$data_dir/datanode$node_num +if [ $($bin_dir/gs_guc set -D ${dir} -c "listen_addresses='"$g_local_ip"'" | grep "Success to perform gs_guc!" | wc -l) -gt 0 ] +then + return 0 +else + echo "$failed_keyword when set_listen_address" +fi +} + +function set_node_conf(){ +echo set node listen address +node_num=$1 +if [[ $node_num -le 0 ]] ; then + echo "node_num must great than 0" + return 0 +fi + +dir=$data_dir/datanode$node_num +if [ $($bin_dir/gs_guc set -D ${dir} -c "listen_addresses='"$g_local_ip"'" | grep "Success to perform gs_guc!" | wc -l) -lt 1 ] +then + echo "$failed_keyword when set_listen_addresses" +fi + +if [ $($bin_dir/gs_guc set -D ${dir} -c "enable_thread_pool=on" | grep "Success to perform gs_guc!" | wc -l) -lt 1 ] +then + echo "$failed_keyword when set_enable_thread_pool" +fi +} + +function setup_hba() +{ + node_num=$1 + if [[ $node_num -le 0 ]] ; then + echo "node_num must great than 0" + return 0 + fi + + if [[ "$g_local_ip" = "127.0.0.1" ]]; then + return 0 + elif [[ "$g_local_ip" = "::1" ]]; then + return 0 + fi + + result=$(echo $g_local_ip | grep "::") + if [[ "$result" != "" ]] + then + mask="128" + else + mask="32" + fi + + for((i=1; i<$node_num+1; i++)) + do + dir=$data_dir/datanode$i + hba_line="host all all "${g_local_ip}/${mask}" trust" + $bin_dir/gs_guc set -Z datanode -D ${dir} -h "${hba_line}" >> ./results/gaussdb.log 2>&1 + # set up hba for test user + hba_line="host all ipv6_tmp "${g_local_ip}/${mask}" sha256" + $bin_dir/gs_guc set -Z datanode -D ${dir} -h "${hba_line}" >> ./results/gaussdb.log 2>&1 + done +} diff --git a/src/test/mutil_ip/Makefile b/src/test/mutil_ip/Makefile new file mode 100644 index 000000000..a79849bcb --- /dev/null +++ b/src/test/mutil_ip/Makefile @@ -0,0 +1,30 @@ +# +# Makefile for mutil ip test +# + +subdir = src/test/mutil_ip +top_builddir = ../../.. 
+include $(top_builddir)/src/Makefile.global + +ifeq ($(PORTNAME), win32) +LDLIBS += -lws2_32 +endif + +override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) +override LDLIBS := $(libpq_pgport) $(LDLIBS) + +ifneq "$(MAKECMDGOALS)" "clean" + ifneq "$(MAKECMDGOALS)" "distclean" + ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1" + -include $(DEPEND) + endif + endif +endif +PROGS = testlibpq + +check_mutilip: $(PROGS) + export prefix=$(prefix) && sh check_mutilip.sh + +clean: + rm -f $(PROGS) *.depend + rm -f ./results/* \ No newline at end of file diff --git a/src/test/mutil_ip/check_mutilip.sh b/src/test/mutil_ip/check_mutilip.sh new file mode 100644 index 000000000..a88446559 --- /dev/null +++ b/src/test/mutil_ip/check_mutilip.sh @@ -0,0 +1,78 @@ +#!/bin/sh +# run all the test case of mutilip + +source ./mutilip_env.sh +rm -rf results +mkdir -p results +#set -e + +function create_instance() +{ + testcase_name="create_instance" + rm -rf ./results/result_${testcase_name}.log + node_num_all=2 + + #stop all exists database + kill_all 2 >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "init the database...\n" + python create_server.py -d $node_num_all >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "setup hba for ip...\n" + setup_hba $node_num_all + + printf "start the primary database...\n" + start_primary >> ./results/result_${testcase_name}.log + + printf "build the standby...\n" + $bin_dir/gs_ctl build -Z single_node -D $data_dir/datanode2 -b full >> ./results/tmp_${testcase_name}.log 2>&1 + check_standby_startup >> ./results/result_${testcase_name}.log >> ./results/tmp_${testcase_name}.log 2>&1 + + printf "check result...\n" + check_instance_primary_standby >> ./results/result_${testcase_name}.log + + create_test_user >> ./results/result_${testcase_name}.log +} + +function mutilip_test() +{ + while read line + do + line_content=`eval echo "$line" | sed -e 's/\r//g' | sed -e 's/\!/\&/g'` + echo "trying $line_content" + ./testlibpq "$line_content" + echo "" + done < mutilip.in >results/mutilip.out 2>&1 + + cp results/mutilip.out results/mutilip.out.org + sed -i 's/".*"//g' results/mutilip.out + sed -i "s/$host_name/host_name/g" results/mutilip.out + if [[ "$g_local_ip" = "127.0.0.1" ]]; then + expect_file_name=expected_127.out + else + expect_file_name=expected_ipv4.out + sed -i "s/$g_local_ip/local_ip/g" results/mutilip.out + fi + + if diff -c $expect_file_name results/mutilip.out >results/mutilip.diff; then + echo "========================================" + echo "All tests passed" + exit 0 + else + echo "========================================" + echo "FAILED: the test result differs from the expected output" + echo + echo "Review the difference in results/mutilip.diff" + echo "========================================" + exit 1 + fi +} + +create_instance +mutilip_test +kill_all 2 + + + + + diff --git a/src/test/mutil_ip/create_server.py b/src/test/mutil_ip/create_server.py new file mode 100644 index 000000000..74ea7505f --- /dev/null +++ b/src/test/mutil_ip/create_server.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# --------------------------------------------------------------------------------------- +# +# IDENTIFICATION +# src/test/mutil_ip/create_server.py +# +# --------------------------------------------------------------------------------------- + +import getopt, sys, os +import shutil +import time +import string + +g_base_port = int(os.environ.get("g_base_port")) +g_pooler_base_port = int(os.environ.get("g_pooler_base_port")) +g_base_standby_port = int(os.environ.get("g_base_standby_port")) +install_path = os.environ.get("install_path") +g_data_path = os.environ.get("g_data_path") +g_local_ip = os.environ.get("g_local_ip") +g_valgrind = "" +g_passwd = "Gauss@123" +g_trace_compress = False + + +class Pterodb(): + def __init__(self, data_node_num, data_dir): + self.data_node_num = data_node_num + self.data_dir = data_dir + self.dname_prefix = "datanode" + print self.data_dir + self.ha_port_arr = [0 for i in range(data_node_num+1)] + self.service_port_arr = [0 for i in range(data_node_num+1)] + self.heartbeat_port_arr = [0 for i in range(data_node_num+1)] + + def init_env(self): + if(os.path.exists(self.data_dir) == False): + os.mkdir(self.data_dir) + else: + shutil.rmtree(self.data_dir) + os.mkdir(self.data_dir) + print "rm dir ok" + + #generate port array + self.__generate_port() + + for i in range(1,self.data_node_num + 1): + datanode_cmd_init = install_path + "/bin/gs_initdb -D " + self.data_dir + "/" + self.dname_prefix + str(i) + " --nodename=" + self.dname_prefix + str(i) + " -w " + g_passwd + print datanode_cmd_init + os.system(datanode_cmd_init) + + conf_file = self.data_dir + "/" + self.dname_prefix + str(i) + "/postgresql.conf" + self.__modify_conf_port(conf_file,i) + self.__turn_on_pg_log(conf_file) + if(self.data_node_num > 1): + self.__modify_conf_standby(conf_file,i) + + self.__modify_conf_application_name(conf_file, "dn_p" + str(i)) + + def __generate_port(self): + port = g_base_standby_port + for i in range(0,self.data_node_num): + self.ha_port_arr[i] = port + 1; + self.service_port_arr[i] = port + 2; + self.heartbeat_port_arr[i] = port + 3; + port = port + 3; + + def __modify_conf_standby(self, conf_file, n): + j = 1 + + file_handler = open(conf_file,"a") + for i in range(1,self.data_node_num + 1): + if(i != n): + #repl + string = "replconninfo%d = 'localhost=%s localport=%d localheartbeatport=%d localservice=%d remotehost=%s remoteport=%d remoteheartbeatport=%d remoteservice=%d'\n" % \ + (j, g_local_ip, self.ha_port_arr[n-1], self.heartbeat_port_arr[n-1], self.service_port_arr[n-1], g_local_ip, self.ha_port_arr[i-1], self.heartbeat_port_arr[i-1], self.service_port_arr[i-1]) + print string + file_handler.write(string) + j = j + 1 + + file_handler.close() + + def __modify_conf_application_name(self, conf_file, name): + file_handler = open(conf_file,"a") + string = "application_name = '" + name + "'" + "\n" + file_handler.write(string) + file_handler.close() + + def __modify_conf_port(self, conf_file, n): + file_handler = open(conf_file,"a") + + string = "listen_addresses = '*'"+ "\n" + file_handler.write(string) + + port = g_base_port + 3 * n + string = "port = " + str(port) + "\n" + file_handler.write(string) + + file_handler.close() + + 
def __turn_on_pg_log(self, conf_file): + file_handler = open(conf_file,"a") + pglog_conf = "logging_collector = on \n" + pglog_conf = pglog_conf + "log_directory = 'pg_log' \n" + pglog_conf = pglog_conf + "log_line_prefix = '%m %c %d %p %a %x %e ' \n" + pglog_conf = pglog_conf + "enable_data_replicate = off \n" + pglog_conf = pglog_conf + "replication_type = 1 \n" + file_handler.write(pglog_conf) + file_handler.close() + + def __switch_trace_cmpr(self): + for i in range(1, self.data_node_num+1): + conf_file = self.data_dir + "/" + self.dname_prefix + str(i) + "/postgresql.conf" + file_handler = open(conf_file,"a") + if g_trace_compress: + pglog_conf = "log_line_prefix = '' \n" + pglog_conf = pglog_conf + "log_min_messages = info \n" + file_handler.write(pglog_conf) + else: + pglog_conf = "log_line_prefix = '%m %c %d %p %a %x %e ' \n" + pglog_conf = pglog_conf + "log_min_messages = warning \n" + file_handler.write(pglog_conf) + file_handler.close() + + def __create_default_db(self): + # connect to primary DN to create db + cmd = install_path + "/bin/gsql -p " + str(g_base_port + 3) + " postgres -c 'create database test'" + os.system(cmd) + + def __rm_pid_file(self): + cmd = "rm -rf " + # dn + for i in range(1,self.data_node_num+2): + rm_cmd = cmd + self.data_dir + "/" + self.dname_prefix + str(i) + "/postmaster.pid" + print rm_cmd + os.system(rm_cmd) + + def run(self): + self.init_env() + print "create ok" + +def usage(): + print "------------------------------------------------------" + print "python create_server.py\n" + print " -t trace compression log" + print " -g means memcheck" + print " -D data directory" + print "------------------------------------------------------" + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], "hrD:c:d:t:sovg", ["help", "data_dir=", "regress="]) + except getopt.GetoptError, err: + # print help information and exit: + print str(err) # will print something like "option -a not recognized" + # usage() + sys.exit(2) + + datanode_num = 0 + global g_valgrind; + global g_trace_compress; + + data_dir = g_data_path + #1 start + #2 stop + + for o, a in opts: + if o == "-v": + verbose = True + elif o in ("-h", "--help"): + usage() + sys.exit() + elif o in ("-D", "data_dir"): + data_dir = a + elif o in ("-d", "--datanode"): + datanode_num = int(a) + elif o in ("-g", "--memcheck"): + g_valgrind = "valgrind --tool=memcheck --leak-check=full --log-file=memcheck.log " + elif o in ("-t", "--trace"): + if 'on' == a: + g_trace_compress = True + else: + g_trace_compress = False + run_type = 3 + print g_trace_compress + else: + assert False, "unhandled option" + + if(datanode_num == 0): + usage() + sys.exit() + + ptdb = Pterodb(datanode_num, data_dir) + ptdb.init_env() + + +if __name__ == "__main__": + main() diff --git a/src/test/mutil_ip/expected_127.out b/src/test/mutil_ip/expected_127.out new file mode 100644 index 000000000..763a8edf7 --- /dev/null +++ b/src/test/mutil_ip/expected_127.out @@ -0,0 +1,283 @@ +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,127.0.0.1 +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,127.0.0.1 +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=read-only +pg_is_in_recovery + +t + +trying dbname=postgres 
port=7783,7780 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=primary +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,127.0.0.1 target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,127.0.0.1 target_session_attrs= prefer-standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=host_name,host_name target_session_attrs=read-only +Connection to database failed: FATAL: no pg_hba.conf entry for host , SSL off +FATAL: no pg_hba.conf entry for host , SSL off + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=read-write +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs= prefer-standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7780 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,127.0.0.1 target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=/tmp,127.0.0.1 +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,/tmp +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=/tmp,/tmp target_session_attrs=primary +pg_is_in_recovery + +f + +trying dbname=postgres port=7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=read-only +pg_is_in_recovery + +t + +trying dbname=postgres port=7780 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=::1,127.0.0.1 +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,::1 +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=, +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host= , +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=, +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=,127.0.0.1 +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1, +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1, target_session_attrs=standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780 user=ipv6_tmp password=Gauss@123 +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,10.10.10.10 +pg_is_in_recovery + +t + +trying postgres://ipv6_tmp@127.0.0.1 
:7780,127.0.0.1:7783/postgres?password=Gauss@123&target_session_attrs=standby +pg_is_in_recovery + +t + +trying postgres://ipv6_tmp@127.0.0.1:7780,127.0.0.1 :7783/postgres?password=Gauss@123&target_session_attrs=primary +pg_is_in_recovery + +f + +trying postgres://ipv6_tmp@127.0.0.1:7780,127.0.0.1:7783/postgres?password=Gauss@123 +pg_is_in_recovery + +f + +trying postgres://ipv6_tmp@127.0.0.1:7780,127.0.0.1:7783/postgres?password=Gauss@123&target_session_attrs=standby +pg_is_in_recovery + +t + +trying postgres://ipv6_tmp@[::1]:7780,127.0.0.1:7783/postgres?password=Gauss@123 +pg_is_in_recovery + +f + +trying postgres://ipv6_tmp@[::1]:7780,127.0.0.1:7783/postgres?password=Gauss@123&target_session_attrs=standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,10.10.10.20 +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? + +trying dbname=postgres port=7780,7783 user=ipv6_tmp2 password=Gauss@123 host=127.0.0.1,127.0.0.1 +Connection to database failed: FATAL: Invalid username/password,login denied. +FATAL: Invalid username/password,login denied. + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@12 host=127.0.0.1,127.0.0.1 +Connection to database failed: FATAL: Invalid username/password,login denied. +FATAL: Invalid username/password,login denied. + +trying dbname=postgres port=7780,7783,7784 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 +Connection to database failed: could not match 3 port numbers to 2 hosts + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=host_name,host_name hostaddr=127.0.0.1 +Connection to database failed: could not match 2 host names to 1 hostaddr values + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=host_name hostaddr=127.0.0.1,127.0.0.1 +Connection to database failed: could not match 1 host names to 2 hostaddr values + +trying dbname=postgres port=7780ab,7783c user=ipv6_tmp password=Gauss@123 hostaddr=127.0.0.1,127.0.0.1 +Connection to database failed: invalid integer value +invalid integer value + +trying dbname=postgres port=7780ab,7780 user=ipv6_tmp password=Gauss@123 hostaddr=127.0.0.1,127.0.0.1 target_session_attrs=read-only +Connection to database failed: invalid integer value +session is not read-only + +trying dbname=postgres port=7780ab,7780 user=ipv6_tmp password=Gauss@123 hostaddr=127.0.0.1,127.0.0.1 target_session_attrs=standby +Connection to database failed: invalid integer value +server is not in hot standby mode + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,10.10.10.20 target_session_attrs= prefer-standby +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? 
+ +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 hostaddr=host_name,host_name +Connection to database failed: could not translate host name to address: Name or service not known +could not translate host name to address: Name or service not known + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 hostaddr=10.10.10.10,10.10.10.20 +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,10.10.10.20 +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? + +trying dbname=postgresabc port=7783,7780 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 +Connection to database failed: FATAL: database does not exist + +trying dbname=postgres port=7783 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=read-write +Connection to database failed: session is read-only +session is read-only + +trying dbname=postgres port=0 user=ipv6_tmp password=Gauss@123 host=127.0.0.1,127.0.0.1 target_session_attrs=read-write +Connection to database failed: invalid port number: +invalid port number: + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 +Connection to database failed: could not match 2 port numbers to 1 hosts + +trying dbname=postgres port=7783 user=ipv6_tmp password=Gauss@123 target_session_attrs=read-write +Connection to database failed: session is read-only + +trying dbname=postgres port=7783 user=ipv6_tmp +Connection to database failed: FATAL: Invalid username/password,login denied. 
+ +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 +Connection to database failed: could not match 2 port numbers to 1 hosts + +trying dbname=postgres user=ipv6_tmp password=Gauss@123 +Connection to database failed: connect to server failed: No such file or directory + +trying postgres://ipv6_tmp@127.0.0.1 :7780,127.0.0.1 :7783/postgres?password=Gauss@123 +Connection to database failed: could not translate host name to address: Name or service not known +could not translate host name to address: Name or service not known + +trying postgres://ipv6_tmp@127.0.0.1 :7780,127.0.0.1:7783/postgres?password=Gauss@123&target_session_attrs=primary +Connection to database failed: could not translate host name to address: Name or service not known +server is in hot standby mode + +trying postgres://ipv6_tmp@127.0.0.1:7780,127.0.0.1 :7783/postgres?password=Gauss@123&target_session_attrs=standby +Connection to database failed: server is not in hot standby mode +could not translate host name to address: Name or service not known + diff --git a/src/test/mutil_ip/expected_ipv4.out b/src/test/mutil_ip/expected_ipv4.out new file mode 100644 index 000000000..e49bafa84 --- /dev/null +++ b/src/test/mutil_ip/expected_ipv4.out @@ -0,0 +1,284 @@ +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,local_ip +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,local_ip +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=read-only +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=primary +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,local_ip target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,local_ip target_session_attrs= prefer-standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=host_name,host_name target_session_attrs=read-only +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=read-write +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs= prefer-standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7780 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,local_ip target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=/tmp,local_ip +pg_is_in_recovery + +t + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,/tmp +pg_is_in_recovery + +f + 
+trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=/tmp,/tmp target_session_attrs=primary +pg_is_in_recovery + +f + +trying dbname=postgres port=7783 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=read-only +pg_is_in_recovery + +t + +trying dbname=postgres port=7780 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs= prefer-standby +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=::1,local_ip +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip,::1 +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=, +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host= , +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=, +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=,local_ip +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip, +pg_is_in_recovery + +f + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=local_ip, target_session_attrs=standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780 user=ipv6_tmp password=Gauss@123 +pg_is_in_recovery + +f + +trying dbname=postgres port=7783,7783 user=ipv6_tmp password=Gauss@123 host=local_ip,10.10.10.10 +pg_is_in_recovery + +t + +trying postgres://ipv6_tmp@local_ip :7780,local_ip:7783/postgres?password=Gauss@123&target_session_attrs=standby +pg_is_in_recovery + +t + +trying postgres://ipv6_tmp@local_ip:7780,local_ip :7783/postgres?password=Gauss@123&target_session_attrs=primary +pg_is_in_recovery + +f + +trying postgres://ipv6_tmp@local_ip:7780,local_ip:7783/postgres?password=Gauss@123 +pg_is_in_recovery + +f + +trying postgres://ipv6_tmp@local_ip:7780,local_ip:7783/postgres?password=Gauss@123&target_session_attrs=standby +pg_is_in_recovery + +t + +trying postgres://ipv6_tmp@[::1]:7780,local_ip:7783/postgres?password=Gauss@123 +pg_is_in_recovery + +f + +trying postgres://ipv6_tmp@[::1]:7780,local_ip:7783/postgres?password=Gauss@123&target_session_attrs=standby +pg_is_in_recovery + +t + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,10.10.10.20 +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? + +trying dbname=postgres port=7780,7783 user=ipv6_tmp2 password=Gauss@123 host=local_ip,local_ip +Connection to database failed: FATAL: Forbid remote connection with trust method! +FATAL: Forbid remote connection with trust method! + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@12 host=local_ip,local_ip +Connection to database failed: FATAL: Invalid username/password,login denied. +FATAL: Invalid username/password,login denied. 
+ +trying dbname=postgres port=7780,7783,7784 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip +Connection to database failed: could not match 3 port numbers to 2 hosts + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=host_name,host_name hostaddr=local_ip +Connection to database failed: could not match 2 host names to 1 hostaddr values + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=host_name hostaddr=local_ip,local_ip +Connection to database failed: could not match 1 host names to 2 hostaddr values + +trying dbname=postgres port=7780ab,7783c user=ipv6_tmp password=Gauss@123 hostaddr=local_ip,local_ip +Connection to database failed: invalid integer value +invalid integer value + +trying dbname=postgres port=7780ab,7780 user=ipv6_tmp password=Gauss@123 hostaddr=local_ip,local_ip target_session_attrs=read-only +Connection to database failed: invalid integer value +session is not read-only + +trying dbname=postgres port=7780ab,7780 user=ipv6_tmp password=Gauss@123 hostaddr=local_ip,local_ip target_session_attrs=standby +Connection to database failed: invalid integer value +server is not in hot standby mode + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,10.10.10.20 target_session_attrs= prefer-standby +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 hostaddr=host_name,host_name +Connection to database failed: could not translate host name to address: Name or service not known +could not translate host name to address: Name or service not known + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 hostaddr=10.10.10.10,10.10.10.20 +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 host=10.10.10.10,10.10.10.20 +Connection to database failed: could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7783? +could not connect to server: Operation now in progress + Is the server running on host and accepting + TCP/IP connections on port 7780? 
+ +trying dbname=postgresabc port=7783,7780 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip +Connection to database failed: FATAL: database does not exist + +trying dbname=postgres port=7783 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=read-write +Connection to database failed: session is read-only +session is read-only + +trying dbname=postgres port=0 user=ipv6_tmp password=Gauss@123 host=local_ip,local_ip target_session_attrs=read-write +Connection to database failed: invalid port number: +invalid port number: + +trying dbname=postgres port=7783,7780 user=ipv6_tmp password=Gauss@123 +Connection to database failed: could not match 2 port numbers to 1 hosts + +trying dbname=postgres port=7783 user=ipv6_tmp password=Gauss@123 target_session_attrs=read-write +Connection to database failed: session is read-only + +trying dbname=postgres port=7783 user=ipv6_tmp +Connection to database failed: FATAL: Invalid username/password,login denied. + +trying dbname=postgres port=7780,7783 user=ipv6_tmp password=Gauss@123 +Connection to database failed: could not match 2 port numbers to 1 hosts + +trying dbname=postgres user=ipv6_tmp password=Gauss@123 +Connection to database failed: connect to server failed: No such file or directory + +trying postgres://ipv6_tmp@local_ip :7780,local_ip :7783/postgres?password=Gauss@123 +Connection to database failed: could not translate host name to address: Name or service not known +could not translate host name to address: Name or service not known + +trying postgres://ipv6_tmp@local_ip :7780,local_ip:7783/postgres?password=Gauss@123&target_session_attrs=primary +Connection to database failed: could not translate host name to address: Name or service not known +server is in hot standby mode + +trying postgres://ipv6_tmp@local_ip:7780,local_ip :7783/postgres?password=Gauss@123&target_session_attrs=standby +Connection to database failed: server is not in hot standby mode +could not translate host name to address: Name or service not known + diff --git a/src/test/mutil_ip/mutilip.in b/src/test/mutil_ip/mutilip.in new file mode 100644 index 000000000..69aad5190 --- /dev/null +++ b/src/test/mutil_ip/mutilip.in @@ -0,0 +1,60 @@ +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$error_ip,$g_local_ip +dbname=postgres port=$dn1_standby_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$error_ip,$g_local_ip +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs=read-only +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs=primary +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$error_ip,$g_local_ip target_session_attrs= prefer-standby +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$error_ip,$g_local_ip target_session_attrs= prefer-standby +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$host_name,$host_name target_session_attrs=read-only +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd 
host=$g_local_ip,$g_local_ip target_session_attrs=standby +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs=read-write +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs= prefer-standby +dbname=postgres port=$dn1_primary_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs= prefer-standby +dbname=postgres port=$dn1_primary_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$error_ip,$g_local_ip target_session_attrs= prefer-standby +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=/tmp,$g_local_ip +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$error_ip,/tmp +dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=/tmp,/tmp target_session_attrs=primary +dbname=postgres port=$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs=read-only +dbname=postgres port=$dn1_primary_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs= prefer-standby +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip_v6,$g_local_ip +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip_v6 +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=, +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host= , +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=, +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=,$g_local_ip +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip, +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip, target_session_attrs=standby +dbname=postgres port=$dn1_primary_port user=ipv6_tmp password=$passwd +dbname=postgres port=$dn1_standby_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$error_ip +postgres://ipv6_tmp@$g_local_ip :$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd!target_session_attrs=standby +postgres://ipv6_tmp@$g_local_ip:$dn1_primary_port,$g_local_ip :$dn1_standby_port/postgres?password=$passwd!target_session_attrs=primary +postgres://ipv6_tmp@$g_local_ip:$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd +postgres://ipv6_tmp@$g_local_ip:$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd!target_session_attrs=standby +postgres://ipv6_tmp@[$g_local_ip_v6]:$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd +postgres://ipv6_tmp@[$g_local_ip_v6]:$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd!target_session_attrs=standby +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$error_ip,$error_ip2 +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp2 password=$passwd host=$g_local_ip,$g_local_ip +dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password='Gauss@12' host=$g_local_ip,$g_local_ip +dbname=postgres port=$dn1_primary_port,$dn1_standby_port,7784 
user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip
+dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$host_name,$host_name hostaddr=$g_local_ip
+dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$host_name hostaddr=$g_local_ip,$g_local_ip
+dbname=postgres port=${error_port},${error_port2} user=ipv6_tmp password=$passwd hostaddr=$g_local_ip,$g_local_ip
+dbname=postgres port=${error_port},$dn1_primary_port user=ipv6_tmp password=$passwd hostaddr=$g_local_ip,$g_local_ip target_session_attrs=read-only
+dbname=postgres port=${error_port},$dn1_primary_port user=ipv6_tmp password=$passwd hostaddr=$g_local_ip,$g_local_ip target_session_attrs=standby
+dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd host=$error_ip,$error_ip2 target_session_attrs= prefer-standby
+dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd hostaddr=$host_name,$host_name
+dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd hostaddr=${error_ip},${error_ip2}
+dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=${error_ip},${error_ip2}
+dbname=postgresabc port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip
+dbname=postgres port=$dn1_standby_port user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs=read-write
+dbname=postgres port=0 user=ipv6_tmp password=$passwd host=$g_local_ip,$g_local_ip target_session_attrs=read-write
+dbname=postgres port=$dn1_standby_port,$dn1_primary_port user=ipv6_tmp password=$passwd
+dbname=postgres port=$dn1_standby_port user=ipv6_tmp password=$passwd target_session_attrs=read-write
+dbname=postgres port=$dn1_standby_port user=ipv6_tmp
+dbname=postgres port=$dn1_primary_port,$dn1_standby_port user=ipv6_tmp password=$passwd
+dbname=postgres user=ipv6_tmp password=$passwd
+postgres://ipv6_tmp@$g_local_ip :$dn1_primary_port,$g_local_ip :$dn1_standby_port/postgres?password=$passwd
+postgres://ipv6_tmp@$g_local_ip :$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd!target_session_attrs=primary
+postgres://ipv6_tmp@$g_local_ip:$dn1_primary_port,$g_local_ip :$dn1_standby_port/postgres?password=$passwd!target_session_attrs=standby
+postgres://ipv6_tmp@[]:$dn1_primary_port,$g_local_ip:$dn1_standby_port/postgres?password=$passwd!target_session_attrs=standby
\ No newline at end of file
diff --git a/src/test/mutil_ip/mutilip_env.sh b/src/test/mutil_ip/mutilip_env.sh
new file mode 100644
index 000000000..7cb9ae80d
--- /dev/null
+++ b/src/test/mutil_ip/mutilip_env.sh
@@ -0,0 +1,779 @@
+#!/bin/sh
+#some environment vars
+export g_base_port=7777
+
+export g_pooler_base_port=`expr $g_base_port \+ 410`
+export g_base_standby_port=`expr $g_base_port \+ 400`
+export install_path="$prefix"
+export GAUSSHOME="$prefix"
+export LD_LIBRARY_PATH=$prefix/lib:$LD_LIBRARY_PATH
+export PATH="$prefix/bin":$PATH
+export g_data_path="$install_path/ipv6_data"
+
+export g_local_ip="127.0.0.1"
+export g_local_ip_v6="::1"
+
+root_dir=$(dirname $(pwd))
+export jar_path=$root_dir/regress/jdbc_test/gsjdbc400.jar
+
+host_name=`hostname`
+error_ip="10.10.10.10"
+error_ip2="10.10.10.20"
+error_port="7780ab"
+error_port2="7783c"
+db=postgres
+scripts_dir=`pwd`
+username=`whoami`
+data_dir=$g_data_path
+install_dir=$install_path
+bin_dir="$install_dir/bin"
+passwd="Gauss@123"
+
+dn1_primary_port=`expr $g_base_port \+ 3`
+dn1_standby_port=`expr $g_base_port \+ 6`
+standby2_port=`expr $g_base_port \+ 9`
+standby3_port=`expr $g_base_port \+ 12`
+standby4_port=`expr $g_base_port \+ 15`
+casecade_standby_port=`expr $g_base_port \+ 9`
+dn_temp_port=`expr $g_base_port \+ 21`
+dn1_normal_port=`expr $g_base_port \+ 3`
+
+primary_data_dir="$data_dir/datanode1"
+standby_data_dir="$data_dir/datanode2"
+standby2_data_dir="$data_dir/datanode3"
+standby3_data_dir="$data_dir/datanode4"
+standby4_data_dir="$data_dir/datanode5"
+casecade_standby_data_dir="$data_dir/datanode3"
+normal_data_dir="$data_dir/datanode1"
+
+failed_keyword="testcase_failed"
+startup_keyword="Normal|repair"
+startup_keyword1="starting"
+setup_keyword="peer_role"
+primary_setup_keyword="Standby"
+standby_setup_keyword="Primary"
+casecade_standby_setup_keyword="Cascade Standby"
+walkeepsegment_keyword="removed"
+create_table_keyword="TABLE"
+build_keyword="completed"
+building_keyword="Building"
+buildfailed_keyword="failed"
+
+function query_node()
+{
+node_num=$1
+echo query node $node_num
+if [[ $node_num -lt 1 ]] ; then
+    echo "node_num must be greater than 0"
+    return 0
+fi
+
+$bin_dir/gs_ctl query -D $data_dir/datanode$node_num
+}
+
+function query_primary()
+{
+echo query primary
+$bin_dir/gs_ctl query -D $primary_data_dir
+}
+
+function query_normal()
+{
+echo query normal
+$bin_dir/gs_ctl query -D $normal_data_dir
+}
+
+function query_multi_standby()
+{
+    node_num=$1
+    if [[ $node_num -lt 2 ]] ; then
+        echo "node_num must be greater than 1"
+        return 0
+    fi
+    echo query multi standby
+    for((i=2; i<$node_num+1; i++));do
+        node_dir=$data_dir/datanode$i
+        $bin_dir/gs_ctl query -D $node_dir
+    done
+}
+
+function query_casecade_standby()
+{
+    echo query cascade standby
+    $bin_dir/gs_ctl query -D $casecade_standby_data_dir
+}
+
+function query_standby()
+{
+echo query standby
+$bin_dir/gs_ctl query -D $standby_data_dir
+}
+
+function query_standby2()
+{
+    echo query standby2
+    $bin_dir/gs_ctl query -D $standby2_data_dir
+
+}
+
+function query_standby3()
+{
+    echo query standby3
+    $bin_dir/gs_ctl query -D $standby3_data_dir
+
+}
+
+function check_primary_startup()
+{
+echo checking primary startup
+for i in $(seq 1 30)
+do
+    if [ $(query_primary | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_primary | grep $startup_keyword1 | wc -l) -gt 0 ]; then
+        sleep 2
+    else
+        return 0
+    fi
+done
+echo "$failed_keyword when check_primary_startup"
+return 1
+}
+
+function check_normal_startup()
+{
+echo checking normal startup
+for i in $(seq 1 30)
+do
+    if [ $(query_normal | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_normal | grep $startup_keyword1 | wc -l) -gt 0 ]; then
+        sleep 2
+    else
+        return 0
+    fi
+done
+echo "$failed_keyword when check_normal_startup"
+return 1
+}
+
+function check_standby_startup()
+{
+echo checking standby startup
+for i in $(seq 1 30)
+do
+    if [ $(query_standby | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_standby | grep $startup_keyword1 | wc -l) -gt 0 ]; then
+        sleep 2
+    else
+        return 0
+    fi
+done
+echo "$failed_keyword when check_standby_startup"
+return 1
+}
+
+function check_casecade_standby_startup()
+{
+echo checking cascade standby startup
+for i in $(seq 1 30)
+do
+    if [ $(query_casecade_standby | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_casecade_standby | grep $startup_keyword1 | wc -l) -gt 0 ]; then
+        sleep 2
+    else
+        return 0
+    fi
+done
+echo "$failed_keyword when check_casecade_standby_startup"
+return 1
+}
+
+function check_multi_standby_startup()
+{
+    node_num=$1
+    if [[ $node_num -lt 2 ]] ; then
+        echo "node_num must be greater than 1"
+        return 0
+    fi
+    echo checking standby startup
+    for i in $(seq 1 30)
+    do
+        if [ $(query_multi_standby $node_num | grep -E $startup_keyword | wc -l) -eq 0 -o $(query_multi_standby $node_num | grep $startup_keyword1 | wc -l) -gt 0 ]; then
+            sleep 2
+        else
+            return 0
+        fi
+    done
+    echo "$failed_keyword when check_multi_standby_startup"
+    return 1
+}
+
+function check_primary_setup()
+{
+echo checking primary setup
+for i in $(seq 1 30)
+do
+    if [ $(query_primary | grep -E $setup_keyword | wc -l) -eq 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+
+echo "$failed_keyword when check_primary_setup"
+return 1
+}
+
+function check_replication_setup_for_primary()
+{
+echo checking replication setup
+for i in $(seq 1 30)
+do
+    if [ $(query_primary | grep -E $setup_keyword | wc -l) -ge 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_replication_setup_for_primary"
+return 1
+}
+
+function check_primary_setup_for_multi_standby()
+{
+echo checking primary setup for multi-standby mode
+for i in $(seq 1 30)
+do
+    if [ $(query_primary | grep -E $setup_keyword | wc -l) -ge 2 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+
+echo "$failed_keyword when check_primary_setup_for_multi_standby"
+return 1
+}
+
+function check_standby_replication_setup()
+{
+echo checking replication setup
+for i in $(seq 1 30)
+do
+    if [ $(query_standby | grep -E $setup_keyword | wc -l) -eq 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_standby_replication_setup"
+return 1
+}
+
+function check_standby_as_primary_setup()
+{
+echo checking replication setup
+for i in $(seq 1 30)
+do
+    if [ $(query_standby | grep -E $setup_keyword | wc -l) -eq 2 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_standby_as_primary_setup"
+return 1
+}
+
+
+function check_casecade_setup()
+{
+echo checking cascade standby setup
+for i in $(seq 1 30)
+do
+    if [ $(query_casecade_standby | grep -E $setup_keyword | wc -l) -eq 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_casecade_setup"
+return 1
+}
+
+function check_standby_replication_setup_for_casecade()
+{
+echo checking replication setup for cascade
+for i in $(seq 1 30)
+do
+    if [ $(query_standby | grep -E $setup_keyword | wc -l) -ge 2 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_standby_replication_setup_for_casecade"
+return 1
+}
+
+
+function check_standby_setup()
+{
+echo checking standby setup
+for i in $(seq 1 30)
+do
+    if [ $(query_standby | grep -E $standby_setup_keyword | wc -l) -eq 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_standby_setup"
+return 1
+}
+
+function check_standby2_setup()
+{
+echo checking standby2 setup
+for i in $(seq 1 30)
+do
+    if [ $(query_standby2 | grep -E $standby_setup_keyword | wc -l) -eq 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_standby2_setup"
+return 1
+}
+
+function check_walkeepsegment()
+{
+echo checking wal keep segment
+for i in $(seq 1 30)
+do
+    if [ $(query_standby | grep -E $walkeepsegment_keyword | wc -l) -eq 1 ]; then
+        return 0
+    else
+        sleep 2
+    fi
+done
+echo "$failed_keyword when check_walkeepsegment"
+return 1
+}
+
+function wait_catchup_finish()
+{
+echo wait catchup finish
+while [ $($bin_dir/gsql -d $db -p $dn1_primary_port -c "select * from pg_get_senders_catchup_time;" | grep "Catchup" | wc -l) -gt 0 ]
+do
+    sleep 1
+done
+}
+
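+# Both catchup helpers follow the same pattern: poll pg_get_senders_catchup_time
+# through gsql once per second and return only when no sender is still reported
+# as "Catchup". wait_catchup_finish above polls the node on $dn1_primary_port;
+# wait_primarycatchup_finish below polls the node on $dn1_standby_port, which is
+# presumably the one of interest after a switchover has swapped the roles.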
+function wait_primarycatchup_finish()
+{
+echo wait catchup finish
+while [ $($bin_dir/gsql -d $db -p $dn1_standby_port -c "select * from pg_get_senders_catchup_time;" | grep "Catchup" | wc -l) -gt 0 ]
+do
+    sleep 1
+done
+}
+
+function kill_all()
+{
+    node_num=$1
+    if [[ $node_num -lt 1 ]] ; then
+        echo "node_num must be greater than 0"
+        return 0
+    fi
+    echo "kill all"
+    for((i=1; i<$node_num+1; i++));do
+        node_dir=$data_dir/datanode$i
+        ps -ef | grep -w $node_dir | grep -v grep | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1
+        sleep 1
+    done
+}
+
+function kill_normal()
+{
+echo "kill normal"
+ps -ef | grep $USER | grep -w $normal_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+sleep 1
+}
+
+function kill_primary()
+{
+echo "kill primary"
+ps -ef | grep $USER | grep -w $primary_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+sleep 1
+}
+
+function kill_standby()
+{
+echo "kill standby"
+ps -ef | grep $USER | grep -w $standby_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+sleep 1
+}
+
+function kill_multi_standy()
+{
+    node_num=$1
+    if [[ $node_num -lt 2 ]] ; then
+        echo "node_num must be greater than 1"
+        return 0
+    fi
+    echo kill multi standby
+    for((i=2; i<$node_num+1; i++));do
+        standby_dir=$data_dir/datanode$i
+        echo "kill standby $standby_dir"
+        ps -ef | grep -w $standby_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+        sleep 1
+    done
+}
+
+function kill_dummystandby()
+{
+echo "kill dummystandby"
+ps -ef | grep $USER | grep -w $dummystandby_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+sleep 1
+}
+
+function kill_standby2()
+{
+    echo "kill standby2"
+    ps -ef | grep -w $standby2_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+    sleep 1
+}
+
+function kill_standby3()
+{
+    echo "kill standby3"
+    ps -ef | grep -w $standby3_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+    sleep 1
+}
+
+function kill_standby4()
+{
+    echo "kill standby4"
+    ps -ef | grep -w $standby4_data_dir | grep -v grep | awk '{print $2}' | xargs kill -9
+    sleep 1
+}
+
+function stop_normal()
+{
+echo "stop normal"
+$bin_dir/gs_ctl stop -D $normal_data_dir -m fast >> ./results/gs_ctl.log 2>&1
+sleep 1
+}
+
+function stop_primary()
+{
+echo "stop primary"
+$bin_dir/gs_ctl stop -D $primary_data_dir -m fast >> ./results/gs_ctl.log 2>&1
+sleep 1
+}
+
+function stop_multi_standby()
+{
+    node_num=$1
+    if [[ $node_num -lt 2 ]] ; then
+        echo "node_num must be greater than 1"
+        return 0
+    fi
+
+    echo "stop all standby"
+    for((i=2; i<$node_num+1; i++));do
+        standby_dir=$data_dir/datanode$i
+        $bin_dir/gs_ctl stop -D $standby_dir -m fast >> ./results/gs_ctl.log 2>&1
+        sleep 1
+    done
+
+}
+
+function stop_standby()
+{
+echo "stop standby"
+$bin_dir/gs_ctl stop -D $standby_data_dir -m fast >> ./results/gs_ctl.log 2>&1
+sleep 1
+}
+
+
+function stop_standby2()
+{
+echo "stop standby2"
+$bin_dir/gs_ctl stop -D $standby2_data_dir -m fast >> ./results/gs_ctl.log 2>&1
+sleep 1
+}
+
+function stop_standby3()
+{
+echo "stop standby3"
+$bin_dir/gs_ctl stop -D $standby3_data_dir -m fast >> ./results/gs_ctl.log 2>&1
+sleep 1
+}
+
+function stop_standby4()
+{
+echo "stop standby4"
+$bin_dir/gs_ctl stop -D $standby4_data_dir -m fast >> ./results/gs_ctl.log 2>&1
+sleep 1
+}
+
+function start_normal()
+{
+echo "start normal"
+$bin_dir/gs_guc set -D ${normal_data_dir} -c "most_available_sync = on" >> ./results/gaussdb.log 2>&1
+$bin_dir/gaussdb --single_node -M normal -p $dn1_normal_port -D $normal_data_dir >> ./results/gaussdb.log 2>&1 &
+check_normal_startup
+}
+
+function start_primary()
+{
+echo "start primary"
+$bin_dir/gaussdb --single_node -M primary -p $dn1_primary_port -D $primary_data_dir >> ./results/gaussdb.log 2>&1 &
+check_primary_startup
+}
+
+function start_multi_standby()
+{
+    node_num=$1
+    if [[ $node_num -lt 2 ]] ; then
+        echo "node_num must be greater than 1"
+        return 0
+    fi
+
+    echo "start all standby"
+    for((i=2; i<$node_num+1; i++));do
+        standby_dir=$data_dir/datanode$i
+        echo $standby_dir
+        port=$(($g_base_port + i*3))
+        echo $port
+        $bin_dir/gaussdb --single_node -M standby -p $port -D $standby_dir >> ./results/gaussdb.log 2>&1 &
+    done
+    check_multi_standby_startup $node_num
+}
+
+function start_casecade_standby()
+{
+echo "start cascade standby"
+$bin_dir/gaussdb --single_node -M cascade_standby -p $casecade_standby_port -D $casecade_standby_data_dir >> ./results/gaussdb.log 2>&1 &
+check_casecade_standby_startup
+}
+
+function start_standby()
+{
+echo "start standby"
+$bin_dir/gaussdb --single_node -M standby -p $dn1_standby_port -D $standby_data_dir >> ./results/gaussdb.log 2>&1 &
+check_standby_startup
+}
+
+
+function start_standby2()
+{
+    echo "start standby2"
+    $bin_dir/gaussdb --single_node -M standby -p $standby2_port -D $standby2_data_dir >> ./results/gaussdb.log 2>&1 &
+    sleep 2
+}
+
+function start_standby3()
+{
+    echo "start standby3"
+    $bin_dir/gaussdb --single_node -M standby -p $standby3_port -D $standby3_data_dir >> ./results/gaussdb.log 2>&1 &
+    sleep 2
+}
+
+function start_standby4()
+{
+    echo "start standby4"
+    $bin_dir/gaussdb --single_node -M standby -p $standby4_port -D $standby4_data_dir >> ./results/gaussdb.log 2>&1 &
+    sleep 2
+}
+
+function start_primary_as_standby()
+{
+echo "start primary as standby"
+$bin_dir/gaussdb --single_node -M standby -p $dn1_primary_port -D $primary_data_dir >> ./results/gaussdb.log 2>&1 &
+check_primary_startup
+}
+
+function start_standby_as_primary()
+{
+echo "start standby as primary"
+$bin_dir/gaussdb --single_node -M primary -p $dn1_standby_port -D $standby_data_dir >> ./results/gaussdb.log 2>&1 &
+check_standby_startup
+}
+
+function start_primary_as_pending()
+{
+echo "start primary as pending"
+$bin_dir/gaussdb --single_node -M pending -p $dn1_primary_port -D $primary_data_dir >> ./results/gaussdb.log 2>&1 &
+check_primary_startup
+}
+
+function start_standby_as_pending()
+{
+echo "start standby as pending"
+$bin_dir/gaussdb --single_node -M pending -p $dn1_standby_port -D $standby_data_dir >> ./results/gaussdb.log 2>&1 &
+check_standby_startup
+}
+
+function notify_primary_as_primary()
+{
+echo "notify primary as primary"
+$bin_dir/gs_ctl notify -M primary -D $primary_data_dir
+}
+
+function notify_primary_as_standby()
+{
+echo "notify primary as standby"
+$bin_dir/gs_ctl notify -M standby -D $primary_data_dir
+}
+
+function notify_standby_as_primary()
+{
+echo "notify standby as primary"
+$bin_dir/gs_ctl notify -M primary -D $standby_data_dir
+}
+
+function notify_standby_as_standby()
+{
+echo "notify standby as standby"
+$bin_dir/gs_ctl notify -M standby -D $standby_data_dir
+}
+
+# for primary-standby
+function check_instance_primary_standby(){
+echo query datanode1
+check_primary_setup
+
+echo query datanode1_standby
+check_standby_replication_setup
+}
+
+# for multi-standby
+function check_instance_multi_standby(){
+echo query datanode1
+check_primary_setup_for_multi_standby
+
+echo query datanode_standby
+check_standby_setup
+check_standby2_setup
+
+}
+
+# for cascade-standby
+function check_instance_casecade_standby(){
+echo query datanode1
+check_replication_setup_for_primary
+
+echo query datanode1_standby
+check_casecade_setup
+check_standby_replication_setup_for_casecade
+}
+
+function gsql_test(){
+echo gsql ipv6 connect
+if [ $($bin_dir/gsql -d $db -p $dn1_primary_port -h ${g_local_ip} -U ipv6_tmp -W ${passwd} -c "create table ipv6_test(i int);" | grep "CREATE TABLE" | wc -l) -gt 0 ]
+then
+    return 0
+else
+    echo "$failed_keyword when gsql_test"
+fi
+}
+
+function create_test_user(){
+echo create test user
+if [ $($bin_dir/gsql -d $db -p $dn1_primary_port -c "create user ipv6_tmp with login sysadmin password '"$passwd"';" | grep "CREATE ROLE" | wc -l) -gt 0 ]
+then
+    return 0
+else
+    echo "$failed_keyword when create_test_user"
+fi
+}
+
+function gsql_standby_test(){
+echo gsql ipv6 standby connect
+if [ $($bin_dir/gsql -d $db -p $dn1_standby_port -h ${g_local_ip} -U ipv6_tmp -W ${passwd} -c "\dt;" | grep "ipv6_test" | wc -l) -gt 0 ]
+then
+    return 0
+else
+    echo "$failed_keyword when gsql_standby_test"
+fi
+}
+
+function jdbc_test(){
+echo jdbc ipv6 connect
+javac -cp $CLASSPATH:$jar_path Ipv6Test.java
+if [ $(java -cp $CLASSPATH:$jar_path Ipv6Test $dn1_primary_port $g_local_ip | grep "Connection succeed!" | wc -l) -gt 0 ]
+then
+    return 0
+else
+    echo "$failed_keyword when jdbc_test"
+fi
+}
+
+function set_listen_address(){
+echo set node listen address
+node_num=$1
+if [[ $node_num -le 0 ]] ; then
+    echo "node_num must be greater than 0"
+    return 0
+fi
+
+dir=$data_dir/datanode$node_num
+if [ $($bin_dir/gs_guc set -D ${dir} -c "listen_addresses='"$g_local_ip"'" | grep "Success to perform gs_guc!" | wc -l) -gt 0 ]
+then
+    return 0
+else
+    echo "$failed_keyword when set_listen_address"
+fi
+}
+
+function set_node_conf(){
+echo set node conf
+node_num=$1
+if [[ $node_num -le 0 ]] ; then
+    echo "node_num must be greater than 0"
+    return 0
+fi
+
+dir=$data_dir/datanode$node_num
+if [ $($bin_dir/gs_guc set -D ${dir} -c "listen_addresses='"$g_local_ip"'" | grep "Success to perform gs_guc!" | wc -l) -lt 1 ]
+then
+    echo "$failed_keyword when set_listen_addresses"
+fi
+
+if [ $($bin_dir/gs_guc set -D ${dir} -c "enable_thread_pool=on" | grep "Success to perform gs_guc!" | wc -l) -lt 1 ]
+then
+    echo "$failed_keyword when set_enable_thread_pool"
+fi
+}
+
+function setup_hba()
+{
+    node_num=$1
+    if [[ $node_num -le 0 ]] ; then
+        echo "node_num must be greater than 0"
+        return 0
+    fi
+
+    if [[ "$g_local_ip" = "127.0.0.1" ]]; then
+        return 0
+    elif [[ "$g_local_ip" = "::1" ]]; then
+        return 0
+    fi
+
+    result=$(echo $g_local_ip | grep "::")
+    if [[ "$result" != "" ]]
+    then
+        mask="128"
+    else
+        mask="32"
+    fi
+
+    for((i=1; i<$node_num+1; i++))
+    do
+        dir=$data_dir/datanode$i
+        hba_line="host all all "${g_local_ip}/${mask}" trust"
+        $bin_dir/gs_guc set -Z datanode -D ${dir} -h "${hba_line}" >> ./results/gaussdb.log 2>&1
+        # set up hba for test user
+        hba_line="host all ipv6_tmp "${g_local_ip}/${mask}" sha256"
+        $bin_dir/gs_guc set -Z datanode -D ${dir} -h "${hba_line}" >> ./results/gaussdb.log 2>&1
+    done
+}
diff --git a/src/test/mutil_ip/testlibpq.cpp b/src/test/mutil_ip/testlibpq.cpp
new file mode 100644
index 000000000..1a3986465
--- /dev/null
+++ b/src/test/mutil_ip/testlibpq.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *          http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * IDENTIFICATION
+ *        src/test/mutil_ip/testlibpq.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "libpq/libpq-fe.h"
+
+static void exit_nicely(PGconn* conn)
+{
+    PQpass(conn);
+    PQfinish(conn);
+    exit(1);
+}
+
+int main(int argc, char** argv)
+{
+    const char* conninfo = NULL;
+    PGconn* conn = NULL;
+    PGresult* res = NULL;
+    int nFields;
+    int i, j;
+
+    /*
+     * If the user supplies a parameter on the command line, use it as the
+     * conninfo string; otherwise default to setting dbname=postgres and using
+     * environment variables or defaults for all other connection parameters.
+     */
+    if (argc > 1)
+        conninfo = argv[1];
+    else
+        conninfo = "dbname = postgres";
+
+    /* Make a connection to the database */
+    conn = PQconnectdb(conninfo);
+
+    /* Check to see that the backend connection was successfully made */
+    if (PQstatus(conn) != CONNECTION_OK) {
+        fprintf(stderr, "Connection to database failed: %s", PQerrorMessage(conn));
+        exit_nicely(conn);
+    }
+
+    /*
+     * Our test case here involves using a cursor, for which we must be inside
+     * a transaction block. We could do the whole thing with a single
+     * PQexec() of "select * from pg_database", but that's too trivial to make
+     * a good example.
+     */
+
+    /* Start a transaction block */
+    res = PQexec(conn, "START TRANSACTION");
+    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
+        fprintf(stderr, "BEGIN command failed: %s", PQerrorMessage(conn));
+        PQclear(res);
+        exit_nicely(conn);
+    }
+
+    /*
+     * Should PQclear PGresult whenever it is no longer needed to avoid memory
+     * leaks
+     */
+    PQclear(res);
+
+    /*
+     * Fetch the recovery state through a cursor over pg_is_in_recovery()
+     */
+    res = PQexec(conn, "DECLARE myportal CURSOR FOR select pg_catalog.pg_is_in_recovery()");
+    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
+        fprintf(stderr, "DECLARE CURSOR failed: %s", PQerrorMessage(conn));
+        PQclear(res);
+        exit_nicely(conn);
+    }
+    PQclear(res);
+
+    res = PQexec(conn, "FETCH ALL in myportal");
+    if (PQresultStatus(res) != PGRES_TUPLES_OK) {
+        fprintf(stderr, "FETCH ALL failed: %s", PQerrorMessage(conn));
+        PQclear(res);
+        exit_nicely(conn);
+    }
+
+    /* first, print out the attribute names */
+    nFields = PQnfields(res);
+    for (i = 0; i < nFields; i++)
+        printf("%-15s", PQfname(res, i));
+    printf("\n\n");
+
+    /* next, print out the rows */
+    for (i = 0; i < PQntuples(res); i++) {
+        for (j = 0; j < nFields; j++)
+            printf("%-15s", PQgetvalue(res, i, j));
+        printf("\n");
+    }
+
+    PQclear(res);
+
+    /* close the portal ... we don't bother to check for errors ...
*/ + res = PQexec(conn, "CLOSE myportal"); + PQclear(res); + + /* end the transaction */ + res = PQexec(conn, "END"); + PQclear(res); + + /* close the connection to the database and cleanup */ + PQpass(conn); + PQfinish(conn); + + return 0; +} diff --git a/src/test/regress/CMakeLists.txt b/src/test/regress/CMakeLists.txt index ffaad33c0..ee43e2161 100755 --- a/src/test/regress/CMakeLists.txt +++ b/src/test/regress/CMakeLists.txt @@ -15,13 +15,14 @@ set(pg_regress_single_DEF_OPTIONS ${MACRO_OPTIONS} -DHOST_TUPLE="${HOST_TUPLE}" set(pg_regress_single_main_DEF_OPTIONS ${MACRO_OPTIONS}) set(pg_regress_single_COMPILE_OPTIONS ${OPTIMIZE_OPTIONS} ${OS_OPTIONS} ${PROTECT_OPTIONS} ${WARNING_OPTIONS} ${BIN_SECURE_OPTIONS} ${CHECK_OPTIONS}) set(pg_regress_single_LINK_OPTIONS ${BIN_LINK_OPTIONS}) -set(pg_regress_single_LINK_LIBS pg_regress_single_obj -lpgport -lcrypt -ldl -lm -lssl -lcrypto -lsecurec -pthread -lrt -lz -lminiunz) +set(pg_regress_single_LINK_LIBS pg_regress_single_obj -lpgport -lcrypt -ldl -lm -lssl -lcrypto -l${SECURE_C_CHECK} -pthread -lrt -lz -lminiunz) add_static_objtarget(pg_regress_single_obj TGT_pg_regress_single_SRC TGT_pg_regress_single_INC "${pg_regress_single_DEF_OPTIONS}" "${pg_regress_single_COMPILE_OPTIONS}" "${pg_regress_single_LINK_OPTIONS}") add_bintarget(pg_regress_single TGT_pg_regress_single_main_SRC TGT_pg_regress_single_INC "${pg_regress_single_main_DEF_OPTIONS}" "${pg_regress_single_COMPILE_OPTIONS}" "${pg_regress_single_LINK_OPTIONS}" "${pg_regress_single_LINK_LIBS}") add_dependencies(pg_regress_single pg_regress_single_obj pgport_static) target_link_directories(pg_regress_single PRIVATE ${LIBEDIT_LIB_PATH} ${ZLIB_LIB_PATH} ${CMAKE_BINARY_DIR}/lib ${LIBOPENSSL_LIB_PATH} ${SECURE_LIB_PATH}) add_subdirectory(stub/roach_api_stub) +add_subdirectory(jdbc_client) #fastcheck @@ -34,9 +35,12 @@ endfunction(add_func_target_withargs_fastcheck) MESSAGE(STATUS ${PROJECT_TRUNK_DIR} ${CMAKE_BINARY_DIR} ${CMAKE_INSTALL_PREFIX}) -# add_func_target_withargs_fastcheck(fastcheck_single fastcheck_single) +add_func_target_withargs_fastcheck(fastcheck_single_audit fastcheck_single_audit) +add_func_target_withargs_fastcheck(fastcheck_lite fastcheck_lite) add_func_target_withargs_fastcheck(fastcheck_single_mot fastcheck_single_mot) +add_func_target_withargs_fastcheck(fastcheck_ce_single fastcheck_ce_single) +add_func_target_withargs_fastcheck(execute_fastcheck_ce_single_jdbc execute_fastcheck_ce_single_jdbc) add_func_target_withargs_fastcheck(redischeck_single redischeck_single) add_func_target_withargs_fastcheck(wlmcheck_single wlmcheck_single) add_func_target_withargs_fastcheck(2pccheck_single 2pccheck_single) @@ -46,4 +50,5 @@ add_func_target_withargs_fastcheck(hacheck_decode hacheck_decode) add_func_target_withargs_fastcheck(hacheck_multi_single hacheck_multi_single) add_func_target_withargs_fastcheck(hacheck_multi_single_mot hacheck_multi_single_mot) add_func_target_withargs_fastcheck(decodecheck_single decodecheck_single) +add_func_target_withargs_fastcheck(upgradecheck_single upgradecheck_single) diff --git a/src/test/regress/GNUmakefile b/src/test/regress/GNUmakefile index 9303c8aef..3e057d5da 100644 --- a/src/test/regress/GNUmakefile +++ b/src/test/regress/GNUmakefile @@ -30,8 +30,6 @@ ifdef TEMP_CONFIG TEMP_CONF += --temp-config=$(TEMP_CONFIG) endif -ud=4 -uc=1 d=1 c=0 p=25632 @@ -49,16 +47,12 @@ sk='FiqwtDDeUCtsieCbOCWzrQ3fwJHFJetzzXbdLHEh' abs_gausshome='$(GAUSSHOME)' keep_last_data=false inplace_data_base_dir=../upgrade -grayscale_data_base_dir=../grayscale_upgrade 
+grayscale_data_base_dir=../../../../privategauss/test/grayscale_upgrade
 old_bin_dir='./tmp_check/bin'
 cpu_bit=$(shell uname -m)
 aie_host='None'
 aie_port='None'
-ifeq ($(PLAT_FORM_STR), euleros2.0_sp2_x86_64)
-    upgrade_from=92011
-else
-    upgrade_from=92023
-endif
+upgrade_from=92497
 inplace_upgrade_script_dir=../upgrade
 grayscale_upgrade_script_dir=../grayscale_upgrade
@@ -272,8 +266,7 @@ fastcheck_initdb: all tablespace-setup
 	$(call hotpatch_check_func)
 	export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \
 	$(call hotpatch_setup_func) && \
-	$(call exception_arm_cases) && \
-	$(pg_regress_check) $(REGRESS_OPTS) -d $(d) -c $(c) -p $(p) -r 0 -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --schedule=$(srcdir)/parallel_schedule$(PART) -w --keep_last_data=${keep_last_data} $(MAXCONNOPT) --temp-config=$(srcdir)/make_fastcheck_postgresql.conf --init_database $(EXTRA_TESTS) $(REG_CONF)
+	$(pg_regress_check) $(REGRESS_OPTS) -d $(d) -c $(c) -p $(p) -r 0 -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --single_node --schedule=$(srcdir)/parallel_schedule$(PART) -w --keep_last_data=${keep_last_data} $(MAXCONNOPT) --temp-config=$(srcdir)/make_fastcheck_postgresql.conf --init_database $(EXTRA_TESTS) $(REG_CONF)
 
 fastcheck_inplace: all tablespace-setup
 	$(call hotpatch_check_func)
@@ -288,6 +281,38 @@ fastcheck_single: all tablespace-setup
 	$(call hotpatch_setup_func) && \
 	$(pg_regress_check) $(REGRESS_OPTS) -d 1 -c 0 -p $(p) -r $(runtest) -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --single_node --schedule=$(srcdir)/parallel_schedule0$(PART) -w --keep_last_data=${keep_last_data} $(MAXCONNOPT) --temp-config=$(srcdir)/make_fastcheck_postgresql.conf $(EXTRA_TESTS) $(REG_CONF)
+fastcheck_lite: all tablespace-setup
+	$(call hotpatch_check_func)
+	export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \
+	$(call hotpatch_setup_func) && \
+	$(pg_regress_check) $(REGRESS_OPTS) -d 1 -c 0 -p $(p) -r $(runtest) -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --single_node --schedule=$(srcdir)/parallel_schedule.lite$(PART) -w --keep_last_data=${keep_last_data} $(MAXCONNOPT) --temp-config=$(srcdir)/make_fastcheck_postgresql.conf $(EXTRA_TESTS) $(REG_CONF)
+
+upgradecheck_single: all tablespace-setup
+	$(call hotpatch_check_func)
+	export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \
+	$(call hotpatch_setup_func) && \
+	$(pg_regress_check) $(REGRESS_OPTS) -d 1 -c 0 -p $(p) -r $(runtest) -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --single_node --schedule=$(srcdir)/parallel_schedule0$(PART) -w --keep_last_data=${keep_last_data} $(MAXCONNOPT) --temp-config=$(srcdir)/make_fastcheck_postgresql.conf $(EXTRA_TESTS) $(REG_CONF) \
+	--data_base_dir=$(grayscale_data_base_dir) --platform=$(PLAT_FORM_STR) \
+	--upgrade_script_dir=$(grayscale_upgrade_script_dir) \
+	--old_bin_dir=$(old_bin_dir) --grayscale_full_mode \
+	--upgrade_from=$(upgrade_from)
+
+fastcheck_ce_single: all tablespace-setup
+	export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \
+	$(pg_regress_check) $(REGRESS_OPTS) -d 1 -c 0 -p $(p) -r $(runtest) -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --schedule=$(srcdir)/c
+
+execute_fastcheck_ce_jdbc_tool:
+	$(MAKE) -j 10 -C jdbc_client
+
+execute_fastcheck_ce_single_jdbc: execute_fastcheck_ce_jdbc_tool
+ifdef NO_BUILD
+	export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \
+	$(pg_regress_installcheck) --schedule=$(srcdir)/ce_sched_jdbc $(REG_CONF) --jdbc || true
+else
+	export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \
+	
$(pg_regress_check) $(REGRESS_OPTS) -d 1 -c 0 -p $(p) -r $(runtest) -b $(dir) -n $(n) --abs_gausshome=$(abs_gausshome) --schedule=$(srcdir)/ce_sched_jdbc --keep_last_data=${keep_last_data} $(MAXCONNOPT) --temp-config=$(srcdir)/make_fastcheck_postgresql.conf --data_base_dir=$(data_base_dir) $(REG_CONF) --jdbc || true +endif + fastcheck_single_segment: all tablespace-setup export LD_LIBRARY_PATH=$(SSL_LIB_PATH):$(LD_LIBRARY_PATH) && \ $(call hotpatch_setup_func) && \ diff --git a/src/test/regress/ce_sched b/src/test/regress/ce_sched new file mode 100644 index 000000000..5cc29fb46 --- /dev/null +++ b/src/test/regress/ce_sched @@ -0,0 +1,82 @@ +test: ce_kt_key_manage +test: ce_kt_invalid_input +test: ce_kt_toughness +#test: ce_crt_cmk_by_gskt # not supported right now +#test: ce_crt_cmk_by_hwkms +#test: ce_crt_cek +#test: ce_crt_tbl + +test: ce_alteruser +test: ce_cmk_cek_test +test: ce_cmk_search_path +test: ce_column_constraint +test: ce_copy_options +test: ce_copy +test: ce_crt_tbl_as +test: ce_cursor_crud_all_types_test +test: ce_default_values +test: ce_depend +test: ce_describe +test: ce_distinct +test: ce_drop_cek_depend +test: ce_drop_cek_schema +test: ce_drop_cek +test: ce_drop_cmk_schema +test: ce_drop_cmk +test: ce_drop_schema +test: ce_encoding_test +test: ce_escaping +test: ce_foreign_key +test: ce_insert_columnstore +test: ce_insert_from_select_test +test: ce_join +test: ce_limit +test: ce_permission_on_keys_schema +test: ce_privileges_dba +test: ce_privileges_on_schema +test: ce_random +test: ce_rlspolicy +test: ce_same_column_name_different_schemas +test: ce_searchpath +test: ce_select +test: ce_select_operators +test: ce_select_where_encrypt_test +test: ce_set +test: ce_table_type +test: ce_textual_prepare_crud_all_types_test +test: ce_transactions_test +test: ce_type_binarys +test: ce_type_boolean +test: ce_type_char +test: ce_type_float +test: ce_type_int +test: ce_type_money +test: ce_orderby +test: ce_trigger +test: ce_verify_column_alter +test: ce_verify_schema_alter +test: ce_view +test: ce_with +test: ce_merge_into +test: ce_procedure +test: ce_alter_add_drop_column +test: ce_select_jdbc +#test: ce_fetchsize_jdbc +test: ce_transaction_jdbc +test: ce_prepare_jdbc +#test: ce_create_jdbc +test: ce_trigger_jdbc +test: ce_mul_query_jdbc +test: ce_functions_return_values +test: ce_functions_hardcoded +test: ce_functions_examples +test: ce_functions_out_params +test: ce_functions_input_params +test: ce_plpgsql_functions_input_params +test: ce_functions_in_out_params +test: ce_functions_create_replace +test: ce_functions_describe +test: ce_functions_drop_function +test: ce_functions_return_table +test: ce_functions_return_variable +test: ce_functions_anonymous_block diff --git a/src/test/regress/ce_sched_jdbc b/src/test/regress/ce_sched_jdbc new file mode 100644 index 000000000..c07b0d7d2 --- /dev/null +++ b/src/test/regress/ce_sched_jdbc @@ -0,0 +1,85 @@ +test: ce_kt_key_manage +test: ce_cmk_cek_test +test: ce_cmk_search_path +test: ce_default_values +test: ce_depend +test: ce_distinct +test: ce_drop_cek_depend +test: ce_drop_cek_schema +test: ce_drop_cek +test: ce_drop_cmk_schema +test: ce_drop_cmk +test: ce_drop_schema +test: ce_encoding_test +test: ce_escaping +test: ce_foreign_key +test: ce_insert_columnstore +test: ce_insert_from_select_test +test: ce_limit +test: ce_permission_on_keys_schema +test: ce_privileges_dba +test: ce_privileges_on_schema +test: ce_rlspolicy +test: ce_same_column_name_different_schemas +test: ce_searchpath +test: ce_select +test: 
ce_select_operators +test: ce_select_where_encrypt_test +test: ce_set +test: ce_transactions_test +test: ce_type_binarys +test: ce_type_boolean +test: ce_type_char +test: ce_type_money +test: ce_orderby +test: ce_trigger +test: ce_verify_column_alter +test: ce_verify_schema_alter +test: ce_view +test: ce_with +test: ce_merge_into +test: ce_functions_out_params +test: ce_functions_in_out_params +test: ce_functions_drop_function +test: ce_functions_return_variable +test: ce_functions_hardcoded +test: ce_functions_return_values +test: ce_functions_examples +test: ce_functions_input_params +#test: ce_functions_create_replace +test: ce_functions_describe +test: ce_functions_return_table + +test: BatchPreparedStatementsBin +test: BatchSimpleQueryBin +test: BatchStatamentBin +test: CreateTableWithPrepareStatamentBin +test: DatabaseMetadataGetColumnsBin +test: DisconnectedRsBin +test: HandlingBadSQLBin +test: MultiThreadClientLogicBin +test: NullEmptyStringBin +test: ResultSetFetchNoCLBin +test: PrepareStatementUniqueBin +test: ParameterMetaDataTestBin +test: PrepareStatamentIntBin +test: PrepareStatamentNumericBin +test: PrepareStatamentStringBin +test: PrepareStatamentVarCharBin +test: PrepareStatamentWithHardCodedValuesBin +test: PrepareStatementDeleteResultSetBin +test: PrepareStatementInsertResultSetBin +test: PrepareStatementUpdateResultSetBin +test: PrepareStatementViewBin +#test: ResultsetFetchBin +test: ResultSetMetaDataTestBin +test: SimpleQueryBin +test: StmtBatchClientLogicBin +test: CreateFunctionBin +test: FunctionsInoutParamBin +test: FunctionsInOutParamBinaryModeBin +test: FunctionsInOutParamsBin +test: FunctionsOutParamBin +test: FunctionsReturnValuesBin +test: FunctionsMetadataBin +test: ClientLogicCacheRetryBin diff --git a/src/test/regress/data/ce_copy_from.csv b/src/test/regress/data/ce_copy_from.csv new file mode 100644 index 000000000..bfcd012a6 --- /dev/null +++ b/src/test/regress/data/ce_copy_from.csv @@ -0,0 +1,5 @@ +2450811,1382,24 +2450812,1388,23 +2450835,1393,21 +2450845,1399,22 +2450855,1400,23 diff --git a/src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832.data b/src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE.data similarity index 100% rename from src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832.data rename to src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE.data diff --git a/src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data b/src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data similarity index 79% rename from src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data rename to src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data index f71e710a3..3287f6a4f 100644 Binary files a/src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data and 
b/src/test/regress/data/datanode1/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data differ diff --git a/src/test/regress/data/ecoli.csv b/src/test/regress/data/ecoli.csv new file mode 100644 index 000000000..ff231a903 --- /dev/null +++ b/src/test/regress/data/ecoli.csv @@ -0,0 +1,336 @@ +0.49,0.29,0.48,0.50,0.56,0.24,0.35,cp +0.07,0.40,0.48,0.50,0.54,0.35,0.44,cp +0.56,0.40,0.48,0.50,0.49,0.37,0.46,cp +0.59,0.49,0.48,0.50,0.52,0.45,0.36,cp +0.23,0.32,0.48,0.50,0.55,0.25,0.35,cp +0.67,0.39,0.48,0.50,0.36,0.38,0.46,cp +0.29,0.28,0.48,0.50,0.44,0.23,0.34,cp +0.21,0.34,0.48,0.50,0.51,0.28,0.39,cp +0.20,0.44,0.48,0.50,0.46,0.51,0.57,cp +0.42,0.40,0.48,0.50,0.56,0.18,0.30,cp +0.42,0.24,0.48,0.50,0.57,0.27,0.37,cp +0.25,0.48,0.48,0.50,0.44,0.17,0.29,cp +0.39,0.32,0.48,0.50,0.46,0.24,0.35,cp +0.51,0.50,0.48,0.50,0.46,0.32,0.35,cp +0.22,0.43,0.48,0.50,0.48,0.16,0.28,cp +0.25,0.40,0.48,0.50,0.46,0.44,0.52,cp +0.34,0.45,0.48,0.50,0.38,0.24,0.35,cp +0.44,0.27,0.48,0.50,0.55,0.52,0.58,cp +0.23,0.40,0.48,0.50,0.39,0.28,0.38,cp +0.41,0.57,0.48,0.50,0.39,0.21,0.32,cp +0.40,0.45,0.48,0.50,0.38,0.22,0.00,cp +0.31,0.23,0.48,0.50,0.73,0.05,0.14,cp +0.51,0.54,0.48,0.50,0.41,0.34,0.43,cp +0.30,0.16,0.48,0.50,0.56,0.11,0.23,cp +0.36,0.39,0.48,0.50,0.48,0.22,0.23,cp +0.29,0.37,0.48,0.50,0.48,0.44,0.52,cp +0.25,0.40,0.48,0.50,0.47,0.33,0.42,cp +0.21,0.51,0.48,0.50,0.50,0.32,0.41,cp +0.43,0.37,0.48,0.50,0.53,0.35,0.44,cp +0.43,0.39,0.48,0.50,0.47,0.31,0.41,cp +0.53,0.38,0.48,0.50,0.44,0.26,0.36,cp +0.34,0.33,0.48,0.50,0.38,0.35,0.44,cp +0.56,0.51,0.48,0.50,0.34,0.37,0.46,cp +0.40,0.29,0.48,0.50,0.42,0.35,0.44,cp +0.24,0.35,0.48,0.50,0.31,0.19,0.31,cp +0.36,0.54,0.48,0.50,0.41,0.38,0.46,cp +0.29,0.52,0.48,0.50,0.42,0.29,0.39,cp +0.65,0.47,0.48,0.50,0.59,0.30,0.40,cp +0.32,0.42,0.48,0.50,0.35,0.28,0.38,cp +0.38,0.46,0.48,0.50,0.48,0.22,0.29,cp +0.33,0.45,0.48,0.50,0.52,0.32,0.41,cp +0.30,0.37,0.48,0.50,0.59,0.41,0.49,cp +0.40,0.50,0.48,0.50,0.45,0.39,0.47,cp +0.28,0.38,0.48,0.50,0.50,0.33,0.42,cp +0.61,0.45,0.48,0.50,0.48,0.35,0.41,cp +0.17,0.38,0.48,0.50,0.45,0.42,0.50,cp +0.44,0.35,0.48,0.50,0.55,0.55,0.61,cp +0.43,0.40,0.48,0.50,0.39,0.28,0.39,cp +0.42,0.35,0.48,0.50,0.58,0.15,0.27,cp +0.23,0.33,0.48,0.50,0.43,0.33,0.43,cp +0.37,0.52,0.48,0.50,0.42,0.42,0.36,cp +0.29,0.30,0.48,0.50,0.45,0.03,0.17,cp +0.22,0.36,0.48,0.50,0.35,0.39,0.47,cp +0.23,0.58,0.48,0.50,0.37,0.53,0.59,cp +0.47,0.47,0.48,0.50,0.22,0.16,0.26,cp +0.54,0.47,0.48,0.50,0.28,0.33,0.42,cp +0.51,0.37,0.48,0.50,0.35,0.36,0.45,cp +0.40,0.35,0.48,0.50,0.45,0.33,0.42,cp +0.44,0.34,0.48,0.50,0.30,0.33,0.43,cp +0.42,0.38,0.48,0.50,0.54,0.34,0.43,cp +0.44,0.56,0.48,0.50,0.50,0.46,0.54,cp +0.52,0.36,0.48,0.50,0.41,0.28,0.38,cp +0.36,0.41,0.48,0.50,0.48,0.47,0.54,cp +0.18,0.30,0.48,0.50,0.46,0.24,0.35,cp +0.47,0.29,0.48,0.50,0.51,0.33,0.43,cp +0.24,0.43,0.48,0.50,0.54,0.52,0.59,cp +0.25,0.37,0.48,0.50,0.41,0.33,0.42,cp +0.52,0.57,0.48,0.50,0.42,0.47,0.54,cp +0.25,0.37,0.48,0.50,0.43,0.26,0.36,cp +0.35,0.48,0.48,0.50,0.56,0.40,0.48,cp +0.26,0.26,0.48,0.50,0.34,0.25,0.35,cp +0.44,0.51,0.48,0.50,0.47,0.26,0.36,cp +0.37,0.50,0.48,0.50,0.42,0.36,0.45,cp +0.44,0.42,0.48,0.50,0.42,0.25,0.20,cp +0.24,0.43,0.48,0.50,0.37,0.28,0.38,cp +0.42,0.30,0.48,0.50,0.48,0.26,0.36,cp +0.48,0.42,0.48,0.50,0.45,0.25,0.35,cp +0.41,0.48,0.48,0.50,0.51,0.44,0.51,cp +0.44,0.28,0.48,0.50,0.43,0.27,0.37,cp +0.29,0.41,0.48,0.50,0.48,0.38,0.46,cp +0.34,0.28,0.48,0.50,0.41,0.35,0.44,cp +0.41,0.43,0.48,0.50,0.45,0.31,0.41,cp 
+0.29,0.47,0.48,0.50,0.41,0.23,0.34,cp +0.34,0.55,0.48,0.50,0.58,0.31,0.41,cp +0.36,0.56,0.48,0.50,0.43,0.45,0.53,cp +0.40,0.46,0.48,0.50,0.52,0.49,0.56,cp +0.50,0.49,0.48,0.50,0.49,0.46,0.53,cp +0.52,0.44,0.48,0.50,0.37,0.36,0.42,cp +0.50,0.51,0.48,0.50,0.27,0.23,0.34,cp +0.53,0.42,0.48,0.50,0.16,0.29,0.39,cp +0.34,0.46,0.48,0.50,0.52,0.35,0.44,cp +0.40,0.42,0.48,0.50,0.37,0.27,0.27,cp +0.41,0.43,0.48,0.50,0.50,0.24,0.25,cp +0.30,0.45,0.48,0.50,0.36,0.21,0.32,cp +0.31,0.47,0.48,0.50,0.29,0.28,0.39,cp +0.64,0.76,0.48,0.50,0.45,0.35,0.38,cp +0.35,0.37,0.48,0.50,0.30,0.34,0.43,cp +0.57,0.54,0.48,0.50,0.37,0.28,0.33,cp +0.65,0.55,0.48,0.50,0.34,0.37,0.28,cp +0.51,0.46,0.48,0.50,0.58,0.31,0.41,cp +0.38,0.40,0.48,0.50,0.63,0.25,0.35,cp +0.24,0.57,0.48,0.50,0.63,0.34,0.43,cp +0.38,0.26,0.48,0.50,0.54,0.16,0.28,cp +0.33,0.47,0.48,0.50,0.53,0.18,0.29,cp +0.24,0.34,0.48,0.50,0.38,0.30,0.40,cp +0.26,0.50,0.48,0.50,0.44,0.32,0.41,cp +0.44,0.49,0.48,0.50,0.39,0.38,0.40,cp +0.43,0.32,0.48,0.50,0.33,0.45,0.52,cp +0.49,0.43,0.48,0.50,0.49,0.30,0.40,cp +0.47,0.28,0.48,0.50,0.56,0.20,0.25,cp +0.32,0.33,0.48,0.50,0.60,0.06,0.20,cp +0.34,0.35,0.48,0.50,0.51,0.49,0.56,cp +0.35,0.34,0.48,0.50,0.46,0.30,0.27,cp +0.38,0.30,0.48,0.50,0.43,0.29,0.39,cp +0.38,0.44,0.48,0.50,0.43,0.20,0.31,cp +0.41,0.51,0.48,0.50,0.58,0.20,0.31,cp +0.34,0.42,0.48,0.50,0.41,0.34,0.43,cp +0.51,0.49,0.48,0.50,0.53,0.14,0.26,cp +0.25,0.51,0.48,0.50,0.37,0.42,0.50,cp +0.29,0.28,0.48,0.50,0.50,0.42,0.50,cp +0.25,0.26,0.48,0.50,0.39,0.32,0.42,cp +0.24,0.41,0.48,0.50,0.49,0.23,0.34,cp +0.17,0.39,0.48,0.50,0.53,0.30,0.39,cp +0.04,0.31,0.48,0.50,0.41,0.29,0.39,cp +0.61,0.36,0.48,0.50,0.49,0.35,0.44,cp +0.34,0.51,0.48,0.50,0.44,0.37,0.46,cp +0.28,0.33,0.48,0.50,0.45,0.22,0.33,cp +0.40,0.46,0.48,0.50,0.42,0.35,0.44,cp +0.23,0.34,0.48,0.50,0.43,0.26,0.37,cp +0.37,0.44,0.48,0.50,0.42,0.39,0.47,cp +0.00,0.38,0.48,0.50,0.42,0.48,0.55,cp +0.39,0.31,0.48,0.50,0.38,0.34,0.43,cp +0.30,0.44,0.48,0.50,0.49,0.22,0.33,cp +0.27,0.30,0.48,0.50,0.71,0.28,0.39,cp +0.17,0.52,0.48,0.50,0.49,0.37,0.46,cp +0.36,0.42,0.48,0.50,0.53,0.32,0.41,cp +0.30,0.37,0.48,0.50,0.43,0.18,0.30,cp +0.26,0.40,0.48,0.50,0.36,0.26,0.37,cp +0.40,0.41,0.48,0.50,0.55,0.22,0.33,cp +0.22,0.34,0.48,0.50,0.42,0.29,0.39,cp +0.44,0.35,0.48,0.50,0.44,0.52,0.59,cp +0.27,0.42,0.48,0.50,0.37,0.38,0.43,cp +0.16,0.43,0.48,0.50,0.54,0.27,0.37,cp +0.06,0.61,0.48,0.50,0.49,0.92,0.37,im +0.44,0.52,0.48,0.50,0.43,0.47,0.54,im +0.63,0.47,0.48,0.50,0.51,0.82,0.84,im +0.23,0.48,0.48,0.50,0.59,0.88,0.89,im +0.34,0.49,0.48,0.50,0.58,0.85,0.80,im +0.43,0.40,0.48,0.50,0.58,0.75,0.78,im +0.46,0.61,0.48,0.50,0.48,0.86,0.87,im +0.27,0.35,0.48,0.50,0.51,0.77,0.79,im +0.52,0.39,0.48,0.50,0.65,0.71,0.73,im +0.29,0.47,0.48,0.50,0.71,0.65,0.69,im +0.55,0.47,0.48,0.50,0.57,0.78,0.80,im +0.12,0.67,0.48,0.50,0.74,0.58,0.63,im +0.40,0.50,0.48,0.50,0.65,0.82,0.84,im +0.73,0.36,0.48,0.50,0.53,0.91,0.92,im +0.84,0.44,0.48,0.50,0.48,0.71,0.74,im +0.48,0.45,0.48,0.50,0.60,0.78,0.80,im +0.54,0.49,0.48,0.50,0.40,0.87,0.88,im +0.48,0.41,0.48,0.50,0.51,0.90,0.88,im +0.50,0.66,0.48,0.50,0.31,0.92,0.92,im +0.72,0.46,0.48,0.50,0.51,0.66,0.70,im +0.47,0.55,0.48,0.50,0.58,0.71,0.75,im +0.33,0.56,0.48,0.50,0.33,0.78,0.80,im +0.64,0.58,0.48,0.50,0.48,0.78,0.73,im +0.54,0.57,0.48,0.50,0.56,0.81,0.83,im +0.47,0.59,0.48,0.50,0.52,0.76,0.79,im +0.63,0.50,0.48,0.50,0.59,0.85,0.86,im +0.49,0.42,0.48,0.50,0.53,0.79,0.81,im +0.31,0.50,0.48,0.50,0.57,0.84,0.85,im +0.74,0.44,0.48,0.50,0.55,0.88,0.89,im +0.33,0.45,0.48,0.50,0.45,0.88,0.89,im 
+0.45,0.40,0.48,0.50,0.61,0.74,0.77,im +0.71,0.40,0.48,0.50,0.71,0.70,0.74,im +0.50,0.37,0.48,0.50,0.66,0.64,0.69,im +0.66,0.53,0.48,0.50,0.59,0.66,0.66,im +0.60,0.61,0.48,0.50,0.54,0.67,0.71,im +0.83,0.37,0.48,0.50,0.61,0.71,0.74,im +0.34,0.51,0.48,0.50,0.67,0.90,0.90,im +0.63,0.54,0.48,0.50,0.65,0.79,0.81,im +0.70,0.40,0.48,0.50,0.56,0.86,0.83,im +0.60,0.50,1.00,0.50,0.54,0.77,0.80,im +0.16,0.51,0.48,0.50,0.33,0.39,0.48,im +0.74,0.70,0.48,0.50,0.66,0.65,0.69,im +0.20,0.46,0.48,0.50,0.57,0.78,0.81,im +0.89,0.55,0.48,0.50,0.51,0.72,0.76,im +0.70,0.46,0.48,0.50,0.56,0.78,0.73,im +0.12,0.43,0.48,0.50,0.63,0.70,0.74,im +0.61,0.52,0.48,0.50,0.54,0.67,0.52,im +0.33,0.37,0.48,0.50,0.46,0.65,0.69,im +0.63,0.65,0.48,0.50,0.66,0.67,0.71,im +0.41,0.51,0.48,0.50,0.53,0.75,0.78,im +0.34,0.67,0.48,0.50,0.52,0.76,0.79,im +0.58,0.34,0.48,0.50,0.56,0.87,0.81,im +0.59,0.56,0.48,0.50,0.55,0.80,0.82,im +0.51,0.40,0.48,0.50,0.57,0.62,0.67,im +0.50,0.57,0.48,0.50,0.71,0.61,0.66,im +0.60,0.46,0.48,0.50,0.45,0.81,0.83,im +0.37,0.47,0.48,0.50,0.39,0.76,0.79,im +0.58,0.55,0.48,0.50,0.57,0.70,0.74,im +0.36,0.47,0.48,0.50,0.51,0.69,0.72,im +0.39,0.41,0.48,0.50,0.52,0.72,0.75,im +0.35,0.51,0.48,0.50,0.61,0.71,0.74,im +0.31,0.44,0.48,0.50,0.50,0.79,0.82,im +0.61,0.66,0.48,0.50,0.46,0.87,0.88,im +0.48,0.49,0.48,0.50,0.52,0.77,0.71,im +0.11,0.50,0.48,0.50,0.58,0.72,0.68,im +0.31,0.36,0.48,0.50,0.58,0.94,0.94,im +0.68,0.51,0.48,0.50,0.71,0.75,0.78,im +0.69,0.39,0.48,0.50,0.57,0.76,0.79,im +0.52,0.54,0.48,0.50,0.62,0.76,0.79,im +0.46,0.59,0.48,0.50,0.36,0.76,0.23,im +0.36,0.45,0.48,0.50,0.38,0.79,0.17,im +0.00,0.51,0.48,0.50,0.35,0.67,0.44,im +0.10,0.49,0.48,0.50,0.41,0.67,0.21,im +0.30,0.51,0.48,0.50,0.42,0.61,0.34,im +0.61,0.47,0.48,0.50,0.00,0.80,0.32,im +0.63,0.75,0.48,0.50,0.64,0.73,0.66,im +0.71,0.52,0.48,0.50,0.64,1.00,0.99,im +0.85,0.53,0.48,0.50,0.53,0.52,0.35,imS +0.63,0.49,0.48,0.50,0.54,0.76,0.79,imS +0.75,0.55,1.00,1.00,0.40,0.47,0.30,imL +0.70,0.39,1.00,0.50,0.51,0.82,0.84,imL +0.72,0.42,0.48,0.50,0.65,0.77,0.79,imU +0.79,0.41,0.48,0.50,0.66,0.81,0.83,imU +0.83,0.48,0.48,0.50,0.65,0.76,0.79,imU +0.69,0.43,0.48,0.50,0.59,0.74,0.77,imU +0.79,0.36,0.48,0.50,0.46,0.82,0.70,imU +0.78,0.33,0.48,0.50,0.57,0.77,0.79,imU +0.75,0.37,0.48,0.50,0.64,0.70,0.74,imU +0.59,0.29,0.48,0.50,0.64,0.75,0.77,imU +0.67,0.37,0.48,0.50,0.54,0.64,0.68,imU +0.66,0.48,0.48,0.50,0.54,0.70,0.74,imU +0.64,0.46,0.48,0.50,0.48,0.73,0.76,imU +0.76,0.71,0.48,0.50,0.50,0.71,0.75,imU +0.84,0.49,0.48,0.50,0.55,0.78,0.74,imU +0.77,0.55,0.48,0.50,0.51,0.78,0.74,imU +0.81,0.44,0.48,0.50,0.42,0.67,0.68,imU +0.58,0.60,0.48,0.50,0.59,0.73,0.76,imU +0.63,0.42,0.48,0.50,0.48,0.77,0.80,imU +0.62,0.42,0.48,0.50,0.58,0.79,0.81,imU +0.86,0.39,0.48,0.50,0.59,0.89,0.90,imU +0.81,0.53,0.48,0.50,0.57,0.87,0.88,imU +0.87,0.49,0.48,0.50,0.61,0.76,0.79,imU +0.47,0.46,0.48,0.50,0.62,0.74,0.77,imU +0.76,0.41,0.48,0.50,0.50,0.59,0.62,imU +0.70,0.53,0.48,0.50,0.70,0.86,0.87,imU +0.64,0.45,0.48,0.50,0.67,0.61,0.66,imU +0.81,0.52,0.48,0.50,0.57,0.78,0.80,imU +0.73,0.26,0.48,0.50,0.57,0.75,0.78,imU +0.49,0.61,1.00,0.50,0.56,0.71,0.74,imU +0.88,0.42,0.48,0.50,0.52,0.73,0.75,imU +0.84,0.54,0.48,0.50,0.75,0.92,0.70,imU +0.63,0.51,0.48,0.50,0.64,0.72,0.76,imU +0.86,0.55,0.48,0.50,0.63,0.81,0.83,imU +0.79,0.54,0.48,0.50,0.50,0.66,0.68,imU +0.57,0.38,0.48,0.50,0.06,0.49,0.33,imU +0.78,0.44,0.48,0.50,0.45,0.73,0.68,imU +0.78,0.68,0.48,0.50,0.83,0.40,0.29,om +0.63,0.69,0.48,0.50,0.65,0.41,0.28,om +0.67,0.88,0.48,0.50,0.73,0.50,0.25,om +0.61,0.75,0.48,0.50,0.51,0.33,0.33,om 
+0.67,0.84,0.48,0.50,0.74,0.54,0.37,om +0.74,0.90,0.48,0.50,0.57,0.53,0.29,om +0.73,0.84,0.48,0.50,0.86,0.58,0.29,om +0.75,0.76,0.48,0.50,0.83,0.57,0.30,om +0.77,0.57,0.48,0.50,0.88,0.53,0.20,om +0.74,0.78,0.48,0.50,0.75,0.54,0.15,om +0.68,0.76,0.48,0.50,0.84,0.45,0.27,om +0.56,0.68,0.48,0.50,0.77,0.36,0.45,om +0.65,0.51,0.48,0.50,0.66,0.54,0.33,om +0.52,0.81,0.48,0.50,0.72,0.38,0.38,om +0.64,0.57,0.48,0.50,0.70,0.33,0.26,om +0.60,0.76,1.00,0.50,0.77,0.59,0.52,om +0.69,0.59,0.48,0.50,0.77,0.39,0.21,om +0.63,0.49,0.48,0.50,0.79,0.45,0.28,om +0.71,0.71,0.48,0.50,0.68,0.43,0.36,om +0.68,0.63,0.48,0.50,0.73,0.40,0.30,om +0.77,0.57,1.00,0.50,0.37,0.54,0.01,omL +0.66,0.49,1.00,0.50,0.54,0.56,0.36,omL +0.71,0.46,1.00,0.50,0.52,0.59,0.30,omL +0.67,0.55,1.00,0.50,0.66,0.58,0.16,omL +0.68,0.49,1.00,0.50,0.62,0.55,0.28,omL +0.74,0.49,0.48,0.50,0.42,0.54,0.36,pp +0.70,0.61,0.48,0.50,0.56,0.52,0.43,pp +0.66,0.86,0.48,0.50,0.34,0.41,0.36,pp +0.73,0.78,0.48,0.50,0.58,0.51,0.31,pp +0.65,0.57,0.48,0.50,0.47,0.47,0.51,pp +0.72,0.86,0.48,0.50,0.17,0.55,0.21,pp +0.67,0.70,0.48,0.50,0.46,0.45,0.33,pp +0.67,0.81,0.48,0.50,0.54,0.49,0.23,pp +0.67,0.61,0.48,0.50,0.51,0.37,0.38,pp +0.63,1.00,0.48,0.50,0.35,0.51,0.49,pp +0.57,0.59,0.48,0.50,0.39,0.47,0.33,pp +0.71,0.71,0.48,0.50,0.40,0.54,0.39,pp +0.66,0.74,0.48,0.50,0.31,0.38,0.43,pp +0.67,0.81,0.48,0.50,0.25,0.42,0.25,pp +0.64,0.72,0.48,0.50,0.49,0.42,0.19,pp +0.68,0.82,0.48,0.50,0.38,0.65,0.56,pp +0.32,0.39,0.48,0.50,0.53,0.28,0.38,pp +0.70,0.64,0.48,0.50,0.47,0.51,0.47,pp +0.63,0.57,0.48,0.50,0.49,0.70,0.20,pp +0.74,0.82,0.48,0.50,0.49,0.49,0.41,pp +0.63,0.86,0.48,0.50,0.39,0.47,0.34,pp +0.63,0.83,0.48,0.50,0.40,0.39,0.19,pp +0.63,0.71,0.48,0.50,0.60,0.40,0.39,pp +0.71,0.86,0.48,0.50,0.40,0.54,0.32,pp +0.68,0.78,0.48,0.50,0.43,0.44,0.42,pp +0.64,0.84,0.48,0.50,0.37,0.45,0.40,pp +0.74,0.47,0.48,0.50,0.50,0.57,0.42,pp +0.75,0.84,0.48,0.50,0.35,0.52,0.33,pp +0.63,0.65,0.48,0.50,0.39,0.44,0.35,pp +0.69,0.67,0.48,0.50,0.30,0.39,0.24,pp +0.70,0.71,0.48,0.50,0.42,0.84,0.85,pp +0.69,0.80,0.48,0.50,0.46,0.57,0.26,pp +0.64,0.66,0.48,0.50,0.41,0.39,0.20,pp +0.63,0.80,0.48,0.50,0.46,0.31,0.29,pp +0.66,0.71,0.48,0.50,0.41,0.50,0.35,pp +0.69,0.59,0.48,0.50,0.46,0.44,0.52,pp +0.68,0.67,0.48,0.50,0.49,0.40,0.34,pp +0.64,0.78,0.48,0.50,0.50,0.36,0.38,pp +0.62,0.78,0.48,0.50,0.47,0.49,0.54,pp +0.76,0.73,0.48,0.50,0.44,0.39,0.39,pp +0.64,0.81,0.48,0.50,0.37,0.39,0.44,pp +0.29,0.39,0.48,0.50,0.52,0.40,0.48,pp +0.62,0.83,0.48,0.50,0.46,0.36,0.40,pp +0.56,0.54,0.48,0.50,0.43,0.37,0.30,pp +0.69,0.66,0.48,0.50,0.41,0.50,0.25,pp +0.69,0.65,0.48,0.50,0.63,0.48,0.41,pp +0.43,0.59,0.48,0.50,0.52,0.49,0.56,pp +0.74,0.56,0.48,0.50,0.47,0.68,0.30,pp +0.71,0.57,0.48,0.50,0.48,0.35,0.32,pp +0.61,0.60,0.48,0.50,0.44,0.39,0.38,pp +0.59,0.61,0.48,0.50,0.42,0.42,0.37,pp +0.74,0.74,0.48,0.50,0.31,0.53,0.52,pp diff --git a/src/test/regress/data/gs_loader_issue_DTS2021091619762.csv b/src/test/regress/data/gs_loader_issue_TESTTABLE.csv similarity index 100% rename from src/test/regress/data/gs_loader_issue_DTS2021091619762.csv rename to src/test/regress/data/gs_loader_issue_TESTTABLE.csv diff --git a/src/test/regress/data/gs_loader_issue_DTS2021091619762.ctl b/src/test/regress/data/gs_loader_issue_TESTTABLE.ctl similarity index 100% rename from src/test/regress/data/gs_loader_issue_DTS2021091619762.ctl rename to src/test/regress/data/gs_loader_issue_TESTTABLE.ctl diff --git a/src/test/regress/data/moons.csv b/src/test/regress/data/moons.csv new file mode 100644 index 000000000..6d01f203e --- /dev/null 
+++ b/src/test/regress/data/moons.csv @@ -0,0 +1,200 @@ + 0, 0.611058, 0.664072 + 1, 0.870532, 0.121105 + 1, 0.339841, 0.51475 + 0, 0.0160473, 0.304442 + 1, 0.918411, 0.247067 + 1, 0.955784, 0.324352 + 0, 0.662193, 0.476362 + 1, 0.634094, 0.0551296 + 0, 0.637545, 0.570553 + 1, 0.708009, 0.0278832 + 0, 0.228992, 0.848534 + 1, 0.426666, 0.206189 + 1, 0.934605, 0.271048 + 1, 0.740371, 0.104826 + 0, 0.0801694, 0.736775 + 0, 0.664083, 0.475467 + 0, 0.521793, 0.883628 + 1, 0.459354, 0.168577 + 0, 0.460578, 0.884899 + 0, 0.0952446, 0.845304 + 1, 0.969984, 0.357193 + 1, 0.625349, 0.0628603 + 0, 0.411418, 0.950217 + 1, 0.76404, 0.126598 + 0, 0.256604, 0.96748 + 1, 0.425001, 0.303937 + 1, 0.945382, 0.291013 + 1, 0.956444, 0.376948 + 1, 0.400387, 0.298003 + 0, 0.391582, 0.889785 + 0, 0.635026, 0.664683 + 0, 0.272918, 0.856374 + 1, 0.664774, 0.0700606 + 1, 0.360438, 0.467077 + 0, 0.594042, 0.754773 + 1, 0.965188, 0.47744 + 1, 0.487849, 0.211306 + 0, 0.658003, 0.40965 + 0, 0.073795, 0.665577 + 1, 0.333383, 0.565037 + 1, 0.526357, 0.093939 + 0, 0.204711, 0.867508 + 0, 0.558133, 0.778938 + 1, 0.379975, 0.390364 + 1, 0.847907, 0.145022 + 0, 0.225955, 0.925125 + 0, 0.675867, 0.465584 + 0, 0.113966, 0.744253 + 1, 0.398943, 0.285 + 1, 0.626441, 0.073145 + 0, 0.121105, 0.832534 + 1, 0.452843, 0.231713 + 1, 0.98764, 0.373061 + 0, 0.0539148, 0.384356 + 1, 0.684955, 0.0486028 + 0, 0.590545, 0.745526 + 0, 0.524613, 0.793396 + 1, 0.748135, 0.0633139 + 0, 0.427366, 0.917534 + 0, 0.279554, 0.947917 + 1, 0.460432, 0.174816 + 0, 0.0178346, 0.447998 + 1, 0.923209, 0.274897 + 0, 0.629644, 0.669498 + 1, 0.702697, 0.0683983 + 0, 0.000378676, 0.492354 + 0, 0.295366, 0.96121 + 1, 0.82105, 0.120462 + 0, 0.356514, 0.938892 + 0, 0.0436248, 0.624985 + 1, 0.560436, 0.0693472 + 0, 0.0258734, 0.594792 + 1, 0.496689, 0.0691713 + 1, 0.304005, 0.665775 + 1, 0.61069, 0.0919615 + 0, 0.039866, 0.577054 + 1, 0.363877, 0.461894 + 0, 0.646552, 0.596761 + 0, 0.543128, 0.821364 + 1, 0.582841, 0.125729 + 1, 0.98736, 0.42472 + 0, 0.340077, 0.937744 + 1, 0.553252, 0.0492268 + 1, 0.99603, 0.618496 + 1, 0.354218, 0.49465 + 0, 0.230597, 0.968554 + 0, 0.655504, 0.375041 + 0, 0.236948, 0.929958 + 1, 0.540735, 0.110312 + 1, 0.990789, 0.58695 + 0, 0.453925, 0.913991 + 0, 0.668597, 0.56107 + 1, 0.990225, 0.508189 + 0, 0.128367, 0.810164 + 1, 0.996392, 0.569794 + 1, 0.923731, 0.280673 + 1, 0.90858, 0.212665 + 1, 0.478263, 0.196756 + 0, 0.0149978, 0.496467 + 1, 0.367482, 0.396618 + 1, 0.767727, 0 + 1, 0.98563, 0.535933 + 0, 0.357172, 1 + 0, 0.68033, 0.335491 + 0, 0.0552652, 0.53451 + 1, 0.372836, 0.525341 + 0, 0.0269639, 0.412506 + 0, 0.193347, 0.92896 + 1, 0.847207, 0.139021 + 0, 0.422232, 0.889845 + 0, 0.157467, 0.833649 + 0, 0.358856, 0.966176 + 0, 0.340436, 0.994358 + 0, 0.0706009, 0.590184 + 0, 0.64651, 0.576923 + 1, 0.81031, 0.0478281 + 0, 0.542911, 0.748787 + 1, 0.975844, 0.43231 + 1, 0.534373, 0.109746 + 0, 0.58723, 0.657048 + 0, 0.62865, 0.675761 + 0, 0.116807, 0.694895 + 1, 0.336752, 0.552525 + 0, 0.337064, 0.949499 + 1, 0.326293, 0.60402 + 0, 0.0903431, 0.761584 + 0, 0.644772, 0.375003 + 1, 0.40431, 0.377206 + 1, 0.783398, 0.112471 + 1, 0.988742, 0.656447 + 0, 0.670682, 0.513458 + 0, 0.171147, 0.809187 + 0, 0.0241084, 0.63433 + 1, 0.422478, 0.257235 + 1, 0.739717, 0.103764 + 1, 0.864843, 0.241822 + 1, 0.721618, 0.106495 + 0, 0.0201201, 0.480061 + 1, 1, 0.652923 + 0, 0.634227, 0.584884 + 1, 0.379639, 0.342614 + 1, 0.342705, 0.633357 + 0, 0.49095, 0.919641 + 0, 0.231214, 0.942067 + 0, 0.204833, 0.882167 + 0, 0.675308, 0.39016 + 1, 0.4215, 
0.356828 + 1, 0.783929, 0.0817289 + 0, 0.428999, 0.964569 + 1, 0.344826, 0.499974 + 1, 0.500535, 0.126854 + 1, 0.890636, 0.140425 + 0, 0.62039, 0.624712 + 0, 0.457462, 0.895657 + 1, 0.536822, 0.138598 + 0, 0.289061, 0.931962 + 1, 0.754048, 0.0643849 + 1, 0.455275, 0.241324 + 0, 0.517047, 0.921328 + 0, 0.554618, 0.817978 + 1, 0.39776, 0.378246 + 1, 0.70652, 0.0447936 + 0, 0.542096, 0.854375 + 1, 0.826479, 0.152793 + 1, 0.475503, 0.127769 + 1, 0.887947, 0.293697 + 0, 0.500609, 0.86514 + 1, 0.575215, 0.0954533 + 1, 0.852925, 0.139911 + 1, 0.650967, 0.0034987 + 0, 0.545427, 0.85482 + 0, 0, 0.365958 + 0, 0.0512313, 0.666855 + 0, 0.104578, 0.66202 + 1, 0.394258, 0.367781 + 1, 0.652097, 0.0798683 + 1, 0.943768, 0.59443 + 0, 0.130458, 0.76599 + 0, 0.108463, 0.710976 + 0, 0.541369, 0.786866 + 1, 0.82386, 0.123767 + 0, 0.0594803, 0.656242 + 1, 0.970959, 0.459512 + 1, 0.924945, 0.184422 + 1, 0.978202, 0.509941 + 0, 0.455045, 0.940488 + 0, 0.292082, 0.887179 + 1, 0.596118, 0.0381829 + 0, 0.0596729, 0.570906 + 0, 0.143212, 0.82623 + 0, 0.0457369, 0.583305 + 0, 0.484093, 0.911647 + 0, 0.608407, 0.672718 + 0, 0.650929, 0.513175 + 1, 0.341517, 0.574961 + 0, 0.173157, 0.868028 + 1, 0.956039, 0.497058 + 1, 0.403601, 0.345525 + 0, 0.386929, 0.956949 + 1, 0.87148, 0.215393 diff --git a/src/test/regress/data/rain.txt b/src/test/regress/data/rain.txt new file mode 100644 index 000000000..22a8cdcef --- /dev/null +++ b/src/test/regress/data/rain.txt @@ -0,0 +1,20 @@ +1,Bendigo,7.6,14.3,9,69,24,28,75,72,1010.8,1013.2,7,5,9.4,12.2,1,0 +2,Nuriootpa,8.6,14.3,5.6,56,30,31,72,49,1015.2,1016.9,7,6,10.9,13.6,1,0 +3,Tuggera0ng,11.6,22.7,2.6,28,0,13,99,67,1016.4,1014.1,0,0,15.9,21.7,1,1 +4,Witchcliffe,11.1,28.2,0,43,17,26,64,46,1020.7,1016,0,0,17.9,26.2,0,0 +5,Brisbane,16.7,26.6,0,24,0,6,75,62,1021.6,1016.8,4,3,21.9,23.4,0,0 +6,0rahHead,19.4,26.7,0.2,56,15,26,80,71,1012.7,1010.7,0,0,21.9,24,0,1 +7,MelbourneAirport,10.2,21.2,0,35,9,15,73,46,1024.2,1020.1,8,2,13.7,20.8,0,0 +8,BadgerysCreek,16.2,25,23.4,48,19,28,78,56,1018.3,1018.1,0,0,21.3,23.2,1,0 +9,Melbourne,18.2,21.3,0,31,11,22,83,76,1011.9,1010.4,8,7,18.9,20.8,0,1 +10,Woomera,9.7,22,0,33,13,17,61,31,1023.8,1020.5,1,7,14,20.9,0,0 +11,Melbourne,0,0,0,33,20,17,0,0,0,0,0,0,0,0,0,0 +12,MountGinini,5,21,0,0,20,0,48,0,0,0,0,0,11.6,0,0,0 +13,Tuggera0ng,10,22.3,0,35,9,11,70,38,1018.4,1014.5,0,0,13.1,21,0,0 +14,Woomera,21.3,29.8,0,50,17,31,52,38,1006.4,1004.9,7,7,23.8,27.9,0,0 +15,WaggaWagga,7.9,13.8,1.4,48,17,22,90,44,1014.8,1011.6,7,6,8.7,13,1,0 +16,Moree,13.8,28.1,0,0,30,15,55,31,1020.6,1015.2,7,1,19.4,27.2,0,0 +17,Woomera,10,16.6,0,39,9,24,58,59,1018.2,1017.6,7,4,12,14.1,0,0 +18,Woomera,8.5,24,0,31,15,9,48,20,1025.7,1021.1,3,3,15.3,23,0,0 +19,Albany,12.5,25,0,0,9,0,49,0,1017.8,1015.3,0,0,21.5,0,0,0 +20,Cairns,25.2,33.9,0,43,17,31,70,50,1009.4,1006,7,7,29.3,32.3,0,1 diff --git a/src/test/regress/expected/BatchPreparedStatementsBin.out b/src/test/regress/expected/BatchPreparedStatementsBin.out new file mode 100644 index 000000000..53dd52ff4 --- /dev/null +++ b/src/test/regress/expected/BatchPreparedStatementsBin.out @@ -0,0 +1,40 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +starting batch : INSERT INTO t_varchar (id, name, address) VALUES (?,?,?); +Number of rows to add: 20 +executing batch ... +select * from t_varchar order by id; + id | name | address +----+---------+------------ + 1 | Name 1 | Address 1 + 2 | Name 2 | Address 2 + 3 | Name 3 | Address 3 + 4 | Name 4 | Address 4 + 5 | Name 5 | Address 5 + 6 | Name 6 | Address 6 + 7 | Name 7 | Address 7 + 8 | Name 8 | Address 8 + 9 | Name 9 | Address 9 + 10 | Name 10 | Address 10 + 11 | Name 11 | Address 11 + 12 | Name 12 | Address 12 + 13 | Name 13 | Address 13 + 14 | Name 14 | Address 14 + 15 | Name 15 | Address 15 + 16 | Name 16 | Address 16 + 17 | Name 17 | Address 17 + 18 | Name 18 | Address 18 + 19 | Name 19 | Address 19 + 20 | Name 20 | Address 20 +(20 rows) + +DROP table t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/BatchSimpleQueryBin.out b/src/test/regress/expected/BatchSimpleQueryBin.out new file mode 100644 index 000000000..ff0e8e5aa --- /dev/null +++ b/src/test/regress/expected/BatchSimpleQueryBin.out @@ -0,0 +1,23 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName-1', 'MyAddress-1');INSERT INTO t_varchar (id, name, address) VALUES (2, 'MyName-2', 'MyAddress-2'); +multiple statements is not allowed under client logic routine, please split it up into simpleQuery per statement. +INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName-1', 'MyAddress-1'); +INSERT INTO t_varchar (id, name, address) VALUES (2, 'MyName-2', 'MyAddress-2'); +SELECT * from t_varchar ORDER BY id; + id | name | address +----+----------+------------- + 1 | MyName-1 | MyAddress-1 + 2 | MyName-2 | MyAddress-2 +(2 rows) + +DROP table t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/BatchStatamentBin.out b/src/test/regress/expected/BatchStatamentBin.out new file mode 100644 index 000000000..72a83d6ba --- /dev/null +++ b/src/test/regress/expected/BatchStatamentBin.out @@ -0,0 +1,58 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName1', 'MyAddress1'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (2, 'MyName2', 'MyAddress2'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (3, 'MyName3', 'MyAddress3'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (4, 'MyName4', 'MyAddress4'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (5, 'MyName5', 'MyAddress5'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (6, 'MyName6', 'MyAddress6'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (7, 'MyName7', 'MyAddress7'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (8, 'MyName8', 'MyAddress8'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (9, 'MyName9', 'MyAddress9'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (10, 'MyName10', 'MyAddress10'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (11, 'MyName11', 'MyAddress11'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (12, 'MyName12', 'MyAddress12'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (13, 'MyName13', 'MyAddress13'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (14, 'MyName14', 'MyAddress14'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (15, 'MyName15', 'MyAddress15'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (16, 'MyName16', 'MyAddress16'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (17, 'MyName17', 'MyAddress17'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (18, 'MyName18', 'MyAddress18'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (19, 'MyName19', 'MyAddress19'); +added to batch INSERT INTO t_varchar (id, name, address) VALUES (20, 'MyName20', 'MyAddress20'); +executing batch ... 
+select * from t_varchar order by id; + id | name | address +----+----------+------------- + 1 | MyName1 | MyAddress1 + 2 | MyName2 | MyAddress2 + 3 | MyName3 | MyAddress3 + 4 | MyName4 | MyAddress4 + 5 | MyName5 | MyAddress5 + 6 | MyName6 | MyAddress6 + 7 | MyName7 | MyAddress7 + 8 | MyName8 | MyAddress8 + 9 | MyName9 | MyAddress9 + 10 | MyName10 | MyAddress10 + 11 | MyName11 | MyAddress11 + 12 | MyName12 | MyAddress12 + 13 | MyName13 | MyAddress13 + 14 | MyName14 | MyAddress14 + 15 | MyName15 | MyAddress15 + 16 | MyName16 | MyAddress16 + 17 | MyName17 | MyAddress17 + 18 | MyName18 | MyAddress18 + 19 | MyName19 | MyAddress19 + 20 | MyName20 | MyAddress20 +(20 rows) + +DROP table t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/ClientLogicCacheRetryBin.out b/src/test/regress/expected/ClientLogicCacheRetryBin.out new file mode 100644 index 000000000..22848aec5 --- /dev/null +++ b/src/test/regress/expected/ClientLogicCacheRetryBin.out @@ -0,0 +1,357 @@ + +******************************************************** +* Validate cache retry when creating a column settings * +******************************************************** +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +******************************************************************** +* Validate cache retry when using 2nd connection to create a table * +******************************************************************** +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +*************************************************************** +* Validate cache retry when inserting data using simple query * +*************************************************************** +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (1, 555) +select * from t_num + id | num +----+----- + 1 | 555 +(1 row) + +INSERT INTO t_num (id, num) VALUES (1, 666) +select * from t_num + id | num +----+----- + 1 | 666 + 1 | 555 +(2 rows) + +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! 
gs_ktool -d all +DELETE ALL + 1 + +********************************************************************************************* +* Validate the cache retry mechanism when inserting data to a table using prepare statement * +********************************************************************************************* +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (?,?) +1,2 +select * from t_num + id | num +----+----- + 1 | 2 +(1 row) + +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +************************************************************** +* Validate cache retry when updating data using simple query * +************************************************************** +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (1, 555) +INSERT INTO t_num (id, num) VALUES (1, 666) +select * from t_num + id | num +----+----- + 1 | 666 + 1 | 555 +(2 rows) + +update t_num set num = 7000 +select * from t_num + id | num +----+------ + 1 | 7000 + 1 | 7000 +(2 rows) + +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +************************************************************************************************* +* Validate cache retry when applying where clause on client logic field using prepare statament * +************************************************************************************************* +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (?,?) +1,2 +select * from t_num + id | num +----+----- + 1 | 2 +(1 row) + +SELECT * FROM t_num where num = ? +2 + id | num +----+----- + 1 | 2 +(1 row) + +select * from t_num + id | num +----+----- + 1 | 2 +(1 row) + +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +********************************************************************************************** +* Validate cache retry when applying where clause on client logic field Using simple queries * +********************************************************************************************** +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (?,?) +1,2 +select * from t_num + id | num +----+----- + 1 | 2 +(1 row) + +SELECT * FROM t_num where num = 2 + id | num +----+----- + 1 | 2 +(1 row) + +select * from t_num + id | num +----+----- + 1 | 2 +(1 row) + +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +******************************************************* +* Validate the cache refresh when trying to read data * +******************************************************* +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (1, 555) +select * from t_num + id | num +----+----- + 1 | 555 +(1 row) + +select * from t_num + id | num +----+----- + 1 | 555 +(1 row) + +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 + +********************************************************** +* Cache retry for functions with client logic parameters * +********************************************************** +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +insert into t_processed values('one',1,10),('two',2,20),('three',3,30),('four',4,40),('five',5,50),('six',6,60),('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100) +CREATE FUNCTION f_out_only(out1 OUT int, out2 OUT int) AS 'SELECT val, val2 from t_processed ORDER BY name LIMIT 1' LANGUAGE SQL +SELECT f_out_only () + f_out_only +------------ + (8,80) +(1 row) + +SELECT f_out_only () + f_out_only +-------------------------------------------------------------------------------------------------------------------------------------------------------------- +--?.* +(1 row) + +Trying SELECT f_out_only () again after calling to isValid method: +SELECT f_out_only() + f_out_only +------------ + (8,80) +(1 row) + +conn2, which is to be used now have refreshClientEncryption set to zero +SELECT f_out_only() + f_out_only +-------------------------------------------------------------------------------------------------------------------------------------------------------------- +--?.* +(1 row) + +SELECT f_out_only() + f_out_only +-------------------------------------------------------------------------------------------------------------------------------------------------------------- +--?.* +(1 row) + +CREATE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS 'SELECT val, val2 from t_processed where val=out1 AND val2=out2 ORDER BY name LIMIT 1' LANGUAGE SQL +CALL f_plaintext_out (3, 30) + out1 | out2 +------+------ + 3 | 30 +(1 row) + +SELECT f_plaintext_out (3, 30) + f_plaintext_out +----------------- + (3,30) +(1 row) + +SELECT f_plaintext_out (3, 30) + f_plaintext_out +----------------- + (3,30) +(1 row) + +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (1, 5555) +INSERT INTO t_num (id, num) VALUES (2, 6666) +SELECT * from t_num + id | num +----+------ + 2 | 6666 + 1 | 5555 +(2 rows) + +CREATE FUNCTION select1 () RETURNS t_num LANGUAGE SQL AS 'SELECT * from t_num;'; +call select1() + id | num +----+------ + 1 | 5555 +(1 row) + +call select1() + id | num +----+------ + 1 | 5555 +(1 row) + +CREATE FUNCTION f_processed_in_out_plpgsql(in1 int, out out1 int, in2 int, out out2 int)as $$ begin select val, val2 INTO out1, out2 from t_processed where val = in2 or val = in1 limit 1; end;$$ LANGUAGE plpgsql +SELECT f_processed_in_out_plpgsql(17,3) + f_processed_in_out_plpgsql +---------------------------- + (3,30) +(1 row) + +SELECT f_processed_in_out_plpgsql(17,3) + f_processed_in_out_plpgsql +---------------------------- + (3,30) +(1 row) + +DROP function f_out_only +DROP function f_plaintext_out; +DROP function select1 +DROP function f_processed_in_out_plpgsql +DROP TABLE t_num CASCADE +DROP TABLE t_processed CASCADE +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/CreateFunctionBin.out b/src/test/regress/expected/CreateFunctionBin.out new file mode 100644 index 000000000..6a0a26665 --- /dev/null +++ b/src/test/regress/expected/CreateFunctionBin.out @@ -0,0 +1,21 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE sbtest1(id int,k INTEGER ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC),c CHAR(120) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC),pad CHAR(60) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)); +insert into sbtest1 values (1,1,1,1) +create function select_data() returns table(a int, b INTEGER, c CHAR(120), d CHAR(120)) as $BODY$ begin return query(select * from sbtest1); end; $BODY$ LANGUAGE plpgsql; +call select_data(); + a | b | c | d +---+---+--------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------- + 1 | 1 | 1 | 1 +(1 row) + +DROP FUNCTION select_data +DROP TABLE sbtest1; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1 diff --git a/src/test/regress/expected/CreateTableWithPrepareStatamentBin.out b/src/test/regress/expected/CreateTableWithPrepareStatamentBin.out new file mode 100644 index 000000000..d7319c2ca --- /dev/null +++ b/src/test/regress/expected/CreateTableWithPrepareStatamentBin.out @@ -0,0 +1,27 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_string(key int,_varchar_ varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_char_ char(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_text_ text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (?,?,?,?); +1,varchar data,char data,text data +INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (1,'2','3','4'); +INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (1,'2','3','4'); +select * from t_string; + key | _varchar_ | _char_ | _text_ +-----+--------------+----------------------------------------------------+----------- + 1 | 2 | 3 | 4 + 1 | 2 | 3 | 4 + 1 | varchar data | char data | text data +(3 rows) + +DROP TABLE t_string; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/DatabaseMetadataGetColumnsBin.out b/src/test/regress/expected/DatabaseMetadataGetColumnsBin.out new file mode 100644 index 000000000..488fe00f0 --- /dev/null +++ b/src/test/regress/expected/DatabaseMetadataGetColumnsBin.out @@ -0,0 +1,162 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int,id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)) +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_client_logic_test_tbl_pkey" for table "metadata_client_logic_test_tbl" +*******inserting data to the metadata_client_logic_test_tbl; +insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_simple_test_tbl_pkey" for table "metadata_simple_test_tbl" +*******inserting data to the metadata_simple_test_tbl +insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_simple_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+--------------------- + 1 | 2 | test_data_4_meta_data | 1.10000000000000009 +(1 row) + +Testing table with client logic ... +Column name: key +Column size(10) +Ordinal position: 1 +Catalog: null +Data type (integer value): 4 +Data type name: int4 +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: id +Column size(10) +Ordinal position: 2 +Catalog: null +Data type (integer value): 4 +Data type name: int4 +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: char_col +Column size(30) +Ordinal position: 3 +Catalog: null +Data type (integer value): 12 +Data type name: varchar +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: float_col +Column size(17) +Ordinal position: 4 +Catalog: null +Data type (integer value): 8 +Data type name: float8 +Decimal value: 17 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_client_logic_test_tbl; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 + +Testing table with no client logic ... 
+Column name: key +Column size(10) +Ordinal position: 1 +Catalog: null +Data type (integer value): 4 +Data type name: int4 +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: id +Column size(10) +Ordinal position: 2 +Catalog: null +Data type (integer value): 4 +Data type name: int4 +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: char_col +Column size(30) +Ordinal position: 3 +Catalog: null +Data type (integer value): 12 +Data type name: varchar +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: float_col +Column size(17) +Ordinal position: 4 +Catalog: null +Data type (integer value): 8 +Data type name: float8 +Decimal value: 17 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Testing table with no client logic and with no client logic in connection string ... +Column name: key +Column size(10) +Ordinal position: 1 +Catalog: null +Data type (integer value): 4 +Data type name: int4 +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: id +Column size(10) +Ordinal position: 2 +Catalog: null +Data type (integer value): 4 +Data type name: int4 +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: char_col +Column size(30) +Ordinal position: 3 +Catalog: null +Data type (integer value): 12 +Data type name: varchar +Decimal value: 0 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Column name: float_col +Column size(17) +Ordinal position: 4 +Catalog: null +Data type (integer value): 8 +Data type name: float8 +Decimal value: 17 + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_simple_test_tbl; diff --git a/src/test/regress/expected/DisconnectedRsBin.out b/src/test/regress/expected/DisconnectedRsBin.out new file mode 100644 index 000000000..c9958688f --- /dev/null +++ b/src/test/regress/expected/DisconnectedRsBin.out @@ -0,0 +1,25 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar_regular(id INT, name varchar(50), address varchar(50)); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName', 'MyAddress'); +INSERT INTO t_varchar VALUES (2, 'MyName2', 'MyAddress2'); +INSERT INTO t_varchar_regular (id, name, address) VALUES (1, 'MyName', 'MyAddress'); +INSERT INTO t_varchar_regular VALUES (2, 'MyName2', 'MyAddress2'); +drop table t_varchar; +drop table t_varchar_regular; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +Connection is closed + id | name | address +----+---------+------------ + 1 | MyName | MyAddress + 2 | MyName2 | MyAddress2 +(2 rows) + diff --git a/src/test/regress/expected/FunctionsInOutParamBinaryModeBin.out b/src/test/regress/expected/FunctionsInOutParamBinaryModeBin.out new file mode 100644 index 000000000..b81effe8b --- /dev/null +++ b/src/test/regress/expected/FunctionsInOutParamBinaryModeBin.out @@ -0,0 +1,80 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +insert into t_processed values('one',1,10),('two',2,20),('three',3,30),('four',4,40),('five',5,50),('six',6,60),('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100) +CREATE TABLE t_processed_b (name text, val text ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +INSERT INTO t_processed_b VALUES('name1', 'one', 10),('name2', 'two', 20),('name3', 'three', 30),('name4', 'four', 40),('name5', 'five', 50),('name6', 'six', 60),('name7', 'seven', 70),('name8', 'eight', 80),('name9', 'nine', 90),('name10', 'ten', 100) +CREATE OR REPLACE FUNCTION f_processed_in_out_1param( out1 OUT int,in1 int) AS 'SELECT val from t_processed where val = in1 LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_processed_in_out(out1 OUT int,in1 int, out2 OUT int) AS 'SELECT val, val2 from t_processed where val = in1 LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_processed_in_out_b(out1 OUT text, out2 OUT int,in1 text, in2 text) AS 'SELECT val, val2 from t_processed_b where val = in1 or name = in2 LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_processed_in_out_plpgsql(in1 int, out out1 int, in2 int, out out2 int)as $$ begin select val, val2 INTO out1, out2 from t_processed where val = in2 or val = in1 limit 1; end;$$ LANGUAGE plpgsql +CREATE OR REPLACE FUNCTION f_processed_in_out_plpgsql2(out out1 t_processed.val%TYPE, out out2 t_processed.val%TYPE, in1 t_processed.val%TYPE) as $$ begin select val, val2 INTO out1, out2 from t_processed where val = in1 limit 1; end;$$ LANGUAGE plpgsql +NOTICE: type reference t_processed.val%TYPE converted to integer +NOTICE: type reference t_processed.val%TYPE converted to integer +NOTICE: type reference 
t_processed.val%TYPE converted to integer +CREATE OR REPLACE FUNCTION f_processed_in_out_aliases_plpgsql(out out1 int, in1 int,out out2 int) as $BODY$ DECLARE val1 ALIAS FOR out1; input_p ALIAS for in1; begin select val, val2 INTO val1, out2 from t_processed where val = input_p; end; $BODY$ LANGUAGE plpgsql; +select proname, prorettype, proallargtypes, prorettype_orig, proallargtypes_orig FROM pg_proc LEFT JOIN gs_encrypted_proc ON pg_proc.Oid = gs_encrypted_proc.func_id WHERE proname IN ('f_processed_in_out', 'f_processed_in_out_plpgsql', 'f_processed_in_out_plpgsql2', 'f_processed_in_out_aliases_plpgsql', 'f_processed_in_out_1param') ORDER BY proname + proname | prorettype | proallargtypes | prorettype_orig | proallargtypes_orig +------------------------------------+------------+---------------------+-----------------+--------------------- + f_processed_in_out | 2249 | {4402,4402,23} | | {23,23,-1} + f_processed_in_out_1param | 4402 | {4402,4402} | 23 | {23,23} + f_processed_in_out_aliases_plpgsql | 2249 | {4402,4402,23} | | {23,23,-1} + f_processed_in_out_plpgsql | 2249 | {4402,4402,4402,23} | | {23,23,23,-1} + f_processed_in_out_plpgsql2 | 2249 | {4402,23,4402} | | {23,-1,23} +(5 rows) + +SELECT f_processed_in_out_1param(2) + f_processed_in_out_1param +--------------------------- + 2 +(1 row) + +SELECT f_processed_in_out(5) + f_processed_in_out +-------------------- + (5,50) +(1 row) + +SELECT f_processed_in_out_b('ten','name70') + f_processed_in_out_b +---------------------- + (ten,100) +(1 row) + +SELECT f_processed_in_out_plpgsql(17,3) + f_processed_in_out_plpgsql +---------------------------- + (3,30) +(1 row) + +SELECT f_processed_in_out_plpgsql2(6) + f_processed_in_out_plpgsql2 +----------------------------- + (6,60) +(1 row) + +SELECT f_processed_in_out_aliases_plpgsql(4) + f_processed_in_out_aliases_plpgsql +------------------------------------ + (4,40) +(1 row) + +DROP TABLE t_processed CASCADE +DROP TABLE t_processed_b CASCADE +DROP FUNCTION f_processed_in_out_1param +DROP FUNCTION f_processed_in_out +DROP FUNCTION f_processed_in_out_b +DROP FUNCTION f_processed_in_out_plpgsql +DROP FUNCTION f_processed_in_out_plpgsql2 +DROP FUNCTION f_processed_in_out_aliases_plpgsql +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/FunctionsInOutParamsBin.out b/src/test/regress/expected/FunctionsInOutParamsBin.out new file mode 100644 index 000000000..cb5e0f546 --- /dev/null +++ b/src/test/regress/expected/FunctionsInOutParamsBin.out @@ -0,0 +1,61 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name varchar(100) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), id INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +insert into t_processed values('one',1,10,10),('two',2,20,20),('three',3,30,30),('four',4,40,40),('five',5,50,50),('six',6,60,60),('seven',7,70,70),('eight',8,80,80),('nine',9,90,90),('ten',10,100,100) +SELECT * from t_processed order by val2 + name | id | val | val2 +-------+----+-----+------ + one | 1 | 10 | 10 + two | 2 | 20 | 20 + three | 3 | 30 | 30 + four | 4 | 40 | 40 + five | 5 | 50 | 50 + six | 6 | 60 | 60 + seven | 7 | 70 | 70 + eight | 8 | 80 | 80 + nine | 9 | 90 | 90 + ten | 10 | 100 | 100 +(10 rows) + +CREATE OR REPLACE FUNCTION f_processed_in_out_1_int_param(in1 int, out1 OUT int) AS 'SELECT val from t_processed where id = in1 LIMIT 1' LANGUAGE SQL +SELECT f_processed_in_out_1_int_param(2) + f_processed_in_out_1_int_param +-------------------------------- + 20 +(1 row) + +Invoking f_processed_in_out_1_int_param using CallableStatement +f_processed_in_out_1param value of index 2 Type is java.lang.Integer value is 20 +CREATE OR REPLACE FUNCTION f_processed_in_int_out_varchar(in1 int, out1 OUT varchar) AS 'SELECT name from t_processed where id = in1 LIMIT 1' LANGUAGE SQL +SELECT f_processed_in_int_out_varchar(2) + f_processed_in_int_out_varchar +-------------------------------- + two +(1 row) + +Invoking f_processed_in_int_out_varchar using CallableStatement +f_processed_in_out_1param_varchar_out value of index 2 Type is java.lang.String value is two +CREATE OR REPLACE FUNCTION f_processed_varchar_in_int_out(in1 varchar, out1 OUT int) AS 'SELECT id from t_processed where name = in1 LIMIT 1' LANGUAGE SQL +SELECT f_processed_varchar_in_int_out('one') + f_processed_varchar_in_int_out +-------------------------------- + 1 +(1 row) + +Invoking f_processed_varchar_in_int_out using CallableStatement +f_processed_varchar_in_int_out value of index 2 Type is java.lang.Integer value is 1 +DROP FUNCTION f_processed_in_out_1_int_param +DROP FUNCTION f_processed_in_int_out_varchar +DROP FUNCTION f_processed_varchar_in_int_out +DROP TABLE t_processed CASCADE +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/FunctionsInoutParamBin.out b/src/test/regress/expected/FunctionsInoutParamBin.out new file mode 100644 index 000000000..015ca600f --- /dev/null +++ b/src/test/regress/expected/FunctionsInoutParamBin.out @@ -0,0 +1,74 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +insert into t_processed values('one',1,10),('two',2,20),('three',3,30),('four',4,40),('five',5,50),('six',6,60),('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100) +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS 'SELECT val, val2 from t_processed where val= out1 ORDER BY name LIMIT 1' LANGUAGE SQL +CALL f_plaintext_out (1, 1) + out1 | out2 +------+------ + 1 | 10 +(1 row) + +f_plaintext_out value of index 1 Type is java.lang.Integer value is 1 +f_plaintext_out value of index 2 Type is java.lang.Integer value is 10 +DROP FUNCTION f_plaintext_out +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS 'SELECT val, val2 from t_processed where val=out1 AND val2=out2 ORDER BY name LIMIT 1' LANGUAGE SQL +CALL f_plaintext_out (3, 30) + out1 | out2 +------+------ + 3 | 30 +(1 row) + +SELECT f_plaintext_out (3, 30) + f_plaintext_out +----------------- + (3,30) +(1 row) + +f_plaintext_out value of index 1 Type is java.lang.Integer value is 3 +f_plaintext_out value of index 2 Type is java.lang.Integer value is 30 +DROP FUNCTION f_plaintext_out +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS $$ BEGIN SELECT val, val2 from t_processed ORDER BY name LIMIT 1 INTO out1, out2; END;$$ LANGUAGE PLPGSQL +CALL f_plaintext_out (2, 3) + out1 | out2 +------+------ + 8 | 80 +(1 row) + +SELECT f_plaintext_out (2, 3) + f_plaintext_out +----------------- + (8,80) +(1 row) + +f_plaintext_out value of index 1 Type is java.lang.Integer value is 8 +f_plaintext_out value of index 2 Type is java.lang.Integer value is 80 +DROP FUNCTION f_plaintext_out +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS $$ BEGIN SELECT val, val2 from t_processed where val=out1 or val2=out2 ORDER BY name LIMIT 1 INTO out1, out2; END; $$ LANGUAGE PLPGSQL +CALL f_plaintext_out (2, 30) + out1 | out2 +------+------ + 3 | 30 +(1 row) + +SELECT f_plaintext_out (2, 30) + f_plaintext_out +----------------- + (3,30) +(1 row) + +f_plaintext_out value of index 1 Type is java.lang.Integer value is 3 +f_plaintext_out value of index 2 Type is java.lang.Integer value is 30 +DROP FUNCTION f_plaintext_out +DROP TABLE t_processed +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! 
gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/FunctionsMetadataBin.out b/src/test/regress/expected/FunctionsMetadataBin.out new file mode 100644 index 000000000..bef3bdc87 --- /dev/null +++ b/src/test/regress/expected/FunctionsMetadataBin.out @@ -0,0 +1,175 @@ +CREATE TABLE t_processed (name varchar(100) , id INT , val INT , val2 INT) +CREATE TABLE t_num(id INT, num int ) +CREATE OR REPLACE FUNCTION t_processed(in1 int, in2 int, in3 int, out1 OUT int, out2 OUT varchar, out3 OUT int) AS 'SELECT val, name, val2 from t_processed where id = in1 and id = in2 and val2 = in3 LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS $$ BEGIN SELECT val, val2 from t_processed ORDER BY name LIMIT 1 INTO out1, out2; END;$$ LANGUAGE PLPGSQL +CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT id, num from t_num;' +CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL AS 'SELECT num from t_num;' +Invoking select5 using simple query: +CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL AS 'SELECT num from t_num;'; +CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL AS 'SELECT id, num from t_num;'; +CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS +$BODY$ +DECLARE +r t_num%rowtype; +BEGIN +FOR r IN +SELECT * FROM t_num +LOOP +-- can do some processing here +RETURN NEXT r; -- return current row of SELECT +END LOOP; +RETURN; +END +$BODY$ +LANGUAGE plpgsql; +Obtaining the list of columns + PROCEDURE_CAT | PROCEDURE_SCHEM | PROCEDURE_NAME | COLUMN_NAME | COLUMN_TYPE | DATA_TYPE | TYPE_NAME | PRECISION | LENGTH | SCALE | RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTECT_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SPECIFIC_NAME +---------------+-----------------+-----------------+-------------+-------------+-----------+-----------+-----------+--------+-------+-------+----------+---------+------------+---------------+------------------+--------------------+------------------+-------------+--------------- + | public | f_plaintext_out | out1 | 2 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | f_plaintext_out | out2 | 2 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | get_rows_setof | id | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | get_rows_setof | num | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select4 | id | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select4 | num | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select5 | returnValue | 5 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select6 | returnValue | 5 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select7 | a | 5 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | select7 | b | 5 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | t_processed | in1 | 1 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | t_processed | in2 | 1 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | t_processed | in3 | 1 | 4 | int4 | | | | | 2 | | | | | | 3 | | + | public | t_processed | out1 | 4 | 4 | int4 | | | | | 2 | | | | | | 4 | | + | public | t_processed | out2 | 4 | 12 | varchar | | | | | 2 | | | | | | 5 | | + | public | t_processed | out3 | 4 | 4 | int4 | | | | | 2 | | | | | | 6 | | +(16 rows) + +DROP FUNCTION t_processed +DROP FUNCTION f_plaintext_out +DROP FUNCTION select4 +DROP FUNCTION select5 +DROP FUNCTION select6 +DROP FUNCTION select7 +DROP FUNCTION get_rows_setof +DROP TABLE t_processed +DROP TABLE t_num +\! 
gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name varchar(100) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), id INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +CREATE OR REPLACE FUNCTION t_processed(in1 int, in2 int, in3 int, out1 OUT int, out2 OUT varchar, out3 OUT int) AS 'SELECT val, name, val2 from t_processed where id = in1 and id = in2 and val2 = in3 LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS $$ BEGIN SELECT val, val2 from t_processed ORDER BY name LIMIT 1 INTO out1, out2; END;$$ LANGUAGE PLPGSQL +CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT id, num from t_num;' +CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL AS 'SELECT num from t_num;' +Invoking select5 using simple query: +CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL AS 'SELECT num from t_num;'; +CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL AS 'SELECT id, num from t_num;'; +CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS +$BODY$ +DECLARE +r t_num%rowtype; +BEGIN +FOR r IN +SELECT * FROM t_num +LOOP +-- can do some processing here +RETURN NEXT r; -- return current row of SELECT +END LOOP; +RETURN; +END +$BODY$ +LANGUAGE plpgsql; +Obtaining the list of columns + PROCEDURE_CAT | PROCEDURE_SCHEM | PROCEDURE_NAME | COLUMN_NAME | COLUMN_TYPE | DATA_TYPE | TYPE_NAME | PRECISION | LENGTH | SCALE | RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTECT_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SPECIFIC_NAME +---------------+-----------------+-----------------+-------------+-------------+-----------+-----------+-----------+--------+-------+-------+----------+---------+------------+---------------+------------------+--------------------+------------------+-------------+--------------- + | public | f_plaintext_out | out1 | 2 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | f_plaintext_out | out2 | 2 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | get_rows_setof | id | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | get_rows_setof | num | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select4 | id | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select4 | num | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select5 | returnValue | 5 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select6 | returnValue | 5 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select7 | a | 5 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | select7 | b | 5 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | t_processed | in1 | 1 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | t_processed | in2 | 1 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | t_processed | in3 | 1 | 4 | int4 | | | | | 2 | | | | | | 3 | | + | public | t_processed | out1 | 4 | 4 | int4 | | | | | 2 | | | | | | 4 | | + | public | t_processed | out2 | 4 | 12 | varchar | | | | | 2 | | | | | | 5 | | + | public | 
t_processed | out3 | 4 | 4 | int4 | | | | | 2 | | | | | | 6 | | +(16 rows) + +DROP FUNCTION t_processed +DROP FUNCTION f_plaintext_out +DROP FUNCTION select4 +DROP FUNCTION select5 +DROP FUNCTION select6 +DROP FUNCTION select7 +DROP FUNCTION get_rows_setof +DROP TABLE t_processed +DROP TABLE t_num +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 +CREATE TABLE t_processed (name varchar(100) , id INT , val INT , val2 INT) +CREATE TABLE t_num(id INT, num int ) +CREATE OR REPLACE FUNCTION t_processed(in1 int, in2 int, in3 int, out1 OUT int, out2 OUT varchar, out3 OUT int) AS 'SELECT val, name, val2 from t_processed where id = in1 and id = in2 and val2 = in3 LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS $$ BEGIN SELECT val, val2 from t_processed ORDER BY name LIMIT 1 INTO out1, out2; END;$$ LANGUAGE PLPGSQL +CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT id, num from t_num;' +CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL AS 'SELECT num from t_num;' +Invoking select5 using simple query: +CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL AS 'SELECT num from t_num;'; +CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL AS 'SELECT id, num from t_num;'; +CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS +$BODY$ +DECLARE +r t_num%rowtype; +BEGIN +FOR r IN +SELECT * FROM t_num +LOOP +-- can do some processing here +RETURN NEXT r; -- return current row of SELECT +END LOOP; +RETURN; +END +$BODY$ +LANGUAGE plpgsql; +Obtaining the list of columns + PROCEDURE_CAT | PROCEDURE_SCHEM | PROCEDURE_NAME | COLUMN_NAME | COLUMN_TYPE | DATA_TYPE | TYPE_NAME | PRECISION | LENGTH | SCALE | RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTECT_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SPECIFIC_NAME +---------------+-----------------+-----------------+-------------+-------------+-----------+-----------+-----------+--------+-------+-------+----------+---------+------------+---------------+------------------+--------------------+------------------+-------------+--------------- + | public | f_plaintext_out | out1 | 2 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | f_plaintext_out | out2 | 2 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | get_rows_setof | id | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | get_rows_setof | num | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select4 | id | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select4 | num | 3 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select5 | returnValue | 5 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select6 | returnValue | 5 | 4 | int4 | | | | | 2 | | | | | | 0 | | + | public | select7 | a | 5 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | select7 | b | 5 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | t_processed | in1 | 1 | 4 | int4 | | | | | 2 | | | | | | 1 | | + | public | t_processed | in2 | 1 | 4 | int4 | | | | | 2 | | | | | | 2 | | + | public | t_processed | in3 | 1 | 4 | int4 | | | | | 2 | | | | | | 3 | | + | public | t_processed | out1 | 4 | 4 | int4 | | | | | 2 | | | | | | 4 | | + | public | t_processed | out2 | 4 | 12 | varchar | | | | | 2 | | | | | | 5 | | + | public | t_processed | out3 | 4 | 4 | int4 | | | | | 2 | | | | | | 6 | | +(16 rows) + +DROP FUNCTION t_processed +DROP FUNCTION f_plaintext_out +DROP FUNCTION select4 +DROP FUNCTION select5 +DROP FUNCTION 
select6 +DROP FUNCTION select7 +DROP FUNCTION get_rows_setof +DROP TABLE t_processed +DROP TABLE t_num diff --git a/src/test/regress/expected/FunctionsOutParamBin.out b/src/test/regress/expected/FunctionsOutParamBin.out new file mode 100644 index 000000000..1170ccd7f --- /dev/null +++ b/src/test/regress/expected/FunctionsOutParamBin.out @@ -0,0 +1,93 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +INSERT INTO t_processed VALUES('name', 1, 2) +select * from t_processed + name | val | val2 +------+-----+------ + name | 1 | 2 +(1 row) + +CREATE TABLE t_processed_b (name text, val bytea ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT) +INSERT INTO t_processed_b VALUES('name', 'test', 2) +CREATE OR REPLACE FUNCTION f_processed_out_1param(out1 OUT int) AS 'SELECT val from t_processed LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_processed_out(out1 OUT int, out2 OUT int) AS 'SELECT val, val2 from t_processed LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_processed_out_b(out1 OUT bytea, out2 OUT int) AS 'SELECT val, val2 from t_processed_b LIMIT 1' LANGUAGE SQL +CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out out1 int, out out2 int) +as $$ +begin + select val, val2 INTO out1, out2 from t_processed; +end;$$ +LANGUAGE plpgsql +CREATE OR REPLACE FUNCTION f_processed_out_plpgsql2(out out1 t_processed.val%TYPE, out out2 t_processed.val%TYPE) +as $$ +begin + select val, val2 INTO out1, out2 from t_processed; +end;$$ +LANGUAGE plpgsql +NOTICE: type reference t_processed.val%TYPE converted to integer +NOTICE: type reference t_processed.val%TYPE converted to integer +CREATE OR REPLACE FUNCTION f_processed_aliases_plpgsql(out out1 int, out out2 int) as +$BODY$ +DECLARE + val1 ALIAS FOR out1; +begin + select val, val2 INTO val1, out2 from t_processed; +end; +$BODY$ +LANGUAGE plpgsql +select f_processed_out_1param() + f_processed_out_1param +------------------------ + 1 +(1 row) + +select f_processed_out() + f_processed_out +----------------- + (1,2) +(1 row) + +select f_processed_out_b() + f_processed_out_b +------------------- + ("\\x74657374",2) +(1 row) + +select f_processed_out_plpgsql() + f_processed_out_plpgsql +------------------------- + (1,2) +(1 row) + +select f_processed_out_plpgsql2() + f_processed_out_plpgsql2 +-------------------------- + (1,2) +(1 row) + +select f_processed_aliases_plpgsql() + f_processed_aliases_plpgsql +----------------------------- + (1,2) +(1 row) + +drop function f_processed_out_1param +drop function f_processed_out +drop function f_processed_out_b +drop function f_processed_out_plpgsql +drop function f_processed_out_plpgsql2 +drop function f_processed_aliases_plpgsql +drop table t_processed +drop table t_processed_b +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/FunctionsReturnValuesBin.out b/src/test/regress/expected/FunctionsReturnValuesBin.out new file mode 100644 index 000000000..afe5b8166 --- /dev/null +++ b/src/test/regress/expected/FunctionsReturnValuesBin.out @@ -0,0 +1,167 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_num_non_cl(id INT, num int) +INSERT INTO t_num_non_cl (id, num) VALUES (1, 5555) +INSERT INTO t_num_non_cl (id, num) VALUES (2, 6666) +SELECT * from t_num_non_cl order by id + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +CREATE TABLE IF NOT EXISTS t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)) +INSERT INTO t_num (id, num) VALUES (1, 5555) +INSERT INTO t_num (id, num) VALUES (2, 6666) +SELECT * from t_num order by id + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +CREATE FUNCTION reffunc(refcursor) RETURNS refcursor AS 'BEGIN OPEN $1 FOR SELECT * FROM t_num; RETURN $1; END; ' LANGUAGE plpgsql; +CREATE FUNCTION f_processed_return_table() RETURNS TABLE(val_p int, val2_p int) +as +$BODY$ +begin +return query (SELECT id, num from t_num); +end; +$BODY$ +language plpgsql ; + +CREATE FUNCTION select1 () RETURNS t_num LANGUAGE SQL AS 'SELECT * from t_num;' +Invoking select1 using simple query: +call select1(); + id | num +----+------ + 1 | 5555 +(1 row) + +Invoking select1 using CallableStatement: +select1 value of index 0 Type is java.lang.Integer value is 1 +select1 value of index 1 Type is java.lang.Integer value is 5555 +CREATE FUNCTION select2 () RETURNS t_num LANGUAGE SQL AS 'SELECT id, num from t_num;'; +Invoking select2 using simple query: +call select2(); + id | num +----+------ + 1 | 5555 +(1 row) + +Invoking select2 using CallableStatement: +select2 value of index 0 Type is java.lang.Integer value is 1 +select2 value of index 1 Type is java.lang.Integer value is 5555 +CREATE FUNCTION select3 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT * from t_num;' +Invoking select3 using simple query: +call select3(); + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +Invoking select3 using CallableStatement: +select3 value of index 0 Type is java.lang.Integer value is 1 +select3 value of index 1 Type is java.lang.Integer value is 5555 +CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT id, num from t_num;' +call select4(); + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +Invoking select4 using CallableStatement: +select4 value of index 0 Type is java.lang.Integer value is 1 +select4 value of index 1 Type is java.lang.Integer value is 5555 +CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL AS 'SELECT num from t_num;' +Invoking select5 using simple query: +call select5(); + select5 +--------- + 5555 +(1 row) + +Invoking select5 using CallableStatement: +select5 value of index 0 Type is java.lang.Integer value is 5555 +CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL AS 'SELECT num from t_num;'; +call select6(); + select6 +--------- + 5555 + 6666 +(2 rows) + +Invoking select6 using CallableStatement: +select6 value of index 0 Type is java.lang.Integer value is 5555 +CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL AS 'SELECT id, num from t_num;'; +call select7(); + a | b +---+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS +$BODY$ +DECLARE +r t_num%rowtype; +BEGIN +FOR r IN +SELECT * FROM t_num +LOOP +-- can do some processing here +RETURN NEXT r; -- return current row of SELECT +END LOOP; +RETURN; +END +$BODY$ 
+LANGUAGE plpgsql; +call get_rows_setof() + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +CALL f_processed_return_table(); + val_p | val2_p +-------+-------- + 1 | 5555 + 2 | 6666 +(2 rows) + +DROP FUNCTION select1; +DROP FUNCTION select2; +DROP FUNCTION select3; +DROP FUNCTION select4; +DROP FUNCTION select5; +DROP FUNCTION select6; +DROP FUNCTION select7; +DROP FUNCTION reffunc(refcursor); +DROP FUNCTION get_rows_setof(); +DROP FUNCTION f_processed_return_table(); +DROP TABLE t_num CASCADE; +SELECT COUNT(*) FROM gs_encrypted_proc; + count +------- + 0 +(1 row) + +SELECT proname, prorettype, proallargtypes FROM gs_encrypted_proc JOIN pg_proc ON pg_proc.Oid = gs_encrypted_proc.func_id; + proname | prorettype | proallargtypes +---------+------------+---------------- +(0 rows) + +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/HandlingBadSQLBin.out b/src/test/regress/expected/HandlingBadSQLBin.out new file mode 100644 index 000000000..899ce52de --- /dev/null +++ b/src/test/regress/expected/HandlingBadSQLBin.out @@ -0,0 +1,153 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int,id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)) +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_client_logic_test_tbl_pkey" for table "metadata_client_logic_test_tbl" +*******inserting data to the metadata_client_logic_test_tbl; +insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_simple_test_tbl_pkey" for table "metadata_simple_test_tbl" +*******inserting data to the metadata_simple_test_tbl +insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_simple_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+--------------------- + 1 | 2 | test_data_4_meta_data | 1.10000000000000009 +(1 row) + +Testing table with client logic ... 
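(The statements below are deliberately malformed or mistyped; the point of this fixture is that a connection with client logic enabled must surface the server's parse and catalog errors unchanged. A minimal sketch of how such a check might be driven from JDBC, assuming the driver's enable_ce connection-string switch; the URL, credentials, and class name are illustrative placeholders, not taken from this suite:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class BadSqlClientLogicSketch {
    public static void main(String[] args) {
        // Placeholder URL and credentials; enable_ce=1 is assumed here as the
        // connection-string switch that turns on client logic for the session.
        String url = "jdbc:postgresql://localhost:5432/regression?enable_ce=1";
        try (Connection conn = DriverManager.getConnection(url, "user", "password");
             Statement stmt = conn.createStatement()) {
            stmt.execute("select 1* from metadata_client_logic_test_tbl;");
        } catch (SQLException e) {
            // The driver should pass the server error through unchanged, e.g.:
            // ERROR: syntax error at or near "from"
            System.out.println(e.getMessage());
        }
    }
}
)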
+select 1* from metadata_client_logic_test_tbl; +ERROR: syntax error at or near "from" +LINE 1: select 1* from metadata_client_logic_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +from * select metadata_client_logic_test_tbl; +ERROR: syntax error at or near "from" +LINE 1: from * select metadata_client_logic_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select col from metadata_client_logic_test_tbl; +ERROR: column "col" does not exist +LINE 1: select col from metadata_client_logic_test_tbl; + ^ +CONTEXT: referenced column: col +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * frm metadata_client_logic_test_tbl; +ERROR: syntax error at or near "frm" +LINE 1: select * frm metadata_client_logic_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * from "mEtadata_client_logic_test_tbl"; +ERROR: relation "mEtadata_client_logic_test_tbl" does not exist on datanode1 +LINE 1: select * from "mEtadata_client_logic_test_tbl"; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * from metadata_client_logic_test_tbl select * from metadata_client_logic_test_tbl;; +ERROR: syntax error at or near "select" +LINE 1: select * from metadata_client_logic_test_tbl select * from m... + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +drop table metadata_client_logic_test_tbl; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 +Testing table with no client logic ... +select 1* from metadata_simple_test_tbl; +ERROR: syntax error at or near "from" +LINE 1: select 1* from metadata_simple_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +from * select metadata_simple_test_tbl; +ERROR: syntax error at or near "from" +LINE 1: from * select metadata_simple_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select col from metadata_simple_test_tbl; +ERROR: column "col" does not exist +LINE 1: select col from metadata_simple_test_tbl; + ^ +CONTEXT: referenced column: col +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * frm metadata_simple_test_tbl; +ERROR: syntax error at or near "frm" +LINE 1: select * frm metadata_simple_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * from "mEtadata_simple_test_tbl"; +ERROR: relation "mEtadata_simple_test_tbl" does not exist on datanode1 +LINE 1: select * from "mEtadata_simple_test_tbl"; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * from metadata_simple_test_tbl select * from metadata_simple_test_tbl; +ERROR: syntax error at or near "select" +LINE 1: select * from metadata_simple_test_tbl select * from metadat... + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Testing table with no client logic and with no client logic in connection string ... 
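(This final pass repeats the same battery against the regular table over a plain connection, as a control: the errors should come back identical whether or not client logic is negotiated. A minimal sketch of the connection-string difference being exercised, with placeholder host and database names:

String plainUrl = "jdbc:postgresql://localhost:5432/regression";             // no client-logic parameter
String ceUrl    = "jdbc:postgresql://localhost:5432/regression?enable_ce=1"; // client logic enabled
)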
+select 1* from metadata_simple_test_tbl; +ERROR: syntax error at or near "from" +LINE 1: select 1* from metadata_simple_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +from * select metadata_simple_test_tbl; +ERROR: syntax error at or near "from" +LINE 1: from * select metadata_simple_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select col from metadata_simple_test_tbl; +ERROR: column "col" does not exist +LINE 1: select col from metadata_simple_test_tbl; + ^ +CONTEXT: referenced column: col +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * frm metadata_simple_test_tbl; +ERROR: syntax error at or near "frm" +LINE 1: select * frm metadata_simple_test_tbl; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * from "mEtadata_simple_test_tbl"; +ERROR: relation "mEtadata_simple_test_tbl" does not exist on datanode1 +LINE 1: select * from "mEtadata_simple_test_tbl"; + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select * from metadata_simple_test_tbl select * from metadata_simple_test_tbl; +ERROR: syntax error at or near "select" +LINE 1: select * from metadata_simple_test_tbl select * from metadat... + ^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_simple_test_tbl; diff --git a/src/test/regress/expected/MultiThreadClientLogicBin.out b/src/test/regress/expected/MultiThreadClientLogicBin.out new file mode 100644 index 000000000..93012da44 --- /dev/null +++ b/src/test/regress/expected/MultiThreadClientLogicBin.out @@ -0,0 +1,81 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id int, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +DROP TABLE t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_1') +SELECT * FROM t_varchar ORDER BY ID + id | name +----+---------- + 1 | worker_1 +(1 row) + +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_2') +SELECT * FROM t_varchar ORDER BY ID + id | name +----+---------- + 1 | worker_1 + 2 | worker_2 +(2 rows) + +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_3') +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_3') +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_3') +SELECT * FROM t_varchar ORDER BY ID + id | name +----+---------- + 1 | worker_1 + 2 | worker_2 + 3 | worker_3 + 4 | worker_3 + 5 | worker_3 +(5 rows) + +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_4') +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_4') +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_4') +SELECT * FROM t_varchar ORDER BY ID + id | name +----+---------- + 1 | worker_1 + 2 | worker_2 + 3 | worker_3 + 4 | worker_3 + 5 | worker_3 +--? 6 | worker_. +--? . | worker_. +--? 7 | worker_. +--? . | worker_. +--? 8 | worker_. +--? . | worker_. 
+(11 rows) + +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_5') +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_5') +INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_5') +SELECT * FROM t_varchar ORDER BY ID + id | name +----+---------- + 1 | worker_1 + 2 | worker_2 + 3 | worker_3 + 4 | worker_3 + 5 | worker_3 +--? 6 | worker_. +--? . | worker_. +--? 7 | worker_. +--? . | worker_. +--? 8 | worker_. +--? . | worker_. +(11 rows) + diff --git a/src/test/regress/expected/NullEmptyStringBin.out b/src/test/regress/expected/NullEmptyStringBin.out new file mode 100644 index 000000000..10a7d763c --- /dev/null +++ b/src/test/regress/expected/NullEmptyStringBin.out @@ -0,0 +1,67 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_not_cl(id INT, name varchar(50)); +INSERT INTO t_not_cl (id, name) VALUES (?, ?); +1,MyName +INSERT INTO t_not_cl (id) VALUES (2); +INSERT INTO t_not_cl (id, name) VALUES (?, ?); +3, +INSERT INTO t_not_cl (id, name) VALUES (?, ?); +4, +select id is null, name is null from t_not_cl order by id; + ?column? | ?column? +----------+---------- + f | f + f | t + f | t + f | t +(4 rows) + +select * from t_not_cl order by id; + id | name +----+-------- + 1 | MyName + 2 | + 3 | + 4 | +(4 rows) + +CREATE TABLE IF NOT EXISTS t_with_cl(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_with_cl (id, name) VALUES (?, ?); +1,MyName +INSERT INTO t_with_cl (id) VALUES (2); +INSERT INTO t_with_cl (id, name) VALUES (?, ?); +3, +INSERT INTO t_with_cl (id, name) VALUES (?, ?); +4, +select id is null, name is null from t_with_cl order by id; + ?column? | ?column? +----------+---------- + f | f + f | t + f | t + f | t +(4 rows) + +select * from t_with_cl order by id; + id | name +----+-------- + 1 | MyName + 2 | + 3 | + 4 | +(4 rows) + +DROP TABLE t_not_cl; +DROP TABLE t_with_cl; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ParameterMetaDataTestBin.out b/src/test/regress/expected/ParameterMetaDataTestBin.out new file mode 100644 index 000000000..6d63d19d8 --- /dev/null +++ b/src/test/regress/expected/ParameterMetaDataTestBin.out @@ -0,0 +1,142 @@ + +Test with client logic table +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int,id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)) +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_client_logic_test_tbl_pkey" for table "metadata_client_logic_test_tbl" +*******inserting data to the metadata_client_logic_test_tbl; +insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +there is a support for the ParameterMetaData +paramCount=2 +param number=1 +param mode=1 +the parameter's mode is IN. +param type = 1111 +param class name = null +param count = 2 +param precision = 0 +param scale = 0 +param isNullable = 2 +param isSugned = false + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +param number=2 +param mode=1 +the parameter's mode is IN. +param type = 1111 +param class name = null +param count = 2 +param precision = 0 +param scale = 0 +param isNullable = 2 +param isSugned = false + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_client_logic_test_tbl; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 + +Test with client logic connection on regular table +create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_simple_test_tbl_pkey" for table "metadata_simple_test_tbl" +*******inserting data to the metadata_simple_test_tbl +insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_simple_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+--------------------- + 1 | 2 | test_data_4_meta_data | 1.10000000000000009 +(1 row) + +there is a support for the ParameterMetaData +paramCount=2 +param number=1 +param mode=1 +the parameter's mode is IN. +param type = 4 +param class name = java.lang.Integer +param count = 2 +param precision = 0 +param scale = 0 +param isNullable = 2 +param isSugned = true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +param number=2 +param mode=1 +the parameter's mode is IN. 
+param type = 12 +param class name = java.lang.String +param count = 2 +param precision = 0 +param scale = 0 +param isNullable = 2 +param isSugned = false + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_simple_test_tbl; + +Test with regular connection on regular table +create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_simple_test_tbl_pkey" for table "metadata_simple_test_tbl" +*******inserting data to the metadata_simple_test_tbl +insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_simple_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+--------------------- + 1 | 2 | test_data_4_meta_data | 1.10000000000000009 +(1 row) + +there is a support for the ParameterMetaData +paramCount=2 +param number=1 +param mode=1 +the parameter's mode is IN. +param type = 4 +param class name = java.lang.Integer +param count = 2 +param precision = 0 +param scale = 0 +param isNullable = 2 +param isSugned = true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +param number=2 +param mode=1 +the parameter's mode is IN. +param type = 12 +param class name = java.lang.String +param count = 2 +param precision = 0 +param scale = 0 +param isNullable = 2 +param isSugned = false + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_simple_test_tbl; diff --git a/src/test/regress/expected/PrepareStatamentIntBin.out b/src/test/regress/expected/PrepareStatamentIntBin.out new file mode 100644 index 000000000..4bd4468ef --- /dev/null +++ b/src/test/regress/expected/PrepareStatamentIntBin.out @@ -0,0 +1,65 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_int(key int,_smallint_ smallint ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_int_ int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_bigint_ bigint ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +*******inserting data to the int table +INSERT INTO t_int (key, _smallint_, _int_, _bigint_) VALUES (?,?,?,?); +1,-3333,0,3333 +INSERT INTO t_int (key, _smallint_, _int_, _bigint_) VALUES (?,?,?,?); +2,-1234,256,1234 +*************inserting data verification +select * from t_int where (_smallint_ = ? and _int_ =?) or _bigint_ =? order by key +-3333,0,1234 + key | _smallint_ | _int_ | _bigint_ +-----+------------+-------+---------- + 1 | -3333 | 0 | 3333 + 2 | -1234 | 256 | 1234 +(2 rows) + +***************updating data +Update t_int set _smallint_= ? , _int_= ? where _bigint_ = ? or key = ? +5555,5555,1234,2 +**************updating data verification +select * from t_int where _smallint_ = ? and _int_ =? +5555,5555 + key | _smallint_ | _int_ | _bigint_ +-----+------------+-------+---------- + 2 | 5555 | 5555 | 1234 +(1 row) + +*************deleting data +delete from t_int where _smallint_= ? and _bigint_= ? and _int_ = ? +5555,1234,5555 +*******************deleting data verification +select * from t_int where _smallint_= ? and _bigint_= ? 
and _int_ = ? +5555,1234,5555 + key | _smallint_ | _int_ | _bigint_ +-----+------------+-------+---------- +(0 rows) + +select * from t_int; + + key | _smallint_ | _int_ | _bigint_ +-----+------------+-------+---------- + 1 | -3333 | 0 | 3333 +(1 row) + +*************deleting all data +delete from t_int + +No results were returned by the query. +**************deleting all data verification +select * from t_int; + + key | _smallint_ | _int_ | _bigint_ +-----+------------+-------+---------- +(0 rows) + +drop table t_int; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatamentNumericBin.out b/src/test/regress/expected/PrepareStatamentNumericBin.out new file mode 100644 index 000000000..342f4b7b0 --- /dev/null +++ b/src/test/regress/expected/PrepareStatamentNumericBin.out @@ -0,0 +1,65 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_numeric(key int,_real_ real ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_decimal_ decimal ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_doubleprecision_ double precision ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_numeric_ numeric ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +*******inserting data to the serial table +INSERT INTO t_numeric (key, _real_, _decimal_, _numeric_, _doubleprecision_) VALUES (?,?,?,?,?); +1,1234.1234,5678.5678,91011.91011,12131415.12131415 +INSERT INTO t_numeric (key, _real_, _decimal_, _numeric_, _doubleprecision_) VALUES (?,?,?,?,?); +2,111213.111213,141516.141516,17181920.17181920,2122232425.2122232425 +*************inserting data verification +select * from t_numeric where (_real_ = ? and _decimal_ =?) or (_numeric_ =? and _doubleprecision_= ?) order by key; +1234.1234,5678.5678,17181920.17181920,2122232425.2122232425 + key | _real_ | _decimal_ | _doubleprecision_ | _numeric_ +-----+---------+---------------+-------------------+------------------- + 1 | 1234.12 | 5678.5678 | 12131415.1213141 | 91011.91011 + 2 | 111213 | 141516.141516 | 2122232425.21222 | 17181920.17181920 +(2 rows) + +***************updating data +Update t_numeric set _real_= ? , _decimal_= ? where _numeric_ = ? or key = ? +212223.212223,24252627.24252627,17181920.17181920,2 +**************updating data verification +select * from t_numeric where _real_ = ? and _decimal_ =? +212223.212223,24252627.24252627 + key | _real_ | _decimal_ | _doubleprecision_ | _numeric_ +-----+--------+-------------------+-------------------+------------------- + 2 | 212223 | 24252627.24252627 | 2122232425.21222 | 17181920.17181920 +(1 row) + +*************deleting data +delete from t_numeric where _real_= ? and _numeric_= ? and _decimal_ = ? and _doubleprecision_ =? +212223.212223,17181920.17181920,24252627.24252627,2122232425.2122232425 +*******************deleting data verification +select * from t_numeric where _real_= ? and _numeric_= ? and _decimal_ = ? and _doubleprecision_ =? 
+212223.212223,17181920.17181920,24252627.24252627,2122232425.2122232425 + key | _real_ | _decimal_ | _doubleprecision_ | _numeric_ +-----+--------+-----------+-------------------+----------- +(0 rows) + +select * from t_numeric; + + key | _real_ | _decimal_ | _doubleprecision_ | _numeric_ +-----+---------+-----------+-------------------+------------- + 1 | 1234.12 | 5678.5678 | 12131415.1213141 | 91011.91011 +(1 row) + +*************deleting all data +delete from t_numeric; + +No results were returned by the query. +**************deleting all data verification +select * from t_numeric; + + key | _real_ | _decimal_ | _doubleprecision_ | _numeric_ +-----+--------+-----------+-------------------+----------- +(0 rows) + +drop table t_numeric; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatamentStringBin.out b/src/test/regress/expected/PrepareStatamentStringBin.out new file mode 100644 index 000000000..46358df4d --- /dev/null +++ b/src/test/regress/expected/PrepareStatamentStringBin.out @@ -0,0 +1,65 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_string(key int,_varchar_ varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_char_ char(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),_text_ text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +*******inserting data to the serial table +INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (?,?,?,?); +1,varchar data,char data,text data +INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (?,?,?,?); +2,varchar data 2,char data 2,text data 2 +*************inserting data verification +select * from t_string where (_varchar_ = ? and _char_ = ?) or _text_ =? order by key +varchar data,char data,text data 2 + key | _varchar_ | _char_ | _text_ +-----+----------------+----------------------------------------------------+------------- + 1 | varchar data | char data | text data + 2 | varchar data 2 | char data 2 | text data 2 +(2 rows) + +***************updating data +Update t_string set _varchar_ =? , _char_ = ? where _text_ = ? or key = ? +varchar updated data,char updated data,text data 2,2 +**************updating data verification +select * from t_string where _varchar_ = ? and _char_ = ? +varchar updated data,char updated data + key | _varchar_ | _char_ | _text_ +-----+----------------------+----------------------------------------------------+------------- + 2 | varchar updated data | char updated data | text data 2 +(1 row) + +*************deleting data +delete from t_string where _varchar_ = ? and _text_ = ? and _char_ = ? +varchar updated data,text data 2,char updated data +*******************deleting data verification +select * from t_string where _varchar_ = ? and _text_ = ? and _char_ = ? 
+varchar updated data,text data 2,char updated data + key | _varchar_ | _char_ | _text_ +-----+-----------+--------+-------- +(0 rows) + +select * from t_string + + key | _varchar_ | _char_ | _text_ +-----+--------------+----------------------------------------------------+----------- + 1 | varchar data | char data | text data +(1 row) + +*************deleting all data +delete from t_string + +No results were returned by the query. +**************deleting all data verification +select * from t_string + + key | _varchar_ | _char_ | _text_ +-----+-----------+--------+-------- +(0 rows) + +drop table t_string; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatamentVarCharBin.out b/src/test/regress/expected/PrepareStatamentVarCharBin.out new file mode 100644 index 000000000..1004f31b4 --- /dev/null +++ b/src/test/regress/expected/PrepareStatamentVarCharBin.out @@ -0,0 +1,86 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +NOTICE: relation "t_varchar" already exists, skipping +INSERT INTO t_varchar (id, name, address) VALUES (?,?,?); +1,MyName,MyAddress +SELECT * from t_varchar ORDER BY id; + id | name | address +----+--------+----------- + 0 | MyName | MyAddress + 1 | MyName | MyAddress + 1 | MyName | MyAddress + 2 | MyName | MyAddress + 3 | MyName | MyAddress + 4 | MyName | MyAddress + 5 | MyName | MyAddress + 6 | MyName | MyAddress + 7 | MyName | MyAddress + 8 | MyName | MyAddress + 9 | MyName | MyAddress + 10 | MyName | MyAddress + 11 | MyName | MyAddress + 12 | MyName | MyAddress + 13 | MyName | MyAddress + 14 | MyName | MyAddress + 15 | MyName | MyAddress + 16 | MyName | MyAddress + 17 | MyName | MyAddress + 18 | MyName | MyAddress + 19 | MyName | MyAddress + 20 | MyName | MyAddress + 21 | MyName | MyAddress + 22 | MyName | MyAddress + 23 | MyName | MyAddress + 24 | MyName | MyAddress + 25 | MyName | MyAddress + 26 | MyName | MyAddress + 27 | MyName | MyAddress + 28 | MyName | MyAddress + 29 | MyName | MyAddress + 30 | MyName | MyAddress + 31 | MyName | MyAddress + 32 | MyName | MyAddress + 33 | MyName | MyAddress + 34 | MyName | MyAddress + 35 | MyName | MyAddress + 36 | MyName | MyAddress + 37 | MyName | MyAddress + 38 | MyName | MyAddress + 39 | MyName | MyAddress + 40 | MyName | MyAddress + 41 | MyName | MyAddress + 42 | MyName | MyAddress + 43 | MyName | MyAddress + 44 | MyName | MyAddress + 45 | MyName | MyAddress + 46 | MyName | MyAddress + 47 | MyName | MyAddress + 48 | MyName | MyAddress + 49 | MyName | MyAddress + 50 | MyName | MyAddress + 51 | MyName | MyAddress + 52 | MyName | MyAddress + 53 | MyName | MyAddress + 54 | MyName | MyAddress + 55 | MyName | MyAddress + 56 | MyName | MyAddress + 57 | MyName | MyAddress + 58 | MyName | MyAddress + 59 | MyName | MyAddress + 60 | MyName | MyAddress + 61 | MyName | MyAddress + 62 | MyName | MyAddress + 63 | MyName | MyAddress + 64 | MyName | MyAddress +(66 rows) + +drop table t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git 
a/src/test/regress/expected/PrepareStatamentWithHardCodedValuesBin.out b/src/test/regress/expected/PrepareStatamentWithHardCodedValuesBin.out new file mode 100644 index 000000000..e155fe2ff --- /dev/null +++ b/src/test/regress/expected/PrepareStatamentWithHardCodedValuesBin.out @@ -0,0 +1,41 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_varchar (id, name, address) VALUES (?,?,?); +1,MyName,MyAddress +INSERT INTO t_varchar (id, name, address) VALUES (?,?,?); +2,MyName2,MyAddress2 +INSERT INTO t_varchar (id, name, address) VALUES (?, 'MyName3',?); +3,MyAddress3 +SELECT * from t_varchar ORDER BY id; + id | name | address +----+---------+------------ + 1 | MyName | MyAddress + 2 | MyName2 | MyAddress2 + 3 | MyName3 | MyAddress3 +(3 rows) + +select * from t_varchar where name = ? and address = 'MyAddress'; +MyName + id | name | address +----+--------+----------- + 1 | MyName | MyAddress +(1 row) + +SELECT * from t_varchar ORDER BY id; + id | name | address +----+---------+------------ + 1 | MyName | MyAddress + 2 | MyName2 | MyAddress2 + 3 | MyName3 | MyAddress3 +(3 rows) + +drop table t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatementDeleteResultSetBin.out b/src/test/regress/expected/PrepareStatementDeleteResultSetBin.out new file mode 100644 index 000000000..37a6ebc2d --- /dev/null +++ b/src/test/regress/expected/PrepareStatementDeleteResultSetBin.out @@ -0,0 +1,35 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_delete_rows_tbl(key int PRIMARY KEY ,col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_delete_rows_tbl_pkey" for table "t_delete_rows_tbl" +*******inserting data to the t_delete_rows_tbl +INSERT INTO t_delete_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +1,this_row_will_be_deleted,1,1.1 +INSERT INTO t_delete_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +2,this_row_will_not_deleted,2,2.2 +*************verify data before the delete +select * from t_delete_rows_tbl order by key; + + key | col_varchar | col_int | col_float +-----+---------------------------+---------+----------- + 1 | this_row_will_be_deleted | 1 | 1.1 + 2 | this_row_will_not_deleted | 2 | 2.2 +(2 rows) + +*************verifying the deleted data +select * from t_delete_rows_tbl; + + key | col_varchar | col_int | col_float +-----+---------------------------+---------+----------- + 2 | this_row_will_not_deleted | 2 | 2.2 +(1 row) + +drop table t_delete_rows_tbl; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatementInsertResultSetBin.out b/src/test/regress/expected/PrepareStatementInsertResultSetBin.out new file mode 100644 index 000000000..0fea838d1 --- /dev/null +++ b/src/test/regress/expected/PrepareStatementInsertResultSetBin.out @@ -0,0 +1,37 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_insert_rows_tbl(key int PRIMARY KEY ,col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_insert_rows_tbl_pkey" for table "t_insert_rows_tbl" +*******inserting data to the t_insert_rows_tbl +INSERT INTO t_insert_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +1,this_row_exists_before_insert,1,1.1 +INSERT INTO t_insert_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +2,this_row_exists_before_insert,2,2.2 +*************verify data before the insert +select * from t_insert_rows_tbl order by key; + + key | col_varchar | col_int | col_float +-----+-------------------------------+---------+----------- + 1 | this_row_exists_before_insert | 1 | 1.1 + 2 | this_row_exists_before_insert | 2 | 2.2 +(2 rows) + +*************verifying the insert data +select * from t_insert_rows_tbl order by key desc; + + key | col_varchar | col_int | col_float +-----+-------------------------------+---------+----------- + 3 | new_data_was_inserted | 3 | 3 + 2 | this_row_exists_before_insert | 2 | 2.2 + 1 | this_row_exists_before_insert | 1 | 1.1 +(3 rows) + +drop table t_insert_rows_tbl; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatementUniqueBin.out b/src/test/regress/expected/PrepareStatementUniqueBin.out new file mode 100644 index 000000000..a7b276dc1 --- /dev/null +++ b/src/test/regress/expected/PrepareStatementUniqueBin.out @@ -0,0 +1,31 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_unique(id INT, name text UNIQUE ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "t_unique_name_key" for table "t_unique" +INSERT INTO t_unique values (?,?) +5,John +INSERT INTO t_unique values (?,?) +2,Moses +INSERT INTO t_unique values (?,?) +6,John +ERROR: duplicate key value violates unique constraint "t_unique_name_key" +--?.* +SELECT * FROM t_unique order by id + id | name +----+------- + 2 | Moses + 5 | John +(2 rows) + +DROP TABLE t_unique +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/PrepareStatementUpdateResultSetBin.out b/src/test/regress/expected/PrepareStatementUpdateResultSetBin.out new file mode 100644 index 000000000..08d3db2bf --- /dev/null +++ b/src/test/regress/expected/PrepareStatementUpdateResultSetBin.out @@ -0,0 +1,36 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
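PrepareStatementUniqueBin.out shows that a UNIQUE constraint keeps working on a DETERMINISTIC-encrypted column: equal plaintexts encrypt to equal ciphertexts, so the implicit unique index rejects the second 'John'. A sketch of catching that failure on the client (connection details are placeholders; 23505 is the PostgreSQL-style SQLState for unique_violation):

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class UniqueViolationSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password");
             PreparedStatement ps = conn.prepareStatement("INSERT INTO t_unique VALUES (?, ?)")) {
            ps.setInt(1, 6);
            ps.setString(2, "John"); // 'John' already exists; same plaintext, same ciphertext
            try {
                ps.executeUpdate();
            } catch (SQLException e) {
                // duplicate key value violates unique constraint "t_unique_name_key"
                System.out.println(e.getSQLState() + ": " + e.getMessage());
            }
        }
    }
}
```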
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_update_rows_tbl(key int PRIMARY KEY ,col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_update_rows_tbl_pkey" for table "t_update_rows_tbl" +*******inserting data to the t_update_rows_tbl +INSERT INTO t_update_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +1,this_row_will_be_updated,1,1.1 +INSERT INTO t_update_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +2,this_row_will_not_updated,2,2.2 +*************verify data before the update +select * from t_update_rows_tbl order by key + + key | col_varchar | col_int | col_float +-----+---------------------------+---------+----------- + 1 | this_row_will_be_updated | 1 | 1.1 + 2 | this_row_will_not_updated | 2 | 2.2 +(2 rows) + +*************verifying the updated data +select * from t_update_rows_tbl order by key + + key | col_varchar | col_int | col_float +-----+---------------------------+---------+----------- + 1 | this_row_was_updated | 10 | 10 + 2 | this_row_will_not_updated | 2 | 2.2 +(2 rows) + +drop table t_update_rows_tbl; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/PrepareStatementViewBin.out b/src/test/regress/expected/PrepareStatementViewBin.out new file mode 100644 index 000000000..8029c47b9 --- /dev/null +++ b/src/test/regress/expected/PrepareStatementViewBin.out @@ -0,0 +1,47 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
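The update fixture follows the same shape: encrypted columns can be set through bound parameters while the plain key column drives the WHERE clause. A short sketch, same assumptions as the previous examples:

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class UpdateRowSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password");
             PreparedStatement ps = conn.prepareStatement(
                     "UPDATE t_update_rows_tbl SET col_varchar = ?, col_int = ?, col_float = ? WHERE key = ?")) {
            ps.setString(1, "this_row_was_updated");
            ps.setInt(2, 10);
            ps.setDouble(3, 10.0);
            ps.setInt(4, 1); // key is not encrypted, so the filter is a plain comparison
            System.out.println(ps.executeUpdate() + " row(s) updated"); // expect 1
        }
    }
}
```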
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_table_4_view(key int PRIMARY KEY ,col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_table_4_view_pkey" for table "t_table_4_view" +*******inserting data to the t_table_4_view +INSERT INTO t_table_4_view (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +1,data_4_view,1,1.1 +INSERT INTO t_table_4_view (key, col_varchar, col_int, col_float) VALUES (?,?,?,?); +2,data_4_view2,2,2.2 +*************verify data before creating the view +select * from t_table_4_view order by key; + + key | col_varchar | col_int | col_float +-----+--------------+---------+----------- + 1 | data_4_view | 1 | 1.1 + 2 | data_4_view2 | 2 | 2.2 +(2 rows) + +CREATE View v_view_from_table_4_view as select * from t_table_4_view; +*************verifying that new view was successfully created +select * from v_view_from_table_4_view order by key; + + key | col_varchar | col_int | col_float +-----+--------------+---------+----------- + 1 | data_4_view | 1 | 1.1 + 2 | data_4_view2 | 2 | 2.2 +(2 rows) + +**************verifying view creation +select *, col_int from v_view_from_table_4_view where col_float = ? or key = ? +2.2,1 + key | col_varchar | col_int | col_float | col_int +-----+--------------+---------+-----------+--------- + 2 | data_4_view2 | 2 | 2.2 | 2 + 1 | data_4_view | 1 | 1.1 | 1 +(2 rows) + +drop view v_view_from_table_4_view; +drop table t_table_4_view; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; diff --git a/src/test/regress/expected/ResultSetFetchNoCLBin.out b/src/test/regress/expected/ResultSetFetchNoCLBin.out new file mode 100644 index 000000000..4ba460d5d --- /dev/null +++ b/src/test/regress/expected/ResultSetFetchNoCLBin.out @@ -0,0 +1,141 @@ +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50), address varchar(50)); +INSERT INTO t_varchar (id, name, address) VALUES (0, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (2, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (3, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (4, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (5, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (6, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (7, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (8, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (9, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (10, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (11, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (12, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (13, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (14, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) 
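PrepareStatementViewBin.out, which ends just above, demonstrates that a view defined over an encrypted table stays queryable and that parameters still bind against its encrypted columns. A sketch under the usual placeholder assumptions:

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

public class ViewOverEncryptedTableSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password")) {
            try (Statement st = conn.createStatement()) {
                st.execute("CREATE VIEW v_view_from_table_4_view AS SELECT * FROM t_table_4_view");
            }
            // Parameters bind against the view's encrypted columns just as they
            // would against the base table.
            try (PreparedStatement ps = conn.prepareStatement(
                    "SELECT * FROM v_view_from_table_4_view WHERE col_float = ? OR key = ?")) {
                ps.setDouble(1, 2.2);
                ps.setInt(2, 1);
                try (ResultSet rs = ps.executeQuery()) {
                    while (rs.next()) {
                        System.out.println(rs.getInt("key") + " | " + rs.getString("col_varchar"));
                    }
                }
            }
        }
    }
}
```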
VALUES (15, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (16, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (17, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (18, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (19, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (20, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (21, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (22, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (23, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (24, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (25, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (26, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (27, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (28, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (29, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (30, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (31, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (32, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (33, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (34, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (35, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (36, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (37, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (38, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (39, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (40, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (41, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (42, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (43, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (44, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (45, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (46, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (47, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (48, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (49, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (50, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (51, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (52, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (53, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (54, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (55, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (56, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (57, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (58, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (59, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (60, 'MyName', 'MyAddress') +INSERT 
INTO t_varchar (id, name, address) VALUES (61, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (62, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (63, 'MyName', 'MyAddress') +INSERT INTO t_varchar (id, name, address) VALUES (64, 'MyName', 'MyAddress') + id | name | address +----+--------+----------- + 0 | MyName | MyAddress + 1 | MyName | MyAddress + 2 | MyName | MyAddress + 3 | MyName | MyAddress + 4 | MyName | MyAddress + 5 | MyName | MyAddress + 6 | MyName | MyAddress + 7 | MyName | MyAddress + 8 | MyName | MyAddress + 9 | MyName | MyAddress + 10 | MyName | MyAddress + 11 | MyName | MyAddress + 12 | MyName | MyAddress + 13 | MyName | MyAddress + 14 | MyName | MyAddress + 15 | MyName | MyAddress + 16 | MyName | MyAddress + 17 | MyName | MyAddress + 18 | MyName | MyAddress + 19 | MyName | MyAddress + 20 | MyName | MyAddress + 21 | MyName | MyAddress + 22 | MyName | MyAddress + 23 | MyName | MyAddress + 24 | MyName | MyAddress + 25 | MyName | MyAddress + 26 | MyName | MyAddress + 27 | MyName | MyAddress + 28 | MyName | MyAddress + 29 | MyName | MyAddress + 30 | MyName | MyAddress + 31 | MyName | MyAddress + 32 | MyName | MyAddress + 33 | MyName | MyAddress + 34 | MyName | MyAddress + 35 | MyName | MyAddress + 36 | MyName | MyAddress + 37 | MyName | MyAddress + 38 | MyName | MyAddress + 39 | MyName | MyAddress + 40 | MyName | MyAddress + 41 | MyName | MyAddress + 42 | MyName | MyAddress + 43 | MyName | MyAddress + 44 | MyName | MyAddress + 45 | MyName | MyAddress + 46 | MyName | MyAddress + 47 | MyName | MyAddress + 48 | MyName | MyAddress + 49 | MyName | MyAddress + 50 | MyName | MyAddress + 51 | MyName | MyAddress + 52 | MyName | MyAddress + 53 | MyName | MyAddress + 54 | MyName | MyAddress + 55 | MyName | MyAddress + 56 | MyName | MyAddress + 57 | MyName | MyAddress + 58 | MyName | MyAddress + 59 | MyName | MyAddress + 60 | MyName | MyAddress + 61 | MyName | MyAddress + 62 | MyName | MyAddress + 63 | MyName | MyAddress + 64 | MyName | MyAddress +(65 rows) + +DROP table t_varchar +select * from gs_column_keys + column_key_name | column_key_distributed_id | global_key_id | key_namespace | key_owner | create_date | key_acl +--?.* +(0 rows) + diff --git a/src/test/regress/expected/ResultSetMetaDataTestBin.out b/src/test/regress/expected/ResultSetMetaDataTestBin.out new file mode 100644 index 000000000..787df54db --- /dev/null +++ b/src/test/regress/expected/ResultSetMetaDataTestBin.out @@ -0,0 +1,282 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
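ResultSetFetchNoCLBin.out inserts 65 rows and reads them all back, which is the classic fetch-size scenario. In PostgreSQL-derived drivers such as openGauss's, setFetchSize() only streams rows in batches when autocommit is off; a sketch with placeholder connection details (this variant runs without client logic, matching the "NoCL" fixture):

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FetchSizeSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres"; // placeholders, no client logic
        try (Connection conn = DriverManager.getConnection(url, "omm", "password")) {
            // Fetch-size batching requires an open transaction in this driver family.
            conn.setAutoCommit(false);
            try (Statement st = conn.createStatement()) {
                st.setFetchSize(10); // the cursor returns 10 rows per round trip
                try (ResultSet rs = st.executeQuery("SELECT * FROM t_varchar ORDER BY id")) {
                    int rows = 0;
                    while (rs.next()) {
                        rows++;
                    }
                    System.out.println(rows + " rows fetched"); // expect 65
                }
            }
            conn.commit();
        }
    }
}
```

The point of running the same scenario with and without client logic (ResultsetFetchBin.out below) is that batched cursor fetches must behave identically in both modes.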
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int,id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)) +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_client_logic_test_tbl_pkey" for table "metadata_client_logic_test_tbl" +*******inserting data to the metadata_client_logic_test_tbl; +insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_simple_test_tbl_pkey" for table "metadata_simple_test_tbl" +*******inserting data to the metadata_simple_test_tbl +insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_simple_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+--------------------- + 1 | 2 | test_data_4_meta_data | 1.10000000000000009 +(1 row) + +Testing table with client logic ... 
+Index: 1 column name: key + getColumnDisplaySize is: 11 + getColumnClassName is: java.lang.Integer + getColumnLabel is: key + getColumnType is: 4 + getColumnTypeName is: int4 + getPrecision is: 10 + getScale is: 0 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 2 column name: id + getColumnDisplaySize is: 11 + getColumnClassName is: java.lang.Integer + getColumnLabel is: id + getColumnType is: 4 + getColumnTypeName is: int4 + getPrecision is: 10 + getScale is: 0 + isNullable is: 0 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 3 column name: char_col + getColumnDisplaySize is: 30 + getColumnClassName is: java.lang.String + getColumnLabel is: char_col + getColumnType is: 12 + getColumnTypeName is: varchar + getPrecision is: 30 + getScale is: 0 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: true + isCurrency is: false + isReadOnly is: false + isSigned is: false + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 4 column name: float_col + getColumnDisplaySize is: 25 + getColumnClassName is: java.lang.Double + getColumnLabel is: float_col + getColumnType is: 8 + getColumnTypeName is: float8 + getPrecision is: 17 + getScale is: 17 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_client_logic_test_tbl; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 + +Testing table with no client logic ... 
+Index: 1 column name: key + getColumnDisplaySize is: 11 + getColumnClassName is: java.lang.Integer + getColumnLabel is: key + getColumnType is: 4 + getColumnTypeName is: int4 + getPrecision is: 10 + getScale is: 0 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 2 column name: id + getColumnDisplaySize is: 11 + getColumnClassName is: java.lang.Integer + getColumnLabel is: id + getColumnType is: 4 + getColumnTypeName is: int4 + getPrecision is: 10 + getScale is: 0 + isNullable is: 0 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 3 column name: char_col + getColumnDisplaySize is: 30 + getColumnClassName is: java.lang.String + getColumnLabel is: char_col + getColumnType is: 12 + getColumnTypeName is: varchar + getPrecision is: 30 + getScale is: 0 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: true + isCurrency is: false + isReadOnly is: false + isSigned is: false + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 4 column name: float_col + getColumnDisplaySize is: 25 + getColumnClassName is: java.lang.Double + getColumnLabel is: float_col + getColumnType is: 8 + getColumnTypeName is: float8 + getPrecision is: 17 + getScale is: 17 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Testing table with no client logic and with no client logic in connection string ... 
+Index: 1 column name: key + getColumnDisplaySize is: 11 + getColumnClassName is: java.lang.Integer + getColumnLabel is: key + getColumnType is: 4 + getColumnTypeName is: int4 + getPrecision is: 10 + getScale is: 0 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 2 column name: id + getColumnDisplaySize is: 11 + getColumnClassName is: java.lang.Integer + getColumnLabel is: id + getColumnType is: 4 + getColumnTypeName is: int4 + getPrecision is: 10 + getScale is: 0 + isNullable is: 0 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 3 column name: char_col + getColumnDisplaySize is: 30 + getColumnClassName is: java.lang.String + getColumnLabel is: char_col + getColumnType is: 12 + getColumnTypeName is: varchar + getPrecision is: 30 + getScale is: 0 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: true + isCurrency is: false + isReadOnly is: false + isSigned is: false + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Index: 4 column name: float_col + getColumnDisplaySize is: 25 + getColumnClassName is: java.lang.Double + getColumnLabel is: float_col + getColumnType is: 8 + getColumnTypeName is: float8 + getPrecision is: 17 + getScale is: 17 + isNullable is: 1 + isNullable is: false + isCaseSensitive is: false + isCurrency is: false + isReadOnly is: false + isSigned is: true + isWritable is: true + isDefinitelyWritable is: false + isSearchable is: true + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +drop table metadata_simple_test_tbl; diff --git a/src/test/regress/expected/ResultsetFetchBin.out b/src/test/regress/expected/ResultsetFetchBin.out new file mode 100644 index 000000000..cda9b4f71 --- /dev/null +++ b/src/test/regress/expected/ResultsetFetchBin.out @@ -0,0 +1,151 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
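The three metadata dumps above are deliberately identical: with client logic on, the driver reports the original column types (int4, varchar, float8) with their precision and scale, not the ciphertext representation stored on disk. A sketch of producing such a dump (placeholder connection details, enable_ce=1 assumed):

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;

public class MetaDataDumpSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT * FROM metadata_client_logic_test_tbl")) {
            ResultSetMetaData md = rs.getMetaData();
            for (int i = 1; i <= md.getColumnCount(); i++) {
                // The driver reports plaintext types even for encrypted columns.
                System.out.println("Index: " + i + " column name: " + md.getColumnName(i));
                System.out.println("  getColumnTypeName is: " + md.getColumnTypeName(i));
                System.out.println("  getPrecision is: " + md.getPrecision(i));
                System.out.println("  getScale is: " + md.getScale(i));
                System.out.println("  isNullable is: " + md.isNullable(i));
            }
        }
    }
}
```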
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_varchar (id, name, address) VALUES (0, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (2, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (3, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (4, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (5, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (6, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (7, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (8, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (9, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (10, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (11, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (12, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (13, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (14, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (15, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (16, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (17, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (18, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (19, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (20, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (21, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (22, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (23, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (24, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (25, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (26, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (27, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (28, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (29, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (30, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (31, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (32, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (33, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (34, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (35, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (36, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (37, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (38, 'MyName', 
'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (39, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (40, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (41, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (42, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (43, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (44, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (45, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (46, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (47, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (48, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (49, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (50, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (51, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (52, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (53, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (54, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (55, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (56, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (57, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (58, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (59, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (60, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (61, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (62, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (63, 'MyName', 'MyAddress'); +INSERT INTO t_varchar (id, name, address) VALUES (64, 'MyName', 'MyAddress'); + id | name | address +----+--------+----------- + 0 | MyName | MyAddress + 1 | MyName | MyAddress + 2 | MyName | MyAddress + 3 | MyName | MyAddress + 4 | MyName | MyAddress + 5 | MyName | MyAddress + 6 | MyName | MyAddress + 7 | MyName | MyAddress + 8 | MyName | MyAddress + 9 | MyName | MyAddress + 10 | MyName | MyAddress + 11 | MyName | MyAddress + 12 | MyName | MyAddress + 13 | MyName | MyAddress + 14 | MyName | MyAddress + 15 | MyName | MyAddress + 16 | MyName | MyAddress + 17 | MyName | MyAddress + 18 | MyName | MyAddress + 19 | MyName | MyAddress + 20 | MyName | MyAddress + 21 | MyName | MyAddress + 22 | MyName | MyAddress + 23 | MyName | MyAddress + 24 | MyName | MyAddress + 25 | MyName | MyAddress + 26 | MyName | MyAddress + 27 | MyName | MyAddress + 28 | MyName | MyAddress + 29 | MyName | MyAddress + 30 | MyName | MyAddress + 31 | MyName | MyAddress + 32 | MyName | MyAddress + 33 | MyName | MyAddress + 34 | MyName | MyAddress + 35 | MyName | MyAddress + 36 | MyName | MyAddress + 37 | MyName | MyAddress + 38 | MyName | MyAddress + 39 | MyName | MyAddress + 40 | MyName | MyAddress + 41 | MyName | MyAddress + 42 | MyName | MyAddress + 43 | MyName | MyAddress + 44 | MyName | MyAddress + 45 | MyName | MyAddress + 46 | MyName | MyAddress + 47 | MyName | MyAddress + 48 | MyName | MyAddress + 49 | MyName | MyAddress + 50 | MyName | MyAddress + 51 | MyName | MyAddress + 52 | MyName | MyAddress + 53 | MyName | MyAddress + 54 | MyName | MyAddress + 55 | MyName | 
MyAddress + 56 | MyName | MyAddress + 57 | MyName | MyAddress + 58 | MyName | MyAddress + 59 | MyName | MyAddress + 60 | MyName | MyAddress + 61 | MyName | MyAddress + 62 | MyName | MyAddress + 63 | MyName | MyAddress + 64 | MyName | MyAddress +(65 rows) + +DROP table t_varchar; +DROP COLUMN ENCRYPTION KEY cek1; +DROP CLIENT MASTER KEY cmk1; +select * from gs_column_keys; + column_key_name | column_key_distributed_id | global_key_id | key_namespace | key_owner | create_date | key_acl +-----------------+---------------------------+---------------+---------------+-----------+-------------+--------- +(0 rows) + diff --git a/src/test/regress/expected/SimpleQueryBin.out b/src/test/regress/expected/SimpleQueryBin.out new file mode 100644 index 000000000..622db2270 --- /dev/null +++ b/src/test/regress/expected/SimpleQueryBin.out @@ -0,0 +1,366 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int,id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)) +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_client_logic_test_tbl_pkey" for table "metadata_client_logic_test_tbl" +*******inserting data to the metadata_client_logic_test_tbl; +insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_simple_test_tbl_pkey" for table "metadata_simple_test_tbl" +*******inserting data to the metadata_simple_test_tbl +insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_simple_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+--------------------- + 1 | 2 | test_data_4_meta_data | 1.10000000000000009 +(1 row) + +Testing table with client logic ... 
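ResultsetFetchBin.out finishes by dropping the keys and checking gs_column_keys, the catalog view of column encryption keys, which should then be empty. The same verification from JDBC, as a sketch with placeholder connection details:

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class KeyCleanupCheckSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password");
             Statement st = conn.createStatement()) {
            st.execute("DROP COLUMN ENCRYPTION KEY cek1");
            st.execute("DROP CLIENT MASTER KEY cmk1");
            // After both drops the catalog view should report no column keys.
            try (ResultSet rs = st.executeQuery("SELECT count(*) FROM gs_column_keys")) {
                rs.next();
                System.out.println("remaining column keys: " + rs.getInt(1)); // expect 0
            }
        }
    }
}
```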
+ +* setCursorName() +setCursorName() executed successfully + +* getWarnings() +null +getWarnings() executed successfully + +* clearWarnings() +clearWarnings() executed successfully + +* getWarnings() +null +getWarnings() executed successfully + +* setEscapeProcessing() +setEscapeProcessing() executed successfully + +* setFetchDirection() +setFetchDirection() executed successfully + +* getFetchDirection() +1000 +getFetchDirection() executed successfully + +* setFetchSize() +setFetchSize() executed successfully + +* getFetchSize() +20 +getFetchSize() executed successfully + +* setMaxFieldSize() +setMaxFieldSize() executed successfully + +* getMaxFieldSize() +40 +getMaxFieldSize() executed successfully + +* setMaxRows() +setMaxRows() executed successfully + +* getMaxRows() +50 +getMaxRows() executed successfully + +* setPoolable() +setPoolable() executed successfully + +* isPoolable() +false +isPoolable() executed successfully + +* setQueryTimeout() +setQueryTimeout() executed successfully + +* getQueryTimeout() +100 +getQueryTimeout() executed successfully + +* getGeneratedKeys() +getGeneratedKeys() executed successfully + +* getMoreResults() +false +getMoreResults() executed successfully + +* getResultSet() +null +getResultSet() executed successfully + +* getResultSetConcurrency() +1007 +getResultSetConcurrency() executed successfully + +* getResultSetHoldability() +2 +getResultSetHoldability() executed successfully + +* getResultSetType() +1003 +getResultSetType() executed successfully + +* getResultSetType() +1003 +getResultSetType() executed successfully + +* getUpdateCount() +-1 +getUpdateCount() executed successfully + +* cancel() +cancel() executed successfully + +* isCloseOnCompletion() +false +isCloseOnCompletion() executed successfully + +* closeOnCompletion() +closeOnCompletion() executed successfully + +* close() +close() executed successfully + +* isClosed() +true +isClosed() executed successfully + +drop table metadata_client_logic_test_tbl; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 + +Testing table with no client logic ... 
+ +* setCursorName() +setCursorName() executed successfully + +* getWarnings() +null +getWarnings() executed successfully + +* clearWarnings() +clearWarnings() executed successfully + +* getWarnings() +null +getWarnings() executed successfully + +* setEscapeProcessing() +setEscapeProcessing() executed successfully + +* setFetchDirection() +setFetchDirection() executed successfully + +* getFetchDirection() +1000 +getFetchDirection() executed successfully + +* setFetchSize() +setFetchSize() executed successfully + +* getFetchSize() +20 +getFetchSize() executed successfully + +* setMaxFieldSize() +setMaxFieldSize() executed successfully + +* getMaxFieldSize() +40 +getMaxFieldSize() executed successfully + +* setMaxRows() +setMaxRows() executed successfully + +* getMaxRows() +50 +getMaxRows() executed successfully + +* setPoolable() +setPoolable() executed successfully + +* isPoolable() +false +isPoolable() executed successfully + +* setQueryTimeout() +setQueryTimeout() executed successfully + +* getQueryTimeout() +100 +getQueryTimeout() executed successfully + +* getGeneratedKeys() +getGeneratedKeys() executed successfully + +* getMoreResults() +false +getMoreResults() executed successfully + +* getResultSet() +null +getResultSet() executed successfully + +* getResultSetConcurrency() +1007 +getResultSetConcurrency() executed successfully + +* getResultSetHoldability() +2 +getResultSetHoldability() executed successfully + +* getResultSetType() +1003 +getResultSetType() executed successfully + +* getResultSetType() +1003 +getResultSetType() executed successfully + +* getUpdateCount() +-1 +getUpdateCount() executed successfully + +* cancel() +cancel() executed successfully + +* isCloseOnCompletion() +false +isCloseOnCompletion() executed successfully + +* closeOnCompletion() +closeOnCompletion() executed successfully + +* close() +close() executed successfully + +* isClosed() +true +isClosed() executed successfully + +Testing table with no client logic and with no client logic in connection string ... 
+ +* setCursorName() +setCursorName() executed successfully + +* getWarnings() +null +getWarnings() executed successfully + +* clearWarnings() +clearWarnings() executed successfully + +* getWarnings() +null +getWarnings() executed successfully + +* setEscapeProcessing() +setEscapeProcessing() executed successfully + +* setFetchDirection() +setFetchDirection() executed successfully + +* getFetchDirection() +1000 +getFetchDirection() executed successfully + +* setFetchSize() +setFetchSize() executed successfully + +* getFetchSize() +20 +getFetchSize() executed successfully + +* setMaxFieldSize() +setMaxFieldSize() executed successfully + +* getMaxFieldSize() +40 +getMaxFieldSize() executed successfully + +* setMaxRows() +setMaxRows() executed successfully + +* getMaxRows() +50 +getMaxRows() executed successfully + +* setPoolable() +setPoolable() executed successfully + +* isPoolable() +false +isPoolable() executed successfully + +* setQueryTimeout() +setQueryTimeout() executed successfully + +* getQueryTimeout() +100 +getQueryTimeout() executed successfully + +* getGeneratedKeys() +getGeneratedKeys() executed successfully + +* getMoreResults() +false +getMoreResults() executed successfully + +* getResultSet() +null +getResultSet() executed successfully + +* getResultSetConcurrency() +1007 +getResultSetConcurrency() executed successfully + +* getResultSetHoldability() +2 +getResultSetHoldability() executed successfully + +* getResultSetType() +1003 +getResultSetType() executed successfully + +* getResultSetType() +1003 +getResultSetType() executed successfully + +* getUpdateCount() +-1 +getUpdateCount() executed successfully + +* cancel() +cancel() executed successfully + +* isCloseOnCompletion() +false +isCloseOnCompletion() executed successfully + +* closeOnCompletion() +closeOnCompletion() executed successfully + +* close() +close() executed successfully + +* isClosed() +true +isClosed() executed successfully + +drop table metadata_simple_test_tbl; diff --git a/src/test/regress/expected/StmtBatchClientLogicBin.out b/src/test/regress/expected/StmtBatchClientLogicBin.out new file mode 100644 index 000000000..bae012164 --- /dev/null +++ b/src/test/regress/expected/StmtBatchClientLogicBin.out @@ -0,0 +1,33 @@ +\! gs_ktool -d all +DELETE ALL + 1 +\! 
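SimpleQueryBin.out walks the java.sql.Statement surface and expects identical behavior with and without client logic. A condensed sketch of the setter/getter round trips the fixture checks (placeholder connection details; the printed constants, e.g. 1000 for FETCH_FORWARD, come from java.sql.ResultSet):

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class StatementApiSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password");
             Statement st = conn.createStatement()) {
            st.setFetchDirection(ResultSet.FETCH_FORWARD);
            st.setFetchSize(20);
            st.setMaxFieldSize(40);
            st.setMaxRows(50);
            st.setQueryTimeout(100);
            System.out.println(st.getFetchDirection()); // 1000
            System.out.println(st.getFetchSize());      // 20
            System.out.println(st.getMaxFieldSize());   // 40
            System.out.println(st.getMaxRows());        // 50
            System.out.println(st.getQueryTimeout());   // 100
            st.close();
            System.out.println(st.isClosed());          // true
        }
    }
}
```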
gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int,id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)) +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "metadata_client_logic_test_tbl_pkey" for table "metadata_client_logic_test_tbl" +*******inserting data to the metadata_client_logic_test_tbl; +insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?); +1,2,test_data_4_meta_data,1.1 +*************verifying data +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +Failed to execute batchjava.sql.SQLException: ERROR(CLIENT): could not support order by operator for column encryption +verifying the clear batch cmd +select * from metadata_client_logic_test_tbl; + + key | id | char_col | float_col +-----+----+-----------------------+----------- + 1 | 2 | test_data_4_meta_data | 1.1 +(1 row) + +drop table metadata_client_logic_test_tbl; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 diff --git a/src/test/regress/expected/alter_hw_package.out b/src/test/regress/expected/alter_hw_package.out new file mode 100644 index 000000000..1a66549c2 --- /dev/null +++ b/src/test/regress/expected/alter_hw_package.out @@ -0,0 +1,107 @@ +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- +--?.* +(1 row) + +reset session AUTHORIZATION; +create user user1 PASSWORD 'Gauss123'; +create user user2 PASSWORD 'Gauss123'; +SET SESSION AUTHORIZATION user1 password 'Gauss123'; +drop procedure p1; +ERROR: function p1 does not exist +create procedure p1 +is +begin +null; +end; +/ +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create or replace package user1.pck1 as +procedure p1(); +end pck1; +/ +--nested definition inside the package body +create or replace package body pck1 as +procedure p1 is +begin +null; +end; +end pck1; +/ +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- + user1 | user1 +(1 row) + +reset session AUTHORIZATION; +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- +--?.* +(1 row) + +---change the package owner +alter package user1.pck1 owner to user2; +---verify +------usename should be user2 +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1'); + usename +--------- + user2 +(1 row) + +grant usage on schema user1 to user2; +grant execute on package user1.pck1 to user2; +------the call succeeds with the correct result +SET SESSION AUTHORIZATION user2 password 'Gauss123'; +drop procedure p1; +ERROR: function p1 does not exist +call user1.pck1.p1(); + p1 +---- + +(1 row) + +------create or replace by the original owner is expected to fail +SET SESSION AUTHORIZATION user1 password 'Gauss123'; +create or replace package pck1 as + type t1 is record(c1 int,c2 int); + type t2 is table of t1; + type t3 is varray(10) of t1; + type t4 is ref cursor; +end pck1; +/ +ERROR: must be owner of package pck1 
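StmtBatchClientLogicBin.out, just above, records a documented limitation: a batch containing a statement the client-logic layer cannot rewrite (the error text names an ORDER BY operator over an encrypted column) fails as a whole, and clearBatch() leaves the table untouched. The exact offending statement is not visible in the expected file, so the batched query below is a stand-in illustration only, under the usual placeholder assumptions:

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BatchClientLogicSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres?enable_ce=1"; // assumed option
        try (Connection conn = DriverManager.getConnection(url, "omm", "password");
             Statement st = conn.createStatement()) {
            // Hypothetical batch entry that sorts on an encrypted column, which
            // the client-logic layer rejects per the fixture's error message.
            st.addBatch("SELECT * FROM metadata_client_logic_test_tbl ORDER BY char_col");
            try {
                st.executeBatch();
            } catch (Exception e) {
                System.out.println("Failed to execute batch: " + e);
                st.clearBatch(); // discard the queued statements and carry on
            }
        }
    }
}
```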
+DETAIL: N/A +create or replace package body pck1 as + type t5 is record(c1 t1,c2 int); + type t6 is table of t5; + type t7 is varray(10) of t1; + type t8 is ref cursor; +end pck1; +/ +ERROR: permission denied for package pck1 +DETAIL: N/A +reset session AUTHORIZATION; +SELECT SESSION_USER, CURRENT_USER; + session_user | current_user +--------------+-------------- +--?.* +(1 row) + +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1'); + usename +--------- + user2 +(1 row) + +---清理 +SET SESSION AUTHORIZATION user1 password 'Gauss123'; +drop package if exists pck1; +NOTICE: drop cascades to function user1.p1() +reset session AUTHORIZATION; +drop user if exists user1 cascade; +drop user if exists user2 cascade; diff --git a/src/test/regress/expected/alter_table_000.out b/src/test/regress/expected/alter_table_000.out index d5abcb34e..1f366f2d8 100644 --- a/src/test/regress/expected/alter_table_000.out +++ b/src/test/regress/expected/alter_table_000.out @@ -714,3 +714,6 @@ drop table tt_row_rep_1; drop table tt_row_rep_2; drop table tt_col_rep_1; drop table tt_col_rep_2; +select pg_catalog.ledger_hist_repair('0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', 65536); +ERROR: The schema name exceeds the maximum length. +CONTEXT: referenced column: ledger_hist_repair diff --git a/src/test/regress/expected/arrayinterface_indexby.out b/src/test/regress/expected/arrayinterface_indexby.out new file mode 100644 index 000000000..bb4f290f8 --- /dev/null +++ b/src/test/regress/expected/arrayinterface_indexby.out @@ -0,0 +1,1582 @@ +-- FOR VARRAY INTERFACE -- +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_array_interface_indexby; +NOTICE: schema "plpgsql_array_interface_indexby" does not exist, skipping +create schema plpgsql_array_interface_indexby; +set current_schema = plpgsql_array_interface_indexby; +-- test array interface count -- +create or replace procedure array_interface_p1() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; -- array['red', 'orange', null, '', 'green', 'blue', 'indigo', 'violet'] +begin + colors('a1234567') := 'red'; + colors('a12345678') := 'orange'; + colors('a2345671') := null; + colors('a3456712') := ''; + colors('a4567123') := 'green'; + colors('a5671234') := 'blue'; + colors('a6712345') := 'indigo'; + colors('a7123456') := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + colors[1] := null; + colors[4] := null; + colors[6] := ''; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; +end; +/ +call array_interface_p1(); +NOTICE: {red,orange,NULL,NULL,green,blue,indigo,violet} +NOTICE: 8 +NOTICE: {red,orange,NULL,NULL,green,blue,indigo,violet,NULL,NULL,NULL} +NOTICE: 11 + array_interface_p1 +-------------------- + +(1 row) + +create or replace procedure array_interface_p1() as +declare + type ta is table of varchar(32) index by integer; + colors ta; -- array['red', 'orange', null, '', 'green', 'blue', 'indigo', 'violet'] +begin + colors(5) := 'red'; + colors(-1) := 'orange'; + colors(2) := null; + colors(8) := ''; + colors(-6) := 'green'; + colors(10) := 'blue'; + colors(-3) := 'indigo'; + colors(3) := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + colors[1] := null; + colors[4] := null; + colors[6] := ''; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; +end; +/ +call array_interface_p1(); +NOTICE: {red,orange,NULL,NULL,green,blue,indigo,violet} +NOTICE: 8 +NOTICE: {red,orange,NULL,NULL,green,blue,indigo,violet,NULL,NULL,NULL} +NOTICE: 11 + array_interface_p1 +-------------------- + +(1 row) + +-- test array interface exists -- +create or replace procedure array_interface_p2() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; -- array[null,'red','orange',null,'green','','blue',null,'indigo','violet',null] + ind varchar2(32); +begin + 
colors('1') := null; + colors('2') := 'red'; + colors('3') := 'orange'; + colors('4') := null; + colors('5') := 'green'; + colors('6') := ''; + colors('7') := 'blue'; + colors('8') := null; + colors('9') := 'indigo'; + colors('10') := 'violet'; + colors('11') := null; + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + ind := colors.first; + raise NOTICE '%', ind; + while colors.exists(ind) loop + raise NOTICE '%:%', ind, colors(ind); + ind := colors.next(ind); + end loop; +end; +/ +call array_interface_p2(); +NOTICE: --------------------colors-------------------------- +NOTICE: {NULL,red,orange,NULL,green,NULL,blue,NULL,indigo,violet,NULL} +NOTICE: 1 +NOTICE: 1: +NOTICE: 10:violet +NOTICE: 11: +NOTICE: 2:red +NOTICE: 3:orange +NOTICE: 4: +NOTICE: 5:green +NOTICE: 6: +NOTICE: 7:blue +NOTICE: 8: +NOTICE: 9:indigo + array_interface_p2 +-------------------- + +(1 row) + +-- test array interface exists -- +create or replace procedure array_interface_p2() as +declare + type ta is table of varchar(32) index by integer; + colors ta; -- array[null,'red','orange',null,'green','','blue',null,'indigo','violet',null] + ind varchar2(32); +begin + colors(5) := null; + colors(-3) := 'orange'; + colors(-5) := null; + colors(-1) := 'green'; + colors(0) := ''; + colors(3) := 'indigo'; + colors(1) := 'blue'; + colors(-2) := null; + colors(2) := null; + colors(-4) := 'red'; + colors(4) := 'violet'; + + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + ind := colors.first; + raise NOTICE '%', ind; + while colors.exists(ind) loop + raise NOTICE '%:%', ind, colors(ind); + ind := colors.next(ind); + end loop; +end; +/ +call array_interface_p2(); +NOTICE: --------------------colors-------------------------- +NOTICE: {NULL,orange,NULL,green,NULL,indigo,blue,NULL,NULL,red,violet} +NOTICE: -5 +NOTICE: -5: +NOTICE: -4:red +NOTICE: -3:orange +NOTICE: -2: +NOTICE: -1:green +NOTICE: 0: +NOTICE: 1:blue +NOTICE: 2: +NOTICE: 3:indigo +NOTICE: 4:violet +NOTICE: 5: + array_interface_p2 +-------------------- + +(1 row) + +-- test array interface exists -- +create or replace procedure array_interface_p3() as +declare + type ta is table of integer index by varchar; + colors ta; -- array[1,2,'',3,4,null,5,6,7,8,9] + ind varchar2(32); +begin + colors('a') := 1; + colors('ab') := 2; + colors('ba') := ''; + colors('bab') := 3; + colors('bb') := 4; + colors('bc') := null; + colors('ca') := 5; + colors('cb') := 6; + colors('cab') := 7; + colors('cba') := 8; + colors('cbb') := 9; + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + ind := colors.first; + raise NOTICE '%', ind; + while colors.exists(ind) loop + raise NOTICE '%:%', ind, colors[ind]; + raise NOTICE '%', colors.exists(ind); + ind := colors.next(ind); + end loop; +end; +/ +call array_interface_p3(); +NOTICE: --------------------colors-------------------------- +NOTICE: {1,2,NULL,3,4,NULL,5,6,7,8,9} +NOTICE: a +NOTICE: a:1 +NOTICE: t +NOTICE: ab:2 +NOTICE: t +NOTICE: ba: +NOTICE: t +NOTICE: bab:3 +NOTICE: t +NOTICE: bb:4 +NOTICE: t +NOTICE: bc: +NOTICE: t +NOTICE: ca:5 +NOTICE: t +NOTICE: cab:7 +NOTICE: t +NOTICE: cb:6 +NOTICE: t +NOTICE: cba:8 +NOTICE: t +NOTICE: cbb:9 +NOTICE: t + array_interface_p3 +-------------------- + +(1 row) + +-- test array interface first and last -- +create or replace procedure array_interface_p4() as +declare + type ta is table of varchar(32) index by varchar; + type tb is table of integer index by 
varchar; + colors1 ta; -- array['red','orange',null,'green','','blue'] + colors2 ta; -- array['red','orange',null,'green','blue',null] + colors3 ta; -- array[null,'red','orange',null,'green','blue'] + colors4 tb; -- array[null,1,2,3,4,null,5,6,7,8,null,''] +begin + colors1('123') := 'red'; + colors1('132') := 'orange'; + colors1('213') := null; + colors1('231') := 'green'; + colors1('312') := ''; + colors1('321') := 'blue'; + raise NOTICE '---------colors1---------'; + raise NOTICE '%', colors1; + raise NOTICE 'colors1 first number: %', colors1.first; + raise NOTICE 'colors1 first: %', colors1[colors1.first]; + raise NOTICE 'colors1 last number: %', colors1.last; + raise NOTICE 'colors1 last: %', colors1[colors1.last]; + + colors2('abc') := 'red'; + colors2('acb') := 'orange'; + colors2('bac') := null; + colors2('bca') := 'green'; + colors2('cab') := 'blue'; + colors2('cba') := null; + raise NOTICE '---------colors2---------'; + raise NOTICE '%', colors2; + raise NOTICE 'colors2 first number: %', colors2.first; + raise NOTICE 'colors2 first: %', colors2[colors2.first]; + raise NOTICE 'colors2 last number: %', colors2.last; + raise NOTICE 'colors2 last: %', colors2[colors2.last]; + + colors3('a1') := null; + colors3('a2') := 'red'; + colors3('b1') := 'orange'; + colors3('ba') := null; + colors3('b2') := 'green'; + colors3('a0') := 'blue'; + raise NOTICE '---------colors3---------'; + raise NOTICE '%', colors3; + raise NOTICE 'colors3 first number: %', colors3.first; + raise NOTICE 'colors3 first: %', colors3[colors3.first]; + raise NOTICE 'colors3 last number: %', colors3.last; + raise NOTICE 'colors3 last: %', colors3[colors3.last]; + + colors4('a312') := null; + colors4('a123') := 1; + colors4('b1') := 2; + colors4('ba') := 3; + colors4('b0') := 4; + colors4('a0') := null; + colors4('b1') := 5; + colors4('bc') := 6; + colors4('bb') := 7; + colors4('c1') := 8; + colors4('ca') := null; + colors4('cb') := ''; + raise NOTICE '---------colors4---------'; + raise NOTICE '%', colors4; + raise NOTICE 'colors4 first number: %', colors4.first; + raise NOTICE 'colors4 first: %', colors4[colors4.first]; + raise NOTICE 'colors4 last number: %', colors4.last; + raise NOTICE 'colors4 last: %', colors4[colors4.last]; +end; +/ +call array_interface_p4(); +NOTICE: ---------colors1--------- +NOTICE: {red,orange,NULL,green,NULL,blue} +NOTICE: colors1 first number: 123 +NOTICE: colors1 first: red +NOTICE: colors1 last number: 321 +NOTICE: colors1 last: blue +NOTICE: ---------colors2--------- +NOTICE: {red,orange,NULL,green,blue,NULL} +NOTICE: colors2 first number: abc +NOTICE: colors2 first: red +NOTICE: colors2 last number: cba +NOTICE: colors2 last: +NOTICE: ---------colors3--------- +NOTICE: {NULL,red,orange,NULL,green,blue} +NOTICE: colors3 first number: a0 +NOTICE: colors3 first: blue +NOTICE: colors3 last number: ba +NOTICE: colors3 last: +NOTICE: ---------colors4--------- +NOTICE: {NULL,1,5,3,4,NULL,6,7,8,NULL,NULL} +NOTICE: colors4 first number: a0 +NOTICE: colors4 first: +NOTICE: colors4 last number: cb +NOTICE: colors4 last: + array_interface_p4 +-------------------- + +(1 row) + +-- test array interface first and last -- +create or replace procedure array_interface_p4() as +declare + type ta is table of varchar(32) index by integer; + colors1 ta; -- array['red','orange',null,'green','','blue'] +begin + colors1(132) := 'orange'; + colors1(321) := 'blue'; + colors1(213) := null; + colors1(123) := 'red'; + colors1(231) := 'green'; + colors1(312) := ''; + + raise NOTICE '---------colors1---------'; + 
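+    -- .first/.last report the smallest and largest keys, not insertion
+    -- order: the assignments above were made out of order, yet the NOTICEs
+    -- below still report 123 as the first key and 321 as the last.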
raise NOTICE '%', colors1; + raise NOTICE 'colors1 first number: %', colors1.first; + raise NOTICE 'colors1 first: %', colors1[colors1.first]; + raise NOTICE 'colors1 last number: %', colors1.last; + raise NOTICE 'colors1 last: %', colors1[colors1.last]; +end; +/ +call array_interface_p4(); +NOTICE: ---------colors1--------- +NOTICE: {orange,blue,NULL,red,green,NULL} +NOTICE: colors1 first number: 123 +NOTICE: colors1 first: red +NOTICE: colors1 last number: 321 +NOTICE: colors1 last: blue + array_interface_p4 +-------------------- + +(1 row) + +-- next&prior +create or replace procedure array_interface_p5() as +declare + type ta is table of varchar(32) index by varchar; + type tb is table of integer index by varchar; + colors1 ta; -- array['red','orange',null,'green','blue','','indigo','violet'] + colors2 tb; -- array[1,2,3,null,4,5,6,'',7,8] + ind varchar2(32); + tmp varchar2(32); +begin + colors1('1') := 'red'; + colors1('2') := 'orange'; + colors1('3') := null; + colors1('4') := 'green'; + colors1('5') := 'blue'; + colors1('6') := ''; + colors1('7') := 'indigo'; + colors1('8') := 'violet'; + raise NOTICE '--------------------colors1---------------------'; + raise NOTICE '%', colors1; + ind := colors1.first; + while colors1.exists(ind) loop + raise NOTICE 'current is: %', colors1[ind]; + raise NOTICE 'next index is: %', colors1.next(ind); + tmp := colors1.next(ind); + if tmp is null then + raise NOTICE 'next element is: %', tmp; + else + raise NOTICE 'next element is: %', colors1[tmp]; + end if; + raise NOTICE 'prior index is: %', colors1.prior(ind); + tmp := colors1.prior(ind); + if tmp is null then + raise NOTICE 'prior element is: %', tmp; + else + raise NOTICE 'prior element is: %', colors1[tmp]; + end if; + raise NOTICE '-------slash-------'; + ind := colors1.next(ind); + end loop; + + colors1('a') := 1; + colors1('b') := 2; + colors1('c') := 3; + colors1('d') := null; + colors1('e') := 4; + colors1('f') := 5; + colors1('g') := 6; + colors1('h') := ''; + colors1('i') := 7; + colors1('j') := 8; + raise NOTICE '--------------------colors2---------------------'; + raise NOTICE '%', colors2; + ind := colors1.first; + while colors1.exists(ind) loop + raise NOTICE 'current is: %', colors2[ind]; + raise NOTICE 'next index is: %', colors2.next(ind); + raise NOTICE 'next element is: %', colors2[colors2.next(ind)]; + raise NOTICE 'prior index is: %', colors2.prior(ind); + raise NOTICE 'prior element is: %', colors2[colors2.prior(ind)]; + raise NOTICE '-----------'; + ind := colors1.next(ind); + end loop; +end; +/ +call array_interface_p5(); +NOTICE: --------------------colors1--------------------- +NOTICE: {red,orange,NULL,green,blue,NULL,indigo,violet} +NOTICE: current is: red +NOTICE: next index is: 2 +NOTICE: next element is: orange +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: -------slash------- +NOTICE: current is: orange +NOTICE: next index is: 3 +NOTICE: next element is: +NOTICE: prior index is: 1 +NOTICE: prior element is: red +NOTICE: -------slash------- +NOTICE: current is: +NOTICE: next index is: 4 +NOTICE: next element is: green +NOTICE: prior index is: 2 +NOTICE: prior element is: orange +NOTICE: -------slash------- +NOTICE: current is: green +NOTICE: next index is: 5 +NOTICE: next element is: blue +NOTICE: prior index is: 3 +NOTICE: prior element is: +NOTICE: -------slash------- +NOTICE: current is: blue +NOTICE: next index is: 6 +NOTICE: next element is: +NOTICE: prior index is: 4 +NOTICE: prior element is: green +NOTICE: -------slash------- +NOTICE: current 
is: +NOTICE: next index is: 7 +NOTICE: next element is: indigo +NOTICE: prior index is: 5 +NOTICE: prior element is: blue +NOTICE: -------slash------- +NOTICE: current is: indigo +NOTICE: next index is: 8 +NOTICE: next element is: violet +NOTICE: prior index is: 6 +NOTICE: prior element is: +NOTICE: -------slash------- +NOTICE: current is: violet +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: 7 +NOTICE: prior element is: indigo +NOTICE: -------slash------- +NOTICE: --------------------colors2--------------------- +NOTICE: +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- +NOTICE: current is: +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: ----------- + array_interface_p5 +-------------------- + +(1 row) + +create or replace procedure array_interface_p5() as +declare + type ta is table of varchar(32) index by integer; + colors1 ta; -- array['red','orange',null,'green','blue','','indigo','violet'] + ind varchar2(32); + tmp varchar2(32); +begin + colors1(-15) := 'red'; + colors1(-8) := 'orange'; + colors1(-1) := null; + colors1(0) := 'green'; + 
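+    -- Negative and non-contiguous keys are allowed with "index by integer";
+    -- first/next traversal still visits them in ascending numeric order
+    -- (-15, -8, -1, 0, 10, 24, 45, 50 below).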
colors1(10) := 'blue'; + colors1(24) := ''; + colors1(45) := 'indigo'; + colors1(50) := 'violet'; + raise NOTICE '--------------------colors1---------------------'; + raise NOTICE '%', colors1; + ind := colors1.first; + while colors1.exists(ind) loop + raise NOTICE 'current is: %', colors1[ind]; + raise NOTICE 'next index is: %', colors1.next(ind); + tmp := colors1.next(ind); + if tmp is null then + raise NOTICE 'next element is: %', tmp; + else + raise NOTICE 'next element is: %', colors1[tmp]; + end if; + raise NOTICE 'prior index is: %', colors1.prior(ind); + tmp := colors1.prior(ind); + if tmp is null then + raise NOTICE 'prior element is: %', tmp; + else + raise NOTICE 'prior element is: %', colors1[tmp]; + end if; + raise NOTICE '-------slash-------'; + ind := colors1.next(ind); + end loop; +end; +/ +call array_interface_p5(); +NOTICE: --------------------colors1--------------------- +NOTICE: {red,orange,NULL,green,blue,NULL,indigo,violet} +NOTICE: current is: red +NOTICE: next index is: -8 +NOTICE: next element is: orange +NOTICE: prior index is: +NOTICE: prior element is: +NOTICE: -------slash------- +NOTICE: current is: orange +NOTICE: next index is: -1 +NOTICE: next element is: +NOTICE: prior index is: -15 +NOTICE: prior element is: red +NOTICE: -------slash------- +NOTICE: current is: +NOTICE: next index is: 0 +NOTICE: next element is: green +NOTICE: prior index is: -8 +NOTICE: prior element is: orange +NOTICE: -------slash------- +NOTICE: current is: green +NOTICE: next index is: 10 +NOTICE: next element is: blue +NOTICE: prior index is: -1 +NOTICE: prior element is: +NOTICE: -------slash------- +NOTICE: current is: blue +NOTICE: next index is: 24 +NOTICE: next element is: +NOTICE: prior index is: 0 +NOTICE: prior element is: green +NOTICE: -------slash------- +NOTICE: current is: +NOTICE: next index is: 45 +NOTICE: next element is: indigo +NOTICE: prior index is: 10 +NOTICE: prior element is: blue +NOTICE: -------slash------- +NOTICE: current is: indigo +NOTICE: next index is: 50 +NOTICE: next element is: violet +NOTICE: prior index is: 24 +NOTICE: prior element is: +NOTICE: -------slash------- +NOTICE: current is: violet +NOTICE: next index is: +NOTICE: next element is: +NOTICE: prior index is: 45 +NOTICE: prior element is: indigo +NOTICE: -------slash------- + array_interface_p5 +-------------------- + +(1 row) + +-- test empty array exists interface return +create or replace procedure array_interface_p6() as +declare + type ta is table of varchar(32) index by varchar; + type tb is table of integer index by varchar; + colors1 ta := array[]::varchar[]; + colors2 tb := array[]::integer[]; + vi varchar2(32); +begin + raise NOTICE 'colors1 is %', colors1; + raise NOTICE 'colors1 length is %', colors1.count; + raise NOTICE 'colors1 first is %', colors1.first; + raise NOTICE 'colors1 last is %', colors1.last; + raise NOTICE 'colors2 is %', colors2; + raise NOTICE 'colors2 length is %', colors2.count; + raise NOTICE 'colors2 first is %', colors2.first; + raise NOTICE 'colors2 last is %', colors2.last; + vi := 111; + raise NOTICE 'colors1[%] exists return %', vi, colors1.exists(vi); + vi := '1'; + raise NOTICE 'colors1["%"] exists return %', vi, colors1.exists(vi); + vi := 123432; + raise NOTICE 'colors2[%] exists return %', vi, colors2.exists(vi); + vi := '43243442'; + raise NOTICE 'colors2["%"] exists return %', vi, colors2.exists(vi); +end; +/ +call array_interface_p6(); +NOTICE: colors1 is {} +NOTICE: colors1 length is 0 +NOTICE: colors1 first is +NOTICE: colors1 last is 
+NOTICE: colors2 is {} +NOTICE: colors2 length is 0 +NOTICE: colors2 first is +NOTICE: colors2 last is +NOTICE: colors1[111] exists return f +NOTICE: colors1["1"] exists return f +NOTICE: colors2[123432] exists return f +NOTICE: colors2["43243442"] exists return f + array_interface_p6 +-------------------- + +(1 row) + +-- test empty array exists interface return +create or replace procedure array_interface_p6() as +declare + type ta is table of varchar(32) index by integer; + type tb is table of integer index by integer; + colors1 ta := array[]::varchar[]; + colors2 tb := array[]::integer[]; + vi varchar2(32); +begin + raise NOTICE 'colors1 is %', colors1; + raise NOTICE 'colors1 length is %', colors1.count; + raise NOTICE 'colors1 first is %', colors1.first; + raise NOTICE 'colors1 last is %', colors1.last; + raise NOTICE 'colors2 is %', colors2; + raise NOTICE 'colors2 length is %', colors2.count; + raise NOTICE 'colors2 first is %', colors2.first; + raise NOTICE 'colors2 last is %', colors2.last; + vi := 111; + raise NOTICE 'colors1[%] exists return %', vi, colors1.exists(vi); + vi := '1'; + raise NOTICE 'colors1["%"] exists return %', vi, colors1.exists(vi); + vi := 123432; + raise NOTICE 'colors2[%] exists return %', vi, colors2.exists(vi); + vi := '43243442'; + raise NOTICE 'colors2["%"] exists return %', vi, colors2.exists(vi); +end; +/ +call array_interface_p6(); +NOTICE: colors1 is {} +NOTICE: colors1 length is 0 +NOTICE: colors1 first is +NOTICE: colors1 last is +NOTICE: colors2 is {} +NOTICE: colors2 length is 0 +NOTICE: colors2 first is +NOTICE: colors2 last is +NOTICE: colors1[111] exists return f +NOTICE: colors1["1"] exists return f +NOTICE: colors2[123432] exists return f +NOTICE: colors2["43243442"] exists return f + array_interface_p6 +-------------------- + +(1 row) + +-- test array exists interface A.B input parameter +create or replace procedure array_interface_p7() as +declare + type ta is table of varchar(32) index by varchar; + v_a ta := array[]::varchar2[]; +begin + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; + for i in 1 .. 
10 loop + v_a(i) := i; + end loop; + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; +end; +/ +call array_interface_p7(); +NOTICE: v_a is {} +NOTICE: v_a[1] is not exist +NOTICE: v_a[2] is not exist +NOTICE: v_a[3] is not exist +NOTICE: v_a[4] is not exist +NOTICE: v_a[5] is not exist +NOTICE: v_a[6] is not exist +NOTICE: v_a[7] is not exist +NOTICE: v_a[8] is not exist +NOTICE: v_a[9] is not exist +NOTICE: v_a[10] is not exist +NOTICE: v_a is {1,2,3,4,5,6,7,8,9,10} +NOTICE: v_a[1] is exist +NOTICE: v_a[2] is exist +NOTICE: v_a[3] is exist +NOTICE: v_a[4] is exist +NOTICE: v_a[5] is exist +NOTICE: v_a[6] is exist +NOTICE: v_a[7] is exist +NOTICE: v_a[8] is exist +NOTICE: v_a[9] is exist +NOTICE: v_a[10] is exist + array_interface_p7 +-------------------- + +(1 row) + +-- test array exists interface A.B input parameter +create or replace procedure array_interface_p7() as +declare + type ta is table of varchar(32) index by integer; + v_a ta := array[]::varchar2[]; +begin + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; + for i in 1 .. 10 loop + v_a(i) := i; + end loop; + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; +end; +/ +call array_interface_p7(); +NOTICE: v_a is {} +NOTICE: v_a[1] is not exist +NOTICE: v_a[2] is not exist +NOTICE: v_a[3] is not exist +NOTICE: v_a[4] is not exist +NOTICE: v_a[5] is not exist +NOTICE: v_a[6] is not exist +NOTICE: v_a[7] is not exist +NOTICE: v_a[8] is not exist +NOTICE: v_a[9] is not exist +NOTICE: v_a[10] is not exist +NOTICE: v_a is {1,2,3,4,5,6,7,8,9,10} +NOTICE: v_a[1] is exist +NOTICE: v_a[2] is exist +NOTICE: v_a[3] is exist +NOTICE: v_a[4] is exist +NOTICE: v_a[5] is exist +NOTICE: v_a[6] is exist +NOTICE: v_a[7] is exist +NOTICE: v_a[8] is exist +NOTICE: v_a[9] is exist +NOTICE: v_a[10] is exist + array_interface_p7 +-------------------- + +(1 row) + +create or replace procedure array_interface_p8() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + colors('0') := 'red'; + colors('1') := 'orange'; + colors('2') := 'yellow'; + colors('3') := 'green'; + colors('4') := 'blue'; + colors('5') := 'indigo'; + colors('6') := 'violet'; + colors('7') := 'c8'; + colors('8') := 'c9'; + colors('9') := 'c10'; + colors('10') := 'c11'; + colors('11') := 'c12'; + colors('12') := 'c13'; + colors('13') := 'c14'; + colors('14') := 'c15'; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1' || '2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ +call array_interface_p8(); +NOTICE: array exist, element is yellow +NOTICE: array exist, element is c13 + array_interface_p8 +-------------------- + +(1 row) + +create or replace procedure array_interface_p8() as +declare + type ta is table of varchar(32) index by 
integer; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + colors(0) := 'red'; + colors(1) := 'orange'; + colors(2) := 'yellow'; + colors(3) := 'green'; + colors(4) := 'blue'; + colors(5) := 'indigo'; + colors(6) := 'violet'; + colors(7) := 'c8'; + colors(8) := 'c9'; + colors(9) := 'c10'; + colors(10) := 'c11'; + colors(11) := 'c12'; + colors(12) := 'c13'; + colors(13) := 'c14'; + colors(14) := 'c15'; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1' || '2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ +call array_interface_p8(); +NOTICE: array exist, element is yellow +NOTICE: array exist, element is c13 + array_interface_p8 +-------------------- + +(1 row) + +create or replace procedure array_interface_p9() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet']; + colors('1') := 'red'; + colors('2') := 'orange'; + colors('3') := 'yellow'; + colors('4') := 'green'; + colors('5') := 'blue'; + colors('6') := 'indigo'; + colors('7') := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.count(); + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.first(); + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.last(); + for i in colors.first .. colors.last loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count loop + raise NOTICE '%', colors[i]; + end loop; + for i in colors.first() .. colors.last() loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. 
colors.count() loop + raise NOTICE '%', colors[i]; + end loop; + colors.delete('7'); + raise NOTICE '%', colors; + colors.delete('1'); + raise NOTICE '%', colors; + colors.delete('13424'); + raise NOTICE '%', colors; + colors.delete(); + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.next('1'); + raise NOTICE '%', colors.prior('1'); + raise NOTICE '%', colors; + colors.delete('1'); + raise NOTICE '%', colors; +end; +/ +call array_interface_p9(); +NOTICE: {red,orange,yellow,green,blue,indigo,violet} +NOTICE: 7 +NOTICE: 7 +NOTICE: 1 +NOTICE: 1 +NOTICE: 7 +NOTICE: 7 +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: {red,orange,yellow,green,blue,indigo} +NOTICE: {orange,yellow,green,blue,indigo} +NOTICE: {orange,yellow,green,blue,indigo} +NOTICE: {} +NOTICE: 0 +NOTICE: +NOTICE: +NOTICE: +NOTICE: +NOTICE: {} +NOTICE: {} + array_interface_p9 +-------------------- + +(1 row) + +create or replace procedure array_interface_p9() as +declare + type ta is table of varchar(32) index by integer; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet']; + colors(1) := 'red'; + colors(2) := 'orange'; + colors(3) := 'yellow'; + colors(4) := 'green'; + colors(5) := 'blue'; + colors(6) := 'indigo'; + colors(7) := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.count(); + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.first(); + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.last(); + for i in colors.first .. colors.last loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count loop + raise NOTICE '%', colors[i]; + end loop; + for i in colors.first() .. colors.last() loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. 
colors.count() loop + raise NOTICE '%', colors[i]; + end loop; + colors.delete(7); + raise NOTICE '%', colors; + colors.delete(1); + raise NOTICE '%', colors; + colors.delete(13424); + raise NOTICE '%', colors; + colors.delete(); + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.next(1); + raise NOTICE '%', colors.prior(1); + raise NOTICE '%', colors; + colors.delete(1); + raise NOTICE '%', colors; +end; +/ +call array_interface_p9(); +NOTICE: {red,orange,yellow,green,blue,indigo,violet} +NOTICE: 7 +NOTICE: 7 +NOTICE: 1 +NOTICE: 1 +NOTICE: 7 +NOTICE: 7 +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: {red,orange,yellow,green,blue,indigo} +NOTICE: {orange,yellow,green,blue,indigo} +NOTICE: {orange,yellow,green,blue,indigo} +NOTICE: {} +NOTICE: 0 +NOTICE: +NOTICE: +NOTICE: +NOTICE: +NOTICE: {} +NOTICE: {} + array_interface_p9 +-------------------- + +(1 row) + +declare +type ta is table of varchar2(10) index by varchar2; +va ta; +var varchar(10); +begin +va('a1') = 'a'; +va('a2') = 'b'; +va('a3') = 'c'; +va('aaa') = 'd'; +var = 'a'; +raise notice '%' , va.exists('a'||'2'); +raise notice '%' , va.exists('a'||'4'); +if va.exists('a'|| var ||'a') then +raise NOTICE 'aaa exists'; +else +raise NOTICE 'not exists'; +end if; +raise notice '%' , va.next('a'||'2'); +raise notice '%' , va.prior('a'||'2'); +raise notice '%' , va(va.prior('a'||'2')); +raise notice '%' , va(va.first()); +raise notice '%' , va(va.last()); +end; +/ +NOTICE: t +NOTICE: f +NOTICE: aaa exists +NOTICE: a3 +NOTICE: a1 +NOTICE: a +NOTICE: a +NOTICE: d +declare +type ta is table of varchar2(10) index by integer; +va ta; +var varchar(10); +begin +va(11) = 'a'; +va(12) = 'b'; +va(13) = 'c'; +va('111') = 'd'; +var = '1'; +raise notice '%' , va.exists('1'||'2'); +raise notice '%' , va.exists('1'||'4'); +if va.exists('1'|| var ||'1') then +raise NOTICE '111 exists'; +else +raise NOTICE 'not exists'; +end if; +raise notice '%' , va.next('1'||'2'); +raise notice '%' , va.prior('1'||'2'); +raise notice '%' , va(va.prior('1'||'2')); +raise notice '%' , va(va.first()); +raise notice '%' , va(va.last()); +end; +/ +NOTICE: t +NOTICE: f +NOTICE: 111 exists +NOTICE: 13 +NOTICE: 11 +NOTICE: a +NOTICE: a +NOTICE: d +declare + type t_arr is table of number index by varchar2(20); + v_arr t_arr; +begin + if v_arr.exists('1'||'12')=false then + raise info 'not exists'; + end if; +end; +/ +INFO: not exists +declare + type t_arr is table of number index by integer; + v_arr t_arr; +begin + if v_arr.exists(12)=false then + raise info 'not exists'; + end if; +end; +/ +INFO: not exists +declare + type t_arr is varray(10) of number; + v_arr t_arr; +begin + if v_arr.exists(12)=false then + raise info 'not exists'; + end if; +end; +/ +INFO: not exists +create or replace procedure indexbychar1() +as +type ta is table of varchar2(10) index by varchar2; +va ta; +var varchar(10); +begin +va('a1') = 'a'; +va('a2') = 'b'; +va('a3') = 'c'; +va('aaa') = 'd'; +var = 'a'; +raise notice '%' , va.exists('a'||'2'); +raise notice '%' , va.exists('a'||'4'); +if va.exists('a'|| var ||'a') then 
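+-- exists/next/prior accept any expression as the key, so concatenations
+-- such as 'a' || var || 'a' are evaluated before the lookup is made.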
+raise NOTICE 'aaa exists'; +else +raise NOTICE 'not exists'; +end if; +raise notice '%' , va.next('a'||'2'); +raise notice '%' , va.prior('a'||'2'); +raise notice '%' , va(va.prior('a'||'2')); +raise notice '%' , va(va.first()); +raise notice '%' , va(va.last()); +raise notice '%' , va.count; +va.delete(); +raise notice '%' , va.count; +raise notice '%' , va(va.last()); +exception when others then +raise notice 'delete all the array items'; +for i in 1..10 loop +va(i) := 'va'||(i::varchar(2)); +END LOOP; +raise notice '%', va.COUNT; +raise notice '%', va(1); +raise notice '%', va(10); +raise notice 'first%', va.FIRST; +raise notice 'last%', va.LAST; +raise notice '%', va; +raise notice '%', va(va.LAST); +raise notice '%', va(va.NEXT(va.FIRST)); +raise notice '%', va(va.PRIOR(va.LAST)); +end; +/ +call indexbychar1(); +NOTICE: t +NOTICE: f +NOTICE: aaa exists +NOTICE: a3 +NOTICE: a1 +NOTICE: a +NOTICE: a +NOTICE: d +NOTICE: 4 +NOTICE: 0 +NOTICE: delete all the array items +NOTICE: 10 +NOTICE: va1 +NOTICE: va10 +NOTICE: first1 +NOTICE: last9 +NOTICE: {va1,va2,va3,va4,va5,va6,va7,va8,va9,va10} +NOTICE: va9 +NOTICE: va10 +NOTICE: va8 + indexbychar1 +-------------- + +(1 row) + +create table pkgtbl085 (c1 int,c2 number,c3 varchar2(30),c4 clob,c5 text,c6 blob); +insert into pkgtbl085 values(1,1,'var1','clob1','text1','bb1'); +insert into pkgtbl085 values(2,2,'var2','clob2','text2','bb2'); +insert into pkgtbl085 values(3,3,'var3','clob3','text3','bb3'); +--I2.table of index by varchar2(20) +create or replace package pkg085 +as +type ty1 is table of varchar2(20) index by varchar2(20); +type ty2 is record (c1 number,c2 pkgtbl085%rowtype); +procedure p1(); +end pkg085; +/ +create or replace package body pkg085 +as +procedure p1() +is +va ty2; +vb ty1; +numcount int; +begin +for i in 1..3 loop +select c3 into va.c2.c3 from pkgtbl085 where c1=i; +if va.c2.c3 is not null then +vb(va.c2.c3)=va.c2.c3; +end if; +end loop; +raise info 'vb is %',vb; +raise info 'vb.count is %',vb.count; +raise info 'vb(va.c2,c3) is %',vb(va.c2.c3); +raise info 'va.c2.c3 is %',va.c2.c3; +raise info 'vb.prior(va.c2.c3) is %',vb.prior(va.c2.c3); +raise info 'vb.prior(var3) is %',vb.prior('var3'); +va.c2.c3='var1'; +raise info 'vb.next(va.c2.c3) is %',vb.next(va.c2.c3); +raise info 'vb.exists(va.c2.c3) is %',vb.exists(va.c2.c3); +if vb.exists(va.c2.c3) then +raise notice 'true'; +end if; +end; +end pkg085; +/ +call pkg085.p1(); +INFO: vb is {var1,var2,var3} +INFO: vb.count is 3 +INFO: vb(va.c2,c3) is var3 +INFO: va.c2.c3 is var3 +INFO: vb.prior(va.c2.c3) is var2 +INFO: vb.prior(var3) is var2 +INFO: vb.next(va.c2.c3) is var2 +INFO: vb.exists(va.c2.c3) is t +NOTICE: true + p1 +---- + +(1 row) + +drop package pkg085; +NOTICE: drop cascades to function plpgsql_array_interface_indexby.p1() +drop table pkgtbl085; +create or replace procedure array_interface_p10() as +declare + type ta is table of varchar(32) index by varchar; + c1 ta; + c2 ta; +begin + if c1.first = c2.first then + null; + end if; +end; +/ +ERROR: do not support more than 2 table of index by variables call functions in an expr at or near "c2" +LINE 7: if c1.first = c2.first then + ^ +QUERY: +declare + type ta is table of varchar(32) index by varchar; + c1 ta; + c2 ta; +begin + if c1.first = c2.first then + null; + end if; +end +create or replace procedure array_interface_p10() as +declare + type ta is table of varchar(32) index by varchar; + c1 ta; + c2 ta; +begin + raise info '%', c1.next(c2.first); +end; +/ +ERROR: do not support more than 2 table of index by 
variables call functions in an expr at or near "c2" +LINE 7: raise info '%', c1.next(c2.first); + ^ +QUERY: +declare + type ta is table of varchar(32) index by varchar; + c1 ta; + c2 ta; +begin + raise info '%', c1.next(c2.first); +end +create or replace procedure tableof_delete_1() +is +type ty4 is table of integer index by varchar; +pv4 ty4; +begin +pv4('1'):=2; +pv4('-1'):=-1; +pv4.delete('-1'); +raise info '%', pv4('1'); +raise info '%', pv4('-1'); +raise info '%', pv4; +end; +/ +call tableof_delete_1(); +INFO: 2 +INFO: +INFO: {2} + tableof_delete_1 +------------------ + +(1 row) + +create or replace procedure tableof_delete_2() +is +type ty4 is table of integer index by integer; +pv4 ty4; +begin +pv4(1):=2; +pv4(-1):=-1; +pv4.delete(-1); +raise info '%', pv4(1); +raise info '%', pv4(-1); +raise info '%', pv4; +end; +/ +call tableof_delete_2(); +INFO: 2 +INFO: +INFO: {2} + tableof_delete_2 +------------------ + +(1 row) + +create or replace procedure tableof_delete_3() +is +type ty4 is varray(10) of integer; +pv4 ty4; +begin +pv4(1):=2; +pv4(-1):=-1; +pv4(-2):=-2; +raise info '%', pv4; +pv4.delete(-1); +raise info '%', pv4(1); +raise info '%', pv4(-1); +raise info '%', pv4; +end; +/ +call tableof_delete_3(); +INFO: [-2:1]={-2,-1,NULL,2} +INFO: +INFO: +INFO: [-2:0]={-2,NULL,2} + tableof_delete_3 +------------------ + +(1 row) + +create or replace procedure tableof_delete_4() +is +type ty4 is varray(10) of integer; +pv4 ty4; +begin +pv4(4):=2; +pv4(3):=-1; +pv4(2):=-2; +raise info '%', pv4; +pv4.delete(3); +raise info '%', pv4(4); +raise info '%', pv4; +end; +/ +call tableof_delete_4(); +INFO: [2:4]={-2,-1,2} +INFO: +INFO: [2:3]={-2,2} + tableof_delete_4 +------------------ + +(1 row) + +create or replace procedure tableof_delete_5() +is +type ty4 is varray(10) of integer; +pv4 ty4; +a integer; +begin +a = 1; +pv4(1):=2; +pv4(-1):=-1; +pv4(-2):=-2; +raise info '%', pv4; +pv4.delete(-a); +raise info '%', pv4(1); +raise info '%', pv4(-1); +raise info '%', pv4; +end; +/ +call tableof_delete_5(); +INFO: [-2:1]={-2,-1,NULL,2} +INFO: +INFO: +INFO: [-2:0]={-2,NULL,2} + tableof_delete_5 +------------------ + +(1 row) + +-- clean up -- +drop schema if exists plpgsql_array_interface_indexby cascade; +NOTICE: drop cascades to 15 other objects +DETAIL: drop cascades to function array_interface_p1() +drop cascades to function array_interface_p2() +drop cascades to function array_interface_p3() +drop cascades to function array_interface_p4() +drop cascades to function array_interface_p5() +drop cascades to function array_interface_p6() +drop cascades to function array_interface_p7() +drop cascades to function array_interface_p8() +drop cascades to function array_interface_p9() +drop cascades to function indexbychar1() +drop cascades to function tableof_delete_1() +drop cascades to function tableof_delete_2() +drop cascades to function tableof_delete_3() +drop cascades to function tableof_delete_4() +drop cascades to function tableof_delete_5() diff --git a/src/test/regress/expected/arrayinterface_single.out b/src/test/regress/expected/arrayinterface_single.out new file mode 100644 index 000000000..ce8c7e60f --- /dev/null +++ b/src/test/regress/expected/arrayinterface_single.out @@ -0,0 +1,922 @@ +-- FOR VARRAY INTERFACE -- + +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + + +-- create new schema -- +drop schema if exists plpgsql_array_interface; +NOTICE: schema "plpgsql_array_interface" does not exist, skipping +create schema 
plpgsql_array_interface; +set current_schema = plpgsql_array_interface; + +-- test array interface count -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange',null,'','green','blue','indigo','violet']; + colors1 int[] := array[1,2,null,3,'',4,null,5,6,null,null,7,8]; + colors2 varchar[] := array['red','orange','null','green','blue',null,'indigo','violet']; +begin + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors2; + raise NOTICE '%', colors2.count; + raise NOTICE '%', colors1; + raise NOTICE '%', colors1.count; +end; +/ + +call array_interface_p1(); +NOTICE: {red,orange,NULL,NULL,green,blue,indigo,violet} +NOTICE: 8 +NOTICE: {red,orange,"null",green,blue,NULL,indigo,violet} +NOTICE: 8 +NOTICE: {1,2,NULL,3,NULL,4,NULL,5,6,NULL,NULL,7,8} +NOTICE: 13 + array_interface_p1 +-------------------- + +(1 row) + + +-- test array interface count -- +create or replace procedure array_interface_p2() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; + colors1 int[] := array[1,2,3,4,5,6,7,8]; +begin + raise NOTICE '%', colors; + colors[1] := null; + colors[4] := null; + colors[6] := ''; + raise NOTICE '%', colors; + raise NOTICE '%', colors1; + colors1[1] := null; + colors1[4] := null; + colors1[6] := ''; + raise NOTICE '%', colors1; +end; +/ + +call array_interface_p2(); +NOTICE: {red,orange,green,blue,indigo,violet} +NOTICE: {NULL,orange,green,NULL,indigo,NULL} +NOTICE: {1,2,3,4,5,6,7,8} +NOTICE: {NULL,2,3,NULL,5,NULL,7,8} + array_interface_p2 +-------------------- + +(1 row) + + +-- test array interface exists -- +create or replace procedure array_interface_p3() +as +declare + colors varchar[] := array[null,'red','orange',null,'green','','blue',null,'indigo','violet',null]; + ind int := 1; + colors1 varchar[] := array['null','red','orange',null,'green','blue',null,'indigo','violet',null]; +begin + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + for ind in 1..colors.last + loop + raise NOTICE '%', colors[ind]; + raise NOTICE '%', colors.exists(ind); + if colors.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; + + raise NOTICE '--------------------colors1--------------------------'; + raise NOTICE '%', colors1; + for ind in 1 .. 
colors1.last + loop + raise NOTICE '%', colors1[ind]; + raise NOTICE '%', colors1.exists(ind); + if colors1.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; +end; +/ + +call array_interface_p3(); +NOTICE: --------------------colors-------------------------- +NOTICE: {NULL,red,orange,NULL,green,NULL,blue,NULL,indigo,violet,NULL} +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: red +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: orange +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: green +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: blue +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: indigo +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: violet +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: --------------------colors1-------------------------- +NOTICE: {"null",red,orange,NULL,green,blue,NULL,indigo,violet,NULL} +NOTICE: null +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: red +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: orange +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: green +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: blue +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: indigo +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: violet +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- + array_interface_p3 +-------------------- + +(1 row) + + +-- test array interface exists -- +create or replace procedure array_interface_p4() +as +declare + colors int[] := array[1,2,'',3,4,null,5,6,7,8]; + ind int := 1; + colors1 int[] := array[null,1,2,3,4,null,5,6,'',7,8,null]; +begin + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + for ind in 1 .. colors.last + loop + raise NOTICE '%', colors[ind]; + raise NOTICE '%', colors.exists(ind); + if colors.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; + + raise NOTICE '--------------------colors1--------------------------'; + raise NOTICE '%', colors1; + for ind in 1 .. 
colors1.last + loop + raise NOTICE '%', colors1[ind]; + raise NOTICE '%', colors1.exists(ind); + if colors1.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; +end; +/ + +call array_interface_p4(); +NOTICE: --------------------colors-------------------------- +NOTICE: {1,2,NULL,3,4,NULL,5,6,7,8} +NOTICE: 1 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 2 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 3 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 4 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 5 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 6 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 7 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 8 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: --------------------colors1-------------------------- +NOTICE: {NULL,1,2,3,4,NULL,5,6,NULL,7,8,NULL} +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 1 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 2 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 3 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 4 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 5 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 6 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 7 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: 8 +NOTICE: t +NOTICE: exists +NOTICE: ---------------- +NOTICE: +NOTICE: t +NOTICE: exists +NOTICE: ---------------- + array_interface_p4 +-------------------- + +(1 row) + + +-- test array interface first and last -- +create or replace procedure array_interface_p5() +as +declare + colors1 varchar[] := array['red','orange',null,'green','','blue']; + colors2 varchar[] := array['red','orange',null,'green','blue',null]; + colors3 varchar[] := array[null,'red','orange',null,'green','blue']; + colors4 int[] := array[null,1,2,3,4,null,5,6,7,8,null,'']; +begin + raise NOTICE '---------colors1---------'; + raise NOTICE '%', colors1; + raise NOTICE 'colors1 first number: %', colors1.first; + raise NOTICE 'colors1 first: %', colors1[colors1.first]; + raise NOTICE 'colors1 last number: %', colors1.last; + raise NOTICE 'colors1 last: %', colors1[colors1.last]; + + raise NOTICE '---------colors2---------'; + raise NOTICE '%', colors2; + raise NOTICE 'colors2 first number: %', colors2.first; + raise NOTICE 'colors2 first: %', colors2[colors2.first]; + raise NOTICE 'colors2 last number: %', colors2.last; + raise NOTICE 'colors2 last: %', colors2[colors2.last]; + + raise NOTICE '---------colors3---------'; + raise NOTICE '%', colors3; + raise NOTICE 'colors3 first number: %', colors3.first; + raise NOTICE 'colors3 first: %', colors3[colors3.first]; + raise NOTICE 'colors3 last number: %', colors3.last; + raise NOTICE 'colors3 last: %', colors3[colors3.last]; + + raise NOTICE '---------colors4---------'; + raise NOTICE '%', colors4; + raise NOTICE 'colors4 first number: %', colors4.first; + raise NOTICE 'colors4 first: %', colors4[colors4.first]; + raise NOTICE 'colors4 last number: %', colors4.last; + raise NOTICE 'colors4 last: %', colors4[colors4.last]; +end; +/ + 
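A condensed, standalone illustration of the interfaces exercised here — a sketch written for this commentary, not part of the patch, with illustrative identifiers:

```
-- first/last give the index bounds; next/prior return NULL at the edges,
-- and a NULL subscript yields NULL rather than an error.
declare
    colors varchar[] := array['red', 'orange', 'blue'];
begin
    raise notice 'first=% last=%', colors.first, colors.last;            -- 1, 3
    raise notice 'before first: %', colors[colors.prior(colors.first)];  -- empty (NULL)
    raise notice 'after last: %', colors[colors.next(colors.last)];      -- empty (NULL)
end;
/
```

The call that follows prints the same boundary behavior for the `colors1`–`colors4` arrays declared above.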
+call array_interface_p5(); +NOTICE: ---------colors1--------- +NOTICE: {red,orange,NULL,green,NULL,blue} +NOTICE: colors1 first number: 1 +NOTICE: colors1 first: red +NOTICE: colors1 last number: 6 +NOTICE: colors1 last: blue +NOTICE: ---------colors2--------- +NOTICE: {red,orange,NULL,green,blue,NULL} +NOTICE: colors2 first number: 1 +NOTICE: colors2 first: red +NOTICE: colors2 last number: 6 +NOTICE: colors2 last: +NOTICE: ---------colors3--------- +NOTICE: {NULL,red,orange,NULL,green,blue} +NOTICE: colors3 first number: 1 +NOTICE: colors3 first: +NOTICE: colors3 last number: 6 +NOTICE: colors3 last: blue +NOTICE: ---------colors4--------- +NOTICE: {NULL,1,2,3,4,NULL,5,6,7,8,NULL,NULL} +NOTICE: colors4 first number: 1 +NOTICE: colors4 first: +NOTICE: colors4 last number: 12 +NOTICE: colors4 last: + array_interface_p5 +-------------------- + +(1 row) + + +-- next&prior +create or replace procedure array_interface_p6() +as +declare + colors1 varchar[] := array['red','orange',null,'green','blue','','indigo','violet']; + colors2 int[]:=array[1,2,3,null,4,5,6,'',7,8]; + colors3 int[]:=array[null,1,2,3,null,4,5,'',6,7,8,null]; + ind int := 1; +begin + raise NOTICE '--------------------colors1---------------------'; + raise NOTICE '%', colors1; + for ind in 1 .. colors1.last + loop + raise NOTICE 'current is: %', colors1[ind]; + raise NOTICE 'next number is: %', colors1.next(ind); + raise NOTICE 'next is: %', colors1[colors1.next(ind)]; + raise NOTICE 'prior number is: %', colors1.prior(ind); + raise NOTICE 'prior is: %', colors1[colors1.prior(ind)]; + raise NOTICE '-------'; + end loop; + + raise NOTICE '--------------------colors2---------------------'; + raise NOTICE '%', colors2; + for ind in 1 .. colors2.last + loop + raise NOTICE 'current is: %', colors2[ind]; + raise NOTICE 'next number is: %', colors2.next(ind); + raise NOTICE 'next is: %', colors2[colors2.next(ind)]; + raise NOTICE 'prior number is: %', colors2.prior(ind); + raise NOTICE 'prior is: %', colors2[colors2.prior(ind)]; + raise NOTICE '-------'; + end loop; + raise NOTICE '--------------------colors3---------------------'; + raise NOTICE '%', colors3; + for ind in 1 .. 
colors3.last + loop + raise NOTICE 'current is: %', colors3[ind]; + raise NOTICE 'next number is: %', colors3.next(ind); + raise NOTICE 'next is: %', colors3[colors3.next(ind)]; + raise NOTICE 'prior number is: %', colors3.prior(ind); + raise NOTICE 'prior is: %', colors3[colors3.prior(ind)]; + raise NOTICE '-------'; + end loop; +end; +/ + +call array_interface_p6(); +NOTICE: --------------------colors1--------------------- +NOTICE: {red,orange,NULL,green,blue,NULL,indigo,violet} +NOTICE: current is: red +NOTICE: next number is: 2 +NOTICE: next is: orange +NOTICE: prior number is: +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: orange +NOTICE: next number is: 3 +NOTICE: next is: +NOTICE: prior number is: 1 +NOTICE: prior is: red +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: 4 +NOTICE: next is: green +NOTICE: prior number is: 2 +NOTICE: prior is: orange +NOTICE: ------- +NOTICE: current is: green +NOTICE: next number is: 5 +NOTICE: next is: blue +NOTICE: prior number is: 3 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: blue +NOTICE: next number is: 6 +NOTICE: next is: +NOTICE: prior number is: 4 +NOTICE: prior is: green +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: 7 +NOTICE: next is: indigo +NOTICE: prior number is: 5 +NOTICE: prior is: blue +NOTICE: ------- +NOTICE: current is: indigo +NOTICE: next number is: 8 +NOTICE: next is: violet +NOTICE: prior number is: 6 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: violet +NOTICE: next number is: +NOTICE: next is: +NOTICE: prior number is: 7 +NOTICE: prior is: indigo +NOTICE: ------- +NOTICE: --------------------colors2--------------------- +NOTICE: {1,2,3,NULL,4,5,6,NULL,7,8} +NOTICE: current is: 1 +NOTICE: next number is: 2 +NOTICE: next is: 2 +NOTICE: prior number is: +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 2 +NOTICE: next number is: 3 +NOTICE: next is: 3 +NOTICE: prior number is: 1 +NOTICE: prior is: 1 +NOTICE: ------- +NOTICE: current is: 3 +NOTICE: next number is: 4 +NOTICE: next is: +NOTICE: prior number is: 2 +NOTICE: prior is: 2 +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: 5 +NOTICE: next is: 4 +NOTICE: prior number is: 3 +NOTICE: prior is: 3 +NOTICE: ------- +NOTICE: current is: 4 +NOTICE: next number is: 6 +NOTICE: next is: 5 +NOTICE: prior number is: 4 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 5 +NOTICE: next number is: 7 +NOTICE: next is: 6 +NOTICE: prior number is: 5 +NOTICE: prior is: 4 +NOTICE: ------- +NOTICE: current is: 6 +NOTICE: next number is: 8 +NOTICE: next is: +NOTICE: prior number is: 6 +NOTICE: prior is: 5 +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: 9 +NOTICE: next is: 7 +NOTICE: prior number is: 7 +NOTICE: prior is: 6 +NOTICE: ------- +NOTICE: current is: 7 +NOTICE: next number is: 10 +NOTICE: next is: 8 +NOTICE: prior number is: 8 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 8 +NOTICE: next number is: +NOTICE: next is: +NOTICE: prior number is: 9 +NOTICE: prior is: 7 +NOTICE: ------- +NOTICE: --------------------colors3--------------------- +NOTICE: {NULL,1,2,3,NULL,4,5,NULL,6,7,8,NULL} +NOTICE: current is: +NOTICE: next number is: 2 +NOTICE: next is: 1 +NOTICE: prior number is: +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 1 +NOTICE: next number is: 3 +NOTICE: next is: 2 +NOTICE: prior number is: 1 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 2 +NOTICE: next number is: 4 +NOTICE: next is: 3 +NOTICE: prior number is: 2 +NOTICE: prior is: 1 +NOTICE: 
------- +NOTICE: current is: 3 +NOTICE: next number is: 5 +NOTICE: next is: +NOTICE: prior number is: 3 +NOTICE: prior is: 2 +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: 6 +NOTICE: next is: 4 +NOTICE: prior number is: 4 +NOTICE: prior is: 3 +NOTICE: ------- +NOTICE: current is: 4 +NOTICE: next number is: 7 +NOTICE: next is: 5 +NOTICE: prior number is: 5 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 5 +NOTICE: next number is: 8 +NOTICE: next is: +NOTICE: prior number is: 6 +NOTICE: prior is: 4 +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: 9 +NOTICE: next is: 6 +NOTICE: prior number is: 7 +NOTICE: prior is: 5 +NOTICE: ------- +NOTICE: current is: 6 +NOTICE: next number is: 10 +NOTICE: next is: 7 +NOTICE: prior number is: 8 +NOTICE: prior is: +NOTICE: ------- +NOTICE: current is: 7 +NOTICE: next number is: 11 +NOTICE: next is: 8 +NOTICE: prior number is: 9 +NOTICE: prior is: 6 +NOTICE: ------- +NOTICE: current is: 8 +NOTICE: next number is: 12 +NOTICE: next is: +NOTICE: prior number is: 10 +NOTICE: prior is: 7 +NOTICE: ------- +NOTICE: current is: +NOTICE: next number is: +NOTICE: next is: +NOTICE: prior number is: 11 +NOTICE: prior is: 8 +NOTICE: ------- + array_interface_p6 +-------------------- + +(1 row) + + +-- test empty array exists interface return +create or replace procedure array_interface_p7() +as +declare + colors1 varchar[] := array[]::varchar[]; + colors2 integer[]:= array[]::integer[]; + vi varchar2(32); +begin + raise NOTICE 'colors1 is %', colors1; + raise NOTICE 'colors2 is %', colors2; + vi := 111; + raise NOTICE 'colors1[%] exists return %', vi, colors1.exists(vi); + vi := '1'; + raise NOTICE 'colors1["%"] exists return %', vi, colors1.exists(vi); + vi := 123432; + raise NOTICE 'colors2[%] exists return %', vi, colors2.exists(vi); + vi := '43243442'; + raise NOTICE 'colors2["%"] exists return %', vi, colors2.exists(vi); +end; +/ + +call array_interface_p7(); +NOTICE: colors1 is {} +NOTICE: colors2 is {} +NOTICE: colors1[111] exists return f +NOTICE: colors1["1"] exists return f +NOTICE: colors2[123432] exists return f +NOTICE: colors2["43243442"] exists return f + array_interface_p7 +-------------------- + +(1 row) + + +-- test array exists interface string input parameter +create or replace procedure array_interface_p8() +as +declare + colors1 varchar2[] := array['11', '12', '13']; + line varchar[]:=array['--------------------------------']; + chk boolean := false; +begin + raise NOTICE'%', colors1; + chk := colors.exists(2); + raise NOTICE'check exists return %', chk; +end; +/ +ERROR: schema "colors" does not exist +CONTEXT: compilation of PL/pgSQL function "array_interface_p8" near line 7 + +--call array_interface_p8(); + +-- test array exists interface A.B input parameter +create or replace procedure array_interface_p9() +as +declare + v_a varchar2[] := array[]::varchar2[]; +begin + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; + v_a.extend(10); + for i in 1 .. 
10 loop + v_a(i) := i; + end loop; + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; +end; +/ + +call array_interface_p9(); +NOTICE: v_a is {} +NOTICE: v_a[1] is not exist +NOTICE: v_a[2] is not exist +NOTICE: v_a[3] is not exist +NOTICE: v_a[4] is not exist +NOTICE: v_a[5] is not exist +NOTICE: v_a[6] is not exist +NOTICE: v_a[7] is not exist +NOTICE: v_a[8] is not exist +NOTICE: v_a[9] is not exist +NOTICE: v_a[10] is not exist +NOTICE: v_a is {1,2,3,4,5,6,7,8,9,10} +NOTICE: v_a[1] is exist +NOTICE: v_a[2] is exist +NOTICE: v_a[3] is exist +NOTICE: v_a[4] is exist +NOTICE: v_a[5] is exist +NOTICE: v_a[6] is exist +NOTICE: v_a[7] is exist +NOTICE: v_a[8] is exist +NOTICE: v_a[9] is exist +NOTICE: v_a[10] is exist + array_interface_p9 +-------------------- + +(1 row) + + +create or replace procedure array_interface_p10() as +declare + colors varchar2[]; +begin + colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1' || '2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ + +call array_interface_p10(); +NOTICE: array exist, element is orange +NOTICE: array exist, element is c12 + array_interface_p10 +--------------------- + +(1 row) + + +create or replace procedure array_interface_p11() as +declare + colors varchar2[]; +begin + colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1'||'2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ + +call array_interface_p11(); +NOTICE: array exist, element is orange +NOTICE: array exist, element is c12 + array_interface_p11 +--------------------- + +(1 row) + + +create or replace procedure array_interface_p12() as +declare + colors varchar2[]; +begin + colors := array['red','orange','yellow','green','blue','indigo','violet']; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.count(); + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.first(); + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.last(); + for i in colors.first .. colors.last loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count loop + raise NOTICE '%', colors[i]; + end loop; + for i in colors.first() .. colors.last() loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. 
colors.count() loop + raise NOTICE '%', colors[i]; + end loop; + colors.extend; + raise NOTICE '%', colors; + colors.extend(); + raise NOTICE '%', colors; + colors.extend(2); + raise NOTICE '%', colors; + colors.delete; + raise NOTICE '%', colors; + colors.extend(); + raise NOTICE '%', colors; + colors.delete(); + raise NOTICE '%', colors; +end; +/ + +call array_interface_p12(); +NOTICE: {red,orange,yellow,green,blue,indigo,violet} +NOTICE: 7 +NOTICE: 7 +NOTICE: 1 +NOTICE: 1 +NOTICE: 7 +NOTICE: 7 +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: red +NOTICE: orange +NOTICE: yellow +NOTICE: green +NOTICE: blue +NOTICE: indigo +NOTICE: violet +NOTICE: {red,orange,yellow,green,blue,indigo,violet,NULL} +NOTICE: {red,orange,yellow,green,blue,indigo,violet,NULL,NULL} +NOTICE: {red,orange,yellow,green,blue,indigo,violet,NULL,NULL,NULL,NULL} +NOTICE: {} +NOTICE: {NULL} +NOTICE: {} + array_interface_p12 +--------------------- + +(1 row) + + + +-- clean up -- +drop schema if exists plpgsql_array_interface cascade; +NOTICE: drop cascades to 11 other objects +DETAIL: drop cascades to function array_interface_p1() +drop cascades to function array_interface_p2() +drop cascades to function array_interface_p3() +drop cascades to function array_interface_p4() +drop cascades to function array_interface_p5() +drop cascades to function array_interface_p6() +drop cascades to function array_interface_p7() +drop cascades to function array_interface_p9() +drop cascades to function array_interface_p10() +drop cascades to function array_interface_p11() +drop cascades to function array_interface_p12() diff --git a/src/test/regress/expected/arrayinterface_ted.out b/src/test/regress/expected/arrayinterface_ted.out new file mode 100644 index 000000000..b346a93f9 --- /dev/null +++ b/src/test/regress/expected/arrayinterface_ted.out @@ -0,0 +1,468 @@ +-- FOR VARRAY INTERFACE -- + +-- check compatibility -- +show sql_compatibility; + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_array_interface_ted; +NOTICE: schema "plpgsql_array_interface_ted" does not exist, skipping +create schema plpgsql_array_interface_ted; +set current_schema = plpgsql_array_interface_ted; + +-- test array interface extend trim and delete -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange',null,'blue']; + colors1 varchar[] := array['red','blue']; +begin + raise NOTICE'%',colors; + colors.extend; + colors.extend(1); + raise NOTICE'%',colors; + colors.trim; + colors.trim(1); + raise NOTICE'%',colors; + colors.delete; + raise NOTICE'%',colors; + colors1.delete; + raise NOTICE'%',colors1; +end; +/ + +call array_interface_p1(); +NOTICE: {red,orange,NULL,blue} +NOTICE: {red,orange,NULL,blue,NULL,NULL} +NOTICE: {red,orange,NULL,blue} +NOTICE: {} +NOTICE: {} + array_interface_p1 +-------------------- + +(1 row) + + +-- test array interface extend trim and delete -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red']; + colors1 varchar[] := array['red','blue']; +begin + raise NOTICE'%',colors; + colors.EXTEND; + colors.EXTEND(3); + raise NOTICE'%',colors; + colors.TRIM; + colors.TRIM(1); + raise NOTICE'%',colors; + 
colors.DELETE; + raise NOTICE'%',colors; + colors1.DELETE; + raise NOTICE'%',colors1; +end; +/ + +call array_interface_p1(); +NOTICE: {red} +NOTICE: {red,NULL,NULL,NULL,NULL} +NOTICE: {red,NULL,NULL} +NOTICE: {} +NOTICE: {} + array_interface_p1 +-------------------- + +(1 row) + + +-- test array interface extend mistake format -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red']; +begin + raise NOTICE'%',colors; + colors.extend[1]; + raise NOTICE'%',colors; + colors.extend[-1]; + raise NOTICE'%',colors; +end; +/ +ERROR: syntax error at or near "[" +LINE 6: colors.extend[1]; + ^ +QUERY: +declare + colors varchar[] := array['red']; +begin + raise NOTICE'%',colors; + colors.extend[1]; + raise NOTICE'%',colors; + colors.extend[-1]; + raise NOTICE'%',colors; +end + +--call array_interface_p1(); + +-- test array interface trim mistake format -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.trim[1]; + raise NOTICE'%',colors; + colors.trim[-1]; + raise NOTICE'%',colors; +end; +/ +ERROR: syntax error at or near "[" +LINE 6: colors.trim[1]; + ^ +QUERY: +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.trim[1]; + raise NOTICE'%',colors; + colors.trim[-1]; + raise NOTICE'%',colors; +end + +--call array_interface_p1(); + +-- test array interface delete mistake format -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; + colors2 varchar[] := array['red','orange','green','blue','indigo','violet']; + colors3 varchar[] := array['red','orange','green','blue','indigo','violet']; + colors4 varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.delete(-1); + raise NOTICE'%',colors; + raise NOTICE'%',colors2; + colors2.delete(1); + raise NOTICE'%',colors2; + raise NOTICE'%',colors3; + colors3.delete(10); + raise NOTICE'%',colors3; + raise NOTICE'%',colors4; + colors4.delete[-1]; + raise NOTICE'%',colors4; +end; +/ +ERROR: syntax error at or near "[" +LINE 18: colors4.delete[-1]; + ^ +QUERY: +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; + colors2 varchar[] := array['red','orange','green','blue','indigo','violet']; + colors3 varchar[] := array['red','orange','green','blue','indigo','violet']; + colors4 varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.delete(-1); + raise NOTICE'%',colors; + raise NOTICE'%',colors2; + colors2.delete(1); + raise NOTICE'%',colors2; + raise NOTICE'%',colors3; + colors3.delete(10); + raise NOTICE'%',colors3; + raise NOTICE'%',colors4; + colors4.delete[-1]; + raise NOTICE'%',colors4; +end + +--call array_interface_p1(); + +-- test array name use special character -- +create or replace procedure array_interface_p1() +as +declare + "!arr#%" varchar[]:=array['red','orange','green','blue','indigo']; + "@*ar&" integer[]:=array[1,0,5,6,8,3,9]; +begin + raise NOTICE '%',"!arr#%"; + "!arr#%".extend(3); + raise NOTICE '%',"!arr#%"; + "!arr#%".trim(3); + raise NOTICE '%',"!arr#%"; + "@*ar&".delete; + raise NOTICE '%',"@*ar&"; +end; +/ + +call array_interface_p1(); +NOTICE: {red,orange,green,blue,indigo} +NOTICE: {red,orange,green,blue,indigo,NULL,NULL,NULL} +NOTICE: 
{red,orange,green,blue,indigo} +NOTICE: {} + array_interface_p1 +-------------------- + +(1 row) + + +-- test array name use special character mistake -- +create or replace procedure array_interface_p1() +as +declare + @*ar& integer[]:=array[1,0,5,6,8,3,9]; +begin + raise NOTICE'%',@*ar&; + @*ar&.extend(3); + raise NOTICE'%',@*ar&; +end; +/ +ERROR: syntax error at or near "@*" +LINE 3: @*ar& integer[]:=array[1,0,5,6,8,3,9]; + ^ +QUERY: +declare + @*ar& integer[]:=array[1,0,5,6,8,3,9]; +begin + raise NOTICE'%',@*ar&; + @*ar&.extend(3); + raise NOTICE'%',@*ar&; +end + +-- call array_interface_p1(); + +-- test array interface extend with large parameter -- +create or replace procedure array_interface_p1() +as +declare + arr integer[] := array[1,0,5,6,8,3,9]; +begin + raise NOTICE'%',arr; + arr.extend(10000); + raise NOTICE'%',arr.count; +end; +/ + +call array_interface_p1(); +NOTICE: {1,0,5,6,8,3,9} +NOTICE: 10007 + array_interface_p1 +-------------------- + +(1 row) + + +-- test array interface trim -- +create or replace procedure array_interface_p1() +as +declare + arr integer[] := array[1,0,5]; +begin + raise NOTICE'%',arr; + arr.trim(10); + raise NOTICE'%',arr; + end; +/ + +call array_interface_p1(); +NOTICE: {1,0,5} +NOTICE: {} + array_interface_p1 +-------------------- + +(1 row) + + +-- test array interface delete with empty array -- +create or replace procedure array_interface_p1() +as +declare + arr integer[] := array[]::integer[]; +begin + raise NOTICE'%',arr; + arr.delete; + raise NOTICE'%',arr; +end; +/ + +call array_interface_p1(); +NOTICE: {} +NOTICE: {} + array_interface_p1 +-------------------- + +(1 row) + + +-- test array interface delete missing ; -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.delete + raise NOTICE'%',colors; +end; +/ +ERROR: syntax error at or near "raise" +LINE 7: raise NOTICE'%',colors; + ^ +QUERY: +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.delete + raise NOTICE'%',colors; +end + +-- test call array interface of another package +create or replace package pck1 is + type ta is varray(10) of varchar(100); + tb ta := ta('1','2','3', '4', '5'); +end pck1; +/ + +create or replace package pck2 is + procedure proc1; + end pck2; +/ + +create or replace package body pck2 is +procedure proc1() is +begin + raise NOTICE '%',pck1.tb; + raise NOTICE '%',pck1.tb.count; + raise NOTICE '%',pck1.tb.first; + raise NOTICE '%',pck1.tb.last; + raise NOTICE '%',pck1.tb.count(); + raise NOTICE '%',pck1.tb.first(); + raise NOTICE '%',pck1.tb.last(); + for i in pck1.tb.first .. 
pck1.tb.last + loop + if pck1.tb.exists(i) then + raise NOTICE '%',pck1.tb[i]; + else + raise NOTICE ''; + end if; + end loop; + pck1.tb.extend; + raise NOTICE '%',pck1.tb; + pck1.tb.extend(); + raise NOTICE '%',pck1.tb; + pck1.tb.extend(2); + raise NOTICE '%',pck1.tb; + pck1.tb.trim; + raise NOTICE '%',pck1.tb; + pck1.tb.trim(); + raise NOTICE '%',pck1.tb; + pck1.tb.trim(2); + raise NOTICE '%',pck1.tb; + pck1.tb.delete; + raise NOTICE '%',pck1.tb; + pck1.tb.extend; + raise NOTICE '%',pck1.tb; + pck1.tb.delete(); + raise NOTICE '%',pck1.tb; +end; +end pck2; +/ + +call pck2.proc1(); +NOTICE: {1,2,3,4,5} +NOTICE: 5 +NOTICE: 1 +NOTICE: 5 +NOTICE: 5 +NOTICE: 1 +NOTICE: 5 +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 +NOTICE: 5 +NOTICE: {1,2,3,4,5,NULL} +NOTICE: {1,2,3,4,5,NULL,NULL} +NOTICE: {1,2,3,4,5,NULL,NULL,NULL,NULL} +NOTICE: {1,2,3,4,5,NULL,NULL,NULL} +NOTICE: {1,2,3,4,5,NULL,NULL} +NOTICE: {1,2,3,4,5} +NOTICE: {} +NOTICE: {NULL} +NOTICE: {} + proc1 +------- + +(1 row) + + +-- test array interface delete with index -- +create or replace procedure array_interface_p1() as +declare + array1 integer[] := array[1,2,3,4,5]; + indx integer; +begin + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; + indx := array1.first; + array1.delete(indx); + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; + array1 := array[1,2,3,4,5]; + indx := array1.last; + array1.delete(indx); + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; + array1 := array[1,2,3,4,5]; + array1.delete(3); + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; +end; +/ + +call array_interface_p1(); +NOTICE: {1,2,3,4,5} +NOTICE: 5 +NOTICE: 1 +NOTICE: 5 +NOTICE: {2,3,4,5} +NOTICE: 4 +NOTICE: 1 +NOTICE: 4 +NOTICE: {1,2,3,4} +NOTICE: 4 +NOTICE: 1 +NOTICE: 4 +NOTICE: {1,2,4,5} +NOTICE: 4 +NOTICE: 1 +NOTICE: 4 + array_interface_p1 +-------------------- + +(1 row) + + +-- clean up -- +drop package if exists pck2; +NOTICE: drop cascades to function plpgsql_array_interface_ted.proc1() +drop package if exists pck1; +drop schema if exists plpgsql_array_interface_ted cascade; +NOTICE: drop cascades to function array_interface_p1() diff --git a/src/test/regress/expected/auto_explain.out b/src/test/regress/expected/auto_explain.out new file mode 100644 index 000000000..217f020a1 --- /dev/null +++ b/src/test/regress/expected/auto_explain.out @@ -0,0 +1,732 @@ +set enable_auto_explain = false; +create or replace function data_table returns int as $$ +begin +drop table if exists course; +drop table if exists stu; +drop table if exists teacher; +create table course(cno int,name varchar); +insert into course values(1,'test1'); +insert into course values(2,'test2'); +insert into course values(3,'test2'); +create table stu(sno int, name varchar,sex varchar,cno int); +insert into stu values(1,'zhang','M',1); +insert into stu values(1,'zhang','M',2); +insert into stu values(2,'wangwei','M',2); +insert into stu values(3,'liu','F',3); +create table teacher(tno int,name varchar,sex varchar,cno int); +insert into teacher values(1,'Yang','F',1); +insert into teacher values(2,'zhang','F',2); +insert into teacher values(3,'liu','F',3); +return 1; +end; +$$ +LANGUAGE plpgsql; +select data_table(); +--?.* +CONTEXT: SQL statement "drop table if exists course" 
+PL/pgSQL function data_table() line 3 at SQL statement +referenced column: data_table +--?.* +CONTEXT: SQL statement "drop table if exists stu" +PL/pgSQL function data_table() line 4 at SQL statement +referenced column: data_table +--?.* +CONTEXT: SQL statement "drop table if exists teacher" +PL/pgSQL function data_table() line 5 at SQL statement +referenced column: data_table + data_table +------------ + 1 +(1 row) + +CREATE OR REPLACE FUNCTION course_delete_trigger() +RETURNS TRIGGER AS $$ +BEGIN + DELETE FROM teacher where teacher.cno = OLD.cno; + RETURN OLD; +END; +$$ +LANGUAGE plpgsql; +CREATE TRIGGER delete_trigger + AFTER DELETE ON course + FOR EACH ROW EXECUTE PROCEDURE course_delete_trigger(); + +CREATE OR REPLACE FUNCTION courseUpdate() + RETURNS trigger AS $$ + BEGIN + UPDATE teacher SET teacher.cno = NEW.cno where teacher.cno = NEW.cno; + UPDATE student set student.cno = NEW.cno where student.cno = NEW.cno; + END; +$$ +LANGUAGE plpgsql VOLATILE; +CREATE TRIGGER course_Update AFTER UPDATE OF "cno" ON "public"."course" +FOR EACH ROW +EXECUTE PROCEDURE "public".courseUpdate(); +create or replace function process_test() returns int as $$ +declare status int; +begin +select complicate_process() into status; +return status; +END +$$ +LANGUAGE plpgsql; +prepare get_stu_lesson(varchar) as select stu.name,course.name from stu left join course on course.cno = stu.cno where stu.name = $1; +execute get_stu_lesson('liu'); + name | name +------+------- + liu | test2 +(1 row) + +prepare get_stu_info(varchar) as select stu.name,course.name,teacher.name from stu left join course on course.cno =stu.cno left join teacher on course.cno = teacher.cno where stu.name = $1; +set auto_explain_level = notice; +set enable_auto_explain = true; +execute get_stu_info(''); +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: prepare get_stu_info(varchar) as select stu.name,course.name,teacher.name from stu left join course on course.cno =stu.cno left join teacher on course.cno = teacher.cno where stu.name = $1; +Name: datanode1 +--?.* + Output: stu.name, course.name, teacher.name +--?.* + Hash Cond: (teacher.cno = course.cno) +--?.* + Output: teacher.tno, teacher.name, teacher.sex, teacher.cno + Exec Nodes: All datanodes +--?.* + Output: stu.name, course.name, course.cno + Exec Nodes: All datanodes +--?.* + Output: stu.name, course.name, course.cno + Exec Nodes: All datanodes + Hash Cond: (course.cno = stu.cno) +--?.* + Output: course.cno, course.name + Exec Nodes: All datanodes +--?.* + Output: stu.name, stu.cno + Exec Nodes: All datanodes +--?.* + Output: stu.name, stu.cno + Exec Nodes: All datanodes + Filter: ((stu.name)::text = ($1)::text) +Parameter1 value: null + +NOTICE: +----------------------------NestLevel:0---------------------------- +--?.* + + name | name | name +------+------+------ +(0 rows) + +set enable_auto_explain = false; +create or replace function open_cursor(myCursor OUT REFCURSOR) as $$ +begin +open myCursor for select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno; +END +$$ +LANGUAGE plpgsql; +create or replace function complicate_process(status out int) as $$ +declare sql varchar; +numbers int; +declare docType varchar:='REISSUE'; +declare v_count1 int; +declare v_count2 int; +declare tt REFCURSOR; +declare teacher_name varchar; +declare stu_name varchar; +begin +status:=0; +if docType = 'REISSUE' then + select count(1) into v_count1 from stu; + select count(2) 
into v_count2 from teacher; + if v_count1>0 and v_count2>0 then + insert into stu values(4,'liu','F',1); + insert into teacher values(4,'li',4); + end if; +end if; +update teacher set tno =100 where tno = 3; +select open_cursor() into tt; +fetch next from tt into teacher_name,stu_name; +While true +loop + fetch next from tt into teacher_name,stu_name; + if found then + else + Exit ; + end if; +end loop; +status:=1; + +END +$$ +LANGUAGE plpgsql; +set auto_explain_level = notice; +set enable_auto_explain = true; +select process_test(); +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: select process_test(); +Name: datanode1 +--?.* + Output: process_test() + + +NOTICE: +QueryPlan + +----------------------------NestLevel:1---------------------------- +Query Text: select complicate_process() +Name: datanode1 +--?.* + Output: complicate_process() + + +CONTEXT: SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select count(1) from stu +Name: datanode1 +--?.* +--?.* + Exec Nodes: All datanodes +--?.* + Output: sno, name, sex, cno + Exec Nodes: All datanodes + + +CONTEXT: SQL statement "select count(1) from stu" +PL/pgSQL function complicate_process() line 13 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: SQL statement "select count(1) from stu" +PL/pgSQL function complicate_process() line 13 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select count(2) from teacher +Name: datanode1 +--?.* +--?.* + Exec Nodes: All datanodes +--?.* + Output: tno, name, sex, cno + Exec Nodes: All datanodes + + +CONTEXT: SQL statement "select count(2) from teacher" +PL/pgSQL function complicate_process() line 14 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: SQL statement "select count(2) from teacher" +PL/pgSQL function complicate_process() line 14 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: insert into stu values(4,'liu','F',1) +Name: datanode1 +--?.* +--?.* + Output: 4, 'liu'::character varying, 'F'::character varying, 1 + Exec Nodes: All datanodes + + +CONTEXT: SQL statement "insert into stu values(4,'liu','F',1)" +PL/pgSQL function complicate_process() line 16 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: 
+----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: SQL statement "insert into stu values(4,'liu','F',1)" +PL/pgSQL function complicate_process() line 16 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: insert into teacher values(4,'li',4) +Name: datanode1 +--?.* +--?.* + Output: 4, 'li'::character varying, '4'::character varying, NULL::integer + Exec Nodes: All datanodes + + +CONTEXT: SQL statement "insert into teacher values(4,'li',4)" +PL/pgSQL function complicate_process() line 17 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: SQL statement "insert into teacher values(4,'li',4)" +PL/pgSQL function complicate_process() line 17 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: update teacher set tno =100 where tno = 3 +Name: datanode1 +--?.* +--?.* + Output: 100, name, sex, cno, ctid + Exec Nodes: All datanodes + Filter: (teacher.tno = 3) + + +CONTEXT: SQL statement "update teacher set tno =100 where tno = 3" +PL/pgSQL function complicate_process() line 20 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: SQL statement "update teacher set tno =100 where tno = 3" +PL/pgSQL function complicate_process() line 20 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select open_cursor() +Name: datanode1 +--?.* + Output: open_cursor() + + +CONTEXT: SQL statement "select open_cursor()" +PL/pgSQL function complicate_process() line 21 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: SQL statement "select open_cursor()" +PL/pgSQL function complicate_process() line 21 at SQL statement +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno +Name: datanode1 +--?.* + Output: teacher.name, stu.name + Exec Nodes: All datanodes + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, 
course.cno + Exec Nodes: All datanodes + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name + Exec Nodes: All datanodes +--?.* + Output: teacher.name, teacher.cno + Exec Nodes: All datanodes +--?.* + Output: teacher.name, teacher.cno + Exec Nodes: All datanodes +--?.* + Output: stu.name, stu.cno + Exec Nodes: All datanodes +--?.* + Output: stu.name, stu.cno + Exec Nodes: All datanodes + + +CONTEXT: PL/pgSQL function complicate_process() line 22 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 22 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno +Name: datanode1 +--?.* + Output: teacher.name, stu.name + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, course.cno + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: stu.name, stu.cno +--?.* + Output: stu.name, stu.cno + + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno +Name: datanode1 +--?.* + Output: teacher.name, stu.name + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, course.cno + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: stu.name, stu.cno +--?.* + Output: stu.name, stu.cno + + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = 
course.cno +Name: datanode1 +--?.* + Output: teacher.name, stu.name + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, course.cno + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: stu.name, stu.cno +--?.* + Output: stu.name, stu.cno + + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno +Name: datanode1 +--?.* + Output: teacher.name, stu.name + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, course.cno + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: stu.name, stu.cno +--?.* + Output: stu.name, stu.cno + + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno +Name: datanode1 +--?.* + Output: teacher.name, stu.name + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, course.cno + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: stu.name, stu.cno +--?.* + Output: stu.name, stu.cno + + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +QueryPlan + +----------------------------NestLevel:2---------------------------- +Query Text: select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno +Name: datanode1 
+--?.* + Output: teacher.name, stu.name + Hash Cond: (course.cno = stu.cno) +--?.* + Output: teacher.name, course.cno + Hash Cond: (course.cno = teacher.cno) +--?.* + Output: course.cno, course.name +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: teacher.name, teacher.cno +--?.* + Output: stu.name, stu.cno +--?.* + Output: stu.name, stu.cno + + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:2---------------------------- +--?.* + +CONTEXT: PL/pgSQL function complicate_process() line 25 at FETCH +referenced column: complicate_process +SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:1---------------------------- +--?.* + +CONTEXT: SQL statement "select complicate_process()" +PL/pgSQL function process_test() line 4 at SQL statement +referenced column: process_test +NOTICE: +----------------------------NestLevel:0---------------------------- +--?.* + + process_test +-------------- + 1 +(1 row) + +set enable_auto_explain = false; +drop table if exists test1; +--?.* +create table test1(id number, val number); +insert into test1 values(generate_series(1,1000), generate_series(1,1000)); +create OR REPLACE function test_merge_into() returns int as $$ +declare tt REFCURSOR; +id_val int; +begin +id_val:=103; +merge into test1 t1 using (select count(*) cnt from test1 where id = id_val) t2 on (cnt <> 0) +when matched then update set val = val + 1 where id = id_val when not matched then insert values(id_val, 1); +return 1; +end; +$$ +LANGUAGE plpgsql; +set enable_auto_explain = true; +set auto_explain_level = notice; +select test_merge_into(); +NOTICE: +QueryPlan + +----------------------------NestLevel:0---------------------------- +Query Text: select test_merge_into(); +Name: datanode1 +--?.* + Output: test_merge_into() + + +NOTICE: +QueryPlan + +----------------------------NestLevel:1---------------------------- +Query Text: merge into test1 t1 using (select count(*) cnt from test1 where id = id_val) t2 on (cnt <> 0) +when matched then update set val = val + 1 where id = id_val when not matched then insert values(id_val, 1) +Name: datanode1 +--?.* + Update Cond: (t1.id = ($15)::numeric) +--?.* + Output: (count(*)), t1.id, t1.val, t1.ctid, (count(*)) + Exec Nodes: All datanodes + Join Filter: ((count(*)) <> 0) +--?.* + Output: count(*) + Exec Nodes: All datanodes +--?.* + Output: test1.id, test1.val + Exec Nodes: All datanodes + Filter: (test1.id = ($15)::numeric) + -> Seq Scan on public.test1 t1 (cost=0.00..18.69 rows=869 p-time=0 p-rows=0 width=70) + Output: t1.id, t1.val, t1.ctid + Exec Nodes: All datanodes +param1 value: 103 type: int4 + + +CONTEXT: SQL statement "merge into test1 t1 using (select count(*) cnt from test1 where id = id_val) t2 on (cnt <> 0) +when matched then update set val = val + 1 where id = id_val when not matched then insert values(id_val, 1)" +PL/pgSQL function test_merge_into() line 6 at SQL statement +referenced column: test_merge_into +NOTICE: +----------------------------NestLevel:1---------------------------- +--?.* + +CONTEXT: SQL statement "merge into test1 t1 using (select count(*) cnt from test1 where id = id_val) t2 on (cnt <> 0) +when matched then update set val = val + 1 where id = 
id_val when not matched then insert values(id_val, 1)" +PL/pgSQL function test_merge_into() line 6 at SQL statement +referenced column: test_merge_into +NOTICE: +----------------------------NestLevel:0---------------------------- +--?.* + + test_merge_into +----------------- + 1 +(1 row) + +set enable_auto_explain = false; +drop table if exists course; +drop table if exists stu; +drop table if exists teacher; +drop table if exists test1; diff --git a/src/test/regress/expected/autonomous_cursor.out b/src/test/regress/expected/autonomous_cursor.out new file mode 100644 index 000000000..f97582fc2 --- /dev/null +++ b/src/test/regress/expected/autonomous_cursor.out @@ -0,0 +1,1919 @@ +-- test for autonomous transaction with out ref cursor param +create schema pl_auto_ref; +set current_schema to pl_auto_ref; +-- 1. (a) base use, no commit +create table t1(a int,b number(3),c varchar2(20),d clob,e blob,f text); +insert into t1 values (1,100,'var1','clob1','1234abd1','text1'); +insert into t1 values (2,200,'var2','clob2','1234abd2','text2'); +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: 0 +INFO: (1,100,var1,clob1,1234ABD1,text1) +INFO: rowcount: 1 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 1. (b) base use, fetch before return +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +fetch c1 into va; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: 1 +INFO: (2,200,var2,clob2,1234ABD2,text2) +INFO: rowcount: 2 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 2. base use, commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +commit; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: 0 +INFO: (1,100,var1,clob1,1234ABD1,text1) +INFO: rowcount: 1 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 2. 
(a) base use, commit, and error +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +vb int; +begin +open c1 for select * from t1; +commit; +vb := 3/0; +end; +end pck1; +/ +call pck1.p1(); +ERROR: ERROR: division by zero +CONTEXT: SQL statement "SELECT 3/0" +PL/pgSQL function p2() line 8 at assignment +referenced column: p2 + +CONTEXT: SQL statement "CALL p2(c1)" +PL/pgSQL function p1() line 5 at SQL statement +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 2. base use, fetch before commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +fetch c1 into va; +commit; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: 1 +INFO: (2,200,var2,clob2,1234ABD2,text2) +INFO: rowcount: 2 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 3. cursor not use +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +null; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: +ERROR: cursor variable "c1" is null in FETCH statement. +CONTEXT: PL/pgSQL function p1() line 7 at FETCH +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 4. (a) cursor close after open, no commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +close c1; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: +ERROR: cursor variable "c1" is null in FETCH statement. +CONTEXT: PL/pgSQL function p1() line 7 at FETCH +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +-- 4. 
(b) cursor close after open, commit
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+close c1;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount:
+ERROR: cursor variable "c1" is null in FETCH statement.
+CONTEXT: PL/pgSQL function p1() line 7 at FETCH
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- 5. nested call, not supported yet
+-- (a) p1->p2->p3, p2,p3 auto
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c2 out sys_refcursor);
+procedure p3 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+
+procedure p2 (c2 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+p3(c2);
+end;
+
+procedure p3 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+call pck1.p1();
+ERROR: ERROR: nested call of ref cursor out param for autonomous transaction procedure is not supported yet.
+DETAIL: N/A
+CONTEXT: PL/pgSQL function p2() during function exit
+referenced column: p2
+
+CONTEXT: SQL statement "CALL p2(c1)"
+PL/pgSQL function p1() line 5 at SQL statement
+drop package pck1;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+drop cascades to function pl_auto_ref.p3()
+-- (b) p1->p2->p3, p2 auto
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c2 out sys_refcursor);
+procedure p3 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+
+procedure p2 (c2 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+p3(c2);
+end;
+
+procedure p3 (c1 out sys_refcursor) as
+--PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+call pck1.p1();
+ERROR: ERROR: nested call of ref cursor out param for autonomous transaction procedure is not supported yet.
+DETAIL: N/A
+CONTEXT: PL/pgSQL function p2() during function exit
+referenced column: p2
+
+CONTEXT: SQL statement "CALL p2(c1)"
+PL/pgSQL function p1() line 5 at SQL statement
+drop package pck1;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+drop cascades to function pl_auto_ref.p3()
+-- (c) p1->p2->p3, p3 auto
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c2 out sys_refcursor);
+procedure p3 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+
+procedure p2 (c2 out sys_refcursor) as
+--PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+p3(c2);
+end;
+
+procedure p3 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount: 0
+INFO: (1,100,var1,clob1,1234ABD1,text1)
+INFO: rowcount: 1
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+drop cascades to function pl_auto_ref.p3()
+-- 6. exception cases
+-- (a).1 exception before open inside the autonomous transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 3/0;
+open c1 for select * from t1;
+commit;
+exception when division_by_zero then
+commit;
+return;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount:
+ERROR: cursor variable "c1" is null in FETCH statement.
+CONTEXT: PL/pgSQL function p1() line 7 at FETCH
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (a).2 exception after open inside the autonomous transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+open c1 for select * from t1;
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount:
+ERROR: cursor variable "c1" is null in FETCH statement.
+CONTEXT: PL/pgSQL function p1() line 7 at FETCH
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (a).3 exception in an anonymous block inside the autonomous transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 0;
+begin
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount:
+ERROR: cursor variable "c1" is null in FETCH statement.
+CONTEXT: PL/pgSQL function p1() line 7 at FETCH
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (a).4 exception in an anonymous block inside the autonomous transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 0;
+open c1 for select * from t1;
+commit;
+begin
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount: 0
+INFO: (1,100,var1,clob1,1234ABD1,text1)
+INFO: rowcount: 1
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (a).5 exception in an anonymous block inside the autonomous transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 0;
+open c1 for select * from t1;
+begin
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount: 0
+INFO: (1,100,var1,clob1,1234ABD1,text1)
+INFO: rowcount: 1
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (b).1 exception in the main transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vb int;
+begin
+p2(c1);
+vb := 3/0;
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+exception when division_by_zero then
+close c1;
+commit;
+return;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+call pck1.p1();
+ p1
+----
+
+(1 row)
+
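+-- A minimal recap sketch of case (b).1 above (names taken from that test,
+-- not executed here): the caller's handler can still close the out cursor,
+-- because the autonomous callee opened it and committed before the main
+-- transaction raised.
+--   begin
+--     p2(c1);     -- autonomous callee: open c1 ... commit
+--     vb := 3/0;  -- error raised in the main transaction
+--   exception when division_by_zero then
+--     close c1;   -- the out cursor is still valid here
+--   end;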
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- 7. commit, rollback and savepoint inside the autonomous transaction
+-- (a) commit, savepoint and rollback before open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+commit;
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+savepoint s1;
+rollback to s1;
+open c1 for select * from t1;
+fetch c1 into va;
+end;
+end pck1;
+/
+truncate table t1;
+call pck1.p1();
+INFO: rowcount: 1
+INFO: (2,200,var2,clob2,1234ABD2,text2)
+INFO: rowcount: 2
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (b) rollback before open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+commit;
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+savepoint s1;
+open c1 for select * from t1;
+fetch c1 into va;
+rollback to s1;
+end;
+end pck1;
+/
+truncate table t1;
+call pck1.p1();
+INFO: rowcount:
+ERROR: cursor variable "c1" is null in FETCH statement.
+CONTEXT: PL/pgSQL function p1() line 7 at FETCH
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- (c) rollback after open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+commit;
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+open c1 for select * from t1;
+savepoint s1;
+fetch c1 into va;
+rollback to s1;
+end;
+end pck1;
+/
+truncate table t1;
+call pck1.p1();
+INFO: rowcount: 1
+INFO: (2,200,var2,clob2,1234ABD2,text2)
+INFO: rowcount: 2
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
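+-- A short reading of case 7 above (hedged summary sketch, not executed
+-- here): the out ref cursor survives unless a rollback reverts past its OPEN.
+--   (a) savepoint s1; rollback to s1; open c1;  -- cursor usable in caller
+--   (b) savepoint s1; open c1; rollback to s1;  -- caller sees c1 as null
+--   (c) open c1; savepoint s1; rollback to s1;  -- cursor usable in caller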
+-- 8. multi param
+create table t1_test(a int, b int, c int);
+create table t2_test(a int, b varchar2(10));
+insert into t1_test values(1,2,3);
+insert into t1_test values(4,5,6);
+insert into t2_test values(1,'aaa');
+insert into t2_test values(2,'bbb');
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 in int, c2 in int, c3 out sys_refcursor, c4 out int, c5 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 int;
+c2 int;
+c3 sys_refcursor;
+c4 int;
+c5 sys_refcursor;
+v1 t1_test;
+v2 t2_test;
+begin
+p2(c1,c2,c3,c4,c5);
+raise info 'c3 rowcount: %', c3%rowcount;
+fetch c3 into v1;
+raise info 'c3: %', v1;
+raise info 'c3: rowcount: %', c3%rowcount;
+close c3;
+raise info 'c5 rowcount: %', c5%rowcount;
+fetch c5 into v2;
+raise info 'c5: %', v2;
+raise info 'c5: rowcount: %', c5%rowcount;
+close c5;
+end;
+procedure p2 (c1 in int, c2 in int, c3 out sys_refcursor, c4 out int, c5 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1_test;
+vb t2_test;
+begin
+c4 := 4;
+open c3 for select * from t1_test;
+open c5 for select * from t2_test;
+fetch c3 into va;
+fetch c5 into vb;
+commit;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: c3 rowcount: 1
+INFO: c3: (4,5,6)
+INFO: c3: rowcount: 2
+INFO: c5 rowcount: 1
+INFO: c5: (2,bbb)
+INFO: c5: rowcount: 2
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2(integer,integer)
+drop table t1_test;
+drop table t2_test;
+-- 9. overloaded autonomous transaction procedures, including overloaded private procedures
+drop table t1;
+create table t1(a int,b number(3),c varchar2(20),d clob,e blob,f text);
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+-- 9.(a) public autonomous transaction procedures with the same name
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+procedure p2(c1 int,c2 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vn int;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+p2(vn,c1);
+loop
+fetch c1 into vc;
+exit when c1%notfound;
+raise info 'c1 rowcount %',c1%rowcount;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1 order by t1;
+end;
+procedure p2(c1 int,c2 out sys_refcursor) as
+pragma autonomous_transaction;
+begin
+open c2 for select * from t1;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: rowcount: 0
+INFO: (1,100,var1,clob1,1234ABD1,text1)
+INFO: rowcount: 1
+INFO: (2,200,var2,clob2,1234ABD2,text2)
+INFO: rowcount: 2
+INFO: (2,200,var2,clob2,1234ABD2,text2)
+INFO: rowcount: 2
+INFO: c1 rowcount 1
+INFO: c1 rowcount 2
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+drop cascades to function pl_auto_ref.p2(integer)
+-- 9.(b) autonomous transaction procedures called in sequence
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+procedure p3(c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vn int;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound; +end loop; +close c1; +p3(c1); +loop +fetch c1 into vc; +raise info 'p3 %',vc; +raise info 'p3 rowcount %',c1%rowcount; +exit when c1%notfound; +end loop; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1 order by t1; +end; +procedure p3(c1 out sys_refcursor) as +pragma autonomous_transaction; +c2 int; +begin +open c1 for select * from t1 order by t1 desc; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: 0 +INFO: (1,100,var1,clob1,1234ABD1,text1) +INFO: rowcount: 1 +INFO: (2,200,var2,clob2,1234ABD2,text2) +INFO: rowcount: 2 +INFO: (2,200,var2,clob2,1234ABD2,text2) +INFO: rowcount: 2 +INFO: p3 (2,200,var2,clob2,1234ABD2,text2) +INFO: p3 rowcount 1 +INFO: p3 (1,100,var1,clob1,1234ABD1,text1) +INFO: p3 rowcount 2 +INFO: p3 (1,100,var1,clob1,1234ABD1,text1) +INFO: p3 rowcount 2 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +drop cascades to function pl_auto_ref.p3() +-- 9.(c)顺序调用自治事务以及非自治事务 +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +procedure p3(c1 out sys_refcursor); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +vn int; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +loop +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +exit when c1%notfound; +end loop; +close c1; +p3(c1); +loop +fetch c1 into vc; +raise info 'p3 %',vc; +raise info 'p3 rowcount %',c1%rowcount; +exit when c1%notfound; +end loop; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1 order by t1; +end; +procedure p3(c1 out sys_refcursor) as +c2 int; +begin +open c1 for select * from t1 order by t1 desc; +end; +end pck1; +/ +call pck1.p1(); +INFO: rowcount: 0 +INFO: (1,100,var1,clob1,1234ABD1,text1) +INFO: rowcount: 1 +INFO: (2,200,var2,clob2,1234ABD2,text2) +INFO: rowcount: 2 +INFO: (2,200,var2,clob2,1234ABD2,text2) +INFO: rowcount: 2 +INFO: p3 (2,200,var2,clob2,1234ABD2,text2) +INFO: p3 rowcount 1 +INFO: p3 (1,100,var1,clob1,1234ABD1,text1) +INFO: p3 rowcount 2 +INFO: p3 (1,100,var1,clob1,1234ABD1,text1) +INFO: p3 rowcount 2 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +drop cascades to function pl_auto_ref.p3() +-- 9.(d) 包外存储过程 +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ +create or replace procedure p3(c1 out sys_refcursor) +is +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +raise notice 'public.c1'; +end; +/ +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p3(c1); +raise info 'public.p2%',c1%rowcount; +loop +fetch c1 into vc; +exit when c1%notfound; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +end loop; +close c1; +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1 order by t1 desc; +end; +end pck1; +/ +call pck1.p1(); +NOTICE: public.c1 +CONTEXT: referenced column: p3 + +INFO: 
+INFO: (1,100,var1,clob1,1234ABD1,text1)
+INFO: rowcount: 1
+INFO: (2,200,var2,clob2,1234ABD2,text2)
+INFO: rowcount: 2
+INFO: rowcount: 0
+INFO: (2,200,var2,clob2,1234ABD2,text2)
+INFO: rowcount: 1
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+drop procedure p3;
+-- 10. insert/delete/select/update in autonomous transactions
+drop table t1;
+create table t1(a int);
+insert into t1 values (1);
+insert into t1 values (2);
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+update t1 set a=100;
+insert into t1 values(1000);
+commit;
+select count(*) into vb from t1;
+raise info 'vb is %',vb;
+open c1 for select * from t1;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: vb is 3
+CONTEXT: referenced column: p2
+
+INFO: rowcount: 0
+INFO: (100)
+INFO: rowcount: 1
+INFO: (100)
+INFO: rowcount: 2
+INFO: (1000)
+INFO: rowcount: 3
+INFO: (1000)
+INFO: rowcount: 3
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- 10.(b) commit after delete in p1
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+delete from t1;---delete data
+commit;
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+update t1 set a=10;
+insert into t1 values(8);
+commit;
+select count(*) into vb from t1;
+raise info 'vb is %',vb;
+open c1 for select * from t1;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: vb is 1
+CONTEXT: referenced column: p2
+
+INFO: rowcount: 0
+INFO: (8)
+INFO: rowcount: 1
+INFO: (8)
+INFO: rowcount: 1
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
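+-- A minimal sketch of the session split behind the counts above (illustrative
+-- only; "p_auto" is a hypothetical name): the autonomous procedure runs in its
+-- own session, so it counts only rows the caller had already committed plus
+-- its own changes, e.g. vb = 1 in 10.(b) after the caller's committed delete:
+--   create or replace procedure p_auto as
+--   PRAGMA AUTONOMOUS_TRANSACTION;
+--   vb int;
+--   begin
+--     update t1 set a=10;             -- sees only the caller's committed rows
+--     insert into t1 values(8);
+--     commit;
+--     select count(*) into vb from t1;
+--   end;
+--   /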
+-- 11. cross-package calls
+drop table t1_test;
+ERROR: table "t1_test" does not exist
+create table t1_test(a int, b int, c int);
+insert into t1_test values(1,2,3);
+insert into t1_test values(4,5,6);
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c3 sys_refcursor;
+v1 t1_test;
+begin
+p2(c3);
+raise info 'c3 rowcount: %', c3%rowcount;
+fetch c3 into v1;
+raise info 'c3: %', v1;
+raise info 'c3: rowcount: %', c3%rowcount;
+close c3;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+if c1%isopen then
+ close c1;
+ raise notice 'cursor is open';
+else
+ open c1 for select * from t1_test;
+ raise info 'cursor is close';
+end if;
+end;
+end pck1;
+/
+create or replace package pck2 as
+procedure p1();
+procedure p2(c1 out sys_refcursor);
+end pck2;
+/
+create or replace package body pck2 as
+procedure p1 as
+c1 sys_refcursor;
+v1 t1_test;
+begin
+ pck1.p2(c1);
+ loop
+ fetch c1 into v1;
+ exit when c1%notfound;
+ raise info 'v1 is %',v1;
+ raise info 'c1 rowcount is %',c1%rowcount;
+ end loop;
+ close c1;
+ end;
+ procedure p2(c1 out sys_refcursor) as
+ PRAGMA AUTONOMOUS_TRANSACTION;
+ c2 sys_refcursor;
+ va t1_test;
+ begin
+ pck1.p2(c2);
+ loop
+ fetch c2 into va;
+ exit when c2%notfound;
+ raise info 'va is %',va;
+ raise info 'c2 rowcount %',c2%rowcount;
+ end loop;
+ close c2;
+ end;
+end pck2;
+/
+call pck1.p1();
+INFO: cursor is close
+CONTEXT: referenced column: p2
+
+INFO: c3 rowcount: 0
+INFO: c3: (1,2,3)
+INFO: c3: rowcount: 1
+ p1
+----
+
+(1 row)
+
+call pck2.p1();
+INFO: cursor is close
+CONTEXT: referenced column: p2
+
+INFO: v1 is (1,2,3)
+INFO: c1 rowcount is 1
+INFO: v1 is (4,5,6)
+INFO: c1 rowcount is 2
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+drop package pck2;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1()
+drop cascades to function pl_auto_ref.p2()
+-- 12. autonomous transaction calling another autonomous transaction
+drop table t1_test;
+create table t1_test(a int primary key,b number(3),c varchar2(20),d clob,e blob,f text) partition by range(a)(partition p1 values less than(10),partition p2 values less than(20),partition p3 values less than(maxvalue));
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_test_pkey" for table "t1_test"
+insert into t1_test values (1,100,'var1','clob1','1234abd1','text1');
+insert into t1_test values (2,200,'var2','clob2','1234abd2','text2');
+insert into t1_test values (11,100,'var1','clob1','1234abd1','text1');
+insert into t1_test values (12,200,'var2','clob2','1234abd2','text2');
+insert into t1_test values (21,100,'var1','clob1','1234abd1','text1');
+insert into t1_test values (32,200,'var2','clob2','1234abd2','text2');
+create or replace package pck1 as
+procedure p1(c1 t1_test);
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1(c1 t1_test) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+c2 sys_refcursor;
+begin
+p2(c2);
+loop
+fetch c2 into c1;
+exit when c2%notfound;
+raise info 'c2 rowcount is %',c2%rowcount;
+raise info 'c1 is %',c1;
+end loop;
+end;
+procedure p2(c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+open c1 for select * from t1_test where a<15;
+end;
+end pck1;
+/
+call pck1.p1((1,100,'var1','clob1','1234abd1','text1'));
+INFO: c2 rowcount is 1
+CONTEXT: referenced column: p1
+
+INFO: c1 is (1,100,var1,clob1,1234ABD1,text1)
+CONTEXT: referenced column: p1
+
+INFO: c2 rowcount is 2
+CONTEXT: referenced column: p1
+
+INFO: c1 is (2,200,var2,clob2,1234ABD2,text2)
+CONTEXT: referenced column: p1
+
+INFO: c2 rowcount is 3
+CONTEXT: referenced column: p1
+
+INFO: c1 is (11,100,var1,clob1,1234ABD1,text1)
+CONTEXT: referenced column: p1
+
+INFO: c2 rowcount is 4
+CONTEXT: referenced column: p1
+
+INFO: c1 is (12,200,var2,clob2,1234ABD2,text2)
+CONTEXT: referenced column: p1
+
+ p1
+----
+
+(1 row)
+
+drop package pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function pl_auto_ref.p1(t1_test)
+drop cascades to function pl_auto_ref.p2()
+-- 13.(a) main transaction commit/rollback
+drop table t1;
+create table t1(a int);
+insert into t1 values(1);
+insert into t1 values(2);
+create or replace procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+end;
+/
+CREATE OR REPLACE PROCEDURE check1(a int) AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+p2(c1);
+fetch c1 into vc;
+while c1%found loop
+raise notice 'isopen:%',c1%isopen;
+raise notice 'found:%',c1%found;
+raise notice 'ans;%', vc;
+fetch c1 into vc;
+insert into t1 values(a);
+if vc.a > 1 then
+commit;
+else
+rollback;
+end if;
+end loop;
+close c1;
+END;
+/
+call check1(1);
+NOTICE: isopen:t
+NOTICE: found:t
+NOTICE: ans;(1)
+NOTICE: isopen:t
+NOTICE: found:t
+NOTICE: ans;(2)
+ check1
+--------
+
+(1 row)
+
+DROP procedure check1;
+-- 13.(b) savepoint
+truncate table t1;
+CREATE OR REPLACE PROCEDURE check4(a int) AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+ insert into t1 values (1);
+ insert into t1 values (2);
+ insert into t1 values (1);
+ insert into t1 values (2);
+ commit;
+ p2(c1);
+ fetch c1 into vc;
+ savepoint sp1;
+ raise notice 'found:%',c1%found;
+ raise notice 'isopen:%',c1%isopen;
+ raise notice 'rowcount:%',c1%rowcount;
+ while c1%found loop
+ raise notice 'isopen:%',c1%isopen;
+ raise notice 'found:%',c1%found;
+ raise notice 'ans;%', vc;
+ fetch c1 into vc;
+ insert into t1 values(a);
+ if vc.a > 1 then
+ commit;
+ savepoint sp1;
+ raise '%',1/0;
+ else
+ rollback to sp1;
+ end if;
+ end loop;
+ close c1;
+ exception
+ when others then
+ raise notice 'exception';
+ fetch c1 into vc;
+ raise notice 'isopen:%',c1%isopen;
+ raise notice 'found:%',c1%found;
+ raise notice 'ans;%', vc;
+ close c1;
+
+END;
+/
+call check4(5);
+NOTICE: found:t
+NOTICE: isopen:t
+NOTICE: rowcount:1
+NOTICE: isopen:t
+NOTICE: found:t
+NOTICE: ans;(1)
+NOTICE: exception
+NOTICE: isopen:t
+NOTICE: found:t
+NOTICE: ans;(1)
+ check4
+--------
+
+(1 row)
+
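+-- Compact sketch of what 13.(a)/13.(b) establish (illustrative, not executed):
+-- commit, rollback, and rollback-to-savepoint in the main transaction do not
+-- invalidate a ref cursor that an autonomous procedure opened, so fetching
+-- can continue afterwards:
+--   p2(c1);            -- autonomous procedure opens c1
+--   fetch c1 into vc;
+--   rollback;          -- main-transaction rollback
+--   fetch c1 into vc;  -- still valid; c1%isopen remains true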
+-- 13.(c) before cursor
+truncate table t1;
+insert into t1 values(1);
+insert into t1 values(2);
+CREATE OR REPLACE PROCEDURE check6(a int) AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+insert into t1 values(a);
+savepoint aa;
+p2(c1);
+fetch c1 into vc;
+while c1%found loop
+raise notice 'isopen:%',c1%isopen;
+raise notice 'found:%',c1%found;
+raise notice 'ans;%', vc;
+fetch c1 into vc;
+end loop;
+rollback to aa;
+close c1;
+END;
+/
+call check6(3);
+NOTICE: isopen:t
+NOTICE: found:t
+NOTICE: ans;(1)
+NOTICE: isopen:t
+NOTICE: found:t
+NOTICE: ans;(2)
+ check6
+--------
+
+(1 row)
+
+drop procedure check6;
+drop procedure p2;
+-- test call auto procedure at last
+create or replace procedure p4 as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va int;
+begin
+va := 1;
+commit;
+end;
+/
+create or replace procedure p3(c3 out sys_refcursor) as
+begin
+open c3 for select * from t1;
+end;
+/
+create or replace procedure p2(c2 out sys_refcursor) as
+begin
+p3(c2);
+p4();
+raise info 'p2:%',c2;
+end;
+/
+create or replace procedure p1() as
+c1 sys_refcursor;
+begin
+p2(c1);
+raise info 'p1:%',c1;
+end;
+/
+call p1();
+INFO: p2:
+CONTEXT: SQL statement "CALL p2(c1)"
+PL/pgSQL function p1() line 4 at SQL statement
+INFO: p1:
+ p1
+----
+
+(1 row)
+
+drop procedure p4;
+drop procedure p3;
+drop procedure p2;
+drop procedure p1;
+-- test only in param procedure
+create or replace procedure out_refcursor_t2_u1_a(c1 in sys_refcursor)
+ as PRAGMA AUTONOMOUS_TRANSACTION;
+ begin
+ open c1 for select id from count_info;
+ end;
+/
+declare
+ c1 sys_refcursor;
+ v1 int;
+ begin
+ out_refcursor_t2_u1_a(c1);
+ fetch c1 into v1;
+ end;
+/
+ERROR: Un-support:ref_cursor parameter is not supported for autonomous transactions.
+CONTEXT: SQL statement "CALL out_refcursor_t2_u1_a(c1)"
+PL/pgSQL function inline_code_block line 4 at PERFORM
+drop procedure out_refcursor_t2_u1_a;
+-- test deadlock caused by autonomous session
+create type type001 as(c1 number(7,2),c2 varchar(30));
+drop table if exists t2_test;
+NOTICE: table "t2_test" does not exist, skipping
+create table t2_test(a int,b number(3), c varchar2(20),d clob,e blob,f text,g type001);
+insert into t2_test values (1,100,'var1','clob1','1234abd1','text1',(1.00,'aaa'));
+insert into t2_test values (2,200,'var2','clob2','1234abd2','text2',(2.00,'bbb'));
+create or replace package pck1 as procedure p1; procedure p2 (c1 out sys_refcursor); end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t2_test;
+begin
+delete from t2_test;---delete data
+pg_sleep(30);
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t2_test;
+vb int;
+begin
+update t2_test set a=10;
+insert into t2_test values(8);
+commit;
+select count(*) into vb from t2_test;
+raise info 'vb is %',vb;
+open c1 for select * from t2_test;
+end;
+end pck1;
+/
+call pck1.p1();
+ERROR: ERROR: deadlock detected
+--?*
+--?*
+HINT: See server log for query details.
+CONTEXT: SQL statement "update t2_test set a=10" +PL/pgSQL function p2() line 6 at SQL statement +referenced column: p2 + +CONTEXT: SQL statement "CALL p2(c1)" +PL/pgSQL function p1() line 7 at SQL statement +select * from t2_test; + a | b | c | d | e | f | g +---+-----+------+-------+----------+-------+------------ + 1 | 100 | var1 | clob1 | 1234ABD1 | text1 | (1.00,aaa) + 2 | 200 | var2 | clob2 | 1234ABD2 | text2 | (2.00,bbb) +(2 rows) + +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.p1() +drop cascades to function pl_auto_ref.p2() +drop table t2_test; +drop type type001; +-- test dynquery sql when open cursor +drop table count_info; +ERROR: table "count_info" does not exist +drop table refcursor_info; +ERROR: table "refcursor_info" does not exist +create table count_info (id bigserial primary key,count int,info text); +NOTICE: CREATE TABLE will create implicit sequence "count_info_id_seq" for serial column "count_info.id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "count_info_pkey" for table "count_info" +create table refcursor_info (v varchar,info varchar); +insert into count_info (count,info) values (1,'a'),(2,'b'),(3,'c'),(4,'d'); +create or replace package out_refcursor_029_pkg_t1 IS + procedure out_refcursor_029_t1(cur1 out sys_refcursor); + procedure invoke(); + end out_refcursor_029_pkg_t1; +/ +create or replace package body out_refcursor_029_pkg_t1 as + procedure out_refcursor_029_t1(cur1 out sys_refcursor) + as PRAGMA AUTONOMOUS_TRANSACTION; + begin + open cur1 for 'select count,info from count_info where count<:c' using 4; + end; + procedure invoke() is + declare + c1 sys_refcursor; + v1 int; + v2 text; + tmp_v1 int; + tmp_v2 varchar; + begin + out_refcursor_029_t1(c1); + if c1%ISOPEN then + LOOP + FETCH c1 INTO v1,v2; + tmp_v1:=c1%ROWCOUNT; + tmp_v2:=v1||v2||c1%FOUND; + insert into refcursor_info values (tmp_v1,tmp_v2); + EXIT WHEN C1%NOTFOUND; + END LOOP; + end if; + tmp_v1:=c1%ROWCOUNT; + tmp_v2:=to_char(c1%ISOPEN)||to_char(c1%FOUND); + insert into refcursor_info values (tmp_v1,tmp_v2); + close c1; + tmp_v1:=c1%ROWCOUNT; + tmp_v2:=to_char(c1%ISOPEN)||to_char(c1%FOUND)||to_char(c1%NOTFOUND); + insert into refcursor_info values (tmp_v1,tmp_v2); + end; + end out_refcursor_029_pkg_t1; +/ +call out_refcursor_029_pkg_t1.invoke(); + invoke +-------- + +(1 row) + +select * from refcursor_info; + v | info +---+--------- + 1 | 1atrue + 2 | 2btrue + 3 | 3ctrue + 3 | 3cfalse + 3 | 10 + | 0 +(6 rows) + +drop table refcursor_info; +drop table count_info; +drop package out_refcursor_029_pkg_t1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pl_auto_ref.out_refcursor_029_t1() +drop cascades to function pl_auto_ref.invoke() +-- test cursor assign value (should error) +drop table t1; +create table t1 (a int, b int); +create or replace procedure p1 ( out sys_refcursor) +as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +$1 := 'abc'; +end; +/ +declare +va sys_refcursor; +begin +p1(va); +end; +/ +ERROR: ERROR: autonomous procedure out ref cursor parameter can not be assigned +DETAIL: procedure "p1()" out parameter "$1" not support to be assigned value +CONTEXT: PL/pgSQL function p1() line 4 at assignment +referenced column: p1 + +CONTEXT: SQL statement "CALL p1(va)" +PL/pgSQL function inline_code_block line 4 at SQL statement +create or replace procedure p1 (va out sys_refcursor) +as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +va := 'abc'; +end; +/ +declare +va sys_refcursor; +begin +p1(va); 
+end; +/ +ERROR: ERROR: autonomous procedure out ref cursor parameter can not be assigned +DETAIL: procedure "p1()" out parameter "va" not support to be assigned value +CONTEXT: PL/pgSQL function p1() line 4 at assignment +referenced column: p1 + +CONTEXT: SQL statement "CALL p1(va)" +PL/pgSQL function inline_code_block line 4 at SQL statement +drop table t1; +drop procedure p1; +-- test function with ref cursor +CREATE OR REPLACE function f1( C2 out SYS_REFCURSOR) +LANGUAGE plpgsql +AS $$ +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin + return 1; + END; +$$; +ERROR: Autonomous function do not support ref cursor as return types or out, inout arguments. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "f1" near line 4 +CREATE OR REPLACE function f1( ) returns SYS_REFCURSOR +LANGUAGE plpgsql +AS $$ +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin + return 1; + END; +$$; +ERROR: Autonomous function do not support ref cursor as return types or out, inout arguments. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "f1" near line 4 +CREATE OR REPLACE function f1( C2 out SYS_REFCURSOR, C1 out INT) +LANGUAGE plpgsql +AS $$ +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin + null; + END; +$$; +ERROR: Autonomous function do not support ref cursor as return types or out, inout arguments. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "f1" near line 4 +CREATE OR REPLACE function f1( ) returns SYS_REFCURSOR +LANGUAGE plpgsql +AS $$ +declare +begin + return 1; + END; +$$; +drop function f1(); +-- clean +drop schema pl_auto_ref cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table t1_test +drop cascades to function check4(integer) diff --git a/src/test/regress/expected/autonomous_test.out b/src/test/regress/expected/autonomous_test.out index fabe9d2c4..47e5c0d60 100644 --- a/src/test/regress/expected/autonomous_test.out +++ b/src/test/regress/expected/autonomous_test.out @@ -1728,3 +1728,282 @@ DROP PROCEDURE p1; DROP PROCEDURE p2; DROP PROCEDURE p3; DROP PACKAGE pck2; +-- 8. multi auto procedure (session will reuse) +create or replace package pck1 as +type r1 is record(a int, b int); +va int; +procedure p1; +procedure p2; +procedure p3; +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +begin +va := 1; +raise info 'before p2: %', va; +p2(); +raise info 'after p2: %', va; +va := 123; +p3(); +raise info 'after p3: %', va; +end; + +procedure p2 as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +raise info 'in p2: %', va; +va := 11; +end; +procedure p3 as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +raise info 'in p3: %', va; +end; +end pck1; +/ +call pck1.p1(); +INFO: before p2: 1 +INFO: in p2: 1 +CONTEXT: referenced column: p2 + +INFO: after p2: 11 +INFO: in p3: 123 +CONTEXT: referenced column: p3 + +INFO: after p3: 123 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function public.p1() +drop cascades to function public.p2() +drop cascades to function public.p3() +-- 9. 
autonomous session calls another autonomous procedure
+create or replace package pkg1 IS
+va int:=1;
+function f1(num1 int) return int;
+end pkg1;
+/
+create or replace package body pkg1 as
+function f1(num1 int) return int
+is
+declare
+PRAGMA AUTONOMOUS_TRANSACTION;
+re_int int;
+begin
+raise notice 'just in f1, pkg.va:%',va;
+va:=va+num1;
+raise notice 'in f1, pkg.va:%',va;
+re_int = 1;
+return re_int;
+end;
+end pkg1;
+/
+create or replace function f2(num1 int) return int
+is
+declare PRAGMA AUTONOMOUS_TRANSACTION;
+re_int int;
+begin
+pkg1.va = 111;
+raise notice 'before f1: pkg.va: %',pkg1.va;
+re_int = pkg1.f1(num1);
+raise notice 'after f1: pkg.va: %', pkg1.va;
+return re_int;
+end;
+/
+select f2(10);
+NOTICE: before f1: pkg.va: 111
+CONTEXT: referenced column: f2
+
+NOTICE: just in f1, pkg.va:111
+CONTEXT: referenced column: f1
+
+
+NOTICE: in f1, pkg.va:121
+CONTEXT: referenced column: f1
+
+
+NOTICE: after f1: pkg.va: 121
+CONTEXT: referenced column: f2
+
+ f2
+----
+ 1
+(1 row)
+
+drop function f2;
+drop package pkg1;
+NOTICE: drop cascades to function public.f1(integer)
+-- 9. autonomous session calls another normal procedure
+create or replace package pkg1 IS
+va int:=1;
+function f1(num1 int) return int;
+end pkg1;
+/
+create or replace package body pkg1 as
+function f1(num1 int) return int
+is
+declare
+re_int int;
+begin
+raise notice 'just in f1, pkg.va:%',va;
+va:=va+num1;
+raise notice 'in f1, pkg.va:%',va;
+re_int = 1;
+return re_int;
+end;
+end pkg1;
+/
+create or replace function f2(num1 int) return int
+is
+declare PRAGMA AUTONOMOUS_TRANSACTION;
+re_int int;
+begin
+pkg1.va = 111;
+raise notice 'before f1: pkg.va: %',pkg1.va;
+re_int = pkg1.f1(num1);
+raise notice 'after f1: pkg.va: %', pkg1.va;
+return re_int;
+end;
+/
+select f2(10);
+NOTICE: before f1: pkg.va: 111
+CONTEXT: referenced column: f2
+
+NOTICE: just in f1, pkg.va:111
+CONTEXT: PL/pgSQL function f2(integer) line 7 at assignment
+referenced column: f2
+
+NOTICE: in f1, pkg.va:121
+CONTEXT: PL/pgSQL function f2(integer) line 7 at assignment
+referenced column: f2
+
+NOTICE: after f1: pkg.va: 121
+CONTEXT: referenced column: f2
+
+ f2
+----
+ 1
+(1 row)
+
+drop function f2;
+drop package pkg1;
+NOTICE: drop cascades to function public.f1(integer)
+-- auto procedure calls normal procedure (auto procedure without package)
+create or replace package autonomous_pkg_setup IS
+count_public int:=1;
+end autonomous_pkg_setup;
+/
+create or replace package body autonomous_pkg_setup as
+ count_private int :=1;
+end autonomous_pkg_setup;
+/
+create or replace procedure out_015(num1 int)
+is
+declare
+va int:=30;
+re_int int;
+begin
+autonomous_pkg_setup.count_public = autonomous_pkg_setup.count_public + va;
+raise info 'in out_015,autonomous_pkg_setup.count_public:%', autonomous_pkg_setup.count_public;
+end;
+/
+create or replace procedure app015_1()
+is
+declare PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+out_015(1);
+end;
+/
+call app015_1();
+INFO: in out_015,autonomous_pkg_setup.count_public:31
+CONTEXT: SQL statement "CALL out_015(1)"
+PL/pgSQL function app015_1() line 4 at PERFORM
+referenced column: app015_1
+
+ app015_1
+----------
+
+(1 row)
+
+call app015_1();
+INFO: in out_015,autonomous_pkg_setup.count_public:61
+CONTEXT: SQL statement "CALL out_015(1)"
+PL/pgSQL function app015_1() line 4 at PERFORM
+referenced column: app015_1
+
+ app015_1
+----------
+
+(1 row)
+
+drop procedure app015_1;
+drop procedure out_015;
+drop package autonomous_pkg_setup;
+-- 10.
package var same name with function param +create or replace package pck1 IS +va int:=1; +procedure p1(va int,vb int); +end pck1; +/ +create or replace package body pck1 as +vb int :=1; +procedure p1(va int,vb int) +is +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin +va:=pck1.va+va; +pck1.vb:=pck1.vb+vb; +raise info 'in p1, va : %', va; +raise info 'in p1, vb : %', vb; +raise info 'in p1, pck1.va : %', pck1.va; +raise info 'in p1, pck1.vb : %', pck1.vb; +end; +begin +va := 2; +vb := 2; +end pck1; +/ +call pck1.p1(10,20); +INFO: in p1, va : 12 +CONTEXT: referenced column: p1 + +INFO: in p1, vb : 20 +CONTEXT: referenced column: p1 + +INFO: in p1, pck1.va : 2 +CONTEXT: referenced column: p1 + +INFO: in p1, pck1.vb : 22 +CONTEXT: referenced column: p1 + + p1 +---- + +(1 row) + +call pck1.p1(10,20); +INFO: in p1, va : 12 +CONTEXT: referenced column: p1 + +INFO: in p1, vb : 20 +CONTEXT: referenced column: p1 + +INFO: in p1, pck1.va : 2 +CONTEXT: referenced column: p1 + +INFO: in p1, pck1.vb : 42 +CONTEXT: referenced column: p1 + + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to function public.p1(integer,integer) diff --git a/src/test/regress/expected/bypass_simplequery_support.out b/src/test/regress/expected/bypass_simplequery_support.out index b976c7e22..f5bd4a9b9 100755 --- a/src/test/regress/expected/bypass_simplequery_support.out +++ b/src/test/regress/expected/bypass_simplequery_support.out @@ -827,6 +827,11 @@ select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 limit 3 offs 3 | 3 | (3 rows) +select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 limit 3 offset 30; + col1 | col2 | col3 +------+------+------ +(0 rows) + explain select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset 3; QUERY PLAN -------------------------------------------------------------------------------------------------------- @@ -847,6 +852,16 @@ explain select * from test_bypass_sq1 where col1>0 order by col1 for update lim Index Cond: (col1 > 0) (5 rows) +explain select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset 30; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + [Bypass] + Limit (cost=4.24..4.66 rows=3 width=46) + -> LockRows (cost=0.00..54.95 rows=389 width=46) + -> Index Scan using itest_bypass_sq1 on test_bypass_sq1 (cost=0.00..51.06 rows=389 width=46) + Index Cond: (col1 > 0) +(5 rows) + explain select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 offset 3; QUERY PLAN -------------------------------------------------------------------------------------------------- @@ -865,6 +880,11 @@ select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 offset 3; 3 | 3 | test_insert3 (4 rows) +select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset 30; + col1 | col2 | col3 +------+------+------ +(0 rows) + explain select * from test_bypass_sq1 where col1>0 order by col1 for update offset 3; QUERY PLAN -------------------------------------------------------------------------------------------------------- @@ -1886,6 +1906,13 @@ revoke update on test_bypass_sq7 from qps; revoke select on test_bypass_sq7 from qps; DROP OWNED BY qps; DROP ROLE qps; +-- test rule do nothing +create table test(a int); +create view v_test as select * from test; +CREATE OR REPLACE RULE v_delete as ON DELETE TO v_test DO INSTEAD NOTHING; +delete from v_test; +drop table test cascade; +NOTICE: drop cascades to view 
v_test -- end reset track_activities; set track_sql_count = off; diff --git a/src/test/regress/expected/cast_privileges_test.out b/src/test/regress/expected/cast_privileges_test.out index 369aa98dd..c24139625 100644 --- a/src/test/regress/expected/cast_privileges_test.out +++ b/src/test/regress/expected/cast_privileges_test.out @@ -1,7 +1,5 @@ create user user1 password '1234567i*'; grant all on schema public to user1; -create schema privilege_test; -grant all on schema privilege_test to user1; set role user1 password '1234567i*'; CREATE TYPE public.int111 AS (f1 int, f2 int); CREATE TYPE public.text111 AS (f1 text, f2 text); @@ -9,7 +7,7 @@ create table public.aa_int(aa int111); create table public.bb_text(bb text111); insert into public.aa_int values((111,222)); insert into public.bb_text values((111,222)); -CREATE OR REPLACE FUNCTION privilege_test.text_int(text111)RETURNS int111 AS $$ +CREATE OR REPLACE FUNCTION public.text_int(text111)RETURNS int111 AS $$ declare res public.int111; begin @@ -17,22 +15,18 @@ begin res:=($1.f1::int,$1.f2::int); return res; end;$$ language plpgsql security invoker; -select privilege_test.text_int((111,222)); -ERROR: Permission denied. -CONTEXT: SQL statement "alter USER user1 with sysadmin" -PL/pgSQL function privilege_test.text_int(text111) line 5 at SQL statement -referenced column: text_int -CREATE CAST (text111 AS int111) WITH FUNCTION privilege_test.text_int(text111) AS IMPLICIT; +ERROR: permission denied to create function "text_int" +HINT: must be sysadmin to create a function in public schema. +select public.text_int((111,222)); +ERROR: function public.text_int(record) does not exist +LINE 1: select public.text_int((111,222)); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +CONTEXT: referenced column: text_int +CREATE CAST (text111 AS int111) WITH FUNCTION public.text_int(text111) AS IMPLICIT; +ERROR: function public.text_int(text111) does not exist reset role; select aa ,bb from aa_int ,bb_text where aa_int.aa=bb_text.bb::int111; -ERROR: Permission denied. -CONTEXT: SQL statement "alter USER user1 with sysadmin" -PL/pgSQL function privilege_test.text_int(text111) line 5 at SQL statement -drop table aa_int; -drop table bb_text; -drop type int111 cascade; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to function privilege_test.text_int(text111) -drop cascades to cast from text111 to int111 -drop type text111 cascade; -drop user user1 cascade; +ERROR: cannot cast type text111 to int111 +LINE 1: ...bb from aa_int ,bb_text where aa_int.aa=bb_text.bb::int111; + ^ diff --git a/src/test/regress/expected/ce_alter_add_drop_column.out b/src/test/regress/expected/ce_alter_add_drop_column.out new file mode 100644 index 000000000..ec3a877fb --- /dev/null +++ b/src/test/regress/expected/ce_alter_add_drop_column.out @@ -0,0 +1,75 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! 
gs_ktool -g
+GENERATE
+2
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK1_sm4 CASCADE;
+NOTICE: client master key "imgcmk1_sm4" does not exist
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK_sm4 CASCADE;
+NOTICE: client master key "imgcmk_sm4" does not exist
+CREATE CLIENT MASTER KEY ImgCMK1_sm4 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = SM4);
+CREATE CLIENT MASTER KEY ImgCMK_sm4 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/2" , ALGORITHM = SM4);
+CREATE COLUMN ENCRYPTION KEY ImgCEK1_sm4 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1_sm4, ALGORITHM = SM4_sm3);
+CREATE COLUMN ENCRYPTION KEY ImgCEK_sm4 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK_sm4, ALGORITHM = SM4_sm3);
+-- create target table products and source table newproducts, and insert data
+drop table IF EXISTS products;
+NOTICE: table "products" does not exist, skipping
+CREATE TABLE products
+(
+product_id INTEGER,
+product_name VARCHAR2(60) encrypted with (column_encryption_key = ImgCEK_sm4, encryption_type = DETERMINISTIC),
+category VARCHAR2(60)
+);
+INSERT INTO products VALUES (15011, 'vivitar 35mm', 'electrncs');
+INSERT INTO products VALUES (15021, 'olympus is50', 'electrncs');
+INSERT INTO products VALUES (16001, 'play gym', 'toys');
+INSERT INTO products VALUES (16011, 'lamaze', 'toys');
+ALTER TABLE products drop COLUMN product_name;
+ALTER TABLE products drop COLUMN category;
+ALTER TABLE products ADD COLUMN product_name VARCHAR2(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK_sm4, ENCRYPTION_TYPE = DETERMINISTIC) ;
+ALTER TABLE products ADD COLUMN category VARCHAR2(60) ;
+ALTER TABLE products drop COLUMN product_name;
+ALTER TABLE products ADD COLUMN product_name VARCHAR2(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK_sm4, ENCRYPTION_TYPE = DETERMINISTIC) ;
+ALTER TABLE products ADD COLUMN product_name_2 text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK_sm4, ENCRYPTION_TYPE = DETERMINISTIC) ;
+\d products
+ Table "public.products"
+ Column | Type | Modifiers
+----------------+-----------------------+------------
+ product_id | integer |
+ category | character varying(60) |
+ product_name | character varying | encrypted
+ product_name_2 | text | encrypted
+
+
+INSERT INTO products VALUES (175011, 'vivitar 35mm', 'electrncs', 'car');
+INSERT INTO products VALUES (17021, 'olympus is50', 'electrncs', 'shoe');
+INSERT INTO products VALUES (18001, 'play gym', 'toys', 'book');
+INSERT INTO products VALUES (18011, 'lamaze', 'toys', 'computer');
+INSERT INTO products VALUES (18661, 'harry potter', 'dvd', 'cup');
+SELECT * FROM products ORDER BY product_id;
+ product_id | category | product_name | product_name_2
+------------+--------------+--------------+----------------
+ 15011 | | |
+ 15021 | | |
+ 16001 | | |
+ 16011 | | |
+ 17021 | olympus is50 | electrncs | shoe
+ 18001 | play gym | toys | book
+ 18011 | lamaze | toys | computer
+ 18661 | harry potter | dvd | cup
+ 175011 | vivitar 35mm | electrncs | car
+(9 rows)
+
+
+drop table IF EXISTS products;
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK1_sm4 CASCADE;
+NOTICE: drop cascades to column encryption key: imgcek1_sm4
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK_sm4 CASCADE;
+NOTICE: drop cascades to column encryption key: imgcek_sm4
+\! gs_ktool -d all
+DELETE ALL
+ 1 2
diff --git a/src/test/regress/expected/ce_alteruser.out b/src/test/regress/expected/ce_alteruser.out
new file mode 100644
index 000000000..cf3748980
--- /dev/null
+++ b/src/test/regress/expected/ce_alteruser.out
@@ -0,0 +1,38 @@
+\! gs_ktool -d all
+DELETE ALL
+
+\! gs_ktool -g
+GENERATE
+1
+DROP CLIENT MASTER KEY IF EXISTS CMK1 CASCADE;
+NOTICE: client master key "cmk1" does not exist
+DROP USER IF EXISTS ce_user1;
+NOTICE: role "ce_user1" does not exist, skipping
+CREATE USER ce_user1 PASSWORD 'gauss@123';
+SET SESSION AUTHORIZATION ce_user1 PASSWORD 'gauss@123';
+CREATE CLIENT MASTER KEY CMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE COLUMN ENCRYPTION KEY CEK1 WITH VALUES (CLIENT_MASTER_KEY = CMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE TABLE t_alteruser (c1 int, c2 text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = CEK1, ENCRYPTION_TYPE = DETERMINISTIC));
+INSERT INTO t_alteruser VALUES (1,'1'), (2,'2'), (3,'3'), (4,'4'), (5,'5');
+RESET SESSION AUTHORIZATION;
+ALTER USER ce_user1 PASSWORD 'gauss@1234' EXPIRED;
+ALTER USER ce_user1 identified by 'gauss@1235' replace 'gauss@1234';
+--?.*
+ c1 | c2
+----+----
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+(5 rows)
+
+SET SESSION AUTHORIZATION ce_user1 PASSWORD 'gauss@1235';
+DROP TABLE t_alteruser;
+DROP COLUMN ENCRYPTION KEY CEK1;
+DROP CLIENT MASTER KEY CMK1;
+RESET SESSION AUTHORIZATION;
+DROP USER IF EXISTS ce_user1;
+\! gs_ktool -d all
+DELETE ALL
+ 1
diff --git a/src/test/regress/expected/ce_cmk_cek_test.out b/src/test/regress/expected/ce_cmk_cek_test.out
index e2bad50f6..d0109f6aa 100755
--- a/src/test/regress/expected/ce_cmk_cek_test.out
+++ b/src/test/regress/expected/ce_cmk_cek_test.out
@@ -29,23 +29,31 @@ select count(*), 'count' from gs_column_keys;
 CREATE CLIENT MASTER KEY ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
 -- fail duplicate key
 CREATE CLIENT MASTER KEY ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC);
-ERROR: duplicate key value violates unique constraint "gs_client_global_keys_name_index"
-DETAIL: Key (global_key_name, key_namespace)=(imgcmk, 2200) already exists.
+ERROR: client master key "imgcmk" already exists
 -- fail didn't support RSA_2048 algorithm
 CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = RSA_2048);
-ERROR(CLIENT): unsupported client master key algorithm
+ERROR(CLIENT): unpported algorithm 'rsa_2048', gs_ktool only support: AES_256_CBC SM4
+-- fail invalid algorithm
+CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC_1);
+ERROR(CLIENT): unpported algorithm 'aes_256_cbc_1', gs_ktool only support: AES_256_CBC SM4
 -- fail ALGORITHM is missing or invalid
 CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2");
-ERROR(CLIENT): unsupported client master key algorithm
+ERROR(CLIENT): failed to create client master key, failed to find arg: ALGORITHM.
 -- fail KEY_PATHis missing or invalid
 CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, ALGORITHM = AES_256_CBC);
-ERROR(CLIENT): invalid key path
+ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_PATH.
+-- fail KEY_PATH is invalid
+CREATE CLIENT MASTER KEY ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/" , ALGORITHM = AES_256_CBC);
+ERROR(CLIENT): invalid key path: 'gs_ktool/', it should be like "gs_ktool/1".
 -- fail KEY_STORE is missing or invalid
 CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC);
-ERROR(CLIENT): invalid key store
+ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE.
-- fail duplicate KEY_PATHargs CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); ERROR(CLIENT): duplicate keyPath args +-- fail invalid KEY_STORE +CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktoolgs_ktoolgs_ktool, KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type 'gs_ktoolgs_ktoolgs_ktool' is not supported. -- fail duplicate KEY_STORE args CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_STORE = kmc, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); ERROR(CLIENT): duplicate keyStore args @@ -54,27 +62,41 @@ CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoo ERROR(CLIENT): duplicate algorithm args -- case create CEK - success CREATE COLUMN ENCRYPTION KEY ImgCEK WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- case create CEK - success +CREATE COLUMN ENCRYPTION KEY ImgCEK128 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +-- case create CEK - fail +CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256_1); +ERROR(CLIENT): invalid column encryption key algorithm. +-- case create CEK - fail +CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256_1); +ERROR(CLIENT): invalid column encryption key algorithm. CREATE COLUMN ENCRYPTION KEY ImgCEK1 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE='abcdefghijklmnopqrstuvwxyz12'); --fail encryption key too short CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE='abcdefghijklmnopqrstuvwxyz1'); ERROR(CLIENT): encryption key too short +--sucess +CREATE COLUMN ENCRYPTION KEY ImgCEK_256 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, +ENCRYPTED_VALUE='1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'); +--fail encryption key too long +CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, +ENCRYPTED_VALUE='1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'); +ERROR(CLIENT): 
encryption key too long
--fail object does not exist
CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
ERROR(CLIENT): failed to get client master key imgcmk2 from cache
 -- fail duplicate key value
 CREATE COLUMN ENCRYPTION KEY ImgCEK WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
-ERROR: duplicate key value violates unique constraint "gs_column_keys_name_index"
-DETAIL: Key (column_key_name, key_namespace)=(imgcek, 2200) already exists.
+ERROR: column encryption key "imgcek" already exists
 --fail ImgCMK1 dose not exist
 CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
 ERROR(CLIENT): failed to get client master key imgcmk1 from cache
 -- fail didn't support AES_128_CBC algorithm
 CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AES_128_CBC);
 ERROR(CLIENT): invalid column encryption key algorithm.
--- fail syntax error parsing cek creation query
+-- fail invalid algorithm
 CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK);
 ERROR(CLIENT): invalid column encryption key algorithm.
--- fail syntax error parsing cek creation query
+-- fail invalid algorithm
 CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256);
 ERROR(CLIENT): failed to get client master key from cache
 select count(*), 'count' from gs_client_global_keys;
@@ -86,7 +108,7 @@ select count(*), 'count' from gs_client_global_keys;
 select count(*), 'count' from gs_column_keys;
  count | ?column?
 -------+----------
-     2 | count
+     4 | count
 (1 row)
 
 --cek dose not exist
 CREATE TABLE account(user_id INT, username VARCHAR (50) ENCRYPTED WITH (COLUMN_
 );
 ERROR(CLIENT):error while trying to retrieve column encryption key from cache
 DROP CLIENT MASTER KEY ImgCMK CASCADE;
-NOTICE: drop cascades to 2 other objects
+NOTICE: drop cascades to 4 other objects
 DETAIL: drop cascades to column encryption key: imgcek
+drop cascades to column encryption key: imgcek128
 drop cascades to column encryption key: imgcek1
+drop cascades to column encryption key: imgcek_256
 --failed
 CREATE CLIENT MASTER KEY test_sm2_cmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4" , ALGORITHM = SM3);
-ERROR(CLIENT): unsupported client master key algorithm
+ERROR(CLIENT): unpported algorithm 'sm3', gs_ktool only support: AES_256_CBC SM4
 CREATE CLIENT MASTER KEY test_sm2_cmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4" , ALGORITHM = SM2);
-ERROR(CLIENT): unsupported client master key algorithm
+ERROR(CLIENT): unpported algorithm 'sm2', gs_ktool only support: AES_256_CBC SM4
 --success
 CREATE CLIENT MASTER KEY test_sm2_cmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4" , ALGORITHM = SM4);
 CREATE COLUMN ENCRYPTION KEY sm4_cek WITH VALUES (CLIENT_MASTER_KEY = test_sm2_cmk, ALGORITHM = sm4_sm34);
diff --git
a/src/test/regress/expected/ce_column_constraint.out b/src/test/regress/expected/ce_column_constraint.out index 3234b99b3..b31599b7d 100644 --- a/src/test/regress/expected/ce_column_constraint.out +++ b/src/test/regress/expected/ce_column_constraint.out @@ -231,7 +231,7 @@ CREATE TABLE IF NOT EXISTS check_age_null_1 (id int, age int ENCRYPTED WITH (CO insert into check_age_null_1 values(1, 2); insert into check_age_null_1 values(2, NULL); ERROR: new row for relation "check_age_null_1" violates check constraint "check_age_null_1_age_check" -DETAIL: Failing row contains (2, null). +DETAIL: N/A select * from check_age_null_1 order by id; id | age ----+----- @@ -242,7 +242,7 @@ CREATE TABLE IF NOT EXISTS check_age_null_2 (id int, age int ENCRYPTED WITH (CO insert into check_age_null_2 values(1, 2); insert into check_age_null_2 values(2, NULL); ERROR: new row for relation "check_age_null_2" violates check constraint "check_age_null_2_age_check" -DETAIL: Failing row contains (2, null). +DETAIL: N/A select * from check_age_null_2 order by id; id | age ----+----- @@ -260,7 +260,7 @@ CREATE TABLE IF NOT EXISTS check_randomage_null_1 (id int, age int ENCRYPTED WI insert into check_randomage_null_1 values(1, 2); insert into check_randomage_null_1 values(2, NULL); ERROR: new row for relation "check_randomage_null_1" violates check constraint "check_randomage_null_1_age_check" -DETAIL: Failing row contains (2, null). +DETAIL: N/A select * from check_randomage_null_1 order by id; id | age ----+----- @@ -271,7 +271,7 @@ CREATE TABLE IF NOT EXISTS check_randomage_null_2 (id int, age int ENCRYPTED WI insert into check_randomage_null_2 values(1, 2); insert into check_randomage_null_2 values(2, NULL); ERROR: new row for relation "check_randomage_null_2" violates check constraint "check_randomage_null_2_age_check" -DETAIL: Failing row contains (2, null). +DETAIL: N/A select * from check_randomage_null_2 order by id; id | age ----+----- diff --git a/src/test/regress/expected/ce_copy_options.out b/src/test/regress/expected/ce_copy_options.out new file mode 100644 index 000000000..d8077d582 --- /dev/null +++ b/src/test/regress/expected/ce_copy_options.out @@ -0,0 +1,414 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS copyCMK CASCADE; +NOTICE: client master key "copycmk" does not exist +CREATE CLIENT MASTER KEY copyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +-- test AEAD_AES_128_CBC_HMAC_SHA256 +CREATE COLUMN ENCRYPTION KEY copyCEK1 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY copyCEK2 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY copyCEK3 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS CopyTbl( + i0 INT, + i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK1, ENCRYPTION_TYPE = DETERMINISTIC), + i2 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK2, ENCRYPTION_TYPE = DETERMINISTIC), + i3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK3, ENCRYPTION_TYPE = DETERMINISTIC) default 'stuff' + ); +-- 1 check copy from +-- 1.a check copy the whole table +-- missing data: should fail +COPY CopyTbl FROM stdin; +ERROR: missing data for column "i1" +CONTEXT: COPY copytbl, line 1: "" +-- check \N +copy CopyTbl from stdin; +--1.b check copy part of the table +COPY CopyTbl (i0, i1,i2) FROM stdin; +copy CopyTbl(i0,i2) from stdin; +-- should fail: non-existent column in column list +copy CopyTbl(col2) from stdin; +ERROR: column "col2" of relation "copytbl" does not exist +-- should fail: too many columns in column list +copy CopyTbl(i0,i1,i2,i3,i1,i3) from stdin; +ERROR: column "i1" specified more than once +SELECT * FROM CopyTbl ORDER BY i0; + i0 | i1 | i2 | i3 +------+----+----+------- + 1 | | \N | NN + 10 | 10 | 7 | stuff + 11 | 20 | 8 | stuff + 1001 | | 12 | stuff +(4 rows) + +--3 check options +--3.a format +COPY CopyTbl from stdin(FORMAT CSV); +COPY CopyTbl from stdin(FORMAT TEXT); +--3.b oids DO NOT SUPPORT oids +-- should fail: table "CopyTbl" does not have OIDs +COPY CopyTbl from stdin WITH OIDS; +ERROR: table "copytbl" does not have OIDs +--3.c option:delimiter +copy CopyTbl from stdin with delimiter ','; +--should fail +copy CopyTbl from stdin with delimiter 'a'; +ERROR: delimiter "a" cannot contain any characters in"\.abcdefghijklmnopqrstuvwxyz0123456789" +--should fail +copy CopyTbl from stdin with delimiter E'\r'; +ERROR: COPY delimiter cannot be newline or carriage return +--should fail: delimiter must be no more than 10 bytes +copy CopyTbl from stdin with delimiter '|,%^&*@#$%%^||||'; +ERROR: COPY delimiter must be less than 10 bytes +--3.d option:null force not null +COPY CopyTbl from stdin WITH NULL AS ''; +--should fail +COPY CopyTbl from stdin WITH NULL AS E'\r'; +ERROR: COPY null representation cannot use newline or carriage return +--should fail +COPY CopyTbl from stdin WITH delimiter ',' NULL ','; +ERROR: COPY delimiter must not appear in the NULL specification +--should fail +COPY CopyTbl from stdin WITH CSV quote ',' NULL ','; +ERROR: delimiter cannot contain quote character +-- force not null only available in csv mode and copy from +-- ? 
no use +COPY CopyTbl from stdin WITH CSV FORCE NOT NULL i2; +COPY CopyTbl from stdin (FORMAT CSV, FORCE_NOT_NULL(i2)); +--3.e option:quote force_quote +COPY CopyTbl TO stdout WITH csv; +1,,\N,NN +10,10,7,stuff +11,20,8,stuff +1001,,12,stuff +3000,1,2,3 +1002,1,2,3 +1006,,2,3 +1,2,3,4 +1,2,3,4 +COPY CopyTbl TO stdout WITH csv quote '''' delimiter '|'; +1||\N|NN +10|10|7|stuff +11|20|8|stuff +1001||12|stuff +3000|1|2|3 +1002|1|2|3 +1006||2|3 +1|2|3|4 +1|2|3|4 +COPY CopyTbl TO stdout WITH CSV FORCE QUOTE i3; +1,,\N,"NN" +10,10,7,"stuff" +11,20,8,"stuff" +1001,,12,"stuff" +3000,1,2,"3" +1002,1,2,"3" +1006,,2,"3" +1,2,3,"4" +1,2,3,"4" +COPY CopyTbl TO stdout WITH CSV FORCE QUOTE *; +"1",,"\N","NN" +"10","10","7","stuff" +"11","20","8","stuff" +"1001",,"12","stuff" +"3000","1","2","3" +"1002","1","2","3" +"1006",,"2","3" +"1","2","3","4" +"1","2","3","4" +--3.f escape +--fail to decrypt +-- COPY CopyTbl TO stdout (FORMAT CSV, ESCAPE E'\\'); +--3.g option: eol +-- fail +-- COPY CopyTbl from stdin WITH EOL 'EOL_CRNL'; +-- COPY CopyTbl from stdin WITH EOL 'EOL_CR'; +-- COPY CopyTbl from stdin WITH EOL 'EOL_NL'; +--3.h ignore extra data +copy CopyTbl from stdin with delimiter '|' ignore_extra_data; +--3.h encoding +COPY CopyTbl to stdout WITH DELIMITER AS ',' ENCODING 'utf8'; +1,\N,\\N,NN +10,10,7,stuff +11,20,8,stuff +1001,\N,12,stuff +3000,1,2,3 +1002,1,2,3 +1006,\N,2,3 +1,2,3,4 +1,2,3,4 +1,2,3,4 +COPY CopyTbl to stdout WITH DELIMITER AS ',' ENCODING 'sql_ascii'; +1,\N,\\N,NN +10,10,7,stuff +11,20,8,stuff +1001,\N,12,stuff +3000,1,2,3 +1002,1,2,3 +1006,\N,2,3 +1,2,3,4 +1,2,3,4 +1,2,3,4 +--4 check copy out +COPY CopyTbl TO stdout WITH CSV; +1,,\N,NN +10,10,7,stuff +11,20,8,stuff +1001,,12,stuff +3000,1,2,3 +1002,1,2,3 +1006,,2,3 +1,2,3,4 +1,2,3,4 +1,2,3,4 +COPY CopyTbl TO stdout WITH CSV QUOTE '''' DELIMITER '|'; +1||\N|NN +10|10|7|stuff +11|20|8|stuff +1001||12|stuff +3000|1|2|3 +1002|1|2|3 +1006||2|3 +1|2|3|4 +1|2|3|4 +1|2|3|4 +COPY CopyTbl TO stdout WITH CSV FORCE QUOTE *; +"1",,"\N","NN" +"10","10","7","stuff" +"11","20","8","stuff" +"1001",,"12","stuff" +"3000","1","2","3" +"1002","1","2","3" +"1006",,"2","3" +"1","2","3","4" +"1","2","3","4" +"1","2","3","4" +COPY CopyTbl TO stdout WITH CSV FORCE QUOTE i2 ENCODING 'sql_ascii'; +1,,"\N",NN +10,10,"7",stuff +11,20,"8",stuff +1001,,"12",stuff +3000,1,"2",3 +1002,1,"2",3 +1006,,"2",3 +1,2,"3",4 +1,2,"3",4 +1,2,"3",4 +-- Repeat above tests with new 9.0 option syntax +COPY CopyTbl TO stdout (FORMAT CSV); +1,,\N,NN +10,10,7,stuff +11,20,8,stuff +1001,,12,stuff +3000,1,2,3 +1002,1,2,3 +1006,,2,3 +1,2,3,4 +1,2,3,4 +1,2,3,4 +COPY CopyTbl TO stdout (FORMAT TEXT); +1 \N \\N NN +10 10 7 stuff +11 20 8 stuff +1001 \N 12 stuff +3000 1 2 3 +1002 1 2 3 +1006 \N 2 3 +1 2 3 4 +1 2 3 4 +1 2 3 4 +COPY CopyTbl TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|'); +1||\N|NN +10|10|7|stuff +11|20|8|stuff +1001||12|stuff +3000|1|2|3 +1002|1|2|3 +1006||2|3 +1|2|3|4 +1|2|3|4 +1|2|3|4 +COPY CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE *); +"1",,"\N","NN" +"10","10","7","stuff" +"11","20","8","stuff" +"1001",,"12","stuff" +"3000","1","2","3" +"1002","1","2","3" +"1006",,"2","3" +"1","2","3","4" +"1","2","3","4" +"1","2","3","4" +COPY CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE(i2),ENCODING 'sql_ascii'); +1,,"\N",NN +10,10,"7",stuff +11,20,"8",stuff +1001,,"12",stuff +3000,1,"2",3 +1002,1,"2",3 +1006,,"2",3 +1,2,"3",4 +1,2,"3",4 +1,2,"3",4 +-- Repeat above tests with \copy +\copy CopyTbl TO stdout (FORMAT CSV); +1,,\N,NN +10,10,7,stuff +11,20,8,stuff +1001,,12,stuff +3000,1,2,3 
+1002,1,2,3 +1006,,2,3 +1,2,3,4 +1,2,3,4 +1,2,3,4 +\copy CopyTbl TO stdout (FORMAT TEXT); +1 \N \\N NN +10 10 7 stuff +11 20 8 stuff +1001 \N 12 stuff +3000 1 2 3 +1002 1 2 3 +1006 \N 2 3 +1 2 3 4 +1 2 3 4 +1 2 3 4 +\copy CopyTbl TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|'); +1||\N|NN +10|10|7|stuff +11|20|8|stuff +1001||12|stuff +3000|1|2|3 +1002|1|2|3 +1006||2|3 +1|2|3|4 +1|2|3|4 +1|2|3|4 +\copy CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE *); +"1",,"\N","NN" +"10","10","7","stuff" +"11","20","8","stuff" +"1001",,"12","stuff" +"3000","1","2","3" +"1002","1","2","3" +"1006",,"2","3" +"1","2","3","4" +"1","2","3","4" +"1","2","3","4" +\copy CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE(i2),ENCODING 'sql_ascii'); +1,,"\N",NN +10,10,"7",stuff +11,20,"8",stuff +1001,,"12",stuff +3000,1,"2",3 +1002,1,"2",3 +1006,,"2",3 +1,2,"3",4 +1,2,"3",4 +1,2,"3",4 +-- test end of copy marker +CREATE COLUMN ENCRYPTION KEY copyCEK4 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +create table test_eoc( + a int, + b text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK4, ENCRYPTION_TYPE = DETERMINISTIC) +); +copy test_eoc from stdin csv; +select * from test_eoc order by a; + a | b +---+------ + 1 | a\. + 2 | \.b + 3 | c\.d + 4 | \. +(4 rows) + +--5 check copy select +CREATE COLUMN ENCRYPTION KEY copyCEK5 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +create table test_select( + a int, + b text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK5, ENCRYPTION_TYPE = DETERMINISTIC) +); +insert into test_select values (1, 'a'); +insert into test_select values (2, 'b'); +insert into test_select values (3, 'c'); +insert into test_select values (4, 'd'); +insert into test_select values (5, 'e'); +CREATE COLUMN ENCRYPTION KEY copyCEK6 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +create table test_select_2( + a int, + b text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK6, ENCRYPTION_TYPE = DETERMINISTIC) +); +insert into test_select_2 values (1, 'A'); +insert into test_select_2 values (2, 'B'); +insert into test_select_2 values (3, 'C'); +insert into test_select_2 values (4, 'D'); +insert into test_select_2 values (5, 'E'); +--6. test COPY select table TO +--a. test COPY (select) TO +copy (select * from test_select order by 1) to stdout; +1 a +2 b +3 c +4 d +5 e +copy (select * from test_select order by 1) to stdout; +1 a +2 b +3 c +4 d +5 e +copy (select b from test_select where a=1) to stdout; +a +--b. test COPY (select for update) TO +copy (select b from test_select where a=3 for update) to stdout; +c +-- should fail +copy (select * from test_select) from stdin; +ERROR: syntax error at or near "from" +LINE 1: copy (select * from test_select) from stdin; + ^ +-- should fail +copy (select * from test_select) (a,b) to stdout; +ERROR: syntax error at or near "(" +LINE 1: copy (select * from test_select) (a,b) to stdout; + ^ +--c.test join +copy (select * from test_select join test_select_2 using (a) order by 1) to stdout; +1 a A +2 b B +3 c C +4 d D +5 e E +--d. Test subselect +copy (select * from (select b from test_select where a = 1)) to stdout; +a +--e. test headers, CSV and quotes +copy (select b from test_select where a = 1) to stdout csv header force quote b; +b +"a" +--f. 
test psql builtins, plain table +\copy (select * from test_select order by 1) to stdout; +1 a +2 b +3 c +4 d +5 e +-- fail to decrypt +-- \copy (select "a",'a','a""'||b,(a + 1)*a,b,"test_select"."b" from test_select where a=3) to stdout; +DROP TABLE CopyTbl; +DROP TABLE test_eoc; +DROP TABLE test_select; +DROP TABLE test_select_2; +DROP CLIENT MASTER KEY copyCMK CASCADE; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to column encryption key: copycek1 +drop cascades to column encryption key: copycek2 +drop cascades to column encryption key: copycek3 +drop cascades to column encryption key: copycek4 +drop cascades to column encryption key: copycek5 +drop cascades to column encryption key: copycek6 +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ce_crt_cek.out b/src/test/regress/expected/ce_crt_cek.out new file mode 100644 index 000000000..c478dcd3e --- /dev/null +++ b/src/test/regress/expected/ce_crt_cek.out @@ -0,0 +1,178 @@ +------------------------------------------------------------------------------------------------------------------------- +-- grop : security +-- module : client encrypt +-- +-- function : test {sql:CREATE CEK} +-- CREATE COLUMN ENCRYPTION KEY WITH VALUES (CLIENT_MASTER_KEY = $cmk, ALGORITHM = $algo, ENCRYPTED_VALUE = "$value"); +-- +-- dependency : +-- service : Huawei KMS (https://console.huaweicloud.com/dew/?region=cn-north-4#/kms/keyList/customKey) +-- cmk : CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = huawei_kms, ...) +------------------------------------------------------------------------------------------------------------------------- +-- prepare | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256); +-- create cek | succeed +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek4 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = '0123456789abcdef0123456789abcdef'); +-- drop cek | succeed +DROP COLUMN ENCRYPTION KEY cek1; +DROP COLUMN ENCRYPTION KEY cek2; +DROP COLUMN ENCRYPTION KEY cek3; +DROP COLUMN ENCRYPTION KEY cek4; +-- create after drop cek | succeed +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +DROP COLUMN ENCRYPTION KEY cek1; +DROP COLUMN ENCRYPTION KEY cek2; +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- prepare | succeed +DROP COLUMN ENCRYPTION KEY cek1; +DROP COLUMN 
ENCRYPTION KEY cek2; +-- create cek | invalid cek object name | error +CREATE COLUMN ENCRYPTION KEY WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR: syntax error at or near "WITH VALUES" +LINE 1: CREATE COLUMN ENCRYPTION KEY WITH VALUES (CLIENT_MASTER_KEY ... + ^ +CREATE COLUMN ENCRYPTION KEY . WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR: syntax error at or near "." +LINE 1: CREATE COLUMN ENCRYPTION KEY . WITH VALUES (CLIENT_MASTER_KE... + ^ +CREATE COLUMN ENCRYPTION KEY ecek 1 WITH VALUES (CLIENT_MASTER_KEY = cmk3, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR: syntax error at or near "1" +LINE 1: CREATE COLUMN ENCRYPTION KEY ecek 1 WITH VALUES (CLIENT_MAST... + ^ +CREATE COLUMN ENCRYPTION KEY ecek ecek WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR: syntax error at or near "ecek" +LINE 1: CREATE COLUMN ENCRYPTION KEY ecek ecek WITH VALUES (CLIENT_M... + ^ +CREATE COLUMN ENCRYPTION KEY 啊 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR: invalid name +-- create cek | loss args | error +CREATE COLUMN ENCRYPTION KEY ecek1 WITH VALUES (); +ERROR: syntax error at or near ")" +LINE 1: CREATE COLUMN ENCRYPTION KEY ecek1 WITH VALUES (); + ^ +CREATE COLUMN ENCRYPTION KEY ecek2 WITH VALUES (CLIENT_MASTER_KEY = cmk1); +ERROR(CLIENT): invalid column encryption key algorithm. +CREATE COLUMN ENCRYPTION KEY ecek3 WITH VALUES (ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR(CLIENT): failed to get client master key from cache +CREATE COLUMN ENCRYPTION KEY ecek4 WITH VALUES (CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1); +ERROR(CLIENT): duplicate master key args +CREATE COLUMN ENCRYPTION KEY ecek5 WITH VALUES (ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR(CLIENT): duplicate CEK algorithm args +CREATE COLUMN ENCRYPTION KEY ecek6 WITH VALUES (CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1); +ERROR(CLIENT): duplicate master key args +-- create cek | redundant args | error +CREATE COLUMN ENCRYPTION KEY ecek20 WITH VALUES (CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR(CLIENT): duplicate master key args +CREATE COLUMN ENCRYPTION KEY ecek21 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR(CLIENT): duplicate CEK algorithm args +CREATE COLUMN ENCRYPTION KEY ecek22 WITH VALUES (CLIENT_MASTER_KEY = cmk3, CLIENT_MASTER_KEY = cmk3, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "123456789abcdef0123456"); +ERROR: syntax error at or near ""123456789abcdef0123456"" +LINE 1: ...= AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "123456789... + ^ +CREATE COLUMN ENCRYPTION KEY ecek23 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "123456789abcdef0123456"); +ERROR: syntax error at or near ""123456789abcdef0123456"" +LINE 1: ...= AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "123456789...
+ ^ +-- create cek | invalid args | error +CREATE COLUMN ENCRYPTION KEY ecek40 WITH VALUES (CLIENT_MASTER_KEY = cmk5, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +ERROR(CLIENT): failed to get client master key cmk5 from cache +-- +CREATE COLUMN ENCRYPTION KEY ecek50 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = ""); +ERROR: zero-length delimited identifier at or near """" +LINE 1: ...RITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = ""); + ^ +CREATE COLUMN ENCRYPTION KEY ecek51 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "0123456789abcdef0"); +ERROR: syntax error at or near ""0123456789abcdef0"" +LINE 1: ...= AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "012345678... + ^ +CREATE COLUMN ENCRYPTION KEY ecek52 WITH VALUES (CLIENT_MASTER_KEY = cmk3, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "0123456789abcdef0123456789abcdef01"); +ERROR: syntax error at or near ""0123456789abcdef0123456789abcdef01"" +LINE 1: ...= AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "012345678... + ^ +CREATE COLUMN ENCRYPTION KEY ecek53 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"); +NOTICE: identifier "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" will be truncated to "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde" +ERROR: syntax error at or near ""0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"" +LINE 1: ...= AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "012345678... + ^ +-- clear | succeed +DROP CLIENT MASTER KEY cmk1 CASCADE; +DROP CLIENT MASTER KEY cmk2 CASCADE; +DROP CLIENT MASTER KEY cmk3 CASCADE; +DROP CLIENT MASTER KEY cmk4 CASCADE; +SELECT * FROM gs_column_keys; + column_key_name | column_key_distributed_id | global_key_id | key_namespace | key_owner | create_date | key_acl +-----------------+---------------------------+---------------+---------------+-----------+-------------+--------- +(0 rows) + +SELECT * FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner | key_acl | create_date +-----------------+---------------+-----------+---------+------------- +(0 rows) + +------------------------------------------------------------------------------------------------------------- +-- dependency : +-- tool : gs_ktool (sorce code: src/bin/gs_ktool) +------------------------------------------------------------------------------------------------------------- +-- prepare | succeed +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g +GENERATE +1 +GENERATE +2 +GENERATE +3 +GENERATE +4 +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +-- create cek | succeed +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek4 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = '0123456789abcdef0123456789abcdef'); +-- drop cek | succeed +DROP COLUMN ENCRYPTION KEY cek1; +DROP COLUMN ENCRYPTION KEY cek2; +DROP COLUMN ENCRYPTION KEY cek3; +DROP COLUMN ENCRYPTION KEY cek4; +-- create after drop cek | succeed +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +DROP COLUMN ENCRYPTION KEY cek1; +DROP COLUMN ENCRYPTION KEY cek2; +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- clear | succeed +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to column encryption key: cek1 +drop cascades to column encryption key: cek2 +DROP CLIENT MASTER KEY cmk2 CASCADE; +DROP CLIENT MASTER KEY cmk3 CASCADE; +DROP CLIENT MASTER KEY cmk4 CASCADE; +SELECT * FROM gs_column_keys; + column_key_name | column_key_distributed_id | global_key_id | key_namespace | key_owner | create_date | key_acl +-----------------+---------------------------+---------------+---------------+-----------+-------------+--------- +(0 rows) + +SELECT * FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner | key_acl | create_date +-----------------+---------------+-----------+---------+------------- +(0 rows) + +\! gs_ktool -d all +DELETE ALL + 1 2 3 4 diff --git a/src/test/regress/expected/ce_crt_cmk_by_gskt.out b/src/test/regress/expected/ce_crt_cmk_by_gskt.out new file mode 100644 index 000000000..4521a1dde --- /dev/null +++ b/src/test/regress/expected/ce_crt_cmk_by_gskt.out @@ -0,0 +1,289 @@ +------------------------------------------------------------------------------------------------------------------------- +-- group : security +-- module : client encrypt +-- +-- function : test {sql:CREATE CMK} +-- CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = $key_store, KEY_PATH = "$key_id" , ALGORITHM = $algo); +-- +-- dependency : +-- tool : gs_ktool (source code: src/bin/gs_ktool) +------------------------------------------------------------------------------------------------------------------------- +-- prepare | succeed +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g +GENERATE +1 +GENERATE +2 +GENERATE +3 +GENERATE +4 +GENERATE +5 +-- create cmk | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = SM4); +CREATE CLIENT MASTER KEY cmk5 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHM = AES_256_CBC); +-- drop cmk | succeed +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk2; +-- create after drop cmk | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", ALGORITHM = SM4); +DROP CLIENT MASTER KEY cmk1; +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +-- prepare | succeed +\! gs_ktool -d all +DELETE ALL + 1 2 3 4 5 +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk2; +DROP CLIENT MASTER KEY cmk4; +DROP CLIENT MASTER KEY cmk5; +\! gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g +GENERATE +1 +GENERATE +2 +GENERATE +3 +GENERATE +4 +GENERATE +5 +GENERATE +6 +-- in word "ecmk", 'e' means 'error' +-- create cmk | invalid cmk object name | error +CREATE CLIENT MASTER KEY ecmk 1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "1" +LINE 1: CREATE CLIENT MASTER KEY ecmk 1 WITH (KEY_STORE = gs_ktool, ... + ^ +CREATE CLIENT MASTER KEY ecmk 1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "1" +LINE 1: CREATE CLIENT MASTER KEY ecmk 1 WITH (KEY_STORE = gs_ktool, ... + ^ +CREATE CLIENT MASTER KEY ecmk ecmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "ecmk" +LINE 1: CREATE CLIENT MASTER KEY ecmk ecmk WITH (KEY_STORE = gs_ktoo... + ^ +CREATE CLIENT MASTER KEY "ecmk" ecmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "ecmk" +LINE 1: CREATE CLIENT MASTER KEY "ecmk" ecmk WITH (KEY_STORE = gs_kt... + ^ +CREATE CLIENT MASTER KEY . WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "." +LINE 1: CREATE CLIENT MASTER KEY . WITH (KEY_STORE = gs_ktool, KEY_P... + ^ +CREATE CLIENT MASTER KEY 你 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: invalid name +-- create cmk | loss args | error +CREATE CLIENT MASTER KEY ecmk1 WITH (KEY_STORE = gs_ktool); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_PATH. +CREATE CLIENT MASTER KEY ecmk2 WITH (KEY_PATH = "gs_ktool/1"); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE. +CREATE CLIENT MASTER KEY ecmk3 WITH (ALGORITHM = AES_256_CBC); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE. +CREATE CLIENT MASTER KEY ecmk4 WITH (KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE. 
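+-- A minimal sketch for contrast (editorial sketch, not captured test output): the
+-- failing cases around this point each omit one of the three required options; a
+-- fully specified form, assuming a gs_ktool key with id 1 exists and using the
+-- hypothetical name sketch_cmk, would be:
+--   CREATE CLIENT MASTER KEY sketch_cmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC);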
+CREATE CLIENT MASTER KEY ecmk5 WITH (KEY_STORE = gs_ktool, ALGORITHM = AES_256_CBC); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_PATH. +CREATE CLIENT MASTER KEY ecmk6 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3"); +ERROR(CLIENT): failed to create client master key, failed to find arg: ALGORITHM. +CREATE CLIENT MASTER KEY ecmk7 WITH (KEY_PATH = "gs_ktool/4", KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk8 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate keyStore args +CREATE CLIENT MASTER KEY ecmk9 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", KEY_PATH = "gs_ktool/5"); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk10 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", KEY_PATH = "gs_ktool/6"); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk11 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool); +ERROR(CLIENT): duplicate keyStore args +CREATE CLIENT MASTER KEY ecmk12 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool); +ERROR(CLIENT): duplicate keyStore args +-- create cmk | redundant args | error +CREATE CLIENT MASTER KEY ecmk20 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate keyStore args +CREATE CLIENT MASTER KEY ecmk21 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk22 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3", KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk23 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate algorithm args +CREATE CLIENT MASTER KEY ecmk24 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate algorithm args +CREATE CLIENT MASTER KEY ecmk25 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC); +ERROR(CLIENT): duplicate keyStore args +-- create cmk | invalid args | error +CREATE CLIENT MASTER KEY ecmk40 WITH (KEY_STORE = , KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "," +LINE 1: CREATE CLIENT MASTER KEY ecmk40 WITH (KEY_STORE = , KEY_PATH... + ^ +CREATE CLIENT MASTER KEY ecmk41 WITH (KEY_STORE = gs, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type 'gs' is not supported. +CREATE CLIENT MASTER KEY ecmk42 WITH (KEY_STORE = gs_ktooll, KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type 'gs_ktooll' is not supported. +CREATE CLIENT MASTER KEY ecmk43 WITH (KEY_STORE = gs_ktoal, KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type 'gs_ktoal' is not supported. +CREATE CLIENT MASTER KEY ecmk44 WITH (KEY_STORE = "gs_ktoal", KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type 'gs_ktoal' is not supported. 
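+-- Sketch (not captured output): KEY_STORE appears to accept only registered store
+-- types; this file exercises gs_ktool, and ce_crt_cmk_by_hwkms.out later in this
+-- patch exercises huawei_kms, e.g. with the hypothetical name sketch_kms_cmk:
+--   CREATE CLIENT MASTER KEY sketch_kms_cmk WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256);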
+CREATE CLIENT MASTER KEY ecmk45 WITH (KEY_STORE = gs_ktoolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll1111111111111111111111111111111111, KEY_PATH = "gs_ktool/5", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type 'gs_ktoollllllllllllllllllllllllllllllllllllllllllllllllllllllll' is not supported. +CREATE CLIENT MASTER KEY ecmk46 WITH (KEY_STORE = 很, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type '很' is not supported. +CREATE CLIENT MASTER KEY ecmk47 WITH (KEY_STORE = ,, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store type ',' is not supported. +-- -- +CREATE CLIENT MASTER KEY ecmk60 WITH (KEY_STORE = gs_ktool, KEY_PATH = , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "," +LINE 1: ...KEY ecmk60 WITH (KEY_STORE = gs_ktool, KEY_PATH = , ALGORITH... + ^ +CREATE CLIENT MASTER KEY ecmk61 WITH (KEY_STORE = gs_ktool, KEY_PATH = "g", ALGORITHM = ); +ERROR: syntax error at or near ")" +LINE 1: ...1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "g", ALGORITHM = ); + ^ +CREATE CLIENT MASTER KEY ecmk62 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoo/1", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktoo/1', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk63 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk64 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool/', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk65 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktooll/1", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktooll/1', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk66 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoal/2", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktoal/2', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk67 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool3", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool3', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk68 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool//4", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool//4', '/4' is expected to be an integer. +CREATE CLIENT MASTER KEY ecmk69 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/\", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool/\', '\' is expected to be an integer. +CREATE CLIENT MASTER KEY ecmk70 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5.", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool/5.', '5.' is expected to be an integer. +CREATE CLIENT MASTER KEY ecmk71 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/.", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool/.', '.' is expected to be an integer. +CREATE CLIENT MASTER KEY ecmk72 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6/", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool/6/', '6/' is expected to be an integer. +CREATE CLIENT MASTER KEY ecmk73 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/闲", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktool/闲', '闲' is expected to be an integer. 
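+-- Sketch (not captured output): per the errors above, a gs_ktool KEY_PATH must be
+-- exactly "gs_ktool/<n>", where <n> is the integer id printed by `gs_ktool -g`;
+-- e.g., assuming GENERATE reported id 2, a hypothetical sketch_cmk2 would use:
+--   CREATE CLIENT MASTER KEY sketch_cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC);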
+CREATE CLIENT MASTER KEY ecmk74 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll/1", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): invalid key path: 'gs_ktoollllllllllllllllllllllllllllllllllllllllllllllllllllllll', it should be like "gs_ktool/1". +CREATE CLIENT MASTER KEY ecmk75 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555", ALGORITHM = AES_256_CBC); +ERROR: failed to select cmk len, cmk id: -1. +ERROR(CLIENT): failed to read cmk from gs_ktool, key id: -1. +-- -- +CREATE CLIENT MASTER KEY ecmk80 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM =); +ERROR: syntax error at or near ")" +LINE 1: ...KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM =); + ^ +CREATE CLIENT MASTER KEY ecmk81 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES); +ERROR(CLIENT): unpported algorithm 'aes', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk82 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256); +ERROR(CLIENT): unpported algorithm 'aes_256', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk83 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CFB); +ERROR(CLIENT): unpported algorithm 'aes_256_cfb', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk84 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_128_CBC); +ERROR(CLIENT): unpported algorithm 'aes_128_cbc', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk85 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = RSA_2048); +ERROR(CLIENT): unpported algorithm 'rsa_2048', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk86 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = RSA_3072); +ERROR(CLIENT): unpported algorithm 'rsa_3072', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk87 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBCB); +ERROR(CLIENT): unpported algorithm 'aes_256_cbcb', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk88 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = "AES_256_CBC\0"); +ERROR(CLIENT): unpported algorithm 'AES_256_CBC\0', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk89 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = .); +ERROR: syntax error at or near "." 
+LINE 1: ...Y_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = .); + ^ +CREATE CLIENT MASTER KEY ecmk90 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC); +ERROR(CLIENT): unpported algorithm 'aes_256_cbccccccccccccccccccccccccccccccccccccccccccccccccccccc', gs_ktool only support: AES_256_CBC SM4 +CREATE CLIENT MASTER KEY ecmk91 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = 的); +ERROR(CLIENT): unpported algorithm '的', gs_ktool only support: AES_256_CBC SM4 +-- create cmk | invalid keys | error +CREATE CLIENT MASTER KEY ecmk100 WITH (gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "gs_ktool" +LINE 1: CREATE CLIENT MASTER KEY ecmk100 WITH (gs_ktool, KEY_PATH = ... + ^ +CREATE CLIENT MASTER KEY ecmk101 WITH (KEY_STOR = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "KEY_STOR" +LINE 1: CREATE CLIENT MASTER KEY ecmk101 WITH (KEY_STOR = gs_ktool, ... + ^ +CREATE CLIENT MASTER KEY ecmk102 WITH (KEY_STORE = gs_ktool, KEY = "gs_ktool/3" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "KEY" +LINE 1: ...NT MASTER KEY ecmk102 WITH (KEY_STORE = gs_ktool, KEY = "gs_... + ^ +CREATE CLIENT MASTER KEY ecmk103 WITH (KEY_STORE = gs_ktool, KEY_PATHH = "gs_ktool/4" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "KEY_PATHH" +LINE 1: ...NT MASTER KEY ecmk103 WITH (KEY_STORE = gs_ktool, KEY_PATHH ... + ^ +CREATE CLIENT MASTER KEY ecmk104 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHMA = AES_256_CBC); +ERROR: syntax error at or near "ALGORITHMA" +LINE 1: ... (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHMA... + ^ +CREATE CLIENT MASTER KEY ecmk105 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6" , = AES_256_CBC); +ERROR: syntax error at or near "=" +LINE 1: ... (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6" , = AES_256_... + ^ +CREATE CLIENT MASTER KEY ecmk106 WITH (KEY_STORE = gs_ktool, 吗 = "gs_ktool/1" , = AES_256_CBC); +ERROR: syntax error at or near "吗" +LINE 1: ...NT MASTER KEY ecmk106 WITH (KEY_STORE = gs_ktool, 吗 = "gs_k... + ^ +CREATE CLIENT MASTER KEY ecmk107 WITH (KEY_STOR = gs_ktool, KEY_STOR = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +ERROR: syntax error at or near "KEY_STOR" +LINE 1: CREATE CLIENT MASTER KEY ecmk107 WITH (KEY_STOR = gs_ktool, ... + ^ +CREATE CLIENT MASTER KEY ecmk108 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC, YES = 1); +ERROR: syntax error at or near "YES" +LINE 1: ...KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC, YES = 1); + ^ +-- prepare | succeed +\! gs_ktool -d all +DELETE ALL + 1 2 3 4 5 6 +\! 
gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g +GENERATE +1 +GENERATE +2 +GENERATE +3 +GENERATE +4 +GENERATE +5 +GENERATE +6 +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk5 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHM = AES_256_CBC); +-- create cmk | unserviceable args | error +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6" , ALGORITHM = AES_256_CBC); +ERROR: duplicate key value violates unique constraint "gs_client_global_keys_name_index" +DETAIL: Key (global_key_name, key_namespace)=(cmk1, 2200) already exists. +CREATE CLIENT MASTER KEY cmk6 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR(CLIENT): key store and key path are already in use by another object +CREATE CLIENT MASTER KEY cmk10 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/10" , ALGORITHM = AES_256_CBC); +ERROR: failed to select cmk len, cmk id: 10. +ERROR(CLIENT): failed to read cmk from gs_ktool, key id: 10. +-- clear | succeed +\! gs_ktool -d all +DELETE ALL + 1 2 3 4 5 6 +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk2; +DROP CLIENT MASTER KEY cmk3; +DROP CLIENT MASTER KEY cmk4; +DROP CLIENT MASTER KEY cmk5; +SELECT * FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner | key_acl | create_date +-----------------+---------------+-----------+---------+------------- +(0 rows) + diff --git a/src/test/regress/expected/ce_crt_cmk_by_hwkms.out b/src/test/regress/expected/ce_crt_cmk_by_hwkms.out new file mode 100644 index 000000000..92674029b --- /dev/null +++ b/src/test/regress/expected/ce_crt_cmk_by_hwkms.out @@ -0,0 +1,164 @@ +------------------------------------------------------------------------------------------------------------------------- +-- group : security +-- module : client encrypt +-- +-- function : test {sql:CREATE CMK} +-- CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = $key_store, KEY_PATH = "$key_id" , ALGORITHM = $algo); +-- +-- dependency : +-- service : Huawei KMS (https://console.huaweicloud.com/dew/?region=cn-north-4#/kms/keyList/customKey) +------------------------------------------------------------------------------------------------------------------------- +-- prepare | generate keys in Huawei KMS website | succeed +-- cec162c2-983d-4a66-8532-c67b915fb409 | ok +-- 31938a5e-6460-49ce-a358-886f46c6f643 | ok +-- d6107fb0-fa39-4ae5-ae84-019066ce9073 | ok +-- 3be6f4e0-80bf-4209-8ba2-13cdd303f1bd | ok +-- 43e7df16-afdc-4883-97c3-1bc7686ffc2f | to be deleted +-- f1d088d8-3b48-4ca6-bcf1-d77496e1aba3 | unavailable +-- create cmk | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409" , ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643" , ALGORITHM = AES_256); +-- drop cmk | succeed +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk2; +-- create after drop cmk | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073" 
, ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd" , ALGORITHM = AES_256); +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk2; +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073" , ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd" , ALGORITHM = AES_256); +-- prepare | succeed +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk3; +-- in the word "ecmk", 'e' means 'error' +-- create cmk | loss args | error +CREATE CLIENT MASTER KEY ecmk1 WITH (KEY_STORE = huawei_kms); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_PATH. +CREATE CLIENT MASTER KEY ecmk2 WITH (KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409"); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE. +CREATE CLIENT MASTER KEY ecmk3 WITH (ALGORITHM = AES_256); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE. +CREATE CLIENT MASTER KEY ecmk4 WITH (KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_STORE. +CREATE CLIENT MASTER KEY ecmk5 WITH (KEY_STORE = huawei_kms, ALGORITHM = AES_256); +ERROR(CLIENT): failed to create client master key, failed to find arg: KEY_PATH. +CREATE CLIENT MASTER KEY ecmk6 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073"); +ERROR(CLIENT): failed to create client master key, failed to find arg: ALGORITHM. +CREATE CLIENT MASTER KEY ecmk7 WITH (KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk8 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, ALGORITHM = AES_256); +ERROR(CLIENT): duplicate keyStore args +CREATE CLIENT MASTER KEY ecmk9 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409"); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk10 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073"); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk11 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms); +ERROR(CLIENT): duplicate keyStore args +CREATE CLIENT MASTER KEY ecmk12 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms); +ERROR(CLIENT): duplicate keyStore args +-- create cmk | redundant args | error +CREATE CLIENT MASTER KEY ecmk20 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): duplicate keyStore args +CREATE CLIENT MASTER KEY ecmk21 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256); +ERROR(CLIENT): duplicate keyPath args +CREATE CLIENT MASTER KEY ecmk22 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): duplicate 
keyPath args +CREATE CLIENT MASTER KEY ecmk23 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256, ALGORITHM = AES_256); +ERROR(CLIENT): duplicate algorithm args +CREATE CLIENT MASTER KEY ecmk24 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256, ALGORITHM = AES_256, ALGORITHM = AES_256, ALGORITHM = AES_256); +ERROR(CLIENT): duplicate algorithm args +CREATE CLIENT MASTER KEY ecmk25 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256, ALGORITHM = AES_256); +ERROR(CLIENT): duplicate keyStore args +-- create cmk | invalid args | error +CREATE CLIENT MASTER KEY ecmk60 WITH (KEY_STORE = huawei_kms, KEY_PATH = , ALGORITHM = AES_256); +ERROR: syntax error at or near "," +LINE 1: ...Y ecmk60 WITH (KEY_STORE = huawei_kms, KEY_PATH = , ALGORITH... + ^ +CREATE CLIENT MASTER KEY ecmk61 WITH (KEY_STORE = huawei_kms, KEY_PATH = "c", ALGORITHM = ); +ERROR: syntax error at or near ")" +LINE 1: ...WITH (KEY_STORE = huawei_kms, KEY_PATH = "c", ALGORITHM = ); + ^ +CREATE CLIENT MASTER KEY ecmk62 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb40", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +CREATE CLIENT MASTER KEY ecmk63 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2983d4a668532c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +CREATE CLIENT MASTER KEY ecmk64 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2_983d_4a66_8532_c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): kms server error. http status code: 400, kms server error message : {"error":{"error_msg":"Invalid key_id.","error_code":"KMS.0205"}} + +. +CREATE CLIENT MASTER KEY ecmk65 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2_983d_4a66_8532_c67b915fb4099", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +CREATE CLIENT MASTER KEY ecmk66 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2_983d_4a66_8532_c67b915fb409cec162c2_983d_4a66_8532_c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +CREATE CLIENT MASTER KEY ecmk68 WITH (KEY_STORE = huawei_kms, KEY_PATH = "z1938a5e-6460-49ce-a358-886f46c6f64", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +CREATE CLIENT MASTER KEY ecmk69 WITH (KEY_STORE = huawei_kms, KEY_PATH = ".1938a5e-6460-49ce-a358-886f46c6f64", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +CREATE CLIENT MASTER KEY ecmk70 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bz", ALGORITHM = AES_256); +ERROR(CLIENT): kms server error. http status code: 404, kms server error message : {"error":{"error_msg":"Nonexistent key.","error_code":"KMS.0207"}} + +. +CREATE CLIENT MASTER KEY ecmk73 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1b闲", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. 
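+-- Sketch (not captured output): for huawei_kms the KEY_PATH is the KMS key id, a
+-- 36-character UUID; the length appears to be checked client-side, while a
+-- well-formed but unknown id is only rejected by the KMS server (KMS.0207 above).
+-- A valid shape, using the hypothetical name sketch_hwkms_cmk and a prepared key id:
+--   CREATE CLIENT MASTER KEY sketch_hwkms_cmk WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256);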
+CREATE CLIENT MASTER KEY ecmk74 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40", ALGORITHM = AES_256); +ERROR(CLIENT): the length of cmk id is invalid. +-- +CREATE CLIENT MASTER KEY ecmk80 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM =); +ERROR: syntax error at or near ")" +LINE 1: ...PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM =); + ^ +CREATE CLIENT MASTER KEY ecmk81 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES); +ERROR(CLIENT): unpported algorithm 'aes', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk82 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_256_CBC); +ERROR(CLIENT): unpported algorithm 'aes_256_cbc', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk83 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256_CFB); +ERROR(CLIENT): unpported algorithm 'aes_256_cfb', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk84 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_128_CBC); +ERROR(CLIENT): unpported algorithm 'aes_128_cbc', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk85 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = RSA_2048); +ERROR(CLIENT): unpported algorithm 'rsa_2048', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk86 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = RSA_3072); +ERROR(CLIENT): unpported algorithm 'rsa_3072', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk87 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_257); +ERROR(CLIENT): unpported algorithm 'aes_257', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk88 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = "AES_256_"); +ERROR(CLIENT): unpported algorithm 'AES_256_', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk89 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = .); +ERROR: syntax error at or near "." 
+LINE 1: ...TH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = .); + ^ +CREATE CLIENT MASTER KEY ecmk90 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_25666666666666666666666666666666666666666666666666666); +ERROR(CLIENT): unpported algorithm 'aes_25666666666666666666666666666666666666666666666666666', huawei kms only support: AES_256 +CREATE CLIENT MASTER KEY ecmk91 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = 嘿); +ERROR(CLIENT): unpported algorithm '嘿', huawei kms only support: AES_256 +-- create cmk | invalid keys | error +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "43e7df16-afdc-4883-97c3-1bc7686ffc2f", ALGORITHM = AES_256); +ERROR(CLIENT): cmk entity '43e7df16-afdc-4883-97c3-1bc7686ffc2f' is already scheduled to be deleted, please use another cmk entity. +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "f1d088d8-3b48-4ca6-bcf1-d77496e1aba3", ALGORITHM = AES_256); +ERROR(CLIENT): cmk entity 'f1d088d8-3b48-4ca6-bcf1-d77496e1aba3' is unavailable. +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "aad088d8-3b48-4ca6-bcf1-d77496e1ab55", ALGORITHM = AES_256); +ERROR(CLIENT): kms server error. http status code: 404, kms server error message : {"error":{"error_msg":"Nonexistent key.","error_code":"KMS.0207"}} + +. +-- prepare | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_256); +-- create cmk | unserviceable args | error +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256); +ERROR: duplicate key value violates unique constraint "gs_client_global_keys_name_index" +DETAIL: Key (global_key_name, key_namespace)=(cmk1, 2200) already exists. 
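+-- Sketch (not captured output): CMK names are unique per namespace (see the DETAIL
+-- above), so reusing a name requires dropping it first, as the create-after-drop
+-- cases earlier in this file exercise; with the hypothetical name sketch_cmk:
+--   DROP CLIENT MASTER KEY sketch_cmk;
+--   CREATE CLIENT MASTER KEY sketch_cmk WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256);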
+CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): key store and key path are already in use by another object +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +ERROR(CLIENT): key store and key path are already in use by another object +-- clear | succeed +DROP CLIENT MASTER KEY cmk1; +DROP CLIENT MASTER KEY cmk2; +DROP CLIENT MASTER KEY cmk3; +SELECT * FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner | key_acl | create_date +-----------------+---------------+-----------+---------+------------- +(0 rows) + diff --git a/src/test/regress/expected/ce_crt_tbl.out b/src/test/regress/expected/ce_crt_tbl.out new file mode 100644 index 000000000..cecc38e1e --- /dev/null +++ b/src/test/regress/expected/ce_crt_tbl.out @@ -0,0 +1,92 @@ +------------------------------------------------------------------------------------------------------------------------- +-- grop : security +-- module : client encrypt +-- +-- function : test {sql:CREATE/INSERT/UPDATE/DELETE/SELECT TABLE} +-- CREATE TABLE $tbl ($col $dat_type ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = $cek, ENCRYPTION_TYPE = $enc_type)); +-- +-- dependency : +-- service : Huawei KMS (https://console.huaweicloud.com/dew/?region=cn-north-4#/kms/keyList/customKey) +-- cmk : CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = huawei_kms, ...) +-- cek : CREATE COLUMN ENCRYPTION KEY $cek ... +------------------------------------------------------------------------------------------------------------------------- +-- prepare | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409" , ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643" , ALGORITHM = AES_256); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- create table | succeed +CREATE TABLE IF NOT EXISTS tbl1 ( + col1 INT, + col2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC), + col3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC), + col4 VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC)); +-- insert | succeed +INSERT INTO tbl1 VALUES (1, 1, 'row1 col3', 'row1 col4'); +INSERT INTO tbl1 VALUES (2, 11111, 'row2 col3', 'row2 col4'); +INSERT INTO tbl1 VALUES (3, 11111111, 'row3 col3', 'row3 col4'); +-- update | succeed +UPDATE tbl1 SET col2 = 22222 WHERE col1=1; +UPDATE tbl1 SET col3 = 'new row2 col3' WHERE col1=2; +UPDATE tbl1 SET col4 = 'new row3 col4' WHERE col1=3; +-- select | succeed +SELECT * FROM tbl1 ORDER BY col1; + col1 | col2 | col3 | col4 +------+----------+---------------+--------------- + 1 | 22222 | row1 col3 | row1 col4 + 2 | 11111 | new row2 col3 | row2 col4 + 3 | 11111111 | row3 col3 | new row3 col4 +(3 rows) + +SELECT * FROM tbl1 WHERE col2 = 1; + col1 | col2 | col3 | col4 +------+------+------+------ +(0 rows) + +SELECT * FROM tbl1 WHERE col3 = 'new row2 col3'; + col1 | col2 | col3 | col4 +------+-------+---------------+----------- + 2 | 
11111 | new row2 col3 | row2 col4 +(1 row) + +SELECT * FROM tbl1 WHERE col4 = 'new row3 col4' AND col1 = 3; + col1 | col2 | col3 | col4 +------+----------+-----------+--------------- + 3 | 11111111 | row3 col3 | new row3 col4 +(1 row) + +SELECT * FROM tbl1 WHERE col3 = 'row1 col3' AND col4 = 'row1 col4'; + col1 | col2 | col3 | col4 +------+-------+-----------+----------- + 1 | 22222 | row1 col3 | row1 col4 +(1 row) + +-- delete | succeed +DELETE FROM tbl1 WHERE col2=22222; +DELETE FROM tbl1 WHERE col3='new row2 col3'; +DELETE FROM tbl1 WHERE col4='row3 col4'; +-- clear | succeed +SELECT * FROM tbl1; + col1 | col2 | col3 | col4 +------+----------+-----------+--------------- + 3 | 11111111 | row3 col3 | new row3 col4 +(1 row) + +DROP TABLE tbl1; +DROP CLIENT MASTER KEY cmk1 CASCADE; +NOTICE: drop cascades to column encryption key: cek1 +DROP CLIENT MASTER KEY cmk2 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to column encryption key: cek2 +drop cascades to column encryption key: cek3 +SELECT * FROM gs_column_keys; + column_key_name | column_key_distributed_id | global_key_id | key_namespace | key_owner | create_date | key_acl +-----------------+---------------------------+---------------+---------------+-----------+-------------+--------- +(0 rows) + +SELECT * FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner | key_acl | create_date +-----------------+---------------+-----------+---------+------------- +(0 rows) + diff --git a/src/test/regress/expected/ce_crt_tbl_as.out b/src/test/regress/expected/ce_crt_tbl_as.out new file mode 100644 index 000000000..cd4a91536 --- /dev/null +++ b/src/test/regress/expected/ce_crt_tbl_as.out @@ -0,0 +1,377 @@ +-- module : client encryption +-- purpose : function test +-- detail : test CREATE AS & SELECT INTO (with encrypted columns) +-- (0) prepare | clean environment | succeed +CREATE SCHEMA ce_crt_tbl_as; +SET search_path TO ce_crt_tbl_as; +DROP TABLE IF EXISTS t1; +NOTICE: table "t1" does not exist, skipping +DROP TABLE IF EXISTS t2; +NOTICE: table "t2" does not exist, skipping +DROP CLIENT MASTER KEY IF EXISTS cmk1 CASCADE; +NOTICE: client master key "cmk1" does not exist +DROP CLIENT MASTER KEY IF EXISTS cmk2 CASCADE; +NOTICE: client master key "cmk2" does not exist +\! gs_ktool -d all +DELETE ALL + +-- (0) prepare | create cmk & cek | succeed +\! gs_ktool -g +GENERATE +1 +\! 
gs_ktool -g +GENERATE +2 +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = '1234567890abcdef1234567890abcde'); +-- (0) prepare | create table & insert data | succeed +CREATE TABLE IF NOT EXISTS t1 ( + c1 INT, + c2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC), + c3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC), + c4 VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek3, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS t2 ( + c1 INT, + c2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC), + c3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC), + c4 VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek3, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t1 VALUES (1, 12, 'r1 c3', 'r1 c4'); +INSERT INTO t1 VALUES (2, 22, 'r2 c3', 'r2 c4'); +INSERT INTO t1 VALUES (3, 32, 'r3 c3', 'r3 c4'); +-- INSERT INTO t2 (nothing) +-- (1) SELECT INTO | succeed +SELECT c1,c2 INTO t1_1 FROM t1; +SELECT c1,c3,c4 INTO t1_2 FROM t1; +SELECT c1,c2,c3,c4 INTO t1_3 FROM t1; +SELECT * INTO t1_4 FROM t1; +SELECT c1,c4 INTO t1_5 FROM t1 WHERE c4 = 'r4 c4'; +SELECT c1,c3,c4 INTO t1_6 FROM t1 WHERE c3 = 'r2 c3'; +SELECT c1,c2,c4 INTO t1_7 FROM t1 WHERE c2 = 12 AND c4 = 'r1 c4'; +SELECT * INTO t1_8 FROM t1 WHERE c2 = 32; +SELECT c1,c2 INTO t2_1 FROM t2; +SELECT c1,c3,c4 INTO t2_2 FROM t2; +SELECT c1,c2,c3,c4 INTO t2_3 FROM t2; +SELECT * INTO t2_4 FROM t2; +SELECT c1,c4 INTO t2_5 FROM t2 WHERE c4 = 'r4 c4'; +SELECT c1,c3,c4 INTO t2_6 FROM t2 WHERE c3 = 'r2 c3'; +SELECT c1,c2,c4 INTO t2_7 FROM t2 WHERE c2 = 12 AND c4 = 'r1 c4'; +SELECT * INTO t2_8 FROM t2 WHERE c2 = 32; +-- (2) SELECT INTO | no distribution column | failed +-- note: these cases are expected to fail, but in fastcheck they succeed; the cause is not yet understood +SELECT c2 INTO t1_20 FROM t1; +SELECT c2,c3 INTO t1_21 FROM t1; +SELECT c2 INTO t2_20 FROM t2; +SELECT c2,c3 INTO t2_21 FROM t2; +-- (3) SELECT INTO | result table already exists | failed +SELECT c1,c2 INTO t1_1 FROM t1; +ERROR: relation "t1_1" already exists in schema "ce_crt_tbl_as" +DETAIL: creating new table with existing name in the same schema +SELECT c1,c2 INTO t2_1 FROM t2; +ERROR: relation "t2_1" already exists in schema "ce_crt_tbl_as" +DETAIL: creating new table with existing name in the same schema +-- (4) show results | succeed +SELECT count(*) FROM gs_encrypted_columns; + count +------- + 46 +(1 row) + +SELECT * FROM t1_1 ORDER BY c1 ASC; + c1 | c2 +----+---- + 1 | 12 + 2 | 22 + 3 | 32 +(3 rows) + +SELECT * FROM t1_2 ORDER BY c1 ASC; + c1 | c3 | c4 +----+-------+------- + 1 | r1 c3 | r1 c4 + 2 | r2 c3 | r2 c4 + 3 | r3 c3 | r3 c4 +(3 rows) + +SELECT * FROM t1_3 ORDER BY c1 ASC; + c1 | c2 | c3 | c4 +----+----+-------+------- + 1 | 12 | r1 c3 | r1 c4 + 2 | 22 | r2 c3 | r2 c4 + 3 | 32 | r3 c3 | r3 c4 +(3 rows) + +SELECT * FROM t1_4 ORDER BY c1 ASC; + c1 | c2 | c3 | c4 +----+----+-------+------- + 1 | 12 | r1 c3 | r1 c4 + 2 | 22 | r2 c3 | r2 c4 + 3 | 32 | 
r3 c3 | r3 c4 +(3 rows) + +SELECT * FROM t1_5 ORDER BY c1 ASC; + c1 | c4 +----+---- +(0 rows) + +SELECT * FROM t1_6 ORDER BY c1 ASC; + c1 | c3 | c4 +----+-------+------- + 2 | r2 c3 | r2 c4 +(1 row) + +SELECT * FROM t1_7 ORDER BY c1 ASC; + c1 | c2 | c4 +----+----+------- + 1 | 12 | r1 c4 +(1 row) + +SELECT * FROM t1_8 ORDER BY c1 ASC; + c1 | c2 | c3 | c4 +----+----+-------+------- + 3 | 32 | r3 c3 | r3 c4 +(1 row) + +SELECT * FROM t2_1 ORDER BY c1 ASC; + c1 | c2 +----+---- +(0 rows) + +SELECT * FROM t2_2 ORDER BY c1 ASC; + c1 | c3 | c4 +----+----+---- +(0 rows) + +SELECT * FROM t2_3 ORDER BY c1 ASC; + c1 | c2 | c3 | c4 +----+----+----+---- +(0 rows) + +SELECT * FROM t2_4 ORDER BY c1 ASC; + c1 | c2 | c3 | c4 +----+----+----+---- +(0 rows) + +SELECT * FROM t2_5 ORDER BY c1 ASC; + c1 | c4 +----+---- +(0 rows) + +SELECT * FROM t2_6 ORDER BY c1 ASC; + c1 | c3 | c4 +----+----+---- +(0 rows) + +SELECT * FROM t2_7 ORDER BY c1 ASC; + c1 | c2 | c4 +----+----+---- +(0 rows) + +SELECT * FROM t2_8 ORDER BY c1 ASC; + c1 | c2 | c3 | c4 +----+----+----+---- +(0 rows) + +-- (5) clean copyted table | succeed +DROP TABLE t1_1; +DROP TABLE t1_2; +DROP TABLE t1_3; +DROP TABLE t1_4; +DROP TABLE t1_5; +DROP TABLE t1_6; +DROP TABLE t1_7; +DROP TABLE t1_8; +DROP TABLE t2_1; +DROP TABLE t2_2; +DROP TABLE t2_3; +DROP TABLE t2_4; +DROP TABLE t2_5; +DROP TABLE t2_6; +DROP TABLE t2_7; +DROP TABLE t2_8; +DROP TABLE IF EXISTS t1_20; +DROP TABLE IF EXISTS t1_21; +DROP TABLE IF EXISTS t2_20; +DROP TABLE IF EXISTS t2_21; +-- (6) CREATE AS | succeed +CREATE TABLE t1_1 AS SELECT c1,c2 FROM t1; +CREATE TABLE t1_2 AS SELECT c1,c3,c4 FROM t1; +CREATE TABLE t1_3 AS SELECT c1,c2,c3,c4 FROM t1; +CREATE TABLE t1_4 AS SELECT * FROM t1; +-- TODO : not support yet +CREATE TABLE t1_4 AS SELECT c1,c4 FROM t1 WHERE c4 = 'r4 c4'; +ERROR: invalid input syntax for type byteawithoutorderwithequalcol +LINE 1: ...LE t1_4 AS SELECT c1,c4 FROM t1 WHERE c4 = 'r4 c4'; + ^ +CREATE TABLE t1_5 AS SELECT c1,c3,c4 FROM t1 WHERE c3 = 'r2 c3'; +ERROR: invalid input syntax for type byteawithoutorderwithequalcol +LINE 1: ...LE t1_5 AS SELECT c1,c3,c4 FROM t1 WHERE c3 = 'r2 c3'; + ^ +CREATE TABLE t1_6 AS SELECT c1,c2,c4 FROM t1 WHERE c2 = 12 AND c4 = 'r1 c4'; +ERROR: operator does not exist: byteawithoutorderwithequalcol = integer +LINE 1: ...LE t1_6 AS SELECT c1,c2,c4 FROM t1 WHERE c2 = 12 AND c... + ^ +HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +CREATE TABLE t1_7 AS SELECT * FROM t1 WHERE c2 = 32; +ERROR: operator does not exist: byteawithoutorderwithequalcol = integer +LINE 1: ...E TABLE t1_7 AS SELECT * FROM t1 WHERE c2 = 32; + ^ +HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. 
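+-- Sketch (not captured output): the same filtered copies do succeed through
+-- SELECT INTO in section (1), so a working equivalent of the failing CTAS forms
+-- above would be, with the hypothetical result name t1_workaround:
+--   SELECT c1,c2,c4 INTO t1_workaround FROM t1 WHERE c2 = 12 AND c4 = 'r1 c4';
+-- (compare t1_7 in section (1), which ran without error)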
+CREATE TABLE t2_1 AS SELECT c1,c2 FROM t1;
+CREATE TABLE t2_2 AS SELECT c1,c3,c4 FROM t1;
+CREATE TABLE t2_3 AS SELECT c1,c2,c3,c4 FROM t1;
+CREATE TABLE t2_4 AS SELECT * FROM t1;
+-- TODO : not supported yet
+CREATE TABLE t2_4 AS SELECT c1,c4 FROM t2 WHERE c4 = 'r4 c4';
+ERROR: invalid input syntax for type byteawithoutorderwithequalcol
+LINE 1: ...LE t2_4 AS SELECT c1,c4 FROM t2 WHERE c4 = 'r4 c4';
+                                                      ^
+CREATE TABLE t2_5 AS SELECT c1,c3,c4 FROM t2 WHERE c3 = 'r2 c3';
+ERROR: invalid input syntax for type byteawithoutorderwithequalcol
+LINE 1: ...LE t2_5 AS SELECT c1,c3,c4 FROM t2 WHERE c3 = 'r2 c3';
+                                                         ^
+CREATE TABLE t2_6 AS SELECT c1,c2,c4 FROM t2 WHERE c2 = 12 AND c4 = 'r1 c4';
+ERROR: operator does not exist: byteawithoutorderwithequalcol = integer
+LINE 1: ...LE t2_6 AS SELECT c1,c2,c4 FROM t2 WHERE c2 = 12 AND c...
+                                                       ^
+HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+CREATE TABLE t2_7 AS SELECT * FROM t2 WHERE c2 = 32;
+ERROR: operator does not exist: byteawithoutorderwithequalcol = integer
+LINE 1: ...E TABLE t2_7 AS SELECT * FROM t2 WHERE c2 = 32;
+                                                     ^
+HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts.
+-- (7) show results | succeed
+SELECT count(*) FROM gs_encrypted_columns;
+ count 
+-------
+    24
+(1 row)
+
+SELECT * FROM t1_1 ORDER BY c1 ASC;
+ c1 | c2 
+----+----
+  1 | 12
+  2 | 22
+  3 | 32
+(3 rows)
+
+SELECT * FROM t1_2 ORDER BY c1 ASC;
+ c1 |  c3   |  c4   
+----+-------+-------
+  1 | r1 c3 | r1 c4
+  2 | r2 c3 | r2 c4
+  3 | r3 c3 | r3 c4
+(3 rows)
+
+SELECT * FROM t1_3 ORDER BY c1 ASC;
+ c1 | c2 |  c3   |  c4   
+----+----+-------+-------
+  1 | 12 | r1 c3 | r1 c4
+  2 | 22 | r2 c3 | r2 c4
+  3 | 32 | r3 c3 | r3 c4
+(3 rows)
+
+SELECT * FROM t1_4 ORDER BY c1 ASC;
+ c1 | c2 |  c3   |  c4   
+----+----+-------+-------
+  1 | 12 | r1 c3 | r1 c4
+  2 | 22 | r2 c3 | r2 c4
+  3 | 32 | r3 c3 | r3 c4
+(3 rows)
+
+SELECT * FROM t1_5 ORDER BY c1 ASC;
+ERROR: relation "t1_5" does not exist on datanode1
+LINE 1: SELECT * FROM t1_5 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t1_6 ORDER BY c1 ASC;
+ERROR: relation "t1_6" does not exist on datanode1
+LINE 1: SELECT * FROM t1_6 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t1_7 ORDER BY c1 ASC;
+ERROR: relation "t1_7" does not exist on datanode1
+LINE 1: SELECT * FROM t1_7 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t1_8 ORDER BY c1 ASC;
+ERROR: relation "t1_8" does not exist on datanode1
+LINE 1: SELECT * FROM t1_8 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t2_1 ORDER BY c1 ASC;
+ c1 | c2 
+----+----
+  1 | 12
+  2 | 22
+  3 | 32
+(3 rows)
+
+SELECT * FROM t2_2 ORDER BY c1 ASC;
+ c1 |  c3   |  c4   
+----+-------+-------
+  1 | r1 c3 | r1 c4
+  2 | r2 c3 | r2 c4
+  3 | r3 c3 | r3 c4
+(3 rows)
+
+SELECT * FROM t2_3 ORDER BY c1 ASC;
+ c1 | c2 |  c3   |  c4   
+----+----+-------+-------
+  1 | 12 | r1 c3 | r1 c4
+  2 | 22 | r2 c3 | r2 c4
+  3 | 32 | r3 c3 | r3 c4
+(3 rows)
+
+SELECT * FROM t2_4 ORDER BY c1 ASC;
+ c1 | c2 |  c3   |  c4   
+----+----+-------+-------
+  1 | 12 | r1 c3 | r1 c4
+  2 | 22 | r2 c3 | r2 c4
+  3 | 32 | r3 c3 | r3 c4
+(3 rows)
+
+SELECT * FROM t2_5 ORDER BY c1 ASC;
+ERROR: relation "t2_5" does not exist on datanode1
+LINE 1: SELECT * FROM t2_5 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t2_6 ORDER BY c1 ASC;
+ERROR: relation "t2_6" does not exist on datanode1
+LINE 1: SELECT * FROM t2_6 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t2_7 ORDER BY c1 ASC;
+ERROR: relation "t2_7" does not exist on datanode1
+LINE 1: SELECT * FROM t2_7 ORDER BY c1 ASC;
+                      ^
+SELECT * FROM t2_8 ORDER BY c1 ASC;
+ERROR: relation "t2_8" does not exist on datanode1
+LINE 1: SELECT * FROM t2_8 ORDER BY c1 ASC;
+                      ^
+-- (8) clean copied tables
+DROP TABLE t1_1;
+DROP TABLE t1_2;
+DROP TABLE t1_3;
+DROP TABLE t1_4;
+DROP TABLE t2_1;
+DROP TABLE t2_2;
+DROP TABLE t2_3;
+DROP TABLE t2_4;
+-- (9) finish | clean environment | succeed
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP CLIENT MASTER KEY cmk1 CASCADE;
+NOTICE: drop cascades to column encryption key: cek1
+DROP CLIENT MASTER KEY cmk2 CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to column encryption key: cek2
+drop cascades to column encryption key: cek3
+\! gs_ktool -d all
+DELETE ALL
+ 1 2
+-- should be empty
+SELECT * FROM gs_encrypted_columns;
+ rel_id | column_name | column_key_id | encryption_type | data_type_original_oid | data_type_original_mod | create_date 
+--------+-------------+---------------+-----------------+------------------------+------------------------+-------------
+(0 rows)
+
+-- reset
+RESET search_path;
+
diff --git a/src/test/regress/expected/ce_default_values.out b/src/test/regress/expected/ce_default_values.out
index ee9aea7dc..c07fd35f7 100644
--- a/src/test/regress/expected/ce_default_values.out
+++ b/src/test/regress/expected/ce_default_values.out
@@ -12,6 +12,7 @@ CREATE TABLE products (
     product_no integer DEFAULT 1,
     name text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT 'Test Product',
     title varchar(35) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) NOT NULL DEFAULT ' ',
+    value varchar(35) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '',
     price numeric ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT 9.99,
     max_price decimal(6,0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT NULL
 );
@@ -22,43 +23,43 @@ INSERT INTO products (price) VALUES (DEFAULT);
 INSERT INTO products (name, price) VALUES (DEFAULT, DEFAULT);
 INSERT INTO products DEFAULT VALUES;
 SELECT * FROM products;
- product_no |     name     | title | price | max_price 
-------------+--------------+-------+-------+-----------
-          1 | Test2        |       |  9.99 |          
-          1 | Test Product |       |    34 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  9.99 |          
+ product_no |     name     | title | value | price | max_price 
+------------+--------------+-------+-------+-------+-----------
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |    34 |          
+          1 | Test2        |       |       |  9.99 |          
 (6 rows)
 
 ALTER TABLE products ALTER COLUMN price SET DEFAULT 7.77;
 INSERT INTO products DEFAULT VALUES;
 SELECT * FROM products;
- product_no |     name     | title | price | max_price 
-------------+--------------+-------+-------+-----------
-          1 | Test2        |       |  9.99 |          
-          1 | Test Product |       |    34 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  9.99 |          
-          1 | Test Product |       |  7.77 |          
+ product_no |     name     | title | value | price | max_price 
+------------+--------------+-------+-------+-------+-----------
+          1 | Test Product |       |       |  7.77 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |  9.99 |          
+          1 | Test Product |       |       |    34 |          
+          1 | Test2        |       |       |  9.99 |          
 (7 rows)
 
 ALTER TABLE products ALTER COLUMN price DROP DEFAULT;
 INSERT INTO products DEFAULT
VALUES; SELECT * FROM products; - product_no | name | title | price | max_price -------------+--------------+-------+-------+----------- - 1 | Test2 | | 9.99 | - 1 | Test Product | | 34 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 7.77 | - 1 | Test Product | | | + product_no | name | title | value | price | max_price +------------+--------------+-------+-------+-------+----------- + 1 | Test Product | | | | + 1 | Test Product | | | 7.77 | + 1 | Test Product | | | 9.99 | + 1 | Test Product | | | 9.99 | + 1 | Test Product | | | 9.99 | + 1 | Test Product | | | 9.99 | + 1 | Test Product | | | 34 | + 1 | Test2 | | | 9.99 | (8 rows) DROP TABLE products; diff --git a/src/test/regress/expected/ce_describe.out b/src/test/regress/expected/ce_describe.out index 94a6248b6..852eb4591 100644 --- a/src/test/regress/expected/ce_describe.out +++ b/src/test/regress/expected/ce_describe.out @@ -10,13 +10,13 @@ CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE IF NOT EXISTS t_varchar (id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC), - address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC)); + address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = RANDOMIZED)); SELECT attname, atttypid::regtype FROM pg_attribute JOIN pg_class On attrelid = Oid WHERE relname = 't_varchar' AND attnum >0; attname | atttypid ---------+------------------------------- id | integer name | byteawithoutorderwithequalcol - address | byteawithoutorderwithequalcol + address | byteawithoutordercol (3 rows) \d t_varchar; diff --git a/src/test/regress/expected/ce_drop_cek.out b/src/test/regress/expected/ce_drop_cek.out index bc18b8834..03720a6da 100644 --- a/src/test/regress/expected/ce_drop_cek.out +++ b/src/test/regress/expected/ce_drop_cek.out @@ -31,6 +31,7 @@ SELECT count(*), 'count' FROM gs_column_keys; SET ROLE test1 PASSWORD "Gauss@123"; DROP column encryption key test2.test_drop_cek2; ERROR: permission denied for schema test2 +DETAIL: N/A SELECT count(*), 'count' FROM gs_column_keys; count | ?column? -------+---------- @@ -48,6 +49,7 @@ SELECT count(*), 'count' FROM gs_column_keys; SET ROLE test_security_admin PASSWORD "Gauss@123"; DROP COLUMN ENCRYPTION KEY test1.test_drop_cek1; ERROR: permission denied for schema test1 +DETAIL: N/A SELECT count(*), 'count' FROM gs_column_keys; count | ?column? -------+---------- diff --git a/src/test/regress/expected/ce_drop_cmk.out b/src/test/regress/expected/ce_drop_cmk.out index 234b457d6..8244e7583 100644 --- a/src/test/regress/expected/ce_drop_cmk.out +++ b/src/test/regress/expected/ce_drop_cmk.out @@ -37,6 +37,7 @@ SELECT count(*), 'count' FROM gs_client_global_keys; DROP CLIENT MASTER KEY test2.test_drop_cmk2; ERROR: permission denied for schema test2 +DETAIL: N/A SELECT count(*), 'count' FROM gs_client_global_keys; count | ?column? -------+---------- @@ -54,6 +55,7 @@ SELECT count(*), 'count' FROM gs_client_global_keys; SET ROLE test_security_admin PASSWORD "Gauss@123"; DROP CLIENT MASTER KEY test1.test_drop_cmk1; ERROR: permission denied for schema test1 +DETAIL: N/A SELECT count(*), 'count' FROM gs_client_global_keys; count | ?column? 
-------+---------- diff --git a/src/test/regress/expected/ce_escaping.out b/src/test/regress/expected/ce_escaping.out index d9a738324..080854c22 100644 --- a/src/test/regress/expected/ce_escaping.out +++ b/src/test/regress/expected/ce_escaping.out @@ -6,28 +6,28 @@ GENERATE 1 CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -CREATE TABLE ce_customer ( - ce_customer_id integer NOT NULL, +CREATE TABLE customer ( + customer_id integer NOT NULL, id integer ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), first_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), last_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC) ); -insert into ce_customer values (770, 1234, 'Ido''s', 'shemer'); -insert into ce_customer (ce_customer_id, id, first_name, last_name) values (771, 1234, 'Eli''s', 'shemer'); -select * from ce_customer order by ce_customer_id; - ce_customer_id | id | first_name | last_name -----------------+------+------------+----------- - 770 | 1234 | Ido's | shemer - 771 | 1234 | Eli's | shemer +insert into customer values (770, 1234, 'Ido''s', 'shemer'); +insert into customer (customer_id, id, first_name, last_name) values (771, 1234, 'Eli''s', 'shemer'); +select * from customer order by customer_id; + customer_id | id | first_name | last_name +-------------+------+------------+----------- + 770 | 1234 | Ido's | shemer + 771 | 1234 | Eli's | shemer (2 rows) -select * from ce_customer where first_name = 'Ido''s'; - ce_customer_id | id | first_name | last_name -----------------+------+------------+----------- - 770 | 1234 | Ido's | shemer +select * from customer where first_name = 'Ido''s'; + customer_id | id | first_name | last_name +-------------+------+------------+----------- + 770 | 1234 | Ido's | shemer (1 row) -drop table ce_customer; +drop table customer; DROP CLIENT MASTER KEY mycmk CASCADE; NOTICE: drop cascades to column encryption key: mycek \! gs_ktool -d all diff --git a/src/test/regress/expected/ce_exec_direct.out b/src/test/regress/expected/ce_exec_direct.out deleted file mode 100644 index c19e69feb..000000000 --- a/src/test/regress/expected/ce_exec_direct.out +++ /dev/null @@ -1,35 +0,0 @@ -\! gs_ktool -d all -DELETE ALL - -\! gs_ktool -g -GENERATE -1 -DROP COLUMN ENCRYPTION KEY IF EXISTS exec_direct_cek; -NOTICE: column setting "exec_direct_cek" does not exist -DROP CLIENT MASTER KEY IF EXISTS exec_direct_cmk; -NOTICE: global setting "exec_direct_cmk" does not exist -CREATE NODE GROUP ngroup1 WITH (datanode1); -CREATE CLIENT MASTER KEY exec_direct_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); -CREATE COLUMN ENCRYPTION KEY exec_direct_cek WITH VALUES (CLIENT_MASTER_KEY = exec_direct_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -CREATE TABLE IF NOT EXISTS exec_direct_t1 (c1 INT, c2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = exec_direct_cek , ENCRYPTION_TYPE = DETERMINISTIC)) TO GROUP ngroup1; -NOTICE: The 'DISTRIBUTE BY' clause is not specified. Using 'c1' as the distribution column by default. -HINT: Please use 'DISTRIBUTE BY' clause to specify suitable data distribution column. 
-INSERT INTO exec_direct_t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5); -EXECUTE DIRECT ON(datanode1) 'SELECT * FROM exec_direct_t1 WHERE c2=1 or c2=2;'; - c1 | c2 -----+---- - 1 | 1 - 2 | 2 -(2 rows) - -EXECUTE DIRECT ON(datanode1) 'UPDATE exec_direct_t1 SET c1=2 WHERE c2=1;'; -ERROR: EXECUTE DIRECT cannot execute DML queries -EXECUTE DIRECT ON(datanode1) 'CREATE TABLE t3(c1 INT, c2 INT);'; -ERROR: EXECUTE DIRECT cannot execute this utility query -DROP TABLE exec_direct_t1; -DROP COLUMN ENCRYPTION KEY exec_direct_cek; -DROP CLIENT MASTER KEY exec_direct_cmk; -DROP NODE GROUP ngroup1; -\! gs_ktool -d all -DELETE ALL - 1 diff --git a/src/test/regress/expected/ce_foreign_key.out b/src/test/regress/expected/ce_foreign_key.out index b032e98f9..0fd9d68d6 100644 --- a/src/test/regress/expected/ce_foreign_key.out +++ b/src/test/regress/expected/ce_foreign_key.out @@ -10,11 +10,10 @@ CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktoo CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE COLUMN ENCRYPTION KEY MyCEK2 WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE so_headers ( - id INTEGER unique ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), + id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), customer_id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK2, ENCRYPTION_TYPE = DETERMINISTIC), ship_to VARCHAR (255) ); -NOTICE: CREATE TABLE / UNIQUE will create implicit index "so_headers_id_key" for table "so_headers" CREATE TABLE so_items ( item_id INTEGER NOT NULL, so_id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), @@ -25,6 +24,7 @@ CREATE TABLE so_items ( FOREIGN KEY (so_id) REFERENCES so_headers (id) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "so_items_pkey" for table "so_items" +ERROR: there is no unique constraint matching given keys for referenced table "so_headers" CREATE TABLE so_items_r ( item_id INTEGER NOT NULL, so_id int4 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC) REFERENCES so_headers(id) ON DELETE RESTRICT, @@ -34,6 +34,7 @@ CREATE TABLE so_items_r ( PRIMARY KEY (item_id,so_id) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "so_items_r_pkey" for table "so_items_r" +ERROR: there is no unique constraint matching given keys for referenced table "so_headers" CREATE TABLE so_items_c ( item_id int4 NOT NULL, so_id int4 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC) REFERENCES so_headers(id) ON DELETE CASCADE, @@ -43,14 +44,14 @@ CREATE TABLE so_items_c ( PRIMARY KEY (item_id,so_id) ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "so_items_c_pkey" for table "so_items_c" -ALTER TABLE IF EXISTS so_headers ADD CONSTRAINT so_headers_unique1 UNIQUE (id,customer_id); -NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "so_headers_unique1" for table "so_headers" +ERROR: there is no unique constraint matching given keys for referenced table "so_headers" CREATE TABLE payments ( pay_id int, so_id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), customer_id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK2, ENCRYPTION_TYPE = DETERMINISTIC), FOREIGN KEY (so_id, customer_id) REFERENCES so_headers (id, customer_id) ); +ERROR: there is no unique constraint matching given 
keys for referenced table "so_headers" CREATE TABLE so_items_a ( item_id INTEGER NOT NULL, so_id int4 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), @@ -63,32 +64,43 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "so_items_a_pkey" INSERT INTO so_headers VALUES (1,101, 'Vina'); INSERT INTO so_headers VALUES (2,103, 'Paris'); INSERT INTO so_items VALUES (10001, 1, 1001, 100, 37.28); +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: INSERT INTO so_items VALUES (10001, 1, 1001, 100, 37.28); + ^ INSERT INTO so_items VALUES (10002, 6, 1001, 100, 37.28); -ERROR: insert or update on table "so_items" violates foreign key constraint "so_items_so_id_fkey" -DETAIL: Key (so_id)=(6) is not present in table "so_headers". +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: INSERT INTO so_items VALUES (10002, 6, 1001, 100, 37.28); + ^ INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); + ^ SELECT * from so_items ORDER BY item_id; - item_id | so_id | product_id | qty | net_price ----------+-------+------------+-----+----------- - 10001 | 1 | 1001 | 100 | 37.28 - 10003 | 2 | 1001 | 100 | 37.28 -(2 rows) - +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: SELECT * from so_items ORDER BY item_id; + ^ INSERT INTO so_items_r VALUES (10001, 1, 1001, 100, 37.28); +ERROR: relation "so_items_r" does not exist on datanode1 +LINE 1: INSERT INTO so_items_r VALUES (10001, 1, 1001, 100, 37.28); + ^ INSERT INTO so_items_r VALUES (10002, 6, 1001, 100, 37.28); -ERROR: insert or update on table "so_items_r" violates foreign key constraint "so_items_r_so_id_fkey" -DETAIL: Key (so_id)=(6) is not present in table "so_headers". +ERROR: relation "so_items_r" does not exist on datanode1 +LINE 1: INSERT INTO so_items_r VALUES (10002, 6, 1001, 100, 37.28); + ^ +INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); + ^ SELECT * from so_items_r ORDER BY item_id; - item_id | so_id | product_id | qty | net_price ----------+-------+------------+-----+----------- - 10001 | 1 | 1001 | 100 | 37.28 -(1 row) - +ERROR: relation "so_items_r" does not exist on datanode1 +LINE 1: SELECT * from so_items_r ORDER BY item_id; + ^ INSERT INTO so_items_a VALUES (10001, 1, 1001, 100, 37.28); INSERT INTO so_items_a VALUES (10002, 6, 1001, 100, 37.28); -INSERT INTO so_items_a VALUES (10001, 1, 1001, 110, 36.28); -ERROR: duplicate key value violates unique constraint "so_items_a_pkey" ---?.* +INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); + ^ SELECT * from so_items_a ORDER BY item_id; item_id | so_id | product_id | qty | net_price ---------+-------+------------+-----+----------- @@ -97,28 +109,26 @@ SELECT * from so_items_a ORDER BY item_id; (2 rows) INSERT INTO so_items_c VALUES (10001, 1, 1001, 100, 37.28); +ERROR: relation "so_items_c" does not exist on datanode1 +LINE 1: INSERT INTO so_items_c VALUES (10001, 1, 1001, 100, 37.28); + ^ INSERT INTO so_items_c VALUES (10002, 6, 1001, 100, 37.28); -ERROR: insert or update on table "so_items_c" violates foreign key constraint "so_items_c_so_id_fkey" -DETAIL: Key (so_id)=(6) is not present in table "so_headers". 
-INSERT INTO so_items_c VALUES (10001, 1, 1011, 101, 36.28); -ERROR: duplicate key value violates unique constraint "so_items_c_pkey" ---?.* +ERROR: relation "so_items_c" does not exist on datanode1 +LINE 1: INSERT INTO so_items_c VALUES (10002, 6, 1001, 100, 37.28); + ^ +INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); + ^ SELECT * from so_items_c ORDER BY item_id; - item_id | so_id | product_id | qty | net_price ----------+-------+------------+-----+----------- - 10001 | 1 | 1001 | 100 | 37.28 -(1 row) - -DELETE from so_headers where id = 2; -ERROR: update or delete on table "so_headers" violates foreign key constraint "so_items_so_id_fkey" on table "so_items" -DETAIL: Key (id)=(2) is still referenced from table "so_items". +ERROR: relation "so_items_c" does not exist on datanode1 +LINE 1: SELECT * from so_items_c ORDER BY item_id; + ^ +DELETE from so_headers where id =2; SELECT * from so_items ORDER BY item_id; - item_id | so_id | product_id | qty | net_price ----------+-------+------------+-----+----------- - 10001 | 1 | 1001 | 100 | 37.28 - 10003 | 2 | 1001 | 100 | 37.28 -(2 rows) - +ERROR: relation "so_items" does not exist on datanode1 +LINE 1: SELECT * from so_items ORDER BY item_id; + ^ SELECT * from so_items_a ORDER BY item_id; item_id | so_id | product_id | qty | net_price ---------+-------+------------+-----+----------- @@ -127,33 +137,42 @@ SELECT * from so_items_a ORDER BY item_id; (2 rows) SELECT * from so_items_r ORDER BY item_id; - item_id | so_id | product_id | qty | net_price ----------+-------+------------+-----+----------- - 10001 | 1 | 1001 | 100 | 37.28 -(1 row) - +ERROR: relation "so_items_r" does not exist on datanode1 +LINE 1: SELECT * from so_items_r ORDER BY item_id; + ^ SELECT * from so_items_c ORDER BY item_id; - item_id | so_id | product_id | qty | net_price ----------+-------+------------+-----+----------- - 10001 | 1 | 1001 | 100 | 37.28 -(1 row) - +ERROR: relation "so_items_c" does not exist on datanode1 +LINE 1: SELECT * from so_items_c ORDER BY item_id; + ^ INSERT INTO payments VALUES (100001, 1, 101); +ERROR: relation "payments" does not exist on datanode1 +LINE 1: INSERT INTO payments VALUES (100001, 1, 101); + ^ INSERT INTO payments VALUES (100002, 1, 102); -ERROR: insert or update on table "payments" violates foreign key constraint "payments_so_id_fkey" -DETAIL: Key (so_id, customer_id)=(1, 102) is not present in table "so_headers". 
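+-- In short: a FOREIGN KEY must reference columns covered by a UNIQUE or
+-- PRIMARY KEY constraint. With the UNIQUE on the encrypted so_headers.id
+-- removed above, every dependent CREATE TABLE fails, so the later statements
+-- degrade into "relation ... does not exist" errors. A minimal sketch of the
+-- requirement (parent/child are illustrative names, not part of this test):
+--   CREATE TABLE parent (id INT UNIQUE);
+--   CREATE TABLE child  (pid INT REFERENCES parent (id));  -- valid only because parent.id is unique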
-SELECT * from payments ORDER BY pay_id; - pay_id | so_id | customer_id ---------+-------+------------- - 100001 | 1 | 101 -(1 row) - -DROP TABLE so_items; -DROP TABLE so_items_r; -DROP TABLE so_items_a; -DROP TABLE so_items_c; -DROP TABLE payments; +ERROR: relation "payments" does not exist on datanode1 +LINE 1: INSERT INTO payments VALUES (100002, 1, 102); + ^ +ALTER TABLE so_items_a ADD CONSTRAINT fkey_a FOREIGN KEY (so_id) REFERENCES so_headers (id); +ERROR: there is no unique constraint matching given keys for referenced table "so_headers" +ALTER TABLE so_items_a DROP CONSTRAINT fkey_a; +ERROR: constraint "fkey_a" of relation "so_items_a" does not exist +ALTER TABLE so_items_a ADD CONSTRAINT constraint_fk +FOREIGN KEY (so_id) +REFERENCES so_headers (id) +ON DELETE CASCADE; +ERROR: there is no unique constraint matching given keys for referenced table "so_headers" DROP TABLE so_headers; +DROP TABLE so_items; +ERROR: table "so_items" does not exist +DROP TABLE so_items_r; +ERROR: table "so_items_r" does not exist +DROP TABLE so_items_a; +DROP TABLE so_items_b; +ERROR: table "so_items_b" does not exist +DROP TABLE so_items_c; +ERROR: table "so_items_c" does not exist +DROP TABLE payments; +ERROR: table "payments" does not exist DROP CLIENT MASTER KEY MyCMK CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to column encryption key: mycek diff --git a/src/test/regress/expected/ce_functions_anonymous_block.out b/src/test/regress/expected/ce_functions_anonymous_block.out new file mode 100644 index 000000000..4233726c2 --- /dev/null +++ b/src/test/regress/expected/ce_functions_anonymous_block.out @@ -0,0 +1,152 @@ +\! gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS anonymous_block_cmk CASCADE; +NOTICE: client master key "anonymous_block_cmk" does not exist +CREATE CLIENT MASTER KEY anonymous_block_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = SM4); +CREATE COLUMN ENCRYPTION KEY anonymous_block_cek WITH VALUES (CLIENT_MASTER_KEY = anonymous_block_cmk, ALGORITHM = SM4_SM3); +BEGIN +CREATE TABLE creditcard_info (id_number int, name text encrypted with (column_encryption_key = anonymous_block_cek, encryption_type = DETERMINISTIC), +credit_card varchar(19) encrypted with (column_encryption_key = anonymous_block_cek, encryption_type = DETERMINISTIC)); +END; +/ +do $$ +<> +begin +insert into creditcard_info values(0, 'King', '123456'); +end first_block $$; +select * from creditcard_info; + id_number | name | credit_card +-----------+------+------------- + 0 | King | 123456 +(1 row) + +delete from creditcard_info; +BEGIN +insert into creditcard_info values(1, 'Avi', '123456'); +insert into creditcard_info values(2, 'Eli', '641245'); +END; +/ +select * from creditcard_info order by id_number; + id_number | name | credit_card +-----------+------+------------- + 1 | Avi | 123456 + 2 | Eli | 641245 +(2 rows) + +delete from creditcard_info; +CREATE OR REPLACE PROCEDURE autonomous_1() AS +BEGIN + insert into creditcard_info values(66, 66,66); + commit; + insert into creditcard_info values(77, 77,77); + rollback; +END; +/ +call autonomous_1(); + autonomous_1 +-------------- + +(1 row) + +select * from creditcard_info order by id_number; + id_number | name | credit_card +-----------+------+------------- + 66 | 66 | 66 +(1 row) + +--success without return +CREATE OR REPLACE PROCEDURE exec_insert1 () AS +BEGIN + insert into creditcard_info values(3, 'Rafi', '3'); + update creditcard_info set name='Sun' where credit_card = 3; +END; +/ +call exec_insert1 
(); + exec_insert1 +-------------- + +(1 row) + +--success return void +CREATE or replace FUNCTION exec_insert2() RETURN void +AS +BEGIN + insert into creditcard_info values(4,'Gil',4); + update creditcard_info set name='Joy' where credit_card = 4; +END; +/ +SELECT exec_insert2(); + exec_insert2 +-------------- + +(1 row) + +call exec_insert2(); + exec_insert2 +-------------- + +(1 row) + +--success RETURN integer +CREATE or replace FUNCTION exec_insert3() RETURN integer +AS +BEGIN + insert into creditcard_info values(5,'Peter',5); + update creditcard_info set name= 'Xavier' where credit_card = 5; + return 1; +END; +/ +SELECT exec_insert3(); + exec_insert3 +-------------- + 1 +(1 row) + +call exec_insert3(); + exec_insert3 +-------------- + 1 +(1 row) + +-- plpgsql IF +CREATE or replace FUNCTION exec_insert4() RETURN void +AS +BEGIN +IF 2<5 THEN + insert into creditcard_info values(6,'Ziv',6); + update creditcard_info set name='Peter' where credit_card = 6; +END IF; +END; +/ +SELECT exec_insert4(); + exec_insert4 +-------------- + +(1 row) + +call exec_insert4(); + exec_insert4 +-------------- + +(1 row) + +select * from creditcard_info order by id_number; + id_number | name | credit_card +-----------+--------+------------- + 3 | Sun | 3 + 4 | Joy | 4 + 4 | Joy | 4 + 5 | Xavier | 5 + 5 | Xavier | 5 + 6 | Peter | 6 + 6 | Peter | 6 + 66 | 66 | 66 +(8 rows) + +DROP TABLE creditcard_info; +DROP CLIENT MASTER KEY anonymous_block_cmk CASCADE; +NOTICE: drop cascades to column encryption key: anonymous_block_cek +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ce_functions_create_replace.out b/src/test/regress/expected/ce_functions_create_replace.out index a7c3be019..d3ec9bfe0 100644 --- a/src/test/regress/expected/ce_functions_create_replace.out +++ b/src/test/regress/expected/ce_functions_create_replace.out @@ -41,7 +41,7 @@ BEGIN RETURN c; END; $$ LANGUAGE plpgsql; -\df +\df f_processed_in_plpgsql List of functions Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind --------+------------------------+-------------------+----------------------------------------------------+--------+------------+------------+--------- @@ -60,13 +60,12 @@ BEGIN SELECT INTO out1, out2 name, balance from accounts LIMIT 1; END; $$ LANGUAGE plpgsql; -\df +\df f_processed_out_plpgsql List of functions - Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind ---------+-------------------------+-------------------+------------------------------------------------------------------+--------+------------+------------+--------- - public | f_processed_in_plpgsql | character varying | a character varying encrypted, b numeric encrypted | normal | f | f | f - public | f_processed_out_plpgsql | record | OUT out1 character varying encrypted, OUT out2 numeric encrypted | normal | f | f | f -(2 rows) + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+-------------------------+------------------+------------------------------------------------------------------+--------+------------+------------+--------- + public | f_processed_out_plpgsql | record | OUT out1 character varying encrypted, OUT out2 numeric encrypted | normal | f | f | f +(1 row) -- FAILED CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out1 OUT varchar(100), out2 OUT dec(15,2)) @@ -78,19 +77,34 @@ LANGUAGE plpgsql; SELECT f_processed_out_plpgsql(); f_processed_out_plpgsql 
------------------------- - (dani, 123.45) + (dani,123.45) (1 row) -\df +\df f_processed_out_plpgsql List of functions - Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind ---------+-------------------------+-------------------+------------------------------------------------------------------+--------+------------+------------+--------- - public | f_processed_in_plpgsql | character varying | a character varying encrypted, b numeric encrypted | normal | f | f | f - public | f_processed_out_plpgsql | record | OUT out1 character varying encrypted, OUT out2 numeric encrypted | normal | f | f | f -(2 rows) + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+-------------------------+------------------+------------------------------------------------------------------+--------+------------+------------+--------- + public | f_processed_out_plpgsql | record | OUT out1 character varying encrypted, OUT out2 numeric encrypted | normal | f | f | f +(1 row) DROP FUNCTION f_processed_in_plpgsql; DROP FUNCTION f_processed_out_plpgsql; +CREATE OR REPLACE FUNCTION select1() RETURNS varchar(100) LANGUAGE SQL AS 'SELECT name from accounts;'; +select proname, pronargs, prorettype, proargtypes, proallargtypes, proargnames, prorettype_orig, proargcachedcol, proallargtypes_orig +from pg_proc join gs_encrypted_proc on pg_proc.oid = func_id where proname = 'select1'; + proname | pronargs | prorettype | proargtypes | proallargtypes | proargnames | prorettype_orig | proargcachedcol | proallargtypes_orig +---------+----------+------------+-------------+----------------+-------------+-----------------+-----------------+--------------------- + select1 | 0 | 4402 | | | | 1043 | | +(1 row) + +CREATE OR REPLACE FUNCTION select1() RETURNS varchar(100) LANGUAGE SQL AS 'SELECT ''aaa'';'; +select proname, pronargs, prorettype, proargtypes, proallargtypes, proargnames, prorettype_orig, proargcachedcol, proallargtypes_orig +from pg_proc join gs_encrypted_proc on pg_proc.oid = func_id where proname = 'select1'; + proname | pronargs | prorettype | proargtypes | proallargtypes | proargnames | prorettype_orig | proargcachedcol | proallargtypes_orig +---------+----------+------------+-------------+----------------+-------------+-----------------+-----------------+--------------------- +(0 rows) + +DROP FUNCTION select1(); DROP TABLE accounts; DROP COLUMN ENCRYPTION KEY create_replace_cek; DROP CLIENT MASTER KEY create_replace_cmk; diff --git a/src/test/regress/expected/ce_functions_create_replace_1.out b/src/test/regress/expected/ce_functions_create_replace_1.out new file mode 100644 index 000000000..16af268db --- /dev/null +++ b/src/test/regress/expected/ce_functions_create_replace_1.out @@ -0,0 +1,95 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS create_replace_cmk CASCADE; +NOTICE: client master key "create_replace_cmk" does not exist +CREATE CLIENT MASTER KEY create_replace_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY create_replace_cek WITH VALUES (CLIENT_MASTER_KEY = create_replace_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +create table accounts ( + id serial, + name varchar(100) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = create_replace_cek, ENCRYPTION_TYPE = DETERMINISTIC), + balance dec(15,2) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = create_replace_cek, ENCRYPTION_TYPE = DETERMINISTIC), + primary key(id) +); +NOTICE: CREATE TABLE will create implicit sequence "accounts_id_seq" for serial column "accounts.id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "accounts_pkey" for table "accounts" +INSERT INTO accounts VALUES (1, 'dani', 123.45); +CREATE OR REPLACE FUNCTION f_processed_in_plpgsql(a varchar(100), b dec(15,2)) +RETURNS varchar(100) AS $$ +declare +c varchar(100); +BEGIN + SELECT into c name from accounts where name=$1 or balance=$2 LIMIT 1; + RETURN c; +END; $$ +LANGUAGE plpgsql; +SELECT COUNT(*) FROM gs_encrypted_proc where func_id NOT in (SELECT Oid FROM pg_proc); + count +------- + 0 +(1 row) + +CREATE OR REPLACE FUNCTION f_processed_in_plpgsql(a varchar(100), b dec(15,2)) +RETURNS varchar(100) AS $$ +declare +c varchar(100); +BEGIN + SELECT into c name from accounts where name=$1 or balance=$2 LIMIT 1; + RETURN c; +END; $$ +LANGUAGE plpgsql; +\df f_processed_in_plpgsql +SELECT COUNT(*) FROM gs_encrypted_proc where func_id NOT in (SELECT Oid FROM pg_proc); + count +------- + 0 +(1 row) + +CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out1 OUT varchar(100), out2 OUT dec(15,2)) +AS $$ +BEGIN +SELECT INTO out1, out2 name, balance from accounts LIMIT 1; +END; $$ +LANGUAGE plpgsql; +\df f_processed_out_plpgsql +-- FAILED +CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out1 OUT varchar(100), out2 OUT dec(15,2)) +AS $$ +BEGIN +SELECT INTO out1, out2 name, balance from accounts LIMIT 1; +END; $$ +LANGUAGE plpgsql; +SELECT f_processed_out_plpgsql(); + f_processed_out_plpgsql +------------------------- + (dani,123.45) +(1 row) + +\df f_processed_out_plpgsql +DROP FUNCTION f_processed_in_plpgsql; +DROP FUNCTION f_processed_out_plpgsql; +CREATE OR REPLACE FUNCTION select1() RETURNS varchar(100) LANGUAGE SQL AS 'SELECT name from accounts;'; +select proname, pronargs, prorettype, proargtypes, proallargtypes, proargnames, prorettype_orig, proargcachedcol, proallargtypes_orig +from pg_proc join gs_encrypted_proc on pg_proc.oid = func_id where proname = 'select1'; + proname | pronargs | prorettype | proargtypes | proallargtypes | proargnames | prorettype_orig | proargcachedcol | proallargtypes_orig +---------+----------+------------+-------------+----------------+-------------+-----------------+-----------------+--------------------- + select1 | 0 | 4402 | | | | 1043 | | +(1 row) + +CREATE OR REPLACE FUNCTION select1() RETURNS varchar(100) LANGUAGE SQL AS 'SELECT ''aaa'';'; +select proname, pronargs, prorettype, proargtypes, proallargtypes, proargnames, prorettype_orig, proargcachedcol, proallargtypes_orig +from pg_proc join gs_encrypted_proc on pg_proc.oid = func_id where proname = 'select1'; + proname | pronargs | prorettype | proargtypes | proallargtypes | proargnames | prorettype_orig | proargcachedcol | proallargtypes_orig 
+---------+----------+------------+-------------+----------------+-------------+-----------------+-----------------+--------------------- +(0 rows) + +DROP FUNCTION select1(); +DROP TABLE accounts; +DROP COLUMN ENCRYPTION KEY create_replace_cek; +DROP CLIENT MASTER KEY create_replace_cmk; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ce_functions_describe.out b/src/test/regress/expected/ce_functions_describe.out index 95e08741c..19ca6f392 100644 --- a/src/test/regress/expected/ce_functions_describe.out +++ b/src/test/regress/expected/ce_functions_describe.out @@ -25,7 +25,7 @@ BEGIN RETURN c; END; $$ LANGUAGE plpgsql; -\df +\df f_processed_in_plpgsql List of functions Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind --------+------------------------+-------------------+----------------------------------------------------+--------+------------+------------+--------- diff --git a/src/test/regress/expected/ce_functions_describe_1.out b/src/test/regress/expected/ce_functions_describe_1.out new file mode 100644 index 000000000..6c6bbe35a --- /dev/null +++ b/src/test/regress/expected/ce_functions_describe_1.out @@ -0,0 +1,35 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS desc_cmk CASCADE; +NOTICE: client master key "desc_cmk" does not exist +CREATE CLIENT MASTER KEY desc_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY desc_cek WITH VALUES (CLIENT_MASTER_KEY = desc_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +create table accounts ( + id serial, + name varchar(100) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = desc_cek, ENCRYPTION_TYPE = DETERMINISTIC), + balance dec(15,2) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = desc_cek, ENCRYPTION_TYPE = DETERMINISTIC), + primary key(id) +); +NOTICE: CREATE TABLE will create implicit sequence "accounts_id_seq" for serial column "accounts.id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "accounts_pkey" for table "accounts" +CREATE OR REPLACE FUNCTION f_processed_in_plpgsql(a varchar(100), b dec(15,2)) +RETURNS varchar(100) AS $$ +declare +c varchar(100); +BEGIN + SELECT into c name from accounts where name=$1 or balance=$2 LIMIT 1; + RETURN c; +END; $$ +LANGUAGE plpgsql; +\df f_processed_in_plpgsql +DROP FUNCTION f_processed_in_plpgsql(); +DROP TABLE accounts; +DROP COLUMN ENCRYPTION KEY desc_cek; +DROP CLIENT MASTER KEY desc_cmk; +\! 
gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ce_functions_examples.out b/src/test/regress/expected/ce_functions_examples.out index 8ba33fd42..dada005af 100644 --- a/src/test/regress/expected/ce_functions_examples.out +++ b/src/test/regress/expected/ce_functions_examples.out @@ -76,22 +76,6 @@ $$ LANGUAGE SQL; CREATE FUNCTION f_plaintext_return_table3(int, int) RETURNS TABLE(name text, val_p int, val2_p int) AS $$ SELECT * FROM t_plaintext WHERE val=$1 or val2=$2; $$ LANGUAGE SQL; - returns table ( - film_title varchar, - film_release_year int - ) - language plpgsql -as $$ -begin - return query - select - title, - release_year::integer - from - film - where - title ilike p_pattern; -end;$$ CREATE OR REPLACE FUNCTION get_all_plaintext_setof() RETURNS SETOF t_plaintext AS $BODY$ DECLARE @@ -107,14 +91,12 @@ BEGIN END $BODY$ LANGUAGE plpgsql; -ERROR: syntax error at or near "returns" -LINE 1: returns table ( - ^ SELECT * FROM get_all_plaintext_setof(); -ERROR: function get_all_plaintext_setof() does not exist -LINE 1: SELECT * FROM get_all_plaintext_setof(); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. + name | val | val2 +------+-----+------ + name | 1 | 2 +(1 row) + CREATE OR REPLACE FUNCTION get_all_plaintext() RETURNS int AS $BODY$ DECLARE @@ -217,7 +199,7 @@ CREATE OR REPLACE FUNCTION foo() end; $BODY$ LANGUAGE 'plpgsql' VOLATILE; -select proname, prorettype::regtype from pg_proc where Oid in (select func_id from gs_encrypted_proc); +select proname, prorettype::regtype from pg_proc where Oid in (select func_id from gs_encrypted_proc) order by proname; proname | prorettype --------------------------+------------------------------- f_processed_in | integer @@ -244,14 +226,10 @@ CALL f_processed_return_table(); (1 row) DROP TABLE t_plaintext CASCADE; -NOTICE: drop cascades to function f_plaintext_return_table2(integer,integer) +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function f_plaintext_return_table2(integer,integer) +drop cascades to function get_all_plaintext_setof() DROP TABLE t_processed CASCADE; -\d - List of relations - Schema | Name | Type | Owner | Storage ---------+------+------+-------+--------- -(0 rows) - DROP FUNCTION f_hardcoded; DROP FUNCTION f_hardcoded_variable; DROP FUNCTION f_plaintext_in; @@ -269,12 +247,6 @@ DROP FUNCTION reffunc_plaintext; DROP FUNCTION reffunc_processed; DROP FUNCTION f_plaintext_out; DROP FUNCTION f_processed_out; -\df - List of functions - Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind ---------+------+------------------+---------------------+------+------------+------------+--------- -(0 rows) - DROP COLUMN ENCRYPTION KEY func_cek; DROP CLIENT MASTER KEY func_cmk CASCADE; \! 
gs_ktool -d all diff --git a/src/test/regress/expected/ce_functions_hardcoded.out b/src/test/regress/expected/ce_functions_hardcoded.out index 39bc30fb6..6378305f3 100644 --- a/src/test/regress/expected/ce_functions_hardcoded.out +++ b/src/test/regress/expected/ce_functions_hardcoded.out @@ -8,37 +8,37 @@ DROP CLIENT MASTER KEY IF EXISTS hardcode_cmk CASCADE; NOTICE: client master key "hardcode_cmk" does not exist CREATE CLIENT MASTER KEY hardcode_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY hardcode_cek WITH VALUES (CLIENT_MASTER_KEY = hardcode_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -CREATE TABLE IF NOT EXISTS t1(id int, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = hardcode_cek, ENCRYPTION_TYPE = DETERMINISTIC)); -INSERT INTO t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5); -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM t1 WHERE i1=1;' LANGUAGE SQL; +CREATE TABLE IF NOT EXISTS hardcoded_t1(id int, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = hardcode_cek, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO hardcoded_t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5); +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM hardcoded_t1 WHERE i1=1;' LANGUAGE SQL; SELECT select_func(); select_func ------------- 1 (1 row) -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM t1 WHERE i1=2' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM hardcoded_t1 WHERE i1=2' LANGUAGE SQL; SELECT select_func(); select_func ------------- 2 (1 row) -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM t1 WHERE i1=3;$func_tag$ LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM hardcoded_t1 WHERE i1=3;$func_tag$ LANGUAGE SQL; SELECT select_func(); select_func ------------- 3 (1 row) -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM t1 WHERE i1=4$func_tag$ LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM hardcoded_t1 WHERE i1=4$func_tag$ LANGUAGE SQL; SELECT select_func(); select_func ------------- 4 (1 row) -CREATE OR REPLACE FUNCTION insert_func() RETURNS VOID AS 'INSERT INTO t1 VALUES(9,9);' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION insert_func() RETURNS VOID AS 'INSERT INTO hardcoded_t1 VALUES(9,9);' LANGUAGE SQL; SELECT insert_func(); insert_func ------------- @@ -63,13 +63,13 @@ SELECT insert_func(); (1 row) -SELECT count(*) from t1 where id=9; +SELECT count(*) from hardcoded_t1 where id=9; count ------- 4 (1 row) -CREATE OR REPLACE FUNCTION insert_select_func() RETURNS SETOF INTEGER AS 'INSERT INTO t1 VALUES(8,8); SELECT id FROM t1 WHERE i1=9;' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION insert_select_func() RETURNS SETOF INTEGER AS 'INSERT INTO hardcoded_t1 VALUES(8,8); SELECT id FROM hardcoded_t1 WHERE i1=9;' LANGUAGE SQL; SELECT insert_select_func(); insert_select_func -------------------- @@ -106,13 +106,13 @@ SELECT insert_select_func(); 9 (4 rows) -SELECT count(*) from t1 where id=8; +SELECT count(*) from hardcoded_t1 where id=8; count ------- 4 (1 row) -SELECT * from t1 order by id; +SELECT * from hardcoded_t1 order by id; id | i1 ----+---- 1 | 1 @@ -132,7 +132,7 @@ SELECT * from t1 order by id; CREATE FUNCTION f_hardcoded_variable() RETURNS int AS $$ BEGIN -RETURN(SELECT id from t1 where i1 = 5 LIMIT 1); +RETURN(SELECT id from hardcoded_t1 where i1 = 5 LIMIT 1); END; $$ LANGUAGE plpgsql; SELECT 
f_hardcoded_variable(); @@ -162,7 +162,6 @@ SELECT * FROM t_processed ORDER BY name; (1 row) DROP TABLE t_processed; -DROP TABLE t1 CASCADE; create table accounts ( id serial, name varchar(100) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = hardcode_cek, ENCRYPTION_TYPE = DETERMINISTIC), @@ -200,6 +199,192 @@ select * from accounts ORDER BY id; 3 | 300 | 300.00 (3 rows) +--hardcoded control +DROP FUNCTION IF EXISTS f_hardcoded1; +NOTICE: function f_hardcoded1() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded1() RETURNS SETOF int AS $$ +DECLARE + r integer; +BEGIN + FOR r IN + SELECT id FROM hardcoded_t1 where i1 = 5 + LOOP + RETURN NEXT r; + END LOOP; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded1(); + f_hardcoded1 +-------------- + 5 +(1 row) + +DROP FUNCTION f_hardcoded1; +DROP FUNCTION IF EXISTS f_hardcoded11; +NOTICE: function f_hardcoded11() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded11() RETURNS SETOF int AS $$ +DECLARE + r integer; +BEGIN + FOR r IN SELECT id FROM hardcoded_t1 where i1 = 5 + LOOP + RETURN NEXT r; + END LOOP; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded11(); + f_hardcoded11 +--------------- + 5 +(1 row) + +DROP FUNCTION f_hardcoded11; +DROP FUNCTION IF EXISTS f_hardcoded12; +NOTICE: function f_hardcoded12() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded12() RETURNS SETOF int AS $$ +DECLARE + r integer; +BEGIN + FOR r IN SELECT id FROM hardcoded_t1 where i1 = 5 LOOP + RETURN NEXT r; + END LOOP; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded12(); + f_hardcoded12 +--------------- + 5 +(1 row) + +DROP FUNCTION f_hardcoded12; +DROP FUNCTION IF EXISTS f_hardcoded2; +NOTICE: function f_hardcoded2() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded2() RETURNS SETOF int AS $$ +BEGIN + IF 1 > 0 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 5); + ELSIF 2 > 0 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 4); + ELSE + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 3); + END IF; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded2(); + f_hardcoded2 +-------------- + 5 +(1 row) + +DROP FUNCTION f_hardcoded2; +DROP FUNCTION IF EXISTS f_hardcoded3; +NOTICE: function f_hardcoded3() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded3() RETURNS SETOF int AS $$ +DECLARE + x integer := 5; +BEGIN + CASE + WHEN x BETWEEN 0 AND 5 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 5); + WHEN x BETWEEN 6 AND 10 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 10); + END CASE; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded3(); + f_hardcoded3 +-------------- + 5 +(1 row) + +DROP FUNCTION f_hardcoded3; +DROP FUNCTION IF EXISTS f_hardcoded4; +NOTICE: function f_hardcoded4() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded4() RETURNS SETOF int AS $$ +DECLARE + x integer := 5; +BEGIN + WHILE x > 0 LOOP + RETURN QUERY (SELECT id FROM hardcoded_t1 where i1 = 5); + x := x - 1; + END LOOP; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded4(); + f_hardcoded4 +-------------- + 5 + 5 + 5 + 5 + 5 +(5 rows) + +DROP FUNCTION f_hardcoded4; +DROP FUNCTION IF EXISTS f_hardcoded5; +NOTICE: function f_hardcoded5() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded5() RETURNS SETOF int AS $$ +BEGIN + FOR i IN 1..10 LOOP + RETURN QUERY (SELECT id FROM hardcoded_t1 where i1 = 5); + END LOOP; + FOR i IN REVERSE 10..1 LOOP + RETURN QUERY (SELECT id FROM hardcoded_t1 where i1 = 5); + END LOOP; +END; +$$ LANGUAGE plpgsql; 
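+-- Note: each RETURN QUERY above re-runs the one-row lookup, so the forward
+-- 1..10 loop and the REVERSE 10..1 loop contribute 10 rows apiece, giving the
+-- 20 identical rows below.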
+SELECT f_hardcoded5(); + f_hardcoded5 +-------------- + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 + 5 +(20 rows) + +DROP FUNCTION f_hardcoded5; +DROP FUNCTION IF EXISTS f_hardcoded6; +NOTICE: function f_hardcoded6() does not exist, skipping +CREATE OR REPLACE FUNCTION f_hardcoded6() RETURNS int AS $$ +BEGIN + UPDATE hardcoded_t1 set i1 = 5 where i1 = 5; + BEGIN + UPDATE hardcoded_t1 set i1 = 5 where i1 = 5; + EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'caught division_by_zero'; + RETURN 2; + END; + RETURN 1; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded6(); + f_hardcoded6 +-------------- + 1 +(1 row) + +DROP FUNCTION f_hardcoded6; +DROP TABLE hardcoded_t1 CASCADE; DROP TABLE accounts CASCADE; DROP FUNCTION insert_func100; DROP FUNCTION insert_func200; diff --git a/src/test/regress/expected/ce_functions_in_out_params.out b/src/test/regress/expected/ce_functions_in_out_params.out index f3f74f0b4..3822c29a5 100644 --- a/src/test/regress/expected/ce_functions_in_out_params.out +++ b/src/test/regress/expected/ce_functions_in_out_params.out @@ -45,7 +45,7 @@ Oid = gs_encrypted_proc.func_id WHERE proname IN ('f_processed_in_out', 'f_proce proname | prorettype | proallargtypes | prorettype_orig | proallargtypes_orig ------------------------------------+------------+---------------------+-----------------+--------------------- f_processed_in_out | 2249 | {4402,4402,23} | | {23,23,-1} - f_processed_in_out_1param | 4402 | {4402,4402} | 23 | {-1,23} + f_processed_in_out_1param | 4402 | {4402,4402} | 23 | {23,23} f_processed_in_out_aliases_plpgsql | 2249 | {4402,4402,23} | | {23,23,-1} f_processed_in_out_plpgsql | 2249 | {4402,4402,4402,23} | | {23,23,23,-1} f_processed_in_out_plpgsql2 | 2249 | {4402,23,4402} | | {23,-1,23} @@ -87,14 +87,14 @@ SELECT f_processed_in_out_aliases_plpgsql(4); (4,40) (1 row) -DROP TABLE t_processed CASCADE; -DROP TABLE t_processed_b CASCADE; DROP FUNCTION f_processed_in_out_1param; -DROP FUNCTION f_processed_in_out; +DROP FUNCTION f_processed_in_out(int); DROP FUNCTION f_processed_in_out_b; DROP FUNCTION f_processed_in_out_plpgsql; DROP FUNCTION f_processed_in_out_plpgsql2; DROP FUNCTION f_processed_in_out_aliases_plpgsql; +DROP TABLE t_processed CASCADE; +DROP TABLE t_processed_b CASCADE; DROP COLUMN ENCRYPTION KEY in_out_cek; DROP CLIENT MASTER KEY in_out_cmk; \! 
gs_ktool -d all diff --git a/src/test/regress/expected/ce_functions_input_params.out b/src/test/regress/expected/ce_functions_input_params.out index 58367246e..534029b45 100644 --- a/src/test/regress/expected/ce_functions_input_params.out +++ b/src/test/regress/expected/ce_functions_input_params.out @@ -64,7 +64,7 @@ create table accounts ( NOTICE: CREATE TABLE will create implicit sequence "accounts_id_seq" for serial column "accounts.id" NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "accounts_pkey" for table "accounts" CREATE OR REPLACE FUNCTION insert_func_2(name varchar(100), balance dec(15,2)) RETURNS VOID AS 'INSERT INTO accounts(name,balance) VALUES($1, $2);' LANGUAGE SQL; -call insert_func_2('Bob', 101.30); +call regression.public.insert_func_2('Bob', 101.30); insert_func_2 --------------- @@ -97,7 +97,7 @@ select * from accounts order by id; 4 | Donald | 1214.88 (4 rows) -drop FUNCTION f_processed_in_sql; +drop FUNCTION f_processed_in_sql(int, int); drop FUNCTION f_processed_in_sql_named; drop FUNCTION insert_func_2; drop table t_processed; diff --git a/src/test/regress/expected/ce_functions_input_params_1.out b/src/test/regress/expected/ce_functions_input_params_1.out new file mode 100644 index 000000000..b6dcb5e1b --- /dev/null +++ b/src/test/regress/expected/ce_functions_input_params_1.out @@ -0,0 +1,99 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS input_cmk CASCADE; +NOTICE: client master key "input_cmk" does not exist +CREATE CLIENT MASTER KEY input_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY input_cek WITH VALUES (CLIENT_MASTER_KEY = input_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_processed (name text, val INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = input_cek, ENCRYPTION_TYPE = DETERMINISTIC), val2 INT); +insert into t_processed values('one',1,10),('two',2,20),('three',3,30),('four',4,40),('five',5,50),('six',6,60),('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100); +CREATE FUNCTION f_processed_in_sql(int, int) RETURNS int AS 'SELECT val2 from t_processed where val=$1 or val2=$2 LIMIT 1' LANGUAGE SQL; +\sf f_processed_in_sql +select f_processed_in_sql(1,2); + f_processed_in_sql +-------------------- + 10 +(1 row) + +call f_processed_in_sql(1,2); + f_processed_in_sql +-------------------- + 10 +(1 row) + +select * from f_processed_in_sql(1,2); + f_processed_in_sql +-------------------- + 10 +(1 row) + +CREATE FUNCTION f_processed_in_sql_named (val_param int, val2_param int) RETURNS int AS 'SELECT val2 from t_processed where val=val_param or val2=val2_param LIMIT 1' LANGUAGE SQL; +\sf f_processed_in_sql_named +select f_processed_in_sql_named (100,val2_param => 30 ); + f_processed_in_sql_named +-------------------------- + 30 +(1 row) + +select * from t_processed where val2 = f_processed_in_sql_named (val_param := 7,val2_param => 300 ); + name | val | val2 +-------+-----+------ + seven | 7 | 70 +(1 row) + +delete t_processed where val2 = f_processed_in_sql_named (val_param => 6,val2_param := 500 ); +create table accounts ( + id serial, + name varchar(100) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = input_cek, ENCRYPTION_TYPE = DETERMINISTIC), + balance dec(15,2) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = input_cek, ENCRYPTION_TYPE = DETERMINISTIC), + primary key(id) +); +NOTICE: CREATE TABLE will create implicit sequence "accounts_id_seq" for serial column "accounts.id" +NOTICE: CREATE TABLE / 
PRIMARY KEY will create implicit index "accounts_pkey" for table "accounts"
+CREATE OR REPLACE FUNCTION insert_func_2(name varchar(100), balance dec(15,2)) RETURNS VOID AS 'INSERT INTO accounts(name,balance) VALUES($1, $2);' LANGUAGE SQL;
+call regression.public.insert_func_2('Bob', 101.30);
+ insert_func_2 
+---------------
+ 
+(1 row)
+
+call insert_func_2('George', 505.70);
+ insert_func_2 
+---------------
+ 
+(1 row)
+
+select insert_func_2('Joe', 710.00);
+ insert_func_2 
+---------------
+ 
+(1 row)
+
+select * from insert_func_2('Donald', 1214.88);
+ insert_func_2 
+---------------
+ 
+(1 row)
+
+select * from accounts order by id;
+ id |  name  | balance 
+----+--------+---------
+  1 | Bob    |  101.30
+  2 | George |  505.70
+  3 | Joe    |  710.00
+  4 | Donald | 1214.88
+(4 rows)
+
+drop FUNCTION f_processed_in_sql(int, int);
+drop FUNCTION f_processed_in_sql_named;
+drop FUNCTION insert_func_2;
+drop table t_processed;
+drop table accounts;
+DROP COLUMN ENCRYPTION KEY input_cek;
+DROP CLIENT MASTER KEY input_cmk;
+\! gs_ktool -d all
+DELETE ALL
+ 1
diff --git a/src/test/regress/expected/ce_functions_out_params.out b/src/test/regress/expected/ce_functions_out_params.out
index 29c6ea8c3..22cbe678c 100644
--- a/src/test/regress/expected/ce_functions_out_params.out
+++ b/src/test/regress/expected/ce_functions_out_params.out
@@ -64,7 +64,7 @@ SELECT f_processed_out();
 SELECT f_processed_out_b();
  f_processed_out_b 
 -------------------
- ("\x74657374",2)
+ ("\\x74657374",2)
 (1 row)
 
 SELECT f_processed_out_plpgsql();
diff --git a/src/test/regress/expected/ce_functions_return_table.out b/src/test/regress/expected/ce_functions_return_table.out
index 843dc3651..a91ebf497 100644
--- a/src/test/regress/expected/ce_functions_return_table.out
+++ b/src/test/regress/expected/ce_functions_return_table.out
@@ -33,22 +33,58 @@ CALL select4();
  2 | Alice | 10000.00
 (2 rows)
 
-SELECT select2();
-      select2      
--------------------
- (1,Bob, 10000.00)
+DROP TABLE IF EXISTS fuc_creditcard_info;
+NOTICE: table "fuc_creditcard_info" does not exist, skipping
+CREATE TABLE fuc_creditcard_info (id_number int, name text encrypted with (column_encryption_key = ret_cek1, encryption_type = DETERMINISTIC),
+credit_card varchar(19) encrypted with (column_encryption_key = ret_cek1, encryption_type = DETERMINISTIC));
+INSERT INTO fuc_creditcard_info VALUES (1,2,3);
+-- The return-table column types in the function definition match the encrypted table's column types, so values encrypt and decrypt normally
+DROP FUNCTION IF EXISTS select5();
+NOTICE: function select5() does not exist, skipping
+CREATE or replace FUNCTION select5() RETURNS TABLE (
+    name text ,
+    credit_card varchar(19)
+) LANGUAGE SQL
+AS 'SELECT name, credit_card from fuc_creditcard_info;';
+call select5();
+ name | credit_card 
+------+-------------
+ 2    | 3
 (1 row)
 
-SELECT select4();
-       select4       
---------------------
- (1,Bob, 10000.00)
- (2,Alice, 10000.00)
-(2 rows)
+-- The return-table column type VARCHAR differs from the encrypted table's text type for name, but values still encrypt and decrypt normally
+DROP FUNCTION IF EXISTS select6;
+NOTICE: function select6() does not exist, skipping
+CREATE or replace FUNCTION select6() RETURNS TABLE (
+name VARCHAR,
+credit_card VARCHAR
+) LANGUAGE SQL
+AS 'SELECT name, credit_card from fuc_creditcard_info;';
+call select6();
+ name | credit_card 
+------+-------------
+ 2    | 3
+(1 row)
+
+-- The return-table column type INT does not match the encrypted table's varchar(19) column type, so an error is reported
+DROP FUNCTION IF EXISTS select7;
+NOTICE: function select7() does not exist, skipping
+CREATE or replace FUNCTION select7() RETURNS TABLE (
+name text,
+credit_card INT
+) LANGUAGE SQL
+AS 'SELECT name, credit_card from fuc_creditcard_info;';
+ERROR: return type mismatch in function declared to return
record +DETAIL: Final statement returns character varying instead of integer at column 2. +CONTEXT: SQL function "select7" DROP FUNCTION select2(); DROP FUNCTION select4(); +DROP FUNCTION select5(); +DROP FUNCTION select6(); +DROP FUNCTION select7(); +ERROR: function select7 does not exist DROP TABLE accounts; +DROP TABLE fuc_creditcard_info; DROP COLUMN ENCRYPTION KEY ret_cek1; DROP CLIENT MASTER KEY ret_cmk1; \! gs_ktool -d all diff --git a/src/test/regress/expected/ce_functions_return_values.out b/src/test/regress/expected/ce_functions_return_values.out index 587df98ed..06de45150 100644 --- a/src/test/regress/expected/ce_functions_return_values.out +++ b/src/test/regress/expected/ce_functions_return_values.out @@ -15,8 +15,8 @@ INSERT INTO t_num (id, num) VALUES (2, 6666); SELECT * from t_num; id | num ----+------ - 1 | 5555 2 | 6666 + 1 | 5555 (2 rows) CREATE FUNCTION select1 () RETURNS t_num LANGUAGE SQL @@ -62,21 +62,54 @@ begin end; $BODY$ language plpgsql ; -\df - List of functions - Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind ---------+--------------------------+------------------------------------------------+---------------------+--------+------------+------------+--------- - public | f_processed_return_table | TABLE(val_p integer, val2_p integer encrypted) | | normal | f | f | f - public | get_rows_setof | SETOF t_num | | normal | f | f | f - public | reffunc | refcursor | refcursor | normal | f | f | f - public | select1 | t_num | | normal | f | f | f - public | select2 | t_num | | normal | f | f | f - public | select3 | SETOF t_num | | normal | f | f | f - public | select4 | SETOF t_num | | normal | f | f | f - public | select5 | integer encrypted | | normal | f | f | f - public | select6 | SETOF integer encrypted | | normal | f | f | f - public | select7 | TABLE(a integer, b integer encrypted) | | normal | f | f | f -(10 rows) +\df select1 + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+---------+------------------+---------------------+--------+------------+------------+--------- + public | select1 | t_num | | normal | f | f | f +(1 row) + +\df select2 + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+---------+------------------+---------------------+--------+------------+------------+--------- + public | select2 | t_num | | normal | f | f | f +(1 row) + +\df select3 + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+---------+------------------+---------------------+--------+------------+------------+--------- + public | select3 | SETOF t_num | | normal | f | f | f +(1 row) + +\df select4 + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+---------+------------------+---------------------+--------+------------+------------+--------- + public | select4 | SETOF t_num | | normal | f | f | f +(1 row) + +\df select5 + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+---------+-------------------+---------------------+--------+------------+------------+--------- + public | select5 | integer encrypted | | normal | f | f | f +(1 row) + +\df select6 + List of functions + Schema | Name | Result data type | Argument data types | Type | 
fencedmode | propackage | prokind +--------+---------+-------------------------+---------------------+--------+------------+------------+--------- + public | select6 | SETOF integer encrypted | | normal | f | f | f +(1 row) + +\df select7 + List of functions + Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind +--------+---------+---------------------------------------+---------------------+--------+------------+------------+--------- + public | select7 | TABLE(a integer, b integer encrypted) | | normal | f | f | f +(1 row) call select1(); id | num @@ -124,13 +157,6 @@ call select7(); 2 | 6666 (2 rows) -call select7(); - a | b ----+------ - 1 | 5555 - 2 | 6666 -(2 rows) - CALL f_processed_return_table(); val_p | val2_p -------+-------- @@ -167,17 +193,6 @@ drop cascades to function select2() drop cascades to function select3() drop cascades to function select4() drop cascades to function get_rows_setof() -\df - List of functions - Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind ---------+--------------------------+------------------------------------------------+---------------------+--------+------------+------------+--------- - public | f_processed_return_table | TABLE(val_p integer, val2_p integer encrypted) | | normal | f | f | f - public | reffunc | refcursor | refcursor | normal | f | f | f - public | select5 | integer encrypted | | normal | f | f | f - public | select6 | SETOF integer encrypted | | normal | f | f | f - public | select7 | TABLE(a integer, b integer encrypted) | | normal | f | f | f -(5 rows) - DROP FUNCTION select6; DROP FUNCTION select5; DROP FUNCTION select7; @@ -185,12 +200,6 @@ DROP FUNCTION reffunc(refcursor); DROP FUNCTION get_rows_setof(); ERROR: function get_rows_setof does not exist DROP FUNCTION f_processed_return_table(); -\df - List of functions - Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind ---------+------+------------------+---------------------+------+------------+------------+--------- -(0 rows) - SELECT COUNT(*) FROM gs_encrypted_proc; count ------- diff --git a/src/test/regress/expected/ce_functions_return_values_1.out b/src/test/regress/expected/ce_functions_return_values_1.out new file mode 100644 index 000000000..a60b1f442 --- /dev/null +++ b/src/test/regress/expected/ce_functions_return_values_1.out @@ -0,0 +1,176 @@ +\set verbosity verbose +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS ret_cmk2 CASCADE; +NOTICE: client master key "ret_cmk2" does not exist +CREATE CLIENT MASTER KEY ret_cmk2 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY ret_cek2 WITH VALUES (CLIENT_MASTER_KEY = ret_cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS t_num(id INT, num int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ret_cek2, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_num (id, num) VALUES (1, 5555); +INSERT INTO t_num (id, num) VALUES (2, 6666); +SELECT * from t_num; + id | num +----+------ + 2 | 6666 + 1 | 5555 +(2 rows) + +CREATE FUNCTION select1 () RETURNS t_num LANGUAGE SQL +AS 'SELECT * from t_num;'; +CREATE FUNCTION select2 () RETURNS t_num LANGUAGE SQL +AS 'SELECT id, num from t_num;'; +CREATE FUNCTION select3 () RETURNS setof t_num LANGUAGE SQL +AS 'SELECT * from t_num;'; +CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL +AS 'SELECT id, num from t_num;'; +CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL +AS 'SELECT num from t_num;'; +CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL +AS 'SELECT num from t_num;'; +CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL +AS 'SELECT id, num from t_num;'; +CREATE FUNCTION reffunc(refcursor) RETURNS refcursor AS ' +BEGIN + OPEN $1 FOR SELECT * FROM t_num; + RETURN $1; +END; +' LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS +$BODY$ +DECLARE + r t_num%rowtype; +BEGIN + FOR r IN + SELECT * FROM t_num + LOOP + -- can do some processing here + RETURN NEXT r; -- return current row of SELECT + END LOOP; + RETURN; +END +$BODY$ +LANGUAGE plpgsql; +CREATE FUNCTION f_processed_return_table() RETURNS TABLE(val_p int, val2_p int) +as +$BODY$ +begin + return query (SELECT id, num from t_num); +end; +$BODY$ +language plpgsql ; +\df select1 +\df select2 +\df select3 +\df select4 +\df select5 +\df select6 +\df select7 +call select1(); + id | num +----+------ + 1 | 5555 +(1 row) + +call select2(); + id | num +----+------ + 1 | 5555 +(1 row) + +call select3(); + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +call select4(); + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +call select5(); + select5 +--------- + 5555 +(1 row) + +call select6(); + select6 +--------- + 5555 + 6666 +(2 rows) + +call select7(); + a | b +---+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +CALL f_processed_return_table(); + val_p | val2_p +-------+-------- + 1 | 5555 + 2 | 6666 +(2 rows) + +BEGIN; +SELECT reffunc('funccursor'); + reffunc +------------ + funccursor +(1 row) + +FETCH ALL IN funccursor; + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +COMMIT; +SELECT * FROM get_rows_setof(); + id | num +----+------ + 1 | 5555 + 2 | 6666 +(2 rows) + +DROP TABLE t_num CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to function select1() +drop cascades to function select2() +drop cascades to function select3() +drop cascades to function select4() +drop cascades to function get_rows_setof() +DROP FUNCTION select6; +DROP FUNCTION select5; +DROP FUNCTION select7; +DROP FUNCTION reffunc(refcursor); +DROP FUNCTION get_rows_setof(); +ERROR: function get_rows_setof does not exist +DROP FUNCTION f_processed_return_table(); +SELECT COUNT(*) FROM gs_encrypted_proc; + count +------- + 0 +(1 row) + +SELECT proname, prorettype, proallargtypes FROM gs_encrypted_proc JOIN pg_proc ON pg_proc.Oid = gs_encrypted_proc.func_id; + 
proname | prorettype | proallargtypes +---------+------------+---------------- +(0 rows) + +DROP COLUMN ENCRYPTION KEY ret_cek2; +DROP CLIENT MASTER KEY ret_cmk2; +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ce_functions_return_variable.out b/src/test/regress/expected/ce_functions_return_variable.out index 402c81396..4a7482049 100644 --- a/src/test/regress/expected/ce_functions_return_variable.out +++ b/src/test/regress/expected/ce_functions_return_variable.out @@ -36,6 +36,38 @@ ERROR: value too long for type character varying(100) CONTEXT: PL/pgSQL function f_processed_in_plpgsql(byteawithoutorderwithequalcol,byteawithoutorderwithequalcol) line 5 at SQL statement DROP FUNCTION f_processed_in_plpgsql(); DROP TABLE accounts; +CREATE TABLE creditcard_info1 (id_number int,name text, credit_card varchar(19)); +CREATE TABLE creditcard_info2 (id_number int,name text encrypted with (column_encryption_key = ret_cek3, encryption_type = DETERMINISTIC),credit_card varchar(19) encrypted with (column_encryption_key = ret_cek3, encryption_type = DETERMINISTIC)); +CREATE or replace FUNCTION exec_insert1() RETURNS void AS $$ + insert into creditcard_info1 values(1,2,3); + select credit_card from creditcard_info1; + $$ LANGUAGE SQL; +ERROR: return type mismatch in function declared to return void +DETAIL: Actual return type is character varying. +CONTEXT: SQL function "exec_insert1" +CREATE or replace FUNCTION exec_insert2() RETURNS void AS $$ + insert into creditcard_info2 values(1,2,3); + select credit_card from creditcard_info2; + $$ LANGUAGE SQL; +ERROR: return type mismatch in function declared to return void +DETAIL: Actual return type is character varying. +CONTEXT: SQL function "exec_insert2" +CREATE or replace FUNCTION exec_insert1() RETURNS int AS $$ + insert into creditcard_info1 values(1,2,3); + select credit_card from creditcard_info1; + $$ LANGUAGE SQL; +ERROR: return type mismatch in function declared to return integer +DETAIL: Actual return type is character varying. +CONTEXT: SQL function "exec_insert1" +CREATE or replace FUNCTION exec_insert2() RETURNS int AS $$ + insert into creditcard_info2 values(1,2,3); + select credit_card from creditcard_info2; + $$ LANGUAGE SQL; +ERROR: return type mismatch in function declared to return integer +DETAIL: Actual return type is character varying. +CONTEXT: SQL function "exec_insert2" +DROP TABLE creditcard_info1; +DROP TABLE creditcard_info2; DROP COLUMN ENCRYPTION KEY ret_cek3; DROP CLIENT MASTER KEY ret_cmk3; \! 
gs_ktool -d all diff --git a/src/test/regress/expected/ce_insert_from_select_test.out b/src/test/regress/expected/ce_insert_from_select_test.out index f1195b474..b003eff6c 100755 --- a/src/test/regress/expected/ce_insert_from_select_test.out +++ b/src/test/regress/expected/ce_insert_from_select_test.out @@ -110,6 +110,13 @@ insert into creditcard_info1 select * from creditcard_info ; ERROR(CLIENT): unencrypted data should not be inserted into encrypted column insert into creditcard_info1 select * from creditcard_info2 ; ERROR(CLIENT): encrypted data should not be inserted into encrypted column with different keys +-- error +INSERT INTO creditcard_info1(id_number, name, credit_card) SELECT id_number, name, credit_card FROM creditcard_info2; +ERROR(CLIENT): encrypted data should not be inserted into encrypted column with different keys +INSERT INTO creditcard_info1(id_number, credit_card) SELECT id_number, credit_card FROM creditcard_info2; +ERROR(CLIENT): encrypted data should not be inserted into encrypted column with different keys +-- succeed +INSERT INTO creditcard_info1(id_number, name) SELECT id_number, name FROM creditcard_info2; drop table creditcard_info; drop table creditcard_info1; drop table creditcard_info2; diff --git a/src/test/regress/expected/ce_join.out b/src/test/regress/expected/ce_join.out index cb30df5d1..137f6b11d 100644 --- a/src/test/regress/expected/ce_join.out +++ b/src/test/regress/expected/ce_join.out @@ -341,8 +341,8 @@ INSERT INTO join_t2 VALUES (2, 2, 25); SELECT * FROM join_t1 JOIN join_t2 ON (join_t1.customer_id1=join_t2.customer_id2) JOIN join_t3 ON (join_t2.customer_id2=join_t3.customer_id3 AND join_t1.customer_id1=join_t3.customer_id3); id | customer_id1 | age1 | id | customer_id2 | age2 | id | customer_id3 | age3 ----+--------------+------+----+--------------+------+----+--------------+------ - 1 | 1 | 15 | 1 | 1 | 30 | 1 | 1 | 20 2 | 1 | 20 | 1 | 1 | 30 | 1 | 1 | 20 + 1 | 1 | 15 | 1 | 1 | 30 | 1 | 1 | 20 (2 rows) SELECT * FROM join_t1 JOIN join_t2 ON (join_t1.age1=join_t2.age2); @@ -402,8 +402,8 @@ select * from join_t1 t1 left join where t2.customer_id2 = 1; id | customer_id1 | age1 | id | customer_id2 | age2 | id | customer_id3 | age3 ----+--------------+------+----+--------------+------+----+--------------+------ - 1 | 1 | 15 | 1 | 1 | 30 | 1 | 1 | 20 2 | 1 | 20 | 1 | 1 | 30 | 1 | 1 | 20 + 1 | 1 | 15 | 1 | 1 | 30 | 1 | 1 | 20 (2 rows) DROP TABLE join_t1; diff --git a/src/test/regress/expected/ce_kt_invalid_input.out b/src/test/regress/expected/ce_kt_invalid_input.out index dceb7f78d..4be7377fb 100644 --- a/src/test/regress/expected/ce_kt_invalid_input.out +++ b/src/test/regress/expected/ce_kt_invalid_input.out @@ -286,10 +286,10 @@ RK management options: -- print version \! gs_ktool -v -v VERSION -gs_ktool V1.0.0 || KMC V3.0.0.SPC005 +gs_ktool 1.0.0 | KMC 21.1.0.B006 \! gs_ktool -v 0 VERSION -gs_ktool V1.0.0 || KMC V3.0.0.SPC005 +gs_ktool 1.0.0 | KMC 21.1.0.B006 \! gs_ktool -V gs_ktool: invalid option -- 'V' HINT: try '-h' or '-?' for more infromation. diff --git a/src/test/regress/expected/ce_kt_key_manage.out b/src/test/regress/expected/ce_kt_key_manage.out index 674fad3af..64201baaa 100644 --- a/src/test/regress/expected/ce_kt_key_manage.out +++ b/src/test/regress/expected/ce_kt_key_manage.out @@ -126,7 +126,7 @@ UPDATE ROOTKEY -- print version \! gs_ktool -v VERSION -gs_ktool V1.0.0 || KMC V3.0.0.SPC005 +gs_ktool 1.0.0 | KMC 21.1.0.B006 -- print help \! 
gs_ktool -h HELP diff --git a/src/test/regress/expected/ce_kt_toughness.out b/src/test/regress/expected/ce_kt_toughness.out new file mode 100644 index 000000000..7e4c4e6d0 --- /dev/null +++ b/src/test/regress/expected/ce_kt_toughness.out @@ -0,0 +1,69 @@ +\! rm -f $GAUSSHOME/etc/gs_ktool.log +-- 1 primary file : not exist, secondary file : not exist | succeed +\! rm -f $GAUSSHOME/etc/gs_ktool_file/*.dat && ls $GAUSSHOME/etc/gs_ktool_file/ +gs_ktool_conf.ini +gs_ktool.log +\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/ +GENERATE +1 +gs_ktool_conf.ini +gs_ktool.log +primary_ksf.dat +secondary_ksf.dat +-- 2 primary file : exist, secondary file : exist | succeed +\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/ +GENERATE +2 +gs_ktool_conf.ini +gs_ktool.log +primary_ksf.dat +secondary_ksf.dat +-- 3 primary file : not exist, secondary file : exist | succeed +\! rm -f $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat && ls $GAUSSHOME/etc/gs_ktool_file/ +gs_ktool_conf.ini +gs_ktool.log +secondary_ksf.dat +\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/ +GENERATE +3 +gs_ktool_conf.ini +gs_ktool.log +primary_ksf.dat +secondary_ksf.dat +-- 4 primary file : exist, secondary file : not exist | succeed +\! rm -f $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat && ls $GAUSSHOME/etc/gs_ktool_file/ +gs_ktool_conf.ini +gs_ktool.log +primary_ksf.dat +\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/ +GENERATE +4 +gs_ktool_conf.ini +gs_ktool.log +primary_ksf.dat +secondary_ksf.dat +-- 5 primary file : tainted, secondary file : normal | succeed +\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat +invalid data +\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat | grep 'invalid data' +GENERATE +5 +-- 6 primary file : normal, secondary file : tainted | succeed +\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat +invalid data +\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat | grep 'invalid data' +GENERATE +6 +-- 7 primary file : tainted, secondary file : tainted | failed +\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat +invalid data +\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat +invalid data +\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat | grep 'invalid data' +ERROR: failed to initlize kmc. +invalid data +\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat | grep 'invalid data' +ERROR: failed to initlize kmc. +invalid data +-- clear +\! rm -f $GAUSSHOME/etc/gs_ktool_file/*.dat diff --git a/src/test/regress/expected/ce_multinode_create_table_like.out b/src/test/regress/expected/ce_multinode_create_table_like.out deleted file mode 100644 index 036827c2e..000000000 --- a/src/test/regress/expected/ce_multinode_create_table_like.out +++ /dev/null @@ -1,38 +0,0 @@ -\! gs_ktool -d all -DELETE ALL - -\!
gs_ktool -g -GENERATE -1 -CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); -CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); ----multi-nodegroup---- -create node group ngroup1 with (datanode1, datanode3); -create node group ngroup2 with (datanode2, datanode4); -CREATE TABLE test2 (z int, a int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = public.cek1, ENCRYPTION_TYPE = DETERMINISTIC), b int) TO GROUP ngroup1; -NOTICE: The 'DISTRIBUTE BY' clause is not specified. Using 'z' as the distribution column by default. -HINT: Please use 'DISTRIBUTE BY' clause to specify suitable data distribution column. -CREATE TABLE like_test2 (LIKE test2 including all) TO GROUP ngroup2; -INSERT INTO test2 VALUES(3, 4,5); -SELECT * from test2; - z | a | b ----+---+--- - 3 | 4 | 5 -(1 row) - -INSERT INTO like_test2 VALUES(3, 4,5); -SELECT * from like_test2; - z | a | b ----+---+--- - 3 | 4 | 5 -(1 row) - -DROP TABLE like_test2; -DROP TABLE test2; -DROP COLUMN ENCRYPTION KEY cek1 CASCADE; -DROP CLIENT MASTER KEY cmk1 CASCADE; -DROP NODE GROUP ngroup1; -DROP NODE GROUP ngroup2; -\! gs_ktool -d all -DELETE ALL - 1 diff --git a/src/test/regress/expected/ce_orderby.out b/src/test/regress/expected/ce_orderby.out index 50de6cccf..c3ce1a245 100644 --- a/src/test/regress/expected/ce_orderby.out +++ b/src/test/regress/expected/ce_orderby.out @@ -70,7 +70,6 @@ DROP COLUMN ENCRYPTION KEY testns.UnsupportCEK; DROP COLUMN ENCRYPTION KEY public.UnsupportCEK; DROP CLIENT MASTER KEY testns.UnsupportCMK; DROP CLIENT MASTER KEY public.UnsupportCMK; -DROP SCHEMA testns CASCADE; \! gs_ktool -d all DELETE ALL 1 2 diff --git a/src/test/regress/expected/ce_permission_on_keys_schema.out b/src/test/regress/expected/ce_permission_on_keys_schema.out index b9152a915..2793089cb 100644 --- a/src/test/regress/expected/ce_permission_on_keys_schema.out +++ b/src/test/regress/expected/ce_permission_on_keys_schema.out @@ -26,6 +26,7 @@ SET search_path to testns; -- SHOULD FAILL - create TABLE using existing MyCEK1 (missing permissions to both MyCEK1 and MyCMK1) CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC)); ERROR: permission denied for column encryption key mycek1 +DETAIL: N/A RESET SESSION AUTHORIZATION; -- add permission to the keys to newuser (ALL = USAGE, DROP) GRANT USAGE ON COLUMN_ENCRYPTION_KEY MyCEK1 to newuser; @@ -130,6 +131,7 @@ SET search_path to testns; --check DROP KEY WITHOUT PREMISSION(false) DROP COLUMN ENCRYPTION KEY MyCEK1; ERROR: permission denied for column encryption key mycek1 +DETAIL: N/A DROP CLIENT MASTER KEY MyCMK1; ERROR: cannot drop client master key: mycmk1 because other objects depend on it DETAIL: column encryption key: mycek1 depends on client master key: mycmk1 @@ -137,13 +139,15 @@ HINT: Use DROP ... CASCADE to drop the dependent objects too. 
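The ce_permission_on_keys_schema.out hunks around here exercise openGauss's key-level access control: CLIENT_MASTER_KEY and COLUMN_ENCRYPTION_KEY objects carry their own ACLs, and referencing a CEK in a table definition requires USAGE on both the CEK and the CMK it depends on. A minimal sketch of the grant/check/revoke cycle these tests rely on; demo_cmk, demo_cek and demo_user are illustrative names, not part of the patch:

-- assumes a key slot generated beforehand with: \! gs_ktool -g
CREATE CLIENT MASTER KEY demo_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
CREATE COLUMN ENCRYPTION KEY demo_cek WITH VALUES (CLIENT_MASTER_KEY = demo_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
CREATE USER demo_user PASSWORD 'Gauss@123';
-- USAGE on both keys is needed before demo_user may reference demo_cek in a table definition
GRANT USAGE ON CLIENT_MASTER_KEY demo_cmk TO demo_user;
GRANT USAGE ON COLUMN_ENCRYPTION_KEY demo_cek TO demo_user;
-- verify the grants
SELECT has_cmk_privilege('demo_user', 'demo_cmk', 'USAGE');  -- t
SELECT has_cek_privilege('demo_user', 'demo_cek', 'USAGE');  -- t
-- after the revokes, CREATE TABLE ... ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = demo_cek, ...) fails again for demo_user
REVOKE USAGE ON COLUMN_ENCRYPTION_KEY demo_cek FROM demo_user;
REVOKE USAGE ON CLIENT_MASTER_KEY demo_cmk FROM demo_user;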
--check DELETE KEYS(false) delete from gs_client_global_keys; ERROR: permission denied for relation gs_client_global_keys +DETAIL: N/A delete from gs_column_keys; ERROR: permission denied for relation gs_column_keys +DETAIL: N/A RESET SESSION AUTHORIZATION; REVOKE USAGE ON COLUMN_ENCRYPTION_KEY MyCEK1 FROM newuser; REVOKE USAGE ON CLIENT_MASTER_KEY MyCMK1 FROM newuser; -GRANT DROP ON COLUMN_ENCRYPTION_KEY MyCEK1 to newuser; -GRANT DROP ON CLIENT_MASTER_KEY MyCMK1 to newuser; +GRANT DROP ON COLUMN_ENCRYPTION_KEY testns.MyCEK1 to newuser; +GRANT DROP ON CLIENT_MASTER_KEY testns.MyCMK1 to newuser; SELECT has_cmk_privilege('newuser', 'testns.MyCMK1', 'USAGE'); has_cmk_privilege ------------------- @@ -201,3 +205,97 @@ DROP ROLE IF EXISTS newuser; \! gs_ktool -d all DELETE ALL 1 +\! gs_ktool -g +GENERATE +1 +CREATE USER d_user1 PASSWORD 'gauss@123'; +GRANT ALL ON SCHEMA public TO d_user1; +SET SESSION AUTHORIZATION d_user1 PASSWORD 'gauss@123'; +SET search_path to public; +CREATE CLIENT MASTER KEY MyCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY MyCEK1 WITH VALUES (CLIENT_MASTER_KEY = MyCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC)); +RESET SESSION AUTHORIZATION; +drop ROLE d_user1; +ERROR: role "d_user1" cannot be dropped because some objects depend on it +DETAIL: owner of table acltest1 +privileges for schema public +owner of schema d_user1 +drop USER d_user1; +ERROR: role "d_user1" cannot be dropped because some objects depend on it +DETAIL: owner of table acltest1 +privileges for schema public +drop USER d_user1 cascade; +NOTICE: drop cascades to client master key: mycmk1 +NOTICE: drop cascades to column encryption key: mycek1 +select count(*) from gs_client_global_keys; + count +------- + 0 +(1 row) + +select count(*) from gs_column_keys; + count +------- + 0 +(1 row) + +\! gs_ktool -d all +DELETE ALL + 1 +\! 
gs_ktool -g +GENERATE +1 +CREATE USER sysadmin1 with sysadmin PASSWORD "Gauss_234"; +CREATE USER rsr3 with PASSWORD "Gauss_234"; +CREATE USER user1 with PASSWORD "Gauss_234"; +RESET search_path; +set role user1 password "Gauss_234"; +CREATE CLIENT MASTER KEY MyCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY MyCEK1 WITH VALUES (CLIENT_MASTER_KEY = MyCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +drop table if exists test_01; +NOTICE: table "test_01" does not exist, skipping +CREATE TABLE test_01 +( + id_number INTEGER NOT NULL, + name VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC) NOT NULL +) WITH (ORIENTATION = COLUMN, COMPRESSION=HIGH); +insert into test_01 values(1,123); +set role sysadmin1 password "Gauss_234"; +insert into user1.test_01 values(1,123); +set role user1 password "Gauss_234"; +insert into test_01 values(1,123); +select * from test_01; +  id_number |  name +------------+------- + 1 | 123 + 1 | 123 + 1 | 123 +(3 rows) + +set role sysadmin1 password "Gauss_234"; +grant usage on schema user1 to rsr3; +grant insert on user1.test_01 to rsr3; +set role rsr3 password "Gauss_234"; +select current_user; + current_user +-------------- + rsr3 +(1 row) + +insert into user1.test_01 values(1,123); +ERROR: column " name" is of type byteawithoutorderwithequalcol but expression is of type integer +LINE 1: insert into user1.test_01 values(1,123); + ^ +HINT: You will need to rewrite or cast the expression. +CONTEXT: referenced column:  name +reset role; +drop table if exists user1.test_01 cascade; +drop COLUMN ENCRYPTION KEY user1.MyCEK1 cascade; +drop CLIENT MASTER KEY user1.MyCMK1 cascade; +drop USER rsr3 cascade; +drop USER user1 cascade; +drop USER sysadmin1 cascade; +\! 
gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/expected/ce_privileges_dba.out b/src/test/regress/expected/ce_privileges_dba.out index d850cef0f..29c402128 100644 --- a/src/test/regress/expected/ce_privileges_dba.out +++ b/src/test/regress/expected/ce_privileges_dba.out @@ -51,6 +51,83 @@ DROP TABLE IF EXISTS acltest1; NOTICE: table "acltest1" does not exist, skipping CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC)); DROP TABLE acltest1; +CREATE USER user_check PASSWORD '1234567i*'; +SELECT has_cmk_privilege('user_check','mycmk','USAGE'); + has_cmk_privilege +------------------- + f +(1 row) + +SELECT has_cmk_privilege('mycmk','USAGE'); + has_cmk_privilege +------------------- + t +(1 row) + +SELECT has_cmk_privilege('user_check',(select oid from gs_client_global_keys where global_key_name='mycmk'),'USAGE'); + has_cmk_privilege +------------------- + f +(1 row) + +SELECT has_cmk_privilege((select oid from gs_client_global_keys where global_key_name='mycmk'),'USAGE'); + has_cmk_privilege +------------------- + t +(1 row) + +SELECT has_cmk_privilege((select oid from pg_authid where rolname='user_check'),'mycmk','USAGE'); + has_cmk_privilege +------------------- + f +(1 row) + +SELECT has_cmk_privilege((select oid from pg_authid where rolname='user_check'),(select oid from gs_client_global_keys where global_key_name='mycmk'),'USAGE'); + has_cmk_privilege +------------------- + f +(1 row) + +SELECT has_cek_privilege('user_check','mycek','USAGE'); + has_cek_privilege +------------------- + f +(1 row) + +SELECT has_cek_privilege('mycek','USAGE'); + has_cek_privilege +------------------- + t +(1 row) + +SELECT has_cek_privilege('user_check',(select oid from gs_column_keys where column_key_name='mycek'),'USAGE'); + has_cek_privilege +------------------- + f +(1 row) + +SELECT has_cek_privilege((select oid from gs_column_keys where column_key_name='mycek'),'USAGE'); + has_cek_privilege +------------------- + t +(1 row) + +SELECT has_cek_privilege((select oid from pg_authid where rolname='user_check'),'mycek','USAGE'); + has_cek_privilege +------------------- + f +(1 row) + +SELECT has_cek_privilege((select oid from pg_authid where rolname='user_check'),(select oid from gs_column_keys where column_key_name='mycek'),'USAGE'); + has_cek_privilege +------------------- + f +(1 row) + +GRANT ALL ON CLIENT_MASTER_KEY mycmk to user_check; +GRANT ALL ON COLUMN_ENCRYPTION_KEY mycek to user_check; +DROP OWNED BY user_check CASCADE; +DROP USER user_check; DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; NOTICE: drop cascades to column encryption key: mycek \! gs_ktool -d all diff --git a/src/test/regress/expected/ce_privileges_on_schema.out b/src/test/regress/expected/ce_privileges_on_schema.out index 22a960c97..62b0bd518 100644 --- a/src/test/regress/expected/ce_privileges_on_schema.out +++ b/src/test/regress/expected/ce_privileges_on_schema.out @@ -37,6 +37,7 @@ SET search_path to testns; -- SHOULD FAIL - check CANNOT drop existing objects DROP COLUMN ENCRYPTION KEY MyCEK1; ERROR: permission denied for column encryption key mycek1 +DETAIL: N/A DROP CLIENT MASTER KEY MyCMK1; ERROR: cannot drop client master key: mycmk1 because other objects depend on it DETAIL: column encryption key: mycek1 depends on client master key: mycmk1 @@ -44,6 +45,7 @@ HINT: Use DROP ... CASCADE to drop the dependent objects too. 
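The ce_privileges_dba.out hunk above enumerates the call shapes of has_cmk_privilege and has_cek_privilege: the role may be given by name or by pg_authid oid, the key by name or by its oid in gs_client_global_keys / gs_column_keys, and the two-argument form checks the current user. A compact sketch of the three shapes, reusing the mycmk and user_check names from that hunk:

-- two-argument form: checks the current role
SELECT has_cmk_privilege('mycmk', 'USAGE');
-- three-argument form: role and key by name
SELECT has_cmk_privilege('user_check', 'mycmk', 'USAGE');
-- three-argument form: role and key by oid
SELECT has_cmk_privilege(
    (SELECT oid FROM pg_authid WHERE rolname = 'user_check'),
    (SELECT oid FROM gs_client_global_keys WHERE global_key_name = 'mycmk'),
    'USAGE');

has_cek_privilege accepts the same combinations, resolving key names and oids against gs_column_keys instead.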
-- SHOULD FAIL - create TABLE using existing MyCEK1 (missing permissions to both MyCEK1 and MyCMK1) CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC)); ERROR: permission denied for column encryption key mycek1 +DETAIL: N/A -- add specific permission to MyCEK1 and retry (should still fail - missing permission to MyCMK1) RESET SESSION AUTHORIZATION; GRANT USAGE ON COLUMN_ENCRYPTION_KEY MyCEK1 to newuser; @@ -51,6 +53,7 @@ SET SESSION AUTHORIZATION newuser PASSWORD 'gauss@123'; SET search_path to testns; CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC)); ERROR: permission denied for client master key mycmk1 +DETAIL: N/A -- SUCCEED - - create TABLE using existing MyCEK1 (now has permission on SCHEMA, CEK and CMK) RESET SESSION AUTHORIZATION; GRANT USAGE ON CLIENT_MASTER_KEY MyCMK1 to newuser; @@ -135,6 +138,7 @@ drop cascades to client master key: mycmk2 drop cascades to column encryption key: mycek3 drop cascades to table acltest3 ERROR: permission denied for column encryption key mycek1 +DETAIL: N/A DROP ROLE IF EXISTS newuser; ERROR: Permission denied to drop role. RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/expected/ce_proc_test.out b/src/test/regress/expected/ce_proc_test.out new file mode 100644 index 000000000..61badde0c --- /dev/null +++ b/src/test/regress/expected/ce_proc_test.out @@ -0,0 +1,126 @@ +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS proc_cmk2 CASCADE; +NOTICE: client master key "proc_cmk2" does not exist +CREATE CLIENT MASTER KEY proc_cmk2 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY proc_cek2 WITH VALUES (CLIENT_MASTER_KEY = proc_cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- function test1: we need to support insert, select, update and delete of CLIENT_LOGIC data inside functions, as well as creating CLIENT_LOGIC tables and cmk/cek inside functions (which will not flush the cache in a function for now) +create or replace function fun_001() returns void as $$ +declare +begin + create table schema_tbl_001(a int, b int CLIENT_LOGIC WITH (COLUMN_SETTING = ImgCEK)) ; + insert into schema_tbl_001 values(1,1); +end; +$$ LANGUAGE plpgsql; +ERROR: syntax error at or near "CLIENT_LOGIC" +LINE 4: create table schema_tbl_001(a int, b int CLIENT_LOGIC WI...
+ ^ +call fun_001(); +ERROR: function "fun_001" doesn't exist +select * from schema_tbl_001; +ERROR: relation "schema_tbl_001" does not exist +LINE 1: select * from schema_tbl_001; + ^ +\d schema_tbl_001 +--function test2 +CREATE TABLE sbtest1( + a int, + b INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '0' NOT NULL, + c CHAR(120) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL, + d CHAR(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL); +create function select_data() returns table(a int, b INTEGER, c CHAR(120), d CHAR(60)) +as +$BODY$ +begin +return query(select * from sbtest1); +end; +$BODY$ +LANGUAGE plpgsql; +call select_data(); + a | b | c | d +---+---+---+--- +(0 rows) + +--function test3 +--normal table +CREATE TABLE basket_a ( + id INT PRIMARY KEY, + fruit VARCHAR (100) NOT NULL, + age INT NOT NULL +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "basket_a_pkey" for table "basket_a" +CREATE TABLE basket_aa( + id INT, + fruit VARCHAR (100) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC), + age INT NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) +); +CREATE FUNCTION MyInsert1(_id integer, _fruit varchar, _age integer) + RETURNS void AS + $BODY$ + BEGIN + INSERT INTO basket_a(id,fruit,age) + VALUES(_id, _fruit, _age); + END; + $BODY$ + LANGUAGE 'plpgsql' VOLATILE + COST 100; +CREATE FUNCTION MyInsert2(_id integer, _fruit varchar, _age integer) + RETURNS void AS + $BODY$ + BEGIN + INSERT INTO basket_aa(id,fruit,age) + VALUES(_id, _fruit, _age); + END; + $BODY$ + LANGUAGE 'plpgsql' VOLATILE + COST 100; +select * from MyInsert1(1,'apple',1 ); + myinsert1 +----------- + +(1 row) + +select * from basket_a; + id | fruit | age +----+-------+----- + 1 | apple | 1 +(1 row) + +select * from MyInsert2(1,'apple',1 ); + myinsert2 +----------- + +(1 row) + +select * from basket_a; + id | fruit | age +----+-------+----- + 1 | apple | 1 +(1 row) + +-- procedure test1 +CREATE TABLE sbtest2( + id int, + k INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '0' NOT NULL, + c CHAR(120) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL, + pad CHAR(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL); +insert into sbtest2 values(1,1,1,1); +CREATE OR REPLACE PROCEDURE select2 +( + id IN int, + k OUT int, + c OUT int +) +AS +BEGIN + EXECUTE IMMEDIATE 'select k, c from sbtest2 where id = 1' + INTO k, c + USING IN id; +END; +/ +call select2(1,a,b); +gsql: values_processor.cpp:412: static void ValuesProcessor::process_text_format(unsigned char**, size_t&, bool, int): Assertion `res != __null' failed. diff --git a/src/test/regress/expected/ce_procedure.out b/src/test/regress/expected/ce_procedure.out index 8fd942d27..7c8924685 100644 --- a/src/test/regress/expected/ce_procedure.out +++ b/src/test/regress/expected/ce_procedure.out @@ -1,7 +1,6 @@ ------------------------- -- unsupport procedure -- ------------------------- ---set enable_stream_operator = on; ---------------------forall------------------------ SET CHECK_FUNCTION_BODIES TO ON; \! 
gs_ktool -d all @@ -54,7 +53,7 @@ CREATE OR REPLACE PROCEDURE INSERT_IMAGE ) AS BEGIN - INSERT INTO Image VALUES ( id_param, title_param, artist_param, description_param, dataTime_param, xresolution_param, yresolution_param, resolution_unit_param, imageSize_param, alititude_param, latitude_param, longitude_param, imagePath_param); + INSERT INTO Image VALUES ( id_param, artist_param, artist_param, description_param, dataTime_param, xresolution_param, yresolution_param, resolution_unit_param, imageSize_param, alititude_param, latitude_param, longitude_param, imagePath_param); END; / CALL INSERT_IMAGE(8, 'img4214196','ZAVIER', 'a river', '2019-11-22 12:45:26', 720, 720, 'px', 1244, 510, 29.75, 105.79, '/DCIM/Camera/img4214196'); @@ -85,9 +84,18 @@ end $BODY$ LANGUAGE plpgsql; call select_data(); -ERROR: structure of query does not match function result type -DETAIL: Returned type integer does not match expected type text in column 1. -CONTEXT: PL/pgSQL function select_data() line 3 at RETURN QUERY + id_param | artist_param +----------+-------------- + 1 | IDO + 2 | IDO + 3 | ZAVIER + 4 | AVI + 5 | AVI + 6 | ELI + 7 | ELI + 8 | ZAVIER +(8 rows) + DROP PROCEDURE IF EXISTS INSERT_IMAGE; DROP PROCEDURE IF EXISTS update_description(); DROP FUNCTION IF EXISTS select_data(); diff --git a/src/test/regress/expected/ce_random.out b/src/test/regress/expected/ce_random.out index 63f1d96c9..8678ab198 100644 --- a/src/test/regress/expected/ce_random.out +++ b/src/test/regress/expected/ce_random.out @@ -131,12 +131,12 @@ INSERT INTO products DEFAULT VALUES; SELECT * FROM products; product_no | name | title | price | max_price ------------+--------------+-------+-------+----------- - 1 | Test2 | | 9.99 | + 1 | Test Product | | 9.99 | + 1 | Test Product | | 9.99 | + 1 | Test Product | | 9.99 | + 1 | Test Product | | 9.99 | 1 | Test Product | | 34 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 9.99 | - 1 | Test Product | | 9.99 | + 1 | Test2 | | 9.99 | (6 rows) DROP TABLE products; diff --git a/src/test/regress/expected/ce_rlspolicy.out b/src/test/regress/expected/ce_rlspolicy.out index 4ea161e7e..50edc550a 100644 --- a/src/test/regress/expected/ce_rlspolicy.out +++ b/src/test/regress/expected/ce_rlspolicy.out @@ -15,6 +15,8 @@ INSERT INTO all_data VALUES(1, 'alice', 'alice data'); INSERT INTO all_data VALUES(2, 'bob', 'bob data'); INSERT INTO all_data VALUES(3, 'peter', 'peter data'); GRANT SELECT ON all_data TO alice, bob; +GRANT USAGE ON COLUMN_ENCRYPTION_KEY rlspolicy_cek to alice, bob; +GRANT USAGE ON CLIENT_MASTER_KEY rlspolicy_cmk to alice, bob; ALTER TABLE all_data ENABLE ROW LEVEL SECURITY; CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = 'alice'); SET ROLE alice PASSWORD 'Gauss@123'; @@ -36,10 +38,10 @@ SELECT * FROM all_data; RESET ROLE; DROP ROW LEVEL SECURITY POLICY all_data_rls ON all_data; DROP TABLE all_data; -DROP ROLE alice; -DROP ROLE bob; DROP COLUMN ENCRYPTION KEY rlspolicy_cek; DROP CLIENT MASTER KEY rlspolicy_cmk; +DROP ROLE alice; +DROP ROLE bob; CREATE USER rlspolicy_user1 PASSWORD 'gauss@123'; CREATE USER rlspolicy_user2 PASSWORD 'gauss@123'; SET ROLE rlspolicy_user1 PASSWORD 'gauss@123'; diff --git a/src/test/regress/expected/ce_select.out b/src/test/regress/expected/ce_select.out index 26a163a0a..3f6ceade6 100644 --- a/src/test/regress/expected/ce_select.out +++ b/src/test/regress/expected/ce_select.out @@ -11,9 +11,9 @@ GENERATE GENERATE 3 DROP CLIENT MASTER KEY IF EXISTS ImgCMK1 CASCADE; -NOTICE: global setting "imgcmk1" 
does not exist +NOTICE: client master key "imgcmk1" does not exist DROP CLIENT MASTER KEY IF EXISTS ImgCMK CASCADE; -NOTICE: global setting "imgcmk" does not exist +NOTICE: client master key "imgcmk" does not exist CREATE CLIENT MASTER KEY ImgCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE CLIENT MASTER KEY ImgCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY ImgCEK1 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); @@ -62,6 +62,18 @@ explain INSERT INTO creditcard_info3 VALUES (3, 'xiaoli','xiaoli', 621187780); (3 rows) --支持 +select * from creditcard_info2 where regression.public.creditcard_info2.name1 = (select name1 from creditcard_info3 order by id_number limit 1); + id_number | name1 | name2 | credit_card +-----------+-------+-------+--------------------- + 1 | joe | joe | 6217986500001288393 +(1 row) + +select * from creditcard_info2 where public.creditcard_info2.name1 = (select name1 from creditcard_info3 order by id_number limit 1); + id_number | name1 | name2 | credit_card +-----------+-------+-------+--------------------- + 1 | joe | joe | 6217986500001288393 +(1 row) + select * from creditcard_info2 where name1 = (select name1 from creditcard_info3 order by id_number limit 1); id_number | name1 | name2 | credit_card -----------+-------+-------+--------------------- @@ -100,7 +112,7 @@ explain select * from (select * from creditcard_info3) as a , (select * from cre --?.* --?.* --?.* ---?.* +(5 rows) select credit_card, name1 from @@ -285,7 +297,7 @@ CREATE CLIENT MASTER KEY lidj_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_k CREATE COLUMN ENCRYPTION KEY lidj_cek WITH VALUES (CLIENT_MASTER_KEY = lidj_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); BEGIN; DROP CLIENT MASTER KEY IF EXISTS lidj_cmk CASCADE; -NOTICE: drop cascades to column setting: lidj_cek +NOTICE: drop cascades to column encryption key: lidj_cek ROLLBACK; CREATE TABLE creditcard_info (id_number int, name text encrypted with (column_encryption_key = lidj_cek, encryption_type = DETERMINISTIC)); INSERT INTO creditcard_info VALUES (1,'joe'); @@ -310,7 +322,7 @@ select * from creditcard_info order by id_number; BEGIN; DROP TABLE creditcard_info; DROP CLIENT MASTER KEY IF EXISTS lidj_cmk CASCADE; -NOTICE: drop cascades to column setting: lidj_cek +NOTICE: drop cascades to column encryption key: lidj_cek ROLLBACK; select * from creditcard_info order by id_number; id_number | name @@ -367,6 +379,6 @@ select count(*), 'count' from gs_column_keys; 0 | count (1 row) -\! 
gs_ktool -d all +--?.*gs_ktool -d all DELETE ALL 1 2 3 diff --git a/src/test/regress/expected/ce_select_where_encrypt_test.out b/src/test/regress/expected/ce_select_where_encrypt_test.out index e69d92aa1..f7e05c431 100644 --- a/src/test/regress/expected/ce_select_where_encrypt_test.out +++ b/src/test/regress/expected/ce_select_where_encrypt_test.out @@ -5,121 +5,121 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -CREATE TABLE customer_ce ( - customer_ce_id integer NOT NULL, +CREATE TABLE customer ( + customer_id integer NOT NULL, id integer ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), first_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), last_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), alias_name character (50) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT 'ali' ); -INSERT INTO customer_ce VALUES(1,1,'Jared','Ely'); -INSERT INTO customer_ce VALUES(1,2,'Mary','Smith'); -INSERT INTO customer_ce VALUES(1,3,'Patricia','Johnson'); -INSERT INTO customer_ce VALUES(1,4,'Linda','Williams'); -INSERT INTO customer_ce VALUES(1,5,'Barbara','Jones'); -INSERT INTO customer_ce VALUES(1,6,'Elizabeth','Brown'); -INSERT INTO customer_ce VALUES(1,7,'Jennifer','Davis'); -INSERT INTO customer_ce VALUES(1,8,'Maria','Miller'); -INSERT INTO customer_ce VALUES(1,9,'Susan','Wilson'); -INSERT INTO customer_ce VALUES(1,10,'Margaret','Moore'); -INSERT INTO customer_ce VALUES(1,11,'Dorothy','Taylor'); -INSERT INTO customer_ce VALUES(1,12,'Lisa','Anderson'); -INSERT INTO customer_ce VALUES(1,13,'Nancy','Thomas'); -INSERT INTO customer_ce VALUES(1,14,'Karen','Jackson'); -INSERT INTO customer_ce VALUES(1,15,'Betty','White'); -INSERT INTO customer_ce VALUES(1,16,'Helen','Harris'); -INSERT INTO customer_ce VALUES(1,17,'Sandra','Martin'); -INSERT INTO customer_ce VALUES(1,18,'Adam','Rodriguez'); -INSERT INTO customer_ce VALUES(1,19,'Carol','Garcia'); -INSERT INTO customer_ce VALUES(1,20,'Jamie','Rice'); -INSERT INTO customer_ce VALUES(1,21,'Annette','Olson'); -INSERT INTO customer_ce VALUES(1,22,'Annie','Russell'); -select * from customer_ce where customer_ce_id = 1 AND id = 1; - customer_ce_id | id | first_name | last_name | alias_name -----------------+----+------------+-----------+------------ - 1 | 1 | Jared | Ely | ali +INSERT INTO customer VALUES(1,1,'Jared','Ely'); +INSERT INTO customer VALUES(1,2,'Mary','Smith'); +INSERT INTO customer VALUES(1,3,'Patricia','Johnson'); +INSERT INTO customer VALUES(1,4,'Linda','Williams'); +INSERT INTO customer VALUES(1,5,'Barbara','Jones'); +INSERT INTO customer VALUES(1,6,'Elizabeth','Brown'); +INSERT INTO customer VALUES(1,7,'Jennifer','Davis'); +INSERT INTO customer VALUES(1,8,'Maria','Miller'); +INSERT INTO customer VALUES(1,9,'Susan','Wilson'); +INSERT INTO customer VALUES(1,10,'Margaret','Moore'); +INSERT INTO customer VALUES(1,11,'Dorothy','Taylor'); +INSERT INTO customer VALUES(1,12,'Lisa','Anderson'); +INSERT INTO customer VALUES(1,13,'Nancy','Thomas'); +INSERT INTO customer 
VALUES(1,14,'Karen','Jackson'); +INSERT INTO customer VALUES(1,15,'Betty','White'); +INSERT INTO customer VALUES(1,16,'Helen','Harris'); +INSERT INTO customer VALUES(1,17,'Sandra','Martin'); +INSERT INTO customer VALUES(1,18,'Adam','Rodriguez'); +INSERT INTO customer VALUES(1,19,'Carol','Garcia'); +INSERT INTO customer VALUES(1,20,'Jamie','Rice'); +INSERT INTO customer VALUES(1,21,'Annette','Olson'); +INSERT INTO customer VALUES(1,22,'Annie','Russell'); +select * from customer where customer_id = 1 AND id = 1; + customer_id | id | first_name | last_name | alias_name +-------------+----+------------+-----------+------------ + 1 | 1 | Jared | Ely | ali (1 row) -select * from customer_ce where customer_ce_id = 2 OR first_name = 'Jamie'; - customer_ce_id | id | first_name | last_name | alias_name -----------------+----+------------+-----------+------------ - 1 | 20 | Jamie | Rice | ali +select * from customer where customer_id = 2 OR first_name = 'Jamie'; + customer_id | id | first_name | last_name | alias_name +-------------+----+------------+-----------+------------ + 1 | 20 | Jamie | Rice | ali (1 row) -select * from customer_ce where id = 1; - customer_ce_id | id | first_name | last_name | alias_name -----------------+----+------------+-----------+------------ - 1 | 1 | Jared | Ely | ali +select * from customer where id = 1; + customer_id | id | first_name | last_name | alias_name +-------------+----+------------+-----------+------------ + 1 | 1 | Jared | Ely | ali (1 row) -select * from customer_ce where id <> 1; - customer_ce_id | id | first_name | last_name | alias_name -----------------+----+------------+-----------+------------ - 1 | 2 | Mary | Smith | ali - 1 | 3 | Patricia | Johnson | ali - 1 | 4 | Linda | Williams | ali - 1 | 5 | Barbara | Jones | ali - 1 | 6 | Elizabeth | Brown | ali - 1 | 7 | Jennifer | Davis | ali - 1 | 8 | Maria | Miller | ali - 1 | 9 | Susan | Wilson | ali - 1 | 10 | Margaret | Moore | ali - 1 | 11 | Dorothy | Taylor | ali - 1 | 12 | Lisa | Anderson | ali - 1 | 13 | Nancy | Thomas | ali - 1 | 14 | Karen | Jackson | ali - 1 | 15 | Betty | White | ali - 1 | 16 | Helen | Harris | ali - 1 | 17 | Sandra | Martin | ali - 1 | 18 | Adam | Rodriguez | ali - 1 | 19 | Carol | Garcia | ali - 1 | 20 | Jamie | Rice | ali - 1 | 21 | Annette | Olson | ali - 1 | 22 | Annie | Russell | ali +select * from customer where id <> 1; + customer_id | id | first_name | last_name | alias_name +-------------+----+------------+-----------+------------ + 1 | 22 | Annie | Russell | ali + 1 | 21 | Annette | Olson | ali + 1 | 20 | Jamie | Rice | ali + 1 | 19 | Carol | Garcia | ali + 1 | 18 | Adam | Rodriguez | ali + 1 | 17 | Sandra | Martin | ali + 1 | 16 | Helen | Harris | ali + 1 | 15 | Betty | White | ali + 1 | 14 | Karen | Jackson | ali + 1 | 13 | Nancy | Thomas | ali + 1 | 12 | Lisa | Anderson | ali + 1 | 11 | Dorothy | Taylor | ali + 1 | 10 | Margaret | Moore | ali + 1 | 9 | Susan | Wilson | ali + 1 | 8 | Maria | Miller | ali + 1 | 7 | Jennifer | Davis | ali + 1 | 6 | Elizabeth | Brown | ali + 1 | 5 | Barbara | Jones | ali + 1 | 4 | Linda | Williams | ali + 1 | 3 | Patricia | Johnson | ali + 1 | 2 | Mary | Smith | ali (21 rows) -SELECT last_name, first_name FROM customer_ce WHERE first_name = 'Jamie'; +SELECT last_name, first_name FROM customer WHERE first_name = 'Jamie'; last_name | first_name -----------+------------ Rice | Jamie (1 row) -SELECT last_name, first_name FROM customer_ce WHERE first_name <> 'Jamie'; +SELECT last_name, first_name FROM customer WHERE first_name <> 'Jamie'; 
last_name | first_name -----------+------------ - Ely | Jared - Smith | Mary - Johnson | Patricia - Williams | Linda - Jones | Barbara - Brown | Elizabeth - Davis | Jennifer - Miller | Maria - Wilson | Susan - Moore | Margaret - Taylor | Dorothy - Anderson | Lisa - Thomas | Nancy - Jackson | Karen - White | Betty - Harris | Helen - Martin | Sandra - Rodriguez | Adam - Garcia | Carol - Olson | Annette Russell | Annie + Olson | Annette + Garcia | Carol + Rodriguez | Adam + Martin | Sandra + Harris | Helen + White | Betty + Jackson | Karen + Thomas | Nancy + Anderson | Lisa + Taylor | Dorothy + Moore | Margaret + Wilson | Susan + Miller | Maria + Davis | Jennifer + Brown | Elizabeth + Jones | Barbara + Williams | Linda + Johnson | Patricia + Smith | Mary + Ely | Jared (21 rows) -SELECT last_name, first_name FROM customer_ce WHERE first_name = 'Jamie' AND last_name = 'Rice'; +SELECT last_name, first_name FROM customer WHERE first_name = 'Jamie' AND last_name = 'Rice'; last_name | first_name -----------+------------ Rice | Jamie (1 row) -SELECT first_name, last_name FROM customer_ce WHERE last_name = 'Rodriguez' OR first_name = 'Adam'; +SELECT first_name, last_name FROM customer WHERE last_name = 'Rodriguez' OR first_name = 'Adam'; first_name | last_name ------------+----------- Adam | Rodriguez @@ -129,7 +129,7 @@ SELECT first_name, last_name FROM - customer_ce + customer WHERE first_name IN ('Ann','Anne','Annie'); first_name | last_name @@ -141,7 +141,7 @@ SELECT first_name, last_name FROM - customer_ce + customer WHERE first_name LIKE 'Ann%'; ERROR(CLIENT): operator is not allowed on datatype of this column @@ -149,7 +149,7 @@ SELECT first_name, LENGTH(first_name) name_length FROM - customer_ce + customer WHERE first_name LIKE 'A%' AND LENGTH(first_name) BETWEEN 3 AND 5 @@ -160,7 +160,7 @@ SELECT first_name, last_name FROM - customer_ce + customer WHERE first_name LIKE 'Bra%' AND last_name <> 'Motley'; @@ -169,45 +169,45 @@ SELECT first_name, last_name FROM - customer_ce + customer WHERE - customer_ce.first_name = 'Jamie'; + customer.first_name = 'Jamie'; first_name | last_name ------------+----------- Jamie | Rice (1 row) -SELECT * from customer_ce where id > 1; +SELECT * from customer where id > 1; ERROR(CLIENT): operator is not allowed on datatype of this column -SELECT * from customer_ce where id < 1; +SELECT * from customer where id < 1; ERROR(CLIENT): operator is not allowed on datatype of this column -SELECT * from customer_ce where id != 1; - customer_ce_id | id | first_name | last_name | alias_name -----------------+----+------------+-----------+------------ - 1 | 2 | Mary | Smith | ali - 1 | 3 | Patricia | Johnson | ali - 1 | 4 | Linda | Williams | ali - 1 | 5 | Barbara | Jones | ali - 1 | 6 | Elizabeth | Brown | ali - 1 | 7 | Jennifer | Davis | ali - 1 | 8 | Maria | Miller | ali - 1 | 9 | Susan | Wilson | ali - 1 | 10 | Margaret | Moore | ali - 1 | 11 | Dorothy | Taylor | ali - 1 | 12 | Lisa | Anderson | ali - 1 | 13 | Nancy | Thomas | ali - 1 | 14 | Karen | Jackson | ali - 1 | 15 | Betty | White | ali - 1 | 16 | Helen | Harris | ali - 1 | 17 | Sandra | Martin | ali - 1 | 18 | Adam | Rodriguez | ali - 1 | 19 | Carol | Garcia | ali - 1 | 20 | Jamie | Rice | ali - 1 | 21 | Annette | Olson | ali - 1 | 22 | Annie | Russell | ali +SELECT * from customer where id != 1; + customer_id | id | first_name | last_name | alias_name +-------------+----+------------+-----------+------------ + 1 | 22 | Annie | Russell | ali + 1 | 21 | Annette | Olson | ali + 1 | 20 | Jamie | Rice | ali + 1 | 19 | 
Carol | Garcia | ali + 1 | 18 | Adam | Rodriguez | ali + 1 | 17 | Sandra | Martin | ali + 1 | 16 | Helen | Harris | ali + 1 | 15 | Betty | White | ali + 1 | 14 | Karen | Jackson | ali + 1 | 13 | Nancy | Thomas | ali + 1 | 12 | Lisa | Anderson | ali + 1 | 11 | Dorothy | Taylor | ali + 1 | 10 | Margaret | Moore | ali + 1 | 9 | Susan | Wilson | ali + 1 | 8 | Maria | Miller | ali + 1 | 7 | Jennifer | Davis | ali + 1 | 6 | Elizabeth | Brown | ali + 1 | 5 | Barbara | Jones | ali + 1 | 4 | Linda | Williams | ali + 1 | 3 | Patricia | Johnson | ali + 1 | 2 | Mary | Smith | ali (21 rows) -DROP TABLE customer_ce; +DROP TABLE customer; DROP COLUMN ENCRYPTION KEY MyCEK; DROP CLIENT MASTER KEY MyCMK; \! gs_ktool -d all diff --git a/src/test/regress/expected/ce_set.out b/src/test/regress/expected/ce_set.out index 7f9d4a30f..ed55e52df 100644 --- a/src/test/regress/expected/ce_set.out +++ b/src/test/regress/expected/ce_set.out @@ -36,7 +36,7 @@ SELECT * FROM t2; drop table t3; drop table t2; DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE COLUMN ENCRYPTION KEY MyCEK2 WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); @@ -93,7 +93,7 @@ update t3 set a=(select d from t2 where c=3) where b=3; --unsupport update t3 set b=(select d from t2 where c=3) where a=4; ERROR: column "b" is of type integer but expression is of type byteawithoutorderwithequalcol -LINE 1: update t3 set b=(select d from t2 where c=3) where a='\x0195... +--?.* ^ HINT: You will need to rewrite or cast the expression. CONTEXT: referenced column: b diff --git a/src/test/regress/expected/ce_table_type.out b/src/test/regress/expected/ce_table_type.out index d73669760..5231d09f6 100644 --- a/src/test/regress/expected/ce_table_type.out +++ b/src/test/regress/expected/ce_table_type.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); --行存 @@ -208,7 +208,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS distributeby_cmk CASCADE; -NOTICE: global setting "distributeby_cmk" does not exist +NOTICE: client master key "distributeby_cmk" does not exist CREATE CLIENT MASTER KEY distributeby_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY distributeby_cek WITH VALUES (CLIENT_MASTER_KEY = distributeby_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE t_distributeby1( @@ -226,17 +226,38 @@ DETAIL: The distributed capability is not supported currently. 
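The ce_select_where_encrypt_test hunks above encode the operator rule these expected files test for client-logic columns: a DETERMINISTIC encrypted column yields the same ciphertext for the same plaintext, so equality-style operators (=, <>, !=, IN) can be evaluated, while ordering and pattern operators (<, >, LIKE) are rejected on the client. A minimal sketch, assuming the MyCEK key created at the top of ce_table_type.out; demo_enc is an illustrative table name:

CREATE TABLE demo_enc (
    id int,
    secret varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC)
);
INSERT INTO demo_enc VALUES (1, 'alpha'), (2, 'beta');
SELECT * FROM demo_enc WHERE secret = 'alpha';   -- supported: deterministic ciphertexts compare equal
SELECT * FROM demo_enc WHERE secret <> 'alpha';  -- supported
SELECT * FROM demo_enc WHERE secret LIKE 'al%';  -- ERROR(CLIENT): operator is not allowed on datatype of this column
SELECT * FROM demo_enc WHERE secret > 'alpha';   -- ERROR(CLIENT): operator is not allowed on datatype of this column

Ordering is not preserved by the encryption, which is why the range and pattern operators cannot be evaluated against the stored ciphertext.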
CREATE TABLE t_distributeby3( id_number int, name text, - data text) distribute by list(name)(slice s1 values (('China'),('Germary')),slice s2 values (('Japan')), slice s3 values (('USA')), slice s4 values (default)); + data text) distribute by list(name)(slice s1 values (('China')),slice s2 values (('Japan')), slice s3 values (('USA')), slice s4 values ('Germary'), + slice s5 values ('Israel'), slice s6 values ('India'), slice s7 values ('Peru'), slice s8 values ('Thailand'), + slice s9 values ('South Africa'), slice s10 values ('New Zealand'), slice s11 values ('Nepal'), slice s12 values (default)); ERROR: Un-support feature DETAIL: The distributed capability is not supported currently. +CREATE TABLE t_distributeby4( + id_number int, + name text, + data text encrypted with(column_encryption_key = distributeby_cek,encryption_type = DETERMINISTIC)) +distribute by list(name)(slice s1 values (('China')),slice s2 values (('Japan')), slice s3 values (('USA')), slice s4 values ('Germary'), + slice s5 values ('Israel'), slice s6 values ('India'), slice s7 values ('Peru'), slice s8 values ('Thailand'), + slice s9 values ('South Africa'), slice s10 values ('New Zealand'), slice s11 values ('Nepal'), slice s12 values (default)); +ERROR: Un-support feature +DETAIL: The distributed capability is not supported currently. +create table ce_t1 (id BYTEAWITHOUTORDERWITHEQUALCOL); +ERROR: It's not supported to create byteawithoutorderwithequalcol column +create table ce_t2 (id BYTEAWITHOUTORDERCOL); +ERROR: It's not supported to create byteawithoutordercol column +DROP table IF EXISTS ce_t1; +NOTICE: table "ce_t1" does not exist, skipping +DROP table IF EXISTS ce_t2; +NOTICE: table "ce_t2" does not exist, skipping DROP table IF EXISTS t_distributeby1; NOTICE: table "t_distributeby1" does not exist, skipping DROP table IF EXISTS t_distributeby2; NOTICE: table "t_distributeby2" does not exist, skipping DROP table IF EXISTS t_distributeby3; NOTICE: table "t_distributeby3" does not exist, skipping +DROP table IF EXISTS t_distributeby4; +NOTICE: table "t_distributeby4" does not exist, skipping DROP COLUMN ENCRYPTION KEY IF EXISTS distributeby_cek; DROP CLIENT MASTER KEY IF EXISTS distributeby_cmk; \! gs_ktool -d all DELETE ALL - 1 \ No newline at end of file + 1 diff --git a/src/test/regress/expected/ce_textual_prepare_crud_all_types_test.out b/src/test/regress/expected/ce_textual_prepare_crud_all_types_test.out index a7b00df8a..a5c3cd523 100644 --- a/src/test/regress/expected/ce_textual_prepare_crud_all_types_test.out +++ b/src/test/regress/expected/ce_textual_prepare_crud_all_types_test.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK770 WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE IF NOT EXISTS tr1( @@ -92,6 +92,7 @@ execute select_all_tr2; execute insert_tr1(-1,-1, 0,-1,-1,-1,-3.14,-3.14,' ','AVI' ,'Avi''s' , 'Kessel' , '\x5678', 561032063.5560,561032063.5560,561032063.5560); ERROR: numeric field overflow DETAIL: A field with precision 5, scale 0 must round to an absolute value less than 10^5. 
+CONTEXT: referenced column: n2 execute insert_tr2(-1,-1, 0,-1,-1,-1,-3.14,-3.14,' ','AVI' ,'Avi''s' , 'Kessel' , '\x5678', 561032063.5560,561032063.5560,561032063.5560); ERROR(CLIENT): numeric field overflow, A field with precision 5 , scale 0 must round to an absolute value less than 10^5 @@ -110,6 +111,7 @@ execute select_all_tr2; execute insert_tr1( 0, 0, 0, 0, 0, 0, 0.14, 0.14,'z','ELI' ,'Eli''s' , 'Shemer' , '\x09', 1563.0, 1563.0, 1563.0); ERROR: numeric field overflow DETAIL: A field with precision 5, scale 2 must round to an absolute value less than 10^3. +CONTEXT: referenced column: n3 execute insert_tr2( 0, 0, 0, 0, 0, 0, 0.14, 0.14,'z','ELI' ,'Eli''s' , 'Shemer' , '\x09', 1563.0, 1563.0, 1563.0); ERROR(CLIENT): numeric field overflow, A field with precision 5 , scale 2 must round to an absolute value less than 10^3 diff --git a/src/test/regress/expected/ce_transactions_test.out b/src/test/regress/expected/ce_transactions_test.out index 8ebb0731d..973bb9aeb 100644 --- a/src/test/regress/expected/ce_transactions_test.out +++ b/src/test/regress/expected/ce_transactions_test.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK770 WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE IF NOT EXISTS tr2(i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK770, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); diff --git a/src/test/regress/expected/ce_trigger.out b/src/test/regress/expected/ce_trigger.out index 9b27c7d5b..c246bcab4 100644 --- a/src/test/regress/expected/ce_trigger.out +++ b/src/test/regress/expected/ce_trigger.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS triggerCMK CASCADE; -NOTICE: global setting "triggercmk" does not exist +NOTICE: client master key "triggercmk" does not exist CREATE CLIENT MASTER KEY triggerCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY triggerCEK1 WITH VALUES (CLIENT_MASTER_KEY = triggerCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE COLUMN ENCRYPTION KEY triggerCEK2 WITH VALUES (CLIENT_MASTER_KEY = triggerCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); @@ -150,15 +150,15 @@ INSERT INTO test_trigger_src_tbl VALUES(100,400,500); SELECT * FROM test_trigger_src_tbl; id1 | id2 | id3 -----+-----+----- - 100 | 200 | 300 100 | 400 | 500 + 100 | 200 | 300 (2 rows) SELECT * FROM test_trigger_des_tbl; id1 | id2 | id3 -----+-----+----- - 100 | 200 | 300 100 | 400 | 500 + 100 | 200 | 300 (2 rows) --Fire the UPDATE trigger event and check the trigger result @@ -166,15 +166,15 @@ UPDATE test_trigger_src_tbl SET id3=400 WHERE id2=200; SELECT * FROM test_trigger_src_tbl; id1 | id2 | id3 -----+-----+----- - 100 | 400 | 500 100 | 200 | 400 + 100 | 400 | 500 (2 rows) SELECT * FROM test_trigger_des_tbl; id1 | id2 | id3 -----+-----+----- - 100 | 400 | 500 100 | 200 | 400 + 100 | 400 | 500 (2 rows) --Fire the DELETE trigger event and check the trigger result @@ -183,15 +183,15 @@ DELETE FROM test_trigger_src_tbl WHERE id3=400; ERROR(CLIENT): operator is not allowed on datatype of this column SELECT * FROM test_trigger_src_tbl; id1 | id2 | id3 -----+-----+----- - 100 | 400 | 500 100 | 200 | 400 + 100 | 400 | 500 (2 rows) SELECT * FROM test_trigger_des_tbl; id1 | id2 | id3 -----+-----+----- - 100 | 400 | 500 100 | 200 | 400 + 100 | 400 | 500 (2 rows) DROP TRIGGER insert_trigger ON
test_trigger_src_tbl; @@ -199,6 +199,9 @@ DROP TRIGGER update_trigger ON test_trigger_src_tbl; DROP TRIGGER delete_trigger ON test_trigger_src_tbl; DROP TABLE IF EXISTS test_trigger_src_tbl; DROP TABLE IF EXISTS test_trigger_des_tbl; +DROP FUNCTION tri_delete_func; +DROP FUNCTION tri_insert_func; +DROP FUNCTION tri_update_func; DROP COLUMN ENCRYPTION KEY triggerCEK1; DROP COLUMN ENCRYPTION KEY triggerCEK2; DROP CLIENT MASTER KEY triggerCMK; diff --git a/src/test/regress/expected/ce_type_binarys.out b/src/test/regress/expected/ce_type_binarys.out index b45fa87de..d8800e7c9 100644 --- a/src/test/regress/expected/ce_type_binarys.out +++ b/src/test/regress/expected/ce_type_binarys.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS binaryCMK CASCADE; -NOTICE: global setting "binarycmk" does not exist +NOTICE: client master key "binarycmk" does not exist CREATE CLIENT MASTER KEY binaryCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY binaryCEK WITH VALUES (CLIENT_MASTER_KEY = binaryCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); create table IF NOT EXISTS binary_type( diff --git a/src/test/regress/expected/ce_type_boolean.out b/src/test/regress/expected/ce_type_boolean.out index 3aa02d5fc..020737dd4 100644 --- a/src/test/regress/expected/ce_type_boolean.out +++ b/src/test/regress/expected/ce_type_boolean.out @@ -78,7 +78,7 @@ SELECT * from unencrypted_boolean order by c1; UPDATE unencrypted_boolean SET c2 = 'n' where c2 = 'y'; DROP CLIENT MASTER KEY IF EXISTS boolCMK CASCADE; -NOTICE: global setting "boolcmk" does not exist +NOTICE: client master key "boolcmk" does not exist CREATE CLIENT MASTER KEY boolCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY boolCEK WITH VALUES (CLIENT_MASTER_KEY = boolCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); create table IF NOT EXISTS boolean_type(c1 int, diff --git a/src/test/regress/expected/ce_type_char.out b/src/test/regress/expected/ce_type_char.out index 7761289cd..4a96a85ff 100644 --- a/src/test/regress/expected/ce_type_char.out +++ b/src/test/regress/expected/ce_type_char.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS charCMK CASCADE; -NOTICE: global setting "charcmk" does not exist +NOTICE: client master key "charcmk" does not exist CREATE CLIENT MASTER KEY charCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY charCEK WITH VALUES (CLIENT_MASTER_KEY = charCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); create table IF NOT EXISTS char_type( @@ -99,15 +99,15 @@ SELECT * from char_type order by c1; (2 rows) -- test empty data -insert into char_type values(12, '', '','','', '','', '', '', '',''); -insert into char_type values(13, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +insert into char_type values(8, '', '','','', '','', '', '', '',''); +insert into char_type values(9, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); SELECT * from char_type order by c1; c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c9 | c10 | c11 ----+----+----+----+-----------------------+----+------------------+------+-------+-----+------------------------------------ 7 | cc | c | dd | ccccbbbbaaaaaaaaaaaaa | aa | aaaaaaaaaaaaaaaa | aaaa | aaaaa | a | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + 8 | | | | | | | | | | 8 | cc | c | dd | ccc | aa | a | a | a | a | aaaaa - 12 | | | | | | | | | | - 13 | | | | | | | | | | + 9 | | | 
| | | | | | | (4 rows) create table IF NOT EXISTS char_type_enc1( diff --git a/src/test/regress/expected/ce_type_float.out b/src/test/regress/expected/ce_type_float.out index 281868034..fa8445b6a 100644 --- a/src/test/regress/expected/ce_type_float.out +++ b/src/test/regress/expected/ce_type_float.out @@ -5,11 +5,11 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS floatCMK CASCADE; -NOTICE: global setting "floatcmk" does not exist +NOTICE: client master key "floatcmk" does not exist CREATE CLIENT MASTER KEY floatCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY floatCEK WITH VALUES (CLIENT_MASTER_KEY = floatCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE IF NOT EXISTS float_type_t1(id INT, fl_col1 float4 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), -fl_col2 float8 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +fl_col2 float8 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); INSERT INTO float_type_t1 (id, fl_col1, fl_col2) VALUES (1, 5.555555, 5.555555567876534); INSERT INTO float_type_t1 (id, fl_col1, fl_col2) VALUES (2, -5.5555556, -5.5555555678765342); INSERT INTO float_type_t1 (id, fl_col1, fl_col2) VALUES (3, -3.40E+38, -1.79E+308); @@ -79,9 +79,9 @@ SELECT * from float_type_t1 order by id; (5 rows) DROP TABLE float_type_t1; -CREATE TABLE IF NOT EXISTS t_float_1(id INT, num float(1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS t_float_53(id INT, num float(53) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS t_float_3(id INT, num float(3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +CREATE TABLE IF NOT EXISTS t_float_1(id INT, num float(1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS t_float_53(id INT, num float(53) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS t_float_3(id INT, num float(3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); CREATE TABLE IF NOT EXISTS t_float_4(id INT, num1 NUMERIC(10,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), num2 decimal(10,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), @@ -89,8 +89,8 @@ num3 NUMBER(10,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_T num4 INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC) ); --core dump ---CREATE TABLE IF NOT EXISTS t_float_54(id INT, num float(54) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; ---CREATE TABLE IF NOT EXISTS t_float_0(id INT, num float(0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +--CREATE TABLE IF NOT EXISTS t_float_54(id INT, num float(54) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +--CREATE TABLE IF NOT EXISTS t_float_0(id INT, num float(0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); INSERT INTO t_float_3 (id, num) VALUES (1, 123.333); INSERT INTO t_float_3 (id, num) VALUES (2, 123.4445); INSERT INTO t_float_3 (id, num) VALUES (3, 1.32); @@ -117,13 +117,13 @@ 
DROP TABLE IF EXISTS t_float_3; DROP TABLE IF EXISTS t_float_4; DROP TABLE IF EXISTS t_float_1; DROP TABLE IF EXISTS t_float_53; -CREATE TABLE IF NOT EXISTS float_type_t2_test1(id INT, num DECIMAL(5,5) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS float_type_t2_test2(id INT, num DECIMAL(5,0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS float_type_t2_test3(id INT, num DECIMAL(5,6) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +CREATE TABLE IF NOT EXISTS float_type_t2_test1(id INT, num DECIMAL(5,5) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS float_type_t2_test2(id INT, num DECIMAL(5,0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS float_type_t2_test3(id INT, num DECIMAL(5,6) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); ERROR: NUMERIC scale 6 must be between 0 and precision 5 LINE 1: ...BLE IF NOT EXISTS float_type_t2_test3(id INT, num DECIMAL(5,... ^ -CREATE TABLE IF NOT EXISTS float_type_t2_test4(id INT, num DECIMAL(5,-1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +CREATE TABLE IF NOT EXISTS float_type_t2_test4(id INT, num DECIMAL(5,-1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); ERROR: NUMERIC scale -1 must be between 0 and precision 5 LINE 1: ...BLE IF NOT EXISTS float_type_t2_test4(id INT, num DECIMAL(5,... ^ @@ -134,7 +134,7 @@ d3 DECIMAL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = D n1 NUMERIC ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), n2 NUMERIC (5) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), n3 NUMERIC (5,2) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC) -) ; +); INSERT INTO float_type_t2 VALUES (1, 0, 0, 0, 0, 0, 0); INSERT INTO float_type_t2 VALUES (2, 99.999, 99999.34534, 45.45, 3455645.6495869576948, 9999.87584272435245, 999.99); INSERT INTO float_type_t2 VALUES (3, -99.999, -99999.34534, -45.45, -3455645.6495869576948, -9999.87584272435245, -999.99); diff --git a/src/test/regress/expected/ce_type_int.out b/src/test/regress/expected/ce_type_int.out index 977d6cfe2..1889f9897 100644 --- a/src/test/regress/expected/ce_type_int.out +++ b/src/test/regress/expected/ce_type_int.out @@ -12,7 +12,7 @@ int_col2 smallint ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYP int_col3 INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC), int_col4 BINARY_INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC), int_col5 BIGINT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC) -) ; +); ALTER TABLE int_type ADD COLUMN int_col6 int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC); \d int_type Table "public.int_type" @@ -191,7 +191,7 @@ NOTICE: table "serial_type_enc3" does not exist, skipping DROP COLUMN ENCRYPTION KEY intCEK; DROP CLIENT MASTER KEY intCMK; DROP CLIENT MASTER KEY IF EXISTS intCMK CASCADE; -NOTICE: global setting "intcmk" does not exist +NOTICE: client master key "intcmk" does not exist \! 
gs_ktool -d all DELETE ALL 1 diff --git a/src/test/regress/expected/ce_type_money.out b/src/test/regress/expected/ce_type_money.out index db6db1750..7504b2e27 100644 --- a/src/test/regress/expected/ce_type_money.out +++ b/src/test/regress/expected/ce_type_money.out @@ -5,9 +5,29 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS moneyCMK CASCADE; -NOTICE: global setting "moneycmk" does not exist +NOTICE: client master key "moneycmk" does not exist CREATE CLIENT MASTER KEY moneyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY moneyCEK WITH VALUES (CLIENT_MASTER_KEY = moneyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- create table IF NOT EXISTS money_type(c1 int, +-- c2 money +-- ); +-- -- -92233720368547758.08 to +92233720368547758.07 +-- insert into money_type values(1, 92233720368547758.07); +-- insert into money_type values(2, -92233720368547758.08); +-- insert into money_type values(3, 0); +-- insert into money_type values(4, 12.3456); +-- insert into money_type values(5, -12.3456); +-- insert into money_type values(6, 92233720368547758.08); +-- insert into money_type values(7, -92233720368547758.09); +-- --when inserting the encrypted money type, it will lose its form of money and scope +-- select * from money_type ORDER BY c1; +-- select * from money_type where c2 = '$12.3456'; +-- DELETE FROM money_type where c2 = '$12.3456'; +-- select * from money_type ORDER BY c1; +-- DELETE FROM money_type as alias_test where alias_test.c2 = '$-12.3456'; +-- select * from money_type ORDER BY c1; +-- UPDATE money_type SET c2 = 23.2 where c2 = '$0'; +-- select * from money_type ORDER BY c1; create table IF NOT EXISTS money_type_enc(c1 int, c2 money ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = moneyCEK, ENCRYPTION_TYPE = DETERMINISTIC) ); diff --git a/src/test/regress/expected/ce_verify_column_alter.out b/src/test/regress/expected/ce_verify_column_alter.out index e4377879a..070f60dc0 100644 --- a/src/test/regress/expected/ce_verify_column_alter.out +++ b/src/test/regress/expected/ce_verify_column_alter.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE IF NOT EXISTS t_varchar(id INT); @@ -30,13 +30,19 @@ SELECT c.relname, g.column_name from gs_encrypted_columns g join pg_class c on ( t_varchar | newname (1 row) +SELECT * FROM t_varchar where newname = 'MyName'; + id | newname +----+--------- + 1 | MyName +(1 row) + --verify tablename alter INSERT INTO t_varchar VALUES (2, 'MyNumber'); SELECT * from t_varchar; id | newname ----+---------- - 1 | MyName 2 | MyNumber + 1 | MyName (2 rows) SELECT relname from pg_class join gs_encrypted_columns on pg_class.oid = gs_encrypted_columns.rel_id; @@ -49,10 +55,16 @@ ALTER table t_varchar RENAME TO newtable; SELECT * FROM newtable; id | newname ----+---------- - 1 | MyName 2 | MyNumber + 1 | MyName (2 rows) +SELECT * FROM newtable where newname = 'MyName'; + id | newname +----+--------- + 1 | MyName +(1 row) + SELECT relname from pg_class join gs_encrypted_columns on pg_class.oid = gs_encrypted_columns.rel_id; relname ---------- diff --git a/src/test/regress/expected/ce_verify_schema_alter.out
b/src/test/regress/expected/ce_verify_schema_alter.out index 2761a278e..c598b7ba5 100644 --- a/src/test/regress/expected/ce_verify_schema_alter.out +++ b/src/test/regress/expected/ce_verify_schema_alter.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE SCHEMA test; CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); @@ -36,6 +36,12 @@ SELECT * FROM test1.t_varchar; 1 | MyName (1 row) +SELECT * FROM test1.t_varchar WHERE name = 'MyName'; + id | name +----+-------- + 1 | MyName +(1 row) + DROP TABLE test1.t_varchar; DROP SCHEMA test1; DROP COLUMN ENCRYPTION KEY MyCEK; diff --git a/src/test/regress/expected/ce_view.out b/src/test/regress/expected/ce_view.out index db7d613f1..c32705ae8 100644 --- a/src/test/regress/expected/ce_view.out +++ b/src/test/regress/expected/ce_view.out @@ -5,13 +5,13 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -DROP TABLE IF EXISTS public.client_customer CASCADE; -NOTICE: table "client_customer" does not exist, skipping -CREATE TABLE public.client_customer ( - client_customer_id integer, +DROP TABLE IF EXISTS public.customer CASCADE; +NOTICE: table "customer" does not exist, skipping +CREATE TABLE public.customer ( + customer_id integer, store_id integer NOT NULL, first_name character varying(45) NOT NULL, last_name character varying(45) NOT NULL, @@ -49,26 +49,605 @@ CREATE TABLE public.country ( country character varying(50) NOT NULL, last_update timestamp without time zone DEFAULT now() NOT NULL ); -INSERT INTO client_customer VALUES (1, 1, 'Mary', 'Smith', 'mary.smith@sakilaclient_customer.org', 5, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (2, 1, 'Patricia', 'Johnson', 'patricia.johnson@sakilaclient_customer.org', 6, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (3, 1, 'Linda', 'Williams', 'linda.williams@sakilaclient_customer.org', 7, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (4, 2, 'Barbara', 'Jones', 'barbara.jones@sakilaclient_customer.org', 8, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (5, 1, 'Elizabeth', 'Brown', 'elizabeth.brown@sakilaclient_customer.org', 9, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (6, 2, 'Jennifer', 'Davis', 'jennifer.davis@sakilaclient_customer.org', 10, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (7, 1, 'Maria', 'Miller', 'maria.miller@sakilaclient_customer.org', 11, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (8, 2, 'Susan', 'Wilson', 'susan.wilson@sakilaclient_customer.org', 12, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (9, 2, 'Margaret', 'Moore', 'margaret.moore@sakilaclient_customer.org', 13, 't', '2006-02-14', '2013-05-26 
14:49:45.738', 1); -INSERT INTO client_customer VALUES (10, 1, 'Dorothy', 'Taylor', 'dorothy.taylor@sakilaclient_customer.org', 14, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (11, 2, 'Lisa', 'Anderson', 'lisa.anderson@sakilaclient_customer.org', 15, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (12, 1, 'Nancy', 'Thomas', 'nancy.thomas@sakilaclient_customer.org', 16, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (13, 2, 'Karen', 'Jackson', 'karen.jackson@sakilaclient_customer.org', 17, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (14, 2, 'Betty', 'White', 'betty.white@sakilaclient_customer.org', 18, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (15, 1, 'Helen', 'Harris', 'helen.harris@sakilaclient_customer.org', 19, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (16, 2, 'Sandra', 'Martin', 'sandra.martin@sakilaclient_customer.org', 20, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); -INSERT INTO client_customer VALUES (17, 1, 'Donna', 'Thompson', 'donna.thompson@sakilaclient_customer.org', 21, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (18, 2, 'Carol', 'Garcia', 'carol.garcia@sakilaclient_customer.org', 22, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (19, 1, 'Gina', 'Williamson', 'gina.williamson@sakilaclient_customer.org', 217, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (20, 1, 'Derrick', 'Bourque', 'derrick.bourque@sakilaclient_customer.org', 481, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (524, 1, 'Jared', 'Ely', 'jared.ely@sakilacustomer.org', 530, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (1, 1, 'Mary', 'Smith', 'mary.smith@sakilacustomer.org', 5, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (2, 1, 'Patricia', 'Johnson', 'patricia.johnson@sakilacustomer.org', 6, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (3, 1, 'Linda', 'Williams', 'linda.williams@sakilacustomer.org', 7, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (4, 2, 'Barbara', 'Jones', 'barbara.jones@sakilacustomer.org', 8, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (5, 1, 'Elizabeth', 'Brown', 'elizabeth.brown@sakilacustomer.org', 9, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (6, 2, 'Jennifer', 'Davis', 'jennifer.davis@sakilacustomer.org', 10, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (7, 1, 'Maria', 'Miller', 'maria.miller@sakilacustomer.org', 11, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (8, 2, 'Susan', 'Wilson', 'susan.wilson@sakilacustomer.org', 12, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (9, 2, 'Margaret', 'Moore', 'margaret.moore@sakilacustomer.org', 13, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (10, 1, 'Dorothy', 'Taylor', 'dorothy.taylor@sakilacustomer.org', 14, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (11, 2, 'Lisa', 'Anderson', 'lisa.anderson@sakilacustomer.org', 15, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT 
INTO customer VALUES (12, 1, 'Nancy', 'Thomas', 'nancy.thomas@sakilacustomer.org', 16, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (13, 2, 'Karen', 'Jackson', 'karen.jackson@sakilacustomer.org', 17, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (14, 2, 'Betty', 'White', 'betty.white@sakilacustomer.org', 18, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (15, 1, 'Helen', 'Harris', 'helen.harris@sakilacustomer.org', 19, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (16, 2, 'Sandra', 'Martin', 'sandra.martin@sakilacustomer.org', 20, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (17, 1, 'Donna', 'Thompson', 'donna.thompson@sakilacustomer.org', 21, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (18, 2, 'Carol', 'Garcia', 'carol.garcia@sakilacustomer.org', 22, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (19, 1, 'Ruth', 'Martinez', 'ruth.martinez@sakilacustomer.org', 23, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (20, 2, 'Sharon', 'Robinson', 'sharon.robinson@sakilacustomer.org', 24, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (21, 1, 'Michelle', 'Clark', 'michelle.clark@sakilacustomer.org', 25, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (22, 1, 'Laura', 'Rodriguez', 'laura.rodriguez@sakilacustomer.org', 26, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (23, 2, 'Sarah', 'Lewis', 'sarah.lewis@sakilacustomer.org', 27, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (24, 2, 'Kimberly', 'Lee', 'kimberly.lee@sakilacustomer.org', 28, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (25, 1, 'Deborah', 'Walker', 'deborah.walker@sakilacustomer.org', 29, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (26, 2, 'Jessica', 'Hall', 'jessica.hall@sakilacustomer.org', 30, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (27, 2, 'Shirley', 'Allen', 'shirley.allen@sakilacustomer.org', 31, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (28, 1, 'Cynthia', 'Young', 'cynthia.young@sakilacustomer.org', 32, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (29, 2, 'Angela', 'Hernandez', 'angela.hernandez@sakilacustomer.org', 33, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (30, 1, 'Melissa', 'King', 'melissa.king@sakilacustomer.org', 34, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (31, 2, 'Brenda', 'Wright', 'brenda.wright@sakilacustomer.org', 35, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (32, 1, 'Amy', 'Lopez', 'amy.lopez@sakilacustomer.org', 36, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (33, 2, 'Anna', 'Hill', 'anna.hill@sakilacustomer.org', 37, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (34, 2, 'Rebecca', 'Scott', 'rebecca.scott@sakilacustomer.org', 38, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (35, 2, 'Virginia', 'Green', 'virginia.green@sakilacustomer.org', 39, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (36, 2, 'Kathleen', 
'Adams', 'kathleen.adams@sakilacustomer.org', 40, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (37, 1, 'Pamela', 'Baker', 'pamela.baker@sakilacustomer.org', 41, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (38, 1, 'Martha', 'Gonzalez', 'martha.gonzalez@sakilacustomer.org', 42, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (39, 1, 'Debra', 'Nelson', 'debra.nelson@sakilacustomer.org', 43, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (40, 2, 'Amanda', 'Carter', 'amanda.carter@sakilacustomer.org', 44, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (41, 1, 'Stephanie', 'Mitchell', 'stephanie.mitchell@sakilacustomer.org', 45, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (42, 2, 'Carolyn', 'Perez', 'carolyn.perez@sakilacustomer.org', 46, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (43, 2, 'Christine', 'Roberts', 'christine.roberts@sakilacustomer.org', 47, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (44, 1, 'Marie', 'Turner', 'marie.turner@sakilacustomer.org', 48, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (45, 1, 'Janet', 'Phillips', 'janet.phillips@sakilacustomer.org', 49, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (46, 2, 'Catherine', 'Campbell', 'catherine.campbell@sakilacustomer.org', 50, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (47, 1, 'Frances', 'Parker', 'frances.parker@sakilacustomer.org', 51, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (48, 1, 'Ann', 'Evans', 'ann.evans@sakilacustomer.org', 52, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (49, 2, 'Joyce', 'Edwards', 'joyce.edwards@sakilacustomer.org', 53, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (50, 1, 'Diane', 'Collins', 'diane.collins@sakilacustomer.org', 54, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (51, 1, 'Alice', 'Stewart', 'alice.stewart@sakilacustomer.org', 55, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (52, 1, 'Julie', 'Sanchez', 'julie.sanchez@sakilacustomer.org', 56, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (53, 1, 'Heather', 'Morris', 'heather.morris@sakilacustomer.org', 57, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (54, 1, 'Teresa', 'Rogers', 'teresa.rogers@sakilacustomer.org', 58, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (55, 2, 'Doris', 'Reed', 'doris.reed@sakilacustomer.org', 59, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (56, 1, 'Gloria', 'Cook', 'gloria.cook@sakilacustomer.org', 60, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (57, 2, 'Evelyn', 'Morgan', 'evelyn.morgan@sakilacustomer.org', 61, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (58, 1, 'Jean', 'Bell', 'jean.bell@sakilacustomer.org', 62, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (59, 1, 'Cheryl', 'Murphy', 'cheryl.murphy@sakilacustomer.org', 63, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (60, 1, 'Mildred', 'Bailey', 
'mildred.bailey@sakilacustomer.org', 64, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (61, 2, 'Katherine', 'Rivera', 'katherine.rivera@sakilacustomer.org', 65, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (62, 1, 'Joan', 'Cooper', 'joan.cooper@sakilacustomer.org', 66, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (63, 1, 'Ashley', 'Richardson', 'ashley.richardson@sakilacustomer.org', 67, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (64, 2, 'Judith', 'Cox', 'judith.cox@sakilacustomer.org', 68, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (65, 2, 'Rose', 'Howard', 'rose.howard@sakilacustomer.org', 69, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (66, 2, 'Janice', 'Ward', 'janice.ward@sakilacustomer.org', 70, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (67, 1, 'Kelly', 'Torres', 'kelly.torres@sakilacustomer.org', 71, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (68, 1, 'Nicole', 'Peterson', 'nicole.peterson@sakilacustomer.org', 72, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (69, 2, 'Judy', 'Gray', 'judy.gray@sakilacustomer.org', 73, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (70, 2, 'Christina', 'Ramirez', 'christina.ramirez@sakilacustomer.org', 74, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (71, 1, 'Kathy', 'James', 'kathy.james@sakilacustomer.org', 75, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (72, 2, 'Theresa', 'Watson', 'theresa.watson@sakilacustomer.org', 76, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (73, 2, 'Beverly', 'Brooks', 'beverly.brooks@sakilacustomer.org', 77, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (74, 1, 'Denise', 'Kelly', 'denise.kelly@sakilacustomer.org', 78, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (75, 2, 'Tammy', 'Sanders', 'tammy.sanders@sakilacustomer.org', 79, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (76, 2, 'Irene', 'Price', 'irene.price@sakilacustomer.org', 80, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (77, 2, 'Jane', 'Bennett', 'jane.bennett@sakilacustomer.org', 81, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (78, 1, 'Lori', 'Wood', 'lori.wood@sakilacustomer.org', 82, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (79, 1, 'Rachel', 'Barnes', 'rachel.barnes@sakilacustomer.org', 83, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (80, 1, 'Marilyn', 'Ross', 'marilyn.ross@sakilacustomer.org', 84, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (81, 1, 'Andrea', 'Henderson', 'andrea.henderson@sakilacustomer.org', 85, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (82, 1, 'Kathryn', 'Coleman', 'kathryn.coleman@sakilacustomer.org', 86, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (83, 1, 'Louise', 'Jenkins', 'louise.jenkins@sakilacustomer.org', 87, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (84, 2, 'Sara', 'Perry', 'sara.perry@sakilacustomer.org', 88, 
't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (85, 2, 'Anne', 'Powell', 'anne.powell@sakilacustomer.org', 89, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (86, 2, 'Jacqueline', 'Long', 'jacqueline.long@sakilacustomer.org', 90, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (87, 1, 'Wanda', 'Patterson', 'wanda.patterson@sakilacustomer.org', 91, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (88, 2, 'Bonnie', 'Hughes', 'bonnie.hughes@sakilacustomer.org', 92, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (89, 1, 'Julia', 'Flores', 'julia.flores@sakilacustomer.org', 93, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (90, 2, 'Ruby', 'Washington', 'ruby.washington@sakilacustomer.org', 94, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (91, 2, 'Lois', 'Butler', 'lois.butler@sakilacustomer.org', 95, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (92, 2, 'Tina', 'Simmons', 'tina.simmons@sakilacustomer.org', 96, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (93, 1, 'Phyllis', 'Foster', 'phyllis.foster@sakilacustomer.org', 97, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (94, 1, 'Norma', 'Gonzales', 'norma.gonzales@sakilacustomer.org', 98, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (95, 2, 'Paula', 'Bryant', 'paula.bryant@sakilacustomer.org', 99, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (96, 1, 'Diana', 'Alexander', 'diana.alexander@sakilacustomer.org', 100, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (97, 2, 'Annie', 'Russell', 'annie.russell@sakilacustomer.org', 101, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (98, 1, 'Lillian', 'Griffin', 'lillian.griffin@sakilacustomer.org', 102, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (99, 2, 'Emily', 'Diaz', 'emily.diaz@sakilacustomer.org', 103, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (100, 1, 'Robin', 'Hayes', 'robin.hayes@sakilacustomer.org', 104, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (101, 1, 'Peggy', 'Myers', 'peggy.myers@sakilacustomer.org', 105, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (102, 1, 'Crystal', 'Ford', 'crystal.ford@sakilacustomer.org', 106, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (103, 1, 'Gladys', 'Hamilton', 'gladys.hamilton@sakilacustomer.org', 107, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (104, 1, 'Rita', 'Graham', 'rita.graham@sakilacustomer.org', 108, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (105, 1, 'Dawn', 'Sullivan', 'dawn.sullivan@sakilacustomer.org', 109, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (106, 1, 'Connie', 'Wallace', 'connie.wallace@sakilacustomer.org', 110, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (107, 1, 'Florence', 'Woods', 'florence.woods@sakilacustomer.org', 111, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (108, 1, 'Tracy', 'Cole', 'tracy.cole@sakilacustomer.org', 112, 't', 
'2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (109, 2, 'Edna', 'West', 'edna.west@sakilacustomer.org', 113, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (110, 2, 'Tiffany', 'Jordan', 'tiffany.jordan@sakilacustomer.org', 114, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (111, 1, 'Carmen', 'Owens', 'carmen.owens@sakilacustomer.org', 115, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (112, 2, 'Rosa', 'Reynolds', 'rosa.reynolds@sakilacustomer.org', 116, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (113, 2, 'Cindy', 'Fisher', 'cindy.fisher@sakilacustomer.org', 117, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (114, 2, 'Grace', 'Ellis', 'grace.ellis@sakilacustomer.org', 118, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (115, 1, 'Wendy', 'Harrison', 'wendy.harrison@sakilacustomer.org', 119, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (116, 1, 'Victoria', 'Gibson', 'victoria.gibson@sakilacustomer.org', 120, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (117, 1, 'Edith', 'Mcdonald', 'edith.mcdonald@sakilacustomer.org', 121, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (118, 1, 'Kim', 'Cruz', 'kim.cruz@sakilacustomer.org', 122, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (119, 1, 'Sherry', 'Marshall', 'sherry.marshall@sakilacustomer.org', 123, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (120, 2, 'Sylvia', 'Ortiz', 'sylvia.ortiz@sakilacustomer.org', 124, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (121, 1, 'Josephine', 'Gomez', 'josephine.gomez@sakilacustomer.org', 125, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (122, 1, 'Thelma', 'Murray', 'thelma.murray@sakilacustomer.org', 126, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (123, 2, 'Shannon', 'Freeman', 'shannon.freeman@sakilacustomer.org', 127, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (124, 1, 'Sheila', 'Wells', 'sheila.wells@sakilacustomer.org', 128, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (125, 1, 'Ethel', 'Webb', 'ethel.webb@sakilacustomer.org', 129, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (126, 1, 'Ellen', 'Simpson', 'ellen.simpson@sakilacustomer.org', 130, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (127, 2, 'Elaine', 'Stevens', 'elaine.stevens@sakilacustomer.org', 131, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (128, 1, 'Marjorie', 'Tucker', 'marjorie.tucker@sakilacustomer.org', 132, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (129, 1, 'Carrie', 'Porter', 'carrie.porter@sakilacustomer.org', 133, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (130, 1, 'Charlotte', 'Hunter', 'charlotte.hunter@sakilacustomer.org', 134, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (131, 2, 'Monica', 'Hicks', 'monica.hicks@sakilacustomer.org', 135, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (132, 2, 'Esther', 'Crawford', 
'esther.crawford@sakilacustomer.org', 136, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (133, 1, 'Pauline', 'Henry', 'pauline.henry@sakilacustomer.org', 137, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (134, 1, 'Emma', 'Boyd', 'emma.boyd@sakilacustomer.org', 138, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (135, 2, 'Juanita', 'Mason', 'juanita.mason@sakilacustomer.org', 139, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (136, 2, 'Anita', 'Morales', 'anita.morales@sakilacustomer.org', 140, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (137, 2, 'Rhonda', 'Kennedy', 'rhonda.kennedy@sakilacustomer.org', 141, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (138, 1, 'Hazel', 'Warren', 'hazel.warren@sakilacustomer.org', 142, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (139, 1, 'Amber', 'Dixon', 'amber.dixon@sakilacustomer.org', 143, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (140, 1, 'Eva', 'Ramos', 'eva.ramos@sakilacustomer.org', 144, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (141, 1, 'Debbie', 'Reyes', 'debbie.reyes@sakilacustomer.org', 145, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (142, 1, 'April', 'Burns', 'april.burns@sakilacustomer.org', 146, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (143, 1, 'Leslie', 'Gordon', 'leslie.gordon@sakilacustomer.org', 147, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (144, 1, 'Clara', 'Shaw', 'clara.shaw@sakilacustomer.org', 148, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (145, 1, 'Lucille', 'Holmes', 'lucille.holmes@sakilacustomer.org', 149, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (146, 1, 'Jamie', 'Rice', 'jamie.rice@sakilacustomer.org', 150, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (147, 2, 'Joanne', 'Robertson', 'joanne.robertson@sakilacustomer.org', 151, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (148, 1, 'Eleanor', 'Hunt', 'eleanor.hunt@sakilacustomer.org', 152, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (149, 1, 'Valerie', 'Black', 'valerie.black@sakilacustomer.org', 153, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (150, 2, 'Danielle', 'Daniels', 'danielle.daniels@sakilacustomer.org', 154, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (151, 2, 'Megan', 'Palmer', 'megan.palmer@sakilacustomer.org', 155, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (152, 1, 'Alicia', 'Mills', 'alicia.mills@sakilacustomer.org', 156, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (153, 2, 'Suzanne', 'Nichols', 'suzanne.nichols@sakilacustomer.org', 157, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (154, 2, 'Michele', 'Grant', 'michele.grant@sakilacustomer.org', 158, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (155, 1, 'Gail', 'Knight', 'gail.knight@sakilacustomer.org', 159, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (156, 1, 'Bertha', 'Ferguson', 
'bertha.ferguson@sakilacustomer.org', 160, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (157, 2, 'Darlene', 'Rose', 'darlene.rose@sakilacustomer.org', 161, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (158, 1, 'Veronica', 'Stone', 'veronica.stone@sakilacustomer.org', 162, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (159, 1, 'Jill', 'Hawkins', 'jill.hawkins@sakilacustomer.org', 163, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (160, 2, 'Erin', 'Dunn', 'erin.dunn@sakilacustomer.org', 164, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (161, 1, 'Geraldine', 'Perkins', 'geraldine.perkins@sakilacustomer.org', 165, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (162, 2, 'Lauren', 'Hudson', 'lauren.hudson@sakilacustomer.org', 166, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (163, 1, 'Cathy', 'Spencer', 'cathy.spencer@sakilacustomer.org', 167, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (164, 2, 'Joann', 'Gardner', 'joann.gardner@sakilacustomer.org', 168, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (165, 2, 'Lorraine', 'Stephens', 'lorraine.stephens@sakilacustomer.org', 169, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (166, 1, 'Lynn', 'Payne', 'lynn.payne@sakilacustomer.org', 170, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (167, 2, 'Sally', 'Pierce', 'sally.pierce@sakilacustomer.org', 171, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (168, 1, 'Regina', 'Berry', 'regina.berry@sakilacustomer.org', 172, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (169, 2, 'Erica', 'Matthews', 'erica.matthews@sakilacustomer.org', 173, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (170, 1, 'Beatrice', 'Arnold', 'beatrice.arnold@sakilacustomer.org', 174, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (171, 2, 'Dolores', 'Wagner', 'dolores.wagner@sakilacustomer.org', 175, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (172, 1, 'Bernice', 'Willis', 'bernice.willis@sakilacustomer.org', 176, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (173, 1, 'Audrey', 'Ray', 'audrey.ray@sakilacustomer.org', 177, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (174, 2, 'Yvonne', 'Watkins', 'yvonne.watkins@sakilacustomer.org', 178, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (175, 1, 'Annette', 'Olson', 'annette.olson@sakilacustomer.org', 179, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (176, 1, 'June', 'Carroll', 'june.carroll@sakilacustomer.org', 180, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (177, 2, 'Samantha', 'Duncan', 'samantha.duncan@sakilacustomer.org', 181, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (178, 2, 'Marion', 'Snyder', 'marion.snyder@sakilacustomer.org', 182, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (179, 1, 'Dana', 'Hart', 'dana.hart@sakilacustomer.org', 183, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (180, 2, 
'Stacy', 'Cunningham', 'stacy.cunningham@sakilacustomer.org', 184, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (181, 2, 'Ana', 'Bradley', 'ana.bradley@sakilacustomer.org', 185, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (182, 1, 'Renee', 'Lane', 'renee.lane@sakilacustomer.org', 186, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (183, 2, 'Ida', 'Andrews', 'ida.andrews@sakilacustomer.org', 187, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (184, 1, 'Vivian', 'Ruiz', 'vivian.ruiz@sakilacustomer.org', 188, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (185, 1, 'Roberta', 'Harper', 'roberta.harper@sakilacustomer.org', 189, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (186, 2, 'Holly', 'Fox', 'holly.fox@sakilacustomer.org', 190, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (187, 2, 'Brittany', 'Riley', 'brittany.riley@sakilacustomer.org', 191, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (188, 1, 'Melanie', 'Armstrong', 'melanie.armstrong@sakilacustomer.org', 192, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (189, 1, 'Loretta', 'Carpenter', 'loretta.carpenter@sakilacustomer.org', 193, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (190, 2, 'Yolanda', 'Weaver', 'yolanda.weaver@sakilacustomer.org', 194, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (191, 1, 'Jeanette', 'Greene', 'jeanette.greene@sakilacustomer.org', 195, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (192, 1, 'Laurie', 'Lawrence', 'laurie.lawrence@sakilacustomer.org', 196, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (193, 2, 'Katie', 'Elliott', 'katie.elliott@sakilacustomer.org', 197, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (194, 2, 'Kristen', 'Chavez', 'kristen.chavez@sakilacustomer.org', 198, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (195, 1, 'Vanessa', 'Sims', 'vanessa.sims@sakilacustomer.org', 199, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (196, 1, 'Alma', 'Austin', 'alma.austin@sakilacustomer.org', 200, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (197, 2, 'Sue', 'Peters', 'sue.peters@sakilacustomer.org', 201, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (198, 2, 'Elsie', 'Kelley', 'elsie.kelley@sakilacustomer.org', 202, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (199, 2, 'Beth', 'Franklin', 'beth.franklin@sakilacustomer.org', 203, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (200, 2, 'Jeanne', 'Lawson', 'jeanne.lawson@sakilacustomer.org', 204, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (201, 1, 'Vicki', 'Fields', 'vicki.fields@sakilacustomer.org', 205, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (202, 2, 'Carla', 'Gutierrez', 'carla.gutierrez@sakilacustomer.org', 206, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (203, 1, 'Tara', 'Ryan', 'tara.ryan@sakilacustomer.org', 207, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer 
VALUES (204, 1, 'Rosemary', 'Schmidt', 'rosemary.schmidt@sakilacustomer.org', 208, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (205, 2, 'Eileen', 'Carr', 'eileen.carr@sakilacustomer.org', 209, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (206, 1, 'Terri', 'Vasquez', 'terri.vasquez@sakilacustomer.org', 210, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (207, 1, 'Gertrude', 'Castillo', 'gertrude.castillo@sakilacustomer.org', 211, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (208, 1, 'Lucy', 'Wheeler', 'lucy.wheeler@sakilacustomer.org', 212, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (209, 2, 'Tonya', 'Chapman', 'tonya.chapman@sakilacustomer.org', 213, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (210, 2, 'Ella', 'Oliver', 'ella.oliver@sakilacustomer.org', 214, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (211, 1, 'Stacey', 'Montgomery', 'stacey.montgomery@sakilacustomer.org', 215, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (212, 2, 'Wilma', 'Richards', 'wilma.richards@sakilacustomer.org', 216, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (213, 1, 'Gina', 'Williamson', 'gina.williamson@sakilacustomer.org', 217, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (214, 1, 'Kristin', 'Johnston', 'kristin.johnston@sakilacustomer.org', 218, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (215, 2, 'Jessie', 'Banks', 'jessie.banks@sakilacustomer.org', 219, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (216, 1, 'Natalie', 'Meyer', 'natalie.meyer@sakilacustomer.org', 220, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (217, 2, 'Agnes', 'Bishop', 'agnes.bishop@sakilacustomer.org', 221, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (218, 1, 'Vera', 'Mccoy', 'vera.mccoy@sakilacustomer.org', 222, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (219, 2, 'Willie', 'Howell', 'willie.howell@sakilacustomer.org', 223, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (220, 2, 'Charlene', 'Alvarez', 'charlene.alvarez@sakilacustomer.org', 224, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (221, 1, 'Bessie', 'Morrison', 'bessie.morrison@sakilacustomer.org', 225, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (222, 2, 'Delores', 'Hansen', 'delores.hansen@sakilacustomer.org', 226, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (223, 1, 'Melinda', 'Fernandez', 'melinda.fernandez@sakilacustomer.org', 227, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (224, 2, 'Pearl', 'Garza', 'pearl.garza@sakilacustomer.org', 228, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (225, 1, 'Arlene', 'Harvey', 'arlene.harvey@sakilacustomer.org', 229, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (226, 2, 'Maureen', 'Little', 'maureen.little@sakilacustomer.org', 230, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (227, 1, 'Colleen', 'Burton', 'colleen.burton@sakilacustomer.org', 231, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (228, 2, 'Allison', 'Stanley', 'allison.stanley@sakilacustomer.org', 232, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (229, 1, 'Tamara', 'Nguyen', 'tamara.nguyen@sakilacustomer.org', 233, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (230, 2, 'Joy', 'George', 'joy.george@sakilacustomer.org', 234, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (231, 1, 'Georgia', 'Jacobs', 'georgia.jacobs@sakilacustomer.org', 235, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (232, 2, 'Constance', 'Reid', 'constance.reid@sakilacustomer.org', 236, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (233, 2, 'Lillie', 'Kim', 'lillie.kim@sakilacustomer.org', 237, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (234, 1, 'Claudia', 'Fuller', 'claudia.fuller@sakilacustomer.org', 238, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (235, 1, 'Jackie', 'Lynch', 'jackie.lynch@sakilacustomer.org', 239, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (236, 1, 'Marcia', 'Dean', 'marcia.dean@sakilacustomer.org', 240, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (237, 1, 'Tanya', 'Gilbert', 'tanya.gilbert@sakilacustomer.org', 241, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (238, 1, 'Nellie', 'Garrett', 'nellie.garrett@sakilacustomer.org', 242, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (239, 2, 'Minnie', 'Romero', 'minnie.romero@sakilacustomer.org', 243, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (240, 1, 'Marlene', 'Welch', 'marlene.welch@sakilacustomer.org', 244, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (241, 2, 'Heidi', 'Larson', 'heidi.larson@sakilacustomer.org', 245, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (242, 1, 'Glenda', 'Frazier', 'glenda.frazier@sakilacustomer.org', 246, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (243, 1, 'Lydia', 'Burke', 'lydia.burke@sakilacustomer.org', 247, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (244, 2, 'Viola', 'Hanson', 'viola.hanson@sakilacustomer.org', 248, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (245, 1, 'Courtney', 'Day', 'courtney.day@sakilacustomer.org', 249, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (246, 1, 'Marian', 'Mendoza', 'marian.mendoza@sakilacustomer.org', 250, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (247, 1, 'Stella', 'Moreno', 'stella.moreno@sakilacustomer.org', 251, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (248, 1, 'Caroline', 'Bowman', 'caroline.bowman@sakilacustomer.org', 252, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (249, 2, 'Dora', 'Medina', 'dora.medina@sakilacustomer.org', 253, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (250, 2, 'Jo', 'Fowler', 'jo.fowler@sakilacustomer.org', 254, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (251, 2, 'Vickie', 'Brewer', 'vickie.brewer@sakilacustomer.org', 255, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (252, 2, 'Mattie', 'Hoffman', 'mattie.hoffman@sakilacustomer.org', 256, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (253, 1, 'Terry', 'Carlson', 'terry.carlson@sakilacustomer.org', 258, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (254, 2, 'Maxine', 'Silva', 'maxine.silva@sakilacustomer.org', 259, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (255, 2, 'Irma', 'Pearson', 'irma.pearson@sakilacustomer.org', 260, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (256, 2, 'Mabel', 'Holland', 'mabel.holland@sakilacustomer.org', 261, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (257, 2, 'Marsha', 'Douglas', 'marsha.douglas@sakilacustomer.org', 262, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (258, 1, 'Myrtle', 'Fleming', 'myrtle.fleming@sakilacustomer.org', 263, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (259, 2, 'Lena', 'Jensen', 'lena.jensen@sakilacustomer.org', 264, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (260, 1, 'Christy', 'Vargas', 'christy.vargas@sakilacustomer.org', 265, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (261, 1, 'Deanna', 'Byrd', 'deanna.byrd@sakilacustomer.org', 266, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (262, 2, 'Patsy', 'Davidson', 'patsy.davidson@sakilacustomer.org', 267, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (263, 1, 'Hilda', 'Hopkins', 'hilda.hopkins@sakilacustomer.org', 268, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (264, 1, 'Gwendolyn', 'May', 'gwendolyn.may@sakilacustomer.org', 269, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (265, 2, 'Jennie', 'Terry', 'jennie.terry@sakilacustomer.org', 270, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (266, 2, 'Nora', 'Herrera', 'nora.herrera@sakilacustomer.org', 271, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (267, 1, 'Margie', 'Wade', 'margie.wade@sakilacustomer.org', 272, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (268, 1, 'Nina', 'Soto', 'nina.soto@sakilacustomer.org', 273, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (269, 1, 'Cassandra', 'Walters', 'cassandra.walters@sakilacustomer.org', 274, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (270, 1, 'Leah', 'Curtis', 'leah.curtis@sakilacustomer.org', 275, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (271, 1, 'Penny', 'Neal', 'penny.neal@sakilacustomer.org', 276, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (272, 1, 'Kay', 'Caldwell', 'kay.caldwell@sakilacustomer.org', 277, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (273, 2, 'Priscilla', 'Lowe', 'priscilla.lowe@sakilacustomer.org', 278, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (274, 1, 'Naomi', 'Jennings', 'naomi.jennings@sakilacustomer.org', 279, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (275, 2, 'Carole', 'Barnett', 'carole.barnett@sakilacustomer.org', 280, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (276, 1, 'Brandy', 'Graves', 'brandy.graves@sakilacustomer.org', 281, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (277, 2, 'Olga', 'Jimenez', 'olga.jimenez@sakilacustomer.org', 282, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (278, 2, 'Billie', 'Horton', 'billie.horton@sakilacustomer.org', 283, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (279, 2, 'Dianne', 'Shelton', 'dianne.shelton@sakilacustomer.org', 284, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (280, 2, 'Tracey', 'Barrett', 'tracey.barrett@sakilacustomer.org', 285, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (281, 2, 'Leona', 'Obrien', 'leona.obrien@sakilacustomer.org', 286, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (282, 2, 'Jenny', 'Castro', 'jenny.castro@sakilacustomer.org', 287, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (283, 1, 'Felicia', 'Sutton', 'felicia.sutton@sakilacustomer.org', 288, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (284, 1, 'Sonia', 'Gregory', 'sonia.gregory@sakilacustomer.org', 289, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (285, 1, 'Miriam', 'Mckinney', 'miriam.mckinney@sakilacustomer.org', 290, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (286, 1, 'Velma', 'Lucas', 'velma.lucas@sakilacustomer.org', 291, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (287, 2, 'Becky', 'Miles', 'becky.miles@sakilacustomer.org', 292, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (288, 1, 'Bobbie', 'Craig', 'bobbie.craig@sakilacustomer.org', 293, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (289, 1, 'Violet', 'Rodriquez', 'violet.rodriquez@sakilacustomer.org', 294, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (290, 1, 'Kristina', 'Chambers', 'kristina.chambers@sakilacustomer.org', 295, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (291, 1, 'Toni', 'Holt', 'toni.holt@sakilacustomer.org', 296, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (292, 2, 'Misty', 'Lambert', 'misty.lambert@sakilacustomer.org', 297, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (293, 2, 'Mae', 'Fletcher', 'mae.fletcher@sakilacustomer.org', 298, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (294, 2, 'Shelly', 'Watts', 'shelly.watts@sakilacustomer.org', 299, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (295, 1, 'Daisy', 'Bates', 'daisy.bates@sakilacustomer.org', 300, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (296, 2, 'Ramona', 'Hale', 'ramona.hale@sakilacustomer.org', 301, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (297, 1, 'Sherri', 'Rhodes', 'sherri.rhodes@sakilacustomer.org', 302, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (298, 1, 'Erika', 'Pena', 'erika.pena@sakilacustomer.org', 303, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (299, 2, 'James', 'Gannon', 'james.gannon@sakilacustomer.org', 304, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (300, 1, 'John', 'Farnsworth', 'john.farnsworth@sakilacustomer.org', 305, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (301, 2, 'Robert', 'Baughman', 'robert.baughman@sakilacustomer.org', 306, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (302, 1, 'Michael', 'Silverman', 'michael.silverman@sakilacustomer.org', 307, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (303, 2, 'William', 'Satterfield', 'william.satterfield@sakilacustomer.org', 308, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (304, 2, 'David', 'Royal', 'david.royal@sakilacustomer.org', 309, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (305, 1, 'Richard', 'Mccrary', 'richard.mccrary@sakilacustomer.org', 310, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (306, 1, 'Charles', 'Kowalski', 'charles.kowalski@sakilacustomer.org', 311, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (307, 2, 'Joseph', 'Joy', 'joseph.joy@sakilacustomer.org', 312, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (308, 1, 'Thomas', 'Grigsby', 'thomas.grigsby@sakilacustomer.org', 313, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (309, 1, 'Christopher', 'Greco', 'christopher.greco@sakilacustomer.org', 314, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (310, 2, 'Daniel', 'Cabral', 'daniel.cabral@sakilacustomer.org', 315, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (311, 2, 'Paul', 'Trout', 'paul.trout@sakilacustomer.org', 316, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (312, 2, 'Mark', 'Rinehart', 'mark.rinehart@sakilacustomer.org', 317, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (313, 2, 'Donald', 'Mahon', 'donald.mahon@sakilacustomer.org', 318, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (314, 1, 'George', 'Linton', 'george.linton@sakilacustomer.org', 319, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (315, 2, 'Kenneth', 'Gooden', 'kenneth.gooden@sakilacustomer.org', 320, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (316, 1, 'Steven', 'Curley', 'steven.curley@sakilacustomer.org', 321, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (317, 2, 'Edward', 'Baugh', 'edward.baugh@sakilacustomer.org', 322, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (318, 1, 'Brian', 'Wyman', 'brian.wyman@sakilacustomer.org', 323, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (319, 2, 'Ronald', 'Weiner', 'ronald.weiner@sakilacustomer.org', 324, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (320, 2, 'Anthony', 'Schwab', 'anthony.schwab@sakilacustomer.org', 325, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (321, 1, 'Kevin', 'Schuler', 'kevin.schuler@sakilacustomer.org', 326, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (322, 1, 'Jason', 'Morrissey', 'jason.morrissey@sakilacustomer.org', 327, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (323, 2, 'Matthew', 'Mahan', 'matthew.mahan@sakilacustomer.org', 328, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (324, 2, 'Gary', 'Coy', 'gary.coy@sakilacustomer.org', 329, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (325, 1, 'Timothy', 'Bunn', 'timothy.bunn@sakilacustomer.org', 330, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (326, 1, 'Jose', 'Andrew', 'jose.andrew@sakilacustomer.org', 331, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (327, 2, 'Larry', 'Thrasher', 'larry.thrasher@sakilacustomer.org', 332, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (328, 2, 'Jeffrey', 'Spear', 'jeffrey.spear@sakilacustomer.org', 333, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (329, 2, 'Frank', 'Waggoner', 'frank.waggoner@sakilacustomer.org', 334, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (330, 1, 'Scott', 'Shelley', 'scott.shelley@sakilacustomer.org', 335, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (331, 1, 'Eric', 'Robert', 'eric.robert@sakilacustomer.org', 336, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (332, 1, 'Stephen', 'Qualls', 'stephen.qualls@sakilacustomer.org', 337, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (333, 2, 'Andrew', 'Purdy', 'andrew.purdy@sakilacustomer.org', 338, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (334, 2, 'Raymond', 'Mcwhorter', 'raymond.mcwhorter@sakilacustomer.org', 339, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (335, 1, 'Gregory', 'Mauldin', 'gregory.mauldin@sakilacustomer.org', 340, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (336, 1, 'Joshua', 'Mark', 'joshua.mark@sakilacustomer.org', 341, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (337, 1, 'Jerry', 'Jordon', 'jerry.jordon@sakilacustomer.org', 342, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (338, 1, 'Dennis', 'Gilman', 'dennis.gilman@sakilacustomer.org', 343, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (339, 2, 'Walter', 'Perryman', 'walter.perryman@sakilacustomer.org', 344, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (340, 1, 'Patrick', 'Newsom', 'patrick.newsom@sakilacustomer.org', 345, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (341, 1, 'Peter', 'Menard', 'peter.menard@sakilacustomer.org', 346, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (342, 1, 'Harold', 'Martino', 'harold.martino@sakilacustomer.org', 347, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (343, 1, 'Douglas', 'Graf', 'douglas.graf@sakilacustomer.org', 348, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (344, 1, 'Henry', 'Billingsley', 'henry.billingsley@sakilacustomer.org', 349, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (345, 1, 'Carl', 'Artis', 'carl.artis@sakilacustomer.org', 350, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (346, 1, 'Arthur', 'Simpkins', 'arthur.simpkins@sakilacustomer.org', 351, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (347, 2, 'Ryan', 'Salisbury', 'ryan.salisbury@sakilacustomer.org', 352, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (348, 2, 'Roger', 'Quintanilla', 'roger.quintanilla@sakilacustomer.org', 353, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (349, 2, 'Joe', 'Gilliland', 'joe.gilliland@sakilacustomer.org', 354, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (350, 1, 'Juan', 'Fraley', 'juan.fraley@sakilacustomer.org', 355, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (351, 1, 'Jack', 'Foust', 'jack.foust@sakilacustomer.org', 356, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (352, 1, 'Albert', 'Crouse', 'albert.crouse@sakilacustomer.org', 357, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (353, 1, 'Jonathan', 'Scarborough', 'jonathan.scarborough@sakilacustomer.org', 358, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (354, 2, 'Justin', 'Ngo', 'justin.ngo@sakilacustomer.org', 359, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (355, 2, 'Terry', 'Grissom', 'terry.grissom@sakilacustomer.org', 360, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (356, 2, 'Gerald', 'Fultz', 'gerald.fultz@sakilacustomer.org', 361, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (357, 1, 'Keith', 'Rico', 'keith.rico@sakilacustomer.org', 362, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (358, 2, 'Samuel', 'Marlow', 'samuel.marlow@sakilacustomer.org', 363, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (359, 2, 'Willie', 'Markham', 'willie.markham@sakilacustomer.org', 364, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (360, 2, 'Ralph', 'Madrigal', 'ralph.madrigal@sakilacustomer.org', 365, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (361, 2, 'Lawrence', 'Lawton', 'lawrence.lawton@sakilacustomer.org', 366, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (362, 1, 'Nicholas', 'Barfield', 'nicholas.barfield@sakilacustomer.org', 367, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (363, 2, 'Roy', 'Whiting', 'roy.whiting@sakilacustomer.org', 368, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (364, 1, 'Benjamin', 'Varney', 'benjamin.varney@sakilacustomer.org', 369, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (365, 2, 'Bruce', 'Schwarz', 'bruce.schwarz@sakilacustomer.org', 370, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (366, 1, 'Brandon', 'Huey', 'brandon.huey@sakilacustomer.org', 371, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (367, 1, 'Adam', 'Gooch', 'adam.gooch@sakilacustomer.org', 372, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (368, 1, 'Harry', 'Arce', 'harry.arce@sakilacustomer.org', 373, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (369, 2, 'Fred', 'Wheat', 'fred.wheat@sakilacustomer.org', 374, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (370, 2, 'Wayne', 'Truong', 'wayne.truong@sakilacustomer.org', 375, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (371, 1, 'Billy', 'Poulin', 'billy.poulin@sakilacustomer.org', 376, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (372, 2, 'Steve', 'Mackenzie', 'steve.mackenzie@sakilacustomer.org', 377, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (373, 1, 'Louis', 'Leone', 'louis.leone@sakilacustomer.org', 378, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (374, 2, 'Jeremy', 'Hurtado', 'jeremy.hurtado@sakilacustomer.org', 379, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (375, 2, 'Aaron', 'Selby', 'aaron.selby@sakilacustomer.org', 380, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (376, 1, 'Randy', 'Gaither', 'randy.gaither@sakilacustomer.org', 381, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (377, 1, 'Howard', 'Fortner', 'howard.fortner@sakilacustomer.org', 382, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (378, 1, 'Eugene', 'Culpepper', 'eugene.culpepper@sakilacustomer.org', 383, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (379, 1, 'Carlos', 'Coughlin', 'carlos.coughlin@sakilacustomer.org', 384, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (380, 1, 'Russell', 'Brinson', 'russell.brinson@sakilacustomer.org', 385, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (381, 2, 'Bobby', 'Boudreau', 'bobby.boudreau@sakilacustomer.org', 386, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (382, 2, 'Victor', 'Barkley', 'victor.barkley@sakilacustomer.org', 387, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (383, 1, 'Martin', 'Bales', 'martin.bales@sakilacustomer.org', 388, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (384, 2, 'Ernest', 'Stepp', 'ernest.stepp@sakilacustomer.org', 389, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (385, 1, 'Phillip', 'Holm', 'phillip.holm@sakilacustomer.org', 390, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (386, 1, 'Todd', 'Tan', 'todd.tan@sakilacustomer.org', 391, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (387, 2, 'Jesse', 'Schilling', 'jesse.schilling@sakilacustomer.org', 392, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (388, 2, 'Craig', 'Morrell', 'craig.morrell@sakilacustomer.org', 393, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (389, 1, 'Alan', 'Kahn', 'alan.kahn@sakilacustomer.org', 394, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (390, 1, 'Shawn', 'Heaton', 'shawn.heaton@sakilacustomer.org', 395, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (391, 1, 'Clarence', 'Gamez', 'clarence.gamez@sakilacustomer.org', 396, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (392, 2, 'Sean', 'Douglass', 'sean.douglass@sakilacustomer.org', 397, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (393, 1, 'Philip', 'Causey', 'philip.causey@sakilacustomer.org', 398, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (394, 2, 'Chris', 'Brothers', 'chris.brothers@sakilacustomer.org', 399, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (395, 2, 'Johnny', 'Turpin', 'johnny.turpin@sakilacustomer.org', 400, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (396, 1, 'Earl', 'Shanks', 'earl.shanks@sakilacustomer.org', 401, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (397, 1, 'Jimmy', 'Schrader', 'jimmy.schrader@sakilacustomer.org', 402, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (398, 1, 'Antonio', 'Meek', 'antonio.meek@sakilacustomer.org', 403, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (399, 1, 'Danny', 'Isom', 'danny.isom@sakilacustomer.org', 404, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (400, 2, 'Bryan', 'Hardison', 'bryan.hardison@sakilacustomer.org', 405, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (401, 2, 'Tony', 'Carranza', 'tony.carranza@sakilacustomer.org', 406, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (402, 1, 'Luis', 'Yanez', 'luis.yanez@sakilacustomer.org', 407, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (403, 1, 'Mike', 'Way', 'mike.way@sakilacustomer.org', 408, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (404, 2, 'Stanley', 'Scroggins', 'stanley.scroggins@sakilacustomer.org', 409, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (405, 1, 'Leonard', 'Schofield', 'leonard.schofield@sakilacustomer.org', 410, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (406, 1, 'Nathan', 'Runyon', 'nathan.runyon@sakilacustomer.org', 411, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (407, 1, 'Dale', 'Ratcliff', 'dale.ratcliff@sakilacustomer.org', 412, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (408, 1, 'Manuel', 'Murrell', 'manuel.murrell@sakilacustomer.org', 413, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (409, 2, 'Rodney', 'Moeller', 'rodney.moeller@sakilacustomer.org', 414, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (410, 2, 'Curtis', 'Irby', 'curtis.irby@sakilacustomer.org', 415, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (411, 1, 'Norman', 'Currier', 'norman.currier@sakilacustomer.org', 416, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (412, 2, 'Allen', 'Butterfield', 'allen.butterfield@sakilacustomer.org', 417, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (413, 2, 'Marvin', 'Yee', 'marvin.yee@sakilacustomer.org', 418, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (414, 1, 'Vincent', 'Ralston', 'vincent.ralston@sakilacustomer.org', 419, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (415, 1, 'Glenn', 'Pullen', 'glenn.pullen@sakilacustomer.org', 420, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (416, 2, 'Jeffery', 'Pinson', 'jeffery.pinson@sakilacustomer.org', 421, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (417, 1, 'Travis', 'Estep', 'travis.estep@sakilacustomer.org', 422, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (418, 2, 'Jeff', 'East', 'jeff.east@sakilacustomer.org', 423, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (419, 1, 'Chad', 'Carbone', 'chad.carbone@sakilacustomer.org', 424, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (420, 1, 'Jacob', 'Lance', 'jacob.lance@sakilacustomer.org', 425, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (421, 1, 'Lee', 'Hawks', 'lee.hawks@sakilacustomer.org', 426, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (422, 1, 'Melvin', 'Ellington', 'melvin.ellington@sakilacustomer.org', 427, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (423, 2, 'Alfred', 'Casillas', 'alfred.casillas@sakilacustomer.org', 428, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (424, 2, 'Kyle', 'Spurlock', 'kyle.spurlock@sakilacustomer.org', 429, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (425, 2, 'Francis', 'Sikes', 'francis.sikes@sakilacustomer.org', 430, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (426, 1, 'Bradley', 'Motley', 'bradley.motley@sakilacustomer.org', 431, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (427, 2, 'Jesus', 'Mccartney', 'jesus.mccartney@sakilacustomer.org', 432, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (428, 2, 'Herbert', 'Kruger', 'herbert.kruger@sakilacustomer.org', 433, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (429, 2, 'Frederick', 'Isbell', 'frederick.isbell@sakilacustomer.org', 434, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (430, 1, 'Ray', 'Houle', 'ray.houle@sakilacustomer.org', 435, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (431, 2, 'Joel', 'Francisco', 'joel.francisco@sakilacustomer.org', 436, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (432, 1, 'Edwin', 'Burk', 'edwin.burk@sakilacustomer.org', 437, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (433, 1, 'Don', 'Bone', 'don.bone@sakilacustomer.org', 438, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (434, 1, 'Eddie', 'Tomlin', 'eddie.tomlin@sakilacustomer.org', 439, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (435, 2, 'Ricky', 'Shelby', 'ricky.shelby@sakilacustomer.org', 440, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (436, 1, 'Troy', 'Quigley', 'troy.quigley@sakilacustomer.org', 441, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (437, 2, 'Randall', 'Neumann', 'randall.neumann@sakilacustomer.org', 442, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (438, 1, 'Barry', 'Lovelace', 'barry.lovelace@sakilacustomer.org', 443, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (439, 2, 'Alexander', 'Fennell', 'alexander.fennell@sakilacustomer.org', 444, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (440, 1, 'Bernard', 'Colby', 'bernard.colby@sakilacustomer.org', 445, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (441, 1, 'Mario', 'Cheatham', 'mario.cheatham@sakilacustomer.org', 446, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (442, 1, 'Leroy', 'Bustamante', 'leroy.bustamante@sakilacustomer.org', 447, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (443, 2, 'Francisco', 'Skidmore', 'francisco.skidmore@sakilacustomer.org', 448, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (444, 2, 'Marcus', 'Hidalgo', 'marcus.hidalgo@sakilacustomer.org', 449, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (445, 1, 'Micheal', 'Forman', 'micheal.forman@sakilacustomer.org', 450, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (446, 2, 'Theodore', 'Culp', 'theodore.culp@sakilacustomer.org', 451, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (447, 1, 'Clifford', 'Bowens', 'clifford.bowens@sakilacustomer.org', 452, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (448, 1, 'Miguel', 'Betancourt', 'miguel.betancourt@sakilacustomer.org', 453, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (449, 2, 'Oscar', 'Aquino', 'oscar.aquino@sakilacustomer.org', 454, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (450, 1, 'Jay', 'Robb', 'jay.robb@sakilacustomer.org', 455, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (451, 1, 'Jim', 'Rea', 'jim.rea@sakilacustomer.org', 456, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (452, 1, 'Tom', 'Milner', 'tom.milner@sakilacustomer.org', 457, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (453, 1, 'Calvin', 'Martel', 'calvin.martel@sakilacustomer.org', 458, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (454, 2, 'Alex', 'Gresham', 'alex.gresham@sakilacustomer.org', 459, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (455, 2, 'Jon', 'Wiles', 'jon.wiles@sakilacustomer.org', 460, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (456, 2, 'Ronnie', 'Ricketts', 'ronnie.ricketts@sakilacustomer.org', 461, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (457, 2, 'Bill', 'Gavin', 'bill.gavin@sakilacustomer.org', 462, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (458, 1, 'Lloyd', 'Dowd', 'lloyd.dowd@sakilacustomer.org', 463, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (459, 1, 'Tommy', 'Collazo', 'tommy.collazo@sakilacustomer.org', 464, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (460, 1, 'Leon', 'Bostic', 'leon.bostic@sakilacustomer.org', 465, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (461, 1, 'Derek', 'Blakely', 'derek.blakely@sakilacustomer.org', 466, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (462, 2, 'Warren', 'Sherrod', 'warren.sherrod@sakilacustomer.org', 467, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (463, 2, 'Darrell', 'Power', 'darrell.power@sakilacustomer.org', 468, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (464, 1, 'Jerome', 'Kenyon', 'jerome.kenyon@sakilacustomer.org', 469, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (465, 1, 'Floyd', 'Gandy', 'floyd.gandy@sakilacustomer.org', 470, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (466, 1, 'Leo', 'Ebert', 'leo.ebert@sakilacustomer.org', 471, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (467, 2, 'Alvin', 'Deloach', 'alvin.deloach@sakilacustomer.org', 472, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (468, 1, 'Tim', 'Cary', 'tim.cary@sakilacustomer.org', 473, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (469, 2, 'Wesley', 'Bull', 'wesley.bull@sakilacustomer.org', 474, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (470, 1, 'Gordon', 'Allard', 'gordon.allard@sakilacustomer.org', 475, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (471, 1, 'Dean', 'Sauer', 'dean.sauer@sakilacustomer.org', 476, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (472, 1, 'Greg', 'Robins', 'greg.robins@sakilacustomer.org', 477, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (473, 2, 'Jorge', 'Olivares', 'jorge.olivares@sakilacustomer.org', 478, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (474, 2, 'Dustin', 'Gillette', 'dustin.gillette@sakilacustomer.org', 479, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (475, 2, 'Pedro', 'Chestnut', 'pedro.chestnut@sakilacustomer.org', 480, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (476, 1, 'Derrick', 'Bourque', 'derrick.bourque@sakilacustomer.org', 481, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (477, 1, 'Dan', 'Paine', 'dan.paine@sakilacustomer.org', 482, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (478, 1, 'Lewis', 'Lyman', 'lewis.lyman@sakilacustomer.org', 483, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (479, 1, 'Zachary', 'Hite', 'zachary.hite@sakilacustomer.org', 484, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (480, 1, 'Corey', 'Hauser', 'corey.hauser@sakilacustomer.org', 485, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (481, 1, 'Herman', 'Devore', 'herman.devore@sakilacustomer.org', 486, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (482, 1, 'Maurice', 'Crawley', 'maurice.crawley@sakilacustomer.org', 487, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (483, 2, 'Vernon', 'Chapa', 'vernon.chapa@sakilacustomer.org', 488, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (484, 1, 'Roberto', 'Vu', 'roberto.vu@sakilacustomer.org', 489, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (485, 1, 'Clyde', 'Tobias', 'clyde.tobias@sakilacustomer.org', 490, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (486, 1, 'Glen', 'Talbert', 'glen.talbert@sakilacustomer.org', 491, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (487, 2, 'Hector', 'Poindexter', 'hector.poindexter@sakilacustomer.org', 492, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (488, 2, 'Shane', 'Millard', 'shane.millard@sakilacustomer.org', 493, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (489, 1, 'Ricardo', 'Meador', 'ricardo.meador@sakilacustomer.org', 494, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (490, 1, 'Sam', 'Mcduffie', 'sam.mcduffie@sakilacustomer.org', 495, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (491, 2, 'Rick', 'Mattox', 'rick.mattox@sakilacustomer.org', 496, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (492, 2, 'Lester', 'Kraus', 'lester.kraus@sakilacustomer.org', 497, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (493, 1, 'Brent', 'Harkins', 'brent.harkins@sakilacustomer.org', 498, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (494, 2, 'Ramon', 'Choate', 'ramon.choate@sakilacustomer.org', 499, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (495, 2, 'Charlie', 'Bess', 'charlie.bess@sakilacustomer.org', 500, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (496, 2, 'Tyler', 'Wren', 'tyler.wren@sakilacustomer.org', 501, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (497, 2, 'Gilbert', 'Sledge', 'gilbert.sledge@sakilacustomer.org', 502, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (498, 1, 'Gene', 'Sanborn', 'gene.sanborn@sakilacustomer.org', 503, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (499, 2, 'Marc', 'Outlaw', 'marc.outlaw@sakilacustomer.org', 504, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (500, 1, 'Reginald', 'Kinder', 'reginald.kinder@sakilacustomer.org', 505, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (501, 1, 'Ruben', 'Geary', 'ruben.geary@sakilacustomer.org', 506, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (502, 1, 'Brett', 'Cornwell', 'brett.cornwell@sakilacustomer.org', 507, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (503, 1, 'Angel', 'Barclay', 'angel.barclay@sakilacustomer.org', 508, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (504, 1, 'Nathaniel', 'Adam', 'nathaniel.adam@sakilacustomer.org', 509, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (505, 1, 'Rafael', 'Abney', 'rafael.abney@sakilacustomer.org', 510, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (506, 2, 'Leslie', 'Seward', 'leslie.seward@sakilacustomer.org', 511, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (507, 2, 'Edgar', 'Rhoads', 'edgar.rhoads@sakilacustomer.org', 512, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (508, 2, 'Milton', 'Howland', 'milton.howland@sakilacustomer.org', 513, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (509, 1, 'Raul', 'Fortier', 'raul.fortier@sakilacustomer.org', 514, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (510, 2, 'Ben', 'Easter', 'ben.easter@sakilacustomer.org', 515, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (511, 1, 'Chester', 'Benner', 'chester.benner@sakilacustomer.org', 516, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (512, 1, 'Cecil', 'Vines', 'cecil.vines@sakilacustomer.org', 517, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (513, 2, 'Duane', 'Tubbs', 'duane.tubbs@sakilacustomer.org', 519, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (514, 2, 'Franklin', 'Troutman', 'franklin.troutman@sakilacustomer.org', 520, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (515, 1, 'Andre', 'Rapp', 'andre.rapp@sakilacustomer.org', 521, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (516, 2, 'Elmer', 'Noe', 'elmer.noe@sakilacustomer.org', 522, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (517, 2, 'Brad', 'Mccurdy', 'brad.mccurdy@sakilacustomer.org', 523, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (518, 1, 'Gabriel', 'Harder', 'gabriel.harder@sakilacustomer.org', 524, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (519, 2, 'Ron', 'Deluca', 'ron.deluca@sakilacustomer.org', 525, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (520, 2, 'Mitchell', 'Westmoreland', 'mitchell.westmoreland@sakilacustomer.org', 526, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (521, 2, 'Roland', 'South', 'roland.south@sakilacustomer.org', 527, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (522, 2, 'Arnold', 'Havens', 'arnold.havens@sakilacustomer.org', 528, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (523, 1, 'Harvey', 'Guajardo', 'harvey.guajardo@sakilacustomer.org', 529, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (525, 2, 'Adrian', 'Clary', 'adrian.clary@sakilacustomer.org', 531, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (526, 2, 'Karl', 'Seal', 'karl.seal@sakilacustomer.org', 532, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (527, 1, 'Cory', 'Meehan', 'cory.meehan@sakilacustomer.org', 533, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (528, 1, 'Claude', 'Herzog', 'claude.herzog@sakilacustomer.org', 534, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (529, 2, 'Erik', 'Guillen', 'erik.guillen@sakilacustomer.org', 535, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (530, 2, 'Darryl', 'Ashcraft', 'darryl.ashcraft@sakilacustomer.org', 536, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (531, 2, 'Jamie', 'Waugh', 'jamie.waugh@sakilacustomer.org', 537, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (532, 2, 'Neil', 'Renner', 'neil.renner@sakilacustomer.org', 538, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (533, 1, 'Jessie', 'Milam', 'jessie.milam@sakilacustomer.org', 539, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (534, 1, 'Christian', 'Jung', 'christian.jung@sakilacustomer.org', 540, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (535, 1, 'Javier', 'Elrod', 'javier.elrod@sakilacustomer.org', 541, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (536, 2, 'Fernando', 'Churchill', 'fernando.churchill@sakilacustomer.org', 542, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (537, 2, 'Clinton', 'Buford', 'clinton.buford@sakilacustomer.org', 543, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (538, 2, 'Ted', 'Breaux', 'ted.breaux@sakilacustomer.org', 544, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (539, 1, 'Mathew', 'Bolin', 'mathew.bolin@sakilacustomer.org', 545, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (540, 1, 'Tyrone', 'Asher', 'tyrone.asher@sakilacustomer.org', 546, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (541, 2, 'Darren', 'Windham', 'darren.windham@sakilacustomer.org', 547, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (542, 2, 'Lonnie', 'Tirado', 'lonnie.tirado@sakilacustomer.org', 548, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (543, 1, 'Lance', 'Pemberton', 'lance.pemberton@sakilacustomer.org', 549, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (544, 2, 'Cody', 'Nolen', 'cody.nolen@sakilacustomer.org', 550, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (545, 2, 'Julio', 'Noland', 'julio.noland@sakilacustomer.org', 551, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (546, 1, 'Kelly', 'Knott', 'kelly.knott@sakilacustomer.org', 552, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (547, 1, 'Kurt', 'Emmons', 'kurt.emmons@sakilacustomer.org', 553, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (548, 1, 'Allan', 'Cornish', 'allan.cornish@sakilacustomer.org', 554, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (549, 1, 'Nelson', 'Christenson', 'nelson.christenson@sakilacustomer.org', 555, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (550, 2, 'Guy', 'Brownlee', 'guy.brownlee@sakilacustomer.org', 556, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (551, 2, 'Clayton', 'Barbee', 'clayton.barbee@sakilacustomer.org', 557, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (552, 2, 'Hugh', 'Waldrop', 'hugh.waldrop@sakilacustomer.org', 558, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (553, 1, 'Max', 'Pitt', 'max.pitt@sakilacustomer.org', 559, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (554, 1, 'Dwayne', 'Olvera', 'dwayne.olvera@sakilacustomer.org', 560, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (555, 1, 'Dwight', 'Lombardi', 'dwight.lombardi@sakilacustomer.org', 561, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (556, 2, 'Armando', 'Gruber', 'armando.gruber@sakilacustomer.org', 562, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (557, 1, 'Felix', 'Gaffney', 'felix.gaffney@sakilacustomer.org', 563, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (558, 1, 'Jimmie', 'Eggleston', 'jimmie.eggleston@sakilacustomer.org', 564, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (559, 2, 'Everett', 'Banda', 'everett.banda@sakilacustomer.org', 565, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (560, 1, 'Jordan', 'Archuleta', 'jordan.archuleta@sakilacustomer.org', 566, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (561, 2, 'Ian', 'Still', 'ian.still@sakilacustomer.org', 567, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (562, 1, 'Wallace', 'Slone', 'wallace.slone@sakilacustomer.org', 568, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (563, 2, 'Ken', 'Prewitt', 'ken.prewitt@sakilacustomer.org', 569, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (564, 2, 'Bob', 'Pfeiffer', 'bob.pfeiffer@sakilacustomer.org', 570, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (565, 2, 'Jaime', 'Nettles', 'jaime.nettles@sakilacustomer.org', 571, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (566, 1, 'Casey', 'Mena', 'casey.mena@sakilacustomer.org', 572, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (567, 2, 'Alfredo', 'Mcadams', 'alfredo.mcadams@sakilacustomer.org', 573, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (568, 2, 'Alberto', 'Henning', 'alberto.henning@sakilacustomer.org', 574, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (569, 2, 'Dave', 'Gardiner', 'dave.gardiner@sakilacustomer.org', 575, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (570, 2, 'Ivan', 'Cromwell', 'ivan.cromwell@sakilacustomer.org', 576, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (571, 2, 'Johnnie', 'Chisholm', 'johnnie.chisholm@sakilacustomer.org', 577, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (572, 1, 'Sidney', 'Burleson', 'sidney.burleson@sakilacustomer.org', 578, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (573, 1, 'Byron', 'Box', 'byron.box@sakilacustomer.org', 579, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (574, 2, 'Julian', 'Vest', 'julian.vest@sakilacustomer.org', 580, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (575, 2, 'Isaac', 'Oglesby', 'isaac.oglesby@sakilacustomer.org', 581, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (576, 2, 'Morris', 'Mccarter', 'morris.mccarter@sakilacustomer.org', 582, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (577, 2, 'Clifton', 'Malcolm', 'clifton.malcolm@sakilacustomer.org', 583, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (578, 2, 'Willard', 'Lumpkin', 'willard.lumpkin@sakilacustomer.org', 584, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (579, 2, 'Daryl', 'Larue', 'daryl.larue@sakilacustomer.org', 585, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (580, 1, 'Ross', 'Grey', 'ross.grey@sakilacustomer.org', 586, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (581, 1, 'Virgil', 'Wofford', 'virgil.wofford@sakilacustomer.org', 587, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (582, 2, 'Andy', 'Vanhorn', 'andy.vanhorn@sakilacustomer.org', 588, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (583, 1, 'Marshall', 'Thorn', 'marshall.thorn@sakilacustomer.org', 589, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (584, 2, 'Salvador', 'Teel', 'salvador.teel@sakilacustomer.org', 590, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (585, 1, 'Perry', 'Swafford', 'perry.swafford@sakilacustomer.org', 591, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (586, 1, 'Kirk', 'Stclair', 'kirk.stclair@sakilacustomer.org', 592, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (587, 1, 'Sergio', 'Stanfield', 'sergio.stanfield@sakilacustomer.org', 593, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (588, 1, 'Marion', 'Ocampo', 'marion.ocampo@sakilacustomer.org', 594, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (589, 1, 'Tracy', 'Herrmann', 'tracy.herrmann@sakilacustomer.org', 595, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (590, 2, 'Seth', 'Hannon', 'seth.hannon@sakilacustomer.org', 596, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (591, 1, 'Kent', 'Arsenault', 'kent.arsenault@sakilacustomer.org', 597, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (592, 1, 'Terrance', 'Roush', 'terrance.roush@sakilacustomer.org', 598, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (593, 2, 'Rene', 'Mcalister', 'rene.mcalister@sakilacustomer.org', 599, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (594, 1, 'Eduardo', 'Hiatt', 'eduardo.hiatt@sakilacustomer.org', 600, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (595, 1, 'Terrence', 'Gunderson', 'terrence.gunderson@sakilacustomer.org', 601, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (596, 1, 'Enrique', 'Forsythe', 'enrique.forsythe@sakilacustomer.org', 602, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (597, 1, 'Freddie', 'Duggan', 'freddie.duggan@sakilacustomer.org', 603, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (598, 1, 'Wade', 'Delvalle', 'wade.delvalle@sakilacustomer.org', 604, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (599, 2, 'Austin', 'Cintron', 'austin.cintron@sakilacustomer.org', 605, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (5,'1913 Hanoi Way', 'Nagasaki', 463, '35200', '28303384290', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (6,'1121 Loja Avenue', 'California', 449, '17886', '838635286649', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (7,'692 Joliet Street', 'Attika', 38, '83579', '448477190408', '2006-02-15 09:45:30');
@@ -82,8 +661,347 @@ INSERT INTO address (address_id, address, district, city_id, postal_code, phone,
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (17,'270 Amroha Parkway', 'Osmaniye', 384, '29610', '695479687538', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (18,'770 Bydgoszcz Avenue', 'California', 120, '16266', '517338314235', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (20,'360 Toulouse Parkway', 'England', 495, '54308', '949312333307', '2006-02-15 09:45:30');
-INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (21,'1001 Miyakonojo Lane', 'Taizz', 518, '67924', '584316724815', '2006-02-15 09:45:30');
-INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (22,'1153 Allende Way', 'Qubec', 179, '20336', '856872225376', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (21,'270 Toulon Boulevard', 'Kalmykia', 156, '81766', '407752414682', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (22,'320 Brest Avenue', 'Kaduna', 252, '43331', '747791594069', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (27,'1780 Hino Boulevard', 'Liepaja', 303, '7716', '902731229323', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (28,'96 Tafuna Way', 'Crdoba', 128, '99865', '934730187245', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (32,'1425 Shikarpur Manor', 'Bihar', 346, '65599', '678220867005', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (33,'786 Aurora Avenue', 'Yamaguchi', 474, '65750', '18461860151', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (34,'1668 Anpolis Street', 'Taipei', 316, '50199', '525255540978', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (38,'61 Tama Street', 'Okayama', 284, '94065', '708403338270', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (39,'391 Callao Drive', 'Midi-Pyrnes', 544, '34021', '440512153169', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (41,'1440 Fukuyama Loop', 'Henan', 362, '47929', '912257250465', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (44,'671 Graz Street', 'Oriental', 353, '94399', '680768868518', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (45,'42 Brindisi Place', 'Yerevan', 586, '16744', '42384721397', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (46,'1632 Bislig Avenue', 'Nonthaburi', 394, '61117', '471675840679', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (47,'1447 Imus Way', 'Tahiti', 167, '48942', '539758313890', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (48,'1998 Halifax Drive', 'Lipetsk', 308, '76022', '177727722820', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (49,'1718 Valencia Street', 'Antofagasta', 27, '37359', '675292816413', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (51,'686 Garland Manor', 'Cear', 247, '52535', '69493378813', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (52,'909 Garland Manor', 'Tatarstan', 367, '69367', '705800322606', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (53,'725 Isesaki Place', 'Mekka', 237, '74428', '876295323994', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (54,'115 Hidalgo Parkway', 'Khartum', 379, '80168', '307703950263', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (55,'1135 Izumisano Parkway', 'California', 171, '48150', '171822533480', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (56,'939 Probolinggo Loop', 'Galicia', 1, '4166', '680428310138', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (57,'17 Kabul Boulevard', 'Chiba', 355, '38594', '697760867968', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (61,'943 Tokat Street', 'Vaduz', 560, '45428', '889318963672', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (62,'1114 Liepaja Street', 'Sarawak', 282, '69226', '212869228936', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (63,'1213 Ranchi Parkway', 'Karnataka', 350, '94352', '800024380485', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (64,'81 Hodeida Way', 'Rajasthan', 231, '55561', '250767749542', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (65,'915 Ponce Place', 'Basel-Stadt', 56, '83980', '1395251317', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (66,'1717 Guadalajara Lane', 'Missouri', 441, '85505', '914090181665', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (67,'1214 Hanoi Way', 'Nebraska', 306, '67055', '491001136577', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (68,'1966 Amroha Avenue', 'Sichuan', 139, '70385', '333489324603', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (69,'698 Otsu Street', 'Cayenne', 105, '71110', '409983924481', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (71,'1586 Guaruj Place', 'Hunan', 579, '5135', '947233365992', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (73,'1031 Daugavpils Parkway', 'Bchar', 63, '59025', '107137400143', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (74,'1124 Buenaventura Drive', 'Mekka', 13, '6856', '407733804223', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (79,'1551 Rampur Lane', 'Changhwa', 108, '72394', '251164340471', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (80,'602 Paarl Street', 'Pavlodar', 402, '98889', '896314772871', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (83,'586 Tete Way', 'Kanagawa', 256, '1079', '18581624103', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (85,'320 Baiyin Parkway', 'Mahajanga', 319, '37307', '223664661973', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (87,'929 Tallahassee Loop', 'Gauteng', 497, '74671', '800716535041', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (89,'1557 Ktahya Boulevard', 'England', 88, '88002', '720998247660', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (90,'870 Ashqelon Loop', 'Songkhla', 489, '84931', '135117278909', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (91,'1740 Portoviejo Avenue', 'Sucre', 480, '29932', '198123170793', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (92,'1942 Ciparay Parkway', 'Cheju', 113, '82624', '978987363654', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (94,'1952 Chatsworth Drive', 'Guangdong', 332, '25958', '991562402283', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (96,'984 Effon-Alaiye Avenue', 'Gois', 183, '17119', '132986892228', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (98,'152 Kitwe Parkway', 'Caraga', 82, '53182', '835433605312', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (99,'1697 Tanauan Lane', 'Punjab', 399, '22870', '4764773857', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (100,'1308 Arecibo Way', 'Georgia', 41, '30695', '6171054059', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (101,'1599 Plock Drive', 'Tete', 534, '71986', '817248913162', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (104,'1913 Kamakura Place', 'Lipetsk', 238, '97287', '942570536750', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (105,'733 Mandaluyong Place', 'Asir', 2, '77459', '196568435814', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (108,'1386 Yangor Avenue', 'Provence-Alpes-Cte', 543, '80720', '449216226468', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (112,'1002 Ahmadnagar Manor', 'Mxico', 213, '93026', '371490777743', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (114,'804 Elista Drive', 'Hubei', 159, '61069', '379804592943', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (121,'1967 Sincelejo Place', 'Gujarat', 176, '73644', '577812616052', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (122,'333 Goinia Way', 'Texas', 185, '78625', '909029256431', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (124,'241 Mosul Lane', 'Risaralda', 147, '76157', '765345144779', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (126,'1175 Tanauan Way', 'Lima', 305, '64615', '937222955822', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (128,'848 Tafuna Manor', 'Ktahya', 281, '45142', '614935229095', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (129,'569 Baicheng Lane', 'Gauteng', 85, '60304', '490211944645', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (131,'801 Hagonoy Drive', 'Smolensk', 484, '8439', '237426099212', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (133,'1854 Tieli Street', 'Shandong', 302, '15819', '509492324775', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (134,'758 Junan Lane', 'Gois', 190, '82639', '935448624185', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (138,'765 Southampton Drive', 'al-Qalyubiya', 421, '4285', '23712411567', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (139,'943 Johannesburg Avenue', 'Maharashtra', 417, '5892', '90921003005', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (140,'788 Atinsk Street', 'Karnataka', 211, '81691', '146497509724', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (141,'1749 Daxian Place', 'Gelderland', 29, '11044', '963369996279', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (143,'1029 Dzerzinsk Manor', 'Ynlin', 542, '57519', '33173584456', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (144,'1666 Beni-Mellal Place', 'Tennessee', 123, '13377', '9099941466', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (145,'928 Jaffna Loop', 'Hiroshima', 172, '93762', '581852137991', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (146,'483 Ljubertsy Parkway', 'Scotland', 149, '60562', '581174211853', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (148,'1027 Songkhla Manor', 'Minsk', 340, '30861', '563660187896', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address,
district, city_id, postal_code, phone, last_update) VALUES (149,'999 Sanaa Loop', 'Gauteng', 491, '3439', '918032330119', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (150,'879 Newcastle Way', 'Michigan', 499, '90732', '206841104594', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (151,'1337 Lincoln Parkway', 'Saitama', 555, '99457', '597815221267', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (152,'1952 Pune Lane', 'Saint-Denis', 442, '92150', '354615066969', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (153,'782 Mosul Street', 'Massachusetts', 94, '25545', '885899703621', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (155,'1560 Jelets Boulevard', 'Shandong', 291, '77777', '189446090264', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (156,'1963 Moscow Place', 'Assam', 354, '64863', '761379480249', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (158,'798 Cianjur Avenue', 'Shanxi', 590, '76990', '499408708580', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (167,'1287 Xiangfan Boulevard', 'Gifu', 253, '57844', '819416131190', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (168,'842 Salzburg Lane', 'Adana', 529, '3313', '697151428760', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (169,'154 Tallahassee Loop', 'Xinxiang', 199, '62250', '935508855935', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (171,'1540 Wroclaw Drive', 'Maharashtra', 107, '62686', '182363341674', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (172,'475 Atinsk Way', 'Gansu', 240, '59571', '201705577290', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (173,'1294 Firozabad Drive', 'Jiangxi', 407, '70618', '161801569569', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (174,'1877 Ezhou Lane', 'Rajasthan', 550, '63337', '264541743403', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (175,'316 Uruapan Street', 'Perak', 223, '58194', '275788967899', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (176,'29 Pyongyang Loop', 'Batman', 58, '47753', '734780743462', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (177,'1010 Klerksdorp Way', 'Steiermark', 186, '6802', '493008546874', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES 
(178,'1848 Salala Boulevard', 'Miranda', 373, '25220', '48265851133', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (179,'431 Xiangtan Avenue', 'Kerala', 18, '4854', '230250973122', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (180,'757 Rustenburg Avenue', 'Skikda', 483, '89668', '506134035434', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (181,'146 Johannesburg Way', 'Tamaulipas', 330, '54132', '953689007081', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (183,'1089 Iwatsuki Avenue', 'Kirov', 270, '35109', '866092335135', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (186,'533 al-Ayn Boulevard', 'California', 126, '8862', '662227486184', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (187,'1839 Szkesfehrvr Parkway', 'Gois', 317, '55709', '947468818183', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (188,'741 Ambattur Manor', 'Noord-Brabant', 438, '43310', '302590383819', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (191,'140 Chiayi Parkway', 'Sumy', 506, '38982', '855863906434', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (192,'1166 Changhwa Street', 'Caraga', 62, '58852', '650752094490', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (197,'447 Surakarta Loop', 'Nyanza', 271, '10428', '940830176580', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (198,'345 Oshawa Boulevard', 'Tokyo-to', 204, '32114', '104491201771', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (200,'1074 Binzhou Manor', 'Baden-Wrttemberg', 325, '36490', '331132568928', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (201,'817 Bradford Loop', 'Jiangsu', 109, '89459', '264286442804', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (204,'387 Mwene-Ditu Drive', 'Ahal', 35, '8073', '764477681869', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (205,'68 Molodetno Manor', 'Nordrhein-Westfalen', 575, '4662', '146640639760', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (206,'642 Nador Drive', 'Maharashtra', 77, '3924', '369050085652', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (208,'1215 Pyongyang Parkway', 'Usak', 557, '25238', '646237101779', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (211,'850 Salala Loop', 
'Kitaa', 371, '10800', '403404780639', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (213,'43 Dadu Avenue', 'Rajasthan', 74, '4855', '95666951770', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (214,'751 Lima Loop', 'Aden', 7, '99405', '756460337785', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (215,'1333 Haldia Street', 'Jilin', 174, '82161', '408304391718', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (216,'660 Jedda Boulevard', 'Washington', 65, '25053', '168758068397', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (217,'1001 Miyakonojo Lane', 'Taizz', 518, '67924', '584316724815', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (218,'226 Brest Manor', 'California', 508, '2299', '785881412500', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (219,'1229 Valencia Parkway', 'Haskovo', 498, '99124', '352679173732', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (220,'1201 Qomsheh Manor', 'Gois', 28, '21464', '873492228462', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (222,'1168 Najafabad Parkway', 'Kabol', 251, '40301', '886649065861', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (225,'1926 Gingoog Street', 'Sisilia', 511, '22824', '469738825391', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (227,'1820 Maring Parkway', 'Punjab', 324, '88307', '99760893676', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (230,'201 Effon-Alaiye Way', 'Asuncin', 37, '64344', '684192903087', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (231,'430 Alessandria Loop', 'Saarland', 439, '47446', '669828224459', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (233,'356 Olomouc Manor', 'Gois', 26, '93323', '22326410776', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (234,'1256 Bislig Boulevard', 'Botosani', 86, '50598', '479007229460', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (236,'885 Yingkou Manor', 'Kaduna', 596, '31390', '588964509072', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (237,'1736 Cavite Place', 'Qina', 216, '98775', '431770603551', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (238,'346 Skikda Parkway', 'Hawalli', 233, '90628', '630424482919', '2006-02-15 09:45:30'); +INSERT INTO 
address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (242,'1964 Gijn Manor', 'Karnataka', 473, '14408', '918119601885', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (244,'1148 Saarbrcken Parkway', 'Fukushima', 226, '1921', '137773001988', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (245,'1103 Bilbays Parkway', 'Hubei', 578, '87660', '279979529227', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (246,'1246 Boksburg Parkway', 'Hebei', 422, '28349', '890283544295', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (247,'1483 Pathankot Street', 'Tucumn', 454, '37288', '686015532180', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (249,'300 Junan Street', 'Kyonggi', 553, '81314', '890289150158', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (251,'1473 Changhwa Parkway', 'Mxico', 124, '75933', '266798132374', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (252,'1309 Weifang Street', 'Florida', 520, '57338', '435785045362', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (253,'1760 Oshawa Manor', 'Tianjin', 535, '38140', '56257502250', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (256,'1497 Yuzhou Drive', 'England', 312, '3433', '246810237916', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (258,'752 Ondo Loop', 'Miyazaki', 338, '32474', '134673576619', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (261,'51 Laredo Avenue', 'Sagaing', 342, '68146', '884536620568', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (262,'771 Yaound Manor', 'Sofala', 64, '86768', '245477603573', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (263,'532 Toulon Street', 'Santiago', 460, '69517', '46871694740', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (267,'816 Cayenne Parkway', 'Manab', 414, '93629', '282874611748', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (269,'446 Kirovo-Tepetsk Lane', 'Osaka', 203, '19428', '303967439816', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (271,'1587 Loja Manor', 'Salzburg', 447, '5410', '621625204422', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (272,'1762 Paarl Parkway', 'Hunan', 298, '53928', '192459639410', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, 
last_update) VALUES (273,'1519 Ilorin Place', 'Kerala', 395, '49298', '357445645426', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (274,'920 Kumbakonam Loop', 'California', 446, '75090', '685010736240', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (275,'906 Goinia Way', 'Wielkopolskie', 255, '83565', '701767622697', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (279,'1884 Shikarpur Avenue', 'Haryana', 263, '85548', '959949395183', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (280,'1980 Kamjanets-Podilskyi Street', 'Illinois', 404, '89502', '874337098891', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (281,'1944 Bamenda Way', 'Michigan', 573, '24645', '75975221996', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (283,'457 Tongliao Loop', 'Bursa', 222, '56254', '880756161823', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (286,'1308 Sumy Loop', 'Fujian', 175, '30657', '583021225407', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (287,'1405 Chisinau Place', 'Ponce', 411, '8160', '62781725285', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (288,'226 Halifax Street', 'Xinxiang', 277, '58492', '790651020929', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (293,'86 Higashiosaka Lane', 'Guanajuato', 563, '33768', '957128697225', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (295,'544 Tarsus Boulevard', 'Gurico', 562, '53145', '892523334', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (296,'1936 Cuman Avenue', 'Virginia', 433, '61195', '976798660411', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (297,'1192 Tongliao Street', 'Sharja', 470, '19065', '350970907017', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (298,'44 Najafabad Way', 'Baskimaa', 146, '61391', '96604821070', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (299,'32 Pudukkottai Lane', 'Ohio', 140, '38834', '967274728547', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (300,'661 Chisinau Lane', 'Pietari', 274, '8856', '816436065431', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (303,'898 Jining Lane', 'Pohjois-Pohjanmaa', 387, '40070', '161643343536', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (304,'1635 Kuwana 
Boulevard', 'Hiroshima', 205, '52137', '710603868323', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (306,'1883 Maikop Lane', 'Kaliningrad', 254, '68469', '96110042435', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (307,'1908 Gaziantep Place', 'Liaoning', 536, '58979', '108053751300', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (308,'687 Alessandria Parkway', 'Sanaa', 455, '57587', '407218522294', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (309,'827 Yuncheng Drive', 'Callao', 99, '79047', '504434452842', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (316,'746 Joliet Lane', 'Kursk', 286, '94878', '688485191923', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (317,'780 Kimberley Way', 'Tabuk', 515, '17032', '824396883951', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (318,'1774 Yaound Place', 'Hubei', 166, '91400', '613124286867', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (321,'651 Pathankot Loop', 'Maharashtra', 336, '59811', '139378397418', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (322,'1359 Zhoushan Parkway', 'Streymoyar', 545, '29763', '46568045367', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (323,'1769 Iwaki Lane', 'Kujawsko-Pomorskie', 97, '25787', '556100547674', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (324,'1145 Vilnius Manor', 'Mxico', 451, '73170', '674805712553', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (326,'470 Boksburg Street', 'Central', 81, '97960', '908029859266', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (334,'1816 Bydgoszcz Loop', 'Dhaka', 234, '64308', '965273813662', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (335,'587 Benguela Manor', 'Illinois', 42, '91590', '165450987037', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (337,'1838 Tabriz Lane', 'Dhaka', 143, '1195', '38988715447', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (338,'431 Szkesfehrvr Avenue', 'Baki', 48, '57828', '119501405123', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (339,'503 Sogamoso Loop', 'Sumqayit', 505, '49812', '834626715837', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (340,'507 Smolensk Loop', 'Sousse', 492, '22971', '80303246192', 
'2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (342,'124 al-Manama Way', 'Hiroshima', 382, '52368', '647899404952', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (344,'1909 Benguela Lane', 'Henan', 581, '19913', '624138001031', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (345,'68 Ponce Parkway', 'Hanoi', 201, '85926', '870635127812', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (346,'1217 Konotop Avenue', 'Gelderland', 151, '504', '718917251754', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (349,'1516 Escobar Drive', 'Tongatapu', 370, '46069', '64536069371', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (350,'1628 Nagareyama Lane', 'Central', 453, '60079', '20064292617', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (351,'1157 Nyeri Loop', 'Adygea', 320, '56380', '262744791493', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (352,'1673 Tangail Drive', 'Daugavpils', 137, '26857', '627924259271', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (353,'381 Kabul Way', 'Taipei', 209, '87272', '55477302294', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (356,'1378 Beira Loop', 'Krasnojarsk', 597, '40792', '840957664136', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (357,'1641 Changhwa Place', 'Nord-Ouest', 52, '37636', '256546485220', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (358,'1698 Southport Loop', 'Hidalgo', 393, '49009', '754358349853', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (360,'619 Hunuco Avenue', 'Shimane', 331, '81508', '142596392389', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (363,'604 Bern Place', 'Jharkhand', 429, '5373', '620719383725', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (365,'1009 Zanzibar Lane', 'Arecibo', 32, '64875', '102396298916', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (367,'1163 London Parkway', 'Par', 66, '6066', '675120358494', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (368,'1658 Jastrzebie-Zdrj Loop', 'Central', 372, '96584', '568367775448', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (369,'817 Laredo Avenue', 'Jalisco', 188, '77449', '151249681135', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, 
district, city_id, postal_code, phone, last_update) VALUES (370,'1565 Tangail Manor', 'Okinawa', 377, '45750', '634445428822', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (371,'1912 Emeishan Drive', 'Balikesir', 50, '33050', '99883471275', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (373,'1922 Miraj Way', 'Esfahan', 356, '13203', '320471479776', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (374,'433 Florencia Street', 'Chihuahua', 250, '91330', '561729882725', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (375,'1049 Matamoros Parkway', 'Karnataka', 191, '69640', '960505250340', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (379,'1133 Rizhao Avenue', 'Pernambuco', 572, '2800', '600264533987', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (381,'1618 Olomouc Manor', 'Kurgan', 285, '26385', '96846695220', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (382,'220 Hidalgo Drive', 'Kermanshah', 265, '45298', '342720754566', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (384,'97 Mogiljov Lane', 'Gujarat', 73, '89294', '924815207181', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (388,'368 Hunuco Boulevard', 'Namibe', 360, '17165', '106439158941', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (389,'500 Lincoln Parkway', 'Jiangsu', 210, '95509', '550306965159', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (390,'102 Chapra Drive', 'Ibaragi', 521, '14073', '776031833752', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (391,'1793 Meixian Place', 'Hmelnytskyi', 258, '33535', '619966287415', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (392,'514 Ife Way', 'Shaba', 315, '69973', '900235712074', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (394,'753 Ilorin Avenue', 'Sichuan', 157, '3656', '464511145118', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (396,'767 Pyongyang Drive', 'Osaka', 229, '83536', '667736124769', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (398,'954 Lapu-Lapu Way', 'Moskova', 278, '8816', '737229003916', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (399,'331 Bydgoszcz Parkway', 'Asturia', 181, '966', '537374465982', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES 
(401,'168 Cianjur Manor', 'Saitama', 228, '73824', '679095087143', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (402,'616 Hagonoy Avenue', 'Krasnojarsk', 39, '46043', '604177838256', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (404,'734 Bchar Place', 'Punjab', 375, '30586', '280578750435', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (405,'530 Lausanne Lane', 'Texas', 135, '11067', '775235029633', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (406,'454 Patiala Lane', 'Fukushima', 276, '13496', '794553031307', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (407,'1346 Mysore Drive', 'Bretagne', 92, '61507', '516647474029', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (409,'1266 Laredo Parkway', 'Saitama', 380, '7664', '1483365694', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (413,'692 Amroha Drive', 'Northern', 230, '35575', '359478883004', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (417,'791 Salinas Street', 'Punjab', 208, '40509', '129953030512', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (419,'397 Sunnyvale Avenue', 'Guanajuato', 19, '55566', '680851640676', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (420,'992 Klerksdorp Loop', 'Utrecht', 23, '33711', '855290087237', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (421,'966 Arecibo Loop', 'Sind', 134, '94018', '15273765306', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (424,'1948 Bayugan Parkway', 'Bihar', 264, '60622', '987306329957', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (425,'1866 al-Qatif Avenue', 'California', 155, '89420', '546793516940', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (428,'1727 Matamoros Place', 'Sawhaj', 465, '78813', '129673677866', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (429,'1269 Botosani Manor', 'Guangdong', 468, '47394', '736517327853', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (431,'1596 Acua Parkway', 'Jharkhand', 418, '70425', '157133457169', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (433,'1823 Hoshiarpur Lane', 'Komi', 510, '33191', '307133768620', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (434,'1404 Taguig Drive', 'Okayama', 547, '87212', 
'572068624538', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (436,'287 Cuautla Boulevard', 'Chuquisaca', 501, '72736', '82619513349', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (438,'596 Huixquilucan Place', 'Nampula', 351, '65892', '342709348083', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (440,'722 Bradford Lane', 'Shandong', 249, '90920', '746251338300', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (443,'1836 Korla Parkway', 'Copperbelt', 272, '55405', '689681677428', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (444,'231 Kaliningrad Place', 'Lombardia', 70, '57833', '575081026569', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (445,'495 Bhimavaram Lane', 'Maharashtra', 144, '3', '82088937724', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (446,'1924 Shimonoseki Drive', 'Batna', 59, '52625', '406784385440', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (449,'1289 Belm Boulevard', 'Tartumaa', 530, '88306', '237368926031', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (452,'207 Cuernavaca Loop', 'Tatarstan', 352, '52671', '782900030287', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (453,'319 Springs Loop', 'Baijeri', 160, '99552', '72524459905', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (456,'814 Simferopol Loop', 'Sinaloa', 154, '48745', '524567129902', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (458,'138 Caracas Boulevard', 'Zulia', 326, '16790', '974433019532', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (461,'1889 Valparai Way', 'Ziguinchor', 600, '75559', '670370974122', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (462,'1485 Bratislava Place', 'Illinois', 435, '83183', '924663855568', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (464,'76 Kermanshah Manor', 'Esfahan', 423, '23343', '762361821578', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (465,'734 Tanshui Avenue', 'Caquet', 170, '70664', '366776723320', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (467,'1621 Tongliao Avenue', 'Irkutsk', 558, '22173', '209342540247', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (469,'1872 Toulon Loop', 'OHiggins', 428, '7939', '928809465153', '2006-02-15 
09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (470,'1088 Ibirit Place', 'Jalisco', 595, '88502', '49084281333', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (471,'1322 Mosul Parkway', 'Shandong', 145, '95400', '268053970382', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (472,'1447 Chatsworth Place', 'Chihuahua', 129, '41545', '769370126331', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (473,'1257 Guadalajara Street', 'Karnataka', 78, '33599', '195337700615', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (474,'1469 Plock Lane', 'Galicia', 388, '95835', '622884741180', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (476,'270 Tambaram Parkway', 'Gauteng', 244, '9668', '248446668735', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (479,'1854 Okara Boulevard', 'Drenthe', 158, '42123', '131912793873', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (480,'421 Yaound Street', 'Sumy', 385, '11363', '726875628268', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (481,'1153 Allende Way', 'Qubec', 179, '20336', '856872225376', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (482,'808 Naala-Porto Parkway', 'England', 500, '41060', '553452430707', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (484,'98 Pyongyang Boulevard', 'Ohio', 11, '88749', '191958435142', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (485,'984 Novoterkassk Loop', 'Gaziantep', 180, '28165', '435118527255', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (486,'64 Korla Street', 'Mwanza', 347, '25145', '510383179153', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (488,'698 Jelets Boulevard', 'Denizli', 142, '2596', '975185523021', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (489,'1297 Alvorada Parkway', 'Ningxia', 587, '11839', '508348602835', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (490,'1909 Dayton Avenue', 'Guangdong', 469, '88513', '702955450528', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (492,'185 Mannheim Lane', 'Stavropol', 408, '23661', '589377568313', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (495,'656 Matamoros Drive', 'Boyac', 487, '19489', '17305839123', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, 
address, district, city_id, postal_code, phone, last_update) VALUES (496,'775 ostka Drive', 'al-Daqahliya', 337, '22358', '171973024401', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (498,'319 Plock Parkway', 'Istanbul', 504, '26101', '854259976812', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (500,'362 Rajkot Lane', 'Gansu', 47, '98030', '962020153680', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (501,'1060 Tandil Lane', 'Shandong', 432, '72349', '211256301880', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (502,'1515 Korla Way', 'England', 589, '57197', '959467760895', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (506,'414 Mandaluyong Street', 'Lubelskie', 314, '16370', '52709222667', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (508,'496 Celaya Drive', 'Nagano', 552, '90797', '759586584889', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (509,'786 Matsue Way', 'Illinois', 245, '37469', '111177206479', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (512,'1269 Ipoh Avenue', 'Eskisehir', 163, '54674', '402630109080', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (514,'1747 Rustenburg Place', 'Bihar', 110, '51369', '442673923363', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (515,'886 Tonghae Place', 'Volgograd', 259, '19450', '711928348157', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (516,'1574 Goinia Boulevard', 'Heilongjiang', 502, '39529', '59634255214', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (517,'548 Uruapan Street', 'Ontario', 312, '35653', '879347453467', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (520,'1778 Gijn Manor', 'Hubei', 594, '35156', '288910576761', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (522,'1768 Udine Loop', 'Battambang', 60, '32347', '448876499197', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (523,'608 Birgunj Parkway', 'Taipei', 116, '400', '627425618482', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (525,'1949 Sanya Street', 'Gumma', 224, '61244', '132100972047', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (527,'1993 0 Loop', 'Liaoning', 588, '41214', '25865528181', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (532,'1427 
Tabuk Place', 'Florida', 101, '31342', '214756839122', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (533,'556 Asuncin Way', 'Mogiljov', 339, '35364', '338244023543', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (534,'486 Ondo Parkway', 'Benguela', 67, '35202', '105882218332', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (537,'958 Sagamihara Lane', 'Mie', 287, '88408', '427274926505', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (539,'1332 Gaziantep Lane', 'Shandong', 80, '22813', '383353187467', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (541,'195 Ilorin Street', 'Chari-Baguirmi', 363, '49250', '8912935608', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (542,'193 Bhusawal Place', 'Kang-won', 539, '9750', '745267607502', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (543,'43 Vilnius Manor', 'Colorado', 42, '79814', '484500282381', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (544,'183 Haiphong Street', 'Jilin', 46, '69953', '488600270038', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (547,'379 Lublin Parkway', 'Toscana', 309, '74568', '921960450089', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (549,'454 Qinhuangdao Drive', 'Tadla-Azilal', 68, '25866', '786270036240', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (551,'182 Nukualofa Drive', 'Sumy', 275, '15414', '426346224043', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (552,'390 Wroclaw Way', 'Hainan', 462, '5753', '357593328658', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (553,'1421 Quilmes Lane', 'Ishikawa', 260, '19151', '135407755975', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (558,'904 Clarksville Drive', 'Zhejiang', 193, '52234', '955349440539', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (559,'1917 Kumbakonam Parkway', 'Vojvodina', 368, '11892', '698182547686', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (560,'1447 Imus Place', 'Gujarat', 426, '12905', '62127829280', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (561,'1497 Fengshan Drive', 'KwaZulu-Natal', 112, '63022', '368738360376', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (562,'869 Shikarpur Way', 'England', 496, '57380', '590764256785', 
'2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (563,'1059 Yuncheng Avenue', 'Vilna', 570, '47498', '107092893983', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (564,'505 Madiun Boulevard', 'Dolnoslaskie', 577, '97271', '970638808606', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (565,'1741 Hoshiarpur Boulevard', 'al-Sharqiya', 79, '22372', '855066328617', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (569,'1342 Abha Boulevard', 'Bukarest', 95, '10714', '997453607116', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (570,'415 Pune Avenue', 'Shandong', 580, '44274', '203202500108', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (571,'1746 Faaa Way', 'Huanuco', 214, '32515', '863080561151', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (572,'539 Hami Way', 'Tokat', 538, '52196', '525518075499', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (573,'1407 Surakarta Manor', 'Moskova', 466, '33224', '324346485054', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (575,'1052 Pathankot Avenue', 'Sichuan', 299, '77397', '128499386727', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (578,'1405 Hagonoy Avenue', 'Slaskie', 133, '86587', '867287719310', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (580,'923 Tangail Boulevard', 'Tokyo-to', 10, '33384', '315528269898', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (581,'186 Skikda Lane', 'Morelos', 131, '89422', '14465669789', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (582,'1568 Celaya Parkway', 'Kaohsiung', 168, '34750', '278669994384', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (583,'1489 Kakamigahara Lane', 'Taipei', 526, '98883', '29341849811', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (584,'1819 Alessandria Loop', 'Campeche', 103, '53829', '377633994405', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (585,'1208 Tama Loop', 'Ninawa', 344, '73605', '954786054144', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (587,'760 Miyakonojo Drive', 'Guerrero', 246, '64682', '294449058179', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (588,'966 Asuncin Way', 'Hidalgo', 212, '62703', '995527378381', '2006-02-15 09:45:30'); +INSERT INTO address 
(address_id, address, district, city_id, postal_code, phone, last_update) VALUES (589,'1584 Ljubertsy Lane', 'England', 494, '22954', '285710089439', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (590,'247 Jining Parkway', 'Banjul', 54, '53446', '170115379190', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (593,'1402 Zanzibar Boulevard', 'Guanajuato', 106, '71102', '387448063440', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (594,'1464 Kursk Parkway', 'Shandong', 574, '17381', '338758048786', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (595,'1074 Sanaa Parkway', 'Loja', 311, '22474', '154124128457', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (596,'1759 Niznekamsk Avenue', 'al-Manama', 14, '39414', '864392582257', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (598,'42 Fontana Avenue', 'Fejr', 512, '14684', '437829801725', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (599,'1895 Zhezqazghan Drive', 'California', 177, '36693', '137809746111', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (601,'844 Bucuresti Place', 'Liaoning', 242, '36603', '935952366111', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (603,'1103 Quilmes Boulevard', 'Piura', 503, '52137', '644021380889', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (604,'1331 Usak Boulevard', 'Vaud', 296, '61960', '145308717464', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (605,'1325 Fukuyama Street', 'Heilongjiang', 537, '27107', '288241215394', '2006-02-15 09:45:30'); INSERT INTO city (city_id, city, country_id, last_update) VALUES (1,'A Corua (La Corua)', 87, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (2,'Abha', 82, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (3,'Abu Dhabi', 101, '2006-02-15 09:45:25 '); @@ -103,7 +1021,587 @@ INSERT INTO city (city_id, city, country_id, last_update) VALUES (16,'al-Qatif', INSERT INTO city (city_id, city, country_id, last_update) VALUES (17,'Alessandria', 49, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (18,'Allappuzha (Alleppey)', 44, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (19,'Allende', 60, '2006-02-15 09:45:25 '); -INSERT INTO city (city_id, city, country_id, last_update) VALUES (20,'Gatineau', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (20,'Almirante Brown', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (21,'Alvorada', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (22,'Ambattur', 44, '2006-02-15 
09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (23,'Amersfoort', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (24,'Amroha', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (25,'Angra dos Reis', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (26,'Anpolis', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (27,'Antofagasta', 22, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (28,'Aparecida de Goinia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (29,'Apeldoorn', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (30,'Araatuba', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (31,'Arak', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (32,'Arecibo', 77, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (33,'Arlington', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (34,'Ashdod', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (35,'Ashgabat', 98, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (36,'Ashqelon', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (37,'Asuncin', 73, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (38,'Athenai', 39, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (39,'Atinsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (40,'Atlixco', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (41,'Augusta-Richmond County', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (42,'Aurora', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (43,'Avellaneda', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (44,'Bag', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (45,'Baha Blanca', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (46,'Baicheng', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (47,'Baiyin', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (48,'Baku', 10, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (49,'Balaiha', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (50,'Balikesir', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (51,'Balurghat', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (52,'Bamenda', 19, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (53,'Bandar Seri Begawan', 16, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES 
(54,'Banjul', 37, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (55,'Barcelona', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (56,'Basel', 91, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (57,'Bat Yam', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (58,'Batman', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (59,'Batna', 2, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (60,'Battambang', 18, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (61,'Baybay', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (62,'Bayugan', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (63,'Bchar', 2, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (64,'Beira', 63, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (65,'Bellevue', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (66,'Belm', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (67,'Benguela', 4, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (68,'Beni-Mellal', 62, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (69,'Benin City', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (70,'Bergamo', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (71,'Berhampore (Baharampur)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (72,'Bern', 91, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (73,'Bhavnagar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (74,'Bhilwara', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (75,'Bhimavaram', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (76,'Bhopal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (77,'Bhusawal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (78,'Bijapur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (79,'Bilbays', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (80,'Binzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (81,'Birgunj', 66, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (82,'Bislig', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (83,'Blumenau', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (84,'Boa Vista', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (85,'Boksburg', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES 
(86,'Botosani', 78, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (87,'Botshabelo', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (88,'Bradford', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (89,'Braslia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (90,'Bratislava', 84, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (91,'Brescia', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (92,'Brest', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (93,'Brindisi', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (94,'Brockton', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (95,'Bucuresti', 78, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (96,'Buenaventura', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (97,'Bydgoszcz', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (98,'Cabuyao', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (99,'Callao', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (100,'Cam Ranh', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (101,'Cape Coral', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (102,'Caracas', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (103,'Carmen', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (104,'Cavite', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (105,'Cayenne', 35, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (106,'Celaya', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (107,'Chandrapur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (108,'Changhwa', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (109,'Changzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (110,'Chapra', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (111,'Charlotte Amalie', 106, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (112,'Chatsworth', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (113,'Cheju', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (114,'Chiayi', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (115,'Chisinau', 61, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (116,'Chungho', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (117,'Cianjur', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, 
country_id, last_update) VALUES (118,'Ciomas', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (119,'Ciparay', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (120,'Citrus Heights', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (121,'Citt del Vaticano', 41, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (122,'Ciudad del Este', 73, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (123,'Clarksville', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (124,'Coacalco de Berriozbal', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (125,'Coatzacoalcos', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (126,'Compton', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (127,'Coquimbo', 22, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (128,'Crdoba', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (129,'Cuauhtmoc', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (130,'Cuautla', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (131,'Cuernavaca', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (132,'Cuman', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (133,'Czestochowa', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (134,'Dadu', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (135,'Dallas', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (136,'Datong', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (137,'Daugavpils', 54, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (138,'Davao', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (139,'Daxian', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (140,'Dayton', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (141,'Deba Habe', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (142,'Denizli', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (143,'Dhaka', 12, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (144,'Dhule (Dhulia)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (145,'Dongying', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (146,'Donostia-San Sebastin', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (147,'Dos Quebradas', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (148,'Duisburg', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) 
VALUES (149,'Dundee', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (150,'Dzerzinsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (151,'Ede', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (152,'Effon-Alaiye', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (153,'El Alto', 14, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (154,'El Fuerte', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (155,'El Monte', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (156,'Elista', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (157,'Emeishan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (158,'Emmen', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (159,'Enshi', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (160,'Erlangen', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (161,'Escobar', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (162,'Esfahan', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (163,'Eskisehir', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (164,'Etawah', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (165,'Ezeiza', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (166,'Ezhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (167,'Faaa', 36, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (168,'Fengshan', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (169,'Firozabad', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (170,'Florencia', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (171,'Fontana', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (172,'Fukuyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (173,'Funafuti', 99, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (174,'Fuyu', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (175,'Fuzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (176,'Gandhinagar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (177,'Garden Grove', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (178,'Garland', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (179,'Gatineau', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (180,'Gaziantep', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (181,'Gijn', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (182,'Gingoog', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (183,'Goinia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (184,'Gorontalo', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (185,'Grand Prairie', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (186,'Graz', 9, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (187,'Greensboro', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (188,'Guadalajara', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (189,'Guaruj', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (190,'guas Lindas de Gois', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (191,'Gulbarga', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (192,'Hagonoy', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (193,'Haining', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (194,'Haiphong', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (195,'Haldia', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (196,'Halifax', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (197,'Halisahar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (198,'Halle/Saale', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (199,'Hami', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (200,'Hamilton', 68, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (201,'Hanoi', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (202,'Hidalgo', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (203,'Higashiosaka', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (204,'Hino', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (205,'Hiroshima', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (206,'Hodeida', 107, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (207,'Hohhot', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (208,'Hoshiarpur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (209,'Hsichuh', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (210,'Huaian', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (211,'Hubli-Dharwad', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (212,'Huejutla de Reyes', 60, '2006-02-15 09:45:25 '); 
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (213,'Huixquilucan', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (214,'Hunuco', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (215,'Ibirit', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (216,'Idfu', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (217,'Ife', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (218,'Ikerre', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (219,'Iligan', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (220,'Ilorin', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (221,'Imus', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (222,'Inegl', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (223,'Ipoh', 59, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (224,'Isesaki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (225,'Ivanovo', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (226,'Iwaki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (227,'Iwakuni', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (228,'Iwatsuki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (229,'Izumisano', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (230,'Jaffna', 88, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (231,'Jaipur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (232,'Jakarta', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (233,'Jalib al-Shuyukh', 53, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (234,'Jamalpur', 12, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (235,'Jaroslavl', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (236,'Jastrzebie-Zdrj', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (237,'Jedda', 82, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (238,'Jelets', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (239,'Jhansi', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (240,'Jinchang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (241,'Jining', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (242,'Jinzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (243,'Jodhpur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (244,'Johannesburg', 85, '2006-02-15 09:45:25 
'); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (245,'Joliet', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (246,'Jos Azueta', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (247,'Juazeiro do Norte', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (248,'Juiz de Fora', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (249,'Junan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (250,'Jurez', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (251,'Kabul', 1, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (252,'Kaduna', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (253,'Kakamigahara', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (254,'Kaliningrad', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (255,'Kalisz', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (256,'Kamakura', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (257,'Kamarhati', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (258,'Kamjanets-Podilskyi', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (259,'Kamyin', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (260,'Kanazawa', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (261,'Kanchrapara', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (262,'Kansas City', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (263,'Karnal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (264,'Katihar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (265,'Kermanshah', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (266,'Kilis', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (267,'Kimberley', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (268,'Kimchon', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (269,'Kingstown', 81, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (270,'Kirovo-Tepetsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (271,'Kisumu', 52, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (272,'Kitwe', 109, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (273,'Klerksdorp', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (274,'Kolpino', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (275,'Konotop', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (276,'Koriyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (277,'Korla', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (278,'Korolev', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (279,'Kowloon and New Kowloon', 42, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (280,'Kragujevac', 108, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (281,'Ktahya', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (282,'Kuching', 59, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (283,'Kumbakonam', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (284,'Kurashiki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (285,'Kurgan', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (286,'Kursk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (287,'Kuwana', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (288,'La Paz', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (289,'La Plata', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (290,'La Romana', 27, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (291,'Laiwu', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (292,'Lancaster', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (293,'Laohekou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (294,'Lapu-Lapu', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (295,'Laredo', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (296,'Lausanne', 91, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (297,'Le Mans', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (298,'Lengshuijiang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (299,'Leshan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (300,'Lethbridge', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (301,'Lhokseumawe', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (302,'Liaocheng', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (303,'Liepaja', 54, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (304,'Lilongwe', 58, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (305,'Lima', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (306,'Lincoln', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (307,'Linz', 9, '2006-02-15 09:45:25 '); +INSERT INTO 
city (city_id, city, country_id, last_update) VALUES (308,'Lipetsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (309,'Livorno', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (310,'Ljubertsy', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (311,'Loja', 28, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (312,'London', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (313,'New London', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (314,'Lublin', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (315,'Lubumbashi', 25, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (316,'Lungtan', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (317,'Luzinia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (318,'Madiun', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (319,'Mahajanga', 57, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (320,'Maikop', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (321,'Malm', 90, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (322,'Manchester', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (323,'Mandaluyong', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (324,'Mandi Bahauddin', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (325,'Mannheim', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (326,'Maracabo', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (327,'Mardan', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (328,'Maring', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (329,'Masqat', 71, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (330,'Matamoros', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (331,'Matsue', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (332,'Meixian', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (333,'Memphis', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (334,'Merlo', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (335,'Mexicali', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (336,'Miraj', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (337,'Mit Ghamr', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (338,'Miyakonojo', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (339,'Mogiljov', 13, '2006-02-15 
09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (340,'Molodetno', 13, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (341,'Monclova', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (342,'Monywa', 64, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (343,'Moscow', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (344,'Mosul', 47, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (345,'Mukateve', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (346,'Munger (Monghyr)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (347,'Mwanza', 93, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (348,'Mwene-Ditu', 25, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (349,'Myingyan', 64, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (350,'Mysore', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (351,'Naala-Porto', 63, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (352,'Nabereznyje Telny', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (353,'Nador', 62, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (354,'Nagaon', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (355,'Nagareyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (356,'Najafabad', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (357,'Naju', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (358,'Nakhon Sawan', 94, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (359,'Nam Dinh', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (360,'Namibe', 4, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (361,'Nantou', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (362,'Nanyang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (363,'NDjamna', 21, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (364,'Newcastle', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (365,'Nezahualcyotl', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (366,'Nha Trang', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (367,'Niznekamsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (368,'Novi Sad', 108, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (369,'Novoterkassk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (370,'Nukualofa', 95, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (371,'Nuuk', 40, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (372,'Nyeri', 52, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (373,'Ocumare del Tuy', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (374,'Ogbomosho', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (375,'Okara', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (376,'Okayama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (377,'Okinawa', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (378,'Olomouc', 26, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (379,'Omdurman', 89, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (380,'Omiya', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (381,'Ondo', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (382,'Onomichi', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (383,'Oshawa', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (384,'Osmaniye', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (385,'ostka', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (386,'Otsu', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (387,'Oulu', 33, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (388,'Ourense (Orense)', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (389,'Owo', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (390,'Oyo', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (391,'Ozamis', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (392,'Paarl', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (393,'Pachuca de Soto', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (394,'Pak Kret', 94, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (395,'Palghat (Palakkad)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (396,'Pangkal Pinang', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (397,'Papeete', 36, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (398,'Parbhani', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (399,'Pathankot', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (400,'Patiala', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (401,'Patras', 39, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (402,'Pavlodar', 51, '2006-02-15 09:45:25 '); +INSERT INTO city 
(city_id, city, country_id, last_update) VALUES (403,'Pemalang', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (404,'Peoria', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (405,'Pereira', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (406,'Phnom Penh', 18, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (407,'Pingxiang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (408,'Pjatigorsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (409,'Plock', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (410,'Po', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (411,'Ponce', 77, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (412,'Pontianak', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (413,'Poos de Caldas', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (414,'Portoviejo', 28, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (415,'Probolinggo', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (416,'Pudukkottai', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (417,'Pune', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (418,'Purnea (Purnia)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (419,'Purwakarta', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (420,'Pyongyang', 70, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (421,'Qalyub', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (422,'Qinhuangdao', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (423,'Qomsheh', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (424,'Quilmes', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (425,'Rae Bareli', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (426,'Rajkot', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (427,'Rampur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (428,'Rancagua', 22, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (429,'Ranchi', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (430,'Richmond Hill', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (431,'Rio Claro', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (432,'Rizhao', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (433,'Roanoke', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (434,'Robamba', 28, 
'2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (435,'Rockford', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (436,'Ruse', 17, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (437,'Rustenburg', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (438,'s-Hertogenbosch', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (439,'Saarbrcken', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (440,'Sagamihara', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (441,'Saint Louis', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (442,'Saint-Denis', 79, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (443,'Sal', 62, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (444,'Salala', 71, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (445,'Salamanca', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (446,'Salinas', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (447,'Salzburg', 9, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (448,'Sambhal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (449,'San Bernardino', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (450,'San Felipe de Puerto Plata', 27, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (451,'San Felipe del Progreso', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (452,'San Juan Bautista Tuxtepec', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (453,'San Lorenzo', 73, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (454,'San Miguel de Tucumn', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (455,'Sanaa', 107, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (456,'Santa Brbara dOeste', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (457,'Santa F', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (458,'Santa Rosa', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (459,'Santiago de Compostela', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (460,'Santiago de los Caballeros', 27, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (461,'Santo Andr', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (462,'Sanya', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (463,'Sasebo', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (464,'Satna', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (465,'Sawhaj', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (466,'Serpuhov', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (467,'Shahr-e Kord', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (468,'Shanwei', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (469,'Shaoguan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (470,'Sharja', 101, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (471,'Shenzhen', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (472,'Shikarpur', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (473,'Shimoga', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (474,'Shimonoseki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (475,'Shivapuri', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (476,'Shubra al-Khayma', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (477,'Siegen', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (478,'Siliguri (Shiliguri)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (479,'Simferopol', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (480,'Sincelejo', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (481,'Sirjan', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (482,'Sivas', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (483,'Skikda', 2, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (484,'Smolensk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (485,'So Bernardo do Campo', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (486,'So Leopoldo', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (487,'Sogamoso', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (488,'Sokoto', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (489,'Songkhla', 94, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (490,'Sorocaba', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (491,'Soshanguve', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (492,'Sousse', 96, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (493,'South Hill', 5, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (494,'Southampton', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (495,'Southend-on-Sea', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (496,'Southport', 
102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (497,'Springs', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (498,'Stara Zagora', 17, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (499,'Sterling Heights', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (500,'Stockport', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (501,'Sucre', 14, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (502,'Suihua', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (503,'Sullana', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (504,'Sultanbeyli', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (505,'Sumqayit', 10, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (506,'Sumy', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (507,'Sungai Petani', 59, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (508,'Sunnyvale', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (509,'Surakarta', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (510,'Syktyvkar', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (511,'Syrakusa', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (512,'Szkesfehrvr', 43, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (513,'Tabora', 93, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (514,'Tabriz', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (515,'Tabuk', 82, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (516,'Tafuna', 3, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (517,'Taguig', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (518,'Taizz', 107, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (519,'Talavera', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (520,'Tallahassee', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (521,'Tama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (522,'Tambaram', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (523,'Tanauan', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (524,'Tandil', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (525,'Tangail', 12, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (526,'Tanshui', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (527,'Tanza', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (528,'Tarlac', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (529,'Tarsus', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (530,'Tartu', 30, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (531,'Teboksary', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (532,'Tegal', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (533,'Tel Aviv-Jaffa', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (534,'Tete', 63, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (535,'Tianjin', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (536,'Tiefa', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (537,'Tieli', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (538,'Tokat', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (539,'Tonghae', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (540,'Tongliao', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (541,'Torren', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (542,'Touliu', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (543,'Toulon', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (544,'Toulouse', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (545,'Trshavn', 32, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (546,'Tsaotun', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (547,'Tsuyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (548,'Tuguegarao', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (549,'Tychy', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (550,'Udaipur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (551,'Udine', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (552,'Ueda', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (553,'Uijongbu', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (554,'Uluberia', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (555,'Urawa', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (556,'Uruapan', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (557,'Usak', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (558,'Usolje-Sibirskoje', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (559,'Uttarpara-Kotrung', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, 
country_id, last_update) VALUES (560,'Vaduz', 55, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (561,'Valencia', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (562,'Valle de la Pascua', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (563,'Valle de Santiago', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (564,'Valparai', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (565,'Vancouver', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (566,'Varanasi (Benares)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (567,'Vicente Lpez', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (568,'Vijayawada', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (569,'Vila Velha', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (570,'Vilnius', 56, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (571,'Vinh', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (572,'Vitria de Santo Anto', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (573,'Warren', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (574,'Weifang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (575,'Witten', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (576,'Woodridge', 8, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (577,'Wroclaw', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (578,'Xiangfan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (579,'Xiangtan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (580,'Xintai', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (581,'Xinxiang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (582,'Yamuna Nagar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (583,'Yangor', 65, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (584,'Yantai', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (585,'Yaound', 19, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (586,'Yerevan', 7, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (587,'Yinchuan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (588,'Yingkou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (589,'York', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (590,'Yuncheng', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (591,'Yuzhou', 23, 
'2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (592,'Zalantun', 23, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (593,'Zanzibar', 93, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (594,'Zaoyang', 23, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (595,'Zapopan', 60, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (596,'Zaria', 69, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (597,'Zeleznogorsk', 80, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (598,'Zhezqazghan', 51, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (599,'Zhoushan', 23, '2006-02-15 09:45:25 ');
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (600,'Ziguinchor', 83, '2006-02-15 09:45:25 ');
 INSERT INTO country (country_id, country, last_update) VALUES (1,'Afghanistan', '2006-02-15 09:44:00 ');
 INSERT INTO country (country_id, country, last_update) VALUES (2,'Algeria', '2006-02-15 09:44:00 ');
 INSERT INTO country (country_id, country, last_update) VALUES (3,'American Samoa', '2006-02-15 09:44:00 ');
@@ -123,8 +1621,96 @@ INSERT INTO country (country_id, country, last_update) VALUES (16,'Brunei', '200
 INSERT INTO country (country_id, country, last_update) VALUES (17,'Bulgaria', '2006-02-15 09:44:00 ');
 INSERT INTO country (country_id, country, last_update) VALUES (18,'Cambodia', '2006-02-15 09:44:00 ');
 INSERT INTO country (country_id, country, last_update) VALUES (19,'Cameroon', '2006-02-15 09:44:00 ');
-INSERT INTO country (country_id, country, last_update) VALUES (20,'Yemen', '2006-02-15 09:44:00 ');
-SELECT cu.client_customer_id AS id,
+INSERT INTO country (country_id, country, last_update) VALUES (20,'Canada', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (21,'Chad', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (22,'Chile', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (23,'China', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (24,'Colombia', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (25,'Congo, The Democratic Republic of the', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (26,'Czech Republic', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (27,'Dominican Republic', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (28,'Ecuador', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (29,'Egypt', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (30,'Estonia', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (31,'Ethiopia', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (32,'Faroe Islands', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (33,'Finland', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (34,'France', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES
(35,'French Guiana', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (36,'French Polynesia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (37,'Gambia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (38,'Germany', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (39,'Greece', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (40,'Greenland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (41,'Holy See (Vatican City State)', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (42,'Hong Kong', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (43,'Hungary', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (44,'India', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (45,'Indonesia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (46,'Iran', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (47,'Iraq', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (48,'Israel', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (49,'Italy', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (50,'Japan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (51,'Kazakstan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (52,'Kenya', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (53,'Kuwait', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (54,'Latvia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (55,'Liechtenstein', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (56,'Lithuania', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (57,'Madagascar', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (58,'Malawi', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (59,'Malaysia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (60,'Mexico', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (61,'Moldova', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (62,'Morocco', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (63,'Mozambique', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (64,'Myanmar', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (65,'Nauru', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (66,'Nepal', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (67,'Netherlands', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (68,'New Zealand', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES 
(69,'Nigeria', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (70,'North Korea', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (71,'Oman', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (72,'Pakistan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (73,'Paraguay', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (74,'Peru', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (75,'Philippines', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (76,'Poland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (77,'Puerto Rico', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (78,'Romania', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (79,'Runion', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (80,'Russian Federation', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (81,'Saint Vincent and the Grenadines', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (82,'Saudi Arabia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (83,'Senegal', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (84,'Slovakia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (85,'South Africa', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (86,'South Korea', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (87,'Spain', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (88,'Sri Lanka', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (89,'Sudan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (90,'Sweden', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (91,'Switzerland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (92,'Taiwan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (93,'Tanzania', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (94,'Thailand', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (95,'Tonga', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (96,'Tunisia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (97,'Turkey', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (98,'Turkmenistan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (99,'Tuvalu', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (100,'Ukraine', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (101,'United Arab Emirates', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (102,'United Kingdom', '2006-02-15 09:44:00 '); +INSERT INTO country 
(103,'United States', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (104,'Venezuela', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (105,'Vietnam', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (106,'Virgin Islands, U.S.', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (107,'Yemen', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (108,'Yugoslavia', '2006-02-15 09:44:00 ');
+SELECT cu.customer_id AS id,
 cu.first_name || ' ' || cu.last_name AS name,
 a.address,
 a.postal_code AS "zip code",
@@ -136,17 +1722,370 @@ SELECT cu.client_customer_id AS id,
 ELSE ''
 END AS notes,
 cu.store_id AS sid
- FROM client_customer cu
+ FROM customer cu
 INNER JOIN address a USING (address_id)
 INNER JOIN city USING (city_id)
 INNER JOIN country USING (country_id)
 ORDER BY address, city, country, id;
- id | name | address | zip code | phone | city | country | notes | sid
-----+------+---------+----------+-------+------+---------+-------+-----
-(0 rows)
+ id | name | address | zip code | phone | city | country | notes | sid
+-----+----------------------+---------------------------------+----------+--------------+----------------------------+---------------------------------------+--------+-----
+ 213 | Gina Williamson | 1001 Miyakonojo Lane | 67924 | 584316724815 | Taizz | Yemen | active | 1
+ 108 | Tracy Cole | 1002 Ahmadnagar Manor | 93026 | 371490777743 | Huixquilucan | Mexico | active | 1
+ 360 | Ralph Madrigal | 1009 Zanzibar Lane | 64875 | 102396298916 | Arecibo | Puerto Rico | active | 2
+ 173 | Audrey Ray | 1010 Klerksdorp Way | 6802 | 493008546874 | Graz | Austria | active | 1
+ 144 | Clara Shaw | 1027 Songkhla Manor | 30861 | 563660187896 | Molodetno | Belarus | active | 1
+ 139 | Amber Dixon | 1029 Dzerzinsk Manor | 57519 | 33173584456 | Touliu | Taiwan | active | 1
+ 385 | Phillip Holm | 102 Chapra Drive | 14073 | 776031833752 | Tama | Japan | active | 1
+ 69 | Judy Gray | 1031 Daugavpils Parkway | 59025 | 107137400143 | Bchar | Algeria | active | 2
+ 370 | Wayne Truong | 1049 Matamoros Parkway | 69640 | 960505250340 | Gulbarga | India | active | 2
+ 569 | Dave Gardiner | 1052 Pathankot Avenue | 77397 | 128499386727 | Leshan | China | active | 2
+ 557 | Felix Gaffney | 1059 Yuncheng Avenue | 47498 | 107092893983 | Vilnius | Lithuania | active | 1
+ 496 | Tyler Wren | 1060 Tandil Lane | 72349 | 211256301880 | Rizhao | China | active | 2
+ 196 | Alma Austin | 1074 Binzhou Manor | 36490 | 331132568928 | Mannheim | Germany | active | 1
+ 589 | Tracy Herrmann | 1074 Sanaa Parkway | 22474 | 154124128457 | Loja | Ecuador | active | 1
+ 465 | Floyd Gandy | 1088 Ibirit Place | 88502 | 49084281333 | Zapopan | Mexico | active | 1
+ 179 | Dana Hart | 1089 Iwatsuki Avenue | 35109 | 866092335135 | Kirovo-Tepetsk | Russian Federation | active | 1
+ 241 | Heidi Larson | 1103 Bilbays Parkway | 87660 | 279979529227 | Xiangfan | China | active | 2
+ 597 | Freddie Duggan | 1103 Quilmes Boulevard | 52137 | 644021380889 | Sullana | Peru | active | 1
+ 58 | Jean Bell | 1114 Liepaja Street | 69226 | 212869228936 | Kuching | Malaysia | active | 1
+ 2 | Patricia Johnson | 1121 Loja Avenue | 17886 | 838635286649 | San Bernardino | United States | active | 1
+ 70 | Christina Ramirez | 1124 Buenaventura Drive | 6856 | 407733804223 | al-Hawiya | Saudi Arabia | active | 2
+ 374 | Jeremy
Hurtado | 1133 Rizhao Avenue | 2800 | 600264533987 | Vitria de Santo Anto | Brazil | active | 2 + 51 | Alice Stewart | 1135 Izumisano Parkway | 48150 | 171822533480 | Fontana | United States | active | 1 + 319 | Ronald Weiner | 1145 Vilnius Manor | 73170 | 674805712553 | San Felipe del Progreso | Mexico | active | 2 + 240 | Marlene Welch | 1148 Saarbrcken Parkway | 1921 | 137773001988 | Iwaki | Japan | active | 1 + 476 | Derrick Bourque | 1153 Allende Way | 20336 | 856872225376 | Gatineau | Canada | active | 1 + 346 | Arthur Simpkins | 1157 Nyeri Loop | 56380 | 262744791493 | Maikop | Russian Federation | active | 1 + 50 | Diane Collins | 115 Hidalgo Parkway | 80168 | 307703950263 | Omdurman | Sudan | active | 1 + 362 | Nicholas Barfield | 1163 London Parkway | 6066 | 675120358494 | Belm | Brazil | active | 1 + 188 | Melanie Armstrong | 1166 Changhwa Street | 58852 | 650752094490 | Bayugan | Philippines | active | 1 + 218 | Vera Mccoy | 1168 Najafabad Parkway | 40301 | 886649065861 | Kabul | Afghanistan | active | 1 + 122 | Thelma Murray | 1175 Tanauan Way | 64615 | 937222955822 | Lima | Peru | active | 1 + 292 | Misty Lambert | 1192 Tongliao Street | 19065 | 350970907017 | Sharja | United Arab Emirates | active | 2 + 216 | Natalie Meyer | 1201 Qomsheh Manor | 21464 | 873492228462 | Aparecida de Goinia | Brazil | active | 1 + 579 | Daryl Larue | 1208 Tama Loop | 73605 | 954786054144 | Mosul | Iraq | active | 2 + 59 | Cheryl Murphy | 1213 Ranchi Parkway | 94352 | 800024380485 | Mysore | India | active | 1 + 63 | Ashley Richardson | 1214 Hanoi Way | 67055 | 491001136577 | Lincoln | United States | active | 1 + 204 | Rosemary Schmidt | 1215 Pyongyang Parkway | 25238 | 646237101779 | Usak | Turkey | active | 1 + 341 | Peter Menard | 1217 Konotop Avenue | 504 | 718917251754 | Ede | Netherlands | active | 1 + 215 | Jessie Banks | 1229 Valencia Parkway | 99124 | 352679173732 | Stara Zagora | Bulgaria | active | 2 + 242 | Glenda Frazier | 1246 Boksburg Parkway | 28349 | 890283544295 | Qinhuangdao | China | active | 1 + 337 | Jerry Jordon | 124 al-Manama Way | 52368 | 647899404952 | Onomichi | Japan | active | 1 + 230 | Joy George | 1256 Bislig Boulevard | 50598 | 479007229460 | Botosani | Romania | active | 2 + 468 | Tim Cary | 1257 Guadalajara Street | 33599 | 195337700615 | Bijapur | India | active | 1 + 404 | Stanley Scroggins | 1266 Laredo Parkway | 7664 | 1483365694 | Omiya | Japan | active | 2 + 424 | Kyle Spurlock | 1269 Botosani Manor | 47394 | 736517327853 | Shanwei | China | active | 2 + 507 | Edgar Rhoads | 1269 Ipoh Avenue | 54674 | 402630109080 | Eskisehir | Turkey | active | 2 + 163 | Cathy Spencer | 1287 Xiangfan Boulevard | 57844 | 819416131190 | Kakamigahara | Japan | active | 1 + 444 | Marcus Hidalgo | 1289 Belm Boulevard | 88306 | 237368926031 | Tartu | Estonia | active | 2 + 169 | Erica Matthews | 1294 Firozabad Drive | 70618 | 161801569569 | Pingxiang | China | active | 2 + 484 | Roberto Vu | 1297 Alvorada Parkway | 11839 | 508348602835 | Yinchuan | China | active | 1 + 96 | Diana Alexander | 1308 Arecibo Way | 30695 | 6171054059 | Augusta-Richmond County | United States | active | 1 + 281 | Leona Obrien | 1308 Sumy Loop | 30657 | 583021225407 | Fuzhou | China | active | 2 + 248 | Caroline Bowman | 1309 Weifang Street | 57338 | 435785045362 | Tallahassee | United States | active | 1 + 466 | Leo Ebert | 1322 Mosul Parkway | 95400 | 268053970382 | Dongying | China | active | 1 + 599 | Austin Cintron | 1325 Fukuyama Street | 27107 | 288241215394 | Tieli | China | active | 2 + 598 
| Wade Delvalle | 1331 Usak Boulevard | 61960 | 145308717464 | Lausanne | Switzerland | active | 1 + 533 | Jessie Milam | 1332 Gaziantep Lane | 22813 | 383353187467 | Binzhou | China | active | 1 + 211 | Stacey Montgomery | 1333 Haldia Street | 82161 | 408304391718 | Fuyu | China | active | 1 + 147 | Joanne Robertson | 1337 Lincoln Parkway | 99457 | 597815221267 | Urawa | Japan | active | 2 + 563 | Ken Prewitt | 1342 Abha Boulevard | 10714 | 997453607116 | Bucuresti | Romania | active | 2 + 402 | Luis Yanez | 1346 Mysore Drive | 61507 | 516647474029 | Brest | France | active | 1 + 317 | Edward Baugh | 1359 Zhoushan Parkway | 29763 | 46568045367 | Trshavn | Faroe Islands | active | 2 + 351 | Jack Foust | 1378 Beira Loop | 40792 | 840957664136 | Zeleznogorsk | Russian Federation | active | 1 + 104 | Rita Graham | 1386 Yangor Avenue | 80720 | 449216226468 | Toulon | France | active | 1 + 453 | Calvin Martel | 138 Caracas Boulevard | 16790 | 974433019532 | Maracabo | Venezuela | active | 1 + 587 | Sergio Stanfield | 1402 Zanzibar Boulevard | 71102 | 387448063440 | Celaya | Mexico | active | 1 + 429 | Frederick Isbell | 1404 Taguig Drive | 87212 | 572068624538 | Tsuyama | Japan | active | 2 + 282 | Jenny Castro | 1405 Chisinau Place | 8160 | 62781725285 | Ponce | Puerto Rico | active | 2 + 572 | Sidney Burleson | 1405 Hagonoy Avenue | 86587 | 867287719310 | Czestochowa | Poland | active | 1 + 567 | Alfredo Mcadams | 1407 Surakarta Manor | 33224 | 324346485054 | Serpuhov | Russian Federation | active | 2 + 187 | Brittany Riley | 140 Chiayi Parkway | 38982 | 855863906434 | Sumy | Ukraine | active | 2 + 547 | Kurt Emmons | 1421 Quilmes Lane | 19151 | 135407755975 | Kanazawa | Japan | active | 1 + 28 | Cynthia Young | 1425 Shikarpur Manor | 65599 | 678220867005 | Munger (Monghyr) | India | active | 1 + 526 | Karl Seal | 1427 Tabuk Place | 31342 | 214756839122 | Cape Coral | United States | active | 2 + 37 | Pamela Baker | 1440 Fukuyama Loop | 47929 | 912257250465 | Nanyang | China | active | 1 + 467 | Alvin Deloach | 1447 Chatsworth Place | 41545 | 769370126331 | Cuauhtmoc | Mexico | active | 2 + 554 | Dwayne Olvera | 1447 Imus Place | 12905 | 62127829280 | Rajkot | India | active | 1 + 43 | Christine Roberts | 1447 Imus Way | 48942 | 539758313890 | Faaa | French Polynesia | active | 2 + 588 | Marion Ocampo | 1464 Kursk Parkway | 17381 | 338758048786 | Weifang | China | active | 1 + 469 | Wesley Bull | 1469 Plock Lane | 95835 | 622884741180 | Ourense (Orense) | Spain | active | 2 + 177 | Samantha Duncan | 146 Johannesburg Way | 54132 | 953689007081 | Matamoros | Mexico | active | 2 + 247 | Stella Moreno | 1473 Changhwa Parkway | 75933 | 266798132374 | Coacalco de Berriozbal | Mexico | active | 1 + 243 | Lydia Burke | 1483 Pathankot Street | 37288 | 686015532180 | San Miguel de Tucumn | Argentina | active | 1 + 457 | Bill Gavin | 1485 Bratislava Place | 83183 | 924663855568 | Rockford | United States | active | 2 + 577 | Clifton Malcolm | 1489 Kakamigahara Lane | 98883 | 29341849811 | Tanshui | Taiwan | active | 2 + 555 | Dwight Lombardi | 1497 Fengshan Drive | 63022 | 368738360376 | Chatsworth | South Africa | active | 1 + 252 | Mattie Hoffman | 1497 Yuzhou Drive | 3433 | 246810237916 | London | United Kingdom | active | 2 + 497 | Gilbert Sledge | 1515 Korla Way | 57197 | 959467760895 | York | United Kingdom | active | 2 + 344 | Henry Billingsley | 1516 Escobar Drive | 46069 | 64536069371 | Nukualofa | Tonga | active | 1 + 268 | Nina Soto | 1519 Ilorin Place | 49298 | 357445645426 | Palghat 
(Palakkad) | India | active | 1 + 94 | Norma Gonzales | 152 Kitwe Parkway | 53182 | 835433605312 | Bislig | Philippines | active | 1 + 10 | Dorothy Taylor | 1531 Sal Drive | 53628 | 648856936185 | Esfahan | Iran | active | 1 + 167 | Sally Pierce | 1540 Wroclaw Drive | 62686 | 182363341674 | Chandrapur | India | active | 2 + 11 | Lisa Anderson | 1542 Tarlac Parkway | 1027 | 635297277345 | Sagamihara | Japan | active | 2 + 165 | Lorraine Stephens | 154 Tallahassee Loop | 62250 | 935508855935 | Hami | China | active | 2 + 75 | Tammy Sanders | 1551 Rampur Lane | 72394 | 251164340471 | Changhwa | Taiwan | active | 2 + 85 | Anne Powell | 1557 Ktahya Boulevard | 88002 | 720998247660 | Bradford | United Kingdom | active | 2 + 151 | Megan Palmer | 1560 Jelets Boulevard | 77777 | 189446090264 | Laiwu | China | active | 2 + 365 | Bruce Schwarz | 1565 Tangail Manor | 45750 | 634445428822 | Okinawa | Japan | active | 2 + 4 | Barbara Jones | 1566 Inegl Manor | 53561 | 705814003527 | Myingyan | Myanmar | active | 2 + 576 | Morris Mccarter | 1568 Celaya Parkway | 34750 | 278669994384 | Fengshan | Taiwan | active | 2 + 511 | Chester Benner | 1574 Goinia Boulevard | 39529 | 59634255214 | Suihua | China | active | 1 + 583 | Marshall Thorn | 1584 Ljubertsy Lane | 22954 | 285710089439 | Southampton | United Kingdom | active | 1 + 67 | Kelly Torres | 1586 Guaruj Place | 5135 | 947233365992 | Xiangtan | China | active | 1 + 266 | Nora Herrera | 1587 Loja Manor | 5410 | 621625204422 | Salzburg | Austria | active | 2 + 426 | Bradley Motley | 1596 Acua Parkway | 70425 | 157133457169 | Purnea (Purnia) | India | active | 1 + 97 | Annie Russell | 1599 Plock Drive | 71986 | 817248913162 | Tete | Mozambique | active | 2 + 376 | Randy Gaither | 1618 Olomouc Manor | 26385 | 96846695220 | Kurgan | Russian Federation | active | 1 + 462 | Warren Sherrod | 1621 Tongliao Avenue | 22173 | 209342540247 | Usolje-Sibirskoje | Russian Federation | active | 2 + 345 | Carl Artis | 1628 Nagareyama Lane | 60079 | 20064292617 | San Lorenzo | Paraguay | active | 1 + 42 | Carolyn Perez | 1632 Bislig Avenue | 61117 | 471675840679 | Pak Kret | Thailand | active | 2 + 299 | James Gannon | 1635 Kuwana Boulevard | 52137 | 710603868323 | Hiroshima | Japan | active | 2 + 352 | Albert Crouse | 1641 Changhwa Place | 37636 | 256546485220 | Bamenda | Cameroon | active | 1 + 363 | Roy Whiting | 1658 Jastrzebie-Zdrj Loop | 96584 | 568367775448 | Nyeri | Kenya | active | 2 + 140 | Eva Ramos | 1666 Beni-Mellal Place | 13377 | 9099941466 | Clarksville | United States | active | 1 + 30 | Melissa King | 1668 Anpolis Street | 50199 | 525255540978 | Lungtan | Taiwan | active | 1 + 347 | Ryan Salisbury | 1673 Tangail Drive | 26857 | 627924259271 | Daugavpils | Latvia | active | 2 + 396 | Earl Shanks | 168 Cianjur Manor | 73824 | 679095087143 | Iwatsuki | Japan | active | 1 + 95 | Paula Bryant | 1697 Tanauan Lane | 22870 | 4764773857 | Pathankot | India | active | 2 + 353 | Jonathan Scarborough | 1698 Southport Loop | 49009 | 754358349853 | Pachuca de Soto | Mexico | active | 1 + 62 | Joan Cooper | 1717 Guadalajara Lane | 85505 | 914090181665 | Saint Louis | United States | active | 1 + 45 | Janet Phillips | 1718 Valencia Street | 37359 | 675292816413 | Antofagasta | Chile | active | 1 + 423 | Alfred Casillas | 1727 Matamoros Place | 78813 | 129673677866 | Sawhaj | Egypt | active | 2 + 233 | Lillie Kim | 1736 Cavite Place | 98775 | 431770603551 | Idfu | Egypt | active | 2 + 87 | Wanda Patterson | 1740 Portoviejo Avenue | 29932 | 198123170793 | Sincelejo | 
Colombia | active | 1 + 559 | Everett Banda | 1741 Hoshiarpur Boulevard | 22372 | 855066328617 | Bilbays | Egypt | active | 2 + 565 | Jaime Nettles | 1746 Faaa Way | 32515 | 863080561151 | Hunuco | Peru | active | 2 + 509 | Raul Fortier | 1747 Rustenburg Place | 51369 | 442673923363 | Chapra | India | active | 1 + 137 | Rhonda Kennedy | 1749 Daxian Place | 11044 | 963369996279 | Apeldoorn | Netherlands | active | 2 + 590 | Seth Hannon | 1759 Niznekamsk Avenue | 39414 | 864392582257 | al-Manama | Bahrain | active | 2 + 249 | Dora Medina | 1760 Oshawa Manor | 38140 | 56257502250 | Tianjin | China | active | 2 + 267 | Margie Wade | 1762 Paarl Parkway | 53928 | 192459639410 | Lengshuijiang | China | active | 1 + 516 | Elmer Noe | 1768 Udine Loop | 32347 | 448876499197 | Battambang | Cambodia | active | 2 + 318 | Brian Wyman | 1769 Iwaki Lane | 25787 | 556100547674 | Bydgoszcz | Poland | active | 1 + 313 | Donald Mahon | 1774 Yaound Place | 91400 | 613124286867 | Ezhou | China | active | 2 + 514 | Franklin Troutman | 1778 Gijn Manor | 35156 | 288910576761 | Zaoyang | China | active | 2 + 23 | Sarah Lewis | 1780 Hino Boulevard | 7716 | 902731229323 | Liepaja | Latvia | active | 2 + 386 | Todd Tan | 1793 Meixian Place | 33535 | 619966287415 | Kamjanets-Podilskyi | Ukraine | active | 1 + 53 | Heather Morris | 17 Kabul Boulevard | 38594 | 697760867968 | Nagareyama | Japan | active | 1 + 329 | Frank Waggoner | 1816 Bydgoszcz Loop | 64308 | 965273813662 | Jamalpur | Bangladesh | active | 2 + 578 | Willard Lumpkin | 1819 Alessandria Loop | 53829 | 377633994405 | Carmen | Mexico | active | 2 + 223 | Melinda Fernandez | 1820 Maring Parkway | 88307 | 99760893676 | Mandi Bahauddin | Pakistan | active | 1 + 428 | Herbert Kruger | 1823 Hoshiarpur Lane | 33191 | 307133768620 | Syktyvkar | Russian Federation | active | 2 + 545 | Julio Noland | 182 Nukualofa Drive | 15414 | 426346224043 | Konotop | Ukraine | active | 2 + 332 | Stephen Qualls | 1838 Tabriz Lane | 1195 | 38988715447 | Dhaka | Bangladesh | active | 1 + 183 | Ida Andrews | 1839 Szkesfehrvr Parkway | 55709 | 947468818183 | Luzinia | Brazil | active | 2 + 538 | Ted Breaux | 183 Haiphong Street | 69953 | 488600270038 | Baicheng | China | active | 2 + 174 | Yvonne Watkins | 1848 Salala Boulevard | 25220 | 48265851133 | Ocumare del Tuy | Venezuela | active | 2 + 474 | Dustin Gillette | 1854 Okara Boulevard | 42123 | 131912793873 | Emmen | Netherlands | active | 2 + 129 | Carrie Porter | 1854 Tieli Street | 15819 | 509492324775 | Liaocheng | China | active | 1 + 487 | Hector Poindexter | 185 Mannheim Lane | 23661 | 589377568313 | Pjatigorsk | Russian Federation | active | 2 + 420 | Jacob Lance | 1866 al-Qatif Avenue | 89420 | 546793516940 | El Monte | United States | active | 1 + 575 | Isaac Oglesby | 186 Skikda Lane | 89422 | 14465669789 | Cuernavaca | Mexico | active | 2 + 464 | Jerome Kenyon | 1872 Toulon Loop | 7939 | 928809465153 | Rancagua | Chile | active | 1 + 170 | Beatrice Arnold | 1877 Ezhou Lane | 63337 | 264541743403 | Udaipur | India | active | 1 + 301 | Robert Baughman | 1883 Maikop Lane | 68469 | 96110042435 | Kaliningrad | Russian Federation | active | 2 + 274 | Naomi Jennings | 1884 Shikarpur Avenue | 85548 | 959949395183 | Karnal | India | active | 1 + 456 | Ronnie Ricketts | 1889 Valparai Way | 75559 | 670370974122 | Ziguinchor | Senegal | active | 2 + 593 | Rene Mcalister | 1895 Zhezqazghan Drive | 36693 | 137809746111 | Garden Grove | United States | active | 2 + 302 | Michael Silverman | 1908 Gaziantep Place | 58979 | 108053751300 
| Tiefa | China | active | 1 + 339 | Walter Perryman | 1909 Benguela Lane | 19913 | 624138001031 | Xinxiang | China | active | 2 + 485 | Clyde Tobias | 1909 Dayton Avenue | 88513 | 702955450528 | Shaoguan | China | active | 1 + 366 | Brandon Huey | 1912 Emeishan Drive | 33050 | 99883471275 | Balikesir | Turkey | active | 1 + 1 | Mary Smith | 1913 Hanoi Way | 35200 | 28303384290 | Sasebo | Japan | active | 1 + 100 | Robin Hayes | 1913 Kamakura Place | 97287 | 942570536750 | Jelets | Russian Federation | active | 1 + 553 | Max Pitt | 1917 Kumbakonam Parkway | 11892 | 698182547686 | Novi Sad | Yugoslavia | active | 1 + 368 | Harry Arce | 1922 Miraj Way | 13203 | 320471479776 | Najafabad | Iran | active | 1 + 441 | Mario Cheatham | 1924 Shimonoseki Drive | 52625 | 406784385440 | Batna | Algeria | active | 1 + 221 | Bessie Morrison | 1926 Gingoog Street | 22824 | 469738825391 | Syrakusa | Italy | active | 1 + 291 | Toni Holt | 1936 Cuman Avenue | 61195 | 976798660411 | Roanoke | United States | active | 1 + 536 | Fernando Churchill | 193 Bhusawal Place | 9750 | 745267607502 | Tonghae | South Korea | active | 2 + 88 | Bonnie Hughes | 1942 Ciparay Parkway | 82624 | 978987363654 | Cheju | South Korea | active | 2 + 276 | Brandy Graves | 1944 Bamenda Way | 24645 | 75975221996 | Warren | United States | active | 1 + 419 | Chad Carbone | 1948 Bayugan Parkway | 60622 | 987306329957 | Katihar | India | active | 1 + 519 | Ron Deluca | 1949 Sanya Street | 61244 | 132100972047 | Isesaki | Japan | active | 2 + 90 | Ruby Washington | 1952 Chatsworth Drive | 25958 | 991562402283 | Meixian | China | active | 2 + 148 | Eleanor Hunt | 1952 Pune Lane | 92150 | 354615066969 | Saint-Denis | Runion | active | 1 + 535 | Javier Elrod | 195 Ilorin Street | 49250 | 8912935608 | NDjamna | Chad | active | 1 + 152 | Alicia Mills | 1963 Moscow Place | 64863 | 761379480249 | Nagaon | India | active | 1 + 238 | Nellie Garrett | 1964 Gijn Manor | 14408 | 918119601885 | Shimoga | India | active | 1 + 64 | Judith Cox | 1966 Amroha Avenue | 70385 | 333489324603 | Daxian | China | active | 2 + 117 | Edith Mcdonald | 1967 Sincelejo Place | 73644 | 577812616052 | Gandhinagar | India | active | 1 + 275 | Carole Barnett | 1980 Kamjanets-Podilskyi Street | 89502 | 874337098891 | Peoria | United States | active | 2 + 521 | Roland South | 1993 0 Loop | 41214 | 25865528181 | Yingkou | China | active | 2 + 44 | Marie Turner | 1998 Halifax Drive | 76022 | 177727722820 | Lipetsk | Russian Federation | active | 1 + 226 | Maureen Little | 201 Effon-Alaiye Way | 64344 | 684192903087 | Asuncin | Paraguay | active | 2 + 447 | Clifford Bowens | 207 Cuernavaca Loop | 52671 | 782900030287 | Nabereznyje Telny | Russian Federation | active | 1 + 377 | Howard Fortner | 220 Hidalgo Drive | 45298 | 342720754566 | Kermanshah | Iran | active | 1 + 214 | Kristin Johnston | 226 Brest Manor | 2299 | 785881412500 | Sunnyvale | United States | active | 1 + 283 | Felicia Sutton | 226 Halifax Street | 58492 | 790651020929 | Korla | China | active | 1 + 439 | Alexander Fennell | 231 Kaliningrad Place | 57833 | 575081026569 | Bergamo | Italy | active | 2 + 120 | Sylvia Ortiz | 241 Mosul Lane | 76157 | 765345144779 | Dos Quebradas | Colombia | active | 2 + 584 | Salvador Teel | 247 Jining Parkway | 53446 | 170115379190 | Banjul | Gambia | active | 2 + 13 | Karen Jackson | 270 Amroha Parkway | 29610 | 695479687538 | Osmaniye | Turkey | active | 2 + 471 | Dean Sauer | 270 Tambaram Parkway | 9668 | 248446668735 | Johannesburg | South Africa | active | 1 + 17 | Donna 
Thompson | 270 Toulon Boulevard | 81766 | 407752414682 | Elista | Russian Federation | active | 1 + 431 | Joel Francisco | 287 Cuautla Boulevard | 72736 | 82619513349 | Sucre | Bolivia | active | 2 + 172 | Bernice Willis | 29 Pyongyang Loop | 47753 | 734780743462 | Batman | Turkey | active | 1 + 245 | Courtney Day | 300 Junan Street | 81314 | 890289150158 | Uijongbu | South Korea | active | 1 + 171 | Dolores Wagner | 316 Uruapan Street | 58194 | 275788967899 | Ipoh | Malaysia | active | 2 + 493 | Brent Harkins | 319 Plock Parkway | 26101 | 854259976812 | Sultanbeyli | Turkey | active | 1 + 448 | Miguel Betancourt | 319 Springs Loop | 99552 | 72524459905 | Erlangen | Germany | active | 1 + 81 | Andrea Henderson | 320 Baiyin Parkway | 37307 | 223664661973 | Mahajanga | Madagascar | active | 1 + 18 | Carol Garcia | 320 Brest Avenue | 43331 | 747791594069 | Kaduna | Nigeria | active | 2 + 294 | Shelly Watts | 32 Pudukkottai Lane | 38834 | 967274728547 | Dayton | United States | active | 2 + 394 | Chris Brothers | 331 Bydgoszcz Parkway | 966 | 537374465982 | Gijn | Spain | active | 2 + 118 | Kim Cruz | 333 Goinia Way | 78625 | 909029256431 | Grand Prairie | United States | active | 1 + 194 | Kristen Chavez | 345 Oshawa Boulevard | 32114 | 104491201771 | Hino | Japan | active | 2 + 234 | Claudia Fuller | 346 Skikda Parkway | 90628 | 630424482919 | Jalib al-Shuyukh | Kuwait | active | 1 + 229 | Tamara Nguyen | 356 Olomouc Manor | 93323 | 22326410776 | Anpolis | Brazil | active | 1 + 16 | Sandra Martin | 360 Toulouse Parkway | 54308 | 949312333307 | Southend-on-Sea | United Kingdom | active | 2 + 495 | Charlie Bess | 362 Rajkot Lane | 98030 | 962020153680 | Baiyin | China | active | 2 + 383 | Martin Bales | 368 Hunuco Boulevard | 17165 | 106439158941 | Namibe | Angola | active | 1 + 541 | Darren Windham | 379 Lublin Parkway | 74568 | 921960450089 | Livorno | Italy | active | 2 + 348 | Roger Quintanilla | 381 Kabul Way | 87272 | 55477302294 | Hsichuh | Taiwan | active | 2 + 200 | Jeanne Lawson | 387 Mwene-Ditu Drive | 8073 | 764477681869 | Ashgabat | Turkmenistan | active | 2 + 546 | Kelly Knott | 390 Wroclaw Way | 5753 | 357593328658 | Sanya | China | active | 1 + 35 | Virginia Green | 391 Callao Drive | 34021 | 440512153169 | Toulouse | France | active | 2 + 414 | Vincent Ralston | 397 Sunnyvale Avenue | 55566 | 680851640676 | Allende | Mexico | active | 1 + 501 | Ruben Geary | 414 Mandaluyong Street | 16370 | 52709222667 | Lublin | Poland | active | 1 + 564 | Bob Pfeiffer | 415 Pune Avenue | 44274 | 203202500108 | Xintai | China | active | 2 + 475 | Pedro Chestnut | 421 Yaound Street | 11363 | 726875628268 | ostka | Ukraine | active | 2 + 41 | Stephanie Mitchell | 42 Brindisi Place | 16744 | 42384721397 | Yerevan | Armenia | active | 1 + 592 | Terrance Roush | 42 Fontana Avenue | 14684 | 437829801725 | Szkesfehrvr | Hungary | active | 1 + 227 | Colleen Burton | 430 Alessandria Loop | 47446 | 669828224459 | Saarbrcken | Germany | active | 1 + 333 | Andrew Purdy | 431 Szkesfehrvr Avenue | 57828 | 119501405123 | Baku | Azerbaijan | active | 2 + 175 | Annette Olson | 431 Xiangtan Avenue | 4854 | 230250973122 | Allappuzha (Alleppey) | India | active | 1 + 369 | Fred Wheat | 433 Florencia Street | 91330 | 561729882725 | Jurez | Mexico | active | 2 + 209 | Tonya Chapman | 43 Dadu Avenue | 4855 | 95666951770 | Bhilwara | India | active | 2 + 537 | Clinton Buford | 43 Vilnius Manor | 79814 | 484500282381 | Aurora | United States | active | 2 + 264 | Gwendolyn May | 446 Kirovo-Tepetsk Lane | 19428 | 
303967439816 | Higashiosaka | Japan | active | 1 + 193 | Katie Elliott | 447 Surakarta Loop | 10428 | 940830176580 | Kisumu | Kenya | active | 2 + 293 | Mae Fletcher | 44 Najafabad Way | 61391 | 96604821070 | Donostia-San Sebastin | Spain | active | 2 + 401 | Tony Carranza | 454 Patiala Lane | 13496 | 794553031307 | Koriyama | Japan | active | 2 + 543 | Lance Pemberton | 454 Qinhuangdao Drive | 25866 | 786270036240 | Beni-Mellal | Morocco | active | 1 + 278 | Billie Horton | 457 Tongliao Loop | 56254 | 880756161823 | Inegl | Turkey | active | 2 + 321 | Kevin Schuler | 470 Boksburg Street | 97960 | 908029859266 | Birgunj | Nepal | active | 1 + 168 | Regina Berry | 475 Atinsk Way | 59571 | 201705577290 | Jinchang | China | active | 1 + 8 | Susan Wilson | 478 Joliet Way | 77948 | 657282285970 | Hamilton | New Zealand | active | 2 + 142 | April Burns | 483 Ljubertsy Parkway | 60562 | 581174211853 | Dundee | United Kingdom | active | 1 + 528 | Claude Herzog | 486 Ondo Parkway | 35202 | 105882218332 | Benguela | Angola | active | 1 + 440 | Bernard Colby | 495 Bhimavaram Lane | 3 | 82088937724 | Dhule (Dhulia) | India | active | 1 + 503 | Angel Barclay | 496 Celaya Drive | 90797 | 759586584889 | Ueda | Japan | active | 1 + 384 | Ernest Stepp | 500 Lincoln Parkway | 95509 | 550306965159 | Huaian | China | active | 2 + 334 | Raymond Mcwhorter | 503 Sogamoso Loop | 49812 | 834626715837 | Sumqayit | Azerbaijan | active | 2 + 558 | Jimmie Eggleston | 505 Madiun Boulevard | 97271 | 970638808606 | Wroclaw | Poland | active | 1 + 335 | Gregory Mauldin | 507 Smolensk Loop | 22971 | 80303246192 | Sousse | Tunisia | active | 1 + 387 | Jesse Schilling | 514 Ife Way | 69973 | 900235712074 | Lubumbashi | Congo, The Democratic Republic of the | active | 2 + 256 | Mabel Holland | 51 Laredo Avenue | 68146 | 884536620568 | Monywa | Myanmar | active | 2 + 400 | Bryan Hardison | 530 Lausanne Lane | 11067 | 775235029633 | Dallas | United States | active | 2 + 258 | Myrtle Fleming | 532 Toulon Street | 69517 | 46871694740 | Santiago de los Caballeros | Dominican Republic | active | 1 + 182 | Renee Lane | 533 al-Ayn Boulevard | 8862 | 662227486184 | Compton | United States | active | 1 + 566 | Casey Mena | 539 Hami Way | 52196 | 525518075499 | Tokat | Turkey | active | 1 + 5 | Elizabeth Brown | 53 Idfu Parkway | 42399 | 10655648674 | Nantou | Taiwan | active | 1 + 290 | Kristina Chambers | 544 Tarsus Boulevard | 53145 | 892523334 | Valle de la Pascua | Venezuela | active | 1 + 512 | Cecil Vines | 548 Uruapan Street | 35653 | 879347453467 | London | United Kingdom | active | 1 + 527 | Cory Meehan | 556 Asuncin Way | 35364 | 338244023543 | Mogiljov | Belarus | active | 1 + 125 | Ethel Webb | 569 Baicheng Lane | 60304 | 490211944645 | Boksburg | South Africa | active | 1 + 79 | Rachel Barnes | 586 Tete Way | 1079 | 18581624103 | Kamakura | Japan | active | 1 + 330 | Scott Shelley | 587 Benguela Manor | 91590 | 165450987037 | Aurora | United States | active | 1 + 433 | Don Bone | 596 Huixquilucan Place | 65892 | 342709348083 | Naala-Porto | Mozambique | active | 1 + 76 | Irene Price | 602 Paarl Street | 98889 | 896314772871 | Pavlodar | Kazakstan | active | 2 + 358 | Samuel Marlow | 604 Bern Place | 5373 | 620719383725 | Ranchi | India | active | 2 + 517 | Brad Mccurdy | 608 Birgunj Parkway | 400 | 627425618482 | Chungho | Taiwan | active | 2 + 9 | Margaret Moore | 613 Korolev Drive | 45844 | 380657522649 | Masqat | Oman | active | 2 + 397 | Jimmy Schrader | 616 Hagonoy Avenue | 46043 | 604177838256 | Atinsk | Russian 
Federation | active | 1 + 355 | Terry Grissom | 619 Hunuco Avenue | 81508 | 142596392389 | Matsue | Japan | active | 2 + 34 | Rebecca Scott | 61 Tama Street | 94065 | 708403338270 | Kurashiki | Japan | active | 2 + 202 | Carla Gutierrez | 642 Nador Drive | 3924 | 369050085652 | Bhusawal | India | active | 2 + 481 | Herman Devore | 64 Korla Street | 25145 | 510383179153 | Mwanza | Tanzania | active | 1 + 316 | Steven Curley | 651 Pathankot Loop | 59811 | 139378397418 | Miraj | India | active | 1 + 490 | Sam Mcduffie | 656 Matamoros Drive | 19489 | 17305839123 | Sogamoso | Colombia | active | 1 + 212 | Wilma Richards | 660 Jedda Boulevard | 25053 | 168758068397 | Bellevue | United States | active | 2 + 295 | Daisy Bates | 661 Chisinau Lane | 8856 | 816436065431 | Kolpino | Russian Federation | active | 1 + 40 | Amanda Carter | 671 Graz Street | 94399 | 680768868518 | Nador | Morocco | active | 2 + 47 | Frances Parker | 686 Garland Manor | 52535 | 69493378813 | Juazeiro do Norte | Brazil | active | 1 + 303 | William Satterfield | 687 Alessandria Parkway | 57587 | 407218522294 | Sanaa | Yemen | active | 2 + 201 | Vicki Fields | 68 Molodetno Manor | 4662 | 146640639760 | Witten | Germany | active | 1 + 340 | Patrick Newsom | 68 Ponce Parkway | 85926 | 870635127812 | Hanoi | Vietnam | active | 1 + 408 | Manuel Murrell | 692 Amroha Drive | 35575 | 359478883004 | Jaffna | Sri Lanka | active | 1 + 3 | Linda Williams | 692 Joliet Street | 83579 | 448477190408 | Athenai | Greece | active | 1 + 483 | Vernon Chapa | 698 Jelets Boulevard | 2596 | 975185523021 | Denizli | Turkey | active | 2 + 65 | Rose Howard | 698 Otsu Street | 71110 | 409983924481 | Cayenne | French Guiana | active | 2 + 435 | Ricky Shelby | 722 Bradford Lane | 90920 | 746251338300 | Junan | China | active | 2 + 49 | Joyce Edwards | 725 Isesaki Place | 74428 | 876295323994 | Jedda | Saudi Arabia | active | 2 + 101 | Peggy Myers | 733 Mandaluyong Place | 77459 | 196568435814 | Abha | Saudi Arabia | active | 1 + 399 | Danny Isom | 734 Bchar Place | 30586 | 280578750435 | Okara | Pakistan | active | 1 + 460 | Leon Bostic | 734 Tanshui Avenue | 70664 | 366776723320 | Florencia | Colombia | active | 1 + 184 | Vivian Ruiz | 741 Ambattur Manor | 43310 | 302590383819 | s-Hertogenbosch | Netherlands | active | 1 + 311 | Paul Trout | 746 Joliet Lane | 94878 | 688485191923 | Kursk | Russian Federation | active | 2 + 210 | Ella Oliver | 751 Lima Loop | 99405 | 756460337785 | Aden | Yemen | active | 2 + 253 | Terry Carlson | 752 Ondo Loop | 32474 | 134673576619 | Miyakonojo | Japan | active | 1 + 389 | Alan Kahn | 753 Ilorin Avenue | 3656 | 464511145118 | Emeishan | China | active | 1 + 176 | June Carroll | 757 Rustenburg Avenue | 89668 | 506134035434 | Skikda | Algeria | active | 1 + 130 | Charlotte Hunter | 758 Junan Lane | 82639 | 935448624185 | guas Lindas de Gois | Brazil | active | 1 + 581 | Virgil Wofford | 760 Miyakonojo Drive | 64682 | 294449058179 | Jos Azueta | Mexico | active | 1 + 134 | Emma Boyd | 765 Southampton Drive | 4285 | 23712411567 | Qalyub | Egypt | active | 1 + 391 | Clarence Gamez | 767 Pyongyang Drive | 83536 | 667736124769 | Izumisano | Japan | active | 1 + 459 | Tommy Collazo | 76 Kermanshah Manor | 23343 | 762361821578 | Qomsheh | Iran | active | 1 + 14 | Betty White | 770 Bydgoszcz Avenue | 16266 | 517338314235 | Citrus Heights | United States | active | 2 + 257 | Marsha Douglas | 771 Yaound Manor | 86768 | 245477603573 | Beira | Mozambique | active | 2 + 491 | Rick Mattox | 775 ostka Drive | 22358 | 171973024401 | 
Mit Ghamr | Egypt | active | 2 + 312 | Mark Rinehart | 780 Kimberley Way | 17032 | 824396883951 | Tabuk | Saudi Arabia | active | 2 + 149 | Valerie Black | 782 Mosul Street | 25545 | 885899703621 | Brockton | United States | active | 1 + 29 | Angela Hernandez | 786 Aurora Avenue | 65750 | 18461860151 | Shimonoseki | Japan | active | 2 + 504 | Nathaniel Adam | 786 Matsue Way | 37469 | 111177206479 | Joliet | United States | active | 1 + 136 | Anita Morales | 788 Atinsk Street | 81691 | 146497509724 | Hubli-Dharwad | India | active | 2 + 412 | Allen Butterfield | 791 Salinas Street | 40509 | 129953030512 | Hoshiarpur | India | active | 2 + 154 | Michele Grant | 798 Cianjur Avenue | 76990 | 499408708580 | Yuncheng | China | active | 2 + 127 | Elaine Stevens | 801 Hagonoy Drive | 8439 | 237426099212 | Smolensk | Russian Federation | active | 2 + 110 | Tiffany Jordan | 804 Elista Drive | 61069 | 379804592943 | Enshi | China | active | 2 + 12 | Nancy Thomas | 808 Bhopal Manor | 10672 | 465887807014 | Yamuna Nagar | India | active | 1 + 477 | Dan Paine | 808 Naala-Porto Parkway | 41060 | 553452430707 | Stockport | United Kingdom | active | 1 + 451 | Jim Rea | 814 Simferopol Loop | 48745 | 524567129902 | El Fuerte | Mexico | active | 1 + 262 | Patsy Davidson | 816 Cayenne Parkway | 93629 | 282874611748 | Portoviejo | Ecuador | active | 2 + 197 | Sue Peters | 817 Bradford Loop | 89459 | 264286442804 | Changzhou | China | active | 2 + 364 | Benjamin Varney | 817 Laredo Avenue | 77449 | 151249681135 | Guadalajara | Mexico | active | 1 + 60 | Mildred Bailey | 81 Hodeida Way | 55561 | 250767749542 | Jaipur | India | active | 1 + 304 | David Royal | 827 Yuncheng Drive | 79047 | 504434452842 | Callao | Peru | active | 2 + 164 | Joann Gardner | 842 Salzburg Lane | 3313 | 697151428760 | Tarsus | Turkey | active | 2 + 595 | Terrence Gunderson | 844 Bucuresti Place | 36603 | 935952366111 | Jinzhou | China | active | 1 + 124 | Sheila Wells | 848 Tafuna Manor | 45142 | 614935229095 | Ktahya | Turkey | active | 1 + 207 | Gertrude Castillo | 850 Salala Loop | 10800 | 403404780639 | Nuuk | Greenland | active | 1 + 556 | Armando Gruber | 869 Shikarpur Way | 57380 | 590764256785 | Southport | United Kingdom | active | 2 + 288 | Bobbie Craig | 86 Higashiosaka Lane | 33768 | 957128697225 | Valle de Santiago | Mexico | active | 1 + 86 | Jacqueline Long | 870 Ashqelon Loop | 84931 | 135117278909 | Songkhla | Thailand | active | 2 + 146 | Jamie Rice | 879 Newcastle Way | 90732 | 206841104594 | Sterling Heights | United States | active | 1 + 232 | Constance Reid | 885 Yingkou Manor | 31390 | 588964509072 | Zaria | Nigeria | active | 2 + 510 | Ben Easter | 886 Tonghae Place | 19450 | 711928348157 | Kamyin | Russian Federation | active | 2 + 298 | Erika Pena | 898 Jining Lane | 40070 | 161643343536 | Oulu | Finland | active | 1 + 552 | Hugh Waldrop | 904 Clarksville Drive | 52234 | 955349440539 | Haining | China | active | 2 + 270 | Leah Curtis | 906 Goinia Way | 83565 | 701767622697 | Kalisz | Poland | active | 1 + 48 | Ann Evans | 909 Garland Manor | 69367 | 705800322606 | Niznekamsk | Russian Federation | active | 1 + 61 | Katherine Rivera | 915 Ponce Place | 83980 | 1395251317 | Basel | Switzerland | active | 2 + 269 | Cassandra Walters | 920 Kumbakonam Loop | 75090 | 685010736240 | Salinas | United States | active | 1 + 574 | Julian Vest | 923 Tangail Boulevard | 33384 | 315528269898 | Akishima | Japan | active | 2 + 141 | Debbie Reyes | 928 Jaffna Loop | 93762 | 581852137991 | Fukuyama | Japan | active | 1 + 83 | 
Louise Jenkins | 929 Tallahassee Loop | 74671 | 800716535041 | Springs | South Africa | active | 1
+ 52 | Julie Sanchez | 939 Probolinggo Loop | 4166 | 680428310138 | A Corua (La Corua) | Spain | active | 1
+ 135 | Juanita Mason | 943 Johannesburg Avenue | 5892 | 90921003005 | Pune | India | active | 2
+ 57 | Evelyn Morgan | 943 Tokat Street | 45428 | 889318963672 | Vaduz | Liechtenstein | active | 2
+ 393 | Philip Causey | 954 Lapu-Lapu Way | 8816 | 737229003916 | Korolev | Russian Federation | active | 1
+ 531 | Jamie Waugh | 958 Sagamihara Lane | 88408 | 427274926505 | Kuwana | Japan | active | 2
+ 416 | Jeffery Pinson | 966 Arecibo Loop | 94018 | 15273765306 | Dadu | Pakistan | active | 2
+ 582 | Andy Vanhorn | 966 Asuncin Way | 62703 | 995527378381 | Huejutla de Reyes | Mexico | active | 2
+ 24 | Kimberly Lee | 96 Tafuna Way | 99865 | 934730187245 | Crdoba | Argentina | active | 2
+ 379 | Carlos Coughlin | 97 Mogiljov Lane | 89294 | 924815207181 | Bhavnagar | India | active | 1
+ 92 | Tina Simmons | 984 Effon-Alaiye Avenue | 17119 | 132986892228 | Goinia | Brazil | active | 2
+ 480 | Corey Hauser | 984 Novoterkassk Loop | 28165 | 435118527255 | Gaziantep | Turkey | active | 1
+ 479 | Zachary Hite | 98 Pyongyang Boulevard | 88749 | 191958435142 | Akron | United States | active | 1
+ 415 | Glenn Pullen | 992 Klerksdorp Loop | 33711 | 855290087237 | Amersfoort | Netherlands | active | 1
+ 145 | Lucille Holmes | 999 Sanaa Loop | 3439 | 918032330119 | Soshanguve | South Africa | active | 1
+(353 rows)
-CREATE VIEW client_customer_master AS
- SELECT cu.client_customer_id AS id,
+CREATE VIEW customer_master AS
+ SELECT cu.customer_id AS id,
 cu.first_name || ' ' || cu.last_name AS name,
 a.address,
 a.postal_code AS "zip code",
@@ -158,25 +2097,379 @@ CREATE VIEW client_customer_master AS
 ELSE ''
 END AS notes,
 cu.store_id AS sid
- FROM client_customer cu
+ FROM customer cu
 INNER JOIN address a USING (address_id)
 INNER JOIN city USING (city_id)
 INNER JOIN country USING (country_id)
 ORDER BY address, city, country, id;
-SELECT * FROM client_customer_master ORDER BY address, city, country, id;
- id | name | address | zip code | phone | city | country | notes | sid
-----+------+---------+----------+-------+------+---------+-------+-----
-(0 rows)
+SELECT * FROM customer_master ORDER BY address, city, country, id;
+ id | name | address | zip code | phone | city | country | notes | sid
+-----+----------------------+---------------------------------+----------+--------------+----------------------------+---------------------------------------+--------+-----
+ 213 | Gina Williamson | 1001 Miyakonojo Lane | 67924 | 584316724815 | Taizz | Yemen | active | 1
+ 108 | Tracy Cole | 1002 Ahmadnagar Manor | 93026 | 371490777743 | Huixquilucan | Mexico | active | 1
+ 360 | Ralph Madrigal | 1009 Zanzibar Lane | 64875 | 102396298916 | Arecibo | Puerto Rico | active | 2
+ 173 | Audrey Ray | 1010 Klerksdorp Way | 6802 | 493008546874 | Graz | Austria | active | 1
+ 144 | Clara Shaw | 1027 Songkhla Manor | 30861 | 563660187896 | Molodetno | Belarus | active | 1
+ 139 | Amber Dixon | 1029 Dzerzinsk Manor | 57519 | 33173584456 | Touliu | Taiwan | active | 1
+ 385 | Phillip Holm | 102 Chapra Drive | 14073 | 776031833752 | Tama | Japan | active | 1
+ 69 | Judy Gray | 1031 Daugavpils Parkway | 59025 | 107137400143 | Bchar | Algeria | active | 2
+ 370 | Wayne Truong | 1049 Matamoros Parkway | 69640 | 960505250340 | Gulbarga | India | active | 2
+ 569 | Dave Gardiner | 1052 Pathankot Avenue | 77397 | 128499386727 | Leshan |
China | active | 2 + 557 | Felix Gaffney | 1059 Yuncheng Avenue | 47498 | 107092893983 | Vilnius | Lithuania | active | 1 + 496 | Tyler Wren | 1060 Tandil Lane | 72349 | 211256301880 | Rizhao | China | active | 2 + 196 | Alma Austin | 1074 Binzhou Manor | 36490 | 331132568928 | Mannheim | Germany | active | 1 + 589 | Tracy Herrmann | 1074 Sanaa Parkway | 22474 | 154124128457 | Loja | Ecuador | active | 1 + 465 | Floyd Gandy | 1088 Ibirit Place | 88502 | 49084281333 | Zapopan | Mexico | active | 1 + 179 | Dana Hart | 1089 Iwatsuki Avenue | 35109 | 866092335135 | Kirovo-Tepetsk | Russian Federation | active | 1 + 241 | Heidi Larson | 1103 Bilbays Parkway | 87660 | 279979529227 | Xiangfan | China | active | 2 + 597 | Freddie Duggan | 1103 Quilmes Boulevard | 52137 | 644021380889 | Sullana | Peru | active | 1 + 58 | Jean Bell | 1114 Liepaja Street | 69226 | 212869228936 | Kuching | Malaysia | active | 1 + 2 | Patricia Johnson | 1121 Loja Avenue | 17886 | 838635286649 | San Bernardino | United States | active | 1 + 70 | Christina Ramirez | 1124 Buenaventura Drive | 6856 | 407733804223 | al-Hawiya | Saudi Arabia | active | 2 + 374 | Jeremy Hurtado | 1133 Rizhao Avenue | 2800 | 600264533987 | Vitria de Santo Anto | Brazil | active | 2 + 51 | Alice Stewart | 1135 Izumisano Parkway | 48150 | 171822533480 | Fontana | United States | active | 1 + 319 | Ronald Weiner | 1145 Vilnius Manor | 73170 | 674805712553 | San Felipe del Progreso | Mexico | active | 2 + 240 | Marlene Welch | 1148 Saarbrcken Parkway | 1921 | 137773001988 | Iwaki | Japan | active | 1 + 476 | Derrick Bourque | 1153 Allende Way | 20336 | 856872225376 | Gatineau | Canada | active | 1 + 346 | Arthur Simpkins | 1157 Nyeri Loop | 56380 | 262744791493 | Maikop | Russian Federation | active | 1 + 50 | Diane Collins | 115 Hidalgo Parkway | 80168 | 307703950263 | Omdurman | Sudan | active | 1 + 362 | Nicholas Barfield | 1163 London Parkway | 6066 | 675120358494 | Belm | Brazil | active | 1 + 188 | Melanie Armstrong | 1166 Changhwa Street | 58852 | 650752094490 | Bayugan | Philippines | active | 1 + 218 | Vera Mccoy | 1168 Najafabad Parkway | 40301 | 886649065861 | Kabul | Afghanistan | active | 1 + 122 | Thelma Murray | 1175 Tanauan Way | 64615 | 937222955822 | Lima | Peru | active | 1 + 292 | Misty Lambert | 1192 Tongliao Street | 19065 | 350970907017 | Sharja | United Arab Emirates | active | 2 + 216 | Natalie Meyer | 1201 Qomsheh Manor | 21464 | 873492228462 | Aparecida de Goinia | Brazil | active | 1 + 579 | Daryl Larue | 1208 Tama Loop | 73605 | 954786054144 | Mosul | Iraq | active | 2 + 59 | Cheryl Murphy | 1213 Ranchi Parkway | 94352 | 800024380485 | Mysore | India | active | 1 + 63 | Ashley Richardson | 1214 Hanoi Way | 67055 | 491001136577 | Lincoln | United States | active | 1 + 204 | Rosemary Schmidt | 1215 Pyongyang Parkway | 25238 | 646237101779 | Usak | Turkey | active | 1 + 341 | Peter Menard | 1217 Konotop Avenue | 504 | 718917251754 | Ede | Netherlands | active | 1 + 215 | Jessie Banks | 1229 Valencia Parkway | 99124 | 352679173732 | Stara Zagora | Bulgaria | active | 2 + 242 | Glenda Frazier | 1246 Boksburg Parkway | 28349 | 890283544295 | Qinhuangdao | China | active | 1 + 337 | Jerry Jordon | 124 al-Manama Way | 52368 | 647899404952 | Onomichi | Japan | active | 1 + 230 | Joy George | 1256 Bislig Boulevard | 50598 | 479007229460 | Botosani | Romania | active | 2 + 468 | Tim Cary | 1257 Guadalajara Street | 33599 | 195337700615 | Bijapur | India | active | 1 + 404 | Stanley Scroggins | 1266 Laredo Parkway | 7664 | 
1483365694 | Omiya | Japan | active | 2 + 424 | Kyle Spurlock | 1269 Botosani Manor | 47394 | 736517327853 | Shanwei | China | active | 2 + 507 | Edgar Rhoads | 1269 Ipoh Avenue | 54674 | 402630109080 | Eskisehir | Turkey | active | 2 + 163 | Cathy Spencer | 1287 Xiangfan Boulevard | 57844 | 819416131190 | Kakamigahara | Japan | active | 1 + 444 | Marcus Hidalgo | 1289 Belm Boulevard | 88306 | 237368926031 | Tartu | Estonia | active | 2 + 169 | Erica Matthews | 1294 Firozabad Drive | 70618 | 161801569569 | Pingxiang | China | active | 2 + 484 | Roberto Vu | 1297 Alvorada Parkway | 11839 | 508348602835 | Yinchuan | China | active | 1 + 96 | Diana Alexander | 1308 Arecibo Way | 30695 | 6171054059 | Augusta-Richmond County | United States | active | 1 + 281 | Leona Obrien | 1308 Sumy Loop | 30657 | 583021225407 | Fuzhou | China | active | 2 + 248 | Caroline Bowman | 1309 Weifang Street | 57338 | 435785045362 | Tallahassee | United States | active | 1 + 466 | Leo Ebert | 1322 Mosul Parkway | 95400 | 268053970382 | Dongying | China | active | 1 + 599 | Austin Cintron | 1325 Fukuyama Street | 27107 | 288241215394 | Tieli | China | active | 2 + 598 | Wade Delvalle | 1331 Usak Boulevard | 61960 | 145308717464 | Lausanne | Switzerland | active | 1 + 533 | Jessie Milam | 1332 Gaziantep Lane | 22813 | 383353187467 | Binzhou | China | active | 1 + 211 | Stacey Montgomery | 1333 Haldia Street | 82161 | 408304391718 | Fuyu | China | active | 1 + 147 | Joanne Robertson | 1337 Lincoln Parkway | 99457 | 597815221267 | Urawa | Japan | active | 2 + 563 | Ken Prewitt | 1342 Abha Boulevard | 10714 | 997453607116 | Bucuresti | Romania | active | 2 + 402 | Luis Yanez | 1346 Mysore Drive | 61507 | 516647474029 | Brest | France | active | 1 + 317 | Edward Baugh | 1359 Zhoushan Parkway | 29763 | 46568045367 | Trshavn | Faroe Islands | active | 2 + 351 | Jack Foust | 1378 Beira Loop | 40792 | 840957664136 | Zeleznogorsk | Russian Federation | active | 1 + 104 | Rita Graham | 1386 Yangor Avenue | 80720 | 449216226468 | Toulon | France | active | 1 + 453 | Calvin Martel | 138 Caracas Boulevard | 16790 | 974433019532 | Maracabo | Venezuela | active | 1 + 587 | Sergio Stanfield | 1402 Zanzibar Boulevard | 71102 | 387448063440 | Celaya | Mexico | active | 1 + 429 | Frederick Isbell | 1404 Taguig Drive | 87212 | 572068624538 | Tsuyama | Japan | active | 2 + 282 | Jenny Castro | 1405 Chisinau Place | 8160 | 62781725285 | Ponce | Puerto Rico | active | 2 + 572 | Sidney Burleson | 1405 Hagonoy Avenue | 86587 | 867287719310 | Czestochowa | Poland | active | 1 + 567 | Alfredo Mcadams | 1407 Surakarta Manor | 33224 | 324346485054 | Serpuhov | Russian Federation | active | 2 + 187 | Brittany Riley | 140 Chiayi Parkway | 38982 | 855863906434 | Sumy | Ukraine | active | 2 + 547 | Kurt Emmons | 1421 Quilmes Lane | 19151 | 135407755975 | Kanazawa | Japan | active | 1 + 28 | Cynthia Young | 1425 Shikarpur Manor | 65599 | 678220867005 | Munger (Monghyr) | India | active | 1 + 526 | Karl Seal | 1427 Tabuk Place | 31342 | 214756839122 | Cape Coral | United States | active | 2 + 37 | Pamela Baker | 1440 Fukuyama Loop | 47929 | 912257250465 | Nanyang | China | active | 1 + 467 | Alvin Deloach | 1447 Chatsworth Place | 41545 | 769370126331 | Cuauhtmoc | Mexico | active | 2 + 554 | Dwayne Olvera | 1447 Imus Place | 12905 | 62127829280 | Rajkot | India | active | 1 + 43 | Christine Roberts | 1447 Imus Way | 48942 | 539758313890 | Faaa | French Polynesia | active | 2 + 588 | Marion Ocampo | 1464 Kursk Parkway | 17381 | 338758048786 | Weifang 
| China | active | 1 + 469 | Wesley Bull | 1469 Plock Lane | 95835 | 622884741180 | Ourense (Orense) | Spain | active | 2 + 177 | Samantha Duncan | 146 Johannesburg Way | 54132 | 953689007081 | Matamoros | Mexico | active | 2 + 247 | Stella Moreno | 1473 Changhwa Parkway | 75933 | 266798132374 | Coacalco de Berriozbal | Mexico | active | 1 + 243 | Lydia Burke | 1483 Pathankot Street | 37288 | 686015532180 | San Miguel de Tucumn | Argentina | active | 1 + 457 | Bill Gavin | 1485 Bratislava Place | 83183 | 924663855568 | Rockford | United States | active | 2 + 577 | Clifton Malcolm | 1489 Kakamigahara Lane | 98883 | 29341849811 | Tanshui | Taiwan | active | 2 + 555 | Dwight Lombardi | 1497 Fengshan Drive | 63022 | 368738360376 | Chatsworth | South Africa | active | 1 + 252 | Mattie Hoffman | 1497 Yuzhou Drive | 3433 | 246810237916 | London | United Kingdom | active | 2 + 497 | Gilbert Sledge | 1515 Korla Way | 57197 | 959467760895 | York | United Kingdom | active | 2 + 344 | Henry Billingsley | 1516 Escobar Drive | 46069 | 64536069371 | Nukualofa | Tonga | active | 1 + 268 | Nina Soto | 1519 Ilorin Place | 49298 | 357445645426 | Palghat (Palakkad) | India | active | 1 + 94 | Norma Gonzales | 152 Kitwe Parkway | 53182 | 835433605312 | Bislig | Philippines | active | 1 + 10 | Dorothy Taylor | 1531 Sal Drive | 53628 | 648856936185 | Esfahan | Iran | active | 1 + 167 | Sally Pierce | 1540 Wroclaw Drive | 62686 | 182363341674 | Chandrapur | India | active | 2 + 11 | Lisa Anderson | 1542 Tarlac Parkway | 1027 | 635297277345 | Sagamihara | Japan | active | 2 + 165 | Lorraine Stephens | 154 Tallahassee Loop | 62250 | 935508855935 | Hami | China | active | 2 + 75 | Tammy Sanders | 1551 Rampur Lane | 72394 | 251164340471 | Changhwa | Taiwan | active | 2 + 85 | Anne Powell | 1557 Ktahya Boulevard | 88002 | 720998247660 | Bradford | United Kingdom | active | 2 + 151 | Megan Palmer | 1560 Jelets Boulevard | 77777 | 189446090264 | Laiwu | China | active | 2 + 365 | Bruce Schwarz | 1565 Tangail Manor | 45750 | 634445428822 | Okinawa | Japan | active | 2 + 4 | Barbara Jones | 1566 Inegl Manor | 53561 | 705814003527 | Myingyan | Myanmar | active | 2 + 576 | Morris Mccarter | 1568 Celaya Parkway | 34750 | 278669994384 | Fengshan | Taiwan | active | 2 + 511 | Chester Benner | 1574 Goinia Boulevard | 39529 | 59634255214 | Suihua | China | active | 1 + 583 | Marshall Thorn | 1584 Ljubertsy Lane | 22954 | 285710089439 | Southampton | United Kingdom | active | 1 + 67 | Kelly Torres | 1586 Guaruj Place | 5135 | 947233365992 | Xiangtan | China | active | 1 + 266 | Nora Herrera | 1587 Loja Manor | 5410 | 621625204422 | Salzburg | Austria | active | 2 + 426 | Bradley Motley | 1596 Acua Parkway | 70425 | 157133457169 | Purnea (Purnia) | India | active | 1 + 97 | Annie Russell | 1599 Plock Drive | 71986 | 817248913162 | Tete | Mozambique | active | 2 + 376 | Randy Gaither | 1618 Olomouc Manor | 26385 | 96846695220 | Kurgan | Russian Federation | active | 1 + 462 | Warren Sherrod | 1621 Tongliao Avenue | 22173 | 209342540247 | Usolje-Sibirskoje | Russian Federation | active | 2 + 345 | Carl Artis | 1628 Nagareyama Lane | 60079 | 20064292617 | San Lorenzo | Paraguay | active | 1 + 42 | Carolyn Perez | 1632 Bislig Avenue | 61117 | 471675840679 | Pak Kret | Thailand | active | 2 + 299 | James Gannon | 1635 Kuwana Boulevard | 52137 | 710603868323 | Hiroshima | Japan | active | 2 + 352 | Albert Crouse | 1641 Changhwa Place | 37636 | 256546485220 | Bamenda | Cameroon | active | 1 + 363 | Roy Whiting | 1658 Jastrzebie-Zdrj Loop 
| 96584 | 568367775448 | Nyeri | Kenya | active | 2 + 140 | Eva Ramos | 1666 Beni-Mellal Place | 13377 | 9099941466 | Clarksville | United States | active | 1 + 30 | Melissa King | 1668 Anpolis Street | 50199 | 525255540978 | Lungtan | Taiwan | active | 1 + 347 | Ryan Salisbury | 1673 Tangail Drive | 26857 | 627924259271 | Daugavpils | Latvia | active | 2 + 396 | Earl Shanks | 168 Cianjur Manor | 73824 | 679095087143 | Iwatsuki | Japan | active | 1 + 95 | Paula Bryant | 1697 Tanauan Lane | 22870 | 4764773857 | Pathankot | India | active | 2 + 353 | Jonathan Scarborough | 1698 Southport Loop | 49009 | 754358349853 | Pachuca de Soto | Mexico | active | 1 + 62 | Joan Cooper | 1717 Guadalajara Lane | 85505 | 914090181665 | Saint Louis | United States | active | 1 + 45 | Janet Phillips | 1718 Valencia Street | 37359 | 675292816413 | Antofagasta | Chile | active | 1 + 423 | Alfred Casillas | 1727 Matamoros Place | 78813 | 129673677866 | Sawhaj | Egypt | active | 2 + 233 | Lillie Kim | 1736 Cavite Place | 98775 | 431770603551 | Idfu | Egypt | active | 2 + 87 | Wanda Patterson | 1740 Portoviejo Avenue | 29932 | 198123170793 | Sincelejo | Colombia | active | 1 + 559 | Everett Banda | 1741 Hoshiarpur Boulevard | 22372 | 855066328617 | Bilbays | Egypt | active | 2 + 565 | Jaime Nettles | 1746 Faaa Way | 32515 | 863080561151 | Hunuco | Peru | active | 2 + 509 | Raul Fortier | 1747 Rustenburg Place | 51369 | 442673923363 | Chapra | India | active | 1 + 137 | Rhonda Kennedy | 1749 Daxian Place | 11044 | 963369996279 | Apeldoorn | Netherlands | active | 2 + 590 | Seth Hannon | 1759 Niznekamsk Avenue | 39414 | 864392582257 | al-Manama | Bahrain | active | 2 + 249 | Dora Medina | 1760 Oshawa Manor | 38140 | 56257502250 | Tianjin | China | active | 2 + 267 | Margie Wade | 1762 Paarl Parkway | 53928 | 192459639410 | Lengshuijiang | China | active | 1 + 516 | Elmer Noe | 1768 Udine Loop | 32347 | 448876499197 | Battambang | Cambodia | active | 2 + 318 | Brian Wyman | 1769 Iwaki Lane | 25787 | 556100547674 | Bydgoszcz | Poland | active | 1 + 313 | Donald Mahon | 1774 Yaound Place | 91400 | 613124286867 | Ezhou | China | active | 2 + 514 | Franklin Troutman | 1778 Gijn Manor | 35156 | 288910576761 | Zaoyang | China | active | 2 + 23 | Sarah Lewis | 1780 Hino Boulevard | 7716 | 902731229323 | Liepaja | Latvia | active | 2 + 386 | Todd Tan | 1793 Meixian Place | 33535 | 619966287415 | Kamjanets-Podilskyi | Ukraine | active | 1 + 53 | Heather Morris | 17 Kabul Boulevard | 38594 | 697760867968 | Nagareyama | Japan | active | 1 + 329 | Frank Waggoner | 1816 Bydgoszcz Loop | 64308 | 965273813662 | Jamalpur | Bangladesh | active | 2 + 578 | Willard Lumpkin | 1819 Alessandria Loop | 53829 | 377633994405 | Carmen | Mexico | active | 2 + 223 | Melinda Fernandez | 1820 Maring Parkway | 88307 | 99760893676 | Mandi Bahauddin | Pakistan | active | 1 + 428 | Herbert Kruger | 1823 Hoshiarpur Lane | 33191 | 307133768620 | Syktyvkar | Russian Federation | active | 2 + 545 | Julio Noland | 182 Nukualofa Drive | 15414 | 426346224043 | Konotop | Ukraine | active | 2 + 332 | Stephen Qualls | 1838 Tabriz Lane | 1195 | 38988715447 | Dhaka | Bangladesh | active | 1 + 183 | Ida Andrews | 1839 Szkesfehrvr Parkway | 55709 | 947468818183 | Luzinia | Brazil | active | 2 + 538 | Ted Breaux | 183 Haiphong Street | 69953 | 488600270038 | Baicheng | China | active | 2 + 174 | Yvonne Watkins | 1848 Salala Boulevard | 25220 | 48265851133 | Ocumare del Tuy | Venezuela | active | 2 + 474 | Dustin Gillette | 1854 Okara Boulevard | 42123 | 
131912793873 | Emmen | Netherlands | active | 2 + 129 | Carrie Porter | 1854 Tieli Street | 15819 | 509492324775 | Liaocheng | China | active | 1 + 487 | Hector Poindexter | 185 Mannheim Lane | 23661 | 589377568313 | Pjatigorsk | Russian Federation | active | 2 + 420 | Jacob Lance | 1866 al-Qatif Avenue | 89420 | 546793516940 | El Monte | United States | active | 1 + 575 | Isaac Oglesby | 186 Skikda Lane | 89422 | 14465669789 | Cuernavaca | Mexico | active | 2 + 464 | Jerome Kenyon | 1872 Toulon Loop | 7939 | 928809465153 | Rancagua | Chile | active | 1 + 170 | Beatrice Arnold | 1877 Ezhou Lane | 63337 | 264541743403 | Udaipur | India | active | 1 + 301 | Robert Baughman | 1883 Maikop Lane | 68469 | 96110042435 | Kaliningrad | Russian Federation | active | 2 + 274 | Naomi Jennings | 1884 Shikarpur Avenue | 85548 | 959949395183 | Karnal | India | active | 1 + 456 | Ronnie Ricketts | 1889 Valparai Way | 75559 | 670370974122 | Ziguinchor | Senegal | active | 2 + 593 | Rene Mcalister | 1895 Zhezqazghan Drive | 36693 | 137809746111 | Garden Grove | United States | active | 2 + 302 | Michael Silverman | 1908 Gaziantep Place | 58979 | 108053751300 | Tiefa | China | active | 1 + 339 | Walter Perryman | 1909 Benguela Lane | 19913 | 624138001031 | Xinxiang | China | active | 2 + 485 | Clyde Tobias | 1909 Dayton Avenue | 88513 | 702955450528 | Shaoguan | China | active | 1 + 366 | Brandon Huey | 1912 Emeishan Drive | 33050 | 99883471275 | Balikesir | Turkey | active | 1 + 1 | Mary Smith | 1913 Hanoi Way | 35200 | 28303384290 | Sasebo | Japan | active | 1 + 100 | Robin Hayes | 1913 Kamakura Place | 97287 | 942570536750 | Jelets | Russian Federation | active | 1 + 553 | Max Pitt | 1917 Kumbakonam Parkway | 11892 | 698182547686 | Novi Sad | Yugoslavia | active | 1 + 368 | Harry Arce | 1922 Miraj Way | 13203 | 320471479776 | Najafabad | Iran | active | 1 + 441 | Mario Cheatham | 1924 Shimonoseki Drive | 52625 | 406784385440 | Batna | Algeria | active | 1 + 221 | Bessie Morrison | 1926 Gingoog Street | 22824 | 469738825391 | Syrakusa | Italy | active | 1 + 291 | Toni Holt | 1936 Cuman Avenue | 61195 | 976798660411 | Roanoke | United States | active | 1 + 536 | Fernando Churchill | 193 Bhusawal Place | 9750 | 745267607502 | Tonghae | South Korea | active | 2 + 88 | Bonnie Hughes | 1942 Ciparay Parkway | 82624 | 978987363654 | Cheju | South Korea | active | 2 + 276 | Brandy Graves | 1944 Bamenda Way | 24645 | 75975221996 | Warren | United States | active | 1 + 419 | Chad Carbone | 1948 Bayugan Parkway | 60622 | 987306329957 | Katihar | India | active | 1 + 519 | Ron Deluca | 1949 Sanya Street | 61244 | 132100972047 | Isesaki | Japan | active | 2 + 90 | Ruby Washington | 1952 Chatsworth Drive | 25958 | 991562402283 | Meixian | China | active | 2 + 148 | Eleanor Hunt | 1952 Pune Lane | 92150 | 354615066969 | Saint-Denis | Runion | active | 1 + 535 | Javier Elrod | 195 Ilorin Street | 49250 | 8912935608 | NDjamna | Chad | active | 1 + 152 | Alicia Mills | 1963 Moscow Place | 64863 | 761379480249 | Nagaon | India | active | 1 + 238 | Nellie Garrett | 1964 Gijn Manor | 14408 | 918119601885 | Shimoga | India | active | 1 + 64 | Judith Cox | 1966 Amroha Avenue | 70385 | 333489324603 | Daxian | China | active | 2 + 117 | Edith Mcdonald | 1967 Sincelejo Place | 73644 | 577812616052 | Gandhinagar | India | active | 1 + 275 | Carole Barnett | 1980 Kamjanets-Podilskyi Street | 89502 | 874337098891 | Peoria | United States | active | 2 + 521 | Roland South | 1993 0 Loop | 41214 | 25865528181 | Yingkou | China | active | 
2 + 44 | Marie Turner | 1998 Halifax Drive | 76022 | 177727722820 | Lipetsk | Russian Federation | active | 1 + 226 | Maureen Little | 201 Effon-Alaiye Way | 64344 | 684192903087 | Asuncin | Paraguay | active | 2 + 447 | Clifford Bowens | 207 Cuernavaca Loop | 52671 | 782900030287 | Nabereznyje Telny | Russian Federation | active | 1 + 377 | Howard Fortner | 220 Hidalgo Drive | 45298 | 342720754566 | Kermanshah | Iran | active | 1 + 214 | Kristin Johnston | 226 Brest Manor | 2299 | 785881412500 | Sunnyvale | United States | active | 1 + 283 | Felicia Sutton | 226 Halifax Street | 58492 | 790651020929 | Korla | China | active | 1 + 439 | Alexander Fennell | 231 Kaliningrad Place | 57833 | 575081026569 | Bergamo | Italy | active | 2 + 120 | Sylvia Ortiz | 241 Mosul Lane | 76157 | 765345144779 | Dos Quebradas | Colombia | active | 2 + 584 | Salvador Teel | 247 Jining Parkway | 53446 | 170115379190 | Banjul | Gambia | active | 2 + 13 | Karen Jackson | 270 Amroha Parkway | 29610 | 695479687538 | Osmaniye | Turkey | active | 2 + 471 | Dean Sauer | 270 Tambaram Parkway | 9668 | 248446668735 | Johannesburg | South Africa | active | 1 + 17 | Donna Thompson | 270 Toulon Boulevard | 81766 | 407752414682 | Elista | Russian Federation | active | 1 + 431 | Joel Francisco | 287 Cuautla Boulevard | 72736 | 82619513349 | Sucre | Bolivia | active | 2 + 172 | Bernice Willis | 29 Pyongyang Loop | 47753 | 734780743462 | Batman | Turkey | active | 1 + 245 | Courtney Day | 300 Junan Street | 81314 | 890289150158 | Uijongbu | South Korea | active | 1 + 171 | Dolores Wagner | 316 Uruapan Street | 58194 | 275788967899 | Ipoh | Malaysia | active | 2 + 493 | Brent Harkins | 319 Plock Parkway | 26101 | 854259976812 | Sultanbeyli | Turkey | active | 1 + 448 | Miguel Betancourt | 319 Springs Loop | 99552 | 72524459905 | Erlangen | Germany | active | 1 + 81 | Andrea Henderson | 320 Baiyin Parkway | 37307 | 223664661973 | Mahajanga | Madagascar | active | 1 + 18 | Carol Garcia | 320 Brest Avenue | 43331 | 747791594069 | Kaduna | Nigeria | active | 2 + 294 | Shelly Watts | 32 Pudukkottai Lane | 38834 | 967274728547 | Dayton | United States | active | 2 + 394 | Chris Brothers | 331 Bydgoszcz Parkway | 966 | 537374465982 | Gijn | Spain | active | 2 + 118 | Kim Cruz | 333 Goinia Way | 78625 | 909029256431 | Grand Prairie | United States | active | 1 + 194 | Kristen Chavez | 345 Oshawa Boulevard | 32114 | 104491201771 | Hino | Japan | active | 2 + 234 | Claudia Fuller | 346 Skikda Parkway | 90628 | 630424482919 | Jalib al-Shuyukh | Kuwait | active | 1 + 229 | Tamara Nguyen | 356 Olomouc Manor | 93323 | 22326410776 | Anpolis | Brazil | active | 1 + 16 | Sandra Martin | 360 Toulouse Parkway | 54308 | 949312333307 | Southend-on-Sea | United Kingdom | active | 2 + 495 | Charlie Bess | 362 Rajkot Lane | 98030 | 962020153680 | Baiyin | China | active | 2 + 383 | Martin Bales | 368 Hunuco Boulevard | 17165 | 106439158941 | Namibe | Angola | active | 1 + 541 | Darren Windham | 379 Lublin Parkway | 74568 | 921960450089 | Livorno | Italy | active | 2 + 348 | Roger Quintanilla | 381 Kabul Way | 87272 | 55477302294 | Hsichuh | Taiwan | active | 2 + 200 | Jeanne Lawson | 387 Mwene-Ditu Drive | 8073 | 764477681869 | Ashgabat | Turkmenistan | active | 2 + 546 | Kelly Knott | 390 Wroclaw Way | 5753 | 357593328658 | Sanya | China | active | 1 + 35 | Virginia Green | 391 Callao Drive | 34021 | 440512153169 | Toulouse | France | active | 2 + 414 | Vincent Ralston | 397 Sunnyvale Avenue | 55566 | 680851640676 | Allende | Mexico | active | 1 + 501 
| Ruben Geary | 414 Mandaluyong Street | 16370 | 52709222667 | Lublin | Poland | active | 1 + 564 | Bob Pfeiffer | 415 Pune Avenue | 44274 | 203202500108 | Xintai | China | active | 2 + 475 | Pedro Chestnut | 421 Yaound Street | 11363 | 726875628268 | ostka | Ukraine | active | 2 + 41 | Stephanie Mitchell | 42 Brindisi Place | 16744 | 42384721397 | Yerevan | Armenia | active | 1 + 592 | Terrance Roush | 42 Fontana Avenue | 14684 | 437829801725 | Szkesfehrvr | Hungary | active | 1 + 227 | Colleen Burton | 430 Alessandria Loop | 47446 | 669828224459 | Saarbrcken | Germany | active | 1 + 333 | Andrew Purdy | 431 Szkesfehrvr Avenue | 57828 | 119501405123 | Baku | Azerbaijan | active | 2 + 175 | Annette Olson | 431 Xiangtan Avenue | 4854 | 230250973122 | Allappuzha (Alleppey) | India | active | 1 + 369 | Fred Wheat | 433 Florencia Street | 91330 | 561729882725 | Jurez | Mexico | active | 2 + 209 | Tonya Chapman | 43 Dadu Avenue | 4855 | 95666951770 | Bhilwara | India | active | 2 + 537 | Clinton Buford | 43 Vilnius Manor | 79814 | 484500282381 | Aurora | United States | active | 2 + 264 | Gwendolyn May | 446 Kirovo-Tepetsk Lane | 19428 | 303967439816 | Higashiosaka | Japan | active | 1 + 193 | Katie Elliott | 447 Surakarta Loop | 10428 | 940830176580 | Kisumu | Kenya | active | 2 + 293 | Mae Fletcher | 44 Najafabad Way | 61391 | 96604821070 | Donostia-San Sebastin | Spain | active | 2 + 401 | Tony Carranza | 454 Patiala Lane | 13496 | 794553031307 | Koriyama | Japan | active | 2 + 543 | Lance Pemberton | 454 Qinhuangdao Drive | 25866 | 786270036240 | Beni-Mellal | Morocco | active | 1 + 278 | Billie Horton | 457 Tongliao Loop | 56254 | 880756161823 | Inegl | Turkey | active | 2 + 321 | Kevin Schuler | 470 Boksburg Street | 97960 | 908029859266 | Birgunj | Nepal | active | 1 + 168 | Regina Berry | 475 Atinsk Way | 59571 | 201705577290 | Jinchang | China | active | 1 + 8 | Susan Wilson | 478 Joliet Way | 77948 | 657282285970 | Hamilton | New Zealand | active | 2 + 142 | April Burns | 483 Ljubertsy Parkway | 60562 | 581174211853 | Dundee | United Kingdom | active | 1 + 528 | Claude Herzog | 486 Ondo Parkway | 35202 | 105882218332 | Benguela | Angola | active | 1 + 440 | Bernard Colby | 495 Bhimavaram Lane | 3 | 82088937724 | Dhule (Dhulia) | India | active | 1 + 503 | Angel Barclay | 496 Celaya Drive | 90797 | 759586584889 | Ueda | Japan | active | 1 + 384 | Ernest Stepp | 500 Lincoln Parkway | 95509 | 550306965159 | Huaian | China | active | 2 + 334 | Raymond Mcwhorter | 503 Sogamoso Loop | 49812 | 834626715837 | Sumqayit | Azerbaijan | active | 2 + 558 | Jimmie Eggleston | 505 Madiun Boulevard | 97271 | 970638808606 | Wroclaw | Poland | active | 1 + 335 | Gregory Mauldin | 507 Smolensk Loop | 22971 | 80303246192 | Sousse | Tunisia | active | 1 + 387 | Jesse Schilling | 514 Ife Way | 69973 | 900235712074 | Lubumbashi | Congo, The Democratic Republic of the | active | 2 + 256 | Mabel Holland | 51 Laredo Avenue | 68146 | 884536620568 | Monywa | Myanmar | active | 2 + 400 | Bryan Hardison | 530 Lausanne Lane | 11067 | 775235029633 | Dallas | United States | active | 2 + 258 | Myrtle Fleming | 532 Toulon Street | 69517 | 46871694740 | Santiago de los Caballeros | Dominican Republic | active | 1 + 182 | Renee Lane | 533 al-Ayn Boulevard | 8862 | 662227486184 | Compton | United States | active | 1 + 566 | Casey Mena | 539 Hami Way | 52196 | 525518075499 | Tokat | Turkey | active | 1 + 5 | Elizabeth Brown | 53 Idfu Parkway | 42399 | 10655648674 | Nantou | Taiwan | active | 1 + 290 | Kristina Chambers | 
544 Tarsus Boulevard | 53145 | 892523334 | Valle de la Pascua | Venezuela | active | 1 + 512 | Cecil Vines | 548 Uruapan Street | 35653 | 879347453467 | London | United Kingdom | active | 1 + 527 | Cory Meehan | 556 Asuncin Way | 35364 | 338244023543 | Mogiljov | Belarus | active | 1 + 125 | Ethel Webb | 569 Baicheng Lane | 60304 | 490211944645 | Boksburg | South Africa | active | 1 + 79 | Rachel Barnes | 586 Tete Way | 1079 | 18581624103 | Kamakura | Japan | active | 1 + 330 | Scott Shelley | 587 Benguela Manor | 91590 | 165450987037 | Aurora | United States | active | 1 + 433 | Don Bone | 596 Huixquilucan Place | 65892 | 342709348083 | Naala-Porto | Mozambique | active | 1 + 76 | Irene Price | 602 Paarl Street | 98889 | 896314772871 | Pavlodar | Kazakstan | active | 2 + 358 | Samuel Marlow | 604 Bern Place | 5373 | 620719383725 | Ranchi | India | active | 2 + 517 | Brad Mccurdy | 608 Birgunj Parkway | 400 | 627425618482 | Chungho | Taiwan | active | 2 + 9 | Margaret Moore | 613 Korolev Drive | 45844 | 380657522649 | Masqat | Oman | active | 2 + 397 | Jimmy Schrader | 616 Hagonoy Avenue | 46043 | 604177838256 | Atinsk | Russian Federation | active | 1 + 355 | Terry Grissom | 619 Hunuco Avenue | 81508 | 142596392389 | Matsue | Japan | active | 2 + 34 | Rebecca Scott | 61 Tama Street | 94065 | 708403338270 | Kurashiki | Japan | active | 2 + 202 | Carla Gutierrez | 642 Nador Drive | 3924 | 369050085652 | Bhusawal | India | active | 2 + 481 | Herman Devore | 64 Korla Street | 25145 | 510383179153 | Mwanza | Tanzania | active | 1 + 316 | Steven Curley | 651 Pathankot Loop | 59811 | 139378397418 | Miraj | India | active | 1 + 490 | Sam Mcduffie | 656 Matamoros Drive | 19489 | 17305839123 | Sogamoso | Colombia | active | 1 + 212 | Wilma Richards | 660 Jedda Boulevard | 25053 | 168758068397 | Bellevue | United States | active | 2 + 295 | Daisy Bates | 661 Chisinau Lane | 8856 | 816436065431 | Kolpino | Russian Federation | active | 1 + 40 | Amanda Carter | 671 Graz Street | 94399 | 680768868518 | Nador | Morocco | active | 2 + 47 | Frances Parker | 686 Garland Manor | 52535 | 69493378813 | Juazeiro do Norte | Brazil | active | 1 + 303 | William Satterfield | 687 Alessandria Parkway | 57587 | 407218522294 | Sanaa | Yemen | active | 2 + 201 | Vicki Fields | 68 Molodetno Manor | 4662 | 146640639760 | Witten | Germany | active | 1 + 340 | Patrick Newsom | 68 Ponce Parkway | 85926 | 870635127812 | Hanoi | Vietnam | active | 1 + 408 | Manuel Murrell | 692 Amroha Drive | 35575 | 359478883004 | Jaffna | Sri Lanka | active | 1 + 3 | Linda Williams | 692 Joliet Street | 83579 | 448477190408 | Athenai | Greece | active | 1 + 483 | Vernon Chapa | 698 Jelets Boulevard | 2596 | 975185523021 | Denizli | Turkey | active | 2 + 65 | Rose Howard | 698 Otsu Street | 71110 | 409983924481 | Cayenne | French Guiana | active | 2 + 435 | Ricky Shelby | 722 Bradford Lane | 90920 | 746251338300 | Junan | China | active | 2 + 49 | Joyce Edwards | 725 Isesaki Place | 74428 | 876295323994 | Jedda | Saudi Arabia | active | 2 + 101 | Peggy Myers | 733 Mandaluyong Place | 77459 | 196568435814 | Abha | Saudi Arabia | active | 1 + 399 | Danny Isom | 734 Bchar Place | 30586 | 280578750435 | Okara | Pakistan | active | 1 + 460 | Leon Bostic | 734 Tanshui Avenue | 70664 | 366776723320 | Florencia | Colombia | active | 1 + 184 | Vivian Ruiz | 741 Ambattur Manor | 43310 | 302590383819 | s-Hertogenbosch | Netherlands | active | 1 + 311 | Paul Trout | 746 Joliet Lane | 94878 | 688485191923 | Kursk | Russian Federation | active | 2 + 210 
| Ella Oliver | 751 Lima Loop | 99405 | 756460337785 | Aden | Yemen | active | 2 + 253 | Terry Carlson | 752 Ondo Loop | 32474 | 134673576619 | Miyakonojo | Japan | active | 1 + 389 | Alan Kahn | 753 Ilorin Avenue | 3656 | 464511145118 | Emeishan | China | active | 1 + 176 | June Carroll | 757 Rustenburg Avenue | 89668 | 506134035434 | Skikda | Algeria | active | 1 + 130 | Charlotte Hunter | 758 Junan Lane | 82639 | 935448624185 | guas Lindas de Gois | Brazil | active | 1 + 581 | Virgil Wofford | 760 Miyakonojo Drive | 64682 | 294449058179 | Jos Azueta | Mexico | active | 1 + 134 | Emma Boyd | 765 Southampton Drive | 4285 | 23712411567 | Qalyub | Egypt | active | 1 + 391 | Clarence Gamez | 767 Pyongyang Drive | 83536 | 667736124769 | Izumisano | Japan | active | 1 + 459 | Tommy Collazo | 76 Kermanshah Manor | 23343 | 762361821578 | Qomsheh | Iran | active | 1 + 14 | Betty White | 770 Bydgoszcz Avenue | 16266 | 517338314235 | Citrus Heights | United States | active | 2 + 257 | Marsha Douglas | 771 Yaound Manor | 86768 | 245477603573 | Beira | Mozambique | active | 2 + 491 | Rick Mattox | 775 ostka Drive | 22358 | 171973024401 | Mit Ghamr | Egypt | active | 2 + 312 | Mark Rinehart | 780 Kimberley Way | 17032 | 824396883951 | Tabuk | Saudi Arabia | active | 2 + 149 | Valerie Black | 782 Mosul Street | 25545 | 885899703621 | Brockton | United States | active | 1 + 29 | Angela Hernandez | 786 Aurora Avenue | 65750 | 18461860151 | Shimonoseki | Japan | active | 2 + 504 | Nathaniel Adam | 786 Matsue Way | 37469 | 111177206479 | Joliet | United States | active | 1 + 136 | Anita Morales | 788 Atinsk Street | 81691 | 146497509724 | Hubli-Dharwad | India | active | 2 + 412 | Allen Butterfield | 791 Salinas Street | 40509 | 129953030512 | Hoshiarpur | India | active | 2 + 154 | Michele Grant | 798 Cianjur Avenue | 76990 | 499408708580 | Yuncheng | China | active | 2 + 127 | Elaine Stevens | 801 Hagonoy Drive | 8439 | 237426099212 | Smolensk | Russian Federation | active | 2 + 110 | Tiffany Jordan | 804 Elista Drive | 61069 | 379804592943 | Enshi | China | active | 2 + 12 | Nancy Thomas | 808 Bhopal Manor | 10672 | 465887807014 | Yamuna Nagar | India | active | 1 + 477 | Dan Paine | 808 Naala-Porto Parkway | 41060 | 553452430707 | Stockport | United Kingdom | active | 1 + 451 | Jim Rea | 814 Simferopol Loop | 48745 | 524567129902 | El Fuerte | Mexico | active | 1 + 262 | Patsy Davidson | 816 Cayenne Parkway | 93629 | 282874611748 | Portoviejo | Ecuador | active | 2 + 197 | Sue Peters | 817 Bradford Loop | 89459 | 264286442804 | Changzhou | China | active | 2 + 364 | Benjamin Varney | 817 Laredo Avenue | 77449 | 151249681135 | Guadalajara | Mexico | active | 1 + 60 | Mildred Bailey | 81 Hodeida Way | 55561 | 250767749542 | Jaipur | India | active | 1 + 304 | David Royal | 827 Yuncheng Drive | 79047 | 504434452842 | Callao | Peru | active | 2 + 164 | Joann Gardner | 842 Salzburg Lane | 3313 | 697151428760 | Tarsus | Turkey | active | 2 + 595 | Terrence Gunderson | 844 Bucuresti Place | 36603 | 935952366111 | Jinzhou | China | active | 1 + 124 | Sheila Wells | 848 Tafuna Manor | 45142 | 614935229095 | Ktahya | Turkey | active | 1 + 207 | Gertrude Castillo | 850 Salala Loop | 10800 | 403404780639 | Nuuk | Greenland | active | 1 + 556 | Armando Gruber | 869 Shikarpur Way | 57380 | 590764256785 | Southport | United Kingdom | active | 2 + 288 | Bobbie Craig | 86 Higashiosaka Lane | 33768 | 957128697225 | Valle de Santiago | Mexico | active | 1 + 86 | Jacqueline Long | 870 Ashqelon Loop | 84931 | 135117278909 
| Songkhla | Thailand | active | 2 + 146 | Jamie Rice | 879 Newcastle Way | 90732 | 206841104594 | Sterling Heights | United States | active | 1 + 232 | Constance Reid | 885 Yingkou Manor | 31390 | 588964509072 | Zaria | Nigeria | active | 2 + 510 | Ben Easter | 886 Tonghae Place | 19450 | 711928348157 | Kamyin | Russian Federation | active | 2 + 298 | Erika Pena | 898 Jining Lane | 40070 | 161643343536 | Oulu | Finland | active | 1 + 552 | Hugh Waldrop | 904 Clarksville Drive | 52234 | 955349440539 | Haining | China | active | 2 + 270 | Leah Curtis | 906 Goinia Way | 83565 | 701767622697 | Kalisz | Poland | active | 1 + 48 | Ann Evans | 909 Garland Manor | 69367 | 705800322606 | Niznekamsk | Russian Federation | active | 1 + 61 | Katherine Rivera | 915 Ponce Place | 83980 | 1395251317 | Basel | Switzerland | active | 2 + 269 | Cassandra Walters | 920 Kumbakonam Loop | 75090 | 685010736240 | Salinas | United States | active | 1 + 574 | Julian Vest | 923 Tangail Boulevard | 33384 | 315528269898 | Akishima | Japan | active | 2 + 141 | Debbie Reyes | 928 Jaffna Loop | 93762 | 581852137991 | Fukuyama | Japan | active | 1 + 83 | Louise Jenkins | 929 Tallahassee Loop | 74671 | 800716535041 | Springs | South Africa | active | 1 + 52 | Julie Sanchez | 939 Probolinggo Loop | 4166 | 680428310138 | A Corua (La Corua) | Spain | active | 1 + 135 | Juanita Mason | 943 Johannesburg Avenue | 5892 | 90921003005 | Pune | India | active | 2 + 57 | Evelyn Morgan | 943 Tokat Street | 45428 | 889318963672 | Vaduz | Liechtenstein | active | 2 + 393 | Philip Causey | 954 Lapu-Lapu Way | 8816 | 737229003916 | Korolev | Russian Federation | active | 1 + 531 | Jamie Waugh | 958 Sagamihara Lane | 88408 | 427274926505 | Kuwana | Japan | active | 2 + 416 | Jeffery Pinson | 966 Arecibo Loop | 94018 | 15273765306 | Dadu | Pakistan | active | 2 + 582 | Andy Vanhorn | 966 Asuncin Way | 62703 | 995527378381 | Huejutla de Reyes | Mexico | active | 2 + 24 | Kimberly Lee | 96 Tafuna Way | 99865 | 934730187245 | Crdoba | Argentina | active | 2 + 379 | Carlos Coughlin | 97 Mogiljov Lane | 89294 | 924815207181 | Bhavnagar | India | active | 1 + 92 | Tina Simmons | 984 Effon-Alaiye Avenue | 17119 | 132986892228 | Goinia | Brazil | active | 2 + 480 | Corey Hauser | 984 Novoterkassk Loop | 28165 | 435118527255 | Gaziantep | Turkey | active | 1 + 479 | Zachary Hite | 98 Pyongyang Boulevard | 88749 | 191958435142 | Akron | United States | active | 1 + 415 | Glenn Pullen | 992 Klerksdorp Loop | 33711 | 855290087237 | Amersfoort | Netherlands | active | 1 + 145 | Lucille Holmes | 999 Sanaa Loop | 3439 | 918032330119 | Soshanguve | South Africa | active | 1 +(353 rows) -SELECT * FROM client_customer_master where country='Canada'; - id | name | address | zip code | phone | city | country | notes | sid -----+------+---------+----------+-------+------+---------+-------+----- -(0 rows) +SELECT * FROM customer_master where country='Canada'; + id | name | address | zip code | phone | city | country | notes | sid +-----+-----------------+------------------+----------+--------------+----------+---------+--------+----- + 476 | Derrick Bourque | 1153 Allende Way | 20336 | 856872225376 | Gatineau | Canada | active | 1 +(1 row) CREATE VIEW test_view AS SELECT city_id, country_id, city from city ORDER BY city; SELECT * FROM test_view ORDER BY city; - city_id | country_id | city ----------+------------+----------------------- + city_id | country_id | city +---------+------------+---------------------------- 2 | 82 | Abha 3 | 101 | Abu Dhabi 1 | 87 | A 
Corua (La Corua) @@ -194,19 +2487,599 @@ SELECT * FROM test_view ORDER BY city; 18 | 44 | Allappuzha (Alleppey) 19 | 60 | Allende 14 | 11 | al-Manama + 20 | 6 | Almirante Brown 15 | 89 | al-Qadarif 16 | 82 | al-Qatif - 20 | 20 | Gatineau -(20 rows) + 21 | 15 | Alvorada + 22 | 44 | Ambattur + 23 | 67 | Amersfoort + 24 | 44 | Amroha + 25 | 15 | Angra dos Reis + 26 | 15 | Anpolis + 27 | 22 | Antofagasta + 28 | 15 | Aparecida de Goinia + 29 | 67 | Apeldoorn + 30 | 15 | Araatuba + 31 | 46 | Arak + 32 | 77 | Arecibo + 33 | 103 | Arlington + 34 | 48 | Ashdod + 35 | 98 | Ashgabat + 36 | 48 | Ashqelon + 37 | 73 | Asuncin + 38 | 39 | Athenai + 39 | 80 | Atinsk + 40 | 60 | Atlixco + 41 | 103 | Augusta-Richmond County + 42 | 103 | Aurora + 43 | 6 | Avellaneda + 44 | 15 | Bag + 45 | 6 | Baha Blanca + 46 | 23 | Baicheng + 47 | 23 | Baiyin + 48 | 10 | Baku + 49 | 80 | Balaiha + 50 | 97 | Balikesir + 51 | 44 | Balurghat + 52 | 19 | Bamenda + 53 | 16 | Bandar Seri Begawan + 54 | 37 | Banjul + 55 | 104 | Barcelona + 56 | 91 | Basel + 58 | 97 | Batman + 59 | 2 | Batna + 60 | 18 | Battambang + 57 | 48 | Bat Yam + 61 | 75 | Baybay + 62 | 75 | Bayugan + 63 | 2 | Bchar + 64 | 63 | Beira + 65 | 103 | Bellevue + 66 | 15 | Belm + 67 | 4 | Benguela + 68 | 62 | Beni-Mellal + 69 | 69 | Benin City + 70 | 49 | Bergamo + 71 | 44 | Berhampore (Baharampur) + 72 | 91 | Bern + 73 | 44 | Bhavnagar + 74 | 44 | Bhilwara + 75 | 44 | Bhimavaram + 76 | 44 | Bhopal + 77 | 44 | Bhusawal + 78 | 44 | Bijapur + 79 | 29 | Bilbays + 80 | 23 | Binzhou + 81 | 66 | Birgunj + 82 | 75 | Bislig + 83 | 15 | Blumenau + 84 | 15 | Boa Vista + 85 | 85 | Boksburg + 86 | 78 | Botosani + 87 | 85 | Botshabelo + 88 | 102 | Bradford + 89 | 15 | Braslia + 90 | 84 | Bratislava + 91 | 49 | Brescia + 92 | 34 | Brest + 93 | 49 | Brindisi + 94 | 103 | Brockton + 95 | 78 | Bucuresti + 96 | 24 | Buenaventura + 97 | 76 | Bydgoszcz + 98 | 75 | Cabuyao + 99 | 74 | Callao + 100 | 105 | Cam Ranh + 101 | 103 | Cape Coral + 102 | 104 | Caracas + 103 | 60 | Carmen + 104 | 75 | Cavite + 105 | 35 | Cayenne + 106 | 60 | Celaya + 107 | 44 | Chandrapur + 108 | 92 | Changhwa + 109 | 23 | Changzhou + 110 | 44 | Chapra + 111 | 106 | Charlotte Amalie + 112 | 85 | Chatsworth + 113 | 86 | Cheju + 114 | 92 | Chiayi + 115 | 61 | Chisinau + 116 | 92 | Chungho + 117 | 45 | Cianjur + 118 | 45 | Ciomas + 119 | 45 | Ciparay + 120 | 103 | Citrus Heights + 121 | 41 | Citt del Vaticano + 122 | 73 | Ciudad del Este + 123 | 103 | Clarksville + 124 | 60 | Coacalco de Berriozbal + 125 | 60 | Coatzacoalcos + 126 | 103 | Compton + 127 | 22 | Coquimbo + 128 | 6 | Crdoba + 129 | 60 | Cuauhtmoc + 130 | 60 | Cuautla + 131 | 60 | Cuernavaca + 132 | 104 | Cuman + 133 | 76 | Czestochowa + 134 | 72 | Dadu + 135 | 103 | Dallas + 136 | 23 | Datong + 137 | 54 | Daugavpils + 138 | 75 | Davao + 139 | 23 | Daxian + 140 | 103 | Dayton + 141 | 69 | Deba Habe + 142 | 97 | Denizli + 143 | 12 | Dhaka + 144 | 44 | Dhule (Dhulia) + 145 | 23 | Dongying + 146 | 87 | Donostia-San Sebastin + 147 | 24 | Dos Quebradas + 148 | 38 | Duisburg + 149 | 102 | Dundee + 150 | 80 | Dzerzinsk + 151 | 67 | Ede + 152 | 69 | Effon-Alaiye + 153 | 14 | El Alto + 154 | 60 | El Fuerte + 156 | 80 | Elista + 155 | 103 | El Monte + 157 | 23 | Emeishan + 158 | 67 | Emmen + 159 | 23 | Enshi + 160 | 38 | Erlangen + 161 | 6 | Escobar + 162 | 46 | Esfahan + 163 | 97 | Eskisehir + 164 | 44 | Etawah + 165 | 6 | Ezeiza + 166 | 23 | Ezhou + 167 | 36 | Faaa + 168 | 92 | Fengshan + 169 | 44 | Firozabad + 170 | 24 | Florencia + 171 | 103 | Fontana + 172 
| 50 | Fukuyama + 173 | 99 | Funafuti + 174 | 23 | Fuyu + 175 | 23 | Fuzhou + 176 | 44 | Gandhinagar + 177 | 103 | Garden Grove + 178 | 103 | Garland + 179 | 20 | Gatineau + 180 | 97 | Gaziantep + 181 | 87 | Gijn + 182 | 75 | Gingoog + 183 | 15 | Goinia + 184 | 45 | Gorontalo + 185 | 103 | Grand Prairie + 186 | 9 | Graz + 187 | 103 | Greensboro + 188 | 60 | Guadalajara + 189 | 15 | Guaruj + 190 | 15 | guas Lindas de Gois + 191 | 44 | Gulbarga + 192 | 75 | Hagonoy + 193 | 23 | Haining + 194 | 105 | Haiphong + 195 | 44 | Haldia + 196 | 20 | Halifax + 197 | 44 | Halisahar + 198 | 38 | Halle/Saale + 199 | 23 | Hami + 200 | 68 | Hamilton + 201 | 105 | Hanoi + 202 | 60 | Hidalgo + 203 | 50 | Higashiosaka + 204 | 50 | Hino + 205 | 50 | Hiroshima + 206 | 107 | Hodeida + 207 | 23 | Hohhot + 208 | 44 | Hoshiarpur + 209 | 92 | Hsichuh + 210 | 23 | Huaian + 211 | 44 | Hubli-Dharwad + 212 | 60 | Huejutla de Reyes + 213 | 60 | Huixquilucan + 214 | 74 | Hunuco + 215 | 15 | Ibirit + 216 | 29 | Idfu + 217 | 69 | Ife + 218 | 69 | Ikerre + 219 | 75 | Iligan + 220 | 69 | Ilorin + 221 | 75 | Imus + 222 | 97 | Inegl + 223 | 59 | Ipoh + 224 | 50 | Isesaki + 225 | 80 | Ivanovo + 226 | 50 | Iwaki + 227 | 50 | Iwakuni + 228 | 50 | Iwatsuki + 229 | 50 | Izumisano + 230 | 88 | Jaffna + 231 | 44 | Jaipur + 232 | 45 | Jakarta + 233 | 53 | Jalib al-Shuyukh + 234 | 12 | Jamalpur + 235 | 80 | Jaroslavl + 236 | 76 | Jastrzebie-Zdrj + 237 | 82 | Jedda + 238 | 80 | Jelets + 239 | 44 | Jhansi + 240 | 23 | Jinchang + 241 | 23 | Jining + 242 | 23 | Jinzhou + 243 | 44 | Jodhpur + 244 | 85 | Johannesburg + 245 | 103 | Joliet + 246 | 60 | Jos Azueta + 247 | 15 | Juazeiro do Norte + 248 | 15 | Juiz de Fora + 249 | 23 | Junan + 250 | 60 | Jurez + 251 | 1 | Kabul + 252 | 69 | Kaduna + 253 | 50 | Kakamigahara + 254 | 80 | Kaliningrad + 255 | 76 | Kalisz + 256 | 50 | Kamakura + 257 | 44 | Kamarhati + 258 | 100 | Kamjanets-Podilskyi + 259 | 80 | Kamyin + 260 | 50 | Kanazawa + 261 | 44 | Kanchrapara + 262 | 103 | Kansas City + 263 | 44 | Karnal + 264 | 44 | Katihar + 265 | 46 | Kermanshah + 266 | 97 | Kilis + 267 | 85 | Kimberley + 268 | 86 | Kimchon + 269 | 81 | Kingstown + 270 | 80 | Kirovo-Tepetsk + 271 | 52 | Kisumu + 272 | 109 | Kitwe + 273 | 85 | Klerksdorp + 274 | 80 | Kolpino + 275 | 100 | Konotop + 276 | 50 | Koriyama + 277 | 23 | Korla + 278 | 80 | Korolev + 279 | 42 | Kowloon and New Kowloon + 280 | 108 | Kragujevac + 281 | 97 | Ktahya + 282 | 59 | Kuching + 283 | 44 | Kumbakonam + 284 | 50 | Kurashiki + 285 | 80 | Kurgan + 286 | 80 | Kursk + 287 | 50 | Kuwana + 291 | 23 | Laiwu + 292 | 103 | Lancaster + 293 | 23 | Laohekou + 288 | 60 | La Paz + 289 | 6 | La Plata + 294 | 75 | Lapu-Lapu + 295 | 103 | Laredo + 290 | 27 | La Romana + 296 | 91 | Lausanne + 297 | 34 | Le Mans + 298 | 23 | Lengshuijiang + 299 | 23 | Leshan + 300 | 20 | Lethbridge + 301 | 45 | Lhokseumawe + 302 | 23 | Liaocheng + 303 | 54 | Liepaja + 304 | 58 | Lilongwe + 305 | 74 | Lima + 306 | 103 | Lincoln + 307 | 9 | Linz + 308 | 80 | Lipetsk + 309 | 49 | Livorno + 310 | 80 | Ljubertsy + 311 | 28 | Loja + 312 | 102 | London + 314 | 76 | Lublin + 315 | 25 | Lubumbashi + 316 | 92 | Lungtan + 317 | 15 | Luzinia + 318 | 45 | Madiun + 319 | 57 | Mahajanga + 320 | 80 | Maikop + 321 | 90 | Malm + 322 | 103 | Manchester + 323 | 75 | Mandaluyong + 324 | 72 | Mandi Bahauddin + 325 | 38 | Mannheim + 326 | 104 | Maracabo + 327 | 72 | Mardan + 328 | 15 | Maring + 329 | 71 | Masqat + 330 | 60 | Matamoros + 331 | 50 | Matsue + 332 | 23 | Meixian + 333 | 103 | Memphis + 334 | 6 
| Merlo + 335 | 60 | Mexicali + 336 | 44 | Miraj + 337 | 29 | Mit Ghamr + 338 | 50 | Miyakonojo + 339 | 13 | Mogiljov + 340 | 13 | Molodetno + 341 | 60 | Monclova + 342 | 64 | Monywa + 343 | 80 | Moscow + 344 | 47 | Mosul + 345 | 100 | Mukateve + 346 | 44 | Munger (Monghyr) + 347 | 93 | Mwanza + 348 | 25 | Mwene-Ditu + 349 | 64 | Myingyan + 350 | 44 | Mysore + 351 | 63 | Naala-Porto + 352 | 80 | Nabereznyje Telny + 353 | 62 | Nador + 354 | 44 | Nagaon + 355 | 50 | Nagareyama + 356 | 46 | Najafabad + 357 | 86 | Naju + 358 | 94 | Nakhon Sawan + 359 | 105 | Nam Dinh + 360 | 4 | Namibe + 361 | 92 | Nantou + 362 | 23 | Nanyang + 363 | 21 | NDjamna + 364 | 85 | Newcastle + 313 | 20 | New London + 365 | 60 | Nezahualcyotl + 366 | 105 | Nha Trang + 367 | 80 | Niznekamsk + 368 | 108 | Novi Sad + 369 | 80 | Novoterkassk + 370 | 95 | Nukualofa + 371 | 40 | Nuuk + 372 | 52 | Nyeri + 373 | 104 | Ocumare del Tuy + 374 | 69 | Ogbomosho + 375 | 72 | Okara + 376 | 50 | Okayama + 377 | 50 | Okinawa + 378 | 26 | Olomouc + 379 | 89 | Omdurman + 380 | 50 | Omiya + 381 | 69 | Ondo + 382 | 50 | Onomichi + 383 | 20 | Oshawa + 384 | 97 | Osmaniye + 385 | 100 | ostka + 386 | 50 | Otsu + 387 | 33 | Oulu + 388 | 87 | Ourense (Orense) + 389 | 69 | Owo + 390 | 69 | Oyo + 391 | 75 | Ozamis + 392 | 85 | Paarl + 393 | 60 | Pachuca de Soto + 394 | 94 | Pak Kret + 395 | 44 | Palghat (Palakkad) + 396 | 45 | Pangkal Pinang + 397 | 36 | Papeete + 398 | 44 | Parbhani + 399 | 44 | Pathankot + 400 | 44 | Patiala + 401 | 39 | Patras + 402 | 51 | Pavlodar + 403 | 45 | Pemalang + 404 | 103 | Peoria + 405 | 24 | Pereira + 406 | 18 | Phnom Penh + 407 | 23 | Pingxiang + 408 | 80 | Pjatigorsk + 409 | 76 | Plock + 410 | 15 | Po + 411 | 77 | Ponce + 412 | 45 | Pontianak + 413 | 15 | Poos de Caldas + 414 | 28 | Portoviejo + 415 | 45 | Probolinggo + 416 | 44 | Pudukkottai + 417 | 44 | Pune + 418 | 44 | Purnea (Purnia) + 419 | 45 | Purwakarta + 420 | 70 | Pyongyang + 421 | 29 | Qalyub + 422 | 23 | Qinhuangdao + 423 | 46 | Qomsheh + 424 | 6 | Quilmes + 425 | 44 | Rae Bareli + 426 | 44 | Rajkot + 427 | 44 | Rampur + 428 | 22 | Rancagua + 429 | 44 | Ranchi + 430 | 20 | Richmond Hill + 431 | 15 | Rio Claro + 432 | 23 | Rizhao + 433 | 103 | Roanoke + 434 | 28 | Robamba + 435 | 103 | Rockford + 436 | 17 | Ruse + 437 | 85 | Rustenburg + 439 | 38 | Saarbrcken + 440 | 50 | Sagamihara + 442 | 79 | Saint-Denis + 441 | 103 | Saint Louis + 443 | 62 | Sal + 444 | 71 | Salala + 445 | 60 | Salamanca + 446 | 103 | Salinas + 447 | 9 | Salzburg + 448 | 44 | Sambhal + 455 | 107 | Sanaa + 449 | 103 | San Bernardino + 451 | 60 | San Felipe del Progreso + 450 | 27 | San Felipe de Puerto Plata + 452 | 60 | San Juan Bautista Tuxtepec + 453 | 73 | San Lorenzo + 454 | 6 | San Miguel de Tucumn + 456 | 15 | Santa Brbara dOeste + 457 | 6 | Santa F + 458 | 75 | Santa Rosa + 459 | 87 | Santiago de Compostela + 460 | 27 | Santiago de los Caballeros + 461 | 15 | Santo Andr + 462 | 23 | Sanya + 463 | 50 | Sasebo + 464 | 44 | Satna + 465 | 29 | Sawhaj + 466 | 80 | Serpuhov + 467 | 46 | Shahr-e Kord + 468 | 23 | Shanwei + 469 | 23 | Shaoguan + 470 | 101 | Sharja + 471 | 23 | Shenzhen + 438 | 67 | s-Hertogenbosch + 472 | 72 | Shikarpur + 473 | 44 | Shimoga + 474 | 50 | Shimonoseki + 475 | 44 | Shivapuri + 476 | 29 | Shubra al-Khayma + 477 | 38 | Siegen + 478 | 44 | Siliguri (Shiliguri) + 479 | 100 | Simferopol + 480 | 24 | Sincelejo + 481 | 46 | Sirjan + 482 | 97 | Sivas + 483 | 2 | Skikda + 484 | 80 | Smolensk + 485 | 15 | So Bernardo do Campo + 487 | 24 | Sogamoso + 488 | 69 | 
Sokoto + 486 | 15 | So Leopoldo + 489 | 94 | Songkhla + 490 | 15 | Sorocaba + 491 | 85 | Soshanguve + 492 | 96 | Sousse + 494 | 102 | Southampton + 495 | 102 | Southend-on-Sea + 493 | 5 | South Hill + 496 | 102 | Southport + 497 | 85 | Springs + 498 | 17 | Stara Zagora + 499 | 103 | Sterling Heights + 500 | 102 | Stockport + 501 | 14 | Sucre + 502 | 23 | Suihua + 503 | 74 | Sullana + 504 | 97 | Sultanbeyli + 505 | 10 | Sumqayit + 506 | 100 | Sumy + 507 | 59 | Sungai Petani + 508 | 103 | Sunnyvale + 509 | 45 | Surakarta + 510 | 80 | Syktyvkar + 511 | 49 | Syrakusa + 512 | 43 | Szkesfehrvr + 513 | 93 | Tabora + 514 | 46 | Tabriz + 515 | 82 | Tabuk + 516 | 3 | Tafuna + 517 | 75 | Taguig + 518 | 107 | Taizz + 519 | 75 | Talavera + 520 | 103 | Tallahassee + 521 | 50 | Tama + 522 | 44 | Tambaram + 523 | 75 | Tanauan + 524 | 6 | Tandil + 525 | 12 | Tangail + 526 | 92 | Tanshui + 527 | 75 | Tanza + 528 | 75 | Tarlac + 529 | 97 | Tarsus + 530 | 30 | Tartu + 531 | 80 | Teboksary + 532 | 45 | Tegal + 533 | 48 | Tel Aviv-Jaffa + 534 | 63 | Tete + 535 | 23 | Tianjin + 536 | 23 | Tiefa + 537 | 23 | Tieli + 538 | 97 | Tokat + 539 | 86 | Tonghae + 540 | 23 | Tongliao + 541 | 60 | Torren + 542 | 92 | Touliu + 543 | 34 | Toulon + 544 | 34 | Toulouse + 545 | 32 | Trshavn + 546 | 92 | Tsaotun + 547 | 50 | Tsuyama + 548 | 75 | Tuguegarao + 549 | 76 | Tychy + 550 | 44 | Udaipur + 551 | 49 | Udine + 552 | 50 | Ueda + 553 | 86 | Uijongbu + 554 | 44 | Uluberia + 555 | 50 | Urawa + 556 | 60 | Uruapan + 557 | 97 | Usak + 558 | 80 | Usolje-Sibirskoje + 559 | 44 | Uttarpara-Kotrung + 560 | 55 | Vaduz + 561 | 104 | Valencia + 562 | 104 | Valle de la Pascua + 563 | 60 | Valle de Santiago + 564 | 44 | Valparai + 565 | 20 | Vancouver + 566 | 44 | Varanasi (Benares) + 567 | 6 | Vicente Lpez + 568 | 44 | Vijayawada + 569 | 15 | Vila Velha + 570 | 56 | Vilnius + 571 | 105 | Vinh + 572 | 15 | Vitria de Santo Anto + 573 | 103 | Warren + 574 | 23 | Weifang + 575 | 38 | Witten + 576 | 8 | Woodridge + 577 | 76 | Wroclaw + 578 | 23 | Xiangfan + 579 | 23 | Xiangtan + 580 | 23 | Xintai + 581 | 23 | Xinxiang + 582 | 44 | Yamuna Nagar + 583 | 65 | Yangor + 584 | 23 | Yantai + 585 | 19 | Yaound + 586 | 7 | Yerevan + 587 | 23 | Yinchuan + 588 | 23 | Yingkou + 589 | 102 | York + 590 | 23 | Yuncheng + 591 | 23 | Yuzhou + 592 | 23 | Zalantun + 593 | 93 | Zanzibar + 594 | 23 | Zaoyang + 595 | 60 | Zapopan + 596 | 69 | Zaria + 597 | 80 | Zeleznogorsk + 598 | 51 | Zhezqazghan + 599 | 23 | Zhoushan + 600 | 83 | Ziguinchor +(600 rows) -DROP VIEW client_customer_master; +DROP VIEW customer_master; DROP VIEW test_view; -DROP table client_customer cascade; +DROP table customer cascade; DROP table address cascade; DROP table city cascade; DROP table country cascade; DROP CLIENT MASTER KEY MyCMK CASCADE; -NOTICE: drop cascades to column setting: mycek +NOTICE: drop cascades to column encryption key: mycek \! 
gs_ktool -d all DELETE ALL 1 diff --git a/src/test/regress/expected/ce_with.out b/src/test/regress/expected/ce_with.out index bf6aec1fa..e059f95ec 100644 --- a/src/test/regress/expected/ce_with.out +++ b/src/test/regress/expected/ce_with.out @@ -5,7 +5,7 @@ DELETE ALL GENERATE 1 DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; -NOTICE: global setting "mycmk" does not exist +NOTICE: client master key "mycmk" does not exist CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE COLUMN ENCRYPTION KEY MyCEK1 WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); diff --git a/src/test/regress/expected/create_compositetype.out b/src/test/regress/expected/create_compositetype.out index ca8dc90dc..12da8ec08 100644 --- a/src/test/regress/expected/create_compositetype.out +++ b/src/test/regress/expected/create_compositetype.out @@ -1018,6 +1018,111 @@ CONTEXT: referenced column: test_func_varray (0,1,2,a,abc) (1 row) +--NULLTEST FOR AFORMAT +set behavior_compat_options='aformat_null_test'; +explain (verbose, costs off) +select r, r is null as isnull, r is not null as isnotnull +from (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Values Scan on "*VALUES*" + Output: ROW("*VALUES*".column1, "*VALUES*".column2), (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL)), (NOT (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL))) +(2 rows) + +select r, r is null as isnull, r is not null as isnotnull +from (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); + r | isnull | isnotnull +-------------+--------+----------- + (1,"(1,2)") | f | t + (1,"(,)") | f | t + (1,) | f | t + (,"(1,2)") | f | t + (,"(,)") | f | t + (,) | t | f +(6 rows) + +explain (verbose, costs off) +with r(a,b) as + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Values Scan on "*VALUES*" + Output: ROW("*VALUES*".column1, "*VALUES*".column2), (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL)), (NOT (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL))) +(2 rows) + +with r(a,b) as + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + r | isnull | isnotnull +-------------+--------+----------- + (1,"(1,2)") | f | t + (1,"(,)") | f | t + (1,) | f | t + (,"(1,2)") | f | t + (,"(,)") | f | t + (,) | t | f +(6 rows) + +explain (verbose, costs off) +with r(a,b) as materialized + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), 
(null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + QUERY PLAN +---------------------------------------------------------- + CTE Scan on r + Output: r.*, (r.* IS NULL), (r.* IS NOT NULL) + CTE r + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1, "*VALUES*".column2 +(5 rows) + +with r(a,b) as materialized + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + r | isnull | isnotnull +-------------+--------+----------- + (1,"(1,2)") | f | t + (1,"(,)") | f | t + (1,) | f | t + (,"(1,2)") | f | t + (,"(,)") | f | t + (,) | t | f +(6 rows) + +declare +type ta as record (a int, b int); +va ta; +begin +va.b = 2; +if va is not null then +raise info '1111'; +else +raise info '2222'; +end if; +end; +/ +INFO: 1111 +select r, r is null as isnull, r is not null as isnotnull +from (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); + r | isnull | isnotnull +-------------+--------+----------- + (1,"(1,2)") | f | t + (1,"(,)") | f | t + (1,) | f | t + (,"(1,2)") | f | t + (,"(,)") | f | t + (,) | t | f +(6 rows) + +reset behavior_compat_options; drop function avg_transfn1(); drop function test_func_VARRAY(); drop type avg_state cascade; diff --git a/src/test/regress/expected/create_function.out b/src/test/regress/expected/create_function.out index 3f25fe4e4..e72d2a08a 100644 --- a/src/test/regress/expected/create_function.out +++ b/src/test/regress/expected/create_function.out @@ -3,4 +3,64 @@ AS 'SELECT $1 + $2;' LANGUAGE SQL IMMUTABLE SHIPPABLE RETURNS NULL ON NULL INPUT; -drop function create_function_test; +create or replace procedure proc_commit +is +begin +commit; +end; +/ +create or replace procedure proc_test +IMMUTABLE +is +begin +proc_commit(); +end; +/ +CREATE OR REPLACE FUNCTION public.func_jbpm_createtime( i_businessid IN VARCHAR2 ) + RETURN timestamp without time zone NOT SHIPPABLE NOT FENCED +AS DECLARE v_tm TIMESTAMP ; +BEGIN + BEGIN + SELECT + t.start_time INTO v_tm + FROM + dams_wf_process t + WHERE + t.business_id = i_businessid ; + + EXCEPTION + WHEN no_data_found THEN + SELECT + t.start_time INTO v_tm + FROM + dams_wf_hist_process t + WHERE + t.business_id = i_businessid ; + + END ; + + RETURN v_tm ; + +END ; +/ +call proc_test(); +ERROR: commit/rollback/savepoint is not allowed in a non-volatile function +CONTEXT: PL/pgSQL function proc_commit() line 3 at COMMIT +SQL statement "CALL proc_commit()" +PL/pgSQL function proc_test() line 3 at PERFORM +create or replace procedure p1(a out varchar2,b int) is +begin +a:='aa'||b; +raise info 'a:%',a; +end; +/ +declare +var varchar2; +begin +var=p1(:var,3); +raise info 'var:%',var; +end; +/ +INFO: a:aa3 +CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment +INFO: var:aa3 diff --git a/src/test/regress/expected/create_procedure.out b/src/test/regress/expected/create_procedure.out index 16035c278..c9cc3a734 100644 --- a/src/test/regress/expected/create_procedure.out +++ b/src/test/regress/expected/create_procedure.out @@ -1,55 +1,115 @@ -create procedure test_procedure_test(int,int) -SHIPPABLE IMMUTABLE -as -begin - select $1 + $2; -end; -/ - -create or replace function test2(space boolean default true) return integer as -declare result integer; -begin -if(space is null) then -perform oracle."put$json_printer.pretty_print".test1(12) into result; -return result; -else -return 
oracle."put$json_printer.pretty_print".test1(15); -end if; -end; -/ +create procedure test_procedure_test(int,int) +SHIPPABLE IMMUTABLE +as +begin + select $1 + $2; +end; +/ + +create or replace function test2(space boolean default true) return integer as +declare result integer; +begin +if(space is null) then +perform oracle."put$json_printer.pretty_print".test1(12) into result; +return result; +else +return oracle."put$json_printer.pretty_print".test1(15); +end if; +end; +/ ERROR: schema "oracle" does not exist CONTEXT: compilation of PL/pgSQL function "test2" near line 2 - -drop function test2; + +drop function test2; ERROR: function test2 does not exist -drop procedure test_procedure_test; - - -create schema "test.test.test"; - -CREATE OR REPLACE PROCEDURE "test.test.test".prc_add -( - param1 IN INTEGER, - param2 IN OUT INTEGER -) -AS -BEGIN - param2:= param1 + param2; - dbe_output.print_line('result is: '||to_char(param2)); -END; -/ - -CREATE OR REPLACE PROCEDURE "test.test.test".prc_add2 -( - param1 IN INTEGER, - param2 IN INTEGER -) -AS -BEGIN - "test.test.test".prc_add(param1, param2); -END; -/ - -drop procedure "test.test.test".prc_add2; -drop procedure "test.test.test".prc_add; -drop schema "test.test.test"; +drop procedure test_procedure_test; + + +create schema "test.test.test"; + +CREATE OR REPLACE PROCEDURE "test.test.test".prc_add +( + param1 IN INTEGER, + param2 IN OUT INTEGER +) +AS +BEGIN + param2:= param1 + param2; + dbe_output.print_line('result is: '||to_char(param2)); +END; +/ + +CREATE OR REPLACE PROCEDURE "test.test.test".prc_add2 +( + param1 IN INTEGER, + param2 IN INTEGER +) +AS +BEGIN + "test.test.test".prc_add(param1, param2); +END; +/ + +drop procedure "test.test.test".prc_add2; +drop procedure "test.test.test".prc_add; +drop schema "test.test.test"; + +set behavior_compat_options='allow_procedure_compile_check'; +drop table if exists bbb; +NOTICE: table "bbb" does not exist, skipping +drop table if exists aaa; +NOTICE: table "aaa" does not exist, skipping +CREATE TABLE bbb(id1 INT, id2 INT, id3 INT); +CREATE TABLE aaa(id1 INT, id2 INT, id3 INT); +CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS +$$ +DECLARE +v int; +BEGIN +select count(1) INTO v from bbb where id1 = NEW.id1; +RAISE INFO 'v1: : %' ,v; +RETURN NEW; +END +$$ LANGUAGE PLPGSQL; +CREATE TRIGGER insert_trigger11 +BEFORE INSERT ON aaa +FOR EACH ROW +EXECUTE PROCEDURE tri_insert_func(); +insert into aaa values(1,2,3); +INFO: v1: : 0 +select * from aaa; + id1 | id2 | id3 +-----+-----+----- + 1 | 2 | 3 +(1 row) + +drop TRIGGER insert_trigger11 ON aaa; +drop FUNCTION tri_insert_func; +drop table if exists bbb; +drop table if exists aaa; +create or replace function checkqweerr(a integer) returns int as $$ +declare +b int; +begin +select multi_call211(a) + 1 into b; +return b; +end; +$$ language plpgsql; +ERROR: compile failed when parse the query: select multi_call211(a) + 1 +DETAIL: function multi_call211(integer) does not exist +CONTEXT: compilation of PL/pgSQL function "checkqweerr" near line 5 +call checkqweerr(1); +ERROR: function "checkqweerr" doesn't exist +create or replace procedure checkipoooowdsd2() as +declare +c1 sys_refcursor; +begin +open c1 for delete from tb_test111; +end; +/ +ERROR: compile failed when parse the query: SELECT delete from tb_test111 +DETAIL: relation "tb_test111" does not exist on datanode1 +CONTEXT: compilation of PL/pgSQL function "checkipoooowdsd2" near line 5 +call checkipoooowdsd2(); +ERROR: function "checkipoooowdsd2" doesn't exist +set 
behavior_compat_options=''; diff --git a/src/test/regress/expected/create_table_like_2.out b/src/test/regress/expected/create_table_like_2.out index 13a218e0c..04c6ab0b8 100644 --- a/src/test/regress/expected/create_table_like_2.out +++ b/src/test/regress/expected/create_table_like_2.out @@ -303,15 +303,15 @@ ERROR: CREATE TABLE ... WITH OIDS is not yet supported. drop table if exists ctltestf, ctltestg; NOTICE: table "ctltestf" does not exist, skipping NOTICE: table "ctltestg" does not exist, skipping -create schema DTS2019071912119; -CREATE OR REPLACE FUNCTION DTS2019071912119.func_increment_plsql(i integer) RETURNS integer AS $$ +create schema testschema; +CREATE OR REPLACE FUNCTION testschema.func_increment_plsql(i integer) RETURNS integer AS $$ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql IMMUTABLE ; -create table DTS2019071912119.test1 (a int , b int default DTS2019071912119.func_increment_plsql(1)); -alter schema DTS2019071912119 rename to DTS2019071912119_bak; -create table DTS2019071912119_bak.test2 (like DTS2019071912119_bak.test1 including all); -drop table DTS2019071912119_bak.test2; -drop table DTS2019071912119_bak.test1; -drop function DTS2019071912119_bak.func_increment_plsql(); +create table testschema.test1 (a int , b int default testschema.func_increment_plsql(1)); +alter schema testschema rename to TESTTABLE_bak; +create table TESTTABLE_bak.test2 (like TESTTABLE_bak.test1 including all); +drop table TESTTABLE_bak.test2; +drop table TESTTABLE_bak.test1; +drop function TESTTABLE_bak.func_increment_plsql(); diff --git a/src/test/regress/expected/cte_inline.out b/src/test/regress/expected/cte_inline.out new file mode 100644 index 000000000..2a1c58ea6 --- /dev/null +++ b/src/test/regress/expected/cte_inline.out @@ -0,0 +1,510 @@ +create schema cte_inline; +set current_schema = cte_inline; +-- Set up some simple test tables +CREATE TABLE test ( + f1 integer, + f2 integer, + f3 float +); +INSERT INTO test VALUES (1, 2, 3); +INSERT INTO test VALUES (2, 3, 4); +INSERT INTO test VALUES (3, 4, 5); +INSERT INTO test VALUES (1, 1, 1); +INSERT INTO test VALUES (2, 2, 2); +INSERT INTO test VALUES (3, 3, 3); +INSERT INTO test VALUES (6, 7, 8); +INSERT INTO test VALUES (8, 9, NULL); +CREATE TABLE test_1 (like test); +-- +-- Tests for CTE inlining behavior +-- +-- Basic subquery that can be inlined +explain (verbose, costs off) +with x as (select * from (select f1 from test) ss) +select * from x where f1 = 1; + QUERY PLAN +----------------------------- + Seq Scan on cte_inline.test + Output: test.f1 + Filter: (test.f1 = 1) +(3 rows) + +-- Deep deep subquery +explain (verbose, costs off) +with a as ( + with b as ( + with c as ( + with d as (select * from ( + with z as ( + with y as ( + with x as (select f1 from test) + select * from x) + select * from y) + select * from z) + ) select * from d) + select * from c) + select * from b) +select * from a where f1 = 1; + QUERY PLAN +----------------------------- + Seq Scan on cte_inline.test + Output: test.f1 + Filter: (test.f1 = 1) +(3 rows) + +-- Explicitly request materialization +explain (verbose, costs off) +with x as materialized (select * from (select f1 from test) ss) +select * from x where f1 = 1; + QUERY PLAN +------------------------------------- + CTE Scan on x + Output: x.f1 + Filter: (x.f1 = 1) + CTE x + -> Seq Scan on cte_inline.test + Output: test.f1 +(6 rows) + +-- Stable functions are safe to inline +explain (verbose, costs off) +with x as (select * from (select f1, now() from test) ss) +select * from x where f1 = 1; + QUERY PLAN 
+----------------------------- + Seq Scan on cte_inline.test + Output: test.f1, now() + Filter: (test.f1 = 1) +(3 rows) + +-- Volatile functions prevent inlining +explain (verbose, costs off) +with x as (select * from (select f1, random() from test) ss) +select * from x where f1 = 1; + QUERY PLAN +------------------------------------- + CTE Scan on x + Output: x.f1, x.random + Filter: (x.f1 = 1) + CTE x + -> Seq Scan on cte_inline.test + Output: test.f1, random() +(6 rows) + +-- SELECT FOR UPDATE/SHARE cannot be inlined +explain (verbose, costs off) +with x as (select * from (select f1 from test for update) ss) +select * from x where f1 = 1; + QUERY PLAN +-------------------------------------------------- + CTE Scan on x + Output: x.f1 + Filter: (x.f1 = 1) + CTE x + -> Subquery Scan on ss + Output: ss.f1 + -> LockRows + Output: test.f1, test.ctid + -> Seq Scan on cte_inline.test + Output: test.f1, test.ctid +(10 rows) + +explain (verbose, costs off) +with x as not materialized (select * from (select f1 from test for share) ss) +select * from x where f1 = 1; + QUERY PLAN +-------------------------------------------------- + CTE Scan on x + Output: x.f1 + Filter: (x.f1 = 1) + CTE x + -> Subquery Scan on ss + Output: ss.f1 + -> LockRows + Output: test.f1, test.ctid + -> Seq Scan on cte_inline.test + Output: test.f1, test.ctid +(10 rows) + +-- IUDs cannot be inlined +explain (verbose, costs off) +with x as not materialized (insert into test_1 values(1,2,4) returning *) +select * from x; + QUERY PLAN +--------------------------------------------------- + CTE Scan on x + Output: x.f1, x.f2, x.f3 + CTE x + -> Insert on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 + -> Result + Output: 1, 2, 4::double precision +(7 rows) + +explain (verbose, costs off) +with x as not materialized (update test_1 set f3 = 3 where f1 = 1 returning *) +select * from x; + QUERY PLAN +-------------------------------------------------------------------------------- + CTE Scan on x + Output: x.f1, x.f2, x.f3 + CTE x + -> Update on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1, test_1.f2, 3::double precision, test_1.ctid + Filter: (test_1.f1 = 1) +(8 rows) + +explain (verbose, costs off) +with x as not materialized (delete from test_1 returning *) +select * from x; + QUERY PLAN +--------------------------------------------------- + CTE Scan on x + Output: x.f1, x.f2, x.f3 + CTE x + -> Delete on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 + -> Seq Scan on cte_inline.test_1 + Output: test_1.ctid +(7 rows) + +-- Multiply-referenced CTEs are inlined only when requested +explain (verbose, costs off) +with x as (select * from (select f1, now() as n from test) ss) +select * from x, x x2 where x.n = x2.n; + QUERY PLAN +------------------------------------- + Hash Join + Output: x.f1, x.n, x2.f1, x2.n + Hash Cond: (x.n = x2.n) + CTE x + -> Seq Scan on cte_inline.test + Output: test.f1, now() + -> CTE Scan on x + Output: x.f1, x.n + -> Hash + Output: x2.f1, x2.n + -> CTE Scan on x x2 + Output: x2.f1, x2.n +(12 rows) + +explain (verbose, costs off) +with x as not materialized (select * from (select f1, now() as n from test) ss) +select * from x, x x2 where x.n = x2.n; + QUERY PLAN +---------------------------------------------------------------------------------- + Result + Output: cte_inline.test.f1, now(), cte_inline.test.f1, now() + One-Time Filter: (now() = now()) + -> Nested Loop + Output: cte_inline.test.f1, cte_inline.test.f1 + 
-> Seq Scan on cte_inline.test + Output: cte_inline.test.f1, cte_inline.test.f2, cte_inline.test.f3 + -> Materialize + Output: cte_inline.test.f1 + -> Seq Scan on cte_inline.test + Output: cte_inline.test.f1 +(11 rows) + +-- Check handling of outer references +explain (verbose, costs off) +with x as (select * from test) +select * from (with y as (select * from x) select * from y) ss; + QUERY PLAN +--------------------------------------------- + CTE Scan on x + Output: x.f1, x.f2, x.f3 + CTE x + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 +(5 rows) + +explain (verbose, costs off) +with x as materialized (select * from test) +select * from (with y as (select * from x) select * from y) ss; + QUERY PLAN +--------------------------------------------- + CTE Scan on x + Output: x.f1, x.f2, x.f3 + CTE x + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 +(5 rows) + +-- Ensure that we inline the correct CTE when there are +-- multiple CTEs with the same name +explain (verbose, costs off) +with x as (select 1 as y) +select * from (with x as (select 2 as y) select * from x) ss; + QUERY PLAN +------------- + Result + Output: 2 +(2 rows) + +-- Row marks are not pushed into CTEs (not supported by openGauss) +explain (verbose, costs off) +with x as (select * from test) +select * from x for update; +ERROR: SELECT FOR UPDATE/SHARE cannot be applied to a WITH query +-- For CTEs in subquery +explain (verbose, costs off) +select * from (with x as (select * from test_1) select x.f1 from x) tmp where tmp.f1 = 1; + QUERY PLAN +------------------------------- + Seq Scan on cte_inline.test_1 + Output: test_1.f1 + Filter: (test_1.f1 = 1) +(3 rows) + +explain (verbose, costs off) +select * from (with x as materialized (select * from test_1) select x.f1 from x) tmp where tmp.f1 = 1; + QUERY PLAN +--------------------------------------------------- + CTE Scan on x + Output: x.f1 + Filter: (x.f1 = 1) + CTE x + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 +(6 rows) + +-- CTEs within in/any/some sublinks are handled correctly +explain (verbose, costs off) +select * from test where test.f1 in +(with x as (select * from test_1) select x.f1 from x); + QUERY PLAN +------------------------------------------------- + Hash Join + Output: test.f1, test.f2, test.f3 + Hash Cond: (test.f1 = test_1.f1) + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + -> Hash + Output: test_1.f1 + -> HashAggregate + Output: test_1.f1 + Group By Key: test_1.f1 + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1 +(12 rows) + +explain (verbose, costs off) +select * from test where test.f1 in +(with x as materialized (select * from test_1) select x.f1 from x); + QUERY PLAN +--------------------------------------------------------------------- + Hash Join + Output: test.f1, test.f2, test.f3 + Hash Cond: (test.f1 = x.f1) + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + -> Hash + Output: x.f1 + -> HashAggregate + Output: x.f1 + Group By Key: x.f1 + -> CTE Scan on x + Output: x.f1 + CTE x + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 +(15 rows) + +explain (verbose, costs off) +select * from test where test.f1 = any +(with x as (select * from test_1) select x.f1 from x); + QUERY PLAN +------------------------------------------------- + Hash Join + Output: test.f1, test.f2, test.f3 + Hash Cond: (test.f1 = test_1.f1) + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + -> Hash + Output: test_1.f1 + -> 
HashAggregate + Output: test_1.f1 + Group By Key: test_1.f1 + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1 +(12 rows) + +explain (verbose, costs off) +select * from test where test.f1 = any +(with x as materialized (select * from test_1) select x.f1 from x); + QUERY PLAN +--------------------------------------------------------------------- + Hash Join + Output: test.f1, test.f2, test.f3 + Hash Cond: (test.f1 = x.f1) + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + -> Hash + Output: x.f1 + -> HashAggregate + Output: x.f1 + Group By Key: x.f1 + -> CTE Scan on x + Output: x.f1 + CTE x + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 +(15 rows) + +-- not expanded subquery +explain (verbose, costs off) +select * from test where test.f1 = any +(with x as (select * from test_1) select x.f1 from x); + QUERY PLAN +------------------------------------------------- + Hash Join + Output: test.f1, test.f2, test.f3 + Hash Cond: (test.f1 = test_1.f1) + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + -> Hash + Output: test_1.f1 + -> HashAggregate + Output: test_1.f1 + Group By Key: test_1.f1 + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1 +(12 rows) + +explain (verbose, costs off) +select * from test where test.f1 = any +(with x as materialized (select * from test_1) select /*+ no_expand */ x.f1 from x); + QUERY PLAN +----------------------------------------------------------- + Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + Filter: (hashed SubPlan 2) + SubPlan 2 + -> CTE Scan on x + Output: x.f1 + CTE x + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1, test_1.f2, test_1.f3 +(9 rows) + +explain (verbose, costs off) +select * from test where exists +(with x as (select * from test_1) select /*+ no_expand */ x.f1 from x where test.f1 = x.f1); + QUERY PLAN +--------------------------------------------------------- + Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + Filter: (alternatives: SubPlan 1 or hashed SubPlan 2) + SubPlan 1 + -> Seq Scan on cte_inline.test_1 + Filter: (test.f1 = cte_inline.test_1.f1) + SubPlan 2 + -> Seq Scan on cte_inline.test_1 + Output: cte_inline.test_1.f1 +(9 rows) + +-- intargetlist rewrite +explain (verbose, costs off) +select * from test where test.f1 = (with x as (select * from test_1) select x.f2 from x where x.f2 = test.f2 and x.f2 < 10 order by 1 limit 1) and test.f2 < 50 order by 1,2,3; + QUERY PLAN +---------------------------------------------------------------------------- + Sort + Output: test.f1, test.f2, test.f3 + Sort Key: test.f1, test.f2, test.f3 + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + Filter: ((test.f2 < 50) AND (test.f1 = (SubPlan 1))) + SubPlan 1 + -> Limit + Output: test_1.f2 + -> Seq Scan on cte_inline.test_1 + Output: test_1.f2 + Filter: ((test_1.f2 < 10) AND (test_1.f2 = test.f2)) +(12 rows) + +explain (verbose, costs off) +select * from test where test.f1 = (with x as materialized (select * from test_1) select x.f2 from x where x.f2 = test.f2 and x.f2 < 10 order by 1 limit 1) and test.f2 < 50 order by 1,2,3; + QUERY PLAN +------------------------------------------------------------------ + Sort + Output: test.f1, test.f2, test.f3 + Sort Key: test.f1, test.f2, test.f3 + -> Seq Scan on cte_inline.test + Output: test.f1, test.f2, test.f3 + Filter: ((test.f2 < 50) AND (test.f1 = (SubPlan 2))) + SubPlan 2 + -> Limit + Output: x.f2 + CTE x + -> Seq Scan on cte_inline.test_1 + Output: test_1.f1, test_1.f2, 
test_1.f3 + -> CTE Scan on x + Output: x.f2 + Filter: ((x.f2 < 10) AND (x.f2 = test.f2)) +(15 rows) + +-- unreferenced cte containing DML +explain (verbose, costs off) +with x as (select f1 from test), +y as (insert into test_1 default values) +select * from x; + QUERY PLAN +------------------------------------------------------------------------------ + Seq Scan on cte_inline.test + Output: test.f1 + CTE y + -> Insert on cte_inline.test_1 + -> Result + Output: NULL::integer, NULL::integer, NULL::double precision +(6 rows) + +explain (verbose, costs off) +with a as( with z as (insert into test default values) select 1) +select 1; +ERROR: WITH clause containing a data-modifying statement must be at the top level +LINE 2: with a as( with z as (insert into test default values) selec... + ^ +-- a cte with a subquery referenced in a grouping function will not be inlined +explain (verbose, costs off) +WITH cte AS not materialized ( + SELECT + ( + CASE WHEN ( + NOT EXISTS ( + select + * + from + test + ) + ) THEN ('P') END + ) col + FROM + test_1 +) +SELECT + col, GROUPING(col) +FROM + cte +GROUP BY + GROUPING SETS(col); + QUERY PLAN +------------------------------------------------------------------------- + HashAggregate + Output: cte.col, GROUPING(cte.col) + Group By Key: cte.col + CTE cte + -> Seq Scan on cte_inline.test_1 + Output: CASE WHEN (NOT $0) THEN 'P'::text ELSE NULL::text END + InitPlan 1 (returns $0) + -> Seq Scan on cte_inline.test + -> CTE Scan on cte + Output: cte.col +(10 rows) + +drop schema cte_inline cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table test +drop cascades to table test_1 diff --git a/src/test/regress/expected/dbe_scheduler.out b/src/test/regress/expected/dbe_scheduler.out new file mode 100644 index 000000000..ec6575c92 --- /dev/null +++ b/src/test/regress/expected/dbe_scheduler.out @@ -0,0 +1,1930 @@ +-- check define_program_argument +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test +(6 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +select DBE_SCHEDULER.define_program_argument('program1', 0, 'arg0', 'int', 16); +ERROR: Argument position specified is out of range. +DETAIL: Argument 0 should within range (0, 3]. +CONTEXT: referenced column: define_program_argument +select DBE_SCHEDULER.define_program_argument('program1', 4, 'arg4', 'int', 16); +ERROR: Argument position specified is out of range. +DETAIL: Argument 4 should within range (0, 3]. +CONTEXT: referenced column: define_program_argument +select DBE_SCHEDULER.define_program_argument('program2', 1, 'arg1', 'boolean', false); +ERROR: Can not find attribute 'object_type' of object name 'program2'. 
+DETAIL: N/A +CONTEXT: referenced column: define_program_argument +select DBE_SCHEDULER.define_program_argument('program1', 1, 'arg1', 'boolean', false); + define_program_argument +------------------------- + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- + 1 | boolean | program1 | arg1 | | +(1 row) + +select DBE_SCHEDULER.define_program_argument('program1', 1, 'arg1', 'int', 16); + define_program_argument +------------------------- + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- + 1 | int | program1 | arg1 | | 16 +(1 row) + +select DBE_SCHEDULER.define_program_argument('program1', 2, 'arg2', 'boolean', 'false', false); + define_program_argument +------------------------- + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- + 1 | int | program1 | arg1 | | 16 + 2 | boolean | program1 | arg2 | | false +(2 rows) + +select DBE_SCHEDULER.drop_program('program1', true); + drop_program +-------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +-- check create_program / drop_program +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_program('program2', 'sql', 'select pg_sleep(1);', 3, false, 'test'); +ERROR: program_type sql not support. +DETAIL: Invalid program type. 
+CONTEXT: referenced column: create_program +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_job('job1', 'program1', '2021-07-20', 'interval ''3 minute''', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); + create_job +------------ + +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+---------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + program2 | object_type | program + program2 | program_type | stored_procedure + program2 | program_action | select pg_sleep(1); + program2 | number_of_arguments | 3 + program2 | enabled | false + program2 | comments | test + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | false + job1 | comments | test + job1 | job_style | style + job1 | credential_name | + job1 | destination_name | +(21 rows) + +select DBE_SCHEDULER.drop_program('program2,program1', false); +ERROR: program_name program1 refered by job job1 +DETAIL: N/A +CONTEXT: SQL statement "CALL dbe_scheduler.drop_single_program(name_list[i],force)" +PL/pgSQL function dbe_scheduler.drop_program(text,boolean) line 8 at PERFORM +referenced column: drop_program +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+---------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + program2 | object_type | program + program2 | program_type | stored_procedure + program2 | program_action | select pg_sleep(1); + program2 | number_of_arguments | 3 + program2 | enabled | false + program2 | comments | test + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | false + job1 | comments | test + job1 | job_style | style + job1 | credential_name | + job1 | destination_name | +(21 rows) + +select DBE_SCHEDULER.drop_program('program1,program2', true); + drop_program +-------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+------------------+---------------------- + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | false + job1 | comments | test + job1 | job_style | style + job1 | credential_name | + job1 | destination_name | +(9 rows) + +select DBE_SCHEDULER.drop_job('program1,job1', true, false, 'STOP_ON_FIRST_ERROR'); +NOTICE: Undefined object program1. 
+CONTEXT: referenced column: drop_job + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job1,program1', true, false, 'TRANSACTIONAL'); +ERROR: Undefined object program1. +CONTEXT: referenced column: drop_job +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+------------------+---------------------- + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | false + job1 | comments | test + job1 | job_style | style + job1 | credential_name | + job1 | destination_name | +(9 rows) + +select DBE_SCHEDULER.drop_job('job1,program1', true, false, 'ABSORB_ERRORS'); +NOTICE: Undefined object program1. +CONTEXT: referenced column: drop_job + drop_job +---------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +-- set_attribute +--program +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_argument', '2', NULL); +ERROR: Fail to update attribute. +DETAIL: Attribute entry number_of_argument not found. +CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', '2', NULL); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | comments | test + program1 | number_of_arguments | 2 + program1 | enabled | false +(6 rows) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 3, NULL); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | comments | test + program1 | number_of_arguments | 3 + program1 | enabled | false +(6 rows) + +select DBE_SCHEDULER.set_attribute('program1', 'enabled', true); +ERROR: Can not find argument info of object 'program1'. 
+DETAIL: N/A +CONTEXT: referenced column: set_attribute +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | comments | test + program1 | number_of_arguments | 3 + program1 | enabled | false +(6 rows) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 0, NULL); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'enabled', true); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | comments | test + program1 | number_of_arguments | 0 + program1 | enabled | true +(6 rows) + +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); + create_schedule +----------------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job1', program_name=>'program1', schedule_name=>'schedule1'); + create_job +------------ + +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'program_action', 'create role r1 password ''12345'';', NULL); -- failed +ERROR: Job action specified is not allowed. +DETAIL: Job action cannot be executed securely. +CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.set_attribute('program1', 'program_action', 'select pg_sleep(2);', NULL); + set_attribute +--------------- + +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(2); | job1 +(1 row) + +--schedule +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +-----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | comments | test + program1 | number_of_arguments | 0 + program1 | enabled | true + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | sysdate + schedule1 | end_date | + schedule1 | comments | test + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | schedule1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | regular + job1 | credential_name | + job1 | destination_name | + program1 | program_action | select pg_sleep(2); +(20 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | job1 | Sat Jan 01 00:00:00 4000 | f +(1 row) + +select DBE_SCHEDULER.set_attribute('schedule1', 'start_date', '2021-7-20'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('schedule1', 'end_date', '2021-7-20'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('schedule1', 
'repeat_interval', 'interval ''2000 s'''); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +-----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | comments | test + program1 | number_of_arguments | 0 + program1 | enabled | true + schedule1 | object_type | schedule + schedule1 | comments | test + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | schedule1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | regular + job1 | credential_name | + job1 | destination_name | + program1 | program_action | select pg_sleep(2); + schedule1 | start_date | 2021-7-20 + schedule1 | end_date | 2021-7-20 + schedule1 | repeat_interval | interval '2000 s' +(20 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+-------------------+---------+----------+--------------------------+-------- + regression | datanode1 | interval '2000 s' | public | job1 | Tue Jul 20 00:00:00 2021 | f +(1 row) + +--job +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_schedule('schedule2', NULL, 'sysdate', NULL, 'test'); + create_schedule +----------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'program_name', 'program2'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'schedule_name', 'schedule2'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'job_class', 'unknown'); +ERROR: Can not find attribute 'object_type' of object name 'unknown'. 
+DETAIL: N/A +CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.create_job_class('test'); + create_job_class +------------------ + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'job_class', 'test'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'enabled', true); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +-----------+-------------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | comments | test + program1 | number_of_arguments | 0 + program1 | enabled | true + schedule1 | object_type | schedule + schedule1 | comments | test + job1 | object_type | job + job1 | auto_drop | true + job1 | comments | + job1 | job_style | regular + job1 | credential_name | + job1 | destination_name | + program1 | program_action | select pg_sleep(2); + schedule1 | start_date | 2021-7-20 + schedule1 | end_date | 2021-7-20 + schedule1 | repeat_interval | interval '2000 s' + program2 | object_type | program + program2 | program_type | stored_procedure + program2 | program_action | select pg_sleep(1); + program2 | number_of_arguments | 3 + program2 | enabled | false + program2 | comments | test + schedule2 | object_type | schedule + schedule2 | start_date | + schedule2 | repeat_interval | sysdate + schedule2 | end_date | + schedule2 | comments | test + job1 | program_name | program2 + job1 | schedule_name | schedule2 + test | object_type | job_class + test | resource_consumer_group | + test | service | + test | logging_level | 0 + test | log_history | + test | comments | + job1 | job_class | test +(37 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | job1 | Sat Jan 01 00:00:00 4000 | t +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select DBE_SCHEDULER.drop_program('program1,program2', true); + drop_program +-------------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_schedule('schedule1,schedule2', false); + drop_schedule +--------------- + +(1 row) + +select DBE_SCHEDULER.drop_job_class('test'); + drop_job_class +---------------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+---------------------- + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | 
credential_name | + job1 | destination_name | +(15 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | null | public | job1 | Sat Jan 01 00:00:00 4000 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+---------------------- + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | +(15 rows) + +select DBE_SCHEDULER.set_attribute('job1', 'start_date', '2021-7-20'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'end_date', '2021-7-20'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'repeat_interval', 'interval ''2000 s'''); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'number_of_arguments', 2); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'job_action', 'create role r1 password ''12345'';'); -- failed +ERROR: Job action specified is not allowed. +DETAIL: Job action cannot be executed securely. 
+CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.set_attribute('job1', 'job_action', 'select pg_sleep(2);'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('job1', 'job_type', 'STORED_PROCEDURE'); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+---------------------- + inline_program_job1 | object_type | program + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | + inline_program_job1 | number_of_arguments | 2 + inline_program_job1 | program_action | select pg_sleep(2); + inline_program_job1 | program_type | stored_procedure +(15 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+-------------------+---------+----------+--------------------------+-------- + regression | datanode1 | interval '2000 s' | public | job1 | Tue Jul 20 00:00:00 2021 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(2); | job1 +(1 row) + +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +--create_schedule dropxxx +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); + create_schedule +----------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +-----------+-----------------+----------------- + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | sysdate + schedule1 | end_date | + schedule1 | comments | test +(5 rows) + +select DBE_SCHEDULER.create_schedule('schedule2', NULL, 'sysdate', NULL, 'test'); + create_schedule +----------------- + +(1 row) + +select DBE_SCHEDULER.create_job('job1', 'schedule1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, 'DEFAULT_JOB_CLASS', true, true, NULL, NULL, NULL); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+--------------------- + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | sysdate + schedule1 | end_date | + schedule1 | comments | test + schedule2 | object_type | schedule + schedule2 | start_date | + schedule2 | repeat_interval | sysdate + schedule2 | end_date | + schedule2 | comments | test + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | schedule1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | 
+(25 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | job1 | Sat Jan 01 00:00:00 4000 | t +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select DBE_SCHEDULER.drop_job('schedule1', true, false, 'STOP_ON_FIRST_ERROR'); +NOTICE: name schedule1 is schedule. +CONTEXT: referenced column: drop_job + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_program('schedule1', false); +ERROR: Cannot find program type of program schedule1. +DETAIL: Invalid program format. +CONTEXT: SQL statement "CALL dbe_scheduler.drop_single_program(name_list[i],force)" +PL/pgSQL function dbe_scheduler.drop_program(text,boolean) line 8 at PERFORM +referenced column: drop_program +select DBE_SCHEDULER.drop_schedule('schedule1', false); +ERROR: schedule_name schedule1 refered by job job1 +DETAIL: N/A +CONTEXT: SQL statement "CALL dbe_scheduler.drop_single_schedule(name_list[i],force)" +PL/pgSQL function dbe_scheduler.drop_schedule(text,boolean) line 8 at PERFORM +referenced column: drop_schedule +select DBE_SCHEDULER.drop_schedule('schedule1,schedule2', true); + drop_schedule +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+--------------------- + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | schedule1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | +(15 rows) + +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +--set_job_argument_value +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select 
DBE_SCHEDULER.create_job('job1', 'program1', '2021-07-20', 'sysdate', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+---------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + job1 | object_type | job + job1 | program_name | program1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | false + job1 | comments | test + job1 | job_style | style + job1 | credential_name | + job1 | destination_name | +(15 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | job1 | Sun Jul 20 00:00:00 2121 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select DBE_SCHEDULER.set_job_argument_value('job1', 1, 1); + set_job_argument_value +------------------------ + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+--------------------+----------------+--------------- + 1 | text | job1 | default_argument_1 | 1 | +(1 row) + +select DBE_SCHEDULER.set_job_argument_value('job1', 1, 11); + set_job_argument_value +------------------------ + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+--------------------+----------------+--------------- + 1 | text | job1 | default_argument_1 | 11 | +(1 row) + +select DBE_SCHEDULER.set_job_argument_value('job1', 'default_name_of_arg1', 111); +ERROR: Fail to set job argument value. +DETAIL: Argument default_name_of_arg1 was never defined. 
+CONTEXT: referenced column: set_job_argument_value +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+--------------------+----------------+--------------- + 1 | text | job1 | default_argument_1 | 11 | +(1 row) + +select DBE_SCHEDULER.define_program_argument('program1', 2, 'arg2', 'boolean', 'false', false); + define_program_argument +------------------------- + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- + 2 | boolean | program1 | arg2 | | false +(1 row) + +select DBE_SCHEDULER.set_job_argument_value('job1', 'arg2', 2); + set_job_argument_value +------------------------ + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+--------------------+----------------+--------------- + 2 | boolean | program1 | arg2 | | false + 2 | boolean | job1 | default_argument_2 | 2 | +(2 rows) + +select DBE_SCHEDULER.set_job_argument_value('job1', 'arg2', 22); + set_job_argument_value +------------------------ + +(1 row) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+--------------------+----------------+--------------- + 2 | boolean | program1 | arg2 | | false + 2 | boolean | job1 | default_argument_2 | 22 | +(2 rows) + +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_program('program1', false); + drop_program +-------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +-- check create_job +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', '2021-7-28', 'test'); + create_schedule +----------------- + +(1 row) + +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+------------------------------ + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | sysdate + schedule1 | end_date | Wed Jul 28 00:00:00 2021 PDT + schedule1 | comments | test + program1 | 
object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | +(26 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | null | public | job1 | Sat Jan 01 00:00:00 4000 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job1 +(1 row) + +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job2', program_name=>'program1', schedule_name=>'schedule1'); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +-----------+---------------------+------------------------------ + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | sysdate + schedule1 | end_date | Wed Jul 28 00:00:00 2021 PDT + schedule1 | comments | test + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + job2 | object_type | job + job2 | program_name | program1 + job2 | schedule_name | schedule1 + job2 | job_class | default_job_class + job2 | auto_drop | true + job2 | comments | + job2 | job_style | regular + job2 | credential_name | + job2 | destination_name | +(20 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | job2 | Wed Jul 28 00:00:00 2021 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job2 +(1 row) + +select DBE_SCHEDULER.drop_job('job2', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job3', program_name=>'program1'); + create_job +------------ + +(1 row) + +select job_name, enable from pg_job where job_name = 'job3'; + job_name | enable +----------+-------- + job3 | f +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +-----------+---------------------+------------------------------ + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | 
sysdate + schedule1 | end_date | Wed Jul 28 00:00:00 2021 PDT + schedule1 | comments | test + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + job3 | object_type | job + job3 | program_name | program1 + job3 | schedule_name | inline_schedule_job3 + job3 | job_class | default_job_class + job3 | auto_drop | true + job3 | comments | + job3 | job_style | regular + job3 | credential_name | + job3 | destination_name | +(20 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | null | public | job3 | Sat Jan 01 00:00:00 4000 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(1); | job3 +(1 row) + +select DBE_SCHEDULER.drop_job('job3', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job4', schedule_name=>'schedule1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(4);'); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+------------------------------ + schedule1 | object_type | schedule + schedule1 | start_date | + schedule1 | repeat_interval | sysdate + schedule1 | end_date | Wed Jul 28 00:00:00 2021 PDT + schedule1 | comments | test + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 3 + program1 | enabled | false + program1 | comments | test + inline_program_job4 | object_type | program + inline_program_job4 | program_type | stored_procedure + inline_program_job4 | program_action | select pg_sleep(4); + inline_program_job4 | number_of_arguments | 0 + inline_program_job4 | enabled | true + inline_program_job4 | comments | + job4 | object_type | job + job4 | program_name | inline_program_job4 + job4 | schedule_name | schedule1 + job4 | job_class | default_job_class + job4 | auto_drop | true + job4 | comments | + job4 | job_style | + job4 | credential_name | + job4 | destination_name | +(26 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | job4 | Wed Jul 28 00:00:00 2021 | f +(1 row) + +select what, job_name from pg_job_proc; + what | job_name +---------------------+---------- + select pg_sleep(4); | job4 +(1 row) + +select DBE_SCHEDULER.drop_job('job4', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_schedule('schedule1', true); + drop_schedule +--------------- + +(1 row) + +select DBE_SCHEDULER.drop_program('program1', false); + drop_program +-------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 
rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +-- enable/disable +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, false, 'test'); + create_program +---------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 0 + program1 | enabled | false + program1 | comments | test +(6 rows) + +select DBE_SCHEDULER.enable('program1', 'STOP_ON_FIRST_ERROR'); + enable +-------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 0 + program1 | comments | test + program1 | enabled | true +(6 rows) + +select DBE_SCHEDULER.disable('program1', false, 'STOP_ON_FIRST_ERROR'); + disable +--------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 0 + program1 | comments | test + program1 | enabled | false +(6 rows) + +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+---------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 0 + program1 | comments | test + program1 | enabled | false + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | +(21 rows) + +select DBE_SCHEDULER.enable('job1', 'STOP_ON_FIRST_ERROR'); + enable +-------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value 
+---------------------+---------------------+---------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 0 + program1 | comments | test + program1 | enabled | false + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | +(21 rows) + +select DBE_SCHEDULER.disable('job1', false, 'STOP_ON_FIRST_ERROR'); + disable +--------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+---------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 0 + program1 | comments | test + program1 | enabled | false + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | default_job_class + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | +(21 rows) + +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_program('program1', false); + drop_program +-------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +--create / drop job_class +select DBE_SCHEDULER.create_job_class('test'); + create_job_class +------------------ + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', job_class=>'test'); + create_job +------------ + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job3', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', job_class=>'testxxx'); +ERROR: Can not find attribute 'object_type' of object name 'testxxx'. 
+DETAIL: N/A +CONTEXT: referenced column: create_job +select DBE_SCHEDULER.create_job(job_name=>'job2', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); + create_job +------------ + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+-------------------------+---------------------- + test | object_type | job_class + test | resource_consumer_group | + test | service | + test | logging_level | 0 + test | log_history | + test | comments | + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | test + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | + inline_program_job2 | object_type | program + inline_program_job2 | program_type | stored_procedure + inline_program_job2 | program_action | select pg_sleep(1); + inline_program_job2 | number_of_arguments | 0 + inline_program_job2 | enabled | true + inline_program_job2 | comments | + job2 | object_type | job + job2 | program_name | inline_program_job2 + job2 | schedule_name | inline_schedule_job2 + job2 | job_class | default_job_class + job2 | auto_drop | true + job2 | comments | + job2 | job_style | + job2 | credential_name | + job2 | destination_name | +(36 rows) + +select DBE_SCHEDULER.set_attribute('job2', 'job_class', 'test'); + set_attribute +--------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+-------------------------+---------------------- + test | object_type | job_class + test | resource_consumer_group | + test | service | + test | logging_level | 0 + test | log_history | + test | comments | + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | job_class | test + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | + inline_program_job2 | object_type | program + inline_program_job2 | program_type | stored_procedure + inline_program_job2 | program_action | select pg_sleep(1); + inline_program_job2 | number_of_arguments | 0 + inline_program_job2 | enabled | true + inline_program_job2 | comments | + job2 | object_type | job + job2 | program_name | inline_program_job2 + job2 | schedule_name | inline_schedule_job2 + job2 | auto_drop | true + job2 | comments | + job2 | job_style | + job2 | credential_name | + job2 | destination_name | + job2 | job_class | test +(36 rows) + +select DBE_SCHEDULER.drop_job_class('test', false); +ERROR: job_class test refered by job job1 +DETAIL: N/A +CONTEXT: SQL statement "CALL dbe_scheduler.drop_single_job_class(name_list[i],force)" +PL/pgSQL function dbe_scheduler.drop_job_class(text,boolean) line 8 at PERFORM +referenced column: drop_job_class 
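+-- Editorial sketch (not captured output): the force semantics exercised here,
+-- assuming the same DBE_SCHEDULER API. With force=>false the drop fails while any
+-- job still references the class (the error above); with force=>true, as in the
+-- next statement, the class is dropped and referencing jobs fall back to
+-- DEFAULT_JOB_CLASS, which the follow-up gs_job_attribute query confirms.
+-- select DBE_SCHEDULER.create_job_class('demo_class');
+-- select DBE_SCHEDULER.create_job(job_name=>'demo_job', job_type=>'PLSQL_BLOCK',
+--                                 job_action=>'null;', job_class=>'demo_class');
+-- select DBE_SCHEDULER.drop_job_class('demo_class', false); -- fails: referenced by demo_job
+-- select DBE_SCHEDULER.drop_job_class('demo_class', true);  -- succeeds; demo_job reverts to DEFAULT_JOB_CLASS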
+select DBE_SCHEDULER.drop_job_class('test', true); + drop_job_class +---------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +---------------------+---------------------+---------------------- + inline_program_job1 | object_type | program + inline_program_job1 | program_type | stored_procedure + inline_program_job1 | program_action | select pg_sleep(1); + inline_program_job1 | number_of_arguments | 0 + inline_program_job1 | enabled | true + inline_program_job1 | comments | + job1 | object_type | job + job1 | program_name | inline_program_job1 + job1 | schedule_name | inline_schedule_job1 + job1 | auto_drop | true + job1 | comments | + job1 | job_style | + job1 | credential_name | + job1 | destination_name | + inline_program_job2 | object_type | program + inline_program_job2 | program_type | stored_procedure + inline_program_job2 | program_action | select pg_sleep(1); + inline_program_job2 | number_of_arguments | 0 + inline_program_job2 | enabled | true + inline_program_job2 | comments | + job2 | object_type | job + job2 | program_name | inline_program_job2 + job2 | schedule_name | inline_schedule_job2 + job2 | auto_drop | true + job2 | comments | + job2 | job_style | + job2 | credential_name | + job2 | destination_name | + job1 | job_class | DEFAULT_JOB_CLASS + job2 | job_class | DEFAULT_JOB_CLASS +(30 rows) + +select DBE_SCHEDULER.drop_job('job1,job2', true, false, 'STOP_ON_FIRST_ERROR'); + drop_job +---------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select * from gs_job_argument; + argument_position | argument_type | job_name | argument_name | argument_value | default_value +-------------------+---------------+----------+---------------+----------------+--------------- +(0 rows) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select what, job_name from pg_job_proc; + what | job_name +------+---------- +(0 rows) + +-- generate_job_name +select DBE_SCHEDULER.generate_job_name(); + generate_job_name +------------------- + JOB$_1 +(1 row) + +select DBE_SCHEDULER.generate_job_name(''); + generate_job_name +------------------- + 2 +(1 row) + +select DBE_SCHEDULER.generate_job_name('job_prefix_'); + generate_job_name +------------------- + job_prefix_3 +(1 row) + +create table t1(c1 int); +create or replace procedure p1(id int) as +begin + insert into t1 values(id); +end; +/ +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'public.p1', 1, true, 'test'); +ERROR: Can not find argument info of object 'program2'. 
+DETAIL: N/A +CONTEXT: referenced column: create_program +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'public.p1', 1, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.define_program_argument('program2', 1, 'arg1', 'int', 2); + define_program_argument +------------------------- + +(1 row) + +select DBE_SCHEDULER.enable('program2', 'TRANSACTIONAL'); + enable +-------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job2', program_name=>'program2', enabled=>true, auto_drop=>false); + create_job +------------ + +(1 row) + +select DBE_SCHEDULER.run_job('job2', false); + run_job +--------- + +(1 row) + +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +select * from t1; + c1 +---- + 2 +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job3', job_type=>'PLSQL_BLOCK', job_action=>'insert into public.t1 values(3);', enabled=>true, auto_drop=>false); + create_job +------------ + +(1 row) + +select DBE_SCHEDULER.run_job('job3', false); + run_job +--------- + +(1 row) + +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +select * from t1; + c1 +---- + 2 + 3 +(2 rows) + +drop table t1; +select DBE_SCHEDULER.drop_job('job2', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job3', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_program('program2', true); + drop_program +-------------- + +(1 row) + +-- others +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 1, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.enable('program1', 'TRANSACTIONAL'); +ERROR: Can not find argument info of object 'program1'. +CONTEXT: referenced column: enable +select DBE_SCHEDULER.define_program_argument('program1', 1, 'arg1', 'int', 16); + define_program_argument +------------------------- + +(1 row) + +select DBE_SCHEDULER.enable('program1', 'TRANSACTIONAL'); + enable +-------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+---------------------+--------------------- + program1 | object_type | program + program1 | program_type | stored_procedure + program1 | program_action | select pg_sleep(1); + program1 | number_of_arguments | 1 + program1 | comments | test + program1 | enabled | true +(6 rows) + +select DBE_SCHEDULER.set_attribute('program1', 'program_type', 'PLSQL_BLOCK'); +ERROR: Invalid program type or argument +DETAIL: Program type PLSQL_BLOCK must has no argument +CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 0); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 1); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'program_type', 'STORED_PROCEDURE'); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', -1); +ERROR: Invalid program arguments +DETAIL: Program arguments must less euqal than 255 and greater euqal than zero +CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 0); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 1); + set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 255); + 
set_attribute +--------------- + +(1 row) + +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 256); +ERROR: Invalid program arguments +DETAIL: Program arguments must less euqal than 255 and greater euqal than zero +CONTEXT: referenced column: set_attribute +select DBE_SCHEDULER.drop_program('program1', true); + drop_program +-------------- + +(1 row) + +select * from gs_job_attribute where attribute_name <> 'owner'; -- empty + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + +select DBE_SCHEDULER.create_program('programdb1', 'PLSQL_BLOCK', 'select pg_sleep(1);', 0, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_job('jobdb1', 'programdb1', '2021-07-20', 'sysdate', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); + create_job +------------ + +(1 row) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | jobdb1 | Sun Jul 20 00:00:00 2121 | f +(1 row) + +create database test11; +\c test11 +select DBE_SCHEDULER.create_program('programdb1', 'PLSQL_BLOCK', 'select pg_sleep(1);', 0, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_job('jobdb1', 'programdb1', '2021-07-20', 'sysdate', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); + create_job +------------ + +(1 row) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | jobdb1 | Sun Jul 20 00:00:00 2121 | f + test11 | datanode1 | sysdate | public | jobdb1 | Sun Jul 20 00:00:00 2121 | f +(2 rows) + +select dbe_scheduler.run_job('jobdb1', false); + run_job +--------- + +(1 row) + +select dbe_scheduler.drop_job('jobdb1'); + drop_job +---------- + +(1 row) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; + dbname | node_name | interval | nspname | job_name | end_date | enable +------------+-----------+----------+---------+----------+--------------------------+-------- + regression | datanode1 | sysdate | public | jobdb1 | Sun Jul 20 00:00:00 2121 | f +(1 row) + +\c regression +select dbe_scheduler.drop_job('jobdb1'); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_program('programdb1', true); + drop_program +-------------- + +(1 row) + +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; + dbname | node_name | interval | nspname | job_name | end_date | enable +--------+-----------+----------+---------+----------+----------+-------- +(0 rows) + +select * from gs_job_attribute where attribute_name <> 'owner'; + job_name | attribute_name | attribute_value +----------+----------------+----------------- +(0 rows) + diff --git a/src/test/regress/expected/dbe_scheduler_calendar.out b/src/test/regress/expected/dbe_scheduler_calendar.out new file mode 100644 index 000000000..d483dcc00 --- /dev/null +++ b/src/test/regress/expected/dbe_scheduler_calendar.out @@ -0,0 +1,305 @@ +-- calendaring syntax check -- +create or replace procedure eval16(calendar_str text) as +declare 
+ start_date timestamp with time zone; + return_date_after timestamp with time zone; + next_run_date timestamp with time zone; +begin + start_date := '2003-2-1 10:30:00.111111+8'::timestamp with time zone; + return_date_after := start_date; + -- print 16 consecutive next dates -- + FOR i in 1..16 loop + DBE_SCHEDULER.EVALUATE_CALENDAR_STRING( + calendar_str, + start_date, return_date_after, next_run_date); + DBE_OUTPUT.PRINT_LINE('next_run_date: ' || next_run_date); + return_date_after := next_run_date; + end loop; +end; +/ +show timezone; + TimeZone +---------- + PST8PDT +(1 row) + +-- problems: ORA does not support these -- +call eval16('FREQ=weekly;INTERVAL=50;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=0'); +next_run_date: Fri Mar 17 11:20:00 2023 PDT +next_run_date: Fri Mar 17 11:30:00 2023 PDT +next_run_date: Fri Mar 17 11:40:00 2023 PDT +next_run_date: Fri Mar 01 10:20:00 2024 PST +next_run_date: Fri Mar 01 10:30:00 2024 PST +next_run_date: Fri Mar 01 10:40:00 2024 PST +next_run_date: Fri Feb 14 10:20:00 2025 PST +next_run_date: Fri Feb 14 10:30:00 2025 PST +next_run_date: Fri Feb 14 10:40:00 2025 PST +next_run_date: Fri Mar 31 11:20:00 2045 PDT +next_run_date: Fri Mar 31 11:30:00 2045 PDT +next_run_date: Fri Mar 31 11:40:00 2045 PDT +next_run_date: Fri Mar 16 11:20:00 2046 PDT +next_run_date: Fri Mar 16 11:30:00 2046 PDT +next_run_date: Fri Mar 16 11:40:00 2046 PDT +next_run_date: Fri Mar 01 10:20:00 2047 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=secondly;BYMONTH=6;'); -- hard to find, but worked +next_run_date: Sun Jun 01 01:00:00 2003 PDT +next_run_date: Sun Jun 01 01:00:01 2003 PDT +next_run_date: Sun Jun 01 01:00:02 2003 PDT +next_run_date: Sun Jun 01 01:00:03 2003 PDT +next_run_date: Sun Jun 01 01:00:04 2003 PDT +next_run_date: Sun Jun 01 01:00:05 2003 PDT +next_run_date: Sun Jun 01 01:00:06 2003 PDT +next_run_date: Sun Jun 01 01:00:07 2003 PDT +next_run_date: Sun Jun 01 01:00:08 2003 PDT +next_run_date: Sun Jun 01 01:00:09 2003 PDT +next_run_date: Sun Jun 01 01:00:10 2003 PDT +next_run_date: Sun Jun 01 01:00:11 2003 PDT +next_run_date: Sun Jun 01 01:00:12 2003 PDT +next_run_date: Sun Jun 01 01:00:13 2003 PDT +next_run_date: Sun Jun 01 01:00:14 2003 PDT +next_run_date: Sun Jun 01 01:00:15 2003 PDT + eval16 +-------- + +(1 row) + +-- problem: ORA generate different result -- +call eval16('FREQ=weekly;INTERVAL=40;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=0'); +next_run_date: Fri Feb 24 10:20:00 2006 PST +next_run_date: Fri Feb 24 10:30:00 2006 PST +next_run_date: Fri Feb 24 10:40:00 2006 PST +next_run_date: Fri Mar 20 11:20:00 2009 PDT +next_run_date: Fri Mar 20 11:30:00 2009 PDT +next_run_date: Fri Mar 20 11:40:00 2009 PDT +next_run_date: Fri Feb 12 10:20:00 2016 PST +next_run_date: Fri Feb 12 10:30:00 2016 PST +next_run_date: Fri Feb 12 10:40:00 2016 PST +next_run_date: Fri Mar 08 10:20:00 2019 PST +next_run_date: Fri Mar 08 10:30:00 2019 PST +next_run_date: Fri Mar 08 10:40:00 2019 PST +next_run_date: Fri Feb 23 10:20:00 2029 PST +next_run_date: Fri Feb 23 10:30:00 2029 PST +next_run_date: Fri Feb 23 10:40:00 2029 PST +next_run_date: Fri Mar 19 11:20:00 2032 PDT + eval16 +-------- + +(1 row) + +-- compiled scene -- +call eval16('FREQ=hourly;INTERVAL=2;BYHOUR=6,10;BYMINUTE=0;BYSECOND=0'); -- good +next_run_date: Sat Feb 01 06:00:00 2003 PST +next_run_date: Sat Feb 01 10:00:00 2003 PST +next_run_date: Sun Feb 02 06:00:00 2003 PST +next_run_date: Sun Feb 02 10:00:00 2003 PST +next_run_date: Mon Feb 03 06:00:00 2003 PST +next_run_date: Mon Feb 03 10:00:00 
2003 PST +next_run_date: Tue Feb 04 06:00:00 2003 PST +next_run_date: Tue Feb 04 10:00:00 2003 PST +next_run_date: Wed Feb 05 06:00:00 2003 PST +next_run_date: Wed Feb 05 10:00:00 2003 PST +next_run_date: Thu Feb 06 06:00:00 2003 PST +next_run_date: Thu Feb 06 10:00:00 2003 PST +next_run_date: Fri Feb 07 06:00:00 2003 PST +next_run_date: Fri Feb 07 10:00:00 2003 PST +next_run_date: Sat Feb 08 06:00:00 2003 PST +next_run_date: Sat Feb 08 10:00:00 2003 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=hourly;INTERVAL=2;BYHOUR=6,9;BYMINUTE=0;BYSECOND=0'); -- good, only 6 o'clock +next_run_date: Sat Feb 01 06:00:00 2003 PST +next_run_date: Sun Feb 02 06:00:00 2003 PST +next_run_date: Mon Feb 03 06:00:00 2003 PST +next_run_date: Tue Feb 04 06:00:00 2003 PST +next_run_date: Wed Feb 05 06:00:00 2003 PST +next_run_date: Thu Feb 06 06:00:00 2003 PST +next_run_date: Fri Feb 07 06:00:00 2003 PST +next_run_date: Sat Feb 08 06:00:00 2003 PST +next_run_date: Sun Feb 09 06:00:00 2003 PST +next_run_date: Mon Feb 10 06:00:00 2003 PST +next_run_date: Tue Feb 11 06:00:00 2003 PST +next_run_date: Wed Feb 12 06:00:00 2003 PST +next_run_date: Thu Feb 13 06:00:00 2003 PST +next_run_date: Fri Feb 14 06:00:00 2003 PST +next_run_date: Sat Feb 15 06:00:00 2003 PST +next_run_date: Sun Feb 16 06:00:00 2003 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=weekly;INTERVAL=3;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=0'); +next_run_date: Fri Feb 21 10:20:00 2003 PST +next_run_date: Fri Feb 21 10:30:00 2003 PST +next_run_date: Fri Feb 21 10:40:00 2003 PST +next_run_date: Fri Mar 14 10:20:00 2003 PST +next_run_date: Fri Mar 14 10:30:00 2003 PST +next_run_date: Fri Mar 14 10:40:00 2003 PST +next_run_date: Fri Feb 13 10:20:00 2004 PST +next_run_date: Fri Feb 13 10:30:00 2004 PST +next_run_date: Fri Feb 13 10:40:00 2004 PST +next_run_date: Fri Mar 05 10:20:00 2004 PST +next_run_date: Fri Mar 05 10:30:00 2004 PST +next_run_date: Fri Mar 05 10:40:00 2004 PST +next_run_date: Fri Mar 26 10:20:00 2004 PST +next_run_date: Fri Mar 26 10:30:00 2004 PST +next_run_date: Fri Mar 26 10:40:00 2004 PST +next_run_date: Fri Feb 04 10:20:00 2005 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=yearly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYHOUR=10;BYMINUTE=1,2,3,4,5,6,7,8,9,20,30,40;BYSECOND=0'); -- fine performance +next_run_date: Mon Mar 31 10:01:00 2003 PST +next_run_date: Mon Mar 31 10:02:00 2003 PST +next_run_date: Mon Mar 31 10:03:00 2003 PST +next_run_date: Mon Mar 31 10:04:00 2003 PST +next_run_date: Mon Mar 31 10:05:00 2003 PST +next_run_date: Mon Mar 31 10:06:00 2003 PST +next_run_date: Mon Mar 31 10:07:00 2003 PST +next_run_date: Mon Mar 31 10:08:00 2003 PST +next_run_date: Mon Mar 31 10:09:00 2003 PST +next_run_date: Mon Mar 31 10:20:00 2003 PST +next_run_date: Mon Mar 31 10:30:00 2003 PST +next_run_date: Mon Mar 31 10:40:00 2003 PST +next_run_date: Sat May 31 11:01:00 2003 PDT +next_run_date: Sat May 31 11:02:00 2003 PDT +next_run_date: Sat May 31 11:03:00 2003 PDT +next_run_date: Sat May 31 11:04:00 2003 PDT + eval16 +-------- + +(1 row) + +call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYHOUR=10;BYMINUTE=1,2,3,4,5,6,7,8,9,20,30,40;BYSECOND=0'); -- fixed, large loops +next_run_date: Sat Feb 01 10:05:00 2003 PST +next_run_date: Sat Feb 01 10:20:00 2003 PST +next_run_date: Sat Feb 01 10:30:00 2003 PST +next_run_date: Sat Feb 01 10:40:00 2003 PST +next_run_date: Sun Feb 02 10:05:00 2003 PST +next_run_date: Sun Feb 02 10:20:00 2003 PST +next_run_date: Sun Feb 02 10:30:00 2003 PST 
+next_run_date: Sun Feb 02 10:40:00 2003 PST +next_run_date: Mon Feb 03 10:05:00 2003 PST +next_run_date: Mon Feb 03 10:20:00 2003 PST +next_run_date: Mon Feb 03 10:30:00 2003 PST +next_run_date: Mon Feb 03 10:40:00 2003 PST +next_run_date: Tue Feb 04 10:05:00 2003 PST +next_run_date: Tue Feb 04 10:20:00 2003 PST +next_run_date: Tue Feb 04 10:30:00 2003 PST +next_run_date: Tue Feb 04 10:40:00 2003 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYMONTHDAY=1,3,5,7,9;BYHOUR=1,3,10,13,15,17;BYMINUTE=1,2,3,4,5,6,7,8,9,20,30,40;BYSECOND=0'); -- a looooot of params +next_run_date: Sat Feb 01 01:05:00 2003 PST +next_run_date: Sat Feb 01 01:20:00 2003 PST +next_run_date: Sat Feb 01 01:30:00 2003 PST +next_run_date: Sat Feb 01 01:40:00 2003 PST +next_run_date: Sat Feb 01 03:05:00 2003 PST +next_run_date: Sat Feb 01 03:20:00 2003 PST +next_run_date: Sat Feb 01 03:30:00 2003 PST +next_run_date: Sat Feb 01 03:40:00 2003 PST +next_run_date: Sat Feb 01 10:05:00 2003 PST +next_run_date: Sat Feb 01 10:20:00 2003 PST +next_run_date: Sat Feb 01 10:30:00 2003 PST +next_run_date: Sat Feb 01 10:40:00 2003 PST +next_run_date: Sat Feb 01 13:05:00 2003 PST +next_run_date: Sat Feb 01 13:20:00 2003 PST +next_run_date: Sat Feb 01 13:30:00 2003 PST +next_run_date: Sat Feb 01 13:40:00 2003 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYMONTHDAY=1,3,5,7,9;BYHOUR=1,3,5,10,13,15,17;BYMINUTE=20,30,40,1,2,3,4,5,6,7,8,9;BYSECOND=0'); -- still good +next_run_date: Sat Feb 01 01:05:00 2003 PST +next_run_date: Sat Feb 01 01:20:00 2003 PST +next_run_date: Sat Feb 01 01:30:00 2003 PST +next_run_date: Sat Feb 01 01:40:00 2003 PST +next_run_date: Sat Feb 01 03:05:00 2003 PST +next_run_date: Sat Feb 01 03:20:00 2003 PST +next_run_date: Sat Feb 01 03:30:00 2003 PST +next_run_date: Sat Feb 01 03:40:00 2003 PST +next_run_date: Sat Feb 01 05:05:00 2003 PST +next_run_date: Sat Feb 01 05:20:00 2003 PST +next_run_date: Sat Feb 01 05:30:00 2003 PST +next_run_date: Sat Feb 01 05:40:00 2003 PST +next_run_date: Sat Feb 01 10:05:00 2003 PST +next_run_date: Sat Feb 01 10:20:00 2003 PST +next_run_date: Sat Feb 01 10:30:00 2003 PST +next_run_date: Sat Feb 01 10:40:00 2003 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=secondly;INTERVAL=59;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=58'); -- secondly works fine +next_run_date: Tue Feb 18 10:20:58 2003 PST +next_run_date: Thu Feb 27 10:40:58 2003 PST +next_run_date: Mon Mar 24 10:30:58 2003 PST +next_run_date: Sat Feb 07 10:20:58 2004 PST +next_run_date: Mon Feb 16 10:40:58 2004 PST +next_run_date: Fri Mar 12 10:30:58 2004 PST +next_run_date: Fri Feb 04 10:40:58 2005 PST +next_run_date: Tue Mar 01 10:30:58 2005 PST +next_run_date: Sat Mar 26 10:20:58 2005 PST +next_run_date: Sat Feb 18 10:30:58 2006 PST +next_run_date: Wed Mar 15 10:20:58 2006 PST +next_run_date: Fri Mar 24 10:40:58 2006 PST +next_run_date: Wed Feb 07 10:30:58 2007 PST +next_run_date: Sun Mar 04 10:20:58 2007 PST +next_run_date: Sat Mar 31 11:20:58 2007 PDT +next_run_date: Sun Feb 03 10:40:58 2008 PST + eval16 +-------- + +(1 row) + +call eval16('FREQ=minutely;INTERVAL=50;BYMONTH=1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1;BYMONTHDAY=-1;BYHOUR=1;BYMINUTE=0;BYSECOND=0'); +next_run_date: Sat Jan 31 01:00:00 2004 PST +next_run_date: Sun Jan 31 01:00:00 2021 PST +next_run_date: Mon Jan 31 01:00:00 2022 PST +next_run_date: Tue Jan 31 01:00:00 2023 PST +next_run_date: Wed Jan 31 01:00:00 2024 PST 
+next_run_date: Thu Jan 31 01:00:00 2041 PST +next_run_date: Fri Jan 31 01:00:00 2042 PST +next_run_date: Sat Jan 31 01:00:00 2043 PST +next_run_date: Sun Jan 31 01:00:00 2044 PST +next_run_date: Mon Jan 31 01:00:00 2061 PST +next_run_date: Tue Jan 31 01:00:00 2062 PST +next_run_date: Wed Jan 31 01:00:00 2063 PST +next_run_date: Thu Jan 31 01:00:00 2064 PST +next_run_date: Fri Jan 31 01:00:00 2081 PST +next_run_date: Sat Jan 31 01:00:00 2082 PST +next_run_date: Sun Jan 31 01:00:00 2083 PST + eval16 +-------- + +(1 row) + +-- error scenes -- +call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=6;BYMONTHDAY=6;BYHOUR=10;BYMINUTE=0;BYSECOND=1'); -- not reachable +ERROR: Cannot evaluate calendar clause. +DETAIL: Calender clause too deep. +CONTEXT: PL/pgSQL function dbe_scheduler.evaluate_calendar_string(text,timestamp with time zone,timestamp with time zone) line 3 at assignment +SQL statement "CALL dbe_scheduler.evaluate_calendar_string(calendar_str,start_date,return_date_after,next_run_date)" +PL/pgSQL function eval16(text) line 11 at SQL statement +call eval16('FREQ=secondly;BYMONTH=6;BYNOTHING=6;'); +ERROR: Fail to evaluate calendaring string. +DETAIL: Incorrect/duplicate clause name 'BYNOTHING'. +CONTEXT: PL/pgSQL function dbe_scheduler.evaluate_calendar_string(text,timestamp with time zone,timestamp with time zone) line 3 at assignment +SQL statement "CALL dbe_scheduler.evaluate_calendar_string(calendar_str,start_date,return_date_after,next_run_date)" +PL/pgSQL function eval16(text) line 11 at SQL statement diff --git a/src/test/regress/expected/dbe_scheduler_privilege.out b/src/test/regress/expected/dbe_scheduler_privilege.out new file mode 100644 index 000000000..b268da1d5 --- /dev/null +++ b/src/test/regress/expected/dbe_scheduler_privilege.out @@ -0,0 +1,364 @@ +-- create users +create user scheduler_user1 password 'scheduler_user1.'; +create user scheduler_user2 password 'scheduler_user2.'; +--grant +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create job'); + grant_user_authorization +-------------------------- + +(1 row) + +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create external job'); + grant_user_authorization +-------------------------- + +(1 row) + +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'run external job'); + grant_user_authorization +-------------------------- + +(1 row) + +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'execute any program'); + grant_user_authorization +-------------------------- + +(1 row) + +select attribute_name, attribute_value from gs_job_attribute; + attribute_name | attribute_value +---------------------+----------------- + create job | granted + create external job | granted + run external job | granted + execute any program | granted +(4 rows) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create external job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'run external job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'execute any program'); + revoke_user_authorization +--------------------------- + +(1 row) + +select attribute_name, attribute_value from gs_job_attribute; + attribute_name | attribute_value 
+----------------+----------------- +(0 rows) + +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'xxx'); +ERROR: Invalid privilege xxx +DETAIL: privilege contains invalid character +CONTEXT: referenced column: grant_user_authorization +-- no privilege +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; +select DBE_SCHEDULER.create_credential('cre_1', 'scheduler_user1', ''); -- failed +ERROR: Fail to create credential. +DETAIL: Insufficient privilege to create credential. +CONTEXT: referenced column: create_credential +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, false, 'test'); -- failed +ERROR: User needs 'create job' privilege to perform this operation. +DETAIL: Not enough privileges. +CONTEXT: referenced column: create_program +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); -- failed +ERROR: User needs 'create job' privilege to perform this operation. +DETAIL: Not enough privileges. +CONTEXT: referenced column: create_schedule +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', enabled=>true, auto_drop=>false); -- failed +ERROR: User needs 'create job' privilege to perform this operation. +DETAIL: Not enough privileges. +CONTEXT: referenced column: create_job +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create job'); + grant_user_authorization +-------------------------- + +(1 row) + +-- create job privilege +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, false, 'test'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); + create_schedule +----------------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', enabled=>true, auto_drop=>false); + create_job +------------ + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job2', program_name=>'program1'); + create_job +------------ + +(1 row) + +RESET ROLE; +select count(*) from adm_scheduler_jobs; + count +------- + 2 +(1 row) + +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; +-- create external job privilege +select DBE_SCHEDULER.create_program('program1', 'EXTERNAL_SCRIPT', '/usr/bin/pwd'); -- failed +ERROR: User needs 'create external job' privilege to perform this operation. +DETAIL: Not enough privileges. +CONTEXT: referenced column: create_program +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'EXTERNAL_SCRIPT', job_action=>'/usr/bin/pwd', enabled=>true, auto_drop=>false); -- failed +ERROR: User needs 'create external job' privilege to perform this operation. +DETAIL: Not enough privileges. 
+CONTEXT: referenced column: create_job +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create external job'); + grant_user_authorization +-------------------------- + +(1 row) + +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; +select DBE_SCHEDULER.create_program('program2', 'EXTERNAL_SCRIPT', '/usr/bin/pwd'); + create_program +---------------- + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job3', job_type=>'EXTERNAL_SCRIPT', job_action=>'/usr/bin/pwd', enabled=>true, auto_drop=>false); + create_job +------------ + +(1 row) + +-- cross user +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'create job'); + grant_user_authorization +-------------------------- + +(1 row) + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.create_job(job_name=>'job4', program_name=>'program1'); -- failed +ERROR: User needs 'execute any program' privilege to perform this operation. +DETAIL: Not enough privileges. +CONTEXT: referenced column: create_job +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'execute any program'); + grant_user_authorization +-------------------------- + +(1 row) + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.create_job(job_name=>'job4', program_name=>'program1'); + create_job +------------ + +(1 row) + +select DBE_SCHEDULER.create_job(job_name=>'job5', program_name=>'program2'); -- failed +ERROR: User needs 'create external job' privilege to perform this operation. +DETAIL: Not enough privileges. +CONTEXT: referenced column: create_job +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'create external job'); + grant_user_authorization +-------------------------- + +(1 row) + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.create_job(job_name=>'job5', program_name=>'program2'); + create_job +------------ + +(1 row) + +RESET ROLE; +select count(*) from adm_scheduler_jobs; + count +------- + 5 +(1 row) + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.run_job(job_name=>'job4', use_current_session=>false); + run_job +--------- + +(1 row) + +select DBE_SCHEDULER.run_job(job_name=>'job5', use_current_session=>true); -- failed +ERROR: No database wise credential found. 
+DETAIL: Need to create default credential for database with name 'db_credential' +CONTEXT: PL/pgSQL function dbe_scheduler.run_job(text,boolean) line 9 at assignment +referenced column: run_job +RESET ROLE; +select DBE_SCHEDULER.enable('job4'); + enable +-------- + +(1 row) + +select enable from pg_job where job_name = 'job4'; + enable +-------- + t +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'execute any program'); + revoke_user_authorization +--------------------------- + +(1 row) + +select enable from pg_job where job_name = 'job4'; + enable +-------- + f +(1 row) + +RESET ROLE; +select DBE_SCHEDULER.drop_job('job1', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job2', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job3', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job4', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_job('job5', true); + drop_job +---------- + +(1 row) + +select DBE_SCHEDULER.drop_program('program1', true); + drop_program +-------------- + +(1 row) + +select DBE_SCHEDULER.drop_program('program2', true); + drop_program +-------------- + +(1 row) + +select DBE_SCHEDULER.drop_schedule('schedule1', true); + drop_schedule +--------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create external job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'run external job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'execute any program'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'create job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'create external job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'run external job'); + revoke_user_authorization +--------------------------- + +(1 row) + +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'execute any program'); + revoke_user_authorization +--------------------------- + +(1 row) + +-- check object cleanups -- +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create job'); + grant_user_authorization +-------------------------- + +(1 row) + +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'execute any program'); + grant_user_authorization +-------------------------- + +(1 row) + +drop user scheduler_user1; +drop user scheduler_user2; +select attribute_name, attribute_value from gs_job_attribute; -- empty + attribute_name | attribute_value +----------------+----------------- +(0 rows) + diff --git a/src/test/regress/expected/dbe_scheduler_rename_user.out b/src/test/regress/expected/dbe_scheduler_rename_user.out new file mode 100644 index 000000000..aa96641bf --- /dev/null +++ b/src/test/regress/expected/dbe_scheduler_rename_user.out @@ -0,0 +1,73 @@ +-- create users +create user scheduler_user password 'scheduler_user@123.'; +-- grant +select 
DBE_SCHEDULER.grant_user_authorization('scheduler_user', 'create job'); + grant_user_authorization +-------------------------- + +(1 row) + +-- switch role/user and execute job +set role scheduler_user password "scheduler_user@123."; +create table my_tbl_01(tms date, phone text); +select DBE_SCHEDULER.create_job(job_name=>'job_01', job_type=>'PLSQL_BLOCK', job_action=>'insert into my_tbl_01 values (sysdate::date, 13001230123);', start_date=>sysdate, repeat_interval=>'FREQ=MINUTELY;INTERVAL=1', end_date=>sysdate+1,enabled=>true, auto_drop=>false); + create_job +------------ + +(1 row) + +select DBE_SCHEDULER.run_job('job_01', false); + run_job +--------- + +(1 row) + +select count(*) from pg_job where log_user = 'scheduler_user' and nspname = 'scheduler_user'; + count +------- + 1 +(1 row) + +select count(*) from pg_job where log_user = priv_user; + count +------- + 1 +(1 row) + +select count(*) from pg_job where job_name = 'job_01'; + count +------- + 1 +(1 row) + +-- alter and rename pg_job user +reset role; +alter user scheduler_user rename to scheduler_new_user; +-- switch new role/user to execute job +set role scheduler_new_user password "scheduler_user@123."; +select count(*) from pg_job where log_user = 'scheduler_new_user' and nspname = 'scheduler_new_user'; + count +------- + 1 +(1 row) + +select count(*) from pg_job where log_user != priv_user; + count +------- + 1 +(1 row) + +-- return and stop job +reset role; +select DBE_SCHEDULER.drop_job('job_01', true); + drop_job +---------- + +(1 row) + +select count(*) from pg_job where job_name = 'job_01'; + count +------- + 0 +(1 row) + diff --git a/src/test/regress/expected/deferrable.out b/src/test/regress/expected/deferrable.out new file mode 100644 index 000000000..8923ed506 --- /dev/null +++ b/src/test/regress/expected/deferrable.out @@ -0,0 +1,214 @@ +DROP SCHEMA test_deferrable CASCADE; +ERROR: schema "test_deferrable" does not exist +CREATE SCHEMA test_deferrable; +SET CURRENT_SCHEMA TO test_deferrable; +-- partition table for deferrable +drop table t_kenyon; +ERROR: table "t_kenyon" does not exist +create table t_kenyon(id int primary key deferrable) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_kenyon_pkey" for table "t_kenyon" +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +end; +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +end; +drop table t_kenyon; +create table t_kenyon(id int primary key not deferrable ) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_kenyon_pkey" for table "t_kenyon" +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. 
+end; +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +end; +drop table t_kenyon; +create table t_kenyon(id int primary key initially immediate ) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_kenyon_pkey" for table "t_kenyon" +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +end; +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +end; +drop table t_kenyon; +create table t_kenyon(id int primary key initially deferred) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t_kenyon_pkey" for table "t_kenyon" +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +end; +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +ERROR: duplicate key value violates unique constraint "t_kenyon_pkey" +DETAIL: Key (id)=(1) already exists. +end; +-- foreign key for deferrable +drop table warehouse_t23; +ERROR: table "warehouse_t23" does not exist +drop table city_t23; +ERROR: table "city_t23" does not exist +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "city_t23_pkey" for table "city_t23" +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) deferrable +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +end; +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". +end; +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "city_t23_pkey" for table "city_t23" +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) not deferrable +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". 
+end; +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". +end; +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "city_t23_pkey" for table "city_t23" +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) initially immediate +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". +end; +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". +end; +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "city_t23_pkey" for table "city_t23" +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) initially deferred +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +end; +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +ERROR: insert or update on table "warehouse_t23" violates foreign key constraint "warehouse_t23_w_city_fkey" +DETAIL: Key (w_city)=(sss) is not present in table "city_t23". 
+end; +DROP SCHEMA test_deferrable CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table t_kenyon +drop cascades to table city_t23 +drop cascades to table warehouse_t23 diff --git a/src/test/regress/expected/dfs_orc_vec_nestloop.out b/src/test/regress/expected/dfs_orc_vec_nestloop.out index bb9c46296..bd6662013 100644 --- a/src/test/regress/expected/dfs_orc_vec_nestloop.out +++ b/src/test/regress/expected/dfs_orc_vec_nestloop.out @@ -2698,9 +2698,6 @@ select count(*) from vector_nestloop_table_01 A inner join vector_nestloop_table 1 (1 row) ----- ---- Special Case: nestloop + hashjoin + operator with parameters pushed down (dts2014111302175/2014120306303) ----- CREATE INDEX vecvtor_nestloop_base_index_01 ON VECTOR_NESTLOOP_TABLE_05 USING psort (id) LOCAL(PARTITION b1_p1_id_idx, PARTITION b1_p2_id_idx, PARTITION b1_p3_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_02 ON VECTOR_NESTLOOP_TABLE_06 USING psort (id, c_d_id, c_id) LOCAL(PARTITION b5_p1_id_c_d_id_c_id_idx, PARTITION b5_p2_id_c_d_id_c_id_idx, PARTITION b5_p3_id_c_d_id_c_id_idx, PARTITION b5_p4_id_c_d_id_c_id_idx, PARTITION b5_p5_id_c_d_id_c_id_idx, PARTITION b5_p6_id_c_d_id_c_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_03 ON VECTOR_NESTLOOP_TABLE_07 USING psort (id, c_d_id, c_w_id) LOCAL(PARTITION b7_p1_id_c_d_id_c_w_id_idx, PARTITION b7_p2_id_c_d_id_c_w_id_idx, PARTITION b7_p3_id_c_d_id_c_w_id_idx, PARTITION b7_p4_id_c_d_id_c_w_id_idx, PARTITION b7_p5_id_c_d_id_c_w_id_idx, PARTITION b7_p6_id_c_d_id_c_w_id_idx, PARTITION b7_p7_id_c_d_id_c_w_id_idx, PARTITION b7_p8_id_c_d_id_c_w_id_idx, PARTITION b7_p9_id_c_d_id_c_w_id_idx, PARTITION b7_p10_id_c_d_id_c_w_id_idx, PARTITION b7_p11_id_c_d_id_c_w_id_idx) ; diff --git a/src/test/regress/expected/drop_if_exists.out b/src/test/regress/expected/drop_if_exists.out index 6b3fe60f9..4e572f0dc 100644 --- a/src/test/regress/expected/drop_if_exists.out +++ b/src/test/regress/expected/drop_if_exists.out @@ -58,7 +58,9 @@ ERROR: type "test_domain_exists" does not exist DROP DOMAIN IF EXISTS test_domain_exists; NOTICE: type "test_domain_exists" does not exist, skipping CREATE domain test_domain_exists as int not null check (value > 0); +ERROR: domain is not yet supported. 
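+-- Editorial sketch (not captured output): since CREATE DOMAIN now fails with
+-- "domain is not yet supported", the DROP DOMAIN IF EXISTS below degrades to a
+-- skip notice. The same IF EXISTS pattern with a supported object type, using a
+-- hypothetical table name:
+-- DROP TABLE IF EXISTS drop_if_exists_demo;  -- NOTICE: does not exist, skipping
+-- CREATE TABLE drop_if_exists_demo(a int);
+-- DROP TABLE IF EXISTS drop_if_exists_demo;  -- drops without a notice
+-- DROP TABLE drop_if_exists_demo;            -- ERROR: table does not exist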
DROP DOMAIN IF EXISTS test_domain_exists; +NOTICE: type "test_domain_exists" does not exist, skipping DROP DOMAIN test_domain_exists; ERROR: type "test_domain_exists" does not exist --- diff --git a/src/test/regress/expected/forall_save_exceptions.out b/src/test/regress/expected/forall_save_exceptions.out new file mode 100644 index 000000000..78870560c --- /dev/null +++ b/src/test/regress/expected/forall_save_exceptions.out @@ -0,0 +1,1240 @@ +create schema forall_save_exceptions; +set search_path = forall_save_exceptions; +CREATE TABLE if not exists test_forall(a char(10)); +---- +-- test pragma exception_init +---- +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -15); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + RAISE ex_dml_errors; +EXCEPTION + WHEN ex_dml_errors THEN + RAISE NOTICE 'test:%',SQLcode; +END; +/ +NOTICE: test:-24381 + +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -1); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -2); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -3); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -4); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -5); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -6); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -7); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -8); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -9); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -10); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -9); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -8); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -7); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -6); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + RAISE ex_dml_errors; +EXCEPTION + WHEN ex_dml_errors THEN + RAISE NOTICE 'test:%',SQLcode; +END; +/ +NOTICE: test:-6 + +-- only numeric initialization +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, raise_exception); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "raise_exception" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, raise_exception); + ^ +QUERY: DECLARE l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, raise_exception); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 'aaa'); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "'aaa'" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, 'aaa'); + ^ +QUERY: DECLARE l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 'aaa'); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, '1.1'); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "'1.1'" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, '1.1'); + ^ +QUERY: DECLARE l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, '1.1'); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 1.1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "1.1" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, 1.1); + ^ +QUERY: DECLARE l_error_count integer; + 
ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 1.1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, '-1'); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "'-1'" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, '-1'); + ^ +QUERY: DECLARE l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, '-1'); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +-- expresion not supported, sqlcode must <= 0, and must be int32 (the range is -2147483647~-1) +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 1-2); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "-" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, 1-2); + ^ +QUERY: DECLARE l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 1-2); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 0); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: SQLCODE in EXCEPTION_INIT should be less than 0 +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: SQLCODE in EXCEPTION_INIT should be less than 0 +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 15); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: SQLCODE in EXCEPTION_INIT should be less than 0 +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -2147483648); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: syntax error at or near "2147483648" +LINE 3: PRAGMA EXCEPTION_INIT(ex_dml_errors, -2147483648); + ^ +QUERY: DECLARE l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -2147483648); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -2147483647); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +--exception init with not declared +DECLARE + l_error_count integer; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ +ERROR: Undefined exception name 'ex_dml_errors' in EXCEPTION_INIT +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 1 +--exception init with system error +DECLARE + x integer := 0; + y integer; + PRAGMA EXCEPTION_INIT(division_by_zero, -1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + y := x / 0; +EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'test:%',SQLstate; +END; +/ +ERROR: Undefined exception name 'division_by_zero' in EXCEPTION_INIT +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +DECLARE + x integer := 0; + y integer; + PRAGMA EXCEPTION_INIT(division_by_zero, -1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + y := x / 0; +EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'test:%',SQLstate; +END; +/ +ERROR: Undefined exception name 'division_by_zero' in EXCEPTION_INIT +CONTEXT: compilation of PL/pgSQL function 
"inline_code_block" near line 2 +---- +-- support exception when custum exception +---- +DECLARE + ex_dml_errors EXCEPTION; +BEGIN + INSERT INTO test VALUES (1); +EXCEPTION + WHEN ex_dml_errors THEN + RAISE NOTICE 'DML error occurs'; + WHEN others THEN + RAISE NOTICE 'Other error occurs'; +END; +/ +NOTICE: Other error occurs +DECLARE + DEADLOCK_DETECTED EXCEPTION; + PRAGMA EXCEPTION_INIT(DEADLOCK_DETECTED, -60); +BEGIN + IF 1 > 0 THEN + RAISE DEADLOCK_DETECTED; + END IF; +EXCEPTION + WHEN DEADLOCK_DETECTED THEN + RAISE NOTICE 'test:%',SQLcode; +END; +/ +NOTICE: test:-60 +---- +-- when exception init, SQLstate is the same as SQLcode +---- +DECLARE + DEADLOCK_DETECTED EXCEPTION; + PRAGMA EXCEPTION_INIT(DEADLOCK_DETECTED, -60); +BEGIN + IF 1 > 0 THEN + RAISE DEADLOCK_DETECTED; + END IF; +EXCEPTION + WHEN DEADLOCK_DETECTED THEN + RAISE NOTICE 'test:%',SQLstate; +END; +/ +NOTICE: test:-60 +--the sqlerrm is " xxx: non-GaussDB Exception", xxx is |sqlcode| +DECLARE + DEADLOCK_DETECTED EXCEPTION; + PRAGMA EXCEPTION_INIT(DEADLOCK_DETECTED, -60); +BEGIN + IF 1 > 0 THEN + RAISE DEADLOCK_DETECTED; + END IF; +EXCEPTION + WHEN DEADLOCK_DETECTED THEN + RAISE NOTICE 'test:%',SQLerrm; +END; +/ +NOTICE: test: 60: non-GaussDB Exception +-- when not init, SQLcode is generated default with type int, and SQLstate is a text +DECLARE + DEADLOCK_DETECTED EXCEPTION; +BEGIN + IF 1 > 0 THEN + RAISE DEADLOCK_DETECTED; + END IF; +EXCEPTION + WHEN DEADLOCK_DETECTED THEN + RAISE NOTICE 'test:%',SQLcode; +END; +/ +--?NOTICE: test:P.* +DECLARE + DEADLOCK_DETECTED EXCEPTION; +BEGIN + IF 1 > 0 THEN + RAISE DEADLOCK_DETECTED; + END IF; +EXCEPTION + WHEN DEADLOCK_DETECTED THEN + RAISE NOTICE 'test:%',SQLstate; +END; +/ +--?NOTICE: test:P.* +---- +-- forall support save exceptions grammar +---- +-- function +CREATE OR REPLACE FUNCTION test_func(iter IN integer) RETURN integer as +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381); +BEGIN + FORALL i IN 1 .. 2 SAVE EXCEPTIONS + INSERT INTO test_forall + VALUES (1); +EXCEPTION + WHEN ex_dml_errors THEN + l_error_count := SQL%BULK_EXCEPTIONS.count; + DBe_OUTPUT.print_line('Number of failures: ' || l_error_count); + FOR i IN 1 .. l_error_count LOOP + DBE_OUTPUT.print_line('Error: ' || i || + ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index || + ' Message: ' || SQL%BULK_EXCEPTIONS(i).error_message); + END LOOP; + RETURN 0; +END; +/ +-- procedure +CREATE OR REPLACE PROCEDURE test_proc(iter IN integer) as +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, 24381); +BEGIN + FORALL i IN 1 .. 2 SAVE EXCEPTIONS + INSERT INTO test_forall + VALUES (1); +EXCEPTION + WHEN ex_dml_errors THEN + l_error_count := SQL%BULK_EXCEPTIONS.count; + DBe_OUTPUT.print_line('Number of failures: ' || l_error_count); + FOR i IN 1 .. l_error_count LOOP + DBE_OUTPUT.print_line('Error: ' || i || + ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index || + ' Message: ' || SQL%BULK_EXCEPTIONS(i).error_message); + END LOOP; +END; +/ +ERROR: SQLCODE in EXCEPTION_INIT should be less than 0 +CONTEXT: compilation of PL/pgSQL function "test_proc" near line 4 +-- anonimous block +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381); +BEGIN + FORALL i IN 1 .. 
+-- anonymous block
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381);
+BEGIN
+    FORALL i IN 1 .. 10
+    SAVE EXCEPTIONS
+        INSERT INTO test_forall
+        VALUES (1);
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        l_error_count := SQL%BULK_EXCEPTIONS.count;
+        DBe_OUTPUT.print_line('Number of failures: ' || l_error_count);
+        FOR i IN 1 .. l_error_count LOOP
+            DBe_OUTPUT.print_line('Error: ' || i ||
+                ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                ' Message: ' || SQL%BULK_EXCEPTIONS(i).error_message);
+        END LOOP;
+END;
+/
+-- test functionality
+CREATE TABLE exception_test (
+id  NUMBER(10)  NOT NULL UNIQUE
+);
+NOTICE: CREATE TABLE / UNIQUE will create implicit index "exception_test_id_key" for table "exception_test"
+DECLARE
+    TYPE t_tab IS TABLE OF exception_test%ROWTYPE;
+    l_tab t_tab := t_tab();
+    l_error_count NUMBER;
+BEGIN
+    -- Fill the collection.
+    FOR i IN 1 .. 100 LOOP
+        l_tab.extend;
+        l_tab(l_tab.last).id := i;
+    END LOOP;
+
+    -- Cause a failure.
+    l_tab(50).id := NULL;
+    l_tab(51).id := NULL;
+    l_tab(60).id := 59;
+
+    EXECUTE IMMEDIATE 'TRUNCATE TABLE exception_test';
+
+    BEGIN
+        FORALL i IN l_tab.first .. l_tab.last SAVE EXCEPTIONS
+            INSERT INTO exception_test
+            VALUES l_tab(i);
+    EXCEPTION
+        WHEN others THEN
+            l_error_count := SQL%BULK_EXCEPTIONS.count;
+            DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+            FOR i IN 1 .. l_error_count LOOP
+                DBE_OUTPUT.print_line('Error: ' || i ||
+                    ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                    ' Error Code: ' || -SQL%BULK_EXCEPTIONS(i).ERROR_CODE ||
+                    ' Error Message: ' || SQL%BULK_EXCEPTIONS(i).ERROR_MESSAGE);
+            END LOOP;
+    END;
+
+    -- Cause a failure. To test leftovers in bulk exceptions
+    l_tab(50).id := 50;
+    l_tab(51).id := 51;
+    l_tab(60).id := 60;
+    l_tab(70).id := 69;
+
+    EXECUTE IMMEDIATE 'TRUNCATE TABLE exception_test';
+
+    BEGIN
+        FORALL i IN l_tab.first .. l_tab.last SAVE EXCEPTIONS
+            INSERT INTO exception_test
+            VALUES l_tab(i);
+    EXCEPTION
+        WHEN others THEN
+            l_error_count := SQL%BULK_EXCEPTIONS.count;
+            DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+            FOR i IN 1 .. l_error_count LOOP
+                DBE_OUTPUT.print_line('Error: ' || i ||
+                    ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                    ' Error Code: ' || -SQL%BULK_EXCEPTIONS(i).ERROR_CODE ||
+                    ' Error Message: ' || SQL%BULK_EXCEPTIONS(i).ERROR_MESSAGE);
+            END LOOP;
+    END;
+END;
+/
+Number of failures: 3
+Error: 1 Array Index: 50 Error Code: -33575106 Error Message: null value in column "id" violates not-null constraint
+Error: 2 Array Index: 51 Error Code: -33575106 Error Message: null value in column "id" violates not-null constraint
+Error: 3 Array Index: 60 Error Code: -83906754 Error Message: duplicate key value violates unique constraint "exception_test_id_key"
+Number of failures: 1
+Error: 1 Array Index: 70 Error Code: -83906754 Error Message: duplicate key value violates unique constraint "exception_test_id_key"
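One detail worth calling out in the output above: SQL%BULK_EXCEPTIONS(i).ERROR_CODE comes back as a positive integer, so the test negates it before printing, which is why the codes appear as -33575106 and -83906754. The same negation is needed when matching a bulk error against the negative codes used by EXCEPTION_INIT; a minimal sketch, reusing the not-null code seen above:

FOR i IN 1 .. SQL%BULK_EXCEPTIONS.count LOOP
    IF -SQL%BULK_EXCEPTIONS(i).ERROR_CODE = -33575106 THEN  -- not-null violation
        RAISE NOTICE 'iteration % hit a NOT NULL violation',
            SQL%BULK_EXCEPTIONS(i).error_index;
    END IF;
END LOOP;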
+-- try in function
+CREATE OR REPLACE FUNCTION func_test_forall() RETURNS int AS
+$BODY$
+DECLARE
+    TYPE t_tab IS TABLE OF exception_test%ROWTYPE;
+
+    l_tab t_tab := t_tab();
+    l_error_count NUMBER;
+BEGIN
+    -- Fill the collection.
+    FOR i IN 1 .. 20 LOOP
+        l_tab.extend;
+        l_tab(l_tab.last).id := i;
+    END LOOP;
+
+    -- Cause a failure.
+    l_tab(10).id := 9;
+
+    EXECUTE IMMEDIATE 'TRUNCATE TABLE exception_test';
+
+    BEGIN
+        FORALL i IN l_tab.first .. l_tab.last SAVE EXCEPTIONS
+            INSERT INTO exception_test
+            VALUES l_tab(i);
+    EXCEPTION
+        WHEN others THEN
+            l_error_count := SQL%BULK_EXCEPTIONS.count;
+            DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+            FOR i IN 1 .. l_error_count LOOP
+                DBE_OUTPUT.print_line('Error: ' || i ||
+                    ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                    ' Error Code: ' || -SQL%BULK_EXCEPTIONS(i).ERROR_CODE ||
+                    ' Error Message: ' || SQL%BULK_EXCEPTIONS(i).ERROR_MESSAGE);
+            END LOOP;
+    END;
+
+    return 0;
+END;
+$BODY$
+LANGUAGE plpgsql;
+select * from func_test_forall();
+Number of failures: 1
+Error: 1 Array Index: 10 Error Code: -83906754 Error Message: duplicate key value violates unique constraint "exception_test_id_key"
+ func_test_forall 
+------------------
+                0
+(1 row)
+
+-- FORALL should only be followed by a DML statement
+DECLARE
+    l_error_count integer;
+    l_sql varchar(1024);
+BEGIN
+    l_sql :='INSERT INTO test_forall VALUES (1);';
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS
+        execute immediate l_sql using l_tab(i);
+END;
+/
+ERROR: FORALL must follow DML statement.
+CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 5
+DECLARE
+    l_error_count integer;
+    l_sql varchar(1024);
+BEGIN
+    l_sql :='INSERT INTO test_forall VALUES (1);';
+    FORALL i IN 1 .. 2
+        execute immediate l_sql using l_tab(i);
+END;
+/
+ERROR: missing "DML" at end of SQL expression
+LINE 6:         execute immediate l_sql using l_tab(i);
+                ^
+QUERY: DECLARE l_error_count integer;
+    l_sql varchar(1024);
+BEGIN
+    l_sql :='INSERT INTO test_forall VALUES (1);';
+    FORALL i IN 1 .. 2
+        execute immediate l_sql using l_tab(i);
+END
+truncate test_forall;
+DECLARE
+    l_sql varchar(1024);
+    target int;
+BEGIN
+    FORALL i IN 1 .. 2 --merge OK
+        merge into test_forall using (SELECT 1 a) src on test_forall.a = src.a WHEN NOT MATCHED THEN INSERT VALUES (src.a);
+    FORALL i IN 1 .. 2 --select OK
+        SELECT a into target from test_forall;
+    FORALL i IN 1 .. 2 --insert OK
+        INSERT INTO test_forall VALUES (1);
+    FORALL i IN 1 .. 2 --update OK
+        UPDATE test_forall SET a = 2;
+    FORALL i IN 1 .. 2 --delete OK
+        DELETE FROM test_forall;
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --merge OK
+        merge into test_forall using (SELECT 1 a) src on test_forall.a = src.a WHEN NOT MATCHED THEN INSERT VALUES (src.a);
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --select OK
+        SELECT a into target from test_forall;
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --insert OK
+        INSERT INTO test_forall VALUES (1);
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --update OK
+        UPDATE test_forall SET a = 2;
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --delete OK
+        DELETE FROM test_forall;
+END;
+/
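Since EXECUTE IMMEDIATE is rejected under FORALL (see the two errors above), the compliant way to parameterize the statement is to reference the index variable directly in a static DML statement. An editorial sketch under that assumption; the collection l_tab here is hypothetical:

DECLARE
    TYPE t_int IS TABLE OF int;
    l_tab t_int := t_int();
BEGIN
    l_tab.extend(2);
    l_tab(1) := 1;
    l_tab(2) := 2;
    FORALL i IN 1 .. 2
        INSERT INTO test_forall VALUES (l_tab(i));  -- static DML, index bound directly
END;
/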
+create table test_conflict(last_modified timestamp, comment text);
+insert into test_conflict values(now(), 'donda');
+-- default (equivalent to use_column under A compatibility)
+CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$
+  DECLARE
+    curtime timestamp := '2021-09-15 20:59:14';
+    var text;
+  BEGIN
+    select comment into var from test_conflict;
+    return var;
+  END;
+$$ LANGUAGE plpgsql;
+select * from conf_func(1,'off-season');
+ conf_func 
+-----------
+ donda
+(1 row)
+
+-- error
+CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$
+  #variable_conflict error
+  DECLARE
+    curtime timestamp := '2021-09-15 20:59:14';
+    var text;
+  BEGIN
+    select comment into var from test_conflict;
+    return var;
+  END;
+$$ LANGUAGE plpgsql;
+select * from conf_func(1,'off-season');
+ERROR: column reference "comment" is ambiguous
+LINE 1: select comment from test_conflict
+               ^
+DETAIL: It could refer to either a PL/pgSQL variable or a table column.
+QUERY: select comment from test_conflict
+CONTEXT: referenced column: comment
+PL/pgSQL function conf_func(integer,text) line 7 at SQL statement
+-- use_column
+CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$
+  #variable_conflict use_column
+  DECLARE
+    curtime timestamp := '2021-09-15 20:59:14';
+    var text;
+  BEGIN
+    select comment into var from test_conflict;
+    return var;
+  END;
+$$ LANGUAGE plpgsql;
+select * from conf_func(1,'off-season');
+ conf_func 
+-----------
+ donda
+(1 row)
+
+-- use_variable
+CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$
+  #variable_conflict use_variable
+  DECLARE
+    curtime timestamp := '2021-09-15 20:59:14';
+    var text;
+  BEGIN
+    select comment into var from test_conflict;
+    return var;
+  END;
+$$ LANGUAGE plpgsql;
+select * from conf_func(1,'off-season');
+ conf_func  
+------------
+ off-season
+(1 row)
+
+-- test original case
+create table test_orig(c1 int,c2 int);
+insert into test_orig values(1,2);
+create or replace procedure pro_tblof_pro_004(c1 in number,c2 out number)
+as
+    type ARRAY_INTEGER is table of int;
+    tblof001 ARRAY_INTEGER := ARRAY_INTEGER();
+begin
+    tblof001.extend(10);
+    tblof001(1) :=1;
+    c2 :=tblof001(1);
+    select c2 into tblof001(2) from test_orig;
+    DBE_OUTPUT.PRINT_LINE(tblof001(tblof001.FIRST));
+    DBE_OUTPUT.PRINT_LINE('tblof001.last is '||tblof001.last);
+    DBE_OUTPUT.PRINT_LINE('tblof001.2 is '||tblof001(2));
+    DBE_OUTPUT.PRINT_LINE('tblof001.3 is '||c2);
+end;
+/
+declare
+a number;
+begin
+    pro_tblof_pro_004(1,a);
+    DBE_OUTPUT.PRINT_LINE(a);
+end;
+/
+1
+tblof001.last is 10
+tblof001.2 is 2
+tblof001.3 is 1
+1
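An editorial note on the collection semantics exercised by pro_tblof_pro_004: extend(10) allocates ten NULL slots, FIRST and LAST bound the index range (hence "tblof001.last is 10"), and SELECT ... INTO can target an element directly. A minimal sketch of the same mechanics, not part of the patch:

DECLARE
    type int_tab is table of int;
    v int_tab := int_tab();
BEGIN
    v.extend(3);       -- three NULL elements, indexes 1..3
    v(2) := 42;
    DBE_OUTPUT.PRINT_LINE(v.first || '..' || v.last);  -- prints 1..3
END;
/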
+-- test nested forall save exceptions
+drop type if exists type01;
+NOTICE: type "type01" does not exist, skipping
+drop table if exists t_06;
+NOTICE: table "t_06" does not exist, skipping
+create table t_06(c1 numeric(6,0),c2 date,c3 char(4));
+drop table if exists t_07;
+NOTICE: table "t_07" does not exist, skipping
+create table t_07(c1 numeric(8,0), c2 date, c3 char(10) not null);
+create type type01 is table of t_07%rowtype;
+create or replace procedure p1(l_error_count out number)
+as
+l_error_count number:=0;
+type02 type01;
+begin
+truncate table t_07;
+for i in 1..10 loop
+type02(i).c1=5000+i;
+type02(i).c2='20210929';
+type02(i).c3=i||'id';
+end loop;
+type02(2).c3=null;
+forall i in 1..type02.count save exceptions
+insert into t_07
+values type02(i);
+exception
+    when forall_dml_error then
+        l_error_count := sql%bulk_exceptions.count;
+        dbe_output.print_line('number of failures: ' || l_error_count);
+        for i in 1 .. l_error_count loop
+            dbe_output.print_line('error: ' || i ||
+                ' array index: ' || sql%bulk_exceptions[i].error_index ||
+                ' messagecode: ' || sql%bulk_exceptions[i].error_code ||
+                ' errormessage: ' || sql%bulk_exceptions[i].error_message);
+        end loop;
+        l_error_count := -(sql%bulk_exceptions.count)+type02.count;
+        dbe_output.print_line('successfully inserted: ' || l_error_count ||'rows');
+
+end;
+/
+call p1(1);
+number of failures: 1
+error: 1 array index: 2 messagecode: 33575106 errormessage: null value in column "c3" violates not-null constraint
+successfully inserted: 9rows
+ l_error_count 
+---------------
+ 
+(1 row)
+
+create or replace procedure p2(l_error_count out number)
+as
+l_error_count number:=0;
+type02 type01;
+
+begin
+for i in 1..10 loop
+type02(i).c1=5001+i;
+type02(i).c2='20210930';
+type02(i).c3=i||'id';
+end loop;
+type02(1).c3='a12345';
+truncate table t_06;
+insert into t_06(c1,c2,c3) select * from t_07;
+forall i in 1..type02.count
+update t_06 set c1=(select p1()),
+    c2=type02(i).c2
+    where c1=type02(i).c1;
+
+exception
+    when forall_dml_error then
+        l_error_count := sql%bulk_exceptions.count;
+        dbe_output.print_line('number of failures: ' || l_error_count);
+        for i in 1 .. l_error_count loop
+            dbe_output.print_line('error: ' || i ||
+                ' array index: ' || sql%bulk_exceptions[i].error_index ||
+                ' messagecode: ' || sql%bulk_exceptions[i].error_code ||
+                ' errormessage: ' || sql%bulk_exceptions[i].error_message);
+        end loop;
+        l_error_count := -(sql%bulk_exceptions.count)+type02.count;
+        dbe_output.print_line('successfully inserted: ' || l_error_count ||'rows');
+
+end;
+/
+select p2();
+ERROR: transaction statement in store procedure used as sql to get value is not supported
+CONTEXT: PL/pgSQL function p1() line 12 at FOR with integer loop variable
+referenced column: p1
+referenced column: c1
+SQL statement "update t_06 set c1=(select p1()),
+    c2=type02[i].c2
+    where c1=type02[i].c1"
+PL/pgSQL function p2() line 15 at SQL statement
+referenced column: p2
+create or replace procedure p2(l_error_count out number)
+as
+l_error_count number:=0;
+type02 type01;
+
+begin
+for i in 1..10 loop
+type02(i).c1=5001+i;
+type02(i).c2='20210930';
+type02(i).c3=i||'id';
+end loop;
+type02(1).c3='a12345';
+truncate table t_06;
+insert into t_06(c1,c2,c3) select * from t_07;
+forall i in 1..type02.count save exceptions
+update t_06 set c1=(select p1()),
+    c2=type02(i).c2
+    where c1=type02(i).c1;
+
+exception
+    when forall_dml_error then
+        l_error_count := sql%bulk_exceptions.count;
+        dbe_output.print_line('number of failures: ' || l_error_count);
+        for i in 1 .. 
l_error_count loop + dbe_output.print_line('error: ' || i || + ' array index: ' || sql%bulk_exceptions[i].error_index || + ' messagecode: ' || sql%bulk_exceptions[i].error_code || + ' errormessage: ' || sql%bulk_exceptions[i].error_message); + end loop; + l_error_count := -(sql%bulk_exceptions.count)+type02.count; + dbe_output.print_line('successfully inserted: ' || l_error_count ||'rows'); + +end; +/ +select p2(); +number of failures: 8 +error: 1 array index: 2 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 2 array index: 3 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 3 array index: 4 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 4 array index: 5 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 5 array index: 6 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 6 array index: 7 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 7 array index: 8 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +error: 8 array index: 9 messagecode: 1282 errormessage: transaction statement in store procedure used as sql to get value is not supported +successfully inserted: 2rows + p2 +---- + +(1 row) + +--test with implicit_savepoint +drop table if exists tab_01; +NOTICE: table "tab_01" does not exist, skipping +create table tab_01( +c1 varchar2(6), +c2 varchar2(8) not null, +c3 number(9,1) default 10+238/5*3, +c4 varchar2(2), +c5 timestamp(6) check(c4 between 1 and 30), +constraint pk_tab01_c1 primary key(c1) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_tab01_c1" for table "tab_01" +drop table if exists tab_02; +NOTICE: table "tab_02" does not exist, skipping +create table tab_02( +c1 varchar2(10), +c2 varchar2(10) not null, +c3 number(15,0), +c4 varchar2(5), +c5 timestamp(6), +constraint pk_tab02_c1 primary key(c1) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pk_tab02_c1" for table "tab_02" +create or replace procedure save_exps_insert02(v1 in numeric) +as +l_error_count number; +type tab_02_type is table of tab_02%rowtype; +tab_02_01 tab_02_type; + +begin + + execute immediate 'truncate table tab_01'; + + for i in 1..50 loop + tab_02_01(i).c1:=i; + tab_02_01(i).c2:='001'||i; + tab_02_01(i).c4:=i; + tab_02_01(i).c5:='2010-12-12'; + end loop; + + tab_02_01(10).c1 := 9; + tab_02_01(8).c1 := 7; + tab_02_01(6).c1 := null; + tab_02_01(11).c2 := null; + tab_02_01(1).c3:=2; + tab_02_01(2).c3:=1234567890; + + forall i in 1..tab_02_01.count save exceptions + insert into tab_01(c1,c2,c3,c4,c5) + values (decode(tab_02_01(i).c1,'49','490',tab_02_01(i).c1), + 'p'||tab_02_01(i).c2, + tab_02_01(i).c3, + case when tab_02_01(i).c1=50 then substr(tab_02_01(i).c4,1,1) + when tab_02_01(i).c1=49 then substr(tab_02_01(i).c4,2,1) + else tab_02_01(i).c4 end , + tab_02_01(i).c5 ); + + exception + when forall_dml_error then + l_error_count := sql%bulk_exceptions.count; + dbe_output.print_line('number of failures: ' || l_error_count); + for i in 1 .. 
l_error_count loop + dbe_output.print_line('error: ' || i || + ' array index: ' || sql%bulk_exceptions[i].error_index || + ' messagecode: ' || sql%bulk_exceptions[i].error_code || + ' errormessage: ' || sql%bulk_exceptions[i].error_message); + end loop; + for i in 1..sql%bulk_exceptions.count loop + if -sql%bulk_exceptions[i].error_code=-33575106 then + insert into tab_01 values(100+tab_02_01(i).c1, + tab_02_01(i).c2, + v1, + tab_02_01(i).c4, + tab_02_01(i).c5); + end if; + end loop; + when others then + rollback ; + raise; +end; +/ +set behavior_compat_options='plstmt_implicit_savepoint'; +call save_exps_insert02(9); +number of failures: 22 +error: 1 array index: 2 messagecode: 50331778 errormessage: numeric field overflow +error: 2 array index: 6 messagecode: 33575106 errormessage: null value in column "c1" violates not-null constraint +error: 3 array index: 8 messagecode: 83906754 errormessage: duplicate key value violates unique constraint "pk_tab01_c1" +error: 4 array index: 10 messagecode: 83906754 errormessage: duplicate key value violates unique constraint "pk_tab01_c1" +error: 5 array index: 31 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 6 array index: 32 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 7 array index: 33 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 8 array index: 34 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 9 array index: 35 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 10 array index: 36 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 11 array index: 37 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 12 array index: 38 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 13 array index: 39 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 14 array index: 40 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 15 array index: 41 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 16 array index: 42 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 17 array index: 43 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 18 array index: 44 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 19 array index: 45 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 20 array index: 46 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 21 array index: 47 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint "tab_01_c4_check" +error: 22 array index: 48 messagecode: 67391682 errormessage: new row for relation "tab_01" violates check constraint 
"tab_01_c4_check" + save_exps_insert02 +-------------------- + +(1 row) + +---- +-- test decode coercion +---- +show sql_compatibility; + sql_compatibility +------------------- + A +(1 row) + +create table test_decode_coercion( + col_bool bool, + col_sint int2, + col_int int, + col_bigint bigint, + col_char char(10), + col_bpchar bpchar, + col_varchar varchar, + col_text text, + col_date date, + col_time timestamp +); +COPY test_decode_coercion(col_bool, col_sint, col_int, col_bigint, col_char, col_bpchar, col_varchar, col_text, col_date, col_time) FROM stdin; +-- case 1. coerce first argument to second argument's unknown type +set sql_beta_feature = 'none'; +select decode(2,'ff3',5,2); -- to be supported +ERROR: invalid input syntax for integer: "ff3" +LINE 1: select decode(2,'ff3',5,2); + ^ +CONTEXT: referenced column: case +select case when 2 = 'ff3' then 5 else 2 end; -- to be supported +ERROR: invalid input syntax for integer: "ff3" +LINE 1: select case when 2 = 'ff3' then 5 else 2 end; + ^ +CONTEXT: referenced column: case +-- valid coercions +select + decode(col_char, 'arbitrary', 1, 2), + decode(col_bpchar, 'arbitrary', 1, 2), + decode(col_varchar, 'arbitrary', 1, 2), + decode(col_text, 'arbitrary', 1, 2), + decode(col_date, '2021-09-17', 1, 2), + decode(col_time, '2000-01-01 01:01:01', 1, 2) +from test_decode_coercion; + case | case | case | case | case | case +------+------+------+------+------+------ + 2 | 2 | 2 | 2 | 2 | 1 +(1 row) + +-- to be supported ones +select + decode(col_sint, 'arbitrary', 1, 2), + decode(col_int, 'arbitrary', 1, 2), + decode(col_bigint, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for integer: "arbitrary" +LINE 2: decode(col_sint, 'arbitrary', 1, 2), + ^ +CONTEXT: referenced column: case +-- invalid +select + decode(col_bool, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for type boolean: "arbitrary" +LINE 2: decode(col_bool, 'arbitrary', 1, 2) + ^ +CONTEXT: referenced column: case +-- invalid +select + decode(col_date, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for type timestamp: "arbitrary" +LINE 2: decode(col_date, 'arbitrary', 1, 2) + ^ +CONTEXT: referenced column: case +-- invalid +select + decode(col_time, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for type timestamp: "arbitrary" +LINE 2: decode(col_time, 'arbitrary', 1, 2) + ^ +CONTEXT: referenced column: case +set sql_beta_feature = 'a_style_coerce'; +select decode(2,'ff3',5,2); -- now ok + case +------ + 2 +(1 row) + +select case when 2 = 'ff3' then 5 else 2 end; -- now ok + case +------ + 2 +(1 row) + +-- still valid +select + decode(col_char, 'arbitrary', 1, 2), + decode(col_bpchar, 'arbitrary', 1, 2), + decode(col_varchar, 'arbitrary', 1, 2), + decode(col_text, 'arbitrary', 1, 2), + decode(col_date, '2021-09-17', 1, 2), + decode(col_time, '2000-01-01 01:01:01', 1, 2) +from test_decode_coercion; + case | case | case | case | case | case +------+------+------+------+------+------ + 2 | 2 | 2 | 2 | 2 | 1 +(1 row) + +-- now supported +select + decode(col_sint, 'arbitrary', 1, 2), + decode(col_int, 'arbitrary', 1, 2), + decode(col_bigint, 'arbitrary', 1, 2) +from test_decode_coercion; + case | case | case +------+------+------ + 2 | 2 | 2 +(1 row) + +-- still fail +select + decode(col_bool, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for type boolean: "arbitrary" +LINE 2: decode(col_bool, 'arbitrary', 1, 2) + ^ +CONTEXT: referenced 
column: case +-- still fail +select + decode(col_date, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for type timestamp: "arbitrary" +LINE 2: decode(col_date, 'arbitrary', 1, 2) + ^ +CONTEXT: referenced column: case +-- still fail +select + decode(col_time, 'arbitrary', 1, 2) +from test_decode_coercion; +ERROR: invalid input syntax for type timestamp: "arbitrary" +LINE 2: decode(col_time, 'arbitrary', 1, 2) + ^ +CONTEXT: referenced column: case +-- case 2. decode case results need coercion +set sql_beta_feature = 'none'; +select decode(2,3,'r',2); -- to be supported +ERROR: invalid input syntax for integer: "r" +LINE 1: select decode(2,3,'r',2); + ^ +CONTEXT: referenced column: case +select case when 2 = 3 then 'r' else 2 end; -- to be supported +ERROR: invalid input syntax for integer: "r" +LINE 1: select case when 2 = 3 then 'r' else 2 end; + ^ +CONTEXT: referenced column: case +-- valid coercions +select + decode(1, 2, 'never', col_char), + decode(1, 2, 'never', col_bpchar), + decode(1, 2, 'never', col_varchar), + decode(1, 2, 'never', col_text), + decode(1, 2, '2021-09-17', col_date), + decode(1, 2, '2000-01-01 01:01:01', col_time) +from test_decode_coercion; + col_char | col_bpchar | col_varchar | col_text | col_date | col_time +------------+------------+-------------+----------+--------------------------+-------------------------- + 11 | 111 | 1111 | 123456 | Sat Jan 01 01:01:01 2000 | Sat Jan 01 01:01:01 2000 +(1 row) + +-- to be supported +select + decode(1, 2, 'never', col_sint), + decode(1, 2, 'never', col_int), + decode(1, 2, 'never', col_bigint) +from test_decode_coercion; +ERROR: invalid input syntax for integer: "never" +LINE 2: decode(1, 2, 'never', col_sint), + ^ +CONTEXT: referenced column: col_sint +-- invalid +select + decode(1, 2, 'never', col_bool) +from test_decode_coercion; +ERROR: invalid input syntax for type boolean: "never" +LINE 2: decode(1, 2, 'never', col_bool) + ^ +CONTEXT: referenced column: col_bool +-- invalid +select + decode(1, 2, 'never', col_date) +from test_decode_coercion; +ERROR: invalid input syntax for type timestamp: "never" +LINE 2: decode(1, 2, 'never', col_date) + ^ +CONTEXT: referenced column: col_date +-- invalid +select + decode(1, 2, 'never', col_time) +from test_decode_coercion; +ERROR: invalid input syntax for type timestamp: "never" +LINE 2: decode(1, 2, 'never', col_time) + ^ +CONTEXT: referenced column: col_time +set sql_beta_feature = 'a_style_coerce'; +select decode(2,3,'r',2); -- now ok + case +------ + 2 +(1 row) + +select case when 2 = 3 then 'r' else 2 end; -- now ok + case +------ + 2 +(1 row) + +-- still valid +select + decode(1, 2, 'never', col_char), + decode(1, 2, 'never', col_bpchar), + decode(1, 2, 'never', col_varchar), + decode(1, 2, 'never', col_text) +from test_decode_coercion; + col_char | col_bpchar | col_varchar | col_text +----------+------------+-------------+---------- + 11 | 111 | 1111 | 123456 +(1 row) + +-- now supported +select + decode(1, 2, 'never', col_sint), + decode(1, 2, 'never', col_int), + decode(1, 2, 'never', col_bigint) +from test_decode_coercion; + col_sint | col_int | col_bigint +----------+---------+------------ + 1 | 0 | 256 +(1 row) + +-- still invalid +select + decode(1, 2, 'never', col_bool) +from test_decode_coercion; +ERROR: CASE types boolean and text cannot be matched +LINE 2: decode(1, 2, 'never', col_bool) + ^ +CONTEXT: referenced column: col_bool +-- still invalid +select + decode(1, 2, 'never', col_date) +from test_decode_coercion; +ERROR: CASE types 
timestamp without time zone and text cannot be matched +LINE 2: decode(1, 2, 'never', col_date) + ^ +CONTEXT: referenced column: col_date +-- still invalid +select + decode(1, 2, 'never', col_time) +from test_decode_coercion; +ERROR: CASE types timestamp without time zone and text cannot be matched +LINE 2: decode(1, 2, 'never', col_time) + ^ +CONTEXT: referenced column: col_time +drop schema forall_save_exceptions cascade; +NOTICE: drop cascades to 17 other objects +DETAIL: drop cascades to table test_forall +drop cascades to function test_func(integer) +drop cascades to table exception_test +drop cascades to function func_test_forall() +drop cascades to table test_conflict +drop cascades to function conf_func(integer,text) +drop cascades to table test_orig +drop cascades to function pro_tblof_pro_004(numeric) +drop cascades to table t_06 +drop cascades to table t_07 +drop cascades to type _t_07[] +drop cascades to function p1() +drop cascades to function p2() +drop cascades to table tab_01 +drop cascades to table tab_02 +drop cascades to function save_exps_insert02(numeric) +drop cascades to table test_decode_coercion diff --git a/src/test/regress/expected/force_vector_engine.out b/src/test/regress/expected/force_vector_engine.out new file mode 100644 index 000000000..bda581480 --- /dev/null +++ b/src/test/regress/expected/force_vector_engine.out @@ -0,0 +1,262 @@ +create schema force_vector_engine; +set current_schema=force_vector_engine; +create table force_vector_test(id int, val int); +insert into force_vector_test values(generate_series(1, 10000), generate_series(1, 1000)); +create table force_vector_test1(id int, val int); +insert into force_vector_test1 select * from force_vector_test; +create index on force_vector_test1(id); +analyze force_vector_test; +analyze force_vector_test1; +create table force_vector_test2(id int, val int) with (orientation=column); +insert into force_vector_test2 select * from force_vector_test; +analyze force_vector_test2; +create function func_add_sql(a int, b int) +returns int +AS $$ +declare + res int; +begin + select a+b into res; + return res; +end; $$ +LANGUAGE plpgsql; +set try_vector_engine_strategy='force'; +explain select count(*) from force_vector_test; + QUERY PLAN +----------------------------------------------------------------------------------------- + Row Adapter (cost=170.01..170.01 rows=1 width=8) + -> Vector Aggregate (cost=170.00..170.01 rows=1 width=8) + -> Vector Adapter(type: BATCH MODE) (cost=145.00..145.00 rows=10000 width=0) + -> Seq Scan on force_vector_test (cost=0.00..145.00 rows=10000 width=0) +(4 rows) + +select count(*) from force_vector_test; + count +------- + 10000 +(1 row) + +explain select count(*) from force_vector_test1 where id=2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=8.28..8.28 rows=1 width=8) + -> Vector Aggregate (cost=8.27..8.28 rows=1 width=8) + -> Vector Adapter (cost=8.27..8.27 rows=1 width=0) + -> Index Only Scan using force_vector_test1_id_idx on force_vector_test1 (cost=0.00..8.27 rows=1 width=0) + Index Cond: (id = 2) +(5 rows) + +select count(*) from force_vector_test1 where id=2; + count +------- + 1 +(1 row) + +explain select count(*) from force_vector_test1 where id=2 and val=2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=8.28..8.28 rows=1 width=8) + -> Vector Aggregate 
(cost=8.27..8.28 rows=1 width=8) + -> Vector Adapter (cost=8.27..8.27 rows=1 width=0) + -> Index Scan using force_vector_test1_id_idx on force_vector_test1 (cost=0.00..8.27 rows=1 width=0) + Index Cond: (id = 2) + Filter: (val = 2) +(6 rows) + +select count(*) from force_vector_test1 where id=2 and val=2; + count +------- + 1 +(1 row) + +set enable_indexscan=off; +explain select count(*) from force_vector_test1 where id=2; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Row Adapter (cost=8.28..8.28 rows=1 width=8) + -> Vector Aggregate (cost=8.27..8.28 rows=1 width=8) + -> Vector Adapter (cost=8.27..8.27 rows=1 width=0) + -> Bitmap Heap Scan on force_vector_test1 (cost=4.26..8.27 rows=1 width=0) + Recheck Cond: (id = 2) + -> Bitmap Index Scan on force_vector_test1_id_idx (cost=0.00..4.26 rows=1 width=0) + Index Cond: (id = 2) +(7 rows) + +select count(*) from force_vector_test1 where id=2; + count +------- + 1 +(1 row) + +explain select count(*) from func_add_sql(1,2); + QUERY PLAN +----------------------------------------------------------------------------------- + Row Adapter (cost=0.27..0.27 rows=1 width=8) + -> Vector Aggregate (cost=0.26..0.27 rows=1 width=8) + -> Vector Adapter (cost=0.26..0.26 rows=1 width=0) + -> Function Scan on func_add_sql (cost=0.25..0.26 rows=1 width=0) +(4 rows) + +select count(*) from func_add_sql(1,2); + count +------- + 1 +(1 row) + +explain values (1, 'AAAAA', 'read'),(2, 'BBBBB', 'write') order by 1,2,3; + QUERY PLAN +-------------------------------------------------------------------------------- + Row Adapter (cost=0.04..0.04 rows=2 width=68) + -> Vector Sort (cost=0.04..0.04 rows=2 width=68) + Sort Key: column1, column2, column3 + -> Vector Adapter (cost=0.03..0.03 rows=2 width=68) + -> Values Scan on "*VALUES*" (cost=0.00..0.03 rows=2 width=68) +(5 rows) + +values (1, 'AAAAA', 'read'),(2, 'BBBBB', 'write') order by 1,2,3; + column1 | column2 | column3 +---------+---------+--------- + 1 | AAAAA | read + 2 | BBBBB | write +(2 rows) + +explain select * from force_vector_test where ctid='(0,1)' order by 2; + QUERY PLAN +----------------------------------------------------------------------------------- + Row Adapter (cost=4.03..4.03 rows=1 width=8) + -> Vector Sort (cost=4.02..4.03 rows=1 width=8) + Sort Key: val + -> Vector Adapter (cost=4.01..4.01 rows=1 width=8) + -> Tid Scan on force_vector_test (cost=0.00..4.01 rows=1 width=8) + TID Cond: (ctid = '(0,1)'::tid) +(6 rows) + +select * from force_vector_test where ctid='(0,1)' order by 2; + id | val +----+----- + 1 | 1 +(1 row) + +explain select * from force_vector_test t1, force_vector_test2 t2 where t1.id=t2.id order by t1.id limit 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Row Adapter (cost=691.62..691.62 rows=10 width=16) + -> Vector Limit (cost=691.60..691.62 rows=10 width=16) + -> Vector Sort (cost=691.60..716.60 rows=10000 width=16) + Sort Key: t1.id + -> Vector Sonic Hash Join (cost=193.00..475.50 rows=10000 width=16) + Hash Cond: (t1.id = t2.id) + -> Vector Adapter(type: BATCH MODE) (cost=145.00..145.00 rows=10000 width=8) + -> Seq Scan on force_vector_test t1 (cost=0.00..145.00 rows=10000 width=8) + -> CStore Scan on force_vector_test2 t2 (cost=0.00..68.00 rows=10000 width=8) +(9 rows) + +select * from force_vector_test t1, force_vector_test2 t2 where t1.id=t2.id order by t1.id limit 10; + id | val | id | val +----+-----+----+----- + 1 | 
1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 4 | 4 | 4 + 5 | 5 | 5 | 5 + 6 | 6 | 6 | 6 + 7 | 7 | 7 | 7 + 8 | 8 | 8 | 8 + 9 | 9 | 9 | 9 + 10 | 10 | 10 | 10 +(10 rows) + +set query_dop=1004; +explain select count(*) from force_vector_test; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Row Adapter (cost=61.26..61.26 rows=1 width=8) + -> Vector Aggregate (cost=61.25..61.26 rows=1 width=8) + -> Vector Streaming(type: LOCAL GATHER dop: 1/4) (cost=61.25..61.26 rows=1 width=8) + -> Vector Aggregate (cost=61.25..61.26 rows=1 width=8) + -> Vector Adapter(type: BATCH MODE) (cost=36.25..36.25 rows=10000 width=0) + -> Seq Scan on force_vector_test (cost=0.00..36.25 rows=10000 width=0) +(6 rows) + +select count(*) from force_vector_test; + count +------- + 10000 +(1 row) + +set query_dop=1; +create table force_vector_test3(id int, val int) with(storage_type=ustore); +select count(*) from force_vector_test3; + count +------- + 0 +(1 row) + +insert into force_vector_test3 select * from force_vector_test; +analyze force_vector_test3; +explain select count(*) from force_vector_test; + QUERY PLAN +----------------------------------------------------------------------------------------- + Row Adapter (cost=170.01..170.01 rows=1 width=8) + -> Vector Aggregate (cost=170.00..170.01 rows=1 width=8) + -> Vector Adapter(type: BATCH MODE) (cost=145.00..145.00 rows=10000 width=0) + -> Seq Scan on force_vector_test (cost=0.00..145.00 rows=10000 width=0) +(4 rows) + +select count(*) from force_vector_test; + count +------- + 10000 +(1 row) + +create table force_vector_test4(c1 int, c2 double precision, c3 double precision, c4 point); +insert into force_vector_test4(c1, c2, c3) values(20, 2.3, 2.3); +select point(c2, c3) from force_vector_test4 where c1 = 20; + point +----------- + (2.3,2.3) +(1 row) + +-- Do not use vectorization engine +explain select point(c2, c3) from force_vector_test4 where c1 = 20; + QUERY PLAN +-------------------------------------------------------------------- + Seq Scan on force_vector_test4 (cost=0.00..25.49 rows=6 width=16) + Filter: (c1 = 20) +(2 rows) + +create table force_vector_test5(id int, name varchar(1000)); +insert into force_vector_test5 values(1, 'apple'); +insert into force_vector_test5 values(2, 'pear'); +insert into force_vector_test5 values(3, 'apple pear'); +-- Using the Vectorization Engine +explain select count(*) from force_vector_test5 where id =1 or to_tsvector('ngram',name)@@to_tsquery('ngram','pear'); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=12.62..12.62 rows=1 width=8) + -> Vector Aggregate (cost=12.61..12.62 rows=1 width=8) + -> Vector Adapter(type: BATCH MODE) (cost=12.61..12.61 rows=1 width=0) + Filter: ((id = 1) OR (to_tsvector('ngram'::regconfig, (name)::text) @@ '''pe'' & ''ea'' & ''ar'''::tsquery)) + -> Seq Scan on force_vector_test5 (cost=0.00..12.61 rows=1 width=0) +(5 rows) + +select count(*) from force_vector_test5 where id =1 or to_tsvector('ngram',name)@@to_tsquery('ngram','pear'); + count +------- + 3 +(1 row) + +create table force_vector_test6(a int, b int, c int); +insert into force_vector_test6 values(1,2,3); +alter table force_vector_test6 drop column b; +insert into force_vector_test6 select * from force_vector_test6; +set try_vector_engine_strategy='off'; +drop table force_vector_test; +drop table force_vector_test1; +drop table force_vector_test2; +drop 
table force_vector_test3; +drop table force_vector_test4; +drop table force_vector_test5; +drop table force_vector_test6; +drop function func_add_sql; +drop schema force_vector_engine cascade; diff --git a/src/test/regress/expected/force_vector_engine2.out b/src/test/regress/expected/force_vector_engine2.out new file mode 100644 index 000000000..051f09ebc --- /dev/null +++ b/src/test/regress/expected/force_vector_engine2.out @@ -0,0 +1,52 @@ +create schema test_force_vector2; +set current_schema=test_force_vector2; +create table force_vector_test(id int, val1 int, val2 numeric(10,5)); +insert into force_vector_test values(generate_series(1, 10000), generate_series(1, 1000), generate_series(1, 2000)); +analyze force_vector_test; +-- partition table +create table force_vector_partition(id int, val1 int, val2 text) +partition by range(id) ( + partition force_vector_p1 values less than (2001), + partition force_vector_p2 values less than (4001), + partition force_vector_p3 values less than (6001), + partition force_vector_p4 values less than (8001), + partition force_vector_p5 values less than (MAXVALUE) +); +insert into force_vector_partition values(generate_series(1, 10000), generate_series(1, 2000), generate_series(1, 5000)); +analyze force_vector_partition; +explain (analyze on, timing off) select /*+ set(try_vector_engine_strategy force) */ id, val1*2, val2+val1 as val3 from force_vector_test where id < 5000 and val1 < 500 order by id limit 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=277.80..277.80 rows=10 width=13) (actual rows=10 loops=1) + -> Vector Limit (cost=277.77..277.80 rows=10 width=13) (actual rows=10 loops=1) + -> Vector Sort (cost=277.77..284.02 rows=2500 width=13) (actual rows=10 loops=1) + Sort Key: id + Sort Method: top-N heapsort Memory: 4kB + -> Vector Adapter(type: BATCH MODE) (cost=223.75..223.75 rows=2500 width=13) (actual rows=2495 loops=1) + Filter: ((id < 5000) AND (val1 < 500)) + Rows Removed by Filter: 7505 + -> Seq Scan on force_vector_test (cost=0.00..223.75 rows=2500 width=13) (actual rows=10000 loops=1) +--?.* +(10 rows) + +explain (analyze on, timing off) select /*+ set(try_vector_engine_strategy force) */ id, avg(val1), sum(val2) from force_vector_partition group by id order by id limit 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=596.12..596.12 rows=10 width=76) (actual rows=10 loops=1) + -> Vector Limit (cost=596.10..596.12 rows=10 width=76) (actual rows=10 loops=1) + -> Vector Sort (cost=596.10..621.10 rows=10000 width=76) (actual rows=10 loops=1) + Sort Key: id + Sort Method: top-N heapsort Memory: 5kB + -> Vector Hash Aggregate (cost=255.00..380.00 rows=10000 width=76) (actual rows=10000 loops=1) + Group By Key: id + -> Vector Partition Iterator (cost=0.00..155.00 rows=10000 width=12) (actual rows=10000 loops=1) + Iterations: 5 + -> Vector Adapter(type: BATCH MODE) (cost=155.00..155.00 rows=10000 width=12) (actual rows=10000 loops=5) + -> Partitioned Seq Scan on force_vector_partition (cost=0.00..155.00 rows=10000 width=12) (actual rows=10000 loops=5) + Selected Partitions: 1..5 +--?.* +(13 rows) + +drop table force_vector_test; +drop schema test_force_vector2 cascade; +NOTICE: drop cascades to table force_vector_partition diff --git a/src/test/regress/expected/function.out 
b/src/test/regress/expected/function.out index 285a49003..03208328b 100644 --- a/src/test/regress/expected/function.out +++ b/src/test/regress/expected/function.out @@ -1612,44 +1612,5 @@ DROP FUNCTION func_increment_sql_1; DROP FUNCTION func_increment_sql_2; DROP FUNCTION fun_test_1; DROP FUNCTION fun_test_2; -CREATE OR REPLACE PROCEDURE test_spi() -AS DECLARE d_statement VARCHAR2(32767); -BEGIN - d_statement := 'EXPLAIN PLAN FOR select 1'; - BEGIN - EXECUTE IMMEDIATE d_statement; - END; - - COMMIT; -END; -/ -select * from test_spi(); - test_spi ----------- - -(1 row) - -\d+ plan_table - View "pg_catalog.plan_table" - Column | Type | Modifiers | Storage | Description ---------------+-------------------------+-----------+----------+------------- - statement_id | character varying(30) | | extended | - plan_id | bigint | | plain | - id | integer | | plain | - operation | character varying(30) | | extended | - options | character varying(255) | | extended | - object_name | name | | plain | - object_type | character varying(30) | | extended | - object_owner | name | | plain | - projection | character varying(4000) | | extended | -View definition: - SELECT plan_table_data.statement_id, plan_table_data.plan_id, - plan_table_data.id, plan_table_data.operation, plan_table_data.options, - plan_table_data.object_name, plan_table_data.object_type, - plan_table_data.object_owner, plan_table_data.projection - FROM plan_table_data - WHERE plan_table_data.session_id = pg_current_sessionid() AND plan_table_data.user_id = pg_current_userid(); - -DROP PROCEDURE test_spi; \c regression; drop database IF EXISTS pl_test_funcion; diff --git a/src/test/regress/expected/function_get_table_def.out b/src/test/regress/expected/function_get_table_def.out index 84342697e..5f6e0bbef 100644 --- a/src/test/regress/expected/function_get_table_def.out +++ b/src/test/regress/expected/function_get_table_def.out @@ -16,8 +16,8 @@ select * from pg_get_tabledef('table_function_export_def'); ------------------------------------------------------------------------------------------------------- SET search_path = test_get_table_def; + CREATE TABLE table_function_export_def ( + - id integer NOT NULL, + - fid integer, + + id integer NOT NULL, + + fid integer, + CONSTRAINT table_export_base_fkey FOREIGN KEY (fid) REFERENCES table_function_export_def_base(id)+ ) + WITH (orientation=row, compression=no); + @@ -39,22 +39,22 @@ partition by range (id) partition table_range1_p4 values less than(maxvalue) ); select * from pg_get_tabledef('table_range1'); - pg_get_tabledef --------------------------------------------------------------------------------------- - SET search_path = test_get_table_def; + - CREATE TABLE table_range1 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (id) + - ( + - PARTITION table_range1_p1 VALUES LESS THAN (10) TABLESPACE pg_default, + - PARTITION table_range1_p2 VALUES LESS THAN (50) TABLESPACE pg_default, + - PARTITION table_range1_p3 VALUES LESS THAN (100) TABLESPACE pg_default, + - PARTITION table_range1_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ - ) + + pg_get_tabledef +--------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_range1 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (id) + + ( + + PARTITION 
table_range1_p1 VALUES LESS THAN (10) TABLESPACE pg_default, + + PARTITION table_range1_p2 VALUES LESS THAN (50) TABLESPACE pg_default, + + PARTITION table_range1_p3 VALUES LESS THAN (100) TABLESPACE pg_default, + + PARTITION table_range1_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ + ) + ENABLE ROW MOVEMENT; (1 row) @@ -68,22 +68,22 @@ partition by range (a) partition table_range2_p4 values less than(maxvalue) ); select * from pg_get_tabledef('table_range2'); - pg_get_tabledef -------------------------------------------------------------------------------------------- - SET search_path = test_get_table_def; + - CREATE TABLE table_range2 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (a) + - ( + - PARTITION table_range2_p1 VALUES LESS THAN ('2020-03-01') TABLESPACE pg_default,+ - PARTITION table_range2_p2 VALUES LESS THAN ('2020-05-01') TABLESPACE pg_default,+ - PARTITION table_range2_p3 VALUES LESS THAN ('2020-07-01') TABLESPACE pg_default,+ - PARTITION table_range2_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - ) + + pg_get_tabledef +-------------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_range2 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (a) + + ( + + PARTITION table_range2_p1 VALUES LESS THAN ('2020-03-01') TABLESPACE pg_default,+ + PARTITION table_range2_p2 VALUES LESS THAN ('2020-05-01') TABLESPACE pg_default,+ + PARTITION table_range2_p3 VALUES LESS THAN ('2020-07-01') TABLESPACE pg_default,+ + PARTITION table_range2_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + ENABLE ROW MOVEMENT; (1 row) @@ -97,26 +97,62 @@ partition by range (id, a) partition table_range3_p4 values less than(maxvalue, maxvalue) ); select * from pg_get_tabledef('table_range3'); - pg_get_tabledef ------------------------------------------------------------------------------------------------- - SET search_path = test_get_table_def; + - CREATE TABLE table_range3 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (id, a) + - ( + - PARTITION table_range3_p1 VALUES LESS THAN (10, '2020-03-01') TABLESPACE pg_default, + - PARTITION table_range3_p2 VALUES LESS THAN (50, '2020-05-01') TABLESPACE pg_default, + - PARTITION table_range3_p3 VALUES LESS THAN (100, '2020-07-01') TABLESPACE pg_default,+ - PARTITION table_range3_p4 VALUES LESS THAN (MAXVALUE, MAXVALUE) TABLESPACE pg_default+ - ) + + pg_get_tabledef +------------------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_range3 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (id, a) + + ( + + PARTITION table_range3_p1 VALUES LESS THAN (10, '2020-03-01') TABLESPACE pg_default, + + PARTITION table_range3_p2 VALUES LESS THAN (50, '2020-05-01') TABLESPACE pg_default, + + PARTITION table_range3_p3 VALUES LESS THAN (100, '2020-07-01') TABLESPACE pg_default,+ + PARTITION table_range3_p4 VALUES LESS THAN (MAXVALUE, MAXVALUE) TABLESPACE pg_default+ + ) + ENABLE ROW MOVEMENT; (1 row) drop table table_range3; +create table table_range4 (id int primary key, a 
date, b varchar) +partition by range (id) +( + partition table_range4_p1 start (10) end (40) every (10), + partition table_range4_p2 end (70), + partition table_range4_p3 start (70), + partition table_range4_p4 start (100) end (150) every (20) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "table_range4_pkey" for table "table_range4" +select * from pg_get_tabledef('table_range4'); + pg_get_tabledef +------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_range4 ( + + id integer NOT NULL, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (id) + + ( + + PARTITION table_range4_p1_0 VALUES LESS THAN (10) TABLESPACE pg_default, + + PARTITION table_range4_p1_1 VALUES LESS THAN (20) TABLESPACE pg_default, + + PARTITION table_range4_p1_2 VALUES LESS THAN (30) TABLESPACE pg_default, + + PARTITION table_range4_p1_3 VALUES LESS THAN (40) TABLESPACE pg_default, + + PARTITION table_range4_p2 VALUES LESS THAN (70) TABLESPACE pg_default, + + PARTITION table_range4_p3 VALUES LESS THAN (100) TABLESPACE pg_default, + + PARTITION table_range4_p4_1 VALUES LESS THAN (120) TABLESPACE pg_default,+ + PARTITION table_range4_p4_2 VALUES LESS THAN (140) TABLESPACE pg_default,+ + PARTITION table_range4_p4_3 VALUES LESS THAN (150) TABLESPACE pg_default + + ) + + ENABLE ROW MOVEMENT; + + ALTER TABLE table_range4 ADD CONSTRAINT table_range4_pkey PRIMARY KEY (id); +(1 row) + +drop table table_range4; --interval table create table table_interval1 (id int, a date, b varchar) partition by range (a) @@ -128,23 +164,23 @@ interval ('1 day') partition table_interval1_p4 values less than(maxvalue) ); select * from pg_get_tabledef('table_interval1'); - pg_get_tabledef ----------------------------------------------------------------------------------------------- - SET search_path = test_get_table_def; + - CREATE TABLE table_interval1 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY RANGE (a) + - INTERVAL ('1 day') + - ( + - PARTITION table_interval1_p1 VALUES LESS THAN ('2020-03-01') TABLESPACE pg_default,+ - PARTITION table_interval1_p2 VALUES LESS THAN ('2020-05-01') TABLESPACE pg_default,+ - PARTITION table_interval1_p3 VALUES LESS THAN ('2020-07-01') TABLESPACE pg_default,+ - PARTITION table_interval1_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + - ) + + pg_get_tabledef +----------------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_interval1 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (a) + + INTERVAL ('1 day') + + ( + + PARTITION table_interval1_p1 VALUES LESS THAN ('2020-03-01') TABLESPACE pg_default,+ + PARTITION table_interval1_p2 VALUES LESS THAN ('2020-05-01') TABLESPACE pg_default,+ + PARTITION table_interval1_p3 VALUES LESS THAN ('2020-07-01') TABLESPACE pg_default,+ + PARTITION table_interval1_p4 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + ENABLE ROW MOVEMENT; (1 row) @@ -158,21 +194,21 @@ partition by list (id) partition table_list1_p3 values (9, 10, 11, 12) ); select * from pg_get_tabledef('table_list1'); - pg_get_tabledef ------------------------------------------------------------------------------ - SET 
search_path = test_get_table_def; + - CREATE TABLE table_list1 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY LIST (id) + - ( + - PARTITION table_list1_p1 VALUES (1,2,3,4) TABLESPACE pg_default, + - PARTITION table_list1_p2 VALUES (5,6,7,8) TABLESPACE pg_default, + - PARTITION table_list1_p3 VALUES (9,10,11,12) TABLESPACE pg_default+ - ) + + pg_get_tabledef +------------------------------------------------------------------------ + SET search_path = test_get_table_def; + + CREATE TABLE table_list1 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (id) + + ( + + PARTITION table_list1_p1 VALUES (1,2,3,4) TABLESPACE pg_default, + + PARTITION table_list1_p2 VALUES (5,6,7,8) TABLESPACE pg_default, + + PARTITION table_list1_p3 VALUES (9,10,11,12) TABLESPACE pg_default+ + ) + ENABLE ROW MOVEMENT; (1 row) @@ -185,25 +221,54 @@ partition by list (b) partition table_list2_p3 values ('9', '10', '11', '12') ); select * from pg_get_tabledef('table_list2'); - pg_get_tabledef ----------------------------------------------------------------------------------------- - SET search_path = test_get_table_def; + - CREATE TABLE table_list2 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY LIST (b) + - ( + - PARTITION table_list2_p1 VALUES ('1', '2', '3', '4') TABLESPACE pg_default, + - PARTITION table_list2_p2 VALUES ('5', '6', '7', '8') TABLESPACE pg_default, + - PARTITION table_list2_p3 VALUES ('9', '10', '11', '12') TABLESPACE pg_default+ - ) + + pg_get_tabledef +-------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_list2 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (b) + + ( + + PARTITION table_list2_p1 VALUES ('1','2','3','4') TABLESPACE pg_default, + + PARTITION table_list2_p2 VALUES ('5','6','7','8') TABLESPACE pg_default, + + PARTITION table_list2_p3 VALUES ('9','10','11','12') TABLESPACE pg_default+ + ) + ENABLE ROW MOVEMENT; (1 row) drop table table_list2; +create table table_list3 (id int primary key, a date, b varchar) +partition by list (b) +( + partition table_list3_p1 values ('1', '2', '3', '4'), + partition table_list3_p2 values ('5', '6', '7', '8'), + partition table_list3_p3 values (default) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "table_list3_pkey" for table "table_list3" +select * from pg_get_tabledef('table_list3'); + pg_get_tabledef +------------------------------------------------------------------------------ + SET search_path = test_get_table_def; + + CREATE TABLE table_list3 ( + + id integer NOT NULL, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (b) + + ( + + PARTITION table_list3_p1 VALUES ('1','2','3','4') TABLESPACE pg_default,+ + PARTITION table_list3_p2 VALUES ('5','6','7','8') TABLESPACE pg_default,+ + PARTITION table_list3_p3 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ENABLE ROW MOVEMENT; + + ALTER TABLE table_list3 ADD CONSTRAINT table_list3_pkey PRIMARY KEY (id); +(1 row) + +drop table table_list3; --hash table create table table_hash1 (id int, a date, b varchar) partition by 
hash (id) @@ -213,24 +278,277 @@ partition by hash (id) partition table_hash1_p3 ); select * from pg_get_tabledef('table_hash1'); - pg_get_tabledef ----------------------------------------------------------- - SET search_path = test_get_table_def; + - CREATE TABLE table_hash1 ( + - id integer, + - a timestamp(0) without time zone, + - b character varying + - ) + - WITH (orientation=row, compression=no) + - PARTITION BY HASH (id) + - ( + - PARTITION table_hash1_p1 TABLESPACE pg_default,+ - PARTITION table_hash1_p2 TABLESPACE pg_default,+ - PARTITION table_hash1_p3 TABLESPACE pg_default + - ) + + pg_get_tabledef +----------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE table_hash1 ( + + id integer, + + a timestamp(0) without time zone, + + b character varying + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY HASH (id) + + ( + + PARTITION table_hash1_p1 TABLESPACE pg_default,+ + PARTITION table_hash1_p2 TABLESPACE pg_default,+ + PARTITION table_hash1_p3 TABLESPACE pg_default + + ) + ENABLE ROW MOVEMENT; (1 row) drop table table_hash1; +--subpartition table +CREATE TABLE list_range_1 ( + col_1 integer primary key, + col_2 integer, + col_3 character varying(30) unique, + col_4 integer +) +WITH (orientation=row, compression=no) +PARTITION BY LIST (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN (-10), + SUBPARTITION p_range_1_2 VALUES LESS THAN (0), + SUBPARTITION p_range_1_3 VALUES LESS THAN (10), + SUBPARTITION p_range_1_4 VALUES LESS THAN (20), + SUBPARTITION p_range_1_5 VALUES LESS THAN (50) + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN (15), + SUBPARTITION p_range_3_2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN (-10), + SUBPARTITION p_range_4_2 VALUES LESS THAN (0), + SUBPARTITION p_range_4_3 VALUES LESS THAN (10), + SUBPARTITION p_range_4_4 VALUES LESS THAN (20), + SUBPARTITION p_range_4_5 VALUES LESS THAN (50) + ), + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40), + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN (-10), + SUBPARTITION p_range_6_2 VALUES LESS THAN (0), + SUBPARTITION p_range_6_3 VALUES LESS THAN (10), + SUBPARTITION p_range_6_4 VALUES LESS THAN (20), + SUBPARTITION p_range_6_5 VALUES LESS THAN (50) + ), + PARTITION p_list_7 VALUES (DEFAULT) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_range_1_pkey" for table "list_range_1" +NOTICE: CREATE TABLE / UNIQUE will create implicit index "list_range_1_col_3_tableoid_key" for table "list_range_1" +select * from pg_get_tabledef('list_range_1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE list_range_1 ( + + col_1 integer NOT NULL, + + col_2 integer, + + col_3 character varying(30), + + col_4 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (col_1) SUBPARTITION BY RANGE (col_2) + + ( + + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) TABLESPACE pg_default + + ( + + SUBPARTITION p_range_1_1 VALUES LESS THAN (-10) TABLESPACE pg_default, + + SUBPARTITION p_range_1_2 VALUES LESS THAN (0) 
TABLESPACE pg_default, + + SUBPARTITION p_range_1_3 VALUES LESS THAN (10) TABLESPACE pg_default, + + SUBPARTITION p_range_1_4 VALUES LESS THAN (20) TABLESPACE pg_default, + + SUBPARTITION p_range_1_5 VALUES LESS THAN (50) TABLESPACE pg_default + + ), + + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10) TABLESPACE pg_default + + ( + + SUBPARTITION p_list_2_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ + ), + + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) TABLESPACE pg_default + + ( + + SUBPARTITION p_range_3_1 VALUES LESS THAN (15) TABLESPACE pg_default, + + SUBPARTITION p_range_3_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) TABLESPACE pg_default + + ( + + SUBPARTITION p_range_4_1 VALUES LESS THAN (-10) TABLESPACE pg_default, + + SUBPARTITION p_range_4_2 VALUES LESS THAN (0) TABLESPACE pg_default, + + SUBPARTITION p_range_4_3 VALUES LESS THAN (10) TABLESPACE pg_default, + + SUBPARTITION p_range_4_4 VALUES LESS THAN (20) TABLESPACE pg_default, + + SUBPARTITION p_range_4_5 VALUES LESS THAN (50) TABLESPACE pg_default + + ), + + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40) TABLESPACE pg_default + + ( + + SUBPARTITION p_list_5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ + ), + + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) TABLESPACE pg_default + + ( + + SUBPARTITION p_range_6_1 VALUES LESS THAN (-10) TABLESPACE pg_default, + + SUBPARTITION p_range_6_2 VALUES LESS THAN (0) TABLESPACE pg_default, + + SUBPARTITION p_range_6_3 VALUES LESS THAN (10) TABLESPACE pg_default, + + SUBPARTITION p_range_6_4 VALUES LESS THAN (20) TABLESPACE pg_default, + + SUBPARTITION p_range_6_5 VALUES LESS THAN (50) TABLESPACE pg_default + + ), + + PARTITION p_list_7 VALUES (DEFAULT) TABLESPACE pg_default + + ( + + SUBPARTITION p_list_7_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default+ + ) + + ) + + ENABLE ROW MOVEMENT; + + ALTER TABLE list_range_1 ADD CONSTRAINT list_range_1_col_3_tableoid_key UNIQUE (col_3); + + ALTER TABLE list_range_1 ADD CONSTRAINT list_range_1_pkey PRIMARY KEY (col_1); +(1 row) + +drop table list_range_1; +CREATE TABLE list_hash_2 ( + col_1 integer primary key, + col_2 integer, + col_3 character varying(30) unique, + col_4 integer +) +WITH (orientation=row, compression=no) +PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + ( + SUBPARTITION p_hash_1_1, + SUBPARTITION p_hash_1_2, + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_hash_3_1, + SUBPARTITION p_hash_3_2 + ), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_hash_4_1, + SUBPARTITION p_hash_4_2, + SUBPARTITION p_hash_4_3, + SUBPARTITION p_hash_4_4, + SUBPARTITION p_hash_4_5 + ), + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40), + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_hash_6_1, + SUBPARTITION p_hash_6_2, + SUBPARTITION p_hash_6_3, + SUBPARTITION p_hash_6_4, + SUBPARTITION p_hash_6_5 + ), + PARTITION p_list_7 VALUES (DEFAULT) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_hash_2_pkey" for table "list_hash_2" +NOTICE: CREATE TABLE / UNIQUE will create implicit index "list_hash_2_col_3_tableoid_key" for table "list_hash_2" +create unique index list_hash_2_idx1 on list_hash_2(col_2, 
col_3, col_4) local; +create index list_hash_2_idx2 on list_hash_2(col_3, col_1) local; +create index list_hash_2_idx3 on list_hash_2(col_4) global; +select * from pg_get_tabledef('list_hash_2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = test_get_table_def; + + CREATE TABLE list_hash_2 ( + + col_1 integer NOT NULL, + + col_2 integer, + + col_3 character varying(30), + + col_4 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) + + ( + + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) TABLESPACE pg_default + + ( + + SUBPARTITION p_hash_1_1 TABLESPACE pg_default, + + SUBPARTITION p_hash_1_2 TABLESPACE pg_default, + + SUBPARTITION p_hash_1_3 TABLESPACE pg_default + + ), + + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10) TABLESPACE pg_default + + ( + + SUBPARTITION p_list_2_subpartdefault1 TABLESPACE pg_default + + ), + + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) TABLESPACE pg_default + + ( + + SUBPARTITION p_hash_3_1 TABLESPACE pg_default, + + SUBPARTITION p_hash_3_2 TABLESPACE pg_default + + ), + + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) TABLESPACE pg_default + + ( + + SUBPARTITION p_hash_4_1 TABLESPACE pg_default, + + SUBPARTITION p_hash_4_2 TABLESPACE pg_default, + + SUBPARTITION p_hash_4_3 TABLESPACE pg_default, + + SUBPARTITION p_hash_4_4 TABLESPACE pg_default, + + SUBPARTITION p_hash_4_5 TABLESPACE pg_default + + ), + + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40) TABLESPACE pg_default + + ( + + SUBPARTITION p_list_5_subpartdefault1 TABLESPACE pg_default + + ), + + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) TABLESPACE pg_default + + ( + + SUBPARTITION p_hash_6_1 TABLESPACE pg_default, + + SUBPARTITION p_hash_6_2 TABLESPACE pg_default, + + SUBPARTITION p_hash_6_3 TABLESPACE pg_default, + + SUBPARTITION p_hash_6_4 TABLESPACE pg_default, + + SUBPARTITION p_hash_6_5 TABLESPACE pg_default + + ), + + PARTITION p_list_7 VALUES (DEFAULT) TABLESPACE pg_default + + ( + + SUBPARTITION p_list_7_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; + + CREATE INDEX list_hash_2_idx3 ON list_hash_2 USING btree (col_4) TABLESPACE pg_default; + + CREATE INDEX list_hash_2_idx2 ON list_hash_2 USING btree (col_3, col_1) LOCAL + + ( + + PARTITION p_list_7_subpartdefault1_col_3_col_1_idx, + + PARTITION p_hash_6_5_col_3_col_1_idx, + + PARTITION p_hash_6_4_col_3_col_1_idx, + + PARTITION p_hash_6_3_col_3_col_1_idx, + + PARTITION p_hash_6_2_col_3_col_1_idx, + + PARTITION p_hash_6_1_col_3_col_1_idx, + + PARTITION p_list_5_subpartdefault1_col_3_col_1_idx, + + PARTITION p_hash_4_5_col_3_col_1_idx, + + PARTITION p_hash_4_4_col_3_col_1_idx, + + PARTITION p_hash_4_3_col_3_col_1_idx, + + PARTITION p_hash_4_2_col_3_col_1_idx, + + PARTITION p_hash_4_1_col_3_col_1_idx, + + PARTITION p_hash_3_2_col_3_col_1_idx, + + PARTITION p_hash_3_1_col_3_col_1_idx, + + PARTITION p_list_2_subpartdefault1_col_3_col_1_idx, + + PARTITION p_hash_1_3_col_3_col_1_idx, + + PARTITION p_hash_1_2_col_3_col_1_idx, + + PARTITION p_hash_1_1_col_3_col_1_idx + + ) TABLESPACE pg_default; + + CREATE UNIQUE INDEX list_hash_2_idx1 ON list_hash_2 USING btree (col_2, col_3, col_4) LOCAL+ + ( + + PARTITION p_list_7_subpartdefault1_col_2_col_3_col_4_idx, + + PARTITION p_hash_6_5_col_2_col_3_col_4_idx, + + PARTITION p_hash_6_4_col_2_col_3_col_4_idx, + + PARTITION p_hash_6_3_col_2_col_3_col_4_idx, + + PARTITION 
p_hash_6_2_col_2_col_3_col_4_idx, + + PARTITION p_hash_6_1_col_2_col_3_col_4_idx, + + PARTITION p_list_5_subpartdefault1_col_2_col_3_col_4_idx, + + PARTITION p_hash_4_5_col_2_col_3_col_4_idx, + + PARTITION p_hash_4_4_col_2_col_3_col_4_idx, + + PARTITION p_hash_4_3_col_2_col_3_col_4_idx, + + PARTITION p_hash_4_2_col_2_col_3_col_4_idx, + + PARTITION p_hash_4_1_col_2_col_3_col_4_idx, + + PARTITION p_hash_3_2_col_2_col_3_col_4_idx, + + PARTITION p_hash_3_1_col_2_col_3_col_4_idx, + + PARTITION p_list_2_subpartdefault1_col_2_col_3_col_4_idx, + + PARTITION p_hash_1_3_col_2_col_3_col_4_idx, + + PARTITION p_hash_1_2_col_2_col_3_col_4_idx, + + PARTITION p_hash_1_1_col_2_col_3_col_4_idx + + ) TABLESPACE pg_default; + + ALTER TABLE list_hash_2 ADD CONSTRAINT list_hash_2_col_3_tableoid_key UNIQUE (col_3); + + ALTER TABLE list_hash_2 ADD CONSTRAINT list_hash_2_pkey PRIMARY KEY (col_1); +(1 row) + +drop table list_hash_2; reset current_schema; drop schema test_get_table_def cascade; diff --git a/src/test/regress/expected/gs_db_privilege.out b/src/test/regress/expected/gs_db_privilege.out new file mode 100644 index 000000000..bcf46bbec --- /dev/null +++ b/src/test/regress/expected/gs_db_privilege.out @@ -0,0 +1,409 @@ +-- prepare +CREATE ROLE db_priv_user PASSWORD '1234567i*'; +CREATE ROLE db_priv_user1 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user2 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user3 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user4 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user5 PASSWORD '1234567i*'; +-- system relation privilege check +SET ROLE db_priv_user PASSWORD '1234567i*'; +SELECT * FROM gs_db_privilege ORDER BY oid; + roleid | privilege_type | admin_option +--------+----------------+-------------- +(0 rows) + +SELECT * FROM gs_db_privileges ORDER BY rolename; + rolename | privilege_type | admin_option +----------+----------------+-------------- +(0 rows) + +SELECT has_any_privilege('db_priv_user','UPDATE ANY TABLE'); + has_any_privilege +------------------- + f +(1 row) + +-- pg_shdepend +RESET ROLE; +CREATE DATABASE db_priv_base; +\c db_priv_base +CREATE ROLE db_priv_user0 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user00 PASSWORD '1234567i*'; +select d.datname,a.rolname,p.privilege_type from pg_shdepend s join pg_authid a on s.refobjid=a.oid + join pg_database d on s.dbid=d.oid join gs_db_privilege p on s.objid=p.oid; --noting + datname | rolname | privilege_type +---------+---------+---------------- +(0 rows) + +GRANT SELECT ANY TABLE,DROP ANY TABLE TO db_priv_user, db_priv_user0, db_priv_user00; +select d.datname,a.rolname,p.privilege_type from pg_shdepend s join pg_authid a on s.refobjid=a.oid + join pg_database d on s.dbid=d.oid join gs_db_privilege p on s.objid=p.oid order by a.rolname,p.privilege_type; --6 lines + datname | rolname | privilege_type +--------------+----------------+------------------ + db_priv_base | db_priv_user | drop any table + db_priv_base | db_priv_user | select any table + db_priv_base | db_priv_user0 | drop any table + db_priv_base | db_priv_user0 | select any table + db_priv_base | db_priv_user00 | drop any table + db_priv_base | db_priv_user00 | select any table +(6 rows) + +DROP USER db_priv_user00; +ERROR: role "db_priv_user00" cannot be dropped because some objects depend on it +DETAIL: privileges for "drop any table" +privileges for "select any table" +DROP USER db_priv_user00 CASCADE; +DROP USER db_priv_user0; +ERROR: role "db_priv_user0" cannot be dropped because some objects depend on it +DETAIL: privileges for "drop any table" +privileges 
for "select any table" +REVOKE SELECT ANY TABLE FROM db_priv_user0; +DROP USER db_priv_user0; +ERROR: role "db_priv_user0" cannot be dropped because some objects depend on it +DETAIL: privileges for "drop any table" +REVOKE DROP ANY TABLE FROM db_priv_user0; +DROP USER db_priv_user0; +\c postgres +GRANT SELECT ANY TABLE TO db_priv_user; +select d.datname,a.rolname,p.privilege_type from pg_shdepend s join pg_authid a on s.refobjid=a.oid + join pg_database d on s.dbid=d.oid join gs_db_privilege p on s.objid=p.oid; --1 line + datname | rolname | privilege_type +----------+--------------+------------------ + postgres | db_priv_user | select any table +(1 row) + +DROP USER db_priv_user CASCADE; +ERROR: role "db_priv_user" cannot be dropped because some objects depend on it +DETAIL: 2 objects in database db_priv_base +\c db_priv_base +DROP USER db_priv_user CASCADE; +ERROR: role "db_priv_user" cannot be dropped because some objects depend on it +DETAIL: 1 object in database postgres +REVOKE SELECT ANY TABLE,DROP ANY TABLE FROM db_priv_user; +DROP USER db_priv_user CASCADE; +ERROR: role "db_priv_user" cannot be dropped because some objects depend on it +DETAIL: 1 object in database postgres +\c postgres +DROP USER db_priv_user; +ERROR: role "db_priv_user" cannot be dropped because some objects depend on it +DETAIL: privileges for "select any table" +DROP USER db_priv_user CASCADE; +\c regression +DROP DATABASE db_priv_base; +--syntax and gs_db_privilege +RESET ROLE; +GRANT SELECT ANY TABLES TO db_priv_user3; --failed +ERROR: syntax error at or near "TABLES" +LINE 1: GRANT SELECT ANY TABLES TO db_priv_user3; + ^ +REVOKE SELECT ANY TABLES FROM db_priv_user3; --failed +ERROR: syntax error at or near "TABLES" +LINE 1: REVOKE SELECT ANY TABLES FROM db_priv_user3; + ^ +GRANT DELETE ANY TABLE TO PUBLIC; --failed +ERROR: Invalid grant or revoke operation. +DETAIL: Forbid to grant ANY privileges to PUBLIC or revoke ANY privileges from PUBLIC. +REVOKE DELETE ANY TABLE FROM PUBLIC; --failed +ERROR: Invalid grant or revoke operation. +DETAIL: Forbid to grant ANY privileges to PUBLIC or revoke ANY privileges from PUBLIC. 
+GRANT SELECT ANY TABLE TO db_priv_user; --failed +ERROR: role "db_priv_user" does not exist +REVOKE SELECT ANY TABLE FROM db_priv_user; --failed +ERROR: role "db_priv_user" does not exist +GRANT SELECT ANY TABLE,DROP ANY TABLE TO db_priv_user1,db_priv_user2; +GRANT update any table TO db_priv_user3, db_priv_user4 WITH ADMIN OPTION; +SELECT * FROM gs_db_privileges ORDER BY rolename; + rolename | privilege_type | admin_option +---------------+------------------+-------------- + db_priv_user1 | select any table | no + db_priv_user1 | drop any table | no + db_priv_user2 | select any table | no + db_priv_user2 | drop any table | no + db_priv_user3 | update any table | yes + db_priv_user4 | update any table | yes +(6 rows) + +GRANT SELECT ANY TABLE TO db_priv_user1; --no change +GRANT SELECT ANY TABLE TO db_priv_user2 WITH ADMIN OPTION; --change to yes +REVOKE ADMIN OPTION FOR DROP ANY TABLE FROM db_priv_user1,db_priv_user2; --no change +REVOKE ADMIN OPTION FOR update ANY TABLE FROM db_priv_user3; --change to no +REVOKE update ANY TABLE FROM db_priv_user4; --delete +SELECT * FROM gs_db_privileges ORDER BY rolename; + rolename | privilege_type | admin_option +---------------+------------------+-------------- + db_priv_user1 | select any table | no + db_priv_user1 | drop any table | no + db_priv_user2 | select any table | yes + db_priv_user2 | drop any table | no + db_priv_user3 | update any table | no +(5 rows) + +REVOKE SELECT ANY TABLE,DROP ANY TABLE,update any table FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4; +SELECT * FROM gs_db_privileges ORDER BY rolename; + rolename | privilege_type | admin_option +----------+----------------+-------------- +(0 rows) + +--privileges for grant +RESET ROLE; +GRANT SELECT ANY TABLE TO db_priv_user1 WITH ADMIN OPTION; +GRANT INSERT ANY TABLE TO db_priv_user1 WITH ADMIN OPTION; +GRANT UPDATE ANY TABLE TO db_priv_user1; +GRANT DELETE ANY TABLE TO db_priv_user1; +SET ROLE db_priv_user1 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE,UPDATE ANY TABLE,INSERT ANY TABLE TO db_priv_user2; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant update any table. +GRANT INSERT ANY TABLE,DELETE ANY TABLE TO db_priv_user2; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant delete any table. +GRANT SELECT ANY TABLE TO db_priv_user2 WITH ADMIN OPTION; +GRANT INSERT ANY TABLE TO db_priv_user2; +GRANT UPDATE ANY TABLE TO db_priv_user2; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant update any table. +GRANT DELETE ANY TABLE TO db_priv_user2; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant delete any table. +SET ROLE db_priv_user2 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user3; +GRANT INSERT ANY TABLE TO db_priv_user3; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant insert any table. +GRANT UPDATE ANY TABLE TO db_priv_user3; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant update any table. +GRANT DELETE ANY TABLE TO db_priv_user3; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant delete any table. +SET ROLE db_priv_user3 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant select any table. +GRANT INSERT ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant insert any table. +GRANT UPDATE ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. 
+DETAIL: Permission denied to grant update any table. +GRANT DELETE ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant delete any table. +RESET ROLE; +SELECT * FROM gs_db_privileges ORDER BY rolename; + rolename | privilege_type | admin_option +---------------+------------------+-------------- + db_priv_user1 | select any table | yes + db_priv_user1 | insert any table | yes + db_priv_user1 | update any table | no + db_priv_user1 | delete any table | no + db_priv_user2 | select any table | yes + db_priv_user2 | insert any table | no + db_priv_user3 | select any table | no +(7 rows) + +GRANT db_priv_user2 TO db_priv_user3; +SET ROLE db_priv_user3 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user4; +GRANT INSERT ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant insert any table. +GRANT UPDATE ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant update any table. +GRANT DELETE ANY TABLE TO db_priv_user4; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant delete any table. +RESET ROLE; +GRANT db_priv_user3 TO db_priv_user4; +SET ROLE db_priv_user4 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user5; +GRANT INSERT ANY TABLE TO db_priv_user5; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant insert any table. +GRANT UPDATE ANY TABLE TO db_priv_user5; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant update any table. +GRANT DELETE ANY TABLE TO db_priv_user5; --failed +ERROR: Permission denied. +DETAIL: Permission denied to grant delete any table. +REVOKE ADMIN OPTION FOR SELECT ANY TABLE FROM db_priv_user2; +GRANT SELECT ANY TABLE TO db_priv_user5;--failed +ERROR: Permission denied. +DETAIL: Permission denied to grant select any table. +RESET ROLE; +GRANT db_priv_user1 TO db_priv_user5; +SET ROLE db_priv_user5 PASSWORD '1234567i*'; +REVOKE SELECT ANY TABLE FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; +REVOKE INSERT ANY TABLE FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; +REVOKE UPDATE ANY TABLE FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; +ERROR: Permission denied. +DETAIL: Permission denied to revoke update any table. +--function has_any_privilege +RESET ROLE; +GRANT UPDATE ANY TABLE TO db_priv_user1 WITH ADMIN OPTION; +SELECT * FROM gs_db_privileges ORDER BY rolename; + rolename | privilege_type | admin_option +---------------+------------------+-------------- + db_priv_user1 | delete any table | no + db_priv_user1 | update any table | yes +(2 rows) + +SELECT has_any_privilege('db_priv_user','SELECT ANY TABLE'); --error +ERROR: role "db_priv_user" does not exist +CONTEXT: referenced column: has_any_privilege +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLE'); --error +ERROR: Unrecognized privilege type. +DETAIL: Unrecognized privilege type: "SELECT ANY TABLE". +CONTEXT: referenced column: has_any_privilege +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLES'); --error +ERROR: Unrecognized privilege type. +DETAIL: Unrecognized privilege type: "SELECT ANY TABLES". 
+CONTEXT: referenced column: has_any_privilege +SELECT has_any_privilege('db_priv_user1','UPDATE ANY TABLE WITH ADMIN OPtION'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','update ANY TABLE WITH ADMIN OPtION'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','UPDATE ANY TABLE WITH admin OPtION'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','update ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','UPDATE ANY TABLE WITH ADMIN OPTION'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','DELETE ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','DELETE ANY TABLE WITH ADMIN OPTION'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user1','CREATE ANY TABLE'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user1','CREATE ANY TABLE WITH ADMIN OPTION'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLE, DELETE ANY TABLE WITH ADMIN OPTION'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLE, UPDATE ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user1','CREATE ANY TABLE WITH ADMIN OPTION, DELETE ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user5','update ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user5','UPDATE ANY TABLE WITH ADMIN OPTION'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user5','DELETE ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user5','DELETE ANY TABLE WITH ADMIN OPTION'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user5','CREATE ANY TABLE'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user5','CREATE ANY TABLE WITH ADMIN OPTION'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user5','SELECT ANY TABLE, DELETE ANY TABLE WITH ADMIN OPTION'); --f + has_any_privilege +------------------- + f +(1 row) + +SELECT has_any_privilege('db_priv_user5','SELECT ANY TABLE, UPDATE ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +SELECT has_any_privilege('db_priv_user5','CREATE ANY TABLE WITH ADMIN OPTION, DELETE ANY TABLE'); --t + has_any_privilege +------------------- + t +(1 row) + +--audit +RESET ROLE; +SELECT type,result,object_name,detail_info from pg_query_audit('2021-11-30','2099-12-28') + WHERE type='grant_role' AND object_name='db_priv_user0'; + type | result | object_name | detail_info +------------+--------+---------------+--------------------------------------------------------------------------------------- + grant_role | ok | db_priv_user0 | GRANT SELECT ANY TABLE,DROP ANY TABLE TO db_priv_user, db_priv_user0, db_priv_user00; +(1 row) + +SELECT type,result,object_name,detail_info from pg_query_audit('2021-11-30','2099-12-28') + WHERE 
type='revoke_role' AND object_name='db_priv_user0'; + type | result | object_name | detail_info +-------------+--------+---------------+--------------------------------------------- + revoke_role | ok | db_priv_user0 | REVOKE SELECT ANY TABLE FROM db_priv_user0; + revoke_role | ok | db_priv_user0 | REVOKE DROP ANY TABLE FROM db_priv_user0; +(2 rows) + +--clean +RESET ROLE; +DROP USER db_priv_user1 CASCADE; +DROP USER db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; diff --git a/src/test/regress/expected/gsc_func.out b/src/test/regress/expected/gsc_func.out new file mode 100644 index 000000000..5ca56eb22 --- /dev/null +++ b/src/test/regress/expected/gsc_func.out @@ -0,0 +1,176 @@ +select * from gs_gsc_dbstat_info() limit 1; + database_id | database_name | tup_searches | tup_hits | tup_miss | tup_count | tup_dead | tup_memory | rel_searches | rel_hits | rel_miss | rel_count | rel_dead | rel_memory | part_searches | part_hits | part_miss | part_count | part_dead | part_memory | total_memory | swapout_count | refcount +-------------+---------------+--------------+----------+----------+-----------+----------+------------+--------------+----------+----------+-----------+----------+------------+---------------+-----------+-----------+------------+-----------+-------------+--------------+---------------+---------- +--?.* 0 | | 40 | 23 | 9 | 7 | 0 | 3760 | 143 | 2 | 25 | 25 | 0 | 106656 | 0 | 0 | 0 | 0 | 0 | 0 | 405848 | 0 | 0 +(1 row) + +select * from gs_gsc_dbstat_info(-1) limit 1; + database_id | database_name | tup_searches | tup_hits | tup_miss | tup_count | tup_dead | tup_memory | rel_searches | rel_hits | rel_miss | rel_count | rel_dead | rel_memory | part_searches | part_hits | part_miss | part_count | part_dead | part_memory | total_memory | swapout_count | refcount +-------------+---------------+--------------+----------+----------+-----------+----------+------------+--------------+----------+----------+-----------+----------+------------+---------------+-----------+-----------+------------+-----------+-------------+--------------+---------------+---------- +--?.* 0 | | 40 | 23 | 9 | 7 | 0 | 3760 | 143 | 2 | 25 | 25 | 0 | 106656 | 0 | 0 | 0 | 0 | 0 | 0 | 405848 | 0 | 0 +(1 row) + +select * from gs_gsc_dbstat_info(0) limit 1; + database_id | database_name | tup_searches | tup_hits | tup_miss | tup_count | tup_dead | tup_memory | rel_searches | rel_hits | rel_miss | rel_count | rel_dead | rel_memory | part_searches | part_hits | part_miss | part_count | part_dead | part_memory | total_memory | swapout_count | refcount +-------------+---------------+--------------+----------+----------+-----------+----------+------------+--------------+----------+----------+-----------+----------+------------+---------------+-----------+-----------+------------+-----------+-------------+--------------+---------------+---------- +--?.* 0 | | 40 | 23 | 9 | 7 | 0 | 3760 | 143 | 2 | 25 | 25 | 0 | 106656 | 0 | 0 | 0 | 0 | 0 | 0 | 405848 | 0 | 0 +(1 row) + +select * from gs_gsc_dbstat_info(1) limit 1; + database_id | database_name | tup_searches | tup_hits | tup_miss | tup_count | tup_dead | tup_memory | rel_searches | rel_hits | rel_miss | rel_count | rel_dead | rel_memory | part_searches | part_hits | part_miss | part_count | part_dead | part_memory | total_memory | swapout_count | refcount 
+-------------+---------------+--------------+----------+----------+-----------+----------+------------+--------------+----------+----------+-----------+----------+------------+---------------+-----------+-----------+------------+-----------+-------------+--------------+---------------+---------- +--?.* 0 | | 40 | 23 | 9 | 7 | 0 | 3760 | 143 | 2 | 25 | 25 | 0 | 106656 | 0 | 0 | 0 | 0 | 0 | 0 | 405848 | 0 | 0 +(1 row) + +select * from gs_gsc_dbstat_info(2) limit 1; +ERROR: dbOid doesn't exist. +DETAIL: dbOid is invalid, please pass valid dbOid. +select * from gs_gsc_catalog_detail() limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-----------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1260 | pg_authid | 10 | (0, 9) | (0, 9) | 10507 | 26 | 531311568 | 9 +(1 row) + +select * from gs_gsc_catalog_detail(-1) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-----------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1260 | pg_authid | 10 | (0, 9) | (0, 9) | 10507 | 26 | 531311568 | 9 +(1 row) + +select * from gs_gsc_catalog_detail(0) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-----------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1260 | pg_authid | 10 | (0, 9) | (0, 9) | 10507 | 26 | 531311568 | 9 +(1 row) + +select * from gs_gsc_catalog_detail(1) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-----------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1260 | pg_authid | 10 | (0, 9) | (0, 9) | 10507 | 26 | 531311568 | 9 +(1 row) + +select * from gs_gsc_catalog_detail(-1, 1262) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-------------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1262 | pg_database | 27 | (0, 4) | (0, 4) | 10506 | 15 | 361410604 | 1 +(1 row) + +select * from gs_gsc_catalog_detail(0, 1262) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-------------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1262 | pg_database | 27 | (0, 4) | (0, 4) | 10506 | 15 | 361410604 | 1 +(1 row) + +select * from gs_gsc_catalog_detail(1, 1262) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+-------------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 0 | | 1262 | pg_database | 27 | (0, 4) | (0, 4) | 10506 | 15 | 361410604 | 1 +(1 row) + +select * from gs_gsc_catalog_detail(-1, 1259) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount 
+-------------+---------------+--------+----------+----------+--------+--------+----------+-----------+------------+---------- +--?.* 16299 | postgres | 1259 | pg_class | 84 | (7, 3) | (7, 3) | 10507 | 40 | 1520565747 | 0 +(1 row) + +select * from gs_gsc_catalog_detail(0, 1259) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+----------+----------+------+------+----------+-----------+------------+---------- +(0 rows) + +select * from gs_gsc_catalog_detail(1, 1259) limit 1; + database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount +-------------+---------------+--------+----------+----------+------+------+----------+-----------+------------+---------- +(0 rows) + +select * from gs_gsc_catalog_detail(2, 1259) limit 1; +ERROR: dbOid doesn't exist. +DETAIL: dbOid is invalid, please pass valid dbOid. +select * from gs_gsc_table_detail() limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+-------------------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+-----------+--------- +--?.* 0 | | 2676 | pg_authid_rolname_index | 11 | 0 | 0 | 10 | 403 | 0 | 1664 | f | t | i | 1 | f | f | n | f | 'rolname' | +(1 row) + +select * from gs_gsc_table_detail(-1) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+-------------------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+-----------+--------- +--?.* 0 | | 2676 | pg_authid_rolname_index | 11 | 0 | 0 | 10 | 403 | 0 | 1664 | f | t | i | 1 | f | f | n | f | 'rolname' | +(1 row) + +select * from gs_gsc_table_detail(0) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+-------------------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+-----------+--------- +--?.* 0 | | 2676 | pg_authid_rolname_index | 11 | 0 | 0 | 10 | 403 | 0 | 1664 | f | t | i | 1 | f | f | n | f | 'rolname' | +(1 row) + +select * from gs_gsc_table_detail(1) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo 
+--------------+---------------+--------+-------------------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+-----------+--------- +--?.* 0 | | 2676 | pg_authid_rolname_index | 11 | 0 | 0 | 10 | 403 | 0 | 1664 | f | t | i | 1 | f | f | n | f | 'rolname' | +(1 row) + +select * from gs_gsc_table_detail(-1, 1262) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+-------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------- +--?.* 0 | | 1262 | pg_database | 11 | 1248 | 0 | 10 | 0 | 0 | 1664 | t | t | r | 15 | t | f | n | f | 'datname','datdba','encoding','datcollate','datctype','datistemplate','datallowconn','datconnlimit','datlastsysoid','datfrozenxid','dattablespace','datcompatibility','datacl','datfrozenxid64','datminmxid' | +(1 row) + +select * from gs_gsc_table_detail(0, 1262) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+-------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------- +--?.* 0 | | 1262 | pg_database | 11 | 1248 | 0 | 10 | 0 | 0 | 1664 | t | t | r | 15 | t | f | n | f | 'datname','datdba','encoding','datcollate','datctype','datistemplate','datallowconn','datconnlimit','datlastsysoid','datfrozenxid','dattablespace','datcompatibility','datacl','datfrozenxid64','datminmxid' | +(1 row) + +select * from gs_gsc_table_detail(1, 1262) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+-------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------- +--?.* 0 | | 1262 | pg_database | 11 | 1248 | 0 | 10 | 0 | 0 | 1664 | t | t | r | 15 | t | f | n | f | 
'datname','datdba','encoding','datcollate','datctype','datistemplate','datallowconn','datconnlimit','datlastsysoid','datfrozenxid','dattablespace','datcompatibility','datacl','datfrozenxid64','datminmxid' | +(1 row) + +select * from gs_gsc_table_detail(-1, 1259) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+----------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------- +--?.* 16384 | regression | 1259 | pg_class | 11 | 83 | 0 | 10 | 0 | 0 | 0 | t | f | r | 40 | t | f | n | f | 'relname','relnamespace','reltype','reloftype','relowner','relam','relfilenode','reltablespace','relpages','reltuples','relallvisible','reltoastrelid','reltoastidxid','reldeltarelid','reldeltaidx','relcudescrelid','relcudescidx','relhasindex','relisshared','relpersistence','relkind','relnatts','relchecks','relhasoids','relhaspkey','relhasrules','relhastriggers','relhassubclass','relcmprs','relhasclusterkey','relrowmovement','parttype','relfrozenxid','relacl','reloptions','relreplident','relfrozenxid64','relbucket','relbucketkey','relminmxid' | +(1 row) + +select * from gs_gsc_table_detail(0, 1259) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+---------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+----------+--------- +(0 rows) + +select * from gs_gsc_table_detail(1, 1259) limit 1; + database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo +--------------+---------------+--------+---------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+----------+--------- +(0 rows) + +select * from gs_gsc_table_detail(2, 1259) limit 1; +ERROR: dbOid doesn't exist. +DETAIL: dbOid is invalid, please pass valid dbOid. 
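-- Usage note: every gs_gsc_* function in this file takes the same optional dbOid
-- argument; -1, 0 and the OID of an existing database are accepted, while any
-- other value fails with "dbOid doesn't exist.". A hedged sketch that resolves a
-- real OID before calling (the pg_database lookup is illustrative):
SELECT * FROM gs_gsc_table_detail(
    (SELECT oid FROM pg_database WHERE datname = current_database()), 1259);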
+select * from gs_gsc_clean() limit 1; + gs_gsc_clean +-------------- + t +(1 row) + +select * from gs_gsc_clean(-1) limit 1; + gs_gsc_clean +-------------- + t +(1 row) + +select * from gs_gsc_clean(0) limit 1; + gs_gsc_clean +-------------- + t +(1 row) + +select * from gs_gsc_clean(1) limit 1; + gs_gsc_clean +-------------- + t +(1 row) + +select * from gs_gsc_clean(2) limit 1; +ERROR: dbOid doesn't exist. +DETAIL: dbOid is invalid, please pass valid dbOid. \ No newline at end of file diff --git a/src/test/regress/expected/gtt_function.out b/src/test/regress/expected/gtt_function.out index f8ce88448..c43a2c4ac 100644 --- a/src/test/regress/expected/gtt_function.out +++ b/src/test/regress/expected/gtt_function.out @@ -354,6 +354,7 @@ NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "mytable_pkey" fo -- ok create index idx_gtt1_1 on gtt1 using btree (a); create index idx_gtt1_2 on gtt1 using hash (a); +ERROR: access method "hash" does not support row store create global temp table tmp_t0(c0 tsvector,c1 varchar(100)); create index idx_tmp_t0_1 on tmp_t0 using gin (c0); ERROR: access method "gin" is not support for global temporary table index diff --git a/src/test/regress/expected/guc.out b/src/test/regress/expected/guc.out index 4b89a471d..2dc97dae2 100644 --- a/src/test/regress/expected/guc.out +++ b/src/test/regress/expected/guc.out @@ -748,7 +748,7 @@ select myfunc(1), current_setting('work_mem'); SHOW logging_module; logging_module ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - ALL,on(),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,OPFUSION,GPC,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,MOT,PARQUET,PLANHINT,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,REDO,FUNCTION,PARSER,INSTR,INCRE_CKPT,DBL_WRT,RTO,HEARTBEAT,COMM_IPC,COMM_PARAM,OPT_AI,USTORE,UNDO) + ALL,on(),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,OPFUSION,GPC,GSC,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,MOT,PARQUET,PLANHINT,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,REDO,FUNCTION,PARSER,INSTR,INCRE_CKPT,DBL_WRT,RTO,HEARTBEAT,COMM_IPC,COMM_PARAM,OPT_AI,USTORE,UNDO) (1 row) -- error input @@ -818,14 +818,14 @@ set logging_module = 'off(slru)'; SHOW logging_module; logging_module --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
ALL,on(DFS,GUC,HDFS,ORC,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,OPFUSION,GPC,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,MOT,PARQUET,PLANHINT,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,REDO,FUNCTION,PARSER,INSTR,INCRE_CKPT,DBL_WRT,RTO,HEARTBEAT,COMM_IPC,COMM_PARAM,OPT_AI,USTORE,UNDO),off(SLRU) + ALL,on(DFS,GUC,HDFS,ORC,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,OPFUSION,GPC,GSC,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,MOT,PARQUET,PLANHINT,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,REDO,FUNCTION,PARSER,INSTR,INCRE_CKPT,DBL_WRT,RTO,HEARTBEAT,COMM_IPC,COMM_PARAM,OPT_AI,USTORE,UNDO),off(SLRU) (1 row) RESET logging_module; SHOW logging_module; logging_module ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - ALL,on(),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,OPFUSION,GPC,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,MOT,PARQUET,PLANHINT,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,REDO,FUNCTION,PARSER,INSTR,INCRE_CKPT,DBL_WRT,RTO,HEARTBEAT,COMM_IPC,COMM_PARAM,OPT_AI,USTORE,UNDO) + ALL,on(),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,OPFUSION,GPC,GSC,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,MOT,PARQUET,PLANHINT,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,REDO,FUNCTION,PARSER,INSTR,INCRE_CKPT,DBL_WRT,RTO,HEARTBEAT,COMM_IPC,COMM_PARAM,OPT_AI,USTORE,UNDO) (1 row) --- diff --git a/src/test/regress/expected/hash_index_001.out b/src/test/regress/expected/hash_index_001.out deleted file mode 100644 index e44632ea3..000000000 --- a/src/test/regress/expected/hash_index_001.out +++ /dev/null @@ -1,234 +0,0 @@ -------------------------------------- ----------- hash index part1---------- -------------------------------------- -set enable_seqscan = off; -set enable_indexscan = off; ------------------- --- hash_table_1 -- ------------------- -drop table if exists hash_table_1 cascade; -NOTICE: table "hash_table_1" does not exist, skipping -create table hash_table_1 (id int, name varchar, sex varchar default 'male'); -insert into hash_table_1 values (1, 'Smith'); -insert into hash_table_1 values (2, 'Jones'); -insert into hash_table_1 values (3, 'Williams', 'female'); -insert into hash_table_1 values (4, 'Taylor'); -insert into hash_table_1 values (5, 'Brown'); -insert into hash_table_1 values (6, 'Davies'); -drop index if exists hash_t1_id1; -NOTICE: index "hash_t1_id1" does not exist, skipping -create index hash_t1_id1 on hash_table_1 using hash (id); --- error, does not support multicolumn indexes -drop index if exists hash_t1_id2; -NOTICE: index "hash_t1_id2" does not exist, skipping -create index hash_t1_id2 on hash_table_1 using hash (id, sex); -ERROR: access method "hash" does not support multicolumn indexes --- compare 
with hash_t1_id1 and hash_t1_id3, hash index can be create in same column -drop index if exists hash_t1_id3; -NOTICE: index "hash_t1_id3" does not exist, skipping -drop index if exists hash_t1_id4; -NOTICE: index "hash_t1_id4" does not exist, skipping -create index hash_t1_id3 on hash_table_1 using btree (id); -create index hash_t1_id4 on hash_table_1 using hash (id); --- drop superfluous index now -drop index hash_t1_id3, hash_t1_id4; --- insert into large volumns of data into hash_table_1 -insert into hash_table_1 select 4, 'XXX', 'XXX' from generate_series(1,50000); -insert into hash_table_1 select 6, 'XXX', 'XXX' from generate_series(1,50000); -analyse hash_table_1; --- after insert, hash_t1_id1 is still work -explain(costs off) select * from hash_table_1 where id = 4; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_1 - Recheck Cond: (id = 4) - -> Bitmap Index Scan on hash_t1_id1 - Index Cond: (id = 4) -(4 rows) - -select count(*) from hash_table_1 where id = 6; --50001 - count -------- - 50001 -(1 row) - --- do other dml action, then check hash_t1_id1 again -insert into hash_table_1 select random()*100, 'XXX', 'XXX' from generate_series(1,50000); -update hash_table_1 set id = 101, sex = 'male' where id = 60; -delete from hash_table_1 where id = 80; -explain(costs off) select * from hash_table_1 where id = 101; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_1 - Recheck Cond: (id = 101) - -> Bitmap Index Scan on hash_t1_id1 - Index Cond: (id = 101) -(4 rows) - --- cleanup env -drop table hash_table_1 cascade; ------------------- --- hash_table_2 -- ------------------- -drop table if exists hash_table_2 cascade; -NOTICE: table "hash_table_2" does not exist, skipping -create table hash_table_2 (id int, name varchar, sex varchar default 'male'); -insert into hash_table_2 select random()*100, 'XXX', 'XXX' from generate_series(1,100000); --- create index concurrently --- In this fastcheck, we only check it can run properly. However, in a real --- situation, you should run this sql in connection a first, then doing some DML( --- insert, delete, update) operation about this table in connection b as soon --- as possible. We expect the create index do not block DML operation. --- connection a -create index concurrently hash_t2_id1 on hash_table_2 using hash (id); --- connection b -insert into hash_table_2 select random()*100, 'XXX', 'XXX' from generate_series(1,100); -explain(costs off) select * from hash_table_2 where id = 40; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_2 - Recheck Cond: (id = 40) - -> Bitmap Index Scan on hash_t2_id1 - Index Cond: (id = 40) -(4 rows) - --- error, does not support unique indexes -create unique index hash_t2_id2 on hash_table_2 using hash (sex); -ERROR: access method "hash" does not support unique indexes --- hash_t2_id3 occupies more disk space than hash_t2_id2 -create index hash_t2_id2 on hash_table_2 using hash (id) with (fillfactor=25); -create index hash_t2_id3 on hash_table_2 using hash (id) with (fillfactor=75); -select count(*) from hash_table_2; --100100 - count --------- - 100100 -(1 row) - --- cleanup env -drop table hash_table_2 cascade; ------------------- --- hash_table_3 -- ------------------- -drop schema if exists hash_sc_3 cascade; -NOTICE: schema "hash_sc_3" does not exist, skipping -drop tablespace if exists hash_sp_3; -NOTICE: Tablespace "hash_sp_3" does not exist, skipping. 
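-- Two-session sketch of the CREATE INDEX CONCURRENTLY expectation described for
-- hash_table_2 above (the session split is illustrative; this fastcheck runs both
-- statements in a single session):
--   session A:  create index concurrently hash_t2_id1 on hash_table_2 using hash (id);
--   session B:  insert into hash_table_2 select random()*100, 'XXX', 'XXX' from generate_series(1,100);
-- Neither statement is expected to block the other while the index builds.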
-create schema hash_sc_3; -create tablespace hash_sp_3 relative location 'tablespace/tablespace_1'; -create table hash_sc_3.hash_table_3 -( - id int, name varchar, - sex varchar default 'male' -) -tablespace hash_sp_3; --- create index specify schema and tablespace -create index concurrently hash_sc_3.hash_t3_id1 on hash_sc_3.hash_table_3 using hash (id); -create index hash_sc_3.hash_t3_id2 on hash_sc_3.hash_table_3 using hash (id) tablespace hash_sp_3; -drop table hash_sc_3.hash_table_3 cascade; -drop schema hash_sc_3 cascade; -drop tablespace hash_sp_3; ------------------- --- hash_table_4 -- ------------------- -drop table if exists hash_table_4 cascade; -NOTICE: table "hash_table_4" does not exist, skipping -create table hash_table_4 -( - id int, - name varchar, - sex varchar default 'male' -) -partition by range(id) -( - partition p1 values less than (1000), - partition p2 values less than (2000), - partition p3 values less than (3000), - partition p4 values less than (maxvalue) -); --- hash index only support local index in partition table -drop index if exists hash_t4_id1; -NOTICE: index "hash_t4_id1" does not exist, skipping -drop index if exists hash_t4_id2; -NOTICE: index "hash_t4_id2" does not exist, skipping -drop index if exists hash_t4_id2_new; -NOTICE: index "hash_t4_id2_new" does not exist, skipping -create index hash_t4_id1 on hash_table_4 using hash(id) global; -ERROR: Global partition index only support btree. -create index hash_t4_id2 on hash_table_4 using hash(id) local -( - partition index_t4_p1, - partition index_t4_p2, - partition index_t4_p3, - partition index_t4_p4 -); --- alter index rename, unusable -insert into hash_table_4 select random()*5000, 'XXX', 'XXX' from generate_series(1,1000); -alter index hash_t4_id2 rename to hash_t4_id2_new; -alter index hash_t4_id2_new modify partition index_t4_p2 unusable; -reindex index hash_t4_id2_new partition index_t4_p2; -drop table hash_table_4 cascade; ------------------- --- hash_table_5 -- ------------------- -drop table if exists hash_table_5; -NOTICE: table "hash_table_5" does not exist, skipping -create temporary table hash_table_5(id int, name varchar, sex varchar default 'male'); -drop index if exists hash_t5_id1; -NOTICE: index "hash_t5_id1" does not exist, skipping -create index hash_t5_id1 on hash_table_5 using hash(id) with(fillfactor = 80); -insert into hash_table_5 select random()*100, 'XXX', 'XXX' from generate_series(1,100); -update hash_table_5 set name = 'aaa' where id = 80; -alter index hash_t5_id1 set (fillfactor = 60); -alter index hash_t5_id1 reset (fillfactor); -explain (costs off) select * from hash_table_5 where id = 80; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_5 - Recheck Cond: (id = 80) - -> Bitmap Index Scan on hash_t5_id1 - Index Cond: (id = 80) -(4 rows) - -drop table hash_table_5 cascade; ------------------- --- hash_table_6 -- ------------------- -drop table if exists hash_table_6; -NOTICE: table "hash_table_6" does not exist, skipping -create global temporary table hash_table_6(id int, name varchar, sex varchar default 'male'); -drop index if exists hash_t6_id1; -NOTICE: index "hash_t6_id1" does not exist, skipping -create index hash_t6_id1 on hash_table_6 using hash((id*10)) with (fillfactor = 30); -insert into hash_table_6 select random()*100, 'XXX', 'XXX' from generate_series(1,1000); -delete from hash_table_6 where id in (50, 60, 70); -explain (costs off) select * from hash_table_6 where id*10 = 80; - QUERY PLAN 
----------------------------------------- - Bitmap Heap Scan on hash_table_6 - Recheck Cond: ((id * 10) = 80) - -> Bitmap Index Scan on hash_t6_id1 - Index Cond: ((id * 10) = 80) -(4 rows) - -drop table hash_table_6 cascade; --- create unlogged table index, which will be delete in hash_index_002 -drop table if exists hash_table_7; -NOTICE: table "hash_table_7" does not exist, skipping -create unlogged table hash_table_7(id int, name varchar, sex varchar default 'male'); -insert into hash_table_7 select random()*100, 'XXX', 'XXX' from generate_series(1,1000); -create index hash_t7_id1 on hash_table_7 using hash(id) with (fillfactor = 30); -explain (costs off) select * from hash_table_7 where id = 80; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_7 - Recheck Cond: (id = 80) - -> Bitmap Index Scan on hash_t7_id1 - Index Cond: (id = 80) -(4 rows) - -select count(*) from hash_table_7; - count -------- - 1000 -(1 row) - diff --git a/src/test/regress/expected/hash_index_002.out b/src/test/regress/expected/hash_index_002.out deleted file mode 100644 index a8fceaaaf..000000000 --- a/src/test/regress/expected/hash_index_002.out +++ /dev/null @@ -1,98 +0,0 @@ -------------------------------------- ----------- hash index part2---------- -------------------------------------- -set enable_seqscan = off; -set enable_indexscan = off; --- continue to hash_index_001 -explain (costs off) select * from hash_table_7 where id = 80; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_7 - Recheck Cond: (id = 80) - -> Bitmap Index Scan on hash_t7_id1 - Index Cond: (id = 80) -(4 rows) - -drop table hash_table_7 cascade; --- low maintenance_work_mem -set maintenance_work_mem = '1MB'; -drop table if exists hash_table_8; -NOTICE: table "hash_table_8" does not exist, skipping -create table hash_table_8(id int, name varchar, sex varchar default 'male'); -insert into hash_table_8 select random()*100, 'XXX', 'XXX' from generate_series(1,50000); -create index hash_t8_id1 on hash_table_8 using hash(id) with (fillfactor = 30); -explain (costs off) select * from hash_table_8 where id = 80; - QUERY PLAN ----------------------------------------- - Bitmap Heap Scan on hash_table_8 - Recheck Cond: (id = 80) - -> Bitmap Index Scan on hash_t8_id1 - Index Cond: (id = 80) -(4 rows) - -drop table hash_table_8 cascade; --- vacuum one page -set enable_indexscan = on; -set enable_bitmapscan = off; -set maintenance_work_mem = '100MB'; -alter system set autovacuum = off; -drop table if exists hash_table_9; -NOTICE: table "hash_table_9" does not exist, skipping -create table hash_table_9(id int, name varchar, sex varchar default 'male'); -insert into hash_table_9 select random()*100, 'XXX', 'XXX' from generate_series(1,50000); -create index hash_t9_id1 on hash_table_9 using hash(id) with (fillfactor = 10); -create or replace procedure hash_proc_9(sid in integer) -is -begin -delete from hash_table_9 where id = sid; -perform * from hash_table_9 where id = sid; -insert into hash_table_9 select sid, random() * 10, 'xxx' from generate_series(1,5000); -end; -/ -call hash_proc_9(1); - hash_proc_9 -------------- - -(1 row) - -call hash_proc_9(1); - hash_proc_9 -------------- - -(1 row) - -call hash_proc_9(1); - hash_proc_9 -------------- - -(1 row) - -call hash_proc_9(1); - hash_proc_9 -------------- - -(1 row) - -drop table hash_table_9 cascade; -drop procedure hash_proc_9; --- some dml operator -drop table if exists hash_table_10; -NOTICE: table "hash_table_10" does not 
exist, skipping -create table hash_table_10(id int, num int, sex varchar default 'male'); -create index hash_t10_id1 on hash_table_10 using hash (id); -insert into hash_table_10 select random()*10, random()*10, 'XXX' from generate_series(1,5000); -insert into hash_table_10 select random()*10, random()*10, 'XXX' from generate_series(1,5000); -delete from hash_table_10 where id = 7 and num = 1; -insert into hash_table_10 select 7, random()*3, 'XXX' from generate_series(1,500); -delete from hash_table_10 where id = 5; -vacuum hash_table_10; -insert into hash_table_10 select random()*50, random()*3, 'XXX' from generate_series(1,50000); -delete from hash_table_10 where num = 2; -vacuum hash_table_10; -drop table hash_table_10 cascade; ---reset all parameters -reset enable_indexscan; -reset enable_bitmapscan; -reset enable_seqscan; -reset maintenance_work_mem; -alter system set autovacuum = on; diff --git a/src/test/regress/expected/hll_misc.out b/src/test/regress/expected/hll_misc.out index 05ed29eec..8079aa7b8 100644 --- a/src/test/regress/expected/hll_misc.out +++ b/src/test/regress/expected/hll_misc.out @@ -136,13 +136,13 @@ with type_oids as ) select * from pg_cast where castsource in ( select * from type_oids ) or casttarget in ( select * from type_oids ) order by 1; - castsource | casttarget | castfunc | castcontext | castmethod -------------+------------+----------+-------------+------------ - 17 | 4301 | 0 | e | b - 20 | 4303 | 0 | e | b - 23 | 4303 | 4317 | e | f - 4301 | 4301 | 4311 | i | f - 4402 | 4301 | 0 | e | b + castsource | casttarget | castfunc | castcontext | castmethod | castowner +------------+------------+----------+-------------+------------+----------- + 17 | 4301 | 0 | e | b | + 20 | 4303 | 0 | e | b | + 23 | 4303 | 4317 | e | f | + 4301 | 4301 | 4311 | i | f | + 4402 | 4301 | 0 | e | b | (5 rows) --final cleaning diff --git a/src/test/regress/expected/huge_clob.out b/src/test/regress/expected/huge_clob.out new file mode 100644 index 000000000..8c9dd1bcf --- /dev/null +++ b/src/test/regress/expected/huge_clob.out @@ -0,0 +1,258 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists huge_clob; +NOTICE: schema "huge_clob" does not exist, skipping +create schema huge_clob; +set current_schema = huge_clob; +create table bigclobtbl031(c1 int,c2 clob,c3 clob,c4 blob,c5 date,c6 timestamp,c7 varchar2); +insert into bigclobtbl031 values(generate_series(1,5),repeat('AAAA11111aaaaaaaaaaaaaaaaa',1000000),repeat('abdededfj12345679ujik',1000000),hextoraw(repeat('12345678990abcdef',1000)),sysdate,to_timestamp('','yyyy-mm-dd hh24:mi:ss.ff6'),7000); +update bigclobtbl031 set c2=c2||c2||c2||c2||c2; +update bigclobtbl031 set c2=c2||c2||c2||c2||c2; +update bigclobtbl031 set c2=c2||c2; +update bigclobtbl031 set c3='clobclob3'; +--I1.clob in +create or replace procedure pro_cb4_031(c1 clob,c2 clob) +is +v1 clob; +v2 clob; +begin +v1:=dbe_lob.substr(c1,10,1); +v2:=dbe_lob.substr(c2,10,1); +raise info 'c1 is %',v1; +raise info 'c2 is %',v2; +end; +/ +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v2; +pro_cb4_031(v1,v2); +end; +/ +call pro_cb4_031_1(); +ERROR: huge clob do not support as function in parameter +CONTEXT: PL/pgSQL function pro_cb4_031(clob,clob) line 5 at assignment +SQL 
statement "CALL pro_cb4_031(v1,v2)" +PL/pgSQL function pro_cb4_031_1() line 7 at PERFORM +--I2.clob > 1G out +create or replace procedure pro_cb4_031(c1 out clob,c2 out clob) +is +v1 clob; +v2 clob; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v2; +c1:=v1; +c2:=v2; +end; +/ +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +v3 clob; +v4 clob; +begin +pro_cb4_031(v1,v2); +v3:=dbe_lob.substr(v1,10,1); +v4:=dbe_lob.substr(v2,10,1); +raise info 'v3 is %',v3; +raise info 'v4 is %',v4; +end; +/ +call pro_cb4_031_1(); +ERROR: clob from execute into do not support assign. +CONTEXT: PL/pgSQL function pro_cb4_031() line 7 at assignment +SQL statement "CALL pro_cb4_031(v1,v2)" +PL/pgSQL function pro_cb4_031_1() line 7 at SQL statement +-- <1G out +create or replace procedure pro_cb4_031(c1 out clob,c2 out clob) +is +v1 clob; +v2 clob; +begin +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=2' into v2; +c1:=v1; +c2:=v2; +end; +/ +call pro_cb4_031_1(); +ERROR: clob from execute into do not support assign. +CONTEXT: PL/pgSQL function pro_cb4_031() line 7 at assignment +SQL statement "CALL pro_cb4_031(v1,v2)" +PL/pgSQL function pro_cb4_031_1() line 7 at SQL statement +--I3.clob as inout +create or replace procedure pro_cb4_031(c1 inout clob,c2 inout clob) +is +v1 clob; +v2 clob; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v2; +c1:=v1; +c2:=v2; +end; +/ +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +v3 clob; +v4 clob; +begin +pro_cb4_031(v1,v2); +v3:=dbe_lob.substr(v1,10,1); +v4:=dbe_lob.substr(v2,10,1); +raise info 'v3 is %',v3; +raise info 'v4 is %',v4; +end; +/ +call pro_cb4_031_1(); +ERROR: clob from execute into do not support assign. +CONTEXT: PL/pgSQL function pro_cb4_031(clob,clob) line 7 at assignment +SQL statement "CALL pro_cb4_031(v1,v2)" +PL/pgSQL function pro_cb4_031_1() line 7 at SQL statement +--I4. < 1GB clob inout +create or replace procedure pro_cb4_031(c1 inout clob,c2 clob,c3 out clob) +is +v1 clob; +v2 clob; +v3 clob; +begin +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=2' into v2; +execute immediate 'select c3 from bigclobtbl031 where c1=3' into v3; +c1:=v1; +c2:=v2; +c3:=v3||'clob3clob3clob3clob3'; +end; +/ +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +v3 clob; +v4 clob; +v5 clob; +v6 clob; +begin +pro_cb4_031(v1,v2,v3); +v4:=dbe_lob.substr(v1,10,1); +v5:=dbe_lob.substr(v2,10,1); +v6:=dbe_lob.substr(v3,10,1); +raise info 'v4 is %',v4; +raise info 'v5 is %',v5; +raise info 'v6 is %',v6; +end; +/ +call pro_cb4_031_1(); +ERROR: clob from execute into do not support assign. +CONTEXT: PL/pgSQL function pro_cb4_031(clob,clob) line 9 at assignment +SQL statement "CALL pro_cb4_031(v1,v2,v3)" +PL/pgSQL function pro_cb4_031_1() line 9 at SQL statement +--I5. table of clob +create or replace procedure pro_cb4_031 is +type ty1 is table of clob; +v1 ty1; +begin +for i in 1..10 loop +execute immediate 'select c2 from bigclobtbl031 where c1='||i into v1(i); +update bigclobtbl030 set c3=v1(i)||v1(i) where c1=i; +end loop; +end; +/ +call pro_cb4_031(); +ERROR: huge clob do not support as table of element. 
+CONTEXT: PL/pgSQL function pro_cb4_031() line 6 at EXECUTE statement
+-- array
+create or replace procedure pro_cb4_031 is
+type ty1 is varray(10) of clob;
+v1 ty1;
+begin
+for i in 1..10 loop
+execute immediate 'select c2 from bigclobtbl031 where c1='||i into v1(i);
+update bigclobtbl030 set c3=v1(i)||v1(i) where c1=i;
+end loop;
+end;
+/
+call pro_cb4_031();
+ERROR: huge clob do not support as array element.
+CONTEXT: PL/pgSQL function pro_cb4_031() line 6 at EXECUTE statement
+select c1,c2,length(c2),c3,length(c3) from bigclobtbl031 where c1>5 and c1<10 order by 1,2,3,4,5;
+ c1 | c2 | length | c3 | length
+----+----+--------+----+--------
+(0 rows)
+
+update bigclobtbl031 set c3='clob3clob3';
+ERROR: tuple concurrently updated
+--I6.record
+create or replace procedure pro_cb4_031 is
+type ty1 is record(c1 int,c2 clob);
+v1 ty1;
+begin
+execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1.c2;
+end;
+/
+call pro_cb4_031();
+ pro_cb4_031
+-------------
+
+(1 row)
+
+--I7 fetch
+create or replace procedure pro_cb4_037 is
+v1 clob;
+v2 clob;
+v3 clob;
+v4 int;
+cursor cor1 is select c2 from bigclobtbl031 where c1=1;
+begin
+open cor1;
+loop
+fetch cor1 into v1;
+fetch cor1 into v1;
+fetch cor1 into v1;
+fetch cor1 into v1;
+fetch cor1 into v1;
+exit when cor1%notfound;
+end loop;
+close cor1;
+end;
+/
+call pro_cb4_037();
+ERROR: huge clob do not support as record element.
+CONTEXT: PL/pgSQL function pro_cb4_037() line 10 at FETCH
+drop table if exists cloblongtbl;
+NOTICE: table "cloblongtbl" does not exist, skipping
+create table cloblongtbl (a int, b clob, c clob);
+insert into cloblongtbl values (generate_series(1,4),repeat('唐李白床前明月光,疑是地上霜,举头望明月,低头思故乡',5000000),repeat('唐李白床前明月光,疑是地上霜,举头望明月,低头思故乡',5000000));
+update cloblongtbl set b = b||b;
+update cloblongtbl set c = c||c;
+update cloblongtbl set b = b||b where a = 2;
+update cloblongtbl set c = c||c where a = 3;
+update cloblongtbl set b = b||b where a = 4;
+update cloblongtbl set c = c||c where a = 4;
+select a, length(b || c) from cloblongtbl order by 1;
+ a |   length
+---+------------
+ 1 |  520000000
+ 2 |  780000000
+ 3 |  780000000
+ 4 | 1040000000
+(4 rows)
+
+drop table if exists cloblongtbl;
+-- clean
+drop schema if exists huge_clob cascade;
+NOTICE: drop cascades to 4 other objects
+DETAIL: drop cascades to table bigclobtbl031
+drop cascades to function pro_cb4_031_1()
+drop cascades to function pro_cb4_031()
+drop cascades to function pro_cb4_037()
diff --git a/src/test/regress/expected/hw_audit_rotation_interval.out b/src/test/regress/expected/hw_audit_rotation_interval.out
new file mode 100644
index 000000000..74ede9ff6
--- /dev/null
+++ b/src/test/regress/expected/hw_audit_rotation_interval.out
@@ -0,0 +1,13 @@
+-- set the GUC parameter
+\! /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc reload -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c "audit_rotation_interval=1"
+The gs_guc run with the following arguments: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c audit_rotation_interval=1 reload ].
+expected instance path: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+gs_guc reload: audit_rotation_interval=1: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
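+-- Note: audit_rotation_interval is interpreted in minutes, so the value 1 set above asks
+-- the auditor to start a new adt file roughly every 60 seconds; the check below depends on
+-- exactly that. A path-independent form of the same reload ($GAUSSHOME and $PGDATA are
+-- hypothetical placeholders for the hardcoded build paths used in this file) would be:
+-- \! $GAUSSHOME/bin/gs_guc reload -Z datanode -D $PGDATA -c "audit_rotation_interval=1"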
+-- after a 60s interval, get the number of the newest adt file in pg_audit and check whether it increased by 1
+\! num1=`expr $(echo $(ls /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/pg_audit -lt | head -n 2 | awk '{print $9}')| tr -cd "[0-9]") + 1` && sleep 1m && num2=$(echo $(ls /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/pg_audit -lt | head -n 2 | awk '{print $9}')| tr -cd "[0-9]") && [[ $num1 == $num2 ]] && echo 'add a new log after interval-- 60 seconds' || echo 'fail to add new logs'
+add a new log after interval-- 60 seconds
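The check above hardcodes one developer machine's datanode path. A minimal, machine-independent sketch of the same rotation probe, assuming only that gs_guc is on PATH and that $PGDATA points at the datanode data directory (both placeholders, not part of the test itself):

```
-- count audit files, wait one rotation interval, then expect the count to grow
\! gs_guc reload -Z datanode -D $PGDATA -c "audit_rotation_interval=1"
\! n1=$(ls $PGDATA/pg_audit | wc -l) && sleep 1m && n2=$(ls $PGDATA/pg_audit | wc -l) && [ "$n2" -gt "$n1" ] && echo 'rotated' || echo 'not rotated'
```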
diff --git a/src/test/regress/expected/hw_audit_rotation_size.out b/src/test/regress/expected/hw_audit_rotation_size.out
new file mode 100644
index 000000000..35c8cd537
--- /dev/null
+++ b/src/test/regress/expected/hw_audit_rotation_size.out
@@ -0,0 +1,42 @@
+-- set the GUC parameter
+\! /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc reload -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c "audit_rotation_size=2048"
+The gs_guc run with the following arguments: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c audit_rotation_size=2048 reload ].
+expected instance path: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+gs_guc reload: audit_rotation_size=2048: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+\! /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc reload -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c "audit_dml_state=1"
+The gs_guc run with the following arguments: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c audit_dml_state=1 reload ].
+expected instance path: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+gs_guc reload: audit_dml_state=1: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+-- generate dml_action audit logs
+CREATE TABLE T_TEST_ROTATION_SIZE
+(
+    COL1 int4 DEFAULT 1,
+    COL2 VARCHAR(1024) DEFAULT 'test_rotation_size');
+CREATE OR REPLACE PROCEDURE TRANSACTION_TEST_ROTATION_SIZE()
+AS
+BEGIN
+FOR i IN 0..1000000 LOOP
+INSERT INTO T_TEST_ROTATION_SIZE(COL1, COL2) VALUES (i, 'test_time');
+COMMIT;
+END LOOP;
+END;
+/
+CALL TRANSACTION_TEST_ROTATION_SIZE();
+ transaction_test_rotation_size
+--------------------------------
+
+(1 row)
+
+-- extract the size of each newly generated file and compare it with 2.1M; the run passes if all of them are smaller than 2.1M
+\! flag=0 && for i in $(find /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/pg_audit -newermt $(date -d "-75 seconds" +%H:%M:%S) -name "*_adt"); do size=$(du -h --exclude=index_table_new $i | grep -oP '\d*\.\d+M'); if [[ $size > '2.1M' ]]; then flag=1; echo $size; echo $i; fi; done && [[ "$flag" == 0 ]] && echo 'all the logs are less than 2.1M' || echo 'error -- some logs exceed limit'
+all the logs are less than 2.1M
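With audit_rotation_size=2048 (about 2MB, assuming the usual kB unit for this GUC), no single adt file should grow much past 2MB; the 2.1M bound above leaves headroom for the record that crosses the threshold. A generic form of the same check, with $PGDATA as a hypothetical placeholder for the hardcoded datanode path:

```
-- flag any audit file noticeably larger than the configured rotation size
\! find $PGDATA/pg_audit -name "*_adt" -size +2200k -print | grep -q . && echo 'error -- some logs exceed limit' || echo 'all logs within limit'
```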
diff --git a/src/test/regress/expected/hw_audit_space.out b/src/test/regress/expected/hw_audit_space.out
new file mode 100644
index 000000000..2e2bd4907
--- /dev/null
+++ b/src/test/regress/expected/hw_audit_space.out
@@ -0,0 +1,57 @@
+-- modify the GUC parameters
+\! /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc reload -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c "audit_resource_policy = 1"
+The gs_guc run with the following arguments: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c audit_resource_policy = 1 reload ].
+expected instance path: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+gs_guc reload: audit_resource_policy=1: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+\! /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc reload -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c "audit_space_limit = 512MB"
+The gs_guc run with the following arguments: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c audit_space_limit = 512MB reload ].
+expected instance path: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+gs_guc reload: audit_space_limit=512MB: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+\! /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc reload -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c "audit_dml_state =1"
+The gs_guc run with the following arguments: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/install//data6/zhoutl/GaussDB_test/install/bin/gs_guc -Z datanode -D /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/ -c audit_dml_state =1 reload ].
+expected instance path: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+gs_guc reload: audit_dml_state=1: [/data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/postgresql.conf]
+server signaled
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+-- get the number of the earliest adt file created in pg_audit
+\! echo $(echo $(ls /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/pg_audit -tr | head -2 | xargs) | tr -cd "[0-9]")
+
+-- generate dml_action audit logs
+CREATE TABLE T_TEST_SPACE
+(
+    COL1 int4 DEFAULT 1,
+    COL2 VARCHAR(1024) DEFAULT 'test_space');
+CREATE OR REPLACE PROCEDURE TRANSACTION_TEST_SPACE()
+AS
+BEGIN
+FOR i IN 0..3500000 LOOP
+INSERT INTO T_TEST_SPACE(COL1, COL2) VALUES (i, 'a');
+COMMIT;
+END LOOP;
+END;
+/
+CALL TRANSACTION_TEST_SPACE();
+ transaction_test_space
+------------------------
+
+(1 row)
+
+-- get the number of the earliest adt file in pg_audit and compare it with the original number 0_adt, to check whether the oldest log file has been deleted
+\! [[ $(echo $(ls /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/pg_audit -tr | head -2 | xargs) | tr -cd "[0-9]") > 1 ]] && echo 'delete oldest files' || echo 'fail to delete oldest files'
+delete oldest files
+-- check whether the total size of the pg_audit files exceeds the configured 512M
+\! [[ $(du -h --exclude=done /data6/zhoutl/GaussDB_test/GaussDBKernel-server/opengauss/src/test/regress/tmp_check/datanode1/pg_audit | grep -oP '\d*M') > '530M' ]] && echo 'size of total logs exceeds upper limit' || echo 'size of total logs not exceeds upper limit'
+size of total logs not exceeds upper limit
diff --git a/src/test/regress/expected/hw_cipher_sm4.out b/src/test/regress/expected/hw_cipher_sm4.out
index 07861e08c..5d069ee88 100644
--- a/src/test/regress/expected/hw_cipher_sm4.out
+++ b/src/test/regress/expected/hw_cipher_sm4.out
@@ -12,8 +12,8 @@ select gs_decrypt(gs_encrypt('along','1234@abc','sm4'),'1234@abc','sm4');
 (1 row)
 
 select gs_decrypt(gs_encrypt('world','1234@abc','sm4'),'abc@4321','sm4'); --difference key cause error
- gs_decrypt
-------------
+--?.*
+--?---.*
 --?.*
 (1 row)
 
diff --git a/src/test/regress/expected/hw_cursor_part1.out b/src/test/regress/expected/hw_cursor_part1.out
index 317049334..32a116707 100644
--- a/src/test/regress/expected/hw_cursor_part1.out
+++ b/src/test/regress/expected/hw_cursor_part1.out
@@ -975,6 +975,50 @@ NOTICE: a1:2
 CONTEXT: PL/pgSQL function inline_code_block line 5 at assignment
 ERROR: cursor " select c1 from tb1;" does not exist in FETCH statement.
CONTEXT: PL/pgSQL function inline_code_block line 7 at FETCH +create table t1_refcursor(a int); +insert into t1_refcursor values (1); +insert into t1_refcursor values (2); +create or replace procedure p3_refcursor (c1 out sys_refcursor) as +va t1_refcursor; +i int; +begin +open c1 for select * from t1_refcursor; +i = 1/0; +exception +when others then + raise info '%', 'exception'; +end; +/ +select * from p3_refcursor(); +INFO: exception + c1 +---- + +(1 row) + +create or replace procedure p3 (c4 in int,c2 out int,c3 out int,c1 out sys_refcursor,cc2 out sys_refcursor) as +va t1_refcursor; +i int; +begin +begin +open cc2 for select * from t1_refcursor; +i = 1/0; +exception +when others then + raise info '%', 'exception2'; +end; +open c1 for select * from t1_refcursor; +c3:=1; +c2:=2; +end; +/ +select * from p3(1); +INFO: exception2 + c2 | c3 | c1 | cc2 +----+----+---------------------+----- + 2 | 1 | | +(1 row) + START TRANSACTION; CURSOR sc FOR select * from generate_series(3, 13) i where i <> all (values (1),(2),(4)); MOVE FORWARD 10 IN sc; @@ -987,7 +1031,7 @@ FETCH BACKWARD FROM sc; END; -- clean up DROP SCHEMA hw_cursor_part1 CASCADE; -NOTICE: drop cascades to 18 other objects +NOTICE: drop cascades to 21 other objects DETAIL: drop cascades to table company drop cascades to table tbl drop cascades to function sp_testsp() @@ -1006,5 +1050,8 @@ drop cascades to function pro_base_003(refcursor,refcursor) drop cascades to table pro_base_001 drop cascades to table tb1 drop cascades to function i_inout(refcursor) +drop cascades to table t1_refcursor +drop cascades to function p3_refcursor() +drop cascades to function p3(integer) \c regression; drop database IF EXISTS pl_test_cursor_part1; diff --git a/src/test/regress/expected/hw_cursor_part3.out b/src/test/regress/expected/hw_cursor_part3.out index 3bf6e09e9..cef4f498f 100644 --- a/src/test/regress/expected/hw_cursor_part3.out +++ b/src/test/regress/expected/hw_cursor_part3.out @@ -217,11 +217,12 @@ begin end ; end; / -ERROR: type name too long ---?.* -CONTEXT: compilation of PL/pgSQL function "joint_debug_cursor_func_procedure_012" near line 7 call JOINT_DEBUG_CURSOR_FUNC_PROCEDURE_012(); -ERROR: function "joint_debug_cursor_func_procedure_012" doesn't exist + joint_debug_cursor_func_procedure_012 +--------------------------------------- + +(1 row) + drop table if exists cursor_vl_tb10; NOTICE: table "cursor_vl_tb10" does not exist, skipping create unlogged table cursor_vl_tb10(c1 int,c2 varchar,c3 numeric,c4 date,c5 text); @@ -516,10 +517,11 @@ ERROR: relation "cursor_vl_tb10" does not exist on datanode1 LINE 1: select * from cursor_vl_tb10; ^ DROP SCHEMA hw_cursor_part3 CASCADE; -NOTICE: drop cascades to 10 other objects +NOTICE: drop cascades to 11 other objects DETAIL: drop cascades to table t1 drop cascades to function sp_testsp_delete() drop cascades to table joint_debug_cursor_func_table_012 +drop cascades to function joint_debug_cursor_func_procedure_012() drop cascades to function i_refcursor10_1(integer) drop cascades to function i_refcursor10_2(integer) drop cascades to function i_refcursor10_3() diff --git a/src/test/regress/expected/hw_cursor_part4.out b/src/test/regress/expected/hw_cursor_part4.out index cc7ec32b9..3b7dd4292 100644 --- a/src/test/regress/expected/hw_cursor_part4.out +++ b/src/test/regress/expected/hw_cursor_part4.out @@ -98,7 +98,7 @@ INSERT INTO TBL_H248LNK_INFO VALUES(123); INSERT INTO TBL_H248LNK_INFO VALUES(456); INSERT INTO TBL_H248LNK_INFO VALUES(789); CREATE TABLE TBL (I_MODULENO INTEGER); -CREATE OR 
REPLACE PROCEDURE TEST_CURSOR +CREATE OR REPLACE PROCEDURE TEST_CURSOR_4 AS TYPE CUR_TYPE IS REF CURSOR; CUR CUR_TYPE; @@ -208,7 +208,7 @@ BEGIN raise notice 'SQL%%ROWCOUNT :%',NVL(TO_CHAR(SQL%ROWCOUNT),'NULL'); END; / -CALL TEST_CURSOR(); +CALL TEST_CURSOR_4(); NOTICE: CUR%ISOPEN : TRUE NOTICE: CUR%FOUND : TRUE NOTICE: CUR%NOTFOUND : FALSE @@ -241,12 +241,12 @@ NOTICE: SQL%ISOPEN : FALSE NOTICE: SQL%FOUND : TRUE NOTICE: SQL%NOTFOUND : FALSE NOTICE: SQL%ROWCOUNT :1 - test_cursor -------------- + test_cursor_4 +--------------- (1 row) -DROP PROCEDURE TEST_CURSOR; +DROP PROCEDURE TEST_CURSOR_4; DROP TABLE TBL_H248LNK_INFO; DROP TABLE TBL; CREATE TABLE TBL_RCWSCFG ( diff --git a/src/test/regress/expected/hw_cursor_part7.out b/src/test/regress/expected/hw_cursor_part7.out index ce318a518..0fde5b697 100644 --- a/src/test/regress/expected/hw_cursor_part7.out +++ b/src/test/regress/expected/hw_cursor_part7.out @@ -627,7 +627,7 @@ INSERT INTO TBL_H248LNK_INFO VALUES(123); INSERT INTO TBL_H248LNK_INFO VALUES(456); INSERT INTO TBL_H248LNK_INFO VALUES(789); CREATE TABLE TBL (I_MODULENO INTEGER); -CREATE OR REPLACE PROCEDURE TEST_CURSOR +CREATE OR REPLACE PROCEDURE TEST_CURSOR_7 AS TYPE CUR_TYPE IS REF CURSOR; CUR CUR_TYPE; @@ -737,7 +737,7 @@ BEGIN raise notice 'SQL%%ROWCOUNT :%',NVL(TO_CHAR(SQL%ROWCOUNT),'NULL'); END; / -CALL TEST_CURSOR(); +CALL TEST_CURSOR_7(); NOTICE: CUR%ISOPEN : TRUE NOTICE: CUR%FOUND : TRUE NOTICE: CUR%NOTFOUND : FALSE @@ -770,12 +770,12 @@ NOTICE: SQL%ISOPEN : FALSE NOTICE: SQL%FOUND : TRUE NOTICE: SQL%NOTFOUND : FALSE NOTICE: SQL%ROWCOUNT :1 - test_cursor -------------- + test_cursor_7 +--------------- (1 row) -DROP PROCEDURE TEST_CURSOR; +DROP PROCEDURE TEST_CURSOR_7; DROP TABLE TBL_H248LNK_INFO; DROP TABLE TBL; DROP TABLE TBL_RCWSCFG; @@ -901,9 +901,37 @@ NOTICE: SQL%ROWCOUNT :NULL (1 row) +create table tb_test(col1 int); +create or replace procedure proc_test() +as +v_count int; +begin +insert into tb_test select 1; +update tb_test set col1=2; +select 1 into v_count; +raise notice '%',v_count||','||SQL%FOUND || ',' || SQL%ROWCOUNT; +end; +/ +declare +v_count int; +begin +insert into tb_test select 1; +update tb_test set col1=2; +select 1 into v_count; +proc_test(); +v_count:=1; +raise notice '%',v_count||','||SQL%FOUND || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,true,1 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 7 at PERFORM +NOTICE: 1,true,1 +drop table tb_test; drop schema hw_cursor_part7 CASCADE; -NOTICE: drop cascades to 4 other objects +NOTICE: drop cascades to 5 other objects DETAIL: drop cascades to table tbl_rcwscfg drop cascades to table tbl_temp_module_312 drop cascades to function test_temp() drop cascades to function test_crs_rpt_emptysor(integer) +drop cascades to function proc_test() diff --git a/src/test/regress/expected/hw_cursor_part8.out b/src/test/regress/expected/hw_cursor_part8.out index f777101b1..f4562f298 100644 --- a/src/test/regress/expected/hw_cursor_part8.out +++ b/src/test/regress/expected/hw_cursor_part8.out @@ -395,7 +395,7 @@ END; / create table t1(a int); --test with query -create or replace procedure test_cursor() as +create or replace procedure test_cursor_8() as declare cursor cursor1 is with recursive StepCTE(a) @@ -427,7 +427,7 @@ ERROR: cannot open FETCH query as cursor CONTEXT: PL/pgSQL function pro_cursor_c0019() line 5 at OPEN create table test_cursor_table(c1 int,c2 varchar); insert into test_cursor_table values(1,'Jack'),(2,'Rose'); -create or replace procedure test_cursor() as +create or 
replace procedure test_cursor_8() as
 declare
 type ref_cur is ref cursor;
 cur1 ref_cur;
@@ -445,15 +445,15 @@ begin
 CLOSE cur1;
 end
 /
-call test_cursor();
+call test_cursor_8();
 NOTICE: 1---Jack
 NOTICE: 2---Rose
- test_cursor
-------------
+ test_cursor_8
+---------------
 
 (1 row)
 
-create or replace procedure test_cursor() as
+create or replace procedure test_cursor_8() as
 declare
 type ref_cur is ref cursor;
 cur1 ref_cur;
@@ -474,10 +474,10 @@ begin
 CLOSE cur1;
 end
 /
-call test_cursor();
+call test_cursor_8();
 NOTICE: 1---Jack
- test_cursor
-------------
+ test_cursor_8
+---------------
 
 (1 row)
 
@@ -532,7 +532,7 @@ drop cascades to function test_crs_rpt_emptysor(integer)
 drop cascades to table t1
 drop cascades to function pro_cursor_c0019()
 drop cascades to table test_cursor_table
-drop cascades to function test_cursor()
+drop cascades to function test_cursor_8()
 drop cascades to type pro_type_04
 drop cascades to function pro_base13_03(character varying,character varying,character varying)
 drop cascades to function pro_base13_04(character varying,character varying,character varying)
diff --git a/src/test/regress/expected/hw_dbms_sql1.out b/src/test/regress/expected/hw_dbms_sql1.out
new file mode 100644
index 000000000..a183a1cdf
--- /dev/null
+++ b/src/test/regress/expected/hw_dbms_sql1.out
@@ -0,0 +1,2943 @@
+create database pl_test_cursor_part1 DBCOMPATIBILITY 'pg';
+\c pl_test_cursor_part1;
+---bind_variable int
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y and a < z order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_variable(context_id, 'z', 10);
+dbe_sql.sql_bind_variable(context_id, 'y', 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+NOTICE: table "pro_dbe_sql_all_tb1_02" does not exist, skipping
+CONTEXT: SQL statement "drop table if exists pro_dbe_sql_all_tb1_02"
+PL/pgSQL function pro_dbe_sql_all_02(raw,integer,integer) line 10 at SQL statement
+id:4
+id:6
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
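+-- The unit above exercises the basic dbe_sql flow; in outline (names taken from the
+-- test itself, shown here only as a summary of the calls, not as extra test content):
+--   ctx := dbe_sql.register_context();           -- allocate a cursor context
+--   dbe_sql.sql_set_sql(ctx, query, 1);          -- parse the statement
+--   dbe_sql.sql_bind_variable(ctx, 'z', 10);     -- bind named placeholders
+--   dbe_sql.set_result_type(ctx, 1, v_id);       -- declare column 1's result type
+--   ret := dbe_sql.sql_run(ctx);                 -- execute
+--   loop exit when dbe_sql.next_row(ctx) <= 0;   -- iterate over rows
+--     dbe_sql.get_results(ctx, 1, v_id);         -- read column 1 into v_id
+--   end loop;
+--   dbe_sql.sql_unregister_context(ctx);         -- always release the context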
+---bind_variable clob
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b clob,c clob,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,HEXTORAW('DEADBEEF'),HEXTORAW('D'),11);
+insert into pro_dbe_sql_all_tb1_02 values(6,HEXTORAW('DEADBEEF'),HEXTORAW('DE'),11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where b = y and c = z';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_variable(context_id, 'y', HEXTORAW('DEADBEEF'));
+dbe_sql.sql_bind_variable(context_id, 'z', HEXTORAW('D'));
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+id:4
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+---bind_array int\char\bytea\text\raw
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+v_id4 char[];
+v_id5 bytea[];
+v_id6 text[];
+v_id7 raw[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+
+v_id5[1] := '2';
+v_id5[2] := '2';
+v_id5[3] := '3';
+
+v_id4[1] := '3';
+v_id4[2] := '3';
+v_id4[3] := '3';
+
+v_id6[1] := '11';
+v_id6[2] := '11';
+
+v_id7[1] := '1';
+v_id7[2] := '1';
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b char,c bytea,d text,e raw);
+insert into pro_dbe_sql_all_tb1_02 values(4,'3','2','11','1');
+insert into pro_dbe_sql_all_tb1_02 values(6,'3','1','11','1');
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y and b = f and c = i and d = j and e = k';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1,1,1);
+dbe_sql.sql_bind_array(context_id, 'i', v_id5,2,3);
+dbe_sql.sql_bind_array(context_id, 'f', v_id4,2,3);
+dbe_sql.sql_bind_array(context_id, 'j', v_id6,2,2);
+dbe_sql.sql_bind_array(context_id, 'k', v_id7,2,2);
+
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+id:4
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
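+-- The optional 4th/5th arguments of sql_bind_array select the slice of the array that is
+-- actually bound (v_id1 is bound as elements 1..1, v_id5 and v_id4 as 2..3, v_id6 and
+-- v_id7 as 2..2); with those slices only the row (4,'3','2','11','1') appears to satisfy
+-- all five predicates, which matches the single 'id:4' printed above.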
+---bind_array error
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1,-1,2);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+ERROR: wrong number of arguments in sql_bind_array
+CONTEXT: SQL statement "CALL dbe_sql.sql_bind_array(context_id,'y',v_id1,-1,2)"
+PL/pgSQL function pro_dbe_sql_all_02(raw,integer,integer) line 23 at PERFORM
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+---bind_array error
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1);
+dbe_sql.sql_bind_array(context_id, 'y', v_id1,1,2);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+id:6
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+---bind_array error
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y and b =z order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1);
+dbe_sql.sql_bind_array(context_id, 'y', v_id1,1,2);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+ERROR: argtypes is not valid
+DETAIL: Confirm function input parameters.
+CONTEXT: SQL statement "select * from pro_dbe_sql_all_tb1_02 where a > y and b =z order by 1"
+PL/pgSQL function pro_dbe_sql_all_02(raw,integer,integer) line 28 at assignment
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+---bind_array error
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1);
+dbe_sql.sql_bind_variable(context_id, 'y', 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+id:4
+id:6
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
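+-- Two takeaways from the error cases above: sql_bind_array rejects an out-of-range lower
+-- bound (-1) up front, and binding the same placeholder twice is not an error -- the
+-- later sql_bind_array or sql_bind_variable call appears to replace the earlier binding,
+-- which is why the final case (array bind overridden by y = 1) returns both rows.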
+---set_results_type
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b int, c char, d text);
+insert into pro_dbe_sql_all_tb1_02 values('1',9,'5','13');
+insert into pro_dbe_sql_all_tb1_02 values('2',10,'6','14');
+insert into pro_dbe_sql_all_tb1_02 values('3',11,'7','15');
+insert into pro_dbe_sql_all_tb1_02 values('4',12,'8','16');
+query := ' select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+dbe_sql.set_results_type(context_id,1,v_id6,v_id,v_id2);
+dbe_sql.set_results_type(context_id,2,v_id1,v_id,v_id2);
+dbe_sql.set_results_type(context_id,3,v_id4,v_id,v_id2);
+dbe_sql.set_results_type(context_id,4,v_id5,v_id,v_id2);
+
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+v_id3 := dbe_sql.next_row(context_id);
+v_id6 := dbe_sql.get_results(context_id,1,v_id6);
+v_id1 := dbe_sql.get_results(context_id,2,v_id1);
+v_id4 := dbe_sql.get_results(context_id,3,v_id4);
+v_id5 := dbe_sql.get_results(context_id,4,v_id5);
+exit when(v_id3 != 3);
+end loop;
+
+FOR i IN v_id1.FIRST .. v_id1.LAST LOOP
+    dbe_output.print_line('int' || i || ' = ' || v_id1[i]);
+END LOOP;
+FOR j IN v_id4.FIRST .. v_id4.LAST LOOP
+    dbe_output.print_line('char' || j || ' = ' || v_id4[j]);
+END LOOP;
+FOR j IN v_id6.FIRST .. v_id6.LAST LOOP
+    dbe_output.print_line('text' || j || ' = ' || v_id6[j]);
+END LOOP;
+FOR j IN v_id5.FIRST .. v_id5.LAST LOOP
+    dbe_output.print_line('bytea' || j || ' = ' || v_id5[j]);
+END LOOP;
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+int1 = 9
+int2 = 10
+int3 = 11
+int4 = 12
+char1 = 5
+char2 = 6
+char3 = 7
+char4 = 8
+text1 = 1
+text2 = 2
+text3 = 3
+text4 = 4
+bytea1 = \x3133
+bytea2 = \x3134
+bytea3 = \x3135
+bytea4 = \x3136
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+---set_results_type
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b int, c char, d text);
+insert into pro_dbe_sql_all_tb1_02 values('1',9,'5','13');
+insert into pro_dbe_sql_all_tb1_02 values('2',10,'6','14');
+insert into pro_dbe_sql_all_tb1_02 values('3',11,'7','15');
+insert into pro_dbe_sql_all_tb1_02 values('4',12,'8','16');
+query := ' select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+dbe_sql.set_results_type(context_id,1,v_id6,v_id,-1);
+dbe_sql.set_results_type(context_id,2,v_id1,v_id,v_id2);
+dbe_sql.set_results_type(context_id,3,v_id4,v_id,v_id2);
+dbe_sql.set_results_type(context_id,4,v_id5,v_id,v_id2);
+
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+v_id3 := dbe_sql.next_row(context_id);
+v_id6 := dbe_sql.get_results(context_id,1,v_id6);
+v_id1 := dbe_sql.get_results(context_id,2,v_id1);
+v_id4 := dbe_sql.get_results(context_id,3,v_id4);
+v_id5 := dbe_sql.get_results(context_id,4,v_id5);
+exit when(v_id3 != 3);
+end loop;
+
+FOR i IN v_id1.FIRST .. v_id1.LAST LOOP
+    dbe_output.print_line('int' || i || ' = ' || v_id1[i]);
+END LOOP;
+FOR j IN v_id4.FIRST .. v_id4.LAST LOOP
+    dbe_output.print_line('char' || j || ' = ' || v_id4[j]);
+END LOOP;
+FOR j IN v_id6.FIRST .. v_id6.LAST LOOP
+    dbe_output.print_line('text' || j || ' = ' || v_id6[j]);
+END LOOP;
+FOR j IN v_id5.FIRST .. v_id5.LAST LOOP
+    dbe_output.print_line('bytea' || j || ' = ' || v_id5[j]);
+END LOOP;
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+int1 = 9
+int2 = 10
+int3 = 11
+int4 = 12
+char1 = 5
+char2 = 6
+char3 = 7
+char4 = 8
+text-1 = 1
+text0 = 2
+text1 = 3
+text2 = 4
+bytea1 = \x3133
+bytea2 = \x3134
+bytea3 = \x3135
+bytea4 = \x3136
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
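+-- In set_results_type(context, pos, arr, cnt, lower_bnd), cnt is the number of values
+-- fetched per batch and lower_bnd the first array index written back. The previous unit
+-- used lower_bnd = 1 (int1..int4); passing -1 here is accepted and simply shifts the
+-- text column's indices to -1, 0, 1, 2, printed above as text-1 .. text2.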
+--type
+create or replace procedure pro_dbe_sql_all_02()
+as
+bb dbe_sql.date_table;
+cc dbe_sql.number_table;
+dd dbe_sql.varchar2_table;
+ee dbe_sql.desc_tab;
+begin
+
+bb(1) :=to_date('2016-11-24 10:30:10','yyyy-mm-dd hh24:mi:ss');
+cc(2) := 300;
+dd(1) := 'gasdf';
+ee(1):= (111,1,'1',1,'1',1,1,1,1,1,false);
+ee(2):= (222,1,'1',1,'1',1,1,1,1,1,false);
+ee(3):= (333,1,'1',1,'1',1,1,1,1,1,false);
+
+RAISE INFO 'date_table: %' ,bb(1);
+RAISE INFO 'number_table: %' ,cc(2);
+RAISE INFO 'varchar2_table: %' ,dd(1);
+RAISE INFO 'desc_tab: %' ,ee(1).col_type;
+RAISE INFO 'desc_tab: %' ,ee(2).col_type;
+RAISE INFO 'desc_tab: %' ,ee(3).col_type;
+RAISE INFO 'desc_tab: %' ,ee(3).col_name;
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02();
+--?.*
+INFO: number_table: 300
+INFO: varchar2_table: gasdf
+INFO: desc_tab: 111
+INFO: desc_tab: 222
+INFO: desc_tab: 333
+INFO: desc_tab: 1
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+--describe columns
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+type re_rssc is record (col_num int, desc_col dbe_sql.desc_tab);
+employer re_rssc;
+res re_rssc;
+d int;
+dd dbe_sql.desc_tab;
+query varchar(2000);
+begin
+drop table if exists pro_dbe_sql_all_tb1_02;
+create table pro_dbe_sql_all_tb1_02(a int ,b int);
+insert into pro_dbe_sql_all_tb1_02 values(1,3);
+insert into pro_dbe_sql_all_tb1_02 values(2,3);
+query := 'select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--execute
+res := dbe_sql.sql_describe_columns(context_id, d,dd);
+
+--print the results
+dbe_output.print_line('col_num:' || res.col_num);
+
+dbe_output.print_line('col_type:' || res.desc_col[1].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[1].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[1].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[1].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[1].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[1].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[1].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[1].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[1].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[1].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[1].col_null_ok);
+
+dbe_output.print_line('col_type:' || res.desc_col[2].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[2].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[2].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[2].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[2].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[2].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[2].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[2].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[2].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[2].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[2].col_null_ok);
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+col_num:2
+col_type:23
+col_max_len:4
+col_name:a
+col_name_len:1
+col_schema_name:
+col_schema_name_len:0
+col_precision:0
+col_scale:0
+col_charsetid:0
+col_charsetform:0
+col_null_ok:true
+col_type:23
+col_max_len:4
+col_name:b
+col_name_len:1
+col_schema_name:
+col_schema_name_len:0
+col_precision:0
+col_scale:0
+col_charsetid:0
+col_charsetform:0
+col_null_ok:true
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
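+-- sql_describe_columns fills a record like re_rssc above with the column count plus one
+-- desc_tab entry per result column; col_type 23 is the pg_type oid of int4 and
+-- col_max_len its byte width, while the schema/precision/scale/charset fields stay zero
+-- for this simple query.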
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+type re_rssc is record (col_num int, desc_col dbe_sql.desc_tab);
+employer re_rssc;
+d int;
+dd dbe_sql.desc_tab;
+res re_rssc;
+query varchar(2000);
+begin
+drop table if exists pro_dbe_sql_all_tb1_02;
+create table pro_dbe_sql_all_tb1_02(a int ,b int);
+insert into pro_dbe_sql_all_tb1_02 values(1,3);
+insert into pro_dbe_sql_all_tb1_02 values(2,3);
+query := 'select a,b from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--compile the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--execute
+res := dbe_sql.sql_describe_columns(context_id,d,dd);
+
+--print the results
+dbe_output.print_line('col_num:' || res.col_num);
+
+dbe_output.print_line('col_type:' || res.desc_col[1].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[1].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[1].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[1].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[1].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[1].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[1].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[1].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[1].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[1].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[1].col_null_ok);
+
+dbe_output.print_line('col_type:' || res.desc_col[2].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[2].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[2].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[2].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[2].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[2].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[2].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[2].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[2].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[2].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[2].col_null_ok);
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+col_num:2
+col_type:23
+col_max_len:4
+col_name:a
+col_name_len:1
+col_schema_name:
+col_schema_name_len:0
+col_precision:0
+col_scale:0
+col_charsetid:0
+col_charsetform:0
+col_null_ok:true
+col_type:23
+col_max_len:4
+col_name:b
+col_name_len:1
+col_schema_name:
+col_schema_name_len:0
+col_precision:0
+col_scale:0
+col_charsetid:0
+col_charsetform:0
+col_null_ok:true
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+
+
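+-- The anyelement cases below drive a stored procedure with OUT parameters through
+-- dbe_sql: every OUT placeholder is registered with sql_bind_variable before sql_run,
+-- and after execution the value is read back with get_variable_result into a local
+-- variable of the same type.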
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); + +dbe_sql.sql_unregister_context(context_id); +--输出结果 +RAISE INFO 'v1: %' ,v1(0); +RAISE INFO 'v1: %' ,v1(1); +RAISE INFO 'v3: %' ,v3(0); +RAISE INFO 'v3: %' ,v3(1); +end; +/ +INFO: v1: \x31 +INFO: v1: \x31 +INFO: v3: \x31 +INFO: v3: \x31 + +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text[],o_ret2 out text[]) as +v_a varchar2; +begin +if i_col1=1 then +select 2 into v_a; +end if; +o_ret(0):='100'; +o_ret(1):='100'; +o_ret2(0):='40'; +o_ret2(1):='30'; +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 text[]; +v3 text[]; +v2 int; +o_ret text[]; +o_retw text[]; +begin +query := 'call proc_test(i_col1,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10); +dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw); +dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); + +dbe_sql.sql_unregister_context(context_id); +--输出结果 +dbe_output.print_line('v1: '|| v1(0)); +dbe_output.print_line('v1: '|| v1(1)); +dbe_output.print_line('v1: '|| v3(0)); +dbe_output.print_line('v1: '|| v3(1)); +end; +/ +v1: 100 +v1: 100 +v1: 40 +v1: 30 + +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out clob[],o_ret2 out clob[]) as +v_a varchar2; +begin +if i_col1=1 then +select 2 into v_a; +end if; +o_ret(0):='100'; +o_ret(1):='100'; +o_ret2(0):='40'; +o_ret2(1):='30'; +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 clob[]; +v3 clob[]; +v2 int; +o_ret clob[]; +o_retw clob[]; +begin +query := 'call proc_test(i_col1,o_ret,o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10); +dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw); +dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); + +dbe_sql.sql_unregister_context(context_id); +--输出结果 +dbe_output.print_line('v1: '|| v1(0)); +dbe_output.print_line('v1: '|| v1(1)); +dbe_output.print_line('v1: '|| v3(0)); +dbe_output.print_line('v1: '|| v3(1)); +end; +/ +v1: 100 +v1: 100 +v1: 40 +v1: 30 + + +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out character[],o_ret2 out character[]) as +v_a varchar2; +begin +if i_col1=1 then +select 2 into v_a; +end if; +o_ret(0):='1'; +o_ret(1):='2'; +o_ret2(0):='3'; +o_ret2(1):='4'; +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 character[]; +v3 character[]; +v2 int; +o_retw character[]; +begin +query := 'call proc_test(i_col1,o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10); +dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw); +dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); + +dbe_sql.sql_unregister_context(context_id); +RAISE 
INFO 'v1: : %' ,v1(0);
+RAISE INFO 'v1: : %' ,v1(1);
+
+RAISE INFO 'v1: : %' ,v3(0);
+
+RAISE INFO 'v1: : %' ,v3(1);
+
+end;
+/
+INFO: v1: : 1
+INFO: v1: : 2
+INFO: v1: : 3
+INFO: v1: : 4
+
+drop PROCEDURE proc_test;
+
+-------------------------- support binding an array and a variable together --------------------------
+--------------------------- bind array + bind variable: select ---------------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+exe int[];
+dddd char;
+begin
+exe[1] := 4;
+exe[2] := 6;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,5,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,10,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,20,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a = y and b < 20 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the statement
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind parameters
+--dbe_sql.sql_bind_variable(context_id, 'z', 20);
+dbe_sql.sql_bind_array(context_id, 'y', exe);
+--define the column
+dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the value
+dbe_sql.get_results(context_id,1,v_id);
+--print the result
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+id:6
+id:6
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+--------------------- bind array + bind variable: insert ---------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+exe int[];
+dddd char;
+begin
+exe[1] := 4;
+exe[2] := 6;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,5,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,10,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,20,1,11);
+query := 'insert into pro_dbe_sql_all_tb1_02 values(y,z)';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the statement
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind parameters
+dbe_sql.sql_bind_variable(context_id, 'z', 20);
+dbe_sql.sql_bind_array(context_id, 'y', exe);
+--define the column
+dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+
+
+--------------------------- column size limit support for bind_variable ---------------------------
+---------------------- bind_variable: IN parameter ----------------------------------------
+----------------------------------------------------------------------------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id text;
+v_id1 text;
+--v_id int;
+query varchar(2000);
+execute_ret int;
+begin
+v_id1 := 'abc';
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b int,c int,d int);
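+--two seed rows follow ('ab' and 'abc'); binding y to the full text 'abc'
+--should match only the second row, as the single "id:abc" line below shows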
+insert into pro_dbe_sql_all_tb1_02 values('ab',3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values('abc',3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a = y order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the statement
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind parameters
+dbe_sql.sql_bind_variable(context_id, 'y', v_id1);
+--dbe_sql.sql_bind_variable(context_id, 'y', 3);
+--define the column
+dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the value
+dbe_sql.get_results(context_id,1,v_id);
+--print the result
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+id:abc
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+------------------------------------------------------------------------
+--------------------------- bind_variable: INOUT parameters -------------
+
+----##########################------------------------text:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text;
+v3 text;
+v2 int;
+--o_ret character[];
+o_retw text;
+o_retw1 text;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+INFO: v1: : 1
+INFO: v1: : 345
+
+
+----##########################------------------------text (same test repeated):
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text;
+v3 text;
+v2 int;
+--o_ret character[];
+o_retw text;
+o_retw1 text;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+INFO: v1: : 1
+INFO: v1: : 345
+
+
+----##########################------------------------bytea:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out bytea,o_ret2 out bytea) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 bytea;
+v3 bytea;
+o_retw bytea;
+o_retw1 bytea;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
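+--the trailing argument caps the size of the returned value: caps of 1 and 3
+--truncate the bytea OUT values to \x31 and \x333435 (see the INFO lines below)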
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+INFO: v1: : \x31
+INFO: v1: : \x333435
+
+
+
+----##########################------------------------bpchar:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out bpchar,o_ret2 out bpchar) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 bpchar;
+v3 bpchar;
+o_retw bpchar;
+o_retw1 bpchar;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,4);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+INFO: v1: : 123
+INFO: v1: : 3456
+
+----------------------- column size limit support for set_result_type ----------------------------
+--------------------------------------- set_result_type / column_value: text
+------------------------------------------------------------------------------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info1 text :=1;
+
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b clob);
+insert into pro_dbe_sql_all_tb1_02 values('asbdrdgg',HEXTORAW('DEADBEEE'));
+insert into pro_dbe_sql_all_tb1_02 values(2,in_raw);
+query := 'select a from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the statement
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the column
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,10);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the value
+dbe_sql.get_results(context_id,1,v_info1);
+--print the result
+dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+info:1 info:2
+info:1 info:asbdrdgg
+ pro_dbe_sql_all_02
+--------------------
+
+(1 row)
+
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+--------------- set_result_type / column_value: bytea
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+nnn text;
+v_info1 bytea;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a bytea ,b clob);
+insert into pro_dbe_sql_all_tb1_02 values('646464',HEXTORAW('DEADBEEE'));
+insert into pro_dbe_sql_all_tb1_02 values('646464',in_raw);
+query := 'select a from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the statement
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the column
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,10);
+--execute
+execute_ret := 
dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--获取值 +dbe_sql.get_result_raw(context_id,1,v_info1); + +--输出结果 +dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1); +end loop; +--关闭游标 +dbe_sql.sql_unregister_context(context_id); +end; +/ +--调用存储过程 +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +info:1 info:\x363436343634 +info:1 info:\x363436343634 + pro_dbe_sql_all_02 +-------------------- + +(1 row) + +--删除存储过程 +DROP PROCEDURE pro_dbe_sql_all_02; + +-----------set_result_type-------column_value----------------------------bpchar + +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +nnn text; +v_info1 bpchar; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +begin +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a bpchar ,b clob); +insert into pro_dbe_sql_all_tb1_02 values('646464',HEXTORAW('DEADBEEE')); +insert into pro_dbe_sql_all_tb1_02 values('646464',in_raw); +query := 'select a from pro_dbe_sql_all_tb1_02 order by 1'; +--打开游标 +context_id := dbe_sql.register_context(); +--编译游标 +dbe_sql.sql_set_sql(context_id, query, 1); +--定义列 +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,3); + +--执行 +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--获取值 +dbe_sql.get_results(context_id,1,v_info1); + +--nnn := pkg_util.lob_rawtotext(v_info1); +--输出结果 +dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1); +end loop; +--关闭游标 +dbe_sql.sql_unregister_context(context_id); +end; +/ +--调用存储过程 +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +info:1 info:646 +info:1 info:646 + pro_dbe_sql_all_02 +-------------------- + +(1 row) + +--删除存储过程 +DROP PROCEDURE pro_dbe_sql_all_02; +---========================test raw /clob/blob function +create or replace procedure pro_get_variable_07(in_raw int,v_in out bigint,v_offset out bigint,ary1 out bigint[],ary2 out bigint[]) +as +context_id int; +v_id int :=3; + +v_id1 int[]; + +v_id5 bytea[]; +v_id6 text[]; + +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +define_column_ret int; +begin +v_in:=10; +v_offset:=30; +ary1(0):='1'; +ary1(1):='2'; +ary1(2):='3'; +ary2(0):='12'; +ary2(1):='13'; +ary2(2):='14'; +end; +/ + +create or replace procedure call_get_variable_07() +as +context_id number; +query text; +define_column_ret int; +v1 bigint; +v3 bigint; +v2 bigint; +v4 bigint[]; +v5 bigint[]; +v_in bigint; +v_offset bigint; +ary1 bigint[]; +ary2 bigint[]; +o_retw bigint; +o_retw1 bigint[]; +begin +query := 'call pro_get_variable_07(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); + +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'v_in',v1); +dbe_sql.get_variable_result(context_id,'v_offset',v3); +dbe_sql.get_array_result_int(context_id,'ary1',v4); +dbe_sql.get_array_result_int(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +end; +/ +call 
call_get_variable_07(); +INFO: v1: 10 +INFO: v3: 30 +INFO: v4: 1 +INFO: v4: 2 +INFO: v5: 12 +INFO: v5: 13 + call_get_variable_07 +---------------------- + +(1 row) + + +----================================================== +create or replace procedure pro_get_variable_result_text_02(in_raw int,v_in out clob,v_offset out clob,ary1 out clob[],ary2 out clob[]) +as +context_id int; +v_id int :=3; +--test +v_id1 int[]; +v_id4 character[]; +v_id5 bytea[]; +v_id6 clob[]; + +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +define_column_ret int; +begin +v_in:='abcdnfdfdfdafds'; +v_offset:='ccccccccccccccccccccccc'; +ary1(0):='aa'; +ary1(1):='bb'; +ary2(0):='cc'; +ary2(1):='dd'; +end; +/ + +create or replace procedure call_get_variable_text_02() +as +context_id number; +query clob; +define_column_ret int; +v1 clob; +v3 clob; +v2 clob; +v4 clob[]; +v5 clob[]; +v_in clob; +v_offset clob; +ary1 clob[]; +ary2 clob[]; +o_retw clob; +o_retw1 clob[]; +begin +query := 'call pro_get_variable_result_text_02(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); +define_column_ret := dbe_sql.sql_run(context_id); + +v1:=dbe_sql.get_variable_result_text(context_id,'v_in'); +v2:=dbe_sql.get_variable_result_text(context_id,'v_offset'); +dbe_sql.get_array_result_text(context_id,'ary1',v4); +dbe_sql.get_array_result_text(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v2: %' ,v2; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call call_get_variable_07(); +INFO: v1: 10 +INFO: v3: 30 +INFO: v4: 1 +INFO: v4: 2 +INFO: v5: 12 +INFO: v5: 13 + call_get_variable_07 +---------------------- + +(1 row) + + +----================================================== +CREATE OR REPLACE PROCEDURE proc_get_variable_arr_result_text_03(i_col1 in int,o_ret out character varying,o_ret2 out character varying, ary1 out character varying[],ary2 out character varying[]) as +v_a varchar2; +begin +if i_col1=1 then +select 2 into v_a; +end if; +o_ret:=1; +o_ret2:=2; +ary1(0):='a'; +ary1(1):='d'; +ary1(2):='f'; +ary2(0):='f'; +ary2(1):='d'; +ary2(2):='f'; +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 character varying; +v3 character varying; +v2 character varying; +v4 character varying[]; +v5 character varying[]; +o_ret character varying; + +ary1 character varying[]; +ary2 character varying[]; +o_retw character varying; +o_retw1 character varying[]; +begin +query := 'call proc_get_variable_arr_result_text_03(i_col1,o_ret,o_ret2,ary1,ary2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10); + +dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,1); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); +define_column_ret := dbe_sql.sql_run(context_id); +v1:=dbe_sql.get_variable_result_text(context_id,'o_ret'); +v2:=dbe_sql.get_variable_result_text(context_id,'o_ret2'); 
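+--scalar OUT values are fetched through the get_variable_result_text return
+--value, while the collections go through get_array_result_text OUT parameters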
+dbe_sql.get_array_result_text(context_id,'ary1',v4); +dbe_sql.get_array_result_text(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v2: %' ,v2; +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v5: %' ,v5(2); +RAISE INFO 'v5: %' ,v5(0); +end; +/ +INFO: v1: 1 +INFO: v2: 2 +INFO: v4: d +INFO: v4: a +INFO: v5: f +INFO: v5: f + +----============================================= + +create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out raw,v_offset out raw,ary1 out raw[],ary2 out raw[]) +as +context_id int; +v_id int :=3; +begin +v_in:=HEXTORAW('DEADBEEF'); +v_offset:=HEXTORAW('DEADBEEF'); +ary1(0):=HEXTORAW('DEADBEEF'); +ary1(1):=HEXTORAW('DEADBEEF'); +ary2(0):=HEXTORAW('DEADBEEF'); +ary2(1):=HEXTORAW('DEADBEEF'); +end; +/ + +create or replace procedure call_get_variable_arr_raw_01() +as +context_id number; +query text; +define_column_ret int; +v1 raw; +v3 raw; +v2 raw; +v4 raw[]; +v5 raw[]; +v_in raw; +v_offset raw; +ary2 raw[]; +ary1 raw[]; +o_retw raw; +o_retw1 raw[]; +begin +query := 'call proc_get_variable_result_raw_01(in_raw,v_in,v_offset,ary1,ary2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result_raw(context_id,'v_in',v1); +dbe_sql.get_variable_result_raw(context_id,'v_offset',v3); +dbe_sql.get_array_result_raw(context_id,'ary1',v4); +dbe_sql.get_array_result_raw(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +end; +/ +call call_get_variable_arr_raw_01(); +INFO: v1: DEADBEEF +INFO: v3: DEADBEEF +INFO: v4: DEADBEEF +INFO: v4: DEADBEEF +INFO: v5: DEADBEEF +INFO: v5: DEADBEEF + call_get_variable_arr_raw_01 +------------------------------ + +(1 row) + + + +---============================================ + +create or replace procedure pro_get_variable_06(in_raw int,v_in out clob,v_offset out clob,ary1 out clob[],ary2 out clob[]) +as +context_id int; +v_id int :=3; + +v_id1 int[]; +v_id4 clob[]; +v_id5 bytea[]; +v_id6 text[]; + +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +define_column_ret int; +begin +v_in:='aaa36'; +v_offset:='fdf5'; +ary1(0):='aafd'; +ary1(1):='fdsf'; +ary2(0):='fa'; +ary2(1):='fsafdasf'; +end; +/ + +create or replace procedure call_get_variable_06() +as +context_id number; +query text; +define_column_ret int; +v1 clob; +v3 clob; +v2 clob; +v4 clob[]; +v5 clob[]; +v_in clob; +v_offset clob; +ary1 clob[]; +ary2 clob[]; +o_retw clob; +o_retw1 clob[]; +begin +query := 'call pro_get_variable_06(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'v_in',v1); 
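+--note: this variant fetches the clob arrays (ary1/ary2 below) through the
+--generic get_variable_result overload rather than a typed get_array_result_*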
+dbe_sql.get_variable_result(context_id,'v_offset',v3); +dbe_sql.get_variable_result(context_id,'ary1',v4); +dbe_sql.get_variable_result(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +end; +/ +call call_get_variable_06(); +INFO: v1: aaa36 +INFO: v3: fdf5 +INFO: v4: aafd +INFO: v4: fdsf +INFO: v5: fa +INFO: v5: fsafdasf + call_get_variable_06 +---------------------- + +(1 row) + + +----=================================test 直接获取第n列====================== +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(b int, c char, d text); +insert into pro_dbe_sql_all_tb1_02 values(9,'5','13'); +insert into pro_dbe_sql_all_tb1_02 values(10,'6','14'); +insert into pro_dbe_sql_all_tb1_02 values(11,'7','15'); +insert into pro_dbe_sql_all_tb1_02 values(12,'8','16'); +create or replace procedure pro_dbe_sql_all_01() +as +context_id int; +v_id int :=3; +--test +v_id1 int[]; +v_id4 character[]; +v_id5 bytea[]; +v_id6 text[]; +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +col_type1 int; +col_type2 char; +col_type3 text; +col_type4 bytea; +begin +query := ' select * from pro_dbe_sql_all_tb1_02 order by 1'; +--打开游标 +context_id := dbe_sql.register_context(); +--编译游标 +dbe_sql.sql_set_sql(context_id, query, 1); +--定义列 +DBE_SQL.set_result_type_ints(context_id,2,v_id1,v_id,v_id2); +--执行 +execute_ret := dbe_sql.sql_run(context_id); +loop +v_id3 := dbe_sql.next_row(context_id); +v_id1 := DBE_SQL.get_results_int(context_id,2,v_id1); +exit when(v_id3 != 3); +end loop; +FOR i IN v_id1.FIRST .. v_id1.LAST LOOP +dbe_output.print_line('int' || i || ' = ' || v_id1[i]); +END LOOP; +--关闭游标 +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call pro_dbe_sql_all_01(); +int1 = 5 +int2 = 6 +int3 = 7 +int4 = 8 + pro_dbe_sql_all_01 +-------------------- + +(1 row) + +drop table if exists pro_dbe_sql_all_tb1_02; + +----==============================test raw set_results_type== ==== +drop table if exists pro_dbe_sql_all_tb1_02 ; +NOTICE: table "pro_dbe_sql_all_tb1_02" does not exist, skipping +create table pro_dbe_sql_all_tb1_02(b raw, c raw, d clob); +insert into pro_dbe_sql_all_tb1_02 values('9','5','13'); +insert into pro_dbe_sql_all_tb1_02 values('10','6','14'); +insert into pro_dbe_sql_all_tb1_02 values('11','7','15'); +insert into pro_dbe_sql_all_tb1_02 values('12','8','16'); +create or replace procedure pro_dbe_sql_all_01() +as +context_id int; +v_id int :=3; +--test +v_id1 raw[]; +v_id4 character[]; +v_id5 bytea[]; +v_id6 text[]; +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +col_type1 int; +col_type2 char; +col_type3 text; +col_type4 bytea; +begin +query := ' select * from pro_dbe_sql_all_tb1_02 order by 1'; +--打开游标 +context_id := dbe_sql.register_context(); +--编译游标 +dbe_sql.sql_set_sql(context_id, query, 1); +--定义列 +DBE_SQL.set_results_type(context_id,1,v_id1,v_id,v_id2); +--执行 +execute_ret := dbe_sql.sql_run(context_id); +loop +v_id3 := dbe_sql.next_row(context_id); +DBE_SQL.get_results(context_id,1,v_id1); +exit when(v_id3 != 3); +end loop; +FOR i IN v_id1.FIRST .. 
v_id1.LAST LOOP +dbe_output.print_line('int' || i || ' = ' || v_id1[i]); +END LOOP; +--关闭游标 +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call pro_dbe_sql_all_01(); +int1 = 09 +int2 = 10 +int3 = 11 +int4 = 12 + pro_dbe_sql_all_01 +-------------------- + +(1 row) + +drop table if exists pro_dbe_sql_all_tb1_02 ; + +----------------check NULL for is_active and sql_unregister_context + +create or replace procedure call_get_variable_06() +as +context_id int := NULL; +begin +raise notice '11111'; +if dbe_sql.is_active(context_id) then + raise notice '2222'; + dbe_sql.sql_unregister_context(context_id); +end if; +end; +/ +select * from call_get_variable_06(); +NOTICE: 11111 + call_get_variable_06 +---------------------- + +(1 row) + + +create or replace procedure call_get_variable_06() +as +context_id int := NULL; +begin +dbe_sql.sql_unregister_context(context_id); +end; +/ +select * from call_get_variable_06(); + call_get_variable_06 +---------------------- + +(1 row) + +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +v_info bytea :=1; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +type re_rssc is record (col_num int, desc_col dbe_sql.desc_tab); +employer re_rssc; +res re_rssc; +d int; +dd dbe_sql.desc_tab; +begin +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int); +insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11); +insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11); +query := 'select *,1,ss from pro_dbe_sql_all_tb1_02 where a > y and a < z order by s'; +--打开游标 +context_id := dbe_sql.register_context(); +--编译游标 +dbe_sql.sql_set_sql(context_id, query, 1); +--描述 +res := dbe_sql.sql_describe_columns(context_id, d,dd); + +--绑定参数 +dbe_sql.sql_bind_variable(context_id, 'z', 10); +dbe_sql.sql_bind_variable(context_id, 'y', 1); +dbe_sql.sql_bind_variable(context_id, 's', 1); +dbe_sql.sql_bind_variable(context_id, 'ss', 1); +--定义列 +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id); +--执行 +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--获取值 +dbe_sql.get_results(context_id,1,v_id); +--输出结果 +dbe_output.print_line('id:'|| v_id); +end loop; +dbe_output.print_line('col_num:' || res.col_num); +dbe_output.print_line('col_type:' || res.desc_col[1].col_type); +dbe_output.print_line('col_type:' || res.desc_col[2].col_type); +dbe_output.print_line('col_type:' || res.desc_col[3].col_type); +--关闭游标 +dbe_sql.sql_unregister_context(context_id); +end; +/ +--调用存储过程 +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +NOTICE: table "pro_dbe_sql_all_tb1_02" does not exist, skipping +CONTEXT: SQL statement "drop table if exists pro_dbe_sql_all_tb1_02" +PL/pgSQL function pro_dbe_sql_all_02(raw,integer,integer) line 15 at SQL statement +id:4 +id:6 +col_num:4 +col_type:23 +col_type:23 +col_type:23 + pro_dbe_sql_all_02 +-------------------- + +(1 row) + +--删除存储过程 +DROP PROCEDURE pro_dbe_sql_all_02; +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text[],o_ret2 out text[]) as +v_a varchar2; +begin +if i_col1=1 then +select 2 into v_a; +end if; +o_ret(0):='10'; +o_ret(1):='20'; +o_ret2(0):='30'; +o_ret2(1):='40'; +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 text[]; +v3 text[]; +v2 int; +o_ret text[]; +o_retw text[]; +v4 int[]; + +begin +v4(0):=1; +v4(1):=2; +query := 'call proc_test(i_col1,o_ret,o_ret2);'; +context_id := 
dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_array(context_id, 'i_col1',v4); +commit; +dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw); +dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw); +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); +dbe_sql.sql_unregister_context(context_id); +--输出结果 +dbe_output.print_line('v1: '|| v1(0)); +dbe_output.print_line('v1: '|| v1(1)); +dbe_output.print_line('v1: '|| v3(0)); +dbe_output.print_line('v1: '|| v3(1)); +end; +/ +v1: 10 +v1: 20 +v1: 30 +v1: 40 +----=============================================== +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,i_col2 in int,o_ret out text[],o_ret2 out text[]) as +v_a varchar2; +begin +if i_col1=1 then +o_ret(0):='10'; +o_ret(1):='20'; +o_ret2(0):='30'; +o_ret2(1):='40'; +end if; +if i_col1=2 and i_col2=1 then +o_ret(0):='100'; +o_ret(1):='200'; +o_ret2(0):='300'; +o_ret2(1):='400'; +end if; + +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 text[]; +v3 text[]; +v2 int; +o_ret text[]; +o_retw text[]; +v4 int[]; + +begin +v4(0):=1; +v4(1):=2; +query := 'call proc_test(i_col1,i_col2,o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_array(context_id, 'i_col1',v4); +dbe_sql.sql_bind_variable(context_id, 'i_col2',1); +dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw); +dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw); +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); +dbe_sql.sql_unregister_context(context_id); +--输出结果 +dbe_output.print_line('v1: '|| v1(0)); +dbe_output.print_line('v1: '|| v1(1)); +dbe_output.print_line('v1: '|| v3(0)); +dbe_output.print_line('v1: '|| v3(1)); +end; +/ +v1: 100 +v1: 200 +v1: 300 +v1: 400 + +-----=========================== +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out int,o_ret2 out int) as +v_a varchar2; +begin +if i_col1=1 then +o_ret:=10; +o_ret2:=30; +end if; +if i_col1=2 then +o_ret:=20; +o_ret2:=40; +else +o_ret:=100; +o_ret2:=200; +end if; +end; +/ + +declare +context_id number; +query text; +define_column_ret int; +v1 int; +v3 int; +v2 int; +o_ret int; +o_retw int; +begin +query := 'call proc_test(i_col1,NULL,NULL);'; +context_id := dbe_sql.register_context(); + +for i in 1..3 loop +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'i_col1',i,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,100); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +end loop; +dbe_sql.sql_unregister_context(context_id); +--输出结果 + +end; +/ +INFO: v1: 100 +INFO: v3: 200 +INFO: v1: 20 +INFO: v3: 40 +INFO: v1: 100 +INFO: v3: 200 + + +-----==================================================== +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +v_info1 text :=1; + +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +begin +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a text ,b clob); +insert into 
pro_dbe_sql_all_tb1_02 values('asbdrdgg',HEXTORAW('DEADBEEE')); +insert into pro_dbe_sql_all_tb1_02 values(2,in_raw); +query := 'select a from pro_dbe_sql_all_tb1_02 order by 1'; +--打开游标 +context_id := dbe_sql.register_context(); +--编译游标 +--定义列 +for i in 1..20 loop +dbe_sql.sql_set_sql(context_id, query, 1); +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,10); +--执行 +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--获取值 +dbe_sql.get_results(context_id,1,v_info1); +--输出结果 +dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1); +end loop; +end loop; +--关闭游标 +dbe_sql.sql_unregister_context(context_id); +end; +/ +--调用存储过程 +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg +info:1 info:2 +info:1 info:asbdrdgg + pro_dbe_sql_all_02 +-------------------- + +(1 row) + + +---============================6.自治事物================ +--建表 +create table t2(a int, b int); +insert into t2 values(1,2); +select * from t2; + a | b +---+--- + 1 | 2 +(1 row) + +--创建包含自治事务的存储过程 +CREATE OR REPLACE PROCEDURE autonomous_4(a int, b int) AS +DECLARE + num3 int := a; + num4 int := b; + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + insert into t2 values(num3, num4); + dbe_output.print_line('just use call.'); +END; +/ +--创建调用自治事务存储过程的普通存储过程 +CREATE OR REPLACE PROCEDURE autonomous_5(a int, b int) AS +DECLARE +BEGIN + dbe_output.print_line('just no use call.'); + insert into t2 values(666, 666); + autonomous_4(a,b); + rollback; +END; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='autonomous_5'; +query := 'call '||proc_name||'(o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +CREATE OR REPLACE PACKAGE package_002 IS +PROCEDURE testpro1(var3 int); +END package_002; +/ +ERROR: Package only allowed create in A compatibility + +--调用普通存储过程 +select autonomous_5(11,22); +just no use call. +just use call. 
+ + autonomous_5 +-------------- + +(1 row) + +--查看表结果 +select * from t2 order by a; + a | b +----+---- + 1 | 2 + 11 | 22 +(2 rows) + + +------------------------------------------------------------ +CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as +v_a varchar2; +i int; +begin +i =1/0; +exception +when others then + raise info '%', 'exception'; +end; +/ + +CREATE OR REPLACE PROCEDURE q(mm out int) as +declare +context_id number; +query text; +define_column_ret int; +v1 text; +v3 text; +v2 int; +o_retw text; +o_retw1 text; +begin +query := 'call proc_test(i_col1,o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3); +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.get_variable_result(context_id,'o_ret2',v3); +dbe_sql.sql_unregister_context(context_id); + +RAISE INFO 'v1: : %' ,v1; +RAISE INFO 'v1: : %' ,v3; +mm = 1; +end; +/ +select * from q(); +INFO: exception +CONTEXT: SQL statement "call proc_test(i_col1,o_ret,o_ret2);" +PL/pgSQL function q() line 18 at assignment +INFO: v1: : +INFO: v1: : + mm +---- + 1 +(1 row) + + +create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out blob,v_offset out blob,ary1 out blob[],ary2 out blob[]) +as +context_id int; +v_id int :=3; +--test +v_id1 int[]; +v_id4 blob[]; +v_id5 blob[]; +v_id6 text[]; + +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +define_column_ret int; +i int := 1; +begin +v_in:=HEXTORAW('DEADBEEF'); +v_offset:=HEXTORAW('DEADBEEF'); +ary1(0):=HEXTORAW('DEADBEEF'); +ary1(1):=HEXTORAW('DEADBEEF'); +ary2(0):=HEXTORAW('DEADBEEF'); +ary2(1):=HEXTORAW('DEADBEEF'); +end; +/ + +create or replace procedure call_get_variable_raw_01() +as +context_id number; +query text; +define_column_ret int; +v1 blob; +v3 blob; +v2 blob; +v4 blob[]; +v5 blob[]; +v_in blob; +v_offset blob; +ary2 blob[]; +ary1 blob[]; +o_retw blob; +o_retw1 blob[]; +i int := 1; +begin +query := 'call proc_get_variable_result_raw_01(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +--while i < 4 loop +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result_raw(context_id,'v_in',v1); +dbe_sql.get_variable_result_raw(context_id,'v_offset',v3); +dbe_sql.get_array_result_raw(context_id,'ary1',v4); +dbe_sql.get_array_result_raw(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call call_get_variable_raw_01(); +INFO: v1: DEADBEEF +INFO: v3: DEADBEEF +INFO: v4: DEADBEEF +INFO: v4: DEADBEEF +INFO: v5: DEADBEEF +INFO: v5: DEADBEEF + call_get_variable_raw_01 +-------------------------- + +(1 row) + + +----------------------------------- +create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out bytea,v_offset out bytea,ary1 out 
bytea[],ary2 out bytea[]) +as +context_id int; +v_id int :=3; +--test +v_id1 int[]; +v_id4 bytea[]; +v_id5 bytea[]; +v_id6 text[]; + +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +define_column_ret int; +i int := 1; +begin +v_in:=HEXTORAW('DEADBEEF'); +v_offset:=HEXTORAW('DEADBEEF'); +ary1(0):=HEXTORAW('DEADBEEF'); +ary1(1):=HEXTORAW('DEADBEEF'); +ary2(0):=HEXTORAW('DEADBEEF'); +ary2(1):=HEXTORAW('DEADBEEF'); +end; +/ + +create or replace procedure call_get_variable_raw_01() +as +context_id number; +query text; +define_column_ret int; +v1 bytea; +v3 bytea; +v2 bytea; +v4 bytea[]; +v5 bytea[]; +v_in bytea; +v_offset bytea; +ary2 bytea[]; +ary1 bytea[]; +o_retw bytea; +o_retw1 bytea[]; +i int := 1; +begin +query := 'call proc_get_variable_result_raw_01(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +--while i < 4 loop +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result_raw(context_id,'v_in',v1); +dbe_sql.get_variable_result_raw(context_id,'v_offset',v3); +dbe_sql.get_array_result_raw(context_id,'ary1',v4); +dbe_sql.get_array_result_raw(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call call_get_variable_raw_01(); +INFO: v1: \x4445414442454546 +INFO: v3: \x4445414442454546 +INFO: v4: \x4445414442454546 +INFO: v4: \x4445414442454546 +INFO: v5: \x4445414442454546 +INFO: v5: \x4445414442454546 + call_get_variable_raw_01 +-------------------------- + +(1 row) + + +------------------------------------- +create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out raw,v_offset out raw,ary1 out raw[],ary2 out raw[]) +as +context_id int; +v_id int :=3; +--test +v_id1 int[]; +v_id4 raw[]; +v_id5 raw[]; +v_id6 text[]; + +v_id2 int := 1; +v_id3 int; +query varchar(2000); +execute_ret int; +define_column_ret int; +i int := 1; +begin +v_in:=HEXTORAW('DEADBEEF'); +v_offset:=HEXTORAW('DEADBEEF'); +ary1(0):=HEXTORAW('DEADBEEF'); +ary1(1):=HEXTORAW('DEADBEEF'); +ary2(0):=HEXTORAW('DEADBEEF'); +ary2(1):=HEXTORAW('DEADBEEF'); +end; +/ + +create or replace procedure call_get_variable_raw_01() +as +context_id number; +query text; +define_column_ret int; +v1 raw; +v3 raw; +v2 raw; +v4 raw[]; +v5 raw[]; +v_in raw; +v_offset raw; +ary2 raw[]; +ary1 raw[]; +o_retw raw; +o_retw1 raw[]; +i int := 1; +begin +query := 'call proc_get_variable_result_raw_01(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result_raw(context_id,'v_in',v1); +dbe_sql.get_variable_result_raw(context_id,'v_offset',v3); +dbe_sql.get_array_result_raw(context_id,'ary1',v4); 
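+--raw OUT values print as plain hex digits (DEADBEEF below), whereas the bytea
+--variant above printed the same payload as escaped octets (\x4445414442454546)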
+dbe_sql.get_array_result_raw(context_id,'ary2',v5); +--输出结果 +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call call_get_variable_raw_01(); +INFO: v1: DEADBEEF +INFO: v3: DEADBEEF +INFO: v4: DEADBEEF +INFO: v4: DEADBEEF +INFO: v5: DEADBEEF +INFO: v5: DEADBEEF + call_get_variable_raw_01 +-------------------------- + +(1 row) + + +CREATE OR REPLACE FUNCTION x1(a in int) +RETURNS int +AS $$ +DECLARE +BEGIN + a:=11; + commit; + return 12; +END; +$$ LANGUAGE plpgsql; +create or replace procedure y(a in int) +as +declare +begin +savepoint aa; +a:= x1(1); +rollback to aa; +end; +/ +call y(1); +ERROR: no such savepoint +CONTEXT: PL/pgSQL function y(integer) line 6 at ROLLBACK TO SAVEPOINT +drop FUNCTION x1(); +drop procedure y(); diff --git a/src/test/regress/expected/hw_dbms_sql2.out b/src/test/regress/expected/hw_dbms_sql2.out new file mode 100644 index 000000000..d86807a49 --- /dev/null +++ b/src/test/regress/expected/hw_dbms_sql2.out @@ -0,0 +1,399 @@ +----===============1.嵌套============== +CREATE OR REPLACE PACKAGE package_001 IS +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +begin +query := 'call package_001.testpro1(o_ret);'; +context_id := dbe_sql.register_context(); +rollback; +dbe_sql.sql_set_sql(context_id, query, 1); +rollback; +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +INFO: v1: + proc_test3 +------------ + +(1 row) + +drop package package_001; +NOTICE: drop cascades to function public.testpro1(integer) +---===============1.嵌套============== +CREATE OR REPLACE PACKAGE package_001 IS +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +INFO: v1: + proc_test3 +------------ + +(1 row) + +drop package package_001; +NOTICE: drop cascades to function public.testpro1(integer) + +---===============2.全局变量============== + +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a = 10; +raise INFO 'a:%' ,a; +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 
'call '||proc_name||'(o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +INFO: a:10 +CONTEXT: SQL statement "call package_001.testpro1(o_ret);" +PL/pgSQL function proc_test3() line 14 at assignment +INFO: v1: + proc_test3 +------------ + +(1 row) + +drop package package_001; +NOTICE: drop cascades to function public.testpro1(integer) +----========================3.savepoint----------- +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a=11; +savepoint s1; +a = 10; +ROLLBACK TO SAVEPOINT s1; +raise INFO 'a:%' ,a; +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +INFO: a:10 +CONTEXT: SQL statement "call package_001.testpro1(o_ret);" +PL/pgSQL function proc_test3() line 14 at assignment +INFO: v1: + proc_test3 +------------ + +(1 row) + +drop package package_001; +NOTICE: drop cascades to function public.testpro1(integer) +----========================3.重载----------- +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +PROCEDURE testpro1(var3 int, var4 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a = 10; +raise INFO 'a:%' ,a; +commit; +end; +PROCEDURE testpro1(var3 int, var4 int) +is +begin +a = 11; +raise INFO 'a:%' ,a; +rollback; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +--dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +INFO: a:11 +CONTEXT: SQL statement "call package_001.testpro1(o_ret,o_ret2);" +PL/pgSQL function proc_test3() line 15 at assignment +INFO: v1: + proc_test3 +------------ + +(1 row) + +drop package package_001; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function public.testpro1(integer) +drop cascades to function public.testpro1(integer,integer) + +---========================3.重载----------- +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +PROCEDURE testpro1(var3 int, var4 int); +END package_001; +/ +create or replace 
package body package_001 is +procedure testpro1(var3 int) +is +begin +a = 10; +raise INFO 'a:%' ,a; +commit; +end; +PROCEDURE testpro1(var3 int, var4 int) +is +begin +a = 11; +raise INFO 'a:%' ,a; +rollback; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--输出结果 +RAISE INFO 'v1: %' ,v1; +end; +/ +CREATE OR REPLACE PACKAGE package_002 IS +PROCEDURE testpro1(var3 int); +END package_002; +/ +create or replace package body package_002 is +procedure testpro1(var3 int) +is +begin +perform proc_test3(); +commit; +end; +end package_002; +/ +CREATE OR REPLACE PACKAGE package_003 IS +PROCEDURE testpro1(var3 int); +END package_003; +/ +create or replace package body package_003 is +procedure testpro1(var3 int) +is +begin +perform package_002.testpro1(1); +commit; +end; +end package_003; +/ +call package_003.testpro1(1); +INFO: a:11 +CONTEXT: SQL statement "call package_001.testpro1(o_ret,o_ret2);" +PL/pgSQL function proc_test3() line 15 at assignment +referenced column: proc_test3 +SQL statement "SELECT proc_test3()" +PL/pgSQL function testpro1(integer) line 3 at PERFORM +referenced column: testpro1 +SQL statement "SELECT package_002.testpro1(1)" +PL/pgSQL function testpro1(integer) line 3 at PERFORM +INFO: v1: +CONTEXT: referenced column: proc_test3 +SQL statement "SELECT proc_test3()" +PL/pgSQL function testpro1(integer) line 3 at PERFORM +referenced column: testpro1 +SQL statement "SELECT package_002.testpro1(1)" +PL/pgSQL function testpro1(integer) line 3 at PERFORM + testpro1 +---------- + +(1 row) + +drop package package_001; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function public.testpro1(integer) +drop cascades to function public.testpro1(integer,integer) +drop package package_002; +NOTICE: drop cascades to function public.testpro1(integer) +drop package package_003; +NOTICE: drop cascades to function public.testpro1(integer) +-----------------------cursor--------------------------- +create table t1(a int); +insert into t1 values (1); +insert into t1 values (2); +create or replace procedure p2 (c4 in int,c2 out int,c3 out int,c1 out sys_refcursor) as +va t1; +i int; +begin +open c1 for select * from t1; +begin +i = 1/0; +exception +when others then + c3=100; + raise info '%', 'exception1'; +end; +i=2/0; +exception +when others then + + c4=100; + c2=c4+10; + raise info '%', 'exception2'; +end; +/ +select * from p2(1); +INFO: exception1 +INFO: exception2 + c2 | c3 | c1 +-----+-----+---- + 110 | 100 | +(1 row) + +drop table t1; +drop procedure p2; diff --git a/src/test/regress/expected/hw_es_multi_column_stats_eqclass.out b/src/test/regress/expected/hw_es_multi_column_stats_eqclass.out new file mode 100644 index 000000000..b8c43be95 --- /dev/null +++ b/src/test/regress/expected/hw_es_multi_column_stats_eqclass.out @@ -0,0 +1,90 @@ +-- join list occurs any error when optimizing multi-columns statistics using eqClass. 
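+-- That is: building the join list must not raise an error when multi-column
+-- statistics are derived through equivalence classes; the EXPLAIN below must
+-- still produce a valid plan.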
+CREATE SCHEMA equivalent_class; +SET current_schema = equivalent_class; +CREATE TABLE dim_warehouse_info_t ( + warehouse_id numeric(10,0), + warehouse_name character varying(60) +) +WITH (orientation=row, compression=no); +CREATE TABLE wms_abnormal_order ( + id bigint, + abnormal_order_no character varying(384), + abnormal_type character varying(384), + warehouse_id numeric(20,0) +) +WITH (orientation=column, compression=middle); +CREATE TABLE wms_stocktaking_merchandise ( + id bigint, + stocktaking_serno character varying(90), + warehouse_id numeric(20,0), + abnormal_order_no character varying(384) +) +WITH (orientation=column, compression=middle); +CREATE TABLE wms_stocktaking_order ( + id bigint, + stocktaking_serno character varying(90), + stocktaking_type character varying(384), + warehouse_id numeric(20,0) +) +WITH (orientation=column, compression=middle); +SET enable_nestloop = off; +SET explain_perf_mode=pretty; +EXPLAIN(costs off) SELECT /* leading((mer (ab (wh ord)))) leading((ab (wh ord)))*/ + mer.abnormal_order_no , + ab.abnormal_order_no +FROM + wms_stocktaking_merchandise mer +LEFT JOIN + dim_warehouse_info_t wh +ON + wh.warehouse_id = mer.warehouse_id +LEFT JOIN + wms_stocktaking_order ord +ON + ord.warehouse_id = mer.warehouse_id +AND + ord.stocktaking_serno = mer.stocktaking_serno +LEFT JOIN wms_abnormal_order ab + ON ab.warehouse_id = mer.warehouse_id + AND ab.abnormal_order_no = mer.abnormal_order_no +AND ab.abnormal_order_no IN ('AB00000000194178', 'AB00000000194175') +WHERE ord.stocktaking_type = 'AF' +AND mer.abnormal_order_no IS NOT NULL +AND ab.abnormal_type IN ('PICK_ABNORMAL','SORTING_ABNORMAL','PACK_ABNORMAL') +AND wh.warehouse_name ='UKGF Warehouse' +AND mer.abnormal_order_no IN ('AB00000000194178' ,'AB00000000194175' ) +GROUP BY +mer.abnormal_order_no , +ab.abnormal_order_no ; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + HashAggregate + Group By Key: mer.abnormal_order_no, ab.abnormal_order_no + -> Hash Join + Hash Cond: (((mer.abnormal_order_no)::text = (ab.abnormal_order_no)::text) AND (mer.warehouse_id = ab.warehouse_id)) + -> Hash Join + Hash Cond: ((mer.warehouse_id = ord.warehouse_id) AND ((mer.stocktaking_serno)::text = (ord.stocktaking_serno)::text)) + -> Hash Join + Hash Cond: (wh.warehouse_id = mer.warehouse_id) + -> Seq Scan on dim_warehouse_info_t wh + Filter: ((warehouse_name)::text = 'UKGF Warehouse'::text) + -> Hash + -> Row Adapter + -> CStore Scan on wms_stocktaking_merchandise mer + Filter: ((abnormal_order_no IS NOT NULL) AND ((abnormal_order_no)::text = ANY ('{AB00000000194178,AB00000000194175}'::text[]))) + -> Hash + -> Row Adapter + -> CStore Scan on wms_stocktaking_order ord + Filter: ((stocktaking_type)::text = 'AF'::text) + -> Hash + -> Row Adapter + -> CStore Scan on wms_abnormal_order ab + Filter: (((abnormal_order_no)::text = ANY ('{AB00000000194178,AB00000000194175}'::text[])) AND ((abnormal_type)::text = ANY ('{PICK_ABNORMAL,SORTING_ABNORMAL,PACK_ABNORMAL}'::text[]))) +(22 rows) + +DROP SCHEMA equivalent_class CASCADE; +NOTICE: drop cascades to 4 other objects +--?.* +--?.* +--?.* +--?.* diff --git a/src/test/regress/expected/hw_grant_package.out b/src/test/regress/expected/hw_grant_package.out new file mode 100644 index 000000000..f4e31b116 --- /dev/null +++ b/src/test/regress/expected/hw_grant_package.out @@ -0,0 +1,102 @@ +create user test_grant1 
password 'Gauss123'; +create user test_grant2 password 'Gauss123'; +SET SESSION AUTHORIZATION test_grant1 password 'Gauss123'; +create type s_type as ( + id integer, + name varchar, + addr text +); +create or replace package pck3 is +type r2 is table of s_type index by varchar(10); +type r3 is table of s_type index by integer; +procedure p1; +procedure p2(b int, va r2, a int, vb r3); +end pck3; +/ +create or replace package body pck3 is +procedure p1 as +va r2; +vb r3; +b int; +begin +va('a') := (1, 'zhangsan', 'shanghai'); +vb(5) := (10086,'aa','bb'); +vb(233) := (10087,'aa','bb'); +p2(b,va,1,vb); +end; +procedure p2(b int, va r2, a int, vb r3) as +begin +raise info 'va:%', va('a'); +raise info 'vb(233):%', vb(233); +raise info 'vb:%', vb; +end; +end pck3; +/ +CREATE OR REPLACE package pkg_auth_1 +is +a int; +END pkg_auth_1; +/ +CREATE OR REPLACE package body pkg_auth_1 +is +END pkg_auth_1; +/ +CREATE OR REPLACE package pkg_auth_2 +is +b int; +procedure a(); +END pkg_auth_2; +/ +CREATE OR REPLACE package body pkg_auth_2 +is +procedure a +is +begin +pkg_auth_1.a:=1; +end; +END pkg_auth_2; +/ +grant usage on schema test_grant1 to test_grant2; +SET SESSION AUTHORIZATION test_grant2 password 'Gauss123'; +grant execute,drop on all packages in schema test_grant1 to test_grant2; +ERROR: permission denied for package pck3 +DETAIL: N/A +SET SESSION AUTHORIZATION test_grant1 password 'Gauss123'; +grant execute,drop on all packages in schema test_grant1 to test_grant2; +SET SESSION AUTHORIZATION test_grant2 password 'Gauss123'; +call test_grant1.pck3.p1(); +INFO: va:(1,zhangsan,shanghai) +CONTEXT: SQL statement "CALL p2(b,va,1,vb)" +PL/pgSQL function p1() line 9 at PERFORM +INFO: vb(233):(10087,aa,bb) +CONTEXT: SQL statement "CALL p2(b,va,1,vb)" +PL/pgSQL function p1() line 9 at PERFORM +INFO: vb:{"(10086,aa,bb)","(10087,aa,bb)"} +CONTEXT: SQL statement "CALL p2(b,va,1,vb)" +PL/pgSQL function p1() line 9 at PERFORM + p1 +---- + +(1 row) + +begin +test_grant1.pkg_auth_1.a:=1; +end; +/ +begin +test_grant1.pkg_auth_2.a(); +end; +/ +SET SESSION AUTHORIZATION test_grant2 password 'Gauss123'; +drop package test_grant1.pkg_auth_1; +SET SESSION AUTHORIZATION test_grant1 password 'Gauss123'; +drop package test_grant1.pkg_auth_2; +NOTICE: drop cascades to function test_grant1.a() +drop package pck3; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function test_grant1.p1() +drop cascades to function test_grant1.p2(integer,_s_type[],integer,_s_type[]) +drop type s_type; +reset session AUTHORIZATION; +drop user test_grant1; +drop user test_grant2; diff --git a/src/test/regress/expected/hw_package.out b/src/test/regress/expected/hw_package.out index b61fb0048..db0187fea 100644 --- a/src/test/regress/expected/hw_package.out +++ b/src/test/regress/expected/hw_package.out @@ -30,8 +30,16 @@ insert into test_package1 values(50); create table dams_ci.test1(col1 int); create schema pkgschema1; create schema pkgschema2; +set behavior_compat_options='allow_procedure_compile_check'; drop package if exists exp_pkg; NOTICE: package exp_pkg() does not exist, skipping +create or replace package aa +is +procedure a(col1 int,col2 in int); +procedure a(col1 int,col2 in int,col3 out int); +end aa; +/ +ERROR: can not override out param:a create or replace package exp_pkg as user_exp EXCEPTION; end exp_pkg; @@ -257,12 +265,12 @@ QUERY: SELECT autonomous_pkg_150_1.autonomous_f_150_2_out(3) CONTEXT: referenced column: autonomous_f_150_2_out PL/pgSQL function inline_code_block line 2 at PERFORM drop function if 
exists func1; -create or replace package exp_pkg as +create or replace package exp_pkg1 as user_exp EXCEPTION; function func1(param int) return number; -end exp_pkg; +end exp_pkg1; / -create or replace package body exp_pkg as +create or replace package body exp_pkg1 as function func1(param int) return number as begin if (param = 1) then @@ -274,9 +282,9 @@ create or replace package body exp_pkg as raise info 'user_exp raise'; return 0; end; -end exp_pkg; +end exp_pkg1; / -select exp_pkg.func1(1); +select exp_pkg1.func1(1); INFO: user_exp raise CONTEXT: referenced column: func1 func1 @@ -305,7 +313,7 @@ ERROR: duplicate declaration DETAIL: name "data1" already defined CONTEXT: compilation of PL/pgSQL package near line 1 drop package transaction_test; -drop package if exists exp_pkg; +drop package if exists exp_pkg1; NOTICE: drop cascades to function public.func1(integer) drop package autonomous_pkg_150_1; NOTICE: drop cascades to 2 other objects @@ -604,9 +612,11 @@ create or replace package body commit_rollback_test as end commit_rollback_test; / call commit_rollback_test.exec_func4(1); -ERROR: transaction statement in store procedure used as a expression is not supported -CONTEXT: PL/pgSQL function exec_func3() line 4 at COMMIT -PL/pgSQL function exec_func4(integer) line 3 at assignment + exec_func4 +------------ + +(1 row) + create or replace package multi_sql as function func5() return int; function func16() return int; @@ -1364,6 +1374,9 @@ end pckg_test2; / create user test1 password 'Gauss123'; create user test2 password 'Gauss123'; +ALTER DEFAULT PRIVILEGES IN SCHEMA test1 grant execute on packages to test2; +ERROR: alter default privileges is not support package yet. +DETAIL: N/A SET SESSION AUTHORIZATION test1 password 'Gauss123'; set behavior_compat_options='plsql_security_definer'; drop package if exists pkg_auth_1; @@ -1426,6 +1439,154 @@ NOTICE: 1111 (1 row) +create user t2 password 'Gauss_234'; +create user t3 password 'Gauss_234'; +create user t4 password 'Gauss_234'; +create user t5 password 'Gauss_234'; +SET SESSION AUTHORIZATION t2 password 'Gauss_234'; +create table tab1(col1 int); +set behavior_compat_options='plsql_security_definer'; +create or replace package a3 authid current_user +is +procedure func_1(); +end a3; +/ +create or replace package body a3 +is +procedure func_1() +is +begin +insert into test1 values(1); +end; +end a3; +/ +\sf a3.func_1 +CREATE OR REPLACE PROCEDURE t2.a3.func_1() + AUTHID CURRENT_USER PACKAGE +AS DECLARE +begin +insert into test1 values(1); +end; +/ +create or replace procedure test2 +is +curruser varchar2; +begin +select current_user into curruser; +raise notice '%',curruser; +insert into t2.tab1 values(1); +commit; +select current_user into curruser; +raise notice '%',curruser; +insert into t2.tab1 values(2); +end; +/ +select proacl,prosecdef from pg_proc where proname='test2'; + proacl | prosecdef +-----------+----------- + {t2=X/t2} | t +(1 row) + +grant usage on schema t2 to t3; +grant usage,create on schema t2 to t3; +grant execute on all functions in schema t2 to t3; +SET SESSION AUTHORIZATION t3 password 'Gauss_234'; +call t2.test2(); +NOTICE: t2 +NOTICE: t2 + test2 +------- + +(1 row) + +set behavior_compat_options='plsql_security_definer'; +create or replace procedure test3 +is +a int:=1; +begin +a:=2/0; +exception when others then +t2.test2(); +raise; +end; +/ +select proacl,prosecdef from pg_proc where proname='test3'; + proacl | prosecdef +-----------+----------- + {t3=X/t3} | t +(1 row) + +grant usage on schema t3 to t3; 
+grant usage,create on schema t3 to t4; +grant execute on all functions in schema t3 to t4; +SET SESSION AUTHORIZATION t4 password 'Gauss_234'; +set behavior_compat_options='plsql_security_definer'; +create or replace procedure test4 +is +a int:=1; +begin +a:=2/0; +exception when others then +t3.test3(); +commit; +raise; +end; +/ +select proacl,prosecdef from pg_proc where proname='test4'; + proacl | prosecdef +-----------+----------- + {t4=X/t4} | t +(1 row) + +grant usage on schema t4 to t5; +grant usage,create on schema t4 to t5; +grant execute on all functions in schema t4 to t5; +SET SESSION AUTHORIZATION t5 password 'Gauss_234'; +set behavior_compat_options='plsql_security_definer'; +create or replace procedure test5 +is +a int:=1; +begin +a:=2/0; +exception when others then +t4.test4(); +commit; +raise; +end; +/ +select proacl,prosecdef from pg_proc where proname='test5'; + proacl | prosecdef +-----------+----------- + {t5=X/t5} | t +(1 row) + +call t5.test5(); +NOTICE: t2 +CONTEXT: SQL statement "CALL t2.test2()" +PL/pgSQL function t3.test3() line 6 at PERFORM +SQL statement "CALL t3.test3()" +PL/pgSQL function t4.test4() line 6 at PERFORM +SQL statement "CALL t4.test4()" +PL/pgSQL function test5() line 6 at PERFORM +NOTICE: t2 +CONTEXT: SQL statement "CALL t2.test2()" +PL/pgSQL function t3.test3() line 6 at PERFORM +SQL statement "CALL t3.test3()" +PL/pgSQL function t4.test4() line 6 at PERFORM +SQL statement "CALL t4.test4()" +PL/pgSQL function test5() line 6 at PERFORM +ERROR: division by zero +CONTEXT: SQL statement "SELECT 2/0" +PL/pgSQL function t3.test3() line 4 at assignment +SQL statement "CALL t3.test3()" +PL/pgSQL function t4.test4() line 6 at PERFORM +SQL statement "CALL t4.test4()" +PL/pgSQL function test5() line 6 at PERFORM +reset session AUTHORIZATION; +drop user t2 cascade; +drop user t3 cascade; +drop user t4 cascade; +drop user t5 cascade; create or replace package pkg_same_arg_1 is procedure a(); @@ -1528,7 +1689,6 @@ ERROR: function func1 does not exist drop package dams_ci.emp_bonus13; NOTICE: drop cascades to function dams_ci.testpro1() drop package if exists exp_pkg; -NOTICE: package exp_pkg() does not exist, skipping drop trigger if exists insert_trigger on test_trigger_src_tbl; drop table if exists dams_ci.DB_LOG; drop table if exists test_trigger_des_tbl; @@ -1673,43 +1833,29 @@ IS END UT_P_PCKG_DAMS_RECEIVE; --package body definition of UT_P_PCKG_DAMS_RECEIVE / -create or replace function fun123(va in varchar2, vb in varchar2) -return character varying[] -as declare -vc varchar2[]; -begin -vc[1] := va; -vc[2] := vb; -raise info 'out'; -return vc; -end; +create or replace package pck1 as +procedure p1; +procedure p2; +end pck1; / -create or replace package pck123 as -procedure p1(); -function fun123(va in varchar2, vb in varchar2) return character varying[]; -end pck123; -/ -create or replace package body pck123 as +create or replace package body pck1 as procedure p1 as -va varchar2; -vb varchar2; -vc varchar2[]; begin -vc = fun123(va,','); ---vc = fun1(va,vb); +null; end; -function fun123(va in varchar2, vb in varchar2) return character varying[] -as declare -vc varchar2[]; +procedure p2 as begin -vc[1] := va; -vc[2] := vb; -return vc; +drop package pck1; end; -end pck123; +end pck1; / -call pck123.p1(); - p1 +call pck1.p2(); +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function p1() +drop cascades to function p2() +CONTEXT: SQL statement "drop package pck1" +PL/pgSQL function p2() line 3 at SQL statement + p2 ---- (1 row) @@ 
-1741,3 +1887,601 @@ Description: drop a defined package body Syntax: DROP PACKAGE BODY [ IF EXISTS ] package_name; +create schema pkgsch059; +set current_schema=pkgsch059; +create table pkgtbl059(c0 int,c1 number(5),c2 varchar2(20),c3 clob,c4 blob); +insert into pkgtbl059 values(1,1,'varchar1',repeat('clob1',2),'abcdef1'); +insert into pkgtbl059 values(2,2,'varchar10',repeat('clob2',2),'abcdef2'); +create type type001 is(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); +create or replace package pkg059 +is + type type001 is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); + type type002 is table of pkgsch059.type001 index by integer; + type type003 is table of type001 index by integer; + col1 type002:=type002(); + col2 type003:=type003(); + procedure proc059_1(col3 type002,col4 type003); + function func059_2(col5 int) return int; +end pkg059; +/ +create or replace package body pkg059 +is +procedure proc059_1(col3 type002,col4 type003) +is +begin + raise info 'col3 is %',col3; + raise info 'col4 is %',col4; +end; +function func059_2(col5 int) return int +is +begin + pkg059.col1(1):=(1,1,'varchar1',repeat('clob1',2),'abcdef1'); + pkg059.col1(2):=(2,2,'varchar10',repeat('clob2',2),'abcdef2'); + pkg059.col2('1'):=col1(2); + pkg059.col2('-1'):=col1(1); + proc059_1(pkg059.col1,pkg059.col2); + return pkg059.col1(1).c1; +end; +end pkg059; +/ +create or replace package pkg059_1 +is +procedure proc059_1_1(cp1 pkg059.type002,cp2 out pkg059.type003); +procedure func059_1_2(cf1 pkg059.type002,cf2 out pkg059.type003); +end pkg059_1; +/ +create or replace package body pkg059_1 +is +procedure proc059_1_1(cp1 pkg059.type002,cp2 out pkg059.type003) +is +cp3 varchar2(30); +begin + raise info 'pkg059.col1 %',pkg059.col1; + raise info 'pkg059.col2 %',pkg059.col2; + func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2); + raise info 'cp1 is %',cp1; + raise info 'cp2 is %',cp2; + raise info 'cp3 is %',cp3; +end; +procedure func059_1_2(cf1 pkg059.type002,cf2 out pkg059.type003) +is +cf3 number; +cf4 varchar2(30):='cf4'; +begin + cf3:=3; + pkg059.func059_2(cf3); + raise info 'cf2(1).c1 is %',cf2(1); + -- return cf4; +end; +end pkg059_1; +/ +declare +de1 pkg059.type002; +de2 pkg059.type003; +count int:=2; +var2 varchar2(30); +begin +for i in 1..count loop +select c0,c1,c2,c3,c4 into de1(i).c1,de1(i).c2,de1(i).c3,de1(i).c4,de1(i).c5 from pkgtbl059 where c0=i; +select c0+200,c1+200,c2||'200',c3||'200',c4||'200' into de2(i).c1,de2(i).c2,de2(i).c3,de2(i).c4,de2(i).c5 from pkgtbl059 where c0=i; +end loop; + raise info 'de1 is %',de1; + raise info 'de2 is %',de2; + pkg059_1.proc059_1_1(de1,de2); + raise info 'de2 out is %',de2; +end; +/ +INFO: de1 is {"(1,1,varchar1,clob1clob1,0ABCDEF1)","(2,2,varchar10,clob2clob2,0ABCDEF2)"} +INFO: de2 is {"(201,201,varchar1200,clob1clob1200,00ABCDEF1200)","(202,202,varchar10200,clob2clob2200,00ABCDEF2200)"} +INFO: pkg059.col1 {} +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: pkg059.col2 {} +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: col3 is {"(1,1,varchar1,clob1clob1,0ABCDEF1)","(2,2,varchar10,clob2clob2,0ABCDEF2)"} +CONTEXT: SQL statement "CALL proc059_1(pkg059.col1,pkg059.col2)" +PL/pgSQL function func059_2(integer) line 7 at PERFORM +SQL statement "CALL pkg059.func059_2(cf3)" +--? PL/pgSQL function .* +SQL statement "CALL func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2)" +--? 
PL/pgSQL function .* +SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: col4 is {"(2,2,varchar10,clob2clob2,0ABCDEF2)","(1,1,varchar1,clob1clob1,0ABCDEF1)"} +CONTEXT: SQL statement "CALL proc059_1(pkg059.col1,pkg059.col2)" +PL/pgSQL function func059_2(integer) line 7 at PERFORM +SQL statement "CALL pkg059.func059_2(cf3)" +--? PL/pgSQL function .* +SQL statement "CALL func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2)" +--? PL/pgSQL function .* +SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cf2(1).c1 is +CONTEXT: SQL statement "CALL func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2)" +--? PL/pgSQL function .* +SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cp1 is {"(1,1,varchar1,clob1clob1,0ABCDEF1)","(2,2,varchar10,clob2clob2,0ABCDEF2)"} +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cp2 is +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cp3 is +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: de2 out is +create or replace package body pkg059 +is +procedure proc059_1(col3 type002,col4 type003) +is +begin + raise info 'col3 is %',col3; + raise info 'col4 is %',col4; +end; +function func059_2(col5 int) return int +is +begin + pkg059.col1(1):=(1,1,'varchar1',repeat('clob1',2),'abcdef1'); + pkg059.col1(2):=(2,2,'varchar10',repeat('clob2',2),'abcdef2'); + col2('1'):=col1(2); + col2('-1'):=col1(1); + proc059_1(pkg059.col1,pkg059.col2); + return pkg059.col1(1).c1; +end; +end pkg059; +/ +declare +de1 pkg059.type002; +de2 pkg059.type003; +count int:=2; +var2 varchar2(30); +begin +for i in 1..count loop +select c0,c1,c2,c3,c4 into de1(i).c1,de1(i).c2,de1(i).c3,de1(i).c4,de1(i).c5 from pkgtbl059 where c0=i; +select c0+200,c1+200,c2||'200',c3||'200',c4||'200' into de2(i).c1,de2(i).c2,de2(i).c3,de2(i).c4,de2(i).c5 from pkgtbl059 where c0=i; +end loop; + raise info 'de1 is %',de1; + raise info 'de2 is %',de2; + pkg059_1.proc059_1_1(de1,de2); + raise info 'de2 out is %',de2; +end; +/ +INFO: de1 is {"(1,1,varchar1,clob1clob1,0ABCDEF1)","(2,2,varchar10,clob2clob2,0ABCDEF2)"} +INFO: de2 is {"(201,201,varchar1200,clob1clob1200,00ABCDEF1200)","(202,202,varchar10200,clob2clob2200,00ABCDEF2200)"} +INFO: pkg059.col1 {} +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: pkg059.col2 {} +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: col3 is {"(1,1,varchar1,clob1clob1,0ABCDEF1)","(2,2,varchar10,clob2clob2,0ABCDEF2)"} +CONTEXT: SQL statement "CALL proc059_1(pkg059.col1,pkg059.col2)" +PL/pgSQL function func059_2(integer) line 7 at PERFORM +SQL statement "CALL pkg059.func059_2(cf3)" +--? PL/pgSQL function .* +SQL statement "CALL func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2)" +--? 
PL/pgSQL function .* +SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: col4 is {"(2,2,varchar10,clob2clob2,0ABCDEF2)","(1,1,varchar1,clob1clob1,0ABCDEF1)"} +CONTEXT: SQL statement "CALL proc059_1(pkg059.col1,pkg059.col2)" +PL/pgSQL function func059_2(integer) line 7 at PERFORM +SQL statement "CALL pkg059.func059_2(cf3)" +--? PL/pgSQL function .* +SQL statement "CALL func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2)" +--? PL/pgSQL function .* +SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cf2(1).c1 is +CONTEXT: SQL statement "CALL func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2)" +--? PL/pgSQL function .* +SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cp1 is {"(1,1,varchar1,clob1clob1,0ABCDEF1)","(2,2,varchar10,clob2clob2,0ABCDEF2)"} +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cp2 is +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: cp3 is +CONTEXT: SQL statement "CALL pkg059_1.proc059_1_1(de1,de2)" +PL/pgSQL function inline_code_block line 13 at SQL statement +INFO: de2 out is +--test alter package owner +create user alt_package PASSWORD 'gauss@123'; +create user alt_package_2 PASSWORD 'gauss@123'; +create package alt_package.pck1_alter as +procedure p1(); +type r1 is record(a int, b int); +type r2 is table of r1; +type r3 is varray(10) of r1; +type r4 is ref cursor; +end pck1_alter; +/ +create package body alt_package.pck1_alter as +type r5 is record(a int, b int); +type r6 is table of r1; +type r7 is varray(10) of r1; +type r8 is ref cursor; +procedure p1 is +begin +null; +end; +procedure p2 is +begin +null; +end; +end pck1_alter; +/ +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1_alter'); + usename +------------- + alt_package +(1 row) + +SET SESSION AUTHORIZATION alt_package_2 password 'gauss@123'; +alter package alt_package.pck1_alter owner to alt_package_2; +ERROR: permission denied for schema alt_package +DETAIL: N/A +SET SESSION AUTHORIZATION alt_package password 'gauss@123'; +alter package alt_package.pck1_alter owner to alt_package_2; +ERROR: must be member of role "alt_package_2" +reset session AUTHORIZATION; +alter package alt_package.pck1_alter owner to alt_package_2; +SET SESSION AUTHORIZATION alt_package password 'gauss@123'; +alter package alt_package.pck1_alter owner to alt_package; +ERROR: must be owner of package pck1_alter +DETAIL: N/A +reset session AUTHORIZATION; +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1_alter'); + usename +--------------- + alt_package_2 +(1 row) + +SET SESSION AUTHORIZATION alt_package_2 password 'gauss@123'; +call alt_package.pck1_alter.p1(); +ERROR: permission denied for schema alt_package +DETAIL: N/A +reset session AUTHORIZATION; +grant usage on schema alt_package to alt_package_2; +grant execute on package alt_package.pck1_alter to alt_package_2; +SET SESSION AUTHORIZATION alt_package_2 password 'gauss@123'; +call alt_package.pck1_alter.p1(); + p1 +---- + +(1 row) + +declare +va alt_package.pck1_alter.r1; +vb alt_package.pck1_alter.r2; +vc alt_package.pck1_alter.r3; +vd alt_package.pck1_alter.r4; +begin +va := (1,1); +vb(1) := (2,3); +vc(1) := (3,4); +raise info 
'%,%,%', va,vb,vc; +end; +/ +INFO: (1,1),{"(2,3)"},{"(3,4)"} +reset session AUTHORIZATION; +drop package alt_package.pck1_alter; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function alt_package.p1() +drop cascades to function alt_package.p2() +drop user alt_package cascade; +drop user alt_package_2 cascade; +-- test \h alter package +\h alter package +Command: ALTER PACKAGE +Description: change the definition of a package +Syntax: +ALTER PACKAGE package_name OWNER TO new_owner; + +-- test \sf procedure with authid +create or replace procedure p1() +AUTHID CURRENT_USER +is +begin +null; +end; +/ +create or replace procedure p2() +AUTHID DEFINER +is +begin +null; +end; +/ +create or replace procedure p3() +is +begin +null; +end; +/ +CREATE OR REPLACE PACKAGE ABORT IS +PROCEDURE testpro1(var3 int); +END ABORT; +/ +CREATE OR REPLACE PACKAGE body ABORT IS +PROCEDURE testpro1(var3 int) +is +begin +null; +end; +END ABORT; +/ +create or replace package autonomous_pkg_tmp +IS +count_public int := 10; +function autonomous_f_public(num1 int) +return int; +end autonomous_pkg_tmp; +/ +create or replace package body autonomous_pkg_tmp as +count_private int:=20; +function autonomous_f_public(num1 int) +return int +is +declare +re_int int; +begin count_public = num1 + count_public; +count_private = num1 + count_private; +re_int = count_public +count_private; +return re_int; +end; +begin +count_public:=0; +count_private:=0; +end autonomous_pkg_tmp; +/ +create function package_func_overload(col int, col2 out int) +return integer package +as +declare + col_type text; +begin + col := 122; + return 0; +end; +/ +create procedure package_func_overload(col int, col2 out varchar) +package +as +declare + col_type text; +begin + col2 := '122'; +end; +/ +ERROR: function "package_func_overload" already exists with same argument types +reset session AUTHORIZATION; +begin +raise notice '%',autonomous_pkg_tmp.count_public; +end; +/ +NOTICE: 0 +create or replace package pck1 is +type tp1 is varray(10) of int; +function f1(in a int,c out tp1) return int; +end pck1; +/ +create or replace package body pck1 is +function f1(in a int,c out tp1) return int +as +declare +begin +c(1):=a; +return a; +end; +end pck1; +/ +declare +kk pck1.tp1; +x int := 10; +res int; +begin +res := pck1.f1(x,kk)+1; +raise info 'res:%',res; +end; +/ +INFO: res:11 +drop package if exists pck1; +NOTICE: drop cascades to function pkgsch059.f1(integer) +drop package if exists pck2; +NOTICE: package pck2() does not exist, skipping +create or replace package pck1 as +function func1() return int; +end pck1; +/ +create or replace package body pck1 as +xx int :=10; +function func1() return int as +begin + xx := xx + 1; + return xx; +end; +end pck1; +/ +create or replace package pck2 as +function func1() return int; +end pck2; +/ +create or replace package body pck2 as +yy int := pck1.func1(); +function func1() return int as +begin +return yy; +end; +end pck2; +/ +call pck2.func1(); + func1 +------- + 11 +(1 row) + +drop package if exists pck1; +NOTICE: drop cascades to function pkgsch059.func1() +drop package if exists pck2; +NOTICE: drop cascades to function pkgsch059.func1() +create or replace package pck1 as +function func2() return int; +end pck1; +/ +create or replace package body pck1 as +function func1() return int as +begin + return 10; +end; +function func2() return int as +begin + return func1(); +end; +end pck1; +/ +create or replace package pck2 as +function func1() return int; +end pck2; +/ +create or replace package body 
pck2 as +xx int := pck1.func2(); +function func1() return int as +begin + return xx; +end; +end pck2; +/ +call pck2.func1(); + func1 +------- + 10 +(1 row) + +call pck1.func2(); + func2 +------- + 10 +(1 row) + +\sf p1 +CREATE OR REPLACE PROCEDURE pkgsch059.p1() + AUTHID CURRENT_USER +AS DECLARE +begin +null; +end; +/ +\sf p2 +CREATE OR REPLACE PROCEDURE pkgsch059.p2() + AUTHID DEFINER +AS DECLARE +begin +null; +end; +/ +\sf p3 +CREATE OR REPLACE PROCEDURE pkgsch059.p3() + AUTHID DEFINER +AS DECLARE +begin +null; +end; +/ +drop package if exists pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkgsch059.func1() +drop cascades to function pkgsch059.func2() +drop function if exists func1; +NOTICE: function func1() does not exist, skipping +create or replace function func1() return int as +begin +return 5; +end; +/ +create or replace package pck1 as +procedure proc(); +function func2() return int; +end pck1; +/ +create or replace package body pck1 as +xx integer := func1; +procedure proc() as +begin +raise info 'xx is %',xx; +end; +function func2() return int as +begin +return 10; +end; +function func1() return int as +begin +return 20; +end; +end pck1; +/ +call pck1.proc(); +INFO: xx is 20 + proc +------ + +(1 row) + +drop function func1; +create or replace package body pck1 as +xx integer := func1; +procedure proc() as +begin +raise info 'xx is %',xx; +end; +function func2() return int as +begin +return 10; +end; +end pck1; +/ +call pck1.proc(); +ERROR: function func1() does not exist +LINE 1: SELECT func1 + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +QUERY: SELECT func1 +CONTEXT: referenced column: func1 +PL/pgSQL function inline_code_block line 2 during statement block local variable initialization +--exit the session and reconnect before calling again: xx should not be 20; the "function does not exist" error should be raised at definition time or at call time +call pck1.proc(); +ERROR: function func1() does not exist +LINE 1: SELECT func1 + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +QUERY: SELECT func1 +CONTEXT: referenced column: func1 +PL/pgSQL function inline_code_block line 2 during statement block local variable initialization +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkgsch059.proc() +--? drop cascades to package .* +drop package pck2; +NOTICE: drop cascades to function pkgsch059.func1() +drop procedure p1; +drop procedure p2; +drop procedure p3; +reset behavior_compat_options; +drop package autonomous_pkg_tmp; +NOTICE: drop cascades to function pkgsch059.autonomous_f_public(integer) +drop package abort; +NOTICE: drop cascades to function pkgsch059.testpro1(integer) +drop schema pkgsch059 cascade; +NOTICE: drop cascades to 8 other objects +DETAIL: drop cascades to table pkgtbl059 +drop cascades to type type001 +--? drop cascades to package .* +--? drop cascades to function .* +--? drop cascades to function .* +--? drop cascades to function .* +--?
drop cascades to package .* +drop cascades to function package_func_overload(integer) diff --git a/src/test/regress/expected/hw_package_function.out b/src/test/regress/expected/hw_package_function.out index ddb3bd1bf..6b5b87615 100644 --- a/src/test/regress/expected/hw_package_function.out +++ b/src/test/regress/expected/hw_package_function.out @@ -104,7 +104,6 @@ DECLARE para3 bigint = 2; para4 varchar; BEGIN - package_func_overload(1, 1); package_func_overload(1, para1); package_func_overload(1, para2); package_func_overload(1, para3); @@ -132,12 +131,10 @@ BEGIN package_func_overload(col2 => para2, col => para1); END; / -ERROR: Named argument "col2" can not be a const -CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 6 call package_func_overload(1, 1); - col2 ------- - 0 + package_func_overload +----------------------- + 0 (1 row) call package_func_overload(1, '1'); @@ -191,7 +188,7 @@ begin return 0; end; $$ language plpgsql; -ERROR: Do not allow package function overload not package function. +ERROR: Do not allow package function replace not package function. --test case for none overload function create or replace function package_func_overload_3(col int, col2 out int) return integer package @@ -339,7 +336,6 @@ BEGIN return 1; END; $$ language plpgsql; -ERROR: Package function does not support function overload which has the same type argument. select test_proc_define('hello', 'world', 'NO BIND'); test_proc_define ------------------ @@ -731,17 +727,19 @@ end; / INFO: buffer: 123 drop schema package_schema cascade; -NOTICE: drop cascades to 17 other objects +NOTICE: drop cascades to 20 other objects DETAIL: drop cascades to function package_schema.get_sal(character varying) drop cascades to function package_schema.get_sal(integer) drop cascades to function package_schema.test_package_function(integer) +drop cascades to function package_schema.package_func_overload(integer,integer) drop cascades to function package_schema.package_func_overload(integer,smallint) drop cascades to function package_schema.package_func_overload(integer,bigint) drop cascades to function package_schema.package_func_overload(integer) -drop cascades to function package_schema.package_func_overload(integer) drop cascades to function package_schema.package_func_overload_1(integer,character varying) drop cascades to function package_schema.package_func_overload_2(integer,bigint) +drop cascades to function package_schema.package_func_overload_3(integer) drop cascades to function package_schema.package_func_overload_3(integer,integer) +drop cascades to function package_schema.package_func_overload_4(integer) drop cascades to function package_schema.package_func_overload_4(integer,integer) drop cascades to function package_schema.test_para1(integer) drop cascades to function package_schema.test_para1(smallint) @@ -749,6 +747,7 @@ drop cascades to function package_schema.read_file(integer,integer) drop cascades to function package_schema.read_file(integer,integer,integer) drop cascades to function package_schema.read_file1(integer) drop cascades to function package_schema.test_proc_define(character varying,character varying,character varying) +drop cascades to function package_schema.test_proc_define(character varying,character varying) drop schema package_nps cascade; NOTICE: drop cascades to 15 other objects DETAIL: drop cascades to function read_file(integer,integer,integer) @@ -767,3 +766,83 @@ drop cascades to function test_default_out(integer,integer,integer) drop cascades to function 
test_para2(text) drop cascades to function test_para2(text,character varying) \c regression; +drop schema if exists s1; +NOTICE: schema "s1" does not exist, skipping +drop schema if exists s2; +NOTICE: schema "s2" does not exist, skipping +create schema s1; +create schema s2; +set current_schema to s1; +create function package_func_overload_1(col int) +returns integer as $$ +declare +begin + return 0; +end; +$$ language plpgsql; +set current_schema to s2; +create function package_func_overload_1(col int) +returns integer as $$ +declare +begin + return 0; +end; +$$ language plpgsql; +reset current_schema; +drop schema s1 cascade; +NOTICE: drop cascades to function s1.package_func_overload_1(integer) +drop schema s2 cascade; +NOTICE: drop cascades to function s2.package_func_overload_1(integer) +create schema s; +set current_schema to s; +CREATE OR REPLACE PACKAGE p1 IS +PROCEDURE testpro1(var3 int); +PROCEDURE testpro1(var2 char); +END p1; +/ +create function testpro1(col int) +returns integer as $$ +declare +begin + return 0; +end; +$$ language plpgsql; +reset current_schema; +drop schema s cascade; +NOTICE: drop cascades to 4 other objects +--?DETAIL: drop cascades to package .* +drop cascades to function s.testpro1(integer) +drop cascades to function s.testpro1(character) +drop cascades to function s.testpro1(integer) +drop package if exists pkg112; +NOTICE: package pkg112() does not exist, skipping +create or replace package pkg112 +as +type ty1 is table of integer index by integer; +procedure p1(v1 in ty1,v2 out ty1,v3 inout ty1,v4 int); +procedure p1(v2 out ty1,v3 inout ty1,v4 int); +procedure p4(); +pv1 ty1; +end pkg112; +/ +set behavior_compat_options='proc_outparam_override'; +drop package if exists pkg112; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function public.p1(_int4[],_int4[],integer) +drop cascades to function public.p1(_int4[],integer) +drop cascades to function public.p4() +create or replace package pkg112 +as +type ty1 is table of integer index by integer; +procedure p1(v1 in ty1,v2 out ty1,v3 inout ty1,v4 int); +procedure p1(v2 out ty1,v3 inout ty1,v4 int); +procedure p4(); +pv1 ty1; +end pkg112; +/ +drop package if exists pkg112; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function public.p1(_int4[],_int4[],integer) +drop cascades to function public.p1(_int4[],integer) +drop cascades to function public.p4() +set behavior_compat_options=''; diff --git a/src/test/regress/expected/hw_package_single.out b/src/test/regress/expected/hw_package_single.out new file mode 100644 index 000000000..f99807a79 --- /dev/null +++ b/src/test/regress/expected/hw_package_single.out @@ -0,0 +1,542 @@ +create database pl_test_pkg_single DBCOMPATIBILITY 'pg'; +\c pl_test_pkg_single; +--test dbe_utility +CREATE OR REPLACE PROCEDURE p0() +AS +declare + a integer; + c integer; + b integer; +BEGIN + a:=1; + c:=0; + b := a / c; + dbe_output.print_line('result is: '||to_char(b)); +END; +/ +CREATE OR REPLACE PROCEDURE p1() +AS +BEGIN + p0(); +END; +/ +CREATE OR REPLACE PROCEDURE p2() +AS +BEGIN + p1(); +END; +/ +--test dbe_utility.format_error_backtrack +CREATE OR REPLACE PROCEDURE p3_error() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(dbe_utility.format_error_backtrace()); +END; +/ +call p3_error(); +33816706: PL/pgSQL function p0() line 9 at assignment +33816706: PL/pgSQL function p1() line 3 at PERFORM +33816706: PL/pgSQL function p2() line 3 at PERFORM +33816706: PL/pgSQL function p3_error() line 3 at PERFORM + + 
p3_error +---------- + +(1 row) + +--test dbe_utility.format_error_stack +CREATE OR REPLACE PROCEDURE p3_error_stack() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(dbe_utility.format_error_stack()); +END; +/ +call p3_error_stack(); +33816706: division by zero + p3_error_stack +---------------- + +(1 row) + +CREATE OR REPLACE PROCEDURE p0() +AS +declare + a integer; + c integer; + b integer; +BEGIN + a:=1; + c:=1; + b := a / c; + dbe_output.print_line('result is: '||to_char(b)); +END; +/ +--test dbe_utility.format_error_backtrace +CREATE OR REPLACE PROCEDURE p3_noError() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(utility.format_error_backtrace()); +END; +/ +call p3_noError(); +result is: 1 + p3_noerror +------------ + +(1 row) + +--test dbe_utility.format_error_stack +CREATE OR REPLACE PROCEDURE p3_noError_stack() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(utility.format_error_stack()); +END; +/ +call p3_noError_stack(); +result is: 1 + p3_noerror_stack +------------------ + +(1 row) + +--test dbe_utility.format_call_stack +CREATE OR REPLACE PROCEDURE p0() +AS +declare + a integer; + c integer; + b integer; +BEGIN + a:=1; + c:=1; + b := a / c; + dbe_output.print_line('result is: '||to_char(b)); + dbe_output.print_line(dbe_utility.format_call_stack()); +END; +/ +CREATE OR REPLACE PROCEDURE p3_call_stack() +AS +BEGIN + p2(); +END; +/ +call p3_call_stack(); +result is: 1 + 3 dbe_utility.format_call_stack() + 11 p0() + 3 p1() + 3 p2() + 3 p3_call_stack() + + p3_call_stack +--------------- + +(1 row) + +--test dbe_utility.get_time +CREATE OR REPLACE PROCEDURE test_get_time1() +AS +declare + start_time bigint; + end_time bigint; +BEGIN + start_time:= dbe_utility.get_time (); + pg_sleep(1); + end_time:=dbe_utility.get_time (); + dbe_output.print_line(end_time - start_time); +END; +/ +call test_get_time1(); +100 + test_get_time1 +---------------- + +(1 row) + +CREATE OR REPLACE PROCEDURE test_get_time5() +AS +declare + start_time bigint; + end_time bigint; +BEGIN + start_time:= dbe_utility.get_time (); + pg_sleep(5); + end_time:=dbe_utility.get_time (); + dbe_output.print_line(end_time - start_time); +END; +/ +call test_get_time5(); +500 + test_get_time5 +---------------- + +(1 row) + +--test dbe_match.edit_distance_similarity +select dbe_match.edit_distance_similarity('abcd', 'abcd'); + edit_distance_similarity +-------------------------- + 100 +(1 row) + +select dbe_match.edit_distance_similarity('aaaa', 'a'); + edit_distance_similarity +-------------------------- + 25 +(1 row) + +select dbe_match.edit_distance_similarity('aaaa', 'aaa'); + edit_distance_similarity +-------------------------- + 75 +(1 row) + +--test dbe_raw +select dbe_raw.bit_or('a1234', '12'); + bit_or +-------- + 1A1234 +(1 row) + +select dbe_raw.bit_or('0000', '1111'); + bit_or +-------- + 1111 +(1 row) + +select dbe_raw.bit_or('0000', '11'); + bit_or +-------- + 1100 +(1 row) + +select dbe_raw.bit_or('baf234', '11'); + bit_or +-------- + BBF234 +(1 row) + +select dbe_raw.bit_or('baf234', '00'); + bit_or +-------- + BAF234 +(1 row) + +CREATE OR REPLACE PROCEDURE test_bitor() +AS +declare + a raw; + b raw; +BEGIN + a:= 'abc123'; + b:= '12'; + dbe_output.print_line(dbe_raw.bit_or(a, b)); +END; +/ +call test_bitor(); +BBC123 + test_bitor +------------ + +(1 row) + +select DBE_RAW.cast_from_varchar2_to_raw('aaa'); + cast_from_varchar2_to_raw +--------------------------- + 616161 +(1 row) + +select dbe_raw.cast_to_varchar2('616161'); + 
cast_to_varchar2 +------------------ + aaa +(1 row) + +select DBE_RAW.cast_from_varchar2_to_raw('cf12'); + cast_from_varchar2_to_raw +--------------------------- + 63663132 +(1 row) + +select dbe_raw.cast_to_varchar2('63663132'); + cast_to_varchar2 +------------------ + cf12 +(1 row) + +select DBE_RAW.cast_from_varchar2_to_raw('341'); + cast_from_varchar2_to_raw +--------------------------- + 333431 +(1 row) + +select dbe_raw.cast_to_varchar2('333431'); + cast_to_varchar2 +------------------ + 341 +(1 row) + +select dbe_raw.substr('aba', 1, 2); + substr +-------- + ab +(1 row) + +CREATE OR REPLACE PROCEDURE test_substr() +AS +declare + a raw; +BEGIN + a:= 'abc123'; + dbe_output.print_line(dbe_raw.substr(a, 3, 2)); +END; +/ +call test_substr(); + test_substr +------------- + +(1 row) + +--test dbe_session +select DBE_SESSION.set_context('test', 'gaussdb', 'one'); + set_context +------------- + +(1 row) + +select DBE_SESSION.search_context('test', 'gaussdb'); + search_context +---------------- + one +(1 row) + +select DBE_SESSION.set_context('test', 'gaussdb', 'two'); + set_context +------------- + +(1 row) + +select DBE_SESSION.search_context('test', 'gaussdb'); + search_context +---------------- + two +(1 row) + +select DBE_SESSION.set_context('test', 'gaussdb', 'two'); + set_context +------------- + +(1 row) + +select DBE_SESSION.search_context('test', 'gaussdb'); + search_context +---------------- + two +(1 row) + +select DBE_SESSION.clear_context('test', 'test','gaussdb'); + clear_context +--------------- + +(1 row) + +select DBE_SESSION.search_context('test', 'gaussdb'); +ERROR: could not find attribute gaussdb in current namespace +DETAIL: N/A +CONTEXT: PL/pgSQL function dbe_session.search_context(text,text) line 3 at RETURN +referenced column: search_context +create or replace function test_set_context ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.set_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; +call test_set_context('test', 'name', 'tony'); + test_set_context +------------------ + +(1 row) + +create or replace function test_sys_context ( + namespace text, + attribute text +) +returns text AS $$ +BEGIN + return DBE_SESSION.search_context(namespace, attribute); +END; +$$ LANGUAGE plpgsql; +call test_sys_context('test', 'name'); + test_sys_context +------------------ + tony +(1 row) + +create or replace function test_clear_context2 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.clear_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; +call test_clear_context('test', 'text', 'name'); +ERROR: function "test_clear_context" doesn't exist +call test_sys_context('test', 'name'); + test_sys_context +------------------ + tony +(1 row) + +create or replace function test_set_context2 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.set_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; +call test_set_context2('CTX_P_GCMS_BIND_PKG', 'type', 'AAA'); + test_set_context2 +------------------- + +(1 row) + +create or replace function test_sys_context2 ( + namespace text, + attribute text +) +returns text AS $$ +BEGIN + return DBE_SESSION.search_context(namespace, attribute); +END; +$$ LANGUAGE plpgsql; +call test_sys_context2('CTX_P_GCMS_BIND_PKG ', 'type',); +ERROR: syntax error at or near ")" +LINE 1: call test_sys_context2('CTX_P_GCMS_BIND_PKG ', 'type',); + ^ +create or replace function test_clear_context2 
( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.clear_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; +call test_clear_context2('test', 'text', 'name'); +ERROR: There is no context named test +DETAIL: N/A +CONTEXT: SQL statement "CALL pkg_util.session_clear_context(namespace,client_identifier,attribute)" +PL/pgSQL function dbe_session.clear_context(text,text,text) line 3 at PERFORM +SQL statement "CALL dbe_session.clear_context(namespace,attribute,value)" +PL/pgSQL function test_clear_context2(text,text,text) line 3 at PERFORM +call test_sys_context2('test', 'name'); +ERROR: There is no context named test +DETAIL: N/A +CONTEXT: PL/pgSQL function dbe_session.search_context(text,text) line 3 at RETURN +PL/pgSQL function test_sys_context2(text,text) line 3 at RETURN +create or replace function test_set_context3 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.set_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; +call test_set_context('test1', 'name1', 'tony1'); + test_set_context +------------------ + +(1 row) + +create or replace function test_sys_context3 ( + namespace text, + attribute text +) +returns text AS $$ +BEGIN + return DBE_SESSION.search_context(namespace, attribute); +END; +$$ LANGUAGE plpgsql; +call test_sys_context('test1', 'name1'); + test_sys_context +------------------ + tony1 +(1 row) + +create or replace function test_clear_context3 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.clear_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; +call test_clear_context('test1', 'text1', 'name1'); +ERROR: function "test_clear_context" doesn't exist +call test_sys_context('test', 'name'); +ERROR: There is no context named test +DETAIL: N/A +CONTEXT: PL/pgSQL function dbe_session.search_context(text,text) line 3 at RETURN +PL/pgSQL function test_sys_context(text,text) line 3 at RETURN +create or replace procedure proc_test1(i_col1 in varchar2, o_ret out varchar2) as +begin +null; +end; +/ +create or replace procedure proc_test1(i_col1 in varchar2, o_ret out varchar2) as +v_cursor_id number; +o_ret1 varchar2; +v_execute number; +v_sql text; +begin +o_ret:='1'; +o_ret1 := '0'; +v_sql:='begin proc_test(i_col1,o_ret1); end;'; +v_cursor_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(v_cursor_id,v_sql,1); +perform dbe_sql.sql_bind_variable(v_cursor_id,'i_col1',i_col1,10); +perform dbe_sql.sql_bind_variable(v_cursor_id,'o_col1',o_ret1,10); +v_execute:=dbe_sql.sql_run(v_cursor_id); +exception +when others then +if dbe_sql.is_active(v_cursor_id) then +dbe_sql.sql_unregister_context(v_cursor_id); +end if; +end; +/ +select proc_test1('1',''); +ERROR: function proc_test1(unknown, unknown) does not exist +LINE 1: select proc_test1('1',''); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+CONTEXT: referenced column: proc_test1 +drop procedure proc_test1; +\c regression; +drop database IF EXISTS pl_test_pkg_single; diff --git a/src/test/regress/expected/hw_package_variable.out b/src/test/regress/expected/hw_package_variable.out new file mode 100644 index 000000000..2120467d6 --- /dev/null +++ b/src/test/regress/expected/hw_package_variable.out @@ -0,0 +1,2878 @@ +drop schema if exists pkg_val_1 cascade; +NOTICE: schema "pkg_val_1" does not exist, skipping +drop schema if exists pkg_val_2 cascade; +NOTICE: schema "pkg_val_2" does not exist, skipping +create schema pkg_val_1; +create schema pkg_val_2; +set current_schema = pkg_val_2; +set behavior_compat_options='allow_procedure_compile_check'; +--test package val assign +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is varray(10) of int; +type r3 is table of int; +type r4 is record (a r2); +va r1; +vb r2; +vc r3; +vd int; +vf r4; +end pck1; +/ +create or replace package body pck1 is +end pck1; +/ +create or replace package pck2 is +ve int; +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +begin +pck1.va := (1,2); +pck1.va := (3,4); +pck1.va.a := 5; +pck1.va.a := pck1.va.a + 1; +pck1.vb(1) := 1; +pck1.vb(2) := 2 + pck1.vb(1); +pck1.vc(1) := 1; +pck1.vc(2) := 2 + pck1.vc(1); +pck1.vd := 4; +pck1.vd := 5; +pck1.vf.a(1) := 1; +pck1.vf.a(2) := 1 + pck1.vf.a(1); +pck2.ve := 6; +pck2.ve := 7; +raise info '%, %, %, %, %, %', pck1.va, pck1.vb, pck1.vc, pck1.vd, pck1.vf, ve; +end; +end pck2; +/ +call pck2.p1(); +INFO: (6,4), {1,3}, {1,3}, 5, ("{1,2}"), 7 + p1 +---- + +(1 row) + +create or replace package body pck2 is +procedure p1 as +begin +select 11,22 into pck1.va; +select 33 into pck1.va.a; +select 11 into pck1.vb(1); +select 22 into pck1.vb(2); +select 33 into pck1.vc(1); +select 44 into pck1.vc(2); +select 55 into pck1.vd; +select 66 into pck1.vd; +select 77 into pck1.vf.a(1); +select 77 into pck2.ve; +select 88 into pck2.ve; +raise info '%, %, %, %, %,%', pck1.va, pck1.vb, pck1.vc, pck1.vd, pck1.vf, ve; +end; +end pck2; +/ +call pck2.p1(); +INFO: (33,22), {11,22}, {33,44}, 66, ("{77,2}"),88 + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +--test cross-schema pkg.val +create or replace package pkg_val_1.pck1 is +type r1 is record (a int, b int); +type r2 is varray(10) of int; +type r3 is table of int; +type r4 is record (a r2); +va r1; +vb r2; +vc r3; +vd int; +vf r4; +end pck1; +/ +create or replace package body pkg_val_1.pck1 is +end pck1; +/ +create or replace package pck2 is +ve int; +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +begin +pkg_val_1.pck1.va := (1,2); +pkg_val_1.pck1.va := (3,4); +pkg_val_1.pck1.va.a := 5; +pkg_val_1.pck1.vb(1) := 1; +pkg_val_1.pck1.vb(2) := pkg_val_1.pck1.vb(1) + 11; +pkg_val_1.pck1.vc(1) := 1; +pkg_val_1.pck1.vc(2) := 2 + pkg_val_1.pck1.vc(1); +pkg_val_1.pck1.vd := 4; +pkg_val_1.pck1.vd := 5; +pkg_val_1.pck1.vf.a(1) := 11; +pkg_val_2.pck2.ve := 6; +pkg_val_2.pck2.ve := 7; +raise info '%, %, %, %, %, %', pkg_val_1.pck1.va, pkg_val_1.pck1.vb, pkg_val_1.pck1.vc, pkg_val_1.pck1.vd, pkg_val_1.pck1.vf, ve; +end; +end pck2; +/ +call pck2.p1(); +INFO: (5,4), {1,12}, {1,3}, 5, ({11}), 7 + p1 +---- + +(1 row) + +create or replace package body pck2 is +procedure p1 as +begin +select 11,22 into pkg_val_1.pck1.va; +--select 33 into pkg_val_1.pck1.va.a; not supported now +select 11 into pkg_val_1.pck1.vb(1); +select 22 into
pkg_val_1.pck1.vb(2); +select 33 into pkg_val_1.pck1.vc(1); +select 44 into pkg_val_1.pck1.vc(2); +select 55 into pkg_val_1.pck1.vd; +select 66 into pkg_val_1.pck1.vd; +select 77 into pkg_val_2.pck2.ve; +select 88 into pkg_val_2.pck2.ve; +raise info '%, %, %, %, %', pkg_val_1.pck1.va, pkg_val_1.pck1.vb, pkg_val_1.pck1.vc, pkg_val_1.pck1.vd, ve; +end; +end pck2; +/ +call pck2.p1(); +INFO: (11,22), {11,22}, {33,44}, 66, 88 + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pkg_val_1.pck1; +--test pkg.array.extend +create or replace package pck1 is +type ta is varray(10) of varchar(100); +tb ta; +end pck1; +/ +create or replace package pck2 is +procedure proc1; +end pck2; +/ +create or replace package body pck2 is +procedure proc1() is +begin +pck1.tb.delete; +end; +end pck2; +/ +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.proc1() +DROP PACKAGE pck1; +--test cross-package cursor +DROP TABLE if exists test_1; +NOTICE: table "test_1" does not exist, skipping +create table test_1(a int, b int); +insert into test_1 values(11,22); +create or replace package pck1 is +cursor c1 is select * from test_1; +end pck1; +/ +create or replace package pck2 is +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +open pck1.c1; +fetch pck1.c1 into va; +raise info '%',va; +end; +end pck2; +/ +call pck2.p1(); +INFO: (11,22) + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +DROP TABLE test_1; +--test pkg.row.col reference +create or replace package pck1 is +type r1 is record (a int, b int); +va r1; +end pck1; +/ +create or replace package pck2 is +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +begin +pck1.va.a := 1; +pck1.va := (1,2); +pck1.va.a := pck1.va.b + 1; +pck1.va.a := pck1.va.a + 1; +raise info '%,', pck1.va; +end; +end pck2; +/ +call pck2.p1(); +INFO: (4,2), + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +--test table var index by varchar2 +create or replace package pck1_zjc is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; +end pck1_zjc; +/ +declare + a integer; +begin + pck1_zjc.aa('a') = 1; + pck1_zjc.aa('b') = 2; + pck1_zjc.aa('c') = pck1_zjc.aa('a') + pck1_zjc.aa('b'); + RAISE INFO '%', pck1_zjc.aa; +end; +/ +INFO: {1,2,3} +DROP PACKAGE pck1_zjc; +--test table var index by varchar2 with different schema +create or replace package pkg_val_1.pck1_zjc is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; +end pck1_zjc; +/ +declare + a integer; +begin + pkg_val_1.pck1_zjc.aa('a') = 1; + pkg_val_1.pck1_zjc.aa('b') = 2; + pkg_val_1.pck1_zjc.aa('c') = pkg_val_1.pck1_zjc.aa('a') + pkg_val_1.pck1_zjc.aa('b'); + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; +end; +/ +INFO: {1,2,3} +DROP PACKAGE pkg_val_1.pck1_zjc; +--test for table of multiset +create or replace package pck1_zjc is + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; +end pck1_zjc; +/ +declare + a integer; +begin + pck1_zjc.aa(0) = 1; + pck1_zjc.aa(2) = 2; + pck1_zjc.bb(0) = 2; + pck1_zjc.bb(1) = NULL; + pck1_zjc.aa = pck1_zjc.aa multiset union pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset union distinct pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset intersect pck1_zjc.bb; + RAISE INFO '%',
pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset intersect distinct pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset except pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset except distinct pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; +end; +/ +INFO: {1,NULL,2,2,NULL} +INFO: {1,NULL,2} +INFO: {NULL,2} +INFO: {NULL,2} +INFO: {} +INFO: {} +DROP package pck1_zjc; +--test for table of multiset:record of table +create or replace package pck1_zjc is + TYPE SalTabTyp is TABLE OF integer; + TYPE r1 is record (a SalTabTyp); + aa r1; + bb r1; +end pck1_zjc; +/ +declare + a integer; + begin + pck1_zjc.aa.a(0) = 1; + pck1_zjc.aa.a(2) = 2; + pck1_zjc.bb.a(0) = 2; + pck1_zjc.bb.a(1) = NULL; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset union pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset union distinct pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset intersect pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset intersect distinct pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset except pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset except distinct pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; +end; +/ +INFO: ("{1,NULL,2,2,NULL}") +INFO: {1,NULL,2} +INFO: {NULL,2} +INFO: {NULL,2} +INFO: {} +INFO: {} +DROP package pck1_zjc; +--test for table of multiset: cross-schema +create or replace package pkg_val_1.pck1_zjc is + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; +end pck1_zjc; +/ +declare + a integer; + begin + pkg_val_1.pck1_zjc.aa(0) = 1; + pkg_val_1.pck1_zjc.aa(2) = 2; + pkg_val_1.pck1_zjc.bb(0) = 2; + pkg_val_1.pck1_zjc.bb(1) = NULL; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset union pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset union distinct pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset intersect pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset intersect distinct pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset except pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset except distinct pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; +end; +/ +INFO: {1,NULL,2,2,NULL} +INFO: {1,NULL,2} +INFO: {NULL,2} +INFO: {NULL,2} +INFO: {} +INFO: {} +DROP package pkg_val_1.pck1_zjc; +--test record of table +declare + TYPE SalTabTyp is TABLE OF integer; + TYPE r1 is record (a SalTabTyp); + aa r1; + bb r1; + begin + aa.a(0) = 1; + aa.a(2) = 2; + bb.a(0) = 2; + bb.a(1) = NULL; + aa.a = aa.a multiset union bb.a; + RAISE INFO '%', aa; + aa.a = aa.a multiset union distinct bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset intersect bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset intersect distinct bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset except bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset except distinct bb.a; + RAISE INFO '%', aa.a; +end; +/ +INFO: ("{1,NULL,2,2,NULL}") +INFO: {1,NULL,2} +INFO: {NULL,2} +INFO: {NULL,2} +INFO: {} +INFO: {} +--test record of record of table: not supported yet +-- create or replace procedure pro1 is +-- TYPE
SalTabTyp is TABLE OF integer; +-- TYPE r1 is record (a SalTabTyp); +-- TYPE r2 is record (a r1); +-- aa r2; +-- bb r2; +-- begin +-- aa.a.a(0) = 1; +-- aa.a.a(2) = 2; +-- bb.a.a(0) = 2; +-- bb.a.a(1) = NULL; +-- aa.a.a = aa.a.a multiset union bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset union distinct bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset intersect bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset intersect distinct bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset except bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset except distinct bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- end; +-- / +--test package constant variable +create or replace package pck1 is + va constant int := 1; +end pck1; +/ +declare +vb int; +begin +vb := 2; +pck1.va := vb; +end; +/ +ERROR: "va" is declared CONSTANT +LINE 5: pck1.va := vb; + ^ +QUERY: DECLARE +vb int; +begin +vb := 2; +pck1.va := vb; +end +DROP package pck1; +--test error message when not found variable +create or replace package pck1 is + va constant int := 1; +end pck1; +/ +declare +vb int; +begin +vb := 2; +pck1.vb := vb; +end; +/ +ERROR: "pck1.vb" is not a known variable +LINE 5: pck1.vb := vb; + ^ +QUERY: DECLARE +vb int; +begin +vb := 2; +pck1.vb := vb; +end +declare +vb int; +begin +vb := 2; +pck2.vb := vb; +end; +/ +ERROR: "pck2.vb" is not a known variable +LINE 5: pck2.vb := vb; + ^ +QUERY: DECLARE +vb int; +begin +vb := 2; +pck2.vb := vb; +end +DROP package pck1; +--test nested references to package variables +create or replace package pck1 is +type a is record(a1 varchar2); +type b is record(b1 a,b2 varchar2); +vb b; +end pck1; +/ +--2. cross-package nested record types +create or replace package pck2 is +procedure proc1(); +end pck2; +/ +create or replace package body pck2 is +procedure proc1() as +P1 varchar2; +begin +pck1.vb.b1.a1 :='abc'; +P1 :=pck1.vb.b1.a1; +raise info '%', P1; +end; +end pck2; +/ +call pck2.proc1(); +INFO: abc + proc1 +------- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.proc1() +DROP PACKAGE pck1; +--test procedure param duplicate with package +DROP TABLE if exists test_t1; +NOTICE: table "test_t1" does not exist, skipping +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(r1 int, r2 int, r3 int, r4 int, ve int); +end pck1; +/ +DROP package pck1; +NOTICE: drop cascades to function pkg_val_2.p1(integer,integer,integer,integer,integer) +--test procedure var duplicate with package public +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); + procedure p2(a int); +end pck1; +/ +create or replace package body pck1 is + procedure p1(a int) is + va int; + vb int; + vc int; + vd int; + ve int; + vf int; + r1 int; + r2 int; + r3 int; + r4 int; + begin + va := a; + vb := va + 1; + vc := vb + 1; + vd := vc + 1; + ve := vd + 1; + vf := ve + 1; + r1 := ve + 1; + r2 := r1 + 1; + r3 := r2 + 1; + r4 := r3 + 1; + raise info '%, %, %, %, %, %, %, %, %, %', va,vb,vc,vd,ve,vf,r1,r2,r3,r4; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1
, 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ +call pck1.p1(10); +INFO: 10, 11, 12, 13, 14, 15, 15, 16, 17, 18 + p1 +---- + +(1 row) + +call pck1.p2(10); +INFO: (1,2), {3,4,5}, {7,8,9}, (1,2), {3,4,5}, {7,8,9} + p2 +---- + +(1 row) + +DROP package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_val_2.p1(integer) +drop cascades to function pkg_val_2.p2(integer) +--test procedure var duplicate with package private +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + procedure p1(a int); + procedure p2(a int); +end pck1; +/ +create or replace package body pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int) is + va int; + vb int; + vc int; + vd int; + ve int; + vf int; + r1 int; + r2 int; + r3 int; + r4 int; + begin + va := a; + vb := va + 1; + vc := vb + 1; + vd := vc + 1; + ve := vd + 1; + vf := ve + 1; + r1 := ve + 1; + r2 := r1 + 1; + r3 := r2 + 1; + r4 := r3 + 1; + raise info '%, %, %, %, %, %, %, %, %, %', va,vb,vc,vd,ve,vf,r1,r2,r3,r4; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ +call pck1.p1(10); +INFO: 10, 11, 12, 13, 14, 15, 15, 16, 17, 18 + p1 +---- + +(1 row) + +call pck1.p2(10); +INFO: (1,2), {3,4,5}, {7,8,9}, (1,2), {3,4,5}, {7,8,9} + p2 +---- + +(1 row) + +DROP package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_val_2.p1(integer) +drop cascades to function pkg_val_2.p2(integer) +--test procedure type duplicate with package public +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); + procedure p2(a int); +end pck1; +/ +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int := 1, b int := 2); + type r2 is record (a int := 3, b int := 4); + type r3 is record (a int := 7, b int := 6); + type r4 is record (a int := 9, b int := 8); + type va is record (a int := 11, b int := 10); + type vb is record (a int := 13, b int := 12); + type vc is record (a int := 15, b int := 14); + type vd is record (a int := 17, b int := 16); + type ve is record (a int := 19, b int := 18); + type vf is record (a int := 21, b int := 20); + val1 r1; + val2 r2; + val3 r3; + val4 r4; + val5 va; + val6 vb; + val7 vc; + val8 vd; + val9 ve; + val10 vf; + begin + raise info '%, %, %, %, %, %, %, %, %, %', val1,val2,val3,val4,val5,val6,val7,val8,val9,val10; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ +call pck1.p1(10); +INFO: (1,2), (3,4), (7,6), (9,8), (11,10), (13,12), (15,14), (17,16), (19,18), (21,20) + p1 +---- + +(1 row) + +call pck1.p2(10); +INFO: (1,2), {3,4,5}, 
{7,8,9}, (1,2), {3,4,5}, {7,8,9} + p2 +---- + +(1 row) + +DROP package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_val_2.p1(integer) +drop cascades to function pkg_val_2.p2(integer) +--test procedure type duplicate with package private +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + procedure p1(a int); + procedure p2(a int); +end pck1; +/ +create or replace package body pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int) is + type r1 is record (a int := 1, b int := 2); + type r2 is record (a int := 3, b int := 4); + type r3 is record (a int := 7, b int := 6); + type r4 is record (a int := 9, b int := 8); + type va is record (a int := 11, b int := 10); + type vb is record (a int := 13, b int := 12); + type vc is record (a int := 15, b int := 14); + type vd is record (a int := 17, b int := 16); + type ve is record (a int := 19, b int := 18); + type vf is record (a int := 21, b int := 20); + val1 r1; + val2 r2; + val3 r3; + val4 r4; + val5 va; + val6 vb; + val7 vc; + val8 vd; + val9 ve; + val10 vf; + begin + raise info '%, %, %, %, %, %, %, %, %, %', val1,val2,val3,val4,val5,val6,val7,val8,val9,val10; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ +call pck1.p1(10); +INFO: (1,2), (3,4), (7,6), (9,8), (11,10), (13,12), (15,14), (17,16), (19,18), (21,20) + p1 +---- + +(1 row) + +call pck1.p2(10); +INFO: (1,2), {3,4,5}, {7,8,9}, (1,2), {3,4,5}, {7,8,9} + p2 +---- + +(1 row) + +DROP package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_val_2.p1(integer) +drop cascades to function pkg_val_2.p2(integer) +--test public var duplicated with private var +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); +end pck1; +/ +create or replace package body pck1 is + va int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "va" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + vb int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "vb" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + ve int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "ve" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + vf int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "vf" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + r1 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "r1" already defined +CONTEXT: compilation of 
PL/pgSQL package near line 1 +create or replace package body pck1 is + r2 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "r2" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + r3 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "r3" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + r4 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "r4" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + TYPE va is table of int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "va" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + TYPE r2 is table of int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "r2" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package body pck1 is + va int; + procedure p1(a int) is + va int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration +DETAIL: name "va" already defined +CONTEXT: compilation of PL/pgSQL package near line 1 +DROP package pck1; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +--test procedure duplicate with itself +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); +end pck1; +/ +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + va int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "va" +LINE 10: va int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + va int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + vb int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "vb" +LINE 10: vb int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + vb int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r1 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r1" +LINE 10: r1 int; + ^ +QUERY: DECLARE type r1 is record 
(a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r1 int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r2 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r2" +LINE 10: r2 int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r2 int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r4 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r4" +LINE 10: r4 int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r4 int; + begin + NULL; + end +DROP package pck1; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +--test procedure duplicate +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + va int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "va" +LINE 10: va int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + va int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + vb int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "vb" +LINE 10: vb int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + vb int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vd r4; + vf int; + cursor ve is select * from test_t1; + vd int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "vd" +LINE 11: vd int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vd r4; + vf int; + cursor ve is select * from test_t1; + vd int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + 
vc r3; + vf int; + cursor ve is select * from test_t1; + ve int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "ve" +LINE 10: ve int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + ve int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r1 int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "r1" +LINE 10: r1 int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r1 int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r2 int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "r2" +LINE 10: r2 int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r2 int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r3 int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "r3" +LINE 10: r3 int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r3 int; +begin + NULL; +end +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r4 int; +begin + NULL; +end; +/ +ERROR: duplicate declaration at or near "r4" +LINE 10: r4 int; + ^ +QUERY: DECLARE type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r4 int; +begin + NULL; +end +DROP procedure pro1(int); +ERROR: function pro1(integer) does not exist +DROP table test_t1; +-- test using a type before it is defined +DROP TABLE if exists test_t1; +NOTICE: table "test_t1" does not exist, skipping +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); +end pck1; +/ +create or replace package body pck1 is + procedure p1(a int) is + va r1; + r1 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r1" +LINE 2: r1 int; + ^ +QUERY: DECLARE va r1; + r1 int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + va r1; + type r1 
is record (a int, b int); + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r1" +LINE 2: type r1 is record (a int, b int); + ^ +QUERY: DECLARE va r1; + type r1 is record (a int, b int); + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + va r2; + r2 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r2" +LINE 2: r2 int; + ^ +QUERY: DECLARE va r2; + r2 int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + va r3; + r3 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r3" +LINE 2: r3 int; + ^ +QUERY: DECLARE va r3; + r3 int; + begin + NULL; + end +create or replace package body pck1 is + procedure p1(a int) is + va r4; + r4 int; + begin + NULL; + end; +end pck1; +/ +ERROR: duplicate declaration at or near "r4" +LINE 2: r4 int; + ^ +QUERY: DECLARE va r4; + r4 int; + begin + NULL; + end +DROP package pck1; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +DROP table test_t1; +-- test duplicate declaration with an already-used package variable +create or replace package pck1 is +TYPE r1 is record (a int, c varchar2(10), d int); +TYPE r2 is varray(10) of int; +va r1; +vb r2; +vc int; +procedure p1; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +vd int := va; +va int; +begin +end; +end pck1; +/ +ERROR: duplicate declaration at or near "va" +LINE 3: va int; + ^ +QUERY: DECLARE +vd int := va; +va int; +begin +end +create or replace package body pck1 is +procedure p1 is +vd r2 := vb; +vb int; +begin +end; +end pck1; +/ +ERROR: duplicate declaration at or near "vb" +LINE 3: vb int; + ^ +QUERY: DECLARE +vd r2 := vb; +vb int; +begin +end +create or replace package body pck1 is +procedure p1 is +vd int := vc; +vc int; +begin +end; +end pck1; +/ +ERROR: duplicate declaration at or near "vc" +LINE 3: vc int; + ^ +QUERY: DECLARE +vd int := vc; +vc int; +begin +end +DROP PACKAGE pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +--test var name same as type name +create type o1 as (a int, b int); +create or replace package pck1 is +o1 o1; +end pck1; +/ +create or replace package pck1 is +type o1 is varray(10) of o1; +end pck1; +/ +create or replace package pck1 is +type o1 is table of o1; +end pck1; +/ +create or replace package pck1 is +TYPE r1 is record (a int, c varchar2(10), d int); +TYPE r2 is varray(10) of int; +TYPE r3 is table of int; +procedure p1; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +r1 r1; +begin +end; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +r2 r2; +begin +end; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +r3 r3; +begin +end; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +r2 r3; +begin +end; +end pck1; +/ +create or replace procedure pp1 is +type r1 is record (a int, b int); +r1 r1; +begin +null; +end; +/ +ERROR: duplicate declaration at or near "r1" +LINE 3: r1 r1; + ^ +QUERY: DECLARE +type r1 is record (a int, b int); +r1 r1; +begin +null; +end +create or replace procedure pp1 is +type r1 is varray(10) of int; +r1 r1; +begin +null; +end; +/ +ERROR: duplicate declaration at or near "r1" +LINE 3: r1 r1; + ^ +QUERY: DECLARE +type r1 is varray(10) of int; +r1 r1; +begin +null; +end +create or replace procedure pp1 is +type r1 is table of int; +r1 r1; +begin +null; +end; +/ +ERROR: duplicate declaration at or near "r1" +LINE 3: r1 r1; + ^ +QUERY: DECLARE +type r1 is table of int; +r1 r1; +begin 
+null; +end +create or replace procedure pp1 is +type r1 is table of int; +r2 r1; +begin +null; +end; +/ +DROP procedure pp1; +DROP PACKAGE pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP TYPE o1 cascade; +-- test row.col%TYPE as procedure param type +-- (1) va.a%TYPE +create or replace package pck2 is +type ta is record(a int, b int); +va ta; +procedure p1(v1 in va.a%type); +end pck2; +/ +create or replace package body pck2 is +procedure p1(v1 in va.a%type) is +begin +raise info '%', v1; +end; +end pck2; +/ +call pck2.p1(11); +INFO: 11 + p1 +---- + +(1 row) + +DROP package pck2; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +--(2) private va.a%TYPE +create or replace package pck2 is +type ta is record(a int, b int); +procedure p1; +end pck2; +/ +create or replace package body pck2 is +va ta; + +procedure p2(v1 in va.a%type) is +begin +raise info '%', v1; +end; + +procedure p1 is +begin +p2(11); +end; + +end pck2; +/ +call pck2.p1(); +INFO: 11 +CONTEXT: SQL statement "CALL p2(11)" +PL/pgSQL function p1() line 3 at PERFORM + p1 +---- + +(1 row) + +DROP package pck2; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function pkg_val_2.p2(integer) +drop cascades to function pkg_val_2.p1() +-- (3) pck.va.a%TYPE +create or replace package pck1 is +type ta is record (a int); +va ta; +end pck1; +/ +create or replace package pck2 is +type ta is record(a int, b int); +procedure p1(v1 in pck1.va.a%type); +end pck2; +/ +create or replace package body pck2 is +procedure p1(v1 in pck1.va.a%type) is +begin +raise info '%', v1; +end; +end pck2; +/ +call pck2.p1(11); +INFO: 11 + p1 +---- + +(1 row) + +DROP package pck2; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +DROP package pck1; +-- (4) schema.pkg.row.col%TYPE +create or replace package pkg_val_1.pck1 is +type ta is record (a int); +va ta; +end pck1; +/ +create or replace package pck2 is +type ta is record(a int, b int); +procedure p1(v1 in pkg_val_1.pck1.va.a%type); +end pck2; +/ +create or replace package body pck2 is +procedure p1(v1 in pkg_val_1.pck1.va.a%type) is +begin +raise info '%', v1; +end; +end pck2; +/ +call pck2.p1(11); +INFO: 11 + p1 +---- + +(1 row) + +DROP package pck2; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +DROP package pkg_val_1.pck1; +--test pkg.val.col%TYPE +create or replace package pck1 as +type t1 is record(c1 int,c2 int); +va t1; +end pck1; +/ +create or replace package pck2 as +vb pck1.va.c1%type; +end pck2; +/ +DROP PACKAGE pck2; +DROP PACKAGE pck1; +-- not allow declare ref cursor type variable in package +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create or replace package pck1 as +type t1 is ref cursor; +v1 t1; +end pck1; +/ +ERROR: not allow use ref cursor in package at or near ";" +LINE 3: v1 t1; + ^ +QUERY: PACKAGE DECLARE +type t1 is ref cursor; +v1 t1; +end +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace package pck1 as +type t1 is ref cursor; +-- v1 t1; +end pck1; +/ +create or replace package body pck1 as +type t2 is ref cursor; +v1 t2; +end pck1; +/ +ERROR: not allow use ref cursor in package at or near ";" +LINE 3: v1 t2; + ^ +QUERY: PACKAGE DECLARE +type t2 is ref cursor; +v1 t2; +end +CONTEXT: compilation of PL/pgSQL package near line 3 +drop package if exists pck1; +create or replace package pck1 as +type t3 is ref cursor; +v3 pkg_val_2.pck1.t3; +end pck1; +/ +ERROR: not allow use ref cursor in package at or near ";" +LINE 3: v3 pkg_val_2.pck1.t3; + ^ +QUERY: PACKAGE DECLARE 
+type t3 is ref cursor; +v3 pkg_val_2.pck1.t3; +end +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace package pck1 as +type t3 is ref cursor; +-- v3 pkg_val_2.pck1.t3; +end pck1; +/ +create or replace package body pck1 as +type t4 is ref cursor; +v4 pkg_val_2.pck1.t4; +end pck1; +/ +ERROR: not allow use ref cursor in package at or near ";" +LINE 3: v4 pkg_val_2.pck1.t4; + ^ +QUERY: PACKAGE DECLARE +type t4 is ref cursor; +v4 pkg_val_2.pck1.t4; +end +CONTEXT: compilation of PL/pgSQL package near line 3 +drop package if exists pck1; +-- test select into error in for in loop condition +create table tab1(a int,b int); +create or replace package pck1 is +procedure p1; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +v1 tab1%rowtype; +begin +for rec in (select a,b into v1 from tab1) loop +end loop; +end; +end pck1; +/ +ERROR: compile failed when parse the query: (select a,b into v1 from tab1) +DETAIL: select into clause is not supported in cursor or for..in loop condition yet. +CONTEXT: compilation of PL/pgSQL package near line 4 +create or replace package pck1 as +func int; +function func() return int; +end pck1; +/ +ERROR: duplicate declaration at or near ";" +LINE 3: function func() return int; + ^ +QUERY: PACKAGE DECLARE +func int; +function func() return int; +end +CONTEXT: compilation of PL/pgSQL package near line 2 +create or replace package pck1 as +func int; +procedure func(); +end pck1; +/ +ERROR: duplicate declaration at or near ";" +LINE 3: procedure func(); + ^ +QUERY: PACKAGE DECLARE +func int; +procedure func(); +end +CONTEXT: compilation of PL/pgSQL package near line 2 +create or replace package pck1 as +func constant int; +procedure func(); +end pck1; +/ +ERROR: duplicate declaration at or near ";" +LINE 3: procedure func(); + ^ +QUERY: PACKAGE DECLARE +func constant int; +procedure func(); +end +CONTEXT: compilation of PL/pgSQL package near line 2 +create or replace package pck1 as +type arr is varray(10) of int; +func arr; +procedure func(); +end pck1; +/ +ERROR: duplicate declaration at or near ";" +LINE 4: procedure func(); + ^ +QUERY: PACKAGE DECLARE +type arr is varray(10) of int; +func arr; +procedure func(); +end +CONTEXT: compilation of PL/pgSQL package near line 3 +DROP PACKAGE pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP TABLE tab1; +-- test 4 word parse now +-- (1) schema.pkg.row.col +create or replace package pkg_val_1.pck1 is +type r1 is record (a int, b int); +va r1; +end pck1; +/ +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +v1 int; +begin +select 1 into pkg_val_1.pck1.va.a; +v1 := pkg_val_1.pck1.va.a; +pkg_val_1.pck1.va.b := 11; +raise info '%, %', v1, pkg_val_1.pck1.va; +end; +end pck2; +/ +call pck2.p1(); +INFO: 1, (1,11) + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pkg_val_1.pck1; +-- (2) schema.pkg.array.next +create or replace package pkg_val_1.pck1 is +type t1 is varray(10) of int; +va t1; +type t2 is table of int index by varchar2(10); +vb t2; +end pck1; +/ +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +begin +pkg_val_1.pck1.va.extend(9); +pkg_val_1.pck1.va(1) := 111; +raise NOTICE '%',pkg_val_1.pck1.va.first; +raise NOTICE '%',pkg_val_1.pck1.va.count(); +pkg_val_1.pck1.va.delete; +raise NOTICE '%',pkg_val_1.pck1.va.first; +raise NOTICE '%',pkg_val_1.pck1.va.count(); + 
+pkg_val_1.pck1.vb('aa') := 222; +raise NOTICE '%',pkg_val_1.pck1.vb.first; +raise NOTICE '%',pkg_val_1.pck1.vb.count(); +pkg_val_1.pck1.vb.delete; +raise NOTICE '%',pkg_val_1.pck1.vb.first; +raise NOTICE '%',pkg_val_1.pck1.vb.count(); + +end; +end pck2; +/ +call pck2.p1(); +NOTICE: 1 +NOTICE: 1 +NOTICE: +NOTICE: 0 +NOTICE: aa +NOTICE: 1 +NOTICE: +NOTICE: 0 + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pkg_val_1.pck1; +-- (3) pkg.row.col1.col2 +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is record (a r1); +va r2; +end pck1; +/ +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +v1 int; +begin +select 1 into pck1.va.a.a; +v1 := pck1.va.a.a; +pck1.va.a.b := 11; +raise info '%, %', v1, pck1.va.a; +end; +end pck2; +/ +call pck2.p1(); +INFO: 1, (1,11) + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +-- (4) pkg.row.col.extend +create or replace package pck1 is +type t1 is varray(10) of int; +type t2 is table of int index by varchar2(10); +type r1 is record(a t1, b t2); +va r1; +end pck1; +/ +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +begin +pck1.va.a.extend(9); +pck1.va.a(1) := 111; +raise NOTICE '%',pck1.va.a.first; +raise NOTICE '%',pck1.va.a.count(); +pck1.va.a.delete; +raise NOTICE '%',pck1.va.a.first; +raise NOTICE '%',pck1.va.a.count(); + +pck1.va.b('aa') := 222; +raise NOTICE '%', pck1.va.b.first; +raise NOTICE '%', pck1.va.b.count(); +pck1.va.b.delete; +raise NOTICE '%', pck1.va.b.first; +raise NOTICE '%', pck1.va.b.count(); + +end; +end pck2; +/ +call pck2.p1(); +NOTICE: 1 +NOTICE: 1 +NOTICE: +NOTICE: 0 +NOTICE: aa +NOTICE: 1 +NOTICE: +NOTICE: 0 + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +-- (5) row.col1.col2.col3 +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +TYPE r1 is record (a int, b int); +TYPE r2 is record (a r1, b int); +TYPE r3 is record (a r2, b int); +v1 int; +va r3; +begin +select 1 into va.a.a.a; +v1 := va.a.a.a; +va.a.a.b := 11; +raise info '%, %', v1, va.a.a; +end; +end pck2; +/ +call pck2.p1(); +INFO: 1, (1,11) + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +--test package variable as default value +create or replace package pck1 as +type t1 is record(c1 int,c2 varchar2); +v1 t1 := (1,'a'); +procedure p1; +end pck1; +/ +create or replace package body pck1 as +procedure p1 is +begin +v1 := (2, 'b'); +end; +end pck1; +/ +create or replace package pck2 as +v2 int := pck1.v1.c1; +end pck2; +/ +declare +a int; +begin +a := pck2.v2; +raise info '%', a; +a := pck1.v1.c1; +raise info '%', a; +pck1.p1(); +a := pck2.v2; +raise info '%', a; +a := pck1.v1.c1; +raise info '%', a; +end; +/ +INFO: 1 +INFO: 1 +INFO: 1 +INFO: 2 +DROP PACKAGE pck2; +DROP PACKAGE pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +--test package self with schema +create table pkg_val_2.t1(a int, b int); +create or replace package pkg_val_2.pck2 is +va int; +vb int; +procedure p1; +end pck2; +/ +create or replace package body pkg_val_2.pck2 is +procedure p1 is +cursor cur1 is select * from t1 where a between pkg_val_2.pck2.va and pkg_val_2.pck2.vb; +begin +pkg_val_2.pck2.va := 1; +raise info '%', 
pkg_val_2.pck2.va; +end; +end pck2; +/ +call pkg_val_2.pck2.p1(); +INFO: 1 + p1 +---- + +(1 row) + +DROP PACKAGE pkg_val_2.pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP TABLE pkg_val_2.t1; +-- test package duplicate name +create or replace package pkg_val_1.pckg_test1 as +var varchar2 :='abc'; +var2 int :=4; +procedure p1(c1 int,c2 out varchar2); +end pckg_test1; +/ +create or replace package body pkg_val_1.pckg_test1 as +procedure p1(c1 int,c2 out varchar2) as +begin +c2 :=var||c1; +end; +end pckg_test1; +/ +create or replace package pkg_val_2.pckg_test1 as +var2 varchar2; +procedure p1(); +end pckg_test1; +/ +create or replace package body pkg_val_2.pckg_test1 as +procedure p1() as +begin +pkg_val_1.pckg_test1.p1(pkg_val_1.pckg_test1.var2,var2); +raise info 'var2:%' ,var2; +end; +end pckg_test1; +/ +call pkg_val_2.pckg_test1.p1(); +INFO: var2:abc4 + p1 +---- + +(1 row) + +DROP PACKAGE pkg_val_2.pckg_test1; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pkg_val_1.pckg_test1; +NOTICE: drop cascades to function pkg_val_1.p1(integer) +-- 1. package variable as => out param +drop package if exists pkg_val_1.pckg_test1; +NOTICE: package pkg_val_1.pckg_test1() does not exist, skipping +create or replace package pkg_val_1.pckg_test1 as +var varchar2 :='abc'; +procedure p1(c1 int,c2 out varchar2); +end pckg_test1; +/ +create or replace package body pkg_val_1.pckg_test1 as +procedure p1(c1 int,c2 out varchar2) as +begin +c2 :=var||c1; +end; +end pckg_test1; +/ +drop package if exists pkg_val_2.pckg_test1; +NOTICE: package pkg_val_2.pckg_test1() does not exist, skipping +create or replace package pkg_val_2.pckg_test1 as + +procedure p1(t1 int ,t2 out varchar2); +var2 varchar2; +end pckg_test1; +/ +create or replace package body pkg_val_2.pckg_test1 as +procedure p1(t1 int ,t2 out varchar2) as +begin +pkg_val_1.pckg_test1.p1(c1 => t1,c2 => pkg_val_1.pckg_test1.var); +raise info '%', pkg_val_1.pckg_test1.var; +end; +end pckg_test1; +/ +call pkg_val_2.pckg_test1.p1(3,''); +INFO: abc3 + t2 +---- + +(1 row) + +drop package if exists pkg_val_1.pckg_test1; +NOTICE: drop cascades to function pkg_val_1.p1(integer) +drop package if exists pkg_val_2.pckg_test1; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +-- 2. 
package variable as out param +create or replace package pkg_val_1.pckg_test1 as +var varchar2 :='abc'; +procedure p1(c1 int,c2 out varchar2); +end pckg_test1; +/ +create or replace package body pkg_val_1.pckg_test1 as +procedure p1(c1 int,c2 out varchar2) as +begin +c2 :=var||c1; +end; +end pckg_test1; +/ +create or replace package pkg_val_2.pckg_test1 as + +procedure p1(t1 int ,t2 out varchar2); +var2 varchar2; +end pckg_test1; +/ +create or replace package body pkg_val_2.pckg_test1 as +procedure p1(t1 int ,t2 out varchar2) as +begin +pkg_val_1.pckg_test1.p1(t1,pkg_val_1.pckg_test1.var); +raise info '%', pkg_val_1.pckg_test1.var; +end; +end pckg_test1; +/ +call pkg_val_2.pckg_test1.p1(3,''); +INFO: abc3 + t2 +---- + +(1 row) + +create schema ss1; +create or replace package ss1.pkg8 is + progname varchar2(60); + workdate_bpc varchar2(10); +end pkg8; +/ +create or replace package pkg7 is + var1 int:=1; +type t_pfxp_athfcdtl is record (dapcode int); +procedure proc1(); +end pkg7; +/ +create table testtab (a int, b varchar2(10)); +create or replace package body pkg7 is + procedure proc1() is + v_pfxp_athfcdtl t_pfxp_athfcdtl; + cursor cur_pfxp_athfcdtl is + select a from testtab where b=ss1.pkg8.workdate_bpc + order by a; + begin + raise notice 'pkg7'; + end; +end pkg7; +/ +call pkg7.proc1(); +NOTICE: pkg7 + proc1 +------- + +(1 row) + +drop table testtab; +drop package pkg7; +NOTICE: drop cascades to function pkg_val_2.proc1() +drop package ss1.pkg8; +drop schema ss1; +drop package if exists pkg_val_1.pckg_test1; +NOTICE: drop cascades to function pkg_val_1.p1(integer) +drop package if exists pkg_val_2.pckg_test1; +NOTICE: drop cascades to function pkg_val_2.p1(integer) +--test package cursor +create table t1(a int, b int); +insert into t1 values(1,2); +insert into t1 values(2,4); +insert into t1 values(3,6); +create or replace package pck1 is +cursor c1 is select * from t1; +end pck1; +/ +create or replace package pck2 is +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +open pck1.c1; +loop +fetch pck1.c1 into va; +exit when pck1.c1%notfound; +raise info 'va: %',va; +raise info 'rowcount: %', pck1.c1%ROWCOUNT; +raise info 'isopend: %', pck1.c1%isopen; +raise info 'isfound: %', pck1.c1%found; +end loop; +close pck1.c1; +raise info 'isopend: %', pck1.c1%isopen; +end; +end pck2; +/ +call pck2.p1(); +INFO: va: (1,2) +INFO: rowcount: 1 +INFO: isopend: t +INFO: isfound: t +INFO: va: (2,4) +INFO: rowcount: 2 +INFO: isopend: t +INFO: isfound: t +INFO: va: (3,6) +INFO: rowcount: 3 +INFO: isopend: t +INFO: isfound: t +INFO: isopend: f + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +DROP TABLE t1; +-- test package cursor error +create or replace package pck1 is +c1 int; +end pck1; +/ +create or replace package pck2 is +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +pck1.c1 := 1; +raise info 'rowcount: %', pck1.c1%ROWCOUNT; +raise info 'isopend: %', pck1.c1%isopen; +raise info 'isfound: %', pck1.c1%found; +end; +end pck2; +/ +ERROR: pck1.c1 isn't a cursor +CONTEXT: compilation of PL/pgSQL package near line 6 +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +pck1.c1 := 1; +raise info 'rowcount: %', pck1.c2%ROWCOUNT; +raise info 'isopend: %', pck1.c2%isopen; +raise info 'isfound: %', pck1.c2%found; +end; +end 
pck2; +/ +ERROR: undefined cursor: pck1.c2 +CONTEXT: compilation of PL/pgSQL package near line 6 +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck1; +-- test comments do not cause errors +CREATE OR REPLACE PROCEDURE test1 +IS /*aa*/ +a INT:=1; /*aa*/ +c INT:=2; /*aa*/ +BEGIN /*aa*/ +IF a<>1 THEN /*aa*/ +c:=3; /*aa*/ +END IF; /*aa*/ +END; /*aa*/ +/ +CREATE TABLE t1(a INT, b INT); +CREATE OR REPLACE PROCEDURE test2 IS +va INT; +BEGIN +SELECT /* aa */ 1 INTO va; +RAISE INFO '%', va; +INSERT /* aa */ INTO t1 VALUES(3,2); +INSERT /* aa */ INTO t1 VALUES(3,3); +UPDATE /* aa */ t1 SET a = 1 WHERE b =2; +DELETE /* aa */ FROM t1 WHERE a = 1; +END; +/ +CALL test2(); +INFO: 1 + test2 +------- + +(1 row) + +SELECT * FROM t1; + a | b +---+--- + 3 | 3 +(1 row) + +DROP PROCEDURE test1; +DROP PROCEDURE test2; +DROP TABLE t1; +-- test create matview +drop table if exists materialized_view_tb; +NOTICE: table "materialized_view_tb" does not exist, skipping +create table materialized_view_tb(c1 int,c2 int); +create or replace package materialized_view_package as +procedure materialized_view_proc(); +end materialized_view_package; +/ +create or replace package body materialized_view_package AS +procedure materialized_view_proc() AS +begin +CREATE MATERIALIZED VIEW my_mv AS SELECT * FROM materialized_view_tb; +INSERT INTO materialized_view_tb VALUES(1,1),(2,2); +REFRESH MATERIALIZED VIEW my_mv; +end; +end materialized_view_package; +/ +call materialized_view_package.materialized_view_proc(); + materialized_view_proc +------------------------ + +(1 row) + +DROP MATERIALIZED VIEW my_mv; +DROP PACKAGE materialized_view_package; +NOTICE: drop cascades to function pkg_val_2.materialized_view_proc() +DROP TABLE materialized_view_tb; +-- test drop package memory leak when referencing another package's variable +create type rec is (col1 varchar2,col2 varchar2); +create or replace package pckg_test as +type t_arr is table of rec; +type t_arr1 is table of varchar2; +v_arr t_arr; +v_arr1 t_arr1; +v_rec rec; +end pckg_test; +/ +create or replace package pckg_test1 as +procedure proc_test(i_var1 in varchar2,i_var2 in varchar2); +end pckg_test1; +/ +create or replace package body pckg_test1 as +procedure proc_test(i_var1 in varchar2,i_var2 in varchar2) as +v_var1 varchar2; +begin +pckg_test.v_arr(1) := rec(1,2); +pckg_test.v_arr1(1) := 1; +pckg_test.v_rec.col1 :=1; +end; +end pckg_test1; +/ +call pckg_test1.proc_test('1','1'); + proc_test +----------- + +(1 row) + +drop package if exists pckg_test; +drop package if exists pckg_test1; +NOTICE: drop cascades to function pkg_val_2.proc_test(character varying,character varying) +drop type if exists rec; +-- test dropping packages with circular variable references +create or replace package pckg_test1 as +procedure p1; +var varchar2 := 'pck1'; +end pckg_test1; +/ +create or replace package pckg_test2 as +procedure pp1; +var2 varchar2 := 'pck2'; +end pckg_test2; +/ +create or replace package pckg_test3 as +procedure ppp1; +var3 varchar2 := 'pck3'; +end pckg_test3; +/ +create or replace package body pckg_test1 as +procedure p1 as +begin +raise info '%', pckg_test3.var3; +end; +end pckg_test1; +/ +create or replace package body pckg_test2 as +procedure pp1 as +begin +raise info '%', pckg_test1.var; +end; +end pckg_test2; +/ +create or replace package body pckg_test3 as +procedure ppp1 as +begin +raise info '%', pckg_test2.var2; +end; +end pckg_test3; +/ +call pckg_test3.ppp1(); +INFO: pck2 + ppp1 +------ + +(1 row) + +call pckg_test2.pp1(); +INFO: pck1 + pp1 +----- + +(1 row) + +call 
pckg_test1.p1(); +INFO: pck3 + p1 +---- + +(1 row) + +drop package if exists pckg_test1; +NOTICE: drop cascades to function pkg_val_2.p1() +drop package if exists pckg_test2; +NOTICE: drop cascades to function pkg_val_2.pp1() +drop package if exists pckg_test3; +NOTICE: drop cascades to function pkg_val_2.ppp1() +-- test schema.pkg.cursor +create table t1 (a int, b int); +create or replace package pck1 as +cursor c1 for select * from t1; +procedure p1; +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +va t1; +begin +open pkg_val_2.pck1.c1; +close pkg_val_2.pck1.c1; +end; +end pck1; +/ +ERROR: cursor referenced by "pkg_val_2.pck1.c1" is not supported yet +LINE 4: open pkg_val_2.pck1.c1; + ^ +QUERY: DECLARE +va t1; +begin +open pkg_val_2.pck1.c1; +close pkg_val_2.pck1.c1; +end +DROP PACKAGE pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +-- test auto cursor +create or replace package pck2 as +cursor c1 for select * from t1; +procedure p1; +end pck2; +/ +create or replace package pck1 as +cursor c1 for select * from t1; +procedure p1; +end pck1; +/ +-- cross package cursor +create or replace package body pck1 as +procedure p1 as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open pck2.c1; +close pck2.c1; +end; +end pck1; +/ +ERROR: cursor referenced by "pck2.c1" in autonomous procedure is not supported yet +LINE 5: open pck2.c1; + ^ +QUERY: DECLARE +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open pck2.c1; +close pck2.c1; +end +-- own package cursor +create or replace package body pck1 as +procedure p1 as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1; +close c1; +end; +end pck1; +/ +ERROR: cursor referenced by "c1" in autonomous procedure is not supported yet +LINE 5: open c1; + ^ +QUERY: DECLARE +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1; +close c1; +end +DROP PACKAGE pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP PACKAGE pck2; +NOTICE: drop cascades to function pkg_val_2.p1() +DROP TABLE t1; +reset behavior_compat_options; +-- ref package cursor attr at first +create table t1(a int, b int); +create or replace package pck1 is +cursor c1 is select * from t1; +end pck1; +/ +begin +raise info 'isopend: %', pck1.c1%isopen; +end +/ +INFO: isopend: f +drop package pck1; +drop table t1; +-- package cursor attribute reset +drop table t1; +ERROR: table "t1" does not exist +create table t1 (a int, b int); +insert into t1 values (1,2); +create or replace package pck1 as +cursor c1 for select * from t1; +procedure p1; +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +declare +va t1; +begin +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +open c1; +fetch c1 into va; +raise info 'va:%', va; +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +end; +end pck1; +/ +call pck1.p1(); +INFO: isopend: f +INFO: rowcount: +INFO: va:(1,2) +INFO: isopend: t +INFO: rowcount: 1 + p1 +---- + +(1 row) + +call pck1.p1(); +INFO: isopend: f +INFO: rowcount: +INFO: va:(1,2) +INFO: isopend: t +INFO: rowcount: 1 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +drop table t1; +-- package cursor with arguments +drop table t1; +ERROR: table "t1" does not exist +create table t1 (a int, b int); +insert into t1 values (1,2); +insert into t1 values (3,4); +create or replace package pck1 as +cursor c1(va int) for select * from t1 where a < va; +procedure p1; +end pck1; +/ +create or replace procedure pp1() as +va t1; +begin +fetch pck1.c1 into 
va; +raise info 'va:%', va; +raise info 'isopend: %', pck1.c1%isopen; +raise info 'rowcount: %', pck1.c1%rowcount; +end; +/ +create or replace package body pck1 as +procedure p1 as +declare +va t1; +begin +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +open pck1.c1(10); +fetch pck1.c1 into va; +raise info 'va:%', va; +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +pp1(); +end; +end pck1; +/ +call pck1.p1(); +INFO: isopend: f +INFO: rowcount: +INFO: va:(1,2) +INFO: isopend: t +INFO: rowcount: 1 +INFO: va:(3,4) +CONTEXT: SQL statement "CALL pp1()" +PL/pgSQL function p1() line 12 at PERFORM +INFO: isopend: t +CONTEXT: SQL statement "CALL pp1()" +PL/pgSQL function p1() line 12 at PERFORM +INFO: rowcount: 2 +CONTEXT: SQL statement "CALL pp1()" +PL/pgSQL function p1() line 12 at PERFORM + p1 +---- + +(1 row) + +declare +va t1; +begin +open pck1.c1(4); +fetch pck1.c1 into va; +end; +/ +ERROR: package cursor with arguments is only supported to be opened in the same package. +LINE 4: open pck1.c1(4); + ^ +DETAIL: cursor "pck1.c1" is only supported to be opened in the package "pck1" +QUERY: DECLARE +va t1; +begin +open pck1.c1(4); +fetch pck1.c1 into va; +end +drop procedure pp1; +drop package pck1; +NOTICE: drop cascades to function pkg_val_2.p1() +drop table t1; +-- clean +DROP SCHEMA IF EXISTS pkg_val_1 CASCADE; +DROP SCHEMA IF EXISTS pkg_val_2 CASCADE; diff --git a/src/test/regress/expected/hw_partition_add_drop_partition.out b/src/test/regress/expected/hw_partition_add_drop_partition.out new file mode 100644 index 000000000..879aa757e --- /dev/null +++ b/src/test/regress/expected/hw_partition_add_drop_partition.out @@ -0,0 +1,913 @@ +DROP SCHEMA hw_partition_add_drop_partition CASCADE; +ERROR: schema "hw_partition_add_drop_partition" does not exist +CREATE SCHEMA hw_partition_add_drop_partition; +SET CURRENT_SCHEMA TO hw_partition_add_drop_partition; +-- +----range table---- +-- +--prepare +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_sales_pkey" for table "range_sales" +INSERT INTO range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_sales_idx ON range_sales(product_id) LOCAL; +--check for add partition +--fail, can not add subpartition on no-subpartitioned table +ALTER TABLE range_sales ADD PARTITION time_temp1 VALUES LESS THAN ('2013-01-01') + ( + SUBPARTITION time_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION time_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION time_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION time_temp1_part4 VALUES LESS THAN (1200) + ); +ERROR: Un-support feature +DETAIL: Can not add subpartition against NON-SUBPARTITIONED table +--fail, out of range +ALTER TABLE range_sales ADD PARTITION time_temp2 VALUES LESS THAN ('2011-06-01'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, 
invalid format +ALTER TABLE range_sales ADD PARTITION time_temp3 VALUES ('2013-01-01'); +ERROR: can not add none-range partition to range partition table +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01'); +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_end VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_sales ADD PARTITION time_temp4 VALUES LESS THAN ('2014-01-01'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +-------------+----------+--------------+-------------+---------------+---------+-------------- + time_2008 | p | r | t | 0 | | {2009-01-01} + time_2009 | p | r | t | 0 | | {2010-01-01} + time_2010 | p | r | t | 0 | | {2011-01-01} + time_2011 | p | r | t | 0 | | {2012-01-01} + time_2012 | p | r | t | 0 | | {2013-01-01} + time_end | p | r | t | 0 | | {NULL} + range_sales | r | r | f | 0 | 3 | +(7 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +--------------------------+----------+--------------+-------------+------------- + time_2008_product_id_idx | x | n | t | t + time_2009_product_id_idx | x | n | t | t + time_2010_product_id_idx | x | n | t | t + time_2011_product_id_idx | x | n | t | t + time_2012_product_id_idx | x | n | t | t + time_end_product_id_idx | x | n | t | t +(6 rows) + +\d+ range_sales + Table "hw_partition_add_drop_partition.range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range_sales_idx" btree (product_id) LOCAL(PARTITION time_2008_product_id_idx, PARTITION time_2009_product_id_idx, PARTITION time_2010_product_id_idx, PARTITION time_2011_product_id_idx, PARTITION time_2012_product_id_idx, PARTITION time_end_product_id_idx) TABLESPACE pg_default +Partition By RANGE(time_id) +Number of partitions: 6 (View pg_partition to check each partition range.) 
+Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition (for) +--success, drop partition time_2009 +ALTER TABLE range_sales DROP PARTITION time_2009; +--success, drop partition time_2011 +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01'); +--fail, invalid type +ALTER TABLE range_sales DROP PARTITION FOR (1); +ERROR: partition key value must be const or const-evaluable expression +--fail, number not equal to the number of partkey +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01', 1); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, can not drop subpartition on no-subpartition table +ALTER TABLE range_sales DROP SUBPARTITION FOR ('2011-06-01', 1); +ERROR: Un-support feature +DETAIL: Can not drop subpartition against NON-SUBPARTITIONED table +--success, drop partition time_2012 +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01'); +--check for ok after drop +SELECT count(*) FROM range_sales; + count +------- + 635 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +-------------+----------+--------------+-------------+---------------+---------+-------------- + time_2008 | p | r | t | 0 | | {2009-01-01} + time_2010 | p | r | t | 0 | | {2011-01-01} + time_end | p | r | t | 0 | | {NULL} + range_sales | r | r | f | 0 | 3 | +(4 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +--------------------------+----------+--------------+-------------+------------- + time_2008_product_id_idx | x | n | t | t + time_2010_product_id_idx | x | n | t | t + time_end_product_id_idx | x | n | t | t +(3 rows) + +\d+ range_sales + Table "hw_partition_add_drop_partition.range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range_sales_idx" btree (product_id) LOCAL(PARTITION time_2008_product_id_idx, PARTITION time_2010_product_id_idx, PARTITION time_end_product_id_idx) TABLESPACE pg_default +Partition By RANGE(time_id) +Number of partitions: 3 (View pg_partition to check each partition range.) 
+Has OIDs: no +Options: orientation=row, compression=no + +-- +----range table, multiple partkeys---- +-- +--prepare +CREATE TABLE range2_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id, product_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01', 200), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01', 500), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01', 800), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01', 1200) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range2_sales_pkey" for table "range2_sales" +INSERT INTO range2_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range2_sales_idx ON range2_sales(product_id) LOCAL; +--check for add partition +--fail, can not add subpartition on no-subpartitioned table +ALTER TABLE range2_sales ADD PARTITION time_temp1 VALUES LESS THAN ('2013-01-01', 1500) + ( + SUBPARTITION time_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION time_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION time_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION time_temp1_part4 VALUES LESS THAN (1200) + ); +ERROR: Un-support feature +DETAIL: Can not add subpartition against NON-SUBPARTITIONED table +--fail, out of range +ALTER TABLE range2_sales ADD PARTITION time_temp2 VALUES LESS THAN ('2011-06-01', 100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range2_sales ADD PARTITION time_temp3 VALUES ('2013-01-01', 1500); +ERROR: can not add none-range partition to range partition table +--success, add 1 partition +ALTER TABLE range2_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01', 1500); +--success, add 1 partition +ALTER TABLE range2_sales ADD PARTITION time_end VALUES LESS THAN (MAXVALUE, MAXVALUE); +--fail, out of range +ALTER TABLE range2_sales ADD PARTITION time_temp4 VALUES LESS THAN ('2014-01-01', 2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------+----------+--------------+-------------+---------------+---------+------------------- + time_2008 | p | r | t | 0 | | {2009-01-01,200} + time_2009 | p | r | t | 0 | | {2010-01-01,500} + time_2010 | p | r | t | 0 | | {2011-01-01,800} + time_2011 | p | r | t | 0 | | {2012-01-01,1200} + time_2012 | p | r | t | 0 | | {2013-01-01,1500} + time_end | p | r | t | 0 | | {NULL,NULL} + range2_sales | r | r | f | 0 | 3 1 | +(7 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | 
partstrategy | hasfilenode | indisusable +--------------------------+----------+--------------+-------------+------------- + time_2008_product_id_idx | x | n | t | t + time_2009_product_id_idx | x | n | t | t + time_2010_product_id_idx | x | n | t | t + time_2011_product_id_idx | x | n | t | t + time_2012_product_id_idx | x | n | t | t + time_end_product_id_idx | x | n | t | t +(6 rows) + +\d+ range2_sales + Table "hw_partition_add_drop_partition.range2_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range2_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range2_sales_idx" btree (product_id) LOCAL(PARTITION time_2008_product_id_idx, PARTITION time_2009_product_id_idx, PARTITION time_2010_product_id_idx, PARTITION time_2011_product_id_idx, PARTITION time_2012_product_id_idx, PARTITION time_end_product_id_idx) TABLESPACE pg_default +Partition By RANGE(time_id, product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition (for) +--success, drop partition time_2009 +ALTER TABLE range2_sales DROP PARTITION time_2009; +--success, drop partition time_2011 +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01', 600); +--fail, invalid type +ALTER TABLE range2_sales DROP PARTITION FOR (1, 100); +ERROR: partition key value must be const or const-evaluable expression +--fail, number not equal to the number of partkey +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, can not drop subpartition on no-subpartition table +ALTER TABLE range2_sales DROP SUBPARTITION FOR ('2011-06-01', 1); +ERROR: Un-support feature +DETAIL: Can not drop subpartition against NON-SUBPARTITIONED table +--success, drop partition time_2012 +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01', 100); +--check for ok after drop +SELECT count(*) FROM range2_sales; + count +------- + 635 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------+----------+--------------+-------------+---------------+---------+------------------ + time_2008 | p | r | t | 0 | | {2009-01-01,200} + time_2010 | p | r | t | 0 | | {2011-01-01,800} + time_end | p | r | t | 0 | | {NULL,NULL} + range2_sales | r | r | f | 0 | 3 1 | +(4 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable 
+--------------------------+----------+--------------+-------------+------------- + time_2008_product_id_idx | x | n | t | t + time_2010_product_id_idx | x | n | t | t + time_end_product_id_idx | x | n | t | t +(3 rows) + +\d+ range2_sales + Table "hw_partition_add_drop_partition.range2_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range2_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range2_sales_idx" btree (product_id) LOCAL(PARTITION time_2008_product_id_idx, PARTITION time_2010_product_id_idx, PARTITION time_end_product_id_idx) TABLESPACE pg_default +Partition By RANGE(time_id, product_id) +Number of partitions: 3 (View pg_partition to check each partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- +----interval table---- +-- +--prepare +CREATE TABLE interval_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) INTERVAL ('1 year') +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01') +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "interval_sales_pkey" for table "interval_sales" +INSERT INTO interval_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2009-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX interval_sales_idx ON interval_sales(product_id) LOCAL; +--check for add partition +--fail, can not add subpartition on no-subpartitioned table +ALTER TABLE interval_sales ADD PARTITION time_temp1 VALUES LESS THAN ('2013-01-01') + ( + SUBPARTITION time_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION time_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION time_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION time_temp1_part4 VALUES LESS THAN (1200) + ); +ERROR: can not add partition against interval partitioned table +--fail, not support add interval +ALTER TABLE interval_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01'); +ERROR: can not add partition against interval partitioned table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +----------------+----------+--------------+-------------+---------------+---------+------------------------------ + sys_p1 | p | i | t | 0 | | {"Sun Jan 01 00:00:00 2012"} + time_2008 | p | r | t | 0 | | {2009-01-01} + time_2009 | p | r | t | 0 | | {2010-01-01} + time_2010 | p | r | t | 0 | | {2011-01-01} + 
interval_sales | r | i | f | 0 | 3 | +(5 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +--------------------------+----------+--------------+-------------+------------- + sys_p1_product_id_idx | x | n | t | t + time_2008_product_id_idx | x | n | t | t + time_2009_product_id_idx | x | n | t | t + time_2010_product_id_idx | x | n | t | t +(4 rows) + +\d+ interval_sales + Table "hw_partition_add_drop_partition.interval_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "interval_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "interval_sales_idx" btree (product_id) LOCAL(PARTITION time_2008_product_id_idx, PARTITION time_2009_product_id_idx, PARTITION time_2010_product_id_idx, PARTITION sys_p1_product_id_idx) TABLESPACE pg_default +Partition By RANGE(time_id) INTERVAL('1 year') +Number of partitions: 4 (View pg_partition to check each partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition (for) +--success, drop partition time_2009 +ALTER TABLE interval_sales DROP PARTITION time_2009; +--success, drop partition sys_p1 +ALTER TABLE interval_sales DROP PARTITION FOR ('2011-06-01'); +--fail, invalid type +ALTER TABLE interval_sales DROP PARTITION FOR (1); +ERROR: partition key value must be const or const-evaluable expression +--fail, number not equal to the number of partkey +ALTER TABLE interval_sales DROP PARTITION FOR ('2010-06-01', 1); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, can not drop subpartition on no-subpartition table +ALTER TABLE interval_sales DROP SUBPARTITION FOR ('2010-06-01', 1); +ERROR: Un-support feature +DETAIL: Can not drop subpartition against NON-SUBPARTITIONED table +--check for ok after drop +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +----------------+----------+--------------+-------------+---------------+---------+-------------- + time_2008 | p | r | t | 0 | | {2009-01-01} + time_2010 | p | r | t | 0 | | {2011-01-01} + interval_sales | r | i | f | 0 | 3 | +(3 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | 
indisusable +--------------------------+----------+--------------+-------------+------------- + time_2008_product_id_idx | x | n | t | t + time_2010_product_id_idx | x | n | t | t +(2 rows) + +\d+ interval_sales + Table "hw_partition_add_drop_partition.interval_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "interval_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "interval_sales_idx" btree (product_id) LOCAL(PARTITION time_2008_product_id_idx, PARTITION time_2010_product_id_idx) TABLESPACE pg_default +Partition By RANGE(time_id) INTERVAL('1 year') +Number of partitions: 2 (View pg_partition to check each partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- +----list table---- +-- +--prepare +CREATE TABLE list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) +( + PARTITION channel1 VALUES ('0', '1', '2'), + PARTITION channel2 VALUES ('3', '4', '5'), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_sales_pkey" for table "list_sales" +INSERT INTO list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_sales_idx ON list_sales(product_id) LOCAL; +--check for add partition +--fail, can not add subpartition on no-subpartitioned table +ALTER TABLE list_sales ADD PARTITION channel_temp1 VALUES ('X') + ( + SUBPARTITION channel_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION channel_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION channel_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION channel_temp1_part4 VALUES LESS THAN (1200) + ); +ERROR: Un-support feature +DETAIL: Can not add subpartition against NON-SUBPARTITIONED table +--fail, out of range +ALTER TABLE list_sales ADD PARTITION channel_temp2 VALUES ('8', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, value conflict +ALTER TABLE list_sales ADD PARTITION channel_temp3 VALUES ('X', 'X', 'Z'); +ERROR: list partition channel_temp3 has overlapped value +--fail, invalid format +ALTER TABLE list_sales ADD PARTITION channel_temp4 VALUES LESS THAN('X'); +ERROR: can not add none-list partition to list partition table +--success, add 1 partition +ALTER TABLE list_sales ADD PARTITION channel5 VALUES ('X', 'Z'); +--success, add 1 partition +ALTER TABLE list_sales ADD PARTITION channel_default VALUES (DEFAULT); +--fail, out of range +ALTER TABLE list_sales ADD PARTITION channel_temp5 VALUES ('P'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM 
pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +-----------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | t | 0 | | {0,1,2} + channel2 | p | l | t | 0 | | {3,4,5} + channel3 | p | l | t | 0 | | {6,7} + channel4 | p | l | t | 0 | | {8,9} + channel5 | p | l | t | 0 | | {X,Z} + channel_default | p | l | t | 0 | | {NULL} + list_sales | r | l | f | 0 | 4 | +(7 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +--------------------------------+----------+--------------+-------------+------------- + channel1_product_id_idx | x | n | t | t + channel2_product_id_idx | x | n | t | t + channel3_product_id_idx | x | n | t | t + channel4_product_id_idx | x | n | t | t + channel5_product_id_idx | x | n | t | t + channel_default_product_id_idx | x | n | t | t +(6 rows) + +\d+ list_sales + Table "hw_partition_add_drop_partition.list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_sales_idx" btree (product_id) LOCAL(PARTITION channel_default_product_id_idx, PARTITION channel5_product_id_idx, PARTITION channel4_product_id_idx, PARTITION channel3_product_id_idx, PARTITION channel2_product_id_idx, PARTITION channel1_product_id_idx) TABLESPACE pg_default +Partition By LIST(channel_id) +Number of partitions: 6 (View pg_partition to check each partition range.) 
+Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition (for) +--success, drop partition channel2 +ALTER TABLE list_sales DROP PARTITION channel2; +--success, drop partition channel3 +ALTER TABLE list_sales DROP PARTITION FOR ('6'); +--fail, invalid type +ALTER TABLE list_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, number not equal to the number of partkey +ALTER TABLE list_sales DROP PARTITION FOR ('6', 1); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, can not drop subpartition on no-subpartition table +ALTER TABLE list_sales DROP SUBPARTITION FOR ('6', 1); +ERROR: Un-support feature +DETAIL: Can not drop subpartition against NON-SUBPARTITIONED table +--success, drop partition channel_default +ALTER TABLE list_sales DROP PARTITION FOR ('6'); +--check for ok after drop +SELECT count(*) FROM list_sales; + count +------- + 500 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | t | 0 | | {0,1,2} + channel4 | p | l | t | 0 | | {8,9} + channel5 | p | l | t | 0 | | {X,Z} + list_sales | r | l | f | 0 | 4 | +(4 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-------------------------+----------+--------------+-------------+------------- + channel1_product_id_idx | x | n | t | t + channel4_product_id_idx | x | n | t | t + channel5_product_id_idx | x | n | t | t +(3 rows) + +\d+ list_sales + Table "hw_partition_add_drop_partition.list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_sales_idx" btree (product_id) LOCAL(PARTITION channel5_product_id_idx, PARTITION channel4_product_id_idx, PARTITION channel1_product_id_idx) TABLESPACE pg_default +Partition By LIST(channel_id) +Number of partitions: 3 (View pg_partition to check each partition range.) 
+Has OIDs: no +Options: orientation=row, compression=no + +-- +----hash table---- +-- +--prepare +CREATE TABLE hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) +( + PARTITION product1, + PARTITION product2, + PARTITION product3, + PARTITION product4 +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_sales_pkey" for table "hash_sales" +INSERT INTO hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_sales_idx ON hash_sales(product_id) LOCAL; +--check for add partition +--fail, not support add hash +ALTER TABLE hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_part4 VALUES LESS THAN (1200) + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_sales ADD PARTITION product_temp2; + ^ +--fail, invalid format +ALTER TABLE hash_sales ADD PARTITION product_temp3 VALUES LESS THAN('X'); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | t | 0 | | {0} + product2 | p | h | t | 0 | | {1} + product3 | p | h | t | 0 | | {2} + product4 | p | h | t | 0 | | {3} + hash_sales | r | h | f | 0 | 1 | +(5 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-------------------------+----------+--------------+-------------+------------- + product1_product_id_idx | x | n | t | t + product2_product_id_idx | x | n | t | t + product3_product_id_idx | x | n | t | t + product4_product_id_idx | x | n | t | t +(4 rows) + +\d+ hash_sales + Table "hw_partition_add_drop_partition.hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default 
+ "hash_sales_idx" btree (product_id) LOCAL(PARTITION product4_product_id_idx, PARTITION product3_product_id_idx, PARTITION product2_product_id_idx, PARTITION product1_product_id_idx) TABLESPACE pg_default +Partition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition (for) +--fail, not support drop hash +ALTER TABLE hash_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_sales DROP PARTITION FOR (0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_sales DROP PARTITION FOR (0, 0); +ERROR: Droping hash partition is unsupported. +--fail, can not drop subpartition on no-subpartition table +ALTER TABLE hash_sales DROP SUBPARTITION FOR(0, 0); +ERROR: Un-support feature +DETAIL: Can not drop subpartition against NON-SUBPARTITIONED table +--fail, can not drop subpartition on no-subpartition table +ALTER TABLE hash_sales DROP SUBPARTITION FOR(0); +ERROR: Un-support feature +DETAIL: Can not drop subpartition against NON-SUBPARTITIONED table +--check for ok after drop +SELECT count(*) FROM hash_sales; + count +------- + 1000 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | t | 0 | | {0} + product2 | p | h | t | 0 | | {1} + product3 | p | h | t | 0 | | {2} + product4 | p | h | t | 0 | | {3} + hash_sales | r | h | f | 0 | 1 | +(5 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-------------------------+----------+--------------+-------------+------------- + product1_product_id_idx | x | n | t | t + product2_product_id_idx | x | n | t | t + product3_product_id_idx | x | n | t | t + product4_product_id_idx | x | n | t | t +(4 rows) + +\d+ hash_sales + Table "hw_partition_add_drop_partition.hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_sales_idx" btree (product_id) LOCAL(PARTITION product4_product_id_idx, PARTITION product3_product_id_idx, PARTITION product2_product_id_idx, PARTITION product1_product_id_idx) TABLESPACE pg_default +Partition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each 
partition range.) +Has OIDs: no +Options: orientation=row, compression=no + +create table test_range_pt (a int primary key, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_range_pt_pkey" for table "test_range_pt" +insert into test_range_pt values(1),(2001); +create view vp1 as select * from test_range_pt partition for (1); +alter table test_range_pt drop partition p1; +ERROR: Cannot perform this operation because There are views or rules that depend on table test_range_pt. +DETAIL: N/A +HINT: drop the views or rules first. Use pg_rules to find rules. Use pg_class, pg_rewrite, pg_depend, pg_namespacesql to find views +create table tt ( a int, b int,c int); +alter table test_range_pt exchange partition (p1) with table tt update global index; +ERROR: Cannot perform this operation because There are views or rules that depend on table test_range_pt. +DETAIL: N/A +HINT: drop the views or rules first. Use pg_rules to find rules. Use pg_class, pg_rewrite, pg_depend, pg_namespacesql to find views +drop view vp1; +drop table test_range_pt; +drop table tt; +--finish +DROP TABLE range_sales; +DROP TABLE range2_sales; +DROP TABLE interval_sales; +DROP TABLE list_sales; +DROP TABLE hash_sales; +DROP SCHEMA hw_partition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/hw_partition_hash_dml.out b/src/test/regress/expected/hw_partition_hash_dml.out index 390c3b4a3..8bbab4c1a 100644 --- a/src/test/regress/expected/hw_partition_hash_dml.out +++ b/src/test/regress/expected/hw_partition_hash_dml.out @@ -1,25 +1,23 @@ --- section 1: test from delete.sql -create table delete_test_hash ( - id int, - a int, - b text -) partition by hash(a) -( -partition delete_test_hash_p1, -partition delete_test_hash_p2, -partition delete_test_hash_p3); -create index delete_test_hash_index_local1 on delete_test_hash (a) local -( - partition delete_test_hash_p1_index_local tablespace PG_DEFAULT, - partition delete_test_hash_p2_index_local tablespace PG_DEFAULT, - partition delete_test_hash_p3_index_local tablespace PG_DEFAULT -); - -INSERT INTO delete_test_hash (a) VALUES (10); -INSERT INTO delete_test_hash (a, b) VALUES (50, repeat('x', 10000)); -INSERT INTO delete_test_hash (a) VALUES (100); - -SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; +-- section 1: test from delete.sql +create table delete_test_hash ( + id int, + a int, + b text +) partition by hash(a) +( +partition delete_test_hash_p1, +partition delete_test_hash_p2, +partition delete_test_hash_p3); +create index delete_test_hash_index_local1 on delete_test_hash (a) local +( + partition delete_test_hash_p1_index_local tablespace PG_DEFAULT, + partition delete_test_hash_p2_index_local tablespace PG_DEFAULT, + partition delete_test_hash_p3_index_local tablespace PG_DEFAULT +); +INSERT INTO delete_test_hash (a) VALUES (10); +INSERT INTO delete_test_hash (a, b) VALUES (50, repeat('x', 10000)); +INSERT INTO delete_test_hash (a) VALUES (100); +SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; id | a | char_length ----+-----+------------- | 10 | @@ -27,91 +25,80 @@ SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; | 100 | (3 rows) - --- Pseudo Constant Quals -DELETE FROM delete_test_hash where 
null; - --- allow an alias to be specified for DELETE's target table -DELETE FROM delete_test_hash AS dt WHERE dt.a > 75; - --- if an alias is specified, don't allow the original table name --- to be referenced -DELETE FROM delete_test_hash dt WHERE dt.a > 25; - -SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; +-- Pseudo Constant Quals +DELETE FROM delete_test_hash where null; +-- allow an alias to be specified for DELETE's target table +DELETE FROM delete_test_hash AS dt WHERE dt.a > 75; +-- if an alias is specified, don't allow the original table name +-- to be referenced +DELETE FROM delete_test_hash dt WHERE dt.a > 25; +SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; id | a | char_length ----+----+------------- | 10 | (1 row) - --- delete a row with a TOASTed value -DELETE FROM delete_test_hash WHERE a > 25; - -SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; +-- delete a row with a TOASTed value +DELETE FROM delete_test_hash WHERE a > 25; +SELECT id, a, char_length(b) FROM delete_test_hash order by 1, 2, 3; id | a | char_length ----+----+------------- | 10 | (1 row) - -DROP TABLE delete_test_hash; - --- section 2: -create table hw_hash_partition_dml_t1 (id int, name text)partition by hash(id) ( -partition hw_hash_partition_dml_t1_p1, -partition hw_hash_partition_dml_t1_p2, -partition hw_hash_partition_dml_t1_p3); - -create index hw_hash_partition_dml_t1_index_local1 on hw_hash_partition_dml_t1(id) local -( - partition hw_hash_partition_dml_t1_p1_index_local_1 tablespace PG_DEFAULT, - partition hw_hash_partition_dml_t1_p2_index_local_1 tablespace PG_DEFAULT, - partition hw_hash_partition_dml_t1_p3_index_local_1 tablespace PG_DEFAULT -); -create table hw_hash_partition_dml_t2 (id int, name text)partition by hash(id) ( -partition hw_hash_partition_dml_t2_p1, -partition hw_hash_partition_dml_t2_p2, -partition hw_hash_partition_dml_t2_p3); - -create table hw_hash_partition_dml_t3 (id int, name text)partition by hash(id) ( -partition hw_hash_partition_dml_t3_p1, -partition hw_hash_partition_dml_t3_p2, -partition hw_hash_partition_dml_t3_p3); - --- section 2.1: two table join, both are partitioned table -insert into hw_hash_partition_dml_t1 values (1, 'li'), (11, 'wang'), (21, 'zhang'); -insert into hw_hash_partition_dml_t2 values (1, 'xi'), (11, 'zhao'), (27, 'qi'); -insert into hw_hash_partition_dml_t3 values (1, 'qin'), (11, 'he'), (27, 'xiao'); --- delete 10~20 tupes in hw_partition_dml_t1 -with T2_ID_10TH AS -( -SELECT id -FROM hw_hash_partition_dml_t2 -WHERE id >= 10 and id < 20 -ORDER BY id -) -delete from hw_hash_partition_dml_t1 -using hw_hash_partition_dml_t2 -where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t2.id - and hw_hash_partition_dml_t2.id IN - (SELECT id FROM T2_ID_10TH) -RETURNING hw_hash_partition_dml_t1.name; +DROP TABLE delete_test_hash; +-- section 2: +create table hw_hash_partition_dml_t1 (id int, name text)partition by hash(id) ( +partition hw_hash_partition_dml_t1_p1, +partition hw_hash_partition_dml_t1_p2, +partition hw_hash_partition_dml_t1_p3); +create index hw_hash_partition_dml_t1_index_local1 on hw_hash_partition_dml_t1(id) local +( + partition hw_hash_partition_dml_t1_p1_index_local_1 tablespace PG_DEFAULT, + partition hw_hash_partition_dml_t1_p2_index_local_1 tablespace PG_DEFAULT, + partition hw_hash_partition_dml_t1_p3_index_local_1 tablespace PG_DEFAULT +); +create table hw_hash_partition_dml_t2 (id int, name text)partition by hash(id) ( +partition hw_hash_partition_dml_t2_p1, 
+partition hw_hash_partition_dml_t2_p2, +partition hw_hash_partition_dml_t2_p3); +create table hw_hash_partition_dml_t3 (id int, name text)partition by hash(id) ( +partition hw_hash_partition_dml_t3_p1, +partition hw_hash_partition_dml_t3_p2, +partition hw_hash_partition_dml_t3_p3); +-- section 2.1: two table join, both are partitioned table +insert into hw_hash_partition_dml_t1 values (1, 'li'), (11, 'wang'), (21, 'zhang'); +insert into hw_hash_partition_dml_t2 values (1, 'xi'), (11, 'zhao'), (27, 'qi'); +insert into hw_hash_partition_dml_t3 values (1, 'qin'), (11, 'he'), (27, 'xiao'); +-- delete 10~20 tupes in hw_partition_dml_t1 +with T2_ID_10TH AS +( +SELECT id +FROM hw_hash_partition_dml_t2 +WHERE id >= 10 and id < 20 +ORDER BY id +) +delete from hw_hash_partition_dml_t1 +using hw_hash_partition_dml_t2 +where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t2.id + and hw_hash_partition_dml_t2.id IN + (SELECT id FROM T2_ID_10TH) +RETURNING hw_hash_partition_dml_t1.name; name ------ li (1 row) -select * from hw_hash_partition_dml_t1 order by 1, 2; +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------- 11 | wang 21 | zhang (2 rows) --- delete all tupes that is less than 11 in hw_hash_partition_dml_t1, that is 3 -insert into hw_hash_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); -select * from hw_hash_partition_dml_t1 order by 1, 2; +-- delete all tupes that is less than 11 in hw_hash_partition_dml_t1, that is 3 +insert into hw_hash_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------- 3 | AAA @@ -122,13 +109,13 @@ select * from hw_hash_partition_dml_t1 order by 1, 2; 24 | DDD (6 rows) -delete from hw_hash_partition_dml_t1 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 11 RETURNING hw_hash_partition_dml_t1.id; +delete from hw_hash_partition_dml_t1 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 11 RETURNING hw_hash_partition_dml_t1.id; id ---- 3 (1 row) -select * from hw_hash_partition_dml_t1 order by 1, 2; +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------- 11 | wang @@ -138,21 +125,19 @@ select * from hw_hash_partition_dml_t1 order by 1, 2; 24 | DDD (5 rows) - --- section 2.2: delete from only one table, no joining --- delete all tupes remaining: 13, 23, 24 -delete from hw_hash_partition_dml_t1; -select * from hw_hash_partition_dml_t1 order by 1, 2; +-- section 2.2: delete from only one table, no joining +-- delete all tupes remaining: 13, 23, 24 +delete from hw_hash_partition_dml_t1; +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------ (0 rows) - --- section 3: --- section 3.1: two table join, only one is partitioned table --- and target relation is partitioned -insert into hw_hash_partition_dml_t1 values (1, 'AAA'), (11, 'BBB'), (21, 'CCC'); -select * from hw_hash_partition_dml_t1 order by 1, 2; +-- section 3: +-- section 3.1: two table join, only one is partitioned table +-- and target relation is partitioned +insert into hw_hash_partition_dml_t1 values (1, 'AAA'), (11, 'BBB'), (21, 'CCC'); +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------ 1 | AAA @@ -160,16 +145,16 @@ select * from hw_hash_partition_dml_t1 order by 1, 2; 21 | CCC (3 rows) --- delete all tupes in hw_hash_partition_dml_t1 -delete 
from hw_hash_partition_dml_t1 using hw_hash_partition_dml_t3 where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t3.id and hw_hash_partition_dml_t3.id = 27; -select * from hw_hash_partition_dml_t1 order by 1, 2; +-- delete all tupes in hw_hash_partition_dml_t1 +delete from hw_hash_partition_dml_t1 using hw_hash_partition_dml_t3 where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t3.id and hw_hash_partition_dml_t3.id = 27; +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------ (0 rows) --- delete all tupes that is less than 11 in hw_hash_partition_dml_t1, that is 3 -insert into hw_hash_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); -select * from hw_hash_partition_dml_t1 order by 1, 2; +-- delete all tupes that is less than 11 in hw_hash_partition_dml_t1, that is 3 +insert into hw_hash_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------ 3 | AAA @@ -178,8 +163,8 @@ select * from hw_hash_partition_dml_t1 order by 1, 2; 24 | DDD (4 rows) -delete from hw_hash_partition_dml_t1 using hw_hash_partition_dml_t3 where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t3.id and hw_hash_partition_dml_t3.id = 11; -select * from hw_hash_partition_dml_t1 order by 1, 2; +delete from hw_hash_partition_dml_t1 using hw_hash_partition_dml_t3 where hw_hash_partition_dml_t1.id < hw_hash_partition_dml_t3.id and hw_hash_partition_dml_t3.id = 11; +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------ 13 | BBB @@ -187,21 +172,19 @@ select * from hw_hash_partition_dml_t1 order by 1, 2; 24 | DDD (3 rows) - --- section 3.2 delete from only one table, no joining --- delete all tupes remaining: 13, 23, 24 -delete from hw_hash_partition_dml_t1; -select * from hw_hash_partition_dml_t1 order by 1, 2; +-- section 3.2 delete from only one table, no joining +-- delete all tupes remaining: 13, 23, 24 +delete from hw_hash_partition_dml_t1; +select * from hw_hash_partition_dml_t1 order by 1, 2; id | name ----+------ (0 rows) - --- section 3.3: two table join, only one is partitioned table --- and target relation is on-partitioned --- delete all tuples in hw_hash_partition_dml_t3 -insert into hw_hash_partition_dml_t2 values (28, 'EEE'); -select * from hw_hash_partition_dml_t3; +-- section 3.3: two table join, only one is partitioned table +-- and target relation is on-partitioned +-- delete all tuples in hw_hash_partition_dml_t3 +insert into hw_hash_partition_dml_t2 values (28, 'EEE'); +select * from hw_hash_partition_dml_t3; id | name ----+------ 27 | xiao @@ -209,7 +192,7 @@ select * from hw_hash_partition_dml_t3; 1 | qin (3 rows) -select * from hw_hash_partition_dml_t2; +select * from hw_hash_partition_dml_t2; id | name ----+------ 27 | qi @@ -218,17 +201,16 @@ select * from hw_hash_partition_dml_t2; 28 | EEE (4 rows) -delete from hw_hash_partition_dml_t3 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t3.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 28; -select * from hw_hash_partition_dml_t3 order by 1, 2; +delete from hw_hash_partition_dml_t3 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t3.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 28; +select * from hw_hash_partition_dml_t3 order by 1, 2; id | name ----+------ (0 rows) - --- delete all tuples that is less than 11 in hw_hash_partition_dml_t3, that is 3 -insert into hw_hash_partition_dml_t3 values (3, 'AAA'), (13, 'BBB'), 
(23, 'CCC'), (24, 'DDD'); -delete from hw_hash_partition_dml_t3 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t3.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 11; -select * from hw_hash_partition_dml_t3 order by 1, 2; +-- delete all tuples that is less than 11 in hw_hash_partition_dml_t3, that is 3 +insert into hw_hash_partition_dml_t3 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); +delete from hw_hash_partition_dml_t3 using hw_hash_partition_dml_t2 where hw_hash_partition_dml_t3.id < hw_hash_partition_dml_t2.id and hw_hash_partition_dml_t2.id = 11; +select * from hw_hash_partition_dml_t3 order by 1, 2; id | name ----+------ 13 | BBB @@ -236,50 +218,46 @@ select * from hw_hash_partition_dml_t3 order by 1, 2; 24 | DDD (3 rows) - --- section 3.4 delete from only one table, no joining --- delete all tuples remaining: 13, 23, 24 -delete from hw_hash_partition_dml_t3; -select * from hw_hash_partition_dml_t3 order by 1, 2; +-- section 3.4 delete from only one table, no joining +-- delete all tuples remaining: 13, 23, 24 +delete from hw_hash_partition_dml_t3; +select * from hw_hash_partition_dml_t3 order by 1, 2; id | name ----+------ (0 rows) - --- finally, drop table hw_hash_partition_dml_t1, hw_hash_partition_dml_t2 and hw_hash_partition_dml_t3 -drop table hw_hash_partition_dml_t1; -drop table hw_hash_partition_dml_t2; -drop table hw_hash_partition_dml_t3; - -create schema fvt_other_cmd; -CREATE TABLE FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001(COL_INT int) -partition by hash (COL_INT) -( - partition IDEX_LIST_PARTITION_TABLE_001_1, - partition IDEX_LIST_PARTITION_TABLE_001_2, - partition IDEX_LIST_PARTITION_TABLE_001_3 -); -declare -i int; -begin i:=1; -while -i<19990 LOOP -Delete from FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001 where col_int=i; -i:=i+100; -end loop; -end; -/ - -drop table test_index_ht; +-- finally, drop table hw_hash_partition_dml_t1, hw_hash_partition_dml_t2 and hw_hash_partition_dml_t3 +drop table hw_hash_partition_dml_t1; +drop table hw_hash_partition_dml_t2; +drop table hw_hash_partition_dml_t3; +create schema fvt_other_cmd; +CREATE TABLE FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001(COL_INT int) +partition by hash (COL_INT) +( + partition IDEX_LIST_PARTITION_TABLE_001_1, + partition IDEX_LIST_PARTITION_TABLE_001_2, + partition IDEX_LIST_PARTITION_TABLE_001_3 +); +declare +i int; +begin i:=1; +while +i<19990 LOOP +Delete from FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001 where col_int=i; +i:=i+100; +end loop; +end; +/ +drop table test_index_ht; ERROR: table "test_index_ht" does not exist -create table test_index_ht (a int, b int, c int) -partition by hash(a) -( - PARTITION p1, - PARTITION p2 -); -insert into test_index_ht select generate_series(3,6); -explain (costs off, verbose on) select * from test_index_ht order by 1; +create table test_index_ht (a int, b int, c int) +partition by hash(a) +( + PARTITION p1, + PARTITION p2 +); +insert into test_index_ht select generate_series(3,6); +explain (costs off, verbose on) select * from test_index_ht order by 1; QUERY PLAN ---------------------------------------------------------- Sort @@ -293,7 +271,7 @@ explain (costs off, verbose on) select * from test_index_ht order by 1; Selected Partitions: 1..2 (9 rows) -select * from test_index_ht order by 1; +select * from test_index_ht order by 1; a | b | c ---+---+--- 3 | | @@ -302,10 +280,10 @@ select * from test_index_ht order by 1; 6 | | (4 rows) -create index test_exchange_index_lt_ha on test_index_ht (a) local; -set enable_seqscan = 
off; -set enable_bitmapscan = off; -explain (costs off, verbose on) select * from test_index_ht order by 1; +create index test_exchange_index_lt_ha on test_index_ht (a) local; +set enable_seqscan = off; +set enable_bitmapscan = off; +explain (costs off, verbose on) select * from test_index_ht order by 1; QUERY PLAN -------------------------------------------------------------------------------------------- Sort @@ -319,7 +297,7 @@ explain (costs off, verbose on) select * from test_index_ht order by 1; Selected Partitions: 1..2 (9 rows) -select * from test_index_ht order by 1; +select * from test_index_ht order by 1; a | b | c ---+---+--- 3 | | @@ -328,6 +306,6 @@ select * from test_index_ht order by 1; 6 | | (4 rows) -drop table test_index_ht; -drop schema fvt_other_cmd cascade; +drop table test_index_ht; +drop schema fvt_other_cmd cascade; NOTICE: drop cascades to table fvt_other_cmd.idex_list_partition_table_001 diff --git a/src/test/regress/expected/hw_partition_hash_dql.out b/src/test/regress/expected/hw_partition_hash_dql.out index ca5899f42..325f2d1fc 100644 --- a/src/test/regress/expected/hw_partition_hash_dql.out +++ b/src/test/regress/expected/hw_partition_hash_dql.out @@ -1,154 +1,158 @@ - --- ----- test partition for (null) --- - --- 1. test ordinary - -- 1.1 range partitioned table - -- 1.2 interval partitioned table --- 2. test data column of partition key value - -- 2.1 text - -- 2.2 timestamp --- 3. MAXVALUE - -- 3.1 MAXVALUE is first column - -- 3.2 MAXVALUE is second column - -CREATE schema FVT_COMPRESS_QWER; -set search_path to FVT_COMPRESS_QWER; - - --- 1. test ordinary ----- 1.1 range partitioned table -create table test_partition_for_null_hash (a int, b int, c int, d int) -partition by hash (a) -( - partition test_partition_for_null_hash_p1, - partition test_partition_for_null_hash_p2, - partition test_partition_for_null_hash_p3 -); - -insert into test_partition_for_null_hash values (0, 0, 0, 0); -insert into test_partition_for_null_hash values (1, 1, 1, 1); -insert into test_partition_for_null_hash values (5, 5, 5, 5); - --- failed: inserted partition key does not map to any table partition -insert into test_partition_for_null_hash values (null, null, null, null); + +-- +---- test partition for (null) +-- + +-- 1. test ordinary + -- 1.1 range partitioned table + -- 1.2 interval partitioned table +-- 2. test data column of partition key value + -- 2.1 text + -- 2.2 timestamp +-- 3. MAXVALUE + -- 3.1 MAXVALUE is first column + -- 3.2 MAXVALUE is second column + +CREATE schema FVT_COMPRESS_QWER; +set search_path to FVT_COMPRESS_QWER; + + +-- 1. 
test ordinary +---- 1.1 range partitioned table +create table test_partition_for_null_hash (a int, b int, c int, d int) +partition by hash (a) +( + partition test_partition_for_null_hash_p1, + partition test_partition_for_null_hash_p2, + partition test_partition_for_null_hash_p3 +); + +insert into test_partition_for_null_hash values (0, 0, 0, 0); +insert into test_partition_for_null_hash values (1, 1, 1, 1); +insert into test_partition_for_null_hash values (5, 5, 5, 5); + +-- failed: inserted partition key does not map to any table partition +insert into test_partition_for_null_hash values (null, null, null, null); ERROR: inserted partition key does not map to any table partition --- success -insert into test_partition_for_null_hash values (0, null, null, null); - - --- failed: The partition number is invalid or out-of-range -select * from test_partition_for_null_hash partition for (null) order by 1, 2, 3, 4; -ERROR: The partition number is invalid or out-of-range --- success -select * from test_partition_for_null_hash partition for (0) order by 1, 2, 3, 4; +-- success +insert into test_partition_for_null_hash values (0, null, null, null); + + +-- failed: The partition number is invalid or out-of-range +select * from test_partition_for_null_hash partition for (null) order by 1, 2, 3, 4; +ERROR: Cannot find partition by the value +DETAIL: N/A. +-- success +select * from test_partition_for_null_hash partition for (0) order by 1, 2, 3, 4; a | b | c | d ---+---+---+--- 0 | 0 | 0 | 0 0 | | | (2 rows) - - --- failed: The partition number is invalid or out-of-range -alter table test_partition_for_null_hash rename partition for (null) to test_partition_for_null_hash_part1; + + +-- failed: The partition number is invalid or out-of-range +alter table test_partition_for_null_hash rename partition for (null) to test_partition_for_null_hash_part1; ERROR: The partition number is invalid or out-of-range --- success -alter table test_partition_for_null_hash rename partition for (0) to test_partition_for_null_hash_part1; --- success -select * from test_partition_for_null_hash partition (test_partition_for_null_hash_part1) order by 1, 2, 3, 4; +-- success +alter table test_partition_for_null_hash rename partition for (0) to test_partition_for_null_hash_part1; +-- success +select * from test_partition_for_null_hash partition (test_partition_for_null_hash_part1) order by 1, 2, 3, 4; a | b | c | d ---+---+---+--- 0 | 0 | 0 | 0 0 | | | (2 rows) - -alter table test_partition_for_null_hash drop partition for (NULL); + +alter table test_partition_for_null_hash drop partition for (NULL); ERROR: Droping hash partition is unsupported. -alter table test_partition_for_null_hash drop partition for (0); +alter table test_partition_for_null_hash drop partition for (0); ERROR: Droping hash partition is unsupported. 
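-- Editorial sketch (not part of the captured expected output; table name t_hash is
-- hypothetical): the tests above show that PARTITION FOR (value) routes a non-NULL
-- key through the hash function to exactly one partition, that a NULL key cannot be
-- routed, and that DROP PARTITION is rejected outright for hash partitioning.
create table t_hash (a int) partition by hash (a)
(
    partition t_hash_p1,
    partition t_hash_p2
);
insert into t_hash values (0), (1), (2), (3);
-- resolves to whichever partition hash(0) maps to
select * from t_hash partition for (0) order by 1;
-- renaming uses the same routing rule
alter table t_hash rename partition for (0) to t_hash_first;
-- expected to fail, as above: a NULL key maps to no hash partition
select * from t_hash partition for (null) order by 1;
-- expected to fail, as above: hash partitions cannot be dropped
alter table t_hash drop partition for (0);
drop table t_hash;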
- -CREATE TABLE select_hash_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by hash (C_INT) -( - partition select_hash_partition_000_3_1, - partition select_hash_partition_000_3_2 -); - -create index select_list_partition_table_index_000_3 ON select_hash_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); -create view select_list_partition_table_view_000_3 as select * from select_hash_partition_table_000_3; - -INSERT INTO select_hash_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_hash_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_hash_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_hash_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_hash_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_hash_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_hash_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -select * from select_hash_partition_table_000_3 partition for (NULL) order by C_INT; + +CREATE TABLE select_hash_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 
VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by hash (C_INT) +( + partition select_hash_partition_000_3_1, + partition select_hash_partition_000_3_2 +); + +create index select_list_partition_table_index_000_3 ON select_hash_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); +create view select_list_partition_table_view_000_3 as select * from select_hash_partition_table_000_3; + +INSERT INTO select_hash_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_hash_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_hash_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_hash_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_hash_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_hash_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_hash_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +select * from select_hash_partition_table_000_3 partition for (NULL) order by C_INT; +ERROR: Cannot find partition by the value +DETAIL: N/A. 
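-- Editorial sketch (hypothetical objects t_dep and v_dep; not captured output): the
-- updated expectations below suggest that a dependent view blocks partition DDL
-- before any other check, which is why even the unsupported hash DROP PARTITION
-- reports the view dependency first. A minimal reproduction under that assumption:
create table t_dep (a int) partition by hash (a)
(
    partition t_dep_p1,
    partition t_dep_p2
);
create view v_dep as select * from t_dep;
-- expected to fail while v_dep exists, with the same HINT as below
alter table t_dep drop partition for (0);
-- once the view is gone, the hash-specific rejection would surface instead
drop view v_dep;
alter table t_dep drop partition for (0);
drop table t_dep;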
+ +alter table select_hash_partition_table_000_3 rename partition for (NULL) to select_hash_partition_table_000_3_p1; ERROR: The partition number is invalid or out-of-range - -alter table select_hash_partition_table_000_3 rename partition for (NULL) to select_hash_partition_table_000_3_p1; -ERROR: The partition number is invalid or out-of-range - -alter table select_hash_partition_table_000_3 drop partition for (NULL); -ERROR: Droping hash partition is unsupported. - - -CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) -partition by hash(ID) -( - partition partition_wise_join_table_001_1_1, - partition partition_wise_join_table_001_1_2 -) ; - -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); - -create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; -create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; -create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; -create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; - -CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) -partition by hash(ID) -( - partition partition_wise_join_table_001_1_1, - partition partition_wise_join_table_001_1_2 -); - -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); - -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; - -SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +alter table select_hash_partition_table_000_3 drop partition for (NULL); +ERROR: Cannot perform this operation because There are views or rules that depend on table select_hash_partition_table_000_3. +DETAIL: N/A +HINT: drop the views or rules first. Use pg_rules to find rules. 
Use pg_class, pg_rewrite, pg_depend, pg_namespacesql to find views + + +CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) +partition by hash(ID) +( + partition partition_wise_join_table_001_1_1, + partition partition_wise_join_table_001_1_2 +) ; + +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); + +create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; +create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; +create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; +create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; + +CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) +partition by hash(ID) +( + partition partition_wise_join_table_001_1_1, + partition partition_wise_join_table_001_1_2 +); + +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); + +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; + +SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; id | id | rank | salary | salary | address | birthday ----+----+------+--------+--------+----------------------------------------------+-------------------------- 1 | 1 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 @@ -1053,11 +1057,11 @@ SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WI 9 | 9 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 (900 rows) - -ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; -ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; - -SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; +ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; + +SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; id | id | rank 
| salary | salary | address | birthday ----+----+------+--------+--------+----------------------------------------------+-------------------------- 1 | 1 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 @@ -1962,15 +1966,15 @@ SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WI 9 | 9 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 (900 rows) - -CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) -PARTITION BY hash(A) -( - PARTITION HW_PARTITION_SELECT_RT_P1, - PARTITION HW_PARTITION_SELECT_RT_P2, - PARTITION HW_PARTITION_SELECT_RT_P3 -); -EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; + +CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) +PARTITION BY hash(A) +( + PARTITION HW_PARTITION_SELECT_RT_P1, + PARTITION HW_PARTITION_SELECT_RT_P2, + PARTITION HW_PARTITION_SELECT_RT_P3 +); +EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; QUERY PLAN ------------------------------------------------------------------ Sort @@ -1982,55 +1986,55 @@ EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 10 Selected Partitions: 1..3 (7 rows) - -CREATE TABLE DTS2013112504143_TEST1(A INT) PARTITION BY HASH (A)(PARTITION DTS2013112504143_TEST1_P1); -CREATE TABLE DTS2013112504143_TEST2(A INT); -SELECT * FROM DTS2013112504143_TEST1 UNION ALL SELECT * FROM DTS2013112504143_TEST2 order by 1; + +CREATE TABLE TESTTABLE_TEST1(A INT) PARTITION BY HASH (A)(PARTITION TESTTABLE_TEST1_P1); +CREATE TABLE TESTTABLE_TEST2(A INT); +SELECT * FROM TESTTABLE_TEST1 UNION ALL SELECT * FROM TESTTABLE_TEST2 order by 1; a --- (0 rows) - -CREATE TABLE select_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by hash (C_INT) -( - partition select_partition_000_3_1, - partition select_partition_000_3_2 -); - -create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); -create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; - -INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_partition_table_000_3 
VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; + +CREATE TABLE select_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by hash (C_INT) +( + partition select_partition_000_3_1, + partition select_partition_000_3_2 +); + +create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); +create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; + +INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 
06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Sort @@ -2050,8 +2054,8 @@ explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sq Selected Partitions: 1..2 (15 rows) - -select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; + +select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; lower | initcap | sqrt | ?column? 
| rank ---------+---------+------------------+----------+------ abcdefg | Abcdefg | 10.5356537528527 | 4.11 | 1 @@ -2067,18 +2071,18 @@ select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, ijklmno | Ijklmno | 40 | 12.99 | 1 (11 rows) - -create table hw_partition_select_rt5 (a int, b int, c int) -partition by hash(c) -( -partition hw_partition_select_rt5_p1 -); - -alter table hw_partition_select_rt5 drop column b; - -update hw_partition_select_rt5 set c=0 where c=-1; - -drop schema FVT_COMPRESS_QWER cascade; + +create table hw_partition_select_rt5 (a int, b int, c int) +partition by hash(c) +( +partition hw_partition_select_rt5_p1 +); + +alter table hw_partition_select_rt5 drop column b; + +update hw_partition_select_rt5 set c=0 where c=-1; + +drop schema FVT_COMPRESS_QWER cascade; NOTICE: drop cascades to 11 other objects DETAIL: drop cascades to table test_partition_for_null_hash drop cascades to table select_hash_partition_table_000_3 @@ -2086,42 +2090,42 @@ drop cascades to view select_list_partition_table_view_000_3 drop cascades to table partition_wise_join_table_001_1 drop cascades to table partition_wise_join_table_001_2 drop cascades to table hw_partition_select_rt -drop cascades to table dts2013112504143_test1 -drop cascades to table dts2013112504143_test2 +drop cascades to table testtable_test1 +drop cascades to table testtable_test2 drop cascades to table select_partition_table_000_3 drop cascades to view select_partition_table_view_000_3 drop cascades to table hw_partition_select_rt5 - ---begin: these test are related to explain output change about partition table. --- major change is as below - --1. - --Selected Partitions: 1 2 6 7 8 9 - -- \|/ - --Selected Partitions: 1..2,6..9 - --2. - --Selected Partitions: 1 3 5 7 9 - -- \|/ - --Selected Partitions: 1,3,5,7,9 -CREATE schema FVT_COMPRESS; -set search_path to FVT_COMPRESS; - - -create table test_explain_format_on_part_table (id int) -partition by hash(id) -( -partition p1, -partition p2, -partition p3, -partition p4, -partition p5, -partition p6, -partition p7, -partition p8, -partition p9 -); --- two continous segments, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id <15 or id >51; + +--begin: these test are related to explain output change about partition table. +-- major change is as below + --1. + --Selected Partitions: 1 2 6 7 8 9 + -- \|/ + --Selected Partitions: 1..2,6..9 + --2. 
+ --Selected Partitions: 1 3 5 7 9 + -- \|/ + --Selected Partitions: 1,3,5,7,9 +CREATE schema FVT_COMPRESS; +set search_path to FVT_COMPRESS; + + +create table test_explain_format_on_part_table (id int) +partition by hash(id) +( +partition p1, +partition p2, +partition p3, +partition p4, +partition p5, +partition p6, +partition p7, +partition p8, +partition p9 +); +-- two continous segments, text formast +explain (verbose on, costs off) + select * from test_explain_format_on_part_table where id <15 or id >51; QUERY PLAN -------------------------------------------------------------------------------------------------------------- Partition Iterator @@ -2133,9 +2137,9 @@ explain (verbose on, costs off) Selected Partitions: 1..9 (7 rows) --- no continous segment, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; +-- no continous segment, text formast +explain (verbose on, costs off) + select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Partition Iterator @@ -2147,9 +2151,9 @@ explain (verbose on, costs off) Selected Partitions: 3,5..7 (7 rows) --- two continous segments, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id <15 or id >51; +-- two continous segments, non-text formast +explain (verbose on, costs off, FORMAT JSON) + select * from test_explain_format_on_part_table where id <15 or id >51; QUERY PLAN --------------------------------------------------------------------------------------------------------------------- [ + @@ -2175,9 +2179,9 @@ explain (verbose on, costs off, FORMAT JSON) ] (1 row) --- no continous segment, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; +-- no continous segment, non-text formast +explain (verbose on, costs off, FORMAT JSON) + select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [ + @@ -2203,24 +2207,24 @@ explain (verbose on, costs off, FORMAT JSON) ] (1 row) - -drop table test_explain_format_on_part_table; ---end: these test are related to explain output change about partition table. - -create table hw_partition_select_parttable ( - c1 int, - c2 int, - c3 text) -partition by hash(c1) -(partition hw_partition_select_parttable_p1, - partition hw_partition_select_parttable_p2, - partition hw_partition_select_parttable_p3); - - insert into hw_partition_select_parttable values (10,40,'abc'); - insert into hw_partition_select_parttable(c1,c2) values (100,20); - insert into hw_partition_select_parttable values(300,200); - -select * from hw_partition_select_parttable order by 1, 2, 3; + +drop table test_explain_format_on_part_table; +--end: these test are related to explain output change about partition table. 
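The begin/end markers above bracket the change in how selected partitions are printed: consecutive partition numbers collapse into `start..end` ranges, while isolated ones stay comma-separated, so `1 2 6 7 8 9` becomes `1..2,6..9` and `1 3 5 7 9` becomes `1,3,5,7,9`. A self-contained sketch of the two cases, assuming a fresh session (the suite has already dropped its own table):

```
-- Recap sketch, not part of the recorded output.
create table explain_format_demo (id int)
partition by hash(id)
(
    partition p1, partition p2, partition p3,
    partition p4, partition p5, partition p6,
    partition p7, partition p8, partition p9
);
-- A range predicate on a hash key cannot prune, so all nine
-- partitions print as one continuous run:
explain (verbose on, costs off)
  select * from explain_format_demo where id < 15 or id > 51;
--   Selected Partitions: 1..9
-- Equality points prune to scattered partitions, printed as mixed
-- singletons and ranges, e.g. "3,5..7" in the output above.
drop table explain_format_demo;
```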
+ +create table hw_partition_select_parttable ( + c1 int, + c2 int, + c3 text) +partition by hash(c1) +(partition hw_partition_select_parttable_p1, + partition hw_partition_select_parttable_p2, + partition hw_partition_select_parttable_p3); + + insert into hw_partition_select_parttable values (10,40,'abc'); + insert into hw_partition_select_parttable(c1,c2) values (100,20); + insert into hw_partition_select_parttable values(300,200); + +select * from hw_partition_select_parttable order by 1, 2, 3; c1 | c2 | c3 -----+-----+----- 10 | 40 | abc @@ -2228,8 +2232,8 @@ select * from hw_partition_select_parttable order by 1, 2, 3; 300 | 200 | (3 rows) - -select c1 from hw_partition_select_parttable order by 1; + +select c1 from hw_partition_select_parttable order by 1; c1 ----- 10 @@ -2237,8 +2241,8 @@ select c1 from hw_partition_select_parttable order by 1; 300 (3 rows) - -select c1,c2 from hw_partition_select_parttable order by 1, 2; + +select c1,c2 from hw_partition_select_parttable order by 1, 2; c1 | c2 -----+----- 10 | 40 @@ -2246,8 +2250,8 @@ select c1,c2 from hw_partition_select_parttable order by 1, 2; 300 | 200 (3 rows) - -select c2 from hw_partition_select_parttable order by 1; + +select c2 from hw_partition_select_parttable order by 1; c2 ----- 20 @@ -2255,8 +2259,8 @@ select c2 from hw_partition_select_parttable order by 1; 200 (3 rows) - -select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; + +select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; c1 | c2 | c3 -----+-----+----- 10 | 40 | abc @@ -2264,112 +2268,112 @@ select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; 300 | 200 | (3 rows) - -select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; + +select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; c1 ----- 100 (1 row) - -select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; + +select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; c1 | c2 | c3 -----+-----+---- 300 | 200 | (1 row) - -create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) -partition by hash(c1) -(partition t_select_datatype_int32_p1, - partition t_select_datatype_int32_p2, - partition t_select_datatype_int32_p3, - partition t_select_datatype_int32_p4); - -insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); - ---partition select for int32 ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; + +create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) +partition by hash(c1) +(partition t_select_datatype_int32_p1, + partition t_select_datatype_int32_p2, + partition t_select_datatype_int32_p3, + partition t_select_datatype_int32_p4); + +insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); + +--partition select for int32 +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 100 | 300 | 300 | bb (1 row) - 
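These equality probes can, in principle, each be satisfied from a single partition: the planner hashes the constant and keeps only the matching partition. The recorded output omits the plans, so the following EXPLAIN is a hypothetical check rather than part of the suite:

```
-- Hypothetical check, not part of the recorded output: an equality
-- predicate on the hash partition key should prune to one partition.
explain (costs off)
  select * from t_select_datatype_int32 where c1 = 100;
-- Expect a Partition Iterator whose "Selected Partitions" names a
-- single partition, since hash(100) identifies exactly one of the four.
```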
---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+----+----+---- 250 | 50 | 50 | (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a @@ -2377,9 +2381,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order 150 | 75 | 500 | (3 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+----- -100 | 20 | 20 | a @@ -2388,9 +2392,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 orde 200 | 500 | 50 | ccc (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2402,9 +2406,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order 450 | 35 | 150 | dddd (7 rows) - ---success 
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2416,9 +2420,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 orde 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2430,9 +2434,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2444,9 +2448,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 orde 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2457,9 +2461,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2470,9 +2474,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2482,9 +2486,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2495,9 +2499,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 orde 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 200 | 500 | 50 | ccc @@ -2506,9 +2510,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order 450 | 35 | 150 | dddd (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; c1 | 
c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2518,9 +2522,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 orde 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 250 | 50 | 50 | @@ -2528,9 +2532,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order 450 | 35 | 150 | dddd (3 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 200 | 500 | 50 | ccc @@ -2539,45 +2543,45 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 orde 450 | 35 | 150 | dddd (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2587,9 +2591,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_ 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2599,17 +2603,17 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t 450 | 35 | 150 | dddd (5 rows) - 
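The long run of `<`, `<=`, `>`, `>=` probes above and below verifies result correctness rather than pruning: hashing does not preserve the ordering of `c1`, so a range predicate on the hash key cannot exclude any partition. A hypothetical plan check, not part of the recorded output:

```
-- Hypothetical illustration: range predicates on a hash partition key
-- cannot prune, because hash(c1) does not preserve the ordering of c1.
explain (costs off)
  select * from t_select_datatype_int32 where c1 > 100 and c1 < 300;
-- Expect all four partitions in "Selected Partitions"; the range is
-- applied as an ordinary filter inside each partition scan, unlike the
-- single-partition pruning possible for an equality predicate.
```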
---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 100 | 300 | 300 | bb (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+----- 100 | 300 | 300 | bb @@ -2618,9 +2622,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND 250 | 50 | 50 | (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2631,9 +2635,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2643,24 +2647,24 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+----+----- 200 | 500 | 50 | ccc (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+----- -100 | 20 | 20 | a @@ -2669,9 +2673,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_s 200 | 500 | 50 | ccc (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 
3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2683,9 +2687,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_s 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2696,9 +2700,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_s 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2709,9 +2713,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_s 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2722,9 +2726,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_ 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2735,9 +2739,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2749,9 +2753,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+----- -100 | 20 | 20 | a @@ -2760,9 +2764,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_ 200 | 500 | 50 | ccc (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where 
t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2774,9 +2778,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_ 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2784,45 +2788,45 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_ 450 | 35 | 150 | dddd (3 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 300 | 700 | 125 | (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+-----+------ -100 | 20 | 20 | a 450 | 35 | 150 | dddd (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2830,15 +2834,15 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND 450 | 35 | 150 | dddd (3 rows) - ---IS NULL ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR 
t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) OR - t_select_datatype_int32.c4 IS NULL - ORDER BY 1, 2, 3, 4; + +--IS NULL +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) OR + t_select_datatype_int32.c4 IS NULL + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 150 | 75 | 500 | @@ -2846,38 +2850,38 @@ select * from t_select_datatype_int32 where 300 | 700 | 125 | (3 rows) - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) AND - t_select_datatype_int32.c4 IS NULL - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) AND + t_select_datatype_int32.c4 IS NULL + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where - t_select_datatype_int32.c4 IS NULL AND - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + t_select_datatype_int32.c4 IS NULL AND + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where - t_select_datatype_int32.c4 IS NULL OR - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + t_select_datatype_int32.c4 IS NULL OR + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 150 | 75 | 500 | @@ -2885,169 +2889,171 @@ select * from t_select_datatype_int32 where 300 | 700 | 125 | (3 rows) - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND - (t_select_datatype_int32.c2100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND + (t_select_datatype_int32.c2100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - 
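The `IS NULL` variants just above probe how predicates on the non-partitioning column `c4` interact with pruning: once such a predicate is OR-ed in at the top level, no partition can be excluded, because a qualifying row may live anywhere. A hypothetical plan check, not part of the recorded output:

```
-- Hypothetical illustration: OR-ing in a predicate on a column that is
-- not the partition key forces a scan of every partition, since a row
-- with c4 IS NULL may live in any of them.
explain (costs off)
  select * from t_select_datatype_int32
  where c4 is null or c1 = 100;
-- Expect all partitions selected; with AND instead of OR, pruning by
-- c1 = 100 would still be possible.
```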
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --- check select contarins partition - --- ----- check select from range partition --- - -create table hw_partition_select_ordinary_table (a int, b int); - -create table test_select_hash_partition (a int, b int) -partition by hash(a) -( - partition test_select_hash_partition_p1, - partition test_select_hash_partition_p2, - partition test_select_hash_partition_p3 -); - -insert into test_select_hash_partition values(2); - ---success -select * from test_select_hash_partition partition (test_select_hash_partition_p1) order by 1, 2; + +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +-- check select contarins partition + +-- +---- check select from range partition +-- + +create table hw_partition_select_ordinary_table (a int, b int); + +create table test_select_hash_partition (a int, b int) +partition by hash(a) +( + partition test_select_hash_partition_p1, + partition test_select_hash_partition_p2, + partition test_select_hash_partition_p3 +); + +insert into test_select_hash_partition values(2); + +--success +select * from test_select_hash_partition partition (test_select_hash_partition_p1) order by 1, 2; a | b ---+--- (0 rows) - ---success -select * from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1, 2; + +--success +select * from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1, 2; a | b ---+--- 2 | (1 row) - ---success -select * from test_select_hash_partition partition (test_select_hash_partition_p3) order by 1, 2; + +--success +select * from test_select_hash_partition partition (test_select_hash_partition_p3) order by 1, 2; a | b ---+--- (0 rows) - ---success -select * from test_select_hash_partition partition (test_select_hash_partition_p4) order by 1, 2; + +--success +select * from test_select_hash_partition partition (test_select_hash_partition_p4) order by 1, 2; ERROR: partition "test_select_hash_partition_p4" of relation "test_select_hash_partition" does not exist - ---success -select a from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1; + +--success +select a from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1; a --- 2 (1 row) - ---success -select a from test_select_hash_partition partition for (0) order by 1; + +--success +select a from test_select_hash_partition partition for (0) order by 1; a --- 2 (1 row) - ---success -select a from test_select_hash_partition partition for (1) order by 1; + +--success +select a from test_select_hash_partition partition for (1) order by 1; a --- (0 rows) - ---success -select a from test_select_hash_partition partition for (2) order by 1; + +--success +select a from test_select_hash_partition partition for (2) order by 1; a --- 2 (1 row) - ---success -select a from test_select_hash_partition partition for (5) order by 1; + +--success +select a from test_select_hash_partition partition for (5) order by 1; a --- (0 rows) - ---success -select a from test_select_hash_partition partition for (8) order by 1; + +--success +select a from test_select_hash_partition partition for (8) order by 1; a --- (0 rows) - --- fail: table is not partitioned table -select a from hw_partition_select_ordinary_table partition 
(test_select_hash_partition_p2); + +-- fail: table is not partitioned table +select a from hw_partition_select_ordinary_table partition (test_select_hash_partition_p2); ERROR: relation "hw_partition_select_ordinary_table" is not partitioned table - --- fail: table is not partitioned table -select a from hw_partition_select_ordinary_table partition for (2); +DETAIL: N/A. + +-- fail: table is not partitioned table +select a from hw_partition_select_ordinary_table partition for (2); ERROR: relation "hw_partition_select_ordinary_table" is not partitioned table - --- --- -CREATE TABLE hw_partition_select_test(C_INT INTEGER) - partition by hash (C_INT) -( - partition hw_partition_select_test_part_1, - partition hw_partition_select_test_part_2, - partition hw_partition_select_test_part_3 -); -insert into hw_partition_select_test values(111); -insert into hw_partition_select_test values(555); -insert into hw_partition_select_test values(888); - -select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a; +DETAIL: N/A. + +-- +-- +CREATE TABLE hw_partition_select_test(C_INT INTEGER) + partition by hash (C_INT) +( + partition hw_partition_select_test_part_1, + partition hw_partition_select_test_part_2, + partition hw_partition_select_test_part_3 +); +insert into hw_partition_select_test values(111); +insert into hw_partition_select_test values(555); +insert into hw_partition_select_test values(888); + +select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a; c_int ------- 888 (1 row) - -create table hash_partitioned_table (a int) -partition by hash(a) -( - partition hash_partitioned_table_p1, - partition hash_partitioned_table_p2, - partition hash_partitioned_table_p3 -); - -insert into hash_partitioned_table values (1); -insert into hash_partitioned_table values (2); -insert into hash_partitioned_table values (5); -insert into hash_partitioned_table values (6); - -with tmp1 as (select a from hash_partitioned_table partition for (2)) select a from tmp1 order by 1; + +create table hash_partitioned_table (a int) +partition by hash(a) +( + partition hash_partitioned_table_p1, + partition hash_partitioned_table_p2, + partition hash_partitioned_table_p3 +); + +insert into hash_partitioned_table values (1); +insert into hash_partitioned_table values (2); +insert into hash_partitioned_table values (5); +insert into hash_partitioned_table values (6); + +with tmp1 as (select a from hash_partitioned_table partition for (2)) select a from tmp1 order by 1; a --- 2 6 (2 rows) - -drop schema FVT_COMPRESS cascade; + +drop schema FVT_COMPRESS cascade; NOTICE: drop cascades to 6 other objects DETAIL: drop cascades to table hw_partition_select_parttable drop cascades to table t_select_datatype_int32 @@ -3055,8 +3061,8 @@ drop cascades to table hw_partition_select_ordinary_table drop cascades to table test_select_hash_partition drop cascades to table hw_partition_select_test drop cascades to table hash_partitioned_table - - - - - + + + + + diff --git a/src/test/regress/expected/hw_partition_interval_index.out b/src/test/regress/expected/hw_partition_interval_index.out index d8dd02d24..561ac4310 100644 --- a/src/test/regress/expected/hw_partition_interval_index.out +++ b/src/test/regress/expected/hw_partition_interval_index.out @@ -143,14 +143,14 @@ create unique index ip_index_local2 on hw_partition_index_ip (c1) local partition sip2_index_local tablespace PG_DEFAULT, partition sip3_index_local tablespace PG_DEFAULT ); -ERROR: unique index columns must contain 
the partition key and collation must be default collation +ERROR: unique local index columns must contain all the partition keys and collation must be default collation create unique index ip_index_local3 on hw_partition_index_ip (c2, c1) local ( partition sip1_index_local tablespace PG_DEFAULT, partition sip2_index_local tablespace PG_DEFAULT, partition sip3_index_local tablespace PG_DEFAULT ); -ERROR: unique index columns must contain the partition key and collation must be default collation +ERROR: unique local index columns must contain all the partition keys and collation must be default collation --insert into table insert into hw_partition_index_ip values(7,2,'2020-03-01'); insert into hw_partition_index_ip values(3,1,'2020-04-01'); @@ -260,8 +260,11 @@ INTERVAL ('1 month') ); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "interval_partition_table_003_pkey" for table "interval_partition_table_003" create index interval_partition_table_003_1 ON interval_partition_table_003 USING HASH (logdate) LOCAL; +ERROR: access method "hash" does not support row store create index interval_partition_table_003_2 ON interval_partition_table_003 USING HASH (c2) LOCAL; +ERROR: access method "hash" does not support row store create index interval_partition_table_003_3 ON interval_partition_table_003 USING HASH (c1) LOCAL; +ERROR: access method "hash" does not support row store select relname from pg_partition where INDEXTBLID=(select RELFILENODE from pg_partition where relname='interval_partition_table_003_1') order by 1; relname --------- @@ -391,7 +394,7 @@ INTERVAL('1 MONTH') ( PARTITION p0 VALUES LESS THAN (TO_DATE('1-1-2008', 'DD-MM-YYYY')), PARTITION p1 VALUES LESS THAN (TO_DATE('6-5-2008', 'DD-MM-YYYY')) ); -select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition; +select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition order by relname; relname | has_toastrelid | boundaries ----------------+----------------+------------------------------ interval_sales | FALSE | @@ -400,7 +403,7 @@ select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_ (3 rows) insert into interval_sales values (generate_series(1,10), generate_series(1,10), generate_series(TO_DATE('2020-01-01', 'YYYY-MM-DD'),TO_DATE('2020-07-01', 'YYYY-MM-DD'),'1 day'), 1, 1, 1, 1); -select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition; +select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition order by relname; relname | has_toastrelid | boundaries ----------------+----------------+------------------------------ interval_sales | FALSE | diff --git a/src/test/regress/expected/hw_partition_interval_select.out b/src/test/regress/expected/hw_partition_interval_select.out index 1bce56ca3..aa53e2258 100644 --- a/src/test/regress/expected/hw_partition_interval_select.out +++ b/src/test/regress/expected/hw_partition_interval_select.out @@ -11,7 +11,7 @@ INTERVAL ('1 day') ); insert into interval_tab1 values(1,'2020-4-7 2:0:0', 1, 1); insert into interval_tab1 values(1,'2020-4-8 2:0:0', 1, 1); -select relname, boundaries from pg_partition; +select relname, boundaries from pg_partition order by 1,2; relname | boundaries ---------------+------------------------------ interval_tab1 | @@ -20,7 +20,7 @@ select relname, boundaries from pg_partition; sys_p2 | 
{"Thu Apr 09 00:00:00 2020"} (4 rows) -select * from interval_tab1 where logdate < '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate < '2020-4-7 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+---------+----------+----------- (0 rows) @@ -37,7 +37,7 @@ explain (costs off, verbose on) select * from interval_tab1 where logdate < '202 Selected Partitions: 1 (7 rows) -select * from interval_tab1 where logdate > '2020-4-6'; +select * from interval_tab1 where logdate > '2020-4-6' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 @@ -56,36 +56,36 @@ explain (costs off, verbose on) select * from interval_tab1 where logdate > '202 Selected Partitions: 2..3 (7 rows) -select * from interval_tab1 where logdate = '2020-4-7 2:0:0'; +select * from interval_tab1 where logdate = '2020-4-7 2:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 (1 row) insert into interval_tab1 values(1,'2020-4-7 0:0:0', 1, 1); -select * from interval_tab1 where logdate = '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate = '2020-4-7 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- 1 | Tue Apr 07 00:00:00 2020 | 1 | 1 (1 row) -select * from interval_tab1 where logdate != '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate != '2020-4-7 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Wed Apr 08 02:00:00 2020 | 1 | 1 (2 rows) -select * from interval_tab1 where logdate >= '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- - 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Tue Apr 07 00:00:00 2020 | 1 | 1 + 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Wed Apr 08 02:00:00 2020 | 1 | 1 (3 rows) insert into interval_tab1 values(1,'2020-4-5 2:0:0', 1, 1); -select relname, boundaries from pg_partition; +select relname, boundaries from pg_partition order by 1,2; relname | boundaries ---------------+------------------------------ interval_tab1 | @@ -96,15 +96,15 @@ select relname, boundaries from pg_partition; (5 rows) insert into interval_tab1 values(1,'2020-4-9 0:0:0', 1, 1); -select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate < '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate < '2020-4-9 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- - 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Tue Apr 07 00:00:00 2020 | 1 | 1 + 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Wed Apr 08 02:00:00 2020 | 1 | 1 (3 rows) -select * from interval_tab1 where logdate > '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate > '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 @@ -112,20 +112,20 @@ select * from interval_tab1 where logdate > '2020-4-7 0:0:0' and logdate <= '202 1 | Thu Apr 09 00:00:00 2020 | 1 | 1 (3 rows) -select * from 
interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- - 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Tue Apr 07 00:00:00 2020 | 1 | 1 + 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Wed Apr 08 02:00:00 2020 | 1 | 1 1 | Thu Apr 09 00:00:00 2020 | 1 | 1 (4 rows) -select * from interval_tab1 where logdate > '2020-4-6 0:0:0' and logdate <= '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate > '2020-4-6 0:0:0' and logdate <= '2020-4-9 0:0:0' order by 1,2,3,4; city_id | logdate | peaktemp | unitsales ---------+--------------------------+----------+----------- - 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Tue Apr 07 00:00:00 2020 | 1 | 1 + 1 | Tue Apr 07 02:00:00 2020 | 1 | 1 1 | Wed Apr 08 02:00:00 2020 | 1 | 1 1 | Thu Apr 09 00:00:00 2020 | 1 | 1 (4 rows) diff --git a/src/test/regress/expected/hw_partition_list_ddl.out b/src/test/regress/expected/hw_partition_list_ddl.out index 394c4ed8b..911c65615 100644 --- a/src/test/regress/expected/hw_partition_list_ddl.out +++ b/src/test/regress/expected/hw_partition_list_ddl.out @@ -1,279 +1,273 @@ -CREATE schema FVT_COMPRESS_QWER; -set search_path to FVT_COMPRESS_QWER; -create table bmsql_order_line ( - ol_w_id integer not null, - ol_d_id integer not null, - ol_o_id integer not null, - ol_number integer not null, - ol_i_id integer not null, - ol_delivery_d timestamp, - ol_amount decimal(6,2), - ol_supply_w_id integer, - ol_quantity integer, - ol_dist_info char(24) -) -partition by list(ol_d_id) -( - partition p0 values (1,4,7), - partition p1 values (2,5,8), - partition p2 values (3,6,9) -); -alter table bmsql_order_line add constraint bmsql_order_line_pkey primary key (ol_w_id, ol_d_id, ol_o_id, ol_number); +CREATE schema FVT_COMPRESS_QWER; +set search_path to FVT_COMPRESS_QWER; +create table bmsql_order_line ( + ol_w_id integer not null, + ol_d_id integer not null, + ol_o_id integer not null, + ol_number integer not null, + ol_i_id integer not null, + ol_delivery_d timestamp, + ol_amount decimal(6,2), + ol_supply_w_id integer, + ol_quantity integer, + ol_dist_info char(24) +) +partition by list(ol_d_id) +( + partition p0 values (1,4,7), + partition p1 values (2,5,8), + partition p2 values (3,6,9) +); +alter table bmsql_order_line add constraint bmsql_order_line_pkey primary key (ol_w_id, ol_d_id, ol_o_id, ol_number); NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "bmsql_order_line_pkey" for table "bmsql_order_line" -insert into bmsql_order_line(ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_dist_info) values(1, 1, 1, 1, 1, '123'); -update bmsql_order_line set ol_dist_info='ss' where ol_w_id =1; -delete from bmsql_order_line; - -create table test_partition_for_null_list_timestamp -( - a timestamp without time zone, - b timestamp with time zone, - c int, - d int) -partition by list (a) -( - partition test_partition_for_null_list_timestamp_p1 values ('2000-01-01 01:01:01', '2000-01-01 01:01:02'), - partition test_partition_for_null_list_timestamp_p2 values ('2000-02-02 02:02:02', '2000-02-02 02:02:04'), - partition test_partition_for_null_list_timestamp_p3 values ('2000-03-03 03:03:03', '2000-03-03 03:03:06') -); -create index idx_test_partition_for_null_list_timestamp_1 on test_partition_for_null_list_timestamp(a) LOCAL; -create index idx_test_partition_for_null_list_timestamp_2 on 
test_partition_for_null_list_timestamp(a,b) LOCAL; -create index idx_test_partition_for_null_list_timestamp_3 on test_partition_for_null_list_timestamp(c) LOCAL; -create index idx_test_partition_for_null_list_timestamp_4 on test_partition_for_null_list_timestamp(b,c,d) LOCAL; - -create table test_partition_for_null_list_text (a text, b varchar(2), c char(1), d varchar(2)) -partition by list (a) -( - partition test_partition_for_null_list_text_p1 values ('A'), - partition test_partition_for_null_list_text_p2 values ('B','C','D','E'), - partition test_partition_for_null_list_text_p3 values ('F','G') -); +insert into bmsql_order_line(ol_w_id, ol_d_id, ol_o_id, ol_number, ol_i_id, ol_dist_info) values(1, 1, 1, 1, 1, '123'); +update bmsql_order_line set ol_dist_info='ss' where ol_w_id =1; +delete from bmsql_order_line; +create table test_partition_for_null_list_timestamp +( + a timestamp without time zone, + b timestamp with time zone, + c int, + d int) +partition by list (a) +( + partition test_partition_for_null_list_timestamp_p1 values ('2000-01-01 01:01:01', '2000-01-01 01:01:02'), + partition test_partition_for_null_list_timestamp_p2 values ('2000-02-02 02:02:02', '2000-02-02 02:02:04'), + partition test_partition_for_null_list_timestamp_p3 values ('2000-03-03 03:03:03', '2000-03-03 03:03:06') +); +create index idx_test_partition_for_null_list_timestamp_1 on test_partition_for_null_list_timestamp(a) LOCAL; +create index idx_test_partition_for_null_list_timestamp_2 on test_partition_for_null_list_timestamp(a,b) LOCAL; +create index idx_test_partition_for_null_list_timestamp_3 on test_partition_for_null_list_timestamp(c) LOCAL; +create index idx_test_partition_for_null_list_timestamp_4 on test_partition_for_null_list_timestamp(b,c,d) LOCAL; +create table test_partition_for_null_list_text (a text, b varchar(2), c char(1), d varchar(2)) +partition by list (a) +( + partition test_partition_for_null_list_text_p1 values ('A'), + partition test_partition_for_null_list_text_p2 values ('B','C','D','E'), + partition test_partition_for_null_list_text_p3 values ('F','G') +); ERROR: column a cannot serve as a list partitioning column because of its datatype -create index idx_test_partition_for_null_list_text_1 on test_partition_for_null_list_text(a) LOCAL; +create index idx_test_partition_for_null_list_text_1 on test_partition_for_null_list_text(a) LOCAL; ERROR: relation "test_partition_for_null_list_text" does not exist -create index idx_test_partition_for_null_list_text_2 on test_partition_for_null_list_text(a,b) LOCAL; +create index idx_test_partition_for_null_list_text_2 on test_partition_for_null_list_text(a,b) LOCAL; ERROR: relation "test_partition_for_null_list_text" does not exist -create index idx_test_partition_for_null_list_text_3 on test_partition_for_null_list_text(c) LOCAL; +create index idx_test_partition_for_null_list_text_3 on test_partition_for_null_list_text(c) LOCAL; ERROR: relation "test_partition_for_null_list_text" does not exist -create index idx_test_partition_for_null_list_text_4 on test_partition_for_null_list_text(b,c,d) LOCAL; +create index idx_test_partition_for_null_list_text_4 on test_partition_for_null_list_text(b,c,d) LOCAL; ERROR: relation "test_partition_for_null_list_text" does not exist -create index idx_test_partition_for_null_list_text_5 on test_partition_for_null_list_text(b,c,d); +create index idx_test_partition_for_null_list_text_5 on test_partition_for_null_list_text(b,c,d); ERROR: relation "test_partition_for_null_list_text" does not exist - -CREATE TABLE 
select_partition_table_000_1( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_BIGINT) -( - partition select_partition_000_1_1 values (1,2,3,4), - partition select_partition_000_1_2 values (5,6,7,8,9) -); -create index idx_select_partition_table_000_1_1 on select_partition_table_000_1(C_CHAR_1) LOCAL; -create index idx_select_partition_table_000_1_2 on select_partition_table_000_1(C_CHAR_1,C_VARCHAR_1) LOCAL; -create index idx_select_partition_table_000_1_3 on select_partition_table_000_1(C_BIGINT) LOCAL; -create index idx_select_partition_table_000_1_4 on select_partition_table_000_1(C_BIGINT,C_TS_WITH,C_DP) LOCAL; -create index idx_select_partition_table_000_1_5 on select_partition_table_000_1(C_BIGINT,C_NUMERIC,C_TS_WITHOUT); - -CREATE TABLE select_partition_table_000_2( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_SMALLINT) -( - partition select_partition_000_2_1 values (1,2,3,4), - partition select_partition_000_2_2 values (5,6,7,8,9) -); -create index idx_select_partition_table_000_2_1 on select_partition_table_000_2(C_CHAR_2) LOCAL; -create index idx_select_partition_table_000_2_2 on select_partition_table_000_2(C_CHAR_2,C_VARCHAR_2) LOCAL; -create index idx_select_partition_table_000_2_3 on select_partition_table_000_2(C_SMALLINT) LOCAL; -create index idx_select_partition_table_000_2_4 on select_partition_table_000_2(C_SMALLINT,C_TS_WITH,C_DP) LOCAL; -create index idx_select_partition_table_000_2_5 on select_partition_table_000_2(C_SMALLINT,C_NUMERIC,C_TS_WITHOUT); - -CREATE TABLE select_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_NUMERIC) -( - partition select_partition_000_3_1 values (1,2,3,4), - partition select_partition_000_3_2 values (5,6,7,8,9) -); -CREATE TABLE select_partition_table_000_4( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_DP) -( - partition select_partition_000_4_1 values (1,2,3,4), - partition select_partition_000_4_2 values (5,6,7,8,9) -); +CREATE TABLE select_partition_table_000_1( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT 
BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_BIGINT) +( + partition select_partition_000_1_1 values (1,2,3,4), + partition select_partition_000_1_2 values (5,6,7,8,9) +); +create index idx_select_partition_table_000_1_1 on select_partition_table_000_1(C_CHAR_1) LOCAL; +create index idx_select_partition_table_000_1_2 on select_partition_table_000_1(C_CHAR_1,C_VARCHAR_1) LOCAL; +create index idx_select_partition_table_000_1_3 on select_partition_table_000_1(C_BIGINT) LOCAL; +create index idx_select_partition_table_000_1_4 on select_partition_table_000_1(C_BIGINT,C_TS_WITH,C_DP) LOCAL; +create index idx_select_partition_table_000_1_5 on select_partition_table_000_1(C_BIGINT,C_NUMERIC,C_TS_WITHOUT); +CREATE TABLE select_partition_table_000_2( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_SMALLINT) +( + partition select_partition_000_2_1 values (1,2,3,4), + partition select_partition_000_2_2 values (5,6,7,8,9) +); +create index idx_select_partition_table_000_2_1 on select_partition_table_000_2(C_CHAR_2) LOCAL; +create index idx_select_partition_table_000_2_2 on select_partition_table_000_2(C_CHAR_2,C_VARCHAR_2) LOCAL; +create index idx_select_partition_table_000_2_3 on select_partition_table_000_2(C_SMALLINT) LOCAL; +create index idx_select_partition_table_000_2_4 on select_partition_table_000_2(C_SMALLINT,C_TS_WITH,C_DP) LOCAL; +create index idx_select_partition_table_000_2_5 on select_partition_table_000_2(C_SMALLINT,C_NUMERIC,C_TS_WITHOUT); +CREATE TABLE select_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_NUMERIC) +( + partition select_partition_000_3_1 values (1,2,3,4), + partition select_partition_000_3_2 values (5,6,7,8,9) +); +CREATE TABLE select_partition_table_000_4( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_DP) +( + partition select_partition_000_4_1 values (1,2,3,4), + partition select_partition_000_4_2 values (5,6,7,8,9) +); ERROR: column c_dp cannot serve as a list partitioning column because of its datatype - -create table test_list_default (a int, b int) -partition by list(a) -( - partition p1 values (2000), - partition p2 values (3000), - partition p3 values (4000), - partition p4 values (default) -); -insert into test_list_default values(5000); -select * from test_list_default; +create table test_list_default (a int, b int) +partition by list(a) 
+( + partition p1 values (2000), + partition p2 values (3000), + partition p3 values (4000), + partition p4 values (default) +); +insert into test_list_default values(5000); +select * from test_list_default; a | b ------+--- 5000 | (1 row) -select * from test_list_default partition (p4); +select * from test_list_default partition (p4); a | b ------+--- 5000 | (1 row) -select * from test_list_default partition for (5000); +select * from test_list_default partition for (5000); a | b ------+--- 5000 | (1 row) -drop table test_list_default; -create table test_list (a int, b int) -partition by list(a) -( -partition p1 values ( 1 ), -partition p2 values ( 2 ), -partition p3 values ( 3 ), -partition p4 values ( 4 ), -partition p5 values ( 5 ), -partition p6 values ( 6 ), -partition p7 values ( 7 ), -partition p8 values ( 8 ), -partition p9 values ( 9 ), -partition p10 values ( 10 ), -partition p11 values ( 11 ), -partition p12 values ( 12 ), -partition p13 values ( 13 ), -partition p14 values ( 14 ), -partition p15 values ( 15 ), -partition p16 values ( 16 ), -partition p17 values ( 17 ), -partition p18 values ( 18 ), -partition p19 values ( 19 ), -partition p20 values ( 20 ), -partition p21 values ( 21 ), -partition p22 values ( 22 ), -partition p23 values ( 23 ), -partition p24 values ( 24 ), -partition p25 values ( 25 ), -partition p26 values ( 26 ), -partition p27 values ( 27 ), -partition p28 values ( 28 ), -partition p29 values ( 29 ), -partition p30 values ( 30 ), -partition p31 values ( 31 ), -partition p32 values ( 32 ), -partition p33 values ( 33 ), -partition p34 values ( 34 ), -partition p35 values ( 35 ), -partition p36 values ( 36 ), -partition p37 values ( 37 ), -partition p38 values ( 38 ), -partition p39 values ( 39 ), -partition p40 values ( 40 ), -partition p41 values ( 41 ), -partition p42 values ( 42 ), -partition p43 values ( 43 ), -partition p44 values ( 44 ), -partition p45 values ( 45 ), -partition p46 values ( 46 ), -partition p47 values ( 47 ), -partition p48 values ( 48 ), -partition p49 values ( 49 ), -partition p50 values ( 50 ), -partition p51 values ( 51 ), -partition p52 values ( 52 ), -partition p53 values ( 53 ), -partition p54 values ( 54 ), -partition p55 values ( 55 ), -partition p56 values ( 56 ), -partition p57 values ( 57 ), -partition p58 values ( 58 ), -partition p59 values ( 59 ), -partition p60 values ( 60 ), -partition p61 values ( 61 ), -partition p62 values ( 62 ), -partition p63 values ( 63 ), -partition p64 values ( 64 ), -partition p65 values ( 65 ), -partition p66 values ( 66 ), -partition p67 values ( 67 ), -partition p68 values ( 68 ), -partition p69 values ( 69 ), -partition p70 values ( 70 ), -partition p71 values ( 71 ), -partition p72 values ( 72 ), -partition p73 values ( 73 ), -partition p74 values ( 74 ), -partition p75 values ( 75 ), -partition p76 values ( 76 ), -partition p77 values ( 77 ), -partition p78 values ( 78 ), -partition p79 values ( 79 ), -partition p80 values ( 80 ), -partition p81 values ( 81 ), -partition p82 values ( 82 ) -); -drop table test_list; -drop schema FVT_COMPRESS_QWER cascade; +drop table test_list_default; +create table test_list (a int, b int) +partition by list(a) +( +partition p1 values ( 1 ), +partition p2 values ( 2 ), +partition p3 values ( 3 ), +partition p4 values ( 4 ), +partition p5 values ( 5 ), +partition p6 values ( 6 ), +partition p7 values ( 7 ), +partition p8 values ( 8 ), +partition p9 values ( 9 ), +partition p10 values ( 10 ), +partition p11 values ( 11 ), +partition p12 values ( 12 ), 
+partition p13 values ( 13 ), +partition p14 values ( 14 ), +partition p15 values ( 15 ), +partition p16 values ( 16 ), +partition p17 values ( 17 ), +partition p18 values ( 18 ), +partition p19 values ( 19 ), +partition p20 values ( 20 ), +partition p21 values ( 21 ), +partition p22 values ( 22 ), +partition p23 values ( 23 ), +partition p24 values ( 24 ), +partition p25 values ( 25 ), +partition p26 values ( 26 ), +partition p27 values ( 27 ), +partition p28 values ( 28 ), +partition p29 values ( 29 ), +partition p30 values ( 30 ), +partition p31 values ( 31 ), +partition p32 values ( 32 ), +partition p33 values ( 33 ), +partition p34 values ( 34 ), +partition p35 values ( 35 ), +partition p36 values ( 36 ), +partition p37 values ( 37 ), +partition p38 values ( 38 ), +partition p39 values ( 39 ), +partition p40 values ( 40 ), +partition p41 values ( 41 ), +partition p42 values ( 42 ), +partition p43 values ( 43 ), +partition p44 values ( 44 ), +partition p45 values ( 45 ), +partition p46 values ( 46 ), +partition p47 values ( 47 ), +partition p48 values ( 48 ), +partition p49 values ( 49 ), +partition p50 values ( 50 ), +partition p51 values ( 51 ), +partition p52 values ( 52 ), +partition p53 values ( 53 ), +partition p54 values ( 54 ), +partition p55 values ( 55 ), +partition p56 values ( 56 ), +partition p57 values ( 57 ), +partition p58 values ( 58 ), +partition p59 values ( 59 ), +partition p60 values ( 60 ), +partition p61 values ( 61 ), +partition p62 values ( 62 ), +partition p63 values ( 63 ), +partition p64 values ( 64 ), +partition p65 values ( 65 ), +partition p66 values ( 66 ), +partition p67 values ( 67 ), +partition p68 values ( 68 ), +partition p69 values ( 69 ), +partition p70 values ( 70 ), +partition p71 values ( 71 ), +partition p72 values ( 72 ), +partition p73 values ( 73 ), +partition p74 values ( 74 ), +partition p75 values ( 75 ), +partition p76 values ( 76 ), +partition p77 values ( 77 ), +partition p78 values ( 78 ), +partition p79 values ( 79 ), +partition p80 values ( 80 ), +partition p81 values ( 81 ), +partition p82 values ( 82 ) +); +drop table test_list; +drop schema FVT_COMPRESS_QWER cascade; NOTICE: drop cascades to 5 other objects DETAIL: drop cascades to table bmsql_order_line drop cascades to table test_partition_for_null_list_timestamp diff --git a/src/test/regress/expected/hw_partition_list_dml.out b/src/test/regress/expected/hw_partition_list_dml.out index edf43da6b..16d371157 100644 --- a/src/test/regress/expected/hw_partition_list_dml.out +++ b/src/test/regress/expected/hw_partition_list_dml.out @@ -1,27 +1,25 @@ -CREATE schema FVT_COMPRESS_QWER; -set search_path to FVT_COMPRESS_QWER; --- section 1: test from delete.sql -create table delete_test_list ( - id int, - a int, - b text -) partition by list(a) -( -partition delete_test_hash_p1 values(1,2,3,4,5,6,7,8,9,10), -partition delete_test_hash_p2 values(11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50), -partition delete_test_hash_p3 values(51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100)); -create index delete_test_list_index_local1 on delete_test_list (a) local -( - partition delete_test_list_p1_index_local tablespace PG_DEFAULT, - partition delete_test_list_p2_index_local tablespace PG_DEFAULT, - partition delete_test_list_p3_index_local tablespace PG_DEFAULT -); - -INSERT INTO delete_test_list (a) VALUES (10); -INSERT 
INTO delete_test_list (a, b) VALUES (50, repeat('x', 10000)); -INSERT INTO delete_test_list (a) VALUES (100); - -SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; +CREATE schema FVT_COMPRESS_QWER; +set search_path to FVT_COMPRESS_QWER; +-- section 1: test from delete.sql +create table delete_test_list ( + id int, + a int, + b text +) partition by list(a) +( +partition delete_test_hash_p1 values(1,2,3,4,5,6,7,8,9,10), +partition delete_test_hash_p2 values(11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50), +partition delete_test_hash_p3 values(51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100)); +create index delete_test_list_index_local1 on delete_test_list (a) local +( + partition delete_test_list_p1_index_local tablespace PG_DEFAULT, + partition delete_test_list_p2_index_local tablespace PG_DEFAULT, + partition delete_test_list_p3_index_local tablespace PG_DEFAULT +); +INSERT INTO delete_test_list (a) VALUES (10); +INSERT INTO delete_test_list (a, b) VALUES (50, repeat('x', 10000)); +INSERT INTO delete_test_list (a) VALUES (100); +SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; id | a | char_length ----+-----+------------- | 10 | @@ -29,85 +27,74 @@ SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; | 100 | (3 rows) - --- Pseudo Constant Quals -DELETE FROM delete_test_list where null; - --- allow an alias to be specified for DELETE's target table -DELETE FROM delete_test_list AS dt WHERE dt.a > 75; - --- if an alias is specified, don't allow the original table name --- to be referenced -DELETE FROM delete_test_list dt WHERE dt.a > 25; - -SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; +-- Pseudo Constant Quals +DELETE FROM delete_test_list where null; +-- allow an alias to be specified for DELETE's target table +DELETE FROM delete_test_list AS dt WHERE dt.a > 75; +-- if an alias is specified, don't allow the original table name +-- to be referenced +DELETE FROM delete_test_list dt WHERE dt.a > 25; +SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; id | a | char_length ----+----+------------- | 10 | (1 row) - --- delete a row with a TOASTed value -DELETE FROM delete_test_list WHERE a > 25; - -SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; +-- delete a row with a TOASTed value +DELETE FROM delete_test_list WHERE a > 25; +SELECT id, a, char_length(b) FROM delete_test_list order by 1, 2, 3; id | a | char_length ----+----+------------- | 10 | (1 row) - -DROP TABLE delete_test_list; - --- section 2: -create table hw_list_partition_dml_t1 (id int, name text)partition by list(id) ( -partition hw_list_partition_dml_t1_p1 values(1,2,3,4,5,6,7,8,9), -partition hw_list_partition_dml_t1_p2 values(10,11,12,13,14,15,16,17,18,19), -partition hw_list_partition_dml_t1_p3 values(20,21,22,23,24,25,26,27,28,29)); - -create table hw_list_partition_dml_t2 (id int, name text)partition by list(id) ( -partition hw_list_partition_dml_t2_p1 values(1,2,3,4,5,6,7,8,9), -partition hw_list_partition_dml_t2_p2 values(10,11,12,13,14,15,16,17,18,19), -partition hw_list_partition_dml_t2_p3 values(20,21,22,23,24,25,26,27,28,29)); - -create table hw_list_partition_dml_t3 (id int, name text)partition by list(id) ( -partition hw_list_partition_dml_t3_p1 values(1,2,3,4,5,6,7,8,9), -partition hw_list_partition_dml_t3_p2 
values(10,11,12,13,14,15,16,17,18,19), -partition hw_list_partition_dml_t3_p3 values(20,21,22,23,24,25,26,27,28,29)); - --- section 2.1: two table join, both are partitioned table -insert into hw_list_partition_dml_t1 values (1, 'li'), (11, 'wang'), (21, 'zhang'); -insert into hw_list_partition_dml_t2 values (1, 'xi'), (11, 'zhao'), (27, 'qi'); -insert into hw_list_partition_dml_t3 values (1, 'qin'), (11, 'he'), (27, 'xiao'); --- delete 10~20 tupes in hw_partition_dml_t1 -with T2_ID_10TH AS -( -SELECT id -FROM hw_list_partition_dml_t2 -WHERE id >= 10 and id < 20 -ORDER BY id -) -delete from hw_list_partition_dml_t1 -using hw_list_partition_dml_t2 -where hw_list_partition_dml_t1.id < hw_list_partition_dml_t2.id - and hw_list_partition_dml_t2.id IN - (SELECT id FROM T2_ID_10TH) -RETURNING hw_list_partition_dml_t1.name; +DROP TABLE delete_test_list; +-- section 2: +create table hw_list_partition_dml_t1 (id int, name text)partition by list(id) ( +partition hw_list_partition_dml_t1_p1 values(1,2,3,4,5,6,7,8,9), +partition hw_list_partition_dml_t1_p2 values(10,11,12,13,14,15,16,17,18,19), +partition hw_list_partition_dml_t1_p3 values(20,21,22,23,24,25,26,27,28,29)); +create table hw_list_partition_dml_t2 (id int, name text)partition by list(id) ( +partition hw_list_partition_dml_t2_p1 values(1,2,3,4,5,6,7,8,9), +partition hw_list_partition_dml_t2_p2 values(10,11,12,13,14,15,16,17,18,19), +partition hw_list_partition_dml_t2_p3 values(20,21,22,23,24,25,26,27,28,29)); +create table hw_list_partition_dml_t3 (id int, name text)partition by list(id) ( +partition hw_list_partition_dml_t3_p1 values(1,2,3,4,5,6,7,8,9), +partition hw_list_partition_dml_t3_p2 values(10,11,12,13,14,15,16,17,18,19), +partition hw_list_partition_dml_t3_p3 values(20,21,22,23,24,25,26,27,28,29)); +-- section 2.1: two table join, both are partitioned table +insert into hw_list_partition_dml_t1 values (1, 'li'), (11, 'wang'), (21, 'zhang'); +insert into hw_list_partition_dml_t2 values (1, 'xi'), (11, 'zhao'), (27, 'qi'); +insert into hw_list_partition_dml_t3 values (1, 'qin'), (11, 'he'), (27, 'xiao'); +-- delete 10~20 tupes in hw_partition_dml_t1 +with T2_ID_10TH AS +( +SELECT id +FROM hw_list_partition_dml_t2 +WHERE id >= 10 and id < 20 +ORDER BY id +) +delete from hw_list_partition_dml_t1 +using hw_list_partition_dml_t2 +where hw_list_partition_dml_t1.id < hw_list_partition_dml_t2.id + and hw_list_partition_dml_t2.id IN + (SELECT id FROM T2_ID_10TH) +RETURNING hw_list_partition_dml_t1.name; name ------ li (1 row) -select * from hw_list_partition_dml_t1 order by 1, 2; +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------- 11 | wang 21 | zhang (2 rows) --- delete all tupes that is less than 11 in hw_list_partition_dml_t1, that is 3 -insert into hw_list_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); -select * from hw_list_partition_dml_t1 order by 1, 2; +-- delete all tupes that is less than 11 in hw_list_partition_dml_t1, that is 3 +insert into hw_list_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------- 3 | AAA @@ -118,13 +105,13 @@ select * from hw_list_partition_dml_t1 order by 1, 2; 24 | DDD (6 rows) -delete from hw_list_partition_dml_t1 using hw_list_partition_dml_t2 where hw_list_partition_dml_t1.id < hw_list_partition_dml_t2.id and hw_list_partition_dml_t2.id = 11 RETURNING hw_list_partition_dml_t1.id; +delete from hw_list_partition_dml_t1 using hw_list_partition_dml_t2 
where hw_list_partition_dml_t1.id < hw_list_partition_dml_t2.id and hw_list_partition_dml_t2.id = 11 RETURNING hw_list_partition_dml_t1.id; id ---- 3 (1 row) -select * from hw_list_partition_dml_t1 order by 1, 2; +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------- 11 | wang @@ -134,21 +121,19 @@ select * from hw_list_partition_dml_t1 order by 1, 2; 24 | DDD (5 rows) - --- section 2.2: delete from only one table, no joining --- delete all tupes remaining: 13, 23, 24 -delete from hw_list_partition_dml_t1; -select * from hw_list_partition_dml_t1 order by 1, 2; +-- section 2.2: delete from only one table, no joining +-- delete all tupes remaining: 13, 23, 24 +delete from hw_list_partition_dml_t1; +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------ (0 rows) - --- section 3: --- section 3.1: two table join, only one is partitioned table --- and target relation is partitioned -insert into hw_list_partition_dml_t1 values (1, 'AAA'), (11, 'BBB'), (21, 'CCC'); -select * from hw_list_partition_dml_t1 order by 1, 2; +-- section 3: +-- section 3.1: two table join, only one is partitioned table +-- and target relation is partitioned +insert into hw_list_partition_dml_t1 values (1, 'AAA'), (11, 'BBB'), (21, 'CCC'); +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------ 1 | AAA @@ -156,16 +141,16 @@ select * from hw_list_partition_dml_t1 order by 1, 2; 21 | CCC (3 rows) --- delete all tupes in hw_list_partition_dml_t1 -delete from hw_list_partition_dml_t1 using hw_list_partition_dml_t3 where hw_list_partition_dml_t1.id < hw_list_partition_dml_t3.id and hw_list_partition_dml_t3.id = 27; -select * from hw_list_partition_dml_t1 order by 1, 2; +-- delete all tupes in hw_list_partition_dml_t1 +delete from hw_list_partition_dml_t1 using hw_list_partition_dml_t3 where hw_list_partition_dml_t1.id < hw_list_partition_dml_t3.id and hw_list_partition_dml_t3.id = 27; +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------ (0 rows) --- delete all tupes that is less than 11 in hw_list_partition_dml_t1, that is 3 -insert into hw_list_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); -select * from hw_list_partition_dml_t1 order by 1, 2; +-- delete all tupes that is less than 11 in hw_list_partition_dml_t1, that is 3 +insert into hw_list_partition_dml_t1 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------ 3 | AAA @@ -174,8 +159,8 @@ select * from hw_list_partition_dml_t1 order by 1, 2; 24 | DDD (4 rows) -delete from hw_list_partition_dml_t1 using hw_list_partition_dml_t3 where hw_list_partition_dml_t1.id < hw_list_partition_dml_t3.id and hw_list_partition_dml_t3.id = 11; -select * from hw_list_partition_dml_t1 order by 1, 2; +delete from hw_list_partition_dml_t1 using hw_list_partition_dml_t3 where hw_list_partition_dml_t1.id < hw_list_partition_dml_t3.id and hw_list_partition_dml_t3.id = 11; +select * from hw_list_partition_dml_t1 order by 1, 2; id | name ----+------ 13 | BBB @@ -183,31 +168,28 @@ select * from hw_list_partition_dml_t1 order by 1, 2; 24 | DDD (3 rows) - --- section 3.2 delete from only one table, no joining --- delete all tupes remaining: 13, 23, 24 -delete from hw_list_partition_dml_t1; -select * from hw_list_partition_dml_t1 order by 1, 2; +-- section 3.2 delete from only one table, no joining +-- delete all tupes remaining: 13, 23, 24 +delete from hw_list_partition_dml_t1; +select * from 
hw_list_partition_dml_t1 order by 1, 2; id | name ----+------ (0 rows) - --- section 3.3: two table join, only one is partitioned table --- and target relation is on-partitioned --- delete all tuples in hw_list_partition_dml_t3 -insert into hw_list_partition_dml_t2 values (28, 'EEE'); -delete from hw_list_partition_dml_t3 using hw_list_partition_dml_t2 where hw_list_partition_dml_t3.id < hw_list_partition_dml_t2.id and hw_list_partition_dml_t2.id = 28; -select * from hw_list_partition_dml_t3 order by 1, 2; +-- section 3.3: two table join, only one is partitioned table +-- and target relation is on-partitioned +-- delete all tuples in hw_list_partition_dml_t3 +insert into hw_list_partition_dml_t2 values (28, 'EEE'); +delete from hw_list_partition_dml_t3 using hw_list_partition_dml_t2 where hw_list_partition_dml_t3.id < hw_list_partition_dml_t2.id and hw_list_partition_dml_t2.id = 28; +select * from hw_list_partition_dml_t3 order by 1, 2; id | name ----+------ (0 rows) - --- delete all tuples that is less than 11 in hw_list_partition_dml_t3, that is 3 -insert into hw_list_partition_dml_t3 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); -delete from hw_list_partition_dml_t3 using hw_list_partition_dml_t2 where hw_list_partition_dml_t3.id < hw_list_partition_dml_t2.id and hw_list_partition_dml_t2.id = 11; -select * from hw_list_partition_dml_t3 order by 1, 2; +-- delete all tuples that is less than 11 in hw_list_partition_dml_t3, that is 3 +insert into hw_list_partition_dml_t3 values (3, 'AAA'), (13, 'BBB'), (23, 'CCC'), (24, 'DDD'); +delete from hw_list_partition_dml_t3 using hw_list_partition_dml_t2 where hw_list_partition_dml_t3.id < hw_list_partition_dml_t2.id and hw_list_partition_dml_t2.id = 11; +select * from hw_list_partition_dml_t3 order by 1, 2; id | name ----+------ 13 | BBB @@ -215,31 +197,28 @@ select * from hw_list_partition_dml_t3 order by 1, 2; 24 | DDD (3 rows) - --- section 3.4 delete from only one table, no joining --- delete all tuples remaining: 13, 23, 24 -delete from hw_list_partition_dml_t3; -select * from hw_list_partition_dml_t3 order by 1, 2; +-- section 3.4 delete from only one table, no joining +-- delete all tuples remaining: 13, 23, 24 +delete from hw_list_partition_dml_t3; +select * from hw_list_partition_dml_t3 order by 1, 2; id | name ----+------ (0 rows) - --- finally, drop table hw_list_partition_dml_t1, hw_list_partition_dml_t2 and hw_list_partition_dml_t3 -drop table hw_list_partition_dml_t1; -drop table hw_list_partition_dml_t2; -drop table hw_list_partition_dml_t3; - -drop table test_index_lt; +-- finally, drop table hw_list_partition_dml_t1, hw_list_partition_dml_t2 and hw_list_partition_dml_t3 +drop table hw_list_partition_dml_t1; +drop table hw_list_partition_dml_t2; +drop table hw_list_partition_dml_t3; +drop table test_index_lt; ERROR: table "test_index_lt" does not exist -create table test_index_lt (a int, b int, c int) -partition by list(a) -( - PARTITION p1 VALUES (3, 4, 5), - PARTITION p2 VALUES (1, 2) -); -insert into test_index_lt select generate_series(1,4); -explain (costs off, verbose on) select * from test_index_lt order by 1; +create table test_index_lt (a int, b int, c int) +partition by list(a) +( + PARTITION p1 VALUES (3, 4, 5), + PARTITION p2 VALUES (1, 2) +); +insert into test_index_lt select generate_series(1,4); +explain (costs off, verbose on) select * from test_index_lt order by 1; QUERY PLAN --------------------------------------------------------------------- Sort @@ -253,7 +232,7 @@ explain (costs off, verbose on) 
select * from test_index_lt order by 1; Selected Partitions: 1..2 (9 rows) -select * from test_index_lt order by 1; +select * from test_index_lt order by 1; a | b | c ---+---+--- 1 | | @@ -262,10 +241,10 @@ select * from test_index_lt order by 1; 4 | | (4 rows) -create index test_exchange_index_lt_a on test_index_lt (a) local; -set enable_seqscan = off; -set enable_bitmapscan = off; -explain (costs off, verbose on) select * from test_index_lt order by 1; +create index test_exchange_index_lt_a on test_index_lt (a) local; +set enable_seqscan = off; +set enable_bitmapscan = off; +explain (costs off, verbose on) select * from test_index_lt order by 1; QUERY PLAN ------------------------------------------------------------------------------------------------------ Sort @@ -279,7 +258,7 @@ explain (costs off, verbose on) select * from test_index_lt order by 1; Selected Partitions: 1..2 (9 rows) -select * from test_index_lt order by 1; +select * from test_index_lt order by 1; a | b | c ---+---+--- 1 | | @@ -288,27 +267,25 @@ select * from test_index_lt order by 1; 4 | | (4 rows) -drop table test_index_lt; - -drop schema FVT_COMPRESS_QWER cascade; -create schema fvt_other_cmd; -CREATE TABLE FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001(COL_INT int) -partition by list (COL_INT) -( - partition IDEX_LIST_PARTITION_TABLE_001_1 values (1000,2000), - partition IDEX_LIST_PARTITION_TABLE_001_2 values (3000,4000,5000), - partition IDEX_LIST_PARTITION_TABLE_001_3 values (6000,7000,8000,9000,10000) -); -declare -i int; -begin i:=1; -while -i<9990 LOOP -Delete from FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001 where col_int=i; -i:=i+1000; -end loop; -end; -/ - -drop schema fvt_other_cmd cascade; +drop table test_index_lt; +drop schema FVT_COMPRESS_QWER cascade; +create schema fvt_other_cmd; +CREATE TABLE FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001(COL_INT int) +partition by list (COL_INT) +( + partition IDEX_LIST_PARTITION_TABLE_001_1 values (1000,2000), + partition IDEX_LIST_PARTITION_TABLE_001_2 values (3000,4000,5000), + partition IDEX_LIST_PARTITION_TABLE_001_3 values (6000,7000,8000,9000,10000) +); +declare +i int; +begin i:=1; +while +i<9990 LOOP +Delete from FVT_OTHER_CMD.IDEX_LIST_PARTITION_TABLE_001 where col_int=i; +i:=i+1000; +end loop; +end; +/ +drop schema fvt_other_cmd cascade; NOTICE: drop cascades to table fvt_other_cmd.idex_list_partition_table_001 diff --git a/src/test/regress/expected/hw_partition_list_dql.out b/src/test/regress/expected/hw_partition_list_dql.out index f01861bb7..016fedeec 100644 --- a/src/test/regress/expected/hw_partition_list_dql.out +++ b/src/test/regress/expected/hw_partition_list_dql.out @@ -1,165 +1,163 @@ - --- ----- test partition for (null) --- - --- 1. test ordinary - -- 1.1 range partitioned table - -- 1.2 interval partitioned table --- 2. test data column of partition key value - -- 2.1 text - -- 2.2 timestamp --- 3. MAXVALUE - -- 3.1 MAXVALUE is first column - -- 3.2 MAXVALUE is second column - -CREATE schema FVT_COMPRESS_QWER; -set search_path to FVT_COMPRESS_QWER; - - --- 1. 
test ordinary ----- 1.1 range partitioned table -create table test_partition_for_null_list (a int, b int, c int, d int) -partition by list (a) -( - partition test_partition_for_null_list_p1 values(0), - partition test_partition_for_null_list_p2 values(1,2,3), - partition test_partition_for_null_list_p3 values(4,5,6) -); - -insert into test_partition_for_null_list values (0, 0, 0, 0); -insert into test_partition_for_null_list values (1, 1, 1, 1); -insert into test_partition_for_null_list values (5, 5, 5, 5); - --- failed: inserted partition key does not map to any table partition -insert into test_partition_for_null_list values (null, null, null, null); + +-- +---- test partition for (null) +-- + +-- 1. test ordinary + -- 1.1 range partitioned table + -- 1.2 interval partitioned table +-- 2. test data column of partition key value + -- 2.1 text + -- 2.2 timestamp +-- 3. MAXVALUE + -- 3.1 MAXVALUE is first column + -- 3.2 MAXVALUE is second column + +CREATE schema FVT_COMPRESS_QWER; +set search_path to FVT_COMPRESS_QWER; + + +-- 1. test ordinary +---- 1.1 range partitioned table +create table test_partition_for_null_list (a int, b int, c int, d int) +partition by list (a) +( + partition test_partition_for_null_list_p1 values(0), + partition test_partition_for_null_list_p2 values(1,2,3), + partition test_partition_for_null_list_p3 values(4,5,6) +); + +insert into test_partition_for_null_list values (0, 0, 0, 0); +insert into test_partition_for_null_list values (1, 1, 1, 1); +insert into test_partition_for_null_list values (5, 5, 5, 5); + +-- failed: inserted partition key does not map to any table partition +insert into test_partition_for_null_list values (null, null, null, null); ERROR: inserted partition key does not map to any table partition --- success -insert into test_partition_for_null_list values (0, null, null, null); - - --- failed: The partition number is invalid or out-of-range -select * from test_partition_for_null_list partition for (null) order by 1, 2, 3, 4; -ERROR: The partition number is invalid or out-of-range --- success -select * from test_partition_for_null_list partition for (0) order by 1, 2, 3, 4; +-- success +insert into test_partition_for_null_list values (0, null, null, null); + + +-- failed: The partition number is invalid or out-of-range +select * from test_partition_for_null_list partition for (null) order by 1, 2, 3, 4; +ERROR: Cannot find partition by the value +DETAIL: N/A. 
+-- success +select * from test_partition_for_null_list partition for (0) order by 1, 2, 3, 4; a | b | c | d ---+---+---+--- 0 | 0 | 0 | 0 0 | | | (2 rows) - - --- failed: The partition number is invalid or out-of-range -alter table test_partition_for_null_list rename partition for (null) to test_partition_for_null_list_part1; + + +-- failed: The partition number is invalid or out-of-range +alter table test_partition_for_null_list rename partition for (null) to test_partition_for_null_list_part1; ERROR: The partition number is invalid or out-of-range --- success -alter table test_partition_for_null_list rename partition for (0) to test_partition_for_null_list_part1; --- success -select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; +-- success +alter table test_partition_for_null_list rename partition for (0) to test_partition_for_null_list_part1; +-- success +select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; a | b | c | d ---+---+---+--- 0 | 0 | 0 | 0 0 | | | (2 rows) - - --- failed: The partition number is invalid or out-of-range -alter table test_partition_for_null_list drop partition for (null); -ERROR: The syntax is unsupported for list/hash partition --- success -alter table test_partition_for_null_list drop partition for (0); -ERROR: The syntax is unsupported for list/hash partition --- failed -select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; - a | b | c | d ----+---+---+--- - 0 | 0 | 0 | 0 - 0 | | | -(2 rows) - -CREATE TABLE select_list_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_INT) -( - partition select_list_partition_000_3_1 values (111,222,333,444), - partition select_list_partition_000_3_2 values (555,666,777,888,999,1100,1600) -); - -create index select_list_partition_table_index_000_3 ON select_list_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); -create view select_list_partition_table_view_000_3 as select * from select_list_partition_table_000_3; - -INSERT INTO select_list_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_list_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_list_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_list_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_list_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_list_partition_table_000_3 
VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_list_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -select * from select_list_partition_table_000_3 partition for (NULL) order by C_INT; + +-- failed: The partition number is invalid or out-of-range +alter table test_partition_for_null_list drop partition for (null); ERROR: The partition number is invalid or out-of-range - -alter table select_list_partition_table_000_3 rename partition for (NULL) to select_list_partition_table_000_3_p1; +-- success +alter table test_partition_for_null_list drop partition for (0); +-- failed +select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; +ERROR: partition "test_partition_for_null_list_part1" of relation "test_partition_for_null_list" does not exist + +CREATE TABLE select_list_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_INT) +( + partition select_list_partition_000_3_1 values (111,222,333,444), + partition select_list_partition_000_3_2 values (555,666,777,888,999,1100,1600) +); + +create index select_list_partition_table_index_000_3 ON select_list_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); +create view select_list_partition_table_view_000_3 as select * from select_list_partition_table_000_3; + +INSERT INTO select_list_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_list_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_list_partition_table_000_3 
VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_list_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_list_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_list_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_list_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +select * from select_list_partition_table_000_3 partition for (NULL) order by C_INT; +ERROR: Cannot find partition by the value +DETAIL: N/A. 
+ +alter table select_list_partition_table_000_3 rename partition for (NULL) to select_list_partition_table_000_3_p1; ERROR: The partition number is invalid or out-of-range - -alter table select_list_partition_table_000_3 drop partition for (NULL); -ERROR: The syntax is unsupported for list/hash partition - - -CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) -partition by list(ID) -( - partition partition_wise_join_table_001_1_1 values (1,42,3,44,5,46,7,48,9), - partition partition_wise_join_table_001_1_2 values (41,2,43,4,45,6,47,8,49) -) ; - -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); - -create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; -create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; -create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; -create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; - -CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) -partition by list(ID) -( - partition partition_wise_join_table_001_1_1 values (71,2,73,4,75,6,77,8,79), - partition partition_wise_join_table_001_1_2 values (1,72,3,74,5,76,7,78,9) -); - -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); - -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; - -SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +alter table select_list_partition_table_000_3 drop partition for (NULL); +ERROR: Cannot perform this operation because There are views or rules that depend on table select_list_partition_table_000_3. +DETAIL: N/A +HINT: drop the views or rules first. Use pg_rules to find rules. 
Use pg_class, pg_rewrite, pg_depend, pg_namespacesql to find views + + +CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) +partition by list(ID) +( + partition partition_wise_join_table_001_1_1 values (1,42,3,44,5,46,7,48,9), + partition partition_wise_join_table_001_1_2 values (41,2,43,4,45,6,47,8,49) +) ; + +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); + +create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; +create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; +create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; +create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; + +CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) +partition by list(ID) +( + partition partition_wise_join_table_001_1_1 values (71,2,73,4,75,6,77,8,79), + partition partition_wise_join_table_001_1_2 values (1,72,3,74,5,76,7,78,9) +); + +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); + +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; + +SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; id | id | rank | salary | salary | address | birthday ----+----+------+--------+--------+----------------------------------------------+-------------------------- 1 | 1 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 @@ -1064,11 +1062,11 @@ SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_W 9 | 9 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 (900 rows) - -ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; -ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; - -SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; +ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; + +SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM 
PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; id | id | rank | salary | salary | address | birthday ----+----+------+--------+--------+----------------------------------------------+-------------------------- 1 | 1 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 @@ -1973,15 +1971,15 @@ SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_W 9 | 9 | 13 | 10000 | 10000 | No.88# Science 6 Street of Xi'an of China | Wed Aug 08 00:00:00 1990 (900 rows) - -CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) -PARTITION BY list (A) -( - PARTITION HW_PARTITION_SELECT_RT_P1 VALUES (0), - PARTITION HW_PARTITION_SELECT_RT_P2 VALUES (1,2,3), - PARTITION HW_PARTITION_SELECT_RT_P3 VALUES (4,5,6) -); -EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; + +CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) +PARTITION BY list (A) +( + PARTITION HW_PARTITION_SELECT_RT_P1 VALUES (0), + PARTITION HW_PARTITION_SELECT_RT_P2 VALUES (1,2,3), + PARTITION HW_PARTITION_SELECT_RT_P3 VALUES (4,5,6) +); +EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; QUERY PLAN ------------------------------------------------------------------ Sort @@ -1993,55 +1991,55 @@ EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 10 Selected Partitions: 1..3 (7 rows) - -CREATE TABLE DTS2013112504143_TEST1(A INT) PARTITION BY LIST (A)(PARTITION DTS2013112504143_TEST1_P1 VALUES (1,2,3,4,5,6,7,8,9)); -CREATE TABLE DTS2013112504143_TEST2(A INT); -SELECT * FROM DTS2013112504143_TEST1 UNION ALL SELECT * FROM DTS2013112504143_TEST2 order by 1; + +CREATE TABLE TESTTABLE_TEST1(A INT) PARTITION BY LIST (A)(PARTITION TESTTABLE_TEST1_P1 VALUES (1,2,3,4,5,6,7,8,9)); +CREATE TABLE TESTTABLE_TEST2(A INT); +SELECT * FROM TESTTABLE_TEST1 UNION ALL SELECT * FROM TESTTABLE_TEST2 order by 1; a --- (0 rows) - -CREATE TABLE select_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_INT) -( - partition select_partition_000_3_1 values (111,222,333,444), - partition select_partition_000_3_3 values (555,666,777,888,999,1100,1600) -); - -create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); -create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; - -INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_partition_table_000_3 
VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; + +CREATE TABLE select_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_INT) +( + partition select_partition_000_3_1 values (111,222,333,444), + partition select_partition_000_3_3 values (555,666,777,888,999,1100,1600) +); + +create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); +create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; + +INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_partition_table_000_3 
VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Sort @@ -2061,8 +2059,8 @@ explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sq Selected Partitions: 1..2 (15 rows) - -select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; + +select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; lower | initcap | sqrt | ?column? 
| rank ---------+---------+------------------+----------+------ abcdefg | Abcdefg | 10.5356537528527 | 4.11 | 1 @@ -2078,18 +2076,18 @@ select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, ijklmno | Ijklmno | 40 | 12.99 | 1 (11 rows) - -create table hw_partition_select_rt5 (a int, b int, c int) -partition by list(c) -( -partition hw_partition_select_rt5_p1 values (0,1) -); - -alter table hw_partition_select_rt5 drop column b; - -update hw_partition_select_rt5 set c=0 where c=-1; - -drop schema FVT_COMPRESS_QWER cascade; + +create table hw_partition_select_rt5 (a int, b int, c int) +partition by list(c) +( +partition hw_partition_select_rt5_p1 values (0,1) +); + +alter table hw_partition_select_rt5 drop column b; + +update hw_partition_select_rt5 set c=0 where c=-1; + +drop schema FVT_COMPRESS_QWER cascade; NOTICE: drop cascades to 11 other objects DETAIL: drop cascades to table test_partition_for_null_list drop cascades to table select_list_partition_table_000_3 @@ -2097,42 +2095,42 @@ drop cascades to view select_list_partition_table_view_000_3 drop cascades to table partition_wise_join_table_001_1 drop cascades to table partition_wise_join_table_001_2 drop cascades to table hw_partition_select_rt -drop cascades to table dts2013112504143_test1 -drop cascades to table dts2013112504143_test2 +drop cascades to table testtable_test1 +drop cascades to table testtable_test2 drop cascades to table select_partition_table_000_3 drop cascades to view select_partition_table_view_000_3 drop cascades to table hw_partition_select_rt5 - ---begin: these test are related to explain output change about partition table. --- major change is as below - --1. - --Selected Partitions: 1 2 6 7 8 9 - -- \|/ - --Selected Partitions: 1..2,6..9 - --2. - --Selected Partitions: 1 3 5 7 9 - -- \|/ - --Selected Partitions: 1,3,5,7,9 -CREATE schema FVT_COMPRESS; -set search_path to FVT_COMPRESS; - - -create table test_explain_format_on_part_table (id int) -partition by list(id) -( -partition p1 values (1,2,3,4,5,6,7,8,9), -partition p2 values (11,12,13,14,15,16,17,18,19), -partition p3 values (21,22,23,24,25,26,27,28,29), -partition p4 values (31,32,33,34,35,36,37,38,39), -partition p5 values (41,42,43,44,45,46,47,48,49), -partition p6 values (51,52,53,54,55,56,57,58,59), -partition p7 values (61,62,63,64,65,66,67,68,69), -partition p8 values (71,72,73,74,75,76,77,78,79), -partition p9 values (81,82,83,84,85,86,87,88,89) -); --- two continous segments, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id <15 or id >51; + +--begin: these test are related to explain output change about partition table. +-- major change is as below + --1. + --Selected Partitions: 1 2 6 7 8 9 + -- \|/ + --Selected Partitions: 1..2,6..9 + --2. 
+ --Selected Partitions: 1 3 5 7 9 + -- \|/ + --Selected Partitions: 1,3,5,7,9 +CREATE schema FVT_COMPRESS; +set search_path to FVT_COMPRESS; + + +create table test_explain_format_on_part_table (id int) +partition by list(id) +( +partition p1 values (1,2,3,4,5,6,7,8,9), +partition p2 values (11,12,13,14,15,16,17,18,19), +partition p3 values (21,22,23,24,25,26,27,28,29), +partition p4 values (31,32,33,34,35,36,37,38,39), +partition p5 values (41,42,43,44,45,46,47,48,49), +partition p6 values (51,52,53,54,55,56,57,58,59), +partition p7 values (61,62,63,64,65,66,67,68,69), +partition p8 values (71,72,73,74,75,76,77,78,79), +partition p9 values (81,82,83,84,85,86,87,88,89) +); +-- two continous segments, text formast +explain (verbose on, costs off) + select * from test_explain_format_on_part_table where id <15 or id >51; QUERY PLAN -------------------------------------------------------------------------------------------------------------- Partition Iterator @@ -2144,9 +2142,9 @@ explain (verbose on, costs off) Selected Partitions: 1..9 (7 rows) --- no continous segment, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; +-- no continous segment, text formast +explain (verbose on, costs off) + select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Partition Iterator @@ -2158,9 +2156,9 @@ explain (verbose on, costs off) Selected Partitions: 1,3,5,7,9 (7 rows) --- two continous segments, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id <15 or id >51; +-- two continous segments, non-text formast +explain (verbose on, costs off, FORMAT JSON) + select * from test_explain_format_on_part_table where id <15 or id >51; QUERY PLAN --------------------------------------------------------------------------------------------------------------------- [ + @@ -2186,9 +2184,9 @@ explain (verbose on, costs off, FORMAT JSON) ] (1 row) --- no continous segment, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; +-- no continous segment, non-text formast +explain (verbose on, costs off, FORMAT JSON) + select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- [ + @@ -2214,24 +2212,24 @@ explain (verbose on, costs off, FORMAT JSON) ] (1 row) - -drop table test_explain_format_on_part_table; ---end: these test are related to explain output change about partition table. 
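The begin/end block above documents the display change only by example; the rule the new plans follow is that runs of consecutive selected partition numbers collapse into start..end segments, and non-adjacent segments are joined with commas. A minimal sketch against the same fixture, using a predicate of my own that selects three separate runs (the expected display line is an inference from the outputs above, not a captured result):

-- equality predicates prune list partitions precisely (cf. the 1,3,5,7,9 case above)
explain (verbose on, costs off)
  select * from test_explain_format_on_part_table
  where id = 5 or id = 12 or id = 35 or id = 55 or id = 62 or id = 75 or id = 85;
-- anticipated pruning line under the new format: Selected Partitions: 1..2,4,6..9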
- -create table hw_partition_select_parttable ( - c1 int, - c2 int, - c3 text) -partition by list(c1) -(partition hw_partition_select_parttable_p1 values (10,20,30,40), - partition hw_partition_select_parttable_p2 values (50,60,70,80,90,100,110,120,130,140), - partition hw_partition_select_parttable_p3 values (150,200,250,300,350)); - - insert into hw_partition_select_parttable values (10,40,'abc'); - insert into hw_partition_select_parttable(c1,c2) values (100,20); - insert into hw_partition_select_parttable values(300,200); - -select * from hw_partition_select_parttable order by 1, 2, 3; + +drop table test_explain_format_on_part_table; +--end: these test are related to explain output change about partition table. + +create table hw_partition_select_parttable ( + c1 int, + c2 int, + c3 text) +partition by list(c1) +(partition hw_partition_select_parttable_p1 values (10,20,30,40), + partition hw_partition_select_parttable_p2 values (50,60,70,80,90,100,110,120,130,140), + partition hw_partition_select_parttable_p3 values (150,200,250,300,350)); + + insert into hw_partition_select_parttable values (10,40,'abc'); + insert into hw_partition_select_parttable(c1,c2) values (100,20); + insert into hw_partition_select_parttable values(300,200); + +select * from hw_partition_select_parttable order by 1, 2, 3; c1 | c2 | c3 -----+-----+----- 10 | 40 | abc @@ -2239,8 +2237,8 @@ select * from hw_partition_select_parttable order by 1, 2, 3; 300 | 200 | (3 rows) - -select c1 from hw_partition_select_parttable order by 1; + +select c1 from hw_partition_select_parttable order by 1; c1 ----- 10 @@ -2248,8 +2246,8 @@ select c1 from hw_partition_select_parttable order by 1; 300 (3 rows) - -select c1,c2 from hw_partition_select_parttable order by 1, 2; + +select c1,c2 from hw_partition_select_parttable order by 1, 2; c1 | c2 -----+----- 10 | 40 @@ -2257,8 +2255,8 @@ select c1,c2 from hw_partition_select_parttable order by 1, 2; 300 | 200 (3 rows) - -select c2 from hw_partition_select_parttable order by 1; + +select c2 from hw_partition_select_parttable order by 1; c2 ----- 20 @@ -2266,8 +2264,8 @@ select c2 from hw_partition_select_parttable order by 1; 200 (3 rows) - -select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; + +select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; c1 | c2 | c3 -----+-----+----- 10 | 40 | abc @@ -2275,112 +2273,112 @@ select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; 300 | 200 | (3 rows) - -select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; + +select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; c1 ----- 100 (1 row) - -select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; + +select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; c1 | c2 | c3 -----+-----+---- 300 | 200 | (1 row) - -create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) -partition by list(c1) -(partition t_select_datatype_int32_p1 values(-100, -50, 0, 50), - partition t_select_datatype_int32_p2 values(100, 150, 200, 250), - partition t_select_datatype_int32_p3 values(300, 350), - partition t_select_datatype_int32_p4 values(400, 450, 500)); - -insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); - ---partition select for int32 ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; + +create 
table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) +partition by list(c1) +(partition t_select_datatype_int32_p1 values(-100, -50, 0, 50), + partition t_select_datatype_int32_p2 values(100, 150, 200, 250), + partition t_select_datatype_int32_p3 values(300, 350), + partition t_select_datatype_int32_p4 values(400, 450, 500)); + +insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); + +--partition select for int32 +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 100 | 300 | 300 | bb (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+----+----+---- 250 | 50 | 50 | (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- 
-100 | 20 | 20 | a @@ -2388,9 +2386,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order 150 | 75 | 500 | (3 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+----- -100 | 20 | 20 | a @@ -2399,9 +2397,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 orde 200 | 500 | 50 | ccc (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2413,9 +2411,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2427,9 +2425,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 orde 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2441,9 +2439,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2455,9 +2453,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 orde 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2468,9 +2466,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2481,9 +2479,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2493,9 +2491,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where 
t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2506,9 +2504,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 orde 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 200 | 500 | 50 | ccc @@ -2517,9 +2515,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order 450 | 35 | 150 | dddd (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2529,9 +2527,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 orde 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 250 | 50 | 50 | @@ -2539,9 +2537,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order 450 | 35 | 150 | dddd (3 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 200 | 500 | 50 | ccc @@ -2550,45 +2548,45 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 orde 450 | 35 | 150 | dddd (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+----+---- -100 | 20 | 20 | a (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; c1 | c2 | c3 
| c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2598,9 +2596,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_ 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2610,17 +2608,17 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 100 | 300 | 300 | bb (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+----- 100 | 300 | 300 | bb @@ -2629,9 +2627,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND 250 | 50 | 50 | (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2642,9 +2640,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 150 | 75 | 500 | @@ -2654,24 +2652,24 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t 450 | 35 | 150 | dddd (5 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND 
t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+----+----- 200 | 500 | 50 | ccc (1 row) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+----- -100 | 20 | 20 | a @@ -2680,9 +2678,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_s 200 | 500 | 50 | ccc (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2694,9 +2692,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_s 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2707,9 +2705,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_s 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2720,9 +2718,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_s 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2733,9 +2731,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_ 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+------ 100 | 300 | 300 | bb @@ -2746,9 +2744,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t 450 | 35 | 150 | dddd (6 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR 
t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2760,9 +2758,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+----- -100 | 20 | 20 | a @@ -2771,9 +2769,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_ 200 | 500 | 50 | ccc (4 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2785,9 +2783,9 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_ 450 | 35 | 150 | dddd (7 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2795,45 +2793,45 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_ 450 | 35 | 150 | dddd (3 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 100 | 300 | 300 | bb (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+---- -100 | 20 | 20 | a 300 | 700 | 125 | (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where 
t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+----+-----+------ -100 | 20 | 20 | a 450 | 35 | 150 | dddd (2 rows) - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; c1 | c2 | c3 | c4 ------+-----+-----+------ -100 | 20 | 20 | a @@ -2841,15 +2839,15 @@ select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND 450 | 35 | 150 | dddd (3 rows) - ---IS NULL ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) OR - t_select_datatype_int32.c4 IS NULL - ORDER BY 1, 2, 3, 4; + +--IS NULL +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) OR + t_select_datatype_int32.c4 IS NULL + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 150 | 75 | 500 | @@ -2857,38 +2855,38 @@ select * from t_select_datatype_int32 where 300 | 700 | 125 | (3 rows) - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) AND - t_select_datatype_int32.c4 IS NULL - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) AND + t_select_datatype_int32.c4 IS NULL + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where - t_select_datatype_int32.c4 IS NULL AND - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + t_select_datatype_int32.c4 IS NULL AND + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where - t_select_datatype_int32.c4 IS NULL OR - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + t_select_datatype_int32.c4 IS NULL OR + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 -----+-----+-----+---- 150 | 75 | 500 | @@ -2896,194 +2894,197 @@ select * from t_select_datatype_int32 where 300 | 700 | 125 | (3 rows) - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - 
(t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND - (t_select_datatype_int32.c2100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND + (t_select_datatype_int32.c2100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) - ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1100) + ORDER BY 1, 2, 3, 4; c1 | c2 | c3 | c4 ----+----+----+---- (0 rows) - --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --- check select contarins partition - --- ----- check select from range partition --- - -create table hw_partition_select_ordinary_table (a int, b int); - -create table test_select_list_partition (a int, b int) -partition by list(a) -( - partition test_select_list_partition_p1 values (0), - partition test_select_list_partition_p2 values (1,2,3), - partition test_select_list_partition_p3 values (4,5,6) -); - -insert into test_select_list_partition values(2); - ---success -select * from test_select_list_partition partition (test_select_list_partition_p1) order by 1, 2; + +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +-- check select contarins partition + +-- +---- check select from range partition +-- + +create table hw_partition_select_ordinary_table (a int, b int); + +create table test_select_list_partition (a int, b int) +partition by list(a) +( + partition test_select_list_partition_p1 values (0), + partition test_select_list_partition_p2 values (1,2,3), + partition test_select_list_partition_p3 values (4,5,6) +); + +insert into test_select_list_partition values(2); + +--success +select * from test_select_list_partition partition (test_select_list_partition_p1) order by 1, 2; a | b ---+--- (0 rows) - ---success -select * from test_select_list_partition partition (test_select_list_partition_p2) order by 1, 2; + +--success +select * from test_select_list_partition partition (test_select_list_partition_p2) order by 1, 2; a | b ---+--- 2 | (1 row) - ---success -select * from test_select_list_partition partition (test_select_list_partition_p3) order by 1, 2; + +--success +select * from test_select_list_partition partition (test_select_list_partition_p3) order by 1, 2; a | b ---+--- (0 rows) - ---success -select * from test_select_list_partition partition (test_select_list_partition_p4) order by 1, 2; + +--success +select * from test_select_list_partition partition (test_select_list_partition_p4) order by 1, 2; ERROR: partition "test_select_list_partition_p4" of relation "test_select_list_partition" does not exist - ---success -select a from test_select_list_partition partition (test_select_list_partition_p2) order by 1; + +--success +select a from test_select_list_partition partition (test_select_list_partition_p2) order by 1; a --- 2 (1 row) - ---success -select a from 
test_select_list_partition partition for (0) order by 1; + +--success +select a from test_select_list_partition partition for (0) order by 1; a --- (0 rows) - ---success -select a from test_select_list_partition partition for (1) order by 1; + +--success +select a from test_select_list_partition partition for (1) order by 1; a --- 2 (1 row) - ---success -select a from test_select_list_partition partition for (2) order by 1; + +--success +select a from test_select_list_partition partition for (2) order by 1; a --- 2 (1 row) - ---success -select a from test_select_list_partition partition for (5) order by 1; + +--success +select a from test_select_list_partition partition for (5) order by 1; a --- (0 rows) - ---success -select a from test_select_list_partition partition for (8) order by 1; -ERROR: The partition number is invalid or out-of-range - --- fail: table is not partitioned table -select a from hw_partition_select_ordinary_table partition (test_select_list_partition_p2); + +--success +select a from test_select_list_partition partition for (8) order by 1; +ERROR: Cannot find partition by the value +DETAIL: N/A. + +-- fail: table is not partitioned table +select a from hw_partition_select_ordinary_table partition (test_select_list_partition_p2); ERROR: relation "hw_partition_select_ordinary_table" is not partitioned table - --- fail: table is not partitioned table -select a from hw_partition_select_ordinary_table partition for (2); +DETAIL: N/A. + +-- fail: table is not partitioned table +select a from hw_partition_select_ordinary_table partition for (2); ERROR: relation "hw_partition_select_ordinary_table" is not partitioned table - --- --- -CREATE TABLE hw_partition_select_test(C_INT INTEGER) - partition by list (C_INT) -( - partition hw_partition_select_test_part_1 values (111,222,333), - partition hw_partition_select_test_part_2 values (444,555,666), - partition hw_partition_select_test_part_3 values (777,888,999) -); -insert into hw_partition_select_test values(111); -insert into hw_partition_select_test values(555); -insert into hw_partition_select_test values(888); - -select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a; +DETAIL: N/A. 
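Both partition-addressing forms exercised above resolve to a scan of a single partition: PARTITION (name) looks the partition up by its declared name, while PARTITION FOR (value) first routes the literal through the list partition map and then scans whichever partition would hold it. A small sketch on the test_select_list_partition fixture (the value 3 is an assumption of mine; it should route to test_select_list_partition_p2 because it appears in that partition's value list):

-- these two queries should scan the same partition and return the same rows
select a from test_select_list_partition partition (test_select_list_partition_p2) order by 1;
select a from test_select_list_partition partition for (3) order by 1;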
+ +-- +-- +CREATE TABLE hw_partition_select_test(C_INT INTEGER) + partition by list (C_INT) +( + partition hw_partition_select_test_part_1 values (111,222,333), + partition hw_partition_select_test_part_2 values (444,555,666), + partition hw_partition_select_test_part_3 values (777,888,999) +); +insert into hw_partition_select_test values(111); +insert into hw_partition_select_test values(555); +insert into hw_partition_select_test values(888); + +select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a; c_int ------- 111 (1 row) - -create table list_partitioned_table (a int) -partition by list(a) -( - partition list_partitioned_table_p1 values (0), - partition list_partitioned_table_p2 values (1,2,3), - partition list_partitioned_table_p3 values (4,5,6) -); - -insert into list_partitioned_table values (1); -insert into list_partitioned_table values (2); -insert into list_partitioned_table values (5); -insert into list_partitioned_table values (6); - -with tmp1 as (select a from list_partitioned_table partition for (2)) select a from tmp1 order by 1; + +create table list_partitioned_table (a int) +partition by list(a) +( + partition list_partitioned_table_p1 values (0), + partition list_partitioned_table_p2 values (1,2,3), + partition list_partitioned_table_p3 values (4,5,6) +); + +insert into list_partitioned_table values (1); +insert into list_partitioned_table values (2); +insert into list_partitioned_table values (5); +insert into list_partitioned_table values (6); + +with tmp1 as (select a from list_partitioned_table partition for (2)) select a from tmp1 order by 1; a --- 1 2 (2 rows) - --- ----- select union select --- -create table UNION_TABLE_043_1(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) -partition by list (C_INT) -( - partition UNION_TABLE_043_1_1 values (111,222,333), - partition UNION_TABLE_043_1_2 values (444,555) -); -insert into UNION_TABLE_043_1 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); -insert into UNION_TABLE_043_1 values('BCDEFGH','bcdefgh',222,2.222,'2000-02-02 02:02:02'); -insert into UNION_TABLE_043_1 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); -insert into UNION_TABLE_043_1 values('DEFGHIJ','defghij',444,4.444,'2000-04-04 04:04:04'); -insert into UNION_TABLE_043_1 values('EFGHIJK','efghijk',555,5.555,'2000-05-05 05:05:05'); - - -create table UNION_TABLE_043_2(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) -partition by list (C_INT) -( - partition UNION_TABLE_043_2_1 values (111,222,333), - partition UNION_TABLE_043_2_2 values (444,555) -); -insert into UNION_TABLE_043_2 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); -insert into UNION_TABLE_043_2 values('BCDEFGH','bcdefgh',222,2.222,'2010-02-02 02:02:02'); -insert into UNION_TABLE_043_2 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); -insert into UNION_TABLE_043_2 values('DEFGHIJ','defghij',444,4.444,'2010-04-04 04:04:04'); -insert into UNION_TABLE_043_2 values('EFGHIJK','efghijk',555,5.555,'2020-05-05 05:05:05'); - -select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 order by 1,2,3; + +-- +---- select union select +-- +create table UNION_TABLE_043_1(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) 
+partition by list (C_INT) +( + partition UNION_TABLE_043_1_1 values (111,222,333), + partition UNION_TABLE_043_1_2 values (444,555) +); +insert into UNION_TABLE_043_1 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); +insert into UNION_TABLE_043_1 values('BCDEFGH','bcdefgh',222,2.222,'2000-02-02 02:02:02'); +insert into UNION_TABLE_043_1 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); +insert into UNION_TABLE_043_1 values('DEFGHIJ','defghij',444,4.444,'2000-04-04 04:04:04'); +insert into UNION_TABLE_043_1 values('EFGHIJK','efghijk',555,5.555,'2000-05-05 05:05:05'); + + +create table UNION_TABLE_043_2(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) +partition by list (C_INT) +( + partition UNION_TABLE_043_2_1 values (111,222,333), + partition UNION_TABLE_043_2_2 values (444,555) +); +insert into UNION_TABLE_043_2 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); +insert into UNION_TABLE_043_2 values('BCDEFGH','bcdefgh',222,2.222,'2010-02-02 02:02:02'); +insert into UNION_TABLE_043_2 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); +insert into UNION_TABLE_043_2 values('DEFGHIJ','defghij',444,4.444,'2010-04-04 04:04:04'); +insert into UNION_TABLE_043_2 values('EFGHIJK','efghijk',555,5.555,'2020-05-05 05:05:05'); + +select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 order by 1,2,3; c_int | c_dp | c_ts_without -------+-------+-------------------------- 111 | 1.111 | Sat Jan 01 01:01:01 2000 @@ -3096,8 +3097,8 @@ select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 union select C_INT,C_DP,C_ 555 | 5.555 | Tue May 05 05:05:05 2020 (8 rows) - -select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 partition (UNION_TABLE_043_1_1) union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 partition (UNION_TABLE_043_2_1) order by 1,2,3; + +select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 partition (UNION_TABLE_043_1_1) union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 partition (UNION_TABLE_043_2_1) order by 1,2,3; c_int | c_dp | c_ts_without -------+-------+-------------------------- 111 | 1.111 | Sat Jan 01 01:01:01 2000 @@ -3106,11 +3107,11 @@ select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 partition (UNION_TABLE_043 333 | 3.333 | Fri Mar 03 03:03:03 2000 (4 rows) - -drop table UNION_TABLE_043_1; -drop table UNION_TABLE_043_2; - -drop schema FVT_COMPRESS cascade; + +drop table UNION_TABLE_043_1; +drop table UNION_TABLE_043_2; + +drop schema FVT_COMPRESS cascade; NOTICE: drop cascades to 6 other objects DETAIL: drop cascades to table hw_partition_select_parttable drop cascades to table t_select_datatype_int32 @@ -3118,8 +3119,8 @@ drop cascades to table hw_partition_select_ordinary_table drop cascades to table test_select_list_partition drop cascades to table hw_partition_select_test drop cascades to table list_partitioned_table - - - - - + + + + + diff --git a/src/test/regress/expected/hw_partition_pruning_2.out b/src/test/regress/expected/hw_partition_pruning_2.out index 7f66a8ce2..d71dfa79b 100644 --- a/src/test/regress/expected/hw_partition_pruning_2.out +++ b/src/test/regress/expected/hw_partition_pruning_2.out @@ -542,7 +542,7 @@ SELECT * FROM pruning_partition_table_000 WHERE C_INT>10; (6 rows) drop table pruning_partition_table_000; -create table t_pruning_DTS2013091303739_1(c1 int,c2 text) +create table t_pruning_TESTTABLE_1(c1 int,c2 text) partition by range(c1) ( 
partition p1 values less than(100), @@ -550,31 +550,31 @@ partition by range(c1) partition p3 values less than(300) ); explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c1 IS NULL; +select * from t_pruning_TESTTABLE_1 where c1 IS NULL; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 0 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_1 + -> Partitioned Seq Scan on t_pruning_testtable_1 Filter: (c1 IS NULL) Selected Partitions: NONE (6 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c1 IS NOT NULL; +select * from t_pruning_TESTTABLE_1 where c1 IS NOT NULL; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 3 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_1 + -> Partitioned Seq Scan on t_pruning_testtable_1 Filter: (c1 IS NOT NULL) Selected Partitions: 1..3 (6 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c1=null; +select * from t_pruning_TESTTABLE_1 where c1=null; QUERY PLAN ------------------------------------------------------------------------ Streaming (type: GATHER) @@ -582,12 +582,12 @@ select * from t_pruning_DTS2013091303739_1 where c1=null; One-Time Filter: NULL::boolean -> Partition Iterator Iterations: 3 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_1 + -> Partitioned Seq Scan on t_pruning_testtable_1 Selected Partitions: 1..3 (7 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c2=null; +select * from t_pruning_TESTTABLE_1 where c2=null; QUERY PLAN ------------------------------------------------------------------------ Streaming (type: GATHER) @@ -595,24 +595,24 @@ select * from t_pruning_DTS2013091303739_1 where c2=null; One-Time Filter: NULL::boolean -> Partition Iterator Iterations: 3 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_1 + -> Partitioned Seq Scan on t_pruning_testtable_1 Selected Partitions: 1..3 (7 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c2 IS NULL; +select * from t_pruning_TESTTABLE_1 where c2 IS NULL; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 3 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_1 + -> Partitioned Seq Scan on t_pruning_testtable_1 Filter: (c2 IS NULL) Selected Partitions: 1..3 (6 rows) -drop table t_pruning_DTS2013091303739_1; -create table t_pruning_DTS2013091303739_2(c1 int,c2 text) +drop table t_pruning_TESTTABLE_1; +create table t_pruning_TESTTABLE_2(c1 int,c2 text) partition by range(c1) ( partition p1 values less than(100), @@ -621,31 +621,31 @@ partition by range(c1) partition p4 values less than(MAXVALUE) ); explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NULL; +select * from t_pruning_TESTTABLE_2 where c1 IS NULL; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 1 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned 
Seq Scan on t_pruning_testtable_2 Filter: (c1 IS NULL) Selected Partitions: 4 (6 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NOT NULL; +select * from t_pruning_TESTTABLE_2 where c1 IS NOT NULL; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 4 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Filter: (c1 IS NOT NULL) Selected Partitions: 1..4 (6 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1=null; +select * from t_pruning_TESTTABLE_2 where c1=null; QUERY PLAN ------------------------------------------------------------------------ Streaming (type: GATHER) @@ -653,12 +653,12 @@ select * from t_pruning_DTS2013091303739_2 where c1=null; One-Time Filter: NULL::boolean -> Partition Iterator Iterations: 4 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Selected Partitions: 1..4 (7 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where null=c1; +select * from t_pruning_TESTTABLE_2 where null=c1; QUERY PLAN ------------------------------------------------------------------------ Streaming (type: GATHER) @@ -666,12 +666,12 @@ select * from t_pruning_DTS2013091303739_2 where null=c1; One-Time Filter: NULL::boolean -> Partition Iterator Iterations: 4 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Selected Partitions: 1..4 (7 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c2=null; +select * from t_pruning_TESTTABLE_2 where c2=null; QUERY PLAN ------------------------------------------------------------------------ Streaming (type: GATHER) @@ -679,47 +679,47 @@ select * from t_pruning_DTS2013091303739_2 where c2=null; One-Time Filter: NULL::boolean -> Partition Iterator Iterations: 4 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Selected Partitions: 1..4 (7 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c2 IS NULL; +select * from t_pruning_TESTTABLE_2 where c2 IS NULL; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 4 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Filter: (c2 IS NULL) Selected Partitions: 1..4 (6 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NULL and c1>150; +select * from t_pruning_TESTTABLE_2 where c1 IS NULL and c1>150; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 1 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Filter: ((c1 IS NULL) AND (c1 > 150)) Selected Partitions: 4 (6 rows) explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NULL OR c1<150; 
+select * from t_pruning_TESTTABLE_2 where c1 IS NULL OR c1<150; QUERY PLAN ------------------------------------------------------------------ Streaming (type: GATHER) -> Partition Iterator Iterations: 3 - -> Partitioned Seq Scan on t_pruning_dts2013091303739_2 + -> Partitioned Seq Scan on t_pruning_testtable_2 Filter: ((c1 IS NULL) OR (c1 < 150)) Selected Partitions: 1..2,4 (6 rows) -drop table t_pruning_DTS2013091303739_2; +drop table t_pruning_TESTTABLE_2; -- where condition has function CREATE TABLE wb_swry(wybz varchar(46), yycsdm varchar(14), diff --git a/src/test/regress/expected/hw_partition_pruning_multikey_2.out b/src/test/regress/expected/hw_partition_pruning_multikey_2.out index 5906d818c..aa1871dd2 100644 --- a/src/test/regress/expected/hw_partition_pruning_multikey_2.out +++ b/src/test/regress/expected/hw_partition_pruning_multikey_2.out @@ -13,59 +13,59 @@ select * from range_table_LLT where a=2 and b>10 and b<20 and c=2 and d=2; (0 rows) drop table range_table_LLT; -create table rt_DTS2014042814100 (a int, b int) +create table rt_TESTTABLE (a int, b int) partition by range(a, b) ( -partition rt_DTS2014042814100_p1 values less than(9, 3), -partition rt_DTS2014042814100_p2 values less than(10, 1) +partition rt_TESTTABLE_p1 values less than(9, 3), +partition rt_TESTTABLE_p2 values less than(10, 1) ); -insert into rt_DTS2014042814100 values (9, 4); -select * from rt_DTS2014042814100 where b=4; +insert into rt_TESTTABLE values (9, 4); +select * from rt_TESTTABLE where b=4; a | b ---+--- 9 | 4 (1 row) -drop table rt_DTS2014042814100; -create table rt_DTS2014042814100 (a int, b int) +drop table rt_TESTTABLE; +create table rt_TESTTABLE (a int, b int) partition by range(a, b) ( -partition rt_DTS2014042814100_p1 values less than(9, 1), -partition rt_DTS2014042814100_p2 values less than(10, 3) +partition rt_TESTTABLE_p1 values less than(9, 1), +partition rt_TESTTABLE_p2 values less than(10, 3) ); -insert into rt_DTS2014042814100 values (9, 4); -select * from rt_DTS2014042814100 where b=4; +insert into rt_TESTTABLE values (9, 4); +select * from rt_TESTTABLE where b=4; a | b ---+--- 9 | 4 (1 row) -drop table rt_DTS2014042814100; -create table rt_DTS2014042814100 (a int, b int, c int) +drop table rt_TESTTABLE; +create table rt_TESTTABLE (a int, b int, c int) partition by range(a, b, c) ( -partition rt_DTS2014042814100_p1 values less than(2, 9, 3), -partition rt_DTS2014042814100_p2 values less than(2, 10, 1) +partition rt_TESTTABLE_p1 values less than(2, 9, 3), +partition rt_TESTTABLE_p2 values less than(2, 10, 1) ); -insert into rt_DTS2014042814100 values (2, 9, 4); -select * from rt_DTS2014042814100 where c=4; +insert into rt_TESTTABLE values (2, 9, 4); +select * from rt_TESTTABLE where c=4; a | b | c ---+---+--- 2 | 9 | 4 (1 row) -drop table rt_DTS2014042814100; -create table rt_DTS2014042814100 (a int, b int, c int) +drop table rt_TESTTABLE; +create table rt_TESTTABLE (a int, b int, c int) partition by range(a, b, c) ( -partition rt_DTS2014042814100_p1 values less than(2, 9, 1), -partition rt_DTS2014042814100_p2 values less than(2, 10, 3) +partition rt_TESTTABLE_p1 values less than(2, 9, 1), +partition rt_TESTTABLE_p2 values less than(2, 10, 3) ); -insert into rt_DTS2014042814100 values (2, 9, 4); -select * from rt_DTS2014042814100 where c=4; +insert into rt_TESTTABLE values (2, 9, 4); +select * from rt_TESTTABLE where c=4; a | b | c ---+---+--- 2 | 9 | 4 (1 row) -drop table rt_DTS2014042814100; +drop table rt_TESTTABLE; diff --git a/src/test/regress/expected/hw_partition_select0.out 
b/src/test/regress/expected/hw_partition_select0.out index d15781815..80b156958 100644 --- a/src/test/regress/expected/hw_partition_select0.out +++ b/src/test/regress/expected/hw_partition_select0.out @@ -25229,9 +25229,9 @@ EXPLAIN (COSTS OFF, NODES OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_ --EXPLAIN(COSTS OFF, NODES OFF) SELECT * FROM HW_PARTITION_SELECT_PTEST WHERE A = 5000 AND B > 20; --INDEXSCAN --EXPLAIN(COSTS OFF, NODES OFF) SELECT * FROM HW_PARTITION_SELECT_PTEST WHERE A = 500 OR A = 3000; --BITMAPSCAN --EXPLAIN(COSTS OFF, NODES OFF) SELECT A FROM HW_PARTITION_SELECT_PTEST WHERE A > 5000; -- INDEXONLYSCAN -CREATE TABLE DTS2013112504143_TEST1(A INT) PARTITION BY RANGE (A)(PARTITION DTS2013112504143_TEST1_P1 VALUES LESS THAN (10)); -CREATE TABLE DTS2013112504143_TEST2(A INT); -SELECT * FROM DTS2013112504143_TEST1 UNION ALL SELECT * FROM DTS2013112504143_TEST2 order by 1; +CREATE TABLE TESTTABLE_TEST1(A INT) PARTITION BY RANGE (A)(PARTITION TESTTABLE_TEST1_P1 VALUES LESS THAN (10)); +CREATE TABLE TESTTABLE_TEST2(A INT); +SELECT * FROM TESTTABLE_TEST1 UNION ALL SELECT * FROM TESTTABLE_TEST2 order by 1; a --- (0 rows) @@ -25501,8 +25501,8 @@ drop cascades to view select_partition_table_view_000_4 drop cascades to table partition_wise_join_table_001_1 drop cascades to table partition_wise_join_table_001_2 drop cascades to table hw_partition_select_rt -drop cascades to table dts2013112504143_test1 -drop cascades to table dts2013112504143_test2 +drop cascades to table testtable_test1 +drop cascades to table testtable_test2 drop cascades to table hw_partition_select_rangetab drop cascades to table select_partition_table_000_2 drop cascades to view select_partition_table_view_000_2 diff --git a/src/test/regress/expected/hw_pbe.out b/src/test/regress/expected/hw_pbe.out index c590f3551..165511962 100644 --- a/src/test/regress/expected/hw_pbe.out +++ b/src/test/regress/expected/hw_pbe.out @@ -3536,9 +3536,9 @@ DEALLOCATE PREPARE pa; drop table pbe_prunning_tmp; drop table pbe_prunning_000; SET enable_pbe_optimization to false; -create table t_DTS2017082304009 (id int,name text); -insert into t_DTS2017082304009 values (1,'a'),(2,'b'),(3,'c'), (4,'d'),(5,'e'); -prepare a as select * from t_DTS2017082304009 where id=$1; +create table t_TESTTABLE (id int,name text); +insert into t_TESTTABLE values (1,'a'),(2,'b'),(3,'c'), (4,'d'),(5,'e'); +prepare a as select * from t_TESTTABLE where id=$1; execute a (1); id | name ----+------ @@ -3576,7 +3576,7 @@ execute a (1); (1 row) deallocate a; -drop table t_DTS2017082304009; +drop table t_TESTTABLE; -- Bugfix for FQS multi-table join create table t1(id1 int, id2 int, num int); insert into t1 values (1,11,11), (2,21,21), (3,31,31), (4,41,41), (5,51,51); @@ -4090,18 +4090,18 @@ deallocate s0; drop table t1_xc_fqs; drop table t2_xc_fqs; set enable_pbe_optimization to true; -create table DTS2018081308755_t1 (id int, num int); -insert into DTS2018081308755_t1 values (1,1),(20,20); -explain analyze create table DTS2018081308755_t2 as select * from DTS2018081308755_t1; +create table TESTTABLE_t1 (id int, num int); +insert into TESTTABLE_t1 values (1,1),(20,20); +explain analyze create table TESTTABLE_t2 as select * from TESTTABLE_t1; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------- ---?Insert on dts2018081308755_t2 (cost=0.00..31.49 rows=2149 width=8) (actual time=.* rows=2 loops=1) ---? 
-> Seq Scan on dts2018081308755_t1 (cost=0.00..31.49 rows=2149 width=8) (actual time=.* rows=2 loops=1) +--?Insert on testtable_t2 (cost=0.00..31.49 rows=2149 width=8) (actual time=.* rows=2 loops=1) +--? -> Seq Scan on testtable_t1 (cost=0.00..31.49 rows=2149 width=8) (actual time=.* rows=2 loops=1) --? Total runtime: .* ms (3 rows) -drop table DTS2018081308755_t2; -drop table DTS2018081308755_t1; +drop table TESTTABLE_t2; +drop table TESTTABLE_t1; create table cs_his(segment1 varchar(15), period_name varchar(15), currency_code varchar(15), frequency_code varchar(15), segment3 varchar(15), segment3_desc varchar(200), end_balance_dr numeric, end_balance_cr numeric) with (orientation=column); create table bs_his(row_desc varchar(800),row_id numeric,amount1 numeric, amount2 numeric, period_name varchar(80), frequency_code varchar(80), currency_code varchar(80), segment1 varchar(80)) with (orientation=column); insert into cs_his values('11','20190227','22','33','44','55',1.3,3.5); diff --git a/src/test/regress/expected/hw_pct_type_and_rowtype.out b/src/test/regress/expected/hw_pct_type_and_rowtype.out new file mode 100644 index 000000000..38ac66de9 --- /dev/null +++ b/src/test/regress/expected/hw_pct_type_and_rowtype.out @@ -0,0 +1,961 @@ +--For Composite Type with %TYPE and %ROWTYPE +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists sql_compositetype; +NOTICE: schema "sql_compositetype" does not exist, skipping +create schema sql_compositetype; +set current_schema = sql_compositetype; +-- initialize table and type-- +create type ctype1 as (a int, b int); +create table foo(a int, b ctype1); +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +--general create type with %TYPE and %ROWTYPE +create type ctype2 as (a foo.a%TYPE, b foo.b%TYPE); +NOTICE: type reference foo.a%TYPE converted to integer +NOTICE: type reference foo.b%TYPE converted to ctype1 +create type ctype3 as (a foo%ROWTYPE); +--create type with schema of table +create type ctype4 as (a sql_compositetype.foo.a%TYPE, b sql_compositetype.foo.b%TYPE); +NOTICE: type reference sql_compositetype.foo.a%TYPE converted to integer +NOTICE: type reference sql_compositetype.foo.b%TYPE converted to ctype1 +create type ctype5 as (a sql_compositetype.foo%ROWTYPE); +--create type with database and schema of table +create type ctype6 as (a regression.sql_compositetype.foo.a%TYPE, b regression.sql_compositetype.foo.b%TYPE); +NOTICE: type reference regression.sql_compositetype.foo.a%TYPE converted to integer +NOTICE: type reference regression.sql_compositetype.foo.b%TYPE converted to ctype1 +create type ctype7 as (a regression.sql_compositetype.foo%ROWTYPE); +--ERROR: %TYPE with table is not allowed +create type ctype8 as (a foo%TYPE); +ERROR: improper %TYPE reference (too few dotted names): foo +create type ctype9 as (a sql_compositetype.foo%TYPE); +ERROR: relation "sql_compositetype" does not exist +create type ctype10 as (a regression.sql_compositetype.foo%TYPE); +ERROR: schema "regression" does not exist +--ERROR: %ROWTYPE with attribute is not allowed +create type ctype11 as (a foo.a%ROWTYPE, b foo.b%ROWTYPE); +ERROR: schema "foo" does not exist +create type ctype12 as (a sql_compositetype.foo.a%ROWTYPE, b sql_compositetype.foo.b%ROWTYPE); +ERROR: cross-database references are not implemented: 
"sql_compositetype.foo.a" +create type ctype13 as (a regression.sql_compositetype.foo.a%ROWTYPE, b regression.sql_compositetype.foo.b%ROWTYPE); +ERROR: improper %ROWTYPE reference +DETAIL: improper %ROWTYPE reference (too many dotted names): regression.sql_compositetype.foo.a +--ERROR: %TYPE and %ROWTYPE with type is not allowed +create type ctype14 as (a ctype1%TYPE); +ERROR: improper %TYPE reference (too few dotted names): ctype1 +create type ctype15 as (a ctype1%ROWTYPE); +ERROR: relation does not exist when parse word. +DETAIL: relation "ctype1" referenced by %ROWTYPE does not exist. +--ERROR: %ROWTYPE with incorrect database or schema is not allowed +create type ctype16 as (a postgres.sql_compositetype.foo%ROWTYPE, b postgres.sql_compositetype.foo%ROWTYPE); +ERROR: cross-database references are not implemented: "postgres.sql_compositetype.foo" +create type ctype16 as (a regression.sql.foo%ROWTYPE, b regression.sql.foo%ROWTYPE); +ERROR: schema "sql" does not exist +create type ctype16 as (a sql.foo%ROWTYPE, b sql.foo%ROWTYPE); +ERROR: schema "sql" does not exist +--ERROR: %ROWTYPE with more than 4 dots is not allowed +create type ctype16 as (a regression.sql_compositetype.foo.a%ROWTYPE, b regression.sql_compositetype.foo.b%ROWTYPE); +ERROR: improper %ROWTYPE reference +DETAIL: improper %ROWTYPE reference (too many dotted names): regression.sql_compositetype.foo.a +create type ctype16 as (a postgres.regression.sql_compositetype.foo.a%ROWTYPE, b postgres.regression.sql_compositetype.foo.b%ROWTYPE); +ERROR: improper %ROWTYPE reference +DETAIL: improper %ROWTYPE reference (too many dotted names): postgres.regression.sql_compositetype.foo.a +--test select stmt for %TYPE and %ROWTYPE +create table t1(a int , b int); +create table t2(a int, b t1); +insert into t2 values(1,(2,3)); +drop function if exists get_t2; +NOTICE: function get_t2() does not exist, skipping +create or replace function get_t2() RETURNS record as $$ +declare + v_a record; +begin + select * into v_a from t2; + return v_a; +end; +$$ language plpgsql; +select t.b from get_t2() as t (a t2.a%TYPE, b t1%ROWtype); +NOTICE: type reference t2.a%TYPE converted to integer + b +------- + (2,3) +(1 row) + +--test update stmt for %TYPE and %ROWTYPE +update t2 SET a = t.a + 1 from get_t2() as t (a t2.a%TYPE, b t1%ROWtype); +NOTICE: type reference t2.a%TYPE converted to integer +select * from t2; + a | b +---+------- + 2 | (2,3) +(1 row) + +--test alter type for %TYPE and %ROWTYPE +ALTER TYPE ctype2 ADD ATTRIBUTE c foo.a%TYPE; +NOTICE: type reference foo.a%TYPE converted to integer +NOTICE: type reference foo.a%TYPE converted to integer +ALTER TYPE ctype2 ADD ATTRIBUTE d foo%ROWTYPE; +--test drop function for %TYPE and %ROWTYPE +create or replace function get_int(i int) RETURNS int as $$ +begin + return i.a; +end; +$$ language plpgsql; +drop function get_int(i foo.b%TYPE); --should fail +NOTICE: type reference foo.b%TYPE converted to ctype1 +ERROR: function get_int(ctype1) does not exist +drop function get_int(i foo%ROWTYPE); --should fail +ERROR: function get_int(foo) does not exist +drop function get_int(foo.a%TYPE); --should success +NOTICE: type reference foo.a%TYPE converted to integer +create or replace function get_int(i foo%ROWTYPE) RETURNS int as $$ +begin + return i.a; +end; +$$ language plpgsql; +drop function get_int(i foo.b%TYPE); --should fail +NOTICE: type reference foo.b%TYPE converted to ctype1 +ERROR: function get_int(ctype1) does not exist +drop function get_int(i t2%ROWTYPE); --should fail +ERROR: function get_int(t2) 
does not exist +drop function get_int(i foo%ROWTYPE); --should success +--test typmod whether reversed by %TYPE and %ROWTYPE +create table v1(a int, b varchar(2)); +create type ctype17 as (a v1.b%TYPE[],b v1%ROWTYPE); +NOTICE: type reference v1.b%TYPE[] converted to character varying[] +create table v2(a int, b ctype17); +insert into v2 values(1, (array['aa','bb'], (2,'cc'))); +select * from v2; + a | b +---+---------------------- + 1 | ("{aa,bb}","(2,cc)") +(1 row) + +insert into v2 values(1, (array['aaa','bb'], (2,'cc'))); +ERROR: value too long for type character varying(2) +CONTEXT: referenced column: b +insert into v2 values(1, (array['aa','bb'], (2,'ccc'))); +ERROR: value too long for type character varying(2) +CONTEXT: referenced column: b +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- +drop table if exists v2; +drop type if exists ctype17; +drop table if exists v1; +drop function if exists get_t2; +drop table if exists t2; +drop table if exists t1; +drop type if exists ctype16; +NOTICE: type "ctype16" does not exist, skipping +drop type if exists ctype15; +NOTICE: type "ctype15" does not exist, skipping +drop type if exists ctype14; +NOTICE: type "ctype14" does not exist, skipping +drop type if exists ctype13; +NOTICE: type "ctype13" does not exist, skipping +drop type if exists ctype12; +NOTICE: type "ctype12" does not exist, skipping +drop type if exists ctype11; +NOTICE: type "ctype11" does not exist, skipping +drop type if exists ctype10; +NOTICE: type "ctype10" does not exist, skipping +drop type if exists ctype9; +NOTICE: type "ctype9" does not exist, skipping +drop type if exists ctype8; +NOTICE: type "ctype8" does not exist, skipping +drop type if exists ctype7; +drop type if exists ctype6; +drop type if exists ctype5; +drop type if exists ctype4; +drop type if exists ctype3; +drop type if exists ctype2; +drop table if exists foo; +drop type if exists ctype1; +-- clean up -- +drop schema if exists sql_compositetype cascade; +--For Function Return Type with %TYPE, %ROWTYPE and [] +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists sql_functionreturn; +NOTICE: schema "sql_functionreturn" does not exist, skipping +create schema sql_functionreturn; +set current_schema = sql_functionreturn; +-- initialize table and type-- +create type ctype1 as (a int, b int); +create table foo(a int, b ctype1); +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +--test function return type for %TYPE with composite type +create or replace function get_ctype1 RETURNS foo.b%TYPE as $$ +declare + v_a ctype1; + begin + v_a := (1,2); + return v_a; +end; +$$ language plpgsql; +NOTICE: type reference foo.b%TYPE converted to ctype1 +select get_ctype1(); + get_ctype1 +------------ + (1,2) +(1 row) + +--test function return type for %TYPE with simple type +create or replace function get_int RETURNS foo.a%TYPE as $$ +declare + v_a int; +begin + v_a := 1; + return v_a; +end; +$$ language plpgsql; +NOTICE: type reference foo.a%TYPE converted to integer +select get_int(); + get_int +--------- + 1 +(1 row) + +--test function return type for %TYPE[] with simple type +create or replace function get_intarray RETURNS foo.a%TYPE[] as $$ +declare + type arr is VARRAY(10) of int; + 
v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := 1; + v_a.extend(1); + v_a(2) := 2; + return v_a; +end; +$$ language plpgsql; +NOTICE: type reference foo.a%TYPE[] converted to integer[] +select get_intarray(); + get_intarray +-------------- + {1,2} +(1 row) + +--test function return type for %TYPE[] with composite type +create or replace function get_ctype1array RETURNS foo.b%TYPE[] as $$ +declare + type arr is VARRAY(10) of ctype1; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := (1,2); + v_a.extend(1); + v_a(2) := (3,4); + return v_a; +end; +$$ language plpgsql; +NOTICE: type reference foo.b%TYPE[] converted to ctype1[] +select get_ctype1array(); + get_ctype1array +------------------- + {"(1,2)","(3,4)"} +(1 row) + +--test function return type for %ROWTYPE +create or replace function get_foo RETURNS foo%ROWTYPE as $$ +declare + v_a foo; +begin + v_a := (1,(2,3)); +return v_a; +end; +$$ language plpgsql; +select get_foo(); + get_foo +------------- + (1,"(2,3)") +(1 row) + +--test function return type for %ROWTYPE[] +create or replace function get_fooarray RETURNS foo%ROWTYPE[] as $$ +declare + type arr is VARRAY(10) of foo; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := (1,(2,3)); + v_a.extend(1); + v_a(2) := (4,(5,6)); + return v_a; +end; +$$ language plpgsql; +select get_fooarray(); + get_fooarray +----------------------------------- + {"(1,\"(2,3)\")","(4,\"(5,6)\")"} +(1 row) + +--test function return type for SETOF %TYPE[] with simple type +create or replace function get_set_intarray RETURNS SETOF foo.a%TYPE[] as $$ +declare + type arr is VARRAY(10) of int; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := 1; + RETURN NEXT v_a; + v_a.extend(1); + v_a(2) := 2; + RETURN NEXT v_a; + return; +end; +$$ language plpgsql; +NOTICE: type reference foo.a%TYPE[] converted to integer[] +select get_set_intarray(); + get_set_intarray +------------------ + {1} + {1,2} +(2 rows) + +--test %TYPE for variable +create or replace function f1(ss in int) return int as + va foo%ROWTYPE; + vb va.b%TYPE; + vc va.a%TYPE; +begin + va := (1, (2, 3)); + vb := (4, 5); + vc := 6; + raise info '% % %',va , vb, vc; + vb.a := vc; + va.b := vb; + va.a := vc; + raise info '% % %',va , vb, vc; + return va.a; +end; +/ +select f1(1); +INFO: (1,"(2,3)") (4,5) 6 +CONTEXT: referenced column: f1 +INFO: (6,"(6,5)") (6,5) 6 +CONTEXT: referenced column: f1 + f1 +---- + 6 +(1 row) + +--ERROR: test %TYPE for variable, not existed field +create or replace function f1(ss in int) return int as + va foo%ROWTYPE; + vb va.b%TYPE; + vc va.c%TYPE; +begin + va := (1, (2, 3)); + vb := (4, 5); + vc := 6; + raise info '% % %',va , vb, vc; + vb.a := vc; + va.b := vb; + va.a := vc; + raise info '% % %',va , vb, vc; + return va.a; +end; +/ +ERROR: invalid type name "va.c%TYPE" +LINE 3: vc va.c%TYPE; + ^ +QUERY: DECLARE va foo%ROWTYPE; + vb va.b%TYPE; + vc va.c%TYPE; +begin + va := (1, (2, 3)); + vb := (4, 5); + vc := 6; + raise info '% % %',va , vb, vc; + vb.a := vc; + va.b := vb; + va.a := vc; + raise info '% % %',va , vb, vc; + return va.a; +end +DROP function f1(); +--test synonym type +DROP SCHEMA IF EXISTS sql_compositetype_test; +NOTICE: schema "sql_compositetype_test" does not exist, skipping +CREATE SCHEMA sql_compositetype_test; +CREATE TABLE sql_compositetype_test.tabfoo(a int, b int); +CREATE TYPE sql_compositetype_test.compfoo AS (f1 int, f2 text); +CREATE OR REPLACE SYNONYM tabfoo for sql_compositetype_test.tabfoo; +CREATE OR REPLACE SYNONYM compfoo for sql_compositetype_test.compfoo; +create table t1 (a 
+CREATE OR REPLACE PROCEDURE pro_test_tab (in_tabfoo tabfoo%rowtype)
+AS
+BEGIN
+END;
+/
+--create the users as sysadmin to avoid permission issues
+create user synonym_user_1 password 'hauwei@123' sysadmin;
+create user synonym_user_2 password 'hauwei@123' sysadmin;
+--grant schema privileges
+grant all privileges on schema sql_functionreturn to synonym_user_1;
+grant all privileges on schema sql_functionreturn to synonym_user_2;
+--create the test table
+drop table if exists synonym_user_1.tb_test;
+NOTICE: table "tb_test" does not exist, skipping
+create table synonym_user_1.tb_test(col1 int,col2 int);
+--create the synonyms
+create or replace synonym sql_functionreturn.tb_test for synonym_user_1.tb_test;
+create or replace synonym synonym_user_2.tb_test for synonym_user_1.tb_test;
+--create the test package
+create or replace package synonym_user_2.pckg_test as
+v_a tb_test.col1%type;
+v_b tb_test%rowtype;
+procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type);
+function func_test(i_col1 in int) return tb_test.col1%type;
+function func_test1(i_col1 in int) return tb_test%rowtype;
+end pckg_test;
+/
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+create or replace package body synonym_user_2.pckg_test as
+procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type)as
+begin
+select col1 into o_ret from tb_test where col1=i_col1;
+end;
+function func_test(i_col1 in int) return tb_test.col1%type as
+begin
+select col1 into v_a from tb_test where col1=i_col1;
+return v_a;
+end;
+function func_test1(i_col1 in int) return tb_test%rowtype as
+begin
+for rec in (select col1,col2 from tb_test where col1=i_col1) loop
+v_b.col1:=rec.col1;
+v_b.col2:=rec.col2;
+end loop;
+return v_b;
+end;
+end pckg_test;
+/
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+DROP PACKAGE synonym_user_2.pckg_test;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function synonym_user_2.proc_test(integer)
+drop cascades to function synonym_user_2.func_test(integer)
+drop cascades to function synonym_user_2.func_test1(integer)
+DROP USER synonym_user_2 CASCADE;
+NOTICE: drop cascades to synonym synonym_user_2.tb_test
+DROP USER synonym_user_1 CASCADE;
+--test public synonym in PLpgSQL
+set behavior_compat_options= 'bind_procedure_searchpath';
+--create the users as sysadmin to avoid permission issues
+create user synonym_user_1 password 'Gauss_234' sysadmin;
+create user synonym_user_2 password 'Gauss_234' sysadmin;
+--grant schema privileges
+grant all privileges on schema public to synonym_user_1;
+grant all privileges on schema public to synonym_user_2;
+--create the test table
+drop table if exists synonym_user_1.tb_test;
+NOTICE: table "tb_test" does not exist, skipping
+create table synonym_user_1.tb_test(col1 int,col2 int);
+--create the synonym
+create or replace synonym public.tb_test for synonym_user_1.tb_test;
+create or replace package synonym_user_2.pckg_test as
+v_a tb_test.col1%type;
+v_b tb_test%rowtype;
+procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type);
+function func_test(i_col1 in int) return tb_test.col1%type;
+function func_test1(i_col1 in int) return tb_test%rowtype;
+end pckg_test;
+/
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+create or replace package body synonym_user_2.pckg_test as
+procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type)as
+begin
+select col1 into o_ret from tb_test where col1=i_col1;
+end;
+function func_test(i_col1 in int) return tb_test.col1%type as
+begin
+select col1 into v_a from tb_test where col1=i_col1;
+return v_a;
+end;
+function func_test1(i_col1 in int) return tb_test%rowtype as
+begin
+for rec in (select col1,col2 from tb_test where col1=i_col1) loop
+v_b.col1:=rec.col1;
+v_b.col2:=rec.col2;
+end loop;
+return v_b;
+end;
+end pckg_test;
+/
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+NOTICE: type reference tb_test.col1%TYPE converted to integer
+DROP PACKAGE synonym_user_2.pckg_test;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function synonym_user_2.proc_test(integer)
+drop cascades to function synonym_user_2.func_test(integer)
+drop cascades to function synonym_user_2.func_test1(integer)
+DROP USER synonym_user_2 CASCADE;
+DROP USER synonym_user_1 CASCADE;
+reset behavior_compat_options;
+--test synonyms referencing procedures and packages
+set behavior_compat_options= 'bind_procedure_searchpath';
+--create the users as sysadmin to avoid permission issues
+create user synonym_user_1 password 'Gauss_234' sysadmin;
+create user synonym_user_2 password 'Gauss_234' sysadmin;
+--grant schema privileges
+grant all privileges on schema public to synonym_user_1;
+grant all privileges on schema public to synonym_user_2;
+set current_schema = public;
+--create the test procedure
+create or replace procedure synonym_user_1.proc_test()as
+begin
+raise info 'test procedure';
+end;
+/
+--create the test package
+create or replace package synonym_user_1.pckg_test1 as
+procedure proc_test2();
+end pckg_test1;
+/
+create or replace package body synonym_user_1.pckg_test1 as
+procedure proc_test2()as
+begin
+raise info 'test package procedure';
+end;
+end pckg_test1;
+/
+--create the synonyms
+create or replace synonym public.proc_test for synonym_user_1.proc_test;
+create or replace synonym public.pckg_test1 for synonym_user_1.pckg_test1;
+show search_path;
+ search_path
+-------------
+ public
+(1 row)
+
+--create the test package
+create or replace package synonym_user_2.pckg_test as
+procedure proc_test1();
+end pckg_test;
+/
+create or replace package body synonym_user_2.pckg_test as
+procedure proc_test1()as
+
+begin
+proc_test();
+pckg_test1.proc_test2();
+end;
+end pckg_test;
+/
+call synonym_user_2.pckg_test.proc_test1();
+INFO: test procedure
+CONTEXT: SQL statement "CALL proc_test()"
+PL/pgSQL function proc_test1() line 3 at PERFORM
+INFO: test package procedure
+CONTEXT: SQL statement "CALL pckg_test1.proc_test2()"
+PL/pgSQL function proc_test1() line 4 at PERFORM
+ proc_test1
+------------
+
+(1 row)
+
+DROP PACKAGE synonym_user_2.pckg_test;
+NOTICE: drop cascades to function synonym_user_2.proc_test1()
+DROP PACKAGE synonym_user_1.pckg_test1;
+NOTICE: drop cascades to function synonym_user_1.proc_test2()
+DROP PROCEDURE synonym_user_1.proc_test();
+DROP USER synonym_user_2 CASCADE;
+DROP USER synonym_user_1 CASCADE;
+reset behavior_compat_options;
+--test Package return record.col%TYPE
+create schema synonym_schema1;
+create schema synonym_schema2;
+set search_path = synonym_schema1;
+--test references within a package
+create or replace package p_test1 as
+ type t1 is record(c1 varchar2, c2 int);
+ function f1(ss in t1) return t1.c2%TYPE;
+end p_test1;
+/
+NOTICE: type reference t1.c2%TYPE converted to integer
+create or replace package body p_test1 as
+ function f1(ss in t1) return t1.c2%TYPE as
+ begin
+ return ss.c2;
+ end;
+end p_test1;
+/
+NOTICE: type reference t1.c2%TYPE converted to integer
+NOTICE: type reference t1.c2%TYPE converted to integer
+select p_test1.f1(('aa',5));
+ f1
+----
+ 5
+(1 row)
+
+--test cross-package references
+create or replace package p_test2 as
+ function ff1(ss in p_test1.t1) return p_test1.t1.c2%TYPE;
+ va p_test1.t1.c2%TYPE;
+end p_test2;
+/
+NOTICE: type reference p_test1.t1.c2%TYPE converted to integer
+create or replace package body p_test2 as
+ vb p_test1.t1.c2%TYPE;
+ function ff1(ss in p_test1.t1) return p_test1.t1.c2%TYPE as
+ begin
+ return ss.c2;
+ end;
+end p_test2;
+/
+NOTICE: type reference p_test1.t1.c2%TYPE converted to integer
+NOTICE: type reference p_test1.t1.c2%TYPE converted to integer
+select p_test2.ff1(('aa',55));
+ ff1
+-----
+ 55
+(1 row)
+
+--test cross-schema package references
+set search_path = synonym_schema2;
+create or replace package p_test2 as
+ function fff1(ss in synonym_schema1.p_test1.t1) return synonym_schema1.p_test1.t1.c2%TYPE;
+end p_test2;
+/
+NOTICE: type reference synonym_schema1.p_test1.t1.c2%TYPE converted to integer
+create or replace package body p_test2 as
+ function fff1(ss in synonym_schema1.p_test1.t1) return synonym_schema1.p_test1.t1.c2%TYPE as
+ begin
+ return ss.c2;
+ end;
+end p_test2;
+/
+NOTICE: type reference synonym_schema1.p_test1.t1.c2%TYPE converted to integer
+NOTICE: type reference synonym_schema1.p_test1.t1.c2%TYPE converted to integer
+select p_test2.fff1(('aa',555));
+ fff1
+------
+ 555
+(1 row)
+
+DROP PACKAGE p_test2;
+--?.*
+DROP PACKAGE synonym_schema1.p_test2;
+--?.*
+DROP PACKAGE synonym_schema1.p_test1;
+--?.*
+DROP SCHEMA synonym_schema2 CASCADE;
+DROP SCHEMA synonym_schema1 CASCADE;
+set current_schema = sql_functionreturn;
+--test pkg.val%TYPE
+create schema synonym_schema1;
+create schema synonym_schema2;
+set search_path = synonym_schema1;
+create or replace package pck1 is
+va int;
+end pck1;
+/
+create or replace package body pck1 as
+ function f1(ss in int) return int as
+ begin
+ return ss;
+ end;
+end pck1;
+/
+--test cross-package references
+create or replace package p_test2 as
+ va pck1.va%TYPE;
+ procedure p1 (a pck1.va%TYPE);
+end p_test2;
+/
+create or replace package body p_test2 as
+ procedure p1 (a pck1.va%TYPE) as
+ begin
+ NULL;
+ end;
+end p_test2;
+/
+--test cross-schema package references
+set search_path = synonym_schema2;
+create or replace package p_test2 as
+ va synonym_schema1.pck1.va%TYPE;
+ procedure p1 (a synonym_schema1.pck1.va%TYPE);
+end p_test2;
+/
+create or replace package body p_test2 as
+ procedure p1 (a synonym_schema1.pck1.va%TYPE) as
+ begin
+ NULL;
+ end;
+end p_test2;
+/
+DROP PACKAGE p_test2;
+NOTICE: drop cascades to function synonym_schema2.p1(integer)
+DROP PACKAGE synonym_schema1.p_test2;
+NOTICE: drop cascades to function synonym_schema1.p1(integer)
+DROP PACKAGE synonym_schema1.pck1;
+NOTICE: drop cascades to function synonym_schema1.f1(integer)
+DROP SCHEMA synonym_schema2 CASCADE;
+DROP SCHEMA synonym_schema1 CASCADE;
+set current_schema = sql_functionreturn;
+--test keyword table name used by keyword.col%TYPE
+DROP TABLE if EXISTS type;
+NOTICE: table "type" does not exist, skipping
+CREATE TABLE type(a int, b int);
+create or replace package p_test1 as
+ type r1 is record(c1 type%ROWTYPE, c2 int);
+ procedure p1 (a in type%ROWTYPE);
+end p_test1;
+/
+create or replace procedure func1 as
+ va type%rowtype;
+ type r1 is record(c1 type%rowtype,c2 varchar2(20));
+begin
+ va := 'a';
+ raise info '%',va;
+ va := 'b';
+ raise info '%',va;
+end;
+/
+DROP PROCEDURE func1;
+DROP PACKAGE p_test1;
+NOTICE: drop cascades to function sql_functionreturn.p1(type)
+DROP TABLE type;
+--test row type default value
+CREATE TABLE tb_test(a int, b int);
+create or replace package pckg_test as
+procedure proc_test(i_col1 in tb_test);
+end pckg_test;
+/
+create or replace package body pckg_test as
+procedure proc_test(i_col1 in tb_test)as
+v_idx tb_test%rowtype:=i_col1;
+v_idx2 tb_test%rowtype;
+begin
+raise info '%', v_idx;
+raise info '%', v_idx2;
+end;
+end pckg_test;
+/
+call pckg_test.proc_test((11,22));
+INFO: (11,22)
+INFO: (,)
+ proc_test
+-----------
+
+(1 row)
+
+drop package pckg_test;
+NOTICE: drop cascades to function sql_functionreturn.proc_test(tb_test)
+drop table tb_test;
+-- test record type default value
+create or replace procedure p1 is
+type r1 is record (a int :=1,b int :=2);
+va r1 := (2,3);
+begin
+raise info '%', va;
+end;
+/
+call p1();
+INFO: (2,3)
+ p1
+----
+
+(1 row)
+
+create or replace procedure p1 is
+type r1 is record (a int :=1,b int :=2);
+va r1 := NULL;
+begin
+raise info '%', va;
+end;
+/
+call p1();
+INFO: (,)
+ p1
+----
+
+(1 row)
+
+DROP procedure p1;
+--test record type default value in package
+create or replace package pck1 is
+type t1 is record(c1 int := 1, c2 int := 2);
+va t1 := (4,5);
+end pck1;
+/
+declare
+begin
+raise info '%',pck1.va;
+end;
+/
+INFO: (4,5)
+DROP package pck1;
+-- test rowVar%TYPE
+-- (1) in procedure
+create table test1 (a int , b int);
+create or replace procedure p1() is
+va test1%ROWTYPE;
+vb va%ROWTYPE;
+begin
+vb := (1,2);
+raise info '%',vb;
+end;
+/
+call p1();
+INFO: (1,2)
+ p1
+----
+
+(1 row)
+
+-- (1) record var%TYPE, should error
+create or replace procedure p1() is
+TYPE r1 is record (a int, b int);
+va r1;
+vb va%ROWTYPE;
+begin
+vb := (1,2);
+raise info '%',vb;
+end;
+/
+ERROR: relation "va" does not exist when parse word.
+CONTEXT: compilation of PL/pgSQL function "p1" near line 4
+-- (2) in package
+create or replace package pck1 is
+va test1%ROWTYPE;
+vb va%ROWTYPE;
+end pck1;
+/
+drop package pck1;
+-- (2) record var%TYPE, should error
+create or replace package pck1 is
+TYPE r1 is record (a varchar(10), b int);
+va r1;
+vb va%ROWTYPE;
+end pck1;
+/
+ERROR: relation "va" does not exist when parse word.
+CONTEXT: compilation of PL/pgSQL package near line 4
+drop package pck1;
+ERROR: package pck1 does not exist
+-- (3) across package
+create or replace package pck1 is
+va test1%ROWTYPE;
+end pck1;
+/
+create or replace package pck2 is
+va pck1.va%ROWTYPE := (2,3);
+end pck2;
+/
+declare
+begin
+raise info '%', pck2.va;
+end;
+/
+INFO: (2,3)
+drop package pck2;
+drop package pck1;
+-- (3) record var%TYPE, should error
+create or replace package pck1 is
+TYPE r1 is record (a varchar(10), b int);
+va r1;
+end pck1;
+/
+create or replace package pck2 is
+va pck1.va%ROWTYPE := (2,3);
+end pck2;
+/
+ERROR: schema "pck1" does not exist
+CONTEXT: compilation of PL/pgSQL package near line 2
+DROP PACKAGE pck2;
+ERROR: package pck2 does not exist
+DROP PACKAGE pck1;
+DROP PROCEDURE p1();
+DROP TABLE test1;
+-- test array var%TYPE
+create or replace procedure p1() is
+type r1 is varray(10) of int;
+va r1;
+vb va%TYPE;
+begin
+vb(1) := 1;
+raise info '%',vb;
+end;
+/
+call p1();
+INFO: {1}
+ p1
+----
+
+(1 row)
+
+create or replace procedure p1() is
+type r1 is table of int index by varchar2(10);
+va r1;
+vb va%TYPE;
+begin
+vb('aaa') := 1;
+raise info '%',vb;
+end;
+/
+call p1();
+INFO: {1}
+ p1
+----
+
+(1 row)
+
+DROP PROCEDURE p1;
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+drop procedure if exists pro_test_tab;
+drop table if exists t1;
+drop synonym if exists compfoo;
+drop synonym if exists tabfoo;
+drop type if exists sql_compositetype_test.compfoo;
+drop table if exists sql_compositetype_test.tabfoo;
+drop function if exists get_set_intarray;
+drop function if exists get_fooarray;
+drop function if exists get_foo;
+drop function if exists get_ctype1array;
+drop function if exists get_intarray;
+drop function if exists get_int;
+drop function if exists get_ctype1;
+drop table if exists foo;
+drop type if exists ctype1;
+-- clean up --
+drop schema if exists sql_compositetype_test cascade;
+drop schema if exists sql_functionreturn cascade;
+NOTICE: drop cascades to synonym sql_functionreturn.tb_test
diff --git a/src/test/regress/expected/hw_subpartition_add_drop_partition.out b/src/test/regress/expected/hw_subpartition_add_drop_partition.out
new file mode 100644
index 000000000..34cb17ad1
--- /dev/null
+++ b/src/test/regress/expected/hw_subpartition_add_drop_partition.out
@@ -0,0 +1,2426 @@
+DROP SCHEMA hw_subpartition_add_drop_partition CASCADE;
+ERROR: schema "hw_subpartition_add_drop_partition" does not exist
+CREATE SCHEMA hw_subpartition_add_drop_partition;
+SET CURRENT_SCHEMA TO hw_subpartition_add_drop_partition;
+--
+----range-range table----
+--
+--prepare
+CREATE TABLE range_range_sales
+(
+ product_id INT4 NOT NULL,
+ customer_id INT4 PRIMARY KEY,
+ time_id DATE,
+ channel_id CHAR(1),
+ type_id INT4,
+ quantity_sold NUMERIC(3),
+ amount_sold NUMERIC(10,2)
+)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id)
+(
+ PARTITION customer1 VALUES LESS THAN (200)
+ (
+ SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'),
+ SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'),
+ SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'),
+ SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01')
+ ),
+ PARTITION customer2 VALUES LESS THAN (500)
+ (
+ SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'),
+ SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'),
+ SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'),
+
SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_sales_pkey" for table "range_range_sales" +INSERT INTO range_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE range_range_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1500) + ( + SUBPARTITION customer_temp1_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer_temp1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer_temp1_2010 VALUES LESS THAN ('2012-01-01'), + SUBPARTITION customer_temp1_2011 VALUES LESS THAN ('2011-01-01') + ); +ERROR: partition bound of partition "customer_temp1_2011" is too low +--success, add 4 subpartition +ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01') + ); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp2 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_range_sales ADD PARTITION customer_temp3 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_range_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp4 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--success, add 1 subpartition +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES LESS THAN ('2015-01-01'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('2011-01-01'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01'); +ERROR: can not add none-range partition to range partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND 
(p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+-------------- + customer1 | p | r | f | 0 | 3 | {200} + customer2 | p | r | f | 0 | 3 | {500} + customer3 | p | r | f | 0 | 3 | {800} + customer4 | p | r | f | 0 | 3 | {1200} + customer5 | p | r | f | 0 | 3 | {1500} + customer6 | p | r | f | 0 | 3 | {NULL} + range_range_sales | r | r | f | 0 | 2 | + customer1_2008 | s | r | t | 0 | | {2009-01-01} + customer1_2009 | s | r | t | 0 | | {2010-01-01} + customer1_2010 | s | r | t | 0 | | {2011-01-01} + customer1_2011 | s | r | t | 0 | | {2012-01-01} + customer1_2012 | s | r | t | 0 | | {2013-01-01} + customer2_2008 | s | r | t | 0 | | {2009-01-01} + customer2_2009 | s | r | t | 0 | | {2010-01-01} + customer2_2010 | s | r | t | 0 | | {2011-01-01} + customer2_2011 | s | r | t | 0 | | {2012-01-01} + customer3_subpartdefault1 | s | r | t | 0 | | {NULL} + customer4_all | s | r | t | 0 | | {2012-01-01} + customer5_2008 | s | r | t | 0 | | {2009-01-01} + customer5_2009 | s | r | t | 0 | | {2010-01-01} + customer5_2010 | s | r | t | 0 | | {2011-01-01} + customer5_2011 | s | r | t | 0 | | {2012-01-01} + customer6_subpartdefault1 | s | r | t | 0 | | {NULL} +(23 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_2008_product_id_idx | x | n | t | t + customer1_2009_product_id_idx | x | n | t | t + customer1_2010_product_id_idx | x | n | t | t + customer1_2011_product_id_idx | x | n | t | t + customer1_2012_product_id_idx | x | n | t | t + customer2_2008_product_id_idx | x | n | t | t + customer2_2009_product_id_idx | x | n | t | t + customer2_2010_product_id_idx | x | n | t | t + customer2_2011_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_all_product_id_idx | x | n | t | t + customer5_2008_product_id_idx | x | n | t | t + customer5_2009_product_id_idx | x | n | t | t + customer5_2010_product_id_idx | x | n | t | t + customer5_2011_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(16 rows) + +\d+ range_range_sales + Table "hw_subpartition_add_drop_partition.range_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By RANGE(time_id) +Number of partitions: 6 (View pg_partition to check each partition range.) 
+Number of subpartitions: 16 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_range_sales DROP PARTITION customer2; +--success +ALTER TABLE range_range_sales DROP SUBPARTITION customer1_2008; +--fail, the only subpartition +ALTER TABLE range_range_sales DROP SUBPARTITION customer4_all; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition customer3 +ALTER TABLE range_range_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP PARTITION FOR (400, '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP SUBPARTITION FOR (1400); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE range_range_sales DROP PARTITION FOR ('2010-01-01'); +ERROR: invalid input syntax for integer: "2010-01-01" +--fail, invalid type +ALTER TABLE range_range_sales DROP SUBPARTITION FOR ('2010-01-01', 1400); +ERROR: invalid input syntax for integer: "2010-01-01" +--success, drop subpartition customer5_2010 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2010-01-01'); +--fail, the only subpartition in customer6 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(2000, '2009-01-01'); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2012-01-01'); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM range_range_sales; + count +------- + 201 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+-------------- + customer1 | p | r | f | 0 | 3 | {200} + customer4 | p | r | f | 0 | 3 | {1200} + customer5 | p | r | f | 0 | 3 | {1500} + customer6 | p | r | f | 0 | 3 | {NULL} + range_range_sales | r | r | f | 0 | 2 | + customer1_2009 | s | r | t | 0 | | {2010-01-01} + customer1_2010 | s | r | t | 0 | | {2011-01-01} + customer1_2011 | s | r | t | 0 | | {2012-01-01} + customer1_2012 | s | r | t | 0 | | {2013-01-01} + customer4_all | s | r | t | 0 | | {2012-01-01} + customer5_2008 | s | r | t | 0 | | {2009-01-01} + customer5_2009 | s | r | t | 0 | | {2010-01-01} + customer5_2011 | s | r | t | 0 | | {2012-01-01} + customer6_subpartdefault1 | s | r | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace 
n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_2009_product_id_idx | x | n | t | t + customer1_2010_product_id_idx | x | n | t | t + customer1_2011_product_id_idx | x | n | t | t + customer1_2012_product_id_idx | x | n | t | t + customer4_all_product_id_idx | x | n | t | t + customer5_2008_product_id_idx | x | n | t | t + customer5_2009_product_id_idx | x | n | t | t + customer5_2011_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ range_range_sales + Table "hw_subpartition_add_drop_partition.range_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By RANGE(time_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- +----range-list table---- +-- +--prepare +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_sales_pkey" for table "range_list_sales" +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE range_list_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1500) + ( + SUBPARTITION customer_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer_temp1_channel3 VALUES ('6', '7', '5') + 
); +ERROR: list partition customer_temp1_channel2 and customer_temp1_channel3 has overlapped value +--fail, value conflict +ALTER TABLE range_list_sales ADD PARTITION customer_temp2 VALUES LESS THAN (1500) + ( + SUBPARTITION customer_temp2_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer_temp2_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer_temp2_channel3 VALUES ('6', '7', '8', '7', '8') + ); +ERROR: list partition customer_temp2_channel3 has overlapped value +--success, add 4 subpartition +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_list_sales ADD PARTITION customer_temp4 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_list_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp5 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--success, add 1 subpartition +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); +ERROR: can not add none-list partition to list partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+----------------------- + customer1 | p | r | f | 0 | 4 | {200} + customer2 | p | r | f | 0 | 4 | {500} + customer3 | p | r | f | 0 | 4 | {800} + customer4 | p | r | f | 0 | 4 | {1200} + customer5 | p | r | f | 0 | 4 | {1500} + customer6 | p | r | f | 0 | 4 | {NULL} + range_list_sales | r | r | f | 0 | 2 | + customer1_channel1 | s | l | t | 0 | | {0,1,2} + customer1_channel2 | s | l | t | 0 | | {3,4,5} + customer1_channel3 | s | l | t | 0 | | {6,7,8} + customer1_channel4 | s | l | t | 0 | | {9} + customer1_channel5 | s | l | t | 0 | | {X} + 
customer2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + customer2_channel2 | s | l | t | 0 | | {NULL} + customer3_subpartdefault1 | s | l | t | 0 | | {NULL} + customer4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + customer5_channel1 | s | l | t | 0 | | {0,1,2} + customer5_channel2 | s | l | t | 0 | | {3,4,5} + customer5_channel3 | s | l | t | 0 | | {6,7,8} + customer5_channel4 | s | l | t | 0 | | {9} + customer6_subpartdefault1 | s | l | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_channel1_product_id_idx | x | n | t | t + customer1_channel2_product_id_idx | x | n | t | t + customer1_channel3_product_id_idx | x | n | t | t + customer1_channel4_product_id_idx | x | n | t | t + customer1_channel5_product_id_idx | x | n | t | t + customer2_channel1_product_id_idx | x | n | t | t + customer2_channel2_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_channel1_product_id_idx | x | n | t | t + customer5_channel1_product_id_idx | x | n | t | t + customer5_channel2_product_id_idx | x | n | t | t + customer5_channel3_product_id_idx | x | n | t | t + customer5_channel4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ range_list_sales + Table "hw_subpartition_add_drop_partition.range_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By LIST(channel_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) 
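-- The catalog checks above can be condensed when only a per-partition
-- subpartition count is needed; parttype codes are as displayed in the
-- results ('r' = partitioned table, 'p' = partition, 's' = subpartition).
-- A sketch (not part of the test suite), reusing only catalog columns
-- already shown above:
SELECT parent.relname AS partition_name,
       count(child.oid) AS subpartition_count
  FROM pg_partition parent
  JOIN pg_class c ON parent.parentid = c.oid
  JOIN pg_namespace n ON c.relnamespace = n.oid
  LEFT JOIN pg_partition child ON child.parentid = parent.oid
 WHERE c.relname = 'range_list_sales'
   AND n.nspname = CURRENT_SCHEMA
   AND parent.parttype = 'p'
 GROUP BY parent.relname
 ORDER BY parent.relname;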
+Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_list_sales DROP PARTITION customer2; +--success +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; +--fail, the only subpartition +ALTER TABLE range_list_sales DROP SUBPARTITION customer4_channel1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition customer3 +ALTER TABLE range_list_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP PARTITION FOR (400, '4'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP SUBPARTITION FOR (1400); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE range_list_sales DROP PARTITION FOR ('abc'); +ERROR: invalid input syntax for integer: "abc" +--fail, invalid type +ALTER TABLE range_list_sales DROP SUBPARTITION FOR ('abc', 1400); +ERROR: invalid input syntax for integer: "abc" +--success, drop subpartition customer5_channel3 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1400, '7'); +--fail, the only subpartition in customer6 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(2000, 'X'); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1100, 'X'); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM range_list_sales; + count +------- + 341 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+----------------------- + customer1 | p | r | f | 0 | 4 | {200} + customer4 | p | r | f | 0 | 4 | {1200} + customer5 | p | r | f | 0 | 4 | {1500} + customer6 | p | r | f | 0 | 4 | {NULL} + range_list_sales | r | r | f | 0 | 2 | + customer1_channel2 | s | l | t | 0 | | {3,4,5} + customer1_channel3 | s | l | t | 0 | | {6,7,8} + customer1_channel4 | s | l | t | 0 | | {9} + customer1_channel5 | s | l | t | 0 | | {X} + customer4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + customer5_channel1 | s | l | t | 0 | | {0,1,2} + customer5_channel2 | s | l | t | 0 | | {3,4,5} + customer5_channel4 | s | l | t | 0 | | {9} + customer6_subpartdefault1 | s | l | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY 
p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_channel2_product_id_idx | x | n | t | t + customer1_channel3_product_id_idx | x | n | t | t + customer1_channel4_product_id_idx | x | n | t | t + customer1_channel5_product_id_idx | x | n | t | t + customer4_channel1_product_id_idx | x | n | t | t + customer5_channel1_product_id_idx | x | n | t | t + customer5_channel2_product_id_idx | x | n | t | t + customer5_channel4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ range_list_sales + Table "hw_subpartition_add_drop_partition.range_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- +----range-hash table---- +-- +--prepare +CREATE TABLE range_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY HASH (product_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_product1, + SUBPARTITION customer1_product2, + SUBPARTITION customer1_product3, + SUBPARTITION customer1_product4 + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_product1, + SUBPARTITION customer2_product2 + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_product1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_hash_sales_pkey" for table "range_hash_sales" +INSERT INTO range_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_hash_sales_idx ON range_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_product1, + SUBPARTITION customer5_product2, + SUBPARTITION customer5_product3, + SUBPARTITION customer5_product4 + ); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_hash_sales ADD PARTITION customer_temp2 VALUES (1300); +ERROR: can not add none-range partition to range partition 
table +--success, add 1 default subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, not support add hash +ALTER TABLE range_hash_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; + ^ +--fail, invalid format +ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+------------ + customer1 | p | r | f | 0 | 1 | {200} + customer2 | p | r | f | 0 | 1 | {500} + customer3 | p | r | f | 0 | 1 | {800} + customer4 | p | r | f | 0 | 1 | {1200} + customer5 | p | r | f | 0 | 1 | {1500} + customer6 | p | r | f | 0 | 1 | {NULL} + range_hash_sales | r | r | f | 0 | 2 | + customer1_product1 | s | h | t | 0 | | {0} + customer1_product2 | s | h | t | 0 | | {1} + customer1_product3 | s | h | t | 0 | | {2} + customer1_product4 | s | h | t | 0 | | {3} + customer2_product1 | s | h | t | 0 | | {0} + customer2_product2 | s | h | t | 0 | | {1} + customer3_subpartdefault1 | s | h | t | 0 | | {0} + customer4_product1 | s | h | t | 0 | | {0} + customer5_product1 | s | h | t | 0 | | {0} + customer5_product2 | s | h | t | 0 | | {1} + customer5_product3 | s | h | t | 0 | | {2} + customer5_product4 | s | h | t | 0 | | {3} + customer6_subpartdefault1 | s | h | t | 0 | | {0} +(20 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_product1_product_id_idx | x | n | t | t + customer1_product2_product_id_idx | x | n | t | t + customer1_product3_product_id_idx | x | n | t | t + customer1_product4_product_id_idx | x | n | t | t + customer2_product1_product_id_idx | x | n | t | t + customer2_product2_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_product1_product_id_idx | x | n | t | t + customer5_product1_product_id_idx | x | n | t | t + customer5_product2_product_id_idx | x | n | t | t + customer5_product3_product_id_idx | x | n | t | t + customer5_product4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t 
+(13 rows) + +\d+ range_hash_sales + Table "hw_subpartition_add_drop_partition.range_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By HASH(product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 13 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_hash_sales DROP PARTITION customer2; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer1_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer4_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--success, drop partition customer3 +ALTER TABLE range_hash_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_hash_sales DROP PARTITION FOR (400, '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, invalid type +ALTER TABLE range_hash_sales DROP PARTITION FOR ('2010-01-01'); +ERROR: invalid input syntax for integer: "2010-01-01" +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION FOR(1400, 1); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM range_hash_sales; + count +------- + 400 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+------------ + customer1 | p | r | f | 0 | 1 | {200} + customer4 | p | r | f | 0 | 1 | {1200} + customer5 | p | r | f | 0 | 1 | {1500} + customer6 | p | r | f | 0 | 1 | {NULL} + range_hash_sales | r | r | f | 0 | 2 | + customer1_product1 | s | h | t | 0 | | {0} + customer1_product2 | s | h | t | 0 | | {1} + customer1_product3 | s | h | t | 0 | | {2} + customer1_product4 | s | h | t | 0 | | {3} + customer4_product1 | s | h | t | 0 | | {0} + customer5_product1 | s | h | t | 0 | | {0} + customer5_product2 | s | h | t | 0 | | {1} + customer5_product3 | s | h | t | 0 | | 
{2} + customer5_product4 | s | h | t | 0 | | {3} + customer6_subpartdefault1 | s | h | t | 0 | | {0} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_product1_product_id_idx | x | n | t | t + customer1_product2_product_id_idx | x | n | t | t + customer1_product3_product_id_idx | x | n | t | t + customer1_product4_product_id_idx | x | n | t | t + customer4_product1_product_id_idx | x | n | t | t + customer5_product1_product_id_idx | x | n | t | t + customer5_product2_product_id_idx | x | n | t | t + customer5_product3_product_id_idx | x | n | t | t + customer5_product4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(10 rows) + +\d+ range_hash_sales + Table "hw_subpartition_add_drop_partition.range_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) 
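-- How the FOR clause resolves, as exercised above (annotation, not part of
-- the test): DROP PARTITION FOR (value) routes the value through the
-- partition key the way an INSERT would, then drops the partition it lands
-- in along with all of that partition's subpartitions. In the range-hash
-- table, customer_id 400 is below the customer3 bound of 800:
ALTER TABLE range_hash_sales DROP PARTITION FOR (400);  -- drops customer3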
+Has OIDs: no +Options: orientation=row, compression=no + +-- +----list-range table---- +-- +--prepare +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_range_sales_pkey" for table "list_range_sales" +INSERT INTO list_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp1 VALUES ('X') + ( + SUBPARTITION channel_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel_temp1_customer4 VALUES LESS THAN (700) + ); +ERROR: partition bound of partition "channel_temp1_customer4" is too low +--success, add 4 subpartition +ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_customer1 VALUES LESS THAN (200), + SUBPARTITION channel5_customer2 VALUES LESS THAN (500), + SUBPARTITION channel5_customer3 VALUES LESS THAN (800), + SUBPARTITION channel5_customer4 VALUES LESS THAN (1200) + ); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp2 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_range_sales ADD PARTITION channel_temp3 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_range_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp4 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--success, add 1 subpartition +ALTER TABLE list_range_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_customer5 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES LESS THAN (2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES LESS THAN (2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE list_range_sales MODIFY PARTITION 
channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500); +ERROR: can not add none-range partition to range partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 2 | {0,1,2} + channel2 | p | l | f | 0 | 2 | {3,4,5} + channel3 | p | l | f | 0 | 2 | {6,7} + channel4 | p | l | f | 0 | 2 | {8,9} + channel5 | p | l | f | 0 | 2 | {X} + channel6 | p | l | f | 0 | 2 | {NULL} + list_range_sales | r | l | f | 0 | 4 | + channel1_customer1 | s | r | t | 0 | | {200} + channel1_customer2 | s | r | t | 0 | | {500} + channel1_customer3 | s | r | t | 0 | | {800} + channel1_customer4 | s | r | t | 0 | | {1200} + channel1_customer5 | s | r | t | 0 | | {NULL} + channel2_customer1 | s | r | t | 0 | | {500} + channel2_customer2 | s | r | t | 0 | | {NULL} + channel3_subpartdefault1 | s | r | t | 0 | | {NULL} + channel4_customer1 | s | r | t | 0 | | {1200} + channel5_customer1 | s | r | t | 0 | | {200} + channel5_customer2 | s | r | t | 0 | | {500} + channel5_customer3 | s | r | t | 0 | | {800} + channel5_customer4 | s | r | t | 0 | | {1200} + channel6_subpartdefault1 | s | r | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_customer1_product_id_idx | x | n | t | t + channel1_customer2_product_id_idx | x | n | t | t + channel1_customer3_product_id_idx | x | n | t | t + channel1_customer4_product_id_idx | x | n | t | t + channel1_customer5_product_id_idx | x | n | t | t + channel2_customer1_product_id_idx | x | n | t | t + channel2_customer2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_customer1_product_id_idx | x | n | t | t + channel5_customer1_product_id_idx | x | n | t | t + channel5_customer2_product_id_idx | x | n | t | t + channel5_customer3_product_id_idx | x | n | t | t + channel5_customer4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ list_range_sales + Table "hw_subpartition_add_drop_partition.list_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer 
| | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By RANGE(customer_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_range_sales DROP PARTITION channel2; +--success +ALTER TABLE list_range_sales DROP SUBPARTITION channel1_customer1; +--fail, the only subpartition +ALTER TABLE list_range_sales DROP SUBPARTITION channel4_customer1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition channel3 +ALTER TABLE list_range_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP PARTITION FOR('X', 700); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X'); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE list_range_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, invalid type +ALTER TABLE list_range_sales DROP SUBPARTITION FOR(700, 'X'); +ERROR: value too long for type character(1) +--success, drop subpartition channel5_customer3 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 700); +--fail, the only subpartition in channel6 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('K', 100); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 2500); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM list_range_sales; + count +------- + 441 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 2 | {0,1,2} + channel4 | p | l | f | 0 | 2 | {8,9} + channel5 | p | l | f | 0 | 2 | {X} + channel6 | p | l | f | 0 | 2 | {NULL} + list_range_sales | r | l | f | 0 | 4 | + channel1_customer2 | s | r | t | 0 | | {500} + channel1_customer3 | s | r | t | 0 | | {800} + channel1_customer4 | s | r | t | 0 | | {1200} + channel1_customer5 | s | r | t | 0 | | {NULL} + channel4_customer1 | s | r | t | 0 | | {1200} + channel5_customer1 | s | r | t | 0 | | {200} + 
channel5_customer2 | s | r | t | 0 | | {500} + channel5_customer4 | s | r | t | 0 | | {1200} + channel6_subpartdefault1 | s | r | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_customer2_product_id_idx | x | n | t | t + channel1_customer3_product_id_idx | x | n | t | t + channel1_customer4_product_id_idx | x | n | t | t + channel1_customer5_product_id_idx | x | n | t | t + channel4_customer1_product_id_idx | x | n | t | t + channel5_customer1_product_id_idx | x | n | t | t + channel5_customer2_product_id_idx | x | n | t | t + channel5_customer4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ list_range_sales + Table "hw_subpartition_add_drop_partition.list_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
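-- DROP SUBPARTITION FOR takes exactly one value per partitioning level,
-- top level first; passing a single value fails with "Number of boundary
-- items NOT EQUAL to number of partition keys", as shown above. Repeating
-- the successful case with the routing spelled out (annotation only):
-- 'X' selects partition channel5, then 700 selects subpartition
-- channel5_customer3 (700 < 800), which is dropped.
ALTER TABLE list_range_sales DROP SUBPARTITION FOR ('X', 700);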
+Has OIDs: no +Options: orientation=row, compression=no + +-- +----list-list table---- +-- +--prepare +CREATE TABLE list_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY LIST (type_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_type1 VALUES (0, 1, 2), + SUBPARTITION channel1_type2 VALUES (3, 4), + SUBPARTITION channel1_type3 VALUES (5, 6, 7), + SUBPARTITION channel1_type4 VALUES (8, 9) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_type1 VALUES (0, 1, 2, 3), + SUBPARTITION channel2_type2 VALUES (DEFAULT) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_type1 VALUES (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_list_sales_pkey" for table "list_list_sales" +INSERT INTO list_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_list_sales_idx ON list_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp1 VALUES ('X') + ( + SUBPARTITION channel_temp1_type1 VALUES (0, 1, 2), + SUBPARTITION channel_temp1_type2 VALUES (3, 4, 5), + SUBPARTITION channel_temp1_type3 VALUES (6, 7, 5) + ); +ERROR: list partition channel_temp1_type2 and channel_temp1_type3 has overlapped value +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp2 VALUES ('X') + ( + SUBPARTITION channel_temp2_type1 VALUES (0, 1, 2), + SUBPARTITION channel_temp2_type2 VALUES (3, 4, 5), + SUBPARTITION channel_temp2_type3 VALUES (6, 7, 8, 7, 8) + ); +ERROR: list partition channel_temp2_type3 has overlapped value +--success, add 4 subpartition +ALTER TABLE list_list_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_type1 VALUES (0, 1, 2), + SUBPARTITION channel5_type2 VALUES (3, 4), + SUBPARTITION channel5_type3 VALUES (5, 6, 7), + SUBPARTITION channel5_type4 VALUES (8, 9) + ); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp3 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp4 VALUES ('Z', 'Z', 'C'); +ERROR: list partition channel_temp4 has overlapped value +--fail, invalid format +ALTER TABLE list_list_sales ADD PARTITION channel_temp5 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_list_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp6 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--success, add 1 subpartition +ALTER TABLE list_list_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_type5 VALUES (DEFAULT); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES (10, 11, 12); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of 
range +ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES (10, 11, 12); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); +ERROR: can not add none-list partition to list partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + channel1 | p | l | f | 0 | 5 | {0,1,2} + channel2 | p | l | f | 0 | 5 | {3,4,5} + channel3 | p | l | f | 0 | 5 | {6,7} + channel4 | p | l | f | 0 | 5 | {8,9} + channel5 | p | l | f | 0 | 5 | {X} + channel6 | p | l | f | 0 | 5 | {NULL} + list_list_sales | r | l | f | 0 | 4 | + channel1_type1 | s | l | t | 0 | | {0,1,2} + channel1_type2 | s | l | t | 0 | | {3,4} + channel1_type3 | s | l | t | 0 | | {5,6,7} + channel1_type4 | s | l | t | 0 | | {8,9} + channel1_type5 | s | l | t | 0 | | {NULL} + channel2_type1 | s | l | t | 0 | | {0,1,2,3} + channel2_type2 | s | l | t | 0 | | {NULL} + channel3_subpartdefault1 | s | l | t | 0 | | {NULL} + channel4_type1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + channel5_type1 | s | l | t | 0 | | {0,1,2} + channel5_type2 | s | l | t | 0 | | {3,4} + channel5_type3 | s | l | t | 0 | | {5,6,7} + channel5_type4 | s | l | t | 0 | | {8,9} + channel6_subpartdefault1 | s | l | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_type1_product_id_idx | x | n | t | t + channel1_type2_product_id_idx | x | n | t | t + channel1_type3_product_id_idx | x | n | t | t + channel1_type4_product_id_idx | x | n | t | t + channel1_type5_product_id_idx | x | n | t | t + channel2_type1_product_id_idx | x | n | t | t + channel2_type2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_type1_product_id_idx | x | n | t | t + channel5_type1_product_id_idx | x | n | t | t + channel5_type2_product_id_idx | x | n | t | t + channel5_type3_product_id_idx | x | n | t | t + channel5_type4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ list_list_sales + Table "hw_subpartition_add_drop_partition.list_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + 
product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By LIST(type_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_list_sales DROP PARTITION channel2; +--success +ALTER TABLE list_list_sales DROP SUBPARTITION channel1_type1; +--fail, the only subpartition +ALTER TABLE list_list_sales DROP SUBPARTITION channel4_type1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition channel3 +ALTER TABLE list_list_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP PARTITION FOR('X', 6); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X'); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE list_list_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, invalid type +ALTER TABLE list_list_sales DROP SUBPARTITION FOR(10, 'X'); +ERROR: value too long for type character(1) +--success, drop subpartition channel5_type3 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 6); +--fail, the only subpartition in channel6 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('K', 10); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 5); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM list_list_sales; + count +------- + 200 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + channel1 | p | l | f | 0 | 5 | {0,1,2} + channel4 | p | l | f | 0 | 5 | {8,9} + channel5 | p | l | f | 0 | 5 | {X} + channel6 | p | l | f | 0 | 5 | {NULL} + list_list_sales | r | l | f | 0 | 4 | + channel1_type2 | s | l | t | 0 | | {3,4} + channel1_type3 | s | l | t | 0 | | {5,6,7} + channel1_type4 | s | l | t | 0 | 
| {8,9} + channel1_type5 | s | l | t | 0 | | {NULL} + channel4_type1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + channel5_type1 | s | l | t | 0 | | {0,1,2} + channel5_type2 | s | l | t | 0 | | {3,4} + channel5_type4 | s | l | t | 0 | | {8,9} + channel6_subpartdefault1 | s | l | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_type2_product_id_idx | x | n | t | t + channel1_type3_product_id_idx | x | n | t | t + channel1_type4_product_id_idx | x | n | t | t + channel1_type5_product_id_idx | x | n | t | t + channel4_type1_product_id_idx | x | n | t | t + channel5_type1_product_id_idx | x | n | t | t + channel5_type2_product_id_idx | x | n | t | t + channel5_type4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ list_list_sales + Table "hw_subpartition_add_drop_partition.list_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By LIST(type_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
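-- A partition declared without an explicit subpartition list (channel3 and
-- channel6 above) receives one implicit subpartition named
-- <partition>_subpartdefault1, whose boundary shows as {NULL} for
-- list/range subpartitioning and {0} for hash. A quick way to enumerate
-- them in the current database (sketch, not part of the test):
SELECT relname, partstrategy, boundaries
  FROM pg_partition
 WHERE parttype = 's'
   AND relname LIKE '%subpartdefault1'
 ORDER BY relname;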
+Has OIDs: no +Options: orientation=row, compression=no + +-- +----list-hash table---- +-- +--prepare +CREATE TABLE list_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY HASH (product_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_product1, + SUBPARTITION channel1_product2, + SUBPARTITION channel1_product3, + SUBPARTITION channel1_product4 + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_product1, + SUBPARTITION channel2_product2 + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_product1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_hash_sales_pkey" for table "list_hash_sales" +INSERT INTO list_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_hash_sales_idx ON list_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_product1, + SUBPARTITION channel5_product2, + SUBPARTITION channel5_product3, + SUBPARTITION channel5_product4 + ); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp2 VALUES ('Z', 'Z', 'C'); +ERROR: list partition channel_temp2 has overlapped value +--fail, invalid format +ALTER TABLE list_hash_sales ADD PARTITION channel_temp3 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp4 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, not support add hash +ALTER TABLE list_hash_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...s MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; + ^ +--fail, invalid format +ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 
1 | {0,1,2} + channel2 | p | l | f | 0 | 1 | {3,4,5} + channel3 | p | l | f | 0 | 1 | {6,7} + channel4 | p | l | f | 0 | 1 | {8,9} + channel5 | p | l | f | 0 | 1 | {X} + channel6 | p | l | f | 0 | 1 | {NULL} + list_hash_sales | r | l | f | 0 | 4 | + channel1_product1 | s | h | t | 0 | | {0} + channel1_product2 | s | h | t | 0 | | {1} + channel1_product3 | s | h | t | 0 | | {2} + channel1_product4 | s | h | t | 0 | | {3} + channel2_product1 | s | h | t | 0 | | {0} + channel2_product2 | s | h | t | 0 | | {1} + channel3_subpartdefault1 | s | h | t | 0 | | {0} + channel4_product1 | s | h | t | 0 | | {0} + channel5_product1 | s | h | t | 0 | | {0} + channel5_product2 | s | h | t | 0 | | {1} + channel5_product3 | s | h | t | 0 | | {2} + channel5_product4 | s | h | t | 0 | | {3} + channel6_subpartdefault1 | s | h | t | 0 | | {0} +(20 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_product1_product_id_idx | x | n | t | t + channel1_product2_product_id_idx | x | n | t | t + channel1_product3_product_id_idx | x | n | t | t + channel1_product4_product_id_idx | x | n | t | t + channel2_product1_product_id_idx | x | n | t | t + channel2_product2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_product1_product_id_idx | x | n | t | t + channel5_product1_product_id_idx | x | n | t | t + channel5_product2_product_id_idx | x | n | t | t + channel5_product3_product_id_idx | x | n | t | t + channel5_product4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(13 rows) + +\d+ list_hash_sales + Table "hw_subpartition_add_drop_partition.list_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By HASH(product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 13 (View pg_partition to check each subpartition range.) 
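-- For HASH subpartitions the boundaries column appears to hold a generated
-- bucket sequence number ({0}, {1}, ...) rather than user-supplied values;
-- that reading is an inference from the listings above, but it is
-- consistent with the failures shown: individual hash subpartitions can be
-- neither added ("can not add hash partition") nor, as the next block
-- shows, dropped. Listing the hash buckets (sketch):
SELECT relname, boundaries
  FROM pg_partition
 WHERE parttype = 's' AND partstrategy = 'h'
 ORDER BY relname;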
+Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_hash_sales DROP PARTITION channel2; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel1_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel4_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--success, drop partition channel3 +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6', '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, invalid type +ALTER TABLE list_hash_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION FOR('X', 6); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM list_hash_sales; + count +------- + 500 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 1 | {0,1,2} + channel4 | p | l | f | 0 | 1 | {8,9} + channel5 | p | l | f | 0 | 1 | {X} + channel6 | p | l | f | 0 | 1 | {NULL} + list_hash_sales | r | l | f | 0 | 4 | + channel1_product1 | s | h | t | 0 | | {0} + channel1_product2 | s | h | t | 0 | | {1} + channel1_product3 | s | h | t | 0 | | {2} + channel1_product4 | s | h | t | 0 | | {3} + channel4_product1 | s | h | t | 0 | | {0} + channel5_product1 | s | h | t | 0 | | {0} + channel5_product2 | s | h | t | 0 | | {1} + channel5_product3 | s | h | t | 0 | | {2} + channel5_product4 | s | h | t | 0 | | {3} + channel6_subpartdefault1 | s | h | t | 0 | | {0} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_product1_product_id_idx | x | n | t | t + channel1_product2_product_id_idx | x | n | t | t + channel1_product3_product_id_idx | x | n | t | t + channel1_product4_product_id_idx | x | n | t | t + channel4_product1_product_id_idx | x | n | t | t + channel5_product1_product_id_idx | x | n | t | t + channel5_product2_product_id_idx | x | n | t | t + channel5_product3_product_id_idx | x | n | t | t + channel5_product4_product_id_idx | x 
| n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(10 rows) + +\d+ list_hash_sales + Table "hw_subpartition_add_drop_partition.list_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- +----hash-range table---- +-- +--prepare +CREATE TABLE hash_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1 VALUES LESS THAN (200), + SUBPARTITION product1_customer2 VALUES LESS THAN (500), + SUBPARTITION product1_customer3 VALUES LESS THAN (800), + SUBPARTITION product1_customer4 VALUES LESS THAN (1200) + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1 VALUES LESS THAN (500), + SUBPARTITION product2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 VALUES LESS THAN (1200) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_sales_pkey" for table "hash_range_sales" +INSERT INTO hash_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_range_sales_idx ON hash_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_customer4 VALUES LESS THAN (1200) + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_range_sales ADD PARTITION product_temp2; + ^ +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_customer5 VALUES LESS THAN (1800); +--fail, out of range +ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT); +ERROR: can not add 
none-range partition to range partition table +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE); +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_range_sales | r | h | f | 0 | 1 | + product1_customer1 | s | r | t | 0 | | {200} + product1_customer2 | s | r | t | 0 | | {500} + product1_customer3 | s | r | t | 0 | | {800} + product1_customer4 | s | r | t | 0 | | {1200} + product1_customer5 | s | r | t | 0 | | {1800} + product2_customer1 | s | r | t | 0 | | {500} + product2_customer2 | s | r | t | 0 | | {NULL} + product3_subpartdefault1 | s | r | t | 0 | | {NULL} + product4_customer1 | s | r | t | 0 | | {1200} + product4_customer2 | s | r | t | 0 | | {NULL} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product1_customer5_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t + product4_customer2_product_id_idx | x | n | t | t +(10 rows) + +\d+ hash_range_sales + Table "hw_subpartition_add_drop_partition.hash_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) 
+Number of subpartitions: 10 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+--check for drop partition/subpartition (for)
+--fail, not support drop hash
+ALTER TABLE hash_range_sales DROP PARTITION product2;
+ERROR: Droping hash partition is unsupported.
+--success, drop subpartition product1_customer1
+ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer1;
+--success, drop subpartition product4_customer1
+ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer1;
+--fail, the only subpartition in product4
+ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer2;
+ERROR: Cannot drop the only subpartition of a partitioned table
+DETAIL: N/A
+--fail, not support drop hash
+ALTER TABLE hash_range_sales DROP PARTITION FOR(0);
+ERROR: Droping hash partition is unsupported.
+--fail, not support drop hash
+ALTER TABLE hash_range_sales DROP PARTITION FOR(0, 100);
+ERROR: Droping hash partition is unsupported.
+--fail, number not equal to the number of partkey
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0);
+ERROR: Number of boundary items NOT EQUAL to number of partition keys
+DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table
+--fail, invalid type
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR('2010-01-01', 100);
+ERROR: invalid input syntax for integer: "2010-01-01"
+--success, drop subpartition product1_customer2, though this operation is not recommended
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 100);
+--fail, no subpartition found
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 2300);
+ERROR: The subpartition number is invalid or out-of-range
+DETAIL: N/A
+--check for ok after drop
+SELECT count(*) FROM hash_range_sales;
+ count
+-------
+   628
+(1 row)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_range_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_range_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+         relname          | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries
+--------------------------+----------+--------------+-------------+---------------+---------+------------
+ product1                 | p        | h            | f           |             0 | 2       | {0}
+ product2                 | p        | h            | f           |             0 | 2       | {1}
+ product3                 | p        | h            | f           |             0 | 2       | {2}
+ product4                 | p        | h            | f           |             0 | 2       | {3}
+ hash_range_sales         | r        | h            | f           |             0 | 1       | 
+ product1_customer3       | s        | r            | t           |             0 |         | {800}
+ product1_customer4       | s        | r            | t           |             0 |         | {1200}
+ product1_customer5       | s        | r            | t           |             0 |         | {1800}
+ product2_customer1       | s        | r            | t           |             0 |         | {500}
+ product2_customer2       | s        | r            | t           |             0 |         | {NULL}
+ product3_subpartdefault1 | s        | r            | t           |             0 |         | {NULL}
+ product4_customer2       | s        | r            | t           |             0 |         | {NULL}
+(12 rows)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_range_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+                 relname                 | parttype | partstrategy | hasfilenode | indisusable
+-----------------------------------------+----------+--------------+-------------+------------- + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product1_customer5_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer2_product_id_idx | x | n | t | t +(7 rows) + +\d+ hash_range_sales + Table "hw_subpartition_add_drop_partition.hash_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "hash_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 7 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- +----hash-list table---- +-- +--prepare +CREATE TABLE hash_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION product1 + ( + SUBPARTITION product1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product1_channel4 VALUES ('9') + ), + PARTITION product2 + ( + SUBPARTITION product2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION product2_channel2 VALUES (DEFAULT) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_list_sales_pkey" for table "hash_list_sales" +INSERT INTO hash_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_list_sales_idx ON hash_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product_temp1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product_temp1_channel4 VALUES ('9') + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_list_sales ADD PARTITION product_temp2; + ^ +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_channel5 VALUES 
('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE); +ERROR: can not add none-list partition to list partition table +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT); +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + product1 | p | h | f | 0 | 4 | {0} + product2 | p | h | f | 0 | 4 | {1} + product3 | p | h | f | 0 | 4 | {2} + product4 | p | h | f | 0 | 4 | {3} + hash_list_sales | r | h | f | 0 | 1 | + product1_channel1 | s | l | t | 0 | | {0,1,2} + product1_channel2 | s | l | t | 0 | | {3,4,5} + product1_channel3 | s | l | t | 0 | | {6,7,8} + product1_channel4 | s | l | t | 0 | | {9} + product1_channel5 | s | l | t | 0 | | {X} + product2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + product2_channel2 | s | l | t | 0 | | {NULL} + product3_subpartdefault1 | s | l | t | 0 | | {NULL} + product4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + product4_channel2 | s | l | t | 0 | | {NULL} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_channel1_product_id_idx | x | n | t | t + product1_channel2_product_id_idx | x | n | t | t + product1_channel3_product_id_idx | x | n | t | t + product1_channel4_product_id_idx | x | n | t | t + product1_channel5_product_id_idx | x | n | t | t + product2_channel1_product_id_idx | x | n | t | t + product2_channel2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_channel1_product_id_idx | x | n | t | t + product4_channel2_product_id_idx | x | n | t | t +(10 rows) + +\d+ hash_list_sales + Table "hw_subpartition_add_drop_partition.hash_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | 
integer                        | not null  | plain    |              | 
+ time_id       | timestamp(0) without time zone |           | plain    |              | 
+ channel_id    | character(1)                   |           | extended |              | 
+ type_id       | integer                        |           | plain    |              | 
+ quantity_sold | numeric(3,0)                   |           | main     |              | 
+ amount_sold   | numeric(10,2)                  |           | main     |              | 
+Indexes:
+    "hash_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default
+    "hash_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default
+Partition By HASH(product_id) Subpartition By LIST(channel_id)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Number of subpartitions: 10 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, compression=no
+
+--check for drop partition/subpartition (for)
+--fail, not support drop hash
+ALTER TABLE hash_list_sales DROP PARTITION product2;
+ERROR: Droping hash partition is unsupported.
+--success, drop subpartition product1_channel1
+ALTER TABLE hash_list_sales DROP SUBPARTITION product1_channel1;
+--success, drop subpartition product4_channel1
+ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel1;
+--fail, the only subpartition in product4
+ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel2;
+ERROR: Cannot drop the only subpartition of a partitioned table
+DETAIL: N/A
+--fail, not support drop hash
+ALTER TABLE hash_list_sales DROP PARTITION FOR(0);
+ERROR: Droping hash partition is unsupported.
+--fail, not support drop hash
+ALTER TABLE hash_list_sales DROP PARTITION FOR(0, '4');
+ERROR: Droping hash partition is unsupported.
+--fail, number not equal to the number of partkey
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0);
+ERROR: Number of boundary items NOT EQUAL to number of partition keys
+DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table
+--fail, invalid type
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR('2010-01-01', '4');
+ERROR: invalid input syntax for integer: "2010-01-01"
+--success, drop subpartition product1_channel2, though this operation is not recommended
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, '4');
+--fail, no subpartition found
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, 'Z');
+ERROR: The subpartition number is invalid or out-of-range
+DETAIL: N/A
+--check for ok after drop
+SELECT count(*) FROM hash_list_sales;
+ count
+-------
+   608
+(1 row)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+         relname          | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries
+--------------------------+----------+--------------+-------------+---------------+---------+-------------
+ product1                 | p        | h            | f           |             0 | 4       | {0}
+ product2                 | p        | h            | f           |             0 | 4       | {1}
+ product3                 | p        | h            | f           |             0 | 4       | {2}
+ product4                 | p        | h            | f           |             0 | 4       | {3}
+ hash_list_sales          | r        | h            | f           |             0 | 1       | 
+ product1_channel3        | s        | l            | t           |             0 |         | {6,7,8}
+ product1_channel4        | s        | l            | t           |             0 |         | {9}
+ product1_channel5        | s        | l            | t           |             0 |         | {X}
+ product2_channel1        | s        | l            | t           |             0 |         | {0,1,2,3,4}
+ product2_channel2        | s        | l            | t           |             0 |         | {NULL}
+ 
product3_subpartdefault1 | s | l | t | 0 | | {NULL} + product4_channel2 | s | l | t | 0 | | {NULL} +(12 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_channel3_product_id_idx | x | n | t | t + product1_channel4_product_id_idx | x | n | t | t + product1_channel5_product_id_idx | x | n | t | t + product2_channel1_product_id_idx | x | n | t | t + product2_channel2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_channel2_product_id_idx | x | n | t | t +(7 rows) + +\d+ hash_list_sales + Table "hw_subpartition_add_drop_partition.hash_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "hash_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 7 (View pg_partition to check each subpartition range.) 
+Has OIDs: no +Options: orientation=row, compression=no + +-- +----hash-hash table---- +-- +--prepare +CREATE TABLE hash_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) SUBPARTITION BY HASH (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1, + SUBPARTITION product1_customer2, + SUBPARTITION product1_customer3, + SUBPARTITION product1_customer4 + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1, + SUBPARTITION product2_customer2 + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_hash_sales_pkey" for table "hash_hash_sales" +INSERT INTO hash_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_hash_sales_idx ON hash_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1, + SUBPARTITION product_temp1_customer2, + SUBPARTITION product_temp1_customer3, + SUBPARTITION product_temp1_customer4 + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; + ^ +--fail, not support add hash +ALTER TABLE hash_hash_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...s MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; + ^ +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_hash_sales | r | h | f | 0 | 1 | + product1_customer1 | s | h | t | 0 | | {0} + product1_customer2 | s | h | t | 0 | | {1} + product1_customer3 | s | h | t | 0 | | {2} + product1_customer4 | s | h | t | 0 | | {3} + product2_customer1 | s | h | t | 0 | | {0} + product2_customer2 | s | h | t | 0 | | {1} + product3_subpartdefault1 | s | h | t | 0 | | {0} + product4_customer1 | s | h | t | 0 | | {0} +(13 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND 
(p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t +(8 rows) + +\d+ hash_hash_sales + Table "hw_subpartition_add_drop_partition.hash_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By HASH(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 8 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product1_customer1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product4_customer1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0, 0); +ERROR: Droping hash partition is unsupported. 
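+--Reading aid (illustrative only; the labels below are informal inferences from
+--the output in this file, not official catalog documentation): the checks here
+--query pg_partition directly, and the parttype/partstrategy codes they print
+--can be decoded like this.
+SELECT p.relname,
+       CASE p.parttype WHEN 'r' THEN 'partitioned table'
+                       WHEN 'p' THEN 'partition'
+                       WHEN 's' THEN 'subpartition'
+                       WHEN 'x' THEN 'index partition'
+       END AS parttype_desc,
+       CASE p.partstrategy WHEN 'r' THEN 'range'
+                           WHEN 'l' THEN 'list'
+                           WHEN 'h' THEN 'hash'
+                           WHEN 'n' THEN 'none (index partition)'
+       END AS strategy_desc
+  FROM pg_class c
+  JOIN pg_partition p ON p.parentid = c.oid
+ WHERE c.relname = 'hash_hash_sales';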
+--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0, 0); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM hash_hash_sales; + count +------- + 1000 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_hash_sales | r | h | f | 0 | 1 | + product1_customer1 | s | h | t | 0 | | {0} + product1_customer2 | s | h | t | 0 | | {1} + product1_customer3 | s | h | t | 0 | | {2} + product1_customer4 | s | h | t | 0 | | {3} + product2_customer1 | s | h | t | 0 | | {0} + product2_customer2 | s | h | t | 0 | | {1} + product3_subpartdefault1 | s | h | t | 0 | | {0} + product4_customer1 | s | h | t | 0 | | {0} +(13 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t +(8 rows) + +\d+ hash_hash_sales + Table "hw_subpartition_add_drop_partition.hash_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By HASH(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) 
+Number of subpartitions: 8 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +--finish +DROP TABLE range_range_sales; +DROP TABLE range_list_sales; +DROP TABLE range_hash_sales; +DROP TABLE list_range_sales; +DROP TABLE list_list_sales; +DROP TABLE list_hash_sales; +DROP TABLE hash_range_sales; +DROP TABLE hash_list_sales; +DROP TABLE hash_hash_sales; +DROP SCHEMA hw_subpartition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/hw_subpartition_alter_table.out b/src/test/regress/expected/hw_subpartition_alter_table.out new file mode 100644 index 000000000..04a950f5b --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_alter_table.out @@ -0,0 +1,215 @@ +DROP SCHEMA subpartition_alter_table CASCADE; +ERROR: schema "subpartition_alter_table" does not exist +CREATE SCHEMA subpartition_alter_table; +SET CURRENT_SCHEMA TO subpartition_alter_table; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +--change column type +alter table range_range alter column user_no set data type char(30); +alter table range_range alter column sales_amt set data type varchar; +\d+ range_range + Table "subpartition_alter_table.range_range" + Column | Type | Modifiers | Storage | Stats target | Description +------------+-----------------------+-----------+----------+--------------+------------- + month_code | character varying(30) | not null | extended | | + dept_code | character varying(30) | not null | extended | | + user_no | character(30) | | extended | | + sales_amt | character varying | | extended | | +Indexes: + "range_range_pkey" PRIMARY KEY, btree (month_code, dept_code) LOCAL TABLESPACE pg_default +Partition By RANGE(month_code) Subpartition By RANGE(dept_code) +Number of partitions: 2 (View pg_partition to check each partition range.) +Number of subpartitions: 4 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, compression=no + +-- rename +alter table range_range rename to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME is not yet supported. +alter table range_range rename partition p_201901 to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported. +alter table range_range rename partition p_201901_a to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported. 
+--cluster +create index idx_range_range on range_range(month_code,user_no); +alter table range_range cluster on idx_range_range; +ERROR: cannot cluster a subpartition table +-- move tablespace +CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; +alter table range_range move PARTITION p_201901 tablespace example1; +ERROR: Un-support feature +DETAIL: For subpartition table, modifying tablespace is not yet supported. +alter table range_range move PARTITION p_201901_a tablespace example1; +ERROR: Un-support feature +DETAIL: For subpartition table, modifying tablespace is not yet supported. +DROP TABLESPACE example1; +-- merge +alter table range_range merge PARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +ERROR: Un-support feature +DETAIL: For subpartition table, merge partitions is not yet supported. +alter table range_range merge SUBPARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +ERROR: syntax error at or near "SUBPARTITIONS" +LINE 1: alter table range_range merge SUBPARTITIONS p_201901 , p_20... + ^ +-- exchange +CREATE TABLE ori +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "ori_pkey" for table "ori" +ALTER TABLE range_range EXCHANGE PARTITION (p_201901) WITH TABLE ori; +ERROR: Un-support feature +DETAIL: For subpartition table, exchange partition is not yet supported. +ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WITH TABLE ori; +ERROR: syntax error at or near "SUBPARTITION (" +LINE 1: ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WIT... + ^ +-- drop +alter table range_range drop partition p_201901; +alter table range_range drop partition p_201901_a; +ERROR: partition "p_201901_a" does not exist +alter table range_range drop subpartition p_201901_a; +ERROR: subpartition "p_201901_a" does not exist +-- add +alter table range_range add partition p_range_4 VALUES LESS THAN('201904'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +-- split +alter table range_range split PARTITION p_201901 at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +ERROR: Un-support feature +DETAIL: For subpartition table, split partition is not supported yet. +drop table ori; +drop table range_range; +CREATE TABLE IF NOT EXISTS range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int +) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; +create index on range_range_02(col_2) local; +alter table range_range_02 MODIFY PARTITION p_range_2 UNUSABLE LOCAL INDEXES; +ERROR: Un-support feature +DETAIL: For subpartition table, UNUSABLE LOCAL INDEXES is not yet supported. +alter table range_range_02 MODIFY PARTITION p_range_2 REBUILD UNUSABLE LOCAL INDEXES; +ERROR: Un-support feature +DETAIL: For subpartition table, REBUILD UNUSABLE LOCAL INDEXES is not yet supported. 
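+--UNUSABLE/REBUILD UNUSABLE LOCAL INDEXES is rejected for subpartitioned
+--tables, but index-partition health can still be inspected through the
+--catalog, as the checks in this suite do. A minimal illustrative sketch
+--(assuming the LOCAL index created above received the auto-generated name
+--range_range_02_col_2_idx) that lists any unusable index partitions:
+SELECT p.relname, p.indisusable
+  FROM pg_class c
+  JOIN pg_partition p ON p.parentid = c.oid
+ WHERE c.relname = 'range_range_02_col_2_idx'
+   AND NOT p.indisusable;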
+alter table range_range_02 alter col_1 type char; +ERROR: cannot alter data type of partitioning column "col_1" +alter table range_range_02 alter col_2 type char; +ERROR: cannot alter data type of subpartitioning column "col_2" +drop table range_range_02; +--validate constraint +CREATE TABLE hash_hash +( + col_1 int , + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY hash (col_3) SUBPARTITION BY hash (col_2) +( + PARTITION p_hash_1 + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 , + SUBPARTITION p_hash_1_4 + ), + PARTITION p_hash_2 + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 + ), + PARTITION p_hash_3, + PARTITION p_hash_4 + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_hash_5 +); +INSERT INTO hash_hash VALUES(null,1,1,1); +alter table hash_hash add constraint con_hash_hash check(col_1 is not null) NOT VALID ; +INSERT INTO hash_hash VALUES(null,2,1,1); --error +ERROR: new row for relation "hash_hash" violates check constraint "con_hash_hash" +DETAIL: N/A +INSERT INTO hash_hash VALUES(1,3,1,1); --success +alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --error +ERROR: check constraint "con_hash_hash" is violated by some row +delete from hash_hash where col_1 is null; +alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --success +drop table hash_hash cascade; +-- clean +DROP SCHEMA subpartition_alter_table CASCADE; +\h ALTER TABLE SUBPARTITION +Command: ALTER TABLE SUBPARTITION +Description: change the definition of a subpartition +Syntax: +ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )} + action [, ... ]; + +where action can be: + add_clause | + drop_clause | + split_clause | + truncate_clause +where add_clause can be: +ADD { partition_less_than_item | partition_list_item } [ ( subpartition_definition_list ) ] +MODIFY PARTITION partition_name ADD subpartition_definition +where partition_less_than_item can be: +PARTITION partition_name VALUES LESS THAN ( partition_value | MAXVALUE ) [ TABLESPACE tablespacename ] +where partition_list_item can be: +PARTITION partition_name VALUES ( partition_value [, ...] | DEFAULT ) [ TABLESPACE tablespacename ] +where subpartition_definition_list can be: +SUBPARTITION subpartition_name [ VALUES LESS THAN ( partition_value | MAXVALUE ) | VALUES ( partition_value [, ...] | DEFAULT )] [ TABLESPACE tablespace ] +where drop_clause can be: +DROP PARTITION { partition_name | FOR ( partition_value ) } [ UPDATE GLOBAL INDEX ] +DROP SUBPARTITION { subpartition_name | FOR ( partition_value, subpartition_value ) } [ UPDATE GLOBAL INDEX ] +where split_clause can be: +SPLIT SUBPARTITION { subpartition_name } { split_point_clause } [ UPDATE GLOBAL INDEX ] +where split_point_clause can be: +AT ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] ) | +VALUES ( subpartition_value ) INTO ( SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] , SUBPARTITION subpartition_name [ TABLESPACE tablespacename ] ) +where truncate_clause can be: +TRUNCATE SUBPARTITION { subpartition_name } [ UPDATE GLOBAL INDEX ] +NOTICE: 'ALTER TABLE SUBPARTITION' is only avaliable in CENTRALIZED mode! 
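+--A minimal usage sketch composing the clauses documented above; the table and
+--partition names are hypothetical, and the table is assumed to be a
+--RANGE/RANGE subpartitioned table whose current upper boundary lies below the
+--new partition's boundary.
+ALTER TABLE sales_sketch ADD PARTITION p2021 VALUES LESS THAN (MAXVALUE)
+    (
+        SUBPARTITION p2021_a VALUES LESS THAN (100),
+        SUBPARTITION p2021_b VALUES LESS THAN (200)
+    );
+ALTER TABLE sales_sketch MODIFY PARTITION p2021 ADD SUBPARTITION p2021_c VALUES LESS THAN (MAXVALUE);
+ALTER TABLE sales_sketch SPLIT SUBPARTITION p2021_a AT (50)
+    INTO (SUBPARTITION p2021_a1, SUBPARTITION p2021_a2) UPDATE GLOBAL INDEX;
+ALTER TABLE sales_sketch DROP SUBPARTITION p2021_a1 UPDATE GLOBAL INDEX;
+ALTER TABLE sales_sketch TRUNCATE SUBPARTITION p2021_c UPDATE GLOBAL INDEX;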
+ diff --git a/src/test/regress/expected/hw_subpartition_analyze_vacuum.out b/src/test/regress/expected/hw_subpartition_analyze_vacuum.out new file mode 100644 index 000000000..0044363f5 --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_analyze_vacuum.out @@ -0,0 +1,62 @@ +-- prepare +DROP SCHEMA subpartition_analyze_vacuum CASCADE; +ERROR: schema "subpartition_analyze_vacuum" does not exist +CREATE SCHEMA subpartition_analyze_vacuum; +SET CURRENT_SCHEMA TO subpartition_analyze_vacuum; +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +delete from range_list where month_code = '201902'; +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(3 rows) + +analyze range_list; +analyze range_list partition (p_201901); +vacuum range_list; +vacuum range_list partition (p_201901); +drop table range_list; +-- clean +DROP SCHEMA subpartition_analyze_vacuum CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_createtable.out b/src/test/regress/expected/hw_subpartition_createtable.out new file mode 100644 index 000000000..4fa5c6a0b --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_createtable.out @@ -0,0 +1,1791 @@ +--1.create table +--list_list list_hash list_range range_list range_hash range_range +--prepare +DROP SCHEMA subpartition_createtable CASCADE; +ERROR: schema "subpartition_createtable" does not exist +CREATE SCHEMA subpartition_createtable; +SET CURRENT_SCHEMA TO subpartition_createtable; +--1.1 normal table +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', 
'1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +drop table list_list; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into list_hash values('201902', '1', '1', 1); +insert into list_hash values('201902', '2', '1', 1); +insert into list_hash values('201902', '3', '1', 1); +insert into list_hash values('201903', '4', '1', 1); +insert into list_hash values('201903', '5', '1', 1); +insert into list_hash values('201903', '6', '1', 1); +select * from list_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 4 | 1 | 1 + 201903 | 5 | 1 | 1 + 201903 | 6 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +drop table list_hash; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +insert into list_range values('201902', '1', '1', 1); +insert into list_range values('201902', '2', '1', 1); +insert into list_range values('201902', '3', '1', 1); +insert into list_range values('201903', '4', '1', 1); +insert into list_range values('201903', '5', '1', 1); +insert into list_range values('201903', '6', '1', 1); +ERROR: inserted partition key does not map to any table partition +select * from list_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 4 | 1 | 1 + 201903 | 5 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(5 rows) + +drop table list_range; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from 
range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table range_list; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +select * from range_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table range_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +select * from range_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +drop table range_range; +CREATE TABLE hash_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201901', '2', '1', 1); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +insert into hash_list values('201903', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +select * from hash_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 +(6 
rows) + +drop table hash_list; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201901', '2', '1', 1); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +insert into hash_hash values('201903', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +select * from hash_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 +(6 rows) + +drop table hash_hash; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN ( '3' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN ( '3' ) + ) +); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201901', '2', '1', 1); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); +insert into hash_range values('201903', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); +select * from hash_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 +(6 rows) + +drop table hash_range; +--1.2 table with default subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) 
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table list_hash; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +drop table list_range; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + 
dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table range_hash; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop 
table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES( '2' ), + SUBPARTITION p_201902_b VALUES( '3' ) + ) +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES( '2' ), + SUBPARTITION p_201901_b VALUES( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table hash_hash; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 +); +drop table hash_hash; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_hash; +--1.3 subpartition name check +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_a VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +ERROR: duplicate subpartition name: "p_201901_a" +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +ERROR: duplicate subpartition name: "p_201901_a" +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901 
VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: duplicate subpartition name: "p_201901"
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201901_subpartdefault1 VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+drop table list_list;
+--1.4 subpartition key check
+-- the level-1 partition key and the level-2 partition key are the same column
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: The two partition keys of a subpartition partition table are the same.
+DETAIL: N/A
+-- two subpartitions of one partition have the same key value
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '1' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: list partition p_201901_a and p_201901_b has overlapped value
+-- the partitioning column does not exist
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_codeXXX) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: undefined column"month_codexxx" is used as a partitioning column
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_codeXXX)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: undefined column"dept_codexxx" is used as a partitioning column
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a values less than ('4'),
+    SUBPARTITION p_201901_b values less than ('4')
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a values less than ('3'),
+    SUBPARTITION p_201902_b values less than ('6')
+  )
+);
+ERROR: partition bound of partition "p_201901_b" is too low
+drop table list_range;
+ERROR: table "list_range" does not exist
+--1.5 list subpartition with default
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( default )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+select * from list_list partition (p_201901);
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 2         | 1       |         1
+ 201902     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+(3 rows)
+
+select * from list_list partition (p_201902);
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201903     | 2         | 1       |         1
+ 201903     | 2         | 1       |         1
+ 201903     | 1         | 1       |         1
+(3 rows)
+
+drop table list_list;
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( default )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+drop table list_list;
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_b VALUES ( default )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+drop table list_list;
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( default )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( default )
+  )
+);
+drop table list_list;
+--1.6 declaration and definition of the subpartition type must be the same.
+--error +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); +ERROR: The syntax format of subpartition is incorrect, the declaration and definition of the subpartition do not match. +DETAIL: The syntax format of subpartition p_201901_a is incorrect. +--1.7 add constraint +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range add constraint constraint_check CHECK (sales_amt IS NOT NULL); +insert into range_range values(1,1,1); +ERROR: new row for relation "range_range" violates check constraint "constraint_check" +DETAIL: N/A +drop table range_range; +-- drop partition column +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +alter table range_hash_02 drop column col_1; +ERROR: cannot drop partitioning column "col_1" +alter table range_hash_02 drop column col_2; +ERROR: cannot drop partitioning column "col_2" +drop table range_hash_02; +--1.8 SET ROW MOVEMENT +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1', '2' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1', '2' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list disable ROW MOVEMENT; +insert into list_list values('201902', '1', '1', 1); +update list_list set month_code = '201903'; +ERROR: fail to update partitioned table "list_list" +DETAIL: disable row movement +update list_list set dept_code = '3'; +ERROR: fail to update partitioned table "list_list" +DETAIL: disable row movement +alter table list_list enable ROW MOVEMENT; +update list_list set month_code = '201903'; +update list_list set dept_code = '3'; +drop table list_list; +--1.9 without subpartition declaration +create table test(a int) +partition by 
range(a) +( +partition p1 values less than(100) +( +subpartition subp1 values less than(50), +subpartition subp2 values less than(100) +), +partition p2 values less than(200), +partition p3 values less than(maxvalue) +); +ERROR: The syntax format of subpartition is incorrect, missing declaration of subpartition. +DETAIL: N/A +--1.10 create table like +CREATE TABLE range_range +( + col_1 int primary key, + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int generated always as(2*col_2) stored , + check (col_4 >= col_2) +) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +CREATE TABLE range_range_02 (like range_range INCLUDING ALL ); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_02_pkey" for table "range_range_02" +drop table range_range; +-- storage parameter +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(ORIENTATION = COLUMN) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +ERROR: Unsupport feature +DETAIL: cstore/timeseries don't support subpartition table. +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(segment = on) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(hashbucket = on) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: hashbucket table need segment storage, set segment to on by default +ERROR: Un-support feature +DETAIL: The subpartition table do not support hashbucket. 
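
Of the storage parameters exercised above, only row store survives for a subpartitioned table: ORIENTATION = COLUMN is rejected outright, segment = on is accepted, and hashbucket = on fails even after the server implicitly turns segment storage on for it. A minimal sketch of how one could read back which options actually took effect on the surviving segment = on table (illustrative only; it assumes the table still exists at this point in the run, and the exact reloptions contents are version-dependent):

-- Read back the storage parameters recorded for range_range.
SELECT relname, reloptions
FROM pg_class
WHERE relname = 'range_range';
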
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+) WITH(STORAGE_TYPE = USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+  PARTITION p_201901 VALUES LESS THAN( '201903' )
+  (
+    SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+    SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+  ),
+  PARTITION p_201902 VALUES LESS THAN( '201904' )
+  (
+    SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+    SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+  )
+);
+ERROR: relation "range_range" already exists in schema "subpartition_createtable"
+DETAIL: creating new table with existing name in the same schema
+--ROW LEVEL SECURITY POLICY
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+  PARTITION p_201901 VALUES LESS THAN( '201903' )
+  (
+    SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+    SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+  ),
+  PARTITION p_201902 VALUES LESS THAN( '201904' )
+  (
+    SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+    SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+  )
+);
+ERROR: relation "range_range" already exists in schema "subpartition_createtable"
+DETAIL: creating new table with existing name in the same schema
+CREATE ROW LEVEL SECURITY POLICY range_range_rls ON range_range USING(user_no = CURRENT_USER);
+ERROR: Un-support feature
+DETAIL: Do not support row level security policy on subpartition table.
+drop table range_range;
+-- ledger database (blockchain schema)
+CREATE SCHEMA ledgernsp WITH BLOCKCHAIN;
+CREATE TABLE ledgernsp.range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+  PARTITION p_201901 VALUES LESS THAN( '201903' )
+  (
+    SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+    SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+  ),
+  PARTITION p_201902 VALUES LESS THAN( '201904' )
+  (
+    SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+    SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+  )
+);
+ERROR: Un-support feature
+DETAIL: Subpartition table does not support ledger user table.
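
Both rejections above are about the subpartitioning, not the surrounding feature: the BLOCKCHAIN schema itself is created successfully. A hedged sketch of the pattern that should be accepted, assuming an ordinary (non-subpartitioned) ledger user table in the same ledgernsp schema:

-- A plain ledger user table is the supported case; only subpartitioned ones are rejected.
CREATE TABLE ledgernsp.sales_plain (month_code VARCHAR2(30), sales_amt int);
DROP TABLE ledgernsp.sales_plain;
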
+DROP SCHEMA ledgernsp; +-- create table as +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +insert into range_range values(201902,1,1,1),(201902,1,1,1),(201902,3,1,1),(201903,1,1,1),(201903,2,1,1),(201903,2,1,1); +select * from range_range subpartition(p_201901_a) where month_code in(201902,201903) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +create table range_range_copy as select * from range_range subpartition(p_201901_a) where month_code in(201902,201903); +select * from range_range_copy order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +drop table range_range; +drop table range_range_copy; +--1.11 create index +create table range_range_03 +( + c_int int, + c_char1 char(3000), + c_char2 char(5000), + c_char3 char(6000), + c_varchar1 varchar(3000), + c_varchar2 varchar(5000), + c_varchar3 varchar, + c_varchar4 varchar, + c_text1 text, + c_text2 text, + c_text3 text, + c int, + primary key(c,c_int) +) with (parallel_workers=10) +partition by range (c_int) subpartition by range (c_char1) +( + partition p1 values less than(50) + ( + subpartition p1_1 values less than('c'), + subpartition p1_2 values less than(maxvalue) + ), + partition p2 values less than(100) + ( + subpartition p2_1 values less than('c'), + subpartition p2_2 values less than(maxvalue) + ), + partition p3 values less than(150) + ( + subpartition p3_1 values less than('c'), + subpartition p3_2 values less than(maxvalue) + ), + partition p4 values less than(200) + ( + subpartition p4_1 values less than('c'), + subpartition p4_2 values less than(maxvalue) + ), + partition p5 values less than(maxvalue)( + subpartition p5_1 values less than('c'), + subpartition p5_2 values less than(maxvalue) + ) +) enable row movement; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_03_pkey" for table "range_range_03" +create index range_range_03_idx1 on range_range_03 (c_varchar1) local; --success +create index range_range_03_idx2 on range_range_03 (c_varchar2) local ( + partition cpt7_p1, + partition cpt7_p2, + partition cpt7_p3, + partition cpt7_p4, + partition cpt7_p5 +); --failed +ERROR: Cannot match subpartitions when create subpartition indexes. 
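
A LOCAL index on a subpartitioned table cannot spell out its index partitions by hand, as the failed range_list-style definition of range_range_03_idx2 above shows; the server instead generates one index partition per subpartition with names of its own choosing (visible in the pg_get_tabledef output below, e.g. p1_1_c_varchar1_idx). A sketch of how one might list those generated index partitions, assuming, as the \d+ output suggests, that they are registered in pg_partition under the index's OID:

-- List the auto-generated partitions of local index range_range_03_idx1.
SELECT relname, parttype
FROM pg_partition
WHERE parentid = 'range_range_03_idx1'::regclass
ORDER BY relname;
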
+create index range_range_03_idx3 on range_range_03 (c_varchar3); --success, default global +create index range_range_03_idx4 on range_range_03 (c_varchar4) global; --success +create index range_range_03_idx5 on range_range_03 (c_varchar4) local; --failed, can not be same column with global index +ERROR: Global and local partition index should not be on same column +\d+ range_range_03 + Table "subpartition_createtable.range_range_03" + Column | Type | Modifiers | Storage | Stats target | Description +------------+-------------------------+-----------+----------+--------------+------------- + c_int | integer | not null | plain | | + c_char1 | character(3000) | | extended | | + c_char2 | character(5000) | | extended | | + c_char3 | character(6000) | | extended | | + c_varchar1 | character varying(3000) | | extended | | + c_varchar2 | character varying(5000) | | extended | | + c_varchar3 | character varying | | extended | | + c_varchar4 | character varying | | extended | | + c_text1 | text | | extended | | + c_text2 | text | | extended | | + c_text3 | text | | extended | | + c | integer | not null | plain | | +Indexes: + "range_range_03_pkey" PRIMARY KEY, btree (c, c_int) TABLESPACE pg_default + "range_range_03_idx1" btree (c_varchar1) LOCAL TABLESPACE pg_default + "range_range_03_idx3" btree (c_varchar3) TABLESPACE pg_default + "range_range_03_idx4" btree (c_varchar4) TABLESPACE pg_default +Partition By RANGE(c_int) Subpartition By RANGE(c_char1) +Number of partitions: 5 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, parallel_workers=10, compression=no + +select pg_get_tabledef('range_range_03'); + pg_get_tabledef +---------------------------------------------------------------------------------------------------- + SET search_path = subpartition_createtable; + + CREATE TABLE range_range_03 ( + + c_int integer NOT NULL, + + c_char1 character(3000), + + c_char2 character(5000), + + c_char3 character(6000), + + c_varchar1 character varying(3000), + + c_varchar2 character varying(5000), + + c_varchar3 character varying, + + c_varchar4 character varying, + + c_text1 text, + + c_text2 text, + + c_text3 text, + + c integer NOT NULL + + ) + + WITH (orientation=row, parallel_workers=10, compression=no) + + PARTITION BY RANGE (c_int) SUBPARTITION BY RANGE (c_char1) + + ( + + PARTITION p1 VALUES LESS THAN (50) TABLESPACE pg_default + + ( + + SUBPARTITION p1_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p1_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p2 VALUES LESS THAN (100) TABLESPACE pg_default + + ( + + SUBPARTITION p2_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p3 VALUES LESS THAN (150) TABLESPACE pg_default + + ( + + SUBPARTITION p3_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p3_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p4 VALUES LESS THAN (200) TABLESPACE pg_default + + ( + + SUBPARTITION p4_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p4_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p5 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ( + + SUBPARTITION p5_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p5_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; + + CREATE INDEX 
range_range_03_idx4 ON range_range_03 USING btree (c_varchar4) TABLESPACE pg_default;+ + CREATE INDEX range_range_03_idx3 ON range_range_03 USING btree (c_varchar3) TABLESPACE pg_default;+ + CREATE INDEX range_range_03_idx1 ON range_range_03 USING btree (c_varchar1) LOCAL + + ( + + PARTITION p1_1_c_varchar1_idx, + + PARTITION p1_2_c_varchar1_idx, + + PARTITION p2_1_c_varchar1_idx, + + PARTITION p2_2_c_varchar1_idx, + + PARTITION p3_1_c_varchar1_idx, + + PARTITION p3_2_c_varchar1_idx, + + PARTITION p4_1_c_varchar1_idx, + + PARTITION p4_2_c_varchar1_idx, + + PARTITION p5_1_c_varchar1_idx, + + PARTITION p5_2_c_varchar1_idx + + ) TABLESPACE pg_default; + + ALTER TABLE range_range_03 ADD CONSTRAINT range_range_03_pkey PRIMARY KEY (c, c_int); +(1 row) + +drop table range_range_03; +--unique local index columns must contain the partition key +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +create unique index idx on range_range(month_code) local; +ERROR: unique local index columns must contain all the partition keys and collation must be default collation +create unique index idx1 on range_range(month_code, user_no) local; +ERROR: unique local index columns must contain all the partition keys and collation must be default collation +drop table range_range; +-- partkey has timestampwithzone type +drop table hash_range; +ERROR: table "hash_range" does not exist +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) +PARTITION BY HASH (col_2) SUBPARTITION BY RANGE (col_19) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +ERROR: partition key value must be const or const-evaluable expression +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) +PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_pkey" for table "hash_range" +drop table hash_range; +-- test create table like only support range_range in subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + 
PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +create table t1(like range_range including partition); +insert into t1 values('201902', '1', '1', 1); +insert into t1 values('201902', '2', '1', 1); +insert into t1 values('201902', '1', '1', 1); +insert into t1 values('201903', '2', '1', 1); +insert into t1 values('201903', '1', '1', 1); +insert into t1 values('201903', '2', '1', 1); +explain (costs off) select * from t1; + QUERY PLAN +-------------------------------------- + Partition Iterator + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on t1 + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(5 rows) + +select * from t1; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +drop table t1; +drop table range_range; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +create table t1(like list_list including partition); +ERROR: Un-support feature +DETAIL: The Like feature is not supported currently for List and Hash. +drop table list_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create table t1(like range_list including partition); +ERROR: Un-support feature +DETAIL: Create Table like with subpartition only support range strategy. +drop table range_list; +--clean +DROP SCHEMA subpartition_createtable CASCADE; +NOTICE: drop cascades to table range_range_02 +\h CREATE TABLE SUBPARTITION +Command: CREATE TABLE SUBPARTITION +Description: define a new table subpartition +Syntax: +CREATE TABLE [ IF NOT EXISTS ] subpartition_table_name +( { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] + | table_constraint + | LIKE source_table [ like_option [...] ] } + [, ... ] +) + [ WITH ( {storage_parameter = value} [, ... ] ) ] + [ COMPRESS | NOCOMPRESS ] + [ TABLESPACE tablespace_name ] + PARTITION BY {RANGE | LIST | HASH} (partition_key) SUBPARTITION BY {RANGE | LIST | HASH} (subpartition_key) + ( + PARTITION partition_name1 [ VALUES LESS THAN (val1) | VALUES (val1[, ...]) ] [ TABLESPACE tablespace ] + ( + { SUBPARTITION subpartition_name1 [ VALUES LESS THAN (val1_1) | VALUES (val1_1[, ...])] [ TABLESPACE tablespace ] } [, ...] + ) + [, ...] 
+ ) [ { ENABLE | DISABLE } ROW MOVEMENT ];
+
+where column_constraint can be:
+[ CONSTRAINT constraint_name ]
+{ NOT NULL |
+  NULL |
+  CHECK ( expression ) |
+  DEFAULT default_expr |
+  GENERATED ALWAYS AS ( generation_expr ) STORED |
+  UNIQUE index_parameters |
+  PRIMARY KEY index_parameters |
+  REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
+  [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where table_constraint can be:
+[ CONSTRAINT constraint_name ]
+{ CHECK ( expression ) |
+  UNIQUE ( column_name [, ... ] ) index_parameters |
+  PRIMARY KEY ( column_name [, ... ] ) index_parameters |
+  FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
+  [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
+[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
+where like_option can be:
+{ INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | ALL }
+where index_parameters can be:
+[ WITH ( {storage_parameter = value} [, ... ] ) ]
+[ USING INDEX TABLESPACE tablespace_name ]
+NOTICE: 'CREATE TABLE SUBPARTITION' is only avaliable in CENTRALIZED mode!
+
diff --git a/src/test/regress/expected/hw_subpartition_ddl_index.out b/src/test/regress/expected/hw_subpartition_ddl_index.out
new file mode 100644
index 000000000..63fd4add4
--- /dev/null
+++ b/src/test/regress/expected/hw_subpartition_ddl_index.out
@@ -0,0 +1,446 @@
+--
+----test index is OK when using ddl grammar for subpartition----
+--
+DROP SCHEMA hw_subpartition_ddl_index CASCADE;
+ERROR: schema "hw_subpartition_ddl_index" does not exist
+CREATE SCHEMA hw_subpartition_ddl_index;
+SET CURRENT_SCHEMA TO hw_subpartition_ddl_index;
+SET enable_seqscan = OFF;
+SET enable_indexscan = ON;
+SET enable_indexonlyscan = ON;
+SET enable_bitmapscan = OFF;
+--
+--test for add/drop partition/subpartition
+--
+--1. 
first, we create subpartitioned table, and index on the table +CREATE TABLE range_list_sales1 +( + product_id INT4 CONSTRAINT cc1 CHECK (product_id < 2500) NOT NULL, + customer_id INT4, + time_id DATE CONSTRAINT cc2 CHECK (time_id IS NOT NULL) UNIQUE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2), + comment VARCHAR2, + UNIQUE (product_id, customer_id, comment) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "range_list_sales1_time_id_tableoid_key" for table "range_list_sales1" +NOTICE: CREATE TABLE / UNIQUE will create implicit index "range_list_sales1_product_id_customer_id_comment_tableoid_key" for table "range_list_sales1" +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000), + 'test' || generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count 
+------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +--2. add partition/subpartition will not influence the index +ALTER TABLE range_list_sales1 ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8') + ); +ALTER TABLE range_list_sales1 ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_list_sales1 MODIFY PARTITION customer5 ADD SUBPARTITION customer5_channel4 VALUES ('9'); +INSERT INTO range_list_sales1 SELECT generate_series(1001,2000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1001,2000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000), + 'test' || generate_series(1001,2000); +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 6, Sub Iterations: 13 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 6, Sub Iterations: 13 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM 
range_list_sales1; + count +------- + 2000 +(1 row) + +--3. drop partition/subpartition update global index +ALTER TABLE range_list_sales1 DROP PARTITION customer3 UPDATE GLOBAL INDEX; +ALTER TABLE range_list_sales1 DROP PARTITION FOR (700) UPDATE GLOBAL INDEX; --customer4 +ALTER TABLE range_list_sales1 DROP SUBPARTITION FOR (700, '9') UPDATE GLOBAL INDEX; --customer5_channel4 +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 10 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 10 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +--4. if drop partition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP PARTITION FOR (1600); +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx1), index "range_list_sales1_idx1" doesn't exist. 
+ QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx2), index "range_list_sales1_idx2" doesn't exist. + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +--5. if drop subpartition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP SUBPARTITION customer5_channel3; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx1), index "range_list_sales1_idx1" doesn't exist. 
+ QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx2), index "range_list_sales1_idx2" doesn't exist. + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +DROP TABLE range_list_sales1; +--finish, clean the environment +DROP SCHEMA hw_subpartition_ddl_index CASCADE; +RESET CURRENT_SCHEMA; +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/expected/hw_subpartition_gpi.out b/src/test/regress/expected/hw_subpartition_gpi.out new file mode 100644 index 000000000..8fa5fe750 --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_gpi.out @@ -0,0 +1,1295 @@ +-- prepare +DROP SCHEMA subpartition_gpi CASCADE; +ERROR: 
schema "subpartition_gpi" does not exist +CREATE SCHEMA subpartition_gpi; +SET CURRENT_SCHEMA TO subpartition_gpi; +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY 
PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +drop table range_list; +-- unique +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create unique index idx_dept_code_global on range_list(dept_code) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select count(*) from range_list; + count +------- + 2 +(1 row) + +--error +insert into range_list values('201902', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(1) already exists. +insert into range_list values('201902', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(2) already exists. +insert into range_list values('201903', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(1) already exists. +insert into range_list values('201903', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(2) already exists. 
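+-- count is unchanged: every duplicate insert above was rejected, since the
+-- global unique index enforces uniqueness across all partitions and subpartitions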
+select count(*) from range_list; + count +------- + 2 +(1 row) + +delete from range_list; +drop index idx_dept_code_global; +create unique index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '4', 1); +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 2 | 1 +(1 row) + +select * from range_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 3 | 1 +(1 row) + +select * from range_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 4 | 1 +(1 row) + +select count(*) from range_list; + count +------- + 4 +(1 row) + +--error +insert into range_list values('201902', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201902', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201903', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201903', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201902', '1', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201902', '2', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201903', '1', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201903', '2', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201902', '1', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201902', '2', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201903', '1', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201903', '2', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201902', '1', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. 
+insert into range_list values('201902', '2', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +insert into range_list values('201903', '1', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +insert into range_list values('201903', '2', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +select count(*) from range_list; + count +------- + 4 +(1 row) + +drop table range_list; +-- truncate subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: 
((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list truncate subpartition p_201901_a update global index; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, 
range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +alter table range_list truncate subpartition p_201901_b; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(2 rows) + +drop table range_list; +-- split subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT 
NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values (default) + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values (default) + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + 
-> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list split subpartition p_201901_b values ('3') into +( + subpartition p_201901_b, + subpartition p_201901_c +) update global index; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + 
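+-- split again, this time without UPDATE GLOBAL INDEX: the global indexes are
+-- invalidated, so the following plans fall back to partitioned seq scans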
+alter table range_list split subpartition p_201902_b values ('3') into +( + subpartition p_201902_b, + subpartition p_201902_c +); +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 6 + -> Partitioned Seq Scan on subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +drop table range_list; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION 
p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( MAXVALUE ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +create index idx_month_code_local on range_range(month_code) local; +create index idx_dept_code_global on range_range(dept_code) global; +create index idx_user_no_global on range_range(user_no) global; +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_range.dept_code)::text = '1'::text) +(8 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_range.user_no)::text = '1'::text) +(8 
rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +alter table range_range split subpartition p_201901_b at ('3') into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_range.dept_code)::text = '1'::text) +(8 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_range.user_no)::text = '1'::text) +(8 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +alter table range_range split subpartition p_201902_b at ('3') into +( + subpartition p_201902_c, + subpartition p_201903_d +); +explain(costs off, verbose on) select * from range_range where month_code = '201902' 
order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 6 + -> Partitioned Seq Scan on subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) primary key, + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create 
implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) primary key, + user_no VARCHAR2 ( 30 ) , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) primary key, + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + i +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code, user_no) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table 
"range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + i +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, user_no) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +-- truncate with gpi +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index idx on range_hash_02(col_1); +truncate range_hash_02; +drop table range_hash_02; +-- clean +DROP SCHEMA subpartition_gpi CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_index.out b/src/test/regress/expected/hw_subpartition_index.out new file mode 100644 index 000000000..8ecf4d29f --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_index.out @@ -0,0 +1,233 @@ +DROP SCHEMA subpartition_index CASCADE; +ERROR: schema "subpartition_index" does not exist +CREATE SCHEMA subpartition_index; +SET CURRENT_SCHEMA TO subpartition_index; +CREATE TABLE source +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +); +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into source values('201902', '1', '1', 1); +insert into source values('201902', '2', '1', 1); +insert into source values('201902', '1', '1', 1); +insert into source values('201903', '2', '1', 1); +insert into source values('201903', '1', '1', 1); +insert into source values('201903', '2', '1', 1); +insert into range_list select * from source; +CREATE INDEX range_list_idx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx, + SUBPARTITION 
p_201901_b_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); +-- test subpartition index scan +explain (costs off) select * from range_list where month_code = '201902'; + QUERY PLAN +------------------------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 2 + -> Partitioned Seq Scan on range_list + Filter: ((month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(6 rows) + +select * from range_list where month_code = '201902' order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902'; + QUERY PLAN +----------------------------------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 2 + -> Partitioned Index Scan using range_list_idx on range_list + Index Cond: ((month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(6 rows) + +select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902' order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +-- test index unusable and rebuild +ALTER INDEX range_list_idx MODIFY PARTITION p_201901_a_idx UNUSABLE; +select indisusable from pg_partition where relname = 'p_201901_a_idx'; + indisusable +------------- + f +(1 row) + +REINDEX INDEX range_list_idx PARTITION p_201901_a_idx; +select indisusable from pg_partition where relname = 'p_201901_a_idx'; + indisusable +------------- + t +(1 row) + +truncate table range_list; +ALTER INDEX range_list_idx MODIFY PARTITION p_201901_a_idx UNUSABLE; +ALTER INDEX range_list_idx MODIFY PARTITION p_201901_b_idx UNUSABLE; +ALTER INDEX range_list_idx MODIFY PARTITION p_201902_a_idx UNUSABLE; +ALTER INDEX range_list_idx MODIFY PARTITION p_201902_b_idx UNUSABLE; +insert into range_list select * from source; +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902'; + QUERY PLAN +------------------------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 2 + -> Partitioned Seq Scan on range_list + Filter: ((month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(6 rows) + +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201903'; + QUERY PLAN +------------------------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 2 + -> Partitioned Seq Scan on range_list + Filter: ((month_code)::text = '201903'::text) + Selected Partitions: 2 + Selected Subpartitions: ALL +(6 rows) + +REINDEX INDEX range_list_idx; +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902'; + QUERY PLAN +----------------------------------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 2 + -> Partitioned Index Scan using range_list_idx on range_list + Index Cond: ((month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(6 rows) + +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from 
range_list where month_code = '201903'; + QUERY PLAN +----------------------------------------------------------------- + Partition Iterator + Iterations: 1, Sub Iterations: 2 + -> Partitioned Index Scan using range_list_idx on range_list + Index Cond: ((month_code)::text = '201903'::text) + Selected Partitions: 2 + Selected Subpartitions: ALL +(6 rows) + +select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902' order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201903' order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +-- wrong case +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201902_idx + ( + SUBPARTITION p_201901_a_idx, + SUBPARTITION p_201901_b_idx, + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); +ERROR: Wrong number of partitions when create index specify subpartition. +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx, + SUBPARTITION p_201901_b_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ), + PARTITION p_201903_idx + ( + SUBPARTITION p_201902_a_idx + ) +); +ERROR: Wrong number of partitions when create index specify subpartition. +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); +ERROR: Cannot match subpartitions when create subpartition indexes. +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201901_b_idx, + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); +ERROR: Cannot match subpartitions when create subpartition indexes. 
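+--finish, clean the environment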
+drop table source; +drop table range_list; +reset current_schema; +DROP SCHEMA subpartition_index CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_scan.out b/src/test/regress/expected/hw_subpartition_scan.out new file mode 100644 index 000000000..0ddfbd471 --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_scan.out @@ -0,0 +1,829 @@ +DROP SCHEMA subpartition_scan CASCADE; +ERROR: schema "subpartition_scan" does not exist +CREATE SCHEMA subpartition_scan; +SET CURRENT_SCHEMA TO subpartition_scan; +--scan +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +explain(costs off, verbose on) select * from range_list order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(10 rows) + +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code on range_list(dept_code) local; +create index idx_user_no on range_list(user_no) local; +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 
1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 + -> Partitioned Bitmap Index Scan on idx_dept_code + Index Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(15 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Bitmap Heap Scan on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_user_no + Index Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +set enable_bitmapscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Index Scan using idx_month_code on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, 
user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Index Scan using idx_dept_code on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Index Scan using idx_user_no on subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +reset enable_seqscan; +reset enable_bitmapscan; +drop table range_list; +CREATE TABLE range_list +( + col_1 VARCHAR2 ( 30 ) , + col_2 VARCHAR2 ( 30 ) NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 VARCHAR2 ( 30 ), +col_5 VARCHAR2 ( 30 ), +col_6 VARCHAR2 ( 30 ), +col_7 VARCHAR2 ( 30 ), +col_8 VARCHAR2 ( 30 ), +col_9 VARCHAR2 ( 30 ), +col_10 VARCHAR2 ( 30 ), +col_11 VARCHAR2 ( 30 ), +col_12 VARCHAR2 ( 30 ), +col_13 VARCHAR2 ( 30 ), +col_14 VARCHAR2 ( 30 ), +col_15 VARCHAR2 ( 30 ), +col_16 VARCHAR2 ( 30 ), +col_17 VARCHAR2 ( 30 ), +col_18 VARCHAR2 ( 30 ), +col_19 VARCHAR2 ( 30 ), +col_20 VARCHAR2 ( 30 ), +col_21 VARCHAR2 ( 30 ), +col_22 VARCHAR2 ( 30 ), +col_23 VARCHAR2 ( 30 ), +col_24 VARCHAR2 ( 30 ), +col_25 VARCHAR2 ( 30 ), +col_26 VARCHAR2 ( 30 ), +col_27 VARCHAR2 ( 30 ), +col_28 VARCHAR2 ( 30 ), +col_29 VARCHAR2 ( 30 ), +col_30 VARCHAR2 ( 30 ), +col_31 VARCHAR2 ( 30 ), +col_32 VARCHAR2 ( 30 ), +col_33 VARCHAR2 ( 30 ), +col_34 VARCHAR2 ( 30 ), +col_35 VARCHAR2 ( 30 ), +col_36 VARCHAR2 ( 30 ), +col_37 VARCHAR2 ( 30 ), +col_38 VARCHAR2 ( 30 ), +col_39 VARCHAR2 ( 30 ), +col_40 VARCHAR2 ( 30 ), +col_41 VARCHAR2 ( 30 ), +col_42 VARCHAR2 ( 30 ), +col_43 VARCHAR2 ( 30 ), +col_44 VARCHAR2 ( 30 ), +col_45 VARCHAR2 ( 30 ), +col_46 VARCHAR2 ( 30 ), +col_47 VARCHAR2 ( 30 ), +col_48 VARCHAR2 ( 30 ), +col_49 VARCHAR2 ( 30 ), +col_50 VARCHAR2 ( 30 ), +col_51 VARCHAR2 ( 30 ), +col_52 VARCHAR2 ( 30 ), +col_53 VARCHAR2 ( 30 ), +col_54 VARCHAR2 ( 30 ), +col_55 VARCHAR2 ( 30 ), +col_56 VARCHAR2 ( 30 ), +col_57 VARCHAR2 ( 30 ), +col_58 VARCHAR2 ( 30 ), +col_59 VARCHAR2 ( 30 ), +col_60 VARCHAR2 ( 30 ), +col_61 VARCHAR2 ( 30 ), +col_62 VARCHAR2 ( 30 ), +col_63 VARCHAR2 ( 30 ), +col_64 VARCHAR2 ( 30 ), +col_65 VARCHAR2 ( 30 ), +col_66 VARCHAR2 ( 30 ), +col_67 VARCHAR2 ( 30 ), +col_68 VARCHAR2 ( 30 ), +col_69 VARCHAR2 ( 30 
), +col_70 VARCHAR2 ( 30 ), +col_71 VARCHAR2 ( 30 ), +col_72 VARCHAR2 ( 30 ), +col_73 VARCHAR2 ( 30 ), +col_74 VARCHAR2 ( 30 ), +col_75 VARCHAR2 ( 30 ), +col_76 VARCHAR2 ( 30 ), +col_77 VARCHAR2 ( 30 ), +col_78 VARCHAR2 ( 30 ), +col_79 VARCHAR2 ( 30 ), +col_80 VARCHAR2 ( 30 ), +col_81 VARCHAR2 ( 30 ), +col_82 VARCHAR2 ( 30 ), +col_83 VARCHAR2 ( 30 ) +) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( '-10' ) + ( +SUBPARTITION p_list_1_1 VALUES ( '-1' ), +SUBPARTITION p_list_1_2 VALUES ( '-2' ), +SUBPARTITION p_list_1_3 VALUES ( '-3' ), +SUBPARTITION p_list_1_4 VALUES ( '-4' ), +SUBPARTITION p_list_1_5 VALUES ( '-5' ), +SUBPARTITION p_list_1_6 VALUES ( '-6' ), +SUBPARTITION p_list_1_7 VALUES ( '-7' ), +SUBPARTITION p_list_1_8 VALUES ( '-8' ), +SUBPARTITION p_list_1_9 VALUES ( '-9' ), +SUBPARTITION p_list_1_10 VALUES ( '-10' ), +SUBPARTITION p_list_1_11 VALUES ( '-11' ), +SUBPARTITION p_list_1_12 VALUES ( '-12' ), +SUBPARTITION p_list_1_13 VALUES ( '-13' ), +SUBPARTITION p_list_1_14 VALUES ( '-14' ), +SUBPARTITION p_list_1_15 VALUES ( '-15' ), +SUBPARTITION p_list_1_16 VALUES ( '-16' ), +SUBPARTITION p_list_1_17 VALUES ( '-17' ), +SUBPARTITION p_list_1_18 VALUES ( '-18' ), +SUBPARTITION p_list_1_19 VALUES ( '-19' ), +SUBPARTITION p_list_1_20 VALUES ( '-20' ), +SUBPARTITION p_list_1_21 VALUES ( '-21' ), +SUBPARTITION p_list_1_22 VALUES ( '-22' ), +SUBPARTITION p_list_1_23 VALUES ( '-23' ), +SUBPARTITION p_list_1_24 VALUES ( '-24' ), +SUBPARTITION p_list_1_25 VALUES ( '-25' ), +SUBPARTITION p_list_1_26 VALUES ( '-26' ), +SUBPARTITION p_list_1_27 VALUES ( '-27' ), +SUBPARTITION p_list_1_28 VALUES ( '-28' ), +SUBPARTITION p_list_1_29 VALUES ( '-29' ), +SUBPARTITION p_list_1_30 VALUES ( '-30' ), +SUBPARTITION p_list_1_31 VALUES ( '-31' ), +SUBPARTITION p_list_1_32 VALUES ( '-32' ), +SUBPARTITION p_list_1_33 VALUES ( '-33' ), +SUBPARTITION p_list_1_34 VALUES ( '-34' ), +SUBPARTITION p_list_1_35 VALUES ( '-35' ), +SUBPARTITION p_list_1_36 VALUES ( '-36' ), +SUBPARTITION p_list_1_37 VALUES ( '-37' ), +SUBPARTITION p_list_1_38 VALUES ( '-38' ), +SUBPARTITION p_list_1_39 VALUES ( '-39' ), +SUBPARTITION p_list_1_40 VALUES ( '-40' ), +SUBPARTITION p_list_1_41 VALUES ( '-41' ), +SUBPARTITION p_list_1_42 VALUES ( '-42' ), +SUBPARTITION p_list_1_43 VALUES ( '-43' ), +SUBPARTITION p_list_1_44 VALUES ( '-44' ), +SUBPARTITION p_list_1_45 VALUES ( '-45' ), +SUBPARTITION p_list_1_46 VALUES ( '-46' ), +SUBPARTITION p_list_1_47 VALUES ( '-47' ), +SUBPARTITION p_list_1_48 VALUES ( '-48' ), +SUBPARTITION p_list_1_49 VALUES ( '-49' ), +SUBPARTITION p_list_1_50 VALUES ( '-50' ), +SUBPARTITION p_list_1_51 VALUES ( default ) + ), + PARTITION p_range_2 VALUES LESS THAN('10 ') + ( +SUBPARTITION p_list_2_1 VALUES ( '1' ), +SUBPARTITION p_list_2_2 VALUES ( '2' ), +SUBPARTITION p_list_2_3 VALUES ( '3' ), +SUBPARTITION p_list_2_4 VALUES ( '4' ), +SUBPARTITION p_list_2_5 VALUES ( '5' ), +SUBPARTITION p_list_2__6 VALUES ( '-6' ), +SUBPARTITION p_list_2_6 VALUES ( '6' ), +SUBPARTITION p_list_2_7 VALUES ( '7' ), +SUBPARTITION p_list_2_8 VALUES ( '8' ), +SUBPARTITION p_list_2_9 VALUES ( '9' ), +SUBPARTITION p_list_2_10 VALUES ( '10' ), +SUBPARTITION p_list_2_11 VALUES ( '11' ), +SUBPARTITION p_list_2_12 VALUES ( '12' ), +SUBPARTITION p_list_2_13 VALUES ( '13' ), +SUBPARTITION p_list_2_14 VALUES ( '14' ), +SUBPARTITION p_list_2_15 VALUES ( '15' ), +SUBPARTITION p_list_2_16 VALUES ( '16' ), +SUBPARTITION p_list_2_17 VALUES ( '17' ), +SUBPARTITION p_list_2_18 VALUES ( '18' ), 
+SUBPARTITION p_list_2_19 VALUES ( '19' ), +SUBPARTITION p_list_2_20 VALUES ( '20' ), +SUBPARTITION p_list_2_21 VALUES ( '21' ), +SUBPARTITION p_list_2_22 VALUES ( '22' ), +SUBPARTITION p_list_2_23 VALUES ( '23' ), +SUBPARTITION p_list_2_24 VALUES ( '24' ), +SUBPARTITION p_list_2_25 VALUES ( '25' ), +SUBPARTITION p_list_2_26 VALUES ( '26' ), +SUBPARTITION p_list_2_27 VALUES ( '27' ), +SUBPARTITION p_list_2_28 VALUES ( '28' ), +SUBPARTITION p_list_2_29 VALUES ( '29' ), +SUBPARTITION p_list_2_30 VALUES ( '30' ), +SUBPARTITION p_list_2_31 VALUES ( '31' ), +SUBPARTITION p_list_2_32 VALUES ( '32' ), +SUBPARTITION p_list_2_33 VALUES ( '33' ), +SUBPARTITION p_list_2_34 VALUES ( '34' ), +SUBPARTITION p_list_2_35 VALUES ( '35' ), +SUBPARTITION p_list_2_36 VALUES ( '36' ), +SUBPARTITION p_list_2_37 VALUES ( '37' ), +SUBPARTITION p_list_2_38 VALUES ( '38' ), +SUBPARTITION p_list_2_39 VALUES ( '39' ), +SUBPARTITION p_list_2_40 VALUES ( '40' ), +SUBPARTITION p_list_2_41 VALUES ( '41' ), +SUBPARTITION p_list_2_42 VALUES ( '42' ), +SUBPARTITION p_list_2_43 VALUES ( '43' ), +SUBPARTITION p_list_2_44 VALUES ( '44' ), +SUBPARTITION p_list_2_45 VALUES ( '45' ), +SUBPARTITION p_list_2_46 VALUES ( '46' ), +SUBPARTITION p_list_2_47 VALUES ( '47' ), +SUBPARTITION p_list_2_48 VALUES ( '48' ), +SUBPARTITION p_list_2_49 VALUES ( '49' ), +SUBPARTITION p_list_2_50 VALUES ( '50' ), +SUBPARTITION p_list_2_51 VALUES ( default ) + ), + PARTITION p_range_3 VALUES LESS THAN( '20 '), + PARTITION p_range_4 VALUES LESS THAN( '30' ) + ( + SUBPARTITION p_list_4_1 VALUES ( default ) + ), + PARTITION p_range_5 VALUES LESS THAN( '40' ) + ( + SUBPARTITION p_list_5_1 VALUES ( '41' ), +SUBPARTITION p_list_5_2 VALUES ( '42' ), +SUBPARTITION p_list_5_3 VALUES ( '43' ), +SUBPARTITION p_list_5_4 VALUES ( '44' ), +SUBPARTITION p_list_5_5 VALUES ( '45' ), +SUBPARTITION p_list_5_6 VALUES ( '46' ), +SUBPARTITION p_list_5_7 VALUES ( '47' ), +SUBPARTITION p_list_5_8 VALUES ( '48' ), +SUBPARTITION p_list_5_9 VALUES ( '49' ), +SUBPARTITION p_list_5_10 VALUES ( '50' ), +SUBPARTITION p_list_5_11 VALUES ( '51' ), +SUBPARTITION p_list_5_12 VALUES ( '52' ), +SUBPARTITION p_list_5_13 VALUES ( '53' ), +SUBPARTITION p_list_5_14 VALUES ( '54' ), +SUBPARTITION p_list_5_15 VALUES ( '55' ), +SUBPARTITION p_list_5_16 VALUES ( '56' ), +SUBPARTITION p_list_5_17 VALUES ( '57' ), +SUBPARTITION p_list_5_18 VALUES ( '58' ), +SUBPARTITION p_list_5_19 VALUES ( '59' ), +SUBPARTITION p_list_5_20 VALUES ( '60' ), +SUBPARTITION p_list_5_21 VALUES ( '61' ), +SUBPARTITION p_list_5_22 VALUES ( '62' ), +SUBPARTITION p_list_5_23 VALUES ( '63' ), +SUBPARTITION p_list_5_24 VALUES ( '64' ), +SUBPARTITION p_list_5_25 VALUES ( '65' ), +SUBPARTITION p_list_5_26 VALUES ( '66' ), +SUBPARTITION p_list_5_27 VALUES ( '67' ), +SUBPARTITION p_list_5_28 VALUES ( '68' ), +SUBPARTITION p_list_5_29 VALUES ( '69' ), +SUBPARTITION p_list_5_30 VALUES ( '70' ), +SUBPARTITION p_list_5_31 VALUES ( '71' ), +SUBPARTITION p_list_5_32 VALUES ( '72' ), +SUBPARTITION p_list_5_33 VALUES ( '73' ), +SUBPARTITION p_list_5_34 VALUES ( '74' ), +SUBPARTITION p_list_5_35 VALUES ( '75' ), +SUBPARTITION p_list_5_36 VALUES ( '76' ), +SUBPARTITION p_list_5_37 VALUES ( '77' ), +SUBPARTITION p_list_5_38 VALUES ( '78' ), +SUBPARTITION p_list_5_39 VALUES ( '79' ), +SUBPARTITION p_list_5_40 VALUES ( '80' ), +SUBPARTITION p_list_5_41 VALUES ( '81' ), +SUBPARTITION p_list_5_42 VALUES ( '82' ), +SUBPARTITION p_list_5_43 VALUES ( '83' ), +SUBPARTITION p_list_5_44 VALUES ( '84' ), +SUBPARTITION p_list_5_45 VALUES ( '85' ), 
+SUBPARTITION p_list_5_46 VALUES ( '86' ), +SUBPARTITION p_list_5_47 VALUES ( '87' ), +SUBPARTITION p_list_5_48 VALUES ( '88' ), +SUBPARTITION p_list_5_49 VALUES ( '89' ), +SUBPARTITION p_list_5_50 VALUES ( '90' ), +SUBPARTITION p_list_5_51 VALUES ( '91' ), +SUBPARTITION p_list_5_52 VALUES ( '92' ), +SUBPARTITION p_list_5_53 VALUES ( '93' ), +SUBPARTITION p_list_5_54 VALUES ( '94' ), +SUBPARTITION p_list_5_55 VALUES ( '95' ), +SUBPARTITION p_list_5_56 VALUES ( '96' ), +SUBPARTITION p_list_5_57 VALUES ( '97' ), +SUBPARTITION p_list_5_58 VALUES ( '98' ), +SUBPARTITION p_list_5_59 VALUES ( '99' ), +SUBPARTITION p_list_5_60 VALUES ( '100' ), +SUBPARTITION p_list_5_61 VALUES ( '101' ), +SUBPARTITION p_list_5_62 VALUES ( '102' ), +SUBPARTITION p_list_5_63 VALUES ( '103' ), +SUBPARTITION p_list_5_64 VALUES ( '104' ), +SUBPARTITION p_list_5_65 VALUES ( '105' ), +SUBPARTITION p_list_5_66 VALUES ( '106' ), +SUBPARTITION p_list_5_67 VALUES ( '107' ), +SUBPARTITION p_list_5_68 VALUES ( '108' ), +SUBPARTITION p_list_5_69 VALUES ( '109' ), +SUBPARTITION p_list_5_70 VALUES ( '110' ), +SUBPARTITION p_list_5_71 VALUES ( '111' ), +SUBPARTITION p_list_5_72 VALUES ( '112' ), +SUBPARTITION p_list_5_73 VALUES ( '113' ), +SUBPARTITION p_list_5_74 VALUES ( '114' ), +SUBPARTITION p_list_5_75 VALUES ( '115' ), +SUBPARTITION p_list_5_76 VALUES ( '116' ), +SUBPARTITION p_list_5_77 VALUES ( '117' ), +SUBPARTITION p_list_5_78 VALUES ( '118' ), +SUBPARTITION p_list_5_79 VALUES ( '119' ), +SUBPARTITION p_list_5_80 VALUES ( default ) + ), + PARTITION p_range_6 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index on range_list(col_2) local; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: subpartition_scan.range_list.col_1 + -> Nested Loop + -> HashAggregate + Group By Key: (subpartition_scan.range_list.col_1)::text + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: (((col_1)::bigint > 10) AND ((col_1)::bigint < 100)) + Selected Partitions: 1..6 + Selected Subpartitions: ALL + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Index Scan using range_list_col_2_idx on range_list + Index Cond: ((col_2)::text = (subpartition_scan.range_list.col_1)::text) + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(18 rows) + +ALTER INDEX range_list_col_2_idx MODIFY PARTITION p_list_5_14_col_2_idx UNUSABLE; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: subpartition_scan.range_list.col_1 + -> Nested Loop + -> HashAggregate + Group By Key: (subpartition_scan.range_list.col_1)::text + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: (((col_1)::bigint > 10) AND ((col_1)::bigint < 100)) + Selected Partitions: 1..6 + Selected Subpartitions: ALL + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: ((subpartition_scan.range_list.col_1)::text = (col_2)::text) + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(18 rows) + 
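+-- note: with the local index partition p_list_5_14_col_2_idx marked UNUSABLE,
+-- the planner can no longer use range_list_col_2_idx for the inner lookup and
+-- falls back to a Partitioned Seq Scan, as the second plan above shows.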
+drop table range_list; +create table range_range_jade(jid int,jn int,name varchar2)partition by range (jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), +subpartition hrp1_2 values less than(26), +subpartition hrp1_3 values less than(36), + subpartition hrp1_4 values less than(maxvalue)), + partition hrp2 values less than(26)( + subpartition hrp2_1 values less than(maxvalue)), + partition hrp3 values less than(36)( + subpartition hrp3_1 values less than(16), +subpartition hrp3_2 values less than(26), + subpartition hrp3_3 values less than(maxvalue)), + partition hrp4 values less than(maxvalue)( + subpartition hrp4_1 values less than(16), + subpartition hrp4_2 values less than(maxvalue)) +)ENABLE ROW MOVEMENT; +-- no errors +set enable_partition_opfusion = on; +insert into range_range_jade values(1,2,'jade'); +reset enable_partition_opfusion; +drop table range_range_jade; +drop table list_range_02; +ERROR: table "list_range_02" does not exist +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, +col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; +explain (costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Nested Loop Semi Join + Join Filter: (subpartition_scan.list_range_02.col_2 = subpartition_scan.list_range_02.col_1) + -> Partition Iterator + Iterations: 4, Sub Iterations: 4 + -> Partitioned Index Scan using index_01 on list_range_02 + Index Cond: ((col_2 = ANY ('{100,200,300}'::integer[])) AND (col_2 > 10) AND (col_2 < 100)) + Filter: ((col_1 + col_2) = 50) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 + -> Materialize + -> Partition Iterator + Iterations: 1, Sub Iterations: 1 + -> Partitioned Seq Scan on list_range_02 + Filter: 
((col_1 > 10) AND (col_1 < 100) AND (col_1 = ANY ('{100,200,300}'::integer[]))) + Selected Partitions: 6 + Selected Subpartitions: ALL +(16 rows) + +explain (format yaml, costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + - Plan: + + Node Type: "Nested Loop" + + Join Type: "Semi" + + Join Filter: "(subpartition_scan.list_range_02.col_2 = subpartition_scan.list_range_02.col_1)" + + Plans: + + - Node Type: "Partition Iterator" + + Parent Relationship: "Outer" + + Iterations: 4 + + Sub Iterations: 4 + + Plans: + + - Node Type: "Partitioned Index Scan" + + Parent Relationship: "Outer" + + Scan Direction: "Forward" + + Index Name: "index_01" + + Relation Name: "list_range_02" + + Alias: "list_range_02" + + Index Cond: "((col_2 = ANY ('{100,200,300}'::integer[])) AND (col_2 > 10) AND (col_2 < 100))"+ + Filter: "((col_1 + col_2) = 50)" + + Selected Partitions: "1,3,5..6" + + Selected Subpartitions: "1:1, 3:1, 5:1, 6:1" + + - Node Type: "Materialize" + + Parent Relationship: "Inner" + + Plans: + + - Node Type: "Partition Iterator" + + Parent Relationship: "Outer" + + Iterations: 1 + + Sub Iterations: 1 + + Plans: + + - Node Type: "Partitioned Seq Scan" + + Parent Relationship: "Outer" + + Relation Name: "list_range_02" + + Alias: "list_range_02" + + Filter: "((col_1 > 10) AND (col_1 < 100) AND (col_1 = ANY ('{100,200,300}'::integer[])))"+ + Selected Partitions: "6" + + Selected Subpartitions: "ALL" +(1 row) + +explain (format json, costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + [ + + { + + "Plan": { + + "Node Type": "Nested Loop", + + "Join Type": "Semi", + + "Join Filter": "(subpartition_scan.list_range_02.col_2 = subpartition_scan.list_range_02.col_1)", + + "Plans": [ + + { + + "Node Type": "Partition Iterator", + + "Parent Relationship": "Outer", + + "Iterations": 4, + + "Sub Iterations": 4, + + "Plans": [ + + { + + "Node Type": "Partitioned Index Scan", + + "Parent Relationship": "Outer", + + "Scan Direction": "Forward", + + "Index Name": "index_01", + + "Relation Name": "list_range_02", + + "Alias": "list_range_02", + + "Index Cond": "((col_2 = ANY ('{100,200,300}'::integer[])) AND (col_2 > 10) AND (col_2 < 100))",+ + "Filter": "((col_1 + col_2) = 50)", + + "Selected Partitions": "1,3,5..6", + + "Selected Subpartitions": "1:1, 3:1, 5:1, 6:1" + + } + + ] + + }, + + { + + "Node Type": "Materialize", + + "Parent Relationship": "Inner", + + "Plans": [ + + { + + "Node Type": "Partition Iterator", + + "Parent Relationship": "Outer", + + "Iterations": 1, + + "Sub Iterations": 1, + + "Plans": [ + + { + + "Node Type": "Partitioned Seq Scan", + + "Parent Relationship": "Outer", + + "Relation Name": "list_range_02", + + "Alias": "list_range_02", + + "Filter": "((col_1 > 10) AND (col_1 < 100) AND (col_1 = ANY ('{100,200,300}'::integer[])))",+ + "Selected Partitions": "6", + + "Selected Subpartitions": "ALL" + + } + + ] + + } + + ] + + } + + ] + + } + + } + + ] +(1 row) + +drop table list_range_02; +CREATE TABLE IF NOT EXISTS 
list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +explain (costs off) select * from list_list_02 where col_1=(select max(1) from list_list_02); + QUERY PLAN +------------------------------------------------------------------------ + Partition Iterator + Iterations: PART + InitPlan 2 (returns $3) + -> Result + InitPlan 1 (returns $2) + -> Limit + -> Result + One-Time Filter: (1 IS NOT NULL) + -> Partition Iterator + Iterations: 7, Sub Iterations: 16 + -> Partitioned Seq Scan on list_list_02 + Selected Partitions: 1..7 + Selected Subpartitions: ALL + -> Partitioned Seq Scan on list_list_02 + Filter: (col_1 = $3) + Selected Partitions: PART +(16 rows) + +select * from list_list_02 where col_1=(select max(1) from list_list_02); + col_1 | col_2 | col_3 | col_4 +-------+-------+-------+------- +(0 rows) + +drop table list_list_02; +DROP SCHEMA subpartition_scan CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_select.out b/src/test/regress/expected/hw_subpartition_select.out new file mode 100644 index 000000000..422108e1b --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_select.out @@ -0,0 +1,1492 @@ +--prepare +DROP SCHEMA subpartition_select CASCADE; +ERROR: schema "subpartition_select" does not exist +CREATE SCHEMA subpartition_select; +SET CURRENT_SCHEMA TO subpartition_select; +--select +CREATE TABLE t1 +( + c1 int, + c2 int +); +insert into t1 values(generate_series(201901,201910), generate_series(1,10)); +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '3', '1', 1); +ERROR: inserted 
partition key does not map to any table partition +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '3', '1', 1); +ERROR: inserted partition key does not map to any table partition +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into 
range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +select * from range_hash order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_hash partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_hash partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +explain (costs off, verbose on) select * from range_hash where ctid='(0,1)' order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Tid Scan on subpartition_select.range_hash + Output: month_code, dept_code, user_no, sales_amt + TID Cond: (range_hash.ctid = '(0,1)'::tid) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +explain 
(costs off, verbose on) select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_hash.month_code, range_hash.dept_code, range_hash.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_hash.dept_code)::text = (range_hash.user_no)::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +explain (costs off, verbose on) select * from range_hash partition (p_201901) order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: month_code, dept_code, user_no, sales_amt + Selected Partitions: 1 + Selected Subpartitions: ALL +(10 rows) + +explain (costs off, verbose on) select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + Sort Key: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + -> Result + Output: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + -> Append + -> Partition Iterator + Output: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + Filter: ((subpartition_select.range_hash.dept_code)::text <> '2'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Partition Iterator + Output: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: subpartition_select.range_hash.month_code, subpartition_select.range_hash.dept_code, subpartition_select.range_hash.user_no, subpartition_select.range_hash.sales_amt + Filter: ((subpartition_select.range_hash.dept_code)::text = '2'::text) + Selected Partitions: 2 + Selected 
Subpartitions: ALL +(22 rows) + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +select * from range_range order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_range partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_range partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 
+(5 rows) + +explain (costs off, verbose on) select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_select.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.dept_code)::text = '2'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +explain (costs off, verbose on) select * from range_range partition (p_201901) order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Seq Scan on subpartition_select.range_range + Output: month_code, dept_code, user_no, sales_amt + Selected Partitions: 1 + Selected Subpartitions: ALL +(10 rows) + +--view +create view view_temp as select * from range_list; +select * from view_temp; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +--error +select * from view_temp partition (p_201901); +ERROR: relation "view_temp" is not partitioned table +DETAIL: N/A. +select * from view_temp partition (p_201902); +ERROR: relation "view_temp" is not partitioned table +DETAIL: N/A. 
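+-- note: the PARTITION (...) clause is only valid on partitioned tables, so
+-- selecting from the view with it fails with the errors above.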
+drop view view_temp; +with tmp1 as (select * from range_list ) select * from tmp1 order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +with tmp1 as (select * from range_list partition (p_201901)) select * from tmp1 order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +--join normal table +select * from range_list left join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_list left join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list right join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_list right join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list full join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_list full join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list inner join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 
201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +explain (costs off, verbose on) select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Sort + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt, t1.c1, t1.c2 + Sort Key: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt, t1.c1, t1.c2 + -> Hash Join + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt, t1.c1, t1.c2 + Hash Cond: (t1.c1 = (range_list.month_code)::bigint) + -> Seq Scan on subpartition_select.t1 + Output: t1.c1, t1.c2 + -> Hash + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_list + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + Filter: ((range_list.dept_code)::bigint = 2) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(18 rows) + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 
201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +explain (costs off, verbose on) select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Sort + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, t1.c1, t1.c2 + Sort Key: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, t1.c1, t1.c2 + -> Hash Join + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, t1.c1, t1.c2 + Hash Cond: (t1.c1 = (range_hash.month_code)::bigint) + -> Seq Scan on subpartition_select.t1 + Output: t1.c1, t1.c2 + -> Hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + -> Partition Iterator + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Filter: ((range_hash.dept_code)::bigint = 2) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(18 rows) + +select * from range_range left join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from 
range_range left join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range right join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_range right join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range full join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_range full join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range inner join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +explain (costs off, verbose on) select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Sort + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt, t1.c1, t1.c2 + Sort Key: range_range.month_code, range_range.dept_code, range_range.user_no, 
range_range.sales_amt, t1.c1, t1.c2 + -> Hash Join + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt, t1.c1, t1.c2 + Hash Cond: (t1.c1 = (range_range.month_code)::bigint) + -> Seq Scan on subpartition_select.t1 + Output: t1.c1, t1.c2 + -> Hash + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_range + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Filter: ((range_range.dept_code)::bigint = 2) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(18 rows) + +--join range_list and range_hash +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 
| 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 
201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +explain (costs off, verbose on) select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt, range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + -> Hash Join + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt, range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Hash Cond: ((range_hash.month_code)::text = (range_list.month_code)::text) + -> Partition Iterator + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Hash + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_list + Output: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + Filter: ((range_list.dept_code)::bigint = 2) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(23 rows) + +--join range_hash and range_range +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 
1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 
| 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +explain (costs off, verbose on) select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Sort Key: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Hash Join + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Hash Cond: ((range_range.month_code)::text = (range_hash.month_code)::text) + -> Partition Iterator + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_range + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + -> Partition Iterator + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Filter: ((range_hash.dept_code)::bigint = 2) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(23 rows) + +--join range_hash and range_range +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 
| 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 
1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +explain (costs off, verbose on) select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Sort Key: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Hash Join + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt, range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Hash Cond: ((range_range.month_code)::text = (range_hash.month_code)::text) + -> Partition Iterator + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_range + Output: range_range.month_code, range_range.dept_code, range_range.user_no, range_range.sales_amt + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + -> Partition Iterator + 
Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on subpartition_select.range_hash + Output: range_hash.month_code, range_hash.dept_code, range_hash.user_no, range_hash.sales_amt + Filter: ((range_hash.dept_code)::bigint = 2) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(23 rows) + +drop table list_range_02; +ERROR: table "list_range_02" does not exist +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; +INSERT INTO list_range_02 VALUES (GENERATE_SERIES(0, 19),GENERATE_SERIES(0, 1000),GENERATE_SERIES(0, 99)); + explain (costs off, verbose on) select * from list_range_02 where col_2 >500 and col_2 <8000 order by col_1; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Sort + Output: col_1, col_2, col_3, col_4 + Sort Key: list_range_02.col_1 + -> Partition Iterator + Output: col_1, col_2, col_3, col_4 + Iterations: 4, Sub Iterations: 4 + -> Partitioned Bitmap Heap Scan on subpartition_select.list_range_02 + Output: col_1, col_2, col_3, col_4 + Recheck Cond: ((list_range_02.col_2 > 500) AND (list_range_02.col_2 < 8000)) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 + -> Partitioned Bitmap Index Scan on index_01 + Index Cond: ((list_range_02.col_2 > 500) AND (list_range_02.col_2 < 8000)) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 +(15 rows) + +drop index index_01; +drop table list_range_02; +create table pjade(jid int,jn int,name varchar2)partition by range(jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), + subpartition hrp1_2 values less than(maxvalue)), + partition hrp2 values less than(maxvalue)( + subpartition hrp3_1 values less than(16), + subpartition hrp3_3 values less than(maxvalue)) +); +create table cjade(jid int,jn int,name varchar2); +insert into 
pjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +insert into cjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +drop table pjade; +drop table cjade; +DROP SCHEMA subpartition_select CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table t1 +drop cascades to table range_list +drop cascades to table range_hash +drop cascades to table range_range diff --git a/src/test/regress/expected/hw_subpartition_split.out b/src/test/regress/expected/hw_subpartition_split.out new file mode 100644 index 000000000..f6b0afda7 --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_split.out @@ -0,0 +1,394 @@ +--prepare +DROP SCHEMA subpartition_split CASCADE; +ERROR: schema "subpartition_split" does not exist +CREATE SCHEMA subpartition_split; +SET CURRENT_SCHEMA TO subpartition_split; +--split subpartition +-- list subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '3', '1', 1); +select * from list_list order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(6 rows) + +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 
rows) + +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201901_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(2 rows) + +alter table list_list split subpartition p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201902_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +--error +alter table list_list split subpartition p_201902_a values (3) into +( + subpartition p_201902_ab, + subpartition p_201902_ac +); +ERROR: Only the default boundary subpartition can be splited. 
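
The split statements and the rejected one above pin down the rule for list subpartitions: only a subpartition whose boundary is DEFAULT can be split, and — as the p_201901_b/p_201901_c output shows — the first new subpartition receives the listed values while the second keeps the remaining DEFAULT boundary. A minimal sketch of the accepted form, mirroring the suite's own statement on the list_list table:

```
-- Only a DEFAULT-boundary list subpartition may be split; the first new
-- subpartition takes the listed values, the second keeps DEFAULT.
ALTER TABLE list_list SPLIT SUBPARTITION p_201901_b VALUES (2) INTO
(
    SUBPARTITION p_201901_b,  -- rows with dept_code = '2'
    SUBPARTITION p_201901_c   -- remaining DEFAULT rows
);
```
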
+drop table list_list; +-- range subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(2 rows) + +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +ERROR: subpartition "p_201901_b" of relation "range_range" does not exist +select * from range_range subpartition (p_201901_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_d) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 3 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(2 rows) + +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +ERROR: subpartition "p_201902_b" of relation "range_range" does not exist +select * from range_range subpartition (p_201902_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_d) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 5 | 1 | 1 +(1 row) + +drop table range_range; +--test syntax +CREATE TABLE IF NOT EXISTS list_hash +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10 ) + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 , + SUBPARTITION p_hash_2_3 , + SUBPARTITION p_hash_2_4 , + SUBPARTITION p_hash_2_5 + ), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30 ) + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_list_5 VALUES (default) + ( + SUBPARTITION p_hash_5_1 + ), + PARTITION p_list_6 VALUES (31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_hash_6_1 , + SUBPARTITION p_hash_6_2 , + SUBPARTITION p_hash_6_3 + ) +) ENABLE ROW MOVEMENT ; +alter table list_hash split subPARTITION p_hash_2_3 at(-10) into ( subPARTITION add_p_01 , subPARTITION add_p_02 ); +ERROR: Hash subpartition does not support split. +DETAIL: N/A +drop table list_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range split subpartition p_201901_b values (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +ERROR: The syntax format of split subpartition is incorrect. +DETAIL: SPLIT SUBPARTITION NAME VALUES shouldn't be used, it's for list subpartitions. +drop table range_range; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list split subpartition p_201901_b at (2, 3) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +ERROR: The syntax format of split subpartition is incorrect. +DETAIL: SPLIT SUBPARTITION NAME AT shouldn't be used, it's for range subpartitions. 
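
Taken together, the three rejected statements fix the syntax pairing: hash subpartitions cannot be split at all, range subpartitions take SPLIT SUBPARTITION ... AT (point), and list subpartitions take SPLIT SUBPARTITION ... VALUES (list). A side-by-side sketch of the two accepted forms, reusing the subpartition names defined in the tables above:

```
-- Range subpartition: split at a boundary point into two ranges.
ALTER TABLE range_range SPLIT SUBPARTITION p_201901_b AT (3) INTO
(
    SUBPARTITION p_201901_c,  -- dept_code below the split point
    SUBPARTITION p_201901_d   -- split point up to the old upper bound
);

-- List subpartition: carve listed values out of a DEFAULT subpartition.
ALTER TABLE list_list SPLIT SUBPARTITION p_201901_b VALUES (2) INTO
(
    SUBPARTITION p_201901_b,
    SUBPARTITION p_201901_c
);
```
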
+drop table list_list; +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +alter table list_list_02 split PARTITION for (5) at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +ERROR: Un-support feature +DETAIL: For subpartition table, split partition is not supported yet. +drop table list_list_02; +--clean +DROP SCHEMA subpartition_split CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_truncate.out b/src/test/regress/expected/hw_subpartition_truncate.out new file mode 100644 index 000000000..25028d8fa --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_truncate.out @@ -0,0 +1,139 @@ +--prepare +DROP SCHEMA subpartition_truncate CASCADE; +ERROR: schema "subpartition_truncate" does not exist +CREATE SCHEMA subpartition_truncate; +SET CURRENT_SCHEMA TO subpartition_truncate; +--truncate partition/subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +alter table list_list 
truncate partition p_201901; +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +alter table list_list truncate partition p_201902; +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +alter table list_list truncate subpartition p_201901_a; +select * from list_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +alter table list_list truncate subpartition p_201901_b; +select * from list_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +alter table list_list truncate subpartition p_201902_a; +select * from list_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(2 rows) + +alter table list_list truncate subpartition p_201902_b; +select * from list_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +drop table list_list; +DROP SCHEMA subpartition_truncate CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_update.out b/src/test/regress/expected/hw_subpartition_update.out new file mode 100644 index 000000000..d0316c6c6 --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_update.out @@ -0,0 +1,242 @@ +--prepare +DROP SCHEMA subpartition_update CASCADE; +ERROR: schema "subpartition_update" does not exist +CREATE SCHEMA subpartition_update; +SET CURRENT_SCHEMA TO subpartition_update; + +--update +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION 
BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)DISABLE ROW MOVEMENT; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + + +--error +update range_list set month_code = '201903'; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +--error +update range_list set dept_code = '2'; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement + +update range_list set user_no = '2'; +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 2 | 1 + 201902 | 1 | 2 | 1 + 201902 | 2 | 2 | 1 + 201903 | 1 | 2 | 1 + 201903 | 2 | 2 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + + +-- test for upsert and merge into, both should report error +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt=1; + +CREATE TABLE temp_table +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +); +insert into temp_table values('201802', '1', '1', 1), ('201901', '2', '1', 1), ('201702', '1', '1', 1); +MERGE INTO range_list t1 +USING temp_table t2 +ON (t1.dept_code = t2.dept_code) +WHEN MATCHED THEN + UPDATE SET t1.month_code = t2.month_code WHERE t1.dept_code > 1 +WHEN NOT MATCHED THEN + INSERT VALUES (t2.month_code, t2.dept_code, t2.user_no, t2.sales_amt) WHERE t2.sales_amt = 1; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement + +drop table temp_table; +drop table range_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)ENABLE ROW MOVEMENT; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + + +select * from range_list 
subpartition (p_201901_a) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +update range_list set dept_code = '2' where month_code = '201902'; +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +update range_list set month_code = '201903' where month_code = '201902'; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + + +drop table range_list; + +-- FOREIGN KEY +drop table tb_02; +ERROR: table "tb_02" does not exist +CREATE TABLE tb_02 +( + col_1 int PRIMARY KEY, + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "tb_02_pkey" for table "tb_02" + +drop table range_range_02 cascade; +ERROR: table "range_range_02" does not exist +CREATE TABLE range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int , +FOREIGN KEY(col_1) REFERENCES tb_02(col_1) +) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 80 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); + +insert into tb_02 values(0,0,0,0); +insert into range_range_02 values(0,0,0,0); + +update tb_02 set col_1=8 where col_2=0; +ERROR: update or delete on table "tb_02" violates foreign key constraint "range_range_02_col_1_fkey" on table "range_range_02" +DETAIL: Key (col_1)=(0) is still referenced from table "range_range_02". 
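
The blocked update shows that the foreign-key check treats a subpartitioned referencing table exactly like a plain one: the parent key cannot change while any child row still references it. A hedged sketch of the unblocking order, reusing the tables above (the DELETE is illustrative and not part of the suite):

```
-- Remove the referencing row first; the parent-key update then succeeds.
DELETE FROM range_range_02 WHERE col_1 = 0;
UPDATE tb_02 SET col_1 = 8 WHERE col_2 = 0;
```
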
+drop table range_range_02 cascade; +drop table tb_02; +DROP SCHEMA subpartition_update CASCADE; diff --git a/src/test/regress/expected/hw_subpartition_view.out b/src/test/regress/expected/hw_subpartition_view.out new file mode 100644 index 000000000..f1b6eba6c --- /dev/null +++ b/src/test/regress/expected/hw_subpartition_view.out @@ -0,0 +1,365 @@ +-- CREATE partition table +create schema hw_subpartition_view; +set search_path = hw_subpartition_view; +create table tab_interval +( + c1 int, + c2 int, + logdate date not null +) +partition by range (logdate) +INTERVAL ('1 month') +( + PARTITION tab_interval_p0 VALUES LESS THAN ('2020-03-01'), + PARTITION tab_interval_p1 VALUES LESS THAN ('2020-04-01'), + PARTITION tab_interval_p2 VALUES LESS THAN ('2020-05-01') +); +create index ip_index_local1 on tab_interval (c1) local; +create index gpi_index_test on tab_interval(c2) global; +-- CREATE subpartition table +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2,3'), + SUBPARTITION p_201902_c values (DEFAULT) + ), + PARTITION p_max VALUES LESS THAN(maxvalue) +); +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +---- +-- owner +---- +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_TABLES where schema = 'hw_subpartition_view'; + table_name | partitioning_type | partition_count | partitioning_key_count | def_tablespace_name | schema | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +--------------+-------------------+-----------------+------------------------+---------------------+----------------------+----------------------+------------------------+--------------------------- + tab_interval | INTERVAL | 3 | 1 | DEFAULT TABLESPACE | hw_subpartition_view | NONE | 0 | 0 + range_list | RANGE | 3 | 1 | DEFAULT TABLESPACE | hw_subpartition_view | LIST | 1 | 1 +(2 rows) + +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from SYS.MY_PART_TABLES where schema = 'hw_subpartition_view'; + table_name | partitioning_type | partition_count | partitioning_key_count | def_tablespace_name | schema | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +--------------+-------------------+-----------------+------------------------+---------------------+----------------------+----------------------+------------------------+--------------------------- + tab_interval | INTERVAL | 3 | 1 | DEFAULT TABLESPACE | hw_subpartition_view | NONE | 0 | 0 + range_list | RANGE | 3 | 1 | DEFAULT TABLESPACE | hw_subpartition_view | LIST | 1 | 1 +(2 rows) + +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from MY_TAB_PARTITIONS where schema = 'hw_subpartition_view'; + 
table_name | partition_name | high_value | tablespace_name | schema | subpartition_count | high_value_length +--------------+-----------------+------------+--------------------+----------------------+--------------------+------------------- + range_list | p_201901 | 201903 | DEFAULT TABLESPACE | hw_subpartition_view | 2 | 6 + range_list | p_201902 | 201910 | DEFAULT TABLESPACE | hw_subpartition_view | 3 | 6 + range_list | p_max | MAXVALUE | DEFAULT TABLESPACE | hw_subpartition_view | 1 | 8 + tab_interval | tab_interval_p0 | 2020-03-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 + tab_interval | tab_interval_p1 | 2020-04-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 + tab_interval | tab_interval_p2 | 2020-05-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 +(6 rows) + +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from SYS.MY_TAB_PARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | high_value | tablespace_name | schema | subpartition_count | high_value_length +--------------+-----------------+------------+--------------------+----------------------+--------------------+------------------- + range_list | p_201901 | 201903 | DEFAULT TABLESPACE | hw_subpartition_view | 2 | 6 + range_list | p_201902 | 201910 | DEFAULT TABLESPACE | hw_subpartition_view | 3 | 6 + range_list | p_max | MAXVALUE | DEFAULT TABLESPACE | hw_subpartition_view | 1 | 8 + tab_interval | tab_interval_p0 | 2020-03-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 + tab_interval | tab_interval_p1 | 2020-04-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 + tab_interval | tab_interval_p2 | 2020-05-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 +(6 rows) + +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | subpartition_name | high_value | tablespace_name | schema | high_value_length +------------+----------------+-----------------------+------------+--------------------+----------------------+------------------- + range_list | p_201901 | p_201901_a | 1 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201901 | p_201901_b | 2 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201902 | p_201902_a | 1 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201902 | p_201902_b | 2,3 | DEFAULT TABLESPACE | hw_subpartition_view | 3 + range_list | p_201902 | p_201902_c | DEFAULT | DEFAULT TABLESPACE | hw_subpartition_view | 7 + range_list | p_max | p_max_subpartdefault1 | DEFAULT | DEFAULT TABLESPACE | hw_subpartition_view | 7 +(6 rows) + +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from SYS.MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | subpartition_name | high_value | tablespace_name | schema | high_value_length +------------+----------------+-----------------------+------------+--------------------+----------------------+------------------- + range_list | p_201901 | p_201901_a | 1 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201901 | p_201901_b | 2 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201902 | p_201902_a | 1 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201902 | p_201902_b | 2,3 | DEFAULT TABLESPACE | hw_subpartition_view | 3 + 
range_list | p_201902 | p_201902_c | DEFAULT | DEFAULT TABLESPACE | hw_subpartition_view | 7 + range_list | p_max | p_max_subpartdefault1 | DEFAULT | DEFAULT TABLESPACE | hw_subpartition_view | 7 +(6 rows) + +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_INDEXES where schema = 'hw_subpartition_view'; + def_tablespace_name | index_name | partition_count | partitioning_key_count | partitioning_type | schema | table_name | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +---------------------+-----------------+-----------------+------------------------+-------------------+----------------------+--------------+----------------------+------------------------+--------------------------- + DEFAULT TABLESPACE | idx_month_code | 3 | 1 | RANGE | hw_subpartition_view | range_list | LIST | 1 | 1 + DEFAULT TABLESPACE | ip_index_local1 | 3 | 1 | INTERVAL | hw_subpartition_view | tab_interval | NONE | 0 | 0 +(2 rows) + +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from SYS.MY_PART_INDEXES where schema = 'hw_subpartition_view'; + def_tablespace_name | index_name | partition_count | partitioning_key_count | partitioning_type | schema | table_name | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +---------------------+-----------------+-----------------+------------------------+-------------------+----------------------+--------------+----------------------+------------------------+--------------------------- + DEFAULT TABLESPACE | idx_month_code | 3 | 1 | RANGE | hw_subpartition_view | range_list | LIST | 1 | 1 + DEFAULT TABLESPACE | ip_index_local1 | 3 | 1 | INTERVAL | hw_subpartition_view | tab_interval | NONE | 0 | 0 +(2 rows) + +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +-----------------+-----------------+---------------------+------------+------------------------+----------------------+------------------- + ip_index_local1 | tab_interval_p0 | DEFAULT TABLESPACE | 2020-03-01 | t | hw_subpartition_view | 10 + ip_index_local1 | tab_interval_p1 | DEFAULT TABLESPACE | 2020-04-01 | t | hw_subpartition_view | 10 + ip_index_local1 | tab_interval_p2 | DEFAULT TABLESPACE | 2020-05-01 | t | hw_subpartition_view | 10 +(3 rows) + +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from SYS.MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +-----------------+-----------------+---------------------+------------+------------------------+----------------------+------------------- + ip_index_local1 | tab_interval_p0 | DEFAULT TABLESPACE | 2020-03-01 | t | hw_subpartition_view | 10 + ip_index_local1 | tab_interval_p1 | DEFAULT TABLESPACE | 2020-04-01 | t | hw_subpartition_view | 10 + ip_index_local1 | tab_interval_p2 | DEFAULT TABLESPACE | 2020-05-01 | t | hw_subpartition_view | 10 +(3 rows) + +select index_name, 
partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | subpartition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +----------------+----------------+-----------------------+---------------------+------------+------------------------+----------------------+------------------- + idx_month_code | p_201901 | p_201901_a | DEFAULT TABLESPACE | 1 | t | hw_subpartition_view | 1 + idx_month_code | p_201901 | p_201901_b | DEFAULT TABLESPACE | 2 | t | hw_subpartition_view | 1 + idx_month_code | p_201902 | p_201902_a | DEFAULT TABLESPACE | 1 | t | hw_subpartition_view | 1 + idx_month_code | p_201902 | p_201902_b | DEFAULT TABLESPACE | 2,3 | t | hw_subpartition_view | 3 + idx_month_code | p_201902 | p_201902_c | DEFAULT TABLESPACE | DEFAULT | t | hw_subpartition_view | 7 + idx_month_code | p_max | p_max_subpartdefault1 | DEFAULT TABLESPACE | DEFAULT | t | hw_subpartition_view | 7 +(6 rows) + +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from SYS.MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | subpartition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +----------------+----------------+-----------------------+---------------------+------------+------------------------+----------------------+------------------- + idx_month_code | p_201901 | p_201901_a | DEFAULT TABLESPACE | 1 | t | hw_subpartition_view | 1 + idx_month_code | p_201901 | p_201901_b | DEFAULT TABLESPACE | 2 | t | hw_subpartition_view | 1 + idx_month_code | p_201902 | p_201902_a | DEFAULT TABLESPACE | 1 | t | hw_subpartition_view | 1 + idx_month_code | p_201902 | p_201902_b | DEFAULT TABLESPACE | 2,3 | t | hw_subpartition_view | 3 + idx_month_code | p_201902 | p_201902_c | DEFAULT TABLESPACE | DEFAULT | t | hw_subpartition_view | 7 + idx_month_code | p_max | p_max_subpartdefault1 | DEFAULT TABLESPACE | DEFAULT | t | hw_subpartition_view | 7 +(6 rows) + +---- +-- others with permission +---- +create user user_spv_authed password 'Gauss@123'; +grant select on range_list to user_spv_authed; +grant select on tab_interval to user_spv_authed; +grant usage on schema sys to user_spv_authed; +set role "user_spv_authed" password 'Gauss@123'; +-- permission denied +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_TABLES where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_part_tables +DETAIL: N/A +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from ADM_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_tab_partitions +DETAIL: N/A +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from ADM_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_tab_subpartitions +DETAIL: N/A +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_INDEXES 
where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_part_indexes +DETAIL: N/A +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_PARTITIONS where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_ind_partitions +DETAIL: N/A +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_ind_subpartitions +DETAIL: N/A +-- visible if granted for DB_xxx views +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_TABLES where schema = 'hw_subpartition_view'; + table_name | partitioning_type | partition_count | partitioning_key_count | def_tablespace_name | schema | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +--------------+-------------------+-----------------+------------------------+---------------------+----------------------+----------------------+------------------------+--------------------------- + tab_interval | INTERVAL | 3 | 1 | DEFAULT TABLESPACE | hw_subpartition_view | NONE | 0 | 0 + range_list | RANGE | 3 | 1 | DEFAULT TABLESPACE | hw_subpartition_view | LIST | 1 | 1 +(2 rows) + +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from DB_TAB_PARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | high_value | tablespace_name | schema | subpartition_count | high_value_length +--------------+-----------------+------------+--------------------+----------------------+--------------------+------------------- + range_list | p_201901 | 201903 | DEFAULT TABLESPACE | hw_subpartition_view | 2 | 6 + range_list | p_201902 | 201910 | DEFAULT TABLESPACE | hw_subpartition_view | 3 | 6 + range_list | p_max | MAXVALUE | DEFAULT TABLESPACE | hw_subpartition_view | 1 | 8 + tab_interval | tab_interval_p0 | 2020-03-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 + tab_interval | tab_interval_p1 | 2020-04-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 + tab_interval | tab_interval_p2 | 2020-05-01 | DEFAULT TABLESPACE | hw_subpartition_view | 0 | 10 +(6 rows) + +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from DB_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | subpartition_name | high_value | tablespace_name | schema | high_value_length +------------+----------------+-----------------------+------------+--------------------+----------------------+------------------- + range_list | p_201901 | p_201901_a | 1 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201901 | p_201901_b | 2 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201902 | p_201902_a | 1 | DEFAULT TABLESPACE | hw_subpartition_view | 1 + range_list | p_201902 | p_201902_b | 2,3 | DEFAULT TABLESPACE | hw_subpartition_view | 3 + range_list | p_201902 | p_201902_c | DEFAULT | DEFAULT TABLESPACE | hw_subpartition_view | 7 + range_list | p_max | p_max_subpartdefault1 | DEFAULT | DEFAULT TABLESPACE | hw_subpartition_view | 7 +(6 rows) + +select def_tablespace_name, index_name, partition_count, partitioning_key_count, 
partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_INDEXES where schema = 'hw_subpartition_view';
+ def_tablespace_name | index_name | partition_count | partitioning_key_count | partitioning_type | schema | table_name | subpartitioning_type | def_subpartition_count | subpartitioning_key_count
+---------------------+-----------------+-----------------+------------------------+-------------------+----------------------+--------------+----------------------+------------------------+---------------------------
+ DEFAULT TABLESPACE | idx_month_code | 3 | 1 | RANGE | hw_subpartition_view | range_list | LIST | 1 | 1
+ DEFAULT TABLESPACE | ip_index_local1 | 3 | 1 | INTERVAL | hw_subpartition_view | tab_interval | NONE | 0 | 0
+(2 rows)
+
+select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_PARTITIONS where schema = 'hw_subpartition_view';
+ index_name | partition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length
+-----------------+-----------------+---------------------+------------+------------------------+----------------------+-------------------
+ ip_index_local1 | tab_interval_p0 | DEFAULT TABLESPACE | 2020-03-01 | t | hw_subpartition_view | 10
+ ip_index_local1 | tab_interval_p1 | DEFAULT TABLESPACE | 2020-04-01 | t | hw_subpartition_view | 10
+ ip_index_local1 | tab_interval_p2 | DEFAULT TABLESPACE | 2020-05-01 | t | hw_subpartition_view | 10
+(3 rows)
+
+select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_SUBPARTITIONS where schema = 'hw_subpartition_view';
+ index_name | partition_name | subpartition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length
+----------------+----------------+-----------------------+---------------------+------------+------------------------+----------------------+-------------------
+ idx_month_code | p_201901 | p_201901_a | DEFAULT TABLESPACE | 1 | t | hw_subpartition_view | 1
+ idx_month_code | p_201901 | p_201901_b | DEFAULT TABLESPACE | 2 | t | hw_subpartition_view | 1
+ idx_month_code | p_201902 | p_201902_a | DEFAULT TABLESPACE | 1 | t | hw_subpartition_view | 1
+ idx_month_code | p_201902 | p_201902_b | DEFAULT TABLESPACE | 2,3 | t | hw_subpartition_view | 3
+ idx_month_code | p_201902 | p_201902_c | DEFAULT TABLESPACE | DEFAULT | t | hw_subpartition_view | 7
+ idx_month_code | p_max | p_max_subpartdefault1 | DEFAULT TABLESPACE | DEFAULT | t | hw_subpartition_view | 7
+(6 rows)
+
+-- nothing in the MY_xxx views: this user owns none of these objects
+select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_TABLES where schema = 'hw_subpartition_view';
+ table_name | partitioning_type | partition_count | partitioning_key_count | def_tablespace_name | schema | subpartitioning_type | def_subpartition_count | subpartitioning_key_count
+------------+-------------------+-----------------+------------------------+---------------------+--------+----------------------+------------------------+---------------------------
+(0 rows)
+
+select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from MY_TAB_PARTITIONS where schema = 'hw_subpartition_view';
+ table_name | partition_name |
high_value | tablespace_name | schema | subpartition_count | high_value_length +------------+----------------+------------+-----------------+--------+--------------------+------------------- +(0 rows) + +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | subpartition_name | high_value | tablespace_name | schema | high_value_length +------------+----------------+-------------------+------------+-----------------+--------+------------------- +(0 rows) + +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_INDEXES where schema = 'hw_subpartition_view'; + def_tablespace_name | index_name | partition_count | partitioning_key_count | partitioning_type | schema | table_name | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +---------------------+------------+-----------------+------------------------+-------------------+--------+------------+----------------------+------------------------+--------------------------- +(0 rows) + +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +------------+----------------+---------------------+------------+------------------------+--------+------------------- +(0 rows) + +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | subpartition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +------------+----------------+-------------------+---------------------+------------+------------------------+--------+------------------- +(0 rows) + +-- recover +reset role; +---- +-- others without permission +---- +create user user_spv_notauthed password 'Gauss@123'; +set role "user_spv_notauthed" password 'Gauss@123'; +-- permission denied +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_TABLES where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_part_tables +DETAIL: N/A +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from ADM_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_tab_partitions +DETAIL: N/A +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from ADM_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_tab_subpartitions +DETAIL: N/A +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_INDEXES where schema = 'hw_subpartition_view'; +ERROR: permission denied for relation adm_part_indexes +DETAIL: N/A +select index_name, 
partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_PARTITIONS where schema = 'hw_subpartition_view';
+ERROR: permission denied for relation adm_ind_partitions
+DETAIL: N/A
+select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_SUBPARTITIONS where schema = 'hw_subpartition_view';
+ERROR: permission denied for relation adm_ind_subpartitions
+DETAIL: N/A
+-- empty
+select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_TABLES where schema = 'hw_subpartition_view';
+ table_name | partitioning_type | partition_count | partitioning_key_count | def_tablespace_name | schema | subpartitioning_type | def_subpartition_count | subpartitioning_key_count
+------------+-------------------+-----------------+------------------------+---------------------+--------+----------------------+------------------------+---------------------------
+(0 rows)
+
+select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from DB_TAB_PARTITIONS where schema = 'hw_subpartition_view';
+ table_name | partition_name | high_value | tablespace_name | schema | subpartition_count | high_value_length
+------------+----------------+------------+-----------------+--------+--------------------+-------------------
+(0 rows)
+
+select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from DB_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view';
+ table_name | partition_name | subpartition_name | high_value | tablespace_name | schema | high_value_length
+------------+----------------+-------------------+------------+-----------------+--------+-------------------
+(0 rows)
+
+select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_INDEXES where schema = 'hw_subpartition_view';
+ def_tablespace_name | index_name | partition_count | partitioning_key_count | partitioning_type | schema | table_name | subpartitioning_type | def_subpartition_count | subpartitioning_key_count
+---------------------+------------+-----------------+------------------------+-------------------+--------+------------+----------------------+------------------------+---------------------------
+(0 rows)
+
+select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_PARTITIONS where schema = 'hw_subpartition_view';
+ index_name | partition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length
+------------+----------------+---------------------+------------+------------------------+--------+-------------------
+(0 rows)
+
+select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_SUBPARTITIONS where schema = 'hw_subpartition_view';
+ index_name | partition_name | subpartition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length
+------------+----------------+-------------------+---------------------+------------+------------------------+--------+-------------------
+(0 rows)
+
+-- empty
+select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_TABLES where schema = 'hw_subpartition_view'; + table_name | partitioning_type | partition_count | partitioning_key_count | def_tablespace_name | schema | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +------------+-------------------+-----------------+------------------------+---------------------+--------+----------------------+------------------------+--------------------------- +(0 rows) + +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from MY_TAB_PARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | high_value | tablespace_name | schema | subpartition_count | high_value_length +------------+----------------+------------+-----------------+--------+--------------------+------------------- +(0 rows) + +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; + table_name | partition_name | subpartition_name | high_value | tablespace_name | schema | high_value_length +------------+----------------+-------------------+------------+-----------------+--------+------------------- +(0 rows) + +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_INDEXES where schema = 'hw_subpartition_view'; + def_tablespace_name | index_name | partition_count | partitioning_key_count | partitioning_type | schema | table_name | subpartitioning_type | def_subpartition_count | subpartitioning_key_count +---------------------+------------+-----------------+------------------------+-------------------+--------+------------+----------------------+------------------------+--------------------------- +(0 rows) + +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +------------+----------------+---------------------+------------+------------------------+--------+------------------- +(0 rows) + +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; + index_name | partition_name | subpartition_name | def_tablespace_name | high_value | index_partition_usable | schema | high_value_length +------------+----------------+-------------------+---------------------+------------+------------------------+--------+------------------- +(0 rows) + +-- recover +reset role; +drop schema hw_subpartition_view cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table tab_interval +drop cascades to table range_list diff --git a/src/test/regress/expected/hw_to_timestamp.out b/src/test/regress/expected/hw_to_timestamp.out index 4b7f93a0e..293476f82 100644 --- a/src/test/regress/expected/hw_to_timestamp.out +++ b/src/test/regress/expected/hw_to_timestamp.out @@ -823,9 +823,6 @@ CONTEXT: referenced column: to_date SELECT 
to_date('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 11111'); ERROR: the format is not correct CONTEXT: referenced column: to_date -/*---------------------------------------------------------------------- -DTS2021082425612: support format "FF1"、"FF2"、"FF3"、"FF4"、"FF5"、"FF6" -----------------------------------------------------------------------*/ SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'YYYYMMDDHH24MISSFF1'); to_char ----------------- @@ -970,9 +967,6 @@ SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'yyyymmddhh24missff6 20210830062155535744 (1 row) -/*---------------------------------- -DTS2021083017908: support format "X" -----------------------------------*/ SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'YYYYMMDDHH24MISSXFF'); to_char ----------------------- @@ -997,9 +991,6 @@ SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'yyyymmddhh24missxff 20210830062155.535744 (1 row) -/*---------------------------------------------- -DTS2017031602663: support timezone Asia/Beijing -----------------------------------------------*/ set timezone='Asia/Beijing'; select extract(timezone from now()); date_part diff --git a/src/test/regress/expected/hybrid_row_column.out b/src/test/regress/expected/hybrid_row_column.out index fbf5c34fc..2f50ccd8f 100644 --- a/src/test/regress/expected/hybrid_row_column.out +++ b/src/test/regress/expected/hybrid_row_column.out @@ -41,8 +41,9 @@ SELECT TRUNC(c_first::MACADDR) FROM columnar_table_06; SELECT COUNT(c_first) FROM columnar_table_06 GROUP BY c_first::MACADDR; count ------- - 2 -(1 row) + 1 + 1 +(2 rows) SELECT * FROM columnar_table_06 ORDER BY c_first::MACADDR; c_id | c_w_id | c_first diff --git a/src/test/regress/expected/index_advisor.out b/src/test/regress/expected/index_advisor.out index b2c14b906..4adf9d0cd 100755 --- a/src/test/regress/expected/index_advisor.out +++ b/src/test/regress/expected/index_advisor.out @@ -9,92 +9,418 @@ ANALYZE t1; CREATE TABLE t2 (col1 int, col2 int); INSERT INTO t2 VALUES(generate_series(1, 1000),generate_series(1, 1000)); ANALYZE t2; +CREATE TEMP TABLE mytemp1 (col1 int, col2 int, col3 
text); +INSERT INTO mytemp1 VALUES(generate_series(1, 3000),generate_series(1, 3000),repeat( chr(int4(random()*26)+65),4)); +ANALYZE mytemp1; ---single query --test where -SELECT * FROM gs_index_advise('SELECT * FROM t1 WHERE col1 = 10'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 WHERE col1 = 10') as a; schema | table | column --------+-------+-------- - public | t1 | (col1) + public | t1 | col1 (1 row) --test join -SELECT * FROM gs_index_advise('SELECT * FROM t1 join t2 on t1.col1 = t2.col1'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 join t2 on t1.col1 = t2.col1') as a; schema | table | column --------+-------+-------- - public | t1 | (col1) + public | t1 | col1 public | t2 | (2 rows) --test multi table -SELECT * FROM gs_index_advise('SELECT count(*), t2.col1 FROM t1 join t2 on t1.col2 = t2.col2 WHERE t2.col2 > 2 GROUP BY t2.col1 ORDER BY t2.col1'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT count(*), t2.col1 FROM t1 join t2 on t1.col2 = t2.col2 WHERE t2.col2 > 2 GROUP BY t2.col1 ORDER BY t2.col1') as a; schema | table | column --------+-------+-------- - public | t1 | (col2) - public | t2 | (col1) + public | t1 | col2 + public | t2 | col1 (2 rows) --test order by -SELECT * FROM gs_index_advise('SELECT * FROM t1 ORDER BY 2'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 ORDER BY 2') as a; schema | table | column --------+-------+-------- - public | t1 | (col2) + public | t1 | col2 (1 row) -SELECT * FROM gs_index_advise('SELECT * FROM t1 as a WHERE a.col2 in (SELECT col1 FROM t2 ORDER BY 1) ORDER BY 2'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 as a WHERE a.col2 in (SELECT col1 FROM t2 ORDER BY 1) ORDER BY 2') as a; schema | table | column --------+-------+-------- - public | t1 | (col2) - public | t2 | + public | t1 | col2 + public | t2 | col1 (2 rows) -SELECT * FROM gs_index_advise('SELECT * FROM t1 WHERE col1 > 10 ORDER BY 1,col2'); - schema | table | column ---------+-------+------------- - public | t1 | (col1,col2) +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 WHERE col1 > 10 ORDER BY 1,col2') as a; + schema | table | column +--------+-------+----------- + public | t1 | col1,col2 (1 row) -SELECT * FROM gs_index_advise('SELECT *, *FROM t1 ORDER BY 2, 4'); - schema | table | column ---------+-------+------------- - public | t1 | (col2,col1) +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT *, *FROM t1 ORDER BY 2, 4') as a; + schema | table | column +--------+-------+----------- + public | t1 | col2,col1 (1 row) -SELECT * FROM gs_index_advise('SELECT *, col2 FROM t1 ORDER BY 1, 3'); - schema | table | column ---------+-------+------------- - public | t1 | (col1,col3) +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT *, col2 FROM t1 ORDER BY 1, 3') as a; + schema | table | column +--------+-------+----------- + public | t1 | col1,col3 (1 row) --test string overlength -SELECT * FROM gs_index_advise('SELECT * FROM t1 where col3 in 
(''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'')'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 where col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'')') as a; schema | table | column --------+-------+-------- - public | t1 | (col3) + public | t1 | col3 (1 row) +--test union all +SELECT a.schema, a.table, a.column FROM gs_index_advise('select * from ((select col1, col2 from t1 where col1=1) union all (select col1, col2 from t2 where col1=1))') as a; + schema | table | column +--------+-------+-------- + public | t1 | col1 + public | t2 | col1 +(2 rows) + +--test insert +SELECT a.schema, a.table, a.column FROM gs_index_advise('INSERT INTO t2 (SELECT col1, col2 from t1 where col1=1)') as a; +ERROR: can not advise for the query because not found a select statement. +--test delete +SELECT a.schema, a.table, a.column FROM gs_index_advise('DELETE FROM t1 where col1 > (SELECT COUNT(*) from t1 where col1<1000)') as a; +ERROR: can not advise for the query because not found a select statement. +--test update +SELECT a.schema, a.table, a.column FROM gs_index_advise('UPDATE t1 SET col1=(SELECT col2 from t2 where col1=10)') as a; +ERROR: can not advise for the query because not found a select statement. +--test nested select +SELECT a.schema, a.table, a.column FROM gs_index_advise('select count(*) from (select t1.col1, t2.col2 from t1 join t2 on t1.col1 = t2.col1)') as a; + schema | table | column +--------+-------+-------- + public | t1 | col1 + public | t2 | +(2 rows) + +--test temp table +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM mytemp1 WHERE col1 = 10') as a; +WARNING: can not advise for table mytemp1 due to invalid oid or irregular table. +ERROR: can not advise for the query because can not recognize involved tables. 
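+-- note: as the WARNING above shows, gs_index_advise skips temp tables such as
+-- mytemp1, so a query that touches only a temp table has no tables it can
+-- recognize and advising fails.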
+--test complex sql +SELECT a.schema, a.table, a.column FROM gs_index_advise('select * from ((select t1.col1, t2.col2 from t1 join t2 on t1.col1 = t2.col1) union all (select col1, col2 from t1 where col1=col2 and col2>200 and col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'') order by col3)) order by col2 limit 10') as a; + schema | table | column +--------+-------+-------- + public | t1 | col1 + public | t1 | col3 + public | t2 | +(3 rows) + +SELECT a.schema, a.table, a.column FROM gs_index_advise('select * from ((SELECT t1.col1, t1.col2 from t1 where col1=col2 and col1<99) UNION ALL (select col2, col1 from t1 where col1=col2 and col2>200 order by col1 DESC)) as t3 join t2 on t3.col1 = t2.col1 where t2.col2=4 and t3.col1<100 and t2.col1=4 order by t3.col1, t2.col2 DESC limit 100') as a; + schema | table | column +--------+-------+----------- + public | t2 | col2,col1 + public | t1 | col1 +(2 rows) + ---virtual index --test hypopg_create_index SELECT * FROM hypopg_create_index('CREATE INDEX ON t1(col1)'); indexrelid | indexname ------------+---------------------- ---? .*| .*btree_t1_col1 +--? .*|.*btree_t1_col1 (1 row) ---test hypopg_display_index +SELECT * FROM hypopg_create_index('CREATE INDEX ON t2(col1)'); + indexrelid | indexname +------------+---------------------- +--? .*|.*btree_t2_col1 +(1 row) + +SELECT * FROM hypopg_create_index('SELECT * from t1'); +ERROR: hypopg: SQL order #1 is not a CREATE INDEX statement +SELECT * FROM hypopg_create_index('UPDATE t2 SET col1=(SELECT col2 from t2 where col1=10)'); +ERROR: hypopg: SQL order #1 is not a CREATE INDEX statement +SELECT * FROM hypopg_create_index('DELETE from t2 where col1 <10'); +ERROR: hypopg: SQL order #1 is not a CREATE INDEX statement +SELECT * FROM hypopg_create_index('INSERT INTO t2 VALUES(generate_series(1001, 2000),generate_series(1001, 2000))'); +ERROR: hypopg: SQL order #1 is not a CREATE INDEX statement +--test explain set enable_hypo_index = on;explain SELECT * FROM t1 WHERE col1 = 100; QUERY PLAN -------------------------------------------------------------------------------- ---? Index Scan using .*btree_t1_col1 on t1 .* +--? Index Scan using .*btree_t1_col1 on t1.* Index Cond: (col1 = 100) (2 rows) ---test hypopg_drop_index -SELECT * FROM hypopg_display_index(); - indexname | indexrelid | table | column -----------------------+------------+-------+-------- ---?.*btree_t1_col1 |.*| t1 | (col1) +explain UPDATE t1 SET col1=0 where col1=2; + QUERY PLAN +-------------------------------------------------------------------------------------- +--? Update on t1.* +--? -> Index Scan using .*btree_t1_col1 on t1.* + Index Cond: (col1 = 2) +(3 rows) + +explain UPDATE t1 SET col1=(SELECT col2 from t2 where col1=10); + QUERY PLAN +--------------------------------------------------------------------------------------- +--? Update on t1.* + InitPlan 1 (returns $0) +--? -> Index Scan using .*btree_t2_col1 on t2.* + Index Cond: (col1 = 10) +--? 
-> Seq Scan on t1.* +(5 rows) + +explain INSERT INTO t1 SELECT * from t1 where col1=10; + QUERY PLAN +-------------------------------------------------------------------------------------- +--? Insert on t1.* +--? -> Index Scan using .*btree_t1_col1 on t1.* + Index Cond: (col1 = 10) +(3 rows) + +explain DELETE FROM t1 where col1 > (SELECT COUNT(*) from t1 where col1<1000); + QUERY PLAN +------------------------------------------------------------------------------------------------------ +--? Delete on t1.* + InitPlan 1 (returns $0) +--? -> Aggregate.* +--? -> Index Only Scan using .*btree_t1_col1 on t1.* + Index Cond: (col1 < 1000) +--? -> Index Scan using .*btree_t1_col1 on t1.* + Index Cond: (col1 > $0) +(7 rows) + +--test partition table +create table range_part_a( +stu_id varchar2(100), +stu_name varchar2(100), +sex varchar2(1), +credit integer default 0 +)partition by range (credit) +(partition p_range_1 values less than (60), +partition p_range_2 values less than (120), +partition p_range_3 values less than (180), +partition p_range_4 values less than (240), +partition p_range_6 values less than (maxvalue) +); +create table range_part_b( +stu_id varchar2(100), +stu_name varchar2(100), +sex varchar2(1), +credit integer default 0 +)partition by range (credit) +(partition p_range_1 values less than (60), +partition p_range_2 values less than (120), +partition p_range_3 values less than (180), +partition p_range_4 values less than (240), +partition p_range_6 values less than (maxvalue) +); +CREATE TABLE range_subpart_a( +col_1 int, +col_2 int, +col_3 VARCHAR2 ( 30 ) , +col_4 int +)PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +(PARTITION p_range_1 VALUES LESS THAN( 1000 ) + (SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), +PARTITION p_range_2 VALUES LESS THAN( 2001 ) + (SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); +CREATE TABLE range_subpart_b( +col_1 int, +col_2 int, +col_3 VARCHAR2 ( 30 ) , +col_4 int +)PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +(PARTITION p_range_1 VALUES LESS THAN( 1000 ) + (SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), +PARTITION p_range_2 VALUES LESS THAN( 30001 ) + (SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); +INSERT INTO range_part_a VALUES(repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),1),generate_series(1, 2000)); +ANALYZE range_part_a; +INSERT INTO range_part_b VALUES(repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),1),generate_series(1, 3000)); +ANALYZE range_part_b; +INSERT INTO range_subpart_a VALUES(generate_series(1, 2000),generate_series(1, 2000),repeat( chr(int4(random()*26)+65),1),generate_series(1, 2000)); +ANALYZE range_subpart_a; +INSERT INTO range_subpart_b VALUES(generate_series(1, 3000),generate_series(1, 3000),repeat( chr(int4(random()*26)+65),1),generate_series(1, 3000)); +ANALYZE range_subpart_b; +--single query +--test syntax error +select * from gs_index_advise('select * from range_part_a as a where a.stu_id in (select stu_id from range_part_a order by 4)order by 4'); +ERROR: ORDER BY position 4 is not in select list +LINE 1: ...m range_part_a as a where a.stu_id in (select stu_id from r... 
+                                                                      ^
+--test local index
+--partition
+select * from gs_index_advise('select * from range_part_a where credit = 4');
+ schema | table | column | indextype
+--------+--------------+--------+-----------
+ public | range_part_a | credit | local
 (1 row)

+select * from gs_index_advise('select * from range_part_a where stu_id = ''10'' and credit = 4');
+ schema | table | column | indextype
+--------+--------------+---------------+-----------
+ public | range_part_a | stu_id,credit | local
+(1 row)
+
+select * from gs_index_advise('select * from range_part_a partition(p_range_1) where stu_id = ''10''');
+ schema | table | column | indextype
+--------+--------------+--------+-----------
+ public | range_part_a | stu_id | local
+(1 row)
+
+--subpartition
+select * from gs_index_advise('select * from range_subpart_a partition(p_range_1) where col_2 = 2');
+ schema | table | column | indextype
+--------+-----------------+--------+-----------
+ public | range_subpart_a | col_2 | local
+(1 row)
+
+select * from gs_index_advise('select * from range_subpart_a subpartition(p_range_1_1 ) where col_3 =''2''');
+ schema | table | column | indextype
+--------+-----------------+--------+-----------
+ public | range_subpart_a | col_3 | local
+(1 row)
+
+select * from gs_index_advise('select * from range_subpart_a where col_1 =2 and col_2 = 3');
+ schema | table | column | indextype
+--------+-----------------+-------------+-----------
+ public | range_subpart_a | col_1,col_2 | local
+(1 row)
+
+--test global index
+--partition
+select * from gs_index_advise('select * from range_part_a where stu_id = ''10''');
+ schema | table | column | indextype
+--------+--------------+--------+-----------
+ public | range_part_a | stu_id | global
+(1 row)
+
+--subpartition
+select * from gs_index_advise('select * from range_subpart_a where col_1 = 10');
+ schema | table | column | indextype
+--------+-----------------+--------+-----------
+ public | range_subpart_a | col_1 | global
+(1 row)
+
+--test subquery
+--partition
+select * from gs_index_advise('select * from range_part_a where stu_id = (select stu_id from range_part_a where stu_id=''10'') and credit = 2');
+ schema | table | column | indextype
+--------+--------------+--------+-----------
+ public | range_part_a | credit | local
+ public | range_part_a | stu_id | global
+(2 rows)
+
+--subpartition
+select * from gs_index_advise('select * from range_subpart_a where col_1 = (select col_2 from range_part_a where col_3=''10'') and col_2 = 2');
+ schema | table | column | indextype
+--------+-----------------+--------+-----------
+ public | range_subpart_a | col_2 | global
+ public | range_part_a | |
+(2 rows)
+
+--test join
+--partition
+select * from gs_index_advise('select * from range_part_a join range_part_b on range_part_b.credit = range_part_a.credit where range_part_a.stu_id = ''12''');
+ schema | table | column | indextype
+--------+--------------+--------+-----------
+ public | range_part_a | stu_id | global
+ public | range_part_b | credit | local
+(2 rows)
+
+select * from gs_index_advise('select * from range_part_a join range_part_b partition(p_range_1) on range_part_a.stu_id = range_part_b.stu_id where range_part_a.stu_id = ''12''');
+ schema | table | column | indextype
+--------+--------------+--------+-----------
+ public | range_part_a | stu_id | global
+ public | range_part_b | stu_id | local
+(2 rows)
+
+select * from gs_index_advise('select * from range_part_a partition(p_range_1) join range_part_b partition(p_range_1) on range_part_a.stu_id =
range_part_b.stu_id where range_part_a.stu_id = ''12'''); + schema | table | column | indextype +--------+--------------+--------+----------- + public | range_part_a | stu_id | local + public | range_part_b | stu_id | local +(2 rows) + +--subpartition +select * from gs_index_advise('select * from range_subpart_a join range_subpart_b on range_subpart_b.col_2 = range_subpart_a.col_2 where range_subpart_a.col_3 = ''12'''); + schema | table | column | indextype +--------+-----------------+--------+----------- + public | range_subpart_a | col_3 | global + public | range_subpart_b | col_2 | global +(2 rows) + +select * from gs_index_advise('select * from range_part_a join range_subpart_b on range_part_a.credit = range_subpart_b.col_2 where range_subpart_b.col_3 = ''12'''); + schema | table | column | indextype +--------+-----------------+--------+----------- + public | range_part_a | credit | local + public | range_subpart_b | col_3 | global +(2 rows) + +select * from gs_index_advise('select * from range_subpart_a partition(p_range_1) join range_subpart_b subpartition(p_range_1_1) on range_subpart_a.col_3 = range_subpart_b.col_3 where range_subpart_a.col_3 = ''12'''); + schema | table | column | indextype +--------+-----------------+--------+----------- + public | range_subpart_a | col_3 | global + public | range_subpart_b | col_3 | local +(2 rows) + +--virtual index +select * from hypopg_create_index('create index on range_part_a(credit) local'); + indexrelid | indexname +------------+---------------------------------------- +--? .* | .*btree_local_range_part_a_credit +(1 row) + +select * from hypopg_create_index('create index on range_subpart_a(col_2) local'); + indexrelid | indexname +------------+------------------------------------------ +--? .* | .*btree_local_range_subpart_a_col_2 +(1 row) + +explain select * from range_part_a where stu_id = '10' and credit = 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ +--? Partition Iterator (.*) +--? Iterations: .* +--? -> Partitioned Index Scan using .*btree_local_range_part_a_credit on range_part_a (.*) + Index Cond: (credit = 2) + Filter: ((stu_id)::text = '10'::text) + Selected Partitions: 1 +--?(.* rows) + +select * from hypopg_create_index('create index on range_part_a(credit)'); + indexrelid | indexname +------------+----------------------------------------- +--? .* | .*btree_global_range_part_a_credit +(1 row) + +select * from hypopg_create_index('create index on range_subpart_a(col_2)'); + indexrelid | indexname +------------+------------------------------------------- +--? .* | .*btree_global_range_subpart_a_col_2 +(1 row) + +--test hypopg_display_index +SELECT * FROM hypopg_display_index(); + indexname | indexrelid | table | column +-------------------------------------------+------------+-----------------+---------- +--? .*btree_t1_col1 | .* | t1 | (col1) +--? .*btree_t2_col1 | .* | t2 | (col1) +--? .*btree_local_range_part_a_credit | .* | range_part_a | (credit) +--? .*btree_local_range_subpart_a_col_2 | .* | range_subpart_a | (col_2) +--? .*btree_global_range_part_a_credit | .* | range_part_a | (credit) +--? 
.*btree_global_range_subpart_a_col_2 | .* | range_subpart_a | (col_2) +(6 rows) + --test hypopg_reset_index SELECT * FROM hypopg_reset_index(); hypopg_reset_index @@ -104,5 +430,9 @@ SELECT * FROM hypopg_reset_index(); DROP TABLE t1; DROP TABLE t2; +DROP TABLE range_part_a; +DROP TABLE range_part_b; +DROP TABLE range_subpart_a; +DROP TABLE range_subpart_b; \c regression; drop database IF EXISTS pl_test_ind_adv; diff --git a/src/test/regress/expected/int16.out b/src/test/regress/expected/int16.out new file mode 100644 index 000000000..d0b98f1eb --- /dev/null +++ b/src/test/regress/expected/int16.out @@ -0,0 +1,259 @@ +-- +-- INT16 +-- Test int16 128-bit integers. +-- +CREATE SCHEMA schema_int16; +SET search_path = schema_int16; +SET ENABLE_BETA_FEATURES = OFF; +-- should fail +CREATE TABLE INT16_TBL(q1 int16, q2 int16, q3 serial); +ERROR: It's not supported to create int16 column +SET ENABLE_BETA_FEATURES = ON; +CREATE TABLE INT16_TBL(q1 int16, q2 int16, q3 serial); +NOTICE: CREATE TABLE will create implicit sequence "int16_tbl_q3_seq" for serial column "int16_tbl.q3" +-- do not support create btree index on int16 for now +CREATE INDEX int16idx on INT16_TBL(q1); +ERROR: data type int16 has no default operator class for access method "btree" +HINT: You must specify an operator class for the index or define a default operator class for the data type. +INSERT INTO INT16_TBL VALUES(' 123 ',' 456'); +INSERT INTO INT16_TBL VALUES('456 ','12345678901234567890123456789'); +INSERT INTO INT16_TBL VALUES('123456789012345678901234567890','123'); +INSERT INTO INT16_TBL VALUES(+1234567890123456789012345678901,'12345678901234567890123456789012'); +INSERT INTO INT16_TBL VALUES('+123456789012345678901234567890123','-1234567890123456789012345678901234'); +-- test boundary +INSERT INTO INT16_TBL VALUES(170141183460469231731687303715884105727, -170141183460469231731687303715884105728); +INSERT INTO INT16_TBL VALUES(170141183460469231731687303715884105728, 0); +ERROR: int128 out of range +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL VALUES(0, -170141183460469231731687303715884105729); +ERROR: int128 out of range +CONTEXT: referenced column: q2 +-- bad inputs +INSERT INTO INT16_TBL(q1) VALUES (' '); +ERROR: invalid input syntax for type int16: " " +LINE 1: INSERT INTO INT16_TBL(q1) VALUES (' '); + ^ +DETAIL: cannot convert input text to int16 +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL(q1) VALUES ('xxx'); +ERROR: invalid input syntax for type int16: "xxx" +LINE 1: INSERT INTO INT16_TBL(q1) VALUES ('xxx'); + ^ +DETAIL: cannot convert input text to int16 +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL(q1) VALUES ('4321170141183460469231731687303715884105727'); +ERROR: value "4321170141183460469231731687303715884105727" is out of range for type int16 +LINE 1: INSERT INTO INT16_TBL(q1) VALUES ('4321170141183460469231731... + ^ +DETAIL: text exceeds the length of int16 +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL(q1) VALUES ('-143170141183460469231731687303715884105727'); +ERROR: value "-143170141183460469231731687303715884105727" is out of range for type int16 +LINE 1: INSERT INTO INT16_TBL(q1) VALUES ('-143170141183460469231731... 
+ ^ +DETAIL: text exceeds the length of int16 +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL(q1) VALUES ('- 123'); +ERROR: invalid input syntax for type int16: "- 123" +LINE 1: INSERT INTO INT16_TBL(q1) VALUES ('- 123'); + ^ +DETAIL: cannot convert input text to int16 +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL(q1) VALUES (' 345 5'); +ERROR: invalid input syntax for type int16: " 345 5" +LINE 1: INSERT INTO INT16_TBL(q1) VALUES (' 345 5'); + ^ +DETAIL: text contain invalid character +CONTEXT: referenced column: q1 +INSERT INTO INT16_TBL(q1) VALUES (''); +-- do not support order for now +SELECT * FROM INT16_TBL ORDER BY q1,q2; +ERROR: could not identify an ordering operator for type int16 +LINE 1: SELECT * FROM INT16_TBL ORDER BY q1,q2; + ^ +HINT: Use an explicit ordering operator or modify the query. +-- support compare +SELECT count(*) FROM INT16_TBL WHERE q2 = q2 + 1; + count +------- + 0 +(1 row) + +SELECT count(*) FROM INT16_TBL WHERE q2 <> q2 - 1; +ERROR: int16 out of range +DETAIL: result is too large for int16 +SELECT count(*) FROM INT16_TBL WHERE q2 < q2 * 1; + count +------- + 0 +(1 row) + +SELECT count(*) FROM INT16_TBL WHERE q2 <= q2 / 1; + count +------- + 6 +(1 row) + +SELECT count(*) FROM INT16_TBL WHERE q2 <= q2; + count +------- + 6 +(1 row) + +SELECT count(*) FROM INT16_TBL WHERE q2 > q2 + 1; + count +------- + 0 +(1 row) + +SELECT count(*) FROM INT16_TBL WHERE q2 >= q2 + 1; + count +------- + 0 +(1 row) + +-- support type casts +\dC int16 + List of casts + Source type | Target type | Function | Implicit? +------------------+------------------+------------+--------------- + bigint | int16 | int16 | yes + boolean | int16 | int16 | yes + double precision | int16 | int16 | yes + int16 | bigint | int8 | in assignment + int16 | boolean | int16_bool | yes + int16 | double precision | float8 | yes + int16 | integer | int4 | in assignment + int16 | numeric | numeric | yes + int16 | oid | oid | yes + int16 | real | float4 | yes + int16 | smallint | int2 | in assignment + int16 | tinyint | i16toi1 | in assignment + integer | int16 | int16 | yes + numeric | int16 | int16 | yes + oid | int16 | int16 | yes + real | int16 | int16 | yes + smallint | int16 | int16 | yes + tinyint | int16 | int16 | yes +(18 rows) + +SELECT CAST(q1 AS int2) FROM INT16_TBL WHERE q3 < 3 ORDER BY q3; + q1 +----- + 123 + 456 +(2 rows) + +SELECT CAST(q1 AS int4) FROM INT16_TBL WHERE q3 < 3 ORDER BY q3; + q1 +----- + 123 + 456 +(2 rows) + +SELECT CAST(q1 AS int8) FROM INT16_TBL WHERE q3 < 3 ORDER BY q3; + q1 +----- + 123 + 456 +(2 rows) + +SELECT CAST(q1 AS numeric) FROM INT16_TBL ORDER BY q3; + q1 +----------------------------------------- + 123 + 456 + 123456789012345678901234567890 + 1234567890123456789012345678901 + 123456789012345678901234567890123 + 170141183460469231731687303715884105727 + +(7 rows) + +SELECT CAST(q1 AS float4) FROM INT16_TBL ORDER BY q3; + q1 +------------- + 123 + 456 + 1.23457e+29 + 1.23457e+30 + 1.23457e+32 + 1.70141e+38 + +(7 rows) + +SELECT CAST(q1 AS float8) FROM INT16_TBL ORDER BY q3; + q1 +---------------------- + 123 + 456 + 1.23456789012346e+29 + 1.23456789012346e+30 + 1.23456789012346e+32 + 1.70141183460469e+38 + +(7 rows) + +SELECT CAST(q1 AS boolean) FROM INT16_TBL ORDER BY q3; + q1 +---- + t + t + t + t + t + t + +(7 rows) + +-- some may overflow +SELECT CAST(q1 AS int2) FROM INT16_TBL ORDER BY q3; +ERROR: smallint out of range +DETAIL: cannot cast value too large for smallint +CONTEXT: referenced column: q1 +SELECT CAST(q1 AS int4) FROM INT16_TBL ORDER BY 
q3; +ERROR: integer out of range +DETAIL: cannot cast value too large for integer +CONTEXT: referenced column: q1 +SELECT CAST(q1 AS int8) FROM INT16_TBL ORDER BY q3; +ERROR: bigint out of range +DETAIL: cannot cast value too large for bigint +CONTEXT: referenced column: q1 +SELECT CAST(q1 AS oid) FROM INT16_TBL ORDER BY q3; +ERROR: OID out of range +DETAIL: cannot cast value too large for OID +CONTEXT: referenced column: q1 +-- cast to int16 +CREATE TABLE TEST_TBL( + v1 bigint, + v2 boolean, + v3 double precision, + v4 integer, + v5 numeric, + v6 oid, + v7 real, + v8 smallint, + v9 tinyint); +INSERT INTO TEST_TBL VALUES(1, 2, 3, 4, 5, 6, 7, 8, 9); +CREATE TABLE CAST_TBL( + v1 int16, + v2 int16, + v3 int16, + v4 int16, + v5 int16, + v6 int16, + v7 int16, + v8 int16, + v9 int16); +INSERT INTO CAST_TBL SELECT * FROM TEST_TBL; +SELECT * FROM CAST_TBL; + v1 | v2 | v3 | v4 | v5 | v6 | v7 | v8 | v9 +----+----+----+----+----+----+----+----+---- + 1 | 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 +(1 row) + +DROP SCHEMA schema_int16 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table int16_tbl +drop cascades to table test_tbl +drop cascades to table cast_tbl diff --git a/src/test/regress/expected/json_1.out b/src/test/regress/expected/json_1.out deleted file mode 100644 index 7cca40354..000000000 --- a/src/test/regress/expected/json_1.out +++ /dev/null @@ -1,1184 +0,0 @@ --- Strings. -SELECT '""'::json; -- OK. - json ------- - "" -(1 row) - -SELECT $$''$$::json; -- ERROR, single quotes are not allowed -ERROR: invalid input syntax for type json -LINE 1: SELECT $$''$$::json; - ^ -DETAIL: Token "'" is invalid. -CONTEXT: JSON data, line 1: '... -SELECT '"abc"'::json; -- OK - json -------- - "abc" -(1 row) - -SELECT '"abc'::json; -- ERROR, quotes not closed -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc'::json; - ^ -DETAIL: Token ""abc" is invalid. -CONTEXT: JSON data, line 1: "abc -SELECT '"abc -def"'::json; -- ERROR, unescaped newline in string constant -ERROR: invalid input syntax for type json -LINE 1: SELECT '"abc - ^ -DETAIL: Character with value 0x0a must be escaped. -CONTEXT: JSON data, line 1: "abc -SELECT '"\n\"\\"'::json; -- OK, legal escapes - json ----------- - "\n\"\\" -(1 row) - -SELECT '"\v"'::json; -- ERROR, not a valid JSON escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\v"'::json; - ^ -DETAIL: Escape sequence "\v" is invalid. -CONTEXT: JSON data, line 1: "\v... -SELECT '"\u"'::json; -- ERROR, incomplete escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\u"'::json; - ^ -DETAIL: "\u" must be followed by four hexadecimal digits. -CONTEXT: JSON data, line 1: "\u" -SELECT '"\u00"'::json; -- ERROR, incomplete escape -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\u00"'::json; - ^ -DETAIL: "\u" must be followed by four hexadecimal digits. -CONTEXT: JSON data, line 1: "\u00" -SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit -ERROR: invalid input syntax for type json -LINE 1: SELECT '"\u000g"'::json; - ^ -DETAIL: "\u" must be followed by four hexadecimal digits. -CONTEXT: JSON data, line 1: "\u000g... -SELECT '"\u0000"'::json; -- OK, legal escape - json ----------- - "\u0000" -(1 row) - -SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK - json ----------- - "\uaBcD" -(1 row) - --- Numbers. 
-SELECT '1'::json; -- OK - json ------- - 1 -(1 row) - -SELECT '0'::json; -- OK - json ------- - 0 -(1 row) - -SELECT '01'::json; -- ERROR, not valid according to JSON spec -ERROR: invalid input syntax for type json -LINE 1: SELECT '01'::json; - ^ -DETAIL: Token "01" is invalid. -CONTEXT: JSON data, line 1: 01 -SELECT '0.1'::json; -- OK - json ------- - 0.1 -(1 row) - -SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8 - json ---------------------- - 9223372036854775808 -(1 row) - -SELECT '1e100'::json; -- OK - json -------- - 1e100 -(1 row) - -SELECT '1.3e100'::json; -- OK - json ---------- - 1.3e100 -(1 row) - -SELECT '1f2'::json; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1f2'::json; - ^ -DETAIL: Token "1f2" is invalid. -CONTEXT: JSON data, line 1: 1f2 -SELECT '0.x1'::json; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '0.x1'::json; - ^ -DETAIL: Token "0.x1" is invalid. -CONTEXT: JSON data, line 1: 0.x1 -SELECT '1.3ex100'::json; -- ERROR -ERROR: invalid input syntax for type json -LINE 1: SELECT '1.3ex100'::json; - ^ -DETAIL: Token "1.3ex100" is invalid. -CONTEXT: JSON data, line 1: 1.3ex100 --- Arrays. -SELECT '[]'::json; -- OK - json ------- - [] -(1 row) - -SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK - json ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] -(1 row) - -SELECT '[1,2]'::json; -- OK - json -------- - [1,2] -(1 row) - -SELECT '[1,2,]'::json; -- ERROR, trailing comma -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2,]'::json; - ^ -DETAIL: Expected JSON value, but found "]". -CONTEXT: JSON data, line 1: [1,2,] -SELECT '[1,2'::json; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,2'::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,2 -SELECT '[1,[2]'::json; -- ERROR, no closing bracket -ERROR: invalid input syntax for type json -LINE 1: SELECT '[1,[2]'::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: [1,[2] --- Objects. -SELECT '{}'::json; -- OK - json ------- - {} -(1 row) - -SELECT '{"abc"}'::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"}'::json; - ^ -DETAIL: Expected ":", but found "}". -CONTEXT: JSON data, line 1: {"abc"} -SELECT '{"abc":1}'::json; -- OK - json ------------ - {"abc":1} -(1 row) - -SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings -ERROR: invalid input syntax for type json -LINE 1: SELECT '{1:"abc"}'::json; - ^ -DETAIL: Expected string or "}", but found "1". -CONTEXT: JSON data, line 1: {1... -SELECT '{"abc",1}'::json; -- ERROR, wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc",1}'::json; - ^ -DETAIL: Expected ":", but found ",". -CONTEXT: JSON data, line 1: {"abc",... 
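-- The object errors above all exercise one grammar rule: an object is zero or
-- more "string key": value pairs, with ":" between key and value and "," between
-- pairs. A minimal sketch of a literal satisfying all three (illustration only,
-- not an executed test case):
--   SELECT '{"abc":1,"def":[2,3]}'::json;  -- parses cleanly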
-SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"=1}'::json; - ^ -DETAIL: Token "=" is invalid. -CONTEXT: JSON data, line 1: {"abc"=... -SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc"::1}'::json; - ^ -DETAIL: Expected JSON value, but found ":". -CONTEXT: JSON data, line 1: {"abc"::... -SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK - json ---------------------------------------------------------- - {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}} -(1 row) - -SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1:2}'::json; - ^ -DETAIL: Expected "," or "}", but found ":". -CONTEXT: JSON data, line 1: {"abc":1:... -SELECT '{"abc":1,3}'::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT '{"abc":1,3}'::json; - ^ -DETAIL: Expected string, but found "3". -CONTEXT: JSON data, line 1: {"abc":1,3... --- Miscellaneous stuff. -SELECT 'true'::json; -- OK - json ------- - true -(1 row) - -SELECT 'false'::json; -- OK - json -------- - false -(1 row) - -SELECT 'null'::json; -- OK - json ------- - null -(1 row) - -SELECT ' true '::json; -- OK, even with extra whitespace - json --------- - true -(1 row) - -SELECT 'true false'::json; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true false'::json; - ^ -DETAIL: Expected end of input, but found "false". -CONTEXT: JSON data, line 1: true false -SELECT 'true, false'::json; -- ERROR, too many values -ERROR: invalid input syntax for type json -LINE 1: SELECT 'true, false'::json; - ^ -DETAIL: Expected end of input, but found ",". -CONTEXT: JSON data, line 1: true,... -SELECT 'truf'::json; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'truf'::json; - ^ -DETAIL: Token "truf" is invalid. -CONTEXT: JSON data, line 1: truf -SELECT 'trues'::json; -- ERROR, not a keyword -ERROR: invalid input syntax for type json -LINE 1: SELECT 'trues'::json; - ^ -DETAIL: Token "trues" is invalid. -CONTEXT: JSON data, line 1: trues -SELECT ''::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ''::json; - ^ -DETAIL: The input string ended unexpectedly. -CONTEXT: JSON data, line 1: -SELECT ' '::json; -- ERROR, no value -ERROR: invalid input syntax for type json -LINE 1: SELECT ' '::json; - ^ -DETAIL: The input string ended unexpectedly. 
-CONTEXT: JSON data, line 1: ---constructors --- array_to_json -SELECT array_to_json(array(select 1 as a)); - array_to_json ---------------- - [1] -(1 row) - -SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q; - array_to_json ---------------------------------------------- - [{"b":1,"c":2},{"b":2,"c":4},{"b":3,"c":6}] -(1 row) - -SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q; - array_to_json ------------------ - [{"b":1,"c":2},+ - {"b":2,"c":4},+ - {"b":3,"c":6}] -(1 row) - -SELECT array_to_json(array_agg(q),false) - FROM ( SELECT $$a$$ || x AS b, y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - array_to_json -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]},{"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] -(1 row) - -SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x; - array_to_json ----------------- - [5,6,7,8,9,10] -(1 row) - -SELECT array_to_json('{{1,5},{99,100}}'::int[]); - array_to_json ------------------- - [[1,5],[99,100]] -(1 row) - --- row_to_json -SELECT row_to_json(row(1,'foo')); - row_to_json ---------------------- - {"f1":1,"f2":"foo"} -(1 row) - -SELECT row_to_json(q) -FROM (SELECT $$a$$ || x AS b, - y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - row_to_json --------------------------------------------------------------------- - {"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} - {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} -(4 rows) - -SELECT row_to_json(q,true) -FROM (SELECT $$a$$ || x AS b, - y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - row_to_json ------------------------------------------------------ - {"b":"a1", + - "c":4, + - "z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a1", + - "c":5, + - "z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} - {"b":"a2", + - "c":4, + - "z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} - {"b":"a2", + - "c":5, + - "z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} -(4 rows) - -CREATE TEMP TABLE rows AS -SELECT x, 'txt' || x as y -FROM generate_series(1,3) AS x; -SELECT row_to_json(q,true) -FROM rows q; - row_to_json --------------- - {"x":1, + - "y":"txt1"} - {"x":2, + - "y":"txt2"} - {"x":3, + - "y":"txt3"} -(3 rows) - -SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false); - row_to_json ------------------------ - {"f1":[5,6,7,8,9,10]} -(1 row) - ---json_agg -SELECT json_agg(q) - FROM ( SELECT $$a$$ || x AS b, y AS c, - ARRAY[ROW(x.*,ARRAY[1,2,3]), - ROW(y.*,ARRAY[4,5,6])] AS z - FROM generate_series(1,2) x, - generate_series(4,5) y) q; - json_agg 
------------------------------------------------------------------------ - [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, + - {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}, + - {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, + - {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] -(1 row) - -SELECT json_agg(q) - FROM rows q; - json_agg ------------------------ - [{"x":1,"y":"txt1"}, + - {"x":2,"y":"txt2"}, + - {"x":3,"y":"txt3"}] -(1 row) - --- non-numeric output -SELECT row_to_json(q) -FROM (SELECT 'NaN'::float8 AS "float8field") q; - row_to_json ------------------------ - {"float8field":"NaN"} -(1 row) - -SELECT row_to_json(q) -FROM (SELECT 'Infinity'::float8 AS "float8field") q; - row_to_json ----------------------------- - {"float8field":"Infinity"} -(1 row) - -SELECT row_to_json(q) -FROM (SELECT '-Infinity'::float8 AS "float8field") q; - row_to_json ------------------------------ - {"float8field":"-Infinity"} -(1 row) - --- json input -SELECT row_to_json(q) -FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q; - row_to_json ------------------------------------------------------------------- - {"jsonfield":{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}} -(1 row) - --- json extraction functions -CREATE TEMP TABLE test_json ( - json_type text, - test_json json -); -INSERT INTO test_json VALUES -('scalar','"a scalar"'), -('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), -('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); -SELECT test_json -> 'x' -FROM test_json -WHERE json_type = 'scalar'; -ERROR: cannot extract element from a scalar -SELECT test_json -> 'x' -FROM test_json -WHERE json_type = 'array'; -ERROR: cannot extract field from a non-object -SELECT test_json -> 'x' -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - -(1 row) - -SELECT test_json->'field2' -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - "val2" -(1 row) - -SELECT test_json->>'field2' -FROM test_json -WHERE json_type = 'object'; - ?column? ----------- - val2 -(1 row) - -SELECT test_json -> 2 -FROM test_json -WHERE json_type = 'scalar'; -ERROR: cannot extract element from a scalar -SELECT test_json -> 2 -FROM test_json -WHERE json_type = 'array'; - ?column? ----------- - "two" -(1 row) - -SELECT test_json -> 2 -FROM test_json -WHERE json_type = 'object'; -ERROR: cannot extract array element from a non-array -SELECT test_json->>2 -FROM test_json -WHERE json_type = 'array'; - ?column? ----------- - two -(1 row) - -SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array'; - ?column? ----------- - [1,2,3] -(1 row) - -SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array'; - ?column? ----------- - {"f1":9} -(1 row) - -SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object'; - ?column? ----------- - 4 -(1 row) - -SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object'; - ?column? ----------- - [1,2,3] -(1 row) - -SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object'; - ?column? 
----------- - {"f1":9} -(1 row) - -SELECT json_object_keys(test_json) -FROM test_json -WHERE json_type = 'scalar'; -ERROR: cannot call json_object_keys on a scalar -SELECT json_object_keys(test_json) -FROM test_json -WHERE json_type = 'array'; -ERROR: cannot call json_object_keys on an array -SELECT json_object_keys(test_json) -FROM test_json -WHERE json_type = 'object'; - json_object_keys ------------------- - field1 - field2 - field3 - field4 - field5 - field6 -(6 rows) - --- test extending object_keys resultset - initial resultset size is 256 -select count(*) from - (select json_object_keys(json_object(array_agg(g))) - from (select unnest(array['f'||n,n::text])as g - from generate_series(1,300) as n) x ) y; - count -------- - 300 -(1 row) - --- nulls -select (test_json->'field3') is null as expect_false -from test_json -where json_type = 'object'; - expect_false --------------- - f -(1 row) - -select (test_json->>'field3') is null as expect_true -from test_json -where json_type = 'object'; - expect_true -------------- - t -(1 row) - -select (test_json->3) is null as expect_false -from test_json -where json_type = 'array'; - expect_false --------------- - f -(1 row) - -select (test_json->>3) is null as expect_true -from test_json -where json_type = 'array'; - expect_true -------------- - t -(1 row) - --- array length -SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]'); - json_array_length -------------------- - 5 -(1 row) - -SELECT json_array_length('[]'); - json_array_length -------------------- - 0 -(1 row) - -SELECT json_array_length('{"f1":1,"f2":[5,6]}'); -ERROR: cannot get array length of a non-array -SELECT json_array_length('4'); -ERROR: cannot get array length of a scalar --- each -select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}'); - json_each -------------------- - (f1,"[1,2,3]") - (f2,"{""f3"":1}") - (f4,null) -(3 rows) - -select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+----------- - f1 | [1,2,3] - f2 | {"f3":1} - f4 | null - f5 | 99 - f6 | "stringy" -(5 rows) - -select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}'); - json_each_text -------------------- - (f1,"[1,2,3]") - (f2,"{""f3"":1}") - (f4,) - (f5,null) -(4 rows) - -select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; - key | value ------+---------- - f1 | [1,2,3] - f2 | {"f3":1} - f4 | - f5 | 99 - f6 | stringy -(5 rows) - --- extract_path, extract_path_as_text -select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - json_extract_path -------------------- - "stringy" -(1 row) - -select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - json_extract_path -------------------- - {"f3":1} -(1 row) - -select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - json_extract_path -------------------- - "f3" -(1 row) - -select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - json_extract_path -------------------- - 1 -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); - json_extract_path_text ------------------------- - stringy -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); - json_extract_path_text ------------------------- - {"f3":1} -(1 row) - -select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); - json_extract_path_text 
------------------------- - f3 -(1 row) - -select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); - json_extract_path_text ------------------------- - 1 -(1 row) - --- extract_path nulls -select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false; - expect_false --------------- - f -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true; - expect_true -------------- - t -(1 row) - -select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false; - expect_false --------------- - f -(1 row) - -select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true; - expect_true -------------- - t -(1 row) - --- extract_path operators -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>array['f4','f6']; - ?column? ------------ - "stringy" -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>array['f2']; - ?column? ----------- - {"f3":1} -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>array['f2','0']; - ?column? ----------- - "f3" -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>array['f2','1']; - ?column? ----------- - 1 -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>>array['f4','f6']; - ?column? ----------- - stringy -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>>array['f2']; - ?column? ----------- - {"f3":1} -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>>array['f2','0']; - ?column? ----------- - f3 -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>>array['f2','1']; - ?column? ----------- - 1 -(1 row) - --- same using array literals -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>'{f4,f6}'; - ?column? ------------ - "stringy" -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>'{f2}'; - ?column? ----------- - {"f3":1} -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>'{f2,0}'; - ?column? ----------- - "f3" -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>'{f2,1}'; - ?column? ----------- - 1 -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>>'{f4,f6}'; - ?column? ----------- - stringy -(1 row) - -select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json #>>'{f2}'; - ?column? ----------- - {"f3":1} -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>>'{f2,0}'; - ?column? ----------- - f3 -(1 row) - -select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json #>>'{f2,1}'; - ?column? 
----------- - 1 -(1 row) - --- array_elements -select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); - json_array_elements ------------------------ - 1 - true - [1,[2,3]] - null - {"f1":1,"f2":[7,8,9]} - false - "stringy" -(7 rows) - -select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; - value ------------------------ - 1 - true - [1,[2,3]] - null - {"f1":1,"f2":[7,8,9]} - false - "stringy" -(7 rows) - -select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); - json_array_elements_text --------------------------- - 1 - true - [1,[2,3]] - - {"f1":1,"f2":[7,8,9]} - false - stringy -(7 rows) - -select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; - value ------------------------ - 1 - true - [1,[2,3]] - - {"f1":1,"f2":[7,8,9]} - false - stringy -(7 rows) - --- populate_record -create type jpop as (a text, b int, c timestamp); -select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}', true) q; - a | b | c ---------+---+--- - blurfl | | -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}', true) q; - a | b | c ---------+---+-------------------------- - blurfl | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}', true) q; - a | b | c ------------------+---+--- - [100,200,false] | | -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}', true) q; - a | b | c ------------------+---+-------------------------- - [100,200,false] | 3 | Mon Dec 31 15:30:56 2012 -(1 row) - -select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}', true) q; -ERROR: invalid input syntax for type timestamp: "[100,200,false]" --- populate_recordset -select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]',false) q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]',false) q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]',true) q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]',true) q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]',true) q; - a | b | c ----------------+----+-------------------------- - [100,200,300] | 99 | - {"z":true} | 3 | 
Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]',true) q; -ERROR: invalid input syntax for type timestamp: "[100,200,300]" --- using the default use_json_as_text argument -select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+---+-------------------------- - blurfl | | - | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; - a | b | c ---------+----+-------------------------- - blurfl | 99 | - def | 3 | Fri Jan 20 10:42:53 2012 -(2 rows) - -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; -ERROR: cannot call json_populate_recordset on a nested object -select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; -ERROR: cannot call json_populate_recordset on a nested object --- handling of unicode surrogate pairs -select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8; -ERROR: invalid input syntax for type json -DETAIL: Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8. -CONTEXT: JSON data, line 1: { "a":... -select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row -ERROR: invalid input syntax for type json -DETAIL: Unicode high surrogate must not follow a high surrogate. -CONTEXT: JSON data, line 1: { "a":... -select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order -ERROR: invalid input syntax for type json -DETAIL: Unicode low surrogate must follow a high surrogate. -CONTEXT: JSON data, line 1: { "a":... -select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate -ERROR: invalid input syntax for type json -DETAIL: Unicode low surrogate must follow a high surrogate. -CONTEXT: JSON data, line 1: { "a":... -select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate -ERROR: invalid input syntax for type json -DETAIL: Unicode low surrogate must follow a high surrogate. -CONTEXT: JSON data, line 1: { "a":... ---handling of simple unicode escapes -select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8; -ERROR: invalid input syntax for type json -DETAIL: Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8. -CONTEXT: JSON data, line 1: { "a":... 
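-- The surrogate diagnostics above follow the standard UTF-16 pairing rule (this
-- arithmetic is general Unicode, not taken from the test file): a high surrogate
-- (U+D800..U+DBFF) must be followed by a low surrogate (U+DC00..U+DFFF), and the
-- code point is rebuilt as
--   0x10000 + (high - 0xD800) * 0x400 + (low - 0xDC00)
-- so "\ud83d\ude04" decodes to 0x10000 + 0x3D * 0x400 + 0x204 = 0x1F604, a code
-- point above U+007F, representable only when the server encoding is UTF8.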
-select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere; - correct_everywhere --------------------- - dollar $ character -(1 row) - -select json '{ "a": "null \u0000 escape" }' ->> 'a' as not_unescaped; - not_unescaped --------------------- - null \u0000 escape -(1 row) - ---json_typeof() function -select value, json_typeof(value) - from (values (json '123.4'), - (json '-1'), - (json '"foo"'), - (json 'true'), - (json 'false'), - (json 'null'), - (json '[1, 2, 3]'), - (json '[]'), - (json '{"x":"foo", "y":123}'), - (json '{}'), - (NULL::json)) - as data(value); - value | json_typeof -----------------------+------------- - 123.4 | number - -1 | number - "foo" | string - true | boolean - false | boolean - null | null - [1, 2, 3] | array - [] | array - {"x":"foo", "y":123} | object - {} | object - | -(11 rows) - --- json_build_array, json_build_object, json_object_agg -SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_array ------------------------------------------------------------------------ - ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}] -(1 row) - -SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); - json_build_object ----------------------------------------------------------------------------- - {"a" : 1, "b" : 1.2, "c" : true, "d" : null, "e" : {"x": 3, "y": [1,2,3]}} -(1 row) - -SELECT json_build_object( - 'a', json_build_object('b',false,'c',99), - 'd', json_build_object('e',array[9,8,7]::int[], - 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - json_build_object -------------------------------------------------------------------------------------------------- - {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}} -(1 row) - --- empty objects/arrays -SELECT json_build_array(); - json_build_array ------------------- - [] -(1 row) - -SELECT json_build_object(); - json_build_object -------------------- - {} -(1 row) - --- make sure keys are quoted -SELECT json_build_object(1,2); - json_build_object -------------------- - {"1" : 2} -(1 row) - --- keys must be scalar and not null -SELECT json_build_object(null,2); -ERROR: arg 1: key cannot be null -SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; -ERROR: key value must be scalar, not array, composite or json -SELECT json_build_object(json '{"a":1,"b":2}', 3); -ERROR: key value must be scalar, not array, composite or json -SELECT json_build_object('{1,2,3}'::int[], 3); -ERROR: key value must be scalar, not array, composite or json -CREATE TEMP TABLE foo (serial_num int, name text, type text); -INSERT INTO foo VALUES (847001,'t15','GE1043'); -INSERT INTO foo VALUES (847002,'t16','GE1043'); -INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); -SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type))) -FROM foo; - json_build_object -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }} -(1 row) - --- json_object --- one dimension -SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); - json_object 
-------------------------------------------------------- - {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} -(1 row) - --- same but with two dimensions -SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); - json_object -------------------------------------------------------- - {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} -(1 row) - --- odd number error -SELECT json_object('{a,b,c}'); -ERROR: array must have even number of elements --- one column error -SELECT json_object('{{a},{b}}'); -ERROR: array must have two columns --- too many columns error -SELECT json_object('{{a,b,c},{b,c,d}}'); -ERROR: array must have two columns --- too many dimensions error -SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); -ERROR: wrong number of array subscripts ---two argument form of json_object -select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); - json_object ------------------------------------------------------- - {"a" : "1", "b" : "2", "c" : "3", "d e f" : "a b c"} -(1 row) - --- too many dimensions -SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); -ERROR: wrong number of array subscripts --- mismatched dimensions -select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); -ERROR: mismatched array dimensions -select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); -ERROR: mismatched array dimensions --- null key error -select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); -ERROR: null value not allowed for object key --- empty key error -select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); -ERROR: empty value not allowed for object key --- json_to_record and json_to_recordset -select * from json_to_record('{"a":1,"b":"foo","c":"bar"}',true) - as x(a int, b text, d text); - a | b | d ----+-----+--- - 1 | foo | -(1 row) - -select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]',false) - as x(a int, b text, c boolean); - a | b | c ----+-----+--- - 1 | foo | - 2 | bar | t -(2 rows) - diff --git a/src/test/regress/expected/large_sequence.out b/src/test/regress/expected/large_sequence.out new file mode 100644 index 000000000..e7a389f03 --- /dev/null +++ b/src/test/regress/expected/large_sequence.out @@ -0,0 +1,772 @@ +CREATE SCHEMA large_sequence; +SET CURRENT_SCHEMA = large_sequence; +-- test psql support +CREATE SEQUENCE S1; +CREATE LARGE SEQUENCE S2; +\d +--?.* +--?.* +--?.* +--? large_sequence | s1 | sequence | .* | +--? large_sequence | s2 | large sequence | .* | +(2 rows) + +\ds +--?.* +--?.* +--?.* +--? large_sequence | s1 | sequence | .* | +--? 
large_sequence | s2 | large sequence | .* | +(2 rows) + +\d S1 + Sequence "large_sequence.s1" + Column | Type | Value +---------------+---------+--------------------- + sequence_name | name | s1 + last_value | bigint | 1 + start_value | bigint | 1 + increment_by | bigint | 1 + max_value | bigint | 9223372036854775807 + min_value | bigint | 1 + cache_value | bigint | 1 + log_cnt | bigint | 0 + is_cycled | boolean | f + is_called | boolean | f + uuid | bigint | 0 + +\d S2 + Large Sequence "large_sequence.s2" + Column | Type | Value +---------------+---------+----------------------------------------- + sequence_name | name | s2 + last_value | int16 | 1 + start_value | int16 | 1 + increment_by | int16 | 1 + max_value | int16 | 170141183460469231731687303715884105727 + min_value | int16 | 1 + cache_value | int16 | 1 + log_cnt | bigint | 0 + is_cycled | boolean | f + is_called | boolean | f + uuid | bigint | 0 + +COMMENT ON LARGE SEQUENCE S2 IS 'FOO'; +COMMENT ON LARGE SEQUENCE S2 IS NULL; +-- temp sequences not support +CREATE TEMP LARGE SEQUENCE myseq2; +ERROR: Temporary sequences are not supported +CREATE TEMP LARGE SEQUENCE myseq3; +ERROR: Temporary sequences are not supported +-- default no cache, no start, no cycle +CREATE LARGE SEQUENCE S +INCREMENT 17014118346046923173168730371588410572 +MINVALUE 17014118346046923173168730371588410573 +MAXVALUE 170141183460469231731687303715884105721; +-- basic api +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 51042355038140769519506191114765231717 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 68056473384187692692674921486353642289 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 85070591730234615865843651857942052861 +(1 row) + +SELECT * FROM nextval('s'); + nextval +----------------------------------------- + 102084710076281539039012382229530463433 +(1 row) + +SELECT * FROM nextval('s'); + nextval +----------------------------------------- + 119098828422328462212181112601118874005 +(1 row) + +SELECT * FROM nextval('s'); + nextval +----------------------------------------- + 136112946768375385385349842972707284577 +(1 row) + +SELECT * FROM nextval('s'); + nextval +----------------------------------------- + 153127065114422308558518573344295695149 +(1 row) + +SELECT * FROM nextval('s'); + nextval +----------------------------------------- + 170141183460469231731687303715884105721 +(1 row) + +SELECT * FROM nextval('s'); +ERROR: nextval: reached maximum value of sequence "s" (170141183460469231731687303715884105721) +SELECT * FROM setval('s', 17014118346046923173168730371588410573); + setval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM lastval(); + lastval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM currval('s'); + currval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM setval('s', 17014118346046923173168730371588410573, FALSE); + setval 
+---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM setval('s'::text, 17014118346046923173168730371588410573); + setval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM setval('s'::text, 17014118346046923173168730371588410573, FALSE); + setval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'::text); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM currval('s'::text); + currval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM setval('s'::regclass, 17014118346046923173168730371588410573); + setval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM setval('s'::regclass, 17014118346046923173168730371588410573, FALSE); + setval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'::regclass); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM currval('s'::regclass); + currval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +-- needs drop large sequence +DROP SEQUENCE S; +ERROR: "s" is not a sequence +HINT: Use DROP LARGE SEQUENCE to remove a large sequence. +DROP LARGE SEQUENCE S; +-- cycle +CREATE LARGE SEQUENCE S +INCREMENT 17014118346046923173168730371588410572 +MINVALUE 17014118346046923173168730371588410573 +MAXVALUE 51042355038140769519506191114765231717 +CYCLE; +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 51042355038140769519506191114765231717 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 51042355038140769519506191114765231717 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +DROP LARGE SEQUENCE S; +-- cache +CREATE LARGE SEQUENCE S +INCREMENT 17014118346046923173168730371588410572 +MINVALUE 17014118346046923173168730371588410573 +MAXVALUE 170141183460469231731687303715884105721 +CACHE 5; +NOTICE: Not advised to use MAXVALUE or MINVALUE together with CACHE. +DETAIL: If CACHE is defined, some sequence values may be wasted, causing available sequence numbers to be less than expected. 
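-- A note on the NOTICE above, assuming the usual sequence-cache semantics (the
-- test itself only shows the warning): CACHE 5 lets each session reserve five
-- values at a time, and reserved-but-unused values are never reissued.
-- Hypothetical interleaving:
--   session A: SELECT nextval('s');  -- reserves a block of five values, returns the first
--   session B: SELECT nextval('s');  -- continues after A's block; if A exits
--                                    -- early, A's four unused values are wasted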
+SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 17014118346046923173168730371588410573 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 34028236692093846346337460743176821145 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 51042355038140769519506191114765231717 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 68056473384187692692674921486353642289 +(1 row) + +SELECT * FROM nextval('s'); + nextval +---------------------------------------- + 85070591730234615865843651857942052861 +(1 row) + +SELECT * FROM nextval('s'); + nextval +----------------------------------------- + 102084710076281539039012382229530463433 +(1 row) + +DROP LARGE SEQUENCE S; +-- start with +CREATE LARGE SEQUENCE S +INCREMENT -17014118346046923173168730371588410572 +MINVALUE 17014118346046923173168730371588410573 +MAXVALUE 170141183460469231731687303715884105721 +START WITH 170141183460469231731687303715884105720 +CACHE 5; +NOTICE: Not advised to use MAXVALUE or MINVALUE together with CACHE. +DETAIL: If CACHE is defined, some sequence values may be wasted, causing available sequence numbers to be less than expected. +DROP LARGE SEQUENCE S; +CREATE LARGE SEQUENCE S +INCREMENT 100000000000000000000000000000000000 +MINVALUE 100000000000000000000000000000000000 +MAXVALUE 100000000000000000000000000000000000000; +-- can create sequence with default nextval() +CREATE TABLE TAB_SEQ(c1 numeric, c2 numeric default nextval('S'), c3 serial); +NOTICE: CREATE TABLE will create implicit sequence "tab_seq_c3_seq" for serial column "tab_seq.c3" +INSERT INTO TAB_SEQ VALUES(0); +INSERT INTO TAB_SEQ VALUES(0); +INSERT INTO TAB_SEQ VALUES(0); +INSERT INTO TAB_SEQ VALUES(0); +INSERT INTO TAB_SEQ VALUES(0); +INSERT INTO TAB_SEQ VALUES(0); +SELECT * FROM TAB_SEQ ORDER BY c3; + c1 | c2 | c3 +----+--------------------------------------+---- + 0 | 100000000000000000000000000000000000 | 1 + 0 | 200000000000000000000000000000000000 | 2 + 0 | 300000000000000000000000000000000000 | 3 + 0 | 400000000000000000000000000000000000 | 4 + 0 | 500000000000000000000000000000000000 | 5 + 0 | 600000000000000000000000000000000000 | 6 +(6 rows) + +\d TAB_SEQ + Table "large_sequence.tab_seq" + Column | Type | Modifiers +--------+---------+------------------------------------------------------ + c1 | numeric | + c2 | numeric | default nextval('s'::regclass) + c3 | integer | not null default nextval('tab_seq_c3_seq'::regclass) + +-- cannot drop +DROP LARGE SEQUENCE S; +ERROR: cannot drop large sequence s because other objects depend on it +DETAIL: default for table tab_seq column c2 depends on large sequence s +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP LARGE SEQUENCE S CASCADE; +NOTICE: drop cascades to default for table tab_seq column c2 +-- default value is dropped accordingly +\d TAB_SEQ + Table "large_sequence.tab_seq" + Column | Type | Modifiers +--------+---------+------------------------------------------------------ + c1 | numeric | + c2 | numeric | + c3 | integer | not null default nextval('tab_seq_c3_seq'::regclass) + +-- alter sequence +CREATE LARGE SEQUENCE foo; +-- rename not supported +ALTER LARGE SEQUENCE foo RENAME TO bar; +ERROR: RENAME SEQUENCE is not yet supported. 
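-- Since RENAME is rejected, a possible workaround (hypothetical, not from the
-- test suite) is to recreate under the new name, carry the counter over with
-- setval(), and drop the original:
--   CREATE LARGE SEQUENCE bar;
--   SELECT setval('bar', nextval('foo'));
--   DROP LARGE SEQUENCE foo;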
+SELECT * FROM foo; + sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid +---------------+------------+-------------+--------------+-----------------------------------------+-----------+-------------+---------+-----------+-----------+------ + foo | 1 | 1 | 1 | 170141183460469231731687303715884105727 | 1 | 1 | 0 | f | f | 0 +(1 row) + +-- alter maxvalue - ok +ALTER LARGE SEQUENCE foo MAXVALUE 1000; +-- alter owner role - ok +CREATE ROLE role_foo PASSWORD '!@#123qwe'; +ALTER LARGE SEQUENCE foo OWNER TO role_foo; +-- alter owner column - fail if owners are different +CREATE TABLE tab_foo (a bigint); +ALTER LARGE SEQUENCE foo OWNED BY tab_foo.a; +ERROR: sequence must have same owner as table it is linked to +DROP LARGE SEQUENCE IF EXISTS foo; +CREATE LARGE SEQUENCE foo; +ALTER LARGE SEQUENCE IF EXISTS foo OWNED BY tab_foo.a; +-- owner column set OK +DROP TABLE tab_foo; +-- alter if exists works +ALTER LARGE SEQUENCE IF EXISTS foo MAXVALUE 100; +NOTICE: relation "foo" does not exist, skipping +CREATE LARGE SEQUENCE foo INCREMENT 10 CYCLE; +ALTER LARGE SEQUENCE IF EXISTS foo MAXVALUE 30; +SELECT * FROM nextval('foo'); + nextval +--------- + 1 +(1 row) + +SELECT * FROM nextval('foo'); + nextval +--------- + 11 +(1 row) + +SELECT * FROM nextval('foo'); + nextval +--------- + 21 +(1 row) + +SELECT * FROM nextval('foo'); + nextval +--------- + 1 +(1 row) + +ALTER LARGE SEQUENCE IF EXISTS foo NO MAXVALUE; +SELECT * FROM nextval('foo'); + nextval +--------- + 11 +(1 row) + +SELECT * FROM nextval('foo'); + nextval +--------- + 21 +(1 row) + +SELECT * FROM nextval('foo'); + nextval +--------- + 31 +(1 row) + +SELECT * FROM nextval('foo'); + nextval +--------- + 41 +(1 row) + +ALTER LARGE SEQUENCE IF EXISTS foo NOMAXVALUE; +-- alter other attributes are not supported +ALTER LARGE SEQUENCE IF EXISTS foo MINVALUE 1; +ERROR: ALTER SEQUENCE is not yet supported. +ALTER LARGE SEQUENCE IF EXISTS foo NO CYCLE; +ERROR: ALTER SEQUENCE is not yet supported. +ALTER LARGE SEQUENCE IF EXISTS foo START 1; +ERROR: ALTER SEQUENCE is not yet supported. +ALTER LARGE SEQUENCE IF EXISTS foo CACHE 100; +-- test for largeserial +CREATE TABLE serialTest (f1 text, f2 largeserial); +NOTICE: CREATE TABLE will create implicit sequence "serialtest_f2_seq" for serial column "serialtest.f2" +INSERT INTO serialTest VALUES ('foo'); +INSERT INTO serialTest VALUES ('bar'); +INSERT INTO serialTest VALUES ('force', 17014118346046923173168730371588410573); +INSERT INTO serialTest VALUES ('wrong', NULL); +ERROR: null value in column "f2" violates not-null constraint +DETAIL: Failing row contains (wrong, null).
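-- A sketch of what the NOTICE above implies (the exact expansion is an
-- assumption): "f2 largeserial" behaves roughly like
--   f2 int16 NOT NULL DEFAULT nextval('serialtest_f2_seq'::regclass)
-- backed by the implicitly created large sequence, which is why an explicit
-- NULL trips the not-null constraint while omitting f2 draws the next value.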
+SELECT * FROM serialTest; + f1 | f2 +-------+---------------------------------------- + foo | 1 + bar | 2 + force | 17014118346046923173168730371588410573 +(3 rows) + +CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2, + f5 bigserial, f6 serial8, f7 largeserial, f8 serial16); +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f2_seq" for serial column "serialtest2.f2" +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f3_seq" for serial column "serialtest2.f3" +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f4_seq" for serial column "serialtest2.f4" +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f5_seq" for serial column "serialtest2.f5" +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f6_seq" for serial column "serialtest2.f6" +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f7_seq" for serial column "serialtest2.f7" +NOTICE: CREATE TABLE will create implicit sequence "serialtest2_f8_seq" for serial column "serialtest2.f8" +INSERT INTO serialTest2 (f1) + VALUES ('test_defaults'); +INSERT INTO serialTest2 (f1, f2, f3, f4, f5, f6, f7, f8) + VALUES ('test_max_vals', 2147483647, 32767, 32767, 9223372036854775807, + 9223372036854775807, 170141183460469231731687303715884105727, 170141183460469231731687303715884105727), + ('test_min_vals', -2147483648, -32768, -32768, -9223372036854775808, + -9223372036854775808, -170141183460469231731687303715884105728, -170141183460469231731687303715884105728); +INSERT INTO serialTest2 (f1, f7) + VALUES ('bogus', -170141183460469231731687303715884105729); +INSERT INTO serialTest2 (f1, f8) + VALUES ('bogus', -170141183460469231731687303715884105729); +INSERT INTO serialTest2 (f1, f7) + VALUES ('bogus', 170141183460469231731687303715884105728); +INSERT INTO serialTest2 (f1, f8) + VALUES ('bogus', 170141183460469231731687303715884105728); +SELECT * FROM serialTest2 ORDER BY f2 ASC; + f1 | f2 | f3 | f4 | f5 | f6 | f7 | f8 +---------------+-------------+--------+--------+----------------------+----------------------+------------------------------------------+------------------------------------------ + test_min_vals | -2147483648 | -32768 | -32768 | -9223372036854775808 | -9223372036854775808 | -170141183460469231731687303715884105728 | -170141183460469231731687303715884105728 + test_defaults | 1 | 1 | 1 | 1 | 1 | 1 | 1 + bogus | 2 | 2 | 2 | 2 | 2 | -170141183460469231731687303715884105729 | 2 + bogus | 3 | 3 | 3 | 3 | 3 | 2 | -170141183460469231731687303715884105729 + bogus | 4 | 4 | 4 | 4 | 4 | 170141183460469231731687303715884105728 | 3 + bogus | 5 | 5 | 5 | 5 | 5 | 3 | 170141183460469231731687303715884105728 + test_max_vals | 2147483647 | 32767 | 32767 | 9223372036854775807 | 9223372036854775807 | 170141183460469231731687303715884105727 | 170141183460469231731687303715884105727 +(7 rows) + +SELECT nextval('serialTest2_f2_seq'); + nextval +--------- + 6 +(1 row) + +SELECT nextval('serialTest2_f3_seq'); + nextval +--------- + 6 +(1 row) + +SELECT nextval('serialTest2_f4_seq'); + nextval +--------- + 6 +(1 row) + +SELECT nextval('serialTest2_f5_seq'); + nextval +--------- + 6 +(1 row) + +SELECT nextval('serialTest2_f6_seq'); + nextval +--------- + 6 +(1 row) + +SELECT nextval('serialTest2_f7_seq'); + nextval +--------- + 4 +(1 row) + +SELECT nextval('serialTest2_f8_seq'); + nextval +--------- + 4 +(1 row) + +-- Create table like +CREATE TABLE cat (like serialTest2); +NOTICE: CREATE TABLE will create implicit sequence "cat_f2_seq" for serial column 
"cat.f2" +NOTICE: CREATE TABLE will create implicit sequence "cat_f3_seq" for serial column "cat.f3" +NOTICE: CREATE TABLE will create implicit sequence "cat_f4_seq" for serial column "cat.f4" +NOTICE: CREATE TABLE will create implicit sequence "cat_f5_seq" for serial column "cat.f5" +NOTICE: CREATE TABLE will create implicit sequence "cat_f6_seq" for serial column "cat.f6" +NOTICE: CREATE TABLE will create implicit sequence "cat_f7_seq" for serial column "cat.f7" +NOTICE: CREATE TABLE will create implicit sequence "cat_f8_seq" for serial column "cat.f8" +INSERT INTO cat (f1) + VALUES ('eins'); +INSERT INTO cat (f1) + VALUES ('zwei'); +INSERT INTO cat (f1) + VALUES ('drei'); +INSERT INTO cat (f1) + VALUES ('funf'); +SELECT * FROM cat; + f1 | f2 | f3 | f4 | f5 | f6 | f7 | f8 +------+----+----+----+----+----+----+---- + eins | 1 | 1 | 1 | 1 | 1 | 1 | 1 + zwei | 2 | 2 | 2 | 2 | 2 | 2 | 2 + drei | 3 | 3 | 3 | 3 | 3 | 3 | 3 + funf | 4 | 4 | 4 | 4 | 4 | 4 | 4 +(4 rows) + +-- renaming serial sequences +ALTER TABLE serialtest_f7_seq RENAME TO serialtest_f7_foo; +ERROR: relation "serialtest_f7_seq" does not exist +INSERT INTO serialTest VALUES ('more'); +SELECT * FROM serialTest; + f1 | f2 +-------+---------------------------------------- + foo | 1 + bar | 2 + force | 17014118346046923173168730371588410573 + more | 3 +(4 rows) + +-- Check dependencies of serial and ordinary sequences +CREATE LARGE SEQUENCE myseq2; +CREATE LARGE SEQUENCE myseq3; +-- Cannot have type cast in nextval's argument +CREATE TABLE t1 ( + f1 serial, + f2 numeric DEFAULT nextval('myseq2'), + f3 numeric DEFAULT nextval('myseq3'::text) +); +NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1" +ERROR: argument of nextval() must be plain const value +CREATE TABLE t1 ( + f1 largeserial, + f2 numeric DEFAULT nextval('myseq2'), + f3 numeric DEFAULT nextval('myseq3') +); +NOTICE: CREATE TABLE will create implicit sequence "t1_f1_seq" for serial column "t1.f1" +-- Both drops should fail +DROP LARGE SEQUENCE t1_f1_seq; +ERROR: cannot drop large sequence t1_f1_seq because other objects depend on it +DETAIL: default for table t1 column f1 depends on large sequence t1_f1_seq +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP LARGE SEQUENCE myseq2; +ERROR: cannot drop large sequence myseq2 because other objects depend on it +DETAIL: default for table t1 column f2 depends on large sequence myseq2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+DROP TABLE t1; +-- Fails because it no longer exists: +DROP SEQUENCE t1_f1_seq; +ERROR: sequence "t1_f1_seq" does not exist +DROP LARGE SEQUENCE myseq2; +-- Information schema does not support large sequences for now +SELECT * FROM information_schema.sequences WHERE sequence_name in ('myseq3'); + sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option +------------------+-----------------+---------------+-----------+-------------------+-------------------------+---------------+-------------+---------------+-----------------------------------------+-----------+-------------- + regression | large_sequence | myseq3 | int16 | 128 | 2 | 0 | 1 | 1 | 170141183460469231731687303715884105727 | 1 | NO +(1 row) + +-- Privilege check +CREATE LARGE SEQUENCE priv_seq; +CREATE ROLE zeus PASSWORD '123qwe!@#'; +GRANT SELECT ON priv_seq TO zeus; +GRANT ALL ON SCHEMA large_sequence TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +SELECT * FROM priv_seq; + sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid +---------------+------------+-------------+--------------+-----------------------------------------+-----------+-------------+---------+-----------+-----------+------ + priv_seq | 1 | 1 | 1 | 170141183460469231731687303715884105727 | 1 | 1 | 0 | f | f | 0 +(1 row) + +\d priv_seq + Large Sequence "large_sequence.priv_seq" + Column | Type | Value +---------------+---------+----------------------------------------- + sequence_name | name | priv_seq + last_value | int16 | 1 + start_value | int16 | 1 + increment_by | int16 | 1 + max_value | int16 | 170141183460469231731687303715884105727 + min_value | int16 | 1 + cache_value | int16 | 1 + log_cnt | bigint | 0 + is_cycled | boolean | f + is_called | boolean | f + uuid | bigint | 0 + +SELECT nextval('priv_seq'); +ERROR: permission denied for sequence priv_seq +CONTEXT: referenced column: nextval +ALTER LARGE SEQUENCE priv_seq MAXVALUE 100; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO'; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +DROP LARGE SEQUENCE priv_seq; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +RESET ROLE; +GRANT UPDATE ON priv_seq TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +SELECT nextval('priv_seq'); + nextval +--------- + 1 +(1 row) + +ALTER LARGE SEQUENCE priv_seq MAXVALUE 100; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO'; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +DROP LARGE SEQUENCE priv_seq; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +RESET ROLE; +GRANT USAGE ON priv_seq TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +ALTER LARGE SEQUENCE priv_seq MAXVALUE 100; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO'; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +DROP LARGE SEQUENCE priv_seq; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +RESET ROLE; +GRANT ALTER ON priv_seq TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +ALTER LARGE SEQUENCE priv_seq MAXVALUE 100; +COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO'; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +DROP LARGE SEQUENCE priv_seq; +ERROR: permission denied for relation priv_seq +DETAIL: N/A
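-- Summary of the privilege checks so far (the COMMENT and DROP grants follow
-- below): each action on a large sequence is gated by its own grant, and no
-- grant implies another.
--   GRANT SELECT  -> read the sequence relation (SELECT * FROM priv_seq, \d)
--   GRANT UPDATE  -> nextval()
--   GRANT ALTER   -> ALTER LARGE SEQUENCE
--   GRANT COMMENT -> COMMENT ON LARGE SEQUENCE
--   GRANT DROP    -> DROP LARGE SEQUENCE
-- Note that GRANT USAGE above did not unlock ALTER, COMMENT, or DROP.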
+RESET ROLE; +GRANT COMMENT ON priv_seq TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO'; +DROP LARGE SEQUENCE priv_seq; +ERROR: permission denied for relation priv_seq +DETAIL: N/A +RESET ROLE; +GRANT DROP ON priv_seq TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +DROP LARGE SEQUENCE priv_seq; +RESET ROLE; +CREATE SCHEMA seq_priv_schema; +CREATE LARGE SEQUENCE seq_priv_schema.priv_seq; +GRANT ALL ON SCHEMA seq_priv_schema TO zeus; +GRANT ALL ON ALL SEQUENCES IN SCHEMA seq_priv_schema TO zeus; +SET ROLE zeus PASSWORD '123qwe!@#'; +SET current_schema = seq_priv_schema; +SELECT * FROM priv_seq; + sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid +---------------+------------+-------------+--------------+-----------------------------------------+-----------+-------------+---------+-----------+-----------+------ + priv_seq | 1 | 1 | 1 | 170141183460469231731687303715884105727 | 1 | 1 | 0 | f | f | 0 +(1 row) + +\d priv_seq + Large Sequence "seq_priv_schema.priv_seq" + Column | Type | Value +---------------+---------+----------------------------------------- + sequence_name | name | priv_seq + last_value | int16 | 1 + start_value | int16 | 1 + increment_by | int16 | 1 + max_value | int16 | 170141183460469231731687303715884105727 + min_value | int16 | 1 + cache_value | int16 | 1 + log_cnt | bigint | 0 + is_cycled | boolean | f + is_called | boolean | f + uuid | bigint | 0 + +SELECT nextval('priv_seq'); + nextval +--------- + 1 +(1 row) + +ALTER LARGE SEQUENCE priv_seq MAXVALUE 100; +COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO'; +DROP LARGE SEQUENCE priv_seq; +RESET ROLE; +DROP SCHEMA seq_priv_schema CASCADE; +REVOKE ALL ON SCHEMA large_sequence FROM zeus; +DROP ROLE zeus; +SET current_schema = large_sequence; +create table ctest (a int) with (orientation=column); +create user ctest_user password 'huawei@123'; +alter table ctest owner to "ctest_user"; +drop user ctest_user cascade; +DROP ROLE role_foo; +DROP SCHEMA large_sequence CASCADE; +NOTICE: drop cascades to 8 other objects +DETAIL: drop cascades to sequence s1 +drop cascades to large sequence s2 +drop cascades to table tab_seq +drop cascades to large sequence foo +drop cascades to table serialtest +drop cascades to table serialtest2 +drop cascades to table cat +drop cascades to large sequence myseq3 diff --git a/src/test/regress/expected/leaky_function_operator.out b/src/test/regress/expected/leaky_function_operator.out index d920965c5..5a820a8a5 100644 --- a/src/test/regress/expected/leaky_function_operator.out +++ b/src/test/regress/expected/leaky_function_operator.out @@ -1,22 +1,22 @@ -- test leaky-function protections in selfuncs -- regress_user1 will own a table and provide a view for it. -grant all on schema public to public; create user regress_user1 password 'gauss@123'; create user regress_user2 password 'gauss@123'; SET SESSION AUTHORIZATION regress_user1 password 'gauss@123'; -CREATE TABLE public.atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; -CREATE INDEX ON public.atest12 (a); -CREATE INDEX ON public.atest12 (abs(a)); -VACUUM ANALYZE public.atest12; +CREATE TABLE atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; +CREATE INDEX ON atest12 (a); +CREATE INDEX ON atest12 (abs(a)); +VACUUM ANALYZE atest12; +GRANT USAGE ON SCHEMA regress_user1 TO regress_user2; -- Check if regress_user2 can break security. 
SET SESSION AUTHORIZATION regress_user2 password 'gauss@123'; -CREATE FUNCTION public.leak20(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ LANGUAGE plpgsql immutable; -CREATE OPERATOR >>>> (procedure = public.leak20, leftarg = integer, rightarg = integer,restrict = scalargtsel); +CREATE FUNCTION leak20(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ LANGUAGE plpgsql immutable; +CREATE OPERATOR >>>> (procedure = leak20, leftarg = integer, rightarg = integer,restrict = scalargtsel); -- This should not show any "leak" notices before failing. -EXPLAIN (COSTS OFF) SELECT * FROM public.atest12 ; +EXPLAIN (COSTS OFF) SELECT * FROM regress_user1.atest12; ERROR: permission denied for relation atest12 DETAIL: N/A -- This should not show any "leak" notices before failing.(After Patch) -EXPLAIN (COSTS OFF) SELECT * FROM public.atest12 WHERE a >>>> 99; +EXPLAIN (COSTS OFF) SELECT * FROM regress_user1.atest12 WHERE a >>>> 99; ERROR: permission denied for relation atest12 DETAIL: N/A diff --git a/src/test/regress/expected/limit1.out b/src/test/regress/expected/limit1.out index 4de2931ee..7cfe0f954 100644 --- a/src/test/regress/expected/limit1.out +++ b/src/test/regress/expected/limit1.out @@ -154,6 +154,15 @@ select a.c1, b.c2, b.c3 from (select c1 from limit_table_03 order by c1 offset 1 16 | 16 | 36 (5 rows) +explain (verbose, costs off) select * from limit_table_01 where rownum <= 10; + QUERY PLAN +--------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Seq Scan on distribute_limit.limit_table_01 + Output: c1, c2, c3 +(4 rows) + drop table limit_table_01; drop table limit_table_02; drop table limit_table_03; diff --git a/src/test/regress/expected/llvm_vecagg3.out b/src/test/regress/expected/llvm_vecagg3.out index d5a1b832c..f1e55859a 100644 --- a/src/test/regress/expected/llvm_vecagg3.out +++ b/src/test/regress/expected/llvm_vecagg3.out @@ -273,6 +273,27 @@ select sum(col_num3), avg(col_num3-(-0.25)) from llvm_vecagg_table_03 group by c (13 rows) reset analysis_options; +---- +--- test5 : test distinct in aggregate functions +---- +select col_int, max(distinct col_num1 order by col_num1), min(col_num2 order by col_num2) from llvm_vecagg_table_03 group by col_int order by 1, 2, 3; + col_int | max | min +---------+--------------------+--------------------- + -74 | 3.60000 | -1 + -56 | 56987.25600 | 222337203685475805 + 1 | 360287970189.63967 | 12233720368547758 + 2 | 232879701892.63967 | -923720368547758098 + 6 | 0.00000 | 0 + 8 | .10000 | 1 + 25 | 0.00000 | -2 + 36 | 98125.87589 | 12233720368 + 52 | | + 67 | -15478.25600 | 223372036854775806 + 458 | 7.25412 | + 6985 | 4587962145.36000 | 922337203685473807 + | 0.00000 | 0 +(13 rows) + ---- --- clean table and resource ---- diff --git a/src/test/regress/expected/macaddr.out b/src/test/regress/expected/macaddr.out index ad5498ca7..1b1464be4 100644 --- a/src/test/regress/expected/macaddr.out +++ b/src/test/regress/expected/macaddr.out @@ -41,6 +41,7 @@ SELECT * FROM macaddr_data ORDER BY a; CREATE INDEX macaddr_data_btree ON macaddr_data USING btree (b); CREATE INDEX macaddr_data_hash ON macaddr_data USING hash (b); +ERROR: access method "hash" does not support row store SELECT a, b, trunc(b) FROM macaddr_data ORDER BY 2, 1; a | b | trunc ----+-------------------+------------------- diff --git a/src/test/regress/expected/merge_1.out b/src/test/regress/expected/merge_1.out index 02e66e9dc..7054ab0cc 100644 --- 
a/src/test/regress/expected/merge_1.out
+++ b/src/test/regress/expected/merge_1.out
@@ -60,7 +60,7 @@ WHEN MATCHED THEN
 QUERY PLAN
 -------------------------------------------------------------------------------------------------------------------------------------------
 Merge on mergeinto_1.products_row p
- Update Cond: (p.product_id = 5)
+ Update Cond: (np.product_id = 5)
 -> Hash Join
 Output: np.product_id, np.product_name, np.category, np.total, p.product_id, p.product_name, p.category, p.total, p.ctid, np.ctid
 Hash Cond: (p.product_id = np.product_id)
diff --git a/src/test/regress/expected/merge_misc.out b/src/test/regress/expected/merge_misc.out
index 0e9fc69b0..92ecd420a 100644
--- a/src/test/regress/expected/merge_misc.out
+++ b/src/test/regress/expected/merge_misc.out
@@ -11,15 +11,16 @@ USING { { table_name | view_name } | subquery } [ [ AS ] alias ]
 ON ( condition )
 [
 WHEN MATCHED THEN
- UPDATE SET { column_name = { expression | DEFAULT } |
- ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]
+ UPDATE SET { column_name = { expression | subquery | DEFAULT } |
+ ( column_name [, ...] ) = ( { expression | subquery | DEFAULT } [, ...] ) } [, ...]
 [ WHERE condition ]
 ]
 [
 WHEN NOT MATCHED THEN
 INSERT { DEFAULT VALUES |
- [ ( column_name [, ...] ) ] VALUES ( { expression | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] }
+ [ ( column_name [, ...] ) ] VALUES ( { expression | subquery | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] }
 ];
+NOTICE: 'subquery' in the UPDATE and INSERT clauses is only available in CENTRALIZED mode!
 -- information_schema.sql_features
 SELECT * FROM information_schema.sql_features WHERE feature_name LIKE '%MERGE%' ORDER BY 1;
diff --git a/src/test/regress/expected/merge_subquery.out b/src/test/regress/expected/merge_subquery.out
new file mode 100644
index 000000000..2fc480e3a
--- /dev/null
+++ b/src/test/regress/expected/merge_subquery.out
@@ -0,0 +1,190 @@
+create table merge_subquery_test1(id int, val int);
+create table merge_subquery_test2(id int, val int);
+insert into merge_subquery_test1 values(generate_series(1, 10), generate_series(1, 5));
+insert into merge_subquery_test2 values(generate_series(1, 5), generate_series(21, 25));
+insert into merge_subquery_test2 values(generate_series(11, 15), generate_series(11, 15));
+explain merge into merge_subquery_test1 mg1
+using merge_subquery_test2 mg2 on mg1.id=mg2.id
+when matched then
+  update set mg1.val=(select mg2.val+mg1.val)
+when not matched then
+  insert values(mg2.id, mg2.val);
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------
+ Merge on merge_subquery_test1 mg1 (cost=58.35..355.67 rows=23091 width=28)
+   -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28)
+      Hash Cond: (mg2.id = mg1.id)
+      -> Seq Scan on merge_subquery_test2 mg2 (cost=0.00..31.49 rows=2149 width=14)
+      -> Hash (cost=31.49..31.49 rows=2149 width=14)
+         -> Seq Scan on merge_subquery_test1 mg1 (cost=0.00..31.49 rows=2149 width=14)
+ SubPlan 1
+   -> Result (cost=0.00..0.01 rows=1 width=0)
+(8 rows)
+
+
+merge into merge_subquery_test1 mg1
+using merge_subquery_test2 mg2 on mg1.id=mg2.id
+when matched then
+  update set mg1.val=(select mg2.val+mg1.val)
+when not matched then
+  insert values(mg2.id, mg2.val);
+select * from merge_subquery_test1;
+ id | val
+----+-----
+ 6 | 1
+ 7 | 2
+ 8 | 3
+ 9 | 4
+ 10 | 5
+ 1 | 22
+ 2 | 24
+ 3 | 26
+ 4 | 28
+ 5 | 30
+ 11 | 11
+ 12 | 12
+ 13 | 13
+ 14 | 14
+ 15 | 15
+(15 rows)
+
+delete from merge_subquery_test1 where id 
> 10; + +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_test2 mg3) +when not matched then + insert values(mg2.id, mg2.val); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Merge on merge_subquery_test1 mg1 (cost=95.23..392.55 rows=23091 width=28) + InitPlan 1 (returns $0) + -> Aggregate (cost=36.86..36.87 rows=1 width=12) + -> Seq Scan on merge_subquery_test2 mg3 (cost=0.00..31.49 rows=2149 width=4) + -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28) + Hash Cond: (mg2.id = mg1.id) + -> Seq Scan on merge_subquery_test2 mg2 (cost=0.00..31.49 rows=2149 width=14) + -> Hash (cost=31.49..31.49 rows=2149 width=14) + -> Seq Scan on merge_subquery_test1 mg1 (cost=0.00..31.49 rows=2149 width=14) +(9 rows) + +merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_test2 mg3) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_test1; + id | val +----+----- + 6 | 1 + 7 | 2 + 8 | 3 + 9 | 4 + 10 | 5 + 1 | 180 + 2 | 180 + 3 | 180 + 4 | 180 + 5 | 180 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 +(15 rows) + +delete from merge_subquery_test1 where id > 10; +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg2.val * 2)); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Merge on merge_subquery_test1 mg1 (cost=58.35..355.67 rows=23091 width=28) + -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28) + Hash Cond: (mg2.id = mg1.id) + -> Seq Scan on merge_subquery_test2 mg2 (cost=0.00..31.49 rows=2149 width=14) + -> Hash (cost=31.49..31.49 rows=2149 width=14) + -> Seq Scan on merge_subquery_test1 mg1 (cost=0.00..31.49 rows=2149 width=14) + SubPlan 1 + -> Result (cost=0.00..0.01 rows=1 width=0) +(8 rows) + +merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg2.val * 2)); +select * from merge_subquery_test1; + id | val +----+----- + 6 | 1 + 7 | 2 + 8 | 3 + 9 | 4 + 10 | 5 + 1 | 21 + 2 | 22 + 3 | 23 + 4 | 24 + 5 | 25 + 11 | 22 + 12 | 24 + 13 | 26 + 14 | 28 + 15 | 30 +(15 rows) + +delete from merge_subquery_test1 where id > 10; +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg3.val from merge_subquery_test1 mg3 limit 1)); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Merge on merge_subquery_test1 mg1 (cost=58.37..355.69 rows=23091 width=28) + InitPlan 1 (returns $0) + -> Limit (cost=0.00..0.01 rows=1 width=4) + -> Seq Scan on merge_subquery_test1 mg3 (cost=0.00..31.49 rows=2149 width=4) + -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28) + Hash Cond: (mg2.id = mg1.id) + -> Seq Scan on merge_subquery_test2 mg2 (cost=0.00..31.49 rows=2149 width=14) + -> Hash (cost=31.49..31.49 rows=2149 width=14) + -> Seq Scan on merge_subquery_test1 mg1 (cost=0.00..31.49 rows=2149 width=14) +(9 rows) + +merge into 
merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg3.val from merge_subquery_test1 mg3 limit 1)); +select * from merge_subquery_test1; + id | val +----+----- + 6 | 1 + 7 | 2 + 8 | 3 + 9 | 4 + 10 | 5 + 1 | 21 + 2 | 22 + 3 | 23 + 4 | 24 + 5 | 25 + 11 | 1 + 12 | 1 + 13 | 1 + 14 | 1 + 15 | 1 +(15 rows) + +drop table merge_subquery_test1; +drop table merge_subquery_test2; diff --git a/src/test/regress/expected/merge_subquery2.out b/src/test/regress/expected/merge_subquery2.out new file mode 100644 index 000000000..bcd99e180 --- /dev/null +++ b/src/test/regress/expected/merge_subquery2.out @@ -0,0 +1,196 @@ +create schema merge_subquery2; +set current_schema=merge_subquery2; +create table merge_subquery_utest1(id int, val int) with(storage_type=ustore); +create table merge_subquery_utest2(id int, val int) with(storage_type=ustore); +insert into merge_subquery_utest1 values(generate_series(1, 10), generate_series(1, 5)); +insert into merge_subquery_utest2 values(generate_series(1, 5), generate_series(21, 25)); +insert into merge_subquery_utest2 values(generate_series(11, 15), generate_series(11, 15)); +explain merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg2.val+mg1.val) +when not matched then + insert values(mg2.id, mg2.val); + QUERY PLAN +------------------------------------------------------------------------------------------------ + Merge on merge_subquery_utest1 mg1 (cost=58.35..355.67 rows=23091 width=28) + -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28) + Hash Cond: (mg2.id = mg1.id) + -> Seq Scan on merge_subquery_utest2 mg2 (cost=0.00..31.49 rows=2149 width=14) + -> Hash (cost=31.49..31.49 rows=2149 width=14) + -> Seq Scan on merge_subquery_utest1 mg1 (cost=0.00..31.49 rows=2149 width=14) + SubPlan 1 + -> Result (cost=0.00..0.01 rows=1 width=0) +(8 rows) + +START TRANSACTION; +merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg2.val+mg1.val) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_utest1; + id | val +----+----- + 1 | 22 + 2 | 24 + 3 | 26 + 4 | 28 + 5 | 30 + 6 | 1 + 7 | 2 + 8 | 3 + 9 | 4 + 10 | 5 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 +(15 rows) + +ROLLBACK; +explain merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_utest2 mg3) +when not matched then + insert values(mg2.id, mg2.val); + QUERY PLAN +------------------------------------------------------------------------------------------------ + Merge on merge_subquery_utest1 mg1 (cost=95.23..392.55 rows=23091 width=28) + InitPlan 1 (returns $0) + -> Aggregate (cost=36.86..36.87 rows=1 width=12) + -> Seq Scan on merge_subquery_utest2 mg3 (cost=0.00..31.49 rows=2149 width=4) + -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28) + Hash Cond: (mg2.id = mg1.id) + -> Seq Scan on merge_subquery_utest2 mg2 (cost=0.00..31.49 rows=2149 width=14) + -> Hash (cost=31.49..31.49 rows=2149 width=14) + -> Seq Scan on merge_subquery_utest1 mg1 (cost=0.00..31.49 rows=2149 width=14) +(9 rows) + +START TRANSACTION; +merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from 
merge_subquery_utest2 mg3) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_utest1; + id | val +----+----- + 1 | 180 + 2 | 180 + 3 | 180 + 4 | 180 + 5 | 180 + 6 | 1 + 7 | 2 + 8 | 3 + 9 | 4 + 10 | 5 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 +(15 rows) + +ROLLBACK; +explain merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg3.val from (select * from merge_subquery_utest1) as mg3 where mg3.id in (select id from merge_subquery_utest2) limit 1) +when not matched then + insert values(mg2.id, mg2.val); + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Merge on merge_subquery_utest1 mg1 (cost=93.55..390.87 rows=23091 width=28) + InitPlan 1 (returns $0) + -> Limit (cost=0.00..35.20 rows=1 width=4) + -> Nested Loop Semi Join (cost=0.00..37804.61 rows=1074 width=4) + Join Filter: (merge_subquery_utest1.id = merge_subquery_utest2.id) + -> Seq Scan on merge_subquery_utest1 (cost=0.00..31.49 rows=2149 width=8) + -> Materialize (cost=0.00..42.23 rows=2149 width=4) + -> Seq Scan on merge_subquery_utest2 (cost=0.00..31.49 rows=2149 width=4) + -> Hash Left Join (cost=58.35..355.67 rows=23091 width=28) + Hash Cond: (mg2.id = mg1.id) + -> Seq Scan on merge_subquery_utest2 mg2 (cost=0.00..31.49 rows=2149 width=14) + -> Hash (cost=31.49..31.49 rows=2149 width=14) + -> Seq Scan on merge_subquery_utest1 mg1 (cost=0.00..31.49 rows=2149 width=14) +(13 rows) + +START TRANSACTION; +merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg3.val from (select * from merge_subquery_utest1) as mg3 where mg3.id in (select id from merge_subquery_utest2) limit 1) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_utest1; + id | val +----+----- + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 | 2 + 8 | 3 + 9 | 4 + 10 | 5 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 +(15 rows) + +ROLLBACK; +-- subpartition +create table partition_table(id int, val1 int, val2 int, val3 int) +partition by range (id) subpartition by list (val1) +( + partition p_1 values less than(5) + ( + subpartition p_11 values ('1','2'), + subpartition p_12 values ('3','4'), + subpartition p_13 values ('5') + ), + partition p_2 values less than(10) + ( + subpartition p_21 values ('1','2'), + subpartition p_22 values ('3','4'), + subpartition p_23 values ('5') + ), + partition p_3 values less than(20) + ( + subpartition p_31 values ('1','2'), + subpartition p_32 values ('3','4'), + subpartition p_33 values ('5') + ) +); +insert into partition_table values(generate_series(1, 10), generate_series(1,5), generate_series(1,2), generate_series(1,10)); +explain +merge into partition_table t1 +using merge_subquery_utest2 t2 on t1.id=t2.id +when matched then + update set t1.val2 = (select t2.val + t1.val2) and + t1.val3 = (select t3.id from merge_subquery_utest1 t3 where id=3) +when not matched then + insert values(t2.id, t2.val, (select t4.val from merge_subquery_utest1 t4 where id=7), t2.val*2); +ERROR: Subpartition is not supported for MERGE INTO +START TRANSACTION; +merge into partition_table t1 +using merge_subquery_utest2 t2 on t1.id=t2.id +when matched then + update set t1.val2 = (select t2.val + t1.val2) and + t1.val3 = (select t3.id from merge_subquery_utest1 t3 where id=3) +when not matched then + insert values(t2.id, t2.val, (select t4.val 
from merge_subquery_utest1 t4 where id=7), t2.val*2);
+ERROR: Subpartition is not supported for MERGE INTO
+select * from partition_table;
+ERROR: current transaction is aborted, commands ignored until end of transaction block, firstChar[Q]
+ROLLBACK;
diff --git a/src/test/regress/expected/merge_subquery3.out b/src/test/regress/expected/merge_subquery3.out
new file mode 100644
index 000000000..ff331499d
--- /dev/null
+++ b/src/test/regress/expected/merge_subquery3.out
@@ -0,0 +1,444 @@
+-- unrelated helper table
+DROP TABLE IF EXISTS tmp_table;
+NOTICE: table "tmp_table" does not exist, skipping
+CREATE TABLE tmp_table (id int, null_val date);
+INSERT INTO tmp_table VALUES(generate_series(1,10), null);
+-- ********************************
+-- * target table: row store; source table: row store *
+-- ********************************
+DROP TABLE IF EXISTS target_table, source_table;
+NOTICE: table "target_table" does not exist, skipping
+NOTICE: table "source_table" does not exist, skipping
+CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=ROW);
+CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=ROW);
+INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20));
+INSERT INTO source_table VALUES (21, null, null, null);
+-- correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null in the select column */),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- non-correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null in the select column */),
+    (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+-------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 8.000000000
+ 7 | A12Z | | 8.000000000
+ 8 | A13Z | | 8.000000000
+ 9 | A14Z | | 8.000000000
+ 10 | A15Z | | 8.000000000
+ 16 | A16Z | 2000/03/17 | 2.000000000
+ 17 | A17Z | 2000/03/18 | 2.000000000
+ 18 | A18Z | 2000/03/19 | 2.000000000
+ 19 | A19Z | 2000/03/20 | 2.000000000
+ 20 | A20Z | 2000/03/21 | 2.000000000
+ 21 | | | 2.000000000
+(16 rows)
+
+-- nested subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- ********************************
+-- * target table: column store; source table: column store *
+-- ********************************
+DROP TABLE IF EXISTS target_table, source_table;
+CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=COLUMN);
+CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=COLUMN);
+INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20));
+INSERT INTO source_table VALUES (21, null, null, null);
+-- correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null in the select column */),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- non-correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null in the select column */),
+    (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+-------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 8.000000000
+ 7 | A12Z | | 8.000000000
+ 8 | A13Z | | 8.000000000
+ 9 | A14Z | | 8.000000000
+ 10 | A15Z | | 8.000000000
+ 16 | A16Z | 2000/03/17 | 2.000000000
+ 17 | A17Z | 2000/03/18 | 2.000000000
+ 18 | A18Z | 2000/03/19 | 2.000000000
+ 19 | A19Z | 2000/03/20 | 2.000000000
+ 20 | A20Z | 2000/03/21 | 2.000000000
+ 21 | | | 2.000000000
+(16 rows)
+
+-- nested subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- ********************************
+-- * target table: row store; source table: column store *
+-- ********************************
+DROP TABLE IF EXISTS target_table, source_table;
+CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=ROW);
+CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=COLUMN);
+INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20));
+INSERT INTO source_table VALUES (21, null, null, null);
+-- correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null in the select column */),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- non-correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null in the select column */),
+    (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+-------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 8.000000000
+ 7 | A12Z | | 8.000000000
+ 8 | A13Z | | 8.000000000
+ 9 | A14Z | | 8.000000000
+ 10 | A15Z | | 8.000000000
+ 16 | A16Z | 2000/03/17 | 2.000000000
+ 17 | A17Z | 2000/03/18 | 2.000000000
+ 18 | A18Z | 2000/03/19 | 2.000000000
+ 19 | A19Z | 2000/03/20 | 2.000000000
+ 20 | A20Z | 2000/03/21 | 2.000000000
+ 21 | | | 2.000000000
+(16 rows)
+
+-- nested subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- ********************************
+-- * target table: column store; source table: row store *
+-- ********************************
+DROP TABLE IF EXISTS target_table, source_table;
+CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=COLUMN);
+CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9))
+WITH (ORIENTATION=ROW);
+INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20));
+INSERT INTO source_table VALUES (21, null, null, null);
+-- correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null in the select column */),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+-- non-correlated subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null in the select column */),
+    (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+-------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 8.000000000
+ 7 | A12Z | | 8.000000000
+ 8 | A13Z | | 8.000000000
+ 9 | A14Z | | 8.000000000
+ 10 | A15Z | | 8.000000000
+ 16 | A16Z | 2000/03/17 | 2.000000000
+ 17 | A17Z | 2000/03/18 | 2.000000000
+ 18 | A18Z | 2000/03/19 | 2.000000000
+ 19 | A19Z | 2000/03/20 | 2.000000000
+ 20 | A20Z | 2000/03/21 | 2.000000000
+ 21 | | | 2.000000000
+(16 rows)
+
+-- nested subqueries
+TRUNCATE target_table;
+INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10));
+MERGE INTO target_table t
+  USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT column */) = s.c1
+WHEN MATCHED THEN
+  UPDATE SET (c2, c3, c4) = (s.c2,
+    (SELECT c3 FROM source_table WHERE c1 = 21),
+    (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */)
+WHEN NOT MATCHED THEN
+  INSERT VALUES (s.c1, s.c2, s.c3,
+    (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */);
+SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1;
+ c1 | c2 | to_char | c4
+----+------+------------+--------------
+ 1 | A1Z | 2000/03/02 | 1.000000000
+ 2 | A2Z | 2000/03/03 | 2.000000000
+ 3 | A3Z | 2000/03/04 | 3.000000000
+ 4 | A4Z | 2000/03/05 | 4.000000000
+ 5 | A5Z | 2000/03/06 | 5.000000000
+ 6 | A11Z | | 1.000000000
+ 7 | A12Z | | 2.000000000
+ 8 | A13Z | | 3.000000000
+ 9 | A14Z | | 4.000000000
+ 10 | A15Z | | 5.000000000
+ 16 | A16Z | 2000/03/17 | 8.000000000
+ 17 | A17Z | 2000/03/18 | 9.000000000
+ 18 | A18Z | 2000/03/19 | 10.000000000
+ 19 | A19Z | 2000/03/20 |
+ 20 | A20Z | 2000/03/21 |
+ 21 | | |
+(16 rows)
+
+DROP TABLE IF EXISTS target_table, source_table, tmp_table;
diff --git a/src/test/regress/expected/merge_where_col.out b/src/test/regress/expected/merge_where_col.out
new file mode 100644
index 000000000..9c5ef7c16
--- /dev/null
+++ b/src/test/regress/expected/merge_where_col.out
@@ -0,0 +1,141 @@
+--
+-- MERGE INTO
+--
+-- part 1
+-- initial
+CREATE SCHEMA merge_where_col;
+SET current_schema = merge_where_col;
+drop table if exists merge_nest_tab1,dt2;
+NOTICE: table "merge_nest_tab1" does not exist, skipping
+NOTICE: table "dt2" does not exist, skipping
+create table merge_nest_tab1(co1 numeric(20,4),co2 varchar2,co3 number,co4 date);
+insert into merge_nest_tab1 values(generate_series(1,10),'hello'||generate_series(1,10),generate_series(1,10)*10,sysdate);
+create table dt2(c1 numeric(20,4),c2 boolean,c3 character(40),c4 binary_double,c5 nchar(20)) WITH (ORIENTATION = COLUMN);
+insert into dt2 values(generate_series(20,50),false,generate_series(20,50)||'gauss',generate_series(20,50)-0.99,'openopen');
+-- we can't use columns of the target table in the 'where' of the insert clause (co1<45)
+BEGIN;
+merge into merge_nest_tab1 a
+USING dt2 b
+  ON a.co1=b.c1-20
+  WHEN NOT matched THEN
+    insert(co1,co2,co3) values(100,
+      (SELECT 666)||'good',
+      (SELECT sum(c.c1)
+       FROM dt2 c
+       INNER JOIN merge_nest_tab1 d
+       ON c.c1=d.co1 ))
+    WHERE co1<45;
+ERROR: column "co1" does not exist
+LINE 11: WHERE co1<45;
+               ^
+HINT: There is a column named "co1" in table "a", but it cannot be referenced from this part of the query.
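+-- Rationale: in the NOT MATCHED branch no target row exists yet, so the trailing
+-- WHERE may reference only source-table columns (b.c1), as the next case shows.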
+END;
+-- we can use columns of the source table in the 'where' of the insert clause (c1<45)
+BEGIN;
+merge into merge_nest_tab1 a
+USING dt2 b
+  ON a.co1=b.c1-20
+  WHEN NOT matched THEN
+    insert(co1,co2,co3) values(100,
+      (SELECT 666)||'good',
+      (SELECT sum(c.c1)
+       FROM dt2 c
+       INNER JOIN merge_nest_tab1 d
+       ON c.c1=d.co1 ))
+    WHERE c1<45;
+SELECT co1, co2, co3 FROM merge_nest_tab1 order by 1;
+ co1 | co2 | co3
+----------+---------+-----
+ 1.0000 | hello1 | 10
+ 2.0000 | hello2 | 20
+ 3.0000 | hello3 | 30
+ 4.0000 | hello4 | 40
+ 5.0000 | hello5 | 50
+ 6.0000 | hello6 | 60
+ 7.0000 | hello7 | 70
+ 8.0000 | hello8 | 80
+ 9.0000 | hello9 | 90
+ 10.0000 | hello10 | 100
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+ 100.0000 | 666good |
+(25 rows)
+
+ROLLBACK;
+-- we can use columns of the source table in the 'where' of the update clause (c1 BETWEEN 1 AND 50)
+BEGIN;
+merge into merge_nest_tab1 a
+USING dt2 b
+  ON a.co1=b.c1-20
+  WHEN matched THEN
+    UPDATE SET a.co3=a.co3 + b.c4,
+               a.co2='hello',
+               a.co4=(SELECT last_day(sysdate))
+    WHERE c1 BETWEEN 1 AND 50;
+SELECT co1, co2, co3 FROM merge_nest_tab1 order by 1;
+ co1 | co2 | co3
+---------+-------+--------
+ 1.0000 | hello | 30.01
+ 2.0000 | hello | 41.01
+ 3.0000 | hello | 52.01
+ 4.0000 | hello | 63.01
+ 5.0000 | hello | 74.01
+ 6.0000 | hello | 85.01
+ 7.0000 | hello | 96.01
+ 8.0000 | hello | 107.01
+ 9.0000 | hello | 118.01
+ 10.0000 | hello | 129.01
+(10 rows)
+
+ROLLBACK;
+-- part 2
+-- initial
+drop table if exists tb_a,tb_b;
+NOTICE: table "tb_a" does not exist, skipping
+NOTICE: table "tb_b" does not exist, skipping
+create table tb_a(id int, a int, b int, c int, d int);
+create table tb_b(id int, a int, b int, c int, d int);
+insert into tb_a values(1, 1, 1, 1, 1);
+insert into tb_a values(2, 2, 2, 2, 2);
+insert into tb_a values(3, 3, 3, 3, 3);
+insert into tb_a values(4, 4, 4, 4, 4);
+insert into tb_b values(1, 100, 1, 1, 1);
+insert into tb_b values(2, 2, 2, 2, 2);
+insert into tb_b values(3, 3, 3, 3, 3);
+insert into tb_b values(4, 4, 4, 4, 4);
+-- if the column has the same name, the column in the target table takes precedence
+BEGIN;
+MERGE INTO tb_b bt
+USING tb_a at
+  ON (at.id = bt.id)
+  WHEN MATCHED THEN
+    UPDATE SET a = at.a + 100 WHERE a =100;
+SELECT * FROM tb_b ORDER BY 1;
+ id | a | b | c | d
+----+-----+---+---+---
+ 1 | 101 | 1 | 1 | 1
+ 2 | 2 | 2 | 2 | 2
+ 3 | 3 | 3 | 3 | 3
+ 4 | 4 | 4 | 4 | 4
+(4 rows)
+
+ROLLBACK;
+-- clean up
+DROP SCHEMA merge_where_col CASCADE;
+NOTICE: drop cascades to 4 other objects
+DETAIL: drop cascades to table merge_nest_tab1
+drop cascades to table dt2
+drop cascades to table tb_a
+drop cascades to table tb_b
diff --git a/src/test/regress/expected/nlssort_pinyin.out b/src/test/regress/expected/nlssort_pinyin.out
new file mode 100644
index 000000000..1c03063c3
--- /dev/null
+++ b/src/test/regress/expected/nlssort_pinyin.out
@@ -0,0 +1,777 @@
+create schema nlssort_pinyin_schema;
+set search_path = nlssort_pinyin_schema;
+-- test null
+select nlssort(NULL, 'nls_sort=schinese_pinyin_m');
+ nlssort
+---------
+
+(1 row)
+
+select nlssort('', NULL);
+ nlssort
+---------
+
+(1 row)
+
+select nlssort(NULL, NULL);
+ nlssort
+---------
+
+(1 row)
+
+-- test wrong parameter
+select nlssort('', ' nls_sort = schinese_pinyin_m ');
+ nlssort
+---------
+ 
+(1 row) + +select nlssort('', ' nls_sort = generic_m_ci '); + nlssort +--------- + +(1 row) + +select nlssort('', 'nls_sort=s chinese_pinyin_m'); +ERROR: Sort method nls_sort=s chinese_pinyin_m is not supported! +DETAIL: Not support the given sort method. +CONTEXT: referenced column: nlssort +select nlssort('', 'nls_sort=g eneric_m_ci'); +ERROR: Sort method nls_sort=g eneric_m_ci is not supported! +DETAIL: Not support the given sort method. +CONTEXT: referenced column: nlssort +select nlssort('', 'nls_sort=schinese'); +ERROR: Sort method nls_sort=schinese is not supported! +DETAIL: Not support the given sort method. +CONTEXT: referenced column: nlssort +select nlssort('', 'nls_sort=generic'); +ERROR: Sort method nls_sort=generic is not supported! +DETAIL: Not support the given sort method. +CONTEXT: referenced column: nlssort +-- test single char nlssort code +select nlssort('', 'nls_sort=schinese_pinyin_m'); + nlssort +--------- + +(1 row) + +select nlssort('', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFF0001000000 +(1 row) + +select nlssort('$', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 00870000010001 +(1 row) + +select nlssort('&', 'nls_sort=schinese_pinyin_m'); + nlssort +---------- + 00000028 +(1 row) + +select nlssort('''', 'nls_sort=schinese_pinyin_m'); + nlssort +---------- + 00000029 +(1 row) + +select nlssort('0', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 01790000020002 +(1 row) + +select nlssort('A', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 01EA0000020006 +(1 row) + +select nlssort('\', 'nls_sort=schinese_pinyin_m'); + nlssort +---------- + 0000003A +(1 row) + +select nlssort('a', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 01EA0000020002 +(1 row) + +select nlssort('倰', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 74950000010001 +(1 row) + +select nlssort('冔', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + AD100000010001 +(1 row) + +select nlssort('勆', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 733E0000010001 +(1 row) + +select nlssort('', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFFE4DE000000 +(1 row) + +select nlssort('「', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFF300C000000 +(1 row) + +select nlssort('★', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFF2605000000 +(1 row) + +select nlssort('ⅰ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 021B0000020004 +(1 row) + +select nlssort('⒈', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 017A0000020005 +(1 row) + +select nlssort('⑴', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 017A0000020004 +(1 row) + +select nlssort('①', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 017A0000020006 +(1 row) + +select nlssort('㈠', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFF3220000000 +(1 row) + +select nlssort('Ⅰ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 021B000002000A +(1 row) + +select nlssort('Ⅴ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 02610000020009 +(1 row) + +select nlssort('', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFFE592000000 +(1 row) + +select nlssort('0', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 01790000020003 +(1 row) + +select nlssort('A', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 01EA0000020007 +(1 row) + +select nlssort('a', 
'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 01EA0000020003 +(1 row) + +select nlssort('ぎ', 'nls_sort=schinese_pinyin_m'); + nlssort +------------------ + 3705000002BD0002 +(1 row) + +select nlssort('ガ', 'nls_sort=schinese_pinyin_m'); + nlssort +------------------ + 3704000002BD0003 +(1 row) + +select nlssort('α', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 028B0000020002 +(1 row) + +select nlssort('猋', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 40650000010001 +(1 row) + +select nlssort('珬', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + AD480000010001 +(1 row) + +select nlssort('甂', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + 3FF60000010001 +(1 row) + +select nlssort('Ꮬ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFF13DC000000 +(1 row) + +select nlssort('ᴂ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFF1D02000000 +(1 row) + +select nlssort('겷', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFFACB7000000 +(1 row) + +select nlssort('뛑', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFFB6D1000000 +(1 row) + +select nlssort('', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------- + FFFFF3BC000000 +(1 row) + +select nlssort('𡤝', 'nls_sort=schinese_pinyin_m'); + nlssort +------------------------ + FFFFD846FFFFDD1D000000 +(1 row) + +select nlssort('𦪫', 'nls_sort=schinese_pinyin_m'); + nlssort +------------------------ + FFFFD85AFFFFDEAB000000 +(1 row) + +select nlssort('𰀅', 'nls_sort=schinese_pinyin_m'); + nlssort +------------------------ + FFFFD880FFFFDC05000000 +(1 row) + +select nlssort('', 'nls_sort=generic_m_ci'); + nlssort +--------- + +(1 row) + +select nlssort('', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFF00010000 +(1 row) + +select nlssort('$', 'nls_sort=generic_m_ci'); + nlssort +------------ + 0087000001 +(1 row) + +select nlssort('&', 'nls_sort=generic_m_ci'); + nlssort +--------- + 0000 +(1 row) + +select nlssort('''', 'nls_sort=generic_m_ci'); + nlssort +--------- + 0000 +(1 row) + +select nlssort('0', 'nls_sort=generic_m_ci'); + nlssort +------------ + 0179000002 +(1 row) + +select nlssort('A', 'nls_sort=generic_m_ci'); + nlssort +------------ + 01EA000002 +(1 row) + +select nlssort('\', 'nls_sort=generic_m_ci'); + nlssort +--------- + 0000 +(1 row) + +select nlssort('a', 'nls_sort=generic_m_ci'); + nlssort +------------ + 01EA000002 +(1 row) + +select nlssort('倰', 'nls_sort=generic_m_ci'); + nlssort +------------ + 7495000001 +(1 row) + +select nlssort('冔', 'nls_sort=generic_m_ci'); + nlssort +------------ + AD10000001 +(1 row) + +select nlssort('勆', 'nls_sort=generic_m_ci'); + nlssort +------------ + 733E000001 +(1 row) + +select nlssort('', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFFE4DE0000 +(1 row) + +select nlssort('「', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFF300C0000 +(1 row) + +select nlssort('★', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFF26050000 +(1 row) + +select nlssort('ⅰ', 'nls_sort=generic_m_ci'); + nlssort +------------ + 021B000002 +(1 row) + +select nlssort('⒈', 'nls_sort=generic_m_ci'); + nlssort +------------ + 017A000002 +(1 row) + +select nlssort('⑴', 'nls_sort=generic_m_ci'); + nlssort +------------ + 017A000002 +(1 row) + +select nlssort('①', 'nls_sort=generic_m_ci'); + nlssort +------------ + 017A000002 +(1 row) + +select nlssort('㈠', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFF32200000 +(1 row) + +select nlssort('Ⅰ', 
'nls_sort=generic_m_ci'); + nlssort +------------ + 021B000002 +(1 row) + +select nlssort('Ⅴ', 'nls_sort=generic_m_ci'); + nlssort +------------ + 0261000002 +(1 row) + +select nlssort('', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFFE5920000 +(1 row) + +select nlssort('0', 'nls_sort=generic_m_ci'); + nlssort +------------ + 0179000002 +(1 row) + +select nlssort('A', 'nls_sort=generic_m_ci'); + nlssort +------------ + 01EA000002 +(1 row) + +select nlssort('a', 'nls_sort=generic_m_ci'); + nlssort +------------ + 01EA000002 +(1 row) + +select nlssort('ぎ', 'nls_sort=generic_m_ci'); + nlssort +-------------- + 3705000002BD +(1 row) + +select nlssort('ガ', 'nls_sort=generic_m_ci'); + nlssort +-------------- + 3704000002BD +(1 row) + +select nlssort('α', 'nls_sort=generic_m_ci'); + nlssort +------------ + 028B000002 +(1 row) + +select nlssort('猋', 'nls_sort=generic_m_ci'); + nlssort +------------ + 4065000001 +(1 row) + +select nlssort('珬', 'nls_sort=generic_m_ci'); + nlssort +------------ + AD48000001 +(1 row) + +select nlssort('甂', 'nls_sort=generic_m_ci'); + nlssort +------------ + 3FF6000001 +(1 row) + +select nlssort('Ꮬ', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFF13DC0000 +(1 row) + +select nlssort('ᴂ', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFF1D020000 +(1 row) + +select nlssort('겷', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFFACB70000 +(1 row) + +select nlssort('뛑', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFFB6D10000 +(1 row) + +select nlssort('', 'nls_sort=generic_m_ci'); + nlssort +-------------- + FFFFF3BC0000 +(1 row) + +select nlssort('𡤝', 'nls_sort=generic_m_ci'); + nlssort +---------------------- + FFFFD846FFFFDD1D0000 +(1 row) + +select nlssort('𦪫', 'nls_sort=generic_m_ci'); + nlssort +---------------------- + FFFFD85AFFFFDEAB0000 +(1 row) + +select nlssort('𰀅', 'nls_sort=generic_m_ci'); + nlssort +---------------------- + FFFFD880FFFFDC050000 +(1 row) + +-- test multi chars nlssort code +select nlssort(' ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------- + 00000023 +(1 row) + +select nlssort('AbC啊 ', 'nls_sort=schinese_pinyin_m'); + nlssort +---------------------------------------- + 01EA01EF01F43B2C0000020202010006020701 +(1 row) + +select nlssort('AbC 啊 ', 'nls_sort=schinese_pinyin_m'); + nlssort +------------------------------------------ + 01EA01EF01F43B2C000002020201000602072301 +(1 row) + +select nlssort(' AbC啊 ', 'nls_sort=schinese_pinyin_m'); + nlssort +-------------------------------------------- + 01EA01EF01F43B2C00000202020100232306020701 +(1 row) + +select nlssort(' ', 'nls_sort=generic_m_ci'); + nlssort +--------- + 0000 +(1 row) + +select nlssort('AbC啊 ', 'nls_sort=generic_m_ci'); + nlssort +------------------------------ + 01EA01EF01F43B2C000002020201 +(1 row) + +select nlssort('AbC 啊 ', 'nls_sort=generic_m_ci'); + nlssort +------------------------------ + 01EA01EF01F43B2C000002020201 +(1 row) + +select nlssort(' AbC啊 ', 'nls_sort=generic_m_ci'); + nlssort +------------------------------ + 01EA01EF01F43B2C000002020201 +(1 row) + +-- test nlssort func user in order by statement +drop table if exists tb_test; +NOTICE: table "tb_test" does not exist, skipping +create table tb_test(c1 text); +insert into tb_test values(''); +insert into tb_test values(''); +insert into tb_test values('$'); +insert into tb_test values('&'); +insert into tb_test values(''''); +insert into tb_test values('0'); +insert into tb_test values('A'); +insert into tb_test values('\'); +insert into tb_test values('a'); 
+insert into tb_test values('倰'); +insert into tb_test values('冔'); +insert into tb_test values('勆'); +insert into tb_test values(''); +insert into tb_test values('「'); +insert into tb_test values('★'); +insert into tb_test values('ⅰ'); +insert into tb_test values('⒈'); +insert into tb_test values('⑴'); +insert into tb_test values('①'); +insert into tb_test values('㈠'); +insert into tb_test values('Ⅰ'); +insert into tb_test values('Ⅴ'); +insert into tb_test values(''); +insert into tb_test values('0'); +insert into tb_test values('A'); +insert into tb_test values('a'); +insert into tb_test values('ぎ'); +insert into tb_test values('ガ'); +insert into tb_test values('α'); +insert into tb_test values('猋'); +insert into tb_test values('珬'); +insert into tb_test values('甂'); +insert into tb_test values('Ꮬ'); +insert into tb_test values('ᴂ'); +insert into tb_test values('겷'); +insert into tb_test values('뛑'); +insert into tb_test values(''); +insert into tb_test values('𡤝'); +insert into tb_test values('𦪫'); +insert into tb_test values('𰀅'); +insert into tb_test values(' '); +insert into tb_test values('AbC啊 '); +insert into tb_test values('AbC 啊 '); +insert into tb_test values(' AbC啊 '); +select c1, nlssort(c1, 'nls_sort=schinese_pinyin_m') from tb_test order by nlssort(c1, 'nls_sort=schinese_pinyin_m'); + c1 | nlssort +-----------+-------------------------------------------- + | 00000023 + & | 00000028 + ' | 00000029 + \ | 0000003A + $ | 00870000010001 + 0 | 01790000020002 + 0 | 01790000020003 + ⑴ | 017A0000020004 + ⒈ | 017A0000020005 + ① | 017A0000020006 + a | 01EA0000020002 + a | 01EA0000020003 + A | 01EA0000020006 + A | 01EA0000020007 + AbC啊 | 01EA01EF01F43B2C0000020202010006020701 + AbC 啊 | 01EA01EF01F43B2C000002020201000602072301 + AbC啊 | 01EA01EF01F43B2C00000202020100232306020701 + ⅰ | 021B0000020004 + Ⅰ | 021B000002000A + Ⅴ | 02610000020009 + α | 028B0000020002 + ガ | 3704000002BD0003 + ぎ | 3705000002BD0002 + 甂 | 3FF60000010001 + 猋 | 40650000010001 + 勆 | 733E0000010001 + 倰 | 74950000010001 + 冔 | AD100000010001 + 珬 | AD480000010001 + \x01 | FFFF0001000000 + Ꮬ | FFFF13DC000000 + ᴂ | FFFF1D02000000 + ★ | FFFF2605000000 + 「 | FFFF300C000000 + ㈠ | FFFF3220000000 + 겷 | FFFFACB7000000 + 뛑 | FFFFB6D1000000 + 𡤝 | FFFFD846FFFFDD1D000000 + 𦪫 | FFFFD85AFFFFDEAB000000 + 𰀅 | FFFFD880FFFFDC05000000 +  | FFFFE4DE000000 +  | FFFFE592000000 +  | FFFFF3BC000000 + | +(44 rows) + +select c1, nlssort(c1, 'nls_sort=generic_m_ci') from tb_test order by nlssort(c1, 'nls_sort=generic_m_ci'); + c1 | nlssort +-----------+------------------------------ + & | 0000 + | 0000 + \ | 0000 + ' | 0000 + $ | 0087000001 + 0 | 0179000002 + 0 | 0179000002 + ⑴ | 017A000002 + ① | 017A000002 + ⒈ | 017A000002 + a | 01EA000002 + A | 01EA000002 + A | 01EA000002 + a | 01EA000002 + AbC啊 | 01EA01EF01F43B2C000002020201 + AbC啊 | 01EA01EF01F43B2C000002020201 + AbC 啊 | 01EA01EF01F43B2C000002020201 + Ⅰ | 021B000002 + ⅰ | 021B000002 + Ⅴ | 0261000002 + α | 028B000002 + ガ | 3704000002BD + ぎ | 3705000002BD + 甂 | 3FF6000001 + 猋 | 4065000001 + 勆 | 733E000001 + 倰 | 7495000001 + 冔 | AD10000001 + 珬 | AD48000001 + \x01 | FFFF00010000 + Ꮬ | FFFF13DC0000 + ᴂ | FFFF1D020000 + ★ | FFFF26050000 + 「 | FFFF300C0000 + ㈠ | FFFF32200000 + 겷 | FFFFACB70000 + 뛑 | FFFFB6D10000 + 𡤝 | FFFFD846FFFFDD1D0000 + 𦪫 | FFFFD85AFFFFDEAB0000 + 𰀅 | FFFFD880FFFFDC050000 +  | FFFFE4DE0000 +  | FFFFE5920000 +  | FFFFF3BC0000 + | +(44 rows) + +-- test nlssort func used in procedure (compilation should not report errors) +drop table if exists tb_test; +create table 
tb_test(col1 varchar2); +create or replace package pckg_test as +procedure proc_test(i_col1 in varchar2); +function func_test(i_col1 in varchar2) return varchar2; +end pckg_test; +/ +create or replace package body pckg_test as +procedure proc_test(i_col1 in varchar2) as +v_a varchar2; +v_b varchar2; +begin +if func_test(i_col1) < func_test('阿') then +v_a:= func_test(i_col1); +end if; +select nlssort(col1,'NLS_SORT=SCHINESE_PINYIN_M') into v_b from tb_test where col1=i_col1; +end; +function func_test(i_col1 in varchar2) return varchar2 as +begin +return nlssort(i_col1,'NLS_SORT=SCHINESE_PINYIN_M'); +end; +end pckg_test; +/ +-- It will core when the length of the first argument is 0. +-- ORA compatibility mode treats "" as null, so test it in MySQL compatibility mode. +create database b_dbcompatibility TEMPLATE=template0 dbcompatibility='B'; +\c b_dbcompatibility +set client_encoding=utf8; +select nlssort('', 'nls_sort=schinese_pinyin_m'); + nlssort +--------- + +(1 row) + +\c regression +clean connection to all force for database b_dbcompatibility; +drop database b_dbcompatibility; +-- test nlssort is shippable or not +\sf nlssort +CREATE OR REPLACE FUNCTION pg_catalog.nlssort(text, text) + RETURNS text + LANGUAGE internal + IMMUTABLE NOT FENCED SHIPPABLE +AS $function$nlssort$function$; +drop schema nlssort_pinyin_schema cascade; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table nlssort_pinyin_schema.tb_test +--?.* +drop cascades to function nlssort_pinyin_schema.proc_test(character varying) +drop cascades to function nlssort_pinyin_schema.func_test(character varying) diff --git a/src/test/regress/expected/numeric_hide_tailing_zero.out b/src/test/regress/expected/numeric_hide_tailing_zero.out new file mode 100644 index 000000000..0b543f9bd --- /dev/null +++ b/src/test/regress/expected/numeric_hide_tailing_zero.out @@ -0,0 +1,27 @@ +set behavior_compat_options=''; +select cast(123.123 as numeric(15,10)); + numeric +---------------- + 123.1230000000 +(1 row) + +set behavior_compat_options='hide_tailing_zero'; +select cast(123.123 as numeric(15,10)); + numeric +--------- + 123.123 +(1 row) + +select cast(0 as numeric(15,10)); + numeric +--------- + 0 +(1 row) + +select cast(009.0000 as numeric(15,10)); + numeric +--------- + 9 +(1 row) + +set behavior_compat_options=''; diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out index e29a7d4ba..ceb425622 100644 --- a/src/test/regress/expected/opr_sanity.out +++ b/src/test/regress/expected/opr_sanity.out @@ -2561,6 +2561,10 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 4506 | listagg 4507 | interval_list_agg_noarg2_transfn 4508 | listagg + 4579 | gs_get_active_archiving_standby + 4580 | gs_pitr_archive_slot_force_advance + 4581 | gs_pitr_clean_history_global_barriers + 4582 | gs_pitr_get_warning_for_xlog_force_recycle 4600 | checksum 4601 | checksumtext_agg_transfn 4651 | pg_cbm_tracked_location @@ -2618,6 +2622,9 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 5034 | gs_io_wait_status 5077 | gs_hadr_local_rto_and_rpo_stat 5078 | gs_hadr_remote_rto_and_rpo_stat + 5254 | gs_get_session_memctx_detail + 5255 | gs_get_shared_memctx_detail + 5256 | gs_get_thread_memctx_detail 5345 | pv_builtin_functions 5519 | int1cmp 5520 | hashint1 @@ -2695,7 +2702,6 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 6204 | pg_stop_backup 6224 | gs_get_next_xid_csn 6321 | pg_stat_file_recursive - 7000 | pg_stat_segment_space_info 7001 | pg_stat_segment_extent_usage 7002 
| gs_space_shrink 7777 | sysdate diff --git a/src/test/regress/expected/oracle_outerjoin_conversion.out b/src/test/regress/expected/oracle_outerjoin_conversion.out index b2831ee50..dfc1f52e0 100644 --- a/src/test/regress/expected/oracle_outerjoin_conversion.out +++ b/src/test/regress/expected/oracle_outerjoin_conversion.out @@ -1441,7 +1441,20 @@ select t11.c1, t12.c2, t13.c2 from t11, t12, t13 where t11.c2 = t12.c3(+) and t1 -------used (+) with un common expression, like is null, is not null-------- select t11.c1, t12.c2, t13.c2 from t11, t12, t13 where t11.c2 = t12.c3(+) and t11.c3 = t13.c1(+) and t13.c2(+) is not null; -ERROR: Operator "(+)" can only be used in common expression. + c1 | c2 | c2 +----+----+---- + 10 | 10 | 10 + 4 | 4 | 4 + 6 | 6 | 6 + 8 | 8 | 8 + 1 | 1 | 1 + 9 | 9 | 9 + 5 | 5 | 5 + 2 | 2 | 2 + 3 | 3 | 3 + 7 | 7 | 7 +(10 rows) + select t11.c1, t12.c2, t13.c2 from t11, t12, t13 where t11.c2 = t12.c3(+) and t11.c3 = t13.c1(+) and (t13.c2(+) > t12.c1)::bool; ERROR: Operator "(+)" can only be used in common expression. drop view plus_v; diff --git a/src/test/regress/expected/out_param_func.out b/src/test/regress/expected/out_param_func.out new file mode 100644 index 000000000..3cb67a89d --- /dev/null +++ b/src/test/regress/expected/out_param_func.out @@ -0,0 +1,1231 @@ +create schema out_param_schema; +set current_schema= out_param_schema; +set behavior_compat_options='proc_outparam_override'; +--1-------- return a variable +CREATE or replace FUNCTION func1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func1(2, null); + func1 +------- + 1 +(1 row) + +call func1(2, NULL); + func1 | b +-------+--- + 1 | 3 +(1 row) + +select * from func1(2,null); + func1 | b +-------+--- + 1 | 3 +(1 row) + +select func1(a => 2, b => null); + func1 +------- + 1 +(1 row) + +select * from func1(a => 2, b => null); + func1 | b +-------+--- + 1 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + select * into result from func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--- inout parameter +CREATE or replace FUNCTION func1_1(in a integer, inout b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func1_1(2, null); + func1_1 +--------- + 1 +(1 row) + +call func1_1(2, NULL); + func1_1 | b +---------+--- + 1 | 3 +(1 row) + +select * from func1_1(2,null); + func1_1 | b +---------+--- + 1 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result :=
func1_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func1_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + func1_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--2-------- return an expression over variables +CREATE or replace FUNCTION func2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return b + c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func2(2, null); + func2 +------- + 4 +(1 row) + +call func2(2, NULL); + func2 | b +-------+--- + 4 | 3 +(1 row) + +select * from func2(2,null); + func2 | b +-------+--- + 4 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 4 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func2(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 4 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + func2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--3------ return a constant +CREATE or replace FUNCTION func3(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return 123; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func3(2, null); + func3 +------- + 123 +(1 row) + +call func3(2, NULL); + func3 | b +-------+--- + 123 | 3 +(1 row) + +select * from func3(2,null); + func3 | b +-------+--- + 123 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func3(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 123 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func3(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 123 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func3(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + func3(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--4------ multiple out params +CREATE or replace FUNCTION func4(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func4(2,NULL,NULL); + func4 +------- + 1 +(1 row) + +call func4(2, NULL,NULL); +
func4 | b | d +-------+---+--- + 1 | 3 | 3 +(1 row) + +select * from func4(2, NULL,NULL); + func4 | b | d +-------+---+--- + 1 | 3 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4(a, b, d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: +--- inout parameters +CREATE or replace FUNCTION func4_1(in a integer, inout b integer, inout d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func4_1(2,NULL,NULL); + func4_1 +--------- + 1 +(1 row) + +call func4_1(2, NULL,NULL); + func4_1 | b | d +---------+---+--- + 1 | 3 | 3 +(1 row) + +select * from func4_1(2, NULL,NULL); + func4_1 | b | d +---------+---+--- + 1 | 3 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4_1(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4_1(a, b, d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4_1(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4_1(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: +--5-- out param + no return is unsupported; reports an error at execution -- +--5.1 +CREATE or replace FUNCTION func5_1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + --return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func5_1(2, NULL); +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_1(integer) line 0 at RETURN +referenced column: func5_1 +call func5_1(2, NULL); +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_1(integer) line 0
at RETURN +select * from func5_1(2, NULL); +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_1(integer) line 0 at RETURN +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_1(integer) line 0 at RETURN +SQL statement "CALL func5_1(a => a,b=>b)" +PL/pgSQL function inline_code_block line 5 at assignment +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_1(integer) line 0 at RETURN +SQL statement "CALL func5_1(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func5_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_1(integer) line 0 at RETURN +SQL statement "CALL func5_1(a => a,b=>b)" +PL/pgSQL function inline_code_block line 5 at SQL statement +--5.2 +CREATE or replace FUNCTION func5_2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func5_2(2, NULL); +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_2(integer) line 7 at RETURN +referenced column: func5_2 +call func5_2(2, NULL); +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_2(integer) line 7 at RETURN +select * from func5_2(2, NULL); +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_2(integer) line 7 at RETURN +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_2(integer) line 7 at RETURN +SQL statement "CALL func5_2(a => a,b=>b)" +PL/pgSQL function inline_code_block line 5 at assignment +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_2(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_2(integer) line 7 at RETURN +SQL statement "CALL func5_2(a,b)" +PL/pgSQL function inline_code_block line 5 at assignment +declare + result 
integer; + a integer := 2; + b integer := NULL; +begin + func5_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +ERROR: Value assignment for the out parameter in plpgsql language functions, Unsupported return nothing in PL/pgSQL function +DETAIL: N/A +CONTEXT: PL/pgSQL function func5_2(integer) line 7 at RETURN +SQL statement "CALL func5_2(a => a,b=>b)" +PL/pgSQL function inline_code_block line 5 at SQL statement +--6 autonomous transaction +--6.1 single out param +CREATE or replace FUNCTION func6_1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func6_1(2, null); + func6_1 +--------- + 1 +(1 row) + +call func6_1(2, NULL); + func6_1 | b +---------+--- + 1 | 3 +(1 row) + +select * from func6_1(2,null); + func6_1 | b +---------+--- + 1 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func6_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func6_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func6_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + func6_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--6.2 multiple out params +CREATE or replace FUNCTION func6_2(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + d := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +select func6_2(2,NULL,NULL); + func6_2 +--------- + 1 +(1 row) + +call func6_2(2, NULL,NULL); + func6_2 | b | d +---------+---+--- + 1 | 3 | 3 +(1 row) + +select * from func6_2(2, NULL,NULL); + func6_2 | b | d +---------+---+--- + 1 | 3 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func6_2(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func6_2(a, b,d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func6_2(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func6_2(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: d is: 3 +INFO: result is: +--7 package +--7.1 ordinary out param +create or replace package pck7_1 +is +function func7_1(in a int, out b int) +return int; +end pck7_1; +/ +CREATE or replace package body pck7_1 as
FUNCTION func7_1(in a int, out b integer) +RETURN int +AS +DECLARE + --PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; +end pck7_1; +/ +select pck7_1.func7_1(2, null); + func7_1 +--------- + 1 +(1 row) + +call pck7_1.func7_1(2, NULL); + func7_1 | b +---------+--- + 1 | 3 +(1 row) + +select * from pck7_1.func7_1(2,null); + func7_1 | b +---------+--- + 1 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_1.func7_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_1.func7_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + pck7_1.func7_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + pck7_1.func7_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--7.2 out param with autonomous transaction +create or replace package pck7_2 +is +function func7_2(in a int, out b int) +return int; +end pck7_2; +/ +CREATE or replace package body pck7_2 as FUNCTION func7_2(in a int, out b integer) +RETURN int +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; +end pck7_2; +/ +select pck7_2.func7_2(2, null); + func7_2 +--------- + 1 +(1 row) + +call pck7_2.func7_2(2, NULL); + func7_2 | b +---------+--- + 1 | 3 +(1 row) + +select * from pck7_2.func7_2(2,null); + func7_2 | b +---------+--- + 1 | 3 +(1 row) + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_2.func7_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_2.func7_2(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: 1 +declare + result integer; + a integer := 2; + b integer := NULL; +begin + pck7_2.func7_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +declare + result text; + a integer := 2; + b integer := NULL; +begin + pck7_2.func7_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +INFO: b is: 3 +INFO: result is: +--8 restriction: out-param functions cannot be overloaded +--8.1 only one plpgsql function with out params may exist under a given name +CREATE or replace FUNCTION func8_1(in a integer) +RETURNS int +AS $$ +DECLARE + b int; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_1(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := b; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +ERROR: "func8_1" functions with plpgsql language and out params are not supported Overloaded. +DETAIL: N/A.
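+-- Editor's note (hedged, not part of the captured output): as the error above shows, at most one plpgsql function with out parameters may exist under a given name, however the out-param lists differ; overloads without out parameters (see 8.3 below) remain allowed.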
+--8.2 within one schema/package, same-name plpgsql out-param functions are not allowed, but they can be replaced +CREATE or replace FUNCTION func8_2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_2(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := b; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +ERROR: "func8_2" functions with plpgsql language and out params are not supported Overloaded. +DETAIL: N/A. +CREATE or replace FUNCTION func8_2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +create or replace package pck8_2 +is +function func8_2(in a int, out b int) +return int; +function func8_2(in a int, out b int, out d integer) +return int; +end pck8_2; +/ +ERROR: "func8_2" functions with plpgsql language and out params are not supported Overloaded. +DETAIL: N/A. +--8.3 within one schema/package, same-name plpgsql functions without out params are allowed +CREATE or replace FUNCTION func8_3(in a integer) +RETURNS int +AS $$ +DECLARE + c int; + b int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_3(in a integer, in b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +create or replace package pck8_3 +is +function func8_3(in a int) +return int; +function func8_3(in a int, in b int) +return int; +end pck8_3; +/ +select proname from pg_proc where proname = 'func8_3' order by 1; + proname +--------- + func8_3 + func8_3 + func8_3 + func8_3 +(4 rows) + +create or replace function f1(in a int, out b int) return int +as +declare +c int; +begin +c := a - 1; +b := a + 1; +return c; +end; +/ +select * from generate_series(1,100) where generate_series > f1(90, null); + generate_series +----------------- + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 + 100 +(11 rows) + +declare +res int; +begin +res := f1(10, 888); -- passing a constant for the out param reports an error +raise info 'res is:%',res; +end; +/ +ERROR: when invoking function f1, no destination for argments "" +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +drop function f1; +create or replace package pck1 is +type tp1 is record(v01 number, v03 varchar2, v02 number); +function f1(in a int, out c tp1) return int; +end pck1; +/ +create or replace package body pck1 is +function f1(in a int, out c tp1) return int +as +declare +begin +c.v01:=a; +return a; +end; +end pck1; +/ +select pck1.f1(10,(1,'a',2)); + f1 +---- + 10 +(1 row) + +select *from pck1.f1(10,(1,'a',2)); + f1 | c +----+-------- + 10 | (10,,) +(1 row) + +call pck1.f1(10,(1,'a',2)); + f1 | c +----+-------- + 10 | (10,,) +(1 row) + +--clean +reset behavior_compat_options; +drop schema out_param_schema cascade; +NOTICE: drop cascades to 24 other objects +DETAIL: drop cascades to function func1(integer) +drop cascades to function func1_1(integer,integer) +drop cascades to function func2(integer) +drop cascades to function func3(integer) +drop cascades to function func4(integer) +drop cascades to function func4_1(integer,integer,integer) +drop cascades to function func5_1(integer) +drop cascades to function func5_2(integer) +drop cascades to function func6_1(integer) +drop cascades to function func6_2(integer) +--?.* +drop cascades to function out_param_schema.func7_1(integer) +--?.* +drop cascades to function
out_param_schema.func7_2(integer) +drop cascades to function out_param_schema.func8_1(integer) +drop cascades to function out_param_schema.func8_1(integer) +drop cascades to function func8_2(integer) +drop cascades to function func8_3(integer) +drop cascades to function func8_3(integer,integer) +--?.* +drop cascades to function out_param_schema.func8_3(integer) +drop cascades to function out_param_schema.func8_3(integer,integer) +--?.* +drop cascades to function out_param_schema.f1(integer) diff --git a/src/test/regress/expected/parse_page.out b/src/test/regress/expected/parse_page.out new file mode 100644 index 000000000..501d1f045 --- /dev/null +++ b/src/test/regress/expected/parse_page.out @@ -0,0 +1,87 @@ +START TRANSACTION; +DROP TABLE IF EXISTS test_astore; +NOTICE: table "test_astore" does not exist, skipping +CREATE TABLE test_astore (user_id serial PRIMARY KEY, time_clock VARCHAR ( 50 )); +NOTICE: CREATE TABLE will create implicit sequence "test_astore_user_id_seq" for serial column "test_astore.user_id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_astore_pkey" for table "test_astore" +CREATE INDEX test_astore_idx ON test_astore(user_id); +insert into test_astore select generate_series(1, 200), clock_timestamp(); +update test_astore set time_clock = NULL where user_id = 150; +update test_astore set time_clock = 'time' where user_id = 150; +delete test_astore where user_id > 190; +DROP TABLE IF EXISTS test_ustore; +NOTICE: table "test_ustore" does not exist, skipping +CREATE TABLE test_ustore (user_id serial PRIMARY KEY, time_clock VARCHAR ( 50 )) with(storage_type=ustore); +NOTICE: CREATE TABLE will create implicit sequence "test_ustore_user_id_seq" for serial column "test_ustore.user_id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_ustore_pkey" for table "test_ustore" +CREATE INDEX test_ustore_idx ON test_ustore(user_id); +insert into test_ustore select generate_series(1, 200), clock_timestamp(); +update test_ustore set time_clock = NULL where user_id = 150; +update test_ustore set time_clock = 'time' where user_id = 150; +delete test_ustore where user_id > 190; +DROP TABLE IF EXISTS test_segment; +NOTICE: table "test_segment" does not exist, skipping +CREATE TABLE test_segment (a int, b int, c int) with(segment=on); +INSERT INTO test_segment values(generate_series(1,10),generate_series(1,10), generate_series(1,10)); +CREATE OR REPLACE FUNCTION gs_parse_page_bypath_test(tablename in varchar2, block_num in int, relation in varchar2, read_mem in bool) +RETURNS table (output text) +LANGUAGE plpgsql +AS +$$ +DECLARE + param1 text; +BEGIN + SELECT pg_relation_filepath(tablename) into param1; + return query SELECT gs_parse_page_bypath(''|| param1 ||'', block_num, relation, read_mem); +END; +$$ +; +SELECT gs_parse_page_bypath_test('test_astore', 0, 'heap', true); +--?.* +--?.* +--?.* +(1 row) + +SELECT gs_parse_page_bypath_test('test_segment', 0, 'segment', true); +--?.* +--?.* +--?.* +(1 row) + +CHECKPOINT; +SELECT gs_parse_page_bypath_test('test_astore', -1, 'heap', true); +--?.* +--?.* +--?.* +(1 row) + +SELECT gs_parse_page_bypath_test('test_astore_idx', 1, 'btree', false); +--?.* +--?.* +--?.* +(1 row) + +SELECT gs_parse_page_bypath_test('test_ustore', -1, 'uheap', false); +--?.* +--?.* +--?.* +(1 row) + +SELECT gs_parse_page_bypath_test('test_ustore_idx', 1, 'ubtree', false); +--?.* +--?.* +--?.* +(1 row) + +SELECT gs_parse_page_bypath_test('test_segment', 0, 'segment', false); +--?.* +--?.* +--?.* +(1 row) + +DROP INDEX IF EXISTS 
test_astore_idx; +DROP TABLE IF EXISTS test_astore; +DROP INDEX IF EXISTS test_ustore_idx; +DROP TABLE IF EXISTS test_ustore; +DROP TABLE IF EXISTS test_segment; +COMMIT; diff --git a/src/test/regress/expected/parse_xlog.out b/src/test/regress/expected/parse_xlog.out new file mode 100644 index 000000000..392f1782c --- /dev/null +++ b/src/test/regress/expected/parse_xlog.out @@ -0,0 +1,97 @@ +-- gs_xlogdump_lsn +START TRANSACTION; +CREATE OR REPLACE FUNCTION gs_xlogdump_lsn() +RETURNS table (output text) +LANGUAGE plpgsql +AS +$$ +DECLARE + param1 text; + param2 text; +BEGIN + SELECT pg_current_xlog_location() into param1; + CHECKPOINT; + SELECT pg_current_xlog_location() into param2; + return query SELECT gs_xlogdump_lsn(''|| param1 || '', ''|| param2 || ''); +END; +$$ +; +SELECT gs_xlogdump_lsn(); +--?.* +--?.* +--?.* +(1 row) + +COMMIT; +-- gs_xlogdump_xid +SELECT gs_xlogdump_xid('200'); +--?.* +--?.* +--?.* +(1 row) + +-- gs_xlogdump_tablepath +START TRANSACTION; +CREATE OR REPLACE FUNCTION gs_xlogdump_tablepath() +RETURNS table (output text) +LANGUAGE plpgsql +AS +$$ +DECLARE + param1 text; + param2 text; +BEGIN + DROP TABLE IF EXISTS heap_t; + CREATE TABLE heap_t (i INT); + INSERT INTO heap_t SELECT * FROM generate_series(1,10); + CHECKPOINT; + SELECT pg_relation_filepath('heap_t') into param1; + return query SELECT gs_xlogdump_tablepath(''|| param1 || '', 0, 'heap'); +END; +$$ +; +SELECT gs_xlogdump_tablepath(); +NOTICE: table "heap_t" does not exist, skipping +CONTEXT: SQL statement "DROP TABLE IF EXISTS heap_t" +PL/pgSQL function gs_xlogdump_tablepath() line 6 at SQL statement +referenced column: gs_xlogdump_tablepath +--?.* +--?.* +--?.* +(1 row) + +DROP TABLE heap_t; +COMMIT; +-- gs_xlogdump_parsepage_tablepath +START TRANSACTION; +CREATE OR REPLACE FUNCTION gs_xlogdump_parsepage_tablepath() +RETURNS table (output text) +LANGUAGE plpgsql +AS +$$ +DECLARE + param1 text; + param2 text; +BEGIN + DROP TABLE IF EXISTS heap_t1; + CREATE TABLE heap_t1 (i INT); + INSERT INTO heap_t1 SELECT * FROM generate_series(1,10); + CHECKPOINT; + SELECT pg_relation_filepath('heap_t1') into param1; + return query SELECT gs_xlogdump_parsepage_tablepath(''|| param1 || '', 0, 'heap', false); +END; +$$ +; +SELECT gs_xlogdump_parsepage_tablepath(); +NOTICE: table "heap_t1" does not exist, skipping +CONTEXT: SQL statement "DROP TABLE IF EXISTS heap_t1" +PL/pgSQL function gs_xlogdump_parsepage_tablepath() line 6 at SQL statement +referenced column: gs_xlogdump_parsepage_tablepath +--?.* +--?.* +--?.* +--?.* +(1 row) + +DROP TABLE heap_t1; +COMMIT; diff --git a/src/test/regress/expected/partition_dml_operations.out b/src/test/regress/expected/partition_dml_operations.out new file mode 100644 index 000000000..ded930924 --- /dev/null +++ b/src/test/regress/expected/partition_dml_operations.out @@ -0,0 +1,1036 @@ +--select +create table tsource(ld int not null,sd int not null,jname varchar2) partition by range(ld) +( + partition ts1 values less than(6), + partition ts2 values less than(36) +); +insert into tsource values (5),(15); +ERROR: null value in column "sd" violates not-null constraint +DETAIL: Failing row contains (5, null, null). +select * from tsource partition (ts1); + ld | sd | jname +----+----+------- +(0 rows) + +select * from tsource partition for(5); + ld | sd | jname +----+----+------- +(0 rows) + +select * from tsource subpartition (ts1); +ERROR: relation "tsource" is not subpartitioned table +DETAIL: N/A. 
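+-- Editor's note (hedged, not part of the captured output): PARTITION (name) addresses a partition by name, while PARTITION FOR (value) routes through the partition key; both resolve to ts1 here, since ts1 holds ld < 6. SUBPARTITION references are only valid on subpartitioned tables, so they are rejected for tsource.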
+select * from tsource subpartition for(3,6); +ERROR: relation "tsource" is not subpartitioned table +DETAIL: N/A. +drop table tsource; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from range_list partition for ('201902'); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +select * from range_list subpartition for ('201902','1'); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +drop table range_list; +--insert +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; +insert into test_range_pt partition (p1) values(1); +insert into test_range_pt partition (p2) values(1); +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +insert into test_range_pt partition (p3) values(1); +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +insert into test_range_pt partition for (1) values(1); +insert into test_range_pt partition for (2001) values(1); +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +insert into test_range_pt partition for (3001) values(1); +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. 
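+-- Editor's sketch (hedged, not captured output): routing must agree with the named partition; a key that belongs to p2 would succeed through p2 itself or through PARTITION FOR on the key value: +-- insert into test_range_pt partition (p2) values(2500); -- ok: 2000 <= 2500 < 3000 +-- insert into test_range_pt partition for (2500) values(2500); -- ok: same partition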
+drop table test_range_pt; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list partition (p_201901) values('201902', '1', '1', 1); +insert into range_list partition (p_201902) values('201902', '1', '1', 1); +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +insert into range_list partition (p_201902_a) values('201902', '1', '1', 1); +ERROR: partition "p_201902_a" of relation "range_list" does not exist +insert into range_list partition (p_201902_c) values('201902', '1', '1', 1); +ERROR: partition "p_201902_c" of relation "range_list" does not exist +insert into range_list subpartition (p_201901_a) values('201902', '1', '1', 1); +insert into range_list subpartition (p_201901_b) values('201902', '1', '1', 1); +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +insert into range_list subpartition (p_201902_a) values('201902', '1', '1', 1); +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +insert into range_list subpartition (p_201902_b) values('201902', '1', '1', 1); +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +insert into range_list subpartition (p_201901) values('201902', '1', '1', 1); +ERROR: subpartition "p_201901" of relation "range_list" does not exist +insert into range_list subpartition (p_201902) values('201902', '1', '1', 1); +ERROR: subpartition "p_201902" of relation "range_list" does not exist +insert into range_list subpartition (p_201903) values('201902', '1', '1', 1); +ERROR: subpartition "p_201903" of relation "range_list" does not exist +insert into range_list partition for ('201902') values('201902', '1', '1', 1); +insert into range_list partition for ('201903') values('201902', '1', '1', 1); +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +insert into range_list partition for ('201910') values('201902', '1', '1', 1); +ERROR: Cannot find partition by the value +DETAIL: N/A. +insert into range_list subpartition for ('201902','1') values('201902', '1', '1', 1); +insert into range_list subpartition for ('201902','2') values('201902', '1', '1', 1); +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +insert into range_list subpartition for ('201903','1') values('201902', '1', '1', 1); +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +insert into range_list subpartition for ('201903','2') values('201902', '1', '1', 1); +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +insert into range_list subpartition for ('201902') values('201902', '1', '1', 1); +ERROR: number of partitionkey values is not equal to the number of partitioning columns +DETAIL: N/A. +insert into range_list subpartition for ('201910','1') values('201902', '1', '1', 1); +ERROR: Cannot find partition by the value +DETAIL: N/A. 
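+-- Editor's sketch (hedged, not captured output): for a subpartitioned table both keys are checked; ('201902','2') maps to p_201901 -> p_201901_b, so the consistent forms would succeed: +-- insert into range_list subpartition (p_201901_b) values('201902', '2', '1', 1); +-- insert into range_list subpartition for ('201902','2') values('201902', '2', '1', 1);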
+drop table range_list; +--update +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt values(2001, 1, 1); +insert into test_range_pt values(3001, 1, 1); +update test_range_pt partition (p1) set b = 2; +select * from test_range_pt; + a | b | c +------+---+--- + 1 | 2 | 1 + 2001 | 1 | 1 + 3001 | 1 | 1 +(3 rows) + +update test_range_pt partition (p1) set a = 2; +select * from test_range_pt; + a | b | c +------+---+--- + 2 | 2 | 1 + 2001 | 1 | 1 + 3001 | 1 | 1 +(3 rows) + +update test_range_pt partition for (1) set b = 3; +select * from test_range_pt; + a | b | c +------+---+--- + 2 | 3 | 1 + 2001 | 1 | 1 + 3001 | 1 | 1 +(3 rows) + +update test_range_pt partition for (1) set a = 3; +select * from test_range_pt; + a | b | c +------+---+--- + 3 | 3 | 1 + 2001 | 1 | 1 + 3001 | 1 | 1 +(3 rows) + +drop table test_range_pt; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +update range_list partition (p_201901) set user_no = '2'; +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 2 | 1 + 201902 | 1 | 2 | 1 + 201902 | 1 | 2 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +update range_list subpartition (p_201901_a) set user_no = '3'; +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 2 | 1 + 201902 | 1 | 3 | 1 + 201902 | 1 | 3 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +update range_list partition for ('201902') set user_no = '4'; +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 4 | 1 + 201902 | 1 | 4 | 1 + 201902 | 1 | 4 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +update range_list subpartition for ('201902','2') set user_no = '5'; +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 5 | 1 + 201902 | 1 | 4 | 1 + 201902 | 1 | 4 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table 
range_list; +--delete +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt values(2001, 1, 1); +insert into test_range_pt values(3001, 1, 1); +delete from test_range_pt partition (p1); +select * from test_range_pt; + a | b | c +------+---+--- + 2001 | 1 | 1 + 3001 | 1 | 1 +(2 rows) + +delete from test_range_pt partition for (2001); +select * from test_range_pt; + a | b | c +------+---+--- + 3001 | 1 | 1 +(1 row) + +drop table test_range_pt; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +delete from range_list partition (p_201901); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +delete from range_list partition for ('201903'); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +delete from range_list subpartition (p_201901_a); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(4 rows) + +delete from range_list subpartition for ('201903','2'); +select *from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +drop table range_list; +--upsert +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) PRIMARY 
KEY , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_pkey" for table "range_list" +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +delete from range_list; +create index idx1 on range_list(month_code,dept_code) local; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +delete from range_list; +create index idx2 on range_list(month_code) global; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +delete from range_list; +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) PRIMARY KEY , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_pkey" for table "range_list" +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +delete from range_list; +create index idx1 on range_list(month_code,dept_code) local; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +delete from range_list; +create index idx2 on range_list(month_code) global; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +delete from range_list; +drop table 
range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) PRIMARY KEY , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_pkey" for table "range_list" +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201902) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +insert into range_list subpartition (p_201901_a) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +insert into range_list subpartition (p_201901_b) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 20; +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 10 +(1 row) + +insert into range_list partition for ('201902') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 30; +insert into range_list partition for ('201903') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 30 +(1 row) + +insert into range_list subpartition for ('201902','1') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +insert into range_list subpartition for ('201902','2') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 50; +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. 
+select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 40 +(1 row) + +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int, + PRIMARY KEY(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_pkey" for table "range_list" +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201902) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 5 +(1 row) + +insert into range_list subpartition (p_201901_a) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +insert into range_list subpartition (p_201901_b) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 20; +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 10 +(1 row) + +insert into range_list partition for ('201902') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 30; +insert into range_list partition for ('201903') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +ERROR: inserted partition key does not map to the table partition +DETAIL: N/A. +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 30 +(1 row) + +insert into range_list subpartition for ('201902','1') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +insert into range_list subpartition for ('201902','2') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 50; +ERROR: inserted subpartition key does not map to the table subpartition +DETAIL: N/A. 
+select * from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |        40
+(1 row)
+
+drop table range_list;
+drop table test_range_pt;
+ERROR:  table "test_range_pt" does not exist
+create table test_range_pt (a int, b int primary key, c int)
+partition by range(a)
+(
+    partition p1 values less than (2000),
+    partition p2 values less than (3000),
+    partition p3 values less than (4000),
+    partition p4 values less than (5000),
+    partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "test_range_pt_pkey" for table "test_range_pt"
+insert into test_range_pt values (1,1),(2001,2001);
+insert into test_range_pt partition (p1) values(1,2001) ON DUPLICATE KEY UPDATE a = 5;
+ERROR:  The update target partition of upsert is inconsistent with the specified partition in partition table.
+DETAIL:  N/A.
+drop table test_range_pt;
+drop table if exists tsource cascade;
+NOTICE:  table "tsource" does not exist, skipping
+create table tsource(ld int ,sd int not null,code int primary key)
+partition by range(ld) subpartition by range(sd)
+(
+    partition ts1 values less than(16)(
+        subpartition ts11 values less than(16),
+        subpartition ts12 values less than(66)
+    ),
+    partition ts2 values less than(66)(
+        subpartition ts21 values less than(16),
+        subpartition ts22 values less than(66)
+    )
+);
+NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "tsource_pkey" for table "tsource"
+insert into tsource values(10,1,1),(60,1,2);
+insert into tsource partition (ts1) values(10,1,2) on duplicate key update sd=3;
+ERROR:  The update target partition of upsert is inconsistent with the specified partition in subpartition table.
+DETAIL:  N/A.
+insert into tsource values(10,60,3);
+insert into tsource subpartition (ts11) values(10,1,3) on duplicate key update sd=4;
+ERROR:  The update target subpartition of upsert is inconsistent with the specified subpartition in subpartition table.
+DETAIL:  N/A.
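+-- in these two cases the inserted tuple maps to the named (sub)partition, but the
+-- row holding the conflicting primary key lives in a different one, so the upsert
+-- is rejected as inconsistent instead of updating a row outside the named target.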
+drop table if exists tsource cascade;
+--Merge into
+create table test_range_pt (a int, b int, c int)
+partition by range(a)
+(
+    partition p1 values less than (2000),
+    partition p2 values less than (3000),
+    partition p3 values less than (4000),
+    partition p4 values less than (5000),
+    partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+insert into test_range_pt values(1, 1, 1);
+insert into test_range_pt values(2001,1 ,1);
+create table newtest_range_pt (a int, b int, c int)
+partition by range(a)
+(
+    partition p1 values less than (2000),
+    partition p2 values less than (3000),
+    partition p3 values less than (4000),
+    partition p4 values less than (5000),
+    partition p5 values less than (maxvalue)
+)ENABLE ROW MOVEMENT;
+insert into newtest_range_pt values(1,2,2);
+insert into newtest_range_pt values(2,2,2);
+insert into newtest_range_pt values(2001,2,2);
+MERGE INTO test_range_pt p
+USING newtest_range_pt np
+ON p.a= np.a
+WHEN MATCHED THEN
+    UPDATE SET b = np.b, c = np.c
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.a, np.b, np.c);
+
+select * from test_range_pt;
+  a   | b | c
+------+---+---
+    1 | 2 | 2
+    2 | 2 | 2
+ 2001 | 2 | 2
+(3 rows)
+
+select * from newtest_range_pt;
+  a   | b | c
+------+---+---
+    1 | 2 | 2
+    2 | 2 | 2
+ 2001 | 2 | 2
+(3 rows)
+
+delete from test_range_pt;
+delete from newtest_range_pt;
+insert into test_range_pt values(1, 1, 1);
+insert into test_range_pt values(2001,1 ,1);
+insert into newtest_range_pt values(1,2,2);
+insert into newtest_range_pt values(2,2,2);
+insert into newtest_range_pt values(2001,2,2);
+insert into newtest_range_pt values(2002,2,2);
+MERGE INTO test_range_pt partition (p1) p
+USING newtest_range_pt partition (p1) np
+ON p.a= np.a
+WHEN MATCHED THEN
+    UPDATE SET b = np.b, c = np.c
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.a, np.b, np.c);
+select * from test_range_pt;
+  a   | b | c
+------+---+---
+    1 | 2 | 2
+    2 | 2 | 2
+ 2001 | 1 | 1
+(3 rows)
+
+select * from newtest_range_pt;
+  a   | b | c
+------+---+---
+    1 | 2 | 2
+    2 | 2 | 2
+ 2001 | 2 | 2
+ 2002 | 2 | 2
+(4 rows)
+
+delete from test_range_pt;
+delete from newtest_range_pt;
+insert into test_range_pt values(1, 1, 1);
+insert into test_range_pt values(2001,1 ,1);
+insert into newtest_range_pt values(1,2,2);
+insert into newtest_range_pt values(2,2,2);
+insert into newtest_range_pt values(2001,2,2);
+insert into newtest_range_pt values(2002,2,2);
+MERGE INTO test_range_pt partition for (1) p
+USING newtest_range_pt partition for (1) np
+ON p.a= np.a
+WHEN MATCHED THEN
+    UPDATE SET b = np.b, c = np.c
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.a, np.b, np.c);
+select * from test_range_pt;
+  a   | b | c
+------+---+---
+    1 | 2 | 2
+    2 | 2 | 2
+ 2001 | 1 | 1
+(3 rows)
+
+select * from newtest_range_pt;
+  a   | b | c
+------+---+---
+    1 | 2 | 2
+    2 | 2 | 2
+ 2001 | 2 | 2
+ 2002 | 2 | 2
+(4 rows)
+
+drop table test_range_pt;
+drop table newtest_range_pt;
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 2);
+CREATE TABLE newrange_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+insert into newrange_list values('201902', '1', '1', 1);
+insert into newrange_list values('201903', '1', '1', 2);
+MERGE INTO range_list p
+USING newrange_list np
+ON p.month_code= np.month_code
+WHEN MATCHED THEN
+    UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt);
+
+select *from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(3 rows)
+
+select *from newrange_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(2 rows)
+
+delete from range_list;
+delete from newrange_list;
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 2);
+insert into newrange_list values('201902', '1', '1', 1);
+insert into newrange_list values('201903', '1', '1', 2);
+MERGE INTO range_list partition (p_201901) p
+USING newrange_list partition (p_201901) np
+ON p.month_code= np.month_code
+WHEN MATCHED THEN
+    UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt);
+select *from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+(2 rows)
+
+select *from newrange_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(2 rows)
+
+delete from range_list;
+delete from newrange_list;
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 2);
+insert into newrange_list values('201902', '1', '1', 1);
+insert into newrange_list values('201903', '1', '1', 2);
+MERGE INTO range_list partition for ('201901') p
+USING newrange_list partition for ('201901') np
+ON p.month_code= np.month_code
+WHEN MATCHED THEN
+    UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt);
+select *from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201901     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+(2 rows)
+
+select *from newrange_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(2 rows)
+
+delete from range_list;
+delete from newrange_list;
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 2);
+insert into newrange_list values('201902', '1', '1', 1);
+insert into newrange_list values('201903', '1', '1', 2);
+MERGE INTO range_list subpartition (p_201901_a) p
+USING newrange_list subpartition (p_201901_a) np
+ON p.month_code= np.month_code
+WHEN MATCHED THEN
+    UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt);
+
+select *from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 2         | 1       |         2
+ 201901     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+(3 rows)
+
+select *from newrange_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(2 rows)
+
+delete from range_list;
+delete from newrange_list;
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 2);
+insert into newrange_list values('201902', '1', '1', 1);
+insert into newrange_list values('201903', '1', '1', 2);
+MERGE INTO range_list subpartition for ('201901', '1') p
+USING newrange_list subpartition for ('201901', '1') np
+ON p.month_code= np.month_code
+WHEN MATCHED THEN
+    UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt);
+
+select *from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 2         | 1       |         2
+ 201901     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+(3 rows)
+
+select *from newrange_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(2 rows)
+
+delete from range_list;
+delete from newrange_list;
+insert into range_list values('201901', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 2);
+insert into newrange_list values('201902', '1', '1', 1);
+insert into newrange_list values('201903', '1', '1', 2);
+MERGE INTO range_list p
+USING newrange_list np
+ON p.month_code= np.month_code
+WHEN MATCHED THEN
+    UPDATE SET dept_code = '3', user_no = np.user_no, sales_amt = np.sales_amt
+WHEN NOT MATCHED THEN
+    INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt);
+ERROR:  fail to update partitioned table "range_list"
+DETAIL:  new tuple does not map to any table partition
+
+select *from range_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 2         | 1       |         2
+ 201901     | 1         | 1       |         1
+(2 rows)
+
+select *from newrange_list;
+ month_code | dept_code | user_no | sales_amt
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201903     | 1         | 1       |         2
+(2 rows)
+
+drop table range_list;
+drop table newrange_list;
+create table lm_list_range (id int,sd int,name varchar2)
+partition by list(id) subpartition by range(sd)(
+    partition ts1 values(1,2,3,4,5)
+    (subpartition ts11 values less than(5),subpartition ts12 values less than(10),subpartition ts13 values less than(20)),
+    partition ts2 values(6,7,8,9,10),
+    partition ts3 values(11,12,13,14,15)
+    (subpartition ts31 values less than(5),subpartition ts32 values less than(10),subpartition ts33 values less than(20)));
+select * from lm_list_range partition for(5,34);
+ERROR:  number of partitionkey values is not equal to the number of partitioning columns
+DETAIL:  N/A.
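+-- the last MERGE rewrites dept_code to '3', which matches no list subpartition, so
+-- the statement fails as a whole; and PARTITION FOR expects exactly one value for
+-- the level-one partition key, so the two-value lookup (5,34) is rejected.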
+drop table lm_list_range; diff --git a/src/test/regress/expected/pg_job.out b/src/test/regress/expected/pg_job.out new file mode 100644 index 000000000..a1b78c438 --- /dev/null +++ b/src/test/regress/expected/pg_job.out @@ -0,0 +1,45 @@ +create database pl_test_job DBCOMPATIBILITY 'pg'; +\c pl_test_job; +CREATE TABLE pg_job_test_1(COL1 INT); +CREATE OR REPLACE PROCEDURE pg_job_test() +AS +aaa int; +BEGIN + FOR i IN 0..20 LOOP + INSERT INTO pg_job_test_1(COL1) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END; +/ +select dbe_task.id_submit(103, 'call pg_job_test();', sysdate, 'sysdate+3.0/24'); + id_submit +----------- + +(1 row) + +select pg_sleep(5); + pg_sleep +---------- + +(1 row) + +select count(*) from pg_job_test_1; + count +------- + 11 +(1 row) + +drop procedure pg_job_test; +drop table if exists pg_job_test_1; +call dbe_task.cancel(103); + cancel +-------- + +(1 row) + +\c regression; +drop database IF EXISTS pl_test_job; diff --git a/src/test/regress/expected/pl_debugger_client.out b/src/test/regress/expected/pl_debugger_client.out index d2e0c5c35..58be1f660 100755 --- a/src/test/regress/expected/pl_debugger_client.out +++ b/src/test/regress/expected/pl_debugger_client.out @@ -1119,6 +1119,19 @@ DETAIL: execute func not attached before execute dbe_pldebugger.backtrace select funcname, lineno, query from dbe_pldebugger.continue(); ERROR: must attach a execute func before execute dbe_pldebugger.continue DETAIL: execute func not attached before execute dbe_pldebugger.continue +-- test_empty +select pg_sleep(1); + pg_sleep +---------- + +(1 row) + +select dbe_pldebugger.attach(nodename, port) from debug_info; +--?.* +--?.* +--? (.*,test_empty,0,"[EXECUTION FINISHED]") +(1 row) + -- test set_var select pg_sleep(1); pg_sleep @@ -1423,18 +1436,18 @@ select * from dbe_pldebugger.info_locals(); varname | vartype | value | package_name | isconst -------------+-----------+--------------------+--------------+--------- b | int4 | 2 | z_pk | f - p1 | int4 | 1 | z_pk | f - p2 | int4 | 2 | z_pk | f - p3 | varchar | + | z_pk | f - aa.c1 | varchar | | z_pk | f - aa.c2 | int4 | 0 | z_pk | f - aa | Record | [ (null), (null),] | z_pk | f + p1 | int4 | 1 | | f + p2 | int4 | 2 | | f + p3 | varchar | + | | f + aa.c1 | varchar | | | f + aa.c2 | int4 | 0 | | f + aa | Record | [ (null), (null),] | | f --?.* --?.* - bb | Row | [ (null), (null),] | z_pk | f + bb | Row | [ (null), (null),] | | f --?.* --?.* - p4 | int4 | 0 | z_pk | f + p4 | int4 | 0 | | f a | int4 | 10 | z_pk2 | f (14 rows) @@ -1470,18 +1483,18 @@ SELECT * FROM DBE_PLDEBUGGER.info_locals(); varname | vartype | value | package_name | isconst -------------+-----------+------------+--------------+--------- b | int4 | 2 | z_pk | f - p1 | int4 | 1 | z_pk | f - p2 | int4 | 2 | z_pk | f - p3 | varchar | + | z_pk | f - aa.c1 | varchar | aa | z_pk | f - aa.c2 | int4 | 2 | z_pk | f - aa | Record | [ aa, 2,] | z_pk | f + p1 | int4 | 1 | | f + p2 | int4 | 2 | | f + p3 | varchar | + | | f + aa.c1 | varchar | aa | | f + aa.c2 | int4 | 2 | | f + aa | Record | [ aa, 2,] | | f --?.* --?.* - bb | Row | [ bb, 2,] | z_pk | f + bb | Row | [ bb, 2,] | | f --?.* --?.* - p4 | int4 | 0 | z_pk | f + p4 | int4 | 0 | | f a | int4 | 10 | z_pk2 | f (14 rows) diff --git a/src/test/regress/expected/pl_debugger_server.out b/src/test/regress/expected/pl_debugger_server.out index 15bec7a73..33dd529e0 100755 --- a/src/test/regress/expected/pl_debugger_server.out +++ b/src/test/regress/expected/pl_debugger_server.out @@ -456,6 +456,40 @@ 
select * from test_debug_recursive (1, 1); 5 | 120 (5 rows) +--test empty procedure +CREATE OR REPLACE PROCEDURE test_empty(i int,j out int) +AS +DECLARE +begin + +end; +/ +truncate debug_info; +select * from turn_on_debugger('test_empty'); + turn_on_debugger +------------------ + 0 +(1 row) + +select * from dbe_pldebugger.local_debug_server_info(); +--?.* +--?.* +--? datanode1 | .* | .* +--? datanode1 | .* | .* +(2 rows) + +select * from debug_info; +--?.* +--?.* +--? datanode1 | .* +(1 row) + +call test_empty(1, ''); + j +--- + +(1 row) + -- test set_var CREATE OR REPLACE PROCEDURE test_setvar(x int) AS DECLARE @@ -493,7 +527,8 @@ select * from dbe_pldebugger.local_debug_server_info(); --?.* --? datanode1 | .* | .* --? datanode1 | .* | .* -(2 rows) +--? datanode1 | .* | .* +(3 rows) select * from debug_info; --?.* @@ -605,7 +640,8 @@ select * from dbe_pldebugger.local_debug_server_info(); --? datanode1 | .* | .* --? datanode1 | .* | .* --? datanode1 | .* | .* -(3 rows) +--? datanode1 | .* | .* +(4 rows) select * from debug_info; --?.* @@ -620,7 +656,7 @@ select z_pk.pro1(1,2,'+'); (1 row) drop schema pl_debugger cascade; -NOTICE: drop cascades to 17 other objects +NOTICE: drop cascades to 18 other objects DETAIL: drop cascades to table test drop cascades to function test_debug(integer) drop cascades to table show_code_table @@ -633,6 +669,7 @@ drop cascades to function test_debug2() drop cascades to function test_debug3(integer) drop cascades to function test_debug4(integer) drop cascades to function test_debug_recursive(integer,integer) +drop cascades to function test_empty(integer) drop cascades to function test_setvar(integer) --?drop cascades to package .* --?drop cascades to package .* diff --git a/src/test/regress/expected/pldeveloper_gs_source.out b/src/test/regress/expected/pldeveloper_gs_source.out new file mode 100644 index 000000000..bfc18b10e --- /dev/null +++ b/src/test/regress/expected/pldeveloper_gs_source.out @@ -0,0 +1,1117 @@ +create role gs_developper password 'Dev@9999'sysadmin; +set role gs_developper password 'Dev@9999'; +create schema gs_source; +set current_schema = gs_source; +set plsql_show_all_error = on; +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +-- +-- basic cases +-- +-- function normal +CREATE OR REPLACE PROCEDURE gourav88(a inout int, b out int, c out int, d in int, e inout int) +PACKAGE +AS DECLARE V1 INT; +param1 INT; +param2 INT; +BEGIN +param1 := 10; +param2 := 20; +V1 := param1 + param2; +a:=10; +b:=100; +c:=1000; +create table if not exists gourav.gourav888 (a integer, b integer, c integer); +insert into gourav.gourav888 values(a,b,c); +END; +/ +create or replace function func1() returns boolean as $$ declare +sql_temp text; +begin + sql_temp := 'create table test(a int);'; + execute immediate sql_temp; + return true; +end; +$$ language plpgsql; +-- function fail +create or replace function func2() returns boolean as $$ declare +sql_temp text; +begin + sql_temp1 := 'create table test(a int);'; + execute immediate sql_temp; + return true; +end; +$$ language plpgsql; +NOTICE: "sql_temp1" is not a known variable +LINE 4: sql_temp1 := 'create table test(a int);'; + ^ +NOTICE: syntax error at or near "sql_temp1" +LINE 4: sql_temp1 := 'create table test(a int);'; + ^ +ERROR: Debug mod,create procedure has error. 
+DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "func2" near line 3 +-- procedure fail/procedure does not replace function with the same name +create or replace procedure func1 +is +begin +insert into fasd af asd asdf; +end; +/ +NOTICE: syntax error at or near "af" +LINE 3: insert into fasd af asd asdf; + ^ +QUERY: DECLARE +begin +insert into fasd af asd asdf; +end +ERROR: Debug mod,create procedure has error. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "func1" near line 2 +-- package +CREATE OR REPLACE PACKAGE emp_bonus9 AS +da int; +PROCEDURE aa(gg int,kk varchar2); +PROCEDURE aa(kk int,gg int); +END emp_bonus9; +/ +-- package body with failure +CREATE OR REPLACE PACKAGE BODY emp_bonus9 AS +dd int; +PROCEDURE aa(gg int,kk varchar2) +IS +BEGIN +insert int aa aa; +END; +PROCEDURE aa(kk int,gg int) +IS +BEGIN +insert into test1 values(77); +END; +END emp_bonus9; +/ +NOTICE: syntax error at or near "int" when compile function aa(integer,character varying) +LINE 3: insert int aa aa; + ^ +DETAIL: syntax error +QUERY: DECLARE +BEGIN +insert int aa aa; +END +ERROR: Debug mod,create procedure has error. +DETAIL: N/A +select rolname, name, status, type, src from DBE_PLDEVELOPER.gs_source s join pg_authid a on s.owner = a.oid order by name; + rolname | name | status | type | src +---------------+------------+--------+--------------+------------------------------------------------------------------------------------------------ + gs_developper | emp_bonus9 | t | package | CREATE OR REPLACE PACKAGE emp_bonus9 AS + + | | | | da int; + + | | | | PROCEDURE aa(gg int,kk varchar2); + + | | | | PROCEDURE aa(kk int,gg int); + + | | | | END emp_bonus9; + gs_developper | emp_bonus9 | f | package body | CREATE OR REPLACE PACKAGE BODY emp_bonus9 AS + + | | | | dd int; + + | | | | PROCEDURE aa(gg int,kk varchar2) + + | | | | IS + + | | | | BEGIN + + | | | | insert int aa aa; + + | | | | END; + + | | | | PROCEDURE aa(kk int,gg int) + + | | | | IS + + | | | | BEGIN + + | | | | insert into test1 values(77); + + | | | | END; + + | | | | END emp_bonus9; + gs_developper | func1 | t | function | create or replace function func1() returns boolean as $$ declare + + | | | | sql_temp text; + + | | | | begin + + | | | | sql_temp := 'create table test(a int);'; + + | | | | execute immediate sql_temp; + + | | | | return true; + + | | | | end; + + | | | | $$ language plpgsql; + gs_developper | func1 | f | procedure | create or replace procedure func1 + + | | | | is + + | | | | begin + + | | | | insert into fasd af asd asdf; + + | | | | end; + gs_developper | func2 | f | function | create or replace function func2() returns boolean as $$ declare + + | | | | sql_temp text; + + | | | | begin + + | | | | sql_temp1 := 'create table test(a int);'; + + | | | | execute immediate sql_temp; + + | | | | return true; + + | | | | end; + + | | | | $$ language plpgsql; + gs_developper | gourav88 | t | procedure | CREATE OR REPLACE PROCEDURE gourav88(a inout int, b out int, c out int, d in int, e inout int)+ + | | | | PACKAGE + + | | | | AS DECLARE V1 INT; + + | | | | param1 INT; + + | | | | param2 INT; + + | | | | BEGIN + + | | | | param1 := 10; + + | | | | param2 := 20; + + | | | | V1 := param1 + param2; + + | | | | a:=10; + + | | | | b:=100; + + | | | | c:=1000; + + | | | | create table if not exists gourav.gourav888 (a integer, b integer, c integer); + + | | | | insert into gourav.gourav888 values(a,b,c); + + | | | | END; +(6 rows) + +-- check if id is valid +select distinct p.pkgname, s.name from gs_package p, 
DBE_PLDEVELOPER.gs_source s where s.id = p.oid order by name; + pkgname | name +------------+------------ + emp_bonus9 | emp_bonus9 +(1 row) + +select distinct p.proname, s.name from pg_proc p, DBE_PLDEVELOPER.gs_source s where s.id = p.oid order by name; + proname | name +----------+---------- + func1 | func1 + gourav88 | gourav88 +(2 rows) + +select s.name, count(*) from DBE_PLDEVELOPER.gs_errors E, DBE_PLDEVELOPER.gs_source s where s.id = e.id group by s.name order by name; + name | count +-------+------- + func1 | 2 + func2 | 2 +(2 rows) + +truncate DBE_PLDEVELOPER.gs_source; +-- +-- extended cases +-- +-- sensitive information masking +create or replace procedure mask +is +begin +create role phil password 'Phil@123'; +end; +/ +-- change of owner +create role jackson_src password 'Jackson#456' sysadmin; +set role jackson_src password 'Jackson#456'; +create or replace procedure func1 +is +begin +insert into fasd af asd asdf; +end; +/ +NOTICE: syntax error at or near "af" +LINE 3: insert into fasd af asd asdf; + ^ +QUERY: DECLARE +begin +insert into fasd af asd asdf; +end +ERROR: Debug mod,create procedure has error. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "func1" near line 2 +set role gs_developper password 'Dev@9999'; +set behavior_compat_options='allow_procedure_compile_check'; +-- [no log] trigger func +CREATE TABLE table_stats ( + table_name text primary key, + num_insert_query int DEFAULT 0); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "table_stats_pkey" for table "table_stats" +CREATE FUNCTION count_insert_query() RETURNS TRIGGER AS $_$ +BEGIN + UPDATE table_stats SET num_insert_query = num_insert_query + 1 WHERE table_name = TG_TABLE_NAME; + RETURN NEW; +END $_$ LANGUAGE 'plpgsql'; +-- [no log] sql function +create function func0(integer,integer) RETURNS integer +AS 'SELECT $1 + $2;' +LANGUAGE SQL +IMMUTABLE SHIPPABLE +RETURNS NULL ON NULL INPUT; +-- [no log] duplicate function definition without replace mark +create function func1() returns boolean as $$ declare +sql_temp text; +begin + sql_temp := 'create table test(a int);'; + execute immediate sql_temp; + return true; +end; +$$ language plpgsql; +ERROR: function "func1" already exists with same argument types +create or replace procedure p1(a varchar2(10)) +is +begin + CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user)); + insert int asd asd; + insert into test1 values(1); + insert int asd asd; +end; +/ +NOTICE: unrecognized row security option "whatever" +LINE 3: ...TE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + ^ +HINT: Only PERMISSIVE or RESTRICTIVE policies are supported currently. +QUERY: DECLARE +begin + CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user)); + insert int asd asd; + insert into test1 values(1); + insert int asd asd; +end +ERROR: relation "account_row" does not exist +LINE 3: CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS ... 
+ ^ +DETAIL: +QUERY: DECLARE +begin + CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user)); + insert int asd asd; + insert into test1 values(1); + insert int asd asd; +end +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; + name | type | line | src +------------+--------------+------+--------------------------------------- + emp_bonus9 | package body | 6 | syntax error + func1 | procedure | 4 | syntax error + func2 | function | 4 | it is not a known variable + func2 | function | 4 | syntax error + p1 | procedure | 5 | unrecognized row security option + p1 | procedure | 7 | relation "account_row" does not exist +(6 rows) + +create or replace package pkg1 +is +procedure proc1(c ff%F); +end pkg1; +/ +NOTICE: syntax error at or near "F" +LINE 3: procedure proc1(c ff%F); + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +ERROR: Debug mod,create procedure has error. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL package near line 1 +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; + name | type | line | src +------------+--------------+------+--------------------------------------- + emp_bonus9 | package body | 6 | syntax error + func1 | procedure | 4 | syntax error + func2 | function | 4 | syntax error + func2 | function | 4 | it is not a known variable + p1 | procedure | 5 | unrecognized row security option + p1 | procedure | 7 | relation "account_row" does not exist + pkg1 | package | 3 | syntax error +(7 rows) + +create or replace package pkg2 +is +a inv%d; +procedure proc1(c ff%F); +end pkg2; +/ +ERROR: invalid type name "inv%d" +LINE 2: a inv%d; + ^ +QUERY: PACKAGE DECLARE +a inv%d; +procedure proc1(c ff%F); +end +CONTEXT: compilation of PL/pgSQL package near line 2 +create or replace package pkg3 +is +a int; +end pkg3; +/ +create or replace package body pkg3 +is +a b c d; +procedure proc1() +is +begin +insert int asd asd; +end; +end pkg3; +/ +ERROR: Invalid use of identifiers. 
+LINE 2: a b c d; + ^ +DETAIL: Syntax error found near token "c" +QUERY: PACKAGE DECLARE +a b c d; +procedure proc1() +is +begin +insert int asd asd; +end; +end +CONTEXT: invalid type name "b c d" +compilation of PL/pgSQL package near line 2 +select name,type,line,src from DBE_PLDEVELOPER.gs_errors; + name | type | line | src +------------+--------------+------+--------------------------------------- + func2 | function | 4 | it is not a known variable + func2 | function | 4 | syntax error + emp_bonus9 | package body | 6 | syntax error + func1 | procedure | 4 | syntax error + p1 | procedure | 5 | unrecognized row security option + p1 | procedure | 7 | relation "account_row" does not exist + pkg1 | package | 3 | syntax error + pkg2 | package | 3 | invalid type name +(8 rows) + +create or replace procedure pro70 is +begin +savepoint save_a; +commit; +savepoint save_a; +end; +/ +create table t1 (id int); +create or replace procedure pro71 is +cursor c1 for select pro70() from t1; +val int; +begin +end; +/ +ERROR: column "pro70" has pseudo-type void +CONTEXT: compilation of PL/pgSQL function "pro71" near line 2 +create or replace function bulk_f_039_1() returns int[] +LANGUAGE plpgsql AS +$$ +declare +var1 int[]; +CURSOR C1 IS select id,id from t1 order by 1 desc; +begin +return var1; +end; +$$; +create or replace procedure pro25 +as +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end; +/ +NOTICE: "v_cur" is not a known variable +LINE 5: open v_cur for select c1,c2 from tab1; + ^ +QUERY: DECLARE +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end +ERROR: open cursor error +LINE 5: open v_cur for select c1,c2 from tab1; + ^ +QUERY: DECLARE +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end +CREATE OR REPLACE PACKAGE error2 IS +a int;b int; +FUNCTION func1(a in int, b inout int, c out int) return int; +FUNCTION func2(a in int, b inout int, c out int) return int; +END error2; +/ +CREATE OR REPLACE PACKAGE BODY error2 IS +FUNCTION func1 (a in int, b inout int c out int) return int +IS +a1 NUMBER; +BEGIN +a1 :=10; +RETURN(a1 + a + b); +END; +aaa; +FUNCTION func2 (a in int, b inout int c out int) return int +IS +a1 NUMBER; +BEGIN +a1 :=10; +RETURN(a1 + a + b); +END; +END error2; +/ +NOTICE: syntax error at or near "c" +LINE 2: FUNCTION func1 (a in int, b inout int c out int) return int + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +ERROR: missing data type declaration +LINE 9: aaa; + ^ +QUERY: PACKAGE DECLARE +FUNCTION func1 (a in int, b inout int c out int) return int +IS +a1 NUMBER; +BEGIN +a1 :=10; +RETURN(a1 + a + b); +END; +aaa; +FUNCTION func2 (a in int, b inout int c out int) return int +IS +a1 NUMBER; +BEGIN +a1 :=10; +RETURN(a1 + a + b); +END; +END +CONTEXT: compilation of PL/pgSQL package near line 9 +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; + name | type | line | src +------------+--------------+------+--------------------------------------- + emp_bonus9 | package body | 6 | syntax error + error2 | package body | 9 | missing data type declaration + error2 | package body | 2 | syntax error + func1 | procedure | 4 | syntax error + func2 | function | 4 | it is not a known variable + func2 | function | 4 | syntax error + p1 | procedure | 7 | relation "account_row" does not exist + p1 | procedure | 5 | unrecognized row security option + pkg1 | package | 3 | syntax error + pkg2 | package | 3 | invalid type name + pro25 | procedure 
| 6 | it is not a known variable + pro71 | procedure | 2 | column "pro70" has pseudo-type void +(12 rows) + +-- [no log] guc off +set plsql_show_all_error = off; +create or replace procedure func00 +is +begin +create role yyy password 'Gauss@123'; +end; +/ +set plsql_show_all_error=on; +create or replace procedure proc4 +is +begin +insert int a; +end; +/ +NOTICE: syntax error at or near "int" +LINE 3: insert int a; + ^ +QUERY: DECLARE +begin +insert int a; +end +ERROR: Debug mod,create procedure has error. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "proc4" near line 2 +create or replace package pkg4 +is +a a; +end pkg4; +/ +ERROR: type "a" does not exist +LINE 2: a a; + ^ +QUERY: PACKAGE DECLARE +a a; +end +CONTEXT: compilation of PL/pgSQL package near line 2 +select name,type,src,line from DBE_PLDEVELOPER.gs_errors order by name; + name | type | src | line +------------+--------------+---------------------------------------+------ + emp_bonus9 | package body | syntax error | 6 + error2 | package body | syntax error | 2 + error2 | package body | missing data type declaration | 9 + func1 | procedure | syntax error | 4 + func2 | function | it is not a known variable | 4 + func2 | function | syntax error | 4 + p1 | procedure | unrecognized row security option | 5 + p1 | procedure | relation "account_row" does not exist | 7 + pkg1 | package | syntax error | 3 + pkg2 | package | invalid type name | 3 + pkg4 | package | type does not exist | 3 + pro25 | procedure | it is not a known variable | 6 + pro71 | procedure | column "pro70" has pseudo-type void | 2 + proc4 | procedure | syntax error | 4 +(14 rows) + +select name,type,status,src from DBE_PLDEVELOPER.gs_source order by name; + name | type | status | src +--------------+--------------+--------+--------------------------------------------------------------------------------- + bulk_f_039_1 | function | t | create or replace function bulk_f_039_1() returns int[] + + | | | LANGUAGE plpgsql AS + + | | | $$ + + | | | declare + + | | | var1 int[]; + + | | | CURSOR C1 IS select id,id from t1 order by 1 desc; + + | | | begin + + | | | return var1; + + | | | end; + + | | | $$; + error2 | package | t | CREATE OR REPLACE PACKAGE error2 IS + + | | | a int;b int; + + | | | FUNCTION func1(a in int, b inout int, c out int) return int; + + | | | FUNCTION func2(a in int, b inout int, c out int) return int; + + | | | END error2; + error2 | package body | f | CREATE OR REPLACE PACKAGE BODY error2 IS + + | | | FUNCTION func1 (a in int, b inout int c out int) return int + + | | | IS + + | | | a1 NUMBER; + + | | | BEGIN + + | | | a1 :=10; + + | | | RETURN(a1 + a + b); + + | | | END; + + | | | aaa; + + | | | FUNCTION func2 (a in int, b inout int c out int) return int + + | | | IS + + | | | a1 NUMBER; + + | | | BEGIN + + | | | a1 :=10; + + | | | RETURN(a1 + a + b); + + | | | END; + + | | | END error2; + func00 | procedure | t | create or replace procedure func00 + + | | | is + + | | | begin + + | | | create role yyy password '********'; + + | | | end; + func1 | procedure | f | create or replace procedure func1 + + | | | is + + | | | begin + + | | | insert into fasd af asd asdf; + + | | | end; + mask | procedure | t | create or replace procedure mask + + | | | is + + | | | begin + + | | | create role phil password '********'; + + | | | end; + p1 | procedure | f | create or replace procedure p1(a varchar2(10)) + + | | | is + + | | | begin + + | | | CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + + | | | USING (dlevel <= (SELECT aid 
FROM account_row WHERE aname = current_user));+ + | | | insert int asd asd; + + | | | insert into test1 values(1); + + | | | insert int asd asd; + + | | | end; + pkg1 | package | f | create or replace package pkg1 + + | | | is + + | | | procedure proc1(c ff%F); + + | | | end pkg1; + pkg2 | package | f | create or replace package pkg2 + + | | | is + + | | | a inv%d; + + | | | procedure proc1(c ff%F); + + | | | end pkg2; + pkg3 | package body | f | create or replace package body pkg3 + + | | | is + + | | | a b c d; + + | | | procedure proc1() + + | | | is + + | | | begin + + | | | insert int asd asd; + + | | | end; + + | | | end pkg3; + pkg3 | package | t | create or replace package pkg3 + + | | | is + + | | | a int; + + | | | end pkg3; + pkg4 | package | f | create or replace package pkg4 + + | | | is + + | | | a a; + + | | | end pkg4; + pro25 | procedure | f | create or replace procedure pro25 + + | | | as + + | | | type tpc1 is ref cursor; + + | | | --v_cur tpc1; + + | | | begin + + | | | open v_cur for select c1,c2 from tab1; + + | | | end; + pro70 | procedure | t | create or replace procedure pro70 is + + | | | begin + + | | | savepoint save_a; + + | | | commit; + + | | | savepoint save_a; + + | | | end; + pro71 | procedure | f | create or replace procedure pro71 is + + | | | cursor c1 for select pro70() from t1; + + | | | val int; + + | | | begin + + | | | end; + proc4 | procedure | f | create or replace procedure proc4 + + | | | is + + | | | begin + + | | | insert int a; + + | | | end; +(16 rows) + +set plsql_show_all_error=off; +create or replace procedure proc4 +is +b int; +c int; +begin +insert int a; +end; +/ +ERROR: syntax error at or near "int" +LINE 5: insert int a; + ^ +QUERY: DECLARE +b int; +c int; +begin +insert int a; +end +create or replace package pkg4 +is +a a; +end pkg4; +/ +ERROR: type "a" does not exist +LINE 2: a a; + ^ +QUERY: PACKAGE DECLARE +a a; +end +CONTEXT: compilation of PL/pgSQL package near line 2 +select name,type,status,src from DBE_PLDEVELOPER.gs_source order by name; + name | type | status | src +--------------+--------------+--------+--------------------------------------------------------------------------------- + bulk_f_039_1 | function | t | create or replace function bulk_f_039_1() returns int[] + + | | | LANGUAGE plpgsql AS + + | | | $$ + + | | | declare + + | | | var1 int[]; + + | | | CURSOR C1 IS select id,id from t1 order by 1 desc; + + | | | begin + + | | | return var1; + + | | | end; + + | | | $$; + error2 | package | t | CREATE OR REPLACE PACKAGE error2 IS + + | | | a int;b int; + + | | | FUNCTION func1(a in int, b inout int, c out int) return int; + + | | | FUNCTION func2(a in int, b inout int, c out int) return int; + + | | | END error2; + error2 | package body | f | CREATE OR REPLACE PACKAGE BODY error2 IS + + | | | FUNCTION func1 (a in int, b inout int c out int) return int + + | | | IS + + | | | a1 NUMBER; + + | | | BEGIN + + | | | a1 :=10; + + | | | RETURN(a1 + a + b); + + | | | END; + + | | | aaa; + + | | | FUNCTION func2 (a in int, b inout int c out int) return int + + | | | IS + + | | | a1 NUMBER; + + | | | BEGIN + + | | | a1 :=10; + + | | | RETURN(a1 + a + b); + + | | | END; + + | | | END error2; + func00 | procedure | t | create or replace procedure func00 + + | | | is + + | | | begin + + | | | create role yyy password '********'; + + | | | end; + func1 | procedure | f | create or replace procedure func1 + + | | | is + + | | | begin + + | | | insert into fasd af asd asdf; + + | | | end; + mask | procedure | t | create or replace 
procedure mask + + | | | is + + | | | begin + + | | | create role phil password '********'; + + | | | end; + p1 | procedure | f | create or replace procedure p1(a varchar2(10)) + + | | | is + + | | | begin + + | | | CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + + | | | USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user));+ + | | | insert int asd asd; + + | | | insert into test1 values(1); + + | | | insert int asd asd; + + | | | end; + pkg1 | package | f | create or replace package pkg1 + + | | | is + + | | | procedure proc1(c ff%F); + + | | | end pkg1; + pkg2 | package | f | create or replace package pkg2 + + | | | is + + | | | a inv%d; + + | | | procedure proc1(c ff%F); + + | | | end pkg2; + pkg3 | package body | f | create or replace package body pkg3 + + | | | is + + | | | a b c d; + + | | | procedure proc1() + + | | | is + + | | | begin + + | | | insert int asd asd; + + | | | end; + + | | | end pkg3; + pkg3 | package | t | create or replace package pkg3 + + | | | is + + | | | a int; + + | | | end pkg3; + pkg4 | package | f | create or replace package pkg4 + + | | | is + + | | | a a; + + | | | end pkg4; + pro25 | procedure | f | create or replace procedure pro25 + + | | | as + + | | | type tpc1 is ref cursor; + + | | | --v_cur tpc1; + + | | | begin + + | | | open v_cur for select c1,c2 from tab1; + + | | | end; + pro70 | procedure | t | create or replace procedure pro70 is + + | | | begin + + | | | savepoint save_a; + + | | | commit; + + | | | savepoint save_a; + + | | | end; + pro71 | procedure | f | create or replace procedure pro71 is + + | | | cursor c1 for select pro70() from t1; + + | | | val int; + + | | | begin + + | | | end; + proc4 | procedure | f | create or replace procedure proc4 + + | | | is + + | | | begin + + | | | insert int a; + + | | | end; +(16 rows) + +create or replace procedure proc5 +is +b int; +c int; +begin +insert int a; +end; +/ +ERROR: syntax error at or near "int" +LINE 5: insert int a; + ^ +QUERY: DECLARE +b int; +c int; +begin +insert int a; +end +create or replace package pkg5 +is +a a; +end pkg5; +/ +ERROR: type "a" does not exist +LINE 2: a a; + ^ +QUERY: PACKAGE DECLARE +a a; +end +CONTEXT: compilation of PL/pgSQL package near line 2 +create or replace package pack3 is +array_v1 pack1.array_type1; +procedure pp1(); +end pack3; +/ +ERROR: schema "pack1" does not exist +LINE 2: array_v1 pack1.array_type1; + ^ +QUERY: PACKAGE DECLARE +array_v1 pack1.array_type1; +procedure pp1(); +end +CONTEXT: compilation of PL/pgSQL package near line 2 +set behavior_compat_options='skip_insert_gs_source'; +create or replace procedure SkipInsertGsSource +is +begin +null; +end; +/ +set plsql_show_all_error to on; +select name,type,status,src from DBE_PLDEVELOPER.gs_source order by name; + name | type | status | src +--------------+--------------+--------+--------------------------------------------------------------------------------- + bulk_f_039_1 | function | t | create or replace function bulk_f_039_1() returns int[] + + | | | LANGUAGE plpgsql AS + + | | | $$ + + | | | declare + + | | | var1 int[]; + + | | | CURSOR C1 IS select id,id from t1 order by 1 desc; + + | | | begin + + | | | return var1; + + | | | end; + + | | | $$; + error2 | package | t | CREATE OR REPLACE PACKAGE error2 IS + + | | | a int;b int; + + | | | FUNCTION func1(a in int, b inout int, c out int) return int; + + | | | FUNCTION func2(a in int, b inout int, c out int) return int; + + | | | END error2; + error2 | package body | f | CREATE OR REPLACE PACKAGE 
BODY error2 IS + + | | | FUNCTION func1 (a in int, b inout int c out int) return int + + | | | IS + + | | | a1 NUMBER; + + | | | BEGIN + + | | | a1 :=10; + + | | | RETURN(a1 + a + b); + + | | | END; + + | | | aaa; + + | | | FUNCTION func2 (a in int, b inout int c out int) return int + + | | | IS + + | | | a1 NUMBER; + + | | | BEGIN + + | | | a1 :=10; + + | | | RETURN(a1 + a + b); + + | | | END; + + | | | END error2; + func00 | procedure | t | create or replace procedure func00 + + | | | is + + | | | begin + + | | | create role yyy password '********'; + + | | | end; + func1 | procedure | f | create or replace procedure func1 + + | | | is + + | | | begin + + | | | insert into fasd af asd asdf; + + | | | end; + mask | procedure | t | create or replace procedure mask + + | | | is + + | | | begin + + | | | create role phil password '********'; + + | | | end; + p1 | procedure | f | create or replace procedure p1(a varchar2(10)) + + | | | is + + | | | begin + + | | | CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + + | | | USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user));+ + | | | insert int asd asd; + + | | | insert into test1 values(1); + + | | | insert int asd asd; + + | | | end; + pkg1 | package | f | create or replace package pkg1 + + | | | is + + | | | procedure proc1(c ff%F); + + | | | end pkg1; + pkg2 | package | f | create or replace package pkg2 + + | | | is + + | | | a inv%d; + + | | | procedure proc1(c ff%F); + + | | | end pkg2; + pkg3 | package body | f | create or replace package body pkg3 + + | | | is + + | | | a b c d; + + | | | procedure proc1() + + | | | is + + | | | begin + + | | | insert int asd asd; + + | | | end; + + | | | end pkg3; + pkg3 | package | t | create or replace package pkg3 + + | | | is + + | | | a int; + + | | | end pkg3; + pkg4 | package | f | create or replace package pkg4 + + | | | is + + | | | a a; + + | | | end pkg4; + pro25 | procedure | f | create or replace procedure pro25 + + | | | as + + | | | type tpc1 is ref cursor; + + | | | --v_cur tpc1; + + | | | begin + + | | | open v_cur for select c1,c2 from tab1; + + | | | end; + pro70 | procedure | t | create or replace procedure pro70 is + + | | | begin + + | | | savepoint save_a; + + | | | commit; + + | | | savepoint save_a; + + | | | end; + pro71 | procedure | f | create or replace procedure pro71 is + + | | | cursor c1 for select pro70() from t1; + + | | | val int; + + | | | begin + + | | | end; + proc4 | procedure | f | create or replace procedure proc4 + + | | | is + + | | | begin + + | | | insert int a; + + | | | end; +(16 rows) + +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; + name | type | line | src +------------+--------------+------+--------------------------------------- + emp_bonus9 | package body | 6 | syntax error + error2 | package body | 2 | syntax error + error2 | package body | 9 | missing data type declaration + func1 | procedure | 4 | syntax error + func2 | function | 4 | it is not a known variable + func2 | function | 4 | syntax error + p1 | procedure | 5 | unrecognized row security option + p1 | procedure | 7 | relation "account_row" does not exist + pkg1 | package | 3 | syntax error + pkg2 | package | 3 | invalid type name + pkg4 | package | 3 | type does not exist + pro25 | procedure | 6 | it is not a known variable + pro71 | procedure | 2 | column "pro70" has pseudo-type void + proc4 | procedure | 4 | syntax error +(14 rows) + +drop package if exists pkg4; +NOTICE: package pkg4() does not exist, skipping +drop 
package if exists pkg5; +NOTICE: package pkg5() does not exist, skipping +drop function if exists proc4; +NOTICE: function proc4() does not exist, skipping +drop function if exists proc5; +NOTICE: function proc5() does not exist, skipping +drop package pkg1; +ERROR: package pkg1 does not exist +drop package pkg2; +ERROR: package pkg2 does not exist +drop package pkg3; +drop package emp_bonus9; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function gs_source.aa(integer,character varying) +drop cascades to function gs_source.aa(integer,integer) +select rolname, name, status, type, src from DBE_PLDEVELOPER.gs_source s join pg_authid a on s.owner = a.oid order by name; + rolname | name | status | type | src +---------------+--------------+--------+--------------+--------------------------------------------------------------------------------- + gs_developper | bulk_f_039_1 | t | function | create or replace function bulk_f_039_1() returns int[] + + | | | | LANGUAGE plpgsql AS + + | | | | $$ + + | | | | declare + + | | | | var1 int[]; + + | | | | CURSOR C1 IS select id,id from t1 order by 1 desc; + + | | | | begin + + | | | | return var1; + + | | | | end; + + | | | | $$; + gs_developper | error2 | t | package | CREATE OR REPLACE PACKAGE error2 IS + + | | | | a int;b int; + + | | | | FUNCTION func1(a in int, b inout int, c out int) return int; + + | | | | FUNCTION func2(a in int, b inout int, c out int) return int; + + | | | | END error2; + gs_developper | error2 | f | package body | CREATE OR REPLACE PACKAGE BODY error2 IS + + | | | | FUNCTION func1 (a in int, b inout int c out int) return int + + | | | | IS + + | | | | a1 NUMBER; + + | | | | BEGIN + + | | | | a1 :=10; + + | | | | RETURN(a1 + a + b); + + | | | | END; + + | | | | aaa; + + | | | | FUNCTION func2 (a in int, b inout int c out int) return int + + | | | | IS + + | | | | a1 NUMBER; + + | | | | BEGIN + + | | | | a1 :=10; + + | | | | RETURN(a1 + a + b); + + | | | | END; + + | | | | END error2; + gs_developper | func00 | t | procedure | create or replace procedure func00 + + | | | | is + + | | | | begin + + | | | | create role yyy password '********'; + + | | | | end; + jackson_src | func1 | f | procedure | create or replace procedure func1 + + | | | | is + + | | | | begin + + | | | | insert into fasd af asd asdf; + + | | | | end; + gs_developper | mask | t | procedure | create or replace procedure mask + + | | | | is + + | | | | begin + + | | | | create role phil password '********'; + + | | | | end; + gs_developper | p1 | f | procedure | create or replace procedure p1(a varchar2(10)) + + | | | | is + + | | | | begin + + | | | | CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + + | | | | USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user));+ + | | | | insert int asd asd; + + | | | | insert into test1 values(1); + + | | | | insert int asd asd; + + | | | | end; + gs_developper | pkg1 | f | package | create or replace package pkg1 + + | | | | is + + | | | | procedure proc1(c ff%F); + + | | | | end pkg1; + gs_developper | pkg2 | f | package | create or replace package pkg2 + + | | | | is + + | | | | a inv%d; + + | | | | procedure proc1(c ff%F); + + | | | | end pkg2; + gs_developper | pkg3 | f | package body | create or replace package body pkg3 + + | | | | is + + | | | | a b c d; + + | | | | procedure proc1() + + | | | | is + + | | | | begin + + | | | | insert int asd asd; + + | | | | end; + + | | | | end pkg3; + gs_developper | pkg3 | t | package | create or replace package pkg3 
+ + | | | | is + + | | | | a int; + + | | | | end pkg3; + gs_developper | pkg4 | f | package | create or replace package pkg4 + + | | | | is + + | | | | a a; + + | | | | end pkg4; + gs_developper | pro25 | f | procedure | create or replace procedure pro25 + + | | | | as + + | | | | type tpc1 is ref cursor; + + | | | | --v_cur tpc1; + + | | | | begin + + | | | | open v_cur for select c1,c2 from tab1; + + | | | | end; + gs_developper | pro70 | t | procedure | create or replace procedure pro70 is + + | | | | begin + + | | | | savepoint save_a; + + | | | | commit; + + | | | | savepoint save_a; + + | | | | end; + gs_developper | pro71 | f | procedure | create or replace procedure pro71 is + + | | | | cursor c1 for select pro70() from t1; + + | | | | val int; + + | | | | begin + + | | | | end; + gs_developper | proc4 | f | procedure | create or replace procedure proc4 + + | | | | is + + | | | | begin + + | | | | insert int a; + + | | | | end; +(16 rows) + +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +reset role; +reset behavior_compat_options; +drop schema gs_source cascade; +NOTICE: drop cascades to 14 other objects +DETAIL: drop cascades to function gourav88(integer,integer,integer) +drop cascades to function func1() +drop cascades to function mask() +drop cascades to table table_stats +drop cascades to function count_insert_query() +drop cascades to function func0(integer,integer) +drop cascades to function pro70() +drop cascades to table t1 +drop cascades to function bulk_f_039_1() +--?drop cascades to package .* +drop cascades to function gs_source.func1(integer,integer) +drop cascades to function gs_source.func2(integer,integer) +drop cascades to function func00() +drop cascades to function skipinsertgssource() +drop role jackson_src; +drop role gs_developper; diff --git a/src/test/regress/expected/pljson.out b/src/test/regress/expected/pljson.out new file mode 100644 index 000000000..bcacfc548 --- /dev/null +++ b/src/test/regress/expected/pljson.out @@ -0,0 +1,3893 @@ +CREATE SCHEMA DBE_PLJSON; +set current_schema=DBE_PLJSON; +/* not used now */ +create type pljson_element as +( + obj_type number +); +create type pljson_value as ( + /* 1 = object, 2 = array, 3 = string, 4 = number, 5 = bool, 6 = null */ + typeval number(1), + str varchar2(32767), + /* store 1 as true, 0 as false */ + num number, + num_double binary_double, + num_repr_number_p varchar2(1), + num_repr_double_p varchar2(1), + /* object or array in here */ + object_or_array pljson_element, + extended_str clob, + mapname varchar2(4000), + mapindx number(32) +); +create type pljson_list as ( + pljson_list_data pljson_value[] +); +create type pljson as ( + pljson_list_data pljson_value[], + check_for_duplicate number +); +alter type pljson_value add attribute arr pljson_list; +alter type pljson_value add attribute obj pljson; +create or replace package pljson_value as + + function gs_pljson_value() return pljson_value; + function gs_pljson_value(b boolean) return pljson_value; + function gs_pljson_value(str varchar2, esc boolean default true) return pljson_value; + function gs_pljson_value(str clob, esc boolean default true) return pljson_value; + function gs_pljson_value(num number) return pljson_value; + function gs_pljson_value(num_double binary_double) return pljson_value; + function gs_pljson_value(elem pljson_element) return pljson_value; + function gs_makenull() return pljson_value; + + function gs_pljson_value(arr pljson_list) return pljson_value; + function gs_pljson_value(obj pljson) return 
pljson_value; + + function gs_get_type(json_value pljson_value) return varchar2; + function gs_get_string(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2; + procedure gs_get_string_clob(json_value pljson_value, buf inout clob); + function gs_get_clob(json_value pljson_value) return clob; + function gs_get_bool(json_value pljson_value) return boolean; + function gs_get_number(json_value pljson_value) return number; + function gs_get_double(json_value pljson_value) return binary_double; + function gs_get_element(json_value pljson_value) return pljson_element; + function gs_get_null(json_value pljson_value) return varchar2; + + function gs_is_string(json_value pljson_value) return boolean; + function gs_is_bool(json_value pljson_value) return boolean; + function gs_is_number(json_value pljson_value) return boolean; + function gs_is_number_repr_number(json_value pljson_value) return boolean; + function gs_is_number_repr_double(json_value pljson_value) return boolean; + function gs_is_object(json_value pljson_value) return boolean; + function gs_is_array(json_value pljson_value) return boolean; + function gs_is_null(json_value pljson_value) return boolean; + + function gs_value_of(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2; + + procedure gs_parse_number(json_value inout pljson_value, str varchar2); + function gs_number_toString(json_value pljson_value) return varchar2; + function gs_to_char(json_value pljson_value, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure gs_to_clob(json_value pljson_value, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure gs_print(json_value pljson_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure htp(json_value pljson_value, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + +end pljson_value; +/ +create or replace package pljson_list as + + function gs_pljson_list() return pljson_list; + function gs_pljson_list(str varchar2) return pljson_list; + function gs_pljson_list(str clob) return pljson_list; + function gs_pljson_list(str blob, charset varchar2 default 'UTF8') return pljson_list; + function gs_pljson_list(str_array varchar2[]) return pljson_list; + function gs_pljson_list(num_array number[]) return pljson_list; + function gs_pljson_list(elem pljson_value) return pljson_list; + + procedure gs_append(json_list inout pljson_list, elem pljson_value, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem varchar2, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem clob, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem number, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem binary_double, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem boolean, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem pljson_list, _position integer default null); + + procedure gs_remove(json_list inout pljson_list, _position integer); + procedure gs_remove_first(json_list inout pljson_list); + procedure gs_remove_last(json_list inout pljson_list); + + function gs_count(json_list pljson_list) return number; + 
function gs_get(json_list pljson_list, _position integer) return pljson_value; + function gs_get_string(json_list pljson_list, _position integer) return varchar2; + function gs_get_clob(json_list pljson_list, _position integer) return clob; + function gs_get_bool(json_list pljson_list, _position integer) return boolean; + function gs_get_number(json_list pljson_list, _position integer) return number; + function gs_get_double(json_list pljson_list, _position integer) return binary_double; + function gs_get_pljson_list(json_list pljson_list, _position integer) return pljson_list; + function gs_head(json_list pljson_list) return pljson_value; + function gs_last(json_list pljson_list) return pljson_value; + function gs_tail(json_list pljson_list) return pljson_list; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_value); + procedure gs_replace(json_list inout pljson_list, _position integer, elem varchar2); + procedure gs_replace(json_list inout pljson_list, _position integer, elem clob); + procedure gs_replace(json_list inout pljson_list, _position integer, elem number); + procedure gs_replace(json_list inout pljson_list, _position integer, elem binary_double); + procedure gs_replace(json_list inout pljson_list, _position integer, elem boolean); + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_list); + + function gs_to_json_value(json_list pljson_list) return pljson_value; + + function gs_to_char(json_list pljson_list, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure gs_to_clob(json_list pljson_list, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure gs_print(json_list pljson_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure htp(json_list pljson_list, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function gs_path(json_list pljson_list, json_path varchar2, base number default 1) return pljson_value; + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_value, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem varchar2, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem clob, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem boolean, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem number, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem binary_double, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_list, base number default 1); + + procedure gs_path_remove(json_list inout pljson_list, json_path varchar2, base number default 1); + +end pljson_list; +/ +create or replace package pljson as + + function gs_pljson() return pljson; + function gs_pljson(str varchar2) return pljson; + function gs_pljson(str clob) return pljson; + function gs_pljson(str blob, charset varchar2 default 'UTF8') return pljson; + function gs_pljson(str_array varchar2[]) return pljson; + function gs_pljson(elem pljson_value) return pljson; + function gs_pljson(l pljson_list) return pljson; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_value, _position 
integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value varchar2, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value clob, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value number, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value binary_double, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value boolean, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_list, _position integer default null); + + procedure gs_remove(pj pljson, pair_name varchar2); + + function gs_count(pj pljson) return number; + function gs_get(pj pljson, pair_name varchar2) return pljson_value; + function gs_get_string(pj pljson, pair_name varchar2) return varchar2; + function gs_get_clob(pj pljson, pair_name varchar2) return clob; + function gs_get_bool(pj pljson, pair_name varchar2) return boolean; + function gs_get_number(pj pljson, pair_name varchar2) return number; + function gs_get_double(pj pljson, pair_name varchar2) return binary_double; + function gs_get_pljson(pj pljson, pair_name varchar2) return pljson; + function gs_get_pljson_list(pj pljson, pair_name varchar2) return pljson_list; + function gs_get(pj pljson, _position integer) return pljson_value; + + function gs_index_of(pj pljson, pair_name varchar2) return number; + function gs_exist(pj pljson, pair_name varchar2) return boolean; + function gs_to_json_value(pj pljson) return pljson_value; + procedure gs_check_duplicate(pj inout pljson, v_set boolean); + procedure gs_remove_duplicates(pj inout pljson); + + function gs_to_char(pj pljson, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure gs_to_clob(pj pljson, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure gs_print(pj pljson, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure htp(pj pljson, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function gs_path(pj pljson, json_path varchar2, base number default 1) return pljson_value; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_value, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem varchar2, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem clob, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem boolean, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem number, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem binary_double, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_list, base number default 1); + + procedure gs_path_remove(pj inout pljson, json_path varchar2, base number default 1); + + function gs_get_keys(pj pljson) return pljson_list; + function gs_get_values(pj pljson) return pljson_list; + +end pljson; +/ +create or replace package pljson_ext as + + function 
gs_parsePath(json_path varchar2, base number default 1) return pljson_list; + + --JSON Path getters + function gs_get_json_value(obj pljson, v_path varchar2, base number default 1) return pljson_value; + function gs_get_string(obj pljson, path varchar2, base number default 1) return varchar2; + function gs_get_bool(obj pljson, path varchar2, base number default 1) return boolean; + function gs_get_number(obj pljson, path varchar2, base number default 1) return number; + function gs_get_double(obj pljson, path varchar2, base number default 1) return binary_double; + function gs_get_json(obj pljson, path varchar2, base number default 1) return pljson; + function gs_get_json_list(obj pljson, path varchar2, base number default 1) return pljson_list; + + --JSON Path putters + procedure gs_put(obj inout pljson, path varchar2, elem pljson_value, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem varchar2, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem boolean, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem number, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem binary_double, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem pljson, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem pljson_list, base number default 1); + + procedure gs_remove(obj inout pljson, path varchar2, base number default 1); + + --Pretty print with JSON Path + --function pp(obj pljson, v_path varchar2) return varchar2; + procedure gs_pp(obj pljson, v_path varchar2); + procedure pp_htp(obj pljson, v_path varchar2); + + -- date function + format_string varchar2(30) := 'yyyy-mm-dd hh24:mi:ss'; + function gs_is_integer(v pljson_value) return boolean; + function gs_to_json_value(d date) return pljson_value; + function gs_is_date(v pljson_value) return boolean; + function gs_to_date(v pljson_value) return date; + function gs_to_date2(v pljson_value) return date; + function gs_get_date(obj pljson, path varchar2, base number default 1) return date; + procedure gs_put(obj inout pljson, path varchar2, elem date, base number default 1); + + function gs_encodeBase64Blob2Clob(p_blob blob) return clob; + function gs_decodeBase64Clob2Blob(p_clob clob) return blob; + + function gs_base64(binarydata blob) return pljson_list; + function gs_base64(l pljson_list) return blob; + + function gs_encode(binarydata blob) return pljson_value; + function gs_decode(v pljson_value) return blob; + + procedure gs_blob2clob(b blob, c out clob, charset varchar2 default 'UTF8'); + +end pljson_ext; +/ +create type rToken as ( + type_name varchar2(7), + line integer, + col integer, + data varchar2(32767), + data_overflow clob +); +create type json_src as ( + len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob +); +create or replace package pljson_parser as +/* +create type rToken as ( + type_name varchar2(7), + line integer, + col integer, + data varchar2(32767), + data_overflow clob +); + +create type json_src as ( + len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob +); +*/ + +/* + type rToken is record ( + type_name varchar2(7), + line integer, + col integer, + data varchar2(32767), + data_overflow clob); + type rToken[] is table of rToken index by integer; + type json_src is record (len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob); +*/ + + json_strict boolean not null := 
false; + + -- private + -- function gs_lengthcc(buf clob) return number; + -- function gs_prepareVarchar2(buf varchar2) return json_src; + -- function gs_prepareClob(buf clob) return json_src; + -- function gs_next_char(indx number, s inout json_src) return varchar2; + -- function gs_next_char2(indx number, s inout json_src, amount number default 1) return varchar2; + -- function gs_lexer(jsrc inout json_src) return rToken[]; + -- function gs_parseObj(tokens rToken[], indx inout integer) return pljson; + -- procedure print_token(t rToken); + + -- public + function gs_parser(str varchar2) return pljson; + function gs_parse_list(str varchar2) return pljson_list; + function gs_parse_any(str varchar2) return pljson_value; + function gs_parser(str clob) return pljson; + function gs_parse_list(str clob) return pljson_list; + function gs_parse_any(str clob) return pljson_value; + + procedure gs_remove_duplicates(obj inout pljson); + function gs_get_version() return varchar2; + +end pljson_parser; +/ +create or replace package body pljson_parser as + + decimalpoint varchar2(1) := '.'; + + procedure s_error(text varchar2, line number, col number) as + begin + raise exception 'JSON Scanner exception'; + end; + + procedure s_error(text varchar2, tok rToken) as + begin + raise exception 'JSON Scanner exception'; + end; + + procedure p_error(text varchar2, tok rToken) as + begin + raise exception 'JSON Parser exception'; + end; + + -- make token + function mt(t varchar2, l integer, c integer, d varchar2) return rToken as + token rToken; + begin + token.type_name := t; + token.line := l; + token.col := c; + token.data := d; + return token; + end; + + procedure print_token(t rToken) as + begin + dbe_output.print_line('Line: '||t.line||' - Column: '||t.col||' - Type: '||t.type_name||' - Content: '||t.data); + end; + + function gs_lengthcc(buf clob) return number as + _offset number := 0; + len number := 0; + src varchar2(32767); + src_len number; + begin + while true loop + -- begin + src := dbe_lob.substr(buf, 4000, _offset+1); + -- exception + -- when ucs2_exception then + -- src := dbe_lob.substr(buf, 3999, offset+1); + -- end; + exit when src is null; + len := len + length(src); + _offset := _offset + length(src); --length2 + end loop; + return len; + end; + + -- procedure update_decimalpoint as + -- begin + -- select substr(value, 1, 1) + -- into decimalpoint + -- from nls_session_parameters + -- where parameter = 'NLS_NUMERIC_CHARACTERS'; + -- end; + function gs_prepareVarchar2(buf varchar2) return json_src as + temp json_src; + begin + temp.s_clob := buf; + temp.offset_chars := 0; + temp._offset := 0; + temp.src := substr(buf, 1, 4000); + temp.len := length(buf); + return temp; + end; + + function gs_prepareClob(buf clob) return json_src as + temp json_src; + begin + temp.s_clob := buf; + temp.offset_chars := 0; + temp._offset := 0; + temp.src := dbe_lob.substr(buf, 4000, temp._offset+1); + temp.len := gs_lengthcc(buf); --dbe_lob.get_length(buf); + return temp; + end; + + procedure gs_updateClob(v_extended inout clob, v_str varchar2) as + begin + dbe_lob.write_append(v_extended, length(v_str), v_str); + end; + + function gs_next_char(indx number, s inout json_src) return varchar2 as + begin + + if (indx > s.len) then + return null; + end if; + + if (indx > length(s.src) + s.offset_chars) then + while (indx > length(s.src) + s.offset_chars) loop + s.offset_chars := s.offset_chars + length(s.src); + s._offset := s._offset + length(s.src); -- length2 + -- begin exception + s.src := 
dbe_lob.substr(s.s_clob, 4000, s._offset+1); + end loop; + elsif (indx <= s.offset_chars) then + s.offset_chars := 0; + s._offset := 0; + -- begin exception (substr exception?) + s.src := dbe_lob.substr(s.s_clob, 4000, s._offset+1); + while (indx > length(s.src) + s.offset_chars) loop + s.offset_chars := s.offset_chars + length(s.src); + s._offset := s._offset + length(s.src); --length2 + s.src := dbe_lob.substr(s.s_clob, 4000, s._offset+1); + end loop; + end if; + + return substr(s.src, indx-s.offset_chars, 1); + end; + + function gs_next_char2(indx number, s inout json_src, amount number default 1) return varchar2 as + buf varchar2(32767) := ''; + begin + for i in 1..amount loop + buf := buf || gs_next_char(indx-1+i, s); + end loop; + return buf; + end; + + -- [a-zA-Z]([a-zA-Z0-9])* + procedure gs_lexName(jsrc inout json_src, tok inout rToken, indx inout integer) as + varbuf varchar2(32767) := ''; + buf varchar(4); + num number; + begin + buf := gs_next_char(indx, jsrc); + while (REGEXP_LIKE(buf, '^[[:alnum:]\_]$', 'i')) loop + varbuf := varbuf || buf; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + if (buf is null) then + goto retname; + --debug('Premature string ending'); + end if; + end loop; + <<retname>> + --could check for reserved keywords here + --debug(varbuf); + tok.data := varbuf; + indx := indx - 1; + end; + + procedure gs_lexNumber(jsrc inout json_src, tok inout rToken, indx inout integer) as + numbuf varchar2(4000) := ''; + buf varchar2(4); + checkLoop boolean; + begin + buf := gs_next_char(indx, jsrc); + if (buf = '-') then numbuf := '-'; indx := indx + 1; end if; + buf := gs_next_char(indx, jsrc); + --0 or [1-9]([0-9])* + if (buf = '0') then + numbuf := numbuf || '0'; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + elsif (buf >= '1' and buf <= '9') then + numbuf := numbuf || buf; indx := indx + 1; + --read digits + buf := gs_next_char(indx, jsrc); + while (buf >= '0' and buf <= '9') loop + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end loop; + end if; + --fraction + if (buf = '.') then + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + checkLoop := FALSE; + while (buf >= '0' and buf <= '9') loop + checkLoop := TRUE; + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end loop; + if (not checkLoop) then + s_error('Expected: digits in fraction', tok); + end if; + end if; + --exp part + if (buf in ('e', 'E')) then + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + if (buf = '+' or buf = '-') then + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end if; + checkLoop := FALSE; + while (buf >= '0' and buf <= '9') loop + checkLoop := TRUE; + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end loop; + if (not checkLoop) then + s_error('Expected: digits in exp', tok); + end if; + end if; + + tok.data := numbuf; + end; + + procedure gs_lexString(jsrc inout json_src, tok inout rToken, indx inout integer, endChar char) as + v_extended clob := null; + v_count number := 0; + varbuf varchar2(32767) := ''; + buf varchar(4); + wrong boolean; + max_string_chars number := 5000; + begin + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + while (buf != endChar) loop + --clob control + if (v_count > 8191) then + if (v_extended is null) then + dbe_lob.create_temporary(v_extended, true); + end if; + gs_updateClob(v_extended, varbuf); --unistr() + varbuf := ''; + v_count := 0; + end if; + if (buf 
= Chr(13) or buf = CHR(9) or buf = CHR(10)) then + s_error('Control characters not allowed (CHR(9),CHR(10),CHR(13))', tok); + end if; + if (buf = '\') then + --varbuf := varbuf || buf; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + case + when buf in ('\') then + varbuf := varbuf || buf || buf; v_count := v_count + 2; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + when buf in ('"', '/') then + varbuf := varbuf || buf; v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + when buf = '''' then + if (json_strict = false) then + varbuf := varbuf || buf; v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + else + s_error('strictmode - expected: " \ / b f n r t u ', tok); + end if; + when buf in ('b', 'f', 'n', 'r', 't') then + --backspace b = U+0008 + --formfeed f = U+000C + --newline n = U+000A + --carret r = U+000D + --tabulator t = U+0009 + case buf + when 'b' then varbuf := varbuf || chr(8); + when 'f' then varbuf := varbuf || chr(12); + when 'n' then varbuf := varbuf || chr(10); + when 'r' then varbuf := varbuf || chr(13); + when 't' then varbuf := varbuf || chr(9); + end case; + --varbuf := varbuf || buf; + v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + when buf = 'u' then + --four hexadecimal chars + declare + four varchar2(4); + begin + four := gs_next_char2(indx+1, jsrc, 4); + wrong := FALSE; + if (upper(substr(four, 1, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (upper(substr(four, 2, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (upper(substr(four, 3, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (upper(substr(four, 4, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (wrong) then + s_error('expected: " \u([0-9][A-F]){4}', tok); + end if; + -- varbuf := varbuf || buf || four; + varbuf := varbuf || '\'||four;--chr(to_number(four,'XXXX')); + v_count := v_count + 5; + indx := indx + 5; + buf := gs_next_char(indx, jsrc); + end; + else + s_error('expected: " \ / b f n r t u ', tok); + end case; + else + varbuf := varbuf || buf; + v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end if; + end loop; + + if (buf is null) then + s_error('string ending not found', tok); + end if; + + if (v_extended is not null) then + gs_updateClob(v_extended, varbuf); + tok.data_overflow := v_extended; + tok.data := PKG_UTIL.lob_read(v_extended, max_string_chars, 1, 0); + else + tok.data := varbuf; + end if; + end; + + --function gs_lexer(jsrc inout json_src) return rToken[] as + procedure gs_lexer(jsrc inout json_src, tokens out rToken[]) as + -- tokens rToken[]; + indx integer := 1; + tok_indx integer := 1; + buf varchar2(4); + lin_no number := 1; + col_no number := 0; + begin + while (indx <= jsrc.len) loop + --read into buf + buf := gs_next_char(indx, jsrc); + col_no := col_no + 1; + --convert to switch case + case + when buf = '{' then tokens[tok_indx] := mt('{', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = '}' then tokens[tok_indx] := mt('}', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = ',' then tokens[tok_indx] := mt(',', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = 
':' then tokens[tok_indx] := mt(':', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = '[' then tokens[tok_indx] := mt('[', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = ']' then tokens[tok_indx] := mt(']', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = 't' then + if (gs_next_char2(indx, jsrc, 4) != 'true') then + if (json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i')) then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + else + s_error('Expected: ''true''', lin_no, col_no); + end if; + else + tokens[tok_indx] := mt('TRUE', lin_no, col_no, null); tok_indx := tok_indx + 1; + indx := indx + 3; + col_no := col_no + 3; + end if; + when buf = 'n' then + if (gs_next_char2(indx, jsrc, 4) != 'null') then + if (json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i')) then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + else + s_error('Expected: ''null''', lin_no, col_no); + end if; + else + tokens[tok_indx] := mt('NULL', lin_no, col_no, null); tok_indx := tok_indx + 1; + indx := indx + 3; + col_no := col_no + 3; + end if; + when buf = 'f' then + if (gs_next_char2(indx, jsrc, 5) != 'false') then + if (json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i')) then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + else + s_error('Expected: ''false''', lin_no, col_no); + end if; + else + tokens[tok_indx] := mt('FALSE', lin_no, col_no, null); tok_indx := tok_indx + 1; + indx := indx + 4; + col_no := col_no + 4; + end if; + /* -- 9 = TAB, 10 = \n, 13 = \r (Linux = \n, Windows = \r\n, Mac = \r */ + when (buf = Chr(10)) then --linux newlines + lin_no := lin_no + 1; + col_no := 0; + when (buf = Chr(13)) then --Windows or Mac way + lin_no := lin_no + 1; + col_no := 0; + if (jsrc.len >= indx+1) then -- better safe than sorry + buf := gs_next_char(indx+1, jsrc); + if (buf = Chr(10)) then --\r\n + indx := indx + 1; + end if; + end if; + when (buf = CHR(9)) then + null; --tabbing + when (buf in ('-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9')) then --number + tokens[tok_indx] := mt('NUMBER', lin_no, col_no, null); + gs_lexNumber(jsrc, tokens[tok_indx], indx); + indx := indx - 1; + col_no := col_no + length(tokens[tok_indx].data); + tok_indx := tok_indx + 1; + when buf = '"' then --string + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + -- len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob + -- dbe_output.print_line('len: '||jsrc.len||' - offset: '||jsrc._offset||' - offset_chars: '||jsrc.offset_chars||' - src: '||jsrc.src); + gs_lexString(jsrc, tokens[tok_indx], indx, '"'); + -- dbe_output.print_line('len: '||jsrc.len||' - offset: '||jsrc._offset||' - offset_chars: '||jsrc.offset_chars||' - src: '||jsrc.src); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + when buf = '''' and json_strict = false then --string + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexString(jsrc, tokens[tok_indx], indx, ''''); + col_no := col_no + length(tokens[tok_indx].data) + 1; --hovsa her + tok_indx := tok_indx + 1; + when json_strict = false and 
REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i') then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + if (tokens[tok_indx].data_overflow is not null) then + col_no := col_no + gs_lengthcc(tokens[tok_indx].data_overflow) + 1; --dbe_lob.get_length(tokens[tok_indx].data_overflow) + 1; + else + col_no := col_no + length(tokens[tok_indx].data) + 1; + end if; + tok_indx := tok_indx + 1; + when json_strict = false and buf||gs_next_char(indx+1, jsrc) = '/*' then --strip comments + declare + saveindx number := indx; + un_esc clob; + begin + indx := indx + 1; + loop + indx := indx + 1; + buf := gs_next_char(indx, jsrc)||gs_next_char(indx+1, jsrc); + exit when buf = '*/'; + exit when buf is null; + end loop; + + if (indx = saveindx+2) then + -- enter unescaped mode + -- un_esc := empty_clob(); + dbe_lob.create_temporary(un_esc, true); + indx := indx + 1; + loop + indx := indx + 1; + buf := gs_next_char(indx, jsrc)||gs_next_char(indx+1, jsrc)||gs_next_char(indx+2, jsrc)||gs_next_char(indx+3, jsrc); + exit when buf = '/**/'; + if buf is null then + s_error('Unexpected sequence /**/ to end unescaped data: '||buf, lin_no, col_no); + end if; + buf := gs_next_char(indx, jsrc); + dbe_lob.write_append(un_esc, length(buf), buf); + end loop; + tokens[tok_indx] := mt('ESTRING', lin_no, col_no, null); + tokens[tok_indx].data_overflow := un_esc; + col_no := col_no + gs_lengthcc(un_esc) + 1; --dbe_lob.get_length(un_esc) + 1; + tok_indx := tok_indx + 1; + indx := indx + 2; + end if; + indx := indx + 1; + end; + when buf = ' ' then null; --space + else + s_error('Unexpected char: '||buf, lin_no, col_no); + end case; + indx := indx + 1; + end loop; + + end; + + /* PARSER FUNCTIONS START */ + procedure gs_parseObj(tokens rToken[], indx inout integer, obj out pljson); + + -- parse array + procedure gs_parseArr(tokens rToken[], indx inout integer, ret_list out pljson_list) as + e_arr pljson_value[]; + v_count number := 0; + tok rToken; + pv pljson_value; + begin + --value, value, value ] + if (indx > tokens.count) then p_error('more elements in array were expected', tok); end if; + tok := tokens[indx]; + while (tok.type_name != ']') loop + v_count := v_count + 1; + + -- print_token(tok); + + case tok.type_name + when 'TRUE' then e_arr[v_count] := pljson_value.gs_pljson_value(true); + when 'FALSE' then e_arr[v_count] := pljson_value.gs_pljson_value(false); + when 'NULL' then e_arr[v_count] := pljson_value.gs_pljson_value(); + when 'STRING' then + if tok.data_overflow is not null then + e_arr[v_count] := pljson_value.gs_pljson_value(tok.data_overflow); + else + e_arr[v_count] := pljson_value.gs_pljson_value(tok.data); + end if; + when 'ESTRING' then + e_arr[v_count] := pljson_value.gs_pljson_value(tok.data_overflow, false); + when 'NUMBER' then + pv := pljson_value.gs_pljson_value(0); + pljson_value.gs_parse_number(pv, replace(tok.data, '.', decimalpoint)); + e_arr[v_count] := pv; + when '[' then + declare + e_list pljson_list; + begin + indx := indx + 1; + gs_parseArr(tokens, indx, e_list); + e_arr[v_count] := pljson_list.gs_to_json_value(e_list); + end; + when '{' then + declare + temp_pj pljson; + begin + indx := indx + 1; + gs_parseObj(tokens, indx, temp_pj); + e_arr[v_count] := pljson.gs_to_json_value(temp_pj); + end; + else + p_error('Expected a value', tok); + end case; + indx := indx + 1; + if (indx > tokens.count) then p_error('] not found', tok); end if; + tok := tokens[indx]; + if (tok.type_name = ',') then --advance + indx := indx + 1; + if (indx > tokens.count) 
then p_error('more elements in array were expected', tok); end if; + tok := tokens[indx]; + if (tok.type_name = ']') then --premature exit + p_error('Premature exit in array', tok); + end if; + elsif (tok.type_name != ']') then --error + p_error('Expected , or ]', tok); + end if; + end loop; + ret_list.pljson_list_data := e_arr; + end; + + -- parse member + procedure gs_parseMem(tokens rToken[], indx inout integer, mem_name varchar2, mem_indx number, mem out pljson_value) as + tok rToken; + pv pljson_value; + + begin + tok := tokens[indx]; + + -- print_token(tok); + + case tok.type_name + when 'TRUE' then mem := pljson_value.gs_pljson_value(true); + when 'FALSE' then mem := pljson_value.gs_pljson_value(false); + when 'NULL' then mem := pljson_value.gs_pljson_value(); + when 'STRING' then + if tok.data_overflow is not null then + mem := pljson_value.gs_pljson_value(tok.data_overflow); + else + mem := pljson_value.gs_pljson_value(tok.data); + end if; + when 'ESTRING' then mem := pljson_value.gs_pljson_value(tok.data_overflow, false); + when 'NUMBER' then + pv := pljson_value.gs_pljson_value(0); + pljson_value.gs_parse_number(pv, replace(tok.data, '.', decimalpoint)); + mem := pv; + when '[' then + declare + e_list pljson_list; + begin + indx := indx + 1; + gs_parseArr(tokens, indx, e_list); + mem := pljson_list.gs_to_json_value(e_list); + end; + when '{' then + declare + temp_pj pljson; + begin + indx := indx + 1; + gs_parseObj(tokens, indx, temp_pj); + mem := pljson.gs_to_json_value(temp_pj); + end; + else + p_error('Found '||tok.type_name, tok); + end case; + mem.mapname := mem_name; + mem.mapindx := mem_indx; + indx := indx + 1; + end; + + procedure gs_parseObj(tokens rToken[], indx inout integer, obj out pljson) as + type memmap is table of number index by varchar2(4000); + mymap memmap; + nullelemfound boolean := false; + + + tok rToken; + mem_name varchar(4000); + arr pljson_value[] := array[]::pljson_value[]; + begin + + while (indx <= tokens.count) loop + tok := tokens[indx]; + case tok.type_name + when 'STRING' then + --member + mem_name := substr(tok.data, 1, 4000); + -- begin exception + if (mem_name is null) then + if (nullelemfound) then + p_error('Duplicate empty member: ', tok); + else + nullelemfound := true; + end if; + elsif (mymap(mem_name) is not null) then + p_error('Duplicate member name: '||mem_name, tok); + end if; + + indx := indx + 1; + if (indx > tokens.count) then p_error('Unexpected end of input', tok); end if; + tok := tokens[indx]; + indx := indx + 1; + if (indx > tokens.count) then p_error('Unexpected end of input', tok); end if; + if (tok.type_name = ':') then + --parse + declare + jmb pljson_value; + x number; + begin + x := arr.count + 1; + gs_parseMem(tokens, indx, mem_name, x, jmb); + arr.extend; + arr[x] := jmb; + end; + + else + p_error('Expected '':''', tok); + end if; + --move indx forward if ',' is found + if (indx > tokens.count) then p_error('Unexpected end of input', tok); end if; + + tok := tokens[indx]; + if (tok.type_name = ',') then + + indx := indx + 1; + tok := tokens[indx]; + if (tok.type_name = '}') then --premature exit + p_error('Premature exit in json object', tok); + end if; + elsif (tok.type_name != '}') then + p_error('A comma separator is probably missing', tok); + end if; + when '}' then + obj := pljson.gs_pljson(); + obj.pljson_list_data := arr; + return; + else + p_error('Expected string or }', tok); + end case; + end loop; + + p_error('} not found', tokens[indx-1]); + end; + + function gs_parser(str varchar2) return pljson as + 
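+ -- Parses a complete JSON object from a varchar2 source: the buffer is
+ -- wrapped by gs_prepareVarchar2, tokenized by gs_lexer, and the token
+ -- stream must open with '{' and be fully consumed by gs_parseObj.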
tokens rToken[]; + obj pljson; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareVarchar2(str); + gs_lexer(jsrc, tokens); + if (tokens[indx].type_name = '{') then + indx := indx + 1; + gs_parseObj(tokens, indx, obj); + else + raise exception 'JSON Parser exception - no { start found'; + end if; + if (tokens.count != indx) then + p_error('} should end the JSON object', tokens[indx]); + end if; + + return obj; + end; + + function gs_parse_list(str varchar2) return pljson_list as + tokens rToken[]; + obj pljson_list; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareVarchar2(str); + gs_lexer(jsrc, tokens); + if (tokens[indx].type_name = '[') then + indx := indx + 1; + gs_parseArr(tokens, indx, obj); + else + raise exception 'JSON List Parser exception - no [ start found'; + end if; + if (tokens.count != indx) then + p_error('] should end the JSON List object', tokens[indx]); + end if; + + return obj; + end; + + function gs_parse_any(str varchar2) return pljson_value as + tokens rToken[]; + obj pljson_list; + ret pljson_value; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareVarchar2(str); + gs_lexer(jsrc, tokens); + tokens[tokens.count+1].type_name := ']'; + gs_parseArr(tokens, indx, obj); + if (tokens.count != indx) then + p_error('] should end the JSON List object', tokens[indx]); + end if; + ret = pljson_list.gs_head(obj); + return ret; + end; + + function gs_parser(str clob) return pljson as + tokens rToken[]; + obj pljson; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + --dbe_output.print_line('Using clob'); + jsrc := gs_prepareClob(str); + gs_lexer(jsrc, tokens); + + -- for i in 1 .. tokens.count loop + -- print_token(tokens[i]); + -- end loop; + + if (tokens[indx].type_name = '{') then + indx := indx + 1; + gs_parseObj(tokens, indx, obj); + else + raise exception 'JSON Parser exception - no { start found'; + end if; + if (tokens.count != indx) then + p_error('} should end the JSON object', tokens[indx]); + end if; + + return obj; + end; + + function gs_parse_list(str clob) return pljson_list as + tokens rToken[]; + obj pljson_list; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareClob(str); + gs_lexer(jsrc, tokens); + if (tokens[indx].type_name = '[') then + indx := indx + 1; + gs_parseArr(tokens, indx, obj); + else + raise exception 'JSON List Parser exception - no [ start found'; + end if; + if (tokens.count != indx) then + p_error('] should end the JSON List object', tokens[indx]); + end if; + + return obj; + end; + + + function gs_parse_any(str clob) return pljson_value as + tokens rToken[]; + obj pljson_list; + ret pljson_value; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareClob(str); + gs_lexer(jsrc, tokens); + tokens[tokens.count+1].type_name := ']'; + gs_parseArr(tokens, indx, obj); + if (tokens.count != indx) then + p_error('] should end the JSON List object', tokens[indx]); + end if; + ret = pljson_list.gs_head(obj); + return ret; + end; + + procedure gs_remove_duplicates(obj inout pljson) as + type memberlist is table of pljson_value index by varchar2(4000); + members memberlist; + nulljsonvalue pljson_value; + validated pljson; + indx varchar2(4000); + tmp pljson_value; + begin + + validated := pljson.gs_pljson(); + for i in 1 .. 
pljson.gs_count(obj) loop + tmp := pljson.gs_get(obj, i); + if (tmp.mapname is null) then + nulljsonvalue := pljson.gs_get(obj, i); + else + tmp := pljson.gs_get(obj, i); + members(tmp.mapname) := pljson.gs_get(obj, i); + end if; + end loop; + + pljson.gs_check_duplicate(validated, false); + indx := members.first; + loop + exit when indx is null; + pljson.gs_put(validated, indx, members(indx)); + indx := members.next(indx); + end loop; + + if (nulljsonvalue is not null) then + pljson.gs_put(validated, '', nulljsonvalue); + end if; + validated.check_for_duplicate := obj.check_for_duplicate; + obj := validated; + end; + + function gs_get_version() return varchar2 as + begin + return 'version1.0'; + end; + +end pljson_parser; +/ +create or replace package pljson_printer as + + indent_string varchar2(10) := ' '; --chr(9); for tab + --newline_char varchar2(2) := chr(10); -- Unix/Linux style (LF) + newline_char varchar2(2) := chr(13); -- carriage return (classic Mac style) + ascii_output boolean not null := true; + empty_string_as_null boolean not null := false; + escape_solidus boolean not null := false; + + function gs_pretty_print(obj pljson, spaces boolean default true, line_length number default 0) return varchar2; + function gs_pretty_print_list(obj pljson_list, spaces boolean default true, line_length number default 0) return varchar2; + function gs_pretty_print_any(json_part pljson_value, spaces boolean default true, line_length number default 0) return varchar2; + procedure gs_pretty_print(obj pljson, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true); + procedure gs_pretty_print_list(obj pljson_list, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true); + procedure gs_pretty_print_any(json_part pljson_value, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true); + + procedure gs_dbms_output_clob(my_clob clob, delim varchar2, jsonp varchar2 default null); + procedure htp_output_clob(my_clob clob, jsonp varchar2 default null); + -- made public just for testing/profiling... + function gs_escapeString(str varchar2) return varchar2; + +end pljson_printer; +/ +create or replace package body pljson_printer as + + max_line_len number := 0; + cur_line_len number := 0; + + type Tmap_char_string is table of varchar2(40) index by varchar2(1); /* index by unicode char */ + char_map Tmap_char_string; + char_map_escape_solidus boolean := escape_solidus; + char_map_ascii_output boolean := ascii_output; + + function gs_llcheck(str varchar2) return varchar2 as + begin + --dbe_output.print_line(cur_line_len || ' : ' || str); + if (max_line_len > 0 and length(str)+cur_line_len > max_line_len) then + cur_line_len := length(str); + return newline_char || str; + else + cur_line_len := cur_line_len + length(str); + return str; + end if; + end; + + -- escapes a single character. 
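+ -- JSON string escape mapping implemented by gs_escapeChar below:
+ --   chr(8) -> \b, chr(9) -> \t, chr(10) -> \n, chr(12) -> \f, chr(13) -> \r,
+ --   '"' -> \", '\' -> \\, '/' -> \/ (only when escape_solidus is true);
+ --   other control characters below chr(32) become \uXXXX and, when
+ --   ascii_output is true, non-ASCII characters are \u-escaped via asciistr.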
+ function gs_escapeChar(ch char) return varchar2 deterministic is + result varchar2(20); + begin + --backspace b = U+0008 + --formfeed f = U+000C + --newline n = U+000A + --carret r = U+000D + --tabulator t = U+0009 + result := ch; + + case ch + when chr( 8) then result := '\b'; + when chr( 9) then result := '\t'; + when chr(10) then result := '\n'; + when chr(12) then result := '\f'; + when chr(13) then result := '\r'; + when chr(34) then result := '\"'; + when chr(47) then if (escape_solidus) then result := '\/'; end if; + when chr(92) then result := '\\'; + else if (ascii(ch) >= 0 and ascii(ch) < 32) then + result := '\u' || replace(substr(to_char(ascii(ch), 'XXXX'), 2, 4), ' ', '0'); + elsif (ascii_output) then + result := replace(asciistr(ch), '\', '\u'); + end if; + end case; + return result; + end; + + function gs_escapeString(str varchar2) return varchar2 as + sb varchar2(32767) := ''; + buf varchar2(40); + ch varchar2(1); /* unicode char */ + begin + + if (str is null) then return ''; end if; + + -- clear the cache if global parameters have been changed + if char_map_escape_solidus <> escape_solidus or + char_map_ascii_output <> ascii_output + then + char_map.delete; + char_map_escape_solidus := escape_solidus; + char_map_ascii_output := ascii_output; + end if; + + for i in 1 .. length(str) loop + ch := substr(str, i, 1) ; + + --begin + -- it this char has already been processed, I have cached its escaped value + -- buf := char_map(ch); + + --exception when no_Data_found then + -- otherwise, i convert the value and add it to the cache + -- buf := gs_escapeChar(ch); + -- char_map(ch) := buf; + --end; + + buf := ch; + sb := sb || buf; + end loop; + return sb; + end; + + function gs_newline(spaces boolean) return varchar2 as + begin + cur_line_len := 0; + if (spaces) then return newline_char; else return ''; end if; + end; + + function gs_tab(indent number, spaces boolean) return varchar2 as + i varchar(200) := ''; + begin + if (not spaces) then return ''; end if; + for x in 1 .. indent loop i := i || indent_string; end loop; + return i; + end; + + function gs_getCommaSep(spaces boolean) return varchar2 as + begin + if (spaces) then return ', '; else return ','; end if; + end; + + function gs_getMemName(mem pljson_value, spaces boolean) return varchar2 as + begin + if (spaces) then + return gs_llcheck('"'||gs_escapeString(mem.mapname)||'"') || gs_llcheck(' : '); + else + return gs_llcheck('"'||gs_escapeString(mem.mapname)||'"') || gs_llcheck(':'); + end if; + end; + + /* Clob method start here */ + procedure gs_add_to_clob(buf_lob inout clob, buf_str inout varchar2, str varchar2) as + begin + if (lengthb(str) > 32767 - lengthb(buf_str)) then + dbe_lob.append(buf_lob, buf_str); + buf_str := str; + else + buf_str := buf_str || str; + end if; + end; + + procedure gs_flush_clob(buf_lob inout clob, buf_str inout varchar2) as + begin + dbe_lob.append(buf_lob, buf_str); + end; + /* Clob method end here */ + + /* Varchar2 method start here */ + procedure gs_add_buf(buf inout varchar2, str varchar2) as + begin + if (lengthb(str)>32767-lengthb(buf)) then + raise exception 'Length of result JSON more than 32767 bytes. 
Use to_clob() procedures'; + end if; + buf := buf || str; + end; + + procedure gs_ppString(elem pljson_value, buf inout varchar2) is + _offset number := 1; + v_str varchar(5000); + amount number := 5000; + begin + if empty_string_as_null and elem.extended_str is null and elem.str is null then + gs_add_buf(buf, 'null'); + else + -- gs_add_buf(buf, case when elem.num = 1 then '"' else '/**/' end); + if (elem.num = 1) then + gs_add_buf(buf, '"'); + else + gs_add_buf(buf, '/**/'); + end if; + + if (elem.extended_str is not null) then + while (_offset <= dbe_lob.get_length(elem.extended_str)) loop + v_str := PKG_UTIL.lob_read(elem.extended_str, amount, _offset, 0); + if (elem.num = 1) then + gs_add_buf(buf, gs_escapeString(v_str)); + else + gs_add_buf(buf, v_str); + end if; + _offset := _offset + amount; + end loop; + else + if (elem.num = 1) then + while (_offset <= length(elem.str)) loop + v_str:=substr(elem.str, _offset, amount); + gs_add_buf(buf, gs_escapeString(v_str)); + _offset := _offset + amount; + end loop; + else + gs_add_buf(buf, elem.str); + end if; + end if; + + -- gs_add_buf(buf, case when elem.num = 1 then '"' else '/**/' end); + if (elem.num = 1) then + gs_add_buf(buf, '"'); + else + gs_add_buf(buf, '/**/'); + end if; + end if; + end; + + procedure gs_ppObj(obj pljson, indent number, buf inout varchar2, spaces boolean); + + procedure gs_ppEA(input pljson_list, indent number, buf inout varchar2, spaces boolean) as + elem pljson_value; + arr pljson_value[]; + str varchar2(400); + begin + arr := input.pljson_list_data; + for y in 1 .. arr.count loop + elem := arr[y]; + + -- if (elem is not null) then + case elem.typeval + /* number */ + when 4 then + str := pljson_value.gs_number_toString(elem); + gs_add_buf(buf, gs_llcheck(str)); + /* string */ + when 3 then + gs_ppString(elem, buf); + /* bool */ + when 5 then + if (pljson_value.gs_get_bool(elem)) then + gs_add_buf(buf, gs_llcheck('true')); + else + gs_add_buf(buf, gs_llcheck('false')); + end if; + /* null */ + when 6 then + gs_add_buf(buf, gs_llcheck('null')); + /* array */ + when 2 then + gs_add_buf( buf, gs_llcheck('[')); + gs_ppEA(pljson_list.gs_pljson_list(elem), indent, buf, spaces); + gs_add_buf( buf, gs_llcheck(']')); + /* object */ + when 1 then + gs_ppObj(pljson.gs_pljson(elem), indent, buf, spaces); + else + gs_add_buf(buf, gs_llcheck(pljson_value.gs_get_type(elem))); + end case; + -- end if; + if (y != arr.count) then gs_add_buf(buf, gs_llcheck(gs_getCommaSep(spaces))); end if; + end loop; + end; + + -- Mem = Member + procedure gs_ppMem(mem pljson_value, indent number, buf inout varchar2, spaces boolean) as + str varchar2(400) := ''; + begin + gs_add_buf(buf, gs_llcheck(gs_tab(indent, spaces)) || gs_getMemName(mem, spaces)); + case mem.typeval + /* number */ + when 4 then + str := pljson_value.gs_number_toString(mem); + gs_add_buf(buf, gs_llcheck(str)); + /* string */ + when 3 then + gs_ppString(mem, buf); + /* bool */ + when 5 then + if (pljson_value.gs_get_bool(mem)) then + gs_add_buf(buf, gs_llcheck('true')); + else + gs_add_buf(buf, gs_llcheck('false')); + end if; + /* null */ + when 6 then + gs_add_buf(buf, gs_llcheck('null')); + /* array */ + when 2 then + gs_add_buf(buf, gs_llcheck('[')); + gs_ppEA(pljson_list.gs_pljson_list(mem), indent, buf, spaces); + gs_add_buf(buf, gs_llcheck(']')); + /* object */ + when 1 then + gs_ppObj(pljson.gs_pljson(mem), indent, buf, spaces); + else + gs_add_buf(buf, gs_llcheck(pljson_value.gs_get_type(mem))); + end case; + end; + + procedure gs_ppObj(obj pljson, indent number, buf 
inout varchar2, spaces boolean) as + begin + gs_add_buf(buf, gs_llcheck('{') || gs_newline(spaces)); + for m in 1 .. obj.pljson_list_data.count loop + gs_ppMem(obj.pljson_list_data[m], indent+1, buf, spaces); + if (m != obj.pljson_list_data.count) then + gs_add_buf(buf, gs_llcheck(',') || gs_newline(spaces)); + else + gs_add_buf(buf, gs_newline(spaces)); + end if; + end loop; + gs_add_buf(buf, gs_llcheck(gs_tab(indent, spaces)) || gs_llcheck('}')); -- || chr(13); + end; + + function gs_pretty_print(obj pljson, spaces boolean default true, line_length number default 0) return varchar2 as + buf varchar2(32767) := ''; + begin + max_line_len := line_length; + cur_line_len := 0; + gs_ppObj(obj, 0, buf, spaces); + return buf; + end; + + function gs_pretty_print_list(obj pljson_list, spaces boolean default true, line_length number default 0) return varchar2 as + buf varchar2(32767) :=''; + begin + max_line_len := line_length; + cur_line_len := 0; + gs_add_buf(buf, gs_llcheck('[')); + gs_ppEA(obj, 0, buf, spaces); + gs_add_buf(buf, gs_llcheck(']')); + return buf; + end; + + function gs_pretty_print_any(json_part pljson_value, spaces boolean default true, line_length number default 0) return varchar2 as + buf varchar2(32767) := ''; + begin + case json_part.typeval + /* number */ + when 4 then + buf := pljson_value.gs_number_toString(json_part); + /* string */ + when 3 then + gs_ppString(json_part, buf); + /* bool */ + when 5 then + if (pljson_value.gs_get_bool(json_part)) then buf := 'true'; else buf := 'false'; end if; + /* null */ + when 6 then + buf := 'null'; + /* array */ + when 2 then + buf := gs_pretty_print_list(pljson_list.gs_pljson_list(json_part), spaces, line_length); + /* object */ + when 1 then + buf := gs_pretty_print(pljson.gs_pljson(json_part), spaces, line_length); + else + buf := 'weird error: ' || pljson_value.gs_get_type(json_part); + end case; + return buf; + end; + + procedure gs_pretty_print(obj pljson, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true) as + buf_str varchar2(32767); + amount number := dbe_lob.get_length(buf); + begin + if (erase_clob and amount > 0) then + dbe_lob.STRIP(buf, 0); + end if; + + buf_str := gs_pretty_print(obj, spaces, line_length); + gs_flush_clob(buf, buf_str); + end; + + procedure gs_pretty_print_list(obj pljson_list, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true) as + buf_str varchar2(32767); + amount number := dbe_lob.get_length(buf); + begin + if (erase_clob and amount > 0) then + dbe_lob.STRIP(buf, 0); + end if; + + buf_str := gs_pretty_print_list(obj, spaces, line_length); + gs_flush_clob(buf, buf_str); + end; + + procedure gs_pretty_print_any(json_part pljson_value, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true) as + buf_str varchar2(32767) := ''; + amount number := dbe_lob.get_length(buf); + begin + if (erase_clob and amount > 0) then + dbe_lob.STRIP(buf, 0); + end if; + + buf_str := gs_pretty_print_any(json_part, spaces, line_length); + gs_flush_clob(buf, buf_str); + end; + + procedure gs_dbms_output_clob(my_clob clob, delim varchar2, jsonp varchar2 default null) as + prev number := 1; + indx number := 1; + size_of_nl number := length(delim); + v_str varchar2(32767); + amount number; + max_string_chars number := 5000; + begin + + if (jsonp is not null) then dbe_output.print_line(jsonp||'('); end if; + while (indx != 0) loop + --read every line + indx := 
dbe_lob.match(my_clob, delim, prev+1); + + if (indx = 0) then + --emit from prev to end; + amount := max_string_chars; + + loop + -- dbe_lob.read(my_clob, amount, prev, v_str); dbe_lob.read not exists + v_str := PKG_UTIL.lob_read(my_clob, amount, prev, 0); + + dbe_output.print_line(v_str); + prev := prev+amount; + exit when prev >= dbe_lob.get_length(my_clob); + end loop; + else + amount := indx - prev; + if (amount > max_string_chars) then + amount := max_string_chars; + + loop + -- dbe_lob.read(my_clob, amount, prev, v_str); dbe_lob.read not exists + v_str := PKG_UTIL.lob_read(my_clob, amount, prev, 0); + + dbe_output.print_line(v_str); + prev := prev+amount; + amount := indx - prev; + exit when prev >= indx - 1; + if (amount > max_string_chars) then + amount := max_string_chars; + end if; + end loop; + prev := indx + size_of_nl; + else + -- dbe_lob.read(my_clob, amount, prev, v_str); dbe_lob.read not exists + v_str := PKG_UTIL.lob_read(my_clob, amount, prev, 0); + dbe_output.print_line(v_str); + prev := indx + size_of_nl; + end if; + end if; + + end loop; + if (jsonp is not null) then dbe_output.print_line(')'); end if; + end; + + procedure htp_output_clob(my_clob clob, jsonp varchar2 default null) as + l_amt number default 4096; + l_off number default 1; + l_str varchar2(32000); + begin + raise NOTICE '%', 'NOT SUPPORTED NOW'; + -- if (jsonp is not null) then htp.prn(jsonp||'('); end if; + --begin + -- loop + -- dbe_lob.read( my_clob, l_amt, l_off, l_str); + -- htp.prn( l_str ); + -- l_off := l_off+l_amt; + -- end loop; + --exception + -- when no_data_found then NULL; + --end; + -- if (jsonp is not null) then htp.prn(')'); end if; + end; + +end pljson_printer; +/ +create or replace package body pljson_ext as + + /* + procedure gs_next_char as + begin + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end; + --skip ws + procedure skipws as begin while (buf in (chr(9), chr(10), chr(13), ' ')) loop gs_next_char; end loop; end; + */ + + --JSON Path parser + function gs_parsePath(json_path varchar2, base number default 1) return pljson_list as + build_path varchar2(32767) := '['; + buf varchar2(4); + endstring varchar2(1); + indx number := 1; + ret pljson_list; + begin + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + + while (buf is not null) loop + if (buf = '.') then + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + + if (buf is null) then + raise exception 'JSON Path parse error: . 
is not a valid json_path end'; + end if; + if (not regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) then + raise exception 'JSON Path parse error: alpha-numeric character'; + end if; + + if (build_path != '[') then + build_path := build_path || ','; + end if; + + build_path := build_path || '"'; + while (regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + build_path := build_path || '"'; + + elsif (buf = '[') then + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + --skip ws + while (buf in (chr(9), chr(10), chr(13), ' ')) loop + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + + if (buf is null) then + raise exception 'JSON Path parse error: [ is not a valid json_path end'; + end if; + if (buf in ('1','2','3','4','5','6','7','8','9') or (buf = '0' and base = 0)) then + if (build_path != '[') then + build_path := build_path || ','; + end if; + while (buf in ('0','1','2','3','4','5','6','7','8','9')) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + elsif (regexp_like(buf, '^(\"|\'')', 'c')) then + endstring := buf; + if (build_path != '[') then + build_path := build_path || ','; + end if; + build_path := build_path || '"'; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + + if (buf is null) then + raise exception 'JSON Path parse error: premature json_path end'; + end if; + while (buf != endstring) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + if (buf is null) then + raise exception 'JSON Path parse error: premature json_path end'; + end if; + if (buf = '\') then + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + build_path := build_path || '\' || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end if; + end loop; + build_path := build_path || '"'; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + else + raise exception 'JSON Path parse error'; + end if; + --skip ws + while (buf in (chr(9), chr(10), chr(13), ' ')) loop + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + if (buf is null) then + raise exception 'JSON Path parse error: premature json_path end'; + end if; + if (buf != ']') then + raise exception 'JSON Path parse error: no array ending found. 
found: '; + end if; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + --skip ws + while (buf in (chr(9), chr(10), chr(13), ' ')) loop + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + elsif (build_path = '[') then + if (not regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) then + raise exception 'JSON Path parse error'; + end if; + build_path := build_path || '"'; + while (regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + build_path := build_path || '"'; + else + raise exception 'JSON Path parse error'; + end if; + + end loop; + + build_path := build_path || ']'; + build_path := replace(replace(replace(replace(replace(build_path, chr(9), '\t'), chr(10), '\n'), chr(13), '\f'), chr(8), '\b'), chr(14), '\r'); + + ret := pljson_list.gs_pljson_list(build_path); + if (base != 1) then + --fix base 0 to base 1 + declare + elem pljson_value; + begin + for i in 1 .. ret.count loop + elem := pljson_list.gs_get(ret, i); + if (pljson_value.gs_is_number(elem)) then + pljson_list.gs_replace(ret, i, pljson_value.gs_get_number(elem)+1); + end if; + end loop; + end; + end if; + return ret; + end; + + --JSON Path getters + function gs_get_json_value(obj pljson, v_path varchar2, base number default 1) return pljson_value as + path pljson_list; + ret pljson_value; + o pljson; + l pljson_list; + begin + + path := gs_parsePath(v_path, base); + ret := pljson.gs_to_json_value(obj); + if (pljson_list.gs_count(path) = 0) then + return ret; + end if; + + for i in 1 .. 
pljson_list.gs_count(path) loop + if (pljson_value.gs_is_string(pljson_list.gs_get(path, i))) then + --string fetch only on json + o := pljson.gs_pljson(ret); + ret := pljson.gs_get(o, pljson_value.gs_get_string(pljson_list.gs_get(path, i))); + else + --number fetch on json and json_list + if (pljson_value.gs_is_array(ret)) then + l := pljson_list.gs_pljson_list(ret); + ret := pljson_list.gs_get(l, pljson_value.gs_get_number(pljson_list.gs_get(path, i))); + else + o := pljson.gs_pljson(ret); + l := pljson.gs_get_values(o); + ret := pljson_list.gs_get(l, pljson_value.gs_get_number(pljson_list.gs_get(path, i))); + end if; + end if; + end loop; + + return ret; + end; + + --JSON Path getters + function gs_get_string(obj pljson, path varchar2, base number default 1) return varchar2 as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_string(temp)) then + return null; + else + return pljson_value.gs_get_string(temp); + end if; + end; + + function gs_get_number(obj pljson, path varchar2, base number default 1) return number as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_number(temp)) then + return null; + else + return pljson_value.gs_get_number(temp); + end if; + end; + + function gs_get_double(obj pljson, path varchar2, base number default 1) return binary_double as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_number(temp)) then + return null; + else + return pljson_value.gs_get_double(temp); + end if; + end; + + function gs_get_json(obj pljson, path varchar2, base number default 1) return pljson as + temp pljson_value; + ret pljson; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_object(temp)) then + return null; + else + ret = pljson.gs_pljson(temp); + return ret; + end if; + end; + + function gs_get_json_list(obj pljson, path varchar2, base number default 1) return pljson_list as + temp pljson_value; + ret pljson_list; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_array(temp)) then + return null; + else + ret = pljson_list.gs_pljson_list(temp); + return ret; + end if; + end; + + function gs_get_bool(obj pljson, path varchar2, base number default 1) return boolean as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_bool(temp)) then + return null; + else + return pljson_value.gs_get_bool(temp); + end if; + end; + + function gs_get_date(obj pljson, path varchar2, base number default 1) return date as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not gs_is_date(temp)) then + return null; + else + return pljson_ext.gs_to_date(temp); + end if; + end; + + --extra function checks if number has no fraction + function gs_is_integer(v pljson_value) return boolean as + num number; + num_double binary_double; + int_number number(38); + int_double binary_double; + begin + + if (not pljson_value.gs_is_number(v)) then + raise exception 'not a number-value'; + end if; + + if (pljson_value.gs_is_number_repr_number(v)) then + num := pljson_value.gs_get_number(v); + int_number := trunc(num); + return (int_number = num); + elsif (pljson_value.gs_is_number_repr_double(v)) then + num_double := pljson_value.gs_get_double(v); + int_double := trunc(num_double); + return 
(int_double = num_double); + else + return false; + end if; + return false; + end; + + --extension enables json to store dates without compromising the implementation + function gs_to_json_value(d date) return pljson_value as + ret pljson_value; + begin + ret = pljson_value.gs_pljson_value(to_char(d, format_string)); + return ret; + end; + + --notice that a date type in json is also a varchar2 + function gs_is_date(v pljson_value) return boolean as + temp date; + begin + temp := pljson_ext.gs_to_date(v); + return true; + -- exception + -- when others then + -- return false; + end; + + --conversion is needed to extract dates + function gs_to_date(v pljson_value) return date as + begin + if (pljson_value.gs_is_string(v)) then + -- return STANDARD.to_date(pljson_value.gs_get_string(v), format_string); + return to_date(pljson_value.gs_get_string(v), format_string); + else + raise exception 'Anydata did not contain a date-value'; + end if; + end; + + -- alias so that old code doesn't break + function gs_to_date2(v pljson_value) return date as + begin + return gs_to_date(v); + end; + + function gs_decodeBase64Clob2Blob(p_clob clob) return blob as + r_blob blob; + clob_size number; + pos number; + c_buf varchar2(32767); + r_buf raw(32767); + v_read_size number; + v_line_size number; + begin + + dbe_lob.create_temporary(r_blob, false, 0); + clob_size := dbe_lob.get_length(p_clob); + v_line_size := 64; + if clob_size >= 65 and dbe_lob.substr(p_clob, 1, 65) = chr(10) then + v_line_size := 65; + elsif clob_size >= 66 and dbe_lob.substr(p_clob, 1, 65) = chr(13) then + v_line_size := 66; + elsif clob_size >= 77 and dbe_lob.substr(p_clob, 1, 77) = chr(10) then + v_line_size := 77; + elsif clob_size >= 78 and dbe_lob.substr(p_clob, 1, 77) = chr(13) then + v_line_size := 78; + end if; + v_read_size := floor(32767/v_line_size)*v_line_size; + + pos := 1; + while (pos < clob_size) loop + c_buf := PKG_UTIL.lob_read(p_clob, v_read_size, pos, 0); + r_buf := decode(PKG_UTIL.raw_cast_from_varchar2(c_buf), 'base64'); + -- r_buf := PKG_UTIL.raw_cast_from_varchar2(c_buf); + dbe_lob.write_append(r_blob, dbe_raw.get_length(r_buf), r_buf); + pos := pos + v_read_size; + end loop; + + return r_blob; + end; + + function gs_encodeBase64Blob2Clob(p_blob blob) return clob as + r_clob clob; + c_step integer := 12000; + c_buf varchar2(32767); + begin + + if p_blob is not null then + dbe_lob.create_temporary(r_clob, false, 0); + for i in 0 .. trunc((dbe_lob.get_length(p_blob) - 1)/c_step) loop + + c_buf := encode(PKG_UTIL.raw_cast_to_varchar2(dbe_lob.substr(p_blob, c_step, i * c_step + 1))::bytea, 'base64'); + -- c_buf := PKG_UTIL.raw_cast_to_varchar2(dbe_lob.substr(p_blob, c_step, i * c_step + 1)); + if substr(c_buf, length(c_buf)) != chr(10) then + c_buf := c_buf || CHR(13) || CHR(10); + end if; + dbe_lob.write_append(r_clob, length(c_buf), c_buf); + end loop; + end if; + + return r_clob; + end; + + /* JSON Path putter internal function */ + procedure gs_put_internal(obj inout pljson, v_path varchar2, elem pljson_value, base number) as + val pljson_value; + path pljson_list; + backreference pljson_list; + + keyval pljson_value; + keynum number; + keystring varchar2(4000); + temp pljson_value; + obj_temp pljson; + list_temp pljson_list; + inserter pljson_value; + begin + val := elem; + path := gs_parsePath(v_path, base); + if (pljson_list.gs_count(path) = 0) then + raise exception 'PLJSON_EXT put error: cannot put with empty string.'; + end if; + + --build backreference + for i in 1 .. 
pljson_list.gs_count(path) loop + --backreference.print(false); + keyval := pljson_list.gs_get(path, i); + if (pljson_value.gs_is_number(keyval)) then + --number index + keynum := pljson_value.gs_get_number(keyval); + if ((not pljson_value.gs_is_object(temp)) and (not pljson_value.gs_is_array(temp))) then + if (val is null) then + return; + end if; + pljson_list.gs_remove_last(backreference); + temp := pljson_list.gs_to_json_value(pljson_list.gs_pljson_list()); + pljson_list.gs_append(backreference, temp); + end if; + + if (pljson_value.gs_is_object(temp)) then + obj_temp := pljson.gs_pljson(temp); + if (pljson.gs_count(obj_temp) < keynum) then + if (val is null) then + return; + end if; + raise exception 'PLJSON_EXT put error: access object with too few members.'; + end if; + temp := pljson.gs_get(obj_temp, keynum); + else + list_temp := pljson_list.gs_pljson_list(temp); + if (pljson_list.gs_count(list_temp) < keynum) then + if (val is null) then + return; + end if; + --raise error or quit if val is null + for i in pljson_list.gs_count(list_temp)+1 .. keynum loop + pljson_list.gs_append(list_temp, pljson_value.gs_pljson_value()); + end loop; + pljson_list.gs_remove_last(backreference); + pljson_list.gs_append(backreference, list_temp); + end if; + + temp := pljson_list.gs_get(list_temp, keynum); + end if; + else + --string index + keystring := pljson_value.gs_get_string(keyval); + if (not pljson_value.gs_is_object(temp)) then + --backreference.print; + if (val is null) + then return; + end if; + pljson_list.gs_remove_last(backreference); + temp := pljson.gs_to_json_value(pljson.gs_pljson()); + pljson_list.gs_append(backreference, temp); + --raise_application_error(-20110, 'PLJSON_EXT put error: trying to access a non object with a string.'); + end if; + obj_temp := pljson.gs_pljson(temp); + temp := pljson.gs_get(obj_temp, keystring); + end if; + + if (temp is null) then + if (val is null) then + return; + end if; + + keyval := pljson_list.gs_get(path, i+1); + if (keyval is not null and pljson_value.gs_is_number(keyval)) then + temp := pljson_list.gs_to_json_value(pljson_list.gs_pljson_list()); + else + temp := pljson.gs_to_json_value(pljson.gs_pljson()); + end if; + end if; + pljson_list.gs_append(backreference, temp); + end loop; + + -- backreference.print(false); + -- path.print(false); + + --use backreference and path together + inserter := val; + for i in reverse 1 .. 
pljson_list.gs_count(backreference) loop + -- inserter.print(false); + if (i = 1) then + keyval := pljson_list.gs_get(path, 1); + if (pljson_value.gs_is_string(keyval)) then + keystring := pljson_value.gs_get_string(keyval); + else + keynum := pljson_value.gs_get_number(keyval); + declare + t1 pljson_value; + begin + t1 := pljson.gs_get(obj, keynum); + keystring := t1.mapname; + end; + end if; + if (inserter is null) then + pljson.gs_remove(obj, keystring); + else + pljson.gs_put(obj, keystring, inserter); + end if; + else + temp := pljson_list.gs_get(backreference, i-1); + if (pljson_value.gs_is_object(temp)) then + keyval := pljson_list.gs_get(path, i); + obj_temp := pljson.gs_pljson(temp); + if (pljson_value.gs_is_string(keyval)) then + keystring := pljson_value.gs_get_string(keyval); + else + keynum := pljson_value.gs_get_number(keyval); + declare + t1 pljson_value; + begin + t1 := pljson.gs_get(obj_temp, keynum); + keystring := t1.mapname; + end; + end if; + if (inserter is null) then + pljson.gs_remove(obj_temp, keystring); + if (obj_temp.count > 0) then + inserter := pljson.gs_to_json_value(obj_temp); + end if; + else + pljson.gs_put(obj_temp, keystring, inserter); + inserter := pljson.gs_to_json_value(obj_temp); + end if; + else + --array only number + keynum := pljson_value.gs_get_number(pljson_list.gs_get(path, i)); + list_temp := pljson_list.gs_pljson_list(temp); + pljson_list.gs_remove(list_temp, keynum); + if (not inserter is null) then + pljson_list.gs_append(list_temp, inserter, keynum); + inserter := pljson_list.gs_to_json_value(list_temp); + else + if (pljson_list.gs_count(list_temp) > 0) then + inserter := pljson_list.gs_to_json_value(list_temp); + end if; + end if; + end if; + end if; + + end loop; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem varchar2, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem number, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem binary_double, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem pljson, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson.gs_to_json_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem pljson_list, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_list.gs_to_json_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem boolean, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem 
pljson_value, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, elem, base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem date, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_ext.gs_to_json_value(elem), base); + end if; + end; + + procedure gs_remove(obj inout pljson, path varchar2, base number default 1) as + begin + pljson_ext.gs_put_internal(obj, path, null, base); + -- if (json_ext.gs_get_json_value(obj, path) is not null) then + -- end if; + end; + + --Pretty print with JSON Path + procedure gs_pp(obj pljson, v_path varchar2) as --using dbms_output.put_line + json_part pljson_value; + begin + json_part := gs_get_json_value(obj, v_path); + if (json_part is null) then + dbe_output.print_line(''); + else + dbe_output.print_line(pljson_printer.gs_pretty_print_any(json_part)); --escapes a possible internal string + end if; + end; + + procedure pp_htp(obj pljson, v_path varchar2) as --using htp.print + json_part pljson_value; + begin + /* + json_part := pljson_ext.gs_get_json_value(obj, v_path); + if (json_part is null) then + htp.print; + else + htp.print(pljson_printer.gs_pretty_print_any(json_part, false)); + end if; + */ + end; + + function gs_base64(binarydata blob) return pljson_list as + obj pljson_list; + c clob; + + v_clob_offset number := 1; + v_amount integer; + begin + + dbe_lob.create_temporary(c, false, 0); + c := gs_encodeBase64Blob2Clob(binarydata); + v_amount := dbe_lob.get_length(c); + v_clob_offset := 1; + --dbms_output.put_line('V amount: '||v_amount); + while (v_clob_offset < v_amount) loop + --dbms_output.put_line(v_offset); + --temp := ; + --dbms_output.put_line('size: '||length(temp)); + pljson_list.gs_append(obj, dbe_lob.SUBSTR(c, 4000, v_clob_offset)); + v_clob_offset := v_clob_offset + 4000; + end loop; + -- dbms_lob.freetemporary(c); + --dbms_output.put_line(obj.count); + --dbms_output.put_line(obj.get_last().to_char); + return obj; + end; + + function gs_base64(l pljson_list) return blob as + c clob; + b_ret blob; + begin + dbe_lob.create_temporary(c, false, 0); + for i in 1 .. 
pljson_list.gs_count(l) loop + dbe_lob.append(c, pljson_value.gs_get_string(pljson_list.gs_get(l, i))); + end loop; + b_ret := gs_decodeBase64Clob2Blob(c); + return b_ret; + end; + + function gs_encode(binarydata blob) return pljson_value as + obj pljson_value; + c clob; + begin + dbe_lob.create_temporary(c, false, 0); + c := gs_encodeBase64Blob2Clob(binarydata); + -- c := PKG_UTIL.lob_converttoclob(c, binarydata, 32767, 1, 1); + obj := pljson_value.gs_pljson_value(c); + return obj; + end; + + function gs_decode(v pljson_value) return blob as + c clob; + b_ret blob; + begin + c := pljson_value.gs_get_clob(v); + b_ret := gs_decodeBase64Clob2Blob(c); + -- b_ret := PKG_UTIL.lob_converttoblob(b_ret, c, 32767, 1, 1); + return b_ret; + end; + + procedure gs_blob2clob(b blob, c out clob, charset varchar2 default 'UTF8') as + v_dest_offset integer := 1; + v_src_offset integer := 1; + begin + dbe_lob.create_temporary(c, false, 0); + c := PKG_UTIL.lob_converttoclob(c, b, 32767, 1, 1); + end; + +end pljson_ext; +/ +create or replace package body pljson_value as + + function gs_pljson_value() return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 6; + return json_value; + end; + + function gs_pljson_value(b boolean) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 5; + json_value.num := 0; + if(b) then + json_value.num := 1; + end if; + if(b is null) then + json_value.typeval := 6; + end if; + return json_value; + end; + + function gs_pljson_value(str varchar2, esc boolean default true) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 3; + if(esc) then + json_value.num := 1; + else + json_value.num := 0; + end if; --message to pretty printer + json_value.str := str; + return json_value; + end; + + function gs_pljson_value(str clob, esc boolean default true) return pljson_value as + json_value pljson_value; + max_string_chars number := 5000; + lengthcc number; + begin + json_value.typeval := 3; + if(esc) then + json_value.num := 1; + else + json_value.num := 0; + end if; --message to pretty printer + + if (dbe_lob.get_length(str) > max_string_chars) then + json_value.extended_str := str; + end if; + + if dbe_lob.get_length(str) > 0 then + json_value.str := PKG_UTIL.lob_read(str, max_string_chars, 1, 0); + end if; + return json_value; + end; + + function gs_pljson_value(num number) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 4; + json_value.num := num; + json_value.num_repr_number_p := 't'; + json_value.num_double := num; + if (to_number(json_value.num_double) = json_value.num) then + json_value.num_repr_double_p := 't'; + else + json_value.num_repr_double_p := 'f'; + end if; + + if(json_value.num is null) then + json_value.typeval := 6; + end if; + return json_value; + end; + + function gs_pljson_value(num_double binary_double) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 4; + json_value.num_double := num_double; + json_value.num_repr_double_p := 't'; + json_value.num := num_double; + -- if (to_binary_double(json_value.num) = json_value.num_double) then + if (to_number(json_value.num) = json_value.num_double) then + json_value.num_repr_number_p := 't'; + else + json_value.num_repr_number_p := 'f'; + end if; + if(json_value.num_double is null) then + json_value.typeval := 6; + end if; + return json_value; + end; + + function gs_pljson_value(elem pljson_element) return pljson_value as + json_value pljson_value; + begin + /* + case + when 
elem is of (pljson) then self.typeval := 1; + when elem is of (pljson_list) then self.typeval := 2; + else raise_application_error(-20102, 'PLJSON_VALUE init error (PLJSON or PLJSON_LIST allowed)'); + end case; + self.object_or_array := elem; + if(self.object_or_array is null) then self.typeval := 6; end if; + */ + raise exception 'pljson element not support now'; + return json_value; + end; + + function gs_pljson_value(arr pljson_list) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 2; + json_value.arr := arr; + return json_value; + end; + + function gs_pljson_value(obj pljson) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 1; + json_value.obj := obj; + return json_value; + end; + + function gs_makenull() return pljson_value as + json_value pljson_value; + begin + return json_value; + end; + + function gs_get_type(json_value pljson_value) return varchar2 as + ret varchar2; + begin + case json_value.typeval + when 1 then ret := 'object'; + when 2 then ret := 'array'; + when 3 then ret := 'string'; + when 4 then ret := 'number'; + when 5 then ret := 'bool'; + when 6 then ret := 'null'; + else + ret := 'unknown type'; + end case; + return ret; + end; + + function gs_get_string(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2 as + begin + if(json_value.typeval = 3) then + if(max_byte_size is not null) then + return substrb(json_value.str,1,max_byte_size); + elsif (max_char_size is not null) then + return substr(json_value.str,1,max_char_size); + else + return json_value.str; + end if; + end if; + return null; + end; + + procedure gs_get_string_clob(json_value pljson_value, buf inout clob) as + begin + dbe_lob.STRIP(buf, 0); + if(json_value.typeval = 3) then + if(json_value.extended_str is not null) then + dbe_lob.copy(buf, json_value.extended_str, dbe_lob.get_length(json_value.extended_str)); + else + dbe_lob.write_append(buf, length(json_value.str), json_value.str); + end if; + end if; + end; + + function gs_get_clob(json_value pljson_value) return clob as + begin + if(json_value.typeval = 3) then + if(json_value.extended_str is not null) then + return json_value.extended_str; + else + return json_value.str; + end if; + end if; + return null; + end; + + function gs_get_bool(json_value pljson_value) return boolean as + begin + if(json_value.typeval = 5) then + return json_value.num = 1; + end if; + return null; + end; + + function gs_get_number(json_value pljson_value) return number as + begin + if(json_value.typeval = 4) then + return json_value.num; + end if; + return null; + end; + + function gs_get_double(json_value pljson_value) return binary_double as + begin + if(json_value.typeval = 4) then + return json_value.num_double; + end if; + return null; + end; + + function gs_get_element(json_value pljson_value) return pljson_element as + begin + if (json_value.typeval in (1,2)) then + return json_value.object_or_array; + end if; + return null; + end; + + function gs_get_null(json_value pljson_value) return varchar2 as + begin + if(json_value.typeval = 6) then + return 'null'; + end if; + return null; + end; + + function gs_is_string(json_value pljson_value) return boolean as + begin + return json_value.typeval = 3; + end; + + function gs_is_bool(json_value pljson_value) return boolean as + begin + return json_value.typeval = 5; + end; + + function gs_is_number(json_value pljson_value) return boolean as + begin + return json_value.typeval = 4; + end; + + function 
gs_is_number_repr_number(json_value pljson_value) return boolean as + begin + if json_value.typeval != 4 then + return false; + end if; + return (json_value.num_repr_number_p = 't'); + end; + + function gs_is_number_repr_double(json_value pljson_value) return boolean as + begin + if json_value.typeval != 4 then + return false; + end if; + return (json_value.num_repr_double_p = 't'); + end; + + function gs_is_object(json_value pljson_value) return boolean as + begin + return json_value.typeval = 1; + end; + + function gs_is_array(json_value pljson_value) return boolean as + begin + return json_value.typeval = 2; + end; + + function gs_is_null(json_value pljson_value) return boolean as + begin + return json_value.typeval = 6; + end; + + function gs_value_of(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2 as + begin + case json_value.typeval + when 1 then return 'json object'; + when 2 then return 'json array'; + when 3 then return pljson_value.gs_get_string(json_value, max_byte_size, max_char_size); + when 4 then return pljson_value.gs_get_number(json_value); + when 5 then if(pljson_value.gs_get_bool(json_value)) then return 'true'; else return 'false'; end if; + else return null; + end case; + end; + + procedure gs_parse_number(json_value inout pljson_value, str varchar2) as + begin + if json_value.typeval != 4 then + return; + end if; + + json_value.num := to_number(str); + json_value.num_repr_number_p := 't'; + -- json_value.num_double := to_binary_double(str); + json_value.num_double := to_number(str); + json_value.num_repr_double_p := 't'; + -- if (to_binary_double(json_value.num) != json_value.num_double) then + if (to_number(json_value.num) != json_value.num_double) then + json_value.num_repr_number_p := 'f'; + end if; + if (to_number(json_value.num_double) != json_value.num) then + json_value.num_repr_double_p := 'f'; + end if; + exception + when others then + raise exception 'input str is not vailed'; + end; + + function gs_number_toString(json_value pljson_value) return varchar2 as + num number; + num_double binary_double; + buf varchar2(4000); + begin + if (json_value.num_repr_number_p = 't') then + num := json_value.num; + if (num > 1e127) then + return '1e309'; -- json representation of infinity + end if; + if (num < -1e127) then + return '-1e309'; -- json representation of infinity + end if; + + buf := to_char(num); + if (-1 < num and num < 0 and substr(buf, 1, 2) = '-.') then + buf := '-0' || substr(buf, 2); + elsif (0 < num and num < 1 and substr(buf, 1, 1) = '.') then + buf := '0' || buf; + end if; + return buf; + else + num_double := json_value.num_double; + if (num_double = +BINARY_DOUBLE_INFINITY) then + return '1e309'; -- json representation of infinity + end if; + if (num_double = -BINARY_DOUBLE_INFINITY) then + return '-1e309'; -- json representation of infinity + end if; + + buf := to_char(num_double); + if (-1 < num_double and num_double < 0 and substr(buf, 1, 2) = '-.') then + buf := '-0' || substr(buf, 2); + elsif (0 < num_double and num_double < 1 and substr(buf, 1, 1) = '.') then + buf := '0' || buf; + end if; + return buf; + end if; + end; + + function gs_to_char(json_value pljson_value, spaces boolean default true, chars_per_line number default 0) return varchar2 as + begin + return pljson_printer.gs_pretty_print_any(json_value, spaces, chars_per_line); + end; + + -- procedure gs_to_clob(json_value pljson_value, buf inout clob, spaces boolean default false, chars_per_line number default 0, 
erase_clob boolean default true) as + -- begin + -- if(spaces is null) then + -- pljson_printer.gs_pretty_print_any(json_value, false, buf, chars_per_line, erase_clob); + -- else + -- pljson_printer.gs_pretty_print_any(json_value, spaces, buf, chars_per_line, erase_clob); + -- end if; + -- end; + + procedure gs_to_clob(json_value pljson_value, buf inout clob, spaces boolean default true, chars_per_line number default 0, erase_clob boolean default true) as + my_bufstr varchar2; + begin + my_bufstr := pljson_printer.gs_pretty_print_any(json_value, spaces, chars_per_line); + if (erase_clob) then + dbe_lob.STRIP(buf,0); + end if; + dbe_lob.append(buf, my_bufstr); + end; + + -- procedure gs_print(json_value pljson_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + -- my_clob clob; + -- begin + -- dbe_lob.create_temporary(my_clob, true); + -- if (chars_per_line>32512) then + -- pljson_printer.gs_pretty_print_any(json_value, spaces, my_clob, 32512); + -- else + -- pljson_printer.gs_pretty_print_any(json_value, spaces, my_clob, chars_per_line); + -- end if; + -- pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + -- end; + + procedure gs_print(json_value pljson_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + my_clob clob; + my_bufstr varchar2; + begin + dbe_lob.create_temporary(my_clob, true); + if (chars_per_line>32512) then + my_bufstr := pljson_printer.gs_pretty_print_any(json_value, spaces, 32512); + else + my_bufstr := pljson_printer.gs_pretty_print_any(json_value, spaces, chars_per_line); + end if; + dbe_lob.append(my_clob, my_bufstr); + pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + end; + + procedure htp(json_value pljson_value, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null) as + my_clob clob; + begin + raise exception 'htp not support'; + dbe_lob.create_temporary(my_clob, true); + pljson_printer.gs_pretty_print_any(json_value, spaces, my_clob, chars_per_line); + pljson_printer.htp_output_clob(my_clob, jsonp); + end; + +end pljson_value; +/ +create or replace package body pljson_list as + + function gs_pljson_list() return pljson_list as + json_list pljson_list; + begin + return json_list; + end; + + function gs_pljson_list(str varchar2) return pljson_list as + json_list pljson_list; + begin + json_list := pljson_parser.gs_parse_list(str); + return json_list; + end; + + function gs_pljson_list(str clob) return pljson_list as + json_list pljson_list; + begin + json_list := pljson_parser.gs_parse_list(str); + return json_list; + end; + + function gs_pljson_list(str blob, charset varchar2 default 'UTF8') return pljson_list as + json_list pljson_list; + c_str clob; + begin + pljson_ext.gs_blob2clob(str, c_str, charset); + json_list := pljson_parser.gs_parse_list(c_str); + -- dbms_lob.freetemporary(c_str); + return json_list; + end; + + function gs_pljson_list(str_array varchar2[]) return pljson_list as + json_list pljson_list; + begin + -- json_list.pljson_list_data := pljson_value_array(); + for i in str_array.FIRST .. str_array.LAST loop + gs_append(json_list, str_array[i]); + end loop; + return json_list; + end; + + function gs_pljson_list(num_array number[]) return pljson_list as + json_list pljson_list; + begin + for i in str_array.FIRST .. 
str_array.LAST loop + gs_append(json_list, num_array[i]); + end loop; + return json_list; + end; + + function gs_pljson_list(elem pljson_value) return pljson_list as + ret_list pljson_list; + begin + -- self := treat(elem.object_or_array as pljson_list); + ret_list := elem.arr; + return ret_list; + end; + + + /* list management */ + procedure gs_append(json_list inout pljson_list, elem pljson_value, _position integer default null) as + indx integer; + insert_value pljson_value; + begin + insert_value := elem; + if insert_value is null then + insert_value := pljson_value.gs_pljson_value(); + end if; + if (_position is null or _position > pljson_list.gs_count(json_list)) then --end of list + indx := pljson_list.gs_count(json_list) + 1; + json_list.pljson_list_data.extend(1); + json_list.pljson_list_data[indx] := insert_value; + elsif (_position < 1) then --new first + indx := pljson_list.gs_count(json_list); + json_list.pljson_list_data.extend(1); + for x in reverse 0 .. indx loop + json_list.pljson_list_data[x+1] := json_list.pljson_list_data[x]; + end loop; + json_list.pljson_list_data[0] := insert_value; + else + indx := pljson_list.gs_count(json_list); + json_list.pljson_list_data.extend(1); + for x in reverse _position .. indx loop + json_list.pljson_list_data[x+1] := json_list.pljson_list_data(x); + end loop; + json_list.pljson_list_data[_position] := insert_value; + end if; + end; + + procedure gs_append(json_list inout pljson_list, elem varchar2, _position integer default null) as + begin + gs_append(json_list, pljson_value.gs_pljson_value(elem), _position); + end; + + procedure gs_append(json_list inout pljson_list, elem clob, _position integer default null) as + begin + gs_append(json_list, pljson_value.gs_pljson_value(elem), _position); + end; + + procedure gs_append(json_list inout pljson_list, elem number, _position integer default null) as + begin + if (elem is null) then + gs_append(json_list, pljson_value.gs_pljson_value(), _position); + else + gs_append(json_list, pljson_value.gs_pljson_value(elem), _position); + end if; + end; + + procedure gs_append(json_list inout pljson_list, elem binary_double, _position integer default null) as + begin + if (elem is null) then + gs_append(json_list, pljson_value.gs_pljson_value(), _position); + else + gs_append(json_list, pljson_value.gs_pljson_value(elem), _position); + end if; + end; + + procedure gs_append(json_list inout pljson_list, elem boolean, _position integer default null) as + begin + if (elem is null) then + gs_append(json_list, pljson_value.gs_pljson_value(), _position); + else + gs_append(json_list, pljson_value.gs_pljson_value(elem), _position); + end if; + end; + + procedure gs_append(json_list inout pljson_list, elem pljson_list, _position integer default null) as + begin + if (elem is null) then + gs_append(json_list, pljson_value.gs_pljson_value(), _position); + else + gs_append(json_list, pljson_list.gs_to_json_value(elem), _position); + end if; + end; + + procedure gs_remove(json_list inout pljson_list, _position integer) as + begin + if (_position is null or _position < 1 or _position > pljson_list.gs_count(json_list)) then + return; + end if; + for x in (_position+1) .. pljson_list.gs_count(json_list) loop + json_list.pljson_list_data[x-1] := json_list.pljson_list_data[x]; + end loop; + json_list.pljson_list_data.trim(1); + end; + + procedure gs_remove_first(json_list inout pljson_list) as + begin + for x in 2 .. 
pljson_list.gs_count(json_list) loop + json_list.pljson_list_data[x-1] := json_list.pljson_list_data[x]; + end loop; + if (pljson_list.gs_count(json_list) > 0) then + json_list.pljson_list_data.trim(1); + end if; + end; + + procedure gs_remove_last(json_list inout pljson_list) as + begin + if (pljson_list.gs_count(json_list) > 0) then + json_list.pljson_list_data.trim(1); + end if; + end; + + function gs_count(json_list pljson_list) return number as + begin + return json_list.pljson_list_data.count; + end; + + function gs_get(json_list pljson_list, _position integer) return pljson_value as + ret pljson_value; + begin + if (pljson_list.gs_count(json_list) >= _position and _position > 0) then + ret = json_list.pljson_list_data[_position]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_get_string(json_list pljson_list, _position integer) return varchar2 as + elem pljson_value; + ret varchar2; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_string(elem); + return ret; + end; + + function gs_get_clob(json_list pljson_list, _position integer) return clob as + elem pljson_value; + ret clob; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_clob(elem); + return ret; + end; + + function gs_get_bool(json_list pljson_list, _position integer) return boolean as + elem pljson_value; + ret boolean; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_bool(elem); + return ret; + end; + + function gs_get_number(json_list pljson_list, _position integer) return number as + elem pljson_value; + ret number; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_number(elem); + return ret; + end; + + function gs_get_double(json_list pljson_list, _position integer) return binary_double as + elem pljson_value; + ret binary_double; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_double(elem); + return ret; + end; + + function gs_get_pljson_list(json_list pljson_list, _position integer) return pljson_list as + elem pljson_value; + ret pljson_list; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_list.gs_pljson_list(elem); + -- return treat(elem.object_or_array as pljson_list); + return ret; + end; + + function gs_head(json_list pljson_list) return pljson_value as + ret pljson_value; + begin + if (pljson_list.gs_count(json_list) > 0) then + ret = json_list.pljson_list_data[json_list.pljson_list_data.first]; + -- return json_list.pljson_list_data[0]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_last(json_list pljson_list) return pljson_value as + ret pljson_value; + begin + if (pljson_list.gs_count(json_list) > 0) then + ret = json_list.pljson_list_data[json_list.pljson_list_data.last]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_tail(json_list pljson_list) return pljson_list as + t pljson_list; + ret pljson_list; + begin + if (pljson_list.gs_count(json_list) > 0) then + t := json_list; --pljson_list(self.to_json_value); + pljson_list.gs_remove(t, 1); + return t; + else + ret = pljson_list.gs_pljson_list(); + return ret; + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_value) as + insert_value pljson_value; + indx number; + begin + insert_value := elem; + if insert_value is null then + 
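+            -- a SQL NULL elem is normalized to a typed JSON null (typeval 6),
+            -- so the replaced slot still holds a pljson_value rather than nothing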
insert_value := pljson_value.gs_pljson_value(); + end if; + if (_position > pljson_list.gs_count(json_list)) then --end of list + indx := pljson_list.gs_count(json_list) + 1; + json_list.pljson_list_data.extend(1); + json_list.pljson_list_data[indx] := insert_value; + elsif (_position < 1) then --maybe an error message here + null; + else + json_list.pljson_list_data[_position] := insert_value; + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem varchar2) as + begin + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem clob) as + begin + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem number) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem binary_double) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem boolean) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_list) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_list.gs_to_json_value(elem)); + end if; + end; + + function gs_to_json_value(json_list pljson_list) return pljson_value as + ret pljson_value; + begin + ret = pljson_value.gs_pljson_value(json_list); + return ret; + end; + + /* output methods */ + function gs_to_char(json_list pljson_list, spaces boolean default true, chars_per_line number default 0) return varchar2 as + begin + if (spaces is null) then + return pljson_printer.gs_pretty_print_list(json_list, chars_per_line); + else + return pljson_printer.gs_pretty_print_list(json_list, spaces, chars_per_line); + end if; + end; + + procedure gs_to_clob(json_list pljson_list, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true) as + begin + if (spaces is null) then + pljson_printer.gs_pretty_print_list(json_list, false, buf, chars_per_line, erase_clob); + else + pljson_printer.gs_pretty_print_list(json_list, spaces, buf, chars_per_line, erase_clob); + end if; + end; + + -- procedure gs_print(json_list pljson_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + -- my_clob clob; + -- begin + -- dbe_lob.create_temporary(my_clob, true); + -- if (chars_per_line>32512) then + -- pljson_printer.gs_pretty_print_list(json_list, spaces, my_clob, 32512); + -- else + -- pljson_printer.gs_pretty_print_list(json_list, spaces, my_clob, chars_per_line); + -- end if; + -- pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + -- end; + + procedure gs_print(json_list pljson_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + 
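+        -- note: this variant pretty-prints into a varchar2 buffer and appends
+        -- it to a temporary clob, instead of streaming straight into the clob
+        -- as the commented-out version above does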
my_clob clob; + my_bufstr varchar2; + begin + dbe_lob.create_temporary(my_clob, true); + if (chars_per_line>32512) then + my_bufstr := pljson_printer.gs_pretty_print_list(json_list, spaces, 32512); + else + my_bufstr := pljson_printer.gs_pretty_print_list(json_list, spaces, chars_per_line); + end if; + dbe_lob.append(my_clob, my_bufstr); + pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + end; + + procedure htp(json_list pljson_list, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null) as + my_clob clob; + begin + dbe_lob.create_temporary(my_clob, true); + pljson_printer.gs_pretty_print_list(json_list, spaces, my_clob, chars_per_line); + pljson_printer.htp_output_clob(my_clob, jsonp); + end; + + /* json path */ + function gs_path(json_list pljson_list, json_path varchar2, base number default 1) return pljson_value as + cp pljson_list; + ret pljson_value; + begin + cp := json_list; + ret = pljson_ext.gs_get_json_value(pljson.gs_pljson(cp), json_path, base); + return ret; + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_value, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_put(objlist, json_path, elem, base); + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem varchar2, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_put(objlist, json_path, elem, base); + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem clob, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_put(objlist, json_path, elem, base); + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem number, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem binary_double, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while 
(pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem boolean, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_list, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + pljson_list.gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + /* json path_remove */ + procedure gs_path_remove(json_list inout pljson_list, json_path varchar2, base number default 1) as + objlist pljson; + begin + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_remove(objlist, json_path, base); + json_list := pljson.gs_get_values(objlist); + end; + +end pljson_list; +/ +create or replace package body pljson as + + function gs_pljson() return pljson as + pj pljson; + begin + pj.check_for_duplicate := 1; + return pj; + end; + + function gs_pljson(str varchar2) return pljson as + pj pljson; + begin + pj := pljson_parser.gs_parser(str); + pj.check_for_duplicate := 1; + return pj; + end; + + function gs_pljson(str clob) return pljson as + pj pljson; + begin + pj := pljson_parser.gs_parser(str); + pj.check_for_duplicate := 1; + return pj; + end; + + function gs_pljson(str blob, charset varchar2 default 'UTF8') return pljson as + pj pljson; + c_str clob; + begin + pljson_ext.gs_blob2clob(str, c_str, charset); + pj := pljson_parser.gs_parser(c_str); + pj.check_for_duplicate := 1; + -- dbms_lob.freetemporary(c_str); + return pj; + end; + + function gs_pljson(str_array varchar2[]) return pljson as + pj pljson; + new_pair boolean := True; + pair_name varchar2(32767); + pair_value varchar2(32767); + begin + pj.check_for_duplicate := 1; + for i in str_array.FIRST .. 
str_array.LAST loop + if new_pair then + pair_name := str_array[i]; + new_pair := False; + else + pair_value := str_array[i]; + gs_put(pj, pair_name, pair_value); + new_pair := True; + end if; + end loop; + return pj; + end; + + function gs_pljson(elem pljson_value) return pljson as + pj pljson; + begin + -- self := treat(elem.object_or_array as pljson); + pj := elem.obj; + return pj; + end; + + function gs_pljson(l pljson_list) return pljson as + pj pljson; + temp pljson_value; + begin + for i in 1 .. pljson_list.gs_count(l) loop + temp = l.pljson_list_data[i]; + if(temp.mapname is null or temp.mapname like 'row%') then + temp.mapname := 'row'||i; + end if; + temp.mapindx := i; + end loop; + + pj.pljson_list_data := l.pljson_list_data; + pj.check_for_duplicate := 1; + return pj; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_value, _position integer default null) as + insert_value pljson_value; + indx integer; + x number; + temp pljson_value; + begin + + insert_value := pair_value; + if insert_value is null then + insert_value := pljson_value.gs_pljson_value(); + end if; + insert_value.mapname := pair_name; + if (pj.check_for_duplicate = 1) then + temp := pljson.gs_get(pj, pair_name); + else + temp := null; + end if; + + if (temp is not null) then + insert_value.mapindx := temp.mapindx; + pj.pljson_list_data[temp.mapindx] := insert_value; + return; + elsif (_position is null or _position > pljson.gs_count(pj)) then + --insert at the end of the list + pj.pljson_list_data.extend(1); + insert_value.mapindx := pj.pljson_list_data.count; + pj.pljson_list_data[pj.pljson_list_data.count] := insert_value; + elsif (_position < 2) then + --insert at the start of the list + indx := pj.pljson_list_data.last; + pj.pljson_list_data.extend; + loop + exit when indx is null; + temp := pj.pljson_list_data[indx]; + temp.mapindx := indx+1; + pj.pljson_list_data[temp.mapindx] := temp; + indx := pj.pljson_list_data.prior(indx); + end loop; + insert_value.mapindx := 1; + pj.pljson_list_data[1] := insert_value; + else + --insert somewhere in the list + indx := pj.pljson_list_data.last; + pj.pljson_list_data.extend; + loop + temp := pj.pljson_list_data[indx]; + temp.mapindx := indx + 1; + pj.pljson_list_data[temp.mapindx] := temp; + exit when indx = _position; + indx := pj.pljson_list_data.prior(indx); + end loop; + insert_value.mapindx := _position; + pj.pljson_list_data[_position] := insert_value; + end if; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value varchar2, _position integer default null) as + begin + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value clob, _position integer default null) as + begin + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value number, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end if; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value binary_double, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end if; + end; + + procedure gs_put(pj 
inout pljson, pair_name varchar2, pair_value boolean, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end if; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson.gs_to_json_value(pair_value), _position); + end if; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_list, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson_list.gs_to_json_value(pair_value), _position); + end if; + end; + + procedure gs_remove(pj pljson, pair_name varchar2) as + temp pljson_value; + indx integer; + begin + temp := pljson.gs_get(pj, pair_name); + if (temp is null) then + return; + end if; + indx := pj.pljson_list_data.next(temp.mapindx); + loop + exit when indx is null; + exit when indx == arr_length(pj.pljson_list_data); + pj.pljson_list_data[indx].mapindx := indx - 1; + pj.pljson_list_data[indx-1] := pj.pljson_list_data[indx]; + indx := pj.pljson_list_data.next(indx); + end loop; + pj.pljson_list_data.trim(1); + end; + + function gs_count(pj pljson) return number as + begin + return pj.pljson_list_data.count; + end; + + function gs_get(pj pljson, pair_name varchar2) return pljson_value as + indx integer; + ret pljson_value; + begin + indx := pj.pljson_list_data.first; + loop + exit when indx is null; + if (pair_name is null and pj.pljson_list_data[indx].mapname is null) then + ret = pj.pljson_list_data[indx]; + return ret; + end if; + if (pj.pljson_list_data[indx].mapname = pair_name) then + ret = pj.pljson_list_data[indx]; + return ret; + end if; + indx := pj.pljson_list_data.next(indx); + end loop; + return null; + end; + + function gs_get_string(pj pljson, pair_name varchar2) return varchar2 as + elem pljson_value; + ret varchar2; + begin + elem := pljson.gs_get(pj, pair_name); + ret = pljson_value.gs_get_string(elem); + return ret; + end; + + function gs_get_clob(pj pljson, pair_name varchar2) return clob as + elem pljson_value; + ret clob; + begin + elem := pljson.gs_get(pj, pair_name); + ret = pljson_value.gs_get_clob(elem); + return ret; + end; + + function gs_get_number(pj pljson, pair_name varchar2) return number as + elem pljson_value; + ret number; + begin + elem := pljson.gs_get(pj, pair_name); + ret = pljson_value.gs_get_number(elem); + return ret; + end; + + function gs_get_double(pj pljson, pair_name varchar2) return binary_double as + elem pljson_value; + ret binary_double; + begin + elem := pljson.gs_get(pj, pair_name); + ret = pljson_value.gs_get_double(elem); + return ret; + end; + + function gs_get_bool(pj pljson, pair_name varchar2) return boolean as + elem pljson_value; + ret boolean; + begin + elem := pljson.gs_get(pj, pair_name); + ret = pljson_value.gs_get_bool(elem); + return ret; + end; + + function gs_get_pljson(pj pljson, pair_name varchar2) return pljson as + elem pljson_value; + ret pljson; + begin + elem := pljson.gs_get(pj, pair_name); + -- return treat(elem.object_or_array as pljson); + return ret; + end; + + function gs_get_pljson_list(pj pljson, pair_name varchar2) return pljson_list as + elem pljson_value; + ret pljson_list; + 
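+        -- NB: as in gs_get_pljson above, the Oracle treat() conversion is left
+        -- commented out below, so ret is returned without ever being assigned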
begin + elem := pljson.gs_get(pj, pair_name); + -- return treat(elem.object_or_array as pljson); + return ret; + end; + + function gs_get(pj pljson, _position integer) return pljson_value as + ret pljson_value; + begin + if (pljson.gs_count(pj) >= _position and _position > 0) then + ret = pj.pljson_list_data[_position]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_index_of(pj pljson, pair_name varchar2) return number as + indx integer; + begin + indx := pj.pljson_list_data.first; + loop + exit when indx is null; + if (pair_name is null and pj.pljson_list_data[indx].mapname is null) then + return indx; + end if; + if (pj.pljson_list_data[indx].mapname = pair_name) then + return indx; + end if; + indx := pj.pljson_list_data.next(indx); + end loop; + return -1; + end; + + function gs_exist(pj pljson, pair_name varchar2) return boolean as + begin + return (pljson.gs_get(pj, pair_name) is not null); + end; + + function gs_to_json_value(pj pljson) return pljson_value as + ret pljson_value; + begin + ret = pljson_value.gs_pljson_value(pj); + return ret; + end; + + procedure gs_check_duplicate(pj inout pljson, v_set boolean) as + begin + if (v_set) then + pj.check_for_duplicate := 1; + else + pj.check_for_duplicate := 0; + end if; + end; + + procedure gs_remove_duplicates(pj inout pljson) as + begin + pljson_parser.gs_remove_duplicates(pj); + end; + + function gs_to_char(pj pljson, spaces boolean default true, chars_per_line number default 0) return varchar2 as + begin + if(spaces is null) then + return pljson_printer.gs_pretty_print(pj, chars_per_line); + else + return pljson_printer.gs_pretty_print(pj, spaces, chars_per_line); + end if; + end; + + procedure gs_to_clob(pj pljson, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true) as + begin + if(spaces is null) then + pljson_printer.gs_pretty_print(pj, false, buf, chars_per_line, erase_clob); + else + pljson_printer.gs_pretty_print(pj, spaces, buf, chars_per_line, erase_clob); + end if; + end; + + -- procedure gs_print(pj pljson, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + -- my_clob clob; + -- begin + -- dbe_lob.create_temporary(my_clob, true); + -- if (chars_per_line>32512) then + -- pljson_printer.gs_pretty_print(pj, spaces, my_clob, 32512); + -- else + -- pljson_printer.gs_pretty_print(pj, spaces, my_clob, chars_per_line); + -- end if; + -- pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + -- end; + + procedure gs_print(pj pljson, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + my_clob clob; + my_bufstr varchar2; + begin + dbe_lob.create_temporary(my_clob, true); + if (chars_per_line>32512) then + my_bufstr := pljson_printer.gs_pretty_print(pj, spaces, 32512); + else + my_bufstr := pljson_printer.gs_pretty_print(pj, spaces, chars_per_line); + end if; + dbe_lob.append(my_clob,my_bufstr); + pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + end; + + procedure htp(pj pljson, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null) as + my_clob clob; + begin + dbe_lob.create_temporary(my_clob, true); + pljson_printer.gs_pretty_print(pj, spaces, my_clob, chars_per_line); + pljson_printer.htp_output_clob(my_clob, jsonp); + end; + + function gs_path(pj pljson, json_path varchar2, base number default 1) return pljson_value as + ret 
pljson_value; + begin + ret = pljson_ext.gs_get_json_value(pj, json_path, base); + return ret; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_value, base number default 1) as + begin + pljson_ext.gs_put(pj, json_path, elem, base); + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem varchar2, base number default 1) as + begin + pljson_ext.gs_put(pj, json_path, elem, base); + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem clob, base number default 1) as + begin + pljson_ext.gs_put(pj, json_path, elem, base); + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem number, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem binary_double, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem boolean, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_list, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_remove(pj inout pljson, json_path varchar2, base number default 1) as + begin + pljson_ext.gs_remove(pj, json_path, base); + end; + + function gs_get_keys(pj pljson) return pljson_list as + keys pljson_list; + indx integer; + begin + keys := pljson_list.gs_pljson_list(); + indx := pj.pljson_list_data.first; + loop + exit when indx is null; + pljson_list.gs_append(keys, pj.pljson_list_data[indx].mapname); + indx := pj.pljson_list_data.next(indx); + end loop; + return keys; + end; + + function gs_get_values(pj pljson) return pljson_list as + vals pljson_list; + begin + vals := pljson_list.gs_pljson_list(); + vals.pljson_list_data := pj.pljson_list_data; + return vals; + end; + +end pljson; +/ +reset current_schema; +grant usage ON schema DBE_PLJSON TO public; +set current_schema=DBE_PLJSON; +create type t1 as ( a int ); +create type tt1 as ( b t1[] ); +alter type t1 add attribute arr tt1; +ERROR: composite type t1 cannot be made a member of itself +create type t2 as ( a int ); +create type tt2 as (pljson_list_data t2[]); +alter type t2 add attribute arr tt2; +declare +obj pljson; +begin +obj := pljson.gs_pljson('{"a": true }'); +pljson.gs_print(obj); +obj := pljson.gs_pljson(' +{ +"a": null, +"b": 12.243, +"c": 2e-3, +"d": [true, false, "abdc", [1,2,3]], +"e": [3, {"e2":3}], +"f": {"f2":true} +}'); +pljson.gs_print(obj); +pljson.gs_print(obj, false); +end; +/ +{ + "a" : true +} +{ + "a" : null, + "b" : 12.243, + "c" : 0.002, + "d" : [true, false, "abdc", [1, 2, 3]], + "e" : [3, { + "e2" : 
3 + }], + "f" : { + "f2" : true + } +} +{"a":null,"b":12.243,"c":0.002,"d":[true,false,"abdc",[1,2,3]],"e":[3,{"e2":3}],"f":{"f2":true}} +reset current_schema; diff --git a/src/test/regress/expected/plpgsql_array.out b/src/test/regress/expected/plpgsql_array.out new file mode 100644 index 000000000..475911b20 --- /dev/null +++ b/src/test/regress/expected/plpgsql_array.out @@ -0,0 +1,585 @@ +-- FOR PL/pgSQL VARRAY scenarios -- +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_array; +NOTICE: schema "plpgsql_array" does not exist, skipping +create schema plpgsql_array; +set current_schema = plpgsql_array; +-- initialize tables -- +create table customers ( + id number(10) not null, + c_name varchar2(50) not null, + c_age number(8) not null, + c_address varchar2(50), + salary float(2) not null, + constraint customers_pk primary key (id) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "customers_pk" for table "customers" +insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (2, 'Zera' ,25, 'London', 5999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98); +insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00); +insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00); +create table tmp(a int, b varchar(100)); +-- initialize functions, types etc. -- +create type mytype as ( + id integer, + biome varchar2(100) +); +create type mytype2 as ( + id integer, + locale myType +); +-- it turns any input to (9, (1, 'space')) +create or replace function myfunc(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (9, (1, 'space')); + return ret; +end; +/ +-- type and function shares the same name -- +-- Oh~oh, what's gonna happened?? 
-- +create type functype as ( + id integer, + locale myType +); +create or replace function functype(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (-1, (1, 'unknown realm')); + return ret; +end; +/ +-- test function datatype priority -- +create or replace function name_list(inint in integer) +return integer +is + ret integer; +begin + ret := 1; + return ret; +end; +/ +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +-- general declare + assign + access -- +-- support varray with parentheses in SQL -- +DECLARE + CURSOR c_customers is + SELECT c_name FROM customers order by id; + type c_list is varray (6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +BEGIN + FOR n IN c_customers LOOP + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + END LOOP; + + insert into tmp values (NULL, name_list(1)); + insert into tmp values (NULL, name_list(3)); + name_list(2) := name_list(3); + insert into tmp values (NULL, name_list[2]); -- same as last one -- +END; +/ +select * from tmp order by 1, 2; + a | b +---+------- + | Alice + | Alice + | Vera +(3 rows) + +truncate tmp; +-- ERROR: mix of parens and brackets are not allowed -- +declare + cursor c_customers is (select c_name from customers order by id); + type c_list is varray(6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +begin + for n in c_customers loop + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + end loop; + + insert into tmp values (null, name_list(1]); +end; +/ +ERROR: mismatched parentheses at or near ";" +LINE 12: insert into tmp values (null, name_list(1]); + ^ +QUERY: DECLARE cursor c_customers is (select c_name from customers order by id); + type c_list is varray(6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +begin + for n in c_customers loop + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + end loop; + + insert into tmp values (null, name_list(1]); +end +-- parentheses support in SQL 2 -- +-- array of record -- +declare + cursor c_customers is (select * from customers order by id); + type c_list is varray(6) of customers; + customer_list c_list := c_list(); + counter integer := 0; + name varchar2(50) := ''; +begin + for n in c_customers loop + counter := counter + 1; + customer_list.extend; + customer_list(counter) := (n.id, n.c_name, n.c_age, n.c_address, n.salary); -- insert record -- + name := customer_list(counter).c_name; + if customer_list(counter).c_age <= 30 then + dbe_output.print_line('Individual who is below 30: ' || customer_list(counter).c_name); + else + dbe_output.print_line('Individual who is above 30: ' || name); + end if; + insert into tmp values (customer_list(counter).c_age, customer_list(counter).salary); -- parentheses -- + end loop; +end; +/ +Individual who is above 30: Vera +Individual who is below 30: Zera +Individual who is below 30: Alice +Individual who is below 30: Jim +Individual who is below 30: Kevin +Individual who is above 30: Gauss +select * from tmp order by 1, 2; + a | b +----+--------- + 22 | 9800.98 + 25 | 5999 + 26 | 18700 + 28 | 18999 + 32 | 22999 + 42 | 32999 +(6 rows) + +truncate tmp; +-- batch initialization, batch insert varray-- +declare + type students is varray(6) of varchar2(10); + type grades is 
varray(6) of integer; + marks grades := grades('98', 97, 74 + 4, (87), 92, 100); -- batch initialize -- + names students default students('none'); -- default -- + total integer; +begin + names := students(); -- should append NULL, then do the coercion -- + names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert -- + total := names.count; + dbe_output.print_line('Total '|| total || ' Students'); + for i in 1 .. total loop + dbe_output.print_line('Student: ' || names(i) || ' Marks: ' || marks(i)); + end loop; +end; +/ +Total 6 Students +Student: Vera Marks: 98 +Student: Zera Marks: 97 +Student: Alice Marks: 78 +Student: Jim Marks: 87 +Student: Kevin Marks: 92 +Student: Gauss Marks: 100 +-- block above will be rewritten into this form (close to this form, but with parens and coercions) -- +declare + type students is varray(6) of varchar2(10); + type grades is varray(6) of integer; + marks grades := array['98', 97, 74 + 4, (87), 92, 100]; -- batch initialize -- + names students default array['none']; -- default -- + total integer; +begin + names := array[NULL]; + names := array['Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss']; -- batch insert -- + total := names.count; + dbe_output.print_line('Total '|| total || ' Students'); + for i in 1 .. total loop + dbe_output.print_line('Student: ' || names(i) || ' Marks: ' || marks(i)); + end loop; +end; +/ +Total 6 Students +Student: Vera Marks: 98 +Student: Zera Marks: 97 +Student: Alice Marks: 78 +Student: Jim Marks: 87 +Student: Kevin Marks: 92 +Student: Gauss Marks: 100 +-- test of PL/SQL data type instantiation -- +-- If we specify the type (PL/SQL-like instantiation), all varray members -- +-- can be cast to the correct data type. -- +declare + type students is varray(5) of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly -- + for i in 1 .. 5 loop + dbe_output.print_line('Student: ' || names(i)); + end loop; +end; +/ +Student: 1 +Student: Zera +Student: Alice +Student: Jim +Student: Kevin +-- However, if we use the PL/pgSQL-style instantiation, it is not guaranteed -- +-- error out for this one -- +declare + type students is varray(5) of varchar2(10); + names students; +begin + -- we can only make assumptions based on the first element, which is not always a good answer -- + names := array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin']; + for i in 1 .. 5 loop + dbe_output.print_line('Student: ' || names(i)); + end loop; +end; +/ +ERROR: invalid input syntax for integer: "Zera " +LINE 1: SELECT array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin'] + ^ +QUERY: SELECT array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin'] +CONTEXT: referenced column: array +PL/pgSQL function inline_code_block line 5 at assignment
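+-- a hypothetical fix for the failing block above (not executed): casting the first element lets the -- +-- ARRAY[] constructor infer a character element type, so the remaining literals coerce cleanly -- +-- names := array[1::varchar2(10), 'Zera ', 'Alice', 'Jim ', 'Kevin']; +-- test of uneven brackets -- +-- error out -- +declare + type students is varray(5) of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly -- + for i in 1 .. 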
5 loop + dbe_output.print_line('Student: ' || names(i]); + end loop; +end; +/ +ERROR: mismatched brackets at or near ")" +LINE 6: dbe_output.print_line('Student: ' || names(i]); + ^ +QUERY: DECLARE type students is varray(5) of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly -- + for i in 1 .. 5 loop + dbe_output.print_line('Student: ' || names(i]); + end loop; +end +-- Using composite type defined outside of the procedure block -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, (1, 'ground')), + mytype2(1, (2, 'air')) + ); +begin + aa.extend(10); + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + dbe_output.print_line('locale id is: ' || aa(1).id); + dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) -- +end; +/ +locale id is: 1 +biome 1.3 is: water +-- Note: array can handle proper type-in-type declaration -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + dbe_output.print_line('locale id is: ' || aa(1).id); + dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) -- +end; +/ +locale id is: 1 +biome 1.3 is: water +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := mytype2(2, mytype(3, 'water')); + dbe_output.print_line('locale id is: ' || aa(1).id); + dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) -- +end; +/ +locale id is: 1 +biome 1.3 is: water +-- working with functions -- +-- should be the same, except the result; make sure functions are correctly identified -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + myfunc((1, mytype(1, 'ground'))), -- for records, we need an extra pair of parens to work -- + myfunc((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + dbe_output.print_line('locale id is: ' || aa(1).id); + dbe_output.print_line('biome 9.1 is: ' || aa(2).locale.biome); -- ... space! -- +end; +/ +locale id is: 9 +biome 9.1 is: space +-- This is what is going to happen when functions and types share the same name -- +-- (Don't try this at home) -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + functype(1, mytype(1, 'ground')), -- we are prioritizing types here -- + functype(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + dbe_output.print_line('locale id is: ' || aa(1).id); + dbe_output.print_line('biome 1.2 is: ' || aa(2).locale.biome); -- air -- +end; +/ +locale id is: 1 +biome 1.2 is: air +drop type functype; -- abandon type functype -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- here we have to use function functype -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + dbe_output.print_line('locale ?? is: ' || aa(1).id); + dbe_output.print_line('biome ??? is: ' || aa(2).locale.biome); -- weird places -- +end; +/ +locale ?? is: -1 +biome ??? is: unknown realm +drop function functype; -- oops! -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- not sure -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + dbe_output.print_line('This message is worth 300 tons of gold (once printed).'); +end; +/ +ERROR: function functype(record) does not exist +LINE 1: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_arra... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+QUERY: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_array."mytype2", + (functype((1, ROW(2, 'air'))))::plpgsql_array."mytype2"] +CONTEXT: referenced column: array +PL/pgSQL function inline_code_block line 6 during statement block local variable initialization +-- Multi-dimension arrays -- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + mat int[][] := ARRAY[arr, arr]; -- PLpgSQL style -- +begin + dbe_output.print_line('The magic number is: ' || mat(1)(2)); -- should be 2 -- +end; +/ +The magic number is: 2 +-- assignments && statements test -- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + mat int[][] := ARRAY[arr, ARRAY[4, 5 ,6]]; -- PLpgSQL style -- +begin + dbe_output.print_line('The magic number is: ' || mat[2](1)); -- should be 4 -- + mat[1](3) = mat(2)[3]; + dbe_output.print_line('The magic number is: ' || mat[1](3)); -- should be 6 -- + + insert into tmp(a) values (mat[1](2)), (mat(1)[2]), (mat(1)(2)), (mat[1][2]); +end; +/ +The magic number is: 4 +The magic number is: 6 +select * from tmp order by 1, 2; + a | b +---+--- + 2 | + 2 | + 2 | + 2 | +(4 rows) + +truncate tmp; +-- error out! -- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + type arraySecond is varray(10) of arrayfirst; -- Nested types are not supported, yet -- + mat arraySecond := arraySecond(arr, arr); +begin + dbe_output.print_line('The magic number is: ' || mat(1)(2)); -- should be 2 -- +end; +/ +ERROR: array type nested by array is not supported yet. +DETAIL: Define array type "arraysecond" of array is not supported yet. +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 3 +-- Should be empty -- +create or replace procedure pro1() as +declare + type students is varray(5) of varchar2(10); + names students := students(); +begin + raise NOTICE '%', names; + raise NOTICE '%', names.count; +end; +/ +call pro1(); +NOTICE: {} +NOTICE: 0 + pro1 +------ + +(1 row) + +-- constant! 
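(element assignment and whole-variable assignment to a CONSTANT collection both fail, as the two blocks below show) 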
-- +declare + type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb(1) := 12; + dbe_output.print_line(tb[1]); +end; +/ +ERROR: "tb" is declared CONSTANT +LINE 4: tb(1) := 12; + ^ +QUERY: DECLARE type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb(1) := 12; + dbe_output.print_line(tb[1]); +end +declare + type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb := ta('12','13'); + dbe_output.print_line(tb[1]); +end; +/ +ERROR: "tb" is declared CONSTANT +LINE 4: tb := ta('12','13'); + ^ +QUERY: DECLARE type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb := ta('12','13'); + dbe_output.print_line(tb[1]); +end +-- nested array -- +create or replace package pckg_test as + type rec1 is record(col1 varchar2); + type t_arr is table of rec1; + type rec2 is record(col1 t_arr, col2 t_arr); + type t_arr1 is table of rec2; +procedure proc_test(); +end pckg_test; +/ +create or replace package body pckg_test as +procedure proc_test() as +v_arr t_arr1; +v_rec rec1; +begin + v_arr(1).col1 := array[ROW('hello')]; + v_arr(1).col2 := array[ROW('world')]; + v_rec := v_arr(1).col2[1]; -- normal bracket -- + raise notice '%', v_arr(1).col2(1); -- parentheses -- + insert into tmp(b) values (v_arr(1).col2(1)); -- sql -- +end; +end pckg_test; +/ +call pckg_test.proc_test(); +NOTICE: (world) + proc_test +----------- + +(1 row) + +select * from tmp order by 1, 2; + a | b +---+--------- + | (world) +(1 row) + +CREATE OR REPLACE FUNCTION myarray_sort (ANYARRAY) +RETURNS ANYARRAY LANGUAGE SQL AS $$ +SELECT ARRAY( + SELECT $1[s.i] AS "foo" + FROM + generate_series(array_lower($1,1), array_upper($1,1)) AS s(i) + ORDER BY foo +); +$$; +select myarray_sort(array[9,8,7,1,2,35]); + myarray_sort +---------------- + {1,2,7,8,9,35} +(1 row) + +create table testtbl (plg_id varchar2); +declare + type array_vchar is VARRAY(20) of varchar2; + plg_id array_vchar := array_vchar(); + ans int; + begin + select count(1) into ans from testtbl where plg_id = plg_id(1); +end; +/ + +drop table testtbl; + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- + +drop package if exists pckg_test; +drop procedure if exists pro1; +drop function if exists functype; +drop function if exists myfunc; +drop function if exists myarray_sort; +drop table if exists tmp; +drop table if exists customers; +drop type if exists functype; +drop type if exists mytype2; +drop type if exists mytype; + +-- clean up -- +drop schema if exists plpgsql_array cascade; +NOTICE: drop cascades to function plpgsql_array.proc_test() +NOTICE: function functype() does not exist, skipping +NOTICE: type "functype" does not exist, skipping +NOTICE: drop cascades to function name_list(integer) diff --git a/src/test/regress/expected/plpgsql_array_of_record.out b/src/test/regress/expected/plpgsql_array_of_record.out new file mode 100644 index 000000000..3cf9da323 --- /dev/null +++ b/src/test/regress/expected/plpgsql_array_of_record.out @@ -0,0 +1,673 @@ +-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios -- +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_arrayofrecord; +NOTICE: schema "plpgsql_arrayofrecord" does not exist, skipping +create schema plpgsql_arrayofrecord; +set current_schema = plpgsql_arrayofrecord;
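+-- note: the tests below read nested fields with dotted paths; both a parenthesized head such as -- +-- (va.info).age and mixed indexers such as va.c_list[2].c_info.age are accepted -- +-- initialize table and type -- +create 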
type info as (name varchar2(50), age int, address varchar2(20)); +create type customer as (id number(10), c_info info); +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +-- test record of record +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type cust is record (id number(10), info r1); + va cust; +begin + va := (1, ('Vera' ,32, 'Paris')); + return (va.info).age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 32 +(1 row) + +-- test record of array +create or replace function get_age RETURNS int as $$ +declare + type r1 is VARRAY(10) of customer; + type custs is record (c_list r1); + va custs; +begin + va.c_list := ARRAY[(1, ('Vera' ,32, 'Paris')),(2, ('Zera' ,25, 'London')),(3, ('Alice' ,22, 'Bangkok'))]; + return va.c_list[2].c_info.age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 25 +(1 row) + +-- test record of table +create or replace function get_age RETURNS int as $$ +declare + type r1 is table of customer index by varchar(10); + type custs is record (c_list r1); + va custs; +begin + va.c_list('a') := (1, ('Vera' ,32, 'Paris')); + va.c_list('aa') := (2, ('Zera' ,25, 'London')); + va.c_list('aaa') := (3, ('Alice' ,22, 'Bangkok')); + return va.c_list['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 25 +(1 row) + +-- test array of record +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is VARRAY(10) of cust; + va custs; + vb int; +begin + va := ARRAY[(1, ('Vera' ,32, 'Paris')),(2, ('Zera' ,25, 'London')),(3, ('Alice' ,22, 'Bangkok'))]; + return va[2].c_info.age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 25 +(1 row) + +-- test table of record +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + va('a') := (1, ('Vera' ,32, 'Paris')); + va('aa') := (2, ('Zera' ,25, 'London')); + va('aaa') := (3, ('Alice' ,22, 'Bangkok')); + return va['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 25 +(1 row) + +-- test table of record assign value to attribute +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + va('a').id := 1; + va('a').c_info := ('Vera' ,32, 'Paris'); + va('aa').id := 2; + va('aa').c_info := ('Zera' ,25, 'London'); + va('aaa').id := 3; + va('aaa').c_info := ('Alice' ,22, 'Bangkok'); + return va['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 25 +(1 row) + +-- test table of record assign value to attribute +create table custs_record (id int, c_info info); +insert into custs_record values(1, ('Vera' ,32, 'Paris')); +insert into custs_record values(2, ('Zera' ,25, 'London')); +insert into custs_record values(3, ('Alice' ,22, 'Bangkok')); +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + select id into va('a').id from custs_record where id = 1; + select c_info into va('a').c_info from custs_record where id = 1; + select id into va('aa').id from 
custs_record where id = 2; + select c_info into va('aa').c_info from custs_record where id = 2; + select id into va('aa').id from custs_record where id = 3; + select c_info into va('aa').c_info from custs_record where id = 3; + return va['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 22 +(1 row) + +drop table custs_record; + +-- test record of record of record +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + + va cust; + vb int; +begin + va := ((1, ('Vera' ,32, 'Paris')),1); + vb := (va.c_info).info.age; + return vb; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 32 +(1 row) + +-- test record of record: reusing the record type +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r2 is record (id number(10), info r1); + type r3 is record (id number(10),info r1); + + va r3; + vb r2; + vc int; +begin + va := (1, ('Vera' ,32, 'Paris')); + vb.id := va.id; + vb.info := va.info; + vc := (vb.info).age; + return vc; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 32 +(1 row) + +-- array of record of array of record +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 18 +(1 row) + +-- table of record of table of record +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is table of r1; + type r2 is record (col1 r1_arr); + type r2_arr is table of r2; + + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + va(2).col1(2).age = 45; + raise info '%', va; + return 1; +end; +$$ language plpgsql; +select get_age(); +INFO: {"(\"{\"\"(Vera,32,Paris)\"\",\"\"(Zera,25,London)\"\"}\")","(\"{\"\"(Jack,22,\\\\\"\"New York\\\\\"\")\"\",\"\"(Li,45,Beijing)\"\"}\")"} +CONTEXT: referenced column: get_age + get_age +--------- + 1 +(1 row) + +-- record of table of record +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is table of r1; + type r2 is record (col1 r1_arr); + + va r2; + vb int; +begin + va.col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va.col1(2).age = 45; + raise info '%', va; + return 1; +end; +$$ language plpgsql; +select get_age(); +INFO: ("{""(Vera,32,Paris)"",""(Zera,45,London)""}") +CONTEXT: referenced column: get_age + get_age +--------- + 1 +(1 row) + 
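+-- note: each record/varray type declared inside a function materializes a composite type in pg_type; -- +-- the check below confirms those types are dropped together with the function -- +--test: types should be dropped when the function is dropped +select typname from pg_type where typtype = 'c' and typarray != 0 and typnamespace = (select oid from pg_namespace where nspname = current_schema) order by typname desc; +--?.* +--?.* + info + customer +--?.* +(3 rows) + +drop function get_age(); +select typname from pg_type 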
where typtype = 'c' and typarray != 0 and typnamespace = (select oid from pg_namespace where nspname = current_schema) order by typname desc; + typname +---------- + info + customer +(2 rows) + +--test: names with special characters +create or replace function "GET::INT;;INT" RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +$$ language plpgsql; +select "GET::INT;;INT"(); + GET::INT;;INT +--------------- + 18 +(1 row) + +DROP FUNCTION "GET::INT;;INT"(); +-- test record duplicate name situations, record var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + type get_age_r1 is record (a int, b int); + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := (1,2); + return vc.a; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 1 +(1 row) + +-- test record duplicate name situations, record var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type get_age_r1 is record (a int, b int); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := (4,5); + return vc.b; +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 5 +(1 row) + +-- test record duplicate name situations, varray var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type get_age_r1 is VARRAY(10) of int; + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := array[1,2,3,4,5]; + return vc(2); +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 2 +(1 row) + +-- test record duplicate name situations, varray var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + type get_age_r1 is VARRAY(10) of int; + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := array[1,2,3,4,5]; + return vc(3); +end; +$$ language plpgsql; +select get_age(); + get_age +--------- + 3 +(1 row) + +-- test record duplicate name situations, var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + get_age_r1 int; + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + va cust; + vb int; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + get_age_r1 :=10; + return get_age_r1; +end; +$$ language plpgsql; +select get_age(); + 
get_age +--------- + 10 +(1 row) + +drop function get_age(); +-- ERROR: type name too long +create or replace function get_age RETURNS int as $$ +declare + type rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr1 is record (name varchar2(50), age int, address varchar2(20)); + type cust is record (id number(10), info rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr1); + va cust; +begin + va := (1, ('Vera' ,32, 'Paris')); + return (va.info).age; +end; +$$ language plpgsql; +ERROR: type name too long +--?.* +CONTEXT: compilation of PL/pgSQL function "get_age" near line 4 +--ERROR: record type nesting is not supported in anonymous blocks. +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type cust is record (id number(10), info r1); + va cust; +begin + va := (1, ('Vera' ,32, 'Paris')); +end; +/ +ERROR: array or record type nesting is not supported in anonymous block yet. +DETAIL: Define a record type of record is not supported yet. +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +--ERROR: record type nesting is not supported in anonymous blocks. +declare + type cust is record (id int, c_info info); + type custs is VARRAY(10) of cust; + va custs; +begin + va := ARRAY[(1, ('Vera' ,32, 'Paris')),(2, ('Zera' ,25, 'London')),(3, ('Alice' ,22, 'Bangkok'))]; +end; +/ +ERROR: array or record type nesting is not supported in anonymous block yet. +DETAIL: Define array type "custs" of record is not supported yet. +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +--ERROR: record type nesting is not supported in anonymous blocks. +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + va('a') := (1, ('Vera' ,32, 'Paris')); + va('aa') := (2, ('Zera' ,25, 'London')); + va('aaa') := (3, ('Alice' ,22, 'Bangkok')); +end; +/ +ERROR: array or record type nesting is not supported in anonymous block yet. +DETAIL: Define table type "custs" of record is not supported yet. 
+CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +-- test package: public function +create or replace package package_test as + function get_age() return int; +end package_test; +/ +create or replace package body package_test as + function get_age() return int is +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +end package_test; +/ +select package_test.get_age(); + get_age +--------- + 18 +(1 row) + +DROP PACKAGE package_test; +NOTICE: drop cascades to function plpgsql_arrayofrecord.get_age() +-- test package: private function +create or replace package package_test as + a int; +end package_test; +/ +create or replace package body package_test as + function get_age() return int is +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +end package_test; +/ +DROP PACKAGE package_test; +NOTICE: drop cascades to function plpgsql_arrayofrecord.get_age() +-- test package: public variable +create or replace package package_test as + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; +end package_test; +/ +create or replace package body package_test as + a int; +end package_test; +/ +DROP PACKAGE package_test; +-- test package: private variable +create or replace package package_test as + a int; +end package_test; +/ +create or replace package body package_test as + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; +end package_test; +/ +drop package package_test; +select typname from pg_type where typtype = 'c' and typarray != 0 and typnamespace = (select oid from pg_namespace where nspname = current_schema) order by typname desc; + typname +---------- + info + customer +(2 rows) + +--test table of record variable initialization +create or replace package package_test is +type rec_data is record(aa varchar2(10)); +type tab_data is table of rec_data; +procedure p1; +end package_test; +/ +create or replace package body package_test is +procedure p1 is +var1 tab_data; +begin +var1 :=tab_data(); +end; +end package_test; +/ +call package_test.p1(); + p1 +---- + +(1 row) + +drop package package_test; +NOTICE: drop cascades to function plpgsql_arrayofrecord.p1() +--test duplicated type name +-- create type "package_test.tab_data" as (a int, b int); +-- create or replace package package_test is +-- type rec_data is record(aa varchar2(10)); +-- type tab_data is table of rec_data; +-- procedure p1; +-- end package_test; +-- / +-- DROP TYPE "package_test.tab_data"; +--test record.array.extend +create or replace package pck1 is +type ta is varray(10) of int; +type tb is record(va ta); +procedure p1; +end pck1; +/ +create or replace package body pck1 
is +procedure p1() is +v1 tb; +begin +v1.va.extend(9); +raise NOTICE '%',v1.va.first; +raise NOTICE '%',v1.va.count(); +v1.va.delete(); +end; +end pck1; +/ +call pck1.p1(); +NOTICE: +NOTICE: + p1 +---- + +(1 row) + +DROP PACKAGE pck1; +NOTICE: drop cascades to function plpgsql_arrayofrecord.p1() +-- test array and table of the same nested record +create or replace package pkg100 +as +type ty0 is record (a int, b varchar); +type ty1 is table of ty0 index by varchar; +type ty2 is varray(10) of ty0; +va ty1; +vb ty2; +procedure p1; +end pkg100; +/ +create or replace package body pkg100 +as +procedure p1 as +begin +va ('abc') := (1,'a'); +vb (1) := (2, 'b'); +raise info 'va: %', va; +raise info 'vb: %', vb; +end; +end pkg100; +/ +call pkg100.p1(); +INFO: va: {"(1,a)"} +INFO: vb: {"(2,b)"} + p1 +---- + +(1 row) + +drop package pkg100; +NOTICE: drop cascades to function plpgsql_arrayofrecord.p1() +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- +drop function if exists get_age(); +NOTICE: function get_age() does not exist, skipping +drop type if exists customer; +drop type if exists info; +-- clean up -- +drop schema if exists plpgsql_arrayofrecord cascade; diff --git a/src/test/regress/expected/plpgsql_array_opengauss.out b/src/test/regress/expected/plpgsql_array_opengauss.out new file mode 100644 index 000000000..9f0cc8ee3 --- /dev/null +++ b/src/test/regress/expected/plpgsql_array_opengauss.out @@ -0,0 +1,524 @@ +-- FOR PL/pgSQL VARRAY scenarios -- +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_array_opengauss; +NOTICE: schema "plpgsql_array_opengauss" does not exist, skipping +create schema plpgsql_array_opengauss; +set current_schema = plpgsql_array_opengauss; +-- initialize tables -- +create table customers ( + id number(10) not null, + c_name varchar2(50) not null, + c_age number(8) not null, + c_address varchar2(50), + salary float(2) not null, + constraint customers_pk primary key (id) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "customers_pk" for table "customers" +insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (2, 'Zera' ,25, 'London', 5999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98); +insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00); +insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00); +create table tmp(a int, b varchar(100)); +-- initialize functions, types etc. -- +create type mytype as ( + id integer, + biome varchar2(100) +); +create type mytype2 as ( + id integer, + locale myType +); +-- it turns any input to (9, (1, 'space')) +create or replace function myfunc(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (9, (1, 'space')); + return ret; +end; +/
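+-- note: when a type and a function share a name, constructor-style calls resolve to the type first; -- +-- the function is only picked once the type is dropped (see the functype blocks below) -- +-- type and function share the same name -- +-- Uh-oh, what's gonna happen?? 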
-- +create type functype as ( + id integer, + locale myType +); +create or replace function functype(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (-1, (1, 'unknown realm')); + return ret; +end; +/ +-- test function datatype priority -- +create or replace function name_list(inint in integer) +return integer +is + ret integer; +begin + ret := 1; + return ret; +end; +/ +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +-- general declare + assign + access -- +-- support varray with parentheses in SQL -- +DECLARE + CURSOR c_customers is + SELECT c_name FROM customers order by id; + type c_list is varray (6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +BEGIN + FOR n IN c_customers LOOP + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + END LOOP; + + insert into tmp values (NULL, name_list(1)); + insert into tmp values (NULL, name_list(3)); + name_list(2) := name_list(3); + insert into tmp values (NULL, name_list[2]); -- same as last one -- +END; +/ +select * from tmp order by 1, 2; + a | b +---+------- + | Alice + | Alice + | Vera +(3 rows) + +truncate tmp; +-- ERROR: a mix of parens and brackets is not allowed -- +declare + cursor c_customers is (select c_name from customers order by id); + type c_list is varray(6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +begin + for n in c_customers loop + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + end loop; + + insert into tmp values (null, name_list(1]); +end; +/ +ERROR: mismatched parentheses at or near ";" +LINE 12: insert into tmp values (null, name_list(1]); + ^ +QUERY: DECLARE cursor c_customers is (select c_name from customers order by id); + type c_list is varray(6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +begin + for n in c_customers loop + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + end loop; + + insert into tmp values (null, name_list(1]); +end +-- parentheses support in SQL 2 -- +-- array of record -- +declare + cursor c_customers is (select * from customers order by id); + type c_list is varray(6) of customers; + customer_list c_list := c_list(); + counter integer := 0; + name varchar2(50) := ''; +begin + for n in c_customers loop + counter := counter + 1; + customer_list.extend; + customer_list(counter) := (n.id, n.c_name, n.c_age, n.c_address, n.salary); -- insert record -- + name := customer_list(counter).c_name; + if customer_list(counter).c_age <= 30 then + insert into tmp values (null, customer_list(counter).c_name); + else + insert into tmp values (null, name); + end if; + insert into tmp values (customer_list(counter).c_age, customer_list(counter).salary); -- parentheses -- + end loop; +end; +/ +select * from tmp order by 1, 2; + a | b +----+--------- + 22 | 9800.98 + 25 | 5999 + 26 | 18700 + 28 | 18999 + 32 | 22999 + 42 | 32999 + | Alice + | Gauss + | Jim + | Kevin + | Vera + | Zera +(12 rows) + +truncate tmp; +-- batch initialization, batch insert varray -- +declare + type students is varray(6) of varchar2(10); + type grades is varray(6) of integer; + marks grades := grades('98', 97, 74 + 4, (87), 92, 100); -- batch initialize -- + names students default students('none'); -- default -- + total integer; +begin + names := 
students(); -- should append NULL, then do the coercion -- + names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert -- + total := names.count; + insert into tmp values (total, null); + for i in 1 .. total loop + insert into tmp values (marks(i), names(i)); + end loop; +end; +/ +select * from tmp order by 1, 2; + a | b +-----+------- + 6 | + 78 | Alice + 87 | Jim + 92 | Kevin + 97 | Zera + 98 | Vera + 100 | Gauss +(7 rows) + +truncate tmp; +-- block above will be rewritten into this form (close to this form, but with parens and coercions) -- +declare + type students is varray(6) of varchar2(10); + type grades is varray(6) of integer; + marks grades := array['98', 97, 74 + 4, (87), 92, 100]; -- batch initialize -- + names students default array['none']; -- default -- + total integer; +begin + names := array[NULL]; + names := array['Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss']; -- batch insert -- + total := names.count; + insert into tmp values (total, null); + for i in 1 .. total loop + insert into tmp values (marks(i), names(i)); + end loop; +end; +/ +select * from tmp order by 1, 2; + a | b +-----+------- + 6 | + 78 | Alice + 87 | Jim + 92 | Kevin + 97 | Zera + 98 | Vera + 100 | Gauss +(7 rows) + +truncate tmp; +-- test of PL/SQL data type instantiation -- +-- If we specify the type (PL/SQL-like instantiation), all varray members -- +-- can be cast to the correct data type. -- +declare + type students is varray(5) of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly -- + for i in 1 .. 5 loop + insert into tmp values (null, names(i)); + end loop; +end; +/ +select * from tmp order by 1, 2; + a | b +---+------- + | 1 + | Alice + | Jim + | Kevin + | Zera +(5 rows) + +truncate tmp; +-- However, if we use the PL/pgSQL-style instantiation, it is not guaranteed -- +-- error out for this one -- +declare + type students is varray(5) of varchar2(10); + names students; +begin + -- we can only make assumptions based on the first element, which is not always a good answer -- + names := array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin']; + for i in 1 .. 5 loop + insert into tmp values (null, names(i)); + end loop; +end; +/ +ERROR: invalid input syntax for integer: "Zera " +LINE 1: SELECT array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin'] + ^ +QUERY: SELECT array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin'] +CONTEXT: referenced column: array +PL/pgSQL function inline_code_block line 5 at assignment +select * from tmp order by 1, 2; + a | b +---+--- +(0 rows) + +truncate tmp; +-- test of uneven brackets -- +-- error out -- +declare + type students is varray(5) of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly -- + for i in 1 .. 5 loop + insert into tmp values (null, names(i]); + end loop; +end; +/ +ERROR: mismatched parentheses at or near ";" +LINE 6: insert into tmp values (null, names(i]); + ^ +QUERY: DECLARE type students is varray(5) of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly -- + for i in 1 .. 
5 loop + insert into tmp values (null, names(i]); + end loop; +end +select * from tmp order by 1, 2; + a | b +---+--- +(0 rows) + +truncate tmp; +-- Using composite type defined outside of the procedure block -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, (1, 'ground')), + mytype2(1, (2, 'air')) + ); +begin + aa.extend(10); + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + insert into tmp values (aa(1).id, aa(2).locale.biome); +end; +/ +select * from tmp order by 1, 2; + a | b +---+------- + 1 | water +(1 row) + +truncate tmp; +-- Note: array can handle proper type-in-type declaration for now -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + insert into tmp values (aa(1).id, aa(2).locale.biome); +end; +/ +select * from tmp order by 1, 2; + a | b +---+------- + 1 | water +(1 row) + +truncate tmp; +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := mytype2(2, mytype(3, 'water')); + insert into tmp values (aa(1).id, aa(2).locale.biome); +end; +/ +select * from tmp order by 1, 2; + a | b +---+------- + 1 | water +(1 row) + +truncate tmp; +-- working with functions -- +-- should be the same, except the result; make sure functions are correctly identified -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + myfunc((1, mytype(1, 'ground'))), -- for records, we need an extra pair of parens to work -- + myfunc((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + insert into tmp values (aa(1).id, aa(2).locale.biome); +end; +/ +select * from tmp order by 1, 2; + a | b +---+------- + 9 | space +(1 row) + +truncate tmp; +-- This is what is going to happen when functions and types share the same name -- +-- (Don't try this at home) -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + functype(1, mytype(1, 'ground')), -- we are prioritizing types here -- + functype(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + insert into tmp values (aa(1).id, aa(2).locale.biome); +end; +/ +select * from tmp order by 1, 2; + a | b +---+----- + 1 | air +(1 row) + +truncate tmp; +drop type functype; -- abandon type functype -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- here we have to use function functype -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + insert into tmp values (aa(1).id, aa(2).locale.biome); +end; +/ +select * from tmp order by 1, 2; + a | b +----+--------------- + -1 | unknown realm +(1 row) + +truncate tmp; +drop function functype; -- oops! -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- not sure -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); +end; +/ +ERROR: function functype(record) does not exist +LINE 1: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_arra... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+QUERY: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_array_opengauss."mytype2", + (functype((1, ROW(2, 'air'))))::plpgsql_array_opengauss."mytype2"] +CONTEXT: referenced column: array +PL/pgSQL function inline_code_block line 6 during statement block local variable initialization +-- Multi-dimension arrays -- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + mat int[][] := ARRAY[arr, arr]; -- PLpgSQL style -- +begin + insert into tmp values (null, mat(1)(2)); +end; +/ +select * from tmp order by 1, 2; + a | b +---+--- + | 2 +(1 row) + +truncate tmp; +-- assignments && statements test -- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + mat int[][] := ARRAY[arr, ARRAY[4, 5 ,6]]; -- PLpgSQL style -- +begin + insert into tmp(a) values (mat[1](2)), (mat(1)[2]), (mat(1)(2)), (mat[1][2]); +end; +/ +select * from tmp order by 1, 2; + a | b +---+--- + 2 | + 2 | + 2 | + 2 | +(4 rows) + +truncate tmp; +-- error out! -- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + type arraySecond is varray(10) of arrayfirst; -- Nested types are not supported, yet -- + mat arraySecond := arraySecond(arr, arr); +begin + insert into tmp values (null, mat(1)(2)); +end; +/ +ERROR: array type nested by array is not supported yet. +DETAIL: Define array type "arraysecond" of array is not supported yet. +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 3 +select * from tmp order by 1, 2; + a | b +---+--- +(0 rows) + +truncate tmp; +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- +drop function if exists functype; +NOTICE: function functype() does not exist, skipping +drop function if exists myfunc; +drop table if exists tmp; +drop table if exists customers; +drop type if exists functype; +NOTICE: type "functype" does not exist, skipping +drop type if exists mytype2; +drop type if exists mytype; +-- clean up -- +drop schema if exists plpgsql_array_opengauss cascade; +NOTICE: drop cascades to function name_list(integer) diff --git a/src/test/regress/expected/plpgsql_assign_list.out b/src/test/regress/expected/plpgsql_assign_list.out new file mode 100644 index 000000000..a10f85150 --- /dev/null +++ b/src/test/regress/expected/plpgsql_assign_list.out @@ -0,0 +1,307 @@ +-- FOR PL/pgSQL ASSIGN LIST scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + sql_compatibility +------------------- + A +(1 row) + + +-- create new schema -- +drop schema if exists plpgsql_assignlist; +NOTICE: schema "plpgsql_assignlist" does not exist, skipping +create schema plpgsql_assignlist; +set current_schema = plpgsql_assignlist; + +-- initialize table and type -- +create type o1 as (o1a int, o1b int); +create type o2 as (o2a o1, o2b int); +create type o3 as (o3a o2, o3b int); +create type o4 as (o4a o3, o4b int); +create type o5 as (o5a o2[], o5b int); +create type o6 as (o6a o5, o6b int); + 
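+-- note (illustrative, not executed): with these definitions a full o4 literal nests four levels deep, -- +-- e.g. va := ((((1,2),3),4),5); then va.o4a.o3a.o2a.o1a = 1 and va.o4b = 5 -- +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- test assign list without array: nested record +create or replace function get_age RETURNS integer as $$ +declare + type r1 is record (r1a int, r1b int); + type r2 is record (r2a r1, r2b int); + type r3 is record (r3a r2, r3b int); + va r3; +begin + va.r3a.r2a.r1a := 123; + raise info '%', va; 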
+ va := (((4,3),2),1); + raise info '%', va; + va.r3a.r2a.r1a := 456; + raise info '%', va; + return va.r3a.r2a.r1a; +end; +$$ language plpgsql; +select get_age(); +INFO: ("(""(123,)"",)",) +CONTEXT: referenced column: get_age +INFO: ("(""(4,3)"",2)",1) +CONTEXT: referenced column: get_age +INFO: ("(""(456,3)"",2)",1) +CONTEXT: referenced column: get_age + get_age +--------- + 456 +(1 row) + + +-- test assign list without array: nested composite type +create or replace function get_age RETURNS integer as $$ +declare + va o4; +begin + va.o4a.o3a.o2a.o1a := 123; + raise info '%', va; + va.o4a.o3a.o2a := (456, 789); + raise info '%', va; + return va.o4a.o3a.o2a.o1a; +end; +$$ language plpgsql; +select get_age(); +INFO: ("(""(""""(123,)"""",)"",)",) +CONTEXT: referenced column: get_age +INFO: ("(""(""""(456,789)"""",)"",)",) +CONTEXT: referenced column: get_age + get_age +--------- + 456 +(1 row) + + +-- test assign list with array: array within the first three words +create or replace function get_age RETURNS integer as $$ +declare + TYPE o3_arr is VARRAY(10) of o3; + va o3_arr; +begin + va(1).o3a.o2a.o1a := 123; + raise info '%', va; + va(2).o3a.o2a := (456, 789); + raise info '%', va; + va(3).o3a := ((123, 456),789); + raise info '%', va; + return va(2).o3a.o2a.o1b; +end; +$$ language plpgsql; +select get_age(); +INFO: {"(\"(\"\"(123,)\"\",)\",)"} +CONTEXT: referenced column: get_age +INFO: {"(\"(\"\"(123,)\"\",)\",)","(\"(\"\"(456,789)\"\",)\",)"} +CONTEXT: referenced column: get_age +INFO: {"(\"(\"\"(123,)\"\",)\",)","(\"(\"\"(456,789)\"\",)\",)","(\"(\"\"(123,456)\"\",789)\",)"} +CONTEXT: referenced column: get_age + get_age +--------- + 789 +(1 row) + + + +-- test assign list with array: array within the first three words +create or replace function get_age RETURNS integer as $$ +declare + va o5; +begin + va.o5a(1).o2a.o1a := 123; + raise info '%', va; + va.o5a(2).o2a := (456, 789); + raise info '%', va; + va.o5a(3) := ((123, 456),789); + raise info '%', va; + return va.o5a(2).o2a.o1a; +end; +$$ language plpgsql; +select get_age(); +INFO: ("{""(\\""(123,)\\"",)""}",) +CONTEXT: referenced column: get_age +INFO: ("{""(\\""(123,)\\"",)"",""(\\""(456,789)\\"",)""}",) +CONTEXT: referenced column: get_age +INFO: ("{""(\\""(123,)\\"",)"",""(\\""(456,789)\\"",)"",""(\\""(123,456)\\"",789)""}",) +CONTEXT: referenced column: get_age + get_age +--------- + 456 +(1 row) + + +-- test assign list with array: array within the first three words +create or replace function get_age RETURNS integer as $$ +declare + va o6; +begin + va.o6a.o5a(1).o2a.o1a := 123; + raise info '%', va; + va.o6a.o5a(2).o2a := (456, 789); + raise info '%', va; + va.o6a.o5a(3) := ((123, 456),789); + raise info '%', va; + return va.o6a.o5a(2).o2a.o1a; +end; +$$ language plpgsql; +select get_age(); +INFO: ("(""{""""(\\\\""""(123,)\\\\"""",)""""}"",)",) +CONTEXT: referenced column: get_age +INFO: ("(""{""""(\\\\""""(123,)\\\\"""",)"""",""""(\\\\""""(456,789)\\\\"""",)""""}"",)",) +CONTEXT: referenced column: get_age +INFO: ("(""{""""(\\\\""""(123,)\\\\"""",)"""",""""(\\\\""""(456,789)\\\\"""",)"""",""""(\\\\""""(123,456)\\\\"""",789)""""}"",)",) +CONTEXT: referenced column: get_age + get_age +--------- + 456 +(1 row) + 
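+-- note: the assignments above write through out-of-range indexes (va.o5a(2), va.o5a(3)) without an -- +-- explicit extend call; the collection grows as the assign list is resolved -- +-- test assign list with array: with record nested +create or replace function get_age RETURNS integer as $$ +declare + TYPE r1 is RECORD (r1a int, r1b int); + TYPE r1_arr is VARRAY(10) of r1; + TYPE r2 is RECORD (r2a r1_arr); + va r2; +begin + va.r2a(1).r1a := 123; + raise info '%', va.r2a(1).r1a; + va.r2a(2) := (456, 789); + raise info 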
'%', va; + va.r2a(2).r1b := 999; + raise info '%', va; + return va.r2a(2).r1b; +end; +$$ language plpgsql; +select get_age(); +INFO: 123 +CONTEXT: referenced column: get_age +INFO: ("{""(123,)"",""(456,789)""}") +CONTEXT: referenced column: get_age +INFO: ("{""(123,)"",""(456,999)""}") +CONTEXT: referenced column: get_age + get_age +--------- + 999 +(1 row) + + +-- test assign list with table: with record nested +create or replace function get_age RETURNS integer as $$ +declare + TYPE r1 is RECORD (r1a int, r1b int); + TYPE r1_arr is table of r1 index by varchar2(10); + TYPE r2 is RECORD (r2a r1_arr); + va r2; +begin + va.r2a('a').r1a := 123; + raise info '%', va.r2a('a').r1a; + va.r2a('aa') := (456, 789); + raise info '%', va; + va.r2a('aa').r1b := 999; + raise info '%', va; + return va.r2a('aa').r1b; +end; +$$ language plpgsql; +select get_age(); +INFO: 123 +CONTEXT: referenced column: get_age +INFO: ("{""(123,)"",""(456,789)""}") +CONTEXT: referenced column: get_age +INFO: ("{""(123,)"",""(456,999)""}") +CONTEXT: referenced column: get_age + get_age +--------- + 999 +(1 row) + + +--test assign list with array: array not within the first three words +create or replace function get_age RETURNS integer as $$ +declare + TYPE r1 is RECORD (r1a o6, r1b int); + va r1; +begin + va.r1a.o6a.o5a(1).o2a.o1a := 123; + raise info '%', va; + va.r1a.o6a.o5a(2).o2a := (456, 789); + raise info '%', va; + va.r1a.o6a.o5a(3) := ((123, 456),789); + raise info '%', va; + return va.r1a.o6a.o5a[2].o2a.o1a; +end; +$$ language plpgsql; +select get_age(); +INFO: ("(""(""""{""""""""(\\\\\\\\""""""""(123,)\\\\\\\\"""""""",)""""""""}"""",)"",)",) +CONTEXT: referenced column: get_age +INFO: ("(""(""""{""""""""(\\\\\\\\""""""""(123,)\\\\\\\\"""""""",)"""""""",""""""""(\\\\\\\\""""""""(456,789)\\\\\\\\"""""""",)""""""""}"""",)"",)",) +CONTEXT: referenced column: get_age +INFO: ("(""(""""{""""""""(\\\\\\\\""""""""(123,)\\\\\\\\"""""""",)"""""""",""""""""(\\\\\\\\""""""""(456,789)\\\\\\\\"""""""",)"""""""",""""""""(\\\\\\\\""""""""(123,456)\\\\\\\\"""""""",789)""""""""}"""",)"",)",) +CONTEXT: referenced column: get_age + get_age +--------- + 456 +(1 row) + + +--test o1.col1.col2 reference +create type ct as (num int,info text); +create type ct1 as (num int,info ct); +create or replace package autonomous_pkg_a IS +count_public ct1 := (1,(1,'a')::ct)::ct1; +function autonomous_f_public(num1 int) return int; +end autonomous_pkg_a; +/ +create or replace package body autonomous_pkg_a as +count_private ct1 :=(2,(2,'b')::ct)::ct1; +function autonomous_f_public(num1 int) return int +is +declare +re_int int; +begin +count_public.num = num1 + count_public.num; +count_private.num = num1 + count_private.num; +raise info 'count_public.info.num: %', count_public.info.num; +count_public.info.num = count_public.info.num + num1; +raise info 'count_public.info.num: %', count_public.info.num; +count_private.info.num = count_private.info.num + num1; +re_int = count_public.num +count_private.num; +return re_int; +end; +end autonomous_pkg_a; +/ + +select autonomous_pkg_a.autonomous_f_public(10); +INFO: count_public.info.num: 1 +CONTEXT: referenced column: autonomous_f_public +INFO: count_public.info.num: 11 +CONTEXT: referenced column: autonomous_f_public + autonomous_f_public +--------------------- + 23 +(1 row) + +drop package autonomous_pkg_a; +NOTICE: drop cascades to function plpgsql_assignlist.autonomous_f_public(integer) +drop type ct1; +drop type ct; + +-------------------------------------------------- +------------------ END OF TESTS 
------------------ +-------------------------------------------------- +drop function if exists get_age(); +drop type if exists o6; +drop type if exists o5; +drop type if exists o4; +drop type if exists o3; +drop type if exists o2; +drop type if exists o1; + +-- clean up -- +drop schema if exists plpgsql_assignlist cascade; diff --git a/src/test/regress/expected/plpgsql_assign_value_to_array_attribute.out b/src/test/regress/expected/plpgsql_assign_value_to_array_attribute.out new file mode 100644 index 000000000..278176179 --- /dev/null +++ b/src/test/regress/expected/plpgsql_assign_value_to_array_attribute.out @@ -0,0 +1,558 @@ +-- FOR PL/pgSQL VARRAY Assign Value to Attribute scenarios -- + +-- check compatibility -- +-- create new schema -- +drop schema if exists plpgsql_arrayassign; +NOTICE: schema "plpgsql_arrayassign" does not exist, skipping +create schema plpgsql_arrayassign; +set current_schema = plpgsql_arrayassign; + +-- initialize table and type-- +create type info as (name varchar2(50), age int, address varchar2(20), salary float(2)); +create type customer as (id number(10), c_info info); +create table customers (id number(10), c_info info); + +insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00)); +insert into customers (id, c_info) values (2, ('Zera' ,25, 'London', 5999.00)); +insert into customers (id, c_info) values (3, ('Alice' ,22, 'Bangkok', 9800.98)); +insert into customers (id, c_info) values (4, ('Jim' ,26, 'Dubai', 18700.00)); +insert into customers (id, c_info) values (5, ('Kevin' ,28, 'Singapore', 18999.00)); +insert into customers (id, c_info) values (6, ('Gauss' ,42, 'Beijing', 32999.00)); + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- test assign value by := +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + customer_list(2).id := 3; + customer_list(2).c_info := ('Alice' ,22, 'Bangkok', 9800.98); + customer_list(3).id := 4; + customer_list(3).c_info := ('Jim' ,26, 'Dubai', 18700.00); + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + get_customers +------------------------------------------------------------------------------------------------------- + {"(1,\"(Vera,32,Paris,22999)\")","(3,\"(Alice,22,Bangkok,9800.98)\")","(4,\"(Jim,26,Dubai,18700)\")"} +(1 row) + + +-- test assign value by select into +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + select id into customer_list(2).id from customers where id = 3; + select c_info into customer_list(2).c_info from customers where id = 3; + select id into customer_list(3).id from customers where id = 4; + select c_info into customer_list(3).c_info from customers where id = 4; + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + get_customers +------------------------------------------------------------------------------------------------------- + {"(1,\"(Vera,32,Paris,22999)\")","(3,\"(Alice,22,Bangkok,9800.98)\")","(4,\"(Jim,26,Dubai,18700)\")"} +(1 row) + + +-- 
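the same array-element targets are exercised through four channels in this file: direct :=, SELECT INTO, FETCH INTO, and EXECUTE IMMEDIATE ... INTO; only the data source differs -- +-- 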
test assign value by fetch into +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + CURSOR C1 IS SELECT id FROM customers ORDER by id; + CURSOR C2 IS SELECT c_info FROM customers ORDER by id; +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + OPEN C1; + OPEN C2; + FETCH C1 into customer_list(2).id; + FETCH C2 into customer_list(2).c_info; + FETCH C1 into customer_list(3).id; + FETCH C2 into customer_list(3).c_info; + CLOSE C1; + CLOSE C2; + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + get_customers +--------------------------------------------------------------------------------------------------- + {"(1,\"(Vera,32,Paris,22999)\")","(1,\"(Vera,32,Paris,22999)\")","(2,\"(Zera,25,London,5999)\")"} +(1 row) + +-- test assign value by EXECUTE IMMEDIATE into +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + EXECUTE IMMEDIATE 'select id from customers where id = :1' + INTO customer_list(2).id + USING IN 3; + EXECUTE IMMEDIATE 'select c_info from customers where id = :1' + INTO customer_list(2).c_info + USING IN 3; + EXECUTE IMMEDIATE 'select id from customers where id = :1' + INTO customer_list(3).id + USING IN 4; + EXECUTE IMMEDIATE 'select c_info from customers where id = :1' + INTO customer_list(3).c_info + USING IN 4; + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + get_customers +------------------------------------------------------------------------------------------------------- + {"(1,\"(Vera,32,Paris,22999)\")","(3,\"(Alice,22,Bangkok,9800.98)\")","(4,\"(Jim,26,Dubai,18700)\")"} +(1 row) + +-- test assign value in two-dimensional arrays +-- (only := supports assigning values in two-dimensional arrays) +create or replace function get_customers RETURNS customer[] as $$ +declare + customer_list customer[][]; +begin + customer_list:= array[[(1, ('Vera' ,32, 'Paris', 22999.00)),(2, ('Zera' ,25, 'London', 5999.00))], + [(3, ('Alice' ,22, 'Bangkok', 9800.98)),(4, ('Jim' ,26, 'Dubai', 18700.00))]]; + customer_list(1)(1).id := 5; + customer_list(1)(1).c_info := ('Kevin' ,28, 'Singapore', 18999.00); + customer_list(1)(2).id := 6; + customer_list(1)(2).c_info := ('Gauss' ,42, 'Beijing', 32999.00); + customer_list(2)(1).id := 7; + customer_list(2)(1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + customer_list(2)(2).id := 8; + customer_list(2)(2).c_info := ('Jack' ,56, 'Hongkong', 8999.00); + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + get_customers +--------------------------------------------------------------------------------------------------------------------------------------------------- + {{"(5,\"(Kevin,28,Singapore,18999)\")","(6,\"(Gauss,42,Beijing,32999)\")"},{"(7,\"(Bob,24,Shanghai,28999)\")","(8,\"(Jack,56,Hongkong,8999)\")"}} +(1 row) + +-- test assign value with loop, if, case +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + CURSOR C1 IS SELECT * FROM customers ORDER by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_list(counter).id := n.id; + 
customer_list(counter).c_info := n.c_info; + end loop; + + IF counter > 1 THEN + counter := counter + 1; + select id into customer_list(counter).id from customers where id = 1; + select c_info into customer_list(counter).c_info from customers where id = 1; + ELSE + counter := counter + 1; + customer_list(counter).id := 7; + customer_list(counter).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + END IF; + + CASE counter + WHEN 1 THEN + customer_list(counter + 1).id := 7; + customer_list(counter + 1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + WHEN 2 THEN + customer_list(counter + 1).id := 8; + customer_list(counter + 1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + ELSE + customer_list(counter + 1).id := 9; + customer_list(counter + 1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + END CASE; +return customer_list; +end; +$$ language plpgsql; +select get_customers(); + get_customers +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"(1,\"(Vera,32,Paris,22999)\")","(2,\"(Zera,25,London,5999)\")","(3,\"(Alice,22,Bangkok,9800.98)\")","(4,\"(Jim,26,Dubai,18700)\")","(5,\"(Kevin,28,Singapore,18999)\")","(6,\"(Gauss,42,Beijing,32999)\")","(1,\"(Vera,32,Paris,22999)\")","(9,\"(Bob,24,Shanghai,28999)\")"} +(1 row) + + +--ERROR: assign value to deep level attribute is not supported yet +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).c_info.name; + return customer_list; +end; +$$ language plpgsql; +ERROR: assign value to deep level attribute is not supported in SELECT/FETCH INTO method at or near "." 
+LINE 6: select 'bob' into customer_list(1).c_info.name; + ^ + +--ERROR: test incorrect attribute name with T_CWORD type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).a.b.c; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "a" does not exist +DETAIL: attribute "a" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with T_CWORD type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).a.b.c.d.e.f; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "a" does not exist +DETAIL: attribute "a" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with T_DATUM type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).customer_list.id; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "customer_list" does not exist +DETAIL: attribute "customer_list" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with T_DATUM type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).customer_list.c; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "customer_list" does not exist +DETAIL: attribute "customer_list" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with T_DATUM type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).customer_list; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "customer_list" does not exist +DETAIL: attribute "customer_list" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with type name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).c_list; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "c_list" does not exist +DETAIL: attribute "c_list" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with T_WORD type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).a; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "a" does not exist +DETAIL: attribute "a" does not exist in array variable 
"customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with keyword +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).if; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "if" does not exist +DETAIL: attribute "if" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test incorrect attribute name with keyword +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).end; + return customer_list; +end; +$$ language plpgsql; +ERROR: attribute "end" does not exist +DETAIL: attribute "end" does not exist in array variable "customer_list" +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 6 + +--ERROR: test missing attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1). ; + return customer_list; +end; +$$ language plpgsql; +ERROR: missing or illegal attribute name at or near ";" +LINE 6: select 'bob' into customer_list(1). ; + ^ + +--ERROR: test missing attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1). := 'bob'; + return customer_list; +end; +$$ language plpgsql; +ERROR: missing or illegal attribute name at or near ":=" +LINE 6: customer_list(1). 
:= 'bob'; + ^ + +--ERROR: test illegal attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1).6a := 'bob'; + return customer_list; +end; +$$ language plpgsql; +ERROR: syntax error at or near ".6" +LINE 6: customer_list(1).6a := 'bob'; + ^ + +--ERROR: test illegal attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1).%# := 'bob'; + return customer_list; +end; +$$ language plpgsql; +ERROR: missing or illegal attribute name at or near "%#" +LINE 6: customer_list(1).%# := 'bob'; + ^ + +--ERROR: test incorrect array name : spelling error +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_lis(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +ERROR: function "customer_lis" doesn't exist +CONTEXT: compilation of PL/pgSQL function "get_customers" near line 4 + +--ERROR: test incorrect array name : scalar variable +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + a int; +begin + a(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); +ERROR: subscripted object in assignment is not an array +CONTEXT: PL/pgSQL function get_customers() line 7 at assignment + +--ERROR: test incorrect array name : array type name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + a int; +begin + c_list(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +ERROR: syntax error at or near "c_list" +LINE 7: c_list(1).id := 3; + ^ + +--ERROR: test incorrect array name : array element type is not composite +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + type i_list is VARRAY(10) of int; + customer_list c_list:=c_list(); + a i_list := i_list(); +begin + a(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); +ERROR: array element type is not composite in assignment +DETAIL: array variable "id" must be composite when assign value to attibute +CONTEXT: PL/pgSQL function get_customers() line 8 at assignment + +--ERROR: test incorrect array name : array element type is record/row +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + a customers; +begin + a(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); +ERROR: subscripted object in assignment is not an array +CONTEXT: PL/pgSQL function get_customers() line 7 at assignment + +--ERROR: test array dimensions exceeds the maximum. 
+create or replace function get_customers RETURNS customer[] as $$
+declare
+ type c_list is VARRAY(10) of customer;
+ customer_list c_list:=c_list();
+begin
+ customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00));
+ customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00));
+ customer_list(1)(2)(3)(4)(5)(6)(7).id := 3;
+ return customer_list;
+end;
+$$ language plpgsql;
+call get_customers();
+ERROR: number of array dimensions (7) exceeds the maximum allowed (6) in assignment.
+CONTEXT: PL/pgSQL function get_customers() line 8 at assignment
+
+-- Test PLPGSQL ARRAY defined with type from different schema
+drop schema if exists test_pl_array_schema;
+NOTICE: schema "test_pl_array_schema" does not exist, skipping
+create schema test_pl_array_schema;
+create TYPE test_pl_array_schema.desc_tab as (
+ col_type int ,
+ col_max_len int ,
+ col_name VARCHAR2(32) ,
+ col_name_len int ,
+ col_schema_name VARCHAR2(32) ,
+ col_schema_name_len int ,
+ col_precision int ,
+ col_scale int ,
+ col_charsetid int ,
+ col_charsetform int ,
+ col_null_ok BOOLEAN);
+create TYPE test_pl_array_schema.varchar2_table as (str VARCHAR2(2000));
+create TYPE test_pl_array_schema.number_table as (num NUMBER);
+create TYPE test_pl_array_schema.date_table as (dat DATE);
+create TYPE test_pl_array_schema.comp_table as (a int, b VARCHAR2(10));
+
+create or replace function get_table_array RETURNS void as $$
+declare
+ type num_arr is VARRAY(10) of test_pl_array_schema.number_table;
+ v_a num_arr:=num_arr();
+ type varchar2_arr is VARRAY(10) of test_pl_array_schema.varchar2_table;
+ v_b varchar2_arr:=varchar2_arr();
+ type date_arr is VARRAY(10) of test_pl_array_schema.date_table;
+ v_c date_arr:=date_arr();
+ type tab_arr is VARRAY(10) of test_pl_array_schema.desc_tab;
+ v_d tab_arr:=tab_arr();
+ type comp_arr is VARRAY(10) of test_pl_array_schema.comp_table;
+ v_e comp_arr:=comp_arr();
+begin
+ v_a.extend(1);
+ v_a(1).num := 1;
+ v_b.extend(1);
+ v_b(1).str := 'aaa';
+ v_c.extend(1);
+ v_c(1).dat := '2003-04-12 04:05:06';
+ v_d.extend(1);
+ v_d(1).col_type := 22;
+ v_e.extend(1);
+ v_e(1) := (11,'haha');
+ RAISE NOTICE '% % % % % % ', v_a(1).num, v_b(1).str, v_c(1).dat, v_d(1).col_type, v_e(1).a, v_e(1).b;
+end;
+$$ language plpgsql;
+call get_table_array();
+NOTICE: 1 aaa Sat Apr 12 04:05:06 2003 22 11 haha
+ get_table_array
+-----------------
+
+(1 row)
+
+
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+
+drop function if exists get_table_array;
+drop type if exists test_pl_array_schema.comp_table;
+drop type if exists test_pl_array_schema.date_table;
+drop type if exists test_pl_array_schema.number_table;
+drop type if exists test_pl_array_schema.varchar2_table;
+drop type if exists test_pl_array_schema.desc_tab;
+drop function if exists get_customers;
+drop table if exists customers;
+drop type if exists customer;
+drop type if exists info;
+
+-- clean up --
+drop schema if exists test_pl_array_schema cascade;
+drop schema if exists plpgsql_arrayassign cascade;
diff --git a/src/test/regress/expected/plpgsql_bulk_collect.out b/src/test/regress/expected/plpgsql_bulk_collect.out
new file mode 100644
index 000000000..9a173f24e
--- /dev/null
+++ b/src/test/regress/expected/plpgsql_bulk_collect.out
@@ -0,0 +1,551 @@
+-- FOR PL/pgSQL bulk collect into scenarios --
+-- check compatibility --
+show sql_compatibility; -- expect A --
+ sql_compatibility
+-------------------
+ A
+(1 row)
+
+-- create new schema --
+drop schema if exists plpgsql_bulk_collect;
+NOTICE: schema "plpgsql_bulk_collect" does not exist, skipping
+create schema plpgsql_bulk_collect;
+set current_schema = plpgsql_bulk_collect;
+-- initialize tables --
+create table customers (
+ id number(10) not null,
+ c_name varchar2(50),
+ c_age number(8) not null,
+ c_address varchar2(50),
+ salary float(2) not null,
+ constraint customers_pk primary key (id)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "customers_pk" for table "customers"
+insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00);
+insert into customers (id, c_name, c_age, c_address, salary) values (2, '' ,25, 'London', 5999.00); -- a missing value here
+insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98);
+insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00);
+insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00);
+insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00);
+create table bigtmp(a int, b varchar(10000));
+create table tmp(a int, b varchar(100));
+create type mytype as (
+ id integer,
+ biome varchar2(100)
+);
+create table biomebook (
+ id integer,
+ b_entry mytype
+);
+insert into biomebook values (1, (1, 'savanna'));
+insert into biomebook values (2, (2, 'giant-tree-taiga'));
+insert into biomebook values (3, (9, 'nether'));
+-- returns a biome record --
+create or replace function biofunc()
+return mytype
+is
+ ret mytype;
+begin
+ ret := (2, 'giant-tree-taiga');
+ return ret;
+end;
+/
+-- test function into target priority --
+create or replace function id_arr(inint in integer)
+return integer
+is
+ ret integer;
+begin
+ ret := 1234;
+ return ret;
+end;
+/
+----------------------------------------------------
+------------------ START OF TESTS ------------------
+----------------------------------------------------
+-- Scene1: select ... bulk collect into --
+-- single scalar out --
+declare
+ type i_list is varray(6) of integer;
+ i_arr i_list;
+begin
+ select 1 bulk collect into i_arr;
+ for i in 1..i_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || i_arr(i));
+ end loop;
+end;
+/
+id(1): 1
+-- multiple scalars out --
+declare
+ type i_list is varray(6) of integer;
+ i_arr i_list;
+begin
+ select 1 bulk collect into i_arr from customers;
+ for i in 1..i_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || i_arr(i));
+ end loop;
+end;
+/
+id(1): 1
+id(2): 1
+id(3): 1
+id(4): 1
+id(5): 1
+id(6): 1
+-- records handling --
+declare
+ type ty_list is varray(6) of mytype;
+ ty_arr ty_list;
+begin
+ select (1, 'savanna')::mytype bulk collect into ty_arr;
+ for i in 1..ty_arr.count loop
+ dbe_output.print_line('biome record: ' || ty_arr(i));
+ end loop;
+end;
+/
+biome record: (1,savanna)
+declare
+ type ty_list is varray(6) of mytype;
+ ty_arr ty_list;
+begin
+ select biofunc() bulk collect into ty_arr;
+ for i in 1..ty_arr.count loop
+ dbe_output.print_line('biome record: ' || ty_arr(i));
+ end loop;
+end;
+/
+biome record: (2,giant-tree-taiga)
+declare
+ type ty_list is varray(6) of mytype;
+ ty_arr ty_list;
+begin
+ select b_entry bulk collect into ty_arr from biomebook order by id;
+ for i in 1..ty_arr.count loop
+ dbe_output.print_line('biome record: ' || ty_arr(i));
+ end loop;
+end;
+/
+biome record: (1,savanna)
+biome record: (2,giant-tree-taiga)
+biome record: (9,nether)
+-- varray handling --
+declare
+ i_arr int[];
+begin
+ select 1 bulk collect into i_arr from customers;
+ for i in 1..i_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || i_arr(i));
+ end loop;
+end;
+/
+id(1): 1
+id(2): 1
+id(3): 1
+id(4): 1
+id(5): 1
+id(6): 1
+-- success, single target array --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ select id bulk collect into id_arr from customers order by id DESC;
+ for i in 1..id_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || id_arr(i));
+ end loop;
+end;
+/
+id(1): 6
+id(2): 5
+id(3): 4
+id(4): 3
+id(5): 2
+id(6): 1
+-- success, multi target support
+declare
+ type tab is varray(6) of mytype;
+ tab1 tab := tab();
+begin
+ select id, c_name bulk collect into tab1 from customers order by id DESC;
+ raise info '%', tab1;
+end;
+/
+INFO: {"(6,Gauss)","(5,Kevin)","(4,Jim)","(3,Alice)","(2,)","(1,Vera)"}
+declare
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+ cc varchar(100);
+begin
+ select c_name bulk collect into name_arr from customers order by id;
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+name(1): Vera
+name(2):
+name(3): Alice
+name(4): Jim
+name(5): Kevin
+name(6): Gauss
+-- this will take the entire array as a single output --
+-- error out now --
+declare
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+begin
+ select array(select c_name from customers order by id) bulk collect into name_arr;
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+name(1): {Vera,NULL,Alice,Jim,Kevin,Gauss}
+-- multi target, correct answer --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+begin
+ select id, c_name bulk collect into id_arr, name_arr from customers order by id;
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('id: ' || id_arr(i) || ' with name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+id: 1 with name(1): Vera
+id: 2 with name(2):
+id: 3 with name(3): Alice
+id: 4 with name(4): Jim
+id: 5 with name(5): Kevin
+id: 6 with name(6): Gauss
+-- should be able to cast all elements --
+declare
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+begin
+ select count(*) bulk collect into name_arr from customers;
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('result ' || i || ': ' || name_arr(i));
+ end loop;
+end;
+/
+result 1: 6
+declare
+ type name_list is varray(6) of varchar2;
+ name_arr name_list;
+begin
+ select 99 bulk collect into name_arr from customers; -- 6 of them --
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('result ' || i || ': ' || name_arr(i));
+ end loop;
+end;
+/
+result 1: 99
+result 2: 99
+result 3: 99
+result 4: 99
+result 5: 99
+result 6: 99
+declare
+ type id_list is varray(6) of integer;
+ id_arr id_list;
+begin
+ select '696' bulk collect into id_arr from customers;
+ for i in 1..id_arr.count loop
+ dbe_output.print_line('result ' || i || ': ' || id_arr(i));
+ end loop;
+end;
+/
+result 1: 696
+result 2: 696
+result 3: 696
+result 4: 696
+result 5: 696
+result 6: 696
+-- intense bulk collect --
+-- 'massive' insert --
+insert into bigtmp select generate_series(1, 10000), '1';
+declare
+ type i_list is varray(60000) of integer;
+ i_arr i_list;
+begin
+ select a bulk collect into i_arr from bigtmp order by a;
+ dbe_output.print_line('result 556: ' || i_arr(556));
+ dbe_output.print_line('result 1556: ' || i_arr(1556));
+ dbe_output.print_line('result 9999: ' || i_arr(9999));
+end;
+/
+result 556: 556
+result 1556: 1556
+result 9999: 9999
+declare
+ type i_list is varray(60000) of integer;
+ i_arr i_list;
+begin
+ select b bulk collect into i_arr from bigtmp order by a; -- typecasts
+ dbe_output.print_line('result 556: ' || i_arr(556));
+ dbe_output.print_line('result 1556: ' || i_arr(1556));
+ dbe_output.print_line('result 9999: ' || i_arr(9999));
+end;
+/
+result 556: 1
+result 1556: 1
+result 9999: 1
+-- toasted --
+insert into bigtmp values (99999997, 'O2Co3PFc0Bdx0pAf55wf7vFKI6sThhbc7dG3aLGbERpYYPRQ6uKoQKzROW2V3ciMkWoSXTeqs592ifC9TSkkubQvF3Ca05fzevCP1Sm3j4jQGyr9FoF2q9iaMVc6XlVsY3xcJpOoWwtzhhgBbSxwjD3w57FfwJHoRRmcftJAD2qOOsqzFXd8A2MGqqDLWdjQGxZQKwS8gtBYX36oTyMxdIf4XHM4b8mtESoQX7FdiMASWJoWbEkS5dAgiVFoYVHZvKx3PCCGf3xXkBSwC4F9kQccZpdgO4r7fLoSc0IsfO2eLxqzOP4yoLPMFobtOU0rJSpddD2BCo4IpW6LCQ2q5HQzQbBDCE3MfVng8ZVpakXoTP5KS4DJTvALLtSYxvrXZgr5Gki8pcrQQov7JDpJ8wclWF1tZ9fOILGtg5TOnDo8sSNgiPUYt4rzXllS2GgobxYlwAQH4uIDhXlYs8NbATQ13ro6WhLrjJFmMW7FNORUcgxGr76ejAG0oGKkmpZ8sXTn26ZiNNI5gV4pr07JHVTc5seTYVYPNg3dkVOhkVhLlFSaQeocerWnsNXJ3Qc7oMFZlfGJyquNzuXCr9yoEKx2BRmidu8L5pIwNvZOJgSjuP3mMkTXAYEreBFDCDuoUuPiRrxF5dFPNSqYGh8Vwly4Pnm2E5OxWnkA37VtbCx7db7EIXRps1ghUUUJPlPx63doze3w325O0voHu5kRFRqe6TFXAGDY2cDY4m91frMcaG06R7XG418Eh3aQevbEp3QpghAwIMTa4vPpceiCEWHIo9Tu540wPbmRRKquFyxwA66Yjl60ROybjhc7w0hiR1q0JPTVIEjfTGih8YEo5Bok2RjSMBpUyhqiAt64AYtdbrN4Iy2znLDxGs1TyoNsu46AUKKi5WOgGDb8VjIn5mluYdvbZ5hfcOI9XVNq0U80o7JBAYZih53klMLQ49bC2z0cWoBAa37WVpSeBUEM3gQw5stegIgU18ACJzD9XhBeIXnVbY1gGQy1QHJ7QouUnVVEt3cTYs8L5k3VPP4VqcppEz9UvcnwysS37pXzOZr03oBTGvM5iQQ7AgtuRBJUkzrp7XmE3wfFwxHThtMEpHv6lJwQOtkrdqvCgH1F5PCsTDtBtPOAQSRYwPLVD5j8ifeg89zcPRQohM6ndzgA15tjl8Lc3ZhxSe81c3k7RuB91BJ00H8e5HImo5pSRnUDH5CL1X7sVPkBAZ7Lyk0ShZEznFVO9Cixd7T7goIgsL2yFOlOExpHriaZCXBWUQTHR9MyKlsAiEUWfgtP4vs6LgqW8NAzGhhzwsJMYkWOM0jnlEm0VMGzaU19ND4YYYSGnzUb3Ai8DWQOr23wVfOBPUtG6RsXOvNjjJ98safY4xxaVkOH1BE3Wsj3d6xwEL6SnGB9x7rxDrzuBKlai0DLaprmhUmpRc2JJTbBvJsRu8feLrAsgVwoU1dGezVCuT8oYHxRvgkblevX5Ud1DNaRGjsL738WzZsJJvD0nD7cOiHJI6mjapiNJ8tFnu1JgqfcQqMxbP4atBI4jH4M3zxWGk4XFtMkAEKjOInaaMEbZ21gkicL2WTNlkWyOZbiYemQjCG1tgmFcHKMipiK8cXhsQeN7bij5dMzqpvkJ3Pmc13YqJLTlZ70hXDj4CvzQVdDsZtc3AEr4z49D0AiYSiPWyf09yXnMrT7PoUDUsug9vsliygXXjvaonkKLEs7kpLDAm7B2YrHge4QJJ2rNPZRtIaWXLegZKIjArp8sU8Cln4Tx4lo8450OfxFElTtduBDlq23yDwBcMb4z0pHwGLlayi1bhQ0Ymz7eRAHKnuRJd3hQdlG7c7xbbPon0ETZFXq42dDDzKxbsRgFEOHhI1wf8TpJYksK7BMX5CJmse5MVRarAJHJHSbVsTvrza2U6bxEkOzk4VI06KdXyoOKLN0g0PCrphFn1xadvj5Czr0ZLmD9hwzfQXARJNlYjeGCKF9N4Vgm3QOSdyQ8eJSYhTmU9rLGl3wuMZCPCLui2tLk9knhAAo0rlf9sJY6e5snZhcyGSCUJ9zQCxT4S3sRuEGx0NQcnEE89exgqcuwcV7konvoXCysRlwxnfszsSKyomCZAfuV4YzcWDz1Tx3Vw1PpntqUDQtHW7eQC8P2POV2yquwnKGdL8dGRHhdwrmCigAENuL1DElNB7IxFLXOFV1l2VFmJp1yrlLDlfMWWJPhElcl6sOeglrpAHxpdvvWGbysY15GC5T4bPT8W4Lc1vfahUTMywDpAZfdHtBWkrjtEwNgE7TnT24vglckMJhPvjxIKlC0IMOP7cfWZm38CuqhYWaaStDemaIKb4UJh4iSp4lvyKcDSx4HfNrshrv2zBYAYAnxntJk54H2rPSNV2p5s8hPAZCrEjlX0w0gtyrmRD3gNHCPqNXqaKdgo5A0bLYYrzErjVP5UI5lyMMe5JuL2O8TN0OjnKbZ8uAOPX7sMJPQ1cJnNFOmoecAaeBgcvxF5Rm');
+insert into bigtmp values (99999998, 'Pn5VELvD');
+insert into bigtmp values (99999999, '5lwSzzCVOoH5rW4Eggq2JR3Ne4SJpZwxtc4aNp7ieQn2Zt5BK3yBaF7dUBcpqH28Z32bbLvxjbexI32JqI4cnVn6Xo86VAl7R7e7smveMsekLtJQi1LUDZg7iqtHG9yMnDbYJo6RVb5OCEe20VOA6iNLl0lW6YYGF9GrHMSOoASPtOmttN8erIDfzVHjOmJqmz7nAJ3q64dygavaIfhRYeH3wGnDnYSL4XRN5ayDpyTqKo2ZoRgFWhfcWHBRQsfziktBY6P4lx4Qra3237ytn0jSFVJfEsTSr7wdhBxF7oSNFt3xuKpJQ72GklM8CZbEeXHRju13lw64dFt6Nbim9POnLQtff7EjWLpaeGbR7al7jmkB9iljwsMEh1FReBgUuOHB9XUFG2LK4QyX81xLmDq60vp6KkQqovTwIUEDzpuuHIb2wGqVaPa91Vw7JwUYui9ZbtHA2K64lQ0XgnQhWxzwOy0gFqeBEsDZ2aj6GNSoH9FPmhJm9Dn7WQw5boIZgeb6b2tLnZyxLNp0yTAhdJmwXTitBm0kz93rfQBPGfwLF5SNTYEdAZvedlSTIIB7D2HVq1nEiPxCOrOQ3C9PgFaods6gthyu6BjO5Mjc6eshS8zVL1YQ4tNcI7FOmX1pikLlqpQdl2h1lJgJTtftmOlSTGl59Ptl583YgqZVgTFtjpdH0MLxQeKXhunr0vuhU3FfhaItFmdXeewBANsnh6QitWKRNMJOYS3YX5HEDTY75dfLSkBCK1rWz35HQYfacy7hl8Dz5pgJmn3hDPi7F1Hy9edvkzxE9eBc8f6nojCM5PyxmBSJE4bYpnF4swLZFKDmyxZ6yxszU6gawBNWJ7bMLAitobTNGNJrgFQCOajbXEOhx19Yb8L3xRWqSqky4dFAvx0bO2IQepn4yawklBDHamYeSoOOhUdzddLKHuUbnCCWdfSPP1mDpfJsbyZ4rPkC8LxA0UVibwKaSBfE4WYiaiw5S291AyMRfdPKDddWCROWUp1WUjW6d03hf5UAQHxfgrBrVgJ06V5IL0GYbeUOm2ceSyGqaxwhwf1Y5DiRsPS0sadcOHdc7WpnB1ysykaYkkgmuyldMMO2UJjDiJomeF6UZ1ZsTagAdieRM3H21ZJ4xv8abOBnwoZA5ZbwlBq7KpVYoaaJ8GL3MkkTLGDo2bWL4ZNTjKfzhMNOL5fVMXKGHC046Afl3sD5WUmW36rfGkkirQ0W9sLz0rxk7lnjKHk3yJXrhHmBoLux71DtTuieIDSbROqlYS9f7p92495hGCmx1wH1NIQ8dmb6AxoZjktLbzG8SMz7ZbfbLGjQovG9Xw3aekVvwENcCEfIypMtVynhvsKpqfDJY6pCgbfyTm1ymCd6QSFTJ26ywDJVT7yFvWbE1b4k3s4PD3CoRJYKC5OsZcvgFu7T2qXPGy9qn6RVlq95lbMZNsPbZ1USRfxPtm73Y7EISqf7UcsfqBbYV6OKdswAdinpNgesSncGBXTtCfPx5Ue8kuR6rDYvCQBoCIug0WxhEbTOxfAPZkXX6FhWtYtuSUu9w7L5I8xBKhGyeD3IA3H1YgD1HYpzl7CaznQaVvN5cxAisaBpW51ZI7NqVuxRKkJAsu5HKp9ExCoQk4EQygJCqPLgexGYVgUl2maQdfMMWAHZhVwH839BvYHeL9KwrrATj6Ts7hNd0WXT5LR97vpiqzQ891Z3eYVD2X4MkuOHWEuBFd8AnhjNn471eIGRPmLhzj5JtbEfSRHDv278kRqr7hxhBoPSilnoCywIqHkrHJBWi9AvLA4KaHC6vdmqOwpE7vebCxxuID5acMYXr55FkUWYGTOAzGRslw5H5xapvsgu7F20MCVYTuKEnbQiyzMhPDuqKGoJJtf97tYBRrGUgVVwTaUXcOiKcVoOnDEL7zKtvgGH8fx0X46ZefNGFLFGO2Kkt7z62Ht0muntXdQygIWwg0uDi2u610aFQWDOG3RF2SXk7GWeyNr2OjvdJ7wwRe7HGmbfBL87e0VWCIrIjMB3Dg1yolFcsuOD7GMUrEOYCACfyM0TsPkQqkSNq1j5bN8JqY6FUzswebtVL1t88Xw45ETOPZeu8QpiYO79ofs3ArGPGZUFW8b3G1tJSsnhJ7l2AtHxW6WDsOAnQ8RtNPQu8KOfaLqtSVd11eHVprmSZUbwlsHwx5Hf0UUR43UERuUWIqONOgZIjxukVUO5HqougHCoTKTpQvcZ4Bs0bN5HC82ddPc0VjhaNOL8zZgsy8aO0BYFr5TdkiXldv3LqXziauo0eDfxonfS8LSBRtVmrXVccpwI3pIpsg9c7h05JGh6mXfDGirdhuHz3JRXlMze6dbVOQIg3Q316fJMZixgNo3ttbr8OrJnS915xzUfcLaBx4eEmmi7EXplgWdHUJC3wNuXvyFGEBAxnoe1zyDl9rtOiinr33Wyf7ae0FcGjE2Eml4zvI6xeznXvDip9u1QnsIvYQHxw3MsClNh5WDhndp6DCUT');
+declare
+ type s_list is varray(60000) of varchar(10000);
+ s_arr s_list;
+begin
+ select b bulk collect into s_arr from bigtmp where a > 99999990 order by a;
+ dbe_output.print_line('result 99999997: ' || s_arr(1));
+ dbe_output.print_line('result 99999998: ' || s_arr(2));
+ dbe_output.print_line('result 99999999: ' || s_arr(3));
+end;
+/
+result 99999997: O2Co3PFc0Bdx0pAf55wf7vFKI6sThhbc7dG3aLGbERpYYPRQ6uKoQKzROW2V3ciMkWoSXTeqs592ifC9TSkkubQvF3Ca05fzevCP1Sm3j4jQGyr9FoF2q9iaMVc6XlVsY3xcJpOoWwtzhhgBbSxwjD3w57FfwJHoRRmcftJAD2qOOsqzFXd8A2MGqqDLWdjQGxZQKwS8gtBYX36oTyMxdIf4XHM4b8mtESoQX7FdiMASWJoWbEkS5dAgiVFoYVHZvKx3PCCGf3xXkBSwC4F9kQccZpdgO4r7fLoSc0IsfO2eLxqzOP4yoLPMFobtOU0rJSpddD2BCo4IpW6LCQ2q5HQzQbBDCE3MfVng8ZVpakXoTP5KS4DJTvALLtSYxvrXZgr5Gki8pcrQQov7JDpJ8wclWF1tZ9fOILGtg5TOnDo8sSNgiPUYt4rzXllS2GgobxYlwAQH4uIDhXlYs8NbATQ13ro6WhLrjJFmMW7FNORUcgxGr76ejAG0oGKkmpZ8sXTn26ZiNNI5gV4pr07JHVTc5seTYVYPNg3dkVOhkVhLlFSaQeocerWnsNXJ3Qc7oMFZlfGJyquNzuXCr9yoEKx2BRmidu8L5pIwNvZOJgSjuP3mMkTXAYEreBFDCDuoUuPiRrxF5dFPNSqYGh8Vwly4Pnm2E5OxWnkA37VtbCx7db7EIXRps1ghUUUJPlPx63doze3w325O0voHu5kRFRqe6TFXAGDY2cDY4m91frMcaG06R7XG418Eh3aQevbEp3QpghAwIMTa4vPpceiCEWHIo9Tu540wPbmRRKquFyxwA66Yjl60ROybjhc7w0hiR1q0JPTVIEjfTGih8YEo5Bok2RjSMBpUyhqiAt64AYtdbrN4Iy2znLDxGs1TyoNsu46AUKKi5WOgGDb8VjIn5mluYdvbZ5hfcOI9XVNq0U80o7JBAYZih53klMLQ49bC2z0cWoBAa37WVpSeBUEM3gQw5stegIgU18ACJzD9XhBeIXnVbY1gGQy1QHJ7QouUnVVEt3cTYs8L5k3VPP4VqcppEz9UvcnwysS37pXzOZr03oBTGvM5iQQ7AgtuRBJUkzrp7XmE3wfFwxHThtMEpHv6lJwQOtkrdqvCgH1F5PCsTDtBtPOAQSRYwPLVD5j8ifeg89zcPRQohM6ndzgA15tjl8Lc3ZhxSe81c3k7RuB91BJ00H8e5HImo5pSRnUDH5CL1X7sVPkBAZ7Lyk0ShZEznFVO9Cixd7T7goIgsL2yFOlOExpHriaZCXBWUQTHR9MyKlsAiEUWfgtP4vs6LgqW8NAzGhhzwsJMYkWOM0jnlEm0VMGzaU19ND4YYYSGnzUb3Ai8DWQOr23wVfOBPUtG6RsXOvNjjJ98safY4xxaVkOH1BE3Wsj3d6xwEL6SnGB9x7rxDrzuBKlai0DLaprmhUmpRc2JJTbBvJsRu8feLrAsgVwoU1dGezVCuT8oYHxRvgkblevX5Ud1DNaRGjsL738WzZsJJvD0nD7cOiHJI6mjapiNJ8tFnu1JgqfcQqMxbP4atBI4jH4M3zxWGk4XFtMkAEKjOInaaMEbZ21gkicL2WTNlkWyOZbiYemQjCG1tgmFcHKMipiK8cXhsQeN7bij5dMzqpvkJ3Pmc13YqJLTlZ70hXDj4CvzQVdDsZtc3AEr4z49D0AiYSiPWyf09yXnMrT7PoUDUsug9vsliygXXjvaonkKLEs7kpLDAm7B2YrHge4QJJ2rNPZRtIaWXLegZKIjArp8sU8Cln4Tx4lo8450OfxFElTtduBDlq23yDwBcMb4z0pHwGLlayi1bhQ0Ymz7eRAHKnuRJd3hQdlG7c7xbbPon0ETZFXq42dDDzKxbsRgFEOHhI1wf8TpJYksK7BMX5CJmse5MVRarAJHJHSbVsTvrza2U6bxEkOzk4VI06KdXyoOKLN0g0PCrphFn1xadvj5Czr0ZLmD9hwzfQXARJNlYjeGCKF9N4Vgm3QOSdyQ8eJSYhTmU9rLGl3wuMZCPCLui2tLk9knhAAo0rlf9sJY6e5snZhcyGSCUJ9zQCxT4S3sRuEGx0NQcnEE89exgqcuwcV7konvoXCysRlwxnfszsSKyomCZAfuV4YzcWDz1Tx3Vw1PpntqUDQtHW7eQC8P2POV2yquwnKGdL8dGRHhdwrmCigAENuL1DElNB7IxFLXOFV1l2VFmJp1yrlLDlfMWWJPhElcl6sOeglrpAHxpdvvWGbysY15GC5T4bPT8W4Lc1vfahUTMywDpAZfdHtBWkrjtEwNgE7TnT24vglckMJhPvjxIKlC0IMOP7cfWZm38CuqhYWaaStDemaIKb4UJh4iSp4lvyKcDSx4HfNrshrv2zBYAYAnxntJk54H2rPSNV2p5s8hPAZCrEjlX0w0gtyrmRD3gNHCPqNXqaKdgo5A0bLYYrzErjVP5UI5lyMMe5JuL2O8TN0OjnKbZ8uAOPX7sMJPQ1cJnNFOmoecAaeBgcvxF5Rm
+result 99999998: Pn5VELvD
+result 99999999: 5lwSzzCVOoH5rW4Eggq2JR3Ne4SJpZwxtc4aNp7ieQn2Zt5BK3yBaF7dUBcpqH28Z32bbLvxjbexI32JqI4cnVn6Xo86VAl7R7e7smveMsekLtJQi1LUDZg7iqtHG9yMnDbYJo6RVb5OCEe20VOA6iNLl0lW6YYGF9GrHMSOoASPtOmttN8erIDfzVHjOmJqmz7nAJ3q64dygavaIfhRYeH3wGnDnYSL4XRN5ayDpyTqKo2ZoRgFWhfcWHBRQsfziktBY6P4lx4Qra3237ytn0jSFVJfEsTSr7wdhBxF7oSNFt3xuKpJQ72GklM8CZbEeXHRju13lw64dFt6Nbim9POnLQtff7EjWLpaeGbR7al7jmkB9iljwsMEh1FReBgUuOHB9XUFG2LK4QyX81xLmDq60vp6KkQqovTwIUEDzpuuHIb2wGqVaPa91Vw7JwUYui9ZbtHA2K64lQ0XgnQhWxzwOy0gFqeBEsDZ2aj6GNSoH9FPmhJm9Dn7WQw5boIZgeb6b2tLnZyxLNp0yTAhdJmwXTitBm0kz93rfQBPGfwLF5SNTYEdAZvedlSTIIB7D2HVq1nEiPxCOrOQ3C9PgFaods6gthyu6BjO5Mjc6eshS8zVL1YQ4tNcI7FOmX1pikLlqpQdl2h1lJgJTtftmOlSTGl59Ptl583YgqZVgTFtjpdH0MLxQeKXhunr0vuhU3FfhaItFmdXeewBANsnh6QitWKRNMJOYS3YX5HEDTY75dfLSkBCK1rWz35HQYfacy7hl8Dz5pgJmn3hDPi7F1Hy9edvkzxE9eBc8f6nojCM5PyxmBSJE4bYpnF4swLZFKDmyxZ6yxszU6gawBNWJ7bMLAitobTNGNJrgFQCOajbXEOhx19Yb8L3xRWqSqky4dFAvx0bO2IQepn4yawklBDHamYeSoOOhUdzddLKHuUbnCCWdfSPP1mDpfJsbyZ4rPkC8LxA0UVibwKaSBfE4WYiaiw5S291AyMRfdPKDddWCROWUp1WUjW6d03hf5UAQHxfgrBrVgJ06V5IL0GYbeUOm2ceSyGqaxwhwf1Y5DiRsPS0sadcOHdc7WpnB1ysykaYkkgmuyldMMO2UJjDiJomeF6UZ1ZsTagAdieRM3H21ZJ4xv8abOBnwoZA5ZbwlBq7KpVYoaaJ8GL3MkkTLGDo2bWL4ZNTjKfzhMNOL5fVMXKGHC046Afl3sD5WUmW36rfGkkirQ0W9sLz0rxk7lnjKHk3yJXrhHmBoLux71DtTuieIDSbROqlYS9f7p92495hGCmx1wH1NIQ8dmb6AxoZjktLbzG8SMz7ZbfbLGjQovG9Xw3aekVvwENcCEfIypMtVynhvsKpqfDJY6pCgbfyTm1ymCd6QSFTJ26ywDJVT7yFvWbE1b4k3s4PD3CoRJYKC5OsZcvgFu7T2qXPGy9qn6RVlq95lbMZNsPbZ1USRfxPtm73Y7EISqf7UcsfqBbYV6OKdswAdinpNgesSncGBXTtCfPx5Ue8kuR6rDYvCQBoCIug0WxhEbTOxfAPZkXX6FhWtYtuSUu9w7L5I8xBKhGyeD3IA3H1YgD1HYpzl7CaznQaVvN5cxAisaBpW51ZI7NqVuxRKkJAsu5HKp9ExCoQk4EQygJCqPLgexGYVgUl2maQdfMMWAHZhVwH839BvYHeL9KwrrATj6Ts7hNd0WXT5LR97vpiqzQ891Z3eYVD2X4MkuOHWEuBFd8AnhjNn471eIGRPmLhzj5JtbEfSRHDv278kRqr7hxhBoPSilnoCywIqHkrHJBWi9AvLA4KaHC6vdmqOwpE7vebCxxuID5acMYXr55FkUWYGTOAzGRslw5H5xapvsgu7F20MCVYTuKEnbQiyzMhPDuqKGoJJtf97tYBRrGUgVVwTaUXcOiKcVoOnDEL7zKtvgGH8fx0X46ZefNGFLFGO2Kkt7z62Ht0muntXdQygIWwg0uDi2u610aFQWDOG3RF2SXk7GWeyNr2OjvdJ7wwRe7HGmbfBL87e0VWCIrIjMB3Dg1yolFcsuOD7GMUrEOYCACfyM0TsPkQqkSNq1j5bN8JqY6FUzswebtVL1t88Xw45ETOPZeu8QpiYO79ofs3ArGPGZUFW8b3G1tJSsnhJ7l2AtHxW6WDsOAnQ8RtNPQu8KOfaLqtSVd11eHVprmSZUbwlsHwx5Hf0UUR43UERuUWIqONOgZIjxukVUO5HqougHCoTKTpQvcZ4Bs0bN5HC82ddPc0VjhaNOL8zZgsy8aO0BYFr5TdkiXldv3LqXziauo0eDfxonfS8LSBRtVmrXVccpwI3pIpsg9c7h05JGh6mXfDGirdhuHz3JRXlMze6dbVOQIg3Q316fJMZixgNo3ttbr8OrJnS915xzUfcLaBx4eEmmi7EXplgWdHUJC3wNuXvyFGEBAxnoe1zyDl9rtOiinr33Wyf7ae0FcGjE2Eml4zvI6xeznXvDip9u1QnsIvYQHxw3MsClNh5WDhndp6DCUT
+-- ERROR handling --
+-- syntax error --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ select 1 collect bulk into id_arr;
+ for i in 1..id_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || id_arr(i));
+ end loop;
+end;
+/
+ERROR: expect 'COLLECT' after 'BULK' at or near "into"
+LINE 4: select 1 collect bulk into id_arr;
+ ^
+QUERY: DECLARE type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ select 1 collect bulk into id_arr;
+ for i in 1..id_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || id_arr(i));
+ end loop;
+end
+-- insert into is not an into at all! --
+declare
+ type name_list is varray(6) of varchar2;
+ name_arr name_list;
+begin
+ insert bulk collect into tmp values (1, '2');
+end;
+/
+ERROR: syntax error at or near "bulk"
+LINE 4: insert bulk collect into tmp values (1, '2');
+ ^
+QUERY: DECLARE type name_list is varray(6) of varchar2;
+ name_arr name_list;
+begin
+ insert bulk collect into tmp values (1, '2');
+end
+-- should error out with type cast failure --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ select array(select id from customers order by id) bulk collect into id_arr;
+ for i in 1..id_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || id_arr(i));
+ end loop;
+end;
+/
+ERROR: Fail to perform bulk collect
+DETAIL: Cannot bulk collect numeric[] values into numeric collections
+CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement
+-- query returns more columns than bulk collect targets, error --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ select id, c_name bulk collect into id_arr from customers;
+ for i in 1..id_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || id_arr(i));
+ end loop;
+end;
+/
+ERROR: Cannot bulk collect into targets
+DETAIL: Query returns more columns than available targets
+CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement
+-- multi-dimensional array --
+declare
+ i_arr int[][];
+begin
+ select 1 bulk collect into i_arr[1] from customers;
+ for i in 1..i_arr.count loop
+ dbe_output.print_line('id(' || i || '): ' || i_arr(1)(i));
+ end loop;
+end;
+/
+ERROR: Unsupported bulk collect into target
+DETAIL: Unable to recognize the given bulk collect into target
+CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement
+-- multiple targets, keep erroring out, type cast failure --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+begin
+ select array(select c_name from customers order by id),
+ array(select id from customers order by id)
+ bulk collect into name_arr, id_arr;
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('id: ' || id_arr(i) || ' with name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+ERROR: Fail to perform bulk collect
+DETAIL: Cannot bulk collect numeric[] values into numeric collections
+CONTEXT: PL/pgSQL function inline_code_block line 6 at SQL statement
+-- should report error, into something not an array --
+declare
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+ cc record;
+begin
+ select id bulk collect into cc from customers order by id DESC;
+ dbe_output.print_line('name count: ' || cc);
+end;
+/
+ERROR: Unsupported bulk collect into target
+DETAIL: Unable to recognize the given bulk collect into target
+CONTEXT: PL/pgSQL function inline_code_block line 5 at SQL statement
+-- .. even if it actually returns a single value --
+declare
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+ cc integer;
+begin
+ select count(*) bulk collect into cc from customers;
+ dbe_output.print_line('name count: ' || cc);
+end;
+/
+ERROR: Unsupported bulk collect into target
+DETAIL: Unable to find any array elements in target
+CONTEXT: PL/pgSQL function inline_code_block line 5 at SQL statement
+-- returns nothing --
+declare
+ type s_list is varray(6) of integer;
+ s_arr s_list;
+begin
+ select a bulk collect into s_arr from tmp; -- this is empty --
+ dbe_output.print_line('ok, is empty');
+end;
+/
+ok, is empty
+-- Scene2: returning --
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ -- select id bulk collect into id_arr from customers;
+ delete from customers where id = 2 returning id bulk collect into id_arr;
+ dbe_output.print_line('delete id: ' || id_arr(1));
+
+ select count(*) bulk collect into id_arr from customers;
+ dbe_output.print_line('total left: ' || id_arr(1));
+rollback;
+end;
+/
+delete id: 2
+total left: 5
+declare
+ type name_list is varray(6) of customers.c_name%type;
+ name_arr name_list;
+begin
+ -- select id bulk collect into id_arr from customers;
+ update customers set c_name = 'Carol' where id = 2 returning c_name bulk collect into name_arr;
+ dbe_output.print_line('updated name: ' || name_arr(1));
+ select c_name bulk collect into name_arr from customers order by id;
+ for i in 1..name_arr.count loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+rollback;
+end;
+/
+updated name: Carol
+name(1): Vera
+name(2): Carol
+name(3): Alice
+name(4): Jim
+name(5): Kevin
+name(6): Gauss
+-- Scene3: fetch --
+-- error out, does not support fetch direction statements --
+declare
+ cursor c_customers is select c_name from customers order by id;
+ type c_list is varray (6) of customers.c_name%type;
+ name_arr c_list := c_list();
+begin
+ open c_customers;
+ fetch all in c_customers bulk collect into name_arr;
+ exit when c_customers%NOTFOUND;
+ close c_customers;
+
+ for i in 1..6 loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+ERROR: unexpected fetch direction statement at or near ";"
+LINE 6: fetch all in c_customers bulk collect into name_arr;
+ ^
+QUERY: DECLARE cursor c_customers is select c_name from customers order by id;
+ type c_list is varray (6) of customers.c_name%type;
+ name_arr c_list := c_list();
+begin
+ open c_customers;
+ fetch all in c_customers bulk collect into name_arr;
+ exit when c_customers%NOTFOUND;
+ close c_customers;
+
+ for i in 1..6 loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+end
+-- fetch ... bulk collect into ... limit ...
+declare
+ cursor c_customers is select c_name from customers order by id;
+ type c_list is varray (6) of customers.c_name%type;
+ name_arr c_list := c_list();
+begin
+ open c_customers;
+ fetch c_customers bulk collect into name_arr limit 4;
+ exit when c_customers%NOTFOUND;
+ close c_customers;
+
+ for i in 1..6 loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+name(1): Vera
+name(2):
+name(3): Alice
+name(4): Jim
+name(5):
+name(6):
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+drop table if exists tmp;
+drop table if exists biomebook;
+drop table if exists bigtmp;
+drop table if exists customers;
+drop type if exists mytype cascade;
+NOTICE: drop cascades to function biofunc()
+-- clean up --
+drop schema if exists plpgsql_bulk_collect cascade;
+NOTICE: drop cascades to function id_arr(integer)
diff --git a/src/test/regress/expected/plpgsql_cursor_rowtype.out b/src/test/regress/expected/plpgsql_cursor_rowtype.out
new file mode 100644
index 000000000..8ab148a32
--- /dev/null
+++ b/src/test/regress/expected/plpgsql_cursor_rowtype.out
@@ -0,0 +1,823 @@
+-- test cursor%type
+-- check compatibility --
+-- create new schema --
+drop schema if exists plpgsql_cursor_rowtype;
+NOTICE: schema "plpgsql_cursor_rowtype" does not exist, skipping
+create schema plpgsql_cursor_rowtype;
+set current_schema = plpgsql_cursor_rowtype;
+set behavior_compat_options='allow_procedure_compile_check';
+create table emp (empno int, ename varchar(10), job varchar(10));
+insert into emp values (1, 'zhangsan', 'job1');
+insert into emp values (2, 'lisi', 'job2');
+create or replace package pck1 is
+vvv emp%rowtype;
+cursor cur1 is
+select * from emp where empno=vvv.empno and ename=vvv.ename;
+emp_row cur1%rowtype;
+procedure p1();
+end pck1;
+/
+create or replace package body pck1 is
+procedure p1() is
+a int;
+begin
+vvv.empno = 1;
+vvv.ename = 'zhangsan';
+open cur1;
+fetch cur1 into emp_row;
+raise info '%', emp_row.job;
+end;
+end pck1;
+/
+call pck1.p1();
+INFO: job1
+ p1
+----
+
+(1 row)
+
+create or replace procedure pro_cursor_args
+is
+ b varchar(10) := 'job1';
+ cursor c_job
+ is
+ select empno,ename t
+ from emp
+ where job=b;
+ c_row c_job%rowtype;
+begin
+ for c_row in c_job loop
+ raise info '%', c_row.t;
+ end loop;
+end;
+/
+call pro_cursor_args();
+INFO: zhangsan
+ pro_cursor_args
+-----------------
+
+(1 row)
+
+create or replace procedure pro_cursor_no_args_1
+is
+ b varchar(10);
+ cursor c_job
+ is
+ select empno,ename t
+ from emp;
+ c_row c_job%rowtype;
+begin
+ c_row.empno = 3;
+ raise info '%', c_row.empno;
+ for c_row in c_job loop
+ raise info '%', c_row.empno;
+ end loop;
+end;
+/
+call pro_cursor_no_args_1();
+INFO: 3
+INFO: 1
+INFO: 2
+ pro_cursor_no_args_1
+----------------------
+
+(1 row)
+
+-- test alias error
+create or replace procedure pro_cursor_args
+is
+ b varchar(10) := 'job1';
+ cursor c_job
+ is
+ select empno,ename t
+ from emp
+ where job=b;
+ c_row c_job%rowtype;
+begin
+ for c_row in c_job loop
+ raise info '%', c_row.ename;
+ end loop;
+end;
+/
+call pro_cursor_args();
+ERROR: record "c_row" has no field "ename" when get datum type info
+CONTEXT: referenced column: ename
+SQL statement "SELECT c_row.ename"
+PL/pgSQL function pro_cursor_args() line 10 at RAISE
+create or replace procedure pro_cursor_no_args_2
+is
+ b varchar(10);
+ cursor c_job
+ is
+ select empno,ename t
+ from emp;
+ c_row c_job%rowtype;
+begin
+ open c_job;
+ fetch c_job into c_row;
+ raise info '%', c_row.empno;
+ fetch c_job into c_row;
+ raise info '%', c_row.empno;
+end;
+/
+call pro_cursor_no_args_2();
+INFO: 1
+INFO: 2
+ pro_cursor_no_args_2
+----------------------
+
+(1 row)
+
+create table test12(col1 varchar2,col2 varchar2);
+insert into test12 values ('a', 'aa');
+insert into test12 values ('b', 'bb');
+create or replace package pck2 is
+cursor cur1 is select col1,col2 from test12;
+var1 cur1%rowtype;
+procedure pp1;
+end pck2;
+/
+create or replace package body pck2 is
+procedure pp1() is
+cursor cur2 is
+select col1,col2 from test12;
+begin
+var1.col1 = 'c';
+raise info '%', var1.col1;
+open cur2;
+fetch cur2 into var1;
+raise info '%', var1.col1;
+fetch cur2 into var1;
+raise info '%', var1.col1;
+end;
+end pck2;
+/
+call pck2.pp1();
+INFO: c
+INFO: a
+INFO: b
+ pp1
+-----
+
+(1 row)
+
+create or replace package pck3 is
+cursor cur1 is select col1,col2 from test12;
+var1 cur1%rowtype;
+procedure ppp1;
+procedure ppp2(a cur1%rowtype);
+end pck3;
+/
+create or replace package body pck3 is
+procedure ppp1() is
+cursor cur2 is
+select col1,col2 from test12;
+begin
+open cur2;
+fetch cur2 into var1;
+ppp2(var1);
+raise info '%', var1.col1;
+end;
+
+procedure ppp2(a cur1%rowtype) is
+begin
+ raise info '%', a.col1;
+end;
+end pck3;
+/
+call pck3.ppp1();
+INFO: a
+CONTEXT: SQL statement "CALL ppp2(var1)"
+PL/pgSQL function ppp1() line 7 at PERFORM
+INFO: a
+ ppp1
+------
+
+(1 row)
+
+create or replace package pck4
+is
+v1 varchar2;
+procedure proc1(a1 in v1%type);
+end pck4;
+/
+create or replace package body pck4
+is
+procedure proc1(a1 in v1%type)
+is
+begin
+raise info '%', a1;
+end;
+end pck4;
+/
+call pck4.proc1('aa');
+INFO: aa
+ proc1
+-------
+
+(1 row)
+
+-- test cursor.col
+create or replace package pck5 is
+cursor cur1 is select col1,col2 from test12;
+var1 cur1%rowtype;
+var2 cur1.col1%type;
+procedure ppppp1(a1 cur1.col1%type);
+end pck5;
+/
+NOTICE: type reference cur1.col1%TYPE converted to character varying
+create or replace package body pck5
+is
+procedure ppppp1(a1 cur1.col1%type)
+is
+begin
+var2 = 2;
+raise info '%', a1;
+raise info '%', var2;
+end;
+end pck5;
+/
+NOTICE: type reference cur1.col1%TYPE converted to character varying
+NOTICE: type reference cur1.col1%TYPE converted to character varying
+NOTICE: type reference cur1.col1%TYPE converted to character varying
+NOTICE: type reference cur1.col1%TYPE converted to character varying
+call pck5.ppppp1(1);
+INFO: 1
+INFO: 2
+ ppppp1
+--------
+
+(1 row)
+
+drop schema if exists schema1;
+NOTICE: schema "schema1" does not exist, skipping
+create schema schema1;
+set search_path=schema1;
+create table t11(a int, b varchar(10));
+insert into t11 values (1,'a');
+set search_path=plpgsql_cursor_rowtype;
+create or replace procedure cursor1()
+as
+declare
+ c_b varchar(10);
+ cursor cur1 is select schema1.t11.* from schema1.t11 where b = c_b;
+ var1 cur1%rowtype;
+begin
+ c_b = 'a';
+ open cur1;
+ fetch cur1 into var1;
+ raise info '%', var1;
+ raise info '%', var1.a;
+end;
+/
+call cursor1();
+INFO: (1,a)
+INFO: 1
+ cursor1
+---------
+
+(1 row)
+
+create or replace package pck6 is
+ c_b varchar(10);
+ cursor cur1 is select schema1.t11.* from schema1.t11 where b = c_b;
+ var1 cur1%rowtype;
+procedure p2();
+end pck6;
+/
+create or replace package body pck6
+is
+procedure p2()
+is
+begin
+ c_b = 'a';
+ open cur1;
+ fetch cur1 into var1;
+ raise info '%', var1;
+ raise info '%', var1.a;
+end;
+end pck6;
+/
+call pck6.p2();
+INFO: (1,a)
+INFO: 1
+ p2
+----
+
+(1 row)
+
+create table tb1 (c1 int,c2 varchar2);
+insert into tb1 values(4,'a');
+create or replace package pck7 as
+ cursor cur is select c1,c2 from tb1;
+ v_s cur%rowtype := (1,'1');
+ function func1(c1 in cur%rowtype) return cur%rowtype;
+ procedure proc1(c1 out cur%rowtype);
+ procedure proc2(c1 inout cur%rowtype);
+end pck7;
+/
+create or replace package body pck7
+is
+ function func1(c1 in cur%rowtype) return cur%rowtype
+ as
+ begin
+ return v_s;
+ end;
+
+ procedure proc1 (c1 out cur%rowtype)
+ as
+ begin
+ c1 := (4,'d');
+ end;
+
+ procedure proc2(c1 inout cur%rowtype)
+ is
+ vs cur%rowtype := (2,'1');
+ c2 cur%rowtype;
+ begin
+ c1 := func1(vs);
+ proc1(c2);
+ raise info '%', c2;
+ end;
+end pck7;
+/
+call pck7.proc2(row(3,'c'));
+INFO: (4,d)
+ c1 | c2
+----+----
+ 1 | 1
+(1 row)
+
+-- test duplicate column name
+create or replace procedure pro_cursor_args
+is
+ b varchar(10) := 'job1';
+ cursor c_job
+ is
+ select empno,empno,ename
+ from emp
+ where job=b;
+ c_row c_job%rowtype;
+begin
+ for c_row in c_job loop
+ raise info '%', c_row.empno;
+ end loop;
+end;
+/
+call pro_cursor_args();
+INFO: 1
+ pro_cursor_args
+-----------------
+
+(1 row)
+
+create or replace package pck8 is
+cursor cur1 is select col2,col2 from test12;
+procedure ppp1;
+procedure ppp2(a cur1%rowtype);
+end pck8;
+/
+ERROR: relation does not exist when parse word.
+DETAIL: relation "cur1" referenced by %ROWTYPE does not exist.
+insert into emp values (1, 'zhangsan', 'job3');
+create or replace package pck8 is
+vvv emp%rowtype;
+cursor cur1 is
+select empno,empno,job from emp where empno=vvv.empno and ename=vvv.ename;
+emp_row cur1%rowtype;
+procedure p1();
+end pck8;
+/
+create or replace package body pck8 is
+procedure p1() is
+a int;
+begin
+vvv.empno = 1;
+vvv.ename = 'zhangsan';
+open cur1;
+fetch cur1 into emp_row;
+raise info '%', emp_row.job;
+fetch cur1 into emp_row;
+raise info '%', emp_row.job;
+end;
+end pck8;
+/
+call pck8.p1();
+INFO: job1
+INFO: job3
+ p1
+----
+
+(1 row)
+
+create or replace package pck9 is
+vvv emp%rowtype;
+cursor cur1 is
+select empno,empno,job from emp where empno=vvv.empno and ename=vvv.ename;
+emp_row record;
+procedure p1();
+end pck9;
+/
+create or replace package body pck9 is
+procedure p1() is
+a int;
+begin
+vvv.empno = 1;
+vvv.ename = 'zhangsan';
+open cur1;
+fetch cur1 into emp_row;
+raise info '%', emp_row.job;
+fetch cur1 into emp_row;
+raise info '%', emp_row.job;
+end;
+end pck9;
+/
+call pck9.p1();
+INFO: job1
+INFO: job3
+ p1
+----
+
+(1 row)
+
+create or replace package pck10 as
+ cursor cur is select c2,c2 from tb1;
+ function func1 return cur%rowtype;
+end pck10;
+/
+ERROR: relation does not exist when parse word.
+DETAIL: relation "cur" referenced by %ROWTYPE does not exist.
+create table FOR_LOOP_TEST_001( +deptno smallint, +ename char(100), +salary int +); +create table FOR_LOOP_TEST_002( +deptno smallint, +ename char(100), +salary int +); +insert into FOR_LOOP_TEST_001 values (10,'CLARK',7000),(10,'KING',8000),(10,'MILLER',12000),(20,'ADAMS',5000),(20,'FORD',4000); +create or replace procedure test_forloop_001() +as +begin + for data in update FOR_LOOP_TEST_001 set salary=20000 where ename='CLARK' returning * loop + insert into FOR_LOOP_TEST_002 values(data.deptno,data.ename,data.salary); + end loop; +end; +/ +call test_forloop_001(); + test_forloop_001 +------------------ + +(1 row) + +select * from FOR_LOOP_TEST_001; + deptno | ename | salary +--------+------------------------------------------------------------------------------------------------------+-------- + 10 | KING | 8000 + 10 | MILLER | 12000 + 20 | ADAMS | 5000 + 20 | FORD | 4000 + 10 | CLARK | 20000 +(5 rows) + +select * from FOR_LOOP_TEST_002; + deptno | ename | salary +--------+------------------------------------------------------------------------------------------------------+-------- + 10 | CLARK | 20000 +(1 row) + +--test execption close cursor +create or replace package pckg_test1 as +procedure p1; +end pckg_test1; +/ +create or replace package body pckg_test1 as +procedure p1() is +a number; +begin +a := 2/0; +end; +end pckg_test1; +/ +create or replace package pckg_test2 as +cursor CURRR is select * from FOR_LOOP_TEST_002; +curr_row CURRR%rowtype; +procedure p1; +end pckg_test2; +/ +create or replace package body pckg_test2 as +procedure p1() is +a number; +begin +open CURRR; +fetch CURRR into curr_row; +raise info '%', curr_row; +pckg_test1.p1(); +exception +when others then +raise notice '%', '1111'; +close CURRR; +end; +end pckg_test2; +/ +call pckg_test2.p1(); +INFO: (10,"CLARK ",20000) +NOTICE: 1111 + p1 +---- + +(1 row) + +create or replace procedure pro_close_cursor1 +is + my_cursor REFCURSOR; + sql_stmt VARCHAR2(500); + curr_row record; +begin + sql_stmt := 'select * from FOR_LOOP_TEST_002'; + OPEN my_cursor FOR EXECUTE sql_stmt; + fetch my_cursor into curr_row; + raise info '%', curr_row; + pckg_test1.p1(); + exception + when others then + raise notice '%', '1111'; + close my_cursor; +end; +/ +call pro_close_cursor1(); +INFO: (10,"CLARK ",20000) +NOTICE: 1111 + pro_close_cursor1 +------------------- + +(1 row) + +create or replace procedure pro_close_cursor2 +is + type cursor_type is ref cursor; + my_cursor cursor_type; + sql_stmt VARCHAR2(500); + curr_row record; +begin + sql_stmt := 'select * from FOR_LOOP_TEST_002'; + OPEN my_cursor FOR EXECUTE sql_stmt; + fetch my_cursor into curr_row; + raise info '%', curr_row; + pckg_test1.p1(); + exception + when others then + raise notice '%', '1111'; + close my_cursor; +end; +/ +call pro_close_cursor2(); +INFO: (10,"CLARK ",20000) +NOTICE: 1111 + pro_close_cursor2 +------------------- + +(1 row) + +create table cs_trans_1(a int); +create or replace procedure pro_cs_trans_1() as +cursor c1 is select * from cs_trans_1 order by 1; +rec_1 cs_trans_1%rowtype; +va int; +begin +open c1; +va := 3/0; +exception +when division_by_zero then +close c1; +close c1; +end; +/ +call pro_cs_trans_1(); +--?.* +CONTEXT: PL/pgSQL function pro_cs_trans_1() line 11 at CLOSE +create or replace procedure pro_cs_trans_1() as +cursor c1 is select * from cs_trans_1 order by 1; +rec_1 cs_trans_1%rowtype; +va int; +begin +open c1; +close c1; +va := 3/0; +exception +when division_by_zero then +close c1; +end; +/ +call pro_cs_trans_1(); +--?.* +CONTEXT: PL/pgSQL 
function pro_cs_trans_1() line 11 at CLOSE +create or replace procedure pro_cs_trans_1() as +cursor c1 is select * from cs_trans_1 order by 1; +rec_1 cs_trans_1%rowtype; +va int; +begin +open c1; +close c1; +close c1; +va := 3/0; +close c1; +exception +when division_by_zero then +null; +when others then +raise info 'cursor alread closed'; +end; +/ +call pro_cs_trans_1(); +INFO: cursor alread closed + pro_cs_trans_1 +---------------- + +(1 row) + +drop procedure pro_cs_trans_1; +drop table cs_trans_1; +-- test for rec in select loop when rec is defined +set behavior_compat_options='proc_implicit_for_loop_variable'; +create table t1(a int, b int); +create table t2(a int, b int, c int); +insert into t1 values(1,1); +insert into t1 values(2,2); +insert into t1 values(3,3); +insert into t2 values(1,1,1); +insert into t2 values(2,2,2); +insert into t2 values(3,3,3); +-- (a) definde as record +create or replace package pck_for is +type r1 is record(a int, b int); +temp_result t1; +procedure p1; +end pck_for; +/ +create or replace package body pck_for is +procedure p1 as +vb t1; +begin +for temp_result in select * from t2 loop +raise info '%', temp_result; + for temp_result in select * from t1 loop + raise info '%', temp_result; + end loop; +end loop; +raise info 'after loop: %', temp_result; +end; +end pck_for; +/ +call pck_for.p1(); +INFO: (1,1,1) +INFO: (1,1) +INFO: (2,2) +INFO: (3,3) +INFO: (2,2,2) +INFO: (1,1) +INFO: (2,2) +INFO: (3,3) +INFO: (3,3,3) +INFO: (1,1) +INFO: (2,2) +INFO: (3,3) +INFO: after loop: (,) + p1 +---- + +(1 row) + +drop package pck_for; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +-- (b) definde as scarlar +create or replace package pck_for is +temp_result int; +procedure p1; +end pck_for; +/ +create or replace package body pck_for is +procedure p1 as +vb t1; +begin +for temp_result in select * from t2 loop +raise info '%', temp_result; + for temp_result in select * from t1 loop + raise info '%', temp_result; + end loop; +end loop; +raise info 'after loop: %', temp_result; +end; +end pck_for; +/ +call pck_for.p1(); +INFO: (1,1,1) +INFO: (1,1) +INFO: (2,2) +INFO: (3,3) +INFO: (2,2,2) +INFO: (1,1) +INFO: (2,2) +INFO: (3,3) +INFO: (3,3,3) +INFO: (1,1) +INFO: (2,2) +INFO: (3,3) +INFO: after loop: + p1 +---- + +(1 row) + +drop package pck_for; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +-- (c) select only one col +create or replace package pck_for is +temp_result int; +procedure p1; +end pck_for; +/ +create or replace package body pck_for is +procedure p1 as +vb t1; +begin +for temp_result in select c from t2 loop +raise info '%', temp_result; + for temp_result in select a from t1 loop + raise info '%', temp_result; + end loop; +end loop; +raise info 'after loop: %', temp_result; +end; +end pck_for; +/ +call pck_for.p1(); +INFO: (1) +INFO: (1) +INFO: (2) +INFO: (3) +INFO: (2) +INFO: (1) +INFO: (2) +INFO: (3) +INFO: (3) +INFO: (1) +INFO: (2) +INFO: (3) +INFO: after loop: + p1 +---- + +(1 row) + +drop package pck_for; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop table t1; +drop table t2; +set behavior_compat_options=''; +---- clean ---- +drop package pck1; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop package pck2; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.pp1() +drop package pck3; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_cursor_rowtype.ppp1() +--?.* +drop package pck4; +NOTICE: drop cascades to function 
plpgsql_cursor_rowtype.proc1(character varying) +drop package pck5; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.ppppp1(character varying) +drop package pck6; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p2() +drop package pck7; +NOTICE: drop cascades to 3 other objects +--?.* +drop cascades to function plpgsql_cursor_rowtype.proc1() +--?.* +drop package pck8; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop package pck9; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop package pckg_test1; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop package pckg_test2; +NOTICE: drop cascades to function plpgsql_cursor_rowtype.p1() +drop schema plpgsql_cursor_rowtype cascade; +NOTICE: drop cascades to 12 other objects +DETAIL: drop cascades to table emp +drop cascades to function pro_cursor_no_args_1() +drop cascades to function pro_cursor_no_args_2() +drop cascades to table test12 +drop cascades to function cursor1() +drop cascades to table tb1 +drop cascades to function pro_cursor_args() +drop cascades to table for_loop_test_001 +drop cascades to table for_loop_test_002 +drop cascades to function test_forloop_001() +drop cascades to function pro_close_cursor1() +drop cascades to function pro_close_cursor2() +drop schema schema1 cascade; +NOTICE: drop cascades to table schema1.t11 diff --git a/src/test/regress/expected/plpgsql_inout_param.out b/src/test/regress/expected/plpgsql_inout_param.out new file mode 100644 index 000000000..64ed35e05 --- /dev/null +++ b/src/test/regress/expected/plpgsql_inout_param.out @@ -0,0 +1,1671 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_inout; +NOTICE: schema "plpgsql_inout" does not exist, skipping +create schema plpgsql_inout; +set current_schema = plpgsql_inout; +set behavior_compat_options="proc_outparam_override"; +------------------------------------------------ +--------------------inout----------------------- +------------------------------------------------ +create or replace procedure proc1(a1 in out int, a2 in out int) +is +begin +a1 := a1 + 1; +a2 := a2 + 1; +end; +/ +create or replace procedure proc2() +is +a1 int := 1; +a2 int := 2; +begin +raise info '%', a1; +proc1(a1, a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ +call proc2(); +INFO: 1 +INFO: a1:2 +INFO: a2:3 + proc2 +------- + +(1 row) + +create or replace procedure proc2() +is +a1 int := 1; +a2 int := 2; +begin +raise info '%', a1; +proc1(a1=>a1, a2=>a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ +call proc2(); +INFO: 1 +INFO: a1:2 +INFO: a2:3 + proc2 +------- + +(1 row) + +-- test table +create or replace procedure proc3() +is +type arr is table OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +end; +/ +call proc3(); +INFO: a2:{3,4} + proc3 +------- + +(1 row) + +create or replace procedure proc3() +is +type arr is table OF integer; +aa2 arr; +begin +aa2[1] = 2; +aa2[2] = 3; +proc1(a1=>aa2(1), a2=>aa2(2)); +raise info 'aa2:%', aa2; +end; +/ +call proc3(); +INFO: aa2:{3,4} + proc3 +------- + +(1 row) + +-- table nest error +create or replace procedure proc3() +is +type arr is table OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1)(1), a2(2)); +raise info 'a2:%', a2; +end; +/ +create or replace procedure proc3() +is +type 
arr is table OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2[1], a2[2]); +raise info 'a2:%', a2; +end; +/ +call proc3(); +INFO: a2:{3,4} + proc3 +------- + +(1 row) + +create or replace procedure proc3() +is +type arr is table OF integer; +aa2 arr; +aa1 int; +begin +aa2[1] = 2; +aa2[2] = 3; +aa1 := 1; +proc1(a1=>aa2[1], a2=>aa2[2]); +raise info 'aa2:%', aa2; +end; +/ +call proc3(); +INFO: aa2:{3,4} + proc3 +------- + +(1 row) + +-- test array +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +end; +/ +call proc4(); +INFO: a2:{3,4} + proc4 +------- + +(1 row) + +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +end; +/ +call proc4(); +INFO: a2:{3,4} + proc4 +------- + +(1 row) + +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2[1], a2[2]); +raise info 'a2:%', a2; +end; +/ +call proc4(); +INFO: a2:{3,4} + proc4 +------- + +(1 row) + +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +aa2 arr; +aa1 int; +begin +aa2[1] = 2; +aa2[2] = 3; +aa1 := 1; +proc1(a1=>aa2[1], a2=>aa2[2]); +raise info 'aa2:%', aa2; +end; +/ +call proc4(); +INFO: aa2:{3,4} + proc4 +------- + +(1 row) + +create or replace procedure proc4() +is +a int[][] := ARRAY[ARRAY[1,2,3],ARRAY[4,5,6],ARRAY[7,8,9]]; +begin +a[1][2] = 2; +a[2][1] = 3; +proc1(a[2][3], a[3][2]); +raise info 'a2:%', a; +end; +/ +call proc4(); +INFO: a2:{{1,2,3},{3,5,7},{7,9,9}} + proc4 +------- + +(1 row) + +create or replace procedure proc4() +is +a int[][] := ARRAY[ARRAY[1,2,3],ARRAY[4,5,6],ARRAY[7,8,9]]; +begin +a[1][2] = 2; +a[2][1] = 3; +proc1(a1=>a[2][3], a2=>a[3][2]); +raise info 'a2:%', a; +end; +/ +call proc4(); +INFO: a2:{{1,2,3},{3,5,7},{7,9,9}} + proc4 +------- + +(1 row) + +create or replace procedure proc5(a1 in out int[]) +is +begin +a1[2] = 2; +end; +/ +create or replace procedure proc6() +is +a1 int[]; +begin +a1[1] = 1; +proc5(a1); +raise info 'a1:%', a1; +end; +/ +call proc6(); +INFO: a1:{1,2} + proc6 +------- + +(1 row) + +create or replace procedure proc6() +is +a1 int[]; +begin +a1[1] = 1; +proc5(a1=>a1); +raise info 'a1:%', a1; +end; +/ +call proc6(); +INFO: a1:{1,2} + proc6 +------- + +(1 row) + +create or replace procedure proc7(a1 in out int[], a2 in out int) +is +begin +a1[2] = 2; +a2 = 2; +end; +/ +create or replace procedure proc8() +is +a1 int[]; +a2 int; +begin +a1[1] = 1; +a2 = 1; +proc7(a1, a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ +call proc8(); +INFO: a1:{1,2} +INFO: a2:2 + proc8 +------- + +(1 row) + +create or replace procedure proc8() +is +a1 int[]; +a2 int; +begin +a1[1] = 1; +a2 = 1; +proc7(a1=>a1, a2=>a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ +call proc8(); +INFO: a1:{1,2} +INFO: a2:2 + proc8 +------- + +(1 row) + +create table tb_test(col1 int,col2 int,col3 int); +insert into tb_test values (1,1,1); +insert into tb_test values (2,2,2); +-- test for rec +create or replace procedure proc9() +is +begin +for rec in (select col1,col2,col3 from tb_test) loop +proc1(rec.col1, rec.col2); +raise info 'col1:%', rec.col1; +raise info 'col2:%', rec.col2; +end loop; +end; +/ +call proc9(); +INFO: col1:2 +INFO: col2:2 +INFO: col1:3 +INFO: col2:3 + proc9 +------- + +(1 row) + 
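+-- A minimal sketch of the calling pattern exercised by the proc3/proc4
+-- blocks above, assuming behavior_compat_options = "proc_outparam_override"
+-- is still in effect: an element of a table-of or varray collection can be
+-- bound to an IN OUT parameter, positionally or by name, and the updated
+-- value is written back into that element after the call.
+--
+--   declare
+--     type arr is varray(10) of integer;
+--     v arr;
+--   begin
+--     v(1) := 2;
+--     v(2) := 3;
+--     proc1(v(1), v(2));      -- proc1 increments both IN OUT arguments
+--     raise info '%', v;      -- prints {3,4}: elements updated in place
+--   end;
+--   /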
+create or replace procedure proc9()
+is
+begin
+for rec in (select col1,col2,col3 from tb_test) loop
+proc1(a1=>rec.col1, a2=>rec.col2);
+raise info 'col1:%', rec.col1;
+raise info 'col2:%', rec.col2;
+end loop;
+end;
+/
+call proc9();
+INFO: col1:2
+INFO: col2:2
+INFO: col1:3
+INFO: col2:3
+ proc9
+-------
+
+(1 row)
+
+create or replace procedure proc10()
+is
+begin
+for rec in (select col1,col2,col3 from tb_test) loop
+proc1(rec.col1, rec.col2);
+raise info 'col1:%', rec.col1;
+end loop;
+end;
+/
+call proc10();
+INFO: col1:2
+INFO: col1:3
+ proc10
+--------
+
+(1 row)
+
+create type info as (name varchar2(50), age int, address varchar2(20), salary float(2));
+-- test 1 out param
+create or replace procedure proc12(a inout info)
+is
+
+begin
+ a = ('Vera' ,32, 'Paris', 22999.00);
+end;
+/
+-- test record
+create or replace procedure proc11()
+is
+ a info;
+begin
+ proc12(a);
+ raise info '%', a;
+end;
+/
+call proc11();
+INFO: (Vera,32,Paris,22999)
+ proc11
+--------
+
+(1 row)
+
+create or replace procedure proc11()
+is
+ a info;
+begin
+ proc12(a=>a);
+ raise info '%', a;
+end;
+/
+call proc11();
+INFO: (Vera,32,Paris,22999)
+ proc11
+--------
+
+(1 row)
+
+-- test 2 out param
+create or replace procedure proc20(a inout info, b inout int)
+is
+
+begin
+ b = 1;
+ a = ('Vera' ,32, 'Paris', 22999.00);
+end;
+/
+-- test record
+create or replace procedure proc21()
+is
+ a info;
+ b int;
+begin
+ proc20(a,b);
+ raise info '%', a;
+ raise info '%', b;
+end;
+/
+call proc21();
+INFO: (Vera,32,Paris,22999)
+INFO: 1
+ proc21
+--------
+
+(1 row)
+
+-- test record
+create or replace procedure proc21()
+is
+ a info;
+ b int;
+begin
+ proc20(a=>a,b=>b);
+ raise info '%', a;
+ raise info '%', b;
+end;
+/
+call proc21();
+INFO: (Vera,32,Paris,22999)
+INFO: 1
+ proc21
+--------
+
+(1 row)
+
+--test record error
+create or replace procedure proc11()
+is
+ type r is record (name varchar2(50), age int, address varchar2(20), salary float(2));
+ a r;
+begin
+ a = ('Vera' ,33, 'Paris', 22999.00);
+ proc12(row(a));
+ raise info '%', a;
+end;
+/
+--test record nest
+create or replace procedure proc12()
+is
+ type r is varray(10) of info;
+ a r;
+ a2 int := 1;
+begin
+ a[1] = ('Vera' ,33, 'Paris', 22999.00);
+ proc1(a[1].age, a2);
+ raise info '%', a;
+end;
+/
+call proc12();
+INFO: {"(Vera,34,Paris,22999)"}
+ proc12
+--------
+
+(1 row)
+
+create or replace procedure proc12()
+is
+ type r is varray(10) of info;
+ a r;
+ a2 int := 1;
+begin
+ a[1] = ('Vera' ,33, 'Paris', 22999.00);
+ proc1(a1=>a[1].age, a2=>a2);
+ raise info '%', a;
+end;
+/
+call proc12();
+INFO: {"(Vera,34,Paris,22999)"}
+ proc12
+--------
+
+(1 row)
+
+create or replace procedure proc12()
+is
+ a info[][] := ARRAY[ARRAY[('',1,'',0), ('',2,'',0)],ARRAY[('',3,'',0), ('',4,'',0)]];
+ a2 int := 1;
+begin
+ proc1(a[1][2].age, a2);
+ raise info '%', a;
+end;
+/
+call proc12();
+INFO: {{"(,1,,0)","(,3,,0)"},{"(,3,,0)","(,4,,0)"}}
+ proc12
+--------
+
+(1 row)
+
+create or replace procedure proc12()
+is
+ a info[][] := ARRAY[ARRAY[('',1,'',0), ('',2,'',0)],ARRAY[('',3,'',0), ('',4,'',0)]];
+ a2 int := 1;
+begin
+ proc1(a1=>a[1][2].age, a2=>a2);
+ raise info '%', a;
+end;
+/
+call proc12();
+INFO: {{"(,1,,0)","(,3,,0)"},{"(,3,,0)","(,4,,0)"}}
+ proc12
+--------
+
+(1 row)
+
+create type o1 as (a int, b int);
+create type o2 as (a int, b o1);
+create type o3 as (a int, b o2);
+create or replace procedure proc13()
+is
+ a o2;
+ a2 int := 1;
+begin
+ a.b.b = 1;
+ proc1(a.b.b, a2);
+ raise info '%', a;
+end;
+/
+call proc13();
+INFO: (,"(,2)")
+
proc13 +-------- + +(1 row) + +create or replace procedure proc13() +is + a o2; + a2 int := 1; +begin + a.b.b = 1; + proc1(a1=>a.b.b, a2=>a2); + raise info '%', a; +end; +/ +call proc13(); +INFO: (,"(,2)") + proc13 +-------- + +(1 row) + +create or replace procedure proc14() +is + a o3; + a2 int := 1; +begin + a.b.b.b = 1; + raise info '%', a; + proc1(a.b.b.b, a2); + raise info '%', a; +end; +/ +call proc14(); +INFO: (,"(,""(,1)"")") +INFO: (,"(,""(,2)"")") + proc14 +-------- + +(1 row) + +create or replace procedure proc14() +is + a o3; + a2 int := 1; +begin + a.b.b.b = 1; + raise info '%', a; + proc1(a1=>a.b.b.b, a2=>a2); + raise info '%', a; +end; +/ +call proc14(); +INFO: (,"(,""(,1)"")") +INFO: (,"(,""(,2)"")") + proc14 +-------- + +(1 row) + +create type customer as (id number(10), c_info info); +create table customers (id number(10), c_info info); +insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00)); +create or replace procedure proc15() +is +rec record; +begin +for rec in (select id, c_info from customers) loop +proc1(rec.c_info.id,1); +raise info '%', rec.c_info.id; +end loop; +end; +/ +call proc15(); +ERROR: schema "rec" does not exist +CONTEXT: SQL statement "CALL proc1(rec.c_info.id,1)" +PL/pgSQL function proc15() line 5 at SQL statement +create or replace procedure proc15() +is +rec record; +begin +for rec in (select id, c_info from customers) loop +proc1(a1=>rec.c_info.id, a2=>1); +raise info '%', rec.c_info.id; +end loop; +end; +/ +ERROR: when invoking function proc1, no destination for argments "a1" +CONTEXT: compilation of PL/pgSQL function "proc15" near line 4 +call proc15(); +ERROR: schema "rec" does not exist +CONTEXT: SQL statement "CALL proc1(rec.c_info.id,1)" +PL/pgSQL function proc15() line 5 at SQL statement +create or replace procedure proc16(a1 in out varchar) +is +begin +a1 := 'bbbb'; +end; +/ +create or replace procedure proc17() +is +type arr is varray(10) OF varchar(10); +a arr; +begin +a[1] = 'aaa'; +proc16(a[1]); +raise info '%', a; +end; +/ +call proc17(); +INFO: {bbbb} + proc17 +-------- + +(1 row) + +create or replace procedure proc17() +is +type arr is varray(10) OF varchar(10); +a arr; +begin +a[1] = 'aaa'; +proc16(a1=>a[1]); +raise info '%', a; +end; +/ +call proc17(); +INFO: {bbbb} + proc17 +-------- + +(1 row) + +create or replace package pckg_test1 as +array_info info[][] := ARRAY[ARRAY[('',1,'',0), ('',2,'',0)],ARRAY[('',3,'',0), ('',4,'',0)]]; +array_int int[][] := ARRAY[ARRAY[1,2,3],ARRAY[4,5,6],ARRAY[7,8,9]]; +procedure pr_test(i_col1 inout int,i_col2 inout int); +procedure pr_test1(); +procedure pr_test2(); +procedure pr_test3(); +end pckg_test1; +/ +create or replace package body pckg_test1 as +procedure pr_test(i_col1 inout int,i_col2 inout int)as +begin +i_col1 = i_col1+1; +i_col2 = i_col2+2; +end; + +procedure pr_test1()as +begin +for rec in (select col1,col2,col3 from tb_test) loop +raise info '%', rec.col2; +pr_test(rec.col1,rec.col2); +raise info '%', rec.col2; +end loop; +end; + +procedure pr_test2()as +a o2; +b o3; +begin +a.b.b = 1; +b.b.b.b = 1; +pr_test(a.b.b, b.b.b.b); +raise info '%', a; +raise info '%', b; + +pr_test(array_info[1][2].age,array_int[2][3]); +raise info '%',array_info; +raise info '%',array_int; + +end; + +procedure pr_test3()as +type arr is varray(10) OF integer; +a2 arr; +type tbl is table of integer; +a3 tbl; +begin +a2[1] = 1; +a3[2] = 1; +pr_test(a2[1],a3[2]); +raise info '%',a2; +raise info '%',a3; +end; +end pckg_test1; +/ +call pckg_test1.pr_test1(); +INFO: 1 +INFO: 3 +INFO: 2 
+INFO: 4 + pr_test1 +---------- + +(1 row) + +call pckg_test1.pr_test2(); +INFO: (,"(,2)") +INFO: (,"(,""(,3)"")") +INFO: {{"(,1,,0)","(,3,,0)"},{"(,3,,0)","(,4,,0)"}} +INFO: {{1,2,3},{4,5,8},{7,8,9}} + pr_test2 +---------- + +(1 row) + +call pckg_test1.pr_test3(); +INFO: {2} +INFO: [2:2]={3} + pr_test3 +---------- + +(1 row) + +create or replace package body pckg_test1 as +procedure pr_test(i_col1 inout int,i_col2 inout int)as +begin +i_col1 = i_col1+1; +i_col2 = i_col2+2; +end; + +procedure pr_test1()as +begin +for rec in (select col1,col2,col3 from tb_test) loop +raise info '%', rec.col2; +pr_test(i_col1=>rec.col1, i_col2=>rec.col2); +raise info '%', rec.col2; +end loop; +end; + +procedure pr_test2()as +a o2; +b o3; +begin +a.b.b = 1; +b.b.b.b = 1; +pr_test(i_col1=>a.b.b, i_col2=>b.b.b.b); +raise info '%', a; +raise info '%', b; + +pr_test(i_col1=>array_info[1][2].age, i_col2=>array_int[2][3]); +raise info '%',array_info; +raise info '%',array_int; + +end; + +procedure pr_test3()as +type arr is varray(10) OF integer; +a2 arr; +type tbl is table of integer; +a3 tbl; +begin +a2[1] = 1; +a3[2] = 1; +pr_test(i_col1=>a2[1], i_col2=>a3[2]); +raise info '%',a2; +raise info '%',a3; +end; +end pckg_test1; +/ +call pckg_test1.pr_test1(); +INFO: 1 +INFO: 3 +INFO: 2 +INFO: 4 + pr_test1 +---------- + +(1 row) + +call pckg_test1.pr_test2(); +INFO: (,"(,2)") +INFO: (,"(,""(,3)"")") +INFO: {{"(,1,,0)","(,3,,0)"},{"(,3,,0)","(,4,,0)"}} +INFO: {{1,2,3},{4,5,8},{7,8,9}} + pr_test2 +---------- + +(1 row) + +call pckg_test1.pr_test3(); +INFO: {2} +INFO: [2:2]={3} + pr_test3 +---------- + +(1 row) + +create or replace procedure proc1(c1 out INT, c2 out INT) +is +begin +raise info '%', c1; +c1 := 10000; +c2 := 20000; +end; +/ +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 INT; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +raise info 'a1:%', a1; +end; +/ +call proc3(); +INFO: +CONTEXT: SQL statement "CALL proc1(a2[1],a2[2])" +PL/pgSQL function proc3() line 9 at SQL statement +INFO: a2:{10000,20000} +INFO: a1:1 + proc3 +------- + +(1 row) + +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 INT; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(c1=>a2(1), c2=>a2(2)); +raise info 'a2:%', a2; +raise info 'a1:%', a1; +end; +/ +call proc3(); +INFO: +CONTEXT: SQL statement "CALL proc1(c1=>a2[1],c2=>a2[2])" +PL/pgSQL function proc3() line 9 at SQL statement +INFO: a2:{10000,20000} +INFO: a1:1 + proc3 +------- + +(1 row) + +create or replace procedure proc1(a1 in BIGINT, a2 out BIGINT, a3 inout BIGINT) +is +begin +a1 := a1 + 10000; +a2 := a2 + 20000; +a3 := a3 + 30000; +end; +/ +create or replace procedure proc3() +is +type arr is table OF BIGINT; +a2 arr; +begin +a2[1] = 1; +a2[2] = 2; +a2[3] = 3; +proc1(a2(1), a2(2), a2(3)); +raise info 'a2:%', a2; +raise info 'a2:%', a2[1]; +raise info 'a2:%', a2[2]; +end; +/ +call proc3(); +INFO: a2:{1,NULL,30003} +INFO: a2:1 +INFO: a2: + proc3 +------- + +(1 row) + +create or replace procedure proc3() +is +type arr is table OF BIGINT; +c2 arr; +begin +c2[1] = 1; +c2[2] = 2; +c2[3] = 3; +proc1(a1=>c2(1), a2=>c2(2), a3=>c2(3)); +raise info 'a2:%', c2; +raise info 'a2:%', c2[1]; +raise info 'a2:%', c2[2]; +end; +/ +call proc3(); +INFO: a2:{1,NULL,30003} +INFO: a2:1 +INFO: a2: + proc3 +------- + +(1 row) + +create type t as (a int, b boolean); +-- complex type +create or replace procedure proc1(a1 in out t, a2 in out boolean) +is +begin +a1.a := a1.a + 1; +a1.b := false; +a2 := 
false; +end; +/ +declare +a t; +b boolean; +begin +a.a = 1; +a.b = true; +b = true; +proc1(a,b); +raise info '%', a.a; +raise info '%', a.b; +raise info '%', b; +end; +/ +INFO: 2 +INFO: f +INFO: f +declare +a t; +b boolean; +begin +a.a = 1; +a.b = true; +b = true; +proc1(a1=>a,a2=>b); +raise info '%', a.a; +raise info '%', a.b; +raise info '%', b; +end; +/ +INFO: 2 +INFO: f +INFO: f +------------------------------------------------ +---------------------out------------------------ +------------------------------------------------ +create or replace procedure proc1(a1 out INT) +is +begin +a1 := 10000; +raise info 'a1:%', a1; +end; +/ +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 int := 2; +begin +a2[1] = 1; +proc1(a2(1)); +raise info 'a2:%', a2; +proc1(a1); +raise info 'a1:%', a1; +end; +/ +call proc3(); +INFO: a1:10000 +CONTEXT: SQL statement "CALL proc1(a2[1])" +PL/pgSQL function proc3() line 7 at SQL statement +INFO: a2:{10000} +INFO: a1:10000 +CONTEXT: SQL statement "CALL proc1(a1)" +PL/pgSQL function proc3() line 9 at SQL statement +INFO: a1:10000 + proc3 +------- + +(1 row) + +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 int := 2; +begin +a2[1] = 1; +proc1(a1=>a2(1)); +raise info 'a2:%', a2; +proc1(a1=>a1); +raise info 'a1:%', a1; +end; +/ +call proc3(); +INFO: a1:10000 +CONTEXT: SQL statement "CALL proc1(a1=>a2[1])" +PL/pgSQL function proc3() line 7 at SQL statement +INFO: a2:{10000} +INFO: a1:10000 +CONTEXT: SQL statement "CALL proc1(a1=>a1)" +PL/pgSQL function proc3() line 9 at SQL statement +INFO: a1:10000 + proc3 +------- + +(1 row) + +create or replace procedure proc3() +is +type arr is varray(10) OF INT; +a2 arr; +a1 int := 2; +begin +a2[1] = 1; +proc1(a2(1)); +raise info 'a2:%', a2; +proc1(a1); +raise info 'a1:%', a1; +end; +/ +call proc3(); +INFO: a1:10000 +CONTEXT: SQL statement "CALL proc1(a2[1])" +PL/pgSQL function proc3() line 7 at SQL statement +INFO: a2:{10000} +INFO: a1:10000 +CONTEXT: SQL statement "CALL proc1(a1)" +PL/pgSQL function proc3() line 9 at SQL statement +INFO: a1:10000 + proc3 +------- + +(1 row) + +create or replace procedure proc1(a1 out t) +is +begin +a1.a := 1; +a1.b := false; +end; +/ +declare +a t; +begin +proc1(a); +raise info '%', a; +end; +/ +INFO: (1,f) +declare +a t; +begin +proc1(a1=>a); +raise info '%', a; +end; +/ +INFO: (1,f) +create or replace procedure proc1(a1 out t, a2 out boolean) +is +begin +a1.a := 1; +a1.b := false; +a2 := false; +end; +/ +declare +a t; +b boolean; +begin +proc1(a,b); +raise info '%', a; +raise info '%', b; +end; +/ +INFO: (1,f) +INFO: f +declare +a t; +b boolean; +begin +proc1(a1=>a,a2=>b); +raise info '%', a; +raise info '%', b; +end; +/ +INFO: (1,f) +INFO: f +create type t1 is table of int; +create or replace procedure p1(c1 in int, c2 out t1) +is +a int; +begin +a := c1; +raise info '%',a; +c2(1) := 1; +c2(2) := 2; +return; +end; +/ +create or replace procedure p2() +is +a t1; +begin +p1(c1=>'12',c2=>a); +raise info '%',a; +end; +/ +call p2(); +INFO: 12 +CONTEXT: SQL statement "CALL p1(c1=>'12',c2=>a)" +PL/pgSQL function p2() line 4 at SQL statement +INFO: {1,2} + p2 +---- + +(1 row) + +create or replace package pck6 is +type tp_2 is record(v01 number, v03 varchar2, v02 number); +end pck6; +/ +create or replace package pck5 is +type tp_1 is record(v01 number, v03 varchar2, v02 number); +procedure pp11(v01 out tp_1); +procedure pp11(v121 out number,v122 out pck6.tp_2); +end pck5; +/ +create or replace package body pck5 is +procedure pp11(v01 out 
tp_1) is +v122 pck6.tp_2; +begin +pp11(v121 => v01.v01, v122 => v122); +raise notice 'v01 : %', v01.v01; +end; +procedure pp11(v121 out number,v122 out pck6.tp_2) is +v_id1 varchar2; +begin +select id1 into v_id1 from test_tb1 limit 1; +raise notice '%', v_id1; +v121 := 12; +EXCEPTION +when no_data_found then +raise notice 'no data found: %', v121||SQLERRM; +v121 :=1; +WHEN others then +raise notice 'others :%', v121||SQLERRM; +v121 := 2; +end; +end pck5; +/ +create or replace function fun1 return number as +v01 pck5.tp_1; +begin +pck5.pp11(v01); +return 0; +end; +/ +select fun1(); +NOTICE: others :relation "test_tb1" does not exist on datanode1 +CONTEXT: SQL statement "CALL pp11(v121=>v01.v01,v122=>v122)" +--?.* +SQL statement "CALL pck5.pp11(v01)" +PL/pgSQL function fun1() line 4 at SQL statement +referenced column: fun1 +NOTICE: v01 : 2 +CONTEXT: SQL statement "CALL pck5.pp11(v01)" +PL/pgSQL function fun1() line 4 at SQL statement +referenced column: fun1 + fun1 +------ + 0 +(1 row) + +create or replace package body pck5 is +procedure pp11(v01 out tp_1) is +v122 pck6.tp_2; +begin +pp11(v01.v01, v122); +raise notice 'v01 : %', v01.v01; +end; +procedure pp11(v121 out number,v122 out pck6.tp_2) is +v_id1 varchar2; +begin +select id1 into v_id1 from test_tb1 limit 1; +raise notice '%', v_id1; +v121 := 12; +EXCEPTION +when no_data_found then +raise notice 'no data found: %', v121||SQLERRM; +v121 :=1; +WHEN others then +raise notice 'others :%', v121||SQLERRM; +v121 := 2; +end; +end pck5; +/ +select fun1(); +NOTICE: others :relation "test_tb1" does not exist on datanode1 +CONTEXT: SQL statement "CALL pp11(v01.v01,v122)" +--?.* +SQL statement "CALL pck5.pp11(v01)" +PL/pgSQL function fun1() line 4 at SQL statement +referenced column: fun1 +NOTICE: v01 : 2 +CONTEXT: SQL statement "CALL pck5.pp11(v01)" +PL/pgSQL function fun1() line 4 at SQL statement +referenced column: fun1 + fun1 +------ + 0 +(1 row) + +create or replace package pck7 is +type t_out is record(retcode number, + errcode number, + eerm varchar2(4000), + sqlcode varchar2(100), + sqlerrm varchar2(4000) + ); +success constant number(1) = 0; +fail constant number(1) = 1; +end pck7; +/ +create or replace package pck8 is +v_out pck7.t_out; +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_retcode out number); +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_base out pck7.t_out); +end pck8; +/ +create or replace package body pck8 is +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_retcode out number + ) is +v_out pck7.t_out; +begin +pp11(in_groupno=>in_groupno, + in_workdate => in_workdate, + o_base => v_out); +raise notice 'v_out : %', v_out; +end; +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_base out pck7.t_out)is +v_id1 varchar2; +begin +o_base := (1,1,'a','b','c'); +--o_base.retcode := 2; +end; +end pck8; +/ +declare +va number; +begin +pck8.pp11('a','b',va); +end; +/ +NOTICE: v_out : (1,1,a,b,c) +CONTEXT: SQL statement "CALL pck8.pp11('a','b',va)" +PL/pgSQL function inline_code_block line 4 at SQL statement +drop package pck5; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_inout.pp11() +drop cascades to function plpgsql_inout.pp11() +drop package pck6; +drop package pck7; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_inout.pp11(character varying,character varying) +drop cascades to function plpgsql_inout.pp11(character varying,character varying) +drop package pck8; 
+ERROR: package pck8 does not exist +drop package pckg_test1; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to function plpgsql_inout.pr_test(integer,integer) +drop cascades to function plpgsql_inout.pr_test1() +drop cascades to function plpgsql_inout.pr_test2() +drop cascades to function plpgsql_inout.pr_test3() +drop package pckg_test2; +ERROR: package pckg_test2 does not exist +create or replace procedure pp1(va int, vb int, vc out int) as +begin +null; +end; +/ +declare +v1 int; +begin +pp1(vd=>v1, va=>v1, vb=>v1); +end; +/ +ERROR: when invoking function pp1, no argments match "vd" +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +drop procedure pp1; +-- two out param, one is valid (should error) +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create or replace package pck1 is +procedure p1(a out varchar2,b out int); +end pck1; +/ +create or replace package body pck1 is +procedure p1(a out varchar2,b out int) is +begin +b:=1; +a:='a'||b; +end; +end pck1; +/ +declare +var varchar2; +begin +pck1.p1(var,1); +raise info 'var:%',var; +end; +/ +ERROR: query has no destination for result data +HINT: If you want to discard the results of a SELECT, use PERFORM instead. +CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement +drop package pck1; +NOTICE: drop cascades to function plpgsql_inout.p1() +-- two out param, one is valid, overload situation (should error) +create or replace package pkg070 +is +type type000 is record (c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); +type type001 is table of integer index by integer; +procedure proc070_1(col2 out type001,col3 out int,col4 out type001,col5 out int); +procedure proc070_1(col4 out type001,col5 out int); +procedure proc070_2(); +end pkg070; +/ +create or replace package body pkg070 +is +procedure proc070_1(col2 out type001,col3 out int,col4 out type001,col5 out int) +is +col1 type001; +begin +col2(1):=3; +col2(2):=4; +col3:=col2.count; +col4(2):=44; +col4(6):=55; +col5:=col4.count; +end; +procedure proc070_1(col4 out type001,col5 out int) +is +begin +col4(1):=4; +col4(2):=44; + --col4(3):=444; +col5:=col4.count; +raise info '2 parameter col5 is %',col5; +end; +procedure proc070_2() +is +tbcor1 type001; +tbcor2 type001; +begin +tbcor1(1):=1; +tbcor1(3):=3; +tbcor2(2):=2; +tbcor2(3):=23; +--proc070_1(tbcor1,tbcor1.count,tbcor2,tbcor2.count); +raise info 'tbcor1 is %',tbcor1; +raise info 'tbcor1.count is %',tbcor1.count; +raise info 'tbcor2 is %',tbcor2; +raise info 'tbcor2.count is %',tbcor2.count; +proc070_1(tbcor2,tbcor2.count); +raise info 'tbcor2 is %',tbcor2; +raise info 'tbcor2.count is %',tbcor2.count; +--raise info 'tbcor2.first is %',tbcor2.first; +end; +end pkg070; +/ +call pkg070.proc070_2(); +INFO: tbcor1 is {1,3} +INFO: tbcor1.count is 2 +INFO: tbcor2 is {2,23} +INFO: tbcor2.count is 2 +INFO: 2 parameter col5 is 2 +CONTEXT: SQL statement "CALL proc070_1(tbcor2,ARRAY_INDEXBY_LENGTH("tbcor2", 1 ) )" +PL/pgSQL function proc070_2() line 14 at SQL statement +ERROR: query has no destination for result data +HINT: If you want to discard the results of a SELECT, use PERFORM instead. 
+CONTEXT: PL/pgSQL function proc070_2() line 14 at SQL statement
+drop package pkg070;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to function plpgsql_inout.proc070_1()
+drop cascades to function plpgsql_inout.proc070_1()
+drop cascades to function plpgsql_inout.proc070_2()
+-- two out param, one is valid, => situation (should error)
+create or replace procedure pp1(a out int, b out int) as
+begin
+a := 1;
+b := 1;
+end;
+/
+declare
+var1 int;
+begin
+pp1(a=>var1,b=>3);
+end;
+/
+ERROR: when invoking function pp1, no destination for argments "b"
+CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2
+drop procedure pp1;
+-- test one in row, one out scalar
+-- 1
+drop package if exists pck1;
+NOTICE: package pck1() does not exist, skipping
+create or replace package pck1 is
+type tp_1 is record(v01 number, v03 varchar2, v02 number);
+type tp_2 is varray(10) of int;
+procedure p1(a tp_1,b out varchar2);
+procedure p1(a2 tp_2, b2 out varchar2);
+end pck1;
+/
+create or replace package body pck1 is
+procedure p1(a tp_1,b out varchar2) is
+begin
+b:=a.v01;
+raise info 'b:%',b;
+end;
+procedure p1(a2 tp_2, b2 out varchar2) is
+begin
+b2:=a2(2);
+raise info 'b2:%',b2;
+end;
+end pck1;
+/
+declare
+var1 pck1.tp_1:=(2,'a',3);
+var2 pck1.tp_2:=array[1,3];
+var varchar2;
+begin
+pck1.p1(var1,var);
+raise info 'var:%', var;
+end;
+/
+INFO: b:2
+CONTEXT: SQL statement "CALL pck1.p1(var1,var)"
+PL/pgSQL function inline_code_block line 6 at SQL statement
+INFO: var:2
+drop package if exists pck1;
+NOTICE: drop cascades to 2 other objects
+--?.*
+drop cascades to function plpgsql_inout.p1(integer[])
+-- 2.
+drop package if exists pck1;
+NOTICE: package pck1() does not exist, skipping
+create or replace package pck1 is
+type tp_1 is record(v01 number, v03 varchar2, v02 number);
+type tp_2 is record(v01 tp_1, v03 varchar2, v02 number);
+procedure p1(a tp_1,b out int);
+procedure p1(a2 in tp_2,b2 out int);
+end pck1;
+/
+create or replace package body pck1 is
+procedure p1(a tp_1,b out int) is
+begin
+b:=a.v02;
+raise info 'b:%',b;
+end;
+procedure p1(a2 in tp_2,b2 out int) is
+begin
+b2:=a2.v01.v01;
+raise info 'b2:%',b2;
+end;
+end pck1;
+/
+declare
+var1 pck1.tp_1:=(1,'bb',3);
+var2 pck1.tp_2:=((2,'aa',4),'c',5);
+var int;
+varr int;
+begin
+pck1.p1(var1,var);
+pck1.p1(var2,varr);
+raise info 'var:%',var;
+end;
+/
+INFO: b:3
+CONTEXT: SQL statement "CALL pck1.p1(var1,var)"
+PL/pgSQL function inline_code_block line 7 at SQL statement
+INFO: b2:2
+CONTEXT: SQL statement "CALL pck1.p1(var2,varr)"
+PL/pgSQL function inline_code_block line 8 at SQL statement
+INFO: var:3
+drop package if exists pck1;
+NOTICE: drop cascades to 2 other objects
+--?.*
+--?.*
+--3.
+drop table if exists tb_test; +create table tb_test(c1 int, c2 varchar2); +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create or replace package pck1 is +type tp_1 is record(v01 number, v03 varchar2, v02 number); +procedure p1(in a tb_test%rowtype,out b tp_1); +procedure p1(out a tp_1,in b tb_test%rowtype); +end pck1; +/ +create or replace package body pck1 is +procedure p1(in a tb_test%rowtype,out b tp_1) is +begin +b.v01:=a.c1; +b.v03:=a.c2; +end; +procedure p1(out a tp_1,in b tb_test%rowtype) is +begin +a.v01:=b.c1+1; +a.v03:=b.c2; +end; +end pck1; +/ +declare +var1 pck1.tp_1; +var2 tb_test%rowtype:=(1,'a'); +var3 pck1.tp_1; +begin +pck1.p1(a=>var2,b=>var1); +raise info 'var1:%',var1; +pck1.p1(a=>var3,b=>var2); +raise info 'var3:%',var3; +end; +/ +INFO: var1:(1,a,) +INFO: var3:(2,a,) +drop package pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_inout.p1(tb_test) +drop cascades to function plpgsql_inout.p1(tb_test) +drop table tb_test; +-- clean +drop schema if exists plpgsql_inout cascade; +NOTICE: drop cascades to 30 other objects +DETAIL: drop cascades to function proc2() +drop cascades to function proc4() +drop cascades to function proc5(integer[]) +drop cascades to function proc6() +drop cascades to function proc7(integer[],integer) +drop cascades to function proc8() +drop cascades to function proc9() +drop cascades to function proc10() +drop cascades to type info +drop cascades to function proc20(info,integer) +drop cascades to function proc21() +drop cascades to function proc11() +drop cascades to function proc12() +drop cascades to type o1 +drop cascades to type o2 +drop cascades to type o3 +drop cascades to function proc13() +drop cascades to function proc14() +drop cascades to type customer +drop cascades to table customers +drop cascades to function proc15() +drop cascades to function proc16(character varying) +drop cascades to function proc17() +drop cascades to type t +drop cascades to function proc3() +drop cascades to function proc1() +drop cascades to type _int4[] +drop cascades to function p1(integer) +drop cascades to function p2() +drop cascades to function fun1() diff --git a/src/test/regress/expected/plpgsql_insert_record.out b/src/test/regress/expected/plpgsql_insert_record.out new file mode 100644 index 000000000..32b3147c3 --- /dev/null +++ b/src/test/regress/expected/plpgsql_insert_record.out @@ -0,0 +1,267 @@ +-- test insert into table values record +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_table; +NOTICE: schema "plpgsql_table" does not exist, skipping +create schema plpgsql_table; +set current_schema = plpgsql_table; +set behavior_compat_options=''; +create table record_cursor_tbl(result varchar2(10), mod number); +insert into record_cursor_tbl values('a',2); +create or replace procedure record_cursor_p1 +as +begin + for rec in (select a.mod || a.result, a.* from record_cursor_tbl a) loop + insert into record_cursor_tbl values(rec); + null; + end loop; +end; +/ +call record_cursor_p1(); + record_cursor_p1 +------------------ + +(1 row) + +drop procedure record_cursor_p1; +drop table record_cursor_tbl; + +set behavior_compat_options='allow_procedure_compile_check'; +create table plpgsql_table.insert_table(a int, b int); +create table plpgsql_table.insert_table2(a int, b int); +create type plpgsql_table.ComType as (a int, b int); +-- normal insert 
record type. +create or replace function testInsertRecord() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +r RR1; +r1 ComType; +TYPE AA1 is varray(100) of RR1; +TYPE AA2 is varray(100) of ComType; +TYPE TT1 is table of RR1; +TYPE TT2 is table of ComType; +a1 AA1; +a2 AA2; +t1 TT1; +t2 TT2; +begin +r = (1,1); +r1 = (1,1); +insert into insert_table values r; +insert into insert_table values r1; +insert into insert_table values(r.a, r.b); + +a1[0] = (2,2); +a1[1] = (3,3); +insert into insert_table values a1[0]; +insert into insert_table values a1(1); + +a2[0] = (4,4); +a2[1] = (5,5); +insert into insert_table values a2[0]; +insert into insert_table values a2(1); + +t1(0) = (6,6); +t1(1) = (7,7); +insert into insert_table values t1[0]; +insert into insert_table values t1(1); + +t2(0) = (8,8); +t2(1) = (9,9); +insert into insert_table values t2[0]; +insert into insert_table values t2(1); + +return 1; +end; +$$ language plpgsql; +-- insert unsupport type variable. +create or replace function testInsertRecordError1() RETURNS int as $$ +declare +i int; +begin +i = 1; +insert into insert_table values i; +return 1; +end; +$$ language plpgsql; +ERROR: unsupported insert into table from non record type. at or near "i" +LINE 6: insert into insert_table values i; + ^ +create or replace function testInsertRecordError2() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +r RR1; +i int; +begin +r = (1,1); +i = 1; +insert into insert_table values(1,1) r; +return 1; +end; +$$ language plpgsql; +ERROR: syntax error at or near "r" +LINE 9: insert into insert_table values(1,1) r; + ^ +create or replace function testInsertRecordError3() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +r RR1; +r1 RR1;; +begin +r = (1,1); +r1 = (2,2); +insert into insert_table values r, r1; +return 1; +end; +$$ language plpgsql; +ERROR: syntax error at or near ";" +LINE 5: r1 RR1;; + ^ +create or replace function testInsertRecordError4() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +TYPE AA1 is varray(100) of RR1; +a1 AA1; +begin +a1[0] = (1,1); +a1[1] = (2,2); +insert into insert_table values a1; +return 1; +end; +$$ language plpgsql; +create or replace function testInsertRecordError5() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +TYPE AA1 is table of RR1; +a1 AA1; +begin +a1[0] = (1,1); +a1[1] = (2,2); +insert into insert_table values a1; +return 1; +end; +$$ language plpgsql; +create or replace function testInsertRecordError6() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +TYPE AA1 is table of RR1; +a1 AA1; +begin +a1[0] = (1,1); +a1[1] = (2,2); +insert into insert_table values a1[0], a1[1]; +return 1; +end; +$$ language plpgsql; +ERROR: syntax error at or near "[" +LINE 9: insert into insert_table values a1[0], a1[1]; + ^ +select testInsertRecord(); + testinsertrecord +------------------ + 1 +(1 row) + +select testInsertRecordError4(); +ERROR: missing FROM-clause entry for table "a1" +LINE 1: insert into insert_table values (a1.a,a1.b) + ^ +QUERY: insert into insert_table values (a1.a,a1.b) +CONTEXT: PL/pgSQL function testinsertrecorderror4() line 9 at SQL statement +referenced column: testinsertrecorderror4 +select testInsertRecordError5(); +ERROR: missing FROM-clause entry for table "a1" +LINE 1: insert into insert_table values (a1.a,a1.b) + ^ +QUERY: insert into insert_table values (a1.a,a1.b) +CONTEXT: PL/pgSQL function testinsertrecorderror5() line 9 at SQL statement +referenced column: testinsertrecorderror5 +create or replace 
function testForInsertRec() RETURNS int as $$ +declare +begin +for rec in (select a, b from insert_table) loop +insert into insert_table2 values rec; +end loop; +return 1; +end; +$$ language plpgsql; +create or replace function testForInsertRecError1() RETURNS int as $$ +declare +begin +for rec in (select a, b, 1 from insert_table) loop +insert into insert_table2 values rec; +end loop; +return 1; +end; +$$ language plpgsql; +ERROR: The 3th column in rec record variable does't have alias. +CONTEXT: compilation of PL/pgSQL function "testforinsertrecerror1" near line 4 +select testForInsertRec(); + testforinsertrec +------------------ + 1 +(1 row) + +select testForInsertRecError1(); +ERROR: function testforinsertrecerror1() does not exist +LINE 1: select testForInsertRecError1(); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +CONTEXT: referenced column: testforinsertrecerror1 +select * from insert_table; + a | b +---+--- + 1 | 1 + 1 | 1 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 +(11 rows) + +select * from insert_table2; + a | b +---+--- + 1 | 1 + 1 | 1 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 +(11 rows) + +reset behavior_compat_options; +drop table insert_table; +drop table insert_table2; +drop type ComType; +drop function testInsertRecord; +drop function testInsertRecordError1; +ERROR: function testinsertrecorderror1 does not exist +drop function testInsertRecordError2; +ERROR: function testinsertrecorderror2 does not exist +drop function testInsertRecordError3; +ERROR: function testinsertrecorderror3 does not exist +drop function testInsertRecordError4; +drop function testInsertRecordError5; +drop function testInsertRecordError6; +ERROR: function testinsertrecorderror6 does not exist +drop function testForInsertRec; +drop function testForInsertRecError1; +ERROR: function testforinsertrecerror1 does not exist +drop schema if exists plpgsql_table; diff --git a/src/test/regress/expected/plpgsql_multiset.out b/src/test/regress/expected/plpgsql_multiset.out new file mode 100644 index 000000000..8645abba7 --- /dev/null +++ b/src/test/regress/expected/plpgsql_multiset.out @@ -0,0 +1,601 @@ +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +drop schema if exists plpgsql_multiset; +NOTICE: schema "plpgsql_multiset" does not exist, skipping +create schema plpgsql_multiset; +set current_schema = plpgsql_multiset; +create type m_type as ( + id integer, + name varchar, + addr text +); +create type m_type1 as ( + id integer[], + name varchar, + addr text +); +----------------------------------------------------- +------------------ multiset union ------------------- +----------------------------------------------------- +-- test index by error +declare + TYPE SalTabTyp is TABLE OF integer index by integer; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa.count; +end; +/ +ERROR: multiset don't support index by table of type. 
at or near "multiset" +LINE 10: aa = aa multiset union bb; + ^ +QUERY: DECLARE TYPE SalTabTyp is TABLE OF integer index by integer; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa.count; +end +-- test base type +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ +INFO: {1,NULL,2,2,NULL} +-- test different type +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + TYPE SalTabTyp1 is TABLE OF varchar(10); + bb SalTabTyp1; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 'aa'; + bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ +ERROR: aa and bb type are not match. +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 9 +declare + TYPE SalTabTyp is TABLE OF varchar(10); + aa SalTabTyp; + TYPE SalTabTyp1 is TABLE OF integer; + bb SalTabTyp1; + begin + bb(0) = 1; + bb(2) = 2; + aa(0) = 'aa'; + aa(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ +ERROR: aa and bb type are not match. +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 9 +declare +TYPE tabint is TABLE OF integer; +bint1 tabint ; +bint2 tabint ; +begin +bint1(2) = 2; +bint1(3) = null; +bint1(4) = 4; +bint2(-1) = null; +bint2(0) = 0; +bint2(1) = null; +bint2(5) = 1; +bint2 = bint1 multiset union bint2; +RAISE INFO '%,%,%,%', bint2,bint2.first,bint2.last,bint2.count; +RAISE INFO '1:%', bint2(1); +RAISE INFO '2:%', bint2(2); +RAISE INFO '3:%', bint2(3); +RAISE INFO '4:%', bint2(4); +RAISE INFO '5:%', bint2(5); +end; +/ +INFO: {2,NULL,4,NULL,0,NULL,NULL,NULL,NULL,1},1,10,10 +INFO: 1:2 +INFO: 2: +INFO: 3:4 +INFO: 4: +INFO: 5:0 +declare +TYPE tabint is TABLE OF integer; +bint1 tabint ; +bint2 tabint ; +begin +bint1(-1) = 2; +bint1(0) = 4; +bint2(4) = 3; +bint2(6) = 1; +bint2(5) = 0; +bint2 = bint1 multiset union bint2; +RAISE INFO '%,%,%,%', bint2,bint2.first,bint2.last,bint2.count; +RAISE INFO '1:%', bint2(1); +RAISE INFO '2:%', bint2(2); +RAISE INFO '3:%', bint2(3); +RAISE INFO '4:%', bint2(4); +RAISE INFO '5:%', bint2(5); +end; +/ +INFO: {2,4,3,0,1},1,5,5 +INFO: 1:2 +INFO: 2:4 +INFO: 3:3 +INFO: 4:0 +INFO: 5:1 +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + begin + bb(0) = 1; + bb(2) = 2; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ +INFO: [0:2]={1,NULL,2} +-- test varchar +declare + TYPE SalTabTyp is TABLE OF varchar(10); + aa SalTabTyp; + bb SalTabTyp; + begin + aa(1) = 'abcde'; + aa(2) = 'mgssq'; + bb(1) = 'zxcvb'; + bb(2) = 'abcde'; + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ +INFO: {abcde,mgssq,zxcvb} +-- test int[] error +declare + TYPE SalTabTyp is TABLE OF int[]; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ +ERROR: array or table type nested by table type is not supported yet. +DETAIL: Define table type "saltabtyp" of array or table type is not supported yet. 
+CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 1 +-- distinct base type +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ +INFO: {1,NULL,2} +-- test array +declare + TYPE SalTabTyp is TABLE OF m_type1; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (ARRAY[1,2], 'lisi', 'beijing'); + bb(1) = (ARRAY[1,2], 'lisi', 'beijing'); + bb(2) = (ARRAY[2,1], 'lisi', 'beijing'); + cc = aa multiset union distinct bb; + RAISE INFO '%', cc; +end; +/ +INFO: {"(\"{1,2}\",lisi,beijing)","(\"{2,1}\",lisi,beijing)"} +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (1, 'lisi', 'beijing'); + cc = aa multiset union distinct bb; + RAISE INFO '%', cc; + cc(2) = (3, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: {"(1,lisi,beijing)","(2,lisi,beijing)"} +INFO: {"(1,lisi,beijing)","(3,lisi,beijing)"} +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + bb(2) = (2, 'lisi', 'beijing'); + bb(3) = (1, 'lisi', 'beijing'); + cc = aa multiset union distinct bb; + RAISE INFO '%', cc; +end; +/ +INFO: {"(1,lisi,beijing)","(2,lisi,beijing)"} +-- test both null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; + aa(1) = (1, 'lisi', 'beijing'); + RAISE INFO '%', aa; +end; +/ +INFO: +INFO: {"(1,lisi,beijing)"} +-- test both non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + bb(2) = NULL; + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ +INFO: {NULL,"(2,lisi,beijing)","(3,lisi,beijing)","(4,lisi,beijing)"} +----------------------------------------------------- +---------------- multiset intersect ----------------- +----------------------------------------------------- +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ +INFO: {NULL,2} +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + cc = aa multiset intersect bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: +INFO: [2:2]={"(2,lisi,beijing)"} +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + cc = aa multiset intersect bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: +INFO: [2:2]={"(2,lisi,beijing)"} +-- test both null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset intersect bb; + RAISE INFO '%', aa; + aa(1) = (1, 'lisi', 'beijing'); + RAISE INFO '%', aa; +end; +/ +INFO: +INFO: {"(1,lisi,beijing)"} +-- test both 
non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ +INFO: {"(3,lisi,beijing)"} +-- test both non-null left 2 same value +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = (3, 'lisi', 'beijing'); + aa(6) = (4, 'lisi', 'beijing'); + + bb(0) = (3, 'lisi', 'beijing'); + bb(2) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ +INFO: {"(3,lisi,beijing)","(3,lisi,beijing)","(4,lisi,beijing)"} +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(6) = (4, 'lisi', 'beijing'); + + bb(0) = (3, 'lisi', 'beijing'); + bb(2) = (3, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ +INFO: {"(3,lisi,beijing)","(3,lisi,beijing)","(4,lisi,beijing)"} +-- test both non-null right 2 same value +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ +INFO: {"(3,lisi,beijing)"} +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + bb(4) = NULL; + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ +INFO: {NULL,NULL,"(3,lisi,beijing)","(3,lisi,beijing)"} +-- test multiset intersect distinct +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + bb(4) = NULL; + aa = aa multiset intersect distinct bb; + RAISE INFO '%', aa; +end; +/ +INFO: {NULL,"(3,lisi,beijing)"} +----------------------------------------------------- +---------------- multiset except -------------------- +----------------------------------------------------- +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + aa = aa multiset except bb; + RAISE INFO '%', aa; +end; +/ +INFO: {NULL,"(2,lisi,beijing)"} +-- test multiset except distinct +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; 
+ bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + aa = aa multiset except distinct bb; + RAISE INFO '%', aa; +end; +/ +INFO: {"(2,lisi,beijing)"} +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + aa(2) = (1, 'lisi', 'beijing'); + cc = aa multiset except bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: {"(1,lisi,beijing)","(1,lisi,beijing)"} +INFO: {"(1,lisi,beijing)","(2,lisi,beijing)"} +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + aa(2) = (1, 'lisi', 'beijing'); + cc = aa multiset except distinct bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: {"(1,lisi,beijing)"} +INFO: {"(1,lisi,beijing)","(2,lisi,beijing)"} +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + cc = aa multiset except distinct bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: +INFO: [2:2]={"(2,lisi,beijing)"} +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + cc = aa multiset except bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ +INFO: +INFO: [2:2]={"(2,lisi,beijing)"} +-- test both null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset except bb; + RAISE INFO '%', aa; + aa(1) = (1, 'lisi', 'beijing'); + RAISE INFO '%', aa; +end; +/ +INFO: +INFO: {"(1,lisi,beijing)"} +drop type m_type; +drop type m_type1; +drop schema if exists plpgsql_multiset cascade; diff --git a/src/test/regress/expected/plpgsql_nest_compile.out b/src/test/regress/expected/plpgsql_nest_compile.out new file mode 100644 index 000000000..14f920f4f --- /dev/null +++ b/src/test/regress/expected/plpgsql_nest_compile.out @@ -0,0 +1,209 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_nest_compile; +NOTICE: schema "plpgsql_nest_compile" does not exist, skipping +create schema plpgsql_nest_compile; +set current_schema = plpgsql_nest_compile; +-- test insert into select array +create table tb1 (a varchar(10), b varchar(10), c varchar(10)); +create or replace procedure proc1() +as +declare +type arra is varray(10) of varchar; +a arra; +b arra; +begin +a(1) := 'a'; +a(2) := 'b'; +a(3) := 'c'; +a(4) := 'd'; +a(5) := 'e'; +a(6) := 'f'; +insert into tb1 select a(1),a(2),a(3); +insert into tb1 select a(4),a(5),a(6); +end; +/ +call proc1(); + proc1 +------- + +(1 row) + +select * from tb1; + a | b | c +---+---+--- + a | b | c + d | e | f +(2 rows) + +create or replace procedure proc2() +as +declare +type arra is table of varchar; +a arra; +b arra; +begin +a(1) := 'a1'; +a(2) := 'b1'; +a(3) := 'c1'; +a(4) := 'd1'; +a(5) := 'e1'; +a(6) := 'f1'; +insert into tb1 select a(1),a(2),a(3); 
+insert into tb1 select a(4),a(5),a(6); +end; +/ +call proc2(); + proc2 +------- + +(1 row) + +select * from tb1; + a | b | c +----+----+---- + a | b | c + d | e | f + a1 | b1 | c1 + d1 | e1 | f1 +(4 rows) + +create or replace package pck1 as +procedure proc1(c out varchar2); +end pck1; +/ +create or replace package body pck1 as +procedure proc1(c out varchar2) +as +declare +type arra is table of varchar; +a arra; +b arra; +begin +a(1) := 'a2'; +a(2) := 'b2'; +a(3) := 'c2'; +a(4) := 'd2'; +a(5) := 'e2'; +a(6) := 'f2'; +insert into tb1 select a(1),a(2),a(3); +insert into tb1 select a(4),a(5),a(6); +end; +end pck1; +/ +select * from pck1.proc1(); + c +--- + +(1 row) + +select * from tb1; + a | b | c +----+----+---- + a | b | c + d | e | f + a1 | b1 | c1 + d1 | e1 | f1 + a2 | b2 | c2 + d2 | e2 | f2 +(6 rows) + +drop package pck1; +NOTICE: drop cascades to function plpgsql_nest_compile.proc1() +-- test for loop +create table emp( +deptno smallint, +ename char(100), +salary int +); +create table emp_back( +deptno smallint, +ename char(100), +salary int +); +insert into emp values (10,'CLARK',7000),(10,'KING',8000),(10,'MILLER',12000),(20,'ADAMS',5000),(20,'FORD',4000); +create or replace PROCEDURE test_forloop_001() +As +begin +for data in delete from emp returning * loop +insert into emp_back values(data.deptno,data.ename,data.salary); +end loop; +end; +/ +call test_forloop_001(); + test_forloop_001 +------------------ + +(1 row) + +select * from emp; + deptno | ename | salary +--------+-------+-------- +(0 rows) + +select * from emp_back; + deptno | ename | salary +--------+------------------------------------------------------------------------------------------------------+-------- + 10 | CLARK | 7000 + 10 | KING | 8000 + 10 | MILLER | 12000 + 20 | ADAMS | 5000 + 20 | FORD | 4000 +(5 rows) + +create or replace package pack0 is + function f1(ss in int) return int; +end pack0; +/ +create or replace package body pack0 is +function f1(ss in int) return int as + va int; +begin + va := ss; + return va; +end; +end pack0; +/ +create or replace package pack01 is +procedure main(); +end pack01; +/ +create or replace package body pack01 is +xx number:=dbe_sql.register_context(); +yy1 int:=pack0.f1(1); +procedure main() is +yy int; +begin +yy :=pack0.f1(1); +end; +end pack01; +/ +create or replace package body pack01 is +xx number:=dbe_sql.register_context(); +yy1 int:=pack0.f1(1); +procedure main() is +yy int; +begin +yy :=pack0.f1(1); +end; +end pack01; +/ +drop package pack01; +NOTICE: drop cascades to function plpgsql_nest_compile.main() +drop package pack0; +NOTICE: drop cascades to function plpgsql_nest_compile.f1(integer) +drop schema if exists plpgsql_nest_compile cascade; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to table tb1 +drop cascades to function proc1() +drop cascades to function proc2() +drop cascades to table emp +drop cascades to table emp_back +drop cascades to function test_forloop_001() diff --git a/src/test/regress/expected/plpgsql_override_out.out b/src/test/regress/expected/plpgsql_override_out.out new file mode 100644 index 000000000..73edac4ee --- /dev/null +++ b/src/test/regress/expected/plpgsql_override_out.out @@ -0,0 +1,753 @@ +-- test plsql's out param override +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +drop schema if exists plpgsql_override_out; +NOTICE: schema "plpgsql_override_out" does not exist, skipping +create schema plpgsql_override_out; +set current_schema = 
plpgsql_override_out; +set behavior_compat_options = 'proc_outparam_override'; +create or replace package pck1 is +procedure p1; +procedure p1(v1 in varchar2); +procedure p1(v1 in varchar2, v2 in varchar2); +procedure p1(v1 in varchar2, v2 in varchar2, v3 in varchar2); + +procedure p1(v1 out int); +procedure p1(v1 out int, v2 out int); +procedure p1(v1 out int, v2 out int, v3 out int); + +procedure p1(v1 in varchar2, v2 out int); +procedure p1(v1 in varchar2, v2 out int, v3 out int); + +procedure p1(v1 out int, v2 in varchar2); +procedure p1(v1 out int, v2 in varchar2, v3 in varchar2); + +procedure p1(v1 inout int, v2 inout int, v3 inout int, v4 inout int); +procedure p1(v1 inout int, v2 inout int, v3 inout varchar2, v4 inout varchar2); + +procedure p; +end pck1; +/ +create or replace package body pck1 is + +procedure p1 is +begin +raise notice 'p1'; +end; + +procedure p1(v1 in varchar2) is +begin +raise notice 'p1_1_varchar2'; +end; + +procedure p1(v1 in varchar2, v2 in varchar2) is +begin +raise notice 'p1_2_varchar2'; +end; + +procedure p1(v1 in varchar2, v2 in varchar2, v3 in varchar2) is +begin +raise notice 'p1_3_varchar2'; +end; + +procedure p1(v1 out int) is +begin +raise notice 'p1_1_int'; +end; + +procedure p1(v1 out int, v2 out int) is +begin +raise notice 'p1_2_int'; +end; + +procedure p1(v1 out int, v2 out int, v3 out int) is +begin +raise notice 'p1_3_int'; +end; + +procedure p1(v1 in varchar2, v2 out int) is +begin +raise notice 'p1_1_varchar_1_int'; +end; + +procedure p1(v1 in varchar2, v2 out int, v3 out int) is +begin +raise notice 'p1_1_varchar_2_int'; +end; + +procedure p1(v1 out int, v2 in varchar2) is +begin +raise notice 'p1_1_int_1_varchar'; +end; + +procedure p1(v1 out int, v2 in varchar2, v3 in varchar2) is +begin +raise notice 'p1_1_int_2_varchar'; +end; + +procedure p1(v1 inout int, v2 inout int, v3 inout int, v4 inout int) is +begin +raise notice 'p1_4_inout_4_int'; +end; + +procedure p1(v1 inout int, v2 inout int, v3 inout varchar2, v4 inout varchar2) is +begin +raise notice 'p1_4_inout_2_int_2_varchar'; +end; + +procedure p is +a1 varchar2(10); +a2 varchar2(10); +a3 varchar2(10); +a4 varchar2(10); +b1 int; +b2 int; +b3 int; +b4 int; +begin +a1 := 'a1'; +a2 := 'a2'; +a3 := 'a3'; +a4 := 'a4'; +b1 := 1; +b2 := 2; +b3 := 3; +b4 := 4; +p1(); +p1(a1); +p1(b1); +p1(a1, a2); +p1(a1, b1); +p1(b1, b2); +p1(b1, a1); +p1(a1, a2, a3); +p1(b1, b2, b3); +p1(a1, b1, b2); +p1(b1, a1, a2); +p1(b1, b2, b3, b4); +p1(b1, b2, a1, a2); +end; +end pck1; +/ +-- test procedure override with out args before in args +CREATE OR REPLACE PROCEDURE test_in_out_in(a in int, b inout int, c out int, d in varchar(200), e out varchar2(200)) +PACKAGE +AS +DECLARE +new_deptno NUMBER; +BEGIN +raise notice '%,%,%,%,%', a,b,c,d,e; +new_deptno :=10; +new_deptno := new_deptno+a+b; +END; +/ +call test_in_out_in(1,2,3,'a','b'); +NOTICE: 1,2,,a, + b | c | e +---+---+--- + 2 | | +(1 row) + +begin; +CURSOR temp_cursor NO SCROLL FOR SELECT test_in_out_in(1,2,3,'a','b'); +FETCH FORWARD 1 FROM temp_cursor; +NOTICE: 1,2,,a, +CONTEXT: referenced column: test_in_out_in + test_in_out_in +---------------- + (2,,) +(1 row) + +end; +SELECT * from test_in_out_in(1,2,3,'a','b'); +NOTICE: 1,2,,a, + b | c | e +---+---+--- + 2 | | +(1 row) + +set behavior_compat_options = ''; +call test_in_out_in(1,2,3,'a','b'); +NOTICE: 1,2,,a, + b | c | e +---+---+--- + 2 | | +(1 row) + +begin; +CURSOR temp_cursor NO SCROLL FOR SELECT test_in_out_in(1,2,'a'); +FETCH FORWARD 1 FROM temp_cursor; +NOTICE: 1,2,,a, +CONTEXT: referenced 
column: test_in_out_in + test_in_out_in +---------------- + (2,,) +(1 row) + +end; +SELECT * from test_in_out_in(1,2,'a'); +NOTICE: 1,2,,a, + b | c | e +---+---+--- + 2 | | +(1 row) + +---- +-- test in/out/inout args +---- +-- test procedure +CREATE OR REPLACE PROCEDURE iob_proc(a in int, b out int, c inout int) +AS +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +END; +/ +set behavior_compat_options = ''; +call iob_proc(1,2,3); -- ok +NOTICE: 1,,3 + b | c +---+--- + | 3 +(1 row) + +call iob_proc(1,2); +ERROR: function "iob_proc" with 2 parameters doesn't exist +select * from iob_proc(1,2,3); +ERROR: function iob_proc(integer, integer, integer) does not exist +LINE 1: select * from iob_proc(1,2,3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from iob_proc(1,2); -- ok +NOTICE: 1,,2 + b | c +---+--- + | 2 +(1 row) + +set behavior_compat_options = 'proc_outparam_override'; +call iob_proc(1,2,3); -- ok +NOTICE: 1,,3 + b | c +---+--- + | 3 +(1 row) + +call iob_proc(1,2); +ERROR: function iob_proc(integer, integer) does not exist +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from iob_proc(1,2,3); -- ok +NOTICE: 1,,3 + b | c +---+--- + | 3 +(1 row) + +select * from iob_proc(1,2); +ERROR: function iob_proc(integer, integer) does not exist +LINE 1: select * from iob_proc(1,2); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +CREATE OR REPLACE PROCEDURE bio_proc(a inout int, b in int, c out int) +AS +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +END; +/ +set behavior_compat_options = ''; +call bio_proc(1,2,3); -- ok +NOTICE: 1,2, + a | c +---+--- + 1 | +(1 row) + +call bio_proc(1,2); +ERROR: function "bio_proc" with 2 parameters doesn't exist +select * from bio_proc(1,2,3); +ERROR: function bio_proc(integer, integer, integer) does not exist +LINE 1: select * from bio_proc(1,2,3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from bio_proc(1,2); -- ok +NOTICE: 1,2, + a | c +---+--- + 1 | +(1 row) + +set behavior_compat_options = 'proc_outparam_override'; +call bio_proc(1,2,3); -- ok +NOTICE: 1,2, + a | c +---+--- + 1 | +(1 row) + +call bio_proc(1,2); +ERROR: function bio_proc(integer, integer) does not exist +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from bio_proc(1,2,3); -- ok +NOTICE: 1,2, + a | c +---+--- + 1 | +(1 row) + +select * from bio_proc(1,2); +ERROR: function bio_proc(integer, integer) does not exist +LINE 1: select * from bio_proc(1,2); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +CREATE OR REPLACE PROCEDURE obi_proc(a out int, b inout int, c in int) +AS +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +END; +/ +set behavior_compat_options = ''; +call obi_proc(1,2,3); -- ok +NOTICE: ,2,3 + a | b +---+--- + | 2 +(1 row) + +call obi_proc(1,2); +ERROR: function "obi_proc" with 2 parameters doesn't exist +select * from obi_proc(1,2,3); +ERROR: function obi_proc(integer, integer, integer) does not exist +LINE 1: select * from obi_proc(1,2,3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
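+-- Recap of the rule exercised above (a sketch only; p is a hypothetical
+-- procedure, not part of this suite): with behavior_compat_options empty,
+-- CALL binds every declared parameter, OUT ones included, while SELECT-style
+-- invocation binds only the IN/INOUT ones:
+--   create procedure p(a in int, b out int) as begin b := a; end;
+--   call p(1, 0);        -- ok: a placeholder fills the OUT slot
+--   select * from p(1);  -- ok: the OUT slot is omitted from the signature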
+select * from obi_proc(1,2); -- ok +NOTICE: ,1,2 + a | b +---+--- + | 1 +(1 row) + +set behavior_compat_options = 'proc_outparam_override'; +call obi_proc(1,2,3); -- ok +NOTICE: ,2,3 + a | b +---+--- + | 2 +(1 row) + +call obi_proc(1,2); +ERROR: function obi_proc(integer, integer) does not exist +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from obi_proc(1,2,3); -- ok +NOTICE: ,2,3 + a | b +---+--- + | 2 +(1 row) + +select * from obi_proc(1,2); +ERROR: function obi_proc(integer, integer) does not exist +LINE 1: select * from obi_proc(1,2); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- test function +CREATE OR REPLACE FUNCTION iob_func(a in int, b out int, c inout int) RETURNS SETOF RECORD +AS $$ +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +return; +END +$$ +LANGUAGE plpgsql; +set behavior_compat_options = ''; +call iob_func(1,2,3); --ok +NOTICE: 1,,3 + b | c +---+--- +(0 rows) + +call iob_func(1,2); +ERROR: function "iob_func" with 2 parameters doesn't exist +select * from iob_func(1,2,3); +ERROR: function iob_func(integer, integer, integer) does not exist +LINE 1: select * from iob_func(1,2,3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from iob_func(1,2); -- ok +NOTICE: 1,,2 + b | c +---+--- +(0 rows) + +set behavior_compat_options = 'proc_outparam_override'; +call iob_func(1,2,3); +NOTICE: 1,,3 + iob_func | b | c +----------+---+--- +(0 rows) + +call iob_func(1,2); +ERROR: function iob_func(integer, integer) does not exist +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from iob_func(1,2,3); +NOTICE: 1,,3 + iob_func | b | c +----------+---+--- +(0 rows) + +select * from iob_func(1,2); -- ok +ERROR: function iob_func(integer, integer) does not exist +LINE 1: select * from iob_func(1,2); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +CREATE OR REPLACE FUNCTION bio_func(a inout int, b in int, c out int) RETURNS SETOF RECORD +AS $$ +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +return; +END +$$ +LANGUAGE plpgsql; +set behavior_compat_options = ''; +call bio_func(1,2,3); -- ok +NOTICE: 1,2, + a | c +---+--- +(0 rows) + +call bio_func(1,2); +ERROR: function "bio_func" with 2 parameters doesn't exist +select * from bio_func(1,2,3); +ERROR: function bio_func(integer, integer, integer) does not exist +LINE 1: select * from bio_func(1,2,3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from bio_func(1,2); -- ok +NOTICE: 1,2, + a | c +---+--- +(0 rows) + +set behavior_compat_options = 'proc_outparam_override'; +call bio_func(1,2,3); +NOTICE: 1,2, + bio_func | a | c +----------+---+--- +(0 rows) + +call bio_func(1,2); +ERROR: function bio_func(integer, integer) does not exist +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from bio_func(1,2,3); +NOTICE: 1,2, + bio_func | a | c +----------+---+--- +(0 rows) + +select * from bio_func(1,2); -- ok +ERROR: function bio_func(integer, integer) does not exist +LINE 1: select * from bio_func(1,2); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
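+-- Note on the function cases above (a hedged sketch; f is hypothetical):
+-- with proc_outparam_override set, both CALL and SELECT must supply every
+-- declared parameter, OUT ones included, and the result set gains a leading
+-- column holding the function's own return value:
+--   -- f(a in int, b out int) return setof record
+--   call f(1, 0);           -- result columns: (f, b)
+--   select * from f(1, 0);  -- same binding rules as CALL under the override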
+CREATE OR REPLACE FUNCTION obi_func(a out int, b inout int, c in int) RETURNS SETOF RECORD +AS $$ +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +return; +END +$$ +LANGUAGE plpgsql; +set behavior_compat_options = ''; +call obi_func(1,2,3); -- ok +NOTICE: ,2,3 + a | b +---+--- +(0 rows) + +call obi_func(1,2); +ERROR: function "obi_func" with 2 parameters doesn't exist +select * from obi_func(1,2,3); +ERROR: function obi_func(integer, integer, integer) does not exist +LINE 1: select * from obi_func(1,2,3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from obi_func(1,2); -- ok +NOTICE: ,1,2 + a | b +---+--- +(0 rows) + +set behavior_compat_options = 'proc_outparam_override'; +call obi_func(1,2,3); +NOTICE: ,2,3 + obi_func | a | b +----------+---+--- +(0 rows) + +call obi_func(1,2); +ERROR: function obi_func(integer, integer) does not exist +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from obi_func(1,2,3); +NOTICE: ,2,3 + obi_func | a | b +----------+---+--- +(0 rows) + +select * from obi_func(1,2); -- ok +ERROR: function obi_func(integer, integer) does not exist +LINE 1: select * from obi_func(1,2); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop procedure test_in_out_in; +drop package pck1; +NOTICE: drop cascades to 14 other objects +DETAIL: drop cascades to function plpgsql_override_out.p1() +drop cascades to function plpgsql_override_out.p1(character varying) +drop cascades to function plpgsql_override_out.p1(character varying,character varying) +drop cascades to function plpgsql_override_out.p1(character varying,character varying,character varying) +drop cascades to function plpgsql_override_out.p1() +drop cascades to function plpgsql_override_out.p1() +drop cascades to function plpgsql_override_out.p1() +drop cascades to function plpgsql_override_out.p1(character varying) +drop cascades to function plpgsql_override_out.p1(character varying) +drop cascades to function plpgsql_override_out.p1(character varying) +drop cascades to function plpgsql_override_out.p1(character varying,character varying) +drop cascades to function plpgsql_override_out.p1(integer,integer,integer,integer) +drop cascades to function plpgsql_override_out.p1(integer,integer,character varying,character varying) +drop cascades to function plpgsql_override_out.p() +-- test override procedure with error param +set behavior_compat_options='proc_outparam_override'; +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create type o1_test as (v01 number, v03 varchar2, v02 number); +create or replace package pck1 is +procedure p1(a o1_test,b out varchar2); +procedure p1(a2 int[], b2 out varchar2); +end pck1; +/ +create or replace package body pck1 is +procedure p1(a o1_test,b out varchar2) is +begin +b:=a.v01; +raise info 'b:%',b; +end; +procedure p1(a2 int[], b2 out varchar2) is +begin +b2:=a2(2); +raise info 'b2:%',b2; +end; +end pck1; +/ +-- should error +declare +begin +pck1.p1((1,'b',2),'a'); +end; +/ +INFO: b:1 +CONTEXT: SQL statement "CALL pck1.p1((1,'b',2),'a')" +PL/pgSQL function inline_code_block line 3 at SQL statement +ERROR: query has no destination for result data +HINT: If you want to discard the results of a SELECT, use PERFORM instead. 
+CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement
+drop table if exists test_tb;
+NOTICE: table "test_tb" does not exist, skipping
+create table test_tb(c1 int,c2 varchar2);
+insert into test_tb values(1,'a'),(2,'b'),(3,'c');
+drop package if exists pck1;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function plpgsql_override_out.p1(o1_test)
+drop cascades to function plpgsql_override_out.p1(integer[])
+create or replace package pck1 is
+type tp1 is record(v01 int, v02 varchar2);
+procedure p1(a inout tp1,b varchar2);
+end pck1;
+/
+create or replace package body pck1 is
+procedure p1(a inout tp1,b varchar2) is
+begin
+select * into a from test_tb where c2=b;
+end;
+end pck1;
+/
+declare
+var pck1.tp1;
+begin
+perform pck1.p1(var,'a');
+end;
+/
+ERROR: not support perform when behavior_compat_options="proc_outparam_override"
+LINE 4: perform pck1.p1(var,'a');
+ ^
+QUERY: DECLARE
+var pck1.tp1;
+begin
+perform pck1.p1(var,'a');
+end
+set behavior_compat_options='proc_outparam_override';
+create or replace procedure p2(a int,b out int) is
+begin
+raise info 'a:%', a+1;
+end;
+/
+drop table if exists test_tb;
+create table test_tb(c1 int,c2 varchar2);
+insert into test_tb values(1,'a'),(2,'b'),(3,'c');
+drop package if exists pck1;
+--?.*
+create or replace package pck1 is
+type tp1 is table of varchar2(1024) index by varchar2(4000);
+procedure p1(out_var out tp1,in_var varchar2);
+end pck1;
+/
+create or replace package body pck1 is
+procedure p1(out_var out tp1,in_var varchar2) is
+begin
+select c1 into out_var(in_var) from test_tb limit 1;
+out_var('aa'):='aa';
+end;
+end pck1;
+/
+declare
+var pck1.tp1;
+begin
+perform pck1.p1(var,'a');--not supported, raises an error
+end;
+/
+ERROR: not support perform when behavior_compat_options="proc_outparam_override"
+LINE 4: perform pck1.p1(var,'a');--not supported, raises an error
+ ^
+QUERY: DECLARE
+var pck1.tp1;
+begin
+perform pck1.p1(var,'a');--not supported, raises an error
+end
+\df
+ List of functions
+ Schema | Name | Result data type | Argument data types | Type | fencedmode | propackage | prokind
+----------------------+----------+------------------+-------------------------------------------+--------+------------+------------+---------
+ plpgsql_override_out | bio_func | SETOF record | INOUT a integer, b integer, OUT c integer | normal | f | f | f
+ plpgsql_override_out | bio_proc | record | INOUT a integer, b integer, OUT c integer | normal | f | f | p
+ plpgsql_override_out | iob_func | SETOF record | a integer, OUT b integer, INOUT c integer | normal | f | f | f
+ plpgsql_override_out | iob_proc | record | a integer, OUT b integer, INOUT c integer | normal | f | f | p
+ plpgsql_override_out | obi_func | SETOF record | OUT a integer, INOUT b integer, c integer | normal | f | f | f
+ plpgsql_override_out | obi_proc | record | OUT a integer, INOUT b integer, c integer | normal | f | f | p
+ plpgsql_override_out | p2 | integer | a integer, OUT b integer | normal | f | f | p
+(7 rows)
+
+drop package pck1;
+--?.*
+drop type o1_test;
+set behavior_compat_options = '';
+create or replace procedure proc_test
+as
+work_date varchar2;
+begin
+work_date:='202208';
+end;
+/
+call proc_test();
+ proc_test
+-----------
+
+(1 row)
+
+create or replace procedure proc_test
+as
+workZ varchar2;
+begin
+workZ:='202208';
+end;
+/
+call proc_test();
+ proc_test
+-----------
+
+(1 row)
+
+create or replace procedure proc_test
+as
+read_1 varchar2;
+begin
+read_1:='202208';
+end;
+/
+call proc_test();
+ proc_test
+-----------
+
+(1 row)
+
+create or replace procedure proc_test
+as
+transaction_1 varchar2; +begin +transaction_1:='202208'; +end; +/ +call proc_test(); + proc_test +----------- + +(1 row) + +create or replace procedure proc_test +as +isolation1 varchar2; +begin +isolation1:='202208'; +end; +/ +call proc_test(); + proc_test +----------- + +(1 row) + +create or replace procedure proc_test +as +deferrableZ varchar2; +begin +deferrableZ:='202208'; +end; +/ +call proc_test(); + proc_test +----------- + +(1 row) + +create or replace procedure proc_test +as +not_1 varchar2; +begin +not_1:='202208'; +end; +/ +call proc_test(); + proc_test +----------- + +(1 row) + +drop procedure proc_test; +drop schema if exists plpgsql_override_out cascade; +NOTICE: drop cascades to 8 other objects +DETAIL: drop cascades to function iob_proc(integer,integer) +drop cascades to function bio_proc(integer,integer) +drop cascades to function obi_proc(integer,integer) +drop cascades to function iob_func(integer,integer) +drop cascades to function bio_func(integer,integer) +drop cascades to function obi_func(integer,integer) +drop cascades to function p2(integer) +drop cascades to table test_tb diff --git a/src/test/regress/expected/plpgsql_package_param.out b/src/test/regress/expected/plpgsql_package_param.out new file mode 100644 index 000000000..4ed02a3cb --- /dev/null +++ b/src/test/regress/expected/plpgsql_package_param.out @@ -0,0 +1,23 @@ +create or replace package pkg1 as + function func_add_sql(a integer, b integer) return integer immutable; +end pkg1; +create or replace package body pkg1 as data1 integer; + function func_add_sql(a integer, b integer) return integer stable as + begin select a+b into data1; + return data1; + end; +end pkg1; +/ +ERROR: function declared in package specification and package body must be the same, function: func_add_sql +create or replace package pkg1 as + function func_add_sql(a integer, b integer) return integer immutable; +end pkg1; +create or replace package body pkg1 as data1 integer; + function func_add_sql(a integer, b integer) return integer immutable as + begin select a+b into data1; + return data1; + end; +end pkg1; +/ +drop package if exists pkg1; +NOTICE: drop cascades to function public.func_add_sql(integer,integer) diff --git a/src/test/regress/expected/plpgsql_package_type.out b/src/test/regress/expected/plpgsql_package_type.out new file mode 100644 index 000000000..07af4ff6d --- /dev/null +++ b/src/test/regress/expected/plpgsql_package_type.out @@ -0,0 +1,1262 @@ +-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios -- +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_packagetype1; +NOTICE: schema "plpgsql_packagetype1" does not exist, skipping +create schema plpgsql_packagetype1; +drop schema if exists plpgsql_packagetype2; +NOTICE: schema "plpgsql_packagetype2" does not exist, skipping +create schema plpgsql_packagetype2; +-- initialize table and type-- +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +-- test package type internal use +set search_path=plpgsql_packagetype1; +create or replace package p_test1 as + type t1 is record(c1 varchar2, c2 int); + type t2 is table of t1; + function f1(ss in t1) return t1; + function f2(ss in t2) return t2; + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2); + procedure p2(aa in int); +end p_test1; +/ +create or replace package p_test1 as + type t1 is 
record(c1 varchar2, c2 int); + type t2 is table of t1; + function f1(ss in t1) return t1; + function f2(ss in t2) return t2; + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2); + procedure p2(aa in int); +end p_test1; +/ +create or replace package body p_test1 as + vb char; + function f1(ss in t1) return t1 as + va t1; + begin + va := ss; + plpgsql_packagetype1.p_test1.vb := ''; + raise info '%',va; + return va; + end; + + function f2(ss in t2) return t2 as + vb t2; + begin + vb := ss; + return vb; + end; + + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure p2(aa in int) as + aa t1; + bb t2; + cc t1; + dd t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + p1(aa,bb,cc,dd); + end; + +end p_test1; +/ +create or replace package body p_test1 as + function f1(ss in t1) return t1 as + va t1; + begin + va := ss; + raise info '%',va; + return va; + end; + + function f2(ss in t2) return t2 as + vb t2; + begin + vb := ss; + return vb; + end; + + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure p2(aa in int) as + aa t1; + bb t2; + cc t1; + dd t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + p1(aa,bb,cc,dd); + end; + +end p_test1; +/ +select p_test1.f1(('a',3)); +INFO: (a,3) +CONTEXT: referenced column: f1 + f1 +------- + (a,3) +(1 row) + +select p_test1.f2(array[('a',1)::p_test1.t1,('b',2)::p_test1.t1,('c',4)::p_test1.t1]); + f2 +--------------------------- + {"(a,1)","(b,2)","(c,4)"} +(1 row) + +-- test package type used in another package +create or replace package p_test2 as + var1 p_test1.t1; + type t21 is record(c1 p_test1.t1); + type t22 is table of p_test1.t1; + function ff1(ss in p_test1.t1) return p_test1.t1; + procedure pp1(aa in p_test1.t1,bb in p_test1.t2,cc out p_test1.t1,dd out p_test1.t2); + procedure pp2(aa in int); +end p_test2; +/ +create or replace package body p_test2 as + function ff1(ss in p_test1.t1) return p_test1.t1 as + va p_test1.t1; + begin + va := ss; + raise info '%',va; + return va; + end; + + + procedure pp1(aa in p_test1.t1,bb in p_test1.t2,cc out p_test1.t1,dd out p_test1.t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure pp2(aa in int) as + aa p_test1.t1; + bb p_test1.t2; + cc p_test1.t1; + dd p_test1.t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + pp1(aa,bb,cc,dd); + end; + +end p_test2; +/ +select p_test2.ff1(('a',3)); +INFO: (a,3) +CONTEXT: referenced column: ff1 + ff1 +------- + (a,3) +(1 row) + +-- test package type used in another schema package +set search_path=plpgsql_packagetype2; +create or replace package p_test2 as + var1 plpgsql_packagetype1.p_test1.t1; + type t21 is record(c1 plpgsql_packagetype1.p_test1.t1); + type t22 is table of plpgsql_packagetype1.p_test1.t1; + function ff1(ss in plpgsql_packagetype1.p_test1.t1) return plpgsql_packagetype1.p_test1.t1; + procedure pp1(aa in plpgsql_packagetype1.p_test1.t1,bb in plpgsql_packagetype1.p_test1.t2,cc out plpgsql_packagetype1.p_test1.t1,dd out plpgsql_packagetype1.p_test1.t2); +end p_test2; +/ +create or replace package body p_test2 as + function ff1(ss in plpgsql_packagetype1.p_test1.t1) return plpgsql_packagetype1.p_test1.t1 as + va plpgsql_packagetype1.p_test1.t1; + begin + va := ss; + raise info '%',va; + return va; + end; + + + procedure pp1(aa in plpgsql_packagetype1.p_test1.t1,bb in plpgsql_packagetype1.p_test1.t2,cc out 
plpgsql_packagetype1.p_test1.t1,dd out plpgsql_packagetype1.p_test1.t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure pp2(aa in int) as + aa plpgsql_packagetype1.p_test1.t1; + bb plpgsql_packagetype1.p_test1.t2; + cc plpgsql_packagetype1.p_test1.t1; + dd plpgsql_packagetype1.p_test1.t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + pp1(aa,bb,cc,dd); + end; + +end p_test2; +/ +select p_test2.ff1(('a',3)); +INFO: (a,3) +CONTEXT: referenced column: ff1 + ff1 +------- + (a,3) +(1 row) + +drop package p_test2; +NOTICE: drop cascades to 3 other objects +--?.* +--?.* +drop cascades to function plpgsql_packagetype2.pp2(integer) +drop package plpgsql_packagetype1.p_test2; +NOTICE: drop cascades to 3 other objects +--?.* +--?.* +drop cascades to function plpgsql_packagetype1.pp2(integer) +drop package plpgsql_packagetype1.p_test1; +NOTICE: drop cascades to 4 other objects +--?.* +--?.* +--?.* +drop cascades to function plpgsql_packagetype1.p2(integer) +--test ref cursortype +create or replace package test_cur +IS +type ref_cur is ref cursor; +end test_cur; +/ +create or replace package body test_cur +IS +a int; +end test_cur; +/ +create or replace package test_cur2 +IS +procedure proc1(cur1 test_cur.ref_cur); +end test_cur2; +/ +create or replace package body test_cur2 +IS +procedure proc1(cur1 test_cur.ref_cur) +is +BEGIN +cur1.a.a:=2; +end; +end test_cur2; +/ +ERROR: "cur1.a.a" is not a known variable +LINE 3: cur1.a.a:=2; + ^ +QUERY: DECLARE +BEGIN +cur1.a.a:=2; +end +create or replace package test_cur3 +IS +procedure proc11(cur1 test_cur.ref_cur); +function func11() return test_cur.ref_cur; +end test_cur3; +/ +create or replace package body test_cur3 +IS +procedure proc11(cur1 test_cur.ref_cur) +is +BEGIN +cur1.a.a:=2; +end; +function func11() return test_cur.ref_cur +is +BEGIN +return 1; +end; +end test_cur3; +/ +ERROR: "cur1.a.a" is not a known variable +LINE 3: cur1.a.a:=2; + ^ +QUERY: DECLARE +BEGIN +cur1.a.a:=2; +end +create or replace package test_cur4 +IS +procedure proc111(cur1 test_cur.ref_cur); +function func111 return test_cur.ref_cur; +end test_cur4; +/ +create or replace package body test_cur4 +IS +procedure proc111(cur1 test_cur.ref_cur) +is +BEGIN +cur1.a.a:=2; +cur.a.a:=2; +end; +function func111() return test_cur.ref_cur +is +BEGIN +return 1; +end; +end test_cur4; +/ +ERROR: "cur1.a.a" is not a known variable +LINE 3: cur1.a.a:=2; + ^ +QUERY: DECLARE +BEGIN +cur1.a.a:=2; +cur.a.a:=2; +end +drop package if exists test_cur; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.proc1(refcursor) +drop cascades to function plpgsql_packagetype2.proc11(refcursor) +drop cascades to function plpgsql_packagetype2.func11() +drop cascades to function plpgsql_packagetype2.proc111(refcursor) +drop cascades to function plpgsql_packagetype2.func111() +drop package if exists test_cur2; +NOTICE: package test_cur2() does not exist, skipping +drop package if exists test_cur3; +NOTICE: package test_cur3() does not exist, skipping +drop package if exists test_cur4; +NOTICE: package test_cur4() does not exist, skipping +create or replace package pck1 is +type t1 is table of varchar2(10); +procedure pp11 (t1 in varchar2(10)); +procedure pp22 (t1 out varchar2(10)); +end pck1; +/ +create or replace package body pck1 is +procedure pp11 (t1 in varchar2(10)) is +begin +raise info '%', t1; +end; +procedure pp22 (t1 out varchar2(10)) is +begin +t1 := 'bb'; +raise info '%', t1; +end; +end pck1; +/ +call 
pck1.pp11('aa'); +INFO: aa + pp11 +------ + +(1 row) + +call pck1.pp22('cc'); +INFO: bb + t1 +---- + bb +(1 row) + +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.pp11(character varying) +drop cascades to function plpgsql_packagetype2.pp22() +--test package, type with same name +create or replace package plpgsql_packagetype1.pck2 is +va int; +end pck2; +/ +create type plpgsql_packagetype1.pck2 as (a int, b int); +DROP table if exists t1; +NOTICE: table "t1" does not exist, skipping +CREATE table t1 (a plpgsql_packagetype1.pck2); +insert into t1 values((1,2)); +select * from t1; + a +------- + (1,2) +(1 row) + +create or replace package pck3 is +va plpgsql_packagetype1.pck2; +end pck3; +/ +DROP package pck3; +DROP package plpgsql_packagetype1.pck2; +DROP table t1; +DROP type plpgsql_packagetype1.pck2; +--test synonym name same with procedure itself +create procedure proc1 as +begin +null; +end; +/ +create synonym proc1 for proc1; +call proc1(); + proc1 +------- + +(1 row) + +DROP procedure proc1(); +--test package type form record +create or replace package pck1 is +type t1 is record (a int, b int); +type ta is varray(10) of varchar2(100); +type tb is varray(10) of int; +type tc is varray(10) of t1; +type td is table of varchar2(100); +type te is table of int; +type tf is table of t1; +end pck1; +/ +create or replace package pck2 is +type tb is record (col1 pck1.ta, col2 pck1.tb, col3 pck1.tc, col4 pck1.td, col5 pck1.te, col6 pck1.tf); +end pck2; +/ +DROP PACKAGE pck2; +DROP PACKAGE pck1; +-- test not support nested type +-- 1. array nest array +create or replace package pck1 as +type t1 is varray(10) of int; +type t2 is varray(10) of t1; +end pck1; +/ +ERROR: array type nested by array is not supported yet. +DETAIL: Define array type "t2" of array is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace function func1() return int as +type t1 is varray(10) of int; +type t2 is varray(10) of t1; +begin +return 0; +end; +/ +ERROR: array type nested by array is not supported yet. +DETAIL: Define array type "t2" of array is not supported yet. +CONTEXT: compilation of PL/pgSQL function "func1" near line 3 +-- 2. table nest array +create or replace package pck1 as +type t1 is varray(10) of int; +type t2 is table of t1; +end pck1; +/ +ERROR: array type nested by table type is not supported yet. +DETAIL: Define table type "t2" of array is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace function func1() return int as +type t1 is varray(10) of int; +type t2 is table of t1; +begin +return 0; +end; +/ +ERROR: array type nested by table type is not supported yet. +DETAIL: Define table type "t2" of array is not supported yet. +CONTEXT: compilation of PL/pgSQL function "func1" near line 3 +-- 3. array nest table +create or replace package pck1 as +type t1 is table of int; +type t2 is varray(10) of t1; +end pck1; +/ +ERROR: table type nested by array is not supported yet. +DETAIL: Define array type "t2" of table type is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace function func1() return int as +type t1 is table of int; +type t2 is varray(10) of t1; +begin +return 0; +end; +/ +ERROR: table type nested by array is not supported yet. +DETAIL: Define array type "t2" of table type is not supported yet. +CONTEXT: compilation of PL/pgSQL function "func1" near line 3 +-- 4. 
table nest table, will be supported soon +-- create or replace package pck1 as +-- type t1 is table of int; +-- type t2 is table of t1; +-- end pck1; +-- / +-- create or replace function func1() return int as +-- type t1 is table of int; +-- type t2 is table of t1; +-- begin +-- return 0; +-- end; +-- / +-- 5. record nest ref cursor +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create or replace package pck1 as +type t1 is ref cursor; +type t2 is record(c1 t1,c2 int); +end pck1; +/ +ERROR: ref cursor type nested by record is not supported yet. +DETAIL: Define a record type of ref cursor type is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace function func1() return int as +type t1 is ref cursor; +type t2 is record(c1 t1,c2 int); +begin +return 0; +end; +/ +ERROR: ref cursor type nested by record is not supported yet. +DETAIL: Define a record type of ref cursor type is not supported yet. +CONTEXT: compilation of PL/pgSQL function "func1" near line 3 +-- 6. table nest ref cursor +create or replace package pck1 as +type t1 is ref cursor; +type t2 is table of t1; +end pck1; +/ +ERROR: ref cursor type nested by table type is not supported yet. +DETAIL: Define table type "t2" of ref cursor is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace function func1() return int as +type t1 is ref cursor; +type t2 is table of t1; +begin +return 0; +end; +/ +ERROR: ref cursor type nested by table type is not supported yet. +DETAIL: Define table type "t2" of ref cursor is not supported yet. +CONTEXT: compilation of PL/pgSQL function "func1" near line 3 +-- 7. varray nest ref cursor +create or replace package pck1 as +type t1 is ref cursor; +type t2 is varray(10) of t1; +end pck1; +/ +ERROR: ref cursor type nested by array is not supported yet. +DETAIL: Define array type "t2" of ref cursor type is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 3 +create or replace function func1() return int as +type t1 is ref cursor; +type t2 is varray(10) of t1; +begin +return 0; +end; +/ +ERROR: ref cursor type nested by array is not supported yet. +DETAIL: Define array type "t2" of ref cursor type is not supported yet. +CONTEXT: compilation of PL/pgSQL function "func1" near line 3 +DROP package pck1; +ERROR: package pck1 does not exist +DROP function func1(); +ERROR: function func1 does not exist +-- 8.package nest +create or replace package pck1 as +type t1 is table of int; +type t2 is varray(10) of int; +type t3 is ref cursor; +end pck1; +/ +create or replace package pck2 as +type t1 is varray(10) of pck1.t2; +v1 t1; +function func1() return int; +end pck2; +/ +ERROR: array or table type nested by array type is not supported yet. +DETAIL: Define array type "t1" of array or table type is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 2 +-- 9.package nested +create or replace package pck2 as +type t1 is table of pck1.t1; +v1 t1; +function func1() return int; +end pck2; +/ +ERROR: array or table type nested by table type is not supported yet. +DETAIL: Define table type "t1" of array or table type is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 2 +-- 10.package nested +create or replace package pck2 as +type t1 is table of pck1.t3; +v1 t1; +function func1() return int; +end pck2; +/ +ERROR: ref cursor type nested by table type is not supported yet. +DETAIL: Define table type "t1" of ref cursor type is not supported yet. 
+CONTEXT: compilation of PL/pgSQL package near line 2 +-- 10.package nested +create or replace package pck2 as +type t1 is record(a pck1.t3); +v1 t1; +function func1() return int; +end pck2; +/ +ERROR: ref cursor type nested by record is not supported yet. +DETAIL: Define record type of ref cursor type is not supported yet. +CONTEXT: compilation of PL/pgSQL package near line 2 +DROP package pck2; +ERROR: package pck2 does not exist +DROP package pck1; +-- test type nested by private type +create or replace package pck1 as +type t1 is varray(10) of int; +type t2 is record (a int, b int); +end pck1; +/ +create or replace package body pck1 as +type t3 is varray(10) of int; +type t4 is record (a t3, b int); +procedure p2 (a pck1.t2) is +type t5 is varray(10) of t4; +type t6 is varray(10) of t2; +begin +null; +end; +end pck1; +/ +DROP PACKAGE pck1; +--?.* +-- test replace body, do not influence head +create or replace package pck1 as +type t1 is varray(10) of int; +type t2 is record (a int, b int); +end pck1; +/ +create or replace package pck2 as +procedure p1 (a pck1.t1); +end pck2; +/ +create or replace package body pck2 as +procedure p1 (a pck1.t1) is +begin +null; +end; +end pck2; +/ +create or replace package body pck1 as +procedure p1 (a int) is +begin +null; +end; +end pck1; +/ +call pck2.p1(array[1,2,4]); + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function plpgsql_packagetype2.p1(integer[]) +DROP PACKAGE pck1; +NOTICE: drop cascades to function plpgsql_packagetype2.p1(integer) +-- test package table of type +create or replace package pck1 is +type t1 is table of int index by varchar2(10); +end pck1; +/ +create or replace package pck2 is +va pck1.t1; +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 is +begin +va('aaa') := 1; +va('a') := 2; +raise info '%', va; +end; +end pck2; +/ +call pck2.p1(); +INFO: {1,2} + p1 +---- + +(1 row) + +DROP PACKAGE pck2; +NOTICE: drop cascades to function plpgsql_packagetype2.p1() +DROP PACKAGE pck1; +-- test package record type as procedure in out param +-- (1) in param +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a in r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +va r1; +begin +va := (1,2); +p2(va); +va := (4,5); +p2(va); +end; +procedure p2 (a in r1) as +begin +raise info '%', a; +end; +end pck1; +/ +call pck1.p1(); +INFO: (1,2) +CONTEXT: SQL statement "CALL p2(va)" +PL/pgSQL function p1() line 5 at PERFORM +INFO: (4,5) +CONTEXT: SQL statement "CALL p2(va)" +PL/pgSQL function p1() line 7 at PERFORM + p1 +---- + +(1 row) + +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.p1() +--?.* +-- (2) out param +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +va r1; +begin +raise info '%', va; +p2(va); +raise info '%', va; +end; +procedure p2 (a out r1) as +begin +a.a := 11; +a.b := 22; +end; +end pck1; +/ +call pck1.p1(); +INFO: (,) +INFO: (11,22) + p1 +---- + +(1 row) + +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.p1() +drop cascades to function plpgsql_packagetype2.p2() +-- (3) inout param +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a inout r1); +end pck1; +/ +create or replace package body pck1 
as +procedure p1 as +va r1; +begin +va := (11,22); +raise info '%', va; +p2(va); +raise info '%', va; +end; +procedure p2 (a inout r1) as +begin +a.a := a.a + 1; +a.b := a.b + 1; +end; +end pck1; +/ +call pck1.p1(); +INFO: (11,22) +INFO: (12,23) + p1 +---- + +(1 row) + +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.p1() +--?.* +-- test more column number +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +type r2 is record (a int, b int, c int); +va r1; +vb r2; +begin +va := (1,2); +vb := (1,2,3); +raise info '%',va; +p2(vb); +raise info '%',va; +end; +procedure p2 (a out r1) as +begin +raise info '%', a; +a := (4,5); +end; +end pck1; +/ +call pck1.p1(); +INFO: (1,2) +INFO: (,) +CONTEXT: SQL statement "CALL p2(vb)" +PL/pgSQL function p1() line 9 at SQL statement +ERROR: mismatch between assignment and variable filed. +CONTEXT: PL/pgSQL function p1() line 9 at SQL statement +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.p1() +drop cascades to function plpgsql_packagetype2.p2() +-- test less column number +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +type r2 is record (a int); +va r1; +vb r2; +begin +va := (1,2); +vb.a := 1; +raise info '%',va; +p2(vb); +raise info '%',va; +end; +procedure p2 (a out r1) as +begin +raise info '%', a; +a := (4,5); +end; +end pck1; +/ +call pck1.p1(); +INFO: (1,2) +INFO: (,) +CONTEXT: SQL statement "CALL p2(vb)" +PL/pgSQL function p1() line 9 at SQL statement +ERROR: mismatch between assignment and variable filed. 
+CONTEXT: PL/pgSQL function p1() line 9 at SQL statement +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.p1() +drop cascades to function plpgsql_packagetype2.p2() +-- test wrong column type +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +type r2 is record (a int, b varchar2(10)); +va r1; +vb r2; +begin +va := (1,2); +vb := (1,'aa'); +raise info '%',va; +p2(vb); +raise info '%',va; +end; +procedure p2 (a out r1) as +begin +raise info '%', a; +a := (4,5); +end; +end pck1; +/ +call pck1.p1(); +INFO: (1,2) +--?.* +CONTEXT: SQL statement "CALL p2(vb)" +PL/pgSQL function p1() line 9 at SQL statement +INFO: (1,2) + p1 +---- + +(1 row) + +DROP PACKAGE pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_packagetype2.p1() +drop cascades to function plpgsql_packagetype2.p2() +-- test package type alter +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is table of int index by varchar(10); +type r3 is varray(10) of int; +end pck1; +/ +-- (1) grant or revoke +grant drop on type pck1.r1 to public; +ERROR: Not allowed to GRANT/REVOKE type "pck1.r1" +DETAIL: "pck1.r1" is a package or procedure type +grant alter on type pck1.r2 to public; +ERROR: Not allowed to GRANT/REVOKE type "pck1.r2" +DETAIL: "pck1.r2" is a package or procedure type +grant alter on type pck1.r3 to public; +ERROR: Not allowed to GRANT/REVOKE type "pck1.r3" +DETAIL: "pck1.r3" is a package or procedure type +revoke drop on type pck1.r1 from public; +ERROR: Not allowed to GRANT/REVOKE type "pck1.r1" +DETAIL: "pck1.r1" is a package or procedure type +revoke drop on type pck1.r2 from public; +ERROR: Not allowed to GRANT/REVOKE type "pck1.r2" +DETAIL: "pck1.r2" is a package or procedure type +revoke drop on type pck1.r3 from public; +ERROR: Not allowed to GRANT/REVOKE type "pck1.r3" +DETAIL: "pck1.r3" is a package or procedure type +-- (2) drop +DROP TYPE pck1.r1 cascade; +ERROR: Not allowed to drop type "pck1.r1" +DETAIL: "pck1.r1" is a package or procedure type +DROP TYPE pck1.r2 cascade; +ERROR: Not allowed to drop type "pck1.r2" +DETAIL: "pck1.r2" is a package or procedure type +DROP TYPE pck1.r3 cascade; +ERROR: Not allowed to drop type "pck1.r3" +DETAIL: "pck1.r3" is a package or procedure type +-- (3) alter: rename +ALTER TYPE pck1.r1 RENAME TO o1; +ERROR: Not allowed to alter type "pck1.r1" +DETAIL: "pck1.r1" is a package or procedure type +ALTER TYPE pck1.r2 RENAME TO o1; +ERROR: Not allowed to alter type "pck1.r2" +DETAIL: "pck1.r2" is a package or procedure type +ALTER TYPE pck1.r3 RENAME TO o1; +ERROR: Not allowed to alter type "pck1.r3" +DETAIL: "pck1.r3" is a package or procedure type +-- (4) alter: owner +ALTER TYPE pck1.r1 OWNER TO CURRENT_USER; +ERROR: Not allowed to alter type "pck1.r1" +DETAIL: "pck1.r1" is a package or procedure type +ALTER TYPE pck1.r2 OWNER TO CURRENT_USER; +ERROR: Not allowed to alter type "pck1.r2" +DETAIL: "pck1.r2" is a package or procedure type +ALTER TYPE pck1.r3 OWNER TO CURRENT_USER; +ERROR: Not allowed to alter type "pck1.r3" +DETAIL: "pck1.r3" is a package or procedure type +-- (5) alter: set schema +ALTER TYPE pck1.r1 SET SCHEMA public; +ERROR: Not allowed to alter type "pck1.r1" +DETAIL: "pck1.r1" is a package or procedure type +ALTER TYPE pck1.r2 SET SCHEMA public; +ERROR: Not allowed to alter type "pck1.r2" +DETAIL: 
"pck1.r2" is a package or procedure type +ALTER TYPE pck1.r3 SET SCHEMA public; +ERROR: Not allowed to alter type "pck1.r3" +DETAIL: "pck1.r3" is a package or procedure type +DROP PACKAGE pck1; +-- test package type as table of type column +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is table of int index by varchar(10); +type r3 is varray(10) of int; +end pck1; +/ +-- (1) as table +create table t1(a pck1.r1); +ERROR: type "pck1.r1" is not supported as column type +DETAIL: "pck1.r1" is a package or procedure type +create table t1(a pck1.r2); +ERROR: type "pck1.r2" is not supported as column type +DETAIL: "pck1.r2" is a package or procedure type +create table t1(a pck1.r3); +ERROR: type "pck1.r3" is not supported as column type +DETAIL: "pck1.r3" is a package or procedure type +-- (2) as type +create type o1 as (a pck1.r1); +ERROR: type "pck1.r1" is not supported as column type +DETAIL: "pck1.r1" is a package or procedure type +create type o1 as (a pck1.r2); +ERROR: type "pck1.r2" is not supported as column type +DETAIL: "pck1.r2" is a package or procedure type +create type o1 as (a pck1.r3); +ERROR: type "pck1.r3" is not supported as column type +DETAIL: "pck1.r3" is a package or procedure type +-- (3) in procedure +create or replace procedure p1 as +begin +create table t1(a pck1.r1); +end; +/ +call p1(); +ERROR: type "pck1.r1" is not supported as column type +DETAIL: "pck1.r1" is a package or procedure type +CONTEXT: SQL statement "create table t1(a pck1.r1)" +PL/pgSQL function p1() line 3 at SQL statement +create or replace procedure p1 as +begin +create table t1(a pck1.r2); +end; +/ +call p1(); +ERROR: type "pck1.r2" is not supported as column type +DETAIL: "pck1.r2" is a package or procedure type +CONTEXT: SQL statement "create table t1(a pck1.r2)" +PL/pgSQL function p1() line 3 at SQL statement +create or replace procedure p1 as +begin +create table t1(a pck1.r3); +end; +/ +call p1(); +ERROR: type "pck1.r3" is not supported as column type +DETAIL: "pck1.r3" is a package or procedure type +CONTEXT: SQL statement "create table t1(a pck1.r3)" +PL/pgSQL function p1() line 3 at SQL statement +create or replace procedure p1 as +begin +create type o1 as (a pck1.r1); +end; +/ +call p1(); +ERROR: type "pck1.r1" is not supported as column type +DETAIL: "pck1.r1" is a package or procedure type +CONTEXT: SQL statement "create type o1 as (a pck1.r1)" +PL/pgSQL function p1() line 3 at SQL statement +create or replace procedure p1 as +begin +create type o1 as (a pck1.r2); +end; +/ +call p1(); +ERROR: type "pck1.r2" is not supported as column type +DETAIL: "pck1.r2" is a package or procedure type +CONTEXT: SQL statement "create type o1 as (a pck1.r2)" +PL/pgSQL function p1() line 3 at SQL statement +create or replace procedure p1 as +begin +create type o1 as (a pck1.r3); +end; +/ +call p1(); +ERROR: type "pck1.r3" is not supported as column type +DETAIL: "pck1.r3" is a package or procedure type +CONTEXT: SQL statement "create type o1 as (a pck1.r3)" +PL/pgSQL function p1() line 3 at SQL statement +DROP procedure p1; +DROP package pck1; +-- test package-depended type clean +create or replace package pck4 is +va int; +end pck4; +/ +create or replace package body pck4 is +type test_pck4_recordtype is record (a int, b int); +type test_pck4_arraytype is varray(10) of int; +type test_pck4_tableoftype is table of int index by varchar2(10); +type test_pck4_refcursor is ref cursor; +end pck4; +/ +select count(*) from pg_type where typname like '%.test_pck4%'; + count +------- 
+ 4 +(1 row) + +select count(*) from PG_SYNONYM where synname like '%.test_pck4%'; + count +------- + 1 +(1 row) + +create or replace package body pck4 is +vb int; +end pck4; +/ +select count(*) from pg_type where typname like '%.test_pck4%'; + count +------- + 0 +(1 row) + +select count(*) from PG_SYNONYM where synname like '%.test_pck4%'; + count +------- + 0 +(1 row) + +DROP PACKAGE pck4; +-- test table of record index by varchar +create or replace package pkg045 +is +type type000 is record(c1 number(7,3),c2 varchar2(30)); +type type001 is table of type000 index by varchar2(30); +function proc045_2(col1 int) return int; +end pkg045; +/ +create or replace package body pkg045 +is +function proc045_2(col1 int) return int +is +b2 pkg045.type001; +begin +b2('1').c1:=1.000; +b2('1').c2:='aaa'; +raise info '%,%', b2('1').c1, b2('1').c2; +return 1; +end; +end pkg045; +/ +call pkg045.proc045_2(1); +INFO: 1.000,aaa + proc045_2 +----------- + 1 +(1 row) + +drop package pkg045; +NOTICE: drop cascades to function plpgsql_packagetype2.proc045_2(integer) +-- test alter package/function owner +-- (a) alter package +reset search_path; +create user alt_user_1 PASSWORD 'gauss@123'; +create user alt_user_2 PASSWORD 'gauss@123'; +SET SESSION AUTHORIZATION alt_user_1 password 'gauss@123'; +drop package if exists pck1; +NOTICE: package pck1() does not exist, skipping +create or replace package pck1 as +type tt1 is record(c1 int,c2 int); +type tt2 is table of tt1; +type tt3 is varray(10) of tt1; +function func1()return int; +procedure proc1(); +end pck1; +/ +create or replace package body pck1 as +function func1()return int as +type tt5 is record(c1 tt1,c2 int); +type tt6 is table of pck1.tt1; +type tt7 is varray(10) of pck1.tt1; +type tt8 is ref cursor; +type tt9 is table of tt5; +begin +return 0; +end; +procedure proc1() as +type tta is record(c1 tt1,c2 int); +type ttb is table of pck1.tt1; +type ttc is varray(10) of pck1.tt1; +type ttd is ref cursor; +type tte is table of tta; +begin +null; +end; +end pck1; +/ +reset session AUTHORIZATION; +alter package alt_user_1.pck1 owner to alt_user_2; +------usename user2 +select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt1%' limit 1); + usename +------------ + alt_user_2 +(1 row) + +select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt2%' limit 1); + usename +------------ + alt_user_2 +(1 row) + +select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt3%' limit 1); + usename +------------ + alt_user_2 +(1 row) + +select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt5%' limit 1); + usename +------------ + alt_user_2 +(1 row) + +select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tta%' limit 1); + usename +------------ + alt_user_2 +(1 row) + +drop package alt_user_1.pck1; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function alt_user_1.func1() +drop cascades to function alt_user_1.proc1() +-- (b) alter function +SET SESSION AUTHORIZATION alt_user_1 password 'gauss@123'; +create or replace function func1()return int as +type ttt5 is record(c1 int,c2 int); +type ttt6 is table of int; +type ttt7 is varray(10) of int; +type ttt8 is ref cursor; +type ttt9 is table of ttt5; +begin +return 0; +end; +/ +reset session AUTHORIZATION; +alter function alt_user_1.func1() owner to alt_user_2; +select usename from pg_user where 
usesysid = (select typowner from pg_type where typname like '%.ttt5%' limit 1); + usename +------------ + alt_user_2 +(1 row) + +select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.ttt6%' limit 1); + usename +--------- +(0 rows) + +drop function alt_user_1.func1(); +drop user alt_user_1 cascade; +drop user alt_user_2 cascade; +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- +drop package p_test2; +ERROR: package p_test2 does not exist +drop package plpgsql_packagetype1.p_test2; +ERROR: package plpgsql_packagetype1.p_test2 does not exist +drop package plpgsql_packagetype1.p_test1; +ERROR: package plpgsql_packagetype1.p_test1 does not exist +-- clean up -- +drop schema if exists plpgsql_packagetype2 cascade; +NOTICE: drop cascades to synonym plpgsql_packagetype2.proc1 +drop schema if exists plpgsql_packagetype1 cascade; diff --git a/src/test/regress/expected/plpgsql_record_attrname.out b/src/test/regress/expected/plpgsql_record_attrname.out new file mode 100644 index 000000000..28df3ce20 --- /dev/null +++ b/src/test/regress/expected/plpgsql_record_attrname.out @@ -0,0 +1,106 @@ +-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios -- +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_record; +NOTICE: schema "plpgsql_record" does not exist, skipping +create schema plpgsql_record; +set search_path=plpgsql_record; +-- initialize table and type-- +CREATE TABLE DCT_DATACLR_LOG(TYPE int NOT NULL ENABLE); +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- +-- test TYPE as a table col name +create or replace package p_test1 as + TYPE IN_CLEANLOG_TYPE IS RECORD(IN_TYPE DCT_DATACLR_LOG.TYPE%TYPE); + function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE; +end p_test1; +/ +create or replace package body p_test1 as + function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE as + va IN_CLEANLOG_TYPE; + begin + va := ss; + raise info '%',va; + return va; + end; +end p_test1; +/ +select p_test1.f1(ROW(3)); +INFO: (3) +CONTEXT: referenced column: f1 + f1 +----- + (3) +(1 row) + +-- test TYPE as a col name of record +create or replace package p_test1 as + TYPE IN_CLEANLOG_TYPE IS RECORD(TYPE int); + function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE; +end p_test1; +/ +create or replace package body p_test1 as + function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE as + va IN_CLEANLOG_TYPE; + begin + va := ss; + raise info '%',va; + return va; + end; +end p_test1; +/ +select p_test1.f1(ROW(3)); +INFO: (3) +CONTEXT: referenced column: f1 + f1 +----- + (3) +(1 row) + +--test RECORD col name of exist type and var name +create or replace package p_test2 is + type array_type is varray(10) of int; + type tab_type is table of int; + type r_type is record (a int, b int); + va array_type; + vb tab_type; + vc r_type; + type IN_CLEANLOG_TYPE is record (array_type int, tab_type int, r_type int, va int, vb int, vc int); + function f1(ss in IN_CLEANLOG_TYPE) return int; +end p_test2; +/ +create or replace package body p_test2 as + function f1(ss in IN_CLEANLOG_TYPE) return int as + vaa IN_CLEANLOG_TYPE; + begin + vaa := ss; + raise info '%',vaa; + return vaa.va; + end; +end p_test2; +/ +select p_test2.f1((1,2,3,4,5,6)); 
+INFO: (1,2,3,4,5,6)
+CONTEXT: referenced column: f1
+ f1
+----
+ 4
+(1 row)
+
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+drop package p_test2;
+--?.*
+drop package p_test1;
+--?.*
+-- clean up --
+drop schema if exists plpgsql_record cascade;
+NOTICE: drop cascades to table dct_dataclr_log
diff --git a/src/test/regress/expected/plpgsql_savepoint.out b/src/test/regress/expected/plpgsql_savepoint.out
new file mode 100644
index 000000000..9d1481e18
--- /dev/null
+++ b/src/test/regress/expected/plpgsql_savepoint.out
@@ -0,0 +1,1438 @@
+/*
+################################################################################
+# TESTCASE NAME : plpgsql_savepoint
+# COMPONENT(S) : plpgsql savepoint
+################################################################################
+*/
+CREATE TABLE pl_txn_t(tc1 INT, tc2 INT);
+-- normal case 1
+CREATE OR REPLACE PROCEDURE sp_normal_1 IS
+ BEGIN
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ SAVEPOINT SAVE_A;
+
+ INSERT INTO pl_txn_t VALUES(2, 2);
+ SAVEPOINT SAVE_B;
+
+ INSERT INTO pl_txn_t VALUES(3, 3);
+ SAVEPOINT SAVE_C;
+
+ INSERT INTO pl_txn_t VALUES(4, 4);
+ INSERT INTO pl_txn_t VALUES(5, 5);
+ ROLLBACK TO SAVEPOINT SAVE_C;
+
+ INSERT INTO pl_txn_t VALUES(6, 6);
+ ROLLBACK TO SAVEPOINT SAVE_B;
+
+ INSERT INTO pl_txn_t VALUES(2, 2);
+ ROLLBACK TO SAVEPOINT SAVE_A;
+ END;
+ /
+SELECT sp_normal_1();
+ sp_normal_1
+-------------
+
+(1 row)
+
+COMMIT;
+WARNING: there is no transaction in progress
+SELECT sp_normal_1(), sp_normal_1();
+ sp_normal_1 | sp_normal_1
+-------------+-------------
+ |
+(1 row)
+
+DROP PROCEDURE sp_normal_1;
+-- normal case 2
+CREATE OR REPLACE PROCEDURE sp_normal_2 IS
+ BEGIN
+ SAVEPOINT
+ SAVE_A;
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ ROLLBACK TO SAVEPOINT
+ SAVE_A;
+ SAVEPOINT B;
+ END;
+ /
+SELECT sp_normal_2();
+ sp_normal_2
+-------------
+
+(1 row)
+
+BEGIN;
+SELECT sp_normal_2();
+ sp_normal_2
+-------------
+
+(1 row)
+
+SELECT sp_normal_2(); -- execution fails: SAVEPOINT outside the statement is not yet supported
+ sp_normal_2
+-------------
+
+(1 row)
+
+COMMIT;
+DROP PROCEDURE sp_normal_2;
+-- savepoint name as variable in PL
+CREATE OR REPLACE PROCEDURE sp_name_variable IS
+ sp_name NVARCHAR2(100) := 'SAVE_A';
+ BEGIN
+ SAVEPOINT sp_name;
+ ROLLBACK TO sp_name;
+ END;
+ /
+CALL sp_name_variable();
+ sp_name_variable
+------------------
+
+(1 row)
+
+CREATE OR REPLACE PROCEDURE sp_name_variable IS
+ sp_name NVARCHAR2(100) := 'SAVE_A';
+ BEGIN
+ SAVEPOINT sp_name;
+ ROLLBACK TO SAVE_A; -- no such savepoint
+ END;
+ /
+CALL sp_name_variable();
+ERROR: no such savepoint
+CONTEXT: PL/pgSQL function sp_name_variable() line 4 at ROLLBACK TO SAVEPOINT
+DROP PROCEDURE sp_name_variable;
+-- length of savepoint name is too big.
+CREATE OR REPLACE PROCEDURE sp_name_length IS
+ BEGIN
+ SAVEPOINT sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
+ ROLLBACK TO sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
+ END;
+ /
+NOTICE: identifier "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" will be truncated to "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+NOTICE: identifier "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" will be truncated to "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+NOTICE: identifier "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" will be truncated to "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+CONTEXT: compilation of PL/pgSQL function "sp_name_length" near line 1
+NOTICE: identifier "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" will be truncated to "sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+CONTEXT: compilation of PL/pgSQL function "sp_name_length" near line 2
+CALL sp_name_length();
+ sp_name_length
+----------------
+
+(1 row)
+
+-- no savepoint outside statement
+CREATE OR REPLACE PROCEDURE sp_no_outside IS
+ BEGIN
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ COMMIT;
+ ROLLBACK TO SAVEPOINT SAVE_A;
+ END;
+ /
+CALL sp_no_outside();
+ERROR: no such savepoint
+CONTEXT: PL/pgSQL function sp_no_outside() line 4 at ROLLBACK TO SAVEPOINT
+BEGIN;
+SAVEPOINT SAVE_A;
+ROLLBACK TO SAVEPOINT SAVE_A;
+SELECT sp_no_outside();
+ERROR: commit/rollback is not allowed in outer sub transaction block.
+CONTEXT: PL/pgSQL function sp_no_outside() line 3 at COMMIT
+referenced column: sp_no_outside
+ROLLBACK;
+BEGIN;
+SAVEPOINT SAVE_A;
+RELEASE SAVEPOINT SAVE_A;
+CALL sp_no_outside();
+ERROR: no such savepoint
+CONTEXT: PL/pgSQL function sp_no_outside() line 4 at ROLLBACK TO SAVEPOINT
+ROLLBACK;
+DROP PROCEDURE sp_no_outside;
+-- savepoint + commit / rollback
+CREATE OR REPLACE PROCEDURE sp_commit_rollback(p INT) IS
+ BEGIN
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ SAVEPOINT SAVE_A;
+ IF p%2 = 0 then
+ ROLLBACK;
+ ELSE
+ COMMIT;
+ END IF;
+ END;
+ /
+SELECT sp_commit_rollback(0);
+ sp_commit_rollback
+--------------------
+
+(1 row)
+
+SELECT sp_commit_rollback(1);
+ sp_commit_rollback
+--------------------
+
+(1 row)
+
+CREATE OR REPLACE PROCEDURE sp_commit_rollback IS
+ BEGIN
+ SAVEPOINT save_a;
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ ROLLBACK TO save_a;
+ COMMIT;
+ END;
+ /
+CALL sp_commit_rollback();
+ sp_commit_rollback
+--------------------
+
+(1 row)
+
+DROP PROCEDURE sp_commit_rollback;
+CREATE OR REPLACE PROCEDURE pl_commit IS
+ BEGIN
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ COMMIT;
+ END;
+ /
+SELECT pl_commit();
+ pl_commit
+-----------
+
+(1 row)
+
+DROP PROCEDURE pl_commit;
+CREATE OR REPLACE PROCEDURE commit_drop_sp IS
+ BEGIN
+ INSERT INTO pl_txn_t VALUES(1, 1);
+ SAVEPOINT SAVE_1;
+ INSERT INTO pl_txn_t VALUES(2, 2);
+ SAVEPOINT SAVE_2;
+ COMMIT;
+ INSERT INTO pl_txn_t VALUES(4, 4);
+ ROLLBACK TO SAVEPOINT SAVE_1; --COMMIT has already removed SAVE_1
+ END;
+ /
+SELECT commit_drop_sp(); -- no such savepoint
+ERROR: no such savepoint
+CONTEXT: PL/pgSQL function commit_drop_sp() line 8 at ROLLBACK TO SAVEPOINT
+referenced column: commit_drop_sp
+DROP PROCEDURE commit_drop_sp;
+-- savepoint in cursor
+CREATE OR REPLACE FUNCTION sp_inner RETURN INTEGER
+AS
+ BEGIN
+ SAVEPOINT save_a;
+ COMMIT;
+ SAVEPOINT save_a;
+ RETURN 1;
+ END;
+ /
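+-- sp_inner above embeds SAVEPOINT/COMMIT in a function body; the next case
+-- invokes such a function from a cursor, where transaction statements are
+-- rejected, so the FETCH is expected to fail or fall into an OTHERS handler.
+-- Shape of the failing pattern (names taken from the test that follows):
+--   cursor c1 for select sp_inner() from pl_txn_t;
+--   open c1; fetch c1 into val;  -- transaction statement inside a cursor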
+CREATE OR REPLACE PROCEDURE sp_in_cursor IS + CURSOR c1 FOR SELECT sp_inner() FROM pl_txn_t; + val INT; + BEGIN + SAVEPOINT save_a; + OPEN c1; + FETCH c1 INTO val; + CLOSE c1; + EXCEPTION + WHEN OTHERS THEN + RAISE NOTICE 'wrong 1'; + END; + / +SELECT sp_in_cursor(); +NOTICE: wrong 1 +CONTEXT: referenced column: sp_in_cursor + sp_in_cursor +-------------- + +(1 row) + +DROP PROCEDURE sp_in_cursor; +DROP PROCEDURE sp_inner; +CREATE OR REPLACE FUNCTION sp_inner RETURN INTEGER +AS + BEGIN + ROLLBACK TO save_axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx; + RETURN 1; + END; + / +NOTICE: identifier "save_axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" will be truncated to "save_axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +NOTICE: identifier "save_axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" will be truncated to "save_axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +CONTEXT: compilation of PL/pgSQL function "sp_inner" near line 1 +CREATE OR REPLACE PROCEDURE sp_in_cursor is + CURSOR c1 FOR SELECT sp_inner() FROM pl_txn_t; + val INT; + BEGIN + SAVEPOINT save_a; + OPEN c1; + FETCH c1 INTO val; + CLOSE c1; + END; + / +CALL sp_in_cursor(); +ERROR: transaction statement in store procedure used as cursor is not supported +CONTEXT: PL/pgSQL function sp_inner() line 2 at ROLLBACK TO SAVEPOINT +referenced column: sp_inner +PL/pgSQL function sp_in_cursor() line 6 at FETCH +DROP PROCEDURE sp_in_cursor; +DROP PROCEDURE sp_inner; +-- savepoint in subroutine +CREATE OR REPLACE PROCEDURE sp_subroutine IS + BEGIN + SAVEPOINT save_0; + INSERT INTO pl_txn_t VALUES(1, 1); + ROLLBACK TO save_0; + SAVEPOINT save_2; + SAVEPOINT save_3; + END; + / +CREATE OR REPLACE PROCEDURE sp_in_subroutine IS + BEGIN + SAVEPOINT save_1; + sp_subroutine(); + INSERT INTO pl_txn_t VALUES(1, 1); + ROLLBACK TO save_1; + INSERT INTO pl_txn_t VALUES(2, 2); + END; + / +SELECT sp_in_subroutine(); + sp_in_subroutine +------------------ + +(1 row) + +SELECT sp_in_subroutine(); + sp_in_subroutine +------------------ + +(1 row) + +CREATE OR REPLACE PROCEDURE sp_in_subroutine IS + BEGIN + sp_subroutine(); + sp_subroutine(); + END; + / +SELECT sp_in_subroutine(); + sp_in_subroutine +------------------ + +(1 row) + +DROP PROCEDURE sp_in_subroutine; +DROP PROCEDURE sp_subroutine; +-- duplicate name +CREATE OR REPLACE PROCEDURE sp_duplicate_name IS + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT SAVE_A; + ROLLBACK TO SAVEPOINT SAVE_A; + SAVEPOINT SAVE_A; + ROLLBACK TO SAVEPOINT SAVE_A; + END; + / +SELECT sp_duplicate_name(); + sp_duplicate_name +------------------- + +(1 row) + +DROP PROCEDURE sp_duplicate_name; +-- savepoint in SPI executor context +CREATE OR REPLACE PROCEDURE pl_subroutine IS + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + ROLLBACK; -- 该行会销毁PopOverrideSearchPath,导致不匹配 + END; + / +CREATE OR REPLACE PROCEDURE sp_spi_rollback IS + BEGIN + SAVEPOINT save_1; + pl_subroutine(); + END; + / +SELECT sp_spi_rollback(); + sp_spi_rollback +----------------- + +(1 row) + +DROP PROCEDURE sp_spi_rollback; +DROP PROCEDURE pl_subroutine; +CREATE OR REPLACE PROCEDURE pl_subroutine IS + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + ROLLBACK TO save_1; -- 该行会销毁SavepointTest0的调用上下文 + END; + / +CREATE OR REPLACE PROCEDURE sp_spi_rollbackto IS + BEGIN + SAVEPOINT save_1; + --ROLLBACK; + pl_subroutine(); + INSERT INTO pl_txn_t VALUES(2, 2); + ROLLBACK TO save_1; + INSERT INTO pl_txn_t VALUES(3, 3); + END; + / +SELECT 
sp_spi_rollbackto(); + sp_spi_rollbackto +------------------- + +(1 row) + +SELECT sp_spi_rollbackto(); + sp_spi_rollbackto +------------------- + +(1 row) + +DROP PROCEDURE sp_spi_rollbackto; +DROP PROCEDURE pl_subroutine; +-- savepoint + subroutine's commit/rollback +CREATE OR REPLACE PROCEDURE pl_commit IS + BEGIN + COMMIT; -- snapshot destoryed when substransaction finishes. + END; + / +CREATE OR REPLACE PROCEDURE sp_inner_commit IS + BEGIN + SAVEPOINT SAVE_A0; + pl_commit(); + END; + / +SELECT sp_inner_commit(); + sp_inner_commit +----------------- + +(1 row) + +DROP PROCEDURE sp_inner_commit; +DROP PROCEDURE pl_commit; +CREATE OR REPLACE PROCEDURE pl_rollback IS + BEGIN + ROLLBACK; -- snapshot destoryed when substransaction finishes. + END; + / +CREATE OR REPLACE PROCEDURE sp_inner_rollback IS + BEGIN + SAVEPOINT SAVE_A0; + pl_rollback(); + END; + / +CALL sp_inner_rollback(); + sp_inner_rollback +------------------- + +(1 row) + +DROP PROCEDURE sp_inner_rollback; +DROP PROCEDURE pl_rollback; +-- savepoint + exception +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + COMMIT; + SAVEPOINT SAVE_A; + RAISE exc_1; + EXCEPTION + WHEN OTHERS THEN + RAISE NOTICE 'wrong 1'; + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + BEGIN + SavepointTest(); + EXCEPTION + WHEN OTHERS THEN + ROLLBACK TO SAVE_A; + RAISE NOTICE 'wrong 2'; + END; + / +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +SELECT SavepointTest0(); +NOTICE: wrong 1 +CONTEXT: SQL statement "CALL savepointtest()" +PL/pgSQL function savepointtest0() line 2 at PERFORM +referenced column: savepointtest0 + savepointtest0 +---------------- + +(1 row) + +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT SAVE_A; + INSERT INTO pl_txn_t VALUES(2, 2); + ROLLBACK TO SAVEPOINT SAVE_B; + EXCEPTION + WHEN exc_1 THEN + ROLLBACK TO SAVEPOINT SAVE_A; + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT SAVE_B; + INSERT INTO pl_txn_t VALUES(3,3); + END; + / +SELECT SavepointTest(); +NOTICE: wrong 2 +CONTEXT: referenced column: savepointtest +ERROR: no such savepoint +CONTEXT: PL/pgSQL function savepointtest() line 13 at ROLLBACK TO SAVEPOINT +referenced column: savepointtest +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT SAVE_A; + INSERT INTO pl_txn_t VALUES(2, 2); + RAISE exc_1; + EXCEPTION + WHEN exc_1 THEN + ROLLBACK TO SAVEPOINT SAVE_A; + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT SAVE_B; + INSERT INTO pl_txn_t VALUES(3,3); + END; + / +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +DROP PROCEDURE SavepointTest; +DROP PROCEDURE SavepointTest0; +-- savepoint + cursor hold +CREATE OR REPLACE PROCEDURE SavepointTest IS + CURSOR c1 IS SELECT tc1 FROM pl_txn_t; + val INT; + val1 INT; + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + INSERT INTO pl_txn_t VALUES(2,2); + OPEN c1; + SAVEPOINT save_a; + FETCH c1 INTO val; + ROLLBACK TO save_a; + FETCH c1 INTO val1; + CLOSE c1; + END; + / +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +CREATE OR REPLACE PROCEDURE SavepointTest IS + CURSOR c1 IS SELECT tc1 FROM pl_txn_t; + val INT; + val1 INT; + BEGIN + INSERT INTO pl_txn_t values(1,1); + INSERT INTO pl_txn_t 
values(2,2); + SAVEPOINT save_a; + OPEN c1; + FETCH c1 INTO val; + ROLLBACK to save_a; + FETCH c1 INTO val1; --fetch out of sequence + CLOSE c1; + END; + / +SELECT SavepointTest(); +ERROR: cursor "" does not exist in FETCH statement. +CONTEXT: PL/pgSQL function savepointtest() line 11 at FETCH +referenced column: savepointtest +CREATE OR REPLACE PROCEDURE SavepointTest IS + CURSOR c1 IS SELECT tc1 FROM pl_txn_t; + val INT; + val1 INT; + BEGIN + INSERT INTO pl_txn_t values(1,1); + INSERT INTO pl_txn_t values(2,2); + SAVEPOINT save_a; + OPEN c1; + FETCH c1 INTO val; + COMMIT; + FETCH c1 INTO val1; + CLOSE c1; + END; + / +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +DROP PROCEDURE SavepointTest; +-- spi connect +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + CURSOR c1 IS SELECT tc1 FROM pl_txn_t; + val INT; + val1 INT; + BEGIN + ROLLBACK TO SAVEPOINT SAVE_A0; + --INSERT INTO pl_txn_t VALUES(1, 1); + --INSERT INTO pl_txn_t VALUES(2, 2); + SAVEPOINT SAVE_A1; + OPEN c1; + FETCH c1 INTO val; + COMMIT; + FETCH c1 INTO val; + CLOSE c1; + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + BEGIN + SAVEPOINT SAVE_A0; + --INSERT INTO pl_txn_t VALUES(1, 1); + --INSERT INTO pl_txn_t VALUES(2, 2); + --SAVEPOINT SAVE_A1; + SavepointTest0(); + END; + / +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +DROP PROCEDURE SavepointTest0; +DROP PROCEDURE SavepointTest; +-- savepoint in exception, don't destory exception's subtransaction +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(2, 2); + COMMIT; + INSERT INTO pl_txn_t VALUES(3, 3); + SAVEPOINT save_b; + INSERT INTO pl_txn_t VALUES(4, 4); + EXCEPTION + WHEN exc_1 THEN + ROLLBACK TO SAVEPOINT save_a; + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(5, 5); + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; + SavepointTest0(); + ROLLBACK TO save_b; + END; + / +TRUNCATE pl_txn_t; +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +-- exception's subtransaction id changes. +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + BEGIN + -- exception's subtransaction id is 3 + INSERT INTO pl_txn_t VALUES(2, 2); + COMMIT; -- exception's subtransaction id changes to 2. 
+ INSERT INTO pl_txn_t VALUES(3, 3); + EXCEPTION + WHEN exc_1 THEN + ROLLBACK TO SAVEPOINT SAVE_A; + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT SAVE_A; + INSERT INTO pl_txn_t VALUES(5, 5); + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; -- subtransaction id = 2 + SavepointTest0(); + END; + / +TRUNCATE pl_txn_t; +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +-- automatic rollback to the last savepoint:save_b in exception +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + BEGIN + -- exception's subtransaction id is 3 + INSERT INTO pl_txn_t VALUES(2, 2); + COMMIT; -- destory save_a automatically + INSERT INTO pl_txn_t VALUES(3, 3); + SAVEPOINT save_b; + INSERT INTO pl_txn_t VALUES(4, 4); + RAISE exc_1; + EXCEPTION + -- auto rollback to save_b + WHEN exc_1 THEN + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + INSERT INTO pl_txn_t VALUES(5, 5); + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; + SavepointTest0(); + END; + / +TRUNCATE pl_txn_t; +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: SQL statement "CALL savepointtest0()" +PL/pgSQL function savepointtest() line 5 at PERFORM +referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +-- rollback to in exception +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + BEGIN + -- exception's subtransaction id is 3 + INSERT INTO pl_txn_t VALUES(2, 2); + COMMIT; -- destory save_a automatically + INSERT INTO pl_txn_t VALUES(3, 3); + SAVEPOINT save_b; + INSERT INTO pl_txn_t VALUES(4, 4); + SAVEPOINT save_c; + ROLLBACK TO save_none; -- no such savepoint + EXCEPTION + -- auto rollback to save_c + WHEN exc_1 THEN + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT SAVE_b; + INSERT INTO pl_txn_t VALUES(5, 5); + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; + SavepointTest0(); + END; + / +TRUNCATE pl_txn_t; +SELECT SavepointTest(); +NOTICE: wrong 2 +CONTEXT: SQL statement "CALL savepointtest0()" +PL/pgSQL function savepointtest() line 5 at PERFORM +referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 + 3 | 3 + 5 | 5 +(4 rows) + +-- destory SPI connect while abort subtransaction +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + var1 INT; + BEGIN + SAVEPOINT save_c; + ROLLBACK TO SAVEPOINT save_a; + SELECT sum(t1.tc1 + t2.tc2) INTO var1 FROM pl_txn_t t1, pl_txn_t t2 WHERE T1.TC2 + 1 = T2.TC2 + 4; + CREATE TABLE pl_txn_t(tc1 INT, tc2 INT); + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; + SAVEPOINT save_b; + SavepointTest0(); + END; + / +SELECT SavepointTest(); +ERROR: relation "pl_txn_t" already exists in schema "public" +DETAIL: creating new table with existing name in the same schema +CONTEXT: SQL statement "CREATE TABLE pl_txn_t(tc1 INT, tc2 INT)" +PL/pgSQL function 
savepointtest0() line 7 at SQL statement +SQL statement "CALL savepointtest0()" +PL/pgSQL function savepointtest() line 6 at PERFORM +referenced column: savepointtest +-- savepoint outside STP +create or replace procedure SavepointTest is + exc_1 exception; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; + ROLLBACK TO save_out; + SAVEPOINT save_b; + END; + / +BEGIN; +INSERT INTO pl_txn_t VALUES(0, 0); +SAVEPOINT save_out; +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +ROLLBACK TO save_b; +COMMIT; +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(2, 2); + INSERT INTO pl_txn_t VALUES(3, 3); + SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(4, 4); + RAISE exc_1; + EXCEPTION + WHEN exc_1 THEN + ROLLBACK TO SAVEPOINT save_out; + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(5, 5); + END; + / +BEGIN; +SAVEPOINT save_out; +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +ROLLBACK TO save_out; +END; +-- don't switch to top portal's resourceowner since it is invalid. +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(2, 2); + INSERT INTO pl_txn_t VALUES(3, 3); + ROLLBACK TO SAVEPOINT save_out; + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + SavepointTest0(); + INSERT INTO pl_txn_t VALUES(1, 1); + RAISE exc_1; + EXCEPTION + WHEN exc_1 THEN + INSERT INTO pl_txn_t VALUES(4, 4); + WHEN OTHERS THEN + INSERT INTO pl_txn_t VALUES(6, 6); + END; + / +TRUNCATE pl_txn_t; +BEGIN; +SAVEPOINT save_out; +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +SELECT * from pl_txn_t order by 1, 2; + tc1 | tc2 +-----+----- + 4 | 4 +(1 row) + +END; +-- exception's subtransaction is destoryed by rollbackiing to outside savepoint +CREATE OR REPLACE PROCEDURE SavepointTest0 IS + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(2, 2); + INSERT INTO pl_txn_t VALUES(3, 3); + SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(4, 4); + RAISE exc_1; + EXCEPTION + WHEN exc_1 THEN + ROLLBACK TO SAVEPOINT save_out; + RAISE NOTICE 'wrong 1'; + WHEN OTHERS THEN + RAISE NOTICE 'wrong 2'; + ROLLBACK TO SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(5, 5); + END; + / +CREATE OR REPLACE PROCEDURE SavepointTest IS + exc_1 EXCEPTION; + BEGIN + SavepointTest0(); + INSERT INTO pl_txn_t VALUES(1, 1); + RAISE exc_1; + EXCEPTION + WHEN exc_1 THEN + INSERT INTO pl_txn_t VALUES(4, 4); + WHEN OTHERS THEN + INSERT INTO pl_txn_t VALUES(6, 6); + END; + / +BEGIN; +SAVEPOINT save_out; +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: SQL statement "CALL savepointtest0()" +PL/pgSQL function savepointtest() line 3 at PERFORM +referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +SELECT SavepointTest(); +NOTICE: wrong 1 +CONTEXT: SQL statement "CALL savepointtest0()" +PL/pgSQL function savepointtest() line 3 at PERFORM +referenced column: savepointtest + savepointtest +--------------- + +(1 row) + +END; +DROP PROCEDURE SavepointTest0; +DROP PROCEDURE SavepointTest; +-- switch to stmt top portal memory context +CREATE OR REPLACE PROCEDURE SavepointTest IS + val 
VARCHAR(10) := '0'; + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT save_a; + val := val || '1'; + ROLLBACK TO SAVEPOINT save_a; + val := val || '2'; + END; + / +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +SELECT SavepointTest(); + savepointtest +--------------- + +(1 row) + +DROP PROCEDURE SavepointTest; +-- don't support execute immedidate savepoint +CREATE OR REPLACE PROCEDURE SavepointTest IS + BEGIN + INSERT INTO pl_txn_t VALUES(1, 1); + SAVEPOINT SAVE_A; + + INSERT INTO pl_txn_t VALUES(2, 2); + execute immediate 'rollback to ' || 'save_a'; + + INSERT INTO pl_txn_t VALUES(2, 2); + ROLLBACK TO SAVEPOINT SAVE_A; + END; + / +select SavepointTest(); +ERROR: cannot call transaction statements in EXECUTE IMMEDIATE statement. +CONTEXT: PL/pgSQL function savepointtest() line 6 at EXECUTE statement +referenced column: savepointtest +DROP PROCEDURE SavepointTest; +-- wrong during execut stage +CREATE OR REPLACE PROCEDURE sp_inner1 IS + BEGIN + SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(2,2); + INSERT INTO pl_txn_t VALUES(2,2); --wrong + END; + / +CREATE OR REPLACE PROCEDURE sp_test is + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + sp_inner1(); + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CREATE UNIQUE INDEX idx_unique_tc1_tc2 ON pl_txn_t(tc1, tc2); +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 +(2 rows) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +DROP INDEX idx_unique_tc1_tc2; +-- wrong during plan stage +CREATE OR REPLACE PROCEDURE sp_inner1 IS + BEGIN + SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(2,2); + INSERT INTO pl_txn_t VALUES(2,2,2); --wrong + END; + / +CREATE OR REPLACE PROCEDURE sp_test is + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + sp_inner1(); + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 +(2 rows) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +-- wrong during pl others +CREATE OR REPLACE PROCEDURE sp_inner1 IS + exc_1 EXCEPTION; + BEGIN + SAVEPOINT save_a; + INSERT INTO pl_txn_t VALUES(2,2); + RAISE exc_1; + END; + / +CREATE OR REPLACE PROCEDURE sp_test is + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + sp_inner1(); + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 +(2 rows) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +DROP PROCEDURE sp_test; +DROP PROCEDURE sp_inner1; +-- don't rollback exception's subtxn +CREATE OR REPLACE PROCEDURE sp_test is + exc_1 
EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + RAISE exc_1; + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- +(0 rows) + +DROP PROCEDURE sp_test; +-- release savepoint +CREATE OR REPLACE PROCEDURE sp_test is + exc_1 EXCEPTION; + BEGIN + SAVEPOINT s1; + INSERT INTO pl_txn_t VALUES(1,1); + RELEASE s1; + INSERT INTO pl_txn_t VALUES(2,2); + ROLLBACK TO s1; + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 + 2 | 2 +(2 rows) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- +(0 rows) + +-- rollback to savepoint before released one +CREATE OR REPLACE PROCEDURE sp_test is + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(0,0); + SAVEPOINT s1; + INSERT INTO pl_txn_t VALUES(1,1); + SAVEPOINT s2; + INSERT INTO pl_txn_t VALUES(2,2); + RELEASE s2; + INSERT INTO pl_txn_t VALUES(3,3); + ROLLBACK TO s1; + INSERT INTO pl_txn_t VALUES(4,4); + RAISE exc_1; + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 0 | 0 + 4 | 4 +(2 rows) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 0 | 0 +(1 row) + +DROP PROCEDURE sp_test; +-- wrong during plan stage without savepoint +CREATE OR REPLACE PROCEDURE sp_test is + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + -- cast wrong with hold some resource + INSERT INTO pl_txn_t VALUES(1,1,1); --wrong execute + UPDATE pl_txn_t SET tc2 = 'null'::numeric; --wrong no execute + INSERT INTO pl_txn_t VALUES(2,2); -- no execute + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- +(0 rows) + +-- wrong during execute stage without savepoint +CREATE OR REPLACE PROCEDURE sp_test is + exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + -- cast wrong with hold some resource + UPDATE pl_txn_t SET tc2 = 0 WHERE tc1 / (tc2 - 1) = 1; + SELECT COUNT(1) FROM pl_txn_t WHERE tc1 / (tc2 - 1) = 1; + INSERT INTO pl_txn_t VALUES(2,2); -- no execute + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +CREATE OR REPLACE PROCEDURE sp_test1 is + 
exc_1 EXCEPTION; + BEGIN + INSERT INTO pl_txn_t VALUES(1,1); + -- cast wrong with hold some resource + SELECT COUNT(1) FROM pl_txn_t WHERE tc1 / (tc2 - 1) = 1; + INSERT INTO pl_txn_t VALUES(2,2); -- no execute + EXCEPTION + WHEN OTHERS THEN + RAISE INFO 'wrong1'; + END; + / +SET behavior_compat_options = 'plstmt_implicit_savepoint'; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +TRUNCATE TABLE pl_txn_t; +CALL sp_test1(); +INFO: wrong1 + sp_test1 +---------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- + 1 | 1 +(1 row) + +SET behavior_compat_options = ''; +TRUNCATE TABLE pl_txn_t; +CALL sp_test(); +INFO: wrong1 + sp_test +--------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- +(0 rows) + +TRUNCATE TABLE pl_txn_t; +CALL sp_test1(); +INFO: wrong1 + sp_test1 +---------- + +(1 row) + +SELECT * FROM pl_txn_t ORDER BY 1, 2; + tc1 | tc2 +-----+----- +(0 rows) + +DROP PROCEDURE sp_test; +DROP PROCEDURE sp_test1; +DROP TABLE pl_txn_t; diff --git a/src/test/regress/expected/plpgsql_sql_with_proc_keyword.out b/src/test/regress/expected/plpgsql_sql_with_proc_keyword.out new file mode 100644 index 000000000..db01dc949 --- /dev/null +++ b/src/test/regress/expected/plpgsql_sql_with_proc_keyword.out @@ -0,0 +1,195 @@ +drop schema if exists plpgsql_table; +NOTICE: schema "plpgsql_table" does not exist, skipping +create schema plpgsql_table; +set current_schema = plpgsql_table; +create table test1(a varchar2(10),b varchar2(10)); +create or replace package keyword_pkg is +cursor c1 is select * from test1 where a=(case when b='1' then 1 else 0 end); +procedure p1(); +end keyword_pkg; +/ +create or replace package body keyword_pkg is +procedure p1() as +declare +rd record; +begin +open c1; +fetch c1 into rd; +end; +end keyword_pkg; +/ +call keyword_pkg.p1(); + p1 +---- + +(1 row) + +drop table test1; +drop package keyword_pkg; +NOTICE: drop cascades to function plpgsql_table.p1() +create or replace package emp_bonus is +var1 int:=1;--公有变量 +var2 int:=2; +procedure testpro1(var3 int);--公有存储过程,可以被外部调用 +------------------------------------------------- + +-----package用例测试 + +-----create package specification语法格式 + +--create [ or replace ] package [ schema ] package_name + +--[ invoker_rights_clause ] { is | as } item_list_1 end package_name; + +--package specification(包规格)声明了包内的公有变量、函数、异常等,可以 + +--被外部函数或者存储过程调用。在package specification中只能声明存储过 + +--程,函数,不能定义存储过程或者函数。 + +--package只支持集中式,无法在分布式中使用。 + +--•在package specification中声明过的函数或者存储过程,必须在package body中找到定义。 + +--•在实例化中,无法调用带有commit/rollback的存储过程。 + +--•不能在trigger中调用package函数。 + +--•不能在外部sql中直接使用package当中的变量。 + +--•不允许在package外部调用package的私有变量和存储过程。 + +--•不支持其它存储过程不支持的用法,例如,在function中不允许调用commit/rollback,则package的function中同样无法调用commit/rollback。 + +--•不支持schema与package同名。 + +--•只支持a风格的存储过程和函数定义。 + +--•不支持package内有同名变量,包括包内同名参数。 + +--•package的全局变量为session级,不同session之间package的变量不共享。 + +--•package中调用自治事务的函数,不允许使用公有变量,以及递归的使用公有变量的函数。 + +--•package中不支持声明ref cursor类型。 + +--•create package specification语法格式create [ or replace ] package [ schema ] package_name + +-- [ invoker_rights_clause ] { is | as } item_list_1 end package_name; + +-- + +--invoker_rights_clause可以被声明为authid definer或者authid invoker,分别为定义者权限和调用者权限。 + +--item_list_1可以为声明的变量或者存储过程以及函数。 + +-- + +--package specification(包规格)声明了包内的公有变量、函数、异常等,可以被外部函数或者存储过程调用。在package specification中只能声明存储过程,函数,不能定义存储过程或者函数。 + +-- + +--•create 
package body语法格式。create [ or replace ] package body [ schema ] package_name + +-- { is | as } declare_section [ initialize_section ] end package_name; + +-- + +--package body(包体内)定义了包的私有变量,函数等。如果变量或者函数没有在package specification中声明过,那么这个变量或者函数则为私有变量或者函数。 + +-- + +--package body也可以声明实例化部分,用来初始化package,详见示例。 + +------------------------------------------------- +end emp_bonus; +/ +create or replace package body emp_bonus is +var3 int:=3; +var4 int:=4; +procedure testpro1(var3 int) +is +begin +create table if not exists test1(col1 int); +insert into test1 values(var1); +insert into test1 values(var3); +------------------------------------------------- + +-----package用例测试 + +-----create package specification语法格式 + +--create [ or replace ] package [ schema ] package_name + +--[ invoker_rights_clause ] { is | as } item_list_1 end package_name; + +--package specification(包规格)声明了包内的公有变量、函数、异常等,可以 + +--被外部函数或者存储过程调用。在package specification中只能声明存储过 + +--程,函数,不能定义存储过程或者函数。 + +--package只支持集中式,无法在分布式中使用。 + +--•在package specification中声明过的函数或者存储过程,必须在package body中找到定义。 + +--•在实例化中,无法调用带有commit/rollback的存储过程。 + +--•不能在trigger中调用package函数。 + +--•不能在外部sql中直接使用package当中的变量。 + +--•不允许在package外部调用package的私有变量和存储过程。 + +--•不支持其它存储过程不支持的用法,例如,在function中不允许调用commit/rollback,则package的function中同样无法调用commit/rollback。 + +--•不支持schema与package同名。 + +--•只支持a风格的存储过程和函数定义。 + +--•不支持package内有同名变量,包括包内同名参数。 + +--•package的全局变量为session级,不同session之间package的变量不共享。 + +--•package中调用自治事务的函数,不允许使用公有变量,以及递归的使用公有变量的函数。 + +--•package中不支持声明ref cursor类型。 + +--•create package specification语法格式create [ or replace ] package [ schema ] package_name + +-- [ invoker_rights_clause ] { is | as } item_list_1 end package_name; + +-- + +--invoker_rights_clause可以被声明为authid definer或者authid invoker,分别为定义者权限和调用者权限。 + +--item_list_1可以为声明的变量或者存储过程以及函数。 + +-- + +--package specification(包规格)声明了包内的公有变量、函数、异常等,可以被外部函数或者存储过程调用。在package specification中只能声明存储过程,函数,不能定义存储过程或者函数。 + +-- + +--•create package body语法格式。create [ or replace ] package body [ schema ] package_name + +-- { is | as } declare_section [ initialize_section ] end package_name; + +-- + +--package body(包体内)定义了包的私有变量,函数等。如果变量或者函数没有在package specification中声明过,那么这个变量或者函数则为私有变量或者函数。 + +-- + +--package body也可以声明实例化部分,用来初始化package,详见示例。 + +------------------------------------------------- +end; +begin --实例化开始 +var4:=9; +testpro1(var4); +end emp_bonus; +/ +drop package if exists emp_bonus; +NOTICE: drop cascades to function plpgsql_table.testpro1(integer) +drop schema plpgsql_table; diff --git a/src/test/regress/expected/plpgsql_table_opengauss.out b/src/test/regress/expected/plpgsql_table_opengauss.out new file mode 100644 index 000000000..f1f98668e --- /dev/null +++ b/src/test/regress/expected/plpgsql_table_opengauss.out @@ -0,0 +1,972 @@ +-- test create type table of +-- check compatibility -- +-- create new schema -- +drop schema if exists plpgsql_table_opengauss; +NOTICE: schema "plpgsql_table_opengauss" does not exist, skipping +create schema plpgsql_table_opengauss; +set current_schema = plpgsql_table_opengauss; +--test inout param +CREATE TABLE INT8_TBL(q1 int8, q2 int8); +create view tt17v as select * from int8_tbl i where i in (values(i)); +select * from tt17v order by 1,2; + q1 | q2 +----+---- +(0 rows) + +create type s_type as ( + id integer, + name varchar, + addr text +); +create type typeA as table of s_type; +create type typeB as table of s_type.id%type; +NOTICE: type reference s_type.id%TYPE converted to integer +create type typeC as table of s_type.name%type; +NOTICE: type reference 
s_type.name%TYPE converted to character varying +create type typeD as table of varchar(100); +-- test table of nest table of error +create type typeF as table of typeD; +ERROR: table type does not support nested table. +-- don't support alter attr +alter type typeA ADD ATTRIBUTE a int; +ERROR: table type does not support alter. +-- test type nest table of +create type type1 as table of varchar(10); +create type type2 as (c1 type1); +declare + a type2; + begin + a.c1(1) = ('aaa'); + a.c1(2) = ('bbb'); + RAISE INFO 'a.c1: %' ,a.c1; +end; +/ +INFO: a.c1: {aaa,bbb} +CREATE TYPE type3 as (a varchar2(1000),b varchar2(1000)); +CREATE TYPE type4 AS TABLE OF type3; +CREATE TYPE type5 as (c1 varchar2(1000),c2 varchar2(1000), c3 type4); +declare + a5 type5; + begin + a5.c1 = 'aaa'; + a5.c3(1) = ('1','2'); + a5.c3(2) = ('11','21'); + RAISE INFO 'a.c1: %' ,a5.c3[1]; +end; +/ +INFO: a.c1: (1,2) +-- test record nest table of +create table tycod01(c1 int[],c2 int); +insert into tycod01 values(array[1],1); +create type tycod02 as(c1 int,c2 tycod01%rowtype); +create table tycod03(c1 int[],c2 tycod02,c3 tycod01); +insert into tycod03 values (array[3],(3,(array[3],3)),(array[3],3)); +create type tycode23 is table of tycod03.c3%type; +NOTICE: type reference tycod03.c3%TYPE converted to tycod01 +create or replace procedure recordnes23() +is +type tycode01 is table of varchar(20) index by varchar(20); +type tycode02 is record (c1 tycode01,c2 int,c3 tycode23); +tycode001 tycode02; +begin +tycode001.c1('aa'):=('22','33','44'); +tycode001.c1('bb'):=array['2222']; +tycode001.c2:=2222; +tycode001.c3(1):=(array[1],3); +raise info 'tycode001.c1 is %,tycode001.c2 is %,tycode001.c3 is %', tycode001.c1,tycode001.c2,tycode001.c3; +end; +/ +call recordnes23(); +INFO: tycode001.c1 is {"(22,33,44)","{2222}"},tycode001.c2 is 2222,tycode001.c3 is {"({1},3)"} + recordnes23 +------------- + +(1 row) + +-- test in paramter +create or replace procedure tableof_1(a typeA) +is + +begin + RAISE INFO 'a(1): %' ,a(1); + a(1) = (2, 'lisi', 'beijing'); + a(2) = (3, 'zahngwu', 'chengdu'); +end; +/ +create or replace procedure tableof_2() +is + a typeA; +begin + a(1) = (1, 'zhangsan', 'shanghai'); + RAISE INFO 'before call a(1): %' ,a(1); + perform tableof_1(a); + RAISE INFO 'after call a(2): %' ,a(2); +end; +/ +call tableof_2(); +INFO: before call a(1): (1,zhangsan,shanghai) +INFO: a(1): (1,zhangsan,shanghai) +CONTEXT: referenced column: tableof_1 +SQL statement "SELECT tableof_1(a)" +PL/pgSQL function tableof_2() line 5 at PERFORM +INFO: after call a(2): + tableof_2 +----------- + +(1 row) + +-- don't support create type = () +create or replace procedure tableof_3 + is + aa typeA = typeA(); + begin + RAISE INFO '%' ,aa; +end; +/ +call tableof_3(); +INFO: {} + tableof_3 +----------- + +(1 row) + +-- test return +create or replace function tableof_4() + return typeA as + a typeA; + begin + a(1) = (1, 'lisi', 'beijing'); + return a; +end; +/ +select tableof_4(); + tableof_4 +---------------------- + {"(1,lisi,beijing)"} +(1 row) + +create or replace function tableof_4() + return typeA as + a typeA; + begin + a(1) = (1, 'lisi', 'beijing'); + return a; +end; +/ +select tableof_4(); + tableof_4 +---------------------- + {"(1,lisi,beijing)"} +(1 row) + +create or replace function tableof_5() + return typeA as + a typeA; + b typeA; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ +select tableof_5(); +INFO: a:{"(1,lisi,beijing)"} +CONTEXT: referenced column: 
tableof_5 + tableof_5 +-------------------------------------------- + {"(1,lisi,beijing)","(2,zahngwu,chengdu)"} +(1 row) + +-- test cast +create or replace function tableof_6() + return typeC as + a typeA; + b typeC; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ +select tableof_6(); +INFO: a:{"(1,lisi,beijing)"} +CONTEXT: referenced column: tableof_6 + tableof_6 +-------------------------------------------- + {"(1,lisi,beijing)","(2,zahngwu,chengdu)"} +(1 row) + +--test return wrong type +create or replace function tableof_7() + return typeB as + a typeA; + b typeC; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ +select tableof_7(); +INFO: a:{"(1,lisi,beijing)"} +CONTEXT: referenced column: tableof_7 +ERROR: invalid input syntax for integer: "(1,lisi,beijing)" +CONTEXT: PL/pgSQL function tableof_7() while casting return value to function's return type +referenced column: tableof_7 +-- add one column from s_type +create type s_type_extend as ( + id integer, + name varchar, + addr text, + comment varchar +); +create type typeA_ext as table of s_type_extend; +create or replace function tableof_8() + return typeA_ext as + a typeA; + b typeA_ext; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu','good'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ +select tableof_8(); +ERROR: malformed record literal: "(1,lisi,beijing)" +DETAIL: Too few columns. +CONTEXT: PL/pgSQL function tableof_8() line 5 at assignment +referenced column: tableof_8 +-- test return index +create or replace function tableof_9() + return typeA as + a typeA; + begin + a(-1) = (1, 'lisi', 'beijing'); + a(2) = (2, 'zahngwu', 'chengdu'); + return a; +end; +/ +select tableof_9(); + tableof_9 +------------------------------------------------------------- + [-1:2]={"(1,lisi,beijing)",NULL,NULL,"(2,zahngwu,chengdu)"} +(1 row) + +create or replace procedure tableof_10() + as + a typeA; + begin + a = tableof_9(); + RAISE INFO 'a(-1):%' ,a(-1); + RAISE INFO 'a(0):%' ,a(0); + RAISE INFO 'a(2):%' ,a(2).id; +end; +/ +call tableof_10(); +INFO: a(-1):(1,lisi,beijing) +INFO: a(0): +INFO: a(2):2 + tableof_10 +------------ + +(1 row) + +create or replace procedure tableof_11() + as + a typeA; + begin + a = tableof_9(); + RAISE INFO 'a(-1):%' ,a(-1); +end; +/ +call tableof_11(); +INFO: a(-1):(1,lisi,beijing) + tableof_11 +------------ + +(1 row) + +-- test index by +create or replace procedure tableof_12 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa('aa') = 1; + aa('bb') = 2; + RAISE INFO '%' ,aa('aa'); + RAISE INFO '%' ,aa('bb'); +end; +/ +call tableof_12(); +ERROR: invalid input syntax for integer: "aa" +CONTEXT: PL/pgSQL function tableof_12() line 4 at assignment +create or replace procedure tableof_13 + is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; + begin + aa('aa') = 1; + aa('bb') = 2; + RAISE INFO '%' ,aa(0); + RAISE INFO '%' ,aa('bb'); +end; +/ +call tableof_13(); +INFO: +INFO: 2 + tableof_13 +------------ + +(1 row) + +create or replace procedure tableof_14 + is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; + b varchar(10); + begin + aa('a') = 1; + b = 'aa'; + aa(b) = 2; + RAISE INFO '%' ,aa('a'); + RAISE INFO '%' ,aa('aa'); + RAISE INFO '%' ,aa(b); +end; +/ +call tableof_14(); +INFO: 1 +INFO: 2 +INFO: 2 + tableof_14 
+------------ + +(1 row) + + +create or replace procedure tableof_15 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by date; + aa SalTabTyp; + begin + +end; +/ +ERROR: unsupported table index type +CONTEXT: compilation of PL/pgSQL function "tableof_15" near line 1 +create or replace procedure tableof_15 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by text; + aa SalTabTyp; + begin + +end; +/ +ERROR: unsupported table index type +CONTEXT: compilation of PL/pgSQL function "tableof_15" near line 1 +-- test table = table +create or replace procedure tableof_16 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + bb SalTabTyp; + begin + aa(-1) = 'b'; + aa(1) = 'a'; + RAISE INFO '%' ,aa(-1); + bb = aa; + RAISE INFO '%' ,bb(-1); + bb(8) = 'g'; + RAISE INFO '%' ,bb(8); + RAISE INFO '%' ,aa(8); + end; +/ +call tableof_16(); +INFO: b +INFO: b +INFO: g +INFO: + tableof_16 +------------ + +(1 row) + +-- test define +create or replace procedure tableof_17 + is + TYPE SalTabTyp is TABLE OF s_type%rowtype index by varchar(10); + aa SalTabTyp; + begin + aa('a') = (1, 'zhangsan', 'shanghai'); + aa('b') = (2, 'lisi', 'beijing'); + RAISE INFO '%' ,aa('a').id; + RAISE INFO '%' ,aa('b'); +end; +/ +call tableof_17(); +INFO: 1 +INFO: (2,lisi,beijing) + tableof_17 +------------ + +(1 row) + +create or replace procedure tableof_18 + is + TYPE SalTabTyp is TABLE OF s_type.id%type index by varchar(10); + aa SalTabTyp; + begin + aa('a') = 1; + aa('b') = 2; + RAISE INFO '%' ,aa('a'); + RAISE INFO '%' ,aa('b'); +end; +/ +call tableof_18(); +INFO: 1 +INFO: 2 + tableof_18 +------------ + +(1 row) + +-- test not null gram +create or replace procedure tableof_19 + is + TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10); + aa SalTabTyp; + begin + aa('a') = (1, 'zhangsan', 'shanghai'); + RAISE INFO '%' ,aa('a'); +end; +/ +call tableof_19(); +INFO: (1,zhangsan,shanghai) + tableof_19 +------------ + +(1 row) + +-- test assign one attr +create or replace procedure tableof_20 + is + TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10); + aa SalTabTyp; + begin + aa('a') = (1, 'zhangsan', 'shanghai'); + aa('a').id = 1; +end; +/ +call tableof_20(); + tableof_20 +------------ + +(1 row) + +create type info as (name varchar2(50), age int, address varchar2(20), salary float(2)); +create type customer as (id number(10), c_info info); +create table customers (id number(10), c_info info); +insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00)); +insert into customers (id, c_info) values (2, ('Zera' ,25, 'London', 5999.00)); +insert into customers (id, c_info) values (3, ('Alice' ,22, 'Bangkok', 9800.98)); +insert into customers (id, c_info) values (4, ('Jim' ,26, 'Dubai', 18700.00)); +insert into customers (id, c_info) values (5, ('Kevin' ,28, 'Singapore', 18999.00)); +insert into customers (id, c_info) values (6, ('Gauss' ,42, 'Beijing', 32999.00)); +-- test curosor fetch into +create or replace procedure tableof_21 +as +declare + TYPE id_1 is TABLE OF customer.id%type index by varchar(10); + TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10); + CURSOR C1 IS SELECT id FROM customers order by id; + CURSOR C2 IS SELECT c_info FROM customers order by id; + info_a c_info_1:=c_info_1(); + id_a id_1:=id_1(); +begin + OPEN C1; + OPEN C2; + FETCH C1 into id_a(2); + FETCH C2 into info_a(2); + FETCH C1 into id_a(3); + FETCH C2 into info_a(3); + CLOSE C1; + CLOSE C2; + RAISE INFO '%', id_a; + RAISE INFO '%', info_a; 
+end; +/ +call tableof_21(); +INFO: {1,2} +INFO: {"(Vera,32,Paris,22999)","(Zera,25,London,5999)"} + tableof_21 +------------ + +(1 row) + +-- test select into +create or replace procedure tableof_22 +as +declare + TYPE id_1 is TABLE OF customer.id%type index by varchar(10); + TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10); + info_a c_info_1:=c_info_1(); + id_a id_1:=id_1(); +begin + select id into id_a(2) from customers where id = 3; + select c_info into info_a(2) from customers where id = 3; + select id into id_a(3) from customers where id = 4; + select c_info into info_a(3) from customers where id = 4; + RAISE INFO '%', id_a(2); + RAISE INFO '%', info_a(3).age; +end; +/ +call tableof_22(); +INFO: 3 +INFO: 26 + tableof_22 +------------ + +(1 row) + +-- test curosor for +create or replace procedure tableof_23 +as +declare + type c_list is TABLE of customer; + customer_table c_list:=c_list(); + CURSOR C1 IS SELECT * FROM customers order by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_table(counter) := n; + end loop; + RAISE INFO '%', customer_table(3); +end; +/ +call tableof_23(); +INFO: (3,"(Alice,22,Bangkok,9800.98)") + tableof_23 +------------ + +(1 row) + +create or replace procedure tableof_24 +as +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); + CURSOR C1 IS SELECT * FROM customers order by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_table(counter) := n; + end loop; + RAISE INFO '%', customer_table(4); +end; +/ +call tableof_24(); +INFO: (4,"(Jim,26,Dubai,18700)") + tableof_24 +------------ + +(1 row) + +-- test row type +create type typeE as table of s_type%rowtype; +ERROR: relation does not exist when parse word. +DETAIL: relation "s_type" referenced by %ROWTYPE does not exist. +create type typeE as table of customers%rowtype; +create or replace procedure tableof_25 +as +declare + customer_table typeE; + CURSOR C1 IS SELECT * FROM customers order by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_table(counter) := n; + end loop; + RAISE INFO '%', customer_table(4); +end; +/ +call tableof_25(); +INFO: (4,"(Jim,26,Dubai,18700)") + tableof_25 +------------ + +(1 row) + +-- test insert +create or replace procedure tableof_26 +as +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); +begin + customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00)); + customer_table(2) := (8, ('Vera' ,32, 'Paris', 22999.00)); + insert into customers values (customer_table(1).id, customer_table(1).c_info); + insert into customers values (customer_table(2).id, customer_table(2).c_info); +end; +/ +call tableof_26(); + tableof_26 +------------ + +(1 row) + +select * from customers where id = 7; + id | c_info +----+----------------------- + 7 | (Vera,32,Paris,22999) +(1 row) + +-- expect error table[] +create or replace procedure tableof_27 +as +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); +begin + customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00)); + insert into customers values (customer_table[1].id, customer_table[1].c_info); +end; +/ +ERROR: syntax error at or near "[" +LINE 7: insert into customers values (customer_table[1].id, custome... 
+ ^ +QUERY: +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); +begin + customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00)); + insert into customers values (customer_table[1].id, customer_table[1].c_info); +end +-- test deault +declare + type students is table of varchar2(10); + type grades is table of integer; + marks grades := grades(98, 97, 74 + 4, (87), 92, 100); -- batch initialize -- + names students default students('none'); -- default -- + total integer; +begin + names := students(); -- should append NULL then do the coerce -- + names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert -- + total := names.count; + RAISE INFO 'Total % Students', total; + for i in 1 .. total loop + RAISE INFO 'Student: % Marks: %', names(i), marks(i); + end loop; +end; +/ +INFO: Total 6 Students +INFO: Student: Vera Marks: 98 +INFO: Student: Zera Marks: 97 +INFO: Student: Alice Marks: 78 +INFO: Student: Jim Marks: 87 +INFO: Student: Kevin Marks: 92 +INFO: Student: Gauss Marks: 100 +create type mytype as ( + id integer, + biome varchar2(100) +); +create type mytype2 as ( + id integer, + locale myType +); +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.3 is: %', aa(2).locale.biome; +end; +/ +INFO: locale id is: 1 +INFO: biome 1.3 is: water +-- test of uneven brackets -- +-- error out -- +declare + type students is table of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able read all values correctly -- + for i in 1 .. 5 loop + RAISE INFO 'Student: %', names(i]; + end loop; +end; +/ +ERROR: mismatched brackets at or near ";" +LINE 6: RAISE INFO 'Student: %', names(i]; + ^ +QUERY: DECLARE type students is table of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able read all values correctly -- + for i in 1 .. 
5 loop + RAISE INFO 'Student: %', names(i]; + end loop; +end +-- Using composite type defined outside of precedure block -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, (1, 'ground')), + mytype2(1, (2, 'air')) + ); +begin + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.3 is: %', aa(2).locale.biome; +end; +/ +INFO: locale id is: 1 +INFO: biome 1.3 is: water +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := mytype2(2, mytype(3, 'water')); + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.3 is: %', aa(2).locale.biome; +end; +/ +INFO: locale id is: 1 +INFO: biome 1.3 is: water +create type functype as ( + id integer, + locale myType +); +create or replace function functype(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (-1, (1, 'unknown realm')); + return ret; +end; +/ +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + functype(1, mytype(1, 'ground')), -- we are prioritizing types here -- + functype(1, mytype(2, 'air')) + ); +begin + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.2 is: %', aa(2).locale.biome; -- air -- +end; +/ +INFO: locale id is: 1 +INFO: biome 1.2 is: air +-- abandon type functype +drop type functype; +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- here we have to use function functype -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + RAISE INFO 'locale ?? is: %', aa(1).id; + RAISE INFO 'biome ??? is: %', aa(2).locale.biome; -- weird places -- +end; +/ +INFO: locale ?? is: -1 +INFO: biome ??? is: unknown realm +drop function functype; +-- error +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- not sure -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + RAISE INFO 'This message worth 300 tons of gold (once printed).'; +end; +/ +ERROR: function functype(record) does not exist +LINE 1: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_tabl... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+QUERY: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_table_opengauss."mytype2", + (functype((1, ROW(2, 'air'))))::plpgsql_table_opengauss."mytype2"] +CONTEXT: referenced column: array +PL/pgSQL function inline_code_block line 6 during statement block local variable initialization +-- test table of array +declare + type arrayfirst is table(10) of int[]; + arr arrayfirst := arrayfirst(); +begin + +end; +/ +ERROR: syntax error at or near "(" +LINE 1: DECLARE type arrayfirst is table(10) of int[]; + ^ +QUERY: DECLARE type arrayfirst is table(10) of int[]; + arr arrayfirst := arrayfirst(); +begin + +end +create type typeG as (a int[]); +declare + type arrayfirst is table of typeG; + arr arrayfirst := arrayfirst(); +begin + arr(1) = row(ARRAY[1, 2, 3]); + RAISE INFO '%', arr(1).a[1]; +end; +/ +INFO: 1 +-- test unreserved key word +declare + index int; +begin + index = 1; +end; +/ +create or replace package pck1 as + type t1 is record(c1 int,c2 varchar2); + type t2 is table of int; + type t3 is varray(10) of int; + v1 t1; + v2 t2; + v3 t3; + v_c1 int; + v_c2 varchar2; +end pck1; +/ +create or replace package body pck1 as + type t5 is record(c1 int,c2 varchar2); + type t6 is table of int; + type t7 is varray(10) of int; + v5 t5; + v6 t6; + v7 t7; +end pck1; +/ +create or replace function func2() return int as +begin + pck1.v2 :=pck1.t2(); + pck1.v2.extend(3); + pck1.v2(0) := 1; + pck1.v2(1) := 2; + plpgsql_table_opengauss.pck1.v2(2) := 3; + raise info 'pck1.v2(0) is %',pck1.v2(0); + raise info 'pck1.v2(1) is %',pck1.v2(1); + raise info 'plpgsql_table_opengauss.pck1.v2(2) is %',plpgsql_table_opengauss.pck1.v2(2); + return 0; +end; +/ +call func2(); +INFO: pck1.v2(0) is 1 +INFO: pck1.v2(1) is 2 +INFO: plpgsql_table_opengauss.pck1.v2(2) is 3 + func2 +------- + 0 +(1 row) + +drop type typeA; +ERROR: cannot drop type _s_type[] because other objects depend on it +DETAIL: function plpgsql_table_opengauss.tableof_1(_s_type[]) depends on type _s_type[] +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop type typeB; +drop type s_type cascade; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to type _s_type[] +drop cascades to function plpgsql_table_opengauss.tableof_1(_s_type[]) +drop cascades to function tableof_4() +drop cascades to function tableof_5() +drop cascades to function tableof_9() +drop type typeC; +drop type typeE; +drop type typeG; +drop type s_type_extend; +ERROR: cannot drop type s_type_extend because other objects depend on it +DETAIL: type _s_type_extend[] depends on type s_type_extend[] +function tableof_8() depends on type s_type_extend[] +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop type typeA_ext; +drop type info; +ERROR: cannot drop type info because other objects depend on it +DETAIL: composite type customer column c_info depends on type info +table customers column c_info depends on type info +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop type customer; +drop type mytype; +ERROR: cannot drop type mytype because other objects depend on it +DETAIL: composite type mytype2 column locale depends on type mytype +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+drop type mytype2; +drop procedure tableof_1; +ERROR: function tableof_1 does not exist +drop procedure tableof_2; +drop procedure tableof_3; +drop function tableof_6; +drop function tableof_7; +drop function tableof_8; +drop procedure tableof_10; +drop procedure tableof_11; +drop procedure tableof_12; +drop procedure tableof_13; +drop procedure tableof_14; +drop procedure tableof_16; +drop procedure tableof_17; +drop procedure tableof_18; +drop procedure tableof_19; +drop procedure tableof_21; +drop procedure tableof_22; +drop procedure tableof_23; +drop procedure tableof_24; +drop procedure tableof_25; +drop procedure tableof_26; +drop procedure tableof_27; +ERROR: function tableof_27 does not exist +drop table customers; +drop schema if exists plpgsql_table_opengauss cascade; +NOTICE: drop cascades to 19 other objects +DETAIL: drop cascades to table int8_tbl +drop cascades to view tt17v +drop cascades to type _varchar[] +drop cascades to type _varchar[] +drop cascades to type type2 +drop cascades to type type3 +drop cascades to type _type3[] +drop cascades to type type5 +drop cascades to table tycod01 +drop cascades to type tycod02 +drop cascades to table tycod03 +drop cascades to type _tycod01[] +drop cascades to function recordnes23() +drop cascades to type s_type_extend +drop cascades to function tableof_20() +drop cascades to type info +drop cascades to type mytype +--?.* +drop cascades to function func2() diff --git a/src/test/regress/expected/plpgsql_tableof.out b/src/test/regress/expected/plpgsql_tableof.out new file mode 100644 index 000000000..58a723872 --- /dev/null +++ b/src/test/regress/expected/plpgsql_tableof.out @@ -0,0 +1,2067 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_table; +NOTICE: schema "plpgsql_table" does not exist, skipping +create schema plpgsql_table; +set current_schema = plpgsql_table; +set behavior_compat_options='allow_procedure_compile_check'; +create type s_type as ( + id integer, + name varchar, + addr text +); +create type typeA as table of s_type; +create type typeB as table of s_type.id%type; +NOTICE: type reference s_type.id%TYPE converted to integer +create type typeC as table of s_type.name%type; +NOTICE: type reference s_type.name%TYPE converted to character varying +create type typeD as table of varchar(100); +-- test alter +alter type typeA drop ATTRIBUTE s_type; +ERROR: table type does not support alter. +alter type typeA ADD ATTRIBUTE a int; +ERROR: table type does not support alter. 
+create type typeC2 as table of s_type.name%type; +NOTICE: type reference s_type.name%TYPE converted to character varying +alter type typeC2 RENAME TO typeC3; +create or replace procedure tableof_alter() +is +a typeC3; +begin + a(1) = (1, 'zhangsan', 'shanghai'); + RAISE INFO 'call a(1): %' ,a(1); +end; +/ +call tableof_alter(); +INFO: call a(1): (1,zhangsan,shanghai) + tableof_alter +--------------- + +(1 row) + +-- test is +create type s_type_1 is ( + id integer, + name varchar, + addr text +); +create type typeA1 is table of s_type_1; +create type typeB1 is table of s_type_1.id%type; +NOTICE: type reference s_type_1.id%TYPE converted to integer +create type typeC1 is table of s_type_1.name%type; +NOTICE: type reference s_type_1.name%TYPE converted to character varying +create type typeD1 is table of varchar(100); +create or replace function tableof_5_1() + return typeA1 as + a typeA1; + b typeA1; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ +select tableof_5_1(); +INFO: a:{"(1,lisi,beijing)"} +CONTEXT: referenced column: tableof_5_1 + tableof_5_1 +-------------------------------------------- + {"(1,lisi,beijing)","(2,zahngwu,chengdu)"} +(1 row) + +drop procedure tableof_5_1; +-- test as +create or replace procedure tableof_12_1 + is + TYPE SalTabTyp as TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa(0) = 1; + aa(1) = 2; + RAISE INFO '%' ,aa(0); + RAISE INFO '%' ,aa(1); +end; +/ +call tableof_12_1(); +INFO: 1 +INFO: 2 + tableof_12_1 +-------------- + +(1 row) + +create or replace procedure tableof_12_2 + is + TYPE SalTabTyp as TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa(-1) = 1; + aa(2) = 2; + RAISE INFO '%' ,aa(0); + RAISE INFO '%' ,aa(1); + RAISE INFO '%', aa.count; + RAISE INFO '%', aa; +end; +/ +call tableof_12_2(); +INFO: +INFO: +INFO: 2 +INFO: {1,2} + tableof_12_2 +-------------- + +(1 row) + +drop procedure tableof_12_1; +drop procedure tableof_12_2; +drop type s_type_1; +ERROR: cannot drop type s_type_1 because other objects depend on it +DETAIL: type _s_type_1[] depends on type s_type_1[] +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop type typeA1; +drop type typeB1; +drop type typeC1; +drop type typeD1; +-- test table of nest table of error +create type typeF as table of typeD; +ERROR: table type does not support nested table. +-- don't support alter attr +alter type typeA ADD ATTRIBUTE a int; +ERROR: table type does not support alter. 
+-- test in parameter
+create or replace procedure tableof_1(a typeA)
+is
+
+begin
+ RAISE INFO 'a(1): %' ,a(1);
+ a(1) = (2, 'lisi', 'beijing');
+ a(2) = (3, 'zahngwu', 'chengdu');
+end;
+/
+create or replace procedure tableof_2()
+is
+ a typeA;
+begin
+ a(1) = (1, 'zhangsan', 'shanghai');
+ RAISE INFO 'before call a(1): %' ,a(1);
+ perform tableof_1(a);
+ RAISE INFO 'after call a(2): %' ,a(2);
+end;
+/
+call tableof_2();
+INFO: before call a(1): (1,zhangsan,shanghai)
+INFO: a(1): (1,zhangsan,shanghai)
+CONTEXT: referenced column: tableof_1
+SQL statement "SELECT tableof_1(a)"
+PL/pgSQL function tableof_2() line 5 at PERFORM
+INFO: after call a(2):
+ tableof_2
+-----------
+
+(1 row)
+
+-- don't support create type = ()
+create or replace procedure tableof_3
+ is
+ aa typeA = typeA();
+ begin
+ RAISE INFO '%' ,aa;
+end;
+/
+call tableof_3();
+INFO: {}
+ tableof_3
+-----------
+
+(1 row)
+
+-- test return
+create or replace function tableof_4()
+ return typeA as
+ a typeA;
+ begin
+ a(1) = (1, 'lisi', 'beijing');
+ return a;
+end;
+/
+select tableof_4();
+ tableof_4
+----------------------
+ {"(1,lisi,beijing)"}
+(1 row)
+
+create or replace function tableof_4()
+ return typeA as
+ a typeA;
+ begin
+ a(1) = (1, 'lisi', 'beijing');
+ return a;
+end;
+/
+select tableof_4();
+ tableof_4
+----------------------
+ {"(1,lisi,beijing)"}
+(1 row)
+
+create or replace function tableof_5()
+ return typeA as
+ a typeA;
+ b typeA;
+ begin
+ a(1) = (1, 'lisi', 'beijing');
+ b = a;
+ b(2) = (2, 'zahngwu', 'chengdu');
+ RAISE INFO 'a:%' ,a;
+ return b;
+end;
+/
+select tableof_5();
+INFO: a:{"(1,lisi,beijing)"}
+CONTEXT: referenced column: tableof_5
+ tableof_5
+--------------------------------------------
+ {"(1,lisi,beijing)","(2,zahngwu,chengdu)"}
+(1 row)
+
+-- test cast
+create or replace function tableof_6()
+ return typeC as
+ a typeA;
+ b typeC;
+ begin
+ a(1) = (1, 'lisi', 'beijing');
+ b = a;
+ b(2) = (2, 'zahngwu', 'chengdu');
+ RAISE INFO 'a:%' ,a;
+ return b;
+end;
+/
+select tableof_6();
+INFO: a:{"(1,lisi,beijing)"}
+CONTEXT: referenced column: tableof_6
+ tableof_6
+--------------------------------------------
+ {"(1,lisi,beijing)","(2,zahngwu,chengdu)"}
+(1 row)
+
+--test return wrong type
+create or replace function tableof_7()
+ return typeB as
+ a typeA;
+ b typeC;
+ begin
+ a(1) = (1, 'lisi', 'beijing');
+ b = a;
+ b(2) = (2, 'zahngwu', 'chengdu');
+ RAISE INFO 'a:%' ,a;
+ return b;
+end;
+/
+select tableof_7();
+INFO: a:{"(1,lisi,beijing)"}
+CONTEXT: referenced column: tableof_7
+ERROR: invalid input syntax for integer: "(1,lisi,beijing)"
+CONTEXT: PL/pgSQL function tableof_7() while casting return value to function's return type
+referenced column: tableof_7
+-- add one column from s_type
+create type s_type_extend as (
+ id integer,
+ name varchar,
+ addr text,
+ comment varchar
+);
+create type typeA_ext as table of s_type_extend;
+create or replace function tableof_8()
+ return typeA_ext as
+ a typeA;
+ b typeA_ext;
+ begin
+ a(1) = (1, 'lisi', 'beijing');
+ b = a;
+ b(2) = (2, 'zahngwu', 'chengdu','good');
+ RAISE INFO 'a:%' ,a;
+ return b;
+end;
+/
+select tableof_8();
+ERROR: malformed record literal: "(1,lisi,beijing)"
+DETAIL: Too few columns.
+CONTEXT: PL/pgSQL function tableof_8() line 5 at assignment +referenced column: tableof_8 +-- test return index +create or replace function tableof_9() + return typeA as + a typeA; + begin + a(-1) = (1, 'lisi', 'beijing'); + a(2) = (2, 'zahngwu', 'chengdu'); + return a; +end; +/ +select tableof_9(); + tableof_9 +------------------------------------------------------------- + [-1:2]={"(1,lisi,beijing)",NULL,NULL,"(2,zahngwu,chengdu)"} +(1 row) + +create or replace procedure tableof_10() + as + a typeA; + begin + a = tableof_9(); + RAISE INFO 'a(-1):%' ,a(-1); + RAISE INFO 'a(0):%' ,a(0); + RAISE INFO 'a(2):%' ,a(2).id; +end; +/ +call tableof_10(); +INFO: a(-1):(1,lisi,beijing) +INFO: a(0): +INFO: a(2):2 + tableof_10 +------------ + +(1 row) + +create or replace procedure tableof_11() + as + a typeA; + begin + a = tableof_9(); + RAISE INFO 'a(-1):%' ,a(-1); +end; +/ +call tableof_11(); +INFO: a(-1):(1,lisi,beijing) + tableof_11 +------------ + +(1 row) + +-- test index by +create or replace procedure tableof_12 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa('aa') = 1; + aa('bb') = 2; + RAISE INFO '%' ,aa('aa'); + RAISE INFO '%' ,aa('bb'); +end; +/ +call tableof_12(); +ERROR: invalid input syntax for integer: "aa" +CONTEXT: PL/pgSQL function tableof_12() line 4 at assignment +create or replace procedure tableof_13 + is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; + begin + aa('aa') = 1; + aa('bb') = 2; + RAISE INFO '%' ,aa(0); + RAISE INFO '%' ,aa('bb'); +end; +/ +call tableof_13(); +INFO: +INFO: 2 + tableof_13 +------------ + +(1 row) + +create or replace procedure tableof_14 + is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; + b varchar(10); + begin + aa('a') = 1; + b = 'aa'; + aa(b) = 2; + RAISE INFO '%' ,aa('a'); + RAISE INFO '%' ,aa('aa'); + RAISE INFO '%' ,aa(b); +end; +/ +call tableof_14(); +INFO: 1 +INFO: 2 +INFO: 2 + tableof_14 +------------ + +(1 row) + + +create or replace procedure tableof_15 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by date; + aa SalTabTyp; + begin + +end; +/ +ERROR: unsupported table index type +CONTEXT: compilation of PL/pgSQL function "tableof_15" near line 1 +create or replace procedure tableof_15 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by text; + aa SalTabTyp; + begin + +end; +/ +ERROR: unsupported table index type +CONTEXT: compilation of PL/pgSQL function "tableof_15" near line 1 +-- test table = table +create or replace procedure tableof_16 + is + TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + bb SalTabTyp; + begin + aa(-1) = 'b'; + aa(1) = 'a'; + RAISE INFO '%' ,aa(-1); + bb = aa; + RAISE INFO '%' ,bb(-1); + bb(8) = 'g'; + RAISE INFO '%' ,bb(8); + RAISE INFO '%' ,aa(8); + end; +/ +call tableof_16(); +INFO: b +INFO: b +INFO: g +INFO: + tableof_16 +------------ + +(1 row) + +-- test define +create or replace procedure tableof_17 + is + TYPE SalTabTyp is TABLE OF s_type%rowtype index by varchar(10); + aa SalTabTyp; + begin + aa('a') = (1, 'zhangsan', 'shanghai'); + aa('b') = (2, 'lisi', 'beijing'); + RAISE INFO '%' ,aa('a').id; + RAISE INFO '%' ,aa('b'); +end; +/ +call tableof_17(); +INFO: 1 +INFO: (2,lisi,beijing) + tableof_17 +------------ + +(1 row) + +create or replace procedure tableof_18 + is + declare + TYPE SalTabTyp is TABLE OF s_type.id%type index by varchar(10); + aa SalTabTyp; + begin + aa('a') = 1; + aa('b') = 2; + aa = NULL; + RAISE INFO '%' ,aa('a'); + RAISE INFO '%' 
,aa('b');
+ aa('a') = 1;
+ aa('C') = 2;
+ aa('b') = 3;
+ RAISE INFO '%' ,aa;
+end;
+/
+call tableof_18();
+INFO:
+INFO:
+INFO: {1,2,3}
+ tableof_18
+------------
+
+(1 row)
+
+-- test not null grammar
+create or replace procedure tableof_19
+ is
+ TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = (1, 'zhangsan', 'shanghai');
+ RAISE INFO '%' ,aa('a');
+end;
+/
+call tableof_19();
+INFO: (1,zhangsan,shanghai)
+ tableof_19
+------------
+
+(1 row)
+
+-- test assign one attr
+create or replace procedure tableof_20
+ is
+ TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = (1, 'zhangsan', 'shanghai');
+ aa('a').id = 1;
+end;
+/
+call tableof_20();
+ tableof_20
+------------
+
+(1 row)
+
+create type info as (name varchar2(50), age int, address varchar2(20), salary float(2));
+create type customer as (id number(10), c_info info);
+create table customers (id number(10), c_info info);
+insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00));
+insert into customers (id, c_info) values (2, ('Zera' ,25, 'London', 5999.00));
+insert into customers (id, c_info) values (3, ('Alice' ,22, 'Bangkok', 9800.98));
+insert into customers (id, c_info) values (4, ('Jim' ,26, 'Dubai', 18700.00));
+insert into customers (id, c_info) values (5, ('Kevin' ,28, 'Singapore', 18999.00));
+insert into customers (id, c_info) values (6, ('Gauss' ,42, 'Beijing', 32999.00));
+-- test cursor fetch into
+create or replace procedure tableof_21
+as
+declare
+ TYPE id_1 is TABLE OF customer.id%type index by varchar(10);
+ TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10);
+ CURSOR C1 IS SELECT id FROM customers order by id;
+ CURSOR C2 IS SELECT c_info FROM customers order by id;
+ info_a c_info_1:=c_info_1();
+ id_a id_1:=id_1();
+begin
+ OPEN C1;
+ OPEN C2;
+ FETCH C1 into id_a(2);
+ FETCH C2 into info_a(2);
+ FETCH C1 into id_a(3);
+ FETCH C2 into info_a(3);
+ CLOSE C1;
+ CLOSE C2;
+ RAISE INFO '%', id_a;
+ RAISE INFO '%', info_a;
+end;
+/
+call tableof_21();
+INFO: {1,2}
+INFO: {"(Vera,32,Paris,22999)","(Zera,25,London,5999)"}
+ tableof_21
+------------
+
+(1 row)
+
+-- test select into
+create or replace procedure tableof_22
+as
+declare
+ TYPE id_1 is TABLE OF customer.id%type index by varchar(10);
+ TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10);
+ info_a c_info_1:=c_info_1();
+ id_a id_1:=id_1();
+begin
+ select id into id_a(2) from customers where id = 3;
+ select c_info into info_a(2) from customers where id = 3;
+ select id into id_a(3) from customers where id = 4;
+ select c_info into info_a(3) from customers where id = 4;
+ RAISE INFO '%', id_a(2);
+ RAISE INFO '%', info_a(3).age;
+end;
+/
+call tableof_22();
+INFO: 3
+INFO: 26
+ tableof_22
+------------
+
+(1 row)
+
+-- test cursor for
+create or replace procedure tableof_23
+as
+declare
+ type c_list is TABLE of customer;
+ customer_table c_list:=c_list();
+ CURSOR C1 IS SELECT * FROM customers order by id;
+ counter int := 0;
+begin
+ for n in C1 loop
+ counter := counter + 1;
+ customer_table(counter) := n;
+ end loop;
+ RAISE INFO '%', customer_table(3);
+end;
+/
+call tableof_23();
+INFO: (3,"(Alice,22,Bangkok,9800.98)")
+ tableof_23
+------------
+
+(1 row)
+
+create or replace procedure tableof_24
+as
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+ CURSOR C1 IS SELECT * FROM customers order by id;
+ counter int := 0;
+begin
+ for n in C1 loop
+ counter := counter + 1;
+ customer_table(counter) := n;
+ end loop;
+ RAISE INFO '%', customer_table(4);
+end;
+/
+call tableof_24();
+INFO: (4,"(Jim,26,Dubai,18700)")
+ tableof_24
+------------
+
+(1 row)
+
+-- test row type
+create type typeE as table of s_type%rowtype;
+ERROR: relation does not exist when parse word.
+DETAIL: relation "s_type" referenced by %ROWTYPE does not exist.
+create type typeE as table of customers%rowtype;
+create or replace procedure tableof_25
+as
+declare
+ customer_table typeE;
+ CURSOR C1 IS SELECT * FROM customers order by id;
+ counter int := 0;
+begin
+ for n in C1 loop
+ counter := counter + 1;
+ customer_table(counter) := n;
+ end loop;
+ RAISE INFO '%', customer_table(4);
+end;
+/
+call tableof_25();
+INFO: (4,"(Jim,26,Dubai,18700)")
+ tableof_25
+------------
+
+(1 row)
+
+-- test insert
+create or replace procedure tableof_26
+as
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+begin
+ customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00));
+ customer_table(2) := (8, ('Vera' ,32, 'Paris', 22999.00));
+ insert into customers values (customer_table(1).id, customer_table(1).c_info);
+ insert into customers values (customer_table(2).id, customer_table(2).c_info);
+end;
+/
+call tableof_26();
+ tableof_26
+------------
+
+(1 row)
+
+select * from customers where id = 7;
+ id | c_info
+----+-----------------------
+ 7 | (Vera,32,Paris,22999)
+(1 row)
+
+-- expect error table[]
+create or replace procedure tableof_27
+as
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+begin
+ customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00));
+ insert into customers values (customer_table[1].id, customer_table[1].c_info);
+end;
+/
+ERROR: syntax error at or near "["
+LINE 7: insert into customers values (customer_table[1].id, custome...
+ ^
+QUERY:
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+begin
+ customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00));
+ insert into customers values (customer_table[1].id, customer_table[1].c_info);
+end
+-- test default
+declare
+ type students is table of varchar2(10);
+ type grades is table of integer;
+ marks grades := grades(98, 97, 74 + 4, (87), 92, 100); -- batch initialize --
+ names students default students('none'); -- default --
+ total integer;
+begin
+ names := students(); -- should append NULL then do the coercion --
+ names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert --
+ total := names.count;
+ dbe_output.print_line('Total '|| total || ' Students');
+ for i in 1 .. total loop
+ dbe_output.print_line('Student: ' || names(i) || ' Marks: ' || marks(i));
+ end loop;
+end;
+/
+Total 6 Students
+Student: Vera Marks: 98
+Student: Zera Marks: 97
+Student: Alice Marks: 78
+Student: Jim Marks: 87
+Student: Kevin Marks: 92
+Student: Gauss Marks: 100
+create type mytype as (
+ id integer,
+ biome varchar2(100)
+);
+create type mytype2 as (
+ id integer,
+ locale myType
+);
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ mytype2(1, mytype(1, 'ground')),
+ mytype2(1, mytype(2, 'air'))
+ );
+begin
+ aa.extend(10);
+ aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+locale id is: 1
+biome 1.3 is: water
+-- test of uneven brackets --
+-- error out --
+declare
+ type students is table of varchar2(10);
+ names students;
+begin
+ names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+ for i in 1 .. 5 loop
+ dbe_output.print_line('Student: ' || names(i]);
+ end loop;
+end;
+/
+ERROR: mismatched brackets at or near ")"
+LINE 6: dbe_output.print_line('Student: ' || names(i]);
+ ^
+QUERY: DECLARE type students is table of varchar2(10);
+ names students;
+begin
+ names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+ for i in 1 .. 5 loop
+ dbe_output.print_line('Student: ' || names(i]);
+ end loop;
+end
+-- Using composite type defined outside of procedure block --
+declare
+ type finaltype is varray(10) of mytype2;
+ aa finaltype := finaltype(
+ mytype2(1, (1, 'ground')),
+ mytype2(1, (2, 'air'))
+ );
+begin
+ aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+locale id is: 1
+biome 1.3 is: water
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ mytype2(1, mytype(1, 'ground')),
+ mytype2(1, mytype(2, 'air'))
+ );
+begin
+ aa.extend(10);
+ aa(2) := mytype2(2, mytype(3, 'water'));
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+locale id is: 1
+biome 1.3 is: water
+create type functype as (
+ id integer,
+ locale myType
+);
+create or replace function functype(habitat in mytype2)
+return mytype2
+is
+ ret mytype2;
+begin
+ ret := (-1, (1, 'unknown realm'));
+ return ret;
+end;
+/
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ functype(1, mytype(1, 'ground')), -- we are prioritizing types here --
+ functype(1, mytype(2, 'air'))
+ );
+begin
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.2 is: ' || aa(2).locale.biome); -- air --
+end;
+/
+locale id is: 1
+biome 1.2 is: air
+-- abandon type functype
+drop type functype;
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ functype((1, mytype(1, 'ground'))), -- here we have to use function functype --
+ functype((1, mytype(2, 'air')))
+ );
+begin
+ aa.extend(10);
+ dbe_output.print_line('locale ?? is: ' || aa(1).id);
+ dbe_output.print_line('biome ??? is: ' || aa(2).locale.biome); -- weird places --
+end;
+/
+locale ?? is: -1
+biome ??? is: unknown realm
+drop function functype;
+-- error
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ functype((1, mytype(1, 'ground'))), -- not sure --
+ functype((1, mytype(2, 'air')))
+ );
+begin
+ aa.extend(10);
+ dbe_output.print_line('This message worth 300 tons of gold (once printed).');
+end;
+/
+ERROR: function functype(record) does not exist
+LINE 1: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_tabl...
+ ^
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+QUERY: SELECT ARRAY[(functype((1, ROW(1, 'ground'))))::plpgsql_table."mytype2",
+ (functype((1, ROW(2, 'air'))))::plpgsql_table."mytype2"]
+CONTEXT: referenced column: array
+PL/pgSQL function inline_code_block line 6 during statement block local variable initialization
+-- test table of array
+declare
+ type arrayfirst is table(10) of int[];
+ arr arrayfirst := arrayfirst();
+begin
+
+end;
+/
+ERROR: syntax error at or near "("
+LINE 1: DECLARE type arrayfirst is table(10) of int[];
+ ^
+QUERY: DECLARE type arrayfirst is table(10) of int[];
+ arr arrayfirst := arrayfirst();
+begin
+
+end
+create type typeG as (a int[]);
+declare
+ type arrayfirst is table of typeG;
+ arr arrayfirst := arrayfirst();
+begin
+ arr(1) = row(ARRAY[1, 2, 3]);
+ dbe_output.print_line(arr(1).a[1]);
+end;
+/
+1
+-- test unreserved key word
+declare
+ index int;
+begin
+ index = 1;
+end;
+/
+-- test package
+create or replace package aa
+is
+type students is table of int;
+procedure kk();
+end aa;
+/
+create or replace package body aa
+is
+names students;
+procedure kk
+is
+begin
+ names := students(1, 2, 3, 4, 5); -- should be able to read all values correctly --
+ for i in 1 .. 5 loop
+ raise info '%', names[i];
+ end loop;
+end;
+end aa;
+/
+call aa.kk();
+INFO: 1
+INFO: 2
+INFO: 3
+INFO: 4
+INFO: 5
+ kk
+----
+
+(1 row)
+
+drop package if exists aa;
+NOTICE: drop cascades to function plpgsql_table.kk()
+create or replace package pck2 is
+procedure p1;
+type r2 is table of int index by varchar(10);
+va r2;
+end pck2;
+/
+create or replace package body pck2 is
+procedure p1 as
+
+begin
+
+select 11 into va('a');
+select 111 into va('b');
+va('a') := 1111;
+
+raise info '%,', va;
+end;
+end pck2;
+/
+call pck2.p1();
+INFO: {1111,111},
+ p1
+----
+
+(1 row)
+
+call pck2.p1();
+INFO: {1111,111},
+ p1
+----
+
+(1 row)
+
+drop package pck2;
+NOTICE: drop cascades to function plpgsql_table.p1()
+reset current_schema;
+show current_schema;
+ current_schema
+----------------
+ "$user",public
+(1 row)
+
+declare
+ type students is table of plpgsql_table.s_type;
+ a students;
+begin
+ a(1) = (1, 'lisi', 'beijing');
+end;
+/
+set current_schema = plpgsql_table;
+-- test [:]
+declare
+ TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+aa SalTabTyp;
+ begin
+aa(1) = 1;
+aa(2) = 2;
+RAISE INFO '%' ,aa(1);
+RAISE INFO '%' ,aa[1:2];
+end;
+/
+INFO: 1
+ERROR: index by varchar or nested table don't support two subscripts
+CONTEXT: PL/pgSQL function inline_code_block line 7 at RAISE
+-- test [,]
+declare
+ TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+aa SalTabTyp;
+ begin
+aa(1) = 1;
+aa(2) = 2;
+RAISE INFO '%' ,aa(1);
+RAISE INFO '%' ,aa[1,2];
+end;
+/
+INFO: 1
+ERROR: index by varchar or nested table don't support two subscripts
+CONTEXT: PL/pgSQL function inline_code_block line 7 at RAISE
+-- test functions
+declare
+ type b is table of int index by varchar;
+ a b;
+ c bool;
+begin
+ a('a') = 1;
+ a('b') = 2;
+ c = a.exists('b');
+ raise info '%', c;
+end;
+/
+INFO: t
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ c int;
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ c = aa.first;
+end;
+/
+ERROR: invalid input syntax for integer: "a"
+CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ aa.delete;
+end;
+/
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = 
'abcde'; + aa('b') = 'fghij'; + aa.trim; +end; +/ +ERROR: index by varchar type don't support trim function at or near "aa" +LINE 6: aa.trim; + ^ +QUERY: DECLARE TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10); + aa SalTabTyp; + begin + aa('a') = 'abcde'; + aa('b') = 'fghij'; + aa.trim; +end +declare + TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10); + aa SalTabTyp; + c varchar(10); + begin + aa('a') = 'abcde'; + aa('b') = 'fghij'; + c = aa.next(aa.first); +end; +/ +declare + TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10); + aa SalTabTyp; + c varchar(10); + begin + aa('a') = 'abcde'; + aa('b') = 'fghij'; + c = aa.prior('a'); +end; +/ +declare + TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10); + aa SalTabTyp; + c varchar(10); + begin + aa('a') = 'abcde'; + aa('b') = 'fghij'; + c = aa.last; +end; +/ +declare + TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10); + aa SalTabTyp; + c int; + begin + aa('a') = 'abcde'; + RAISE INFO '%', aa.exists('a'); +end; +/ +INFO: t +declare + TYPE SalTabTyp is TABLE OF varchar(10) index by integer; + aa SalTabTyp; + c int; + begin + aa(1) = 'a'; + aa(-1) = 'c'; + aa(2) = 'b'; + raise info '%', aa.next(1); + raise info '%', aa.prior(1); +end; +/ +INFO: 2 +INFO: -1 +declare + type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb(1) := 12; + dbe_output.print_line(tb[1]); +end; +/ +ERROR: "tb" is declared CONSTANT +LINE 4: tb(1) := 12; + ^ +QUERY: DECLARE type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb(1) := 12; + dbe_output.print_line(tb[1]); +end +declare + type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb := ta('12','13'); + dbe_output.print_line(tb[1]); +end; +/ +ERROR: "tb" is declared CONSTANT +LINE 4: tb := ta('12','13'); + ^ +QUERY: DECLARE type ta is table of varchar(100); + tb constant ta := ta('10','11'); +begin + tb := ta('12','13'); + dbe_output.print_line(tb[1]); +end +reset sql_beta_feature ; +create or replace package pcknesttype is + type aa is table of int; + type bb is table of aa; + procedure proc1(); +end pcknesttype; +/ +create or replace package body pcknesttype +is + mytab aa; + my2 bb; +procedure proc1 + is begin + mytab := aa(1,2,3,4); + my2 := bb(mytab); + +end; +end pcknesttype; +/ +create or replace procedure tableof_nest1() +is + type data_type1 is table of s_type index by integer; + type data_table_type1 is table of data_type1 index by integer; + MyTab data_type1; + tmp_y data_type1; + yy data_table_type1; +begin + MyTab(1).id := 1; + MyTab(2).name := 'B'; + MyTab(3).addr := 'addr'; + yy(0) := MyTab; + yy(1)(1).id := 1; + yy(1)(1).name := 'yy'; + RAISE INFO 'call yy: %' ,yy(1)(1); + RAISE INFO 'call yy count: %' ,yy(0).count; + tmp_y := yy(1); + --RAISE INFO 'call yy(1) next: %' ,tmp_y.next(1); + --RAISE INFO 'call yy first: %' ,yy.first; + --RAISE INFO 'call yy next: %' ,yy.next(1); +end; +/ +call tableof_nest1(); +INFO: call yy: (1,yy,) +INFO: call yy count: 1 + tableof_nest1 +--------------- + +(1 row) + +create or replace procedure tableof_nest2() +is + type data_type1 is table of varchar2(100) index by integer; + type data_table_type1 is table of data_type1 index by integer; + MyTab data_type1; + tmp_y data_type1; + yy data_table_type1; +begin + MyTab(1) := 'A'; + MyTab(2) := 'B'; + MyTab(3) := 'C'; + yy(0) := MyTab; + yy(1)(1) := 'o'; + RAISE INFO 'call yy: %' ,yy(1)(1); + RAISE INFO 'call yy count: %' ,yy(0).count; + tmp_y := yy(1); + --RAISE INFO 'call yy(1) next: %' 
,tmp_y.next(1); + --RAISE INFO 'call yy first: %' ,yy.first; + --RAISE INFO 'call yy next: %' ,yy.next(1); +end; +/ +call tableof_nest2(); +INFO: call yy: o +INFO: call yy count: 1 + tableof_nest2 +--------------- + +(1 row) + +create or replace procedure tableof_nest3() +is + type data_type1 is table of varchar2(100) index by varchar2(24); + type data_table_type1 is table of data_type1 index by varchar2(24); + MyTab data_type1; + tmp_y data_type1; + yy data_table_type1; +begin + MyTab('a') := 'A'; + MyTab('b') := 'B'; + MyTab('c') := 'C'; + yy('a') := MyTab; + yy('b')('c') := 'o'; + RAISE INFO 'call yy: %' ,yy('a')('c'); + RAISE INFO 'call yy count: %' ,yy('a').count; + tmp_y := yy('b'); + --RAISE INFO 'call yy next: %' ,tmp_y.next('c'); + --RAISE INFO 'call yy first: %' ,tmp_y.first; + --RAISE INFO 'call yy next: %' ,yy.next('a'); +end; +/ +call tableof_nest3(); +INFO: call yy: C +INFO: call yy count: 1 + tableof_nest3 +--------------- + +(1 row) + +DECLARE + TYPE r1 is TABLE OF int; + type r2 is table of r1; + emp_id r2; +BEGIN + emp_id(1)(1) := 5*7784; + raise info '%,%', emp_id,emp_id(1)(1); +END; +/ +ERROR: Don't print entire nest table of value in raise statement +CONTEXT: PL/pgSQL function inline_code_block line 6 at RAISE +create type type001 as(c1 int,c2 varchar); +create type type002 as(c1 type001,c2 type001.c2%type,c4 int); +NOTICE: type reference type001.c2%TYPE converted to character varying +create type type003 as table of type002; +create type type004 as(c1 type003,c2 int); +create or replace procedure proc_1 as +typecol type004; +begin +typecol.c1(1).c1.c1=1; +typecol.c2=1; +raise info 'typecol %',typecol.c1(1).c1.c1; +raise info 'typecol %',typecol.c2; +raise info 'typecol %',typecol; +end; +/ +call proc_1(); +INFO: typecol 1 +INFO: typecol 1 +INFO: typecol ("{""(\\""(1,)\\"",,)""}",1) + proc_1 +-------- + +(1 row) + +drop type type_nest_23,type_nest_22,type_nest_24,type_nest_25 cascade; +ERROR: type "type_nest_23" does not exist +drop table type_nest_21 cascade; +ERROR: table "type_nest_21" does not exist +create table type_nest_21 (c1 int,c2 text, c3 date); +create type type_nest_22 as(c1 type_nest_21,c2 type_nest_21.c2%type,c3 type_nest_21%rowtype); +NOTICE: type reference type_nest_21.c2%TYPE converted to text +create type type_nest_23 is table of type_nest_22; +create type type_nest_24 is table of type_nest_21; +create type type_nest_25 as(c1 type_nest_21,c2 type_nest_23); +declare + type type1 is varray(6) of varchar2(10); + TYPE type2 is TABLE OF type_nest_21; + TYPE type3 is TABLE OF type2; + TYPE type4 is TABLE OF type3; + vtype5 type3; + vtype6 type_nest_25; +begin + vtype5(1)(1).c2 := 'abc'; + raise info '%', vtype5(1)(1).c2; +end; +/ +INFO: abc +declare + type type1 is varray(6) of varchar2(10); + TYPE type2 is TABLE OF type_nest_21; + TYPE type3 is TABLE OF type2; + vtype6 type3; +begin + vtype6(1)(1)(1).c2 := 'abc'; +end; +/ +ERROR: subscripts list has members more than tableof value vtype6 expected +CONTEXT: PL/pgSQL function inline_code_block line 6 at assignment +declare + TYPE record_user1 is table of type_nest_21; + TYPE record_user2 is table of record_user1; + TYPE record_user3 is table of record_user2; + v_record_user2 record_user2; + v_record_user3 record_user3; +begin + v_record_user2(1) :=1; +end; +/ +ERROR: array value must start with "{" or dimension information +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +declare + TYPE record_user1 is table of type_nest_21; + TYPE record_user2 is table of record_user1; + TYPE record_user3 
is table of record_user2; + v_record_user2 record_user2; + v_record_user3 record_user3; +begin + v_record_user3(1)(1):=1; + v_record_user2(1).c1 :=1; +end; +/ +ERROR: subscripts list has members less than tableof value v_record_user3 expected +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +drop table customers cascade; +NOTICE: drop cascades to type _customers[] +create table customers ( + id number(10) not null, + c_name varchar2(50), + c_age number(8) not null, + c_address varchar2(50), + salary float(2) not null, + constraint customers_pk primary key (id) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "customers_pk" for table "customers" +--test collect in null +declare + cursor c_customers is select c_name from customers order by id; + type c_list is table of customers.c_name%type index by integer; + name_arr c_list := c_list(); +begin + name_arr(2) = (-1, 'Vera' ,32, 'Paris', 22999.00); + name_arr(7) = (-1, 'Vera' ,32, 'Paris', 22999.00); + -- bulk collect + cursor + open c_customers; + fetch c_customers bulk collect into name_arr; + close c_customers; + raise info '%', name_arr.count; + raise info '%', name_arr.last; + raise info '%', name_arr.exists(7); +end; +/ +INFO: 0 +INFO: +INFO: f +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; +begin + id_arr(1) = 1; + raise info '%', id_arr; + select id bulk collect into id_arr from customers order by id DESC; + raise info '%', id_arr; +end; +/ +INFO: {1} +INFO: {} +create type mytype1 as ( + id integer, + biome varchar2(100) +); +-- success, multi target support +declare + type tab is varray(6) of mytype1; + tab1 tab := tab(); +begin + tab1(1) = (1,'a'); + raise info '%', tab1; + select id, c_name bulk collect into tab1 from customers order by id DESC; + raise info '%', tab1; +end; +/ +INFO: {"(1,a)"} +INFO: {} +insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00); +--test bulk collect into +declare + cursor c_customers is select c_name from customers order by id; + type c_list is table of customers.c_name%type index by integer; + name_arr c_list := c_list(); +begin + name_arr(2) = (-1, 'Vera' ,32, 'Paris', 22999.00); + name_arr(7) = (-1, 'Vera' ,32, 'Paris', 22999.00); + -- bulk collect + cursor + open c_customers; + fetch c_customers bulk collect into name_arr; + exit when c_customers%NOTFOUND; + close c_customers; + raise info '%', name_arr.count; + raise info '%', name_arr.last; + raise info '%', name_arr.exists(7); +end; +/ +INFO: 1 +INFO: 1 +INFO: f +insert into customers (id, c_name, c_age, c_address, salary) values (2, '' ,25, 'London', 5999.00); -- a missing value here +insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98); +insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00); +insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00); +--test bulk collect into +declare + cursor c_customers is select c_name from customers order by id; + type c_list is table of customers.c_name%type index by integer; + name_arr c_list := c_list(); +begin + -- bulk collect + cursor + open c_customers; + fetch c_customers bulk collect into name_arr limit 4; + exit when c_customers%NOTFOUND; + close c_customers; + + for i in 1..6 loop + dbe_output.print_line('name(' || i || '): ' 
|| name_arr(i)); + end loop; + + -- assign values directly + name_arr := ARRAY['qqqq', 'sfsafds', 'sadsadas']; + + for i in 1..6 loop + dbe_output.print_line('name(' || i || '): ' || name_arr(i)); + end loop; +end; +/ +name(1): Vera +name(2): +name(3): Alice +name(4): Jim +name(5): +name(6): +name(1): qqqq +name(2): sfsafds +name(3): sadsadas +name(4): +name(5): +name(6): +declare + cursor c_customers is select c_name from customers order by id; + type c_list is table of customers.c_name%type index by varchar; + name_arr c_list := c_list(); +begin + -- bulk collect + cursor + open c_customers; + fetch c_customers bulk collect into name_arr limit 4; + exit when c_customers%NOTFOUND; + close c_customers; +end; +/ +ERROR: index by varchar type name_arr don't support bulk collect into. at or near ";" +LINE 7: fetch c_customers bulk collect into name_arr limit 4; + ^ +QUERY: DECLARE cursor c_customers is select c_name from customers order by id; + type c_list is table of customers.c_name%type index by varchar; + name_arr c_list := c_list(); +begin + -- bulk collect + cursor + open c_customers; + fetch c_customers bulk collect into name_arr limit 4; + exit when c_customers%NOTFOUND; + close c_customers; +end +create table pro_tblof_tbl_018_1(c1 int,c2 varchar(20)); +create table pro_tblof_tbl_018(c1 int,c2 pro_tblof_tbl_018_1); +create type pro_tblof_018 is table of pro_tblof_tbl_018%rowtype; +insert into pro_tblof_tbl_018 values (1,(2,'aaa')); + +create or replace procedure pro_tblof_pro_018_11() +as + tblof001 pro_tblof_018; + cursor cor1 is select c2 from pro_tblof_tbl_018 order by c1 desc; + cursor cor2 is select c1 from pro_tblof_tbl_018 order by c1 desc; + tblcount int; +begin + select count(*) into tblcount from pro_tblof_tbl_018; + for i in 1..tblcount + loop + --open cor1; + -- fetch cor1 bulk collect into tblof001(i).c2; +-- EXIT WHEN cor1%NOTFOUND; + --close cor1; + open cor2; + fetch cor2 bulk collect into tblof001(i).c1; + exit when cor2%notfound; + close cor2; + i=i+1; + end loop; + for i in tblof001.first..tblof001.last + loop + if tblof001(i) is null then + tblof001(i)=tblof001(tblof001.next(i)); + end if; + dbe_output.print_line('tblof001 ('||i||')is '||tblof001(i).c1||'-----'||tblof001(i).c2); + end loop; + raise info 'tblof001 is %',tblof001; +end; +/ +call pro_tblof_pro_018_11(); +ERROR: Unsupported bulk collect into target +DETAIL: Unable to recognize the given bulk collect into target +CONTEXT: PL/pgSQL function pro_tblof_pro_018_11() line 14 at FETCH +create or replace procedure p155() as +type t is table of varchar2 index by integer; +v t; +begin +raise info '%', v.count; +for i in 1..v.count loop +v(i):=0; +end loop; +end; +/ +call p155(); +INFO: 0 + p155 +------ + +(1 row) + +create or replace procedure p156() as +type t is table of varchar2 index by varchar; +v t; +begin +raise info '%', v.count; +for i in 1..v.count loop +v(i):=0; +end loop; +end; +/ +call p156(); +INFO: 0 + p156 +------ + +(1 row) + + create or replace procedure table_column + is + type rec_type is record (name varchar2(100), epno int); + TYPE SalTabTyp as TABLE of rec_type index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa(0).epno = 1; + raise info '%', aa; + select '' into aa(0).name; + raise info '%', aa; +end; +/ +call table_column(); +INFO: {"(,1)"} +INFO: {"(,1)"} + table_column +-------------- + +(1 row) + +create table pkgtbl054(c0 int,c1 number(5),c2 varchar2(20),c3 clob,c4 blob); +insert into pkgtbl054 values(1,1,'varchar1',repeat('clob1',20),'abcdef1'); +insert into pkgtbl054 
values(2,2,'varchar10',repeat('clob2',20),'abcdef2'); +create type type0011 as(c0 int,c1 number(5),c2 varchar2(20),c3 clob,c4 blob); +create or replace package pkg054 +is +type type0011 is table of type0011%rowtype index by varchar2(20); +type type002 is table of type0011.c2%type index by integer; +col1 type0011; +col2 type002; +procedure proc054_1(col3 type0011,col4 type002); +function proc054_2(col5 int) return integer; +end pkg054; +/ +create or replace package body pkg054 +is +procedure proc054_1(col3 type0011,col4 type002) +is +begin +raise info 'col13 is %',col3; +raise info 'col14 is %',col4; +exception + when others then + raise info 'sqlerrm is %',sqlerrm; +end; +function proc054_2(col5 int) return integer +as +begin + col1('1').c0:=128909887; +col1('1').c1:=12345; +col1('2').c2:='var2'; +col1('2').c3:='clobcol1'; +col1('2').c4:='123456'; +col2(1):=col1('2').c2; +col2(3):=col1('1').c3; +raise info 'col1 is %',col1; +raise info 'col2 is %',col2; + proc054_1(col3=>pkg054.col1,col4=>pkg054.col2); +return 1; +end; +end pkg054; +/ +call pkg054.proc054_2(1); +INFO: col1 is {"(128909887,12345,,,)","(,,var2,clobcol1,123456)"} +INFO: col2 is {var2,NULL} +INFO: col13 is {"(128909887,12345,,,)","(,,var2,clobcol1,123456)"} +CONTEXT: SQL statement "CALL proc054_1(col3=>pkg054.col1,col4=>pkg054.col2)" +PL/pgSQL function proc054_2(integer) line 12 at PERFORM +INFO: col14 is {var2,NULL} +CONTEXT: SQL statement "CALL proc054_1(col3=>pkg054.col1,col4=>pkg054.col2)" +PL/pgSQL function proc054_2(integer) line 12 at PERFORM + proc054_2 +----------- + 1 +(1 row) + +create or replace package body pkg054 +is +procedure proc054_1(col3 type0011,col4 type002) +is +begin +raise info 'col13 is %',col3; +raise info 'col14 is %',col4; +exception + when others then + raise info 'sqlerrm is %',sqlerrm; +end; +function proc054_2(col5 int) return integer +as +begin + col1('1').c0:=128909887; +col1('1').c1:=12345; +col1('2').c2:='var2'; +col1('2').c3:='clobcol1'; +col1('2').c4:='123456'; +col2(1):=col1('2').c2; +col2(3):=col1('1').c3; +raise info 'col1 is %',col1; +raise info 'col2 is %',col2; + proc054_1(pkg054.col1,pkg054.col2); +return 1; +end; +end pkg054; +/ +call pkg054.proc054_2(1); +INFO: col1 is {"(128909887,12345,,,)","(,,var2,clobcol1,123456)"} +INFO: col2 is {var2,NULL} +INFO: col13 is {"(128909887,12345,,,)","(,,var2,clobcol1,123456)"} +CONTEXT: SQL statement "CALL proc054_1(pkg054.col1,pkg054.col2)" +PL/pgSQL function proc054_2(integer) line 12 at PERFORM +INFO: col14 is {var2,NULL} +CONTEXT: SQL statement "CALL proc054_1(pkg054.col1,pkg054.col2)" +PL/pgSQL function proc054_2(integer) line 12 at PERFORM + proc054_2 +----------- + 1 +(1 row) + +drop package pkg054; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_table.proc054_1(_type0011[],_varchar[]) +drop cascades to function plpgsql_table.proc054_2(integer) +drop type type_nest_23,type_nest_22,type_nest_24,type_nest_25 cascade; +NOTICE: drop cascades to composite type type_nest_25 column c2 +drop table type_nest_21; +create or replace package pkg049 +is +type type001 is table of number(8) index by varchar2(30); +type type002 is record(c1 type001,c2 varchar2(30)); +function proc049_2(col1 int) return type001; +end pkg049; +/ +ERROR: table of index type is not supported as function return type. 
+DETAIL: N/A +create or replace function tableof_return_1(col1 int) return s_type[] +is +type type001 is table of s_type index by varchar2(30); +a type001; +begin +return a; +end; +/ +ERROR: table of index type is not supported as function return type. at or near ";" +LINE 5: return a; + ^ +QUERY: DECLARE +type type001 is table of s_type index by varchar2(30); +a type001; +begin +return a; +end +create or replace function tableof_return_2(col1 int) return integer +is +type type001 is table of s_type index by varchar2(30); +a type001; +begin +a(1) = (1, 'lisi', 'beijing'); +return a(1).id; +end; +/ +call tableof_return_2(1); + tableof_return_2 +------------------ + 1 +(1 row) + +create or replace function tableof_return_3(col1 int) return integer +is +type type001 is table of integer index by varchar2(30); +a type001; +begin +a(1) = 1; +a(2) = 2; +return a(1); +end; +/ +call tableof_return_3(1); + tableof_return_3 +------------------ + 1 +(1 row) + +-- test varchar as index and text as index +drop table t1; +ERROR: table "t1" does not exist +create table t1 (a varchar2(100), b varchar2(100), c number(10,0), d number(10,0), e number(10,0)); +create or replace package pck1 as +type ra is table of varchar2 index by varchar2(100); +procedure p1 (v01 out ra); +end pck1; +/ +create or replace package body pck1 as +type rb is table of t1 index by varchar; +v02 rb; +v_buff varchar2(1000); +procedure p1(v01 out ra) as +v_value varchar2(200); +v_index varchar2(200); +i int := 1; +begin +v_value := 'testdaa11'||i; +v_index := 'test_' || i; +v02(v_index).a = v_value; +v01(v02(v_index).a) := v_value ||i; +raise info 'v02.a : %', v02(v_index).a; +raise info 'v01.first : %', v01.first(); +raise info 'v01(testdaa111) : %', v01('testdaa111'); +raise info 'v01(v01.first()) : %' ,v01(v01.first()); +raise info 'v01(v02(v_index).a) : %' ,v01(v02(v_index).a); +end; +end pck1; +/ +call pck1.p1(''); +INFO: v02.a : testdaa111 +INFO: v01.first : testdaa111 +INFO: v01(testdaa111) : testdaa1111 +INFO: v01(v01.first()) : testdaa1111 +INFO: v01(v02(v_index).a) : testdaa1111 + v01 +--------------- + {testdaa1111} +(1 row) + +drop package pck1; +NOTICE: drop cascades to function plpgsql_table.p1() +drop table t1; +create table blob1(c1 blob); +create or replace procedure testblob1(count int) +IS +begin +execute immediate 'insert into blob1 values(:p1);' using 1::bit(100)::varchar::blob; +end; +/ +call testblob1(1); + testblob1 +----------- + +(1 row) + +drop table blob1; +-- test table of as out param +create or replace package pck1 as +type r1 is table of int index by int; +type r2 is record(a int, b int); +procedure p1; +procedure p2(va out r2,vb out r1); +procedure p2(vc int, va out r2,vb out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +va r2; +vb r1; +begin +p2(va, vb); +raise info '%',vb.first; +end; +procedure p2(va out r2, vb out r1) as +vc int; +begin +p2(vc,va,vb); +end; +procedure p2(vc int, va out r2, vb out r1) as +begin +va := (1,2); +vb(2) := 2; +end; +end pck1; +/ +call pck1.p1(); +INFO: 2 + p1 +---- + +(1 row) + +drop package pck1; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function plpgsql_table.p1() +drop cascades to function plpgsql_table.p2() +drop cascades to function plpgsql_table.p2(integer) +create or replace package pkgnest002 +as +type ty0 is table of integer index by integer; +type ty1 is table of ty0; +type ty3 is table of ty1 ; +procedure p1; +end pkgnest002; +/ +create or replace package body pkgnest002 +as +procedure p1 +is +v1 
ty0:=ty0(); +v2 ty1:=ty1(); +v21 ty1; +v22 ty3; +v31 ty3:=ty3(); +begin +raise info 'v1 is %',v1; +raise info 'v31 is %',v31; +v1(1):=1; +v1(2):=2; +v2(1):=v1; +v1(5):=5; +v2(2):=v1; +raise info 'v1 is %',v1(1); +raise info 'v2 is %',v2(1); +raise info 'v2 is %',v2(2); +v31(4):=v21; +raise info 'v31(4) is %', v31(4); +v21(1)(1):=-1; +raise info 'v21(1) is %', v21(1); +v21(2)(2):=-2; +v31(3):=v21; +v22:=v31; +raise info 'v31 is %', v31(3)(2); +v22:=v2; +end; +end pkgnest002; +/ +call pkgnest002.p1(); +INFO: v1 is {} +INFO: v31 is {} +INFO: v1 is 1 +INFO: v2 is {1,2} +INFO: v2 is {1,2,5} +INFO: v31(4) is +INFO: v21(1) is {-1} +INFO: v31 is {-2} +ERROR: Nest tableof var v22 assigned is mismatch excepted nest layers. +CONTEXT: PL/pgSQL function p1() line 26 at assignment +call pkgnest002.p1(); +INFO: v1 is {} +INFO: v31 is {} +INFO: v1 is 1 +INFO: v2 is {1,2} +INFO: v2 is {1,2,5} +INFO: v31(4) is +INFO: v21(1) is {-1} +INFO: v31 is {-2} +ERROR: Nest tableof var v22 assigned is mismatch excepted nest layers. +CONTEXT: PL/pgSQL function p1() line 26 at assignment +create or replace package pkgnest_auto +as +type ty1 is table of varchar2(20) index by varchar2; +type ty2 is table of ty1 index by varchar2; +type ty3 is table of ty2 index by varchar2; +function p1() return varchar2(20); +pv1 ty1; +pv2 ty2; +pv3 ty3; +end pkgnest_auto; +/ +create or replace package body pkgnest_auto +as +function p1() return varchar2(20) +is +PRAGMA AUTONOMOUS_TRANSACTION; +begin +pv1(1):=10000; +pv1(2):=20000; +pv1(3):=30000; +pv2(1):=pv1; +pv2(2):=pv1; +pv2(3):=pv1; +pv3(1):=pv2; +pv3(2):=pv2; +pv3(3):=pv2; +return pv6.first; +end; +end pkgnest_auto; +/ +ERROR: Un-support feature: nest tableof variable (null) not support pass through autonm function +CONTEXT: compilation of PL/pgSQL package near line 6 +call pkgnest_auto.p1(); +ERROR: package body not defined +---- clean ---- +drop type s_type; +ERROR: cannot drop type s_type because other objects depend on it +DETAIL: type _s_type[] depends on type s_type[] +function plpgsql_table.tableof_1(_s_type[]) depends on type _s_type[] +function tableof_4() depends on type s_type[] +function tableof_5() depends on type s_type[] +function tableof_9() depends on type s_type[] +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop type typeA; +ERROR: cannot drop type _s_type[] because other objects depend on it +DETAIL: function plpgsql_table.tableof_1(_s_type[]) depends on type _s_type[] +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
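+-- Editor's note: an illustrative sketch (not part of the recorded run),
+-- recapping the pkgnest002 behavior above before cleanup continues:
+-- assignment between nested table-of variables is only accepted when both
+-- sides have the same nesting depth (v22 := v31 succeeds, v22 := v2 errors):
+--   declare
+--     type ty0 is table of integer index by integer;
+--     type ty1 is table of ty0;
+--     a ty1;
+--     b ty1;
+--   begin
+--     a(1)(1) := 42;
+--     b := a;  -- same nesting depth, so this assignment is accepted
+--   end;
+--   /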
+drop type typeB; +drop type s_type cascade; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to type _s_type[] +drop cascades to function plpgsql_table.tableof_1(_s_type[]) +drop cascades to function tableof_4() +drop cascades to function tableof_5() +drop cascades to function tableof_9() +drop type typeC; +drop type typeE; +ERROR: type "typee" does not exist +drop type typeG; +drop type s_type_extend cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to type _s_type_extend[] +drop cascades to function tableof_8() +drop type typeA_ext; +ERROR: type "typea_ext" does not exist +drop type info cascade; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to composite type customer column c_info +--?.* +--?.* +--?.* +--?.* +drop type customer; +drop type mytype cascade; +NOTICE: drop cascades to composite type mytype2 column locale +drop type mytype2; +drop procedure tableof_1; +ERROR: function tableof_1 does not exist +drop procedure tableof_2; +drop procedure tableof_3; +drop function tableof_6; +drop function tableof_7; +drop function tableof_8; +ERROR: function tableof_8 does not exist +drop procedure tableof_10; +drop procedure tableof_11; +drop procedure tableof_12; +drop procedure tableof_13; +drop procedure tableof_14; +drop procedure tableof_16; +drop procedure tableof_17; +drop procedure tableof_18; +drop procedure tableof_19; +drop procedure tableof_21; +drop procedure tableof_22; +drop procedure tableof_23; +drop procedure tableof_24; +drop procedure tableof_25; +drop procedure tableof_26; +drop procedure tableof_27; +ERROR: function tableof_27 does not exist +drop package pcknesttype; +NOTICE: drop cascades to function plpgsql_table.proc1() +drop procedure tableof_nest2; +drop procedure tableof_nest3; +drop table customers; +drop package pkgnest002; +NOTICE: drop cascades to function plpgsql_table.p1() +drop package pkgnest_auto; +NOTICE: drop cascades to function plpgsql_table.p1() +drop schema if exists plpgsql_table cascade; +NOTICE: drop cascades to 24 other objects +DETAIL: drop cascades to type _varchar[] +drop cascades to type _varchar[] +drop cascades to function tableof_alter() +drop cascades to type s_type_1 +drop cascades to function tableof_20() +drop cascades to function tableof_nest1() +drop cascades to type type001 +drop cascades to type type002 +drop cascades to type _type002[] +drop cascades to type type004 +drop cascades to function proc_1() +drop cascades to type mytype1 +drop cascades to table pro_tblof_tbl_018_1 +drop cascades to table pro_tblof_tbl_018 +drop cascades to type _pro_tblof_tbl_018[] +drop cascades to function pro_tblof_pro_018_11() +drop cascades to function p155() +drop cascades to function p156() +drop cascades to function table_column() +drop cascades to table pkgtbl054 +drop cascades to type type0011 +drop cascades to function tableof_return_2(integer) +drop cascades to function tableof_return_3(integer) +drop cascades to function testblob1(integer) diff --git a/src/test/regress/expected/plpgsql_tableofindex.out b/src/test/regress/expected/plpgsql_tableofindex.out new file mode 100644 index 000000000..8323ed452 --- /dev/null +++ b/src/test/regress/expected/plpgsql_tableofindex.out @@ -0,0 +1,1459 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +-- create new schema -- +drop schema if exists plpgsql_tableofindex; +NOTICE: schema "plpgsql_tableofindex" does not exist, skipping +create 
schema plpgsql_tableofindex;
+set current_schema = plpgsql_tableofindex;
+create type s_type as (
+ id integer,
+ name varchar,
+ addr text
+);
+create type typeA as table of s_type;
+-- test 1 parameter - in
+create or replace package pck2 is
+type r2 is table of s_type index by varchar(10);
+procedure p1;
+procedure p2(va r2);
+end pck2;
+/
+create or replace package body pck2 is
+procedure p1 as
+va r2;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+va('c') := (2, 'zhangsanc', 'shanghai');
+p2(va);
+end;
+
+procedure p2(va r2) as
+begin
+raise info 'a:%', va('a');
+raise info 'c:%', va('c');
+end;
+end pck2;
+/
+call pck2.p1();
+INFO: a:(1,zhangsan,shanghai)
+CONTEXT: SQL statement "CALL p2(va)"
+PL/pgSQL function p1() line 6 at PERFORM
+INFO: c:(2,zhangsanc,shanghai)
+CONTEXT: SQL statement "CALL p2(va)"
+PL/pgSQL function p1() line 6 at PERFORM
+ p1
+----
+
+(1 row)
+
+-- test 3 parameter - in
+create or replace package pck3 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(b int, va r2, a int, vb r3);
+end pck3;
+/
+create or replace package body pck3 is
+procedure p1 as
+va r2;
+vb r3;
+b int;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+vb(5) := (10086,'aa','bb');
+vb(233) := (10087,'aa','bb');
+p2(b,va,1,vb);
+end;
+
+procedure p2(b int, va r2, a int, vb r3) as
+begin
+raise info 'va:%', va('a');
+raise info 'vb(233):%', vb(233);
+raise info 'vb:%', vb;
+end;
+end pck3;
+/
+call pck3.p1();
+INFO: va:(1,zhangsan,shanghai)
+CONTEXT: SQL statement "CALL p2(b,va,1,vb)"
+PL/pgSQL function p1() line 9 at PERFORM
+INFO: vb(233):(10087,aa,bb)
+CONTEXT: SQL statement "CALL p2(b,va,1,vb)"
+PL/pgSQL function p1() line 9 at PERFORM
+INFO: vb:{"(10086,aa,bb)","(10087,aa,bb)"}
+CONTEXT: SQL statement "CALL p2(b,va,1,vb)"
+PL/pgSQL function p1() line 9 at PERFORM
+ p1
+----
+
+(1 row)
+
+-- test 1 parameter - out
+create or replace package pck4 is
+type r2 is table of s_type index by varchar(10);
+procedure p1;
+procedure p2(va out r2);
+end pck4;
+/
+create or replace package body pck4 is
+procedure p1 as
+va1 r2;
+begin
+p2(va1);
+raise info '%,', va1('a');
+end;
+
+procedure p2(va out r2) as
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+end;
+end pck4;
+/
+call pck4.p1();
+INFO: (1,zhangsan,shanghai),
+ p1
+----
+
+(1 row)
+
+-- test 3 parameter - out
+create or replace package pck5 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(va out r2, a out int, vb out r3);
+end pck5;
+/
+create or replace package body pck5 is
+procedure p1 as
+va1 r2;
+va2 r3;
+a int;
+begin
+p2(va1, a, va2);
+raise info '%', a;
+raise info '%', va1('a');
+raise info '%', va2(233);
+end;
+
+procedure p2(va out r2, a out int, vb out r3) as
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+vb(233) := (10086,'aa','bb');
+a = 2;
+end;
+end pck5;
+/
+call pck5.p1();
+INFO: 2
+INFO: (1,zhangsan,shanghai)
+INFO: (10086,aa,bb)
+ p1
+----
+
+(1 row)
+
+-- test 3 parameter - out
+create or replace package pck6 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(va in r2, a out int, vb out r3);
+end pck6;
+/
+create or replace package body pck6 is
+procedure p1 as
+va1 r2;
+va2 r3;
+a int;
+begin
+va1('a') := (1, 'zhangsan', 'shanghai');
+p2(va1, a, va2);
+
+raise info '%', a;
+raise info 'va:%', va1;
+raise info '%', va2(233);
+end;
+
+procedure p2(va in r2, a out int, vb out r3) as
+begin
+raise info '%', va('a');
+va('c') := (3, 'zhangsan', 'shanghai');
+vb(233) := (10086,'aa','bb');
+a = 2;
+end;
+end pck6;
+/
+call pck6.p1();
+INFO: (1,zhangsan,shanghai)
+CONTEXT: SQL statement "CALL p2(va1,a,va2)"
+PL/pgSQL function p1() line 7 at SQL statement
+INFO: 2
+INFO: va:{"(1,zhangsan,shanghai)"}
+INFO: (10086,aa,bb)
+ p1
+----
+
+(1 row)
+
+-- test 1 parameter - inout
+create or replace package pck7 is
+type r2 is table of s_type index by varchar(10);
+procedure p1;
+procedure p2(va inout r2);
+end pck7;
+/
+create or replace package body pck7 is
+procedure p1 as
+va r2;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+va('c') := (2, 'zhangsanc', 'shanghai');
+p2(va);
+raise info 'e:%', va('e');
+raise info 'a:%', va('a');
+end;
+
+procedure p2(va inout r2) as
+begin
+raise info 'a:%', va('a');
+raise info 'c:%', va('c');
+va('e') := (3, 'zhangsanc', 'shanghai');
+end;
+end pck7;
+/
+call pck7.p1();
+INFO: a:(1,zhangsan,shanghai)
+CONTEXT: SQL statement "CALL p2(va)"
+PL/pgSQL function p1() line 6 at SQL statement
+INFO: c:(2,zhangsanc,shanghai)
+CONTEXT: SQL statement "CALL p2(va)"
+PL/pgSQL function p1() line 6 at SQL statement
+INFO: e:(3,zhangsanc,shanghai)
+INFO: a:(1,zhangsan,shanghai)
+ p1
+----
+
+(1 row)
+
+-- test 3 parameter - inout
+create or replace package pck8 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(va inout r2, a inout int, vb inout r3);
+end pck8;
+/
+create or replace package body pck8 is
+procedure p1 as
+va r2;
+vb r3;
+a int;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+vb(5) := (10086,'aa','bb');
+p2(va,a,vb);
+raise info 'vb(233):%', vb(233);
+raise info 'va(c):%', va('c');
+end;
+
+procedure p2(va inout r2, a inout int, vb inout r3) as
+begin
+raise info 'va:%', va('a');
+vb(233) := (10087,'aa','bb');
+va('c') := (2, 'zhangsanc', 'shanghai');
+raise info 'vb:%', vb;
+end;
+end pck8;
+/
+call pck8.p1();
+INFO: va:(1,zhangsan,shanghai)
+CONTEXT: SQL statement "CALL p2(va,a,vb)"
+PL/pgSQL function p1() line 8 at SQL statement
+INFO: vb:{"(10086,aa,bb)","(10087,aa,bb)"}
+CONTEXT: SQL statement "CALL p2(va,a,vb)"
+PL/pgSQL function p1() line 8 at SQL statement
+INFO: vb(233):(10087,aa,bb)
+INFO: va(c):(2,zhangsanc,shanghai)
+ p1
+----
+
+(1 row)
+
+-- test parameter - in && out
+create or replace package pck9 is
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(a int, b varchar2, c out varchar2,vb out r3);
+end pck9;
+/
+create or replace package body pck9 is
+procedure p1 as
+vb r3;
+a int;
+b varchar2;
+c varchar2;
+begin
+vb(5) := (10086,'aa','bb');
+a = 1;
+b = 'dddd';
+p2(a,b,c,vb);
+raise info 'c:%', c;
+raise info 'vb(233):%', vb(233);
+end;
+
+procedure p2(a int, b varchar2, c out varchar2,vb out r3) as
+begin
+c = 'aaaa';
+vb(233) := (10087,'aa','bb');
+end;
+end pck9;
+/
+call pck9.p1();
+INFO: c:aaaa
+INFO: vb(233):(10087,aa,bb)
+ p1
+----
+
+(1 row)
+
+drop type if exists type_nest_01 cascade;
+NOTICE: type "type_nest_01" does not exist, skipping
+drop type if exists type_nest_02 cascade;
+NOTICE: type "type_nest_02" does not exist, skipping
+drop type if exists type_nest_03 cascade;
+NOTICE: type "type_nest_03" does not exist, skipping
+declare
+type ty1 is table of integer index by integer;
+v1 ty1 =ty1(1,2,3,45);
+v2 ty1;
+begin
+v2(1):=12345;
+v2(3):=56789;
+raise info 'v1 is %',v1;
+raise info 'v2.first is %',v2.first;
+raise info 'v2.last is %',v2.last;
+raise info 'v2 is %',v2;
+end;
+/
+ERROR: table of index by does not support 
syntax at or near "1" +LINE 3: v1 ty1 =ty1(1,2,3,45); + ^ +QUERY: DECLARE +type ty1 is table of integer index by integer; +v1 ty1 =ty1(1,2,3,45); +v2 ty1; +begin +v2(1):=12345; +v2(3):=56789; +raise info 'v1 is %',v1; +raise info 'v2.first is %',v2.first; +raise info 'v2.last is %',v2.last; +raise info 'v2 is %',v2; +end +declare +type ty1 is table of integer index by integer; +v1 ty1; +v2 ty1; +begin +v1 =ty1(1,2,3,45); +v2(1):=12345; +v2(3):=56789; +raise info 'v1 is %',v1; +raise info 'v2.first is %',v2.first; +raise info 'v2.last is %',v2.last; +raise info 'v2 is %',v2; +end; +/ +ERROR: table of index by does not support syntax at or near "1" +LINE 6: v1 =ty1(1,2,3,45); + ^ +QUERY: DECLARE +type ty1 is table of integer index by integer; +v1 ty1; +v2 ty1; +begin +v1 =ty1(1,2,3,45); +v2(1):=12345; +v2(3):=56789; +raise info 'v1 is %',v1; +raise info 'v2.first is %',v2.first; +raise info 'v2.last is %',v2.last; +raise info 'v2 is %',v2; +end +declare +type ty1 is table of integer index by integer; +v2 ty1=ty1(); +begin +v2(1):=12345; +v2(3):=56789; +raise info 'v2.first is %',v2.first; +raise info 'v2.last is %',v2.last; +raise info 'v2 is %',v2; +end; +/ +INFO: v2.first is 1 +INFO: v2.last is 3 +INFO: v2 is {12345,56789} +declare +type ty1 is table of integer index by integer; +v2 ty1; +begin +v2=ty1(); +v2(1):=12345; +v2(3):=56789; +raise info 'v2.first is %',v2.first; +raise info 'v2.last is %',v2.last; +raise info 'v2 is %',v2; +end; +/ +INFO: v2.first is 1 +INFO: v2.last is 3 +INFO: v2 is {12345,56789} +create type type_nest_01 as (a int, b int); +create type type_nest_02 as (a int, b sch1.type_nest_01); +ERROR: schema "sch1" does not exist +create type type_nest_03 as (c1 int,c2 text, c3 date); +create or replace package pack3 is + type t1 is table of number; + type t2 is table of t1; + + type type01 is table of type_nest_03; + type type02 is table of type01; + type type03 is table of type02; + + type type0001 is record(c1 number(5,2),c2 varchar2(20)); + type type0002 is table of type0001; + type type0003 is table of type0002; + v1 t2; + v2 type03; + v4 type0003; +procedure main(a int, b int); +end pack3; +/ +create or replace package body pack3 is +procedure main(a int, b int) is +begin +v1(a)(1):=1; +v1(a)(2):=2; +v1(2)(1):=3; +v2(1)(1)(1):=(a,'a1','2021-10-01'); +v4(a)(1).c1:=890; +v4(a)(1).c2:='qwe'; +raise info 'v1 is,%',v1(b); +raise info 'v2 is,%',v2(b)(b); +raise info 'v4 is,%',v4(b); +end; +end pack3; +/ +call pack3.main(1,2); +INFO: v1 is,{3} +INFO: v2 is, +INFO: v4 is, + main +------ + +(1 row) + +call pack3.main(2,1); +INFO: v1 is,{1,2} +INFO: v2 is,{"(2,a1,\"Fri Oct 01 00:00:00 2021\")"} +INFO: v4 is,{"(890.00,qwe)"} + main +------ + +(1 row) + +drop package pack3; +NOTICE: drop cascades to function plpgsql_tableofindex.main(integer,integer) +drop type if exists type_nest_01 cascade; +drop type if exists type_nest_02 cascade; +NOTICE: type "type_nest_02" does not exist, skipping +drop type if exists type_nest_03 cascade; +-- test table of index by in Autonomous +create or replace package pck1 as +type r2 is table of s_type index by varchar(10); +procedure p2 (c1 out r2); +end pck1; +/ +create or replace package body pck1 as +procedure p2 (c1 out r2) as +PRAGMA AUTONOMOUS_TRANSACTION; +va int; +begin +null; +end; +end pck1; +/ +ERROR: Autonomous do not support table of index Or record nested tabel of index as in, out args. 
+DETAIL: N/A
+CONTEXT: compilation of PL/pgSQL package near line 4
+drop package pck1;
+NOTICE: drop cascades to function plpgsql_tableofindex.p2()
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+a int;
+v1 ty1;
+begin
+a = 1;
+v1(1) = 1;
+v1(5):=a||v1.first;
+raise info 'v1 is %',v1;
+end;
+/
+INFO: v1 is {1,11}
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+begin
+v1(1) = 1;
+v1(5):= v1(1)||v1.first;
+raise info 'v1 is %',v1;
+end;
+/
+INFO: v1 is {1,11}
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1(1)||v2(2);
+raise info 'v1 is %',v1;
+end
+/
+INFO: v1 is {1,12}
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+begin
+v1(1) = 1;
+v1(5):= v1.first || v1(1);
+raise info 'v1 is %',v1;
+end;
+/
+INFO: v1 is {1,11}
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1(1)||v2.first;
+raise info 'v1 is %',v1;
+end
+/
+ERROR: do not support more than 2 table of index by variables call functions in an expr at or near ";"
+LINE 8: v1(5):= v1(1)||v2.first;
+ ^
+QUERY: DECLARE
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1(1)||v2.first;
+raise info 'v1 is %',v1;
+end
+
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1.first ||v2.first;
+raise info 'v1 is %',v1;
+end
+/
+ERROR: do not support more than 2 table of index by variables call functions in an expr at or near "v2"
+LINE 8: v1(5):= v1.first ||v2.first;
+ ^
+QUERY: DECLARE
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1.first ||v2.first;
+raise info 'v1 is %',v1;
+end
+
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+v3 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v3(3) = 3;
+v1(5):= v1(1)||v2(2)||v3(3);
+raise info 'v1 is %',v1;
+end
+/
+INFO: v1 is {1,123}
+create table tytbl094(c1 int,c2 number(8,1),c3 varchar2(20),c4 date,c5 timestamp,c6 clob,c7 blob);
+create or replace procedure tableof_record_assign_1()
+is
+ type type000 is table of tytbl094%rowtype index by integer;
+ type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000);
+ c1 ta;
+ c2 ta;
+begin
+c1 = c2;
+end;
+/
+ERROR: record nested table of index variable do not support entire assign at or near "="
+LINE 6: c1 = c2;
+ ^
+QUERY: DECLARE type type000 is table of tytbl094%rowtype index by integer;
+ type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000);
+ c1 ta;
+ c2 ta;
+begin
+c1 = c2;
+end
+/* should error, but it is not handled yet.
*/ +create or replace procedure tableof_record_assign_3() +is + type type000 is table of tytbl094%rowtype index by integer; + type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000); + type tb is record(c1 int, c2 ta); + c1 tb; + c2 tb; +begin +c1 = c2; +end; +/ +drop precedure tableof_record_assign_3(); +ERROR: syntax error at or near "precedure" +LINE 1: drop precedure tableof_record_assign_3(); + ^ +create or replace package pck_tableof_record_assign is + type r3 is table of tytbl094 index by integer; + type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 r3); + c1 ta; +procedure p1; +end pck_tableof_record_assign; +/ +create or replace package body pck_tableof_record_assign is +procedure p1 as +c2 ta; +begin +c1 = c2; +end; +end pck_tableof_record_assign; +/ +ERROR: record nested table of index variable do not support entire assign at or near "=" +LINE 4: c1 = c2; + ^ +QUERY: DECLARE +c2 ta; +begin +c1 = c2; +end +drop package pck_tableof_record_assign; +NOTICE: drop cascades to function plpgsql_tableofindex.p1() +declare +type ty1 is table of tytbl094.c1%type index by BINARY_INTEGER; +v1 ty1; +begin +v1(4):=4; +v1(2):=2; +v1(5):=5; +v1.delete(5); +DBE_OUTPUT.PRINT_LINE(v1.first); +DBE_OUTPUT.PRINT_LINE(v1.next(v1.first)); +DBE_OUTPUT.PRINT_LINE(v1.last); +DBE_OUTPUT.PRINT_LINE(v1(5)); +DBE_OUTPUT.PRINT_LINE(v1.count); +end; +/ +2 +4 +4 +2 +create or replace package pkg054 +is +type r2 is table of s_type index by varchar(10); +function proc054_2(col5 r2) return int; +end pkg054; +/ +ERROR: Function do not support table of index Or record nested tabel of index as in, out args. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL package near line 1 +create or replace package pkg065 +is +type type000 is record(c1 int,c2 number,c3 varchar,c4 clob,c5 blob); +type type001 is table of type000 index by integer; +type type002 is table of type000 index by varchar2(20); +procedure proc065_11(col3 type001,col4 out type002); +procedure proc065_1(col3 type001,col4 out type001,col5 out type002); +function proc065_2(col5 int) return int; +end pkg065; +/ +create or replace package body pkg065 +is +procedure proc065_11(col3 type001,col4 out type002) +is +begin +col4(1):=(41,41,'col4c3','col4c4','c4c5'); +col4(3):=(23,23,'col4c3','col4c4','c4c5'); +end; + +procedure proc065_1(col3 type001,col4 out type001,col5 out type002) +is +begin +col4(1):=(441,441,'col44c3','col44c4','c44c5'); +col5('aaa'):=(55,55,'col4c3','col4c4','c4c5'); +end; + +function proc065_2(col5 int) return int +as +c1 type001; +c2 type002; +c3 type002; +begin +proc065_1(c2,c2,c3); +raise info 'c2 is %',c2; +raise info 'c2.first is %',c2.first; +raise info 'c3 is %',c3; +raise info 'c3.first is %',c3.first; +return c1.count; +end; +end pkg065; +/ +ERROR: procedure table of arg types not match at or near "," +LINE 6: proc065_1(c2,c2,c3); + ^ +QUERY: DECLARE +c1 type001; +c2 type002; +c3 type002; +begin +proc065_1(c2,c2,c3); +raise info 'c2 is %',c2; +raise info 'c2.first is %',c2.first; +raise info 'c3 is %',c3; +raise info 'c3.first is %',c3.first; +return c1.count; +end +create or replace package body pkg065 +is +procedure proc065_11(col3 type001,col4 out type002) +is +begin +col4(1):=(41,41,'col4c3','col4c4','c4c5'); +col4(3):=(23,23,'col4c3','col4c4','c4c5'); +end; + +procedure proc065_1(col3 type001,col4 out type001,col5 out type002) +is +begin +col4(1):=(441,441,'col44c3','col44c4','c44c5'); +col5('aaa'):=(55,55,'col4c3','col4c4','c4c5'); +end; + +function proc065_2(col5 int) return int +as +c1 
type001; +c2 type002; +c3 type002; +begin +proc065_1(c1,c2,c3); +raise info 'c2 is %',c2; +raise info 'c2.first is %',c2.first; +raise info 'c3 is %',c3; +raise info 'c3.first is %',c3.first; +return c1.count; +end; +end pkg065; +/ +ERROR: procedure table of arg types not match at or near "," +LINE 6: proc065_1(c1,c2,c3); + ^ +QUERY: DECLARE +c1 type001; +c2 type002; +c3 type002; +begin +proc065_1(c1,c2,c3); +raise info 'c2 is %',c2; +raise info 'c2.first is %',c2.first; +raise info 'c3 is %',c3; +raise info 'c3.first is %',c3.first; +return c1.count; +end +drop package pkg065; +NOTICE: drop cascades to 3 other objects +--?.* +--?.* +drop cascades to function plpgsql_tableofindex.proc065_2(integer) +create or replace package pkg065 +is +type type000 is record(c1 int,c2 number,c3 varchar,c4 clob,c5 blob); +type type001 is table of type000 index by integer; +type type002 is table of type000 index by varchar2(20); +procedure proc065_1(col3 type001,col4 type002); +procedure proc065_1(col3 type001,col4 out type001,col5 out type002); +function proc065_2(col5 int) return int; +end pkg065; +/ +create or replace package body pkg065 +is +procedure proc065_1(col3 type001,col4 type002) +is +begin +col4(1):=(41,41,'col4c3','col4c4','c4c5'); +col4(3):=(23,23,'col4c3','col4c4','c4c5'); +end; + +procedure proc065_1(col3 type001,col4 out type001,col5 out type002) +is +begin +col4(1):=(441,441,'col44c3','col44c4','c44c5'); +col5('aaa'):=(55,55,'col4c3','col4c4','c4c5'); +end; + +function proc065_2(col5 int) return int +as +c1 type001; +c2 type002; +c3 type002; +begin +proc065_1(c1,c2,c3); +raise info 'c2 is %',c2; +raise info 'c2.first is %',c2.first; +raise info 'c3 is %',c3; +raise info 'c3.first is %',c3.first; +return c1.count; +end; +end pkg065; +/ +ERROR: procedure table of arg types not match at or near ";" +LINE 6: proc065_1(c1,c2,c3); + ^ +QUERY: DECLARE +c1 type001; +c2 type002; +c3 type002; +begin +proc065_1(c1,c2,c3); +raise info 'c2 is %',c2; +raise info 'c2.first is %',c2.first; +raise info 'c3 is %',c3; +raise info 'c3.first is %',c3.first; +return c1.count; +end +drop package pkg065; +NOTICE: drop cascades to 3 other objects +--?.* +--?.* +drop cascades to function plpgsql_tableofindex.proc065_2(integer) +create or replace package pkg_nesttb +is + type type001 is table of number(5) index by integer; + type type002 is table of type001 index by integer; + type type003 is table of type002; + type type004 is table of type003; + type type005 is table of type004; + type type006 is table of type005; + pkgvar type006; + procedure checkfunc(); + procedure checkpkgvar(); + procedure checkpkgvar2(); + function checknestset() return integer; +end pkg_nesttb; +/ +create or replace package body pkg_nesttb +is + procedure checkfunc() + is + xx type006; + begin + xx(1)(2)(3)(4)(5)(6):=3; + xx(2)(2)(3)(4)(5)(6):=4; + xx(4)(2)(3)(4)(5)(6):=4; + raise info 'first %', xx.first; + raise info 'last %', xx.last; + raise info 'count %', xx.count; + raise info 'exist %', xx.exists(1); + raise info 'exist % ', xx.exists(5); + raise info 'next %', xx.next(xx.first); + raise info 'prior %', xx.prior(xx.last); + xx.delete(1); + raise info 'count %', xx.count; + raise info 'xx1 %', xx(1)(2)(3)(4)(5)(6); + xx.delete; + raise info 'count %', xx.count; + raise info 'xx %', xx; + end; + function checknestset() return integer + is + xx type002; + yy type002; + begin + xx(1)(2):=3; + xx(3)(2):=4; + yy := xx; + raise info 'xx %', xx(1)(2); + raise info 'yy %', yy(1)(2); + xx(1)(2):=10; + raise info 'yy %', yy(1)(2); + raise 
info 'xx %', xx(1)(2); + raise info 'xx(1) % ', xx('1'); + return 1; + end; + procedure checkpkgvar() + is + xx type006; + begin + xx(2)(2)(4)(2)(2)(3):=9; + raise info 'pkgvar %', pkgvar(2)(2)(4)(2)(2)(3); + pkgvar :=xx; + raise info 'pkgvar %', pkgvar(2)(2)(4)(2)(2)(3); + end; + procedure checkpkgvar2() + as + begin + pkgvar(1)(2)(3)(4)(5)(6):=100; + pkgvar(2)(2)(4)(2)(2)(3):=4; + raise info 'pkgvar %', pkgvar(2)(2)(4)(2)(2)(3); + end; + end pkg_nesttb; + / +create or replace package pkg_nesttb2 +is + type type001 is table of number(5) index by integer; + type type002 is table of type001 index by integer; + type type003 is table of type002; + type type004 is table of type003; + type type005 is table of type004; + type type006 is table of type005; + type type007 is table of type006; + pkgvar type007; +end pkg_nesttb2; +/ +ERROR: Layer number of nest tableof type exceeds the maximum allowed. +DETAIL: Define nest tableof type "type007" layers (7) exceeds the maximum allowed (6). +CONTEXT: compilation of PL/pgSQL package near line 7 +create or replace package pkg_nesttb_char +is + type type001 is table of number(5) index by varchar; + type type002 is table of type001 index by varchar; + type type003 is table of type002 index by varchar; + type type004 is table of type003 index by varchar; + type type005 is table of type004 index by varchar; + type type006 is table of type005 index by varchar; + procedure checkfunc(); + function checknestset() return integer; + procedure checkpkgvar(); +end pkg_nesttb_char; +/ +create or replace package body pkg_nesttb_char +is + procedure checkfunc() + is + xx type006; + begin + xx('a')('b')('c')('c')('x')('z'):=3; + xx('x')('b')('c')('c')('x')('z'):=4; + xx('c')('b')('p')('c')('x')('z'):=4; + raise info 'first %', xx.first; + raise info 'last %', xx.last; + raise info 'count %', xx.count; + raise info 'exist %', xx.exists('a'); + raise info 'exist % ', xx.exists(''); + raise info 'next %', xx.next(xx.first); + raise info 'prior %', xx.prior(xx.last); + xx.delete('x'); + raise info 'count %', xx.count; + raise info 'xx1 %', xx('c')('b')('c')('c')('x')('z'); + xx.delete; + end; + function checknestset() return integer + is + xx type002; + yy type002; + begin + xx('b')('c'):=3; + xx('a')('c'):=4; + yy := xx; + raise info 'xx %', xx('b')('c'); + raise info 'yy %', yy('a')('c'); + xx('a')('c'):=10; + raise info 'yy %', yy('a')('c'); + raise info 'xx %', xx('a')('c'); + raise info 'xx(1) % ', xx('1'); + return 1; + end; + procedure checkpkgvar() + as + begin + raise info 'pkgvar %', pkg_nesttb.pkgvar(2)(2)(4)(2)(2)(3); + end; + end pkg_nesttb_char; + / +call pkg_nesttb.checkfunc(); +INFO: first 1 +INFO: last 4 +INFO: count 3 +INFO: exist t +INFO: exist f +INFO: next 2 +INFO: prior 2 +INFO: count 2 +INFO: xx1 +INFO: count 0 +ERROR: Don't print entire nest table of value in raise statement +CONTEXT: PL/pgSQL function checkfunc() line 18 at RAISE +call pkg_nesttb.checknestset(); +INFO: xx 3 +INFO: yy 3 +INFO: yy 3 +INFO: xx 10 +INFO: xx(1) {10} + checknestset +-------------- + 1 +(1 row) + +call pkg_nesttb_char.checkpkgvar(); +INFO: pkgvar + checkpkgvar +------------- + +(1 row) + +call pkg_nesttb.checkpkgvar(); +INFO: pkgvar +INFO: pkgvar 9 + checkpkgvar +------------- + +(1 row) + +call pkg_nesttb.checkpkgvar2(); +INFO: pkgvar 4 + checkpkgvar2 +-------------- + +(1 row) + +call pkg_nesttb_char.checknestset(); +INFO: xx 3 +INFO: yy 4 +INFO: yy 4 +INFO: xx 10 +INFO: xx(1) + checknestset +-------------- + 1 +(1 row) + +call pkg_nesttb_char.checkfunc(); +INFO: first a 
+INFO: last x +INFO: count 3 +INFO: exist t +INFO: exist f +INFO: next c +INFO: prior c +INFO: count 2 +INFO: xx1 + checkfunc +----------- + +(1 row) + +create or replace package pck20 is +type tp_1 is table of varchar2; +type tp_2 is table of varchar2 index by varchar2; +procedure p1(a tp_1,b int); +procedure p1(a tp_2,b int); +end pck20; +/ +ERROR: function declared duplicate: p1 +create or replace package pck20 is +type tp_1 is table of varchar2; +type tp_2 is table of varchar2 index by varchar2; +procedure p1(a out tp_1,b int); +procedure p1(a out tp_2,b int); +end pck20; +/ +ERROR: function declared duplicate: p1 +set behavior_compat_options='proc_outparam_override'; +create or replace package pck20 is +type tp_1 is table of varchar2; +type tp_2 is table of varchar2 index by varchar2; +procedure p1(a out tp_1,b int); +procedure p1(a out tp_2,b int); +end pck20; +/ +ERROR: function declared duplicate: p1 +create or replace package pck20 is +type tp_1 is table of varchar2; +type tp_2 is table of varchar2 index by varchar2; +procedure p1(a tp_1,b int); +procedure p1(a tp_2,b int); +end pck20; +/ +ERROR: function declared duplicate: p1 +create table tytbl114(c1 varchar2(20),c2 int); +insert into tytbl114 values('aaaaa',1); +insert into tytbl114 values('bbbbb',2); +insert into tytbl114 values('ccccc',3); +insert into tytbl114 values('ddddd',4); +insert into tytbl114 values('eeeee',5); +insert into tytbl114 values('fffff',6); +--I1. out +create or replace package pkg114 +as +type ty0 is record (c1 int,c2 varchar2(20)); +type ty1 is table of ty0 index by integer; +procedure p1(c1 out ty1,c2 out ty1); +procedure p1(c1 out ty1,c2 out ty0); +procedure p4(); +pv1 ty1; +pv2 ty0; +end pkg114; +/ +create or replace package body pkg114 +as +procedure p1(c1 out ty1,c2 out ty1) +is +begin +for i in 1..6 loop +select c2,c1 into c1(i).c1,c1(i).c2 from tytbl114 where c2=i; +end loop; +c2:=c1; +c1.delete(3); +raise info 'c1.count is %',c1.count; +raise info 'c2.count is %',c2.count; +end; +procedure p1(c1 out ty1,c2 out ty0) +is +begin +for i in 1..6 loop +select c2,c1 into c1(i).c1,c1(i).c2 from tytbl114 where c2=i; +end loop; +c2:=c1(1); +raise info 'c1.count is %',c1.count; +raise info 'c2 is %',c2; +end; +procedure p4() +is +v1 ty1; +begin +p1(pv1,v1); +raise info 'pv1 is %',pv1; +p1(pv1,pv2); +raise info 'pv1 is %',pv1; +raise info 'pv2 is %',pv2; +end; +end pkg114; +/ +call pkg114.p4(); +INFO: c1.count is 5 +CONTEXT: SQL statement "CALL p1(pv1,v1)" +PL/pgSQL function p4() line 4 at SQL statement +INFO: c2.count is 6 +CONTEXT: SQL statement "CALL p1(pv1,v1)" +PL/pgSQL function p4() line 4 at SQL statement +INFO: pv1 is {"(1,aaaaa)","(2,bbbbb)","(4,ddddd)","(5,eeeee)","(6,fffff)"} +INFO: c1.count is 6 +CONTEXT: SQL statement "CALL p1(pv1,pv2)" +PL/pgSQL function p4() line 6 at SQL statement +INFO: c2 is (1,aaaaa) +CONTEXT: SQL statement "CALL p1(pv1,pv2)" +PL/pgSQL function p4() line 6 at SQL statement +INFO: pv1 is {"(1,aaaaa)","(2,bbbbb)","(3,ccccc)","(4,ddddd)","(5,eeeee)","(6,fffff)"} +INFO: pv2 is (1,aaaaa) + p4 +---- + +(1 row) + +drop package pkg114; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function plpgsql_tableofindex.p1() +drop cascades to function plpgsql_tableofindex.p1() +drop cascades to function plpgsql_tableofindex.p4() +drop table tytbl114; +create table tytblnest007(c1 int,c2 number(8,1),c3 varchar2(20),c4 date,c5 clob,c6 blob); +insert into tytblnest007 values(1,1,'000',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'000','00123'); +insert into 
tytblnest007 values(2,2.0,'111',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'111','00123'); +insert into tytblnest007 values(3,3.5,'222',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'222','00123'); +insert into tytblnest007 values(4,4.7,'333',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'333','00123'); +insert into tytblnest007 values(5,8.8,'444',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'444','00123'); +insert into tytblnest007 values(6,9.6,'555',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'555','00123'); +create or replace package pkgnest007 +as +type ty0 is table of varchar2(20); +type ty1 is table of ty0 index by integer; +procedure p1(c1 out ty0); +procedure p2(); +pv1 ty1; +pv0 ty0; +end pkgnest007; +/ +create or replace package body pkgnest007 +as +procedure p1(c1 out ty0) +is +begin +for i in 1..6 loop + select c3 into c1(i) from tytblnest007 where c1=i; +end loop; +raise info 'c1 is %',c1; +raise info 'c1.count is %',c1.count; +raise info 'c1.first is %',c1.first; +raise info 'c1.last is %',c1.last; +end; +procedure p2() +is +begin +pv1(1)(1):='11'; +pv1(1)(2):='12'; +pv1(1)(4):='14'; +pv1(3)(0):='30'; +pv1(3)(2):='32'; +pv1(3)(3):='33'; +pv1(6)(4):='64'; +pv1(6)(5):='65'; +pv1(6)(6):='66'; +pv1(2)(1):='21'; +pv1(2)(2):='22'; +for i in pv1.first..pv1.last loop +raise info 'pv1 % is %',i,pv1(i); +end loop; +raise info 'pv1.count is %',pv1.count; +raise info 'pv1.first is %',pv1.first; +raise info 'pv1.first.next is %',pv1.next(pv1.first); +raise info 'pv1.first.next2 is %',pv1.next(pv1.next(pv1.first)); +raise info 'pv1.first.next3 is %',pv1.next(pv1.next(pv1.next(pv1.first))); +raise info 'pv1.last is %',pv1.last; + +raise info 'pv1.first value is %',pv1(pv1.first); +raise info 'pv1.first.next value is %',pv1(pv1.next(pv1.first)); +raise info 'pv1.first.next2 value is %',pv1(pv1.next(pv1.next(pv1.first))); +raise info 'pv1.first.next3 value is %',pv1(pv1.next(pv1.next(pv1.next(pv1.first)))); +pv1.delete; +raise info 'pv1.count is %',pv1.count; +end; +end pkgnest007; +/ +call pkgnest007.p2(); +INFO: pv1 1 is {11,12,NULL,14} +INFO: pv1 2 is {21,22} +INFO: pv1 3 is [0:3]={30,NULL,32,33} +INFO: pv1 4 is +INFO: pv1 5 is +INFO: pv1 6 is [4:6]={64,65,66} +INFO: pv1.count is 4 +INFO: pv1.first is 1 +INFO: pv1.first.next is 2 +INFO: pv1.first.next2 is 3 +INFO: pv1.first.next3 is 6 +INFO: pv1.last is 6 +INFO: pv1.first value is {11,12,NULL,14} +INFO: pv1.first.next value is {21,22} +INFO: pv1.first.next2 value is [0:3]={30,NULL,32,33} +INFO: pv1.first.next3 value is [4:6]={64,65,66} +INFO: pv1.count is 0 + p2 +---- + +(1 row) + +drop package pkgnest007; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_tableofindex.p1() +drop cascades to function plpgsql_tableofindex.p2() +drop table tytblnest007; +create table pkgtbl067067 (c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); +insert into pkgtbl067067 values(1,1,'c365','c365','1122b'); +insert into pkgtbl067067 values(66,66,'c3665','c3665','1122b6'); +create or replace package pkg067067 +is +type type000 is table of pkgtbl067067%rowtype index by integer; +type type001 is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000); +procedure proc067067_1(col3 type001,col4 out type001); +procedure proc067067_1(col3 type001,col4 out type001,col5 out type001); +procedure proc067067_2(col5 int); +end pkg067067; +/ +create or replace package body pkg067067 +is +procedure proc067067_1(col3 type001,col4 out type001) +is +cursor cor is select c1,c2,c3,c4,c5 from 
pkgtbl067067 order by 1; +begin +open cor; +loop +fetch cor into col3.c1,col3.c2,col3.c3,col3.c4,col3.c5; +if col3.c1=1 then +col3.c6(1):=(col3.c1,col3.c2,col3.c3,col3.c4,col3.c5); +else +col3.c6(2):=(col3.c1,col3.c2,col3.c3,col3.c4,col3.c5); +end if; +exit when cor%notfound; +end loop; +raise info 'col3 is %',col3; +raise info 'col3.c5 is %',col3.c5; +raise info 'col3.c6.count is %',col3.c6.count; +col4:=col3; +--raise info 'col3.c6.first is %',col3.c6.first; +end; +procedure proc067067_1(col3 type001,col4 out type001,col5 out type001) +is +begin +col3.c1:=3441; +col3.c2:=3441; +col3.c3:='col344c3'; +col3.c4:='col344c4'; +col3.c5:='3c44c5'; +col3.c6(1):=(3441,3441,'col344c3','col344c4','3c44c5'); +col3.c6(2):=(23441,23441,'col2344c3','col2344c4','3c2344c5'); +col4.c1:=441; +col4.c2:=441; +col4.c3:='col44c3'; +col4.c4:='col44c4'; +col4.c5:='3c44c5'; +col4.c6(1):=(441,441,'col44c3','col44c4','3c44c5'); +col5.c1:=555; +col5.c2:=555; +col5.c3:='var555'; +col5.c4:='clob555'; +col5.c5:='b555'; +col5.c6(1):=(555,555,'var555','clob555','b555'); +end; +procedure proc067067_2(col5 int) +as +c1 type001; +c2 type001; +c3 type001; +begin +c1.c1:=1; +c1.c2:=1; +c1.c3:='c1c3'; +c1.c4:='c1c4'; +c1.c5:='c1c5'; +c1.c6(1):=(66,66,'66var','66clob','66bb'); +c2.c1:=1; +c2.c2:=1; +c2.c3:='c1c3'; +c2.c4:='c1c4'; +c2.c5:='c1c5'; +c2.c6(1):=(66,66,'66var','66clob','66bb'); +c3.c1:=22; +c3.c2:=22; +c3.c3:='varc3'; +c3.c4:='clobc4'; +c3.c5:='bbc5'; +c3.c6(2):=(2222,2222,'nest22','nestclob','bb22'); +proc067067_1(col3=>c1,col4=>c2,col5=>c3); +proc067067_1(col3=>c2,col4=>c3); +end; +end pkg067067; +/ +ERROR: record nested table of index variable do not support entire assign at or near ";" +LINE 24: proc067067_1(col3=>c1,col4=>c2,col5=>c3); + ^ +QUERY: DECLARE +c1 type001; +c2 type001; +c3 type001; +begin +c1.c1:=1; +c1.c2:=1; +c1.c3:='c1c3'; +c1.c4:='c1c4'; +c1.c5:='c1c5'; +c1.c6(1):=(66,66,'66var','66clob','66bb'); +c2.c1:=1; +c2.c2:=1; +c2.c3:='c1c3'; +c2.c4:='c1c4'; +c2.c5:='c1c5'; +c2.c6(1):=(66,66,'66var','66clob','66bb'); +c3.c1:=22; +c3.c2:=22; +c3.c3:='varc3'; +c3.c4:='clobc4'; +c3.c5:='bbc5'; +c3.c6(2):=(2222,2222,'nest22','nestclob','bb22'); +proc067067_1(col3=>c1,col4=>c2,col5=>c3); +proc067067_1(col3=>c2,col4=>c3); +end +drop package pkg067067; +NOTICE: drop cascades to 3 other objects +--?.* +--?.* +drop cascades to function plpgsql_tableofindex.proc067067_2(integer) +drop table pkgtbl067067; +set behavior_compat_options=''; +drop package pkg_nesttb_char; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function plpgsql_tableofindex.checkfunc() +drop cascades to function plpgsql_tableofindex.checknestset() +drop cascades to function plpgsql_tableofindex.checkpkgvar() +drop package pkg_nesttb; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to function plpgsql_tableofindex.checkfunc() +drop cascades to function plpgsql_tableofindex.checknestset() +drop cascades to function plpgsql_tableofindex.checkpkgvar() +drop cascades to function plpgsql_tableofindex.checkpkgvar2() +drop package pck2; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_tableofindex.p1() +drop cascades to function plpgsql_tableofindex.p2(_s_type[]) +drop package pck3; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function plpgsql_tableofindex.p1() +drop cascades to function plpgsql_tableofindex.p2(integer,_s_type[],integer,_s_type[]) +drop package pck4; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function 
plpgsql_tableofindex.p1()
+drop cascades to function plpgsql_tableofindex.p2()
+drop package pck5;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function plpgsql_tableofindex.p1()
+drop cascades to function plpgsql_tableofindex.p2()
+drop package pck6;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function plpgsql_tableofindex.p1()
+drop cascades to function plpgsql_tableofindex.p2(_s_type[])
+drop package pck7;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function plpgsql_tableofindex.p1()
+drop cascades to function plpgsql_tableofindex.p2(_s_type[])
+drop package pck8;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function plpgsql_tableofindex.p1()
+drop cascades to function plpgsql_tableofindex.p2(_s_type[],integer,_s_type[])
+drop package pck9;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to function plpgsql_tableofindex.p1()
+drop cascades to function plpgsql_tableofindex.p2(integer,character varying)
+drop schema if exists plpgsql_tableofindex cascade;
+NOTICE: drop cascades to 4 other objects
+DETAIL: drop cascades to type s_type
+drop cascades to type _s_type[]
+drop cascades to table tytbl094
+drop cascades to function tableof_record_assign_3()
diff --git a/src/test/regress/expected/plsql_show_all_error.out b/src/test/regress/expected/plsql_show_all_error.out
new file mode 100644
index 000000000..dafff71ba
--- /dev/null
+++ b/src/test/regress/expected/plsql_show_all_error.out
@@ -0,0 +1,1685 @@
+set plsql_show_all_error to on;
+truncate DBE_PLDEVELOPER.gs_source;
+truncate DBE_PLDEVELOPER.gs_errors;
+create table vector_to_number_tab_002(COL_TONUM varchar) ;
+insert into vector_to_number_tab_002 values(to_number(+123.456, '9.9EEEE'));
+ERROR: "EEEE" not supported for input
+CONTEXT: referenced column: col_tonum
+CREATE OR REPLACE PACKAGE AA AS
+
+ type ref_cursor IS ref CURSOR;
+
+ PROCEDURE get_info(appinfo OUT ref_cursor);
+
+ PROCEDURE get_info(appname IN varchar2, servinfo OUT ref_cursor);
+
+ PROCEDURE get_switch(appname IN varchar2,
+ switchinfo OUT ref_cursor);
+
+ PROCEDURE get_use(checkers OUT ref_cursor);
+
+ PROCEDURE get_define(bb OUT ref_cursor);
+
+ PROCEDURE get_resource_define(bbOUT ref_cursor);
+
+ PROCEDURE get_bb_info(bbRef OUT ref_cursor);
+
+ PROCEDURE get_resource_info(bbRef OUT ref_cursor);
+END AA;
+/
+select id from dbe_pldeveloper.gs_source;
+ id
+-------
+--?.*
+(1 row)
+
+CREATE OR REPLACE PACKAGE AA AS
+
+ type ref_cursor IS ref CURSOR;
+
+ PROCEDURE get_info(appinfo OUT ref_cursor);
+
+ PROCEDURE get_info(appname IN varchar2, servinfo OUT ref_cursor);
+
+ PROCEDURE get_switch(appname IN varchar2,
+ switchinfo OUT ref_cursor);
+
+ PROCEDURE get_use(checkers OUT ref_cursor);
+
+ PROCEDURE get_define(bb OUT ref_cursor);
+
+ PROCEDURE get_resource_define(bbOUT ref_cursor);
+
+ PROCEDURE get_bb_info(bbRef OUT ref_cursor);
+
+ PROCEDURE get_resource_info(bbRef OUT ref_cursor);
+END AA;
+/
+select id from dbe_pldeveloper.gs_source;
+ id
+-------
+--?.*
+(1 row)
+
+CREATE OR REPLACE PROCEDURE exce_pro2()
+AS
+a int;
+b int;
+BEGIN
+ a:=2/0;
+ EXCEPTION
+ WHEN division_by_zeros THEN
+ insert into t1 value(1);
+END;
+/
+ERROR: unrecognized exception condition "division_by_zeros"
+CONTEXT: compilation of PL/pgSQL function "exce_pro2" near line 5
+create or replace procedure pro1
+as
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end;
+/
+NOTICE: syntax error at or near "t1"
+LINE 3: drop t1; --missing table keyword
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "tables"
+LINE 4: drop tables t1; --misspelled
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "crop"
+LINE 5: crop table t1; --misspelled
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "t2"
+LINE 6: create table t1 t2(c1 int,c2 int);--incorrect table name
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "int"
+LINE 7: create table t1(c1 ,c2 int);--missing data type
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "tables"
+LINE 8: create tables t1(c1 int,c2 int); --wrong line number
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "creat"
+LINE 9: creat table t1(c1 int,c2 int); --no line number marked
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+NOTICE: syntax error at or near "creat"
+LINE 10: creat table (c1 int,c2 int) t1; --wrong order
+ ^
+QUERY: DECLARE
+begin
+drop t1; --missing table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--incorrect table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number marked
+creat table (c1 int,c2 int) t1; --wrong order
+end
+ERROR: Debug mod,create procedure has error.
+DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "pro1" near line 2 +select line,src from dbe_pldeveloper.gs_errors where name='pro1'; + line | src +------+-------------- + 4 | syntax error + 5 | syntax error + 6 | syntax error + 7 | syntax error + 8 | syntax error + 9 | syntax error + 10 | syntax error + 11 | syntax error +(8 rows) + +CREATE OR REPLACE PACKAGE AA is + + + TYPE r_list IS RECORD( + indextype aa_mx_stat_info.indextype%TYPE, + value number); + + TYPE cur_list IS REF CURSOR RETURN r_list; + + TYPE r_avgop_time IS RECORD( + opname aa_mx_stat_opavgtime_info.opname%TYPE, + avgtime number); + + TYPE cur_avgop_time IS REF CURSOR RETURN r_avgop_time; + + aaedure aa_update_value(i_hostname in varchar2, + i_indextype in varchar2, + i_value number); + + aaedure aa_value(o_list OUT cur_list); + + aaedure aa_del_value; + + aaedure aa_avgop_time(o_avgop_time OUT cur_avgop_time); + + aaedure aa_del_avgop_time; + + aaedure aa_update_monitor_flag(i_switchType in varchar2, + i_channelType in varchar2, + i_action in varchar2, + i_modifyuser in varchar2, + i_hostname in varchar2); +end AA; + +/ +ERROR: invalid type name "aa_mx_stat_info.indextype%TYPE" +LINE 2: indextype aa_mx_stat_info.indextype%TYPE, + ^ +QUERY: PACKAGE DECLARE TYPE r_list IS RECORD( + indextype aa_mx_stat_info.indextype%TYPE, + value number); + + TYPE cur_list IS REF CURSOR RETURN r_list; + + TYPE r_avgop_time IS RECORD( + opname aa_mx_stat_opavgtime_info.opname%TYPE, + avgtime number); + + TYPE cur_avgop_time IS REF CURSOR RETURN r_avgop_time; + + aaedure aa_update_value(i_hostname in varchar2, + i_indextype in varchar2, + i_value number); + + aaedure aa_value(o_list OUT cur_list); + + aaedure aa_del_value; + + aaedure aa_avgop_time(o_avgop_time OUT cur_avgop_time); + + aaedure aa_del_avgop_time; + + aaedure aa_update_monitor_flag(i_switchType in varchar2, + i_channelType in varchar2, + i_action in varchar2, + i_modifyuser in varchar2, + i_hostname in varchar2); +end +CONTEXT: compilation of PL/pgSQL package near line 2 +CREATE OR REPLACE PACKAGE AA as + + procedure aa_remove(p_self in out nocopy json, pair_name varchar2); + procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value json_value, position pls_integer default null); + procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value varchar2, position pls_integer default null); + procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value number, position pls_integer default null); + procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value boolean, position pls_integer default null); + procedure aa_check_duplicate(p_self in out nocopy json, v_set boolean); + procedure aa_remove_duplicates(p_self in out nocopy json); + + procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value json, position pls_integer default null); + procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value json_list, position pls_integer default null); + + function aa_count(p_self in json) return number; + function aa_get(p_self in json, pair_name varchar2) return json_value; + function aa_get(p_self in json, position pls_integer) return json_value; + function aa_index_of(p_self in json, pair_name varchar2) return number; + function aa_exist(p_self in json, pair_name varchar2) return boolean; + + function aa_to_char(p_self in json, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure aa_to_clob(p_self in json, buf in out nocopy clob, spaces boolean default 
false, chars_per_line number default 0, erase_clob boolean default true); + procedure aa_print(p_self in json, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure aa_htp(p_self in json, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function aa_to_json_value(p_self in json) return json_value; + function aa_path(p_self in json, json_path varchar2, base number default 1) return json_value; + + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem json_value, base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem varchar2 , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem number , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem boolean , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem json_list , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem json , base number default 1); + + procedure aa_path_remove(p_self in out nocopy json, json_path varchar2, base number default 1); + + function aa_get_values(p_self in json) return json_list; + function aa_get_keys(p_self in json) return json_list; + + --json_list type methods + procedure aa_append(p_self in out nocopy json_list, elem json_value, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem varchar2, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem number, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem boolean, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem json_list, position pls_integer default null); + + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem json_value); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem varchar2); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem number); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem boolean); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem json_list); + + function aa_count(p_self in json_list) return number; + procedure aa_remove(p_self in out nocopy json_list, position pls_integer); + procedure aa_remove_first(p_self in out nocopy json_list); + procedure aa_remove_last(p_self in out nocopy json_list); + function aa_get(p_self in json_list, position pls_integer) return json_value; + function aa_head(p_self in json_list) return json_value; + function aa_last(p_self in json_list) return json_value; + function aa_tail(p_self in json_list) return json_list; + + function aa_to_char(p_self in json_list, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure aa_to_clob(p_self in json_list, buf in out nocopy clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure aa_print(p_self in json_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure aa_htp(p_self in json_list, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function aa_path(p_self in json_list, json_path varchar2, base number default 1) return 
json_value; + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem json_value, base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem varchar2 , base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem number , base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem boolean , base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem json_list , base number default 1); + + procedure aa_path_remove(p_self in out nocopy json_list, json_path varchar2, base number default 1); + + function aa_to_json_value(p_self in json_list) return json_value; + + --json_value + + + function aa_get_type(p_self in json_value) return varchar2; + function aa_get_string(p_self in json_value, max_byte_size number default null, max_char_size number default null) return varchar2; + procedure aa_get_string(p_self in json_value, buf in out nocopy clob); + function aa_get_number(p_self in json_value) return number; + function aa_get_bool(p_self in json_value) return boolean; + function aa_get_null(p_self in json_value) return varchar2; + + function aa_is_object(p_self in json_value) return boolean; + function aa_is_array(p_self in json_value) return boolean; + function aa_is_string(p_self in json_value) return boolean; + function aa_is_number(p_self in json_value) return boolean; + function aa_is_bool(p_self in json_value) return boolean; + function aa_is_null(p_self in json_value) return boolean; + + function aa_to_char(p_self in json_value, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure aa_to_clob(p_self in json_value, buf in out nocopy clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure aa_print(p_self in json_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure aa_htp(p_self in json_value, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function aa_value_of(p_self in json_value, max_byte_size number default null, max_char_size number default null) return varchar2; + + +end AA; +/ +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... 
+ ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "position" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "clob" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... 
+ ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "position" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "clob" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "json_list" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "clob" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +NOTICE: syntax error at or near "clob" +LINE 3: procedure aa_remove(p_self in out nocopy json, pair_name v... + ^ +CONTEXT: compilation of PL/pgSQL package near line 1 +ERROR: Debug mod,create procedure has error. 
+DETAIL: N/A
+CONTEXT: compilation of PL/pgSQL package near line 1
+CREATE OR REPLACE PACKAGE aa AS
+
+ null_as_empty_string boolean not null := true;
+ include_dates boolean not null := true;
+ include_clobs boolean not null := true;
+ include_blobs boolean not null := false;
+
+ function executeList(stmt varchar2, bindvar json default null, cur_num number default null) return json_list;
+
+ function executeObject(stmt varchar2, bindvar json default null, cur_num number default null) return json;
+
+ function executeList2(stmt varchar2, bindvar json default null, cur_num NUMBER default null) return json_list;
+
+
+end aa;
+/
+ERROR: type "json_list" does not exist
+CREATE OR REPLACE PACKAGE AA IS
+ TYPE r_menu_list IS RECORD(
+ MENU_FLAG VARCHAR2(2),
+ MENU_num CTP_MENU.num%TYPE,
+ MENU_MINGZI CTP_MENU_NLS.MINGZI%TYPE,
+ MENU_STATUS CTP_MENU.STATUS%TYPE,
+ MENU_PARENT CTP_MENU.Parent_num%TYPE,
+ MENU_PRIVILAGE VARCHAR2(2),
+ MENU_SERIALNO CTP_MENU.Serialno%TYPE,
+ menu_LONG varchar2(2)
+ );
+ TYPE r_AA IS RECORD(
+ AA_num CTP_AA.num%TYPE,
+ AA_MINGZI CTP_AA_NLS.MINGZI%TYPE,
+ AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE,
+ AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE,
+ AA_AA_LONG CTP_AA.AA_LONG%TYPE,
+ AA_BRANCH_num CTP_AA.BRANCH_num%TYPE,
+ AA_AA_LONG_ADMIN CTP_AA.AA_LONG_ADMIN%TYPE,
+ AA_BRANCH_num_ADMIN CTP_AA.BRANCH_num_ADMIN%TYPE,
+ AA_AA_CATEGORY CTP_AA.AA_CATEGORY%TYPE,
+ AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE,
+ AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE,
+ AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE,
+ AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE,
+ AA_PRIVILEGE CTP_AA.PRIVILEGE%TYPE,
+ AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE,
+ AA_BRANCH_LONG CTP_BRANCH.BRANCH_LONG%TYPE
+ );
+
+ TYPE r_AA_list IS RECORD(
+ AA_num CTP_AA.num%TYPE,
+ AA_MINGZI CTP_AA_NLS.MINGZI%TYPE,
+ AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE,
+ AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE,
+ AA_AA_LONG CTP_AA.AA_LONG%TYPE,
+ AA_BRANCH_num CTP_AA.BRANCH_num%TYPE,
+ AA_AA_LONG_ADMIN CTP_AA.AA_LONG_ADMIN%TYPE,
+ AA_BRANCH_num_ADMIN CTP_AA.BRANCH_num_ADMIN%TYPE,
+ AA_AA_CATEGORY CTP_AA.AA_CATEGORY%TYPE,
+ AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE,
+ AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE,
+ AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE,
+ AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE,
+ AA_PRIVILEGE CTP_AA.PRIVILEGE%TYPE,
+ AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE,
+ AA_BRANCH_LONG CTP_BRANCH.BRANCH_LONG%TYPE,
+ AA_USERNO VARCHAR2(5)
+
+ );
+ TYPE r_pos_AA_list IS RECORD(
+ AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE,
+ AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE,
+ AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE,
+ AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE,
+ AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE,
+ AA_num CTP_AA.num%TYPE,
+ AA_MINGZI CTP_AA_NLS.MINGZI%TYPE,
+ AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE,
+ AA_AA_LONG CTP_AA.AA_LONG%TYPE,
+ AA_BRANCH_num CTP_AA.BRANCH_num%TYPE,
+ AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE
+ );
+
+ TYPE ref_AA IS REF CURSOR RETURN r_AA;
+ TYPE ref_AA_list IS REF CURSOR RETURN r_AA_list;
+ TYPE ret_pos_AA_list IS REF CURSOR RETURN r_pos_AA_list;
+ TYPE c_menu_list IS REF CURSOR RETURN r_menu_list;
+
+ FUNCTION aa_LG_AA_GETUSERNUM(
+ AAnum IN VARCHAR2
+ )
+ RETURN INTEGER;
+
+ PROCEDURE aa_LG_AA_GETBRANCHDEP( branchnum IN VARCHAR2, --branch code
+ out_flag OUT VARCHAR2, --procedure return flag
+ branchDep OUT number --current branch level depth
+ ) ;
+
+ PROCEDURE aa_LG_AA_QUERYUSERNUM(
+ AAnum IN VARCHAR2,
+ out_flag OUT VARCHAR2,
+ usr_num OUT number);
+
+ PROCEDURE
aa_LG_AA_MODAAMENU( + privilege IN VARCHAR2, + menunum IN VARCHAR2, + AAnum IN VARCHAR2, + out_flag OUT VARCHAR2); + + PROCEDURE aa_LG_AA_GETAAMENU( + AALONG IN VARCHAR2, + branchnum IN VARCHAR2, + AAnum IN VARCHAR2, + Language IN VARCHAR2, + out_flag OUT VARCHAR2, + o_menu_list OUT c_menu_list); + + PROCEDURE aa_LG_AA_DELETEAA(AAnum IN VARCHAR2, + out_flag OUT VARCHAR2 + ); + + + + PROCEDURE aa_LG_AA_UPDATEAA(AALONG IN VARCHAR2, + usernum IN VARCHAR2, + priAll IN VARCHAR2, + priSelf IN VARCHAR2, + priOther IN VARCHAR2, + AAnum IN VARCHAR2, + AAMINGZI IN VARCHAR2, + AADes IN VARCHAR2, + Language IN VARCHAR2, + out_flag OUT VARCHAR2 + ) ; + + PROCEDURE aa_LG_AA_ADDAA( AAnum IN VARCHAR2, + branchnum IN VARCHAR2, + AALONG IN VARCHAR2, + usernum IN VARCHAR2, + priSelf IN VARCHAR2, + priAll IN VARCHAR2, + priOther IN VARCHAR2, + AAMINGZI IN VARCHAR2, + AADes IN VARCHAR2, + Language IN VARCHAR2, + out_flag OUT VARCHAR2 + ) ; + + PROCEDURE aa_LG_AA_GETAALIST(branchnum IN VARCHAR2, + branchLONG IN VARCHAR2, + languageCode IN VARCHAR2, + begNum IN VARCHAR2, + fetchNum IN VARCHAR2, + out_flag OUT VARCHAR2, + totalNum OUT VARCHAR2, + ret_AA_list OUT ret_pos_AA_list + ); + + PROCEDURE aa_LG_AA_SEAAABYMINGZI(branchnum IN VARCHAR2, + branchLONG IN VARCHAR2, + languageCode IN VARCHAR2, + begNum IN VARCHAR2, + fetchNum IN VARCHAR2, + keyword IN VARCHAR2, + out_flag OUT VARCHAR2, + o_totalNum OUT VARCHAR2, + ret_AA_list OUT ret_pos_AA_list + ); + + PROCEDURE LOG(proc_MINGZI IN VARCHAR2, + info IN VARCHAR2); +END AA; +/ +ERROR: invalid type name "CTP_MENU.num%TYPE" +LINE 3: MENU_num CTP_MENU.num%TYPE, + ^ +QUERY: PACKAGE DECLARE TYPE r_menu_list IS RECORD( + MENU_FLAG VARCHAR2(2), + MENU_num CTP_MENU.num%TYPE, + MENU_MINGZI CTP_MENU_NLS.MINGZI%TYPE, + MENU_STATUS CTP_MENU.STATUS%TYPE, + MENU_PARENT CTP_MENU.Parent_num%TYPE, + MENU_PRIVILAGE VARCHAR2(2), + MENU_SERIALNO CTP_MENU.Serialno%TYPE, + menu_LONG varchar2(2) + ); + TYPE r_AA IS RECORD( + AA_num CTP_AA.num%TYPE, + AA_MINGZI CTP_AA_NLS.MINGZI%TYPE, + AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE, + AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE, + AA_AA_LONG CTP_AA.AA_LONG%TYPE, + AA_BRANCH_num CTP_AA.BRANCH_num%TYPE, + AA_AA_LONG_ADMIN CTP_AA.AA_LONG_ADMIN%TYPE, + AA_BRANCH_num_ADMIN CTP_AA.BRANCH_num_ADMIN%TYPE, + AA_AA_CATEGORY CTP_AA.AA_CATEGORY%TYPE, + AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE, + AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE, + AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE, + AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE, + AA_PRIVILEGE CTP_AA.PRIVILEGE%TYPE, + AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE, + AA_BRANCH_LONG CTP_BRANCH.BRANCH_LONG%TYPE + ); + + TYPE r_AA_list IS RECORD( + AA_num CTP_AA.num%TYPE, + AA_MINGZI CTP_AA_NLS.MINGZI%TYPE, + AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE, + AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE, + AA_AA_LONG CTP_AA.AA_LONG%TYPE, + AA_BRANCH_num CTP_AA.BRANCH_num%TYPE, + AA_AA_LONG_ADMIN CTP_AA.AA_LONG_ADMIN%TYPE, + AA_BRANCH_num_ADMIN CTP_AA.BRANCH_num_ADMIN%TYPE, + AA_AA_CATEGORY CTP_AA.AA_CATEGORY%TYPE, + AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE, + AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE, + AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE, + AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE, + AA_PRIVILEGE CTP_AA.PRIVILEGE%TYPE, + AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE, + AA_BRANCH_LONG CTP_BRANCH.BRANCH_LONG%TYPE, + AA_USERNO VARCHAR2(5) + + ); + TYPE r_pos_AA_list IS RECORD( + AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE, + AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE, + 
AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE, + AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE, + AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE, + AA_num CTP_AA.num%TYPE, + AA_MINGZI CTP_AA_NLS.MINGZI%TYPE, + AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE, + AA_AA_LONG CTP_AA.AA_LONG%TYPE, + AA_BRANCH_num CTP_AA.BRANCH_num%TYPE, + AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE + ); + + TYPE ref_AA IS REF CURSOR RETURN r_AA; + TYPE ref_AA_list IS REF CURSOR RETURN r_AA_list; + TYPE ret_pos_AA_list IS REF CURSOR RETURN r_pos_AA_list; + TYPE c_menu_list IS REF CURSOR RETURN r_menu_list; + + FUNCTION aa_LG_AA_GETUSERNUM( + AAnum IN VARCHAR2 + ) + RETURN INTEGER; + + PROCEDURE aa_LG_AA_GETBRANCHDEP( branchnum IN VARCHAR2, --机构编码 + out_flag OUT VARCHAR2, --存储过程返回标志 + branchDep OUT number --当前机构层级深度 + ) ; + + PROCEDURE aa_LG_AA_QUERYUSERNUM( + AAnum IN VARCHAR2, + out_flag OUT VARCHAR2, + usr_num OUT number); + + PROCEDURE aa_LG_AA_MODAAMENU( + privilege IN VARCHAR2, + menunum IN VARCHAR2, + AAnum IN VARCHAR2, + out_flag OUT VARCHAR2); + + PROCEDURE aa_LG_AA_GETAAMENU( + AALONG IN VARCHAR2, + branchnum IN VARCHAR2, + AAnum IN VARCHAR2, + Language IN VARCHAR2, + out_flag OUT VARCHAR2, + o_menu_list OUT c_menu_list); + + PROCEDURE aa_LG_AA_DELETEAA(AAnum IN VARCHAR2, + out_flag OUT VARCHAR2 + ); + + + + PROCEDURE aa_LG_AA_UPDATEAA(AALONG IN VARCHAR2, + usernum IN VARCHAR2, + priAll IN VARCHAR2, + priSelf IN VARCHAR2, + priOther IN VARCHAR2, + AAnum IN VARCHAR2, + AAMINGZI IN VARCHAR2, + AADes IN VARCHAR2, + Language IN VARCHAR2, + out_flag OUT VARCHAR2 + ) ; + + PROCEDURE aa_LG_AA_ADDAA( AAnum IN VARCHAR2, + branchnum IN VARCHAR2, + AALONG IN VARCHAR2, + usernum IN VARCHAR2, + priSelf IN VARCHAR2, + priAll IN VARCHAR2, + priOther IN VARCHAR2, + AAMINGZI IN VARCHAR2, + AADes IN VARCHAR2, + Language IN VARCHAR2, + out_flag OUT VARCHAR2 + ) ; + + PROCEDURE aa_LG_AA_GETAALIST(branchnum IN VARCHAR2, + branchLONG IN VARCHAR2, + languageCode IN VARCHAR2, + begNum IN VARCHAR2, + fetchNum IN VARCHAR2, + out_flag OUT VARCHAR2, + totalNum OUT VARCHAR2, + ret_AA_list OUT ret_pos_AA_list + ); + + PROCEDURE aa_LG_AA_SEAAABYMINGZI(branchnum IN VARCHAR2, + branchLONG IN VARCHAR2, + languageCode IN VARCHAR2, + begNum IN VARCHAR2, + fetchNum IN VARCHAR2, + keyword IN VARCHAR2, + out_flag OUT VARCHAR2, + o_totalNum OUT VARCHAR2, + ret_AA_list OUT ret_pos_AA_list + ); + + PROCEDURE LOG(proc_MINGZI IN VARCHAR2, + info IN VARCHAR2); +END +CONTEXT: compilation of PL/pgSQL package near line 3 +CREATE OR REPLACE PACKAGE AA +IS +a int; +end AA; +/ +CREATE OR REPLACE PACKAGE BODY AA IS + FUNCTION AA_BB_GETCCNUM( + BBId IN VARCHAR2 + ) + RETURN INTEGER IS + temp INTEGER; + BEGIN + SELECT count(*) INTO temp FROM DD_BB_CC_REL WHERE BB_ID=BBId; + RETURN temp; + EXCEPTION + WHEN OTHERS THEN + log('AA_BB_getCCnum()',SQLERRM(SQLCODE)); + RETURN -1; + END; + + PROCEDURE AA_BB_GETBRANCHDEP( branchId IN VARCHAR2, --机构编码 + out_flag OUT VARCHAR2, --存储过程返回标志 + branchDep OUT number --当前机构层级深度 + ) IS + maxLevel number; + curLevel number; + BEGIN + out_flag := '0'; + SELECT MAX(branch_Level) INTO maxLevel FROM DD_BRANCH; + SELECT branch_Level INTO curLevel FROM DD_BRANCH a where a.id=branchId; + branchDep := maxLevel-curLevel; + EXCEPTION + WHEN OTHERS THEN + log('AA_BB_deleteBB()',SQLERRM(SQLCODE)); + out_flag := '-1'; + rollback; + RETURN; + END; + + + PROCEDURE AA_BB_QUERYCCNUM(BBId IN VARCHAR2, --角色编码 + out_flag OUT VARCHAR2,--存储过程返回标志 + usr_num OUT number --关联用户数 + ) IS + BEGIN + out_flag := '0'; + usr_num:=AA_BB_getCCnum(BBId); + RETURN; + 
EXCEPTION
+ WHEN OTHERS THEN
+ log('AA_BB_queryCCnum()',SQLERRM(SQLCODE));
+ out_flag := '-1';
+ rollback;
+ RETURN;
+ END;
+
+
+ PROCEDURE AA_BB_MODBBMENU(privilege IN VARCHAR2, --menu privilege
+ menuId IN VARCHAR2, --menu code
+ BBId IN VARCHAR2, --role code
+ out_flag OUT VARCHAR2 --procedure return flag
+ ) IS
+ v_id VARCHAR2 (50);
+ v_flag VARCHAR2 (8);
+
+ BEGIN
+ out_flag := '0';
+ SELECT nvl(SUBSTR(menuId,1,INSTR(menuId,'M')-1),menuId) INTO v_id FROM dual;
+ SELECT SUBSTR(menuId,1,1) INTO v_flag FROM dual;
+ delete from DD_BB_MENU_REL where BB_ID=BBId and MENU_ID=v_id;
+ delete from DD_BB_ITEM_REL where BB_ID=BBId and ITEM_ID=v_id;
+ update DD_BB_CC_REL set MENUCHG_FLAG='1' where BB_ID=BBId;
+
+
+ IF privilege!='-1' THEN
+ IF v_flag ='M' THEN
+ insert into DD_BB_MENU_REL(BB_ID,MENU_ID,PRIVILEGE) VALUES(BBId,v_id,privilege);
+ END IF;
+ IF v_flag='I'THEN
+ insert into DD_BB_ITEM_REL(BB_ID,ITEM_ID,PRIVILEGE) VALUES(BBId,v_id,privilege);
+ END IF;
+ END IF;
+ commit;
+ RETURN;
+ EXCEPTION
+ WHEN OTHERS THEN
+ log('AA_BB_modifyBBmenu()',SQLERRM(SQLCODE));
+ out_flag := '-1';
+ rollback;
+ RETURN;
+ END;
+
+ PROCEDURE AA_BB_GETBBMENU(BBLevel IN VARCHAR2, --role level
+ branchId IN VARCHAR2, --branch code
+ BBId IN VARCHAR2, --role code
+ Language IN VARCHAR2, --language code
+ out_flag OUT VARCHAR2, --procedure return flag
+ o_menu_list OUT c_menu_list --menu list
+ ) IS
+ BEGIN
+ out_flag := '0';
+ OPEN o_menu_list FOR
+select 'M' flag,
+ menu.menuId menuId,
+ menu_nls.default_label menuName,
+ menu.menuState menuState,
+ menu.menuParentId menuParentId,
+ menu.menuPrivilege menuPrivilege,
+ menu.menCCialNo menCCialNo,
+ level blevel
+ from (select menuId, menuState, menuParentId, menuPrivilege, menCCialNo
+ from (select distinct a.id menuId,
+ F.name menuName,
+ a.status menuState,
+ a.parent_id menuParentId,
+ '-1' menuPrivilege,
+ a.serialNo menCCialNo
+ from DD_MENU a, DD_MENU_NLS F
+ start with a.id in
+ (SELECT distinct b.menu_id
+ FROM DD_MENU_ITEM_REL b
+ WHERE b.item_id in
+ (SELECT c.ID
+ FROM DD_ITEM c
+ WHERE c.status = '1'
+ and func_DD_lg_canBeUsedByBB(branchId,
+ BBLevel,
+ c.item_level,
+ c.item_branch_id) = '1'))
+ connect by a.id = prior a.parent_id
+ and f.locale = Language
+ and a.id = f.id)
+ where menuId not in (select d.menu_id
+ from DD_BB_menu_REL d
+ WHERE d.BB_ID = BBId)
+ union
+ select menuId, menuState, menuParentId, e.Privilege, menCCialNo
+ from (select distinct a.id menuId,
+ F.name menuName,
+ a.status menuState,
+ a.parent_id menuParentId,
+ '-1' menuPrivilege,
+ a.serialNo menCCialNo
+ from DD_MENU a, DD_MENU_NLS F
+ start with a.id in
+ (SELECT distinct b.menu_id
+ FROM DD_MENU_ITEM_REL b
+ WHERE b.item_id in
+ (SELECT c.ID
+ FROM DD_ITEM c
+ WHERE (c.status = '1' and
+ func_DD_lg_canBeUsedByBB(branchId,
+ BBLevel,
+ c.item_level,
+ c.item_branch_id) = '1')))
+ connect by a.id = prior a.parent_id
+ and f.locale = Language
+ and a.id = f.id) d,
+ DD_BB_MENU_rel e
+ where e.BB_id = BBId
+ and e.menu_id = d.menuId) menu,
+ DD_menu_nls menu_nls
+ where menu_nls.locale = Language
+ and menu.menuId = menu_nls.id
+CONNECT BY PRIOR MENUID = MENUPARENTID
+ START WITH MENUPARENTID IS NULL
+UNION
+select 'I' flag,
+ itemId,
+ itemName,
+ itemState,
+ menuId,
+ itemPrivilege,
+ itemSerialNo,
+ menu.blevel + 1 blevel
+ from (select distinct a.item_id itemId,
+ d.DEFAULT_LABEL itemName,
+ b.STATUS itemState,
+ a.menu_id menuId,
+ '-1' itemPrivilege,
+ a.serialno itemSerialNo
+ from DD_menu_item_rel a, DD_item b, DD_item_nls d
+ where b.status = '1'
+ and d.locale = Language
+ and b.id = d.id
+ and func_DD_lg_canBeUsedByBB(branchId,
+
BBLevel,
+ b.item_Level,
+ b.item_Branch_Id) = '1'
+ and b.id not in (select c.item_id
+ from DD_BB_ITEM_REL c
+ WHERE c.BB_ID = BBId)
+ and b.id = a.item_id
+ union
+ select distinct a.item_id itemId,
+ d.DEFAULT_LABEL itemName,
+ b.STATUS itemState,
+ a.menu_id menuId,
+ c.privilege itemPrivilege,
+ a.serialno itemSerialNo
+ from DD_menu_item_rel a,
+ DD_item b,
+ DD_item_nls d,
+ DD_BB_item_rel c
+ where b.status = '1'
+ and d.locale = Language
+ and b.id = d.id
+ and func_DD_lg_canBeUsedByBB(branchId,
+ BBLevel,
+ b.item_Level,
+ b.item_Branch_Id) = '1'
+ and c.BB_id = BBId
+ and c.item_id = b.id
+ and b.id = a.item_id) item,
+ (select t.id, level blevel
+ from DD_menu t
+ start with t.parent_id is null
+ connect by t.parent_id = prior t.id) menu
+ where item.menuid = menu.id;
+
+ RETURN;
+ EXCEPTION
+ WHEN OTHERS THEN
+ if o_menu_list%isopen
+ then
+ close o_menu_list;
+ end if;
+ log('AA_BB_getBBmenu()',SQLERRM(SQLCODE));
+ out_flag := '-1';
+ RETURN;
+ END;
+
+
+ PROCEDURE AA_BB_DELETEBB(BBId IN VARCHAR2, --role code
+ out_flag OUT VARCHAR2 --procedure return flag
+ ) IS
+ BEGIN
+ out_flag := '0';
+ DELETE FROM DD_BB_CC_REL WHERE BB_ID=BBId;
+ DELETE FROM DD_BB_ITEM_REL WHERE BB_ID=BBId;
+ DELETE FROM DD_BB_MENU_REL WHERE BB_ID=BBId;
+ DELETE FROM DD_BB_NLS WHERE ID=BBId;
+ DELETE FROM DD_BB WHERE ID=BBId;
+ --commit;
+ EXCEPTION
+ WHEN OTHERS THEN
+ log('AA_BB_deleteBB()',SQLERRM(SQLCODE));
+ out_flag := '-1';
+ rollback;
+ RETURN;
+ END;
+
+
+ PROCEDURE AA_BB_UPDATEBB(BBLevel IN VARCHAR2, --role level
+ CCId IN VARCHAR2, --user code
+ priAll IN VARCHAR2, --all-branches flag
+ priSelf IN VARCHAR2, --own-branch flag
+ priOther IN VARCHAR2, --maintainable subordinate branch levels
+ BBId IN VARCHAR2, --role code
+ BBName IN VARCHAR2, --role name
+ BBDes IN VARCHAR2, --role description
+ Language IN VARCHAR2, --language code
+ out_flag OUT VARCHAR2 --procedure return flag
+ ) IS
+ tmp_num number;
+
+ BEGIN
+ out_flag := '0';
+
+ UPDATE DD_BB SET BB_LEVEL=BBLevel,LST_MODI_TIME=TO_CHAR(SYSDATE, 'YYYYMMDD'),LST_MODI_CC_ID=CCId,PRIVILEGE_ALL=priAll,PRIVILEGE_SELF=priSelf,PRIVILEGE_OTHER=priOther WHERE ID=BBId;
+
+ -- UPDATE DD_BB_NLS SET NAME=BBName,DESCRIPTION=BBDes WHERE ID=BBId and LOCALE=Language;
+ select count(*)
+ into tmp_num
+ from DD_BB_NLS r
+ where r.id = BBId
+ AND r.locale = Language;
+
+ if (tmp_num = 0) then
+ INSERT INTO DD_BB_NLS
+ (ID,NAME,DESCRIPTION,LOCALE)
+ VALUES
+ (BBId,
+ BBName,
+ BBDes,
+ Language);
+ else
+ UPDATE DD_BB_NLS SET NAME=BBName,DESCRIPTION=BBDes WHERE ID=BBId and LOCALE=Language;
+ end if;
+ --COMMIT;
+ EXCEPTION
+ WHEN OTHERS THEN
+ log('AA_BB_updateBB()',SQLERRM(SQLCODE));
+ out_flag := '-1';
+ ROLLBACK;
+ RETURN;
+ END;
+
+
+ PROCEDURE AA_BB_ADDBB( BBId IN VARCHAR2, --role code
+ branchId IN VARCHAR2, --branch code
+ BBLevel IN VARCHAR2, --role level
+ CCId IN VARCHAR2, --user code
+ priSelf IN VARCHAR2, --own-branch flag
+ priAll IN VARCHAR2, --all-branches flag
+ priOther IN VARCHAR2, --maintainable subordinate branch levels
+ BBName IN VARCHAR2, --role name
+ BBDes IN VARCHAR2, --role description
+ Language IN VARCHAR2, --language code
+ out_flag OUT VARCHAR2 --procedure return flag
+ ) IS
+BBCount NUMBER;
+ BEGIN
+ out_flag := '0';
+
+ SELECT count(*)
+ INTO BBCount
+ FROM DD_BB a
+ WHERE a.id = BBId;
+
+ IF BBCount != 0 THEN --the role ID already exists
+ out_flag := '-1';
+ RETURN;
+ END IF;
+
+ INSERT INTO DD_BB(ID,BRANCH_ID,BB_LEVEL,LST_MODI_TIME,LST_MODI_CC_ID,PRIVILEGE_SELF,PRIVILEGE_ALL,PRIVILEGE_OTHER) VALUES(BBId,branchId,BBLevel,TO_CHAR(SYSDATE, 'YYYYMMDD'),CCId,priSelf,priAll,priOther);
+
+ INSERT INTO DD_BB_NLS(ID,NAME,DESCRIPTION,LOCALE) VALUES(BBId,BBName,BBDes,Language);
+
+ --COMMIT;
+ EXCEPTION
+ WHEN OTHERS THEN
+ PCKG_DD_LG_PUBLIC.log('AA_BB_addBB()',SQLERRM(SQLCODE));
+ out_flag := '-2';
+ ROLLBACK;
+ RETURN;
+ END;
+
+ PROCEDURE AA_BB_GETBBLIST(branchId IN VARCHAR2, --branch code
+ branchLevel IN VARCHAR2, --branch level
+ languageCode IN VARCHAR2, --language code
+ begNum IN VARCHAR2, --first row to fetch
+ fetchNum IN VARCHAR2, --number of rows to fetch
+ out_flag OUT VARCHAR2, --procedure return flag
+ totalNum OUT VARCHAR2, --total rows fetched
+ ret_BB_list OUT ret_pos_BB_list--role list
+ )IS
+
+ BEGIN
+ out_flag := '0';
+
+ SELECT COUNT(*)
+ INTO totalNum
+ FROM DD_BB A
+ WHERE(a.BRANCH_ID=branchId
+ or (substr(a.BB_level,branchLevel,1) ='1'
+ and branchId in
+ (select d.id
+ from DD_branch d start with d.id=a.BRANCH_ID
+ connect by prior d.id=d.parent_id)));
+
+ OPEN ret_BB_list FOR
+ SELECT lastModifyCC,
+ lastModifyDate,
+ priAll,
+ priSelf,
+ priOther,
+ BBId,
+ BBName,
+ BBDes,
+ BBLevel,
+ BBBranchId,
+ BBBranchName
+ FROM
+ (SELECT lastModifyCC,
+ lastModifyDate,
+ priAll,
+ priSelf,
+ priOther,
+ BBId,
+ BBName,
+ BBDes,
+ BBLevel,
+ BBBranchId,
+ BBBranchName,
+ ROWNUM row_id
+ FROM
+ (SELECT DISTINCT A.LST_MODI_CC_ID lastModifyCC,
+ A.LST_MODI_TIME lastModifyDate,
+ A.PRIVILEGE_ALL priAll,
+ A.PRIVILEGE_SELF priSelf,
+ A.PRIVILEGE_OTHER priOther,
+ a.ID BBId,
+ a.NAME BBName,
+ a.DESCRIPTION BBDes,
+ a.BB_LEVEL BBLevel,
+ a.BRANCH_ID BBBranchId,
+ c.name BBBranchName,
+ ROWNUM ROW_ID
+ FROM (select e.LST_MODI_CC_ID,
+ e.LST_MODI_TIME,
+ e.PRIVILEGE_ALL,
+ e.PRIVILEGE_SELF,
+ e.PRIVILEGE_OTHER,
+ e.ID,
+ f.NAME,
+ f.DESCRIPTION,
+ e.BB_LEVEL,
+ e.BRANCH_ID
+ from DD_BB e
+ left outer join DD_BB_nls f on e.id = f.id
+ and f.locale = languageCode) A,DD_BRANCH_NLS c, (select b.id
+ from DD_branch b
+ start with b.id = branchId
+ connect by b.id = prior b.parent_id) tempb
+ WHERE (a.BRANCH_ID = branchId
+ or
+ substr(a.BB_level, branchLevel, 1) = '1')
+ AND C.ID = A.BRANCH_ID
+ AND tempb.id = a.branch_Id
+ AND C.LOCALE = languageCode
+ order by BBId asc)
+ WHERE ROW_ID =to_number(begNum);
+
+
+ EXCEPTION
+ WHEN OTHERS THEN
+ if ret_BB_list%isopen
+ then
+ close ret_BB_list;
+ end if;
+ log('AA_BB_getBBlist()',SQLERRM(SQLCODE));
+ -- out_flag := SQLERRM(SQLCODE);
+ out_flag := '-1';
+ RETURN;
+ END;
+
+
+ PROCEDURE AA_BB_SEABBBYNAME(branchId IN VARCHAR2, --branch code
+ branchLevel IN VARCHAR2, --branch level
+ languageCode IN VARCHAR2, --language code
+ begNum IN VARCHAR2, --first row to fetch
+ fetchNum IN VARCHAR2, --number of rows to fetch
+ keyword IN VARCHAR2, --role name keyword
+ out_flag OUT VARCHAR2, --procedure return flag
+ o_totalNum OUT VARCHAR2, --total rows fetched
+ ret_BB_list OUT ret_pos_BB_list --role list
+ ) IS
+ v_BBName varchar2(20);
+ BEGIN
+ out_flag := '0';
+
+ v_BBName := keyword;
+ if(v_BBName is null) then
+ v_BBName := '%';
+ end if;
+
+ SELECT COUNT(*) INTO o_totalNum
+ FROM DD_BB A,DD_BB_NLS B
+ where B.NAME like '%'||v_BBName||'%'
+ AND A.ID = B.ID
+ AND B.LOCALE=languageCode;
+
+ OPEN ret_BB_list FOR
+ SELECT lastModifyCC,
+ lastModifyDate,
+ priAll,
+ priSelf,
+ priOther,
+ BBId,
+ BBName,
+ BBDes,
+ BBLevel,
+ BBBranchId,
+ BBBranchName
+ FROM (SELECT lastModifyCC,
+ lastModifyDate,
+ priAll,
+ priSelf,
+ priOther,
+ BBId,
+ BBName,
+ BBDes,
+ BBLevel,
+ BBBranchId,
+ BBBranchName
+ FROM (SELECT lastModifyCC,
+ lastModifyDate,
+ priAll,
+ priSelf,
+ priOther,
+ BBId,
+ BBName,
+ BBDes,
+ BBLevel,
+ BBBranchId,
+ BBBranchName,
+ ROWNUM row_id
+ FROM (SELECT DISTINCT A.LST_MODI_CC_ID lastModifyCC,
+ A.LST_MODI_TIME lastModifyDate,
+ A.PRIVILEGE_ALL priAll,
+ A.PRIVILEGE_SELF priSelf,
+ A.PRIVILEGE_OTHER priOther,
+ a.ID BBId,
+ a.NAME BBName,
+ a.DESCRIPTION BBDes,
+ a.BB_LEVEL BBLevel,
+ a.BRANCH_ID BBBranchId,
+ c.name BBBranchName,
+ ROWNUM ROW_ID
+ FROM (select e.LST_MODI_CC_ID,
+
e.LST_MODI_TIME, + e.PRIVILEGE_ALL, + e.PRIVILEGE_SELF, + e.PRIVILEGE_OTHER, + e.ID, + f.NAME, + f.DESCRIPTION, + e.BB_LEVEL, + e.BRANCH_ID + from DD_BB e + left outer join DD_BB_nls f on e.id = f.id + and f.locale = languageCode) A,DD_BRANCH_NLS c, (select b.id + from DD_branch b + start with b.id = branchId + connect by b.id = prior b.parent_id) tempb + WHERE (a.BRANCH_ID = branchId + or + substr(a.BB_level, branchLevel, 1) = '1') + AND C.ID = A.BRANCH_ID + AND tempb.id = a.branch_Id + AND C.LOCALE = languageCode + order by BBId asc) + WHERE ROW_ID =to_number(begNum)) BBSet + WHERE BBSet.BBName LIKE '%'||v_BBName||'%'; + + EXCEPTION + WHEN OTHERS THEN + if ret_BB_list%isopen + then + close ret_BB_list; + end if; + log('DD_proc_BB_getBBlist()',SQLERRM(SQLCODE)); + out_flag := '-1'; + RETURN; + END; + + + PROCEDURE log(proc_name IN VARCHAR2, + info IN VARCHAR2) IS + PRAGMA AUTONOMOUS_TRANSACTION; + time_str VARCHAR2(100); + BEGIN + --IF check_log_on THEN + SELECT to_char(SYSDATE,'mm - dd - yyyy hh24 :mi :ss') + INTO time_str + FROM dual; + INSERT INTO DD_proc_log + VALUES + (proc_name, time_str, info); + COMMIT; + --END IF; + RETURN; + END; + +END AA ; +/ +ERROR: relation "dd_bb_cc_rel" does not exist +LINE 3: SELECT count(*) INTO temp FROM DD_BB_CC_REL WHERE BB_ID... + ^ +DETAIL: +QUERY: DECLARE temp INTEGER; + BEGIN + SELECT count(*) INTO temp FROM DD_BB_CC_REL WHERE BB_ID=BBId; + RETURN temp; + EXCEPTION + WHEN OTHERS THEN + log('AA_BB_getCCnum()',SQLERRM(SQLCODE)); + RETURN -1; + END +create or replace procedure test_pro1 +as +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end; +/ +NOTICE: "v_cur" is not a known variable +LINE 5: open v_cur for select c1,c2 from tab1; + ^ +QUERY: DECLARE +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end +ERROR: open cursor error +LINE 5: open v_cur for select c1,c2 from tab1; + ^ +QUERY: DECLARE +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end +create table if not exists tb(eno int); +create or replace procedure test_pro2 +as +type t1 is table of tb.eno%type; +v1 t1; +begin +forall i in 1 .. v1.count save exceptions + insert into tb values v1(i); +end; +/ +ERROR: invalid type's rel tuple for insert. at or near ";" +LINE 6: insert into tb values v1(i); + ^ +QUERY: DECLARE +type t1 is table of tb.eno%type; +v1 t1; +begin +forall i in 1 .. v1.count save exceptions + insert into tb values v1(i); +end +drop table if exists tb; +drop procedure if exists test_pro1; +NOTICE: function test_pro1() does not exist, skipping +create table t1(c1 int, c2 int); +create or replace package pack2 is +procedure pro1(); +procedure pro2(); +end pack2; +/ +create or replace package body pack2 is +procedure pro1 +as +begin +update table t1 set c1=1 and c2=1; +end; +procedure pro2 +as +begin +update table t1 set c1=1 and c2=1; +end; +end pack2; +/ +NOTICE: syntax error at or near "table" when compile function pro1() +LINE 3: update table t1 set c1=1 and c2=1; + ^ +DETAIL: syntax error +QUERY: DECLARE +begin +update table t1 set c1=1 and c2=1; +end +NOTICE: syntax error at or near "table" when compile function pro2() +LINE 3: update table t1 set c1=1 and c2=1; + ^ +DETAIL: syntax error +QUERY: DECLARE +begin +update table t1 set c1=1 and c2=1; +end +ERROR: Debug mod,create procedure has error. 
+DETAIL: N/A +drop table t1; +drop package pack2; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function public.pro1() +drop cascades to function public.pro2() +create or replace function f(v int[]) return int +as +n int; +begin +n := v(); +return n; +end; +/ +NOTICE: empty index at or near ")" +LINE 4: n := v(); + ^ +QUERY: DECLARE +n int; +begin +n := v(); +return n; +end +ERROR: Debug mod,create procedure has error. +DETAIL: N/A +CONTEXT: compilation of PL/pgSQL function "f" near line 3 +create or replace PACKAGE z_pk2 +AS + PROCEDURE pro(p1 int); +END z_pk2; +/ +create or replace PACKAGE BODY z_pk2 +AS + p1 int := 1; + p2 int := 1 ; + PROCEDURE pro() + AS + p2 int; + BEGIN + select 1 into p2; + END; +END z_pk2; +/ +ERROR: Function definition not found: pro +create or replace PACKAGE package_020 +AS + PROCEDURE pro1(p1 int,p2 int ,p3 VARCHAR2(5) ,p4 out int); + PROCEDURE pro2(p1 int,p2 out int,p3 inout varchar(20)); +END package_020; +/ +create or replace PACKAGE body package_020 +AS + PROCEDURE pro1(p1 int,p2 int ,p3 ,p4 out int ) + as + BEGIN + p4 := 0; + if p3 = '+' then + p4 := p1 + p2; + end if; + + if p3 = '-' then + p4 := p1 - p2; + end if; + + if p3 = '*' then + p4 := p1 * p2; + end if; + + if p3 = '/' then + p4 := p1 / p2; + end if; + END; + PROCEDURE pro2(p1 int,p2 out int,p3 inout varchar(20)) + AS + BEGIN + p2 := p1; + p3 := p1 ||'___a'; + END; +END package_020; +/ +ERROR: type is not exists (null). +DETAIL: CommandType: (null) +drop procedure pro1; +ERROR: function pro1 does not exist +select line,src from dbe_pldeveloper.gs_errors order by line,src; + line | src +------+------------------------------------------------------ + 3 | Function definition not found: pro + 3 | type is not exists (null). + 5 | empty index + 6 | it is not a known variable + 7 | invalid type's rel tuple for insert. 
+ 8 | relation "dd_bb_cc_rel" does not exist + 8 | unrecognized exception condition "division_by_zeros" +(7 rows) + +create table pro_tblof_tbl_013(c1 number(3,2),c2 varchar2(20),c3 clob,c4 blob); +create type pro_tblof_013 is table of pro_tblof_tbl_013.c1%type; +NOTICE: type reference pro_tblof_tbl_013.c1%TYPE converted to numeric +create or replace procedure pro_tblof_pro_013_1() +as +tblof001 pro_tblof_013; +i int :=1; +cursor cor1 is select c1,c2,c3,c4 from pro_tblof_tbl_013 order by 1,2,3,4; +begin +open cor1; +loop +fetch cor1 into tblof001(i).c1,tblof001(i).c2,tblof001(i).c3,tblof001(i).c4; +EXIT WHEN cor1%NOTFOUND; +DBE_OUTPUT.PRINT_LINE('tblof001('||i||') is '||tblof001(i)); +i=i+1; +end loop; +close cor1; +raise info 'tblof001 is %',tblof001; +raise info 'i is%',i; +end; +/ +ERROR: array element type is not composite in assignment +DETAIL: array variable "tblof001" must be composite when assign value to attibute +CONTEXT: compilation of PL/pgSQL function "pro_tblof_pro_013_1" near line 8 +drop procedure pro_tblof_pro_013_1(); +ERROR: function pro_tblof_pro_013_1 does not exist +drop type pro_tblof_013; +drop table pro_tblof_tbl_013; +drop package if exists package_020; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function public.pro1(integer,integer,character varying) +drop cascades to function public.pro2(integer,character varying) +drop package if exists z_pk2; +NOTICE: drop cascades to function public.pro(integer) +drop package if exists aa; +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +set plsql_show_all_error to off; diff --git a/src/test/regress/expected/pri_any_package.out b/src/test/regress/expected/pri_any_package.out new file mode 100644 index 000000000..011dce7f2 --- /dev/null +++ b/src/test/regress/expected/pri_any_package.out @@ -0,0 +1,169 @@ +CREATE USER test_create_any_package_role PASSWORD 'Gauss@1234'; +GRANT create any package to test_create_any_package_role; +CREATE SCHEMA pri_package_schema; +set search_path=pri_package_schema; +SET ROLE test_create_any_package_role PASSWORD 'Gauss@1234'; +set search_path=pri_package_schema; +drop package if exists pri_exp_pkg; +NOTICE: package pri_exp_pkg() does not exist, skipping +create or replace package pri_exp_pkg as + user_exp EXCEPTION; +end pri_exp_pkg; +/ +create or replace package body pri_exp_pkg as +end pri_exp_pkg; +/ +create or replace function pri_func1(param int) return number +as +declare +a exception; +begin + if (param = 1) then + raise pri_exp_pkg.user_exp; + end if; + raise info 'number is %', param; + exception + when pri_exp_pkg.user_exp then + raise info 'user_exp raise'; + return 0; +end; +/ +ERROR: permission denied for schema pri_package_schema +DETAIL: N/A +reset role; +set search_path=pri_package_schema; +drop package if exists pkg_auth_1; +NOTICE: package pkg_auth_1() does not exist, skipping +CREATE OR REPLACE package pkg_auth_1 +is +a int; +END pkg_auth_1; +/ +CREATE OR REPLACE package body pkg_auth_1 +is +END pkg_auth_1; +/ +drop package if exists pkg_auth_2; +NOTICE: package pkg_auth_2() does not exist, skipping +CREATE OR REPLACE package pkg_auth_2 +is +b int; +procedure a(); +END pkg_auth_2; +/ +CREATE OR REPLACE package body pkg_auth_2 +is +procedure a +is +begin +pkg_auth_1.a:=1; +end; +END pkg_auth_2; +/ +CREATE USER test_execute_any_package_role PASSWORD 'Gauss@1234'; +GRANT execute any package to test_execute_any_package_role; +SET ROLE test_execute_any_package_role PASSWORD 'Gauss@1234'; +set search_path=pri_package_schema; +begin 
+pri_package_schema.pkg_auth_1.a:=1;
+end;
+/
+begin
+pri_package_schema.pkg_auth_2.b:=1;
+end;
+/
+reset role;
+create user user_1 password 'Gauss@1234';
+create user user_2 password 'Gauss@1234';
+create user user_3 password 'Gauss@1234';
+create user user_any password 'Gauss@1234';
+set role user_1 password 'Gauss@1234';
+create or replace package user_1.pri_pkg_same_arg_1
+is
+a int;
+end pri_pkg_same_arg_1;
+/
+create or replace package body user_1.pri_pkg_same_arg_1
+is
+end pri_pkg_same_arg_1;
+/
+set role user_2 password 'Gauss@1234';
+create or replace package user_2.pri_pkg_same_arg_2
+is
+b int;
+end pri_pkg_same_arg_2;
+/
+create or replace package body user_2.pri_pkg_same_arg_2
+is
+end pri_pkg_same_arg_2;
+/
+set role user_any password 'Gauss@1234';
+CREATE OR REPLACE package user_any.pkg_auth_2
+is
+b int;
+procedure a();
+END pkg_auth_2;
+/
+CREATE OR REPLACE package body user_any.pkg_auth_2
+is
+procedure a
+is
+begin
+user_2.pri_pkg_same_arg_2.b:=1;
+user_1.pri_pkg_same_arg_1.a:=2;
+end;
+END pkg_auth_2;
+/
+ERROR: permission denied for package pri_pkg_same_arg_2
+DETAIL: N/A
+CONTEXT: compilation of PL/pgSQL package near line 1
+reset role;
+GRANT create any package to user_any;
+GRANT execute any package to user_any;
+set role user_any password 'Gauss@1234';
+CREATE OR REPLACE package user_any.pkg_auth_2
+is
+b int;
+procedure a();
+END pkg_auth_2;
+/
+CREATE OR REPLACE package body user_any.pkg_auth_2
+is
+procedure a
+is
+begin
+user_2.pri_pkg_same_arg_2.b:=1;
+user_1.pri_pkg_same_arg_1.a:=2;
+end;
+END pkg_auth_2;
+/
+call user_any.pkg_auth_2.a();
+ a
+---
+
+(1 row)
+
+set role user_3 password 'Gauss@1234';
+call user_any.pkg_auth_2.a();
+ERROR: permission denied for schema user_any
+DETAIL: N/A
+reset role;
+GRANT execute any package to user_3;
+set role user_3 password 'Gauss@1234';
+call user_any.pkg_auth_2.a();
+ a
+---
+
+(1 row)
+
+reset role;
+drop package pri_package_schema.pkg_auth_1;
+drop package pri_package_schema.pkg_auth_2;
+NOTICE: drop cascades to function pri_package_schema.a()
+drop package pri_package_schema.pri_exp_pkg;
+drop package user_1.pri_pkg_same_arg_1;
+drop package user_2.pri_pkg_same_arg_2;
+drop package user_any.pkg_auth_2;
+NOTICE: drop cascades to function user_any.a()
+drop schema pri_package_schema cascade;
+drop user user_1,user_2,user_3,user_any,test_create_any_package_role,test_execute_any_package_role cascade;
diff --git a/src/test/regress/expected/pri_create_any_index.out b/src/test/regress/expected/pri_create_any_index.out
new file mode 100644
index 000000000..bd23215a9
--- /dev/null
+++ b/src/test/regress/expected/pri_create_any_index.out
@@ -0,0 +1,86 @@
+CREATE USER test_create_any_index_role PASSWORD 'Gauss@1234';
+GRANT create any index to test_create_any_index_role;
+CREATE USER test_create_any_index_role_test PASSWORD 'Gauss@1234';
+GRANT create any index to test_create_any_index_role_test;
+CREATE SCHEMA pri_index_schema;
+set search_path=pri_index_schema;
+CREATE TABLE pri_index_schema.pri_index
+(
+ SM_SHIP_MODE_SK INTEGER NOT NULL,
+ SM_SHIP_MODE_ID CHAR(16) NOT NULL,
+ SM_TYPE CHAR(30) ,
+ SM_CODE CHAR(10) ,
+ SM_CARRIER CHAR(20) ,
+ SM_CONTRACT CHAR(20)
+);
+SET ROLE test_create_any_index_role PASSWORD 'Gauss@1234';
+CREATE UNIQUE INDEX pri_index_schema.ds_ship_mode_t1_index1 ON pri_index_schema.pri_index(SM_SHIP_MODE_SK);
+--failed
+ALTER INDEX pri_index_schema.ds_ship_mode_t1_index1 UNUSABLE;
+ERROR: permission denied for relation ds_ship_mode_t1_index1
+DETAIL: N/A
+--Create a B-tree index on the SM_SHIP_MODE_SK column of the table.
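+--(USING btree below names the access method explicitly; btree is also the
+--default, so the next three statements simply exercise a plain, an expression,
+--and a partial index under the CREATE ANY INDEX privilege.)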
+CREATE INDEX pri_index_schema.ds_ship_mode_t1_index4 ON pri_index_schema.pri_index USING btree(SM_SHIP_MODE_SK);
+--Create an expression index on the SM_CODE column of the table.
+CREATE INDEX pri_index_schema.ds_ship_mode_t1_index2 ON pri_index_schema.pri_index(SUBSTR(SM_CODE,1 ,4));
+--Create a partial index on SM_SHIP_MODE_SK covering rows with SM_SHIP_MODE_SK greater than 10.
+CREATE UNIQUE INDEX pri_index_schema.ds_ship_mode_t1_index3 ON pri_index_schema.pri_index(SM_SHIP_MODE_SK) WHERE SM_SHIP_MODE_SK>10;
+--The following drops must fail.
+SET ROLE test_create_any_index_role_test PASSWORD 'Gauss@1234';
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index1;
+ERROR: permission denied for relation ds_ship_mode_t1_index1
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index2;
+ERROR: permission denied for relation ds_ship_mode_t1_index2
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index3;
+ERROR: permission denied for relation ds_ship_mode_t1_index3
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index4;
+ERROR: permission denied for relation ds_ship_mode_t1_index4
+DETAIL: N/A
+reset role;
+CREATE TABLE pri_index_schema.tmp_tbl(id int, c1 tsvector);
+SET ROLE test_create_any_index_role PASSWORD 'Gauss@1234';
+CREATE INDEX pri_index_schema.tmp_tbl_id_index ON pri_index_schema.tmp_tbl USING gist (c1);
+---failed
+ALTER TABLE pri_index_schema.ds_ship_mode_t1_index1 ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (SM_SHIP_MODE_SK) USING INDEX;
+ERROR: permission denied for relation ds_ship_mode_t1_index1
+DETAIL: N/A
+CREATE TABLE pri_index_schema.default_test (f1 int, f2 int);
+ERROR: permission denied for schema pri_index_schema
+DETAIL: N/A
+CREATE SEQUENCE pri_index_schema.sequence_test1 START WITH 32;
+ERROR: permission denied for schema pri_index_schema
+DETAIL: N/A
+CREATE FUNCTION pri_index_schema.pri_func_add_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+ERROR: permission denied for schema pri_index_schema
+DETAIL: N/A
+CREATE TYPE pri_index_schema.compfoo AS (f1 int, f2 text);
+ERROR: permission denied for schema pri_index_schema
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index1;
+ERROR: permission denied for relation ds_ship_mode_t1_index1
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index2;
+ERROR: permission denied for relation ds_ship_mode_t1_index2
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index3;
+ERROR: permission denied for relation ds_ship_mode_t1_index3
+DETAIL: N/A
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index4;
+ERROR: permission denied for relation ds_ship_mode_t1_index4
+DETAIL: N/A
+DROP INDEX pri_index_schema.tmp_tbl_id_index;
+ERROR: permission denied for relation tmp_tbl_id_index
+DETAIL: N/A
+reset role;
+DROP TABLE pri_index;
+DROP TABLE pri_index_schema.tmp_tbl;
+DROP SCHEMA pri_index_schema cascade;
+DROP USER test_create_any_index_role_test cascade;
+DROP USER test_create_any_index_role cascade;
diff --git a/src/test/regress/expected/pri_create_any_sequence.out b/src/test/regress/expected/pri_create_any_sequence.out
new file mode 100644
index 000000000..8d0826aca
--- /dev/null
+++ b/src/test/regress/expected/pri_create_any_sequence.out
@@ -0,0 +1,130 @@
+CREATE USER test_create_any_sequence_role PASSWORD 'Gauss@1234';
+GRANT create any sequence to test_create_any_sequence_role;
+CREATE SCHEMA pri_sequence_schema;
+set search_path=pri_sequence_schema;
+SET ROLE test_create_any_sequence_role PASSWORD 'Gauss@1234';
+set search_path=pri_sequence_schema;
+---
+--- test creation of SERIAL column
+---
+CREATE TABLE serialTest (f1
text, f2 serial); +ERROR: permission denied for schema pri_sequence_schema +DETAIL: N/A +reset role; +GRANT create any table to test_create_any_sequence_role; +SET ROLE test_create_any_sequence_role PASSWORD 'Gauss@1234'; +set search_path=pri_sequence_schema; +CREATE TABLE serialTest (f1 text, f2 serial); +NOTICE: CREATE TABLE will create implicit sequence "serialtest_f2_seq" for serial column "serialtest.f2" +INSERT INTO serialTest VALUES ('foo'); +INSERT INTO serialTest VALUES ('bar'); +INSERT INTO serialTest VALUES ('force', 100); +SELECT * FROM serialTest ORDER BY f1, f2; + f1 | f2 +-------+----- + bar | 2 + foo | 1 + force | 100 +(3 rows) + +reset role; +revoke create any table from test_create_any_sequence_role; +SET ROLE test_create_any_sequence_role PASSWORD 'Gauss@1234'; +-- basic sequence operations using both text and oid references +CREATE SEQUENCE sequence_test; +SELECT setval('sequence_test'::text, 32); + setval +-------- + 32 +(1 row) + +SELECT nextval('sequence_test'::regclass); + nextval +--------- + 33 +(1 row) + +SELECT setval('sequence_test'::text, 99, false); + setval +-------- + 99 +(1 row) + +SELECT nextval('sequence_test'::regclass); + nextval +--------- + 99 +(1 row) + +SELECT setval('sequence_test'::regclass, 32); + setval +-------- + 32 +(1 row) + +SELECT nextval('sequence_test'::text); + nextval +--------- + 33 +(1 row) + +SELECT setval('sequence_test'::regclass, 99, false); + setval +-------- + 99 +(1 row) + +SELECT nextval('sequence_test'::text); + nextval +--------- + 99 +(1 row) + +CREATE SEQUENCE sequence_test1 START WITH 32; +CREATE SEQUENCE sequence_test2 START WITH 24 INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; +SELECT nextval('sequence_test2'); + nextval +--------- + 24 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + 28 +(1 row) + +create sequence seqCycle maxvalue 5 cycle; +--normal case with cache +create sequence seq maxvalue 100 cache 5 increment 2 start 2; +NOTICE: Not advised to use MAXVALUE or MINVALUE together with CACHE. +DETAIL: If CACHE is defined, some sequence values may be wasted, causing available sequence numbers to be less than expected. 
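+--With "cache 5 increment 2 start 2", the first nextval call below returns 2 and
+--pre-allocates 2, 4, 6, 8, 10 in the session cache; cached values that go unused
+--are discarded, which is the waste the NOTICE above warns about when CACHE is
+--combined with MAXVALUE.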
+select seq.nextval;
+ nextval
+---------
+       2
+(1 row)
+
+--failed
+CREATE TYPE pri_person_type1 AS (id int, name text); --permission denied
+ERROR: permission denied for schema pri_sequence_schema
+DETAIL: N/A
+CREATE TYPE pri_person_type2 AS (id int, name text); --permission denied
+ERROR: permission denied for schema pri_sequence_schema
+DETAIL: N/A
+CREATE FUNCTION pri_func_add_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+ERROR: permission denied for schema pri_sequence_schema
+DETAIL: N/A
+reset role;
+drop table pri_sequence_schema.serialtest;
+DROP SEQUENCE pri_sequence_schema.sequence_test;
+DROP SEQUENCE pri_sequence_schema.sequence_test1;
+DROP SEQUENCE pri_sequence_schema.sequence_test2;
+DROP SEQUENCE pri_sequence_schema.seqCycle;
+DROP SEQUENCE pri_sequence_schema.seq;
+DROP SCHEMA pri_sequence_schema cascade;
+DROP USER test_create_any_sequence_role cascade;
diff --git a/src/test/regress/expected/pri_create_any_type.out b/src/test/regress/expected/pri_create_any_type.out
new file mode 100644
index 000000000..a9f491a87
--- /dev/null
+++ b/src/test/regress/expected/pri_create_any_type.out
@@ -0,0 +1,92 @@
+CREATE USER test_create_any_type_role PASSWORD 'Gauss@1234';
+GRANT create any type to test_create_any_type_role;
+CREATE SCHEMA pri_type_schema;
+set search_path=pri_type_schema;
+SET ROLE test_create_any_type_role PASSWORD 'Gauss@1234';
+CREATE TYPE pri_type_schema.compfoo AS (f1 int, f2 text);
+CREATE TABLE test_create_any_type_role.t1_compfoo(a int, b pri_type_schema.compfoo);
+--Create an enum type.
+CREATE TYPE pri_type_schema.bugstatus AS ENUM ('create', 'modify', 'closed');
+create type pri_type_schema.textrange_c as range(subtype=text, collation="C");
+ERROR: permission denied for language internal
+DETAIL: N/A
+CREATE TYPE pri_type_schema.bigobj (INPUT = lo_filein, OUTPUT = lo_fileout, INTERNALLENGTH = VARIABLE);
+ERROR: function lo_filein(cstring) does not exist
+CREATE TYPE pri_type_schema.int42;
+CREATE TYPE pri_type_schema.text_w_default;
+reset role;
+--Verify drop privileges on types.
+CREATE USER test_create_any_type_role_test PASSWORD 'Gauss@1234';
+GRANT create any type to test_create_any_type_role_test;
+SET ROLE test_create_any_type_role_test PASSWORD 'Gauss@1234';
+DROP TYPE pri_type_schema.compfoo;
+ERROR: permission denied for type compfoo
+DETAIL: N/A
+reset role;
+CREATE FUNCTION pri_type_schema.int42_in(cstring)
+ RETURNS pri_type_schema.int42
+ AS 'int4in'
+ LANGUAGE internal STRICT;
+NOTICE: return type pri_type_schema.int42 is only a shell
+CREATE FUNCTION pri_type_schema.int42_out(pri_type_schema.int42)
+ RETURNS cstring
+ AS 'int4out'
+ LANGUAGE internal STRICT;
+NOTICE: argument type pri_type_schema.int42 is only a shell
+CREATE FUNCTION pri_type_schema.text_w_default_in(cstring)
+ RETURNS pri_type_schema.text_w_default
+ AS 'textin'
+ LANGUAGE internal STRICT;
+NOTICE: return type pri_type_schema.text_w_default is only a shell
+CREATE FUNCTION pri_type_schema.text_w_default_out(pri_type_schema.text_w_default)
+ RETURNS cstring
+ AS 'textout'
+ LANGUAGE internal STRICT;
+NOTICE: argument type pri_type_schema.text_w_default is only a shell
+SET ROLE test_create_any_type_role PASSWORD 'Gauss@1234';
+CREATE TYPE pri_type_schema.int42 (
+ internallength = 4,
+ input = pri_type_schema.int42_in,
+ output = pri_type_schema.int42_out,
+ alignment = int4,
+ default = 42,
+ passedbyvalue
+);
+ERROR: must be owner of function pri_type_schema.int42_in
+DETAIL: N/A
+CREATE TYPE pri_type_schema.text_w_default (
+
internallength = variable, + input = pri_type_schema.text_w_default_in, + output = pri_type_schema.text_w_default_out, + alignment = int4, + default = 'zippo' +); +ERROR: must be owner of function pri_type_schema.text_w_default_in +DETAIL: N/A +---failed +CREATE TABLE pri_type_schema.default_test (f1 int, f2 int); +ERROR: permission denied for schema pri_type_schema +DETAIL: N/A +CREATE SEQUENCE pri_type_schema.sequence_test1 START WITH 32; +ERROR: permission denied for schema pri_type_schema +DETAIL: N/A +CREATE FUNCTION pri_type_schema.pri_func_add_sql(integer, integer) RETURNS integer +AS 'select $1 + $2;' +LANGUAGE SQL +IMMUTABLE +RETURNS NULL ON NULL INPUT; +ERROR: permission denied for schema pri_type_schema +DETAIL: N/A +reset role; +DROP TABLE test_create_any_type_role.t1_compfoo; +drop type pri_type_schema.compfoo; +drop type pri_type_schema.bugstatus; +drop function pri_type_schema.int42_in(cstring); +drop function pri_type_schema.int42_out(int42); +drop type pri_type_schema.int42; +drop function pri_type_schema.text_w_default_in(cstring); +drop function pri_type_schema.text_w_default_out(text_w_default); +drop type pri_type_schema.text_w_default; +DROP SCHEMA pri_type_schema cascade; +DROP USER test_create_any_type_role cascade; +DROP USER test_create_any_type_role_test cascade; diff --git a/src/test/regress/expected/pri_create_drop_any_table.out b/src/test/regress/expected/pri_create_drop_any_table.out new file mode 100644 index 000000000..1397009e4 --- /dev/null +++ b/src/test/regress/expected/pri_create_drop_any_table.out @@ -0,0 +1,186 @@ +CREATE USER test_create_any_table_role PASSWORD 'Gauss@1234'; +GRANT create any table to test_create_any_table_role; +CREATE SCHEMA pri_create_schema; +set search_path=pri_create_schema; +SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234'; +CREATE table pri_create_schema.tb_pri (id int, name VARCHAR(10)); + --create table +CREATE TABLE pri_create_schema.TBL_DOMAIN_PRI +( + IDOMAINID NUMBER(10) NOT NULL, + SDOMAINNAME VARCHAR2(30) NOT NULL, + b int +); +CREATE TABLE pri_create_schema.pri_test_hash (a int, b int); +reset role; +CREATE TYPE pri_create_schema.pri_person_type1 AS (id int, name text); +CREATE TYPE pri_create_schema.pri_person_type2 AS (id int, name text); +SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234'; +CREATE TABLE pri_create_schema.pri_persons OF pri_create_schema.pri_person_type1; +CREATE TABLE pri_create_schema.pri_stuff (id int); +--trigger +CREATE SEQUENCE pri_create_schema.serial1;--permission denied +ERROR: permission denied for schema pri_create_schema +DETAIL: N/A +create table pri_create_schema.pri_trigtest (i serial primary key);--failed +NOTICE: CREATE TABLE will create implicit sequence "pri_trigtest_i_seq" for serial column "pri_trigtest.i" +ERROR: permission denied for schema pri_create_schema +DETAIL: N/A +reset role; +GRANT create any sequence to test_create_any_table_role; +GRANT create any index to test_create_any_table_role; +SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234'; +CREATE SEQUENCE pri_create_schema.serial1; +create table pri_create_schema.pri_trigtest (i serial primary key); +NOTICE: CREATE TABLE will create implicit sequence "pri_trigtest_i_seq" for serial column "pri_trigtest.i" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pri_trigtest_pkey" for table "pri_trigtest" +reset role; +revoke create any sequence,create any index from test_create_any_table_role; +SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234'; +create function 
pri_create_schema.pri_trigtest() returns trigger as $$
+begin
+ raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL;
+ return new;
+end;$$ language plpgsql; --failed ok
+ERROR: permission denied for schema pri_create_schema
+DETAIL: N/A
+reset role;
+create function pri_create_schema.pri_trigtest() returns trigger as $$
+begin
+ raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL;
+ return new;
+end;$$ language plpgsql;
+create table pri_create_schema.pri_trigtest_test (i serial primary key);
+NOTICE: CREATE TABLE will create implicit sequence "pri_trigtest_test_i_seq" for serial column "pri_trigtest_test.i"
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pri_trigtest_test_pkey" for table "pri_trigtest_test"
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+create trigger pri_trigtest_b_row_tg before insert or update or delete on pri_create_schema.pri_trigtest
+for each row execute procedure pri_create_schema.pri_trigtest(); --success: trigger on a table the role created itself
+create trigger pri_trigtest_b_row_tg_test before insert or update or delete on pri_create_schema.pri_trigtest_test
+for each row execute procedure pri_create_schema.pri_trigtest(); --failed
+ERROR: permission denied for relation pri_trigtest_test
+DETAIL: N/A
+create table pri_create_schema.pri_storage_para_t1 (a int4, b text)
+WITH
+(
+ fillfactor =85,
+ autovacuum_enabled = ON,
+ toast.autovacuum_enabled = ON,
+ autovacuum_vacuum_threshold = 100,
+ toast.autovacuum_vacuum_threshold = 100,
+ autovacuum_vacuum_scale_factor = 10,
+ toast.autovacuum_vacuum_scale_factor = 10,
+ autovacuum_analyze_threshold = 8,
+ autovacuum_analyze_scale_factor = 9,
+-- autovacuum_vacuum_cost_delay: Valid values are between "0" and "100".
+ autovacuum_vacuum_cost_delay = 90,
+ toast.autovacuum_vacuum_cost_delay = 92,
+-- autovacuum_vacuum_cost_limit: Valid values are between "1" and "10000".
+ autovacuum_vacuum_cost_limit = 567,
+ toast.autovacuum_vacuum_cost_limit = 789,
+ autovacuum_freeze_min_age = 5000,
+ toast.autovacuum_freeze_min_age = 6000,
+-- autovacuum_freeze_max_age: Valid values are between "100000000" and "2000000000".
+ autovacuum_freeze_max_age = 300000000, + toast.autovacuum_freeze_max_age = 250000000, + autovacuum_freeze_table_age = 170000000, + toast.autovacuum_freeze_table_age = 180000000 +) +partition by range (a) +( + partition pri_storage_para_t1_p1 values less than (10), + partition pri_storage_para_t1_p2 values less than (20), + partition pri_storage_para_t1_p3 values less than (100) +); +CREATE TABLE pri_table(c_id int,c_first varchar(50) NOT NULL); +--temp table +CREATE TEMP TABLE pri_temp1 (a int primary key); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pri_temp1_pkey" for table "pri_temp1" +reset role; +CREATE TABLE pri_t1 (num int, name text); +CREATE TABLE pri_t2 (num2 int, value text); +SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234'; +CREATE TEMP TABLE pri_tt (num2 int, value text); +CREATE VIEW pri_create_schema.pri_nontemp1 AS SELECT * FROM pri_create_schema.pri_t1 CROSS JOIN pri_create_schema.pri_t2; +CREATE VIEW pri_temporal1 AS SELECT * FROM pri_create_schema.pri_t1 CROSS JOIN pri_tt; +NOTICE: view "pri_temporal1" will be a temporary view +create table pri_create_schema.replication_temp_test(id int); +--create materialized view +create table pri_create_schema.t1(c1 int,c2 int); +insert into pri_create_schema.t1 values(1,1),(2,2); --success +create incremental materialized view pri_create_schema.mv1 as select * from pri_create_schema.t1; +CREATE TABLE pri_create_schema.pri_store_returns +( + W_WAREHOUSE_SK INTEGER NOT NULL, + W_WAREHOUSE_ID CHAR(16) NOT NULL, + sr_item_sk VARCHAR(20) , + W_WAREHOUSE_SQ_FT INTEGER +); +CREATE TABLE pri_create_schema.store_returns_t1 AS SELECT * FROM pri_create_schema.pri_store_returns WHERE sr_item_sk > '4795'; +--failed +CREATE TYPE pri_create_schema.pri_type AS (id int, name text); --permission denied +ERROR: permission denied for schema pri_create_schema +DETAIL: N/A +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +CREATE CLIENT MASTER KEY pri_create_schema.ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +ERROR: permission denied for schema pri_create_schema +DETAIL: N/A +\! 
gs_ktool -d all +DELETE ALL + 1 +CREATE SEQUENCE pri_create_schema.sequence_test1 START WITH 32; +ERROR: permission denied for schema pri_create_schema +DETAIL: N/A +CREATE FUNCTION pri_create_schema.pri_func_add_sql(integer, integer) RETURNS integer +AS 'select $1 + $2;' +LANGUAGE SQL +IMMUTABLE +RETURNS NULL ON NULL INPUT; +ERROR: permission denied for schema pri_create_schema +DETAIL: N/A +reset role; +CREATE USER test_drop_any_table_role PASSWORD 'Gauss@1234'; +GRANT drop any table to test_drop_any_table_role; +SET ROLE test_drop_any_table_role PASSWORD 'Gauss@1234'; +set search_path = pri_create_schema; +drop table tbl_domain_pri; +drop table pri_test_hash; +drop table pri_persons; +drop table pri_stuff; +drop table pri_trigtest_test; +drop table pri_storage_para_t1; +drop table pri_table; +drop view pri_temporal1; +drop view pri_nontemp1; +drop table pri_t1 cascade; +drop table pri_t2 cascade; +drop table replication_temp_test; +drop materialized view mv1; +drop table t1 cascade; +--failed +drop sequence serial1; +ERROR: permission denied for relation serial1 +DETAIL: N/A +drop function pri_trigtest(); +ERROR: permission denied for function pri_trigtest +DETAIL: N/A +drop type pri_create_schema.pri_person_type1; +ERROR: permission denied for type pri_person_type1 +DETAIL: N/A +drop type pri_create_schema.pri_person_type2; +ERROR: permission denied for type pri_person_type2 +DETAIL: N/A +drop SEQUENCE pri_create_schema.serial1; +ERROR: permission denied for relation serial1 +DETAIL: N/A +reset role; +drop type pri_create_schema.pri_person_type1; +drop type pri_create_schema.pri_person_type2; +drop SEQUENCE pri_create_schema.serial1; +DROP USER test_drop_any_table_role cascade; +DROP USER test_create_any_table_role cascade; diff --git a/src/test/regress/expected/pri_dml_any_table.out b/src/test/regress/expected/pri_dml_any_table.out new file mode 100644 index 000000000..1a238b1e9 --- /dev/null +++ b/src/test/regress/expected/pri_dml_any_table.out @@ -0,0 +1,99 @@ +CREATE USER test_select_any_table_role PASSWORD 'Gauss@1234'; +CREATE SCHEMA pri_select_schema; +set search_path=pri_select_schema; +--create table +CREATE table pri_select_schema.tb_pri (id int, name VARCHAR(10)); +insert into pri_select_schema.tb_pri values(1,'joe'); +SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234'; +select * from pri_select_schema.tb_pri; +ERROR: permission denied for schema pri_select_schema +LINE 1: select * from pri_select_schema.tb_pri; + ^ +DETAIL: N/A +insert into pri_select_schema.tb_pri values(2,'ly'); +ERROR: permission denied for schema pri_select_schema +LINE 1: insert into pri_select_schema.tb_pri values(2,'ly'); + ^ +DETAIL: N/A +update pri_select_schema.tb_pri set name = 'gauss' where id = 1; +ERROR: permission denied for schema pri_select_schema +LINE 1: update pri_select_schema.tb_pri set name = 'gauss' where id ... 
+               ^
+DETAIL: N/A
+delete pri_select_schema.tb_pri;
+ERROR: permission denied for schema pri_select_schema
+LINE 1: delete pri_select_schema.tb_pri;
+               ^
+DETAIL: N/A
+reset role;
+GRANT select any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+ id | name
+----+------
+  1 | joe
+(1 row)
+
+insert into pri_select_schema.tb_pri values(1,'joe');
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+delete pri_select_schema.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+reset role;
+revoke select any table from test_select_any_table_role;
+GRANT insert any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+insert into pri_select_schema.tb_pri values(2,'johy');
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+delete pri_select_schema.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+reset role;
+revoke insert any table from test_select_any_table_role;
+GRANT update any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+insert into pri_select_schema.tb_pri values(3,'lili');
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+--failed
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+delete pri_select_schema.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+reset role;
+grant select on table pri_select_schema.tb_pri to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+reset role;
+revoke select on table pri_select_schema.tb_pri from test_select_any_table_role;
+revoke update any table from test_select_any_table_role;
+GRANT delete any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+insert into pri_select_schema.tb_pri values(3,'lili');
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+update pri_select_schema.tb_pri set name = 'gauss' where id = 3;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+delete pri_select_schema.tb_pri;
+reset role;
+drop table pri_select_schema.tb_pri;
+DROP SCHEMA pri_select_schema cascade;
+DROP USER test_select_any_table_role cascade;
diff --git a/src/test/regress/expected/pri_indepent_any.out b/src/test/regress/expected/pri_indepent_any.out
new file mode 100644
index 000000000..f19b4325f
--- /dev/null
+++ b/src/test/regress/expected/pri_indepent_any.out
@@ -0,0 +1,109 @@
+--Test a private (INDEPENDENT) user.
+CREATE USER any_table_role PASSWORD 'Gauss@1234';
+CREATE USER pri_user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc";
+WARNING: Please carefully use independent user as it need more self-management.
+HINT: Self-management include logical backup, password manage and so on.
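+--The INDEPENDENT user's objects are private: the statements below verify that
+--an ordinary user, the initial user, and holders of SELECT/INSERT/UPDATE/DELETE
+--ANY TABLE all fail to run DML on tb_pri, while CREATE/ALTER/DROP ANY TABLE
+--still permit DDL in the private schema.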
+set role pri_user_independent password "1234@abc";
+CREATE table pri_user_independent.tb_pri (id int, name VARCHAR(10));
+CREATE table pri_user_independent.tb_pri_test (id int, name VARCHAR(10));
+CREATE table pri_user_independent.tb_pri_test1 (id int, name VARCHAR(10));
+insert into pri_user_independent.tb_pri values(1, 'gauss');
+--Ordinary user.
+set role any_table_role PASSWORD 'Gauss@1234';
+select * from pri_user_independent.tb_pri;
+ERROR: permission denied for schema pri_user_independent
+LINE 1: select * from pri_user_independent.tb_pri;
+                      ^
+DETAIL: N/A
+insert into pri_user_independent.tb_pri values(1,'joe');
+ERROR: permission denied for schema pri_user_independent
+LINE 1: insert into pri_user_independent.tb_pri values(1,'joe');
+                    ^
+DETAIL: N/A
+update pri_user_independent.tb_pri set name = 'gauss' where id = 1;
+ERROR: permission denied for schema pri_user_independent
+LINE 1: update pri_user_independent.tb_pri set name = 'gauss' where ...
+               ^
+DETAIL: N/A
+delete from pri_user_independent.tb_pri;
+ERROR: permission denied for schema pri_user_independent
+LINE 1: delete from pri_user_independent.tb_pri;
+                    ^
+DETAIL: N/A
+create table pri_user_independent.tt1(id int);
+ERROR: permission denied for schema pri_user_independent
+DETAIL: N/A
+ALTER TABLE pri_user_independent.tb_pri add column age int;
+ERROR: permission denied for schema pri_user_independent
+DETAIL: N/A
+DROP table pri_user_independent.tb_pri_test;
+ERROR: permission denied for schema pri_user_independent
+DETAIL: N/A
+--Initial user.
+reset role;
+select * from pri_user_independent.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+insert into pri_user_independent.tb_pri values(1,'joe');
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+update pri_user_independent.tb_pri set name = 'gauss' where id = 1;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+delete from pri_user_independent.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+create table pri_user_independent.tt(id int);
+ALTER TABLE pri_user_independent.tb_pri add column age int;
+DROP table pri_user_independent.tb_pri_test;
+--select any table
+reset role;
+GRANT select any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+select * from pri_user_independent.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+--insert any table
+reset role;
+GRANT insert any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+insert into pri_user_independent.tb_pri values(2,'bob');
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+--update any table
+reset role;
+GRANT update any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+update pri_user_independent.tb_pri set name = 'Bob' where id = 2;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+--delete any table
+reset role;
+GRANT delete any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+delete from pri_user_independent.tb_pri;
+ERROR: permission denied for relation tb_pri
+DETAIL: N/A
+--create any table
+reset role;
+GRANT create any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+create table pri_user_independent.tt2(id int);
+--alter any table
+reset role;
+CREATE USER user_test_alter password 'Gauss@1234';
+GRANT alter any table to user_test_alter;
+SET ROLE user_test_alter PASSWORD 'Gauss@1234';
+ALTER TABLE pri_user_independent.tb_pri drop column age;
+--drop any table
+reset role;
+GRANT drop any table to
user_test_alter; +SET ROLE user_test_alter PASSWORD 'Gauss@1234'; +DROP table pri_user_independent.tb_pri_test1; +reset role; +DROP TABLE pri_user_independent.tb_pri; +DROP TABLE pri_user_independent.tt; +DROP TABLE pri_user_independent.tt2; +DROP USER user_test_alter cascade; +DROP USER any_table_role cascade; +DROP USER pri_user_independent cascade; diff --git a/src/test/regress/expected/pri_samenew_schema.out b/src/test/regress/expected/pri_samenew_schema.out new file mode 100644 index 000000000..55c6c9960 --- /dev/null +++ b/src/test/regress/expected/pri_samenew_schema.out @@ -0,0 +1,282 @@ +DROP USER test_same_schema_user; +ERROR: role "test_same_schema_user" does not exist +DROP USER ordinary_role; +ERROR: role "ordinary_role" does not exist +CREATE USER test_same_schema_user PASSWORD 'Gauss@1234'; +CREATE USER ordinary_role PASSWORD 'Gauss@1234'; +--test same schema +reset role; +SET ROLE ordinary_role PASSWORD 'Gauss@1234'; +--create table +CREATE TABLE test_drop_table(id int); +CREATE TABLE TBL_DOMAIN_PRI +( + IDOMAINID NUMBER(10) NOT NULL, + SDOMAINNAME VARCHAR2(30) NOT NULL, + b int +); +insert into TBL_DOMAIN_PRI values (1,'gauss',1); +reset role; +GRANT create any table to test_same_schema_user; +SET ROLE test_same_schema_user PASSWORD 'Gauss@1234'; +CREATE table ordinary_role.tb_pri (id int, name VARCHAR(10)); +alter table ordinary_role.TBL_DOMAIN_PRI add column c int; +ERROR: permission denied for relation tbl_domain_pri +DETAIL: N/A +drop table ordinary_role.test_drop_table; +ERROR: permission denied for relation test_drop_table +DETAIL: N/A +select * from ordinary_role.TBL_DOMAIN_PRI; +ERROR: permission denied for relation tbl_domain_pri +DETAIL: N/A +insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2); +ERROR: permission denied for relation tbl_domain_pri +DETAIL: N/A +update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1; +ERROR: permission denied for relation tbl_domain_pri +DETAIL: N/A +reset role; +-- create any type +revoke create any table from test_same_schema_user; +GRANT create any type to test_same_schema_user; +SET ROLE test_same_schema_user PASSWORD 'Gauss@1234'; +CREATE TYPE ordinary_role.compfoo AS (f1 int, f2 text); +CREATE TABLE ordinary_role.t1_compfoo(a int, b ordinary_role.compfoo); +ERROR: permission denied for schema ordinary_role +DETAIL: N/A +CREATE TYPE ordinary_role.bugstatus AS ENUM ('create', 'modify', 'closed'); +-- create any function +reset role; +revoke create any type from test_same_schema_user; +GRANT create any function to test_same_schema_user; +SET ROLE test_same_schema_user PASSWORD 'Gauss@1234'; +CREATE FUNCTION ordinary_role.pri_func_add_sql(integer, integer) RETURNS integer + AS 'select $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; +--create any index +reset role; +revoke create any function from test_same_schema_user; +GRANT create any index to test_same_schema_user; +SET ROLE test_same_schema_user PASSWORD 'Gauss@1234'; +CREATE UNIQUE INDEX ordinary_role.ds_ship_mode_t1_index1 ON ordinary_role.TBL_DOMAIN_PRI(IDOMAINID); +reset role; +DROP INDEX ordinary_role.ds_ship_mode_t1_index1; +--create any sequence +reset role; +revoke create any index from test_same_schema_user; +GRANT create any sequence to test_same_schema_user; +SET ROLE test_same_schema_user PASSWORD 'Gauss@1234'; +CREATE SEQUENCE sequence_test; +--alter any table +reset role; +revoke create any type from test_same_schema_user; +grant alter any table to test_same_schema_user; +SET ROLE test_same_schema_user PASSWORD 
'Gauss@1234';
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+drop table ordinary_role.test_drop_table;
+ERROR: permission denied for relation test_drop_table
+DETAIL: N/A
+select * from ordinary_role.TBL_DOMAIN_PRI;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+--drop any table
+reset role;
+revoke alter any table from test_same_schema_user;
+grant drop any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+drop table ordinary_role.test_drop_table;
+select * from ordinary_role.TBL_DOMAIN_PRI;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+--select any table
+reset role;
+revoke drop any table from test_same_schema_user;
+grant select any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+drop table ordinary_role.test_drop_table;
+ERROR: table "test_drop_table" does not exist
+select * from ordinary_role.TBL_DOMAIN_PRI;
+ idomainid | sdomainname | b | c
+-----------+-------------+---+---
+         1 | gauss       | 1 |
+(1 row)
+
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+--insert any table
+reset role;
+revoke select any table from test_same_schema_user;
+grant insert any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from ordinary_role.TBL_DOMAIN_PRI;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+--update any table
+reset role;
+revoke insert any table from test_same_schema_user;
+grant update any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from ordinary_role.TBL_DOMAIN_PRI;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain_pri
+DETAIL: N/A
+reset role;
+grant select any table to test_same_schema_user;
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+--delete any table
+reset role;
+revoke update any table from test_same_schema_user;
+revoke select any table from test_same_schema_user;
+grant delete any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+delete from ordinary_role.TBL_DOMAIN_PRI;
+-- test new schema
+reset role;
+revoke delete any table from test_same_schema_user;
+GRANT create any table to test_same_schema_user;
+CREATE SCHEMA pri_new_schema;
+CREATE TABLE pri_new_schema.TBL_DOMAIN
+(
+  IDOMAINID   NUMBER(10)   NOT NULL,
+  SDOMAINNAME VARCHAR2(30) NOT NULL,
+  b int
+);
+insert into pri_new_schema.TBL_DOMAIN values (1,'gauss',1);
+CREATE TABLE pri_new_schema.test_new_table1(id int, name text);
+GRANT create any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE TABLE pri_new_schema.test_new_table2(id int, name text);
+reset role;
+revoke create any table from test_same_schema_user;
+grant alter any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table pri_new_schema.TBL_DOMAIN add column d int;
+drop table pri_new_schema.test_new_table1;
+ERROR: permission denied for relation test_new_table1
+DETAIL: N/A
+select * from pri_new_schema.TBL_DOMAIN;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+reset role;
+revoke alter any table from test_same_schema_user;
+grant drop any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table pri_new_schema.TBL_DOMAIN add column d int;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+drop table pri_new_schema.test_new_table1;
+select * from pri_new_schema.TBL_DOMAIN;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+reset role;
+revoke drop any table from test_same_schema_user;
+grant select any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table pri_new_schema.TBL_DOMAIN add column d int;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+drop table pri_new_schema.test_new_table1;
+ERROR: table "test_new_table1" does not exist
+select * from pri_new_schema.TBL_DOMAIN;
+ idomainid | sdomainname | b | d
+-----------+-------------+---+---
+         1 | gauss       | 1 |
+(1 row)
+
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+reset role;
+revoke select any table from test_same_schema_user;
+grant insert any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from pri_new_schema.TBL_DOMAIN;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+reset role;
+revoke insert any table from test_same_schema_user;
+grant update any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from pri_new_schema.TBL_DOMAIN;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
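The cases above cycle through the system-level ANY privileges one at a time, confirming that each grant opens exactly one class of operation and that the matching revoke closes it again. A minimal sketch of that grant/verify/revoke pattern, using hypothetical names (demo_schema, demo_admin) rather than the suite's objects:

```
-- Hypothetical objects; mirrors the pattern exercised above.
CREATE SCHEMA demo_schema;
CREATE TABLE demo_schema.t(id int);
CREATE USER demo_admin PASSWORD 'Gauss@1234';
GRANT select any table TO demo_admin;
SET ROLE demo_admin PASSWORD 'Gauss@1234';
SELECT * FROM demo_schema.t;           -- allowed: covered by SELECT ANY TABLE
INSERT INTO demo_schema.t VALUES (1);  -- rejected: INSERT ANY TABLE not granted
RESET ROLE;
REVOKE select any table FROM demo_admin;
```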
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+ERROR: permission denied for relation tbl_domain
+DETAIL: N/A
+reset role;
+grant select any table to test_same_schema_user;
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+reset role;
+revoke update any table from test_same_schema_user;
+revoke select any table from test_same_schema_user;
+GRANT delete any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+delete pri_new_schema.TBL_DOMAIN;
+reset role;
+revoke delete any table from test_same_schema_user;
+drop table pri_new_schema.TBL_DOMAIN cascade;
+drop table pri_new_schema.test_new_table2 cascade;
+drop table ordinary_role.tb_pri cascade;
+drop table ordinary_role.tbl_domain_pri cascade;
+drop type ordinary_role.compfoo;
+drop type ordinary_role.bugstatus;
+drop function ordinary_role.pri_func_add_sql(integer,integer);
+drop schema ordinary_role cascade;
+drop user ordinary_role cascade;
+drop user test_same_schema_user cascade;
+drop schema pri_new_schema cascade;
diff --git a/src/test/regress/expected/pri_sys_schema.out b/src/test/regress/expected/pri_sys_schema.out
new file mode 100644
index 000000000..1cc98f8bb
--- /dev/null
+++ b/src/test/regress/expected/pri_sys_schema.out
@@ -0,0 +1,156 @@
+CREATE USER test_any_role PASSWORD 'Gauss@1234';
+GRANT insert any table to test_any_role;
+GRANT update any table to test_any_role;
+GRANT delete any table to test_any_role;
+CREATE USER test_user PASSWORD 'Gauss@1234';
+--db4ai
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+insert into db4ai.snapshot(id) values(1);
+ERROR: permission denied for relation snapshot
+DETAIL: N/A
+update db4ai.snapshot set id = 2 where id = 1;
+ERROR: permission denied for relation snapshot
+DETAIL: N/A
+delete from db4ai.snapshot;
+ERROR: permission denied for relation snapshot
+DETAIL: N/A
+select * from db4ai.snapshot;
+ id | parent_id | matrix_id | root_id | schema | name | owner | commands | comment | published | archived | created | row_count
+----+-----------+-----------+---------+--------+------+-------+----------+---------+-----------+----------+---------+-----------
+(0 rows)
+
+reset role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+insert into db4ai.snapshot(id) values(1);
+ERROR: permission denied for relation snapshot
+DETAIL: N/A
+update db4ai.snapshot set id = 2 where id = 1;
+ERROR: permission denied for relation snapshot
+DETAIL: N/A
+delete from db4ai.snapshot;
+ERROR: permission denied for relation snapshot
+DETAIL: N/A
+select * from db4ai.snapshot;
+ id | parent_id | matrix_id | root_id | schema | name | owner | commands | comment | published | archived | created | row_count
+----+-----------+-----------+---------+--------+------+-------+----------+---------+-----------+----------+---------+-----------
+(0 rows)
+
+--information_schema
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+insert into information_schema.sql_features(feature_id) values(1);
+ERROR: permission denied for relation sql_features
+DETAIL: N/A
+update information_schema.sql_features set feature_name = 'Embedded Ada1' where feature_id = 'B011';
+ERROR: permission denied for relation sql_features
+DETAIL: N/A
+delete from information_schema.sql_features;
+ERROR: permission denied for relation sql_features
+DETAIL: N/A
+select * from information_schema.sql_features where feature_id = 'B011';
+ feature_id | feature_name | sub_feature_id | sub_feature_name | is_supported | is_verified_by | comments
+------------+--------------+----------------+------------------+--------------+----------------+----------
+ B011       | Embedded Ada |                |                  | NO           |                |
+(1 row)
+
+reset role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+insert into information_schema.sql_features(feature_id) values(1);
+ERROR: permission denied for relation sql_features
+DETAIL: N/A
+update information_schema.sql_features set feature_name = 'Embedded Ada1' where feature_id = 'B011';
+ERROR: permission denied for relation sql_features
+DETAIL: N/A
+delete from information_schema.sql_features;
+ERROR: permission denied for relation sql_features
+DETAIL: N/A
+select * from information_schema.sql_features where feature_id = 'B011';
+ feature_id | feature_name | sub_feature_id | sub_feature_name | is_supported | is_verified_by | comments
+------------+--------------+----------------+------------------+--------------+----------------+----------
+ B011       | Embedded Ada |                |                  | NO           |                |
+(1 row)
+
+--dbe_perf
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from dbe_perf.user_transaction;
+ERROR: permission denied for schema dbe_perf
+LINE 1: select count(*) from dbe_perf.user_transaction;
+                             ^
+DETAIL: N/A
+delete from dbe_perf.user_transaction;
+ERROR: permission denied for schema dbe_perf
+LINE 1: delete from dbe_perf.user_transaction;
+                    ^
+DETAIL: N/A
+reset role;
+GRANT select any table to test_any_role;
+GRANT delete any table to test_any_role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from dbe_perf.user_transaction;
+ERROR: permission denied for schema dbe_perf
+LINE 1: select count(*) from dbe_perf.user_transaction;
+                             ^
+DETAIL: N/A
+delete from dbe_perf.user_transaction;
+ERROR: permission denied for schema dbe_perf
+LINE 1: delete from dbe_perf.user_transaction;
+                    ^
+DETAIL: N/A
+--cstore
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from sys.sys_dummy;
+ERROR: permission denied for schema sys
+LINE 1: select count(*) from sys.sys_dummy;
+                             ^
+DETAIL: N/A
+delete from sys.sys_dummy;
+ERROR: permission denied for schema sys
+LINE 1: delete from sys.sys_dummy;
+                    ^
+DETAIL: N/A
+reset role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from sys.sys_dummy;
+ERROR: permission denied for schema sys
+LINE 1: select count(*) from sys.sys_dummy;
+                             ^
+DETAIL: N/A
+delete from sys.sys_dummy;
+ERROR: permission denied for schema sys
+LINE 1: delete from sys.sys_dummy;
+                    ^
+DETAIL: N/A
+--pg_catalog
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from pg_catalog.pg_authid;
+ERROR: permission denied for relation pg_authid
+DETAIL: N/A
+reset role;
+GRANT select any table to test_any_role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from pg_catalog.pg_authid;
+ERROR: permission denied for relation pg_authid
+DETAIL: N/A
+--sys
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from sys.my_jobs;
+ERROR: permission denied for schema sys
+LINE 1: select count(*) from sys.my_jobs;
+                             ^
+DETAIL: N/A
+reset role;
+GRANT select any table to test_any_role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from sys.my_jobs;
+ERROR: permission denied for schema sys
+LINE 1: select count(*) from sys.my_jobs;
+                             ^
+DETAIL: N/A
+reset role;
+drop user test_any_role cascade;
+drop user test_user cascade;
diff --git a/src/test/regress/expected/query_rewrite.out b/src/test/regress/expected/query_rewrite.out
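pri_sys_schema.out pins down the complement of the same-schema cases: the ANY privileges stop at system schemas. Whether the role holds none of them (test_user) or several of them (test_any_role), DML against db4ai, information_schema, dbe_perf, pg_catalog and sys fails identically. A sketch of that check, using a hypothetical role name:

```
-- Sketch: ANY privileges do not reach system schemas.
CREATE USER any_holder PASSWORD 'Gauss@1234';
GRANT select any table TO any_holder;
GRANT update any table TO any_holder;
SET ROLE any_holder PASSWORD 'Gauss@1234';
SELECT count(*) FROM pg_catalog.pg_authid;  -- still: permission denied
RESET ROLE;
DROP USER any_holder CASCADE;
```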
index 99813f97f..fb1a941c2 100755
--- a/src/test/regress/expected/query_rewrite.out
+++ b/src/test/regress/expected/query_rewrite.out
@@ -76,8 +76,159 @@ explain (costs off) select * from t1 where ( '1' = '0' and ( '1' = '1' or exists
    -> Seq Scan on t1
 (3 rows)
 
+-- test for optimized join rel as sub-query
+set qrw_inlist2join_optmode = 'rule_base';
+CREATE TABLE t3 (
+slot integer NOT NULL,
+cid bigint NOT NULL,
+name character varying NOT NULL
+)
+WITH (orientation=row);
+insert into t3 (slot, cid, name) values(generate_series(1, 10), generate_series(1, 10), 'records.storage.state');
+analyze t3;
+explain (costs off)
+select
+    *
+from
+    t3
+where
+    slot = '5'
+    and (name) in (
+        select
+            name
+        from
+            t3
+        where
+            slot = '5'
+            and cid in (
+                5, 1000, 1001, 1002, 1003, 1004, 1005,
+                1006, 1007, 2000, 4000, 10781986, 10880002
+            )
+        limit
+            50
+    );
+                                   QUERY PLAN
+--------------------------------------------------------------------------------
+ Nested Loop Semi Join
+   Join Filter: ((query_rewrite.t3.name)::text = (query_rewrite.t3.name)::text)
+   ->  Seq Scan on t3
+         Filter: (slot = 5)
+   ->  Limit
+         ->  Hash Right Semi Join
+               Hash Cond: ("*VALUES*".column1 = query_rewrite.t3.cid)
+               ->  Values Scan on "*VALUES*"
+               ->  Hash
+                     ->  Seq Scan on t3
+                           Filter: (slot = 5)
+(11 rows)
+
+select
+    *
+from
+    t3
+where
+    slot = '5'
+    and (name) in (
+        select
+            name
+        from
+            t3
+        where
+            slot = '5'
+            and cid in (
+                5, 1000, 1001, 1002, 1003, 1004, 1005,
+                1006, 1007, 2000, 4000, 10781986, 10880002
+            )
+        limit
+            50
+    );
+ slot | cid |         name
+------+-----+-----------------------
+    5 |   5 | records.storage.state
+(1 row)
+
+explain (costs off)
+select
+    *
+from
+    t3
+where
+    cid in (
+        select
+            cid
+        from
+            t3
+        where
+            slot = '5'
+            and (name) in (
+                select
+                    name
+                from
+                    t3
+                where
+                    slot = '5'
+                    and cid in (
+                        5, 1000, 1001, 1002, 1003, 1004, 1005,
+                        1006, 1007, 2000, 4000, 10781986, 10880002
+                    )
+                limit
+                    50
+            )
+    );
+                                         QUERY PLAN
+--------------------------------------------------------------------------------------------
+ Hash Semi Join
+   Hash Cond: (query_rewrite.t3.cid = query_rewrite.t3.cid)
+   ->  Seq Scan on t3
+   ->  Hash
+         ->  Nested Loop Semi Join
+               Join Filter: ((query_rewrite.t3.name)::text = (query_rewrite.t3.name)::text)
+               ->  Seq Scan on t3
+                     Filter: (slot = 5)
+               ->  Limit
+                     ->  Hash Right Semi Join
+                           Hash Cond: ("*VALUES*".column1 = query_rewrite.t3.cid)
+                           ->  Values Scan on "*VALUES*"
+                           ->  Hash
+                                 ->  Seq Scan on t3
+                                       Filter: (slot = 5)
+(15 rows)
+
+select
+    *
+from
+    t3
+where
+    cid in (
+        select
+            cid
+        from
+            t3
+        where
+            slot = '5'
+            and (name) in (
+                select
+                    name
+                from
+                    t3
+                where
+                    slot = '5'
+                    and cid in (
+                        5, 1000, 1001, 1002, 1003, 1004, 1005,
+                        1006, 1007, 2000, 4000, 10781986, 10880002
+                    )
+                limit
+                    50
+            )
+    );
+ slot | cid |         name
+------+-----+-----------------------
+    5 |   5 | records.storage.state
+(1 row)
+
 drop schema query_rewrite cascade;
-NOTICE: drop cascades to 2 other objects
+NOTICE: drop cascades to 3 other objects
 DETAIL: drop cascades to table t1
 drop cascades to table t2
+drop cascades to table t3
 reset current_schema;
diff --git a/src/test/regress/expected/rangefuncs_1.out b/src/test/regress/expected/rangefuncs_1.out
index b7646521b..495d40acf 100644
--- a/src/test/regress/expected/rangefuncs_1.out
+++ b/src/test/regress/expected/rangefuncs_1.out
@@ -51,6 +51,7 @@ SELECT name, setting FROM pg_settings WHERE name LIKE 'enable%' ORDER BY name;
 enable_light_proxy | on
 enable_logical_io_statistics | on
 enable_material | on
+ enable_memory_context_check_debug | off
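The new query_rewrite cases exercise qrw_inlist2join_optmode = 'rule_base', under which a long IN list is rewritten into a semi-join against a Values Scan instead of being evaluated as a per-row filter, as the plans above show. A minimal sketch against the t3 table the test defines:

```
-- Sketch, reusing the test's t3: under rule_base the IN list becomes a join.
SET qrw_inlist2join_optmode = 'rule_base';
EXPLAIN (costs off)
SELECT * FROM t3 WHERE cid IN (5, 1000, 1001, 1002, 1003, 1004, 1005);
-- expect a Semi Join over "Values Scan on *VALUES*" rather than a Seq Scan filter
RESET qrw_inlist2join_optmode;
```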
 enable_memory_context_control | off
 enable_memory_limit | on
 enable_mergejoin | on
@@ -89,7 +90,7 @@ SELECT name, setting FROM pg_settings WHERE name LIKE 'enable%' ORDER BY name;
 enable_vector_engine | on
 enable_wdr_snapshot | off
 enable_xlog_prune | on
-(115 rows)
+--?.*
 
 CREATE TABLE foo2(fooid int, f2 int);
 INSERT INTO foo2 VALUES(1, 11);
diff --git a/src/test/regress/expected/rawlike.out b/src/test/regress/expected/rawlike.out
new file mode 100644
index 000000000..a6a731545
--- /dev/null
+++ b/src/test/regress/expected/rawlike.out
@@ -0,0 +1,32 @@
+create database utf8test template template0 encoding 'utf8';
+\c utf8test
+create table rawlike_t1(c1 raw);
+insert into rawlike_t1 values(hextoraw('D'));
+select * from rawlike_t1 where c1 like hextoraw('D');
+ c1
+----
+ 0D
+(1 row)
+
+insert into rawlike_t1 values(hextoraw('D9'));
+select * from rawlike_t1 where c1 like hextoraw('D9');
+ c1
+----
+ D9
+(1 row)
+
+insert into rawlike_t1 values(hextoraw('D9a'));
+select * from rawlike_t1 where c1 like hextoraw('D9a');
+  c1
+------
+ 0D9A
+(1 row)
+
+select * from rawlike_t1 where c1 like hextoraw('D9f');
+ c1
+----
+(0 rows)
+
+drop table rawlike_t1;
+\c postgres
+drop database utf8test;
diff --git a/src/test/regress/expected/replace_func_with_two_args.out b/src/test/regress/expected/replace_func_with_two_args.out
new file mode 100644
index 000000000..95123fd59
--- /dev/null
+++ b/src/test/regress/expected/replace_func_with_two_args.out
@@ -0,0 +1,111 @@
+--
+-- replace function with two arguments
+--
+select replace('string', '');
+ replace
+---------
+ string
+(1 row)
+
+select replace('string', 'i');
+ replace
+---------
+ strng
+(1 row)
+
+select replace('string', 'in');
+ replace
+---------
+ strg
+(1 row)
+
+select replace('string', 'ing');
+ replace
+---------
+ str
+(1 row)
+
+select replace('', 'ing');
+ replace
+---------
+
+(1 row)
+
+select replace(NULL, 'ing');
+ replace
+---------
+
+(1 row)
+
+select replace('ing', '');
+ replace
+---------
+ ing
+(1 row)
+
+select replace('ing', NULL);
+ replace
+---------
+ ing
+(1 row)
+
+select replace('', '');
+ replace
+---------
+
+(1 row)
+
+select replace(NULL, NULL);
+ replace
+---------
+
+(1 row)
+
+select replace(123, '1');
+ replace
+---------
+ 23
+(1 row)
+
+select replace('123', 1);
+ replace
+---------
+ 23
+(1 row)
+
+select replace(123, 1);
+ replace
+---------
+ 23
+(1 row)
+
+select replace('abc\nabc', '\n');
+ replace
+---------
+ abcabc
+(1 row)
+
+select replace('abc\nabc', E'\n');
+ replace
+----------
+ abc\nabc
+(1 row)
+
+select replace(E'abc\nabc', E'\n');
+ replace
+---------
+ abcabc
+(1 row)
+
+select replace('~!@#$%^&*()', '!@');
+  replace
+-----------
+ ~#$%^&*()
+(1 row)
+
+select replace('高斯', '高');
+ replace
+---------
+ 斯
+(1 row)
+
diff --git a/src/test/regress/expected/row_compression/normal_test.out b/src/test/regress/expected/row_compression/normal_test.out
deleted file mode 100644
index 24a096fac..000000000
--- a/src/test/regress/expected/row_compression/normal_test.out
+++ /dev/null
@@ -1,165 +0,0 @@
-create schema normal_test;
-CREATE TABLE normal_test.tbl_pc(id int, c1 text) WITH(compresstype=1);
-\d+ normal_test.tbl_pc
-                 Table "normal_test.tbl_pc"
- Column |  Type   | Modifiers | Storage  | Stats target | Description
---------+---------+-----------+----------+--------------+-------------
- id     | integer |           | plain    |              |
- c1     | text    |           | extended |              |
-Has OIDs: no
-Options: orientation=row, compresstype=1
-
-INSERT INTO normal_test.tbl_pc SELECT id, id::text FROM generate_series(1,1000) id;
-select count(*) from normal_test.tbl_pc;
- count
--------
-  1000
-(1 row)
-
-select count(*) from normal_test.tbl_pc where id < 100;
- count
--------
-    99
-(1 row)
-
-checkpoint;
-vacuum normal_test.tbl_pc;
-select count(*) from normal_test.tbl_pc;
- count
--------
-  1000
-(1 row)
-
-select count(*) from normal_test.tbl_pc where id < 100;
- count
--------
-    99
-(1 row)
-
--- normal index
-create index on normal_test.tbl_pc(id) WITH (compresstype=2,compress_chunk_size=1024);
-alter index normal_test.tbl_pc_id_idx set (compresstype=1); --failed
-ERROR: change compresstype OPTION is not supported
-alter index normal_test.tbl_pc_id_idx set (compress_chunk_size=2048); --failed
-ERROR: change compress_chunk_size OPTION is not supported
-alter index normal_test.tbl_pc_id_idx set (compress_prealloc_chunks=2); --success
-alter index normal_test.tbl_pc_id_idx set (compress_level=2); --success
-set enable_seqscan = off;
-set enable_bitmapscan = off;
-select count(*) from normal_test.tbl_pc;
- count
--------
-  1000
-(1 row)
-
-CREATE TABLE normal_test.tbl_partition(id int) WITH(compresstype=2,compress_chunk_size=1024) partition by range(id)
-(
-    partition p0 values less than(5000),
-    partition p1 values less than(10000),
-    partition p2 values less than(20000),
-    partition p3 values less than(30000),
-    partition p4 values less than(40000),
-    partition p5 values less than(50000),
-    partition p6 values less than(60000),
-    partition p7 values less than(70000)
-);
-insert into normal_test.tbl_partition select generate_series(1,65000);
-select count(*) from normal_test.tbl_partition;
- count
--------
- 65000
(1 row)
-
-checkpoint;
-vacuum normal_test.tbl_partition;
-select count(*) from normal_test.tbl_partition;
- count
--------
- 65000
-(1 row)
-
--- exchange
-select relname, reloptions from pg_partition where parentid in (Select relfilenode from pg_class where relname like 'tbl_partition') order by relname;
-    relname    |                                 reloptions
----------------+-----------------------------------------------------------------------------
- p0            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p1            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p2            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p3            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p4            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p5            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p6            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p7            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- tbl_partition | {orientation=row,compresstype=2,compress_chunk_size=1024,wait_clean_gpi=n}
-(9 rows)
-
-create table normal_test.exchange_table(id int) WITH(compresstype=2,compress_chunk_size=1024);
-ALTER TABLE normal_test.tbl_partition EXCHANGE PARTITION FOR(2500) WITH TABLE normal_test.exchange_table;
-select count(*) from normal_test.tbl_partition;
- count
--------
- 60001
-(1 row)
-
--- spilit
-ALTER TABLE normal_test.tbl_partition SPLIT PARTITION p1 AT (7500) INTO (PARTITION p10, PARTITION p11);
-select relname, reloptions from pg_partition where parentid in (Select relfilenode from pg_class where relname like 'tbl_partition') order by relname;
-    relname    |                                 reloptions
----------------+-----------------------------------------------------------------------------
- p0            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p10           | {orientation=row,compresstype=2,compress_chunk_size=1024,wait_clean_gpi=y}
- p11           | {orientation=row,compresstype=2,compress_chunk_size=1024,wait_clean_gpi=y}
- p2            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p3            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p4            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p5            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p6            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- p7            | {orientation=row,compresstype=2,compress_chunk_size=1024}
- tbl_partition | {orientation=row,compresstype=2,compress_chunk_size=1024,wait_clean_gpi=y}
-(10 rows)
-
-create index on normal_test.tbl_partition(id) local WITH (compresstype=2,compress_chunk_size=1024);
-\d+ normal_test.tbl_partition
-          Table "normal_test.tbl_partition"
- Column |  Type   | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- id     | integer |           | plain   |              |
-Indexes:
-    "tbl_partition_id_idx" btree (id) LOCAL(PARTITION p0_id_idx, PARTITION p10_id_idx, PARTITION p11_id_idx, PARTITION p2_id_idx, PARTITION p3_id_idx, PARTITION p4_id_idx, PARTITION p5_id_idx, PARTITION p6_id_idx, PARTITION p7_id_idx) WITH (compresstype=2, compress_chunk_size=1024) TABLESPACE pg_default
---?.*
---?.*
-Has OIDs: no
-Options: orientation=row, compresstype=2, compress_chunk_size=1024
-
-select relname, reloptions from pg_partition where parentid in (Select relfilenode from pg_class where relname like 'tbl_partition_id_idx') order by relname;
-  relname   |                reloptions
-------------+-------------------------------------------
- p0_id_idx  | {compresstype=2,compress_chunk_size=1024}
- p10_id_idx | {compresstype=2,compress_chunk_size=1024}
- p11_id_idx | {compresstype=2,compress_chunk_size=1024}
- p2_id_idx  | {compresstype=2,compress_chunk_size=1024}
- p3_id_idx  | {compresstype=2,compress_chunk_size=1024}
- p4_id_idx  | {compresstype=2,compress_chunk_size=1024}
- p5_id_idx  | {compresstype=2,compress_chunk_size=1024}
- p6_id_idx  | {compresstype=2,compress_chunk_size=1024}
- p7_id_idx  | {compresstype=2,compress_chunk_size=1024}
-(9 rows)
-
--- unsupport
-alter index normal_test.tbl_partition_id_idx set (compresstype=1);
-ERROR: change compresstype OPTION is not supported
-alter index normal_test.tbl_partition_id_idx set (compress_chunk_size=2048);
-ERROR: change compress_chunk_size OPTION is not supported
-alter index normal_test.tbl_partition_id_idx set (compress_prealloc_chunks=2);
-ERROR: change partition compress_prealloc_chunks OPTION is not supported
-create index rolcompress_index on normal_test.tbl_pc(id) with (compress_chunk_size=4096);
-ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype.
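The removed normal_test.out encoded which row-compression reloptions can change after creation: compresstype and compress_chunk_size are fixed when the relation is created, while compress_prealloc_chunks (and, on indexes, compress_level) may be altered later. A sketch of that contract, with hypothetical table names:

```
-- Hypothetical names; summarizes the removed test's contract.
CREATE TABLE cmpr_demo(id int) WITH (compresstype=2, compress_chunk_size=1024);
ALTER TABLE cmpr_demo SET (compress_prealloc_chunks=1);  -- accepted: mutable option
ALTER TABLE cmpr_demo SET (compress_chunk_size=2048);    -- rejected: fixed at creation
ALTER TABLE cmpr_demo SET (compresstype=1);              -- rejected: fixed at creation
```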
-create table rolcompress_table_001(a int) with (compresstype=2, compress_prealloc_chunks=3);
-ERROR: invalid compress_prealloc_chunks 3 , must be less than 2 for rolcompress_table_001
--- support
-alter table normal_test.tbl_pc set (compress_prealloc_chunks=1);
-drop schema normal_test cascade;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table normal_test.tbl_pc
-drop cascades to table normal_test.tbl_partition
-drop cascades to table normal_test.exchange_table
diff --git a/src/test/regress/expected/row_compression/pg_table_size.out b/src/test/regress/expected/row_compression/pg_table_size.out
deleted file mode 100644
index 7f1dbf565..000000000
--- a/src/test/regress/expected/row_compression/pg_table_size.out
+++ /dev/null
@@ -1,79 +0,0 @@
--- row table pg_table_size
-create schema table_size_schema;
-CREATE TABLE table_size_schema.normal_table(id int);
-CREATE TABLE table_size_schema.compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=1024);
-CREATE TABLE table_size_schema.compressed_table_2048(id int) WITH(compresstype=2, compress_chunk_size=2048);
-CREATE TABLE table_size_schema.compressed_table_4096(id int) WITH(compresstype=2, compress_chunk_size=4096);
-select pg_table_size('table_size_schema.normal_table');
- pg_table_size
----------------
-             0
-(1 row)
-
-select pg_table_size('table_size_schema.compressed_table_1024');
- pg_table_size
----------------
-       5592896
-(1 row)
-
-select pg_table_size('table_size_schema.compressed_table_2048');
- pg_table_size
----------------
-       3196168
-(1 row)
-
-select pg_table_size('table_size_schema.compressed_table_4096');
- pg_table_size
----------------
-       2097664
-(1 row)
-
-drop schema table_size_schema cascade;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table table_size_schema.normal_table
-drop cascades to table table_size_schema.compressed_table_1024
-drop cascades to table table_size_schema.compressed_table_2048
-drop cascades to table table_size_schema.compressed_table_4096
--- partition table pg_table_size
-create schema partition_table_size_schema;
-create table partition_table_size_schema.normal_partition(INV_DATE_SK integer)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-create table partition_table_size_schema.compressed_partition_1024(INV_DATE_SK integer)
-WITH(compresstype=2, compress_chunk_size=1024)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-create table partition_table_size_schema.compressed_partition_2048(INV_DATE_SK integer)
-WITH(compresstype=2, compress_chunk_size=2048)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-create table partition_table_size_schema.compressed_partition_4096(INV_DATE_SK integer)
-WITH(compresstype=2, compress_chunk_size=4096)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-select pg_table_size('partition_table_size_schema.normal_partition');
- pg_table_size
----------------
-             0
-(1 row)
-
-select pg_table_size('partition_table_size_schema.compressed_partition_1024');
- pg_table_size
----------------
-      11185792
-(1 row)
-
-select pg_table_size('partition_table_size_schema.compressed_partition_2048');
- pg_table_size
----------------
-       6392336
-(1 row)
-
-select pg_table_size('partition_table_size_schema.compressed_partition_4096');
- pg_table_size
----------------
-       4195328
-(1 row)
-
-drop schema partition_table_size_schema cascade;
-NOTICE: drop cascades to 4 other objects
-DETAIL: drop cascades to table partition_table_size_schema.normal_partition
-drop cascades to table partition_table_size_schema.compressed_partition_1024
-drop cascades to table partition_table_size_schema.compressed_partition_2048
-drop cascades to table partition_table_size_schema.compressed_partition_4096
diff --git a/src/test/regress/expected/row_compression/pg_tablespace_size.out b/src/test/regress/expected/row_compression/pg_tablespace_size.out
deleted file mode 100644
index 6ccbe88ce..000000000
--- a/src/test/regress/expected/row_compression/pg_tablespace_size.out
+++ /dev/null
@@ -1,32 +0,0 @@
-CREATE TABLESPACE normal_tablespace RELATIVE LOCATION 'normal_tablespace';
-SELECT pg_tablespace_size('normal_tablespace');
- pg_tablespace_size
--------------------
-                 10
-(1 row)
-
-CREATE TABLE normal_table(id int) TABLESPACE normal_tablespace;
-SELECT pg_tablespace_size('normal_tablespace');
- pg_tablespace_size
--------------------
-                 36
-(1 row)
-
-CREATE TABLESPACE compress_tablespace RELATIVE LOCATION 'compress_tablespace';
-SELECT pg_tablespace_size('compress_tablespace');
- pg_tablespace_size
--------------------
-                 10
-(1 row)
-
-CREATE TABLE compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=1024) TABLESPACE compress_tablespace;
-SELECT pg_tablespace_size('compress_tablespace');
- pg_tablespace_size
--------------------
-            5592972
-(1 row)
-
-DROP TABLE normal_table;
-DROP TABLESPACE normal_tablespace;
-DROP TABLE compressed_table_1024;
-DROP TABLESPACE compress_tablespace;
diff --git a/src/test/regress/expected/row_compression/unsupported_feature.out b/src/test/regress/expected/row_compression/unsupported_feature.out
deleted file mode 100644
index 3f3123da7..000000000
--- a/src/test/regress/expected/row_compression/unsupported_feature.out
+++ /dev/null
@@ -1,79 +0,0 @@
-create schema unspported_feature;
--- unspport compressType: 3
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=3, compress_chunk_size=1024);
-ERROR: value 3 out of bounds for option "compresstype"
-DETAIL: Valid values are between "0" and "2".
--- unspport compress_chunk_size: 2000
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=2000);
-ERROR: invalid compress_chunk_size 2000 , must be one of 512, 1024, 2048 or 4096 for compressed_table_1024
--- unspport compress_prealloc_chunks: -1
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_prealloc_chunks=-1);
-ERROR: value -1 out of bounds for option "compress_prealloc_chunks"
-DETAIL: Valid values are between "0" and "7".
--- unspport compress_prealloc_chunks: 8
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_prealloc_chunks=8);
-ERROR: value 8 out of bounds for option "compress_prealloc_chunks"
-DETAIL: Valid values are between "0" and "7".
--- unspport compress_level: 128
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_level=128);
-ERROR: value 128 out of bounds for option "compress_level"
-DETAIL: Valid values are between "-31" and "31".
--- compresstype cant be used with column table
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(ORIENTATION = 'column', compresstype=2);
-ERROR: only row orientation table support compresstype.
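The removed unsupported_feature.out cases also pin the option's scope: compresstype is valid only on ordinary row-store tables, so column-oriented, temporary and unlogged tables all reject it. A short sketch with hypothetical names:

```
-- Hypothetical names; compresstype is a row-store-only option.
CREATE TABLE row_ok(id int) WITH (compresstype=2);                            -- accepted
CREATE TABLE col_bad(id int) WITH (ORIENTATION = 'column', compresstype=2);  -- rejected
CREATE TEMP TABLE tmp_bad(id int) WITH (compresstype=2);                      -- rejected
```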
--- compresstype cant be used with temp table
-CREATE TEMP TABLE compressed_temp_table_1024(id int) WITH(compresstype=2);
-ERROR: only row orientation table support compresstype.
--- compresstype cant be used with unlogged table
-CREATE unlogged TABLE compressed_unlogged_table_1024(id int) WITH(compresstype=2);
-ERROR: only row orientation table support compresstype.
--- use compress_prealloc_chunks\compress_chunk_size\compress_level without compresstype
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_prealloc_chunks=5);
-ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype.
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_chunk_size=1024);
-ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype.
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_byte_convert=true);
-ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype.
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_diff_convert=true);
-ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype.
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_level=5);
-ERROR: compress_chunk_size/compress_prealloc_chunks/compress_level/compress_byte_convert/compress_diff_convert should be used with compresstype.
--- unspport exchange
-CREATE TABLE unspported_feature.exchange_table(id int) WITH(compresstype=2);
-CREATE TABLE unspported_feature.alter_table(id int) partition by range(id)
-(
-    partition p0 values less than(5000),
-    partition p1 values less than(10000),
-    partition p2 values less than(20000),
-    partition p3 values less than(30000),
-    partition p4 values less than(40000),
-    partition p5 values less than(50000),
-    partition p6 values less than(60000),
-    partition p7 values less than(70000)
-);
-ALTER TABLE unspported_feature.alter_table EXCHANGE PARTITION FOR(2500) WITH TABLE unspported_feature.exchange_table;
-ERROR: tables in ALTER TABLE EXCHANGE PARTITION must have the same type of compress
--- unspport alter compress_chunk_size
-create TABLE unspported_feature.alter_table_option(id int) WITH(compresstype=2);
-\d+ unspported_feature.alter_table_option
-     Table "unspported_feature.alter_table_option"
- Column |  Type   | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- id     | integer |           | plain   |              |
-Has OIDs: no
-Options: orientation=row, compresstype=2
-
-ALTER TABLE unspported_feature.alter_table_option SET(compresstype=0); -- fail
-ERROR: change compresstype OPTION is not supported
-ALTER TABLE unspported_feature.alter_table_option SET(compress_chunk_size=2048); -- fail
-ERROR: change compress_chunk_size OPTION is not supported
-ALTER TABLE unspported_feature.alter_table_option SET(compress_level=2, compress_prealloc_chunks=0);
--- alter compress_byte_convert\compress_diff_convert
-create table unspported_feature.rolcompress_table_001(a int) with (compresstype=2, compress_diff_convert=true); -- fail
-ERROR: compress_diff_convert should be used with compress_byte_convert.
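Two more dependencies follow from the removed file: compress_diff_convert is only accepted alongside compress_byte_convert, and compress_level presupposes the ZSTD algorithm (compresstype=2). A sketch with a hypothetical table:

```
-- Hypothetical table; option dependencies from the removed tests.
CREATE TABLE conv_demo(cid int, name varchar2) WITH (compresstype=1);
ALTER TABLE conv_demo SET (compress_diff_convert=true);                              -- rejected alone
ALTER TABLE conv_demo SET (compress_byte_convert=true, compress_diff_convert=true);  -- accepted together
ALTER TABLE conv_demo SET (compress_level=31);                                       -- rejected: needs ZSTD (compresstype=2)
```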
-create table unspported_feature.t_rowcompress_0007(cid int, name varchar2) with (compresstype=1);
-alter table unspported_feature.t_rowcompress_0007 set (compress_diff_convert=true); --fail
-ERROR: compress_diff_convert should be used with compress_byte_convert.
-alter table unspported_feature.t_rowcompress_0007 set (compress_byte_convert=true, compress_diff_convert=true); --success
-alter table unspported_feature.t_rowcompress_0007 set (compress_level=31); --failed
-ERROR: compress_level should be used with ZSTD algorithm.
diff --git a/src/test/regress/expected/segment_subpartition_add_drop_partition.out b/src/test/regress/expected/segment_subpartition_add_drop_partition.out
new file mode 100644
index 000000000..1822f1671
--- /dev/null
+++ b/src/test/regress/expected/segment_subpartition_add_drop_partition.out
@@ -0,0 +1,2370 @@
+DROP SCHEMA segment_subpartition_add_drop_partition CASCADE;
+ERROR: schema "segment_subpartition_add_drop_partition" does not exist
+CREATE SCHEMA segment_subpartition_add_drop_partition;
+SET CURRENT_SCHEMA TO segment_subpartition_add_drop_partition;
+--
+----range-range table----
+--
+--prepare
+CREATE TABLE range_range_sales
+(
+    product_id     INT4 NOT NULL,
+    customer_id    INT4 PRIMARY KEY,
+    time_id        DATE,
+    channel_id     CHAR(1),
+    type_id        INT4,
+    quantity_sold  NUMERIC(3),
+    amount_sold    NUMERIC(10,2)
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id)
+(
+    PARTITION customer1 VALUES LESS THAN (200)
+    (
+        SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'),
+        SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'),
+        SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'),
+        SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01')
+    ),
+    PARTITION customer2 VALUES LESS THAN (500)
+    (
+        SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'),
+        SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'),
+        SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'),
+        SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01')
+    ),
+    PARTITION customer3 VALUES LESS THAN (800),
+    PARTITION customer4 VALUES LESS THAN (1200)
+    (
+        SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01')
+    )
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_sales_pkey" for table "range_range_sales"
+INSERT INTO range_range_sales SELECT generate_series(1,1000),
+                                     generate_series(1,1000),
+                                     date_pli('2008-01-01', generate_series(1,1000)),
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%1000,
+                                     generate_series(1,1000);
+CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL;
+--check for add partition/subpartition
+--success, add 4 subpartition
+ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500)
+    (
+        SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'),
+        SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'),
+        SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'),
+        SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01')
+    );
+--fail, out of range
+ALTER TABLE range_range_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100);
+ERROR: upper boundary of adding partition MUST overtop last existing partition
+--fail, invalid format
+ALTER TABLE range_range_sales ADD PARTITION customer_temp2 VALUES (1300);
+ERROR: can not add none-range partition to range partition table
+--success, add 1 default subpartition
+ALTER TABLE range_range_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE);
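The new segment_subpartition_add_drop_partition.out drives ADD PARTITION on a SEGMENT=ON range-range table: a new partition may declare its own subpartition list, and one added without a list receives a single generated default subpartition (shown as <partition>_subpartdefault1 in pg_partition). A condensed sketch on a hypothetical table:

```
-- Hypothetical table; condensed from the pattern exercised above.
CREATE TABLE demo_sales(id int, d date) WITH (SEGMENT=ON)
PARTITION BY RANGE (id) SUBPARTITION BY RANGE (d)
(
    PARTITION p1 VALUES LESS THAN (100)
    (
        SUBPARTITION p1_2008 VALUES LESS THAN ('2009-01-01'),
        SUBPARTITION p1_2009 VALUES LESS THAN ('2010-01-01')
    )
);
-- explicit subpartition list on the new partition
ALTER TABLE demo_sales ADD PARTITION p2 VALUES LESS THAN (200)
    (
        SUBPARTITION p2_2008 VALUES LESS THAN ('2009-01-01'),
        SUBPARTITION p2_2009 VALUES LESS THAN ('2010-01-01')
    );
-- no list: a single default subpartition is generated
ALTER TABLE demo_sales ADD PARTITION p3 VALUES LESS THAN (MAXVALUE);
```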
+--fail, out of range
+ALTER TABLE range_range_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800);
+ERROR: upper boundary of adding partition MUST overtop last existing partition
+--success, add 1 subpartition
+ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01');
+--fail, out of range
+ALTER TABLE range_range_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES LESS THAN ('2015-01-01');
+ERROR: upper boundary of adding partition MUST overtop last existing partition
+--fail, out of range
+ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('2011-01-01');
+ERROR: upper boundary of adding partition MUST overtop last existing partition
+--fail, invalid format
+ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01');
+ERROR: can not add none-range partition to range partition table
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+    FROM pg_class c1, pg_partition p1, pg_namespace n1
+    WHERE c1.relname='range_range_sales'
+        AND c1.relnamespace=n1.oid
+        AND n1.nspname=CURRENT_SCHEMA
+        AND (p1.parentid=c1.oid
+            OR p1.parentid IN (
+                SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+                    WHERE c2.relname='range_range_sales'
+                        AND c2.relnamespace=n2.oid
+                        AND n2.nspname=CURRENT_SCHEMA
+                        AND (p2.parentid=c2.oid)
+            ))
+    ORDER BY p1.parttype, p1.relname;
+          relname          | parttype | partstrategy | hasfilenode | reltablespace | partkey |  boundaries
+---------------------------+----------+--------------+-------------+---------------+---------+--------------
+ customer1                 | p        | r            | f           |             0 | 3       | {200}
+ customer2                 | p        | r            | f           |             0 | 3       | {500}
+ customer3                 | p        | r            | f           |             0 | 3       | {800}
+ customer4                 | p        | r            | f           |             0 | 3       | {1200}
+ customer5                 | p        | r            | f           |             0 | 3       | {1500}
+ customer6                 | p        | r            | f           |             0 | 3       | {NULL}
+ range_range_sales         | r        | r            | f           |             0 | 2       |
+ customer1_2008            | s        | r            | t           |             0 |         | {2009-01-01}
+ customer1_2009            | s        | r            | t           |             0 |         | {2010-01-01}
+ customer1_2010            | s        | r            | t           |             0 |         | {2011-01-01}
+ customer1_2011            | s        | r            | t           |             0 |         | {2012-01-01}
+ customer1_2012            | s        | r            | t           |             0 |         | {2013-01-01}
+ customer2_2008            | s        | r            | t           |             0 |         | {2009-01-01}
+ customer2_2009            | s        | r            | t           |             0 |         | {2010-01-01}
+ customer2_2010            | s        | r            | t           |             0 |         | {2011-01-01}
+ customer2_2011            | s        | r            | t           |             0 |         | {2012-01-01}
+ customer3_subpartdefault1 | s        | r            | t           |             0 |         | {NULL}
+ customer4_all             | s        | r            | t           |             0 |         | {2012-01-01}
+ customer5_2008            | s        | r            | t           |             0 |         | {2009-01-01}
+ customer5_2009            | s        | r            | t           |             0 |         | {2010-01-01}
+ customer5_2010            | s        | r            | t           |             0 |         | {2011-01-01}
+ customer5_2011            | s        | r            | t           |             0 |         | {2012-01-01}
+ customer6_subpartdefault1 | s        | r            | t           |             0 |         | {NULL}
+(23 rows)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+    FROM pg_class c1, pg_partition p1, pg_namespace n1
+    WHERE c1.relname='range_range_sales_idx'
+        AND c1.relnamespace=n1.oid
+        AND n1.nspname=CURRENT_SCHEMA
+        AND (p1.parentid=c1.oid)
+    ORDER BY p1.relname;
+                 relname                  | parttype | partstrategy | hasfilenode | indisusable
+------------------------------------------+----------+--------------+-------------+-------------
+ customer1_2008_product_id_idx            | x        | n            | t           | t
+ customer1_2009_product_id_idx            | x        | n            | t           | t
+ customer1_2010_product_id_idx            | x        | n            | t           | t
+ customer1_2011_product_id_idx            | x        | n            | t           | t
+ customer1_2012_product_id_idx            | x        | n            | t           | t
+ customer2_2008_product_id_idx            | x        | n            | t           | t
+ customer2_2009_product_id_idx            | x        | n            | t           | t
+ customer2_2010_product_id_idx            | x        | n            | t           | t
+ customer2_2011_product_id_idx            | x        | n            | t           | t
+ customer3_subpartdefault1_product_id_idx | x        | n            | t           | t
+ customer4_all_product_id_idx             | x        | n            | t           | t
+ customer5_2008_product_id_idx            | x        | n            | t           | t
+ customer5_2009_product_id_idx            | x        | n            | t           | t
+ customer5_2010_product_id_idx            | x        | n            | t           | t
+ customer5_2011_product_id_idx            | x        | n            | t           | t
+ customer6_subpartdefault1_product_id_idx | x        | n            | t           | t
+(16 rows)
+
+\d+ range_range_sales
+            Table "segment_subpartition_add_drop_partition.range_range_sales"
+    Column     |              Type              | Modifiers | Storage  | Stats target | Description
+---------------+--------------------------------+-----------+----------+--------------+-------------
+ product_id    | integer                        | not null  | plain    |              |
+ customer_id   | integer                        | not null  | plain    |              |
+ time_id       | timestamp(0) without time zone |           | plain    |              |
+ channel_id    | character(1)                   |           | extended |              |
+ type_id       | integer                        |           | plain    |              |
+ quantity_sold | numeric(3,0)                   |           | main     |              |
+ amount_sold   | numeric(10,2)                  |           | main     |              |
+Indexes:
+    "range_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default
+    "range_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default
+Partition By RANGE(customer_id) Subpartition By RANGE(time_id)
+Number of partitions: 6 (View pg_partition to check each partition range.)
+Number of subpartitions: 16 (View pg_partition to check each subpartition range.)
+Has OIDs: no
+Options: orientation=row, segment=on, compression=no
+
+--check for drop partition/subpartition (for)
+--success, drop partition customer2
+ALTER TABLE range_range_sales DROP PARTITION customer2;
+--success
+ALTER TABLE range_range_sales DROP SUBPARTITION customer1_2008;
+--fail, the only subpartition
+ALTER TABLE range_range_sales DROP SUBPARTITION customer4_all;
+ERROR: Cannot drop the only subpartition of a partitioned table
+DETAIL: N/A
+--success, drop partition customer3
+ALTER TABLE range_range_sales DROP PARTITION FOR (400);
+--fail, number not equal to the number of partkey
+ALTER TABLE range_range_sales DROP PARTITION FOR (400, '2010-01-01');
+ERROR: number of boundary items NOT EQUAL to number of partition keys
+--fail, number not equal to the number of partkey
+ALTER TABLE range_range_sales DROP SUBPARTITION FOR (1400);
+ERROR: Number of boundary items NOT EQUAL to number of partition keys
+DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table
+--fail, invalid type
+ALTER TABLE range_range_sales DROP PARTITION FOR ('2010-01-01');
+ERROR: invalid input syntax for integer: "2010-01-01"
+--fail, invalid type
+ALTER TABLE range_range_sales DROP SUBPARTITION FOR ('2010-01-01', 1400);
+ERROR: invalid input syntax for integer: "2010-01-01"
+--success, drop subpartition customer5_2010
+ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2010-01-01');
+--fail, the only subpartition in customer6
+ALTER TABLE range_range_sales DROP SUBPARTITION FOR(2000, '2009-01-01');
+ERROR: Cannot drop the only subpartition of a partitioned table
+DETAIL: N/A
+--fail, no subpartition find
+ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2012-01-01');
+ERROR: The subpartition number is invalid or out-of-range
+DETAIL: N/A
+--check for ok after drop
+SELECT count(*) FROM range_range_sales;
+ count
+-------
+   201
+(1 row)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+    FROM pg_class c1, pg_partition p1, pg_namespace n1
+    WHERE c1.relname='range_range_sales'
+        AND c1.relnamespace=n1.oid
+        AND n1.nspname=CURRENT_SCHEMA
+        AND (p1.parentid=c1.oid
+            OR p1.parentid IN (
+                SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+                    WHERE c2.relname='range_range_sales'
+                        AND c2.relnamespace=n2.oid
+                        AND n2.nspname=CURRENT_SCHEMA
+                        AND (p2.parentid=c2.oid)
+            ))
+    ORDER BY p1.parttype, p1.relname;
+          relname          | parttype | partstrategy | hasfilenode | reltablespace | partkey |  boundaries
+---------------------------+----------+--------------+-------------+---------------+---------+--------------
+ customer1                 | p        | r            | f           |             0 | 3       | {200}
+ customer4                 | p        | r            | f           |             0 | 3       | {1200}
+ customer5                 | p        | r            | f           |             0 | 3       | {1500}
+ customer6                 | p        | r            | f           |             0 | 3       | {NULL}
+ range_range_sales         | r        | r            | f           |             0 | 2       |
+ customer1_2009            | s        | r            | t           |             0 |         | {2010-01-01}
+ customer1_2010            | s        | r            | t           |             0 |         | {2011-01-01}
+ customer1_2011            | s        | r            | t           |             0 |         | {2012-01-01}
+ customer1_2012            | s        | r            | t           |             0 |         | {2013-01-01}
+ customer4_all             | s        | r            | t           |             0 |         | {2012-01-01}
+ customer5_2008            | s        | r            | t           |             0 |         | {2009-01-01}
+ customer5_2009            | s        | r            | t           |             0 |         | {2010-01-01}
+ customer5_2011            | s        | r            | t           |             0 |         | {2012-01-01}
+ customer6_subpartdefault1 | s        | r            | t           |             0 |         | {NULL}
+(14 rows)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+    FROM pg_class c1, pg_partition p1, pg_namespace n1
+    WHERE c1.relname='range_range_sales_idx'
+        AND c1.relnamespace=n1.oid
+        AND n1.nspname=CURRENT_SCHEMA
+        AND (p1.parentid=c1.oid)
+    ORDER BY p1.relname;
+                 relname                  | parttype | partstrategy | hasfilenode | indisusable
+------------------------------------------+----------+--------------+-------------+-------------
+ customer1_2009_product_id_idx            | x        | n            | t           | t
+ customer1_2010_product_id_idx            | x        | n            | t           | t
+ customer1_2011_product_id_idx            | x        | n            | t           | t
+ customer1_2012_product_id_idx            | x        | n            | t           | t
+ customer4_all_product_id_idx             | x        | n            | t           | t
+ customer5_2008_product_id_idx            | x        | n            | t           | t
+ customer5_2009_product_id_idx            | x        | n            | t           | t
+ customer5_2011_product_id_idx            | x        | n            | t           | t
+ customer6_subpartdefault1_product_id_idx | x        | n            | t           | t
+(9 rows)
+
+\d+ range_range_sales
+            Table "segment_subpartition_add_drop_partition.range_range_sales"
+    Column     |              Type              | Modifiers | Storage  | Stats target | Description
+---------------+--------------------------------+-----------+----------+--------------+-------------
+ product_id    | integer                        | not null  | plain    |              |
+ customer_id   | integer                        | not null  | plain    |              |
+ time_id       | timestamp(0) without time zone |           | plain    |              |
+ channel_id    | character(1)                   |           | extended |              |
+ type_id       | integer                        |           | plain    |              |
+ quantity_sold | numeric(3,0)                   |           | main     |              |
+ amount_sold   | numeric(10,2)                  |           | main     |              |
+Indexes:
+    "range_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE
+    "range_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default
+Partition By RANGE(customer_id) Subpartition By RANGE(time_id)
+Number of partitions: 4 (View pg_partition to check each partition range.)
+Number of subpartitions: 9 (View pg_partition to check each subpartition range.)
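DROP ... FOR addresses partitions by value rather than by name: DROP PARTITION FOR takes one value per partition key, while DROP SUBPARTITION FOR requires the partition-key value followed by the subpartition-key value, in that order. Continuing the hypothetical demo_sales table from the sketch above:

```
-- Continuing the hypothetical demo_sales table from the earlier sketch.
ALTER TABLE demo_sales DROP PARTITION FOR (150);                  -- drops p2, which holds id=150
ALTER TABLE demo_sales DROP SUBPARTITION FOR (50, '2008-06-01');  -- drops p1_2008
-- A partition's last remaining subpartition cannot be dropped, mirroring the errors above.
```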
+Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----range-list table---- +-- +--prepare +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_sales_pkey" for table "range_list_sales" +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_list_sales ADD PARTITION customer_temp2 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_list_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--success, add 1 subpartition +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); +ERROR: can not add none-list partition to list partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND 
c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+----------------------- + customer1 | p | r | f | 0 | 4 | {200} + customer2 | p | r | f | 0 | 4 | {500} + customer3 | p | r | f | 0 | 4 | {800} + customer4 | p | r | f | 0 | 4 | {1200} + customer5 | p | r | f | 0 | 4 | {1500} + customer6 | p | r | f | 0 | 4 | {NULL} + range_list_sales | r | r | f | 0 | 2 | + customer1_channel1 | s | l | t | 0 | | {0,1,2} + customer1_channel2 | s | l | t | 0 | | {3,4,5} + customer1_channel3 | s | l | t | 0 | | {6,7,8} + customer1_channel4 | s | l | t | 0 | | {9} + customer1_channel5 | s | l | t | 0 | | {X} + customer2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + customer2_channel2 | s | l | t | 0 | | {NULL} + customer3_subpartdefault1 | s | l | t | 0 | | {NULL} + customer4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + customer5_channel1 | s | l | t | 0 | | {0,1,2} + customer5_channel2 | s | l | t | 0 | | {3,4,5} + customer5_channel3 | s | l | t | 0 | | {6,7,8} + customer5_channel4 | s | l | t | 0 | | {9} + customer6_subpartdefault1 | s | l | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_channel1_product_id_idx | x | n | t | t + customer1_channel2_product_id_idx | x | n | t | t + customer1_channel3_product_id_idx | x | n | t | t + customer1_channel4_product_id_idx | x | n | t | t + customer1_channel5_product_id_idx | x | n | t | t + customer2_channel1_product_id_idx | x | n | t | t + customer2_channel2_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_channel1_product_id_idx | x | n | t | t + customer5_channel1_product_id_idx | x | n | t | t + customer5_channel2_product_id_idx | x | n | t | t + customer5_channel3_product_id_idx | x | n | t | t + customer5_channel4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ range_list_sales + Table "segment_subpartition_add_drop_partition.range_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By 
LIST(channel_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_list_sales DROP PARTITION customer2; +--success +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; +--fail, the only subpartition +ALTER TABLE range_list_sales DROP SUBPARTITION customer4_channel1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition customer3 +ALTER TABLE range_list_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP PARTITION FOR (400, '4'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP SUBPARTITION FOR (1400); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE range_list_sales DROP PARTITION FOR ('abc'); +ERROR: invalid input syntax for integer: "abc" +--fail, invalid type +ALTER TABLE range_list_sales DROP SUBPARTITION FOR ('abc', 1400); +ERROR: invalid input syntax for integer: "abc" +--success, drop subpartition customer5_channel3 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1400, '7'); +--fail, the only subpartition in customer6 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(2000, 'X'); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1100, 'X'); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM range_list_sales; + count +------- + 341 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+----------------------- + customer1 | p | r | f | 0 | 4 | {200} + customer4 | p | r | f | 0 | 4 | {1200} + customer5 | p | r | f | 0 | 4 | {1500} + customer6 | p | r | f | 0 | 4 | {NULL} + range_list_sales | r | r | f | 0 | 2 | + customer1_channel2 | s | l | t | 0 | | {3,4,5} + customer1_channel3 | s | l | t | 0 | | {6,7,8} + customer1_channel4 | s | l | t | 0 | | {9} + customer1_channel5 | s | l | t | 0 | | {X} + customer4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + customer5_channel1 | s | l | t | 0 | | {0,1,2} + customer5_channel2 | s | l | t | 0 | | {3,4,5} + customer5_channel4 | s | l | t | 0 | | {9} + customer6_subpartdefault1 | s | l | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM 
pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_channel2_product_id_idx | x | n | t | t + customer1_channel3_product_id_idx | x | n | t | t + customer1_channel4_product_id_idx | x | n | t | t + customer1_channel5_product_id_idx | x | n | t | t + customer4_channel1_product_id_idx | x | n | t | t + customer5_channel1_product_id_idx | x | n | t | t + customer5_channel2_product_id_idx | x | n | t | t + customer5_channel4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ range_list_sales + Table "segment_subpartition_add_drop_partition.range_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
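+--note: for a range-list table, DROP PARTITION FOR takes one value per partition key and
+--DROP SUBPARTITION FOR takes one value per level, partition key first, subpartition key second;
+--the pair addresses whichever subpartition would hold a row with those values. A minimal
+--sketch of the pattern (hypothetical table name and values, not part of the captured output):
+--  ALTER TABLE some_range_list_tab DROP SUBPARTITION FOR (950, '5');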
+Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----range-hash table---- +-- +--prepare +CREATE TABLE range_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY HASH (product_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_product1, + SUBPARTITION customer1_product2, + SUBPARTITION customer1_product3, + SUBPARTITION customer1_product4 + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_product1, + SUBPARTITION customer2_product2 + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_product1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_hash_sales_pkey" for table "range_hash_sales" +INSERT INTO range_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_hash_sales_idx ON range_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_product1, + SUBPARTITION customer5_product2, + SUBPARTITION customer5_product3, + SUBPARTITION customer5_product4 + ); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_hash_sales ADD PARTITION customer_temp2 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, not support add hash +ALTER TABLE range_hash_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; + ^ +--fail, invalid format +ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+------------ + customer1 | p | r | f | 0 | 1 | {200} + customer2 | p | r | f | 0 | 1 | {500} + customer3 | p 
| r | f | 0 | 1 | {800} + customer4 | p | r | f | 0 | 1 | {1200} + customer5 | p | r | f | 0 | 1 | {1500} + customer6 | p | r | f | 0 | 1 | {NULL} + range_hash_sales | r | r | f | 0 | 2 | + customer1_product1 | s | h | t | 0 | | {0} + customer1_product2 | s | h | t | 0 | | {1} + customer1_product3 | s | h | t | 0 | | {2} + customer1_product4 | s | h | t | 0 | | {3} + customer2_product1 | s | h | t | 0 | | {0} + customer2_product2 | s | h | t | 0 | | {1} + customer3_subpartdefault1 | s | h | t | 0 | | {0} + customer4_product1 | s | h | t | 0 | | {0} + customer5_product1 | s | h | t | 0 | | {0} + customer5_product2 | s | h | t | 0 | | {1} + customer5_product3 | s | h | t | 0 | | {2} + customer5_product4 | s | h | t | 0 | | {3} + customer6_subpartdefault1 | s | h | t | 0 | | {0} +(20 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_product1_product_id_idx | x | n | t | t + customer1_product2_product_id_idx | x | n | t | t + customer1_product3_product_id_idx | x | n | t | t + customer1_product4_product_id_idx | x | n | t | t + customer2_product1_product_id_idx | x | n | t | t + customer2_product2_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_product1_product_id_idx | x | n | t | t + customer5_product1_product_id_idx | x | n | t | t + customer5_product2_product_id_idx | x | n | t | t + customer5_product3_product_id_idx | x | n | t | t + customer5_product4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(13 rows) + +\d+ range_hash_sales + Table "segment_subpartition_add_drop_partition.range_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "range_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By HASH(product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 13 (View pg_partition to check each subpartition range.) 
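+--note: a hash subpartition cannot be added to an existing partition (MODIFY PARTITION ... ADD
+--SUBPARTITION is rejected above); the hash set is fixed when its partition is created, though a
+--newly added range partition may declare its own subpartitions, as customer5 does. Sketch of the
+--two supported forms (hypothetical names and boundaries, not part of the captured output):
+--  ALTER TABLE some_range_hash_tab ADD PARTITION customer7 VALUES LESS THAN (2000);
+--  ALTER TABLE some_range_hash_tab ADD PARTITION customer8 VALUES LESS THAN (2500)
+--      (SUBPARTITION customer8_product1, SUBPARTITION customer8_product2);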
+Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_hash_sales DROP PARTITION customer2; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer1_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer4_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--success, drop partition customer3 +ALTER TABLE range_hash_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_hash_sales DROP PARTITION FOR (400, '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, invalid type +ALTER TABLE range_hash_sales DROP PARTITION FOR ('2010-01-01'); +ERROR: invalid input syntax for integer: "2010-01-01" +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION FOR(1400, 1); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM range_hash_sales; + count +------- + 400 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+------------ + customer1 | p | r | f | 0 | 1 | {200} + customer4 | p | r | f | 0 | 1 | {1200} + customer5 | p | r | f | 0 | 1 | {1500} + customer6 | p | r | f | 0 | 1 | {NULL} + range_hash_sales | r | r | f | 0 | 2 | + customer1_product1 | s | h | t | 0 | | {0} + customer1_product2 | s | h | t | 0 | | {1} + customer1_product3 | s | h | t | 0 | | {2} + customer1_product4 | s | h | t | 0 | | {3} + customer4_product1 | s | h | t | 0 | | {0} + customer5_product1 | s | h | t | 0 | | {0} + customer5_product2 | s | h | t | 0 | | {1} + customer5_product3 | s | h | t | 0 | | {2} + customer5_product4 | s | h | t | 0 | | {3} + customer6_subpartdefault1 | s | h | t | 0 | | {0} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_product1_product_id_idx | x | n | t | t + customer1_product2_product_id_idx | x | n | t | t + customer1_product3_product_id_idx | x | n | t | t + customer1_product4_product_id_idx | x | n | t | t + customer4_product1_product_id_idx | x | n | t | t + customer5_product1_product_id_idx | x | n | t | t + customer5_product2_product_id_idx | x | n | t | t + 
customer5_product3_product_id_idx | x | n | t | t + customer5_product4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(10 rows) + +\d+ range_hash_sales + Table "segment_subpartition_add_drop_partition.range_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "range_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----list-range table---- +-- +--prepare +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_range_sales_pkey" for table "list_range_sales" +INSERT INTO list_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_customer1 VALUES LESS THAN (200), + SUBPARTITION channel5_customer2 VALUES LESS THAN (500), + SUBPARTITION channel5_customer3 VALUES LESS THAN (800), + SUBPARTITION channel5_customer4 VALUES LESS THAN (1200) + ); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_range_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_range_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp3 VALUES ('M', 
'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--success, add 1 subpartition +ALTER TABLE list_range_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_customer5 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES LESS THAN (2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES LESS THAN (2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500); +ERROR: can not add none-range partition to range partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 2 | {0,1,2} + channel2 | p | l | f | 0 | 2 | {3,4,5} + channel3 | p | l | f | 0 | 2 | {6,7} + channel4 | p | l | f | 0 | 2 | {8,9} + channel5 | p | l | f | 0 | 2 | {X} + channel6 | p | l | f | 0 | 2 | {NULL} + list_range_sales | r | l | f | 0 | 4 | + channel1_customer1 | s | r | t | 0 | | {200} + channel1_customer2 | s | r | t | 0 | | {500} + channel1_customer3 | s | r | t | 0 | | {800} + channel1_customer4 | s | r | t | 0 | | {1200} + channel1_customer5 | s | r | t | 0 | | {NULL} + channel2_customer1 | s | r | t | 0 | | {500} + channel2_customer2 | s | r | t | 0 | | {NULL} + channel3_subpartdefault1 | s | r | t | 0 | | {NULL} + channel4_customer1 | s | r | t | 0 | | {1200} + channel5_customer1 | s | r | t | 0 | | {200} + channel5_customer2 | s | r | t | 0 | | {500} + channel5_customer3 | s | r | t | 0 | | {800} + channel5_customer4 | s | r | t | 0 | | {1200} + channel6_subpartdefault1 | s | r | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_customer1_product_id_idx | x | n | t | t + channel1_customer2_product_id_idx | x | n | t | t + channel1_customer3_product_id_idx | x | n | t | t + channel1_customer4_product_id_idx | x | n | t | t + channel1_customer5_product_id_idx | x | n | t | t + channel2_customer1_product_id_idx | x | n | t | t + channel2_customer2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_customer1_product_id_idx | x | n | t | t + 
channel5_customer1_product_id_idx | x | n | t | t + channel5_customer2_product_id_idx | x | n | t | t + channel5_customer3_product_id_idx | x | n | t | t + channel5_customer4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ list_range_sales + Table "segment_subpartition_add_drop_partition.list_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By RANGE(customer_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_range_sales DROP PARTITION channel2; +--success +ALTER TABLE list_range_sales DROP SUBPARTITION channel1_customer1; +--fail, the only subpartition +ALTER TABLE list_range_sales DROP SUBPARTITION channel4_customer1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition channel3 +ALTER TABLE list_range_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP PARTITION FOR('X', 700); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X'); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE list_range_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, invalid type +ALTER TABLE list_range_sales DROP SUBPARTITION FOR(700, 'X'); +ERROR: value too long for type character(1) +--success, drop subpartition channel5_customer3 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 700); +--fail, the only subpartition in channel6 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('K', 100); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 2500); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM list_range_sales; + count +------- + 441 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + 
AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 2 | {0,1,2} + channel4 | p | l | f | 0 | 2 | {8,9} + channel5 | p | l | f | 0 | 2 | {X} + channel6 | p | l | f | 0 | 2 | {NULL} + list_range_sales | r | l | f | 0 | 4 | + channel1_customer2 | s | r | t | 0 | | {500} + channel1_customer3 | s | r | t | 0 | | {800} + channel1_customer4 | s | r | t | 0 | | {1200} + channel1_customer5 | s | r | t | 0 | | {NULL} + channel4_customer1 | s | r | t | 0 | | {1200} + channel5_customer1 | s | r | t | 0 | | {200} + channel5_customer2 | s | r | t | 0 | | {500} + channel5_customer4 | s | r | t | 0 | | {1200} + channel6_subpartdefault1 | s | r | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_customer2_product_id_idx | x | n | t | t + channel1_customer3_product_id_idx | x | n | t | t + channel1_customer4_product_id_idx | x | n | t | t + channel1_customer5_product_id_idx | x | n | t | t + channel4_customer1_product_id_idx | x | n | t | t + channel5_customer1_product_id_idx | x | n | t | t + channel5_customer2_product_id_idx | x | n | t | t + channel5_customer4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ list_range_sales + Table "segment_subpartition_add_drop_partition.list_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
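+--note: the FOR values follow key order, so for a list-range table the CHAR(1) list value comes
+--first and the INT4 range value second; each value is coerced to its key's type, which is why
+--FOR (10) above fails with "value too long for type character(1)". Sketch of the pattern
+--(hypothetical table name and values, not part of the captured output):
+--  ALTER TABLE some_list_range_tab DROP SUBPARTITION FOR ('9', 300);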
+Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----list-list table---- +-- +--prepare +CREATE TABLE list_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY LIST (channel_id) SUBPARTITION BY LIST (type_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_type1 VALUES (0, 1, 2), + SUBPARTITION channel1_type2 VALUES (3, 4), + SUBPARTITION channel1_type3 VALUES (5, 6, 7), + SUBPARTITION channel1_type4 VALUES (8, 9) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_type1 VALUES (0, 1, 2, 3), + SUBPARTITION channel2_type2 VALUES (DEFAULT) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_type1 VALUES (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_list_sales_pkey" for table "list_list_sales" +INSERT INTO list_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_list_sales_idx ON list_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_list_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_type1 VALUES (0, 1, 2), + SUBPARTITION channel5_type2 VALUES (3, 4), + SUBPARTITION channel5_type3 VALUES (5, 6, 7), + SUBPARTITION channel5_type4 VALUES (8, 9) + ); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_list_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_list_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--success, add 1 subpartition +ALTER TABLE list_list_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_type5 VALUES (DEFAULT); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES (10, 11, 12); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES (10, 11, 12); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); +ERROR: can not add none-list partition to list partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 
+ WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + channel1 | p | l | f | 0 | 5 | {0,1,2} + channel2 | p | l | f | 0 | 5 | {3,4,5} + channel3 | p | l | f | 0 | 5 | {6,7} + channel4 | p | l | f | 0 | 5 | {8,9} + channel5 | p | l | f | 0 | 5 | {X} + channel6 | p | l | f | 0 | 5 | {NULL} + list_list_sales | r | l | f | 0 | 4 | + channel1_type1 | s | l | t | 0 | | {0,1,2} + channel1_type2 | s | l | t | 0 | | {3,4} + channel1_type3 | s | l | t | 0 | | {5,6,7} + channel1_type4 | s | l | t | 0 | | {8,9} + channel1_type5 | s | l | t | 0 | | {NULL} + channel2_type1 | s | l | t | 0 | | {0,1,2,3} + channel2_type2 | s | l | t | 0 | | {NULL} + channel3_subpartdefault1 | s | l | t | 0 | | {NULL} + channel4_type1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + channel5_type1 | s | l | t | 0 | | {0,1,2} + channel5_type2 | s | l | t | 0 | | {3,4} + channel5_type3 | s | l | t | 0 | | {5,6,7} + channel5_type4 | s | l | t | 0 | | {8,9} + channel6_subpartdefault1 | s | l | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_type1_product_id_idx | x | n | t | t + channel1_type2_product_id_idx | x | n | t | t + channel1_type3_product_id_idx | x | n | t | t + channel1_type4_product_id_idx | x | n | t | t + channel1_type5_product_id_idx | x | n | t | t + channel2_type1_product_id_idx | x | n | t | t + channel2_type2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_type1_product_id_idx | x | n | t | t + channel5_type1_product_id_idx | x | n | t | t + channel5_type2_product_id_idx | x | n | t | t + channel5_type3_product_id_idx | x | n | t | t + channel5_type4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ list_list_sales + Table "segment_subpartition_add_drop_partition.list_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By LIST(type_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) 
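+--note: a DEFAULT list partition or subpartition is the catch-all for values matching no other
+--list and is recorded in pg_partition with boundary {NULL}, as channel6 and channel1_type5 show
+--above. Sketch of adding one to an existing partition (hypothetical names, not captured output):
+--  ALTER TABLE some_list_list_tab MODIFY PARTITION p1 ADD SUBPARTITION p1_rest VALUES (DEFAULT);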
+Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_list_sales DROP PARTITION channel2; +--success +ALTER TABLE list_list_sales DROP SUBPARTITION channel1_type1; +--fail, the only subpartition +ALTER TABLE list_list_sales DROP SUBPARTITION channel4_type1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition channel3 +ALTER TABLE list_list_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP PARTITION FOR('X', 6); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X'); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE list_list_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, invalid type +ALTER TABLE list_list_sales DROP SUBPARTITION FOR(10, 'X'); +ERROR: value too long for type character(1) +--success, drop subpartition channel5_type3 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 6); +--fail, the only subpartition in channel6 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('K', 10); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 5); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM list_list_sales; + count +------- + 200 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + channel1 | p | l | f | 0 | 5 | {0,1,2} + channel4 | p | l | f | 0 | 5 | {8,9} + channel5 | p | l | f | 0 | 5 | {X} + channel6 | p | l | f | 0 | 5 | {NULL} + list_list_sales | r | l | f | 0 | 4 | + channel1_type2 | s | l | t | 0 | | {3,4} + channel1_type3 | s | l | t | 0 | | {5,6,7} + channel1_type4 | s | l | t | 0 | | {8,9} + channel1_type5 | s | l | t | 0 | | {NULL} + channel4_type1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + channel5_type1 | s | l | t | 0 | | {0,1,2} + channel5_type2 | s | l | t | 0 | | {3,4} + channel5_type4 | s | l | t | 0 | | {8,9} + channel6_subpartdefault1 | s | l | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable 
+-----------------------------------------+----------+--------------+-------------+------------- + channel1_type2_product_id_idx | x | n | t | t + channel1_type3_product_id_idx | x | n | t | t + channel1_type4_product_id_idx | x | n | t | t + channel1_type5_product_id_idx | x | n | t | t + channel4_type1_product_id_idx | x | n | t | t + channel5_type1_product_id_idx | x | n | t | t + channel5_type2_product_id_idx | x | n | t | t + channel5_type4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ list_list_sales + Table "segment_subpartition_add_drop_partition.list_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By LIST(type_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----list-hash table---- +-- +--prepare +CREATE TABLE list_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY LIST (channel_id) SUBPARTITION BY HASH (product_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_product1, + SUBPARTITION channel1_product2, + SUBPARTITION channel1_product3, + SUBPARTITION channel1_product4 + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_product1, + SUBPARTITION channel2_product2 + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_product1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_hash_sales_pkey" for table "list_hash_sales" +INSERT INTO list_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_hash_sales_idx ON list_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_product1, + SUBPARTITION channel5_product2, + SUBPARTITION channel5_product3, + SUBPARTITION channel5_product4 + ); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_hash_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, 
value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, not support add hash +ALTER TABLE list_hash_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...s MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; + ^ +--fail, invalid format +ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 1 | {0,1,2} + channel2 | p | l | f | 0 | 1 | {3,4,5} + channel3 | p | l | f | 0 | 1 | {6,7} + channel4 | p | l | f | 0 | 1 | {8,9} + channel5 | p | l | f | 0 | 1 | {X} + channel6 | p | l | f | 0 | 1 | {NULL} + list_hash_sales | r | l | f | 0 | 4 | + channel1_product1 | s | h | t | 0 | | {0} + channel1_product2 | s | h | t | 0 | | {1} + channel1_product3 | s | h | t | 0 | | {2} + channel1_product4 | s | h | t | 0 | | {3} + channel2_product1 | s | h | t | 0 | | {0} + channel2_product2 | s | h | t | 0 | | {1} + channel3_subpartdefault1 | s | h | t | 0 | | {0} + channel4_product1 | s | h | t | 0 | | {0} + channel5_product1 | s | h | t | 0 | | {0} + channel5_product2 | s | h | t | 0 | | {1} + channel5_product3 | s | h | t | 0 | | {2} + channel5_product4 | s | h | t | 0 | | {3} + channel6_subpartdefault1 | s | h | t | 0 | | {0} +(20 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_product1_product_id_idx | x | n | t | t + channel1_product2_product_id_idx | x | n | t | t + channel1_product3_product_id_idx | x | n | t | t + channel1_product4_product_id_idx | x | n | t | t + channel2_product1_product_id_idx | x | n | t | t + channel2_product2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_product1_product_id_idx | x | n | t | t + channel5_product1_product_id_idx | x | n | t | t + channel5_product2_product_id_idx | x | n | t | t + channel5_product3_product_id_idx | x | n | t | t + channel5_product4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(13 rows) + +\d+ list_hash_sales + Table "segment_subpartition_add_drop_partition.list_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description 
+---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "list_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By HASH(product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 13 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_hash_sales DROP PARTITION channel2; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel1_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel4_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--success, drop partition channel3 +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6', '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, invalid type +ALTER TABLE list_hash_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION FOR('X', 6); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM list_hash_sales; + count +------- + 500 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 1 | {0,1,2} + channel4 | p | l | f | 0 | 1 | {8,9} + channel5 | p | l | f | 0 | 1 | {X} + channel6 | p | l | f | 0 | 1 | {NULL} + list_hash_sales | r | l | f | 0 | 4 | + channel1_product1 | s | h | t | 0 | | {0} + channel1_product2 | s | h | t | 0 | | {1} + channel1_product3 | s | h | t | 0 | | {2} + channel1_product4 | s | h | t | 0 | | {3} + channel4_product1 | s | h | t | 0 | | {0} + channel5_product1 | s | h | t | 0 | | {0} + channel5_product2 | s | h | t | 0 | | {1} + channel5_product3 | s | h | t | 0 | | {2} + channel5_product4 | s | h | t | 0 | | {3} + channel6_subpartdefault1 | s | h | t | 0 | | {0} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable 
+ FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_product1_product_id_idx | x | n | t | t + channel1_product2_product_id_idx | x | n | t | t + channel1_product3_product_id_idx | x | n | t | t + channel1_product4_product_id_idx | x | n | t | t + channel4_product1_product_id_idx | x | n | t | t + channel5_product1_product_id_idx | x | n | t | t + channel5_product2_product_id_idx | x | n | t | t + channel5_product3_product_id_idx | x | n | t | t + channel5_product4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(10 rows) + +\d+ list_hash_sales + Table "segment_subpartition_add_drop_partition.list_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "list_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) 
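+--note: individual hash subpartitions cannot be dropped, so for a list-hash table the supported
+--route is dropping the whole parent list partition, which removes its subpartitions with it.
+--Sketch of the pattern (hypothetical partition name, not part of the captured output):
+--  ALTER TABLE some_list_hash_tab DROP PARTITION channel_old;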
+Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----hash-range table---- +-- +--prepare +CREATE TABLE hash_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1 VALUES LESS THAN (200), + SUBPARTITION product1_customer2 VALUES LESS THAN (500), + SUBPARTITION product1_customer3 VALUES LESS THAN (800), + SUBPARTITION product1_customer4 VALUES LESS THAN (1200) + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1 VALUES LESS THAN (500), + SUBPARTITION product2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 VALUES LESS THAN (1200) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_sales_pkey" for table "hash_range_sales" +INSERT INTO hash_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_range_sales_idx ON hash_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_customer4 VALUES LESS THAN (1200) + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_range_sales ADD PARTITION product_temp2; + ^ +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_customer5 VALUES LESS THAN (1800); +--fail, out of range +ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT); +ERROR: can not add none-range partition to range partition table +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE); +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + 
product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_range_sales | r | h | f | 0 | 1 | + product1_customer1 | s | r | t | 0 | | {200} + product1_customer2 | s | r | t | 0 | | {500} + product1_customer3 | s | r | t | 0 | | {800} + product1_customer4 | s | r | t | 0 | | {1200} + product1_customer5 | s | r | t | 0 | | {1800} + product2_customer1 | s | r | t | 0 | | {500} + product2_customer2 | s | r | t | 0 | | {NULL} + product3_subpartdefault1 | s | r | t | 0 | | {NULL} + product4_customer1 | s | r | t | 0 | | {1200} + product4_customer2 | s | r | t | 0 | | {NULL} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product1_customer5_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t + product4_customer2_product_id_idx | x | n | t | t +(10 rows) + +\d+ hash_range_sales + Table "segment_subpartition_add_drop_partition.hash_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. +--success, drop subpartition product1_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer1; +--success, drop subpartition product4_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer1; +--fail, the only subpartition in product4 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer2; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0, 100); +ERROR: Droping hash partition is unsupported. 
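+--note: with hash at the top level no partition can be dropped (see the two failures above), but
+--the range subpartitions inside a hash partition can still be dropped by name or by FOR values,
+--hash key value first, range key value second. Sketch (hypothetical values, not captured output):
+--  ALTER TABLE some_hash_range_tab DROP SUBPARTITION FOR (1, 250);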
+--fail, number not equal to the number of partkey +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR('2010-01-01', 100); +ERROR: invalid input syntax for integer: "2010-01-01" +--success, drop subpartition product1_customer2, but not suggest to do this operation +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 100); +--fail, no subpartition find +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 2300); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM hash_range_sales; + count +------- + 628 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_range_sales | r | h | f | 0 | 1 | + product1_customer3 | s | r | t | 0 | | {800} + product1_customer4 | s | r | t | 0 | | {1200} + product1_customer5 | s | r | t | 0 | | {1800} + product2_customer1 | s | r | t | 0 | | {500} + product2_customer2 | s | r | t | 0 | | {NULL} + product3_subpartdefault1 | s | r | t | 0 | | {NULL} + product4_customer2 | s | r | t | 0 | | {NULL} +(12 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product1_customer5_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer2_product_id_idx | x | n | t | t +(7 rows) + +\d+ hash_range_sales + Table "segment_subpartition_add_drop_partition.hash_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: 
+ "hash_range_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "hash_range_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 7 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----hash-list table---- +-- +--prepare +CREATE TABLE hash_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY HASH (product_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION product1 + ( + SUBPARTITION product1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product1_channel4 VALUES ('9') + ), + PARTITION product2 + ( + SUBPARTITION product2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION product2_channel2 VALUES (DEFAULT) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_list_sales_pkey" for table "hash_list_sales" +INSERT INTO hash_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_list_sales_idx ON hash_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product_temp1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product_temp1_channel4 VALUES ('9') + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_list_sales ADD PARTITION product_temp2; + ^ +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE); +ERROR: can not add none-list partition to list partition table +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT); +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid 
+ AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + product1 | p | h | f | 0 | 4 | {0} + product2 | p | h | f | 0 | 4 | {1} + product3 | p | h | f | 0 | 4 | {2} + product4 | p | h | f | 0 | 4 | {3} + hash_list_sales | r | h | f | 0 | 1 | + product1_channel1 | s | l | t | 0 | | {0,1,2} + product1_channel2 | s | l | t | 0 | | {3,4,5} + product1_channel3 | s | l | t | 0 | | {6,7,8} + product1_channel4 | s | l | t | 0 | | {9} + product1_channel5 | s | l | t | 0 | | {X} + product2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + product2_channel2 | s | l | t | 0 | | {NULL} + product3_subpartdefault1 | s | l | t | 0 | | {NULL} + product4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + product4_channel2 | s | l | t | 0 | | {NULL} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_channel1_product_id_idx | x | n | t | t + product1_channel2_product_id_idx | x | n | t | t + product1_channel3_product_id_idx | x | n | t | t + product1_channel4_product_id_idx | x | n | t | t + product1_channel5_product_id_idx | x | n | t | t + product2_channel1_product_id_idx | x | n | t | t + product2_channel2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_channel1_product_id_idx | x | n | t | t + product4_channel2_product_id_idx | x | n | t | t +(10 rows) + +\d+ hash_list_sales + Table "segment_subpartition_add_drop_partition.hash_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. 
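+-- NOTE: the cases below exercise DROP SUBPARTITION both by name and by
+-- FOR(v1, v2): v1 is routed to a level-1 hash partition, then v2 is matched
+-- against the level-2 list boundaries. A hedged sketch of the routing, using
+-- only values taken from the cases that follow:
+--   ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, '4');
+--   -- 0 routes to partition product1; '4' falls in subpartition product1_channel2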
+--success, drop subpartition product1_channel1
+ALTER TABLE hash_list_sales DROP SUBPARTITION product1_channel1;
+--success, drop subpartition product4_channel1
+ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel1;
+--fail, the only subpartition in product4
+ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel2;
+ERROR: Cannot drop the only subpartition of a partitioned table
+DETAIL: N/A
+--fail, not support drop hash
+ALTER TABLE hash_list_sales DROP PARTITION FOR(0);
+ERROR: Droping hash partition is unsupported.
+--fail, not support drop hash
+ALTER TABLE hash_list_sales DROP PARTITION FOR(0, '4');
+ERROR: Droping hash partition is unsupported.
+--fail, number not equal to the number of partkey
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0);
+ERROR: Number of boundary items NOT EQUAL to number of partition keys
+DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table
+--fail, invalid type
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR('2010-01-01', '4');
+ERROR: invalid input syntax for integer: "2010-01-01"
+--success, drop subpartition product1_channel2, though this operation is not recommended
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, '4');
+--fail, no subpartition found
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, 'Z');
+ERROR: The subpartition number is invalid or out-of-range
+DETAIL: N/A
+--check for ok after drop
+SELECT count(*) FROM hash_list_sales;
+ count 
+-------
+   608
+(1 row)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+         relname          | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries  
+--------------------------+----------+--------------+-------------+---------------+---------+-------------
+ product1                 | p        | h            | f           |             0 | 4       | {0}
+ product2                 | p        | h            | f           |             0 | 4       | {1}
+ product3                 | p        | h            | f           |             0 | 4       | {2}
+ product4                 | p        | h            | f           |             0 | 4       | {3}
+ hash_list_sales          | r        | h            | f           |             0 | 1       | 
+ product1_channel3        | s        | l            | t           |             0 |         | {6,7,8}
+ product1_channel4        | s        | l            | t           |             0 |         | {9}
+ product1_channel5        | s        | l            | t           |             0 |         | {X}
+ product2_channel1        | s        | l            | t           |             0 |         | {0,1,2,3,4}
+ product2_channel2        | s        | l            | t           |             0 |         | {NULL}
+ product3_subpartdefault1 | s        | l            | t           |             0 |         | {NULL}
+ product4_channel2        | s        | l            | t           |             0 |         | {NULL}
+(12 rows)
+
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+                 relname                 | parttype | partstrategy | hasfilenode | indisusable 
+-----------------------------------------+----------+--------------+-------------+-------------
+ product1_channel3_product_id_idx        | x        | n            | t           | t
+ product1_channel4_product_id_idx        | x        | n            | t           | t
+ product1_channel5_product_id_idx        | x        | n            | t           | t
+ product2_channel1_product_id_idx        | x        | n            | t           | t
+ product2_channel2_product_id_idx        | x        | n            | t           | t
+ product3_subpartdefault1_product_id_idx | x        
| n | t | t + product4_channel2_product_id_idx | x | n | t | t +(7 rows) + +\d+ hash_list_sales + Table "segment_subpartition_add_drop_partition.hash_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_list_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default UNUSABLE + "hash_list_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 7 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- +----hash-hash table---- +-- +--prepare +CREATE TABLE hash_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY HASH (product_id) SUBPARTITION BY HASH (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1, + SUBPARTITION product1_customer2, + SUBPARTITION product1_customer3, + SUBPARTITION product1_customer4 + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1, + SUBPARTITION product2_customer2 + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_hash_sales_pkey" for table "hash_hash_sales" +INSERT INTO hash_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_hash_sales_idx ON hash_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1, + SUBPARTITION product_temp1_customer2, + SUBPARTITION product_temp1_customer3, + SUBPARTITION product_temp1_customer4 + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; + ^ +--fail, not support add hash +ALTER TABLE hash_hash_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...s MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; + ^ +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND 
(p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_hash_sales | r | h | f | 0 | 1 | + product1_customer1 | s | h | t | 0 | | {0} + product1_customer2 | s | h | t | 0 | | {1} + product1_customer3 | s | h | t | 0 | | {2} + product1_customer4 | s | h | t | 0 | | {3} + product2_customer1 | s | h | t | 0 | | {0} + product2_customer2 | s | h | t | 0 | | {1} + product3_subpartdefault1 | s | h | t | 0 | | {0} + product4_customer1 | s | h | t | 0 | | {0} +(13 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t +(8 rows) + +\d+ hash_hash_sales + Table "segment_subpartition_add_drop_partition.hash_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By HASH(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 8 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product1_customer1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product4_customer1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0, 0); +ERROR: Droping hash partition is unsupported. 
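+-- NOTE: for hash (sub)partitions the boundary stored in pg_partition is just a
+-- sequence number, and row placement depends on the total (sub)partition count,
+-- so dropping a single hash partition or subpartition would remap existing
+-- rows; every DROP form below is therefore rejected and count(*) stays at 1000.
+-- A hedged sketch of the usual workaround, assuming the TRUNCATE SUBPARTITION
+-- form (it only empties the subpartition; not exercised in this test):
+--   ALTER TABLE hash_hash_sales TRUNCATE SUBPARTITION product1_customer1;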
+--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0, 0); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM hash_hash_sales; + count +------- + 1000 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_hash_sales | r | h | f | 0 | 1 | + product1_customer1 | s | h | t | 0 | | {0} + product1_customer2 | s | h | t | 0 | | {1} + product1_customer3 | s | h | t | 0 | | {2} + product1_customer4 | s | h | t | 0 | | {3} + product2_customer1 | s | h | t | 0 | | {0} + product2_customer2 | s | h | t | 0 | | {1} + product3_subpartdefault1 | s | h | t | 0 | | {0} + product4_customer1 | s | h | t | 0 | | {0} +(13 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t +(8 rows) + +\d+ hash_hash_sales + Table "segment_subpartition_add_drop_partition.hash_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_hash_sales_pkey" PRIMARY KEY, btree (customer_id) TABLESPACE pg_default + "hash_hash_sales_idx" btree (product_id) LOCAL TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By HASH(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) 
+Number of subpartitions: 8 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +--finish +DROP TABLE range_range_sales; +DROP TABLE range_list_sales; +DROP TABLE range_hash_sales; +DROP TABLE list_range_sales; +DROP TABLE list_list_sales; +DROP TABLE list_hash_sales; +DROP TABLE hash_range_sales; +DROP TABLE hash_list_sales; +DROP TABLE hash_hash_sales; +DROP SCHEMA segment_subpartition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_alter_table.out b/src/test/regress/expected/segment_subpartition_alter_table.out new file mode 100644 index 000000000..941da92bb --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_alter_table.out @@ -0,0 +1,184 @@ +DROP SCHEMA segment_subpartition_alter_table CASCADE; +ERROR: schema "segment_subpartition_alter_table" does not exist +CREATE SCHEMA segment_subpartition_alter_table; +SET CURRENT_SCHEMA TO segment_subpartition_alter_table; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +--change column type +alter table range_range alter column user_no set data type char(30); +alter table range_range alter column sales_amt set data type varchar; +\d+ range_range + Table "segment_subpartition_alter_table.range_range" + Column | Type | Modifiers | Storage | Stats target | Description +------------+-----------------------+-----------+----------+--------------+------------- + month_code | character varying(30) | not null | extended | | + dept_code | character varying(30) | not null | extended | | + user_no | character(30) | | extended | | + sales_amt | character varying | | extended | | +Indexes: + "range_range_pkey" PRIMARY KEY, btree (month_code, dept_code) LOCAL TABLESPACE pg_default +Partition By RANGE(month_code) Subpartition By RANGE(dept_code) +Number of partitions: 2 (View pg_partition to check each partition range.) +Number of subpartitions: 4 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, segment=on, compression=no + +-- rename +alter table range_range rename to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME is not yet supported. +alter table range_range rename partition p_201901 to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported. +alter table range_range rename partition p_201901_a to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported. 
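+-- NOTE: although RENAME is rejected for subpartitioned tables, the partition
+-- tree can still be inspected through pg_partition; a hedged sketch reusing
+-- the catalog pattern from the add_drop_partition tests:
+--   SELECT relname, parttype, partstrategy, boundaries
+--     FROM pg_partition
+--     WHERE parentid = 'range_range'::regclass
+--     ORDER BY parttype, relname;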
+--cluster +create index idx_range_range on range_range(month_code,user_no); +alter table range_range cluster on idx_range_range; +ERROR: cannot cluster a subpartition table +-- move tablespace +CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; +alter table range_range move PARTITION p_201901 tablespace example1; +ERROR: Un-support feature +DETAIL: For subpartition table, modifying tablespace is not yet supported. +alter table range_range move PARTITION p_201901_a tablespace example1; +ERROR: Un-support feature +DETAIL: For subpartition table, modifying tablespace is not yet supported. +DROP TABLESPACE example1; +-- merge +alter table range_range merge PARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +ERROR: Un-support feature +DETAIL: For subpartition table, merge partitions is not yet supported. +alter table range_range merge SUBPARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +ERROR: syntax error at or near "SUBPARTITIONS" +LINE 1: alter table range_range merge SUBPARTITIONS p_201901 , p_20... + ^ +-- exchange +CREATE TABLE ori +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "ori_pkey" for table "ori" +ALTER TABLE range_range EXCHANGE PARTITION (p_201901) WITH TABLE ori; +ERROR: Un-support feature +DETAIL: For subpartition table, exchange partition is not yet supported. +ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WITH TABLE ori; +ERROR: syntax error at or near "SUBPARTITION (" +LINE 1: ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WIT... + ^ +-- drop +alter table range_range drop partition p_201901; +alter table range_range drop partition p_201901_a; +ERROR: partition "p_201901_a" does not exist +alter table range_range drop subpartition p_201901_a; +ERROR: subpartition "p_201901_a" does not exist +-- add +alter table range_range add partition p_range_4 VALUES LESS THAN('201904'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +-- split +alter table range_range split PARTITION p_201901 at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +ERROR: Un-support feature +DETAIL: For subpartition table, split partition is not supported yet. +drop table ori; +drop table range_range; +CREATE TABLE IF NOT EXISTS range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; +create index on range_range_02(col_2) local; +alter table range_range_02 MODIFY PARTITION p_range_2 UNUSABLE LOCAL INDEXES; +ERROR: Un-support feature +DETAIL: For subpartition table, UNUSABLE LOCAL INDEXES is not yet supported. +alter table range_range_02 MODIFY PARTITION p_range_2 REBUILD UNUSABLE LOCAL INDEXES; +ERROR: Un-support feature +DETAIL: For subpartition table, REBUILD UNUSABLE LOCAL INDEXES is not yet supported. 
+alter table range_range_02 alter col_1 type char;
+ERROR: cannot alter data type of partitioning column "col_1"
+alter table range_range_02 alter col_2 type char;
+ERROR: cannot alter data type of subpartitioning column "col_2"
+drop table range_range_02;
+--validate constraint
+CREATE TABLE hash_hash
+(
+    col_1 int ,
+    col_2 int NOT NULL ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+) WITH (SEGMENT=ON)
+PARTITION BY hash (col_3) SUBPARTITION BY hash (col_2)
+(
+  PARTITION p_hash_1
+  (
+    SUBPARTITION p_hash_1_1 ,
+    SUBPARTITION p_hash_1_2 ,
+    SUBPARTITION p_hash_1_3 ,
+    SUBPARTITION p_hash_1_4
+  ),
+  PARTITION p_hash_2
+  (
+    SUBPARTITION p_hash_2_1 ,
+    SUBPARTITION p_hash_2_2
+  ),
+  PARTITION p_hash_3,
+  PARTITION p_hash_4
+  (
+    SUBPARTITION p_hash_4_1
+  ),
+  PARTITION p_hash_5
+);
+INSERT INTO hash_hash VALUES(null,1,1,1);
+alter table hash_hash add constraint con_hash_hash check(col_1 is not null) NOT VALID ;
+INSERT INTO hash_hash VALUES(null,2,1,1); --error
+ERROR: new row for relation "hash_hash" violates check constraint "con_hash_hash"
+DETAIL: N/A
+INSERT INTO hash_hash VALUES(1,3,1,1); --success
+alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --error
+ERROR: check constraint "con_hash_hash" is violated by some row
+delete from hash_hash where col_1 is null;
+alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --success
+drop table hash_hash cascade;
+-- clean
+DROP SCHEMA segment_subpartition_alter_table CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/expected/segment_subpartition_analyze_vacuum.out b/src/test/regress/expected/segment_subpartition_analyze_vacuum.out
new file mode 100644
index 000000000..1d9c91c74
--- /dev/null
+++ b/src/test/regress/expected/segment_subpartition_analyze_vacuum.out
@@ -0,0 +1,63 @@
+-- prepare
+DROP SCHEMA segment_subpartition_analyze_vacuum CASCADE;
+ERROR: schema "segment_subpartition_analyze_vacuum" does not exist
+CREATE SCHEMA segment_subpartition_analyze_vacuum;
+SET CURRENT_SCHEMA TO segment_subpartition_analyze_vacuum;
+-- base function
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES LESS THAN( '201903' )
+  (
+    SUBPARTITION p_201901_a values ('1'),
+    SUBPARTITION p_201901_b values ('2')
+  ),
+  PARTITION p_201902 VALUES LESS THAN( '201910' )
+  (
+    SUBPARTITION p_201902_a values ('1'),
+    SUBPARTITION p_201902_b values ('2')
+  )
+);
+create index idx_month_code_local on range_list(month_code) local;
+create index idx_dept_code_global on range_list(dept_code) global;
+create index idx_user_no_global on range_list(user_no) global;
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '2', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+select * from range_list order by 1, 2, 3, 4;
+ month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+-----------
+ 201902     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+ 201902     | 2         | 1       |         1
+ 201903     | 1         | 1       |         1
+ 201903     | 2         | 1       |         1
+ 201903     | 2         | 2       |         1
+(6 rows)
+
+delete from range_list where month_code = '201902';
+select * from range_list order by 1, 2, 3, 4;
+ 
month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(3 rows) + +analyze range_list; +analyze range_list partition (p_201901); +vacuum range_list; +vacuum range_list partition (p_201901); +drop table range_list; +-- clean +DROP SCHEMA segment_subpartition_analyze_vacuum CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_createtable.out b/src/test/regress/expected/segment_subpartition_createtable.out new file mode 100644 index 000000000..733574285 --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_createtable.out @@ -0,0 +1,1550 @@ +--1.create table +--list_list list_hash list_range range_list range_hash range_range +--prepare +DROP SCHEMA segment_subpartition_createtable CASCADE; +ERROR: schema "segment_subpartition_createtable" does not exist +CREATE SCHEMA segment_subpartition_createtable; +SET CURRENT_SCHEMA TO segment_subpartition_createtable; +--1.1 normal table +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +drop table list_list; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into list_hash values('201902', '1', '1', 1); +insert into list_hash values('201902', '2', '1', 1); +insert into list_hash values('201902', '3', '1', 1); +insert into list_hash values('201903', '4', '1', 1); +insert into list_hash values('201903', '5', '1', 1); +insert into list_hash values('201903', '6', '1', 1); +select * from list_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 4 | 1 | 1 + 201903 | 5 | 1 | 1 + 201903 | 6 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +drop table list_hash; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values 
less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +insert into list_range values('201902', '1', '1', 1); +insert into list_range values('201902', '2', '1', 1); +insert into list_range values('201902', '3', '1', 1); +insert into list_range values('201903', '4', '1', 1); +insert into list_range values('201903', '5', '1', 1); +insert into list_range values('201903', '6', '1', 1); +ERROR: inserted partition key does not map to any table partition +select * from list_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 4 | 1 | 1 + 201903 | 5 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(5 rows) + +drop table list_range; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table range_list; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +select * from range_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table range_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 
VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +select * from range_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +drop table range_range; +CREATE TABLE hash_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201901', '2', '1', 1); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +insert into hash_list values('201903', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +select * from hash_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 +(6 rows) + +drop table hash_list; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201901', '2', '1', 1); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +insert into hash_hash values('201903', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +select * from hash_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 +(6 rows) + +drop table hash_hash; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN ( '3' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN ( '3' ) + ) +); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201901', '2', '1', 1); +insert into hash_range values('201901', '1', '1', 1); 
+insert into hash_range values('201903', '2', '1', 1); +insert into hash_range values('201903', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); +select * from hash_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 +(6 rows) + +drop table hash_range; +--1.2 table with default subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table list_hash; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +drop table list_range; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + 
sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table range_hash; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' 
), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES( '2' ), + SUBPARTITION p_201902_b VALUES( '3' ) + ) +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES( '2' ), + SUBPARTITION p_201901_b VALUES( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH 
(SEGMENT=ON)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+  PARTITION p_201901,
+  PARTITION p_201902
+  (
+    SUBPARTITION p_201902_a,
+    SUBPARTITION p_201902_b
+  )
+);
+drop table hash_hash;
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+  PARTITION p_201901
+  (
+    SUBPARTITION p_201901_a,
+    SUBPARTITION p_201901_b
+  ),
+  PARTITION p_201902
+);
+drop table hash_hash;
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+  PARTITION p_201901,
+  PARTITION p_201902
+);
+drop table hash_hash;
+--1.3 subpartition name check
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_a VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: duplicate subpartition name: "p_201901_a"
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: duplicate subpartition name: "p_201901_a"
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901 VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: duplicate subpartition name: "p_201901"
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201901_subpartdefault1 VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+drop table list_list;
+--1.4 subpartition key check
+-- the level-1 partition key and the level-2 subpartition key are the same column
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902
 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: The two partition keys of a subpartition partition table are the same.
+DETAIL: N/A
+--the boundary values of the level-2 subpartitions overlap
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '1' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: list partition p_201901_a and p_201901_b has overlapped value
+--the partitioning column does not exist
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_codeXXX) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: undefined column"month_codexxx" is used as a partitioning column
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_codeXXX)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+ERROR: undefined column"dept_codexxx" is used as a partitioning column
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a values less than ('4'),
+    SUBPARTITION p_201901_b values less than ('4')
+  ),
+  PARTITION p_201902 VALUES ( '201903' )
+  (
+    SUBPARTITION p_201902_a values less than ('3'),
+    SUBPARTITION p_201902_b values less than ('6')
+  )
+);
+ERROR: partition bound of partition "p_201901_b" is too low
+drop table list_range;
+ERROR: table "list_range" does not exist
+--1.5 list subpartition with default
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (SEGMENT=ON)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+  PARTITION p_201901 VALUES ( '201902' )
+  (
+    SUBPARTITION p_201901_a VALUES ( '1' ),
+    SUBPARTITION p_201901_b VALUES ( '2' )
+  ),
+  PARTITION p_201902 VALUES ( default )
+  (
+    SUBPARTITION p_201902_a VALUES ( '1' ),
+    SUBPARTITION p_201902_b VALUES ( '2' )
+  )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 
1); +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); +drop table list_list; +--1.6 declaration and definition of the subpartition type must be the same. +--error +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); +ERROR: The syntax format of subpartition is incorrect, the declaration and definition of the subpartition do not match. +DETAIL: The syntax format of subpartition p_201901_a is incorrect. 
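Editor's note on the failing case above: when the subpartition method is declared as HASH, the subpartition definitions must not carry VALUES clauses. A minimal sketch of the matching form, with a hypothetical table name (the same convention is used for range_hash_02 further down in this file):

CREATE TABLE list_hash_demo  -- hypothetical name, for illustration only
(
    month_code VARCHAR2 ( 30 ) NOT NULL ,
    dept_code VARCHAR2 ( 30 ) NOT NULL ,
    sales_amt int
) WITH (SEGMENT=ON)
PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
(
    PARTITION p_201901 VALUES ( '201902' )
    (
        SUBPARTITION p_201901_a,  -- no VALUES clause for HASH subpartitions
        SUBPARTITION p_201901_b
    )
);
DROP TABLE list_hash_demo;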
+--1.7 add constraint +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range add constraint constraint_check CHECK (sales_amt IS NOT NULL); +insert into range_range values(1,1,1); +ERROR: new row for relation "range_range" violates check constraint "constraint_check" +DETAIL: N/A +drop table range_range; +-- drop partition column +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +alter table range_hash_02 drop column col_1; +ERROR: cannot drop partitioning column "col_1" +alter table range_hash_02 drop column col_2; +ERROR: cannot drop partitioning column "col_2" +drop table range_hash_02; +--1.8 SET ROW MOVEMENT +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1', '2' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1', '2' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list disable ROW MOVEMENT; +insert into list_list values('201902', '1', '1', 1); +update list_list set month_code = '201903'; +ERROR: fail to update partitioned table "list_list" +DETAIL: disable row movement +update list_list set dept_code = '3'; +ERROR: fail to update partitioned table "list_list" +DETAIL: disable row movement +alter table list_list enable ROW MOVEMENT; +update list_list set month_code = '201903'; +update list_list set dept_code = '3'; +drop table list_list; +--1.9 without subpartition declaration +create table test(a int) WITH (SEGMENT=ON) +partition by range(a) +( +partition p1 values less than(100) +( +subpartition subp1 values less than(50), +subpartition subp2 values less than(100) +), +partition p2 values less than(200), +partition p3 values less than(maxvalue) +); +ERROR: The syntax format of subpartition is incorrect, missing declaration of subpartition. 
+DETAIL: N/A +--1.10 create table like +CREATE TABLE range_range +( + col_1 int primary key, + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int generated always as(2*col_2) stored , + check (col_4 >= col_2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +CREATE TABLE range_range_02 (like range_range INCLUDING ALL ); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_02_pkey" for table "range_range_02" +drop table range_range; +--ROW LEVEL SECURITY POLICY +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +CREATE ROW LEVEL SECURITY POLICY range_range_rls ON range_range USING(user_no = CURRENT_USER); +ERROR: Un-support feature +DETAIL: Do not support row level security policy on subpartition table. +drop table range_range; +-- ledger database +CREATE SCHEMA ledgernsp WITH BLOCKCHAIN; +CREATE TABLE ledgernsp.range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +ERROR: Un-support feature +DETAIL: Subpartition table does not support ledger user table. 
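Editor's note: a BLOCKCHAIN schema turns its user tables into ledger tables, which subpartitioned tables do not support, so the natural workaround is an ordinary schema. A minimal sketch, with hypothetical schema and table names:

CREATE SCHEMA plainnsp;  -- hypothetical ordinary schema, no WITH BLOCKCHAIN
CREATE TABLE plainnsp.range_range_demo
(
    month_code VARCHAR2 ( 30 ) NOT NULL ,
    dept_code VARCHAR2 ( 30 ) NOT NULL
) WITH (SEGMENT=ON)
PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
(
    PARTITION p_201901 VALUES LESS THAN( '201903' )
    (
        SUBPARTITION p_201901_a VALUES LESS THAN( MAXVALUE )
    )
);  -- succeeds in a non-blockchain schema
DROP TABLE plainnsp.range_range_demo;
DROP SCHEMA plainnsp;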
+DROP SCHEMA ledgernsp; +-- create table as +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +insert into range_range values(201902,1,1,1),(201902,1,1,1),(201902,3,1,1),(201903,1,1,1),(201903,2,1,1),(201903,2,1,1); +select * from range_range subpartition(p_201901_a) where month_code in(201902,201903) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +create table range_range_copy WITH (SEGMENT=ON) as select * from range_range subpartition(p_201901_a) where month_code in(201902,201903); +select * from range_range_copy order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +drop table range_range; +drop table range_range_copy; +--1.11 create index +create table range_range_03 +( + c_int int, + c_char1 char(3000), + c_char2 char(5000), + c_char3 char(6000), + c_varchar1 varchar(3000), + c_varchar2 varchar(5000), + c_varchar3 varchar, + c_varchar4 varchar, + c_text1 text, + c_text2 text, + c_text3 text, + c int, + primary key(c,c_int) +) with (parallel_workers=10, SEGMENT=ON) +partition by range (c_int) subpartition by range (c_char1) +( + partition p1 values less than(50) + ( + subpartition p1_1 values less than('c'), + subpartition p1_2 values less than(maxvalue) + ), + partition p2 values less than(100) + ( + subpartition p2_1 values less than('c'), + subpartition p2_2 values less than(maxvalue) + ), + partition p3 values less than(150) + ( + subpartition p3_1 values less than('c'), + subpartition p3_2 values less than(maxvalue) + ), + partition p4 values less than(200) + ( + subpartition p4_1 values less than('c'), + subpartition p4_2 values less than(maxvalue) + ), + partition p5 values less than(maxvalue)( + subpartition p5_1 values less than('c'), + subpartition p5_2 values less than(maxvalue) + ) +) enable row movement; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_03_pkey" for table "range_range_03" +create index range_range_03_idx1 on range_range_03 (c_varchar1) local; --success +create index range_range_03_idx2 on range_range_03 (c_varchar2) local ( + partition cpt7_p1, + partition cpt7_p2, + partition cpt7_p3, + partition cpt7_p4, + partition cpt7_p5 +); --failed +ERROR: Cannot match subpartitions when create subpartition indexes. 
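Editor's note: the failure above is expected, since the LOCAL clause names only five index partitions while range_range_03 has ten subpartitions, so they cannot be matched one-to-one. When explicit names are not needed, the anonymous LOCAL form already used for range_range_03_idx1 sidesteps the problem; a sketch with a hypothetical index name:

create index range_range_03_idx2b on range_range_03 (c_varchar2) local;  -- one index partition is derived per subpartition
drop index range_range_03_idx2b;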
+create index range_range_03_idx3 on range_range_03 (c_varchar3); --success, default global +create index range_range_03_idx4 on range_range_03 (c_varchar4) global; --success +create index range_range_03_idx5 on range_range_03 (c_varchar4) local; --failed, can not be same column with global index +ERROR: Global and local partition index should not be on same column +\d+ range_range_03 + Table "segment_subpartition_createtable.range_range_03" + Column | Type | Modifiers | Storage | Stats target | Description +------------+-------------------------+-----------+----------+--------------+------------- + c_int | integer | not null | plain | | + c_char1 | character(3000) | | extended | | + c_char2 | character(5000) | | extended | | + c_char3 | character(6000) | | extended | | + c_varchar1 | character varying(3000) | | extended | | + c_varchar2 | character varying(5000) | | extended | | + c_varchar3 | character varying | | extended | | + c_varchar4 | character varying | | extended | | + c_text1 | text | | extended | | + c_text2 | text | | extended | | + c_text3 | text | | extended | | + c | integer | not null | plain | | +Indexes: + "range_range_03_pkey" PRIMARY KEY, btree (c, c_int) TABLESPACE pg_default + "range_range_03_idx1" btree (c_varchar1) LOCAL TABLESPACE pg_default + "range_range_03_idx3" btree (c_varchar3) TABLESPACE pg_default + "range_range_03_idx4" btree (c_varchar4) TABLESPACE pg_default +Partition By RANGE(c_int) Subpartition By RANGE(c_char1) +Number of partitions: 5 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, parallel_workers=10, segment=on, compression=no + +select pg_get_tabledef('range_range_03'); + pg_get_tabledef +---------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_createtable; + + CREATE TABLE range_range_03 ( + + c_int integer NOT NULL, + + c_char1 character(3000), + + c_char2 character(5000), + + c_char3 character(6000), + + c_varchar1 character varying(3000), + + c_varchar2 character varying(5000), + + c_varchar3 character varying, + + c_varchar4 character varying, + + c_text1 text, + + c_text2 text, + + c_text3 text, + + c integer NOT NULL + + ) + + WITH (orientation=row, parallel_workers=10, segment=on, compression=no) + + PARTITION BY RANGE (c_int) SUBPARTITION BY RANGE (c_char1) + + ( + + PARTITION p1 VALUES LESS THAN (50) TABLESPACE pg_default + + ( + + SUBPARTITION p1_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p1_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p2 VALUES LESS THAN (100) TABLESPACE pg_default + + ( + + SUBPARTITION p2_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p3 VALUES LESS THAN (150) TABLESPACE pg_default + + ( + + SUBPARTITION p3_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p3_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p4 VALUES LESS THAN (200) TABLESPACE pg_default + + ( + + SUBPARTITION p4_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p4_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p5 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ( + + SUBPARTITION p5_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p5_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + 
+ ENABLE ROW MOVEMENT; + + CREATE INDEX range_range_03_idx4 ON range_range_03 USING btree (c_varchar4) TABLESPACE pg_default;+ + CREATE INDEX range_range_03_idx3 ON range_range_03 USING btree (c_varchar3) TABLESPACE pg_default;+ + CREATE INDEX range_range_03_idx1 ON range_range_03 USING btree (c_varchar1) LOCAL + + ( + + PARTITION p1_1_c_varchar1_idx, + + PARTITION p1_2_c_varchar1_idx, + + PARTITION p2_1_c_varchar1_idx, + + PARTITION p2_2_c_varchar1_idx, + + PARTITION p3_1_c_varchar1_idx, + + PARTITION p3_2_c_varchar1_idx, + + PARTITION p4_1_c_varchar1_idx, + + PARTITION p4_2_c_varchar1_idx, + + PARTITION p5_1_c_varchar1_idx, + + PARTITION p5_2_c_varchar1_idx + + ) TABLESPACE pg_default; + + ALTER TABLE range_range_03 ADD CONSTRAINT range_range_03_pkey PRIMARY KEY (c, c_int); +(1 row) + +drop table range_range_03; +--unique local index columns must contain the partition key +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +create unique index idx on range_range(month_code) local; +ERROR: unique local index columns must contain all the partition keys and collation must be default collation +create unique index idx1 on range_range(month_code, user_no) local; +ERROR: unique local index columns must contain all the partition keys and collation must be default collation +drop table range_range; +-- partkey has timestampwithzone type +drop table hash_range; +ERROR: table "hash_range" does not exist +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) WITH (SEGMENT=ON) +PARTITION BY HASH (col_2) SUBPARTITION BY RANGE (col_19) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +ERROR: partition key value must be const or const-evaluable expression +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) WITH (SEGMENT=ON) +PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_pkey" for table "hash_range" +drop table hash_range; +--clean +DROP SCHEMA segment_subpartition_createtable CASCADE; +NOTICE: drop cascades to table range_range_02 +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_ddl_index.out b/src/test/regress/expected/segment_subpartition_ddl_index.out new file mode 100644 index 000000000..922049851 --- /dev/null +++ 
b/src/test/regress/expected/segment_subpartition_ddl_index.out @@ -0,0 +1,440 @@ +-- +----test index is OK when using ddl grammar for subpartition---- +-- +DROP SCHEMA segment_subpartition_ddl_index CASCADE; +ERROR: schema "segment_subpartition_ddl_index" does not exist +CREATE SCHEMA segment_subpartition_ddl_index; +SET CURRENT_SCHEMA TO segment_subpartition_ddl_index; +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_indexonlyscan = ON; +SET enable_bitmapscan = OFF; +-- +--test for add/drop partition/subpartition +-- +--1. first, we create a subpartitioned table, and indexes on the table +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 
range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +--2. add partition/subpartition will not influence the index +ALTER TABLE range_list_sales1 ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8') + ); +ALTER TABLE range_list_sales1 ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_list_sales1 MODIFY PARTITION customer5 ADD SUBPARTITION customer5_channel4 VALUES ('9'); +INSERT INTO range_list_sales1 SELECT generate_series(1001,2000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 6, Sub Iterations: 13 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 6, Sub Iterations: 13 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) 
*/ COUNT(time_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +--3. drop partition/subpartition update global index +ALTER TABLE range_list_sales1 DROP PARTITION customer3 UPDATE GLOBAL INDEX; +ALTER TABLE range_list_sales1 DROP PARTITION FOR (700) UPDATE GLOBAL INDEX; --customer4 +ALTER TABLE range_list_sales1 DROP SUBPARTITION FOR (700, '9') UPDATE GLOBAL INDEX; --customer5_channel4 +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 10 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 10 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +--4. if drop partition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP PARTITION FOR (1600); +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx1), index "range_list_sales1_idx1" doesn't exist. 
+ QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx2), index "range_list_sales1_idx2" doesn't exist. + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +--5. if drop subpartition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP SUBPARTITION customer5_channel3; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx1), index "range_list_sales1_idx1" doesn't exist. 
+ QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx2), index "range_list_sales1_idx2" doesn't exist. + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +DROP TABLE range_list_sales1; +--finish, clean the environment +DROP SCHEMA segment_subpartition_ddl_index CASCADE; +RESET CURRENT_SCHEMA; +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/expected/segment_subpartition_gpi.out b/src/test/regress/expected/segment_subpartition_gpi.out new file mode 100644 index 000000000..ae6abc570 --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_gpi.out @@ -0,0 +1,1296 @@ +-- prepare +DROP SCHEMA 
segment_subpartition_gpi CASCADE; +ERROR: schema "segment_subpartition_gpi" does not exist +CREATE SCHEMA segment_subpartition_gpi; +SET CURRENT_SCHEMA TO segment_subpartition_gpi; +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + 
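Editor's note: the two plans above show the intended contrast for this GPI suite: the local index on month_code is reached through a Partition Iterator with pruning (Selected Partitions: 1), while the global index on dept_code is a single B-tree scanned once for all subpartitions. The same hint mechanism used throughout these tests can pin a plan for verification; a sketch, assuming the range_list table and indexes created just above:

explain(costs off) select /*+ indexscan(range_list idx_user_no_global) */ *  -- indexscan hint form, analogous to the indexonlyscan hints used above
  from range_list where user_no = '2';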
+explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +drop table range_list; +-- unique +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create unique index idx_dept_code_global on range_list(dept_code) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select count(*) from range_list; + count +------- + 2 +(1 row) + +--error +insert into range_list values('201902', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(1) already exists. +insert into range_list values('201902', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(2) already exists. +insert into range_list values('201903', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(1) already exists. +insert into range_list values('201903', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(2) already exists. 
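Editor's note: the duplicate-key errors above come from the unique GLOBAL index, which enforces uniqueness across every partition and subpartition. A unique LOCAL index, by contrast, must include all the partition keys, the rule the createtable tests exercise for range_range; a sketch with a hypothetical index name:

create unique index idx_uniq_local_demo on range_list(month_code, dept_code, user_no) local;  -- covers both partition keys, so it is accepted
drop index idx_uniq_local_demo;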
+select count(*) from range_list; + count +------- + 2 +(1 row) + +delete from range_list; +drop index idx_dept_code_global; +create unique index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '4', 1); +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 2 | 1 +(1 row) + +select * from range_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 3 | 1 +(1 row) + +select * from range_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 4 | 1 +(1 row) + +select count(*) from range_list; + count +------- + 4 +(1 row) + +--error +insert into range_list values('201902', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201902', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201903', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201903', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201902', '1', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201902', '2', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201903', '1', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201903', '2', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201902', '1', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201902', '2', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201903', '1', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201903', '2', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201902', '1', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. 
+insert into range_list values('201902', '2', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +insert into range_list values('201903', '1', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +insert into range_list values('201903', '2', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +select count(*) from range_list; + count +------- + 4 +(1 row) + +drop table range_list; +-- truncate subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, 
user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list truncate subpartition p_201901_a update global index; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort 
Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +alter table range_list truncate subpartition p_201901_b; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(2 rows) + +drop table range_list; +-- split subpartition +CREATE TABLE 
range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values (default) + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values (default) + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: 
month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list split subpartition p_201901_b values ('3') into +( + subpartition p_201901_b, + subpartition p_201901_c +) update global index; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list split subpartition p_201902_b values ('3') into +( + subpartition p_201902_b, + subpartition p_201902_c +); +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 6 + -> Partitioned Seq Scan on segment_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +drop table range_list; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) 
SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( MAXVALUE ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +create index idx_month_code_local on range_range(month_code) local; +create index idx_dept_code_global on range_range(dept_code) global; +create index idx_user_no_global on range_range(user_no) global; +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_range.dept_code)::text = '1'::text) +(8 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Bitmap Heap Scan on 
segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_range.user_no)::text = '1'::text) +(8 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +alter table range_range split subpartition p_201901_b at ('3') into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_range.dept_code)::text = '1'::text) +(8 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Bitmap Heap Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_range.user_no)::text = '1'::text) +(8 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 
| 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +alter table range_range split subpartition p_201902_b at ('3') into +( + subpartition p_201902_c, + subpartition p_201903_d +); +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 6 + -> Partitioned Seq Scan on segment_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) primary key, + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION 
p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) primary key, + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) primary key, + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + i +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code, user_no) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' 
) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + i +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, user_no) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +-- truncate with gpi +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index idx on range_hash_02(col_1); +truncate range_hash_02; +drop table range_hash_02; +-- clean +DROP SCHEMA segment_subpartition_gpi CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_scan.out b/src/test/regress/expected/segment_subpartition_scan.out new file mode 100644 index 000000000..b7730225b --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_scan.out @@ -0,0 +1,660 @@ +--prepare +DROP SCHEMA segment_subpartition_scan CASCADE; +ERROR: schema "segment_subpartition_scan" does not exist +CREATE SCHEMA segment_subpartition_scan; +SET CURRENT_SCHEMA TO segment_subpartition_scan; +--scan +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list 
values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +explain(costs off, verbose on) select * from range_list order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(10 rows) + +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code on range_list(dept_code) local; +create index idx_user_no on range_list(user_no) local; +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 + -> Partitioned Bitmap Index Scan on idx_dept_code + Index Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(15 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 
1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Bitmap Heap Scan on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_user_no + Index Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +set enable_bitmapscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Index Scan using idx_month_code on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Index Scan using idx_dept_code on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, 
user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Index Scan using idx_user_no on segment_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +reset enable_seqscan; +reset enable_bitmapscan; +drop table range_list; +CREATE TABLE range_list +( + col_1 VARCHAR2 ( 30 ) , + col_2 VARCHAR2 ( 30 ) NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + ccol_4 VARCHAR2 ( 30 ), +col_5 VARCHAR2 ( 30 ), +col_6 VARCHAR2 ( 30 ), +col_7 VARCHAR2 ( 30 ), +col_8 VARCHAR2 ( 30 ), +col_9 VARCHAR2 ( 30 ), +col_10 VARCHAR2 ( 30 ), +col_11 VARCHAR2 ( 30 ), +col_12 VARCHAR2 ( 30 ), +col_13 VARCHAR2 ( 30 ), +col_14 VARCHAR2 ( 30 ), +col_15 VARCHAR2 ( 30 ), +col_16 VARCHAR2 ( 30 ), +col_17 VARCHAR2 ( 30 ), +col_18 VARCHAR2 ( 30 ), +col_19 VARCHAR2 ( 30 ), +col_20 VARCHAR2 ( 30 ), +col_21 VARCHAR2 ( 30 ), +col_22 VARCHAR2 ( 30 ), +col_23 VARCHAR2 ( 30 ), +col_24 VARCHAR2 ( 30 ), +col_25 VARCHAR2 ( 30 ), +col_26 VARCHAR2 ( 30 ), +col_27 VARCHAR2 ( 30 ), +col_28 VARCHAR2 ( 30 ), +col_29 VARCHAR2 ( 30 ), +col_30 VARCHAR2 ( 30 ), +col_31 VARCHAR2 ( 30 ), +col_32 VARCHAR2 ( 30 ), +col_33 VARCHAR2 ( 30 ), +col_34 VARCHAR2 ( 30 ), +col_35 VARCHAR2 ( 30 ), +col_36 VARCHAR2 ( 30 ), +col_37 VARCHAR2 ( 30 ), +col_38 VARCHAR2 ( 30 ), +col_39 VARCHAR2 ( 30 ), +col_40 VARCHAR2 ( 30 ), +col_41 VARCHAR2 ( 30 ), +col_42 VARCHAR2 ( 30 ), +col_43 VARCHAR2 ( 30 ), +col_44 VARCHAR2 ( 30 ), +col_45 VARCHAR2 ( 30 ), +col_46 VARCHAR2 ( 30 ), +col_47 VARCHAR2 ( 30 ), +col_48 VARCHAR2 ( 30 ), +col_49 VARCHAR2 ( 30 ), +col_50 VARCHAR2 ( 30 ), +col_51 VARCHAR2 ( 30 ), +col_52 VARCHAR2 ( 30 ), +col_53 VARCHAR2 ( 30 ), +col_54 VARCHAR2 ( 30 ), +col_55 VARCHAR2 ( 30 ), +col_56 VARCHAR2 ( 30 ), +col_57 VARCHAR2 ( 30 ), +col_58 VARCHAR2 ( 30 ), +col_59 VARCHAR2 ( 30 ), +col_60 VARCHAR2 ( 30 ), +col_61 VARCHAR2 ( 30 ), +col_62 VARCHAR2 ( 30 ), +col_63 VARCHAR2 ( 30 ), +col_64 VARCHAR2 ( 30 ), +col_65 VARCHAR2 ( 30 ), +col_66 VARCHAR2 ( 30 ), +col_67 VARCHAR2 ( 30 ), +col_68 VARCHAR2 ( 30 ), +col_69 VARCHAR2 ( 30 ), +col_70 VARCHAR2 ( 30 ), +col_71 VARCHAR2 ( 30 ), +col_72 VARCHAR2 ( 30 ), +col_73 VARCHAR2 ( 30 ), +col_74 VARCHAR2 ( 30 ), +col_75 VARCHAR2 ( 30 ), +col_76 VARCHAR2 ( 30 ), +col_77 VARCHAR2 ( 30 ), +col_78 VARCHAR2 ( 30 ), +col_79 VARCHAR2 ( 30 ), +col_80 VARCHAR2 ( 30 ), +col_81 VARCHAR2 ( 30 ), +col_82 VARCHAR2 ( 30 ), +col_83 VARCHAR2 ( 30 ) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( '-10' ) + ( +SUBPARTITION p_list_1_1 VALUES ( '-1' ), +SUBPARTITION p_list_1_2 VALUES ( '-2' ), +SUBPARTITION p_list_1_3 VALUES ( '-3' ), +SUBPARTITION p_list_1_4 VALUES ( '-4' ), +SUBPARTITION p_list_1_5 VALUES ( '-5' ), +SUBPARTITION p_list_1_6 VALUES ( '-6' ), +SUBPARTITION p_list_1_7 VALUES ( '-7' ), +SUBPARTITION p_list_1_8 VALUES ( '-8' ), +SUBPARTITION p_list_1_9 VALUES ( '-9' ), +SUBPARTITION p_list_1_10 VALUES ( '-10' ), +SUBPARTITION p_list_1_11 VALUES ( '-11' ), +SUBPARTITION p_list_1_12 VALUES ( '-12' ), +SUBPARTITION p_list_1_13 VALUES ( '-13' ), +SUBPARTITION p_list_1_14 VALUES ( '-14' ), +SUBPARTITION 
p_list_1_15 VALUES ( '-15' ), +SUBPARTITION p_list_1_16 VALUES ( '-16' ), +SUBPARTITION p_list_1_17 VALUES ( '-17' ), +SUBPARTITION p_list_1_18 VALUES ( '-18' ), +SUBPARTITION p_list_1_19 VALUES ( '-19' ), +SUBPARTITION p_list_1_20 VALUES ( '-20' ), +SUBPARTITION p_list_1_21 VALUES ( '-21' ), +SUBPARTITION p_list_1_22 VALUES ( '-22' ), +SUBPARTITION p_list_1_23 VALUES ( '-23' ), +SUBPARTITION p_list_1_24 VALUES ( '-24' ), +SUBPARTITION p_list_1_25 VALUES ( '-25' ), +SUBPARTITION p_list_1_26 VALUES ( '-26' ), +SUBPARTITION p_list_1_27 VALUES ( '-27' ), +SUBPARTITION p_list_1_28 VALUES ( '-28' ), +SUBPARTITION p_list_1_29 VALUES ( '-29' ), +SUBPARTITION p_list_1_30 VALUES ( '-30' ), +SUBPARTITION p_list_1_31 VALUES ( '-31' ), +SUBPARTITION p_list_1_32 VALUES ( '-32' ), +SUBPARTITION p_list_1_33 VALUES ( '-33' ), +SUBPARTITION p_list_1_34 VALUES ( '-34' ), +SUBPARTITION p_list_1_35 VALUES ( '-35' ), +SUBPARTITION p_list_1_36 VALUES ( '-36' ), +SUBPARTITION p_list_1_37 VALUES ( '-37' ), +SUBPARTITION p_list_1_38 VALUES ( '-38' ), +SUBPARTITION p_list_1_39 VALUES ( '-39' ), +SUBPARTITION p_list_1_40 VALUES ( '-40' ), +SUBPARTITION p_list_1_41 VALUES ( '-41' ), +SUBPARTITION p_list_1_42 VALUES ( '-42' ), +SUBPARTITION p_list_1_43 VALUES ( '-43' ), +SUBPARTITION p_list_1_44 VALUES ( '-44' ), +SUBPARTITION p_list_1_45 VALUES ( '-45' ), +SUBPARTITION p_list_1_46 VALUES ( '-46' ), +SUBPARTITION p_list_1_47 VALUES ( '-47' ), +SUBPARTITION p_list_1_48 VALUES ( '-48' ), +SUBPARTITION p_list_1_49 VALUES ( '-49' ), +SUBPARTITION p_list_1_50 VALUES ( '-50' ), +SUBPARTITION p_list_1_51 VALUES ( default ) + ), + PARTITION p_range_2 VALUES LESS THAN('10 ') + ( +SUBPARTITION p_list_2_1 VALUES ( '1' ), +SUBPARTITION p_list_2_2 VALUES ( '2' ), +SUBPARTITION p_list_2_3 VALUES ( '3' ), +SUBPARTITION p_list_2_4 VALUES ( '4' ), +SUBPARTITION p_list_2_5 VALUES ( '5' ), +SUBPARTITION p_list_2__6 VALUES ( '-6' ), +SUBPARTITION p_list_2_6 VALUES ( '6' ), +SUBPARTITION p_list_2_7 VALUES ( '7' ), +SUBPARTITION p_list_2_8 VALUES ( '8' ), +SUBPARTITION p_list_2_9 VALUES ( '9' ), +SUBPARTITION p_list_2_10 VALUES ( '10' ), +SUBPARTITION p_list_2_11 VALUES ( '11' ), +SUBPARTITION p_list_2_12 VALUES ( '12' ), +SUBPARTITION p_list_2_13 VALUES ( '13' ), +SUBPARTITION p_list_2_14 VALUES ( '14' ), +SUBPARTITION p_list_2_15 VALUES ( '15' ), +SUBPARTITION p_list_2_16 VALUES ( '16' ), +SUBPARTITION p_list_2_17 VALUES ( '17' ), +SUBPARTITION p_list_2_18 VALUES ( '18' ), +SUBPARTITION p_list_2_19 VALUES ( '19' ), +SUBPARTITION p_list_2_20 VALUES ( '20' ), +SUBPARTITION p_list_2_21 VALUES ( '21' ), +SUBPARTITION p_list_2_22 VALUES ( '22' ), +SUBPARTITION p_list_2_23 VALUES ( '23' ), +SUBPARTITION p_list_2_24 VALUES ( '24' ), +SUBPARTITION p_list_2_25 VALUES ( '25' ), +SUBPARTITION p_list_2_26 VALUES ( '26' ), +SUBPARTITION p_list_2_27 VALUES ( '27' ), +SUBPARTITION p_list_2_28 VALUES ( '28' ), +SUBPARTITION p_list_2_29 VALUES ( '29' ), +SUBPARTITION p_list_2_30 VALUES ( '30' ), +SUBPARTITION p_list_2_31 VALUES ( '31' ), +SUBPARTITION p_list_2_32 VALUES ( '32' ), +SUBPARTITION p_list_2_33 VALUES ( '33' ), +SUBPARTITION p_list_2_34 VALUES ( '34' ), +SUBPARTITION p_list_2_35 VALUES ( '35' ), +SUBPARTITION p_list_2_36 VALUES ( '36' ), +SUBPARTITION p_list_2_37 VALUES ( '37' ), +SUBPARTITION p_list_2_38 VALUES ( '38' ), +SUBPARTITION p_list_2_39 VALUES ( '39' ), +SUBPARTITION p_list_2_40 VALUES ( '40' ), +SUBPARTITION p_list_2_41 VALUES ( '41' ), +SUBPARTITION p_list_2_42 VALUES ( '42' ), +SUBPARTITION p_list_2_43 VALUES ( '43' ), 
+SUBPARTITION p_list_2_44 VALUES ( '44' ), +SUBPARTITION p_list_2_45 VALUES ( '45' ), +SUBPARTITION p_list_2_46 VALUES ( '46' ), +SUBPARTITION p_list_2_47 VALUES ( '47' ), +SUBPARTITION p_list_2_48 VALUES ( '48' ), +SUBPARTITION p_list_2_49 VALUES ( '49' ), +SUBPARTITION p_list_2_50 VALUES ( '50' ), +SUBPARTITION p_list_2_51 VALUES ( default ) + ), + PARTITION p_range_3 VALUES LESS THAN( '20 '), + PARTITION p_range_4 VALUES LESS THAN( '30' ) + ( + SUBPARTITION p_list_4_1 VALUES ( default ) + ), + PARTITION p_range_5 VALUES LESS THAN( '40' ) + ( + SUBPARTITION p_list_5_1 VALUES ( '41' ), +SUBPARTITION p_list_5_2 VALUES ( '42' ), +SUBPARTITION p_list_5_3 VALUES ( '43' ), +SUBPARTITION p_list_5_4 VALUES ( '44' ), +SUBPARTITION p_list_5_5 VALUES ( '45' ), +SUBPARTITION p_list_5_6 VALUES ( '46' ), +SUBPARTITION p_list_5_7 VALUES ( '47' ), +SUBPARTITION p_list_5_8 VALUES ( '48' ), +SUBPARTITION p_list_5_9 VALUES ( '49' ), +SUBPARTITION p_list_5_10 VALUES ( '50' ), +SUBPARTITION p_list_5_11 VALUES ( '51' ), +SUBPARTITION p_list_5_12 VALUES ( '52' ), +SUBPARTITION p_list_5_13 VALUES ( '53' ), +SUBPARTITION p_list_5_14 VALUES ( '54' ), +SUBPARTITION p_list_5_15 VALUES ( '55' ), +SUBPARTITION p_list_5_16 VALUES ( '56' ), +SUBPARTITION p_list_5_17 VALUES ( '57' ), +SUBPARTITION p_list_5_18 VALUES ( '58' ), +SUBPARTITION p_list_5_19 VALUES ( '59' ), +SUBPARTITION p_list_5_20 VALUES ( '60' ), +SUBPARTITION p_list_5_21 VALUES ( '61' ), +SUBPARTITION p_list_5_22 VALUES ( '62' ), +SUBPARTITION p_list_5_23 VALUES ( '63' ), +SUBPARTITION p_list_5_24 VALUES ( '64' ), +SUBPARTITION p_list_5_25 VALUES ( '65' ), +SUBPARTITION p_list_5_26 VALUES ( '66' ), +SUBPARTITION p_list_5_27 VALUES ( '67' ), +SUBPARTITION p_list_5_28 VALUES ( '68' ), +SUBPARTITION p_list_5_29 VALUES ( '69' ), +SUBPARTITION p_list_5_30 VALUES ( '70' ), +SUBPARTITION p_list_5_31 VALUES ( '71' ), +SUBPARTITION p_list_5_32 VALUES ( '72' ), +SUBPARTITION p_list_5_33 VALUES ( '73' ), +SUBPARTITION p_list_5_34 VALUES ( '74' ), +SUBPARTITION p_list_5_35 VALUES ( '75' ), +SUBPARTITION p_list_5_36 VALUES ( '76' ), +SUBPARTITION p_list_5_37 VALUES ( '77' ), +SUBPARTITION p_list_5_38 VALUES ( '78' ), +SUBPARTITION p_list_5_39 VALUES ( '79' ), +SUBPARTITION p_list_5_40 VALUES ( '80' ), +SUBPARTITION p_list_5_41 VALUES ( '81' ), +SUBPARTITION p_list_5_42 VALUES ( '82' ), +SUBPARTITION p_list_5_43 VALUES ( '83' ), +SUBPARTITION p_list_5_44 VALUES ( '84' ), +SUBPARTITION p_list_5_45 VALUES ( '85' ), +SUBPARTITION p_list_5_46 VALUES ( '86' ), +SUBPARTITION p_list_5_47 VALUES ( '87' ), +SUBPARTITION p_list_5_48 VALUES ( '88' ), +SUBPARTITION p_list_5_49 VALUES ( '89' ), +SUBPARTITION p_list_5_50 VALUES ( '90' ), +SUBPARTITION p_list_5_51 VALUES ( '91' ), +SUBPARTITION p_list_5_52 VALUES ( '92' ), +SUBPARTITION p_list_5_53 VALUES ( '93' ), +SUBPARTITION p_list_5_54 VALUES ( '94' ), +SUBPARTITION p_list_5_55 VALUES ( '95' ), +SUBPARTITION p_list_5_56 VALUES ( '96' ), +SUBPARTITION p_list_5_57 VALUES ( '97' ), +SUBPARTITION p_list_5_58 VALUES ( '98' ), +SUBPARTITION p_list_5_59 VALUES ( '99' ), +SUBPARTITION p_list_5_60 VALUES ( '100' ), +SUBPARTITION p_list_5_61 VALUES ( '101' ), +SUBPARTITION p_list_5_62 VALUES ( '102' ), +SUBPARTITION p_list_5_63 VALUES ( '103' ), +SUBPARTITION p_list_5_64 VALUES ( '104' ), +SUBPARTITION p_list_5_65 VALUES ( '105' ), +SUBPARTITION p_list_5_66 VALUES ( '106' ), +SUBPARTITION p_list_5_67 VALUES ( '107' ), +SUBPARTITION p_list_5_68 VALUES ( '108' ), +SUBPARTITION p_list_5_69 VALUES ( '109' ), +SUBPARTITION p_list_5_70 VALUES ( 
'110' ), +SUBPARTITION p_list_5_71 VALUES ( '111' ), +SUBPARTITION p_list_5_72 VALUES ( '112' ), +SUBPARTITION p_list_5_73 VALUES ( '113' ), +SUBPARTITION p_list_5_74 VALUES ( '114' ), +SUBPARTITION p_list_5_75 VALUES ( '115' ), +SUBPARTITION p_list_5_76 VALUES ( '116' ), +SUBPARTITION p_list_5_77 VALUES ( '117' ), +SUBPARTITION p_list_5_78 VALUES ( '118' ), +SUBPARTITION p_list_5_79 VALUES ( '119' ), +SUBPARTITION p_list_5_80 VALUES ( default ) + ), + PARTITION p_range_6 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index on range_list(col_2) local; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: segment_subpartition_scan.range_list.col_1 + -> Nested Loop + -> HashAggregate + Group By Key: (segment_subpartition_scan.range_list.col_1)::text + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: (((col_1)::bigint > 10) AND ((col_1)::bigint < 100)) + Selected Partitions: 1..6 + Selected Subpartitions: ALL + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Index Scan using range_list_col_2_idx on range_list + Index Cond: ((col_2)::text = (segment_subpartition_scan.range_list.col_1)::text) + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(18 rows) + +ALTER INDEX range_list_col_2_idx MODIFY PARTITION p_list_5_14_col_2_idx UNUSABLE; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: segment_subpartition_scan.range_list.col_1 + -> Nested Loop + -> HashAggregate + Group By Key: (segment_subpartition_scan.range_list.col_1)::text + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: (((col_1)::bigint > 10) AND ((col_1)::bigint < 100)) + Selected Partitions: 1..6 + Selected Subpartitions: ALL + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: ((segment_subpartition_scan.range_list.col_1)::text = (col_2)::text) + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(18 rows) + +drop table range_list; +create table range_range_jade(jid int,jn int,name varchar2) WITH (SEGMENT=ON) partition by range (jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), +subpartition hrp1_2 values less than(26), +subpartition hrp1_3 values less than(36), + subpartition hrp1_4 values less than(maxvalue)), + partition hrp2 values less than(26)( + subpartition hrp2_1 values less than(maxvalue)), + partition hrp3 values less than(36)( + subpartition hrp3_1 values less than(16), +subpartition hrp3_2 values less than(26), + subpartition hrp3_3 values less than(maxvalue)), + partition hrp4 values less than(maxvalue)( + subpartition hrp4_1 values less than(16), + subpartition hrp4_2 values less than(maxvalue)) +)ENABLE ROW MOVEMENT; +-- no errors +set enable_partition_opfusion = on; +insert into range_range_jade values(1,2,'jade'); +reset enable_partition_opfusion; +drop table range_range_jade; +drop table list_range_02; +ERROR: table 
"list_range_02" does not exist +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, +col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; +explain (costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Nested Loop Semi Join + Join Filter: (segment_subpartition_scan.list_range_02.col_2 = segment_subpartition_scan.list_range_02.col_1) + -> Partition Iterator + Iterations: 4, Sub Iterations: 4 + -> Partitioned Index Scan using index_01 on list_range_02 + Index Cond: ((col_2 = ANY ('{100,200,300}'::integer[])) AND (col_2 > 10) AND (col_2 < 100)) + Filter: ((col_1 + col_2) = 50) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 + -> Materialize + -> Partition Iterator + Iterations: 1, Sub Iterations: 1 + -> Partitioned Seq Scan on list_range_02 + Filter: ((col_1 > 10) AND (col_1 < 100) AND (col_1 = ANY ('{100,200,300}'::integer[]))) + Selected Partitions: 6 + Selected Subpartitions: ALL +(16 rows) + +drop table list_range_02; +DROP SCHEMA segment_subpartition_scan CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_select.out b/src/test/regress/expected/segment_subpartition_select.out new file mode 100644 index 000000000..a63a1dfc6 --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_select.out @@ -0,0 +1,1235 @@ +--prepare +DROP SCHEMA segment_subpartition_select CASCADE; +ERROR: schema "segment_subpartition_select" does not exist +CREATE SCHEMA segment_subpartition_select; +SET CURRENT_SCHEMA TO segment_subpartition_select; +--select +CREATE TABLE t1 +( + c1 int, + c2 int +) WITH (SEGMENT=ON); +insert into t1 values(generate_series(201901,201910), generate_series(1,10)); +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + 
dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '3', '1', 1); +ERROR: inserted partition key does not map to any table partition +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '3', '1', 1); +ERROR: inserted partition key does not map to any table partition +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 
| 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +select * from range_hash order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_hash partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_hash partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +CREATE 
TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +select * from range_range order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_range partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_range partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +--view 
+create view view_temp as select * from range_list; +select * from view_temp; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +--error +select * from view_temp partition (p_201901); +ERROR: relation "view_temp" is not partitioned table +DETAIL: N/A. +select * from view_temp partition (p_201902); +ERROR: relation "view_temp" is not partitioned table +DETAIL: N/A. +drop view view_temp; +with tmp1 as (select * from range_list ) select * from tmp1 order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +with tmp1 as (select * from range_list partition (p_201901)) select * from tmp1 order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +--join normal table +select * from range_list left join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_list left join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list right join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_list right join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list full join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_list 
full join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list inner join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 where dept_code = 2 
order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range left join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_range left join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range right join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_range right join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range full join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_range full join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | 
user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range inner join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +--join range_list and range_hash +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 
+ 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 
201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +--join range_hash and range_range +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 
| 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 
201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +--join range_hash and range_range +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | 
dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash inner join range_range on range_hash.month_code = 
range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +drop table list_range_02; +ERROR: table "list_range_02" does not exist +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; +INSERT INTO list_range_02 VALUES (GENERATE_SERIES(0, 19),GENERATE_SERIES(0, 
1000),GENERATE_SERIES(0, 99)); + explain (costs off, verbose on) select * from list_range_02 where col_2 >500 and col_2 <8000 order by col_1; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Sort + Output: col_1, col_2, col_3, col_4 + Sort Key: list_range_02.col_1 + -> Partition Iterator + Output: col_1, col_2, col_3, col_4 + Iterations: 4, Sub Iterations: 4 + -> Partitioned Bitmap Heap Scan on segment_subpartition_select.list_range_02 + Output: col_1, col_2, col_3, col_4 + Recheck Cond: ((list_range_02.col_2 > 500) AND (list_range_02.col_2 < 8000)) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 + -> Partitioned Bitmap Index Scan on index_01 + Index Cond: ((list_range_02.col_2 > 500) AND (list_range_02.col_2 < 8000)) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 +(15 rows) + +drop index index_01; +drop table list_range_02; +create table pjade(jid int,jn int,name varchar2) WITH (SEGMENT=ON) partition by range(jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), + subpartition hrp1_2 values less than(maxvalue)), + partition hrp2 values less than(maxvalue)( + subpartition hrp3_1 values less than(16), + subpartition hrp3_3 values less than(maxvalue)) +); +create table cjade(jid int,jn int,name varchar2) WITH (SEGMENT=ON); +insert into pjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +insert into cjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +drop table pjade; +drop table cjade; +DROP SCHEMA segment_subpartition_select CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table t1 +drop cascades to table range_list +drop cascades to table range_hash +drop cascades to table range_range +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_split.out b/src/test/regress/expected/segment_subpartition_split.out new file mode 100644 index 000000000..ee3452bb2 --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_split.out @@ -0,0 +1,395 @@ +--prepare +DROP SCHEMA segment_subpartition_split CASCADE; +ERROR: schema "segment_subpartition_split" does not exist +CREATE SCHEMA segment_subpartition_split; +SET CURRENT_SCHEMA TO segment_subpartition_split; +--split subpartition +-- list subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + 
SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '3', '1', 1); +select * from list_list order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(6 rows) + +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201901_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(2 rows) + +alter table list_list split subpartition p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201902_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +--error +alter table list_list split subpartition p_201902_a values (3) into +( + subpartition p_201902_ab, + subpartition p_201902_ac +); +ERROR: Only the default boundary subpartition can be splited. 
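+-- note: a LIST subpartition can be split only if its boundary is DEFAULT, as
+-- the error above shows. A minimal sketch of the accepted form, mirroring the
+-- default-subpartition splits earlier in this file (the names after INTO are
+-- illustrative, not part of this test):
+--   alter table list_list split subpartition p_201901_b values (3) into
+--   (
+--       subpartition p_201901_b,  -- keeps the rows remaining in DEFAULT
+--       subpartition p_201901_d   -- receives rows with dept_code = '3'
+--   );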
+drop table list_list; +-- range subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(2 rows) + +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +ERROR: subpartition "p_201901_b" of relation "range_range" does not exist +select * from range_range subpartition (p_201901_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_d) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 3 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(2 rows) + +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +ERROR: subpartition "p_201902_b" of relation "range_range" does not exist +select * from range_range subpartition (p_201902_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_d) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 5 | 1 | 1 +(1 row) + +drop table range_range; +--test syntax +CREATE TABLE IF NOT EXISTS list_hash +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10 ) + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 , + SUBPARTITION p_hash_2_3 , + SUBPARTITION p_hash_2_4 , + SUBPARTITION p_hash_2_5 + ), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30 ) + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_list_5 VALUES (default) + ( + SUBPARTITION p_hash_5_1 + ), + PARTITION p_list_6 VALUES (31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_hash_6_1 , + SUBPARTITION p_hash_6_2 , + SUBPARTITION p_hash_6_3 + ) +) ENABLE ROW MOVEMENT ; +alter table list_hash split subPARTITION p_hash_2_3 at(-10) into ( subPARTITION add_p_01 , subPARTITION add_p_02 ); +ERROR: Hash subpartition does not support split. +DETAIL: N/A +drop table list_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range split subpartition p_201901_b values (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +ERROR: The syntax format of split subpartition is incorrect. +DETAIL: SPLIT SUBPARTITION NAME VALUES shouldn't be used, it's for list subpartitions. +drop table range_range; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list split subpartition p_201901_b at (2, 3) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +ERROR: The syntax format of split subpartition is incorrect. +DETAIL: SPLIT SUBPARTITION NAME AT shouldn't be used, it's for range subpartitions. 
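+-- note: the three rejected statements above pin down the split-syntax pairing:
+-- SPLIT SUBPARTITION ... AT (point) applies only to RANGE subpartitions,
+-- SPLIT SUBPARTITION ... VALUES (list) applies only to LIST subpartitions,
+-- and HASH subpartitions cannot be split at all. Illustrative forms, with
+-- placeholder names, not executed by this test:
+--   alter table range_range split subpartition <name> at (3)
+--       into (subpartition <lower>, subpartition <upper>);
+--   alter table list_list split subpartition <default_name> values (2)
+--       into (subpartition <default_name>, subpartition <new_name>);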
+drop table list_list; +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +alter table list_list_02 split PARTITION for (5) at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +ERROR: Un-support feature +DETAIL: For subpartition table, split partition is not supported yet. +drop table list_list_02; +--clean +DROP SCHEMA segment_subpartition_split CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_truncate.out b/src/test/regress/expected/segment_subpartition_truncate.out new file mode 100644 index 000000000..76396c1ce --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_truncate.out @@ -0,0 +1,140 @@ +--prepare +DROP SCHEMA segment_subpartition_truncate CASCADE; +ERROR: schema "segment_subpartition_truncate" does not exist +CREATE SCHEMA segment_subpartition_truncate; +SET CURRENT_SCHEMA TO segment_subpartition_truncate; +--truncate partition/subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +alter table list_list truncate partition p_201901; +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +alter table list_list truncate partition p_201902; +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +alter table list_list truncate subpartition p_201901_a; +select * from list_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +alter table list_list truncate subpartition p_201901_b; +select * from list_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +alter table list_list truncate subpartition p_201902_a; +select * from list_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(2 rows) + +alter table list_list truncate subpartition p_201902_b; +select * from list_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +drop table list_list; +DROP SCHEMA segment_subpartition_truncate CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/segment_subpartition_update.out b/src/test/regress/expected/segment_subpartition_update.out new file mode 100644 index 000000000..a21dbf612 --- /dev/null +++ b/src/test/regress/expected/segment_subpartition_update.out @@ -0,0 +1,225 @@ +--prepare +DROP SCHEMA segment_subpartition_update CASCADE; +ERROR: schema "segment_subpartition_update" does not exist +CREATE SCHEMA segment_subpartition_update; +SET CURRENT_SCHEMA TO 
segment_subpartition_update; +--update +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)DISABLE ROW MOVEMENT; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +--error +update range_list set month_code = '201903'; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +--error +update range_list set dept_code = '2'; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +update range_list set user_no = '2'; +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 2 | 1 + 201902 | 1 | 2 | 1 + 201902 | 2 | 2 | 1 + 201903 | 1 | 2 | 1 + 201903 | 2 | 2 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +-- test for upsert and merge into, both should report error +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt=1; +CREATE TABLE temp_table +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON); +insert into temp_table values('201802', '1', '1', 1), ('201901', '2', '1', 1), ('201702', '1', '1', 1); +MERGE INTO range_list t1 +USING temp_table t2 +ON (t1.dept_code = t2.dept_code) +WHEN MATCHED THEN + UPDATE SET t1.month_code = t2.month_code WHERE t1.dept_code > 1 +WHEN NOT MATCHED THEN + INSERT VALUES (t2.month_code, t2.dept_code, t2.user_no, t2.sales_amt) WHERE t2.sales_amt = 1; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +drop table temp_table; +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)ENABLE ROW MOVEMENT; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order 
by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +update range_list set dept_code = '2' where month_code = '201902'; +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +update range_list set month_code = '201903' where month_code = '201902'; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +drop table range_list; +-- FOREIGN KEY +drop table tb_02; +ERROR: table "tb_02" does not exist +CREATE TABLE tb_02 +( + col_1 int PRIMARY KEY, + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "tb_02_pkey" for table "tb_02" +drop table range_range_02 cascade; +ERROR: table "range_range_02" does not exist +CREATE TABLE range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int , +FOREIGN KEY(col_1) REFERENCES tb_02(col_1) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 80 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); +insert into tb_02 values(0,0,0,0); +insert into range_range_02 values(0,0,0,0); +update tb_02 set col_1=8 where col_2=0; +ERROR: update or delete on table "tb_02" violates foreign key constraint "range_range_02_col_1_fkey" on table "range_range_02" +DETAIL: Key (col_1)=(0) is still referenced from table "range_range_02". 
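+-- A minimal follow-up sketch (assuming standard foreign-key semantics; these
+-- two statements are not part of the original case list): the parent-key
+-- update above is rejected only while the child row still references the old
+-- value, so removing that row first lets the same update through silently.
+delete from range_range_02 where col_1 = 0;
+update tb_02 set col_1 = 8 where col_2 = 0;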
+drop table range_range_02 cascade; +drop table tb_02; +DROP SCHEMA segment_subpartition_update CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/select_where_func.out b/src/test/regress/expected/select_where_func.out new file mode 100644 index 000000000..263208e8d --- /dev/null +++ b/src/test/regress/expected/select_where_func.out @@ -0,0 +1,310 @@ +-- check compatibility -- +show sql_compatibility; -- expect A -- + sql_compatibility +------------------- + A +(1 row) + +drop schema if exists sch2; +NOTICE: schema "sch2" does not exist, skipping +create schema sch2; +set current_schema = sch2; +create table tbl_1 (a int, b int); +insert into tbl_1 values (1,1); +insert into tbl_1 values (2,2); +insert into tbl_1 values (3,3); +insert into tbl_1 values (4,4); +insert into tbl_1 values (5,5); +create or replace function func_1() returns integer AS $Summary$ +declare +begin +return 1; +end; +$Summary$ language plpgsql; +create or replace function func_1(a int) returns integer AS $Summary$ +declare +begin +return a; +end; +$Summary$ language plpgsql; +drop schema if exists sch1; +NOTICE: schema "sch1" does not exist, skipping +create schema sch1; +create or replace function sch1.func_1() returns integer AS $Summary$ +declare +begin +return 4; +end; +$Summary$ language plpgsql; +-- package +create or replace package aa +is +function func_2() return integer; +end aa; +/ +create or replace package body aa +is +function func_2 return integer +is +begin + return 3; +end; +end aa; +/ +select aa.func_2(); + func_2 +-------- + 3 +(1 row) + +create or replace package sch1.aa +is +function func_2() return integer; +end aa; +/ +create or replace package body sch1.aa +is +function func_2 return integer +is +begin + return 5; +end; +end aa; +/ +select sch1.aa.func_2(); + func_2 +-------- + 5 +(1 row) + +select * from tbl_1 where a = func_1(); + a | b +---+--- + 1 | 1 +(1 row) + +select * from tbl_1 where a = func_1(2); + a | b +---+--- + 2 | 2 +(1 row) + +select * from tbl_1 where a = func_1; + a | b +---+--- + 1 | 1 +(1 row) + +-- pkg.fun +select * from tbl_1 where a = aa.func_2(); + a | b +---+--- + 3 | 3 +(1 row) + +select * from tbl_1 where a = aa.func_2; + a | b +---+--- + 3 | 3 +(1 row) + +-- schma.fun +select * from tbl_1 where a = sch1.func_1(); + a | b +---+--- + 4 | 4 +(1 row) + +select * from tbl_1 where a = sch1.func_1; + a | b +---+--- + 4 | 4 +(1 row) + +-- pkg.schma.fun +select * from tbl_1 where a = sch1.aa.func_2(); + a | b +---+--- + 5 | 5 +(1 row) + +select * from tbl_1 where a = sch1.aa.func_2; + a | b +---+--- + 5 | 5 +(1 row) + +create or replace package a2 +as +v integer := 1; +function func_11() return integer; +end a2; +/ +create or replace package body a2 +is +function func_11 return integer +is + b integer; +begin + return 1; +end; +end a2; +/ +create or replace package a3 +is +function func_111() return integer; +end a3; +/ +create or replace package body a3 +is +function func_111 return integer +is + cur sys_refcursor; + temp integer; +begin + open cur for + select a from tbl_1 t where (t.a = a2.v or t.b = 3); + fetch cur into temp; + RAISE INFO '%' , temp; + fetch cur into temp; + RAISE INFO '%' , temp; + return 3; +end; +end a3; +/ +select a3.func_111(); +INFO: 1 +CONTEXT: referenced column: func_111 +INFO: 3 +CONTEXT: referenced column: func_111 + func_111 +---------- + 3 +(1 row) + +create table t1(c1 int); +insert into t1 values(1),(2),(3),(4),(5),(6); +create or replace package call_test as +function func1() return int; +function func2(c1 out int) 
return int; +function func3(c1 int default 3) return int; +procedure proc1(); +procedure proc2(c1 out int); +procedure proc3(c1 int default 3, c2 out int); +end call_test; +/ +create or replace package body call_test as +function func1() return int as +begin + return 1; +end; +function func2(c1 out int) return int as +begin + return 2; +end; +function func3(c1 int default 3) return int as +begin + return 3; +end; +procedure proc1() as +begin + raise info 'proc1'; +end; +procedure proc2(c1 out int) as +begin + c1 := 5; +end; +procedure proc3(c1 int default 3, c2 out int) as +begin + c2 := 6; +end; +end call_test; +/ +begin +call_test.proc1; +end; +/ +INFO: proc1 +CONTEXT: SQL statement "CALL call_test.proc1()" +PL/pgSQL function inline_code_block line 3 at PERFORM +select * from t1 where c1 = call_test.func1; + c1 +---- + 1 +(1 row) + +select * from t1 where c1 = call_test.func2; +ERROR: missing FROM-clause entry for table "call_test" +LINE 1: select * from t1 where c1 = call_test.func2; + ^ +select * from t1 where c1 = call_test.func3; +ERROR: missing FROM-clause entry for table "call_test" +LINE 1: select * from t1 where c1 = call_test.func3; + ^ +select * from t1 where c1 = call_test.proc1; +ERROR: operator does not exist: integer = void +LINE 1: select * from t1 where c1 = call_test.proc1; + ^ +HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +select * from t1 where c1 = call_test.proc2; +ERROR: missing FROM-clause entry for table "call_test" +LINE 1: select * from t1 where c1 = call_test.proc2; + ^ +select * from t1 where c1 = call_test.proc3; +ERROR: missing FROM-clause entry for table "call_test" +LINE 1: select * from t1 where c1 = call_test.proc3; + ^ +declare +var int; +begin +call_test.func2; +end; +/ +ERROR: function call_test.func2 has no enough parameters +CONTEXT: compilation of PL/pgSQL function "inline_code_block" near line 2 +declare +var int; +begin +select c1 into var from t1 where c1 = call_test.func1; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.func2; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.func3; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.proc2; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.proc3; +raise info 'var is %',var; +--call call_test.proc1; +end; +/ +INFO: var is 1 +ERROR: missing FROM-clause entry for table "call_test" +LINE 1: select c1 from t1 where c1 = call_test.func2 + ^ +QUERY: select c1 from t1 where c1 = call_test.func2 +CONTEXT: PL/pgSQL function inline_code_block line 6 at SQL statement +drop package call_test; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to function sch2.func1() +drop cascades to function sch2.func2() +drop cascades to function sch2.func3(integer) +drop cascades to function sch2.proc1() +drop cascades to function sch2.proc2() +drop cascades to function sch2.proc3(integer) +drop package sch1.aa; +NOTICE: drop cascades to function sch1.func_2() +drop package aa; +NOTICE: drop cascades to function sch2.func_2() +drop package a2; +NOTICE: drop cascades to function sch2.func_11() +drop package a3; +NOTICE: drop cascades to function sch2.func_111() +drop table tbl_1; +drop function func_1; +drop schema if exists sch1 cascade; +NOTICE: drop cascades to function sch1.func_1() +drop schema if exists sch2 cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function func_1(integer) +drop cascades to table t1 
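+-- Recap, as a short sketch of the behavior exercised above (sql_compatibility
+-- = A assumed): a zero-argument function, schema-qualified function, or
+-- package function can be referenced without parentheses in an expression,
+-- e.g. "where c1 = call_test.func1" behaves like "call_test.func1()".
+-- Functions with OUT or defaulted parameters (func2, func3, proc2, proc3)
+-- are not resolved this way ("missing FROM-clause entry"), and a procedure
+-- returning void (proc1) fails type checking ("operator does not exist:
+-- integer = void").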
diff --git a/src/test/regress/expected/single_node_enum.out b/src/test/regress/expected/single_node_enum.out index 5a2e6ec81..385586d9c 100644 --- a/src/test/regress/expected/single_node_enum.out +++ b/src/test/regress/expected/single_node_enum.out @@ -362,6 +362,7 @@ DROP INDEX enumtest_btree; -- Hash index / opclass with the = operator -- CREATE INDEX enumtest_hash ON enumtest USING hash (col); +ERROR: access method "hash" does not support row store SELECT * FROM enumtest WHERE col = 'orange'; col -------- @@ -369,6 +370,7 @@ SELECT * FROM enumtest WHERE col = 'orange'; (1 row) DROP INDEX enumtest_hash; +ERROR: index "enumtest_hash" does not exist -- -- End index tests -- diff --git a/src/test/regress/expected/single_node_function_commit_rollback.out b/src/test/regress/expected/single_node_function_commit_rollback.out new file mode 100644 index 000000000..99e49cb03 --- /dev/null +++ b/src/test/regress/expected/single_node_function_commit_rollback.out @@ -0,0 +1,1487 @@ +create or replace function test_without_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 2; +end; +/ +call test_without_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_without_commit() line 3 at SQL statement + test_without_commit +--------------------- + +(1 row) + +select * from test_commit; + a +--- + 2 +(1 row) + +create or replace function test_empty_sp() return void +as +begin + insert into test_commit select 1; + insert into test_commit select 2; + insert into test_commit select 3; +end; +/ +call test_empty_sp(); + test_empty_sp +--------------- + +(1 row) + +select * from test_commit; + a +--- + 2 + 1 + 2 + 3 +(4 rows) + +drop table test_commit; +create or replace function test_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; +end; +/ +call test_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit() line 3 at SQL statement + test_commit +------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_option() return void +shippable +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; +end; +/ +call test_commit_insert_option(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_option() line 3 at SQL statement + test_commit_insert_option +--------------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_delete() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + commit; +end; +/ +call test_commit_insert_delete(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_delete() line 3 at SQL statement + test_commit_insert_delete +--------------------------- + +(1 row) + +select * 
from test_commit; + a | b +---+--- + 2 | 2 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_update() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + commit; +end; +/ +call test_commit_insert_update(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_update() line 3 at SQL statement + test_commit_insert_update +--------------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 3 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_update_delete() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + delete from test_commit where a = 1; + commit; +end; +/ +call test_commit_insert_update_delete(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_update_delete() line 3 at SQL statement + test_commit_insert_update_delete +---------------------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- +(0 rows) + +drop table test_commit; +create or replace function test_commit_insert_delete_update() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; +end; +/ +call test_commit_insert_delete_update(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_delete_update() line 3 at SQL statement + test_commit_insert_delete_update +---------------------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 2 | 3 +(1 row) + +drop table test_commit; +create or replace function test_commit_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + insert into test_commit select 2, 2; + commit; +end; +/ +call test_commit_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_commit() line 3 at SQL statement + test_commit_commit +-------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 + 2 | 2 +(2 rows) + +drop table test_commit; +create or replace function test_commit_commit1() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + commit; + update test_commit set b = 3 where a = 2; + delete from test_commit where a = 1; + commit; +end; +/ +call test_commit_commit1(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_commit1() line 3 at SQL statement + test_commit_commit1 +--------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 2 | 3 +(1 row) + +drop table test_commit; +create or replace function test_commit_rollback() return void +as +begin 
+ drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + insert into test_commit select 2, 2; + rollback; +end; +/ +call test_commit_rollback(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_rollback() line 3 at SQL statement + test_commit_rollback +---------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_commit_rollback1() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + commit; + update test_commit set b = 3 where a = 2; + delete from test_commit where a = 1; + rollback; +end; +/ +call test_commit_rollback1(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_rollback1() line 3 at SQL statement + test_commit_rollback1 +----------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 + 2 | 2 +(2 rows) + +drop table test_commit; +create or replace function test_rollback_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + rollback; + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 3, 3; + insert into test_commit select 4, 4; + insert into test_commit select 5, 5; + update test_commit set b = 6 where a = 5; + delete from test_commit where a = 3; + commit; +end; +/ +call test_rollback_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_rollback_commit() line 3 at SQL statement +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_rollback_commit() line 8 at SQL statement + test_rollback_commit +---------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 4 | 4 + 5 | 6 +(2 rows) + +drop table test_commit; +create or replace function test_commit_insert_exception_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + raise exception 'raise exception after commit'; +exception + when others then + insert into test_commit select 2, 2; + rollback; +end; +/ +call test_commit_insert_exception_rollback(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_exception_rollback() line 3 at SQL statement + test_commit_insert_exception_rollback +--------------------------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_exception_commit_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + raise exception 'raise exception after commit'; +exception + when others then + insert into test_commit select 2, 2; + commit; + rollback; +end; +/ +call 
test_commit_insert_exception_commit_rollback(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_exception_commit_rollback() line 3 at SQL statement + test_commit_insert_exception_commit_rollback +---------------------------------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 + 2 | 2 +(2 rows) + +drop table test_commit; +create or replace function test_commit_insert_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ +call test_commit_insert_raise_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_raise_commit() line 3 at SQL statement +ERROR: After commit +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_delete_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ +call test_commit_insert_delete_raise_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_delete_raise_commit() line 3 at SQL statement +ERROR: After commit +select * from test_commit; + a | b +---+--- + 2 | 2 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_update_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ +call test_commit_insert_update_raise_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_update_raise_commit() line 3 at SQL statement +ERROR: After commit +select * from test_commit; + a | b +---+--- + 1 | 3 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_update_delete_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + delete from test_commit where a = 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ +call test_commit_insert_update_delete_raise_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_update_delete_raise_commit() line 3 at SQL statement +ERROR: After commit +select * from test_commit; + a | b +---+--- +(0 rows) + +drop table test_commit; +create or replace function test_commit_insert_delete_update_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ +call 
test_commit_insert_delete_update_raise_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_delete_update_raise_commit() line 3 at SQL statement +ERROR: After commit +select * from test_commit; + a | b +---+--- + 2 | 3 +(1 row) + +drop table test_commit; +create or replace function test_commit_insert_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ +call test_commit_insert_commit_raise(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_commit_raise() line 3 at SQL statement +ERROR: Before commit +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_insert_delete_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ +call test_commit_insert_delete_commit_raise(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_delete_commit_raise() line 3 at SQL statement +ERROR: Before commit +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_insert_update_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ +call test_commit_insert_update_commit_raise(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_update_commit_raise() line 3 at SQL statement +ERROR: Before commit +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_insert_update_delete_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + delete from test_commit where a = 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ +call test_commit_insert_update_delete_commit_raise(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_update_delete_commit_raise() line 3 at SQL statement +ERROR: Before commit +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_insert_delete_update_commit_raise() return 
void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ +call test_commit_insert_delete_update_commit_raise(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_insert_delete_update_commit_raise() line 3 at SQL statement +ERROR: Before commit +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_exception_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_exception_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_exception_commit() line 3 at SQL statement + test_exception_commit +----------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 2 | 3 +(1 row) + +drop table test_commit; +create or replace function test_exception_commit_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; + RAISE EXCEPTION 'After commit'; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_exception_commit_commit_raise(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_exception_commit_commit_raise() line 3 at SQL statement +ERROR: (After commit) +select * from test_commit; + a | b +---+--- + 2 | 3 +(1 row) + +drop table test_commit; +create or replace function test_exception_commit_raise_commit() return void +as +begin +drop table if exists test_commit; +create table test_commit(a int, b int); +insert into test_commit select 1, 1; +insert into test_commit select 2, 2; +delete from test_commit where a = 1; +update test_commit set b = 3 where a = 2; +RAISE EXCEPTION 'After commit'; +commit; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_exception_commit_raise_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_exception_commit_raise_commit() line 3 at SQL statement +ERROR: (After commit) +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_gg_1() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 
2; + commit; + insert into test_commit select 3, 3; + RAISE EXCEPTION 'After commit'; +EXCEPTION + when raise_exception then + rollback; + insert into test_commit select 4, 4; + commit; +end; +/ +call test_gg_1(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_gg_1() line 3 at SQL statement + test_gg_1 +----------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 2 | 3 + 4 | 4 +(2 rows) + +drop table test_commit; +create or replace function test_commit_exception() return void +is +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + commit; + delete from test_commit; + commit; + update test_commit set a=3; + commit; +exception + WHEN OTHERS THEN + insert into test_commit select 2; + commit; +end; +/ +call test_commit_exception(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_exception() line 3 at SQL statement + test_commit_exception +----------------------- + +(1 row) + +select * from test_commit; + a +--- +(0 rows) + +drop table test_commit; +create or replace function test_commit2() return void +is +begin + drop table if exists test_commit; + create table test_commit(a int); + FOR i IN REVERSE 3..0 LOOP + insert into test_commit select i; + commit; + END LOOP; + FOR i IN REVERSE 2..4 LOOP + update test_commit set a=i; + commit; + END LOOP; +exception +WHEN OTHERS THEN +-- FOR i IN REVERSE 200...101 LOOP + insert into test_commit select 4; +-- END LOOP; + commit; +end; +/ +call test_commit2(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit2() line 3 at SQL statement + test_commit2 +-------------- + +(1 row) + +select * from test_commit; + a +--- + 3 + 2 + 1 + 0 +(4 rows) + +drop table test_commit; +create or replace function test_commit3() return void +is +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + commit; + call test_commit2(); + update test_commit set a=2; + commit; +exception +WHEN OTHERS THEN + insert into test_commit select 3; + commit; +end; +/ +call test_commit3(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit3() line 3 at SQL statement + test_commit3 +-------------- + +(1 row) + +select * from test_commit; + a +--- + 3 + 2 + 1 + 0 + 3 +(5 rows) + +drop table test_commit; +create or replace function test_rollback_with_exception() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + rollback; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_rollback_with_exception(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_rollback_with_exception() line 3 at SQL statement + test_rollback_with_exception +------------------------------ + +(1 row) + +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_nest_function_without_commit() return void +as +begin + drop table if exists test_commit; + 
create table test_commit(a int); + insert into test_commit select 3; + commit; + test_without_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_nest_function_without_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_nest_function_without_commit() line 3 at SQL statement + test_nest_function_without_commit +----------------------------------- + +(1 row) + +select * from test_commit; + a +--- + 2 +(1 row) + +drop table test_commit; +create or replace function test_nest_function() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 3; + commit; + test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_nest_function(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_nest_function() line 3 at SQL statement + test_nest_function +-------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_nest_function1() return void +as +begin + test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_nest_function1(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit() line 3 at SQL statement +SQL statement "CALL test_commit()" +PL/pgSQL function test_nest_function1() line 3 at PERFORM + test_nest_function1 +--------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_nest_function2() return void +as +begin + test_commit(); +end; +/ +call test_nest_function2(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit() line 3 at SQL statement +SQL statement "CALL test_commit()" +PL/pgSQL function test_nest_function2() line 3 at PERFORM + test_nest_function2 +--------------------- + +(1 row) + +select * from test_commit; + a | b +---+--- + 1 | 1 +(1 row) + +drop table test_commit; +create or replace function test_nest_function_rollback() return void +as +begin + test_without_commit(); + rollback; +end; +/ +call test_nest_function_rollback(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_without_commit() line 3 at SQL statement +SQL statement "CALL test_without_commit()" +PL/pgSQL function test_nest_function_rollback() line 3 at PERFORM + test_nest_function_rollback +----------------------------- + +(1 row) + +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_nest_function_select() return void +as +begin + insert into tx select 3; + commit; + select test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_nest_function_select(); +ERROR: relation "tx" does not exist on datanode1 +LINE 1: insert into tx select 3 + ^ +QUERY: insert into tx select 3 +CONTEXT: PL/pgSQL function test_nest_function_select() line 3 at SQL statement 
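+-- Note, as a brief reading of the failure above: the insert into the
+-- non-existent relation "tx" aborts test_nest_function_select() before the
+-- nested test_commit() call is ever reached, so test_commit is never created
+-- and the select/drop below fail accordingly.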
+select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_nest_function_calll() return void +as +begin + insert into tx select 3; + commit; + call test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_nest_function_calll(); +ERROR: relation "tx" does not exist on datanode1 +LINE 1: insert into tx select 3 + ^ +QUERY: insert into tx select 3 +CONTEXT: PL/pgSQL function test_nest_function_calll() line 3 at SQL statement +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_exception_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + raise exception 'Exception rollback'; + insert into test_commit select 2; +EXCEPTION + when raise_exception then + insert into test_commit select 3; + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_commit_exception_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_exception_commit() line 3 at SQL statement +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: insert into test_commit select 3 + ^ +QUERY: insert into test_commit select 3 +CONTEXT: PL/pgSQL function test_commit_exception_commit() line 10 at SQL statement +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_exception_commit_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + raise exception 'Exception rollback'; + insert into test_commit select 2; +EXCEPTION + when raise_exception then + insert into test_commit select 3; + commit; + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_commit_exception_commit_commit(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_commit_exception_commit_commit() line 3 at SQL statement +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: insert into test_commit select 3 + ^ +QUERY: insert into test_commit select 3 +CONTEXT: PL/pgSQL function test_commit_exception_commit_commit() line 10 at SQL statement +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_exception_commit_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + raise exception 'Exception rollback'; + insert into test_commit select 2; +EXCEPTION + when raise_exception then + insert into test_commit select 3; + rollback; + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ +call test_commit_exception_commit_rollback(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function 
test_commit_exception_commit_rollback() line 3 at SQL statement +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: insert into test_commit select 3 + ^ +QUERY: insert into test_commit select 3 +CONTEXT: PL/pgSQL function test_commit_exception_commit_rollback() line 10 at SQL statement +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_rollback return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + rollback; + insert into test_commit select 2; +end; +/ +call test_rollback(); +NOTICE: table "test_commit" does not exist, skipping +CONTEXT: SQL statement "drop table if exists test_commit" +PL/pgSQL function test_rollback() line 3 at SQL statement +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: insert into test_commit select 2 + ^ +QUERY: insert into test_commit select 2 +CONTEXT: PL/pgSQL function test_rollback() line 7 at SQL statement +select * from test_commit; +ERROR: relation "test_commit" does not exist on datanode1 +LINE 1: select * from test_commit; + ^ +drop table test_commit; +ERROR: table "test_commit" does not exist +create or replace function test_commit_inout(p inout int) return void +as +declare +begin + p = 3; + commit; + --DBE_OUTPUT.print_line('Cursor status:' + p); +end; +/ +select test_commit_inout(1); + test_commit_inout +------------------- + +(1 row) + +create or replace function test_rollback_inout(p inout int) return void +as +declare +begin + p = 3; + rollback; + --DBE_OUTPUT.print_line('Cursor status:' + p); +end; +/ +select test_rollback_inout(1); + test_rollback_inout +--------------------- + +(1 row) + +create or replace function test_rollback_out(p out int) return void +as +declare +begin + p = 3; + rollback; + --DBE_OUTPUT.print_line('Cursor status:' + p); +end; +/ +select test_rollback_out(); + test_rollback_out +------------------- + +(1 row) + +create or replace function test_rollback1() return void +as +declare +begin + create table test1(col1 int); + insert into test1 values(1); + rollback; +end; +/ +call test_rollback1(); + test_rollback1 +---------------- + +(1 row) + +create type func_type_04 as ( v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying, v_cur refcursor); +create table test_cursor_table(c1 int,c2 varchar); +insert into test_cursor_table values(1,'Jack'),(2,'Rose'); +CREATE or replace function func_base13_03(v_tablefield character varying, v_tablefield2 character varying,v_tablename character varying) return refcursor +AS +v_cur refcursor; +begin + open v_cur for + 'select '||v_tablefield||' as tablecode, '||v_tablefield2||' as tablename from '||v_tablename|| ' order by 1,2;'; + return v_cur; +end; +/ +CREATE or replace function func_base13_04(v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying) return void +AS + v_record func_type_04; + v_cur refcursor; + num int; +begin + num := 0; + v_cur := func_base13_03(v_tablefield, v_tablefield2, v_tablename); + loop + fetch v_cur into v_record; + num := num+1; + raise notice 'the num is %(%)', num,v_record; + EXIT WHEN v_cur%notfound; + end loop; +end; +/ +call func_base13_04('c1','c2','test_cursor_table'); +NOTICE: the num is 1((1,Jack,,)) +NOTICE: the num is 2((2,Rose,,)) +NOTICE: the num is 3((2,Rose,,)) + func_base13_04 
+---------------- + +(1 row) + +CREATE or replace function func_base13_05(v_tablefield character varying, v_tablefield2 character varying,v_tablename character varying) return refcursor +AS +v_cur refcursor; +begin + open v_cur for + 'select '||v_tablefield||' as tablecode, '||v_tablefield2||' as tablename from '||v_tablename|| ' order by 1,2;'; + commit; + return v_cur; +end; +/ +CREATE or replace function func_base13_06(v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying) return void +AS + v_record func_type_04; + v_cur refcursor; +begin + select func_base13_05(v_tablefield, v_tablefield2, v_tablename) into v_cur; + loop + fetch v_cur into v_record; + raise notice '(%)', v_record; + EXIT WHEN v_cur%notfound; + end loop; +end; +/ +call func_base13_06('c1','c2','test_cursor_table'); +NOTICE: ((1,Jack,,)) +NOTICE: ((2,Rose,,)) +NOTICE: ((2,Rose,,)) + func_base13_06 +---------------- + +(1 row) + +CREATE or replace function func_base13_07(v_tablefield character varying, v_tablefield2 character varying,v_tablename character varying) return refcursor +AS +v_cur refcursor; +begin + open v_cur for + 'select '||v_tablefield||' as tablecode, '||v_tablefield2||' as tablename from '||v_tablename|| ' order by 1,2;'; + commit; + return v_cur; +end; +/ +CREATE or replace function func_base13_08(v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying) return void +AS + v_record func_type_04; + v_cur refcursor; +begin + select func_base13_07(v_tablefield, v_tablefield2, v_tablename) into v_cur; + + loop + fetch v_cur into v_record; + raise notice 'before commit(%)', v_record; + commit; + raise notice 'after commit(%)', v_record; + EXIT WHEN v_cur%notfound; + end loop; + return; +end; +/ +call func_base13_08('c1','c2','test_cursor_table'); +NOTICE: before commit((1,Jack,,)) +NOTICE: after commit((1,Jack,,)) +NOTICE: before commit((2,Rose,,)) +NOTICE: after commit((2,Rose,,)) +NOTICE: before commit((2,Rose,,)) +NOTICE: after commit((2,Rose,,)) + func_base13_08 +---------------- + +(1 row) + +select * from test_cursor_table; + c1 | c2 +----+------ + 1 | Jack + 2 | Rose +(2 rows) + +drop table if exists test_cursor_table; +CREATE TABLE EXAMPLE1(COL1 INT); +CREATE OR REPLACE FUNCTION FUNCTION_EXAMPLE1 RETURN INT +AS +BEGIN + FOR i IN 0..20 LOOP + INSERT INTO EXAMPLE1 VALUES(i); + IF mod(i,2) = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; + RETURN 1; +END; +/ +select FUNCTION_EXAMPLE1(); + function_example1 +------------------- + 1 +(1 row) + +select * from FUNCTION_EXAMPLE1() where 1=1; +ERROR: can not use commit rollback in Complex SQL +CONTEXT: PL/pgSQL function function_example1() line 6 at COMMIT +update EXAMPLE1 set COL1=666 where COL1=2 and FUNCTION_EXAMPLE1(); +ERROR: can not use commit rollback in Complex SQL +CONTEXT: PL/pgSQL function function_example1() line 6 at COMMIT +select (select FUNCTION_EXAMPLE1()); + function_example1 +------------------- + 1 +(1 row) + +select (select * from FUNCTION_EXAMPLE1() where 1=1); +ERROR: can not use commit rollback in Complex SQL +CONTEXT: PL/pgSQL function function_example1() line 6 at COMMIT +referenced column: function_example1 +create or replace function func1() return void +as +declare +a int; +begin +a := 1/0; +exception + WHEN division_by_zero THEN + raise notice '% % %',sqlstate,SQLCODE,sqlerrm; +end; +/ +call func1(); +NOTICE: 22012 22012 division by zero + func1 +------- + +(1 row) + +drop function func1; +drop table if exists EXAMPLE1; +drop function 
FUNCTION_EXAMPLE1; +drop function test_without_commit; +drop function test_empty_sp; +drop function test_commit; +drop function test_commit_insert_option; +drop function test_commit_insert_delete; +drop function test_commit_insert_update; +drop function test_commit_insert_update_delete; +drop function test_commit_insert_delete_update; +drop function test_commit_commit; +drop function test_commit_commit1; +drop function test_commit_rollback; +drop function test_commit_rollback1; +drop function test_rollback_commit; +drop function test_commit_insert_exception_rollback; +drop function test_commit_insert_exception_commit_rollback; +drop function test_commit_insert_raise_commit; +drop function test_commit_insert_delete_raise_commit; +drop function test_commit_insert_update_raise_commit; +drop function test_commit_insert_update_delete_raise_commit; +drop function test_commit_insert_delete_update_raise_commit; +drop function test_commit_insert_commit_raise; +drop function test_commit_insert_delete_commit_raise; +drop function test_commit_insert_update_commit_raise; +drop function test_commit_insert_update_delete_commit_raise; +drop function test_commit_insert_delete_update_commit_raise; +drop function test_exception_commit; +drop function test_exception_commit_commit_raise; +drop function test_exception_commit_raise_commit; +drop function test_gg_1; +drop function test_commit_exception; +drop function test_commit2; +drop function test_commit3; +drop function test_rollback_with_exception; +drop function test_nest_function_without_commit; +drop function test_nest_function; +drop function test_nest_function1; +drop function test_nest_function2; +drop function test_nest_function_rollback; +drop function test_nest_function_select; +drop function test_nest_function_calll; +drop function test_commit_exception_commit; +drop function test_commit_exception_commit_commit; +drop function test_commit_exception_commit_rollback; +drop function test_rollback; +drop function test_commit_inout; +drop function test_rollback_inout; +drop function test_rollback_out; +drop function test_rollback1; +drop function func_base13_03; +drop function func_base13_04; +drop function func_base13_05; +drop function func_base13_06; +drop function func_base13_07; +drop function func_base13_08; diff --git a/src/test/regress/expected/single_node_insert.out b/src/test/regress/expected/single_node_insert.out index 6d772c5b2..f90a32f33 100644 --- a/src/test/regress/expected/single_node_insert.out +++ b/src/test/regress/expected/single_node_insert.out @@ -81,35 +81,35 @@ select col1, col2, char_length(col3) from inserttest; (8 rows) drop table inserttest; -create table s1_DTS2019021501700(id int, num int); -create sequence ss1_DTS2019021501700; -select setval('ss1_DTS2019021501700', 10); +create table s1_TESTTABLE(id int, num int); +create sequence ss1_TESTTABLE; +select setval('ss1_TESTTABLE', 10); setval -------- 10 (1 row) -select * from ss1_DTS2019021501700; - sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid -----------------------+------------+-------------+--------------+---------------------+-----------+-------------+---------+-----------+-----------+------ - ss1_dts2019021501700 | 10 | 1 | 1 | 9223372036854775807 | 1 | 1 | 0 | f | t | 0 +select * from ss1_TESTTABLE; + sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid 
+---------------+------------+-------------+--------------+---------------------+-----------+-------------+---------+-----------+-----------+------ + ss1_testtable | 10 | 1 | 1 | 9223372036854775807 | 1 | 1 | 0 | f | t | 0 (1 row) -alter table s1_DTS2019021501700 alter column id set default nextval('ss1_DTS2019021501700'); -insert into s1_DTS2019021501700 (num) values (11); -insert into s1_DTS2019021501700 (num) values (12); -select * from s1_DTS2019021501700 order by id; +alter table s1_TESTTABLE alter column id set default nextval('ss1_TESTTABLE'); +insert into s1_TESTTABLE (num) values (11); +insert into s1_TESTTABLE (num) values (12); +select * from s1_TESTTABLE order by id; id | num ----+----- 11 | 11 12 | 12 (2 rows) -select * from ss1_DTS2019021501700; - sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid -----------------------+------------+-------------+--------------+---------------------+-----------+-------------+---------+-----------+-----------+------ - ss1_dts2019021501700 | 12 | 1 | 1 | 9223372036854775807 | 1 | 1 | 31 | f | t | 0 +select * from ss1_TESTTABLE; + sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called | uuid +---------------+------------+-------------+--------------+---------------------+-----------+-------------+---------+-----------+-----------+------ + ss1_testtable | 12 | 1 | 1 | 9223372036854775807 | 1 | 1 | 31 | f | t | 0 (1 row) -drop table s1_DTS2019021501700; -drop sequence ss1_DTS2019021501700; +drop table s1_TESTTABLE; +drop sequence ss1_TESTTABLE; diff --git a/src/test/regress/expected/single_node_macaddr.out b/src/test/regress/expected/single_node_macaddr.out index 28fc8f55d..3b355edd0 100644 --- a/src/test/regress/expected/single_node_macaddr.out +++ b/src/test/regress/expected/single_node_macaddr.out @@ -41,6 +41,7 @@ SELECT * FROM macaddr_data; CREATE INDEX macaddr_data_btree ON macaddr_data USING btree (b); CREATE INDEX macaddr_data_hash ON macaddr_data USING hash (b); +ERROR: access method "hash" does not support row store SELECT a, b, trunc(b) FROM macaddr_data ORDER BY 2, 1; a | b | trunc ----+-------------------+------------------- diff --git a/src/test/regress/expected/single_node_mergeinto.out b/src/test/regress/expected/single_node_mergeinto.out index 16a79c192..86bae4c5a 100644 --- a/src/test/regress/expected/single_node_mergeinto.out +++ b/src/test/regress/expected/single_node_mergeinto.out @@ -211,6 +211,171 @@ select * from fkt; 2 | 2 (2 rows) +-- test for merge with where clauses +create table explain_t1 (a int, b int); +create table explain_t2 (f1 int, f2 int); +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when not matched then + insert values(1,3) where tt2.f1 = 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Merge on public.explain_t1 + Insert Cond: (tt2.f1 = 1) + -> Hash Left Join + Output: tt2.f1, tt2.f2, explain_t1.a, explain_t1.b, explain_t1.ctid, tt2.ctid + Hash Cond: (tt2.f1 = explain_t1.a) + -> Seq Scan on public.explain_t2 tt2 + Output: tt2.f1, tt2.f2, tt2.ctid + -> Hash + Output: explain_t1.a, explain_t1.b, explain_t1.ctid + -> Seq Scan on public.explain_t1 + Output: explain_t1.a, explain_t1.b, explain_t1.ctid +(11 rows) + +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when matched then + update 
set b = 10 where explain_t1.a = 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Merge on public.explain_t1 + Update Cond: (explain_t1.a = 1) + -> Hash Join + Output: tt2.f1, tt2.f2, explain_t1.a, explain_t1.b, explain_t1.ctid, tt2.ctid + Hash Cond: (explain_t1.a = tt2.f1) + -> Seq Scan on public.explain_t1 + Output: explain_t1.a, explain_t1.b, explain_t1.ctid + -> Hash + Output: tt2.f1, tt2.f2, tt2.ctid + -> Seq Scan on public.explain_t2 tt2 + Output: tt2.f1, tt2.f2, tt2.ctid +(11 rows) + +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when matched then + update set b = 10 where explain_t1.a = 1 +when not matched then + insert values(1,3) where tt2.f1 = 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Merge on public.explain_t1 + Update Cond: (explain_t1.a = 1) + Insert Cond: (tt2.f1 = 1) + -> Hash Left Join + Output: tt2.f1, tt2.f2, explain_t1.a, explain_t1.b, explain_t1.ctid, tt2.ctid + Hash Cond: (tt2.f1 = explain_t1.a) + -> Seq Scan on public.explain_t2 tt2 + Output: tt2.f1, tt2.f2, tt2.ctid + -> Hash + Output: explain_t1.a, explain_t1.b, explain_t1.ctid + -> Seq Scan on public.explain_t1 + Output: explain_t1.a, explain_t1.b, explain_t1.ctid +(12 rows) + +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when matched then + update set b = 10 where tt2.f2 = 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Merge on public.explain_t1 + Update Cond: (tt2.f2 = 1) + -> Hash Join + Output: tt2.f1, tt2.f2, explain_t1.a, explain_t1.b, explain_t1.ctid, tt2.ctid + Hash Cond: (explain_t1.a = tt2.f1) + -> Seq Scan on public.explain_t1 + Output: explain_t1.a, explain_t1.b, explain_t1.ctid + -> Hash + Output: tt2.f1, tt2.f2, tt2.ctid + -> Seq Scan on public.explain_t2 tt2 + Output: tt2.f1, tt2.f2, tt2.ctid +(11 rows) + +-- duplicate alias on source table +explain (verbose on, costs off) merge into explain_t2 t2 using ( + select + t1.a, + t1.b, + t1.a aa, + t1.b bb + from + explain_t1 t1 +) tmp on (t2.f1 = tmp.b) +when matched THEN + update + set + t2.f2 = tmp.aa + where + t2.f1 = tmp.bb; + QUERY PLAN +------------------------------------------------------------------------ + Merge on public.explain_t2 t2 + Update Cond: (t2.f1 = tmp.bb) + -> Hash Join + Output: t1.a, t1.b, t1.a, t1.b, t2.f1, t2.f2, t2.ctid, t1.ctid + Hash Cond: (t2.f1 = t1.b) + -> Seq Scan on public.explain_t2 t2 + Output: t2.f1, t2.f2, t2.ctid + -> Hash + Output: t1.a, t1.b, t1.ctid + -> Seq Scan on public.explain_t1 t1 + Output: t1.a, t1.b, t1.ctid +(11 rows) + +explain (verbose on, costs off) merge /*+ leading((t2 t1)) */ into explain_t2 t2 using ( + select + t1.a, + t1.b, + t1.a aa, + t1.b bb + from + explain_t1 t1 +) tmp on (t2.f1 = tmp.b) +when not matched THEN + insert values(1,3) where tmp.bb = 1; + QUERY PLAN +------------------------------------------------------------------------ + Merge on public.explain_t2 t2 + Insert Cond: (tmp.bb = 1) + -> Hash Right Join + Output: t1.a, t1.b, t1.a, t1.b, t2.f1, t2.f2, t2.ctid, t1.ctid + Hash Cond: (t2.f1 = t1.b) + -> Seq Scan on public.explain_t2 t2 + Output: t2.f1, t2.f2, t2.ctid + -> Hash + Output: t1.a, t1.b, t1.ctid + -> Seq Scan on public.explain_t1 t1 + Output: t1.a, t1.b, t1.ctid +(11 rows) + +explain (verbose on, costs off) merge /*+ leading((t1 t2)) */ into explain_t2 t2 using ( + select + t1.a, + 
t1.b, + t1.a aa, + t1.b bb + from + explain_t1 t1 +) tmp on (t2.f1 = tmp.b) +when not matched THEN + insert values(1,3) where tmp.bb = 1; + QUERY PLAN +------------------------------------------------------------------------ + Merge on public.explain_t2 t2 + Insert Cond: (tmp.bb = 1) + -> Hash Left Join + Output: t1.a, t1.b, t1.a, t1.b, t2.f1, t2.f2, t2.ctid, t1.ctid + Hash Cond: (t1.b = t2.f1) + -> Seq Scan on public.explain_t1 t1 + Output: t1.a, t1.b, t1.ctid + -> Hash + Output: t2.f1, t2.f2, t2.ctid + -> Seq Scan on public.explain_t2 t2 + Output: t2.f1, t2.f2, t2.ctid +(11 rows) + ------------------------------------------------ -- clean up ------------------------------------------------ diff --git a/src/test/regress/expected/single_node_opr_sanity.out b/src/test/regress/expected/single_node_opr_sanity.out index 06d86be2c..25b7d96fc 100755 --- a/src/test/regress/expected/single_node_opr_sanity.out +++ b/src/test/regress/expected/single_node_opr_sanity.out @@ -144,9 +144,23 @@ WHERE p1.oid != p2.oid AND p1.pronargs = p2.pronargs AND p1.proargtypes = p2.proargtypes ORDER by 1, 2, 3; - oid | proname | oid | proname ------+---------+-----+--------- -(0 rows) + oid | proname | oid | proname +-------+------------------------+-------+------------------------ + 15303 | format_write | 15364 | format_write + 15316 | sql_set_sql | 15397 | sql_set_sql + 15317 | sql_run | 15400 | sql_run + 15318 | sql_unregister_context | 15401 | sql_unregister_context + 15357 | close | 15469 | close + 15364 | format_write | 15303 | format_write + 15385 | substr | 15481 | substr + 15386 | substr | 15482 | substr + 15397 | sql_set_sql | 15316 | sql_set_sql + 15400 | sql_run | 15317 | sql_run + 15401 | sql_unregister_context | 15318 | sql_unregister_context + 15469 | close | 15357 | close + 15481 | substr | 15385 | substr + 15482 | substr | 15386 | substr +(14 rows) -- Considering only built-in procs (prolang = 12), look for multiple uses -- of the same internal function (ie, matching prosrc fields). 
It's OK to @@ -577,7 +591,6 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 1752 | trunc 1753 | trunc 1818 | regexeqsel - 1849 | nlssort 1982 | comm_check_connection_status 1983 | pg_log_comm_status 1984 | pg_comm_recv_stream @@ -669,6 +682,9 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 2852 | pg_current_xlog_insert_location 2853 | pg_stat_get_backend_waiting 2860 | gs_current_xlog_insert_end_location + 2861 | gs_stat_wal_entrytable + 2862 | gs_walwriter_flush_position + 2863 | gs_walwriter_flush_stat 2893 | xml_in 2894 | xml_out 2895 | xmlcomment @@ -1218,6 +1234,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 4428 | byteawithoutordercolout 4430 | gs_undo_meta 4431 | gs_undo_translot + 4434 | gs_stat_undo 4435 | gs_upload_obs_file 4436 | gs_download_obs_file 4440 | byteawithoutorderwithequalcolin @@ -1240,6 +1257,10 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 4505 | interval_list_agg_transfn 4507 | interval_list_agg_noarg2_transfn 4520 | gs_stat_activity_timeout + 4579 | gs_get_active_archiving_standby + 4580 | gs_pitr_archive_slot_force_advance + 4581 | gs_pitr_clean_history_global_barriers + 4582 | gs_pitr_get_warning_for_xlog_force_recycle 4601 | checksumtext_agg_transfn 4650 | gs_paxos_stat_replication 4651 | pg_cbm_tracked_location @@ -1275,7 +1296,6 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 4764 | ubtoptions 4765 | ubtcostestimate 4767 | gs_read_block_from_remote - 4768 | gs_read_block_from_remote 4789 | remote_rto_stat 4800 | job_cancel 4801 | job_finish @@ -1331,6 +1351,8 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 5128 | gs_get_obs_file_context 5129 | gs_set_obs_file_context 5130 | gs_get_hadr_key_cn + 5255 | pg_shared_memctx_view + 5256 | pg_thread_memctx_view 5345 | pv_builtin_functions 5519 | int1cmp 5520 | hashint1 @@ -1393,6 +1415,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 5813 | hash16out 5814 | hash32in 5815 | hash32out + 5997 | gs_mark_indisvalid 5998 | gs_deployment 5999 | get_gtm_lite_status 6000 | getbucket @@ -1466,7 +1489,6 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 6667 | network_smaller 6668 | max 6669 | min - 7000 | pg_stat_segment_space_info 7001 | pg_stat_segment_extent_usage 7002 | gs_space_shrink 7003 | pg_stat_remain_segment_info @@ -1482,6 +1504,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 7106 | db4ai_predict_by_float8 7107 | db4ai_predict_by_numeric 7108 | db4ai_predict_by_text + 7109 | db4ai_predict_by_float8_array 7777 | sysdate 7800 | init 7801 | analyze_query @@ -1500,18 +1523,12 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 7882 | array_varchar_exists 7883 | array_varchar_prior 7886 | array_varchar_deleteidx - 7887 | array_integer_deleteidx - 7888 | array_integer_exists - 7891 | array_integer_next - 7892 | array_integer_prior 7893 | gs_stat_ustore - 7894 | array_union 7998 | set_working_grand_version_num_manually 8001 | get_paxos_replication_info - 8413 | pg_read_binary_file_blocks 8050 | datalength 8642 | gs_txid_oldestxmin - 8700 | array_union_distinct + 8700 | array_cat_distinct 8701 | array_intersect 8702 | array_intersect_distinct 8703 | array_except @@ -1539,6 +1556,11 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 9033 | gs_get_local_barrier_status 9034 | gs_get_global_barriers_status 9035 | gs_set_obs_delete_location_with_slotname + 9037 | gs_set_standby_cluster_target_barrier_id + 9038 | gs_query_standby_cluster_barrier_id_exist + 9039 | gs_get_standby_cluster_barrier_status + 9036 | 
gs_gsc_clean + 9037 | gs_gsc_dbstat_info 9130 | has_cek_privilege 9131 | has_cek_privilege 9132 | has_cek_privilege @@ -1553,7 +1575,6 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 9141 | gs_streaming_dr_service_truncation_check 9350 | sys_connect_by_path 9351 | connect_by_root - 9376 | gs_write_term_log 9982 | tdigest_mergep 9983 | tdigest_in 9984 | tdigest_out @@ -1564,7 +1585,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999 order by 1; 9989 | tdigest_merge 9990 | percentile_of_value 9999 | pg_test_err_contain_err -(1148 rows) +--?.* -- **************** pg_cast **************** -- Catch bogus values in pg_cast columns (other than cases detected by diff --git a/src/test/regress/expected/single_node_path.out b/src/test/regress/expected/single_node_path.out index 7e8b1da80..ea5d74d96 100644 --- a/src/test/regress/expected/single_node_path.out +++ b/src/test/regress/expected/single_node_path.out @@ -79,3 +79,69 @@ SELECT '' AS count, popen(f1) AS open_path FROM PATH_TBL; | [(11,12),(13,14)] (8 rows) +-- test type coercion for index match +set enable_seqscan = off; +create table test2(column1 float8 not null, column2 char not null collate "C", column3 char(100) not null collate "C", column4 int); +create table test3(like test2 including all); +create index on test2(column1); +create index on test2(column2); +create index on test2(column3); +create index on test2(column4); +explain (costs off) update test2 set column4 = 0 from test3 where test2.column1 > test3.column2 and test2.column2 like test3.column2 and test3.column3 < test3.column3; + QUERY PLAN +------------------------------------------------------------------------- + Update on test2 + -> Nested Loop + -> Seq Scan on test3 + Filter: (column3 < column3) + -> Index Scan using test2_column1_idx on test2 + Index Cond: (column1 > (test3.column2)::double precision) + Filter: (column2 ~~ (test3.column2)::text) +(7 rows) + +explain (costs off) select * from test2, test3 where test2.column1 > test3.column1 and test2.column2 like test3.column2; + QUERY PLAN +---------------------------------------------------- + Nested Loop + -> Seq Scan on test3 + -> Index Scan using test2_column1_idx on test2 + Index Cond: (column1 > test3.column1) + Filter: (column2 ~~ (test3.column2)::text) +(5 rows) + +explain (costs off) select /*+ nestloop(test2 test3) */* from test2, test3 where test2.column2 = test3.column2::varchar; + QUERY PLAN +------------------------------------------------------------------------------ + Nested Loop + -> Seq Scan on test3 + -> Index Scan using test2_column2_idx on test2 + Index Cond: (column2 = ((test3.column2)::character varying)::bpchar) +(4 rows) + +/* cannot use index for bpchar <-> text */ +explain (costs off) merge into test2 using (select '1' AS c1, '5278' as c2) V ON (test2.column3 = V.c2) +WHEN NOT MATCHED THEN INSERT (column1, column2, column3, column4) VALUES (V.c1,1,V.c2,1); + QUERY PLAN +--------------------------------------------------------------- + Merge on test2 + -> Nested Loop Left Join + Join Filter: ((test2.column3)::text = ('5278'::text)) + -> Result + -> Seq Scan on test2 +(5 rows) + +/* index with type coercion is acceptable */ +create index on test2(text(column3)); +explain (costs off) merge into test2 using (select '1' AS c1, '5278' as c2) V ON (test2.column3 = V.c2) +WHEN NOT MATCHED THEN INSERT (column1, column2, column3, column4) VALUES (V.c1,1,V.c2,1); + QUERY PLAN +-------------------------------------------------------------- + Merge on test2 + -> Nested Loop Left Join + -> Result + 
-> Index Scan using test2_text_idx on test2 + Index Cond: ((column3)::text = ('5278'::text)) +(5 rows) + +drop table test2; +drop table test3; diff --git a/src/test/regress/expected/single_node_union.out b/src/test/regress/expected/single_node_union.out index 5f0e5c93a..ffec33a15 100644 --- a/src/test/regress/expected/single_node_union.out +++ b/src/test/regress/expected/single_node_union.out @@ -321,6 +321,68 @@ SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl; 4567890123456789 (3 rows) +-- Test that single node stream plan handles INTERSECT and EXCEPT correctly +set query_dop = 10; +SELECT q2 FROM int8_tbl INTERSECT SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------ + 123 + 4567890123456789 + +(3 rows) + +SELECT q2 FROM int8_tbl INTERSECT ALL SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------ + 123 + 4567890123456789 + 4567890123456789 + +(4 rows) + +SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------- + -4567890123456789 + 456 +(2 rows) + +SELECT q2 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------- + -4567890123456789 + 456 +(2 rows) + +SELECT q2 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------- + -4567890123456789 + 456 + 4567890123456789 +(3 rows) + +SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl; + q1 +---- +(0 rows) + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q2 FROM int8_tbl; + q1 +------------------ + 123 + 4567890123456789 +(2 rows) + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl; + q1 +------------------ + 123 + 4567890123456789 + 4567890123456789 +(3 rows) + +set query_dop = 1; -- -- Mixed types -- @@ -514,6 +576,42 @@ explain (costs off) Index Cond: (ab = 'ab'::text) (7 rows) +-- Test that single node stream plan handles UNION correctly +set query_dop = 10; +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION ALL + SELECT * FROM t2) t + WHERE ab = 'ab'; + QUERY PLAN +--------------------------------------------------- + Result + -> Append + -> Index Scan using t1_ab_idx on t1 + Index Cond: ((a || b) = 'ab'::text) + -> Index Only Scan using t2_pkey on t2 + Index Cond: (ab = 'ab'::text) +(6 rows) + +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION + SELECT * FROM t2) t + WHERE ab = 'ab'; + QUERY PLAN +--------------------------------------------------- + HashAggregate + Group By Key: ((t1.a || t1.b)) + -> Append + -> Index Scan using t1_ab_idx on t1 + Index Cond: ((a || b) = 'ab'::text) + -> Index Only Scan using t2_pkey on t2 + Index Cond: (ab = 'ab'::text) +(7 rows) + +set query_dop = 1; -- -- Test that ORDER BY for UNION ALL can be pushed down to inheritance -- children. 
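
The new union cases above and below all follow one pattern: raise openGauss's SMP degree of parallelism via the query_dop GUC, re-run an ordinary set-operation query, and check that the stream (parallel) plan returns the same rows as the serial plan before resetting query_dop to 1. A minimal sketch of that pattern, using a hypothetical table demo_t in place of the suite's int8_tbl/tenk1 fixtures:

-- sketch only: demo_t is an illustrative stand-in for the regression fixtures
create table demo_t(q1 int8, q2 int8);
insert into demo_t values (123, 456), (4567890123456789, 123);

set query_dop = 10;  -- allow the planner to produce SMP stream plans
select q2 from demo_t intersect select q1 from demo_t order by 1;
select q2 from demo_t except all select q1 from demo_t order by 1;
set query_dop = 1;   -- restore the serial default

drop table demo_t;
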
@@ -645,6 +743,106 @@ WHERE x > 3; 2 | 4 (1 row) +-- Test that single node stream plan handles UNION sub-selects correctly +set query_dop = 10; +explain (costs off) + SELECT * FROM + (SELECT 1 AS t, * FROM tenk1 a + UNION ALL + SELECT 2 AS t, * FROM tenk1 b) c + WHERE t = 2; +ERROR: relation "tenk1" does not exist on datanode1 +LINE 3: (SELECT 1 AS t, * FROM tenk1 a + ^ +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, 2 AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4; + QUERY PLAN +-------------------------------------------- + Unique + -> Sort + Sort Key: (1), (2) + -> Append + -> Result + -> Result + One-Time Filter: false +(7 rows) + +SELECT * FROM + (SELECT 1 AS t, 2 AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4; + t | x +---+--- + 1 | 2 +(1 row) + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, generate_series(1,10) AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: ss.x + -> Subquery Scan on ss + Filter: (ss.x < 4) + -> HashAggregate + Group By Key: (1), (generate_series(1, 10)) + -> Append + -> Result + -> Result +(9 rows) + +SELECT * FROM + (SELECT 1 AS t, generate_series(1,10) AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + t | x +---+--- + 1 | 1 + 1 | 2 + 1 | 3 +(3 rows) + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, (random()*3)::int AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x > 3; + QUERY PLAN +---------------------------------------------------------------------------- + Subquery Scan on ss + Filter: (ss.x > 3) + -> Unique + -> Sort + Sort Key: (1), (((random() * 3::double precision))::integer) + -> Append + -> Result + -> Result +(8 rows) + +SELECT * FROM + (SELECT 1 AS t, (random()*3)::int AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x > 3; + t | x +---+--- + 2 | 4 +(1 row) + +set query_dop = 1; -- Test proper handling of parameterized appendrel paths when the -- potential join qual is expensive create function expensivefunc(int) returns int diff --git a/src/test/regress/expected/single_node_uuid.out b/src/test/regress/expected/single_node_uuid.out index f87df8d08..bcc0b6730 100644 --- a/src/test/regress/expected/single_node_uuid.out +++ b/src/test/regress/expected/single_node_uuid.out @@ -120,6 +120,7 @@ SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-2222-2222-2222-22222222 -- btree and hash index creation test CREATE INDEX guid1_btree ON guid1 USING BTREE (guid_field); CREATE INDEX guid1_hash ON guid1 USING HASH (guid_field); +ERROR: access method "hash" does not support row store -- unique index test CREATE UNIQUE INDEX guid1_unique_BTREE ON guid1 USING BTREE (guid_field); -- should fail @@ -130,7 +131,7 @@ DETAIL: Key (guid_field)=(11111111-1111-1111-1111-111111111111) already exists. 
SELECT count(*) FROM pg_class WHERE relkind='i' AND relname LIKE 'guid%'; count ------- - 3 + 2 (1 row) -- populating the test tables with additional records diff --git a/src/test/regress/expected/single_node_varchar.out b/src/test/regress/expected/single_node_varchar.out index 17858c63f..092a42c4f 100644 --- a/src/test/regress/expected/single_node_varchar.out +++ b/src/test/regress/expected/single_node_varchar.out @@ -108,3 +108,35 @@ SELECT '' AS four, * FROM VARCHAR_TBL; | abcd (4 rows) +create table tab_1(col1 varchar(3)); +create table tab_2(col2 char(3)); +insert into tab_2 values(' '); +insert into tab_1 select col2 from tab_2; +select * from tab_1 where col1 is null; + col1 +------ + +(1 row) + +select * from tab_1 where col1=' '; + col1 +------ +(0 rows) + +delete from tab_1; +set behavior_compat_options = 'char_coerce_compat'; +insert into tab_1 select col2 from tab_2; +select * from tab_1 where col1 is null; + col1 +------ +(0 rows) + +select * from tab_1 where col1=' '; + col1 +------ + +(1 row) + +set behavior_compat_options = ''; +drop table tab_1; +drop table tab_2; diff --git a/src/test/regress/expected/sqlcode_cursor.out b/src/test/regress/expected/sqlcode_cursor.out new file mode 100644 index 000000000..1bcf6d059 --- /dev/null +++ b/src/test/regress/expected/sqlcode_cursor.out @@ -0,0 +1,873 @@ +create schema hw_sqlcode; +set current_schema = hw_sqlcode; +/* ---------anonymous block------------ */ +/* no exception */ +DECLARE + a int; +BEGIN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = , SQLCODE = , SQLERRM = +/* exception */ +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +CREATE or replace procedure func1_1 IS + --PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN others THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +DECLARE + a int; +BEGIN + func1_1(); + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +CONTEXT: SQL statement "CALL func1_1()" +PL/pgSQL function inline_code_block line 3 at PERFORM +NOTICE: SQLSTATE = , SQLCODE = , SQLERRM = +/* commit rollback */ +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + COMMIT; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + ROLLBACK; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +/* PRAGMA AUTONOMOUS_TRANSACTION; */ +CREATE OR REPLACE FUNCTION func5() RETURN void +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +END; +/ +DECLARE + a int; +BEGIN + func5(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = ERROR: division by zero +CONTEXT: SQL statement "SELECT 1/0" +PL/pgSQL function func5() line 6 at assignment +referenced column: func5 + +CREATE OR REPLACE FUNCTION func5_1() RETURN void +AS +DECLARE + 
PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + RAISE NOTICE 'AUTONOMOUS_TRANSACTION SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; + func5_1(); +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +NOTICE: AUTONOMOUS_TRANSACTION SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = +CONTEXT: referenced column: func5_1 + +CREATE or replace procedure func5_2 IS + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN others THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +DECLARE + a int; +BEGIN + func5_2(); + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +CONTEXT: SQL statement "CALL func5_2()" +PL/pgSQL function inline_code_block line 3 at PERFORM +NOTICE: SQLSTATE = , SQLCODE = , SQLERRM = +/* CALL function */ +CREATE OR REPLACE FUNCTION func7() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +END; +/ +DECLARE + a int; +BEGIN + func7(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero +/* RAISE ERROR */ +DECLARE + a int; +BEGIN + RAISE sqlstate 'AA666'; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +ERROR: AA666 +/* ---------function------------ */ +/* no exception */ +CREATE OR REPLACE FUNCTION func1() RETURN void +AS +BEGIN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func1(); +NOTICE: SQLSTATE = , SQLCODE = , SQLERRM = + func1 +------- + +(1 row) + +/* exception */ +CREATE OR REPLACE FUNCTION func2() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func2(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + func2 +------- + +(1 row) + +/* commit rollback */ +CREATE OR REPLACE FUNCTION func3() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + COMMIT; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func3(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + func3 +------- + +(1 row) + +CREATE OR REPLACE FUNCTION func4() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + ROLLBACK; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func4(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + func4 +------- + +(1 row) + +/* PRAGMA AUTONOMOUS_TRANSACTION; */ +CREATE OR REPLACE FUNCTION func5() RETURN void +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +END; +/ +CREATE OR REPLACE FUNCTION func6() RETURN void +AS +DECLARE + a int; +BEGIN + func5(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func6(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = ERROR: division by zero +CONTEXT: SQL statement "SELECT 1/0" +PL/pgSQL function func5() line 6 at 
assignment +referenced column: func5 + + func6 +------- + +(1 row) + +/* CALL function */ +CREATE OR REPLACE FUNCTION func7() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +END; +/ +CREATE OR REPLACE FUNCTION func8() RETURN void +AS +DECLARE + a int; +BEGIN + func7(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func8(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + func8 +------- + +(1 row) + +/* RAISE ERROR */ +CREATE OR REPLACE FUNCTION func9() RETURN void +AS +DECLARE + a int; +BEGIN + RAISE sqlstate 'AA666'; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func9(); +ERROR: AA666 +/* ---------PROCEDURE------------ */ +/* no exception */ +CREATE OR REPLACE PROCEDURE proc1() +AS +BEGIN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc1(); +NOTICE: SQLSTATE = , SQLCODE = , SQLERRM = + proc1 +------- + +(1 row) + +/* exception */ +CREATE OR REPLACE PROCEDURE proc2() +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc2(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + proc2 +------- + +(1 row) + +/* commit rollback */ +CREATE OR REPLACE PROCEDURE proc3() +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + COMMIT; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc3(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + proc3 +------- + +(1 row) + +CREATE OR REPLACE PROCEDURE proc4() +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + ROLLBACK; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc4(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + proc4 +------- + +(1 row) + +/* PRAGMA AUTONOMOUS_TRANSACTION; */ +CREATE OR REPLACE PROCEDURE proc5() +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +END; +/ +CREATE OR REPLACE PROCEDURE proc6() +AS +DECLARE + a int; +BEGIN + proc5(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc6(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = ERROR: division by zero +CONTEXT: SQL statement "SELECT 1/0" +PL/pgSQL function proc5() line 6 at assignment +referenced column: proc5 + + proc6 +------- + +(1 row) + +/* CALL function */ +CREATE OR REPLACE PROCEDURE proc7() +AS +DECLARE + a int; +BEGIN + a := 1/0; +END; +/ +CREATE OR REPLACE PROCEDURE proc8() +AS +DECLARE + a int; +BEGIN + proc7(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc8(); +NOTICE: SQLSTATE = 22012, SQLCODE = 22012, SQLERRM = division by zero + proc8 +------- + +(1 row) + +/* RAISE ERROR */ +CREATE OR REPLACE PROCEDURE proc9() +AS +DECLARE + a int; +BEGIN + RAISE sqlstate 'AA666'; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc9(); +ERROR: AA666 +DROP SCHEMA hw_sqlcode CASCADE; +NOTICE: drop cascades to 21 other objects +DETAIL: drop cascades to function func1_1() 
+drop cascades to function func5_1() +drop cascades to function func5_2() +drop cascades to function func1() +drop cascades to function func2() +drop cascades to function func3() +drop cascades to function func4() +drop cascades to function func5() +drop cascades to function func6() +drop cascades to function func7() +drop cascades to function func8() +drop cascades to function func9() +drop cascades to function proc1() +drop cascades to function proc2() +drop cascades to function proc3() +drop cascades to function proc4() +drop cascades to function proc5() +drop cascades to function proc6() +drop cascades to function proc7() +drop cascades to function proc8() +drop cascades to function proc9() +create schema hw_cursor_state; +set current_schema = hw_cursor_state; +set behavior_compat_options='COMPAT_CURSOR'; +/* ---------anonymous block------------ */ +/*create*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +create table tb_test(col1 int); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,,,false, +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,,,false, +/* select */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +select 1 into v_count; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,true,false,false,1 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,true,false,false,1 +/* insert */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +insert into tb_test select 1; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,true,false,false,1 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,true,false,false,1 +/* update */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,true,false,false,1 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,true,false,false,1 +/* delete */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +delete from tb_test; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,true,false,false,1 +CONTEXT: SQL 
statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,true,false,false,1 +/*Same layer*/ +CREATE OR REPLACE PROCEDURE proc_test1() +as +v_count int; +BEGIN +v_count := 1; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +CREATE OR REPLACE PROCEDURE proc_test2() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +proc_test2(); +proc_test1(); +end +/ +NOTICE: 1,false,true,false,0 +CONTEXT: SQL statement "CALL proc_test2()" +PL/pgSQL function inline_code_block line 4 at PERFORM +NOTICE: 1,false,true,false,0 +CONTEXT: SQL statement "CALL proc_test1()" +PL/pgSQL function inline_code_block line 5 at PERFORM +/*EXCEPTION*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +update tb_test11 set col1=2; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +EXCEPTION +when others then +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,,,false, +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,,,false, +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test11 set col1=2; +EXCEPTION +when others then +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,,,false, +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,,,false, +/*COMMIT ROLLBACK*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +COMMIT; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,false,true,false,0 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,false,true,false,0 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,false,true,false,0 +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +ROLLBACK; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,false,true,false,0 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,false,true,false,0 +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function 
inline_code_block line 5 at PERFORM +NOTICE: 1,false,true,false,0 +/* PRAGMA AUTONOMOUS_TRANSACTION */ +CREATE OR REPLACE PROCEDURE proc_test() +as +DECLARE +v_count int; +PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,false,true,false,0 +CONTEXT: referenced column: proc_test + +NOTICE: 1,false,true,false,0 +/*drop*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +drop table tb_test; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +NOTICE: 1,,,false, +CONTEXT: SQL statement "CALL proc_test()" +PL/pgSQL function inline_code_block line 5 at PERFORM +NOTICE: 1,,,false, +create table staff(id int, name varchar2(10)); +insert into staff values(1, 'xiaoming1'); +insert into staff values(2, 'xiaoming2'); +insert into staff values(3, 'xiaoming'); +insert into staff values(4, 'xiaoming4'); +CREATE OR REPLACE FUNCTION fun_cursor1() return void AS +DECLARE +BEGIN +insert into staff values(3, 'xiaoming'); +dbe_output.print_line('cursor after insert'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +SAVEPOINT my_savepoint; +dbe_output.print_line('cursor after savepoint'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +update staff set name = 'wdc1' where id = 1; +dbe_output.print_line('cursor after update'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +ROLLBACK TO SAVEPOINT my_savepoint; +dbe_output.print_line('cursor after rollback to savepoint'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +CREATE OR REPLACE PROCEDURE fun_cursor2() AS +DECLARE +BEGIN +fun_cursor1(); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +IF SQL%FOUND THEN +dbe_output.print_line('cursor effective'); +END IF; +delete from staff where id = 3; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +call fun_cursor2(); +cursor after insert +NOTICE: true,false,false,1 +CONTEXT: SQL statement "CALL fun_cursor1()" +PL/pgSQL function fun_cursor2() line 4 at PERFORM +cursor after savepoint +NOTICE: false,true,false,0 +CONTEXT: SQL statement "CALL fun_cursor1()" +PL/pgSQL function fun_cursor2() line 4 at PERFORM +cursor after update +NOTICE: true,false,false,1 +CONTEXT: SQL statement "CALL fun_cursor1()" +PL/pgSQL function fun_cursor2() line 4 at PERFORM +cursor after rollback to savepoint +NOTICE: false,true,false,0 +CONTEXT: SQL statement "CALL fun_cursor1()" +PL/pgSQL function fun_cursor2() line 4 at PERFORM +NOTICE: false,true,false,0 +NOTICE: true,false,false,2 + fun_cursor2 +------------- + +(1 row) + +CREATE OR REPLACE FUNCTION fun_cursor1() return void AS +DECLARE +BEGIN +insert into staff values(3, 'xiaoming'); +update staff set name = 'zcna' where id = 1; +--commit; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +RAISE 
division_by_zero; +end; +/ +CREATE OR REPLACE PROCEDURE fun_cursor2() AS +DECLARE +BEGIN +fun_cursor1(); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +IF SQL%FOUND THEN +dbe_output.print_line('cursor effective'); +END IF; +delete from staff where id = 3; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +EXCEPTION +WHEN division_by_zero THEN +RAISE NOTICE 'test:% ... %',SQLCODE,SQLSTATE; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ +call fun_cursor2(); +NOTICE: true,false,false,1 +CONTEXT: SQL statement "CALL fun_cursor1()" +PL/pgSQL function fun_cursor2() line 4 at PERFORM +NOTICE: test:22012 ... 22012 +NOTICE: true,false,false,1 + fun_cursor2 +------------- + +(1 row) + +set behavior_compat_options = ''; +DROP SCHEMA hw_cursor_state CASCADE; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to function proc_test1() +drop cascades to function proc_test2() +drop cascades to function proc_test() +drop cascades to table staff +drop cascades to function fun_cursor1() +drop cascades to function fun_cursor2() diff --git a/src/test/regress/expected/sqlldr/load_to_copy_basic.out b/src/test/regress/expected/sqlldr/load_to_copy_basic.out new file mode 100644 index 000000000..525048d23 --- /dev/null +++ b/src/test/regress/expected/sqlldr/load_to_copy_basic.out @@ -0,0 +1,289 @@ +-- setup +create table SQLLDR_TBL +( + ID NUMBER, + NAME VARCHAR2(20), + CON VARCHAR2(20), + DT DATE +); +select copy_summary_create(); + copy_summary_create +--------------------- + t +(1 row) + +select copy_error_log_create(); + copy_error_log_create +----------------------- + t +(1 row) + +-- comments of load data +load truncate into table sqlldr_tbl +-- comments in load data +fields terminated by ',' +TRAILING NULLCOLS; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM STDIN LOAD DELIMITER ',' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data truncate into table sqlldr_tbl fields terminated by ','; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM STDIN LOAD DELIMITER ',' IGNORE_EXTRA_DATA; +(1 row) + +load append into table sqlldr_tbl fields terminated '|'; + LOAD TRANSFORM TO COPY RESULT +------------------------------------------------------------------- + \COPY sqlldr_tbl FROM STDIN LOAD DELIMITER '|' IGNORE_EXTRA_DATA; +(1 row) + +load data replace into table sqlldr_tbl fields terminated by '\t' TRAILING NULLCOLS; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM STDIN LOAD DELIMITER '\t' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load into table sqlldr_tbl fields terminated '|'; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM STDIN LOAD DELIMITER '|' IGNORE_EXTRA_DATA; +(1 row) + +load data insert into table sqlldr_tbl fields terminated by '\t' TRAILING NULLCOLS; + LOAD TRANSFORM TO COPY RESULT 
+---------------------------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM STDIN LOAD DELIMITER '\t' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test.csv' truncate into table sqlldr_tbl fields terminated by ',' TRAILING NULLCOLS; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.csv' LOAD DELIMITER ',' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_ver.txt' append into table sqlldr_tbl fields terminated by '|'; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------- + \COPY sqlldr_tbl FROM 'test_ver.txt' LOAD DELIMITER '|' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_tab.txt' replace into table sqlldr_tbl fields terminated '\t'; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_tab.txt' into table sqlldr_tbl fields terminated '\t'; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_tab.txt' insert into table sqlldr_tbl fields terminated '\t'; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test.csv' truncate into table sqlldr_tbl fields terminated by ',' TRAILING NULLCOLS; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.csv' LOAD DELIMITER ',' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_ver.txt' append into table sqlldr_tbl fields terminated by '|'; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------- + \COPY sqlldr_tbl FROM 'test_ver.txt' LOAD DELIMITER '|' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_tab.txt' replace into table sqlldr_tbl fields terminated '\t'; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_ver.txt' into table sqlldr_tbl fields terminated by '|'; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM 'test_ver.txt' LOAD DELIMITER '|' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 
'test_tab.txt' insert into table sqlldr_tbl fields terminated '\t'; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test.csv' truncate into table sqlldr_tbl fields terminated by ',,' TRAILING NULLCOLS; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.csv' LOAD DELIMITER ',,' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_ver.txt' append into table sqlldr_tbl fields terminated by '||'; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------- + \COPY sqlldr_tbl FROM 'test_ver.txt' LOAD DELIMITER '||' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_tab.txt' replace into table sqlldr_tbl fields terminated '\t\t'; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t\t' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_ver.txt' into table sqlldr_tbl fields terminated by '||'; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM 'test_ver.txt' LOAD DELIMITER '||' IGNORE_EXTRA_DATA; +(1 row) + +load data infile 'test_tab.txt' insert into table sqlldr_tbl fields terminated '\t\t'; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------------------------------------------------------------- + SELECT 'has_data_in_table' FROM sqlldr_tbl LIMIT 1; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD DELIMITER '\t\t' IGNORE_EXTRA_DATA; +(1 row) + +-- characterset +load data characterset utf8 infile 'test_tab.txt' truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 'utf8' IGNORE_EXTRA_DATA; +(1 row) + +load data characterset 'utf8' infile 'test_tab.txt' replace into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 'utf8' IGNORE_EXTRA_DATA; +(1 row) + +load data characterset "utf8" infile 'test_tab.txt' replace into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 'utf8' IGNORE_EXTRA_DATA; +(1 row) + +load data characterset AL32UTF8 infile 'test_tab.txt' replace into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 
'utf8' IGNORE_EXTRA_DATA; +(1 row) + +load data characterset al32utf8 infile 'test_tab.txt' replace into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 'utf8' IGNORE_EXTRA_DATA; +(1 row) + +load data characterset zhs16gbk infile 'test_tab.txt' replace into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 'gbk' IGNORE_EXTRA_DATA; +(1 row) + +load data characterset zhs32gb18030 infile 'test_tab.txt' replace into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test_tab.txt' LOAD ENCODING 'gb18030' IGNORE_EXTRA_DATA; +(1 row) + +-- when +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (1-1) = '1' trailing nullcols; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD LOG ERRORS DATA WHEN (1-1) = '1' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-2) = '|' trailing nullcols; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD LOG ERRORS DATA WHEN (2-2) = '|' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-4) = 'XY' trailing nullcols; + LOAD TRANSFORM TO COPY RESULT +---------------------------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD LOG ERRORS DATA WHEN (2-4) = 'XY' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA; +(1 row) + +-- load when exceptions +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (0-1) = '1'; +ERROR: WHEN start position 0 should be > 0 +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-0) = '|'; +ERROR: WHEN end position 0 should be > 0 +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-1) = 'XY'; +ERROR: WHEN start position 2 should be <= end position 1 +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (-2-1) = 'XY'; +ERROR: syntax error at or near "-" +LINE 1: ...e "test.txt" truncate into table sqlldr_tbl WHEN (-2-1) = 'X... 
+ ^ +-- copy when exceptions +\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (0-1) = '40'; +ERROR: WHEN start position 0 should be > 0 +\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (2-0) = '40'; +ERROR: WHEN end position 0 should be > 0 +\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (3-1) = '40'; +ERROR: WHEN start position 3 should be <= end position 1 +\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (-3-1) = '40'; +ERROR: syntax error at or near "-" +LINE 1: ...l FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (-3-1) = '4... + ^ +-- options +OPTIONS() load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(skip=-1) load data infile "test.txt" truncate into table sqlldr_tbl; +ERROR: SKIP=-1 in OPTIONS should be >= 0 +OPTIONS(skip=0) load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +--------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD SKIP 0 IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(skip=100) load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD SKIP 100 IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(errors=-1) load data infile "test.txt" truncate into table sqlldr_tbl; +ERROR: ERRORS=-1 in OPTIONS should be >= 0 +OPTIONS(errors=2) load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +----------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD LOG ERRORS DATA REJECT LIMIT '2' IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(errors=10) load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +------------------------------------------------------------------------------------------------------------------------ + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'test.txt' LOAD LOG ERRORS DATA REJECT LIMIT '10' IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(data='file.csv') load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'file.csv' LOAD IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(data="file.csv") load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'file.csv' LOAD IGNORE_EXTRA_DATA; +(1 row) + +OPTIONS(data="file.csv", skip=10, errors=64) load data infile "test.txt" truncate into table sqlldr_tbl; + LOAD TRANSFORM TO COPY RESULT +-------------------------------------------------------------------------------------------------------------------------------- + TRUNCATE TABLE sqlldr_tbl ; \COPY sqlldr_tbl FROM 'file.csv' LOAD LOG ERRORS DATA REJECT LIMIT '64' SKIP 10 IGNORE_EXTRA_DATA; +(1 row) + +-- teardown +drop table sqlldr_tbl; 
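
The load_to_copy cases above show how openGauss rewrites sqlldr-style LOAD DATA statements into \COPY commands: truncate/replace prepend a TRUNCATE TABLE, a bare into/insert first probes that the target table is empty, TRAILING NULLCOLS becomes FILL_MISSING_FIELDS 'multi', OPTIONS(skip=N) and OPTIONS(errors=N) become SKIP N and LOG ERRORS DATA REJECT LIMIT 'N', characterset maps onto ENCODING, and OPTIONS(data=...) takes precedence over the infile clause. A minimal sketch of one such round trip, with a hypothetical table demo_ldr and data file demo.csv (the rewrite shape below is inferred from the expected outputs above; exact option order may vary):

-- sketch only: demo_ldr and demo.csv are illustrative names
create table demo_ldr(id number, name varchar2(20));

OPTIONS(skip=1, errors=5)
load data infile 'demo.csv' truncate into table demo_ldr
fields terminated by ',' trailing nullcols;

-- per the outputs above, this should report a transform of the form:
-- TRUNCATE TABLE demo_ldr ; \COPY demo_ldr FROM 'demo.csv' LOAD LOG ERRORS DATA
--   REJECT LIMIT '5' SKIP 1 DELIMITER ',' FILL_MISSING_FIELDS 'multi' IGNORE_EXTRA_DATA;

drop table demo_ldr;
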
diff --git a/src/test/regress/expected/sw_bugfix.out b/src/test/regress/expected/sw_bugfix-1.out similarity index 58% rename from src/test/regress/expected/sw_bugfix.out rename to src/test/regress/expected/sw_bugfix-1.out index cfd2966af..328ba3c16 100644 --- a/src/test/regress/expected/sw_bugfix.out +++ b/src/test/regress/expected/sw_bugfix-1.out @@ -450,7 +450,6 @@ explain (costs off) select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_ -> Seq Scan on test_hcb_fqb t1 (19 rows) -/* swcb中参数为proceder参数 */ CREATE OR REPLACE FUNCTION test_hcb_pro1(i_id in int) return int AS o_out int; @@ -858,7 +857,6 @@ where level>=1 start with id = 141 connect by prior pid=id; -> WorkTable Scan on tmp_reuslt (37 rows) -/* core issue */ explain select sysdate from test_hcb_ptb t1 start with id = 141 connect by prior pid=id; QUERY PLAN ------------------------------------------------------------------------------------------------- @@ -882,7 +880,6 @@ select count(sysdate) from test_hcb_ptb t1 start with id = 141 connect by prior 6 (1 row) -/* core issue sys_connect_by_path(cosnt) core issue */ select t1.id,t1.pid,LEVEL,sys_connect_by_path(null, '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid; id | pid | level | pa | name -----+-----+-------+--------------------+---------- @@ -955,7 +952,6 @@ select t1.id,t1.pid,LEVEL,sys_connect_by_path(' ', '->') pa, t1.name from test_ 169 | 151 | 3 | -> -> ->  | 第九单元 (19 rows) -/* core issue, check args */ explain select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid Order By NLSSORT ( id, ' NLS_SORT = SCHINESE_PINYIN_M ' ); QUERY PLAN ------------------------------------------------------------------------------------------------- @@ -999,7 +995,6 @@ select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect 169 | 151 | 第九单元 | 3 (19 rows) -/* core issue, check args */ drop table if exists region cascade; create table region ( @@ -1030,7 +1025,6 @@ GROUP BY 2; drop table item_price_history; drop table region; -/* */ create table test1(id int,pid int,name text, level int); create table test2(id int,pid int,name text, connect_by_iscycle int); create table test3(id int,pid int,name text, connect_by_isleaf int); @@ -1077,7 +1071,6 @@ drop table test1; drop table test2; drop table test3; drop table test4; -/* */ /* 查询1 */ SELECT TRAIT_VALUE_CD FROM trait_value @@ -1133,7 +1126,6 @@ CONNECT BY PRIOR LOCATION_ID = REGION_MGR_ASSOCIATE_ID; drop table region; drop table item_price_history; -/* */ create table test1(c1 int, c2 int, c3 int); insert into test1 values(1,1,1); insert into test1 values(2,2,2); @@ -1372,1139 +1364,109 @@ explain select * from t1 start with id = 1 connect by prior id != pid; -> Seq Scan on t1 (cost=0.00..1.09 rows=9 width=10) (12 rows) -create table tsc_rtbl(c_int int,c_varchar1 varchar,c_varchar2 varchar); -alter table tsc_rtbl drop column c_varchar2; -alter table tsc_rtbl add column c_varchar2 varchar; -select c_int,c_varchar1,c_varchar2 from tsc_rtbl -start with c_int<10 connect by nocycle prior c_int=c_int; - c_int | c_varchar1 | c_varchar2 --------+------------+------------ -(0 rows) - -create table t1_area (id int4,name text, fatherid int4, name_desc text); -insert into t1_area values (1, '中国', 0, 'China'); -insert into t1_area values (2, '湖南省',1 , 'Hunan'); -insert into t1_area values (3, '广东省',1 , 'Guangdong'); -insert into t1_area values (4, '海南省',1 , 'Hainan'); -insert into t1_area values (5, '河北省',1 , 'Hebei'); -insert into t1_area 
values (6, '河南省',1 , 'Henan'); -insert into t1_area values (7, '山东省',1 , 'Shandong'); -insert into t1_area values (8, '湖北省',1 , 'Hubei'); -insert into t1_area values (9, '江苏省',1 , 'Jiangsu'); -insert into t1_area values (10,'深圳市',3 , 'Shenzhen'); -insert into t1_area values (11,'长沙市',2 , 'Changsha'); -insert into t1_area values (22,'祁北县',13, 'Qibei'); -insert into t1_area values (12,'南山区',10, 'Nanshan'); -insert into t1_area values (21,'祁西县',13, 'Qixi'); -insert into t1_area values (13,'衡阳市',2 , 'Hengyang'); -insert into t1_area values (14,'耒阳市',13, 'Leiyang'); -insert into t1_area values (15,'龙岗区',10, 'Longgang'); -insert into t1_area values (16,'福田区',10, 'Futian'); -insert into t1_area values (17,'宝安区',10, 'Baoan'); -insert into t1_area values (19,'祁东县',13, 'Qidong'); -insert into t1_area values (18,'常宁市',13, 'Changning'); -insert into t1_area values (20,'祁南县',13, 'Qinan'); -SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->') -FROM t1_area -START WITH name = '耒阳市' -CONNECT BY id = PRIOR fatherid; - id | name | fatherid | name_desc | connect_by_root | sys_connect_by_path -----+--------+----------+-----------+-----------------+----------------------------------- - 14 | 耒阳市 | 13 | Leiyang | Leiyang | ->Leiyang - 13 | 衡阳市 | 2 | Hengyang | Leiyang | ->Leiyang->Hengyang - 2 | 湖南省 | 1 | Hunan | Leiyang | ->Leiyang->Hengyang->Hunan - 1 | 中国 | 0 | China | Leiyang | ->Leiyang->Hengyang->Hunan->China -(4 rows) - ---创建drop column并加回场景 -alter table t1_area drop column name_desc; -alter table t1_area add column name_desc text; --- 原有备drop列为空 -SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->') -FROM t1_area -START WITH name = '耒阳市' -CONNECT BY id = PRIOR fatherid; - id | name | fatherid | name_desc | connect_by_root | sys_connect_by_path -----+--------+----------+-----------+-----------------+-------------------------- - 14 | 耒阳市 | 13 | | | ->null - 13 | 衡阳市 | 2 | | | ->null->null - 2 | 湖南省 | 1 | | | ->null->null->null - 1 | 中国 | 0 | | | ->null->null->null->null -(4 rows) - --- 新插入相同数据,原有drop列后的空值和当前有效值并存 -insert into t1_area values (1, '中国', 0, 'China'); -insert into t1_area values (2, '湖南省',1 , 'Hunan'); -insert into t1_area values (3, '广东省',1 , 'Guangdong'); -insert into t1_area values (4, '海南省',1 , 'Hainan'); -insert into t1_area values (5, '河北省',1 , 'Hebei'); -insert into t1_area values (6, '河南省',1 , 'Henan'); -insert into t1_area values (7, '山东省',1 , 'Shandong'); -insert into t1_area values (8, '湖北省',1 , 'Hubei'); -insert into t1_area values (9, '江苏省',1 , 'Jiangsu'); -insert into t1_area values (10,'深圳市',3 , 'Shenzhen'); -insert into t1_area values (11,'长沙市',2 , 'Changsha'); -insert into t1_area values (22,'祁北县',13, 'Qibei'); -insert into t1_area values (12,'南山区',10, 'Nanshan'); -insert into t1_area values (21,'祁西县',13, 'Qixi'); -insert into t1_area values (13,'衡阳市',2 , 'Hengyang'); -insert into t1_area values (14,'耒阳市',13, 'Leiyang'); -insert into t1_area values (15,'龙岗区',10, 'Longgang'); -insert into t1_area values (16,'福田区',10, 'Futian'); -insert into t1_area values (17,'宝安区',10, 'Baoan'); -insert into t1_area values (19,'祁东县',13, 'Qidong'); -insert into t1_area values (18,'常宁市',13, 'Changning'); -insert into t1_area values (20,'祁南县',13, 'Qinan'); -SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->') -FROM t1_area -START WITH name = '耒阳市' -CONNECT BY id = PRIOR fatherid; - id | name | fatherid | name_desc | connect_by_root | sys_connect_by_path 
-----+--------+----------+-----------+-----------------+----------------------------------- - 14 | 耒阳市 | 13 | | | ->null - 14 | 耒阳市 | 13 | Leiyang | Leiyang | ->Leiyang - 13 | 衡阳市 | 2 | | | ->Leiyang->null - 13 | 衡阳市 | 2 | | | ->null->null - 13 | 衡阳市 | 2 | Hengyang | Leiyang | ->Leiyang->Hengyang - 13 | 衡阳市 | 2 | Hengyang | null | ->null->Hengyang - 2 | 湖南省 | 1 | | | ->null->Hengyang->null - 2 | 湖南省 | 1 | | | ->Leiyang->Hengyang->null - 2 | 湖南省 | 1 | | | ->null->null->null - 2 | 湖南省 | 1 | | | ->Leiyang->null->null - 2 | 湖南省 | 1 | Hunan | null | ->null->Hengyang->Hunan - 2 | 湖南省 | 1 | Hunan | Leiyang | ->Leiyang->Hengyang->Hunan - 2 | 湖南省 | 1 | Hunan | null | ->null->null->Hunan - 2 | 湖南省 | 1 | Hunan | Leiyang | ->Leiyang->null->Hunan - 1 | 中国 | 0 | | | ->Leiyang->null->Hunan->null - 1 | 中国 | 0 | | | ->null->null->Hunan->null - 1 | 中国 | 0 | | | ->Leiyang->Hengyang->Hunan->null - 1 | 中国 | 0 | | | ->null->Hengyang->Hunan->null - 1 | 中国 | 0 | | | ->Leiyang->null->null->null - 1 | 中国 | 0 | | | ->null->null->null->null - 1 | 中国 | 0 | | | ->Leiyang->Hengyang->null->null - 1 | 中国 | 0 | | | ->null->Hengyang->null->null - 1 | 中国 | 0 | China | Leiyang | ->Leiyang->null->Hunan->China - 1 | 中国 | 0 | China | null | ->null->null->Hunan->China - 1 | 中国 | 0 | China | Leiyang | ->Leiyang->Hengyang->Hunan->China - 1 | 中国 | 0 | China | null | ->null->Hengyang->Hunan->China - 1 | 中国 | 0 | China | Leiyang | ->Leiyang->null->null->China - 1 | 中国 | 0 | China | null | ->null->null->null->China - 1 | 中国 | 0 | China | Leiyang | ->Leiyang->Hengyang->null->China - 1 | 中国 | 0 | China | null | ->null->Hengyang->null->China -(30 rows) - -SELECT * FROM t1_area START WITH id in ('1','2') CONNECT BY PRIOR fatherid = id; - id | name | fatherid | name_desc -----+--------+----------+----------- - 1 | 中国 | 0 | - 2 | 湖南省 | 1 | - 1 | 中国 | 0 | China - 2 | 湖南省 | 1 | Hunan - 1 | 中国 | 0 | - 1 | 中国 | 0 | - 1 | 中国 | 0 | China - 1 | 中国 | 0 | China -(8 rows) - -SELECT * FROM t1_area START WITH (cast(id as varchar) COLLATE "C") in (cast(+ (id) as varchar) COLLATE "C") and id < 4 connect by id = prior fatherid; - id | name | fatherid | name_desc -----+--------+----------+----------- - 1 | 中国 | 0 | - 2 | 湖南省 | 1 | - 3 | 广东省 | 1 | - 1 | 中国 | 0 | China - 2 | 湖南省 | 1 | Hunan - 3 | 广东省 | 1 | Guangdong - 1 | 中国 | 0 | - 1 | 中国 | 0 | - 1 | 中国 | 0 | - 1 | 中国 | 0 | - 1 | 中国 | 0 | China - 1 | 中国 | 0 | China - 1 | 中国 | 0 | China - 1 | 中国 | 0 | China -(14 rows) - -SELECT * FROM t1_area, tsc_rtbl START WITH id = 1 CONNECT BY PRIOR fatherid = id; - id | name | fatherid | name_desc | c_int | c_varchar1 | c_varchar2 -----+------+----------+-----------+-------+------------+------------ -(0 rows) - -SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->') -FROM t1_area; -ERROR: Invalid function call. -DETAIL: START WITH CONNECT BY function found in non-hierarchical query. 
-CONTEXT: referenced column: connect_by_root -/* fix start with in with clause */ -explain (costs off) WITH WITH_001 AS (SELECT 1 FROM offers_20050701 ,trait_value START WITH PARTY_ID=TRAIT_VAL CONNECT BY PRIOR TRAIT_VALUE_CD LIKE '%V%') -SELECT mfg -FROM brand ,trait_value ,WITH_001 -START WITH TRAIT_VALUE_CD=brand_name -CONNECT BY PRIOR brand_cd=UOM_CD; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt - CTE tmp_reuslt - -> StartWith Operator - Start With pseudo atts: RUITR, array_key_3 - -> Recursive Union - -> Nested Loop - -> CTE Scan on tmp_reuslt - CTE tmp_reuslt - -> StartWith Operator - Start With pseudo atts: RUITR, array_key_4 - -> Recursive Union - -> Hash Join - Hash Cond: ((swtest.offers_20050701.party_id)::text = (swtest.trait_value.trait_val)::text) - -> Seq Scan on offers_20050701 - -> Hash - -> Seq Scan on trait_value - -> Nested Loop - -> Nested Loop - -> WorkTable Scan on tmp_reuslt - Filter: (("trait_value@trait_value_cd")::text ~~ '%V%'::text) - -> Materialize - -> Seq Scan on trait_value - -> Materialize - -> Seq Scan on offers_20050701 - -> Materialize - -> Hash Join - Hash Cond: ((swtest.brand.brand_name)::text = (swtest.trait_value.trait_value_cd)::text) - -> Seq Scan on brand - -> Hash - -> Seq Scan on trait_value - -> Hash Join - Hash Cond: ((tmp_reuslt."brand@brand_cd")::text = (swtest.trait_value.uom_cd)::text) - -> Nested Loop - -> WorkTable Scan on tmp_reuslt - -> Materialize - -> Seq Scan on brand - -> Hash - -> Nested Loop - -> CTE Scan on tmp_reuslt - CTE tmp_reuslt - -> StartWith Operator - Start With pseudo atts: RUITR, array_key_4 - -> Recursive Union - -> Hash Join - Hash Cond: ((swtest.offers_20050701.party_id)::text = (swtest.trait_value.trait_val)::text) - -> Seq Scan on offers_20050701 - -> Hash - -> Seq Scan on trait_value - -> Nested Loop - -> Nested Loop - -> WorkTable Scan on tmp_reuslt - Filter: (("trait_value@trait_value_cd")::text ~~ '%V%'::text) - -> Materialize - -> Seq Scan on trait_value - -> Materialize - -> Seq Scan on offers_20050701 - -> Materialize - -> Seq Scan on trait_value -(58 rows) - -WITH WITH_001 AS (SELECT 1 FROM offers_20050701 ,trait_value START WITH PARTY_ID=TRAIT_VAL CONNECT BY PRIOR TRAIT_VALUE_CD LIKE '%V%') -SELECT mfg -FROM brand ,trait_value ,WITH_001 -START WITH TRAIT_VALUE_CD=brand_name -CONNECT BY PRIOR brand_cd=UOM_CD; - mfg ------ -(0 rows) - -/* fix reference to level in connect by function calls */ -SELECT 1, level FROM t1_area CONNECT BY length(level) IS NULL; - ?column? 
| level -----------+------- -(0 rows) - -/* prior params of procedure */ -create or replace function test_tmp1(out id int,out pid int,out name varchar,out level int) return SETOF RECORD -IS -declare -CURSOR C1(sedid int) IS select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id = sedid connect by prior pid=id; -begin -open C1(141); -loop -fetch C1 into id,pid,name,level; -EXIT WHEN C1%NOTFOUND; -return next; -end loop; -close C1; -end; -/ -select * from test_tmp1(); - id | pid | name | level ------+-----+----------+------- - 141 | 131 | 江南摩卡 | 1 - 131 | 121 | 东山街 | 2 - 121 | 111 | 江宁区 | 3 - 111 | 11 | 南京市 | 4 - 11 | 1 | 江苏省 | 5 - 1 | 0 | 中国 | 6 -(6 rows) - -drop procedure test_tmp1; -drop table t1_area; -drop table tsc_rtbl; --- 原问题单场景,connect_by_root(1)出现在在表达式中报错 -explain -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id) -from test_hcb_ptb t1 -where connect_by_root(1) > 0 -start with id = 141 -connect by prior pid=id; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Result (cost=37.12..39.89 rows=101 width=190) - One-Time Filter: ((connect_by_root('1'::text))::bigint > 0) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..37.11 rows=101 width=102) - Start With pseudo atts: RUITR, array_key_9, array_col_1 - -> Recursive Union (cost=0.00..37.11 rows=101 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102) - Filter: (id = 141) - -> Hash Join (cost=0.33..3.23 rows=10 width=102) - Hash Cond: (t1.id = tmp_reuslt."t1@pid") - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) - -> Hash (cost=0.20..0.20 rows=10 width=4) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) - -> CTE Scan on tmp_reuslt (cost=0.01..2.03 rows=101 width=190) -(14 rows) - -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id) -from test_hcb_ptb t1 -where connect_by_root(1) > 0 -start with id = 141 -connect by prior pid=id; - id | pid | name | le | connect_by_root | connect_by_root ------+-----+----------+----+-----------------+----------------- - 141 | 131 | 江南摩卡 | 1 | 1 | 141 - 131 | 121 | 东山街 | 2 | 1 | 141 - 121 | 111 | 江宁区 | 3 | 1 | 141 - 111 | 11 | 南京市 | 4 | 1 | 141 - 11 | 1 | 江苏省 | 5 | 1 | 141 - 1 | 0 | 中国 | 6 | 1 | 141 -(6 rows) - --- 扩展场景, connect_by_root(id)报错找不到列 -explain -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id) -from test_hcb_ptb t1 -where connect_by_root(id) > 0 -start with id = 141 -connect by prior pid=id; - QUERY PLAN -------------------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=37.11..40.39 rows=34 width=190) - Filter: ((connect_by_root(("t1@id")::text))::bigint > 0) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..37.11 rows=101 width=102) - Start With pseudo atts: RUITR, array_key_9, array_col_1 - -> Recursive Union (cost=0.00..37.11 rows=101 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102) - Filter: (id = 141) - -> Hash Join (cost=0.33..3.23 rows=10 width=102) - Hash Cond: (t1.id = tmp_reuslt."t1@pid") - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) - -> Hash (cost=0.20..0.20 rows=10 width=4) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) -(13 rows) - -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id) -from test_hcb_ptb t1 -where connect_by_root(id) > 0 -start with id = 141 -connect by prior pid=id; - id | pid | 
name | le | connect_by_root | connect_by_root ------+-----+----------+----+-----------------+----------------- - 141 | 131 | 江南摩卡 | 1 | 1 | 141 - 131 | 121 | 东山街 | 2 | 1 | 141 - 121 | 111 | 江宁区 | 3 | 1 | 141 - 111 | 11 | 南京市 | 4 | 1 | 141 - 11 | 1 | 江苏省 | 5 | 1 | 141 - 1 | 0 | 中国 | 6 | 1 | 141 -(6 rows) - --- 扩展场景,sys_connect_by_path(123, '-') is not null -explain -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-') -from test_hcb_ptb t1 -where sys_connect_by_path(123, '-') is not null -start with id = 141 -connect by prior pid=id; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Result (cost=37.11..40.14 rows=101 width=190) - One-Time Filter: (sys_connect_by_path('123'::text, '-'::text) IS NOT NULL) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..37.11 rows=101 width=102) - Start With pseudo atts: RUITR, array_key_9, array_col_1 - -> Recursive Union (cost=0.00..37.11 rows=101 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102) - Filter: (id = 141) - -> Hash Join (cost=0.33..3.23 rows=10 width=102) - Hash Cond: (t1.id = tmp_reuslt."t1@pid") - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) - -> Hash (cost=0.20..0.20 rows=10 width=4) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) - -> CTE Scan on tmp_reuslt (cost=0.00..2.02 rows=101 width=190) -(14 rows) - -create table ctI as select t1.id,t1.pid,t1.name,level as le from test_hcb_ptb t1 start with id=141 connect by prior id=pid; -create table ctII as select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid; -\d ctI; - Table "swtest.cti" - Column | Type | Modifiers ---------+-----------------------+----------- - id | integer | - pid | integer | - name | character varying(80) | - le | integer | - -\d ctII; - Table "swtest.ctii" - Column | Type | Modifiers ---------+-----------------------+----------- - id | integer | - pid | integer | - name | character varying(80) | - level | integer | - -drop table ctI; -drop table ctII; -/* - * NOTE: need do upgrade change to have syc_conenct_by_path()/connect_by_root() to be volatile - */ -/* -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-') -from test_hcb_ptb t1 -where sys_connect_by_path(123, '-') is not null -start with id = 141 -connect by prior pid=id; -*/ --- 扩展场景,sys_connect_by_path(123, '-') 验证能够被正确匹配 -explain -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-') -from test_hcb_ptb t1 -where sys_connect_by_path(123, '-') like '-123-123-123%' -start with id = 141 -connect by prior pid=id; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Result (cost=37.11..40.14 rows=101 width=190) - One-Time Filter: (sys_connect_by_path('123'::text, '-'::text) ~~ '-123-123-123%'::text) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..37.11 rows=101 width=102) - Start With pseudo atts: RUITR, array_key_9, array_col_1 - -> Recursive Union (cost=0.00..37.11 rows=101 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102) - Filter: (id = 141) - -> Hash Join (cost=0.33..3.23 rows=10 width=102) - Hash Cond: (t1.id = tmp_reuslt."t1@pid") - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) - -> Hash (cost=0.20..0.20 rows=10 width=4) - -> WorkTable Scan on tmp_reuslt 
(cost=0.00..0.20 rows=10 width=4) - -> CTE Scan on tmp_reuslt (cost=0.01..2.02 rows=101 width=190) -(14 rows) - -/* - * NOTE: need do upgrade change to have syc_conenct_by_path()/connect_by_root() to be volatile - */ -/* -select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-') -from test_hcb_ptb t1 -where sys_connect_by_path(123, '-') like '-123-123-123%' -start with id = 141 -connect by prior pid=id; -*/ -/* testing distinct qualifier */ -select distinct id,pid,name,LEVEL from t1 start with id = 1 connect by prior pid=id order by 1; - id | pid | name | level -----+-----+------+------- - 1 | 0 | 1 | 1 -(1 row) - -/* testing NOT expression */ -select t1.id, t1.pid, t1.name from t1 start with not id=1 connect by prior pid=id; - id | pid | name -----+-----+------ - 2 | 1 | 2 - 3 | 0 | 3 - 4 | 1 | 4 - 5 | 2 | 5 - 6 | 3 | 6 - 7 | 4 | 7 - 8 | 4 | 8 - 9 | 7 | 9 - 1 | 0 | 1 - 1 | 0 | 1 - 2 | 1 | 2 - 3 | 0 | 3 - 4 | 1 | 4 - 4 | 1 | 4 - 7 | 4 | 7 - 1 | 0 | 1 - 1 | 0 | 1 - 1 | 0 | 1 - 4 | 1 | 4 - 1 | 0 | 1 -(20 rows) - -/* testing func expr in connect by clause */ -explain select trim(t1.name) from test_hcb_ptb t1 connect by trim(t1.name) is not null; +-- test keywords +CREATE TABLE start(connect int, prior int); +CREATE TABLE connect(start int, prior int); +CREATE TABLE prior(start int, connect int); +CREATE TABLE siblings(start int, connect int, prior int); +INSERT INTO start VALUES(1,2); +INSERT INTO start VALUES(1,3); +INSERT INTO start VALUES(3,4); +INSERT INTO start VALUES(3,5); +INSERT INTO start VALUES(5,6); +INSERT INTO start VALUES(6,7); +INSERT INTO connect VALUES(1,2); +INSERT INTO connect VALUES(1,3); +INSERT INTO connect VALUES(3,4); +INSERT INTO connect VALUES(3,5); +INSERT INTO connect VALUES(5,6); +INSERT INTO connect VALUES(6,7); +EXPLAIN SELECT * FROM START START /* GAUSSDB */ WITH connect = 1 CONNECT +/*GAUSS*/BY PRIOR prior = prior; QUERY PLAN -------------------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=13471.92..22689.36 rows=409664 width=178) + CTE Scan on tmp_reuslt (cost=810.83..1047.45 rows=11831 width=8) CTE tmp_reuslt - -> StartWith Operator (cost=0.00..13471.92 rows=409664 width=102) - Start With pseudo atts: RUITR - -> Recursive Union (cost=0.00..13471.92 rows=409664 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) - -> Nested Loop (cost=0.00..527.60 rows=40960 width=102) - -> WorkTable Scan on tmp_reuslt (cost=0.00..12.80 rows=640 width=0) - -> Materialize (cost=0.00..2.96 rows=64 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) -(10 rows) + -> StartWith Operator (cost=0.00..810.83 rows=11831 width=8) + Start With pseudo atts: RUITR, array_key_2 + -> Recursive Union (cost=0.00..810.83 rows=11831 width=8) + -> Seq Scan on start (cost=0.00..36.86 rows=11 width=8) + Filter: (connect = 1) + -> Hash Join (cost=3.58..53.74 rows=1182 width=8) + Hash Cond: (swtest.start.prior = tmp_reuslt."start@prior") + -> Seq Scan on start (cost=0.00..31.49 rows=2149 width=8) + -> Hash (cost=2.20..2.20 rows=110 width=4) + -> WorkTable Scan on tmp_reuslt (cost=0.00..2.20 rows=110 width=4) +(12 rows) -/* fix create table as with start with */ -create table ct as select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid; -drop table ct; -set current_schema = public; -create table t1(c1 int,c2 int,c3 int); -insert into t1 values(1,1,1); -insert into t1 values(2,2,2); -select *, 
connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2; - c1 | c2 | c3 | connect_by_iscycle -----+----+----+-------------------- - 1 | 1 | 1 | 0 - 1 | 1 | 1 | 1 -(2 rows) +EXPLAIN SELECT prior AS start, connect AS prior, prior FROM START START +START WITH connect = 1 CONNECT BY PRIOR /* test prior */ prior = prior; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + CTE Scan on tmp_reuslt (cost=810.83..1047.45 rows=11831 width=8) + CTE tmp_reuslt + -> StartWith Operator (cost=0.00..810.83 rows=11831 width=8) + Start With pseudo atts: RUITR, array_key_2 + -> Recursive Union (cost=0.00..810.83 rows=11831 width=8) + -> Seq Scan on start (cost=0.00..36.86 rows=11 width=8) + Filter: (connect = 1) + -> Hash Join (cost=3.58..53.74 rows=1182 width=8) + Hash Cond: (start.prior = tmp_reuslt."start@prior") + -> Seq Scan on start (cost=0.00..31.49 rows=2149 width=8) + -> Hash (cost=2.20..2.20 rows=110 width=4) + -> WorkTable Scan on tmp_reuslt (cost=0.00..2.20 rows=110 width=4) +(12 rows) -insert into t1 values(1,1,1); -insert into t1 values(2,2,2); -select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2; - c1 | c2 | c3 | connect_by_iscycle -----+----+----+-------------------- - 1 | 1 | 1 | 0 - 1 | 1 | 1 | 0 - 1 | 1 | 1 | 1 -(3 rows) - -insert into t1 values(1,NULL,1); -select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2 nulls first; - c1 | c2 | c3 | connect_by_iscycle -----+----+----+-------------------- - 1 | | 1 | 0 - 1 | 1 | 1 | 0 - 1 | 1 | 1 | 0 - 1 | 1 | 1 | 1 -(4 rows) - -select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2 nulls last; - c1 | c2 | c3 | connect_by_iscycle -----+----+----+-------------------- - 1 | 1 | 1 | 0 - 1 | 1 | 1 | 0 - 1 | | 1 | 0 - 1 | 1 | 1 | 1 -(4 rows) - -delete from t1 where c2 is null; -select *, connect_by_iscycle from t1 start with c1<3 connect by nocycle prior c1 StartWith Operator (cost=0.00..12299132.73 rows=378304445 width=12) + -> StartWith Operator (cost=0.00..15013894.35 rows=461822249 width=8) Start With pseudo atts: RUITR - -> Recursive Union (cost=0.00..12299132.73 rows=378304445 width=12) - -> Seq Scan on t1 (cost=0.00..29.45 rows=1945 width=12) - -> Nested Loop (cost=0.00..473301.44 rows=37830250 width=12) - -> WorkTable Scan on tmp_reuslt (cost=0.00..389.00 rows=19450 width=0) - -> Materialize (cost=0.00..39.17 rows=1945 width=12) - -> Seq Scan on t1 (cost=0.00..29.45 rows=1945 width=12) + -> Recursive Union (cost=0.00..15013894.35 rows=461822249 width=8) + -> Seq Scan on connect (cost=0.00..31.49 rows=2149 width=8) + -> Nested Loop (cost=0.00..577741.79 rows=46182010 width=8) + -> WorkTable Scan on tmp_reuslt (cost=0.00..429.80 rows=21490 width=0) + -> Materialize (cost=0.00..42.23 rows=2149 width=8) + -> Seq Scan on connect (cost=0.00..31.49 rows=2149 width=8) (10 rows) -select * from t1 connect by level is not null and level < 3; - c1 | c2 | c3 -----+----+---- - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 - 1 | 1 | 1 - 2 | 2 | 2 -(20 rows) - -select * from t1 connect by level; -ERROR: Unsupported expression found in CONNECT BY clause. 
-DETAIL: Pseudo column expects an operator -select t1.id a.d jack from t1; -ERROR: Invalid use of identifiers. -LINE 1: select t1.id a.d jack from t1; - ^ -DETAIL: Syntax error found near token "a" -select t1.id bauer jack from t1; -ERROR: Invalid use of identifiers. -LINE 1: select t1.id bauer jack from t1; - ^ -DETAIL: Syntax error found near token "bauer" -drop table t1; -/* limit + startwith 场景下执行阶段targetlist报错 */ -CREATE TABLE log_part ( - ts timestamp(6) without time zone DEFAULT now() NOT NULL, - op character(1), - act_no numeric(38,0), - old_blc numeric(38,0), - num numeric(38,0), - threadid bigint, - index integer, - tran integer -) -WITH (orientation=row, compression=no) -PARTITION BY RANGE (ts) -INTERVAL('1 day') -( - PARTITION p_2020_05_21 VALUES LESS THAN ('2020-05-21') TABLESPACE pg_default -) -ENABLE ROW MOVEMENT; -insert into log_part values('2021-09-24 10:12:19.451125','m',255, 10000000, -374929792, 39, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451125','a',548, 10000000, 374929792, 39, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449826','m', 39, 10000000, -473910067, 97, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451221','m',250, 10000000, -757146539, 63, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449643','m',916, 10000000, -418707874, 100, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451052','m',510, 10000000, -868384331, 45, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451039','m',541, 10000000, -782801693, 101, 0, 0); -insert into log_part values('2021-09-24 10:12:19.450232','m', 4, 10000000, -794225803, 33, 0, 0); -insert into log_part values('2021-09-24 10:12:19.450352','m',123, 10000000, -494836087, 58, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449622','m',876, 10000000, -79442930, 60, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449785','m', 21, 10000000, -560326111, 65, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449828','m',484, 10000000, -571750221, 29, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449657','m',167, 10000000, -146895512, 106, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449826','a', 35, 10000000, 473910067, 97, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451221','a',540, 10000000, 757146539, 63, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449706','m',118, 10000000, -318894193, 50, 0, 0); -insert into log_part values('2021-09-24 10:12:19.501816','m',105, 10000000, -997671676, 39, 0, 0); -insert into log_part values('2021-09-24 10:12:19.449602','m',858, 10000000, -207656402, 28, 0, 0); -insert into log_part values('2021-09-24 10:12:19.450566','m',607, 10000000, -479468765, 30, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451052','a',132, 10000000, 868384331, 45, 0, 0); -insert into log_part values('2021-09-24 10:12:19.451039','a',891, 10000000, 782801693, 101, 0, 0); -explain -select * from (select * from log_part where act_no=250) -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Limit (cost=188.10..188.30 rows=10 width=122) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..188.10 rows=11 width=122) - Start With pseudo atts: RUITR, array_key_3, array_key_4, array_key_5 - -> Recursive Union (cost=0.00..188.10 rows=11 width=122) - -> Partition Iterator 
(cost=0.00..18.05 rows=1 width=122) - Iterations: 2 - -> Partitioned Seq Scan on log_part (cost=0.00..18.05 rows=1 width=122) - Filter: ((act_no = 250::numeric) AND (old_blc = 10000000::numeric)) - Selected Partitions: 1..2 - -> Nested Loop (cost=0.00..16.98 rows=1 width=122) - Join Filter: ((tmp_reuslt."sw_subquery_0@old_blc" + tmp_reuslt."sw_subquery_0@num") = public.log_part.old_blc) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.22 rows=1 width=90) - Filter: ("sw_subquery_0@act_no" = 250::numeric) - -> Partition Iterator (cost=0.00..16.71 rows=3 width=122) - Iterations: 2 - -> Partitioned Seq Scan on log_part (cost=0.00..16.71 rows=3 width=122) - Filter: (act_no = 250::numeric) - Selected Partitions: 1..2 - -> Result (cost=0.00..0.22 rows=11 width=122) - -> CTE Scan on tmp_reuslt (cost=0.00..0.22 rows=11 width=122) -(21 rows) - -select * from (select * from log_part where act_no=250) -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; - ts | op | act_no | old_blc | num | threadid | index | tran ----------------------------------+----+--------+----------+------------+----------+-------+------ - Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 -(1 row) - -explain -select *, connect_by_root old_blc from (select * from log_part where act_no=250) -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------- - Limit (cost=188.10..188.35 rows=10 width=122) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..188.10 rows=11 width=122) - Start With pseudo atts: RUITR, array_key_3, array_key_4, array_key_5, array_col_4 - -> Recursive Union (cost=0.00..188.10 rows=11 width=122) - -> Partition Iterator (cost=0.00..18.05 rows=1 width=122) - Iterations: 2 - -> Partitioned Seq Scan on log_part (cost=0.00..18.05 rows=1 width=122) - Filter: ((act_no = 250::numeric) AND (old_blc = 10000000::numeric)) - Selected Partitions: 1..2 - -> Nested Loop (cost=0.00..16.98 rows=1 width=122) - Join Filter: ((tmp_reuslt."sw_subquery_0@old_blc" + tmp_reuslt."sw_subquery_0@num") = public.log_part.old_blc) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.22 rows=1 width=90) - Filter: ("sw_subquery_0@act_no" = 250::numeric) - -> Partition Iterator (cost=0.00..16.71 rows=3 width=122) - Iterations: 2 - -> Partitioned Seq Scan on log_part (cost=0.00..16.71 rows=3 width=122) - Filter: (act_no = 250::numeric) - Selected Partitions: 1..2 - -> Result (cost=0.00..0.28 rows=11 width=122) - -> CTE Scan on tmp_reuslt (cost=0.00..0.28 rows=11 width=122) -(21 rows) - -select *, connect_by_root old_blc from (select * from log_part where act_no=250) -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; - ts | op | act_no | old_blc | num | threadid | index | tran | connect_by_rootold_blc ----------------------------------+----+--------+----------+------------+----------+-------+------+------------------------ - Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 | 10000000 -(1 row) - -select *, connect_by_root old_blc alias_old_blc from (select * from log_part where act_no=250) -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; - ts | op | act_no | old_blc | num | threadid | index | tran | alias_old_blc 
----------------------------------+----+--------+----------+------------+----------+-------+------+--------------- - Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 | 10000000 -(1 row) - -SELECT *, CONNECT_BY_ROOT old_blc AS alias_old_blc FROM (SELECT * FROM log_part WHERE act_no=250) -START WITH old_blc=10000000 CONNECT BY PRIOR old_blc + PRIOR num = old_blc AND act_no = PRIOR act_no LIMIT 10; - ts | op | act_no | old_blc | num | threadid | index | tran | alias_old_blc ----------------------------------+----+--------+----------+------------+----------+-------+------+--------------- - Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 | 10000000 -(1 row) - -explain -select op , act_no , old_blc , num , threadid , index , tran ,level from log_part -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no -order by 1,2,3,4 limit 10; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Limit (cost=228.76..228.79 rows=10 width=118) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..228.26 rows=13 width=122) - Start With pseudo atts: RUITR, array_key_3, array_key_4, array_key_5 - -> Recursive Union (cost=0.00..228.26 rows=13 width=122) - -> Partition Iterator (cost=0.00..16.71 rows=3 width=122) - Iterations: 2 - -> Partitioned Seq Scan on log_part (cost=0.00..16.71 rows=3 width=122) - Filter: (old_blc = 10000000::numeric) - Selected Partitions: 1..2 - -> Hash Join (cost=1.05..21.13 rows=1 width=122) - Hash Cond: ((public.log_part.old_blc = (tmp_reuslt."log_part@old_blc" + tmp_reuslt."log_part@num")) AND (public.log_part.act_no = tmp_reuslt."log_part@act_no")) - -> Partition Iterator (cost=0.00..15.37 rows=537 width=122) - Iterations: 2 - -> Partitioned Seq Scan on log_part (cost=0.00..15.37 rows=537 width=122) - Selected Partitions: 1..2 - -> Hash (cost=0.60..0.60 rows=30 width=90) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.60 rows=30 width=90) - -> Sort (cost=0.50..0.53 rows=13 width=118) - Sort Key: tmp_reuslt."log_part@op", tmp_reuslt."log_part@act_no", tmp_reuslt."log_part@old_blc", tmp_reuslt."log_part@num" - -> CTE Scan on tmp_reuslt (cost=0.00..0.26 rows=13 width=118) -(21 rows) - -select op , act_no , old_blc , num , threadid , index , tran ,level from log_part -start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no -order by 1,2,3,4 limit 10; - op | act_no | old_blc | num | threadid | index | tran | level -----+--------+----------+------------+----------+-------+------+------- - a | 35 | 10000000 | 473910067 | 97 | 0 | 0 | 1 - a | 132 | 10000000 | 868384331 | 45 | 0 | 0 | 1 - a | 540 | 10000000 | 757146539 | 63 | 0 | 0 | 1 - a | 548 | 10000000 | 374929792 | 39 | 0 | 0 | 1 - a | 891 | 10000000 | 782801693 | 101 | 0 | 0 | 1 - m | 4 | 10000000 | -794225803 | 33 | 0 | 0 | 1 - m | 21 | 10000000 | -560326111 | 65 | 0 | 0 | 1 - m | 39 | 10000000 | -473910067 | 97 | 0 | 0 | 1 - m | 105 | 10000000 | -997671676 | 39 | 0 | 0 | 1 - m | 118 | 10000000 | -318894193 | 50 | 0 | 0 | 1 -(10 rows) - -drop table log_part; -set current_schema=swtest; -EXPLAIN SELECT * FROM test_area START WITH name = '中国' CONNECT BY PRIOR id = fatherid limit 10; - QUERY PLAN -------------------------------------------------------------------------------------------------- - Limit (cost=23.91..24.11 rows=10 width=72) - CTE 
tmp_reuslt - -> StartWith Operator (cost=0.00..23.91 rows=221 width=24) - Start With pseudo atts: RUITR, array_key_1 - -> Recursive Union (cost=0.00..23.91 rows=221 width=24) - -> Seq Scan on test_area (cost=0.00..1.27 rows=1 width=24) - Filter: (name = '中国'::text) - -> Hash Join (cost=0.33..1.82 rows=22 width=24) - Hash Cond: (swtest.test_area.fatherid = tmp_reuslt."test_area@id") - -> Seq Scan on test_area (cost=0.00..1.22 rows=22 width=24) - -> Hash (cost=0.20..0.20 rows=10 width=4) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) - -> Result (cost=0.00..4.42 rows=221 width=72) - -> CTE Scan on tmp_reuslt (cost=0.00..4.42 rows=221 width=72) -(14 rows) - -SELECT * FROM test_area START WITH name = '中国' CONNECT BY PRIOR id = fatherid limit 10; - id | name | fatherid | name_desc -----+--------+----------+----------- - 1 | 中国 | 0 | China - 2 | 湖南省 | 1 | Hunan - 3 | 广东省 | 1 | Guangdong - 4 | 海南省 | 1 | Hainan - 5 | 河北省 | 1 | Hebei - 6 | 河南省 | 1 | Henan - 7 | 山东省 | 1 | Shandong - 8 | 湖北省 | 1 | Hubei - 9 | 江苏省 | 1 | Jiangsu - 10 | 深圳市 | 3 | Shenzhen -(10 rows) - -set max_recursive_times=100000000; -create table tt22(x int); -create or replace view dual as select 'x' x; -insert into tt22 select level from dual connect by level <=1000000; -select count(*) from tt22; - count ---------- - 1000000 -(1 row) - -set max_recursive_times=200; -insert into tt22 select level from dual connect by level <=1000000; -ERROR: Current Start With...Connect by has exceeded max iteration times 200 -HINT: Please check your connect by clause carefully -drop table tt22; -/* 修复RecursiveUnion的inner分支备planning成BaseResult节点 */ -explain select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and 1=0; - QUERY PLAN -------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=3.12..3.34 rows=11 width=186) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..3.12 rows=11 width=102) - Start With pseudo atts: RUITR, array_key_9 - -> Recursive Union (cost=0.00..3.12 rows=11 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102) - Filter: (id = 141) - -> Result (cost=0.00..0.01 rows=1 width=0) - One-Time Filter: false -(9 rows) - -select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and 1=0; - id | pid | name ------+-----+---------- - 141 | 131 | 江南摩卡 -(1 row) - -explain select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and null; - QUERY PLAN -------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=3.12..3.34 rows=11 width=186) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..3.12 rows=11 width=102) - Start With pseudo atts: RUITR, array_key_9 - -> Recursive Union (cost=0.00..3.12 rows=11 width=102) - -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102) - Filter: (id = 141) - -> Result (cost=0.00..0.01 rows=1 width=0) - One-Time Filter: false -(9 rows) - -select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and null; - id | pid | name ------+-----+---------- - 141 | 131 | 江南摩卡 -(1 row) - -/* connect by level/rownum 不支持not并且in 数据不准确 */ -create table core_060(id varchar); -insert into core_060 values ('a'),('b'),('c'); -SELECT id,level FROM core_060 CONNECT BY level in (1,2); - id | level -----+------- - a | 1 - b | 1 - c | 1 - a | 2 - 
b | 2 - c | 2 - a | 2 - b | 2 - c | 2 - a | 2 - b | 2 - c | 2 -(12 rows) - -SELECT id,level FROM core_060 CONNECT BY not (level>2); - id | level -----+------- - a | 1 - b | 1 - c | 1 - a | 2 - b | 2 - c | 2 - a | 2 - b | 2 - c | 2 - a | 2 - b | 2 - c | 2 -(12 rows) - -SELECT id,level FROM core_060 CONNECT BY cast(level as number(38,0))<3; - id | level -----+------- - a | 1 - b | 1 - c | 1 - a | 2 - b | 2 - c | 2 - a | 2 - b | 2 - c | 2 - a | 2 - b | 2 - c | 2 -(12 rows) - -drop table core_060; -/* 存在子查询时,随着数据递归层数的增加,性能下降明显 */ -create table t_customer(id int, pid int,num int,depth int); --- verify nestloop can be material-optimized -set enable_hashjoin = off; -set enable_mergejoin = off; -explain -select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid; - QUERY PLAN --------------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=423.43..423.65 rows=11 width=16) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..423.43 rows=11 width=16) - Start With pseudo atts: RUITR, array_key_1 - -> Recursive Union (cost=0.00..423.43 rows=11 width=16) - -> Seq Scan on t_customer (cost=0.00..41.08 rows=1 width=16) - Filter: ((id < 1200040) AND (id >= 1200000) AND (id = 1200010)) - -> Nested Loop (cost=0.00..38.21 rows=1 width=16) - Join Filter: (tmp_reuslt."sw_subquery_0@id" = swtest.t_customer.pid) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) - -> Materialize (cost=0.00..36.69 rows=9 width=16) - -> Seq Scan on t_customer (cost=0.00..36.64 rows=9 width=16) - Filter: ((id < 1200040) AND (id >= 1200000)) -(13 rows) - -select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid; - id | pid | num | depth -----+-----+-----+------- -(0 rows) - -reset enable_hashjoin; -reset enable_mergejoin; --- verify nestloop can be material-optimized -set enable_nestloop = off; -set enable_mergejoin = off; -explain -select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid; - QUERY PLAN -------------------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=411.29..411.51 rows=11 width=16) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..411.29 rows=11 width=16) - Start With pseudo atts: RUITR, array_key_1 - -> Recursive Union (cost=0.00..411.29 rows=11 width=16) - -> Seq Scan on t_customer (cost=0.00..41.08 rows=1 width=16) - Filter: ((id < 1200040) AND (id >= 1200000) AND (id = 1200010)) - -> Hash Join (cost=0.33..37.00 rows=1 width=16) - Hash Cond: (swtest.t_customer.pid = tmp_reuslt."sw_subquery_0@id") - -> Materialize (cost=0.00..36.64 rows=9 width=16) - -> Seq Scan on t_customer (cost=0.00..36.64 rows=9 width=16) - Filter: ((id < 1200040) AND (id >= 1200000)) - -> Hash (cost=0.20..0.20 rows=10 width=4) - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) -(14 rows) - -select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid; - id | pid | num | depth -----+-----+-----+------- -(0 rows) - -reset enable_nestloop; -reset enable_mergejoin; --- verify mergejoin is no need to be material-optimized -set enable_hashjoin = off; -set enable_nestloop = off; -explain -select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid; - QUERY PLAN 
-------------------------------------------------------------------------------------------------- - CTE Scan on tmp_reuslt (cost=413.84..414.06 rows=11 width=16) - CTE tmp_reuslt - -> StartWith Operator (cost=0.00..413.84 rows=11 width=16) - Start With pseudo atts: RUITR, array_key_1 - -> Recursive Union (cost=0.00..413.84 rows=11 width=16) - -> Seq Scan on t_customer (cost=0.00..41.08 rows=1 width=16) - Filter: ((id < 1200040) AND (id >= 1200000) AND (id = 1200010)) - -> Merge Join (cost=37.15..37.25 rows=1 width=16) - Merge Cond: (tmp_reuslt."sw_subquery_0@id" = swtest.t_customer.pid) - -> Sort (cost=0.37..0.39 rows=10 width=4) - Sort Key: tmp_reuslt."sw_subquery_0@id" - -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4) - -> Sort (cost=36.78..36.81 rows=9 width=16) - Sort Key: swtest.t_customer.pid - -> Seq Scan on t_customer (cost=0.00..36.64 rows=9 width=16) - Filter: ((id < 1200040) AND (id >= 1200000)) -(16 rows) - -select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid; - id | pid | num | depth -----+-----+-----+------- -(0 rows) - -reset enable_mergejoin; -reset enable_nestloop; -reset enable_hashjoin; -drop table t_customer; --- test correlated sublink -create table test_place as select id, name, tex from test_hcb_ptb; -select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with not exists(select * from test_place where id=t1.id and id !=141) connect by prior pid=id; - id | pid | name ------+-----+---------- - 141 | 131 | 江南摩卡 - 131 | 121 | 东山街 - 121 | 111 | 江宁区 - 111 | 11 | 南京市 - 11 | 1 | 江苏省 - 1 | 0 | 中国 +SELECT * FROM START START /*GAUSSDB*/ + WITH connect = 1 CONNECT +/*DB*/ BY PRIOR prior = connect; + connect | prior +---------+------- + 1 | 2 + 1 | 3 + 3 | 4 + 3 | 5 + 5 | 6 + 6 | 7 (6 rows) -drop table test_place; --- test where quals pushdown -drop table if exists brand_sw3 cascade; -create table brand_sw3 -( -mfg varchar(500) primary key , -brand_cd varchar(500) , -brand_name varchar(100) , -brand_party_id number(18,10) NULL,c1 serial -); -drop table if exists usview17_sw3 cascade; -create table usview17_sw3 -( -brand_party_id numeric(18,2) , -sales_tran_id numeric(12,5) , -item_qty numeric(5,0) , -mkb_cost_amt numeric(19,4) , -mkb_exp numeric -); -SELECT MAX(t2.brand_party_id)-COUNT(t2.sales_tran_id) -FROM brand_sw3 t1,usview17_sw3 t2 -WHERE t1.brand_name=PRIOR t1.brand_name -AND PRIOR t1.brand_cd IS NOT NULL -START WITH t1.mfg=t1.brand_name -CONNECT BY NOCYCLE PRIOR t1.mfg -BETWEEN t1.brand_name -AND PRIOR t1.brand_name ; - ?column? ----------- - -(1 row) +SELECT prior AS start, connect AS prior, prior FROM START START START WITH connect = 1 CONNECT BY PRIOR prior = connect; + start | prior | prior +-------+-------+------- + 2 | 1 | 2 + 3 | 1 | 3 + 4 | 3 | 4 + 5 | 3 | 5 + 6 | 5 | 6 + 7 | 6 | 7 +(6 rows) -SELECT MAX(t2.brand_party_id)-COUNT(t2.sales_tran_id) -FROM brand_sw3 t1,usview17_sw3 t2 -where t1.brand_cd IS NOT NULL CONNECT BY rownum < 3; - ?column? 
-----------
- 
-(1 row)
-
-drop table if exists brand_sw3 cascade;
-drop table if exists usview17_sw3 cascade;
-create table sw_test1(c0 int);
-create table sw_test2(c0 text);
-select * from sw_test1,sw_test2 where true connect by true;
- c0 | c0 
-----+----
-(0 rows)
-
-drop table sw_test1;
-drop table sw_test2;
+DROP TABLE IF EXISTS start;
+DROP TABLE IF EXISTS connect;
+DROP TABLE IF EXISTS siblings;
+DROP TABLE IF EXISTS prior;
diff --git a/src/test/regress/expected/sw_bugfix-2.out b/src/test/regress/expected/sw_bugfix-2.out
new file mode 100644
index 000000000..473e29f60
--- /dev/null
+++ b/src/test/regress/expected/sw_bugfix-2.out
@@ -0,0 +1,1360 @@
+set client_min_messages = error;
+SET CLIENT_ENCODING='UTF8';
+set current_schema=swtest;
+create table tsc_rtbl(c_int int,c_varchar1 varchar,c_varchar2 varchar);
+alter table tsc_rtbl drop column c_varchar2;
+alter table tsc_rtbl add column c_varchar2 varchar;
+select c_int,c_varchar1,c_varchar2 from tsc_rtbl
+start with c_int<10 connect by nocycle prior c_int=c_int;
+ c_int | c_varchar1 | c_varchar2 
+-------+------------+------------
+(0 rows)
+
+create table t1_area (id int4,name text, fatherid int4, name_desc text);
+insert into t1_area values (1, '中国', 0, 'China');
+insert into t1_area values (2, '湖南省',1 , 'Hunan');
+insert into t1_area values (3, '广东省',1 , 'Guangdong');
+insert into t1_area values (4, '海南省',1 , 'Hainan');
+insert into t1_area values (5, '河北省',1 , 'Hebei');
+insert into t1_area values (6, '河南省',1 , 'Henan');
+insert into t1_area values (7, '山东省',1 , 'Shandong');
+insert into t1_area values (8, '湖北省',1 , 'Hubei');
+insert into t1_area values (9, '江苏省',1 , 'Jiangsu');
+insert into t1_area values (10,'深圳市',3 , 'Shenzhen');
+insert into t1_area values (11,'长沙市',2 , 'Changsha');
+insert into t1_area values (22,'祁北县',13, 'Qibei');
+insert into t1_area values (12,'南山区',10, 'Nanshan');
+insert into t1_area values (21,'祁西县',13, 'Qixi');
+insert into t1_area values (13,'衡阳市',2 , 'Hengyang');
+insert into t1_area values (14,'耒阳市',13, 'Leiyang');
+insert into t1_area values (15,'龙岗区',10, 'Longgang');
+insert into t1_area values (16,'福田区',10, 'Futian');
+insert into t1_area values (17,'宝安区',10, 'Baoan');
+insert into t1_area values (19,'祁东县',13, 'Qidong');
+insert into t1_area values (18,'常宁市',13, 'Changning');
+insert into t1_area values (20,'祁南县',13, 'Qinan');
+SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->')
+FROM t1_area
+START WITH name = '耒阳市'
+CONNECT BY id = PRIOR fatherid;
+ id | name | fatherid | name_desc | connect_by_root | sys_connect_by_path
+----+--------+----------+-----------+-----------------+-----------------------------------
+ 14 | 耒阳市 | 13 | Leiyang | Leiyang | ->Leiyang
+ 13 | 衡阳市 | 2 | Hengyang | Leiyang | ->Leiyang->Hengyang
+ 2 | 湖南省 | 1 | Hunan | Leiyang | ->Leiyang->Hengyang->Hunan
+ 1 | 中国 | 0 | China | Leiyang | ->Leiyang->Hengyang->Hunan->China
+(4 rows)
+
+-- scenario: drop a column, then add it back
+alter table t1_area drop column name_desc;
+alter table t1_area add column name_desc text;
+-- existing rows show NULL for the re-added (previously dropped) column
+SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->')
+FROM t1_area
+START WITH name = '耒阳市'
+CONNECT BY id = PRIOR fatherid;
+ id | name | fatherid | name_desc | connect_by_root | sys_connect_by_path
+----+--------+----------+-----------+-----------------+--------------------------
+ 14 | 耒阳市 | 13 | | | ->null
+ 13 | 衡阳市 | 2 | | | ->null->null
+ 2 | 湖南省 | 1 | | | ->null->null->null
+ 1 | 中国 | 0 | | | ->null->null->null->null
+(4 rows)
+
+-- re-insert the same data: NULLs left by the dropped column coexist with the new valid values
+insert into t1_area values (1, '中国', 0, 'China');
+insert into t1_area values (2, '湖南省',1 , 'Hunan');
+insert into t1_area values (3, '广东省',1 , 'Guangdong');
+insert into t1_area values (4, '海南省',1 , 'Hainan');
+insert into t1_area values (5, '河北省',1 , 'Hebei');
+insert into t1_area values (6, '河南省',1 , 'Henan');
+insert into t1_area values (7, '山东省',1 , 'Shandong');
+insert into t1_area values (8, '湖北省',1 , 'Hubei');
+insert into t1_area values (9, '江苏省',1 , 'Jiangsu');
+insert into t1_area values (10,'深圳市',3 , 'Shenzhen');
+insert into t1_area values (11,'长沙市',2 , 'Changsha');
+insert into t1_area values (22,'祁北县',13, 'Qibei');
+insert into t1_area values (12,'南山区',10, 'Nanshan');
+insert into t1_area values (21,'祁西县',13, 'Qixi');
+insert into t1_area values (13,'衡阳市',2 , 'Hengyang');
+insert into t1_area values (14,'耒阳市',13, 'Leiyang');
+insert into t1_area values (15,'龙岗区',10, 'Longgang');
+insert into t1_area values (16,'福田区',10, 'Futian');
+insert into t1_area values (17,'宝安区',10, 'Baoan');
+insert into t1_area values (19,'祁东县',13, 'Qidong');
+insert into t1_area values (18,'常宁市',13, 'Changning');
+insert into t1_area values (20,'祁南县',13, 'Qinan');
+SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->')
+FROM t1_area
+START WITH name = '耒阳市'
+CONNECT BY id = PRIOR fatherid;
+ id | name | fatherid | name_desc | connect_by_root | sys_connect_by_path
+----+--------+----------+-----------+-----------------+-----------------------------------
+ 14 | 耒阳市 | 13 | | | ->null
+ 14 | 耒阳市 | 13 | Leiyang | Leiyang | ->Leiyang
+ 13 | 衡阳市 | 2 | | | ->Leiyang->null
+ 13 | 衡阳市 | 2 | | | ->null->null
+ 13 | 衡阳市 | 2 | Hengyang | Leiyang | ->Leiyang->Hengyang
+ 13 | 衡阳市 | 2 | Hengyang | null | ->null->Hengyang
+ 2 | 湖南省 | 1 | | | ->null->Hengyang->null
+ 2 | 湖南省 | 1 | | | ->Leiyang->Hengyang->null
+ 2 | 湖南省 | 1 | | | ->null->null->null
+ 2 | 湖南省 | 1 | | | ->Leiyang->null->null
+ 2 | 湖南省 | 1 | Hunan | null | ->null->Hengyang->Hunan
+ 2 | 湖南省 | 1 | Hunan | Leiyang | ->Leiyang->Hengyang->Hunan
+ 2 | 湖南省 | 1 | Hunan | null | ->null->null->Hunan
+ 2 | 湖南省 | 1 | Hunan | Leiyang | ->Leiyang->null->Hunan
+ 1 | 中国 | 0 | | | ->Leiyang->null->Hunan->null
+ 1 | 中国 | 0 | | | ->null->null->Hunan->null
+ 1 | 中国 | 0 | | | ->Leiyang->Hengyang->Hunan->null
+ 1 | 中国 | 0 | | | ->null->Hengyang->Hunan->null
+ 1 | 中国 | 0 | | | ->Leiyang->null->null->null
+ 1 | 中国 | 0 | | | ->null->null->null->null
+ 1 | 中国 | 0 | | | ->Leiyang->Hengyang->null->null
+ 1 | 中国 | 0 | | | ->null->Hengyang->null->null
+ 1 | 中国 | 0 | China | Leiyang | ->Leiyang->null->Hunan->China
+ 1 | 中国 | 0 | China | null | ->null->null->Hunan->China
+ 1 | 中国 | 0 | China | Leiyang | ->Leiyang->Hengyang->Hunan->China
+ 1 | 中国 | 0 | China | null | ->null->Hengyang->Hunan->China
+ 1 | 中国 | 0 | China | Leiyang | ->Leiyang->null->null->China
+ 1 | 中国 | 0 | China | null | ->null->null->null->China
+ 1 | 中国 | 0 | China | Leiyang | ->Leiyang->Hengyang->null->China
+ 1 | 中国 | 0 | China | null | ->null->Hengyang->null->China
+(30 rows)
+
+SELECT * FROM t1_area START WITH id in ('1','2') CONNECT BY PRIOR fatherid = id;
+ id | name | fatherid | name_desc
+----+--------+----------+-----------
+ 1 | 中国 | 0 |
+ 2 | 湖南省 | 1 |
+ 1 | 中国 | 0 | China
+ 2 | 湖南省 | 1 | Hunan
+ 1 | 中国 | 0 |
+ 1 | 中国 | 0 |
+ 1 | 中国 | 0 | China
+ 1 | 中国 | 0 | China
+(8 rows)
+
+SELECT * FROM t1_area START WITH (cast(id as varchar) 
COLLATE "C") in (cast(+ (id) as varchar) COLLATE "C") and id < 4 connect by id = prior fatherid; + id | name | fatherid | name_desc +----+--------+----------+----------- + 1 | 中国 | 0 | + 2 | 湖南省 | 1 | + 3 | 广东省 | 1 | + 1 | 中国 | 0 | China + 2 | 湖南省 | 1 | Hunan + 3 | 广东省 | 1 | Guangdong + 1 | 中国 | 0 | + 1 | 中国 | 0 | + 1 | 中国 | 0 | + 1 | 中国 | 0 | + 1 | 中国 | 0 | China + 1 | 中国 | 0 | China + 1 | 中国 | 0 | China + 1 | 中国 | 0 | China +(14 rows) + +SELECT * FROM t1_area, tsc_rtbl START WITH id = 1 CONNECT BY PRIOR fatherid = id; + id | name | fatherid | name_desc | c_int | c_varchar1 | c_varchar2 +----+------+----------+-----------+-------+------------+------------ +(0 rows) + +SELECT *, connect_by_root(name_desc), sys_connect_by_path(name_desc, '->') +FROM t1_area; +ERROR: Invalid function call. +DETAIL: START WITH CONNECT BY function found in non-hierarchical query. +CONTEXT: referenced column: connect_by_root +/* fix start with in with clause */ +explain (costs off) WITH WITH_001 AS (SELECT 1 FROM offers_20050701 ,trait_value START WITH PARTY_ID=TRAIT_VAL CONNECT BY PRIOR TRAIT_VALUE_CD LIKE '%V%') +SELECT mfg +FROM brand ,trait_value ,WITH_001 +START WITH TRAIT_VALUE_CD=brand_name +CONNECT BY PRIOR brand_cd=UOM_CD; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + CTE Scan on tmp_reuslt + CTE tmp_reuslt + -> StartWith Operator + Start With pseudo atts: RUITR, array_key_3 + -> Recursive Union + -> Nested Loop + -> CTE Scan on tmp_reuslt + CTE tmp_reuslt + -> StartWith Operator + Start With pseudo atts: RUITR, array_key_4 + -> Recursive Union + -> Hash Join + Hash Cond: ((swtest.offers_20050701.party_id)::text = (swtest.trait_value.trait_val)::text) + -> Seq Scan on offers_20050701 + -> Hash + -> Seq Scan on trait_value + -> Nested Loop + -> Nested Loop + -> WorkTable Scan on tmp_reuslt + Filter: (("trait_value@trait_value_cd")::text ~~ '%V%'::text) + -> Materialize + -> Seq Scan on trait_value + -> Materialize + -> Seq Scan on offers_20050701 + -> Materialize + -> Hash Join + Hash Cond: ((swtest.brand.brand_name)::text = (swtest.trait_value.trait_value_cd)::text) + -> Seq Scan on brand + -> Hash + -> Seq Scan on trait_value + -> Hash Join + Hash Cond: ((tmp_reuslt."brand@brand_cd")::text = (swtest.trait_value.uom_cd)::text) + -> Nested Loop + -> WorkTable Scan on tmp_reuslt + -> Materialize + -> Seq Scan on brand + -> Hash + -> Nested Loop + -> CTE Scan on tmp_reuslt + CTE tmp_reuslt + -> StartWith Operator + Start With pseudo atts: RUITR, array_key_4 + -> Recursive Union + -> Hash Join + Hash Cond: ((swtest.offers_20050701.party_id)::text = (swtest.trait_value.trait_val)::text) + -> Seq Scan on offers_20050701 + -> Hash + -> Seq Scan on trait_value + -> Nested Loop + -> Nested Loop + -> WorkTable Scan on tmp_reuslt + Filter: (("trait_value@trait_value_cd")::text ~~ '%V%'::text) + -> Materialize + -> Seq Scan on trait_value + -> Materialize + -> Seq Scan on offers_20050701 + -> Materialize + -> Seq Scan on trait_value +(58 rows) + +WITH WITH_001 AS (SELECT 1 FROM offers_20050701 ,trait_value START WITH PARTY_ID=TRAIT_VAL CONNECT BY PRIOR TRAIT_VALUE_CD LIKE '%V%') +SELECT mfg +FROM brand ,trait_value ,WITH_001 +START WITH TRAIT_VALUE_CD=brand_name +CONNECT BY PRIOR brand_cd=UOM_CD; + mfg +----- +(0 rows) + +/* fix reference to level in connect by function calls */ +SELECT 1, level FROM t1_area CONNECT BY length(level) IS NULL; + ?column? 
| level
+----------+-------
+(0 rows)
+
+/* prior params of procedure */
+create or replace function test_tmp1(out id int,out pid int,out name varchar,out level int) return SETOF RECORD
+IS
+declare
+CURSOR C1(sedid int) IS select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id = sedid connect by prior pid=id;
+begin
+open C1(141);
+loop
+fetch C1 into id,pid,name,level;
+EXIT WHEN C1%NOTFOUND;
+return next;
+end loop;
+close C1;
+end;
+/
+select * from test_tmp1();
+ id | pid | name | level
+-----+-----+----------+-------
+ 141 | 131 | 江南摩卡 | 1
+ 131 | 121 | 东山街 | 2
+ 121 | 111 | 江宁区 | 3
+ 111 | 11 | 南京市 | 4
+ 11 | 1 | 江苏省 | 5
+ 1 | 0 | 中国 | 6
+(6 rows)
+
+drop procedure test_tmp1;
+drop table t1_area;
+drop table tsc_rtbl;
+-- original issue scenario: connect_by_root(1) inside an expression raised an error
+explain
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id)
+from test_hcb_ptb t1
+where connect_by_root(1) > 0
+start with id = 141
+connect by prior pid=id;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Result (cost=37.12..39.89 rows=101 width=190)
+ One-Time Filter: ((connect_by_root('1'::text))::bigint > 0)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..37.11 rows=101 width=102)
+ Start With pseudo atts: RUITR, array_key_9, array_col_1
+ -> Recursive Union (cost=0.00..37.11 rows=101 width=102)
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102)
+ Filter: (id = 141)
+ -> Hash Join (cost=0.33..3.23 rows=10 width=102)
+ Hash Cond: (t1.id = tmp_reuslt."t1@pid")
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102)
+ -> Hash (cost=0.20..0.20 rows=10 width=4)
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> CTE Scan on tmp_reuslt (cost=0.01..2.03 rows=101 width=190)
+(14 rows)
+
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id)
+from test_hcb_ptb t1
+where connect_by_root(1) > 0
+start with id = 141
+connect by prior pid=id;
+ id | pid | name | le | connect_by_root | connect_by_root
+-----+-----+----------+----+-----------------+-----------------
+ 141 | 131 | 江南摩卡 | 1 | 1 | 141
+ 131 | 121 | 东山街 | 2 | 1 | 141
+ 121 | 111 | 江宁区 | 3 | 1 | 141
+ 111 | 11 | 南京市 | 4 | 1 | 141
+ 11 | 1 | 江苏省 | 5 | 1 | 141
+ 1 | 0 | 中国 | 6 | 1 | 141
+(6 rows)
+
+-- extended scenario: connect_by_root(id) raised a column-not-found error
+explain
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id)
+from test_hcb_ptb t1
+where connect_by_root(id) > 0
+start with id = 141
+connect by prior pid=id;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ CTE Scan on tmp_reuslt (cost=37.11..40.39 rows=34 width=190)
+ Filter: ((connect_by_root(("t1@id")::text))::bigint > 0)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..37.11 rows=101 width=102)
+ Start With pseudo atts: RUITR, array_key_9, array_col_1
+ -> Recursive Union (cost=0.00..37.11 rows=101 width=102)
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102)
+ Filter: (id = 141)
+ -> Hash Join (cost=0.33..3.23 rows=10 width=102)
+ Hash Cond: (t1.id = tmp_reuslt."t1@pid")
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102)
+ -> Hash (cost=0.20..0.20 rows=10 width=4)
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+(13 rows)
+
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id)
+from test_hcb_ptb t1
+where connect_by_root(id) > 0
+start with id = 141
+connect by prior pid=id;
+ id | pid | name | le | connect_by_root | connect_by_root
+-----+-----+----------+----+-----------------+-----------------
+ 141 | 131 | 江南摩卡 | 1 | 1 | 141
+ 131 | 121 | 东山街 | 2 | 1 | 141
+ 121 | 111 | 江宁区 | 3 | 1 | 141
+ 111 | 11 | 南京市 | 4 | 1 | 141
+ 11 | 1 | 江苏省 | 5 | 1 | 141
+ 1 | 0 | 中国 | 6 | 1 | 141
+(6 rows)
+
+-- extended scenario: sys_connect_by_path(123, '-') is not null
+explain
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-')
+from test_hcb_ptb t1
+where sys_connect_by_path(123, '-') is not null
+start with id = 141
+connect by prior pid=id;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Result (cost=37.11..40.14 rows=101 width=190)
+ One-Time Filter: (sys_connect_by_path('123'::text, '-'::text) IS NOT NULL)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..37.11 rows=101 width=102)
+ Start With pseudo atts: RUITR, array_key_9, array_col_1
+ -> Recursive Union (cost=0.00..37.11 rows=101 width=102)
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102)
+ Filter: (id = 141)
+ -> Hash Join (cost=0.33..3.23 rows=10 width=102)
+ Hash Cond: (t1.id = tmp_reuslt."t1@pid")
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102)
+ -> Hash (cost=0.20..0.20 rows=10 width=4)
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> CTE Scan on tmp_reuslt (cost=0.00..2.02 rows=101 width=190)
+(14 rows)
+
+create table ctI as select t1.id,t1.pid,t1.name,level as le from test_hcb_ptb t1 start with id=141 connect by prior id=pid;
+create table ctII as select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid;
+\d ctI;
+ Table "swtest.cti"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ id | integer |
+ pid | integer |
+ name | character varying(80) |
+ le | integer |
+
+\d ctII;
+ Table "swtest.ctii"
+ Column | Type | Modifiers
+--------+-----------------------+-----------
+ id | integer |
+ pid | integer |
+ name | character varying(80) |
+ level | integer |
+
+drop table ctI;
+drop table ctII;
+/*
+ * NOTE: an upgrade change is needed to make sys_connect_by_path()/connect_by_root() volatile
+ */
+/*
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-')
+from test_hcb_ptb t1
+where sys_connect_by_path(123, '-') is not null
+start with id = 141
+connect by prior pid=id;
+*/
+-- extended scenario: verify sys_connect_by_path(123, '-') is matched correctly
+explain
+select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-')
+from test_hcb_ptb t1
+where sys_connect_by_path(123, '-') like '-123-123-123%'
+start with id = 141
+connect by prior pid=id;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Result (cost=37.11..40.14 rows=101 width=190)
+ One-Time Filter: (sys_connect_by_path('123'::text, '-'::text) ~~ '-123-123-123%'::text)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..37.11 rows=101 width=102)
+ Start With pseudo atts: RUITR, array_key_9, array_col_1
+ -> Recursive Union (cost=0.00..37.11 rows=101 width=102)
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102)
+ Filter: (id = 141)
+ -> Hash Join (cost=0.33..3.23 rows=10 width=102)
+ Hash Cond: (t1.id = tmp_reuslt."t1@pid")
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102)
+ -> Hash (cost=0.20..0.20 rows=10 width=4)
+ -> WorkTable Scan on tmp_reuslt 
(cost=0.00..0.20 rows=10 width=4) + -> CTE Scan on tmp_reuslt (cost=0.01..2.02 rows=101 width=190) +(14 rows) + +/* + * NOTE: need do upgrade change to have syc_conenct_by_path()/connect_by_root() to be volatile + */ +/* +select t1.id,t1.pid,t1.name,LEVEL le,connect_by_root(1), connect_by_root(id), sys_connect_by_path(123, '-') +from test_hcb_ptb t1 +where sys_connect_by_path(123, '-') like '-123-123-123%' +start with id = 141 +connect by prior pid=id; +*/ +/* testing distinct qualifier */ +select distinct id,pid,name,LEVEL from t1 start with id = 1 connect by prior pid=id order by 1; + id | pid | name | level +----+-----+------+------- + 1 | 0 | 1 | 1 +(1 row) + +/* testing NOT expression */ +select t1.id, t1.pid, t1.name from t1 start with not id=1 connect by prior pid=id; + id | pid | name +----+-----+------ + 2 | 1 | 2 + 3 | 0 | 3 + 4 | 1 | 4 + 5 | 2 | 5 + 6 | 3 | 6 + 7 | 4 | 7 + 8 | 4 | 8 + 9 | 7 | 9 + 1 | 0 | 1 + 1 | 0 | 1 + 2 | 1 | 2 + 3 | 0 | 3 + 4 | 1 | 4 + 4 | 1 | 4 + 7 | 4 | 7 + 1 | 0 | 1 + 1 | 0 | 1 + 1 | 0 | 1 + 4 | 1 | 4 + 1 | 0 | 1 +(20 rows) + +/* testing func expr in connect by clause */ +explain select trim(t1.name) from test_hcb_ptb t1 connect by trim(t1.name) is not null; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + CTE Scan on tmp_reuslt (cost=13471.92..22689.36 rows=409664 width=178) + CTE tmp_reuslt + -> StartWith Operator (cost=0.00..13471.92 rows=409664 width=102) + Start With pseudo atts: RUITR + -> Recursive Union (cost=0.00..13471.92 rows=409664 width=102) + -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) + -> Nested Loop (cost=0.00..527.60 rows=40960 width=102) + -> WorkTable Scan on tmp_reuslt (cost=0.00..12.80 rows=640 width=0) + -> Materialize (cost=0.00..2.96 rows=64 width=102) + -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.64 rows=64 width=102) +(10 rows) + +/* fix create table as with start with */ +create table ct as select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid; +drop table ct; +set current_schema = public; +create table t1(c1 int,c2 int,c3 int); +insert into t1 values(1,1,1); +insert into t1 values(2,2,2); +select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2; + c1 | c2 | c3 | connect_by_iscycle +----+----+----+-------------------- + 1 | 1 | 1 | 0 + 1 | 1 | 1 | 1 +(2 rows) + +insert into t1 values(1,1,1); +insert into t1 values(2,2,2); +select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2; + c1 | c2 | c3 | connect_by_iscycle +----+----+----+-------------------- + 1 | 1 | 1 | 0 + 1 | 1 | 1 | 0 + 1 | 1 | 1 | 1 +(3 rows) + +insert into t1 values(1,NULL,1); +select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2 nulls first; + c1 | c2 | c3 | connect_by_iscycle +----+----+----+-------------------- + 1 | | 1 | 0 + 1 | 1 | 1 | 0 + 1 | 1 | 1 | 0 + 1 | 1 | 1 | 1 +(4 rows) + +select *, connect_by_iscycle from t1 start with c1=1 connect by nocycle prior c1=c2 order siblings by 1,2 nulls last; + c1 | c2 | c3 | connect_by_iscycle +----+----+----+-------------------- + 1 | 1 | 1 | 0 + 1 | 1 | 1 | 0 + 1 | | 1 | 0 + 1 | 1 | 1 | 1 +(4 rows) + +delete from t1 where c2 is null; +select *, connect_by_iscycle from t1 start with c1<3 connect by nocycle prior c1 StartWith Operator (cost=0.00..12299132.73 rows=378304445 width=12) + Start With pseudo atts: RUITR + -> Recursive Union 
(cost=0.00..12299132.73 rows=378304445 width=12) + -> Seq Scan on t1 (cost=0.00..29.45 rows=1945 width=12) + -> Nested Loop (cost=0.00..473301.44 rows=37830250 width=12) + -> WorkTable Scan on tmp_reuslt (cost=0.00..389.00 rows=19450 width=0) + -> Materialize (cost=0.00..39.17 rows=1945 width=12) + -> Seq Scan on t1 (cost=0.00..29.45 rows=1945 width=12) +(10 rows) + +select * from t1 connect by level is not null and level < 3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 + 1 | 1 | 1 + 2 | 2 | 2 +(20 rows) + +select * from t1 connect by level; +ERROR: Unsupported expression found in CONNECT BY clause. +DETAIL: Pseudo column expects an operator +select t1.id a.d jack from t1; +ERROR: Invalid use of identifiers. +LINE 1: select t1.id a.d jack from t1; + ^ +DETAIL: Syntax error found near token "a" +select t1.id bauer jack from t1; +ERROR: Invalid use of identifiers. +LINE 1: select t1.id bauer jack from t1; + ^ +DETAIL: Syntax error found near token "bauer" +drop table t1; +CREATE TABLE log_part ( + ts timestamp(6) without time zone DEFAULT now() NOT NULL, + op character(1), + act_no numeric(38,0), + old_blc numeric(38,0), + num numeric(38,0), + threadid bigint, + index integer, + tran integer +) +WITH (orientation=row, compression=no) +PARTITION BY RANGE (ts) +INTERVAL('1 day') +( + PARTITION p_2020_05_21 VALUES LESS THAN ('2020-05-21') TABLESPACE pg_default +) +ENABLE ROW MOVEMENT; +insert into log_part values('2021-09-24 10:12:19.451125','m',255, 10000000, -374929792, 39, 0, 0); +insert into log_part values('2021-09-24 10:12:19.451125','a',548, 10000000, 374929792, 39, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449826','m', 39, 10000000, -473910067, 97, 0, 0); +insert into log_part values('2021-09-24 10:12:19.451221','m',250, 10000000, -757146539, 63, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449643','m',916, 10000000, -418707874, 100, 0, 0); +insert into log_part values('2021-09-24 10:12:19.451052','m',510, 10000000, -868384331, 45, 0, 0); +insert into log_part values('2021-09-24 10:12:19.451039','m',541, 10000000, -782801693, 101, 0, 0); +insert into log_part values('2021-09-24 10:12:19.450232','m', 4, 10000000, -794225803, 33, 0, 0); +insert into log_part values('2021-09-24 10:12:19.450352','m',123, 10000000, -494836087, 58, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449622','m',876, 10000000, -79442930, 60, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449785','m', 21, 10000000, -560326111, 65, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449828','m',484, 10000000, -571750221, 29, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449657','m',167, 10000000, -146895512, 106, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449826','a', 35, 10000000, 473910067, 97, 0, 0); +insert into log_part values('2021-09-24 10:12:19.451221','a',540, 10000000, 757146539, 63, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449706','m',118, 10000000, -318894193, 50, 0, 0); +insert into log_part values('2021-09-24 10:12:19.501816','m',105, 10000000, -997671676, 39, 0, 0); +insert into log_part values('2021-09-24 10:12:19.449602','m',858, 10000000, -207656402, 28, 0, 0); +insert into log_part values('2021-09-24 10:12:19.450566','m',607, 10000000, -479468765, 30, 0, 0); +insert into log_part values('2021-09-24 
10:12:19.451052','a',132, 10000000, 868384331, 45, 0, 0); +insert into log_part values('2021-09-24 10:12:19.451039','a',891, 10000000, 782801693, 101, 0, 0); +explain +select * from (select * from log_part where act_no=250) +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Limit (cost=188.10..188.30 rows=10 width=122) + CTE tmp_reuslt + -> StartWith Operator (cost=0.00..188.10 rows=11 width=122) + Start With pseudo atts: RUITR, array_key_3, array_key_4, array_key_5 + -> Recursive Union (cost=0.00..188.10 rows=11 width=122) + -> Partition Iterator (cost=0.00..18.05 rows=1 width=122) + Iterations: 2 + -> Partitioned Seq Scan on log_part (cost=0.00..18.05 rows=1 width=122) + Filter: ((act_no = 250::numeric) AND (old_blc = 10000000::numeric)) + Selected Partitions: 1..2 + -> Nested Loop (cost=0.00..16.98 rows=1 width=122) + Join Filter: ((tmp_reuslt."sw_subquery_0@old_blc" + tmp_reuslt."sw_subquery_0@num") = public.log_part.old_blc) + -> WorkTable Scan on tmp_reuslt (cost=0.00..0.22 rows=1 width=90) + Filter: ("sw_subquery_0@act_no" = 250::numeric) + -> Partition Iterator (cost=0.00..16.71 rows=3 width=122) + Iterations: 2 + -> Partitioned Seq Scan on log_part (cost=0.00..16.71 rows=3 width=122) + Filter: (act_no = 250::numeric) + Selected Partitions: 1..2 + -> Result (cost=0.00..0.22 rows=11 width=122) + -> CTE Scan on tmp_reuslt (cost=0.00..0.22 rows=11 width=122) +(21 rows) + +select * from (select * from log_part where act_no=250) +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; + ts | op | act_no | old_blc | num | threadid | index | tran +---------------------------------+----+--------+----------+------------+----------+-------+------ + Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 +(1 row) + +explain +select *, connect_by_root old_blc from (select * from log_part where act_no=250) +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Limit (cost=188.10..188.35 rows=10 width=122) + CTE tmp_reuslt + -> StartWith Operator (cost=0.00..188.10 rows=11 width=122) + Start With pseudo atts: RUITR, array_key_3, array_key_4, array_key_5, array_col_4 + -> Recursive Union (cost=0.00..188.10 rows=11 width=122) + -> Partition Iterator (cost=0.00..18.05 rows=1 width=122) + Iterations: 2 + -> Partitioned Seq Scan on log_part (cost=0.00..18.05 rows=1 width=122) + Filter: ((act_no = 250::numeric) AND (old_blc = 10000000::numeric)) + Selected Partitions: 1..2 + -> Nested Loop (cost=0.00..16.98 rows=1 width=122) + Join Filter: ((tmp_reuslt."sw_subquery_0@old_blc" + tmp_reuslt."sw_subquery_0@num") = public.log_part.old_blc) + -> WorkTable Scan on tmp_reuslt (cost=0.00..0.22 rows=1 width=90) + Filter: ("sw_subquery_0@act_no" = 250::numeric) + -> Partition Iterator (cost=0.00..16.71 rows=3 width=122) + Iterations: 2 + -> Partitioned Seq Scan on log_part (cost=0.00..16.71 rows=3 width=122) + Filter: (act_no = 250::numeric) + Selected Partitions: 1..2 + -> Result (cost=0.00..0.28 rows=11 width=122) + -> CTE Scan on tmp_reuslt (cost=0.00..0.28 rows=11 width=122) +(21 rows) + +select *, connect_by_root old_blc 
from (select * from log_part where act_no=250) +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; + ts | op | act_no | old_blc | num | threadid | index | tran | connect_by_rootold_blc +---------------------------------+----+--------+----------+------------+----------+-------+------+------------------------ + Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 | 10000000 +(1 row) + +select *, connect_by_root old_blc alias_old_blc from (select * from log_part where act_no=250) +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no limit 10; + ts | op | act_no | old_blc | num | threadid | index | tran | alias_old_blc +---------------------------------+----+--------+----------+------------+----------+-------+------+--------------- + Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 | 10000000 +(1 row) + +SELECT *, CONNECT_BY_ROOT old_blc AS alias_old_blc FROM (SELECT * FROM log_part WHERE act_no=250) +START WITH old_blc=10000000 CONNECT BY PRIOR old_blc + PRIOR num = old_blc AND act_no = PRIOR act_no LIMIT 10; + ts | op | act_no | old_blc | num | threadid | index | tran | alias_old_blc +---------------------------------+----+--------+----------+------------+----------+-------+------+--------------- + Fri Sep 24 10:12:19.451221 2021 | m | 250 | 10000000 | -757146539 | 63 | 0 | 0 | 10000000 +(1 row) + +explain +select op , act_no , old_blc , num , threadid , index , tran ,level from log_part +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no +order by 1,2,3,4 limit 10; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (cost=228.76..228.79 rows=10 width=118) + CTE tmp_reuslt + -> StartWith Operator (cost=0.00..228.26 rows=13 width=122) + Start With pseudo atts: RUITR, array_key_3, array_key_4, array_key_5 + -> Recursive Union (cost=0.00..228.26 rows=13 width=122) + -> Partition Iterator (cost=0.00..16.71 rows=3 width=122) + Iterations: 2 + -> Partitioned Seq Scan on log_part (cost=0.00..16.71 rows=3 width=122) + Filter: (old_blc = 10000000::numeric) + Selected Partitions: 1..2 + -> Hash Join (cost=1.05..21.13 rows=1 width=122) + Hash Cond: ((public.log_part.old_blc = (tmp_reuslt."log_part@old_blc" + tmp_reuslt."log_part@num")) AND (public.log_part.act_no = tmp_reuslt."log_part@act_no")) + -> Partition Iterator (cost=0.00..15.37 rows=537 width=122) + Iterations: 2 + -> Partitioned Seq Scan on log_part (cost=0.00..15.37 rows=537 width=122) + Selected Partitions: 1..2 + -> Hash (cost=0.60..0.60 rows=30 width=90) + -> WorkTable Scan on tmp_reuslt (cost=0.00..0.60 rows=30 width=90) + -> Sort (cost=0.50..0.53 rows=13 width=118) + Sort Key: tmp_reuslt."log_part@op", tmp_reuslt."log_part@act_no", tmp_reuslt."log_part@old_blc", tmp_reuslt."log_part@num" + -> CTE Scan on tmp_reuslt (cost=0.00..0.26 rows=13 width=118) +(21 rows) + +select op , act_no , old_blc , num , threadid , index , tran ,level from log_part +start with old_blc=10000000 connect by prior old_blc + prior num = old_blc and act_no=prior act_no +order by 1,2,3,4 limit 10; + op | act_no | old_blc | num | threadid | index | tran | level +----+--------+----------+------------+----------+-------+------+------- + a | 35 | 10000000 | 473910067 | 97 | 0 | 0 | 1 + a | 132 | 10000000 | 
868384331 | 45 | 0 | 0 | 1
+ a | 540 | 10000000 | 757146539 | 63 | 0 | 0 | 1
+ a | 548 | 10000000 | 374929792 | 39 | 0 | 0 | 1
+ a | 891 | 10000000 | 782801693 | 101 | 0 | 0 | 1
+ m | 4 | 10000000 | -794225803 | 33 | 0 | 0 | 1
+ m | 21 | 10000000 | -560326111 | 65 | 0 | 0 | 1
+ m | 39 | 10000000 | -473910067 | 97 | 0 | 0 | 1
+ m | 105 | 10000000 | -997671676 | 39 | 0 | 0 | 1
+ m | 118 | 10000000 | -318894193 | 50 | 0 | 0 | 1
+(10 rows)
+
+drop table log_part;
+set current_schema=swtest;
+EXPLAIN SELECT * FROM test_area START WITH name = '中国' CONNECT BY PRIOR id = fatherid limit 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (cost=23.91..24.11 rows=10 width=72)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..23.91 rows=221 width=24)
+ Start With pseudo atts: RUITR, array_key_1
+ -> Recursive Union (cost=0.00..23.91 rows=221 width=24)
+ -> Seq Scan on test_area (cost=0.00..1.27 rows=1 width=24)
+ Filter: (name = '中国'::text)
+ -> Hash Join (cost=0.33..1.82 rows=22 width=24)
+ Hash Cond: (swtest.test_area.fatherid = tmp_reuslt."test_area@id")
+ -> Seq Scan on test_area (cost=0.00..1.22 rows=22 width=24)
+ -> Hash (cost=0.20..0.20 rows=10 width=4)
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> Result (cost=0.00..4.42 rows=221 width=72)
+ -> CTE Scan on tmp_reuslt (cost=0.00..4.42 rows=221 width=72)
+(14 rows)
+
+SELECT * FROM test_area START WITH name = '中国' CONNECT BY PRIOR id = fatherid limit 10;
+ id | name | fatherid | name_desc
+----+--------+----------+-----------
+ 1 | 中国 | 0 | China
+ 2 | 湖南省 | 1 | Hunan
+ 3 | 广东省 | 1 | Guangdong
+ 4 | 海南省 | 1 | Hainan
+ 5 | 河北省 | 1 | Hebei
+ 6 | 河南省 | 1 | Henan
+ 7 | 山东省 | 1 | Shandong
+ 8 | 湖北省 | 1 | Hubei
+ 9 | 江苏省 | 1 | Jiangsu
+ 10 | 深圳市 | 3 | Shenzhen
+(10 rows)
+
+set max_recursive_times=1000;
+create table tt22(x int);
+create or replace view dual as select 'x' x;
+insert into tt22 select level from dual connect by level <=1000;
+select count(*) from tt22;
+ count
+-------
+ 1000
+(1 row)
+
+set max_recursive_times=200;
+insert into tt22 select level from dual connect by level <=1000;
+ERROR: Current Start With...Connect by has exceeded max iteration times 200
+HINT: Please check your connect by clause carefully
+drop table tt22;
+/* fix the case where the inner branch of RecursiveUnion is planned as a BaseResult node */
+explain select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and 1=0;
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ CTE Scan on tmp_reuslt (cost=3.12..3.34 rows=11 width=186)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..3.12 rows=11 width=102)
+ Start With pseudo atts: RUITR, array_key_9
+ -> Recursive Union (cost=0.00..3.12 rows=11 width=102)
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102)
+ Filter: (id = 141)
+ -> Result (cost=0.00..0.01 rows=1 width=0)
+ One-Time Filter: false
+(9 rows)
+
+select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and 1=0;
+ id | pid | name
+-----+-----+----------
+ 141 | 131 | 江南摩卡
+(1 row)
+
+explain select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and null;
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ CTE Scan on tmp_reuslt (cost=3.12..3.34 rows=11 width=186)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..3.12 rows=11 width=102)
+ Start With pseudo atts: RUITR, array_key_9
+ -> Recursive Union (cost=0.00..3.12 rows=11 width=102)
+ -> Seq Scan on test_hcb_ptb t1 (cost=0.00..2.80 rows=1 width=102)
+ Filter: (id = 141)
+ -> Result (cost=0.00..0.01 rows=1 width=0)
+ One-Time Filter: false
+(9 rows)
+
+select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and null;
+ id | pid | name
+-----+-----+----------
+ 141 | 131 | 江南摩卡
+(1 row)
+
+create table core_060(id varchar);
+insert into core_060 values ('a'),('b'),('c');
+SELECT id,level FROM core_060 CONNECT BY level in (1,2);
+ id | level
+----+-------
+ a | 1
+ b | 1
+ c | 1
+ a | 2
+ b | 2
+ c | 2
+ a | 2
+ b | 2
+ c | 2
+ a | 2
+ b | 2
+ c | 2
+(12 rows)
+
+SELECT id,level FROM core_060 CONNECT BY not (level>2);
+ id | level
+----+-------
+ a | 1
+ b | 1
+ c | 1
+ a | 2
+ b | 2
+ c | 2
+ a | 2
+ b | 2
+ c | 2
+ a | 2
+ b | 2
+ c | 2
+(12 rows)
+
+SELECT id,level FROM core_060 CONNECT BY cast(level as number(38,0))<3;
+ id | level
+----+-------
+ a | 1
+ b | 1
+ c | 1
+ a | 2
+ b | 2
+ c | 2
+ a | 2
+ b | 2
+ c | 2
+ a | 2
+ b | 2
+ c | 2
+(12 rows)
+
+drop table core_060;
+create table t_customer(id int, pid int,num int,depth int);
+-- verify nestloop can be material-optimized
+set enable_hashjoin = off;
+set enable_mergejoin = off;
+explain
+select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ CTE Scan on tmp_reuslt (cost=423.43..423.65 rows=11 width=16)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..423.43 rows=11 width=16)
+ Start With pseudo atts: RUITR, array_key_1
+ -> Recursive Union (cost=0.00..423.43 rows=11 width=16)
+ -> Seq Scan on t_customer (cost=0.00..41.08 rows=1 width=16)
+ Filter: ((id < 1200040) AND (id >= 1200000) AND (id = 1200010))
+ -> Nested Loop (cost=0.00..38.21 rows=1 width=16)
+ Join Filter: (tmp_reuslt."sw_subquery_0@id" = swtest.t_customer.pid)
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> Materialize (cost=0.00..36.69 rows=9 width=16)
+ -> Seq Scan on t_customer (cost=0.00..36.64 rows=9 width=16)
+ Filter: ((id < 1200040) AND (id >= 1200000))
+(13 rows)
+
+select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid;
+ id | pid | num | depth
+----+-----+-----+-------
+(0 rows)
+
+reset enable_hashjoin;
+reset enable_mergejoin;
+-- verify hashjoin can be material-optimized
+set enable_nestloop = off;
+set enable_mergejoin = off;
+explain
+select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ CTE Scan on tmp_reuslt (cost=411.29..411.51 rows=11 width=16)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..411.29 rows=11 width=16)
+ Start With pseudo atts: RUITR, array_key_1
+ -> Recursive Union (cost=0.00..411.29 rows=11 width=16)
+ -> Seq Scan on t_customer (cost=0.00..41.08 rows=1 width=16)
+ Filter: ((id < 1200040) AND (id >= 1200000) AND (id = 1200010))
+ -> Hash Join (cost=0.33..37.00 rows=1 width=16)
+ Hash Cond: (swtest.t_customer.pid = tmp_reuslt."sw_subquery_0@id")
+ -> Materialize (cost=0.00..36.64 rows=9 width=16)
+ -> Seq Scan on t_customer (cost=0.00..36.64 rows=9 width=16)
+ Filter: ((id < 1200040) AND (id >= 1200000))
+ -> Hash (cost=0.20..0.20 rows=10 width=4)
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+(14 rows)
+
+select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid;
+ id | pid | num | depth
+----+-----+-----+-------
+(0 rows)
+
+reset enable_nestloop;
+reset enable_mergejoin;
+-- verify mergejoin does not need to be material-optimized
+set enable_hashjoin = off;
+set enable_nestloop = off;
+explain
+select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ CTE Scan on tmp_reuslt (cost=413.84..414.06 rows=11 width=16)
+ CTE tmp_reuslt
+ -> StartWith Operator (cost=0.00..413.84 rows=11 width=16)
+ Start With pseudo atts: RUITR, array_key_1
+ -> Recursive Union (cost=0.00..413.84 rows=11 width=16)
+ -> Seq Scan on t_customer (cost=0.00..41.08 rows=1 width=16)
+ Filter: ((id < 1200040) AND (id >= 1200000) AND (id = 1200010))
+ -> Merge Join (cost=37.15..37.25 rows=1 width=16)
+ Merge Cond: (tmp_reuslt."sw_subquery_0@id" = swtest.t_customer.pid)
+ -> Sort (cost=0.37..0.39 rows=10 width=4)
+ Sort Key: tmp_reuslt."sw_subquery_0@id"
+ -> WorkTable Scan on tmp_reuslt (cost=0.00..0.20 rows=10 width=4)
+ -> Sort (cost=36.78..36.81 rows=9 width=16)
+ Sort Key: swtest.t_customer.pid
+ -> Seq Scan on t_customer (cost=0.00..36.64 rows=9 width=16)
+ Filter: ((id < 1200040) AND (id >= 1200000))
+(16 rows)
+
+select * from ( select * from t_customer where id<1200040 and id>=1200000) start with id=1200010 connect by prior id=pid;
+ id | pid | num | depth
+----+-----+-----+-------
+(0 rows)
+
+reset enable_mergejoin;
+reset enable_nestloop;
+reset enable_hashjoin;
+drop table t_customer;
+-- test correlated sublink
+create table test_place as select id, name, tex from test_hcb_ptb;
+select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with not exists(select * from test_place where id=t1.id and id !=141) connect by prior pid=id;
+ id | pid | name
+-----+-----+----------
+ 141 | 131 | 江南摩卡
+ 131 | 121 | 东山街
+ 121 | 111 | 江宁区
+ 111 | 11 | 南京市
+ 11 | 1 | 江苏省
+ 1 | 0 | 中国
+(6 rows)
+
+drop table test_place;
+-- test where quals pushdown
+drop table if exists brand_sw3 cascade;
+create table brand_sw3
+(
+mfg varchar(500) primary key ,
+brand_cd varchar(500) ,
+brand_name varchar(100) ,
+brand_party_id number(18,10) NULL,c1 serial
+);
+drop table if exists usview17_sw3 cascade;
+create table usview17_sw3
+(
+brand_party_id numeric(18,2) ,
+sales_tran_id numeric(12,5) ,
+item_qty numeric(5,0) ,
+mkb_cost_amt numeric(19,4) ,
+mkb_exp numeric
+);
+SELECT MAX(t2.brand_party_id)-COUNT(t2.sales_tran_id)
+FROM brand_sw3 t1,usview17_sw3 t2
+WHERE t1.brand_name=PRIOR t1.brand_name
+AND PRIOR t1.brand_cd IS NOT NULL
+START WITH t1.mfg=t1.brand_name
+CONNECT BY NOCYCLE PRIOR t1.mfg
+BETWEEN t1.brand_name
+AND PRIOR t1.brand_name ;
+ ?column?
+----------
+
+(1 row)
+
+SELECT MAX(t2.brand_party_id)-COUNT(t2.sales_tran_id)
+FROM brand_sw3 t1,usview17_sw3 t2
+where t1.brand_cd IS NOT NULL CONNECT BY rownum < 3;
+ ?column?
+---------- + +(1 row) + +drop table if exists brand_sw3 cascade; +drop table if exists usview17_sw3 cascade; +-- check that order siblings by does not cause result consistency or performance issues +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY PRIOR id=pid AND level<4 ORDER SIBLINGS BY 1 DESC; + id | pid | name | rownum | level +-----+-----+--------+--------+------- + 1 | 0 | 中国 | 1 | 1 + 19 | 1 | 武汉省 | 2 | 2 + 18 | 1 | 贵州省 | 3 | 2 + 17 | 1 | 湖北省 | 4 | 2 + 16 | 1 | 湖南省 | 5 | 2 + 15 | 1 | 河北省 | 6 | 2 + 14 | 1 | 河南省 | 7 | 2 + 13 | 1 | 安徽省 | 8 | 2 + 12 | 1 | 山东省 | 9 | 2 + 11 | 1 | 江苏省 | 10 | 2 + 119 | 11 | 泰州市 | 11 | 3 + 118 | 11 | 连云港 | 12 | 3 + 117 | 11 | 常州市 | 13 | 3 + 116 | 11 | 无锡市 | 14 | 3 + 115 | 11 | 盐城市 | 15 | 3 + 114 | 11 | 苏州市 | 16 | 3 + 113 | 11 | 徐州市 | 17 | 3 + 112 | 11 | 宿迁市 | 18 | 3 + 111 | 11 | 南京市 | 19 | 3 +(19 rows) + +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY PRIOR id=pid AND level<4; + id | pid | name | rownum | level +-----+-----+--------+--------+------- + 1 | 0 | 中国 | 1 | 1 + 11 | 1 | 江苏省 | 2 | 2 + 12 | 1 | 山东省 | 3 | 2 + 13 | 1 | 安徽省 | 4 | 2 + 14 | 1 | 河南省 | 5 | 2 + 15 | 1 | 河北省 | 6 | 2 + 16 | 1 | 湖南省 | 7 | 2 + 17 | 1 | 湖北省 | 8 | 2 + 18 | 1 | 贵州省 | 9 | 2 + 19 | 1 | 武汉省 | 10 | 2 + 111 | 11 | 南京市 | 11 | 3 + 112 | 11 | 宿迁市 | 12 | 3 + 113 | 11 | 徐州市 | 13 | 3 + 114 | 11 | 苏州市 | 14 | 3 + 115 | 11 | 盐城市 | 15 | 3 + 117 | 11 | 常州市 | 16 | 3 + 116 | 11 | 无锡市 | 17 | 3 + 118 | 11 | 连云港 | 18 | 3 + 119 | 11 | 泰州市 | 19 | 3 +(19 rows) + +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY NOCYCLE PRIOR id=pid AND level<4; + id | pid | name | rownum | level +-----+-----+--------+--------+------- + 1 | 0 | 中国 | 1 | 1 + 11 | 1 | 江苏省 | 2 | 2 + 111 | 11 | 南京市 | 3 | 3 + 112 | 11 | 宿迁市 | 4 | 3 + 113 | 11 | 徐州市 | 5 | 3 + 114 | 11 | 苏州市 | 6 | 3 + 115 | 11 | 盐城市 | 7 | 3 + 117 | 11 | 常州市 | 8 | 3 + 116 | 11 | 无锡市 | 9 | 3 + 118 | 11 | 连云港 | 10 | 3 + 119 | 11 | 泰州市 | 11 | 3 + 12 | 1 | 山东省 | 12 | 2 + 13 | 1 | 安徽省 | 13 | 2 + 14 | 1 | 河南省 | 14 | 2 + 15 | 1 | 河北省 | 15 | 2 + 16 | 1 | 湖南省 | 16 | 2 + 17 | 1 | 湖北省 | 17 | 2 + 18 | 1 | 贵州省 | 18 | 2 + 19 | 1 | 武汉省 | 19 | 2 +(19 rows) + +-- test sw dfx +drop table if exists sw_dummy; +create table sw_dummy(swid int); +insert into sw_dummy values(1); +explain performance select * from sw_dummy connect by level < 50; +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* + Start With Iteration Statistics: +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* +--?.* + +--?.* +(42 rows) + +drop table sw_dummy; +--test null pointers in connect by walker +explain select * from t1 connect by exists(select distinct (select id from t1)); + QUERY PLAN +------------------------------------------------------------------------------------------- + CTE Scan on tmp_reuslt (cost=293.65..455.83 rows=8109 width=40) + CTE tmp_reuslt + -> StartWith Operator (cost=0.00..293.64 rows=8109 width=10) + Start With pseudo atts: RUITR + -> Recursive Union (cost=0.00..293.64 rows=8109 width=10) + -> Seq Scan on t1 (cost=0.00..1.09 rows=9 width=10) + -> Nested Loop (cost=0.00..13.04 rows=810 width=10) + -> WorkTable Scan on tmp_reuslt (cost=0.00..1.80 rows=90 width=0) + -> Materialize (cost=0.00..1.14 rows=9 width=10) + -> Seq Scan on t1 (cost=0.00..1.09 rows=9 width=10) + InitPlan 2 (returns $2) + -> Result (cost=0.00..0.01 rows=1 
width=0) +(12 rows) + +--test join + where for start with .. connect by +select t1.id,t1.pid,t2.id from test_hcb_ptb t1 join test_hcb_ptb t2 on t1.id=t2.id where t1.id>1 start with t1.id=141 connect by prior t2.id=t1.pid; + id | pid | id +-----+-----+----- + 141 | 131 | 141 + 151 | 141 | 151 + 152 | 141 | 152 + 153 | 141 | 153 + 154 | 141 | 154 + 155 | 141 | 155 + 156 | 141 | 156 + 157 | 141 | 157 + 158 | 141 | 158 + 159 | 141 | 159 + 161 | 151 | 161 + 162 | 151 | 162 + 163 | 151 | 163 + 164 | 151 | 164 + 165 | 151 | 165 + 166 | 151 | 166 + 167 | 151 | 167 + 168 | 151 | 168 + 169 | 151 | 169 +(19 rows) + +create or replace function prior(id int) returns int + LANGUAGE plpgsql AS $$ + begin + return id*3; + end; + $$; +select id,pid,prior(level) from test_hcb_ptb where prior(id)>10 start + with id=141 connect by prior pid=id; + id | pid | prior +-----+-----+------- + 141 | 131 | 3 + 131 | 121 | 6 + 121 | 111 | 9 + 111 | 11 | 12 + 11 | 1 | 15 +(5 rows) + +select prior(1+1); + prior +------- + 6 +(1 row) + +select prior(1); + prior +------- + 3 +(1 row) + +select prior(1,1); +ERROR: function prior(integer, integer) does not exist +LINE 1: select prior(1,1); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +CONTEXT: referenced column: prior +drop function prior(int); +--test dfs rownum +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY NOCYCLE PRIOR id=pid AND rownum<7; + id | pid | name | rownum | level +-----+-----+----------+--------+------- + 1 | 0 | 中国 | 1 | 1 + 11 | 1 | 江苏省 | 2 | 2 + 111 | 11 | 南京市 | 3 | 3 + 121 | 111 | 江宁区 | 4 | 4 + 131 | 121 | 东山街 | 5 | 5 + 141 | 131 | 江南摩卡 | 6 | 6 +(6 rows) + +--test subquery pushdown +SELECT subq_0.c1 as c0 +from + (SELECT + 30 as c0, + ref_0.id as c1 + from + test_hcb_ptb as ref_0 + WHERE false) as subq_0 +WHERE true CONNECT BY EXISTS ( + SELECT + pg_stat_get_partition_tuples_inserted(subq_0.c0) as c1 + from + test_hcb_ptb as ref_7 +) +LIMIT 169; +ERROR: Unsupported subquery found in connect by clause. 
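For orientation before the next file: the START WITH / CONNECT BY tests above all exercise the same hierarchical-query building blocks (PRIOR, LEVEL, connect_by_root(), sys_connect_by_path()). The following is a minimal, self-contained sketch of how they compose; the region table, its rows, and the column aliases are illustrative only and are not part of the regression suite.

-- A hypothetical three-level hierarchy, traversed child-to-root:
-- PRIOR pid = id means the next level's id must equal the current row's pid.
create table region(id int, pid int, name varchar(80));
insert into region values (1, 0, 'root'), (11, 1, 'branch'), (111, 11, 'leaf');
select id, pid, name,
       level,                                  -- LEVEL counts traversal depth, starting at 1
       connect_by_root(id) as root_id,         -- pins the START WITH row's id (111 here) on every row
       sys_connect_by_path(name, '/') as path  -- accumulates visited names, e.g. '/leaf/branch/root'
from region
start with id = 111
connect by prior pid = id;
drop table region;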
diff --git a/src/test/regress/expected/test_astore_multixact.out b/src/test/regress/expected/test_astore_multixact.out index 3f8077df6..932acff55 100644 --- a/src/test/regress/expected/test_astore_multixact.out +++ b/src/test/regress/expected/test_astore_multixact.out @@ -94,34 +94,6 @@ perform pg_sleep(3); end; / \parallel off -insert into astore_mult1 values (2, 2); -\parallel on 2 -begin -perform * from astore_mult1 where a = 2 for key share; -perform pg_sleep(2); -delete from astore_mult1 where a = 2; -end; -/ -begin -update astore_mult1 set b = 2 where a = 2; -perform pg_sleep(3); -end; -/ -\parallel off -insert into astore_mult1 values (2, 2); -\parallel on 2 -begin -perform * from astore_mult1 where a = 2 for key share; -perform pg_sleep(2); -delete from astore_mult1 where a = 2; -end; -/ -begin -update astore_mult1 set b = 2 where a = 2; -perform pg_sleep(3); -end; -/ -\parallel off vacuum freeze astore_mult1; vacuum freeze astore_mult2; drop table astore_mult2; diff --git a/src/test/regress/expected/test_ustore_index.out b/src/test/regress/expected/test_ustore_index.out index c6186498a..c7ef3a4d3 100644 --- a/src/test/regress/expected/test_ustore_index.out +++ b/src/test/regress/expected/test_ustore_index.out @@ -19,10 +19,10 @@ insert into t2 values(5, 2); insert into t2 values(1, 6); -- Test IndexOnlyScan explain select c1, c3 from t1 where c1 = 2; - QUERY PLAN ----------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------- [Bypass] - Index Only Scan using t1i1 on t1 (cost=0.00..24.36 rows=6 width=40) + Index Only Scan using t1i1 on t1 (cost=0.00..4.35 rows=6 width=40) Index Cond: (c1 = 2) (3 rows) @@ -36,7 +36,7 @@ explain select c1 from t2 order by c1; QUERY PLAN ------------------------------------------------------------------------ [Bypass] - Index Only Scan using t2i1 on t2 (cost=0.00..80.49 rows=2149 width=4) + Index Only Scan using t2i1 on t2 (cost=0.00..40.48 rows=2149 width=4) (2 rows) select c1 from t2 order by c1; @@ -435,11 +435,12 @@ select a from t where a < 10; set enable_indexscan = off; set enable_indexonlyscan = on; explain select a from t where a < 10; - QUERY PLAN -------------------------------------------------------------------------- - Seq Scan on t (cost=10000000000.00..1000000003686.25 rows=716 width=4) - Filter: (a < 10) -(2 rows) + QUERY PLAN +---------------------------------------------------------------------------------------------- + [Bypass] + Index Only Scan using t_a_idx on t (cost=10000000000.00..1000000001678.00 rows=716 width=4) + Index Cond: (a < 10) +(3 rows) select a from t where a < 10; a @@ -548,7 +549,7 @@ explain SELECT /*+ indexonlyscan(t1) */c1 FROM t1 WHERE c1 = 50; QUERY PLAN ---------------------------------------------------------------------------- [Bypass] - Index Only Scan using ustore_index on t1 (cost=0.00..8.27 rows=1 width=4) + Index Only Scan using ustore_index on t1 (cost=0.00..4.27 rows=1 width=4) Index Cond: (c1 = 50) (3 rows) @@ -556,14 +557,14 @@ reset sql_beta_feature; show sql_beta_feature; sql_beta_feature ------------------ - none + a_style_coerce (1 row) explain SELECT c1 FROM t1 WHERE c1 = 50; QUERY PLAN ---------------------------------------------------------------------------- [Bypass] - Index Only Scan using ustore_index on t1 (cost=0.00..8.27 rows=1 width=4) + Index Only Scan using ustore_index on t1 (cost=0.00..4.27 rows=1 width=4) Index Cond: (c1 = 50) (3 rows) @@ -578,7 +579,7 @@ explain SELECT /*+ 
indexonlyscan(t1) */c1 FROM t1 WHERE c1 = 50;
 QUERY PLAN
 ----------------------------------------------------------------------------
 [Bypass]
- Index Only Scan using ustore_index on t1 (cost=0.00..8.27 rows=1 width=4)
+ Index Only Scan using ustore_index on t1 (cost=0.00..4.27 rows=1 width=4)
 Index Cond: (c1 = 50)
 (3 rows)

diff --git a/src/test/regress/expected/test_ustore_index_cache_rightpage.out b/src/test/regress/expected/test_ustore_index_cache_rightpage.out
new file mode 100644
index 000000000..e29c8b19e
--- /dev/null
+++ b/src/test/regress/expected/test_ustore_index_cache_rightpage.out
@@ -0,0 +1,245 @@
+-- test fastpath mechanism for index insertion
+create table fastpath (a int, b text, c numeric) with (storage_type=USTORE);
+create unique index fpindex1 on fastpath(a);
+insert into fastpath values (1, 'b1', 100.00);
+insert into fastpath values (1, 'b1', 100.00); -- unique key check
+ERROR: duplicate key value violates unique constraint "fpindex1"
+DETAIL: Key (a)=(1) already exists.
+truncate fastpath;
+insert into fastpath select generate_series(1,10000), 'b', 100;
+-- vacuum the table so as to improve chances of index-only scans. We can't
+-- guarantee whether index-only scans will be picked up in all cases or not, but
+-- that fuzziness actually helps the test.
+vacuum fastpath;
+set enable_seqscan to false;
+set enable_bitmapscan to false;
+select sum(a) from fastpath where a >= 5000 and a < 5700;
+ sum
+---------
+ 3744650
+(1 row)
+
+select sum(a) from fastpath where a >= 5000 and a < 5700;
+ sum
+---------
+ 3744650
+(1 row)
+
+select sum(a) from fastpath where a = 6456;
+ sum
+------
+ 6456
+(1 row)
+
+select sum(a) from fastpath where a >= 5000 and a < 5700;
+ sum
+---------
+ 3744650
+(1 row)
+
+-- drop the only index on the table and compute hashes for
+-- a few queries which order the results in various different ways.
+drop index fpindex1;
+truncate fastpath;
+insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y;
+select md5(string_agg(a::text, b order by a, b asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 0cc0eb7a79985543787ff6bf54ad3dec
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+-- now create a multi-column index with both columns asc
+create index fpindex2 on fastpath(a, b);
+truncate fastpath;
+insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y;
+-- again, vacuum here either forces index-only scans or creates fuzziness
+vacuum fastpath;
+select md5(string_agg(a::text, b order by a, b asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 0cc0eb7a79985543787ff6bf54ad3dec
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+-- same queries with a different kind of index now. The final result must not
+-- change irrespective of what kind of index we have. 
+drop index fpindex2; +create index fpindex3 on fastpath(a desc, b asc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2fcf8e6fabfe23e2f0eca7f8222e6d51 +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 0cc0eb7a79985543787ff6bf54ad3dec +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2fcf8e6fabfe23e2f0eca7f8222e6d51 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2fcf8e6fabfe23e2f0eca7f8222e6d51 +(1 row) + +-- repeat again +drop index fpindex3; +create index fpindex4 on fastpath(a asc, b desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2fcf8e6fabfe23e2f0eca7f8222e6d51 +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 0cc0eb7a79985543787ff6bf54ad3dec +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2fcf8e6fabfe23e2f0eca7f8222e6d51 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2fcf8e6fabfe23e2f0eca7f8222e6d51 +(1 row) + +-- and again, this time indexing by (b, a). Note that column "b" has non-unique +-- values. 
+drop index fpindex4;
+create index fpindex5 on fastpath(b asc, a desc);
+truncate fastpath;
+insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y;
+vacuum fastpath;
+select md5(string_agg(a::text, b order by a, b asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 0cc0eb7a79985543787ff6bf54ad3dec
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+-- one last time
+drop index fpindex5;
+create index fpindex6 on fastpath(b desc, a desc);
+truncate fastpath;
+insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y;
+vacuum fastpath;
+select md5(string_agg(a::text, b order by a, b asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 0cc0eb7a79985543787ff6bf54ad3dec
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a desc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+select md5(string_agg(a::text, b order by b, a asc)) from fastpath
+ where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3';
+ md5
+----------------------------------
+ 2fcf8e6fabfe23e2f0eca7f8222e6d51
+(1 row)
+
+drop table fastpath;
diff --git a/src/test/regress/expected/test_ustore_index_including.out b/src/test/regress/expected/test_ustore_index_including.out
new file mode 100644
index 000000000..b6f5d8223
--- /dev/null
+++ b/src/test/regress/expected/test_ustore_index_including.out
@@ -0,0 +1,339 @@
+/*
+ * 1. Test CREATE INDEX
+ */
+-- Regular index with included columns
+set enable_default_ustore_table = on;
+CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box);
+INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+CREATE INDEX tbl_idx ON tbl using ubtree (c1, c2) INCLUDE (c3,c4);
+-- must fail because of intersection of key and included columns
+CREATE INDEX tbl_idx ON tbl using ubtree (c1, c2) INCLUDE (c1,c3);
+ERROR: included columns must not intersect with key columns
+SELECT pg_get_indexdef(i.indexrelid)
+FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid
+WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname;
+ pg_get_indexdef
+----------------------------------------------------------------------------------------------------
+ CREATE INDEX tbl_idx ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default
+(1 row)
+
+DROP TABLE tbl;
+-- Unique index and unique constraint
+CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box);
+INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM 
generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl add UNIQUE USING INDEX tbl_idx_unique; +ALTER TABLE tbl add UNIQUE (c1, c2) INCLUDE (c3, c4); +NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; + pg_get_indexdef +----------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default + CREATE UNIQUE INDEX tbl_idx_unique ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(2 rows) + +DROP TABLE tbl; +-- Unique index and unique constraint. Both must fail. +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_idx_unique" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +ALTER TABLE tbl add UNIQUE (c1, c2) INCLUDE (c3, c4); +NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +ERROR: could not create unique index "tbl_c1_c2_c3_c4_key" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +DROP TABLE tbl; +-- PK constraint +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "tbl_pkey" for table "tbl" +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; + pg_get_indexdef +------------------------------------------------------------------------------------------------------------ + CREATE UNIQUE INDEX tbl_pkey ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(1 row) + +DROP TABLE tbl; +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl add PRIMARY KEY USING INDEX tbl_idx_unique; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; + pg_get_indexdef +------------------------------------------------------------------------------------------------------------------ + CREATE UNIQUE INDEX tbl_idx_unique ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(1 row) + +DROP TABLE tbl; +-- PK constraint. Must fail. +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "tbl_pkey" for table "tbl" +ERROR: could not create unique index "tbl_pkey" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +DROP TABLE tbl; +/* + * 2. 
Test CREATE TABLE with constraint + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering UNIQUE(c1,c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "covering" for table "tbl" +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+------------- + covering | 4 | 2 | t | f | 1 2 3 4 | 10179 10179 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey | conincluding +----------------------------------+----------+--------+-------------- + UNIQUE (c1, c2) INCLUDE (c3, c4) | covering | {1,2} | {3,4} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "covering" +DETAIL: Key (c1, c2)=(1, 2) already exists. +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "covering" for table "tbl" +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+------------- + covering | 4 | 2 | t | t | 1 2 3 4 | 10179 10179 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey | conincluding +---------------------------------------+----------+--------+-------------- + PRIMARY KEY (c1, c2) INCLUDE (c3, c4) | covering | {1,2} | {3,4} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "covering" +DETAIL: Key (c1, c2)=(1, 2) already exists. +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: null value in column "c2" violates not-null constraint +DETAIL: Failing row contains (1, null, 3, (4,4),(4,4)). 
+INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + UNIQUE(c1,c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +---------------------+----------+-------------+-------------+--------------+---------+------------- + tbl_c1_c2_c3_c4_key | 4 | 2 | t | f | 1 2 3 4 | 10179 10179 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey | conincluding +----------------------------------+---------------------+--------+-------------- + UNIQUE (c1, c2) INCLUDE (c3, c4) | tbl_c1_c2_c3_c4_key | {1,2} | {3,4} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "tbl_c1_c2_c3_c4_key" +DETAIL: Key (c1, c2)=(1, 2) already exists. +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "tbl_pkey" for table "tbl" +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+------------- + tbl_pkey | 4 | 2 | t | t | 1 2 3 4 | 10179 10179 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey | conincluding +---------------------------------------+----------+--------+-------------- + PRIMARY KEY (c1, c2) INCLUDE (c3, c4) | tbl_pkey | {1,2} | {3,4} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "tbl_pkey" +DETAIL: Key (c1, c2)=(1, 2) already exists. +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: null value in column "c2" violates not-null constraint +DETAIL: Failing row contains (1, null, 3, (4,4),(4,4)). +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +/* + * 3.0 Test ALTER TABLE DROP COLUMN. + * Any column deletion leads to index deletion. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 int); +CREATE UNIQUE INDEX tbl_idx ON tbl using ubtree(c1, c2, c3, c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_idx ON tbl USING ubtree (c1, c2, c3, c4) WITH (storage_type=USTORE) TABLESPACE pg_default +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.1 Test ALTER TABLE DROP COLUMN. 
+ * Included column deletion leads to the index deletion, + * AS well AS key columns deletion. It's explained in documentation. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box); +CREATE UNIQUE INDEX tbl_idx ON tbl using ubtree(c1, c2) INCLUDE(c3,c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +----------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_idx ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.2 Test ALTER TABLE DROP COLUMN. + * Included column deletion leads to the index deletion. + * AS well AS key columns deletion. It's explained in documentation. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +----------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +ALTER TABLE tbl DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 4. CREATE INDEX + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,1000) AS x; +CREATE UNIQUE INDEX on tbl (c1, c2) INCLUDE (c3, c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +----------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_idx ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(2 rows) + +DROP TABLE tbl; +/* + * 5. 
REINDEX + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +----------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON tbl USING ubtree (c1, c2) WITH (storage_type=USTORE) TABLESPACE pg_default +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +REINDEX INDEX tbl_c1_c2_c3_c4_key; +ERROR: relation "tbl_c1_c2_c3_c4_key" does not exist +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +ALTER TABLE tbl DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 7. Check various AMs. All but ubtree must fail. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 box, c4 box); +CREATE INDEX on tbl USING brin(c1, c2) INCLUDE (c3, c4); +ERROR: access method "brin" does not support row store +CREATE INDEX on tbl USING gist(c3) INCLUDE (c4); +ERROR: gist index is not supported for ustore +CREATE INDEX on tbl USING spgist(c3) INCLUDE (c4); +ERROR: access method "spgist" does not support row store +CREATE INDEX on tbl USING gin(c1, c2) INCLUDE (c3, c4); +ERROR: gin index is not supported for ustore +CREATE INDEX on tbl USING hash(c1, c2) INCLUDE (c3, c4); +ERROR: access method "hash" does not support row store +CREATE INDEX on tbl USING rtree(c1, c2) INCLUDE (c3, c4); +ERROR: access method "rtree" does not support row store +CREATE INDEX on tbl USING ubtree(c1, c2) INCLUDE (c3, c4); +DROP TABLE tbl; +/* + * 8. Update, delete values in indexed table. + */ +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree(c1, c2) INCLUDE (c3,c4); +UPDATE tbl SET c1 = 100 WHERE c1 = 2; +UPDATE tbl SET c1 = 1 WHERE c1 = 3; +-- should fail +UPDATE tbl SET c2 = 2 WHERE c1 = 1; +ERROR: duplicate key value violates unique constraint "tbl_idx_unique" +DETAIL: Key (c1, c2)=(1, 2) already exists. +UPDATE tbl SET c3 = 1; +DELETE FROM tbl WHERE c1 = 5 OR c3 = 12; +DROP TABLE tbl; +/* + * 9. Alter column type. 
+ */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tbl_c1_c2_c3_c4_key" for table "tbl" +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl ALTER c1 TYPE bigint; +ALTER TABLE tbl ALTER c3 TYPE bigint; +\d tbl + Table "public.tbl" + Column | Type | Modifiers +--------+---------+----------- + c1 | bigint | + c2 | integer | + c3 | bigint | + c4 | box | +Indexes: + "tbl_c1_c2_c3_c4_key" UNIQUE CONSTRAINT, ubtree (c1, c2) WITH (storage_type=ustore) TABLESPACE pg_default + +DROP TABLE tbl; +set enable_default_ustore_table = off; diff --git a/src/test/regress/expected/test_ustore_lock.out b/src/test/regress/expected/test_ustore_lock.out index fc5941651..4f53d6a35 100644 --- a/src/test/regress/expected/test_ustore_lock.out +++ b/src/test/regress/expected/test_ustore_lock.out @@ -1,6 +1,23 @@ -- test simple lock for update create table test_lock_for_update (c1 int) with (storage_type=USTORE); insert into test_lock_for_update values (1); +-- test for update/no key update/share/key share +select c1 from test_lock_for_update where c1 = 1 for update; + c1 +---- + 1 +(1 row) + +select c1 from test_lock_for_update where c1 = 1 for no key update; +ERROR: For Key Share and For No Key Update is not support for ustore. +select c1 from test_lock_for_update where c1 = 1 for share; + c1 +---- + 1 +(1 row) + +select c1 from test_lock_for_update where c1 = 1 for key share; +ERROR: For Key Share and For No Key Update is not support for ustore. \parallel on 2 begin delete from test_lock_for_update; diff --git a/src/test/regress/expected/test_ustore_undo_view.out b/src/test/regress/expected/test_ustore_undo_view.out new file mode 100644 index 000000000..328da8aca --- /dev/null +++ b/src/test/regress/expected/test_ustore_undo_view.out @@ -0,0 +1,31 @@ +-- test test_ustore_undo_view +drop table if exists test_ustore_undo_view; +NOTICE: table "test_ustore_undo_view" does not exist, skipping +create table test_ustore_undo_view (c1 int) with (storage_type=USTORE); +insert into test_ustore_undo_view values(1); +select * from gs_undo_meta(0, -1, 0); + zoneId | persistType | insert | discard | end | used | lsn | pid +--------+-------------+------------------+------------------+------------------+------------------+------------------+------------ +--?.* +(1 row) + +select * from gs_undo_translot(0,-1); + groupId | xactId | startUndoPtr | endUndoPtr | lsn | slot_states +---------+------------------+------------------+------------------+------------------+------------- +--?.* +(1 row) + +checkpoint; +select * from gs_undo_translot(1,-1); + groupId | xactId | startUndoPtr | endUndoPtr | lsn | slot_states +---------+------------------+------------------+------------------+------------------+------------- +--?.* +(1 row) + +select * from gs_undo_record(24); + undoptr | xid | cid | reloid | relfilenode | utype | blkprev | blockno | uoffset | prevurp | payloadlen +---------+------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------+------------------ +--?.* +(1 row) + +drop table test_ustore_undo_view; diff --git a/src/test/regress/expected/test_ustore_undozone.out b/src/test/regress/expected/test_ustore_undozone.out new file mode 100644 index 000000000..be95b7512 --- /dev/null +++ b/src/test/regress/expected/test_ustore_undozone.out @@ -0,0 +1,46 @@ +-- test test_undozone +drop table 
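+-- A note on reading the counts below: gs_undo_meta(0, -1, 0) returns one
+-- row per attached undo zone. Each first INSERT into a fresh ustore table
+-- (regular, temp, or unlogged) raises the count by one, while the CREATE
+-- statements for the temp and unlogged tables leave it unchanged.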
if exists test_undozone; +NOTICE: table "test_undozone" does not exist, skipping +create table test_undozone (c1 int) with (storage_type=USTORE); +select count(*) from gs_undo_meta(0, -1, 0); + count +------- + 1 +(1 row) + +insert into test_undozone(c1) values(1); +select count(*) from gs_undo_meta(0, -1, 0); + count +------- + 2 +(1 row) + +create temp table test_undozone_tmp (c1 int) with (storage_type=USTORE); +select count(*) from gs_undo_meta(0, -1, 0); + count +------- + 2 +(1 row) + +insert into test_undozone_tmp(c1) values(1); +select count(*) from gs_undo_meta(0, -1, 0); + count +------- + 3 +(1 row) + +create unlogged table test_undozone_unlog (c1 int) with (storage_type=USTORE); +select count(*) from gs_undo_meta(0, -1, 0); + count +------- + 3 +(1 row) + +insert into test_undozone_unlog(c1) values(1); +select count(*) from gs_undo_meta(0, -1, 0); + count +------- + 4 +(1 row) + +drop table test_undozone; diff --git a/src/test/regress/expected/test_ustore_update.out b/src/test/regress/expected/test_ustore_update.out index f1f558619..cd22e038d 100644 --- a/src/test/regress/expected/test_ustore_update.out +++ b/src/test/regress/expected/test_ustore_update.out @@ -166,7 +166,7 @@ insert into t4 values(3, 'cde'); insert into t4 values(4, 'def'); insert into t4 values(5, 'efg'); commit; -select * from t4; +select * from t4 order by c1; c1 | c2 ----+----- 1 | abc @@ -179,18 +179,18 @@ select * from t4; start transaction; update t4 set c2 = 'aaaabbbbccccdddd' where c1 = 3; update t4 set c2 = 'aaaabbbbccccdddd' where c1 = 2; -select * from t4; +select * from t4 order by c1; c1 | c2 ----+------------------ 1 | abc + 2 | aaaabbbbccccdddd 3 | aaaabbbbccccdddd 4 | def 5 | efg - 2 | aaaabbbbccccdddd (5 rows) rollback; -select * from t4; +select * from t4 order by c1; c1 | c2 ----+----- 1 | abc diff --git a/src/test/regress/expected/toomanyparams.out b/src/test/regress/expected/toomanyparams.out new file mode 100644 index 000000000..d4879e892 --- /dev/null +++ b/src/test/regress/expected/toomanyparams.out @@ -0,0 +1,1010 @@ +create or replace package a as +procedure f( +a in int, +a1 in int, +a2 in int, +a3 in int, +a4 in int, +a5 in int, +a6 in int, +a7 in int, +a8 in int, +a9 in int, +a10 in int, +a11 in int, +a12 in int, +a13 in int, +a14 in int, +a15 in int, +a16 in int, +a17 in int, +a18 in int, +a19 in int, +a20 in int, +a21 in int, +a22 in int, +a23 in int, +a24 in int, +a25 in int, +a26 in int, +a27 in int, +a28 in int, +a29 in int, +a30 in int, +a31 in int, +a32 in int, +a33 in int, +a34 in int, +a35 in int, +a36 in int, +a37 in int, +a38 in int, +a39 in int, +a40 in int, +a41 in int, +a42 in int, +a43 in int, +a44 in int, +a45 in int, +a46 in int, +a47 in int, +a48 in int, +a49 in int, +a50 in int, +a51 in int, +a52 in int, +a53 in int, +a54 in int, +a55 in int, +a56 in int, +a57 in int, +a58 in int, +a59 in int, +a60 in int, +a61 in int, +a62 in int, +a63 in int, +a64 in int, +a65 in int, +a66 in int, +a67 in int, +a68 in int, +a69 in int, +a70 in int, +a71 in int, +a72 in int, +a73 in int, +a74 in int, +a75 in int, +a76 in int, +a77 in int, +a78 in int, +a79 in int, +a80 in int, +a81 in int, +a82 in int, +a83 in int, +a84 in int, +a85 in int, +a86 in int, +a87 in int, +a88 in int, +a89 in int, +a90 in int, +a91 in int, +a92 in int, +a93 in int, +a94 in int, +a95 in int, +a96 in int, +a97 in int, +a98 in int, +a99 in int, +a100 in int, +aa in int, +aa1 in int, +aa2 in int, +aa3 in int, +aa4 in int, +aa5 in int, +aa6 in int, +aa7 in int, +aa8 in int, +aa9 in int, +aa10 in int, 
+aa11 in int, +aa12 in int, +aa13 in int, +aa14 in int, +aa15 in int, +aa16 in int, +aa17 in int, +aa18 in int, +aa19 in int, +aa20 in int, +aa21 in int, +aa22 in int, +aa23 in int, +aa24 in int, +aa25 in int, +aa26 in int, +aa27 in int, +aa28 in int, +aa29 in int, +aa30 in int, +aa31 in int, +aa32 in int, +aa33 in int, +aa34 in int, +aa35 in int, +aa36 in int, +aa37 in int, +aa38 in int, +aa39 in int, +aa40 in int, +aa41 in int, +aa42 in int, +aa43 in int, +aa44 in int, +aa45 in int, +aa46 in int, +aa47 in int, +aa48 in int, +aa49 in int, +aa50 in int, +aa51 in int, +aa52 in int, +aa53 in int, +aa54 in int, +aa55 in int, +aa56 in int, +aa57 in int, +aa58 in int, +aa59 in int, +aa60 in int, +aa61 in int, +aa62 in int, +aa63 in int, +aa64 in int, +aa65 in int, +aa66 in int, +aa67 in int, +aa68 in int, +aa69 in int, +aa70 in int, +aa71 in int, +aa72 in int, +aa73 in int, +aa74 in int, +aa75 in int, +aa76 in int, +aa77 in int, +aa78 in int, +aa79 in int, +aa80 in int, +aa81 in int, +aa82 in int, +aa83 in int, +aa84 in int, +aa85 in int, +aa86 in int, +aa87 in int, +aa88 in int, +aa89 in int, +aa90 in int, +aa91 in int, +aa92 in int, +aa93 in int, +aa94 in int, +aa95 in int, +aa96 in int, +aa97 in int, +aa98 in int, +aa99 in int, +aa100 in int, +aaa in int, +aaa1 in int, +aaa2 in int, +aaa3 in int, +aaa4 in int, +aaa5 in int, +aaa6 in int, +aaa7 in int, +aaa8 in int, +aaa9 in int, +aaa10 in int, +aaa11 in int, +aaa12 in int, +aaa13 in int, +aaa14 in int, +aaa15 in int, +aaa16 in int, +aaa17 in int, +aaa18 in int, +aaa19 in int, +aaa20 in int, +aaa21 in int, +aaa22 in int, +aaa23 in int, +aaa24 in int, +aaa25 in int, +aaa26 in int, +aaa27 in int, +aaa28 in int, +aaa29 in int, +aaa30 in int, +aaa31 in int, +aaa32 in int, +aaa33 in int, +aaa34 in int, +aaa35 in int, +aaa36 in int, +aaa37 in int, +aaa38 in int, +aaa39 in int, +aaa40 in int, +aaa41 in int, +aaa42 in int, +aaa43 in int, +aaa44 in int, +aaa45 in int, +aaa46 in int, +aaa47 in int, +aaa48 in int, +aaa49 in int, +aaa50 in int, +aaa51 in int, +aaa52 in int, +aaa53 in int, +aaa54 in int, +aaa55 in int, +aaa56 in int, +aaa57 in int, +aaa58 in int, +aaa59 in int, +aaa60 in int, +aaa61 in int, +aaa62 in int, +aaa63 in int, +aaa64 in int, +aaa65 in int, +aaa66 in int, +aaa67 in int, +aaa68 in int, +aaa69 in int, +aaa70 in int, +aaa71 in int, +aaa72 in int, +aaa73 in int, +aaa74 in int, +aaa75 in int, +aaa76 in int, +aaa77 in int, +aaa78 in int, +aaa79 in int, +aaa80 in int, +aaa81 in int, +aaa82 in int, +aaa83 in int, +aaa84 in int, +aaa85 in int, +aaa86 in int, +aaa87 in int, +aaa88 in int, +aaa89 in int, +aaa90 in int, +aaa91 in int, +aaa92 in int, +aaa93 in int, +aaa94 in int, +aaa95 in int, +aaa96 in int, +aaa97 in int, +aaa98 in int, +aaa99 in int, +aaa100 in int, +aaaa in int, +aaaa1 in int, +aaaa2 in int, +aaaa3 in int, +aaaa4 in int, +aaaa5 in int, +aaaa6 in int, +aaaa7 in int, +aaaa8 in int, +aaaa9 in int, +aaaa10 in int, +aaaa11 in int, +aaaa12 in int, +aaaa13 in int, +aaaa14 in int, +aaaa15 in int, +aaaa16 in int, +aaaa17 in int, +aaaa18 in int, +aaaa19 in int, +aaaa20 in int, +aaaa21 in int, +aaaa22 in int, +aaaa23 in int, +aaaa24 in int, +aaaa25 in int, +aaaa26 in int, +aaaa27 in int, +aaaa28 in int, +aaaa29 in int, +aaaa30 in int, +aaaa31 in int, +aaaa32 in int, +aaaa33 in int, +aaaa34 in int, +aaaa35 in int, +aaaa36 in int, +aaaa37 in int, +aaaa38 in int, +aaaa39 in int, +aaaa40 in int, +aaaa41 in int, +aaaa42 in int, +aaaa43 in int, +aaaa44 in int, +aaaa45 in int, +aaaa46 in int, +aaaa47 in int, +aaaa48 in int, +aaaa49 in int, 
+aaaa50 in int, +aaaa51 in int, +aaaa52 in int, +aaaa53 in int, +aaaa54 in int, +aaaa55 in int, +aaaa56 in int, +aaaa57 in int, +aaaa58 in int, +aaaa59 in int, +aaaa60 in int, +aaaa61 in int, +aaaa62 in int, +aaaa63 in int, +aaaa64 in int, +aaaa65 in int, +aaaa66 in int, +aaaa67 in int, +aaaa68 in int, +aaaa69 in int, +aaaa70 in int, +aaaa71 in int, +aaaa72 in int, +aaaa73 in int, +aaaa74 in int, +aaaa75 in int, +aaaa76 in int, +aaaa77 in int, +aaaa78 in int, +aaaa79 in int, +aaaa80 in int, +aaaa81 in int, +aaaa82 in int, +aaaa83 in int, +aaaa84 in int, +aaaa85 in int, +aaaa86 in int, +aaaa87 in int, +aaaa88 in int, +aaaa89 in int, +aaaa90 in int, +aaaa91 in int, +aaaa92 in int, +aaaa93 in int, +aaaa94 in int, +aaaa95 in int, +aaaa96 in int, +aaaa97 in int, +aaaa98 in int, +aaaa99 in int, +aaaa100 in int, +b1 out int, +b2 out int, +b3 out int, +b4 out int, +b5 out int, +b6 out int, +b7 out int, +b8 out int, +b9 out int, +b10 out int, +b11 out int, +b12 out int, +b13 out int, +b14 out int, +b15 out int, +b16 out int, +b17 out int, +b18 out int, +b19 out int, +b20 out int, +b21 out int, +b22 out int, +b23 out int, +b24 out int, +b25 out int, +b26 out int, +b27 out int, +b28 out int, +b29 out int, +b30 out int, +b31 out int, +b32 out int, +b33 out int, +b34 out int, +b35 out int, +b36 out int, +b37 out int, +b38 out int, +b39 out int, +b40 out int, +b41 out int, +b42 out int, +b43 out int, +b44 out int, +b45 out int, +b46 out int, +b47 out int, +b48 out int, +b49 out int, +b50 out int, +b51 out int, +b52 out int, +b53 out int, +b54 out int, +b55 out int, +b56 out int, +b57 out int, +b58 out int, +b59 out int, +b60 out int, +b61 out int, +b62 out int, +b63 out int, +b64 out int, +b65 out int, +b66 out int, +b67 out int, +b68 out int, +b69 out int, +b70 out int, +b71 out int, +b72 out int, +b73 out int, +b74 out int, +b75 out int, +b76 out int, +b77 out int, +b78 out int, +b79 out int, +b80 out int, +b81 out int, +b82 out int, +b83 out int, +b84 out int, +b85 out int, +b86 out int, +b87 out int, +b88 out int, +b89 out int, +b90 out int, +b91 out int, +b92 out int, +b93 out int, +b94 out int, +b95 out int, +b96 out int, +b97 out int, +b98 out int, +b99 out int, +b100 out int, +bb1 out int, +bb2 out int, +bb3 out int, +bb4 out int, +bb5 out int, +bb6 out int, +bb7 out int, +bb8 out int, +bb9 out int, +bb10 out int, +bb11 out int, +bb12 out int, +bb13 out int, +bb14 out int, +bb15 out int, +bb16 out int, +bb17 out int, +bb18 out int, +bb19 out int, +bb20 out int, +bb21 out int, +bb22 out int, +bb23 out int, +bb24 out int, +bb25 out int, +bb26 out int, +bb27 out int, +bb28 out int, +bb29 out int, +bb30 out int, +bb31 out int, +bb32 out int, +bb33 out int, +bb34 out int, +bb35 out int, +bb36 out int, +bb37 out int, +bb38 out int, +bb39 out int, +bb40 out int, +bb41 out int, +bb42 out int, +bb43 out int, +bb44 out int, +bb45 out int, +bb46 out int, +bb47 out int, +bb48 out int, +bb49 out int, +bb50 out int, +bb51 out int, +bb52 out int, +bb53 out int, +bb54 out int, +bb55 out int, +bb56 out int, +bb57 out int, +bb58 out int, +bb59 out int, +bb60 out int, +bb61 out int, +bb62 out int, +bb63 out int, +bb64 out int, +bb65 out int, +bb66 out int, +bb67 out int, +bb68 out int, +bb69 out int, +bb70 out int, +bb71 out int, +bb72 out int, +bb73 out int, +bb74 out int, +bb75 out int, +bb76 out int, +bb77 out int, +bb78 out int, +bb79 out int, +bb80 out int, +bb81 out int, +bb82 out int, +bb83 out int, +bb84 out int, +bb85 out int, +bb86 out int, +bb87 out int, +bb88 out int, +bb89 out int, +bb90 out int, 
+bb91 out int, +bb92 out int, +bb93 out int, +bb94 out int, +bb95 out int, +bb96 out int, +bb97 out int, +bb98 out int, +bb99 out int, +bb100 out int, +bbb1 out int, +bbb2 out int, +bbb3 out int, +bbb4 out int, +bbb5 out int, +bbb6 out int, +bbb7 out int, +bbb8 out int, +bbb9 out int, +bbb10 out int, +bbb11 out int, +bbb12 out int, +bbb13 out int, +bbb14 out int, +bbb15 out int, +bbb16 out int, +bbb17 out int, +bbb18 out int, +bbb19 out int, +bbb20 out int, +bbb21 out int, +bbb22 out int, +bbb23 out int, +bbb24 out int, +bbb25 out int, +bbb26 out int, +bbb27 out int, +bbb28 out int, +bbb29 out int, +bbb30 out int, +bbb31 out int, +bbb32 out int, +bbb33 out int, +bbb34 out int, +bbb35 out int, +bbb36 out int, +bbb37 out int, +bbb38 out int, +bbb39 out int, +bbb40 out int, +bbb41 out int, +bbb42 out int, +bbb43 out int, +bbb44 out int, +bbb45 out int, +bbb46 out int, +bbb47 out int, +bbb48 out int, +bbb49 out int, +bbb50 out int, +bbb51 out int, +bbb52 out int, +bbb53 out int, +bbb54 out int, +bbb55 out int, +bbb56 out int, +bbb57 out int, +bbb58 out int, +bbb59 out int, +bbb60 out int, +bbb61 out int, +bbb62 out int, +bbb63 out int, +bbb64 out int, +bbb65 out int, +bbb66 out int, +bbb67 out int, +bbb68 out int, +bbb69 out int, +bbb70 out int, +bbb71 out int, +bbb72 out int, +bbb73 out int, +bbb74 out int, +bbb75 out int, +bbb76 out int, +bbb77 out int, +bbb78 out int, +bbb79 out int, +bbb80 out int, +bbb81 out int, +bbb82 out int, +bbb83 out int, +bbb84 out int, +bbb85 out int, +bbb86 out int, +bbb87 out int, +bbb88 out int, +bbb89 out int, +bbb90 out int, +bbb91 out int, +bbb92 out int, +bbb93 out int, +bbb94 out int, +bbb95 out int, +bbb96 out int, +bbb97 out int, +bbb98 out int, +bbb99 out int, +bbb100 out int, +bbbb1 out int, +bbbb2 out int, +bbbb3 out int, +bbbb4 out int, +bbbb5 out int, +bbbb6 out int, +bbbb7 out int, +bbbb8 out int, +bbbb9 out int, +bbbb10 out int, +bbbb11 out int, +bbbb12 out int, +bbbb13 out int, +bbbb14 out int, +bbbb15 out int, +bbbb16 out int, +bbbb17 out int, +bbbb18 out int, +bbbb19 out int, +bbbb20 out int, +bbbb21 out int, +bbbb22 out int, +bbbb23 out int, +bbbb24 out int, +bbbb25 out int, +bbbb26 out int, +bbbb27 out int, +bbbb28 out int, +bbbb29 out int, +bbbb30 out int, +bbbb31 out int, +bbbb32 out int, +bbbb33 out int, +bbbb34 out int, +bbbb35 out int, +bbbb36 out int, +bbbb37 out int, +bbbb38 out int, +bbbb39 out int, +bbbb40 out int, +bbbb41 out int, +bbbb42 out int, +bbbb43 out int, +bbbb44 out int, +bbbb45 out int, +bbbb46 out int, +bbbb47 out int, +bbbb48 out int, +bbbb49 out int, +bbbb50 out int, +bbbb51 out int, +bbbb52 out int, +bbbb53 out int, +bbbb54 out int, +bbbb55 out int, +bbbb56 out int, +bbbb57 out int, +bbbb58 out int, +bbbb59 out int, +bbbb60 out int, +bbbb61 out int, +bbbb62 out int, +bbbb63 out int, +bbbb64 out int, +bbbb65 out int, +bbbb66 out int, +bbbb67 out int, +bbbb68 out int, +bbbb69 out int, +bbbb70 out int, +bbbb71 out int, +bbbb72 out int, +bbbb73 out int, +bbbb74 out int, +bbbb75 out int, +bbbb76 out int, +bbbb77 out int, +bbbb78 out int, +bbbb79 out int, +bbbb80 out int, +bbbb81 out int, +bbbb82 out int, +bbbb83 out int, +bbbb84 out int, +bbbb85 out int, +bbbb86 out int, +bbbb87 out int, +bbbb88 out int, +bbbb89 out int, +bbbb90 out int, +bbbb91 out int, +bbbb92 out int, +bbbb93 out int, +bbbb94 out int, +bbbb95 out int, +bbbb96 out int, +bbbb97 out int, +bbbb98 out int, +bbbb99 out int, +bbbb100 out int, +abbbb1 out int, +abbbb2 out int, +abbbb3 out int, +abbbb4 out int, +abbbb5 out int, +abbbb6 out int, +abbbb7 out int, 
+abbbb8 out int, +abbbb9 out int, +abbbb10 out int, +abbbb11 out int, +abbbb12 out int, +abbbb13 out int, +abbbb14 out int, +abbbb15 out int, +abbbb16 out int, +abbbb17 out int, +abbbb18 out int, +abbbb19 out int, +abbbb20 out int, +abbbb21 out int, +abbbb22 out int, +abbbb23 out int, +abbbb24 out int, +abbbb25 out int, +abbbb26 out int, +abbbb27 out int, +abbbb28 out int, +abbbb29 out int, +abbbb30 out int, +abbbb31 out int, +abbbb32 out int, +abbbb33 out int, +abbbb34 out int, +abbbb35 out int, +abbbb36 out int, +abbbb37 out int, +abbbb38 out int, +abbbb39 out int, +abbbb40 out int, +abbbb41 out int, +abbbb42 out int, +abbbb43 out int, +abbbb44 out int, +abbbb45 out int, +abbbb46 out int, +abbbb47 out int, +abbbb48 out int, +abbbb49 out int, +abbbb50 out int, +abbbb51 out int, +abbbb52 out int, +abbbb53 out int, +abbbb54 out int, +abbbb55 out int, +abbbb56 out int, +abbbb57 out int, +abbbb58 out int, +abbbb59 out int, +abbbb60 out int, +abbbb61 out int, +abbbb62 out int, +abbbb63 out int, +abbbb64 out int, +abbbb65 out int, +abbbb66 out int, +abbbb67 out int, +abbbb68 out int, +abbbb69 out int, +abbbb70 out int, +abbbb71 out int, +abbbb72 out int, +abbbb73 out int, +abbbb74 out int, +abbbb75 out int, +abbbb76 out int, +abbbb77 out int, +abbbb78 out int, +abbbb79 out int, +abbbb80 out int, +abbbb81 out int, +abbbb82 out int, +abbbb83 out int, +abbbb84 out int, +abbbb85 out int, +abbbb86 out int, +abbbb87 out int, +abbbb88 out int, +abbbb89 out int, +abbbb90 out int, +abbbb91 out int, +abbbb92 out int, +abbbb93 out int, +abbbb94 out int, +abbbb95 out int, +abbbb96 out int, +abbbb97 out int, +abbbb98 out int, +abbbb99 out int, +abbbb100 out int, +aabbbb1 out int, +aabbbb2 out int, +aabbbb3 out int, +aabbbb4 out int, +aabbbb5 out int, +aabbbb6 out int, +aabbbb7 out int, +aabbbb8 out int, +aabbbb9 out int, +aabbbb10 out int, +aabbbb11 out int, +aabbbb12 out int, +aabbbb13 out int, +aabbbb14 out int, +aabbbb15 out int, +aabbbb16 out int, +aabbbb17 out int, +aabbbb18 out int, +aabbbb19 out int, +aabbbb20 out int, +aabbbb21 out int, +aabbbb22 out int, +aabbbb23 out int, +aabbbb24 out int, +aabbbb25 out int, +aabbbb26 out int, +aabbbb27 out int, +aabbbb28 out int, +aabbbb29 out int, +aabbbb30 out int, +aabbbb31 out int, +aabbbb32 out int, +aabbbb33 out int, +aabbbb34 out int, +aabbbb35 out int, +aabbbb36 out int, +aabbbb37 out int, +aabbbb38 out int, +aabbbb39 out int, +aabbbb40 out int, +aabbbb41 out int, +aabbbb42 out int, +aabbbb43 out int, +aabbbb44 out int, +aabbbb45 out int, +aabbbb46 out int, +aabbbb47 out int, +aabbbb48 out int, +aabbbb49 out int, +aabbbb50 out int, +aabbbb51 out int, +aabbbb52 out int, +aabbbb53 out int, +aabbbb54 out int, +aabbbb55 out int, +aabbbb56 out int, +aabbbb57 out int, +aabbbb58 out int, +aabbbb59 out int, +aabbbb60 out int, +aabbbb61 out int, +aabbbb62 out int, +aabbbb63 out int, +aabbbb64 out int, +aabbbb65 out int, +aabbbb66 out int, +aabbbb67 out int, +aabbbb68 out int, +aabbbb69 out int, +aabbbb70 out int, +aabbbb71 out int, +aabbbb72 out int, +aabbbb73 out int, +aabbbb74 out int, +aabbbb75 out int, +aabbbb76 out int, +aabbbb77 out int, +aabbbb78 out int, +aabbbb79 out int, +aabbbb80 out int, +aabbbb81 out int, +aabbbb82 out int, +aabbbb83 out int, +aabbbb84 out int, +aabbbb85 out int, +aabbbb86 out int, +aabbbb87 out int, +aabbbb88 out int, +aabbbb89 out int, +aabbbb90 out int, +aabbbb91 out int, +aabbbb92 out int, +aabbbb93 out int, +aabbbb94 out int, +aabbbb95 out int, +aabbbb96 out int, +aabbbb97 out int, +aabbbb98 out int, +aabbbb99 out int, 
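+-- Counting note: this spec declares 1004 parameters in all, 404 IN
+-- (the a, aa, aaa, and aaaa series) and 600 OUT (the b1..b100 through
+-- aabbbb1..aabbbb100 series), presumably to exercise the limit on
+-- procedure parameter counts (the file is named toomanyparams).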
+aabbbb100 out int +); + +end a; +/ diff --git a/src/test/regress/expected/tpch_vector_optimal.out b/src/test/regress/expected/tpch_vector_optimal.out new file mode 100644 index 000000000..0db3b4217 --- /dev/null +++ b/src/test/regress/expected/tpch_vector_optimal.out @@ -0,0 +1,1174 @@ +set try_vector_engine_strategy=optimal; +explain (costs off) +select + l_returnflag, + l_linestatus, + sum(l_quantity) as sum_qty, + sum(l_extendedprice) as sum_base_price, + sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, + avg(l_quantity) as avg_qty, + avg(l_extendedprice) as avg_price, + avg(l_discount) as avg_disc, + count(*) as count_order +from + lineitem +where + l_shipdate <= date '1998-12-01' - interval '3 day' +group by + l_returnflag, + l_linestatus +order by + l_returnflag, + l_linestatus +; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Row Adapter + -> Vector Sort + Sort Key: l_returnflag, l_linestatus + -> Vector Sonic Hash Aggregate + Group By Key: l_returnflag, l_linestatus + -> Vector Adapter(type: BATCH MODE) + Filter: (l_shipdate <= 'Sat Nov 28 00:00:00 1998'::timestamp without time zone) + -> Seq Scan on lineitem +(8 rows) + +explain (costs off) +select + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +from + part, + supplier, + partsupp, + nation, + region +where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and p_size = 15 + and p_type like 'SMALL%' + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE ' + and ps_supplycost = ( + select + min(ps_supplycost) + from + partsupp, + supplier, + nation, + region + where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE ' + ) +order by + s_acctbal desc, + n_name, + s_name, + p_partkey +limit 100 +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: public.supplier.s_acctbal DESC, public.nation.n_name, public.supplier.s_name, public.part.p_partkey + -> Nested Loop + Join Filter: (public.nation.n_regionkey = public.region.r_regionkey) + -> Nested Loop + Join Filter: (public.supplier.s_nationkey = public.nation.n_nationkey) + -> Nested Loop + Join Filter: (public.partsupp.ps_suppkey = public.supplier.s_suppkey) + -> Nested Loop + Join Filter: ((public.part.p_partkey = subquery."?column?") AND (public.partsupp.ps_supplycost = subquery.min)) + -> Hash Join + Hash Cond: (public.partsupp.ps_partkey = public.part.p_partkey) + -> Seq Scan on partsupp + -> Hash + -> Seq Scan on part + Filter: (((p_type)::text ~~ 'SMALL%'::text) AND (p_size = 15)) + -> Materialize + -> Subquery Scan on subquery + -> HashAggregate + Group By Key: public.partsupp.ps_partkey + -> Nested Loop + Join Filter: (public.nation.n_nationkey = public.supplier.s_nationkey) + -> Nested Loop + Join Filter: (public.nation.n_regionkey = public.region.r_regionkey) + -> Seq Scan on region + Filter: (r_name = 'EUROPE '::bpchar) + -> Seq Scan on nation + -> Materialize + -> Hash Join + Hash Cond: (public.partsupp.ps_suppkey = public.supplier.s_suppkey) + -> Hash Semi Join + Hash Cond: (public.partsupp.ps_partkey = public.part.p_partkey) + -> Seq Scan on partsupp + -> Hash + -> Seq Scan on part + Filter: (((p_type)::text ~~ 
'SMALL%'::text) AND (p_size = 15)) + -> Hash + -> Seq Scan on supplier + -> Seq Scan on supplier + -> Seq Scan on nation + -> Seq Scan on region + Filter: (r_name = 'EUROPE '::bpchar) +(43 rows) + +explain (costs off) +select + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate, + o_shippriority +from + customer, + orders, + lineitem +where + c_mktsegment = 'BUILDING' + and c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate < '1995-03-15'::date + and l_shipdate > '1995-03-15'::date +group by + l_orderkey, + o_orderdate, + o_shippriority +order by + revenue desc, + o_orderdate +limit 10 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (sum((lineitem.l_extendedprice * (1::numeric - lineitem.l_discount)))) DESC, orders.o_orderdate + -> HashAggregate + Group By Key: lineitem.l_orderkey, orders.o_orderdate, orders.o_shippriority + -> Hash Join + Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Seq Scan on lineitem + Filter: (l_shipdate > 'Wed Mar 15 00:00:00 1995'::timestamp(0) without time zone) + -> Hash + -> Hash Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Seq Scan on orders + Filter: (o_orderdate < 'Wed Mar 15 00:00:00 1995'::timestamp(0) without time zone) + -> Hash + -> Seq Scan on customer + Filter: (c_mktsegment = 'BUILDING'::bpchar) +(17 rows) + +explain (costs off) +select + o_orderpriority, + count(*) as order_count +from + orders +where + o_orderdate >= '1993-07-01'::date + and o_orderdate < '1993-07-01'::date + interval '3 month' + and exists ( + select + * + from + lineitem + where + l_orderkey = o_orderkey + and l_commitdate < l_receiptdate + ) +group by + o_orderpriority +order by + o_orderpriority; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: orders.o_orderpriority + -> HashAggregate + Group By Key: orders.o_orderpriority + -> Hash Right Semi Join + Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Seq Scan on lineitem + Filter: (l_commitdate < l_receiptdate) + -> Hash + -> Seq Scan on orders + Filter: ((o_orderdate >= 'Thu Jul 01 00:00:00 1993'::timestamp(0) without time zone) AND (o_orderdate < 'Fri Oct 01 00:00:00 1993'::timestamp without time zone)) +(11 rows) + +explain (costs off) +select + n_name, + sum(l_extendedprice * (1 - l_discount)) as revenue +from + customer, + orders, + lineitem, + supplier, + nation, + region +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and l_suppkey = s_suppkey + and c_nationkey = s_nationkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'ASIA' + and o_orderdate >= '1994-01-01'::date + and o_orderdate < '1994-01-01'::date + interval '1 year' +group by + n_name +order by + revenue desc; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (sum((lineitem.l_extendedprice * (1::numeric - lineitem.l_discount)))) DESC + -> HashAggregate + Group By Key: nation.n_name + -> Hash Join + Hash Cond: ((lineitem.l_suppkey = supplier.s_suppkey) AND (customer.c_nationkey = supplier.s_nationkey)) + -> Hash Join + Hash Cond: (lineitem.l_orderkey = 
orders.o_orderkey) + -> Seq Scan on lineitem + -> Hash + -> Hash Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Seq Scan on orders + Filter: ((o_orderdate >= 'Sat Jan 01 00:00:00 1994'::timestamp(0) without time zone) AND (o_orderdate < 'Sun Jan 01 00:00:00 1995'::timestamp without time zone)) + -> Hash + -> Hash Join + Hash Cond: (customer.c_nationkey = nation.n_nationkey) + -> Seq Scan on customer + -> Hash + -> Hash Join + Hash Cond: (nation.n_regionkey = region.r_regionkey) + -> Seq Scan on nation + -> Hash + -> Seq Scan on region + Filter: (r_name = 'ASIA'::bpchar) + -> Hash + -> Seq Scan on supplier +(27 rows) + +explain (costs off) +select + sum(l_extendedprice * l_discount) as revenue +from + lineitem +where + l_shipdate >= '1994-01-01'::date + and l_shipdate < '1994-01-01'::date + interval '1 year' + and l_discount >= 0.06 - 0.01 + and l_discount <= 0.06 + 0.01 + and l_quantity < 24; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Seq Scan on lineitem + Filter: ((l_shipdate >= 'Sat Jan 01 00:00:00 1994'::timestamp(0) without time zone) AND (l_shipdate < 'Sun Jan 01 00:00:00 1995'::timestamp without time zone) AND (l_discount >= .05) AND (l_discount <= .07) AND (l_quantity < 24::numeric)) +(3 rows) + +explain (costs off) +select + supp_nation, + cust_nation, + l_year, + sum(volume) as revenue +from + ( + select + n1.n_name as supp_nation, + n2.n_name as cust_nation, + extract(year from l_shipdate) as l_year, + l_extendedprice * (1 - l_discount) as volume + from + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2 + where + s_suppkey = l_suppkey + and o_orderkey = l_orderkey + and c_custkey = o_custkey + and s_nationkey = n1.n_nationkey + and c_nationkey = n2.n_nationkey + and ( + (n1.n_name = 'FRANCE ' and n2.n_name = 'GERMANY') + or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') + ) + and l_shipdate >= date '1995-01-01' + and l_shipdate <= date '1996-12-31' + ) as shipping +group by + supp_nation, + cust_nation, + l_year +order by + supp_nation, + cust_nation, + l_year; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group By Key: n1.n_name, n2.n_name, (date_part('year'::text, lineitem.l_shipdate)) + -> Sort + Sort Key: n1.n_name, n2.n_name, (date_part('year'::text, lineitem.l_shipdate)) + -> Hash Join + Hash Cond: ((lineitem.l_suppkey = supplier.s_suppkey) AND (n1.n_nationkey = supplier.s_nationkey)) + -> Hash Join + Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Seq Scan on lineitem + Filter: ((l_shipdate >= 'Sun Jan 01 00:00:00 1995'::timestamp(0) without time zone) AND (l_shipdate <= 'Tue Dec 31 00:00:00 1996'::timestamp(0) without time zone)) + -> Hash + -> Hash Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Seq Scan on orders + -> Hash + -> Hash Join + Hash Cond: (customer.c_nationkey = n2.n_nationkey) + -> Seq Scan on customer + -> Hash + -> Nested Loop + Join Filter: (((n1.n_name = 'FRANCE '::bpchar) AND (n2.n_name = 'GERMANY'::bpchar)) OR ((n1.n_name = 'GERMANY'::bpchar) AND (n2.n_name = 'FRANCE'::bpchar))) + -> Seq Scan on nation n1 + -> Materialize + -> Seq Scan on nation n2 + -> Hash 
+ -> Seq Scan on supplier +(26 rows) + +explain (costs off) +select + o_year, + sum(case + when nation = 'BRAZIL ' then volume + else 0 + end) / sum(volume) as mkt_share +from + ( + select + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) as volume, + n2.n_name as nation + from + part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + where + p_partkey = l_partkey + and s_suppkey = l_suppkey + and l_orderkey = o_orderkey + and o_custkey = c_custkey + and c_nationkey = n1.n_nationkey + and n1.n_regionkey = r_regionkey + and r_name = 'AMERICA' + and s_nationkey = n2.n_nationkey + and o_orderdate >= date '1995-01-01' + and o_orderdate <= date '1996-12-31' + and p_type = 'ECONOMY ANODIZED STEEL' + ) as all_nations +group by + o_year +order by + o_year; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (date_part('year'::text, orders.o_orderdate)) + -> HashAggregate + Group By Key: date_part('year'::text, orders.o_orderdate) + -> Nested Loop + Join Filter: (supplier.s_nationkey = n2.n_nationkey) + -> Seq Scan on nation n2 + -> Materialize + -> Hash Join + Hash Cond: (lineitem.l_suppkey = supplier.s_suppkey) + -> Hash Join + Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Hash Join + Hash Cond: (lineitem.l_partkey = part.p_partkey) + -> Seq Scan on lineitem + -> Hash + -> Seq Scan on part + Filter: ((p_type)::text = 'ECONOMY ANODIZED STEEL'::text) + -> Hash + -> Hash Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Seq Scan on orders + Filter: ((o_orderdate >= 'Sun Jan 01 00:00:00 1995'::timestamp(0) without time zone) AND (o_orderdate <= 'Tue Dec 31 00:00:00 1996'::timestamp(0) without time zone)) + -> Hash + -> Hash Join + Hash Cond: (customer.c_nationkey = n1.n_nationkey) + -> Seq Scan on customer + -> Hash + -> Hash Join + Hash Cond: (n1.n_regionkey = region.r_regionkey) + -> Seq Scan on nation n1 + -> Hash + -> Seq Scan on region + Filter: (r_name = 'AMERICA'::bpchar) + -> Hash + -> Seq Scan on supplier +(36 rows) + +explain (costs off) +select + nation, + o_year, + sum(amount) as sum_profit +from + ( + select + n_name as nation, + o_orderdate as o_year, + l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount + from + part, + supplier, + lineitem, + partsupp, + orders, + nation + where + s_suppkey = l_suppkey + and ps_suppkey = l_suppkey + and ps_partkey = l_partkey + and p_partkey = l_partkey + and o_orderkey = l_orderkey + and s_nationkey = n_nationkey + and p_name like '%green%' + ) as profit +group by + nation, + o_year +order by + nation, + o_year desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group By Key: nation.n_name, orders.o_orderdate + -> Sort + Sort Key: nation.n_name, orders.o_orderdate DESC + -> Hash Join + Hash Cond: (lineitem.l_suppkey = supplier.s_suppkey) + -> Hash Join + Hash Cond: (orders.o_orderkey = lineitem.l_orderkey) + -> Seq Scan on orders + -> Hash + -> Hash Join + Hash Cond: ((partsupp.ps_suppkey = lineitem.l_suppkey) AND (partsupp.ps_partkey = lineitem.l_partkey)) + -> Seq Scan on partsupp + -> Hash + -> Hash Join + Hash Cond: (lineitem.l_partkey = part.p_partkey) + -> Seq Scan on lineitem + -> Hash + -> Seq Scan on part + Filter: 
((p_name)::text ~~ '%green%'::text) + -> Hash + -> Hash Join + Hash Cond: (supplier.s_nationkey = nation.n_nationkey) + -> Seq Scan on supplier + -> Hash + -> Seq Scan on nation +(26 rows) + +explain (costs off) +select + c_custkey, + c_name, + sum(l_extendedprice * (1 - l_discount)) as revenue, + c_acctbal, + n_name, + c_address, + c_phone, + c_comment +from + customer, + orders, + lineitem, + nation +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate >= date '1993-10-01' + and o_orderdate < date '1993-10-01' + interval '3 month' + and l_returnflag = 'R' + and c_nationkey = n_nationkey +group by + c_custkey, + c_name, + c_acctbal, + c_phone, + n_name, + c_address, + c_comment +order by + revenue desc +limit 20 +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (sum((lineitem.l_extendedprice * (1::numeric - lineitem.l_discount)))) DESC + -> HashAggregate + Group By Key: customer.c_custkey, customer.c_name, customer.c_acctbal, customer.c_phone, nation.n_name, customer.c_address, customer.c_comment + -> Hash Join + Hash Cond: (customer.c_nationkey = nation.n_nationkey) + -> Hash Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Hash Join + Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) + -> Seq Scan on lineitem + Filter: (l_returnflag = 'R'::bpchar) + -> Hash + -> Seq Scan on orders + Filter: ((o_orderdate >= 'Fri Oct 01 00:00:00 1993'::timestamp(0) without time zone) AND (o_orderdate < 'Sat Jan 01 00:00:00 1994'::timestamp without time zone)) + -> Hash + -> Seq Scan on customer + -> Hash + -> Seq Scan on nation +(20 rows) + +explain (costs off) +select + ps_partkey, + sum(ps_supplycost * ps_availqty) as value +from + partsupp, + supplier, + nation +where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by + ps_partkey having + sum(ps_supplycost * ps_availqty) > ( + select + sum(ps_supplycost * ps_availqty) * 0.0001 + from + partsupp, + supplier, + nation + where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' + ) +order by + value desc; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: (sum((public.partsupp.ps_supplycost * (public.partsupp.ps_availqty)::numeric))) DESC + InitPlan 1 (returns $0) + -> Aggregate + -> Hash Join + Hash Cond: (public.partsupp.ps_suppkey = public.supplier.s_suppkey) + -> Seq Scan on partsupp + -> Hash + -> Hash Join + Hash Cond: (public.supplier.s_nationkey = public.nation.n_nationkey) + -> Seq Scan on supplier + -> Hash + -> Seq Scan on nation + Filter: (n_name = 'GERMANY'::bpchar) + -> HashAggregate + Group By Key: public.partsupp.ps_partkey + Filter: (sum((public.partsupp.ps_supplycost * (public.partsupp.ps_availqty)::numeric)) > $0) + -> Hash Join + Hash Cond: (public.partsupp.ps_suppkey = public.supplier.s_suppkey) + -> Seq Scan on partsupp + -> Hash + -> Hash Join + Hash Cond: (public.supplier.s_nationkey = public.nation.n_nationkey) + -> Seq Scan on supplier + -> Hash + -> Seq Scan on nation + Filter: (n_name = 'GERMANY'::bpchar) +(27 rows) + +explain (costs off) +select + l_shipmode, + sum(case + when o_orderpriority = '1-URGENT' + or o_orderpriority = '2-HIGH' + then 1 + else 0 + end) as high_line_count, + sum(case + when o_orderpriority <> '1-URGENT' + 
and o_orderpriority <> '2-HIGH' + then 1 + else 0 + end) as low_line_count +from + orders, + lineitem +where + o_orderkey = l_orderkey + and l_shipmode IN ('MAIL ', 'SHIP ') + and l_commitdate < l_receiptdate + and l_shipdate < l_commitdate + and l_receiptdate >= date '1994-01-01' + and l_receiptdate < date '1994-01-01' + interval '1 year' +group by + l_shipmode +order by + l_shipmode; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: lineitem.l_shipmode + -> HashAggregate + Group By Key: lineitem.l_shipmode + -> Hash Join + Hash Cond: (orders.o_orderkey = lineitem.l_orderkey) + -> Seq Scan on orders + -> Hash + -> Seq Scan on lineitem + Filter: ((l_shipmode = ANY ('{"MAIL ","SHIP "}'::bpchar[])) AND (l_commitdate < l_receiptdate) AND (l_shipdate < l_commitdate) AND (l_receiptdate >= 'Sat Jan 01 00:00:00 1994'::timestamp(0) without time zone) AND (l_receiptdate < 'Sun Jan 01 00:00:00 1995'::timestamp without time zone)) +(10 rows) + +explain (costs off) +select + c_count, + count(*) as custdist +from + ( + select + c_custkey, + count(o_orderkey) + from + customer left outer join orders on + c_custkey = o_custkey + and o_comment not like '%special%request%' + group by + c_custkey + ) as c_orders (c_custkey, c_count) +group by + c_count +order by + custdist desc, + c_count desc; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: (count(*)) DESC, c_orders.c_count DESC + -> HashAggregate + Group By Key: c_orders.c_count + -> Subquery Scan on c_orders + -> HashAggregate + Group By Key: customer.c_custkey + -> Hash Right Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Seq Scan on orders + Filter: ((o_comment)::text !~~ '%special%request%'::text) + -> Hash + -> Seq Scan on customer +(13 rows) + +explain (costs off) +select + 100.00 * sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - l_discount) + else 0 + end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from + lineitem, + part +where + l_partkey = p_partkey + and l_shipdate >= date '1995-09-01' + and l_shipdate < date '1995-09-01' + interval '1 month' +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Row Adapter + -> Vector Aggregate + -> Vector Sonic Hash Join + Hash Cond: (lineitem.l_partkey = part.p_partkey) + -> Vector Adapter(type: BATCH MODE) + Filter: ((l_shipdate >= 'Fri Sep 01 00:00:00 1995'::timestamp(0) without time zone) AND (l_shipdate < 'Sun Oct 01 00:00:00 1995'::timestamp without time zone)) + -> Seq Scan on lineitem + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on part +(9 rows) + +explain (costs off) +with revenue (supplier_no, total_revenue) as +( + select + l_suppkey, + sum(l_extendedprice * (1 - l_discount)) + from + lineitem + where + l_shipdate >= date '1996-01-01' + and l_shipdate < date '1996-01-01' + interval '3 month' + group by + l_suppkey +) +select + s_suppkey, + s_name, + s_address, + s_phone, + total_revenue +from + supplier, + revenue +where + s_suppkey = supplier_no +order by + s_suppkey +; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Row Adapter + -> Vector Merge Join + Merge Cond: (supplier.s_suppkey = revenue.supplier_no) + -> Vector Sort + Sort Key: supplier.s_suppkey + -> Vector Adapter(type: BATCH MODE) + -> Seq Scan on supplier + -> Vector Sort + Sort Key: revenue.supplier_no + -> Vector Subquery Scan on revenue + -> Vector Sonic Hash Aggregate + Group By Key: lineitem.l_suppkey + -> Vector Adapter(type: BATCH MODE) + Filter: ((l_shipdate >= 'Mon Jan 01 00:00:00 1996'::timestamp(0) without time zone) AND (l_shipdate < 'Mon Apr 01 00:00:00 1996'::timestamp without time zone)) + -> Seq Scan on lineitem +(15 rows) + +explain (costs off) +select + p_brand, + p_type, + p_size, + count(ps_suppkey) as supplier_cnt +from + partsupp, + part +where + p_partkey = ps_partkey + and p_brand <> 'Brand#45' + and p_type not like 'MEDIUM POLISHED%' + and p_size in (49, 14, 23, 45, 19, 3, 36, 9) + and ps_suppkey not in ( + select + s_suppkey + from + supplier + where s_comment like '%Customer%Complaints%' + ) +group by + p_brand, + p_type, + p_size +order by + supplier_cnt desc, + p_brand, + p_type, + p_size +limit 100 +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (count(partsupp.ps_suppkey)) DESC, part.p_brand, part.p_type, part.p_size + -> HashAggregate + Group By Key: part.p_brand, part.p_type, part.p_size + -> Nested Loop Anti Join + Join Filter: (partsupp.ps_suppkey = supplier.s_suppkey) + -> Hash Join + Hash Cond: (partsupp.ps_partkey = part.p_partkey) + -> Seq Scan on partsupp + -> Hash + -> Seq Scan on part + Filter: ((p_brand <> 'Brand#45'::bpchar) AND ((p_type)::text !~~ 'MEDIUM POLISHED%'::text) AND (p_size = ANY ('{49,14,23,45,19,3,36,9}'::integer[]))) + -> Materialize + -> Seq Scan on supplier + Filter: ((s_comment)::text ~~ '%Customer%Complaints%'::text) +(16 rows) + +explain (costs off) +select + sum(l_extendedprice) / 7.0 as avg_yearly +from + lineitem, + part +where + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container = 'MED BOX' + and l_quantity < ( + select + 0.2 * avg(l_quantity) + from + lineitem + where + l_partkey = p_partkey + ) +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Nested Loop + Join Filter: (subquery."?column?" 
= public.part.p_partkey) + -> Seq Scan on part + Filter: ((p_brand = 'Brand#23'::bpchar) AND (p_container = 'MED BOX'::bpchar)) + -> Hash Join + Hash Cond: (public.lineitem.l_partkey = subquery."?column?") + Join Filter: (public.lineitem.l_quantity < (.2 * subquery."?column?")) + -> Seq Scan on lineitem + -> Hash + -> Subquery Scan on subquery + -> HashAggregate + Group By Key: public.lineitem.l_partkey + -> Hash Semi Join + Hash Cond: (public.lineitem.l_partkey = public.part.p_partkey) + -> Seq Scan on lineitem + -> Hash + -> Seq Scan on part + Filter: ((p_brand = 'Brand#23'::bpchar) AND (p_container = 'MED BOX'::bpchar)) +(19 rows) + +explain (costs off) +select + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice, + sum(l_quantity) +from + customer, + orders, + lineitem +where + o_orderkey in ( + select + l_orderkey + from + lineitem + group by + l_orderkey having + sum(l_quantity) > 300 + ) + and c_custkey = o_custkey + and o_orderkey = l_orderkey +group by + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice +order by + o_totalprice desc, + o_orderdate +limit 100; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: orders.o_totalprice DESC, orders.o_orderdate + -> HashAggregate + Group By Key: orders.o_totalprice, orders.o_orderdate, customer.c_name, customer.c_custkey, orders.o_orderkey + -> Hash Join + Hash Cond: (public.lineitem.l_orderkey = orders.o_orderkey) + -> Seq Scan on lineitem + -> Hash + -> Hash Join + Hash Cond: (orders.o_custkey = customer.c_custkey) + -> Hash Right Semi Join + Hash Cond: (public.lineitem.l_orderkey = orders.o_orderkey) + -> HashAggregate + Group By Key: public.lineitem.l_orderkey + Filter: (sum(public.lineitem.l_quantity) > 300::numeric) + -> Seq Scan on lineitem + -> Hash + -> Seq Scan on orders + -> Hash + -> Seq Scan on customer +(21 rows) + +explain (costs off) +select + sum(l_extendedprice* (1 - l_discount)) as revenue +from + lineitem, + part +where + ( + p_partkey = l_partkey + and substring(p_brand, 1, 7) = 'Brand#1' + and p_container in ('SM CASE', 'SM BOX ', 'SM PACK', 'SM PKG ') + and l_quantity >= 1 and l_quantity <= 1 + 10 + and p_size between 1 and 5 + and l_shipmode in ('AIR ', 'AIR REG') + and substring(l_shipinstruct, 1, 7) = 'DELIVER' + ) + or + ( + p_partkey = l_partkey + and substring(p_brand, 1, 7) = 'Brand#2' + and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') + and l_quantity >= 10 and l_quantity <= 10 + 10 + and p_size between 1 and 10 + and l_shipmode in ('AIR ', 'AIR REG') + and substring(l_shipinstruct, 1, 7) = 'DELIVER' + ) + or + ( + p_partkey = l_partkey + and substring(p_brand, 1, 7) = 'Brand#3' + and p_container in ('LG CASE', 'LG BOX ', 'LG PACK', 'LG PKG ') + and l_quantity >= 20 and l_quantity <= 20 + 10 + and p_size between 1 and 15 + and l_shipmode in ('AIR ', 'AIR REG') + and substring(l_shipinstruct, 1, 7) = 'DELIVER' + ); + QUERY PLAN 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (lineitem.l_partkey = part.p_partkey) + Join Filter: ((("substring"((part.p_brand)::text, 1, 7) = 'Brand#1'::text) AND (part.p_container = ANY ('{"SM CASE","SM BOX ","SM PACK","SM PKG "}'::bpchar[])) AND (lineitem.l_quantity >= 1::numeric) AND (lineitem.l_quantity <= 11::numeric) AND (part.p_size <= 5)) OR (("substring"((part.p_brand)::text, 1, 7) = 'Brand#2'::text) AND (part.p_container = ANY ('{"MED BAG","MED BOX","MED PKG","MED PACK"}'::bpchar[])) AND (lineitem.l_quantity >= 10::numeric) AND (lineitem.l_quantity <= 20::numeric) AND (part.p_size <= 10)) OR (("substring"((part.p_brand)::text, 1, 7) = 'Brand#3'::text) AND (part.p_container = ANY ('{"LG CASE","LG BOX ","LG PACK","LG PKG "}'::bpchar[])) AND (lineitem.l_quantity >= 20::numeric) AND (lineitem.l_quantity <= 30::numeric) AND (part.p_size <= 15))) + -> Seq Scan on lineitem + Filter: ((l_shipmode = ANY ('{"AIR ","AIR REG"}'::bpchar[])) AND ("substring"((l_shipinstruct)::text, 1, 7) = 'DELIVER'::text)) + -> Hash + -> Seq Scan on part + Filter: (p_size >= 1) +(9 rows) + +explain (costs off) +select + s_name, + s_address +from + supplier, + nation +where + s_suppkey in ( + select + ps_suppkey + from + partsupp + where + ps_partkey in ( + select + p_partkey + from + part + ) + and ps_availqty > ( + select + 0.5 * sum(l_quantity) + from + lineitem + where + l_partkey = ps_partkey + and l_suppkey = ps_suppkey + and l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1 year' + ) + ) + and s_nationkey = n_nationkey + and n_name = 'CANADA' +order by + s_name; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: supplier.s_name + -> Nested Loop + Join Filter: (supplier.s_nationkey = nation.n_nationkey) + -> Seq Scan on nation + Filter: (n_name = 'CANADA'::bpchar) + -> Hash Right Semi Join + Hash Cond: (lineitem.l_suppkey = supplier.s_suppkey) + -> Hash Join + Hash Cond: ((partsupp.ps_partkey = part.p_partkey) AND (partsupp.ps_suppkey = lineitem.l_suppkey)) + Join Filter: ((partsupp.ps_availqty)::numeric > (.5 * (sum(lineitem.l_quantity)))) + -> Seq Scan on partsupp + -> Hash + -> Hash Join + Hash Cond: (lineitem.l_partkey = part.p_partkey) + -> HashAggregate + Group By Key: lineitem.l_partkey, lineitem.l_suppkey + -> Seq Scan on lineitem + Filter: ((l_shipdate >= 'Sat Jan 01 00:00:00 1994'::timestamp(0) without time zone) AND (l_shipdate < 'Sun Jan 01 00:00:00 1995'::timestamp without time zone)) + -> Hash + -> HashAggregate + Group By Key: part.p_partkey + -> Seq Scan on part + -> Hash + -> Seq Scan on supplier 
+(25 rows) + +explain (costs off) +select + s_name, + count(*) as numwait +from + supplier, + lineitem l1, + orders, + nation +where + s_suppkey = l1.l_suppkey + and o_orderkey = l1.l_orderkey + and o_orderstatus = 'F' + and l1.l_receiptdate > l1.l_commitdate + and exists ( + select + * + from + lineitem l2 + where + l2.l_orderkey = l1.l_orderkey + and l2.l_suppkey <> l1.l_suppkey + ) + and not exists ( + select + * + from + lineitem l3 + where + l3.l_orderkey = l1.l_orderkey + and l3.l_suppkey <> l1.l_suppkey + and l3.l_receiptdate > l3.l_commitdate + ) + and s_nationkey = n_nationkey + and n_name = 'SAUDI ARABIA' +group by + s_name +order by + numwait desc, + s_name +limit 100; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (count(*)) DESC, supplier.s_name + -> HashAggregate + Group By Key: supplier.s_name + -> Hash Right Semi Join + Hash Cond: (l2.l_orderkey = orders.o_orderkey) + Join Filter: (l2.l_suppkey <> l1.l_suppkey) + -> Seq Scan on lineitem l2 + -> Hash + -> Hash Right Anti Join + Hash Cond: (l3.l_orderkey = l1.l_orderkey) + Join Filter: (l3.l_suppkey <> l1.l_suppkey) + -> Seq Scan on lineitem l3 + Filter: (l_receiptdate > l_commitdate) + -> Hash + -> Hash Join + Hash Cond: (orders.o_orderkey = l1.l_orderkey) + -> Seq Scan on orders + Filter: (o_orderstatus = 'F'::bpchar) + -> Hash + -> Hash Join + Hash Cond: (l1.l_suppkey = supplier.s_suppkey) + -> Seq Scan on lineitem l1 + Filter: (l_receiptdate > l_commitdate) + -> Hash + -> Hash Join + Hash Cond: (supplier.s_nationkey = nation.n_nationkey) + -> Seq Scan on supplier + -> Hash + -> Seq Scan on nation + Filter: (n_name = 'SAUDI ARABIA'::bpchar) +(32 rows) + +explain (costs off) +select + cntrycode, + count(*) as numcust, + sum(c_acctbal) as totacctbal +from + ( + select + substring(c_phone from 1 for 2) as cntrycode, + c_acctbal + from + customer + where + substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + and c_acctbal > ( + select + avg(c_acctbal) + from + customer + where + c_acctbal > 0.00 + and substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + ) + and not exists ( + select + * + from + orders + where + o_custkey = c_custkey + ) + ) as custsale +group by + cntrycode +order by + cntrycode; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group By Key: ("substring"((public.customer.c_phone)::text, 1, 2)) + InitPlan 1 (returns $0) + -> Row Adapter + -> Vector Aggregate + -> Vector Adapter(type: BATCH MODE) + Filter: ((c_acctbal > 0.00) AND ("substring"((c_phone)::text, 1, 2) = ANY ('{13,31,23,29,30,18,17}'::text[]))) + -> Seq Scan on customer + -> Sort + Sort Key: ("substring"((public.customer.c_phone)::text, 1, 2)) + -> Hash Right Anti Join + Hash Cond: (orders.o_custkey = public.customer.c_custkey) + -> Seq Scan on orders + -> Hash + -> Seq Scan on customer + Filter: ((c_acctbal > $0) AND ("substring"((c_phone)::text, 1, 2) = ANY ('{13,31,23,29,30,18,17}'::text[]))) +(16 rows) + diff --git a/src/test/regress/expected/trunc_func_for_date.out b/src/test/regress/expected/trunc_func_for_date.out new file mode 100644 index 000000000..bf786cb16 --- /dev/null +++ b/src/test/regress/expected/trunc_func_for_date.out @@ -0,0 +1,217 @@ +--- +--- data type 1 : timestamp +--- +-- format can recognize +select trunc(timestamp '2021-08-11 
20:19:39', 'cc'); -- century + trunc +-------------------------- + Mon Jan 01 00:00:00 2001 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'yyyy'); -- year + trunc +-------------------------- + Fri Jan 01 00:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'q'); -- quarter + trunc +-------------------------- + Thu Jul 01 00:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'mm'); -- month + trunc +-------------------------- + Sun Aug 01 00:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'j'); -- day + trunc +-------------------------- + Wed Aug 11 00:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'dd'); -- day + trunc +-------------------------- + Wed Aug 11 00:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'ddd'); -- day + trunc +-------------------------- + Wed Aug 11 00:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'hh'); -- hour + trunc +-------------------------- + Wed Aug 11 20:00:00 2021 +(1 row) + +select trunc(timestamp '2021-08-11 20:19:39', 'mi'); -- minute + trunc +-------------------------- + Wed Aug 11 20:19:00 2021 +(1 row) + +-- format can not recognize +select trunc(timestamp '2021-08-11 20:19:39', 'qq'); -- quarter +ERROR: timestamp units "qq" not recognized +CONTEXT: referenced column: trunc +select trunc(timestamp '2021-08-11 20:19:39', 'mmm'); -- month +ERROR: timestamp units "mmm" not recognized +CONTEXT: referenced column: trunc +select trunc(timestamp '2021-08-11 20:19:39', 'dddd'); -- day +ERROR: timestamp units "dddd" not recognized +CONTEXT: referenced column: trunc +select trunc(timestamp '2021-08-11 20:19:39', 'hhh'); -- hour +ERROR: timestamp units "hhh" not recognized +CONTEXT: referenced column: trunc +--- +--- data type 2 : timestamptz +--- +-- format can recognize +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'cc'); -- century + trunc +------------------------------ + Mon Jan 01 00:00:00 2001 PST +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'yyyy'); -- year + trunc +------------------------------ + Fri Jan 01 00:00:00 2021 PST +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'q'); -- quarter + trunc +------------------------------ + Thu Jul 01 00:00:00 2021 PDT +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'mm'); -- month + trunc +------------------------------ + Sun Aug 01 00:00:00 2021 PDT +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'j'); -- day + trunc +------------------------------ + Wed Aug 11 00:00:00 2021 PDT +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'dd'); -- day + trunc +------------------------------ + Wed Aug 11 00:00:00 2021 PDT +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'ddd'); -- day + trunc +------------------------------ + Wed Aug 11 00:00:00 2021 PDT +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'hh'); -- hour + trunc +------------------------------ + Wed Aug 11 17:00:00 2021 PDT +(1 row) + +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'mi'); -- minute + trunc +------------------------------ + Wed Aug 11 17:48:00 2021 PDT +(1 row) + +-- format can't recognize +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'qq'); -- quarter +ERROR: timestamp with time zone units "qq" not recognized +CONTEXT: referenced column: trunc +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'mmm'); -- month 
+ERROR: timestamp with time zone units "mmm" not recognized +CONTEXT: referenced column: trunc +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'dddd'); -- day +ERROR: timestamp with time zone units "dddd" not recognized +CONTEXT: referenced column: trunc +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'hhh'); -- hour +ERROR: timestamp with time zone units "hhh" not recognized +CONTEXT: referenced column: trunc +--- +--- data type 3 : interval +--- +-- format can recognize +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'cc'); -- century + trunc +------- + @ 0 +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'yyyy'); -- year + trunc +----------- + @ 2 years +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'q'); -- quarter + trunc +------------------ + @ 2 years 3 mons +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'mm'); -- month + trunc +------------------ + @ 2 years 3 mons +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'j'); -- day + trunc +------------------------- + @ 2 years 3 mons 4 days +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'dd'); -- day + trunc +------------------------- + @ 2 years 3 mons 4 days +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'ddd'); -- day + trunc +------------------------- + @ 2 years 3 mons 4 days +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'hh'); -- hour + trunc +--------------------------------- + @ 2 years 3 mons 4 days 5 hours +(1 row) + +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'mi'); -- minute + trunc +---------------------------------------- + @ 2 years 3 mons 4 days 5 hours 6 mins +(1 row) + +-- format can not recognize +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'qq'); -- quarter +ERROR: interval units "qq" not recognized +CONTEXT: referenced column: trunc +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'mmm'); -- month +ERROR: interval units "mmm" not recognized +CONTEXT: referenced column: trunc +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'dddd'); -- day +ERROR: interval units "dddd" not recognized +CONTEXT: referenced column: trunc +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'hhh'); -- hour +ERROR: interval units "hhh" not recognized +CONTEXT: referenced column: trunc +-- not supported +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'w'); -- week +ERROR: interval units "w" not supported +CONTEXT: referenced column: trunc diff --git a/src/test/regress/expected/truncate_gpi.out b/src/test/regress/expected/truncate_gpi.out new file mode 100644 index 000000000..d1d1ce427 --- /dev/null +++ b/src/test/regress/expected/truncate_gpi.out @@ -0,0 +1,1067 @@ +set datestyle = 'ISO, MDY'; +create materialized view pg_partition_before_truncate as + select oid, relname, reloptions, parentid, boundaries + from pg_partition where parentid = ( + select oid from pg_class where relname like 'tg_%' + ); +create view check_truncate_results as + select pg_class.relname tablename, + bef.relname relname, + bef.oid < aft.oid oid_changed, + bef.parentid = aft.parentid parentid_ok, + bef.boundaries = aft.boundaries boundaries_ok + from 
pg_partition_before_truncate bef, pg_partition aft, pg_class + where bef.relname = aft.relname + and bef.parentid = aft.parentid + and bef.parentid = pg_class.oid + order by bef.oid; +-- range +create table tg_range(a date, b int) +partition by range(a) +( + partition p1 values less than ('2022-01-31 00:00:00'), + partition p2 values less than ('2022-02-28 00:00:00'), + partition p3 values less than ('2022-03-31 00:00:00') +); +create index i_tg_range_global_b on tg_range(b) global; +create index i_tg_range_global_a_b on tg_range(a,b) global; +create index i_tg_range_local_a on tg_range(a) local; +insert into tg_range select '2022-1-5'::date+n1*'1 month'::interval+10*n2*'1 day'::interval, 10*(n1+1)+(n2+1) from generate_series(0,2) t1(n1), generate_series(0,2) t2(n2); +refresh materialized view pg_partition_before_truncate; +begin; +alter table tg_range truncate partition p1 update global index; +alter table tg_range truncate partition p2 update global index; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; + relname | reloptions | boundaries +----------+---------------------------------------------------+------------------------- + tg_range | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {"2022-01-31 00:00:00"} + p2 | {orientation=row,compression=no} | {"2022-02-28 00:00:00"} + p3 | {orientation=row,compression=no} | {"2022-03-31 00:00:00"} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; + relname | reloptions | boundaries +----------+---------------------------------------------------+------------------------- + tg_range | {orientation=row,compression=no,wait_clean_gpi=y} | + p3 | {orientation=row,compression=no} | {"2022-03-31 00:00:00"} + p1 | | {"2022-01-31 00:00:00"} + p2 | | {"2022-02-28 00:00:00"} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_range'; + tablename | relname | oid_changed | parentid_ok | boundaries_ok +-----------+----------+-------------+-------------+--------------- + tg_range | tg_range | f | t | + tg_range | p1 | t | t | t + tg_range | p2 | t | t | t + tg_range | p3 | f | t | t +(4 rows) + +select * from tg_range; + a | b +---------------------+---- + 2022-03-05 00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(3 rows) + +explain(costs off) select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; + QUERY PLAN +-------------------------------------------------- + Index Scan using i_tg_range_global_b on tg_range + Index Cond: (b < 40) +(2 rows) + +select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; + a | b +---------------------+---- + 2022-03-05 00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(3 rows) + +explain(costs off) select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; + QUERY PLAN +------------------------------------------------------------------------------ + Partition Iterator + Iterations: 3 + -> Partitioned Index Scan using i_tg_range_local_a on tg_range + Index Cond: (a < '2022-03-31 00:00:00'::timestamp without time zone) + Selected Partitions: 1..3 +(5 rows) + +select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; + a | b +---------------------+---- + 2022-03-05 
00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(3 rows) + +rollback; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; + relname | reloptions | boundaries +----------+---------------------------------------------------+------------------------- + tg_range | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {"2022-01-31 00:00:00"} + p2 | {orientation=row,compression=no} | {"2022-02-28 00:00:00"} + p3 | {orientation=row,compression=no} | {"2022-03-31 00:00:00"} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; + relname | reloptions | boundaries +----------+---------------------------------------------------+------------------------- + tg_range | {orientation=row,compression=no,wait_clean_gpi=y} | + p1 | {orientation=row,compression=no} | {"2022-01-31 00:00:00"} + p2 | {orientation=row,compression=no} | {"2022-02-28 00:00:00"} + p3 | {orientation=row,compression=no} | {"2022-03-31 00:00:00"} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_range'; + tablename | relname | oid_changed | parentid_ok | boundaries_ok +-----------+----------+-------------+-------------+--------------- + tg_range | tg_range | f | t | + tg_range | p1 | f | t | t + tg_range | p2 | f | t | t + tg_range | p3 | f | t | t +(4 rows) + +select * from tg_range; + a | b +---------------------+---- + 2022-01-05 00:00:00 | 11 + 2022-01-15 00:00:00 | 12 + 2022-01-25 00:00:00 | 13 + 2022-02-05 00:00:00 | 21 + 2022-02-15 00:00:00 | 22 + 2022-02-25 00:00:00 | 23 + 2022-03-05 00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(9 rows) + +explain(costs off) select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; + QUERY PLAN +-------------------------------------------------- + Index Scan using i_tg_range_global_b on tg_range + Index Cond: (b < 40) +(2 rows) + +select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; + a | b +---------------------+---- + 2022-01-05 00:00:00 | 11 + 2022-01-15 00:00:00 | 12 + 2022-01-25 00:00:00 | 13 + 2022-02-05 00:00:00 | 21 + 2022-02-15 00:00:00 | 22 + 2022-02-25 00:00:00 | 23 + 2022-03-05 00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(9 rows) + +explain(costs off) select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; + QUERY PLAN +------------------------------------------------------------------------------ + Partition Iterator + Iterations: 3 + -> Partitioned Index Scan using i_tg_range_local_a on tg_range + Index Cond: (a < '2022-03-31 00:00:00'::timestamp without time zone) + Selected Partitions: 1..3 +(5 rows) + +select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; + a | b +---------------------+---- + 2022-01-05 00:00:00 | 11 + 2022-01-15 00:00:00 | 12 + 2022-01-25 00:00:00 | 13 + 2022-02-05 00:00:00 | 21 + 2022-02-15 00:00:00 | 22 + 2022-02-25 00:00:00 | 23 + 2022-03-05 00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(9 rows) + +drop table tg_range; +-- range without gpi +create table tg_range_no_gpi(a date, b int) +partition by range(a) +( + partition p1 values less than ('2022-01-31 00:00:00'), + partition p2 values less than ('2022-02-28 00:00:00'), + partition p3 values less 
than ('2022-03-31 00:00:00') +); +insert into tg_range_no_gpi select '2022-1-5'::date+n1*'1 month'::interval+10*n2*'1 day'::interval, 10*(n1+1)+(n2+1) from generate_series(0,2) t1(n1), generate_series(0,2) t2(n2); +refresh materialized view pg_partition_before_truncate; +alter table tg_range_no_gpi truncate partition p1 update global index; +alter table tg_range_no_gpi truncate partition p2 update global index; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_range_no_gpi') order by oid; + relname | reloptions | boundaries +-----------------+---------------------------------------------------+------------------------- + tg_range_no_gpi | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {"2022-01-31 00:00:00"} + p2 | {orientation=row,compression=no} | {"2022-02-28 00:00:00"} + p3 | {orientation=row,compression=no} | {"2022-03-31 00:00:00"} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_range_no_gpi') order by oid; + relname | reloptions | boundaries +-----------------+---------------------------------------------------+------------------------- + tg_range_no_gpi | {orientation=row,compression=no,wait_clean_gpi=y} | + p1 | {orientation=row,compression=no,wait_clean_gpi=y} | {"2022-01-31 00:00:00"} + p2 | {orientation=row,compression=no,wait_clean_gpi=y} | {"2022-02-28 00:00:00"} + p3 | {orientation=row,compression=no} | {"2022-03-31 00:00:00"} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_range_no_gpi'; + tablename | relname | oid_changed | parentid_ok | boundaries_ok +-----------------+-----------------+-------------+-------------+--------------- + tg_range_no_gpi | tg_range_no_gpi | f | t | + tg_range_no_gpi | p1 | f | t | t + tg_range_no_gpi | p2 | f | t | t + tg_range_no_gpi | p3 | f | t | t +(4 rows) + +select * from tg_range_no_gpi; + a | b +---------------------+---- + 2022-03-05 00:00:00 | 31 + 2022-03-15 00:00:00 | 32 + 2022-03-25 00:00:00 | 33 +(3 rows) + +drop table tg_range_no_gpi; +-- list +create table tg_list(a int, b int) +partition by list(a) +( + partition p1 values (0,3,6), + partition p2 values (1,4,7), + partition p3 values (default) +); +create index i_tg_list_global_b on tg_list(b) global; +create index i_tg_list_global_a_b on tg_list(a,b) global; +create index i_tg_list_local_a on tg_list(a) local; +insert into tg_list select a,b from generate_series(0,8) t1(a), generate_series(0,8) t2(b); +refresh materialized view pg_partition_before_truncate; +begin; +alter table tg_list truncate partition p1 update global index; +alter table tg_list truncate partition p2 update global index; +alter table tg_list truncate partition p3 update global index; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_list') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_list | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {0,3,6} + p2 | {orientation=row,compression=no} | {1,4,7} + p3 | {orientation=row,compression=no} | {NULL} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_list') order by oid; + relname | reloptions | boundaries 
+---------+---------------------------------------------------+------------ + tg_list | {orientation=row,compression=no,wait_clean_gpi=y} | + p1 | | {0,3,6} + p2 | | {1,4,7} + p3 | | {NULL} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_list'; + tablename | relname | oid_changed | parentid_ok | boundaries_ok +-----------+---------+-------------+-------------+--------------- + tg_list | tg_list | f | t | + tg_list | p1 | t | t | t + tg_list | p2 | t | t | t + tg_list | p3 | t | t | t +(4 rows) + +select * from tg_list; + a | b +---+--- +(0 rows) + +explain(costs off) select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9; + QUERY PLAN +------------------------------------------------ + Index Scan using i_tg_list_global_b on tg_list + Index Cond: (b < 9) +(2 rows) + +select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9; + a | b +---+--- +(0 rows) + +explain(costs off) select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9; + QUERY PLAN +----------------------------------------------------------------- + Partition Iterator + Iterations: 3 + -> Partitioned Index Scan using i_tg_list_local_a on tg_list + Index Cond: (a < 9) + Selected Partitions: 1..3 +(5 rows) + +select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a << 9; + a | b +---+--- +(0 rows) + +rollback; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_list') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_list | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {0,3,6} + p2 | {orientation=row,compression=no} | {1,4,7} + p3 | {orientation=row,compression=no} | {NULL} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_list') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_list | {orientation=row,compression=no,wait_clean_gpi=y} | + p1 | {orientation=row,compression=no} | {0,3,6} + p2 | {orientation=row,compression=no} | {1,4,7} + p3 | {orientation=row,compression=no} | {NULL} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_list'; + tablename | relname | oid_changed | parentid_ok | boundaries_ok +-----------+---------+-------------+-------------+--------------- + tg_list | tg_list | f | t | + tg_list | p1 | f | t | t + tg_list | p2 | f | t | t + tg_list | p3 | f | t | t +(4 rows) + +select * from tg_list; + a | b +---+--- + 2 | 0 + 2 | 1 + 2 | 2 + 2 | 3 + 2 | 4 + 2 | 5 + 2 | 6 + 2 | 7 + 2 | 8 + 5 | 0 + 5 | 1 + 5 | 2 + 5 | 3 + 5 | 4 + 5 | 5 + 5 | 6 + 5 | 7 + 5 | 8 + 8 | 0 + 8 | 1 + 8 | 2 + 8 | 3 + 8 | 4 + 8 | 5 + 8 | 6 + 8 | 7 + 8 | 8 + 1 | 0 + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 4 | 0 + 4 | 1 + 4 | 2 + 4 | 3 + 4 | 4 + 4 | 5 + 4 | 6 + 4 | 7 + 4 | 8 + 7 | 0 + 7 | 1 + 7 | 2 + 7 | 3 + 7 | 4 + 7 | 5 + 7 | 6 + 7 | 7 + 7 | 8 + 0 | 0 + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 3 | 0 + 3 | 1 + 3 | 2 + 3 | 3 + 3 | 4 + 3 | 5 + 3 | 6 + 3 | 7 + 3 | 8 + 6 | 0 + 6 | 1 + 6 | 2 + 6 | 3 + 6 | 4 + 6 | 5 + 6 | 6 + 6 | 7 + 6 | 8 +(81 rows) + +explain(costs off) select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9; + QUERY PLAN 
+------------------------------------------------ + Index Scan using i_tg_list_global_b on tg_list + Index Cond: (b < 9) +(2 rows) + +select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9; + a | b +---+--- + 8 | 0 + 7 | 0 + 6 | 0 + 5 | 0 + 4 | 0 + 3 | 0 + 2 | 0 + 1 | 0 + 0 | 0 + 8 | 1 + 7 | 1 + 6 | 1 + 5 | 1 + 4 | 1 + 3 | 1 + 2 | 1 + 1 | 1 + 0 | 1 + 8 | 2 + 7 | 2 + 6 | 2 + 5 | 2 + 4 | 2 + 3 | 2 + 2 | 2 + 1 | 2 + 0 | 2 + 8 | 3 + 7 | 3 + 6 | 3 + 5 | 3 + 4 | 3 + 3 | 3 + 2 | 3 + 1 | 3 + 0 | 3 + 8 | 4 + 7 | 4 + 6 | 4 + 5 | 4 + 4 | 4 + 3 | 4 + 2 | 4 + 1 | 4 + 0 | 4 + 8 | 5 + 7 | 5 + 6 | 5 + 5 | 5 + 4 | 5 + 3 | 5 + 2 | 5 + 1 | 5 + 0 | 5 + 8 | 6 + 7 | 6 + 6 | 6 + 5 | 6 + 4 | 6 + 3 | 6 + 2 | 6 + 1 | 6 + 0 | 6 + 8 | 7 + 7 | 7 + 6 | 7 + 5 | 7 + 4 | 7 + 3 | 7 + 2 | 7 + 1 | 7 + 0 | 7 + 8 | 8 + 7 | 8 + 6 | 8 + 5 | 8 + 4 | 8 + 3 | 8 + 2 | 8 + 1 | 8 + 0 | 8 +(81 rows) + +explain(costs off) select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9; + QUERY PLAN +----------------------------------------------------------------- + Partition Iterator + Iterations: 3 + -> Partitioned Index Scan using i_tg_list_local_a on tg_list + Index Cond: (a < 9) + Selected Partitions: 1..3 +(5 rows) + +select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9; + a | b +---+--- + 2 | 8 + 2 | 7 + 2 | 6 + 2 | 5 + 2 | 4 + 2 | 3 + 2 | 2 + 2 | 1 + 2 | 0 + 5 | 8 + 5 | 7 + 5 | 6 + 5 | 5 + 5 | 4 + 5 | 3 + 5 | 2 + 5 | 1 + 5 | 0 + 8 | 8 + 8 | 7 + 8 | 6 + 8 | 5 + 8 | 4 + 8 | 3 + 8 | 2 + 8 | 1 + 8 | 0 + 1 | 8 + 1 | 7 + 1 | 6 + 1 | 5 + 1 | 4 + 1 | 3 + 1 | 2 + 1 | 1 + 1 | 0 + 4 | 8 + 4 | 7 + 4 | 6 + 4 | 5 + 4 | 4 + 4 | 3 + 4 | 2 + 4 | 1 + 4 | 0 + 7 | 8 + 7 | 7 + 7 | 6 + 7 | 5 + 7 | 4 + 7 | 3 + 7 | 2 + 7 | 1 + 7 | 0 + 0 | 8 + 0 | 7 + 0 | 6 + 0 | 5 + 0 | 4 + 0 | 3 + 0 | 2 + 0 | 1 + 0 | 0 + 3 | 8 + 3 | 7 + 3 | 6 + 3 | 5 + 3 | 4 + 3 | 3 + 3 | 2 + 3 | 1 + 3 | 0 + 6 | 8 + 6 | 7 + 6 | 6 + 6 | 5 + 6 | 4 + 6 | 3 + 6 | 2 + 6 | 1 + 6 | 0 +(81 rows) + +drop table tg_list; +-- hash +create table tg_hash(a int, b int) +partition by hash(a) +( + partition p1, + partition p2, + partition p3 +); +create index i_tg_hash_global_b on tg_hash(b) global; +create index i_tg_hash_global_a_b on tg_hash(a,b) global; +create index i_tg_hash_local_a on tg_hash(a) local; +insert into tg_hash select a,b from generate_series(0,8) t1(a), generate_series(0,8) t2(b); +refresh materialized view pg_partition_before_truncate; +begin; +alter table tg_hash truncate partition p1 update global index; +alter table tg_hash truncate partition p2 update global index; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_hash | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {0} + p2 | {orientation=row,compression=no} | {1} + p3 | {orientation=row,compression=no} | {2} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_hash | {orientation=row,compression=no,wait_clean_gpi=y} | + p3 | {orientation=row,compression=no} | {2} + p1 | | {0} + p2 | | {1} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_hash'; + tablename 
| relname | oid_changed | parentid_ok | boundaries_ok +-----------+---------+-------------+-------------+--------------- + tg_hash | tg_hash | f | t | + tg_hash | p1 | t | t | t + tg_hash | p2 | t | t | t + tg_hash | p3 | f | t | t +(4 rows) + +select * from tg_hash; + a | b +---+--- + 3 | 0 + 3 | 1 + 3 | 2 + 3 | 3 + 3 | 4 + 3 | 5 + 3 | 6 + 3 | 7 + 3 | 8 + 5 | 0 + 5 | 1 + 5 | 2 + 5 | 3 + 5 | 4 + 5 | 5 + 5 | 6 + 5 | 7 + 5 | 8 +(18 rows) + +explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9; + QUERY PLAN +------------------------------------------------ + Index Scan using i_tg_hash_global_b on tg_hash + Index Cond: (b < 9) +(2 rows) + +select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9; + a | b +---+--- + 5 | 0 + 3 | 0 + 5 | 1 + 3 | 1 + 5 | 2 + 3 | 2 + 5 | 3 + 3 | 3 + 5 | 4 + 3 | 4 + 5 | 5 + 3 | 5 + 5 | 6 + 3 | 6 + 5 | 7 + 3 | 7 + 5 | 8 + 3 | 8 +(18 rows) + +explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9; + QUERY PLAN +----------------------------------------------------------------- + Partition Iterator + Iterations: 3 + -> Partitioned Index Scan using i_tg_hash_local_a on tg_hash + Index Cond: (a < 9) + Selected Partitions: 1..3 +(5 rows) + +select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a << 9; + a | b +---+--- + 3 | 0 + 3 | 1 + 3 | 2 + 3 | 3 + 3 | 4 + 3 | 5 + 3 | 6 + 3 | 7 + 3 | 8 + 5 | 0 + 5 | 1 + 5 | 2 + 5 | 3 + 5 | 4 + 5 | 5 + 5 | 6 + 5 | 7 + 5 | 8 +(18 rows) + +rollback; +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_hash | {orientation=row,compression=no,wait_clean_gpi=n} | + p1 | {orientation=row,compression=no} | {0} + p2 | {orientation=row,compression=no} | {1} + p3 | {orientation=row,compression=no} | {2} +(4 rows) + +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid; + relname | reloptions | boundaries +---------+---------------------------------------------------+------------ + tg_hash | {orientation=row,compression=no,wait_clean_gpi=y} | + p1 | {orientation=row,compression=no} | {0} + p2 | {orientation=row,compression=no} | {1} + p3 | {orientation=row,compression=no} | {2} +(4 rows) + +select * from check_truncate_results where tablename = 'tg_hash'; + tablename | relname | oid_changed | parentid_ok | boundaries_ok +-----------+---------+-------------+-------------+--------------- + tg_hash | tg_hash | f | t | + tg_hash | p1 | f | t | t + tg_hash | p2 | f | t | t + tg_hash | p3 | f | t | t +(4 rows) + +select * from tg_hash; + a | b +---+--- + 3 | 0 + 3 | 1 + 3 | 2 + 3 | 3 + 3 | 4 + 3 | 5 + 3 | 6 + 3 | 7 + 3 | 8 + 5 | 0 + 5 | 1 + 5 | 2 + 5 | 3 + 5 | 4 + 5 | 5 + 5 | 6 + 5 | 7 + 5 | 8 + 0 | 0 + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 2 | 0 + 2 | 1 + 2 | 2 + 2 | 3 + 2 | 4 + 2 | 5 + 2 | 6 + 2 | 7 + 2 | 8 + 6 | 0 + 6 | 1 + 6 | 2 + 6 | 3 + 6 | 4 + 6 | 5 + 6 | 6 + 6 | 7 + 6 | 8 + 7 | 0 + 7 | 1 + 7 | 2 + 7 | 3 + 7 | 4 + 7 | 5 + 7 | 6 + 7 | 7 + 7 | 8 + 1 | 0 + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 4 | 0 + 4 | 1 + 4 | 2 + 4 | 3 + 4 | 4 + 4 | 5 + 4 | 6 + 4 | 7 + 4 | 8 + 8 | 0 + 8 | 1 + 8 | 2 + 8 | 3 + 8 | 4 + 8 | 5 + 8 | 6 + 8 | 7 + 8 | 8 +(81 rows) + +explain(costs off) 
select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9;
+ QUERY PLAN
+------------------------------------------------
+ Index Scan using i_tg_hash_global_b on tg_hash
+ Index Cond: (b < 9)
+(2 rows)
+
+select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9;
+ a | b
+---+---
+ 8 | 0
+ 7 | 0
+ 6 | 0
+ 5 | 0
+ 4 | 0
+ 3 | 0
+ 2 | 0
+ 1 | 0
+ 0 | 0
+ 8 | 1
+ 7 | 1
+ 6 | 1
+ 5 | 1
+ 4 | 1
+ 3 | 1
+ 2 | 1
+ 1 | 1
+ 0 | 1
+ 8 | 2
+ 7 | 2
+ 6 | 2
+ 5 | 2
+ 4 | 2
+ 3 | 2
+ 2 | 2
+ 1 | 2
+ 0 | 2
+ 8 | 3
+ 7 | 3
+ 6 | 3
+ 5 | 3
+ 4 | 3
+ 3 | 3
+ 2 | 3
+ 1 | 3
+ 0 | 3
+ 8 | 4
+ 7 | 4
+ 6 | 4
+ 5 | 4
+ 4 | 4
+ 3 | 4
+ 2 | 4
+ 1 | 4
+ 0 | 4
+ 8 | 5
+ 7 | 5
+ 6 | 5
+ 5 | 5
+ 4 | 5
+ 3 | 5
+ 2 | 5
+ 1 | 5
+ 0 | 5
+ 8 | 6
+ 7 | 6
+ 6 | 6
+ 5 | 6
+ 4 | 6
+ 3 | 6
+ 2 | 6
+ 1 | 6
+ 0 | 6
+ 8 | 7
+ 7 | 7
+ 6 | 7
+ 5 | 7
+ 4 | 7
+ 3 | 7
+ 2 | 7
+ 1 | 7
+ 0 | 7
+ 8 | 8
+ 7 | 8
+ 6 | 8
+ 5 | 8
+ 4 | 8
+ 3 | 8
+ 2 | 8
+ 1 | 8
+ 0 | 8
+(81 rows)
+
+explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Partition Iterator
+ Iterations: 3
+ -> Partitioned Index Scan using i_tg_hash_local_a on tg_hash
+ Index Cond: (a < 9)
+ Selected Partitions: 1..3
+(5 rows)
+
+select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9;
+ a | b
+---+---
+ 3 | 8
+ 3 | 7
+ 3 | 6
+ 3 | 5
+ 3 | 4
+ 3 | 3
+ 3 | 2
+ 3 | 1
+ 3 | 0
+ 5 | 8
+ 5 | 7
+ 5 | 6
+ 5 | 5
+ 5 | 4
+ 5 | 3
+ 5 | 2
+ 5 | 1
+ 5 | 0
+ 0 | 8
+ 0 | 7
+ 0 | 6
+ 0 | 5
+ 0 | 4
+ 0 | 3
+ 0 | 2
+ 0 | 1
+ 0 | 0
+ 2 | 8
+ 2 | 7
+ 2 | 6
+ 2 | 5
+ 2 | 4
+ 2 | 3
+ 2 | 2
+ 2 | 1
+ 2 | 0
+ 6 | 8
+ 6 | 7
+ 6 | 6
+ 6 | 5
+ 6 | 4
+ 6 | 3
+ 6 | 2
+ 6 | 1
+ 6 | 0
+ 7 | 8
+ 7 | 7
+ 7 | 6
+ 7 | 5
+ 7 | 4
+ 7 | 3
+ 7 | 2
+ 7 | 1
+ 7 | 0
+ 1 | 8
+ 1 | 7
+ 1 | 6
+ 1 | 5
+ 1 | 4
+ 1 | 3
+ 1 | 2
+ 1 | 1
+ 1 | 0
+ 4 | 8
+ 4 | 7
+ 4 | 6
+ 4 | 5
+ 4 | 4
+ 4 | 3
+ 4 | 2
+ 4 | 1
+ 4 | 0
+ 8 | 8
+ 8 | 7
+ 8 | 6
+ 8 | 5
+ 8 | 4
+ 8 | 3
+ 8 | 2
+ 8 | 1
+ 8 | 0
+(81 rows)
+
+drop table tg_hash;
+-- cleanup
+drop view check_truncate_results;
+drop materialized view pg_partition_before_truncate;
diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out
new file mode 100644
index 000000000..cd41624df
--- /dev/null
+++ b/src/test/regress/expected/updatable_views.out
@@ -0,0 +1,152 @@
+CREATE USER regress_view_user1 PASSWORD 'Gauss@123';
+CREATE USER regress_view_user2 PASSWORD 'Gauss@123';
+-- nested-view permissions check
+CREATE TABLE base_tbl(a int, b text, c float);
+INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+CREATE VIEW rw_view1 AS SELECT * FROM base_tbl;
+GRANT ALL ON SCHEMA regress_view_user1 TO regress_view_user2;
+SELECT * FROM rw_view1; -- not allowed
+ERROR: permission denied for relation base_tbl
+DETAIL: N/A
+SELECT * FROM rw_view1 FOR UPDATE; -- not allowed
+ERROR: permission denied for relation base_tbl
+DETAIL: N/A
+UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed
+ERROR: permission denied for relation base_tbl
+DETAIL: N/A
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+CREATE VIEW rw_view2 AS SELECT * FROM regress_view_user1.rw_view1;
+SELECT * FROM rw_view2; -- not allowed
+ERROR: permission denied for relation rw_view1
+DETAIL: N/A
+SELECT * FROM rw_view2 FOR UPDATE; -- not allowed
+ERROR: permission denied for relation rw_view1
+DETAIL: N/A
+UPDATE
rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for relation rw_view1 +DETAIL: N/A +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for relation base_tbl +DETAIL: N/A +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view1" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. +SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for relation rw_view1 +DETAIL: N/A +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for relation rw_view1 +DETAIL: N/A +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for relation rw_view1 +DETAIL: N/A +SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123'; +GRANT SELECT ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view2; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for relation rw_view1 +DETAIL: N/A +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view2" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. +RESET SESSION AUTHORIZATION; +GRANT UPDATE ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view1" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. +SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view2; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for relation rw_view1 +DETAIL: N/A +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view2" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. +SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123'; +GRANT UPDATE ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view2; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view2" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. 
+RESET SESSION AUTHORIZATION; +REVOKE UPDATE ON base_tbl FROM regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for relation base_tbl +DETAIL: N/A +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view1" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. +SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123'; +SELECT * FROM rw_view2; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for relation base_tbl +DETAIL: N/A +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views +ERROR: cannot update view "rw_view2" +HINT: You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger. +RESET SESSION AUTHORIZATION; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view regress_view_user1.rw_view1 +drop cascades to view regress_view_user2.rw_view2 +DROP USER regress_view_user1; +DROP USER regress_view_user2; diff --git a/src/test/regress/expected/update_for_wait_s1.out b/src/test/regress/expected/update_for_wait_s1.out new file mode 100644 index 000000000..f4f2af4d2 --- /dev/null +++ b/src/test/regress/expected/update_for_wait_s1.out @@ -0,0 +1,239 @@ +create table hw_t1 (id1 int, id2 int, num int); +insert into hw_t1 values (1,11,11), (2,21,21), (3,31,31), (4,41,41), (5,51,51); +/*----------------test1 Locking succeeded. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +select * from hw_t1 where id1 = 3 for update; + id1 | id2 | num +-----+-----+----- + 3 | 31 | 31 +(1 row) + +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--time delay +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +select pg_sleep(1);--wait session2 + pg_sleep +---------- + +(1 row) + +/*----------------test2 Locking failed. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +select * from hw_t1 where id1 = 3 for update; + id1 | id2 | num +-----+-----+----- + 3 | 31 | 31 +(1 row) + +select pg_sleep(4); + pg_sleep +---------- + +(1 row) + +--time delay +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +create table t1(val int, val2 int); +insert into t1 values(1,11),(2,11); insert into t1 values(1,11),(2,11); +insert into t1 values(3,11),(4,11); insert into t1 values(5,11),(6,11); +/*----------------test3 Locking succeeded. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +select * from (select * from t1 for update of t1 nowait) as foo; + val | val2 +-----+------ + 1 | 11 + 2 | 11 + 1 | 11 + 2 | 11 + 3 | 11 + 4 | 11 + 5 | 11 + 6 | 11 +(8 rows) + +--time delay +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--time delay +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +select pg_sleep(1);--wait session2 + pg_sleep +---------- + +(1 row) + +/*----------------test4 Locking failed. 
*/ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +select * from (select * from t1 for update of t1 nowait) as foo; + val | val2 +-----+------ + 1 | 11 + 2 | 11 + 1 | 11 + 2 | 11 + 3 | 11 + 4 | 11 + 5 | 11 + 6 | 11 +(8 rows) + +--time delay +select pg_sleep(4); + pg_sleep +---------- + +(1 row) + +--time delay +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +/*----------------test5 Locking update. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +update hw_t1 set num=666 where id1 = 2; +select pg_sleep(4); + pg_sleep +---------- + +(1 row) + +--time 4 +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +/*----------------test5_1 Locking update. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +update hw_t1 set num=666; +select pg_sleep(4); + pg_sleep +---------- + +(1 row) + +--time 4 +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +/*----------------test6 Locking delete. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +delete hw_t1 where id1 = 3; +select pg_sleep(4); + pg_sleep +---------- + +(1 row) + +--time 4 +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +/*----------------test6_1 Locking delete. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +begin; +delete from hw_t1; +select pg_sleep(4); + pg_sleep +---------- + +(1 row) + +--time 4 +end; +select current_time; +--?.* +--?.* +--?.* +(1 row) + +drop table hw_t1; +drop table t1; diff --git a/src/test/regress/expected/update_for_wait_s2.out b/src/test/regress/expected/update_for_wait_s2.out new file mode 100644 index 000000000..174e46f91 --- /dev/null +++ b/src/test/regress/expected/update_for_wait_s2.out @@ -0,0 +1,224 @@ +\timing on +select pg_sleep(1); + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +/*----------------test1 Locking succeeded. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from hw_t1 where id1 = 3 for update WAIT 10; + id1 | id2 | num +-----+-----+----- + 3 | 31 | 31 +(1 row) + +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +--time delay +select pg_sleep(2);--wait session1 + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +--time delay +/*----------------test2 Locking failed. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from hw_t1 where id1 = 3 for update WAIT 2; +ERROR: could not obtain lock on row in relation,waitSec = 2 +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +--time delay +select pg_sleep(2);--wait session1 + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +--time delay +/*----------------test3 Locking succeeded. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from (select * from t1 for update of t1 wait 10) as foo; + val | val2 +-----+------ + 1 | 11 + 2 | 11 + 1 | 11 + 2 | 11 + 3 | 11 + 4 | 11 + 5 | 11 + 6 | 11 +(8 rows) + +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +--time delay +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +--time delay +/*----------------test4 Locking failed. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from (select * from t1 for update of t1 wait 2) as foo; +ERROR: could not obtain lock on row in relation,waitSec = 2 +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? 
Time: .* ms +--time delay +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +/*----------------test5 Locking update. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from hw_t1 where id1 = 2 for update wait 2; +ERROR: could not obtain lock on row in relation,waitSec = 2 +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +/*----------------test5_1 Locking update. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from hw_t1 where id1 = 2 for update wait 2; +ERROR: could not obtain lock on row in relation,waitSec = 2 +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +/*----------------test6 Locking delete. */ +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from hw_t1 where id1 = 3 for update WAIT 2; +ERROR: could not obtain lock on row in relation,waitSec = 2 +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select pg_sleep(2); + pg_sleep +---------- + +(1 row) + +--? Time: .* ms +/*----------------test6_1 Locking delete. */ +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +select * from hw_t1 where id1 = 2 for update WAIT 2; +ERROR: could not obtain lock on row in relation,waitSec = 2 +--? Time: .* ms +select current_time; +--?.* +--?.* +--?.* +(1 row) + +--? Time: .* ms +\timing off diff --git a/src/test/regress/expected/uppercase_attribute_name.out b/src/test/regress/expected/uppercase_attribute_name.out new file mode 100644 index 000000000..f3076b5b7 --- /dev/null +++ b/src/test/regress/expected/uppercase_attribute_name.out @@ -0,0 +1,203 @@ +drop table if exists test_tb; +NOTICE: table "test_tb" does not exist, skipping +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); +select * from test_tb order by col1; + col1 | col_2 | col_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +set uppercase_attribute_name=true; +-- During \d(+) one table, PQfnumber() is matched according to the lowercase column name. +-- So we need to restrict uppercase_attribute_name to not take effect in \d(+). 
+\d+ pg_class + Table "pg_catalog.pg_class" + Column | Type | Modifiers | Storage | Stats target | Description +------------------+------------------+-----------+----------+--------------+------------- + relname | name | not null | plain | | + relnamespace | oid | not null | plain | | + reltype | oid | not null | plain | | + reloftype | oid | not null | plain | | + relowner | oid | not null | plain | | + relam | oid | not null | plain | | + relfilenode | oid | not null | plain | | + reltablespace | oid | not null | plain | | + relpages | double precision | not null | plain | | + reltuples | double precision | not null | plain | | + relallvisible | integer | not null | plain | | + reltoastrelid | oid | not null | plain | | + reltoastidxid | oid | not null | plain | | + reldeltarelid | oid | not null | plain | | + reldeltaidx | oid | not null | plain | | + relcudescrelid | oid | not null | plain | | + relcudescidx | oid | not null | plain | | + relhasindex | boolean | not null | plain | | + relisshared | boolean | not null | plain | | + relpersistence | "char" | not null | plain | | + relkind | "char" | not null | plain | | + relnatts | smallint | not null | plain | | + relchecks | smallint | not null | plain | | + relhasoids | boolean | not null | plain | | + relhaspkey | boolean | not null | plain | | + relhasrules | boolean | not null | plain | | + relhastriggers | boolean | not null | plain | | + relhassubclass | boolean | not null | plain | | + relcmprs | tinyint | not null | plain | | + relhasclusterkey | boolean | not null | plain | | + relrowmovement | boolean | not null | plain | | + parttype | "char" | not null | plain | | + relfrozenxid | xid32 | not null | plain | | + relacl | aclitem[] | | extended | | + reloptions | text[] | | extended | | + relreplident | "char" | | plain | | + relfrozenxid64 | xid | | plain | | + relbucket | oid | | plain | | + relbucketkey | int2vector | | plain | | + relminmxid | xid | | plain | | +Indexes: + "pg_class_oid_index" UNIQUE, btree (oid) TABLESPACE pg_default + "pg_class_relname_nsp_index" UNIQUE, btree (relname, relnamespace) TABLESPACE pg_default + "pg_class_tblspc_relfilenode_index" btree (reltablespace, relfilenode) TABLESPACE pg_default +Replica Identity: NOTHING +Has OIDs: yes + +select * from test_tb order by col1; + COL1 | COL_2 | COL_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +reset uppercase_attribute_name; +drop table test_tb; +-- utf8 encoding +create database utf8 encoding='utf8' LC_COLLATE='en_US.UTF-8' LC_CTYPE ='en_US.UTF-8' TEMPLATE=template0 dbcompatibility='A'; +\c utf8 +set client_encoding=utf8; +drop table if exists test_tb; +NOTICE: table "test_tb" does not exist, skipping +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); +select * from test_tb order by col1; + col1 | col_2 | col_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +set uppercase_attribute_name=true; +select * from test_tb order by col1; + COL1 | COL_2 | COL_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +reset uppercase_attribute_name; +drop table test_tb; +-- gbk encoding +create database gbk encoding='gbk' LC_COLLATE='zh_CN.GBK' LC_CTYPE ='zh_CN.GBK' TEMPLATE=template0 dbcompatibility='A'; +\c gbk +set 
client_encoding=utf8; +drop table if exists test_tb; +NOTICE: table "test_tb" does not exist, skipping +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); +select * from test_tb order by col1; + col1 | col_2 | col_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +set uppercase_attribute_name=true; +select * from test_tb order by col1; + COL1 | COL_2 | COL_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +reset uppercase_attribute_name; +drop table test_tb; +-- gb18030 encoding +create database gb18030 encoding='gb18030' LC_COLLATE='zh_CN.GB18030' LC_CTYPE ='zh_CN.GB18030' TEMPLATE=template0 dbcompatibility='A'; +\c gb18030 +set client_encoding=utf8; +drop table if exists test_tb; +NOTICE: table "test_tb" does not exist, skipping +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); +select * from test_tb order by col1; + col1 | col_2 | col_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +set uppercase_attribute_name=true; +select * from test_tb order by col1; + COL1 | COL_2 | COL_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +reset uppercase_attribute_name; +drop table test_tb; +-- 'B' dbcompatibility +create database b_dbcompatibility TEMPLATE=template0 dbcompatibility='B'; +\c b_dbcompatibility +set client_encoding=utf8; +drop table if exists test_tb; +NOTICE: table "test_tb" does not exist, skipping +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); +select * from test_tb order by col1; + col1 | col_2 | col_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +set uppercase_attribute_name=true; +select * from test_tb order by col1; + col1 | col_2 | col_第三列 | CoL_Four +------+-------+------------+---------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 +(3 rows) + +reset uppercase_attribute_name; +drop table test_tb; +\c regression +clean connection to all force for database utf8; +clean connection to all force for database gbk; +clean connection to all force for database gb18030; +clean connection to all force for database b_dbcompatibility; +drop database utf8; +drop database gbk; +drop database gb18030; +drop database b_dbcompatibility; diff --git a/src/test/regress/expected/upsert_001.out b/src/test/regress/expected/upsert_001.out index 4c38b10d4..4b9d90962 100755 --- a/src/test/regress/expected/upsert_001.out +++ b/src/test/regress/expected/upsert_001.out @@ -8,11 +8,19 @@ Command: INSERT Description: create new rows in a table Syntax: [ WITH [ RECURSIVE ] with_query [, ...] ] -INSERT [/*+ plan_hint */] INTO table_name [ ( column_name [, ...] ) ] +INSERT [/*+ plan_hint */] INTO table_name [partition_clause] [ AS alias ] [ ( column_name [, ...] ) ] { DEFAULT VALUES | VALUES {( { expression | DEFAULT } [, ...] ) }[, ...] | query } - [ ON DUPLICATE KEY UPDATE { NOTHING | { column_name = { expression | DEFAULT } } [, ...] 
} ] + [ ON DUPLICATE KEY UPDATE { NOTHING | { column_name = { expression | DEFAULT } } [, ...] [ WHERE condition ] } ] [ RETURNING {* | {output_expression [ [ AS ] output_name ] }[, ...]} ]; +where with_query can be: +with_query_name [ ( column_name [, ...] ) ] AS [ [ NOT ] MATERIALIZED ] +( {select | values | insert | update | delete} ) +where partition_clause can be: +PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } | +SUBPARTITION { ( subpartition_name ) | FOR ( subpartition_value [, ...] ) } +NOTICE: 'partition_clause' is only avaliable in CENTRALIZED mode! + -- test permission --- test with no sequence column CREATE TABLE t00 (col1 INT DEFAULT 1 PRIMARY KEY, col2 INT); diff --git a/src/test/regress/expected/upsert_explain.out b/src/test/regress/expected/upsert_explain.out index 450a2ebcd..353ccea7b 100644 --- a/src/test/regress/expected/upsert_explain.out +++ b/src/test/regress/expected/upsert_explain.out @@ -40,7 +40,7 @@ EXPLAIN PERFORMANCE insert into up_expl_hash values(1,1,1) on duplicate key upda --? Insert on upsert_test_explain.up_expl_hash (cost=.* rows=1 width=0) (actual time=.* rows=1 loops=1) Conflict Resolution: UPDATE Conflict Arbiter Indexes: up_expl_hash_c3_key - (Buffers: shared hit=4) +--? (Buffers: shared hit=.*) --? (CPU: ex c/r=.*, ex row=1, ex cyc=.*, inc cyc=.*) --? -> Result (cost=.* rows=1 width=0) (actual time=.* rows=1 loops=1) Output: 1, 1, 1 diff --git a/src/test/regress/expected/upsert_grammer_test_01.out b/src/test/regress/expected/upsert_grammer_test_01.out index e362bf852..d39b40206 100644 --- a/src/test/regress/expected/upsert_grammer_test_01.out +++ b/src/test/regress/expected/upsert_grammer_test_01.out @@ -41,6 +41,30 @@ INSERT INTO t_grammer (c1, c3, c4.a) VALUES(84, '{81, 82, 83}', 850) ON DUPLICAT -- support UPDATE NOTHING INSERT INTO t_grammer VALUES(91, 1, '{91, 92, 93}', ROW(94, 95)) ON DUPLICATE KEY UPDATE NOTHING; INSERT INTO t_grammer (c5, c1, c2, c4) VALUES('{91,92}', 92, DEFAULT, ROW(910, 920)) ON DUPLICATE KEY UPDATE NOTHING; +-- support INSERT with assigned alias +INSERT INTO t_grammer AS T1 VALUES(991, 1); +INSERT INTO t_grammer T2 VALUES(992, 1); +ERROR: syntax error at or near "T2" +LINE 1: INSERT INTO t_grammer T2 VALUES(992, 1); + ^ +INSERT INTO t_grammer * AS T1 VALUES(993, 1); +ERROR: syntax error at or near "*" +LINE 1: INSERT INTO t_grammer * AS T1 VALUES(993, 1); + ^ +INSERT INTO t_grammer AS T1 VALUES(991, 2) ON DUPLICATE KEY UPDATE T1.C2 = EXCLUDED.C2 + 1; +INSERT INTO t_grammer T2 VALUES(992, 3) ON DUPLICATE KEY UPDATE T2.C2 = EXCLUDED.C2 + 1 WHERE T2.C1 > 100; +ERROR: syntax error at or near "T2" +LINE 1: INSERT INTO t_grammer T2 VALUES(992, 3) ON DUPLICATE KEY UPD... + ^ +INSERT INTO t_grammer * AS T1 VALUES(993, 4) ON DUPLICATE KEY UPDATE NOTHING; +ERROR: syntax error at or near "*" +LINE 1: INSERT INTO t_grammer * AS T1 VALUES(993, 4) ON DUPLICATE KE... + ^ +-- excluded is reserved keyword +INSERT INTO t_grammer * AS excluded VALUES(993, 4) ON DUPLICATE KEY UPDATE NOTHING; +ERROR: syntax error at or near "*" +LINE 1: INSERT INTO t_grammer * AS excluded VALUES(993, 4) ON DUPLIC... 
+ ^ -- UPDATE target: unsupport with schema but support with tablename INSERT INTO t_grammer VALUES(0, 0, '{0,0}', ROW(0, 0), '{107, 108}') ON DUPLICATE KEY UPDATE upsert_test.t_grammer.c2 = c2 * 10, upsert_test.t_grammer.c3[1:2] = c5[1:2], upsert_test.t_grammer.c4.a = c1; @@ -75,9 +99,6 @@ INSERT INTO t_default VALUES(91, 0.91, '2020-05-17') ON DUPLICATE KEY UPDATE c2 INSERT INTO t_default VALUES(92, DEFAULT, '2020-05-17') ON DUPLICATE KEY UPDATE c2 = 10, c3 = DEFAULT; -- unsupport alias INSERT INTO t_grammer AS alias VALUES (91) ON DUPLICATE KEY UPDATE c2 = VALUES(c1); -ERROR: syntax error at or near "AS" -LINE 1: INSERT INTO t_grammer AS alias VALUES (91) ON DUPLICATE KEY ... - ^ -- unsupport VALUES with expr INSERT INTO t_grammer VALUES(0, 0) ON DUPLICATE KEY UPDATE c2 = sqrt(VALUES(c2)); ERROR: syntax error at or near "(" @@ -163,12 +184,13 @@ SELECT * FROM t_grammer ORDER BY 1; 82 | -100 | {881,882} | | 83 | -100 | {81,82,83} | (810,820) | 84 | -100 | {81,82,83} | (850,) | - 91 | 1 | {91,92,93} | (94,95) | + 91 | 91 | {91,92,93} | (94,95) | 92 | -100 | | (910,920) | {91,92} 101 | 1 | {102,103,104} | (105,106) | {107,108} 102 | 2 | {102,103,104} | (105,106) | {107,108} 103 | 3 | {102,103,104} | (105,106) | {107,108} -(35 rows) + 991 | 3 | | | +(36 rows) SELECT * FROM t_default ORDER BY 1; --? c1 | c2 | c3 diff --git a/src/test/regress/expected/upsert_grammer_test_02.out b/src/test/regress/expected/upsert_grammer_test_02.out index 9f3d1d576..d151c299d 100644 --- a/src/test/regress/expected/upsert_grammer_test_02.out +++ b/src/test/regress/expected/upsert_grammer_test_02.out @@ -75,9 +75,6 @@ INSERT INTO t_default VALUES(91, 0.91, '2020-05-17') ON DUPLICATE KEY UPDATE c2 INSERT INTO t_default VALUES(92, DEFAULT, '2020-05-17') ON DUPLICATE KEY UPDATE c2 = 10, c3 = DEFAULT; -- unsupport alias INSERT INTO t_grammer AS alias VALUES (91) ON DUPLICATE KEY UPDATE c2 = VALUES(c1); -ERROR: syntax error at or near "AS" -LINE 1: INSERT INTO t_grammer AS alias VALUES (91) ON DUPLICATE KEY ... - ^ -- unsupport VALUES with expr INSERT INTO t_grammer VALUES(0, 0) ON DUPLICATE KEY UPDATE c2 = sqrt(VALUES(c2)); ERROR: syntax error at or near "(" @@ -163,12 +160,13 @@ SELECT * FROM t_grammer ORDER BY 1; 82 | 881 | {881,882} | | {881,882} 83 | -100 | {81,82,83} | (810,820) | 84 | -100 | {81,82,83} | (850,83) | - 91 | 1 | {91,92,93} | (94,95) | + 91 | 91 | {91,92,93} | (94,95) | 92 | -100 | | (910,920) | {91,92} 101 | 10 | {107,108,104} | (101,106) | {107,108} 102 | 20 | {107,108,104} | (102,106) | {107,108} 103 | 30 | {107,108,104} | (103,106) | {107,108} -(35 rows) + 991 | 3 | | | +(36 rows) SELECT * FROM t_default ORDER BY 1; --? c1 | c2 | c3 diff --git a/src/test/regress/expected/upsert_restriction.out b/src/test/regress/expected/upsert_restriction.out index 0efdbb8fe..eb88a58be 100644 --- a/src/test/regress/expected/upsert_restriction.out +++ b/src/test/regress/expected/upsert_restriction.out @@ -94,11 +94,6 @@ insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c3 = 2; ERROR: INSERT ON DUPLICATE KEY UPDATE don't allow update on primary key or unique key. insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c1 =1, c2 = 1, c3=2, c4=1,c5=1; ERROR: INSERT ON DUPLICATE KEY UPDATE don't allow update on primary key or unique key. ----- where clause -insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c4 = 1 where c1=1; -ERROR: syntax error at or near "where" -LINE 1: ... values(1,1,1,1,1) on duplicate key update c4 = 1 where c1=1... 
- ^ ---- from clause insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c4 = 1 from up_neg_04 where c1=1; ERROR: syntax error at or near "from" diff --git a/src/test/regress/expected/upsert_where.out b/src/test/regress/expected/upsert_where.out new file mode 100644 index 000000000..0f96ca532 --- /dev/null +++ b/src/test/regress/expected/upsert_where.out @@ -0,0 +1,440 @@ +---- +-- setup +---- +CREATE SCHEMA schema_upsert_where; +SET search_path = schema_upsert_where; +---- +-- basic syntax +---- +CREATE TABLE tab_target1(c1 int unique, c2 int); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tab_target1_c1_key" for table "tab_target1" +INSERT INTO tab_target1 VALUES(generate_series(1,10), generate_series(1,10)); +CREATE TABLE tab_target2(c1 int unique, c2 int unique, c3 int); +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tab_target2_c1_key" for table "tab_target2" +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tab_target2_c2_key" for table "tab_target2" +INSERT INTO tab_target2 VALUES(generate_series(1,10), generate_series(1,10), generate_series(1,10)); +CREATE TABLE tab_target3(c1 int, c2 int, c3 int, unique(c1, c2)) ; +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tab_target3_c1_c2_key" for table "tab_target3" +INSERT INTO tab_target3 VALUES(generate_series(1,10), generate_series(1,10), generate_series(1,10)); +CREATE TABLE tab_source(c1 int, c2 int, c3 int, c4 int); +INSERT INTO tab_source VALUES(generate_series(1,10), generate_series(10,1, -1), generate_series(1,10), generate_series(1,10)); +begin; +-- no conflict where clause is ignored (of course) +INSERT INTO tab_target1 VALUES(0,0) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE false; +INSERT INTO tab_target1 VALUES(1,2) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 1; +SELECT * FROM tab_target1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 2 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +INSERT INTO tab_target1 VALUES(1,3) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 + excluded.c2 = 5; +SELECT * FROM tab_target1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +-- multiple values +INSERT INTO tab_target1 VALUES(2,3),(3,4) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 2; +SELECT * FROM tab_target1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 3 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +INSERT INTO tab_target1 VALUES(3,4),(3,5),(3,6),(3,7),(3,8),(3,9) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 3; +SELECT * FROM tab_target1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 3 + 3 | 4 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +-- from source table +INSERT INTO tab_target1 (SELECT c1, c2 FROM tab_source) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c1 > 5; +SELECT * FROM tab_target1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 3 + 3 | 4 + 4 | 4 + 5 | 5 + 6 | 5 + 7 | 4 + 8 | 3 + 9 | 2 + 10 | 1 +(11 rows) + +-- multiple confliction - where clause will not affect target row of upsert +INSERT INTO tab_target2 VALUES(1,2,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 = 1; +INSERT INTO tab_target2 VALUES(2,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 != 2; +SELECT * FROM tab_target2 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 
+ 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +-- multi-column unique constraint with coercion +INSERT INTO tab_target3 VALUES(1,1,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 < 5; +INSERT INTO tab_target3 VALUES(2,2,20) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c2 between 1 and 3; +INSERT INTO tab_target3 VALUES(3,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c2 = 3 and (c3 < 30 or c1 = 2) and (not (c1 between 20 and 30)); +SELECT * FROM tab_target3 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 20 + 3 | 3 | 30 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +-- test pbe +PREPARE P1 AS INSERT INTO tab_target3 VALUES($1,$2,$3) ON DUPLICATE KEY UPDATE c3 = excluded.c3 + $4 WHERE c1 < $5; +EXECUTE P1(4, 4, 40, 4, 4); +SELECT * FROM tab_target3 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 20 + 3 | 3 | 30 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +EXECUTE P1(4, 4, 40, 4, 5); +SELECT * FROM tab_target3 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 20 + 3 | 3 | 30 + 4 | 4 | 44 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +---- +-- test a_expr cases +---- +INSERT INTO tab_target1 VALUES(0,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE NULL; +INSERT INTO tab_target1 VALUES(1,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 0::boolean; +INSERT INTO tab_target1 VALUES(2,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE false and true; +INSERT INTO tab_target1 VALUES(3,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 'abc' not like '%c'; +INSERT INTO tab_target1 VALUES(4,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 1 IS NULL; +INSERT INTO tab_target1 VALUES(5,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE + (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30'); +INSERT INTO tab_target1 VALUES(6,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE + row(1,1) is distinct from row(1,1); +INSERT INTO tab_target1 VALUES(7,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 1 is unknown; +INSERT INTO tab_target1 VALUES(8,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 1 not in (1,2,3); +SELECT COUNT(*) FROM tab_target1 WHERE c2 = -1; + count +------- + 0 +(1 row) + +rollback; +-- explain analyze +begin; +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target1 VALUES(1,2) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 + excluded.c2 = 3; + QUERY PLAN +----------------------------------------------------------- + Insert on tab_target1 (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target1_c1_key + Conflict Filter: ((tab_target1.c2 + "excluded".c2) = 3) + -> Result (actual rows=1 loops=1) +--? .* +(6 rows) + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target1 VALUES(3,4),(3,5),(3,6),(3,7),(3,8),(3,9) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 3; + QUERY PLAN +--------------------------------------------------------- + Insert on tab_target1 (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target1_c1_key + Conflict Filter: (tab_target1.c2 = 3) + Rows Removed by Conflict Filter: 5 + -> Values Scan on "*VALUES*" (actual rows=6 loops=1) +--? 
.* +(7 rows) + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target2 VALUES(1,2,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 = 1; + QUERY PLAN +-------------------------------------------------------------------- + Insert on tab_target2 (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target2_c1_key, tab_target2_c2_key + Conflict Filter: (tab_target2.c1 = 1) + -> Result (actual rows=1 loops=1) +--? .* +(6 rows) + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target3 VALUES(3,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c2 = 3 and (c3 < 30 or c1 = 2) and (not (c1 between 20 and 30)); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------- + Insert on tab_target3 (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target3_c1_c2_key + Conflict Filter: ((tab_target3.c2 = 3) AND ((tab_target3.c3 < 30) OR (tab_target3.c1 = 2)) AND ((tab_target3.c1 < 20) OR (tab_target3.c1 > 30))) + -> Result (actual rows=1 loops=1) +--? .* +(6 rows) + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) EXECUTE P1(4, 4, 40, 4, 5); + QUERY PLAN +--------------------------------------------------- + Insert on tab_target3 (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target3_c1_c2_key + Conflict Filter: (tab_target3.c1 < 5) + -> Result (actual rows=1 loops=1) +--? .* +(6 rows) + +rollback; +---- +-- test with synonym +---- +create synonym stt1 for tab_target1; +create synonym stt2 for tab_target2; +create synonym stt3 for tab_target3; +create synonym sts for tab_source; +begin; +-- no conflict where clause is ignored (of course) +INSERT INTO stt1 VALUES(0,0) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE false; +INSERT INTO stt1 VALUES(1,2) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 1; +SELECT * FROM stt1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 2 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +INSERT INTO stt1 VALUES(1,3) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 + excluded.c2 = 5; +SELECT * FROM stt1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +-- multiple values +INSERT INTO stt1 VALUES(2,3),(3,4) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 2; +SELECT * FROM stt1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 3 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +INSERT INTO stt1 VALUES(3,4),(3,5),(3,6),(3,7),(3,8),(3,9) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 3; +SELECT * FROM stt1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 3 + 3 | 4 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(11 rows) + +-- from source table +INSERT INTO stt1 (SELECT c1, c2 FROM sts) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c1 > 5; +SELECT * FROM stt1 ORDER BY 1,2; + c1 | c2 +----+---- + 0 | 0 + 1 | 3 + 2 | 3 + 3 | 4 + 4 | 4 + 5 | 5 + 6 | 5 + 7 | 4 + 8 | 3 + 9 | 2 + 10 | 1 +(11 rows) + +-- multiple confliction - where clause will not affect target row of upsert +INSERT INTO stt2 VALUES(1,2,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 = 1; +INSERT INTO stt2 VALUES(2,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 != 2; +SELECT * FROM stt2 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 
| 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +-- multi-column unique constraint +INSERT INTO stt3 VALUES(1,1,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 < 5; +SELECT * FROM stt3 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +-- test pbe +deallocate p1; +PREPARE P1 AS INSERT INTO stt3 VALUES($1,$2,$3) ON DUPLICATE KEY UPDATE c3 = excluded.c3 + $4 WHERE c1 < $5; +EXECUTE P1(2, 2, 20, 2, 2); +SELECT * FROM stt3 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +EXECUTE P1(2, 2, 20, 2, 3); +SELECT * FROM tab_target3 ORDER BY 1,2,3; + c1 | c2 | c3 +----+----+---- + 1 | 1 | 10 + 2 | 2 | 22 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 + 9 | 9 | 9 + 10 | 10 | 10 +(10 rows) + +rollback; +DROP SCHEMA schema_upsert_where CASCADE; +NOTICE: drop cascades to 8 other objects +DETAIL: drop cascades to table tab_target1 +drop cascades to table tab_target2 +drop cascades to table tab_target3 +drop cascades to table tab_source +drop cascades to synonym schema_upsert_where.stt1 +drop cascades to synonym schema_upsert_where.stt2 +drop cascades to synonym schema_upsert_where.stt3 +drop cascades to synonym schema_upsert_where.sts diff --git a/src/test/regress/expected/upsert_where_sublink.out b/src/test/regress/expected/upsert_where_sublink.out new file mode 100644 index 000000000..6d7290f55 --- /dev/null +++ b/src/test/regress/expected/upsert_where_sublink.out @@ -0,0 +1,2040 @@ +---- +-- setup +---- +CREATE SCHEMA schema_upsert_where_sublink; +SET search_path = schema_upsert_where_sublink; +create table tab_target( +c1 int unique not null, +c2 bigint default 0, +c3 numeric default 0, +c4 varchar(100) default 'abcdefjfieE##$#KFAEOJop13SEFJeo', +primary key(c2,c3)); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "tab_target_pkey" for table "tab_target" +NOTICE: CREATE TABLE / UNIQUE will create implicit index "tab_target_c1_key" for table "tab_target" +INSERT INTO tab_target (c1, c2, c3) VALUES( + generate_series(1,10), + generate_series(1,10), + generate_series(1,10)); +CREATE TABLE tab_source(c1 int, c2 int, c3 int, c4 int); +INSERT INTO tab_source VALUES(generate_series(1,10), generate_series(10,1, -1), generate_series(1,10), generate_series(1,10)); +--------------------------------------- +-- not corelated sublink +--------------------------------------- +begin; +-- in/not in sublink +-- multi confliction -> primary key first +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1); +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1); +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source); +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source); +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1); +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1); +-- (not) exists sublink +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 
from tab_source where c4 = 4); +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4); +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c1 = 1); +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c1 = 1); +-- any/some +insert into tab_target values(6,0,0) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select 0); +insert into tab_target values(6,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select 0); +insert into tab_target values(7,0,0) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1); +insert into tab_target values(7,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1); +-- opr sublink +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); +-- nested sublink +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); +-- sublink with CTE +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); +select * from tab_target order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+------------ + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 + 10 | 10 | 10 | conflict10 +(10 rows) + +rollback; +-- check plan +begin; +-- in/not in sublink +-- multi confliction -> primary key first +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where 
excluded.c1 in (select 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (hashed SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (hashed SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (hashed SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=10 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (hashed SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=10 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (hashed SubPlan 1)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (hashed SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- (not) exists sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 
loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: $0 + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 3 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT $0) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 3 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT $0) + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=0 loops=1) + Filter: ((c4 = 4) AND (c1 = 1)) + Rows Removed by Filter: 10 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: $0 + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=0 loops=1) + Filter: ((c4 = 4) AND (c1 = 1)) + Rows Removed by Filter: 10 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- any/some +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,0) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select 0); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (hashed SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select 0); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Materialize (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,0) on duplicate key update c4 = 'conflict7' where 
excluded.c3 > some (select -1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Materialize (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Materialize (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- opr sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c3 <= ($0)::numeric) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 < 8) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c3 > ($0)::numeric) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 < 8) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- nested sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c1 = $6) + InitPlan 7 (returns $6) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $5) + Rows Removed by Filter: 9 + InitPlan 6 (returns $5) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $4) + Rows Removed by Filter: 9 + InitPlan 5 (returns $4) + -> Seq Scan 
on tab_source (actual rows=1 loops=1) + Filter: (c1 = $3) + Rows Removed by Filter: 9 + InitPlan 4 (returns $3) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $2) + Rows Removed by Filter: 9 + InitPlan 3 (returns $2) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $1) + Rows Removed by Filter: 9 + InitPlan 2 (returns $1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $0) + Rows Removed by Filter: 9 + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = 9) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c1 <> $6) + Rows Removed by Conflict Filter: 1 + InitPlan 7 (returns $6) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $5) + Rows Removed by Filter: 9 + InitPlan 6 (returns $5) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $4) + Rows Removed by Filter: 9 + InitPlan 5 (returns $4) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $3) + Rows Removed by Filter: 9 + InitPlan 4 (returns $3) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $2) + Rows Removed by Filter: 9 + InitPlan 3 (returns $2) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $1) + Rows Removed by Filter: 9 + InitPlan 2 (returns $1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $0) + Rows Removed by Filter: 9 + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = 9) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- sublink with CTE +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 = $0) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 = 10) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 
as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 <> $0) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 = 10) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +select * from tab_target order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+------------ + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 + 10 | 10 | 10 | conflict10 +(10 rows) + +rollback; +--------------------------------------- +-- corelated sublink - reference target +--------------------------------------- +begin; +-- in/not in sublink +-- multi confliction -> primary key first +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where tab_target.c1 = 1); +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where tab_target.c1 = 1); +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source where tab_target.c1 = c1); +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source where tab_target.c1 = c1); +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where tab_target.c1 = 1); +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where tab_target.c1 = 1); +-- (not) exists sublink +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); +-- any/some +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where tab_target.c1 = c1); +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select c3 from tab_source where tab_target.c1 = c1); +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where tab_target.c1 = c1); +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where tab_target.c1 = c1); +-- opr sublink +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 
from tab_source where c3 < tab_target.c1 limit 1)); +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); +-- nested sublink +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = tab_target.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where tab_target.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); +-- sublink with CTE +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); +select * from tab_target order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+------------ + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 + 10 | 10 | 10 | conflict10 +(10 rows) + +rollback; +-- check plan +begin; +-- in/not in sublink +-- multi confliction -> primary key first +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where tab_target.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) + One-Time Filter: (tab_target.c1 = 1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where tab_target.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result 
(actual rows=1 loops=1) + One-Time Filter: (tab_target.c1 = 1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source where tab_target.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (tab_target.c1 = c1) + Rows Removed by Filter: 1 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source where tab_target.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (tab_target.c1 = c1) + Rows Removed by Filter: 1 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where tab_target.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (SubPlan 1)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=0 loops=1) + One-Time Filter: (tab_target.c1 = 1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where tab_target.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=0 loops=1) + One-Time Filter: (tab_target.c1 = 1) +--?.* +--?.* + +-- (not) exists sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); + QUERY PLAN +------------------------------------------------------------------ + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (alternatives: SubPlan 1 or hashed SubPlan 2) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = tab_target.c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' 
where not exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); + QUERY PLAN +------------------------------------------------------------------------ + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (alternatives: SubPlan 1 or hashed SubPlan 2)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = tab_target.c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); + QUERY PLAN +------------------------------------------------------------------------ + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (alternatives: SubPlan 1 or hashed SubPlan 2)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = tab_target.c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); + QUERY PLAN +------------------------------------------------------------------ + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (alternatives: SubPlan 1 or hashed SubPlan 2) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = tab_target.c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +-- any/some +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where tab_target.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (tab_target.c1 = c1) + Rows Removed by Filter: 5 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select c3 from tab_source where tab_target.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on 
tab_source (actual rows=1 loops=1) + Filter: (tab_target.c1 = c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where tab_target.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (tab_target.c1 = c1) + Rows Removed by Filter: 6 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where tab_target.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (tab_target.c1 = c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +-- opr sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c3 <= ((SubPlan 1))::numeric) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 < tab_target.c1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c3 > ((SubPlan 1))::numeric) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 < tab_target.c1) +--?.* +--?.* + +-- nested sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = tab_target.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on 
tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c1 = (SubPlan 7)) + -> Result (actual rows=1 loops=1) + SubPlan 7 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $6) + Rows Removed by Filter: 9 + InitPlan 6 (returns $6) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $5) + Rows Removed by Filter: 9 + InitPlan 5 (returns $5) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $4) + Rows Removed by Filter: 9 + InitPlan 4 (returns $4) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $3) + Rows Removed by Filter: 9 + InitPlan 3 (returns $3) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $2) + Rows Removed by Filter: 9 + InitPlan 2 (returns $2) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $1) + Rows Removed by Filter: 9 + InitPlan 1 (returns $1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = tab_target.c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where tab_target.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 <> (SubPlan 7)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 7 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $6) + Rows Removed by Filter: 9 + InitPlan 6 (returns $6) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $5) + Rows Removed by Filter: 9 + InitPlan 5 (returns $5) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $4) + Rows Removed by Filter: 9 + InitPlan 4 (returns $4) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $3) + Rows Removed by Filter: 9 + InitPlan 3 (returns $3) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $2) + Rows Removed by Filter: 9 + InitPlan 2 (returns $2) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $1) + Rows Removed by Filter: 9 + InitPlan 1 (returns $1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = "excluded".c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +-- sublink with CTE +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict 
Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 = (SubPlan 1)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 = tab_target.c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 <> (SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 = tab_target.c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +select * from tab_target order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+------------ + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 + 10 | 10 | 10 | conflict10 +(10 rows) + +rollback; +----------------------------------------- +-- corelated sublink - reference conflict +----------------------------------------- +begin; +-- in/not in sublink +-- multi confliction -> primary key first +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where excluded.c1 = 1); +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where excluded.c1 = 1); +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 + 1 from tab_source where excluded.c1 = c1); +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 + 1 from tab_source where excluded.c1 = c1); +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where excluded.c1 = 1); +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where excluded.c1 = 1); +-- (not) exists sublink +insert into tab_target values(4,1,4) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); +insert into tab_target values(4,1,4) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); +-- any/some +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where excluded.c1 = c1); +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' where 
excluded.c3 != any (select c3 from tab_source where excluded.c1 = c1); +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where excluded.c1 = c1); +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where excluded.c1 = c1); +-- opr sublink +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); +-- nested sublink +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); +-- sublink with CTE +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); +select * from tab_target order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+------------ + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 + 10 | 10 | 10 | conflict10 +(10 rows) + +rollback; +-- check plan +begin; +-- in/not in sublink +-- multi confliction -> primary key first +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where excluded.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) + One-Time Filter: ("excluded".c1 = 1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs 
off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where excluded.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=1 loops=1) + One-Time Filter: ("excluded".c1 = 1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 + 1 from tab_source where excluded.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: ("excluded".c1 = c1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 + 1 from tab_source where excluded.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: ("excluded".c1 = c1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where excluded.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (SubPlan 1)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=0 loops=1) + One-Time Filter: ("excluded".c1 = 1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where excluded.c1 = 1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Result (actual rows=0 loops=1) + One-Time Filter: ("excluded".c1 = 1) +--?.* +--?.* + +-- (not) exists sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,4) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); + QUERY PLAN +------------------------------------------------------------------ + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (alternatives: SubPlan 1 or hashed 
SubPlan 2) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = "excluded".c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,4) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); + QUERY PLAN +------------------------------------------------------------------------ + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (alternatives: SubPlan 1 or hashed SubPlan 2)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = "excluded".c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); + QUERY PLAN +------------------------------------------------------------------------ + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (NOT (alternatives: SubPlan 1 or hashed SubPlan 2)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = "excluded".c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); + QUERY PLAN +------------------------------------------------------------------ + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (alternatives: SubPlan 1 or hashed SubPlan 2) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (Actual time: never executed) + Filter: ((c4 = 4) AND ((c3)::numeric = "excluded".c3)) + SubPlan 2 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c4 = 4) + Rows Removed by Filter: 9 +--?.* +--?.* + +-- any/some +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where excluded.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: ("excluded".c1 = c1) + Rows Removed by Filter: 5 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' 
where excluded.c3 != any (select c3 from tab_source where excluded.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: ("excluded".c1 = c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where excluded.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: ("excluded".c1 = c1) + Rows Removed by Filter: 6 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where excluded.c1 = c1); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: ("excluded".c1 = c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +-- opr sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c3 <= ((SubPlan 1))::numeric) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 < "excluded".c1) +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c3 > ((SubPlan 1))::numeric) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 < "excluded".c1) +--?.* +--?.* + +-- nested sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source 
where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c1 = (SubPlan 7)) + -> Result (actual rows=1 loops=1) + SubPlan 7 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $6) + Rows Removed by Filter: 9 + InitPlan 6 (returns $6) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $5) + Rows Removed by Filter: 9 + InitPlan 5 (returns $5) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $4) + Rows Removed by Filter: 9 + InitPlan 4 (returns $4) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $3) + Rows Removed by Filter: 9 + InitPlan 3 (returns $3) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $2) + Rows Removed by Filter: 9 + InitPlan 2 (returns $2) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $1) + Rows Removed by Filter: 9 + InitPlan 1 (returns $1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = "excluded".c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c1 <> (SubPlan 7)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 7 + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $6) + Rows Removed by Filter: 9 + InitPlan 6 (returns $6) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $5) + Rows Removed by Filter: 9 + InitPlan 5 (returns $5) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $4) + Rows Removed by Filter: 9 + InitPlan 4 (returns $4) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $3) + Rows Removed by Filter: 9 + InitPlan 3 (returns $3) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $2) + Rows Removed by Filter: 9 + InitPlan 2 (returns $2) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = $1) + Rows Removed by Filter: 9 + InitPlan 1 (returns $1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = "excluded".c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +-- sublink with CTE +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 
as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 = (SubPlan 1)) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 = "excluded".c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 <> (SubPlan 1)) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Limit (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c3 = "excluded".c1) + Rows Removed by Filter: 9 +--?.* +--?.* + +select * from tab_target order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+------------ + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 + 10 | 10 | 10 | conflict10 +(10 rows) + +rollback; +-------- +-- misc +-------- +begin; +-- agg + group by +insert into tab_target values(1,2,3) on duplicate key update c4 = 'conflict1' where excluded.c2 = (select count(1) from tab_source where c2 < 3); +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,3) on duplicate key update c4 = 'ERROR' where excluded.c2 != (select count(1) from tab_source where c2 < 3); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ("excluded".c2 <> $0) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on tab_source (actual rows=2 loops=1) + Filter: (c2 < 3) + Rows Removed by Filter: 8 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- limit + offset +insert into tab_target values(2,3,5) on duplicate key update c4 = 'conflict2' where excluded.c2 = (select c2 from tab_source order by c1 asc limit 1 offset 7); +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(2,3,5) on duplicate key update c4 = 'ERROR' where excluded.c2 != (select c2 from tab_source order by c1 asc limit 1 offset 7) + and c3 = (select c1 from tab_source order by c1 desc limit 1 offset 7); + QUERY PLAN +-------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict 
Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (("excluded".c2 <> $0) AND (tab_target.c3 = ($1)::numeric)) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Sort (actual rows=8 loops=1) + Sort Key: schema_upsert_where_sublink.tab_source.c1 + Sort Method: quicksort Memory: 26kB + -> Seq Scan on tab_source (actual rows=10 loops=1) + InitPlan 2 (returns $1) + -> Limit (Actual time: never executed) + -> Sort (Actual time: never executed) + Sort Key: schema_upsert_where_sublink.tab_source.c1 DESC + -> Seq Scan on tab_source (Actual time: never executed) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- window funcs +insert into tab_target values(3,5,7) on duplicate key update c4 = 'conflict3' where c2 = (select sum_rows from ( + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows + FROM generate_series(1, 3) i order by 2 limit 1 +)); +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(3,5,7) on duplicate key update c4 = 'ERROR' where c2 > (select sum_rows from ( + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows + FROM generate_series(1, 3) i order by 2 limit 1 +)); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c2 > $0) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Subquery Scan on __unnamed_subquery__ (actual rows=1 loops=1) + -> Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (sum(i.i) OVER (ORDER BY i.i USING = NULLS LAST ROWS BETWEEN 1::bigint PRECEDING AND 1::bigint FOLLOWING)) + Sort Method: top-N heapsort Memory: 25kB + -> WindowAgg (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: i.i + Sort Method: quicksort Memory: 25kB + -> Function Scan on generate_series i (actual rows=3 loops=1) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- setopt +insert into tab_target values(4,8,9) on duplicate key update c4 = 'conflict4' where c1 = ( + select count(*) / 2.5 from ( + (select c1, c2 from tab_source) + union + (select c2, c1 from tab_source) + ) +); +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,8,9) on duplicate key update c4 = 'ERROR' where c1 = ( + select count(*) / 4 from ( + (select c1, c2 from tab_source) + union all + (select c2, c1 from tab_source) + minus + (select c1, c4 from tab_source) + intersect + (select c2, c3 from tab_source) + ) +); + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: ((tab_target.c1)::double precision = $0) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Aggregate (actual rows=1 loops=1) + -> Subquery Scan on __unnamed_subquery__ (actual rows=10 loops=1) + -> HashSetOp Except (actual rows=10 loops=1) + -> Append (actual rows=20 loops=1) + -> Result (actual rows=20 loops=1) + -> Append (actual rows=20 loops=1) + -> Seq Scan on tab_source (actual rows=10 loops=1) + -> Seq Scan on tab_source (actual 
rows=10 loops=1) + -> Result (actual rows=0 loops=1) + -> HashSetOp Intersect (actual rows=0 loops=1) + -> Append (actual rows=20 loops=1) + -> Subquery Scan on "*SELECT* 3" (actual rows=10 loops=1) + -> Seq Scan on tab_source (actual rows=10 loops=1) + -> Subquery Scan on "*SELECT* 4" (actual rows=10 loops=1) + -> Seq Scan on tab_source (actual rows=10 loops=1) + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- with param +prepare p1 as +insert into tab_target values($1,$2,$3) on duplicate key update c4 = $4 where c1 in ( + with cte as ( + select c1 from tab_source where c2 in ( + select c2 from tab_source where c1 <= $5 + ) + ) select c1 from cte where c1 >= $6 +); +-- gplan not supported yet +set plan_cache_mode = force_generic_plan; +explain (analyze on, verbose off, timing off, costs off) +execute p1(5, 6, 7, 'conflict5', 5, 5); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (hashed SubPlan 1) + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Hash Join (actual rows=1 loops=1) + Hash Cond: (schema_upsert_where_sublink.tab_source.c2 = schema_upsert_where_sublink.tab_source.c2) + -> Seq Scan on tab_source (actual rows=6 loops=1) + Filter: (c1 >= 5) + Rows Removed by Filter: 4 + -> Hash (actual rows=5 loops=1) + Buckets: 32768 Batches: 1 Memory Usage: 1kB + -> HashAggregate (actual rows=5 loops=1) + Group By Key: schema_upsert_where_sublink.tab_source.c2 + -> Seq Scan on tab_source (actual rows=5 loops=1) + Filter: (c1 <= 5) + Rows Removed by Filter: 5 +--?.* +--?.* + +set plan_cache_mode = force_custom_plan; +explain (analyze on, verbose off, timing off, costs off) +execute p1(5, 6, 7, 'ERROR', 4, 5); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (hashed SubPlan 1) + Rows Removed by Conflict Filter: 1 + -> Result (actual rows=1 loops=1) + SubPlan 1 + -> Hash Join (actual rows=0 loops=1) + Hash Cond: (schema_upsert_where_sublink.tab_source.c2 = schema_upsert_where_sublink.tab_source.c2) + -> Seq Scan on tab_source (actual rows=6 loops=1) + Filter: (c1 >= 5) + Rows Removed by Filter: 4 + -> Hash (actual rows=4 loops=1) + Buckets: 32768 Batches: 1 Memory Usage: 1kB + -> HashAggregate (actual rows=4 loops=1) + Group By Key: schema_upsert_where_sublink.tab_source.c2 + -> Seq Scan on tab_source (actual rows=4 loops=1) + Filter: (c1 <= 4) + Rows Removed by Filter: 6 +--?.* +--?.* + +-- test with hint +-- blockname +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,8,7) on duplicate key update c4 = 'conflict6' where c1 = ( + select /*+ blockname(tt) */ c4 from tab_source where c1 = 6 +); +WARNING: unused hint: BlockName(tt) + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 = $0) + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = 6) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- no_expand +explain (analyze 
on, verbose off, timing off, costs off) +insert into tab_target values(6,8,7) on duplicate key update c4 = 'ERROR' where c1 != ( + select /*+ no_expand */ c4 from tab_source where c1 = 6 +); + QUERY PLAN +---------------------------------------------------------------- + Insert on tab_target (actual rows=0 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 <> $0) + Rows Removed by Conflict Filter: 1 + InitPlan 1 (returns $0) + -> Seq Scan on tab_source (actual rows=1 loops=1) + Filter: (c1 = 6) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- leading/join +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,4,7) on duplicate key update c4 = 'conflict7' where c1 = ( + select /*+ leading((t2 t1)) mergejoin(t1 t2) */ t1.c4 from tab_source t1, tab_source t2 where t1.c2 = t2.c2 and t1.c3 = 7 +); + QUERY PLAN +------------------------------------------------------------------------ + Insert on tab_target (actual rows=1 loops=1) + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: tab_target_pkey, tab_target_c1_key + Conflict Filter: (tab_target.c1 = $0) + InitPlan 1 (returns $0) + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (t2.c2 = t1.c2) + -> Sort (actual rows=5 loops=1) + Sort Key: t2.c2 + Sort Method: quicksort Memory: 26kB + -> Seq Scan on tab_source t2 (actual rows=10 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: t1.c2 + Sort Method: quicksort Memory: 25kB + -> Seq Scan on tab_source t1 (actual rows=1 loops=1) + Filter: (c3 = 7) + Rows Removed by Filter: 9 + -> Result (actual rows=1 loops=1) +--?.* +--?.* + +-- rowmarks +insert into tab_target values(8,4,3) on duplicate key update c4 = 'conflict8' where c1 in (select c4 from tab_source where c4 = 8 for update); +insert into tab_target values(9,6,3) on duplicate key update c4 = 'conflict9' where c1 = (select c3 from tab_source where c1 = 9 for share); +select * from tab_target where c1 < 10 order by 1,2,3,4; + c1 | c2 | c3 | c4 +----+----+----+----------- + 1 | 1 | 1 | conflict1 + 2 | 2 | 2 | conflict2 + 3 | 3 | 3 | conflict3 + 4 | 4 | 4 | conflict4 + 5 | 5 | 5 | conflict5 + 6 | 6 | 6 | conflict6 + 7 | 7 | 7 | conflict7 + 8 | 8 | 8 | conflict8 + 9 | 9 | 9 | conflict9 +(9 rows) + +rollback; +DROP SCHEMA schema_upsert_where_sublink CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table tab_target +drop cascades to table tab_source diff --git a/src/test/regress/expected/ustore_subpartition_add_drop_partition.out b/src/test/regress/expected/ustore_subpartition_add_drop_partition.out new file mode 100644 index 000000000..cbdc2b95e --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_add_drop_partition.out @@ -0,0 +1,2370 @@ +DROP SCHEMA ustore_subpartition_add_drop_partition CASCADE; +ERROR: schema "ustore_subpartition_add_drop_partition" does not exist +CREATE SCHEMA ustore_subpartition_add_drop_partition; +SET CURRENT_SCHEMA TO ustore_subpartition_add_drop_partition; +-- +----range-range table---- +-- +--prepare +CREATE TABLE range_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'), + 
SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_sales_pkey" for table "range_range_sales" +INSERT INTO range_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01') + ); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_range_sales ADD PARTITION customer_temp2 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_range_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--success, add 1 subpartition +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES LESS THAN ('2015-01-01'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('2011-01-01'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01'); +ERROR: can not add none-range partition to range partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + 
ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+-------------- + customer1 | p | r | f | 0 | 3 | {200} + customer2 | p | r | f | 0 | 3 | {500} + customer3 | p | r | f | 0 | 3 | {800} + customer4 | p | r | f | 0 | 3 | {1200} + customer5 | p | r | f | 0 | 3 | {1500} + customer6 | p | r | f | 0 | 3 | {NULL} + range_range_sales | r | r | f | 0 | 2 | + customer1_2008 | s | r | t | 0 | | {2009-01-01} + customer1_2009 | s | r | t | 0 | | {2010-01-01} + customer1_2010 | s | r | t | 0 | | {2011-01-01} + customer1_2011 | s | r | t | 0 | | {2012-01-01} + customer1_2012 | s | r | t | 0 | | {2013-01-01} + customer2_2008 | s | r | t | 0 | | {2009-01-01} + customer2_2009 | s | r | t | 0 | | {2010-01-01} + customer2_2010 | s | r | t | 0 | | {2011-01-01} + customer2_2011 | s | r | t | 0 | | {2012-01-01} + customer3_subpartdefault1 | s | r | t | 0 | | {NULL} + customer4_all | s | r | t | 0 | | {2012-01-01} + customer5_2008 | s | r | t | 0 | | {2009-01-01} + customer5_2009 | s | r | t | 0 | | {2010-01-01} + customer5_2010 | s | r | t | 0 | | {2011-01-01} + customer5_2011 | s | r | t | 0 | | {2012-01-01} + customer6_subpartdefault1 | s | r | t | 0 | | {NULL} +(23 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_2008_product_id_idx | x | n | t | t + customer1_2009_product_id_idx | x | n | t | t + customer1_2010_product_id_idx | x | n | t | t + customer1_2011_product_id_idx | x | n | t | t + customer1_2012_product_id_idx | x | n | t | t + customer2_2008_product_id_idx | x | n | t | t + customer2_2009_product_id_idx | x | n | t | t + customer2_2010_product_id_idx | x | n | t | t + customer2_2011_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_all_product_id_idx | x | n | t | t + customer5_2008_product_id_idx | x | n | t | t + customer5_2009_product_id_idx | x | n | t | t + customer5_2010_product_id_idx | x | n | t | t + customer5_2011_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(16 rows) + +\d+ range_range_sales + Table "ustore_subpartition_add_drop_partition.range_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_range_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By RANGE(time_id) +Number of partitions: 6 (View pg_partition to check each partition range.) 
+Number of subpartitions: 16 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_range_sales DROP PARTITION customer2; +--success +ALTER TABLE range_range_sales DROP SUBPARTITION customer1_2008; +--fail, the only subpartition +ALTER TABLE range_range_sales DROP SUBPARTITION customer4_all; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition customer3 +ALTER TABLE range_range_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP PARTITION FOR (400, '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP SUBPARTITION FOR (1400); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE range_range_sales DROP PARTITION FOR ('2010-01-01'); +ERROR: invalid input syntax for integer: "2010-01-01" +--fail, invalid type +ALTER TABLE range_range_sales DROP SUBPARTITION FOR ('2010-01-01', 1400); +ERROR: invalid input syntax for integer: "2010-01-01" +--success, drop subpartition customer5_2010 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2010-01-01'); +--fail, the only subpartition in customer6 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(2000, '2009-01-01'); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2012-01-01'); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM range_range_sales; + count +------- + 201 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+-------------- + customer1 | p | r | f | 0 | 3 | {200} + customer4 | p | r | f | 0 | 3 | {1200} + customer5 | p | r | f | 0 | 3 | {1500} + customer6 | p | r | f | 0 | 3 | {NULL} + range_range_sales | r | r | f | 0 | 2 | + customer1_2009 | s | r | t | 0 | | {2010-01-01} + customer1_2010 | s | r | t | 0 | | {2011-01-01} + customer1_2011 | s | r | t | 0 | | {2012-01-01} + customer1_2012 | s | r | t | 0 | | {2013-01-01} + customer4_all | s | r | t | 0 | | {2012-01-01} + customer5_2008 | s | r | t | 0 | | {2009-01-01} + customer5_2009 | s | r | t | 0 | | {2010-01-01} + customer5_2011 | s | r | t | 0 | | {2012-01-01} + customer6_subpartdefault1 | s | r | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, 
pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_2009_product_id_idx | x | n | t | t + customer1_2010_product_id_idx | x | n | t | t + customer1_2011_product_id_idx | x | n | t | t + customer1_2012_product_id_idx | x | n | t | t + customer4_all_product_id_idx | x | n | t | t + customer5_2008_product_id_idx | x | n | t | t + customer5_2009_product_id_idx | x | n | t | t + customer5_2011_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ range_range_sales + Table "ustore_subpartition_add_drop_partition.range_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_range_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "range_range_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By RANGE(time_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----range-list table---- +-- +--prepare +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_list_sales_pkey" for table "range_list_sales" +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_list_sales ADD PARTITION customer_temp2 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_list_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--success, add 1 subpartition +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); +ERROR: can not add none-list partition to list partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE 
c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+----------------------- + customer1 | p | r | f | 0 | 4 | {200} + customer2 | p | r | f | 0 | 4 | {500} + customer3 | p | r | f | 0 | 4 | {800} + customer4 | p | r | f | 0 | 4 | {1200} + customer5 | p | r | f | 0 | 4 | {1500} + customer6 | p | r | f | 0 | 4 | {NULL} + range_list_sales | r | r | f | 0 | 2 | + customer1_channel1 | s | l | t | 0 | | {0,1,2} + customer1_channel2 | s | l | t | 0 | | {3,4,5} + customer1_channel3 | s | l | t | 0 | | {6,7,8} + customer1_channel4 | s | l | t | 0 | | {9} + customer1_channel5 | s | l | t | 0 | | {X} + customer2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + customer2_channel2 | s | l | t | 0 | | {NULL} + customer3_subpartdefault1 | s | l | t | 0 | | {NULL} + customer4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + customer5_channel1 | s | l | t | 0 | | {0,1,2} + customer5_channel2 | s | l | t | 0 | | {3,4,5} + customer5_channel3 | s | l | t | 0 | | {6,7,8} + customer5_channel4 | s | l | t | 0 | | {9} + customer6_subpartdefault1 | s | l | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_channel1_product_id_idx | x | n | t | t + customer1_channel2_product_id_idx | x | n | t | t + customer1_channel3_product_id_idx | x | n | t | t + customer1_channel4_product_id_idx | x | n | t | t + customer1_channel5_product_id_idx | x | n | t | t + customer2_channel1_product_id_idx | x | n | t | t + customer2_channel2_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_channel1_product_id_idx | x | n | t | t + customer5_channel1_product_id_idx | x | n | t | t + customer5_channel2_product_id_idx | x | n | t | t + customer5_channel3_product_id_idx | x | n | t | t + customer5_channel4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ range_list_sales + Table "ustore_subpartition_add_drop_partition.range_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_list_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_list_sales_idx" ubtree (product_id) LOCAL WITH 
(storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By LIST(channel_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_list_sales DROP PARTITION customer2; +--success +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; +--fail, the only subpartition +ALTER TABLE range_list_sales DROP SUBPARTITION customer4_channel1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition customer3 +ALTER TABLE range_list_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP PARTITION FOR (400, '4'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP SUBPARTITION FOR (1400); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE range_list_sales DROP PARTITION FOR ('abc'); +ERROR: invalid input syntax for integer: "abc" +--fail, invalid type +ALTER TABLE range_list_sales DROP SUBPARTITION FOR ('abc', 1400); +ERROR: invalid input syntax for integer: "abc" +--success, drop subpartition customer5_channel3 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1400, '7'); +--fail, the only subpartition in customer6 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(2000, 'X'); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1100, 'X'); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM range_list_sales; + count +------- + 341 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+----------------------- + customer1 | p | r | f | 0 | 4 | {200} + customer4 | p | r | f | 0 | 4 | {1200} + customer5 | p | r | f | 0 | 4 | {1500} + customer6 | p | r | f | 0 | 4 | {NULL} + range_list_sales | r | r | f | 0 | 2 | + customer1_channel2 | s | l | t | 0 | | {3,4,5} + customer1_channel3 | s | l | t | 0 | | {6,7,8} + customer1_channel4 | s | l | t | 0 | | {9} + customer1_channel5 | s | l | t | 0 | | {X} + customer4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + customer5_channel1 | s | l | t | 0 | | {0,1,2} + customer5_channel2 | s | l | t | 0 | | {3,4,5} + customer5_channel4 | s | l | t | 0 | | {9} + customer6_subpartdefault1 | s | l | t | 0 | | {NULL} +(14 rows) + 
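+-- Reading aid (editorial comment, not emitted by the tests; legend inferred from
+-- the catalog output above): in pg_partition, parttype 'r' marks the partitioned
+-- table itself, 'p' a partition, 's' a subpartition, and 'x' an index partition;
+-- partstrategy is 'r' for range, 'l' for list, 'h' for hash, and 'n' on index
+-- partitions. In the outputs above only subpartitions (and index partitions) own
+-- storage, so hasfilenode is 't' for 's'/'x' rows and 'f' for 'p'/'r' rows.
+-- A hypothetical single-level variant of the check used throughout this suite:
+-- SELECT relname, parttype, partstrategy FROM pg_partition
+--   WHERE parentid = 'range_list_sales'::regclass ORDER BY relname;
+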
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_channel2_product_id_idx | x | n | t | t + customer1_channel3_product_id_idx | x | n | t | t + customer1_channel4_product_id_idx | x | n | t | t + customer1_channel5_product_id_idx | x | n | t | t + customer4_channel1_product_id_idx | x | n | t | t + customer5_channel1_product_id_idx | x | n | t | t + customer5_channel2_product_id_idx | x | n | t | t + customer5_channel4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ range_list_sales + Table "ustore_subpartition_add_drop_partition.range_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_list_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "range_list_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----range-hash table---- +-- +--prepare +CREATE TABLE range_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY HASH (product_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_product1, + SUBPARTITION customer1_product2, + SUBPARTITION customer1_product3, + SUBPARTITION customer1_product4 + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_product1, + SUBPARTITION customer2_product2 + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_product1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_hash_sales_pkey" for table "range_hash_sales" +INSERT INTO range_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_hash_sales_idx ON range_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_product1, + SUBPARTITION customer5_product2, + SUBPARTITION customer5_product3, + SUBPARTITION customer5_product4 + ); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE range_hash_sales ADD PARTITION customer_temp2 VALUES (1300); +ERROR: can not add none-range partition to range partition table +--success, add 1 default subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, not support add hash +ALTER TABLE range_hash_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; + ^ +--fail, invalid format +ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+------------ + customer1 | p | r | f | 0 | 1 | {200} + customer2 | p | r | f | 0 | 1 | 
{500} + customer3 | p | r | f | 0 | 1 | {800} + customer4 | p | r | f | 0 | 1 | {1200} + customer5 | p | r | f | 0 | 1 | {1500} + customer6 | p | r | f | 0 | 1 | {NULL} + range_hash_sales | r | r | f | 0 | 2 | + customer1_product1 | s | h | t | 0 | | {0} + customer1_product2 | s | h | t | 0 | | {1} + customer1_product3 | s | h | t | 0 | | {2} + customer1_product4 | s | h | t | 0 | | {3} + customer2_product1 | s | h | t | 0 | | {0} + customer2_product2 | s | h | t | 0 | | {1} + customer3_subpartdefault1 | s | h | t | 0 | | {0} + customer4_product1 | s | h | t | 0 | | {0} + customer5_product1 | s | h | t | 0 | | {0} + customer5_product2 | s | h | t | 0 | | {1} + customer5_product3 | s | h | t | 0 | | {2} + customer5_product4 | s | h | t | 0 | | {3} + customer6_subpartdefault1 | s | h | t | 0 | | {0} +(20 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_product1_product_id_idx | x | n | t | t + customer1_product2_product_id_idx | x | n | t | t + customer1_product3_product_id_idx | x | n | t | t + customer1_product4_product_id_idx | x | n | t | t + customer2_product1_product_id_idx | x | n | t | t + customer2_product2_product_id_idx | x | n | t | t + customer3_subpartdefault1_product_id_idx | x | n | t | t + customer4_product1_product_id_idx | x | n | t | t + customer5_product1_product_id_idx | x | n | t | t + customer5_product2_product_id_idx | x | n | t | t + customer5_product3_product_id_idx | x | n | t | t + customer5_product4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(13 rows) + +\d+ range_hash_sales + Table "ustore_subpartition_add_drop_partition.range_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_hash_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_hash_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By HASH(product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 13 (View pg_partition to check each subpartition range.) 
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_hash_sales DROP PARTITION customer2; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer1_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer4_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--success, drop partition customer3 +ALTER TABLE range_hash_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_hash_sales DROP PARTITION FOR (400, '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, invalid type +ALTER TABLE range_hash_sales DROP PARTITION FOR ('2010-01-01'); +ERROR: invalid input syntax for integer: "2010-01-01" +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION FOR(1400, 1); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM range_hash_sales; + count +------- + 400 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +---------------------------+----------+--------------+-------------+---------------+---------+------------ + customer1 | p | r | f | 0 | 1 | {200} + customer4 | p | r | f | 0 | 1 | {1200} + customer5 | p | r | f | 0 | 1 | {1500} + customer6 | p | r | f | 0 | 1 | {NULL} + range_hash_sales | r | r | f | 0 | 2 | + customer1_product1 | s | h | t | 0 | | {0} + customer1_product2 | s | h | t | 0 | | {1} + customer1_product3 | s | h | t | 0 | | {2} + customer1_product4 | s | h | t | 0 | | {3} + customer4_product1 | s | h | t | 0 | | {0} + customer5_product1 | s | h | t | 0 | | {0} + customer5_product2 | s | h | t | 0 | | {1} + customer5_product3 | s | h | t | 0 | | {2} + customer5_product4 | s | h | t | 0 | | {3} + customer6_subpartdefault1 | s | h | t | 0 | | {0} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +------------------------------------------+----------+--------------+-------------+------------- + customer1_product1_product_id_idx | x | n | t | t + customer1_product2_product_id_idx | x | n | t | t + customer1_product3_product_id_idx | x | n | t | t + customer1_product4_product_id_idx | x | n | t | t + customer4_product1_product_id_idx | x | n | t | t + customer5_product1_product_id_idx | x | n | t | t + customer5_product2_product_id_idx | x | n | t | t + 
customer5_product3_product_id_idx | x | n | t | t + customer5_product4_product_id_idx | x | n | t | t + customer6_subpartdefault1_product_id_idx | x | n | t | t +(10 rows) + +\d+ range_hash_sales + Table "ustore_subpartition_add_drop_partition.range_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "range_hash_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "range_hash_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(customer_id) Subpartition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----list-range table---- +-- +--prepare +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_range_sales_pkey" for table "list_range_sales" +INSERT INTO list_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_customer1 VALUES LESS THAN (200), + SUBPARTITION channel5_customer2 VALUES LESS THAN (500), + SUBPARTITION channel5_customer3 VALUES LESS THAN (800), + SUBPARTITION channel5_customer4 VALUES LESS THAN (1200) + ); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_range_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_range_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value 
conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--success, add 1 subpartition +ALTER TABLE list_range_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_customer5 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES LESS THAN (2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES LESS THAN (2000); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500); +ERROR: can not add none-range partition to range partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 2 | {0,1,2} + channel2 | p | l | f | 0 | 2 | {3,4,5} + channel3 | p | l | f | 0 | 2 | {6,7} + channel4 | p | l | f | 0 | 2 | {8,9} + channel5 | p | l | f | 0 | 2 | {X} + channel6 | p | l | f | 0 | 2 | {NULL} + list_range_sales | r | l | f | 0 | 4 | + channel1_customer1 | s | r | t | 0 | | {200} + channel1_customer2 | s | r | t | 0 | | {500} + channel1_customer3 | s | r | t | 0 | | {800} + channel1_customer4 | s | r | t | 0 | | {1200} + channel1_customer5 | s | r | t | 0 | | {NULL} + channel2_customer1 | s | r | t | 0 | | {500} + channel2_customer2 | s | r | t | 0 | | {NULL} + channel3_subpartdefault1 | s | r | t | 0 | | {NULL} + channel4_customer1 | s | r | t | 0 | | {1200} + channel5_customer1 | s | r | t | 0 | | {200} + channel5_customer2 | s | r | t | 0 | | {500} + channel5_customer3 | s | r | t | 0 | | {800} + channel5_customer4 | s | r | t | 0 | | {1200} + channel6_subpartdefault1 | s | r | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_customer1_product_id_idx | x | n | t | t + channel1_customer2_product_id_idx | x | n | t | t + channel1_customer3_product_id_idx | x | n | t | t + channel1_customer4_product_id_idx | x | n | t | t + channel1_customer5_product_id_idx | x | n | t | t + channel2_customer1_product_id_idx | x | n | t | t + channel2_customer2_product_id_idx | x | n | t | t + 
channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_customer1_product_id_idx | x | n | t | t + channel5_customer1_product_id_idx | x | n | t | t + channel5_customer2_product_id_idx | x | n | t | t + channel5_customer3_product_id_idx | x | n | t | t + channel5_customer4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ list_range_sales + Table "ustore_subpartition_add_drop_partition.list_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_range_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "list_range_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By RANGE(customer_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_range_sales DROP PARTITION channel2; +--success +ALTER TABLE list_range_sales DROP SUBPARTITION channel1_customer1; +--fail, the only subpartition +ALTER TABLE list_range_sales DROP SUBPARTITION channel4_customer1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition channel3 +ALTER TABLE list_range_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP PARTITION FOR('X', 700); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X'); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE list_range_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, invalid type +ALTER TABLE list_range_sales DROP SUBPARTITION FOR(700, 'X'); +ERROR: value too long for type character(1) +--success, drop subpartition channel5_customer3 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 700); +--fail, the only subpartition in channel6 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('K', 100); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 2500); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM list_range_sales; + count +------- + 441 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid 
IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 2 | {0,1,2} + channel4 | p | l | f | 0 | 2 | {8,9} + channel5 | p | l | f | 0 | 2 | {X} + channel6 | p | l | f | 0 | 2 | {NULL} + list_range_sales | r | l | f | 0 | 4 | + channel1_customer2 | s | r | t | 0 | | {500} + channel1_customer3 | s | r | t | 0 | | {800} + channel1_customer4 | s | r | t | 0 | | {1200} + channel1_customer5 | s | r | t | 0 | | {NULL} + channel4_customer1 | s | r | t | 0 | | {1200} + channel5_customer1 | s | r | t | 0 | | {200} + channel5_customer2 | s | r | t | 0 | | {500} + channel5_customer4 | s | r | t | 0 | | {1200} + channel6_subpartdefault1 | s | r | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_customer2_product_id_idx | x | n | t | t + channel1_customer3_product_id_idx | x | n | t | t + channel1_customer4_product_id_idx | x | n | t | t + channel1_customer5_product_id_idx | x | n | t | t + channel4_customer1_product_id_idx | x | n | t | t + channel5_customer1_product_id_idx | x | n | t | t + channel5_customer2_product_id_idx | x | n | t | t + channel5_customer4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ list_range_sales + Table "ustore_subpartition_add_drop_partition.list_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_range_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "list_range_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) 
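+--NOTE: the count of 441 above is consistent with what was dropped: of the original 1000 rows,
+--channel1_customer1 held 59 (customer_id < 200 with channel_id in '0','1','2'), channel2 held
+--300, channel3 held 200, and channel5_customer3 was empty; 1000 - 59 - 300 - 200 = 441.
+--A minimal illustrative check (not part of the captured run; output omitted):
+SELECT channel_id, count(*) FROM list_range_sales GROUP BY channel_id ORDER BY channel_id;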
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----list-list table---- +-- +--prepare +CREATE TABLE list_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (channel_id) SUBPARTITION BY LIST (type_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_type1 VALUES (0, 1, 2), + SUBPARTITION channel1_type2 VALUES (3, 4), + SUBPARTITION channel1_type3 VALUES (5, 6, 7), + SUBPARTITION channel1_type4 VALUES (8, 9) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_type1 VALUES (0, 1, 2, 3), + SUBPARTITION channel2_type2 VALUES (DEFAULT) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_type1 VALUES (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_list_sales_pkey" for table "list_list_sales" +INSERT INTO list_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_list_sales_idx ON list_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_list_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_type1 VALUES (0, 1, 2), + SUBPARTITION channel5_type2 VALUES (3, 4), + SUBPARTITION channel5_type3 VALUES (5, 6, 7), + SUBPARTITION channel5_type4 VALUES (8, 9) + ); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_list_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER TABLE list_list_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--success, add 1 subpartition +ALTER TABLE list_list_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_type5 VALUES (DEFAULT); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES (10, 11, 12); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES (10, 11, 12); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); +ERROR: can not add none-list partition to list partition table +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition 
p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + channel1 | p | l | f | 0 | 5 | {0,1,2} + channel2 | p | l | f | 0 | 5 | {3,4,5} + channel3 | p | l | f | 0 | 5 | {6,7} + channel4 | p | l | f | 0 | 5 | {8,9} + channel5 | p | l | f | 0 | 5 | {X} + channel6 | p | l | f | 0 | 5 | {NULL} + list_list_sales | r | l | f | 0 | 4 | + channel1_type1 | s | l | t | 0 | | {0,1,2} + channel1_type2 | s | l | t | 0 | | {3,4} + channel1_type3 | s | l | t | 0 | | {5,6,7} + channel1_type4 | s | l | t | 0 | | {8,9} + channel1_type5 | s | l | t | 0 | | {NULL} + channel2_type1 | s | l | t | 0 | | {0,1,2,3} + channel2_type2 | s | l | t | 0 | | {NULL} + channel3_subpartdefault1 | s | l | t | 0 | | {NULL} + channel4_type1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + channel5_type1 | s | l | t | 0 | | {0,1,2} + channel5_type2 | s | l | t | 0 | | {3,4} + channel5_type3 | s | l | t | 0 | | {5,6,7} + channel5_type4 | s | l | t | 0 | | {8,9} + channel6_subpartdefault1 | s | l | t | 0 | | {NULL} +(21 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_type1_product_id_idx | x | n | t | t + channel1_type2_product_id_idx | x | n | t | t + channel1_type3_product_id_idx | x | n | t | t + channel1_type4_product_id_idx | x | n | t | t + channel1_type5_product_id_idx | x | n | t | t + channel2_type1_product_id_idx | x | n | t | t + channel2_type2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_type1_product_id_idx | x | n | t | t + channel5_type1_product_id_idx | x | n | t | t + channel5_type2_product_id_idx | x | n | t | t + channel5_type3_product_id_idx | x | n | t | t + channel5_type4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(14 rows) + +\d+ list_list_sales + Table "ustore_subpartition_add_drop_partition.list_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_list_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "list_list_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By LIST(type_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 14 (View pg_partition to check each subpartition range.) 
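+--NOTE: a LIST subpartition declared with VALUES (DEFAULT), such as channel1_type5 above, is
+--recorded in pg_partition with boundaries = {NULL}. A minimal illustrative query (not part of
+--the captured run; output omitted) listing such catch-all subpartitions:
+SELECT part.relname AS partition_name, sub.relname AS subpartition_name
+  FROM pg_class c
+  JOIN pg_partition part ON (part.parentid = c.oid AND part.parttype = 'p')
+  JOIN pg_partition sub ON (sub.parentid = part.oid AND sub.boundaries[1] IS NULL)
+ WHERE c.relname = 'list_list_sales'
+ ORDER BY 1, 2;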
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_list_sales DROP PARTITION channel2; +--success +ALTER TABLE list_list_sales DROP SUBPARTITION channel1_type1; +--fail, the only subpartition +ALTER TABLE list_list_sales DROP SUBPARTITION channel4_type1; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--success, drop partition channel3 +ALTER TABLE list_list_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP PARTITION FOR('X', 6); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X'); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE list_list_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, invalid type +ALTER TABLE list_list_sales DROP SUBPARTITION FOR(10, 'X'); +ERROR: value too long for type character(1) +--success, drop subpartition channel5_type3 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 6); +--fail, the only subpartition in channel6 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('K', 10); +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, no subpartition find +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 5); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM list_list_sales; + count +------- + 200 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + channel1 | p | l | f | 0 | 5 | {0,1,2} + channel4 | p | l | f | 0 | 5 | {8,9} + channel5 | p | l | f | 0 | 5 | {X} + channel6 | p | l | f | 0 | 5 | {NULL} + list_list_sales | r | l | f | 0 | 4 | + channel1_type2 | s | l | t | 0 | | {3,4} + channel1_type3 | s | l | t | 0 | | {5,6,7} + channel1_type4 | s | l | t | 0 | | {8,9} + channel1_type5 | s | l | t | 0 | | {NULL} + channel4_type1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + channel5_type1 | s | l | t | 0 | | {0,1,2} + channel5_type2 | s | l | t | 0 | | {3,4} + channel5_type4 | s | l | t | 0 | | {8,9} + channel6_subpartdefault1 | s | l | t | 0 | | {NULL} +(14 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable 
+-----------------------------------------+----------+--------------+-------------+------------- + channel1_type2_product_id_idx | x | n | t | t + channel1_type3_product_id_idx | x | n | t | t + channel1_type4_product_id_idx | x | n | t | t + channel1_type5_product_id_idx | x | n | t | t + channel4_type1_product_id_idx | x | n | t | t + channel5_type1_product_id_idx | x | n | t | t + channel5_type2_product_id_idx | x | n | t | t + channel5_type4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(9 rows) + +\d+ list_list_sales + Table "ustore_subpartition_add_drop_partition.list_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_list_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "list_list_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By LIST(type_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 9 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----list-hash table---- +-- +--prepare +CREATE TABLE list_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (channel_id) SUBPARTITION BY HASH (product_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_product1, + SUBPARTITION channel1_product2, + SUBPARTITION channel1_product3, + SUBPARTITION channel1_product4 + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_product1, + SUBPARTITION channel2_product2 + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_product1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_hash_sales_pkey" for table "list_hash_sales" +INSERT INTO list_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_hash_sales_idx ON list_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_product1, + SUBPARTITION channel5_product2, + SUBPARTITION channel5_product3, + SUBPARTITION channel5_product4 + ); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE list_hash_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +ERROR: can not add none-list partition to list partition table +--success, add 1 default subpartition +ALTER 
TABLE list_hash_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, not support add hash +ALTER TABLE list_hash_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...s MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; + ^ +--fail, invalid format +ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); +ERROR: can not add hash partition +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 1 | {0,1,2} + channel2 | p | l | f | 0 | 1 | {3,4,5} + channel3 | p | l | f | 0 | 1 | {6,7} + channel4 | p | l | f | 0 | 1 | {8,9} + channel5 | p | l | f | 0 | 1 | {X} + channel6 | p | l | f | 0 | 1 | {NULL} + list_hash_sales | r | l | f | 0 | 4 | + channel1_product1 | s | h | t | 0 | | {0} + channel1_product2 | s | h | t | 0 | | {1} + channel1_product3 | s | h | t | 0 | | {2} + channel1_product4 | s | h | t | 0 | | {3} + channel2_product1 | s | h | t | 0 | | {0} + channel2_product2 | s | h | t | 0 | | {1} + channel3_subpartdefault1 | s | h | t | 0 | | {0} + channel4_product1 | s | h | t | 0 | | {0} + channel5_product1 | s | h | t | 0 | | {0} + channel5_product2 | s | h | t | 0 | | {1} + channel5_product3 | s | h | t | 0 | | {2} + channel5_product4 | s | h | t | 0 | | {3} + channel6_subpartdefault1 | s | h | t | 0 | | {0} +(20 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_product1_product_id_idx | x | n | t | t + channel1_product2_product_id_idx | x | n | t | t + channel1_product3_product_id_idx | x | n | t | t + channel1_product4_product_id_idx | x | n | t | t + channel2_product1_product_id_idx | x | n | t | t + channel2_product2_product_id_idx | x | n | t | t + channel3_subpartdefault1_product_id_idx | x | n | t | t + channel4_product1_product_id_idx | x | n | t | t + channel5_product1_product_id_idx | x | n | t | t + channel5_product2_product_id_idx | x | n | t | t + channel5_product3_product_id_idx | x | n | t | t + channel5_product4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(13 rows) + +\d+ list_hash_sales + Table "ustore_subpartition_add_drop_partition.list_hash_sales" + Column | 
Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_hash_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "list_hash_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By HASH(product_id) +Number of partitions: 6 (View pg_partition to check each partition range.) +Number of subpartitions: 13 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_hash_sales DROP PARTITION channel2; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel1_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel4_product1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--success, drop partition channel3 +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6', '2010-01-01'); +ERROR: number of boundary items NOT EQUAL to number of partition keys +--fail, invalid type +ALTER TABLE list_hash_sales DROP PARTITION FOR (10); +ERROR: value too long for type character(1) +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION FOR('X', 6); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM list_hash_sales; + count +------- + 500 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + channel1 | p | l | f | 0 | 1 | {0,1,2} + channel4 | p | l | f | 0 | 1 | {8,9} + channel5 | p | l | f | 0 | 1 | {X} + channel6 | p | l | f | 0 | 1 | {NULL} + list_hash_sales | r | l | f | 0 | 4 | + channel1_product1 | s | h | t | 0 | | {0} + channel1_product2 | s | h | t | 0 | | {1} + channel1_product3 | s | h | t | 0 | | {2} + channel1_product4 | s | h | t | 0 | | {3} + channel4_product1 | s | h | t | 0 | | {0} + channel5_product1 | s | h | t | 0 | | {0} + channel5_product2 | s | h | t | 0 | | {1} + channel5_product3 | s | h | t | 0 | | {2} + channel5_product4 | s | h | t | 0 | | {3} + channel6_subpartdefault1 | s | h | t 
| 0 | | {0} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + channel1_product1_product_id_idx | x | n | t | t + channel1_product2_product_id_idx | x | n | t | t + channel1_product3_product_id_idx | x | n | t | t + channel1_product4_product_id_idx | x | n | t | t + channel4_product1_product_id_idx | x | n | t | t + channel5_product1_product_id_idx | x | n | t | t + channel5_product2_product_id_idx | x | n | t | t + channel5_product3_product_id_idx | x | n | t | t + channel5_product4_product_id_idx | x | n | t | t + channel6_subpartdefault1_product_id_idx | x | n | t | t +(10 rows) + +\d+ list_hash_sales + Table "ustore_subpartition_add_drop_partition.list_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "list_hash_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "list_hash_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By LIST(channel_id) Subpartition By HASH(product_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) 
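+--NOTE: because individual HASH subpartitions cannot be dropped, only the two whole LIST
+--partitions (channel2 and channel3, 300 + 200 rows) were removed, leaving the 500 rows counted
+--above. A hash subpartition can still be read directly; a minimal illustrative query (assuming
+--the SUBPARTITION clause is available in this build; output omitted):
+SELECT count(*) FROM list_hash_sales SUBPARTITION (channel1_product1);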
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----hash-range table---- +-- +--prepare +CREATE TABLE hash_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1 VALUES LESS THAN (200), + SUBPARTITION product1_customer2 VALUES LESS THAN (500), + SUBPARTITION product1_customer3 VALUES LESS THAN (800), + SUBPARTITION product1_customer4 VALUES LESS THAN (1200) + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1 VALUES LESS THAN (500), + SUBPARTITION product2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 VALUES LESS THAN (1200) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_sales_pkey" for table "hash_range_sales" +INSERT INTO hash_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_range_sales_idx ON hash_range_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_customer4 VALUES LESS THAN (1200) + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_range_sales ADD PARTITION product_temp2; + ^ +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_customer5 VALUES LESS THAN (1800); +--fail, out of range +ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES LESS THAN (1800); +ERROR: upper boundary of adding partition MUST overtop last existing partition +--fail, invalid format +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT); +ERROR: can not add none-range partition to range partition table +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE); +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f 
| 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_range_sales | r | h | f | 0 | 1 | + product1_customer1 | s | r | t | 0 | | {200} + product1_customer2 | s | r | t | 0 | | {500} + product1_customer3 | s | r | t | 0 | | {800} + product1_customer4 | s | r | t | 0 | | {1200} + product1_customer5 | s | r | t | 0 | | {1800} + product2_customer1 | s | r | t | 0 | | {500} + product2_customer2 | s | r | t | 0 | | {NULL} + product3_subpartdefault1 | s | r | t | 0 | | {NULL} + product4_customer1 | s | r | t | 0 | | {1200} + product4_customer2 | s | r | t | 0 | | {NULL} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product1_customer5_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t + product4_customer2_product_id_idx | x | n | t | t +(10 rows) + +\d+ hash_range_sales + Table "ustore_subpartition_add_drop_partition.hash_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_range_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "hash_range_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. +--success, drop subpartition product1_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer1; +--success, drop subpartition product4_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer1; +--fail, the only subpartition in product4 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer2; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0); +ERROR: Droping hash partition is unsupported. 
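+--NOTE: the number of HASH partitions is fixed when the table is created, which is why the
+--ADD/DROP PARTITION attempts above fail; changing the bucket count means rebuilding the table.
+--A minimal illustrative sketch (hypothetical table hash_range_sales_new; not part of the
+--captured run):
+CREATE TABLE hash_range_sales_new
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 NOT NULL,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id)
+(
+    PARTITION product1_new
+    (
+        SUBPARTITION product1_new_customer1 VALUES LESS THAN (MAXVALUE)
+    ),
+    PARTITION product2_new
+    (
+        SUBPARTITION product2_new_customer1 VALUES LESS THAN (MAXVALUE)
+    )
+);
+INSERT INTO hash_range_sales_new SELECT * FROM hash_range_sales;
+DROP TABLE hash_range_sales_new;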
+--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0, 100); +ERROR: Droping hash partition is unsupported. +--fail, number not equal to the number of partkey +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR('2010-01-01', 100); +ERROR: invalid input syntax for integer: "2010-01-01" +--success, drop subpartition product1_customer2, but not suggest to do this operation +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 100); +--fail, no subpartition find +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 2300); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM hash_range_sales; + count +------- + 628 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_range_sales | r | h | f | 0 | 1 | + product1_customer3 | s | r | t | 0 | | {800} + product1_customer4 | s | r | t | 0 | | {1200} + product1_customer5 | s | r | t | 0 | | {1800} + product2_customer1 | s | r | t | 0 | | {500} + product2_customer2 | s | r | t | 0 | | {NULL} + product3_subpartdefault1 | s | r | t | 0 | | {NULL} + product4_customer2 | s | r | t | 0 | | {NULL} +(12 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product1_customer5_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer2_product_id_idx | x | n | t | t +(7 rows) + +\d+ hash_range_sales + Table "ustore_subpartition_add_drop_partition.hash_range_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended 
| | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_range_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "hash_range_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By RANGE(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 7 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----hash-list table---- +-- +--prepare +CREATE TABLE hash_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (product_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION product1 + ( + SUBPARTITION product1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product1_channel4 VALUES ('9') + ), + PARTITION product2 + ( + SUBPARTITION product2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION product2_channel2 VALUES (DEFAULT) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_list_sales_pkey" for table "hash_list_sales" +INSERT INTO hash_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_list_sales_idx ON hash_list_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product_temp1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product_temp1_channel4 VALUES ('9') + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_list_sales ADD PARTITION product_temp2; + ^ +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_temp1 VALUES ('X'); +ERROR: list boundary of adding partition MUST NOT overlap with existing partition +--fail, invalid format +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE); +ERROR: can not add none-list partition to list partition table +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT); +--check for ok after add +SELECT p1.relname, p1.parttype, 
p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+----------------------- + product1 | p | h | f | 0 | 4 | {0} + product2 | p | h | f | 0 | 4 | {1} + product3 | p | h | f | 0 | 4 | {2} + product4 | p | h | f | 0 | 4 | {3} + hash_list_sales | r | h | f | 0 | 1 | + product1_channel1 | s | l | t | 0 | | {0,1,2} + product1_channel2 | s | l | t | 0 | | {3,4,5} + product1_channel3 | s | l | t | 0 | | {6,7,8} + product1_channel4 | s | l | t | 0 | | {9} + product1_channel5 | s | l | t | 0 | | {X} + product2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + product2_channel2 | s | l | t | 0 | | {NULL} + product3_subpartdefault1 | s | l | t | 0 | | {NULL} + product4_channel1 | s | l | t | 0 | | {0,1,2,3,4,5,6,7,8,9} + product4_channel2 | s | l | t | 0 | | {NULL} +(15 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_channel1_product_id_idx | x | n | t | t + product1_channel2_product_id_idx | x | n | t | t + product1_channel3_product_id_idx | x | n | t | t + product1_channel4_product_id_idx | x | n | t | t + product1_channel5_product_id_idx | x | n | t | t + product2_channel1_product_id_idx | x | n | t | t + product2_channel2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_channel1_product_id_idx | x | n | t | t + product4_channel2_product_id_idx | x | n | t | t +(10 rows) + +\d+ hash_list_sales + Table "ustore_subpartition_add_drop_partition.hash_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_list_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "hash_list_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) 
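+--NOTE: in the catalog listing above, the parttype='r' row stores the top-level partition key
+--(partkey 1 = product_id, the HASH column), while each parttype='p' row stores the
+--subpartition key (partkey 4 = channel_id, the LIST column). A minimal illustrative decode
+--(assuming partkey is an int2vector of attribute numbers; output omitted):
+SELECT DISTINCT p.parttype, a.attname AS key_column
+  FROM pg_class c
+  JOIN pg_partition p ON (p.parentid = c.oid)
+  JOIN pg_attribute a ON (a.attrelid = c.oid AND a.attnum = p.partkey[0])
+ WHERE c.relname = 'hash_list_sales';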
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. +--success, drop subpartition product1_channel1 +ALTER TABLE hash_list_sales DROP SUBPARTITION product1_channel1; +--success, drop subpartition product4_channel1 +ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel1; +--fail, the only subpartition in product4 +ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel2; +ERROR: Cannot drop the only subpartition of a partitioned table +DETAIL: N/A +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION FOR(0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION FOR(0, '4'); +ERROR: Droping hash partition is unsupported. +--fail, number not equal to the number of partkey +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0); +ERROR: Number of boundary items NOT EQUAL to number of partition keys +DETAIL: There must be 2 boundary items for DROP SUBPARTITION in a subpartitioned table +--fail, invalid type +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR('2010-01-01', '4'); +ERROR: invalid input syntax for integer: "2010-01-01" +--success, drop subpartition product1_channel2, but not suggest to do this operation +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, '4'); +--fail, no subpartition find +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, 'Z'); +ERROR: The subpartition number is invalid or out-of-range +DETAIL: N/A +--check for ok after drop +SELECT count(*) FROM hash_list_sales; + count +------- + 608 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------- + product1 | p | h | f | 0 | 4 | {0} + product2 | p | h | f | 0 | 4 | {1} + product3 | p | h | f | 0 | 4 | {2} + product4 | p | h | f | 0 | 4 | {3} + hash_list_sales | r | h | f | 0 | 1 | + product1_channel3 | s | l | t | 0 | | {6,7,8} + product1_channel4 | s | l | t | 0 | | {9} + product1_channel5 | s | l | t | 0 | | {X} + product2_channel1 | s | l | t | 0 | | {0,1,2,3,4} + product2_channel2 | s | l | t | 0 | | {NULL} + product3_subpartdefault1 | s | l | t | 0 | | {NULL} + product4_channel2 | s | l | t | 0 | | {NULL} +(12 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_channel3_product_id_idx | x | n 
| t | t + product1_channel4_product_id_idx | x | n | t | t + product1_channel5_product_id_idx | x | n | t | t + product2_channel1_product_id_idx | x | n | t | t + product2_channel2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_channel2_product_id_idx | x | n | t | t +(7 rows) + +\d+ hash_list_sales + Table "ustore_subpartition_add_drop_partition.hash_list_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_list_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default UNUSABLE + "hash_list_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By LIST(channel_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 7 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- +----hash-hash table---- +-- +--prepare +CREATE TABLE hash_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (product_id) SUBPARTITION BY HASH (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1, + SUBPARTITION product1_customer2, + SUBPARTITION product1_customer3, + SUBPARTITION product1_customer4 + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1, + SUBPARTITION product2_customer2 + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_hash_sales_pkey" for table "hash_hash_sales" +INSERT INTO hash_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_hash_sales_idx ON hash_hash_sales(product_id) LOCAL; +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1, + SUBPARTITION product_temp1_customer2, + SUBPARTITION product_temp1_customer3, + SUBPARTITION product_temp1_customer4 + ); +ERROR: syntax error at or near "(" +LINE 2: ( + ^ +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; +ERROR: syntax error at or near ";" +LINE 1: ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; + ^ +--fail, not support add hash +ALTER TABLE hash_hash_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; +ERROR: syntax error at or near ";" +LINE 1: ...s MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; + ^ +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 
+ WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_hash_sales | r | h | f | 0 | 1 | + product1_customer1 | s | h | t | 0 | | {0} + product1_customer2 | s | h | t | 0 | | {1} + product1_customer3 | s | h | t | 0 | | {2} + product1_customer4 | s | h | t | 0 | | {3} + product2_customer1 | s | h | t | 0 | | {0} + product2_customer2 | s | h | t | 0 | | {1} + product3_subpartdefault1 | s | h | t | 0 | | {0} + product4_customer1 | s | h | t | 0 | | {0} +(13 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t +(8 rows) + +\d+ hash_hash_sales + Table "ustore_subpartition_add_drop_partition.hash_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id | integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_hash_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "hash_hash_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By HASH(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 8 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION product2; +ERROR: Droping hash partition is unsupported. 
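+-- NOTE (editorial sketch, not captured test output): the hash (sub)partition count is
+-- fixed at CREATE time because each row's placement is hash(key) modulo the partition
+-- count, so ADD/DROP would silently remap existing rows; that is why the DDL above and
+-- below is rejected. Assuming TRUNCATE remains available for hash partitions, data can
+-- be cleared without changing the partition layout:
+ALTER TABLE hash_hash_sales TRUNCATE PARTITION product2;
+ALTER TABLE hash_hash_sales TRUNCATE SUBPARTITION product1_customer1;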
+--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product1_customer1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product4_customer1; +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0, 0); +ERROR: Droping hash partition is unsupported. +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0, 0); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0); +ERROR: Un-support feature +DETAIL: The syntax is unsupported for hash subpartition +--check for ok after drop +SELECT count(*) FROM hash_hash_sales; + count +------- + 1000 +(1 row) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; + relname | parttype | partstrategy | hasfilenode | reltablespace | partkey | boundaries +--------------------------+----------+--------------+-------------+---------------+---------+------------ + product1 | p | h | f | 0 | 2 | {0} + product2 | p | h | f | 0 | 2 | {1} + product3 | p | h | f | 0 | 2 | {2} + product4 | p | h | f | 0 | 2 | {3} + hash_hash_sales | r | h | f | 0 | 1 | + product1_customer1 | s | h | t | 0 | | {0} + product1_customer2 | s | h | t | 0 | | {1} + product1_customer3 | s | h | t | 0 | | {2} + product1_customer4 | s | h | t | 0 | | {3} + product2_customer1 | s | h | t | 0 | | {0} + product2_customer2 | s | h | t | 0 | | {1} + product3_subpartdefault1 | s | h | t | 0 | | {0} + product4_customer1 | s | h | t | 0 | | {0} +(13 rows) + +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; + relname | parttype | partstrategy | hasfilenode | indisusable +-----------------------------------------+----------+--------------+-------------+------------- + product1_customer1_product_id_idx | x | n | t | t + product1_customer2_product_id_idx | x | n | t | t + product1_customer3_product_id_idx | x | n | t | t + product1_customer4_product_id_idx | x | n | t | t + product2_customer1_product_id_idx | x | n | t | t + product2_customer2_product_id_idx | x | n | t | t + product3_subpartdefault1_product_id_idx | x | n | t | t + product4_customer1_product_id_idx | x | n | t | t +(8 rows) + +\d+ hash_hash_sales + Table "ustore_subpartition_add_drop_partition.hash_hash_sales" + Column | Type | Modifiers | Storage | Stats target | Description +---------------+--------------------------------+-----------+----------+--------------+------------- + product_id 
| integer | not null | plain | | + customer_id | integer | not null | plain | | + time_id | timestamp(0) without time zone | | plain | | + channel_id | character(1) | | extended | | + type_id | integer | | plain | | + quantity_sold | numeric(3,0) | | main | | + amount_sold | numeric(10,2) | | main | | +Indexes: + "hash_hash_sales_pkey" PRIMARY KEY, ubtree (customer_id) WITH (storage_type=USTORE) TABLESPACE pg_default + "hash_hash_sales_idx" ubtree (product_id) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By HASH(product_id) Subpartition By HASH(customer_id) +Number of partitions: 4 (View pg_partition to check each partition range.) +Number of subpartitions: 8 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +--finish +DROP TABLE range_range_sales; +DROP TABLE range_list_sales; +DROP TABLE range_hash_sales; +DROP TABLE list_range_sales; +DROP TABLE list_list_sales; +DROP TABLE list_hash_sales; +DROP TABLE hash_range_sales; +DROP TABLE hash_list_sales; +DROP TABLE hash_hash_sales; +DROP SCHEMA ustore_subpartition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_alter_table.out b/src/test/regress/expected/ustore_subpartition_alter_table.out new file mode 100644 index 000000000..4b0301a08 --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_alter_table.out @@ -0,0 +1,183 @@ +DROP SCHEMA ustore_subpartition_alter_table CASCADE; +ERROR: schema "ustore_subpartition_alter_table" does not exist +CREATE SCHEMA ustore_subpartition_alter_table; +SET CURRENT_SCHEMA TO ustore_subpartition_alter_table; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +--change column type +alter table range_range alter column user_no set data type char(30); +alter table range_range alter column sales_amt set data type varchar; +\d+ range_range + Table "ustore_subpartition_alter_table.range_range" + Column | Type | Modifiers | Storage | Stats target | Description +------------+-----------------------+-----------+----------+--------------+------------- + month_code | character varying(30) | not null | extended | | + dept_code | character varying(30) | not null | extended | | + user_no | character(30) | | extended | | + sales_amt | character varying | | extended | | +Indexes: + "range_range_pkey" PRIMARY KEY, ubtree (month_code, dept_code) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(month_code) Subpartition By RANGE(dept_code) +Number of partitions: 2 (View pg_partition to check each partition range.) +Number of subpartitions: 4 (View pg_partition to check each subpartition range.) 
+Has OIDs: no +Options: orientation=row, storage_type=ustore, compression=no + +-- rename +alter table range_range rename to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME is not yet supported. +alter table range_range rename partition p_201901 to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported. +alter table range_range rename partition p_201901_a to hahahahahah; +ERROR: Un-support feature +DETAIL: For subpartition table, ALTER TABLE ... RENAME PARTITION/SUBPARTITION is not yet supported. +--cluster +create index idx_range_range on range_range(month_code,user_no); +alter table range_range cluster on idx_range_range; +ERROR: cannot cluster a subpartition table +-- move tablespace +CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; +alter table range_range move PARTITION p_201901 tablespace example1; +ERROR: Un-support feature +DETAIL: For subpartition table, modifying tablespace is not yet supported. +alter table range_range move PARTITION p_201901_a tablespace example1; +ERROR: Un-support feature +DETAIL: For subpartition table, modifying tablespace is not yet supported. +DROP TABLESPACE example1; +-- merge +alter table range_range merge PARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +ERROR: Un-support feature +DETAIL: For subpartition table, merge partitions is not yet supported. +alter table range_range merge SUBPARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +ERROR: syntax error at or near "SUBPARTITIONS" +LINE 1: alter table range_range merge SUBPARTITIONS p_201901 , p_20... + ^ +-- exchange +CREATE TABLE ori +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (STORAGE_TYPE=USTORE); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "ori_pkey" for table "ori" +ALTER TABLE range_range EXCHANGE PARTITION (p_201901) WITH TABLE ori; +ERROR: Un-support feature +DETAIL: For subpartition table, exchange partition is not yet supported. +ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WITH TABLE ori; +ERROR: syntax error at or near "SUBPARTITION (" +LINE 1: ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WIT... + ^ +-- drop +alter table range_range drop partition p_201901; +alter table range_range drop partition p_201901_a; +ERROR: partition "p_201901_a" does not exist +alter table range_range drop subpartition p_201901_a; +ERROR: subpartition "p_201901_a" does not exist +-- add +alter table range_range add partition p_range_4 VALUES LESS THAN('201904'); +ERROR: upper boundary of adding partition MUST overtop last existing partition +-- split +alter table range_range split PARTITION p_201901 at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +ERROR: Un-support feature +DETAIL: For subpartition table, split partition is not supported yet. 
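+-- NOTE (editorial sketch, not captured test output): because MERGE, EXCHANGE and SPLIT
+-- are all rejected on subpartitioned tables, relocating a partition's data has to be
+-- done manually. A minimal workaround using only constructs exercised elsewhere in
+-- these tests (the staging table name is hypothetical):
+CREATE TABLE staging (LIKE range_range INCLUDING ALL);
+INSERT INTO staging SELECT * FROM range_range PARTITION (p_201902);
+DROP TABLE staging;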
+drop table ori; +drop table range_range; +CREATE TABLE IF NOT EXISTS range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; +create index on range_range_02(col_2) local; +alter table range_range_02 MODIFY PARTITION p_range_2 UNUSABLE LOCAL INDEXES; +ERROR: Un-support feature +DETAIL: For subpartition table, UNUSABLE LOCAL INDEXES is not yet supported. +alter table range_range_02 MODIFY PARTITION p_range_2 REBUILD UNUSABLE LOCAL INDEXES; +ERROR: Un-support feature +DETAIL: For subpartition table, REBUILD UNUSABLE LOCAL INDEXES is not yet supported. +alter table range_range_02 alter col_1 type char; +ERROR: cannot alter data type of partitioning column "col_1" +alter table range_range_02 alter col_2 type char; +ERROR: cannot alter data type of subpartitioning column "col_2" +drop table range_range_02; +--validate constraint +CREATE TABLE hash_hash +( + col_1 int , + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY hash (col_3) SUBPARTITION BY hash (col_2) +( + PARTITION p_hash_1 + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 , + SUBPARTITION p_hash_1_4 + ), + PARTITION p_hash_2 + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 + ), + PARTITION p_hash_3, + PARTITION p_hash_4 + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_hash_5 +); +INSERT INTO hash_hash VALUES(null,1,1,1); +alter table hash_hash add constraint con_hash_hash check(col_1 is not null) NOT VALID ; +INSERT INTO hash_hash VALUES(null,2,1,1); --error +ERROR: new row for relation "hash_hash" violates check constraint "con_hash_hash" +DETAIL: N/A +INSERT INTO hash_hash VALUES(1,3,1,1); --success +alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --error +ERROR: check constraint "con_hash_hash" is violated by some row +delete from hash_hash where col_1 is null; +alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --success +drop table hash_hash cascade; +-- clean +DROP SCHEMA ustore_subpartition_alter_table CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_analyze_vacuum.out b/src/test/regress/expected/ustore_subpartition_analyze_vacuum.out new file mode 100644 index 000000000..8c0572193 --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_analyze_vacuum.out @@ -0,0 +1,63 @@ +-- prepare +DROP SCHEMA ustore_subpartition_analyze_vacuum CASCADE; +ERROR: schema "ustore_subpartition_analyze_vacuum" does not exist +CREATE SCHEMA ustore_subpartition_analyze_vacuum; +SET CURRENT_SCHEMA TO ustore_subpartition_analyze_vacuum; +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values 
('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +delete from range_list where month_code = '201902'; +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(3 rows) + +analyze range_list; +analyze range_list partition (p_201901); +vacuum range_list; +vacuum range_list partition (p_201901); +drop table range_list; +-- clean +DROP SCHEMA ustore_subpartition_analyze_vacuum CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_createtable.out b/src/test/regress/expected/ustore_subpartition_createtable.out new file mode 100644 index 000000000..c1d76a4c5 --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_createtable.out @@ -0,0 +1,1550 @@ +--1.create table +--list_list list_hash list_range range_list range_hash range_range +--prepare +DROP SCHEMA ustore_subpartition_createtable CASCADE; +ERROR: schema "ustore_subpartition_createtable" does not exist +CREATE SCHEMA ustore_subpartition_createtable; +SET CURRENT_SCHEMA TO ustore_subpartition_createtable; +--1.1 normal table +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +drop table list_list; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION 
p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into list_hash values('201902', '1', '1', 1); +insert into list_hash values('201902', '2', '1', 1); +insert into list_hash values('201902', '3', '1', 1); +insert into list_hash values('201903', '4', '1', 1); +insert into list_hash values('201903', '5', '1', 1); +insert into list_hash values('201903', '6', '1', 1); +select * from list_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 4 | 1 | 1 + 201903 | 5 | 1 | 1 + 201903 | 6 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +drop table list_hash; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +insert into list_range values('201902', '1', '1', 1); +insert into list_range values('201902', '2', '1', 1); +insert into list_range values('201902', '3', '1', 1); +insert into list_range values('201903', '4', '1', 1); +insert into list_range values('201903', '5', '1', 1); +insert into list_range values('201903', '6', '1', 1); +ERROR: inserted partition key does not map to any table partition +select * from list_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 4 | 1 | 1 + 201903 | 5 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(5 rows) + +drop table list_range; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table range_list; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', 
'1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +select * from range_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +drop table range_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +select * from range_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +drop table range_range; +CREATE TABLE hash_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201901', '2', '1', 1); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +insert into hash_list values('201903', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +select * from hash_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 +(6 rows) + +drop table hash_list; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201901', '2', '1', 1); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +insert into hash_hash values('201903', 
'1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +select * from hash_hash; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 +(6 rows) + +drop table hash_hash; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN ( '3' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN ( '3' ) + ) +); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201901', '2', '1', 1); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); +insert into hash_range values('201903', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); +select * from hash_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 1 | 1 | 1 + 201901 | 2 | 1 | 1 +(6 rows) + +drop table hash_range; +--1.2 table with default subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table list_hash; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + 
SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +drop table list_range; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + 
SUBPARTITION p_201902_b + ) +); +drop table range_hash; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT 
NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES( '2' ), + SUBPARTITION p_201902_b VALUES( '3' ) + ) +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES( '2' ), + SUBPARTITION p_201901_b VALUES( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table hash_hash; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 +); +drop table hash_hash; +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_hash; +--1.3 subpartition name check +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_a VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +ERROR: duplicate subpartition name: "p_201901_a" +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +ERROR: duplicate subpartition name: "p_201901_a" +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) 
WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901 VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+ERROR: duplicate subpartition name: "p_201901"
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201901_subpartdefault1 VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+--1.4 subpartition key check
+--the partition key and the subpartition key are the same column
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+ERROR: The two partition keys of a subpartition partition table are the same.
+DETAIL: N/A
+--two subpartitions of the same partition have the same key value
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '1' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+ERROR: list partition p_201901_a and p_201901_b has overlapped value
+--the partitioning column does not exist
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_codeXXX) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+ERROR: undefined column"month_codexxx" is used as a partitioning column
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_codeXXX)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+ERROR: undefined column"dept_codexxx" is used as a partitioning column
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH 
(STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a values less than ('4'),
+        SUBPARTITION p_201901_b values less than ('4')
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a values less than ('3'),
+        SUBPARTITION p_201902_b values less than ('6')
+    )
+);
+ERROR: partition bound of partition "p_201901_b" is too low
+drop table list_range;
+ERROR: table "list_range" does not exist
+--1.5 list subpartition with default
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( default )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+select * from list_list partition (p_201901);
+ month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+-----------
+ 201902     | 2         | 1       |         1
+ 201902     | 1         | 1       |         1
+ 201902     | 1         | 1       |         1
+(3 rows)
+
+select * from list_list partition (p_201902);
+ month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+-----------
+ 201903     | 2         | 1       |         1
+ 201903     | 2         | 1       |         1
+ 201903     | 1         | 1       |         1
+(3 rows)
+
+drop table list_list;
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( default )
+    )
+);
+drop table list_list;
+--1.6 declaration and definition of the subpartition type must be the same. 
+--error +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); +ERROR: The syntax format of subpartition is incorrect, the declaration and definition of the subpartition do not match. +DETAIL: The syntax format of subpartition p_201901_a is incorrect. +--1.7 add constraint +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range add constraint constraint_check CHECK (sales_amt IS NOT NULL); +insert into range_range values(1,1,1); +ERROR: new row for relation "range_range" violates check constraint "constraint_check" +DETAIL: N/A +drop table range_range; +-- drop partition column +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +alter table range_hash_02 drop column col_1; +ERROR: cannot drop partitioning column "col_1" +alter table range_hash_02 drop column col_2; +ERROR: cannot drop partitioning column "col_2" +drop table range_hash_02; +--1.8 SET ROW MOVEMENT +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1', '2' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1', '2' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list disable ROW MOVEMENT; +insert into list_list values('201902', '1', '1', 1); +update list_list set month_code = '201903'; +ERROR: fail to update partitioned table "list_list" +DETAIL: disable row movement +update list_list set dept_code = '3'; +ERROR: fail to update partitioned table "list_list" +DETAIL: disable row movement +alter table list_list enable ROW MOVEMENT; +update list_list set month_code = '201903'; +update list_list set dept_code = 
'3';
+drop table list_list;
+--1.9 without subpartition declaration
+create table test(a int) WITH (STORAGE_TYPE=USTORE)
+partition by range(a)
+(
+partition p1 values less than(100)
+(
+subpartition subp1 values less than(50),
+subpartition subp2 values less than(100)
+),
+partition p2 values less than(200),
+partition p3 values less than(maxvalue)
+);
+ERROR: The syntax format of subpartition is incorrect, missing declaration of subpartition.
+DETAIL: N/A
+--1.10 create table like
+CREATE TABLE range_range
+(
+    col_1 int primary key,
+    col_2 int NOT NULL ,
+    col_3 VARCHAR2 ( 30 ) NOT NULL ,
+    col_4 int generated always as(2*col_2) stored ,
+    check (col_4 >= col_2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( 10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 20 )
+    (
+        SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 )
+    )
+) ENABLE ROW MOVEMENT;
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+CREATE TABLE range_range_02 (like range_range INCLUDING ALL );
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_02_pkey" for table "range_range_02"
+drop table range_range;
+--ROW LEVEL SECURITY POLICY
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range"
+CREATE ROW LEVEL SECURITY POLICY range_range_rls ON range_range USING(user_no = CURRENT_USER);
+ERROR: Un-support feature
+DETAIL: Do not support row level security policy on subpartition table.
+drop table range_range;
+-- ledger database
+CREATE SCHEMA ledgernsp WITH BLOCKCHAIN;
+CREATE TABLE ledgernsp.range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+ERROR: Un-support feature
+DETAIL: Subpartition table does not support ledger user table. 
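+-- NOTE (editorial sketch, not captured test output): the rejection above comes from the
+-- ledger property of the schema rather than from the table definition itself; the same
+-- CREATE TABLE succeeds in a plain schema earlier in this file. Whether a schema was
+-- created WITH BLOCKCHAIN can be checked in the catalog (the column name below is an
+-- assumption, not confirmed by this patch):
+SELECT nspname, nspblockchain FROM pg_namespace WHERE nspname = 'ledgernsp';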
+DROP SCHEMA ledgernsp; +-- create table as +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +insert into range_range values(201902,1,1,1),(201902,1,1,1),(201902,3,1,1),(201903,1,1,1),(201903,2,1,1),(201903,2,1,1); +select * from range_range subpartition(p_201901_a) where month_code in(201902,201903) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +create table range_range_copy WITH (STORAGE_TYPE=USTORE) as select * from range_range subpartition(p_201901_a) where month_code in(201902,201903); +select * from range_range_copy order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +drop table range_range; +drop table range_range_copy; +--1.11 create index +create table range_range_03 +( + c_int int, + c_char1 char(3000), + c_char2 char(5000), + c_char3 char(6000), + c_varchar1 varchar(3000), + c_varchar2 varchar(5000), + c_varchar3 varchar, + c_varchar4 varchar, + c_text1 text, + c_text2 text, + c_text3 text, + c int, + primary key(c,c_int) +) with (parallel_workers=10, STORAGE_TYPE=USTORE) +partition by range (c_int) subpartition by range (c_char1) +( + partition p1 values less than(50) + ( + subpartition p1_1 values less than('c'), + subpartition p1_2 values less than(maxvalue) + ), + partition p2 values less than(100) + ( + subpartition p2_1 values less than('c'), + subpartition p2_2 values less than(maxvalue) + ), + partition p3 values less than(150) + ( + subpartition p3_1 values less than('c'), + subpartition p3_2 values less than(maxvalue) + ), + partition p4 values less than(200) + ( + subpartition p4_1 values less than('c'), + subpartition p4_2 values less than(maxvalue) + ), + partition p5 values less than(maxvalue)( + subpartition p5_1 values less than('c'), + subpartition p5_2 values less than(maxvalue) + ) +) enable row movement; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_03_pkey" for table "range_range_03" +create index range_range_03_idx1 on range_range_03 (c_varchar1) local; --success +create index range_range_03_idx2 on range_range_03 (c_varchar2) local ( + partition cpt7_p1, + partition cpt7_p2, + partition cpt7_p3, + partition cpt7_p4, + partition cpt7_p5 +); --failed +ERROR: Cannot match subpartitions when create subpartition indexes. 
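+-- NOTE: a LOCAL index on a subpartitioned table is partitioned per *subpartition*,
+-- so an explicit partition list must name one index partition for each of the 10
+-- subpartitions, not one per top-level partition as attempted above (compare the
+-- 10-entry list pg_get_tabledef prints for idx1 below). A sketch with illustrative
+-- names, not executed here:
+-- create index range_range_03_idx2 on range_range_03 (c_varchar2) local
+-- (partition sp1, partition sp2, /* ... one entry per subpartition ... */ partition sp10);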
+create index range_range_03_idx3 on range_range_03 (c_varchar3); --success, default global +create index range_range_03_idx4 on range_range_03 (c_varchar4) global; --success +create index range_range_03_idx5 on range_range_03 (c_varchar4) local; --failed, can not be same column with global index +ERROR: Global and local partition index should not be on same column +\d+ range_range_03 + Table "ustore_subpartition_createtable.range_range_03" + Column | Type | Modifiers | Storage | Stats target | Description +------------+-------------------------+-----------+----------+--------------+------------- + c_int | integer | not null | plain | | + c_char1 | character(3000) | | extended | | + c_char2 | character(5000) | | extended | | + c_char3 | character(6000) | | extended | | + c_varchar1 | character varying(3000) | | extended | | + c_varchar2 | character varying(5000) | | extended | | + c_varchar3 | character varying | | extended | | + c_varchar4 | character varying | | extended | | + c_text1 | text | | extended | | + c_text2 | text | | extended | | + c_text3 | text | | extended | | + c | integer | not null | plain | | +Indexes: + "range_range_03_pkey" PRIMARY KEY, ubtree (c, c_int) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_03_idx1" ubtree (c_varchar1) LOCAL WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_03_idx3" ubtree (c_varchar3) WITH (storage_type=USTORE) TABLESPACE pg_default + "range_range_03_idx4" ubtree (c_varchar4) WITH (storage_type=USTORE) TABLESPACE pg_default +Partition By RANGE(c_int) Subpartition By RANGE(c_char1) +Number of partitions: 5 (View pg_partition to check each partition range.) +Number of subpartitions: 10 (View pg_partition to check each subpartition range.) +Has OIDs: no +Options: orientation=row, parallel_workers=10, storage_type=ustore, compression=no + +select pg_get_tabledef('range_range_03'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_createtable; + + CREATE TABLE range_range_03 ( + + c_int integer NOT NULL, + + c_char1 character(3000), + + c_char2 character(5000), + + c_char3 character(6000), + + c_varchar1 character varying(3000), + + c_varchar2 character varying(5000), + + c_varchar3 character varying, + + c_varchar4 character varying, + + c_text1 text, + + c_text2 text, + + c_text3 text, + + c integer NOT NULL + + ) + + WITH (orientation=row, parallel_workers=10, storage_type=ustore, compression=no) + + PARTITION BY RANGE (c_int) SUBPARTITION BY RANGE (c_char1) + + ( + + PARTITION p1 VALUES LESS THAN (50) TABLESPACE pg_default + + ( + + SUBPARTITION p1_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p1_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p2 VALUES LESS THAN (100) TABLESPACE pg_default + + ( + + SUBPARTITION p2_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p2_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p3 VALUES LESS THAN (150) TABLESPACE pg_default + + ( + + SUBPARTITION p3_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p3_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p4 VALUES LESS THAN (200) TABLESPACE pg_default + + ( + + SUBPARTITION p4_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p4_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ), + + PARTITION p5 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + 
( + + SUBPARTITION p5_1 VALUES LESS THAN ('c') TABLESPACE pg_default, + + SUBPARTITION p5_2 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; + + CREATE INDEX range_range_03_idx4 ON range_range_03 USING ubtree (c_varchar4) WITH (storage_type=USTORE) TABLESPACE pg_default;+ + CREATE INDEX range_range_03_idx3 ON range_range_03 USING ubtree (c_varchar3) WITH (storage_type=USTORE) TABLESPACE pg_default;+ + CREATE INDEX range_range_03_idx1 ON range_range_03 USING ubtree (c_varchar1) LOCAL + + ( + + PARTITION p1_1_c_varchar1_idx, + + PARTITION p1_2_c_varchar1_idx, + + PARTITION p2_1_c_varchar1_idx, + + PARTITION p2_2_c_varchar1_idx, + + PARTITION p3_1_c_varchar1_idx, + + PARTITION p3_2_c_varchar1_idx, + + PARTITION p4_1_c_varchar1_idx, + + PARTITION p4_2_c_varchar1_idx, + + PARTITION p5_1_c_varchar1_idx, + + PARTITION p5_2_c_varchar1_idx + + ) WITH (storage_type=USTORE) TABLESPACE pg_default; + + ALTER TABLE range_range_03 ADD CONSTRAINT range_range_03_pkey PRIMARY KEY (c, c_int) WITH (storage_type=USTORE); +(1 row) + +drop table range_range_03; +--unique local index columns must contain the partition key +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +create unique index idx on range_range(month_code) local; +ERROR: unique local index columns must contain all the partition keys and collation must be default collation +create unique index idx1 on range_range(month_code, user_no) local; +ERROR: unique local index columns must contain all the partition keys and collation must be default collation +drop table range_range; +-- partkey has timestampwithzone type +drop table hash_range; +ERROR: table "hash_range" does not exist +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (col_2) SUBPARTITION BY RANGE (col_19) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +ERROR: partition key value must be const or const-evaluable expression +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "hash_range_pkey" for table "hash_range" +drop table hash_range; +--clean +DROP SCHEMA ustore_subpartition_createtable CASCADE; +NOTICE: drop 
cascades to table range_range_02
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/expected/ustore_subpartition_ddl_index.out b/src/test/regress/expected/ustore_subpartition_ddl_index.out
new file mode 100644
index 000000000..00131865c
--- /dev/null
+++ b/src/test/regress/expected/ustore_subpartition_ddl_index.out
@@ -0,0 +1,440 @@
+--
+----test index is OK when using DDL grammar for subpartition----
+--
+DROP SCHEMA ustore_subpartition_ddl_index CASCADE;
+ERROR: schema "ustore_subpartition_ddl_index" does not exist
+CREATE SCHEMA ustore_subpartition_ddl_index;
+SET CURRENT_SCHEMA TO ustore_subpartition_ddl_index;
+SET enable_seqscan = OFF;
+SET enable_indexscan = ON;
+SET enable_indexonlyscan = ON;
+SET enable_bitmapscan = OFF;
+--
+--test for add/drop partition/subpartition
+--
+--1. first, we create a subpartitioned table, and indexes on the table
+CREATE TABLE range_list_sales1
+(
+    product_id INT4,
+    customer_id INT4,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id)
+(
+    PARTITION customer1 VALUES LESS THAN (200)
+    (
+        SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'),
+        SUBPARTITION customer1_channel4 VALUES ('9')
+    ),
+    PARTITION customer2 VALUES LESS THAN (500)
+    (
+        SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'),
+        SUBPARTITION customer2_channel2 VALUES (DEFAULT)
+    ),
+    PARTITION customer3 VALUES LESS THAN (800),
+    PARTITION customer4 VALUES LESS THAN (1200)
+    (
+        SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
+    )
+);
+INSERT INTO range_list_sales1 SELECT generate_series(1,1000),
+                                     generate_series(1,1000),
+                                     date_pli('2008-01-01', generate_series(1,1000)),
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%1000,
+                                     generate_series(1,1000);
+CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL;
+CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL;
+CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL;
+CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+                                QUERY PLAN
+-------------------------------------------------------------------------
+ Aggregate
+   ->  Index Only Scan using range_list_sales1_idx1 on range_list_sales1
+(2 rows)
+
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+ count
+-------
+  1000
+(1 row)
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+                                QUERY PLAN
+-------------------------------------------------------------------------
+ Aggregate
+   ->  Index Only Scan using range_list_sales1_idx2 on range_list_sales1
+(2 rows)
+
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+ count
+-------
+  1000
+(1 row)
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+                                         QUERY PLAN
+-------------------------------------------------------------------------------------------
+ 
Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +--2. add partition/subpartition will not influence the index +ALTER TABLE range_list_sales1 ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8') + ); +ALTER TABLE range_list_sales1 ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_list_sales1 MODIFY PARTITION customer5 ADD SUBPARTITION customer5_channel4 VALUES ('9'); +INSERT INTO range_list_sales1 SELECT generate_series(1001,2000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 6, Sub Iterations: 13 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition 
Iterator + Iterations: 6, Sub Iterations: 13 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 2000 +(1 row) + +--3. drop partition/subpartition update global index +ALTER TABLE range_list_sales1 DROP PARTITION customer3 UPDATE GLOBAL INDEX; +ALTER TABLE range_list_sales1 DROP PARTITION FOR (700) UPDATE GLOBAL INDEX; --customer4 +ALTER TABLE range_list_sales1 DROP SUBPARTITION FOR (700, '9') UPDATE GLOBAL INDEX; --customer5_channel4 +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 10 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 4, Sub Iterations: 10 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..4 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +--4. if drop partition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP PARTITION FOR (1600); +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx1), index "range_list_sales1_idx1" doesn't exist. 
+ QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx2), index "range_list_sales1_idx2" doesn't exist. + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 9 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +--5. if drop subpartition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP SUBPARTITION customer5_channel3; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx1), index "range_list_sales1_idx1" doesn't exist. 
+ QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx1 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +WARNING: Error hint: IndexOnlyScan(range_list_sales1 range_list_sales1_idx2), index "range_list_sales1_idx2" doesn't exist. + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Seq Scan on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using range_list_sales1_idx2 on range_list_sales1 +(2 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx3 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + QUERY PLAN +------------------------------------------------------------------------------------------- + Aggregate + -> Partition Iterator + Iterations: 3, Sub Iterations: 8 + -> Partitioned Index Only Scan using range_list_sales1_idx4 on range_list_sales1 + Selected Partitions: 1..3 + Selected Subpartitions: ALL +(6 rows) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 998 +(1 row) + +DROP TABLE range_list_sales1; +--finish, clean the environment +DROP SCHEMA ustore_subpartition_ddl_index CASCADE; +RESET CURRENT_SCHEMA; +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/expected/ustore_subpartition_gpi.out b/src/test/regress/expected/ustore_subpartition_gpi.out new file mode 100644 index 000000000..5bb341429 --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_gpi.out @@ -0,0 +1,1296 @@ +-- prepare +DROP SCHEMA 
ustore_subpartition_gpi CASCADE; +ERROR: schema "ustore_subpartition_gpi" does not exist +CREATE SCHEMA ustore_subpartition_gpi; +SET CURRENT_SCHEMA TO ustore_subpartition_gpi; +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + 
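+-- A GLOBAL index spans every partition and subpartition, so its Bitmap Index Scan
+-- needs no Partition Iterator; compare the month_code plan above, which iterates
+-- because idx_month_code_local is partition-local.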
+explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +drop table range_list; +-- unique +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create unique index idx_dept_code_global on range_list(dept_code) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select count(*) from range_list; + count +------- + 2 +(1 row) + +--error +insert into range_list values('201902', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(1) already exists. +insert into range_list values('201902', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(2) already exists. +insert into range_list values('201903', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(1) already exists. +insert into range_list values('201903', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_dept_code_global" +DETAIL: Key (dept_code)=(2) already exists. 
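+-- The unique GLOBAL index enforces uniqueness across all partitions and
+-- subpartitions, so each duplicate insert above is rejected and the count below
+-- still returns 2.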
+select count(*) from range_list; + count +------- + 2 +(1 row) + +delete from range_list; +drop index idx_dept_code_global; +create unique index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '4', 1); +select * from range_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 2 | 1 +(1 row) + +select * from range_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 3 | 1 +(1 row) + +select * from range_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 4 | 1 +(1 row) + +select count(*) from range_list; + count +------- + 4 +(1 row) + +--error +insert into range_list values('201902', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201902', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201903', '1', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201903', '2', '1', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(1) already exists. +insert into range_list values('201902', '1', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201902', '2', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201903', '1', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201903', '2', '2', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(2) already exists. +insert into range_list values('201902', '1', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201902', '2', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201903', '1', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201903', '2', '3', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(3) already exists. +insert into range_list values('201902', '1', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. 
+insert into range_list values('201902', '2', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +insert into range_list values('201903', '1', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +insert into range_list values('201903', '2', '4', 1); +ERROR: duplicate key value violates unique constraint "idx_user_no_global" +DETAIL: Key (user_no)=(4) already exists. +select count(*) from range_list; + count +------- + 4 +(1 row) + +drop table range_list; +-- truncate subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, 
dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list truncate subpartition p_201901_a update global index; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, 
sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +alter table range_list truncate subpartition p_201901_b; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(2 rows) + +drop table range_list; +-- split subpartition 
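+-- The split cases below run twice: once with UPDATE GLOBAL INDEX, which keeps the
+-- global indexes usable, and once without it, after which the planner falls back to
+-- Partitioned Seq Scan on dept_code/user_no until the index is rebuilt
+-- (ALTER INDEX ... REBUILD, as in the ddl_index cases above).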
+CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values (default) + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values (default) + ) +); +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- 
+ Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list split subpartition p_201901_b values ('3') into +( + subpartition p_201901_b, + subpartition p_201901_c +) update global index; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_list.dept_code)::text = '1'::text) +(8 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_list.user_no)::text = '1'::text) +(8 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +alter table range_list split subpartition p_201902_b values ('3') into +( + subpartition p_201902_b, + subpartition p_201902_c +); +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 6 + -> Partitioned Seq Scan on ustore_subpartition_gpi.range_list + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +drop table range_list; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) 
SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( MAXVALUE ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +create index idx_month_code_local on range_range(month_code) local; +create index idx_dept_code_global on range_range(dept_code) global; +create index idx_user_no_global on range_range(user_no) global; +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_range.dept_code)::text = '1'::text) +(8 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Bitmap Heap Scan on 
ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_range.user_no)::text = '1'::text) +(8 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +alter table range_range split subpartition p_201901_b at ('3') into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.dept_code)::text = '1'::text) + -> Bitmap Index Scan on idx_dept_code_global + Index Cond: ((range_range.dept_code)::text = '1'::text) +(8 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Bitmap Heap Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.user_no)::text = '1'::text) + -> Bitmap Index Scan on idx_user_no_global + Index Cond: ((range_range.user_no)::text = '1'::text) +(8 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 
+ 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +alter table range_range split subpartition p_201902_b at ('3') into +( + subpartition p_201902_c, + subpartition p_201903_d +); +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.dept_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 3 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code_local + Index Cond: ((range_range.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.user_no, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Seq Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(2 rows) + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_range.month_code, range_range.dept_code, range_range.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 6 + -> Partitioned Seq Scan on ustore_subpartition_gpi.range_range + Output: month_code, dept_code, user_no, sales_amt + Filter: ((range_range.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) primary key, + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + 
SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) primary key, + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) primary key, + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + i +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code, user_no) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( 
+ PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + i +(1 row) + +drop table range_range; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, user_no) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "range_range_pkey" for table "range_range" +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); + relkind +--------- + I +(1 row) + +drop table range_range; +-- truncate with gpi +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index idx on range_hash_02(col_1); +truncate range_hash_02; +drop table range_hash_02; +-- clean +DROP SCHEMA ustore_subpartition_gpi CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_scan.out b/src/test/regress/expected/ustore_subpartition_scan.out new file mode 100644 index 000000000..a0a66200a --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_scan.out @@ -0,0 +1,660 @@ +--prepare +DROP SCHEMA ustore_subpartition_scan CASCADE; +ERROR: schema "ustore_subpartition_scan" does not exist +CREATE SCHEMA ustore_subpartition_scan; +SET CURRENT_SCHEMA TO ustore_subpartition_scan; +--scan +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into 
range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +explain(costs off, verbose on) select * from range_list order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Seq Scan on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(10 rows) + +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code on range_list(dept_code) local; +create index idx_user_no on range_list(user_no) local; +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_month_code + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 + -> Partitioned Bitmap Index Scan on idx_dept_code + Index Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(15 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- 
+ 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Recheck Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL + -> Partitioned Bitmap Index Scan on idx_user_no + Index Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(15 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +set enable_bitmapscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.dept_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 1, Sub Iterations: 2 + -> Partitioned Index Scan using idx_month_code on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.month_code)::text = '201902'::text) + Selected Partitions: 1 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.user_no, range_list.sales_amt + -> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 2 + -> Partitioned Index Scan using idx_dept_code on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.dept_code)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: 1:1, 2:1 +(11 rows) + +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: month_code, dept_code, user_no, sales_amt + Sort Key: range_list.month_code, range_list.dept_code, range_list.sales_amt + 
-> Partition Iterator + Output: month_code, dept_code, user_no, sales_amt + Iterations: 2, Sub Iterations: 4 + -> Partitioned Index Scan using idx_user_no on ustore_subpartition_scan.range_list + Output: month_code, dept_code, user_no, sales_amt + Index Cond: ((range_list.user_no)::text = '1'::text) + Selected Partitions: 1..2 + Selected Subpartitions: ALL +(11 rows) + +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +reset enable_seqscan; +reset enable_bitmapscan; +drop table range_list; +CREATE TABLE range_list +( + col_1 VARCHAR2 ( 30 ) , + col_2 VARCHAR2 ( 30 ) NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + ccol_4 VARCHAR2 ( 30 ), +col_5 VARCHAR2 ( 30 ), +col_6 VARCHAR2 ( 30 ), +col_7 VARCHAR2 ( 30 ), +col_8 VARCHAR2 ( 30 ), +col_9 VARCHAR2 ( 30 ), +col_10 VARCHAR2 ( 30 ), +col_11 VARCHAR2 ( 30 ), +col_12 VARCHAR2 ( 30 ), +col_13 VARCHAR2 ( 30 ), +col_14 VARCHAR2 ( 30 ), +col_15 VARCHAR2 ( 30 ), +col_16 VARCHAR2 ( 30 ), +col_17 VARCHAR2 ( 30 ), +col_18 VARCHAR2 ( 30 ), +col_19 VARCHAR2 ( 30 ), +col_20 VARCHAR2 ( 30 ), +col_21 VARCHAR2 ( 30 ), +col_22 VARCHAR2 ( 30 ), +col_23 VARCHAR2 ( 30 ), +col_24 VARCHAR2 ( 30 ), +col_25 VARCHAR2 ( 30 ), +col_26 VARCHAR2 ( 30 ), +col_27 VARCHAR2 ( 30 ), +col_28 VARCHAR2 ( 30 ), +col_29 VARCHAR2 ( 30 ), +col_30 VARCHAR2 ( 30 ), +col_31 VARCHAR2 ( 30 ), +col_32 VARCHAR2 ( 30 ), +col_33 VARCHAR2 ( 30 ), +col_34 VARCHAR2 ( 30 ), +col_35 VARCHAR2 ( 30 ), +col_36 VARCHAR2 ( 30 ), +col_37 VARCHAR2 ( 30 ), +col_38 VARCHAR2 ( 30 ), +col_39 VARCHAR2 ( 30 ), +col_40 VARCHAR2 ( 30 ), +col_41 VARCHAR2 ( 30 ), +col_42 VARCHAR2 ( 30 ), +col_43 VARCHAR2 ( 30 ), +col_44 VARCHAR2 ( 30 ), +col_45 VARCHAR2 ( 30 ), +col_46 VARCHAR2 ( 30 ), +col_47 VARCHAR2 ( 30 ), +col_48 VARCHAR2 ( 30 ), +col_49 VARCHAR2 ( 30 ), +col_50 VARCHAR2 ( 30 ), +col_51 VARCHAR2 ( 30 ), +col_52 VARCHAR2 ( 30 ), +col_53 VARCHAR2 ( 30 ), +col_54 VARCHAR2 ( 30 ), +col_55 VARCHAR2 ( 30 ), +col_56 VARCHAR2 ( 30 ), +col_57 VARCHAR2 ( 30 ), +col_58 VARCHAR2 ( 30 ), +col_59 VARCHAR2 ( 30 ), +col_60 VARCHAR2 ( 30 ), +col_61 VARCHAR2 ( 30 ), +col_62 VARCHAR2 ( 30 ), +col_63 VARCHAR2 ( 30 ), +col_64 VARCHAR2 ( 30 ), +col_65 VARCHAR2 ( 30 ), +col_66 VARCHAR2 ( 30 ), +col_67 VARCHAR2 ( 30 ), +col_68 VARCHAR2 ( 30 ), +col_69 VARCHAR2 ( 30 ), +col_70 VARCHAR2 ( 30 ), +col_71 VARCHAR2 ( 30 ), +col_72 VARCHAR2 ( 30 ), +col_73 VARCHAR2 ( 30 ), +col_74 VARCHAR2 ( 30 ), +col_75 VARCHAR2 ( 30 ), +col_76 VARCHAR2 ( 30 ), +col_77 VARCHAR2 ( 30 ), +col_78 VARCHAR2 ( 30 ), +col_79 VARCHAR2 ( 30 ), +col_80 VARCHAR2 ( 30 ), +col_81 VARCHAR2 ( 30 ), +col_82 VARCHAR2 ( 30 ), +col_83 VARCHAR2 ( 30 ) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( '-10' ) + ( +SUBPARTITION p_list_1_1 VALUES ( '-1' ), +SUBPARTITION p_list_1_2 VALUES ( '-2' ), +SUBPARTITION p_list_1_3 VALUES ( '-3' ), +SUBPARTITION p_list_1_4 VALUES ( '-4' ), +SUBPARTITION p_list_1_5 VALUES ( '-5' ), +SUBPARTITION p_list_1_6 VALUES ( '-6' ), +SUBPARTITION p_list_1_7 VALUES ( '-7' ), +SUBPARTITION p_list_1_8 VALUES ( '-8' ), +SUBPARTITION p_list_1_9 VALUES ( '-9' ), +SUBPARTITION p_list_1_10 VALUES ( '-10' ), +SUBPARTITION p_list_1_11 VALUES ( '-11' ), +SUBPARTITION p_list_1_12 VALUES ( '-12' ), +SUBPARTITION p_list_1_13 VALUES ( 
'-13' ), +SUBPARTITION p_list_1_14 VALUES ( '-14' ), +SUBPARTITION p_list_1_15 VALUES ( '-15' ), +SUBPARTITION p_list_1_16 VALUES ( '-16' ), +SUBPARTITION p_list_1_17 VALUES ( '-17' ), +SUBPARTITION p_list_1_18 VALUES ( '-18' ), +SUBPARTITION p_list_1_19 VALUES ( '-19' ), +SUBPARTITION p_list_1_20 VALUES ( '-20' ), +SUBPARTITION p_list_1_21 VALUES ( '-21' ), +SUBPARTITION p_list_1_22 VALUES ( '-22' ), +SUBPARTITION p_list_1_23 VALUES ( '-23' ), +SUBPARTITION p_list_1_24 VALUES ( '-24' ), +SUBPARTITION p_list_1_25 VALUES ( '-25' ), +SUBPARTITION p_list_1_26 VALUES ( '-26' ), +SUBPARTITION p_list_1_27 VALUES ( '-27' ), +SUBPARTITION p_list_1_28 VALUES ( '-28' ), +SUBPARTITION p_list_1_29 VALUES ( '-29' ), +SUBPARTITION p_list_1_30 VALUES ( '-30' ), +SUBPARTITION p_list_1_31 VALUES ( '-31' ), +SUBPARTITION p_list_1_32 VALUES ( '-32' ), +SUBPARTITION p_list_1_33 VALUES ( '-33' ), +SUBPARTITION p_list_1_34 VALUES ( '-34' ), +SUBPARTITION p_list_1_35 VALUES ( '-35' ), +SUBPARTITION p_list_1_36 VALUES ( '-36' ), +SUBPARTITION p_list_1_37 VALUES ( '-37' ), +SUBPARTITION p_list_1_38 VALUES ( '-38' ), +SUBPARTITION p_list_1_39 VALUES ( '-39' ), +SUBPARTITION p_list_1_40 VALUES ( '-40' ), +SUBPARTITION p_list_1_41 VALUES ( '-41' ), +SUBPARTITION p_list_1_42 VALUES ( '-42' ), +SUBPARTITION p_list_1_43 VALUES ( '-43' ), +SUBPARTITION p_list_1_44 VALUES ( '-44' ), +SUBPARTITION p_list_1_45 VALUES ( '-45' ), +SUBPARTITION p_list_1_46 VALUES ( '-46' ), +SUBPARTITION p_list_1_47 VALUES ( '-47' ), +SUBPARTITION p_list_1_48 VALUES ( '-48' ), +SUBPARTITION p_list_1_49 VALUES ( '-49' ), +SUBPARTITION p_list_1_50 VALUES ( '-50' ), +SUBPARTITION p_list_1_51 VALUES ( default ) + ), + PARTITION p_range_2 VALUES LESS THAN('10 ') + ( +SUBPARTITION p_list_2_1 VALUES ( '1' ), +SUBPARTITION p_list_2_2 VALUES ( '2' ), +SUBPARTITION p_list_2_3 VALUES ( '3' ), +SUBPARTITION p_list_2_4 VALUES ( '4' ), +SUBPARTITION p_list_2_5 VALUES ( '5' ), +SUBPARTITION p_list_2__6 VALUES ( '-6' ), +SUBPARTITION p_list_2_6 VALUES ( '6' ), +SUBPARTITION p_list_2_7 VALUES ( '7' ), +SUBPARTITION p_list_2_8 VALUES ( '8' ), +SUBPARTITION p_list_2_9 VALUES ( '9' ), +SUBPARTITION p_list_2_10 VALUES ( '10' ), +SUBPARTITION p_list_2_11 VALUES ( '11' ), +SUBPARTITION p_list_2_12 VALUES ( '12' ), +SUBPARTITION p_list_2_13 VALUES ( '13' ), +SUBPARTITION p_list_2_14 VALUES ( '14' ), +SUBPARTITION p_list_2_15 VALUES ( '15' ), +SUBPARTITION p_list_2_16 VALUES ( '16' ), +SUBPARTITION p_list_2_17 VALUES ( '17' ), +SUBPARTITION p_list_2_18 VALUES ( '18' ), +SUBPARTITION p_list_2_19 VALUES ( '19' ), +SUBPARTITION p_list_2_20 VALUES ( '20' ), +SUBPARTITION p_list_2_21 VALUES ( '21' ), +SUBPARTITION p_list_2_22 VALUES ( '22' ), +SUBPARTITION p_list_2_23 VALUES ( '23' ), +SUBPARTITION p_list_2_24 VALUES ( '24' ), +SUBPARTITION p_list_2_25 VALUES ( '25' ), +SUBPARTITION p_list_2_26 VALUES ( '26' ), +SUBPARTITION p_list_2_27 VALUES ( '27' ), +SUBPARTITION p_list_2_28 VALUES ( '28' ), +SUBPARTITION p_list_2_29 VALUES ( '29' ), +SUBPARTITION p_list_2_30 VALUES ( '30' ), +SUBPARTITION p_list_2_31 VALUES ( '31' ), +SUBPARTITION p_list_2_32 VALUES ( '32' ), +SUBPARTITION p_list_2_33 VALUES ( '33' ), +SUBPARTITION p_list_2_34 VALUES ( '34' ), +SUBPARTITION p_list_2_35 VALUES ( '35' ), +SUBPARTITION p_list_2_36 VALUES ( '36' ), +SUBPARTITION p_list_2_37 VALUES ( '37' ), +SUBPARTITION p_list_2_38 VALUES ( '38' ), +SUBPARTITION p_list_2_39 VALUES ( '39' ), +SUBPARTITION p_list_2_40 VALUES ( '40' ), +SUBPARTITION p_list_2_41 VALUES ( '41' ), +SUBPARTITION p_list_2_42 
VALUES ( '42' ), +SUBPARTITION p_list_2_43 VALUES ( '43' ), +SUBPARTITION p_list_2_44 VALUES ( '44' ), +SUBPARTITION p_list_2_45 VALUES ( '45' ), +SUBPARTITION p_list_2_46 VALUES ( '46' ), +SUBPARTITION p_list_2_47 VALUES ( '47' ), +SUBPARTITION p_list_2_48 VALUES ( '48' ), +SUBPARTITION p_list_2_49 VALUES ( '49' ), +SUBPARTITION p_list_2_50 VALUES ( '50' ), +SUBPARTITION p_list_2_51 VALUES ( default ) + ), + PARTITION p_range_3 VALUES LESS THAN( '20 '), + PARTITION p_range_4 VALUES LESS THAN( '30' ) + ( + SUBPARTITION p_list_4_1 VALUES ( default ) + ), + PARTITION p_range_5 VALUES LESS THAN( '40' ) + ( + SUBPARTITION p_list_5_1 VALUES ( '41' ), +SUBPARTITION p_list_5_2 VALUES ( '42' ), +SUBPARTITION p_list_5_3 VALUES ( '43' ), +SUBPARTITION p_list_5_4 VALUES ( '44' ), +SUBPARTITION p_list_5_5 VALUES ( '45' ), +SUBPARTITION p_list_5_6 VALUES ( '46' ), +SUBPARTITION p_list_5_7 VALUES ( '47' ), +SUBPARTITION p_list_5_8 VALUES ( '48' ), +SUBPARTITION p_list_5_9 VALUES ( '49' ), +SUBPARTITION p_list_5_10 VALUES ( '50' ), +SUBPARTITION p_list_5_11 VALUES ( '51' ), +SUBPARTITION p_list_5_12 VALUES ( '52' ), +SUBPARTITION p_list_5_13 VALUES ( '53' ), +SUBPARTITION p_list_5_14 VALUES ( '54' ), +SUBPARTITION p_list_5_15 VALUES ( '55' ), +SUBPARTITION p_list_5_16 VALUES ( '56' ), +SUBPARTITION p_list_5_17 VALUES ( '57' ), +SUBPARTITION p_list_5_18 VALUES ( '58' ), +SUBPARTITION p_list_5_19 VALUES ( '59' ), +SUBPARTITION p_list_5_20 VALUES ( '60' ), +SUBPARTITION p_list_5_21 VALUES ( '61' ), +SUBPARTITION p_list_5_22 VALUES ( '62' ), +SUBPARTITION p_list_5_23 VALUES ( '63' ), +SUBPARTITION p_list_5_24 VALUES ( '64' ), +SUBPARTITION p_list_5_25 VALUES ( '65' ), +SUBPARTITION p_list_5_26 VALUES ( '66' ), +SUBPARTITION p_list_5_27 VALUES ( '67' ), +SUBPARTITION p_list_5_28 VALUES ( '68' ), +SUBPARTITION p_list_5_29 VALUES ( '69' ), +SUBPARTITION p_list_5_30 VALUES ( '70' ), +SUBPARTITION p_list_5_31 VALUES ( '71' ), +SUBPARTITION p_list_5_32 VALUES ( '72' ), +SUBPARTITION p_list_5_33 VALUES ( '73' ), +SUBPARTITION p_list_5_34 VALUES ( '74' ), +SUBPARTITION p_list_5_35 VALUES ( '75' ), +SUBPARTITION p_list_5_36 VALUES ( '76' ), +SUBPARTITION p_list_5_37 VALUES ( '77' ), +SUBPARTITION p_list_5_38 VALUES ( '78' ), +SUBPARTITION p_list_5_39 VALUES ( '79' ), +SUBPARTITION p_list_5_40 VALUES ( '80' ), +SUBPARTITION p_list_5_41 VALUES ( '81' ), +SUBPARTITION p_list_5_42 VALUES ( '82' ), +SUBPARTITION p_list_5_43 VALUES ( '83' ), +SUBPARTITION p_list_5_44 VALUES ( '84' ), +SUBPARTITION p_list_5_45 VALUES ( '85' ), +SUBPARTITION p_list_5_46 VALUES ( '86' ), +SUBPARTITION p_list_5_47 VALUES ( '87' ), +SUBPARTITION p_list_5_48 VALUES ( '88' ), +SUBPARTITION p_list_5_49 VALUES ( '89' ), +SUBPARTITION p_list_5_50 VALUES ( '90' ), +SUBPARTITION p_list_5_51 VALUES ( '91' ), +SUBPARTITION p_list_5_52 VALUES ( '92' ), +SUBPARTITION p_list_5_53 VALUES ( '93' ), +SUBPARTITION p_list_5_54 VALUES ( '94' ), +SUBPARTITION p_list_5_55 VALUES ( '95' ), +SUBPARTITION p_list_5_56 VALUES ( '96' ), +SUBPARTITION p_list_5_57 VALUES ( '97' ), +SUBPARTITION p_list_5_58 VALUES ( '98' ), +SUBPARTITION p_list_5_59 VALUES ( '99' ), +SUBPARTITION p_list_5_60 VALUES ( '100' ), +SUBPARTITION p_list_5_61 VALUES ( '101' ), +SUBPARTITION p_list_5_62 VALUES ( '102' ), +SUBPARTITION p_list_5_63 VALUES ( '103' ), +SUBPARTITION p_list_5_64 VALUES ( '104' ), +SUBPARTITION p_list_5_65 VALUES ( '105' ), +SUBPARTITION p_list_5_66 VALUES ( '106' ), +SUBPARTITION p_list_5_67 VALUES ( '107' ), +SUBPARTITION p_list_5_68 VALUES ( '108' ), +SUBPARTITION 
p_list_5_69 VALUES ( '109' ), +SUBPARTITION p_list_5_70 VALUES ( '110' ), +SUBPARTITION p_list_5_71 VALUES ( '111' ), +SUBPARTITION p_list_5_72 VALUES ( '112' ), +SUBPARTITION p_list_5_73 VALUES ( '113' ), +SUBPARTITION p_list_5_74 VALUES ( '114' ), +SUBPARTITION p_list_5_75 VALUES ( '115' ), +SUBPARTITION p_list_5_76 VALUES ( '116' ), +SUBPARTITION p_list_5_77 VALUES ( '117' ), +SUBPARTITION p_list_5_78 VALUES ( '118' ), +SUBPARTITION p_list_5_79 VALUES ( '119' ), +SUBPARTITION p_list_5_80 VALUES ( default ) + ), + PARTITION p_range_6 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index on range_list(col_2) local; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: ustore_subpartition_scan.range_list.col_1 + -> Nested Loop + -> HashAggregate + Group By Key: (ustore_subpartition_scan.range_list.col_1)::text + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: (((col_1)::bigint > 10) AND ((col_1)::bigint < 100)) + Selected Partitions: 1..6 + Selected Subpartitions: ALL + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Index Scan using range_list_col_2_idx on range_list + Index Cond: ((col_2)::text = (ustore_subpartition_scan.range_list.col_1)::text) + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(18 rows) + +ALTER INDEX range_list_col_2_idx MODIFY PARTITION p_list_5_14_col_2_idx UNUSABLE; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: ustore_subpartition_scan.range_list.col_1 + -> Nested Loop + -> HashAggregate + Group By Key: (ustore_subpartition_scan.range_list.col_1)::text + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: (((col_1)::bigint > 10) AND ((col_1)::bigint < 100)) + Selected Partitions: 1..6 + Selected Subpartitions: ALL + -> Partition Iterator + Iterations: 6, Sub Iterations: 186 + -> Partitioned Seq Scan on range_list + Filter: ((ustore_subpartition_scan.range_list.col_1)::text = (col_2)::text) + Selected Partitions: 1..6 + Selected Subpartitions: ALL +(18 rows) + +drop table range_list; +create table range_range_jade(jid int,jn int,name varchar2) WITH (STORAGE_TYPE=USTORE) partition by range (jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), +subpartition hrp1_2 values less than(26), +subpartition hrp1_3 values less than(36), + subpartition hrp1_4 values less than(maxvalue)), + partition hrp2 values less than(26)( + subpartition hrp2_1 values less than(maxvalue)), + partition hrp3 values less than(36)( + subpartition hrp3_1 values less than(16), +subpartition hrp3_2 values less than(26), + subpartition hrp3_3 values less than(maxvalue)), + partition hrp4 values less than(maxvalue)( + subpartition hrp4_1 values less than(16), + subpartition hrp4_2 values less than(maxvalue)) +)ENABLE ROW MOVEMENT; +-- no errors +set enable_partition_opfusion = on; +insert into range_range_jade values(1,2,'jade'); +reset enable_partition_opfusion; +drop 
table range_range_jade; +drop table list_range_02; +ERROR: table "list_range_02" does not exist +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, +col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; +explain (costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Nested Loop Semi Join + Join Filter: (ustore_subpartition_scan.list_range_02.col_2 = ustore_subpartition_scan.list_range_02.col_1) + -> Partition Iterator + Iterations: 4, Sub Iterations: 4 + -> Partitioned Index Scan using index_01 on list_range_02 + Index Cond: ((col_2 = ANY ('{100,200,300}'::integer[])) AND (col_2 > 10) AND (col_2 < 100)) + Filter: ((col_1 + col_2) = 50) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 + -> Materialize + -> Partition Iterator + Iterations: 1, Sub Iterations: 1 + -> Partitioned Seq Scan on list_range_02 + Filter: ((col_1 > 10) AND (col_1 < 100) AND (col_1 = ANY ('{100,200,300}'::integer[]))) + Selected Partitions: 6 + Selected Subpartitions: ALL +(16 rows) + +drop table list_range_02; +DROP SCHEMA ustore_subpartition_scan CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_select.out b/src/test/regress/expected/ustore_subpartition_select.out new file mode 100644 index 000000000..03e5bb033 --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_select.out @@ -0,0 +1,1235 @@ +--prepare +DROP SCHEMA ustore_subpartition_select CASCADE; +ERROR: schema "ustore_subpartition_select" does not exist +CREATE SCHEMA ustore_subpartition_select; +SET CURRENT_SCHEMA TO ustore_subpartition_select; +--select +CREATE TABLE t1 +( + c1 int, + c2 int +) WITH (STORAGE_TYPE=USTORE); +insert into t1 values(generate_series(201901,201910), generate_series(1,10)); 
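+-- Hedged sketch (illustrative editorial example, not part of the recorded
+-- test run): t1 above is the plain, non-partitioned ustore table used as the
+-- probe side of the join tests later in this file. A quick sanity check of
+-- its contents could look like:
+--   select count(*) as n, min(c1), max(c1) from t1;
+--   -- given the generate_series insert above: n = 10, min = 201901, max = 201910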
+CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '3', '1', 1); +ERROR: inserted partition key does not map to any table partition +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '3', '1', 1); +ERROR: inserted partition key does not map to any table partition +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code 
| user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +select * from range_hash order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_hash partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_hash partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 
1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +select * from range_range order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +select * from range_range partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_range partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 
| 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(5 rows) + +--view +create view view_temp as select * from range_list; +select * from view_temp; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(6 rows) + +--error +select * from view_temp partition (p_201901); +ERROR: relation "view_temp" is not partitioned table +DETAIL: N/A. +select * from view_temp partition (p_201902); +ERROR: relation "view_temp" is not partitioned table +DETAIL: N/A. +drop view view_temp; +with tmp1 as (select * from range_list ) select * from tmp1 order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +with tmp1 as (select * from range_list partition (p_201901)) select * from tmp1 order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +--join normal table +select * from range_list left join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_list left join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list right join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_list right join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list full join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 
201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_list full join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_list inner join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + 
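+-- Row-count sketch for the full join above (hedged illustration, not recorded
+-- output): range_hash holds 6 rows whose month_code is 201902 or 201903, so a
+-- full join against t1 (c1 = 201901..201910) yields 3 + 3 matched rows plus
+-- the 8 unmatched t1 rows (201901 and 201904..201910) NULL-padded on the
+-- range_hash side, i.e. 14 rows in total:
+--   select count(*) from range_hash full join t1 on range_hash.month_code = t1.c1;
+--   -- expected under the inserts above: 14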
+select * from range_hash full join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range left join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_range left join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range right join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_range right join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range full join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + | | | | 201901 | 1 + | | | | 201904 | 4 + | | | | 201905 | 5 + | | | | 201906 | 6 + | | | | 201907 | 7 + | | | | 201908 | 8 + | | | | 201909 | 9 + | | | | 201910 | 10 +(14 rows) + +select * from range_range full join t1 on 
range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +select * from range_range inner join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 1 | 1 | 1 | 201902 | 2 + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 1 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(6 rows) + +select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + month_code | dept_code | user_no | sales_amt | c1 | c2 +------------+-----------+---------+-----------+--------+---- + 201902 | 2 | 1 | 1 | 201902 | 2 + 201903 | 2 | 1 | 1 | 201903 | 3 + 201903 | 2 | 1 | 1 | 201903 | 3 +(3 rows) + +--join range_list and range_hash +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 
| 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 
201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +--join range_hash and range_range +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 
| 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +--join range_hash and range_range +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash right join range_range on range_hash.month_code = 
range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 
201903 | 2 | 1 | 1 +(9 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 | 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(18 rows) + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + month_code | dept_code | user_no | sales_amt | month_code | dept_code | user_no | sales_amt +------------+-----------+---------+-----------+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 | 201902 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 | 201903 | 2 | 1 | 1 +(9 rows) + +drop table list_range_02; +ERROR: table "list_range_02" does not exist +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on 
list_range_02(col_2) local ; +INSERT INTO list_range_02 VALUES (GENERATE_SERIES(0, 19),GENERATE_SERIES(0, 1000),GENERATE_SERIES(0, 99)); + explain (costs off, verbose on) select * from list_range_02 where col_2 >500 and col_2 <8000 order by col_1; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Sort + Output: col_1, col_2, col_3, col_4 + Sort Key: list_range_02.col_1 + -> Partition Iterator + Output: col_1, col_2, col_3, col_4 + Iterations: 4, Sub Iterations: 4 + -> Partitioned Bitmap Heap Scan on ustore_subpartition_select.list_range_02 + Output: col_1, col_2, col_3, col_4 + Recheck Cond: ((list_range_02.col_2 > 500) AND (list_range_02.col_2 < 8000)) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 + -> Partitioned Bitmap Index Scan on index_01 + Index Cond: ((list_range_02.col_2 > 500) AND (list_range_02.col_2 < 8000)) + Selected Partitions: 1,3,5..6 + Selected Subpartitions: 1:1, 3:1, 5:1, 6:1 +(15 rows) + +drop index index_01; +drop table list_range_02; +create table pjade(jid int,jn int,name varchar2) WITH (STORAGE_TYPE=USTORE) partition by range(jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), + subpartition hrp1_2 values less than(maxvalue)), + partition hrp2 values less than(maxvalue)( + subpartition hrp3_1 values less than(16), + subpartition hrp3_3 values less than(maxvalue)) +); +create table cjade(jid int,jn int,name varchar2) WITH (STORAGE_TYPE=USTORE); +insert into pjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +insert into cjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; + jid | jn | name +-----+----+------- + 6 | 8 | tom + 8 | 18 | jerry + 16 | 8 | jade + 18 | 20 | jack +(4 rows) + +drop table pjade; +drop table cjade; +DROP SCHEMA ustore_subpartition_select CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table t1 +drop cascades to table range_list +drop cascades to table range_hash +drop cascades to table range_range +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_split.out b/src/test/regress/expected/ustore_subpartition_split.out new file mode 100644 index 000000000..83083d0ee --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_split.out @@ -0,0 +1,395 @@ +--prepare +DROP SCHEMA ustore_subpartition_split CASCADE; +ERROR: schema "ustore_subpartition_split" does not exist +CREATE SCHEMA ustore_subpartition_split; +SET CURRENT_SCHEMA TO ustore_subpartition_split; +--split subpartition +-- list subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) 
SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '3', '1', 1); +select * from list_list order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(6 rows) + +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201901_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(2 rows) + +alter table list_list split subpartition p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 3 | 1 | 1 +(2 rows) + +select * from list_list subpartition (p_201902_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +--error +alter table list_list split subpartition p_201902_a values (3) into +( + subpartition p_201902_ab, + subpartition p_201902_ac +); +ERROR: Only 
the default boundary subpartition can be split. +drop table list_list; +-- range subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(6 rows) + +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 3 | 1 | 1 +(2 rows) + +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +ERROR: subpartition "p_201901_b" of relation "range_range" does not exist +select * from range_range subpartition (p_201901_c) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201901_d) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 3 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 5 | 1 | 1 +(2 rows) + +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +ERROR: subpartition "p_201902_b" of relation "range_range" does not exist +select * from range_range subpartition (p_201902_c) order by 1,2,3,4; + 
month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 +(1 row) + +select * from range_range subpartition (p_201902_d) order by 1,2,3,4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 5 | 1 | 1 +(1 row) + +drop table range_range; +--test syntax +CREATE TABLE IF NOT EXISTS list_hash +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY list (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10 ) + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 , + SUBPARTITION p_hash_2_3 , + SUBPARTITION p_hash_2_4 , + SUBPARTITION p_hash_2_5 + ), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30 ) + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_list_5 VALUES (default) + ( + SUBPARTITION p_hash_5_1 + ), + PARTITION p_list_6 VALUES (31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_hash_6_1 , + SUBPARTITION p_hash_6_2 , + SUBPARTITION p_hash_6_3 + ) +) ENABLE ROW MOVEMENT ; +alter table list_hash split subPARTITION p_hash_2_3 at(-10) into ( subPARTITION add_p_01 , subPARTITION add_p_02 ); +ERROR: Hash subpartition does not support split. +DETAIL: N/A +drop table list_hash; +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range split subpartition p_201901_b values (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +ERROR: The syntax format of split subpartition is incorrect. +DETAIL: SPLIT SUBPARTITION NAME VALUES shouldn't be used, it's for list subpartitions. +drop table range_range; +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list split subpartition p_201901_b at (2, 3) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +ERROR: The syntax format of split subpartition is incorrect. +DETAIL: SPLIT SUBPARTITION NAME AT shouldn't be used, it's for range subpartitions. 
+drop table list_list; +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +alter table list_list_02 split PARTITION for (5) at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +ERROR: Un-support feature +DETAIL: For subpartition table, split partition is not supported yet. +drop table list_list_02; +--clean +DROP SCHEMA ustore_subpartition_split CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_truncate.out b/src/test/regress/expected/ustore_subpartition_truncate.out new file mode 100644 index 000000000..8b592b23f --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_truncate.out @@ -0,0 +1,140 @@ +--prepare +DROP SCHEMA ustore_subpartition_truncate CASCADE; +ERROR: schema "ustore_subpartition_truncate" does not exist +CREATE SCHEMA ustore_subpartition_truncate; +SET CURRENT_SCHEMA TO ustore_subpartition_truncate; +--truncate partition/subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(6 rows) + +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt 
+------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(3 rows) + +alter table list_list truncate partition p_201901; +select * from list_list partition (p_201901); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 +(3 rows) + +alter table list_list truncate partition p_201902; +select * from list_list partition (p_201902); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +alter table list_list truncate subpartition p_201901_a; +select * from list_list subpartition (p_201901_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +alter table list_list truncate subpartition p_201901_b; +select * from list_list subpartition (p_201901_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 +(1 row) + +alter table list_list truncate subpartition p_201902_a; +select * from list_list subpartition (p_201902_a); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(2 rows) + +alter table list_list truncate subpartition p_201902_b; +select * from list_list subpartition (p_201902_b); + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from list_list; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +drop table list_list; +DROP SCHEMA ustore_subpartition_truncate CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/ustore_subpartition_update.out b/src/test/regress/expected/ustore_subpartition_update.out new file mode 100644 index 000000000..2be93047d --- /dev/null +++ b/src/test/regress/expected/ustore_subpartition_update.out @@ -0,0 +1,348 @@ +--prepare +DROP SCHEMA ustore_subpartition_update CASCADE; +ERROR: schema "ustore_subpartition_update" does not exist +CREATE SCHEMA ustore_subpartition_update; +SET CURRENT_SCHEMA TO ustore_subpartition_update; 
+--update +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)DISABLE ROW MOVEMENT; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +--error +update range_list set month_code = '201903'; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +--error +update range_list set dept_code = '2'; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +update range_list set user_no = '2'; +select * from range_list order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 2 | 1 + 201902 | 1 | 2 | 1 + 201902 | 2 | 2 | 1 + 201903 | 1 | 2 | 1 + 201903 | 2 | 2 | 1 + 201903 | 2 | 2 | 1 +(6 rows) + +-- test for upsert and merge into, both should report error +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt=1; +CREATE TABLE temp_table +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE); +insert into temp_table values('201802', '1', '1', 1), ('201901', '2', '1', 1), ('201702', '1', '1', 1); +MERGE INTO range_list t1 +USING temp_table t2 +ON (t1.dept_code = t2.dept_code) +WHEN MATCHED THEN + UPDATE SET t1.month_code = t2.month_code WHERE t1.dept_code > 1 +WHEN NOT MATCHED THEN + INSERT VALUES (t2.month_code, t2.dept_code, t2.user_no, t2.sales_amt) WHERE t2.sales_amt = 1; +ERROR: fail to update partitioned table "range_list" +DETAIL: disable row movement +drop table temp_table; +drop table range_list; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)ENABLE ROW MOVEMENT; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 
1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 + 201902 | 2 | 1 | 1 + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 1 | 1 | 1 + 201902 | 1 | 1 | 1 +(2 rows) + +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 +(1 row) + +update range_list set dept_code = '2' where month_code = '201902'; +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 + 201902 | 2 | 1 | 1 +(3 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(3 rows) + +update range_list set month_code = '201903' where month_code = '201902'; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- +(0 rows) + +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + month_code | dept_code | user_no | sales_amt +------------+-----------+---------+----------- + 201903 | 1 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 + 201903 | 2 | 1 | 1 +(6 rows) + +drop table range_list; +-- FOREIGN KEY +drop table tb_02; +ERROR: table "tb_02" does not exist +CREATE TABLE tb_02 +( + col_1 int PRIMARY KEY, + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "tb_02_pkey" for table "tb_02" +drop table range_range_02 cascade; +ERROR: table "range_range_02" does not exist +CREATE TABLE range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int , +FOREIGN KEY(col_1) REFERENCES tb_02(col_1) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 80 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); +insert into tb_02 values(0,0,0,0); +insert into range_range_02 values(0,0,0,0); +update tb_02 set col_1=8 where col_2=0; +ERROR: update or delete on table "tb_02" violates foreign key constraint "range_range_02_col_1_fkey" on table "range_range_02" +DETAIL: Key (col_1)=(0) is still referenced from table "range_range_02". 
+drop table range_range_02 cascade; +drop table tb_02; +drop table tb_02; +ERROR: table "tb_02" does not exist +CREATE TABLE tb_02 +( + col_1 int PRIMARY KEY, + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "tb_02_pkey" for table "tb_02" +drop table list_list_02 cascade; +ERROR: table "list_list_02" does not exist +CREATE TABLE list_list_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int , + FOREIGN KEY(col_1) REFERENCES tb_02(col_1) +) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +insert into list_list_02 values(0,0,0,0); +ERROR: insert or update on table "list_list_02" violates foreign key constraint "list_list_02_col_1_fkey" +DETAIL: Key (col_1)=(0) is not present in table "tb_02". +insert into tb_02 values(0,0,0,0); +insert into list_list_02 values(0,0,0,0); +update list_list_02 set col_2=8 where col_2=0; +update list_list_02 set col_1=8 where col_2=8; +ERROR: insert or update on table "list_list_02" violates foreign key constraint "list_list_02_col_1_fkey" +DETAIL: Key (col_1)=(8) is not present in table "tb_02". +delete list_list_02 where col_2=8; +drop table tb_02; +ERROR: cannot drop table tb_02 because other objects depend on it +DETAIL: constraint list_list_02_col_1_fkey on table list_list_02 depends on table tb_02 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+drop table list_list_02 cascade; +drop table range_list_02; +ERROR: table "range_list_02" does not exist +CREATE TABLE IF NOT EXISTS range_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( '-1','-2','-3','-4','-5'), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_list_2_1 VALUES ( '6','7','8','9','10'), + SUBPARTITION p_list_2_2 VALUES ( default ) + ), + PARTITION p_range_3 VALUES LESS THAN( 30 ) + ( + SUBPARTITION p_list_3_1 VALUES ( default ) + ), + PARTITION p_range_4 VALUES LESS THAN( 40 ) + ( + SUBPARTITION p_list_4_1 VALUES ( default ) + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create unique index on range_list_02(col_1,col_2); +INSERT INTO range_list_02 VALUES (GENERATE_SERIES(-500, 500,2),GENERATE_SERIES(1500, 2500,2), +GENERATE_SERIES(3500, 4500,2)); +insert into range_list_02 values(1,1,1,1),(4,4,4,4),(5,5,5,5),(8,8,8,8),(9,9,9,9); +insert into range_list_02 values(11,11,1,1),(15,15,5,5),(18,81,8,8),(29,9,9,9); +insert into range_list_02 values(21,11,1,1),(15,150,5,5),(18,811,8,8),(-2978,31,9,9); +insert into range_list_02 values(-1,1,1,1),(-1,-15,5,5),(-8,7,8,8),(-9,29,9,9); +insert into range_list_02 values(-8,18,1); +update range_list_02 set col_4=80 where col_1=4; +update range_list_02 set col_4=col_1 where col_1<5; +update range_list_02 set col_4=col_1+ col_2 where col_1<5; +update range_list_02 set col_1=83,col_2=8 where col_1=8; +ERROR: duplicate key value violates unique constraint "range_list_02_col_1_col_2_idx" +DETAIL: Key (col_1, col_2)=(83, 8) already exists. +update range_list_02 set col_2=-56 where col_1=-2978; +update range_list_02 set col_2=80 where col_2=-3; +update range_list_02 set col_2=27 where col_2=80; +alter table range_list_02 truncate partition p_range_1; +alter table range_list_02 truncate SUBPARTITION p_list_2_2; +delete from range_list_02 where col_1 >188 ; +delete from range_list_02 where col_2 <10 and col_3>5; +drop table range_list_02; +DROP SCHEMA ustore_subpartition_update CASCADE; +NOTICE: drop cascades to table tb_02 +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/expected/uuid_1.out b/src/test/regress/expected/uuid_1.out index e24228f1e..58aefb782 100644 --- a/src/test/regress/expected/uuid_1.out +++ b/src/test/regress/expected/uuid_1.out @@ -120,6 +120,7 @@ SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-2222-2222-2222-22222222 -- btree and hash index creation test CREATE INDEX guid1_btree ON guid1 USING BTREE (guid_field); CREATE INDEX guid1_hash ON guid1 USING HASH (guid_field); +ERROR: access method "hash" does not support row store -- unique index test CREATE UNIQUE INDEX guid1_unique_BTREE ON guid1 USING BTREE (guid_field); -- should fail @@ -130,7 +131,7 @@ DETAIL: Key (guid_field)=(11111111-1111-1111-1111-111111111111) already exists. 
SELECT count(*) FROM pg_class WHERE relkind='i' AND relname LIKE 'guid%'; count ------- - 3 + 2 (1 row) -- populating the test tables with additional records diff --git a/src/test/regress/expected/vec_bitmap_1.out b/src/test/regress/expected/vec_bitmap_1.out index 784164bff..920a156c9 100644 --- a/src/test/regress/expected/vec_bitmap_1.out +++ b/src/test/regress/expected/vec_bitmap_1.out @@ -2009,6 +2009,15 @@ select * from vector_bitmap_table_01 t1 inner join vector_bitmap_table_02 t2 on -> Vector Nest Loop Output: t1.col_int1, t1.col_int2, t1.col_int3, t1.col_int4, t1.col_int5, t2.col_int1, t2.col_int2, t2.col_int3, t2.col_int4, t2.col_int5 Join Filter: (t1.col_int1 = t2.col_int1) + -> Vector Partition Iterator + Output: t2.col_int1, t2.col_int2, t2.col_int3, t2.col_int4, t2.col_int5 + Iterations: 4 + -> Partitioned CStore Index Heap Scan on vector_bitmap_engine.vector_bitmap_table_02 t2 + Output: t2.col_int1, t2.col_int2, t2.col_int3, t2.col_int4, t2.col_int5 + Recheck Cond: (t2.col_int5 < 12) + Selected Partitions: 1..4 + -> Partitioned CStore Index Ctid Scan on bitmap_02_e + Index Cond: (t2.col_int5 < 12) -> CStore Index Heap Scan on vector_bitmap_engine.vector_bitmap_table_01 t1 Output: t1.col_int1, t1.col_int2, t1.col_int3, t1.col_int4, t1.col_int5 Recheck Cond: ((t1.col_int2 < 10) OR (t1.col_int3 < 12) OR (t1.col_int4 < 5)) @@ -2019,15 +2028,6 @@ select * from vector_bitmap_table_01 t1 inner join vector_bitmap_table_02 t2 on Index Cond: (t1.col_int3 < 12) -> CStore Index Ctid Scan on bitmap_01_d Index Cond: (t1.col_int4 < 5) - -> Vector Partition Iterator - Output: t2.col_int1, t2.col_int2, t2.col_int3, t2.col_int4, t2.col_int5 - Iterations: 4 - -> Partitioned CStore Index Heap Scan on vector_bitmap_engine.vector_bitmap_table_02 t2 - Output: t2.col_int1, t2.col_int2, t2.col_int3, t2.col_int4, t2.col_int5 - Recheck Cond: (t2.col_int5 < 12) - Selected Partitions: 1..4 - -> Partitioned CStore Index Ctid Scan on bitmap_02_e - Index Cond: (t2.col_int5 < 12) (29 rows) select * from vector_bitmap_table_01 t1 inner join vector_bitmap_table_02 t2 on t1.col_int1=t2.col_int1 where (t1.col_int2 < 10 or t1.col_int3 < 12 or t1.col_int4 < 5) and t2.col_int5 < 12 order by 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 limit 50; diff --git a/src/test/regress/expected/vec_set_func.out b/src/test/regress/expected/vec_set_func.out index 4de271202..dafac096b 100644 --- a/src/test/regress/expected/vec_set_func.out +++ b/src/test/regress/expected/vec_set_func.out @@ -1,3 +1,4 @@ +set enable_vector_engine=on; create table hl_test002(a int,b varchar2(15), c varchar2(15)); insert into hl_test002 values(1,'gauss,ap', 'xue,dong,pu'); insert into hl_test002 values(1,'gauss,ap', NULL); @@ -9,7 +10,9 @@ insert into hl_test002 values(2,'wang,,rui', 'xi,an'); insert into hl_test002 values(2,'wang,,rui', NULL); create table hl_test001(a int,b varchar2(15), c varchar2(15)) with (ORIENTATION = COLUMN); insert into hl_test001 select * from hl_test002; -select a,b,c,regexp_split_to_table(b,E',') from hl_test001 order by 1, 2, 3, 4; +create table hl_test003(a int,b int[5]) with (ORIENTATION = COLUMN); +insert into hl_test003 values(1, array[1,2,3]),(2,array[5,4,6]); +select a,b,c,regexp_split_to_table(b,E',') from hl_test001 order by 1, 2, 3, 4 nulls last; a | b | c | regexp_split_to_table ---+-------------+-------------+----------------------- 1 | gauss,ap | xue,dong,pu | ap @@ -22,24 +25,24 @@ select a,b,c,regexp_split_to_table(b,E',') from hl_test001 order by 1, 2, 3, 4; 1 | xue,dong,pu | | dong 1 | xue,dong,pu | | pu 1 | xue,dong,pu 
| | xue - 2 | wang,,rui | xi,an | 2 | wang,,rui | xi,an | rui 2 | wang,,rui | xi,an | wang - 2 | wang,,rui | | + 2 | wang,,rui | xi,an | 2 | wang,,rui | | rui 2 | wang,,rui | | wang + 2 | wang,,rui | | 2 | xi,an | wang,,rui | an 2 | xi,an | wang,,rui | xi 2 | xi,an | | an 2 | xi,an | | xi (20 rows) -select a,b,c,regexp_split_to_table(b,NULL) from hl_test001 order by 1, 2, 3, 4; +select a,b,c,regexp_split_to_table(b,NULL) from hl_test001 order by 1, 2, 3, 4 nulls last; a | b | c | regexp_split_to_table ---+---+---+----------------------- (0 rows) -select a,b,c,regexp_split_to_table(b,E','), regexp_split_to_table(c,E',') from hl_test001 order by 1, 2, 3, 4, 5; +select a,b,c,regexp_split_to_table(b,E','), regexp_split_to_table(c,E',') from hl_test001 order by 1, 2, 3, 4, 5 nulls last; a | b | c | regexp_split_to_table | regexp_split_to_table ---+-------------+-------------+-----------------------+----------------------- 1 | gauss,ap | xue,dong,pu | ap | dong @@ -51,24 +54,35 @@ select a,b,c,regexp_split_to_table(b,E','), regexp_split_to_table(c,E',') from h 1 | xue,dong,pu | gauss,ap,db | dong | ap 1 | xue,dong,pu | gauss,ap,db | pu | db 1 | xue,dong,pu | gauss,ap,db | xue | gauss - 2 | wang,,rui | xi,an | | an - 2 | wang,,rui | xi,an | | xi 2 | wang,,rui | xi,an | rui | an 2 | wang,,rui | xi,an | rui | xi 2 | wang,,rui | xi,an | wang | an 2 | wang,,rui | xi,an | wang | xi - 2 | xi,an | wang,,rui | an | + 2 | wang,,rui | xi,an | | an + 2 | wang,,rui | xi,an | | xi 2 | xi,an | wang,,rui | an | rui 2 | xi,an | wang,,rui | an | wang - 2 | xi,an | wang,,rui | xi | + 2 | xi,an | wang,,rui | an | 2 | xi,an | wang,,rui | xi | rui 2 | xi,an | wang,,rui | xi | wang + 2 | xi,an | wang,,rui | xi | (21 rows) select regexp_split_to_table(b,E','), generate_series(1, 3) from hl_test001; ERROR: set-return function not supported in vector eninge CONTEXT: referenced column: generate_series -select a,b,c,regexp_split_to_table(regexp_split_to_table(b,E','), E'u') from hl_test001 order by 1, 2, 3, 4; +select a, b, unnest(b) from hl_test003; + a | b | unnest +---+---------+-------- + 1 | {1,2,3} | 1 + 1 | {1,2,3} | 2 + 1 | {1,2,3} | 3 + 2 | {5,4,6} | 5 + 2 | {5,4,6} | 4 + 2 | {5,4,6} | 6 +(6 rows) + +select a,b,c,regexp_split_to_table(regexp_split_to_table(b,E','), E'u') from hl_test001 order by 1, 2, 3, 4 nulls last; a | b | c | regexp_split_to_table ---+-------------+-------------+----------------------- 1 | gauss,ap | xue,dong,pu | ap @@ -77,21 +91,19 @@ select a,b,c,regexp_split_to_table(regexp_split_to_table(b,E','), E'u') from hl_ 1 | gauss,ap | | ap 1 | gauss,ap | | ga 1 | gauss,ap | | ss - 1 | xue,dong,pu | gauss,ap,db | 1 | xue,dong,pu | gauss,ap,db | dong 1 | xue,dong,pu | gauss,ap,db | e 1 | xue,dong,pu | gauss,ap,db | p 1 | xue,dong,pu | gauss,ap,db | x - 1 | xue,dong,pu | | + 1 | xue,dong,pu | gauss,ap,db | 1 | xue,dong,pu | | dong 1 | xue,dong,pu | | e 1 | xue,dong,pu | | p 1 | xue,dong,pu | | x - 2 | wang,,rui | xi,an | + 1 | xue,dong,pu | | 2 | wang,,rui | xi,an | i 2 | wang,,rui | xi,an | r 2 | wang,,rui | xi,an | wang - 2 | wang,,rui | | 2 | wang,,rui | | i 2 | wang,,rui | | r 2 | wang,,rui | | wang @@ -99,9 +111,9 @@ select a,b,c,regexp_split_to_table(regexp_split_to_table(b,E','), E'u') from hl_ 2 | xi,an | wang,,rui | xi 2 | xi,an | | an 2 | xi,an | | xi -(28 rows) +(26 rows) -select a,b,c,substring(regexp_split_to_table(b,E','), 1, 100) from hl_test001 order by 1, 2, 3, 4; +select a,b,c,substring(regexp_split_to_table(b,E','), 1, 100) from hl_test001 order by 1, 2, 3, 4 nulls last; a | b | c 
| substring ---+-------------+-------------+----------- 1 | gauss,ap | xue,dong,pu | ap @@ -126,7 +138,7 @@ select a,b,c,substring(regexp_split_to_table(b,E','), 1, 100) from hl_test001 or 2 | xi,an | | xi (20 rows) -select a,b,c,regexp_split_to_table(substring(b,1, 100), E',') from hl_test001 order by 1, 2, 3, 4; +select a,b,c,regexp_split_to_table(substring(b,1, 100), E',') from hl_test001 order by 1, 2, 3, 4 nulls last; a | b | c | regexp_split_to_table ---+-------------+-------------+----------------------- 1 | gauss,ap | xue,dong,pu | ap @@ -139,12 +151,12 @@ select a,b,c,regexp_split_to_table(substring(b,1, 100), E',') from hl_test001 or 1 | xue,dong,pu | | dong 1 | xue,dong,pu | | pu 1 | xue,dong,pu | | xue - 2 | wang,,rui | xi,an | 2 | wang,,rui | xi,an | rui 2 | wang,,rui | xi,an | wang - 2 | wang,,rui | | + 2 | wang,,rui | xi,an | 2 | wang,,rui | | rui 2 | wang,,rui | | wang + 2 | wang,,rui | | 2 | xi,an | wang,,rui | an 2 | xi,an | wang,,rui | xi 2 | xi,an | | an @@ -153,3 +165,5 @@ select a,b,c,regexp_split_to_table(substring(b,1, 100), E',') from hl_test001 or drop table hl_test001; drop table hl_test002; +drop table hl_test003; +reset enable_vector_engine; diff --git a/src/test/regress/expected/vector_procedure.out b/src/test/regress/expected/vector_procedure.out new file mode 100644 index 000000000..7fba8feb1 --- /dev/null +++ b/src/test/regress/expected/vector_procedure.out @@ -0,0 +1,49 @@ +create schema force_vector_engine; +set current_schema=force_vector_engine; +create type pro_tblof_ty_015 as (c1 int,c2 char(10),c3 clob,c4 blob); +create table pro_tblof_tbl_015(c1 int,c2 pro_tblof_ty_015); +insert into pro_tblof_tbl_015 values(1,(1,'char',repeat('静夜思',16),hextoraw('12345'))); +insert into pro_tblof_tbl_015 values(2,(2,'char',repeat('静夜思',16),hextoraw('12345'))); +insert into pro_tblof_tbl_015 values(3,(3,'char',repeat('静夜思',16),hextoraw('12345'))); +insert into pro_tblof_tbl_015 values(4,(4,'char',repeat('静夜思',16),hextoraw('12345'))); +insert into pro_tblof_tbl_015 values(5,(5,'char',repeat('静夜思',16),hextoraw('12345'))); +create type pro_tblof_015 is table of pro_tblof_ty_015; +create or replace procedure pro_tblof_pro_015(col1 int,col2 int) +as +tblof001 pro_tblof_015; +tblof002 pro_tblof_ty_015; +i int:=1; +begin +select count(*) into col1 from pro_tblof_tbl_015; +loop +select c2 into tblof001(i) from pro_tblof_tbl_015 where c1=i; +if tblof001(i).c1%2=0 then +tblof001(i).c1=0; +else +tblof001(i).c1=1; +end if; +i=i+1; +if i>col1 then +exit; +end if; +end loop; +raise info 'tblof001 is %',tblof001; +raise info 'i is %',i; +end; +/ +set try_vector_engine_strategy='force'; +call pro_tblof_pro_015(6,6); +INFO: tblof001 is {"(1,\"char \",静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思,012345)","(0,\"char \",静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思,012345)","(1,\"char \",静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思,012345)","(0,\"char \",静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思,012345)","(1,\"char \",静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思静夜思,012345)"} +INFO: i is 6 + pro_tblof_pro_015 +------------------- + +(1 row) + +set try_vector_engine_strategy='off'; +drop schema force_vector_engine cascade; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to type pro_tblof_ty_015 +drop cascades to table pro_tblof_tbl_015 +drop cascades to type _pro_tblof_ty_015[] +drop cascades to function pro_tblof_pro_015(integer,integer) diff --git a/src/test/regress/expected/vector_subpartition.out b/src/test/regress/expected/vector_subpartition.out 
new file mode 100644 index 000000000..058d99ba3 --- /dev/null +++ b/src/test/regress/expected/vector_subpartition.out @@ -0,0 +1,235 @@ +DROP SCHEMA IF exists vector_subpartition; +NOTICE: schema "vector_subpartition" does not exist, skipping +CREATE SCHEMA vector_subpartition; +set current_schema=vector_subpartition; +set try_vector_engine_strategy=force; +CREATE TABLE IF NOT EXISTS range_range_02 +(col_1 int, col_2 int, col_3 int , col_4 int) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_2_3 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_3 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +INSERT INTO range_range_02 VALUES (GENERATE_SERIES(-190, 1900),GENERATE_SERIES(-290, 1800),GENERATE_SERIES(-90, 2000)); +create unique index on range_range_02 (col_1,col_2 nulls first) where col_2 < 4; +create unique index on range_range_02 (col_1,col_2,col_3 nulls first) where col_2 < 4; +explain (costs off) select /*+ indexscan(range_range_02 + range_range_02_col_1_col_2_idx)*/ * from range_range_02 where col_2 in (select + col_1 from range_range_02 aa where col_1 >10 and col_1 <100) and col_1 +col_2 =50 + and col_2 < 4; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Row Adapter + -> Vector Nest Loop + -> Vector Sonic Hash Aggregate + Group By Key: aa.col_1 + -> Vector Partition Iterator + Iterations: 0 + -> Vector Adapter(type: BATCH MODE) + Filter: ((col_1 > 10) AND (col_1 < 100) AND (col_1 < 4)) + -> Partitioned Seq Scan on range_range_02 aa + Selected Partitions: NONE + -> Vector Partition Iterator + Iterations: 0 + -> Vector Adapter + -> Partitioned Index Scan using range_range_02_col_1_col_2_idx on range_range_02 + Index Cond: ((col_2 = $4) AND (col_2 < 4) AND (col_2 > 10) AND (col_2 < 100)) + Filter: ((col_1 + col_2) = 50) + Selected Partitions: NONE +(17 rows) + +CREATE TABLE list_list +( + col_1 int primary key, + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int generated always as(2*col_2) stored , + check (col_4 >= col_2) +) with(FILLFACTOR=80) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 
) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "list_list_pkey" for table "list_list" +insert into list_list values(1,1,'aa'); +insert into list_list values(5,5,'bb'); +insert into list_list values(11,2,'cc'); +insert into list_list values(19,8,'dd'); +explain (costs off) select * from list_list; + QUERY PLAN +----------------------------------------------------- + Row Adapter + -> Vector Partition Iterator + Iterations: 7, Sub Iterations: 16 + -> Vector Adapter(type: BATCH MODE) + -> Partitioned Seq Scan on list_list + Selected Partitions: 1..7 + Selected Subpartitions: ALL +(7 rows) + +select * from list_list; + col_1 | col_2 | col_3 | col_4 +-------+-------+-------+------- + 11 | 2 | cc | 4 + 19 | 8 | dd | 16 + 1 | 1 | aa | 2 + 5 | 5 | bb | 10 +(4 rows) + +drop table if exists list_list_02; +NOTICE: table "list_list_02" does not exist, skipping +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int DEFAULT 20 , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; +insert into list_list_02(col_1,col_3,col_4) values(1,1,1),(5,5,5); +select * from list_list_02; + col_1 | col_2 | col_3 | col_4 +-------+-------+-------+------- + 1 | 20 | 1 | 1 + 5 | 20 | 5 | 5 +(2 rows) + +set try_vector_engine_strategy=force; +select * from list_list_02 partition(p_list_2); + col_1 | col_2 | col_3 | col_4 +-------+-------+-------+------- + 1 | 20 | 1 | 1 + 5 | 20 | 5 | 5 +(2 rows) + +truncate list_list_02; +insert into list_list_02 values(0,0,0,0); +insert into list_list_02 values(-11,1,1,1),(-14,1,4,4),(-25,15,5,5),(-808,8,8,8),(-9,9,9,9); +insert into list_list_02 values(1,11,1,12),(4,41,4,48),(5,54,5,57),(8,87,8,84),(9,19,9,97); +insert into list_list_02 values(11,1,1,13),(14,1,4,49),(15,5,5,52),(18,8,8,81),(19,1,9,93); +create index index_01 on list_list_02(col_2 ASC ) local; +create index index_02 on list_list_02(col_2 DESC) local; +create index index_03 on list_list_02(col_2 NULLS first) local; +create index index_04 on list_list_02(col_2 NULLS LAST ) local; +explain (analyze on, timing off) 
select /*+ indexscan (list_list_02 index_01)*/ * from list_list_02 where col_2 in (select col_1 from list_list_02 aa where col_1 >10 and col_1<100) order by 2 asc limit 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=51.68..51.68 rows=4 width=16) (actual rows=3 loops=1) + -> Vector Limit (cost=51.67..51.68 rows=4 width=16) (actual rows=3 loops=1) + -> Vector Sort (cost=51.67..51.68 rows=4 width=16) (actual rows=3 loops=1) + Sort Key: list_list_02.col_2 + Sort Method: quicksort Memory: 2kB + -> Vector Nest Loop (cost=36.66..51.63 rows=4 width=16) (actual rows=3 loops=1) + -> Vector Sonic Hash Aggregate (cost=36.66..36.68 rows=2 width=4) (actual rows=5 loops=1) + Group By Key: aa.col_1 + -> Vector Partition Iterator (cost=0.00..36.64 rows=9 width=4) (actual rows=5 loops=1) + Iterations: 7, Sub Iterations: 16 + -> Vector Adapter(type: BATCH MODE) (cost=36.64..36.64 rows=9 width=4) (actual rows=5 loops=16) + Filter: ((col_1 > 10) AND (col_1 < 100)) + Rows Removed by Filter: 11 + -> Partitioned Seq Scan on list_list_02 aa (cost=0.00..36.64 rows=9 width=4) (actual rows=16 loops=16) + Selected Partitions: 1..7 + Selected Subpartitions: ALL + -> Vector Partition Iterator (cost=0.00..7.39 rows=9 width=16) (actual rows=3 loops=5) + Iterations: 7, Sub Iterations: 16 + -> Vector Adapter (cost=7.39..7.39 rows=9 width=16) (actual rows=3 loops=80) + -> Partitioned Index Scan using index_01 on list_list_02 (cost=0.00..7.39 rows=9 width=16) (actual rows=3 loops=80) + Index Cond: ((col_2 = $4) AND (col_2 > 10) AND (col_2 < 100)) + Selected Partitions: 1..7 + Selected Subpartitions: ALL +--?.* +(24 rows) + +explain (analyze on, timing off) select * from list_list_02 where ctid='(0,2)'; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=4.01..4.01 rows=1 width=16) (actual rows=4 loops=1) + -> Vector Partition Iterator (cost=0.00..4.01 rows=1 width=16) (actual rows=4 loops=1) + Iterations: 7, Sub Iterations: 16 + -> Vector Adapter (cost=4.01..4.01 rows=1 width=16) (actual rows=4 loops=16) + -> Partitioned Tid Scan on list_list_02 (cost=0.00..4.01 rows=1 width=16) (actual rows=4 loops=16) + TID Cond: (ctid = '(0,2)'::tid) + Selected Partitions: 1..7 + Selected Subpartitions: ALL +--?.* +(9 rows) + +set current_schema=public; +drop schema vector_subpartition cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table vector_subpartition.range_range_02 +drop cascades to table vector_subpartition.list_list +drop cascades to table vector_subpartition.list_list_02 diff --git a/src/test/regress/expected/wlm_memory_trace.out b/src/test/regress/expected/wlm_memory_trace.out new file mode 100644 index 000000000..d8c03049e --- /dev/null +++ b/src/test/regress/expected/wlm_memory_trace.out @@ -0,0 +1,41 @@ +\c postgres +select * from gs_get_shared_memctx_detail('CBBTopMemoryContext') limit 1; +--?.* +--?.* +--?.* +--?.* + +select * from gs_get_shared_memctx_detail('AbnormalContext'); + file | line | size +------+------+------ +(0 rows) + +select * from gs_get_shared_memctx_detail(NULL); +ERROR: NULL input for detoast datum packed +select * from gs_get_session_memctx_detail('CBBTopMemoryContext') limit 1; +--?.* +--?.* +--?.* +--?.* + +select * from gs_get_session_memctx_detail('AbnormalContext'); + file | line | size +------+------+------ +(0 rows) + +select * 
from gs_get_session_memctx_detail(NULL); +ERROR: NULL input for detoast datum packed +select * from gs_get_thread_memctx_detail(100, 'CBBTopMemoryContext'); +ERROR: can not find pid 100 + +select * from gs_get_thread_memctx_detail(100, NULL); +ERROR: NULL input for detoast datum packed +select gs_get_thread_memctx_detail(tid, 'CBBTopMemoryContext') from pv_thread_memory_context where contextname = 'CBBTopMemoryContext' limit 1; +--?.* +--?.* +--?.* +--?.* + +select gs_get_thread_memctx_detail(tid, NULL) from pv_thread_memory_context where contextname = 'CBBTopMemoryContext'; +--?.* +--?.* \ No newline at end of file diff --git a/src/test/regress/expected/xc_node.out b/src/test/regress/expected/xc_node.out index 8d19c8fdd..e01960a39 100644 --- a/src/test/regress/expected/xc_node.out +++ b/src/test/regress/expected/xc_node.out @@ -291,7 +291,7 @@ execute direct on (coordinator2) 'select node_name from pgxc_node order by node_ datanode9 (15 rows) -create table DTS2018120706312_t1 (id int, num int) distribute by replication; +create table TESTTABLE_t1 (id int, num int) distribute by replication; execute direct on (coordinator1) 'select * from pgxc_node_str()'; pgxc_node_str --------------- @@ -310,26 +310,26 @@ execute direct on (datanode1) 'select * from pgxc_node_str()'; datanode1 (1 row) -execute direct on (coordinator1) 'select * from DTS2018120706312_t1'; +execute direct on (coordinator1) 'select * from TESTTABLE_t1'; ERROR: EXECUTE DIRECT cannot execute SELECT query with normal table on coordinator -execute direct on (coordinator2) 'select * from DTS2018120706312_t1'; +execute direct on (coordinator2) 'select * from TESTTABLE_t1'; ERROR: EXECUTE DIRECT cannot execute SELECT query with normal table on coordinator -execute direct on (datanode1) 'select * from DTS2018120706312_t1'; +execute direct on (datanode1) 'select * from TESTTABLE_t1'; id | num ----+----- (0 rows) -execute direct on (coordinator1) 'execute direct on (coordinator1) ''select * from DTS2018120706312_t1'''; +execute direct on (coordinator1) 'execute direct on (coordinator1) ''select * from TESTTABLE_t1'''; ERROR: EXECUTE DIRECT cannot execute recursively -execute direct on (coordinator2) 'execute direct on (coordinator1) ''select * from DTS2018120706312_t1'''; +execute direct on (coordinator2) 'execute direct on (coordinator1) ''select * from TESTTABLE_t1'''; ERROR: EXECUTE DIRECT cannot execute recursively -execute direct on (datanode1) 'execute direct on (coordinator1) ''select * from DTS2018120706312_t1'''; +execute direct on (datanode1) 'execute direct on (coordinator1) ''select * from TESTTABLE_t1'''; ERROR: EXECUTE DIRECT cannot execute recursively -execute direct on (coordinator1) 'execute direct on (datanode1) ''select * from DTS2018120706312_t1'''; +execute direct on (coordinator1) 'execute direct on (datanode1) ''select * from TESTTABLE_t1'''; ERROR: EXECUTE DIRECT cannot execute recursively -execute direct on (coordinator2) 'execute direct on (datanode1) ''select * from DTS2018120706312_t1'''; +execute direct on (coordinator2) 'execute direct on (datanode1) ''select * from TESTTABLE_t1'''; ERROR: EXECUTE DIRECT cannot execute recursively -execute direct on (datanode1) 'execute direct on (datanode1) ''select * from DTS2018120706312_t1'''; +execute direct on (datanode1) 'execute direct on (datanode1) ''select * from TESTTABLE_t1'''; ERROR: EXECUTE DIRECT cannot execute recursively execute direct on (coordinator1)'select count(*) from gs_wlm_operator_info'; ERROR: relation "gs_wlm_operator_info" does not exist @@ 
-337,5 +337,5 @@ HINT: please use database "postgres" execute direct on (coordinator2)'select count(*) from gs_wlm_operator_info'; ERROR: relation "gs_wlm_operator_info" does not exist HINT: please use database "postgres" -drop table DTS2018120706312_t1; +drop table TESTTABLE_t1; execute direct on (datanode1) ''; diff --git a/src/test/regress/expected/xc_rownum.out b/src/test/regress/expected/xc_rownum.out index 3e0ffdb9e..4a3e81a1a 100755 --- a/src/test/regress/expected/xc_rownum.out +++ b/src/test/regress/expected/xc_rownum.out @@ -10,6 +10,36 @@ select oid as rownum from pg_class; ERROR: ROWNUM cannot be used as an alias LINE 1: select oid as rownum from pg_class; ^ +--test compat +drop table if exists tb_test; +NOTICE: table "tb_test" does not exist, skipping +create table tb_test(c1 int,c2 varchar2,c3 varchar2); +insert into tb_test values(1,'a','b'); +create or replace view v_test as select rownum from tb_test; +\d+ v_test + View "public.v_test" + Column | Type | Modifiers | Storage | Description +--------+--------+-----------+---------+------------- + rownum | bigint | | plain | +View definition: + SELECT ROWNUM AS "rownum" + FROM tb_test; + +set behavior_compat_options = 'rownum_type_compat'; +create or replace view v_test1 as select rownum from tb_test; +\d+ v_test1 + View "public.v_test1" + Column | Type | Modifiers | Storage | Description +--------+---------+-----------+---------+------------- + rownum | numeric | | main | +View definition: + SELECT ROWNUM AS "rownum" + FROM tb_test; + +set behavior_compat_options = ''; +drop view v_test; +drop view v_test1; +drop table tb_test; ------------------------------------ --test the basic function of rownum ------------------------------------ @@ -216,19 +246,19 @@ select rownum, name from (select name from distributors where rownum <= 4 inters select rownum from distributors group by rownum; rownum -------- - 2 - 1 - 4 3 + 1 + 2 + 4 (4 rows) select rownum rn from distributors group by rn; rn ---- - 2 - 1 - 4 3 + 1 + 2 + 4 (4 rows) --test having @@ -244,10 +274,10 @@ select id from distributors group by rownum,id having rownum < 5; select rownum from distributors group by rownum having rownum < 5; rownum -------- - 2 - 1 - 4 3 + 1 + 2 + 4 (4 rows) select id from distributors group by id having rownum < 5; @@ -456,483 +486,562 @@ insert into test values(10, 'test10'); -- operator '<' (with 'and') -- n > 1 explain select * from student where rownum < 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < 5::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +----------------------------------------------------------------- + Limit (cost=0.00..0.26 rows=4 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) +(3 rows) explain select * from student where rownum < 5 and id > 5 and id < 9; - QUERY PLAN -------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM < 5::numeric) AND (id > 5) AND (id < 9)) -(2 rows) + QUERY 
PLAN +--------------------------------------------------------------- + Limit (cost=0.00..18.01 rows=4 width=42) + -> Seq Scan on student (cost=0.00..27.01 rows=6 width=42) + Filter: ((id > 5) AND (id < 9)) +(3 rows) explain select * from student where rownum < 5 and rownum < 6; - QUERY PLAN -------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < 5::numeric) AND (ROWNUM < 6::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 5 and rownum < 6 and rownum < 9; - QUERY PLAN ---------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM < 5::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 5 and rownum < 6 and rownum < 9 and rownum < 12; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM < 5::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) -- n <= 1 explain select * from student where rownum < 1; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 1::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < (-5)::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < (-5)::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5 and id > 5 and id < 9; - QUERY PLAN ----------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM < (-5)::numeric) AND (id > 5) 
AND (id < 9)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5 and rownum < 6; - QUERY PLAN ----------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < (-5)::numeric) AND (ROWNUM < 6::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5 and rownum < 6 and rownum < 9; - QUERY PLAN ------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM < (-5)::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5 and rownum < 6 and rownum < 9 and rownum < 12; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM < (-5)::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- operator '<=' (with 'and') -- n >= 1 explain select * from student where rownum <= 1; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 1::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.02 rows=1 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum <= 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 5::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.09 rows=5 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum <= 5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM <= 5::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +----------------------------------------------------------------- + Limit (cost=0.00..0.32 rows=5 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) +(3 rows) explain select * from student where rownum <= 5 and id > 5 and id < 9; - QUERY PLAN 
--------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM <= 5::numeric) AND (id > 5) AND (id < 9)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------- + Limit (cost=0.00..22.51 rows=5 width=42) + -> Seq Scan on student (cost=0.00..27.01 rows=6 width=42) + Filter: ((id > 5) AND (id < 9)) +(3 rows) explain select * from student where rownum <= 5 and rownum < 6; - QUERY PLAN --------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM <= 5::numeric) AND (ROWNUM < 6::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.09 rows=5 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum <= 5 and rownum < 6 and rownum < 9; - QUERY PLAN ----------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM <= 5::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.09 rows=5 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum <= 5 and rownum < 6 and rownum < 9 and rownum < 12; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM <= 5::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.09 rows=5 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) -- n < 1 explain select * from student where rownum <= -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= (-5)::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum <= -5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM <= (-5)::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum <= -5 and id > 5 and id < 9; - QUERY PLAN ------------------------------------------------------------------ - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM <= (-5)::numeric) AND (id > 5) AND (id < 9)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 
width=42) +(4 rows) explain select * from student where rownum <= -5 and rownum < 6; - QUERY PLAN ------------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM <= (-5)::numeric) AND (ROWNUM < 6::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum <= -5 and rownum < 6 and rownum < 9; - QUERY PLAN -------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM <= (-5)::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum <= -5 and rownum < 6 and rownum < 9 and rownum < 12; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM <= (-5)::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- operator '=' (with 'and') -- n = 1 explain select * from student where rownum = 1; - QUERY PLAN ---------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=6 width=42) - Filter: (ROWNUM = 1::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.02 rows=1 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum = 1 and id > 5; - QUERY PLAN ---------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=2 width=42) - Filter: ((ROWNUM = 1::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +----------------------------------------------------------------- + Limit (cost=0.00..0.06 rows=1 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) +(3 rows) explain select * from student where rownum = 1 and rownum = 2 and id > 5; - QUERY PLAN --------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=1 width=42) - Filter: ((ROWNUM = 1::numeric) AND (ROWNUM = 2::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- n != 1 explain select * from student where rownum = 2; - QUERY PLAN ---------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=6 width=42) - Filter: (ROWNUM = 2::numeric) 
-(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum = 2 and id > 5; - QUERY PLAN ---------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=2 width=42) - Filter: ((ROWNUM = 2::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- operator '!=' (with 'and') -- n = 1 explain select * from student where rownum != 1; - QUERY PLAN ------------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=1128 width=42) - Filter: (ROWNUM <> 1::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum != 1 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=376 width=42) - Filter: ((ROWNUM <> 1::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum != 1 and rownum != 2 and id > 5; - QUERY PLAN ----------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=374 width=42) - Filter: ((ROWNUM <> 1::numeric) AND (ROWNUM <> 2::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- n > 1 explain select * from student where rownum != 5; - QUERY PLAN ------------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=1128 width=42) - Filter: (ROWNUM <> 5::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum != 5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=376 width=42) - Filter: ((ROWNUM <> 5::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +----------------------------------------------------------------- + Limit (cost=0.00..0.26 rows=4 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) +(3 rows) explain select * from student where rownum != 5 and rownum != 8 and id > 5; - QUERY PLAN ----------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=374 width=42) - Filter: 
((ROWNUM <> 5::numeric) AND (ROWNUM <> 8::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +----------------------------------------------------------------- + Limit (cost=0.00..0.26 rows=4 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) +(3 rows) -- n < 1 explain select * from student where rownum != -5; QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=1128 width=42) - Filter: (ROWNUM <> (-5)::numeric) -(2 rows) + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum != -5 and id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=376 width=42) - Filter: ((ROWNUM <> (-5)::numeric) AND (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) explain select * from student where rownum != -5 and rownum != -8 and id > 5; - QUERY PLAN ----------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=374 width=42) - Filter: ((ROWNUM <> (-5)::numeric) AND (ROWNUM <> (-8)::numeric) AND (id > 5)) + QUERY PLAN +----------------------------------------------------------- + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) -- operator '>' (with 'and') -- n >= 1 explain select * from student where rownum > 1; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM > 1::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM > 5::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > 5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM > 5::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > 5 and id > 5 and id < 9; - QUERY PLAN -------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM > 5::numeric) AND (id > 5) AND (id < 9)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > 5 and rownum > 6; - QUERY 
PLAN -------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM > 5::numeric) AND (ROWNUM > 6::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > 5 and rownum > 6 and rownum > 9; - QUERY PLAN ---------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM > 5::numeric) AND (ROWNUM > 6::numeric) AND (ROWNUM > 9::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > 5 and rownum < 6 and rownum < 9 and rownum < 12; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM > 5::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) --n < 1 explain select * from student where rownum > -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM > (-5)::numeric) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum > -5 and id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM > (-5)::numeric) AND (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) explain select * from student where rownum > -5 and id > 5 and id < 9; - QUERY PLAN ----------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM > (-5)::numeric) AND (id > 5) AND (id < 9)) + QUERY PLAN +--------------------------------------------------------- + Seq Scan on student (cost=0.00..27.01 rows=6 width=42) + Filter: ((id > 5) AND (id < 9)) (2 rows) explain select * from student where rownum > -5 and rownum > 6; - QUERY PLAN ----------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM > (-5)::numeric) AND (ROWNUM > 6::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > -5 and rownum > 6 and rownum < 9; - QUERY PLAN 
------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM > (-5)::numeric) AND (ROWNUM > 6::numeric) AND (ROWNUM < 9::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum > -5 and rownum > 6 and rownum < 9 and rownum < 12; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM > (-5)::numeric) AND (ROWNUM > 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- operator '>=' (with 'and') -- n > 1 explain select * from student where rownum >= 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= 5::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= 5 and id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= 5::numeric) AND (id > 5)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= 5 and id > 5 and id < 9; - QUERY PLAN --------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM >= 5::numeric) AND (id > 5) AND (id < 9)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= 5 and rownum > 6; - QUERY PLAN --------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= 5::numeric) AND (ROWNUM > 6::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= 5 and rownum > 6 and rownum > 9; - QUERY PLAN ----------------------------------------------------------------------------------------- - Seq Scan on student 
(cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM >= 5::numeric) AND (ROWNUM > 6::numeric) AND (ROWNUM > 9::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= 5 and rownum < 6 and rownum < 9 and rownum < 12; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM >= 5::numeric) AND (ROWNUM < 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- n <= 1 explain select * from student where rownum >= 1; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= 1::numeric) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum >= -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= (-5)::numeric) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum >= -5 and id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) explain select * from student where rownum >= -5 and id > 5 and id < 9; - QUERY PLAN ------------------------------------------------------------------ - Seq Scan on student (cost=0.00..29.85 rows=2 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (id > 5) AND (id < 9)) + QUERY PLAN +--------------------------------------------------------- + Seq Scan on student (cost=0.00..27.01 rows=6 width=42) + Filter: ((id > 5) AND (id < 9)) (2 rows) explain select * from student where rownum >= -5 and rownum > 6; - QUERY PLAN ------------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (ROWNUM > 6::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= -5 and rownum > 6 and rownum < 9; - QUERY PLAN -------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=42 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (ROWNUM > 6::numeric) AND (ROWNUM < 9::numeric)) -(2 rows) + QUERY PLAN 
+--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum >= -5 and rownum > 6 and rownum < 9 and rownum < 12; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------- - Seq Scan on student (cost=0.00..32.68 rows=14 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (ROWNUM > 6::numeric) AND (ROWNUM < 9::numeric) AND (ROWNUM < 12::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- operator '<' with 'or' -- n > 1 @@ -941,29 +1050,31 @@ explain select * from student where rownum < 5 or id > 5; QUERY PLAN ----------------------------------------------------------- Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM < 5::numeric) OR (id > 5)) + Filter: ((ROWNUM < 5) OR (id > 5)) (2 rows) -- n <= 1 explain select * from student where rownum < -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < (-5)::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < -5 or id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM < (-5)::numeric) OR (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) explain select * from student where rownum < -5 or id > 5 or id < 9; - QUERY PLAN --------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=798 width=42) - Filter: ((ROWNUM < (-5)::numeric) OR (id > 5) OR (id < 9)) + QUERY PLAN +----------------------------------------------------------- + Seq Scan on student (cost=0.00..27.01 rows=630 width=42) + Filter: ((id > 5) OR (id < 9)) (2 rows) -- operator '<=' with 'or' @@ -973,29 +1084,31 @@ explain select * from student where rownum <= 5 or id > 5; QUERY PLAN ----------------------------------------------------------- Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM <= 5::numeric) OR (id > 5)) + Filter: ((ROWNUM <= 5) OR (id > 5)) (2 rows) -- n < 1 explain select * from student where rownum <= -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= (-5)::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum <= -5 or id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student 
(cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM <= (-5)::numeric) OR (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) explain select * from student where rownum <= -5 or id > 5 or id < 9; - QUERY PLAN ---------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=798 width=42) - Filter: ((ROWNUM <= (-5)::numeric) OR (id > 5) OR (id < 9)) + QUERY PLAN +----------------------------------------------------------- + Seq Scan on student (cost=0.00..27.01 rows=630 width=42) + Filter: ((id > 5) OR (id < 9)) (2 rows) -- operator '=' with 'or' @@ -1005,22 +1118,22 @@ explain select * from student where rownum = 5 or id > 5; QUERY PLAN ----------------------------------------------------------- Seq Scan on student (cost=0.00..27.01 rows=382 width=42) - Filter: ((ROWNUM = 5::numeric) OR (id > 5)) + Filter: ((ROWNUM = 5) OR (id > 5)) (2 rows) -- n <= 0 explain select * from student where rownum = 0 or id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=382 width=42) - Filter: ((ROWNUM = 0::numeric) OR (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) explain select * from student where rownum = -1 or id > 5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=382 width=42) - Filter: ((ROWNUM = (-1)::numeric) OR (id > 5)) + Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 5) (2 rows) -- operator '!=' with 'or' @@ -1030,16 +1143,15 @@ explain select * from student where rownum != 6 or id > 5; QUERY PLAN ------------------------------------------------------------ Seq Scan on student (cost=0.00..27.01 rows=1130 width=42) - Filter: ((ROWNUM <> 6::numeric) OR (id > 5)) + Filter: ((ROWNUM <> 6) OR (id > 5)) (2 rows) -- n<1 explain select * from student where rownum != 0 or id > 5; QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=1130 width=42) - Filter: ((ROWNUM <> 0::numeric) OR (id > 5)) -(2 rows) + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) -- operator '>' with 'or' -- n >= 1 @@ -1048,30 +1160,27 @@ explain select * from student where rownum > 5 or id > 5; QUERY PLAN ----------------------------------------------------------- Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM > 5::numeric) OR (id > 5)) + Filter: ((ROWNUM > 5) OR (id > 5)) (2 rows) -- n < 1 explain select * from student where rownum > -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM > (-5)::numeric) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum > -5 or id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM > (-5)::numeric) OR (id > 5)) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum > -5 or id > 5 or id < 9; - QUERY PLAN --------------------------------------------------------------- - Seq Scan on student 
(cost=0.00..29.85 rows=798 width=42) - Filter: ((ROWNUM > (-5)::numeric) OR (id > 5) OR (id < 9)) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) -- operator '>=' with 'or' -- n > 1 @@ -1080,470 +1189,705 @@ explain select * from student where rownum >= 5 or id > 5; QUERY PLAN ----------------------------------------------------------- Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM >= 5::numeric) OR (id > 5)) + Filter: ((ROWNUM >= 5) OR (id > 5)) (2 rows) -- n <= 1 explain select * from student where rownum >= -5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= (-5)::numeric) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum >= -5 or id > 5; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=630 width=42) - Filter: ((ROWNUM >= (-5)::numeric) OR (id > 5)) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where rownum >= -5 or id > 5 or id < 9; - QUERY PLAN ---------------------------------------------------------------- - Seq Scan on student (cost=0.00..29.85 rows=798 width=42) - Filter: ((ROWNUM >= (-5)::numeric) OR (id > 5) OR (id < 9)) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) -- limit explain select * from student where rownum < 5 limit 3; - QUERY PLAN ------------------------------------------------------------------ - Limit (cost=0.00..0.19 rows=3 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) -(3 rows) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(2 rows) explain select * from student where rownum < 3 limit 5; - QUERY PLAN ------------------------------------------------------------------ - Limit (cost=0.00..0.32 rows=5 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 3::numeric) -(3 rows) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.04 rows=2 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(2 rows) explain select * from student where rownum <= 5 limit 3; - QUERY PLAN ------------------------------------------------------------------ - Limit (cost=0.00..0.19 rows=3 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 5::numeric) -(3 rows) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(2 rows) explain select * from student where rownum <= 3 limit 5; - QUERY PLAN ------------------------------------------------------------------ - Limit (cost=0.00..0.32 rows=5 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 3::numeric) -(3 rows) + QUERY PLAN 
+------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(2 rows) -- subqueries explain select * from (select * from student where rownum < 5); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from (select * from student where rownum < 5) where rownum < 9; - QUERY PLAN ------------------------------------------------------------------------------ - Subquery Scan on __unnamed_subquery__ (cost=0.00..28.90 rows=126 width=42) - Filter: (ROWNUM < 9::numeric) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) -(4 rows) + QUERY PLAN +------------------------------------------------------------------------ + Limit (cost=0.00..0.12 rows=4 width=42) + -> Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(3 rows) explain select * from (select * from student where rownum < 5 and id < 7); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < 5::numeric) AND (id < 7)) -(2 rows) + QUERY PLAN +----------------------------------------------------------------- + Limit (cost=0.00..0.26 rows=4 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id < 7) +(3 rows) explain select * from (select * from student where rownum < 3 and id < 10) where rownum < 5; - QUERY PLAN ----------------------------------------------------------------------------- - Subquery Scan on __unnamed_subquery__ (cost=0.00..28.58 rows=42 width=42) - Filter: (ROWNUM < 5::numeric) - -> Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < 3::numeric) AND (id < 10)) + QUERY PLAN +----------------------------------------------------------------------- + Limit (cost=0.00..0.15 rows=2 width=42) + -> Limit (cost=0.00..0.13 rows=2 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id < 10) (4 rows) explain select * from (select * from student where rownum < 3 and id < 10) where rownum < 2 and stuname = 'stu1'; - QUERY PLAN --------------------------------------------------------------------------------------------- - Subquery Scan on __unnamed_subquery__ (cost=0.00..29.86 rows=1 width=42) - Filter: (ROWNUM < 2::numeric) - -> Seq Scan on student (cost=0.00..29.85 rows=1 width=42) - Filter: ((ROWNUM < 3::numeric) AND (id < 10) AND ((stuname)::text = 'stu1'::text)) + QUERY PLAN +------------------------------------------------------------------------ + Limit (cost=0.00..13.51 rows=1 width=42) + -> Limit (cost=0.00..27.01 rows=2 width=42) + -> Seq Scan on student (cost=0.00..27.01 rows=2 width=42) + Filter: ((id < 10) AND ((stuname)::text = 'stu1'::text)) (4 rows) --sublink explain select * from student where id in (select id from test where rownum < 4); - QUERY PLAN -------------------------------------------------------------------- - Hash Semi Join (cost=32.68..63.30 rows=567 width=42) - Hash Cond: (student.id = test.id) - -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) - -> Hash (cost=27.96..27.96 rows=378 width=4) - -> 
Seq Scan on test (cost=0.00..24.18 rows=378 width=4) - Filter: (ROWNUM < 4::numeric) -(6 rows) - -explain select * from student where id in (select id from test where rownum < 4) and rownum < 6; - QUERY PLAN ---------------------------------------------------------------- - Seq Scan on student (cost=25.12..52.13 rows=189 width=42) - Filter: ((hashed SubPlan 1) AND (ROWNUM < 6::numeric)) - SubPlan 1 - -> Seq Scan on test (cost=0.00..24.18 rows=378 width=4) - Filter: (ROWNUM < 4::numeric) -(5 rows) - -explain select * from student where id in (select id from test where rownum < 4) and stuname in (select stuname from student where rownum < 6); QUERY PLAN -------------------------------------------------------------------------------- - Hash Semi Join (cost=65.36..100.63 rows=284 width=42) - Hash Cond: ((public.student.stuname)::text = (public.student.stuname)::text) - -> Hash Semi Join (cost=32.68..63.30 rows=567 width=42) - Hash Cond: (public.student.id = test.id) - -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) - -> Hash (cost=27.96..27.96 rows=378 width=4) - -> Seq Scan on test (cost=0.00..24.18 rows=378 width=4) - Filter: (ROWNUM < 4::numeric) - -> Hash (cost=27.96..27.96 rows=378 width=38) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=38) - Filter: (ROWNUM < 6::numeric) -(11 rows) - -explain select * from student where id in (select id from test where rownum < 4 and id < 7); - QUERY PLAN -------------------------------------------------------------------- - Hash Semi Join (cost=29.84..60.18 rows=567 width=42) + Hash Join (cost=0.16..24.51 rows=567 width=42) Hash Cond: (student.id = test.id) -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) - -> Hash (cost=28.27..28.27 rows=126 width=4) - -> Seq Scan on test (cost=0.00..27.01 rows=126 width=4) - Filter: ((ROWNUM < 4::numeric) AND (id < 7)) + -> Hash (cost=0.12..0.12 rows=3 width=4) + -> HashAggregate (cost=0.09..0.12 rows=3 width=4) + Group By Key: test.id + -> Limit (cost=0.00..0.06 rows=3 width=4) + -> Seq Scan on test (cost=0.00..21.34 rows=1134 width=4) +(8 rows) + +explain select * from student where id in (select id from test where rownum < 4) and rownum < 6; + QUERY PLAN +---------------------------------------------------------------------------- + Limit (cost=0.06..0.28 rows=5 width=42) + -> Seq Scan on student (cost=0.06..24.24 rows=567 width=42) + Filter: (hashed SubPlan 1) + SubPlan 1 + -> Limit (cost=0.00..0.06 rows=3 width=4) + -> Seq Scan on test (cost=0.00..21.34 rows=1134 width=4) (6 rows) +explain select * from student where id in (select id from test where rownum < 4) and stuname in (select stuname from student where rownum < 6); + QUERY PLAN +------------------------------------------------------------------------------------------ + Hash Join (cost=0.43..26.34 rows=284 width=42) + Hash Cond: ((public.student.stuname)::text = ("ANY_subquery".stuname)::text) + -> Hash Join (cost=0.16..24.51 rows=567 width=42) + Hash Cond: (public.student.id = test.id) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) + -> Hash (cost=0.12..0.12 rows=3 width=4) + -> HashAggregate (cost=0.09..0.12 rows=3 width=4) + Group By Key: test.id + -> Limit (cost=0.00..0.06 rows=3 width=4) + -> Seq Scan on test (cost=0.00..21.34 rows=1134 width=4) + -> Hash (cost=0.21..0.21 rows=5 width=38) + -> HashAggregate (cost=0.16..0.21 rows=5 width=38) + Group By Key: ("ANY_subquery".stuname)::text + -> Subquery Scan on "ANY_subquery" (cost=0.00..0.14 rows=5 width=38) + -> Limit (cost=0.00..0.09 rows=5 
width=38) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=38) +(16 rows) + +explain select * from student where id in (select id from test where rownum < 4 and id < 7); + QUERY PLAN +------------------------------------------------------------------------------- + Hash Join (cost=0.30..24.64 rows=567 width=42) + Hash Cond: (student.id = test.id) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) + -> Hash (cost=0.26..0.26 rows=3 width=4) + -> HashAggregate (cost=0.23..0.26 rows=3 width=4) + Group By Key: test.id + -> Limit (cost=0.00..0.19 rows=3 width=4) + -> Seq Scan on test (cost=0.00..24.18 rows=378 width=4) + Filter: (id < 7) +(9 rows) + explain select * from student where id in (select id from test where rownum < 4) and rownum < 6 and id > 3; - QUERY PLAN ------------------------------------------------------------------------ - Seq Scan on student (cost=25.12..54.97 rows=63 width=42) - Filter: ((hashed SubPlan 1) AND (ROWNUM < 6::numeric) AND (id > 3)) - SubPlan 1 - -> Seq Scan on test (cost=0.00..24.18 rows=378 width=4) - Filter: (ROWNUM < 4::numeric) -(5 rows) + QUERY PLAN +---------------------------------------------------------------------------- + Limit (cost=0.06..0.78 rows=5 width=42) + -> Seq Scan on student (cost=0.06..27.07 rows=189 width=42) + Filter: ((hashed SubPlan 1) AND (id > 3)) + SubPlan 1 + -> Limit (cost=0.00..0.06 rows=3 width=4) + -> Seq Scan on test (cost=0.00..21.34 rows=1134 width=4) +(6 rows) -- insert explain insert into test select * from student where rownum < 5; - QUERY PLAN ------------------------------------------------------------------ - Insert on test (cost=0.00..27.96 rows=378 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) + QUERY PLAN +------------------------------------------------------------------------ + Insert on test (cost=0.00..0.12 rows=4 width=42) + -> Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (3 rows) explain insert into test select * from student where rownum < 5 and id > 3; - QUERY PLAN ------------------------------------------------------------------ - Insert on test (cost=0.00..28.27 rows=126 width=42) - -> Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM < 5::numeric) AND (id > 3)) -(3 rows) + QUERY PLAN +----------------------------------------------------------------------- + Insert on test (cost=0.00..0.30 rows=4 width=42) + -> Limit (cost=0.00..0.26 rows=4 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) + Filter: (id > 3) +(4 rows) -- between explain select * from student where rownum between 1 and 5; - QUERY PLAN ---------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= 1::numeric) AND (ROWNUM <= 5::numeric)) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.09 rows=5 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum between 2 and 8; - QUERY PLAN ---------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= 2::numeric) AND (ROWNUM <= 8::numeric)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time 
Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum between -5 and 8; QUERY PLAN ------------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (ROWNUM <= 8::numeric)) + Limit (cost=0.00..0.15 rows=8 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum between -5 and -2; QUERY PLAN --------------------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=126 width=42) - Filter: ((ROWNUM >= (-5)::numeric) AND (ROWNUM <= (-2)::numeric)) -(2 rows) + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) --update explain update student set id = 5 where rownum < 3; - QUERY PLAN ------------------------------------------------------------------ - Update on student (cost=0.00..24.18 rows=378 width=44) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=44) - Filter: (ROWNUM < 3::numeric) + QUERY PLAN +------------------------------------------------------------------------ + Update on student (cost=0.00..0.04 rows=2 width=44) + -> Limit (cost=0.00..0.04 rows=2 width=44) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=44) (3 rows) explain update student set id = 5 where rownum < 3 and rownum < 5; - QUERY PLAN -------------------------------------------------------------------- - Update on student (cost=0.00..27.01 rows=126 width=44) - -> Seq Scan on student (cost=0.00..27.01 rows=126 width=44) - Filter: ((ROWNUM < 3::numeric) AND (ROWNUM < 5::numeric)) + QUERY PLAN +------------------------------------------------------------------------ + Update on student (cost=0.00..0.04 rows=2 width=44) + -> Limit (cost=0.00..0.04 rows=2 width=44) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=44) (3 rows) explain update student set id = 5 where rownum > 3; - QUERY PLAN ------------------------------------------------------------------ - Update on student (cost=0.00..24.18 rows=378 width=44) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=44) - Filter: (ROWNUM > 3::numeric) -(3 rows) + QUERY PLAN +--------------------------------------------------------------------------- + Update on student (cost=0.00..21.34 rows=1 width=44) + -> Limit (cost=0.00..21.34 rows=1 width=44) + -> Result (cost=0.00..21.34 rows=1 width=44) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=44) +(5 rows) --delete explain delete from student where rownum < 3; - QUERY PLAN ----------------------------------------------------------------- - Delete on student (cost=0.00..24.18 rows=378 width=6) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=6) - Filter: (ROWNUM < 3::numeric) + QUERY PLAN +----------------------------------------------------------------------- + Delete on student (cost=0.00..0.04 rows=2 width=6) + -> Limit (cost=0.00..0.04 rows=2 width=6) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=6) (3 rows) explain delete from student where rownum < 3 and rownum < 5; - QUERY PLAN -------------------------------------------------------------------- - Delete on student (cost=0.00..27.01 rows=126 width=6) - -> Seq Scan on student (cost=0.00..27.01 rows=126 width=6) - Filter: ((ROWNUM < 3::numeric) AND (ROWNUM < 5::numeric)) + QUERY 
PLAN +----------------------------------------------------------------------- + Delete on student (cost=0.00..0.04 rows=2 width=6) + -> Limit (cost=0.00..0.04 rows=2 width=6) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=6) (3 rows) explain delete from student where rownum > 3; - QUERY PLAN ----------------------------------------------------------------- - Delete on student (cost=0.00..24.18 rows=378 width=6) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=6) - Filter: (ROWNUM > 3::numeric) -(3 rows) + QUERY PLAN +-------------------------------------------------------------------------- + Delete on student (cost=0.00..21.34 rows=1 width=6) + -> Limit (cost=0.00..21.34 rows=1 width=6) + -> Result (cost=0.00..21.34 rows=1 width=6) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=6) +(5 rows) -- have not been optimized yet explain select * from student where rownum < 6.5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 6.5) + Seq Scan on student (cost=0.00..27.01 rows=378 width=42) + Filter: ((ROWNUM)::numeric < 6.5) (2 rows) explain select * from student where rownum <= 6.5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 6.5) + Seq Scan on student (cost=0.00..27.01 rows=378 width=42) + Filter: ((ROWNUM)::numeric <= 6.5) (2 rows) explain select * from student where rownum = 6.5; QUERY PLAN --------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=6 width=42) - Filter: (ROWNUM = 6.5) + Seq Scan on student (cost=0.00..27.01 rows=6 width=42) + Filter: ((ROWNUM)::numeric = 6.5) (2 rows) explain select * from student where rownum != 6.5; QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=1128 width=42) - Filter: (ROWNUM <> 6.5) + Seq Scan on student (cost=0.00..27.01 rows=1128 width=42) + Filter: ((ROWNUM)::numeric <> 6.5) (2 rows) explain select * from student where rownum > 6.5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM > 6.5) + Seq Scan on student (cost=0.00..27.01 rows=378 width=42) + Filter: ((ROWNUM)::numeric > 6.5) (2 rows) explain select * from student where rownum >= 6.5; QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= 6.5) + Seq Scan on student (cost=0.00..27.01 rows=378 width=42) + Filter: ((ROWNUM)::numeric >= 6.5) (2 rows) -explain delete from student where 3 > rownum; - QUERY PLAN ----------------------------------------------------------------- - Delete on student (cost=0.00..24.18 rows=378 width=6) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=6) - Filter: (3::numeric > ROWNUM) +-- optimize rownum to limit +-- rownum bigint to numeric +select rownum from student where rownum < 6.4; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum < 6.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum <= 6.4; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum <= 6.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum > 0.5; + rownum 
+-------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +select rownum from student where rownum > 1.5; + rownum +-------- +(0 rows) + +select rownum from student where rownum >= 0.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +select rownum from student where rownum >= 1.5; + rownum +-------- +(0 rows) + +set behavior_compat_options = 'rownum_type_compat'; +explain (costs off) select * from student where rownum < 6.5; + QUERY PLAN +--------------------------- + Limit + -> Seq Scan on student +(2 rows) + +explain (costs off) select * from student where rownum <= 6.5; + QUERY PLAN +--------------------------- + Limit + -> Seq Scan on student +(2 rows) + +select rownum from student where rownum < 6.4; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum < 6.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum <= 6.4; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +select rownum from student where rownum <= 6.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +explain (costs off) select * from student where rownum > 6.5; + QUERY PLAN +--------------------------------- + Limit + -> Result + One-Time Filter: false + -> Seq Scan on student +(4 rows) + +explain (costs off) select * from student where rownum >= 6.5; + QUERY PLAN +--------------------------------- + Limit + -> Result + One-Time Filter: false + -> Seq Scan on student +(4 rows) + +select rownum from student where rownum > 0.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +select rownum from student where rownum > 1.5; + rownum +-------- +(0 rows) + +select rownum from student where rownum >= 0.5; + rownum +-------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +select rownum from student where rownum >= 1.5; + rownum +-------- +(0 rows) + +explain (costs off) select * from student where rownum = 6.5; + QUERY PLAN +-------------------------- + Seq Scan on student + Filter: (ROWNUM = 6.5) +(2 rows) + +explain (costs off) select * from student where rownum != 6.5; + QUERY PLAN +--------------------------- + Seq Scan on student + Filter: (ROWNUM <> 6.5) +(2 rows) + +-- reset +set behavior_compat_options = ''; +explain (costs off) delete from student where 3 > rownum; + QUERY PLAN +------------------------------ + Delete on student + -> Seq Scan on student + Filter: (3 > ROWNUM) (3 rows) -explain delete from student where 3 < rownum; - QUERY PLAN ----------------------------------------------------------------- - Delete on student (cost=0.00..24.18 rows=378 width=6) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=6) - Filter: (3::numeric < ROWNUM) +explain (costs off) delete from student where 3 < rownum; + QUERY PLAN +------------------------------ + Delete on student + -> Seq Scan on student + Filter: (3 < ROWNUM) (3 rows) explain delete from student where rownum < 5 or rownum < 6; - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------- Delete on student (cost=0.00..27.01 rows=630 width=6) -> Seq Scan on student (cost=0.00..27.01 rows=630 width=6) - Filter: ((ROWNUM < 5::numeric) OR (ROWNUM < 6::numeric)) + Filter: ((ROWNUM < 5) OR (ROWNUM < 6)) (3 rows) explain delete from student where rownum > 5 or rownum > 6; - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN 
+---------------------------------------------------------------- Delete on student (cost=0.00..27.01 rows=630 width=6) -> Seq Scan on student (cost=0.00..27.01 rows=630 width=6) - Filter: ((ROWNUM > 5::numeric) OR (ROWNUM > 6::numeric)) + Filter: ((ROWNUM > 5) OR (ROWNUM > 6)) (3 rows) -- ROWNUM with type cast explain select * from student where rownum < 3::bigint; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 3::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.04 rows=2 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 3::int4; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 3::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.04 rows=2 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 3::int2; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 3::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.04 rows=2 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 3::int1; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 3::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.04 rows=2 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) -- ROWNUM with LIMIT ALL explain select * from student where rownum <= 3 limit all; - QUERY PLAN ------------------------------------------------------------------ - Limit (cost=0.00..24.18 rows=378 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 3::numeric) -(3 rows) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(2 rows) explain select * from student where rownum <= 18 limit 3.14; - QUERY PLAN ------------------------------------------------------------------ - Limit (cost=0.00..0.19 rows=3 width=42) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 18::numeric) -(3 rows) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(2 rows) -- ROWNUM with constant expression explain select * from student where rownum > 3 + 2; - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM > 5::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where rownum < 3 + 2; - 
QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.08 rows=4 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum < 9 + (-1 * 5); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 4::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where rownum <= 9 + (-1 * 5) and id = 4; - QUERY PLAN ---------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=2 width=42) - Filter: ((ROWNUM <= 4::numeric) AND (id = 4)) -(2 rows) + QUERY PLAN +--------------------------------------------------------------- + Limit (cost=0.00..16.12 rows=4 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=6 width=42) + Filter: (id = 4) +(3 rows) explain select * from student where rownum > -3 + 100 or id = 4; QUERY PLAN ----------------------------------------------------------- Seq Scan on student (cost=0.00..27.01 rows=382 width=42) - Filter: ((ROWNUM > 97::numeric) OR (id = 4)) + Filter: ((ROWNUM > 97) OR (id = 4)) (2 rows) explain select * from student where rownum > -3 + 100.1 or id = 4; -- not optimized QUERY PLAN ----------------------------------------------------------- - Seq Scan on student (cost=0.00..27.01 rows=382 width=42) - Filter: ((ROWNUM > 97.1) OR (id = 4)) + Seq Scan on student (cost=0.00..29.85 rows=382 width=42) + Filter: (((ROWNUM)::numeric > 97.1) OR (id = 4)) (2 rows) explain select * from student where rownum < -2 and id = (select id from student where rownum = 1); - QUERY PLAN ----------------------------------------------------------------- - Seq Scan on student (cost=24.18..51.19 rows=2 width=42) - Filter: ((ROWNUM < (-2)::numeric) AND (id = $0)) - InitPlan 1 (returns $0) - -> Seq Scan on student (cost=0.00..24.18 rows=6 width=4) - Filter: (ROWNUM = 1::numeric) -(5 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) -- ROWNUM and NOT expression explain select * from student where not(rownum < -2); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= (-2)::numeric) -(2 rows) + QUERY PLAN +------------------------------------------------------------ + Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) +(1 row) explain select * from student where not(rownum > 3); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM <= 3::numeric) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=42) (2 rows) explain select * from student where not(rownum < 3 + 2); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on 
student (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM >= 5::numeric) -(2 rows) + QUERY PLAN +--------------------------------------------------------------------- + Limit (cost=0.00..21.34 rows=1 width=42) + -> Result (cost=0.00..21.34 rows=1 width=42) + One-Time Filter: false + -> Seq Scan on student (cost=0.00..21.34 rows=1 width=42) +(4 rows) explain select * from student where not(rownum < 3 and id = 1); QUERY PLAN ------------------------------------------------------------ Seq Scan on student (cost=0.00..27.01 rows=1126 width=42) - Filter: ((ROWNUM >= 3::numeric) OR (id <> 1)) + Filter: ((ROWNUM >= 3) OR (id <> 1)) (2 rows) explain select * from student where not(rownum > 3 or id = 1); - QUERY PLAN ------------------------------------------------------------ - Seq Scan on student (cost=0.00..27.01 rows=374 width=42) - Filter: ((ROWNUM <= 3::numeric) AND (id <> 1)) -(2 rows) + QUERY PLAN +------------------------------------------------------------------ + Limit (cost=0.00..0.06 rows=3 width=42) + -> Seq Scan on student (cost=0.00..24.18 rows=1123 width=42) + Filter: (id <> 1) +(3 rows) -- ROWNUM with ORDER BY explain select * from test where rownum < 5 order by 1; @@ -1552,7 +1896,7 @@ explain select * from test where rownum < 5 order by 1; Sort (cost=40.36..41.30 rows=378 width=42) Sort Key: id -> Seq Scan on test (cost=0.00..24.18 rows=378 width=42) - Filter: (ROWNUM < 5::numeric) + Filter: (ROWNUM < 5) (4 rows) -- ROWNUM with GROUP BY @@ -1562,35 +1906,137 @@ explain select id from test where rownum < 5 group by id; HashAggregate (cost=25.12..25.79 rows=67 width=4) Group By Key: id -> Seq Scan on test (cost=0.00..24.18 rows=378 width=4) - Filter: (ROWNUM < 5::numeric) + Filter: (ROWNUM < 5) (4 rows) -- ROWNUM with UNION and ORDER BY explain select id from student where rownum < 3 union select id from (select id from student order by 1) where rownum < 5; - QUERY PLAN ------------------------------------------------------------------------------------------ - HashAggregate (cost=129.51..137.07 rows=756 width=4) + QUERY PLAN +----------------------------------------------------------------------------------- + HashAggregate (cost=79.04..79.10 rows=6 width=4) Group By Key: public.student.id - -> Append (cost=0.00..127.62 rows=756 width=4) - -> Seq Scan on student (cost=0.00..24.18 rows=378 width=4) - Filter: (ROWNUM < 3::numeric) - -> Subquery Scan on __unnamed_subquery__ (cost=78.87..95.88 rows=378 width=4) - Filter: (ROWNUM < 5::numeric) + -> Append (cost=0.00..79.02 rows=6 width=4) + -> Limit (cost=0.00..0.04 rows=2 width=4) + -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=4) + -> Limit (cost=78.87..78.92 rows=4 width=4) -> Sort (cost=78.87..81.71 rows=1134 width=4) Sort Key: public.student.id -> Seq Scan on student (cost=0.00..21.34 rows=1134 width=4) -(10 rows) +(9 rows) select * from test where id < 2 union select * from (select * from test order by id desc) where rownum < 5; id | testchar ----+---------- 8 | test8 7 | test7 + 9 | test9 1 | test1 10 | test10 - 9 | test9 (5 rows) +-- ROWNUM for Column-Oriented +create table student_cstore1(id int, stuname varchar(10) ) WITH (orientation=column) ; +create table student_cstore2(id int, stuname varchar(10) ) WITH (orientation=column) ; +insert into student_cstore1 select * from student; +-- test rownum for cstorescan +select * from student_cstore1 where rownum < 5; + id | stuname +----+--------- + 1 | stu1 + 2 | stu2 + 3 | stu3 + 4 | stu4 +(4 rows) + +select rownum, * from student_cstore1 where rownum < 1; + 
rownum | id | stuname +--------+----+--------- +(0 rows) + +select rownum, * from student_cstore1 where rownum <= 1; + rownum | id | stuname +--------+----+--------- + 1 | 1 | stu1 +(1 row) + +select rownum, * from student_cstore1 where rownum <= 10; + rownum | id | stuname +--------+----+--------- + 1 | 1 | stu1 + 2 | 2 | stu2 + 3 | 3 | stu3 + 4 | 4 | stu4 + 5 | 5 | stu5 + 6 | 6 | stu6 + 7 | 7 | stu7 + 8 | 8 | stu8 + 9 | 9 | stu9 + 10 | 10 | stu10 +(10 rows) + +select rownum, * from student_cstore1 where stuname = 'stu5' and rownum < 4; + rownum | id | stuname +--------+----+--------- + 1 | 5 | stu5 +(1 row) + +select rownum, stuname from student_cstore1 where stuname = 'stu5' or rownum < 8; + rownum | stuname +--------+--------- + 1 | stu1 + 2 | stu2 + 3 | stu3 + 4 | stu4 + 5 | stu5 + 6 | stu6 + 7 | stu7 +(7 rows) + +-- test rownum for join +insert into student_cstore2 select * from student; +select * from student_cstore2 where rownum > 2; + id | stuname +----+--------- +(0 rows) + +select * from student_cstore2 where rownum = 2; + id | stuname +----+--------- +(0 rows) + +select rownum, sc1.stuname, sc2.id from student_cstore2 as sc1, student_cstore2 as sc2 where sc1.id = sc2.id; + rownum | stuname | id +--------+---------+---- + 1 | stu1 | 1 + 2 | stu2 | 2 + 3 | stu3 | 3 + 4 | stu4 | 4 + 5 | stu5 | 5 + 6 | stu6 | 6 + 7 | stu7 | 7 + 8 | stu8 | 8 + 9 | stu9 | 9 + 10 | stu10 | 10 +(10 rows) + +-- test rownum for agg +select * from (select rownum, max(id) as max_id from student_cstore1 group by rownum) as t order by max_id; + rownum | max_id +--------+-------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 +(10 rows) + +drop table student_cstore1; +drop table student_cstore2; drop table student; drop table test; --test partition table diff --git a/src/test/regress/input/bulkload_compatibility_test.source b/src/test/regress/input/bulkload_compatibility_test.source index 690a29c17..d30cac081 100644 --- a/src/test/regress/input/bulkload_compatibility_test.source +++ b/src/test/regress/input/bulkload_compatibility_test.source @@ -111,7 +111,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_NULL BIGINT POSITION(30,0), C_VARCHAR VARCHAR(50) POSITION(30,30), C_NUMERIC NUMERIC(20,5) POSITION(60,10) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); SELECT * FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; DROP FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; DROP TABLE COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; @@ -136,7 +136,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_CLOB CLOB POSITION(102,29), C_NUMERIC NUMERIC(20,5) POSITION(131,14), C_DP DOUBLE PRECISION POSITION(145,14) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 
'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); SELECT COUNT(*) FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; INSERT INTO COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE SELECT * FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; SELECT COUNT(*) FROM COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; diff --git a/src/test/regress/input/bulkload_compatibility_test_part1.source b/src/test/regress/input/bulkload_compatibility_test_part1.source index ba4b5b1eb..231b6086f 100644 --- a/src/test/regress/input/bulkload_compatibility_test_part1.source +++ b/src/test/regress/input/bulkload_compatibility_test_part1.source @@ -111,7 +111,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_NULL BIGINT POSITION(30,0), C_VARCHAR VARCHAR(50) POSITION(30,30), C_NUMERIC NUMERIC(20,5) POSITION(60,10) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); SELECT * FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; DROP FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; DROP TABLE COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; @@ -136,7 +136,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_CLOB CLOB POSITION(102,29), C_NUMERIC NUMERIC(20,5) POSITION(131,14), C_DP DOUBLE PRECISION POSITION(145,14) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); SELECT COUNT(*) FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; INSERT INTO COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE SELECT * FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; SELECT COUNT(*) FROM COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; diff --git a/src/test/regress/input/ce_alteruser.source b/src/test/regress/input/ce_alteruser.source new file mode 100644 index 000000000..1fd490769 --- /dev/null +++ b/src/test/regress/input/ce_alteruser.source @@ -0,0 +1,21 @@ +\! gs_ktool -d all +\! 
gs_ktool -g +DROP CLIENT MASTER KEY IF EXISTS CMK1 CASCADE; +DROP USER IF EXISTS ce_user1; +CREATE USER ce_user1 PASSWORD 'gauss@123'; +SET SESSION AUTHORIZATION ce_user1 PASSWORD 'gauss@123'; +CREATE CLIENT MASTER KEY CMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY CEK1 WITH VALUES (CLIENT_MASTER_KEY = CMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE t_alteruser (c1 int, c2 text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = CEK1, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO t_alteruser VALUES (1,'1'), (2,'2'), (3,'3'), (4,'4'), (5,'5'); +RESET SESSION AUTHORIZATION; +ALTER USER ce_user1 PASSWORD 'gauss@1234' EXPIRED; +ALTER USER ce_user1 identified by 'gauss@1235' replace 'gauss@1234'; +\! @abs_bindir@/gsql -r -p @portstring@ -d regression -C -U ce_user1 -W 'gauss@1235' -c "select * from t_alteruser order by c1;" +SET SESSION AUTHORIZATION ce_user1 PASSWORD 'gauss@1235'; +DROP TABLE t_alteruser; +DROP COLUMN ENCRYPTION KEY CEK1; +DROP CLIENT MASTER KEY CMK1; +RESET SESSION AUTHORIZATION; +DROP USER IF EXISTS ce_user1; +\! gs_ktool -d all diff --git a/src/test/regress/input/ce_copy.source b/src/test/regress/input/ce_copy.source new file mode 100644 index 000000000..c27488a9a --- /dev/null +++ b/src/test/regress/input/ce_copy.source @@ -0,0 +1,72 @@ +\! gs_ktool -d all +\! gs_ktool -g + +DROP CLIENT MASTER KEY IF EXISTS copyCMK CASCADE; +CREATE CLIENT MASTER KEY copyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY copyCEK WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS CopyFromTbl(i0 INT, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); +COPY copyfromtbl FROM stdin; +5 10 7 +20 20 8 +30 10 12 +50 35 12 +80 15 23 +\. +-- fail error +COPY copyfromtbl FROM stdin; +1 2 3 4 +\. + +SELECT * FROM CopyFromTbl ORDER BY i0; + +COPY copyfromtbl (i0, i1,i2) FROM stdin; +5 10 7 +20 20 8 +30 10 12 +50 35 12 +80 15 23 +\. +SELECT * FROM CopyFromTbl ORDER BY i0; + +-- false +COPY copyfromtbl (i0, i1,i2) FROM stdin; +5 12 7\ +\. + +COPY copyfromtbl (i0, i1,i2) FROM stdin; +5 12\ 7 +\. + +\copy copyfromtbl (i0, i1,i2) FROM stdin; +5 12 7\ +\. + +\copy copyfromtbl (i0, i1,i2) FROM stdin; +5 12\ 7 +\. 
+ +\copy CopyFromTbl FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; +SELECT * FROM CopyFromTbl ORDER BY i0; +\copy (SELECT * FROM CopyFromTbl ORDER BY i2) TO '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; + +copy CopyFromTbl FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; +copy CopyFromTbl (i0, i1,i2) FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; +copy CopyFromTbl TO '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; +copy (SELECT * FROM CopyFromTbl ORDER BY i2) TO '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; + +CREATE TABLE IF NOT EXISTS CopyTOTbl(i0 INT, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=copyCEK, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); +\copy CopyToTbl FROM '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; +SELECT * FROM CopyToTbl ORDER BY i0; +COPY (SELECT * FROM CopyFromTbl ORDER BY i0) TO stdout; + +DROP TABLE IF EXISTS encrypted_tb; +create table encrypted_tb( inv_date_sk integer not null,inv_item_sk integer not null, inv_warehouse_sk integer encrypted with (column_encryption_key = copyCEK, encryption_type = DETERMINISTIC)); +\copy encrypted_tb FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH (delimiter',',IGNORE_EXTRA_DATA 'on'); +select * FROM encrypted_tb ORDER BY inv_date_sk; + +DROP TABLE IF EXISTS encrypted_tb; +DROP TABLE IF EXISTS CopyFromTbl; +DROP TABLE IF EXISTS CopyToTbl; +DROP CLIENT MASTER KEY copyCMK CASCADE; + +\! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/input/ce_create_jdbc.source b/src/test/regress/input/ce_create_jdbc.source new file mode 100644 index 000000000..95ec96ba8 --- /dev/null +++ b/src/test/regress/input/ce_create_jdbc.source @@ -0,0 +1,39 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g +\! gs_ktool -g + +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionCreateTest @portstring@ > /dev/null 2>&1 +INSERT INTO ce_t1 VALUES(1); +INSERT INTO ce_t2 VALUES(1); +INSERT INTO ce_t3 VALUES(1); +INSERT INTO ce_t4 VALUES(1); +INSERT INTO ce_t5 VALUES(1); +INSERT INTO ce_t6 VALUES(1); +SELECT * FROM ce_t1; +SELECT * FROM ce_t2; +SELECT * FROM ce_t3; +SELECT * FROM ce_t4; +SELECT * FROM ce_t5; +SELECT * FROM ce_t6; +DROP TABLE IF EXISTS ce_t1; +DROP TABLE IF EXISTS ce_t2; +DROP TABLE IF EXISTS ce_t3; +DROP TABLE IF EXISTS ce_t4; +DROP TABLE IF EXISTS ce_t5; +DROP TABLE IF EXISTS ce_t6; + +DROP COLUMN ENCRYPTION KEY ImgCEK1; +DROP COLUMN ENCRYPTION KEY ImgCEK; +DROP CLIENT MASTER KEY ImgCMK1 CASCADE; +DROP CLIENT MASTER KEY ImgCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/ce_fetchsize_jdbc.source b/src/test/regress/input/ce_fetchsize_jdbc.source new file mode 100644 index 000000000..9a80394f0 --- /dev/null +++ b/src/test/regress/input/ce_fetchsize_jdbc.source @@ -0,0 +1,26 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g + +\! 
@abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionFetchSizeTest @portstring@ > /dev/null 2>&1 + +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + +SELECT column_name, encryption_type, data_type_original_oid FROM gs_encrypted_columns; + +DROP TABLE IF EXISTS fetchsize_tab; +DROP TABLE IF EXISTS sqlbypassfetchsize_tab; + +DROP CLIENT MASTER KEY FetchSizeCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/ce_mul_query_jdbc.source b/src/test/regress/input/ce_mul_query_jdbc.source new file mode 100644 index 000000000..d9eb9f2fd --- /dev/null +++ b/src/test/regress/input/ce_mul_query_jdbc.source @@ -0,0 +1,27 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g +\! gs_ktool -g + +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionMulSql @portstring@ > /dev/null 2>&1 + +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + +-- supported +select count(*) from test11; + +DROP TABLE IF EXISTS test11; + +DROP CLIENT MASTER KEY MulCMK1 CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/ce_prepare_jdbc.source b/src/test/regress/input/ce_prepare_jdbc.source new file mode 100644 index 000000000..220bbc4c0 --- /dev/null +++ b/src/test/regress/input/ce_prepare_jdbc.source @@ -0,0 +1,25 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g +\! gs_ktool -g + +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionPrepareTest @portstring@ > /dev/null 2>&1 + +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + +DROP TABLE IF EXISTS products; +DROP COLUMN ENCRYPTION KEY pre_ImgCEK; +DROP CLIENT MASTER KEY pre_ImgCMK1 CASCADE; +DROP CLIENT MASTER KEY pre_ImgCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/ce_select_jdbc.source b/src/test/regress/input/ce_select_jdbc.source new file mode 100644 index 000000000..6e12bbd00 --- /dev/null +++ b/src/test/regress/input/ce_select_jdbc.source @@ -0,0 +1,50 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g +\! gs_ktool -g + +\! 
@abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionSelectTest @portstring@ > /dev/null 2>&1 + +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + +-- supported +select count(*) from creditcard_info; +select count(*) from creditcard_info1; +select count(*) from creditcard_info2; +select count(*) from creditcard_info3; +select count(*) from creditcard_info2_1; +select count(*) from creditcard_info3_1; +select count(*) from creditcard_info4; +select count(*) from creditcard_info5; +select count(*) from un_encrypted_table; +select count(*) from batch_table; +select count(*) from table_random; + +DROP TABLE IF EXISTS creditcard_info; +DROP TABLE IF EXISTS creditcard_info1; +DROP TABLE IF EXISTS creditcard_info2; +DROP TABLE IF EXISTS creditcard_info3; +DROP TABLE IF EXISTS creditcard_info2_1; +DROP TABLE IF EXISTS creditcard_info3_1; +DROP TABLE IF EXISTS creditcard_info4; +DROP TABLE IF EXISTS creditcard_info5; +DROP TABLE IF EXISTS un_encrypted_table; +DROP TABLE IF EXISTS batch_table; +DROP TABLE IF EXISTS table_random; + +DROP COLUMN ENCRYPTION KEY ImgCEK1; +DROP COLUMN ENCRYPTION KEY ImgCEK; +DROP CLIENT MASTER KEY ImgCMK1 CASCADE; +DROP CLIENT MASTER KEY ImgCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/ce_transaction_jdbc.source b/src/test/regress/input/ce_transaction_jdbc.source new file mode 100644 index 000000000..7cc0f0137 --- /dev/null +++ b/src/test/regress/input/ce_transaction_jdbc.source @@ -0,0 +1,28 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g + +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionTransactionTest @portstring@ > /dev/null 2>&1 + +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + +SELECT column_name, encryption_type, data_type_original_oid FROM gs_encrypted_columns; + +-- supported +select count(*) from test_table; + +DROP TABLE IF EXISTS test_table; + +DROP CLIENT MASTER KEY TransactionCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/ce_trigger_jdbc.source b/src/test/regress/input/ce_trigger_jdbc.source new file mode 100644 index 000000000..9ec141504 --- /dev/null +++ b/src/test/regress/input/ce_trigger_jdbc.source @@ -0,0 +1,30 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g + +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. 
ClientEncryptionTriggerTest @portstring@ > /dev/null 2>&1 + +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + +SELECT column_name, encryption_type, data_type_original_oid FROM gs_encrypted_columns; + +-- supported +select count(*) from test_trigger_src_tbl; +select count(*) from test_trigger_des_tbl; + +DROP TABLE IF EXISTS test_trigger_src_tbl; +DROP TABLE IF EXISTS test_trigger_des_tbl; + +DROP CLIENT MASTER KEY triggerCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/input/component_view_enhancements.source b/src/test/regress/input/component_view_enhancements.source new file mode 100644 index 000000000..76a217539 --- /dev/null +++ b/src/test/regress/input/component_view_enhancements.source @@ -0,0 +1,81 @@ +select * from gs_stat_undo(); + +select * from gs_stat_wal_entrytable(-1) limit 1; +select * from gs_stat_wal_entrytable(1); +select * from gs_stat_wal_entrytable(-2); + +select * from gs_walwriter_flush_position(); + +select * from gs_walwriter_flush_stat(1); +select * from gs_walwriter_flush_stat(2); + +DROP TABLE IF EXISTS test_ustore; +DROP INDEX IF EXISTS test_ustore_idx; +DROP INDEX IF EXISTS test_ustore_idx2; + +START TRANSACTION; +CREATE TABLE test_ustore (a int, b int ,c int) with(storage_type=ustore); +CREATE INDEX test_ustore_idx ON test_ustore(a); +CREATE INDEX test_ustore_idx2 ON test_ustore(b,c); +INSERT INTO test_ustore values(generate_series(1,1000000),generate_series(1,1000000), generate_series(1,1000000)); +CHECKPOINT; +CREATE OR REPLACE FUNCTION proc_gs_index_verify(tablename in varchar2) +RETURNS SETOF varchar +LANGUAGE plpgsql +AS +$$ +DECLARE + relationOid oid; + stat varchar; +BEGIN + SELECT relfilenode into relationOid from pg_class where relname=tablename; + for stat in ( select status from gs_index_verify(relationOid, 0) ) loop + RETURN NEXT stat; + end loop; + for stat in ( select status from gs_index_verify(relationOid, 1) ) loop + RETURN NEXT stat; + end loop; + return; +END; +$$ +; +SELECT proc_gs_index_verify('test_ustore_idx'); +SELECT proc_gs_index_verify('test_ustore_idx2'); + +DROP TABLE IF EXISTS test_ustore; +COMMIT; + +START TRANSACTION; +CREATE TABLE test_ustore (a int, b int ,c int) with(storage_type=ustore); +CREATE INDEX test_ustore_idx ON test_ustore(a); +CREATE INDEX test_ustore_idx2 ON test_ustore(b,c); +INSERT INTO test_ustore values(generate_series(1,1000000),generate_series(1,1000000), generate_series(1,1000000)); +CHECKPOINT; +CREATE OR REPLACE FUNCTION proc_gs_index_recycle_queue(tablename in varchar2) +RETURNS SETOF varchar +LANGUAGE plpgsql +AS +$$ +DECLARE + relationOid oid; + stat varchar; +BEGIN + SELECT relfilenode into relationOid from pg_class where relname=tablename; + for stat in ( select rblkno from gs_index_recycle_queue(relationOid, 0, 0) ) loop + RETURN NEXT stat; + end loop; + for stat in ( select rblkno from gs_index_recycle_queue(relationOid, 1, 0) ) loop + RETURN NEXT stat; + end loop; + for stat in ( select rblkno from gs_index_recycle_queue(relationOid, 2, 1) ) loop + RETURN NEXT stat; + end loop; + return; +END; +$$ +; +SELECT proc_gs_index_recycle_queue('test_ustore_idx'); +SELECT proc_gs_index_recycle_queue('test_ustore_idx2'); + +DROP TABLE IF EXISTS test_ustore; +COMMIT; \ No newline at end of file 
diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source index 1a7494755..3621f4dde 100644 --- a/src/test/regress/input/copy.source +++ b/src/test/regress/input/copy.source @@ -277,7 +277,7 @@ select * from time_format_his_010_05 order by C_INT; drop table time_format_his_010_05; ---- ---- -create table DTS2016111106370_tbl +create table TESTTABLE_tbl ( dp_demo_sk integer not null, dp_gender char(3) , @@ -299,58 +299,58 @@ dp_text_ts tsquery partition by range (dp_date) ( -partition DTS2016111106370_tbl_1 values less than(1950), -partition DTS2016111106370_tbl_2 values less than(2000), -partition DTS2016111106370_tbl_3 values less than(2050), -partition DTS2016111106370_tbl_4 values less than(2100), -partition DTS2016111106370_tbl_5 values less than(3000), -partition DTS2016111106370_tbl_6 values less than(maxvalue) +partition TESTTABLE_tbl_1 values less than(1950), +partition TESTTABLE_tbl_2 values less than(2000), +partition TESTTABLE_tbl_3 values less than(2050), +partition TESTTABLE_tbl_4 values less than(2100), +partition TESTTABLE_tbl_5 values less than(3000), +partition TESTTABLE_tbl_6 values less than(maxvalue) ) ; -insert into DTS2016111106370_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); -insert into DTS2016111106370_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); -insert into DTS2016111106370_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); -insert into DTS2016111106370_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); -insert into DTS2016111106370_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); -insert into DTS2016111106370_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); -insert into DTS2016111106370_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); -insert into DTS2016111106370_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); -insert into DTS2016111106370_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); -insert into DTS2016111106370_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); -insert into DTS2016111106370_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); -insert into DTS2016111106370_tbl values(12,'F','M','Secondary' ,500,'Good' 
,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); -insert into DTS2016111106370_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); -insert into DTS2016111106370_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); -insert into DTS2016111106370_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); -insert into DTS2016111106370_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); -insert into DTS2016111106370_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ ⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); -insert into DTS2016111106370_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); -insert into DTS2016111106370_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); -insert into DTS2016111106370_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); -insert into DTS2016111106370_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); -insert into DTS2016111106370_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); -insert into DTS2016111106370_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); -insert into DTS2016111106370_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); -insert into DTS2016111106370_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); -insert into DTS2016111106370_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); -insert into DTS2016111106370_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); -insert into DTS2016111106370_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); -insert into DTS2016111106370_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); -insert into DTS2016111106370_tbl values(30,'F','U','College' 
,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); -update DTS2016111106370_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); +insert into TESTTABLE_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); +insert into TESTTABLE_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); +insert into TESTTABLE_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); +insert into TESTTABLE_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); +insert into TESTTABLE_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); +insert into TESTTABLE_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); +insert into TESTTABLE_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); +insert into TESTTABLE_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); +insert into TESTTABLE_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); +insert into TESTTABLE_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); +insert into TESTTABLE_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); +insert into TESTTABLE_tbl values(12,'F','M','Secondary' ,500,'Good' ,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); +insert into TESTTABLE_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); +insert into TESTTABLE_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); +insert into TESTTABLE_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); +insert into TESTTABLE_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); +insert into TESTTABLE_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ 
⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); +insert into TESTTABLE_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); +insert into TESTTABLE_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); +insert into TESTTABLE_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); +insert into TESTTABLE_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); +insert into TESTTABLE_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); +insert into TESTTABLE_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); +insert into TESTTABLE_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); +insert into TESTTABLE_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); +insert into TESTTABLE_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); +insert into TESTTABLE_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); +insert into TESTTABLE_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); +insert into TESTTABLE_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); +insert into TESTTABLE_tbl values(30,'F','U','College' ,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); +update TESTTABLE_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); ---- compressed row relation -alter table DTS2016111106370_tbl set compress ; -select count(*) from DTS2016111106370_tbl; +alter table TESTTABLE_tbl set compress ; +select count(*) from TESTTABLE_tbl; ---- create compressed pages and compressed tuples -vacuum full DTS2016111106370_tbl; +vacuum full TESTTABLE_tbl; ---- copy to 1B/4B varlen values -copy DTS2016111106370_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/DTS2016111106370_tbl.txt' with (encoding 'utf8'); -drop table DTS2016111106370_tbl; +copy TESTTABLE_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/TESTTABLE_tbl.txt' with (encoding 'utf8'); +drop table TESTTABLE_tbl; ---- ---- -CREATE TABLE DTS2016112411747_tbl( c int, d date) ; -COPY DTS2016112411747_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); +CREATE TABLE TESTTABLE_tbl( c int, d date) ; +COPY TESTTABLE_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); 01,2000JAN01 \. 
-SELECT * FROM DTS2016112411747_tbl; -DROP TABLE DTS2016112411747_tbl; +SELECT * FROM TESTTABLE_tbl; +DROP TABLE TESTTABLE_tbl; diff --git a/src/test/regress/input/copy_3.source b/src/test/regress/input/copy_3.source index 8a67b3bea..997af77be 100644 --- a/src/test/regress/input/copy_3.source +++ b/src/test/regress/input/copy_3.source @@ -49,7 +49,7 @@ select * from time_format_his_010_05 order by C_INT; drop table time_format_his_010_05; ---- ---- -create table DTS2016111106370_tbl +create table TESTTABLE_tbl ( dp_demo_sk integer not null, dp_gender char(3) , @@ -71,58 +71,58 @@ dp_text_ts tsquery distribute by replication partition by range (dp_date) ( -partition DTS2016111106370_tbl_1 values less than(1950), -partition DTS2016111106370_tbl_2 values less than(2000), -partition DTS2016111106370_tbl_3 values less than(2050), -partition DTS2016111106370_tbl_4 values less than(2100), -partition DTS2016111106370_tbl_5 values less than(3000), -partition DTS2016111106370_tbl_6 values less than(maxvalue) +partition TESTTABLE_tbl_1 values less than(1950), +partition TESTTABLE_tbl_2 values less than(2000), +partition TESTTABLE_tbl_3 values less than(2050), +partition TESTTABLE_tbl_4 values less than(2100), +partition TESTTABLE_tbl_5 values less than(3000), +partition TESTTABLE_tbl_6 values less than(maxvalue) ) ; -insert into DTS2016111106370_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); -insert into DTS2016111106370_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); -insert into DTS2016111106370_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); -insert into DTS2016111106370_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); -insert into DTS2016111106370_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); -insert into DTS2016111106370_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); -insert into DTS2016111106370_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); -insert into DTS2016111106370_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); -insert into DTS2016111106370_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); -insert into DTS2016111106370_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); -insert into DTS2016111106370_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ 
╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); -insert into DTS2016111106370_tbl values(12,'F','M','Secondary' ,500,'Good' ,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); -insert into DTS2016111106370_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); -insert into DTS2016111106370_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); -insert into DTS2016111106370_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); -insert into DTS2016111106370_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); -insert into DTS2016111106370_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ ⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); -insert into DTS2016111106370_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); -insert into DTS2016111106370_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); -insert into DTS2016111106370_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); -insert into DTS2016111106370_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); -insert into DTS2016111106370_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); -insert into DTS2016111106370_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); -insert into DTS2016111106370_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); -insert into DTS2016111106370_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); -insert into DTS2016111106370_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); -insert into DTS2016111106370_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); -insert into DTS2016111106370_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); -insert into DTS2016111106370_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' 
,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); -insert into DTS2016111106370_tbl values(30,'F','U','College' ,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); -update DTS2016111106370_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); +insert into TESTTABLE_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); +insert into TESTTABLE_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); +insert into TESTTABLE_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); +insert into TESTTABLE_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); +insert into TESTTABLE_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); +insert into TESTTABLE_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); +insert into TESTTABLE_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); +insert into TESTTABLE_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); +insert into TESTTABLE_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); +insert into TESTTABLE_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); +insert into TESTTABLE_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); +insert into TESTTABLE_tbl values(12,'F','M','Secondary' ,500,'Good' ,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); +insert into TESTTABLE_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); +insert into TESTTABLE_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); +insert into TESTTABLE_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); +insert into TESTTABLE_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); +insert into TESTTABLE_tbl values(17,'M','W','Secondary' 
,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ ⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); +insert into TESTTABLE_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); +insert into TESTTABLE_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); +insert into TESTTABLE_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); +insert into TESTTABLE_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); +insert into TESTTABLE_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); +insert into TESTTABLE_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); +insert into TESTTABLE_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); +insert into TESTTABLE_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); +insert into TESTTABLE_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); +insert into TESTTABLE_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); +insert into TESTTABLE_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); +insert into TESTTABLE_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); +insert into TESTTABLE_tbl values(30,'F','U','College' ,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); +update TESTTABLE_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); ---- compressed row relation -alter table DTS2016111106370_tbl set compress ; -select count(*) from DTS2016111106370_tbl; +alter table TESTTABLE_tbl set compress ; +select count(*) from TESTTABLE_tbl; ---- create compressed pages and compressed tuples -vacuum full DTS2016111106370_tbl; +vacuum full TESTTABLE_tbl; ---- copy to 1B/4B varlen values -copy DTS2016111106370_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/DTS2016111106370_tbl.txt' with (encoding 'utf8'); -drop table DTS2016111106370_tbl; +copy TESTTABLE_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/TESTTABLE_tbl.txt' with (encoding 'utf8'); +drop table TESTTABLE_tbl; ---- ---- -CREATE TABLE DTS2016112411747_tbl( c int, d date) distribute by hash(c); -COPY DTS2016112411747_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); +CREATE TABLE TESTTABLE_tbl( c int, d date) distribute by hash(c); +COPY 
TESTTABLE_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); 01,2000JAN01 \. -SELECT * FROM DTS2016112411747_tbl; -DROP TABLE DTS2016112411747_tbl; +SELECT * FROM TESTTABLE_tbl; +DROP TABLE TESTTABLE_tbl; diff --git a/src/test/regress/input/copy_support_transform.source b/src/test/regress/input/copy_support_transform.source index fee717fc0..80d348af1 100644 --- a/src/test/regress/input/copy_support_transform.source +++ b/src/test/regress/input/copy_support_transform.source @@ -75,7 +75,6 @@ select * from copy_transform_explicit_cast order by c_bigint; copy copy_transform_explicit_cast from '@abs_srcdir@/data/copy_transform_explicit_cast.data' TRANSFORM (c_bigint bigint AS c_bigint::text::date) delimiter ','; drop table copy_transform_explicit_cast; -----DTS2021042807RJQKP1F00 CREATE TABLE float_type_t2 ( FT_COL INTEGER, diff --git a/src/test/regress/input/cstore_alter_table10.source b/src/test/regress/input/cstore_alter_table10.source index 5246b95b2..64d9c78bb 100644 --- a/src/test/regress/input/cstore_alter_table10.source +++ b/src/test/regress/input/cstore_alter_table10.source @@ -5,7 +5,7 @@ set time zone 'PRC'; -- -- -CREATE TABLE DTS2016112903778_tbl +CREATE TABLE TESTTABLE_tbl ( D_ID int, D_W_ID int, @@ -14,7 +14,7 @@ D_STREET_1 varchar(20) ) with(orientation = column) ; -COPY DTS2016112903778_tbl FROM STDIN; +COPY TESTTABLE_tbl FROM STDIN; 4 1 cayzjsdtio wiojkmfcmp 10 1 osmgkxgssu qjrlstdsqgwbirqefi 4 2 faelfr kydmzwgnsnrdtpvuztm @@ -116,12 +116,12 @@ COPY DTS2016112903778_tbl FROM STDIN; 2 10 ktmaswdx fdbpzfktvsdxmcerxax 9 10 qnrbwfhzkm scyoylodarv \. -alter table DTS2016112903778_tbl add column d6 decimal(64,10) default null; -SELECT DISTINCT d6 FROM DTS2016112903778_tbl; -DROP TABLE DTS2016112903778_tbl; +alter table TESTTABLE_tbl add column d6 decimal(64,10) default null; +SELECT DISTINCT d6 FROM TESTTABLE_tbl; +DROP TABLE TESTTABLE_tbl; -- -- -create table DTS2016120906561_tbl +create table TESTTABLE_tbl ( D_ID int, D_W_ID int, @@ -131,7 +131,7 @@ D_STREET_1 varchar(20) with(orientation = column) ; -- rows number is (56, 64), and then touch this bug -copy DTS2016120906561_tbl from STDIN; +copy TESTTABLE_tbl from STDIN; 1 1 cayzjsdtio wiojkmfcmp 1 1 osmgkxgssu qjrlstdsqgwbirqefi 1 2 faelfr kydmzwgnsnrdtpvuztm @@ -204,6 +204,6 @@ copy DTS2016120906561_tbl from STDIN; 1 10 luwklnucd ptgijzicsocsyquyglp 1 1 swotbb ogsxoekiohenrovqcr \. -delete from DTS2016120906561_tbl; -alter table DTS2016120906561_tbl add column d11 decimal(32,10) not null; -drop table DTS2016120906561_tbl; +delete from TESTTABLE_tbl; +alter table TESTTABLE_tbl add column d11 decimal(32,10) not null; +drop table TESTTABLE_tbl; diff --git a/src/test/regress/input/db4ai_explain_model.source b/src/test/regress/input/db4ai_explain_model.source new file mode 100644 index 000000000..41a693215 --- /dev/null +++ b/src/test/regress/input/db4ai_explain_model.source @@ -0,0 +1,45 @@ +-- create table +CREATE TABLE kmeans_2d( +id SERIAL, +position DOUBLE PRECISION[] +); +-- insert data +INSERT INTO kmeans_2d( position) +SELECT +ARRAY[ +x + random() * 15.0, +y + random() * 15.0 +]::DOUBLE PRECISION[] AS position +FROM ( +SELECT +random() * 100.0 AS x, +random() * 100.0 AS y +FROM generate_series(1,10) +) AS centroids, generate_series(1,2) i; + +-- clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); +-- enable creating/dropping model audit +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=4206599" > /dev/null 2>&1 +\! 
sleep 1s +-- create model +CREATE MODEL test_explain_model_function USING kmeans +FEATURES position +FROM kmeans_2d +WITH max_iterations=default; + +select substring(gs_explain_model('test_explain_model_function'),0, 196); +-- clean up the model +DROP MODEL test_explain_model_function; + +-- query audit logs +select type, result, object_name, detail_info from pg_query_audit('1012-11-10', '3012-11-11') where type in ('ddl_model'); + +-- clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1 + +-- cleanup tables +DROP TABLE IF EXISTS kmeans_2d; +-- print completion marker +SELECT 'DB4AI EXPLAIN MODEL FUNCTION TEST COMPLETED'; \ No newline at end of file diff --git a/src/test/regress/input/db4ai_gd_houses.source b/src/test/regress/input/db4ai_gd_houses.source index fcf4d31db..734faae70 100644 --- a/src/test/regress/input/db4ai_gd_houses.source +++ b/src/test/regress/input/db4ai_gd_houses.source @@ -65,24 +65,27 @@ CREATE MODEL houses_linregr_ngd USING linear_regression FROM db4ai_houses WITH seed = 1, optimizer='ngd', learning_rate=2.0; --- linear regression with normalized data, pure stochastic gd +-- linear regression with normalized data, pure stochastic gd with arrays CREATE MODEL houses_linregr_gd USING linear_regression - FEATURES tax_n, bath_n, size_n TARGET price - FROM db4ai_houses WITH seed = 1, batch_size=1, learning_rate=1.0; + FEATURES feat_n TARGET price + FROM (SELECT price, ARRAY[tax_n, bath_n, size_n] AS feat_n FROM db4ai_houses) + WITH seed = 1, batch_size=1, learning_rate=1.0; -- just dump some residual SELECT id, abs(target-prediction) as residual FROM ( - SELECT id, price AS target, PREDICT BY houses_linregr_gd (FEATURES tax_n, bath_n, size_n) AS prediction + SELECT id, price AS target, PREDICT BY houses_linregr_gd (FEATURES ARRAY[tax_n, bath_n, size_n]) AS prediction FROM db4ai_houses ) ORDER BY residual DESC LIMIT 3; -- take a look at the model warehouse (skipping time-dependent columns) -SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, modeldata, weight, - hyperparametersoids, coefnames, coefvalues, coefoids, trainingscoresname, trainingscoresvalue, modeldescribe - FROM gs_model_warehouse; +SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight, + hyperparametersnames, hyperparametersoids, hyperparametersvalues, + trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len + FROM gs_model_warehouse + ORDER BY modelname; -- cleanup models in random order DROP MODEL houses_svm_gd; diff --git a/src/test/regress/input/db4ai_gd_pca_train_predict.source b/src/test/regress/input/db4ai_gd_pca_train_predict.source new file mode 100644 index 000000000..de5756f5e --- /dev/null +++ b/src/test/regress/input/db4ai_gd_pca_train_predict.source @@ -0,0 +1,2092 @@ +-- Setting parameters + +SET statement_timeout = 0; +SET xmloption = content; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET search_path = public; +SET default_tablespace = ''; +SET default_with_oids = false; + +-- Creating data tables + +CREATE TABLE multivariate_2_1000_1_test ( + id integer NOT NULL, + "position" double precision[] NOT NULL +) +WITH (orientation=row, compression=no); + +CREATE TABLE multivariate_7_1000_1_test ( + id integer NOT NULL, + "position" float[] NOT NULL +) +WITH (orientation=row, compression=no); + +-- Filling out data tables + +COPY 
multivariate_2_1000_1_test (id, "position") FROM stdin; +191 {-1.28395788469999994,1.21088597980000001} +754 {1.19425879539999991,-1.46282073119999989} +15 {-.29519902739999998,-.121100656299999998} +46 {-2.38693404029999989,.696336447099999978} +500 {-1.3676591923000001,.0823187754000000022} +697 {2.28170664740000007,-2.89199068169999984} +196 {-3.26102742949999991,4.44773826579999998} +422 {-.0490042462999999984,-.219723148700000009} +75 {-.0271333841000000013,-.0364516722000000001} +682 {-.241603210499999999,-.227414200600000005} +711 {-1.42202125999999995,1.6187621713} +66 {-.76548128400000004,1.76116957800000007} +955 {1.34196145589999993,-1.57651638100000002} +459 {-2.43593173219999981,1.05356365139999997} +721 {1.96820081079999998,-3.08008761899999994} +444 {1.25732067309999995,-1.48406108000000003} +25 {1.79220675419999997,-1.94000092150000003} +86 {-1.88889113499999994,2.99511275259999987} +490 {2.29193025570000009,-1.66820332239999991} +344 {.92548643730000002,-3.20081101719999994} +440 {-.221173282500000012,-.683347723799999973} +742 {1.45714704960000008,-1.58310747669999996} +540 {-2.18498851980000008,3.2168583272000002} +87 {-.486877328100000006,-.959177425499999958} +630 {-2.5111159689,3.78727591859999979} +167 {.554421551899999976,-1.6944195554999999} +76 {-.191585446999999992,.333718930299999994} +432 {-1.09309218130000008,1.72754351349999991} +566 {2.0009350872999998,-2.23088176929999982} +470 {1.05056202579999991,1.31570561060000002} +544 {.960320522299999979,-.0594487256000000019} +438 {.188814869999999996,.508771407300000034} +571 {-1.30461262539999989,-.246166091899999995} +960 {1.66222236730000006,-.694658432100000001} +466 {.324395937000000023,.540392038899999982} +650 {-2.01307638129999988,.232861802199999995} +770 {-.84808063460000005,.902713080800000012} +781 {-1.35008083800000001,1.9178592606} +289 {.428973739700000023,-1.67719156129999991} +430 {-1.25745832330000007,2.80550622479999978} +611 {-1.02522744770000007,1.91455811479999993} +975 {-2.42854685419999994,1.80736301590000004} +796 {-1.01232646370000001,1.23291642330000006} +441 {1.55970280650000004,-1.89349824739999995} +178 {1.51177377719999995,1.93591231289999999} +92 {-.741516624900000032,-1.40292512039999995} +765 {1.08252406279999991,-1.8301508633000001} +746 {3.81974757849999991,-3.62636166510000013} +226 {2.72875706479999991,-.630334474300000003} +24 {-.536654480499999975,.0242611197000000001} +271 {.566744937299999973,-.282466873900000026} +465 {2.18827939059999999,-2.1099791307000002} +889 {-1.29071236589999994,1.06636871609999995} +772 {-1.85647621649999994,1.36271395719999999} +849 {.586647819800000025,-1.6222139082} +669 {1.62772047909999995,-1.59346481940000007} +43 {-3.09068480949999991,1.22220174429999995} +904 {.160835284800000006,-3.50341377320000014} +393 {-.16447156369999999,-.114725925699999995} +556 {1.94407216180000009,-.0512013804000000031} +399 {1.72982539969999993,-1.28184037560000008} +891 {-3.30811295280000017,1.69512221349999992} +327 {.763427377099999971,.501713003700000049} +528 {.0528269090999999996,-.314907744300000014} +818 {-1.60949493859999992,2.63706280039999985} +294 {3.67082382360000015,-2.4702123009000001} +16 {-.19760758149999999,-3.49448243289999994} +395 {-1.62707497729999995,2.82390402969999998} +600 {3.33093015429999983,-4.52216205630000001} +487 {-.175292582700000005,-.811823014500000051} +255 {-.531747241699999984,.277507535199999988} +283 {-1.3462419402000001,1.38036199339999999} +783 {.57864486989999997,-2.05935914430000011} +84 
{.265940139700000022,1.20011281310000006} +193 {2.35072094160000011,-1.64516313020000005} +115 {.470853314299999992,-.741710963199999962} +331 {-3.8014398184,5.96650481999999993} +931 {-1.01554300490000005,.612183452599999955} +672 {.463368616200000027,2.39288342409999988} +752 {.345742839199999985,1.24383104250000009} +339 {.66372913389999999,-.498199713699999991} +668 {1.69952446410000002,-.245993335099999988} +254 {-1.4274680393000001,2.34747861829999982} +214 {-.0182849095999999996,.986712963100000007} +845 {.709429564100000021,-1.30717723620000004} +122 {-1.76681655630000001,-.827722344400000032} +110 {.426291611000000015,1.06283746480000008} +506 {-.896608833399999972,3.20995462620000005} +994 {.444467785999999976,-2.47387391560000003} +95 {-.302852779299999986,-.475120953600000007} +523 {-1.20747768519999998,1.3060419699000001} +230 {-1.1602228672999999,4.57954011370000025} +455 {-1.4781425828000001,2.06295111649999985} +841 {-.245067242200000007,.275478034799999993} +592 {2.60940405440000012,-3.29973785530000008} +791 {-.400619269300000025,2.06457246549999995} +532 {-.851669878699999994,1.94696539750000008} +777 {-1.78693182090000002,3.22678521279999986} +535 {1.04945171449999997,.0906670891999999951} +224 {-.793826075699999967,1.04199637559999991} +451 {-2.35724375790000007,-.338309223400000025} +599 {-1.20140375740000005,3.97798077450000021} +242 {-.0657231142000000018,-.760352303699999954} +309 {.98680506059999995,1.39226249949999992} +495 {.220754289999999992,-.704631171800000011} +537 {-1.33859211290000002,.978834573600000035} +644 {1.50558516129999997,-1.29890751290000006} +946 {-.246603692499999999,-.219423442400000002} +219 {-.233778942200000006,-.196017121200000005} +464 {-2.26371453679999979,2.28473358970000007} +887 {.418974025699999975,1.52942773710000002} +576 {1.25432623959999989,-1.82535615019999997} +351 {-1.58147269860000006,2.60557885430000002} +181 {2.01137676879999994,-2.06761885620000019} +265 {-1.77028504760000005,4.33568724189999966} +554 {1.25631111839999998,-2.21642162149999988} +394 {-1.67445844910000008,2.29776674380000001} +439 {-.26892269860000001,1.04589602780000002} +665 {-1.83946045520000001,1.48892491799999993} +478 {-1.13091999380000008,1.78304677349999996} +370 {.64272730339999995,-2.93946550920000016} +8 {-.91218139340000004,.79678157390000004} +2 {1.42614182,-3.0705532658000001} +877 {-2.69421257350000021,2.33301942059999989} +942 {1.08133114530000007,-.0432086698000000019} +306 {-.710215206500000029,1.75604903679999991} +316 {-2.27833241700000011,.606106020499999953} +319 {-2.53940161540000009,2.04547924940000003} +907 {.995256602300000015,1.1533793911000001} +48 {2.68412757270000002,-2.15283213550000019} +266 {.760259136199999963,.966432978000000054} +782 {1.34250988279999994,.276165725899999992} +900 {-.166894654499999989,.598434007599999984} +5 {-.025230713200000001,-1.84650202340000003} +69 {.0585877716999999992,.676880640299999947} +365 {1.97025445659999998,-3.00925171249999979} +633 {3.31835205799999988,-2.21107786289999986} +792 {1.25303046670000007,-.722036953599999975} +494 {-1.92125598410000009,3.99249693390000004} +609 {.853610241199999953,-.0544059845999999986} +357 {-2.04985432080000018,.220458657800000013} +239 {1.11649594820000009,-2.7038974777} +625 {-.587973298600000027,1.41822535640000003} +382 {.356264024100000021,-2.29251995919999985} +212 {-.434194746999999992,.931818663199999953} +429 {-.409233341899999992,.207673302700000006} +421 {.0795121649000000014,-1.12414898890000003} +39 {2.57476224120000019,.310489898799999997} 
+361 {-.0924212853999999973,-1.74520636530000006} +714 {1.63560037390000002,-1.6738569620999999} +334 {.0902087196000000008,-.509687431899999988} +231 {-1.71071087979999992,2.48832352330000006} +793 {2.08744087150000013,1.35436804050000004} +515 {-1.71983919530000007,-.380012427400000019} +659 {.335547969599999996,.395733359600000001} +947 {-.740700377000000021,2.14766016140000016} +895 {-1.88323393169999997,1.29997236130000005} +151 {-.189535519099999994,-2.01026533639999982} +834 {-.933398320600000053,-.453585175499999993} +549 {.834313665200000054,1.5963555839000001} +508 {.578099325000000053,.2638312518} +673 {2.87374749539999996,-2.80582682969999997} +1 {2.3938695608999998,-1.00708501739999989} +114 {-.175283285499999997,-3.46051702679999984} +512 {-.238453008399999999,.998005507300000039} +389 {1.86826955789999993,-2.15784513219999985} +183 {-.391655405899999975,2.26338763790000019} +526 {1.19408540609999991,-1.12049863180000009} +521 {-.196335958599999999,1.75976672300000003} +270 {-1.48210197149999989,3.33210712240000007} +736 {.0737160240999999994,1.5998336111} +286 {-.630752629900000028,1.66112903690000002} +22 {2.96043228429999994,-3.85393519889999991} +716 {-.243853311099999998,.967507819100000055} +428 {.598036349200000039,-2.36317155909999999} +828 {.213708539400000008,-.736310576399999972} +124 {-.510681663499999994,-.513317111400000026} +688 {-2.37771247789999984,1.68546751109999993} +252 {-1.84580008879999991,1.64396609370000002} +49 {.259077087900000003,1.61855731730000008} +954 {-2.2997981985,2.20671321970000012} +112 {-2.08848383300000018,-.0158348152000000013} +565 {.747813525600000029,-1.64837849199999997} +81 {-1.52231143649999989,2.19232987799999979} +136 {.572981696599999979,-.52596487380000001} +728 {-.895489882899999978,1.42204732220000007} +307 {-.262716151200000025,1.6520998737999999} +902 {-2.59992228770000011,1.97991843680000001} +237 {1.04715387060000009,-1.6961429737} +185 {-.820942674700000041,2.52408746720000021} +107 {1.47981861259999992,-2.23107048060000013} +999 {2.2356784073,-4.50654378970000025} +653 {.729555020099999951,-.913381380999999992} +584 {.231215349999999986,-1.51903720089999994} +282 {.262260639399999995,1.41929606850000001} +268 {.689474037899999947,-.426530990499999985} +400 {.337865357099999986,-.353513274599999983} +755 {.489223683999999992,-.292672437100000016} +308 {-1.14362098200000006,1.38909660439999993} +52 {3.0733917867999998,-3.18487167990000009} +146 {.141298616300000013,-.990323678500000026} +896 {.287308843199999997,.37498214940000002} +145 {.540992889300000002,-.642856828899999955} +704 {-.890641095799999949,.291544447500000026} +966 {-1.04001962820000005,-.473145490800000013} +917 {1.66656242799999998,-3.38441540869999979} +749 {.673237218300000051,-2.36337477779999983} +340 {-1.51062519500000003,2.55550733400000007} +321 {-.272351021999999998,1.25903154699999997} +113 {-.310274010699999991,-2.48651284040000009} +847 {1.60138216870000005,-2.62984807869999981} +629 {.796321362200000049,-2.26394623689999985} +817 {2.49933962079999983,-.413356510100000019} +403 {1.94844556990000006,-1.21508226769999994} +256 {.502430456100000034,.523255457500000021} +573 {-1.08064884500000002,1.32200302750000009} +411 {1.92447163189999992,-4.56275085650000012} +45 {.263621757899999976,-.796370069299999983} +671 {-.811942253899999966,.540455845500000032} +726 {-1.30090097949999994,.598743805200000034} +353 {-1.54437903229999995,2.6133750871000001} +550 {2.34240532470000007,-2.27694573740000017} +715 {-.377355182499999997,.509994501899999952} 
+384 {-.686571967100000013,.490268592299999972} +868 {1.4786644404,3.0852904956999998} +85 {-.630051586200000013,-.45781933520000001} +541 {1.34832466000000006,-1.04252528969999991} +388 {.224985178099999988,-1.05737545569999991} +486 {.624900183499999984,-2.56465028160000008} +55 {2.11781590050000013,-3.74502968969999994} +169 {.155073711599999997,.0478558443999999999} +569 {-.611345750300000046,-2.08602999000000011} +741 {-2.97957912079999998,2.7783274963000002} +780 {-.214568128100000005,-2.34544161110000005} +4 {-.0558881077000000018,1.33575016760000009} +144 {1.66620214259999999,.936217061699999964} +240 {.885458842599999962,-1.1833027586} +910 {1.68646559280000008,-1.31464152589999994} +759 {.808173972500000004,.316568965199999997} +129 {1.66697810599999996,-1.62405132890000004} +150 {.0696965768000000069,1.14327686969999998} +619 {.426066434599999988,-1.30023164759999998} +396 {-1.52843807030000001,3.0991212196000002} +14 {-.206206704800000001,2.30400236769999989} +867 {.326367018200000025,.273774945800000002} +869 {.217793450500000013,.244967441899999999} +775 {-.919115258000000046,.944265380000000043} +735 {1.6898526168000001,-3.28268984709999989} +546 {-.338925185100000026,.674304815499999988} +37 {-.122138024499999998,2.2612473327} +883 {.336950356000000006,-2.54337018020000016} +510 {-2.2953480706999998,3.0605767479999999} +812 {-.163009823800000009,-.864804415599999987} +643 {2.01611172570000008,-3.0269810936999999} +564 {.384596553100000016,-.0125623964999999997} +710 {1.5245357974,2.36348668129999995} +820 {-1.28625490139999998,1.47810191700000004} +731 {-1.8702293311,2.49184586639999983} +419 {1.17126268680000001,-1.98729906699999992} +29 {1.0742102536,-3.19563043969999994} +788 {-.602505907399999985,.340124644400000009} +677 {-1.05180917790000006,1.83753482009999991} +238 {-2.33506398879999999,.524829422000000045} +764 {.444508578900000006,1.39900220950000009} +872 {-.968158961300000009,-1.29738036290000003} +689 {.149353389800000014,2.35986766820000016} +761 {1.77557825549999992,-.790565468800000004} +622 {.062124813000000001,2.23787365229999979} +472 {2.17773107719999981,-2.11136806880000005} +180 {.793648410900000045,1.2969033459999999} +635 {-.0620476141000000023,4.1883346033000004} +51 {.662454575100000054,2.33561084659999985} +277 {-.773696060899999982,-.729667266900000056} +397 {-.83133848779999997,1.07903760700000007} +332 {-.206301179299999998,1.50955159069999989} +977 {-.979504158000000014,1.43924972210000002} +74 {-3.45372116069999979,1.59310038889999994} +262 {-1.40970775930000003,1.03433594659999994} +802 {.744110426899999955,-2.57885566049999992} +634 {-2.79883187730000005,2.67660151980000016} +73 {-.111713109500000005,-.157526771499999996} +897 {-1.15015635129999994,1.31388587169999993} +661 {.918590847499999974,-1.73201895040000009} +860 {-2.11862060380000017,2.07101520219999991} +468 {2.39882886739999979,-1.39653758799999994} +517 {-.999216641700000019,1.67841020069999991} +923 {.343472094200000022,-.232770030500000003} +189 {-.109644914499999996,1.34293515950000009} +827 {-.283525729900000023,2.62440133489999994} +971 {-1.85917199430000002,2.56090061399999991} +19 {2.25498819470000011,-2.50970242449999992} +778 {-3.42291932090000017,3.32737058759999993} +814 {.919614120900000054,-2.28429251769999997} +502 {-1.13845436500000008,.479554684399999986} +756 {.418556058700000011,.156811592999999999} +106 {1.85916346020000001,-.491932400599999986} +855 {.257769881599999973,-3.94835095999999997} +717 {.485476309100000003,-.706738055800000042} +3 
{-3.42653783879999985,1.97587818209999999} +799 {1.08119944060000006,.424623503000000013} +343 {-2.6454066330999999,1.44285073410000009} +172 {-3.23292748519999984,1.33664859860000007} +431 {-.283598870499999989,-1.37524241699999994} +423 {-.711817279999999997,.842934318299999985} +281 {-1.64168390869999992,2.80498845129999985} +706 {1.94962518819999997,-2.91501222800000015} +972 {1.0199229917999999,-2.07666423740000017} +552 {2.23630228049999991,-1.4362000104999999} +171 {.0466317390999999998,-.120960774199999996} +786 {.681537279399999973,-1.68215668000000007} +248 {1.8329216801999999,-1.39540845299999994} +748 {1.75316907020000001,-3.88747790399999982} +345 {.203386684099999993,.530183084499999957} +922 {.805641928000000007,-.318164653899999983} +134 {-.781764000200000031,-.129045879499999988} +378 {-.202022531899999996,-1.00598844919999997} +585 {-.944216283499999975,-.16717898719999999} +674 {.454221314099999995,.960655990599999954} +915 {-1.62667536180000005,2.45933538599999979} +690 {1.98067208769999992,-.450538485200000005} +505 {.0688504340999999964,.649437606200000017} +137 {1.7344848746999999,-1.87043355880000006} +893 {1.61161819920000005,-2.230891395} +462 {-.247176995499999996,2.37578169079999979} +880 {-.250056951600000021,2.77565318049999998} +838 {1.69228140430000007,-3.25570115089999979} +133 {.461480290100000023,-.147614105100000004} +989 {-.424400530300000023,-1.31530091309999997} +456 {.465608618299999999,2.38335494300000006} +381 {-1.20889909669999995,.547582625799999945} +884 {-1.07959341489999994,2.50363604519999994} +269 {1.4804604695000001,1.35881218789999991} +606 {2.55029783489999984,-3.02883120690000007} +161 {1.43698358220000011,-2.54443077039999999} +200 {.481377221000000022,-1.63505992189999994} +908 {-.609111258800000055,1.52949891580000008} +740 {.00593412350000000009,.604544316300000051} +121 {1.74165644419999999,-5.37270571929999985} +723 {-.2481071309,1.70770914080000002} +302 {.944233511800000036,1.20459260769999998} +516 {2.84427235089999986,-1.48836066659999999} +727 {.591920568699999983,.968137980399999987} +930 {-.401681080400000001,-.768950586200000008} +42 {-3.35547131919999986,2.0088079057999999} +204 {-.370839704900000011,.845165766799999996} +798 {-1.46232728570000003,2.13441130120000011} +543 {2.31792786129999984,-3.74244523320000022} +504 {-.327582267400000016,.507315425399999964} +965 {-.680753260699999974,.545732883200000018} +60 {-.519555234000000032,3.18892226580000004} +733 {.977312908899999999,-.632239574800000037} +858 {.00838962489999999991,2.80268884269999985} +846 {.315674569899999979,-.392684022500000007} +322 {.668593744800000001,.49344914379999999} +998 {-.728393241100000033,2.01149669260000019} +926 {-.421472491599999999,1.57962918870000002} +589 {.381058306199999974,.652281057499999983} +209 {-.136915833999999986,-.0201990473999999996} +71 {-.922138249700000023,3.33938464040000005} +582 {2.47396153549999998,-1.21140155680000006} +18 {-.323354579699999978,1.30910027909999993} +810 {-.0392295334000000023,-1.33192986370000011} +366 {1.22655533330000011,-.744431610899999985} +879 {-3.02083931999999988,2.22093467130000022} +434 {1.47148195860000008,.155944910500000006} +890 {-.478108445400000026,-1.89066600410000007} +757 {-.26273152449999998,-.0855207035000000032} +313 {-2.62677181219999989,-.130962975600000003} +974 {-1.48157313220000009,5.30190015989999974} +533 {1.97116654679999992,-1.81275191370000011} +695 {-.97657250470000001,.505141600700000026} +485 {-.439133487699999991,-.265581894699999987} +100 
{.0944650617000000054,.705000842100000025} +769 {-.329853972200000012,-.579609199700000022} +940 {-1.21596819210000007,.486061468399999974} +251 {-.552910653199999991,1.59063431979999992} +988 {-.221429105099999995,1.06296142150000006} +970 {-.916359481600000025,1.9962170426000001} +216 {.649535280099999945,-.349064818700000001} +773 {-.927933219000000031,-.251837046200000025} +40 {.954274280100000039,-2.33609109439999996} +587 {-1.1629246083,3.9696077498000002} +229 {-1.26287691089999998,3.34898474889999997} +443 {.21589955529999999,-.946682342400000043} +26 {-.0662362274000000001,1.20839932639999992} +632 {1.91799165560000007,-1.85644199330000004} +617 {-1.20020367550000007,.31414574839999998} +639 {1.03993926470000009,-1.90312013429999993} +805 {1.61261763239999989,-1.01160128379999992} +379 {1.17061426000000002,-.418684823799999994} +61 {.582709219099999953,-1.94548264350000011} +822 {1.66922638979999993,-.606543264000000026} +539 {1.00769232629999994,-.21134660999999999} +493 {-.743236582700000037,-.820756679900000008} +141 {-1.31937838850000011,3.94832966229999993} +135 {1.97421468030000002,-3.34006665780000001} +94 {-1.20340676030000004,2.83338817209999982} +449 {-.156805327299999991,-.538966882999999952} +613 {-.727195809999999998,.43441230019999999} +218 {.378433428299999985,-1.78288639230000001} +524 {-.658362522999999977,-.313454905800000017} +310 {-.300634207799999997,.355932482400000028} +199 {-3.11740458799999987,-.685097447400000004} +832 {.0683143229000000018,1.18617819619999998} +264 {1.41874847630000001,-.778940037200000046} +453 {.454816186700000014,-2.19231130259999984} +873 {-1.61394100389999995,.0494405117000000019} +152 {.966238007299999979,-.845546308900000043} +32 {-1.97357991290000001,4.1752487532} +657 {-.0653927074999999941,-1.13910779999999989} +188 {1.54196004529999997,-2.4356833256999999} +148 {1.21922454879999997,-1.96280509900000011} +447 {-.0848792359999999968,1.0579347590999999} +918 {-1.58455601800000001,-2.21173949079999987} +450 {-.111362591999999996,-2.4412459166999998} +442 {-3.06842361369999983,1.54482212329999991} +473 {2.11411544810000018,-1.11437178430000006} +285 {-.12651986870000001,.871117955400000032} +863 {-1.7603566717000001,1.88947875139999999} +358 {2.56938422109999998,-3.47732039100000012} +118 {-.186691767000000008,1.03373657889999992} +995 {.202426161800000004,.888941313500000052} +990 {1.75342230320000003,-.946503592699999952} +870 {1.12483459530000007,-2.5160215266999999} +143 {.793718877899999953,-.57817706889999998} +862 {.0577172655000000034,.987374408099999989} +72 {2.16407892480000008,-1.37338824420000005} +318 {-.128565860199999993,1.80560310559999992} +83 {1.02121287959999996,-1.20640826400000001} +215 {.759934308800000013,.91255349259999996} +97 {2.93388999339999978,-1.26017164939999993} +578 {1.71977145619999994,-2.40661700880000007} +260 {-1.07311702340000004,1.91063029719999999} +337 {2.61563580520000016,-2.5765221036999999} +89 {1.66368667160000006,-1.36249424070000003} +577 {.478561168599999975,1.27029208869999999} +160 {-.434174321400000007,-3.44706167909999994} +175 {-.829963683799999963,4.01102091240000025} +336 {-1.79370827360000007,-.437508771399999996} +10 {.875679537500000049,-1.28476430680000009} +797 {-2.47569363300000012,2.23113818720000001} +563 {-1.40415717309999999,2.8537049485999999} +859 {-.858362854200000025,-1.04315602250000006} +341 {-.733274060599999999,.212562264899999992} +830 {.111302741900000002,2.61281514790000013} +768 {.681785477699999976,1.50721073829999996} +267 
{-1.80943831179999992,.124381010099999995} +561 {-.341663560500000019,-.852945769700000023} +610 {.828065622699999948,-1.84800063689999994} +70 {4.04079761589999986,-4.43778839580000017} +927 {-1.32833431860000006,-1.80534990259999994} +335 {.337438008800000022,1.83315708059999993} +317 {2.12541717529999996,.0351635807000000022} +795 {-.953906818999999961,-.199204198599999993} +598 {.96710746940000003,1.43919010199999997} +420 {1.5227681529999999,-2.16960989480000022} +914 {.1986645675,-.242438661500000013} +436 {-.528520802800000022,-2.04019454559999991} +458 {-.951222394099999979,1.95551209199999998} +425 {2.10898845509999999,-1.14064671779999993} +225 {1.25987041209999995,-2.0830732906999998} +405 {1.13230745700000002,.651345956999999975} +36 {1.10651618230000004,-2.11195691140000008} +654 {.255654615500000015,-1.86314849019999995} +666 {-.198315027499999991,-.824704063600000037} +163 {.77807429370000003,-2.0260589856000002} +574 {2.07255421659999994,-.905766733300000015} +527 {-.497396918899999985,-.495894923900000006} +362 {-.975239133700000038,-.394375845000000003} +103 {.934046235599999997,.184238147099999999} +784 {.255846188699999977,-.543032645699999983} +886 {1.48631310310000009,-1.73695381839999996} +376 {-.471098004100000023,2.28808836040000019} +916 {-.163987925499999992,-2.3793319543} +385 {-.685442682200000042,1.39653053700000007} +536 {.904018197299999993,-.927191429600000006} +996 {4.2631805786000001,-4.31097553680000001} +591 {-.185047285899999997,1.42953480599999994} +667 {-.0915438756999999992,.625989071400000041} +417 {1.13163906200000008,-.676996176299999974} +984 {-1.8873294489000001,.0100732904000000005} +685 {.634863018100000009,-.173547596500000012} +686 {1.05018551210000011,-1.19886212099999989} +329 {-.149765000000000009,-1.58657303439999997} +350 {1.2721512223,.254832208999999976} +454 {-.970064045400000019,.300042676600000002} +771 {3.79074505740000012,-4.09664252029999965} +545 {.829269365499999966,3.12915133359999986} +192 {.351961777400000009,.0061772703999999996} +642 {.458014670800000023,1.53812351559999994} +787 {-2.56223330830000018,2.78259459690000011} +88 {1.80109409740000004,-3.96442355910000011} +38 {.56867955020000005,4.32010260840000004} +983 {.265521240499999978,-2.47599773969999992} +511 {-1.4127584675,.0277201364999999991} +555 {2.09790366209999979,1.02467832550000004} +234 {-1.67352541849999992,4.95366585629999978} +794 {.988187483499999963,-1.91636452980000005} +170 {2.09872381670000019,-4.04649418540000028} +404 {-1.97172000730000008,1.10699664509999995} +853 {-.180225303100000012,3.21344907179999995} +91 {2.0429626610999998,-2.08063515190000015} +901 {-.399292148399999991,-1.85077052590000002} +53 {-1.06409857810000008,-.927879309699999988} +257 {-1.31258105980000006,1.63578298750000006} +641 {2.16647494700000021,-3.96831863950000008} +691 {1.92466593199999991,-.971833334900000012} +626 {-.711793678299999955,.990216369200000002} +747 {-1.47879292899999992,4.71057265489999999} +699 {.658609143399999986,-2.86657326130000012} +992 {-.898496129099999985,.133987614000000005} +244 {-.68115192729999996,-.534307074300000018} +220 {-.884900571700000027,-.562025912299999986} +709 {-.0386670212999999999,.638295870999999959} +63 {1.29711930919999996,.777365394299999957} +243 {-.674159663399999998,.828863573699999967} +326 {-1.67220163570000002,2.58386346999999983} +292 {.656119107099999987,2.61645543699999994} +603 {.566241861700000015,.794640664200000013} +153 {.352220432500000014,1.34399109949999995} +953 {.0850627181000000038,-.348727691200000023} 
+595 {-.572515204999999971,.909159649799999969} +719 {1.81804964490000009,-.376018967900000001} +41 {.470894733999999981,.249694486100000002} +529 {2.22937987689999995,-3.82214754409999991} +920 {.425686285599999992,.868767655400000005} +839 {1.11011048190000006,-3.00766013169999979} +223 {-2.61502305469999996,1.47163384330000002} +108 {.922908157900000026,.411573020700000014} +572 {-.519502517399999975,1.55189423860000009} +278 {-.418956105299999992,-.470169112599999994} +750 {1.98539754249999989,-2.72198523760000022} +744 {.0136555519,-.399098684500000023} +424 {-1.5245441153999999,-1.09120469689999999} +79 {.0183465547,1.9484779698000001} +987 {-.149609972899999999,-1.35309056639999992} +531 {2.59800575740000017,.0647810954999999966} +627 {.58000471720000002,-.289000565499999973} +96 {-1.1108223008,1.70768101940000006} +553 {.441758101100000022,2.26022393329999982} +373 {.19803646820000001,-.679398910199999984} +982 {1.42921766110000004,-.941002214499999989} +843 {1.27634559679999993,-2.20669811759999979} +418 {-1.59551679189999995,1.6570803529} +293 {-.99732343950000002,1.29635933429999994} +655 {-.514957116599999987,1.47842251450000006} +367 {.691955478899999976,-3.82848478440000006} +608 {.305725014199999978,.896059220199999951} +525 {-.688766164999999986,2.61375283530000013} +480 {-1.0458368495999999,-.891412185400000001} +776 {.751859417300000055,-.828689387099999952} +182 {.976103137600000004,-.813716472299999993} +596 {-.810035855900000046,-.581150186300000038} +410 {-.0538396538999999993,1.0642086850000001} +518 {-.840321323900000028,.587648744400000034} +482 {.596064186999999968,1.31595770340000007} +779 {.662501408400000047,-.0225660189999999997} +808 {.972804571399999984,2.21850963310000004} +951 {-.801616302000000003,-1.88928389100000005} +851 {-.0409903513999999997,-1.74270359059999991} +530 {-.466401554499999982,-.28039967640000002} +412 {.86598894329999998,-1.06725514810000011} +739 {.541154118000000017,-1.10019634059999993} +980 {1.05218133530000002,.0145452719000000001} +857 {.963126848499999966,-2.86460096099999983} +718 {-.0765848387999999991,.244818251200000003} +807 {2.04648119940000006,.652610762400000044} +201 {1.13891763490000009,-.743363494100000022} +745 {-2.96473532120000005,1.30014168730000002} +298 {-1.03563412710000002,.272055254700000027} +774 {-.228130497799999998,.242115347799999991} +790 {1.20142531969999999,.58761834369999999} +649 {2.95465384490000016,-2.09623824579999996} +567 {-1.3399993267000001,.730865271899999946} +297 {-1.57345392970000009,1.0398305510000001} +724 {.834871979300000011,-.403634780799999981} +803 {-.699528849300000033,-1.55036973470000006} +670 {.855411941700000011,-1.3820816385000001} +620 {-1.37254384939999996,1.32031055250000007} +618 {-.293354648100000004,-2.39235913849999982} +624 {.639512774499999992,.81949837120000002} +785 {.778669487900000012,-4.68800414549999989} +67 {.275978398399999991,-.89376559769999997} +558 {.455207058200000014,-.742141672100000038} +520 {.148481785300000002,-1.29344428089999997} +628 {1.54616932060000001,-1.3273921514} +177 {-1.61824396209999999,-.421257776799999983} +28 {1.93763384569999997,-1.65084946039999991} +938 {-1.25195843309999999,.675805503400000007} +824 {.0028817373999999998,-.812905936099999948} +597 {-.0938581545999999933,-.343103347100000011} +291 {-.436254347799999997,-.577593314799999979} +825 {-1.08127742360000001,-.995586827799999963} +950 {-.487475461900000018,1.92612186389999995} +371 {-.649184355699999993,-.25901903770000001} +789 {-.383358451300000014,.0240312934000000006} 
+949 {-.226919784200000002,-.109012925400000002} +93 {-1.12081151610000007,.850574697200000007} +959 {.435235108199999998,.271767047599999978} +377 {-1.19534918189999995,.0311107707999999997} +333 {-1.2415746231,-.410375945099999972} +963 {-1.3531403527000001,.423580116100000015} +708 {.712056824000000033,-1.12109166189999998} +263 {-.828788369500000011,1.78606133569999992} +491 {.260756781300000018,-.863147049199999961} +338 {-.143411366600000006,-1.75593327200000004} +203 {-2.46430465170000002,1.84016449580000008} +222 {1.17006628029999993,.494587779200000022} +194 {-.0233371994000000013,1.0869317409999999} +806 {1.26811791249999994,-3.21531117990000004} +580 {-1.9101960268,1.10341212570000002} +623 {-.132179965300000007,-.726921890500000001} +594 {1.04468156730000006,-1.50791616120000005} +359 {-.243432610900000013,-1.7549785980999999} +684 {-2.76201196809999994,2.13754035879999993} +615 {-.620275390199999999,.530059495599999986} +866 {-1.00632026359999993,.457523824500000009} +944 {-.242927194600000007,-.025776615499999999} +985 {-.643647941000000001,.477536980499999986} +20 {1.6909847912,1.1256418881000001} +957 {3.06100676900000002,-3.50694138950000012} +78 {-.082638494899999998,-.156591417900000002} +288 {-.460294776399999994,.386483139500000017} +819 {.074651455699999994,.0980197876999999995} +415 {.0516591403999999976,.772689638200000029} +1000 {-1.95121232969999991,2.09329037959999997} +986 {-.210761572099999989,-2.21945956669999989} +202 {-.0694853636999999968,-2.34300218150000017} +693 {3.22705627139999995,.396664109300000012} +375 {1.80266641240000003,-1.66880491519999996} +261 {1.08163533119999999,-2.21896843350000017} +210 {-1.41855867569999994,1.14590515489999989} +662 {.189853671400000007,.475482859500000021} +140 {.7695932298,.129063744899999999} +348 {-1.27037371039999991,1.95610286919999998} +355 {-.674157518500000053,1.62062874420000003} +560 {.725660732799999986,.410318640800000023} +58 {.502817126599999997,.101605246499999996} +120 {-.948390003500000023,.790974166599999973} +542 {-1.97515986650000008,1.4618808887000001} +734 {.389110089900000011,.528944950099999978} +909 {.322294964199999978,-.637434790100000037} +303 {-.790698793699999958,-1.27633616040000009} +652 {-1.7354126299999999,2.0395670535999999} +604 {.234396306500000012,-2.59174946339999979} +102 {-.645877199299999982,-1.56256806830000006} +601 {.57124007659999998,.629831640000000026} +31 {-.113802124399999996,-.681232685200000021} +664 {2.44790972420000008,.425776720300000022} +612 {-.263362560999999995,-.0716576154999999937} +605 {-1.14560790670000001,.411279769799999972} +551 {.130903397499999991,-.267889460199999985} +702 {-1.41581646550000007,1.97980198890000003} +368 {.686188346100000013,-1.3914127544999999} +751 {-.51722066489999996,-.17231462480000001} +687 {-1.67660822639999996,-.287491794600000017} +509 {1.25703784789999995,-1.44203305129999992} +498 {-1.00011265700000007,2.75298672090000007} +176 {.969492103400000027,-2.02165292209999992} +813 {3.04386975129999993,-4.36723546660000039} +315 {1.6798739882,-1.35661272860000004} +852 {.836986983600000012,1.37736732780000004} +207 {1.21337389610000002,1.01668999219999989} +426 {-.407553274199999982,1.36411626200000002} +738 {-.826179874799999991,-.665182370899999986} +227 {2.47395461569999986,-3.48141529000000016} +607 {1.92604723020000002,.259312868199999991} +507 {2.33746435449999979,-2.55174391850000015} +479 {1.70170470889999992,-.3726370289} +164 {.80257241909999999,-.634287251000000052} +675 {1.0749164859,1.0171546172999999} +98 
{-.455975561899999993,.877627594000000011} +956 {.670070607999999956,-.445974450299999992} +228 {.540339585100000019,-.667891066000000033} +330 {-.672295232500000006,-2.74527140899999988} +47 {.00510395119999999987,1.66094796329999994} +99 {-.268008789600000019,-.298118086399999982} +881 {.436226444200000008,1.04474250029999993} +12 {-.834655459299999958,.81282401950000005} +17 {.825524199299999983,-.76387564320000001} +233 {-1.03144662520000008,.100116750500000004} +323 {.122599089800000005,-.930131097500000004} +646 {1.19375559089999994,-1.33512991939999992} +729 {1.18758791670000008,-3.47019838579999984} +579 {1.98460308760000004,-1.16202066679999994} +295 {-.0422643254999999982,-1.1954391727} +924 {.601638070699999972,-.0648517014000000031} +804 {-1.62402518139999996,2.16354572779999987} +57 {.25856895299999999,3.14151242209999992} +258 {-1.27103145509999993,.960591054799999955} +497 {1.4812133514000001,-.409538866300000026} +364 {-2.18979950569999993,2.10968632870000006} +993 {3.17853716539999986,-1.72829020290000002} +645 {-.4089096968,2.0309940179999999} +911 {-.200625635699999999,-.179889462899999991} +816 {-.319282250200000006,.88932710699999995} +815 {-2.77118261900000018,4.72304110820000034} +157 {.308762591899999994,-1.45364071940000006} +636 {-.265845182000000013,.552813241399999988} +514 {-.364170993800000009,-.277103635500000001} +279 {-.843253711799999972,.968387773899999971} +943 {-.499040333800000013,-.581423344499999994} +503 {1.97810431249999996,-2.27554612550000002} +680 {-1.05105124270000005,-2.06863081580000019} +35 {1.8825292133,-2.82356998129999992} +80 {-1.94500916670000001,3.07354618000000013} +835 {.363955481299999994,1.15895024580000006} +59 {-1.03095165479999995,3.37716786269999991} +899 {-.595246516100000012,2.65899710060000016} +197 {-.398542198700000017,3.53080376300000021} +892 {.333579115699999984,.238585500700000008} +856 {.0908056118000000051,1.67388235789999995} +437 {-.156531133999999988,1.27149790799999995} +217 {-.212145259000000003,1.86038253470000003} +696 {-1.37416929620000006,1.85174755090000009} +435 {-.209952327599999988,-.562991291100000013} +232 {-1.43630747870000008,-1.40545170759999993} +663 {.225643601400000005,.530949211899999995} +287 {.537706494699999982,-.486020483999999975} +372 {-.379056408899999975,-1.13589253560000003} +304 {-3.12847697449999984,-.478523305999999982} +174 {.105301872899999996,.183973784299999998} +875 {2.70095022580000022,-1.96727260419999994} +766 {-.518103695800000041,-1.30601723889999999} +979 {.19628113820000001,-1.32831968119999999} +130 {-1.72003052710000004,3.07320808080000019} +401 {.269322007299999999,-2.49420483829999995} +762 {1.13106578579999995,.908067572300000014} +190 {-1.21272329850000005,.435767143000000023} +173 {.301516085400000022,-1.80682053469999993} +538 {-.0264547457999999985,-.267163503600000019} +570 {-.475690859500000007,-.515448515600000046} +27 {.771964571299999958,-.541954456899999992} +919 {-.270540054299999999,.264377841300000027} +352 {.0961521107000000014,-2.85819755880000015} +275 {-1.48247449249999996,.244104070899999998} +878 {2.15239960750000003,1.01598316979999992} +973 {-.286784087700000023,-1.78724113239999993} +614 {.858292628599999996,.296129524899999996} +469 {-1.6082830318000001,-.877487809100000016} +967 {-1.58035726459999992,.432377284199999989} +247 {.802706021400000025,-.224520023200000002} +801 {1.60813080339999992,-3.87468648450000019} +325 {1.08893477659999993,-.65582101930000003} +452 {-1.35795426339999992,1.07687111160000004} +280 
{.92984334209999997,1.42111558670000004} +964 {-3.25238967280000013,.784083539800000007} +147 {1.16828937639999997,-1.67561992750000011} +929 {1.87734461050000001,.893868637800000032} +159 {.257351489200000005,.775221826600000052} +360 {-.855904844799999953,1.80751316629999992} +305 {-.750145839999999953,1.60572743459999989} +939 {-2.34519540279999994,.724759905199999999} +68 {.699213318600000022,-1.27649819990000002} +737 {-.566739300399999979,-1.97358202439999997} +489 {1.26613402120000007,-1.46752533420000009} +386 {1.22094140800000006,.0284740822000000005} +934 {-1.61387736860000008,-1.11199601470000009} +320 {-2.76851772950000008,2.7559157622999999} +945 {.535465219100000001,1.59950741779999994} +888 {.962727286599999954,-2.4308654838999999} +656 {-.0302470724000000017,-.305150059699999976} +826 {-3.01786850809999985,1.42399134150000006} +104 {-2.42181591079999992,1.07284975719999998} +117 {.213486130200000013,-1.11718087009999989} +559 {.558342127700000024,1.57430561480000009} +448 {.791666267800000045,-.391035927100000014} +898 {-1.79490370839999991,.819329230100000028} +758 {.0606389155000000013,-2.02429303429999985} +156 {1.50275026519999999,-1.13103789560000001} +11 {-.413883170700000025,-.964271198399999974} +471 {1.64553132200000007,-1.11033163130000001} +363 {2.33034884389999997,1.79659339920000005} +864 {.601044417400000031,-2.84385955109999999} +427 {.989953523399999979,-2.30653762890000014} +713 {.376873196199999982,1.86547294919999995} +557 {-.0236887164999999983,-1.96625904529999995} +457 {1.40329290949999996,-1.82515524289999997} +720 {-.0501258737999999981,-1.06655149970000007} +354 {-.670332895699999987,-.320606645500000009} +638 {-2.97604945739999982,3.03027510539999989} +476 {.347115251899999977,1.36388910640000005} +484 {-.96058011430000001,.630117332800000041} +33 {.597766421200000009,1.68191841610000004} +408 {-.0496866550000000032,-.324535283999999979} +445 {.359235958999999994,.679994790900000035} +374 {-1.1397587306000001,2.76771996250000019} +154 {1.99245638960000004,-2.51359817579999989} +276 {-.267650586099999976,3.4034501639000001} +581 {2.42030960220000013,-.49296489269999999} +6 {-.954998908700000038,1.08531977990000006} +547 {1.0962096590999999,-1.41825052239999994} +958 {.219330692800000004,2.32004475199999982} +648 {-1.35770926959999993,.178399248199999999} +324 {.476200120800000015,-.391574649800000008} +54 {-1.98534699179999996,1.16083682569999991} +894 {-.510925490600000032,1.53568163070000008} +461 {1.21998056239999997,-1.54904451629999995} +698 {-.654445281099999998,1.34400743249999999} +861 {.161053264800000012,-3.22197220990000011} +519 {-3.20760159989999982,1.53699418209999994} +138 {-.169460621899999997,-1.0292506587000001} +211 {1.16745417419999997,-1.25272421759999997} +936 {.884938896800000041,-1.07476403129999998} +647 {.122599585600000005,1.66308804139999999} +9 {-.957676970099999969,-.366956358500000024} +575 {-.0803326953000000066,.920241853599999993} +311 {.388921540999999982,-.191577156500000012} +299 {1.40128753169999998,-3.06581457859999995} +767 {-.108745159899999999,.166169219499999993} +707 {.985302724399999996,2.24581756400000021} +213 {-.676618100100000008,1.19199126560000002} +149 {-1.46257566980000009,-.092527738499999998} +235 {-1.128472425,2.49221377759999996} +406 {-.461199517399999981,3.0346309548999999} +522 {-1.18050276040000002,-.67228416660000001} +272 {-1.30301219260000001,3.4655082355000002} +952 {2.98640541289999994,-2.47511764660000022} +221 {-.365730493699999992,.447922601400000009} +850 
{-1.12415967159999997,3.53163033349999989} +743 {-2.38028832170000015,2.95228618229999995} +342 {-.949872854700000047,2.40368601800000015} +245 {-1.07517934330000009,-.482866929100000009} +948 {-.0864590360999999974,-1.5837701368999999} +274 {-.126634432800000002,.677565906400000029} +301 {-1.13099166820000008,2.35122397740000011} +105 {-1.05111889079999998,1.99079272600000001} +249 {1.32920361489999994,-.605297634899999992} +833 {-.496950166500000012,-.124418262500000001} +65 {.818821393699999955,-1.62026001219999993} +346 {2.21813403960000022,-3.71348641199999996} +206 {.175991426300000003,1.49323971690000001} +831 {-.398851995999999986,1.19646339319999995} +593 {-.0978146619000000023,-.144071944000000007} +700 {-.341572677099999999,-1.38558449780000004} +981 {2.83116648549999983,-.407481995400000019} +90 {.569793944900000016,2.71309015400000009} +392 {-.902228250900000028,1.09294439189999992} +64 {1.13558713080000007,-1.5493676837999999} +184 {-.0181309563999999986,.455814753199999978} +763 {-.22060873419999999,-1.18277034360000011} +933 {-1.00788777549999997,1.71854431370000005} +383 {-.38429220460000002,.893559303700000029} +821 {2.56266756489999992,-1.20331941580000001} +126 {.5545882902,-1.07203117840000006} +568 {.2977611114,1.45302366990000009} +842 {-.354818318199999982,.030388841199999999} +119 {.202267588199999987,.484840111299999987} +349 {.0990116359000000057,-.95516765299999995} +874 {-.919723593399999984,2.06468468490000001} +481 {-.85035119299999995,-2.66968810789999988} +678 {.406957324000000009,-2.19189529430000007} +132 {-.451745682300000007,.0885771875999999958} +433 {-3.09974357209999996,1.2681509487} +753 {1.18411838150000004,.818131201899999994} +730 {-.709260878699999986,.921301754300000053} +477 {1.26786465039999996,-1.05697661739999993} +905 {-1.44121255269999993,2.07267106609999985} +139 {.809714846800000054,-1.65603726070000001} +314 {1.61514612440000005,-.727628380900000038} +800 {.149745044299999991,.0933302476999999936} +179 {-.183880559199999988,1.72726505299999999} +676 {.0819308229999999998,-1.81498989669999999} +407 {.450616215900000006,.559804402499999965} +829 {2.22699802979999983,-2.86745875909999981} +913 {-1.38767490009999994,1.43275156000000004} +414 {-.0753301626999999946,-.667072328499999978} +865 {-1.56905323120000006,1.84038524850000007} +722 {-.525305248600000052,-2.08469663029999985} +413 {1.12862739279999991,1.39154045440000007} +391 {1.26779380400000008,-.214884335499999996} +836 {2.72242883699999982,-1.23870735760000006} +876 {-2.49557088910000013,.531631426799999973} +811 {-.222728814999999997,.596028727999999952} +13 {.0668287991000000026,-1.13937711919999995} +637 {-.857142654300000029,-.96107234829999999} +651 {1.25800531160000006,.349549351800000019} +968 {-.485254639900000007,-.975030617499999974} +854 {-1.88102668809999996,1.65770899980000008} +848 {1.62844196279999998,-.766358892800000024} +602 {-1.93205068980000005,2.51583048610000004} +296 {-1.28028833129999997,-.135992569099999988} +390 {1.5594781191,-2.24077130419999992} +586 {.591712165699999959,-1.88380079900000008} +823 {.402389089600000027,-.510685849699999972} +912 {-.453625698199999983,-2.66627305540000004} +837 {-1.4190705441,1.05201289710000001} +701 {-.0373501629999999987,1.87402350149999997} +937 {.98589149620000005,-1.82096155969999995} +534 {-.558388644999999961,.742199287199999946} +621 {.471798654200000001,.843579670000000004} +732 {1.33855140419999996,.953934353099999965} +978 {.0412430958999999986,.72821729540000002} +703 
{1.65261190980000006,-1.03760268090000007} +991 {-.529412867000000009,1.69862164119999992} +712 {-.477118898199999975,1.83223300380000009} +660 {.900076477299999955,-.034902078000000003} +236 {.510186185400000047,1.20607045839999993} +253 {3.70699819119999985,-2.89278214680000012} +380 {.0468245706999999983,3.1792505083} +725 {2.13531029839999986,-2.26778835859999983} +658 {.561710315799999971,-.453466730699999987} +640 {1.54510089359999991,-.992661092199999984} +483 {.800697231999999981,-1.88270823459999992} +941 {.557738581099999964,.659441891499999988} +111 {-2.13031956629999986,4.24491390579999983} +705 {-2.4165561589000002,.708487980000000017} +50 {.816430341300000029,-1.38255089810000009} +369 {1.00844361689999995,1.69380947069999999} +475 {-1.68147853210000009,.320793846599999988} +127 {1.38645449489999995,1.24862881989999996} +513 {1.16911529480000009,-.998492009000000014} +195 {.819265878199999964,1.81526630020000002} +402 {-.724601146100000038,1.32799431590000006} +77 {.992928066599999992,-1.82698832069999995} +961 {-.907858208499999986,-.106103434799999993} +82 {.45964504439999998,.0120849904000000005} +109 {1.10735845789999998,-1.37446194560000001} +300 {-.419507247400000005,-1.18685404429999997} +246 {1.66841733820000004,-3.07008689179999994} +969 {-1.09327923699999996,-.221644550899999987} +962 {-.946392306699999963,-.0498555498999999971} +809 {2.15847858709999985,-.750785425699999953} +131 {-.551561924200000053,-.065288380300000004} +590 {1.45564207750000008,-.919136956400000038} +681 {-.445755046799999999,3.56731877969999989} +932 {-.450037205300000021,1.79793882140000005} +23 {2.4812761442000002,-.146641054500000007} +165 {-2.07771580339999984,.896117828399999983} +840 {-1.78817764890000008,.960823476299999979} +208 {-1.01838850200000008,3.07798405610000003} +7 {1.18813736429999994,-1.81451440850000001} +997 {2.16266473740000009,-2.03986977930000002} +548 {1.91609386120000003,-2.78741633050000015} +679 {.576184002999999945,-1.50883884049999994} +259 {-.59013701119999995,-1.22933577510000003} +492 {.494216130199999992,1.12786883090000001} +488 {-3.11017513050000005,3.48615760240000006} +198 {-.747022172899999948,1.83213442469999999} +241 {.155675446800000006,1.24223838059999991} +116 {.464243202399999999,-.555342519400000012} +501 {.150003813499999999,.0772391882999999974} +928 {.198409033699999987,.493534152299999973} +21 {-1.04559129249999994,-.00592247860000000032} +398 {-2.71356030299999995,3.59853564129999981} +499 {.309084717999999981,-1.1544847399} +631 {-1.20219169060000008,1.58670858129999992} +885 {-.785026538500000037,1.88394672219999992} +290 {-.87539347540000001,.87993176399999995} +125 {3.82825940119999997,-6.08263589269999994} +312 {.112616767899999998,-.0955611606000000002} +166 {-.362604321299999977,.816732485800000019} +56 {-1.49870876070000003,1.08515866949999995} +142 {.470021046800000009,1.10787327169999994} +387 {2.04815346419999988,-1.49491903490000011} +683 {1.89315036009999993,-1.0514409482} +44 {-.660895155700000014,-.608169778299999964} +562 {.0613071364999999979,1.15852570840000002} +34 {.981442035000000046,-1.6943631292000001} +903 {.651345688499999964,.199033203200000008} +463 {.5065475854,-1.10392626640000002} +409 {.329449981900000022,1.44976752940000009} +474 {-2.08852826409999981,1.31188506689999995} +460 {-.906368981399999996,1.66922723380000004} +416 {-2.16366360979999994,1.86993513900000008} +128 {-1.76810527930000005,.288417958699999977} +62 {.408868040599999982,-.836707383399999949} +101 {.107947859899999998,-.290843100899999984} +925 
{-.0666944758999999943,-.153251602999999986}
+446 {2.2162644628999999,-.456376217500000014}
+187 {.918299741000000003,-2.88742489400000002}
+844 {.784854778200000047,-2.63470805680000009}
+158 {1.6524189574999999,2.66450416150000002}
+588 {-1.5622080034000001,1.81749798579999999}
+205 {.195217298499999997,-.636939554000000019}
+168 {.646422633699999971,-.727912772100000049}
+496 {-.1151249939,.805281600199999947}
+273 {2.71711775479999984,.319329990800000019}
+356 {-2.38254553370000011,2.47005000919999995}
+155 {-2.0298160596999999,.907304968699999992}
+347 {-.259098651499999999,-.38440251139999998}
+935 {-1.19250094690000008,.0580059342999999991}
+583 {-.711918769499999993,1.91819211069999995}
+694 {-.0150112747000000005,-.289214967499999975}
+284 {.109643880700000002,-1.65684052709999996}
+616 {.197563564400000002,.259118358300000018}
+906 {2.23346934929999996,1.93004956379999992}
+921 {-.0204604224000000008,-.838764524300000036}
+30 {-.339375132299999993,.77750285429999999}
+328 {-.679108409300000049,2.99430120950000012}
+123 {1.52790706380000008,-2.21376850779999979}
+871 {-.999952212399999985,.367222541899999977}
+976 {.364643837799999981,-2.10339129860000007}
+250 {-.0264198837000000006,.203443428399999987}
+692 {-.866930191999999988,2.05615321370000004}
+162 {-1.43840655919999993,-.654724247800000048}
+186 {-.215677639899999996,.534796317099999974}
+760 {-.469724343400000022,1.30614545010000005}
+467 {.296536066799999998,-1.64353421440000003}
+882 {-.0454926236999999983,.137550490500000011}
+\.
+;
+
+--
+-- Data for Name: multivariate_7_1000_1_test; Type: TABLE DATA; Schema: public; Owner: -
+--
+
+COPY multivariate_7_1000_1_test (id, "position") FROM stdin;
+1 {-.679280164699999967,-.0390913183999999983,.893555545500000048,-.017647794599999999,-1.29655690879999996,-.667980506000000029,.181708778300000012}
+2 {.831050790299999953,-.548248471399999993,-.638031502699999975,.00708853170000000015,-.669853694399999977,-.827762331399999995,.612500030400000006}
+3 {-.367588410199999993,-.289493409099999988,-.916874670799999958,-.58380545880000001,.0646152901999999946,.0467438598000000025,-.750726978700000047}
+4 {-.144232687400000004,1.47556079689999997,-.206478974900000001,-.260697278899999996,-.138217971899999986,-2.54339721709999989,.577418536699999985}
+5 {-.0361098631000000003,-.226172568200000007,.715002217100000004,1.57726688649999991,-.392273861499999987,1.18277085580000008,1.79311368739999999}
+6 {-.731345967300000011,-.629817681300000043,2.07069457080000019,-.903006007400000033,1.73554553759999997,1.38308886009999998,-.375366639900000021}
+7 {-.304310199499999989,1.25357124879999993,-.273845179200000011,-.0463293814000000012,.798982470100000008,.539955889200000039,.0858503389000000033}
+8 {1.35529122070000008,.0138508607999999998,.751363694999999998,-1.57490917819999998,-.237378253099999992,.336490592600000027,-.0795996727999999931}
+9 {-.540872242299999972,-1.38043394280000009,1.71655186030000007,.418111803999999976,1.52495658739999995,.686476331299999964,-.588555970900000003}
+10 {1.31674790940000008,-.832984144800000048,.773960297400000052,-.803515406499999973,-.085430275,1.49619789519999991,.397766793499999993}
+11 {3.33842986689999988,1.80093502639999992,1.7562786105999999,.667473659600000047,-1.0501681048,.329370536200000008,.455081038499999979}
+12 {-2.34700732049999994,-.613899237399999964,-2.1618005886999998,-1.00134312020000005,-.462267628300000011,-.817561061399999955,.184392038199999997}
+13 
{-.394916975999999975,-1.66955730899999999,-.945147843600000037,.00356999350000000021,1.15573946640000003,1.87743135410000006,.112198930299999999} +14 {.181213237700000002,1.27830445030000006,.571057775600000039,-.470907728999999997,.463358374799999972,2.01732509730000009,2.14970486589999998} +15 {-.371083318800000006,-.744291014500000014,-1.28072385940000011,-1.38866450650000006,-.382468587099999979,1.48132078899999997,-1.33166877280000007} +16 {-1.04828207370000004,-.143772369099999991,.1808578193,2.33491952189999985,.351698871700000026,.371411387199999998,-.7211061728} +17 {1.7267817648999999,-.363406455199999978,1.90217295229999994,.407579941200000018,-1.00144250270000001,.28598554209999999,-.335991817199999987} +18 {.907278955699999945,1.31571368989999993,.794294170799999977,-.395686031100000002,.572730213599999982,-.634445164900000025,-.535421109700000009} +19 {.76415535450000005,.1930349747,-.455114207699999995,.489069528899999995,-.467453517699999987,.0409796169000000016,.504810330900000048} +20 {2.82636347710000013,-.661678478100000023,-.644995893799999997,1.76567711450000009,1.5136797770999999,.341917115599999988,-.0781384970000000012} +21 {-.176174240499999996,-2.41572884280000011,-.96119218289999997,-.0189786306999999987,-.0415344646000000012,-.13400574870000001,.116948935899999995} +22 {.694510314500000003,-.673723312100000027,-.0578020594000000013,-.158126148299999991,.0128326229,1.36320778310000001,-1.36044993940000003} +23 {.969076168199999977,-1.06479112640000007,.610471529300000038,.321501864099999979,.283449781700000014,.714294320000000038,-.226119189199999987} +24 {.186013646199999994,.992014195400000021,-.440693883000000008,-.694761457999999998,-1.32119779890000011,.948226381000000007,-.340549670799999982} +25 {-.957021129300000051,1.25978756170000006,-1.67346054499999997,1.16367699970000005,.0500304480999999995,.398546264500000025,2.22384244059999991} +26 {1.42896417969999989,-.221385758099999996,-.518658865300000049,-1.41736103670000002,-.783959266200000005,-.080428846499999998,-.84173107359999999} +27 {1.24620146040000002,-.2118324439,-.51096484090000005,-.776972241399999985,.520374153599999945,2.05212938420000013,.881162426099999951} +28 {-.318935219499999978,.336199830600000027,-.187460577500000003,-.367274710000000004,.0660742322999999937,.545795387199999982,.075504867500000003} +29 {-.137244227100000005,-.451763216199999984,-1.47091903420000003,.65332501580000002,.686816082999999966,-1.69395567139999992,-.704724665199999989} +30 {-.735212283700000024,.752579492600000011,1.30040457390000008,.771171796900000039,1.0350692306,-.662785960300000032,.645534411299999999} +31 {.837919440700000018,.774549432799999993,-.29119552310000002,.298172577400000027,.992704418800000044,-1.4900665634000001,1.6711020807000001} +32 {-1.46080427410000002,-1.26078929380000004,-.217023274799999999,-1.91129641750000001,-.1226030907,-2.50646607849999992,.329341565000000003} +33 {-.23297151960000001,.324718077100000002,-.107588903599999994,.149324331100000013,-.647559600999999985,-.130582830900000002,.605684308800000015} +34 {.141477445400000013,.457517211300000004,-.663357862100000029,-.0186098864999999988,1.21821348940000007,-2.68636650620000017,-1.23581190149999998} +35 {-1.63173940290000008,1.06870502609999996,-.622001212999999997,-.3571997757,-.661842593200000051,2.67770217190000004,-1.93037810570000001} +36 {.387910565500000026,-.412102145699999978,.969765061199999945,1.69622726039999994,-1.2367133077000001,-.857917803999999951,1.16597921589999998} +37 
{-.129518488099999995,-1.20308709400000002,1.1034742770999999,-.385793753199999978,-.375377845799999998,-.315976601299999993,-.208863830400000006} +38 {.322785539200000027,.173718091500000005,-.546810166700000044,-.557383640700000016,1.38087793520000002,-1.1365620367,.400775959199999976} +39 {-.0221292556999999994,1.2131972862,-.260108154799999991,-.118530388799999997,-.81570145719999998,.566360577199999993,-.664723882200000005} +40 {.538297238199999972,.550112751000000011,-.922848220600000047,1.95056202340000007,.328759429800000003,1.05012769210000001,.555172087500000022} +41 {.073730435900000002,1.16543646300000003,1.64682376600000002,.378401169599999976,-.122394263700000006,.0988322816999999965,-.602720466199999971} +42 {.817167979699999991,-.463734291600000015,.852794933800000043,-.632557737099999962,-1.02300853659999991,-.939454766300000021,.0487497465000000033} +43 {.835133515899999956,-.132571912699999989,-1.50853142479999991,.675841770300000033,-.00858207719999999966,.246362986000000006,1.14349686940000006} +44 {1.39363722330000006,-.552082967299999949,-1.41976869960000007,-.585029132800000018,1.05110893159999996,.114351073999999997,.215966102699999996} +45 {-.824044377300000019,1.15579572009999998,2.83799354970000017,.180006255899999995,.692003452799999974,-.303686192999999993,-2.65205492900000017} +46 {1.00510797619999992,-.905889130900000006,-1.00610328719999997,-1.31515143840000004,.544229376300000012,-.940450603499999982,.561364757400000025} +47 {.0400885770000000002,-1.45327250229999994,-.621457306100000051,-.253626067899999974,.349814738100000022,.387793939300000001,-.844134682099999978} +48 {.452144723999999998,-.118330685199999994,.108467366199999998,.126011508900000013,1.4679667005999999,-1.55230799089999993,.0326168883000000007} +49 {-.0560402700000000031,-2.26129319919999983,-1.00703939490000005,.210897484299999993,-1.07348494549999995,.0736541138999999984,.190691622799999988} +50 {-.580523764399999975,2.28694705719999991,.678117870000000011,-.822843707500000021,-1.13189178629999998,-1.26074662140000004,1.05742048860000004} +51 {2.24819086719999994,-.128616512399999994,1.08867004189999994,.555122798900000047,1.3749796809999999,1.40687121169999996,-.231254323099999992} +52 {.682742002999999986,.0194111214999999997,-.273946047299999995,1.33639742579999998,-.0126818212000000003,.305481929799999996,-.574213957999999969} +53 {1.26042505389999993,-.150857197599999993,.242077738399999992,.642311022599999948,-1.45431757789999994,1.07853448000000007,-.767598743600000022} +54 {-.0766918840000000018,.866372480300000025,-.848247589800000035,-.423304374400000016,-.898073107500000023,.0719936211999999964,.246182067899999996} +55 {.214909555899999993,1.64422780990000006,.264998600799999984,-.0163233633999999989,.740356730899999982,.573041111300000017,1.7500497142} +56 {-2.2809478970999999,1.13521725519999994,-.27876306150000002,2.21185326139999994,-.522509758400000024,.824450799099999965,-2.18048992019999988} +57 {-2.3409838905,.336702583399999977,-.846651074800000014,.796623714600000032,.165667744899999997,-.048602012,-1.66760727769999995} +58 {-1.72367471129999994,-.197640433599999998,-.259386363100000017,.364618961000000019,.136546072099999999,-.325208331899999981,.123098404499999994} +59 {1.14161449570000006,.848702654799999956,1.43168781029999992,-.712318789799999963,1.52666859919999998,-.0957667147999999996,-.0959517839000000039} +60 {-.992220549599999946,-.0536509938999999991,.816583791799999958,-.170747720699999994,-.303700479799999978,.386842191700000004,-.473265149100000004} 
+61 {.422327888099999993,-.0127895049,.673829082799999979,.531541240000000026,1.08806848580000004,.454321885699999983,.146449570400000006} +62 {-.148386449599999998,1.16413826139999999,.264697844800000026,-1.01084560609999996,-.16351827699999999,-.27593344190000002,-.618949745700000054} +63 {-.919587918900000023,-.255812690499999995,.0919927311000000025,.818410847300000022,1.04343654069999991,-1.82909572719999991,-.543591473999999963} +64 {-.555247067799999994,.248092452600000013,.881224960299999971,-.691699455399999996,1.908647757,1.19550800489999998,1.7304244445000001} +65 {-.935600243200000015,.377944211400000007,-.140158785400000013,-.883327876399999989,1.56843312700000004,-.811525804900000036,2.48385980659999994} +66 {-1.1965684030999999,.70314598989999999,-1.00463507100000005,-1.83489439129999998,-.721452383200000047,-.547762753700000049,-1.1705587784} +67 {2.43632733819999991,-.789317741199999978,1.05427159740000009,.356853210199999993,1.14231159189999998,.732438923200000036,-.550467591900000053} +68 {-1.63327644740000011,-1.03313208889999997,.780940712699999984,-1.20834547840000006,.619340232099999999,-.291322697999999991,.108888254000000004} +69 {.955263913699999967,-.0459704808999999992,-.567008988899999955,-.471545578700000023,.171791246300000006,-.476436365400000006,-.778423061799999982} +70 {-.752041531399999963,-.978519685500000014,1.16698589669999997,-1.13219479660000011,.561458206299999985,.324543838400000006,1.28204958179999995} +71 {.128462625800000008,.929720542300000008,.375356198400000007,-.0184795679999999983,.125377869899999994,-.386737130799999995,.773012684100000014} +72 {-1.29105747249999991,.0363277127000000005,2.59288519080000013,.210790219400000006,-.998452264800000044,.774900184000000047,-.371934238199999989} +73 {-.125622518799999999,.351428412400000001,.663802003899999993,-.918093782699999972,.349740071500000027,-.889031628000000018,-.0939736422999999932} +74 {-.412775361100000016,-1.20630208229999991,-.750599027699999977,.68378315919999999,.756557216200000049,-.892669894099999972,-.986029715599999967} +75 {-.125782562099999995,-.579701682800000007,.743545527400000017,.99235330690000001,.308435549899999994,-1.23823796150000009,1.94955158349999991} +76 {.53176844270000001,1.12565481980000004,-1.26562397939999993,-.996492889500000034,.482257322499999974,.116588976100000005,1.0355181818000001} +77 {1.82894915750000009,-1.03666634159999993,1.42553051970000011,.396413615300000022,.143124257000000005,-.911400773200000014,1.62528159329999999} +78 {1.90050656209999991,1.84760260460000003,-.0885753184999999998,.394444381000000011,-1.03692690389999997,-.717742819200000048,-.187210029599999994} +79 {2.20179959910000012,-.541166991500000027,-.969382219799999945,-.293041707900000004,-.577007384900000031,-.589819565200000007,.167429935899999993} +80 {.650385273199999947,1.5426844166,-1.1482870168999999,.964216455099999981,.183439994599999989,1.1419237887,-.941638115199999959} +81 {.152336364199999991,.0766911608999999994,-1.08422101439999996,-.0884951858999999952,.528836575200000047,-.44118423280000002,.775352924699999946} +82 {.376102478399999995,-.0155177036000000001,-.321956323599999983,-.00723537469999999995,.300048610599999988,-.907249640299999993,-.612299942299999955} +83 {.0868089196000000063,-.305141081699999983,-.661910650100000053,.458926988500000022,2.20843778749999986,-.697584687999999953,.302840569599999998} +84 {2.56758278250000016,.482402818000000011,-.0295620709999999988,-.854917620399999967,-.895506413300000026,-.860533801700000045,-1.10056309240000005} 
+85 {-.219959081499999987,-.724381359999999974,-.43094465580000002,.980140129999999998,-1.28908688539999994,-.293427207999999995,-1.07471104299999998} +86 {-.791079843100000013,.954918181299999946,.660450575699999987,1.40103056249999991,-.553059669000000032,-1.35894108869999997,-2.18823457650000019} +87 {-2.20424901019999986,-.524694628700000054,.665456806799999967,-.496764874500000009,.793674317300000043,-.183758605399999997,.989328828799999949} +88 {-.799913502899999962,.279651478299999978,.690227536099999983,1.55674788780000006,-.210280648900000006,.0671075119999999942,.272033826799999989} +89 {.0997871325000000003,.0787705670999999941,.0010739298,-1.83731347580000004,-1.66281210959999992,1.12972498259999998,.461574625000000016} +90 {1.17499932880000002,.0637990603999999961,-1.59359516219999997,-.942777186599999983,1.48663755259999997,1.29630512850000001,-.0899262685999999978} +91 {1.17613772439999997,-1.7762017953,-.100093439399999998,-1.93645862530000001,.255835333399999976,-.190497781599999999,.710776683900000039} +92 {.46765245900000002,.742552846699999991,.085752770300000003,-.572137533299999945,.333081425300000011,.0132200545999999995,.761662863199999984} +93 {.196510579699999993,-1.1696328494999999,.792494857600000047,.533984490499999964,.805038678199999969,-.475007012900000025,1.6716268917999999} +94 {-.104754151599999998,-1.19069256360000009,-.470241489899999976,-2.30751527420000002,-2.65894303169999979,1.86583353019999998,-.14429876820000001} +95 {.924161520299999983,-.868427845799999987,-1.02778652399999992,.0630971047000000035,-.299736050400000009,.236023318799999993,1.47412813960000011} +96 {-1.25462149680000001,-1.37702684319999991,1.82952431969999996,-.222825812200000001,-.100310059300000004,-1.30442509800000006,.464249873699999982} +97 {.0514450489999999996,-1.05661710499999995,.869507229999999964,-.512893547400000038,-.291292639099999973,-.664395052499999972,1.09971238400000004} +98 {-1.85034772850000007,-.581749010899999974,.647337806400000049,-1.66750444599999992,.142260204599999995,.489654581099999986,1.55148899620000003} +99 {-1.24974514029999995,-.181228320599999992,-.421823392999999991,-.888571564000000036,.597297525700000054,.0692543646000000068,-.603628022700000044} +100 {.889814856800000054,.938138153499999961,-1.10617187510000003,.861859593399999957,.0672542502000000042,-1.9260273792,-1.08022645689999996} +101 {.889467880700000046,-.468868920000000022,-.623642230199999981,-.471544078500000019,.721271480399999998,-1.66648773810000006,.288421943000000014} +102 {-1.43378459809999992,-1.07368658789999993,1.79717333349999997,-.875650993399999966,-.170270601500000007,-1.36361773240000006,-.598668486500000041} +103 {.742205768000000043,-.0646446989999999999,-1.26646698930000001,-.682137669599999996,-.857300874799999946,1.62997840710000008,2.64136004830000015} +104 {-1.53167021299999995,.15367627519999999,1.37810792970000007,-.909348899299999958,.857922501000000004,.217371032600000014,.483992983400000021} +105 {-2.24285588710000017,.479959122999999988,-.554997800099999994,.705362771999999971,1.77896821499999991,.449560000000000015,-1.6553426846999999} +106 {-.45407642920000002,-.568245097100000041,-.265133592799999984,-1.01513737120000003,.13851795959999999,-.352987738699999998,-.797212024699999966} +107 {1.23871677220000009,1.26088732819999993,-.0793779161000000011,-.329512715000000012,1.30599913429999992,-.836095145399999962,-.693735691999999959} +108 
{-.141306038999999994,-.819043509600000053,.818794135400000012,.409960289300000003,.0327517656000000032,2.23439642180000009,-.84557272579999998} +109 {-.343416791399999977,.249191303700000005,-1.37779410580000006,-.268795805900000018,.390153397600000007,-.480227449299999998,-.0706188992000000015} +110 {-.47943756329999998,.558995725499999985,.853995802599999965,.750402139700000026,1.43259492240000008,.188203035099999999,.157367418700000011} +111 {-.599173789499999998,1.30677389609999994,-.379509538099999988,1.09078761620000009,-.621848050999999957,.886767030799999967,.609550891199999945} +112 {-.631069709200000051,.218579459100000012,-.115040758000000007,-.178042737700000009,-1.1712113930000001,.592595393199999987,-1.13806870020000006} +113 {.986086064899999948,-1.06907644220000009,1.2361114313999999,-.581485380399999974,.251350433599999978,-1.8980182780999999,.873494935900000047} +114 {1.20993818459999991,.14558265170000001,.236322230500000008,-.0431485656000000009,.188379116600000007,-1.56979010329999991,-.506827218199999985} +115 {.487998727000000021,1.36285355500000005,.322739948900000018,-1.37913301919999998,-.411678536699999986,.792000180599999992,1.12962970489999992} +116 {-.322589150800000002,1.83001746190000003,.315186588300000015,.658159776299999999,-.0347536700000000004,-.254958711299999974,.230436038900000001} +117 {1.2032785024999999,-.0376585134999999974,.706333686100000024,1.34608482039999999,-2.01486294760000018,.6057218781,-.222437813300000009} +118 {.789426134499999987,1.64111676320000011,-.0526901965000000014,-.508022496000000046,.0361333383999999994,.56715477670000003,-1.51338928229999992} +119 {.00293115239999999994,.791532667100000031,.207370477100000006,-1.11599511200000001,.195204583900000006,.819247681899999969,-.678257919800000053} +120 {1.06511057980000001,-.594430725099999946,.0556153266000000007,-.732568735299999996,-.034276354600000003,-.181811523899999994,-.497885455699999979} +121 {.159020991800000011,-1.06635278889999996,-1.66961235009999998,1.47514637199999998,.470470457299999978,-.285065919800000012,.702796318800000042} +122 {.692429749899999947,-1.00830689059999989,.418300606899999983,-1.28214327249999993,-.286241054699999997,-.100771611499999997,-.879537808299999946} +123 {1.19450581919999999,-.198365166000000009,-1.1241202146,-.764570788200000018,.544768711300000041,-2.16813680199999981,-.974875711299999947} +124 {1.02923810110000002,.988795538999999946,-.146852588799999989,-.51635226710000004,-.369677483500000015,-1.73215919110000005,-.1094867702} +125 {.788737710100000045,.132067849799999998,.466074909900000001,-.1880998173,.564913862599999983,-.1547011623,-.606593602699999956} +126 {1.0909447753999999,-.3807087569,-2.1462298433,-.764125407099999987,.151012417799999993,-.527790678599999952,.879441527899999964} +127 {-.277515405900000012,.251270043900000017,.686886791800000007,1.55018130799999998,1.00954553139999992,-.0593693610000000024,.683404780900000008} +128 {.553736375299999972,.202376619499999993,-.109678556600000005,-.4678750411,-.077893325200000002,-1.76084803840000004,-1.64879023829999993} +129 {-1.64541660200000006,-.949830379699999949,-.0652929201999999931,.318124286599999972,-1.2492740310999999,-.678517918800000008,-.372282851400000003} +130 {-1.03389692009999989,.547115389599999946,.325673126500000021,1.9326616753000001,.981542805200000035,-.426878762700000025,-.665338997099999996} +131 {.787875522200000011,-1.70382907909999992,-.726556329100000009,-.633965971399999995,.616037412700000009,.853323732600000051,-.344958651800000016} +132 
{-.172889636900000004,1.50085746719999991,.354308166599999974,-.462985830499999973,-1.58337058610000003,.230925159500000005,1.53060695809999991} +133 {-.154789151799999997,.226900952699999992,.569154428100000054,.207414176199999994,-.963145658199999977,1.67787722699999997,.466331783700000002} +134 {-1.1249245456000001,-1.57140589609999992,.734822780599999947,1.54177172959999997,1.15097811649999993,.214164756000000012,1.52322886829999993} +135 {-.162065434199999997,1.4787324823000001,.491773178099999986,-1.4608353516999999,-.339388476300000019,-1.17612163799999991,-.783609611799999994} +136 {.242792132599999988,1.15425086790000009,.88681658480000003,.0251877078999999983,-.791029709899999989,.560650454999999992,1.19026897539999998} +137 {.75973468249999998,-.731517723900000028,-1.24448768789999997,-.594783946900000027,-2.36161410629999979,.416921164600000005,1.26996400319999991} +138 {.560053144900000022,-.827428160400000001,-.671884318399999958,-.137538156299999992,-.307154915799999984,-.447100067699999992,.437090699199999988} +139 {-1.40589198060000009,-.122609593599999997,-.668298329900000043,-2.17543322680000006,.558161379299999982,.885606084399999993,-.260765601100000022} +140 {1.60310626329999995,.213827092999999996,.182388110800000003,-.442974159899999997,.345682845999999988,1.07848338930000009,-.519861901500000001} +141 {-1.01436987289999991,-1.34383561360000003,1.6210354254999999,.154408095100000003,-.356903831700000007,-.0805249628999999967,.489965623199999978} +142 {1.03604479020000007,.602177643300000009,-.699535625200000011,1.31205965809999991,.216191415999999997,-.616239655799999952,-.956618558399999985} +143 {-.761312274999999983,.104921191299999994,.143369372299999992,-.796299677299999953,-.348471801699999972,1.383598565,-.395465307400000021} +144 {-.229129653200000005,.156039731200000004,.048157906200000003,.491906505300000019,-.627139169300000021,1.69113110840000003,1.63495539959999991} +145 {-.372094574499999997,.404355519300000021,.529045707400000009,.879243703899999951,-.248517526099999997,-1.60549687730000001,.75042407160000002} +146 {-.988163554400000033,-.826191825999999963,-.166787584600000011,.549912020399999979,.817745673999999978,.0066815358999999996,-.254721887699999983} +147 {-.410236668699999973,-1.20295326569999994,-1.29291975000000003,1.98944571240000001,.669171600099999986,-.698908901300000052,.566847999399999969} +148 {-.58776848650000002,-.0950452148999999941,-2.24357883590000018,-.852845465699999972,.1038565984,-.808785252499999996,-.1373285266} +149 {1.1037723343000001,-.825710714499999998,-1.1730019156,-.844578509800000043,.183798778800000007,-.460496161000000015,-.61151561259999998} +150 {-.481762195999999976,1.40174980289999995,.835211188699999996,-.0630473955000000058,-.34790767039999998,-.641801811200000016,.036950142200000001} +151 {-.186930636499999997,1.55935497390000011,-1.31842534649999998,-.326227751200000027,-.473701375799999991,1.8171928624,1.59966807869999994} +152 {-.595706310600000011,.841518853700000014,1.37874589739999998,-.0784414065000000049,-.390568749599999987,.180927119400000003,.734046165900000047} +153 {.690939961999999963,.632321699400000048,-.102477027700000001,-.936287391300000049,-.1217427383,-.0185039525000000003,-.201240590699999999} +154 {.704837276699999959,.456354928800000004,-1.52830513609999996,.924924155399999948,.943094887799999948,.0833660655999999972,-1.38153905209999994} +155 
{-.167450092100000003,1.62129046600000004,-1.21012326439999995,.671702745000000045,.53342244289999996,.580038118699999994,2.66786217420000016} +156 {-.237063534399999998,.265141205400000024,.766751329400000015,-.328256174399999989,1.34022493150000011,-.787807878500000003,.583566388599999986} +157 {1.60713232300000008,1.6384113949000001,-.178429948300000002,.0915612749999999975,-.107577418699999997,1.56419689629999992,.341546463000000022} +158 {.308990719500000011,1.83293696419999996,.878735387999999951,-.786372142199999957,1.46739303789999997,1.966556003,1.35979454479999995} +159 {1.12784719550000001,-.0165692344999999984,-1.37874459400000005,.318397684399999992,-.24263353239999999,.390536212699999996,1.42668166319999989} +160 {.507568353300000052,.719011203899999951,-.2389789098,-.796397688300000017,.0428816952000000001,.840695645699999994,-.982147320199999951} +161 {1.14016716649999994,.269008684499999984,.221433822800000013,.523063275399999972,-.696452027500000015,1.39956770630000005,-.350806090299999984} +162 {-.937271676599999992,-.294673641500000028,.208271042099999998,1.18657484010000003,-.42760995839999999,-1.81353312189999993,-.332725219000000016} +163 {-.642382414299999982,-.912520206900000019,-.951549636199999993,-.363369582199999985,.766119614700000007,-.755867211700000041,.270773125299999973} +164 {1.44966219519999995,.611672219799999972,-.0561892521000000034,.590567723100000053,.877347050999999989,-.516163507200000038,.334732876299999982} +165 {1.16795572309999995,1.20290588510000007,-.641020224499999958,1.38814417760000008,.381201839900000006,-1.33609965100000005,-.377348703600000002} +166 {1.6929020736,1.10625889780000008,1.73042928460000001,.63977600339999996,-.497956443299999996,.905180911900000051,.161725155000000009} +167 {-.915835208900000031,-.660438525200000015,-.681063163599999966,.413877113600000013,-.953248191499999953,-.81341555600000004,2.05895464000000006} +168 {-1.09269705249999993,.326465987299999982,.266533833599999992,.680712702400000036,1.01815878810000005,.233218001899999999,-.129432587399999993} +169 {.881349338000000038,1.82516547900000004,-.728461225099999998,-.068417133000000005,-.158519520800000008,.73070965379999997,-.421319115999999994} +170 {-.400449668400000014,.288342648699999982,-.566585109200000048,-.888059132400000006,-.0656497268999999967,-.294278115600000001,.676449921500000051} +171 {1.57752776110000004,-.840330059599999957,2.04173858630000016,2.32984183519999988,-1.14503528589999992,.399557771200000023,.778942424400000055} +172 {-1.35138604420000008,.589780428800000034,.396061735699999973,.890327567399999964,.163950096699999992,-1.65838572200000001,-.801303270899999975} +173 {-.400134923500000017,1.7838232303999999,-.575824705099999945,1.34718688340000003,1.33255513840000006,.213841448200000006,.804811582599999964} +174 {.597063510400000053,.473064011600000001,.579196152600000014,-.786963436500000002,-.717102337000000034,.715167333499999947,-.184210741099999992} +175 {-.207332464200000011,-.508642073500000014,-.1336930432,.600338638300000005,.719135020099999966,-.433855855999999984,-.00333081680000000017} +176 {.138187183800000002,.298053043999999989,-.839490654100000011,-.500216638499999977,-.20518891119999999,-1.83582950870000006,.298015076200000006} +177 {-.647382156699999989,-.960035165199999985,.0949214161999999972,.330002789500000004,.867773725399999973,.0434536245000000029,1.59024415100000005} +178 
{-.0924541791000000057,-.583577951700000042,.447311575800000027,.951425740200000036,-.411261937600000016,.632379838199999988,-.497868947399999984} +179 {.26124772839999999,.405688258799999979,.146524903399999989,1.0814786863000001,.00406575759999999991,.556992412900000011,-1.09462303549999995} +180 {-1.7564172066999999,1.12562690450000003,-.840881184799999959,.381742521800000012,1.34155235680000007,-.140583148099999999,2.3210439999000001} +181 {.451312361699999998,-1.95766206290000011,.182651126899999988,-.0433996271999999989,2.86954124649999986,-.18594722729999999,.224559855400000008} +182 {-.599534280900000049,-1.1799120622999999,-2.08161810909999989,.322046524999999972,.727392617899999983,-.698425549999999951,1.08073136769999989} +183 {.235727170200000002,1.51535569119999991,-1.45751082070000004,.32036148809999998,1.34153314819999991,1.41018310959999993,-.894210836900000028} +184 {1.05309181900000004,-.000452680300000000003,-.286014678899999986,1.16479264569999996,.834980497300000013,-.212197230999999986,.0857531170999999981} +185 {1.22753693939999997,-.949659017100000002,-.688678886100000054,2.06665280190000011,.313364225200000013,-1.40806001729999997,-1.04307007809999996} +186 {.879920402999999962,.995405862899999994,-1.21384621090000011,.376930310300000015,.510292239199999997,-.197296012900000012,.178819366000000007} +187 {-1.14002074499999995,-.360190270599999973,.717850881699999999,-.0211565213000000016,-.229872390499999996,-.0457393757000000012,-.82966467239999997} +188 {.392892114899999978,.0214388278000000009,.234701317999999992,.475436065500000005,.629564636500000052,.514406110699999974,.642514639099999951} +189 {-.652300516900000016,.132794446499999996,.443594557499999986,.157827957399999996,.503505353999999961,1.7122071673999999,1.76037511600000007} +190 {-1.28662317249999991,-.0674758805999999961,-.138712799800000008,-.691014875799999984,-.0640309887999999938,.379649505200000015,1.18874398829999994} +191 {.846319721800000035,1.13852020079999994,-.131784414099999997,.598323722099999955,-.447254518699999992,-.56791855229999999,-.110813991599999995} +192 {.324106341799999975,1.93793389580000008,2.01006230339999981,-.227501153000000012,.317708198900000016,.938505080799999947,.751857674000000031} +193 {1.34915393950000007,.0573070734999999998,-1.21055963780000009,-.735695109699999961,.645806597600000032,.284649078399999977,-1.27762524430000002} +194 {.403015834299999987,-.702319904899999958,-.735164966799999986,-2.06458880169999981,-.3117864105,2.20894107400000017,-.168991014899999986} +195 {-.302405008500000017,1.32417694300000011,.403215755999999981,-1.93190812609999996,-.169419242299999995,.444059271899999997,.259476933099999973} +196 {.734559425599999982,-.203544288600000012,-1.17271506939999992,-1.20284066680000001,-1.66310722420000001,-.253180068599999974,.104466248100000006} +197 {1.72710678579999999,1.3853946129000001,.872612605399999963,1.3462207249,.477322521300000024,-.606380240999999987,.908162213400000029} +198 {2.25718653870000008,2.20643163009999999,-.0104997386000000009,-.209696083100000014,-.683070304999999989,-.233840594499999999,-.961172095000000004} +199 {.462761906399999978,1.59595529070000008,-.641589998599999989,-.457756219500000006,.541094700799999995,.460668661400000012,-1.5952519408000001} +200 {-.238915340700000006,-1.16599048999999999,-.0261248265,1.27820666900000002,-.990302491999999979,.526708013599999969,1.15593068189999992} +201 
{.268875287000000018,-.62296499449999998,-.330651912200000009,-1.69027670210000003,-.954441376899999971,1.36367864709999997,-.856292481700000052} +202 {.689176716800000033,2.14820951560000006,.498053006399999998,-.351916704000000025,-.0270459119999999983,.419833138900000014,1.06634697079999996} +203 {2.55256130310000007,-.994642477800000036,.272436050200000002,-.333724070400000017,.986035965200000031,.263606529799999989,1.5201540897000001} +204 {1.14403184699999994,-.182860421299999992,-.263943658400000003,.128113654100000002,-.170564863099999992,.525515185099999949,.339569718500000006} +205 {-.199949830800000006,-.0535677883000000021,.124071737299999998,1.27164723510000011,.82701131189999999,-.0350608846000000016,-.77017510840000003} +206 {1.37667149309999992,-.95973742679999996,-.36742834219999998,-1.76111902969999989,-.173540226399999997,1.03666363000000006,.583956905199999987} +207 {.219488471399999996,1.49355736500000003,-.296005551199999994,-.90992407080000004,-.362967513399999997,.414022882500000022,1.02610014240000003} +208 {-.626356511699999952,.451006771500000014,.830666475499999946,-1.69760924499999999,-.496097363399999991,.214937204200000009,-1.30814467289999992} +209 {.610130707299999964,.936258917400000001,1.46304517580000004,.683588185800000026,.146078652199999992,.2721657086,.599936775900000052} +210 {1.18197894870000009,-1.2669122181000001,.0515611762999999973,1.15438572570000009,-.396409672499999977,-1.70880150109999995,-.57787715340000001} +211 {-.956037801100000029,.3785139421,-.439687890899999989,.00415066319999999982,.423100240199999977,-2.08408695630000018,.145084581099999999} +212 {1.01921145110000011,-.226671830099999999,-1.6649089157000001,.624440916600000007,.00955146899999999993,-.268799605599999991,-2.07370435930000019} +213 {-.871882128900000053,2.67174851949999992,-.230836330799999989,-1.03435180919999992,2.38415523010000019,1.2262660742,-1.64874036519999989} +214 {.470900368199999975,-1.23728225639999989,1.38869986449999994,-.700926176099999965,-.361773524899999976,-.429102584299999978,.241832189100000011} +215 {1.07011020759999997,.828239685400000036,1.2763823151,.835332467099999976,-.300512544300000017,.342190845500000007,.0896510513000000064} +216 {.292761892500000009,.359300273999999975,-.183769358299999991,-.216575713000000003,.0424143033999999988,-1.3685560094,.565282802100000037} +217 {.703327599199999987,-.328551898600000003,.625360324600000017,1.24194033189999997,.513903299300000005,.79113168519999999,1.30704948859999992} +218 {-.154306285099999996,-.952851227599999961,.310914560000000006,1.23690165240000005,.757178845600000039,-.622301646299999955,-.362391166900000006} +219 {-1.21641192329999992,-.0760625444000000012,.0502443865000000017,.476879506600000003,1.45398986849999989,-.230718612699999992,-.599637059099999981} +220 {-.593195789300000054,.118963653799999999,2.65146250809999984,-.574563455700000003,-1.29852496299999998,-.1652479684,-.64904922460000003} +221 {-.730071020699999962,-.159567434000000008,.0315072963000000011,-.642881441600000048,.105291766100000003,.525893202100000012,-.125172919800000004} +222 {-1.24988166070000006,1.17002024940000005,-2.39418428570000019,.26081060020000002,.463391132699999986,.380796480900000012,-.150081142000000001} +223 {-1.75612131499999991,-.944323258599999993,.523058050699999999,.939027702199999981,.99504780209999999,.404737104399999992,-1.08290412659999991} +224 {.178953363300000001,-.223755840399999989,.544645688999999988,-2.78746393170000006,.476705902600000009,-.759555165300000001,-1.7921715787000001} 
+225 {.397795907299999973,-.421426869900000001,-.124510450199999997,-.268142685699999994,-.212733585800000013,.840345141499999948,1.12676505819999995} +226 {-.280215882799999982,1.19305702609999997,.876440713799999993,.248756123400000001,1.46007476750000009,2.18909694340000005,.691194481200000044} +227 {-.738586596500000026,-.66721663649999996,-.709087469099999979,-.708078655699999948,.249833497999999987,-1.73164081199999997,.0668768606999999987} +228 {-1.02283480260000004,.606172471699999993,.756252329599999995,.94170176910000003,.104740193499999995,.154381590200000002,1.12481806839999998} +229 {-1.7266576816000001,.520473117800000051,-1.34441206499999999,-.489290229000000021,-1.49455995019999999,-1.13593549959999995,.529627545300000002} +230 {1.12795641160000004,.262986498499999999,.886993730799999991,-1.4725283521000001,1.43142524530000004,1.67757150489999995,.680434798399999985} +231 {2.12177881920000022,1.50976258270000008,.770647312599999967,-.0274393648999999989,-.947825017999999964,-.155789190100000002,.280352965100000018} +232 {-.114018334099999993,-.697716710699999965,2.12905547660000005,-1.20930142210000002,.643230374599999988,-1.03496178140000006,-1.93832260050000005} +233 {1.61941048779999996,-.223324149500000013,.42611984009999998,1.74818015959999995,1.20880000970000001,-1.12577222209999994,.866940013200000026} +234 {.052215470299999997,.112698246000000002,-.899679771500000003,.256064839099999997,1.79247532240000007,.698468815200000037,1.16755179339999993} +235 {.577969726600000011,.281453795700000009,-.113612538900000004,.00201565090000000021,-.562425976999999966,-.756306875299999959,-1.33799260060000003} +236 {-2.11086872959999994,-.817727624400000019,-.198314007300000011,1.6516456156999999,.149479896700000003,-.383105445599999983,1.5576889746} +237 {-.657304014099999945,.0778514626000000043,1.87987995200000002,-.278979751399999976,.591653573600000038,.0477829194999999998,.864079532500000025} +238 {-.347595186399999978,-.383732512099999978,-.65287182730000004,-.873353124800000047,.254571146000000026,1.02210247899999995,1.90422143479999995} +239 {.769514926900000007,-.992578579400000049,-.119117814700000005,1.18367778060000006,-1.24672882400000007,.776474354800000044,-1.42297564840000001} +240 {-1.25075306359999994,-.403285335600000017,-.171413955499999993,.0445253809000000014,-.248180094900000009,-.191242379399999995,.89274864070000004} +241 {-.767634980900000041,.548972032400000032,-1.35879077819999994,.496215351500000013,-.482630881000000012,.220800732899999991,-.0836122764000000046} +242 {1.12009768979999991,-.866787932699999986,1.13902484749999999,.442652503400000008,.410334822400000021,-.774739526399999989,-.786301156399999979} +243 {1.77819956590000006,-.0286709810999999995,-1.23395254799999998,.585436259399999992,1.45675593380000001,-.126059818500000004,2.12225773770000004} +244 {-1.31569695789999996,.0247847211999999983,.180298903399999988,-2.58586942519999985,.0635146050000000018,1.21600181689999998,.673665649000000033} +245 {-1.41165230069999992,.00586818039999999989,1.95006510020000001,-.600387757999999994,-1.23760769970000006,-1.48188364420000007,.169434014599999988} +246 {.112649805500000005,-2.13964574900000004,.0403707353000000019,.719769690000000018,-1.23129349119999998,.257843053099999997,.420404620800000017} +247 {-1.61393694710000002,-1.09748499379999997,.338253219400000027,-.703877578099999957,-.28468875560000001,.22827963879999999,.385303755500000011} +248 
{1.03426193699999991,3.02603706219999991,.152337115699999998,.300338477699999995,.786773236399999965,-1.07297095920000007,-.699423401200000017} +249 {-.343546354200000015,-.677185394600000046,-1.47974212929999993,-1.12888205280000009,-.93151062650000005,-.643306945900000038,.882490120100000053} +250 {1.88919807360000003,.251042909599999975,-1.74554409450000003,-1.12446282370000006,-1.88448537670000005,.00681455010000000042,1.50551059889999994} +251 {1.99313334489999994,-2.11294668430000021,-.266432878099999992,-.174904041799999993,1.7766417751000001,.305121564299999981,.986091900699999968} +252 {-.0318201261999999999,.068235080399999995,.235682226699999997,-1.56344000720000009,-.755128983999999948,1.09144896430000005,-.549092171399999995} +253 {.837665389400000016,1.03961184639999993,-.315986327000000011,.293054242399999998,1.31213546309999995,.673386172700000007,-1.11088231679999994} +254 {-.902797575499999949,-.0323422980999999998,-.334416215999999988,-1.5982195374999999,-2.31388218779999999,-.803254289499999996,.23332418969999999} +255 {.36519844709999999,1.12725735119999992,-.583789564399999961,-.357370322299999987,.760000892899999947,-1.31724082949999999,-.224802628599999998} +256 {.200960131699999989,.432167406099999984,-.804484711500000005,.223544881499999987,-1.25545765180000002,-.505506295600000044,-.416349239199999999} +257 {1.48911343209999991,-.116735605399999998,.315434744499999975,-.279287618999999987,-1.52336677269999998,-1.81853339260000002,-.181809197799999989} +258 {.455588188200000022,.527908782999999993,.112497337899999994,-2.33509447809999982,-1.00806595850000003,.575960797599999963,1.56221538330000009} +259 {2.6760025315,.696139024599999989,1.39602245459999996,-.426047028100000003,.696953359400000028,.225431412899999989,-.249505623700000012} +260 {1.17960987160000008,.0968713310999999994,-.140328970500000011,-.24490121749999999,-.317291591300000009,-2.12183379970000008,-.970618681900000002} +261 {.163899819700000005,.138957287900000004,-.0493648104000000004,-1.13778918640000004,.733317364399999949,-.114702473999999999,-1.74937251670000005} +262 {1.16568846710000007,-1.35144639830000002,-1.10832852390000003,-2.48316749289999983,-.189231087800000003,.0215783750000000003,.297749178399999981} +263 {.857659219099999981,-.0143111821999999996,-.59434016089999997,.563511746300000005,.261312102200000007,.24024390100000001,.0440015282000000008} +264 {.420819855700000023,.315028424799999984,-.0466499064999999977,-.146267049600000004,-.294801811500000011,.844031033799999997,-.92911250710000004} +265 {-2.04780687620000013,.138778553100000007,.461241755199999992,1.31312150359999991,1.74378712690000004,-.280958573699999992,-.774021634599999953} +266 {-.710328487399999986,-.182875276000000003,-.314781595400000003,.978442416399999959,-.70497398499999997,.58951174620000002,-1.12883754269999992} +267 {-1.73750057930000001,-.834102927999999966,-.673366201200000014,.618976552400000046,-.216308802800000005,.689588537200000018,-.673751642900000003} +268 {-.875690872600000003,-.280198863700000023,-1.64036293409999989,-.900444755400000019,-.850517252800000012,-.390342322700000022,.390114057999999986} +269 {.791412084900000035,.756344451300000054,.617108647699999957,-.34905716819999999,-.702141338400000037,-.16991708459999999,-.163262376900000006} +270 {.374534802900000019,1.43048141669999995,-.17248863619999999,-.299851894999999979,-.518087935099999952,1.04720635630000003,-.0604743630000000032} +271 
{-1.15086352960000005,-.158720592100000002,-.211448509199999996,-.340968039599999995,1.04499649110000004,-.560695994699999978,-1.79085541379999991} +272 {2.08886165289999992,.0694257481000000037,.0594976988999999998,-.191110719600000006,-1.60860954959999991,.155186271699999989,.938644101299999978} +273 {-.291027550400000001,.468685461000000025,.0914838748999999979,2.14104208070000013,-.602008723900000042,.1534123503,1.74134612550000001} +274 {.304428167600000021,.449059630200000004,1.16265278209999989,.512627097099999984,-.6350076214,-.616919633299999992,-.66196055949999999} +275 {-.600943784399999958,-.946463257000000002,-.515791093799999989,-2.27490615929999995,-1.40216195539999999,-.476157515400000009,-.0286450500999999996} +276 {-.72744882870000005,-.950732644400000049,-1.10539167710000008,-.645658110900000026,-.339414670500000015,-.967052066399999966,-.76470162559999999} +277 {-.808064781500000051,-.640953894999999996,.836987269099999986,-1.30041054319999994,.664671154099999995,.71339209910000001,-.830842054999999968} +278 {-.200593087899999994,-1.41195027279999996,-1.03629644129999998,2.79286823750000002,-1.69866367579999999,-.199006064099999991,.255052621599999974} +279 {-1.24152558060000007,-.685120869899999985,.412668897899999998,.0288477650000000009,.530058095200000023,.137290182000000011,-.804384205099999972} +280 {.735955417799999956,.639745477199999968,1.98027872540000005,1.41145022879999993,.999676049600000005,.202230660400000012,.185720644200000007} +281 {-1.55942898879999992,-1.32010546700000009,-1.12243354030000009,-.450203946199999983,-.0537677479000000028,-.147418620400000006,-1.66641280659999991} +282 {-.104645716900000005,-1.02856475110000001,-.15488009920000001,.605178448400000013,-.29684984809999998,-1.16678044470000009,1.22644319980000005} +283 {.392424771900000025,-.370301443900000027,.862016709000000048,-.628459251100000005,-.444695790099999988,2.22324951879999988,.702694623800000029} +284 {.310886027100000017,-1.45086906050000009,.141588360800000007,.738055237999999947,2.9819107585000002,-.44059189700000001,1.51269061410000005} +285 {-.121462238200000003,-.509479624899999983,.960074846799999992,1.5637605240000001,-1.78962189189999998,-1.36478878409999993,.285074042199999989} +286 {-.245355402599999994,-1.57608796539999996,1.6824999289,-.176951577800000009,1.73533912210000008,-1.60256210160000001,.659067562000000051} +287 {.158925424300000007,1.53129934609999996,1.4418918436999999,-1.63866175139999992,1.34973513900000008,-.664335617999999961,2.07501691339999983} +288 {-.0257149236000000017,.705987502599999983,-1.1043416707,1.38340203429999997,-.804692831999999969,2.6442580109999998,.96234228690000001} +289 {-.914802569699999957,-1.4426557550000001,-.403511094600000009,.349382177299999985,-.294879171399999973,.862594187500000054,-1.28689541709999999} +290 {.537106351599999976,2.00863756150000006,1.36721752520000006,1.59330678119999991,-.963580817800000045,.509064056499999973,-1.18868806790000003} +291 {-.79776947060000003,.961912365500000033,-.337708938000000014,-.818872511100000033,-1.45236855510000007,.627214471299999965,.245253593399999997} +292 {-.491468394799999986,-1.26067911339999994,.312167224700000023,1.18859111229999992,.650146895000000002,1.22810972799999996,-.395909948399999989} +293 {-.724906786100000033,-.266817550999999986,1.30988730729999991,-1.56657465849999999,.0801381949999999954,.422304853099999988,-.0781362892999999947} +294 
{.767995741999999981,.486401114600000017,.290400250000000026,1.17896675449999999,.414822447699999985,.831105965799999957,-.0481566666000000018} +295 {-.560099495799999958,.307031193199999997,-.940870495299999998,.0577405026000000002,-.0429363011999999988,.799643938600000048,-1.87170498649999995} +296 {.33277159919999999,-.816910472600000048,.753895715700000024,.243657371099999992,-.32147748009999999,-.811689744599999985,-1.07646353220000002} +297 {-.409102947699999997,.773848287400000001,-1.36489756749999991,.595542132499999988,-1.30260391860000002,1.93834528830000008,-1.30266183420000004} +298 {-1.82397289149999997,-.730615835499999977,-1.03796443150000006,.289227975299999995,.176883528499999998,2.07023614720000015,.956072196100000049} +299 {.469825302399999978,.699644008599999978,-.118802991699999999,-.741050411799999975,.194064862100000013,-2.11199754770000014,1.66829038519999995} +300 {.845436760000000009,-1.3251878077999999,-.614769520600000008,.907981292899999959,1.39302266140000008,.489293049699999982,-1.62938092969999992} +301 {.286287532199999972,.421419099999999991,.514900188499999967,-.934227101399999982,.758154264600000016,-.540006140599999962,-1.06463744540000005} +302 {.331709827299999982,-1.0168729366,2.15246214979999984,.91186960100000003,.234475966799999991,1.60859334710000002,.712974481299999963} +303 {1.26412273050000001,-1.95817834440000005,-.168080946700000011,.837547679000000045,.811178077300000022,.476041136199999992,-.031995310300000003} +304 {-.368573119700000007,.0897907839000000041,-1.08844004010000006,.297913802999999977,-.226885744400000006,-.035532230300000002,-1.73904956330000005} +305 {-.70043595049999996,-.524838706399999966,.221367791000000008,-.825127636199999959,-2.97300995440000015,-.720835333999999994,-.92127118210000003} +306 {-1.19251403530000011,-.471853618899999994,.0156697382999999996,-1.36421681679999995,-.0398020820999999969,-1.0290975779,.365163497700000006} +307 {.496803884500000015,.179715160099999993,.300918090400000005,-.553144421399999975,.696205009399999963,1.70849620879999997,-.435266824499999982} +308 {.0835377099000000067,.763433994699999952,-.896845105000000031,.196370939999999994,-1.37800191820000006,-1.59215427370000007,.821723562700000021} +309 {-.486691956699999984,-.166708812300000009,-.852125054900000012,-.605089612400000032,-.902889263900000016,-.260457089199999992,-.742034287399999992} +310 {-.509308481000000035,.777705221800000013,1.92494218569999997,-1.26442757909999992,-.940195208500000046,.187315242800000004,1.25488163210000003} +311 {-.674356240699999998,-.140434547699999995,.110938636199999996,-.561973633699999975,-.0640010060999999936,.429394573200000018,2.10994323250000004} +312 {-.354977699699999982,-.414997239199999979,-.695136132699999965,.269422052999999995,-1.67256441269999989,-.103736590700000006,-.544080938899999977} +313 {.615688259000000015,.410188042199999991,2.63185895099999989,-2.01482799719999983,.535123171900000028,.189445680899999996,.383689563399999989} +314 {.279478146599999977,.331114675700000027,.455681233399999996,.538305730299999952,-.366418395299999977,.438845019800000014,.0856931306000000004} +315 {-1.48740590090000002,.284592903299999977,1.21281886949999995,1.3166270055,-.337583444899999974,-2.27025181610000004,-.934280166500000009} +316 {1.10694583170000005,-.983479771599999997,.159304366499999989,.872298217700000045,1.03415783119999993,-.739159845199999999,-1.4975263272999999} +317 
{-.821531821400000029,-.313773683799999981,-.415733711200000022,-.355005628799999973,-1.41055804690000008,-.487340323799999975,1.28068970410000005} +318 {-2.75576856120000002,.217878657000000003,-1.38336948149999994,.283154308799999976,.325190960899999992,.369355446300000001,-.203378455699999988} +319 {.94915132170000005,.202514584100000006,.177220125899999997,.931742239800000038,-.16592130699999999,-1.3905671743000001,.468501768799999996} +320 {.180519265300000009,1.29955660470000001,-1.00138780530000004,-.306778331600000009,-.998135093200000045,-1.19729156189999997,1.70774174170000004} +321 {-2.25519978499999985,.814489636999999989,1.63856747770000011,1.42488990380000002,.525865063200000038,-.992715385300000031,1.3405574992} +322 {.0703703980000000007,-1.14221749829999997,.302943703000000009,.770022803299999947,.25181796369999998,.661244592900000039,1.39291275679999993} +323 {-.370978364599999999,-1.42979763450000008,-.231596617100000013,-2.21720320099999979,1.6698073779,.0515929040000000019,-.138721868999999998} +324 {-1.26367059679999993,-.0959723660000000034,-1.5219004384999999,-.250900578400000018,.0105861474000000007,-.58374446560000004,.980138409199999949} +325 {1.1346277010000001,-1.07866146229999993,-.344187288500000022,-2.17606565449999989,.915729019599999972,-.426485142500000025,-.894223081699999978} +326 {.486764605999999989,.211472498700000011,.197059750700000008,.344776999199999989,-.494755880699999984,.231837212799999998,-1.12875031739999998} +327 {-1.89069601389999997,-.0878982424000000068,.159859568099999988,1.26366159019999991,1.81106130489999995,-.70692247330000002,1.24428118199999993} +328 {-.462304299099999982,2.02745354910000009,-.7471578002,.906742127899999972,.392563646300000013,-.145738446100000002,-.688348359599999959} +329 {.365293272800000013,1.44670585810000007,-.1812878198,.546418885999999993,-.553906717699999995,-1.05893712249999994,.0159691695000000015} +330 {-.435212708000000004,-1.71257109670000007,-.450716705399999973,1.10382312769999991,-.257278313799999991,1.32393141400000003,-.49583124449999999} +331 {.994851382799999961,-.618604781499999978,1.94844148109999993,-.237957950800000012,-1.32062892520000008,1.11948684209999993,.60652979380000005} +332 {-.69755101730000002,-.0655495922000000064,1.79105722490000008,.17890202429999999,.546106997700000041,-1.88355028799999991,-.655873904300000032} +333 {-.0900792911000000002,.894269105300000033,-.452041118099999972,-.616192736499999949,1.61703642830000005,-.365648063799999978,-.264682358499999992} +334 {-.0653709138999999939,-.320024478999999973,.655492870899999969,.0456022809000000001,-.865624300000000013,1.09580202139999994,-.240624276800000009} +335 {.14730214350000001,1.43790138709999993,-1.09238765709999996,-.517928675199999966,-.291090132200000018,.147241509599999987,-.748043468899999953} +336 {-.0146519738000000007,-.206187994699999988,-.164250675900000004,1.10366785369999998,.448527136300000018,.593697534300000052,.820839591599999974} +337 {.794897105899999956,1.87325167339999998,-1.99275784829999991,-.665182257600000049,-1.24434149050000009,.364382067600000026,.129521810900000006} +338 {.397344664100000022,.522516785099999992,-.954207362500000045,-2.36960937799999982,-.362597784400000012,-.252781014299999995,.94709244420000005} +339 {-1.21585543450000011,-.249404056000000013,.569891528700000038,.769986894500000019,-1.26422429199999997,1.01732365040000006,-.143815357000000005} +340 
{-.316406726199999988,1.17184001180000008,.703330723200000008,2.42204834599999996,1.82045090030000001,.227769985300000005,-.688922056200000021} +341 {-1.15016110230000002,1.55952561420000002,.450356286200000011,-.223056370799999987,-1.3368108083000001,.536355647800000002,1.43198786429999991} +342 {-.0144051173000000008,-.00476639750000000008,-1.94131399610000011,-.382127095000000028,-.0268200035000000017,.991130505999999967,-.00349404739999999983} +343 {1.11517642159999997,-.242248203800000006,-.35323497949999999,.875143816200000035,-1.58008105539999999,1.73903288839999992,-1.22781001290000003} +344 {-1.03012598719999993,-1.97314528060000005,-.637876246100000044,.278137029899999999,-.425119978000000009,1.49293761199999997,1.25553350619999993} +345 {.571305045200000028,.952921329599999978,1.11542626709999992,-.359707351800000019,-.798259930600000001,.267731139200000023,-.251532390599999989} +346 {-1.49298988119999998,-.107299297799999999,2.27000335610000015,-.0944134278000000049,.390845286000000014,1.32600418330000003,.00605477540000000022} +347 {1.46709766329999991,.344240959899999976,-1.59691734290000009,1.88709743190000001,.475685923499999996,-1.8045714115,.647207277699999972} +348 {-1.84597486129999999,.271745092099999974,1.56028869229999989,-1.20463209530000004,-.8484454991,1.39984859670000006,-.2075913754} +349 {-2.39776595970000006,.516764777699999955,-1.6814312254999999,.966952908699999947,.480903763900000003,-1.84665839789999997,-.940889355699999985} +350 {-.20416688320000001,-.928371291499999973,.158651591700000011,-.204720923599999993,-1.6511528500999999,-1.32912789170000001,1.80230135850000006} +351 {.148702451499999999,-.506141383500000042,2.11019002179999982,1.57091099870000006,.553171586000000048,-1.07674172239999999,-.119874576600000005} +352 {-.414733917499999993,1.4467055795999999,.254451327700000007,-.537741203599999995,2.00580300569999981,1.21759338040000009,-.0945320726000000028} +353 {-.0878004978000000019,.606211748999999966,1.48167461130000011,-.149313781099999987,-1.15818754309999994,1.41376475609999996,-.826566784700000001} +354 {.379919717900000009,-.275955014200000015,1.28703710580000008,-.0597762031000000013,-2.00844348630000002,-1.03193734649999991,.472046275299999984} +355 {.1037567987,1.3682850473999999,.246376318700000013,-.744504071900000008,.112361837899999997,-1.1343188649,-.147944150899999988} +356 {-.178899955599999994,1.09714908899999997,1.13742705059999993,-.00994645679999999963,-.0157262749,-1.63119871020000007,.382264786799999978} +357 {1.11608101040000007,-.438869064699999978,-.210167498099999989,.746513479800000046,-3.02894588520000019,.785674530500000023,.659220919799999971} +358 {-.614980713699999959,.374309532100000009,1.3863538836,-.0707282490000000069,1.04504401350000009,1.96606170949999992,1.13284566349999993} +359 {2.40242493010000002,.604316236600000001,1.01015463390000004,-1.2760122103,1.12314836679999996,-.292428605000000008,.640133017699999973} +360 {2.16148181360000002,.711364889700000003,.708431234199999982,-.602823907999999964,-.069024530099999995,-1.55689971239999991,1.57457639610000011} +361 {-1.04849299029999998,1.57142138889999994,-.551774973699999949,.0334705681000000005,-.593370215900000053,.552581643799999966,-.0576799309000000024} +362 {1.7136804109999999,-.1306427228,-.895227830599999996,-.544480707900000027,-1.31158432480000009,.573536988800000036,1.87282146420000006} +363 {.908351185999999977,-2.31638295989999987,.504141801900000019,1.87929375900000006,.091010149999999998,.775131463000000021,-.329059254899999987} +364 
{-.135502005399999986,1.0728326291000001,1.15990535649999993,-1.22999365930000004,-.385970444999999995,-.188716299700000006,-.220883768199999997} +365 {-.593532260699999981,-.617800747999999955,-.00468777419999999966,1.06367575320000007,.65081334099999999,.822359399500000032,1.58286974790000001} +366 {1.26808791799999998,-1.17542086550000002,1.01746397049999993,-2.78519512520000001,-1.22394201429999994,-.418017481199999985,.146245149800000002} +367 {.640754785000000049,.582724759700000039,.916567720500000016,-.152392390399999994,1.75091012089999998,.19889735280000001,.098574678900000004} +368 {1.8408975541999999,-.0521965936000000005,.1167969227,.00937672879999999938,-1.17670368660000002,.626194214700000029,1.69480288250000011} +369 {.122973872299999995,1.07030469680000007,.521370025099999967,.0731634674999999957,.359446782799999975,.489417869299999975,.286450262700000013} +370 {1.47250744040000003,-.793305523699999959,-.400462417000000015,1.56075048919999992,.164744184900000007,-.937585762500000031,.883245987699999957} +371 {-.326719984700000021,-.442450328299999973,.453801773000000019,.987237031399999965,-.371024530100000027,1.25288467999999997,-.0660601625999999997} +372 {-.255108941500000019,.00240312739999999983,-.229398530099999998,-1.09608887840000002,1.42693238989999993,1.86363413899999997,-.269838914500000027} +373 {-.104077513600000005,-.368539766099999999,-1.09113855029999995,.88035824159999998,-.582443008100000048,-.361478615999999975,.140346729400000009} +374 {1.68555068259999996,-1.19239842539999996,.147600406999999989,.813252497100000027,-.00123406959999999995,-.1631749438,-.294766712200000003} +375 {.0589294278999999976,1.1153044902,.235195857199999997,1.01272677850000004,.179105760199999992,2.5589331636999999,-.447791975200000025} +376 {1.59615701439999991,.176286450700000014,-1.06423990090000009,-.902439304800000008,.847389922600000012,-.783345075499999988,-.899457201799999995} +377 {-.0690238833000000052,.71491517910000002,-1.5977424845999999,-.746587669399999987,-.277534709499999976,-.00875805839999999926,1.6926610364000001} +378 {1.6021166805,-1.64208226000000002,.0144723707000000007,.551196542500000053,1.26517129540000006,.3192534078,-.0115205988999999998} +379 {.199666484000000005,-.453163427499999993,1.8881201942000001,-.0730184076999999959,.1456817631,.2642796959,.297354891299999979} +380 {1.79477209190000009,-2.43301455350000007,.420608767200000011,.0555282404999999993,-.428361841600000015,.472512865000000004,.0670555964000000004} +381 {.441738979099999984,.728581258099999962,.0396413860000000007,-.740342449599999952,-.524878715000000051,.0718627107999999931,-.827501426800000051} +382 {-.477399729900000014,-.36194857720000001,1.24169640160000005,-1.69473546779999995,-1.27285707599999998,.674868314499999955,-1.12093516009999994} +383 {-.267110452200000015,.487284345999999979,.609140620499999952,-1.18866960549999989,-.973091141199999954,1.44272311580000001,-.154795097900000012} +384 {-.157522560099999986,-.208466491000000004,-.0487785035000000006,1.07770084339999994,.114580812899999995,1.46341532219999992,-.705870091500000019} +385 {.645305614500000013,-.936290679600000009,1.42261081299999992,-.251974394499999976,.65192805620000005,.562565764100000032,1.39345881510000003} +386 {-1.53010058999999998,1.10043734389999992,-.0756898396000000029,-1.02033760229999992,1.1905105373,.0699757473000000019,1.02514534019999992} +387 {-.0219203268000000007,-.939948998800000046,-1.60012083010000006,2.09991249979999983,-1.73762631350000007,-1.18250483719999999,-.988920957100000053} 
+388 {.568709701199999973,-.208310854600000012,2.24815975470000007,1.46998923710000007,-1.38025708149999993,-1.53436050060000007,-.816695783000000008} +389 {-.650390328600000012,1.53436690610000004,-1.89878462899999989,-1.35839958390000004,1.17004109600000006,.410662879500000022,1.48913285329999989} +390 {.102854642900000001,1.75420308079999998,-.825169641699999956,-.498008903700000005,-.500152417799999971,.0869030248000000061,-.376863964300000021} +391 {-1.19038646939999992,-.685714048200000037,2.10417386369999981,.571577059699999945,-1.5172272980999999,-.406879681300000018,.68415905450000003} +392 {1.98559376469999993,.219841803899999993,.545102519600000002,-1.47702445729999998,-.958983780199999969,.227889536199999992,-.598702578799999996} +393 {-.303930690800000014,-1.56290319809999989,-1.03671965759999996,.713462150799999972,-1.45361240509999989,1.02750221310000001,1.17779922439999996} +394 {.209470700600000004,.715818259099999965,.763444846399999988,-1.43393899960000004,-1.3918601934999999,-.402973777500000019,2.19101659390000014} +395 {1.12961929930000005,-.596176894499999999,2.5335540943999999,1.03608791860000005,1.41471414200000001,.104101957800000006,.1865057933} +396 {-.244762171599999995,-.498944165399999973,.604094528399999997,1.16157501749999992,-.685298645699999986,-1.41246524720000011,-.105060197399999999} +397 {.860072800700000029,.505710191699999978,.291245188900000007,-.34858952339999999,1.34423496780000007,-.233554210600000006,.327904337900000009} +398 {-.294306823599999989,-.489873348699999989,.907223305799999991,1.55576932120000011,-.160423584499999994,-.262096239600000003,.159432478300000013} +399 {.907181452500000041,-.179428524700000008,1.20781831219999991,.0722424217000000035,-2.33001046260000022,.271600707800000007,-.267154522900000024} +400 {1.07475878060000007,-.44271705090000002,.260632468000000006,-.876666875600000006,-2.31632467269999998,-.452718045600000008,-.998629654000000033} +401 {.405671949799999987,.356474548599999996,.306279869900000001,1.26157816280000001,-1.22211725130000004,-.711416796099999971,-.239716140599999988} +402 {.378429137400000004,1.32638107030000008,.101475191000000006,1.5918001422000001,1.48271945999999999,-.499709992599999986,-.755450846599999948} +403 {1.02390849160000008,.00159313860000000001,.647361797600000011,.0344330378999999978,.962072232200000044,-.720508982999999992,-2.17063980610000007} +404 {.0178856717000000012,-1.37753197890000001,-.834281939300000053,.0445978125000000003,-.752359004300000023,-.948935235899999951,.533129872099999957} +405 {.814699937999999957,.0160530694000000015,-1.5460789452999999,-1.4344577193000001,-.66149786850000003,-.191428755499999992,-.546875313899999993} +406 {-.4526861358,.285031298900000007,.277128312599999982,-.391255673799999981,-1.05344268210000003,-2.64407030140000021,-1.20100503849999996} +407 {-.387864890999999989,-1.67477370429999994,-.581865054700000051,1.02835446949999998,.518935345800000003,-1.2313251221999999,-.358076627599999975} +408 {-1.34366679609999995,-.203774498400000004,-.363189528099999992,-.696114467300000039,-.294857039300000012,-.709475332599999953,.369531943099999982} +409 {-.230801693000000002,-1.01081738330000004,.227789713899999996,-2.23661548069999983,-.00749619129999999981,-2.43806591859999999,-.572537871500000017} +410 {-.428507551699999989,-.645171909200000004,-.988285549099999994,-1.14187992500000002,-.341405432000000009,.167510841999999993,-.830665149799999947} +411 
{1.6359135815000001,-1.58877944069999999,-.257782923800000008,-2.18467241349999997,1.48988955660000011,1.29441119640000002,.820866932900000013} +412 {.371531408000000007,1.00357349120000006,-.687487565800000033,-2.37090684790000017,.46545252199999998,-.226062885099999999,.668850233499999947} +413 {-.0812429422999999956,-.0693526715999999988,.606297591599999963,-.178874875799999999,.0165461476999999996,-1.50802505639999995,-.516519617900000005} +414 {1.29772908460000003,.938743439100000021,-.0973837322999999977,-.417107873600000012,2.90784151810000013,-.388842770700000007,-.787591083000000025} +415 {.520434721200000006,1.77690830029999991,-.761432862499999974,-.86707179599999995,.231608431400000009,1.45538748160000009,-1.10508536220000009} +416 {.0150948179999999994,-.127077697399999995,-.104512851700000006,.0674419648999999982,-1.28801577579999993,-1.13511631299999993,-2.56560733709999989} +417 {-.467886673199999992,-.438224182299999987,-.758838663100000033,.886843655099999961,.597457380699999985,-.758862455000000047,.181419207400000004} +418 {-.878321326100000022,-.612781453199999993,.215889669300000003,-1.7293359585000001,-.424326617700000019,-.993225074799999996,1.1985120414999999} +419 {1.53166541399999989,-.538649964300000006,1.27877402380000005,1.10594401040000001,.851319672499999958,.747139547299999984,-.0953179331000000019} +420 {.143560983599999997,-.56550392490000001,-.117838308899999994,.124726404200000002,-1.65608866449999992,-1.34359374050000002,-1.27431100019999999} +421 {1.68741667569999998,.368748703600000005,1.0614564192,-.883179300999999972,-.33795145510000002,1.74716407779999994,.80142596570000002} +422 {-.594335812399999996,-.0215479457999999992,-1.05781188770000001,.848601375899999955,-2.1215888612999998,2.45856701780000009,.849489046299999995} +423 {.258771460000000009,.618619678299999975,-.467438109599999985,.979171657400000006,.390603952500000018,-.904049315700000022,.360092749499999976} +424 {-1.22342591779999998,1.79308741320000009,.608345883799999987,.36888462700000002,-.612681796800000011,1.06817057049999997,-1.12838585770000011} +425 {1.46953807280000004,1.27469052030000007,.903334854100000029,1.06953845639999989,.0495970707999999968,-1.4292992847999999,1.01521163939999992} +426 {-.258594123700000011,.217853260800000004,-.650114791100000033,.162822760799999994,-.0388920946000000031,-.2438624083,-.29839545369999998} +427 {-1.11025665900000003,.307744520600000016,-.303926275799999979,-.2914827049,-.506728716899999987,.148683854199999993,1.75340432729999995} +428 {.507028760099999976,2.03014259409999998,-.161735959499999998,1.6464490623000001,.615878546300000052,1.19936941389999996,-.803310912499999974} +429 {-.309578308099999977,.712022795999999958,-.151945046299999997,1.84401253430000001,.946315120900000029,.532491358100000034,1.49294312520000005} +430 {1.34134369940000009,1.05558158200000007,1.61716253649999997,.173239468600000002,-.617688498100000039,.643485370199999962,.0322699042999999991} +431 {-.298355752699999976,-.689610865100000026,.921756549499999966,-1.94766560399999999,-1.0873696679,1.72081365680000009,-.365900561299999982} +432 {-.382533666800000005,2.63414748010000022,.343803913700000019,-.561120457100000025,-1.10880713880000004,-.44900390979999999,1.3327701824} +433 {-1.27929004540000002,1.54878109909999995,-.733095341199999972,-.851149656699999979,-.100426127800000001,1.51889959249999995,-1.49331540519999995} +434 
{.174429602200000006,.816827898500000038,.440960667600000022,-.440617340099999977,-.373710396300000025,.611513070799999947,.481779809599999997} +435 {-.156577333499999999,-.402532470500000017,-.776053870699999981,1.23981695840000006,1.79502497800000005,1.25212422370000009,-2.90326904610000014} +436 {-.372367924700000019,-.946325465799999987,-.313278959999999995,1.70636984689999993,1.78746626050000001,-.931257642900000016,-.799253063800000052} +437 {-1.0357220949999999,1.04119289010000005,2.31824751750000013,1.47283282689999995,-1.10707152810000009,-2.74476058599999995,-.880488179899999945} +438 {.744974491200000033,-.422213988799999995,-1.44482719699999995,-.317871083100000007,-.109146578800000005,-1.3233062967,.309697930700000013} +439 {.785365002000000034,.0481544407999999965,.67573685859999999,-1.35631182170000009,.977090384799999989,.000845476200000000021,-.918989633599999967} +440 {-.642214177499999983,.356576202800000019,.433057564000000006,-1.31585769139999997,.0917912523999999952,1.0646451643999999,.7943853528} +441 {-.980670066600000001,-1.02315466119999998,-.764233354399999953,-2.06942846209999987,1.2975256033,-.0165881301999999987,-1.17156368020000001} +442 {-1.0364739730000001,-.464015120299999972,.0339940074999999994,-1.73044703639999997,1.49391378890000004,.719529593299999992,.178775271099999994} +443 {.664971564599999954,.97901815059999997,-.0656822982999999971,.767190750200000049,-.480350283900000008,-.74566045110000001,-2.04309706160000015} +444 {.442573942800000009,.500858764299999981,1.71553219779999999,-2.19916249549999998,.955154234899999954,.387808704700000006,.579147894600000046} +445 {-.838299043400000055,1.94824181850000011,-.680915165100000053,.6766557339,1.26184995070000006,-.271253503499999993,.94551762880000001} +446 {.886596281300000011,-1.10005603690000009,-.911515557300000001,-.523108620599999963,-.0852142693000000062,1.65770270870000003,-.840186101100000027} +447 {-1.22738794169999998,.644776569899999985,-.475323010599999984,1.0529219486000001,-.0264812450999999983,.578775162400000043,-.0911778547999999994} +448 {.738792061900000019,-.524617339199999977,-.0686652113999999952,.0932493256999999964,1.12382844260000003,.769387646700000039,-.835452886899999969} +449 {-.870761580499999965,1.17920313180000003,.214740468699999992,.474464215000000022,.195785111000000012,-.101663604699999993,1.58592243780000008} +450 {-1.18137944920000004,-.496346142599999984,-.0411087090000000002,1.23816011569999995,-1.42950691640000005,1.80613109979999997,-2.55482100479999996} +451 {-.0162767155000000004,-.142086324900000005,.676324814300000021,-1.08197151469999997,.246650285699999999,.421347199399999983,1.10298747479999992} +452 {-.187440772000000005,-2.5090590279999998,.744853996100000026,1.87211118980000002,.163328908200000011,.661362322699999949,-.184454517499999998} +453 {.307211995999999987,.971451416299999981,-.173011232699999989,.132947834600000009,.131015628500000009,-.785063751200000048,-.216878608299999998} +454 {-1.29721231079999999,-.826997120300000055,.421169711499999988,.579790793799999982,.463763393899999987,-1.27733225049999999,1.43873447640000007} +455 {-.198602405799999993,.674388246599999963,-1.02352219789999999,-1.57795003020000002,-.0248284096000000003,-2.38874401619999999,.761506557200000045} +456 {1.85713663979999999,-.834694637100000025,-.69905833169999998,.0648370322000000054,.124634324699999993,.891339214000000046,-.159296284399999988} +457 
{-1.20372540049999999,.0848503166000000031,1.66108151389999992,.809143948500000043,-1.1408613836999999,-.300438030199999984,1.11678605199999992} +458 {-1.47279357389999999,-1.93216686009999994,-1.23782911969999998,-.0175452273000000006,.877667210299999945,-.448730027199999992,.518820712300000042} +459 {-.635614221400000012,1.33864661109999994,-1.27969868290000011,.755200458200000035,1.8724712443,.148181693100000011,.82685755839999997} +460 {-.680601303799999946,-1.52628915150000011,.611162093999999989,.0402606974999999978,-.188930103200000005,-1.56051707900000003,-1.59763092750000002} +461 {-.217204484199999992,.132357791499999988,.686992714800000015,-.992669794700000052,-.909036968400000034,1.78911304960000006,-.16561725129999999} +462 {-.180070849100000013,-.618294950899999973,-.427455843900000021,.171654218999999997,1.04940954359999994,-1.15453404690000005,-.562964305399999976} +463 {-.562692398499999968,1.76376670589999995,.639974784099999972,-.0570351331000000031,.902703596000000053,-.82733238099999995,1.60778039510000004} +464 {.393892541400000018,-.593870894900000001,.676759047600000052,-1.58498560590000004,-.624905983100000006,.295375255000000003,.0525780388999999987} +465 {-.115190791200000003,-1.64642623810000011,.0777732139999999933,-.0356489954000000012,.0270011450999999986,-.528776143099999985,-.241002679599999992} +466 {.592960757299999974,-.356318424799999978,2.1897119141000001,-.238879123199999988,-.675888747200000028,.304878378000000005,.459713681799999996} +467 {1.66780501920000002,.614483857099999975,1.08045973260000006,-1.77721404099999991,.812177691899999998,.782603371999999964,-1.6014921580999999} +468 {.361391089200000015,.196103640800000006,-.867857965200000026,1.82581356550000007,.77459707769999997,.841988231800000042,.16165557459999999} +469 {-2.67243571129999991,.66681040030000005,.745716065300000008,.101100092399999994,-.442517944599999991,.93438236210000003,.106093184899999998} +470 {-1.53642697400000006,-1.3272116692,.634796024699999961,-.385448982200000012,-.66642058719999997,.107223902300000007,1.85573602640000002} +471 {-.382808862299999997,-1.22534064549999999,.822404228499999945,.469392640000000028,.197180700600000008,-.828493845799999962,-1.15004601369999992} +472 {-1.49225187399999992,-1.2183657645999999,-.375231580399999987,.189443967600000013,-.249221285999999986,-2.12723929800000011,1.8162026322} +473 {2.60590853980000015,.0713464912999999951,.805100761800000009,.62430913170000002,1.02171901590000003,-1.29683017070000006,-.635559073300000055} +474 {.106007556500000003,-.776394661899999949,.206348844599999998,.319602619099999985,-1.27802273550000001,-.630292795699999964,-1.4145009516} +475 {2.46109615489999989,.728306553999999995,-.315179170200000025,.395189694500000022,-.0918025828999999988,-.932667134100000017,-.204114064200000001} +476 {-.915765809399999964,.468398326600000026,-.559881416500000006,-.0174543794000000009,.270645205100000008,-2.05716018220000008,.987825068899999992} +477 {-.767895311800000036,2.74035630179999989,-.265155683799999986,-1.341762444,.568802452899999977,.839207610500000034,-.318706129800000015} +478 {-1.45239797669999993,-.20050405939999999,-.779080115299999987,1.57102294729999992,-.570462788399999976,-1.25175919409999992,-.691482233000000002} +479 {-.287352161499999981,-1.1228360504999999,.839973539399999969,1.5003724543000001,-.771458659899999954,1.19146159220000003,-.658768281700000014} +480 
{1.3561053567000001,.96362225410000002,1.90568636060000007,1.97772067409999996,-.217652814100000008,-.53168750720000002,.225682244700000006} +481 {-1.32923678350000007,-1.04484200969999996,.208680631599999999,.515818582299999995,.962176562999999985,-.35211185880000001,-.981084812099999981} +482 {-.46825043030000002,.222011075199999991,.163630715399999987,1.1029751888999999,-.826665296900000013,-.197732802600000007,-2.21419250280000002} +483 {1.26484246539999989,-1.87060864859999998,.89587387230000004,.330574138000000017,1.14589053650000006,1.03243994910000003,.144106338800000011} +484 {1.82371296499999991,-1.11922999600000006,-1.21987484150000003,2.42699051740000016,.347469716099999992,1.15653956990000006,-1.01561876190000011} +485 {-.169556953099999991,.599056026500000005,-1.98286486510000004,-.430102085000000023,.663359558599999954,.0629153741999999988,.503723316800000043} +486 {.895163801799999992,1.04413938109999993,1.22020697789999999,-1.25274880709999992,-1.0266405596999999,.169631771900000006,2.08692538719999998} +487 {-.359150982499999993,-1.19336779730000009,-.271091385600000012,.595802433899999984,.675331183800000012,.423431214399999978,.366583000599999986} +488 {-.379415538499999982,1.29276719410000007,-1.24330191749999996,.0205911299999999992,.75881605799999996,-.906917722400000015,.240231892100000011} +489 {.862288574300000055,-.00661189440000000024,.662235211599999984,-.72764332509999996,.665456559600000053,-1.07291512130000011,-1.03355315140000004} +490 {-.943854082899999947,-3.05158932530000016,-.812666159000000055,-.919767817300000012,-.0188180545000000005,.514327925699999966,.673235814899999951} +491 {-.223886291200000004,.255456706400000011,-.875917898600000022,-.0224664081000000015,.159551289099999993,-1.1295657454000001,-1.08920816649999996} +492 {.696775097700000012,.902130527399999993,1.0370231379999999,-.0522288272000000012,-.0245807122000000003,.0605425666000000001,.123617234899999998} +493 {1.20643482839999994,-1.23089500399999996,.0129639829000000005,-.922991053499999992,.805893268299999965,.166140318799999992,.725314982600000047} +494 {.258812546699999979,-.604128279200000007,-.0646710900999999949,-.60199057850000004,-.622811821700000046,-1.47644235859999995,-.577042922900000033} +495 {-.537724691800000043,.59386047409999998,-.758750391900000043,.515198818899999966,-.0439678831000000006,-.571971604200000039,1.12102292120000002} +496 {.0550722550000000005,1.2505110116,-1.88352288899999998,.374528729499999991,-.87542911609999996,-2.00103521240000015,-1.33691201790000003} +497 {1.20556425950000001,.503927272300000006,.0776460627000000042,.499901277300000002,2.2774258715000002,.390436464400000016,-.0862218696999999951} +498 {-1.08395910680000007,-.659756083400000026,.368309984599999973,-.856944117800000016,.958853149700000018,-.887193623799999997,1.52841631109999998} +499 {.82729592789999995,.3115158227,.576659105099999958,-.295487934600000002,1.35402891969999994,.00600716139999999971,.58769749800000004} +500 {-1.76265449790000006,.525430438699999947,.0831099782000000037,-.392135858100000012,.538751012099999982,.930514363200000028,-.948684136699999958} +501 {.572807137900000041,2.0301418174000001,-.701772980300000015,1.226127411,.651187709000000003,-1.2123227674999999,1.4099385217} +502 {-.353116916800000014,-1.48989580759999996,.7364186801,-.629707283100000015,.509962114300000047,2.50340529609999995,-.669917703500000017} +503 
{-.194350025600000004,-.857103593599999991,.422565178900000005,1.44072097480000005,-.362970456499999983,-.908049662699999982,.538260613599999993} +504 {.761968623999999983,-.522641035799999987,1.49836467140000007,.915999798999999948,-.833619272199999983,-1.40037470140000009,-.88538520129999998} +505 {-.921433264799999985,-1.34152230010000006,1.03188753150000001,-.231905928399999989,1.39414619949999996,2.10108875960000008,2.19788253330000005} +506 {.370268607899999991,1.01688671079999993,-1.51991840309999993,.956366764199999997,.829227058900000036,.483758394500000022,.669288061000000045} +507 {1.27766349349999997,-1.28015241720000006,.355942521700000014,.627232813799999955,-.36184739589999998,.669951647600000033,1.70002511619999996} +508 {-.752676486100000042,-.30220368219999999,-.670458566300000003,-.821355052399999952,1.10258942289999995,-.493503346399999987,.343459748299999978} +509 {.807750209299999966,-.540807078699999999,.948196119799999959,-1.69434076189999994,.585862519400000004,.274574404800000005,.725668254000000013} +510 {1.70262402749999997,-1.57586747180000009,.00708355560000000026,-1.5377570961,-.0533131360999999984,.00430112220000000017,1.02982804140000006} +511 {.370408747099999991,-1.11752565289999994,-2.5671612585000001,1.0258390155999999,-.312193507400000014,-.53603225600000004,1.06180403749999996} +512 {-.328090196000000001,.57733864909999999,-.873748306500000016,.642651634200000044,-.220982632099999993,-.155449888499999994,.677129516599999981} +513 {-1.3232667117000001,-.0350914132999999989,.0603763469000000025,-.33767401159999999,1.51703251170000009,-.157290211699999988,-.876486947299999986} +514 {-.337764984200000007,-.21983856739999999,.241687637099999991,1.51032012300000007,2.11066562709999994,1.55984284970000009,.463617708299999987} +515 {1.08973586600000005,.00593325890000000016,-.511797871600000009,-.0033582019999999998,.883894399899999961,-1.69369243909999989,.125314475799999991} +516 {1.31734444909999993,-.868527495000000038,-.277832607399999987,1.23119757380000006,1.99519370130000007,.313464661899999997,-1.77481190759999996} +517 {-.344771374999999991,-3.13309592839999995,.127377727800000007,-1.76171756329999996,-1.51347550950000009,.910372248900000014,.69079768610000003} +518 {-.704975192200000045,.244201056199999988,-2.64991741230000022,-1.50408054460000007,-.267152085500000025,-.933243167700000043,.268738929500000001} +519 {-.398311180099999995,.778840680499999993,.944987001300000018,-1.12254544559999991,1.3156795803000001,-1.2387635133999999,-.343705233700000001} +520 {-.367579971100000014,-.224967119500000007,-.408594563999999993,.293165944399999989,-1.38606700630000002,-.142592033400000012,-.311313496099999976} +521 {-.0637001378999999951,.101837005300000005,1.36981733270000006,.0996786877999999987,-.483185439200000011,-.0192940077999999987,1.0647355691} +522 {-.368973115199999979,.447420329299999975,.217815412600000008,.215232214000000005,.0112273075000000003,.929261974999999962,.162498847800000012} +523 {-.931219743400000022,1.27053641179999999,.849337649099999981,1.37060244240000006,-1.2406663751,.105047369900000007,-.359456857399999996} +524 {.324928065099999996,1.16601840040000004,.792656398200000001,.0827561267000000017,-.497213205200000008,-.0536931301,1.44992456029999994} +525 {-1.50302264289999998,.804677475199999992,-.00330706870000000005,.327847528499999985,-.168067132400000013,-.367957423800000016,-.408335492000000022} +526 
{-2.09500715770000001,-1.02939137269999992,1.76390403920000005,.252635384000000018,.12657090809999999,.251581504700000014,.811750722500000021} +527 {.318469548099999999,-1.44753413870000003,.750235127900000021,-.292622313799999978,1.00921215370000006,1.23746291730000002,-.00579573530000000031} +528 {.0609300435999999973,1.46806842439999996,-1.56628830119999995,.20577346090000001,-.197903466599999994,.504226961999999945,-.776933687899999947} +529 {1.16548134299999995,-.563115857100000006,.59170884349999997,-.0240409946000000006,-1.57463879699999998,1.27256763009999996,.868950504099999965} +530 {.274695489000000015,.808481160299999946,-.540406022899999949,.623898047099999964,1.75742734320000005,-.377266833099999976,-.689942011299999991} +531 {.789933098699999947,-.200816322600000013,.743133349800000009,-.619182114000000006,1.89094710469999994,-.0976437165000000051,-1.72734867300000006} +532 {.479977886799999975,1.30342502059999998,.585305365299999991,-.0564991344999999992,.758059605999999997,-1.64501047569999992,-.396490193599999985} +533 {-.154974293699999988,.539917872500000007,.699166390399999949,-1.64759718580000003,.885967506700000018,.271628295899999983,-.491204984200000028} +534 {1.60062607099999998,-.729515389999999986,-.101409100200000005,.00366897689999999988,1.04490809300000009,-.0802274523999999961,-.262598618299999975} +535 {-1.18727493979999998,-1.6284034300000001,.218386375599999988,-.317090073500000014,-.0613711306999999984,-1.21948566349999998,.95205224860000004} +536 {.766212500500000004,-.0902548697999999983,.0545420699999999981,2.23411843379999997,-.366482441400000025,-.580537352500000048,-.0685927850000000033} +537 {.469130777399999976,-.259347306499999986,-.097027796999999999,-.178511316799999986,-1.08057869939999995,-1.57714877239999995,.638024349599999985} +538 {-.526898773300000012,1.66478608870000011,.700135846599999945,1.07645591100000004,.404980090700000017,-.0197353317000000007,-1.0639348876000001} +539 {-.98875827000000005,1.66338556969999996,.2611049748,1.10498343019999989,.33481315989999999,-.743629292399999975,1.05225479720000004} +540 {-.228041385399999991,.30644384899999999,-.680739293399999945,-.655048734399999999,-.502647308599999953,-.406667300999999981,1.19083078130000009} +541 {.0679440250000000051,.446758401600000021,2.00275545750000017,.963709436300000055,-.244123727799999996,-.679033458000000034,-1.1120517540999999} +542 {2.52745663919999997,-2.14895873850000019,1.2068141308,-.31664892779999998,.2875934671,.17726837849999999,1.85701314929999994} +543 {.116440410300000005,-1.69345633659999995,2.03030217329999996,.305657578200000002,1.52358206210000002,.384876146699999977,.792030596399999953} +544 {-.432094180100000003,1.06407491269999999,1.22297320759999995,-1.75080660419999989,.655535570199999973,.501157532499999947,-.786280805199999988} +545 {2.07846753480000013,-1.22541162449999996,2.78854587189999981,.727475786499999999,-.0440961694999999973,.851412607300000013,-1.15981692609999998} +546 {1.63155202040000002,-.453590593900000005,-3.47295263479999994,-.14177731569999999,-.585159525300000039,1.34982137190000007,1.25521009830000008} +547 {-.247120820999999991,1.02023670690000001,-.233000272700000011,-.856704015299999955,-.106330741399999998,1.08859114430000004,-.787778152500000051} +548 {1.35048437759999995,.92683414129999997,-.348037661800000009,1.47269347689999996,-.112389162299999998,.0312325955000000016,.934826464100000032} +549 
{-2.46842686809999989,-.28162165090000002,.0741654436000000056,.238288081000000013,-1.00841677449999989,.00650419640000000042,-1.77841747640000003} +550 {-.904082941700000031,1.92869978089999994,-.465045964799999989,.630417888500000023,-.64726876489999996,1.46314862170000004,1.51795244819999997} +551 {-.214314914200000012,-1.51571767470000007,1.55474871069999998,.031740815399999997,-.302613863799999994,-.552552743400000046,-.0241290780000000017} +552 {-1.12142984239999999,.000582301700000000043,1.82790722620000001,1.26368358539999992,-.419339162099999996,1.64252753749999991,-.426498943799999974} +553 {.500824365900000013,.94176649400000001,-.445783368399999991,-.383643247199999982,-.277683216800000021,-1.19337117290000005,.48431397399999998} +554 {1.54722485160000001,1.52251268249999994,.788741315899999962,-1.07858910329999991,1.35968185240000006,-.663417619600000008,-.203887455599999989} +555 {-.192137879399999989,-.338242960599999976,1.98451391169999991,1.9996201576999999,2.55627765990000011,1.97374904370000004,.26513851129999999} +556 {.3899670623,-.46029514589999998,.843124469799999998,1.53489675879999998,1.03726026189999998,-.895836608300000004,-1.18299559779999997} +557 {.731879134699999989,-1.01463766620000007,-.147178772799999996,-.656051042400000006,.273030961000000016,.282544017699999983,-2.15151405860000011} +558 {-.0109989152000000007,3.15036891020000009,-.0138369990000000009,.916325686700000031,-1.25920255819999993,.725220523700000008,-.201113312999999988} +559 {-.832698714700000009,-1.46544743030000002,-.594249314000000028,-.48006192139999998,1.13834234379999999,.0853239466000000057,.277049839799999975} +560 {-.680369630499999989,1.07319743330000006,.428299013399999984,-1.52606007870000004,-1.32376880069999991,-.37027928269999999,-1.8847548274999999} +561 {.526206095100000049,.72465667850000004,.853688609899999951,-.411898761999999974,.525506768900000032,2.22342149320000004,.657810890200000054} +562 {-.0891871786999999944,-.621205454499999976,.968740555899999967,-.639583562600000044,.148647101799999987,-1.04897821939999991,1.73910212980000001} +563 {.400384107200000006,.733486617800000018,1.5984482416000001,1.06435067629999991,.229788828599999995,.273893918299999983,-.0781159503000000005} +564 {-1.2869252967,.33082642750000002,-.501676219799999989,.319974762600000018,-1.62968144839999995,-.408935294999999976,.997117583099999982} +565 {-.361153830499999995,-.253972970099999973,.428590195999999979,-1.0701275603,.469548123099999992,1.3432391628,1.17049831529999993} +566 {2.12880066750000019,.436729484100000009,.531608427200000011,1.17899804590000001,.823618201099999947,-.17891871270000001,-.106396499500000005} +567 {1.02269347609999994,.0127966776000000004,-1.36150924150000008,1.95040248350000001,.486921411799999981,.104594804999999999,2.79265856720000016} +568 {.109403478900000004,.56612644879999996,.619236721000000045,.490065054200000016,-1.05928800129999989,.163181384000000013,-.909778711600000012} +569 {-1.08247548020000006,.531609917100000051,1.71649658810000005,-1.60350402839999995,-1.48273337399999994,.973798838400000033,.261231216599999982} +570 {-1.18378168000000006,.0855642357999999992,-.533866719699999992,-.157614576000000006,-1.0835268777,-.0556991151000000018,-1.36185851350000009} +571 {3.27379514169999997,-1.23107336559999991,.174496703000000003,-.0127802226999999997,-.242351033999999993,1.41363142779999995,1.73913792109999998} +572 
{1.24674703059999992,.805130727099999999,.28494118559999998,-3.2434177973999998,-.426859897200000005,-.53438689880000001,.850695151799999993} +573 {-1.05568675540000001,1.27940721739999996,1.67407952080000011,.959750587599999982,.342028847700000027,.24141272259999999,-.57061429379999995} +574 {1.20730221960000006,.480364631500000028,.598919452900000038,-.559859884700000032,.228230589300000009,1.04843045849999994,-.589037315899999969} +575 {-.544567591199999979,-.740471052199999979,1.35708328299999992,.0288823586999999993,-1.46645542900000003,-.130069948099999999,-1.02975590270000006} +576 {1.1929916384999999,.715147415100000017,-1.03048238820000004,-.635920137499999982,-2.37755045679999988,-1.37297296169999994,.0686025056999999966} +577 {-1.0994046542,-.0658136698999999964,-1.26221197310000011,.447293289500000024,1.57759398830000008,.00891478130000000063,1.41451078619999993} +578 {1.03932809879999999,-1.33912552429999998,-.280893267999999974,.551790693099999952,-.0693957820999999975,-1.63722370270000006,-2.0500581242} +579 {-.276799982,.200759889400000002,.044565707000000003,.877477921400000027,.487839997099999978,-1.87251485259999995,.198235722199999992} +580 {.165479469900000009,.40016435039999998,.605291573599999966,.843556350499999996,.817262933800000035,-1.0197959780000001,1.00129142729999998} +581 {.780035499699999968,1.1359454147000001,.844628929099999981,-.104410521300000003,.039123815499999999,1.06191483669999998,-1.71124578459999999} +582 {.543153402900000026,1.04959433609999997,1.10911401829999989,-.671788055699999997,-.518232502699999986,.702669465399999971,.270197929399999981} +583 {-.858750180699999976,-2.20586284349999984,1.04341466840000008,-.370558753399999996,.722536705699999948,-1.07311932999999993,.4565963455} +584 {.244741049900000007,.763778969999999946,.270417505900000021,.360420430399999991,.4561486602,-.861568598399999996,.942659021999999958} +585 {.184016050399999992,-.827281158699999963,1.3382573646,1.47930153419999999,.0675651020999999963,-.670661087499999975,1.95632186840000011} +586 {-1.56893506340000011,-1.95494216889999994,-.694582554699999966,-1.04558747109999994,-2.14616700520000014,1.35309641709999995,-.447590776199999985} +587 {-.268510336000000016,-1.00464764799999995,-.581771884699999964,-.259961445399999991,-1.04203557150000004,.932042655800000008,-.364773023799999985} +588 {-.0358663791999999995,.174043478000000001,.0568702440000000004,.636430336500000027,.287821743500000005,.823730203999999966,.96456525780000002} +589 {-.963324295299999966,.921692927199999956,.493568747299999999,-.0434325491000000022,-1.86195193439999995,-1.36289022609999999,.646373293599999954} +590 {-1.29105028490000007,1.27792751560000006,.0971036403999999964,-.846176730499999974,.455928810399999995,-.425327243399999999,-.711132107399999991} +591 {1.36295089969999994,-.405734077799999981,-1.00885850210000005,1.69419132419999996,.724588776499999976,.243351838600000009,2.08907489160000015} +592 {2.36900788390000017,.662464707199999947,-.4132025332,2.96820336180000011,-.29151096139999999,-1.2537880910000001,.18878344720000001} +593 {.0653585160000000054,.145544760599999989,.775304139299999973,-.868938478200000031,-.0106644956000000006,-1.59778465059999997,.031160251600000001} +594 {-1.25878841660000007,1.61292855249999989,-1.68220441880000005,.0577831632000000012,.68926398489999996,.971261113299999979,.401298665700000001} +595 {1.4308635621000001,.353462320100000005,.640442753400000009,-.116621837500000006,.908468482399999999,-.863811346700000038,2.63580915610000011} +596 
{-.595265187700000031,-.904460948600000036,-.0957145194999999976,-1.3468763587999999,1.0489313508,-1.14981008120000006,-1.8654937708999999} +597 {-1.72017201489999993,-1.42382786850000009,-.147789978300000013,-.237794518700000013,.587045398899999959,1.66528507400000003,-.724828464899999969} +598 {-.386505410400000027,-.0759911251999999954,-.420487559100000019,-1.11897170410000002,1.08007607030000008,-.129498564199999999,-.178953536100000005} +599 {-.900379689899999947,.95192014420000004,-.410596881000000025,-.564837005799999958,.0600942986999999998,-1.00556549620000002,2.12729595859999998} +600 {1.1254795446000001,.0996740153000000018,.6879734448,.758925684499999975,2.07610834990000015,.36508121189999998,-.292008409000000024} +601 {-.96500292870000004,-.742207272499999959,-.0680405540000000031,.754295841399999945,.629649425800000051,.140442558599999989,1.42603374290000007} +602 {1.0885367103000001,1.44727334400000007,.149671758900000007,-.340775002900000012,-.130377810199999999,.531466819000000035,-1.1314564304000001} +603 {-1.08252387779999992,.640916378400000042,1.37456192549999989,-.178995024899999994,.199264661700000012,-.0624717572999999998,.109025204099999995} +604 {1.08898040609999991,.385935610599999979,.427466188500000011,-.0179436704000000002,1.46085711589999989,1.78106206689999991,-1.9932497991} +605 {-.0264602781000000016,-.704170362299999963,.587543864200000043,-.912728697100000042,-.780969019299999956,.0219612848000000013,.999153105700000044} +606 {2.28371080629999978,-.533563017999999945,-.479318828600000024,.709994483100000018,1.26183417100000006,-1.13100989520000006,.921568913600000039} +607 {.764955612199999968,.607173295399999979,-.837248743500000003,.275957390899999988,-1.07589329380000009,-.308189358400000002,-2.52368164080000001} +608 {.765292614900000001,-.887298410999999954,-.456646830999999975,-1.5369035754,-.923011893800000038,1.62013859610000011,.603488052600000047} +609 {-.588039318500000019,.622014144099999999,.356483234900000001,-.60763915150000003,.719034081200000008,.123531734899999995,-.165982131000000005} +610 {.960386882700000055,-.908999565500000051,-.316558235700000024,1.51991788690000007,-.315839367199999999,-.168095907399999994,-.528329665999999976} +611 {-.860030486899999946,.588574768099999979,-.940047799499999948,.170643382399999988,-.0267492969999999983,-.652538770000000046,.478944140700000021} +612 {.798376917799999974,-.2304729217,.400979532799999983,-1.13254003170000006,-.0714752934000000006,.0920213522999999978,-1.89533203449999998} +613 {-1.94812613430000003,1.4912202041,1.22546232389999998,-.399668356300000027,.233616049900000011,-.157811570200000001,-.353791711600000003} +614 {.672869978499999966,-1.38185551590000011,.755436068500000002,.682593190900000035,.0661336280999999943,-2.01954620480000013,.182204949799999988} +615 {-.824053180199999957,.88744013190000004,-.36949941679999998,.486722594399999986,-.503913262400000006,-.130203275400000013,1.04901251139999996} +616 {.41576800260000002,.392060919800000018,.522125686099999986,-.0526374485999999975,2.5858854255999999,.817498423099999982,1.27566809460000008} +617 {-.303445629599999989,-.472370296700000025,.151315685499999991,-.436502895999999974,-1.83610782020000007,.364777743200000004,1.29135232430000002} +618 {.850746334899999956,-1.61931948599999997,-.799067599499999948,-.000782076699999999952,1.66431948760000004,.773984002700000007,.477803499600000015} +619 
{1.92711197150000002,.749752962500000009,-.494648212699999978,-.856892929699999972,1.65228174729999999,.0827671596999999953,.0904812621999999955} +620 {.167340921400000009,-.624908240100000012,-1.00723692620000005,.829885250400000052,-.473069990200000012,-.870093748499999986,-.376951832599999981} +621 {.405335769199999973,.766013765199999974,1.11163824380000009,.384404984800000016,-2.18500554090000021,.222387636599999994,.0139127334000000002} +622 {1.36357361249999998,1.42992374050000004,1.5352209509000001,.957847599900000013,.0632673640000000065,-3.2070375218999998,-.337791469100000019} +623 {.289171713900000016,-.801295675899999948,.150973081199999998,-.154733750000000003,.378121520699999991,-.465688405299999997,-.471562528599999975} +624 {.345956138999999996,1.10818545079999997,-.999662823199999995,-.109074860999999995,.571536589799999994,.521656169999999975,1.02616562070000006} +625 {.512145141199999987,.192389807100000004,1.74718223070000001,1.10116340639999999,.669591755200000027,-.0120713923000000008,-.723848118199999946} +626 {.572719055099999985,-2.33774514560000002,-.176454849400000002,.394411211799999994,.066834288399999997,.0856339624999999938,.0427686390999999985} +627 {-.655407768999999973,.361744494799999983,-1.50128950930000005,-1.0014429292,1.08422285330000001,-1.36138244740000003,.719108776200000022} +628 {.778338283299999989,-1.66117405679999997,.315490099099999977,-.193301158299999998,.228389379100000012,-1.15375801509999998,-1.18331284319999996} +629 {-1.79590277639999996,-.146512878900000004,-.3359856544,-.222471416000000005,-.266117713600000028,-.977517925699999957,.553348509300000013} +630 {.358967155300000007,.591173367000000005,.198098224900000008,-.29756420309999998,-2.78477594599999989,.169742082200000005,-.233817220399999998} +631 {-.451309618600000018,-.207292891099999987,-.0988106061000000035,.0313993700000000028,.414081876999999987,.404155665900000016,.763828568400000019} +632 {-.601002072699999945,-.23331129610000001,.30395803389999998,.373710377799999993,.694043656299999978,-1.62889141850000008,.837494809299999976} +633 {-.635018487199999981,1.40224026849999994,.598974435000000027,-.454165584899999974,-.751287034700000023,.13284265910000001,-.720566604599999994} +634 {-1.99546243410000002,.391786285800000023,-.250566594300000001,-.162414680400000011,-.860051388799999983,-1.96475368919999993,-.586109240199999992} +635 {.260065002200000006,-.0848319575999999936,-.370167050900000016,-.532280132000000017,1.87444394730000008,-1.70721048589999991,-.537179409699999999} +636 {1.40627053199999996,-.124105556000000006,.140722923199999989,-1.32765761529999993,2.12552253989999995,.632904506899999997,.7791999275} +637 {-.225080134900000006,1.64611721220000007,1.24035535539999997,.525316005099999983,-.15495860589999999,-.827641710900000049,-.321139640900000023} +638 {-1.14846834390000008,-.449453958499999973,.421560058399999993,2.17799241119999998,.650608854999999986,-1.92916495010000011,-1.506284768} +639 {.370120800299999997,-.155608243899999998,-.491786249299999978,.682437437200000052,-1.43360561109999995,-.1129879258,-.264122126800000023} +640 {-1.11552295309999994,1.16280010210000007,.106917319799999994,-.423889893999999989,.72288162509999998,1.1810003953999999,.297104163099999985} +641 {.879978428899999998,1.04887070100000002,-.312884976599999975,.178089087099999988,-.117783505799999993,1.18703999630000001,1.71061561590000011} +642 
{.923315024500000026,1.0710124124,-1.9880782963000001,-1.57713809129999993,-.0412981842999999987,-2.49948492180000015,-.334189993100000027} +643 {-.835778616799999985,.111620943700000003,.504456353599999963,-.319693894099999987,1.40894279459999994,.417931334400000021,-1.26054092839999998} +644 {-2.0890831368999998,.281960675100000002,.203248173100000012,-1.89561946820000005,-.550116787400000029,-1.65839517960000005,-1.62204964330000001} +645 {-.15022796129999999,-.503707734400000051,1.63050970340000001,-2.4523822700000002,.0846980873000000023,-.768294437600000046,1.21384469350000002} +646 {.59296222750000005,.155726870399999995,-.0351839087000000023,-.109850370500000002,-.98747070189999997,-.263676868299999978,.264403771299999979} +647 {.0406493898999999992,-.657566965400000036,1.0586224797999999,.359466481800000015,.672071965600000043,-1.35735700380000002,.578781725300000049} +648 {-1.88562371839999998,-1.86139385689999992,-.957785244700000038,1.86939339579999997,-.728628378799999976,-.0612250577000000018,-.722906098699999999} +649 {1.72250278849999994,-.894040598200000036,.0733430399999999982,1.52564827030000005,-.191281673699999988,1.3408292061,1.85316186440000008} +650 {-.51484544880000005,-1.51252376210000006,-.627405500399999982,.862259798099999974,-2.09699702649999997,-.341618162800000014,-1.92129068240000001} +651 {-.389545409399999987,-.289556075999999996,-1.14763543229999998,-.543351303500000049,-1.23035818349999992,1.0106936713000001,-.31453111789999999} +652 {.531466345799999962,1.34223784329999996,.113692431699999999,-.0361231681999999998,.420062421700000022,-1.15977003810000001,.218937924400000011} +653 {-2.97094136689999999,1.05933317859999998,-1.01122060700000005,-.632809873499999953,-1.10480270530000002,-1.61624329879999995,1.15940810250000004} +654 {.740851386999999972,-.273901147299999981,.443118855199999995,.252763751400000014,.169064275200000003,.664734029699999951,-1.30734963770000001} +655 {-.940571335000000008,-.828744130499999954,.0367474832000000001,.991771447299999998,-1.18685628509999996,-1.02574373949999997,-.245755652399999996} +656 {2.12553011970000005,1.07797227059999989,.620336150800000041,-1.43742926780000002,-1.08299000379999999,-1.63537509170000006,1.71753441530000006} +657 {-1.20606228729999998,1.97004564900000001,-.672093344700000039,.906515780100000002,-1.00628334759999993,.626077295800000044,-.198578533000000002} +658 {-.754075124899999949,-1.62968398739999998,-1.17913289719999992,-1.32232664820000001,-.917095253099999952,-.154092914400000003,-.667446048199999975} +659 {.495072933699999995,.5771171155,.471021955200000009,.351620127000000005,.231273745400000008,.184572633000000014,-.76536326610000005} +660 {.504532488599999995,.869773132700000007,-.619776088199999964,.116417724200000003,-.225340130299999997,.153477637299999997,-.333951775399999984} +661 {-.703435089800000024,-.308090273099999978,-2.26215736840000003,.599979057500000024,.6562050049,.207084174399999987,-.985097455999999982} +662 {.0253526168999999996,-.443285199299999988,-.2386157156,.28568939539999999,-2.33486866719999986,-.154375883299999989,1.19486314500000002} +663 {-.315738870700000007,-1.25484951980000004,.701283648500000023,.145800241100000005,-1.75982342219999999,-1.45823787300000007,-.2429231637} +664 {-.159631982100000014,-1.2682662735000001,1.6886983955999999,.115271834300000001,.690507850599999973,.238889430200000003,-.644764382800000035} +665 
{1.5392670905000001,1.62175277020000008,-.170289650099999995,1.85542778000000008,.755789681799999946,.175977650900000004,-1.15855714409999999} +666 {.258885963599999991,-.492453268900000019,.132938338600000011,-.592867218700000032,-.46756781460000002,.403065408000000014,1.44769448040000004} +667 {-.467522700800000024,.823579288699999967,-.125282679000000008,-.295856828299999985,-1.08967083209999993,.73500511589999995,.462917317399999972} +668 {.626526476200000015,.762503236599999967,.057677222799999997,1.19382593989999997,-.422335311099999999,-.699328547100000031,1.53977904709999991} +669 {-.0978352041999999977,.0350472152000000023,-1.21014511550000003,1.0572633413000001,-.186239290000000002,-1.90856510710000005,.777987904800000019} +670 {.780396207100000017,.750151022399999978,-1.69291332739999989,1.00243835530000003,-.158769619100000009,-1.57043679020000004,-.447549848299999997} +671 {.291050105700000006,1.83945621979999996,-.748006298600000008,-.515648496399999989,-1.00937785359999999,.378431334600000002,.210029251599999994} +672 {-.0720269627000000023,-1.50289249010000003,-.151024168799999997,.619891944499999958,1.39313815870000002,.0807467377999999958,.0991606671000000023} +673 {-.15744671930000001,.656765413899999984,-2.43428275240000014,.1512948821,-1.22386843719999994,-1.11394897310000007,-.30324793080000001} +674 {1.29802164620000005,-.547857624499999973,.641785500300000034,.520070055200000048,1.39848300390000002,1.99428746320000005,1.05356156680000002} +675 {.529138699099999998,.131597563400000006,.221405009699999988,1.59471146730000002,-.67731852469999998,-.894536974600000034,-1.46191949500000007} +676 {.339456582399999984,-.534519208400000001,.450727327200000027,1.35389609909999997,.587908329300000032,1.49374215859999993,2.4202779284} +677 {-.495950831699999983,-.519843814200000032,-.348459075800000018,1.71906717789999997,.396449421299999993,-.441986816000000005,.897484891199999968} +678 {.164387054300000002,-1.11573325670000001,.000414041499999999983,1.02131227690000004,1.5917740373,.102869217999999998,-.261393895499999973} +679 {-.607608092200000005,-.468451608399999997,-.381534684499999999,-1.11198895989999991,-.799286622000000002,1.00522757009999997,-.53153848469999998} +680 {.782444524400000008,.788453663500000013,-.70845571669999996,.843892738700000034,-.2486315516,-.699699808800000045,-1.3098567418} +681 {-.929493551700000031,-1.4538681766999999,-.893223670200000042,-1.39599969020000003,-.571305914900000045,.513652293599999976,.502445757499999979} +682 {-.539730419600000011,.0449598072000000001,-.615006140400000012,-.480619359199999985,-1.01233511060000003,-2.40899420220000016,.779094989099999968} +683 {-1.00317756960000004,-1.27085712989999999,.204172661799999988,-.334789971199999981,2.04006413430000011,.625213112600000009,.684485952399999986} +684 {1.80439768200000006,-.0863289626000000032,.00856960290000000069,-.644303046500000032,.155398615200000006,.205350192600000009,.249815137200000009} +685 {.52545958540000004,.972217807299999959,-.436961736699999992,-.362099730800000019,.129397631499999999,-.107170138999999998,-.190903609899999993} +686 {.706657845099999959,-2.23207426450000002,-1.71577114189999991,.835988961500000016,1.22202905380000004,-.177194131000000005,-.629147668400000026} +687 {.617045060799999989,1.5269556205999999,-1.25651544729999998,-.277434369299999983,-.601093243300000024,-.0158092498000000012,.522368842200000016} +688 
{.440387256700000007,-.276634425999999989,-1.82309378529999999,.044680910499999997,.345316169399999984,-1.33846349569999989,-.520702685499999984} +689 {-.648278872200000023,-1.30085748059999995,-.645392928799999988,-.238826198900000009,.414938419999999974,.925630216499999992,-.524220089700000003} +690 {-.391650442599999982,1.31344801599999994,-1.41665866410000008,-1.20225835430000005,.743126993299999983,.846590427700000037,-.48031278220000001} +691 {-.0343741420999999978,.253388048899999996,-.120234416400000002,.268169195500000013,-.693838121900000049,-.893420659400000039,-.338774652899999973} +692 {-1.35922372879999998,1.25259800639999996,.777016010700000037,.0618299986999999995,-.170679841800000009,.863121235900000006,1.06104217670000001} +693 {1.05969608920000002,.079519583899999996,-1.54409218939999993,-.752630578300000019,.660204821899999961,-2.06413604069999979,-2.34554353590000009} +694 {-.843097766199999987,.155672226599999991,-1.19773364199999999,.757652661499999991,-.619051200599999962,-1.31753252210000005,.296888115499999994} +695 {-1.84513380289999995,-.715457917900000018,-1.09083322239999991,.296600469099999986,1.55717242430000002,.406685993999999995,-.0493504732000000015} +696 {-.334897186199999974,.130407239600000002,-.71873057380000005,-.0338620124999999966,-.254083563800000023,.201922810799999991,-1.6468028281} +697 {-1.42164920129999994,.617676797999999971,.450115404800000007,-.893810553100000016,-2.3691286535999998,2.28663604099999995,-1.77028470160000007} +698 {-.0790137867999999993,2.15784416290000003,-1.18997703359999996,-.586289768600000039,-.391299652200000014,.457503632699999996,.615693446800000044} +699 {-.637996750000000001,-1.47475141700000001,.0785606123999999933,-1.15581486169999992,-.118325369900000005,1.0447669074999999,.459048089400000026} +700 {-.292063536199999974,.405899897000000009,-1.10078257340000008,.902118807200000039,.287519177999999986,-1.2350350296999999,.783699827900000034} +701 {-.967970810199999976,-1.48202796869999998,-.692712019300000037,-.4270938876,-.439539699800000017,.710999347000000004,.665376469599999965} +702 {.176895492300000012,-.637497428900000052,-.568976427399999984,2.76856881430000001,-.15366806790000001,-.305346370300000003,-.669507300899999991} +703 {.117608880799999996,.171617908000000013,-.822631466599999972,.202867623599999991,-.0349321322000000004,.591498740300000048,-2.07342474600000015} +704 {.652902068199999963,-.71977773860000005,.768036000900000015,-.0263163663999999992,-1.20138065680000006,-.581727854699999991,-.196153893200000012} +705 {-.183623186500000007,-.610265521400000011,-.346857367199999989,.32193329380000002,-1.24229352860000009,-1.58665649050000002,-.700230172100000048} +706 {1.43021639399999989,-.146449714499999994,-.994693030700000036,-.301882971599999994,-.904100387800000016,.106658895000000004,-.154054989199999992} +707 {.166660055800000012,-1.25229321930000004,-1.95272051669999991,-.929161667000000024,-.0952737770000000039,1.44379694059999997,-.68103856829999998} +708 {-.197953979599999996,-1.29652065579999998,1.1629765192999999,-1.26663640170000003,-.752160697099999997,-.82468317999999996,-.23479734599999999} +709 {.470134937899999994,-.886142703600000026,.493376903600000027,-.383838921399999977,1.2209603081,1.08240012269999997,.142252294000000001} +710 {1.61558177740000009,-.524949140200000053,1.33652766850000004,-1.21370688630000001,.478510199899999988,2.08942454800000021,.838156616200000038} +711 
{.240728768199999998,.0367975578999999997,.906508048600000005,.352894051300000011,-.0460622118000000025,.318023089300000006,.674370366999999971} +712 {-.437341256199999973,-.986129654299999991,-.442981377700000012,.0555218360999999994,.0453640235000000031,1.28443675419999992,-.0920688313000000008} +713 {1.16519780119999994,.00983054239999999954,.991619991800000045,-1.26101262340000009,-1.05840967230000005,-1.00335247339999989,1.5879746052999999} +714 {1.50577413240000002,.81687965490000003,.116894454300000006,.561106439799999968,-.536836384200000016,.681872351500000029,-1.53817711009999991} +715 {-.272526519999999994,2.48567120219999982,-.526312397600000037,-.158829175200000006,-.305292592099999982,.361363407299999995,.22034244380000001} +716 {1.64543707230000003,.213088504699999987,-1.35691883670000002,-.537384609800000024,-.7784015138,-.0803221514999999942,-.553213851300000004} +717 {-.844653008400000016,.547106553600000045,-.117256107700000001,-.689305504599999952,-1.44263358129999997,-1.38139956070000003,-1.02367664549999993} +718 {.79175332350000005,1.0853513750999999,.524835463900000021,-1.04199612349999993,.90327531130000005,1.2197949909000001,.0719003941999999951} +719 {-.644987381500000012,1.08030181700000005,-.411959536500000001,-1.03864606840000007,-1.49437033930000007,-1.30243811009999999,1.72665298170000003} +720 {.749952946900000028,1.47213209569999992,-1.25824931329999989,.0609655117000000024,-1.09042379979999993,-.343411650800000023,-1.21042151229999995} +721 {1.14707147060000003,-.0552826635999999969,-.116304930200000003,.105464117900000001,-.277288651100000005,-.341350961399999986,1.05885504530000008} +722 {-.628727550399999946,1.37437059899999992,-1.57670217220000008,1.2717035069,.872041316200000027,2.16208675179999998,.17645319000000001} +723 {.865774373199999947,.0948645194999999941,.613317076399999994,.394354755599999995,-1.34830510100000001,-.626082819600000007,1.31870266949999992} +724 {1.13033642230000009,.125722777499999994,-.851983572299999992,-.687923195999999959,-.954499894200000032,-.777121975499999951,.544051807799999954} +725 {-.372152022500000013,.911456960600000032,1.12365529629999994,-.269832946899999981,-1.40968378890000001,1.15781843889999991,-3.36156458180000017} +726 {-2.01744673949999997,.195782205200000003,2.5030762488999998,-.230545433900000013,1.24348763090000003,-2.51378700559999979,.435751271499999981} +727 {-.251756710100000003,.107447366899999994,-.188038511399999997,.3185392326,.688286909799999957,1.15923049190000005,.952172462199999958} +728 {-.0649463987000000048,.294671427900000005,-2.0400714836999998,-.307887975899999999,-.0702897977999999979,-1.70862243539999992,-.230278117199999993} +729 {.395478278100000025,-.742640479099999973,.334317364100000014,1.0808236980999999,-.731728511300000051,1.36069426949999994,.775953689199999963} +730 {-.597283928500000005,.434594504200000009,-1.18780619069999993,.00228859449999999987,-.588501792700000026,-1.3591381562,.0142331197999999992} +731 {.169381970599999987,.425040300900000001,.282360291400000019,-.610114440399999958,-.156026540800000002,-.22941517119999999,-1.41822732370000004} +732 {1.43030070170000001,.685925299899999952,-.866579133799999957,-.416529610800000005,-.530964036099999981,-.136256318200000004,.9031659098} +733 {-.293030121300000002,-.138232793999999992,2.16497596329999986,-.20780223049999999,-1.14956314059999998,-.874618410299999982,-.0848437378999999936} +734 
{1.57085355270000004,.153650559900000011,-2.0023581624000002,-1.7973701930999999,-.30942912509999998,1.80876047799999995,1.2779599720999999} +735 {-.243278639500000005,.0322633365999999977,-.670278411900000037,.607494721599999954,.090871573799999994,-.413337534299999976,-.876659614299999945} +736 {.127800212800000007,.243574223700000009,.815873151200000035,1.61195753630000005,-.235804172300000003,-.340571908199999995,.612167952399999993} +737 {-.626228142400000021,.855201521999999992,-1.25424216860000004,.00902583669999999956,-.187865283500000008,.1066530854,-.513369725999999971} +738 {.57481194889999998,2.41362432940000016,-.157433422500000003,.720045222600000034,1.0815368003000001,.175006027400000014,-.352505478500000025} +739 {-.00406736939999999964,.717302529699999969,-.0786046903000000019,-.176454268999999997,-.105804360900000005,.165592290100000011,-1.02179473260000009} +740 {-1.77032798969999994,-.426950700800000027,1.4262856223,1.3741787607,.759571162600000016,-.507970007399999979,-.930305279100000049} +741 {1.14401258600000011,.2210210705,.0471882429000000034,-1.75818935609999993,.202010746699999993,.632731863800000016,-.653398400699999993} +742 {1.13105165169999999,-1.3172467536000001,.0212314535999999984,-.816626827200000016,.586814632199999964,-1.77095294319999996,-.129186456700000007} +743 {-.466468498200000026,-.221764567500000009,-.0103501797999999996,1.41342433509999998,-.846043499999999948,.511571032600000031,-1.83973805820000003} +744 {1.37466793130000009,-.777561708000000018,-.636492401299999955,.735474263800000028,-.0347562698000000031,-.788196679799999966,-.19723025990000001} +745 {-1.70772484689999993,-2.77691339840000007,.230018895499999987,-.215199840100000012,-.803549371400000045,.983153835999999948,-.862576196999999989} +746 {.975338406099999999,-.746741133699999948,1.07126686599999998,.527410052499999948,.77452124219999996,.711866604999999986,-1.14064876009999994} +747 {-.759440544199999956,-.786324859199999948,-.387653101700000002,1.14376074010000006,-.0168510301000000008,-.459710720999999989,-1.43016674239999997} +748 {-1.04597991179999994,-.293540366300000022,1.00326039420000002,-.0877632420000000052,.799655938200000027,.634989847699999999,.0114524598999999998} +749 {-.869698091800000017,.677896024699999988,.157359623000000004,.286076761899999976,.591862963299999989,-2.25052938329999996,-.96215694279999997} +750 {.520804417599999958,-.78313564449999995,-.278345460499999975,-.0910759651000000031,-.954130305199999973,-.853844828900000019,2.23748494969999978} +751 {1.1119830866,-.592171762300000015,.462083794599999986,.194354674300000002,1.86848806739999995,2.16911623189999991,-.433386144500000015} +752 {-.157417280500000006,-1.31019059289999995,.259072440500000001,1.74322622910000002,-.57140063029999999,-.841499221700000022,1.56503370889999993} +753 {-.545996303799999971,-1.19278835830000007,-.978142497899999963,.339942924500000021,.114664808899999998,1.6483889974999999,-.348797519800000011} +754 {.447812437999999979,1.09037581310000009,-.828623152300000054,-1.49818080849999991,1.3540324131999999,.179058825899999996,-.320345181699999981} +755 {.766713652099999976,.0813831500999999957,-.0241825446000000013,.0189881536999999996,1.1433037238999999,-1.50289123529999991,.280225931799999994} +756 {1.2850464143,.817618237200000042,.686125770400000001,1.3131234972000001,-.1016471667,.121562130899999996,-.0380899119000000022} +757 {2.40647155590000006,-.52692028599999996,-.0776813878000000013,.840727139199999973,-.2866110402,.745969981200000021,-.0760825591000000023} 
+758 {-1.60313285680000006,-1.58653895489999996,-.608620735699999971,-.285873290000000002,-.164236446400000013,.702950668399999978,-.478329691999999973} +759 {1.33627191479999996,1.02645312070000005,-.292364262400000019,.633436128299999957,-1.05798893310000008,.802267325700000034,.198314404700000002} +760 {1.813046728,.880423478500000023,-.135907124699999993,-.313950656200000011,-1.3511119238,.238650598699999994,-.433569331800000013} +761 {-.408133290599999976,.737316996200000019,-.416584581999999981,-.980270899900000048,.632035064700000038,-.6877796786,2.54919413279999985} +762 {.810942715599999975,-1.5206647928999999,-.551501496700000038,-1.71475797610000003,-.670528382600000028,-.279170242800000024,-.507303266099999983} +763 {.421483463300000027,.0242852196000000013,-.597723978200000006,-.721866345799999976,.314479626800000023,1.10005227410000006,1.36082004619999997} +764 {.927642306299999997,-1.07325762349999998,-2.60210637060000005,-.247712990100000002,.676763201900000055,-1.03830412810000006,-1.53638012900000009} +765 {-.0975782147999999933,-1.63155460359999993,-.15779279739999999,.443591095699999993,2.75564751229999993,-.113449224000000001,-.73214631320000001} +766 {.641499118200000051,-.14892491020000001,-.194815974499999989,-.39249966460000002,-.543655150999999948,.957635763900000048,.437834940299999975} +767 {-1.2062071191999999,-.732116787100000055,-.140622858299999987,.537030600699999972,1.520615673,-1.67986639759999989,-1.45059229220000008} +768 {-1.54084352069999997,.120286463800000007,.447896546399999984,2.8725730824000002,.1588189079,-.196080148700000012,.515910445699999998} +769 {1.03786558349999991,.151064504699999991,-1.20668428420000007,1.54619467920000009,-2.76626334170000021,.65305175120000003,-1.84617899969999999} +770 {-.485495797200000023,.94601148469999996,1.10028497619999999,-.160549350000000007,.887836022699999949,1.38667518019999991,.479138008599999998} +771 {-.852776758400000001,1.05770361450000006,-.396006022499999999,.558076740399999993,.884246752699999972,-.776163315200000037,1.77423352750000007} +772 {-.393565662699999985,-.00678952159999999996,-.553942317500000003,.853074566400000012,-.820395977799999954,-.284010961399999984,.423488265799999986} +773 {-.816529542299999966,.398535072800000001,.93658992640000005,-.78630688559999995,-.0123080003,-1.07212893609999993,-2.03942310540000005} +774 {.522955028599999983,-.0777990587000000067,-1.013520011,.759756215300000037,.60929695880000001,1.01338139960000007,-1.94191800850000007} +775 {.533451907999999975,.109169397900000006,-.630640789400000012,-.776081531300000016,-1.12335739459999995,1.22364232839999998,-2.1406462663000001} +776 {.464946199700000007,-.313755837199999998,-.628242301400000036,1.21688018209999993,-.684809683999999974,-.357856835200000001,.385952122299999978} +777 {-.00845333870000000015,.104028840400000003,1.92669256729999994,-2.15962463689999984,-.373761198299999986,.555734385599999992,1.03921396089999996} +778 {1.7031043104000001,1.24736843260000008,-.734998405899999985,.0218648174000000001,-.482150972899999974,.307173706099999999,.226664625300000006} +779 {-1.33671055909999992,-1.74992060699999996,-1.1984339423999999,-1.11711418779999994,-.794494374999999975,1.1636263495000001,-.610233785699999975} +780 {-.00974886030000000035,-.774407320999999982,1.80639433449999998,-1.09102211249999992,-.523721750700000044,.302230256699999977,1.3703635192000001} +781 
{-.82904162800000003,-.983633742300000002,-.792115559700000027,.817840523700000044,-.279079402500000018,.655646772300000014,.604619125100000043} +782 {1.93500749429999996,-.465747137099999986,-1.22619953759999989,.0293696170000000006,-1.3156782924999999,-.103754295799999993,-.378001650700000025} +783 {1.92159585119999998,-.962514063800000041,-1.53524737910000009,-.065402573399999997,-.982096010699999966,-.3386194025,.707750819000000031} +784 {-.328886257399999993,1.22500201649999996,-.209765511999999987,.195558357699999991,.120731624800000006,-.0928237585000000059,2.56841536820000016} +785 {.464013937099999996,-1.12194328519999997,.837524760199999996,2.07706257800000005,1.31708408919999997,-.763399888999999998,-.574739429299999993} +786 {-.279745976100000016,1.27328446340000001,1.07000854990000005,-1.06213859209999995,-.7472832264,-.331605386200000019,-1.31129768669999991} +787 {1.21922683539999999,.0283077512999999996,-.536181589699999961,-.525727871900000032,2.66689402989999991,.178673455700000011,-.849318565000000025} +788 {-.661140779999999983,-1.27250714909999996,.936394388099999975,-.0535895537000000005,-.811355942599999991,.553896950599999993,-.146812490200000006} +789 {1.24693527330000009,.0844971478999999998,.813722984800000049,1.10667447549999998,.759991936000000035,-.00336179489999999985,-.088544828000000006} +790 {-1.01493167299999998,.772354299800000033,-.550960797000000002,-.103114045599999996,-.50919511340000001,1.73251274370000008,-1.47172855059999996} +791 {-.699644850899999948,-1.45229865799999991,-.829192182699999947,-.190540382700000011,1.28166335800000009,.0389484917999999986,-.603240461199999967} +792 {-1.75429104800000002,1.12432785090000009,-.285210468800000005,-.250884786299999996,.199015164200000011,-1.11151888449999992,1.11209766059999993} +793 {.415549779299999977,-.892495431099999958,1.26848892999999996,-.212824095600000013,1.52668479360000009,-1.6188243175999999,.113648948499999999} +794 {-1.23370107070000001,1.41482758319999991,-1.16960086889999992,1.43170205539999995,-.503238405499999986,.667189193799999969,.336916183400000013} +795 {-.00712250780000000042,-.846239392400000012,-1.18478099420000005,.902076098600000043,1.2664497160999999,-.534822670900000019,-.344574990199999986} +796 {.693641254999999957,-.726281689799999963,.865207830899999975,.424102081799999975,1.12149198379999993,1.19593654120000004,-.143687750199999992} +797 {-.417761711899999999,.0623760511999999975,.804361569200000015,-1.23451658459999991,1.65138940890000008,.918525731500000053,1.69524252099999995} +798 {-.0248001587000000016,-.255621421600000009,-.275752372099999976,-.208959280999999997,-.444243923500000026,.462000652800000022,.280519947400000025} +799 {-.416602873599999979,.434569199100000025,-1.80329655229999997,.640968167000000033,.859713816399999997,.176936762199999986,.777057859000000017} +800 {.742512962499999984,-.122435903700000001,-.788789586000000043,1.00604887250000008,.746195423099999977,-.734196494199999994,-1.87003239269999999} +801 {.196992498200000005,.834352687500000023,-1.97739101649999993,1.0182341614999999,1.84850203069999997,-.227551804100000005,-1.61522128210000004} +802 {.116366057800000006,.0482620272000000017,.465218274200000004,1.20456618959999995,-.709263227999999968,-1.56473907400000001,.600072750699999991} +803 {-.824265962899999982,.970124120200000051,-.187883215500000006,-.720744132300000007,1.53755528240000006,1.32544451200000002,-.276944548700000015} +804 
{-.350885763600000011,-.00241594990000000001,.520664367800000027,.216484390199999988,.171657558999999987,.0931023426999999931,-.323454652000000009} +805 {-1.9174930577,-1.54642952519999999,-.409454247400000027,-.0407252349999999985,-1.3668131803000001,-.455817854299999992,.163798402399999987} +806 {1.23202776739999997,.495986827600000013,.956076798600000055,.543419302100000001,1.0207808275000001,.364266643700000003,1.42388461020000001} +807 {.663414437100000032,.0537953315000000015,-.549233236299999983,1.38892408499999997,-.605406710000000015,2.35722695300000007,-.659600904399999965} +808 {1.09123609340000005,.0788577307999999977,1.14155872329999997,1.23678475709999991,1.29942991489999993,-.320857953799999984,-.349390712300000017} +809 {1.10559687919999994,.554654998399999988,-1.86871072269999994,-.462254342400000018,-.0143184173000000005,.0920906164000000055,.21698491889999999} +810 {-.650912035599999994,.577920605900000051,-.353056830300000013,-.887698689299999955,-.576495118700000031,-.239688513800000003,-2.85143841319999991} +811 {-1.49698947059999998,.971800800100000028,.374194449699999987,.374203797599999999,-.249144860899999987,-.895621098500000046,-.911722884300000036} +812 {-.77728080580000003,.979012240099999986,1.0119198505,.0476611359999999998,-1.027326199,.234999614899999987,.340701141400000018} +813 {.212601687700000014,2.60525391969999998,1.13055877009999994,-.748550040300000052,-3.14472924380000007,-1.0385019498000001,.098598353400000005} +814 {1.45804000300000003,.269373441699999994,.382928406300000024,-.344723177400000025,-.0892920662000000059,-1.31944239599999991,-1.05954732500000004} +815 {2.42531782680000019,1.08008818759999992,.581433631200000023,.713077885599999961,-.310648870799999977,-.0313175776000000003,-1.13922656280000001} +816 {.86895930109999997,-.506975697400000036,-1.05186829850000008,.905222807400000051,.604968451600000012,-.829544977199999978,-.422188655000000024} +817 {-.567504506300000044,-.678683311200000028,.0902824494000000061,.219378130700000007,-.400297369000000014,1.07655570510000009,.881152319499999948} +818 {-3.15922774039999998,.495094228600000019,-.449887149099999994,.296502931999999997,-.922374535899999959,-1.52606922919999999,.226363891799999994} +819 {-1.2497212016999999,-.401860325299999988,-.318741338500000027,.630403180600000002,-.781254760000000048,-.869564090999999983,-1.01026166439999998} +820 {1.16994872989999998,-.665713713299999954,-1.70985922679999991,-.018142001599999999,-1.71170480269999992,.327764746399999973,-.654245824999999948} +821 {-.803884862300000003,.497945141799999991,-.322332678799999994,-1.36614671930000009,.964509376400000007,.0567144322999999992,.593050977999999951} +822 {.696922836100000054,.123355263699999995,.804476802800000002,.968304059099999948,-.256937423499999984,-.14725845139999999,-.640186333699999999} +823 {-.180120707600000013,.857766396000000042,.753629691799999946,2.21556882309999992,-.541953768299999972,-1.32122641219999992,1.71771271040000006} +824 {.1161871872,2.33974629880000018,.329809313699999995,-.464299458900000017,-.390918182499999989,1.08217776769999996,1.33459647640000001} +825 {.370945693999999992,-.508932924000000009,-.649278520800000036,-.188750524199999992,.850837033499999951,-1.23944884470000005,1.09191343749999992} +826 {-1.0345832987000001,-.408475590300000024,-1.35965543760000007,.950039152400000031,-.812911175500000027,-.517717399399999945,.270891158500000007} +827 
{-1.5832468999,-.703461557499999945,.814187331099999967,-1.38121443310000003,-.834020580500000053,.0828035403000000059,-1.68759778410000005} +828 {1.4100272479,.373073853100000019,.127527401600000007,-.0228658495000000003,1.71591761910000007,.174704294399999988,-.79899607100000003} +829 {-1.49296198679999992,-.742878865400000032,.835000639800000055,-1.16369705379999999,-.354525437700000001,.453002733699999993,-.575072794300000023} +830 {.0819329765000000043,1.44183900049999991,.786821553699999954,-1.11634371300000002,-.719475029399999966,-.260989132800000023,-1.2463127196999999} +831 {1.00256079479999993,.738331332799999962,.4254025736,.868313804599999961,.377330276899999983,.293653755800000027,.303058363200000014} +832 {.613136712400000006,-.345564057500000021,-1.21103715420000002,-.606458635199999985,1.6440605587999999,.0907181482000000011,-.911361344599999956} +833 {.0456779795000000002,-.145554973000000004,.509120244300000024,-1.40783177270000004,-.234402707099999996,-.633871928799999984,.527068769400000026} +834 {.544183134299999982,.2216937722,-.405021598599999977,.349036678300000014,.571212123999999988,1.25519076820000008,.0678415722000000054} +835 {1.44197132910000003,-.364456834399999985,.714652020700000001,.479015539300000015,-.83318300040000004,-.849848155500000035,1.30939211960000002} +836 {2.05965509450000006,.140233841599999992,2.1202202765,-1.2295339836000001,1.20015966150000009,1.48531894459999991,-.0490851866999999983} +837 {.417524390999999995,-.239061214900000013,-.517206303699999981,-.28063059600000001,-1.36432504910000008,.0699169035000000022,2.03287078060000015} +838 {-1.53156875720000007,1.04302032309999992,1.51770650619999992,-1.25385546240000001,.212019529600000006,-.933353689199999947,.600915728299999952} +839 {1.27362911190000005,-.154965404599999995,.971695156999999976,-.902199202299999947,.14032966429999999,-.853801379199999988,-1.32643234269999999} +840 {-.614436515800000027,-.628960440299999979,-.109543077200000005,.673055382699999982,-1.12926155240000003,-.500870029400000027,-1.83948171689999995} +841 {-1.18782380319999992,-1.92668133470000003,-.39070483210000001,.495081109799999974,-.352516528399999973,-.990996469699999993,.357321058000000025} +842 {-.156029221200000012,.0811688093000000055,.452959292399999991,-.0272458045999999995,-1.2287819284999999,-1.03629753819999992,1.06999194819999999} +843 {-.246010211099999987,-.974796445800000022,-2.25241478239999982,-1.30516616749999992,.0999701090999999958,-1.57462863899999994,1.89018948389999997} +844 {.686132738600000014,-.241282423100000004,1.81995780190000001,.962501467900000018,-.488662930100000004,.270337100399999974,-1.28934921480000009} +845 {.539021260299999971,1.31227005209999992,.258020021699999991,.217618934799999997,.336131131500000013,.888637184699999971,-1.30621905640000002} +846 {.983514273199999955,-1.20314630830000002,-.103824997299999999,-1.43887623229999995,.262748707299999995,1.6580919288,.559457661499999981} +847 {.237996394299999997,-.498652206599999981,.488595206100000012,1.77374479899999993,-.754128969900000001,-.20153504920000001,-.65217272989999997} +848 {.222898642400000013,.131444944300000005,1.19659745020000008,-.509350153600000022,-.38423114949999998,-.259981795399999993,-.129121835200000007} +849 {.319241335000000015,.443866612300000019,-.0960008735000000002,-.664846030999999948,.156399111600000001,-.423686298500000003,-1.5392262874} +850 {.797886584599999948,.939397890499999999,-1.8806204340999999,-.367734524200000024,.729196316200000028,-1.65553955379999995,.916946193499999951} 
+851 {-.272447295599999972,1.07725776839999998,1.35660511989999999,-.129598489499999997,-.656414717900000055,2.16966965529999989,-.778903181999999972} +852 {-.0861829308000000016,1.46296310550000008,.118807847999999994,1.0122777866999999,-.311806586900000016,-.610494856999999946,-.346099440199999997} +853 {-.717088196800000022,.518252843600000013,-1.19132235929999997,1.31179666759999991,1.94402183649999993,-1.58908201800000004,.451010106399999999} +854 {1.26872632580000011,1.78140402379999996,-.881133853100000031,2.19224659529999988,.0543482456999999988,.0150261222000000003,1.56627437140000003} +855 {.268237496999999991,.672894501599999972,.993068392700000024,.0152073911999999999,1.88503077050000001,-1.75087929750000004,-.527534384099999976} +856 {.407524995800000012,-.924353840999999954,1.13446701649999993,.774022686000000015,-.0387094688000000006,.951696920199999963,-.306159796500000025} +857 {-.144929641400000003,1.01150321119999997,-.146811581200000013,.124751272199999999,.299573253500000025,.597840472300000036,-.57545864950000003} +858 {.507074704999999959,-.665657733700000032,-.50961062629999998,.552874595699999971,.853466119300000048,1.40040754340000007,-.978361521199999973} +859 {.437440220699999993,-.136184841400000006,1.10093755860000009,-1.06161246909999996,-.747647475900000025,2.24561829710000005,2.57827477619999978} +860 {1.39446817299999992,-.565370552400000004,-.752434720500000043,-2.17653503960000005,.0119319561000000001,-.83139016659999998,-.0690917234999999935} +861 {.466348304599999985,.200310880999999996,.546364782199999954,-.527513302699999964,.530828462899999964,.825779747200000025,1.2668800952999999} +862 {.853946387899999992,.574121629099999975,-.305506233700000018,.163350648399999993,-.870025785299999965,-.342492022100000015,.886661568199999972} +863 {-.508977163800000021,.228900923400000011,.268520554299999992,.898200071999999961,.99451714010000003,.273114108099999975,.834373543600000045} +864 {-1.06801335070000003,-1.77991398289999991,.163711081700000011,.666438816899999953,1.15039650050000009,.0156012984999999993,-1.06089161669999998} +865 {-.0313935293000000004,-.232710188099999993,-.582710535699999976,.152661577399999998,2.31572163789999985,.146022618800000004,1.44044358160000008} +866 {.834353668200000009,1.34972739870000003,.84058525279999996,.0591148525000000022,-.852801698399999974,.0523338028999999968,.0210106993999999991} +867 {-1.11349851510000009,-2.88042148330000014,1.23489107380000007,-3.30691467049999988,.395372069800000003,.401626560399999999,-.97091499130000003} +868 {-1.36948990119999991,2.20264542000000008,1.0690921094000001,.54332227980000003,-.063565424199999998,-1.3903792237999999,.722915360599999945} +869 {.734731374999999964,.0495625547999999985,1.51435818470000005,1.17344318669999992,-.281834719800000022,.729981352499999958,-.392180137900000003} +870 {.674683230800000033,-1.29522837759999998,-.304951470000000002,.62434764840000001,.96331222900000002,.0980171325000000065,-.675626214799999958} +871 {.419797901899999992,1.08870011209999995,2.07061992610000001,-1.75816326879999996,-1.23729177599999995,.83609963679999999,-1.40230839080000003} +872 {.408407499399999985,-.132705026000000004,.132414789799999988,1.58960236449999992,-.540585618000000045,.546784281400000016,.259100325700000023} +873 {-.417239839599999995,.628499061400000003,.125812039399999992,-.596720618999999952,-.458527417200000009,-.218816465799999998,-.930564818000000016} +874 
{-.0154448108000000001,-.382360959900000019,.155470743900000014,-1.54008951099999991,-.505465134200000055,1.85094601049999996,1.33565284959999997} +875 {.533541365599999984,-.907076263100000002,.89878297330000001,.0115241849999999993,1.80930025259999994,.319894987399999997,.708866632200000013} +876 {.410028331700000026,.468568676199999978,.997795938799999949,.231103171899999987,-.992335993900000046,1.96033668160000007,-.1403151074} +877 {1.08431799400000006,1.48686247499999991,-1.08107067329999995,.61541856490000002,-1.27179969589999997,-1.61627982019999994,-1.1472916044999999} +878 {-1.3592611938000001,-.883321978799999963,-.0117783383999999994,-.678289242800000025,-.0316951848999999994,-.735993032600000041,-1.34663898250000003} +879 {1.15855139039999999,-.295091429000000016,-.0142104147,.787203159799999996,-.638253410999999993,.386589571600000015,2.30827873289999985} +880 {-.517295293799999945,-.520699224999999988,-.00980958000000000004,1.3921342471,-.481157830700000011,.364106690000000011,-.796151228499999974} +881 {-.0512646291000000004,1.66162487129999992,-.0579982262000000026,-1.15690345960000007,-.291288454099999983,-.603226958000000035,-1.4870040256999999} +882 {-.658643127499999981,-.157213826499999987,.510246175100000032,-1.40256251919999997,-.203770555999999992,-2.00539470580000012,-.8289930405} +883 {1.3218739154000001,.429486257199999999,-1.14549734470000009,.278827328400000019,-2.0139313082000001,-.254463466299999996,.52296440590000004} +884 {-.0697202252999999939,-1.43345726009999996,-.422356514500000002,.667200455399999948,.0689637169999999938,-1.88525808830000008,-.619437134799999956} +885 {-1.05612182169999991,.163676020199999994,.232211934600000003,-1.14903301349999998,.704630673000000041,.281027100099999982,1.36164044539999995} +886 {-.775019456400000029,-1.64781333380000006,-.574496998999999953,1.03188059649999997,-.399964233900000021,.961407608300000049,1.1850829236} +887 {-.778703425999999976,.209266157099999989,.0516565926999999966,.826652366000000027,-.135955152800000006,-.561101869100000039,-2.05663398459999991} +888 {.408824090699999976,-1.69083077260000003,.134558460800000002,.182509972900000011,.924888879000000053,1.67176188389999991,.535204404799999978} +889 {-.544784752600000033,.291447271900000027,-.100854002700000001,1.10226555240000001,-2.28990967379999999,.136760134000000005,1.00993568450000004} +890 {1.24680197730000009,.776379058199999972,-1.2990607648000001,1.41682940619999997,-1.21965453859999995,.79632510990000005,-.0553107450000000014} +891 {-.859220696399999984,-.441888876799999997,-.471641872199999979,1.32462376869999998,-.573602939799999989,-.175236422899999994,-1.46131582449999997} +892 {.492358810799999991,.701990948700000006,.142300841600000005,-1.08905382309999998,2.46004558120000016,-.307428591400000006,1.07684732049999998} +893 {-1.42777464279999999,.596656818399999955,-1.68988840210000002,.513324396300000041,.755273067199999981,-1.26216282599999996,.0766307308000000048} +894 {.664408379599999988,.277575550800000015,.4598226849,-1.6693471554999999,-1.46614491740000008,-1.65159977310000006,-.152405659500000012} +895 {1.46076323620000004,-1.04166450890000006,.705607649499999989,.239661961899999998,.646722981899999994,-.630657006399999998,-1.4011534819} +896 {1.39902252420000006,-1.09141014119999991,-.124164647200000006,.159946632499999991,.455344952600000019,-.587688087800000036,1.73216245339999997} +897 {-.632677525600000012,-2.15966232619999987,1.601318464,-.488299246600000014,1.0649086459999999,-1.1214909366000001,.294188781200000027} +898 
{-1.66952122520000001,2.18809167820000017,.713276509499999989,-.0236322483999999999,.252899475900000004,-.438933389900000026,1.03364449339999998} +899 {.156863467799999989,.78332715350000004,.479163261700000009,-.493260716499999974,.717265715000000026,.755864141999999961,-.338629052799999997} +900 {.226309971100000007,-.703979314100000031,.64215295969999997,-.730787098900000043,-.251335191999999985,1.00182159429999995,-1.2687240607000001} +901 {.649266545899999992,3.44297733789999993,.582679103499999962,-1.46686131510000006,-.679819311499999968,-.725582708899999984,2.09044737739999986} +902 {.469335168700000027,-.691306000900000051,.202327575000000009,.59716415580000004,-.299375986699999985,-.309694595800000028,-.998125769399999974} +903 {1.95254548150000007,.913792325500000002,.938776156500000014,-1.49895133990000007,-.232218844199999996,-.0554549542999999978,-.668669630400000048} +904 {-.856149562100000039,.364070071800000006,.691422779000000043,-1.57959836800000009,.521734866600000013,-.875420146800000021,1.00169843800000002} +905 {-.0114157081999999993,1.71606306590000002,2.12027307199999981,.853328078899999953,.503227357200000003,.368222905699999992,-.864253300699999949} +906 {-1.16144055229999998,1.23362594580000007,1.4922281879999999,-1.09116266539999995,-1.87461189630000002,.898953364899999996,-.669288538200000005} +907 {.954010746000000021,-.693587869899999987,-1.80035237730000008,2.19860615579999985,-.762509717200000048,.127320923800000013,-.420143229100000015} +908 {.403936934700000028,.408860792899999992,1.96746215010000003,.141399903599999988,-1.40473972260000002,-.450194360999999987,.409994847699999998} +909 {.551868688799999951,-.122385007599999998,-.88301166809999998,.177719895300000014,.98953376329999998,.606983429400000052,1.0924964278} +910 {-.678258223099999946,.855433019799999972,.268268610799999974,.733654048899999967,1.91344917179999996,-.786201377499999965,1.00997305809999993} +911 {-.473587068800000011,.871131274500000052,-1.51364891549999991,-.132179246599999994,.454095454299999979,-.38913450799999999,-1.41018290390000001} +912 {.208530340199999997,-.244129409200000008,.956401703000000047,.683378481500000023,.276809523599999996,.868551515199999957,1.03834861760000008} +913 {1.0845732897,-.163394437799999986,-.0167274616000000001,-2.2417208005,.221633652699999989,.444400443599999972,-.813590115600000052} +914 {-.491389202999999997,-.578836445199999994,.515996464400000021,1.64118677179999994,.560686950300000042,.425490538600000023,-2.59492310359999978} +915 {.324250666099999973,2.19998794159999989,.465490828600000017,-.201739325800000008,-.27095684040000001,-2.07217169870000006,-.776664374799999946} +916 {.681452008000000053,1.25043557030000008,.502767348300000028,1.69745214890000007,-.332594282100000027,1.02954330909999991,.601792464299999996} +917 {1.19052746580000002,.310014118399999983,-.987163400999999996,1.0179620305999999,-.178409266099999991,1.00862701079999995,-.700511357000000001} +918 {.487922764800000019,-.398465294299999995,.797970258400000021,-1.1343414986,1.32301295119999995,-.117421271199999996,.975383072600000012} +919 {-.216904779800000003,-.558518515599999987,-1.87701600010000003,-.769702465599999996,2.11252299650000008,-.488972569700000026,.990025345499999987} +920 {-1.40823443649999991,-.143640037700000001,-.125533253000000011,1.28402308210000005,1.86211586409999996,-.127144834699999987,.190509267300000007} +921 
{-.51424550010000003,-1.41556886550000005,.0644480825000000035,.477685757900000008,1.03524253569999991,-.0328911714999999966,.574186968200000036} +922 {-.82109062960000001,1.32581859150000003,-1.04219045089999995,1.09289482160000007,-2.06359269400000001,.00678891619999999994,-1.0922540315} +923 {-.00421129859999999981,1.5212785834,.0683862969999999987,-.170510508199999994,-.423232258799999983,-.559873771799999975,.466809845300000026} +924 {.317029503900000009,1.3251842895999999,1.6088289375,.127872910000000006,1.2731575985000001,.281781396000000017,-1.08841963749999993} +925 {-.75192723039999998,.285968821500000026,-.193055516700000013,-.186004018500000007,-1.27925209390000005,.0975294983999999976,-1.13963688919999995} +926 {-.1129397704,.286250367399999983,-1.14603696330000004,.708442835800000004,-.805357368100000026,-.380573960099999997,-.0411780611999999976} +927 {-.629459438599999976,-.320243496500000002,-1.00374908809999996,.0831886582999999957,-.971414338099999997,-.474722524899999998,-1.05186679310000009} +928 {.370226256400000009,.802184896999999952,.873185871699999949,-1.71261570070000002,1.93912430179999995,-.0892093702999999932,-1.05727367520000004} +929 {-.118096148400000003,-.984965509799999972,.704407256400000015,-1.40835697929999992,.0412647106999999985,.359364773999999998,-.789274890900000026} +930 {.194189285200000006,-1.71488335060000008,.53029461109999998,-1.05871724350000007,.981134466700000041,.218667786800000014,-.24573754980000001} +931 {.947279959499999991,1.29983413780000001,1.97612876569999996,1.88300359799999995,-.570561939600000012,.0654524380999999961,-.219295887200000011} +932 {3.20779995349999991,.852919260499999998,-1.22970778989999996,-.139929576400000005,2.35296963090000011,.33462362750000002,-1.47078784490000003} +933 {-.735308774800000009,-.878335184799999946,.0252954638999999988,-1.09063535909999998,-.105923634500000002,.625823655100000042,-.854202376399999963} +934 {-.228913384299999995,-.660303865100000054,2.07850154399999987,-.735280149199999955,.697819296899999997,.344168947599999997,-.35932196449999998} +935 {-1.85438273340000004,1.55310226330000001,-1.50653090959999991,-1.81917558389999989,-1.0954815979000001,-2.06603227419999991,1.3057200565} +936 {.761973164100000044,.807939222100000021,.338906942900000008,-1.72467342659999989,-.167090681600000013,-.0816265139000000028,-.159900758100000012} +937 {-1.15944479810000001,-.0954063341999999981,.148264465099999992,-.220857827400000012,-.324779547900000021,.0540965097999999978,-.557471274099999969} +938 {-2.29203846299999991,-1.4692212575000001,.216672321700000003,1.08093226840000001,-1.5017375973,-2.4172406351000002,-.701438366500000021} +939 {.731404488000000019,.305995716899999992,-.490590708300000011,.526819284699999946,-1.40625816470000009,1.07102024080000002,.646604705699999949} +940 {-.571375065200000032,.958769273499999963,1.27533413590000011,-.505857298100000019,-1.31849689689999994,-.583662767299999996,.886907276500000008} +941 {-.0842571010000000009,-.393781412499999983,1.54804278020000008,-.377038872000000025,-.0648170829999999976,-.180603271799999993,-.693537031400000004} +942 {.984671750499999998,-1.68421276470000003,-.704637773299999992,.864930916900000013,-.000394322899999999992,1.07380035759999992,.633304610399999968} +943 {-1.23063978249999995,.164548098300000001,1.75239545959999998,1.36742537399999997,.578800079900000042,-.192996138799999994,.690681649600000003} +944 
{.0300329010999999986,-.191165204199999994,-.0591941008999999976,1.9198624433,1.2071709986000001,-2.5504110835999998,-.63790911589999999} +945 {.571379135000000038,-.531953084700000023,-.248896654999999994,-1.55641622310000005,1.00406207520000001,-.442947594800000011,.404739035999999996} +946 {.410000184499999976,-.515220265800000021,-2.8805926877000001,-.722029623700000012,.339386021400000015,-.625201576100000045,.330849876200000004} +947 {-.766830005800000047,-.617302572099999947,-.70658551930000002,.473019604500000024,.399174873200000002,.208482139600000005,.173181972699999992} +948 {.088134859600000004,-.180984702800000008,-.729284205799999974,2.21615679999999982,.274779870199999998,-.661767364000000025,.480193183099999976} +949 {-1.77223849059999994,.0633409681000000058,.254401994299999989,1.03233634810000008,-2.17254257550000007,1.6939743705000001,-.321549727200000024} +950 {-.335016985599999984,-.619906357099999972,-.446908791799999983,-.857010101600000018,1.27641794080000004,-1.65276744119999996,-.188864817200000007} +951 {-.192030000000000006,-1.28667086030000011,.249414583400000001,.103866753699999995,.00203329729999999998,1.54334025670000008,-.604689071899999986} +952 {1.20816676319999994,-.752635848399999974,1.37341823670000007,1.87117381559999996,-.188175046699999987,-.998589463399999988,-.845670820399999967} +953 {.199173482900000004,.940835741300000028,-.389940306800000025,.710089759000000043,.860991196399999992,-.477983674600000019,.541895807399999985} +954 {-1.38585590970000005,-.354720303299999984,.362245530400000015,1.87463201820000003,.365366036499999991,-.103997108800000002,.183254022799999999} +955 {-.18543968990000001,1.22100544849999992,1.03193862830000005,.278078365599999988,-.0289538972999999998,.227042661600000001,1.4148220319} +956 {-1.06706912479999994,1.38266796469999997,1.65274966120000011,-2.1425370319999999,-1.27580104849999998,-.0843159512999999972,-.0336816647000000025} +957 {.0192505616999999983,-1.34142458620000005,.129496366499999987,.242201001199999988,.721105481800000003,1.40059498210000011,-.498602920100000013} +958 {-1.0824453181,.483003869099999983,-.109453922100000003,-.386565743600000011,.147482019200000009,-.280941456300000003,-1.01502062520000003} +959 {.407917829299999979,1.15345432050000007,-.17304567030000001,.57634011669999996,.439505472100000016,.162112514000000013,.471013364400000023} +960 {1.12431169349999993,-.76344822499999998,-1.20421435580000002,1.1769587211000001,-.350081667699999988,-1.13632651990000011,.572605440000000021} +961 {-.0252819728000000001,-.692059509100000025,-.195554999199999996,-.874770602599999947,1.77320615310000007,-.881166005800000041,.765514967400000024} +962 {.698353132499999973,-.185609130099999992,-1.4574788622999999,.636775672099999968,-.138870748699999991,.701470665100000002,-.764687970700000053} +963 {-.697625421899999987,-.610620414100000009,.670364276399999959,-.0561726660999999974,.241731986600000004,-.043753642199999998,-.9365405443} +964 {-.445396843599999981,-.430109113499999973,-.362166942800000002,.859075213100000012,.0393881430000000005,1.02932102099999989,-1.21786716930000005} +965 {-.126381949999999993,.501352228599999949,1.33738629969999989,.136574364799999987,.197208158299999992,-.498714144699999973,.960811178000000043} +966 {1.3049909905999999,-2.27133777649999979,-1.16183275419999998,1.04432738809999992,-.254723391700000024,1.6655303077000001,.557652273300000001} +967 
{-1.35132786240000002,-.632256453500000037,-.295795597199999982,-.581045833000000012,.324683677299999973,.638709331299999961,-1.77692131530000008} +968 {-.120240193300000006,.42785788050000001,-1.02308397899999992,.407816963899999996,.0601302058000000028,.755946749699999976,.817548529800000034} +969 {.426727321100000001,1.4367122134000001,1.0432341972000001,-.313892105600000026,-.759967350799999997,-.549051955000000036,-.551600891999999954} +970 {-.0741471803999999984,.917098441399999964,-2.6192288437000002,-1.33988667429999997,-.182189359500000009,.568216980300000007,-.0531100840000000018} +971 {-.377093883799999974,-.520484353700000013,-.047112296900000003,.0619913453000000003,.968933299599999964,-.59736580429999997,1.57188868320000008} +972 {-1.79092484479999992,-.963563280300000047,-.244887384399999991,.177160919199999989,.534234100000000045,-1.52195560190000001,.0572041033000000032} +973 {-.66924779879999996,2.08876757850000017,-.609308122299999977,1.64301102609999994,1.51405171549999995,1.48674347579999999,.0689875504999999944} +974 {1.2861756337000001,.317962177100000021,.0554899243,-2.76923346749999988,-.245963309699999993,.166242375500000011,-.638806274799999962} +975 {.302466803299999976,.725622152400000053,.159182512100000007,-.696962839799999956,1.10914842160000005,-.946756105800000025,.610064786999999997} +976 {1.76926998270000002,.1915548735,1.20852541999999996,-1.83621454989999999,.489020559500000007,-.180192393499999992,.0861708256000000006} +977 {.0890708984999999953,1.02165975779999996,.396162242499999984,-1.97438189730000002,-1.31639236240000002,.468948140100000022,.578267716199999948} +978 {3.81596305899999999,-.0752894152000000011,1.22874485409999989,-.34401148170000001,-.60293661409999999,-1.73670491040000008,-1.34967213730000002} +979 {.363137535499999997,-.249188526599999988,.143770077100000004,.485635864600000022,-.248843908499999988,-.378490280899999976,.833624267699999977} +980 {.622501226000000019,.497106488899999976,-1.33962174819999991,.362473866500000019,-1.2461734109,.391844193200000024,1.02528984480000007} +981 {-.384596219000000017,-.770412823100000033,-1.65080170950000005,-.701575110299999971,-.236027047800000001,-.0930945457999999976,-.572952987400000002} +982 {-.0585530698999999974,.254103893299999994,-1.10669696919999994,.92723278440000001,1.54901502390000001,2.47350985960000003,-1.60290761009999994} +983 {1.27727143309999991,-.98130734070000003,1.47056766809999995,-.0779630959000000012,-.700182656800000025,-.0482123382999999975,-1.06129754630000006} +984 {.374395889599999976,-.354548487400000001,.451433358499999993,.780539419400000045,-.743345178200000012,1.46968860729999995,-.838439041400000029} +985 {1.35060551320000011,.778508637499999989,-.500864532799999984,-.619246187999999975,.112550077400000004,-.956398619600000055,-1.7477973336999999} +986 {-1.41138196309999997,.414554666700000018,-.31560937750000001,-.433170143600000002,-1.32478244690000002,.877389351399999962,.619442110499999976} +987 {-.18143779069999999,-.3355292473,.111174970299999995,.515750556600000021,-1.84532246280000001,-.541402038400000007,.032716658400000001} +988 {1.01893832769999992,.378251614399999991,2.14589194699999997,-.124763145699999994,.454165169199999996,-1.27359607670000008,.902309110000000025} +989 {-1.54856588299999998,-.181674206100000013,.723721445000000019,1.95151071070000004,-.931262678700000013,1.38000265989999993,-.0611850675000000024} +990 
{-2.51674759299999984,1.84086786150000004,-1.18488812280000011,-.70473682950000005,.450240381500000009,1.71147135669999995,.46996363889999998} +991 {-.961938594100000044,-1.9304573946000001,.0920584536000000064,-.00605350860000000018,.138979703399999988,1.02023121059999999,-.469924267900000026} +992 {-1.3191943477000001,-.907261229299999972,.956622596200000008,-1.29144461089999996,.063054408199999995,1.72325185590000007,-.0153563312} +993 {-.286329743099999978,-.460476938100000011,-.729120913100000045,.194630038199999994,.636778373399999986,-.484935115699999975,-1.31975202160000005} +994 {.191581073800000001,-.381619132600000022,.39152344389999999,1.00012367530000001,-.221120326099999986,-.402539806399999978,-1.28340309930000007} +995 {.867869126700000049,1.23740177690000008,-.367942419600000015,.603225568599999984,-.469980195499999975,.056629065800000003,1.21323942490000003} +996 {-1.77010136630000003,-.942975448100000002,.0669344711999999981,-2.3128676813000002,1.39298435589999992,-.694643792799999993,-.209453455300000008} +997 {-.0834743440000000059,1.6286473508999999,-1.08926973230000002,-.136149935899999991,-1.45645855169999994,.858922551799999989,.592540473699999959} +998 {.0847548062000000046,.291964872799999997,.224799692200000012,-.967997088499999991,.816994693099999947,1.00957376919999997,-.27638102790000002} +999 {-.291616029200000015,1.32491980610000004,.0884881534999999997,-.666581132899999984,-.628715659100000046,-1.34722614740000002,.282751338899999982} +1000 {-.467243306999999997,-1.64175936900000008,.46417042289999999,-.0289161113999999997,.329442297900000014,-.90642406590000002,-.491403329900000019} +\. +; + +-- Running the tests + +-- Testing PCA training + +CREATE MODEL pca_fit_2_2_1000 USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + +CREATE MODEL pca_fit_7_7_1000 USING pca FEATURES position FROM multivariate_7_1000_1_test WITH number_components = 7, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_2_2_1000'; + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_7_7_1000'; + +EXPLAIN CREATE MODEL pca_fit_2_2_1000_explain USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_2_2_1000_explain'; + +CREATE MODEL pca_fit_7_8_1000_more_components USING pca FEATURES position FROM multivariate_7_1000_1_test WITH number_components = 8, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + +CREATE MODEL pca_fit_7_4_1000_less_components USING pca FEATURES position FROM multivariate_7_1000_1_test WITH number_components = 4, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 
'pca_fit_7_8_1000_more_components'; + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_7_4_1000_less_components'; + +-- Invalid hyperparameter values + +CREATE MODEL pca_fit_2_2_1000_wrong_optimizer USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977, optimizer = 'gd'; +CREATE MODEL pca_fit_2_2_1000_wrong_optimizer USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977, optimizer = 'ngd'; +CREATE MODEL pca_fit_2_2_1000_wrong_num_components USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = -2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + +-- Prediction + +SELECT id, PREDICT BY pca_fit_2_2_1000(FEATURES position) as projection_2d FROM multivariate_2_1000_1_test WHERE id <= 10; +SELECT id, PREDICT BY pca_fit_7_7_1000(FEATURES position) as projection_7d FROM multivariate_7_1000_1_test WHERE id <= 10; +SELECT id, PREDICT BY pca_fit_7_8_1000_more_components(FEATURES position) as projection_7d FROM multivariate_7_1000_1_test WHERE id <= 10; +SELECT id, PREDICT BY pca_fit_7_4_1000_less_components(FEATURES position) as projection_4d FROM multivariate_7_1000_1_test WHERE id <= 10; + +-- Projections that cannot be performed (feature dimension mismatch) + +SELECT id, PREDICT BY pca_fit_7_7_1000(FEATURES position) as projection_7d FROM multivariate_2_1000_1_test WHERE id <= 10; +SELECT id, PREDICT BY pca_fit_2_2_1000(FEATURES position) as projection_2d FROM multivariate_7_1000_1_test WHERE id <= 10; + +-- Cleanup + +DROP MODEL pca_fit_2_2_1000; +DROP MODEL pca_fit_7_7_1000; +DROP MODEL pca_fit_7_8_1000_more_components; +DROP MODEL pca_fit_7_4_1000_less_components; + + +DROP TABLE IF EXISTS multivariate_2_1000_1_test CASCADE; +DROP TABLE IF EXISTS multivariate_7_1000_1_test CASCADE; + +SELECT 'DB4AI PCA TEST COMPLETED'; diff --git a/src/test/regress/input/db4ai_gd_snapshots.source b/src/test/regress/input/db4ai_gd_snapshots.source index d4f8cc155..c672b3ce8 100644 --- a/src/test/regress/input/db4ai_gd_snapshots.source +++ b/src/test/regress/input/db4ai_gd_snapshots.source @@ -56,9 +56,9 @@ CREATE MODEL abalone USING linear_regression FROM abalone@train WITH seed = 1; -SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, modeldata, weight, - hyperparametersnames, hyperparametersvalues, hyperparametersoids, coefnames, coefvalues, coefoids, - trainingscoresname, trainingscoresvalue, modeldescribe +SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight, + hyperparametersnames, hyperparametersvalues, hyperparametersoids, + trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len FROM gs_model_warehouse; SELECT id, target, prediction, abs(prediction-target) as residual diff --git a/src/test/regress/input/db4ai_gd_train_predict.source b/src/test/regress/input/db4ai_gd_train_predict.source index cc0c5d1dc..e642dd374 100644 --- a/src/test/regress/input/db4ai_gd_train_predict.source +++ b/src/test/regress/input/db4ai_gd_train_predict.source @@ -19,27 +19,27 @@ CREATE MODEL m using logistic_regression FROM db4ai_houses; CREATE MODEL m using logistic_regression FEATURES size,lot FROM db4ai_houses; -- Errors with semantic validation of
hyperparameters -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 0; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 0.0; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 0.0; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 0; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = -1; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = nogd; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 0.0; -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = ttrue; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 0, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 0.0, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 0.0, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 0, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = -1, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = nogd, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 0.0, seed=1; +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = ttrue, seed=1; -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 'a_wrong_parameter'; -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 'a_wrong_parameter'; -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 'a_wrong_parameter'; -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 'a_wrong_parameter'; -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = 'a_wrong_parameter'; +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = 'a_wrong_parameter', seed=1; -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses 
with lambda = 0.0; -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 'a_wrong_parameter'; -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = 'a_wrong_parameter'; -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 'a_wrong_parameter'; -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = 'a_wrong_parameter'; +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 0.0, seed=1; +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 'a_wrong_parameter', seed=1; +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = 'a_wrong_parameter', seed=1; -- Normal model with logistic_regression @@ -64,21 +64,76 @@ DROP model m3; -- EXPLAIN test -EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM db4ai_houses; +show enable_material; +EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM db4ai_houses WITH seed=1; -EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath as renamed_bath TARGET price < 100000 as target_price FROM db4ai_houses; -EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath::float as transformed_bath TARGET price < 100000 FROM db4ai_houses; -EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM (SELECT * FROM db4ai_houses); -EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id); -EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id LIMIT 5); -EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES f1, f2 TARGET price FROM (SELECT bedroom as f1, bath as f2, price FROM db4ai_houses ORDER BY id LIMIT 5); -EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom); -EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom HAVING bedroom < 15); -EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES bedroom + bath, tax*1.2 as normalized_tax TARGET price < 100000 FROM db4ai_houses; +EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath as renamed_bath TARGET price < 100000 as target_price FROM db4ai_houses WITH seed=1; +EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath::float as transformed_bath TARGET price < 100000 FROM db4ai_houses WITH seed=1; +EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM (SELECT * FROM db4ai_houses) WITH 
seed=1; +EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id) WITH seed=1; +EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id LIMIT 5) WITH seed=1; +EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES f1, f2 TARGET price FROM (SELECT bedroom as f1, bath as f2, price FROM db4ai_houses ORDER BY id LIMIT 5) WITH seed=1; +EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom) WITH seed=1; +EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom HAVING bedroom < 15) WITH seed=1; +EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES bedroom + bath, tax*1.2 as normalized_tax TARGET price < 100000 FROM db4ai_houses WITH seed=1; +-- Expect no materialization +EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 as target_price FROM (SELECT * FROM db4ai_houses ORDER BY id) WITH seed=1; +set enable_material = off; +EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 as target_price FROM db4ai_houses WITH seed=1; +set enable_material = on; +-- svm multiclass +CREATE TABLE db4ai_ecoli ( + id BIGSERIAL, + f1 REAL, + f2 REAL, + f3 REAL, + f4 REAL, + f5 REAL, + f6 REAL, + f7 REAL, + cat VARCHAR, + PRIMARY KEY (id) +); + +\copy db4ai_ecoli(f1,f2,f3,f4,f5,f6,f7,cat) FROM '@abs_srcdir@/data/ecoli.csv' DELIMITER ','; + +CREATE MODEL ecoli_svmc USING multiclass + FEATURES f1, f2, f3, f4, f5, f6, f7 TARGET cat + FROM db4ai_ecoli WITH seed = 1, max_iterations=250, tolerance=1e-7, + learning_rate=2.0, lambda=50; + +SELECT id, cat AS target, PREDICT BY ecoli_svmc (FEATURES f1, f2, f3, f4, f5, f6, f7) AS prediction + FROM db4ai_ecoli + WHERE MOD(id,10)=0 + ORDER BY id; + +SELECT COUNT(*)/(SELECT COUNT(*) FROM db4ai_ecoli) AS accuracy +FROM (SELECT id, cat AS target, PREDICT BY ecoli_svmc (FEATURES f1, f2, f3, f4, f5, f6, f7) AS prediction + FROM db4ai_ecoli + WHERE prediction=target + ); + +CREATE MODEL ecoli_logregr USING multiclass + FEATURES f1, f2, f3, f4, f5, f6, f7 TARGET cat + FROM db4ai_ecoli WITH seed = 1, max_iterations=250, tolerance=1e-7, + learning_rate=30.0, classifier='logistic_regression'; + +SELECT id, cat AS target, PREDICT BY ecoli_logregr (FEATURES f1, f2, f3, f4, f5, f6, f7) AS prediction + FROM db4ai_ecoli + WHERE MOD(id,10)=1 + ORDER BY id; + +SELECT modelname, modeltype, processedtuples, discardedtuples, outputtype, trainingscoresname, trainingscoresvalue + FROM gs_model_warehouse + WHERE modelname LIKE 'ecoli%' + ORDER BY modelname; -- Cleanup +DROP MODEL ecoli_svmc; +DROP MODEL ecoli_logregr; DROP TABLE IF EXISTS db4ai_houses; +DROP TABLE IF EXISTS db4ai_ecoli; SELECT 'DB4AI TEST COMPLETED'; diff --git a/src/test/regress/input/db4ai_kmeans_train_predict.source b/src/test/regress/input/db4ai_kmeans_train_predict.source index 4b652688f..05e38f476 100644 --- a/src/test/regress/input/db4ai_kmeans_train_predict.source +++ b/src/test/regress/input/db4ai_kmeans_train_predict.source @@ -1063,6 +1063,8 @@ COPY multivariate_7_1000_10_real_centroids (id, "position", closest_centroid, l1 \. 
; +CREATE TABLE multivariate_7_1000_10_unnested AS SELECT id, position[1] AS x_1, position[2] AS x_2, position[3] AS x_3, position[4] AS x_4, position[5] AS x_5, position[6] AS x_6, position[7] AS x_7, closest_centroid, l1_distance, l2_distance, l2_squared_distance, linf_distance FROM multivariate_7_1000_10; + -- Running the tests -- Random++ @@ -1093,6 +1095,30 @@ SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr( SELECT id, PREDICT BY my_kmeans_pp_l2_sqr(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; SELECT id, PREDICT BY my_kmeans_pp_l2_sqr(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; +CREATE MODEL my_kmeans_pp_l2_sqr_no_num_features USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; +SELECT SUM(l2_squared_distance) FROM multivariate_7_1000_10 AS l2_squared_approx; +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0); +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0); +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; + +CREATE MODEL my_kmeans_pp_l2_sqr_unnested USING kmeans FROM (SELECT ARRAY[x_1, x_2, x_3, x_4, x_5, x_6, x_7] AS position FROM multivariate_7_1000_10_unnested) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; +SELECT SUM(l2_squared_distance) FROM multivariate_7_1000_10_unnested AS l2_squared_approx; +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0); +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0); +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; + +CREATE MODEL my_kmeans_pp_l2_sqr_unnested_no_num_features USING kmeans FROM (SELECT ARRAY[x_1, x_2, x_3, x_4, x_5, x_6, x_7] AS position FROM multivariate_7_1000_10_unnested) WITH max_iterations = 50, num_centroids = 10, tolerance = 
0.00001, batch_size = 1000, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; +SELECT SUM(l2_squared_distance) FROM multivariate_7_1000_10_unnested AS l2_squared_approx; +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0); +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0); +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; + CREATE MODEL my_kmeans_pp_linf USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'Linf', seeding_function = 'Random++', verbose = 1, seed = 1255025990; SELECT SUM(linf_distance) FROM multivariate_7_1000_10 AS linf_approx; SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_linf(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); @@ -1104,6 +1130,9 @@ SELECT id, PREDICT BY my_kmeans_pp_linf(features position) AS centroid_id FROM m DROP MODEL my_kmeans_pp_l1; DROP MODEL my_kmeans_pp_l2; DROP MODEL my_kmeans_pp_l2_sqr; +DROP MODEL my_kmeans_pp_l2_sqr_no_num_features; +DROP MODEL my_kmeans_pp_l2_sqr_unnested; +DROP MODEL my_kmeans_pp_l2_sqr_unnested_no_num_features; DROP MODEL my_kmeans_pp_linf; SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l1(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); @@ -1216,5 +1245,6 @@ CREATE MODEL my_kmeans_pp_more_attr USING kmeans FROM (SELECT ARRAY[135.52912207 -- Cleanup DROP TABLE IF EXISTS multivariate_7_1000_10; DROP TABLE IF EXISTS multivariate_7_1000_10_real_centroids; +DROP TABLE IF EXISTS multivariate_7_1000_10_unnested; SELECT 'DB4AI KMEANS TEST COMPLETED'; diff --git a/src/test/regress/input/db4ai_snapshots.source b/src/test/regress/input/db4ai_snapshots.source index e08318142..304b8cb38 100644 --- a/src/test/regress/input/db4ai_snapshots.source +++ b/src/test/regress/input/db4ai_snapshots.source @@ -1,2 +1,2 @@ --run -\! sh @abs_srcdir@/snapshots_test/test.sh -r -p @portstring@ -d regression +\! 
@abs_srcdir@/snapshots_test/test.sh -r -p @portstring@ -d regression diff --git a/src/test/regress/input/db4ai_svm_kernels.source b/src/test/regress/input/db4ai_svm_kernels.source new file mode 100644 index 000000000..01ec504e7 --- /dev/null +++ b/src/test/regress/input/db4ai_svm_kernels.source @@ -0,0 +1,65 @@ +CREATE TABLE moons( + id BIGSERIAL, + cls SMALLINT, + x REAL, + y REAL +); + +\copy moons(cls, x, y) FROM '@abs_srcdir@/data/moons.csv' DELIMITER ','; + +SELECT COUNT(*) FROM moons; + +-- linear, expected accuracy = 0.890 +CREATE MODEL moons_linear USING svm_classification + FEATURES x, y TARGET cls + FROM moons + WITH seed=54, batch_size=8, decay=1e-20, + learning_rate=0.01215337, lambda=920.90725960, + tolerance=0.06377824, max_iterations=2; + +-- gaussian, expected accuracy = 0.935 +CREATE MODEL moons_gaussian USING svm_classification + FEATURES x, y TARGET cls + FROM moons + WITH seed=1, batch_size=4, decay=0.80858937, + learning_rate=0.16556385, lambda=274.28986109, + tolerance=0.00714786, max_iterations=33, + kernel='gaussian', gamma=0.96736585; + +-- polynomial, expected accuracy = 1.000 +CREATE MODEL moons_polynomial USING svm_classification + FEATURES x, y TARGET cls + FROM moons + WITH seed=1, batch_size=2, decay=0.87908244, + learning_rate=0.40456318, lambda=53.75794302, + tolerance=0.00003070, max_iterations=35, + kernel='polynomial', degree=4, coef0=1.11311435; + +-- display the three models +SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight, + hyperparametersnames, hyperparametersoids, hyperparametersvalues, + trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len + FROM gs_model_warehouse + WHERE modelname LIKE 'moons%' + ORDER BY modelname; + +-- validate the models by predicting all at the same time, expected [0.89, 0.935, 1.0] +SELECT (SUM(CASE WHEN t=p1 THEN 1 ELSE 0 END) / (SELECT COUNT(*) FROM moons)) AS acc_lin, + (SUM(CASE WHEN t=p2 THEN 1 ELSE 0 END) / (SELECT COUNT(*) FROM moons)) AS acc_gauss, + (SUM(CASE WHEN t=p3 THEN 1 ELSE 0 END) / (SELECT COUNT(*) FROM moons)) AS acc_poly + FROM (SELECT cls AS t, + PREDICT BY moons_linear (FEATURES x, y) AS p1, + PREDICT BY moons_gaussian (FEATURES x, y) AS p2, + PREDICT BY moons_polynomial (FEATURES x, y) AS p3 + FROM moons +); + +-- cleanup models +DROP MODEL moons_linear; +DROP MODEL moons_gaussian; +DROP MODEL moons_polynomial; + +-- cleanup tables +DROP TABLE IF EXISTS moons; + +SELECT 'DB4AI SVM KERNELS TEST COMPLETED';
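A brief aside for readers tuning the three cases above: with the textbook kernel definitions (an assumption here, though the gamma, degree, and coef0 hyperparameters appear to map onto them directly), the classifiers differ only in the kernel K they apply: the linear case uses K(x,z) = x^T z, the gaussian case K(x,z) = exp(-gamma * ||x-z||^2), and the polynomial case K(x,z) = (x^T z + coef0)^degree. That is why the two nonlinear kernels can separate the interleaved half-moons (expected accuracies 0.935 and 1.000) while the linear model plateaus around 0.890.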
diff --git a/src/test/regress/input/db4ai_xgboost_train_predict.source b/src/test/regress/input/db4ai_xgboost_train_predict.source new file mode 100644 index 000000000..cb3d7075e --- /dev/null +++ b/src/test/regress/input/db4ai_xgboost_train_predict.source @@ -0,0 +1,103 @@ +CREATE TABLE db4ai_rain (id INT, Location VARCHAR(20), MinTemp FLOAT, MaxTemp FLOAT, Rainfall FLOAT, WindGustSpeed INT, WindSpeed9am INT, + WindSpeed3pm INT, Humidity9am INT, Humidity3pm INT, Pressure9am FLOAT, Pressure3pm FLOAT, Cloud9am INT, Cloud3pm INT, + Temp9am FLOAT, Temp3pm FLOAT, RainToday INT, RainTomorrow INT) +WITH (orientation=row, compression=no); + +COPY db4ai_rain FROM '@abs_srcdir@/data/rain.txt' WITH (FORMAT csv); + + +-- Error in FEATURES / TARGET +CREATE MODEL m using xgboost_binary_logistic FEATURES * TARGET price FROM db4ai_rain; + +CREATE MODEL m using xgboost_binary_logistic FEATURES Temp9am,Temp3pm TARGET * FROM db4ai_rain; + +CREATE MODEL m using xgboost_binary_logistic FROM db4ai_rain; + + +-- Errors with semantic validation of hyperparameters +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH n_iter=-1; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH batch_size=0; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH max_depth=-1; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH min_child_weight=-1; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH eta=-0.1; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH seed=-1; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH nthread=-1; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH booster=10; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH tree_method=10; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH verbosity=10; + +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH n_iter='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH batch_size='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH max_depth='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH min_child_weight='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH seed='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH eta='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH nthread='a_wrong_parameter'; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH booster=10; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH tree_method=10; +CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH verbosity='a_wrong_parameter'; + + +-- Normal models +CREATE MODEL m1 USING xgboost_binary_logistic FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m1 (FEATURES raintoday), 1) as pred FROM db4ai_rain; + +DROP MODEL m1; + + +CREATE MODEL m3 USING xgboost_regression_squarederror FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m3 (FEATURES raintoday), 1) as pred FROM db4ai_rain; + +DROP MODEL m3; + + +CREATE MODEL m4 USING xgboost_regression_gamma FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, 
seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m4 (FEATURES raintoday), 1) as pred FROM db4ai_rain; + +DROP MODEL m4; + +-- empty resultset +CREATE MODEL m1 USING xgboost_binary_logistic FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m1 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0; + +DROP MODEL m1; + + +CREATE MODEL m2 USING xgboost_regression_logistic FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m2 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0; + +DROP MODEL m2; + + +CREATE MODEL m3 USING xgboost_regression_squarederror FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m3 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0; + +DROP MODEL m3; + + +CREATE MODEL m4 USING xgboost_regression_gamma FEATURES raintoday TARGET raintomorrow FROM db4ai_rain +WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141; +SELECT id, raintomorrow, round(PREDICT BY m4 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0; + +DROP MODEL m4; + +-- Explain test +EXPLAIN CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain; + +EXPLAIN VERBOSE CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain; +EXPLAIN VERBOSE CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday != 0 FROM db4ai_rain; +EXPLAIN VERBOSE CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain; +EXPLAIN VERBOSE CREATE MODEL m USING xgboost_regression_logistic FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain; +EXPLAIN VERBOSE CREATE MODEL m USING xgboost_regression_squarederror FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain; +EXPLAIN VERBOSE CREATE MODEL m USING xgboost_regression_gamma FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain; + +-- Cleanup +DROP TABLE IF EXISTS db4ai_rain; + +SELECT 'DB4AI TEST COMPLETED'; diff --git a/src/test/regress/input/dfs_dts002.source b/src/test/regress/input/dfs_dts002.source index 9c7cec2cf..27bd1b3b6 100644 --- a/src/test/regress/input/dfs_dts002.source +++ b/src/test/regress/input/dfs_dts002.source @@ -476,8 +476,8 @@ analyze empty_part_table; drop table empty_part_table; reset cstore_insert_mode; -create schema DTS2017052409444; -set current_schema=DTS2017052409444; +create schema testschema; +set current_schema=testschema; set cstore_insert_mode = main; create table household_demographics_h ( @@ -527,11 +527,11 @@ drop foreign table call_center_f; drop table household_demographics_h cascade; drop table call_center_h cascade; drop procedure test; -drop schema DTS2017052409444 cascade; +drop schema testschema cascade; set current_schema = dfs; -create schema DTS2017072002456; -set 
current_schema=DTS2017072002456; +create schema testschema2; +set current_schema=testschema2; set cstore_insert_mode = main; create table item_inventory_hdfs ( @@ -575,7 +575,7 @@ from (select location_id b, count(*) as cc group by tt.location_id order by 1, 2; -drop schema DTS2017072002456 cascade; +drop schema testschema2 cascade; set current_schema = dfs; drop tablespace hdfs_ts_part; diff --git a/src/test/regress/input/dw_switch.source b/src/test/regress/input/dw_switch.source new file mode 100644 index 000000000..fb8a2a0f7 --- /dev/null +++ b/src/test/regress/input/dw_switch.source @@ -0,0 +1,10 @@ +\! @abs_bindir@/gs_guc set -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_double_write=off" +\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! sleep 5 +\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! sleep 5 +\! @abs_bindir@/gs_guc set -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_double_write=on" +\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null diff --git a/src/test/regress/input/fdw_audit.source b/src/test/regress/input/fdw_audit.source new file mode 100644 index 000000000..3a9fe69be --- /dev/null +++ b/src/test/regress/input/fdw_audit.source @@ -0,0 +1,29 @@ +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=67108863" > /dev/null 2>&1 + +CREATE ROLE regress_test_foreign SYSADMIN IDENTIFIED BY 'test-1234'; + +CREATE FOREIGN DATA WRAPPER fdw_audit_dummy; +ALTER FOREIGN DATA WRAPPER fdw_audit_dummy NO VALIDATOR; +ALTER FOREIGN DATA WRAPPER fdw_audit_dummy OPTIONS (a '1', b '2'); +ALTER FOREIGN DATA WRAPPER fdw_audit_dummy OWNER TO regress_test_foreign; +ALTER FOREIGN DATA WRAPPER fdw_audit_dummy RENAME TO fdw_audit_dummy2; + + +CREATE SERVER s1 FOREIGN DATA WRAPPER fdw_audit_dummy2; +ALTER SERVER s1 VERSION '1.1'; +ALTER SERVER s1 OPTIONS (connect_timeout '30'); +ALTER SERVER s1 OWNER TO regress_test_foreign; +ALTER SERVER s1 RENAME to s1new; + +DROP SERVER s1new; +DROP FOREIGN DATA WRAPPER IF EXISTS fdw_audit_dummy2; +DROP ROLE regress_test_foreign; + +SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_foreign_data_wrapper' or type = 'ddl_serverforhadoop' order by object_name,detail_info; +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1 + diff --git a/src/test/regress/input/gs_global_config_audit.source b/src/test/regress/input/gs_global_config_audit.source new file mode 100644 index 000000000..87d320942 --- /dev/null +++ b/src/test/regress/input/gs_global_config_audit.source @@ -0,0 +1,14 @@ +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_enabled=on" > /dev/null 2>&1 +\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=33554431" > /dev/null 2>&1 +show audit_enabled; +show audit_system_object; +SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig'; +select * from gs_global_config; +ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2); +ALTER GLOBAL CONFIGURATION with(last_catchup_threshold=5000); +select * from gs_global_config; +DROP GLOBAL CONFIGURATION lockwait_timeout; +DROP GLOBAL CONFIGURATION last_catchup_threshold, lockwait_interval; +select * from gs_global_config; +SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig'; +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "reset audit_system_object" > /dev/null 2>&1 diff --git a/src/test/regress/input/gsc_db.source b/src/test/regress/input/gsc_db.source new file mode 100644 index 000000000..5711065b8 --- /dev/null +++ b/src/test/regress/input/gsc_db.source @@ -0,0 +1,23 @@ +create database gsc1; +\c gsc1 +create table gsc1_t1(c1 int); +insert into gsc1_t1 values(1); +create database gsc2; +\c gsc2 +create table gsc2_t1(c1 int); +insert into gsc2_t1 values(2); +\! @abs_bindir@/gsql -p @portstring@ -d gsc1 -c "select * from gsc1_t1;" > /dev/null 2>&1 & +\! @abs_bindir@/gsql -p @portstring@ -d gsc2 -c "select * from gsc2_t1;" > /dev/null 2>&1 & +\c postgres +select pg_sleep(0.5); +alter database gsc1 rename to gsc_temp; +alter database gsc2 rename to gsc1; +alter database gsc_temp rename to gsc2; +\c gsc1 +select * from gsc2_t1; +\c gsc2 +select * from gsc1_t1; +\c postgres +drop database gsc1; +drop database gsc2; +\c regression \ No newline at end of file diff --git a/src/test/regress/input/guc_help.source b/src/test/regress/input/guc_help.source new file mode 100644 index 000000000..90fef3af1 --- /dev/null +++ b/src/test/regress/input/guc_help.source @@ -0,0 +1 @@ +\! @abs_bindir@/gs_guc -? diff --git a/src/test/regress/input/hw_audit_detailinfo.source b/src/test/regress/input/hw_audit_detailinfo.source new file mode 100644 index 000000000..edebe4d69 --- /dev/null +++ b/src/test/regress/input/hw_audit_detailinfo.source @@ -0,0 +1,59 @@ +-- switch off initially; query the audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + +show audit_xid_info; + +-- parameter setup +set audit_xid_info=1; + +-- turn on the xid switch +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_xid_info=1" > /dev/null 2>&1 +select pg_sleep(1); +show audit_xid_info; +CREATE USER audit_user1 PASSWORD 'Gauss@123'; +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='ddl_user' and object_name='audit_user1'; + +-- DDL +create database db_audit1; +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='ddl_database'; + +-- DCL +create table t1(id int); +grant all on table t1 to audit_user1; +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='grant_role' and object_name='audit_user1'; + +-- DML +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1 +select pg_sleep(1); +show audit_dml_state; +create table t2(id int); +insert into t2 values(1); +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='dml_action' and object_name='t2'; + +-- DQL +\!
@abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=1" > /dev/null 2>&1 +select pg_sleep(1); +show audit_dml_state_select; +create table t3(id int); +select * from t3; +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='dml_action_select' and object_name='t3'; + +-- turn off the xid switch +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_xid_info=0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=0" > /dev/null 2>&1 +select pg_sleep(1); +show audit_xid_info; +show audit_dml_state; +show audit_dml_state_select; + +-- restore +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); +drop table t1; +drop table t2; +drop table t3; +drop user audit_user1; +drop database db_audit1; + +/* remove hard-coded xid values from the output */ +\! security_scripts/post_case_audit.sh @abs_builddir@/results/hw_audit_detailinfo.out diff --git a/src/test/regress/input/hw_audit_multi_thread.source b/src/test/regress/input/hw_audit_multi_thread.source new file mode 100644 index 000000000..fce12e71f --- /dev/null +++ b/src/test/regress/input/hw_audit_multi_thread.source @@ -0,0 +1,58 @@ +-- generating audit logs +create user user_audit_mul_x with password 'Gauss@123'; +create user user_audit_mul_y with password 'Gauss@123'; +create user user_audit_mul_z with password 'Gauss@123'; + +select type,result,object_name,detail_info from pg_query_audit('1111-1-1','2222-2-2') where detail_info like '%create user user_audit_mul%' order by time; + +-- audit thread number change to 3 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_thread_num=3" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=6" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_set_parameter=0" > /dev/null 2>&1 + +select pg_sleep(1); +\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! sleep 5 + +-- generating audit items +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from pg_delete_audit('0007-1-1','9999-12-31');"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_001 with password 'Gauss@123'"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_002 with password 'Gauss@123'"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_003 with password 'Gauss@123'"; + + +-- check audit logs +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('1111-1-1','2222-2-2') where detail_info like '%create user user_audit_mul%' order by time;"; + +-- audit thread number change to 1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_thread_num=1" > /dev/null 2>&1 + + +\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! sleep 5 + +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from pg_delete_audit('0007-1-1','9999-12-31');"; +\!
@abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_004 with password 'Gauss@123'"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_005 with password 'Gauss@123'"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_006 with password 'Gauss@123'"; + +-- check audit logs +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('1111-1-1','2222-2-2') where detail_info like '%create user user_audit_mul%' order by time;"; + +-- reset env +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_001"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_002"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_003"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_004"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_005"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_006"; + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_thread_num" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_set_parameter" > /dev/null 2>&1 +\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null +\! sleep 5 + diff --git a/src/test/regress/input/hw_audit_rotation_interval.source b/src/test/regress/input/hw_audit_rotation_interval.source new file mode 100644 index 000000000..c94ba9546 --- /dev/null +++ b/src/test/regress/input/hw_audit_rotation_interval.source @@ -0,0 +1,8 @@ +-- set the GUC parameter +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_interval=1" > /dev/null 2>&1 + +-- after a 60-second interval, read the number of the newest adt file in pg_audit and check whether it increased by 1 +\! num1=`expr $(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -lt | head -n 2 | awk '{print $9}')| tr -cd "[0-9]") + 1` && sleep 1m && num2=$(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -lt | head -n 2 | awk '{print $9}')| tr -cd "[0-9]") && [[ $num1 == $num2 ]] && echo 'added a new log after the 60-second interval' || echo 'failed to add new logs' + +-- restore the GUC parameter +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_interval" > /dev/null 2>&1
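A note on the units of the two rotation GUCs exercised here, per the units openGauss documents for them (minutes for audit_rotation_interval, kilobytes for audit_rotation_size; worth confirming against the target release): audit_rotation_interval=1 above requests a new audit file roughly every 60 seconds, which is exactly what the sleep-1m file-number comparison verifies, while audit_rotation_size=2048 in the test below caps each file at 2048 kB = 2048 * 1024 bytes, about 2.0 MB, so the 2.1M threshold in the size check leaves a small margin for the record that triggers the rotation.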
diff --git a/src/test/regress/input/hw_audit_rotation_size.source b/src/test/regress/input/hw_audit_rotation_size.source new file mode 100644 index 000000000..07e5577c2 --- /dev/null +++ b/src/test/regress/input/hw_audit_rotation_size.source @@ -0,0 +1,27 @@ +-- set the GUC parameters +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_size=2048" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1 + +-- generate dml_action audit logs +CREATE TABLE T_TEST_ROTATION_SIZE +( + COL1 int4 DEFAULT 1, + COL2 VARCHAR(1024) DEFAULT 'test_rotation_size'); +CREATE OR REPLACE PROCEDURE TRANSACTION_TEST_ROTATION_SIZE() +AS +BEGIN +FOR i IN 0..1000000 LOOP +INSERT INTO T_TEST_ROTATION_SIZE(COL1, COL2) VALUES (i, 'test_time'); +COMMIT; +END LOOP; +END; +/ +CALL TRANSACTION_TEST_ROTATION_SIZE(); + +-- extract the sizes of the newly generated files and compare them with 2.1M; the case succeeds if all are below 2.1M +\! flag=0 && for i in $(find @abs_srcdir@/tmp_check/datanode1/pg_audit -newermt $(date -d "-75 seconds" +%H:%M:%S) -name "*_adt"); do size=$(du -h --exclude=index_table_new $i | grep -oP '\d*\.\d+M'); if [[ $size > '2.1M' ]]; then flag=1; echo $size; echo $i; fi; done && [[ "$flag" == 0 ]] && echo 'all the logs are less than 2.1M' || echo 'error: some logs exceed the limit' + +-- restore the GUC parameters +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_size" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state" > /dev/null 2>&1 + diff --git a/src/test/regress/input/hw_audit_space.source b/src/test/regress/input/hw_audit_space.source new file mode 100644 index 000000000..3b9a20724 --- /dev/null +++ b/src/test/regress/input/hw_audit_space.source @@ -0,0 +1,35 @@ +-- modify the GUC parameters +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_resource_policy = 1" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_space_limit = 512MB" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state =1" > /dev/null 2>&1 + +-- get the number of the oldest adt file in pg_audit +\! echo $(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -tr | head -2 | xargs) | tr -cd "[0-9]") + +-- generate dml_action audit logs +CREATE TABLE T_TEST_SPACE +( + COL1 int4 DEFAULT 1, + COL2 VARCHAR(1024) DEFAULT 'test_space'); +CREATE OR REPLACE PROCEDURE TRANSACTION_TEST_SPACE() +AS +BEGIN +FOR i IN 0..3500000 LOOP +INSERT INTO T_TEST_SPACE(COL1, COL2) VALUES (i, 'a'); +COMMIT; +END LOOP; +END; +/ +CALL TRANSACTION_TEST_SPACE(); + +-- get the number of the oldest adt file in pg_audit and compare it with the original number 0_adt, to see whether the oldest log file was deleted +\! [[ $(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -tr | head -2 | xargs) | tr -cd "[0-9]") > 1 ]] && echo 'deleted oldest files' || echo 'failed to delete oldest files' + +-- check whether the total size of the pg_audit files exceeds the configured 512M +\! [[ $(du -h --exclude=done @abs_srcdir@/tmp_check/datanode1/pg_audit | grep -oP '\d*M') > '530M' ]] && echo 'size of total logs exceeds upper limit' || echo 'size of total logs does not exceed upper limit' + +-- restore the GUC parameters +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_resource_policy" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_space_limit" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state" > /dev/null 2>&1 + diff --git a/src/test/regress/input/hw_audit_toughness.source b/src/test/regress/input/hw_audit_toughness.source new file mode 100644 index 000000000..b3cf90b48 --- /dev/null +++ b/src/test/regress/input/hw_audit_toughness.source @@ -0,0 +1,34 @@ +-- clear historical audit logs +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + +create user audit_fault_user1 with password 'Gauss@123'; +select pg_sleep(1); +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user1'; + +-- fault 1: overwrite +\!
cur_audit_file=`find @abs_srcdir@/tmp_check/datanode1/pg_audit -name "*_adt" | sort -r | head -1` && echo "fault 1: overwritten xxxxx" > $cur_audit_file +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user1'; + +select pg_sleep(1); + +create user audit_fault_user2 with password 'Gauss@123'; +select pg_sleep(1); +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user2'; + +-- fault 2: append +\! cur_audit_file=`find @abs_srcdir@/tmp_check/datanode1/pg_audit -name "*_adt" | sort -r | head -1` && echo "fault 2: appending yyyyy" >> $cur_audit_file + +create user audit_fault_user3 with password 'Gauss@123'; +select pg_sleep(1); +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user3'; + +create user audit_fault_user4 with password 'Gauss@123'; +select pg_sleep(1); +SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user4'; + +-- restore +drop user audit_fault_user1; +drop user audit_fault_user2; +drop user audit_fault_user3; +drop user audit_fault_user4; +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); diff --git a/src/test/regress/input/hw_cstore_load2.source b/src/test/regress/input/hw_cstore_load2.source index 4c5a499e1..19c71ad3e 100644 --- a/src/test/regress/input/hw_cstore_load2.source +++ b/src/test/regress/input/hw_cstore_load2.source @@ -403,14 +403,14 @@ DROP TABLE LINEITEM_PART_DATE; -- -- SET DATESTYLE='ISO, MDY'; -CREATE TABLE DTS2017011006555_TBL +CREATE TABLE TESTTABLE_TBL ( C_INT INT, c_timestamp timestamp, c_timestamp_w timestamp without time zone ) distribute by hash(C_INT); -COPY DTS2017011006555_TBL FROM STDIN with (delimiter ',',timestamp_format 'yy/mm/dd/hh24/mi/ss'); +COPY TESTTABLE_TBL FROM STDIN with (delimiter ',',timestamp_format 'yy/mm/dd/hh24/mi/ss'); 02,00/01/01/00/00/00,00/01/01/00/00/00 \.
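For reference, the timestamp_format option above uses to_timestamp-style template patterns, so the row just loaded can be sanity-checked directly (a sketch; it assumes COPY's timestamp_format accepts the same 'yy/mm/dd/hh24/mi/ss' keywords as to_timestamp, which the identical pattern string suggests):

SELECT to_timestamp('00/01/01/00/00/00', 'yy/mm/dd/hh24/mi/ss');
-- expected: 2000-01-01 00:00:00, the value stored in both timestamp columns of the row above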
-SELECT * FROM DTS2017011006555_TBL; -DROP TABLE DTS2017011006555_TBL; +SELECT * FROM TESTTABLE_TBL; +DROP TABLE TESTTABLE_TBL; diff --git a/src/test/regress/input/hw_partition_merge1.source b/src/test/regress/input/hw_partition_merge1.source index dfc26c802..3130bc237 100644 --- a/src/test/regress/input/hw_partition_merge1.source +++ b/src/test/regress/input/hw_partition_merge1.source @@ -96,7 +96,7 @@ drop table test_merge_table_tablespace; -- -- -CREATE TABLE DTS2016122106940_tbl +CREATE TABLE TESTTABLE_tbl ( id int, info varchar(200) @@ -108,20 +108,20 @@ partition p1 values less than(100000), partition p2 values less than(300000), partition p3 values less than(maxvalue) ); -CREATE INDEX idx_dts2016122106940_tbl on DTS2016122106940_tbl(id) local ( +CREATE INDEX idx_testtable_tbl on TESTTABLE_tbl(id) local ( partition idx_p1, partition idx_p2 tablespace partition_merge_ts1, partition idx_p3 tablespace partition_merge_ts2 ); -select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_dts2016122106940_tbl' order by partition_name; -alter table DTS2016122106940_tbl merge partitions p2,p3 into partition p3; -select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_dts2016122106940_tbl' order by partition_name; +select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_testtable_tbl' order by partition_name; +alter table TESTTABLE_tbl merge partitions p2,p3 into partition p3; +select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_testtable_tbl' order by partition_name; START TRANSACTION; -alter table DTS2016122106940_tbl merge partitions p1,p3 into partition p3; -select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_dts2016122106940_tbl' order by partition_name; +alter table TESTTABLE_tbl merge partitions p1,p3 into partition p3; +select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_testtable_tbl' order by partition_name; ROLLBACK; -DROP INDEX idx_dts2016122106940_tbl; -DROP TABLE DTS2016122106940_tbl; +DROP INDEX idx_testtable_tbl; +DROP TABLE TESTTABLE_tbl; drop tablespace partition_merge_ts0; drop tablespace partition_merge_ts1; diff --git a/src/test/regress/input/hw_subpartition_tablespace.source b/src/test/regress/input/hw_subpartition_tablespace.source new file mode 100644 index 000000000..afbd21d77 --- /dev/null +++ b/src/test/regress/input/hw_subpartition_tablespace.source @@ -0,0 +1,896 @@ +DROP SCHEMA hw_subpartition_tablespace CASCADE; +CREATE SCHEMA hw_subpartition_tablespace; +SET CURRENT_SCHEMA TO hw_subpartition_tablespace; + +--prepare +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts1' +\! mkdir '@testtablespace@/hw_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts2' +\! mkdir '@testtablespace@/hw_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts3' +\! 
mkdir '@testtablespace@/hw_subpartition_tablespace_ts3' +CREATE TABLESPACE hw_subpartition_tablespace_ts1 LOCATION '@testtablespace@/hw_subpartition_tablespace_ts1'; +CREATE TABLESPACE hw_subpartition_tablespace_ts2 LOCATION '@testtablespace@/hw_subpartition_tablespace_ts2'; +CREATE TABLESPACE hw_subpartition_tablespace_ts3 LOCATION '@testtablespace@/hw_subpartition_tablespace_ts3'; + +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); +DROP TABLE t_range_range1; + +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + 
PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); +DROP TABLE t_range_range2; + +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); +DROP TABLE t_range_list1; + +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); +DROP TABLE t_range_list2; + +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) 
TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); +DROP TABLE t_range_hash1; + +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); +DROP TABLE t_range_hash2; + +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION 
P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); +DROP TABLE t_list_range1; + +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); +DROP TABLE t_list_range2; + +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); +DROP TABLE t_list_list1; + +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION 
P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); +DROP TABLE t_list_list2; + +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); +DROP TABLE t_list_hash1; + +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + 
SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); +DROP TABLE t_list_hash2; + +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); +DROP TABLE t_hash_range1; + +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + 
SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); +DROP TABLE t_hash_range2; + +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); +DROP TABLE t_hash_list1; + +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); +DROP TABLE t_hash_list2; + +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION 
P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH1_3
+    ),
+    PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH2_3
+    ),
+    PARTITION P_HASH3
+    (
+        SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH3_3
+    ),
+    PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1,
+    PARTITION P_HASH5
+);
+SELECT pg_get_tabledef('t_hash_hash1');
+DROP TABLE t_hash_hash1;
+
+CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1
+PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2)
+(
+    PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH1_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH1_4
+    ),
+    PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH2_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH2_4
+    ),
+    PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3
+    (
+        SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH3_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH3_4
+    ),
+    PARTITION P_HASH4
+    (
+        SUBPARTITION P_HASH4_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH4_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH4_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH4_4
+    ),
+    PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3,
+    PARTITION P_HASH6
+);
+SELECT pg_get_tabledef('t_hash_hash2');
+DROP TABLE t_hash_hash2;
+
+--
+----test add partition with tablespace----
+--
+--since the ADD SUBPARTITION definitions share the same code path, we only test different partition types here: range/list
+--range-list
+CREATE TABLE t_range_list3(c1 int, c2 int, c3 int)
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+    PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15)
+    ),
+    PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15)
+    )
+);
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15)
+    (
+        SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15)
+    );
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1;
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS 
THAN (25); +SELECT pg_get_tabledef('t_range_list3'); +DROP TABLE t_range_list3; + + +CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); +DROP TABLE t_range_list4; + +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) +); + +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); +DROP TABLE t_list_hash3; + +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + 
( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); +DROP TABLE t_list_hash4; + +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE t_list_range3(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); +DROP TABLE t_list_range3; + +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION 
P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); +DROP TABLE t_list_range4; + +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); +DROP TABLE t_hash_list3; + +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 
int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); +DROP TABLE t_hash_list4; + +--finish +drop tablespace hw_subpartition_tablespace_ts1; +drop tablespace hw_subpartition_tablespace_ts2; +drop tablespace hw_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts3' + +DROP SCHEMA hw_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/input/hw_subpartition_vacuum_partition.source b/src/test/regress/input/hw_subpartition_vacuum_partition.source new file mode 100644 index 000000000..6b18d356d --- /dev/null +++ b/src/test/regress/input/hw_subpartition_vacuum_partition.source @@ -0,0 +1,162 @@ +DROP SCHEMA hw_subpartition_vacuum_partition CASCADE; +CREATE SCHEMA hw_subpartition_vacuum_partition; +SET CURRENT_SCHEMA TO hw_subpartition_vacuum_partition; + +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum = off" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot = off" >/dev/null 2>&1 + +CREATE TABLE temp1(c1 int, c2 int); + +-- +--1. 
test for basic function +-- +CREATE TABLE range_list1 +( + month_code VARCHAR2 (30), + dept_code VARCHAR2 (30), + user_no VARCHAR2 (30), + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN('201903') + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN('201910') + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +--vacuum, success +VACUUM range_list1 PARTITION (p_201901); +VACUUM range_list1 SUBPARTITION (p_201902_a); + +--vacuum full, success +VACUUM FULL range_list1 PARTITION (p_201901); +VACUUM FULL range_list1 SUBPARTITION (p_201902_a); + +--vacuum full compact, fail +VACUUM FULL COMPACT range_list1 PARTITION (p_201901); +VACUUM FULL COMPACT range_list1 SUBPARTITION (p_201902_a); + +--vacuum freeze, success +VACUUM FREEZE range_list1 PARTITION (p_201901); +VACUUM FREEZE range_list1 SUBPARTITION (p_201902_a); + +--vacuum verbose, success +VACUUM VERBOSE range_list1 PARTITION (p_201901); +VACUUM VERBOSE range_list1 SUBPARTITION (p_201902_a); + +--vacuum option all +VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); + +-- +--2. test the actual work +-- +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (1200) + ( + SUBPARTITION customer3_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; + +SELECT pg_relation_size('range_list_sales1'); + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +SELECT pg_relation_size('range_list_sales1'); + +--vacuum full partition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 PARTITION (customer1); +VACUUM FULL range_list_sales1 PARTITION (customer2); +VACUUM FULL range_list_sales1 PARTITION (customer3); +SELECT pg_relation_size('range_list_sales1'); + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 
SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +SELECT pg_relation_size('range_list_sales1'); + +--vacuum full subpartition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel3); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel4); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); +SELECT pg_relation_size('range_list_sales1'); + +--check index is ok +SELECT /*+ tablescan(range_list_sales1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--finish +DROP TABLE temp1; +DROP TABLE range_list1; +DROP TABLE range_list_sales1; + +DROP SCHEMA hw_subpartition_vacuum_partition CASCADE; +RESET CURRENT_SCHEMA; + +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot" >/dev/null 2>&1 diff --git a/src/test/regress/input/hw_user.source b/src/test/regress/input/hw_user.source index c35d38265..32410840f 100644 --- a/src/test/regress/input/hw_user.source +++ b/src/test/regress/input/hw_user.source @@ -380,9 +380,6 @@ drop user user02; drop user current_user01; drop user current_user02; -/*----------------------------------------------------------------- - test alter user with pguser (DTS2017031606532) ------------------------------------------------------------------*/ create or replace function isMD5(pwd text) returns bool as $$ begin return left(pwd, 3) = 'md5'; end; $$ language plpgsql; --alter with admin diff --git a/src/test/regress/input/hw_user_alter_pguser.source b/src/test/regress/input/hw_user_alter_pguser.source index 15dbe93f4..8f15e4165 100644 --- a/src/test/regress/input/hw_user_alter_pguser.source +++ b/src/test/regress/input/hw_user_alter_pguser.source @@ -1,6 +1,3 @@ -/*----------------------------------------------------------------- - test alter user with pguser (DTS2017031606532) ------------------------------------------------------------------*/ create or replace function ismd5(pwd text) returns bool as $$ begin return left(pwd, 3) = 'md5'; end; $$ language plpgsql; --alter with admin diff --git a/src/test/regress/input/hw_user_audit.source b/src/test/regress/input/hw_user_audit.source index ea4e3c5eb..2ce379843 100644 --- a/src/test/regress/input/hw_user_audit.source +++ b/src/test/regress/input/hw_user_audit.source @@ -69,11 +69,11 @@ select * from pg_delete_audit('1111-1-1','2222-2-2'); \! @abs_bindir@/gs_guc reload -Z coordinator -D @abs_srcdir@/tmp_check/coordinator1/ -c "audit_dml_state=1" > /dev/null 2>&1 \! 
@abs_bindir@/gs_guc reload -Z coordinator -D @abs_srcdir@/tmp_check/coordinator1/ -c "audit_dml_state_select=1" > /dev/null 2>&1 select pg_sleep(1); -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "show audit_dml_state;show audit_dml_state_select;create table DTS2018091711491_t1 (id int, num int) distribute by hash(id);" -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "insert into DTS2018091711491_t1 values (1,1);" -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from DTS2018091711491_t1 where id=1;" -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop table DTS2018091711491_t1;" -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('0007-1-1','9999-12-31') where OBJECT_name ='dts2018091711491_t1' order by time;"; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "show audit_dml_state;show audit_dml_state_select;create table TESTTABLE_t1 (id int, num int) distribute by hash(id);" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "insert into TESTTABLE_t1 values (1,1);" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from TESTTABLE_t1 where id=1;" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop table TESTTABLE_t1;" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('0007-1-1','9999-12-31') where OBJECT_name ='testtable_t1' order by time;"; \! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from pg_delete_audit('0007-1-1','9999-12-31');"; -- llt for pbe and lightcn diff --git a/src/test/regress/input/ledger_table_case.source b/src/test/regress/input/ledger_table_case.source index a0755e609..91e830693 100644 --- a/src/test/regress/input/ledger_table_case.source +++ b/src/test/regress/input/ledger_table_case.source @@ -391,7 +391,7 @@ CREATE SCHEMA blockchain; CREATE USER test_normal_user PASSWORD 'Gauss_234'; ALTER SCHEMA blockchain OWNER TO test_normal_user; DROP USER test_normal_user CASCADE; -ALTER SCHEMA blockchain OWNER TO @login_user@; +ALTER SCHEMA blockchain OWNER TO "@login_user@"; DROP USER test_normal_user CASCADE; diff --git a/src/test/regress/input/predefined_roles.source b/src/test/regress/input/predefined_roles.source index b1d1df853..e23cc6de3 100644 --- a/src/test/regress/input/predefined_roles.source +++ b/src/test/regress/input/predefined_roles.source @@ -202,7 +202,7 @@ SELECT kill_snapshot(); SELECT pg_create_physical_replication_slot('privs_test_physlot', true); SELECT pg_create_logical_replication_slot('privs_test_slot', 'mppdb_decoding'); -SELECT pg_create_physical_replication_slot_extern('uuid', false, 'obs.cnnorth-7.ulanqab.huawei.com;dyk;19D772JBCACXX3KWS51D;********;caoshufeng_uuid/dn1'); +SELECT pg_create_physical_replication_slot_extern('uuid', false, 'obs.cnnorth-7.ulanqab.huawei.com;dyk;19D772JBCACXX3KWS51D;********;caoshufeng_uuid/dn1', false); SELECT pg_replication_slot_advance('privs_test_slot', NULL); diff --git a/src/test/regress/input/pri_alter_any_table.source b/src/test/regress/input/pri_alter_any_table.source new file mode 100644 index 000000000..e222306a2 --- /dev/null +++ b/src/test/regress/input/pri_alter_any_table.source @@ -0,0 +1,290 @@ +CREATE USER user1 PASSWORD 'Gauss@1234'; +CREATE USER test_alter_any_table_role PASSWORD 'Gauss@1234'; +GRANT alter any table to test_alter_any_table_role; + +CREATE TABLESPACE pri_al_tsp LOCATION '@testtablespace@/pri_tsp'; + +CREATE SCHEMA pri_al_schema_test; + +CREATE SCHEMA 
pri_al_schema;
+set search_path=pri_al_schema;
+
+CREATE table pri_al_schema.tb_pri (id int, name VARCHAR(10));
+ --create table
+CREATE TABLE pri_al_schema.TBL_DOMAIN_PRI
+(
+    IDOMAINID NUMBER(10) NOT NULL,
+    SDOMAINNAME VARCHAR2(30) NOT NULL,
+    b int
+);
+
+CREATE TABLE pri_al_schema.pri_al_test_hash (a int, b int);
+
+CREATE TYPE pri_al_schema.pri_al_person_type1 AS (id int, name text);
+CREATE TYPE pri_al_schema.pri_al_person_type2 AS (id int, name text);
+CREATE TABLE pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_person_type1;
+CREATE TABLE IF NOT EXISTS pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_persons;
+CREATE TABLE pri_al_schema.pri_al_stuff (id int);
+
+
+--trigger
+create table pri_al_trigtest (i serial primary key);
+create function pri_al_schema.pri_al_trigtest() returns trigger as $$
+begin
+    raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL;
+    return new;
+end;$$ language plpgsql;
+create trigger pri_al_trigtest_b_row_tg before insert or update or delete on pri_al_trigtest
+for each row execute procedure pri_al_trigtest();
+CREATE SEQUENCE pri_al_schema.serial START 101;
+CREATE TABLE pri_al_schema.T1(C1 bigint default nextval('serial'));
+
+ --alter
+SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234';
+CREATE UNIQUE INDEX pri_al_schema.ds_ship_mode_t1_index1 ON pri_al_schema.T1(C1);--should fail with insufficient privilege
+ALTER SEQUENCE pri_al_schema.serial OWNED BY T1.C1;
+ALTER TABLE pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_person_type2;
+ALTER TABLE pri_al_schema.pri_al_persons INHERIT pri_al_schema.pri_al_stuff;
+ALTER TABLE pri_al_schema.pri_al_persons NOT OF;
+
+--trigger
+alter table pri_al_schema.pri_al_trigtest disable trigger pri_al_trigtest_b_row_tg;
+alter table pri_al_schema.pri_al_trigtest enable trigger pri_al_trigtest_b_row_tg;
+
+alter table pri_al_schema.pri_al_test_hash DISABLE ROW LEVEL SECURITY;
+ALTER TABLE pri_al_schema.pri_al_test_hash REPLICA IDENTITY FULL;
+ALTER TABLE pri_al_schema.pri_al_test_hash alter COLUMN b SET STATISTICS 1000;
+
+ALTER TABLE pri_al_schema.tb_pri add column age int;
+ALTER TABLE pri_al_schema.tb_pri modify name VARCHAR(60);
+ALTER TABLE pri_al_schema.tb_pri ALTER COLUMN name TYPE text;
+ALTER TABLE pri_al_schema.tb_pri ALTER name SET STORAGE EXTERNAL;
+ALTER TABLE pri_al_schema.tb_pri add check (age>10);
+ALTER TABLE pri_al_schema.tb_pri alter name set not null;
+ALTER TABLE pri_al_schema.tb_pri ALTER COLUMN name DROP NOT NULL;
+ALTER TABLE pri_al_schema.tb_pri rename age to age_1;
+ALTER TABLE pri_al_schema.tb_pri drop column age_1;
+
+ALTER TABLE pri_al_schema.tb_pri SET TABLESPACE pri_al_tsp;
+
+ALTER TABLE pri_al_schema.tb_pri SET SCHEMA pri_al_schema_test; --error must be owner of relation tb_pri
+ALTER TABLE pri_al_schema.tb_pri owner to user1; --error must be owner of relation tb_pri
+
+ALTER TABLE pri_al_schema.tb_pri RENAME TO test_table; --failed
+
+ALTER TABLE pri_al_schema.tb_pri SET WITHOUT CLUSTER; --success
+
+ALTER TABLE pri_al_schema.tb_pri SET NOCOMPRESS;--success
+
+ALTER TABLE pri_al_schema.tb_pri SET WITHOUT OIDS;-- Un-support feature
+
+ALTER TABLE pri_al_schema.pri_al_test_hash add column c serial; --not supported
+
+ALTER TABLE pri_al_schema.pri_al_test_hash add column d int default 10; --success
+
+ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID;
+
+ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) USING INDEX;
+
+ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) USING INDEX;
+
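+-- the two USING INDEX constraints above are presumably expected to fail: ALTER ANY TABLE
+-- alone does not cover creating the backing index, so CREATE ANY INDEX is granted below
+-- and the same statements are retried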
(SDOMAINNAME) USING INDEX; + +reset role; +grant create any index to test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) USING INDEX; + +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) USING INDEX; +\d pri_al_schema.TBL_DOMAIN_PRI + +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI RENAME CONSTRAINT PK_TBL_DOMAIN TO MY_PK_TBL_DOMAIN; + +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI DROP CONSTRAINT MY_PK_TBL_DOMAIN; + +reset role; +revoke create any index from test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +\d pri_al_schema.TBL_DOMAIN_PRI + +ALTER TABLE pri_al_schema.tb_pri DISTRIBUTE BY HASH(id); + +DROP TABLE pri_al_schema.tb_pri; --应该失败 + + +reset role; +SET ROLE user1 PASSWORD 'Gauss@1234'; +create table user1.pri_al_storage_para_t1 (a int4, b text) +WITH +( + fillfactor =85, + autovacuum_enabled = ON, + toast.autovacuum_enabled = ON, + autovacuum_vacuum_threshold = 100, + toast.autovacuum_vacuum_threshold = 100, + autovacuum_vacuum_scale_factor = 10, + toast.autovacuum_vacuum_scale_factor = 10, + autovacuum_analyze_threshold = 8, + autovacuum_analyze_scale_factor = 9, +-- autovacuum_vacuum_cost_delay: Valid values are between "0" and "100". + autovacuum_vacuum_cost_delay = 90, + toast.autovacuum_vacuum_cost_delay = 92, +-- autovacuum_vacuum_cost_limit: Valid values are between "1" and "10000". + autovacuum_vacuum_cost_limit = 567, + toast.autovacuum_vacuum_cost_limit = 789, + autovacuum_freeze_min_age = 5000, + toast.autovacuum_freeze_min_age = 6000, +-- autovacuum_freeze_max_age: Valid values are between "100000000" and "2000000000". + autovacuum_freeze_max_age = 300000000, + toast.autovacuum_freeze_max_age = 250000000, + autovacuum_freeze_table_age = 170000000, + toast.autovacuum_freeze_table_age = 180000000 +) +partition by range (a) +( + partition pri_al_storage_para_t1_p1 values less than (10), + partition pri_al_storage_para_t1_p2 values less than (20), + partition pri_al_storage_para_t1_p3 values less than (100) +); +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +alter table user1.pri_al_storage_para_t1 add partition p4_rtest_t1 values less than (200); + +alter table user1.pri_al_storage_para_t1 +RESET +( + fillfactor, + autovacuum_enabled, + autovacuum_vacuum_threshold, + autovacuum_vacuum_scale_factor, + autovacuum_analyze_threshold, + autovacuum_analyze_scale_factor, + autovacuum_vacuum_cost_delay, + autovacuum_vacuum_cost_limit, + autovacuum_freeze_min_age, + autovacuum_freeze_max_age, + autovacuum_freeze_table_age +); + +-- step 2.1: alter table: storage parameters +alter table user1.pri_al_storage_para_t1 +SET +( + fillfactor =86, + autovacuum_enabled = OFF, + toast.autovacuum_enabled = ON, + autovacuum_vacuum_threshold = 1000, + toast.autovacuum_vacuum_threshold = 1000, + --"0.000000" and "100.000000" + autovacuum_vacuum_scale_factor = 15, + toast.autovacuum_vacuum_scale_factor = 89, + autovacuum_analyze_threshold = 800, + --"0.000000" and "100.000000" + autovacuum_analyze_scale_factor = 55, +-- autovacuum_vacuum_cost_delay: Valid values are between "0" and "100". + autovacuum_vacuum_cost_delay = 99, + toast.autovacuum_vacuum_cost_delay = 98, +-- autovacuum_vacuum_cost_limit: Valid values are between "1" and "10000". 
+ autovacuum_vacuum_cost_limit = 555, + toast.autovacuum_vacuum_cost_limit = 798, + autovacuum_freeze_min_age = 6000, + toast.autovacuum_freeze_min_age = 4000, +-- autovacuum_freeze_max_age: Valid values are between "100000000" and "2000000000". + autovacuum_freeze_max_age = 400000000, + toast.autovacuum_freeze_max_age = 280000000, + autovacuum_freeze_table_age = 150000000, + toast.autovacuum_freeze_table_age = 160000000 +); + +drop table user1.pri_al_storage_para_t1; --应该失败 + +reset role; +REVOKE alter any table from test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; + +ALTER TABLE pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_person_type2; +ALTER TABLE pri_al_schema.pri_al_persons INHERIT pri_al_schema.pri_al_stuff; +ALTER TABLE pri_al_schema.pri_al_persons NOT OF; + +--trigger +alter table pri_al_schema.pri_al_trigtest disable trigger pri_al_trigtest_b_row_tg; +alter table pri_al_schema.pri_al_trigtest enable trigger pri_al_trigtest_b_row_tg; + +alter table pri_al_schema.pri_al_test_hash DISABLE ROW LEVEL SECURITY; +ALTER TABLE pri_al_schema.pri_al_test_hash REPLICA IDENTITY FULL; + +ALTER TABLE pri_al_schema.tb_pri add column age int; +ALTER TABLE pri_al_schema.tb_pri modify name VARCHAR(60); + +reset role; +set search_path=pri_al_schema,user1; + +drop table pri_al_persons,pri_al_stuff,pri_al_test_hash,pri_al_trigtest,t1,tb_pri,tbl_domain_pri; +drop table user1.pri_al_storage_para_t1; +reset role; +--view +CREATE USER user2 password 'Gauss@1234'; +GRANT alter any table to test_alter_any_table_role; +set search_path=pri_al_schema; + +CREATE TABLE pri_al_schema.customer ( + cid int primary key, + name text not null, + tel text, + passwd text +); +INSERT INTO pri_al_schema.customer + VALUES (101, 'regress_alice', '+81-12-3456-7890', 'passwd123'), + (102, 'regress_bob', '+01-234-567-8901', 'beafsteak'), + (103, 'regress_eve', '+49-8765-43210', 'hamburger'); + --create view +CREATE VIEW pri_al_schema.my_property_normal AS + SELECT * FROM pri_al_schema.customer WHERE name = current_user; + +--create materialized view +create table pri_al_schema.pri_al_t1(c1 int,c2 int); +insert into pri_al_schema.pri_al_t1 values(1,1),(2,2); +create incremental materialized view pri_al_schema.mv1 as select * from pri_al_schema.pri_al_t1; + +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; + +ALTER VIEW pri_al_schema.my_property_normal SET (security_barrier=true); +alter view pri_al_schema.my_property_normal rename to pri_al_property_normal; --failed +drop view pri_al_schema.my_property_normal; + +--materialized view +--alter user mat_pri1 poladmin; +alter materialized view pri_al_schema.mv1 enable row level security; -- no support ALTER MATERIALIZED VIEW is not yet supported. 
+alter materialized view pri_al_schema.mv1 set SCHEMA user2;
+
+
+create table pri_al_schema.test_create_pri (id int);
+
+reset role;
+--revoke the privilege
+REVOKE alter any table from test_alter_any_table_role;
+SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234';
+
+ALTER VIEW pri_al_schema.my_property_normal SET (security_barrier=true);
+alter view pri_al_schema.my_property_normal rename to pri_al_property_normal;
+drop view pri_al_schema.pri_al_property_normal;
+
+--materialized view
+--alter user mat_pri1 poladmin;
+alter materialized view pri_al_schema.mv1 enable row level security;
+
+reset role;
+drop type pri_al_schema.pri_al_person_type1;
+drop type pri_al_schema.pri_al_person_type2;
+drop function pri_al_schema.pri_al_trigtest();
+DROP materialized view pri_al_schema.mv1;
+drop sequence pri_al_schema.serial;
+drop view pri_al_schema.my_property_normal;
+drop table pri_al_schema.customer;
+drop table pri_al_schema.pri_al_t1;
+DROP SCHEMA pri_al_schema CASCADE;
+DROP SCHEMA pri_al_schema_test CASCADE;
+
+DROP TABLESPACE pri_al_tsp;
+\! rm -rf @testtablespace@/pri_tsp
+DROP USER user1 cascade;
+DROP USER test_alter_any_table_role cascade;
+DROP USER user2 cascade;
+ 
\ No newline at end of file
diff --git a/src/test/regress/input/pri_create_any_function.source b/src/test/regress/input/pri_create_any_function.source
new file mode 100644
index 000000000..6b2c57edd
--- /dev/null
+++ b/src/test/regress/input/pri_create_any_function.source
@@ -0,0 +1,123 @@
+CREATE USER test_create_any_function_role PASSWORD 'Gauss@1234';
+GRANT create any function to test_create_any_function_role;
+CREATE TABLESPACE pri_create_fun_tsp LOCATION '@testtablespace@/pri_create_fun_tsp';
+CREATE SCHEMA pri_fun_schema;
+set search_path=pri_fun_schema;
+
+SET ROLE test_create_any_function_role PASSWORD 'Gauss@1234';
+--define a function as a SQL query.
+CREATE FUNCTION pri_fun_schema.pri_func_add_sql(integer, integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;
+--increment an integer in PL/pgSQL, referring to the argument by name.
+CREATE OR REPLACE FUNCTION pri_fun_schema.pri_func_increment_plsql(i integer) RETURNS integer AS $$
+        BEGIN
+                RETURN i + 1;
+        END;
+$$ LANGUAGE plpgsql;
+--return the RECORD type
+CREATE OR REPLACE FUNCTION pri_fun_schema.pri_compute(i int, out result_1 bigint, out result_2 bigint)
+returns SETOF RECORD
+as $$
+begin
+    result_1 = i + 1;
+    result_2 = i * 10;
+return next;
+end;
+$$language plpgsql;
+--return a record containing multiple output parameters.
+CREATE FUNCTION pri_fun_schema.pri_func_dup_sql(in int, out f1 int, out f2 text)
+    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
+    LANGUAGE SQL;
+
+--compute the sum of two integers and return the result; if the input is null, return null.
+CREATE FUNCTION pri_fun_schema.pri_func_add_sql2(num1 integer, num2 integer) RETURN integer
+AS
+BEGIN
+RETURN num1 + num2;
+END;
+/
+--create overloaded functions with the package attribute
+create or replace function pri_fun_schema.pri_get_sal(NAME VARCHAR2) RETURN NUMBER package
+IS
+    BEGIN
+        RETURN 1;
+    END;
+    /
+
+create or replace function pri_fun_schema.pri_get_sal(NAME int) RETURN NUMBER package
+IS
+    BEGIN
+        RETURN 1;
+    END;
+    /
+select pri_fun_schema.pri_func_add_sql(1,2);
+select pri_fun_schema.pri_func_increment_plsql(1);
+select pri_fun_schema.pri_func_dup_sql(1);
+select pri_fun_schema.pri_func_add_sql2(1,2);
+select pri_fun_schema.pri_compute(1);
+select pri_fun_schema.pri_get_sal('name');
+select pri_fun_schema.pri_get_sal(1);
+--PROCEDURE
+CREATE OR REPLACE PROCEDURE pri_fun_schema.pri_prc_add
+(
+    param1 IN INTEGER,
+    param2 IN OUT INTEGER
+)
+AS
+BEGIN
+    param2:= param1 + param2;
+END;
+/
+
+CREATE OR REPLACE PROCEDURE pri_fun_schema.pri_autonomous(out res int) AS
+DECLARE
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    res := 55;
+END;
+/
+select pri_prc_add(1,2);
+select pri_autonomous();
+reset role;
+
+CREATE TABLE pri_fun_schema.creditcard_info (id_number int, name text, credit_card varchar(19));
+SET ROLE test_create_any_function_role PASSWORD 'Gauss@1234';
+CREATE OR REPLACE PROCEDURE pri_fun_schema.pri_autonomous_1() AS
+BEGIN
+    insert into pri_fun_schema.creditcard_info values(66, 66,66);
+    select * from pri_fun_schema.creditcard_info;
+    commit;
+    insert into pri_fun_schema.creditcard_info values(77, 77,77);
+    rollback;
+END;
+/
+
+call pri_fun_schema.pri_autonomous_1();
+
+-- CREATE TABLE in procedure
+create or replace procedure pri_fun_schema.pri_test_proc_create(i in integer)
+as
+begin
+    create table pri_fun_schema.pri_t11(id int) tablespace pri_create_fun_tsp;
+end;
+/
+select pri_fun_schema.pri_test_proc_create(1);
+
+reset role;
+
+--drop the functions.
+DROP FUNCTION pri_fun_schema.pri_func_add_sql(integer, integer);
+DROP FUNCTION pri_fun_schema.pri_func_increment_plsql(integer);
+DROP FUNCTION pri_fun_schema.pri_func_dup_sql(int);
+DROP FUNCTION pri_fun_schema.pri_func_add_sql2(integer, integer);
+DROP FUNCTION pri_fun_schema.pri_compute(int);
+DROP FUNCTION pri_fun_schema.pri_get_sal(VARCHAR2);
+DROP FUNCTION pri_fun_schema.pri_get_sal(int);
+
+DROP SCHEMA pri_fun_schema cascade;
+DROP TABLESPACE pri_create_fun_tsp;
+\! rm -rf '@testtablespace@/pri_create_fun_tsp'
+DROP USER test_create_any_function_role cascade;
\ No newline at end of file
diff --git a/src/test/regress/input/pri_execute_any_function.source b/src/test/regress/input/pri_execute_any_function.source
new file mode 100644
index 000000000..9ca5d360f
--- /dev/null
+++ b/src/test/regress/input/pri_execute_any_function.source
@@ -0,0 +1,157 @@
+CREATE USER test_execute_any_function_role PASSWORD 'Gauss@1234';
+GRANT execute any function to test_execute_any_function_role;
+CREATE TABLESPACE pri_execute_fun_tsp LOCATION '@testtablespace@/exe_fun_tsp';
+CREATE SCHEMA exe_fun_schema;
+set search_path=exe_fun_schema;
+
+
+--define a function as a SQL query.
+CREATE FUNCTION exe_fun_schema.exe_fun_func_add_sql(integer, integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_add_sql(integer, integer) FROM public;
+--increment an integer in PL/pgSQL, referring to the argument by name.
+CREATE OR REPLACE FUNCTION exe_fun_schema.exe_fun_func_increment_plsql(i integer) RETURNS integer AS $$
+        BEGIN
+                RETURN i + 1;
+        END;
+$$ LANGUAGE plpgsql;
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_increment_plsql(integer) FROM public;
+--return the RECORD type
+CREATE OR REPLACE FUNCTION exe_fun_schema.exe_fun_compute(i int, out result_1 bigint, out result_2 bigint)
+returns SETOF RECORD
+as $$
+begin
+    result_1 = i + 1;
+    result_2 = i * 10;
+return next;
+end;
+$$language plpgsql;
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_compute(int) FROM public;
+--return a record containing multiple output parameters.
+CREATE FUNCTION exe_fun_schema.exe_fun_func_dup_sql(in int, out f1 int, out f2 text)
+    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
+    LANGUAGE SQL;
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_dup_sql(int) FROM public;
+
+--compute the sum of two integers and return the result; if the input is null, return null.
+CREATE FUNCTION exe_fun_schema.exe_fun_func_add_sql2(num1 integer, num2 integer) RETURN integer
+AS
+BEGIN
+RETURN num1 + num2;
+END;
+/
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_add_sql2(integer, integer) FROM public;
+--create overloaded functions with the package attribute
+create or replace function 
exe_fun_schema.exe_fun_get_sal(NAME VARCHAR2) RETURN NUMBER package
+IS
+    BEGIN
+        RETURN 1;
+    END;
+    /
+
+create or replace function exe_fun_schema.exe_fun_get_sal(NAME int) RETURN NUMBER package
+IS
+    BEGIN
+        RETURN 1;
+    END;
+    /
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_get_sal(VARCHAR2) FROM public;
+REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_get_sal(int) FROM public;
+SET ROLE test_execute_any_function_role PASSWORD 'Gauss@1234';
+select exe_fun_schema.exe_fun_func_add_sql(1,2);
+select exe_fun_schema.exe_fun_func_increment_plsql(1);
+select exe_fun_schema.exe_fun_func_dup_sql(1);
+select exe_fun_schema.exe_fun_func_add_sql2(1,2);
+select exe_fun_schema.exe_fun_compute(1);
+select exe_fun_schema.exe_fun_get_sal('name');
+select exe_fun_schema.exe_fun_get_sal(1);
+
+reset role;
+--drop the functions.
+DROP FUNCTION exe_fun_schema.exe_fun_func_add_sql(integer, integer);
+DROP FUNCTION exe_fun_schema.exe_fun_func_increment_plsql(integer);
+DROP FUNCTION exe_fun_schema.exe_fun_func_dup_sql(int);
+DROP FUNCTION exe_fun_schema.exe_fun_func_add_sql2(integer, integer);
+DROP FUNCTION exe_fun_schema.exe_fun_compute(int);
+DROP FUNCTION exe_fun_schema.exe_fun_get_sal(VARCHAR2);
+DROP FUNCTION exe_fun_schema.exe_fun_get_sal(int);
+SET ROLE test_execute_any_function_role PASSWORD 'Gauss@1234';
+--stored procedures
+CREATE OR REPLACE PROCEDURE exe_fun_schema.prc_add
+(
+    param1 IN INTEGER,
+    param2 IN OUT INTEGER
+)
+AS
+BEGIN
+    param2:= param1 + param2;
+END;
+/
+
+reset role;
+CREATE OR REPLACE PROCEDURE exe_fun_schema.prc_add
+(
+    param1 IN INTEGER,
+    param2 IN OUT INTEGER
+)
+AS
+BEGIN
+    param2:= param1 + param2;
+END;
+/
+REVOKE EXECUTE ON PROCEDURE exe_fun_schema.prc_add(INTEGER,INTEGER) FROM public;
+
+CREATE OR REPLACE PROCEDURE exe_fun_schema.pri_autonomous(out res int) AS
+DECLARE
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    res := 55;
+END;
+/
+REVOKE EXECUTE ON PROCEDURE exe_fun_schema.pri_autonomous() FROM public;
+
+CREATE TABLE exe_fun_schema.creditcard_info (id_number int, name text, credit_card varchar(19));
+CREATE OR REPLACE PROCEDURE exe_fun_schema.pri_autonomous_1() AS
+BEGIN
+    insert into exe_fun_schema.creditcard_info values(66, 66,66);
+    select * from exe_fun_schema.creditcard_info;
+    commit;
+    insert into exe_fun_schema.creditcard_info values(77, 77,77);
+    rollback;
+END;
+/
+REVOKE EXECUTE ON PROCEDURE exe_fun_schema.pri_autonomous_1() FROM public;
+
+-- CREATE TABLE in procedure
+create or replace procedure exe_fun_schema.pri_test_proc_create(i in integer)
+as
+begin
+    create table exe_fun_schema.pri_t11(id int) tablespace pri_execute_fun_tsp;
+end;
+/
+REVOKE EXECUTE ON PROCEDURE exe_fun_schema.pri_test_proc_create(integer) FROM public;
+
+SET ROLE test_execute_any_function_role PASSWORD 'Gauss@1234';
+SELECT exe_fun_schema.prc_add(2,3);
+select exe_fun_schema.pri_autonomous();
+call exe_fun_schema.pri_autonomous_1();
+select exe_fun_schema.pri_test_proc_create(1);
+
+DROP PROCEDURE exe_fun_schema.prc_add(INTEGER,INTEGER);
+DROP PROCEDURE exe_fun_schema.pri_autonomous();
+DROP PROCEDURE exe_fun_schema.pri_autonomous_1();
+DROP PROCEDURE exe_fun_schema.pri_test_proc_create(integer);
+
+reset role;
+DROP PROCEDURE exe_fun_schema.prc_add(INTEGER,INTEGER);
+DROP PROCEDURE exe_fun_schema.pri_autonomous();
+DROP PROCEDURE exe_fun_schema.pri_autonomous_1();
+DROP PROCEDURE exe_fun_schema.pri_test_proc_create(integer);
+DROP table creditcard_info cascade;
+DROP SCHEMA exe_fun_schema cascade;
+DROP TABLESPACE pri_execute_fun_tsp;
+\! 
rm -rf @testtablespace@/exe_fun_tsp
+DROP user test_execute_any_function_role cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/input/publication.source
similarity index 80%
rename from src/test/regress/sql/publication.sql
rename to src/test/regress/input/publication.source
index 5bd21bdd2..6195e4777 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/input/publication.source
@@ -1,8 +1,17 @@
 --
 -- PUBLICATION
 --
+-- check help
+\h CREATE PUBLICATION
+\h ALTER PUBLICATION
+\h DROP PUBLICATION
+--clear audit log
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+--enable publication and subscription audit
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=16777215" > /dev/null 2>&1
 --- prepare
 CREATE ROLE regress_publication_user LOGIN SYSADMIN PASSWORD 'Abcdef@123';
+CREATE ROLE regress_publication_user2 LOGIN SYSADMIN PASSWORD 'Abcdef@123';
 SET SESSION AUTHORIZATION 'regress_publication_user' PASSWORD 'Abcdef@123';
 CREATE TABLE testpub_tbl1 (id int primary key, data text);
 CREATE TABLE testpub_tbl2 (id int primary key, data text);
@@ -53,12 +62,16 @@ ALTER PUBLICATION testpub_foralltables ADD TABLE testpub_tbl2;
 ALTER PUBLICATION testpub_foralltables DROP TABLE testpub_tbl2;
 -- fail - can't add to for all tables publication
 ALTER PUBLICATION testpub_foralltables SET TABLE pub_test.testpub_nopk;
+-- alter owner
+ALTER PUBLICATION testpub_foralltables OWNER TO regress_publication_user2;
+-- rename
+ALTER PUBLICATION testpub_foralltables rename to testpub_foralltables_rename;
 --- drop testpub_tbl1
 DROP TABLE testpub_tbl1;
 select pubname, tablename from pg_publication_tables where tablename='testpub_tbl1';
 --- drop publication
-DROP PUBLICATION testpub_foralltables;
-select * from pg_publication where pubname='testpub_foralltables';
+DROP PUBLICATION testpub_foralltables_rename;
+select * from pg_publication where pubname='testpub_foralltables_rename';
 DROP PUBLICATION IF EXISTS testpub_nonexists;
 --- clean
 DROP TABLE testpub_tbl2;
@@ -72,6 +85,7 @@ DROP PUBLICATION IF EXISTS testpub_multitbls;
 --- DROP PUBLICATION IF EXISTS testpub_cascade_tbl1;
 RESET SESSION AUTHORIZATION;
 DROP ROLE regress_publication_user;
+DROP ROLE regress_publication_user2;
 --- permission
 CREATE ROLE normal_user LOGIN PASSWORD 'Abcdef@123';
 SET SESSION AUTHORIZATION 'normal_user' PASSWORD 'Abcdef@123';
@@ -79,3 +93,9 @@ SET SESSION AUTHORIZATION 'normal_user' PASSWORD 'Abcdef@123';
 create publication p1;
 RESET SESSION AUTHORIZATION;
 DROP ROLE normal_user;
+
+SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_publication_subscription';
+--clear audit log
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
diff --git a/src/test/regress/input/row_compression/row_compression_basebackup.source b/src/test/regress/input/row_compression/row_compression_basebackup.source
deleted file mode 100644
index d7f2d0689..000000000
--- a/src/test/regress/input/row_compression/row_compression_basebackup.source
+++ /dev/null
@@ -1,6 +0,0 @@
-\! @abs_bindir@/gsql -dpostgres -p @portstring@ -c "create database gs_basebackup;"
-\! @abs_bindir@/gsql -dgs_basebackup -p @portstring@ -f "@abs_srcdir@/sql/gs_basebackup/init/compress_data.sql";
-\! mkdir @abs_bindir@/../gs_basebackup_node_nstream_np
-\! 
chmod 700 @abs_bindir@/../gs_basebackup_node_nstream_np -\! chmod +x @abs_srcdir@/script/gs_basebackup/gs_basebackup.sh -\! @abs_srcdir@/script/gs_basebackup/gs_basebackup.sh @abs_bindir@ @abs_srcdir@ @portstring@ gs_basebackup_node_nstream_np compress_data.sql diff --git a/src/test/regress/input/segment_subpartition_tablespace.source b/src/test/regress/input/segment_subpartition_tablespace.source new file mode 100644 index 000000000..791d165f8 --- /dev/null +++ b/src/test/regress/input/segment_subpartition_tablespace.source @@ -0,0 +1,896 @@ +DROP SCHEMA segment_subpartition_tablespace CASCADE; +CREATE SCHEMA segment_subpartition_tablespace; +SET CURRENT_SCHEMA TO segment_subpartition_tablespace; + +--prepare +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts1' +\! mkdir '@testtablespace@/segment_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts2' +\! mkdir '@testtablespace@/segment_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts3' +\! mkdir '@testtablespace@/segment_subpartition_tablespace_ts3' +CREATE TABLESPACE segment_subpartition_tablespace_ts1 LOCATION '@testtablespace@/segment_subpartition_tablespace_ts1'; +CREATE TABLESPACE segment_subpartition_tablespace_ts2 LOCATION '@testtablespace@/segment_subpartition_tablespace_ts2'; +CREATE TABLESPACE segment_subpartition_tablespace_ts3 LOCATION '@testtablespace@/segment_subpartition_tablespace_ts3'; + +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); +DROP TABLE t_range_range1; + +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + 
SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); +DROP TABLE t_range_range2; + +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); +DROP TABLE t_range_list1; + +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 
9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); +DROP TABLE t_range_list2; + +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); +DROP TABLE t_range_hash1; + +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE segment_subpartition_tablespace_ts3, + 
SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); +DROP TABLE t_range_hash2; + +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); +DROP TABLE t_list_range1; + +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION 
P_LIST4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); +DROP TABLE t_list_range2; + +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); +DROP TABLE t_list_list1; + +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 
VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); +DROP TABLE t_list_list2; + +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); +DROP TABLE t_list_hash1; + +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); +DROP TABLE t_list_hash2; + +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) 
TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); +DROP TABLE t_hash_range1; + +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); +DROP TABLE t_hash_range2; + +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 
VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); +DROP TABLE t_hash_list1; + +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); +DROP TABLE t_hash_list2; + +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); +DROP TABLE t_hash_hash1; + +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE 
segment_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH1_4
+    ),
+    PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_HASH2_1 TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH2_2 TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH2_3 TABLESPACE segment_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH2_4
+    ),
+    PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3
+    (
+        SUBPARTITION P_HASH3_1 TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH3_2 TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH3_3 TABLESPACE segment_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH3_4
+    ),
+    PARTITION P_HASH4
+    (
+        SUBPARTITION P_HASH4_1 TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_HASH4_2 TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_HASH4_3 TABLESPACE segment_subpartition_tablespace_ts3,
+        SUBPARTITION P_HASH4_4
+    ),
+    PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3,
+    PARTITION P_HASH6
+);
+SELECT pg_get_tabledef('t_hash_hash2');
+DROP TABLE t_hash_hash2;
+
+--
+----test add partition with tablespace----
+--
+--since added partitions reuse the same subpartition-definition code, we only test different partition types: range/list
+--range-list
+CREATE TABLE t_range_list3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON)
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+    PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15)
+    ),
+    PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15)
+    )
+);
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15)
+    (
+        SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15)
+    );
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1;
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25);
+SELECT pg_get_tabledef('t_range_list3');
+DROP TABLE t_range_list3;
+
+
+CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+    PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3,
+        SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20)
+    ),
+    PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE2_2 
VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); +DROP TABLE t_range_list4; + +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) +); + +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); +DROP TABLE t_list_hash3; + +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + 
SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); +DROP TABLE t_list_hash4; + +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE t_list_range3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); +DROP TABLE t_list_range3; + +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS 
THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); +DROP TABLE t_list_range4; + +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); +DROP TABLE t_hash_list3; + +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION 
P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); +DROP TABLE t_hash_list4; + +--finish +drop tablespace segment_subpartition_tablespace_ts1; +drop tablespace segment_subpartition_tablespace_ts2; +drop tablespace segment_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts3' + +DROP SCHEMA segment_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/input/segment_subpartition_vacuum_partition.source b/src/test/regress/input/segment_subpartition_vacuum_partition.source new file mode 100644 index 000000000..36c6e4609 --- /dev/null +++ b/src/test/regress/input/segment_subpartition_vacuum_partition.source @@ -0,0 +1,165 @@ +-- +--segment tables don't support vacuum full now +-- +DROP SCHEMA segment_subpartition_vacuum_partition CASCADE; +CREATE SCHEMA segment_subpartition_vacuum_partition; +SET CURRENT_SCHEMA TO segment_subpartition_vacuum_partition; + +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum = off" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot = off" >/dev/null 2>&1 + +CREATE TABLE temp1(c1 int, c2 int); + +-- +--1. 
test for basic function +-- +CREATE TABLE range_list1 +( + month_code VARCHAR2 (30), + dept_code VARCHAR2 (30), + user_no VARCHAR2 (30), + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN('201903') + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN('201910') + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +--vacuum, success +VACUUM range_list1 PARTITION (p_201901); +VACUUM range_list1 SUBPARTITION (p_201902_a); + +--vacuum full, success +VACUUM FULL range_list1 PARTITION (p_201901); +VACUUM FULL range_list1 SUBPARTITION (p_201902_a); + +--vacuum full compact, fail +VACUUM FULL COMPACT range_list1 PARTITION (p_201901); +VACUUM FULL COMPACT range_list1 SUBPARTITION (p_201902_a); + +--vacuum freeze, success +VACUUM FREEZE range_list1 PARTITION (p_201901); +VACUUM FREEZE range_list1 SUBPARTITION (p_201902_a); + +--vacuum verbose, success +VACUUM VERBOSE range_list1 PARTITION (p_201901); +VACUUM VERBOSE range_list1 SUBPARTITION (p_201902_a); + +--vacuum option all +VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); + +-- +--2. test the actual work +-- +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (1200) + ( + SUBPARTITION customer3_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; + +SELECT pg_relation_size('range_list_sales1'); + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +SELECT pg_relation_size('range_list_sales1'); + +--vacuum full partition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 PARTITION (customer1); +VACUUM FULL range_list_sales1 PARTITION (customer2); +VACUUM FULL range_list_sales1 PARTITION (customer3); +SELECT pg_relation_size('range_list_sales1'); + +--delete & insert +DELETE FROM 
range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +SELECT pg_relation_size('range_list_sales1'); + +--vacuum full subpartition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel3); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel4); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); +SELECT pg_relation_size('range_list_sales1'); + +--check index is ok +SELECT /*+ tablescan(range_list_sales1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--finish +DROP TABLE temp1; +DROP TABLE range_list1; +DROP TABLE range_list_sales1; + +DROP SCHEMA segment_subpartition_vacuum_partition CASCADE; +RESET CURRENT_SCHEMA; + +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot" >/dev/null 2>&1 diff --git a/src/test/regress/input/single_node_user_mapping.source b/src/test/regress/input/single_node_user_mapping.source new file mode 100644 index 000000000..8d23e37b2 --- /dev/null +++ b/src/test/regress/input/single_node_user_mapping.source @@ -0,0 +1,36 @@ +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); +--- prepare +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/usermapping.key.cipher +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/usermapping.key.rand +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o usermapping > /dev/null 2>&1 ; echo $? 
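+-- Editorial note (assumption): the shell lines above regenerate the
+-- usermapping cipher/rand key files under $GAUSSHOME/bin so that the
+-- password option given to CREATE USER MAPPING below can be stored encrypted.
+-- A hypothetical spot check, not part of the original test, would be:
+--   SELECT umoptions FROM pg_user_mapping;  -- password should not appear in plain text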
+CREATE ROLE regress_usermapping_user LOGIN SYSADMIN PASSWORD 'Abcdef@123'; +SET SESSION AUTHORIZATION 'regress_usermapping_user' PASSWORD 'Abcdef@123'; + +CREATE FOREIGN DATA WRAPPER dummy; +CREATE SERVER dummy_srv FOREIGN DATA WRAPPER dummy; +CREATE SERVER dummy_srv2 FOREIGN DATA WRAPPER dummy; +CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', password 'shouldBeEncrypt'); +CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test'); + +ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password 'shouldBeEncrypt'); +ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password 'shouldBeEncrypt2'); +ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(DROP password); + +DROP USER MAPPING FOR current_user SERVER dummy_srv2; +DROP USER MAPPING FOR current_user SERVER dummy_srv; + +DROP SERVER dummy_srv; +DROP SERVER dummy_srv2; + +--DROP FOREIGN DATA WRAPPER is not supported now +--DROP FOREIGN DATA WRAPPER dummy; + +RESET SESSION AUTHORIZATION; +--can't drop role regress_usermapping_user, since FOREIGN DATA WRAPPER dummy depends on it +--DROP ROLE regress_usermapping_user; + +SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'current_user'; + +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); \ No newline at end of file diff --git a/src/test/regress/input/sqlldr/gs_loader_column.source b/src/test/regress/input/sqlldr/gs_loader_column.source index 1616cf944..1590c1372 100644 --- a/src/test/regress/input/sqlldr/gs_loader_column.source +++ b/src/test/regress/input/sqlldr/gs_loader_column.source @@ -45,7 +45,6 @@ select * from SQLLDR_COL_TBL order by 1,2,3,4,5,6,7,8,9,10; \! @abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_position4.ctl data=@abs_srcdir@/data/gs_loader_position4.data port=@portstring@ db=regression passwd=gauss@123 errors=100 select * from SQLLDR_COL_TBL where col_6 is null; --- issue: DTS2021092327784 TIMESTAMP CREATE TABLE SQLLDR_COL_001( ID INTEGER, NAME VARCHAR2(20), diff --git a/src/test/regress/input/sqlldr/gs_loader_issues.source b/src/test/regress/input/sqlldr/gs_loader_issues.source index 4f7d4b66e..ec1c216cb 100644 --- a/src/test/regress/input/sqlldr/gs_loader_issues.source +++ b/src/test/regress/input/sqlldr/gs_loader_issues.source @@ -1,4 +1,3 @@ --- issue: DTS2021091619762 when > should not support create table sqlldr_p4(sequence int,name VARCHAR2(20),con VARCHAR2(20)) partition by range(sequence) ( @@ -9,10 +8,9 @@ create table sqlldr_p4(sequence int,name VARCHAR2(20),con VARCHAR2(20)) partition p5 values less than (MAXVALUE) ); -\! @abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_issue_DTS2021091619762.ctl data=@abs_srcdir@/data/gs_loader_issue_DTS2021091619762.csv port=@portstring@ db=regression passwd=gauss@123 rows=1000000 bindsize=1000000 +\! @abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_issue_TESTTABLE.ctl data=@abs_srcdir@/data/gs_loader_issue_TESTTABLE.csv port=@portstring@ db=regression passwd=gauss@123 rows=1000000 bindsize=1000000 drop table sqlldr_p4; --- issue: DTS2021090116724 discard not valid CREATE TABLE sqlldr_issue_001( ID INTEGER, NAME VARCHAR2(20), @@ -23,7 +21,6 @@ select * from sqlldr_issue_001 order by 1,2,3; \! 
cat @abs_srcdir@/data/gs_loader_issue_001.dsc drop table sqlldr_issue_001; --- issue: DTS2021091020369 multi POSITION in one line CREATE TABLE sqlldr_tb21( ID INTEGER, NAME VARCHAR2(20), @@ -35,7 +32,6 @@ select count(*) COUNT from sqlldr_tb21; select max(ID) MAX from sqlldr_tb21; drop table sqlldr_tb21; --- issue: DTS2021090720669 permission denied file report error CREATE TABLE sqlldr_issue_permission( ID INTEGER, NAME VARCHAR2(20), @@ -50,7 +46,6 @@ CREATE TABLE sqlldr_issue_permission( \! @abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_issue_permission.ctl data=@abs_srcdir@/data/gs_loader_issue_permission.csv port=@portstring@ db=regression passwd=gauss@123 \! chmod +r @abs_srcdir@/data/gs_loader_issue_permission.ctl --- issue: DTS2021091621098 constant ""/'' CREATE TABLE sqlldr_issue_001( ID INTEGER, NAME VARCHAR2(20), @@ -63,7 +58,6 @@ drop table sqlldr_issue_001; -- teardown drop table sqlldr_issue_permission; --- issue: DTS2021092327751 error for options in control file CREATE TABLE sqlldr_issue_options( ID INTEGER, NAME VARCHAR2(20), @@ -76,7 +70,6 @@ select * from sqlldr_issue_options; -- teardown drop table sqlldr_issue_options; --- issue: DTS2021092920305 error for options in control file CREATE TABLE sqlldr_issue_options( ID INTEGER, NAME VARCHAR2(20), @@ -89,7 +82,6 @@ select * from sqlldr_issue_options; -- teardown drop table sqlldr_issue_options; --- issue: DTS2021092425716 error for badfile in control file CREATE TABLE sqlldr_issue_badfile( ID INTEGER, NAME VARCHAR2(20), @@ -102,7 +94,6 @@ select * from sqlldr_issue_badfile; -- teardown drop table sqlldr_issue_badfile; --- issue: DTS2021092327796 error for infile in control file CREATE TABLE sqlldr_issue_infile( ID INTEGER, NAME VARCHAR2(20), @@ -115,7 +106,6 @@ select * from sqlldr_issue_infile; -- teardown drop table sqlldr_issue_infile; --- issue: DTS2021101915706 suport login -U -h -W -p CREATE TABLE sqlldr_issue_login( ID INTEGER, NAME VARCHAR2(20), @@ -128,7 +118,6 @@ select * from sqlldr_issue_login; -- teardown drop table sqlldr_issue_login; --- issue: DTS2021101425838 hide password CREATE TABLE sqlldr_issue_hide_passwd( ID INTEGER, NAME VARCHAR2(20), diff --git a/src/test/regress/sql/subscription.sql b/src/test/regress/input/subscription.source similarity index 60% rename from src/test/regress/sql/subscription.sql rename to src/test/regress/input/subscription.source index e22a10d8b..a7ab85070 100644 --- a/src/test/regress/sql/subscription.sql +++ b/src/test/regress/input/subscription.source @@ -1,8 +1,20 @@ -- -- SUBSCRIPTION -- +-- check help +\h CREATE SUBSCRIPTION +\h ALTER SUBSCRIPTION +\h DROP SUBSCRIPTION +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); +--enable publication and subscription audit +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=16777215" > /dev/null 2>&1 --- prepare +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/subscription.key.cipher +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/subscription.key.rand +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o subscription > /dev/null 2>&1 ; echo $? 
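+-- Editorial note (assumption): the prep lines above regenerate the
+-- subscription cipher/rand key pair with gs_guc so that passwords embedded
+-- in a subscription's conninfo (see the CREATE/ALTER SUBSCRIPTION statements
+-- with password=... below) can be masked in pg_subscription.subconninfo
+-- rather than stored in plain text.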
CREATE ROLE regress_subscription_user LOGIN SYSADMIN PASSWORD 'Abcdef@123'; +CREATE ROLE regress_subscription_user2 LOGIN SYSADMIN PASSWORD 'Abcdef@123'; SET SESSION AUTHORIZATION 'regress_subscription_user' PASSWORD 'Abcdef@123'; DROP SUBSCRIPTION IF EXISTS testsub; --- create subscription @@ -18,6 +30,12 @@ CREATE SUBSCRIPTION testsub CONNECTION 'testconn' PUBLICATION testpub; -- fail - unrecognized subscription parameter: create_slot CREATE SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (create_slot=false); CREATE SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (ENABLED=false, slot_name='testsub', synchronous_commit=off); +-- create SUBSCRIPTION with conninfo values in doubled single quotes, used to check the mask string bug +CREATE SUBSCRIPTION testsub_maskconninfo CONNECTION 'host=''1.2.3.4'' port=''12345'' user=''username'' dbname=''postgres'' password=''password_1234''' PUBLICATION testpub WITH (ENABLED=false, slot_name='testsub', synchronous_commit=off); + +-- alter connection +ALTER SUBSCRIPTION testsub CONNECTION 'host=''1.2.3.4'' port=''12345'' user=''username'' dbname=''postgres'' password=''password_1234'''; +ALTER SUBSCRIPTION testsub CONNECTION 'dbname=does_not_exist'; reset client_min_messages; select subname, pg_get_userbyid(subowner) as Owner, subenabled, subconninfo, subpublications from pg_subscription where subname='testsub'; --- alter subscription @@ -28,6 +46,8 @@ select subname, subenabled, subpublications from pg_subscription where subname= ALTER SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist2'; select subname, subenabled, subconninfo from pg_subscription where subname='testsub'; ALTER SUBSCRIPTION testsub SET (conninfo='dbname=doesnotexist3'); +------ alter SUBSCRIPTION with conninfo values in doubled single quotes, used to check the mask string bug +ALTER SUBSCRIPTION testsub_maskconninfo SET (conninfo='host=''1.2.3.4'' port=''12345'' user=''username'' dbname=''postgres'' password=''password_1234''', synchronous_commit=on); select subname, subenabled, subconninfo from pg_subscription where subname='testsub'; ------ modify synchronous_commit ALTER SUBSCRIPTION testsub SET (synchronous_commit=on); @@ -35,24 +55,30 @@ select subname, subenabled, subsynccommit from pg_subscription where subname='t ------ modify slot_name to non-null value ------ fail - Currently enabled=false, cannot change slot_name to a non-null value. ALTER SUBSCRIPTION testsub SET (slot_name='testsub'); +-- alter owner +ALTER SUBSCRIPTION testsub owner to regress_subscription_user2; +--rename +ALTER SUBSCRIPTION testsub rename to testsub_rename; --- inside a transaction block ------ CREATE SUBSCRIPTION ... WITH (enabled = true) ------ fail - ERROR: CREATE SUBSCRIPTION ... 
WITH (enabled = true) cannot run inside a transaction block BEGIN; -CREATE SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (ENABLED=true); +CREATE SUBSCRIPTION testsub_rename CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (ENABLED=true); COMMIT; -- -- active SUBSCRIPTION BEGIN; -ALTER SUBSCRIPTION testsub ENABLE; -select subname, subenabled from pg_subscription where subname='testsub'; -ALTER SUBSCRIPTION testsub SET (ENABLED=false); -select subname, subenabled from pg_subscription where subname='testsub'; +ALTER SUBSCRIPTION testsub_rename ENABLE; +select subname, subenabled from pg_subscription where subname='testsub_rename'; +ALTER SUBSCRIPTION testsub_rename SET (ENABLED=false); +select subname, subenabled from pg_subscription where subname='testsub_rename'; COMMIT; --- drop subscription -DROP SUBSCRIPTION IF EXISTS testsub; +DROP SUBSCRIPTION IF EXISTS testsub_rename; +DROP SUBSCRIPTION IF EXISTS testsub_maskconninfo; --- cleanup RESET SESSION AUTHORIZATION; DROP ROLE regress_subscription_user; +DROP ROLE regress_subscription_user2; -- built-in function test select pg_replication_origin_create('origin_test'); @@ -86,3 +112,9 @@ select pg_replication_origin_drop('origin_test'); -- error select pg_replication_origin_session_setup('origin_test'); drop table t_origin_test; + +SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_publication_subscription' order by time; +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1 diff --git a/src/test/regress/input/timecapsule_partition_ustore_test_1.source b/src/test/regress/input/timecapsule_partition_ustore_test_1.source new file mode 100644 index 000000000..e09fb6c97 --- /dev/null +++ b/src/test/regress/input/timecapsule_partition_ustore_test_1.source @@ -0,0 +1,75 @@ +set enable_default_ustore_table = on; + +show undo_retention_time; + +\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 +select pg_sleep(4); +show undo_retention_time; + +create table t_timecapsule_test_tmp(id int, snaptime timestamptz, snapcsn bigint); + +CREATE OR REPLACE FUNCTION findCsn(int8) + RETURNS INTEGER + LANGUAGE plpgsql +AS +$BODY$ +declare + count integer; +begin + count = (select snapcsn from t_timecapsule_test_tmp where id = $1); + return count; +end; +$BODY$; + +CREATE OR REPLACE FUNCTION findTime(int8) + RETURNS timestamptz + LANGUAGE plpgsql +AS +$BODY$ +declare + count timestamptz; +begin + count = (select snaptime from t_timecapsule_test_tmp where id = $1); + return count; +end; +$BODY$; + +--partition table +--timecapsule query on partition table +drop table if exists t1; +create table t1 (id int) +partition by range (id) +( +partition p1 values less than (10), +partition p2 values less than (20) +); +insert into t1 values(1); +select pg_sleep(4); +insert into t_timecapsule_test_tmp select 1, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn(); +update t1 set id = 2 where id = 1; +select * from t1 timecapsule csn findCsn(1); +drop table t1; + +delete from t_timecapsule_test_tmp; + +--timecapsule table on partition table +drop table if exists t1; +create table t1 (id int) +partition by range (id) +( +partition p1 values less than (10), +partition p2 values less than (20) +); +insert into t1 values(1); +insert into t1 values(2); +select pg_sleep(4); +insert into t_timecapsule_test_tmp select 1, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn(); +delete from t1 where id = 1; +select * from t1 order by id; +TIMECAPSULE TABLE t1 TO csn findCsn(1); +select * from t1; +drop table t1; + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 + +set enable_default_ustore_table = off; diff --git a/src/test/regress/input/timecapsule_partition_ustore_test_2.source b/src/test/regress/input/timecapsule_partition_ustore_test_2.source new file mode 100644 index 000000000..3ac80ff2c --- /dev/null +++ b/src/test/regress/input/timecapsule_partition_ustore_test_2.source @@ -0,0 +1,97 @@ +set enable_default_ustore_table = on; + +\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_recyclebin = on" > /dev/null 2>&1 + +--partition table +drop table if exists t1; +purge table t1; +create table t1 (id int) +partition by range (id) +( +partition p1 values less than (10), +partition p2 values less than (20) +); +insert into t1 values(1); +select pg_sleep(1); +drop table t1; +timecapsule table t1 to before drop; +select * from t1; +drop table t1; +purge table t1; + +--timecapsule truncate partition table +drop table if exists t1; +purge recyclebin; +create table t1 (id int) +partition by range (id) +( +partition p1 values less than (10), +partition p2 values less than (20) +); +insert into t1 values(1); +select pg_sleep(1); +truncate table t1; +select * from t1; +timecapsule table t1 to before truncate; +select * from t1; +drop table t1; +purge table t1; +purge recyclebin; + +--truncate purge partition table +create table t1 (id int) +partition by range (id) +( +partition p1 values less than (10), +partition p2 values less than (20) +); +insert into t1 values(1); +select pg_sleep(1); +truncate table t1; +select rcyoriginname from gs_recyclebin; +purge table t1; +select pg_sleep(1); +select * from gs_recyclebin; +drop table t1; +purge recyclebin; + +--truncate partition toast table +create table SPLIT_PARTITION_TABLE_001 +( +c_smallint smallint, +c_integer integer, +c_bigint bigint, +c_decimal decimal, +c_numeric numeric, +c_real real, +c_double double precision, +c_character_1 character varying(1024000), +c_varchar varchar(100), +c_character_2 character(100), +c_char_1 char(100), +c_character_3 character, +c_char_2 char, +c_text text, +c_nvarchar2 nvarchar2, +c_name name, +c_timestamp_1 timestamp without time zone , +c_timestamp_2 timestamp with time zone, +c_date date, +c_tsvector tsvector, +c_tsquery tsquery , +constraint SPLIT_PARTITION_TABLE_001_constraint primary key(c_smallint,c_integer,c_bigint,c_decimal,c_double) +) +partition by range (c_smallint,c_integer,c_bigint,c_decimal) +( +partition SPLIT_PARTITION_TABLE_001_1 values less than (0,0,0,0), +partition SPLIT_PARTITION_TABLE_001_2 values less than (30,30,300,400.3), +partition SPLIT_PARTITION_TABLE_001_3 values less than (60,60,600,800.6), +partition SPLIT_PARTITION_TABLE_001_4 values less than (100,100,1000,1100.2) +); +insert into SPLIT_PARTITION_TABLE_001 values(generate_series(-10,99),-10,100,100.3,10.3,10.2,1000.25,rpad('xy',4096,'ab'),'ABCD','ABC','DEF','A','A','HK','FVT_DATA_PARTITIONFVT_DATA_PARTITION','b','1954-2-6 00:00:30+8','1954-2-6 23:12:12.2356','1954-2-6 13:12:12.2356','abc db','ege'); +truncate table split_partition_table_001; +timecapsule table split_partition_table_001 to before truncate; +select count(*) from split_partition_table_001; + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_recyclebin = off" > /dev/null 2>&1 +set enable_default_ustore_table = off; diff --git a/src/test/regress/input/timecapsule_version_ustore_test_2.source b/src/test/regress/input/timecapsule_version_ustore_test_2.source index 11a44695f..fb5114f50 100644 --- a/src/test/regress/input/timecapsule_version_ustore_test_2.source +++ b/src/test/regress/input/timecapsule_version_ustore_test_2.source @@ -1,16 +1,11 @@ -- test: sighup set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; -show version_retention_age; +show undo_retention_time; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; create schema schema_test_3; set search_path = schema_test_3; @@ -109,14 +104,12 @@ select * from pg_class timecapsule csn 0; timecapsule table pg_class to timestamp 0; timecapsule table pg_class to csn 0; - - drop table if exists tv_lineitem; create table tv_lineitem ( l_orderkey bigint not null , l_partkey bigint not null -) +) with (orientation = column) ; select * from tv_lineitem timecapsule timestamp 0; @@ -207,6 +200,7 @@ select * from fb_qtbl_invalid_1 timecapsule csn -1; drop table fb_qtbl_invalid_1; + --timecapsule query with invalid timestamp drop table if exists fb_qtbl_invalid_2; create table fb_qtbl_invalid_2 (id int); @@ -521,10 +515,10 @@ begin execute immediate 'timecapsule table fb_tbl_normal to timestamp to_timestamp (''' || scn_num || ''',''yyyy-mm-dd hh24:mi:ss'')'; end; / --- call pro_fb_tbl_normal_rb1(1); --- select * from fb_tbl_normal order by id; --- call pro_fb_tbl_normal_rb1(2); --- select * from fb_tbl_normal order by id; +call pro_fb_tbl_normal_rb1(1); +select * from fb_tbl_normal order by id; +call pro_fb_tbl_normal_rb1(2); +select * from fb_tbl_normal order by id; call pro_fb_tbl_normal_rb1(3); select * from fb_tbl_normal order by id; drop table temp; @@ -534,16 +528,5 @@ drop view dual; drop schema schema_test_3 cascade; reset search_path; --- reset vacuum_defer_cleanup_age -show vacuum_defer_cleanup_age; -show version_retention_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 -select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; - -set enable_default_ustore_table = off; +set enable_default_ustore_table = off; +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 diff --git a/src/test/regress/input/timecapsule_version_ustore_test_3.source b/src/test/regress/input/timecapsule_version_ustore_test_3.source index 06e1b0d08..b38b12b36 100644 --- a/src/test/regress/input/timecapsule_version_ustore_test_3.source +++ b/src/test/regress/input/timecapsule_version_ustore_test_3.source @@ -1,16 +1,11 @@ -- test: sighup set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; -show version_retention_age; +show undo_retention_time; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; ---------------------------------------------------------------------------------- -- test version query, restore in one transaction @@ -210,16 +205,11 @@ drop procedure f_clean_csn; drop schema schema_test_3 cascade; reset search_path; --- reset vacuum_defer_cleanup_age -show vacuum_defer_cleanup_age; -show version_retention_age; +-- reset undo_retention_time +show undo_retention_time; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; set enable_default_ustore_table = off; diff --git a/src/test/regress/input/timecapsule_version_ustore_test_4.source b/src/test/regress/input/timecapsule_version_ustore_test_4.source index 31d6aff1a..32151f0df 100644 --- a/src/test/regress/input/timecapsule_version_ustore_test_4.source +++ b/src/test/regress/input/timecapsule_version_ustore_test_4.source @@ -1,16 +1,11 @@ -- test: sighup set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; -show version_retention_age; +show undo_retention_time; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; ---------------------------------------------------------------------------------- -- test version query, restore in one transaction @@ -212,16 +207,11 @@ drop procedure f_clean_csn; drop schema schema_test_3 cascade; reset search_path; --- reset vacuum_defer_cleanup_age -show vacuum_defer_cleanup_age; -show version_retention_age; +-- reset undo_retention_time +show undo_retention_time; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; set enable_default_ustore_table = off; diff --git a/src/test/regress/input/timecapsule_version_ustore_test_5.source b/src/test/regress/input/timecapsule_version_ustore_test_5.source index 51a82c8c3..682a32b2e 100644 --- a/src/test/regress/input/timecapsule_version_ustore_test_5.source +++ b/src/test/regress/input/timecapsule_version_ustore_test_5.source @@ -1,15 +1,10 @@ set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; -show version_retention_age; +show undo_retention_time; -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; create schema schema_test_ztt; set search_path = schema_test_ztt; @@ -400,16 +395,10 @@ drop TABLESPACE new_tablespace_1; drop schema schema_test_ztt; reset search_path; - -show vacuum_defer_cleanup_age; show version_retention_age; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; set enable_default_ustore_table = off; diff --git a/src/test/regress/input/timecapsule_version_ustore_test_6.source b/src/test/regress/input/timecapsule_version_ustore_test_6.source index 97870b8e9..eb1ae3590 100644 --- a/src/test/regress/input/timecapsule_version_ustore_test_6.source +++ b/src/test/regress/input/timecapsule_version_ustore_test_6.source @@ -1,15 +1,10 @@ set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; -show version_retention_age; +show undo_retention_time; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; create schema schema_test_ztt2; set search_path = schema_test_ztt2; @@ -731,15 +726,10 @@ drop TABLESPACE new_tablespace_2; drop schema schema_test_ztt2; reset search_path; -show vacuum_defer_cleanup_age; show version_retention_age; -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); -show vacuum_defer_cleanup_age; - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); -show version_retention_age; +show undo_retention_time; set enable_default_ustore_table = off; diff --git a/src/test/regress/input/ustore_subpartition_tablespace.source b/src/test/regress/input/ustore_subpartition_tablespace.source new file mode 100644 index 000000000..6692d397c --- /dev/null +++ b/src/test/regress/input/ustore_subpartition_tablespace.source @@ -0,0 +1,896 @@ +DROP SCHEMA ustore_subpartition_tablespace CASCADE; +CREATE SCHEMA ustore_subpartition_tablespace; +SET CURRENT_SCHEMA TO ustore_subpartition_tablespace; + +--prepare +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts1' +\! mkdir '@testtablespace@/ustore_subpartition_tablespace_ts1' +\! 
rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts2' +\! mkdir '@testtablespace@/ustore_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts3' +\! mkdir '@testtablespace@/ustore_subpartition_tablespace_ts3' +CREATE TABLESPACE ustore_subpartition_tablespace_ts1 LOCATION '@testtablespace@/ustore_subpartition_tablespace_ts1'; +CREATE TABLESPACE ustore_subpartition_tablespace_ts2 LOCATION '@testtablespace@/ustore_subpartition_tablespace_ts2'; +CREATE TABLESPACE ustore_subpartition_tablespace_ts3 LOCATION '@testtablespace@/ustore_subpartition_tablespace_ts3'; + +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); +DROP TABLE t_range_range1; + +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) 
TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); +DROP TABLE t_range_range2; + +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); +DROP TABLE t_range_list1; + +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE 
ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); +DROP TABLE t_range_list2; + +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); +DROP TABLE t_range_hash1; + +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); +DROP TABLE t_range_hash2; + +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, 
+ SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); +DROP TABLE t_list_range1; + +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); +DROP TABLE t_list_range2; + +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE 
ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); +DROP TABLE t_list_list1; + +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); +DROP TABLE t_list_list2; + +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 
VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); +DROP TABLE t_list_hash1; + +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); +DROP TABLE t_list_hash2; + +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); +DROP TABLE t_hash_range1; + +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION 
P_HASH1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); +DROP TABLE t_hash_range2; + +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); +DROP TABLE t_hash_list1; + +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 
5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); +DROP TABLE t_hash_list2; + +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); +DROP TABLE t_hash_hash1; + +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 TABLESPACE 
ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_hash2'); +DROP TABLE t_hash_hash2; + +-- +----test add partition with tablespace---- +-- +--since ADD SUBPARTITION uses the same code path for its definition, we only test the different partition types: range/list +--range-list +CREATE TABLE t_range_list3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ) +); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1; +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_range_list3'); +DROP TABLE t_range_list3; + + +CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 
9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); +DROP TABLE t_range_list4; + +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) +); + +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); +DROP TABLE t_list_hash3; + +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); +DROP TABLE t_list_hash4; + +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE 
t_list_range3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); +DROP TABLE t_list_range3; + +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + 
PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); +DROP TABLE t_list_range4; + +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); +DROP TABLE t_hash_list3; + +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION 
P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); +DROP TABLE t_hash_list4; + +--finish +drop tablespace ustore_subpartition_tablespace_ts1; +drop tablespace ustore_subpartition_tablespace_ts2; +drop tablespace ustore_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts3' + +DROP SCHEMA ustore_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/input/ustore_subpartition_vacuum_partition.source b/src/test/regress/input/ustore_subpartition_vacuum_partition.source new file mode 100644 index 000000000..e5bb0b194 --- /dev/null +++ b/src/test/regress/input/ustore_subpartition_vacuum_partition.source @@ -0,0 +1,162 @@ +DROP SCHEMA ustore_subpartition_vacuum_partition CASCADE; +CREATE SCHEMA ustore_subpartition_vacuum_partition; +SET CURRENT_SCHEMA TO ustore_subpartition_vacuum_partition; + +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum = off" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot = off" >/dev/null 2>&1 + +CREATE TABLE temp1(c1 int, c2 int); + +-- +--1. 
test for basic function +-- +CREATE TABLE range_list1 +( + month_code VARCHAR2 (30), + dept_code VARCHAR2 (30), + user_no VARCHAR2 (30), + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN('201903') + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN('201910') + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +--vacuum, success +VACUUM range_list1 PARTITION (p_201901); +VACUUM range_list1 SUBPARTITION (p_201902_a); + +--vacuum full, success +VACUUM FULL range_list1 PARTITION (p_201901); +VACUUM FULL range_list1 SUBPARTITION (p_201902_a); + +--vacuum full compact, fail +VACUUM FULL COMPACT range_list1 PARTITION (p_201901); +VACUUM FULL COMPACT range_list1 SUBPARTITION (p_201902_a); + +--vacuum freeze, success +VACUUM FREEZE range_list1 PARTITION (p_201901); +VACUUM FREEZE range_list1 SUBPARTITION (p_201902_a); + +--vacuum verbose, success +VACUUM VERBOSE range_list1 PARTITION (p_201901); +VACUUM VERBOSE range_list1 SUBPARTITION (p_201902_a); + +--vacuum option all +VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); + +-- +--2. test the actual work +-- +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (1200) + ( + SUBPARTITION customer3_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; + +SELECT pg_relation_size('range_list_sales1'); + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +SELECT pg_relation_size('range_list_sales1'); + +--vacuum full partition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 PARTITION (customer1); +VACUUM FULL range_list_sales1 PARTITION (customer2); +VACUUM FULL range_list_sales1 PARTITION (customer3); +SELECT pg_relation_size('range_list_sales1'); + +--delete & insert +DELETE 
FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +SELECT pg_relation_size('range_list_sales1'); + +--vacuum full subpartition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel3); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel4); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); +SELECT pg_relation_size('range_list_sales1'); + +--check index is ok +SELECT /*+ tablescan(range_list_sales1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--finish +DROP TABLE temp1; +DROP TABLE range_list1; +DROP TABLE range_list_sales1; + +DROP SCHEMA ustore_subpartition_vacuum_partition CASCADE; +RESET CURRENT_SCHEMA; + +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot" >/dev/null 2>&1 diff --git a/src/test/regress/input/vec_nestloop_end.source b/src/test/regress/input/vec_nestloop_end.source index cac54ac00..9ed52cc0c 100644 --- a/src/test/regress/input/vec_nestloop_end.source +++ b/src/test/regress/input/vec_nestloop_end.source @@ -6,9 +6,6 @@ SET standard_conforming_strings = on; SET check_function_bodies = false; SET default_tablespace = ''; SET default_with_oids = false; ----- ---- Special Case: nestloop + hashjoin + operator with parameters pushed down (dts2014111302175/2014120306303) ----- CREATE INDEX vecvtor_nestloop_base_index_01 ON VECTOR_NESTLOOP_TABLE_05 USING psort (id) LOCAL(PARTITION b1_p1_id_idx, PARTITION b1_p2_id_idx, PARTITION b1_p3_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_02 ON VECTOR_NESTLOOP_TABLE_06 USING psort (id, c_d_id, c_id) LOCAL(PARTITION b5_p1_id_c_d_id_c_id_idx, PARTITION b5_p2_id_c_d_id_c_id_idx, PARTITION b5_p3_id_c_d_id_c_id_idx, PARTITION b5_p4_id_c_d_id_c_id_idx, PARTITION b5_p5_id_c_d_id_c_id_idx, PARTITION b5_p6_id_c_d_id_c_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_03 ON VECTOR_NESTLOOP_TABLE_07 USING psort (id, c_d_id, c_w_id) LOCAL(PARTITION b7_p1_id_c_d_id_c_w_id_idx, PARTITION b7_p2_id_c_d_id_c_w_id_idx, PARTITION b7_p3_id_c_d_id_c_w_id_idx, PARTITION b7_p4_id_c_d_id_c_w_id_idx, PARTITION b7_p5_id_c_d_id_c_w_id_idx, PARTITION b7_p6_id_c_d_id_c_w_id_idx, PARTITION b7_p7_id_c_d_id_c_w_id_idx, PARTITION b7_p8_id_c_d_id_c_w_id_idx, PARTITION b7_p9_id_c_d_id_c_w_id_idx, PARTITION b7_p10_id_c_d_id_c_w_id_idx, PARTITION b7_p11_id_c_d_id_c_w_id_idx) ; diff --git a/src/gausskernel/dbmind/tools/ai_server/README.md 
b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionCreateTest.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/README.md rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionCreateTest.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionCreateTest.java b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionCreateTest.java new file mode 100644 index 000000000..1ba1915ba --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionCreateTest.java @@ -0,0 +1,160 @@ +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Map.Entry; +import java.util.concurrent.CountDownLatch; +import java.sql.*; +import java.util.*; +public class ClientEncryptionCreateTest { + + public static void main(String[] args) throws IOException { + // prepare connect info + String PORT = args[0]; + String urls = "jdbc:postgresql://localhost:"+ PORT +"/regression?enable_ce=1"; + Properties urlProps = new Properties(); + urlProps.setProperty("user", "test"); + urlProps.setProperty("password", "Gauss@123"); + + // create connection + int conNumber = 2; + Connection[] cons = new Connection[conNumber]; + try{ + for (int i = 0; i < conNumber; i++) { + cons[i] = DriverManager.getConnection(urls, urlProps); + } + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + + Connection con; + Statement st; + PreparedStatement pst; + int i = 0; + int j = 0; + try { + con = cons[0]; + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS CreateCMK1 CASCADE;"); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS CreateCMK CASCADE;"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY CreateCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY CreateCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/2\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY CreateCEK1 WITH VALUES (CLIENT_MASTER_KEY = CreateCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY CreateCEK WITH VALUES (CLIENT_MASTER_KEY = CreateCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + System.out.println("ok"); + // numeric types + pst = con.prepareStatement("drop table if exists t1;"); + pst.executeUpdate(); + System.out.println("drop table if exists t1;"); + try { + System.out.println(new Date( ) + "\n"); + Thread.sleep(1000*30); // sleep for 30 seconds + System.out.println(new Date( ) + "\n"); + } catch (Exception e) { + System.out.println("Got an exception!"); + } + pst = con.prepareStatement("create table t1(c0 int, c1 TINYINT default ?, c2 SMALLINT default ?, c3 INTEGER default 1 + ?, c4 BINARY_INTEGER default ?, c5 BIGINT default ?, c6 DECIMAL(10,4) default ?, c7 NUMERIC(10,4) default ?, c8 int default ?, c9 int default ?, c10 int default ?, c11 FLOAT4 default ?, c12 FLOAT8 default ?, c13 FLOAT(3) default ?, c14 BINARY_DOUBLE default ?, c15 DEC(10,4) default ?, c16 INTEGER(6,3) default ?);"); + pst.setInt(1, 123); + pst.setInt(2, 100); + pst.setInt(3, 4 + 5 + 6); + pst.setInt(4, 123); + pst.setInt(5, 123); + pst.setString(6, "147258.14"); + pst.setString(7, "12369.01"); + pst.setString(8, "12"); + pst.setString(9, "12"); + pst.setString(10, "23"); + pst.setString(11, "10.365456"); + pst.setString(12, "123456.1234"); + pst.setString(13, "10.3214"); + pst.setString(14, "321.321"); + pst.setString(15, "123.123654"); + pst.setString(16, "123.123654"); + pst.executeUpdate(); + System.out.println("create table t1"); + + // character types + //pst = con.prepareStatement("drop table if exists t2;"); + // pst.executeUpdate(); + + /* try { + System.out.println(new Date( ) + "\n"); + Thread.sleep(1000*30); // sleep for 30 seconds + System.out.println(new Date( ) + "\n"); + } catch (Exception e) { + System.out.println("Got an exception!"); + }*/ + System.out.println("drop table if exists t2;"); + pst = con.prepareStatement("create table t2(c0 int, c1 char(4) default ? , c2 varchar(5) default ?, c3 VARCHAR2(100) default ?,c4 NVARCHAR2(100) default ?, c5 CLOB default ?, c6 TEXT default ? encrypted with (column_encryption_key = CreateCEK, encryption_type = DETERMINISTIC));"); + pst.setString(1, "TRUE"); + pst.setString(2, "FALSE"); + pst.setString(3, "d's你好'd你好asd"); + pst.setString(4, "你好sdsd"); + pst.setString(5, "s%s/'adas"); + pst.setString(6, "sad''你好'sada"); + pst.executeUpdate(); + + // boolean types + pst = con.prepareStatement("drop table if exists t3;"); + pst.executeUpdate(); + pst = con.prepareStatement("create table t3(c0 int, c1 BOOLEAN default ? , c2 BOOLEAN default ?, c3 BOOLEAN default ?);"); + pst.setBoolean(1, true); + pst.setBoolean(2, false); + pst.setString(3, "1"); + pst.executeUpdate(); + + // must not affect normal table creation + pst = con.prepareStatement("drop table if exists t4;"); + pst.executeUpdate(); + pst = con.prepareStatement("create table t4(c0 int, c1 BOOLEAN default true, c2 int default 1, c3 varchar default 'abc' );"); + pst.executeUpdate(); + + // alter table + pst = con.prepareStatement("drop table if exists t5;"); + pst.executeUpdate(); + pst = con.prepareStatement("create table t5(c0 int, c2 int default 1);"); + pst.executeUpdate(); + pst = con.prepareStatement("alter table t5 add column c3 varchar default ? 
;"); + pst.setString(1, "abc"); + pst.executeUpdate(); + + pst = con.prepareStatement("drop table if exists t6;"); + pst.executeUpdate(); + pst = con.prepareStatement("create table t6(c0 int, c1 BOOLEAN default true, c2 int default 1);"); + pst.executeUpdate(); + pst = con.prepareStatement("alter table t6 alter column c2 set default ?;"); + pst.setInt(1, 2); + pst.executeUpdate(); + + pst.close(); + + } catch (SQLException e) { + System.out.println(i); + System.out.println(j); + e.printStackTrace(); + System.exit(0); + } + + // close all connection; + try { + for (i = 0; i < conNumber; i++) { + cons[i].close(); + } + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} + diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/__init__.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionFetchSizeTest.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/agent/__init__.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionFetchSizeTest.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionFetchSizeTest.java b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionFetchSizeTest.java new file mode 100644 index 000000000..a8f1999f6 --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionFetchSizeTest.java @@ -0,0 +1,139 @@ +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Map.Entry; +import java.util.concurrent.CountDownLatch; +import java.sql.*; + +public class ClientEncryptionFetchSizeTest { + public static void testSqlByPassFetchSize(Connection conn) throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("DROP TABLE IF EXISTS sqlbypassfetchsize_tab;"); + stmt.execute("CREATE TABLE sqlbypassfetchsize_tab(id int, name varchar(50) encrypted with (column_encryption_key = FetchSizeCEK, encryption_type = DETERMINISTIC), birthday timestamp );"); + + PreparedStatement pstm = conn.prepareStatement("INSERT INTO sqlbypassfetchsize_tab values(?,?,?)"); + long time = System.currentTimeMillis(); + for (int i = 0; i < 50; i++) { + pstm.setInt(1, 1); + pstm.setString(2, "name" + i); + pstm.setTimestamp(3, new Timestamp(time)); + pstm.addBatch(); + } + pstm.executeBatch(); + + stmt.execute("set enable_opfusion=on;"); + conn.setAutoCommit(false); + PreparedStatement pstm1 = conn.prepareStatement("select * from sqlbypassfetchsize_tab where id = ? 
and birthday = ?"); + pstm1.setInt(1, 1); + pstm1.setTimestamp(2, new Timestamp(time)); + pstm1.setFetchSize(10); + ResultSet rs = pstm1.executeQuery(); + while (rs.next()) { + System.out.println("id: " + rs.getInt(1)+ " name: "+rs.getString(2)+" birthday: " + rs.getTimestamp(3)); + } + stmt.execute("DROP TABLE IF EXISTS sqlbypassfetchsize_tab;"); + conn.commit(); + rs.close(); + pstm1.close(); + pstm.close(); + stmt.close(); + System.out.println("OK (testSqlByPassFetchSize test)"); + } + + public static void testFetchSize(Connection conn) throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("DROP TABLE IF EXISTS fetchsize_tab;"); + stmt.execute("CREATE TABLE fetchsize_tab(id int, name varchar(50) encrypted with (column_encryption_key = FetchSizeCEK, encryption_type = DETERMINISTIC), birthday timestamp );"); + + PreparedStatement pstm = conn.prepareStatement("INSERT INTO fetchsize_tab values(?,?,?);"); + long time = System.currentTimeMillis(); + for (int i = 0; i < 50; i++) { + pstm.setInt(1, 1); + pstm.setString(2, "name" + i); + pstm.setTimestamp(3, new Timestamp(time)); + pstm.addBatch(); + } + pstm.executeBatch(); + + conn.setAutoCommit(false); + PreparedStatement pstm1 = conn.prepareStatement("select * from fetchsize_tab where id = ? and birthday = ?"); + pstm1.setInt(1, 1); + pstm1.setTimestamp(2, new Timestamp(time)); + pstm1.setFetchSize(10); + ResultSet rs = pstm1.executeQuery(); + while (rs.next()) { + System.out.println("id: " + rs.getInt(1)+ " name: "+rs.getString(2)+" birthday: " + rs.getTimestamp(3)); + } + stmt.execute("DROP TABLE IF EXISTS fetchsize_tab;"); + conn.commit(); + conn.setAutoCommit(true); + rs.close(); + pstm1.close(); + pstm.close(); + stmt.close(); + System.out.println("OK (testFetchSize test)"); + } + + public static void main(String[] args) throws IOException { + // prepare connect info + String PORT = args[0]; + String urls = "jdbc:postgresql://localhost:"+ PORT +"/regression?enable_ce=1"; + Properties urlProps = new Properties(); + + // create connection + Connection con = null; + try { + urlProps.setProperty("user", "test"); + urlProps.setProperty("password", "Gauss@123"); + con = DriverManager.getConnection(urls, urlProps); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + + try { + UtilTool.executeSql(con, "DROP TABLE IF EXISTS fetchsize_tab;"); + UtilTool.executeSql(con, "DROP TABLE IF EXISTS sqlbypassfetchsize_tab;"); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS FetchSizeCMK CASCADE;"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY FetchSizeCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY FetchSizeCEK WITH VALUES (CLIENT_MASTER_KEY = FetchSizeCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + testFetchSize(con); + testSqlByPassFetchSize(con); + try { + con.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_server/agent/task/__init__.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionMulSql.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/agent/task/__init__.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionMulSql.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionMulSql.java 
b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionMulSql.java new file mode 100644 index 000000000..4f5829cd9 --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionMulSql.java @@ -0,0 +1,116 @@ +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Map.Entry; +import java.util.concurrent.CountDownLatch; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSetMetaData; +import java.sql.SQLWarning; +import java.sql.*; + +public class ClientEncryptionMulSql { + + public static void main(String[] args) throws IOException { + // prepare connect info + String PORT = args[0]; + String serverAddress = "localhost"; + String databaseName = "regression"; + String username = "test"; + String password = "Gauss@123"; + // create the connection object + Connection con = null; + String jdbcConnectionString = + String.format("jdbc:postgresql://%s:%s/%s?enable_ce=1&loggerLevel=trace&loggerFile=cek_error.log", + serverAddress, PORT, databaseName); + String unce_jdbcConnectionString = + String.format("jdbc:postgresql://%s:%s/%s", + serverAddress, PORT, databaseName); + + try { + con = DriverManager.getConnection(jdbcConnectionString, username, password); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + + // CMK and CEK tests + UtilTool.executeSql(con, "drop table if exists test111;"); + UtilTool.executeSql(con, "drop table if exists test11;"); + UtilTool.executeSql(con, "drop table if exists test21;"); + UtilTool.executeSql(con, "drop table if exists test21;"); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS MulCMK1 CASCADE;"); + + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY MulCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = SM4);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY CEK_jdbc1 WITH VALUES (CLIENT_MASTER_KEY = MulCMK1, ALGORITHM = SM4_sm3);"); + System.out.println("test encrypted_key key end------------------------------------------------------------------------\n "); + + // create an encrypted table and insert data + System.out.println("test encrypted_key table start"); + System.out.println("create encrypted_key table start------------------------------------------------------------------------\n "); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS MulCMK1 CASCADE;CREATE CLIENT MASTER KEY MulCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = SM4);"); + System.out.println("drop create CEK_jdbc1 end"); + + // multi-statement SQL execution: expected to fail, since multi-SQL statements are not supported in this scenario + String createtab_t1 = "CREATE COLUMN ENCRYPTION KEY CEK_jdbc1 WITH VALUES (CLIENT_MASTER_KEY = MulCMK1, ALGORITHM = SM4_sm3);drop table if exists test11;CREATE TABLE test11 ("+ + " id_number int4 encrypted with (column_encryption_key = CEK_jdbc1, encryption_type = DETERMINISTIC), "+ + " dtime varchar2(20) , "+ + " name text encrypted with (column_encryption_key = CEK_jdbc1, encryption_type = DETERMINISTIC) "+ + ")with (orientation=row);" ; + UtilTool.executeSql(con,createtab_t1); + System.out.println("create encrypted_key table end------------------------------------------------------------------------\n ");
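 + // The multi-statement batch above is expected to be rejected under enable_ce=1, so the column + // encryption key and the table are recreated below one statement at a time before the insert tests.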
 + UtilTool.executeSql(con,"drop COLUMN ENCRYPTION KEY IF EXISTS CEK_jdbc1 CASCADE;"); + UtilTool.executeSql(con,"CREATE COLUMN ENCRYPTION KEY CEK_jdbc1 WITH VALUES (CLIENT_MASTER_KEY = MulCMK1, ALGORITHM = SM4_sm3);"); + UtilTool.executeSql(con,"CREATE TABLE test11 ( "+ + " id_number int4 encrypted with (column_encryption_key = CEK_jdbc1, encryption_type = DETERMINISTIC), "+ + " dtime varchar2(20) , "+ + " name text encrypted with (column_encryption_key = CEK_jdbc1, encryption_type = DETERMINISTIC) "+ + ")with (orientation=row);"); + + UtilTool.executeSql(con, "insert into test11 values(1, '18:00', 'hell1');"); + UtilTool.executeSql(con, "insert into test11 values(2, '18:00', 'hell2');"); + UtilTool.executeSql(con, "insert into test11 values(3, '18:00', 'hell3');"); + + // batch insert: expected to succeed; JDBC implements batches as multiple SQL statements internally, and that case is deliberately not restricted + String pre_sql = "insert into test11 values(?, ?, ?);"; + try { + PreparedStatement ps = con.prepareStatement(pre_sql); + ps.setInt(1, 50); + ps.setString(2, "19:00"); + ps.setString(3, "hello"); + ps.addBatch(); + + ps.setInt(1, 51); + ps.setString(2, "20:00"); + ps.setString(3, "hello2"); + ps.addBatch(); + + int[] actual = ps.executeBatch(); + //int rs1 = ps.executeUpdate(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + + try { + con.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} + diff --git a/src/gausskernel/dbmind/tools/ai_server/app/__init__.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionPrepareTest.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/app/__init__.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionPrepareTest.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionPrepareTest.java b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionPrepareTest.java new file mode 100644 index 000000000..8aaf11307 --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionPrepareTest.java @@ -0,0 +1,108 @@ +import java.io.IOException; +import java.sql.*; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.concurrent.CountDownLatch; + +public class ClientEncryptionPrepareTest { + public static void main(String[] args) throws SQLException { + // prepare connect info + String PORT = args[0]; + String serverAddress = "localhost"; + String databaseName = "regression"; + String username = "test"; + String password = "Gauss@123"; + // create the connection object + Connection con = null; + String jdbcConnectionString = + String.format("jdbc:postgresql://%s:%s/%s?enable_ce=1", serverAddress, PORT, databaseName); + try { + con = DriverManager.getConnection(jdbcConnectionString, username, password); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + // CMK and CEK tests + System.out.println( + "drop table start------------------------------------------------------------------------\n "); + + System.out.println("test------------------------------------------------------------------------\n "); + UtilTool.executeSql(con, "drop table if exists products;");
 + System.out.println( + "drop table end------------------------------------------------------------------------\n "); + System.out.println( + "drop pre_ImgCMK start------------------------------------------------------------------------\n "); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS pre_ImgCMK1 CASCADE;"); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS pre_ImgCMK CASCADE;"); + System.out.println( + "drop pre_ImgCMK end------------------------------------------------------------------------\n "); + + UtilTool.executeSql( + con, + "CREATE CLIENT MASTER KEY pre_ImgCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" ," + + " ALGORITHM = AES_256_CBC);"); + System.out.println( + "CREATE CLIENT MASTER KEY pre_ImgCMK1 WITH ( KEY_STORE = ? , KEY_PATH = \"gs_ktool/2\" , ALGORITHM =" + + " ?);"); + + PreparedStatement pstm = + con.prepareStatement( + "CREATE CLIENT MASTER KEY pre_ImgCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/2\"" + + " , ALGORITHM = SM4);"); + pstm.executeUpdate(); + System.out.println( + "create pre_ImgCMK1 end------------------------------------------------------------------------\n "); + + System.out.println( + "create pre_ImgCEK start------------------------------------------------------------------------\n" + + " "); + Statement st = con.createStatement(); + int rowsAffected = 0; + rowsAffected = + st.executeUpdate( + "CREATE COLUMN ENCRYPTION KEY pre_ImgCEK WITH VALUES (CLIENT_MASTER_KEY = pre_ImgCMK1," + + " ALGORITHM = SM4_sm3);"); + + System.out.println( + "create pre_ImgCEK end------------------------------------------------------------------------\n "); + String createtab_products = + " CREATE TABLE products ( product_id INTEGER, " + + " product_name VARCHAR2(60) encrypted with (column_encryption_key = pre_ImgCEK, encryption_type" + + " = DETERMINISTIC), category VARCHAR2(60) );"; + PreparedStatement pstm1 = con.prepareStatement(createtab_products); + /* + * check isValid for a client encryption connection; it should return true even when + * parsing errors occur in the JDBC client encryption routine + */ + if (con.isValid(0)) { + int rs1 = pstm1.executeUpdate(); + } + + try { + st.close(); + pstm.close(); + pstm1.close(); + con.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/__init__.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionSelectTest.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/app/monitor/__init__.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionSelectTest.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionSelectTest.java b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionSelectTest.java new file mode 100644 index 000000000..2d3805471 --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionSelectTest.java @@ -0,0 +1,254 @@
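 +// ClientEncryptionSelectTest: exercises INSERT/UPDATE/DELETE and equality SELECTs on encrypted +// DETERMINISTIC columns via Statement and PreparedStatement, including subqueries that compare +// encrypted columns, against both encrypted and unencrypted tables.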
+import java.sql.ResultSetMetaData; +import java.sql.SQLWarning; +import java.sql.*; + +public class ClientEncryptionSelectTest { + public static void main(String[] args) throws IOException { + // prepare connect info + String PORT = args[0]; + String serverAddress = "localhost"; + String databaseName = "regression"; + String username = "test"; + String password = "Gauss@123"; + Connection con = null; + String jdbcConnectionString = + String.format("jdbc:postgresql://%s:%s/%s?enable_ce=1", + serverAddress, PORT, databaseName); + try { + con = DriverManager.getConnection(jdbcConnectionString, username, password); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + UtilTool.executeSql(con, "drop table if exists creditcard_info;"); + UtilTool.executeSql(con, "drop table if exists creditcard_info1;"); + UtilTool.executeSql(con, "drop table if exists creditcard_info2;"); + UtilTool.executeSql(con, "drop table if exists creditcard_info3;"); + UtilTool.executeSql(con, "drop table if exists creditcard_info2_1;"); + UtilTool.executeSql(con, "drop table if exists creditcard_info3_1;"); + UtilTool.executeSql(con, "drop table if exists un_encrypted_table;"); + UtilTool.executeSql(con, "drop table if exists batch_table;"); + UtilTool.executeSql(con, "DROP TABLE IF EXISTS table_random;"); + + // create un_encrypted table + UtilTool.executeSql(con,"CREATE TABLE un_encrypted_table(id_number int, name varchar(50), credit_card varchar(19));"); + UtilTool.executeSql(con, "INSERT INTO un_encrypted_table VALUES (1,'joe','6217986500001288393');"); + UtilTool.executeSql(con, "INSERT INTO un_encrypted_table VALUES (2, 'joy','6219985678349800033');"); + UtilTool.fetchData(con, "select * from un_encrypted_table;"); + UtilTool.executeSql(con, "ALTER TABLE un_encrypted_table ALTER COLUMN name SET DEFAULT 'gauss';"); + UtilTool.executeSql(con, "INSERT INTO un_encrypted_table(id_number, credit_card) VALUES (3,'6219985678349800033');"); + UtilTool.fetchData(con, "select * from un_encrypted_table;"); + + + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS ImgCMK1 CASCADE;"); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS ImgCMK CASCADE;"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY ImgCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY ImgCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/2\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY ImgCEK1 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY ImgCEK WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + + UtilTool.executeSql(con,"CREATE TABLE creditcard_info (id_number int, name varchar(50) encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC),"+ + "credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info VALUES (1,'joe','6217986500001288393');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info VALUES (2, 'joy','6219985678349800033');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info VALUES (3, 'xiaoli', '6211877800001008888');"); + List parameters = new ArrayList<>(); + parameters.add(4); + parameters.add("Nina"); + parameters.add("6189486985800056893"); + 
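// With enable_ce=1 the JDBC driver is expected to encrypt the values bound for encrypted columns on the client side, so the test passes plaintext through the ordinary PreparedStatement setters.
+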
UtilTool.updateDataWithPrepareStmnt(con,"INSERT INTO creditcard_info VALUES (?, ?, ?);",parameters); + System.out.println("update--------------------"); + List parameters_up = new ArrayList<>(); + parameters_up.add("Nina1"); + parameters_up.add(4); + UtilTool.updateDataWithPrepareStmnt(con,"update creditcard_info set name = ? where id_number = ?;",parameters_up); + + UtilTool.executeSql(con, "INSERT INTO creditcard_info VALUES (5, 'fanny', '7689458639568569354');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info VALUES (6, 'cora', '7584572945579384675');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info VALUES (7, 'nancy', '7497593456879650677');"); + System.out.println("select * from creditcard_info where name = 'Nina';"); + UtilTool.fetchData(con, "select * from creditcard_info where name = 'Nina';"); + + System.out.println("delete--------------------"); + List parameters_de = new ArrayList<>(); + parameters_de.add(5); + parameters_de.add("fanny"); + UtilTool.updateDataWithPrepareStmnt(con,"delete from creditcard_info where id_number = ? and name = ?;",parameters_de); + System.out.println("select * from creditcard_info;"); + UtilTool.fetchData(con, "select * from creditcard_info;"); + + // 加上就有问题 + List select_parameters = new ArrayList<>(); + select_parameters.add("joe"); + select_parameters.add("6217986500001288393"); + UtilTool.fetchDataWithPrepareStmnt(con, "SELECT * from creditcard_info where name = ? and credit_card = ?;", select_parameters); + + UtilTool.executeSql(con, "CREATE TABLE creditcard_info1 (id_number int, name text encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC),"+ + "credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info1 VALUES (1,'joe','6217986500001288393');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info1 VALUES (2, 'joy','6219985678349800033');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info1 VALUES (3, 'xiaoli', '6211877800001008888');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info1 VALUES (4, 'Nina', '6189486985800056893');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info1 VALUES (5, 'fanny', '7689458639568569354');"); + System.out.println("select * from creditcard_info1 where name = (select name from creditcard_info order by id_number limit 1);"); + UtilTool.fetchData(con, "select * from creditcard_info1 where name = (select name from creditcard_info order by id_number limit 1);"); + + UtilTool.executeSql(con, "CREATE TABLE creditcard_info2 (id_number int, name1 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "name2 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info2 VALUES (1,'joe','joe','6217986500001288393');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info2 VALUES (2, 'joy','joy','6219985678349800033');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info2 VALUES (3, 'xiaoli','xiaoli', '6211877800001008888');"); + + UtilTool.executeSql(con, "CREATE TABLE creditcard_info3 (id_number int, name1 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "name2 text encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC),"+ + 
"credit_card int encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info3 VALUES (1,'joe','joe',62176500);"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info3 VALUES (2, 'joy','joy',62199856);"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info3 VALUES (3, 'xiaoli','xiaoli', 621187780);"); + System.out.println("select * from creditcard_info2 where name1 = (select name1 from creditcard_info3 order by id_number limit 1);"); + UtilTool.fetchData(con, "select * from creditcard_info2 where name1 = (select name1 from creditcard_info3 order by id_number limit 1);"); + UtilTool.fetchData(con, "select * from (select * from creditcard_info3) where credit_card = 62176500;"); + UtilTool.fetchData(con, "select name2 from (select * from creditcard_info3) group by name1 ,name2 having name1 = 'joe';"); + UtilTool.fetchData(con, "select * from (select * from creditcard_info3 where credit_card = 62176500);"); + UtilTool.fetchData(con, "select * from (select * from creditcard_info3) as a , (select * from creditcard_info2) as b where a.credit_card = 62176500 and a.name1='joe' and b.name1='joe';"); + UtilTool.fetchData(con, "select credit_card, name1 from (select name1,credit_card from creditcard_info3) as a , (select name2 from creditcard_info2) as b where name1='joe' and name2='joe' group by credit_card, name1 having credit_card = 62176500;"); + + UtilTool.executeSql(con, "CREATE TABLE creditcard_info2_1 (id_number int, name1 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "name2 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = RANDOMIZED),"+ + "credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + + UtilTool.executeSql(con, "INSERT INTO creditcard_info2_1 VALUES (1,'joe','joe','6217986500001288393');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info2_1 VALUES (2, 'joy','joy','6219985678349800033');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info2_1 VALUES (3, 'xiaoli','xiaoli', '6211877800001008888');"); + + UtilTool.executeSql(con, "CREATE TABLE creditcard_info3_1 (id_number int, name1 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "name2 text encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC),"+ + "credit_card int encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info3_1 VALUES (1,'joe','joe',62176500);"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info3_1 VALUES (2, 'joy','joy',62199856);"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info3_1 VALUES (3, 'xiaoli','xiaoli', 621187780);"); + UtilTool.fetchData(con, "select name1 from creditcard_info2 INTERSECT select name2 from creditcard_info2;"); + UtilTool.fetchData(con, "select name1 from creditcard_info3 UNION select name2 from creditcard_info2;"); + System.out.println("select name2 from creditcard_info3 INTERSECT select name2 from creditcard_info2;"); + UtilTool.fetchData(con, "select name2 from creditcard_info3 INTERSECT select name2 from creditcard_info2;"); + + UtilTool.executeSql(con, "CREATE TEMP TABLE creditcard_info4 (id_number int, name1 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "name2 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = RANDOMIZED),"+ + 
"credit_card varchar(19) encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + + UtilTool.executeSql(con, "INSERT INTO creditcard_info4 VALUES (1,'joe','joe','6217986500001288393');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info4 VALUES (2, 'joy','joy','6219985678349800033');"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info4 VALUES (3, 'xiaoli','xiaoli', '6211877800001008888');"); + + UtilTool.executeSql(con, "CREATE TEMP TABLE creditcard_info5 (id_number int, name1 text encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC),"+ + "name2 text encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC),"+ + "credit_card int encrypted with (column_encryption_key = ImgCEK1, encryption_type = DETERMINISTIC));"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info5 VALUES (1,'joe','joe',62176500);"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info5 VALUES (2, 'joy','joy',62199856);"); + UtilTool.executeSql(con, "INSERT INTO creditcard_info5 VALUES (3, 'xiaoli','xiaoli', 621187780);"); + + UtilTool.fetchData(con, "select * from creditcard_info4 where name1 = (select name1 from creditcard_info5 order by id_number limit 1);"); + UtilTool.fetchData(con, "select * from (select * from creditcard_info5) where credit_card = 62176500;"); + System.out.println("select name2 from (select * from creditcard_info5) group by name1 ,name2 having name1 = 'joe';"); + UtilTool.fetchData(con, "select name2 from (select * from creditcard_info5) group by name1 ,name2 having name1 = 'joe';"); + System.out.println("select * from (select * from creditcard_info5 where credit_card = 62176500);"); + UtilTool.fetchData(con, "select * from (select * from creditcard_info5 where credit_card = 62176500);"); + + + // batchpreparedstatement + + UtilTool.executeSql(con, "CREATE TABLE IF NOT EXISTS batch_table(id INT, name varchar(50) encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC), address varchar(50) encrypted with (column_encryption_key = ImgCEK, encryption_type = DETERMINISTIC));"); + PreparedStatement statemnet = null; + try { + String sql = "INSERT INTO batch_table (id, name, address) VALUES (?,?,?)"; + System.out.println("starting batch : " + sql); + statemnet = con.prepareStatement(sql); + int loopCount = 20; + System.out.println("Number of rows to add: " + loopCount); + for (int i = 1; i < loopCount + 1; ++i) { + statemnet.setInt(1, i); + statemnet.setString(2, "Name " + i); + statemnet.setString(3, "Address " + i); + // Add row to the batch. 
+                statement.addBatch(); + + } + System.out.println("executing batch ..."); + statement.executeBatch(); + statement.close(); + + } catch (SQLException e) { + e.printStackTrace(); + } + // test metadata + String query = "Select * from batch_table order by id"; + try { + Statement stmt_metadata = con.createStatement(); + ResultSet rs = stmt_metadata.executeQuery(query); + ResultSetMetaData resultSetMetaData = rs.getMetaData(); + for (int k = 1; k < resultSetMetaData.getColumnCount() + 1; ++k) { + System.out.println("Index: " + k + " column name: " + resultSetMetaData.getColumnName(k)); + System.out.println(" getColumnDisplaySize is: " + resultSetMetaData.getColumnDisplaySize(k)); + System.out.println(" getColumnClassName is: " + resultSetMetaData.getColumnClassName(k)); + System.out.println(" getColumnLabel is: " + resultSetMetaData.getColumnLabel(k)); + System.out.println(" getColumnType is: " + resultSetMetaData.getColumnType(k)); + System.out.println(" getColumnTypeName is: " + resultSetMetaData.getColumnTypeName(k)); + System.out.println(" getPrecision is: " + resultSetMetaData.getPrecision(k)); + System.out.println(" getScale is: " + resultSetMetaData.getScale(k)); + System.out.println(" isNullable is: " + resultSetMetaData.isNullable(k)); + System.out.println(" isAutoIncrement is: " + resultSetMetaData.isAutoIncrement(k)); + System.out.println(" isCaseSensitive is: " + resultSetMetaData.isCaseSensitive(k)); + System.out.println(" isCurrency is: " + resultSetMetaData.isCurrency(k)); + System.out.println(" isReadOnly is: " + resultSetMetaData.isReadOnly(k)); + System.out.println(" isSigned is: " + resultSetMetaData.isSigned(k)); + System.out.println(" isWritable is: " + resultSetMetaData.isWritable(k)); + System.out.println(" isDefinitelyWritable is: " + resultSetMetaData.isDefinitelyWritable(k)); + System.out.println(" isSearchable is: " + resultSetMetaData.isSearchable(k)); + System.out.println(" "); + System.out.println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + } + rs.close(); + stmt_metadata.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + UtilTool.executeSql(con, "DROP TABLE IF EXISTS table_random;"); + UtilTool.executeSql(con, "CREATE TABLE IF NOT EXISTS table_random (i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK, ENCRYPTION_TYPE = RANDOMIZED) , i2 INT);"); + UtilTool.executeSql(con, "INSERT INTO table_random (i1, i2) VALUES (12, 13);"); + UtilTool.executeSql(con, "INSERT INTO table_random VALUES (15,16);"); + UtilTool.executeSql(con, "INSERT INTO table_random (i1, i2) VALUES (22, 23), (24, 25), (26,27);"); + UtilTool.executeSql(con, "INSERT INTO table_random VALUES (35,36), (36,37), (38,39);"); + UtilTool.fetchData(con, "SELECT * from table_random ORDER BY i2;"); + UtilTool.fetchData(con, "SELECT i1 FROM table_random WHERE i2 = 25;"); + UtilTool.fetchData(con, "SELECT i1 FROM table_random WHERE i1 = 24;"); + + try { + con.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} + diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/__init__.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTransactionTest.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/__init__.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTransactionTest.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTransactionTest.java 
b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTransactionTest.java new file mode 100644 index 000000000..46d4bffdd --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTransactionTest.java @@ -0,0 +1,123 @@ +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Map.Entry; +import java.util.concurrent.CountDownLatch; +import java.sql.*; + +public class ClientEncryptionTransactionTest { + + public static void testEnd(Connection con) throws SQLException { + String[] sqls = { + "begin", + "insert into test_table values(1, '1')", + "rollback", + "begin", + "insert into test_table values(2, '2')", + "commit transaction", + "begin", + "insert into test_table values(3, '3')", + "end", + "begin", + "insert into test_table values(4, '4')", + "end", + "begin", + "insert into test_table values(5, '5')", + "end", + "begin", + "insert into test_table values(6, '6')", + "end transaction", + "begin", + "insert into test_table values(7, '7')", + "rollback" + }; + + con.setAutoCommit(false); + UtilTool.fetchData(con, "select * from test_table;"); + for(String sql : sqls) { + UtilTool.executeSql(con, sql); + System.out.println("sql--"+sql); + } + //another session + PreparedStatement pstmt = con.prepareStatement("select * from test_table order by id"); + ResultSet resultSet = pstmt.executeQuery(); + while(resultSet.next()) { + System.out.println(resultSet.getInt(1) + " " +resultSet.getString(2)); + } + pstmt.close(); + System.out.println("OK testEnd test"); + } + + public static void testRollback(Connection con) throws SQLException { + String[] sqls = { + "drop table if exists test_table;", + "create table test_table(id int, name varchar2(20) encrypted with (column_encryption_key = TransactionCEK, encryption_type = DETERMINISTIC));", + "insert into test_table values(1,'x');", + "insert into test_table values(11,'xx');", + "insert into test_table values(111,'xxx');", + "insert into test_table values(1,'xca');", + "begin;", + "insert into test_table values(1,'abc');", + "rollback;", + "begin work;", + "insert into test_table values(1,'def');", + "commit;" + }; + for(String sql : sqls) { + UtilTool.executeSql(con, sql); + } + UtilTool.fetchData(con, "select * from test_table;"); + UtilTool.executeSql(con, "truncate table test_table;"); + UtilTool.fetchData(con, "select * from test_table;"); + System.out.println("OK testRollback test"); + } + + public static void main(String[] args) throws IOException { + // prepare connect info + String PORT = args[0]; + String urls = "jdbc:postgresql://localhost:"+ PORT +"/regression?enable_ce=1"; + Properties urlProps = new Properties(); + + // create connection + Connection con = null; + try { + urlProps.setProperty("user", "test"); + urlProps.setProperty("password", "Gauss@123"); + con = DriverManager.getConnection(urls, urlProps); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + + try { + UtilTool.executeSql(con, "DROP TABLE IF EXISTS test_table;"); + + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS TransactionCMK CASCADE;"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY TransactionCMK WITH ( KEY_STORE = 
gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY TransactionCEK WITH VALUES (CLIENT_MASTER_KEY = TransactionCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + testRollback(con); + testEnd(con); + try { + con.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/__init__.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTriggerTest.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/__init__.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTriggerTest.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTriggerTest.java b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTriggerTest.java new file mode 100644 index 000000000..693d9473f --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/ClientEncryptionTriggerTest.java @@ -0,0 +1,109 @@ +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Map.Entry; +import java.util.concurrent.CountDownLatch; +import java.sql.*; + +public class ClientEncryptionTriggerTest { + public static void testTrigger(Connection conn) throws SQLException { + String[] sqls = { + "CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = triggerCEK1, ENCRYPTION_TYPE = DETERMINISTIC), id3 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = triggerCEK2, ENCRYPTION_TYPE = DETERMINISTIC));", + "CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = triggerCEK1, ENCRYPTION_TYPE = DETERMINISTIC), id3 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = triggerCEK2, ENCRYPTION_TYPE = DETERMINISTIC));", + "CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS\n" + + " $$\n" + + " DECLARE\n" + + " BEGIN\n" + + " INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3);\n" + + " RETURN NEW;\n" + + " END\n" + + " $$ LANGUAGE PLPGSQL;", + "CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS\n" + + " $$\n" + + " DECLARE\n" + + " BEGIN\n" + + " UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id2=OLD.id2;\n" + + " RETURN OLD;\n" + + " END\n" + + " $$ LANGUAGE PLPGSQL;", + "CREATE OR REPLACE FUNCTION TRI_DELETE_FUNC() RETURNS TRIGGER AS\n" + + " $$\n" + + " DECLARE\n" + + " BEGIN\n" + + " DELETE FROM test_trigger_des_tbl WHERE id2=OLD.id2;\n" + + " RETURN OLD;\n" + + " END\n" + + " $$ LANGUAGE PLPGSQL;", + "CREATE TRIGGER insert_trigger BEFORE INSERT ON test_trigger_src_tbl FOR EACH ROW EXECUTE PROCEDURE tri_insert_func();", + "CREATE TRIGGER update_trigger AFTER UPDATE ON test_trigger_src_tbl FOR EACH ROW EXECUTE PROCEDURE tri_update_func();", + "CREATE TRIGGER delete_trigger BEFORE DELETE ON test_trigger_src_tbl FOR EACH ROW EXECUTE PROCEDURE tri_delete_func();", + "INSERT INTO test_trigger_src_tbl VALUES(100,200,300);", + "SELECT * FROM test_trigger_src_tbl;", + "SELECT * 
FROM test_trigger_des_tbl;", + + }; + for (int i = 0; i < sqls.length - 2; i++) { + UtilTool.executeSql(conn, sqls[i]); + } + for (int i = sqls.length - 2; i < sqls.length; i++) { + UtilTool.fetchData(conn, sqls[i]); + } + UtilTool.executeSql(conn, "UPDATE test_trigger_src_tbl SET id3=400 WHERE id2=200;"); + for (int i = sqls.length - 2; i < sqls.length; i++) { + UtilTool.fetchData(conn, sqls[i]); + } + UtilTool.executeSql(conn, "DELETE FROM test_trigger_src_tbl WHERE id2=200;"); + for (int i = sqls.length - 2; i < sqls.length; i++) { + UtilTool.fetchData(conn, sqls[i]); + } + System.out.println("OK (1 test)"); + } + public static void main(String[] args) throws IOException { + // prepare connect info + String PORT = args[0]; + String urls = "jdbc:postgresql://localhost:"+ PORT +"/regression?enable_ce=1"; + Properties urlProps = new Properties(); + + // create connection + Connection con = null; + try { + urlProps.setProperty("user", "test"); + urlProps.setProperty("password", "Gauss@123"); + con = DriverManager.getConnection(urls, urlProps); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + + try { + UtilTool.executeSql(con, "drop table if exists test_trigger_src_tbl;"); + UtilTool.executeSql(con, "drop table if exists test_trigger_des_tbl;"); + UtilTool.executeSql(con, "DROP CLIENT MASTER KEY IF EXISTS triggerCMK CASCADE;"); + UtilTool.executeSql(con, "CREATE CLIENT MASTER KEY triggerCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY triggerCEK1 WITH VALUES (CLIENT_MASTER_KEY = triggerCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + UtilTool.executeSql(con, "CREATE COLUMN ENCRYPTION KEY triggerCEK2 WITH VALUES (CLIENT_MASTER_KEY = triggerCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"); + testTrigger(con); + try { + con.close(); + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } catch (SQLException e) { + e.printStackTrace(); + System.exit(0); + } + } +} \ No newline at end of file diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/similarity.py b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/UtilTool.class similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/anomaly_detection/similarity.py rename to src/test/regress/jdbc_ce_test/ce_ddl_pbe/UtilTool.class diff --git a/src/test/regress/jdbc_ce_test/ce_ddl_pbe/UtilTool.java b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/UtilTool.java new file mode 100644 index 000000000..abba85d66 --- /dev/null +++ b/src/test/regress/jdbc_ce_test/ce_ddl_pbe/UtilTool.java @@ -0,0 +1,266 @@ +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Map.Entry; +import java.util.concurrent.CountDownLatch; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSetMetaData; +import java.sql.SQLWarning; +import java.sql.*; + +public class UtilTool { + + + public static void addValueToLine(StringBuilder line, String value, int MaxLen, String space, boolean centerIt) { + //line.append(space); + int numberOfPrefixSpaces = 1; 
+ if (centerIt) { + numberOfPrefixSpaces = (MaxLen - value.length()) / 2 + 1; + } + for (int i = 0; i < numberOfPrefixSpaces; ++i) { + line.append(space); + } + line.append(value); + for (int i = numberOfPrefixSpaces + value.length(); i < MaxLen + 2; ++i) { + line.append(space); + } + + } + + public static void printRS4Test(ResultSet rs) { + try { + ResultSetMetaData rsmd = rs.getMetaData(); + int columnsCount = rsmd.getColumnCount(); + List maxColumnValueLength = new ArrayList<>(); + //Calculate the + for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) { + maxColumnValueLength.add(rsmd.getColumnName(colIndex).length()); + } + List> data = new ArrayList<>(); + while (rs.next()){ + List record = new ArrayList<>(); + for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) { + String colValue = rs.getString(colIndex); + if (colValue!= null) { + if (colValue.length() > maxColumnValueLength.get(colIndex - 1)) { + maxColumnValueLength.set(colIndex - 1, colValue.length()); + } + record.add(colValue); + } + else { + record.add("null"); + } + + } + data.add(record); + } + + String space = " "; + String dash = "-"; + String connector = "+"; + String sep = "|"; + //Print the result set headers; + StringBuilder lineHeaders = new StringBuilder(); + for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) { + String colName = rsmd.getColumnName(colIndex); + addValueToLine(lineHeaders, colName,maxColumnValueLength.get(colIndex - 1), space, true); + if (colIndex != columnsCount) { + lineHeaders.append(sep); + } + } + System.out.println(lineHeaders); + StringBuilder lineSep = new StringBuilder(); + for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) { + addValueToLine(lineSep, "", maxColumnValueLength.get(colIndex - 1), dash, true); + if (colIndex != columnsCount) { + lineSep.append(connector); + } + } + System.out.println(lineSep); + for (List record : data) { + StringBuilder lineData = new StringBuilder(); + for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) { + addValueToLine(lineData, record.get(colIndex - 1), maxColumnValueLength.get(colIndex - 1), space, false); + if (colIndex != columnsCount) { + lineData.append(sep); + } + } + System.out.println(lineData); + } + if (data.size() == 0) { + System.out.println("(0 row)"); + } + else{ + System.out.println("(" + data.size() + " rows)"); + } + System.out.println(""); + + } catch (SQLException e1) { + e1.printStackTrace(); + return; + } + + ResultSetMetaData rsmd; + try { + rsmd = rs.getMetaData(); + } catch (SQLException e1) { + e1.printStackTrace(); + return; + } + int columnsNumber = 0; + try { + columnsNumber= rsmd.getColumnCount(); + } catch (SQLException e1) { + e1.printStackTrace(); + return; + } + /*for (int i = 1 ; i < columnsNumber + 1; ++i) { + try { + System.out.print(rsmd.getColumnName(i)); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + System.out.print(" "); + }*/ + final String line_sep = "------------------------------"; + try { + while (rs.next()){ + System.out.println(""); + System.out.println(line_sep); + for (int i = 1 ; i < columnsNumber + 1; ++i) { + String value = rs.getString(i); + System.out.print(value); + System.out.print(" | "); + } + } + System.out.println(""); + //System.out.println(line_sep); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + try { + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + } + public static boolean fetchData(Connection conn, String sql) { + Statement st = null; + try { + st = 
conn.createStatement(); + ResultSet rs = null; + rs = st.executeQuery(sql); + printRS4Test(rs); + if (rs!=null) { + try { + rs.close(); + } catch (Exception e) { + e.printStackTrace(); + } + } + st.close(); + } catch (SQLException e) { + e.printStackTrace(); + return false; + } + return true; + } + public static boolean executeSql(Connection conn, String sql) { + Statement st = null; + try { + st = conn.createStatement(); + } catch (SQLException e) { + e.printStackTrace(); + return false; + } + int rowsAffected = 0; + try { + rowsAffected = st.executeUpdate(sql); + SQLWarning warnings = st.getWarnings(); + if (warnings != null) { + System.out.println("NOTICE: " + warnings.getMessage()); + warnings.getNextWarning(); + } + + } catch (SQLException e) { + e.printStackTrace(); + return false; + } + try { + st.close(); + } catch (SQLException e) { + e.printStackTrace(); + return false; + } + return true; + } + public static void setStmntParameters(List parameters, PreparedStatement statement) throws SQLException { + int i = 0; + for (Object param: parameters) { + ++i; + if (param instanceof Integer) { + statement.setInt(i, (Integer)param); + } + else if (param instanceof Float) { + statement.setFloat(i, (Float)param); + } + else if (param instanceof Double) { + statement.setDouble(i, (Double)param); + } + else{ + statement.setString(i,(String) param); + } + } + } + public static boolean updateDataWithPrepareStmnt(Connection conn, String sql, List parameters) { + try { + PreparedStatement statement = conn.prepareStatement(sql); + setStmntParameters(parameters, statement); + int numberOfRowsUpdated = statement.executeUpdate(); + System.out.println("rows updated: " + numberOfRowsUpdated); + statement.close(); + } catch (SQLException e) { + e.printStackTrace(); + return false; + } + return true; + } + public static boolean fetchDataWithPrepareStmnt(Connection conn, String sql, List parameters) { + //String sql = "select count(*) from t_varchar where name = ?"; + try { + PreparedStatement statement = conn.prepareStatement(sql); + setStmntParameters(parameters, statement); + ResultSet rs = statement.executeQuery(); + printRS4Test(rs); + if (rs!=null) { + try { + rs.close(); + } catch (Exception e) { + e.printStackTrace(); + } + } + statement.close(); + } catch (SQLException e) { + e.printStackTrace(); + return false; + } + return true; + } +} \ No newline at end of file diff --git a/src/test/regress/jdbc_client/.gitignore b/src/test/regress/jdbc_client/.gitignore new file mode 100644 index 000000000..1ba172159 --- /dev/null +++ b/src/test/regress/jdbc_client/.gitignore @@ -0,0 +1,4 @@ +classes +.classpath +/bin/ +.project diff --git a/src/test/regress/jdbc_client/CMakeLists.txt b/src/test/regress/jdbc_client/CMakeLists.txt new file mode 100644 index 000000000..cecd9b712 --- /dev/null +++ b/src/test/regress/jdbc_client/CMakeLists.txt @@ -0,0 +1,7 @@ +set(JDBC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../jdbc_client_lib/gsjdbc4.jar) +find_package(Java REQUIRED) +include(UseJava) +file(GLOB_RECURSE java_files ${CMAKE_CURRENT_SOURCE_DIR}/*.java) +message(STATUS "Java file ${java_files}") +add_jar(jdbc_client ${java_files} INCLUDE_JARS ${JDBC_PATH} MANIFEST ${CMAKE_CURRENT_SOURCE_DIR}/manifest.txt) +message(STATUS "gsjdbc4.jar file path ${JDBC_PATH}") \ No newline at end of file diff --git a/src/test/regress/jdbc_client/Makefile b/src/test/regress/jdbc_client/Makefile new file mode 100644 index 000000000..2a3af2e23 --- /dev/null +++ b/src/test/regress/jdbc_client/Makefile @@ -0,0 +1,31 @@ +subdir = 
distribute/test/regress/jdbc_client +top_builddir = ../../../../opengauss +include $(top_builddir)/src/Makefile.global + +JFLAGS = -cp src:../jdbc_client_lib/gsjdbc4.jar -d classes +JRFLAGS = cfm jdbc_client.jar manifest.txt -C classes gauss + +JC = ${JAVA_HOME}/bin/javac +JR = ${JAVA_HOME}/bin/jar + + +.SUFFIXES: .java .class + +JCLASSES = $(shell find $(.) -name '*.java') + +all: jdbc_client.jar + +jdbc_client.jar: cclasses + $(JR) $(JRFLAGS) + +classes: + mkdir -p classes + +.java.class: classes + $(JC) $(JFLAGS) $*.java + +cclasses: classes $(JCLASSES:.java=.class) + +clean: + rm -rf classes + rm -f jdbc_client.jar diff --git a/src/test/regress/jdbc_client/manifest.txt b/src/test/regress/jdbc_client/manifest.txt new file mode 100644 index 000000000..9d22baef9 --- /dev/null +++ b/src/test/regress/jdbc_client/manifest.txt @@ -0,0 +1,5 @@ +Manifest-Version: 1.0 +Class-Path: . ../jdbc_client_lib/gsjdbc4.jar +Main-Class: gauss.regress.jdbc.JdbcClient + + diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/IBinaryTest.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/IBinaryTest.java new file mode 100644 index 000000000..888b09f31 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/IBinaryTest.java @@ -0,0 +1,10 @@ +package gauss.regress.jdbc; + +import gauss.regress.jdbc.utils.DatabaseConnection4Test; +/** + * Interface that binary tests have to implement + * + */ +public interface IBinaryTest { + void execute(DatabaseConnection4Test conn); +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/JdbcClient.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/JdbcClient.java new file mode 100644 index 000000000..ba07781ee --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/JdbcClient.java @@ -0,0 +1,165 @@ +/** + * JDBC client for parsing SQL files, running them over JDBC rather than gsql + */ +package gauss.regress.jdbc; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Field; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.*; +import java.io.*; + +import gauss.regress.jdbc.utils.CommandType; +import gauss.regress.jdbc.utils.DBUtils; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; +import gauss.regress.jdbc.utils.IInputFileParser; +import gauss.regress.jdbc.utils.InputFileParser; +import gauss.regress.jdbc.utils.OutFileWriter; +import gauss.regress.jdbc.utils.SQLCommand; +import gauss.regress.jdbc.utils.SyntaxError; + +public class JdbcClient { + static public final int BAD_INPUT_EXIT_CODE = 1; + static public final int CANNOT_CONNECT_EXIT_CODE = 2; + static public final int CANNOT_OPEN_OUTPUT_FILE_EXIT_CODE = 3; + static public final int CANNOT_OPEN_INPUT_FILE_EXIT_CODE = 4; + static public final int CANNOT_WRITE_EXIT_CODE = 5; + static public final int CANNOT_SHELLCMD_EXIT_CODE = 6; + + static private OutFileWriter m_outputFile = null; + + /** + * provide usage information + */ + private static void help() { + m_outputFile.writeLine("jdbc_client.jar server_address port database_name user_name password input_file output_file"); + m_outputFile.writeLine("Example: jdbc_client.jar localhost 28777 regression jdbc_regress 1q@W3e4r */sql/cl_varchar.sql */results/cl_varchar.out"); + } + + /** + * Parses, executes and writes output for a 
SQL file + * @param conn database connection + * @param inputFileName input file path + */ + private static void executeFile(DatabaseConnection4Test conn, String inputFileName) { + IInputFileParser sqlFileParser = new InputFileParser(); + if (!sqlFileParser.load(inputFileName)) { + m_outputFile.writeLine("failed opening input file: " + inputFileName); + System.exit(CANNOT_OPEN_INPUT_FILE_EXIT_CODE); + } + CommandType lastCommandType = CommandType.EMPTY; + while (sqlFileParser.moveNext()) { + SQLCommand action = sqlFileParser.get(); + if (action != null) { + switch (action.getCommandType()) { + case SHELL: + conn.gsKtoolExec(action.getCommand()); + break; + case COMMENT: + m_outputFile.writeLine(action.getCommand()); + break; + case EMPTY: + if (lastCommandType.equals(CommandType.EXECUTE)) { + //This is really nasty, but that's what gsql is doing + if (action.getCommand().length() > 0) { + m_outputFile.writeLine(action.getCommand()); + } + } + break; + case EXECUTE: + conn.executeSql(action.getCommand()); + break; + case SELECT: + conn.fetchData(action.getCommand()); + break; + case DESCRIBE: + m_outputFile.writeLine(action.getCommand()); + conn.describeObject(action.getCommand()); + break; + case DESCRIBE_FUNCTION: + m_outputFile.writeLine(action.getCommand()); + //conn.describeFunction(action.getCommand()); + break; + default: + System.out.println("Unknown command type"); + break; + } + } + lastCommandType = action.getCommandType(); + } + } + /** + * Execute binary tests - tests that are implemented as Java code + * @param[in] con4Test connection to the database with some utility functions + * @param[in] testName the name of the test + */ + private static void executeBinTest(DatabaseConnection4Test con4Test, String testName) { + String className = "gauss.regress.jdbc.bintests." 
+ testName; + try { + IBinaryTest test = (IBinaryTest)Class.forName(className).newInstance(); + test.execute(con4Test); + + } catch (InstantiationException e) { + e.printStackTrace(); + con4Test.getFileWriter().writeLine("Test failed with InstantiationException"); + } catch (IllegalAccessException e) { + e.printStackTrace(); + con4Test.getFileWriter().writeLine("Test failed with IllegalAccessException"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + con4Test.getFileWriter().writeLine("Test failed with ClassNotFoundException"); + } + } + + /** + * @param args + */ + public static void main(String[] args) { + int EXPECTED_PARAMETERS_COUNT = 7; + if (args.length != EXPECTED_PARAMETERS_COUNT) { + help(); + System.exit(BAD_INPUT_EXIT_CODE); + } + + String serverAddress = args[0]; + String port = args[1]; + String databaseName = args[2]; + String username = args[3]; + String password = args[4]; + String inputFile = args[5]; + String outputFile = args[6]; + DatabaseConnection4Test con4Test = null; + try { + m_outputFile = new OutFileWriter(); + m_outputFile.openFile(outputFile); + con4Test = new DatabaseConnection4Test(m_outputFile); + if (!con4Test.connect(serverAddress, port, databaseName, username, password)) { + m_outputFile.writeLine("database connection failed"); + System.exit(CANNOT_CONNECT_EXIT_CODE); + } + if (inputFile.endsWith("Bin")) { + executeBinTest(con4Test, inputFile); + } + else { + executeFile(con4Test, inputFile); + } + m_outputFile.close(); + } catch (IOException e) { + e.printStackTrace(); + System.exit(CANNOT_OPEN_OUTPUT_FILE_EXIT_CODE); + } + if (con4Test != null) { + con4Test.close(); + } + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/JdbcClientWrapper4Test.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/JdbcClientWrapper4Test.java new file mode 100644 index 000000000..e1408f16a --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/JdbcClientWrapper4Test.java @@ -0,0 +1,41 @@ + +package gauss.regress.jdbc; + +import java.sql.SQLException; +import gauss.regress.jdbc.utils.DBUtils; +public class JdbcClientWrapper4Test { + + public static void main(String[] args) { + + String testName = "FunctionsMetadataBin"; + String port = "25632"; + String serverAddress = "localhost"; + String databaseName = "java_test"; + String userName = "jdbc_regress"; + String password = "1q@W3e4r"; + execute_test_job(testName, serverAddress, port, databaseName, userName, password); + } + + protected static void execute_test_job(String testName, String serverAddress, String port, String databaseName, + String userName, String password) { + String baseLocation = System.getenv("CODE_BASE") + "/../../distribute/test/regress/"; + String inputFile = ""; + if (testName.endsWith("Bin")){ + inputFile = testName; + } + else { + inputFile = baseLocation + "sql/" + testName + ".sql"; + } + String outputFile = baseLocation + "results/" + testName + ".out"; + + try { + DBUtils.createDatabase4Test(serverAddress, port, "postgres", userName, password, "java_test"); + } catch (SQLException e) { + e.printStackTrace(); + return ; + } + + String[] args2Send = {serverAddress, port, databaseName, userName, password, inputFile, outputFile}; + JdbcClient.main(args2Send); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/TestLeak.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/TestLeak.java new file mode 100644 index 000000000..e57bae8d6 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/TestLeak.java 
@@ -0,0 +1,16 @@ +package gauss.regress.jdbc; + +// JdbcClientWrapper4LeakTest +public class TestLeak extends JdbcClientWrapper4Test{ + + public static void main(String[] args) { + String serverAddress = args[0]; + String port = args[1]; + String databaseName = args[2]; + String username = args[3]; + String password = args[4]; + String testName = args[5]; + execute_test_job(testName, serverAddress, port, databaseName, username, password); + + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchPreparedStatementsBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchPreparedStatementsBin.java new file mode 100644 index 000000000..70f4e7434 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchPreparedStatementsBin.java @@ -0,0 +1,45 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.PreparedStatement; +import java.sql.SQLException; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class BatchPreparedStatementsBin implements IBinaryTest{ + + @Override + public void execute(DatabaseConnection4Test conn) { + String sql; + BinUtils.createCLSettings(conn); + sql = "CREATE TABLE IF NOT EXISTS t_varchar(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"; + conn.executeSql(sql); + + PreparedStatement statement = null; + try { + sql = "INSERT INTO t_varchar (id, name, address) VALUES (?,?,?);"; + conn.getFileWriter().writeLine("starting batch : " + sql); + statement = conn.getConnection().prepareStatement(sql); + int loopCount = 20; + conn.getFileWriter().writeLine("Number of rows to add: " + loopCount); + for (int i = 1; i < loopCount + 1; ++i) { + statement.setInt(1, i); + statement.setString(2, "Name " + i); + statement.setString(3, "Address " + i); + statement.addBatch(); + } + conn.getFileWriter().writeLine("executing batch ..."); + statement.executeBatch(); + + } catch (SQLException e) { + conn.getFileWriter().writeLine("Unexpected error:" + e.getMessage()); + e.printStackTrace(); + } + conn.fetchData("select * from t_varchar order by id;"); + conn.executeSql("DROP table t_varchar;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchSimpleQueryBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchSimpleQueryBin.java new file mode 100644 index 000000000..736bcaac1 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchSimpleQueryBin.java @@ -0,0 +1,37 @@ +package gauss.regress.jdbc.bintests; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class BatchSimpleQueryBin implements IBinaryTest{ + + @Override + public void execute(DatabaseConnection4Test conn) { + String sql; + + BinUtils.createCLSettings(conn); + sql = "CREATE TABLE IF NOT EXISTS t_varchar(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"; + conn.executeSql(sql); + StringBuilder sqlBuilder = new StringBuilder(); + int loopCount = 2; + for (int i = 1; i < loopCount + 1; ++i) { + sqlBuilder.append("INSERT 
INTO t_varchar (id, name, address) " + + "VALUES (" + i + ", 'MyName-" + i + "', 'MyAddress-" + i + "');"); + } + conn.executeSql(sqlBuilder.toString()); + + // split multiple statements to adapt to current features + for (int i = 1; i < loopCount + 1; ++i) { + sql = "INSERT INTO t_varchar (id, name, address) " + + "VALUES (" + i + ", 'MyName-" + i + "', 'MyAddress-" + i + "');"; + conn.executeSql(sql); + } + + conn.fetchData("SELECT * from t_varchar ORDER BY id;"); + conn.executeSql("DROP table t_varchar;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchStatamentBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchStatamentBin.java new file mode 100644 index 000000000..c175df029 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BatchStatamentBin.java @@ -0,0 +1,40 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.SQLException; +import java.sql.Statement; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class BatchStatamentBin implements IBinaryTest{ + + @Override + public void execute(DatabaseConnection4Test conn) { + String sql; + BinUtils.createCLSettings(conn); + sql = "CREATE TABLE IF NOT EXISTS t_varchar(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"; + conn.executeSql(sql); + + try { + Statement statement = conn.getConnection().createStatement(); + int loopCount = 20; + for (int i = 1; i < loopCount + 1; ++i) { + sql = "INSERT INTO t_varchar (id, name, address) VALUES (" + i + ", 'MyName" + i + "', 'MyAddress" + i + "');"; + conn.getFileWriter().writeLine("added to batch " + sql); + statement.addBatch(sql); + } + conn.getFileWriter().writeLine("executing batch ..."); + statement.executeBatch(); + conn.fetchData("select * from t_varchar order by id;"); + } catch (SQLException e) { + conn.getFileWriter().writeLine("Unexpected error:" + e.getMessage()); + e.printStackTrace(); + } + + conn.executeSql("DROP table t_varchar;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BinUtils.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BinUtils.java new file mode 100644 index 000000000..acdf44477 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/BinUtils.java @@ -0,0 +1,108 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.CallableStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; + +import gauss.regress.jdbc.utils.DBUtils; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class BinUtils { + + static public void getMultipleResultsets(DatabaseConnection4Test conn, String sql) { + try { + Statement stmt = conn.getConnection().createStatement(); + boolean results = stmt.execute(sql); + do { + if (results) { + ResultSet rs = stmt.getResultSet(); + conn.printRS4Test(rs); + } + results = stmt.getMoreResults(); + } while (results); + } catch (SQLException e) { + e.printStackTrace(); + } + + } + 
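+    // getMultipleResultsets above follows the standard JDBC pattern for statements that may return several result sets: execute() reports whether the first result is a ResultSet, and getMoreResults() advances until no further result sets remain.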
+    static public void createCLSettings(DatabaseConnection4Test conn) { + conn.gsKtoolExec("\\! gs_ktool -d all"); + conn.gsKtoolExec("\\! gs_ktool -g"); + String sql; + sql = "CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"; + conn.executeSql(sql); + sql = "CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"; + conn.executeSql(sql); + } + + static public void dropCLSettings(DatabaseConnection4Test conn) { + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + conn.gsKtoolExec("\\! gs_ktool -d all"); + + } + + static public void cleanCLSettings(DatabaseConnection4Test conn) { + String sql; + sql = "DROP COLUMN ENCRYPTION KEY cek1;"; + conn.executeSql(sql); + sql = "DROP CLIENT MASTER KEY cmk1;"; + conn.executeSql(sql); + conn.gsKtoolExec("\\! gs_ktool -d all"); + } + + /** + * Prints the data of a parameter + * @param conn connection object + * @param parameter the parameter value + * @param functionName the name of the server function it was fetched from + * @param index the parameter index in the server function + */ + public static void printParameter(DatabaseConnection4Test conn, Object parameter, String functionName, int index) { + conn.getFileWriter().writeLine(functionName + " value of index " + index + + " Type is " + parameter.getClass().getName() + " value is " + parameter); + } + + /** + * Invoke a server function using CallableStatement and parse its output parameters + * @param conn connection object + * @param functionName method name + * @param numberOfParameters parameter count in the function + */ + static void invokeFunctionWithIntegerOutParams(DatabaseConnection4Test conn, String functionName, + int numberOfParameters) { + try { + + conn.getFileWriter().writeLine("Invoking " + functionName + " using CallableStatement:"); + StringBuilder command = new StringBuilder(); + command.append("{call "); + command.append(functionName); + command.append("("); + for (int index = 0; index < numberOfParameters; ++index) { + if (index > 0) { + command.append(","); + } + command.append("?"); + } + command.append(")}"); + CallableStatement callStmnt = conn.getConnection().prepareCall(command.toString()); + for (int index = 0; index < numberOfParameters; ++index) { + callStmnt.registerOutParameter(index + 1, Types.INTEGER); + } + callStmnt.execute(); + for (int index = 0; index < numberOfParameters; ++index) { + Object data = callStmnt.getObject(index + 1); + BinUtils.printParameter(conn, data, functionName, index); + } + } catch (SQLException e) { + e.printStackTrace(); + conn.getFileWriter().writeLine("ERROR running " + functionName + " " + e.getMessage()); + } + + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ClientLogicCacheRetryBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ClientLogicCacheRetryBin.java new file mode 100644 index 000000000..21061d17e --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ClientLogicCacheRetryBin.java @@ -0,0 +1,517 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class ClientLogicCacheRetryBin implements IBinaryTest{ + + /** + * This test is added to validate the client logic cache retry mechanism 
of the JDBC driver + */ + @Override + public void execute(DatabaseConnection4Test conn) { + + // Create CL column settings + createCLColumnSettings(conn); + createCLSettingsAndTable(conn); + // batch and cache retry is not yet supported createCLSettingsAndInsertBatch2Table(conn); + // Create and update Data while refreshing the cache: + createCLSettingsAndInsert2Table(conn); + createCLSettingsAndInsert2TableUsingPrepare(conn); + createCLSettingsAndUpdating2Table(conn); + + // Read data while performing cache refresh + createCLSettingsAndReadwithCriteria(conn, true); + createCLSettingsAndReadwithCriteria(conn, false); + // multiple sql is not supported createTableAndReadFrom2ndConnectionMultiRS(conn); + createTableAndReadFrom2ndConnection(conn); + + // Function with CL settings + checkFunctionCacheRefresh(conn); + } + /** + * Writes header to the output file, to make it easy to distinguish between test cases + * @param conn + * @param header + */ + private void writHeader(DatabaseConnection4Test conn, String header) { + conn.getFileWriter().writeLine(""); + for (int i = 0; i < header.length() + 4; ++i) { + conn.getFileWriter().write("*"); + } + conn.getFileWriter().writeLine(""); + conn.getFileWriter().write("* "); + conn.getFileWriter().write(header); + conn.getFileWriter().write(" *"); + conn.getFileWriter().writeLine(""); + + for (int i = 0; i < header.length() + 4; ++i) { + conn.getFileWriter().write("*"); + } + conn.getFileWriter().writeLine(""); + } + /** + * Validate the cache refresh mechanism as per function definitions with client logic parameters + * @param conn + */ + private void checkFunctionCacheRefresh(DatabaseConnection4Test conn) { + + writHeader(conn, "Cache retry for functions with client logic parameters"); + + DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter()); + conn0.reconnect(); + /* conn1 is used as a 2nd connection whose cache is not yet updated */ + DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter()); + conn1.reconnect(); + + DatabaseConnection4Test conn2 = new DatabaseConnection4Test(conn, conn.getFileWriter()); + conn2.connectWithoutReloadingCacheOnIsValid(); + + BinUtils.createCLSettings(conn0); + + conn0.executeSql("CREATE TABLE t_processed " + + "(name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"); + conn0.executeSql("insert into t_processed " + + "values('one',1,10),('two',2,20),('three',3,30),('four',4,40),('five',5,50),('six',6,60)," + + "('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100)"); + + try { + if (!conn1.getConnection().isValid(60)) { + conn1.getFileWriter().writeLine("isValid Failed for connection 1"); + } + } catch (SQLException e1) { + e1.printStackTrace(); + } + /* Use case - return record ID + * This use case is not yet covered as we must access the server to resolve the actual data type + */ + conn0.executeSql("CREATE FUNCTION f_out_only(out1 OUT int, out2 OUT int) AS " + + "'SELECT val, val2 from t_processed ORDER BY name LIMIT 1' LANGUAGE SQL"); + conn0.fetchData("SELECT f_out_only ()"); + conn1.fetchData("SELECT f_out_only ()"); + + try { + if (!conn1.getConnection().isValid(60)) { + conn1.getFileWriter().writeLine("isValid Failed for connection 1"); + } + } catch (SQLException e) { + conn1.getFileWriter().writeLine("isValid Failed with error"); + e.printStackTrace(); + } + conn1.getFileWriter().writeLine("Trying SELECT f_out_only () again after calling to isValid 
method:"); + conn1.fetchData("SELECT f_out_only()"); + conn2.getFileWriter().writeLine("conn2, which is to be used now have refreshClientEncryption set to zero"); + conn2.fetchData("SELECT f_out_only()"); + try { + if (!conn2.getConnection().isValid(60)) { + conn2.getFileWriter().writeLine("isValid Failed for connection 1"); + } + } catch (SQLException e) { + conn2.getFileWriter().writeLine("conn2 isValid Failed with error"); + e.printStackTrace(); + } + conn2.fetchData("SELECT f_out_only()"); + + /* Use case - In out parameters + * This is use case handled by capturing the error of function not defined since on the server it is defined with client logic and not integer + */ + conn0.executeSql("CREATE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS " + + "'SELECT val, val2 from t_processed where val=out1 AND val2=out2 ORDER BY name LIMIT 1' LANGUAGE SQL"); + conn0.fetchData("CALL f_plaintext_out (3, 30)"); + conn0.fetchData("SELECT f_plaintext_out (3, 30)"); + + try { + if (!conn1.getConnection().isValid(60)) { + conn1.getFileWriter().writeLine("isValid Failed for connection 1"); + } + } catch (SQLException e) { + conn1.getFileWriter().writeLine("isValid Failed with error"); + e.printStackTrace(); + } + conn1.fetchData("SELECT f_plaintext_out (3, 30)"); + + conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))"); + conn0.executeSql("INSERT INTO t_num (id, num) VALUES (1, 5555)"); + conn0.executeSql("INSERT INTO t_num (id, num) VALUES (2, 6666)"); + conn0.fetchData("SELECT * from t_num"); + + /* Use case - function return value + * This is use case handled on the resultset level when it cannot extract user value from client logic value + */ + conn0.executeSql("CREATE FUNCTION select1 () RETURNS t_num LANGUAGE SQL AS 'SELECT * from t_num;';"); + conn0.fetchData("call select1()"); + conn1.fetchData("call select1()"); + + /* Use case - function return value + * This is use case handled on the resultset level when it cannot extract user value from client logic value + */ + conn0.executeSql("CREATE FUNCTION f_processed_in_out_plpgsql(in1 int, out out1 int, in2 int, out out2 int)" + + "as $$ " + + "begin " + + "select val, val2 INTO out1, out2 from t_processed where val = in2 or val = in1 limit 1; " + + "end;$$ " + + "LANGUAGE plpgsql"); + conn0.fetchData("SELECT f_processed_in_out_plpgsql(17,3)"); + + try { + if (!conn1.getConnection().isValid(60)) { + conn1.getFileWriter().writeLine("isValid Failed for connection 1"); + } + } catch (SQLException e) { + conn1.getFileWriter().writeLine("isValid Failed with error"); + e.printStackTrace(); + } + conn1.fetchData("SELECT f_processed_in_out_plpgsql(17,3)"); + + conn0.executeSql("DROP function f_out_only"); + conn0.executeSql("DROP function f_plaintext_out;"); + conn0.executeSql("DROP function select1"); + conn0.executeSql("DROP function f_processed_in_out_plpgsql"); + conn0.executeSql("DROP TABLE t_num CASCADE"); + conn0.executeSql("DROP TABLE t_processed CASCADE"); + BinUtils.dropCLSettings(conn0); + + conn0.close(); + conn1.close(); + conn2.close(); + } + /** + * Validate the cache refresh when trying to read client logic data and change it to user format on a 2nd connection + * @param conn + */ + private void createTableAndReadFrom2ndConnection(DatabaseConnection4Test conn) { + + writHeader(conn, "Validate the cache refresh when trying to read data"); + /* conn0 is used to create the CL settings */ + DatabaseConnection4Test conn0 = new 
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+
+        BinUtils.createCLSettings(conn0);
+        conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))");
+
+        conn0.executeSql("INSERT INTO t_num (id, num) VALUES (1, 555)");
+        conn0.fetchData("select * from t_num");
+
+        try {
+            if (!conn1.getConnection().isValid(60)) {
+                conn1.getFileWriter().writeLine("isValid Failed for connection 1");
+            }
+        } catch (SQLException e) {
+            conn1.getFileWriter().writeLine("isValid Failed with error");
+            e.printStackTrace();
+        }
+        conn1.fetchData("select * from t_num");
+
+        conn0.executeSql("DROP TABLE t_num");
+        BinUtils.dropCLSettings(conn0);
+
+        conn0.close();
+        conn1.close();
+    }
+    /**
+     * Validate the cache refresh when trying to read data - multiple resultsets
+     * @param conn
+     */
+    private void createTableAndReadFrom2ndConnectionMultiRS(DatabaseConnection4Test conn) {
+        writHeader(conn, "Validate the cache refresh when trying to read data - multiple resultsets");
+
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+
+        BinUtils.createCLSettings(conn0);
+
+        conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))");
+
+        conn0.executeSql("INSERT INTO t_num (id, num) VALUES (1, 555)");
+        /* multiple SQL statements in a single query are not yet supported */
+        String sql = "select * from t_num;select num from t_num;";
+        BinUtils.getMultipleResultsets(conn0, sql);
+        BinUtils.getMultipleResultsets(conn1, sql);
+
+        conn0.executeSql("DROP TABLE t_num");
+        BinUtils.dropCLSettings(conn0);
+
+        conn0.close();
+        conn1.close();
+    }
+    /**
+     * Validate the cache retry mechanism when inserting data to a table using a prepare statement
+     * @param conn
+     */
+    private void createCLSettingsAndInsert2TableUsingPrepare(DatabaseConnection4Test conn) {
+        writHeader(conn, "Validate the cache retry mechanism when inserting data to a table using prepare statement");
+
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+
+        BinUtils.createCLSettings(conn0);
+        conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))");
+
+        try {
+            if (!conn1.getConnection().isValid(60)) {
+                conn1.getFileWriter().writeLine("isValid Failed for connection 1");
+            }
+        } catch (SQLException e) {
+            conn1.getFileWriter().writeLine("isValid Failed with error");
+            e.printStackTrace();
+        }
+        String sql = "INSERT INTO t_num (id, num) VALUES (?,?)";
+        List<String> parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("2");
+        conn1.updateDataWithPrepareStmnt(sql, parameters);
+
+        conn1.fetchData("select * from t_num");
+
+        conn0.executeSql("DROP TABLE t_num");
+        BinUtils.dropCLSettings(conn0);
+
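+        /*
+         * For clarity, a minimal sketch of the stale-cache pattern these tests exercise,
+         * assuming conn1 was opened before the encrypted table and cek1 existed
+         * (isValid() also refreshes the client-encryption cache here; see
+         * connectWithoutReloadingCacheOnIsValid above for the opt-out):
+         *
+         *     if (!conn1.getConnection().isValid(60)) {
+         *         // handle the dead connection
+         *     }
+         *     try (PreparedStatement ps = conn1.getConnection()
+         *             .prepareStatement("INSERT INTO t_num (id, num) VALUES (?,?)")) {
+         *         ps.setInt(1, 1);
+         *         ps.setInt(2, 2);
+         *         ps.executeUpdate(); // the driver re-resolves the encrypted column metadata
+         *     }
+         */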
+        conn0.close();
+        conn1.close();
+    }
+    /**
+     * Validate the cache retry mechanism when trying to select with a where clause pointing to a field with client logic
+     * @param conn
+     * @param usePrepare whether to use a prepare statement
+     */
+    private void createCLSettingsAndReadwithCriteria(DatabaseConnection4Test conn, boolean usePrepare) {
+        String header = "Validate cache retry when applying where clause on client logic field";
+        if (usePrepare) {
+            header += " using prepare statement";
+        }
+        else {
+            header += " using simple queries";
+        }
+        writHeader(conn, header);
+
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+
+        BinUtils.createCLSettings(conn0);
+        conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))");
+
+        String sql = "INSERT INTO t_num (id, num) VALUES (?,?)";
+        List<String> parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("2");
+        conn0.updateDataWithPrepareStmnt(sql, parameters);
+        conn0.fetchData("select * from t_num");
+
+        try {
+            if (!conn1.getConnection().isValid(60)) {
+                conn1.getFileWriter().writeLine("isValid Failed for connection 1");
+            }
+        } catch (SQLException e) {
+            conn1.getFileWriter().writeLine("isValid Failed with error");
+            e.printStackTrace();
+        }
+
+        if (usePrepare) {
+            sql = "SELECT * FROM t_num where num = ?";
+            List<String> parameters2 = new ArrayList<>();
+            parameters2.add("2");
+            conn1.fetchDataWithPrepareStmnt(sql, parameters2);
+        }
+        else {
+            conn1.fetchData("SELECT * FROM t_num where num = 2");
+        }
+        conn1.fetchData("select * from t_num");
+
+        conn0.executeSql("DROP TABLE t_num");
+        BinUtils.dropCLSettings(conn0);
+
+        conn0.close();
+        conn1.close();
+    }
+
+    /**
+     * Validate cache retry when updating data using a simple query
+     * @param conn
+     */
+    private void createCLSettingsAndUpdating2Table(DatabaseConnection4Test conn) {
+        this.writHeader(conn, "Validate cache retry when updating data using simple query");
+
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+
+        BinUtils.createCLSettings(conn0);
+        conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))");
+
+        conn0.executeSql("INSERT INTO t_num (id, num) VALUES (1, 555)");
+        conn0.executeSql("INSERT INTO t_num (id, num) VALUES (1, 666)");
+        conn0.fetchData("select * from t_num");
+
+        try {
+            if (!conn1.getConnection().isValid(60)) {
+                conn1.getFileWriter().writeLine("isValid Failed for connection 1");
+            }
+        } catch (SQLException e) {
+            conn1.getFileWriter().writeLine("isValid Failed with error");
+            e.printStackTrace();
+        }
+        conn1.executeSql("update t_num set num = 7000");
+        conn1.fetchData("select * from t_num");
+
+        conn0.executeSql("DROP TABLE t_num");
+        BinUtils.dropCLSettings(conn0);
+
+        conn0.close();
+        conn1.close();
+    }
+    /**
+     * Validate cache retry when inserting data using a simple query
+     * @param conn
+     */
+    private void createCLSettingsAndInsert2Table(DatabaseConnection4Test conn) {
+        this.writHeader(conn, "Validate cache retry when inserting data using simple query");
+
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+
+        BinUtils.createCLSettings(conn0);
+        conn0.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))");
+
+        conn0.executeSql("INSERT INTO t_num (id, num) VALUES (1, 555)");
+        conn0.fetchData("select * from t_num");
+
+        try {
+            if (!conn1.getConnection().isValid(60)) {
+                conn1.getFileWriter().writeLine("isValid Failed for connection 1");
+            }
+        } catch (SQLException e) {
+            conn1.getFileWriter().writeLine("isValid Failed with error");
+            e.printStackTrace();
+        }
+        conn1.executeSql("INSERT INTO t_num (id, num) VALUES (1, 666)");
+        conn1.fetchData("select * from t_num");
+
+        conn0.executeSql("DROP TABLE t_num");
+        BinUtils.dropCLSettings(conn0);
+
+        conn0.close();
+        conn1.close();
+    }
+    /**
+     * Validate cache retry when inserting data using a batch query
+     * @param conn
+     */
+    private void createCLSettingsAndInsertBatch2Table(DatabaseConnection4Test conn) {
+        this.writHeader(conn, "Validate cache retry when inserting data in batch");
+        //
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+        //
+        String sql;
+        BinUtils.createCLSettings(conn0);
+        sql = "CREATE TABLE t_varchar(id INT, " +
+            "name varchar(50) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)," +
+            "address varchar(50) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC));";
+        conn0.executeSql(sql);
+
+        try {
+            Statement statement = conn1.getConnection().createStatement();
+            int loopCount = 20;
+            for (int i = 1; i < loopCount + 1; ++i) {
+                sql = "INSERT INTO t_varchar (id, name, address) " +
+                    "VALUES (" + i + ", 'MyName" + i + "', 'MyAddress" + i + "')";
+                conn1.getFileWriter().writeLine("added to batch " + sql);
+                statement.addBatch(sql);
+            }
+            conn1.getFileWriter().writeLine("executing batch ...");
+            statement.executeBatch();
+            conn1.fetchData("select * from t_varchar order by id");
+        } catch (SQLException e) {
+            e.printStackTrace();
+            conn1.getFileWriter().writeLine("Unexpected error:" + e.getMessage());
+        }
+
+        conn0.executeSql("DROP table t_varchar");
+        BinUtils.dropCLSettings(conn0);
+
+        conn0.close();
+        conn1.close();
+    }
+
+    /**
+     * Validate cache retry when creating column settings
+     * @param conn
+     */
+    private void createCLColumnSettings(DatabaseConnection4Test conn) {
+        this.writHeader(conn, "Validate cache retry when creating a column settings");
+        /* conn0 is used to create the CL settings */
+        DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn0.reconnect();
+        /* conn1 is used as a 2nd connection whose cache is not updated */
+        DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter());
+        conn1.reconnect();
+        conn1.gsKtoolExec("\\! gs_ktool -d all");
gs_ktool -d all"); + conn1.gsKtoolExec("\\! gs_ktool -g"); + String sql; + sql = "CREATE CLIENT MASTER KEY cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = \"gs_ktool/1\" , ALGORITHM = AES_256_CBC);"; + conn1.executeSql(sql); + sql = "CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);"; + conn1.executeSql(sql); + BinUtils.dropCLSettings(conn0); + + conn0.close(); + conn1.close(); + } + + /** + * Validate cache retry when using 2nd connection to create a table + * @param conn + */ + private void createCLSettingsAndTable(DatabaseConnection4Test conn) { + this.writHeader(conn, "Validate cache retry when using 2nd connection to create a table"); + // + /*conn0 is used to create the CL settings*/ + DatabaseConnection4Test conn0 = new DatabaseConnection4Test(conn, conn.getFileWriter()); + conn0.reconnect(); + /*conn0 is used as a 2nd connection that has the cache not updated*/ + DatabaseConnection4Test conn1 = new DatabaseConnection4Test(conn, conn.getFileWriter()); + conn1.reconnect(); + // + BinUtils.createCLSettings(conn0); + conn1.executeSql("CREATE TABLE t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))"); + + conn1.executeSql("DROP TABLE t_num"); + BinUtils.dropCLSettings(conn0); + + conn0.close(); + conn1.close(); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/CreateFunctionBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/CreateFunctionBin.java new file mode 100644 index 000000000..1e8734e5f --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/CreateFunctionBin.java @@ -0,0 +1,43 @@ +package gauss.regress.jdbc.bintests; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class CreateFunctionBin implements IBinaryTest{ + + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + String sql; + sql = "CREATE TABLE sbtest1(id int," + + "k INTEGER ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)," + + "c CHAR(120) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)," + + "pad CHAR(60) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC));"; + + conn.executeSql(sql); + sql = "insert into sbtest1 values (1,1,1,1)"; + + conn.executeSql(sql); + sql = "create function select_data() " + + "returns table(a int, b INTEGER, c CHAR(120), d CHAR(120)) " + + "as " + + "$BODY$ " + + "begin " + + "return query(select * from sbtest1); " + + "end; " + + "$BODY$ " + + "LANGUAGE plpgsql; "; + conn.executeSql(sql); + + sql = "call select_data(); "; + conn.fetchData(sql); + sql = "DROP FUNCTION select_data"; + conn.executeSql(sql); + sql = "DROP TABLE sbtest1;"; + conn.executeSql(sql); + + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1"); + } + +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/CreateTableWithPrepareStatamentBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/CreateTableWithPrepareStatamentBin.java new file mode 100644 index 000000000..581d94dff --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/CreateTableWithPrepareStatamentBin.java @@ -0,0 +1,48 @@ +package gauss.regress.jdbc.bintests; + +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import 
+
+public class CreateTableWithPrepareStatamentBin implements IBinaryTest {
+    /**
+     * This test validates the prepare statement flow
+     */
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        BinUtils.createCLSettings(conn);
+
+        List<String> parameters;
+        parameters = new ArrayList<>();
+        String sqlCreate = "CREATE TABLE IF NOT EXISTS " +
+            "t_string(key int," +
+            "_varchar_ varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," +
+            "_char_ char(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," +
+            "_text_ text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));";
+
+
+        conn.executeSql(sqlCreate);
+
+        String sqlInsert;
+        sqlInsert = "INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (?,?,?,?);";
+        parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("varchar data");
+        parameters.add("char data");
+        parameters.add("text data");
+        conn.updateDataWithPrepareStmnt(sqlInsert, parameters);
+
+        String sqlInsertSimple = "INSERT INTO t_string (key, _varchar_, _char_, _text_) " +
+            "VALUES (1,'2','3','4');";
+        conn.executeSql(sqlInsertSimple);
+
+        conn.executeSql("INSERT INTO t_string (key, _varchar_, _char_, _text_) " +
+            "VALUES (1,'2','3','4');");
+        conn.fetchData("select * from t_string;");
+
+        conn.executeSql("DROP TABLE t_string;");
+        BinUtils.dropCLSettings(conn);
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/DatabaseMetadataGetColumnsBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/DatabaseMetadataGetColumnsBin.java
new file mode 100644
index 000000000..1baeb0084
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/DatabaseMetadataGetColumnsBin.java
@@ -0,0 +1,68 @@
+package gauss.regress.jdbc.bintests;
+
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.DatabaseConnection4Test;
+import gauss.regress.jdbc.utils.MetadtaUtils;
+
+/**
+ * Test the database metadata API on a table. Run it three times: on a table with a client
+ * logic column, on a table with no client logic, and on a table with no client logic with
+ * no CL in the connection string
+ */
+public class DatabaseMetadataGetColumnsBin implements IBinaryTest {
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        MetadtaUtils.create_client_logic_table(conn);
+        MetadtaUtils.create_simple_table(conn);
+        try {
+            conn.getFileWriter().writeLine("Testing table with client logic ...");
+            String query = "Select * from metadata_client_logic_test_tbl;";
+            String tableName = "metadata_client_logic_test_tbl";
+            runTest(conn, query, tableName);
+            MetadtaUtils.drop_client_logic_tbl_data(conn);
+            conn.getFileWriter().writeLine("");
+            conn.getFileWriter().writeLine("Testing table with no client logic ...");
+            query = "Select * from metadata_simple_test_tbl;";
+            tableName = "metadata_simple_test_tbl";
+            runTest(conn, query, tableName);
+            conn.getFileWriter()
+                .writeLine("Testing table with no client logic and with no client logic in connection string ...");
+            conn.connectWithNoCL();
+            runTest(conn, query, tableName);
+        } catch (Exception e) {
+            conn.getFileWriter().writeLine("Failed ResultSetMetadata");
+            e.printStackTrace();
+        }
+        MetadtaUtils.drop_simple_tbl_data(conn);
+    }
+
+    /**
+     * Encapsulate the test into a function to run it multiple times
+     *
+     * @param conn JDBC
connection + * @param query SQL to use + * @param tableName use for table properties + * @throws SQLException + */ + + private void runTest(DatabaseConnection4Test conn, String query, String tableName) throws SQLException { + DatabaseMetaData metaData = conn.getConnection().getMetaData(); + ResultSet columns = metaData.getColumns(null, null, tableName, null); + while (columns.next()) { + conn.getFileWriter().writeLine("Column name: " + columns.getString("COLUMN_NAME")); + conn.getFileWriter().writeLine("Column size" + "(" + columns.getInt("COLUMN_SIZE") + ")"); + conn.getFileWriter().writeLine("Ordinal position: " + columns.getInt("ORDINAL_POSITION")); + conn.getFileWriter().writeLine("Catalog: " + columns.getString("TABLE_CAT")); + conn.getFileWriter().writeLine("Data type (integer value): " + columns.getInt("DATA_TYPE")); + conn.getFileWriter().writeLine("Data type name: " + columns.getString("TYPE_NAME")); + conn.getFileWriter().writeLine("Decimal value: " + columns.getBigDecimal("DECIMAL_DIGITS")); + conn.getFileWriter().writeLine(" "); + conn.getFileWriter().writeLine("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + conn.getFileWriter().writeLine(" "); + } + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/DisconnectedRsBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/DisconnectedRsBin.java new file mode 100644 index 000000000..6bdca8baf --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/DisconnectedRsBin.java @@ -0,0 +1,52 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class DisconnectedRsBin implements IBinaryTest { + + @Override + public void execute(DatabaseConnection4Test conn) { + String sql; + + BinUtils.createCLSettings(conn); + + sql = "CREATE TABLE IF NOT EXISTS t_varchar_regular(id INT, name varchar(50), address varchar(50));"; + conn.executeSql(sql); + + sql = "CREATE TABLE IF NOT EXISTS t_varchar(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"; + conn.executeSql(sql); + + sql = "INSERT INTO t_varchar (id, name, address) VALUES (1, 'MyName', 'MyAddress');"; + conn.executeSql(sql); + sql = "INSERT INTO t_varchar VALUES (2, 'MyName2', 'MyAddress2');"; + conn.executeSql(sql); + + sql = "INSERT INTO t_varchar_regular (id, name, address) VALUES (1, 'MyName', 'MyAddress');"; + conn.executeSql(sql); + sql = "INSERT INTO t_varchar_regular VALUES (2, 'MyName2', 'MyAddress2');"; + conn.executeSql(sql); + try { + Statement st = conn.getConnection().createStatement(); + sql = "SELECT * from t_varchar_regular ORDER BY id;"; + ResultSet rs = st.executeQuery(sql); + conn.executeSql("drop table t_varchar;"); + conn.executeSql("drop table t_varchar_regular;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + conn.getConnection().close(); + conn.getFileWriter().writeLine("Connection is closed"); + conn.printRS4Test(rs); + } catch (SQLException e) { + e.printStackTrace(); + } + + } + +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInOutParamBinaryModeBin.java 
b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInOutParamBinaryModeBin.java new file mode 100644 index 000000000..88e0f1a11 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInOutParamBinaryModeBin.java @@ -0,0 +1,82 @@ +package gauss.regress.jdbc.bintests; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class FunctionsInOutParamBinaryModeBin implements IBinaryTest { + /** + * This test is added to test the functionality of replacing the return value of a function when + * The function returns a record that contains client logic fields + */ + @Override + public void execute(DatabaseConnection4Test conn) { + conn.connectInBinaryMode(); + BinUtils.createCLSettings(conn); + + conn.executeSql("CREATE TABLE t_processed (name text, " + + "val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"); + conn.executeSql("insert into t_processed values('one',1,10),('two',2,20),('three',3,30),('four',4,40)," + + "('five',5,50),('six',6,60),('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100)"); + + conn.executeSql("CREATE TABLE t_processed_b (name text, " + + "val text ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"); + conn.executeSql("INSERT INTO t_processed_b VALUES('name1', 'one', 10),('name2', 'two', 20)," + + "('name3', 'three', 30),('name4', 'four', 40),('name5', 'five', 50),('name6', 'six', 60)," + + "('name7', 'seven', 70),('name8', 'eight', 80),('name9', 'nine', 90),('name10', 'ten', 100)"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_out_1param( out1 OUT int,in1 int) " + + "AS 'SELECT val from t_processed where val = in1 LIMIT 1' LANGUAGE SQL"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_out(out1 OUT int,in1 int, out2 OUT int) " + + "AS 'SELECT val, val2 from t_processed where val = in1 LIMIT 1' LANGUAGE SQL"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_out_b(out1 OUT text, out2 OUT int,in1 text, in2 text) " + + "AS 'SELECT val, val2 from t_processed_b where val = in1 or name = in2 LIMIT 1' LANGUAGE SQL"); + + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_out_plpgsql(in1 int, out out1 int, in2 int, out out2 int)" + + "as $$ " + + "begin " + + "select val, val2 INTO out1, out2 from t_processed where val = in2 or val = in1 limit 1; " + + "end;$$ " + + "LANGUAGE plpgsql"); + + conn.executeSql("CREATE OR REPLACE FUNCTION " + + "f_processed_in_out_plpgsql2(out out1 t_processed.val%TYPE, " + + "out out2 t_processed.val%TYPE, in1 t_processed.val%TYPE) " + + "as $$ " + + "begin " + + " select val, val2 INTO out1, out2 from t_processed where val = in1 limit 1; " + + "end;$$ " + + "LANGUAGE plpgsql"); + + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_out_aliases_plpgsql(out out1 int, in1 int,out out2 int) as " + + "$BODY$ " + + "DECLARE " + + " val1 ALIAS FOR out1; " + + " input_p ALIAS for in1; " + + "begin " + + " select val, val2 INTO val1, out2 from t_processed where val = input_p; " + + "end; " + + "$BODY$ " + + "LANGUAGE plpgsql; "); + conn.fetchData("select proname, prorettype, proallargtypes, prorettype_orig, proallargtypes_orig " + + "FROM pg_proc " + + "LEFT JOIN gs_encrypted_proc ON pg_proc.Oid = gs_encrypted_proc.func_id " + + "WHERE proname IN " + + "('f_processed_in_out', 'f_processed_in_out_plpgsql', 'f_processed_in_out_plpgsql2', " + + "'f_processed_in_out_aliases_plpgsql', 
'f_processed_in_out_1param') ORDER BY proname"); + + conn.fetchData("SELECT f_processed_in_out_1param(2)"); + conn.fetchData("SELECT f_processed_in_out(5)"); + conn.fetchData("SELECT f_processed_in_out_b('ten','name70')"); + conn.fetchData("SELECT f_processed_in_out_plpgsql(17,3)"); + conn.fetchData("SELECT f_processed_in_out_plpgsql2(6)"); + conn.fetchData("SELECT f_processed_in_out_aliases_plpgsql(4)"); + conn.executeSql("DROP TABLE t_processed CASCADE"); + conn.executeSql("DROP TABLE t_processed_b CASCADE"); + conn.executeSql("DROP FUNCTION f_processed_in_out_1param"); + conn.executeSql("DROP FUNCTION f_processed_in_out"); + conn.executeSql("DROP FUNCTION f_processed_in_out_b"); + conn.executeSql("DROP FUNCTION f_processed_in_out_plpgsql"); + conn.executeSql("DROP FUNCTION f_processed_in_out_plpgsql2"); + conn.executeSql("DROP FUNCTION f_processed_in_out_aliases_plpgsql"); + BinUtils.dropCLSettings(conn); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInOutParamsBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInOutParamsBin.java new file mode 100644 index 000000000..84590fbc4 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInOutParamsBin.java @@ -0,0 +1,84 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.CallableStatement; +import java.sql.SQLException; +import java.sql.Types; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class FunctionsInOutParamsBin implements IBinaryTest { + /** + * This test checks function with an input and out parameters that are client logic + */ + @Override + public void execute(DatabaseConnection4Test conn) { + + BinUtils.createCLSettings(conn); + + conn.executeSql("CREATE TABLE t_processed " + + "(name varchar(100) ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), " + + "id INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), " + + "val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"); + + conn.executeSql("insert into t_processed values" + + "('one',1,10,10),('two',2,20,20),('three',3,30,30),('four',4,40,40),('five',5,50,50),('six',6,60,60),('seven',7,70,70)," + + "('eight',8,80,80),('nine',9,90,90),('ten',10,100,100)"); + + conn.fetchData("SELECT * from t_processed order by val2"); + + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_out_1_int_param(in1 int, out1 OUT int) " + + "AS 'SELECT val from t_processed where id = in1 LIMIT 1' LANGUAGE SQL"); + + conn.fetchData("SELECT f_processed_in_out_1_int_param(2)"); + try { + conn.getFileWriter().writeLine("Invoking f_processed_in_out_1_int_param using CallableStatement"); + CallableStatement stmnt = conn.getConnection().prepareCall("{? 
= call f_processed_in_out_1_int_param(?)}"); + stmnt.setInt(1, 2); + stmnt.registerOutParameter(2, Types.INTEGER); + stmnt.execute(); + Object data = stmnt.getObject(2); + BinUtils.printParameter(conn, data, "f_processed_in_out_1param", 2); + } catch (SQLException e) { + conn.getFileWriter().writeLine("Error invoking f_processed_in_out_1param :" + e.getMessage()); + e.printStackTrace(); + } + + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_in_int_out_varchar(in1 int, out1 OUT varchar) " + + "AS 'SELECT name from t_processed where id = in1 LIMIT 1' LANGUAGE SQL"); + conn.fetchData("SELECT f_processed_in_int_out_varchar(2)"); + try { + conn.getFileWriter().writeLine("Invoking f_processed_in_int_out_varchar using CallableStatement"); + CallableStatement stmnt = conn.getConnection().prepareCall("{? = call f_processed_in_int_out_varchar(?)}"); + stmnt.setInt(1, 2); + stmnt.registerOutParameter(2, Types.VARCHAR); + stmnt.execute(); + Object data = stmnt.getObject(2); + BinUtils.printParameter(conn, data, "f_processed_in_out_1param_varchar_out", 2); + } catch (SQLException e) { + conn.getFileWriter().writeLine("Error invoking f_processed_in_out_1param_varchar :" + e.getMessage()); + e.printStackTrace(); + } + + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_varchar_in_int_out(in1 varchar, out1 OUT int) " + + "AS 'SELECT id from t_processed where name = in1 LIMIT 1' LANGUAGE SQL"); + conn.fetchData("SELECT f_processed_varchar_in_int_out('one')"); + try { + conn.getFileWriter().writeLine("Invoking f_processed_varchar_in_int_out using CallableStatement"); + CallableStatement stmnt = conn.getConnection().prepareCall("{? = call f_processed_varchar_in_int_out(?)}"); + stmnt.setString(1, "one"); + stmnt.registerOutParameter(2, Types.INTEGER); + stmnt.execute(); + Object data = stmnt.getObject(2); + BinUtils.printParameter(conn, data, "f_processed_varchar_in_int_out", 2); + } catch (SQLException e) { + conn.getFileWriter().writeLine("Error invoking f_processed_varchar_in_int_out :" + e.getMessage()); + e.printStackTrace(); + } + conn.executeSql("DROP FUNCTION f_processed_in_out_1_int_param"); + conn.executeSql("DROP FUNCTION f_processed_in_int_out_varchar"); + conn.executeSql("DROP FUNCTION f_processed_varchar_in_int_out"); + conn.executeSql("DROP TABLE t_processed CASCADE"); + BinUtils.dropCLSettings(conn); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInoutParamBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInoutParamBin.java new file mode 100644 index 000000000..e3af40e5c --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsInoutParamBin.java @@ -0,0 +1,82 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.CallableStatement; +import java.sql.SQLException; +import java.sql.Types; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class FunctionsInoutParamBin implements IBinaryTest { + /** + * This test checks function with an inout parameters that are client logic using the callable statement interface + */ + @Override + public void execute(DatabaseConnection4Test conn) { + + BinUtils.createCLSettings(conn); + + conn.executeSql("CREATE TABLE t_processed " + + "(name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"); + conn.executeSql("insert into t_processed " + + 
"values('one',1,10),('two',2,20),('three',3,30),('four',4,40),('five',5,50),('six',6,60)," + + "('seven',7,70),('eight',8,80),('nine',9,90),('ten',10,100)"); + + conn.executeSql("CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS " + + "'SELECT val, val2 from t_processed where val= out1 ORDER BY name LIMIT 1' LANGUAGE SQL"); + conn.fetchData("CALL f_plaintext_out (1, 1)"); + executeWithCallableStatament(conn, 1, 1); + conn.executeSql("DROP FUNCTION f_plaintext_out"); + // + conn.executeSql("CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS " + + "'SELECT val, val2 from t_processed where val=out1 AND val2=out2 ORDER BY name LIMIT 1' LANGUAGE SQL"); + conn.fetchData("CALL f_plaintext_out (3, 30)"); + conn.fetchData("SELECT f_plaintext_out (3, 30)"); + executeWithCallableStatament(conn, 3, 30); + // + conn.executeSql("DROP FUNCTION f_plaintext_out"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS " + + "$$ BEGIN SELECT val, val2 from t_processed ORDER BY name LIMIT 1 INTO out1, out2; END;" + + "$$ LANGUAGE PLPGSQL"); + conn.fetchData("CALL f_plaintext_out (2, 3)"); + conn.fetchData("SELECT f_plaintext_out (2, 3)"); + executeWithCallableStatament(conn, 2, 30); + // + conn.executeSql("DROP FUNCTION f_plaintext_out"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS " + + "$$ BEGIN SELECT val, val2 from t_processed where val=out1 or val2=out2 " + + "ORDER BY name LIMIT 1 INTO out1, out2; END; $$ LANGUAGE PLPGSQL"); + conn.fetchData("CALL f_plaintext_out (2, 30)"); + conn.fetchData("SELECT f_plaintext_out (2, 30)"); + executeWithCallableStatament(conn, 2, 30); + + conn.executeSql("DROP FUNCTION f_plaintext_out"); + conn.executeSql("DROP TABLE t_processed"); + BinUtils.dropCLSettings(conn); + } + /** + * Method to execute the inout functions defined in this test + * @param conn + * @param p1 + * @param p2 + */ + private void executeWithCallableStatament(DatabaseConnection4Test conn, int p1, int p2) { + try { + CallableStatement callableStatement = conn.getConnection().prepareCall("{CALL f_plaintext_out (?, ?)}"); + callableStatement.registerOutParameter(1, Types.INTEGER); + callableStatement.registerOutParameter(2, Types.INTEGER); + callableStatement.setInt(1, p1); + callableStatement.setInt(2, p2); + callableStatement.execute(); + Object data1 = callableStatement.getObject(1); + BinUtils.printParameter(conn, data1, "f_plaintext_out", 1); + Object data2 = callableStatement.getObject(2); + BinUtils.printParameter(conn, data2, "f_plaintext_out", 2); + } catch (SQLException e) { + conn.getFileWriter().writeLine("executeWithCallableStatament failed, error:" + e.getMessage()); + e.printStackTrace(); + } + + } +} + diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsMetadataBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsMetadataBin.java new file mode 100644 index 000000000..79b93e8c8 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsMetadataBin.java @@ -0,0 +1,141 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class FunctionsMetadataBin implements 
IBinaryTest {
+    /**
+     * This test checks that the metadata service returns information about the original type
+     */
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        // Run the test 3 times:
+        // 1. Without client logic objects
+        createScenario(conn, false);
+        getFunctionMetadata(conn);
+        removeScenario(conn);
+        //
+        // 2. With client logic objects
+        BinUtils.createCLSettings(conn);
+        createScenario(conn, true);
+        getFunctionMetadata(conn);
+        removeScenario(conn);
+        BinUtils.dropCLSettings(conn);
+        //
+        // 3. With a connection that has no client logic - to make sure we did not affect the regular queries
+        conn.connectWithNoCL();
+        createScenario(conn, false);
+        getFunctionMetadata(conn);
+        removeScenario(conn);
+    }
+    /**
+     * Gets the function metadata from the DatabaseMetaData service
+     * @param conn
+     */
+    private void getFunctionMetadata(DatabaseConnection4Test conn) {
+        DatabaseMetaData metadataService = null;
+        try {
+            metadataService = conn.getConnection().getMetaData();
+        } catch (SQLException e) {
+            conn.getFileWriter().writeLine("Failed obtaining the DatabaseMetaData service, exiting test ...");
+            e.printStackTrace();
+            return;
+        }
+
+        try {
+            conn.getFileWriter().writeLine("Obtaining the list of columns");
+            ResultSet columns = metadataService.getFunctionColumns(null, "public", null, null);
+            Set<String> ignoreColumn = new HashSet<>();
+            ignoreColumn.add("SPECIFIC_NAME");
+            conn.printRS4Test(columns, ignoreColumn);
+        } catch (SQLException e) {
+            conn.getFileWriter().writeLine("metadataService failed , error:" + e.getMessage());
+            e.printStackTrace();
+        }
+    }
+    /**
+     * Create the scenario with or without client logic for comparison
+     * @param conn
+     * @param withCl
+     */
+    void createScenario(DatabaseConnection4Test conn, boolean withCl) {
+
+        final String clientLogicFragment = "ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC)";
+
+        List<String> tables = Arrays.asList(new String[]{
+            "CREATE TABLE t_processed (name varchar(100) CLIENT_LOGIC, " +
+                "id INT CLIENT_LOGIC, val INT CLIENT_LOGIC, val2 INT)",
+            "CREATE TABLE t_num(id INT, num int CLIENT_LOGIC)"
+        });
+
+        for (String table : tables) {
+            if (withCl) {
+                conn.executeSql(table.replaceAll("CLIENT_LOGIC", clientLogicFragment));
+            }
+            else {
+                conn.executeSql(table.replaceAll("CLIENT_LOGIC", ""));
+            }
+        }
+
+        conn.executeSql("CREATE OR REPLACE FUNCTION " +
+            "t_processed(in1 int, in2 int, in3 int, out1 OUT int, out2 OUT varchar, out3 OUT int) " +
+            "AS 'SELECT val, name, val2 from t_processed " +
+            "where id = in1 and id = in2 and val2 = in3 LIMIT 1' " +
+            "LANGUAGE SQL");
+
+        conn.executeSql("CREATE OR REPLACE FUNCTION f_plaintext_out(out1 INOUT int, out2 INOUT int) AS " +
+            "$$ BEGIN SELECT val, val2 from t_processed ORDER BY name LIMIT 1 INTO out1, out2; END;" +
+            "$$ LANGUAGE PLPGSQL");
+
+
+        conn.executeSql("CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT id, num from t_num;'");
+
+        conn.executeSql("CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL AS 'SELECT num from t_num;'");
+        conn.getFileWriter().writeLine("Invoking select5 using simple query:");
+
+        conn.executeSql("CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL AS 'SELECT num from t_num;';");
+
+        conn.executeSql("CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL AS " +
+            "'SELECT id, num from t_num;';");
+
+        conn.executeSql("CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS \n" +
+            "$BODY$ \n" +
+            "DECLARE \n" +
+            "r t_num%rowtype; \n" +
+            "BEGIN 
\n" + + "FOR r IN \n" + + "SELECT * FROM t_num \n" + + "LOOP \n" + + "-- can do some processing here \n" + + "RETURN NEXT r; -- return current row of SELECT \n" + + "END LOOP; \n" + + "RETURN; \n" + + "END \n" + + "$BODY$ \n" + + "LANGUAGE plpgsql;"); + } + /** + * Removes the database objects created by the test + * @param conn + */ + void removeScenario(DatabaseConnection4Test conn) { + conn.executeSql("DROP FUNCTION t_processed"); + conn.executeSql("DROP FUNCTION f_plaintext_out"); + conn.executeSql("DROP FUNCTION select4"); + conn.executeSql("DROP FUNCTION select5"); + conn.executeSql("DROP FUNCTION select6"); + conn.executeSql("DROP FUNCTION select7"); + conn.executeSql("DROP FUNCTION get_rows_setof"); + conn.executeSql("DROP TABLE t_processed"); + conn.executeSql("DROP TABLE t_num"); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsOutParamBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsOutParamBin.java new file mode 100644 index 000000000..eeededcf3 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsOutParamBin.java @@ -0,0 +1,63 @@ +package gauss.regress.jdbc.bintests; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class FunctionsOutParamBin implements IBinaryTest { + + @Override + public void execute(DatabaseConnection4Test conn) { + String sql; + BinUtils.createCLSettings(conn); + + sql = "CREATE TABLE t_processed (name text, val INT ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"; + conn.executeSql(sql); + sql = "INSERT INTO t_processed VALUES('name', 1, 2)"; + conn.executeSql(sql); + conn.fetchData("select * from t_processed"); + conn.executeSql("CREATE TABLE t_processed_b (name text, val bytea ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC), val2 INT)"); + conn.executeSql("INSERT INTO t_processed_b VALUES('name', 'test', 2)"); + + sql = "CREATE OR REPLACE FUNCTION f_processed_out_1param(out1 OUT int) AS 'SELECT val from t_processed LIMIT 1' LANGUAGE SQL"; + conn.executeSql(sql); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_out(out1 OUT int, out2 OUT int) AS 'SELECT val, val2 from t_processed LIMIT 1' LANGUAGE SQL"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_out_b(out1 OUT bytea, out2 OUT int) AS 'SELECT val, val2 from t_processed_b LIMIT 1' LANGUAGE SQL"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out out1 int, out out2 int)\n" + + "as $$\n" + + "begin\n" + + " select val, val2 INTO out1, out2 from t_processed;\n" + + "end;$$\n" + + "LANGUAGE plpgsql"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_out_plpgsql2(out out1 t_processed.val%TYPE, out out2 t_processed.val%TYPE)\n" + + "as $$\n" + + "begin\n" + + " select val, val2 INTO out1, out2 from t_processed;\n" + + "end;$$\n" + + "LANGUAGE plpgsql"); + conn.executeSql("CREATE OR REPLACE FUNCTION f_processed_aliases_plpgsql(out out1 int, out out2 int) as\n" + + "$BODY$\n" + + "DECLARE\n" + + " val1 ALIAS FOR out1;\n" + + "begin\n" + + " select val, val2 INTO val1, out2 from t_processed;\n" + + "end;\n" + + "$BODY$\n" + + "LANGUAGE plpgsql"); + conn.fetchData("select f_processed_out_1param()"); + conn.fetchData("select f_processed_out()"); + conn.fetchData("select f_processed_out_b()"); + conn.fetchData("select f_processed_out_plpgsql()"); + conn.fetchData("select f_processed_out_plpgsql2()"); + 
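+        /*
+         * For clarity, a minimal sketch of how an OUT-parameter function like the ones above
+         * can also be driven through CallableStatement, following the pattern the companion
+         * FunctionsInOutParamsBin test uses; the exact escape syntax for this function is an
+         * assumption here:
+         *
+         *     CallableStatement cs = conn.getConnection()
+         *             .prepareCall("{? = call f_processed_out_1param()}");
+         *     cs.registerOutParameter(1, Types.INTEGER);
+         *     cs.execute();
+         *     Object out1 = cs.getObject(1); // already decoded from the client logic value
+         */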
conn.fetchData("select f_processed_aliases_plpgsql()"); + + conn.executeSql("drop function f_processed_out_1param"); + conn.executeSql("drop function f_processed_out"); + conn.executeSql("drop function f_processed_out_b"); + conn.executeSql("drop function f_processed_out_plpgsql"); + conn.executeSql("drop function f_processed_out_plpgsql2"); + conn.executeSql("drop function f_processed_aliases_plpgsql"); + conn.executeSql("drop table t_processed"); + conn.executeSql("drop table t_processed_b"); + BinUtils.dropCLSettings(conn); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsReturnValuesBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsReturnValuesBin.java new file mode 100644 index 000000000..bd2fd6072 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/FunctionsReturnValuesBin.java @@ -0,0 +1,137 @@ +package gauss.regress.jdbc.bintests; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class FunctionsReturnValuesBin implements IBinaryTest { + + /** + * This test is for testing the callable statement with function return values + */ + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + + + //Add a table without client logic if any test need to be compared with "regular" results + conn.executeSql("CREATE TABLE IF NOT EXISTS t_num_non_cl(id INT, num int)"); + conn.executeSql("INSERT INTO t_num_non_cl (id, num) VALUES (1, 5555)"); + conn.executeSql("INSERT INTO t_num_non_cl (id, num) VALUES (2, 6666)"); + conn.fetchData("SELECT * from t_num_non_cl order by id"); + + + conn.executeSql("CREATE TABLE IF NOT EXISTS t_num(id INT, num int ENCRYPTED WITH (column_encryption_key = cek1, encryption_type = DETERMINISTIC))"); + conn.executeSql("INSERT INTO t_num (id, num) VALUES (1, 5555)"); + conn.executeSql("INSERT INTO t_num (id, num) VALUES (2, 6666)"); + conn.fetchData("SELECT * from t_num order by id"); + + conn.executeSql("CREATE FUNCTION reffunc(refcursor) RETURNS refcursor AS " + + "'BEGIN OPEN $1 FOR SELECT * FROM t_num; " + + " RETURN $1; " + + "END; " + + "' LANGUAGE plpgsql;"); + + conn.executeSql("CREATE FUNCTION f_processed_return_table() RETURNS TABLE(val_p int, val2_p int) \n" + + "as \n" + + "$BODY$ \n" + + "begin \n" + + "return query (SELECT id, num from t_num); \n" + + "end; \n" + + "$BODY$ \n" + + "language plpgsql ;\n"); + + + conn.executeSql("CREATE FUNCTION select1 () RETURNS t_num LANGUAGE SQL AS 'SELECT * from t_num;'"); + conn.getFileWriter().writeLine("Invoking select1 using simple query:"); + conn.fetchData("call select1();"); + BinUtils.invokeFunctionWithIntegerOutParams(conn, "select1", 2); + + conn.executeSql("CREATE FUNCTION select2 () RETURNS t_num LANGUAGE SQL AS 'SELECT id, num from t_num;';"); + conn.getFileWriter().writeLine("Invoking select2 using simple query:"); + conn.fetchData("call select2();"); + BinUtils.invokeFunctionWithIntegerOutParams(conn, "select2", 2); + + conn.executeSql("CREATE FUNCTION select3 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT * from t_num;'"); + conn.getFileWriter().writeLine("Invoking select3 using simple query:"); + conn.fetchData("call select3();"); + BinUtils.invokeFunctionWithIntegerOutParams(conn, "select3", 2); + //TODO: can only get the values of the first record. 
+ // I was expecting that we should be able to get one output parameter with the type of Types.REF_CURSOR + // But it did not work, throw an error: + // org.postgresql.util.PSQLException: A CallableStatement was executed with an invalid number of parameters + // https://stackoverflow.com/questions/44982250/why-cant-postgres-functions-that-return-setof-be-called-from-a-jdbc-callablesta + // Seems not supported - see this https://jdbc.postgresql.org/documentation/81/callproc.html + //Maybe this bug in the postgres JDBC driver is related: https://github.com/pgjdbc/pgjdbc/issues/633 + //Code is below +// try { +// +// conn.getFileWriter().writeLine("Invoking select3 using CallableStatement:"); +// CallableStatement callStmnt = conn.getConnection().prepareCall("{call select3(?)}"); +// callStmnt.registerOutParameter(1, Types.REF_CURSOR); +// callStmnt.execute(); +// } catch (SQLException e) { +// e.printStackTrace(); +// conn.getFileWriter().writeLine("ERROR running select3 " + e.getMessage()); +// } + conn.executeSql("CREATE FUNCTION select4 () RETURNS setof t_num LANGUAGE SQL AS 'SELECT id, num from t_num;'"); + conn.fetchData("call select4();"); + BinUtils.invokeFunctionWithIntegerOutParams(conn, "select4", 2); + + conn.executeSql("CREATE FUNCTION select5 () RETURNS int LANGUAGE SQL AS 'SELECT num from t_num;'"); + conn.getFileWriter().writeLine("Invoking select5 using simple query:"); + conn.fetchData("call select5();"); + BinUtils.invokeFunctionWithIntegerOutParams(conn, "select5", 1); + + conn.executeSql("CREATE FUNCTION select6 () RETURNS setof int LANGUAGE SQL AS 'SELECT num from t_num;';"); + conn.fetchData("call select6();"); + BinUtils.invokeFunctionWithIntegerOutParams(conn, "select6", 1); + //As above cannot use REF_CURSOR to get the entire set of records as it is expecting an integer parameter + + conn.executeSql("CREATE FUNCTION select7 () RETURNS TABLE(a INT, b INT) LANGUAGE SQL AS " + + "'SELECT id, num from t_num;';"); + conn.fetchData("call select7();"); + //As above cannot use REF_CURSOR to get the entire set of records as it is expecting an integer parameter + + conn.executeSql("CREATE OR REPLACE FUNCTION get_rows_setof() RETURNS SETOF t_num AS \n" + + "$BODY$ \n" + + "DECLARE \n" + + "r t_num%rowtype; \n" + + "BEGIN \n" + + "FOR r IN \n" + + "SELECT * FROM t_num \n" + + "LOOP \n" + + "-- can do some processing here \n" + + "RETURN NEXT r; -- return current row of SELECT \n" + + "END LOOP; \n" + + "RETURN; \n" + + "END \n" + + "$BODY$ \n" + + "LANGUAGE plpgsql;"); + conn.fetchData("call get_rows_setof()"); + conn.fetchData("CALL f_processed_return_table();"); + //As above cannot use REF_CURSOR to get the entire set of records as it is expecting an integer parameter + //conn.fetchData("BEGIN;); + //SELECT reffunc('funccursor'); + //FETCH ALL IN funccursor; + //COMMIT; + //SELECT * FROM get_rows_setof(); + // + conn.executeSql("DROP FUNCTION select1;"); + conn.executeSql("DROP FUNCTION select2;"); + conn.executeSql("DROP FUNCTION select3;"); + conn.executeSql("DROP FUNCTION select4;"); + conn.executeSql("DROP FUNCTION select5;"); + conn.executeSql("DROP FUNCTION select6;"); + conn.executeSql("DROP FUNCTION select7;"); + conn.executeSql("DROP FUNCTION reffunc(refcursor);"); + conn.executeSql("DROP FUNCTION get_rows_setof();"); + conn.executeSql("DROP FUNCTION f_processed_return_table();"); + conn.executeSql("DROP TABLE t_num CASCADE;"); + conn.fetchData("SELECT COUNT(*) FROM gs_encrypted_proc;"); + conn.fetchData("SELECT proname, prorettype, proallargtypes " + + "FROM 
gs_encrypted_proc JOIN pg_proc ON pg_proc.Oid = gs_encrypted_proc.func_id;");
+
+        BinUtils.dropCLSettings(conn);
+
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/HandlingBadSQLBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/HandlingBadSQLBin.java
new file mode 100644
index 000000000..b6953e78d
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/HandlingBadSQLBin.java
@@ -0,0 +1,64 @@
+package gauss.regress.jdbc.bintests;
+
+import java.sql.SQLException;
+import java.util.List;
+import java.util.ArrayList;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.*;
+
+/**
+ * Test the handling of bad SQL queries. Run it three times: on a table with a client logic
+ * column, on a table with no client logic, and on a table with no client logic with no CL
+ * in the connection string
+ */
+public class HandlingBadSQLBin implements IBinaryTest {
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        MetadtaUtils.create_client_logic_table(conn);
+        MetadtaUtils.create_simple_table(conn);
+        List<String> queriesClList = new ArrayList<>();
+        queriesClList.add("select 1* from metadata_client_logic_test_tbl;");
+        queriesClList.add("from * select metadata_client_logic_test_tbl;");
+        queriesClList.add("select col from metadata_client_logic_test_tbl;");
+        queriesClList.add("select * frm metadata_client_logic_test_tbl;");
+        queriesClList.add("select * from \"mEtadata_client_logic_test_tbl\";");
+        queriesClList.add("select * from metadata_client_logic_test_tbl select * from metadata_client_logic_test_tbl;;");
+        List<String> queriesNoClList = new ArrayList<>();
+        queriesNoClList.add("select 1* from metadata_simple_test_tbl;");
+        queriesNoClList.add("from * select metadata_simple_test_tbl;");
+        queriesNoClList.add("select col from metadata_simple_test_tbl;");
+        queriesNoClList.add("select * frm metadata_simple_test_tbl;");
+        queriesNoClList.add("select * from \"mEtadata_simple_test_tbl\";");
+        queriesNoClList.add("select * from metadata_simple_test_tbl select * from metadata_simple_test_tbl;");
+        try {
+            conn.getFileWriter().writeLine("Testing table with client logic ...");
+            runTest(conn, queriesClList);
+            conn.getFileWriter().writeLine("");
+            MetadtaUtils.drop_client_logic_tbl_data(conn);
+            conn.getFileWriter().writeLine("Testing table with no client logic ...");
+            runTest(conn, queriesNoClList);
+            conn.getFileWriter().writeLine("Testing table with no client logic and with no client logic in connection string ...");
+            conn.connectWithNoCL();
+            runTest(conn, queriesNoClList);
+        } catch (Exception e) {
+            conn.getFileWriter().writeLine("Failed to execute bad sql queries");
+            e.printStackTrace();
+        }
+        MetadtaUtils.drop_simple_tbl_data(conn);
+    }
+
+    /**
+     * Encapsulate the test into a function to run it multiple times
+     * @param conn JDBC connection
+     * @param query list of SQL statements to run
+     * @throws SQLException
+     */
+    private void runTest(DatabaseConnection4Test conn, List<String> query) throws SQLException {
+        for (int i = 0; i < query.size(); i++) {
+            conn.fetchData(query.get(i));
+            conn.getFileWriter().writeLine("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
+            conn.getFileWriter().writeLine(" ");
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/MultiThreadClientLogicBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/MultiThreadClientLogicBin.java
new file mode 100644
index 000000000..cf659c1bb
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/MultiThreadClientLogicBin.java
@@ -0,0 +1,188 @@
+package gauss.regress.jdbc.bintests;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.DatabaseConnection4Test;
+import gauss.regress.jdbc.utils.OutFileWriter;
+
+/**
+ * @author dev
+ * Simulates a Spring web-server scenario: a pool of connections shared by worker threads
+ */
+public class MultiThreadClientLogicBin implements IBinaryTest {
+
+    List<DatabaseConnection4Test> conns = null;
+    Set<Integer> usedConns = null;
+    String workerInsertSql = "INSERT INTO t_varchar VALUES ((SELECT COALESCE(MAX(ID),0) FROM t_varchar) + 1, 'worker_%d')";
+    String validateSql = "SELECT * FROM t_varchar ORDER BY ID";
+    int numOfExtraConns = 8;
+    int selectedWorkerId = -1;
+
+    /* returns the lowest connection index that has not been handed out yet */
+    private int nextInt() {
+        int ret = -1;
+        do {
+            ++ret;
+        } while (usedConns.contains(ret) && usedConns.size() < numOfExtraConns);
+        usedConns.add(ret);
+        return ret;
+    }
+
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        usedConns = new HashSet<>();
+        conns = new ArrayList<>();
+        conns.add(conn);
+        selectedWorkerId = 0;
+        usedConns.add(selectedWorkerId);
+        setupStep();
+        createConnectionsStep();
+        selectedWorkerId = nextInt();
+        insertStep(new int[]{1}, false, 1);
+        insertStep(new int[]{2}, false, 1);
+        conns.get(selectedWorkerId).close();
+        insertStep(new int[]{3}, true, 3);
+        insertStep(new int[]{4, 5}, true, 3);
+        cleanStep(conn);
+        String targetName = conn.getFileWriter().getFilename();
+        for (DatabaseConnection4Test databaseConnection4Test : conns) {
+            databaseConnection4Test.close();
+        }
+        conns.clear();
+        conns = null;
+        try {
+            /* join output from all connections into a single file (of the original connection)
+             * in order to compare with the expected result */
+            String joinCmd = String.format("cat %s?* >> %s", targetName, targetName);
+            String[] commands = {"/bin/sh", "-c", joinCmd};
+            Runtime.getRuntime().exec(commands);
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+
+    }
+
+    /*
+     * run a thread that initializes the test env - create CL settings + table
+     */
+    private void setupStep() {
+        Runnable myRunnable = new Runnable() {
+            public void run() {
+                DatabaseConnection4Test conn = conns.get(selectedWorkerId);
+                BinUtils.createCLSettings(conn);
+                String sql = "CREATE TABLE IF NOT EXISTS t_varchar(id int, " +
+                    "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));";
+                conn.executeSql(sql);
+            }
+        };
+        Thread t = new Thread(myRunnable);
+        t.start();
+        try {
+            t.join();
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /*
+     * run a thread that duplicates the JDBC connection numOfExtraConns times;
+     * these connections can be used by other threads down the road
+     */
+    private void createConnectionsStep() {
+        DatabaseConnection4Test conn = conns.get(selectedWorkerId);
+        Runnable myRunnable = new Runnable() {
+            @SuppressWarnings("resource")
+            public void run() {
+                for (int i = 0; i < numOfExtraConns; ++i) {
+                    String outputFileName = conn.getFileWriter().getFilename() + Integer.toString(i);
+                    OutFileWriter outputFile = new OutFileWriter();
+                    try {
+                        outputFile.openFile(outputFileName);
+                    } catch (IOException e) {
+                        e.printStackTrace();
+                        return;
+                    }
+                    DatabaseConnection4Test workerConn = new DatabaseConnection4Test(conn, outputFile);
+                    workerConn.reconnect();
+                    conns.add(workerConn);
+                }
+            }
+        };
+        Thread t = new Thread(myRunnable);
+        t.start();
+        try {
+            t.join();
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /**
+     * run threads that insert data into the table, and validate
+     * @param workerIds - for naming and comparison
+     * @param useNewConnectionFromPool if true, use connection[nextInt()] from the pool,
+     * otherwise use connection[selectedWorkerId]
+     * @param insertsCount num of rows to insert
+     */
+    private void insertStep(int[] workerIds, boolean useNewConnectionFromPool, int insertsCount) {
+        final int threadCount = workerIds.length;
+        AtomicInteger ai = new AtomicInteger();
+        ai.set(0);
+        List<Thread> threads = new ArrayList<>();
+        for (int threadIndex = 0; threadIndex < threadCount; ++threadIndex) {
+            final int index = threadIndex;
+            final int connIndex = (useNewConnectionFromPool) ? nextInt() : selectedWorkerId;
+            Runnable myRunnable = new Runnable() {
+                public void run(){
+                    DatabaseConnection4Test conn = conns.get(connIndex);
+                    for (int i = 0; i < insertsCount; ++i) {
+                        conn.executeSql(String.format(workerInsertSql, workerIds[index]));
+                    }
+                    ai.incrementAndGet();
+                    while (ai.get() < threadCount) { // spin until every worker has finished inserting
+                        ;
+                    }
+                    conn.fetchData(validateSql);
+                }
+            };
+            Thread t = new Thread(myRunnable);
+            threads.add(t);
+        }
+        for (Thread t: threads) {
+            t.start();
+        }
+        for (Thread t: threads) {
+            try {
+                t.join();
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+    }
+
+    /*
+     * run a thread that cleans up, and brings the DB to the state it was in prior to the test
+     */
+    private void cleanStep(DatabaseConnection4Test conn) {
+        Runnable myRunnable = new Runnable() {
+            public void run(){
+                String sql = "DROP TABLE t_varchar;";
+                conn.executeSql(sql);
+                BinUtils.cleanCLSettings(conn);
+            }
+        };
+        Thread t = new Thread(myRunnable);
+        t.start();
+        try {
+            t.join();
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+    }
+
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/NullEmptyStringBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/NullEmptyStringBin.java
new file mode 100644
index 000000000..b66288e99
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/NullEmptyStringBin.java
@@ -0,0 +1,70 @@
+package gauss.regress.jdbc.bintests;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.DatabaseConnection4Test;
+
+/**
+ * Testing client logic fields with null, empty string and regular values
+ * @author Avi Kessel
+ */
+public class NullEmptyStringBin implements IBinaryTest {
+
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        BinUtils.createCLSettings(conn);
+
+        conn.executeSql("CREATE TABLE IF NOT EXISTS t_not_cl(id INT, name varchar(50));");
+        String sqlWith2Params = "INSERT INTO t_not_cl (id, name) VALUES (?, ?);";
+        String sqlWithNoParams = "INSERT INTO t_not_cl (id) VALUES (2);";
+
+        List<String> parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("MyName");
+        conn.updateDataWithPrepareStmnt(sqlWith2Params, parameters);
+
+        conn.executeSql(sqlWithNoParams);
+
+        parameters = new ArrayList<>();
+        parameters.add("3");
+        parameters.add("");
+        conn.updateDataWithPrepareStmnt(sqlWith2Params, parameters);
+        parameters = new ArrayList<>();
+        parameters.add("4");
+        parameters.add(null);
+        conn.updateDataWithPrepareStmnt(sqlWith2Params, parameters);
+
+        conn.fetchData("select id is null, name is null from t_not_cl order by id;");
+        conn.fetchData("select * from t_not_cl order by id;");
+
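+        // repeat the same null / empty-string / regular-value checks on a table with an encrypted (client logic) column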
conn.executeSql("CREATE TABLE IF NOT EXISTS t_with_cl(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + sqlWith2Params = "INSERT INTO t_with_cl (id, name) VALUES (?, ?);"; + sqlWithNoParams = "INSERT INTO t_with_cl (id) VALUES (2);"; + + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("MyName"); + conn.updateDataWithPrepareStmnt(sqlWith2Params, parameters); + + conn.executeSql(sqlWithNoParams); + + parameters = new ArrayList<>(); + parameters.add("3"); + parameters.add(""); + conn.updateDataWithPrepareStmnt(sqlWith2Params, parameters); + parameters = new ArrayList<>(); + parameters.add("4"); + parameters.add(null); + conn.updateDataWithPrepareStmnt(sqlWith2Params, parameters); + + conn.fetchData("select id is null, name is null from t_with_cl order by id;"); + conn.fetchData("select * from t_with_cl order by id;"); + + conn.executeSql("DROP TABLE t_not_cl;"); + conn.executeSql("DROP TABLE t_with_cl;"); + BinUtils.dropCLSettings(conn); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ParameterMetaDataTestBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ParameterMetaDataTestBin.java new file mode 100644 index 000000000..264230bbb --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ParameterMetaDataTestBin.java @@ -0,0 +1,85 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.*; + +/** + * Test the Parameter metadata API on table Run it 3 times: On table with client + * logic column On table with no client logic On table with no client logic with + * no CL in the connection string + */ +public class ParameterMetaDataTestBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn) { + conn.getFileWriter().writeLine(" "); + conn.getFileWriter().writeLine("Test with client logic table"); + MetadtaUtils.create_client_logic_table(conn); + String query = "Select * from metadata_client_logic_test_tbl where id = ? or char_col != ?;"; + runTest(conn, query); + MetadtaUtils.drop_client_logic_tbl_data(conn); + + conn.getFileWriter().writeLine(" "); + conn.getFileWriter().writeLine("Test with client logic connection on regular table"); + query = "Select * from metadata_simple_test_tbl where id = ? 
or char_col != ?;"; + MetadtaUtils.create_simple_table(conn); + runTest(conn, query); + MetadtaUtils.drop_simple_tbl_data(conn); + + conn.getFileWriter().writeLine(" "); + conn.getFileWriter().writeLine("Test with regular connection on regular table"); + conn.connectWithNoCL(); + MetadtaUtils.create_simple_table(conn); + runTest(conn, query); + MetadtaUtils.drop_simple_tbl_data(conn); + } + + /** + * Encapsulate the test into a function to run it twice + * @param conn JDBC connection + * @param query SQL to use + * @throws SQLException + */ + private void runTest(DatabaseConnection4Test conn, String query) { + try { + PreparedStatement pstmt = conn.getConnection().prepareStatement(query); + ParameterMetaData paramMetaData = pstmt.getParameterMetaData(); + if (paramMetaData == null) { + conn.getFileWriter().writeLine("there is no support for the ParameterMetaData"); + } else { + conn.getFileWriter().writeLine("there is a support for the ParameterMetaData"); + int paramCount = paramMetaData.getParameterCount(); + conn.getFileWriter().writeLine("paramCount=" + paramCount); + for (int param = 1; param <= paramCount; param++) { + conn.getFileWriter().writeLine("param number=" + param); + int paramMode = paramMetaData.getParameterMode(param); + conn.getFileWriter().writeLine("param mode=" + paramMode); + if (paramMode == ParameterMetaData.parameterModeOut) { + conn.getFileWriter().writeLine("the parameter's mode is OUT."); + } else if (paramMode == ParameterMetaData.parameterModeIn) { + conn.getFileWriter().writeLine("the parameter's mode is IN."); + } else if (paramMode == ParameterMetaData.parameterModeInOut) { + conn.getFileWriter().writeLine("the parameter's mode is INOUT."); + } else { + conn.getFileWriter().writeLine("the mode of a parameter is unknown."); + } + conn.getFileWriter().writeLine("param type = " + paramMetaData.getParameterType(param)); + conn.getFileWriter().writeLine("param class name = " + paramMetaData.getParameterClassName(param)); + conn.getFileWriter().writeLine("param count = " + paramMetaData.getParameterCount()); + conn.getFileWriter().writeLine("param precision = " + paramMetaData.getPrecision(param)); + conn.getFileWriter().writeLine("param scale = " + paramMetaData.getScale(param)); + conn.getFileWriter().writeLine("param isNullable = " + paramMetaData.isNullable(param)); + conn.getFileWriter().writeLine("param isSugned = " + paramMetaData.isSigned(param)); + conn.getFileWriter().writeLine(" "); + conn.getFileWriter().writeLine("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + conn.getFileWriter().writeLine(" "); + } + } + pstmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentIntBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentIntBin.java new file mode 100644 index 000000000..38c35534c --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentIntBin.java @@ -0,0 +1,99 @@ +package gauss.regress.jdbc.bintests; +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatamentIntBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_int(key int," + + "_smallint_ smallint ENCRYPTED WITH 
(COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_int_ int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_bigint_ bigint ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + conn.getFileWriter().writeLine("*******inserting data to the int table"); + List parameters; + String sqlInsert; + sqlInsert = "INSERT INTO t_int (key, _smallint_, _int_, _bigint_) VALUES (?,?,?,?);"; + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("-3333"); + parameters.add("0"); + parameters.add("3333"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("-1234"); + parameters.add("256"); + parameters.add("1234"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + conn.getFileWriter().writeLine("*************inserting data verification"); + String sqlSelect; + sqlSelect = "select * from t_int where (_smallint_ = ? and _int_ =?) or _bigint_ =? order by key"; + parameters = new ArrayList<>(); + parameters.add("-3333"); + parameters.add("0"); + parameters.add("1234"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("***************updating data"); + String sqlUpdate; + sqlUpdate = "Update t_int set _smallint_= ? , _int_= ? where _bigint_ = ? or key = ?"; + parameters = new ArrayList<>(); + parameters.add("5555"); + parameters.add("5555"); + parameters.add("1234"); + parameters.add("2"); + conn.updateDataWithPrepareStmnt(sqlUpdate, parameters); + + conn.getFileWriter().writeLine("**************updating data verification"); + sqlSelect = "select * from t_int where _smallint_ = ? and _int_ =?"; + parameters = new ArrayList<>(); + parameters.add("5555"); + parameters.add("5555"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("*************deleting data"); + String sqlDelete; + sqlDelete = "delete from t_int where _smallint_= ? and _bigint_= ? and _int_ = ?"; + parameters = new ArrayList<>(); + parameters.add("5555"); + parameters.add("1234"); + parameters.add("5555"); + conn.updateDataWithPrepareStmnt(sqlDelete, parameters); + + conn.getFileWriter().writeLine("*******************deleting data verification"); + sqlSelect = "select * from t_int where _smallint_= ? and _bigint_= ? 
and _int_ = ?"; + parameters = new ArrayList<>(); + parameters.add("5555"); + parameters.add("1234"); + parameters.add("5555"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + sqlSelect = "select * from t_int;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("*************deleting all data"); + sqlDelete = "delete from t_int"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlDelete, parameters); + + conn.getFileWriter().writeLine("**************deleting all data verification"); + sqlSelect = "select * from t_int;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + + conn.executeSql("drop table t_int;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentNumericBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentNumericBin.java new file mode 100644 index 000000000..319fb097b --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentNumericBin.java @@ -0,0 +1,108 @@ +package gauss.regress.jdbc.bintests; +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatamentNumericBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_numeric(key int," + + "_real_ real ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_decimal_ decimal ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_doubleprecision_ double precision ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_numeric_ numeric ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + conn.getFileWriter().writeLine("*******inserting data to the serial table"); + List parameters; + String sqlInsert; + sqlInsert = "INSERT INTO t_numeric (key, _real_, _decimal_, _numeric_, _doubleprecision_) VALUES (?,?,?,?,?);"; + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("1234.1234"); + parameters.add("5678.5678"); + parameters.add("91011.91011"); + parameters.add("12131415.12131415"); + + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("111213.111213"); + parameters.add("141516.141516"); + parameters.add("17181920.17181920"); + parameters.add("2122232425.2122232425"); + + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + conn.getFileWriter().writeLine("*************inserting data verification"); + String sqlSelect; + sqlSelect = "select * from t_numeric where (_real_ = ? and _decimal_ =?) or (_numeric_ =? and _doubleprecision_= ?) order by key;"; + parameters = new ArrayList<>(); + parameters.add("1234.1234"); + parameters.add("5678.5678"); + parameters.add("17181920.17181920"); + parameters.add("2122232425.2122232425"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("***************updating data"); + String sqlUpdate; + sqlUpdate = "Update t_numeric set _real_= ? , _decimal_= ? where _numeric_ = ? 
or key = ?"; + parameters = new ArrayList<>(); + parameters.add("212223.212223"); + parameters.add("24252627.24252627"); + parameters.add("17181920.17181920"); + parameters.add("2"); + conn.updateDataWithPrepareStmnt(sqlUpdate, parameters); + + conn.getFileWriter().writeLine("**************updating data verification"); + sqlSelect = "select * from t_numeric where _real_ = ? and _decimal_ =?"; + parameters = new ArrayList<>(); + parameters.add("212223.212223"); + parameters.add("24252627.24252627"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("*************deleting data"); + String sqlDelete; + sqlDelete = "delete from t_numeric where _real_= ? and _numeric_= ? and _decimal_ = ? and _doubleprecision_ =?"; + parameters = new ArrayList<>(); + parameters.add("212223.212223"); + parameters.add("17181920.17181920"); + parameters.add("24252627.24252627"); + parameters.add("2122232425.2122232425"); + + conn.updateDataWithPrepareStmnt(sqlDelete, parameters); + + conn.getFileWriter().writeLine("*******************deleting data verification"); + sqlSelect = "select * from t_numeric where _real_= ? and _numeric_= ? and _decimal_ = ? and _doubleprecision_ =?"; + parameters = new ArrayList<>(); + parameters.add("212223.212223"); + parameters.add("17181920.17181920"); + parameters.add("24252627.24252627"); + parameters.add("2122232425.2122232425"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + sqlSelect = "select * from t_numeric;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("*************deleting all data"); + sqlDelete = "delete from t_numeric;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlDelete, parameters); + + conn.getFileWriter().writeLine("**************deleting all data verification"); + sqlSelect = "select * from t_numeric;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + + conn.executeSql("drop table t_numeric;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentStringBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentStringBin.java new file mode 100644 index 000000000..922cb216d --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentStringBin.java @@ -0,0 +1,102 @@ +package gauss.regress.jdbc.bintests; +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatamentStringBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_string(key int," + + "_varchar_ varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_char_ char(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "_text_ text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + conn.getFileWriter().writeLine("*******inserting data to the serial table"); + List parameters; + String sqlInsert; + sqlInsert = "INSERT INTO t_string (key, _varchar_, _char_, _text_) VALUES (?,?,?,?);"; + parameters = new ArrayList<>(); + 
parameters.add("1"); + parameters.add("varchar data"); + parameters.add("char data"); + parameters.add("text data"); + + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("varchar data 2"); + parameters.add("char data 2"); + parameters.add("text data 2"); + + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + conn.getFileWriter().writeLine("*************inserting data verification"); + String sqlSelect; + sqlSelect = "select * from t_string where (_varchar_ = ? and _char_ = ?) or _text_ =? order by key"; + parameters = new ArrayList<>(); + parameters.add("varchar data"); + parameters.add("char data"); + parameters.add("text data 2"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("***************updating data"); + String sqlUpdate; + sqlUpdate = "Update t_string set _varchar_ =? , _char_ = ? where _text_ = ? or key = ?"; + parameters = new ArrayList<>(); + parameters.add("varchar updated data"); + parameters.add("char updated data"); + parameters.add("text data 2"); + parameters.add("2"); + conn.updateDataWithPrepareStmnt(sqlUpdate, parameters); + + conn.getFileWriter().writeLine("**************updating data verification"); + sqlSelect = "select * from t_string where _varchar_ = ? and _char_ = ?"; + parameters = new ArrayList<>(); + parameters.add("varchar updated data"); + parameters.add("char updated data"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("*************deleting data"); + String sqlDelete; + sqlDelete = "delete from t_string where _varchar_ = ? and _text_ = ? and _char_ = ? "; + parameters = new ArrayList<>(); + parameters.add("varchar updated data"); + parameters.add("text data 2"); + parameters.add("char updated data"); + + conn.updateDataWithPrepareStmnt(sqlDelete, parameters); + + conn.getFileWriter().writeLine("*******************deleting data verification"); + sqlSelect = "select * from t_string where _varchar_ = ? and _text_ = ? 
and _char_ = ?"; + parameters = new ArrayList<>(); + parameters.add("varchar updated data"); + parameters.add("text data 2"); + parameters.add("char updated data"); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + sqlSelect = "select * from t_string"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.getFileWriter().writeLine("*************deleting all data"); + sqlDelete = "delete from t_string"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlDelete, parameters); + + conn.getFileWriter().writeLine("**************deleting all data verification"); + sqlSelect = "select * from t_string"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + + conn.executeSql("drop table t_string;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentVarCharBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentVarCharBin.java new file mode 100644 index 000000000..c5aeff942 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentVarCharBin.java @@ -0,0 +1,31 @@ +package gauss.regress.jdbc.bintests; + +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatamentVarCharBin implements IBinaryTest{ + + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_varchar(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + List parameters; + String sqlInsert = "INSERT INTO t_varchar (id, name, address) VALUES (?,?,?);"; + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("MyName"); + parameters.add("MyAddress"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + conn.fetchData("SELECT * from t_varchar ORDER BY id;"); + + conn.executeSql("drop table t_varchar;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentWithHardCodedValuesBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentWithHardCodedValuesBin.java new file mode 100644 index 000000000..05bf96122 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatamentWithHardCodedValuesBin.java @@ -0,0 +1,53 @@ +package gauss.regress.jdbc.bintests; + +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatamentWithHardCodedValuesBin implements IBinaryTest { + + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_varchar(id INT, " + + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + List parameters; + String 
sqlInsert = "INSERT INTO t_varchar (id, name, address) VALUES (?,?,?);"; + + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("MyName"); + parameters.add("MyAddress"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("MyName2"); + parameters.add("MyAddress2"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + sqlInsert = "INSERT INTO t_varchar (id, name, address) VALUES (?, 'MyName3',?);"; + parameters = new ArrayList<>(); + parameters.add("3"); + parameters.add("MyAddress3"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + conn.fetchData("SELECT * from t_varchar ORDER BY id;"); + + parameters = new ArrayList<>(); + parameters.add("MyName"); + conn.fetchDataWithPrepareStmnt("select * from t_varchar where name = ? and address = 'MyAddress';", parameters); + + conn.fetchData("SELECT * from t_varchar ORDER BY id;"); + conn.executeSql("drop table t_varchar;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + + + } + +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementDeleteResultSetBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementDeleteResultSetBin.java new file mode 100644 index 000000000..b144f82b7 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementDeleteResultSetBin.java @@ -0,0 +1,68 @@ +package gauss.regress.jdbc.bintests; +import java.util.ArrayList; +import java.util.List; +import java.sql.ResultSet; +import java.sql.Statement; +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatementDeleteResultSetBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn){ + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_delete_rows_tbl(key int PRIMARY KEY ," + + "col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + conn.getFileWriter().writeLine("*******inserting data to the t_delete_rows_tbl"); + List parameters; + String sqlInsert; + sqlInsert = "INSERT INTO t_delete_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?);"; + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("this_row_will_be_deleted"); + parameters.add("1"); + parameters.add("1.1"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("this_row_will_not_deleted"); + parameters.add("2"); + parameters.add("2.2"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + String sqlSelect; + conn.getFileWriter().writeLine("*************verify data before the delete"); + sqlSelect = "select * from t_delete_rows_tbl order by key;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + try { + Statement stmt= conn.getConnection().createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + sqlSelect = "select * from t_delete_rows_tbl;"; + ResultSet rs = stmt.executeQuery(sqlSelect); + while(rs.next()) { + if(rs.getInt("key")==1) { + rs.deleteRow(); + + } + 
}
+            }
+        }
+        catch (Exception e) {
+            System.out.println("Failed to delete result row\n" + e);
+        }
+        conn.getFileWriter().writeLine("*************verifying the deleted data");
+        sqlSelect = "select * from t_delete_rows_tbl;";
+        parameters = new ArrayList<>();
+        conn.fetchDataWithPrepareStmnt(sqlSelect, parameters);
+
+        conn.executeSql("drop table t_delete_rows_tbl;");
+        conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;");
+        conn.executeSql("DROP CLIENT MASTER KEY cmk1;");
+
+    }
+}
+
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementInsertResultSetBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementInsertResultSetBin.java
new file mode 100644
index 000000000..31aea2e86
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementInsertResultSetBin.java
@@ -0,0 +1,71 @@
+package gauss.regress.jdbc.bintests;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.DatabaseConnection4Test;
+
+public class PrepareStatementInsertResultSetBin implements IBinaryTest {
+    @Override
+    public void execute(DatabaseConnection4Test conn){
+        BinUtils.createCLSettings(conn);
+        conn.executeSql("CREATE TABLE IF NOT EXISTS "
+            + "t_insert_rows_tbl(key int PRIMARY KEY ,"
+            + "col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),"
+            + "col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),"
+            + "col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));");
+
+        conn.getFileWriter().writeLine("*******inserting data to the t_insert_rows_tbl");
+        List<String> parameters;
+        String sqlInsert;
+        sqlInsert = "INSERT INTO t_insert_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?);";
+        parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("this_row_exists_before_insert");
+        parameters.add("1");
+        parameters.add("1.1");
+        conn.updateDataWithPrepareStmnt(sqlInsert, parameters);
+
+        parameters = new ArrayList<>();
+        parameters.add("2");
+        parameters.add("this_row_exists_before_insert");
+        parameters.add("2");
+        parameters.add("2.2");
+        conn.updateDataWithPrepareStmnt(sqlInsert, parameters);
+
+        String sqlSelect;
+        conn.getFileWriter().writeLine("*************verify data before the insert");
+        sqlSelect = "select * from t_insert_rows_tbl order by key;";
+        parameters = new ArrayList<>();
+        conn.fetchDataWithPrepareStmnt(sqlSelect, parameters);
+
+        try {
+            Statement stmt = conn.getConnection().createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+            sqlSelect = "select * from t_insert_rows_tbl order by key;";
+            ResultSet rs = stmt.executeQuery(sqlSelect);
+            rs.last();
+            int id = rs.getInt("key") + 1;
+            // appending a new row through the updatable result set
+            rs.moveToInsertRow();
+            rs.updateInt("key", id);
+            rs.updateInt("col_int", id);
+            rs.updateString("col_varchar", "new_data_was_inserted");
+            rs.updateFloat("col_float", 3);
+            rs.insertRow();
+            rs.beforeFirst();
+        }
+        catch (Exception e) {
+            System.out.println("Failed to insert result row\n" + e);
+        }
+        conn.getFileWriter().writeLine("*************verifying the insert data");
+        sqlSelect = "select * from t_insert_rows_tbl order by key desc;";
+        parameters = new ArrayList<>();
+        conn.fetchDataWithPrepareStmnt(sqlSelect, parameters);
+
+        conn.executeSql("drop table t_insert_rows_tbl;");
+        conn.executeSql("DROP COLUMN 
ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + } +} + diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementUniqueBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementUniqueBin.java new file mode 100644 index 000000000..9658bc985 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementUniqueBin.java @@ -0,0 +1,37 @@ +package gauss.regress.jdbc.bintests; + +import java.util.ArrayList; +import java.util.List; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatementUniqueBin implements IBinaryTest{ + + @Override + public void execute(DatabaseConnection4Test conn) { + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS t_unique(id INT, " + + "name text UNIQUE ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + String sqlInsert = "INSERT INTO t_unique values (?,?)"; + List parameters = new ArrayList<>(); + parameters.add("5"); + parameters.add("John"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("Moses"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("6"); + parameters.add("John"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + conn.fetchData("SELECT * FROM t_unique order by id"); + conn.executeSql("DROP TABLE t_unique"); + BinUtils.dropCLSettings(conn); + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementUpdateResultSetBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementUpdateResultSetBin.java new file mode 100644 index 000000000..c40ee8fac --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementUpdateResultSetBin.java @@ -0,0 +1,70 @@ +package gauss.regress.jdbc.bintests; +import java.util.ArrayList; +import java.util.List; +import java.sql.ResultSet; +import java.sql.Statement; +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatementUpdateResultSetBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn){ + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_update_rows_tbl(key int PRIMARY KEY ," + + "col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + conn.getFileWriter().writeLine("*******inserting data to the t_update_rows_tbl"); + List parameters; + String sqlInsert; + sqlInsert = "INSERT INTO t_update_rows_tbl (key, col_varchar, col_int, col_float) VALUES (?,?,?,?);"; + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("this_row_will_be_updated"); + parameters.add("1"); + parameters.add("1.1"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("this_row_will_not_updated"); + parameters.add("2"); + parameters.add("2.2"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + String sqlSelect; + 
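// rows are modified below through an updatable (CONCUR_UPDATABLE) ResultSet rather than an UPDATE statement +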
conn.getFileWriter().writeLine("*************verify data before the update"); + sqlSelect = "select * from t_update_rows_tbl order by key"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + try { + Statement stmt= conn.getConnection().createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + sqlSelect = "select * from t_update_rows_tbl order by key"; + ResultSet rs = stmt.executeQuery(sqlSelect); + while(rs.next()) { + if(rs.getInt("key")==1) { + //updating the table columns data + rs.updateInt("col_int", 10); + rs.updateString("col_varchar", "this_row_was_updated"); + rs.updateFloat("col_float", 10); + rs.updateRow(); + } + } + } + catch (Exception e) { + System.out.println("Failed to update result row\n"+ e); + } + conn.getFileWriter().writeLine("*************verifying the updated data"); + sqlSelect = "select * from t_update_rows_tbl order by key"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.executeSql("drop table t_update_rows_tbl;"); + conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;"); + conn.executeSql("DROP CLIENT MASTER KEY cmk1;"); + + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementViewBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementViewBin.java new file mode 100644 index 000000000..c8b7ea94d --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/PrepareStatementViewBin.java @@ -0,0 +1,64 @@ +package gauss.regress.jdbc.bintests; +import java.util.ArrayList; +import java.util.List; +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class PrepareStatementViewBin implements IBinaryTest { + @Override + public void execute(DatabaseConnection4Test conn){ + BinUtils.createCLSettings(conn); + conn.executeSql("CREATE TABLE IF NOT EXISTS " + + "t_table_4_view(key int PRIMARY KEY ," + + "col_varchar varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "col_int int ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," + + "col_float float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));"); + + conn.getFileWriter().writeLine("*******inserting data to the t_table_4_view"); + List parameters; + String sqlInsert; + sqlInsert = "INSERT INTO t_table_4_view (key, col_varchar, col_int, col_float) VALUES (?,?,?,?);"; + parameters = new ArrayList<>(); + parameters.add("1"); + parameters.add("data_4_view"); + parameters.add("1"); + parameters.add("1.1"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + parameters = new ArrayList<>(); + parameters.add("2"); + parameters.add("data_4_view2"); + parameters.add("2"); + parameters.add("2.2"); + conn.updateDataWithPrepareStmnt(sqlInsert, parameters); + + String sqlSelect; + conn.getFileWriter().writeLine("*************verify data before creating the view"); + sqlSelect = "select * from t_table_4_view order by key;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, parameters); + + conn.executeSql("CREATE View " + + "v_view_from_table_4_view as " + + "select * from t_table_4_view;"); + conn.getFileWriter().writeLine("*************verifying that new view was successfully created"); + sqlSelect = "select * from v_view_from_table_4_view order by key;"; + parameters = new ArrayList<>(); + conn.fetchDataWithPrepareStmnt(sqlSelect, 
parameters);
+
+        conn.getFileWriter().writeLine("**************verifying view creation");
+        sqlSelect = "select *, col_int from v_view_from_table_4_view where col_float = ? or key = ?";
+        parameters = new ArrayList<>();
+        parameters.add("2.2");
+        parameters.add("1");
+        conn.fetchDataWithPrepareStmnt(sqlSelect, parameters);
+
+        conn.executeSql("drop view v_view_from_table_4_view;");
+        conn.executeSql("drop table t_table_4_view;");
+        conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;");
+        conn.executeSql("DROP CLIENT MASTER KEY cmk1;");
+
+    }
+}
+
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultSetFetchNoCLBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultSetFetchNoCLBin.java
new file mode 100644
index 000000000..928440f8c
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultSetFetchNoCLBin.java
@@ -0,0 +1,37 @@
+package gauss.regress.jdbc.bintests;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.DatabaseConnection4Test;
+
+public class ResultSetFetchNoCLBin implements IBinaryTest {
+
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+
+        String sql;
+        sql = "CREATE TABLE IF NOT EXISTS t_varchar(id INT, name varchar(50), address varchar(50));";
+        conn.executeSql(sql);
+        // Create records
+        for (int i = 0; i < 65; ++i) {
+            sql = "INSERT INTO t_varchar (id, name, address) VALUES (" + i + ", 'MyName', 'MyAddress')";
+            conn.executeSql(sql);
+        }
+        try {
+            conn.getConnection().setAutoCommit(false); // auto commit must be off for cursor-based fetching
+            Statement st = conn.getConnection().createStatement();
+            st.setFetchSize(10); // set the client fetch size to 10
+            ResultSet rs = st.executeQuery("select * from t_varchar order by id");
+            conn.printRS4Test(rs); // loop through the entire data set (should perform the initial call & 6 more fetches)
+            st.close();
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+        conn.executeSql("DROP table t_varchar");
+        conn.fetchData("select * from gs_column_keys");
+    }
+
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultSetMetaDataTestBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultSetMetaDataTestBin.java
new file mode 100644
index 000000000..b9ae91d48
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultSetMetaDataTestBin.java
@@ -0,0 +1,73 @@
+package gauss.regress.jdbc.bintests;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.*;
+
+/**
+ * Test the ResultSetMetaData API. Run it three times: on a table with a client
+ * logic column, on a table with no client logic, and on a table with no client logic
+ * and no CL in the connection string
+ */
+public class ResultSetMetaDataTestBin implements IBinaryTest {
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        MetadtaUtils.create_client_logic_table(conn);
+        MetadtaUtils.create_simple_table(conn);
+        try {
+            conn.getFileWriter().writeLine("Testing table with client logic ...");
+            String query = "Select * from metadata_client_logic_test_tbl";
+            runTest(conn, query);
+            MetadtaUtils.drop_client_logic_tbl_data(conn);
+            conn.getFileWriter().writeLine("");
+            conn.getFileWriter().writeLine("Testing table with no client logic ...");
+            query = "Select * from metadata_simple_test_tbl";
metadata_simple_test_tbl"; + runTest(conn, query); + conn.getFileWriter().writeLine("Testing table with no client logic and with no client logic in connection string ..."); + conn.connectWithNoCL(); + runTest(conn, query); + } catch (Exception e) { + conn.getFileWriter().writeLine("Failed ResutsetMetadata"); + e.printStackTrace(); + } + MetadtaUtils.drop_simple_tbl_data(conn); + } + + /** + * Encapsulate the test into a function to run it twice + * + * @param conn JDBC connection + * @param query SQL to use + * @throws SQLException + */ + private void runTest(DatabaseConnection4Test conn, String query) throws SQLException { + Statement stmt = conn.getConnection().createStatement(); + ResultSet rs = stmt.executeQuery(query); + ResultSetMetaData resultSetMetaData = rs.getMetaData(); + for (int i = 1; i < resultSetMetaData.getColumnCount() + 1; ++i) { + conn.getFileWriter().writeLine("Index: " + i + " column name: " + resultSetMetaData.getColumnName(i)); + conn.getFileWriter().writeLine(" getColumnDisplaySize is: " + resultSetMetaData.getColumnDisplaySize(i)); + conn.getFileWriter().writeLine(" getColumnClassName is: " + resultSetMetaData.getColumnClassName(i)); + conn.getFileWriter().writeLine(" getColumnLabel is: " + resultSetMetaData.getColumnLabel(i)); + conn.getFileWriter().writeLine(" getColumnType is: " + resultSetMetaData.getColumnType(i)); + conn.getFileWriter().writeLine(" getColumnTypeName is: " + resultSetMetaData.getColumnTypeName(i)); + conn.getFileWriter().writeLine(" getPrecision is: " + resultSetMetaData.getPrecision(i)); + conn.getFileWriter().writeLine(" getScale is: " + resultSetMetaData.getScale(i)); + conn.getFileWriter().writeLine(" isNullable is: " + resultSetMetaData.isNullable(i)); + conn.getFileWriter().writeLine(" isNullable is: " + resultSetMetaData.isAutoIncrement(i)); + conn.getFileWriter().writeLine(" isCaseSensitive is: " + resultSetMetaData.isCaseSensitive(i)); + conn.getFileWriter().writeLine(" isCurrency is: " + resultSetMetaData.isCurrency(i)); + conn.getFileWriter().writeLine(" isReadOnly is: " + resultSetMetaData.isReadOnly(i)); + conn.getFileWriter().writeLine(" isSigned is: " + resultSetMetaData.isSigned(i)); + conn.getFileWriter().writeLine(" isWritable is: " + resultSetMetaData.isWritable(i)); + conn.getFileWriter().writeLine(" isDefinitelyWritable is: " + resultSetMetaData.isDefinitelyWritable(i)); + conn.getFileWriter().writeLine(" isSearchable is: " + resultSetMetaData.isSearchable(i)); + conn.getFileWriter().writeLine(" "); + conn.getFileWriter().writeLine("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); + conn.getFileWriter().writeLine(" "); + } + } +} \ No newline at end of file diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultsetFetchBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultsetFetchBin.java new file mode 100644 index 000000000..c69ec85f2 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/ResultsetFetchBin.java @@ -0,0 +1,49 @@ +package gauss.regress.jdbc.bintests; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import gauss.regress.jdbc.IBinaryTest; +import gauss.regress.jdbc.utils.DatabaseConnection4Test; + +public class ResultsetFetchBin implements IBinaryTest { + + /** + * This test is for validating the result set cursor - see https://jdbc.postgresql.org/documentation/head/query.html#query-with-cursor + * It is specifically meant to test the PgResultset next function in case were 
+     * with connection.getQueryExecutor().fetch
+     */
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+
+        String sql;
+        BinUtils.createCLSettings(conn);
+        sql = "CREATE TABLE IF NOT EXISTS t_varchar(id INT, "
+            + "name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC),"
+            + "address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC));";
+        conn.executeSql(sql);
+        // Create records
+        for (int i = 0; i < 65; ++i) {
+            sql = "INSERT INTO t_varchar (id, name, address) VALUES (" + i + ", 'MyName', 'MyAddress');";
+            conn.executeSql(sql);
+        }
+        try {
+            conn.getConnection().setAutoCommit(false); // auto commit must be off for cursor-based fetching
+            Statement st = conn.getConnection().createStatement();
+            st.setFetchSize(10); // set the client fetch size to 10
+            ResultSet rs = st.executeQuery("select * from t_varchar order by id;");
+            conn.printRS4Test(rs); // loop through the entire data set (should perform the initial call & 6 more fetches)
+            st.close();
+            conn.getConnection().setAutoCommit(true); // set auto commit back on so the test cleanup can run
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+
+        conn.executeSql("DROP table t_varchar;");
+        conn.executeSql("DROP COLUMN ENCRYPTION KEY cek1;");
+        conn.executeSql("DROP CLIENT MASTER KEY cmk1;");
+        conn.fetchData("select * from gs_column_keys;");
+    }
+
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/SimpleQueryBin.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/SimpleQueryBin.java
new file mode 100644
index 000000000..bd4c59de0
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/SimpleQueryBin.java
@@ -0,0 +1,181 @@
+package gauss.regress.jdbc.bintests;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.*;
+
+/**
+ * Test simple SQL queries. Run it three times: on a table with a client logic column,
+ * on a table with no client logic, and on a table with no client logic and no CL in the
+ * connection string
+ */
+public class SimpleQueryBin implements IBinaryTest {
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        MetadtaUtils.create_client_logic_table(conn);
+        MetadtaUtils.create_simple_table(conn);
+        try {
+            String query;
+            conn.getFileWriter().writeLine("Testing table with client logic ...");
+            query = "select * from metadata_client_logic_test_tbl;";
+            runTest(conn, query);
+            MetadtaUtils.drop_client_logic_tbl_data(conn);
+            conn.getFileWriter().writeLine("");
+            conn.getFileWriter().writeLine("Testing table with no client logic ...");
+            query = "select * from metadata_simple_test_tbl;";
+            runTest(conn, query);
+            conn.getFileWriter().writeLine("Testing table with no client logic and with no client logic in connection string ...");
+            conn.connectWithNoCL();
+            runTest(conn, query);
+        } catch (Exception e) {
+            conn.getFileWriter().writeLine("Failed to execute simple sql queries");
+            e.printStackTrace();
+        }
+        MetadtaUtils.drop_simple_tbl_data(conn);
+    }
+
+    /**
+     * Encapsulate the test into a function to run it three times
+     *
+     * @param conn JDBC connection
+     * @param query SQL to use
+     * @throws SQLException
+     */
+    private void runTest(DatabaseConnection4Test conn, String query) throws SQLException, NullPointerException {
+        Statement stmt = conn.getConnection().createStatement();
+        if (stmt == null) {
+            conn.getFileWriter().writeLine("Error creating 
statement"); + throw new NullPointerException(); + } + stmt.execute(query); + if (!stmt.isClosed()) { + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setCursorName()"); + stmt.setCursorName("non_existant_cursor"); + conn.getFileWriter().writeLine("setCursorName() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getWarnings()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getWarnings())); + conn.getFileWriter().writeLine("getWarnings() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* clearWarnings()"); + stmt.clearWarnings(); + conn.getFileWriter().writeLine("clearWarnings() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getWarnings()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getWarnings())); + conn.getFileWriter().writeLine("getWarnings() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setEscapeProcessing()"); + stmt.setEscapeProcessing(true); + conn.getFileWriter().writeLine("setEscapeProcessing() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setFetchDirection()"); + stmt.setFetchDirection(ResultSet.FETCH_FORWARD); + conn.getFileWriter().writeLine("setFetchDirection() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getFetchDirection()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getFetchDirection())); + conn.getFileWriter().writeLine("getFetchDirection() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setFetchSize()"); + stmt.setFetchSize(20); + conn.getFileWriter().writeLine("setFetchSize() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getFetchSize()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getFetchSize())); + conn.getFileWriter().writeLine("getFetchSize() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setMaxFieldSize()"); + stmt.setMaxFieldSize(40); + conn.getFileWriter().writeLine("setMaxFieldSize() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getMaxFieldSize()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getMaxFieldSize())); + conn.getFileWriter().writeLine("getMaxFieldSize() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setMaxRows()"); + stmt.setMaxRows(50); + conn.getFileWriter().writeLine("setMaxRows() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getMaxRows()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getMaxRows())); + conn.getFileWriter().writeLine("getMaxRows() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setPoolable()"); + stmt.setPoolable(false); + conn.getFileWriter().writeLine("setPoolable() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* isPoolable()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.isPoolable())); + conn.getFileWriter().writeLine("isPoolable() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* setQueryTimeout()"); + stmt.setQueryTimeout(100); + 
conn.getFileWriter().writeLine("setQueryTimeout() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getQueryTimeout()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getQueryTimeout())); + conn.getFileWriter().writeLine("getQueryTimeout() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getGeneratedKeys()"); + stmt.getGeneratedKeys(); + conn.getFileWriter().writeLine("getGeneratedKeys() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getMoreResults()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getMoreResults())); + conn.getFileWriter().writeLine("getMoreResults() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getResultSet()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getResultSet())); + conn.getFileWriter().writeLine("getResultSet() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getResultSetConcurrency()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getResultSetConcurrency())); + conn.getFileWriter().writeLine("getResultSetConcurrency() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getResultSetHoldability()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getResultSetHoldability())); + conn.getFileWriter().writeLine("getResultSetHoldability() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getResultSetType()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getResultSetType())); + conn.getFileWriter().writeLine("getResultSetType() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getResultSetType()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getResultSetType())); + conn.getFileWriter().writeLine("getResultSetType() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* getUpdateCount()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.getUpdateCount())); + conn.getFileWriter().writeLine("getUpdateCount() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* cancel()"); + stmt.cancel(); + conn.getFileWriter().writeLine("cancel() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* isCloseOnCompletion()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.isCloseOnCompletion())); + conn.getFileWriter().writeLine("isCloseOnCompletion() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* closeOnCompletion()"); + stmt.closeOnCompletion(); + conn.getFileWriter().writeLine("closeOnCompletion() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* close()"); + stmt.close(); + conn.getFileWriter().writeLine("close() executed successfully "); + conn.getFileWriter().writeLine(""); + conn.getFileWriter().writeLine("* isClosed()"); + conn.getFileWriter().writeLine(String.valueOf(stmt.isClosed())); + conn.getFileWriter().writeLine("isClosed() executed successfully "); + conn.getFileWriter().writeLine(""); + } else { + throw new SQLException(); + } + } +} + diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/StmtBatchClientLogicBin.java 
b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/StmtBatchClientLogicBin.java
new file mode 100644
index 000000000..2c9e02d4b
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/bintests/StmtBatchClientLogicBin.java
@@ -0,0 +1,59 @@
+package gauss.regress.jdbc.bintests;
+
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import gauss.regress.jdbc.IBinaryTest;
+import gauss.regress.jdbc.utils.*;
+
+/**
+ * Test the batch queries on the client logic table
+ */
+public class StmtBatchClientLogicBin implements IBinaryTest {
+    @Override
+    public void execute(DatabaseConnection4Test conn) {
+        MetadtaUtils.create_client_logic_table(conn);
+        int[] count;
+        String query;
+        try {
+            Statement stmt = conn.getConnection().createStatement();
+            List<String> parameters;
+            query = "Select * from metadata_client_logic_test_tbl order by id desc;";
+            stmt.addBatch(query);
+            query = "insert into metadata_client_logic_test_tbl values (10,20,'new data', 5.5 );";
+            stmt.addBatch(query);
+            query = "insert into metadata_client_logic_test_tbl values (30,-40,'new data 2', -6.6 );";
+            stmt.addBatch(query);
+            query = "update metadata_client_logic_test_tbl set char_col = 'new_data was updated' where key = 10;";
+            stmt.addBatch(query);
+            count = stmt.executeBatch();
+            conn.getConnection().commit();
+            conn.getFileWriter().writeLine("verifying the batch cmd");
+            query = "select * from metadata_client_logic_test_tbl order by id desc;";
+            parameters = new ArrayList<>();
+            conn.fetchDataWithPrepareStmnt(query, parameters);
+        }
+        catch (Exception e) {
+            conn.getFileWriter().writeLine("Failed to execute batch" + e);
+        }
+        try {
+            Statement stmt = conn.getConnection().createStatement();
+            List<String> parameters;
+            query = "insert into metadata_client_logic_test_tbl values (100,-400,'this data will not appear', -8.8 );";
+            stmt.addBatch(query);
+            query = "update metadata_client_logic_test_tbl set char_col = 'this update will not executed' where key = 10;";
+            stmt.addBatch(query);
+            stmt.clearBatch();
+            count = stmt.executeBatch();
+            conn.getFileWriter().writeLine("verifying the clear batch cmd");
+            query = "select * from metadata_client_logic_test_tbl;";
+            parameters = new ArrayList<>();
+            conn.fetchDataWithPrepareStmnt(query, parameters);
+        }
+        catch (Exception e) {
+            conn.getFileWriter().writeLine("Failed to execute clear batch cmd" + e);
+        }
+        MetadtaUtils.drop_client_logic_tbl_data(conn);
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/CommandType.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/CommandType.java
new file mode 100644
index 000000000..9dcb27db4
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/CommandType.java
@@ -0,0 +1,11 @@
+package gauss.regress.jdbc.utils;
+
+public enum CommandType {
+    SELECT,
+    EXECUTE,
+    DESCRIBE,
+    DESCRIBE_FUNCTION,
+    COMMENT,
+    SHELL,
+    EMPTY
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/DBUtils.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/DBUtils.java
new file mode 100644
index 000000000..97baf24f8
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/DBUtils.java
@@ -0,0 +1,33 @@
+package gauss.regress.jdbc.utils;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class DBUtils {
+    public static Connection connect(String serverAddress, String port, String databaseName, String username, String password) {
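+        // open a connection with client logic (enable_ce=1) enabled; returns null on failure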
+        Connection conn = null;
+        // append &currentSchema=public to the URL to pin the search schema if needed
+        String jdbcConnectionString =
+            String.format("jdbc:postgresql://%s:%s/%s?enable_ce=1",
+                serverAddress, port, databseName);
+        try {
+            conn = DriverManager.getConnection(jdbcConnectionString, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+        return conn;
+    }
+
+    public static void createDatabase4Test(String serverAddress, String port, String databseName, String username, String password, String db2Create) throws SQLException {
+        Connection conn = connect(serverAddress, port, "postgres", username, password);
+        Statement st = conn.createStatement();
+        st.executeUpdate("DROP DATABASE IF EXISTS " + db2Create);
+        st.executeUpdate("CREATE DATABASE " + db2Create);
+        conn.close();
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/DatabaseConnection4Test.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/DatabaseConnection4Test.java
new file mode 100644
index 000000000..dad9598d2
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/DatabaseConnection4Test.java
@@ -0,0 +1,879 @@
+package gauss.regress.jdbc.utils;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.lang.reflect.Field;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+public class DatabaseConnection4Test implements Closeable {
+
+    private gauss.regress.jdbc.utils.OutFileWriter m_outputFile = null;
+    private Connection m_conn = null;
+    private String connectionStringWithNoCL = "";
+    private String connectionStringWithCL = "";
+    private String username;
+    private String password;
+    private Boolean m_isDebug = false;
+    private boolean m_is_derived = false;
+
+    /**
+     * Constructor
+     * @param outputFile
+     */
+    public DatabaseConnection4Test(OutFileWriter outputFile) {
+        m_outputFile = outputFile;
+        m_isDebug = java.lang.management.ManagementFactory.getRuntimeMXBean().
+            getInputArguments().toString().indexOf("jdwp") >= 0;
+    }
+
+    /**
+     * Copy Constructor alternative
+     * @param source other to copy from
+     */
+    public DatabaseConnection4Test(DatabaseConnection4Test source, OutFileWriter outputFile) {
+        m_isDebug = java.lang.management.ManagementFactory.getRuntimeMXBean().
+            getInputArguments().toString().indexOf("jdwp") >= 0;
+
+        m_outputFile = outputFile;
+        connectionStringWithNoCL = source.connectionStringWithNoCL;
+        username = source.username;
+        password = source.password;
+        m_is_derived = true;
+    }
+
+    /**
+     * Closes the current JDBC connection
+     */
+    private void closeConnection() {
+        if (m_conn == null) {
+            return;
+        }
+
+        try {
+            if (!m_conn.isClosed()) {
+                m_conn.close();
+            }
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /**
+     * Connects to the database with CL, without reloading the client logic cache when isValid() is called
+     * @return true on success or false on failure
+     */
+    public boolean connectWithoutReloadingCacheOnIsValid() {
+        closeConnection();
+        String jdbcConnectionString = connectionStringWithNoCL;
+        jdbcConnectionString += "&enable_ce=1&refreshClientEncryption=0";
+        try {
+            m_conn = DriverManager.getConnection(jdbcConnectionString, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Connects to the database with CL and in binary mode
+     * @return true on success or false on failure
+     */
+    public boolean connectInBinaryMode() {
+        closeConnection();
+        String connectionString = connectionStringWithCL + "&binaryTransfer=true";
+        try {
+            m_conn = DriverManager.getConnection(connectionString, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Connects to the database with client logic turned off
+     * @return true on success or false on failure
+     */
+    public boolean connectWithNoCL() {
+        closeConnection();
+        try {
+            m_conn = DriverManager.getConnection(connectionStringWithNoCL, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Close and reopen the database connection with CL
+     * @return true on success or false on failure
+     */
+    public boolean reConnectWithCL() {
+        closeConnection();
+        try {
+            m_conn = DriverManager.getConnection(connectionStringWithCL, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Connects to the database
+     * @param serverAddress
+     * @param port
+     * @param databseName
+     * @param username
+     * @param password
+     * @return true on success and false on failure
+     */
+    public boolean connect(String serverAddress, String port, String databseName, String username, String password) {
+        this.username = username;
+        this.password = password;
+        // append &binaryTransfer=true here to exercise the binary protocol
+        String jdbcConnectionString =
+            String.format("jdbc:postgresql://%s:%s/%s",
+                serverAddress, port, databseName);
+
+        // switch to "?loggerLevel=DEBUG" when tracing driver calls
+        jdbcConnectionString += "?loggerLevel=OFF";
+        connectionStringWithNoCL = jdbcConnectionString;
+        connectionStringWithCL = jdbcConnectionString + "&enable_ce=1";
+        try {
+            m_conn = DriverManager.getConnection(connectionStringWithCL, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * ReConnects to the database (may be called by a cloned, unconnected connection)
+     * @return true on success and false on failure
+     */
+    public boolean reconnect() {
+        if (connectionStringWithNoCL.isEmpty()) {
+            return false;
+        }
+        String jdbcConnectionString = connectionStringWithNoCL;
+        jdbcConnectionString += "&enable_ce=1";
+        try {
+            if (m_conn != null) {
+                m_conn.close();
+            }
+            m_conn = DriverManager.getConnection(jdbcConnectionString, username, password);
+        } catch (SQLException e) {
+            e.printStackTrace();
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * @return the m_conn
+     */
+    public Connection getConnection() {
+        return m_conn;
+    }
+
+    @Override
+    public void close() {
+        if (m_conn != null) {
+            try {
+                m_conn.close();
+            } catch (SQLException e) {
+                e.printStackTrace();
+            }
+        }
+        if (!m_is_derived && m_outputFile != null) {
+            try {
+                m_outputFile.close();
+                m_outputFile = null;
+            } catch (IOException e) {
+                e.printStackTrace();
+            }
+        }
+        else if (m_outputFile != null) {
+            m_outputFile.flush();
+        }
+    }
+
+    public OutFileWriter getFileWriter() {
+        return m_outputFile;
+    }
+
+    /**
+     * Executes a gs_ktool shell command and prints its output to the output file
+     * @param command input shell command
+     */
+    public void gsKtoolExec(String command) {
+        m_outputFile.writeLine(command);
+        Process process = null;
+        try {
+            String[] commandShell = command.substring(2).split("&&");
+            for (String c : commandShell) {
+                process = Runtime.getRuntime().exec(c);
+                BufferedReader pcReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
+
+                List<String> results = new ArrayList<>();
+                String outputline = "";
+                while ((outputline = pcReader.readLine()) != null) {
+                    results.add(outputline);
+                }
+
+                String[] strs = results.toArray(new String[results.size()]);
+                for (String s : strs) {
+                    m_outputFile.writeLine(s);
+                }
+            }
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /**
+     * Fetches data from the database and prints it to the output file
+     * @param sql SQL statement to invoke
+     */
+    public void fetchData(String sql) {
+        m_outputFile.writeLine(sql);
+        Statement st = null;
+        try {
+            st = m_conn.createStatement();
+            if (st == null || sql == null) {
+                System.out.println("found null");
+            }
+            ResultSet rs = null;
+            rs = st.executeQuery(sql);
+            printRS4Test(rs);
+            st.close();
+        } catch (SQLException e) {
+            if (m_isDebug) {
+                e.printStackTrace();
+            }
+            String errorMessage = e.getMessage();
+            printError(errorMessage, sql);
+        }
+    }
+
+    /**
+     * Executes SQL and prints any warnings / errors to the output file
+     * @param sql
+     */
+    public void executeSql(String sql) {
+        Statement st = null;
+        try {
+            sql = sql.replace("\n\n", "\n").replace("\n\n", "\n");
+            m_outputFile.writeLine(sql);
+            st = m_conn.createStatement();
+            st.executeUpdate(sql);
+            displayQueryWarning(st);
+            st.close();
+        } catch (SQLException e) {
+            if (st != null) {
+                try {
+                    displayQueryWarning(st);
+                } catch (SQLException e1) {
+                }
+            }
+            if (m_isDebug) {
+                e.printStackTrace();
+            }
+            String errorMessage = e.getMessage();
+            printError(errorMessage, sql);
+            return;
+        }
+    }
+
+    /**
+     * Fetch data using a prepared statement and print the results to the writer
+     * @param sql the statement sql
+     * @param parameters parameters
+     */
+    public void fetchDataWithPrepareStmnt(String sql, List<String> parameters) {
+        try {
+            m_outputFile.writeLine(sql);
+            PreparedStatement statement = m_conn.prepareStatement(sql);
+            int i = 0;
+            for (String param : parameters) {
+                if (i > 0) {
+                    m_outputFile.write(",");
+                }
+                ++i;
+                m_outputFile.write(param);
+                statement.setString(i, param);
+            }
+            m_outputFile.writeLine("");
+            ResultSet rs = statement.executeQuery();
+            printRS4Test(rs);
+            statement.close();
+        } catch (SQLException e) {
+            String errorMessage = e.getMessage();
+            printError(errorMessage, sql);
+            return;
+        }
+    }
+
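+    /*
+     * Usage sketch (illustrative only, mirrors how the bin tests call it):
+     *     List<String> params = new ArrayList<>();
+     *     params.add("1");
+     *     conn.fetchDataWithPrepareStmnt("select * from t where id = ?;", params);
+     * Every parameter is bound via setString(); the server casts it to the column
+     * type, which keeps the test drivers uniform across data types.
+     */
+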
+    /**
+     * Executes a prepared statement that modifies data
+     * @param[in] sql the query
+     * @param[in] parameters parameter values
+     */
+    public void updateDataWithPrepareStmnt(String sql, List<String> parameters) {
+        try {
+            m_outputFile.writeLine(sql);
+            PreparedStatement statement = m_conn.prepareStatement(sql);
+            int i = 0;
+            for (String param : parameters) {
+                if (i > 0) {
+                    m_outputFile.write(",");
+                }
+                ++i;
+                if (param != null) {
+                    m_outputFile.write(param);
+                }
+                statement.setString(i, param);
+            }
+            m_outputFile.writeLine("");
+            statement.executeUpdate();
+            statement.close();
+        } catch (SQLException e) {
+            String errorMessage = e.getMessage();
+            printError(errorMessage, sql);
+            return;
+        }
+    }
+
+    /**
+     * Describes a database object into the output file
+     * @param command \d command in gsql style
+     */
+    public void describeObject(String command) {
+        int pos = command.indexOf("d");
+        String describeObjectName = "";
+        if (pos < command.length() + 1) {
+            describeObjectName = command.substring(pos + 1).trim();
+        }
+        describeObjectName = describeObjectName.replace(";", "");
+        describeObjectName = describeObjectName.replace("+", ""); // '+' is not supported yet
+        describeObjectName = describeObjectName.trim();
+        try {
+            DatabaseMetaData metadata = m_conn.getMetaData();
+            ResultSet columnsRS = metadata.getColumns("", "", describeObjectName, "");
+            String schemaName = "public";
+            List<List<String>> data = new ArrayList<>();
+
+            while (columnsRS.next()) {
+                List<String> record = new ArrayList<>();
+                record.add(columnsRS.getString("COLUMN_NAME"));
+                String type_name = getDBTypeDisplayName(columnsRS.getString("TYPE_NAME"));
+                record.add(type_name);
+
+                String modifiers = "";
+                if (columnsRS.getString("COLUMN_DEF") != null) {
+                    modifiers += " default " + columnsRS.getString("COLUMN_DEF");
+                }
+                if (columnsRS.getString("IS_NULLABLE").equals("NO")) {
+                    modifiers += " not null";
+                }
+                if (columnsRS.getString("SQL_DATA_TYPE") != null && !columnsRS.getString("SQL_DATA_TYPE").equals("null")) {
+                    modifiers += " encrypted";
+                }
+                record.add(modifiers);
+                data.add(record);
+            }
+            List<String> headers = Arrays.asList("Column", "Type", "Modifiers");
+            List<String> columnTypes = Arrays.asList(String.class.getName(), String.class.getName(),
+                String.class.getName());
+
+            String header = "Table " + "\"" + schemaName + "." + describeObjectName + "\"";
+            if (!data.isEmpty()) {
+                printDataAsTables(headers, columnTypes, data, header, false);
+            }
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
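+    /*
+     * Output sketch for an encrypted column (illustrative, not generated output):
+     *        Table "public.t1"
+     *      Column |  Type   | Modifiers
+     *     --------+---------+------------
+     *      id     | integer |  encrypted
+     */
+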
+    /**
+     * Describes a database function into the output file
+     * @param command \df command in gsql style
+     */
+    public void describeFunction(String command) {
+        int pos = command.indexOf("df");
+        String describeFunctionName = "";
+        if (pos < command.length() + 1) {
+            describeFunctionName = command.substring(pos + 2).trim();
+        }
+        describeFunctionName = describeFunctionName.replace(";", "");
+        describeFunctionName = describeFunctionName.replace("+", ""); // '+' is not supported yet
+        describeFunctionName = describeFunctionName.trim();
+        try {
+            DatabaseMetaData metadata = m_conn.getMetaData();
+            ResultSet columnsRS = metadata.getFunctions("", "", describeFunctionName);
+            String schemaName = "public";
+            List<List<String>> data = new ArrayList<>();
+
+            while (columnsRS.next()) {
+                List<String> record = new ArrayList<>();
+                record.add(columnsRS.getString("COLUMN_NAME"));
+                record.add(getDBTypeDisplayName(columnsRS.getString("TYPE_NAME")));
+                if (columnsRS.getString("SQL_DATA_TYPE") != null && !columnsRS.getString("SQL_DATA_TYPE").equals("null")) {
+                    record.add(" clientlogic");
+                }
+                else {
+                    record.add("");
+                }
+                data.add(record);
+            }
+            List<String> headers = Arrays.asList("Schema", "Name", "Result data type", "Argument data types ", "Type");
+            List<String> columnTypes = Arrays.asList(String.class.getName(), String.class.getName(),
+                String.class.getName());
+
+            String header = "Table " + "\"" + schemaName + "." + describeFunctionName + "\"";
+            printDataAsTables(headers, columnTypes, data, header, false);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    /**
+     * Converts the field database type returned from JDBC to the one gsql displays in a \d command
+     * @param dbTypeName JDBC type name
+     * @return the gsql display name
+     */
+    private static String getDBTypeDisplayName(String dbTypeName) {
+        switch (dbTypeName) {
+            case "int1":
+            case "int2":
+            case "int4":
+            case "int8":
+                return "integer";
+
+            case "varchar":
+                return "character varying";
+
+            case "numeric":
+                return "numeric";
+        }
+        return dbTypeName;
+    }
+
+    /**
+     * Prints the content of a result set to the output file
+     * @param rs
+     */
+    public void printRS4Test(ResultSet rs) {
+        printRS4Test(rs, null);
+    }
+
+    public void printRS4Test(ResultSet rs, Set<String> skipColumns) {
+        try {
+            ResultSetMetaData rsmd = rs.getMetaData();
+            int columnsCount = rsmd.getColumnCount();
+            List<String> headers = new ArrayList<>();
+            List<String> types = new ArrayList<>();
+            List<List<String>> data = new ArrayList<>();
+            Set<Integer> skipColumnIndexes = new HashSet<>();
+            // fill data in headers and types
+            for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) {
+                headers.add(rsmd.getColumnName(colIndex));
+                types.add(rsmd.getColumnClassName(colIndex));
+                if (skipColumns != null && skipColumns.contains(rsmd.getColumnName(colIndex))) {
+                    skipColumnIndexes.add(colIndex);
+                }
+            }
+            while (rs.next()) {
+                List<String> record = new ArrayList<>();
+                for (int colIndex = 1; colIndex < columnsCount + 1; ++colIndex) {
+                    String colValue = rs.getString(colIndex);
+                    if (colValue == null || skipColumnIndexes.contains(colIndex)) {
+                        colValue = "";
+                    }
+                    record.add(colValue);
+                }
+                data.add(record);
+            }
+
+            printDataAsTables(headers, types, data, "", true);
+        } catch (SQLException e1) {
+            m_outputFile.writeLine("Exception in printRS4Test");
+            e1.printStackTrace();
+            return;
+        }
+    }
+
+    /**
+     * Prints tabular data to the output file
+     * @param headers table header names
+     * @param columnTypes the types of the columns
+     * @param data the table data
+     * @param tableHeader header to print on top of the table
+     * @param writeFooter whether to write the table footer
+     */
+    public void printDataAsTables(List<String> headers, List<String> columnTypes, List<List<String>> data,
+        String tableHeader, boolean writeFooter) {
+        List<Integer> maxColumnValueLength = new ArrayList<>();
+        int columnsCount = headers.size();
+        if (columnTypes.size() != columnsCount) {
+            System.out.println("Invalid column headers size, exit printDataAsTables!");
+            return;
+        }
+        // Calculate the maximum header width per column
+        for (String header : headers) {
+            maxColumnValueLength.add(StringUtils.getStringWidth(header));
+        }
+        // Calculate the maximum column width using the actual data
+        for (int recordIndex = 0; recordIndex < data.size(); ++recordIndex) {
+            List<String> record = data.get(recordIndex);
+            if (record.size() != columnsCount) {
+                System.out.println("Invalid record size, exit printDataAsTables!");
+                return;
+            }
+            int colIndex = 0;
+            for (String colValue : record) {
+                int width = StringUtils.getStringWidth(colValue);
+                if (width > maxColumnValueLength.get(colIndex)) {
+                    maxColumnValueLength.set(colIndex, width);
+                }
+                ++colIndex;
+            }
+        }
+        String space = " ";
+        String dash = "-";
+        String connector = "+";
+        String sep = "|";
+
+        if (tableHeader.length() > 0) {
+            int tableWidth = maxColumnValueLength.stream().mapToInt(Integer::intValue).sum();
+            tableWidth += headers.size() * 2; // space before and space after
+            tableWidth += headers.size() - 1; // separator between columns
+            StringBuilder headerLine = new StringBuilder();
+            addValueToLine(headerLine, tableHeader, tableWidth - 2, space, "C", true);
+            m_outputFile.writeLine(headerLine.toString());
+        }
+
+        // Print the result set headers
+        StringBuilder lineHeaders = new StringBuilder();
+        for (int colIndex = 0; colIndex < columnsCount; ++colIndex) {
+            String colName = headers.get(colIndex);
+            addValueToLine(lineHeaders, colName, maxColumnValueLength.get(colIndex), space, "C", false);
+            if (colIndex != columnsCount - 1) {
+                lineHeaders.append(sep);
+            }
+        }
+        m_outputFile.writeLine(lineHeaders.toString());
+        StringBuilder lineSep = new StringBuilder();
+        for (int colIndex = 0; colIndex < columnsCount; ++colIndex) {
+            addValueToLine(lineSep, "", maxColumnValueLength.get(colIndex), dash, "C", false);
+            if (colIndex != columnsCount - 1) {
+                lineSep.append(connector);
+            }
+        }
+        m_outputFile.writeLine(lineSep.toString());
+        for (List<String> record : data) {
+            StringBuilder lineData = new StringBuilder();
+            for (int colIndex = 0; colIndex < columnsCount; ++colIndex) {
+                String aligment = "L";
+                if (columnTypes.get(colIndex).equals("java.lang.Integer") ||
+                    columnTypes.get(colIndex).equals("java.lang.Long") ||
+                    columnTypes.get(colIndex).equals("java.math.BigDecimal")) {
+                    aligment = "R";
+                }
+                boolean isLast = false;
+                if (colIndex == columnsCount - 1) {
+                    isLast = true;
+                }
+                addValueToLine(lineData, record.get(colIndex), maxColumnValueLength.get(colIndex), space,
+                    aligment, isLast);
+                if (!isLast) {
+                    lineData.append(sep);
+                }
+            }
+            m_outputFile.writeLine(lineData.toString());
+        }
+        if (writeFooter) {
+            if (data.size() == 1) {
+                m_outputFile.writeLine("(1 row)");
+            }
+            else {
+                m_outputFile.writeLine("(" + data.size() + " rows)");
+            }
+        }
+        m_outputFile.writeLine("");
+    }
+
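+    /*
+     * Layout produced above (illustrative, matching the gsql expected files):
+     *      key | id |       char_col        | float_col
+     *     -----+----+-----------------------+-----------
+     *      1   | 2  | test_data_4_meta_data | 1.1
+     *     (1 row)
+     */
+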
+    /**
+     * Adds a field value to a line while printing a table to the output file
+     * @param line current line to add to
+     * @param value the value to print
+     * @param MaxLen maximum column length
+     * @param space padding character
+     * @param aligmnet R - right, C - center, L - left
+     * @param lastField is last field in the line
+     */
+    private void addValueToLine(StringBuilder line, String value, int MaxLen, String space, String aligmnet, boolean lastField) {
+        int numberOfPrefixSpaces = 1;
+        int valueWidth = StringUtils.getStringWidth(value);
+        switch (aligmnet) {
+            case "C": // center
+                numberOfPrefixSpaces = (MaxLen - valueWidth) / 2 + 1;
+                break;
+            case "L": // left
+                numberOfPrefixSpaces = 1;
+                break;
+            case "R": // right
+                numberOfPrefixSpaces = 1 + MaxLen - valueWidth;
+        }
+        for (int i = 0; i < numberOfPrefixSpaces; ++i) {
+            line.append(space);
+        }
+        line.append(value);
+        if (!lastField) {
+            for (int i = numberOfPrefixSpaces + valueWidth; i < MaxLen + 2; ++i) {
+                line.append(space);
+            }
+        }
+    }
+
+    /**
+     * Gets SQL with a syntax error and returns the text to appear after the error, e.g. LINE 1: ...SQL...
+     * Similar functionality to the reportErrorPosition function in libpq
+     * @param sql
+     * @param position
+     * @return structure with the SQL line and the position in that line
+     */
+    private static SyntaxError getSynatxErrorLineAndPosition(String sql, int position) {
+        final int MAX_WIDTH = 60;
+        final int MIN_RIGHT_WIDTH = 10;
+        final String GAP = "...";
+        int lineNumber = 0;
+        SyntaxError returnValue = new SyntaxError();
+        String[] lines = sql.split("\n");
+        String sqlLine = "";
+        int positionInLine = 0;
+        if (position < lines[0].length()) {
+            lineNumber = 1;
+            returnValue.setPositionInLine(position);
+            positionInLine = position;
+            sqlLine = sql;
+        }
+        else {
+            // Find the line with the error and the position in that line
+            int acumPosition = 0;
+            boolean found = false;
+            for (int lineCounter = 0; lineCounter < lines.length && !found; ++lineCounter) {
+                if (position < acumPosition + lines[lineCounter].length()) {
+                    lineNumber = lineCounter + 1;
+                    positionInLine = position - acumPosition;
+                    sqlLine = lines[lineCounter];
+                    found = true;
+                }
+                else {
+                    acumPosition += lines[lineCounter].length() + 1; // 1 for the \n character
+                }
+            }
+        }
+        String linePrefix = "LINE " + lineNumber + ": ";
+        if (sqlLine.length() > MAX_WIDTH) {
+            // The part with the syntax error is at the beginning
+            if (positionInLine < MAX_WIDTH - MIN_RIGHT_WIDTH) {
+                sqlLine = sqlLine.substring(0, MAX_WIDTH) + GAP;
+            }
+            else {
+                // The part with the syntax error is in the middle:
+                // For example: ...CLIENT_LOGIC GLOBAL_SETTING test_drop_cmk2 WITH ( KEY_STORE ...
+                //                                              ^
+                if (sqlLine.length() > positionInLine + MIN_RIGHT_WIDTH) {
+                    sqlLine = GAP + sqlLine.substring(positionInLine - 50 - 1, positionInLine + MIN_RIGHT_WIDTH - 1) + GAP;
+                    positionInLine = MAX_WIDTH - MIN_RIGHT_WIDTH + GAP.length() + 1;
+                }
+                // The part with the syntax error is at the end:
+                // For example: ...CLIENT_LOGIC GLOBAL_SETTING test_drop_cmk2 WITH (KEY_STORE)
+                //                                                                      ^
+                else {
+                    sqlLine = GAP + sqlLine.substring(sqlLine.length() - MAX_WIDTH);
+                    positionInLine = positionInLine + MAX_WIDTH - sqlLine.length();
+                }
+            }
+        }
+        returnValue.setPositionInLine(positionInLine + linePrefix.length() - 1);
+        returnValue.setSqlLine(linePrefix + sqlLine);
+        return returnValue;
+    }
+
+    /**
+     * Prints a syntax error in gsql style, if any
+     * @param messages the error messages from the server
+     * @param sql the statement that caused the error / warning
+     * @param outputLines list of error lines to print
+     * @return true if it is a syntax error and false if it is not
+     */
+    private static boolean printSyntaxError(String[] messages, String sql, List<String> outputLines) {
+        String message = "";
+        // Looking for the position message of the syntax error
+        for (int msgIndex = 0; msgIndex < messages.length; ++msgIndex) {
+            String[] items = messages[msgIndex].split(":");
+            if (items.length > 1) {
+                if (items[0].trim().toUpperCase().equals("POSITION")) {
+                    message = messages[msgIndex];
+                    break;
+                }
+            }
+        }
+        if (message.length() == 0) {
+            return false; // No position message
+        }
+        boolean isSyntaxError = false;
+        String[] items = message.split(":");
+        if (items.length == 2) {
+            try {
+                int position = Integer.parseInt(items[1].trim());
+                SyntaxError sqlLine = getSynatxErrorLineAndPosition(sql, position);
+                outputLines.add(sqlLine.getSqlLine());
+                StringBuilder errorLine2 = new StringBuilder();
+                for (int index = 0; index < sqlLine.getPositionInLine(); ++index) {
+                    errorLine2.append(" ");
+                }
+                errorLine2.append("^");
+                outputLines.add(errorLine2.toString());
+                isSyntaxError = true;
+            }
+            catch (NumberFormatException e) {
+                // do nothing here; just catch it and isSyntaxError stays false
+            }
+        }
+        return isSyntaxError;
+    }
+
+    /**
+     * Prints an error to the output file. Some changes are required to match the gsql / libpq formatting of the message
+     * @param message the error message
+     * @param sql the statement that caused the error / warning
+     */
+    private void printError(String message, String sql) {
+        int MAX_WARNING_HEADER = 10; // warning headers are short strings like NOTICE, INFO, HINT
+        // Server errors contain a prefix with [connection details] - remove it
+        if (message.length() == 0) {
+            return;
+        }
+        if (message.charAt(0) == '[') {
+            int secondBracket = message.indexOf("]");
+            if (secondBracket > 0) { // cannot be zero, as the character at index zero is '['
+                if (message.length() > secondBracket) {
+                    message = message.substring(secondBracket + 1);
+                }
+            }
+        }
+        String[] messages = message.split("\n");
+        // For some reason the warning output differs slightly between gsql and JDBC; it is adjusted here
+        boolean didPrintSyntaxError = false;
+        List<String> outputLines = new ArrayList<>();
+        int lineNumber = 0;
+        for (String item : messages) {
+            ++lineNumber;
+            String[] itemSplited = item.split(":");
+            if (itemSplited.length > 1) {
+                if (lineNumber == 2) { // print the syntax error as the 2nd line if there is one
+                    didPrintSyntaxError = printSyntaxError(messages, sql, outputLines);
+                }
+                if (itemSplited[0].trim().length() < MAX_WARNING_HEADER) {
+                    String errorHeader = itemSplited[0].trim().toUpperCase();
+                    if (errorHeader.equals("WHERE")) {
+                        errorHeader = "CONTEXT";
+                    }
+                    boolean skipThisPart = false;
+                    if (errorHeader.equals("POSITION")) {
+                        skipThisPart = didPrintSyntaxError;
+                    }
+                    if (!skipThisPart) {
+                        StringBuilder lineOutput = new StringBuilder();
+                        lineOutput.append(errorHeader);
+                        lineOutput.append(": ");
+                        lineOutput.append(itemSplited[1].trim());
+                        // If there are more ':' characters, just write the remaining parts as-is
+                        for (int warningPart = 2; warningPart < itemSplited.length; ++warningPart) {
+                            lineOutput.append(":");
+                            lineOutput.append(itemSplited[warningPart]);
+                        }
+                        outputLines.add(lineOutput.toString());
+                    }
+                }
+                else {
+                    outputLines.add(item);
+                }
+            }
+            else {
+                outputLines.add(item);
+            }
+        }
+        for (String outputLine : outputLines) {
+            m_outputFile.writeLine(outputLine);
+        }
+        if (message.charAt(message.length() - 1) == '\n') {
+            m_outputFile.writeLine(""); // for errors coming from libpq with \n at the end
+        }
+    }
+
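+    // Illustrative note: the method below reads Throwable.detailMessage through
+    // reflection because SQLWarning.getMessage() drops everything after the first
+    // line of a multi-line server notice; this relies on JDK internals and may
+    // fail under a security manager, which the catch blocks report.
+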
+    /**
+     * Displays query warnings (notices & hints supplied by the server on a successful query execution)
+     * @param st the statement issued
+     * @throws SQLException
+     */
+    private void displayQueryWarning(Statement st) throws SQLException {
+        SQLWarning warnings = st.getWarnings();
+        while (warnings != null) {
+            // warnings.getMessage() does not return the entire message, so reflection is used to read the detailMessage field
+            try {
+                Field f = Throwable.class.getDeclaredField("detailMessage");
+                f.setAccessible(true);
+                String message = (String) f.get(warnings); // IllegalAccessException
+                printError(message, "");
+            } catch (NoSuchFieldException e) {
+                m_outputFile.writeLine("failed reading detailMessage - NoSuchFieldException");
+                e.printStackTrace();
+            } catch (SecurityException e) {
+                m_outputFile.writeLine("failed reading detailMessage - SecurityException");
+                e.printStackTrace();
+            } catch (IllegalArgumentException e) {
+                m_outputFile.writeLine("failed reading detailMessage - IllegalArgumentException");
+                e.printStackTrace();
+            } catch (IllegalAccessException e) {
+                m_outputFile.writeLine("failed reading detailMessage - IllegalAccessException");
+                e.printStackTrace();
+            }
+            warnings = warnings.getNextWarning();
+        }
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/IInputFileParser.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/IInputFileParser.java
new file mode 100644
index 000000000..f89ebf637
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/IInputFileParser.java
@@ -0,0 +1,7 @@
+package gauss.regress.jdbc.utils;
+
+public interface IInputFileParser {
+    public boolean load(String path);
+    public boolean moveNext();
+    public SQLCommand get();
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/InputFileParser.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/InputFileParser.java
new file mode 100644
index 000000000..44d57aba2
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/InputFileParser.java
@@ -0,0 +1,143 @@
+package gauss.regress.jdbc.utils;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+public class InputFileParser implements IInputFileParser {
+
+    String[] m_arrLines;
+    int m_linesPointer = -1;
+
+    @Override
+    public boolean load(String path) {
+        byte[] encoded = null;
+        try {
+            encoded = Files.readAllBytes(Paths.get(path));
+        } catch (IOException e) {
+            e.printStackTrace();
+            return false;
+        }
+        String content = new String(encoded, StandardCharsets.UTF_8);
+        m_arrLines = content.split("\n");
+        return true;
+    }
+
+    @Override
+    public boolean moveNext() {
+        ++m_linesPointer;
+        return isSafeIndex(m_linesPointer);
+    }
+
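+    /*
+     * Input files are gsql-style scripts, e.g. (illustrative):
+     *     -- a comment, echoed as-is
+     *     \d my_table
+     *     select 1;
+     * A command normally ends at ';'; CREATE FUNCTION / PROCEDURE bodies are the
+     * exception and end at a LANGUAGE clause or, oracle style, at a lone '/'.
+     */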
+    private String extractCommand(int currentPosition) {
+        if (!isSafeIndex(currentPosition)) {
+            System.out.println("extractCommand - reached end of lines");
+            return "";
+        }
+        StringBuilder result = new StringBuilder();
+        int index = currentPosition;
+        boolean foundSemiColumn = false;
+        boolean isFirstLine = true;
+        boolean isCreateFunction = false;
+        boolean isFunctionEnded = false; // helper variable for ending an oracle style function
+        while (!foundSemiColumn && isSafeIndex(index) && !isFunctionEnded) {
+            if (!isFirstLine) {
+                result.append("\n");
+            }
+            isFirstLine = false;
+            result.append(m_arrLines[index]);
+            if (m_arrLines[index].startsWith("\\")) {
+                ++index;
+                return result.toString();
+            }
+            // This is still a very primitive statement parser; it may be improved later
+            if (m_arrLines[index].toUpperCase().startsWith("CREATE FUNCTION")
+                || m_arrLines[index].toUpperCase().startsWith("CREATE OR REPLACE FUNCTION")
+                || m_arrLines[index].toUpperCase().startsWith("CREATE PROCEDURE")
+                || m_arrLines[index].toUpperCase().startsWith("CREATE OR REPLACE PROCEDURE")) {
+                isCreateFunction = true;
+            }
+            if (isCreateFunction) {
+                if (m_arrLines[index].toUpperCase().contains("LANGUAGE")) {
+                    isCreateFunction = false;
+                }
+                else if (m_arrLines[index].toUpperCase().trim().equals("/")) {
+                    /* In oracle style functions the function ends with a slash, for instance:
+                     *     create or replace function select1() RETURN SETOF t_num
+                     *     AS
+                     *     BEGIN
+                     *         return query (SELECT * from t_num);
+                     *     END;
+                     *     /
+                     */
+                    isFunctionEnded = true;
+                }
+            }
+            if (!isCreateFunction && m_arrLines[index].contains(";")) {
+                foundSemiColumn = true;
+            }
+            m_linesPointer = index;
+            ++index;
+        }
+        return result.toString();
+    }
+
+    private boolean isSafeIndex(int index) {
+        if (index < m_arrLines.length) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public SQLCommand get() {
+        if (!isSafeIndex(m_linesPointer)) {
+            System.out.println("SQLCommand get - reached end of lines");
+            return null;
+        }
+        SQLCommand command = null;
+        // Comments:
+        if (m_arrLines[m_linesPointer].trim().startsWith("--")
+            || m_arrLines[m_linesPointer].trim().startsWith("\\set")
+            || m_arrLines[m_linesPointer].trim().equals("\\d")
+            || m_arrLines[m_linesPointer].trim().startsWith("\\sf")) {
+            command = new SQLCommand(m_arrLines[m_linesPointer], CommandType.COMMENT);
+            return command;
+        }
+        if (m_arrLines[m_linesPointer].trim().startsWith("\\!")) {
+            command = new SQLCommand(m_arrLines[m_linesPointer], CommandType.SHELL);
+            return command;
+        }
+        // Empty lines
+        if (m_arrLines[m_linesPointer].trim().length() == 0) {
+            command = new SQLCommand(m_arrLines[m_linesPointer], CommandType.EMPTY);
+            return command;
+        }
+        // Describe table
+        if (m_arrLines[m_linesPointer].trim().startsWith("\\d ") || m_arrLines[m_linesPointer].trim().startsWith("\\d+")) {
+            command = new SQLCommand(m_arrLines[m_linesPointer], CommandType.DESCRIBE);
+            return command;
+        }
+        // Describe function
+        if (m_arrLines[m_linesPointer].trim().startsWith("\\df")) {
+            command = new SQLCommand(m_arrLines[m_linesPointer], CommandType.DESCRIBE_FUNCTION);
+            return command;
+        }
+        // Extract the SQL itself
+        String sql = extractCommand(m_linesPointer);
+        String normSQL = sql.toLowerCase().trim();
+        if (normSQL.startsWith("select") ||
+            normSQL.startsWith("with") ||
+            normSQL.startsWith("explain") ||
+            normSQL.startsWith("show") ||
+            normSQL.startsWith("fetch") ||
+            normSQL.startsWith("call")) {
+            command = new SQLCommand(sql, CommandType.SELECT);
+        }
+        else {
+            command = new SQLCommand(sql, CommandType.EXECUTE);
+        }
+        return command;
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/MetadtaUtils.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/MetadtaUtils.java
new file mode 100644
index 000000000..71277c8ff
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/MetadtaUtils.java
@@ -0,0 +1,59 @@
+package gauss.regress.jdbc.utils;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import gauss.regress.jdbc.bintests.BinUtils;
+
+public class MetadtaUtils {
+    public static void create_simple_table(DatabaseConnection4Test conn) {
+        conn.executeSql(
+            "create table metadata_simple_test_tbl (key int , id int primary key, char_col varchar(30), float_col float);");
+        conn.getFileWriter().writeLine("*******inserting data to the metadata_simple_test_tbl");
+        List<String> parameters;
+        String sqlInsert;
+        sqlInsert = "insert into metadata_simple_test_tbl (key, id, char_col, float_col ) values (?,?,?,?);";
+        parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("2");
+        parameters.add("test_data_4_meta_data");
+        parameters.add("1.1");
+        conn.updateDataWithPrepareStmnt(sqlInsert, parameters);
+        String sqlSelect;
+        conn.getFileWriter().writeLine("*************verifying data");
+        sqlSelect = "select * from metadata_simple_test_tbl;";
+        parameters = new ArrayList<>();
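+        // The fetch below round-trips the inserted row, so the expected-output
+        // file records both the bound parameters and the stored values.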
+        conn.fetchDataWithPrepareStmnt(sqlSelect, parameters);
+    }
+
+    public static void create_client_logic_table(DatabaseConnection4Test conn) {
+        BinUtils.createCLSettings(conn);
+        conn.executeSql("CREATE TABLE IF NOT EXISTS metadata_client_logic_test_tbl(key int," +
+            "id int PRIMARY KEY ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," +
+            "char_col varchar(30) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC)," +
+            "float_col float ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=cek1, ENCRYPTION_TYPE = DETERMINISTIC))");
+        conn.getFileWriter().writeLine("*******inserting data to the metadata_client_logic_test_tbl;");
+        List<String> parameters;
+        String sqlInsert;
+        sqlInsert = "insert into metadata_client_logic_test_tbl (key, id, char_col, float_col ) values (?,?,?,?);";
+        parameters = new ArrayList<>();
+        parameters.add("1");
+        parameters.add("2");
+        parameters.add("test_data_4_meta_data");
+        parameters.add("1.1");
+        conn.updateDataWithPrepareStmnt(sqlInsert, parameters);
+        String sqlSelect;
+        conn.getFileWriter().writeLine("*************verifying data");
+        sqlSelect = "select * from metadata_client_logic_test_tbl;";
+        parameters = new ArrayList<>();
+        conn.fetchDataWithPrepareStmnt(sqlSelect, parameters);
+    }
+
+    public static void drop_client_logic_tbl_data(DatabaseConnection4Test conn) {
+        conn.executeSql("drop table metadata_client_logic_test_tbl;");
+        conn.executeSql("DROP CLIENT MASTER KEY cmk1 CASCADE;");
+    }
+
+    public static void drop_simple_tbl_data(DatabaseConnection4Test conn) {
+        conn.executeSql("drop table metadata_simple_test_tbl;");
+    }
+}
diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/OutFileWriter.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/OutFileWriter.java
new file mode 100644
index 000000000..91dd64751
--- /dev/null
+++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/OutFileWriter.java
@@ -0,0 +1,77 @@
+package gauss.regress.jdbc.utils;
+
+import java.io.Closeable;
+import java.io.FileWriter;
+import java.io.IOException;
+
+public class OutFileWriter implements Closeable {
+    private FileWriter m_output = null;
+    private String m_filename = null;
+    boolean m_isDebug = false;
+
+    public OutFileWriter() {
+        m_isDebug = java.lang.management.ManagementFactory.getRuntimeMXBean().
+ getInputArguments().toString().indexOf("jdwp") >= 0; + } + /** + *open file handler to write to + * @param[in] filename + * @throws IOException + */ + public void openFile(String filename) throws IOException{ + m_filename = filename; + m_output = new FileWriter(filename); + } + + /** + * closes the file handler + */ + @Override + public void close() throws IOException { + if (m_output != null) { + m_output.close(); + m_output = null; + } + } + public void flush() { + if (m_output != null) { + try { + m_output.flush(); + } catch (IOException e) { + System.out.println("flush failed ..."); + e.printStackTrace(); + } + } + } + + /** + * Writes a line to the output file + * @param line line data to write + */ + public void writeLine(String line) { + write(line); + write(System.getProperty( "line.separator")); + } + + /** + * Writes data to the output file + * @param info data to write + */ + public void write(String info) { + try { + if (m_isDebug) { + System.out.print(info); + } + m_output.write(info); + } catch (IOException e) { + e.printStackTrace(); + System.exit(1); + } + } + + /** + * get file name + */ + public String getFilename() { + return m_filename; + } +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/SQLCommand.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/SQLCommand.java new file mode 100644 index 000000000..0a057eac6 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/SQLCommand.java @@ -0,0 +1,24 @@ +package gauss.regress.jdbc.utils; + +public class SQLCommand { + String command; + CommandType commandType; + public SQLCommand(String command, CommandType commandType) { + super(); + this.command = command; + this.commandType = commandType; + } + public String getCommand() { + return command; + } + public void setCommand(String command) { + this.command = command; + } + public CommandType getCommandType() { + return commandType; + } + public void setCommandType(CommandType commandType) { + this.commandType = commandType; + } + +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/StringUtils.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/StringUtils.java new file mode 100644 index 000000000..e9d772795 --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/StringUtils.java @@ -0,0 +1,32 @@ +package gauss.regress.jdbc.utils; + +public class StringUtils { + + /** + * Counts the number of Chinese characters in a string + * @param s input string + * @return number of Chinese characters + */ + public static int countHanCharacters(String s) { + int result = 0; + for (int i = 0; i < s.length(); ++i) { + int codepoint = s.codePointAt(i); + if (Character.UnicodeScript.of(codepoint) == Character.UnicodeScript.HAN) { + ++result; + } + } + return result; + } + + /** + * Get a string width for printing + * @param s input string + * @return the string width for printing + */ + public static int getStringWidth(String s) { + int result = s.length(); + result += countHanCharacters(s); + return result; + } + +} diff --git a/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/SyntaxError.java b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/SyntaxError.java new file mode 100644 index 000000000..1004f512f --- /dev/null +++ b/src/test/regress/jdbc_client/src/gauss/regress/jdbc/utils/SyntaxError.java @@ -0,0 +1,19 @@ +package gauss.regress.jdbc.utils; + +public class SyntaxError { + private String sqlLine = ""; + private int positionInLine = 0; + public 
String getSqlLine() { + return sqlLine; + } + public void setSqlLine(String sqlLine) { + this.sqlLine = sqlLine; + } + public int getPositionInLine() { + return positionInLine; + } + public void setPositionInLine(int positionInLine) { + this.positionInLine = positionInLine; + } + +} diff --git a/src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/__init__.py b/src/test/regress/jdbc_client_lib/gsjdbc4.jar similarity index 100% rename from src/gausskernel/dbmind/tools/ai_server/app/monitor/algorithm/timeseries/__init__.py rename to src/test/regress/jdbc_client_lib/gsjdbc4.jar diff --git a/src/test/regress/jdbc_test/bypass_pbe/BypassPbe.java b/src/test/regress/jdbc_test/bypass_pbe/BypassPbe.java index bc8a39c3b..a71def0cf 100644 --- a/src/test/regress/jdbc_test/bypass_pbe/BypassPbe.java +++ b/src/test/regress/jdbc_test/bypass_pbe/BypassPbe.java @@ -1,368 +1,382 @@ -import java.io.FileInputStream; -import java.io.IOException; -import java.sql.*; -import java.util.*; -import java.text.SimpleDateFormat; -import org.postgresql.util.*; - -public -class BypassPbe { -public - static Connection GetConnection(String port) - { - String urls = "jdbc:postgresql://localhost:" + port + "/regression?prepareThreshold=0&loggerLevel=off"; - String driver = "org.postgresql.Driver"; - - Properties urlProps = new Properties(); - urlProps.setProperty("user", "tom"); - urlProps.setProperty("password", "tom@1234"); - - Connection conn = null; - try { - Class.forName(driver).newInstance(); - conn = DriverManager.getConnection(urls, urlProps); - System.out.println("Connection succeed!"); - } catch (Exception exception) { - exception.printStackTrace(); - return null; - } - - return conn; - }; - - -public - static void CreateTable(Connection conn) - { - Statement stmt = null; - try { - stmt = conn.createStatement(); - - int drc = stmt.executeUpdate("drop table if exists jdbcpbebypass ;"); - - int rc = stmt.executeUpdate("create table jdbcpbebypass(id int, class int, name text, score float);"); - - stmt.executeUpdate("create index on jdbcpbebypass(class);"); - - stmt.close(); - } catch (SQLException exception) { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - exception.printStackTrace(); - } - } - -public - static void InsertData(Connection conn) - { - Statement stmt = null; - PreparedStatement pstmt = null; - try { - String insertSql = "insert into jdbcpbebypass(id, class, name, score ) values (?,?,?,?);"; - - pstmt = conn.prepareStatement(insertSql); - for (int j = 0; j <= 6; j++) { - for (int i = 1; i <= 10; i++) { - pstmt.setInt(1, j * 10 + i); - pstmt.setInt(2, j); - pstmt.setString(3, "name" + (j * 10 + i)); - pstmt.setFloat(4, (float)Math.random() * 100 + 1); - int ans = pstmt.executeUpdate(); - } - } - pstmt.close(); - } catch (PSQLException exception2) { - if (pstmt != null) { - try { - System.out.println("insert again"); - pstmt.executeUpdate(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - System.out.println("over"); - exception2.printStackTrace(); - } catch (SQLException exception) { - if (pstmt != null) { - try { - pstmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - exception.printStackTrace(); - } - } - -public - static void SelectData(Connection conn) - { - setFusion(conn); - - Statement stmt = null; - 
PreparedStatement pstmt = null; - PreparedStatement pstmt2 = null; - try { - String selectSql = "select name from jdbcpbebypass where class=?;"; - String selectSql2 = "select id from jdbcpbebypass where class=?;"; - - conn.setAutoCommit(false); - pstmt = conn.prepareStatement(selectSql); - pstmt2 = conn.prepareStatement(selectSql2); - pstmt.setFetchSize(3); - pstmt2.setFetchSize(3); - - pstmt.setInt(1, 1); - - ResultSet rs = pstmt.executeQuery(); // P1 B1 - int round = 0; - while (rs.next()) { // E1 E1 分了两次取结果,每次最多取3条 - System.err.println("name=" + rs.getString(1)); - System.err.println(); - round++; - if (round == 6) - break; - } - System.err.println("break of a resultset of pstmt1"); - - pstmt2.setInt(1, 3); - - round = 0; - ResultSet rs2 = pstmt2.executeQuery(); // P2 B2 - while (rs2.next()) { // E2E2 - System.err.println("id=" + rs2.getInt(1)); - System.err.println(); - round++; - if (round == 6) - break; - } - System.err.println("break of a resultset of pstmt2"); - - round = 0; - System.err.println("start E1E1"); - while (rs.next()) { // E1E1 - System.err.println("name=" + rs.getString(1)); - System.err.println(); - round++; - if (round == 4) - break; - } - System.err.println("end E1E1"); - - round = 0; - System.err.println("start E2E2"); - while (rs2.next()) { // E2E2 - System.err.println("id=" + rs2.getInt(1)); - System.err.println(); - round++; - if (round == 4) - break; - } - System.err.println("end E2E2"); - - pstmt.close(); - pstmt2.close(); - } catch (SQLException exception) { - if (pstmt != null) { - try { - pstmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - exception.printStackTrace(); - } - } - -public - static void UpdateData(Connection conn) - { - setFusion(conn); - - PreparedStatement pstmt = null; - try { - String selectSql = "update jdbcpbebypass set name='name_k' where class=?;"; - - conn.setAutoCommit(false); - pstmt = conn.prepareStatement(selectSql); - pstmt.setFetchSize(3); - pstmt.setInt(1, 1); - - int aff_row = pstmt.executeUpdate(); - System.err.println("aff_row=" + aff_row); - - pstmt.close(); - } catch (SQLException exception) { - if (pstmt != null) { - try { - pstmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - exception.printStackTrace(); - } - } - - -public - static void SelectDataDirectly(Connection conn) - { - setFusion(conn); - Statement stmt = null; - - try { - stmt = conn.createStatement(); - - ResultSet rs = stmt.executeQuery("select * from jdbcpbebypass;"); // P B E - while (rs.next()) { - System.err.println("id=" + rs.getInt(1) + ",class=" + rs.getInt(2) + ",name=" + rs.getString(3)); - } - stmt.close(); - } catch (SQLException exception) { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - exception.printStackTrace(); - } - } - - static void setFusion(Connection conn) - { - Statement stmt = null; - - try { - stmt = conn.createStatement(); - stmt.executeUpdate("set enable_bitmapscan=off;"); - stmt.executeUpdate("set enable_seqscan=off;"); - stmt.executeUpdate("set enable_opfusion=on;"); - } catch (SQLException exception) { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException exception1) { - exception1.printStackTrace(); - } - } - exception.printStackTrace(); - } - } - -public static void SelectDataDataRow(Connection conn) { - Statement stmt = null; - 
PreparedStatement pstmt = null; - PreparedStatement pstmt2 = null; - try{ - stmt = conn.createStatement(); - conn.setAutoCommit(false); - int rc = stmt.executeUpdate("create table t1(c1 int primary key, c2 int, c3 int);"); - rc = stmt.executeUpdate("insert into t1 select i,i,i from generate_series(1,100) i;"); - rc = stmt.executeUpdate("set enable_bitmapscan =off;"); // P B E - rc = stmt.executeUpdate("set enable_seqscan =off;"); // P B E - String selectSql = "select * from t1 where c1>? limit 2;"; - pstmt = conn.prepareStatement(selectSql); - pstmt.setMaxRows(2); - pstmt.setInt(1,1); - ResultSet rs = pstmt.executeQuery(); // P1 B1 E1 - int round =0; - while(rs.next()){ //E1 E1 E1 - System.err.println("c2="+rs.getInt(1)); - System.err.println(); - round++; - } - conn.commit(); - System.err.println("break of a resultset of pstmt1"); - round = 0; - pstmt.setMaxRows(2); - pstmt.setInt(1,1); - rs = pstmt.executeQuery(); - while(rs.next()) { - System.err.println("c2="+rs.getInt(1));System.err.println(); - } - System.err.println("break of a resultset of pstmt1"); - round = 0; - pstmt.setMaxRows(2); - pstmt.setInt(1,1); - rs = pstmt.executeQuery(); - while(rs.next()) { - System.err.println("c2="+rs.getInt(1));System.err.println(); - } - System.err.println("break of a resultset of pstmt1"); - round = 0; - while(rs.next()) { // - System.err.println("c2="+rs.getInt(1)); - System.err.println(); - round++; - } - System.err.println("end of a resultset"); - rc = stmt.executeUpdate("drop table t1;"); - pstmt.close(); - } catch (SQLException e) { - if (pstmt != null) { - try { - pstmt.close(); - } catch (SQLException e1) { - e1.printStackTrace(); - } - - } - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException e1) { - e1.printStackTrace(); - } - } - e.printStackTrace(); - } - } - - - -public - static void main(String[] args) - { - String PORT = args[0]; - Connection conn = GetConnection(PORT); - - if (conn == null) { - System.out.println("connection failed"); - return; - } - CreateTable(conn); - InsertData(conn); - SelectData(conn); - UpdateData(conn); - SelectDataDirectly(conn); - SelectDataDataRow(conn); - try { - conn.close(); - System.out.println("close connection"); - } catch (SQLException exception) { - exception.printStackTrace(); - } - } -} +import java.io.FileInputStream; +import java.io.IOException; +import java.sql.*; +import java.util.*; +import java.text.SimpleDateFormat; +import org.postgresql.util.*; + +public +class BypassPbe { +public + static Connection GetConnection(String port) + { + String urls = "jdbc:postgresql://localhost:" + port + "/regression?prepareThreshold=0&loggerLevel=off"; + String driver = "org.postgresql.Driver"; + + Properties urlProps = new Properties(); + urlProps.setProperty("user", "tom"); + urlProps.setProperty("password", "tom@1234"); + + Connection conn = null; + try { + Class.forName(driver).newInstance(); + conn = DriverManager.getConnection(urls, urlProps); + System.out.println("Connection succeed!"); + } catch (Exception exception) { + exception.printStackTrace(); + return null; + } + + return conn; + }; + + +public + static void CreateTable(Connection conn) + { + Statement stmt = null; + try { + stmt = conn.createStatement(); + + int drc = stmt.executeUpdate("drop table if exists jdbcpbebypass ;"); + + int rc = stmt.executeUpdate("create table jdbcpbebypass(id int, class int, name text, score float);"); + + stmt.executeUpdate("create index on jdbcpbebypass(class);"); + + stmt.close(); + } catch (SQLException exception) { + if (stmt != null) { 
+                try {
+                    stmt.close();
+                } catch (SQLException exception1) {
+                    exception1.printStackTrace();
+                }
+            }
+            exception.printStackTrace();
+        }
+    }
+
+public
+    static void InsertData(Connection conn)
+    {
+        Statement stmt = null;
+        PreparedStatement pstmt = null;
+        try {
+            String insertSql = "insert into jdbcpbebypass(id, class, name, score ) values (?,?,?,?);";
+
+            pstmt = conn.prepareStatement(insertSql);
+            for (int j = 0; j <= 6; j++) {
+                for (int i = 1; i <= 10; i++) {
+                    pstmt.setInt(1, j * 10 + i);
+                    pstmt.setInt(2, j);
+                    pstmt.setString(3, "name" + (j * 10 + i));
+                    pstmt.setFloat(4, (float)Math.random() * 100 + 1);
+                    int ans = pstmt.executeUpdate();
+                }
+            }
+            pstmt.close();
+        } catch (PSQLException exception2) {
+            if (pstmt != null) {
+                try {
+                    System.out.println("insert again");
+                    pstmt.executeUpdate();
+                } catch (SQLException exception1) {
+                    exception1.printStackTrace();
+                }
+            }
+            System.out.println("over");
+            exception2.printStackTrace();
+        } catch (SQLException exception) {
+            if (pstmt != null) {
+                try {
+                    pstmt.close();
+                } catch (SQLException exception1) {
+                    exception1.printStackTrace();
+                }
+            }
+            if (stmt != null) {
+                try {
+                    stmt.close();
+                } catch (SQLException exception1) {
+                    exception1.printStackTrace();
+                }
+            }
+            exception.printStackTrace();
+        }
+    }
+
+public
+    static void SelectData(Connection conn)
+    {
+        setFusion(conn);
+
+        Statement stmt = null;
+        PreparedStatement pstmt = null;
+        PreparedStatement pstmt2 = null;
+        PreparedStatement pstmt3 = null;
+        try {
+            String selectSql = "select name from jdbcpbebypass where class=?;";
+            String selectSql2 = "select id from jdbcpbebypass where class=?;";
+            String selectSql3 = "select name from jdbcpbebypass where class=? offset 1 limit 10;";
+
+            conn.setAutoCommit(false);
+            pstmt = conn.prepareStatement(selectSql);
+            pstmt2 = conn.prepareStatement(selectSql2);
+            pstmt3 = conn.prepareStatement(selectSql3);
+            pstmt.setFetchSize(3);
+            pstmt2.setFetchSize(3);
+            pstmt3.setFetchSize(2);
+            pstmt.setInt(1, 1);
+
+            ResultSet rs = pstmt.executeQuery(); // P1 B1
+            int round = 0;
+            while (rs.next()) { // E1 E1: results are fetched in two rounds, at most 3 rows each
+                System.err.println("name=" + rs.getString(1));
+                System.err.println();
+                round++;
+                if (round == 6)
+                    break;
+            }
+            System.err.println("break of a resultset of pstmt1");
+
+            pstmt2.setInt(1, 3);
+
+            round = 0;
+            ResultSet rs2 = pstmt2.executeQuery(); // P2 B2
+            while (rs2.next()) { // E2E2
+                System.err.println("id=" + rs2.getInt(1));
+                System.err.println();
+                round++;
+                if (round == 6)
+                    break;
+            }
+            System.err.println("break of a resultset of pstmt2");
+
+            round = 0;
+            System.err.println("start E1E1");
+            while (rs.next()) { // E1E1
+                System.err.println("name=" + rs.getString(1));
+                System.err.println();
+                round++;
+                if (round == 4)
+                    break;
+            }
+            System.err.println("end E1E1");
+
+            round = 0;
+            System.err.println("start E2E2");
+            while (rs2.next()) { // E2E2
+                System.err.println("id=" + rs2.getInt(1));
+                System.err.println();
+                round++;
+                if (round == 4)
+                    break;
+            }
+            System.err.println("end E2E2");
+            System.err.println("start OFFSET 1 LIMIT 10 E2");
+            round = 0;
+            pstmt3.setInt(1, 3);
+            ResultSet rs3 = pstmt3.executeQuery();
+            while (rs3.next()) {
+                System.err.println("name=" + rs3.getString(1));
+                System.err.println();
+                round++;
+                if (round == 10)
+                    break;
+            }
+            System.err.println("end OFFSET 1 LIMIT 10 E2");
+            pstmt.close();
+            pstmt2.close();
+            pstmt3.close();
+        } catch (SQLException exception) {
+            if (pstmt != null) {
+                try {
+                    pstmt.close();
+                } catch (SQLException exception1) {
+                    exception1.printStackTrace();
+                }
+            }
+            if (stmt != null) {
+
try { + stmt.close(); + } catch (SQLException exception1) { + exception1.printStackTrace(); + } + } + exception.printStackTrace(); + } + } + +public + static void UpdateData(Connection conn) + { + setFusion(conn); + + PreparedStatement pstmt = null; + try { + String selectSql = "update jdbcpbebypass set name='name_k' where class=?;"; + + conn.setAutoCommit(false); + pstmt = conn.prepareStatement(selectSql); + pstmt.setFetchSize(3); + pstmt.setInt(1, 1); + + int aff_row = pstmt.executeUpdate(); + System.err.println("aff_row=" + aff_row); + + pstmt.close(); + } catch (SQLException exception) { + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException exception1) { + exception1.printStackTrace(); + } + } + exception.printStackTrace(); + } + } + + +public + static void SelectDataDirectly(Connection conn) + { + setFusion(conn); + Statement stmt = null; + + try { + stmt = conn.createStatement(); + + ResultSet rs = stmt.executeQuery("select * from jdbcpbebypass;"); // P B E + while (rs.next()) { + System.err.println("id=" + rs.getInt(1) + ",class=" + rs.getInt(2) + ",name=" + rs.getString(3)); + } + stmt.close(); + } catch (SQLException exception) { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException exception1) { + exception1.printStackTrace(); + } + } + exception.printStackTrace(); + } + } + + static void setFusion(Connection conn) + { + Statement stmt = null; + + try { + stmt = conn.createStatement(); + stmt.executeUpdate("set enable_bitmapscan=off;"); + stmt.executeUpdate("set enable_seqscan=off;"); + stmt.executeUpdate("set enable_opfusion=on;"); + } catch (SQLException exception) { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException exception1) { + exception1.printStackTrace(); + } + } + exception.printStackTrace(); + } + } + +public static void SelectDataDataRow(Connection conn) { + Statement stmt = null; + PreparedStatement pstmt = null; + PreparedStatement pstmt2 = null; + try{ + stmt = conn.createStatement(); + conn.setAutoCommit(false); + int rc = stmt.executeUpdate("create table t1(c1 int primary key, c2 int, c3 int);"); + rc = stmt.executeUpdate("insert into t1 select i,i,i from generate_series(1,100) i;"); + rc = stmt.executeUpdate("set enable_bitmapscan =off;"); // P B E + rc = stmt.executeUpdate("set enable_seqscan =off;"); // P B E + String selectSql = "select * from t1 where c1>? 
limit 2;"; + pstmt = conn.prepareStatement(selectSql); + pstmt.setMaxRows(2); + pstmt.setInt(1,1); + ResultSet rs = pstmt.executeQuery(); // P1 B1 E1 + int round =0; + while(rs.next()){ //E1 E1 E1 + System.err.println("c2="+rs.getInt(1)); + System.err.println(); + round++; + } + conn.commit(); + System.err.println("break of a resultset of pstmt1"); + round = 0; + pstmt.setMaxRows(2); + pstmt.setInt(1,1); + rs = pstmt.executeQuery(); + while(rs.next()) { + System.err.println("c2="+rs.getInt(1));System.err.println(); + } + System.err.println("break of a resultset of pstmt1"); + round = 0; + pstmt.setMaxRows(2); + pstmt.setInt(1,1); + rs = pstmt.executeQuery(); + while(rs.next()) { + System.err.println("c2="+rs.getInt(1));System.err.println(); + } + System.err.println("break of a resultset of pstmt1"); + round = 0; + while(rs.next()) { // + System.err.println("c2="+rs.getInt(1)); + System.err.println(); + round++; + } + System.err.println("end of a resultset"); + rc = stmt.executeUpdate("drop table t1;"); + pstmt.close(); + } catch (SQLException e) { + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + + } + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } + e.printStackTrace(); + } + } + + + +public + static void main(String[] args) + { + String PORT = args[0]; + Connection conn = GetConnection(PORT); + + if (conn == null) { + System.out.println("connection failed"); + return; + } + CreateTable(conn); + InsertData(conn); + SelectData(conn); + UpdateData(conn); + SelectDataDirectly(conn); + SelectDataDataRow(conn); + try { + conn.close(); + System.out.println("close connection"); + } catch (SQLException exception) { + exception.printStackTrace(); + } + } +} diff --git a/src/test/regress/make_check_postgresql_2pc.conf b/src/test/regress/make_check_postgresql_2pc.conf index 0f0a5f270..34ec8847b 100644 --- a/src/test/regress/make_check_postgresql_2pc.conf +++ b/src/test/regress/make_check_postgresql_2pc.conf @@ -5,3 +5,4 @@ enable_gtm_free = on enable_thread_pool = on comm_stat_mode = off enable_twophase_commit = on +enable_global_syscache = on diff --git a/src/test/regress/make_fastcheck_postgresql.conf b/src/test/regress/make_fastcheck_postgresql.conf index cec0ae637..644855f8f 100644 --- a/src/test/regress/make_fastcheck_postgresql.conf +++ b/src/test/regress/make_fastcheck_postgresql.conf @@ -31,6 +31,7 @@ uncontrolled_memory_context='HashCacheContext,TupleHashTable,TupleSort,AggContex enable_thread_pool = on enable_default_cfunc_libpath = off enable_stateless_pooler_reuse = on +enable_ustore = on wal_level = logical -undo_zone_count = 1000 sql_beta_feature = 'a_style_coerce' +enable_global_syscache = on diff --git a/src/test/regress/make_fastcheck_single_mot_postgresql.conf b/src/test/regress/make_fastcheck_single_mot_postgresql.conf index f1619f773..ebe5efa18 100644 --- a/src/test/regress/make_fastcheck_single_mot_postgresql.conf +++ b/src/test/regress/make_fastcheck_single_mot_postgresql.conf @@ -28,4 +28,4 @@ enable_thread_pool = on enable_incremental_checkpoint = false enable_double_write = off password_encryption_type = 1 - +enable_global_syscache = on diff --git a/src/test/regress/make_redocheck_postgresql.conf b/src/test/regress/make_redocheck_postgresql.conf index 89a90d2f6..4e2f84ea6 100644 --- a/src/test/regress/make_redocheck_postgresql.conf +++ b/src/test/regress/make_redocheck_postgresql.conf @@ -15,3 +15,4 @@ checkpoint_timeout = 10min gtm_conn_check_interval = 10 
enable_absolute_tablespace = true enable_thread_pool = on +enable_global_syscache = on diff --git a/src/test/regress/make_wlmcheck_postgresql.conf b/src/test/regress/make_wlmcheck_postgresql.conf index ff060e54e..33e425340 100644 --- a/src/test/regress/make_wlmcheck_postgresql.conf +++ b/src/test/regress/make_wlmcheck_postgresql.conf @@ -24,3 +24,4 @@ resource_track_cost=10 resource_track_duration=0 resource_track_level='operator' enable_thread_pool = on +enable_global_syscache = on diff --git a/src/test/regress/obstools/Makefile b/src/test/regress/obstools/Makefile index 7c49c7fd9..1216af835 100644 --- a/src/test/regress/obstools/Makefile +++ b/src/test/regress/obstools/Makefile @@ -6,7 +6,7 @@ endif EXTRA_LIB_DIR=-L../lib EXTRA_INCLUDE_DIR=-I../include -LIB=-leSDKOBS -lsecurec +LIB=-leSDKOBS -l$(SECURE_C_CHECK) LINKOPT=$(EXTRA_LIB_DIR) $(LIB) TARGET_TOOL=obstool diff --git a/src/test/regress/output/area.source b/src/test/regress/output/area.source new file mode 100644 index 000000000..ef50cef10 --- /dev/null +++ b/src/test/regress/output/area.source @@ -0,0 +1,78 @@ +CREATE TABLE area_example1(id SERIAL primary key, somedata int, text varchar(120))with(storage_type = ustore); +NOTICE: CREATE TABLE will create implicit sequence "area_example1_id_seq" for serial column "area_example1.id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "area_example1_pkey" for table "area_example1" +CREATE TABLE area_example2(a int primary key,b int,c int); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "area_example2_pkey" for table "area_example2" +CREATE TABLE area_example3(a int,b int,c int); +CREATE OR REPLACE function decode_area_proc(plugin text) returns SETOF text +LANGUAGE plpgsql +AS +$$ +declare o_ret text; + my_sql text; + param1 text; +begin +truncate table area_example1; +truncate table area_example2; +truncate table area_example3; +EXECUTE('SELECT pg_current_xlog_location();') into o_ret; +INSERT INTO area_example1(somedata, text) VALUES (1, 1); +INSERT INTO area_example1(somedata, text) VALUES (1, 2); +update area_example1 set somedata=somedata*10 where somedata=1; +delete from area_example1 where somedata=10; + +INSERT INTO area_example2 VALUES (1, 1, 1); +INSERT INTO area_example2 VALUES (2, 2, 2); +update area_example2 set b=b*10 where a=1; +delete from area_example2 where c=1; + +INSERT INTO area_example3 VALUES (1, 1, 1); +INSERT INTO area_example3 VALUES (2, 2, 2); +update area_example3 set b=b*10 where a=1; +delete from area_example3 where c=1; +my_sql = 'select data from pg_logical_get_area_changes(''' || o_ret || ''',NULL,NULL,''' || plugin || ''',NULL);'; +return query EXECUTE(my_sql); +END; +$$ +; +call decode_area_proc('mppdb_decoding'); + decode_area_proc +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"table_name":"public.area_example1","op_type":"INSERT","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["1","1","'1'"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + {"table_name":"public.area_example1","op_type":"INSERT","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["2","1","'2'"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + 
{"table_name":"public.area_example1","op_type":"UPDATE","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["1","10","'1'"],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["1","1","'1'"]} + {"table_name":"public.area_example1","op_type":"UPDATE","columns_name":["id","somedata","text"],"columns_type":["integer","integer","character varying"],"columns_val":["2","10","'2'"],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["2","1","'2'"]} + {"table_name":"public.area_example1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["1","10","'1'"]} + {"table_name":"public.area_example1","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["id","somedata","text"],"old_keys_type":["integer","integer","character varying"],"old_keys_val":["2","10","'2'"]} + {"table_name":"public.area_example2","op_type":"INSERT","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["1","1","1"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + {"table_name":"public.area_example2","op_type":"INSERT","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["2","2","2"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + {"table_name":"public.area_example2","op_type":"UPDATE","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["1","10","1"],"old_keys_name":["a"],"old_keys_type":["integer"],"old_keys_val":["1"]} + {"table_name":"public.area_example2","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":["a"],"old_keys_type":["integer"],"old_keys_val":["1"]} + {"table_name":"public.area_example3","op_type":"INSERT","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["1","1","1"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + {"table_name":"public.area_example3","op_type":"INSERT","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["2","2","2"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + {"table_name":"public.area_example3","op_type":"UPDATE","columns_name":["a","b","c"],"columns_type":["integer","integer","integer"],"columns_val":["1","10","1"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} + {"table_name":"public.area_example3","op_type":"DELETE","columns_name":[],"columns_type":[],"columns_val":[],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} +(14 rows) + +call decode_area_proc('sql_decoding'); + decode_area_proc +------------------------------------------------------------------------------------------------------------------------------------- + insert into public.area_example1 values (3, 1, '1'); + insert into public.area_example1 values (4, 1, '2'); + delete from public.area_example1 where id = 3 and somedata = 1 and text = '1';insert into public.area_example1 values (3, 10, '1'); + delete from public.area_example1 where id = 4 and somedata = 1 and text = '2';insert into public.area_example1 values (4, 10, '2'); + delete from public.area_example1 where id = 3 and somedata = 10 and text = '1'; + delete from public.area_example1 where id = 4 and somedata = 10 and text = '2'; + 
insert into public.area_example2 values (1, 1, 1); + insert into public.area_example2 values (2, 2, 2); + delete from public.area_example2 where a = 1;insert into public.area_example2 values (1, 10, 1); + delete from public.area_example2 where a = 1; + insert into public.area_example3 values (1, 1, 1); + insert into public.area_example3 values (2, 2, 2); + delete from public.area_example3 where (no-tuple-data);insert into public.area_example3 values (1, 10, 1); + delete from public.area_example3 where (no-tuple-data); +(14 rows) + +drop table area_example1; +drop table area_example2; +drop table area_example3; diff --git a/src/test/regress/output/bulkload_compatibility_test.source b/src/test/regress/output/bulkload_compatibility_test.source index a054fbad6..20dbb213c 100644 --- a/src/test/regress/output/bulkload_compatibility_test.source +++ b/src/test/regress/output/bulkload_compatibility_test.source @@ -144,7 +144,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_NULL BIGINT POSITION(30,0), C_VARCHAR VARCHAR(50) POSITION(30,30), C_NUMERIC NUMERIC(20,5) POSITION(60,10) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); WARNING: The above Read-Only foreign table is using FIXED mode without specifying 'fix' option. HINT: Please use 'fix' option to specify expected fixed record length in order to parser the data file correctly. SELECT * FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; @@ -176,7 +176,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_CLOB CLOB POSITION(102,29), C_NUMERIC NUMERIC(20,5) POSITION(131,14), C_DP DOUBLE PRECISION POSITION(145,14) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); WARNING: The above Read-Only foreign table is using FIXED mode without specifying 'fix' option. HINT: Please use 'fix' option to specify expected fixed record length in order to parser the data file correctly. 
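The area.source expectations added above drive the same WAL range through two decoder plugins: mppdb_decoding emits one JSON document per change, while sql_decoding emits replayable SQL. The old-key detail in both outputs depends on the table's key: area_example2 decodes old keys by its primary-key column only, and area_example3, which has no key, falls back to (no-tuple-data) predicates under sql_decoding. A minimal JDBC sketch of the call pattern decode_area_proc uses — capture a start LSN with pg_current_xlog_location(), run some DML, then read the decoded rows — is shown below. It assumes wal_level = logical (as the fastcheck conf above sets), placeholder connection details, and a hypothetical demo table; only functions already exercised by the test are called.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AreaDecodeSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/postgres"; // placeholder
        try (Connection conn = DriverManager.getConnection(url, "user", "password");
             Statement stmt = conn.createStatement()) {
            // Remember where decoding should start, as decode_area_proc does.
            ResultSet rs = stmt.executeQuery("select pg_current_xlog_location();");
            rs.next();
            String startLsn = rs.getString(1);
            rs.close();
            // Hypothetical demo table; a primary key makes old keys decode by key columns.
            stmt.executeUpdate("create table area_demo(a int primary key, b int);");
            stmt.executeUpdate("insert into area_demo values (1, 1);");
            stmt.executeUpdate("update area_demo set b = 10 where a = 1;");
            // Same call shape the plpgsql helper builds; values are inlined textually,
            // exactly as in the test. Swap 'sql_decoding' for 'mppdb_decoding' for JSON.
            String sql = "select data from pg_logical_get_area_changes('" + startLsn
                    + "', NULL, NULL, 'sql_decoding', NULL);";
            try (ResultSet changes = stmt.executeQuery(sql)) {
                while (changes.next()) {
                    System.out.println(changes.getString(1));
                }
            }
            stmt.executeUpdate("drop table area_demo;");
        }
    }
}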
SELECT COUNT(*) FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; diff --git a/src/test/regress/output/bulkload_compatibility_test_part1.source b/src/test/regress/output/bulkload_compatibility_test_part1.source index 44d2e091c..02864cf0e 100644 --- a/src/test/regress/output/bulkload_compatibility_test_part1.source +++ b/src/test/regress/output/bulkload_compatibility_test_part1.source @@ -144,7 +144,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_NULL BIGINT POSITION(30,0), C_VARCHAR VARCHAR(50) POSITION(30,30), C_NUMERIC NUMERIC(20,5) POSITION(60,10) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); WARNING: The above Read-Only foreign table is using FIXED mode without specifying 'fix' option. HINT: Please use 'fix' option to specify expected fixed record length in order to parser the data file correctly. SELECT * FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; @@ -176,7 +176,7 @@ CREATE FOREIGN TABLE EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE C_CLOB CLOB POSITION(102,29), C_NUMERIC NUMERIC(20,5) POSITION(131,14), C_DP DOUBLE PRECISION POSITION(145,14) -)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_DTS2016060600832_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); +)SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://127.0.0.1:8900/bulkload_compatible_illegal_chars_test_data/invalid_char_for_just_char_type_fixed_format_TESTTABLE_all.data',FORMAT 'FIXED',MODE 'Normal',COMPATIBLE_ILLEGAL_CHARS 'TRUE',ENCODING 'UTF8'); WARNING: The above Read-Only foreign table is using FIXED mode without specifying 'fix' option. HINT: Please use 'fix' option to specify expected fixed record length in order to parser the data file correctly. SELECT COUNT(*) FROM EXT_COMPATIBLE_ILLEGAL_CHARS_TEST_JUST_CHAR_TYPE; diff --git a/src/test/regress/output/bypass_pbe.source b/src/test/regress/output/bypass_pbe.source index ce1ea20eb..209335977 100644 --- a/src/test/regress/output/bypass_pbe.source +++ b/src/test/regress/output/bypass_pbe.source @@ -51,6 +51,28 @@ id=32 id=31 end E2E2 +start OFFSET 1 LIMIT 10 E2 +name=name11 + +name=name11 + +name=name11 + +name=name11 + +name=name11 + +name=name11 + +name=name11 + +name=name11 + +name=name11 + +name=name11 + +end OFFSET 1 LIMIT 10 E2 aff_row=10 id=1,class=0,name=name1 id=2,class=0,name=name2 diff --git a/src/test/regress/output/ce_copy.source b/src/test/regress/output/ce_copy.source new file mode 100644 index 000000000..f1c38e352 --- /dev/null +++ b/src/test/regress/output/ce_copy.source @@ -0,0 +1,148 @@ +\! gs_ktool -d all +DELETE ALL + +\! 
gs_ktool -g +GENERATE +1 +DROP CLIENT MASTER KEY IF EXISTS copyCMK CASCADE; +NOTICE: client master key "copycmk" does not exist +CREATE CLIENT MASTER KEY copyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY copyCEK WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS CopyFromTbl(i0 INT, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); +COPY copyfromtbl FROM stdin; +-- fail error +COPY copyfromtbl FROM stdin; +ERROR: extra data after last expected column +--?CONTEXT: COPY copyfromtbl, line 1: .* +SELECT * FROM CopyFromTbl ORDER BY i0; + i0 | i1 | i2 +----+----+---- + 5 | 10 | 7 + 20 | 20 | 8 + 30 | 10 | 12 + 50 | 35 | 12 + 80 | 15 | 23 +(5 rows) + +COPY copyfromtbl (i0, i1,i2) FROM stdin; +SELECT * FROM CopyFromTbl ORDER BY i0; + i0 | i1 | i2 +----+----+---- + 5 | 10 | 7 + 5 | 10 | 7 + 20 | 20 | 8 + 20 | 20 | 8 + 30 | 10 | 12 + 30 | 10 | 12 + 50 | 35 | 12 + 50 | 35 | 12 + 80 | 15 | 23 + 80 | 15 | 23 +(10 rows) + +-- false +COPY copyfromtbl (i0, i1,i2) FROM stdin; +COPY copyfromtbl (i0, i1,i2) FROM stdin; +ERROR: COPY from stdin failed: aborted because of read failure +CONTEXT: COPY copyfromtbl, line 1 +\. +invalid command \. +\copy copyfromtbl (i0, i1,i2) FROM stdin; +\copy copyfromtbl (i0, i1,i2) FROM stdin; +ERROR: COPY from stdin failed: aborted because of read failure +CONTEXT: COPY copyfromtbl, line 1 +\. +invalid command \. +\copy CopyFromTbl FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; +SELECT * FROM CopyFromTbl ORDER BY i0; + i0 | i1 | i2 +---------+------+---- + 5 | 10 | 7 + 5 | 10 | 7 + 5 | 12 | 7 + 5 | 12 | 7 + 20 | 20 | 8 + 20 | 20 | 8 + 30 | 10 | 12 + 30 | 10 | 12 + 50 | 35 | 12 + 50 | 35 | 12 + 80 | 15 | 23 + 80 | 15 | 23 + 2450812 | 1388 | 23 + 2450835 | 1393 | 21 + 2450845 | 1399 | 22 + 2450855 | 1400 | 23 +(16 rows) + +\copy (SELECT * FROM CopyFromTbl ORDER BY i2) TO '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; +copy CopyFromTbl FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; +ERROR(CLIENT): column encryption does't support copy from server file to table +copy CopyFromTbl (i0, i1,i2) FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; +ERROR(CLIENT): column encryption does't support copy from server file to table +copy CopyFromTbl TO '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; +ERROR(CLIENT): column encryption does't support copy from server file to table +copy (SELECT * FROM CopyFromTbl ORDER BY i2) TO '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; +ERROR(CLIENT): column encryption does't support copy from server file to table +CREATE TABLE IF NOT EXISTS CopyTOTbl(i0 INT, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=copyCEK, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); +\copy CopyToTbl FROM '@abs_srcdir@/data/ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; +SELECT * FROM CopyToTbl ORDER BY i0; + i0 | i1 | i2 +---------+------+---- + 5 | 10 | 7 + 5 | 10 | 7 + 5 | 12 | 7 + 5 | 12 | 7 + 20 | 20 | 8 + 20 | 20 | 8 + 30 | 10 | 12 + 30 | 10 | 12 + 50 | 35 | 12 + 50 | 35 | 12 + 80 | 15 | 23 + 80 | 15 | 23 + 2450812 | 1388 | 23 + 2450835 | 1393 | 21 + 2450845 | 1399 | 22 + 2450855 | 1400 | 23 +(16 rows) + +COPY (SELECT * FROM CopyFromTbl ORDER BY i0) TO stdout; +5 10 7 +5 10 7 +5 12 7 +5 12 7 +20 20 8 +20 20 8 +30 10 12 +30 10 12 +50 35 12 +50 35 12 +80 15 23 +80 15 23 
+2450812 1388 23 +2450835 1393 21 +2450845 1399 22 +2450855 1400 23 +DROP TABLE IF EXISTS encrypted_tb; +NOTICE: table "encrypted_tb" does not exist, skipping +create table encrypted_tb( inv_date_sk integer not null,inv_item_sk integer not null, inv_warehouse_sk integer encrypted with (column_encryption_key = copyCEK, encryption_type = DETERMINISTIC)); +\copy encrypted_tb FROM '@abs_srcdir@/data/ce_copy_from.csv' WITH (delimiter',',IGNORE_EXTRA_DATA 'on'); +select * FROM encrypted_tb ORDER BY inv_date_sk; + inv_date_sk | inv_item_sk | inv_warehouse_sk +-------------+-------------+------------------ + 2450811 | 1382 | 24 + 2450812 | 1388 | 23 + 2450835 | 1393 | 21 + 2450845 | 1399 | 22 + 2450855 | 1400 | 23 +(5 rows) + +DROP TABLE IF EXISTS encrypted_tb; +DROP TABLE IF EXISTS CopyFromTbl; +DROP TABLE IF EXISTS CopyToTbl; +DROP CLIENT MASTER KEY copyCMK CASCADE; +NOTICE: drop cascades to column encryption key: copycek +\! gs_ktool -d all +DELETE ALL + 1 diff --git a/src/test/regress/output/ce_create_jdbc.source b/src/test/regress/output/ce_create_jdbc.source new file mode 100644 index 000000000..95ec96ba8 --- /dev/null +++ b/src/test/regress/output/ce_create_jdbc.source @@ -0,0 +1,39 @@ +DROP USER IF EXISTS test CASCADE; +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +\! gs_ktool -g +\! gs_ktool -g + +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionCreateTest @portstring@ > /dev/null 2>&1 +INSERT INTO ce_t1 VALUES(1); +INSERT INTO ce_t2 VALUES(1); +INSERT INTO ce_t3 VALUES(1); +INSERT INTO ce_t4 VALUES(1); +INSERT INTO ce_t5 VALUES(1); +INSERT INTO ce_t6 VALUES(1); +SELECT * FROM ce_t1; +SELECT * FROM ce_t2; +SELECT * FROM ce_t3; +SELECT * FROM ce_t4; +SELECT * FROM ce_t5; +SELECT * FROM ce_t6; +DROP TABLE IF EXISTS ce_t1; +DROP TABLE IF EXISTS ce_t2; +DROP TABLE IF EXISTS ce_t3; +DROP TABLE IF EXISTS ce_t4; +DROP TABLE IF EXISTS ce_t5; +DROP TABLE IF EXISTS ce_t6; + +DROP COLUMN ENCRYPTION KEY ImgCEK1; +DROP COLUMN ENCRYPTION KEY ImgCEK; +DROP CLIENT MASTER KEY ImgCMK1 CASCADE; +DROP CLIENT MASTER KEY ImgCMK CASCADE; + +select count(*), 'count' FROM gs_client_global_keys; +select count(*), 'count' FROM gs_column_keys; +SELECT count(*), 'count' FROM gs_encrypted_columns; +\! gs_ktool -d all + +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/ce_fetchsize_jdbc.source b/src/test/regress/output/ce_fetchsize_jdbc.source new file mode 100644 index 000000000..3aa5f685a --- /dev/null +++ b/src/test/regress/output/ce_fetchsize_jdbc.source @@ -0,0 +1,57 @@ +DROP USER IF EXISTS test CASCADE; +NOTICE: role "test" does not exist, skipping +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. 
ClientEncryptionFetchSizeTest @portstring@ > /dev/null 2>&1 +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + column_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +SELECT column_name, encryption_type, data_type_original_oid FROM gs_encrypted_columns; + column_name | encryption_type | data_type_original_oid +-------------+-----------------+------------------------ +--?.* +(1 row) + +DROP TABLE IF EXISTS fetchsize_tab; +DROP TABLE IF EXISTS sqlbypassfetchsize_tab; +NOTICE: table "sqlbypassfetchsize_tab" does not exist, skipping +DROP CLIENT MASTER KEY FetchSizeCMK CASCADE; +NOTICE: drop cascades to column encryption key: fetchsizecek +select count(*), 'count' FROM gs_client_global_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +select count(*), 'count' FROM gs_column_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +SELECT count(*), 'count' FROM gs_encrypted_columns; + count | ?column? +-------+---------- + 0 | count +(1 row) + +\! gs_ktool -d all +DELETE ALL + 1 +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/ce_mul_query_jdbc.source b/src/test/regress/output/ce_mul_query_jdbc.source new file mode 100644 index 000000000..28190a967 --- /dev/null +++ b/src/test/regress/output/ce_mul_query_jdbc.source @@ -0,0 +1,59 @@ +DROP USER IF EXISTS test CASCADE; +NOTICE: role "test" does not exist, skipping +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! gs_ktool -g +GENERATE +2 +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionMulSql @portstring@ > /dev/null 2>&1 +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + column_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +-- supported +select count(*) from test11; + count +------- + 5 +(1 row) + +DROP TABLE IF EXISTS test11; +DROP CLIENT MASTER KEY MulCMK1 CASCADE; +NOTICE: drop cascades to column encryption key: cek_jdbc1 +select count(*), 'count' FROM gs_client_global_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +select count(*), 'count' FROM gs_column_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +SELECT count(*), 'count' FROM gs_encrypted_columns; + count | ?column? +-------+---------- + 0 | count +(1 row) + +\! gs_ktool -d all +DELETE ALL + 1 2 +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/ce_prepare_jdbc.source b/src/test/regress/output/ce_prepare_jdbc.source new file mode 100644 index 000000000..09c3cf1de --- /dev/null +++ b/src/test/regress/output/ce_prepare_jdbc.source @@ -0,0 +1,54 @@ +DROP USER IF EXISTS test CASCADE; +NOTICE: role "test" does not exist, skipping +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! gs_ktool -g +GENERATE +2 +\! 
@abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionPrepareTest @portstring@ > /dev/null 2>&1 +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +--?.* +(2 rows) + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + column_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +DROP TABLE IF EXISTS products; +DROP COLUMN ENCRYPTION KEY pre_ImgCEK; +DROP CLIENT MASTER KEY pre_ImgCMK1 CASCADE; +DROP CLIENT MASTER KEY pre_ImgCMK CASCADE; +select count(*), 'count' FROM gs_client_global_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +select count(*), 'count' FROM gs_column_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +SELECT count(*), 'count' FROM gs_encrypted_columns; + count | ?column? +-------+---------- + 0 | count +(1 row) + +\! gs_ktool -d all +DELETE ALL + 1 2 +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/ce_select_jdbc.source b/src/test/regress/output/ce_select_jdbc.source new file mode 100644 index 000000000..23b8600ef --- /dev/null +++ b/src/test/regress/output/ce_select_jdbc.source @@ -0,0 +1,131 @@ +DROP USER IF EXISTS test CASCADE; +NOTICE: role "test" does not exist, skipping +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! gs_ktool -g +GENERATE +2 +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. 
ClientEncryptionSelectTest @portstring@ > /dev/null 2>&1 +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +--?.* +(2 rows) + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + column_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +--?.* +(2 rows) + +-- supported +select count(*) from creditcard_info; + count +------- + 6 +(1 row) + +select count(*) from creditcard_info1; + count +------- + 5 +(1 row) + +select count(*) from creditcard_info2; + count +------- + 3 +(1 row) + +select count(*) from creditcard_info3; + count +------- + 3 +(1 row) + +select count(*) from creditcard_info2_1; + count +------- + 3 +(1 row) + +select count(*) from creditcard_info3_1; + count +------- + 3 +(1 row) + +select count(*) from creditcard_info4; +ERROR: relation "creditcard_info4" does not exist on datanode1 +LINE 1: select count(*) from creditcard_info4; + ^ +select count(*) from creditcard_info5; +ERROR: relation "creditcard_info5" does not exist on datanode1 +LINE 1: select count(*) from creditcard_info5; + ^ +select count(*) from un_encrypted_table; + count +------- + 3 +(1 row) + +select count(*) from batch_table; + count +------- + 20 +(1 row) + +select count(*) from table_random; + count +------- + 8 +(1 row) + +DROP TABLE IF EXISTS creditcard_info; +DROP TABLE IF EXISTS creditcard_info1; +DROP TABLE IF EXISTS creditcard_info2; +DROP TABLE IF EXISTS creditcard_info3; +DROP TABLE IF EXISTS creditcard_info2_1; +DROP TABLE IF EXISTS creditcard_info3_1; +DROP TABLE IF EXISTS creditcard_info4; +NOTICE: table "creditcard_info4" does not exist, skipping +DROP TABLE IF EXISTS creditcard_info5; +NOTICE: table "creditcard_info5" does not exist, skipping +DROP TABLE IF EXISTS un_encrypted_table; +DROP TABLE IF EXISTS batch_table; +DROP TABLE IF EXISTS table_random; +DROP COLUMN ENCRYPTION KEY ImgCEK1; +DROP COLUMN ENCRYPTION KEY ImgCEK; +DROP CLIENT MASTER KEY ImgCMK1 CASCADE; +DROP CLIENT MASTER KEY ImgCMK CASCADE; +select count(*), 'count' FROM gs_client_global_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +select count(*), 'count' FROM gs_column_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +SELECT count(*), 'count' FROM gs_encrypted_columns; + count | ?column? +-------+---------- + 0 | count +(1 row) + +\! gs_ktool -d all +DELETE ALL + 1 2 +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/ce_transaction_jdbc.source b/src/test/regress/output/ce_transaction_jdbc.source new file mode 100644 index 000000000..6a7538091 --- /dev/null +++ b/src/test/regress/output/ce_transaction_jdbc.source @@ -0,0 +1,62 @@ +DROP USER IF EXISTS test CASCADE; +NOTICE: role "test" does not exist, skipping +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. 
ClientEncryptionTransactionTest @portstring@ > /dev/null 2>&1 +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + column_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +SELECT column_name, encryption_type, data_type_original_oid FROM gs_encrypted_columns; + column_name | encryption_type | data_type_original_oid +-------------+-----------------+------------------------ +--?.* +(1 row) + +-- supported +select count(*) from test_table; + count +------- + 5 +(1 row) + +DROP TABLE IF EXISTS test_table; +DROP CLIENT MASTER KEY TransactionCMK CASCADE; +NOTICE: drop cascades to column encryption key: transactioncek +select count(*), 'count' FROM gs_client_global_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +select count(*), 'count' FROM gs_column_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +SELECT count(*), 'count' FROM gs_encrypted_columns; + count | ?column? +-------+---------- + 0 | count +(1 row) + +\! gs_ktool -d all +DELETE ALL + 1 +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/ce_trigger_jdbc.source b/src/test/regress/output/ce_trigger_jdbc.source new file mode 100644 index 000000000..42659f449 --- /dev/null +++ b/src/test/regress/output/ce_trigger_jdbc.source @@ -0,0 +1,75 @@ +DROP USER IF EXISTS test CASCADE; +NOTICE: role "test" does not exist, skipping +CREATE USER test WITH CREATEDB PASSWORD "Gauss@123"; +SET ROLE test PASSWORD 'Gauss@123'; +\! gs_ktool -d all +DELETE ALL + +\! gs_ktool -g +GENERATE +1 +\! @abs_bindir@/../jre/bin/java -cp $CLASSPATH:@abs_builddir@/jdbc_client_lib/gsjdbc4.jar:@abs_builddir@/jdbc_ce_test/ce_ddl_pbe/. ClientEncryptionTriggerTest @portstring@ > /dev/null 2>&1 +SELECT global_key_name, key_namespace, key_owner FROM gs_client_global_keys; + global_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +(1 row) + +SELECT column_key_name, key_namespace, key_owner FROM gs_column_keys; + column_key_name | key_namespace | key_owner +-----------------+---------------+----------- +--?.* +--?.* +(2 rows) + +SELECT column_name, encryption_type, data_type_original_oid FROM gs_encrypted_columns; + column_name | encryption_type | data_type_original_oid +-------------+-----------------+------------------------ +--?.* +--?.* +--?.* +--?.* +(4 rows) + +-- supported
select count(*) from test_trigger_src_tbl; + count +------- + 0 +(1 row) + +select count(*) from test_trigger_des_tbl; + count +------- + 0 +(1 row) + +DROP TABLE IF EXISTS test_trigger_src_tbl; +DROP TABLE IF EXISTS test_trigger_des_tbl; +DROP CLIENT MASTER KEY triggerCMK CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to column encryption key: triggercek1 +drop cascades to column encryption key: triggercek2 +select count(*), 'count' FROM gs_client_global_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +select count(*), 'count' FROM gs_column_keys; + count | ?column? +-------+---------- + 0 | count +(1 row) + +SELECT count(*), 'count' FROM gs_encrypted_columns; + count | ?column? +-------+---------- + 0 | count +(1 row) + +\! 
gs_ktool -d all +DELETE ALL + 1 +RESET ROLE; +DROP USER IF EXISTS test CASCADE; diff --git a/src/test/regress/output/component_view_enhancements.source b/src/test/regress/output/component_view_enhancements.source new file mode 100644 index 000000000..14e5b995b --- /dev/null +++ b/src/test/regress/output/component_view_enhancements.source @@ -0,0 +1,127 @@ +select * from gs_stat_undo(); +--?.* +--?.* +--?.* +(1 row) + +select * from gs_stat_wal_entrytable(-1) limit 1; +--?.* +--?.* +--?.* +(1 row) + +select * from gs_stat_wal_entrytable(1); +--?.* +--?.* +--?.* +(1 row) + +select * from gs_stat_wal_entrytable(-2); +ERROR: The idx out of range. +select * from gs_walwriter_flush_position(); +--?.* +--?.* +--?.* +(1 row) + +select * from gs_walwriter_flush_stat(1); +--?.* +--?.* +--?.* +(1 row) + +select * from gs_walwriter_flush_stat(2); +--?.* +--?.* +--?.* +(1 row) + +DROP TABLE IF EXISTS test_ustore; +--?.* +DROP INDEX IF EXISTS test_ustore_idx; +--?.* +DROP INDEX IF EXISTS test_ustore_idx2; +--?.* +START TRANSACTION; +CREATE TABLE test_ustore (a int, b int ,c int) with(storage_type=ustore); +CREATE INDEX test_ustore_idx ON test_ustore(a); +CREATE INDEX test_ustore_idx2 ON test_ustore(b,c); +INSERT INTO test_ustore values(generate_series(1,1000000),generate_series(1,1000000), generate_series(1,1000000)); +CHECKPOINT; +CREATE OR REPLACE FUNCTION proc_gs_index_verify(tablename in varchar2) +RETURNS SETOF varchar +LANGUAGE plpgsql +AS +$$ +DECLARE + relationOid oid; + stat varchar; +BEGIN + SELECT relfilenode into relationOid from pg_class where relname=tablename; + for stat in ( select status from gs_index_verify(relationOid, 0) ) loop + RETURN NEXT stat; + end loop; + for stat in ( select status from gs_index_verify(relationOid, 1) ) loop + RETURN NEXT stat; + end loop; + return; +END; +$$ +; +SELECT proc_gs_index_verify('test_ustore_idx'); + proc_gs_index_verify +---------------------- + normal + normal +(2 rows) + +SELECT proc_gs_index_verify('test_ustore_idx2'); + proc_gs_index_verify +---------------------- + normal + normal +(2 rows) + +DROP TABLE IF EXISTS test_ustore; +COMMIT; +START TRANSACTION; +CREATE TABLE test_ustore (a int, b int ,c int) with(storage_type=ustore); +CREATE INDEX test_ustore_idx ON test_ustore(a); +CREATE INDEX test_ustore_idx2 ON test_ustore(b,c); +INSERT INTO test_ustore values(generate_series(1,1000000),generate_series(1,1000000), generate_series(1,1000000)); +CHECKPOINT; +CREATE OR REPLACE FUNCTION proc_gs_index_recycle_queue(tablename in varchar2) +RETURNS SETOF varchar +LANGUAGE plpgsql +AS +$$ +DECLARE + relationOid oid; + stat varchar; +BEGIN + SELECT relfilenode into relationOid from pg_class where relname=tablename; + for stat in ( select rblkno from gs_index_recycle_queue(relationOid, 0, 0) ) loop + RETURN NEXT stat; + end loop; + for stat in ( select rblkno from gs_index_recycle_queue(relationOid, 1, 0) ) loop + RETURN NEXT stat; + end loop; + for stat in ( select rblkno from gs_index_recycle_queue(relationOid, 2, 1) ) loop + RETURN NEXT stat; + end loop; + return; +END; +$$ +; +SELECT proc_gs_index_recycle_queue('test_ustore_idx'); + proc_gs_index_recycle_queue +----------------------------- +(0 rows) + +SELECT proc_gs_index_recycle_queue('test_ustore_idx2'); + proc_gs_index_recycle_queue +----------------------------- +(0 rows) + +DROP TABLE IF EXISTS test_ustore; +COMMIT; \ No newline at end of file diff --git a/src/test/regress/output/copy.source b/src/test/regress/output/copy.source index 17ea38ac5..db0607906 100644 --- 
a/src/test/regress/output/copy.source +++ b/src/test/regress/output/copy.source @@ -276,7 +276,7 @@ select * from time_format_his_010_05 order by C_INT; drop table time_format_his_010_05; ---- ---- -create table DTS2016111106370_tbl +create table TESTTABLE_tbl ( dp_demo_sk integer not null, dp_gender char(3) , @@ -298,66 +298,66 @@ dp_text_ts tsquery partition by range (dp_date) ( -partition DTS2016111106370_tbl_1 values less than(1950), -partition DTS2016111106370_tbl_2 values less than(2000), -partition DTS2016111106370_tbl_3 values less than(2050), -partition DTS2016111106370_tbl_4 values less than(2100), -partition DTS2016111106370_tbl_5 values less than(3000), -partition DTS2016111106370_tbl_6 values less than(maxvalue) +partition TESTTABLE_tbl_1 values less than(1950), +partition TESTTABLE_tbl_2 values less than(2000), +partition TESTTABLE_tbl_3 values less than(2050), +partition TESTTABLE_tbl_4 values less than(2100), +partition TESTTABLE_tbl_5 values less than(3000), +partition TESTTABLE_tbl_6 values less than(maxvalue) ) ; -insert into DTS2016111106370_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); -insert into DTS2016111106370_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); -insert into DTS2016111106370_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); -insert into DTS2016111106370_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); -insert into DTS2016111106370_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); -insert into DTS2016111106370_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); -insert into DTS2016111106370_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); -insert into DTS2016111106370_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); -insert into DTS2016111106370_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); -insert into DTS2016111106370_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); -insert into DTS2016111106370_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); -insert into DTS2016111106370_tbl values(12,'F','M','Secondary' ,500,'Good' ,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); -insert into 
DTS2016111106370_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); -insert into DTS2016111106370_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); -insert into DTS2016111106370_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); -insert into DTS2016111106370_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); -insert into DTS2016111106370_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ ⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); -insert into DTS2016111106370_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); -insert into DTS2016111106370_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); -insert into DTS2016111106370_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); -insert into DTS2016111106370_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); -insert into DTS2016111106370_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); -insert into DTS2016111106370_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); -insert into DTS2016111106370_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); -insert into DTS2016111106370_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); -insert into DTS2016111106370_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); -insert into DTS2016111106370_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); -insert into DTS2016111106370_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); -insert into DTS2016111106370_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); -insert into DTS2016111106370_tbl values(30,'F','U','College' ,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); -update DTS2016111106370_tbl set 
dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); +insert into TESTTABLE_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); +insert into TESTTABLE_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); +insert into TESTTABLE_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); +insert into TESTTABLE_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); +insert into TESTTABLE_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); +insert into TESTTABLE_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); +insert into TESTTABLE_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); +insert into TESTTABLE_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); +insert into TESTTABLE_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); +insert into TESTTABLE_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); +insert into TESTTABLE_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); +insert into TESTTABLE_tbl values(12,'F','M','Secondary' ,500,'Good' ,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); +insert into TESTTABLE_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); +insert into TESTTABLE_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); +insert into TESTTABLE_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); +insert into TESTTABLE_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); +insert into TESTTABLE_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ ⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); +insert into TESTTABLE_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C 
WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); +insert into TESTTABLE_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); +insert into TESTTABLE_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); +insert into TESTTABLE_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); +insert into TESTTABLE_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); +insert into TESTTABLE_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); +insert into TESTTABLE_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); +insert into TESTTABLE_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); +insert into TESTTABLE_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); +insert into TESTTABLE_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); +insert into TESTTABLE_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); +insert into TESTTABLE_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); +insert into TESTTABLE_tbl values(30,'F','U','College' ,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); +update TESTTABLE_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); ---- compressed row relation -alter table DTS2016111106370_tbl set compress ; -select count(*) from DTS2016111106370_tbl; +alter table TESTTABLE_tbl set compress ; +select count(*) from TESTTABLE_tbl; count ------- 30 (1 row) ---- create compressed pages and compressed tuples -vacuum full DTS2016111106370_tbl; +vacuum full TESTTABLE_tbl; ---- copy to 1B/4B varlen values -copy DTS2016111106370_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/DTS2016111106370_tbl.txt' with (encoding 'utf8'); -drop table DTS2016111106370_tbl; +copy TESTTABLE_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/TESTTABLE_tbl.txt' with (encoding 'utf8'); +drop table TESTTABLE_tbl; ---- ---- -CREATE TABLE DTS2016112411747_tbl( c int, d date) ; -COPY DTS2016112411747_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); -SELECT * FROM DTS2016112411747_tbl; +CREATE TABLE TESTTABLE_tbl( c int, d date) ; +COPY TESTTABLE_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); +SELECT * FROM TESTTABLE_tbl; c | d ---+-------------------------- 1 | Sat Jan 01 00:00:00 2000 (1 row) -DROP TABLE DTS2016112411747_tbl; +DROP TABLE TESTTABLE_tbl; diff --git 
a/src/test/regress/output/copy_3.source b/src/test/regress/output/copy_3.source index 077fe3503..42fd153b8 100644 --- a/src/test/regress/output/copy_3.source +++ b/src/test/regress/output/copy_3.source @@ -78,7 +78,7 @@ select * from time_format_his_010_05 order by C_INT; drop table time_format_his_010_05; ---- ---- -create table DTS2016111106370_tbl +create table TESTTABLE_tbl ( dp_demo_sk integer not null, dp_gender char(3) , @@ -100,66 +100,66 @@ dp_text_ts tsquery distribute by replication partition by range (dp_date) ( -partition DTS2016111106370_tbl_1 values less than(1950), -partition DTS2016111106370_tbl_2 values less than(2000), -partition DTS2016111106370_tbl_3 values less than(2050), -partition DTS2016111106370_tbl_4 values less than(2100), -partition DTS2016111106370_tbl_5 values less than(3000), -partition DTS2016111106370_tbl_6 values less than(maxvalue) +partition TESTTABLE_tbl_1 values less than(1950), +partition TESTTABLE_tbl_2 values less than(2000), +partition TESTTABLE_tbl_3 values less than(2050), +partition TESTTABLE_tbl_4 values less than(2100), +partition TESTTABLE_tbl_5 values less than(3000), +partition TESTTABLE_tbl_6 values less than(maxvalue) ) ; -insert into DTS2016111106370_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); -insert into DTS2016111106370_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); -insert into DTS2016111106370_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); -insert into DTS2016111106370_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); -insert into DTS2016111106370_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); -insert into DTS2016111106370_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); -insert into DTS2016111106370_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); -insert into DTS2016111106370_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); -insert into DTS2016111106370_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); -insert into DTS2016111106370_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); -insert into DTS2016111106370_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); -insert into DTS2016111106370_tbl values(12,'F','M','Secondary' ,500,'Good' 
,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); -insert into DTS2016111106370_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); -insert into DTS2016111106370_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); -insert into DTS2016111106370_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); -insert into DTS2016111106370_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); -insert into DTS2016111106370_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ ⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); -insert into DTS2016111106370_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); -insert into DTS2016111106370_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); -insert into DTS2016111106370_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); -insert into DTS2016111106370_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); -insert into DTS2016111106370_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); -insert into DTS2016111106370_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); -insert into DTS2016111106370_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); -insert into DTS2016111106370_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); -insert into DTS2016111106370_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); -insert into DTS2016111106370_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); -insert into DTS2016111106370_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); -insert into DTS2016111106370_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); -insert into DTS2016111106370_tbl values(30,'F','U','College' 
,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); -update DTS2016111106370_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); +insert into TESTTABLE_tbl values(1,'M','M','Primary' ,500,'Good' ,0,0,0,1950,'13033333333','440900197702065080','桂D IIUQ6','2015年11月4日,张家口市公安局、。·ˉˇ¨〃々—~‖…‘“”〔〕〈〉《》°′″℃$¤¢£成功破获一起拉杆箱式伪基站诈骗案件。该局民警在排查安全「」『』〖〗【】±≡≌≈∽' ,null,null); +insert into TESTTABLE_tbl values(2,'F','M','Primary' ,500,'Good' ,0,0,0,2000,'13044444444','321282198903046569','云N ESBH7','∝≠≮≯≤≥∞∶∵∴∷♂♀隐患时,发现一嫌疑人正在以中国建设银行客服',null,null); +insert into TESTTABLE_tbl values(3,'M','S','Primary' ,500,'Good' ,0,0,0,2050,'13055555555','450400198802202694','贵A Y2OM0','号码“95533”名义,利用短信群发器向不⊿▲▼◣◤◢特定人群大量发送关于“银行账户积分兑换现金活动”的诈骗短信,办', null,null); +insert into TESTTABLE_tbl values(4,'F','S','Primary' ,500,'Good' ,0,0,0,2100,'13066666666','371722198810125946','川T B6KS3','案民警随即将犯罪嫌疑人周某成功抓获■△▽⊿▲▼◣◤◢◥▁▂▃▄▅▆▇█▉▊▋,当场收缴作案工具短信', null,null); +insert into TESTTABLE_tbl values(5,'M','D','Primary' ,500,'Good' ,0,0,0,3000,'13077777777','150100199204056110','藏E NVDG5','群发器一套。经查,周某多次在张家口市区利用囧⊙●○⊕◎Θ⊙¤㈱㊣★☆♀◆ ▅ ▆ ▇ █ █ ■ ▓ 回 □⊥﹃﹄┌ ┐└ ┘∟「」↑↓→短信群发器累计发送约7万余条诈骗短信。', null,null); +insert into TESTTABLE_tbl values(6,'F','D','Primary' ,500,'Good' ,0,0,0,3100,'13088888888','130684198503211498','陕K PV806','  2015年10月16日,保定市公安局端掉一涉嫌诈骗窝点,查获◇◣◢◥▲▼△▽⊿◤ ◥ ', null,null); +insert into TESTTABLE_tbl values(7,'M','W','Primary' ,500,'Good' ,0,0,0,1950,'13099999999','320800198505033823','甘D 3CK27','▂ ▃ ▄私刻的公司、发票专用章、纪念币、纪念银条、客户资料、发货单、电话等物,抓获嫌疑人6名。经查,自2015年3月以来,犯罪嫌疑人赵',null,null); +insert into TESTTABLE_tbl values(8,'F','W','Primary' ,500,'Good' ,0,0,0,2000,'13012345678','350603197710270827','宁C Q8AO5','某与冯某从网上购买假的纪念币、银条、‰§№☆★〇○●◎◇◆ 回□▌▍▎▏▓※→←↑↓↖' ,null,null); +insert into TESTTABLE_tbl values(9,'M','U','Primary' ,500,'Good' ,0,0,0,2050,'13087654321','431281198203182139','青E 1P4V4','化妆品及全国各地的个人信息,雇佣张某:?;.﹛﹏﹊︽︻〗▄ ▅ ✄@㊨→↔囍某等四人,冒充北京藏品有限公司及电视购物买卖宝工作人员,用假名字和自编工号向' ,null,null); +insert into TESTTABLE_tbl values(10,'F','U','Primary' ,500,'Good' ,0,0,0,2100,'13023456789','440601198011301931','新Q 05245','全国各地人群打电话推销假纪念币及化妆品,涉案10万余元。', null,null); +insert into TESTTABLE_tbl values(11,'M','M','Secondary' ,500,'Good' ,0,0,0,3000,'13100000000','63250019881115391X','军T CIMP5','  2015年5月30日,沧州市沧县公安局成功抓获一名涉嫌利用QQ聊天诈骗的犯罪 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳嫌疑人刘某某。经查,', null,null); +insert into TESTTABLE_tbl values(12,'F','M','Secondary' ,500,'Good' ,0,0,0,3100,'13111111111','350782198903130244','北M D8OC9','自2015年1月份以来,刘某某化名“刘某”通过QQ聊天、打电话等方式以做生意缺钱为由先后五次诈骗杨某某1.22万元。', null,null); +insert into TESTTABLE_tbl values(13,'M','S','Secondary' ,500,'Good' ,0,0,0,1950,'13122222222','542125199104061520','南A 2ER97','  2015年5月12日,冀中公安局成功破获一起利用QQ聊天进行诈骗的案件,抓获', null,null); +insert into TESTTABLE_tbl values(14,'F','S','Secondary' ,500,'Good' ,0,0,0,2000,'13133333333','530326197803047984','广U 006T9','犯罪嫌疑人1名。经查,犯罪+-×÷∧∨∑∏∪∩∈√⊥∥∠⌒⊙∫∮嫌疑人殷某某在5月1', null,null); +insert into TESTTABLE_tbl values(15,'M','D','Secondary' ,500,'Good' ,0,0,0,2050,'13144444444','341225199204262781','沈R 6TCH7','日至6日期间,在QQ上自称韩俊,以帮助受害人朱某破解命中情劫⑶⑷⑸、稳定姻缘、改变命运等一系列迷信说法,诈骗朱某4.6万元。', null,null); +insert into TESTTABLE_tbl values(16,'F','D','Secondary' ,500,'Good' ,0,0,0,2100,'13155555555','450223198706295636','成Y LRFL5','  2015年8月8日,石家庄市公安局抓获犯罪嫌疑人罗某、罗某某、黎某,成功破获“6·16”QQ诈', null,null); +insert into TESTTABLE_tbl values(17,'M','W','Secondary' ,500,'Good' ,0,0,0,3000,'13166666666','141128198604215986','兰M Y6WZ2','骗案。经查,罗某、罗某某于6月16日,ⅰⅱ⒈⒉⒊⒋ 
⒌⑴⑵⑹⑺⑿在QQ上冒充长安区某公司老板通过QQ指令公司会计转账,诈骗该公司124万元。', null,null); +insert into TESTTABLE_tbl values(18,'F','W','Secondary' ,500,'Good' ,0,0,0,3100,'13177777777','610103198203159498','济C WDPF9','1、对于犯罪分子决定刑罚的时候,应当根据犯罪的事实、犯罪的性质、情节和对于社会的', null,null); +insert into TESTTABLE_tbl values(19,'M','U','Secondary' ,500,'Good' ,0,0,0,1950,'13188888888','430523198403027119','空U EWWG0','危害程度,依照《刑法》的有关规定判处;', null,null); +insert into TESTTABLE_tbl values(20,'F','U','Secondary' ,500,'Good' ,0,0,0,2000,'13199999999','420525198009025685','海O 03SM4','  2、法律依据:1)《刑法》  第二百六十六条 【诈骗罪】诈骗公私财物,数额较大的', null,null); +insert into TESTTABLE_tbl values(21,'M','M','College' ,500,'Good' ,0,0,0,2050,'13112345678','230714198011139338','京G HVT11',',http://www.hanzify.org/software/12299.html)处三年以下', null,null); +insert into TESTTABLE_tbl values(22,'F','M','College' ,500,'Good' ,0,0,0,2100,'13187654321','430300197612109014','津C 83BH0','有期徒刑、拘役或者管制,并处或者单处罚金;数额巨大或者有其他严重情节的,处三年以上十年以〗★●', null,null); +insert into TESTTABLE_tbl values(23,'M','S','College' ,500,'Good' ,0,0,0,3000,'13123456789','210781198002130012','沪B5HBK8' ,'△♢♦♂下有期徒刑,并处罚金;数额特别巨大或者有其他特别严重情节的,处十年以上有期徒刑或者无期徒刑⒁⒂⒃⒄⒅⒆⒇,并处罚金或者没收财产。本法另有规定的,依照规定。', null,null); +insert into TESTTABLE_tbl values(24,'F','S','College' ,500,'Good' ,0,0,0,3100,'13200000000','32132219801114113X','渝AZ2FT2' ,'  2)《关于办理诈骗刑事案件具体应用法律若干问题的解释》 第一条 诈骗公私财物价值三千元ⅲⅳⅴⅵⅶ', null,null); +insert into TESTTABLE_tbl values(25,'M','D','College' ,500,'Good' ,0,0,0,1950,'13211111111','370502199010193059','冀ADEYZ7' ,'ⅷⅸⅹ①②③⑨⑩至一万元以上、三万元至十万元以上、五十万元以上的,应当分别认定为刑法第二百', null,null); +insert into TESTTABLE_tbl values(26,'F','D','College' ,500,'Good' ,0,0,0,2000,'13222222222','210682198302018541','豫LAA0C2' ,'六十六条规定的“数额较大↗↘↙〓”⒍⒎⒏⒐⒚⒛、“数额巨大”、“数额特别巨大”。', null,null); +insert into TESTTABLE_tbl values(27,'M','W','College' ,500,'Good' ,0,0,0,2050,'13233333333','532925198410018974','鲁A95P23' ,'  各省、自治区、直辖市高级人民法院、人民检察院可以结合⒑⒒⒓⒔⒕⒖⒗⒘⒙本地区经济社会⑻⑼⑽⑾发展状况,在前款规', null,null); +insert into TESTTABLE_tbl values(28,'F','W','College' ,500,'Good' ,0,0,0,2100,'13244444444','42030119750519262X','晋J4Y158' ,'定的数额幅度内,共同研究确定本地区执行的具体数额标准,报最高人民法院、最高人民检察院备案', null,null); +insert into TESTTABLE_tbl values(29,'M','U','College' ,500,'Good' ,0,0,0,3000,'13255555555','210102199412021827','蒙KMSYB9' ,'←↘↙♀♂┇┅ ﹉﹊﹍﹎╭ ╮╰ ╯ *^_^* ^*^ ^-^ ^执行的具体数额标准,', null,null); +insert into TESTTABLE_tbl values(30,'F','U','College' ,500,'Good' ,0,0,0,3100,'13266666666','120105198208259208','辽V53UR9' ,'_^ ^︵^ ∵∴‖︱ ︳︴高级人民法院、﹏﹋﹌︵︶︹︺ 【', null,null); +update TESTTABLE_tbl set dp_text_tv=to_tsvector('ngram',coalesce(dp_text,'')); ---- compressed row relation -alter table DTS2016111106370_tbl set compress ; -select count(*) from DTS2016111106370_tbl; +alter table TESTTABLE_tbl set compress ; +select count(*) from TESTTABLE_tbl; count ------- 30 (1 row) ---- create compressed pages and compressed tuples -vacuum full DTS2016111106370_tbl; +vacuum full TESTTABLE_tbl; ---- copy to 1B/4B varlen values -copy DTS2016111106370_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/DTS2016111106370_tbl.txt' with (encoding 'utf8'); -drop table DTS2016111106370_tbl; +copy TESTTABLE_tbl (dp_text_ts) to '@abs_srcdir@/data/datanode1/TESTTABLE_tbl.txt' with (encoding 'utf8'); +drop table TESTTABLE_tbl; ---- ---- -CREATE TABLE DTS2016112411747_tbl( c int, d date) distribute by hash(c); -COPY DTS2016112411747_tbl FROM STDIN with(delimiter ',',timestamp_format 'yyyymondd'); -SELECT * FROM DTS2016112411747_tbl; +CREATE TABLE TESTTABLE_tbl( c int, d date) distribute by hash(c); +COPY TESTTABLE_tbl FROM STDIN with(delimiter 
',',timestamp_format 'yyyymondd'); +SELECT * FROM TESTTABLE_tbl; c | d ---+-------------------------- 1 | Sat Jan 01 00:00:00 2000 (1 row) -DROP TABLE DTS2016112411747_tbl; +DROP TABLE TESTTABLE_tbl; diff --git a/src/test/regress/output/copy_support_transform.source b/src/test/regress/output/copy_support_transform.source index df16137d4..aebea5376 100644 --- a/src/test/regress/output/copy_support_transform.source +++ b/src/test/regress/output/copy_support_transform.source @@ -135,7 +135,6 @@ copy copy_transform_explicit_cast from '@abs_srcdir@/data/copy_transform_explici ERROR: cannot convert timestamp without time zone to integer DETAIL: column "c_bigint" is of type integer, but expression is of type timestamp without time zone drop table copy_transform_explicit_cast; -----DTS2021042807RJQKP1F00 CREATE TABLE float_type_t2 ( FT_COL INTEGER, diff --git a/src/test/regress/output/cstore_alter_table10.source b/src/test/regress/output/cstore_alter_table10.source index 0c6e8372c..bd8cd1b50 100644 --- a/src/test/regress/output/cstore_alter_table10.source +++ b/src/test/regress/output/cstore_alter_table10.source @@ -5,7 +5,7 @@ set time zone 'PRC'; -- -- -CREATE TABLE DTS2016112903778_tbl +CREATE TABLE TESTTABLE_tbl ( D_ID int, D_W_ID int, @@ -14,18 +14,18 @@ D_STREET_1 varchar(20) ) with(orientation = column) ; -COPY DTS2016112903778_tbl FROM STDIN; -alter table DTS2016112903778_tbl add column d6 decimal(64,10) default null; -SELECT DISTINCT d6 FROM DTS2016112903778_tbl; +COPY TESTTABLE_tbl FROM STDIN; +alter table TESTTABLE_tbl add column d6 decimal(64,10) default null; +SELECT DISTINCT d6 FROM TESTTABLE_tbl; d6 ---- (1 row) -DROP TABLE DTS2016112903778_tbl; +DROP TABLE TESTTABLE_tbl; -- -- -create table DTS2016120906561_tbl ( D_ID int, D_W_ID int, @@ -35,7 +35,7 @@ D_STREET_1 varchar(20) with(orientation = column) ; -- a row count in (56, 64) triggers this bug -copy DTS2016120906561_tbl from STDIN; -delete from DTS2016120906561_tbl; -alter table DTS2016120906561_tbl add column d11 decimal(32,10) not null; -drop table DTS2016120906561_tbl; +copy TESTTABLE_tbl from STDIN; +delete from TESTTABLE_tbl; +alter table TESTTABLE_tbl add column d11 decimal(32,10) not null; +drop table TESTTABLE_tbl; diff --git a/src/test/regress/output/db4ai_explain_model.source b/src/test/regress/output/db4ai_explain_model.source new file mode 100644 index 000000000..8c2d51f96 --- /dev/null +++ b/src/test/regress/output/db4ai_explain_model.source @@ -0,0 +1,77 @@ +-- create table +CREATE TABLE kmeans_2d( +id SERIAL, +position DOUBLE PRECISION[] +); +NOTICE: CREATE TABLE will create implicit sequence "kmeans_2d_id_seq" for serial column "kmeans_2d.id" +-- insert data +INSERT INTO kmeans_2d( position) +SELECT +ARRAY[ +x + random() * 15.0, +y + random() * 15.0 +]::DOUBLE PRECISION[] AS position +FROM ( +SELECT +random() * 100.0 AS x, +random() * 100.0 AS y +FROM generate_series(1,10) +) AS centroids, generate_series(1,2) i; +-- clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + +-- enable creating/dropping model audit +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=4206599" > /dev/null 2>&1 +\! 
sleep 1s +-- create model +CREATE MODEL test_explain_model_function USING kmeans +FEATURES position +FROM kmeans_2d +WITH max_iterations=default; +select substring(gs_explain_model('test_explain_model_function'),0, 196); + substring +-------------------------------------------------------------- + Name: test_explain_model_function + + Algorithm: kmeans + + Query: CREATE MODEL test_explain_model_function USING kmeans+ + FEATURES position + + FROM kmeans_2d + + WITH max_iterations=default; + + Return type: Int32 + + +(1 row) + +-- cleanup models in random order +DROP MODEL test_explain_model_function; +-- query audit logs +select type, result, object_name, detail_info from pg_query_audit('1012-11-10', '3012-11-11') where type in ('ddl_model'); + type | result | object_name | detail_info +-----------+--------+-----------------------------+------------------------------------------------------- + ddl_model | ok | test_explain_model_function | CREATE MODEL test_explain_model_function USING kmeans+ + | | | FEATURES position + + | | | FROM kmeans_2d + + | | | WITH max_iterations=default; + ddl_model | ok | test_explain_model_function | DROP MODEL test_explain_model_function; +(2 rows) + +-- clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1 +-- cleanup tables +DROP TABLE IF EXISTS kmeans_2d; +-- output finish +SELECT 'DB4AI EXPLAIN MODEL FUNCTION TEST COMPLETED'; + ?column? +--------------------------------------------- + DB4AI EXPLAIN MODEL FUNCTION TEST COMPLETED +(1 row) + diff --git a/src/test/regress/output/db4ai_gd_houses.source b/src/test/regress/output/db4ai_gd_houses.source index e3f0fc94a..4db718641 100644 --- a/src/test/regress/output/db4ai_gd_houses.source +++ b/src/test/regress/output/db4ai_gd_houses.source @@ -12,16 +12,6 @@ UPDATE db4ai_houses CREATE MODEL houses_logregr_ngd USING logistic_regression FEATURES tax, bath, size TARGET price < 100000 FROM db4ai_houses WITH seed = 1, optimizer='ngd', learning_rate=10.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value 10.000000 -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value ngd -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1035 -- compute accuracy SELECT COUNT(*)/(SELECT COUNT(*) FROM db4ai_houses) AS accuracy FROM ( @@ -39,16 +29,6 @@ FROM ( CREATE MODEL houses_logregr_gd USING logistic_regression FEATURES tax_n, bath_n, size_n TARGET price < 100000 FROM db4ai_houses WITH seed = 2, learning_rate=1.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value 1.000000 -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 2 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1035 -- just 
predict again SELECT PREDICT BY houses_logregr_gd (FEATURES tax_n, bath_n, size_n) AS prediction, price < 100000 AS target @@ -69,17 +49,6 @@ CREATE MODEL houses_svm_ngd USING svm_classification FEATURES tax, bath, size TARGET price < 100000 FROM db4ai_houses WITH seed = 10, batch_size=5, optimizer='ngd', learning_rate=10.0, lambda=50; -NOTICE: Hyperparameter batch_size takes value 5 -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value 50.000000 -NOTICE: Hyperparameter learning_rate takes value 10.000000 -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value ngd -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 10 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 60786 -- compute accuracy SELECT COUNT(*)/(SELECT COUNT(*) FROM db4ai_houses) AS accuracy FROM ( @@ -90,7 +59,7 @@ FROM ( ); accuracy ------------------ - .866666666666667 + .933333333333333 (1 row) -- svm binary classification with normalized data @@ -98,88 +67,61 @@ CREATE MODEL houses_svm_gd USING svm_classification FEATURES tax_n, bath_n, size_n TARGET price < 100000 FROM db4ai_houses WITH seed = 10, batch_size=3, learning_rate=1.0, lambda=50; -NOTICE: Hyperparameter batch_size takes value 3 -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value 50.000000 -NOTICE: Hyperparameter learning_rate takes value 1.000000 -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 10 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 85597 -- linear regression with data not normalized CREATE MODEL houses_linregr_ngd USING linear_regression FEATURES tax, bath, size TARGET price FROM db4ai_houses WITH seed = 1, optimizer='ngd', learning_rate=2.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value 2.000000 -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value ngd -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1035 --- linear regression with normalized data, pure stochastic gd +-- linear regression with normalized data, pure stochastic gd with arrays CREATE MODEL houses_linregr_gd USING linear_regression - FEATURES tax_n, bath_n, size_n TARGET price - FROM db4ai_houses WITH seed = 1, batch_size=1, learning_rate=1.0; -NOTICE: Hyperparameter batch_size takes value 1 -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value 1.000000 -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: 
Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 85597 + FEATURES feat_n TARGET price + FROM (SELECT price, ARRAY[tax_n, bath_n, size_n] AS feat_n FROM db4ai_houses) + WITH seed = 1, batch_size=1, learning_rate=1.0; -- just dump some residual SELECT id, abs(target-prediction) as residual FROM ( - SELECT id, price AS target, PREDICT BY houses_linregr_gd (FEATURES tax_n, bath_n, size_n) AS prediction + SELECT id, price AS target, PREDICT BY houses_linregr_gd (FEATURES ARRAY[tax_n, bath_n, size_n]) AS prediction FROM db4ai_houses ) ORDER BY residual DESC LIMIT 3; id | residual ----+---------- - 8 | 66412 - 7 | 59139 - 13 | 45514 + 8 | 65880 + 7 | 58779 + 13 | 44699 (3 rows) -- take a look at the model warehouse (skipping time-dependent columns) -SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, modeldata, weight, - hyperparametersoids, coefnames, coefvalues, coefoids, trainingscoresname, trainingscoresvalue, modeldescribe - FROM gs_model_warehouse; - modelname | processedtuples | discardedtuples | iterations | outputtype | modeltype | query | modeldata | weight | hyperparametersoids | coefnames | coefvalues | coefoids | trainingscoresname | trainingscoresvalue | modeldescribe ---------------------+-----------------+-----------------+------------+------------+---------------------+------------------------------------------------------------------------------+-----------+--------------------------------------------+---------------------------------------+--------------+--------------+----------+-------------------------------------+-------------------------------------------+--------------- - houses_logregr_ngd | 15 | 0 | 44 | 16 | logistic_regression | CREATE MODEL houses_logregr_ngd USING logistic_regression +| | {-.000678481,-.430199,-.000379371,2.03712} | {23,701,701,23,23,1043,701,23,16} | {categories} | {false,true} | | {accuracy,f1,precision,recall,loss} | {.866667,.857143,.857143,.857143,.463442} | - | | | | | | FEATURES tax, bath, size TARGET price < 100000 +| | | | | | | | | - | | | | | | FROM db4ai_houses WITH seed = 1, optimizer='ngd', learning_rate=10.0; | | | | | | | | | - houses_logregr_gd | 15 | 0 | 45 | 16 | logistic_regression | CREATE MODEL houses_logregr_gd USING logistic_regression +| | {-.911978,-.952683,-.766043,.657569} | {23,701,701,23,23,1043,701,23,16} | {categories} | {false,true} | | {accuracy,f1,precision,recall,loss} | {.8,.769231,.833333,.714286,.525489} | - | | | | | | FEATURES tax_n, bath_n, size_n TARGET price < 100000 +| | | | | | | | | - | | | | | | FROM db4ai_houses WITH seed = 2, learning_rate=1.0; | | | | | | | | | - houses_svm_ngd | 15 | 0 | 63 | 16 | svm_classification | CREATE MODEL houses_svm_ngd USING svm_classification +| | {-.0194004,-5.64974,-.0185561,57.7798} | {23,701,701,701,23,23,1043,701,23,16} | {categories} | {false,true} | | {accuracy,f1,precision,recall,loss} | {.866667,.857143,.857143,.857143,3.89375} | - | | | | | | FEATURES tax, bath, size TARGET price < 100000 +| | | | | | | | | - | | | | | | FROM db4ai_houses WITH seed = 10, batch_size=5, optimizer='ngd', +| | | | | | | | | - | | | | | | learning_rate=10.0, lambda=50; | | | | | | | | | - houses_svm_gd | 15 | 0 | 100 | 16 | svm_classification | CREATE MODEL houses_svm_gd USING svm_classification +| | {-38.8473,9.12503,-55.4844,24.7961} | {23,701,701,701,23,23,1043,701,23,16} | {categories} | {false,true} | | {accuracy,f1,precision,recall,loss} | {.933333,.933333,.875,1,2.96786} | - | | | | | | FEATURES tax_n, 
bath_n, size_n TARGET price < 100000 +| | | | | | | | | - | | | | | | FROM db4ai_houses WITH seed = 10, batch_size=3, +| | | | | | | | | - | | | | | | learning_rate=1.0, lambda=50; | | | | | | | | | - houses_linregr_ngd | 15 | 0 | 100 | 23 | linear_regression | CREATE MODEL houses_linregr_ngd USING linear_regression +| | {25.0811,16472.8,24.4941,21515.9} | {23,701,701,23,23,1043,701,23,16} | | | | {mse} | {1.11119e+09} | - | | | | | | FEATURES tax, bath, size TARGET price +| | | | | | | | | - | | | | | | FROM db4ai_houses WITH seed = 1, optimizer='ngd', +| | | | | | | | | - | | | | | | learning_rate=2.0; | | | | | | | | | - houses_linregr_gd | 15 | 0 | 85 | 23 | linear_regression | CREATE MODEL houses_linregr_gd USING linear_regression +| | {103992,21220,102907,36957.9} | {23,701,701,23,23,1043,701,23,16} | | | | {mse} | {1.36347e+10} | - | | | | | | FEATURES tax_n, bath_n, size_n TARGET price +| | | | | | | | | - | | | | | | FROM db4ai_houses WITH seed = 1, batch_size=1, learning_rate=1.0; | | | | | | | | | +SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight, + hyperparametersnames, hyperparametersoids, hyperparametersvalues, + trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len + FROM gs_model_warehouse + ORDER BY modelname; + modelname | processedtuples | discardedtuples | iterations | outputtype | modeltype | query | weight | hyperparametersnames | hyperparametersoids | hyperparametersvalues | trainingscoresname | trainingscoresvalue | model_data_len +--------------------+-----------------+-----------------+------------+------------+---------------------+--------------------------------------------------------------------------------------+--------+------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------+-------------------------------------+-------------------------------------------+---------------- + houses_linregr_gd | 15 | 0 | 100 | 23 | linear_regression | CREATE MODEL houses_linregr_gd USING linear_regression +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose} | {23,701,701,23,23,1043,701,23,16} | {1,.95,1,100,0,gd,.0005,1,false} | {mse} | {1.36468e+10} | 340 + | | | | | | FEATURES feat_n TARGET price +| | | | | | | + | | | | | | FROM (SELECT price, ARRAY[tax_n, bath_n, size_n] AS feat_n FROM db4ai_houses)+| | | | | | | + | | | | | | WITH seed = 1, batch_size=1, learning_rate=1.0; | | | | | | | + houses_linregr_ngd | 15 | 0 | 100 | 23 | linear_regression | CREATE MODEL houses_linregr_ngd USING linear_regression +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose} | {23,701,701,23,23,1043,701,23,16} | {1000,.95,2,100,0,ngd,.0005,1,false} | {mse} | {1.11119e+09} | 340 + | | | | | | FEATURES tax, bath, size TARGET price +| | | | | | | + | | | | | | FROM db4ai_houses WITH seed = 1, optimizer='ngd', +| | | | | | | + | | | | | | learning_rate=2.0; | | | | | | | + houses_logregr_gd | 15 | 0 | 45 | 16 | logistic_regression | CREATE MODEL houses_logregr_gd USING logistic_regression +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose} | {23,701,701,23,23,1043,701,23,16} | {1000,.95,1,100,0,gd,.0005,2,false} | {accuracy,f1,precision,recall,loss} | {.8,.769231,.833333,.714286,.525489} | 
392 + | | | | | | FEATURES tax_n, bath_n, size_n TARGET price < 100000 +| | | | | | | + | | | | | | FROM db4ai_houses WITH seed = 2, learning_rate=1.0; | | | | | | | + houses_logregr_ngd | 15 | 0 | 44 | 16 | logistic_regression | CREATE MODEL houses_logregr_ngd USING logistic_regression +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose} | {23,701,701,23,23,1043,701,23,16} | {1000,.95,10,100,0,ngd,.0005,1,false} | {accuracy,f1,precision,recall,loss} | {.866667,.857143,.857143,.857143,.463442} | 392 + | | | | | | FEATURES tax, bath, size TARGET price < 100000 +| | | | | | | + | | | | | | FROM db4ai_houses WITH seed = 1, optimizer='ngd', learning_rate=10.0; | | | | | | | + houses_svm_gd | 15 | 0 | 100 | 16 | svm_classification | CREATE MODEL houses_svm_gd USING svm_classification +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose,lambda,kernel,components,gamma,degree,coef0} | {23,701,701,23,23,1043,701,23,16,701,1043,23,701,23,701} | {3,.95,1,100,0,gd,.0005,10,false,50,linear,0,.5,2,1} | {accuracy,f1,precision,recall,loss} | {.933333,.933333,.875,1,3.32112} | 392 + | | | | | | FEATURES tax_n, bath_n, size_n TARGET price < 100000 +| | | | | | | + | | | | | | FROM db4ai_houses WITH seed = 10, batch_size=3, +| | | | | | | + | | | | | | learning_rate=1.0, lambda=50; | | | | | | | + houses_svm_ngd | 15 | 0 | 100 | 16 | svm_classification | CREATE MODEL houses_svm_ngd USING svm_classification +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose,lambda,kernel,components,gamma,degree,coef0} | {23,701,701,23,23,1043,701,23,16,701,1043,23,701,23,701} | {5,.95,10,100,0,ngd,.0005,10,false,50,linear,0,.5,2,1} | {accuracy,f1,precision,recall,loss} | {.933333,.933333,.875,1,3.43783} | 392 + | | | | | | FEATURES tax, bath, size TARGET price < 100000 +| | | | | | | + | | | | | | FROM db4ai_houses WITH seed = 10, batch_size=5, optimizer='ngd', +| | | | | | | + | | | | | | learning_rate=10.0, lambda=50; | | | | | | | (6 rows) -- cleanup models in random order diff --git a/src/test/regress/output/db4ai_gd_pca_train_predict.source b/src/test/regress/output/db4ai_gd_pca_train_predict.source new file mode 100644 index 000000000..e044fb80c --- /dev/null +++ b/src/test/regress/output/db4ai_gd_pca_train_predict.source @@ -0,0 +1,158 @@ +-- Setting parameters +SET statement_timeout = 0; +SET xmloption = content; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET search_path = public; +SET default_tablespace = ''; +SET default_with_oids = false; +-- Creating data tables +CREATE TABLE multivariate_2_1000_1_test ( + id integer NOT NULL, + "position" double precision[] NOT NULL +) +WITH (orientation=row, compression=no); +CREATE TABLE multivariate_7_1000_1_test ( + id integer NOT NULL, + "position" float[] NOT NULL +) +WITH (orientation=row, compression=no); +-- Filling out data tables +COPY multivariate_2_1000_1_test (id, "position") FROM stdin; +; +-- +-- Data for Name: multivariate_7_1000_1_test; Type: TABLE DATA; Schema: public; Owner: - +-- +COPY multivariate_7_1000_1_test (id, "position") FROM stdin; +; +-- Running the tests +-- Testing PCA training +CREATE MODEL pca_fit_2_2_1000 USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; +CREATE MODEL pca_fit_7_7_1000 USING pca FEATURES position FROM multivariate_7_1000_1_test WITH number_components = 7, 
tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_2_2_1000'; + modelname | processedtuples | discardedtuples | iterations | modeltype | model_data_len | trainingscoresvalue | weight +------------------+-----------------+-----------------+------------+-----------+----------------+---------------------+-------- + pca_fit_2_2_1000 | 1000 | 0 | 10 | pca | 404 | {.382552} | +(1 row) + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_7_7_1000'; + modelname | processedtuples | discardedtuples | iterations | modeltype | model_data_len | trainingscoresvalue | weight +------------------+-----------------+-----------------+------------+-----------+----------------+---------------------+-------- + pca_fit_7_7_1000 | 1000 | 0 | 10 | pca | 1284 | {.0080319} | +(1 row) + +EXPLAIN CREATE MODEL pca_fit_2_2_1000_explain USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; + QUERY PLAN +------------------------------------------------------------------------------------------- + Train Model - pca (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=0.00..28.57 rows=1238 width=32) + -> Seq Scan on multivariate_2_1000_1_test (cost=0.00..22.38 rows=1238 width=32) +(3 rows) + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_2_2_1000_explain'; + modelname | processedtuples | discardedtuples | iterations | modeltype | model_data_len | trainingscoresvalue | weight +-----------+-----------------+-----------------+------------+-----------+----------------+---------------------+-------- +(0 rows) + +CREATE MODEL pca_fit_7_8_1000_more_components USING pca FEATURES position FROM multivariate_7_1000_1_test WITH number_components = 8, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; +CREATE MODEL pca_fit_7_4_1000_less_components USING pca FEATURES position FROM multivariate_7_1000_1_test WITH number_components = 4, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_7_8_1000_more_components'; + modelname | processedtuples | discardedtuples | iterations | modeltype | model_data_len | trainingscoresvalue | weight +----------------------------------+-----------------+-----------------+------------+-----------+----------------+---------------------+-------- + pca_fit_7_8_1000_more_components | 1000 | 0 | 10 | pca | 1284 | {.0080319} | +(1 row) + +SELECT modelname, processedtuples, discardedtuples, iterations, modeltype, length(modeldata) as model_data_len, trainingscoresvalue, weight FROM gs_model_warehouse WHERE modelname = 'pca_fit_7_4_1000_less_components'; + modelname | processedtuples | discardedtuples | iterations | modeltype | model_data_len | trainingscoresvalue | weight 
+----------------------------------+-----------------+-----------------+------------+-----------+----------------+---------------------+-------- + pca_fit_7_4_1000_less_components | 1000 | 0 | 10 | pca | 948 | {.00828935} | +(1 row) + +-- Invalid hyperparameter values +CREATE MODEL pca_fit_2_2_1000_wrong_optimizer USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977, optimizer = 'gd'; +ERROR: Invalid hyperparameter optimizer +CREATE MODEL pca_fit_2_2_1000_wrong_optimizer USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = 2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977, optimizer = 'ngd'; +ERROR: Invalid hyperparameter optimizer +CREATE MODEL pca_fit_2_2_1000_wrong_num_components USING pca FEATURES position FROM multivariate_2_1000_1_test WITH number_components = -2, tolerance = 0.0001, batch_size = 1000, max_iterations = 10, seed = 96176977; +ERROR: Hyperparameter number_components must be in the range [1,2147483647] +-- Prediction +SELECT id, PREDICT BY pca_fit_2_2_1000(FEATURES position) as projection_2d FROM multivariate_2_1000_1_test WHERE id <= 10; + id | projection_2d +----+--------------------------------------- + 8 | {-1.15986975744937,-.348766278080407} + 2 | {3.35878001447117,-.425176037961929} + 5 | {1.55032570006237,-1.00334268076016} + 1 | {2.12593545225663,1.49172053673385} + 4 | {-1.16092554286634,.663026300191944} + 3 | {-3.49555162634873,-1.85104699528611} + 10 | {1.55371491574894,.058343268701519} + 6 | {-1.42699402009033,-.231581533993596} + 9 | {-.198532034911188,-1.00617452721559} + 7 | {2.16850938142969,.0412310289866401} +(10 rows) + +SELECT id, PREDICT BY pca_fit_7_7_1000(FEATURES position) as projection_7d FROM multivariate_7_1000_1_test WHERE id <= 10; + id | projection_7d 
+----+---------------------------------------------------------------------------------------------------------------------------------- + 1 | {.678714994382065,.976432283385473,.0266050404237083,.0866369780025167,-.759932630535208,-.464128782050144,1.09853425713183} + 2 | {.0760125437005675,.490027694537528,1.26651369875808,.135297175928951,-.56960244483657,.82425549663981,-.18787121259391} + 3 | {.742392865048418,.231803951757561,-.185105563745649,-.390443998155235,.767333271159934,.391164113010215,-.661034215244053} + 4 | {.784869330775296,.225892747592101,1.93505364376692,1.43588784529663,.906132133208712,-.365825897640064,1.30457165822626} + 5 | {-1.46244046462834,.0480416790101441,-.970249679579649,1.12422894394046,-1.82872692581671,.246067014195788,.261596848049019} + 6 | {-.878135462594663,-.335694232789492,-1.39444134364323,-.992156242174232,.0796398554027303,-2.5604949481646,-.851795895966088} + 7 | {-.603614919834191,-.49653429677693,-.561079352045715,.566968287015253,1.17228061530521,-.075097558455854,.226797169704934} + 8 | {-1.07660437757058,.563607554596825,.806692544447932,-1.62441014357104,.329949498873683,-.376632582157374,.202218970179358} + 9 | {-.0232818353312938,-1.01237573968119,-1.03692265037781,-.607688676612508,-1.00157739929268,-1.95832289799181,-1.07448696975176} + 10 | {-1.69609469059258,.400439995333213,-.122637001186274,-1.51137175629667,-.650670099719702,-.0835378769839427,-.556543011509711} +(10 rows) + +SELECT id, PREDICT BY pca_fit_7_4_1000_less_components(FEATURES position) as projection_4d FROM multivariate_7_1000_1_test WHERE id <= 10; + id | projection_4d +----+----------------------------------------------------------------------------- + 1 | {-.678714994382065,-.976432283385472,-.0266050404237085,-.0866369780025167} + 2 | {-.0760125437005672,-.490027694537527,-1.26651369875808,-.135297175928951} + 3 | {-.742392865048418,-.231803951757562,.185105563745649,.390443998155235} + 4 | {-.784869330775296,-.225892747592101,-1.93505364376692,-1.43588784529663} + 5 | {1.46244046462834,-.0480416790101434,.970249679579648,-1.12422894394046} + 6 | {.878135462594663,.335694232789491,1.39444134364323,.992156242174233} + 7 | {.603614919834191,.49653429677693,.561079352045715,-.566968287015253} + 8 | {1.07660437757058,-.563607554596825,-.806692544447932,1.62441014357104} + 9 | {.0232818353312931,1.01237573968119,1.03692265037781,.607688676612509} + 10 | {1.69609469059258,-.400439995333213,.122637001186273,1.51137175629667} +(10 rows) + +-- Undoable projection +SELECT id, PREDICT BY pca_fit_7_7_1000(FEATURES position) as projection_7d FROM multivariate_2_1000_1_test WHERE id <= 10; +ERROR: Input array must be 1-dimensional of 7 elements, must not contain nulls, and must be of type float8 or float. +CONTEXT: referenced column: projection_7d +SELECT id, PREDICT BY pca_fit_2_2_1000(FEATURES position) as projection_2d FROM multivariate_7_1000_1_test WHERE id <= 10; +ERROR: Input array must be 1-dimensional of 2 elements, must not contain nulls, and must be of type float8 or float. +CONTEXT: referenced column: projection_2d +-- Cleanup +DROP MODEL pca_fit_2_2_1000; +DROP MODEL pca_fit_7_7_1000; +DROP MODEL pca_fit_7_8_1000_more_components; +DROP MODEL pca_fit_7_4_1000_less_components; +DROP TABLE IF EXISTS multivariate_2_1000_1_test CASCADE; +DROP TABLE IF EXISTS multivariate_7_1000_1_test CASCADE; +SELECT 'DB4AI PCA TEST COMPLETED'; + ?column? 
+-------------------------- + DB4AI PCA TEST COMPLETED +(1 row) + diff --git a/src/test/regress/output/db4ai_gd_snapshots.source b/src/test/regress/output/db4ai_gd_snapshots.source index e72ee17ec..3b080b8b0 100644 --- a/src/test/regress/output/db4ai_gd_snapshots.source +++ b/src/test/regress/output/db4ai_gd_snapshots.source @@ -98,28 +98,18 @@ CREATE MODEL abalone USING linear_regression TARGET rings FROM abalone@train WITH seed = 1; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 379 -SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, modeldata, weight, - hyperparametersnames, hyperparametersvalues, hyperparametersoids, coefnames, coefvalues, coefoids, - trainingscoresname, trainingscoresvalue, modeldescribe +SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight, + hyperparametersnames, hyperparametersvalues, hyperparametersoids, + trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len FROM gs_model_warehouse; - modelname | processedtuples | discardedtuples | iterations | outputtype | modeltype | query | modeldata | weight | hyperparametersnames | hyperparametersvalues | hyperparametersoids | coefnames | coefvalues | coefoids | trainingscoresname | trainingscoresvalue | modeldescribe ------------+-----------------+-----------------+------------+------------+-------------------+----------------------------------------------------------------+-----------+-------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------+--------------------------------------+-----------------------------------+-----------+------------+----------+--------------------+---------------------+--------------- - abalone | 50 | 0 | 59 | 23 | linear_regression | CREATE MODEL abalone USING linear_regression +| | {1.90244,.435078,2.12253,2.23828,1.8411,.734881,1.63897,-.233642,.218489,1.07668,4.46004} | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose} | {1000,.95,.8,100,0,gd,.0005,1,false} | {23,701,701,23,23,1043,701,23,16} | | | | {mse} | {5.90028} | - | | | | | | FEATURES sex_f, sex_i, sex_m, length, diameter, height,+| | | | | | | | | | | - | | | | | | whole, shucked, viscera, shell +| | | | | | | | | | | - | | | | | | TARGET rings +| | | | | | | | | | | - | | | | | | FROM abalone@train +| | | | | | | | | | | - | | | | | | WITH seed = 1; | | | | | | | | | | | + modelname | processedtuples | discardedtuples | iterations | outputtype | modeltype | query | weight | hyperparametersnames | hyperparametersvalues | hyperparametersoids | trainingscoresname | trainingscoresvalue | model_data_len 
+-----------+-----------------+-----------------+------------+------------+-------------------+----------------------------------------------------------------+--------+----------------------------------------------------------------------------------------------+--------------------------------------+-----------------------------------+--------------------+---------------------+---------------- + abalone | 50 | 0 | 59 | 23 | linear_regression | CREATE MODEL abalone USING linear_regression +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose} | {1000,.95,.8,100,0,gd,.0005,1,false} | {23,701,701,23,23,1043,701,23,16} | {mse} | {5.90028} | 452 + | | | | | | FEATURES sex_f, sex_i, sex_m, length, diameter, height,+| | | | | | | + | | | | | | whole, shucked, viscera, shell +| | | | | | | + | | | | | | TARGET rings +| | | | | | | + | | | | | | FROM abalone@train +| | | | | | | + | | | | | | WITH seed = 1; | | | | | | | (1 row) SELECT id, target, prediction, abs(prediction-target) as residual diff --git a/src/test/regress/output/db4ai_gd_train_predict.source b/src/test/regress/output/db4ai_gd_train_predict.source index 7a9cb1c24..65ef18577 100644 --- a/src/test/regress/output/db4ai_gd_train_predict.source +++ b/src/test/regress/output/db4ai_gd_train_predict.source @@ -13,128 +13,44 @@ ERROR: Supervised ML algorithms require FEATURES clause CREATE MODEL m using logistic_regression FEATURES size,lot FROM db4ai_houses; ERROR: Supervised ML algorithms require TARGET clause -- Errors with semantic validation of hyperparameters -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 0; -NOTICE: Hyperparameter batch_size takes value 0 +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 0, seed=1; ERROR: Hyperparameter batch_size must be in the range [1,2147483647] -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 0.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value 0.000000 +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 0.0, seed=1; ERROR: Hyperparameter decay must be in the range (0,1.7976931e+308] -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 0.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value 0.000000 +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 0.0, seed=1; ERROR: Hyperparameter learning_rate must be in the range (0,1.7976931e+308] -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value 0 +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 0, seed=1; ERROR: Hyperparameter max_iterations must be in the range [1,2147483647] -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 
FROM db4ai_houses with max_seconds = -1; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value -1 +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = -1, seed=1; ERROR: Hyperparameter max_seconds must be in the range [0,2147483647] -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = nogd; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value nogd -ERROR: Invalid hyperparameter value for optimizer. Valid values are: gd, ngd. (default is gd) -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 0.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value 0.000000 +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = nogd, seed=1; +ERROR: Invalid hyperparameter value for optimizer. Valid values are: gd, ngd. +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 0.0, seed=1; ERROR: Hyperparameter tolerance must be in the range (0,1.7976931e+308] -CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = ttrue; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) +CREATE MODEL m USING logistic_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = ttrue, seed=1; ERROR: Hyperparameter verbose is not a valid string for boolean (i.e. 
'true' or 'false') -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 'a_wrong_parameter'; +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with batch_size = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter batch_size must be an integer -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with decay = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter decay must be a floating point number -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with learning_rate = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter learning_rate must be a floating point number -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_iterations = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter max_iterations must be an integer -CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) +CREATE MODEL m USING linear_regression FEATURES size, lot TARGET price <100000 FROM db4ai_houses with max_seconds = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter max_seconds must be an integer -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 0.0; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value 0.000000 +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 0.0, seed=1; ERROR: Hyperparameter lambda must be in the range (0,1.7976931e+308] -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with lambda = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter lambda must be a floating point number -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT 
(0.950000) -NOTICE: Hyperparameter lambda takes value DEFAULT (0.010000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value a_wrong_parameter -ERROR: Invalid hyperparameter value for optimizer. Valid values are: gd, ngd. (default is gd) -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value DEFAULT (0.010000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with optimizer = 'a_wrong_parameter', seed=1; +ERROR: Invalid hyperparameter value for optimizer. Valid values are: gd, ngd. +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with tolerance = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter tolerance must be a floating point number -CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = 'a_wrong_parameter'; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value DEFAULT (0.010000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) +CREATE MODEL m USING svm_classification FEATURES size, lot TARGET price <100000 FROM db4ai_houses with verbose = 'a_wrong_parameter', seed=1; ERROR: Hyperparameter verbose is not a valid string for boolean (i.e. 
'true' or 'false') -- Normal model with logistic_regression CREATE MODEL m1 using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM db4ai_houses WITH seed = 1; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 SELECT id, price, PREDICT BY m1 (FEATURES bedroom, bath) from db4ai_houses ORDER BY id; id | price | m1_pred ----+--------+--------- @@ -178,17 +94,6 @@ SELECT id, pred FROM db4ai_houses; DROP MODEL m1; CREATE MODEL m2 using svm_classification FEATURES price / (SELECT max(price) from db4ai_houses) TARGET (size > 2000 AND bath > 2) as large_house FROM db4ai_houses with batch_size=1, lambda=10, tolerance=default, seed = 1, max_iterations=100; -NOTICE: Hyperparameter batch_size takes value 1 -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value 10.000000 -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value 100 -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 85597 SELECT id, size, bath, price, PREDICT BY m2(FEATURES price / (SELECT max(price) from db4ai_houses)) as large_house FROM db4ai_houses ORDER BY id; id | size | bath | price | large_house ----+------+------+--------+------------- @@ -211,16 +116,6 @@ SELECT id, size, bath, price, PREDICT BY m2(FEATURES price / (SELECT max(price) DROP MODEL m2; CREATE MODEL m3 using linear_regression FEATURES tax, bath TARGET price FROM db4ai_houses with optimizer=ngd, seed = 1; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value ngd -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value 1 -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 SELECT id, tax, bath, price, PREDICT BY m3 (FEATURES tax, bath) FROM db4ai_houses; id | tax | bath | price | m3_pred ----+------+------+--------+--------- @@ -243,95 +138,54 @@ SELECT id, tax, bath, price, PREDICT BY m3 (FEATURES tax, bath) FROM db4ai_house DROP model m3; -- EXPLAIN test -EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM db4ai_houses; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes 
value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------ - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - -> Seq Scan on db4ai_houses (cost=0.00..26.21 rows=1297 width=16) -(2 rows) +show enable_material; + enable_material +----------------- + on +(1 row) -EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath as renamed_bath TARGET price < 100000 as target_price FROM db4ai_houses; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(accuracy), GD(f1), GD(precision), GD(recall), GD(loss), GD(categories) - -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=1297 width=16) +EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM db4ai_houses WITH seed=1; + QUERY PLAN +----------------------------------------------------------------------------- + Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=0.00..32.70 rows=1297 width=16) + -> Seq Scan on db4ai_houses (cost=0.00..26.21 rows=1297 width=16) +(3 rows) + +EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath as renamed_bath TARGET price < 100000 as target_price FROM db4ai_houses WITH seed=1; + QUERY PLAN +------------------------------------------------------------------------------------ + Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=0.00..32.70 rows=1297 width=16) Output: (price < 100000), bedroom, bath -(4 rows) + -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=1297 width=16) + Output: (price < 100000), bedroom, bath +(5 rows) -EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath::float as transformed_bath TARGET price < 100000 FROM db4ai_houses; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter 
verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(accuracy), GD(f1), GD(precision), GD(recall), GD(loss), GD(categories) - -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=1297 width=16) +EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath::float as transformed_bath TARGET price < 100000 FROM db4ai_houses WITH seed=1; + QUERY PLAN +------------------------------------------------------------------------------------ + Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=0.00..32.70 rows=1297 width=16) Output: (price < 100000), bedroom, bath -(4 rows) + -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=1297 width=16) + Output: (price < 100000), bedroom, bath +(5 rows) -EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM (SELECT * FROM db4ai_houses); -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(accuracy), GD(f1), GD(precision), GD(recall), GD(loss), GD(categories) - -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=1297 width=16) +EXPLAIN VERBOSE CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 FROM (SELECT * FROM db4ai_houses) WITH seed=1; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=0.00..32.70 rows=1297 width=16) Output: (db4ai_houses.price < 100000), db4ai_houses.bedroom, db4ai_houses.bath -(4 rows) + -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=1297 width=16) + Output: (db4ai_houses.price < 100000), db4ai_houses.bedroom, db4ai_houses.bath +(5 rows) -EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id); -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: 
Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 +EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id) WITH seed=1; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(mse) + Train Model - linear_regression (cost=0.00..0.00 rows=0 width=0) -> Subquery Scan on __unnamed_subquery__ (cost=90.03..106.24 rows=1297 width=16) Output: __unnamed_subquery__.price, __unnamed_subquery__.bedroom, __unnamed_subquery__.bath -> Sort (cost=90.03..93.27 rows=1297 width=33) @@ -339,133 +193,235 @@ NOTICE: GD shuffle cache size 1375 Sort Key: db4ai_houses.id -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=33) Output: db4ai_houses.id, db4ai_houses.tax, db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.size, db4ai_houses.lot, db4ai_houses.pred -(9 rows) +(8 rows) -EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id LIMIT 5); -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(mse) - -> Subquery Scan on __unnamed_subquery__ (cost=44.51..44.58 rows=5 width=16) +EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES bedroom, bath TARGET price FROM (SELECT * FROM db4ai_houses ORDER BY id LIMIT 5) WITH seed=1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Train Model - linear_regression (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=44.51..44.60 rows=5 width=16) Output: __unnamed_subquery__.price, __unnamed_subquery__.bedroom, __unnamed_subquery__.bath - -> Limit (cost=44.51..44.53 rows=5 width=33) - Output: db4ai_houses.id, db4ai_houses.tax, db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.size, db4ai_houses.lot, db4ai_houses.pred - -> Sort (cost=44.51..47.76 rows=1297 width=33) + -> 
Subquery Scan on __unnamed_subquery__ (cost=44.51..44.58 rows=5 width=16) + Output: __unnamed_subquery__.price, __unnamed_subquery__.bedroom, __unnamed_subquery__.bath + -> Limit (cost=44.51..44.53 rows=5 width=33) Output: db4ai_houses.id, db4ai_houses.tax, db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.size, db4ai_houses.lot, db4ai_houses.pred - Sort Key: db4ai_houses.id - -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=33) + -> Sort (cost=44.51..47.76 rows=1297 width=33) Output: db4ai_houses.id, db4ai_houses.tax, db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.size, db4ai_houses.lot, db4ai_houses.pred -(11 rows) + Sort Key: db4ai_houses.id + -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=33) + Output: db4ai_houses.id, db4ai_houses.tax, db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.size, db4ai_houses.lot, db4ai_houses.pred +(12 rows) -EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES f1, f2 TARGET price FROM (SELECT bedroom as f1, bath as f2, price FROM db4ai_houses ORDER BY id LIMIT 5); -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(mse) - -> Subquery Scan on __unnamed_subquery__ (cost=44.51..44.58 rows=5 width=16) +EXPLAIN VERBOSE CREATE MODEL m using linear_regression FEATURES f1, f2 TARGET price FROM (SELECT bedroom as f1, bath as f2, price FROM db4ai_houses ORDER BY id LIMIT 5) WITH seed=1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Train Model - linear_regression (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=44.51..44.60 rows=5 width=16) Output: __unnamed_subquery__.price, __unnamed_subquery__.f1, __unnamed_subquery__.f2 - -> Limit (cost=44.51..44.53 rows=5 width=20) - Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.id - -> Sort (cost=44.51..47.76 rows=1297 width=20) + -> Subquery Scan on __unnamed_subquery__ (cost=44.51..44.58 rows=5 width=16) + Output: __unnamed_subquery__.price, __unnamed_subquery__.f1, __unnamed_subquery__.f2 + -> Limit (cost=44.51..44.53 rows=5 width=20) Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.id - Sort Key: db4ai_houses.id - -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=20) + -> Sort (cost=44.51..47.76 rows=1297 width=20) Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.id -(11 rows) + Sort Key: db4ai_houses.id + -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=20) + Output: 
db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price, db4ai_houses.id +(12 rows) -EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom); -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value DEFAULT (0.010000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(accuracy), GD(f1), GD(precision), GD(recall), GD(loss), GD(categories) - -> Subquery Scan on __unnamed_subquery__ (cost=35.94..41.44 rows=200 width=48) +EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom) WITH seed=1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Train Model - svm_classification (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=35.94..42.44 rows=200 width=48) Output: (__unnamed_subquery__.price > 100000::numeric), __unnamed_subquery__.f1, __unnamed_subquery__.f2 - -> HashAggregate (cost=35.94..38.94 rows=200 width=88) - Output: avg(db4ai_houses.bath), sum(db4ai_houses.bath), avg(db4ai_houses.price), db4ai_houses.bedroom - Group By Key: db4ai_houses.bedroom - -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=16) - Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price -(9 rows) - -EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom HAVING bedroom < 15); -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value DEFAULT (0.010000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - 
Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(accuracy), GD(f1), GD(precision), GD(recall), GD(loss), GD(categories) - -> Subquery Scan on __unnamed_subquery__ (cost=30.53..32.38 rows=67 width=48) - Output: (__unnamed_subquery__.price > 100000::numeric), __unnamed_subquery__.f1, __unnamed_subquery__.f2 - -> HashAggregate (cost=30.53..31.54 rows=67 width=88) - Output: avg(db4ai_houses.bath), sum(db4ai_houses.bath), avg(db4ai_houses.price), db4ai_houses.bedroom - Group By Key: db4ai_houses.bedroom - -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=432 width=16) - Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price - Filter: (db4ai_houses.bedroom < 15) + -> Subquery Scan on __unnamed_subquery__ (cost=35.94..41.44 rows=200 width=48) + Output: (__unnamed_subquery__.price > 100000::numeric), __unnamed_subquery__.f1, __unnamed_subquery__.f2 + -> HashAggregate (cost=35.94..38.94 rows=200 width=88) + Output: avg(db4ai_houses.bath), sum(db4ai_houses.bath), avg(db4ai_houses.price), db4ai_houses.bedroom + Group By Key: db4ai_houses.bedroom + -> Seq Scan on public.db4ai_houses (cost=0.00..22.97 rows=1297 width=16) + Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price (10 rows) -EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES bedroom + bath, tax*1.2 as normalized_tax TARGET price < 100000 FROM db4ai_houses; -NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) -NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) -NOTICE: Hyperparameter lambda takes value DEFAULT (0.010000) -NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) -NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) -NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) -NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) -NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) -NOTICE: Hyperparameter seed takes value DEFAULT (0) -NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) -NOTICE: GD shuffle cache size 1375 - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - Output: GD(algorithm), GD(optimizer), GD(result_type), GD(num_iterations), GD(exec_time_msecs), GD(processed), GD(discarded), GD(weights), GD(accuracy), GD(f1), GD(precision), GD(recall), GD(loss), GD(categories) - -> Seq Scan on public.db4ai_houses (cost=0.00..39.18 rows=1297 width=20) +EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES f1, f2 TARGET price > 100000 FROM (SELECT AVG(bath) as f1, SUM(bath) as f2, AVG(price) as price FROM db4ai_houses GROUP BY bedroom HAVING bedroom < 15) WITH seed=1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Train Model - svm_classification (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=30.53..32.71 rows=67 width=48) + Output: (__unnamed_subquery__.price > 100000::numeric), __unnamed_subquery__.f1, __unnamed_subquery__.f2 + -> Subquery Scan on __unnamed_subquery__ (cost=30.53..32.38 rows=67 width=48) + Output: (__unnamed_subquery__.price > 100000::numeric), __unnamed_subquery__.f1, __unnamed_subquery__.f2 + -> HashAggregate 
(cost=30.53..31.54 rows=67 width=88) + Output: avg(db4ai_houses.bath), sum(db4ai_houses.bath), avg(db4ai_houses.price), db4ai_houses.bedroom + Group By Key: db4ai_houses.bedroom + -> Seq Scan on public.db4ai_houses (cost=0.00..26.21 rows=432 width=16) + Output: db4ai_houses.bedroom, db4ai_houses.bath, db4ai_houses.price + Filter: (db4ai_houses.bedroom < 15) +(11 rows) + +EXPLAIN VERBOSE CREATE model m using svm_classification FEATURES bedroom + bath, tax*1.2 as normalized_tax TARGET price < 100000 FROM db4ai_houses WITH seed=1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Train Model - svm_classification (cost=0.00..0.00 rows=0 width=0) + -> Materialize (cost=0.00..45.67 rows=1297 width=20) Output: (price < 100000), ((bedroom)::double precision + bath), ((tax)::numeric * 1.2) -(4 rows) + -> Seq Scan on public.db4ai_houses (cost=0.00..39.18 rows=1297 width=20) + Output: (price < 100000), ((bedroom)::double precision + bath), ((tax)::numeric * 1.2) +(5 rows) + +-- Expect no materialization +EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 as target_price FROM (SELECT * FROM db4ai_houses ORDER BY id) WITH seed=1; + QUERY PLAN +-------------------------------------------------------------------------------------- + Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0) + -> Subquery Scan on __unnamed_subquery__ (cost=90.03..109.49 rows=1297 width=16) + -> Sort (cost=90.03..93.27 rows=1297 width=33) + Sort Key: db4ai_houses.id + -> Seq Scan on db4ai_houses (cost=0.00..22.97 rows=1297 width=33) +(5 rows) + +set enable_material = off; +EXPLAIN CREATE MODEL m using logistic_regression FEATURES bedroom, bath TARGET price < 100000 as target_price FROM db4ai_houses WITH seed=1; + QUERY PLAN +----------------------------------------------------------------------- + Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0) + -> Seq Scan on db4ai_houses (cost=0.00..26.21 rows=1297 width=16) +(2 rows) + +set enable_material = on; +-- svm multiclass +CREATE TABLE db4ai_ecoli ( + id BIGSERIAL, + f1 REAL, + f2 REAL, + f3 REAL, + f4 REAL, + f5 REAL, + f6 REAL, + f7 REAL, + cat VARCHAR, + PRIMARY KEY (id) +); +NOTICE: CREATE TABLE will create implicit sequence "db4ai_ecoli_id_seq" for serial column "db4ai_ecoli.id" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "db4ai_ecoli_pkey" for table "db4ai_ecoli" +\copy db4ai_ecoli(f1,f2,f3,f4,f5,f6,f7,cat) FROM '@abs_srcdir@/data/ecoli.csv' DELIMITER ','; +CREATE MODEL ecoli_svmc USING multiclass + FEATURES f1, f2, f3, f4, f5, f6, f7 TARGET cat + FROM db4ai_ecoli WITH seed = 1, max_iterations=250, tolerance=1e-7, + learning_rate=2.0, lambda=50; +SELECT id, cat AS target, PREDICT BY ecoli_svmc (FEATURES f1, f2, f3, f4, f5, f6, f7) AS prediction + FROM db4ai_ecoli + WHERE MOD(id,10)=0 + ORDER BY id; + id | target | prediction +-----+--------+------------ + 10 | cp | cp + 20 | cp | cp + 30 | cp | cp + 40 | cp | cp + 50 | cp | cp + 60 | cp | cp + 70 | cp | cp + 80 | cp | cp + 90 | cp | cp + 100 | cp | cp + 110 | cp | cp + 120 | cp | cp + 130 | cp | cp + 140 | cp | cp + 150 | im | im + 160 | im | im + 170 | im | im + 180 | im | im + 190 | im | imU + 200 | im | im + 210 | im | im + 220 | im | im + 230 | imU | imU + 240 | imU | im + 250 | imU | imU + 260 | om | om + 270 | om | om + 280 | omL | pp + 290 | pp | pp + 300 | pp | pp + 310 | pp | pp + 320 | pp | pp + 330 | pp | pp +(33 rows) + +SELECT COUNT(*)/(SELECT COUNT(*) 
FROM db4ai_ecoli) AS accuracy +FROM (SELECT id, cat AS target, PREDICT BY ecoli_svmc (FEATURES f1, f2, f3, f4, f5, f6, f7) AS prediction + FROM db4ai_ecoli + WHERE prediction=target + ); + accuracy +------------------ + .860119047619048 +(1 row) + +CREATE MODEL ecoli_logregr USING multiclass + FEATURES f1, f2, f3, f4, f5, f6, f7 TARGET cat + FROM db4ai_ecoli WITH seed = 1, max_iterations=250, tolerance=1e-7, + learning_rate=30.0, classifier='logistic_regression'; +SELECT id, cat AS target, PREDICT BY ecoli_logregr (FEATURES f1, f2, f3, f4, f5, f6, f7) AS prediction + FROM db4ai_ecoli + WHERE MOD(id,10)=1 + ORDER BY id; + id | target | prediction +-----+--------+------------ + 1 | cp | cp + 11 | cp | cp + 21 | cp | cp + 31 | cp | cp + 41 | cp | cp + 51 | cp | cp + 61 | cp | cp + 71 | cp | cp + 81 | cp | cp + 91 | cp | cp + 101 | cp | cp + 111 | cp | cp + 121 | cp | cp + 131 | cp | cp + 141 | cp | cp + 151 | im | im + 161 | im | im + 171 | im | im + 181 | im | im + 191 | im | im + 201 | im | im + 211 | im | im + 221 | imS | pp + 231 | imU | imU + 241 | imU | im + 251 | imU | imU + 261 | om | pp + 271 | om | cp + 281 | omL | cp + 291 | pp | pp + 301 | pp | cp + 311 | pp | pp + 321 | pp | pp + 331 | pp | cp +(34 rows) + +SELECT modelname, modeltype, processedtuples, discardedtuples, outputtype, trainingscoresname, trainingscoresvalue + FROM gs_model_warehouse + WHERE modelname LIKE 'ecoli%' + ORDER BY modelname; + modelname | modeltype | processedtuples | discardedtuples | outputtype | trainingscoresname | trainingscoresvalue +---------------+------------+-----------------+-----------------+------------+--------------------+--------------------- + ecoli_logregr | multiclass | 336 | 0 | 1043 | {loss} | {1.45926} + ecoli_svmc | multiclass | 336 | 0 | 1043 | {loss} | {14.9452} +(2 rows) -- Cleanup +DROP MODEL ecoli_svmc; +DROP MODEL ecoli_logregr; DROP TABLE IF EXISTS db4ai_houses; +DROP TABLE IF EXISTS db4ai_ecoli; SELECT 'DB4AI TEST COMPLETED'; ?column? 
---------------------- diff --git a/src/test/regress/output/db4ai_kmeans_train_predict.source b/src/test/regress/output/db4ai_kmeans_train_predict.source index a056ac8a9..4aa618da7 100644 --- a/src/test/regress/output/db4ai_kmeans_train_predict.source +++ b/src/test/regress/output/db4ai_kmeans_train_predict.source @@ -33,19 +33,12 @@ COPY multivariate_7_1000_10 (id, "position", closest_centroid, l1_distance, l2_d ; COPY multivariate_7_1000_10_real_centroids (id, "position", closest_centroid, l1_distance, l2_distance, l2_squared_distance, linf_distance) FROM stdin; ; +CREATE TABLE multivariate_7_1000_10_unnested AS SELECT id, position[1] AS x_1, position[2] AS x_2, position[3] AS x_3, position[4] AS x_4, position[5] AS x_5, position[6] AS x_6, position[7] AS x_7, closest_centroid, l1_distance, l2_distance, l2_squared_distance, linf_distance FROM multivariate_7_1000_10; -- Running the tests -- Random++ CREATE MODEL my_kmeans_pp_l1 USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L1', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L1 -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 @@ -112,16 +105,8 @@ SELECT PREDICT BY my_kmeans_pp_l1(features position) AS centroid_id FROM (SELECT ERROR: k-means predict: array of coordinates cannot be null CONTEXT: referenced column: centroid_id CREATE MODEL my_kmeans_pp_l2 USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2 -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 @@ -179,16 +164,8 @@ SELECT id, PREDICT BY my_kmeans_pp_l2(features position) AS centroid_id FROM mul (5 rows) CREATE MODEL my_kmeans_pp_l2_sqr USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 
1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 @@ -245,17 +222,186 @@ SELECT id, PREDICT BY my_kmeans_pp_l2_sqr(features position) AS centroid_id FROM 15 | -1 (5 rows) -CREATE MODEL my_kmeans_pp_linf USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'Linf', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value Linf -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 +CREATE MODEL my_kmeans_pp_l2_sqr_no_num_features USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 +NOTICE: *** Number of valid points: 1000 +NOTICE: *** Number of dead points: 5 +NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 +NOTICE: *** k-means++ begin: consolidating 87 candidates to 10 centroid(s) +NOTICE: *** Number of centroids constructed: 10 +NOTICE: *** Value of global objective function: 6862.937872 +NOTICE: *** Seed: 1255025990 +SELECT SUM(l2_squared_distance) FROM multivariate_7_1000_10 AS l2_squared_approx; + sum +------------------ + 6939.87196307609 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); + count +------- + 11 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0); + count +------- + 10 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0); + count +------- + 1 +(1 row) + +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; + id | centroid_id +----+------------- + 6 | 6 + 8 | 5 + 2 | 1 + 10 | 2 + 5 | 3 + 3 | 9 + 4 | 7 + 1 | 4 + 9 | 8 + 7 | 10 +(10 rows) + +SELECT id, PREDICT BY 
my_kmeans_pp_l2_sqr_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; + id | centroid_id +----+------------- + 11 | -1 + 12 | -1 + 13 | -1 + 14 | -1 + 15 | -1 +(5 rows) + +CREATE MODEL my_kmeans_pp_l2_sqr_unnested USING kmeans FROM (SELECT ARRAY[x_1, x_2, x_3, x_4, x_5, x_6, x_7] AS position FROM multivariate_7_1000_10_unnested) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; +NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 +NOTICE: *** Number of valid points: 1001 +NOTICE: *** Number of dead points: 4 +NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 +NOTICE: *** k-means++ begin: consolidating 87 candidates to 10 centroid(s) +NOTICE: *** Number of centroids constructed: 10 +NOTICE: *** Value of global objective function: 6882.021765 +NOTICE: *** Seed: 1255025990 +SELECT SUM(l2_squared_distance) FROM multivariate_7_1000_10_unnested AS l2_squared_approx; + sum +------------------ + 6939.87196307609 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); + count +------- + 11 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0); + count +------- + 10 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0); + count +------- + 1 +(1 row) + +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; + id | centroid_id +----+------------- + 6 | 6 + 8 | 5 + 2 | 1 + 10 | 2 + 5 | 3 + 3 | 9 + 4 | 7 + 1 | 4 + 9 | 8 + 7 | 10 +(10 rows) + +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; + id | centroid_id +----+------------- + 11 | -1 + 12 | -1 + 13 | -1 + 14 | -1 + 15 | -1 +(5 rows) + +CREATE MODEL my_kmeans_pp_l2_sqr_unnested_no_num_features USING kmeans FROM (SELECT ARRAY[x_1, x_2, x_3, x_4, x_5, x_6, x_7] AS position FROM multivariate_7_1000_10_unnested) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; +NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 +NOTICE: *** Number of valid points: 1001 +NOTICE: *** Number of dead points: 4 +NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 +NOTICE: *** k-means++ begin: consolidating 87 candidates to 10 centroid(s) +NOTICE: *** Number of centroids constructed: 10 +NOTICE: *** Value of global objective function: 6882.021765 +NOTICE: *** Seed: 1255025990 +SELECT SUM(l2_squared_distance) FROM multivariate_7_1000_10_unnested AS l2_squared_approx; + sum +------------------ + 6939.87196307609 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM 
multivariate_7_1000_10_real_centroids); + count +------- + 11 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0); + count +------- + 10 +(1 row) + +SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0); + count +------- + 1 +(1 row) + +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id > 0; + id | centroid_id +----+------------- + 6 | 6 + 8 | 5 + 2 | 1 + 10 | 2 + 5 | 3 + 3 | 9 + 4 | 7 + 1 | 4 + 9 | 8 + 7 | 10 +(10 rows) + +SELECT id, PREDICT BY my_kmeans_pp_l2_sqr_unnested_no_num_features(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids where centroid_id <= 0; + id | centroid_id +----+------------- + 11 | -1 + 12 | -1 + 13 | -1 + 14 | -1 + 15 | -1 +(5 rows) + +CREATE MODEL my_kmeans_pp_linf USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'Linf', seeding_function = 'Random++', verbose = 1, seed = 1255025990; +NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40 @@ -315,6 +461,9 @@ SELECT id, PREDICT BY my_kmeans_pp_linf(features position) AS centroid_id FROM m DROP MODEL my_kmeans_pp_l1; DROP MODEL my_kmeans_pp_l2; DROP MODEL my_kmeans_pp_l2_sqr; +DROP MODEL my_kmeans_pp_l2_sqr_no_num_features; +DROP MODEL my_kmeans_pp_l2_sqr_unnested; +DROP MODEL my_kmeans_pp_l2_sqr_unnested_no_num_features; DROP MODEL my_kmeans_pp_linf; SELECT COUNT(DISTINCT(centroid_id)) FROM (SELECT PREDICT BY my_kmeans_pp_l1(features position) AS centroid_id FROM multivariate_7_1000_10_real_centroids); ERROR: There is no model called "my_kmeans_pp_l1". @@ -330,16 +479,8 @@ ERROR: There is no model called "my_kmeans_pp_linf". 
CONTEXT: referenced column: centroid_id -- K-Means|| CREATE MODEL my_kmeans_bb_l1 USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L1', seeding_function = 'KMeans||', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L1 -NOTICE: Hyperparameter seeding_function takes value KMeans|| -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** k-means|| oversampling factor: 4.000000, expected number of candidates per round: 40 @@ -406,16 +547,8 @@ SELECT PREDICT BY my_kmeans_bb_l1(features position) AS centroid_id FROM (SELECT ERROR: k-means predict: array of coordinates cannot be null CONTEXT: referenced column: centroid_id CREATE MODEL my_kmeans_bb_l2 USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2', seeding_function = 'KMeans||', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2 -NOTICE: Hyperparameter seeding_function takes value KMeans|| -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** k-means|| oversampling factor: 4.000000, expected number of candidates per round: 40 @@ -473,16 +606,8 @@ SELECT id, PREDICT BY my_kmeans_bb_l2(features position) AS centroid_id FROM mul (5 rows) CREATE MODEL my_kmeans_bb_l2_sqr USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'KMeans||', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value KMeans|| -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** k-means|| oversampling factor: 4.000000, expected number of candidates per round: 40 @@ -540,16 +665,8 @@ SELECT id, PREDICT BY my_kmeans_bb_l2_sqr(features 
position) AS centroid_id FROM (5 rows) CREATE MODEL my_kmeans_bb_linf USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'Linf', seeding_function = 'KMeans||', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value Linf -NOTICE: Hyperparameter seeding_function takes value KMeans|| -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** k-means|| oversampling factor: 4.000000, expected number of candidates per round: 40 @@ -625,134 +742,53 @@ CONTEXT: referenced column: centroid_id -- Wrong parameters -- Number of iterations CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 0, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 0 -ERROR: max_iterations must be in [1, 2147483647] +ERROR: Hyperparameter max_iterations must be in the range [1,2147483647] -- Number of centroids CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 0, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 0 -ERROR: num_centroids must be in [1, 1000000] +ERROR: Hyperparameter num_centroids must be in the range [1,1000000] CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 1000001, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 1000001 -ERROR: num_centroids must be in [1, 1000000] +ERROR: Hyperparameter num_centroids must be in the range [1,1000000] -- Tolerance CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = -0.1, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value -0.100000 -ERROR: tolerance must be in (0, 1.0] +ERROR: Hyperparameter tolerance must be in the range (0,1] CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 1.1, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 
'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 1.100000 -ERROR: tolerance must be in (0, 1.0] +ERROR: Hyperparameter tolerance must be in the range (0,1] -- Batch size CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 0, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 0 -ERROR: batch_size must be in [1, 1000000] +ERROR: Hyperparameter batch_size must be in the range [1,1000000] CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000001, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000001 -ERROR: batch_size must be in [1, 1000000] +ERROR: Hyperparameter batch_size must be in the range [1,1000000] -- Num of features (not matching the data) CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 9, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 9 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 9 NOTICE: *** Number of valid points: 0 NOTICE: *** Number of dead points: 1005 ERROR: k-means exec: no valid point found (no input array seems to be a one-dimensional array, or no point seems to be fully dimensional (perhaps all points have a null dimension?)) CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 5, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 5 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial 
statistics gathered: +NOTICE: *** Dimension used for computations: 5 NOTICE: *** Number of valid points: 0 NOTICE: *** Number of dead points: 1005 ERROR: k-means exec: no valid point found (no input array seems to be a one-dimensional array, or no point seems to be fully dimensional (perhaps all points have a null dimension?)) -- Distance function CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'X', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value X -ERROR: No known distance function chosen. Current candidates are: L1, L2, L2_Squared (default), Linf +ERROR: Invalid hyperparameter value for distance_function. Valid values are: L1, L2, L2_Squared, Linf. -- Seeding function CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'X', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value X -ERROR: No known seeding function chosen. Current candidates are: Random++ (default), KMeans|| +ERROR: Invalid hyperparameter value for seeding_function. Valid values are: Random++, KMeans||. 
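The reworded validation errors above (together with the `verbose` and `seed` ranges checked just below) now state every k-means hyperparameter constraint explicitly. As a consolidated reference, here is a minimal sketch of one training call that satisfies all of them, reusing the `multivariate_7_1000_10` fixture from this test; the model name is hypothetical and not part of the test suite:

```sql
-- Minimal sketch: a k-means CREATE MODEL call that stays inside every
-- range reported by the updated error messages in this file.
-- "my_kmeans_sketch" is an illustrative name, not part of the test suite.
CREATE MODEL my_kmeans_sketch USING kmeans
    FROM (SELECT position FROM multivariate_7_1000_10)
    WITH max_iterations    = 50,            -- [1, 2147483647]
         num_centroids     = 10,            -- [1, 1000000]
         tolerance         = 0.00001,       -- (0, 1]
         batch_size        = 1000,          -- [1, 1000000]
         num_features      = 7,             -- optional here: inferred from the data when omitted
         distance_function = 'L2_Squared',  -- L1, L2, L2_Squared, Linf
         seeding_function  = 'Random++',    -- Random++, KMeans||
         verbose           = 1,             -- [0, 2]
         seed              = 1255025990;    -- [0, 2147483647]

-- Points that are not fully dimensional are assigned centroid -1;
-- a NULL coordinate array raises an error instead.
SELECT id, PREDICT BY my_kmeans_sketch(features position) AS centroid_id
  FROM multivariate_7_1000_10;

DROP MODEL my_kmeans_sketch;
```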
-- Verbosity CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = -1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value -1 -ERROR: Verbosity level must be between 0 (no output), 1 (less output), or 2 (full output) +ERROR: Hyperparameter verbose must be in the range [0,2] CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 3, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 3 -ERROR: Verbosity level must be between 0 (no output), 1 (less output), or 2 (full output) +ERROR: Hyperparameter verbose must be in the range [0,2] -- Seed CREATE MODEL my_kmeans_pp_empty USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = -1; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value -1 -ERROR: seed must be in [0, 2147483647] +ERROR: Hyperparameter seed must be in the range [0,2147483647] -- As many centroids as data (and more) CREATE MODEL my_kmeans_pp_all_centroids_1kc USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 1000, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 1000 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: 
+NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 4000 @@ -804,16 +840,8 @@ SELECT id, PREDICT BY my_kmeans_pp_all_centroids_1kc(features position) AS centr (5 rows) CREATE MODEL my_kmeans_pp_all_centroids_10kc USING kmeans FROM (SELECT position FROM multivariate_7_1000_10) WITH max_iterations = 50, num_centroids = 10000, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L2_Squared', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10000 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L2_Squared -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 NOTICE: *** Initial statistics gathered: +NOTICE: *** Dimension used for computations: 7 NOTICE: *** Number of valid points: 1000 NOTICE: *** Number of dead points: 5 NOTICE: *** random++ oversampling factor: 4.000000, expected number of candidates: 40000 @@ -868,30 +896,13 @@ DROP MODEL my_kmeans_pp_all_centroids_1kc; DROP MODEL my_kmeans_pp_all_centroids_10kc; -- Wrong input format CREATE MODEL my_kmeans_pp_no_array USING kmeans FROM (SELECT -67.9280164728999978,-3.90913184339999997,89.3555545521000028,-1.76477945900000011,-129.65569087770001,-66.7980506008999981,18.1708778307999985) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L1', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L1 -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 ERROR: k-means exec: data is not of type float8 (double precision) array CREATE MODEL my_kmeans_pp_more_attr USING kmeans FROM (SELECT ARRAY[135.529122074100002,1.38508608310000003,75.1363695022999991,-157.490917820600004,-23.7378253056999995,33.6490592628000016,-7.9599672791999998]::DOUBLE PRECISION[], ARRAY[-73.1345967305999949,-62.9817681270000023,207.069457080299998,-90.3006007433999969,173.55455376110001,138.308886012699986,-37.5366639877000026]::DOUBLE PRECISION[]) WITH max_iterations = 50, num_centroids = 10, tolerance = 0.00001, batch_size = 1000, num_features = 7, distance_function = 'L1', seeding_function = 'Random++', verbose = 1, seed = 1255025990; -NOTICE: Hyperparameter max_iterations takes value 50 -NOTICE: Hyperparameter num_centroids takes value 10 -NOTICE: Hyperparameter tolerance takes value 0.000010 -NOTICE: Hyperparameter batch_size takes value 1000 -NOTICE: Hyperparameter num_features takes value 7 -NOTICE: Hyperparameter distance_function takes value L1 -NOTICE: Hyperparameter seeding_function takes value Random++ -NOTICE: Hyperparameter verbose takes value 1 -NOTICE: Hyperparameter seed takes value 1255025990 
ERROR: k-means exec: relation should contain only a single attribute (point coordinates in a double precision array) -- Cleanup DROP TABLE IF EXISTS multivariate_7_1000_10; DROP TABLE IF EXISTS multivariate_7_1000_10_real_centroids; +DROP TABLE IF EXISTS multivariate_7_1000_10_unnested; SELECT 'DB4AI KMEANS TEST COMPLETED'; ?column? ----------------------------- diff --git a/src/test/regress/output/db4ai_snapshots.source b/src/test/regress/output/db4ai_snapshots.source index 95f80c8d4..00e2472e5 100644 --- a/src/test/regress/output/db4ai_snapshots.source +++ b/src/test/regress/output/db4ai_snapshots.source @@ -1,5 +1,5 @@ --run -\! sh @abs_srcdir@/snapshots_test/test.sh -r -p @portstring@ -d regression +\! @abs_srcdir@/snapshots_test/test.sh -r -p @portstring@ -d regression ## SETTING UP ## harness: SetupTestHarness.sql ..... PASS # RUNNING TEST # case: 00 CreateSnapshotAPI.sql .... PASS # RUNNING TEST # case: 01 PrepareSnapshotAPI.sql ... PASS diff --git a/src/test/regress/output/db4ai_svm_kernels.source b/src/test/regress/output/db4ai_svm_kernels.source new file mode 100644 index 000000000..39a2e5845 --- /dev/null +++ b/src/test/regress/output/db4ai_svm_kernels.source @@ -0,0 +1,95 @@ +CREATE TABLE moons( + id BIGSERIAL, + cls SMALLINT, + x REAL, + y REAL +); +NOTICE: CREATE TABLE will create implicit sequence "moons_id_seq" for serial column "moons.id" +\copy moons(cls, x, y) FROM '@abs_srcdir@/data/moons.csv' DELIMITER ','; +SELECT COUNT(*) FROM moons; + count +------- + 200 +(1 row) + +-- linear, expected accuracy = 0.890 +CREATE MODEL moons_linear USING svm_classification + FEATURES x, y TARGET cls + FROM moons + WITH seed=54, batch_size=8, decay=1e-20, + learning_rate=0.01215337, lambda=920.90725960, + tolerance=0.06377824, max_iterations=2; +-- gaussian, expected accuracy = 0.935 +CREATE MODEL moons_gaussian USING svm_classification + FEATURES x, y TARGET cls + FROM moons + WITH seed=1, batch_size=4, decay=0.80858937, + learning_rate=0.16556385, lambda=274.28986109, + tolerance=0.00714786, max_iterations=33, + kernel='gaussian', gamma=0.96736585; +-- polynomial, expected accuracy = 1.000 +CREATE MODEL moons_polynomial USING svm_classification + FEATURES x, y TARGET cls + FROM moons + WITH seed=1, batch_size=2, decay=0.87908244, + learning_rate=0.40456318, lambda=53.75794302, + tolerance=0.00003070, max_iterations=35, + kernel='polynomial', degree=4, coef0=1.11311435; +-- display the three models +SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight, + hyperparametersnames, hyperparametersoids, hyperparametersvalues, + trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len + FROM gs_model_warehouse + WHERE modelname LIKE 'moons%' + ORDER BY modelname; + modelname | processedtuples | discardedtuples | iterations | outputtype | modeltype | query | weight | hyperparametersnames | hyperparametersoids | hyperparametersvalues | trainingscoresname | trainingscoresvalue | model_data_len 
diff --git a/src/test/regress/output/db4ai_svm_kernels.source b/src/test/regress/output/db4ai_svm_kernels.source
new file mode 100644
index 000000000..39a2e5845
--- /dev/null
+++ b/src/test/regress/output/db4ai_svm_kernels.source
@@ -0,0 +1,95 @@
+CREATE TABLE moons(
+    id BIGSERIAL,
+    cls SMALLINT,
+    x REAL,
+    y REAL
+);
+NOTICE: CREATE TABLE will create implicit sequence "moons_id_seq" for serial column "moons.id"
+\copy moons(cls, x, y) FROM '@abs_srcdir@/data/moons.csv' DELIMITER ',';
+SELECT COUNT(*) FROM moons;
+ count
+-------
+   200
+(1 row)
+
+-- linear, expected accuracy = 0.890
+CREATE MODEL moons_linear USING svm_classification
+       FEATURES x, y TARGET cls
+       FROM moons
+       WITH seed=54, batch_size=8, decay=1e-20,
+            learning_rate=0.01215337, lambda=920.90725960,
+            tolerance=0.06377824, max_iterations=2;
+-- gaussian, expected accuracy = 0.935
+CREATE MODEL moons_gaussian USING svm_classification
+       FEATURES x, y TARGET cls
+       FROM moons
+       WITH seed=1, batch_size=4, decay=0.80858937,
+            learning_rate=0.16556385, lambda=274.28986109,
+            tolerance=0.00714786, max_iterations=33,
+            kernel='gaussian', gamma=0.96736585;
+-- polynomial, expected accuracy = 1.000
+CREATE MODEL moons_polynomial USING svm_classification
+       FEATURES x, y TARGET cls
+       FROM moons
+       WITH seed=1, batch_size=2, decay=0.87908244,
+            learning_rate=0.40456318, lambda=53.75794302,
+            tolerance=0.00003070, max_iterations=35,
+            kernel='polynomial', degree=4, coef0=1.11311435;
+-- display the three models
+SELECT modelname, processedtuples, discardedtuples, iterations, outputtype, modeltype, query, weight,
+       hyperparametersnames, hyperparametersoids, hyperparametersvalues,
+       trainingscoresname, trainingscoresvalue, length(modeldata) as model_data_len
+  FROM gs_model_warehouse
+  WHERE modelname LIKE 'moons%'
+  ORDER BY modelname;
+    modelname     | processedtuples | discardedtuples | iterations | outputtype | modeltype | query | weight | hyperparametersnames | hyperparametersoids | hyperparametersvalues | trainingscoresname | trainingscoresvalue | model_data_len
+------------------+-----------------+-----------------+------------+------------+--------------------+--------------------------------------------------------------------------+--------+------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------+-------------------------------------------------------------------------------------------+-------------------------------------+------------------------------------+----------------
+ moons_gaussian | 200 | 0 | 32 | 21 | svm_classification | CREATE MODEL moons_gaussian USING svm_classification +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose,lambda,kernel,components,gamma,degree,coef0} | {23,701,701,23,23,1043,701,23,16,701,1043,23,701,23,701} | {4,.80858937,.16556385,33,0,gd,.00714786,1,false,274.28986109,gaussian,0,.96736585,2,1} | {accuracy,f1,precision,recall,loss} | {.935,.934673,.939394,.93,7.73244} | 2396
+ | | | | | | FEATURES x, y TARGET cls +| | | | | | |
+ | | | | | | FROM moons +| | | | | | |
+ | | | | | | WITH seed=1, batch_size=4, decay=0.80858937, +| | | | | | |
+ | | | | | | learning_rate=0.16556385, lambda=274.28986109, +| | | | | | |
+ | | | | | | tolerance=0.00714786, max_iterations=33, +| | | | | | |
+ | | | | | | kernel='gaussian', gamma=0.96736585; | | | | | | |
+ moons_linear | 200 | 0 | 1 | 21 | svm_classification | CREATE MODEL moons_linear USING svm_classification +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose,lambda,kernel,components,gamma,degree,coef0} | {23,701,701,23,23,1043,701,23,16,701,1043,23,701,23,701} | {8,1e-20,.01215337,2,0,gd,.06377824,54,false,920.9072596,linear,0,.5,2,1} | {accuracy,f1,precision,recall,loss} | {.89,.888889,.897959,.88,10.6976} | 380
+ | | | | | | FEATURES x, y TARGET cls +| | | | | | |
+ | | | | | | FROM moons +| | | | | | |
+ | | | | | | WITH seed=54, batch_size=8, decay=1e-20, +| | | | | | |
+ | | | | | | learning_rate=0.01215337, lambda=920.90725960, +| | | | | | |
+ | | | | | | tolerance=0.06377824, max_iterations=2; | | | | | | |
+ moons_polynomial | 200 | 0 | 35 | 21 | svm_classification | CREATE MODEL moons_polynomial USING svm_classification +| | {batch_size,decay,learning_rate,max_iterations,max_seconds,optimizer,tolerance,seed,verbose,lambda,kernel,components,gamma,degree,coef0} | {23,701,701,23,23,1043,701,23,16,701,1043,23,701,23,701} | {2,.87908244,.40456318,35,0,gd,3.07e-05,1,false,53.75794302,polynomial,0,.5,4,1.11311435} | {accuracy,f1,precision,recall,loss} | {1,1,1,1,.489574} | 2396
+ | | | | | | FEATURES x, y TARGET cls +| | | | | | |
+ | | | | | | FROM moons +| | | | | | |
+ | | | | | | WITH seed=1, batch_size=2, decay=0.87908244, +| | | | | | |
+ | | | | | | learning_rate=0.40456318, lambda=53.75794302, +| | | | | | |
+ | | | | | | tolerance=0.00003070, max_iterations=35, +| | | | | | |
+ | | | | | | kernel='polynomial', degree=4, coef0=1.11311435; | | | | | | |
+(3 rows)
+
+-- validate the models by predicting all at the same time, expected [0.89, 0.935, 1.0]
+SELECT (SUM(CASE WHEN t=p1 THEN 1 ELSE 0 END) / (SELECT COUNT(*) FROM moons)) AS acc_lin,
+       (SUM(CASE WHEN t=p2 THEN 1 ELSE 0 END) / (SELECT COUNT(*) FROM moons)) AS acc_gauss,
+       (SUM(CASE WHEN t=p3 THEN 1 ELSE 0 END) / (SELECT COUNT(*) FROM moons)) AS acc_poly
+  FROM (SELECT cls AS t,
+               PREDICT BY moons_linear (FEATURES x, y) AS p1,
+               PREDICT BY moons_gaussian (FEATURES x, y) AS p2,
+               PREDICT BY moons_polynomial (FEATURES x, y) AS p3
+          FROM moons
+);
+ acc_lin | acc_gauss | acc_poly
+---------+-----------+----------
+     .89 |      .935 |        1
+(1 row)
+
+-- cleanup models
+DROP MODEL moons_linear;
+DROP MODEL moons_gaussian;
+DROP MODEL moons_polynomial;
+-- cleanup tables
+DROP TABLE IF EXISTS moons;
+SELECT 'DB4AI SVM KERNELS TEST COMPLETED';
+             ?column?
+----------------------------------
+ DB4AI SVM KERNELS TEST COMPLETED
+(1 row)
+
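
[Editor's note] The new db4ai_svm_kernels expected output above trains the same two-class "moons" data three times; only the kernel-related hyperparameters in the WITH clause change (gaussian adds gamma, polynomial adds degree and coef0), and accuracy is then checked with PREDICT BY. A condensed sketch, with the kernel hyperparameter values copied from the test, the remaining hyperparameters left at their defaults, and an illustrative model name:

    CREATE MODEL m_rbf USING svm_classification
        FEATURES x, y TARGET cls FROM moons
        WITH seed=1, lambda=274.28986109, kernel='gaussian', gamma=0.96736585;
    -- fraction of correctly classified rows, as in the test's accuracy query
    SELECT AVG(CASE WHEN cls = PREDICT BY m_rbf (FEATURES x, y) THEN 1.0 ELSE 0.0 END) AS acc
      FROM moons;
    DROP MODEL m_rbf;
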
diff --git a/src/test/regress/output/db4ai_xgboost_train_predict.source b/src/test/regress/output/db4ai_xgboost_train_predict.source
new file mode 100644
index 000000000..b0124609c
--- /dev/null
+++ b/src/test/regress/output/db4ai_xgboost_train_predict.source
@@ -0,0 +1,235 @@
+CREATE TABLE db4ai_rain (id INT, Location VARCHAR(20), MinTemp FLOAT, MaxTemp FLOAT, Rainfall FLOAT, WindGustSpeed INT, WindSpeed9am INT,
+                         WindSpeed3pm INT, Humidity9am INT, Humidity3pm INT, Pressure9am FLOAT, Pressure3pm FLOAT, Cloud9am INT, Cloud3pm INT,
+                         Temp9am FLOAT, Temp3pm FLOAT, RainToday INT, RainTomorrow INT)
+WITH (orientation=row, compression=no);
+COPY db4ai_rain FROM '@abs_srcdir@/data/rain.txt' WITH (FORMAT csv);
+-- Error in FEATURES / TARGET
+CREATE MODEL m using xgboost_binary_logistic FEATURES * TARGET price FROM db4ai_rain;
+ERROR: FEATURES clause cannot be *
+CREATE MODEL m using xgboost_binary_logistic FEATURES Temp9am,Temp3pm TARGET * FROM db4ai_rain;
+ERROR: TARGET clause cannot be *
+CREATE MODEL m using xgboost_binary_logistic FROM db4ai_rain;
+ERROR: Supervised ML algorithms require FEATURES clause
+-- Errors with semantic validation of hyperparameters
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH n_iter=-1;
+ERROR: Hyperparameter n_iter must be in the range [1,2147483647]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH batch_size=0;
+ERROR: Hyperparameter batch_size must be in the range [1,2147483647]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH max_depth=-1;
+ERROR: Hyperparameter max_depth must be in the range [0,2147483647]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH min_child_weight=-1;
+ERROR: Hyperparameter min_child_weight must be in the range [0,2147483647]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH eta=-0.1;
+ERROR: Hyperparameter eta must be in the range [0,1]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH seed=-1;
+ERROR: Hyperparameter seed must be in the range [0,2147483647]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH nthread=-1;
+ERROR: Hyperparameter nthread must be in the range [0,2147483647]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH booster=10;
+ERROR: Hyperparameter booster must be a string
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH tree_method=10;
+ERROR: Hyperparameter tree_method must be a string
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH verbosity=10;
+ERROR: Hyperparameter verbosity must be in the range [0,3]
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH n_iter='a_wrong_parameter';
+ERROR: Hyperparameter n_iter must be an integer
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH batch_size='a_wrong_parameter';
+ERROR: Hyperparameter batch_size must be an integer
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH max_depth='a_wrong_parameter';
+ERROR: Hyperparameter max_depth must be an integer
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH min_child_weight='a_wrong_parameter';
+ERROR: Hyperparameter min_child_weight must be an integer
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH seed='a_wrong_parameter';
+ERROR: Hyperparameter seed must be an integer
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH eta='a_wrong_parameter';
+ERROR: Hyperparameter eta must be a floating point number
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH nthread='a_wrong_parameter';
+ERROR: Hyperparameter nthread must be an integer
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH booster=10;
+ERROR: Hyperparameter booster must be a string
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH tree_method=10;
+ERROR: Hyperparameter tree_method must be a string
+CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH verbosity='a_wrong_parameter';
+ERROR: Hyperparameter verbosity must be an integer
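
[Editor's note] The validation block above shows that every hyperparameter is checked for type and for range, and that CREATE MODEL aborts before any training runs. A minimal illustration using the same table and algorithm as the test (the accepted values are one arbitrary in-range choice, not taken from this patch):

    -- rejected, eta must lie in [0,1]:
    CREATE MODEL m USING xgboost_binary_logistic
        FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain WITH eta=-0.1;
    -- accepted, every value inside its documented range:
    CREATE MODEL m USING xgboost_binary_logistic
        FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain
        WITH n_iter=10, max_depth=6, eta=0.3, seed=1;
    DROP MODEL m;
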
+-- Normal mode
+CREATE MODEL m1 USING xgboost_binary_logistic FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m1 (FEATURES raintoday), 1) as pred FROM db4ai_rain;
+ id | raintomorrow | pred
+----+--------------+------
+  1 |            0 |   .2
+  2 |            0 |   .2
+  3 |            1 |   .2
+  4 |            0 |   .2
+  5 |            0 |   .2
+  6 |            1 |   .2
+  7 |            0 |   .2
+  8 |            0 |   .2
+  9 |            1 |   .2
+ 10 |            0 |   .2
+ 11 |            0 |   .2
+ 12 |            0 |   .2
+ 13 |            0 |   .2
+ 14 |            0 |   .2
+ 15 |            0 |   .2
+ 16 |            0 |   .2
+ 17 |            0 |   .2
+ 18 |            0 |   .2
+ 19 |            0 |   .2
+ 20 |            1 |   .2
+(20 rows)
+
+DROP MODEL m1;
+CREATE MODEL m3 USING xgboost_regression_squarederror FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m3 (FEATURES raintoday), 1) as pred FROM db4ai_rain;
+ id | raintomorrow | pred
+----+--------------+------
+  1 |            0 |   .2
+  2 |            0 |   .2
+  3 |            1 |   .2
+  4 |            0 |   .2
+  5 |            0 |   .2
+  6 |            1 |   .2
+  7 |            0 |   .2
+  8 |            0 |   .2
+  9 |            1 |   .2
+ 10 |            0 |   .2
+ 11 |            0 |   .2
+ 12 |            0 |   .2
+ 13 |            0 |   .2
+ 14 |            0 |   .2
+ 15 |            0 |   .2
+ 16 |            0 |   .2
+ 17 |            0 |   .2
+ 18 |            0 |   .2
+ 19 |            0 |   .2
+ 20 |            1 |   .2
+(20 rows)
+
+DROP MODEL m3;
+CREATE MODEL m4 USING xgboost_regression_gamma FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m4 (FEATURES raintoday), 1) as pred FROM db4ai_rain;
+ id | raintomorrow | pred
+----+--------------+------
+  1 |            0 |   .2
+  2 |            0 |   .2
+  3 |            1 |   .2
+  4 |            0 |   .2
+  5 |            0 |   .2
+  6 |            1 |   .2
+  7 |            0 |   .2
+  8 |            0 |   .2
+  9 |            1 |   .2
+ 10 |            0 |   .2
+ 11 |            0 |   .2
+ 12 |            0 |   .2
+ 13 |            0 |   .2
+ 14 |            0 |   .2
+ 15 |            0 |   .2
+ 16 |            0 |   .2
+ 17 |            0 |   .2
+ 18 |            0 |   .2
+ 19 |            0 |   .2
+ 20 |            1 |   .2
+(20 rows)
+
+DROP MODEL m4;
+-- empty resultset
+CREATE MODEL m1 USING xgboost_binary_logistic FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m1 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0;
+ id | raintomorrow | pred
+----+--------------+------
+(0 rows)
+
+DROP MODEL m1;
+CREATE MODEL m2 USING xgboost_regression_logistic FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m2 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0;
+ id | raintomorrow | pred
+----+--------------+------
+(0 rows)
+
+DROP MODEL m2;
+CREATE MODEL m3 USING xgboost_regression_squarederror FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m3 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0;
+ id | raintomorrow | pred
+----+--------------+------
+(0 rows)
+
+DROP MODEL m3;
+CREATE MODEL m4 USING xgboost_regression_gamma FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
+WITH n_iter=10, batch_size=1000000, booster='gbtree', tree_method='exact', eval_metric='auc', nthread=4, seed=3141;
+SELECT id, raintomorrow, round(PREDICT BY m4 (FEATURES raintoday), 1) as pred FROM db4ai_rain where id < 0;
+ id | raintomorrow | pred
+----+--------------+------
+(0 rows)
+
+DROP MODEL m4;
+-- Explain test
+EXPLAIN CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain;
+                                QUERY PLAN
+-------------------------------------------------------------------------
+ Train Model - xgboost_binary_logistic (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on db4ai_rain (cost=0.00..14.44 rows=444 width=20)
+(2 rows)
+
+EXPLAIN VERBOSE CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday FROM db4ai_rain;
+                                 QUERY PLAN
+---------------------------------------------------------------------------
+ Train Model - xgboost_binary_logistic (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on public.db4ai_rain (cost=0.00..14.44 rows=444 width=20)
+         Output: raintoday, rainfall, temp9am
+(3 rows)
+
+EXPLAIN VERBOSE CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am TARGET raintoday != 0 FROM db4ai_rain;
+                                 QUERY PLAN
+---------------------------------------------------------------------------
+ Train Model - xgboost_binary_logistic (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on public.db4ai_rain (cost=0.00..15.55 rows=444 width=20)
+         Output: (raintoday <> 0), rainfall, temp9am
+(3 rows)
+
+EXPLAIN VERBOSE CREATE MODEL m USING xgboost_binary_logistic FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain;
+                                    QUERY PLAN
+---------------------------------------------------------------------------------
+ Train Model - xgboost_binary_logistic (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on public.db4ai_rain (cost=0.00..15.55 rows=444 width=24)
+         Output: raintoday, rainfall, temp9am, (windgustspeed)::double precision
+(3 rows)
+
+EXPLAIN VERBOSE CREATE MODEL m USING xgboost_regression_logistic FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain;
+                                    QUERY PLAN
+---------------------------------------------------------------------------------
+ Train Model - xgboost_regression_logistic (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on public.db4ai_rain (cost=0.00..15.55 rows=444 width=24)
+         Output: raintoday, rainfall, temp9am, (windgustspeed)::double precision
+(3 rows)
+
+EXPLAIN VERBOSE CREATE MODEL m USING xgboost_regression_squarederror FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain;
+                                    QUERY PLAN
+---------------------------------------------------------------------------------
+ Train Model - xgboost_regression_squarederror (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on public.db4ai_rain (cost=0.00..15.55 rows=444 width=24)
+         Output: raintoday, rainfall, temp9am, (windgustspeed)::double precision
+(3 rows)
+
+EXPLAIN VERBOSE CREATE MODEL m USING xgboost_regression_gamma FEATURES rainfall, temp9am, windgustspeed::float as transformed_windgust TARGET raintoday FROM db4ai_rain;
+                                    QUERY PLAN
+---------------------------------------------------------------------------------
+ Train Model - xgboost_regression_gamma (cost=0.00..0.00 rows=0 width=0)
+   -> Seq Scan on public.db4ai_rain (cost=0.00..15.55 rows=444 width=24)
+         Output: raintoday, rainfall, temp9am, (windgustspeed)::double precision
+(3 rows)
+
+-- Cleanup
+DROP TABLE IF EXISTS db4ai_rain;
+SELECT 'DB4AI TEST COMPLETED';
+       ?column?
+----------------------
+ DB4AI TEST COMPLETED
+(1 row)
+
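
[Editor's note] The new xgboost expected output above covers end-to-end training, prediction, empty result sets, and EXPLAIN plans; the verbose plans show that the TARGET expression is emitted first by the scan and that feature expressions such as windgustspeed::float are evaluated inside it. A short train-then-predict sketch mirroring the file's statements (rounding the prediction to obtain a hard 0/1 label is illustrative, not part of the test):

    CREATE MODEL m USING xgboost_binary_logistic
        FEATURES raintoday TARGET raintomorrow FROM db4ai_rain
        WITH n_iter=10, booster='gbtree', tree_method='exact', eval_metric='auc', seed=3141;
    -- PREDICT BY returns the predicted value; round it for a hard label
    SELECT id, raintomorrow, round(PREDICT BY m (FEATURES raintoday)) AS label
      FROM db4ai_rain;
    DROP MODEL m;
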
diff --git a/src/test/regress/output/ddl.source b/src/test/regress/output/ddl.source
index c7845430f..e67d3c221 100644
--- a/src/test/regress/output/ddl.source
+++ b/src/test/regress/output/ddl.source
@@ -16,12 +16,15 @@ HINT: Replication slot names may only contain lower letters, numbers and the un
 -- fail twice because of an invalid parameter values
 SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
 ERROR: could not parse value "frakbar" for parameter "include-xids"
+DETAIL: N/A
 CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
 SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistant-option', 'frakbar');
 ERROR: option "nonexistant-option" = "frakbar" is unknown
+DETAIL: N/A
 CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
 SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
 ERROR: could not parse value "frakbar" for parameter "include-xids"
+DETAIL: N/A
 CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
 -- succeed once
 SELECT pg_drop_replication_slot('regression_slot');
@@ -265,7 +268,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
 table public.toasttable: INSERT: id[integer]:2 toasted_col1[text]:null rand1[double precision]:3077
toasted_col2[text]:'0001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038
403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202
730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610
162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050
005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043
904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703
280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160
217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105
010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049
404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203
830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710
[elided: diff hunk payload consisting solely of a machine-generated string of concatenated zero-padded 4-digit integers (0272-0500, then 0001-0500 repeating, later reaching 1000+), apparently regression-test fixture data]
181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061
907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578 + table public.toasttable: UPDATE: old-key: id[integer]:1 new-tuple: id[integer]:1 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510
261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141
9151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578 --?.* (25 rows) @@ -307,13 +310,13 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.toasttable: INSERT: id[integer]:3 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101
310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119
0219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:6075 toasted_col2[text]:null rand2[double precision]:7574 --?.* BEGIN - table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:79 toasted_col2[text]:'1''2''3''4''5''6''7''8''9''10''11''12''13''14''15''16''17''18''19''20''21''22''23''24''25''26''27''28''29''30''31''32''33''34''35''36''37''38''39''40''41''42''43''44''45''46''47''48''49''50''51''52''53''54''55''56''57''58''59''60''61''62''63''64''65''66''67''68''69''70''71''72''73''74''75''76''77''78''79''80''81''82''83''84''85''86''87''88''89''90''91''92''93''94''95''96''97''98''99''100''101''102''103''104''105''106''107''108''109''110''111''112''113''114''115''116''117''118''119''120''121''122''123''124''125''126''127''128''129''130''131''132''133''134''135''136''137''138''139''140''141''142''143''144''145''146''147''148''149''150''151''152''153''154''155''156''157''158''159''160''161''162''163''164''165''166''167''168''169''170''171''172''173''174''175''176''177''178''179''180''181''182''183''184''185''186''187''188''189''190''191''192''193''194''195''196''197''198''199''200''201''202''203''204''205''206''207''208''209''210''211''212''213''214''215''216''217''218''219''220''221''222''223''224''225''226''227''228''229''230''231''232''233''234''235''236''237''238''239''240''241''242''243''244''245''246''247''248''249''250''251''252''253''254''255''256''257''258''259''260''261''262''263''264''265''266''267''268''269''270''271''272''273''274''275''276''277''278''279''280''281''282''283''284''285''286''287''288''289''290''291''292''293''294''295''296''297''298''299''300''301''302''303''304''305''306''307''308''309''310''311''312''313''314''315''316''317''318''319''320''321''322''323''324''325''326''327''328''329''330''331''332''333''334''335''336''337''338''339''340''341''342''343''344''345''346''347''348''349''350''351''352''353''354''355''356''357''358''359''360''361''362''363''364''365''366''367''368''369''370''371''372''373''374''375''376''377''378''379''380''381''382''383''384''385''386''387''388''389''390''391''392''393''394''395''396''397''398''399''400''401''402''403''404''405''406''407''408''409''410''411''412''413''414''415''416''417''418''419''420''421''422''423''424''425''426''427''428''429''430''431''432''433''434''435''436''437''438''439''440''441''442''443''444''445''446''447''448''449''450''451''452''453''454''455''456''457''458''459''460''461''462''463''464''465''466''467''468''469''470''471''472''473''474''475''476''477''478''479''480''481''482''483''484''485''486''487''488''489''490''491''492''493''494''495''496''497''498''499''500''501''502''503''504''505''506''507''508''509''510''511''512''513''514''515''516''517''518''519''520''521''522''523''524''525''526''527''528''529''530''531''532''533''534''535''536''537''538''539''540''541''542''543''544''545''546''547''548''549''550''551''552''553''554''555''556''557''558''559''560''561''562''563''564''565''566''567''568''569''570''571''572''573''574''575''576''577''578''579''580''581''582''583''584''585''586''587''588''589''590''591''592''593''594''595''596''597''598''599''600''601''602''603''604''605''606''607''6
08''609''610''611''612''613''614''615''616''617''618''619''620''621''622''623''624''625''626''627''628''629''630''631''632''633''634''635''636''637''638''639''640''641''642''643''644''645''646''647''648''649''650''651''652''653''654''655''656''657''658''659''660''661''662''663''664''665''666''667''668''669''670''671''672''673''674''675''676''677''678''679''680''681''682''683''684''685''686''687''688''689''690''691''692''693''694''695''696''697''698''699''700''701''702''703''704''705''706''707''708''709''710''711''712''713''714''715''716''717''718''719''720''721''722''723''724''725''726''727''728''729''730''731''732''733''734''735''736''737''738''739''740''741''742''743''744''745''746''747''748''749''750''751''752''753''754''755''756''757''758''759''760''761''762''763''764''765''766''767''768''769''770''771''772''773''774''775''776''777''778''779''780''781''782''783''784''785''786''787''788''789''790''791''792''793''794''795''796''797''798''799''800''801''802''803''804''805''806''807''808''809''810''811''812''813''814''815''816''817''818''819''820''821''822''823''824''825''826''827''828''829''830''831''832''833''834''835''836''837''838''839''840''841''842''843''844''845''846''847''848''849''850''851''852''853''854''855''856''857''858''859''860''861''862''863''864''865''866''867''868''869''870''871''872''873''874''875''876''877''878''879''880''881''882''883''884''885''886''887''888''889''890''891''892''893''894''895''896''897''898''899''900''901''902''903''904''905''906''907''908''909''910''911''912''913''914''915''916''917''918''919''920''921''922''923''924''925''926''927''928''929''930''931''932''933''934''935''936''937''938''939''940''941''942''943''944''945''946''947''948''949''950''951''952''953''954''955''956''957''958''959''960''961''962''963''964''965''966''967''968''969''970''971''972''973''974''975''976''977''978''979''980''981''982''983''984''985''986''987''988''989''990''991''992''993''994''995''996''997''998''999''1000''1001''1002''1003''1004''1005''1006''1007''1008''1009''1010''1011''1012''1013''1014''1015''1016''1017''1018''1019''1020''1021''1022''1023''1024''1025''1026''1027''1028''1029''1030''1031''1032''1033''1034''1035''1036''1037''1038''1039''1040''1041''1042''1043''1044''1045''1046''1047''1048''1049''1050''1051''1052''1053''1054''1055''1056''1057''1058''1059''1060''1061''1062''1063''1064''1065''1066''1067''1068''1069''1070''1071''1072''1073''1074''1075''1076''1077''1078''1079''1080''1081''1082''1083''1084''1085''1086''1087''1088''1089''1090''1091''1092''1093''1094''1095''1096''1097''1098''1099''1100''1101''1102''1103''1104''1105''1106''1107''1108''1109''1110''1111''1112''1113''1114''1115''1116''1117''1118''1119''1120''1121''1122''1123''1124''1125''1126''1127''1128''1129''1130''1131''1132''1133''1134''1135''1136''1137''1138''1139''1140''1141''1142''1143''1144''1145''1146''1147''1148''1149''1150''1151''1152''1153''1154''1155''1156''1157''1158''1159''1160''1161''1162''1163''1164''1165''1166''1167''1168''1169''1170''1171''1172''1173''1174''1175''1176''1177''1178''1179''1180''1181''1182''1183''1184''1185''1186''1187''1188''1189''1190''1191''1192''1193''1194''1195''1196''1197''1198''1199''1200''1201''1202''1203''1204''1205''1206''1207''1208''1209''1210''1211''1212''1213''1214''1215''1216''1217''1218''1219''1220''1221''1222''1223''1224''1225''1226''1227''1228''1229''1230''1231''1232''1233''1234''1235''1236''1237''1238''1239''1240''1241''1242''1243''1244''1245''1246''1247''1248''1249''1250''1251''1252''1253''1254''1255''1256''1257''1258''1259''1260''1261''1262''1263''1264''1265''
1266''1267''1268''1269''1270''1271''1272''1273''1274''1275''1276''1277''1278''1279''1280''1281''1282''1283''1284''1285''1286''1287''1288''1289''1290''1291''1292''1293''1294''1295''1296''1297''1298''1299''1300''1301''1302''1303''1304''1305''1306''1307''1308''1309''1310''1311''1312''1313''1314''1315''1316''1317''1318''1319''1320''1321''1322''1323''1324''1325''1326''1327''1328''1329''1330''1331''1332''1333''1334''1335''1336''1337''1338''1339''1340''1341''1342''1343''1344''1345''1346''1347''1348''1349''1350''1351''1352''1353''1354''1355''1356''1357''1358''1359''1360''1361''1362''1363''1364''1365''1366''1367''1368''1369''1370''1371''1372''1373''1374''1375''1376''1377''1378''1379''1380''1381''1382''1383''1384''1385''1386''1387''1388''1389''1390''1391''1392''1393''1394''1395''1396''1397''1398''1399''1400''1401''1402''1403''1404''1405''1406''1407''1408''1409''1410''1411''1412''1413''1414''1415''1416''1417''1418''1419''1420''1421''1422''1423''1424''1425''1426''1427''1428''1429''1430''1431''1432''1433''1434''1435''1436''1437''1438''1439''1440''1441''1442''1443''1444''1445''1446''1447''1448''1449''1450''1451''1452''1453''1454''1455''1456''1457''1458''1459''1460''1461''1462''1463''1464''1465''1466''1467''1468''1469''1470''1471''1472''1473''1474''1475''1476''1477''1478''1479''1480''1481''1482''1483''1484''1485''1486''1487''1488''1489''1490''1491''1492''1493''1494''1495''1496''1497''1498''1499''1500''1501''1502''1503''1504''1505''1506''1507''1508''1509''1510''1511''1512''1513''1514''1515''1516''1517''1518''1519''1520''1521''1522''1523''1524''1525''1526''1527''1528''1529''1530''1531''1532''1533''1534''1535''1536''1537''1538''1539''1540''1541''1542''1543''1544''1545''1546''1547''1548''1549''1550''1551''1552''1553''1554''1555''1556''1557''1558''1559''1560''1561''1562''1563''1564''1565''1566''1567''1568''1569''1570''1571''1572''1573''1574''1575''1576''1577''1578''1579''1580''1581''1582''1583''1584''1585''1586''1587''1588''1589''1590''1591''1592''1593''1594''1595''1596''1597''1598''1599''1600''1601''1602''1603''1604''1605''1606''1607''1608''1609''1610''1611''1612''1613''1614''1615''1616''1617''1618''1619''1620''1621''1622''1623''1624''1625''1626''1627''1628''1629''1630''1631''1632''1633''1634''1635''1636''1637''1638''1639''1640''1641''1642''1643''1644''1645''1646''1647''1648''1649''1650''1651''1652''1653''1654''1655''1656''1657''1658''1659''1660''1661''1662''1663''1664''1665''1666''1667''1668''1669''1670''1671''1672''1673''1674''1675''1676''1677''1678''1679''1680''1681''1682''1683''1684''1685''1686''1687''1688''1689''1690''1691''1692''1693''1694''1695''1696''1697''1698''1699''1700''1701''1702''1703''1704''1705''1706''1707''1708''1709''1710''1711''1712''1713''1714''1715''1716''1717''1718''1719''1720''1721''1722''1723''1724''1725''1726''1727''1728''1729''1730''1731''1732''1733''1734''1735''1736''1737''1738''1739''1740''1741''1742''1743''1744''1745''1746''1747''1748''1749''1750''1751''1752''1753''1754''1755''1756''1757''1758''1759''1760''1761''1762''1763''1764''1765''1766''1767''1768''1769''1770''1771''1772''1773''1774''1775''1776''1777''1778''1779''1780''1781''1782''1783''1784''1785''1786''1787''1788''1789''1790''1791''1792''1793''1794''1795''1796''1797''1798''1799''1800''1801''1802''1803''1804''1805''1806''1807''1808''1809''1810''1811''1812''1813''1814''1815''1816''1817''1818''1819''1820''1821''1822''1823''1824''1825''1826''1827''1828''1829''1830''1831''1832''1833''1834''1835''1836''1837''1838''1839''1840''1841''1842''1843''1844''1845''1846''1847''1848''1849''1850''1851''1852''1853''1854''1855''1856''1857''185
8''1859''1860''1861''1862''1863''1864''1865''1866''1867''1868''1869''1870''1871''1872''1873''1874''1875''1876''1877''1878''1879''1880''1881''1882''1883''1884''1885''1886''1887''1888''1889''1890''1891''1892''1893''1894''1895''1896''1897''1898''1899''1900''1901''1902''1903''1904''1905''1906''1907''1908''1909''1910''1911''1912''1913''1914''1915''1916''1917''1918''1919''1920''1921''1922''1923''1924''1925''1926''1927''1928''1929''1930''1931''1932''1933''1934''1935''1936''1937''1938''1939''1940''1941''1942''1943''1944''1945''1946''1947''1948''1949''1950''1951''1952''1953''1954''1955''1956''1957''1958''1959''1960''1961''1962''1963''1964''1965''1966''1967''1968''1969''1970''1971''1972''1973''1974''1975''1976''1977''1978''1979''1980''1981''1982''1983''1984''1985''1986''1987''1988''1989''1990''1991''1992''1993''1994''1995''1996''1997''1998''1999''2000' rand2[double precision]:1578 + table public.toasttable: UPDATE: old-key: id[integer]:1 new-tuple: id[integer]:1 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892
893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718
08180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:'1''2''3''4''5''6''7''8''9''10''11''12''13''14''15''16''17''18''19''20''21''22''23''24''25''26''27''28''29''30''31''32''33''34''35''36''37''38''39''40''41''42''43''44''45''46''47''48''49''50''51''52''53''54''55''56''57''58''59''60''61''62''63''64''65''66''67''68''69''70''71''72''73''74''75''76''77''78''79''80''81''82''83''84''85''86''87''88''89''90''91''92''93''94''95''96''97''98''99''100''101''102''103''104''105''106''107''108''109''110''111''112''113''114''115''116''117''118''119''120''121''122''123''124''125''126''127''128''129''130''131''132''133''134''135''136''137''138''139''140''141''142''143''144''145''146''147''148''149''150''151''152''153''154''155''156''157''158''159''160''161''162''163''164''165''166''167''168''169''170''171''172''173''174''175''176''177''178''179''180''181''182''183''184''185''186''187''188''189''190''191''192''193''194''195''196''197''198''199''200''201''202''203''204''205''206''207''208''209''210''211''212''213''214''215''216''217''218''219''220''221''222''223''224''225''226''227''228''229''230''231''232''233''234''235''236''237''238''239''240''241''242''243''244''245''246''247''248''249''250''251''252''253''254''255''256''257''258''259''260''261''262''263''264''265''266''267''268''269''270''271''272''273''274''275''276''277''278''279''280''281''282''283''284''285''286''287''288''289''290''291''292''293''294''295''296''297''298''299''300''301''302''303''304''305''306''307''308''309''310''311''312''313''314''315''316''317''318''319''320''321''322''323''324''325''326''327''328''329''330''331''332''333''334''335''336''337''338''339''340''341''342''343''344''345''346''347''348''349''350''351''352''353''354''355''356''357''358''359''360''361''362''363''364''365''366''367''368''369''370''371''372''373''374''375''376''377''378''379''380''381''382''383''384''385''386''387''388''389''390''391''392''393''394''395''396''397''398''399''400''401''402''403''404''405''406''407''408''409''410''411''412''413''414''415''416''417''418''419''420''421''422''423''424''425''426''427''428''429''430''431''432''433''434''435''436''437''438''439''440''441''442''443''444''445''446''447''448''449''450''451''452''453''454''455''456''457''458''459''460''461''462''463''464''465''466''467''468''469''470''471''472''473''474''475''476''477''478''479''480''481''482''483''484''485''486''487''488''489''490''491''492''493''494''495''496''497''498''499''500''501''502''503''504''505''506''507''508''509''510''511''512''513''514''515''516''517''518''519''520''521''522''523''524''525''526''527''528''529''530''531''532''533''534''535''536''537''538''539''540''541''542''543''544''545''546''547''548''549''550''551''552''553''554''555''556''557''558''559''560''561''562''563''564''565''566''567''568''569'
'570''571''572''573''574''575''576''577''578''579''580''581''582''583''584''585''586''587''588''589''590''591''592''593''594''595''596''597''598''599''600''601''602''603''604''605''606''607''608''609''610''611''612''613''614''615''616''617''618''619''620''621''622''623''624''625''626''627''628''629''630''631''632''633''634''635''636''637''638''639''640''641''642''643''644''645''646''647''648''649''650''651''652''653''654''655''656''657''658''659''660''661''662''663''664''665''666''667''668''669''670''671''672''673''674''675''676''677''678''679''680''681''682''683''684''685''686''687''688''689''690''691''692''693''694''695''696''697''698''699''700''701''702''703''704''705''706''707''708''709''710''711''712''713''714''715''716''717''718''719''720''721''722''723''724''725''726''727''728''729''730''731''732''733''734''735''736''737''738''739''740''741''742''743''744''745''746''747''748''749''750''751''752''753''754''755''756''757''758''759''760''761''762''763''764''765''766''767''768''769''770''771''772''773''774''775''776''777''778''779''780''781''782''783''784''785''786''787''788''789''790''791''792''793''794''795''796''797''798''799''800''801''802''803''804''805''806''807''808''809''810''811''812''813''814''815''816''817''818''819''820''821''822''823''824''825''826''827''828''829''830''831''832''833''834''835''836''837''838''839''840''841''842''843''844''845''846''847''848''849''850''851''852''853''854''855''856''857''858''859''860''861''862''863''864''865''866''867''868''869''870''871''872''873''874''875''876''877''878''879''880''881''882''883''884''885''886''887''888''889''890''891''892''893''894''895''896''897''898''899''900''901''902''903''904''905''906''907''908''909''910''911''912''913''914''915''916''917''918''919''920''921''922''923''924''925''926''927''928''929''930''931''932''933''934''935''936''937''938''939''940''941''942''943''944''945''946''947''948''949''950''951''952''953''954''955''956''957''958''959''960''961''962''963''964''965''966''967''968''969''970''971''972''973''974''975''976''977''978''979''980''981''982''983''984''985''986''987''988''989''990''991''992''993''994''995''996''997''998''999''1000''1001''1002''1003''1004''1005''1006''1007''1008''1009''1010''1011''1012''1013''1014''1015''1016''1017''1018''1019''1020''1021''1022''1023''1024''1025''1026''1027''1028''1029''1030''1031''1032''1033''1034''1035''1036''1037''1038''1039''1040''1041''1042''1043''1044''1045''1046''1047''1048''1049''1050''1051''1052''1053''1054''1055''1056''1057''1058''1059''1060''1061''1062''1063''1064''1065''1066''1067''1068''1069''1070''1071''1072''1073''1074''1075''1076''1077''1078''1079''1080''1081''1082''1083''1084''1085''1086''1087''1088''1089''1090''1091''1092''1093''1094''1095''1096''1097''1098''1099''1100''1101''1102''1103''1104''1105''1106''1107''1108''1109''1110''1111''1112''1113''1114''1115''1116''1117''1118''1119''1120''1121''1122''1123''1124''1125''1126''1127''1128''1129''1130''1131''1132''1133''1134''1135''1136''1137''1138''1139''1140''1141''1142''1143''1144''1145''1146''1147''1148''1149''1150''1151''1152''1153''1154''1155''1156''1157''1158''1159''1160''1161''1162''1163''1164''1165''1166''1167''1168''1169''1170''1171''1172''1173''1174''1175''1176''1177''1178''1179''1180''1181''1182''1183''1184''1185''1186''1187''1188''1189''1190''1191''1192''1193''1194''1195''1196''1197''1198''1199''1200''1201''1202''1203''1204''1205''1206''1207''1208''1209''1210''1211''1212''1213''1214''1215''1216''1217''1218''1219''1220''1221''1222''1223''1224''1225''1226''1227''1228''1229''1230''1231''1232''1233''
1234''1235''1236''1237''1238''1239''1240''1241''1242''1243''1244''1245''1246''1247''1248''1249''1250''1251''1252''1253''1254''1255''1256''1257''1258''1259''1260''1261''1262''1263''1264''1265''1266''1267''1268''1269''1270''1271''1272''1273''1274''1275''1276''1277''1278''1279''1280''1281''1282''1283''1284''1285''1286''1287''1288''1289''1290''1291''1292''1293''1294''1295''1296''1297''1298''1299''1300''1301''1302''1303''1304''1305''1306''1307''1308''1309''1310''1311''1312''1313''1314''1315''1316''1317''1318''1319''1320''1321''1322''1323''1324''1325''1326''1327''1328''1329''1330''1331''1332''1333''1334''1335''1336''1337''1338''1339''1340''1341''1342''1343''1344''1345''1346''1347''1348''1349''1350''1351''1352''1353''1354''1355''1356''1357''1358''1359''1360''1361''1362''1363''1364''1365''1366''1367''1368''1369''1370''1371''1372''1373''1374''1375''1376''1377''1378''1379''1380''1381''1382''1383''1384''1385''1386''1387''1388''1389''1390''1391''1392''1393''1394''1395''1396''1397''1398''1399''1400''1401''1402''1403''1404''1405''1406''1407''1408''1409''1410''1411''1412''1413''1414''1415''1416''1417''1418''1419''1420''1421''1422''1423''1424''1425''1426''1427''1428''1429''1430''1431''1432''1433''1434''1435''1436''1437''1438''1439''1440''1441''1442''1443''1444''1445''1446''1447''1448''1449''1450''1451''1452''1453''1454''1455''1456''1457''1458''1459''1460''1461''1462''1463''1464''1465''1466''1467''1468''1469''1470''1471''1472''1473''1474''1475''1476''1477''1478''1479''1480''1481''1482''1483''1484''1485''1486''1487''1488''1489''1490''1491''1492''1493''1494''1495''1496''1497''1498''1499''1500''1501''1502''1503''1504''1505''1506''1507''1508''1509''1510''1511''1512''1513''1514''1515''1516''1517''1518''1519''1520''1521''1522''1523''1524''1525''1526''1527''1528''1529''1530''1531''1532''1533''1534''1535''1536''1537''1538''1539''1540''1541''1542''1543''1544''1545''1546''1547''1548''1549''1550''1551''1552''1553''1554''1555''1556''1557''1558''1559''1560''1561''1562''1563''1564''1565''1566''1567''1568''1569''1570''1571''1572''1573''1574''1575''1576''1577''1578''1579''1580''1581''1582''1583''1584''1585''1586''1587''1588''1589''1590''1591''1592''1593''1594''1595''1596''1597''1598''1599''1600''1601''1602''1603''1604''1605''1606''1607''1608''1609''1610''1611''1612''1613''1614''1615''1616''1617''1618''1619''1620''1621''1622''1623''1624''1625''1626''1627''1628''1629''1630''1631''1632''1633''1634''1635''1636''1637''1638''1639''1640''1641''1642''1643''1644''1645''1646''1647''1648''1649''1650''1651''1652''1653''1654''1655''1656''1657''1658''1659''1660''1661''1662''1663''1664''1665''1666''1667''1668''1669''1670''1671''1672''1673''1674''1675''1676''1677''1678''1679''1680''1681''1682''1683''1684''1685''1686''1687''1688''1689''1690''1691''1692''1693''1694''1695''1696''1697''1698''1699''1700''1701''1702''1703''1704''1705''1706''1707''1708''1709''1710''1711''1712''1713''1714''1715''1716''1717''1718''1719''1720''1721''1722''1723''1724''1725''1726''1727''1728''1729''1730''1731''1732''1733''1734''1735''1736''1737''1738''1739''1740''1741''1742''1743''1744''1745''1746''1747''1748''1749''1750''1751''1752''1753''1754''1755''1756''1757''1758''1759''1760''1761''1762''1763''1764''1765''1766''1767''1768''1769''1770''1771''1772''1773''1774''1775''1776''1777''1778''1779''1780''1781''1782''1783''1784''1785''1786''1787''1788''1789''1790''1791''1792''1793''1794''1795''1796''1797''1798''1799''1800''1801''1802''1803''1804''1805''1806''1807''1808''1809''1810''1811''1812''1813''1814''1815''1816''1817''1818''1819''1820''1821''1822''1823''1824''1825''182
6''1827''1828''1829''1830''1831''1832''1833''1834''1835''1836''1837''1838''1839''1840''1841''1842''1843''1844''1845''1846''1847''1848''1849''1850''1851''1852''1853''1854''1855''1856''1857''1858''1859''1860''1861''1862''1863''1864''1865''1866''1867''1868''1869''1870''1871''1872''1873''1874''1875''1876''1877''1878''1879''1880''1881''1882''1883''1884''1885''1886''1887''1888''1889''1890''1891''1892''1893''1894''1895''1896''1897''1898''1899''1900''1901''1902''1903''1904''1905''1906''1907''1908''1909''1910''1911''1912''1913''1914''1915''1916''1917''1918''1919''1920''1921''1922''1923''1924''1925''1926''1927''1928''1929''1930''1931''1932''1933''1934''1935''1936''1937''1938''1939''1940''1941''1942''1943''1944''1945''1946''1947''1948''1949''1950''1951''1952''1953''1954''1955''1956''1957''1958''1959''1960''1961''1962''1963''1964''1965''1966''1967''1968''1969''1970''1971''1972''1973''1974''1975''1976''1977''1978''1979''1980''1981''1982''1983''1984''1985''1986''1987''1988''1989''1990''1991''1992''1993''1994''1995''1996''1997''1998''1999''2000' rand2[double precision]:1578
 --?.*
 BEGIN
 table public.bmsql_order_line: INSERT: ol_w_id[integer]:1 ol_d_id[integer]:1 ol_o_id[integer]:1 ol_number[integer]:1 ol_i_id[integer]:1 ol_delivery_d[timestamp without time zone]:null ol_amount[numeric]:null ol_supply_w_id[integer]:null ol_quantity[integer]:null ol_dist_info[character]:'123 '
 --?.*
 BEGIN
- table public.bmsql_order_line: UPDATE: ol_w_id[integer]:1 ol_d_id[integer]:1 ol_o_id[integer]:1 ol_number[integer]:1 ol_i_id[integer]:1 ol_delivery_d[timestamp without time zone]:null ol_amount[numeric]:null ol_supply_w_id[integer]:null ol_quantity[integer]:null ol_dist_info[character]:'ss '
+ table public.bmsql_order_line: UPDATE: old-key: ol_w_id[integer]:1 ol_d_id[integer]:1 ol_o_id[integer]:1 ol_number[integer]:1 new-tuple: ol_w_id[integer]:1 ol_d_id[integer]:1 ol_o_id[integer]:1 ol_number[integer]:1 ol_i_id[integer]:1 ol_delivery_d[timestamp without time zone]:null ol_amount[numeric]:null ol_supply_w_id[integer]:null ol_quantity[integer]:null ol_dist_info[character]:'ss '
 --?.*
 BEGIN
 table public.bmsql_order_line: DELETE: ol_w_id[integer]:1 ol_d_id[integer]:1 ol_o_id[integer]:1 ol_number[integer]:1
diff --git a/src/test/regress/output/dfs_dts002.source b/src/test/regress/output/dfs_dts002.source
index 7a19ce427..04f3a22c0 100644
--- a/src/test/regress/output/dfs_dts002.source
+++ b/src/test/regress/output/dfs_dts002.source
@@ -1068,8 +1068,8 @@ set enable_global_stats=off;
 analyze empty_part_table;
 drop table empty_part_table;
 reset cstore_insert_mode;
-create schema DTS2017052409444;
-set current_schema=DTS2017052409444;
+create schema testschema;
+set current_schema=testschema;
 set cstore_insert_mode = main;
 create table household_demographics_h
 (
@@ -1124,10 +1124,10 @@ drop foreign table call_center_f;
 drop table household_demographics_h cascade;
 drop table call_center_h cascade;
 drop procedure test;
-drop schema DTS2017052409444 cascade;
+drop schema testschema cascade;
 set current_schema = dfs;
-create schema DTS2017072002456;
-set current_schema=DTS2017072002456;
+create schema testschema2;
+set current_schema=testschema2;
 set cstore_insert_mode = main;
 create table item_inventory_hdfs
 (
@@ -1180,7 +1180,7 @@ from (select location_id b, count(*) as cc
   8 |  21
 (6 rows)
-drop schema DTS2017072002456 cascade;
+drop schema testschema2 cascade;
 NOTICE: drop cascades to 2 other objects
 DETAIL: drop cascades to table item_inventory_hdfs
 drop cascades to foreign table item_inventory
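The test_decoding hunks above capture a format change: a decoded UPDATE now reports the replica-identity columns under an explicit `old-key:` label before the `new-tuple:` section, instead of printing only the new tuple. A minimal sketch of reproducing that shape on a scratch table — the table `t` and slot `demo_slot` are illustrative, not part of this patch, and `wal_level = logical` is assumed:

```
-- Illustrative only: observe the old-key/new-tuple UPDATE format.
-- Assumes wal_level = logical; table and slot names are hypothetical.
create table t(id integer primary key, payload text);
select * from pg_create_logical_replication_slot('demo_slot', 'test_decoding');
insert into t values(1, 'a');
update t set payload = 'b' where id = 1;
-- With the default replica identity, old-key lists only the key column(s).
select data from pg_logical_slot_get_changes('demo_slot', NULL, NULL, 'include-xids', '0');
select pg_drop_replication_slot('demo_slot');
```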
diff --git a/src/test/regress/output/dw_switch.source b/src/test/regress/output/dw_switch.source
new file mode 100644
index 000000000..533f3da84
--- /dev/null
+++ b/src/test/regress/output/dw_switch.source
@@ -0,0 +1,24 @@
+\! @abs_bindir@/gs_guc set -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_double_write=off"
+The gs_guc run with the following arguments: [@abs_srcdir@/tmp_check/install//home/liyifeng/gaussMaster/gauss_app1/bin/gs_guc -D @abs_srcdir@/tmp_check/datanode1/ -c enable_double_write=off set ].
+expected instance path: [@abs_srcdir@/tmp_check/datanode1/postgresql.conf]
+gs_guc set: enable_double_write=off: [@abs_srcdir@/tmp_check/datanode1/postgresql.conf]
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! sleep 5
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! sleep 5
+\! @abs_bindir@/gs_guc set -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_double_write=on"
+The gs_guc run with the following arguments: [@abs_srcdir@/tmp_check/install//home/liyifeng/gaussMaster/gauss_app1/bin/gs_guc -D @abs_srcdir@/tmp_check/datanode1/ -c enable_double_write=on set ].
+expected instance path: [@abs_srcdir@/tmp_check/datanode1/postgresql.conf]
+gs_guc set: enable_double_write=on: [@abs_srcdir@/tmp_check/datanode1/postgresql.conf]
+
+Total instances: 1. Failed instances: 0.
+Success to perform gs_guc!
+
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
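dw_switch.source above toggles `enable_double_write` with `gs_guc set`, which only rewrites postgresql.conf without signalling the running server, so the test bounces the instance with `gs_ctl stop`/`gs_ctl start` before relying on the new value. The same pattern in isolation — `DATADIR` is a placeholder, and the final `show` is an illustrative verification, not taken from the test:

```
-- Sketch of the set-then-restart pattern; DATADIR is a placeholder path.
\! gs_guc set -D DATADIR -c "enable_double_write=off"
\! gs_ctl stop -D DATADIR > /dev/null
\! gs_ctl start -D DATADIR > /dev/null
-- Hypothetical check that the restarted instance picked up the new value.
show enable_double_write;
```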
diff --git a/src/test/regress/output/fdw_audit.source b/src/test/regress/output/fdw_audit.source
new file mode 100644
index 000000000..d30d71e30
--- /dev/null
+++ b/src/test/regress/output/fdw_audit.source
@@ -0,0 +1,47 @@
+--clear audit log
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit 
+-----------------
+ 
+(1 row)
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=67108863" > /dev/null 2>&1
+CREATE ROLE regress_test_foreign SYSADMIN IDENTIFIED BY 'test-1234';
+CREATE FOREIGN DATA WRAPPER fdw_audit_dummy;
+ALTER FOREIGN DATA WRAPPER fdw_audit_dummy NO VALIDATOR;
+ALTER FOREIGN DATA WRAPPER fdw_audit_dummy OPTIONS (a '1', b '2');
+ALTER FOREIGN DATA WRAPPER fdw_audit_dummy OWNER TO regress_test_foreign;
+ALTER FOREIGN DATA WRAPPER fdw_audit_dummy RENAME TO fdw_audit_dummy2;
+CREATE SERVER s1 FOREIGN DATA WRAPPER fdw_audit_dummy2;
+ALTER SERVER s1 VERSION '1.1';
+ALTER SERVER s1 OPTIONS (connect_timeout '30');
+ALTER SERVER s1 OWNER TO regress_test_foreign;
+ALTER SERVER s1 RENAME to s1new;
+DROP SERVER s1new;
+DROP FOREIGN DATA WRAPPER IF EXISTS fdw_audit_dummy2;
+DROP ROLE regress_test_foreign;
+SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_foreign_data_wrapper' or type = 'ddl_serverforhadoop' order by object_name,detail_info;
+   object_name    |                                 detail_info                                 
+------------------+-----------------------------------------------------------------------------
+ fdw_audit_dummy  | ALTER FOREIGN DATA WRAPPER fdw_audit_dummy NO VALIDATOR;
+ fdw_audit_dummy  | ALTER FOREIGN DATA WRAPPER fdw_audit_dummy OPTIONS (a '1', b '2');
+ fdw_audit_dummy  | ALTER FOREIGN DATA WRAPPER fdw_audit_dummy OWNER TO regress_test_foreign;
+ fdw_audit_dummy  | ALTER FOREIGN DATA WRAPPER fdw_audit_dummy RENAME TO fdw_audit_dummy2;
+ fdw_audit_dummy  | CREATE FOREIGN DATA WRAPPER fdw_audit_dummy;
+ fdw_audit_dummy2 | DROP FOREIGN DATA WRAPPER IF EXISTS fdw_audit_dummy2;
+ s1               | ALTER SERVER s1 OPTIONS (connect_timeout '30');
+ s1               | ALTER SERVER s1 OWNER TO regress_test_foreign;
+ s1               | ALTER SERVER s1 RENAME to s1new;
+ s1               | ALTER SERVER s1 VERSION '1.1';
+ s1               | CREATE SERVER s1 FOREIGN DATA WRAPPER fdw_audit_dummy2;
+ s1new            | DROP SERVER s1new;
+(12 rows)
+
+--clear audit log
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit 
+-----------------
+ 
+(1 row)
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
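fdw_audit.source widens `audit_system_object` to 67108863 before running the foreign-data-wrapper DDL; the value reads as a bitmask with one bit per auditable object type, since 67108863 = 2^26 - 1 (and the 33554431 used in the next test is 2^25 - 1). A quick arithmetic check — the query is illustrative, not part of the patch:

```
-- Hypothetical sanity check of the bitmask constants used by these tests.
select 67108863 = (1::bigint << 26) - 1 as all_26_bits_set,
       33554431 = (1::bigint << 25) - 1 as all_25_bits_set;
show audit_system_object;
```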
diff --git a/src/test/regress/output/gs_global_config_audit.source b/src/test/regress/output/gs_global_config_audit.source
new file mode 100644
index 000000000..66c5779f6
--- /dev/null
+++ b/src/test/regress/output/gs_global_config_audit.source
@@ -0,0 +1,56 @@
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_enabled=on" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=33554431" > /dev/null 2>&1
+show audit_enabled;
+ audit_enabled
+---------------
+ on
+(1 row)
+
+show audit_system_object;
+ audit_system_object
+---------------------
+ 33554431
+(1 row)
+
+SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig';
+ object_name | detail_info
+-------------+-------------
+(0 rows)
+
+select * from gs_global_config;
+    name     | value
+-------------+-------
+ buckets_len | 16384
+(1 row)
+
+ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2);
+ALTER GLOBAL CONFIGURATION with(last_catchup_threshold=5000);
+select * from gs_global_config;
+          name          | value
+------------------------+-------
+ buckets_len            | 16384
+ lockwait_timeout       | 2000
+ lockwait_interval      | 2
+ last_catchup_threshold | 5000
+(4 rows)
+
+DROP GLOBAL CONFIGURATION lockwait_timeout;
+DROP GLOBAL CONFIGURATION last_catchup_threshold, lockwait_interval;
+select * from gs_global_config;
+    name     | value
+-------------+-------
+ buckets_len | 16384
+(1 row)
+
+SELECT object_name,detail_info FROM pg_query_audit('2022-02-01 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_globalconfig';
+      object_name       |                                  detail_info
+------------------------+-------------------------------------------------------------------------------
+ lockwait_timeout       | ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2);
+ lockwait_interval      | ALTER GLOBAL CONFIGURATION with(lockwait_timeout=2000, lockwait_interval=2);
+ last_catchup_threshold | ALTER GLOBAL CONFIGURATION with(last_catchup_threshold=5000);
+ lockwait_timeout       | DROP GLOBAL CONFIGURATION lockwait_timeout;
+ last_catchup_threshold | DROP GLOBAL CONFIGURATION last_catchup_threshold, lockwait_interval;
+ lockwait_interval      | DROP GLOBAL CONFIGURATION last_catchup_threshold, lockwait_interval;
+(6 rows)
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
diff --git a/src/test/regress/output/gsc_db.source b/src/test/regress/output/gsc_db.source
new file mode 100644
index 000000000..73a59a820
--- /dev/null
+++ b/src/test/regress/output/gsc_db.source
@@ -0,0 +1,38 @@
+create database gsc1;
+\c gsc1
+create table gsc1_t1(c1 int);
+insert into gsc1_t1 values(1);
+create database gsc2;
+\c gsc2
+create table gsc2_t1(c1 int);
+insert into gsc2_t1 values(2);
+\! @abs_bindir@/gsql -p @portstring@ -d gsc1 -c "select * from gsc1_t1;" > /dev/null 2>&1 &
+\! @abs_bindir@/gsql -p @portstring@ -d gsc2 -c "select * from gsc2_t1;" > /dev/null 2>&1 &
+\c postgres
+select pg_sleep(0.5);
+ pg_sleep
+----------
+
+(1 row)
+
+alter database gsc1 rename to gsc_temp;
+alter database gsc2 rename to gsc1;
+alter database gsc_temp rename to gsc2;
+\c gsc1
+select * from gsc2_t1;
+ c1
+----
+  2
+(1 row)
+
+\c gsc2
+select * from gsc1_t1;
+ c1
+----
+  1
+(1 row)
+
+\c postgres
+drop database gsc1;
+drop database gsc2;
+\c regression
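The gsc_db case above swaps two database names out from under previously opened connections to exercise name-to-oid invalidation in the global syscache. A hedged sketch of the same post-rename check driven entirely from the shell, using the conventions above:

```bash
# After the three-way rename, the names gsc1/gsc2 are swapped but each
# database keeps its contents, so gsc1 should now contain gsc2_t1.
@abs_bindir@/gsql -r -p @portstring@ -d gsc1 -c "select * from gsc2_t1;"
@abs_bindir@/gsql -r -p @portstring@ -d gsc2 -c "select * from gsc1_t1;"
```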
diff --git a/src/test/regress/output/guc_help.source b/src/test/regress/output/guc_help.source
new file mode 100644
index 000000000..05d95c797
--- /dev/null
+++ b/src/test/regress/output/guc_help.source
@@ -0,0 +1,64 @@
+\! @abs_bindir@/gs_guc -?
+gs_guc is an interface to modify config files or encrypt plain text to cipher text.
+
+Checking GUC parameters:
+    gs_guc check [-Z NODE-TYPE] -D DATADIR {-c "parameter", -c "parameter", ...}
+    gs_guc check [-Z NODE-TYPE] -D DATADIR {-c parameter, -c parameter, ...}
+
+Configuring GUC parameters:
+  Usage:
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--lcname=LCNAME] [--ignore-node=NODES] {-c "parameter = value" -c "parameter = value" ...}
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--lcname=LCNAME] [--ignore-node=NODES] {-c " parameter = value " -c " parameter = value " ...}
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--lcname=LCNAME] [--ignore-node=NODES] {-c "parameter = 'value'" -c "parameter = 'value'" ...}
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--lcname=LCNAME] [--ignore-node=NODES] {-c " parameter = 'value' " -c " parameter = 'value' " ...}
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--lcname=LCNAME] [--ignore-node=NODES] {-c "parameter" -c "parameter" ...}
+    e.g. gs_guc set -Z datanode -D /datanode/data -c "program = '\"Hello\", World\!'".
+    e.g. gs_guc reload -Z datanode -D /datanode/data -c "program = '\"Hello\", World\!'".
+
+  To set or reload a parameter to DEFAULT, or to comment it out in the configuration file, use the form: -c "parameter"
+
+  You can use whichever usage form you prefer; the first one is usually the most convenient.
+
+Configuring Authentication Policy:
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--ignore-node=NODES] -h "HOSTTYPE DATABASE USERNAME IPADDR IPMASK AUTHMETHOD authentication-options"
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--ignore-node=NODES] -h "HOSTTYPE DATABASE USERNAME IPADDR-WITH-IPMASK AUTHMETHOD authentication-options"
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--ignore-node=NODES] -h "HOSTTYPE DATABASE USERNAME HOSTNAME AUTHMETHOD authentication-options"
+  To set/reload an authentication policy to DEFAULT or to comment it out, omit the authentication method and use the form:
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--ignore-node=NODES] -h "HOSTTYPE DATABASE USERNAME IPADDR IPMASK"
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--ignore-node=NODES] -h "HOSTTYPE DATABASE USERNAME IPADDR-WITH-IPMASK "
+    gs_guc {set | reload} [-Z NODE-TYPE] -D DATADIR [--ignore-node=NODES] -h "HOSTTYPE DATABASE USERNAME HOSTNAME"
+
+Encrypt plain text to cipher text:
+    gs_guc encrypt [-M keymode] -K password [-U username] {-D DATADIR | -R RANDFILEDIR -C CIPHERFILEDIR}
+
+Generate cipher text from a plain cipher key:
+    gs_guc generate [-o prefix] -S cipherkey -D DATADIR
+
+Common options:
+  -D, --pgdata=DATADIR   location of the database storage area
+  -c  parameter=value    the parameter to set
+  -c  parameter          set the parameter to DEFAULT (i.e. comment it out in the configuration file)
+  --lcname=LCNAME        logical cluster name. It can only be used with datanode
+  --ignore-node=NODES    nodes to ignore. It can only be used with the set/reload operation and CN/DN node types
+  -h  host-auth-policy   set the authentication policy in the HBA conf file
+  -?, --help             show this help, then exit
+  -V, --version          output version information, then exit
+
+Options for set with -c parameter:
+  -Z NODE-TYPE   can only be "datanode", default is "datanode". NODE-TYPE is used to identify the configuration file (with -c parameter) in the data directory
+                 "datanode" -- postgresql.conf
+
+Options for set and reload with -h host-auth-policy:
+  -Z NODE-TYPE   can only be "datanode", default is "datanode"
+
+Options for encrypt:
+  -M, --keymode=MODE     the cipher files will be applied in server, client or source mode; the default value is server mode
+  -K PASSWORD            the plain password you want to encrypt; its length must be between 8 and 16 and it must contain at least 3 different types of characters
+  -U, --keyuser=USER     if specified, the cipher files will be named with the user name
+  -R RANDFILEDIR         set the directory to put the rand file in
+  -C CIPHERFILEDIR       set the directory to put the cipher file in
+
+Options for generate:
+  -o PREFIX              the cipher file prefix. The default value is obsserver
+  -S CIPHERKEY           the plain cipher key you want to encrypt; its length must be between 8 and 16 and it must contain at least 3 different types of characters
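A hedged usage sketch of the `check` form documented above, pointed at the regression datanode; the syntax follows the help text verbatim, while the exact report format is version-dependent:

```bash
# Print the configured file value of a parameter without changing it.
@abs_bindir@/gs_guc check -Z datanode \
    -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_enabled"
```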
diff --git a/src/test/regress/output/hw_audit_detailinfo.source b/src/test/regress/output/hw_audit_detailinfo.source
new file mode 100644
index 000000000..514db5c58
--- /dev/null
+++ b/src/test/regress/output/hw_audit_detailinfo.source
@@ -0,0 +1,144 @@
+-- initial state: switch off, query the audit log
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+show audit_xid_info;
+ audit_xid_info
+----------------
+ 0
+(1 row)
+
+-- a session-level change of the parameter is rejected
+set audit_xid_info=1;
+ERROR: parameter "audit_xid_info" cannot be changed now
+-- turn on the xid switch
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_xid_info=1" > /dev/null 2>&1
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+show audit_xid_info;
+ audit_xid_info
+----------------
+ 1
+(1 row)
+
+CREATE USER audit_user1 PASSWORD 'Gauss@123';
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='ddl_user' and object_name='audit_user1';
+                       detail_info
+---------------------------------------------------------
+ xid=XIDNUM, CREATE USER audit_user1 PASSWORD '********';
+(1 row)
+
+-- DDL
+create database db_audit1;
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='ddl_database';
+              detail_info
+---------------------------------------
+ xid=XIDNUM, create database db_audit1;
+(1 row)
+
+-- DCL
+create table t1(id int);
+grant all on table t1 to audit_user1;
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='grant_role' and object_name='audit_user1';
+                   detail_info
+--------------------------------------------------
+ xid=XIDNUM, grant all on table t1 to audit_user1;
+(1 row)
+
+-- DML
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+show audit_dml_state;
+ audit_dml_state
+-----------------
+ 1
+(1 row)
+
+create table t2(id int);
+insert into t2 values(1);
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='dml_action' and object_name='t2';
+             detail_info
+--------------------------------------
+ xid=XIDNUM, insert into t2 values(1);
+(1 row)
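With `audit_xid_info=1`, every `detail_info` above carries an `xid=<number>, ` prefix, presumably normalized to `XIDNUM` by the post-case script invoked at the end of this case. A hedged helper for comparing audit rows against plain SQL text regardless of the transaction id, assuming the prefix format shown above:

```bash
# Strip the xid prefix so audit rows can be diffed against the original SQL.
@abs_bindir@/gsql -r -p @portstring@ -d postgres -c \
  "select regexp_replace(detail_info, '^xid=[0-9]+, ', '')
     from pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00')
    where type = 'dml_action';"
```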
+-- DQL
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=1" > /dev/null 2>&1
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+show audit_dml_state_select;
+ audit_dml_state_select
+------------------------
+ 1
+(1 row)
+
+create table t3(id int);
+select * from t3;
+ id
+----
+(0 rows)
+
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type='dml_action_select' and object_name='t3';
+         detail_info
+------------------------------
+ xid=XIDNUM, select * from t3;
+(1 row)
+
+-- turn off the xid switch
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_xid_info=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=0" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state_select=0" > /dev/null 2>&1
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+show audit_xid_info;
+ audit_xid_info
+----------------
+ 0
+(1 row)
+
+show audit_dml_state;
+ audit_dml_state
+-----------------
+ 0
+(1 row)
+
+show audit_dml_state_select;
+ audit_dml_state_select
+------------------------
+ 0
+(1 row)
+
+-- restore
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+drop table t1;
+drop table t2;
+drop table t3;
+drop user audit_user1;
+drop database db_audit1;
+/* mask the hard-coded xid values */
+\! security_scripts/post_case_audit.sh @abs_builddir@/results/hw_audit_detailinfo.out
diff --git a/src/test/regress/output/hw_audit_multi_thread.source b/src/test/regress/output/hw_audit_multi_thread.source
new file mode 100644
index 000000000..d3c390fbd
--- /dev/null
+++ b/src/test/regress/output/hw_audit_multi_thread.source
@@ -0,0 +1,92 @@
+-- generating audit logs
+create user user_audit_mul_x with password 'Gauss@123';
+create user user_audit_mul_y with password 'Gauss@123';
+create user user_audit_mul_z with password 'Gauss@123';
+select type,result,object_name,detail_info from pg_query_audit('1111-1-1','2222-2-2') where detail_info like '%create user user_audit_mul%'order by time;
+   type   | result |   object_name    |                      detail_info
+----------+--------+------------------+--------------------------------------------------------
+ ddl_user | ok     | user_audit_mul_x | create user user_audit_mul_x with password '********';
+ ddl_user | ok     | user_audit_mul_y | create user user_audit_mul_y with password '********';
+ ddl_user | ok     | user_audit_mul_z | create user user_audit_mul_z with password '********';
+(3 rows)
+
+-- change the audit thread number to 3
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_thread_num=3" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=6" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_set_parameter=0" > /dev/null 2>&1
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! sleep 5
+-- generating audit items
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from pg_delete_audit('0007-1-1','9999-12-31');";
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_001 with password 'Gauss@123'";
+CREATE ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_002 with password 'Gauss@123'";
+CREATE ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_003 with password 'Gauss@123'";
+CREATE ROLE
+-- check audit logs
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('1111-1-1','2222-2-2') where detail_info like '%create user user_audit_mul%'order by time;";
+   type   | result |    object_name     |                       detail_info
+----------+--------+--------------------+---------------------------------------------------------
+ ddl_user | ok     | user_audit_mul_001 | create user user_audit_mul_001 with password '********'
+ ddl_user | ok     | user_audit_mul_002 | create user user_audit_mul_002 with password '********'
+ ddl_user | ok     | user_audit_mul_003 | create user user_audit_mul_003 with password '********'
+(3 rows)
+
+-- change the audit thread number to 1
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_thread_num=1" > /dev/null 2>&1
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! sleep 5
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from pg_delete_audit('0007-1-1','9999-12-31');";
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_004 with password 'Gauss@123'";
+CREATE ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_005 with password 'Gauss@123'";
+CREATE ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "create user user_audit_mul_006 with password 'Gauss@123'";
+CREATE ROLE
+-- check audit logs
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('1111-1-1','2222-2-2') where detail_info like '%create user user_audit_mul%'order by time;";
+   type   | result |    object_name     |                       detail_info
+----------+--------+--------------------+---------------------------------------------------------
+ ddl_user | ok     | user_audit_mul_004 | create user user_audit_mul_004 with password '********'
+ ddl_user | ok     | user_audit_mul_005 | create user user_audit_mul_005 with password '********'
+ ddl_user | ok     | user_audit_mul_006 | create user user_audit_mul_006 with password '********'
+(3 rows)
+
+-- reset env
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_001";
+DROP ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_002";
+DROP ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_003";
+DROP ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_004";
+DROP ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_005";
+DROP ROLE
+\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop user user_audit_mul_006";
+DROP ROLE
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_thread_num" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_set_parameter" > /dev/null 2>&1
+\! @abs_bindir@/gs_ctl stop -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! @abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
+\! sleep 5
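Each `audit_thread_num` change above is a reload followed by a full stop/start, suggesting the audit writer thread count is only re-evaluated at startup. A hedged sketch of the minimal toggle-and-verify sequence:

```bash
# Change the audit writer thread count and confirm it after a restart.
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ \
    -c "audit_thread_num=3" > /dev/null 2>&1
@abs_bindir@/gs_ctl stop  -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
@abs_bindir@/gs_ctl start -D @abs_srcdir@/tmp_check/datanode1 > /dev/null
@abs_bindir@/gsql -r -p @portstring@ -d postgres -c "show audit_thread_num;"
```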
diff --git a/src/test/regress/output/hw_audit_rotation_interval.source b/src/test/regress/output/hw_audit_rotation_interval.source
new file mode 100644
index 000000000..0b792914c
--- /dev/null
+++ b/src/test/regress/output/hw_audit_rotation_interval.source
@@ -0,0 +1,7 @@
+-- set the GUC parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_interval=1" > /dev/null 2>&1
+-- after a 60s interval, fetch the number of the newest adt file in pg_audit and check that it increased by 1
+\! num1=`expr $(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -lt | head -n 2 | awk '{print $9}')| tr -cd "[0-9]") + 1` && sleep 1m && num2=$(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -lt | head -n 2 | awk '{print $9}')| tr -cd "[0-9]") && [[ $num1 == $num2 ]] && echo 'added a new log after the 60-second interval' || echo 'failed to add new logs'
+added a new log after the 60-second interval
+-- restore the GUC parameter
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_interval" > /dev/null 2>&1
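The rotation check above is a dense one-liner; a hedged, readable equivalent, assuming (as the test does) that the most recently modified entry under pg_audit is the current `*_adt` file:

```bash
dir=@abs_srcdir@/tmp_check/datanode1/pg_audit
num1=$(ls -t "$dir" | head -n 1 | tr -cd '0-9')   # number of the newest audit file
sleep 60                                          # one rotation interval
num2=$(ls -t "$dir" | head -n 1 | tr -cd '0-9')
[ "$((num1 + 1))" -eq "$num2" ] && echo 'rotated' || echo 'no rotation'
```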
diff --git a/src/test/regress/output/hw_audit_rotation_size.source b/src/test/regress/output/hw_audit_rotation_size.source
new file mode 100644
index 000000000..ff760403f
--- /dev/null
+++ b/src/test/regress/output/hw_audit_rotation_size.source
@@ -0,0 +1,29 @@
+-- set the GUC parameters
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_size=2048" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state=1" > /dev/null 2>&1
+-- generate dml_action audit logs
+CREATE TABLE T_TEST_ROTATION_SIZE
+(
+    COL1 int4 DEFAULT 1,
+    COL2 VARCHAR(1024) DEFAULT 'test_rotation_size');
+CREATE OR REPLACE PROCEDURE TRANSACTION_TEST_ROTATION_SIZE()
+AS
+BEGIN
+FOR i IN 0..1000000 LOOP
+INSERT INTO T_TEST_ROTATION_SIZE(COL1, COL2) VALUES (i, 'test_time');
+COMMIT;
+END LOOP;
+END;
+/
+CALL TRANSACTION_TEST_ROTATION_SIZE();
+ transaction_test_rotation_size
+--------------------------------
+
+(1 row)
+
+-- extract the sizes of the newly generated files and compare them with 2.1M; the case succeeds if all of them are smaller than 2.1M
+\! flag=0 && for i in $(find @abs_srcdir@/tmp_check/datanode1/pg_audit -newermt $(date -d "-75 seconds" +%H:%M:%S) -name "*_adt"); do size=$(du -h --exclude=index_table_new $i | grep -oP '\d*\.\d+M'); if [[ $size > '2.1M' ]]; then flag=1; echo $size; echo $i; fi; done && [[ "$flag" == 0 ]] && echo 'all the logs are less than 2.1M' || echo 'error -- some logs exceed limit'
+all the logs are less than 2.1M
+-- restore the GUC parameters
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_rotation_size" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state" > /dev/null 2>&1
diff --git a/src/test/regress/output/hw_audit_space.source b/src/test/regress/output/hw_audit_space.source
new file mode 100644
index 000000000..8b5919d12
--- /dev/null
+++ b/src/test/regress/output/hw_audit_space.source
@@ -0,0 +1,37 @@
+-- modify the GUC parameters
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_resource_policy = 1" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_space_limit = 512MB" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state =1" > /dev/null 2>&1
+-- get the number of the earliest adt file in pg_audit
+\! echo $(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -tr | head -2 | xargs) | tr -cd "[0-9]")
+
+-- generate dml_action audit logs
+CREATE TABLE T_TEST_SPACE
+(
+    COL1 int4 DEFAULT 1,
+    COL2 VARCHAR(1024) DEFAULT 'test_space');
+CREATE OR REPLACE PROCEDURE TRANSACTION_TEST_SPACE()
+AS
+BEGIN
+FOR i IN 0..3500000 LOOP
+INSERT INTO T_TEST_SPACE(COL1, COL2) VALUES (i, 'a');
+COMMIT;
+END LOOP;
+END;
+/
+CALL TRANSACTION_TEST_SPACE();
+ transaction_test_space
+------------------------
+
+(1 row)
+
+-- get the number of the earliest adt file in pg_audit and compare it with the original 0_adt, to check whether the oldest log file was deleted
+\! [[ $(echo $(ls @abs_srcdir@/tmp_check/datanode1/pg_audit -tr | head -2 | xargs) | tr -cd "[0-9]") > 1 ]] && echo 'deleted the oldest files' || echo 'failed to delete the oldest files'
+deleted the oldest files
+-- check whether the total size of the pg_audit files exceeds the configured 512MB
+\! [[ $(du -h --exclude=done @abs_srcdir@/tmp_check/datanode1/pg_audit | grep -oP '\d*M') > '530M' ]] && echo 'total size of logs exceeds the upper limit' || echo 'total size of logs does not exceed the upper limit'
+total size of logs does not exceed the upper limit
+-- restore the GUC parameters
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_resource_policy" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_space_limit" > /dev/null 2>&1
+\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_dml_state" > /dev/null 2>&1
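A hedged sketch for eyeballing the retention behaviour verified above: with `audit_resource_policy` set as in this case, the audit directory should stay near `audit_space_limit` as the oldest `*_adt` files are dropped:

```bash
dir=@abs_srcdir@/tmp_check/datanode1/pg_audit
du -sh --exclude=done "$dir"                     # total size; should stay near 512MB
ls "$dir" | grep '_adt$' | sort -n | head -n 1   # oldest surviving audit file
```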
diff --git a/src/test/regress/output/hw_audit_toughness.source b/src/test/regress/output/hw_audit_toughness.source
new file mode 100644
index 000000000..f2d0ee0db
--- /dev/null
+++ b/src/test/regress/output/hw_audit_toughness.source
@@ -0,0 +1,86 @@
+-- clean up historical audit logs
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+create user audit_fault_user1 with password 'Gauss@123';
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user1';
+                       detail_info
+---------------------------------------------------------
+ create user audit_fault_user1 with password '********';
+(1 row)
+
+-- fault 1: overwrite the current audit file
+\! cur_audit_file=`find @abs_srcdir@/tmp_check/datanode1/pg_audit -name "*_adt" | sort -r | head -1` && echo "fault 1: overwritten xxxxx" > $cur_audit_file
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user1';
+--?.*
+ detail_info
+-------------
+(0 rows)
+
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+create user audit_fault_user2 with password 'Gauss@123';
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user2';
+                       detail_info
+---------------------------------------------------------
+ create user audit_fault_user2 with password '********';
+(1 row)
+
+-- fault 2: append garbage to the current audit file
+\! cur_audit_file=`find @abs_srcdir@/tmp_check/datanode1/pg_audit -name "*_adt" | sort -r | head -1` && echo "fault 2: appending yyyyy" >> $cur_audit_file
+create user audit_fault_user3 with password 'Gauss@123';
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user3';
+--?.*
+ detail_info
+-------------
+(0 rows)
+
+create user audit_fault_user4 with password 'Gauss@123';
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT detail_info FROM pg_query_audit('2021-12-17 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'audit_fault_user4';
+                       detail_info
+---------------------------------------------------------
+ create user audit_fault_user4 with password '********';
+(1 row)
+
+-- restore
+drop user audit_fault_user1;
+drop user audit_fault_user2;
+drop user audit_fault_user3;
+drop user audit_fault_user4;
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
diff --git a/src/test/regress/output/hw_cstore_load2.source b/src/test/regress/output/hw_cstore_load2.source
index ff3486130..c98b7854b 100644
--- a/src/test/regress/output/hw_cstore_load2.source
+++ b/src/test/regress/output/hw_cstore_load2.source
@@ -401,17 +401,17 @@ DROP TABLE LINEITEM_PART_DATE;
 --
 --
 SET DATESTYLE='ISO, MDY';
-CREATE TABLE DTS2017011006555_TBL
+CREATE TABLE TESTTABLE_TBL
 (
     C_INT INT,
    c_timestamp timestamp,
    c_timestamp_w timestamp without time zone
 ) distribute by hash(C_INT);
-COPY DTS2017011006555_TBL FROM STDIN with (delimiter ',',timestamp_format 'yy/mm/dd/hh24/mi/ss');
-SELECT * FROM DTS2017011006555_TBL;
+COPY TESTTABLE_TBL FROM STDIN with (delimiter ',',timestamp_format 'yy/mm/dd/hh24/mi/ss');
+SELECT * FROM TESTTABLE_TBL;
  c_int |     c_timestamp     |    c_timestamp_w
 -------+---------------------+---------------------
      2 | 2000-01-01 00:00:00 | 2000-01-01 00:00:00
 (1 row)
 
-DROP TABLE DTS2017011006555_TBL;
+DROP TABLE TESTTABLE_TBL;
diff --git a/src/test/regress/output/hw_partition_merge1.source b/src/test/regress/output/hw_partition_merge1.source
index 8814c5756..11c94748a 100644
--- a/src/test/regress/output/hw_partition_merge1.source
+++ b/src/test/regress/output/hw_partition_merge1.source
@@ -130,7 +130,7 @@ select relname, boundaries, spcname from pg_partition, pg_tablespace where paren
 drop table test_merge_table_tablespace;
 --
 --
-CREATE TABLE DTS2016122106940_tbl
+CREATE TABLE TESTTABLE_tbl
 (
     id int,
     info varchar(200)
@@ -142,38 +142,38 @@ partition p1 values less than(100000),
 partition p2 values less than(300000),
 partition p3 values less than(maxvalue)
 );
-CREATE INDEX idx_dts2016122106940_tbl on DTS2016122106940_tbl(id) local (
+CREATE INDEX idx_testtable_tbl on TESTTABLE_tbl(id) local (
 partition idx_p1,
 partition idx_p2 tablespace partition_merge_ts1,
 partition idx_p3 tablespace partition_merge_ts2
 );
-select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_dts2016122106940_tbl' order by partition_name;
-        index_name         | partition_name | def_tablespace_name
---------------------------+----------------+---------------------
- idx_dts2016122106940_tbl | idx_p1         | DEFAULT TABLESPACE
- idx_dts2016122106940_tbl | idx_p2         | partition_merge_ts1
- idx_dts2016122106940_tbl | idx_p3         | partition_merge_ts2
+select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_testtable_tbl' order by partition_name;
+ 
index_name | partition_name | def_tablespace_name +-------------------+----------------+--------------------- + idx_testtable_tbl | idx_p1 | DEFAULT TABLESPACE + idx_testtable_tbl | idx_p2 | partition_merge_ts1 + idx_testtable_tbl | idx_p3 | partition_merge_ts2 (3 rows) -alter table DTS2016122106940_tbl merge partitions p2,p3 into partition p3; -select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_dts2016122106940_tbl' order by partition_name; - index_name | partition_name | def_tablespace_name ---------------------------+----------------+--------------------- - idx_dts2016122106940_tbl | idx_p1 | DEFAULT TABLESPACE - idx_dts2016122106940_tbl | idx_p3 | partition_merge_ts2 +alter table TESTTABLE_tbl merge partitions p2,p3 into partition p3; +select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_testtable_tbl' order by partition_name; + index_name | partition_name | def_tablespace_name +-------------------+----------------+--------------------- + idx_testtable_tbl | idx_p1 | DEFAULT TABLESPACE + idx_testtable_tbl | idx_p3 | partition_merge_ts2 (2 rows) START TRANSACTION; -alter table DTS2016122106940_tbl merge partitions p1,p3 into partition p3; -select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_dts2016122106940_tbl' order by partition_name; - index_name | partition_name | def_tablespace_name ---------------------------+----------------+--------------------- - idx_dts2016122106940_tbl | idx_p3 | partition_merge_ts2 +alter table TESTTABLE_tbl merge partitions p1,p3 into partition p3; +select index_name,partition_name,def_tablespace_name from dba_ind_partitions where index_name ='idx_testtable_tbl' order by partition_name; + index_name | partition_name | def_tablespace_name +-------------------+----------------+--------------------- + idx_testtable_tbl | idx_p3 | partition_merge_ts2 (1 row) ROLLBACK; -DROP INDEX idx_dts2016122106940_tbl; -DROP TABLE DTS2016122106940_tbl; +DROP INDEX idx_testtable_tbl; +DROP TABLE TESTTABLE_tbl; drop tablespace partition_merge_ts0; drop tablespace partition_merge_ts1; drop tablespace partition_merge_ts2; diff --git a/src/test/regress/output/hw_subpartition_tablespace.source b/src/test/regress/output/hw_subpartition_tablespace.source new file mode 100644 index 000000000..9fe20d299 --- /dev/null +++ b/src/test/regress/output/hw_subpartition_tablespace.source @@ -0,0 +1,2089 @@ +DROP SCHEMA hw_subpartition_tablespace CASCADE; +ERROR: schema "hw_subpartition_tablespace" does not exist +CREATE SCHEMA hw_subpartition_tablespace; +SET CURRENT_SCHEMA TO hw_subpartition_tablespace; +--prepare +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts1' +\! mkdir '@testtablespace@/hw_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts2' +\! mkdir '@testtablespace@/hw_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts3' +\! 
mkdir '@testtablespace@/hw_subpartition_tablespace_ts3' +CREATE TABLESPACE hw_subpartition_tablespace_ts1 LOCATION '@testtablespace@/hw_subpartition_tablespace_ts1'; +CREATE TABLESPACE hw_subpartition_tablespace_ts2 LOCATION '@testtablespace@/hw_subpartition_tablespace_ts2'; +CREATE TABLESPACE hw_subpartition_tablespace_ts3 LOCATION '@testtablespace@/hw_subpartition_tablespace_ts3'; +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_range_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 
row) + +DROP TABLE t_range_range1; +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_range_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES 
LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_range2; +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); + pg_get_tabledef +---------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_range_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE 
hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list1; +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); + pg_get_tabledef +---------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_range_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + 
SUBPARTITION p_range1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list2; +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); + pg_get_tabledef +----------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_range_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 TABLESPACE 
hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_hash1; +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); + pg_get_tabledef +----------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_range_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + 
SUBPARTITION p_range2_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_hash2; +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 
VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range1; +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE 
hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range2; +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------- + 
SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_list1; +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) 
TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_list2; +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION 
P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); + pg_get_tabledef +----------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash1; +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); + pg_get_tabledef 
+----------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list3_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE hw_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list6_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1 + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash2; +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------- + SET 
search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_range1; +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); + pg_get_tabledef 
+-------------------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_range2; +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + 
SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list1; +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE 
hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list2; +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2, + 
SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); + pg_get_tabledef +---------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_hash1; +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_hash2'); + pg_get_tabledef +---------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( 
+         SUBPARTITION p_hash1_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_hash1_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_hash1_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_hash1_4 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 +
+     ( +
+         SUBPARTITION p_hash2_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_hash2_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_hash2_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_hash2_4 TABLESPACE hw_subpartition_tablespace_ts2 +
+     ), +
+     PARTITION p_hash3 TABLESPACE hw_subpartition_tablespace_ts3 +
+     ( +
+         SUBPARTITION p_hash3_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_hash3_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_hash3_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_hash3_4 TABLESPACE hw_subpartition_tablespace_ts3 +
+     ), +
+     PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_hash4_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_hash4_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_hash4_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_hash4_4 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_hash5 TABLESPACE hw_subpartition_tablespace_ts3 +
+     ( +
+         SUBPARTITION p_hash5_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts3+
+     ), +
+     PARTITION p_hash6 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_hash6_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1+
+     ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_hash_hash2;
+--
+----test add partition with tablespace----
+--
+--since the subpartition definitions in ADD PARTITION use the same code, we only test different partition types: range/list
+--range-list
+CREATE TABLE t_range_list3(c1 int, c2 int, c3 int)
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+    PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15)
+    ),
+    PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15)
+    )
+);
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15)
+    (
+        SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15)
+    );
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1;
+ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25);
+SELECT pg_get_tabledef('t_range_list3');
+                                              pg_get_tabledef
+----------------------------------------------------------------------------------------------------------
+ SET search_path = hw_subpartition_tablespace; +
+ CREATE TABLE t_range_list3 ( +
+     c1 integer, +
+     c2 integer, +
+     c3 integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +
+ ( +
+     PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ( +
+         SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ), +
+     PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default +
+     ( +
+         SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default +
+     ), +
+     PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_range4_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+
+     ), +
+     PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default +
+     ( +
+         SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default +
+     ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_range_list3;
+CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1
+PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2)
+(
+    PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20)
+    ),
+    PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20)
+    )
+);
+ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3
+    (
+        SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20)
+    );
+ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20)
+    (
+        SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20)
+    );
+ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3;
+ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30);
+SELECT pg_get_tabledef('t_range_list4');
+                                              pg_get_tabledef
+----------------------------------------------------------------------------------------------------------
+ SET search_path = hw_subpartition_tablespace; +
+ CREATE TABLE t_range_list4 ( +
+     c1 integer, +
+     c2 integer, +
+     c3 integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ TABLESPACE hw_subpartition_tablespace_ts1 +
+ PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +
+ ( +
+     PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_range1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ( +
+         SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_range2_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ), +
+     PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3 +
+     ( +
+         SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_range3_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3 +
+     ), +
+     PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_range4_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_range4_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_range4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_range4_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3 +
+     ( +
+         SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts3+
+     ), +
+     PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_range6_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+
+     ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_range_list4;
+--list-hash
+CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int)
+PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2)
+(
+    PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST1_3
+    ),
+    PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST2_3
+    )
+);
+ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15)
+    (
+        SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST3_3
+    );
+ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1;
+ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25);
+SELECT pg_get_tabledef('t_list_hash3');
+                                 pg_get_tabledef
+-----------------------------------------------------------------------------------------
+ SET search_path = hw_subpartition_tablespace; +
+ CREATE TABLE t_list_hash3 ( +
+     c1 integer, +
+     c2 integer, +
+     c3 integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +
+ ( +
+     PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_list1_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list1_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list1_3 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ( +
+         SUBPARTITION p_list2_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list2_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list2_3 TABLESPACE hw_subpartition_tablespace_ts2 +
+     ), +
+     PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default +
+     ( +
+         SUBPARTITION p_list3_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list3_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list3_3 TABLESPACE pg_default +
+     ), +
+     PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1+
+     ( +
+         SUBPARTITION p_list4_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default +
+     ( +
+         SUBPARTITION p_list5_subpartdefault1 TABLESPACE pg_default +
+     ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_list_hash3;
+CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1
+PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2)
+(
+    PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_LIST1_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST1_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST1_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_LIST1_4
+    ),
+    PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_LIST2_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST2_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST2_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_LIST2_4
+    )
+);
+ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3
+    (
+        SUBPARTITION P_LIST3_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST3_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST3_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_LIST3_4
+    );
+ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20)
+    (
+        SUBPARTITION P_LIST4_1 TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST4_2 TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST4_3 TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_LIST4_4
+    );
+ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3;
+ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30);
+SELECT pg_get_tabledef('t_list_hash4');
+                                 pg_get_tabledef
+-----------------------------------------------------------------------------------------
+ SET search_path = hw_subpartition_tablespace; +
+ CREATE TABLE t_list_hash4 ( +
+     c1 integer, +
+     c2 integer, +
+     c3 integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ TABLESPACE hw_subpartition_tablespace_ts1 +
+ PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +
+ ( +
+     PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_list1_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list1_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list1_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_list1_4 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ( +
+         SUBPARTITION p_list2_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list2_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list2_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_list2_4 TABLESPACE hw_subpartition_tablespace_ts2 +
+     ), +
+     PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3+
+     ( +
+         SUBPARTITION p_list3_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list3_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list3_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_list3_4 TABLESPACE hw_subpartition_tablespace_ts3 +
+     ), +
+     PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1+
+     ( +
+         SUBPARTITION p_list4_1 TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list4_2 TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list4_3 TABLESPACE hw_subpartition_tablespace_ts3, +
+         SUBPARTITION p_list4_4 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ), +
+     PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3+
+     ( +
+         SUBPARTITION p_list5_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts3 +
+     ), +
+     PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE hw_subpartition_tablespace_ts1+
+     ( +
+         SUBPARTITION p_list6_subpartdefault1 TABLESPACE hw_subpartition_tablespace_ts1 +
+     ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_list_hash4;
+--
+----test add subpartition with tablespace----
+--
+--list-range
+CREATE TABLE t_list_range3(c1 int, c2 int, c3 int)
+PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2)
+(
+    PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST1_3 VALUES LESS THAN (15)
+    ),
+    PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST2_3 VALUES LESS THAN (15)
+    ),
+    PARTITION P_LIST3 VALUES (11,12,13,14,15)
+    (
+        SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST3_3 VALUES LESS THAN (15)
+    ),
+    PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1,
+    PARTITION P_LIST5 VALUES (21,22,23,24,25)
+);
+ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3;
+ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20);
+ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20);
+SELECT pg_get_tabledef('t_list_range3');
+                                                   pg_get_tabledef
+--------------------------------------------------------------------------------------------------------------------
+ SET search_path = hw_subpartition_tablespace; +
+ CREATE TABLE t_list_range3 ( +
+     c1 integer, +
+     c2 integer, +
+     c3 integer +
+ ) +
+ WITH (orientation=row, compression=no) +
+ PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +
+ ( +
+     PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3 +
+     ), +
+     PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ( +
+         SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts2 +
+     ), +
+     PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default +
+     ( +
+         SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, +
+         SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, +
+         SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE pg_default, +
+         SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE pg_default +
+     ), +
+     PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 +
+     ( +
+         SUBPARTITION p_list4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+
+     ), +
+     PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default +
+     ( +
+         SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default +
+     ) +
+ ) +
+ ENABLE ROW MOVEMENT;
+(1 row)
+
+DROP TABLE t_list_range3;
+CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1
+PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2)
+(
+    PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1
+    (
+        SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3,
+        SUBPARTITION P_LIST1_4 VALUES LESS THAN (20)
+    ),
+    PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2
+    (
+        SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1,
+        SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2,
+        SUBPARTITION P_LIST2_3 VALUES LESS THAN
(15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_list_range4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION 
p_list3_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES LESS THAN (5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES LESS THAN (10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES LESS THAN (15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES LESS THAN (20) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_5 VALUES LESS THAN (25) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range4; +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE hw_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------- + SET search_path = hw_subpartition_tablespace; + + CREATE TABLE t_hash_list3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE 
hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list3; +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) TABLESPACE hw_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE hw_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE hw_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE hw_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE hw_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE hw_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE hw_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------- + SET search_path = 
hw_subpartition_tablespace; + + CREATE TABLE t_hash_list4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, compression=no) + + TABLESPACE hw_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash2 TABLESPACE hw_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES (1,2,3,4,5) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES (6,7,8,9,10) TABLESPACE hw_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES (11,12,13,14,15) TABLESPACE hw_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES (16,17,18,19,20) TABLESPACE hw_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_5 VALUES (21,22,23,24,25) TABLESPACE hw_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE hw_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE hw_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES (DEFAULT) TABLESPACE hw_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list4; +--finish +drop tablespace hw_subpartition_tablespace_ts1; +drop tablespace hw_subpartition_tablespace_ts2; +drop tablespace hw_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/hw_subpartition_tablespace_ts2' +\! 
rm -fr '@testtablespace@/hw_subpartition_tablespace_ts3' +DROP SCHEMA hw_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/output/hw_subpartition_vacuum_partition.source b/src/test/regress/output/hw_subpartition_vacuum_partition.source new file mode 100644 index 000000000..3c7712d3c --- /dev/null +++ b/src/test/regress/output/hw_subpartition_vacuum_partition.source @@ -0,0 +1,215 @@ +DROP SCHEMA hw_subpartition_vacuum_partition CASCADE; +ERROR: schema "hw_subpartition_vacuum_partition" does not exist +CREATE SCHEMA hw_subpartition_vacuum_partition; +SET CURRENT_SCHEMA TO hw_subpartition_vacuum_partition; +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum = off" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot = off" >/dev/null 2>&1 +CREATE TABLE temp1(c1 int, c2 int); +-- +--1. test for basic function +-- +CREATE TABLE range_list1 +( + month_code VARCHAR2 (30), + dept_code VARCHAR2 (30), + user_no VARCHAR2 (30), + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN('201903') + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN('201910') + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +--vacuum, success +VACUUM range_list1 PARTITION (p_201901); +VACUUM range_list1 SUBPARTITION (p_201902_a); +--vacuum full, success +VACUUM FULL range_list1 PARTITION (p_201901); +VACUUM FULL range_list1 SUBPARTITION (p_201902_a); +--vacuum full compact, fail +VACUUM FULL COMPACT range_list1 PARTITION (p_201901); +ERROR: COMPACT can not be used with PARTITION +VACUUM FULL COMPACT range_list1 SUBPARTITION (p_201902_a); +ERROR: COMPACT can not be used with SUBPARTITION +--vacuum freeze, success +VACUUM FREEZE range_list1 PARTITION (p_201901); +VACUUM FREEZE range_list1 SUBPARTITION (p_201902_a); +--vacuum verbose, success +VACUUM VERBOSE range_list1 PARTITION (p_201901); +--?INFO: vacuuming "hw_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. There were 0 unused item pointers. 0 pages are entirely empty. CPU 0.00s/0.00u sec elapsed 0.00 sec. +--?INFO: vacuuming "hw_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. There were 0 unused item pointers. 0 pages are entirely empty. CPU 0.00s/0.00u sec elapsed 0.00 sec. +VACUUM VERBOSE range_list1 SUBPARTITION (p_201902_a); +--?INFO: vacuuming "hw_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. There were 0 unused item pointers. 0 pages are entirely empty. CPU 0.00s/0.00u sec elapsed 0.00 sec. +--vacuum option all +VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +--?INFO: vacuuming "hw_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. 
+CPU 0.00s/0.00u sec elapsed 0.00 sec. +--?INFO: vacuuming "hw_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); +--?INFO: vacuuming "hw_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +-- +--2. test the actual work +-- +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (1200) + ( + SUBPARTITION customer3_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 106496 +(1 row) + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 172032 +(1 row) + +--vacuum full partition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 PARTITION (customer1); +VACUUM FULL range_list_sales1 PARTITION (customer2); +VACUUM FULL range_list_sales1 PARTITION (customer3); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 106496 +(1 row) + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 172032 +(1 row) + +--vacuum full subpartition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel1); +VACUUM FULL 
range_list_sales1 SUBPARTITION (customer1_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel3); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel4); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 106496 +(1 row) + +--check index is ok +SELECT /*+ tablescan(range_list_sales1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +--finish +DROP TABLE temp1; +DROP TABLE range_list1; +DROP TABLE range_list_sales1; +DROP SCHEMA hw_subpartition_vacuum_partition CASCADE; +RESET CURRENT_SCHEMA; +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot" >/dev/null 2>&1 diff --git a/src/test/regress/output/hw_user.source b/src/test/regress/output/hw_user.source index ef4c58d14..7cafaca4b 100644 --- a/src/test/regress/output/hw_user.source +++ b/src/test/regress/output/hw_user.source @@ -643,9 +643,6 @@ ERROR: Permission denied. ERROR: Permission denied. drop user current_user01; drop user current_user02; -/*----------------------------------------------------------------- - test alter user with pguser (DTS2017031606532) ------------------------------------------------------------------*/ create or replace function isMD5(pwd text) returns bool as $$ begin return left(pwd, 3) = 'md5'; end; $$ language plpgsql; --alter with admin create user md5user_default with password 'Gauss@123'; diff --git a/src/test/regress/output/hw_user_alter_pguser.source b/src/test/regress/output/hw_user_alter_pguser.source index a6601a4a7..bd06c1a68 100644 --- a/src/test/regress/output/hw_user_alter_pguser.source +++ b/src/test/regress/output/hw_user_alter_pguser.source @@ -1,6 +1,3 @@ -/*----------------------------------------------------------------- - test alter user with pguser (DTS2017031606532) ------------------------------------------------------------------*/ create or replace function ismd5(pwd text) returns bool as $$ begin return left(pwd, 3) = 'md5'; end; $$ language plpgsql; --alter with admin create user md5user_default with password 'Gauss@123'; diff --git a/src/test/regress/output/hw_user_audit.source b/src/test/regress/output/hw_user_audit.source index 751cc7a11..40fafc615 100644 --- a/src/test/regress/output/hw_user_audit.source +++ b/src/test/regress/output/hw_user_audit.source @@ -124,7 +124,7 @@ select pg_sleep(1); (1 row) -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "show audit_dml_state;show audit_dml_state_select;create table DTS2018091711491_t1 (id int, num int) distribute by hash(id);" +\! 
@abs_bindir@/gsql -r -p @portstring@ -d postgres -c "show audit_dml_state;show audit_dml_state_select;create table TESTTABLE_t1 (id int, num int) distribute by hash(id);" audit_dml_state ----------------- 1 @@ -136,21 +136,21 @@ select pg_sleep(1); (1 row) CREATE TABLE -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "insert into DTS2018091711491_t1 values (1,1);" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "insert into TESTTABLE_t1 values (1,1);" INSERT 0 1 -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from DTS2018091711491_t1 where id=1;" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from TESTTABLE_t1 where id=1;" id | num ----+----- 1 | 1 (1 row) -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop table DTS2018091711491_t1;" +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "drop table TESTTABLE_t1;" DROP TABLE -\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('0007-1-1','9999-12-31') where OBJECT_name ='dts2018091711491_t1' order by time;"; - type | result | object_name | detail_info --------------------+--------+---------------------+----------------------------------------------- - dml_action | ok | dts2018091711491_t1 | insert into DTS2018091711491_t1 values (1,1); - dml_action_select | ok | dts2018091711491_t1 | select * from DTS2018091711491_t1 where id=1; +\! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select type,result,object_name,detail_info from pg_query_audit('0007-1-1','9999-12-31') where OBJECT_name ='testtable_t1' order by time;"; + type | result | object_name | detail_info +-------------------+--------+--------------+----------------------------------------------- + dml_action | ok | testtable_t1 | insert into TESTTABLE_t1 values (1,1); + dml_action_select | ok | testtable_t1 | select * from TESTTABLE_t1 where id=1; (2 rows) \! @abs_bindir@/gsql -r -p @portstring@ -d postgres -c "select * from pg_delete_audit('0007-1-1','9999-12-31');"; diff --git a/src/test/regress/output/ledger_table_case.source b/src/test/regress/output/ledger_table_case.source index 9216507c2..23748e0ce 100644 --- a/src/test/regress/output/ledger_table_case.source +++ b/src/test/regress/output/ledger_table_case.source @@ -869,7 +869,7 @@ CREATE USER test_normal_user PASSWORD 'Gauss_234'; ALTER SCHEMA blockchain OWNER TO test_normal_user; DROP USER test_normal_user CASCADE; ERROR: The schema 'blockchain' doesn't allow to drop -ALTER SCHEMA blockchain OWNER TO @login_user@; +ALTER SCHEMA blockchain OWNER TO "@login_user@"; DROP USER test_normal_user CASCADE; CREATE TABLE blockchain.ledgernsp_t1_hist(a int, b text); ERROR: cannot create table under blockchain namspace. 
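The hw_user_audit hunk above only swaps the DTS-numbered table name for a neutral one; the DML-audit flow it verifies is unchanged. As a minimal sketch of that flow, assuming the audit GUCs are already on as the test configures them (audit_dml_state=1, audit_dml_state_select=1); the table name and the wide-open time range are illustrative, not part of the test suite:
CREATE TABLE audit_demo_t1 (id int, num int);
INSERT INTO audit_demo_t1 VALUES (1, 1);
SELECT * FROM audit_demo_t1 WHERE id = 1;
-- audited DML is read back with pg_query_audit(); note object_name is stored lower-cased
SELECT type, result, object_name, detail_info
FROM pg_query_audit('0007-1-1', '9999-12-31')
WHERE object_name = 'audit_demo_t1'
ORDER BY time;
DROP TABLE audit_demo_t1;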
diff --git a/src/test/regress/output/predefined_roles.source b/src/test/regress/output/predefined_roles.source index e5b781172..4b6d69f27 100755 --- a/src/test/regress/output/predefined_roles.source +++ b/src/test/regress/output/predefined_roles.source @@ -265,7 +265,7 @@ SELECT pg_create_logical_replication_slot('privs_test_slot', 'mppdb_decoding'); --?.* (1 row) -SELECT pg_create_physical_replication_slot_extern('uuid', false, 'obs.cnnorth-7.ulanqab.huawei.com;dyk;19D772JBCACXX3KWS51D;********;caoshufeng_uuid/dn1'); +SELECT pg_create_physical_replication_slot_extern('uuid', false, 'obs.cnnorth-7.ulanqab.huawei.com;dyk;19D772JBCACXX3KWS51D;********;caoshufeng_uuid/dn1', false); ERROR: message is inleagel "obs.cnnorth-7.ulanqab.huawei.com;dyk;19D772JBCACXX3KWS51D;********;caoshufeng_uuid/dn1" CONTEXT: referenced column: pg_create_physical_replication_slot_extern SELECT pg_replication_slot_advance('privs_test_slot', NULL); diff --git a/src/test/regress/output/pri_alter_any_table.source b/src/test/regress/output/pri_alter_any_table.source new file mode 100644 index 000000000..e3a0f6842 --- /dev/null +++ b/src/test/regress/output/pri_alter_any_table.source @@ -0,0 +1,325 @@ +CREATE USER user1 PASSWORD 'Gauss@1234'; +CREATE USER test_alter_any_table_role PASSWORD 'Gauss@1234'; +GRANT alter any table to test_alter_any_table_role; +CREATE TABLESPACE pri_al_tsp LOCATION '@testtablespace@/pri_tsp'; +CREATE SCHEMA pri_al_schema_test; +CREATE SCHEMA pri_al_schema; +set search_path=pri_al_schema; +CREATE table pri_al_schema.tb_pri (id int, name VARCHAR(10)); + --create table +CREATE TABLE pri_al_schema.TBL_DOMAIN_PRI +( +  IDOMAINID   NUMBER(10) NOT NULL, +  SDOMAINNAME VARCHAR2(30) NOT NULL, +  b int +); +CREATE TABLE pri_al_schema.pri_al_test_hash (a int, b int); +CREATE TYPE pri_al_schema.pri_al_person_type1 AS (id int, name text); +CREATE TYPE pri_al_schema.pri_al_person_type2 AS (id int, name text); +CREATE TABLE pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_person_type1; +CREATE TABLE IF NOT EXISTS pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_persons; +NOTICE: relation "pri_al_persons" already exists, skipping +CREATE TABLE pri_al_schema.pri_al_stuff (id int); +--trigger +create table pri_al_trigtest (i serial primary key); +NOTICE: CREATE TABLE will create implicit sequence "pri_al_trigtest_i_seq" for serial column "pri_al_trigtest.i" +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "pri_al_trigtest_pkey" for table "pri_al_trigtest" +create function pri_al_schema.pri_al_trigtest() returns trigger as $$ +begin + raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL; + return new; +end;$$ language plpgsql; +create trigger pri_al_trigtest_b_row_tg before insert or update or delete on pri_al_trigtest +for each row execute procedure pri_al_trigtest(); +CREATE SEQUENCE pri_al_schema.serial START 101; +CREATE TABLE pri_al_schema.T1(C1 bigint default nextval('serial')); + --alter +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +CREATE UNIQUE INDEX pri_al_schema.ds_ship_mode_t1_index1 ON pri_al_schema.T1(C1);-- should fail: permission denied +ERROR: permission denied for relation t1 +DETAIL: N/A +ALTER SEQUENCE pri_al_schema.serial OWNED BY T1.C1; +ERROR: permission denied for relation serial +DETAIL: N/A +ALTER TABLE pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_person_type2; +ALTER TABLE pri_al_schema.pri_al_persons INHERIT pri_al_schema.pri_al_stuff; +ERROR: cannot change inheritance of typed table +ALTER TABLE pri_al_schema.pri_al_persons NOT OF; +--trigger +alter table 
pri_al_schema.pri_al_trigtest disable trigger pri_al_trigtest_b_row_tg; +alter table pri_al_schema.pri_al_trigtest enable trigger pri_al_trigtest_b_row_tg; +alter table pri_al_schema.pri_al_test_hash DISABLE ROW LEVEL SECURITY; +ALTER TABLE pri_al_schema.pri_al_test_hash REPLICA IDENTITY FULL; +ALTER TABLE pri_al_schema.pri_al_test_hash alter COLUMN b SET STATISTICS 1000; +ALTER TABLE pri_al_schema.tb_pri add column age int; +ALTER TABLE pri_al_schema.tb_pri modify name VARCHAR(60); +ALTER TABLE pri_al_schema.tb_pri ALTER COLUMN name TYPE text; +ALTER TABLE pri_al_schema.tb_pri ALTER name SET STORAGE EXTERNAL; +ALTER TABLE pri_al_schema.tb_pri add check (age>10); +ALTER TABLE pri_al_schema.tb_pri alter name set not null; +ALTER TABLE pri_al_schema.tb_pri ALTER COLUMN name DROP NOT NULL; +ALTER TABLE pri_al_schema.tb_pri rename age to age_1; +ALTER TABLE pri_al_schema.tb_pri drop column age_1; +ALTER TABLE pri_al_schema.tb_pri SET TABLESPACE pri_al_tsp; +ERROR: permission denied for tablespace pri_al_tsp +DETAIL: N/A +ALTER TABLE pri_al_schema.tb_pri SET SCHEMA pri_al_schema_test; --error must be owner of relation tb_pri +ERROR: must be owner of relation tb_pri +DETAIL: N/A +ALTER TABLE pri_al_schema.tb_pri owner to user1; --error must be owner of relation tb_pri +ERROR: must be owner of relation tb_pri +DETAIL: N/A +ALTER TABLE pri_al_schema.tb_pri RENAME TO test_table; --failed +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.tb_pri SET WITHOUT CLUSTER; --success +ALTER TABLE pri_al_schema.tb_pri SET NOCOMPRESS;--success +ALTER TABLE pri_al_schema.tb_pri SET WITHOUT OIDS;-- Un-support feature +ERROR: Un-support feature +DETAIL: ALTER TABLE ... SET WITHOUT OIDS is not yet supported. +ALTER TABLE pri_al_schema.pri_al_test_hash add column c serial; --not supported +ERROR: It's not supported to alter table add serial column +ALTER TABLE pri_al_schema.pri_al_test_hash add column d int default 10; --success +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) USING INDEX; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) USING INDEX; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +reset role; +grant create any index to test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (IDOMAINID) USING INDEX; +NOTICE: ALTER TABLE / ADD PRIMARY KEY will create implicit index "pk_tbl_domain" for table "tbl_domain_pri" +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI ADD CONSTRAINT IX_TBL_DOMAIN UNIQUE (SDOMAINNAME) USING INDEX; +NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index "ix_tbl_domain" for table "tbl_domain_pri" +\d pri_al_schema.TBL_DOMAIN_PRI + Table "pri_al_schema.tbl_domain_pri" + Column | Type | Modifiers +-------------+-----------------------+----------- + idomainid | numeric(10,0) | not null + sdomainname | character varying(30) | not null + b | integer | +Indexes: + "pk_tbl_domain" PRIMARY KEY, btree (idomainid) TABLESPACE pg_default + "ix_tbl_domain" UNIQUE CONSTRAINT, btree (sdomainname) TABLESPACE pg_default +Check constraints: + "b_le_20" CHECK (b <= 20) NOT VALID + +ALTER TABLE pri_al_schema.TBL_DOMAIN_PRI RENAME CONSTRAINT PK_TBL_DOMAIN TO MY_PK_TBL_DOMAIN; +ALTER TABLE 
pri_al_schema.TBL_DOMAIN_PRI DROP CONSTRAINT MY_PK_TBL_DOMAIN; +reset role; +revoke create any index from test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +\d pri_al_schema.TBL_DOMAIN_PRI + Table "pri_al_schema.tbl_domain_pri" + Column | Type | Modifiers +-------------+-----------------------+----------- + idomainid | numeric(10,0) | not null + sdomainname | character varying(30) | not null + b | integer | +Indexes: + "ix_tbl_domain" UNIQUE CONSTRAINT, btree (sdomainname) TABLESPACE pg_default +Check constraints: + "b_le_20" CHECK (b <= 20) NOT VALID + +ALTER TABLE pri_al_schema.tb_pri DISTRIBUTE BY HASH(id); +ERROR: Un-support feature +DETAIL: The distributed capability is not supported currently. +DROP TABLE pri_al_schema.tb_pri; -- should fail +ERROR: permission denied for relation tb_pri +DETAIL: N/A +reset role; +SET ROLE user1 PASSWORD 'Gauss@1234'; +create table user1.pri_al_storage_para_t1 (a int4, b text) +WITH +( + fillfactor =85, + autovacuum_enabled = ON, + toast.autovacuum_enabled = ON, + autovacuum_vacuum_threshold = 100, + toast.autovacuum_vacuum_threshold = 100, + autovacuum_vacuum_scale_factor = 10, + toast.autovacuum_vacuum_scale_factor = 10, + autovacuum_analyze_threshold = 8, + autovacuum_analyze_scale_factor = 9, +-- autovacuum_vacuum_cost_delay: Valid values are between "0" and "100". + autovacuum_vacuum_cost_delay = 90, + toast.autovacuum_vacuum_cost_delay = 92, +-- autovacuum_vacuum_cost_limit: Valid values are between "1" and "10000". + autovacuum_vacuum_cost_limit = 567, + toast.autovacuum_vacuum_cost_limit = 789, + autovacuum_freeze_min_age = 5000, + toast.autovacuum_freeze_min_age = 6000, +-- autovacuum_freeze_max_age: Valid values are between "100000000" and "2000000000". + autovacuum_freeze_max_age = 300000000, + toast.autovacuum_freeze_max_age = 250000000, + autovacuum_freeze_table_age = 170000000, + toast.autovacuum_freeze_table_age = 180000000 +) +partition by range (a) +( + partition pri_al_storage_para_t1_p1 values less than (10), + partition pri_al_storage_para_t1_p2 values less than (20), + partition pri_al_storage_para_t1_p3 values less than (100) +); +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +alter table user1.pri_al_storage_para_t1 add partition p4_rtest_t1 values less than (200); +alter table user1.pri_al_storage_para_t1 +RESET +( + fillfactor, + autovacuum_enabled, + autovacuum_vacuum_threshold, + autovacuum_vacuum_scale_factor, + autovacuum_analyze_threshold, + autovacuum_analyze_scale_factor, + autovacuum_vacuum_cost_delay, + autovacuum_vacuum_cost_limit, + autovacuum_freeze_min_age, + autovacuum_freeze_max_age, + autovacuum_freeze_table_age +); +-- step 2.1: alter table: storage parameters +alter table user1.pri_al_storage_para_t1 +SET +( + fillfactor =86, + autovacuum_enabled = OFF, + toast.autovacuum_enabled = ON, + autovacuum_vacuum_threshold = 1000, + toast.autovacuum_vacuum_threshold = 1000, + --"0.000000" and "100.000000" + autovacuum_vacuum_scale_factor = 15, + toast.autovacuum_vacuum_scale_factor = 89, + autovacuum_analyze_threshold = 800, + --"0.000000" and "100.000000" + autovacuum_analyze_scale_factor = 55, +-- autovacuum_vacuum_cost_delay: Valid values are between "0" and "100". + autovacuum_vacuum_cost_delay = 99, + toast.autovacuum_vacuum_cost_delay = 98, +-- autovacuum_vacuum_cost_limit: Valid values are between "1" and "10000". 
+ autovacuum_vacuum_cost_limit = 555, + toast.autovacuum_vacuum_cost_limit = 798, + autovacuum_freeze_min_age = 6000, + toast.autovacuum_freeze_min_age = 4000, +-- autovacuum_freeze_max_age: Valid values are between "100000000" and "2000000000". + autovacuum_freeze_max_age = 400000000, + toast.autovacuum_freeze_max_age = 280000000, + autovacuum_freeze_table_age = 150000000, + toast.autovacuum_freeze_table_age = 160000000 +); +drop table user1.pri_al_storage_para_t1; -- should fail +ERROR: permission denied for relation pri_al_storage_para_t1 +DETAIL: N/A +reset role; +REVOKE alter any table from test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +ALTER TABLE pri_al_schema.pri_al_persons OF pri_al_schema.pri_al_person_type2; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.pri_al_persons INHERIT pri_al_schema.pri_al_stuff; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.pri_al_persons NOT OF; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +--trigger +alter table pri_al_schema.pri_al_trigtest disable trigger pri_al_trigtest_b_row_tg; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +alter table pri_al_schema.pri_al_trigtest enable trigger pri_al_trigtest_b_row_tg; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +alter table pri_al_schema.pri_al_test_hash DISABLE ROW LEVEL SECURITY; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.pri_al_test_hash REPLICA IDENTITY FULL; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.tb_pri add column age int; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +ALTER TABLE pri_al_schema.tb_pri modify name VARCHAR(60); +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +reset role; +set search_path=pri_al_schema,user1; +drop table pri_al_persons,pri_al_stuff,pri_al_test_hash,pri_al_trigtest,t1,tb_pri,tbl_domain_pri; +drop table user1.pri_al_storage_para_t1; +reset role; +--view +CREATE USER user2 password 'Gauss@1234'; +GRANT alter any table to test_alter_any_table_role; +set search_path=pri_al_schema; +CREATE TABLE pri_al_schema.customer ( + cid int primary key, + name text not null, + tel text, + passwd text +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "customer_pkey" for table "customer" +INSERT INTO pri_al_schema.customer + VALUES (101, 'regress_alice', '+81-12-3456-7890', 'passwd123'), + (102, 'regress_bob', '+01-234-567-8901', 'beafsteak'), + (103, 'regress_eve', '+49-8765-43210', 'hamburger'); + --create view +CREATE VIEW pri_al_schema.my_property_normal AS + SELECT * FROM pri_al_schema.customer WHERE name = current_user; +--create materialized view +create table pri_al_schema.pri_al_t1(c1 int,c2 int); +insert into pri_al_schema.pri_al_t1 values(1,1),(2,2); +create incremental materialized view pri_al_schema.mv1 as select * from pri_al_schema.pri_al_t1; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +ALTER VIEW pri_al_schema.my_property_normal SET (security_barrier=true); +alter view pri_al_schema.my_property_normal rename to pri_al_property_normal; --failed +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +drop view pri_al_schema.my_property_normal; +ERROR: permission denied for relation my_property_normal +DETAIL: N/A +--materialized view +--alter user mat_pri1 poladmin; +alter materialized view pri_al_schema.mv1 enable row level 
security; -- no support ALTER MATERIALIZED VIEW is not yet supported. +ERROR: ALTER MATERIALIZED VIEW is not yet supported. +alter materialized view pri_al_schema.mv1 set SCHEMA user2; +ERROR: ALTER MATERIALIZED VIEW is not yet supported. +create table pri_al_schema.test_create_pri (id int); +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +reset role; +--revoke the privilege +REVOKE alter any table from test_alter_any_table_role; +SET ROLE test_alter_any_table_role PASSWORD 'Gauss@1234'; +ALTER VIEW pri_al_schema.my_property_normal SET (security_barrier=true); +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +alter view pri_al_schema.my_property_normal rename to pri_al_property_normal; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +drop view pri_al_schema.pri_al_property_normal; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +--materialized view +--alter user mat_pri1 poladmin; +alter materialized view pri_al_schema.mv1 enable row level security; +ERROR: permission denied for schema pri_al_schema +DETAIL: N/A +reset role; +drop type pri_al_schema.pri_al_person_type1; +drop type pri_al_schema.pri_al_person_type2; +drop function pri_al_schema.pri_al_trigtest(); +DROP materialized view pri_al_schema.mv1; +drop sequence pri_al_schema.serial; +drop view pri_al_schema.my_property_normal; +drop table pri_al_schema.customer; +drop table pri_al_schema.pri_al_t1; +DROP SCHEMA pri_al_schema CASCADE; +DROP SCHEMA pri_al_schema_test CASCADE; +DROP TABLESPACE pri_al_tsp; +\! rm -rf @testtablespace@/pri_tsp +DROP USER user1 cascade; +DROP USER test_alter_any_table_role cascade; +DROP USER user2 cascade; + diff --git a/src/test/regress/output/pri_create_any_function.source b/src/test/regress/output/pri_create_any_function.source new file mode 100644 index 000000000..edb1787e0 --- /dev/null +++ b/src/test/regress/output/pri_create_any_function.source @@ -0,0 +1,173 @@ +CREATE USER test_create_any_function_role PASSWORD 'Gauss@1234'; +GRANT create any function to test_create_any_function_role; +CREATE TABLESPACE pri_create_fun_tsp LOCATION '@testtablespace@/pri_create_fun_tsp'; +CREATE SCHEMA pri_fun_schema; +set search_path=pri_fun_schema; +SET ROLE test_create_any_function_role PASSWORD 'Gauss@1234'; +--define a function as a SQL query. +CREATE FUNCTION pri_fun_schema.pri_func_add_sql(integer, integer) RETURNS integer + AS 'select $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; +--increment an integer in PL/pgSQL, referring to the parameter by name. +CREATE OR REPLACE FUNCTION pri_fun_schema.pri_func_increment_plsql(i integer) RETURNS integer AS $$ + BEGIN + RETURN i + 1; + END; +$$ LANGUAGE plpgsql; +--return a RECORD type +CREATE OR REPLACE FUNCTION pri_fun_schema.pri_compute(i int, out result_1 bigint, out result_2 bigint) +returns SETOF RECORD +as $$ +begin + result_1 = i + 1; + result_2 = i * 10; +return next; +end; +$$language plpgsql; +--return a record with multiple output parameters. +CREATE FUNCTION pri_fun_schema.pri_func_dup_sql(in int, out f1 int, out f2 text) + AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ + LANGUAGE SQL; +--compute the sum of two integers and return the result; if an input is null, return null. +CREATE FUNCTION pri_fun_schema.pri_func_add_sql2(num1 integer, num2 integer) RETURN integer +AS +BEGIN +RETURN num1 + num2; +END; +/ +--create overloaded functions with the package attribute +create or replace function pri_fun_schema.pri_get_sal(NAME VARCHAR2) RETURN NUMBER package +IS + BEGIN + RETURN 1; + END; + / +create or replace function pri_fun_schema.pri_get_sal(NAME int) RETURN NUMBER package +IS + BEGIN + RETURN 1; + END; + / +select pri_fun_schema.pri_func_add_sql(1,2); + pri_func_add_sql 
+------------------ + 3 +(1 row) + +select pri_fun_schema.pri_func_increment_plsql(1); + pri_func_increment_plsql +-------------------------- + 2 +(1 row) + +select pri_fun_schema.pri_func_dup_sql(1); + pri_func_dup_sql +------------------ + (1,"1 is text") +(1 row) + +select pri_fun_schema.pri_func_add_sql2(1,2); + pri_func_add_sql2 +------------------- + 3 +(1 row) + +select pri_fun_schema.pri_compute(1); + pri_compute +------------- + (2,10) +(1 row) + +select pri_fun_schema.pri_get_sal('name'); + pri_get_sal +------------- + 1 +(1 row) + +select pri_fun_schema.pri_get_sal(1); + pri_get_sal +------------- + 1 +(1 row) + +--PROCEDURE +CREATE OR REPLACE PROCEDURE pri_fun_schema.pri_prc_add +( + param1 IN INTEGER, + param2 IN OUT INTEGER +) +AS +BEGIN + param2:= param1 + param2; +END; +/ +CREATE OR REPLACE PROCEDURE pri_fun_schema.pri_autonomous(out res int) AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + res := 55; +END; +/ +select pri_prc_add(1,2); + pri_prc_add +------------- + 3 +(1 row) + +select pri_autonomous(); + pri_autonomous +---------------- + 55 +(1 row) + +reset role; +CREATE TABLE pri_fun_schema.creditcard_info (id_number int, name text, credit_card varchar(19)); +SET ROLE test_create_any_function_role PASSWORD 'Gauss@1234'; +CREATE OR REPLACE PROCEDURE pri_fun_schema.pri_autonomous_1() AS +BEGIN + insert into pri_fun_schema.creditcard_info values(66, 66,66); + select * from pri_fun_schema.creditcard_info; + commit; + insert into pri_fun_schema.creditcard_info values(77, 77,77); + rollback; +END; +/ +call pri_fun_schema.pri_autonomous_1(); +ERROR: permission denied for relation creditcard_info +DETAIL: N/A +CONTEXT: SQL statement "insert into pri_fun_schema.creditcard_info values(66, 66,66)" +PL/pgSQL function pri_autonomous_1() line 3 at SQL statement +-- CREATE TABLE in procedure +create or replace procedure pri_fun_schema.pri_test_proc_create(i in integer) +as +begin + create table pri_fun_schema.pri_t11(id int) tablespace pri_create_fun_tsp; +end; +/ +select pri_fun_schema.pri_test_proc_create(1); +ERROR: permission denied for schema pri_fun_schema +DETAIL: N/A +CONTEXT: SQL statement "create table pri_fun_schema.pri_t11(id int) tablespace pri_create_fun_tsp" +PL/pgSQL function pri_test_proc_create(integer) line 3 at SQL statement +referenced column: pri_test_proc_create +reset role; +--drop the functions. +DROP FUNCTION pri_fun_schema.pri_func_add_sql(integer, integer); +DROP FUNCTION pri_fun_schema.pri_func_increment_plsql(integer); +DROP FUNCTION pri_fun_schema.pri_func_dup_sql(int); +DROP FUNCTION pri_fun_schema.pri_func_add_sql2(integer, integer); +DROP FUNCTION pri_fun_schema.pri_compute(int); +DROP FUNCTION pri_fun_schema.pri_get_sal(VARCHAR2); +DROP FUNCTION pri_fun_schema.pri_get_sal(int); +DROP SCHEMA pri_fun_schema cascade; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to function pri_prc_add(integer,integer) +drop cascades to function pri_autonomous() +drop cascades to table creditcard_info +drop cascades to function pri_autonomous_1() +drop cascades to function pri_test_proc_create(integer) +DROP TABLESPACE pri_create_fun_tsp; +\! 
rm -rf '@testtablespace@/pri_create_fun_tsp' +DROP USER test_create_any_function_role cascade; diff --git a/src/test/regress/output/pri_execute_any_function.source b/src/test/regress/output/pri_execute_any_function.source new file mode 100644 index 000000000..00bf661c1 --- /dev/null +++ b/src/test/regress/output/pri_execute_any_function.source @@ -0,0 +1,208 @@ +CREATE USER test_execute_any_function_role PASSWORD 'Gauss@1234'; +GRANT execute any function to test_execute_any_function_role; +CREATE TABLESPACE pri_execute_fun_tsp LOCATION '@testtablespace@/exe_fun_tsp'; +CREATE SCHEMA exe_fun_schema; +set search_path=exe_fun_schema; +--define a function as a SQL query. +CREATE FUNCTION exe_fun_schema.exe_fun_func_add_sql(integer, integer) RETURNS integer + AS 'select $1 + $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_add_sql(integer, integer) FROM public; +--increment an integer in PL/pgSQL, referring to the parameter by name. +CREATE OR REPLACE FUNCTION exe_fun_schema.exe_fun_func_increment_plsql(i integer) RETURNS integer AS $$ + BEGIN + RETURN i + 1; + END; +$$ LANGUAGE plpgsql; +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_increment_plsql(integer) FROM public; +--return a RECORD type +CREATE OR REPLACE FUNCTION exe_fun_schema.exe_fun_compute(i int, out result_1 bigint, out result_2 bigint) +returns SETOF RECORD +as $$ +begin + result_1 = i + 1; + result_2 = i * 10; +return next; +end; +$$language plpgsql; +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_compute(int) FROM public; +--return a record with multiple output parameters. +CREATE FUNCTION exe_fun_schema.exe_fun_func_dup_sql(in int, out f1 int, out f2 text) + AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ + LANGUAGE SQL; +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_dup_sql(int) FROM public; +--compute the sum of two integers and return the result; if an input is null, return null. +CREATE FUNCTION exe_fun_schema.exe_fun_func_add_sql2(num1 integer, num2 integer) RETURN integer +AS +BEGIN +RETURN num1 + num2; +END; +/ +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_func_add_sql2(integer, integer) FROM public; +--create overloaded functions with the package attribute +create or replace function exe_fun_schema.exe_fun_get_sal(NAME VARCHAR2) RETURN NUMBER package +IS + BEGIN + RETURN 1; + END; + / +create or replace function exe_fun_schema.exe_fun_get_sal(NAME int) RETURN NUMBER package +IS + BEGIN + RETURN 1; + END; + / +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_get_sal(VARCHAR2) FROM public; +REVOKE EXECUTE ON FUNCTION exe_fun_schema.exe_fun_get_sal(int) FROM public; +SET ROLE test_execute_any_function_role PASSWORD 'Gauss@1234'; +select exe_fun_schema.exe_fun_func_add_sql(1,2); + exe_fun_func_add_sql +---------------------- + 3 +(1 row) + +select exe_fun_schema.exe_fun_func_increment_plsql(1); + exe_fun_func_increment_plsql +------------------------------ + 2 +(1 row) + +select exe_fun_schema.exe_fun_func_dup_sql(1); + exe_fun_func_dup_sql +---------------------- + (1,"1 is text") +(1 row) + +select exe_fun_schema.exe_fun_func_add_sql2(1,2); + exe_fun_func_add_sql2 +----------------------- + 3 +(1 row) + +select exe_fun_schema.exe_fun_compute(1); + exe_fun_compute +----------------- + (2,10) +(1 row) + +select exe_fun_schema.exe_fun_get_sal('name'); + exe_fun_get_sal +----------------- + 1 +(1 row) + +select exe_fun_schema.exe_fun_get_sal(1); + exe_fun_get_sal +----------------- + 1 +(1 row) + +reset role; +--drop the functions. +DROP FUNCTION exe_fun_schema.exe_fun_func_add_sql(integer, integer); +DROP FUNCTION exe_fun_schema.exe_fun_func_increment_plsql(integer); +DROP FUNCTION exe_fun_schema.exe_fun_func_dup_sql(int); +DROP FUNCTION 
exe_fun_schema.exe_fun_func_add_sql2(integer, integer); +DROP FUNCTION exe_fun_schema.exe_fun_compute(int); +DROP FUNCTION exe_fun_schema.exe_fun_get_sal(VARCHAR2); +DROP FUNCTION exe_fun_schema.exe_fun_get_sal(int); +SET ROLE test_execute_any_function_role PASSWORD 'Gauss@1234'; +--stored procedures +CREATE OR REPLACE PROCEDURE exe_fun_schema.prc_add +( + param1 IN INTEGER, + param2 IN OUT INTEGER +) +AS +BEGIN + param2:= param1 + param2; +END; +/ +ERROR: permission denied for schema exe_fun_schema +DETAIL: N/A +reset role; +CREATE OR REPLACE PROCEDURE exe_fun_schema.prc_add +( + param1 IN INTEGER, + param2 IN OUT INTEGER +) +AS +BEGIN + param2:= param1 + param2; +END; +/ +REVOKE EXECUTE ON PROCEDURE exe_fun_schema.prc_add(INTEGER,INTEGER) FROM public; +CREATE OR REPLACE PROCEDURE exe_fun_schema.pri_autonomous(out res int) AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + res := 55; +END; +/ +REVOKE EXECUTE ON PROCEDURE exe_fun_schema.pri_autonomous() FROM public; +CREATE TABLE exe_fun_schema.creditcard_info (id_number int, name text, credit_card varchar(19)); +CREATE OR REPLACE PROCEDURE exe_fun_schema.pri_autonomous_1() AS +BEGIN + insert into exe_fun_schema.creditcard_info values(66, 66,66); + select * from exe_fun_schema.creditcard_info; + commit; + insert into exe_fun_schema.creditcard_info values(77, 77,77); + rollback; +END; +/ +REVOKE EXECUTE ON PROCEDURE exe_fun_schema.pri_autonomous_1() FROM public; +-- CREATE TABLE in procedure +create or replace procedure exe_fun_schema.pri_test_proc_create(i in integer) +as +begin + create table exe_fun_schema.pri_t11(id int) tablespace pri_execute_fun_tsp; +end; +/ +REVOKE EXECUTE ON PROCEDURE exe_fun_schema.pri_test_proc_create(integer) FROM public; +SET ROLE test_execute_any_function_role PASSWORD 'Gauss@1234'; +SELECT exe_fun_schema.prc_add(2,3); + prc_add +--------- + 5 +(1 row) + +select exe_fun_schema.pri_autonomous(); + pri_autonomous +---------------- + 55 +(1 row) + +call exe_fun_schema.pri_autonomous_1(); +ERROR: permission denied for relation creditcard_info +DETAIL: N/A +CONTEXT: SQL statement "insert into exe_fun_schema.creditcard_info values(66, 66,66)" +PL/pgSQL function pri_autonomous_1() line 3 at SQL statement +select exe_fun_schema.pri_test_proc_create(1); +ERROR: permission denied for schema exe_fun_schema +DETAIL: N/A +CONTEXT: SQL statement "create table exe_fun_schema.pri_t11(id int) tablespace pri_execute_fun_tsp" +PL/pgSQL function pri_test_proc_create(integer) line 3 at SQL statement +referenced column: pri_test_proc_create +DROP PROCEDURE exe_fun_schema.prc_add(INTEGER,INTEGER); +ERROR: permission denied for function exe_fun_schema.prc_add +DETAIL: N/A +DROP PROCEDURE exe_fun_schema.pri_autonomous(); +ERROR: permission denied for function exe_fun_schema.pri_autonomous +DETAIL: N/A +DROP PROCEDURE exe_fun_schema.apri_autonomous_1(); +ERROR: function exe_fun_schema.apri_autonomous_1 does not exist +DROP PROCEDURE exe_fun_schema.pri_test_proc_create(integer); +ERROR: permission denied for function exe_fun_schema.pri_test_proc_create +DETAIL: N/A +reset role; +DROP PROCEDURE exe_fun_schema.prc_add(INTEGER,INTEGER); +DROP PROCEDURE exe_fun_schema.pri_autonomous(); +DROP PROCEDURE exe_fun_schema.pri_autonomous_1(); +DROP PROCEDURE exe_fun_schema.pri_test_proc_create(integer); +DROP table creditcard_info cascade; +DROP SCHEMA exe_fun_schema cascade; +DROP TABLESPACE pri_execute_fun_tsp; +\! 
rm -rf @testtablespace@/pri/exe_fun_tsp +DROP user test_execute_any_function_role cascade; diff --git a/src/test/regress/expected/publication.out b/src/test/regress/output/publication.source similarity index 65% rename from src/test/regress/expected/publication.out rename to src/test/regress/output/publication.source index 681f4a364..b8cbc25b0 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/output/publication.source @@ -1,8 +1,45 @@ -- -- PUBLICATION -- +-- check help +\h CREATE PUBLICATION +Command: CREATE PUBLICATION +Description: define a new publication +Syntax: +CREATE PUBLICATION name + [ FOR TABLE table_name [, ...] + | FOR ALL TABLES ] + [ WITH ( publication_parameter [= value] [, ... ] ) ] + +\h ALTER PUBLICATION +Command: ALTER PUBLICATION +Description: change the definition of a publication +Syntax: +ALTER PUBLICATION name ADD TABLE table_name [, ...] +ALTER PUBLICATION name SET TABLE table_name [, ...] +ALTER PUBLICATION name DROP TABLE table_name [, ...] +ALTER PUBLICATION name SET ( publication_parameter [= value] [, ... ] ) +ALTER PUBLICATION name OWNER TO new_owner +ALTER PUBLICATION name RENAME TO new_name + +\h DROP PUBLICATION +Command: DROP PUBLICATION +Description: remove a publication +Syntax: +DROP PUBLICATION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] + +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + +--enable publication and subscription audit +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=16777215" > /dev/null 2>&1 --- prepare CREATE ROLE regress_publication_user LOGIN SYSADMIN PASSWORD 'Abcdef@123'; +CREATE ROLE regress_publication_user2 LOGIN SYSADMIN PASSWORD 'Abcdef@123'; SET SESSION AUTHORIZATION 'regress_publication_user' PASSWORD 'Abcdef@123'; CREATE TABLE testpub_tbl1 (id int primary key, data text); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "testpub_tbl1_pkey" for table "testpub_tbl1" @@ -111,6 +148,10 @@ DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. ALTER PUBLICATION testpub_foralltables SET TABLE pub_test.testpub_nopk; ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. 
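The hunks that follow extend this test with ALTER PUBLICATION ... OWNER TO and ... RENAME TO, so that every form in the \h ALTER PUBLICATION help text above is exercised and lands in the audit log. A condensed sketch of the audited lifecycle, assuming audit_system_object already covers publications as the test configures it (the publication name here is illustrative; the owner role must already exist):
CREATE PUBLICATION pub_demo FOR ALL TABLES;
ALTER PUBLICATION pub_demo SET (publish = 'insert, delete');
ALTER PUBLICATION pub_demo OWNER TO regress_publication_user2;
ALTER PUBLICATION pub_demo RENAME TO pub_demo_renamed;
DROP PUBLICATION IF EXISTS pub_demo_renamed;
-- each statement above is recorded with type 'ddl_publication_subscription'
-- and can be read back from the audit log:
SELECT object_name, detail_info
FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00')
WHERE type = 'ddl_publication_subscription';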
+-- alter owner +ALTER PUBLICATION testpub_foralltables OWNER TO regress_publication_user2; +-- rename +ALTER PUBLICATION testpub_foralltables rename to testpub_foralltables_rename; --- drop testpub_tbl1 DROP TABLE testpub_tbl1; select pubname, tablename from pg_publication_tables where tablename='testpub_tbl1'; @@ -119,8 +160,8 @@ select pubname, tablename from pg_publication_tables where tablename='testpub_tb (0 rows) --- drop publication -DROP PUBLICATION testpub_foralltables; -select * from pg_publication where pubname='testpub_foralltables'; +DROP PUBLICATION testpub_foralltables_rename; +select * from pg_publication where pubname='testpub_foralltables_rename'; pubname | pubowner | puballtables | pubinsert | pubupdate | pubdelete ---------+----------+--------------+-----------+-----------+----------- (0 rows) @@ -139,6 +180,7 @@ DROP PUBLICATION IF EXISTS testpub_multitbls; --- DROP PUBLICATION IF EXISTS testpub_cascade_tbl1; RESET SESSION AUTHORIZATION; DROP ROLE regress_publication_user; +DROP ROLE regress_publication_user2; --- permission CREATE ROLE normal_user LOGIN PASSWORD 'Abcdef@123'; SET SESSION AUTHORIZATION 'normal_user' PASSWORD 'Abcdef@123'; @@ -148,3 +190,33 @@ ERROR: permission denied for database regression DETAIL: N/A RESET SESSION AUTHORIZATION; DROP ROLE normal_user; +SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_publication_subscription'; + object_name | detail_info +-----------------------------+------------------------------------------------------------------------------- + testpub_default | CREATE PUBLICATION testpub_default; + testpub_foralltables | CREATE PUBLICATION testpub_foralltables FOR ALL TABLES; + testpub_only_tbl1 | CREATE PUBLICATION testpub_only_tbl1 FOR TABLE ONLY testpub_tbl1; + testpub_multitbls | CREATE PUBLICATION testpub_multitbls FOR TABLE testpub_tbl2, testpub_tbl3; + testpub_only_insert | CREATE PUBLICATION testpub_only_insert with (publish='insert'); + testpub_default | ALTER PUBLICATION testpub_default ADD TABLE testpub_tbl1; + testpub_default | ALTER PUBLICATION testpub_default SET TABLE testpub_tbl2; + testpub_multitbls | ALTER PUBLICATION testpub_multitbls DROP TABLE ONLY testpub_tbl2; + testpub_default | ALTER PUBLICATION testpub_default SET (publish='insert, delete'); + testpub_foralltables | ALTER PUBLICATION testpub_foralltables OWNER TO regress_publication_user2; + testpub_foralltables | ALTER PUBLICATION testpub_foralltables rename to testpub_foralltables_rename; + testpub_foralltables_rename | DROP PUBLICATION testpub_foralltables_rename; + testpub_nonexists | DROP PUBLICATION IF EXISTS testpub_nonexists; + testpub_default | DROP PUBLICATION IF EXISTS testpub_default; + testpub_only_tbl1 | DROP PUBLICATION IF EXISTS testpub_only_tbl1; + testpub_only_insert | DROP PUBLICATION IF EXISTS testpub_only_insert; + testpub_multitbls | DROP PUBLICATION IF EXISTS testpub_multitbls; +(17 rows) + +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + +\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1 diff --git a/src/test/regress/output/recovery_2pc_tools.source b/src/test/regress/output/recovery_2pc_tools.source index aad219a32..6b4253581 100644 --- a/src/test/regress/output/recovery_2pc_tools.source +++ b/src/test/regress/output/recovery_2pc_tools.source @@ -22,6 +22,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c application_name | string | | | application_type | enum | | | archive_command | string | | | + archive_interval | integer | s | 1 | 1000 archive_mode | bool | | | archive_timeout | integer | s | 0 | 1073741823 array_nulls | bool | | | @@ -38,6 +39,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c audit_directory | string | | | audit_dml_state | integer | | 0 | 1 audit_dml_state_select | integer | | 0 | 1 + audit_xid_info | integer | | 0 | 1 audit_enabled | bool | | | audit_file_remain_threshold | integer | | 1 | 1048576 audit_file_remain_time | integer | | 0 | 730 @@ -53,6 +55,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c audit_user_locked | integer | | 0 | 1 audit_user_violation | integer | | 0 | 1 authentication_timeout | integer | s | 1 | 600 + auto_csn_barrier | bool | | | auth_iteration_count | integer | | 2048 | 134217728 autoanalyze | bool | | | autoanalyze_timeout | integer | s | 0 | 2147483 @@ -151,6 +154,8 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c default_with_oids | bool | | | dfs_partition_directory_length | integer | | 92 | 7999 disable_memory_protect | bool | | | + dw_file_num | integer | | 1 | 16 + dw_file_size | integer | | 32 | 256 dynamic_library_path | string | | | effective_cache_size | integer | 8kB | 1 | 2147483647 effective_io_concurrency | integer | | 0 | 1000 @@ -204,6 +209,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c enable_light_proxy | bool | | | enable_logical_io_statistics | bool | | | enable_material | bool | | | + enable_memory_context_check_debug | bool | | | enable_memory_context_control | bool | | | enable_memory_limit | bool | | | enable_mergejoin | bool | | | @@ -511,6 +517,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c temp_file_limit | integer | kB | -1 | 2147483647 temp_tablespaces | string | | | thread_pool_attr | string | | | + thread_pool_stream_attr | string | | | TimeZone | string | | | timezone_abbreviations | string | | | topsql_retention_time | integer | | 0 | 3650 @@ -541,6 +548,7 @@ select name,vartype,unit,min_val,max_val from pg_settings where name <> 'qunit_c update_lockwait_timeout | integer | ms | 0 | 2147483647 update_process_title | bool | | | upgrade_mode | integer | | 0 | 2147483647 + uppercase_attribute_name | bool | | | user_metric_retention_time | integer | | 0 | 3650 use_workload_manager | bool | | | vacuum_cost_delay | integer | ms | 0 | 100 diff --git a/src/test/regress/output/row_compression/row_compression_basebackup.source b/src/test/regress/output/row_compression/row_compression_basebackup.source deleted file mode 100644 index ee90bb59b..000000000 --- a/src/test/regress/output/row_compression/row_compression_basebackup.source +++ /dev/null @@ -1,28 +0,0 @@ ---?.* -CREATE DATABASE ---?.* -CREATE TABLE -CREATE INDEX -INSERT 0 1000 -CHECKPOINT ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* ---?.* - count -------- - 1000 -(1 row) - -SET - count -------- - 1000 -(1 row) - 
---?.* -SHUTDOWN diff --git a/src/test/regress/output/segment_subpartition_tablespace.source b/src/test/regress/output/segment_subpartition_tablespace.source new file mode 100644 index 000000000..0426600ae --- /dev/null +++ b/src/test/regress/output/segment_subpartition_tablespace.source @@ -0,0 +1,2089 @@ +DROP SCHEMA segment_subpartition_tablespace CASCADE; +ERROR: schema "segment_subpartition_tablespace" does not exist +CREATE SCHEMA segment_subpartition_tablespace; +SET CURRENT_SCHEMA TO segment_subpartition_tablespace; +--prepare +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts1' +\! mkdir '@testtablespace@/segment_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts2' +\! mkdir '@testtablespace@/segment_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts3' +\! mkdir '@testtablespace@/segment_subpartition_tablespace_ts3' +CREATE TABLESPACE segment_subpartition_tablespace_ts1 LOCATION '@testtablespace@/segment_subpartition_tablespace_ts1'; +CREATE TABLESPACE segment_subpartition_tablespace_ts2 LOCATION '@testtablespace@/segment_subpartition_tablespace_ts2'; +CREATE TABLESPACE segment_subpartition_tablespace_ts3 LOCATION '@testtablespace@/segment_subpartition_tablespace_ts3'; +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION 
p_range2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_range1; +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) 
SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_range2; +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) 
TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list1; +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE 
segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES (DEFAULT) 
TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list2; +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); + pg_get_tabledef +---------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_hash1; +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION 
P_RANGE2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); + pg_get_tabledef +---------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE 
t_range_hash2; +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range1; +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 
VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + 
SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range2; +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + 
PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_list1; +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + 
SUBPARTITION p_list1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_list2; +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); + pg_get_tabledef +---------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION 
BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash1; +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); + pg_get_tabledef +---------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 
+ + ( + + SUBPARTITION p_list1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE segment_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list6_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1 + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash2; +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_hash1 
TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_range1; +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE 
t_hash_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_range2; +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE 
segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list1; +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE 
segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list2; +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + 
PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_hash1; +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION 
P_HASH4_4 + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_hash2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_hash2; +-- +----test add partition with tablespace---- +-- +--since the add subpartition definitions use the same code path, we only test different partition types: range/list +--range-list +CREATE TABLE t_range_list3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ) +); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) + 
( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1; +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_range_list3'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_list3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list3; +CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 
VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); + pg_get_tabledef +--------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_range_list4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE 
segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list4; +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) +); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); + pg_get_tabledef +---------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_hash3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash3; +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + 
SUBPARTITION P_LIST1_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); + pg_get_tabledef +---------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_hash4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list3_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_1 TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + 
PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE segment_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list6_subpartdefault1 TABLESPACE segment_subpartition_tablespace_ts1 + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash4; +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE t_list_range3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_list_range3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) 
TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE pg_default, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range3; +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + 
CREATE TABLE t_list_range4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES LESS THAN (5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES LESS THAN (10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES LESS THAN (15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES LESS THAN (20) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_5 VALUES LESS THAN (25) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range4; +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) 
+ ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE segment_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_list3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list3; +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) WITH (SEGMENT=ON) TABLESPACE segment_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE segment_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES 
( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE segment_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE segment_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE segment_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE segment_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE segment_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = segment_subpartition_tablespace; + + CREATE TABLE t_hash_list4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, segment=on, compression=no) + + TABLESPACE segment_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash2 TABLESPACE segment_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_5 VALUES 
(21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES (1,2,3,4,5) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES (6,7,8,9,10) TABLESPACE segment_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES (11,12,13,14,15) TABLESPACE segment_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES (16,17,18,19,20) TABLESPACE segment_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_5 VALUES (21,22,23,24,25) TABLESPACE segment_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE segment_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE segment_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES (DEFAULT) TABLESPACE segment_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list4; +--finish +drop tablespace segment_subpartition_tablespace_ts1; +drop tablespace segment_subpartition_tablespace_ts2; +drop tablespace segment_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/segment_subpartition_tablespace_ts3' +DROP SCHEMA segment_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/output/segment_subpartition_vacuum_partition.source b/src/test/regress/output/segment_subpartition_vacuum_partition.source new file mode 100644 index 000000000..7a7c1b8d1 --- /dev/null +++ b/src/test/regress/output/segment_subpartition_vacuum_partition.source @@ -0,0 +1,220 @@ +-- +--segment tables don't support vacuum full now +-- +DROP SCHEMA segment_subpartition_vacuum_partition CASCADE; +ERROR: schema "segment_subpartition_vacuum_partition" does not exist +CREATE SCHEMA segment_subpartition_vacuum_partition; +SET CURRENT_SCHEMA TO segment_subpartition_vacuum_partition; +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum = off" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot = off" >/dev/null 2>&1 +CREATE TABLE temp1(c1 int, c2 int); +-- +--1. 
test for basic function +-- +CREATE TABLE range_list1 +( + month_code VARCHAR2 (30), + dept_code VARCHAR2 (30), + user_no VARCHAR2 (30), + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN('201903') + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN('201910') + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +--vacuum, success +VACUUM range_list1 PARTITION (p_201901); +VACUUM range_list1 SUBPARTITION (p_201902_a); +--vacuum full, success +VACUUM FULL range_list1 PARTITION (p_201901); +INFO: skipping segment table "range_list1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list1 SUBPARTITION (p_201902_a); +INFO: skipping segment table "range_list1" --- please use gs_space_shrink to recycle segment space. +--vacuum full compact, fail +VACUUM FULL COMPACT range_list1 PARTITION (p_201901); +ERROR: COMPACT can not be used with PARTITION +VACUUM FULL COMPACT range_list1 SUBPARTITION (p_201902_a); +ERROR: COMPACT can not be used with SUBPARTITION +--vacuum freeze, success +VACUUM FREEZE range_list1 PARTITION (p_201901); +VACUUM FREEZE range_list1 SUBPARTITION (p_201902_a); +--vacuum verbose, success +VACUUM VERBOSE range_list1 PARTITION (p_201901); +--?INFO: vacuuming "segment_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. There were 0 unused item pointers. 0 pages are entirely empty. CPU 0.00s/0.00u sec elapsed 0.00 sec. +--?INFO: vacuuming "segment_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. There were 0 unused item pointers. 0 pages are entirely empty. CPU 0.00s/0.00u sec elapsed 0.00 sec. +VACUUM VERBOSE range_list1 SUBPARTITION (p_201902_a); +--?INFO: vacuuming "segment_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. There were 0 unused item pointers. 0 pages are entirely empty. CPU 0.00s/0.00u sec elapsed 0.00 sec. +--vacuum option all +VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +INFO: skipping segment table "range_list1" --- please use gs_space_shrink to recycle segment space. +VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); +INFO: skipping segment table "range_list1" --- please use gs_space_shrink to recycle segment space. +-- +--2. 
test the actual work +-- +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (1200) + ( + SUBPARTITION customer3_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 106496 +(1 row) + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 172032 +(1 row) + +--vacuum full partition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 PARTITION (customer1); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 PARTITION (customer2); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 PARTITION (customer3); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 172032 +(1 row) + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 229376 +(1 row) + +--vacuum full subpartition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel1); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel2); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. 
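The INFO lines here, and for the remaining subpartitions below, are the expected behavior: VACUUM FULL never rewrites a segment table, which is why pg_relation_size stays flat across the delete-and-reload cycles in this test. A minimal standalone sketch of the same effect, assuming a hypothetical table seg_demo that is not part of the suite:

```sql
-- Hypothetical sketch: on a segment table, VACUUM FULL only emits the INFO
-- hint pointing at gs_space_shrink (not exercised by this test) and leaves
-- the allocated extents in place.
CREATE TABLE seg_demo(a int) WITH (SEGMENT=ON);
INSERT INTO seg_demo SELECT generate_series(1, 10000);
SELECT pg_relation_size('seg_demo');  -- baseline size after the load
DELETE FROM seg_demo;
VACUUM FULL seg_demo;                 -- INFO: skipping segment table ...
SELECT pg_relation_size('seg_demo');  -- unchanged: space is not given back
DROP TABLE seg_demo;
```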
+VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel3); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel4); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel1); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel2); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); +INFO: skipping segment table "range_list_sales1" --- please use gs_space_shrink to recycle segment space. +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 229376 +(1 row) + +--check index is ok +SELECT /*+ tablescan(range_list_sales1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +--finish +DROP TABLE temp1; +DROP TABLE range_list1; +DROP TABLE range_list_sales1; +DROP SCHEMA segment_subpartition_vacuum_partition CASCADE; +RESET CURRENT_SCHEMA; +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot" >/dev/null 2>&1 diff --git a/src/test/regress/output/single_node_user_mapping.source b/src/test/regress/output/single_node_user_mapping.source new file mode 100644 index 000000000..32315f2da --- /dev/null +++ b/src/test/regress/output/single_node_user_mapping.source @@ -0,0 +1,50 @@ +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + +--- prepare +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/usermapping.key.cipher +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/usermapping.key.rand +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o usermapping > /dev/null 2>&1 ; echo $? 
+0 +CREATE ROLE regress_usermapping_user LOGIN SYSADMIN PASSWORD 'Abcdef@123'; +SET SESSION AUTHORIZATION 'regress_usermapping_user' PASSWORD 'Abcdef@123'; +CREATE FOREIGN DATA WRAPPER dummy; +CREATE SERVER dummy_srv FOREIGN DATA WRAPPER dummy; +CREATE SERVER dummy_srv2 FOREIGN DATA WRAPPER dummy; +CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', password 'shouldBeEncrypt'); +CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test'); +ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password 'shouldBeEncrypt'); +ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password 'shouldBeEncrypt2'); +ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(DROP password); +DROP USER MAPPING FOR current_user SERVER dummy_srv2; +DROP USER MAPPING FOR current_user SERVER dummy_srv; +DROP SERVER dummy_srv; +DROP SERVER dummy_srv2; +--DROP FOREIGN DATA WRAPPER is not supported now +--DROP FOREIGN DATA WRAPPER dummy; +RESET SESSION AUTHORIZATION; +--can't drop role regress_usermapping_user, since FOREIGN DATA WRAPPER dummy depends on it +--DROP ROLE regress_usermapping_user; +SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_user' and object_name = 'current_user'; + object_name | detail_info +--------------+------------------------------------------------------------------------------------------------------ + current_user | CREATE USER MAPPING FOR current_user SERVER dummy_srv OPTIONS(username 'test', password '********'); + current_user | CREATE USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(username 'test'); + current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(ADD password '********'); + current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(SET password '********'); + current_user | ALTER USER MAPPING FOR current_user SERVER dummy_srv2 OPTIONS(DROP password); + current_user | DROP USER MAPPING FOR current_user SERVER dummy_srv2; + current_user | DROP USER MAPPING FOR current_user SERVER dummy_srv; +(7 rows) + +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + diff --git a/src/test/regress/output/sqlldr/gs_loader_column.source b/src/test/regress/output/sqlldr/gs_loader_column.source index 736d3b3a7..f9d690c5a 100644 --- a/src/test/regress/output/sqlldr/gs_loader_column.source +++ b/src/test/regress/output/sqlldr/gs_loader_column.source @@ -176,7 +176,6 @@ select * from SQLLDR_COL_TBL where col_6 is null; 2 | constant | | | 1234 | | constant | c | 2 | 999 (1 row) --- issue: DTS2021092327784 TIMESTAMP CREATE TABLE SQLLDR_COL_001( ID INTEGER, NAME VARCHAR2(20), diff --git a/src/test/regress/output/sqlldr/gs_loader_issues.source b/src/test/regress/output/sqlldr/gs_loader_issues.source index cfe4c4309..87c086fd5 100644 --- a/src/test/regress/output/sqlldr/gs_loader_issues.source +++ b/src/test/regress/output/sqlldr/gs_loader_issues.source @@ -1,4 +1,3 @@ --- issue: DTS2021091619762 when > should not support create table sqlldr_p4(sequence int,name VARCHAR2(20),con VARCHAR2(20)) partition by range(sequence) ( @@ -8,9 +7,9 @@ create table sqlldr_p4(sequence int,name VARCHAR2(20),con VARCHAR2(20)) partition p4 values less than (40), partition p5 values less than (MAXVALUE) ); -\! 
@abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_issue_DTS2021091619762.ctl data=@abs_srcdir@/data/gs_loader_issue_DTS2021091619762.csv port=@portstring@ db=regression passwd=gauss@123 rows=1000000 bindsize=1000000 +\! @abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_issue_TESTTABLE.ctl data=@abs_srcdir@/data/gs_loader_issue_TESTTABLE.csv port=@portstring@ db=regression passwd=gauss@123 rows=1000000 bindsize=1000000 ERROR: after transform: BEGIN; -\COPY sqlldr_p4 ( sequence, name, con ) FROM '@abs_srcdir@/data/gs_loader_issue_DTS2021091619762.csv' LOAD LOG ERRORS DATA DELIMITER ',' WHEN sequence > '10' SEQUENCE(sequence(1, 2)) IGNORE_EXTRA_DATA; +\COPY sqlldr_p4 ( sequence, name, con ) FROM '@abs_srcdir@/data/gs_loader_issue_TESTTABLE.csv' LOAD LOG ERRORS DATA DELIMITER ',' WHEN sequence > '10' SEQUENCE(sequence(1, 2)) IGNORE_EXTRA_DATA; select 'copy_txid:'||txid_current(); COMMIT; ERROR: BEGIN @@ -19,7 +18,6 @@ CONTEXT: COPY sqlldr_p4, line 1: "1,1a,1bb" gsql:.gs_loader_file.tmp:54: ERROR: current transaction is aborted, commands ignored until end of transaction block, firstChar[Q] ROLLBACK drop table sqlldr_p4; --- issue: DTS2021090116724 discard not valid CREATE TABLE sqlldr_issue_001( ID INTEGER, NAME VARCHAR2(20), @@ -43,7 +41,6 @@ select * from sqlldr_issue_001 order by 1,2,3; \! cat @abs_srcdir@/data/gs_loader_issue_001.dsc 33,c,e drop table sqlldr_issue_001; --- issue: DTS2021091020369 multi POSITION in one line CREATE TABLE sqlldr_tb21( ID INTEGER, NAME VARCHAR2(20), @@ -84,7 +81,6 @@ select max(ID) MAX from sqlldr_tb21; (1 row) drop table sqlldr_tb21; --- issue: DTS2021090720669 permission denied file report error CREATE TABLE sqlldr_issue_permission( ID INTEGER, NAME VARCHAR2(20), @@ -98,7 +94,6 @@ ERROR: data file @abs_srcdir@/data/gs_loader_issue_permission.csv Permission den \! @abs_bindir@/gs_loader control=@abs_srcdir@/data/gs_loader_issue_permission.ctl data=@abs_srcdir@/data/gs_loader_issue_permission.csv port=@portstring@ db=regression passwd=gauss@123 ERROR: control file @abs_srcdir@/data/gs_loader_issue_permission.ctl Permission denied \! 
chmod +r @abs_srcdir@/data/gs_loader_issue_permission.ctl --- issue: DTS2021091621098 constant ""/'' CREATE TABLE sqlldr_issue_001( ID INTEGER, NAME VARCHAR2(20), @@ -123,7 +118,6 @@ select * from sqlldr_issue_001 order by 1,2,3; drop table sqlldr_issue_001; -- teardown drop table sqlldr_issue_permission; --- issue: DTS2021092327751 error for options in control file CREATE TABLE sqlldr_issue_options( ID INTEGER, NAME VARCHAR2(20), @@ -146,7 +140,6 @@ select * from sqlldr_issue_options; -- teardown drop table sqlldr_issue_options; --- issue: DTS2021092920305 error for options in control file CREATE TABLE sqlldr_issue_options( ID INTEGER, NAME VARCHAR2(20), @@ -169,7 +162,6 @@ select * from sqlldr_issue_options; -- teardown drop table sqlldr_issue_options; --- issue: DTS2021092425716 error for badfile in control file CREATE TABLE sqlldr_issue_badfile( ID INTEGER, NAME VARCHAR2(20), @@ -192,7 +184,6 @@ select * from sqlldr_issue_badfile; -- teardown drop table sqlldr_issue_badfile; --- issue: DTS2021092327796 error for infile in control file CREATE TABLE sqlldr_issue_infile( ID INTEGER, NAME VARCHAR2(20), @@ -215,7 +206,6 @@ select * from sqlldr_issue_infile; -- teardown drop table sqlldr_issue_infile; --- issue: DTS2021101915706 suport login -U -h -W -p CREATE TABLE sqlldr_issue_login( ID INTEGER, NAME VARCHAR2(20), @@ -238,7 +228,6 @@ select * from sqlldr_issue_login; -- teardown drop table sqlldr_issue_login; --- issue: DTS2021101425838 hide password CREATE TABLE sqlldr_issue_hide_passwd( ID INTEGER, NAME VARCHAR2(20), diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/output/subscription.source similarity index 55% rename from src/test/regress/expected/subscription.out rename to src/test/regress/output/subscription.source index 9dfbbfabd..c0bce67d3 100644 --- a/src/test/regress/expected/subscription.out +++ b/src/test/regress/output/subscription.source @@ -1,8 +1,49 @@ -- -- SUBSCRIPTION -- +-- check help +\h CREATE SUBSCRIPTION +Command: CREATE SUBSCRIPTION +Description: define a new subscription +Syntax: +CREATE SUBSCRIPTION subscription_name + CONNECTION 'conninfo' + PUBLICATION publication_name [, ...] + [ WITH ( subscription_parameter [= value] [, ... ] ) ] + +\h ALTER SUBSCRIPTION +Command: ALTER SUBSCRIPTION +Description: change the definition of a subscription +Syntax: +ALTER SUBSCRIPTION name CONNECTION 'conninfo' +ALTER SUBSCRIPTION name SET PUBLICATION publication_name [, ...] +ALTER SUBSCRIPTION name ENABLE +ALTER SUBSCRIPTION name SET ( subscription_parameter [= value] [, ... ] ) +ALTER SUBSCRIPTION name OWNER TO new_owner +ALTER SUBSCRIPTION name RENAME TO new_name + +\h DROP SUBSCRIPTION +Command: DROP SUBSCRIPTION +Description: remove a subscription +Syntax: +DROP SUBSCRIPTION [ IF EXISTS ] name [ CASCADE | RESTRICT ] + +--clear audit log +SELECT pg_delete_audit('1012-11-10', '3012-11-11'); + pg_delete_audit +----------------- + +(1 row) + +--enable publication and subscription audit +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object=16777215" > /dev/null 2>&1 --- prepare +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/subscription.key.cipher +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} rm -f {}/bin/subscription.key.rand +\! echo $GAUSSHOME | sed 's/^.*tmp_check\/install\///g' | xargs -I{} @abs_bindir@/gs_guc generate -S 123456@pwd -D {}/bin -o subscription > /dev/null 2>&1 ; echo $? 
+0 CREATE ROLE regress_subscription_user LOGIN SYSADMIN PASSWORD 'Abcdef@123'; +CREATE ROLE regress_subscription_user2 LOGIN SYSADMIN PASSWORD 'Abcdef@123'; SET SESSION AUTHORIZATION 'regress_subscription_user' PASSWORD 'Abcdef@123'; DROP SUBSCRIPTION IF EXISTS testsub; NOTICE: subscription "testsub" does not exist, skipping @@ -19,23 +60,26 @@ LINE 1: CREATE SUBSCRIPTION testsub PUBLICATION foo; ^ -- fail - could not connect to the publisher create subscription testsub2 connection 'host=abc' publication pub; -WARNING: apply worker could not connect to the remote server : could not translate host name "abc" to address: Name or service not known - +WARNING: apply worker could not connect to the remote server ERROR: could not connect to the publisher set client_min_messages to error; -- fail - syntax error, invalid connection string syntax: missing "=" CREATE SUBSCRIPTION testsub CONNECTION 'testconn' PUBLICATION testpub; -ERROR: invalid connection string syntax: missing "=" after "testconn" in connection info string - +ERROR: invalid connection string syntax -- fail - unrecognized subscription parameter: create_slot CREATE SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (create_slot=false); ERROR: unrecognized subscription parameter: create_slot CREATE SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (ENABLED=false, slot_name='testsub', synchronous_commit=off); +-- create SUBSCRIPTION with conninfo in two single quote, used to check mask string bug +CREATE SUBSCRIPTION testsub_maskconninfo CONNECTION 'host=''1.2.3.4'' port=''12345'' user=''username'' dbname=''postgres'' password=''password_1234''' PUBLICATION testpub WITH (ENABLED=false, slot_name='testsub', synchronous_commit=off); +-- alter connection +ALTER SUBSCRIPTION testsub CONNECTION 'host=''1.2.3.4'' port=''12345'' user=''username'' dbname=''postgres'' password=''password_1234'''; +ALTER SUBSCRIPTION testsub CONNECTION 'dbname=does_not_exist'; reset client_min_messages; select subname, pg_get_userbyid(subowner) as Owner, subenabled, subconninfo, subpublications from pg_subscription where subname='testsub'; - subname | owner | subenabled | subconninfo | subpublications ----------+---------------------------+------------+----------------------+----------------- - testsub | regress_subscription_user | f | dbname=doesnotexist | {testpub} + subname | owner | subenabled | subconninfo | subpublications +---------+---------------------------+------------+------------------------+----------------- + testsub | regress_subscription_user | f | dbname=does_not_exist | {testpub} (1 row) --- alter subscription @@ -56,6 +100,8 @@ select subname, subenabled, subconninfo from pg_subscription where subname='tes (1 row) ALTER SUBSCRIPTION testsub SET (conninfo='dbname=doesnotexist3'); +------ alter SUBSCRIPTION with conninfo in two single quote, used to check mask string bug +ALTER SUBSCRIPTION testsub_maskconninfo SET (conninfo='host=''1.2.3.4'' port=''12345'' user=''username'' dbname=''postgres'' password=''password_1234''', synchronous_commit=on); select subname, subenabled, subconninfo from pg_subscription where subname='testsub'; subname | subenabled | subconninfo ---------+------------+----------------------- @@ -74,31 +120,36 @@ select subname, subenabled, subsynccommit from pg_subscription where subname='t ------ fail - Currently enabled=false, cannot change slot_name to a non-null value. 
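------ slot_name is coupled to the enabled flag: while enabled=false it must stay NONE,
------ so the statement below is expected to fail. A hypothetical accepted sequence
------ (a sketch only; it additionally needs a reachable publisher) would be:
------   ALTER SUBSCRIPTION testsub ENABLE;
------   ALTER SUBSCRIPTION testsub SET (slot_name='testsub');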
ALTER SUBSCRIPTION testsub SET (slot_name='testsub'); ERROR: Currently enabled=false, cannot change slot_name to a non-null value. +-- alter owner +ALTER SUBSCRIPTION testsub owner to regress_subscription_user2; +--rename +ALTER SUBSCRIPTION testsub rename to testsub_rename; --- inside a transaction block ------ CREATE SUBSCRIPTION ... WITH (enabled = true) ------ fail - ERROR: CREATE SUBSCRIPTION ... WITH (enabled = true) cannot run inside a transaction block BEGIN; -CREATE SUBSCRIPTION testsub CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (ENABLED=true); +CREATE SUBSCRIPTION testsub_rename CONNECTION 'dbname=doesnotexist' PUBLICATION testpub WITH (ENABLED=true); ERROR: CREATE SUBSCRIPTION ... WITH (enabled = true) cannot run inside a transaction block COMMIT; -- -- active SUBSCRIPTION BEGIN; -ALTER SUBSCRIPTION testsub ENABLE; -WARNING: apply worker could not connect to the remote server : connect to server failed: No such file or directory - +ALTER SUBSCRIPTION testsub_rename ENABLE; +WARNING: apply worker could not connect to the remote server ERROR: could not connect to the publisher -select subname, subenabled from pg_subscription where subname='testsub'; +select subname, subenabled from pg_subscription where subname='testsub_rename'; ERROR: current transaction is aborted, commands ignored until end of transaction block, firstChar[Q] -ALTER SUBSCRIPTION testsub SET (ENABLED=false); +ALTER SUBSCRIPTION testsub_rename SET (ENABLED=false); ERROR: current transaction is aborted, commands ignored until end of transaction block, firstChar[Q] -select subname, subenabled from pg_subscription where subname='testsub'; +select subname, subenabled from pg_subscription where subname='testsub_rename'; ERROR: current transaction is aborted, commands ignored until end of transaction block, firstChar[Q] COMMIT; --- drop subscription -DROP SUBSCRIPTION IF EXISTS testsub; +DROP SUBSCRIPTION IF EXISTS testsub_rename; +DROP SUBSCRIPTION IF EXISTS testsub_maskconninfo; --- cleanup RESET SESSION AUTHORIZATION; DROP ROLE regress_subscription_user; +DROP ROLE regress_subscription_user2; -- built-in function test select pg_replication_origin_create('origin_test'); pg_replication_origin_create @@ -216,3 +267,30 @@ select pg_replication_origin_session_setup('origin_test'); ERROR: could not find tuple for replication origin 'origin_test' CONTEXT: referenced column: pg_replication_origin_session_setup drop table t_origin_test; +SELECT object_name,detail_info FROM pg_query_audit('2022-01-13 9:30:00', '2031-12-12 22:00:00') where type = 'ddl_publication_subscription' order by time; + object_name | detail_info +----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + testsub | DROP SUBSCRIPTION IF EXISTS testsub; + testsub | CREATE SUBSCRIPTION testsub CONNECTION **********************PUBLICATION testpub WITH (ENABLED=false, slot_name='testsub', synchronous_commit=off); + testsub_maskconninfo | CREATE SUBSCRIPTION testsub_maskconninfo CONNECTION ***************************************************************************************************PUBLICATION testpub WITH (ENABLED=false, slot_name='testsub', synchronous_commit=off); + testsub | ALTER SUBSCRIPTION testsub CONNECTION '*************************************************************************************************; + testsub | ALTER 
SUBSCRIPTION testsub CONNECTION '**********************;
+ testsub | ALTER SUBSCRIPTION testsub SET PUBLICATION testpub2, testpub3;
+ testsub | ALTER SUBSCRIPTION testsub CONNECTION '*********************;
+ testsub | ALTER SUBSCRIPTION testsub SET (conninfo='**********************;
+ testsub_maskconninfo | ALTER SUBSCRIPTION testsub_maskconninfo SET (conninfo='*************************************************************************************************************************;
+ testsub | ALTER SUBSCRIPTION testsub SET (synchronous_commit=on);
+ testsub | ALTER SUBSCRIPTION testsub owner to regress_subscription_user2;
+ testsub | ALTER SUBSCRIPTION testsub rename to testsub_rename;
+ testsub_rename | DROP SUBSCRIPTION IF EXISTS testsub_rename;
+ testsub_maskconninfo | DROP SUBSCRIPTION IF EXISTS testsub_maskconninfo;
+(14 rows)
+
+--clear audit log
+SELECT pg_delete_audit('1012-11-10', '3012-11-11');
+ pg_delete_audit
+-----------------
+
+(1 row)
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "audit_system_object" > /dev/null 2>&1
diff --git a/src/test/regress/output/timecapsule_partition_ustore_test_1.source b/src/test/regress/output/timecapsule_partition_ustore_test_1.source
new file mode 100644
index 000000000..e7521a7ea
--- /dev/null
+++ b/src/test/regress/output/timecapsule_partition_ustore_test_1.source
@@ -0,0 +1,108 @@
+set enable_default_ustore_table = on;
+show undo_retention_time;
+ undo_retention_time
+---------------------
+ 0
+(1 row)
+
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1
+select pg_sleep(4);
+ pg_sleep
+----------
+
+(1 row)
+
+show undo_retention_time;
+ undo_retention_time
+---------------------
+ 15min
+(1 row)
+
+create table t_timecapsule_test_tmp(id int, snaptime timestamptz, snapcsn bigint);
+CREATE OR REPLACE FUNCTION findCsn(int8)
+ RETURNS INTEGER
+ LANGUAGE plpgsql
+AS
+$BODY$
+declare
+ count integer;
+begin
+ count = (select snapcsn from t_timecapsule_test_tmp where id = $1);
+ return count;
+end;
+$BODY$;
+CREATE OR REPLACE FUNCTION findTime(int8)
+ RETURNS timestamptz
+ LANGUAGE plpgsql
+AS
+$BODY$
+declare
+ count timestamptz;
+begin
+ count = (select snaptime from t_timecapsule_test_tmp where id = $1);
+ return count;
+end;
+$BODY$;
+--partition table
+--timecapsule query normal table
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+create table t1 (id int)
+partition by range (id)
+(
+partition p1 values less than (10),
+partition p2 values less than (20)
+);
+insert into t1 values(1);
+select pg_sleep(4);
+ pg_sleep
+----------
+
+(1 row)
+
+insert into t_timecapsule_test_tmp select 1, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn();
+update t1 set id = 2 where id = 1;
+select * from t1 timecapsule csn findCsn(1);
+ id
+----
+ 1
+(1 row)
+
+drop table t1;
+delete from t_timecapsule_test_tmp;
+--timecapsule table normal table
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+create table t1 (id int)
+partition by range (id)
+(
+partition p1 values less than (10),
+partition p2 values less than (20)
+);
+insert into t1 values(1);
+insert into t1 values(2);
+select pg_sleep(4);
+ pg_sleep
+----------
+
+(1 row)
+
+insert into t_timecapsule_test_tmp select 1, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn();
+delete from t1 where id = 1;
+select * from t1 order by id;
+ id
+----
+ 2
+(1 row)
+
+TIMECAPSULE TABLE t1 TO csn findCsn(1);
+select * from t1;
+ id
+----
+ 2
+ 1
+(2 rows)
+
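+--the findTime helper defined above drives the timestamp flavour of the same
+--flashback; a minimal commented-out sketch (same fixtures as above; snapshot id 2
+--is an assumed value not used elsewhere in this test):
+--insert into t_timecapsule_test_tmp select 2, now(), int8in(xidout(next_csn)) from gs_get_next_xid_csn();
+--delete from t1 where id = 2;
+--TIMECAPSULE TABLE t1 TO TIMESTAMP findTime(2);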
+drop table t1;
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1
+set enable_default_ustore_table = off;
diff --git a/src/test/regress/output/timecapsule_partition_ustore_test_2.source b/src/test/regress/output/timecapsule_partition_ustore_test_2.source
new file mode 100644
index 000000000..93f354bd4
--- /dev/null
+++ b/src/test/regress/output/timecapsule_partition_ustore_test_2.source
@@ -0,0 +1,145 @@
+set enable_default_ustore_table = on;
+\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_recyclebin = on" > /dev/null 2>&1
+--partition table
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+purge table t1;
+ERROR: recycle object "t1" desired does not exist
+create table t1 (id int)
+partition by range (id)
+(
+partition p1 values less than (10),
+partition p2 values less than (20)
+);
+insert into t1 values(1);
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+drop table t1;
+timecapsule table t1 to before drop;
+select * from t1;
+ id
+----
+ 1
+(1 row)
+
+drop table t1;
+purge table t1;
+--timecapsule truncate normal table
+drop table if exists t1;
+NOTICE: table "t1" does not exist, skipping
+purge recyclebin;
+create table t1 (id int)
+partition by range (id)
+(
+partition p1 values less than (10),
+partition p2 values less than (20)
+);
+insert into t1 values(1);
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+truncate table t1;
+select * from t1;
+ id
+----
+(0 rows)
+
+timecapsule table t1 to before truncate;
+select * from t1;
+ id
+----
+ 1
+(1 row)
+
+drop table t1;
+purge table t1;
+purge recyclebin;
+--truncate purge partition table
+create table t1 (id int)
+partition by range (id)
+(
+partition p1 values less than (10),
+partition p2 values less than (20)
+);
+insert into t1 values(1);
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+truncate table t1;
+select rcyoriginname from gs_recyclebin;
+ rcyoriginname
+---------------
+ t1p2
+ t1p1
+ t1
+(3 rows)
+
+purge table t1;
+select pg_sleep(1);
+ pg_sleep
+----------
+
+(1 row)
+
+select * from gs_recyclebin;
+ rcybaseid | rcydbid | rcyrelid | rcyname | rcyoriginname | rcyoperation | rcytype | rcyrecyclecsn | rcyrecycletime | rcycreatecsn | rcychangecsn | rcynamespace | rcyowner | rcytablespace | rcyrelfilenode | rcycanrestore | rcycanpurge | rcyfrozenxid | rcyfrozenxid64
+-----------+---------+----------+---------+---------------+--------------+---------+---------------+----------------+--------------+--------------+--------------+----------+---------------+----------------+---------------+-------------+--------------+----------------
+(0 rows)

+drop table t1;
+purge recyclebin;
+--truncate partition toast table
+create table SPLIT_PARTITION_TABLE_001
+(
+c_smallint smallint,
+c_integer integer,
+c_bigint bigint,
+c_decimal decimal,
+c_numeric numeric,
+c_real real,
+c_double double precision,
+c_character_1 character varying(1024000),
+c_varchar varchar(100),
+c_character_2 character(100),
+c_char_1 char(100),
+c_character_3 character,
+c_char_2 char,
+c_text text,
+c_nvarchar2 nvarchar2,
+c_name name,
+c_timestamp_1 timestamp without time zone ,
+c_timestamp_2 timestamp with time zone,
+c_date date,
+c_tsvector tsvector,
+c_tsquery tsquery ,
+constraint SPLIT_PARTITION_TABLE_001_constraint primary key(c_smallint,c_integer,c_bigint,c_decimal,c_double)
+)
+partition by range (c_smallint,c_integer,c_bigint,c_decimal)
+(
+partition SPLIT_PARTITION_TABLE_001_1 values less than
(0,0,0,0), +partition SPLIT_PARTITION_TABLE_001_2 values less than (30,30,300,400.3), +partition SPLIT_PARTITION_TABLE_001_3 values less than (60,60,600,800.6), +partition SPLIT_PARTITION_TABLE_001_4 values less than (100,100,1000,1100.2) +); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "split_partition_table_001_constraint" for table "split_partition_table_001" +insert into SPLIT_PARTITION_TABLE_001 values(generate_series(-10,99),-10,100,100.3,10.3,10.2,1000.25,rpad('xy',4096,'ab'),'ABCD','ABC','DEF','A','A','HK','FVT_DATA_PARTITIONFVT_DATA_PARTITION','b','1954-2-6 00:00:30+8','1954-2-6 23:12:12.2356','1954-2-6 13:12:12.2356','abc db','ege'); +truncate table split_partition_table_001; +timecapsule table split_partition_table_001 to before truncate; +select count(*) from split_partition_table_001; + count +------- + 110 +(1 row) + +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "enable_recyclebin = off" > /dev/null 2>&1 +set enable_default_ustore_table = off; diff --git a/src/test/regress/output/timecapsule_version_ustore_test_2.source b/src/test/regress/output/timecapsule_version_ustore_test_2.source index b5affd6ad..b3b54db78 100644 --- a/src/test/regress/output/timecapsule_version_ustore_test_2.source +++ b/src/test/regress/output/timecapsule_version_ustore_test_2.source @@ -1,41 +1,22 @@ -- test: sighup set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) -show version_retention_age; - version_retention_age ------------------------ - 0 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 10000 +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) create schema schema_test_3; @@ -86,13 +67,13 @@ select * from dual where exists (select 1 from gs_txn_snapshot where snpxmin - g select * from dual where exists (select 1 from gs_txn_snapshot where snpxmin - gs_txid_oldestxmin() < 10000 - 1000); dummy ------- -(0 rows) + X +(1 row) select * from dual where exists (select 1 from gs_txn_snapshot where snpxmin - gs_txid_oldestxmin() >= 10000 - 1000); dummy ------- - X -(1 row) +(0 rows) -- test: syntax - [timecapsule { timestamp | csn } expression ] select * from dual timecapsule timestamp 0; @@ -170,13 +151,15 @@ partition by range (ca_address_sk) ) enable row movement; select * from tv_web_returns_p2 timecapsule timestamp 0; -ERROR: timecapsule feature does not support partitioned table +ERROR: argument of TIMESTAMP must be type timestamp with time zone, not type integer +LINE 1: select * from tv_web_returns_p2 timecapsule timestamp 0; + ^ select * from tv_web_returns_p2 timecapsule csn 0; -ERROR: timecapsule feature does not support partitioned table +ERROR: restore point not found timecapsule table tv_web_returns_p2 to timestamp 0; -ERROR: timecapsule feature does not support partitioned table +ERROR: argument of TIMESTAMP must be type timestamp with time zone, not type integer timecapsule table tv_web_returns_p2 to csn 0; -ERROR: timecapsule feature does not support partitioned table +ERROR: restore point not found drop table if exists tv_web_returns_p2; select * from pg_class timecapsule timestamp 0; ERROR: timecapsule feature does not support system table @@ -192,17 +175,17 @@ create table tv_lineitem ( l_orderkey bigint not null , l_partkey bigint not null -) +) with (orientation = column) ; select * from tv_lineitem timecapsule timestamp 0; -ERROR: timecapsule feature does not support non-row oriented table +ERROR: timecapsule feature does not support heap table select * from tv_lineitem timecapsule csn 0; -ERROR: timecapsule feature does not support non-row oriented table +ERROR: timecapsule feature does not support heap table timecapsule table tv_lineitem to timestamp 0; -ERROR: timecapsule feature does not support non-row oriented table +ERROR: timecapsule feature does not support heap table timecapsule table tv_lineitem to csn 0; -ERROR: timecapsule feature does not support non-row oriented table +ERROR: timecapsule feature does not support heap table drop table if exists tv_lineitem; drop table if exists tv_cities; NOTICE: table "tv_cities" does not exist, skipping @@ -399,7 +382,7 @@ select pg_sleep(4); alter table fb_qtbl_invalid_4 add partition p3 values less than (30); select * from fb_qtbl_invalid_4 timecapsule timestamp now() - 1/(24*60*60); -ERROR: timecapsule feature does not support partitioned table +ERROR: The table definition of "fb_qtbl_invalid_4" has been changed. 
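+--the new expectation reflects that flashback now works on partitioned ustore tables;
+--what is rejected here is crossing the ALTER TABLE ... ADD PARTITION issued above,
+--i.e. a table-definition change between the snapshot and the query. A sketch of the
+--accepted shape (commented out; it assumes no DDL has run since the snapshot):
+--select * from fb_qtbl_invalid_4 timecapsule timestamp now() - 1/(24*60*60);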
drop table fb_qtbl_invalid_4; --timecapsule query normal table drop table if exists fb_qtbl_normal_1; @@ -464,22 +447,19 @@ select * from fb_qtbl_normal_2 timecapsule timestamp now() - 1/(24*60*60) where (1 row) explain (costs off) select * from fb_qtbl_normal_2 timecapsule timestamp now() - 1/(24*60*60) where id > 0; - QUERY PLAN -------------------------------------------------- - Bitmap Heap Scan on fb_qtbl_normal_2 - Recheck Cond: (id > 0) - -> Bitmap Index Scan on idx_fb_qtbl_normal_2 - Index Cond: (id > 0) -(4 rows) + QUERY PLAN +---------------------------------------------------------------- + Index Only Scan using idx_fb_qtbl_normal_2 on fb_qtbl_normal_2 + Index Cond: (id > 0) +(2 rows) explain (costs off) select * from fb_qtbl_normal_2 where id > 0; - QUERY PLAN -------------------------------------------------- - Bitmap Heap Scan on fb_qtbl_normal_2 - Recheck Cond: (id > 0) - -> Bitmap Index Scan on idx_fb_qtbl_normal_2 - Index Cond: (id > 0) -(4 rows) + QUERY PLAN +---------------------------------------------------------------- + [Bypass] + Index Only Scan using idx_fb_qtbl_normal_2 on fb_qtbl_normal_2 + Index Cond: (id > 0) +(3 rows) drop table fb_qtbl_normal_2; --timecapsule to before ddl @@ -649,13 +629,12 @@ select * from fb_tbl_normal_2 where id > 0 order by id; (1 row) explain (costs off) select * from fb_tbl_normal_2 where id > 0; - QUERY PLAN ------------------------------------------------- - Bitmap Heap Scan on fb_tbl_normal_2 - Recheck Cond: (id > 0) - -> Bitmap Index Scan on idx_fb_tbl_normal_2 - Index Cond: (id > 0) -(4 rows) + QUERY PLAN +-------------------------------------------------------------- + [Bypass] + Index Only Scan using idx_fb_tbl_normal_2 on fb_tbl_normal_2 + Index Cond: (id > 0) +(3 rows) drop table temp; drop table fb_tbl_normal_2; @@ -970,10 +949,31 @@ begin execute immediate 'timecapsule table fb_tbl_normal to timestamp to_timestamp (''' || scn_num || ''',''yyyy-mm-dd hh24:mi:ss'')'; end; / --- call pro_fb_tbl_normal_rb1(1); --- select * from fb_tbl_normal order by id; --- call pro_fb_tbl_normal_rb1(2); --- select * from fb_tbl_normal order by id; +call pro_fb_tbl_normal_rb1(1); + pro_fb_tbl_normal_rb1 +----------------------- + +(1 row) + +select * from fb_tbl_normal order by id; + id | tt +----+------------ + 1 | adgasdfasg +(1 row) + +call pro_fb_tbl_normal_rb1(2); + pro_fb_tbl_normal_rb1 +----------------------- + +(1 row) + +select * from fb_tbl_normal order by id; + id | tt +-----+------------ + 10 | adgasdfasg + 100 | sfgsafjhaf +(2 rows) + call pro_fb_tbl_normal_rb1(3); pro_fb_tbl_normal_rb1 ----------------------- @@ -1000,43 +1000,5 @@ drop cascades to function pro_fb_tbl_normal_re1() drop cascades to function pro_timecapsule1() drop cascades to function pro_fb_tbl_normal_rb1(integer) reset search_path; --- reset vacuum_defer_cleanup_age -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 0 -(1 row) - -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 0 -(1 row) - -set enable_default_ustore_table = off; +set enable_default_ustore_table = off; +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 diff --git a/src/test/regress/output/timecapsule_version_ustore_test_3.source b/src/test/regress/output/timecapsule_version_ustore_test_3.source index a956256fc..8340a7a9c 100644 --- a/src/test/regress/output/timecapsule_version_ustore_test_3.source +++ b/src/test/regress/output/timecapsule_version_ustore_test_3.source @@ -1,41 +1,22 @@ -- test: sighup set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) -show version_retention_age; - version_retention_age ------------------------ - 0 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 10000 +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) ---------------------------------------------------------------------------------- @@ -531,42 +512,23 @@ drop function f_get_csn; drop procedure f_clean_csn; drop schema schema_test_3 cascade; reset search_path; --- reset vacuum_defer_cleanup_age -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 +-- reset undo_retention_time +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) -show version_retention_age; - version_retention_age ------------------------ - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 0 -(1 row) - -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) diff --git a/src/test/regress/output/timecapsule_version_ustore_test_4.source b/src/test/regress/output/timecapsule_version_ustore_test_4.source index 5809bfbdc..e001ae08f 100644 --- a/src/test/regress/output/timecapsule_version_ustore_test_4.source +++ b/src/test/regress/output/timecapsule_version_ustore_test_4.source @@ -1,41 +1,22 @@ -- test: sighup set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) -show version_retention_age; - version_retention_age ------------------------ - 0 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 10000 +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) ---------------------------------------------------------------------------------- @@ -583,42 +564,23 @@ drop function f_get_csn; drop procedure f_clean_csn; drop schema schema_test_3 cascade; reset search_path; --- reset vacuum_defer_cleanup_age -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 +-- reset undo_retention_time +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) -show version_retention_age; - version_retention_age ------------------------ - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 0 -(1 row) - -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) diff --git a/src/test/regress/output/timecapsule_version_ustore_test_5.source b/src/test/regress/output/timecapsule_version_ustore_test_5.source index b881ee2eb..2a0dc0721 100644 --- a/src/test/regress/output/timecapsule_version_ustore_test_5.source +++ b/src/test/regress/output/timecapsule_version_ustore_test_5.source @@ -1,40 +1,21 @@ set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) -show version_retention_age; - version_retention_age ------------------------ - 0 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 10000 +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) create schema schema_test_ztt; @@ -1000,41 +981,22 @@ ERROR: cannot drop schema schema_test_ztt because other objects depend on it DETAIL: function tri_insert_func() depends on schema schema_test_ztt HINT: Use DROP ... CASCADE to drop the dependent objects too. reset search_path; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - show version_retention_age; version_retention_age ----------------------- - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- 0 (1 row) -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show version_retention_age; - version_retention_age ------------------------ +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) diff --git a/src/test/regress/output/timecapsule_version_ustore_test_6.source b/src/test/regress/output/timecapsule_version_ustore_test_6.source index c22c8d190..4c36df723 100644 --- a/src/test/regress/output/timecapsule_version_ustore_test_6.source +++ b/src/test/regress/output/timecapsule_version_ustore_test_6.source @@ -1,40 +1,21 @@ set enable_default_ustore_table = on; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) -show version_retention_age; - version_retention_age ------------------------ - 0 -(1 row) - -\! 
@abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 10000" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 900" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 10000" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show version_retention_age; - version_retention_age ------------------------ - 10000 +show undo_retention_time; + undo_retention_time +--------------------- + 15min (1 row) create schema schema_test_ztt2; @@ -1436,41 +1417,22 @@ drop TABLESPACE new_tablespace_1; drop TABLESPACE new_tablespace_2; drop schema schema_test_ztt2; reset search_path; -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- - 10000 -(1 row) - show version_retention_age; version_retention_age ----------------------- - 10000 -(1 row) - -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "vacuum_defer_cleanup_age = 0" > /dev/null 2>&1 -select pg_sleep(4); - pg_sleep ----------- - -(1 row) - -show vacuum_defer_cleanup_age; - vacuum_defer_cleanup_age --------------------------- 0 (1 row) -\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "version_retention_age = 0" > /dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -D @abs_srcdir@/tmp_check/datanode1/ -c "undo_retention_time = 0" > /dev/null 2>&1 select pg_sleep(4); pg_sleep ---------- (1 row) -show version_retention_age; - version_retention_age ------------------------ +show undo_retention_time; + undo_retention_time +--------------------- 0 (1 row) diff --git a/src/test/regress/output/toast.source b/src/test/regress/output/toast.source index 826df2910..273327f94 100644 --- a/src/test/regress/output/toast.source +++ b/src/test/regress/output/toast.source @@ -64,10 +64,10 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', table public.xpto: INSERT: id[integer]:2 toasted_col1[text]:null rand1[double precision]:3077 toasted_col2[text]:'00010002000300040005000600070008000900100011001200130014001500160017001800190020002100 --?.* BEGIN - table public.xpto: UPDATE: id[integer]:1 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374 + table public.xpto: UPDATE: old-key: id[integer]:1 new-tuple: id[integer]:1 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657 --?.* BEGIN - table public.xpto: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:123.456 toasted_col2[text]:unchanged-toast-datum rand2[double precision]:1578 + table public.xpto: UPDATE: old-key: id[integer]:1 new-tuple: id[integer]:1 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657 --?.* BEGIN table public.xpto: DELETE: id[integer]:1 @@ -76,7 +76,7 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', table public.toasted_key: INSERT: id[integer]:1 toasted_key[text]:'1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 --?.* BEGIN - table public.toasted_key: UPDATE: 
id[integer]:1 toasted_key[text]:unchanged-toast-datum toasted_col1[text]:unchanged-toast-datum toasted_col2[text]:'987654321098765432109876543210987654321098765432109 + table public.toasted_key: UPDATE: old-key: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678 --?.* BEGIN table public.toasted_key: UPDATE: old-key: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678 @@ -343,7 +343,7 @@ WHERE data NOT LIKE '%INSERT: %'; regexp_replace ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ BEGIN - table public.toasted_several: UPDATE: old-key: id[integer]:1 toasted_key[text]:'98765432109876543210..7654321098765432109876543210987654321098765432109876543210' toasted_col2[text]:unchanged-toast-datum + table public.toasted_several: UPDATE: old-key: id[integer]:1 toasted_key[text]:'98765432109876543210..876543210987654321098765432109876543210987654321098765432109876543210987654321098765432109876543210' table public.toasted_several: DELETE: id[integer]:1 toasted_key[text]:'98765432109876543210987654321..876543210987654321098765432109876543210987654321098765432109876543210987654321098765432109876543210' --?.* (4 rows) diff --git a/src/test/regress/output/ustore_subpartition_tablespace.source b/src/test/regress/output/ustore_subpartition_tablespace.source new file mode 100644 index 000000000..4d37cae12 --- /dev/null +++ b/src/test/regress/output/ustore_subpartition_tablespace.source @@ -0,0 +1,2089 @@ +DROP SCHEMA ustore_subpartition_tablespace CASCADE; +ERROR: schema "ustore_subpartition_tablespace" does not exist +CREATE SCHEMA ustore_subpartition_tablespace; +SET CURRENT_SCHEMA TO ustore_subpartition_tablespace; +--prepare +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts1' +\! mkdir '@testtablespace@/ustore_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts2' +\! mkdir '@testtablespace@/ustore_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts3' +\! 
mkdir '@testtablespace@/ustore_subpartition_tablespace_ts3' +CREATE TABLESPACE ustore_subpartition_tablespace_ts1 LOCATION '@testtablespace@/ustore_subpartition_tablespace_ts1'; +CREATE TABLESPACE ustore_subpartition_tablespace_ts2 LOCATION '@testtablespace@/ustore_subpartition_tablespace_ts2'; +CREATE TABLESPACE ustore_subpartition_tablespace_ts3 LOCATION '@testtablespace@/ustore_subpartition_tablespace_ts3'; +-- +----test create subpartition with tablespace---- +-- +--range-range +CREATE TABLE t_range_range1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_range1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN 
(25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_range1; +CREATE TABLE t_range_range2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES LESS THAN (20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_range2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES LESS THAN (15) TABLESPACE 
ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_range2; +--range-list +CREATE TABLE t_range_list1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_list1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES 
(6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list1; +CREATE TABLE t_range_list2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_list2'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = 
ustore_subpartition_tablespace; + + CREATE TABLE t_range_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list2; +--range-hash +CREATE TABLE t_range_hash1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + 
SUBPARTITION P_RANGE3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_RANGE5 VALUES LESS THAN (25) +); +SELECT pg_get_tabledef('t_range_hash1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_hash1; +CREATE TABLE t_range_hash2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 + ), + PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 + ), + PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 + ), + PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE 
ustore_subpartition_tablespace_ts3, + PARTITION P_RANGE6 VALUES LESS THAN (30) +); +SELECT pg_get_tabledef('t_range_hash2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_hash2; +--list-range +CREATE TABLE t_list_range1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE 
ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_range1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------ + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range1; +CREATE TABLE t_list_range2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + 
SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_range2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------ + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 
VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range2; +--list-list +CREATE TABLE t_list_list1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_list1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_list1; +CREATE TABLE t_list_list2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) +( + 
PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_list2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES (6,7,8,9,10) TABLESPACE 
ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_list2; +--list-hash +CREATE TABLE t_list_hash1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +SELECT pg_get_tabledef('t_list_hash1'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 
TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash1; +CREATE TABLE t_list_hash2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +SELECT pg_get_tabledef('t_list_hash2'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + 
+ SUBPARTITION p_list3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE ustore_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list6_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash2; +--hash-range +CREATE TABLE t_hash_range1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_range1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------ + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_range1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION 
p_hash3_3 VALUES LESS THAN (15) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_range1; +CREATE TABLE t_hash_range2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES LESS THAN (20) + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_range2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------ + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_range2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION 
p_hash2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_range2; +--hash-list +CREATE TABLE t_hash_list1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_list1'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_list1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE 
ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list1; +CREATE TABLE t_hash_list2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_list2'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_list2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, 
+ + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list2; +--hash-hash +CREATE TABLE t_hash_hash1(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +SELECT pg_get_tabledef('t_hash_hash1'); + pg_get_tabledef +-------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_hash1 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 
TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_hash1; +CREATE TABLE t_hash_hash2(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +SELECT pg_get_tabledef('t_hash_hash2'); + pg_get_tabledef +-------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_hash2 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 TABLESPACE 
ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_hash2; +-- +----test add partition with tablespace---- +-- +--since the ADD SUBPARTITION definitions share the same code path, we only test the different partition types: range/list +--range-list +CREATE TABLE t_range_list3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) + ) +); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) + ); +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1; +ALTER TABLE t_range_list3 ADD PARTITION P_RANGE5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_range_list3'); + pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_list3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + 
SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE pg_default + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE pg_default + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list3; +CREATE TABLE t_range_list4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_RANGE1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_RANGE1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_RANGE2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_RANGE2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE2_4 VALUES (16,17,18,19,20) + ) +); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_RANGE3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE3_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE4 VALUES LESS THAN (20) + ( + SUBPARTITION P_RANGE4_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_RANGE4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_RANGE4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_RANGE4_4 VALUES (16,17,18,19,20) + ); +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_range_list4 ADD PARTITION P_RANGE6 VALUES LESS THAN (30); +SELECT pg_get_tabledef('t_range_list4'); + 
pg_get_tabledef +-------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_range_list4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY RANGE (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_range1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_range2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range2_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_range3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range3_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_range4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range4_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_range4_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_range4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_range4_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_range5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_range5_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_range6 VALUES LESS THAN (30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_range6_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_range_list4; +--list-hash +CREATE TABLE t_list_hash3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 + ) 
+); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 + ); +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1; +ALTER TABLE t_list_hash3 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25); +SELECT pg_get_tabledef('t_list_hash3'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_hash3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash3; +CREATE TABLE t_list_hash4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 + ) +); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + 
SUBPARTITION P_LIST4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 + ); +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_list_hash4 ADD PARTITION P_LIST6 VALUES (26,27,28,29,30); +SELECT pg_get_tabledef('t_list_hash4'); + pg_get_tabledef +--------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_hash4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY HASH (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list3_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list4_1 TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3+ + ( + + SUBPARTITION p_list5_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE ustore_subpartition_tablespace_ts1+ + ( + + SUBPARTITION p_list6_subpartdefault1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_hash4; +-- +----test add subpartition with tablespace---- +-- +--list-range +CREATE TABLE t_list_range3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE 
ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_LIST5 VALUES (21,22,23,24,25) +); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_4 VALUES LESS THAN (20); +ALTER TABLE t_list_range3 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_4 VALUES LESS THAN (20); +SELECT pg_get_tabledef('t_list_range3'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------ + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_range3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE pg_default + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE pg_default, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE pg_default + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE pg_default + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range3; +CREATE TABLE t_list_range4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) +( + PARTITION P_LIST1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_LIST1_1 VALUES LESS THAN 
(5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST1_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_LIST2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST2_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST2_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_LIST3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST3_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST4 VALUES (16,17,18,19,20) + ( + SUBPARTITION P_LIST4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_LIST4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_LIST4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_LIST4_4 VALUES LESS THAN (20) + ), + PARTITION P_LIST5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_LIST6 VALUES (26,27,28,29,30) +); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST1 ADD SUBPARTITION P_LIST1_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST2 ADD SUBPARTITION P_LIST2_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts2; +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST3 ADD SUBPARTITION P_LIST3_5 VALUES LESS THAN (25); +ALTER TABLE t_list_range4 MODIFY PARTITION P_LIST4 ADD SUBPARTITION P_LIST4_5 VALUES LESS THAN (25); +SELECT pg_get_tabledef('t_list_range4'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------------------ + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_list_range4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY LIST (c1) SUBPARTITION BY RANGE (c2) + + ( + + PARTITION p_list1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list1_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list1_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list1_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list1_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_list2_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list2_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_3 VALUES 
LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list2_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list2_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_list3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list3_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list3_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list3_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list3_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_list4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list4_1 VALUES LESS THAN (5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_2 VALUES LESS THAN (10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_list4_3 VALUES LESS THAN (15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_list4_4 VALUES LESS THAN (20) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_list4_5 VALUES LESS THAN (25) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_list5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_list5_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_list6 VALUES (26,27,28,29,30) TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_list6_subpartdefault1 VALUES LESS THAN (MAXVALUE) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_list_range4; +--hash-list +CREATE TABLE t_hash_list3(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) + ), + PARTITION P_HASH4 TABLESPACE ustore_subpartition_tablespace_ts1, + PARTITION P_HASH5 +); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20); +ALTER TABLE t_hash_list3 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20); +SELECT pg_get_tabledef('t_hash_list3'); + pg_get_tabledef 
+------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_list3 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE pg_default, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE pg_default + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ), + + PARTITION p_hash5 TABLESPACE pg_default + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE pg_default + + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list3; +CREATE TABLE t_hash_list4(c1 int, c2 int, c3 int) WITH (STORAGE_TYPE=USTORE) TABLESPACE ustore_subpartition_tablespace_ts1 +PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) +( + PARTITION P_HASH1 TABLESPACE ustore_subpartition_tablespace_ts1 + ( + SUBPARTITION P_HASH1_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH1_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH1_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH2 TABLESPACE ustore_subpartition_tablespace_ts2 + ( + SUBPARTITION P_HASH2_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH2_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH2_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH3 TABLESPACE ustore_subpartition_tablespace_ts3 + ( + SUBPARTITION P_HASH3_1 VALUES ( 1, 2, 3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH3_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH3_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH4 + ( + SUBPARTITION P_HASH4_1 VALUES ( 1, 2, 
3, 4, 5) TABLESPACE ustore_subpartition_tablespace_ts1, + SUBPARTITION P_HASH4_2 VALUES ( 6, 7, 8, 9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + SUBPARTITION P_HASH4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + SUBPARTITION P_HASH4_4 VALUES (16,17,18,19,20) + ), + PARTITION P_HASH5 TABLESPACE ustore_subpartition_tablespace_ts3, + PARTITION P_HASH6 +); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH1 ADD SUBPARTITION P_HASH1_5 VALUES(21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH2 ADD SUBPARTITION P_HASH2_5 VALUES(21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts2; +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH3 ADD SUBPARTITION P_HASH3_5 VALUES(21,22,23,24,25); +ALTER TABLE t_hash_list4 MODIFY PARTITION P_HASH4 ADD SUBPARTITION P_HASH4_5 VALUES(21,22,23,24,25); +SELECT pg_get_tabledef('t_hash_list4'); + pg_get_tabledef +------------------------------------------------------------------------------------------------------------- + SET search_path = ustore_subpartition_tablespace; + + CREATE TABLE t_hash_list4 ( + + c1 integer, + + c2 integer, + + c3 integer + + ) + + WITH (orientation=row, storage_type=ustore, compression=no) + + TABLESPACE ustore_subpartition_tablespace_ts1 + + PARTITION BY HASH (c1) SUBPARTITION BY LIST (c2) + + ( + + PARTITION p_hash1 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash1_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash1_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash1_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash1_5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash2 TABLESPACE ustore_subpartition_tablespace_ts2 + + ( + + SUBPARTITION p_hash2_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash2_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash2_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash2_5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts2 + + ), + + PARTITION p_hash3 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash3_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash3_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash3_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash3_5 VALUES (21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts3 + + ), + + PARTITION p_hash4 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash4_1 VALUES (1,2,3,4,5) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_2 VALUES (6,7,8,9,10) TABLESPACE ustore_subpartition_tablespace_ts2, + + SUBPARTITION p_hash4_3 VALUES (11,12,13,14,15) TABLESPACE ustore_subpartition_tablespace_ts3, + + SUBPARTITION p_hash4_4 VALUES (16,17,18,19,20) TABLESPACE ustore_subpartition_tablespace_ts1, + + SUBPARTITION p_hash4_5 VALUES 
(21,22,23,24,25) TABLESPACE ustore_subpartition_tablespace_ts1 + + ), + + PARTITION p_hash5 TABLESPACE ustore_subpartition_tablespace_ts3 + + ( + + SUBPARTITION p_hash5_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts3+ + ), + + PARTITION p_hash6 TABLESPACE ustore_subpartition_tablespace_ts1 + + ( + + SUBPARTITION p_hash6_subpartdefault1 VALUES (DEFAULT) TABLESPACE ustore_subpartition_tablespace_ts1+ + ) + + ) + + ENABLE ROW MOVEMENT; +(1 row) + +DROP TABLE t_hash_list4; +--finish +drop tablespace ustore_subpartition_tablespace_ts1; +drop tablespace ustore_subpartition_tablespace_ts2; +drop tablespace ustore_subpartition_tablespace_ts3; +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts1' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts2' +\! rm -fr '@testtablespace@/ustore_subpartition_tablespace_ts3' +DROP SCHEMA ustore_subpartition_tablespace CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/output/ustore_subpartition_vacuum_partition.source b/src/test/regress/output/ustore_subpartition_vacuum_partition.source new file mode 100644 index 000000000..7cea6046b --- /dev/null +++ b/src/test/regress/output/ustore_subpartition_vacuum_partition.source @@ -0,0 +1,224 @@ +DROP SCHEMA ustore_subpartition_vacuum_partition CASCADE; +ERROR: schema "ustore_subpartition_vacuum_partition" does not exist +CREATE SCHEMA ustore_subpartition_vacuum_partition; +SET CURRENT_SCHEMA TO ustore_subpartition_vacuum_partition; +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum = off" >/dev/null 2>&1 +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot = off" >/dev/null 2>&1 +CREATE TABLE temp1(c1 int, c2 int); +-- +--1. test for basic function +-- +CREATE TABLE range_list1 +( + month_code VARCHAR2 (30), + dept_code VARCHAR2 (30), + user_no VARCHAR2 (30), + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN('201903') + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN('201910') + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +--vacuum, success +VACUUM range_list1 PARTITION (p_201901); +VACUUM range_list1 SUBPARTITION (p_201902_a); +--vacuum full, success +VACUUM FULL range_list1 PARTITION (p_201901); +VACUUM FULL range_list1 SUBPARTITION (p_201902_a); +--vacuum full compact, fail +VACUUM FULL COMPACT range_list1 PARTITION (p_201901); +ERROR: COMPACT can not be used with PARTITION +VACUUM FULL COMPACT range_list1 SUBPARTITION (p_201902_a); +ERROR: COMPACT can not be used with SUBPARTITION +--vacuum freeze, success +VACUUM FREEZE range_list1 PARTITION (p_201901); +VACUUM FREEZE range_list1 SUBPARTITION (p_201902_a); +--vacuum verbose, success +VACUUM VERBOSE range_list1 PARTITION (p_201901); +--?INFO: vacuuming uheap "ustore_subpartition_vacuum_partition.range_list1" oldestXmin:.* +INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages +--?DETAIL: 0 dead row versions cannot be removed yet, oldest xmin: .* +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. 
+--?INFO: vacuuming uheap "ustore_subpartition_vacuum_partition.range_list1" oldestXmin:.* +INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages +--?DETAIL: 0 dead row versions cannot be removed yet, oldest xmin: .* +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +VACUUM VERBOSE range_list1 SUBPARTITION (p_201902_a); +--?INFO: vacuuming uheap "ustore_subpartition_vacuum_partition.range_list1" oldestXmin:.* +INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages +--?DETAIL: 0 dead row versions cannot be removed yet, oldest xmin: .* +There were 0 unused item pointers. +0 pages are entirely empty. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +--vacuum option all +VACUUM (FULL, VERBOSE, FREEZE) range_list1 PARTITION (p_201901); +--?INFO: vacuuming "ustore_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +--?INFO: vacuuming "ustore_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +VACUUM (FULL, VERBOSE, FREEZE) range_list1 SUBPARTITION (p_201902_a); +--?INFO: vacuuming "ustore_subpartition_vacuum_partition.range_list1"(datanode1 pid=.*) +--?INFO: "range_list1": found 0 removable, 0 nonremovable row versions in 0 pages(datanode1 pid=.*) +DETAIL: 0 dead row versions cannot be removed yet. +CPU 0.00s/0.00u sec elapsed 0.00 sec. +-- +--2. test the actual work +-- +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (1200) + ( + SUBPARTITION customer3_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 73728 +(1 row) + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', 
generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 114688 +(1 row) + +--vacuum full partition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 PARTITION (customer1); +VACUUM FULL range_list_sales1 PARTITION (customer2); +VACUUM FULL range_list_sales1 PARTITION (customer3); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 73728 +(1 row) + +--delete & insert +DELETE FROM range_list_sales1; +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 114688 +(1 row) + +--vacuum full subpartition +INSERT INTO temp1 VALUES(1,1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel3); +VACUUM FULL range_list_sales1 SUBPARTITION (customer1_channel4); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel1); +VACUUM FULL range_list_sales1 SUBPARTITION (customer2_channel2); +VACUUM FULL range_list_sales1 SUBPARTITION (customer3_channel1); +SELECT pg_relation_size('range_list_sales1'); + pg_relation_size +------------------ + 73728 +(1 row) + +--check index is ok +SELECT /*+ tablescan(range_list_sales1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + count +------- + 1000 +(1 row) + +--finish +DROP TABLE temp1; +DROP TABLE range_list1; +DROP TABLE range_list_sales1; +DROP SCHEMA ustore_subpartition_vacuum_partition CASCADE; +RESET CURRENT_SCHEMA; +\! @abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "autovacuum" >/dev/null 2>&1 +\! 
@abs_bindir@/gs_guc reload -Z datanode -D @abs_srcdir@/tmp_check/datanode1 -c "enable_defer_calculate_snapshot" >/dev/null 2>&1 diff --git a/src/test/regress/output/vec_nestloop_end.source b/src/test/regress/output/vec_nestloop_end.source index fe574bc41..6ccd435e2 100644 --- a/src/test/regress/output/vec_nestloop_end.source +++ b/src/test/regress/output/vec_nestloop_end.source @@ -6,9 +6,6 @@ SET standard_conforming_strings = on; SET check_function_bodies = false; SET default_tablespace = ''; SET default_with_oids = false; ----- ---- Special Case: nestloop + hashjoin + operator with parameters pushed down (dts2014111302175/2014120306303) ----- CREATE INDEX vecvtor_nestloop_base_index_01 ON VECTOR_NESTLOOP_TABLE_05 USING psort (id) LOCAL(PARTITION b1_p1_id_idx, PARTITION b1_p2_id_idx, PARTITION b1_p3_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_02 ON VECTOR_NESTLOOP_TABLE_06 USING psort (id, c_d_id, c_id) LOCAL(PARTITION b5_p1_id_c_d_id_c_id_idx, PARTITION b5_p2_id_c_d_id_c_id_idx, PARTITION b5_p3_id_c_d_id_c_id_idx, PARTITION b5_p4_id_c_d_id_c_id_idx, PARTITION b5_p5_id_c_d_id_c_id_idx, PARTITION b5_p6_id_c_d_id_c_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_03 ON VECTOR_NESTLOOP_TABLE_07 USING psort (id, c_d_id, c_w_id) LOCAL(PARTITION b7_p1_id_c_d_id_c_w_id_idx, PARTITION b7_p2_id_c_d_id_c_w_id_idx, PARTITION b7_p3_id_c_d_id_c_w_id_idx, PARTITION b7_p4_id_c_d_id_c_w_id_idx, PARTITION b7_p5_id_c_d_id_c_w_id_idx, PARTITION b7_p6_id_c_d_id_c_w_id_idx, PARTITION b7_p7_id_c_d_id_c_w_id_idx, PARTITION b7_p8_id_c_d_id_c_w_id_idx, PARTITION b7_p9_id_c_d_id_c_w_id_idx, PARTITION b7_p10_id_c_d_id_c_w_id_idx, PARTITION b7_p11_id_c_d_id_c_w_id_idx) ; diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule index f9779297d..2e148a247 100644 --- a/src/test/regress/parallel_schedule +++ b/src/test/regress/parallel_schedule @@ -813,3 +813,6 @@ test: gs_basebackup # gs_ledger test: ledger_table_case + +# wlm_memory_trace +test: wlm_memory_trace diff --git a/src/test/regress/parallel_schedule.db4ai b/src/test/regress/parallel_schedule.db4ai index e56ce3525..842346a69 100644 --- a/src/test/regress/parallel_schedule.db4ai +++ b/src/test/regress/parallel_schedule.db4ai @@ -8,9 +8,13 @@ #-------------------------- ## DB4AI #-------------------------- +test: single_node_opr_sanity test: db4ai_snapshots test: db4ai_gd_train_predict test: db4ai_gd_houses test: db4ai_gd_snapshots +test: db4ai_svm_kernels +test: db4ai_gd_pca_train_predict test: db4ai_kmeans_train_predict - +test: db4ai_xgboost_train_predict +test: db4ai_explain_model diff --git a/src/test/regress/parallel_schedule.decode b/src/test/regress/parallel_schedule.decode index 12d14ba35..15189cf2b 100644 --- a/src/test/regress/parallel_schedule.decode +++ b/src/test/regress/parallel_schedule.decode @@ -13,3 +13,4 @@ test: ustore_rewrite test: ustore_spill test: ustore_xact test: ustore_replication_origin +test: area diff --git a/src/test/regress/parallel_schedule.lite b/src/test/regress/parallel_schedule.lite new file mode 100644 index 000000000..98157e8cb --- /dev/null +++ b/src/test/regress/parallel_schedule.lite @@ -0,0 +1,865 @@ +# ---------- +# src/test/regress/parallel_schedule +# +# By convention, we put no more than twenty tests in any one parallel group; +# this limits the number of connections needed to run the tests. 
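+#
+# As in PostgreSQL's pg_regress schedules, each "test: a b c" line is one
+# parallel group whose members run concurrently, consecutive groups run
+# serially in file order, and "ignore: name" still runs a test but tolerates
+# its failure. For example, further down,
+#   test: upsert_001 upsert_002 upsert_003 upsert_008 upsert_009 upsert_010
+#   test: upsert_clean
+# runs the six upsert cases together and only then the cleanup step.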
+# ---------- + +# test nlssort_pinyin +test: nlssort_pinyin + +# test replace func and trunc func +test: gs_dump_package +test: replace_func_with_two_args +test: trunc_func_for_date + +# test AI4DB +test: dbe_scheduler +test: dbe_scheduler_privilege dbe_scheduler_calendar +test: dbe_scheduler_rename_user +test: plpgsql_override_out +test: plpgsql_sql_with_proc_keyword +test: plsql_show_all_error +test: pldeveloper_gs_source +test: index_advisor +test: pl_debugger_server pl_debugger_client +test: plan_hint plan_hint_set plan_hint_no_expand plan_hint_iud +test: large_sequence int16 forall_save_exceptions gs_dump_sequence +test: gs_dump_tableof +test: analyze_commands +#test: single_node_job +test: single_node_ddl +test: single_node_sqlbypass +test: median deferrable +test: gsql + +test: pg_job +test: hw_subpartition_prepare +test: hw_subpartition_createtable hw_subpartition_scan hw_subpartition_select hw_subpartition_split hw_subpartition_truncate hw_subpartition_update hw_subpartition_gpi hw_subpartition_analyze_vacuum hw_subpartition_alter_table +test: hw_subpartition_clean +test: gs_dump_subpartition + +# run tablespace by itself, and first, because it forces a checkpoint; +# we'd prefer not to have checkpoints later in the tests because that +# interferes with crash-recovery testing. +test: single_node_tablespace + +#test startwith...connect by +test: sw_prepare +test: sw_basic sw_icbc sw_siblings sw_bugfix sw_by_rownum_level +test: sw_clearup + +# ---------- +# The first group of parallel tests +# ---------- +test: single_node_boolean single_node_char single_node_name single_node_varchar single_node_text single_node_int2 single_node_int4 single_node_int8 single_node_oid single_node_float4 single_node_float8 single_node_bit single_node_numeric single_node_txid single_node_uuid single_node_enum single_node_money + +# Depends on things setup during char, varchar and text +#test: single_node_strings +# Depends on int2, int4, int8, float4, float8 +test: single_node_numerology + +# ---------- +# The second group of parallel tests +# ---------- +test: single_node_point single_node_lseg single_node_box single_node_path single_node_polygon single_node_circle single_node_date single_node_time single_node_timetz single_node_timestamp single_node_timestamptz +#test: single_node_interval +test: single_node_abstime single_node_reltime +#test: single_node_tinterval +test: single_node_inet single_node_macaddr single_node_tstypes single_node_comments + +# ---------- +# Another group of parallel tests +# geometry depends on point, lseg, box, path, polygon and circle +# horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime +# ---------- +#test: single_node_geometry single_node_horology +#test: single_node_regex +test: single_node_regex_temp +test: single_node_oidjoins single_node_type_sanity + +# ---------- +# These four each depend on the previous one +# ---------- +test: single_node_insert xc_rownum +test: single_node_temple +test: single_node_create_function_1 +#test: single_node_create_type +#test: single_node_create_table +#test: single_node_create_function_2 + +# ---------- +# Load huge amounts of data +# We should split the data files into single files and then +# execute two copy tests parallel, to check that copy itself +# is concurrent safe. 
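+# A split along those lines might look like, e.g.
+#   test: single_node_copy_part1 single_node_copy_part2
+# (hypothetical spec names, for illustration only); until the data files are
+# split, the combined copy group below stays commented out.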
+# ---------- +#test: single_node_copy single_node_copyselect + +# ---------- +# More groups of parallel tests +# ---------- +#test: single_node_create_misc +#test: single_node_create_operator +# These depend on the above two +#test: single_node_create_index +#test: single_node_create_view + +# ---------- +# Another group of parallel tests +# ---------- +test: single_node_create_aggregate +#test: single_node_create_function_3 single_node_create_cast +#test: single_node_constraints single_node_triggers single_node_inherit single_node_create_table_like single_node_typed_table +test: single_node_vacuum +#test: single_node_drop_if_exists + +# ---------- +# sanity_check does a vacuum, affecting the sort order of SELECT * +# results. So it should not run parallel to other tests. +# ---------- +#test: single_node_sanity_check + +# ---------- +# Believe it or not, select creates a table, subsequent +# tests need. +# ---------- +test: single_node_errors +#test: single_node_select +ignore: single_node_random + +# ---------- +# Another group of parallel tests +# ---------- +#test: single_node_select_into single_node_select_distinct +#test: single_node_select_distinct_on single_node_select_implicit single_node_select_having +test: single_node_select_implicit single_node_select_having +#test: single_node_subselect +test: single_node_union +#test: single_node_case single_node_join single_node_aggregates +#test: single_node_transactions +test: single_node_random +#test: single_node_portals +#test: single_node_arrays +#test: single_node_btree_index single_node_hash_index single_node_update +test: single_node_update +#test single_node_namespace +#test: single_node_prepared_xacts +#test: single_node_delete + +# ---------- +# Another group of parallel tests +# ---------- +#test: single_node_privileges +#test: single_node_security_label single_node_collate + +#test: single_node_misc +# rules cannot run concurrently with any test that creates a view +#test: single_node_rules + +# ---------- +# Another group of parallel tests +# ---------- +#test: single_node_select_views +#test: single_node_portals_p2 +test: single_node_foreign_key +#test: single_node_foreign_key single_node_cluster single_node_dependency +#test: single_node_guc +test: single_node_bitmapops single_node_combocid +#test: single_node_tsearch +#test: single_node_tsdicts +#test: single_node_foreign_data +#single_node_window +#test: single_node_xmlmap +#test: single_node_functional_deps single_node_advisory_lock single_node_json single_node_equivclass + +# ---------- +# Another group of parallel tests +# NB: temp.sql does a reconnect which transiently uses 2 connections, +# so keep this parallel group to at most 19 tests +# ---------- +test: single_node_sequence +#test: single_node_plancache single_node_limit single_node_plpgsql single_node_copy2 single_node_temp single_node_domain single_node_rangefuncs single_node_prepare single_node_without_oid single_node_conversion single_node_truncate single_node_alter_table single_node_sequence single_node_polymorphism +#test: single_node_rowtypes +#test: single_node_returning single_node_largeobject single_node_with single_node_xml + +# run stats by itself because its delay may be insufficient under heavy load +#test: single_node_stats + +# run and check forbidden functions are still forbidden to use in single node +test: single_node_forbidden +test: single_node_mergeinto + +# Trigger tests +test: single_node_triggers +#test: single_node_xc_trigship + +# Synonym tests +#test: single_node_synonym + +# 
unsupported view tests
+test: single_node_unsupported_view
+#test: hw_cstore
+
+# ----------
+# single_node_commit/rollback tests
+# ----------
+test: single_node_produce_commit_rollback
+test: single_node_function_commit_rollback
+
+test: instr_unique_sql
+test: auto_explain
+test: shutdown
+
+# List/Hash table exchange
+test: hw_partition_list_exchange
+test: hw_partition_hash_exchange
+
+# List/Hash table truncate
+test: hw_partition_list_truncate hw_partition_hash_truncate
+
+#test: hw_partition_start_end
+
+# interval partition
+test: hw_partition_interval
+test: hw_partition_interval_exchange
+test: hw_partition_interval_index
+test: hw_partition_interval_unusable_index
+test: hw_partition_interval_reindex
+test: hw_partition_interval_movement
+# To check creating interval partitions in parallel
+test: hw_partition_interval_parallel_prepare
+test: hw_partition_interval_parallel_insert hw_partition_interval_parallel_insert_01 hw_partition_interval_parallel_insert_02
+test: hw_partition_interval_parallel_end
+test: hw_partition_interval_select
+test: hw_partition_interval_check_syntax
+test: hw_partition_interval_split
+test: hw_partition_interval_merge
+test: hw_partition_interval_compatibility
+test: hw_partition_interval_dump_restore
+
+# Global Partition Index feature testcases
+# gpi create
+test: gpi_build_index
+
+# gpi check
+test: gpi_create_constraint
+test: gpi_unique_check
+
+# gpi index scan
+test: gpi_index
+
+# gpi index only scan
+test: gpi_index_only
+
+# gpi bitmap
+test: gpi_bitmapscan
+
+# gpi pwj
+test: gpi_pwj
+
+# gpi set unusable
+test: gpi_set_index_unusable
+
+# gpi rebuild
+#test: gpi_rebuild_index
+
+# gpi cluster
+test: gpi_cluster_01 gpi_cluster_02 gpi_cluster_03
+
+# gpi interval
+test: gpi_interval
+
+# gpi range
+test: gpi_range
+
+# gpi invalid part
+test: gpi_invalid_part
+test: gpi_clean_wait
+
+# gpi vacuum
+test: gpi_vacuum_lazy
+test: gpi_hw_partition_vacuum_full
+test: gpi_hw_partition_vacuum_full_01
+
+# gpi alter
+test: gpi_alter_partition
+test: gpi_alter_partition_with_update
+# test: gpi_merge_partitions
+
+# global temporary table tests
+test: gtt_stats
+test: gtt_function
+test: gtt_prepare
+test: gtt_parallel_1 gtt_parallel_2
+test: gtt_clean
+
+#openGauss synchronization test cases
+test: partiton_pathkey_col_plan partiton_pathkey_col_randomexec partiton_pathkey_row_plan partiton_pathkey_row_randomexec
+#test that the locale settings do not affect each other
+#test: pg_session_locale
+# ----------
+# These four each depend on the previous one (duplicate)
+# duplicated create_function_1 create_type create_table copy
+# ----------
+#test: type_sanity
+#test: create_function_1
+test: create_table
+test: temp__4
+#test: copy#
+
+# ----------
+# More groups of parallel tests
+# duplicated create_misc
+# ----------
+#test: hw_hashagg_start
+test: create_misc
+test: create_view1 create_view2 create_view3 create_view4 create_view5
+#test: int8#
+
+#duplicated select int8
+#test: select
+#test: misc
+#test: stats
+#test: alter_system_set
+
+#dispatched from schedule 13
+test: function
+test: aggregates_part1 aggregates_part2 aggregates_part3 count_distinct_part1 count_distinct_part2 count_distinct_part4
+#test: count_distinct_part3#
+
+test: hw_dfx_thread_status
+
+test: stable_function_shippable
+# ----------
+# data partition
+# ----------
+test: physical_slot
+
+test: hw_smp
+
+# test MERGE INTO
+
+# test INSERT UPDATE UPSERT
+#test: insert_update_002 insert_update_003 insert_update_008 insert_update_009 insert_update_010
+#test: insert_update_001#
+test: 
delete update namespace case select_having select_implicit
+test: hw_test_operate_user
+test: hw_createtbl_llt
+#test: gsqlerr#
+test: sqlLLT
+#test: hw_sql_llt#
+
+test: upsert_prepare
+test: upsert_001 upsert_002 upsert_003 upsert_008 upsert_009 upsert_010
+test: upsert_grammer_test_01 upsert_unlog_test upsert_tmp_test
+test: upsert_grammer_test_02 upsert_restriction upsert_composite
+test: upsert_trigger_test upsert_explain
+test: upsert_clean
+
+# all pass
+# run tablespace by itself, and first, because it forces a checkpoint;
+# we'd prefer not to have checkpoints later in the tests because that
+# interferes with crash-recovery testing.
+test: hw_alter_session
+test: tablespace
+test: hw_account_lock
+
+# ----------
+# Another group of parallel tests
+# ----------
+#test: hw_independent_user hw_user_basic hw_user_revoke hw_user_privilege hw_user_pguser hw_user_namespace
+test: hw_interval_format hw_function_p_3 hw_function_p_4 hw_current_schema hw_functions
+#test: hw_function_p_1 hw_function_p_2#
+test: hw_dba_enable_partition hw_tablespace
+test: hw_procedure_define
+#test: hw_anonymous_block
+#test: hw_procedure#
+test: hw_grant_all hw_dynamic_sql hw_func_return_out
+test: hw_package_function
+
+#show plan
+#test: plan_hint
+
+###split from parallel_schedule4###
+
+# ----------
+# Another group of parallel tests
+# ----------
+# plsql_packages tests
+
+test: hw_empty_str_to_null
+test: hw_schema
+
+test: tpchrush
+test: tpch01 tpch03 tpch04 libcomm_check_status tpch03_querymem
+test: tpch05 tpch06 tpch07 tpch08
+test: tpch09 tpch10 tpch11 tpch12
+test: tpch13 tpch14 tpch15 tpch16
+test: tpch18 tpch19 tpch20 tpch18_querymem
+test: tpch21 tpch22 tpch11_pretty_performance
+#test: tpch02 tpch17
+
+#test export
+test: temp__2
+
+test: vec_prepare_001 vec_prepare_002
+test: vec_prepare_003
+
+#test sort optimize
+test: sort_optimize_row sort_optimize_column sort_optimize_001
+#test early free
+test: early_free
+#test for col tpch with vector engine disabled
+test: tpch_disablevec01 tpch_disablevec03 tpch_disablevec04
+test: tpch_disablevec05 tpch_disablevec06 tpch_disablevec07
+test: tpch_disablevec08 tpch_disablevec09 tpch_disablevec12
+test: tpch_disablevec13 tpch_disablevec14 tpch_disablevec16
+test: tpch_disablevec18 tpch_disablevec19 tpch_disablevec21
+
+# ----------
+# Postgres-XC additional tests
+# ----------
+
+# This was used by triggers
+test: xc_create_function
+# Now xc_misc is used by xc_returning_step1 and xc_returning_step2
+test: xc_misc
+# These can be run in parallel
+test: xc_groupby xc_distkey xc_having
+#test: hw_rewrite_lazyagg hw_light
+test: xc_temp xc_FQS
+#test: xc_remote hw_pbe
+test: xc_FQS_join xc_copy
+#test: xc_alter_table
+test: xc_constraints xc_limit xc_sort
+#test: xc_params xc_returning_step1
+test: xc_params
+test: xc_returning_step2
+
+#test row compress
+#test: compress compress01 compress02 cmpr_toast_000 cmpr_toast_update cmpr_index_00 cmpr_6bytes cmpr_int cmpr_datetime cmpr_numstr cmpr_numstr01 cmpr_float cmpr_nulls_delta cmpr_nulls_prefix cmpr_copyto cmpr_mode_none00 cmpr_mode_none01 cmpr_references_00 cmpr_references_01
+#test: cmpr_rollback cmpr_drop_column cmpr_drop_column_01 cmpr_drop_column_02 cmpr_drop_column_03 cmpr_dead_loop_00 cmpr_timewithzone cmpr_cluster_00
+
+# Cluster setting related test is independent
+
+
+test: xc_dml
+# ---------------------------
+# test cases for CStore
+# ---------------------------
+#test: hw_cstore_alter cstore_alter_table2 cstore_alter_table3 cstore_alter_table4 cstore_alter_table5 cstore_alter_table6 cstore_alter_table8 cstore_alter_table9 cstore_alter_table10 hw_cstore_copy hw_alter_table_instant hw_cstore_copy1
+#test: cstore_alter_table cstore_alter_table1 cstore_alter_table7
+
+test: hw_cstore_tablespace hw_cstore_truncate hw_cstore_update
+#test: hw_cstore_roughcheck
+test: hw_cstore_partition_update hw_cstore_partition_update1 hw_cstore_partition_update2
+
+#------------------------------
+# CStore compression test cases
+#-----------------------------
+test: cstore_cmpr_delta cstore_cmpr_date cstore_cmpr_timestamp_with_timezone cstore_cmpr_time_with_timezone cstore_cmpr_delta_nbits cstore_cmpr_delta_int cstore_cmpr_str cstore_cmpr_dict_00 cstore_cmpr_rle_2byte_runs
+test: cstore_cmpr_every_datatype cstore_cmpr_zlib cstore_unsupported_feature cstore_unsupported_feature1 cstore_cmpr_rle_bound cstore_cmpr_rle_bound1 cstore_nan cstore_infinity cstore_log2_error cstore_create_clause cstore_create_clause1 cstore_nulls_00 cstore_partial_cluster_info
+test: cstore_replication_table_delete
+
+test: hw_cstore_index hw_cstore_index1 hw_cstore_index2
+test: hw_cstore_vacuum
+test: hw_cstore_insert hw_cstore_delete hw_cstore_unsupport
+
+# test on extended statistics
+test: hw_es_multi_column_stats_prepare
+test: hw_es_multi_column_stats_1 hw_es_multi_column_stats_1_1 hw_es_multi_column_stats_1_2 hw_es_multi_column_stats_1_3 hw_es_multi_column_stats_1_4 hw_es_multi_column_stats_1_5 hw_es_multi_column_stats_1_6 hw_es_multi_column_stats_2_1 hw_es_multi_column_stats_2_2 hw_es_multi_column_stats_2_3 hw_es_multi_column_stats_3 hw_es_multi_column_stats_3_1 hw_es_multi_column_stats_3_2
+test: hw_es_multi_column_stats_end
+
+test: limit1 setop setop_1 setop_2
+#test: checksum
+test: distinct prepare1
+test: unsupported_features statistic statistic_2
+
+test: hw_setop_writefile
+
+test: vec_nestloop_pre vec_mergejoin_prepare vec_result vec_limit vec_mergejoin_1 vec_mergejoin_2 vec_stream
+test: vec_mergejoin_inner vec_mergejoin_left vec_mergejoin_semi vec_mergejoin_anti llvm_vecexpr1 llvm_vecexpr2 llvm_vecexpr3 llvm_target_expr llvm_target_expr2 llvm_target_expr3 llvm_vecexpr_td
+#test: vec_nestloop1
+test: vec_mergejoin_aggregation llvm_vecagg llvm_vecagg2 llvm_vecagg3 llvm_vechashjoin
+#test: vec_nestloop_end
+
+# ----------
+# The first group of parallel tests
+# ----------
+test: boolean name oid bit txid uuid numeric_hide_tailing_zero rawlike
+#test: float8 numeric char varchar text int2 int4 float4 numeric_2 money
+
+# Depends on things setup during char, varchar and text
+# Depends on int2, int4, int8, float4, float8
+#test: strings numerology
+
+# ----------
+# The second group of parallel tests
+# ----------
+#test: lseg box path polygon circle date time timetz timestamptz abstime reltime inet
+test: interval tinterval macaddr tstypes comments
+#test: point timestamp
+
+# ----------
+# Another group of parallel tests
+# geometry depends on point, lseg, box, path, polygon and circle
+# horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime
+# ----------
+#test: geometry horology
+
+# ----------
+# Load huge amounts of data
+# We should split the data files into single files and then
+# execute two copy tests in parallel, to check that copy itself
+# is concurrency safe. (duplicate)
+# ----------
+test: copyselect copy_error_log copy_support_transform copy_from_support_parallel
+#test: copy_eol
+
+# ----------
+# More groups of parallel tests
+# ----------
+#test: create_index
+
+# Postgres-XC : Removed this test from the parallel group of tests since it used to give inconsistent plan outputs.
+#test: inherit
+# ----------
+# Another group of parallel tests
+# ----------
+test: create_function_3 vacuum
+#test: constraints drop_if_exists
+
+#test: errors subplan_base
+test: subplan_new
+#test: select
+test: col_subplan_new
+#test: col_subplan_base_1
+#test: join
+test: select_into subselect_part2 gs_aggregate
+#test: select_distinct subselect_part1 transactions btree_index select_distinct_on arrays hash_index
+#test: transactions_control random union
+#test: aggregates
+test: holdable_cursor
+#test: portals_p2 window tsearch temp__6 col_subplan_base_2
+
+test: alter_table_000 alter_table_002 alter_table_003
+#test: alter_table_001
+
+#test: with
+
+###split from parallel_schedule2###
+#test: hw_sec_account_lock_unlock rowlevelsecurity
+test: resolve_unknown
+test: query_rewrite
+test: create_schema
+#test: view_dump
+test: hw_function_p_3 hw_function_p_4
+#test: hw_function_p_2
+#test: hw_function_p_1
+test: create_c_function
+test: cstore_replication_table_delete
+
+test: hw_cursor_part1 hw_cursor_part2 hw_cursor_part3 hw_cursor_part4 hw_cursor_part5 hw_cursor_part6 hw_cursor_part7 hw_cursor_part8
+test: vec_append_part1 vec_append_part2 vec_append_part3
+test: vec_cursor_part1 vec_cursor_part2
+test: vec_delete_part1 vec_delete_part2
+test: vec_set_func
+
+test: alter_schema_db_rename_seq
+
+test: a_outerjoin_conversion
+
+# test on plan_table
+#test: plan_table04
+
+test: setrefs
+test: agg
+
+# test sql by pass
+test: bypass_simplequery_support
+test: bypass_preparedexecute_support
+test: sqlbypass_partition
+#test: sqlbypass_partition_prepare
+
+test: string_digit_to_numeric
+# Another group of parallel tests
+# ----------
+#test: collate tablesample tablesample_1 tablesample_2 matview
+test: matview_single
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: hll_hash hll_func hll_para hll_mpp hll_cstore hll_misc
+
+test: function_get_table_def
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: hw_order
+
+
+# ----------
+# Database security
+# ----------
+test: hw_pwd_reuse
+#test: hw_auditadmin
+
+test: performance_enhance
+test: explain_fqs
+#test: explain_pbe
+# temp__3 create_table copy vec_prepare_001 vec_prepare_002 vec_prepare_003 int4 int8 are duplicated
+test: temp__3
+
+#security_plugin
+test: sp_set_policy_plugin_enable
+test: sp_masking_udf
+test: sp_set_policy_plugin_disable
+
+# ----------
+# Another group of parallel tests
+# NB: temp.sql does a reconnect which transiently uses 2 connections,
+# so keep this parallel group to at most 19 tests
+# ----------
+#test: plpgsql
+test: select_where_func
+test: plpgsql_array plpgsql_bulk_collect plpgsql_tableof plpgsql_table_opengauss arrayinterface_single arrayinterface_ted
+test: plpgsql_assign_value_to_array_attribute plpgsql_cursor_rowtype plpgsql_inout_param arrayinterface_indexby
+test: plpgsql_array_of_record plpgsql_nest_compile plpgsql_tableofindex
+test: plpgsql_assign_list
+test: plpgsql_package_type plpgsql_package_param
+test: plpgsql_record_attrname
+test: plpgsql_insert_record
+test: hw_package_variable
+#test: plancache limit rangefuncs prepare
+test: returning largeobject
+test: hw_explain_pretty1 hw_explain_pretty2 hw_explain_pretty3
+test: goto
+test: equivalence_class
+test: tsdb_delta2_compress
+test: tsdb_xor_compress
+#test: tsdb_aggregate
+
+test: readline
+test: hw_to_timestamp hw_view_privilege
+
+test: hw_identifier
+#test: hw_hashint1 hw_smalldatetime_hash hw_rawtype_hash
+#test: hw_nvarchar2_hash cmpr_smallint cmpr_prefix_150left cmpr_uint32_oid
+test: oidjoins opr_sanity_2 regex
+#test: opr_sanity_1
+
+test: pmk
+# Cluster setting related test is independent
+# ----------
+# Test of changed data type compatible with Oracle
+
+test: hw_datatype_2 hw_datatype_3
+test: hw_datatype
+test: test_regex llt_atc
+
+# ----------
+# test for set operations
+# ----------
+test: select_nest_views
+#test: enum
+#show plan
+test: col_joinplan col_joinnew
+test: col_limit col_distinct col_prepare
+test: col_function_1 col_function_2 col_count_distinct_1 col_count_distinct_2 col_count_distinct_3 col_count_distinct_4
+test: directory_test
+test: analyse_verify
+test: create_compositetype
+test: hw_pct_type_and_rowtype
+#test: create_basetype
+#test: tabletype
+#test with recursive
+test: recursive_ref_recursive
+#test: recursive_prepare
+#test: recursive_cte
+#test: recursive_cte_col
+#test: nohashjoin_recursive_cte
+#test: nohashjoin_recursive_cte_col
+#test: others
+#test: icbc_customer
+#test: recursive_unshippable
+#test: recursive_finalize
+#test: recursive_cte_1
+test: test_relpages
+
+test: temp__3
+test: vec_window_pre
+test: gin_test_2
+#test: window1
+test: vec_window_001
+#test: vec_window_002
+test: vec_numeric_sop_1 vec_numeric_sop_2 vec_numeric_sop_3 vec_numeric_sop_4 vec_numeric_sop_5
+#test: vec_window_end
+
+test: vec_unique_pre vec_bitmap_prepare
+test: vec_unique vec_setop_001 vec_setop_002 vec_setop_003 vec_setop_004 hw_vec_int4 hw_vec_int8 hw_vec_float4 hw_vec_float8
+#test: vec_setop_005
+test: hw_vec_constrainst vec_numeric vec_numeric_1 vec_numeric_2 vec_bitmap_1 vec_bitmap_2
+test: disable_vector_engine
+test: hybrid_row_column
+test: retry
+test: hw_replication_slots
+test: insert
+test: copy2 temp
+test: truncate
+#test: temp_table
+
+#test: b_compatibility
+test: hw_compatibility
+test: hw_groupingsets hw_row_grouping_set
+test: char_truncation_common char_truncation_cast
+
+#this case is dispatched from schedule10(gin_test)
+test: gin_test1 gin_test2 gin_test3
+
+#the following part is dispatched from schedule15
+
+# FIXME: move me back to the parallel test when the refcnt issue is fixed
+# The two tests below are unstable and temporarily ignored. The same applies to distribute_dattistic and relallvisible; Dongwang will solve the problem.
+
+#test: hw_expression_alias
+
+
+#==========================================================================================================================================
+# privilege test
+test: predefined_roles
+
+# ----------
+# src/test/regress/parallel_schedule.33
+#
+# By convention, we put no more than twenty tests in any one parallel group;
+# this limits the number of connections needed to run the tests.
+# ----------
+
+
+test: udf_crem
+
+test: create_c_function
+
+#---1. Drop-Column test
+test: cstore_drop_column_replicated
+#test: cstore_drop_column
+
+#split from parallel_schedule2
+
+# ----------
+# Advisory lock needs to be tested in series in Postgres-XC
+# ---------
+test: advisory_lock
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: cluster dependency bitmapops tsdicts functional_deps
+test: json_and_jsonb json jsonb jsonb2
+#test: guc
+
+# test for vec sonic hash
+test: vec_sonic_hashjoin_number_prepare
+test: vec_sonic_hashjoin_number_nospill
+
+test: timeout
+test: dml
+test: hashfilter hashfilter_1
+test: reduce_orderby
+#test: backtrace_log
+#test: bulkload_start
+test: bulkload_parallel_test_2 bulkload_parallel_test_3
+#test: bulkload_parallel_test_1 bulkload_parallel_test_4
+
+test: tpchcol05 tpchcol07 tpchcol08 tpchcol09
+
+test: tpchcol01
+test: tpchcol06
+test: tpchcol03 tpchcol04
+test: tpchcol12 tpchcol13 tpchcol14 tpchcol16 tpchcol18 tpchcol19 tpchcol21
+
+test: vec_partition vec_partition_1 vec_material_001
+
+test: llvm_vecsort llvm_vecsort2
+
+test: udf_crem create_c_function
+
+# procedure, Function Test
+#test: create_procedure postgres_fdw
+test: create_procedure
+test: create_function
+test: pg_compatibility
+
+# gs_basebackup
+# test: gs_basebackup
+
+# autonomous transaction Test
+test: autonomous_test
+
+#test: partition for hash list
+test: pbe_hash_list_partition
+test: hw_partition_list_insert
+test: hw_partition_list_ddl
+test: hw_partition_hash_insert
+test: hw_partition_hash_ddl
+test: hw_partition_hash_dml
+test: hw_partition_hash_dql
+test: hw_partition_list_dml
+test: hw_partition_list_dql
+test: hw_cipher_sm4
+test: hw_cipher_aes128
+test: hw_pwd_encryption_sm3
+test: rule_test
+
+#delete limit
+test: delete_limit
+
+test: hotpatch
+
+# --------------------------
+# DB4AI
+# --------------------------
+test: db4ai_snapshots
+test: db4ai_gd_train_predict
+test: db4ai_gd_houses
+test: db4ai_gd_snapshots
+test: db4ai_kmeans_train_predict
+test: db4ai_explain_model
+
+test: leaky_function_operator
+
+# ----------
+# gs_guc test
+# ----------
+#test: gs_guc
+
+test: smp
+
+test: cstore_unique_index
+
+#generated column test
+test: generated_col
+
+# gs_ledger
+test: ledger_table_case
+
+# store and restore comment on ora function and procedure
+test: comment_proc
+test: hw_package hw_dbms_sql1
+test: hw_cipher_sm4
+test: hw_cipher_aes128
+test: hw_pwd_encryption_sm3
+test: hw_package_single
+test: sequence_cache_test
+test: procedure_privilege_test
+
+# pljson
+test: pljson
+
+test: toomanyparams
+
+test: guc_help
+test: dw_switch
diff --git a/src/test/regress/parallel_schedule.liteA b/src/test/regress/parallel_schedule.liteA
new file mode 100644
index 000000000..d18060f41
--- /dev/null
+++ b/src/test/regress/parallel_schedule.liteA
@@ -0,0 +1,558 @@
+# ----------
+# src/test/regress/parallel_schedule0A
+#
+# By convention, we put no more than twenty tests in any one parallel group;
+# this limits the number of connections needed to run the tests.
+# ----------
+
+# test nlssort_pinyin
+test: nlssort_pinyin
+
+# test replace func and trunc func
+test: replace_func_with_two_args
+test: trunc_func_for_date
+
+test: analyze_commands
+#test: single_node_job
+test: single_node_ddl
+test: single_node_sqlbypass
+test: median
+
+
+# run tablespace by itself, and first, because it forces a checkpoint;
+# we'd prefer not to have checkpoints later in the tests because that
+# interferes with crash-recovery testing.
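Each of these schedule files is read line by line by the regression driver. A minimal sketch of exercising one schedule directly, assuming the stock pg_regress flags (`--schedule`, `--max-connections`); the openGauss harness wraps this in its own regress make targets, so the real entry point and setup options may differ:

```
# Assumed invocation via stock pg_regress; the openGauss make targets
# add their own cluster setup around this.
cd src/test/regress
./pg_regress --schedule=parallel_schedule.liteA --max-connections=20
```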
+test: single_node_tablespace
+
+test: large_sequence int16 gs_dump_sequence
+
+# ----------
+# The first group of parallel tests
+# ----------
+test: single_node_boolean single_node_char single_node_name single_node_varchar single_node_text single_node_int2 single_node_int4 single_node_int8 single_node_oid single_node_float4 single_node_float8 single_node_bit single_node_numeric single_node_txid single_node_uuid single_node_enum single_node_money
+
+# Depends on things setup during char, varchar and text
+#test: single_node_strings
+# Depends on int2, int4, int8, float4, float8
+test: single_node_numerology
+
+# ----------
+# The second group of parallel tests
+# ----------
+test: single_node_point single_node_lseg single_node_box single_node_path single_node_polygon single_node_circle single_node_date single_node_time single_node_timetz single_node_timestamp single_node_timestamptz
+#test: single_node_interval
+test: single_node_abstime single_node_reltime
+#test: single_node_tinterval
+test: single_node_inet single_node_macaddr single_node_tstypes single_node_comments
+
+# ----------
+# Another group of parallel tests
+# geometry depends on point, lseg, box, path, polygon and circle
+# horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime
+# ----------
+#test: single_node_geometry single_node_horology
+#test: single_node_regex
+test: single_node_regex_temp
+test: single_node_oidjoins single_node_type_sanity
+
+# ----------
+# These four each depend on the previous one
+# ----------
+test: single_node_insert xc_rownum
+test: single_node_temple
+test: single_node_create_function_1
+#test: single_node_create_type
+#test: single_node_create_table
+#test: single_node_create_function_2
+
+# ----------
+# Load huge amounts of data
+# We should split the data files into single files and then
+# execute two copy tests in parallel, to check that copy itself
+# is concurrency safe.
+# ----------
+#test: single_node_copy single_node_copyselect
+
+# ----------
+# More groups of parallel tests
+# ----------
+#test: single_node_create_misc
+#test: single_node_create_operator
+# These depend on the above two
+#test: single_node_create_index
+#test: single_node_create_view
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: single_node_create_aggregate
+#test: single_node_create_function_3 single_node_create_cast
+#test: single_node_constraints single_node_triggers single_node_inherit single_node_create_table_like single_node_typed_table
+test: single_node_vacuum
+#test: single_node_drop_if_exists
+
+# ----------
+# sanity_check does a vacuum, affecting the sort order of SELECT *
+# results. So it should not run parallel to other tests.
+# ----------
+#test: single_node_sanity_check
+
+# ----------
+# Believe it or not, select creates a table that subsequent
+# tests need.
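Dependency comments like the ones above rely entirely on ordering: names on a single `test:` line run concurrently, while consecutive `test:` lines run one group after another. A short illustration of the grammar these files share, using hypothetical test names:

```
test: a b c     # one parallel group: a, b and c run concurrently
test: d         # d starts only after the group above has finished
ignore: e       # e still runs, but its failure does not fail the suite
#test: f        # commented out: f is skipped entirely
```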
+# ----------
+test: single_node_errors
+#test: single_node_select
+ignore: single_node_random
+
+# ----------
+# Another group of parallel tests
+# ----------
+#test: single_node_select_into single_node_select_distinct
+#test: single_node_select_distinct_on single_node_select_implicit single_node_select_having
+test: single_node_select_implicit single_node_select_having
+#test: single_node_subselect
+test: single_node_union
+#test: single_node_case single_node_join single_node_aggregates
+#test: single_node_transactions
+test: single_node_random
+#test: single_node_portals
+#test: single_node_arrays
+#test: single_node_btree_index single_node_hash_index single_node_update
+test: single_node_update
+#test single_node_namespace
+#test: single_node_prepared_xacts
+#test: single_node_delete
+
+# ----------
+# Another group of parallel tests
+# ----------
+#test: single_node_privileges
+#test: single_node_security_label single_node_collate
+
+#test: single_node_misc
+# rules cannot run concurrently with any test that creates a view
+#test: single_node_rules
+
+# ----------
+# Another group of parallel tests
+# ----------
+#test: single_node_select_views
+#test: single_node_portals_p2
+test: single_node_foreign_key
+#test: single_node_foreign_key single_node_cluster single_node_dependency
+#test: single_node_guc
+test: single_node_bitmapops single_node_combocid
+#test: single_node_tsearch
+#test: single_node_tsdicts
+#test: single_node_foreign_data
+#single_node_window
+#test: single_node_xmlmap
+#test: single_node_functional_deps single_node_advisory_lock single_node_json single_node_equivclass
+
+# ----------
+# Another group of parallel tests
+# NB: temp.sql does a reconnect which transiently uses 2 connections,
+# so keep this parallel group to at most 19 tests
+# ----------
+test: single_node_sequence
+#test: single_node_plancache single_node_limit single_node_plpgsql single_node_copy2 single_node_temp single_node_domain single_node_rangefuncs single_node_prepare single_node_without_oid single_node_conversion single_node_truncate single_node_alter_table single_node_sequence single_node_polymorphism
+#test: single_node_rowtypes
+#test: single_node_returning single_node_largeobject single_node_with single_node_xml
+
+# run stats by itself because its delay may be insufficient under heavy load
+#test: single_node_stats
+
+# run and check forbidden functions are still forbidden to use in single node
+test: single_node_forbidden
+test: single_node_mergeinto
+
+# Trigger tests
+test: single_node_triggers
+#test: single_node_xc_trigship
+
+# Synonym tests
+#test: single_node_synonym
+
+# unsupported view tests
+test: single_node_unsupported_view
+#test: hw_cstore
+
+# ----------
+# single_node_commit/rollback tests
+# ----------
+# interval partition
+test: hw_partition_interval_index
+test: hw_partition_interval_select
+test: hw_partition_interval_compatibility
+#openGauss synchronization test cases
+test: partiton_pathkey_col_plan partiton_pathkey_col_randomexec partiton_pathkey_row_plan partiton_pathkey_row_randomexec
+#test that the locale settings do not affect each other
+#test: pg_session_locale
+# ----------
+# These four each depend on the previous one(duplicate)
+# duplicated create_function_1 create_type create_table copy
+# ----------
+#test: type_sanity
+#test: create_function_1
+test: create_table
+test: temp__4
+#test: copy#
+
+# ----------
+# More groups of parallel tests
+# duplicated create_misc
+# ----------
+#test: hw_hashagg_start
+test: create_misc
+test: create_view1 create_view2 create_view3 create_view4 create_view5
+#test: int8#
+
+#duplicated select int8
+#test: select
+#test: misc
+#test: stats
+#test: alter_system_set
+
+# test for hll
+test: hll_hash hll_func hll_para hll_mpp hll_cstore hll_misc
+
+# test for function pg_get_tabledef
+test: function_get_table_def
+
+#dispatch from 13
+test: function
+test: aggregates_part1 aggregates_part2 aggregates_part3 count_distinct_part1 count_distinct_part2 count_distinct_part4
+#test: count_distinct_part3#
+
+test: hw_dfx_thread_status
+
+test: stable_function_shippable
+# ----------
+# data partition
+# ----------
+test: physical_slot
+
+test: hw_smp
+
+# test MERGE INTO
+
+# test INSERT UPDATE UPSERT
+#test: insert_update_002 insert_update_003 insert_update_008 insert_update_009 insert_update_010
+#test: insert_update_001#
+test: delete update namespace case select_having select_implicit
+test: hw_test_operate_user
+test: hw_createtbl_llt
+#test: gsqlerr#
+test: sqlLLT
+#test: hw_sql_llt#
+
+#test: upsert_prepare
+#test: upsert_001 upsert_002 upsert_003 upsert_008 upsert_009 upsert_010
+#test: upsert_grammer_test_01 upsert_unlog_test upsert_tmp_test
+#test: upsert_grammer_test_02 upsert_restriction upsert_composite
+#test: upsert_trigger_test upsert_explain
+#test: upsert_clean
+
+# all pass
+# run tablespace by itself, and first, because it forces a checkpoint;
+# we'd prefer not to have checkpoints later in the tests because that
+# interferes with crash-recovery testing.
+test: hw_alter_session
+test: tablespace
+test: hw_account_lock
+
+# ----------
+# Another group of parallel tests
+# ----------
+#test: hw_independent_user hw_user_basic hw_user_revoke hw_user_privilege hw_user_pguser hw_user_namespace
+test: hw_interval_format hw_function_p_3 hw_function_p_4 hw_current_schema hw_functions
+#test: hw_function_p_1 hw_function_p_2#
+test: hw_dba_enable_partition hw_tablespace
+test: hw_procedure_define
+#test: hw_anonymous_block
+#test: hw_procedure#
+test: hw_grant_all hw_func_return_out
+#hw_dynamic_sql
+test: hw_package_function
+
+#show plan
+#test: plan_hint
+
+###split from parallel_schedule4###
+
+# ----------
+# Another group of parallel tests
+# ----------
+# plsql_packages tests
+
+test: hw_empty_str_to_null
+test: hw_schema
+
+test: tpchrush
+test: tpch01 tpch03 tpch04 libcomm_check_status tpch03_querymem
+test: tpch05 tpch06 tpch07 tpch08
+test: tpch09 tpch10 tpch11 tpch12
+test: tpch13 tpch14 tpch15 tpch16
+test: tpch18 tpch19 tpch20 tpch18_querymem
+test: tpch21 tpch22 tpch11_pretty_performance
+#test: tpch02 tpch17
+
+#test export
+test: temp__2
+
+test: vec_prepare_001 vec_prepare_002
+test: vec_prepare_003
+
+#test sort optimize
+test: sort_optimize_row sort_optimize_column sort_optimize_001
+#test early free
+test: early_free
+#test for col tpch with vector engine disabled
+test: tpch_disablevec01 tpch_disablevec03 tpch_disablevec04
+test: tpch_disablevec05 tpch_disablevec06 tpch_disablevec07
+test: tpch_disablevec08 tpch_disablevec09 tpch_disablevec12
+test: tpch_disablevec13 tpch_disablevec14 tpch_disablevec16
+test: tpch_disablevec18 tpch_disablevec19 tpch_disablevec21
+
+# ----------
+# Postgres-XC additional tests
+# ----------
+
+# This was used by triggers
+test: xc_create_function
+# Now xc_misc is used by xc_returning_step1 and xc_returning_step2
+test: xc_misc
+# These can be run in parallel
+test: xc_groupby xc_distkey xc_having
+#test: hw_rewrite_lazyagg hw_light
+test: xc_temp xc_FQS
+#test: xc_remote hw_pbe
+test: xc_FQS_join xc_copy
+#test: xc_alter_table
+test: xc_constraints xc_limit xc_sort
+#test: xc_params xc_returning_step1
+test: xc_params
+test: xc_returning_step2
+
+#test row compress
+#test: compress compress01 compress02 cmpr_toast_000 cmpr_toast_update cmpr_index_00 cmpr_6bytes cmpr_int cmpr_datetime cmpr_numstr cmpr_numstr01 cmpr_float cmpr_nulls_delta cmpr_nulls_prefix cmpr_copyto cmpr_mode_none00 cmpr_mode_none01 cmpr_references_00 cmpr_references_01
+#test: cmpr_rollback cmpr_drop_column cmpr_drop_column_01 cmpr_drop_column_02 cmpr_drop_column_03 cmpr_dead_loop_00 cmpr_timewithzone cmpr_cluster_00
+
+# Cluster setting related test is independent
+
+
+test: xc_dml
+
+# Postgres-XC : Removed this test from the parallel group of tests since it used to give inconsistent plan outputs.
+#test: inherit
+# ----------
+# Another group of parallel tests
+# ----------
+test: create_function_3 vacuum
+#test: constraints drop_if_exists
+
+#test: errors subplan_base
+test: subplan_new
+#test: select
+test: col_subplan_new
+#test: col_subplan_base_1
+#test: join
+test: select_into subselect_part2 gs_aggregate
+#test: select_distinct subselect_part1 transactions btree_index select_distinct_on arrays hash_index
+#test: transactions_control random union
+#test: aggregates
+test: holdable_cursor
+#test: portals_p2 window tsearch temp__6 col_subplan_base_2
+
+test: alter_table_000 alter_table_002 alter_table_003
+#test: alter_table_001
+
+#test: with
+
+# ----------
+# Database security
+# ----------
+test: hw_pwd_reuse
+#test: hw_auditadmin
+
+test: performance_enhance
+test: explain_fqs
+#test: explain_pbe
+# temp__3 create_table copy vec_prepare_001 vec_prepare_002 vec_prepare_003 int4 int8 are duplicated
+test: temp__3
+# ----------
+# Another group of parallel tests
+# NB: temp.sql does a reconnect which transiently uses 2 connections,
+# so keep this parallel group to at most 19 tests
+# ----------
+#test: plpgsql
+test: plpgsql_multiset
+test: plpgsql_array_opengauss
+test: plpgsql_table_opengauss
+test: arrayinterface_single
+#arrayinterface_ted
+#test: plancache limit rangefuncs prepare
+# arrayinterface_ted
+test: plpgsql_assign_value_to_array_attribute
+test: plpgsql_cursor_rowtype
+test: plpgsql_array_of_record
+test: plpgsql_assign_list
+test: plpgsql_package_type
+test: plpgsql_record_attrname
+test: hw_package_variable
+test: returning largeobject
+test: hw_explain_pretty1 hw_explain_pretty2 hw_explain_pretty3
+test: goto
+test: equivalence_class
+test: tsdb_delta2_compress
+test: tsdb_xor_compress
+#test: tsdb_aggregate
+
+test: readline
+test: hw_to_timestamp hw_view_privilege
+
+test: hw_identifier
+#test: hw_hashint1 hw_smalldatetime_hash hw_rawtype_hash
+#test: hw_nvarchar2_hash cmpr_smallint cmpr_prefix_150left cmpr_uint32_oid
+test: oidjoins opr_sanity_2 regex
+#test: opr_sanity_1
+
+test: pmk
+# Cluster setting related test is independent
+# ----------
+# Test of changed data type compatible with Oracle
+
+test: hw_datatype_2 hw_datatype_3
+test: hw_datatype
+test: test_regex llt_atc
+
+# ----------
+# test for set operations
+# ----------
+test: select_nest_views
+#test: enum
+#show plan
+test: col_joinplan col_joinnew
+test: col_limit col_distinct col_prepare
+test: col_function_1 col_function_2 col_count_distinct_1 col_count_distinct_2 col_count_distinct_3 col_count_distinct_4
+test: directory_test
+test: analyse_verify
+test: create_compositetype
+test: hw_pct_type_and_rowtype
+#test: create_basetype
+#test: tabletype
+#test with recursive
+test: recursive_ref_recursive
+#test: recursive_prepare
+#test: recursive_cte
+#test: recursive_cte_col
+#test: nohashjoin_recursive_cte
+#test: nohashjoin_recursive_cte_col
+#test: others
+#test: icbc_customer
+#test: recursive_unshippable
+#test: recursive_finalize
+#test: recursive_cte_1
+test: test_relpages
+
+test: temp__3
+test: vec_window_pre
+test: gin_test_2
+#test: window1
+test: vec_window_001
+#test: vec_window_002
+test: vec_numeric_sop_1 vec_numeric_sop_2 vec_numeric_sop_3 vec_numeric_sop_4 vec_numeric_sop_5
+#test: vec_window_end
+
+test: vec_unique_pre vec_bitmap_prepare
+test: vec_unique vec_setop_001 vec_setop_002 vec_setop_003 vec_setop_004 hw_vec_int4 hw_vec_int8 hw_vec_float4 hw_vec_float8
+#test: vec_setop_005
+test: hw_vec_constrainst vec_numeric vec_numeric_1 vec_numeric_2 vec_bitmap_1 vec_bitmap_2
+test: disable_vector_engine
+test: hybrid_row_column
+test: retry
+test: hw_replication_slots
+test: insert
+test: copy2 temp
+test: truncate
+#test: temp_table
+
+#test: b_compatibility
+test: hw_compatibility
+test: hw_groupingsets hw_row_grouping_set
+test: char_truncation_common char_truncation_cast
+
+#this case is dispatched from schedule10(gin_test)
+test: gin_test1 gin_test2 gin_test3
+
+#the following part is dispatched from schedule15
+
+# FIXME: move me back to the parallel test when the refcnt issue is fixed
+# The two tests below are unstable and temporarily ignored. The same applies to distribute_dattistic and relallvisible; Dongwang will solve the problem.
+
+#test: hw_expression_alias
+
+
+#==========================================================================================================================================
+
+
+# ----------
+# src/test/regress/parallel_schedule.33
+#
+# By convention, we put no more than twenty tests in any one parallel group;
+# this limits the number of connections needed to run the tests.
+# ----------
+
+
+test: udf_crem
+
+test: create_c_function
+
+#---1. Drop-Column test
+test: cstore_drop_column_replicated
+#test: cstore_drop_column
+
+#split from parallel_schedule2
+
+# ----------
+# Advisory lock needs to be tested in series in Postgres-XC
+# ---------
+test: advisory_lock
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: cluster dependency bitmapops tsdicts functional_deps
+test: json_and_jsonb json jsonb jsonb2
+#test: guc
+
+# test for vec sonic hash
+test: vec_sonic_hashjoin_number_prepare
+test: vec_sonic_hashjoin_number_nospill
+
+test: dml
+test: hashfilter hashfilter_1
+test: reduce_orderby
+#test: backtrace_log
+#test: bulkload_start
+test: bulkload_parallel_test_2 bulkload_parallel_test_3
+#test: bulkload_parallel_test_1 bulkload_parallel_test_4
+
+test: tpchcol05 tpchcol07 tpchcol08 tpchcol09
+
+test: tpchcol01
+test: tpchcol06
+test: tpchcol03 tpchcol04
+test: tpchcol12 tpchcol13 tpchcol14 tpchcol16 tpchcol18 tpchcol19 tpchcol21
+
+test: vec_partition vec_partition_1 vec_material_001
+
+test: llvm_vecsort llvm_vecsort2
+
+test: udf_crem create_c_function
+
+test: smp
+# test: hw_package
+test: guc_help
+test: dw_switch
diff --git a/src/test/regress/parallel_schedule.liteB b/src/test/regress/parallel_schedule.liteB
new file mode 100644
index 000000000..aeb55979d
--- /dev/null
+++ b/src/test/regress/parallel_schedule.liteB
@@ -0,0 +1,247 @@
+# ----------
+# src/test/regress/parallel_schedule0B
+#
+# By convention, we put no more than twenty tests in any one parallel group;
+# this limits the number of connections needed to run the tests.
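The twenty-test cap restated here is really a connection budget: each test in a group holds one connection, and temp.sql's transient reconnect takes a second one, which is why groups containing it are capped at 19. A rough, hedged check for oversized groups, assuming the `test:` grammar sketched earlier:

```
# Flag any parallel group that exceeds the twenty-test convention
# (NF - 1 counts the test names after the "test:" keyword).
awk '/^test:/ && NF - 1 > 20 { print FILENAME ": line " FNR ": " NF - 1 " tests" }' \
    src/test/regress/parallel_schedule.liteB
```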
+# ----------
+
+###split from parallel_schedule0###
+
+# ----------
+# single_node_commit/rollback tests
+# ----------
+#test: pldeveloper_gs_source
+test: plan_hint plan_hint_set plan_hint_no_expand plan_hint_iud
+test: single_node_produce_commit_rollback
+
+test: instr_unique_sql
+test: shutdown
+
+# List/Hash table exchange
+test: hw_partition_list_exchange
+# test: hw_partition_hash_exchange
+
+# interval partition
+test: hw_partition_interval
+test: hw_partition_interval_exchange
+test: hw_partition_interval_unusable_index psql_desc_unusable_index
+test: hw_partition_interval_reindex
+test: hw_partition_interval_movement
+# To check create interval partition parallel
+test: hw_partition_interval_parallel_prepare
+test: hw_partition_interval_parallel_insert hw_partition_interval_parallel_insert_01 hw_partition_interval_parallel_insert_02
+test: hw_partition_interval_parallel_end
+test: hw_partition_interval_check_syntax
+test: hw_partition_interval_split
+test: hw_partition_interval_merge
+test: hw_partition_interval_dump_restore
+
+# Global Partition Index feature testcase
+# gpi create
+test: gpi_build_index
+
+# gpi check
+test: gpi_create_constraint
+test: gpi_unique_check
+
+# gpi index scan
+test: gpi_index
+
+# gpi index only scan
+test: gpi_index_only
+
+# gpi bitmap
+test: gpi_bitmapscan
+
+# gpi pwj
+test: gpi_pwj
+
+# gpi set unusable
+test: gpi_set_index_unusable
+
+# gpi rebuild
+#test: gpi_rebuild_index
+
+# gpi cluster
+test: gpi_cluster_01 gpi_cluster_02 gpi_cluster_03
+
+# gpi interval
+test: gpi_interval
+
+# gpi range
+test: gpi_range
+
+# gpi invalid part
+test: gpi_invalid_part
+test: gpi_clean_wait
+
+# gpi vacuum
+test: gpi_vacuum_lazy
+test: gpi_hw_partition_vacuum_full
+test: gpi_hw_partition_vacuum_full_01
+
+# gpi alter
+test: gpi_alter_partition
+test: gpi_alter_partition_with_update
+# test: gpi_merge_partitions
+
+# global temporary table tests
+test: gtt_stats
+test: gtt_function
+test: gtt_prepare
+test: gtt_parallel_1 gtt_parallel_2
+test: gtt_clean
+
+# ---------------------------
+# test cases for CStore
+# ---------------------------
+#test: hw_cstore_alter cstore_alter_table2 cstore_alter_table3 cstore_alter_table4 cstore_alter_table5 cstore_alter_table6 cstore_alter_table8 cstore_alter_table9 cstore_alter_table10 hw_cstore_copy hw_alter_table_instant hw_cstore_copy1
+#test: cstore_alter_table cstore_alter_table1 cstore_alter_table7
+
+test: hw_cstore_tablespace hw_cstore_truncate hw_cstore_update
+#test: hw_cstore_roughcheck
+test: hw_cstore_partition_update hw_cstore_partition_update1 hw_cstore_partition_update2
+
+#------------------------------
+# CStore compression test cases
+#-----------------------------
+test: cstore_cmpr_delta cstore_cmpr_date cstore_cmpr_timestamp_with_timezone cstore_cmpr_time_with_timezone cstore_cmpr_delta_nbits cstore_cmpr_delta_int cstore_cmpr_str cstore_cmpr_dict_00 cstore_cmpr_rle_2byte_runs
+test: cstore_cmpr_every_datatype cstore_cmpr_zlib cstore_unsupported_feature cstore_unsupported_feature1 cstore_cmpr_rle_bound cstore_cmpr_rle_bound1 cstore_nan cstore_infinity cstore_log2_error cstore_create_clause cstore_create_clause1 cstore_nulls_00 cstore_partial_cluster_info
+test: cstore_replication_table_delete
+
+test: hw_cstore_index hw_cstore_index1 hw_cstore_index2
+test: hw_cstore_vacuum
+test: hw_cstore_insert hw_cstore_delete hw_cstore_unsupport
+
+# test on extended statistics
+test: hw_es_multi_column_stats_prepare
+test: hw_es_multi_column_stats_1 hw_es_multi_column_stats_1_1 hw_es_multi_column_stats_1_2 hw_es_multi_column_stats_1_3 hw_es_multi_column_stats_1_4 hw_es_multi_column_stats_1_5 hw_es_multi_column_stats_1_6 hw_es_multi_column_stats_2_1 hw_es_multi_column_stats_2_2 hw_es_multi_column_stats_2_3 hw_es_multi_column_stats_3 hw_es_multi_column_stats_3_1 hw_es_multi_column_stats_3_2
+test: hw_es_multi_column_stats_end
+
+test: limit1 setop setop_1 setop_2
+#test: checksum
+test: distinct prepare1
+test: unsupported_features statistic statistic_2
+
+test: hw_setop_writefile
+
+test: vec_nestloop_pre vec_mergejoin_prepare vec_result vec_limit vec_mergejoin_1 vec_mergejoin_2 vec_stream
+test: vec_mergejoin_inner vec_mergejoin_left vec_mergejoin_semi vec_mergejoin_anti llvm_vecexpr1 llvm_vecexpr2 llvm_vecexpr3 llvm_target_expr llvm_target_expr2 llvm_target_expr3 llvm_vecexpr_td
+#test: vec_nestloop1
+test: vec_mergejoin_aggregation llvm_vecagg llvm_vecagg2 llvm_vecagg3 llvm_vechashjoin
+#test: vec_nestloop_end
+
+# ----------
+# The first group of parallel tests
+# ----------
+test: boolean name oid bit txid uuid
+#test: float8 numeric char varchar text int2 int4 float4 numeric_2 money
+
+# Depends on things setup during char, varchar and text
+# Depends on int2, int4, int8, float4, float8
+#test: strings numerology
+
+# ----------
+# The second group of parallel tests
+# ----------
+#test: lseg box path polygon circle date time timetz timestamptz abstime reltime inet
+test: interval tinterval macaddr tstypes comments
+#test: point timestamp
+
+# ----------
+# Another group of parallel tests
+# geometry depends on point, lseg, box, path, polygon and circle
+# horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime
+# ----------
+#test: geometry horology
+
+# ----------
+# Load huge amounts of data
+# We should split the data files into single files and then
+# execute two copy tests in parallel, to check that copy itself
+# is concurrency safe. (duplicate)
+# ----------
+test: copyselect copy_error_log copy_support_transform
+#test: copy_eol
+
+# ----------
+# More groups of parallel tests
+# ----------
+#test: create_index
+
+###split from parallel_schedule2###
+#test: hw_sec_account_lock_unlock rowlevelsecurity
+test: resolve_unknown
+test: query_rewrite
+test: create_schema
+#test: view_dump
+test: hw_function_p_3 hw_function_p_4
+#test: hw_function_p_2
+#test: hw_function_p_1
+test: create_c_function
+test: cstore_replication_table_delete
+
+#test: hw_cursor_part1 hw_cursor_part2 hw_cursor_part3 hw_cursor_part4 hw_cursor_part5 hw_cursor_part6 hw_cursor_part7 hw_cursor_part8
+test: vec_append_part1 vec_append_part2 vec_append_part3
+test: vec_cursor_part1 vec_cursor_part2
+test: vec_delete_part1 vec_delete_part2
+test: vec_name
+
+test: alter_schema_db_rename_seq
+
+test: a_outerjoin_conversion
+
+# test on plan_table
+#test: plan_table04
+
+test: setrefs
+test: agg
+
+# test sql by pass
+#test: bypass_simplequery_support
+test: bypass_preparedexecute_support
+test: sqlbypass_partition
+#test: sqlbypass_partition_prepare
+
+test: string_digit_to_numeric
+# Another group of parallel tests
+# ----------
+#test: collate tablesample tablesample_1 tablesample_2 matview
+test: matview_single
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: hw_order
+
+# ----------
+# Another group of parallel tests
+# ----------
+test: timeout
+
+# procedure, Function Test
+#test: create_procedure postgres_fdw
+test: create_function
+test: pg_compatibility
+# gs_basebackup
+# test: gs_basebackup
+
+# autonomous transaction Test
+#test: autonomous_transaction
+
+
+#test: partition for hash list
+test: pbe_hash_list_partition
+test: hw_partition_list_insert
+test: hw_partition_list_ddl
+test: hw_partition_hash_insert
+test: hw_partition_hash_ddl
+test: hw_partition_hash_dml
+test: hw_partition_hash_dql
+test: hw_partition_list_dml
+test: hw_partition_list_dql
+test: hw_cipher_sm4
+test: hw_cipher_aes128
+test: hw_pwd_encryption_sm3
+test: cstore_unique_index
+test: rule_test
+test: plpgsql_savepoint
diff --git a/src/test/regress/parallel_schedule0 b/src/test/regress/parallel_schedule0
index ba8441463..2228e393d 100644
--- a/src/test/regress/parallel_schedule0
+++ b/src/test/regress/parallel_schedule0
@@ -4,17 +4,49 @@
 # By convention, we put no more than twenty tests in any one parallel group;
 # this limits the number of connections needed to run the tests.
 # ----------
+test: alter_hw_package
+test: hw_grant_package gsc_func gsc_db
+test: uppercase_attribute_name
+test: replace_func_with_two_args trunc_func_for_date nlssort_pinyin updatable_views
+
+# parse xlog and page
+test: parse_page
+test: parse_xlog
+
+test: gs_dump_package
+test: out_param_func
 # test AI4DB
+test: plpgsql_override_out
+test: plpgsql_sql_with_proc_keyword
+test: plsql_show_all_error
+test: pldeveloper_gs_source
 test: index_advisor
 test: pl_debugger_server pl_debugger_client
+test: update_for_wait_s1 update_for_wait_s2
 test: plan_hint plan_hint_set plan_hint_no_expand plan_hint_iud
+test: large_sequence int16 gs_dump_sequence
+test: gs_dump_tableof
 test: analyze_commands
 #test: single_node_job
 test: single_node_ddl
 test: single_node_sqlbypass
-#test: mediavarchar.cppn
+test: median deferrable
 test: array_funcs first_last_agg
+
+test: hw_pwd_encryption_sm3
+
+
+# test subpartition
+test: hw_subpartition_createtable hw_subpartition_scan hw_subpartition_select hw_subpartition_split hw_subpartition_truncate hw_subpartition_update hw_subpartition_gpi hw_subpartition_analyze_vacuum hw_subpartition_alter_table hw_subpartition_index hw_subpartition_add_drop_partition hw_subpartition_tablespace hw_subpartition_ddl_index
+test: hw_subpartition_vacuum_partition
+test: gs_dump_subpartition
+test: partition_dml_operations
+
+# test subpartition with segment=on
+test: segment_subpartition_createtable segment_subpartition_scan segment_subpartition_select segment_subpartition_split segment_subpartition_truncate segment_subpartition_update segment_subpartition_gpi segment_subpartition_analyze_vacuum segment_subpartition_alter_table segment_subpartition_add_drop_partition segment_subpartition_tablespace segment_subpartition_ddl_index
+test: segment_subpartition_vacuum_partition
+
 test: get_instr_unique_sql
 
 # run tablespace by itself, and first, because it forces a checkpoint;
@@ -24,14 +56,17 @@ test: single_node_tablespace
 
 #test startwith...connect by
 test: sw_prepare
-test: sw_basic sw_icbc sw_siblings sw_bugfix sw_by_rownum_level
+test: sw_basic sw_icbc sw_siblings sw_bugfix-1 sw_bugfix-2 sw_by_rownum_level
 test: sw_clearup
+#--------------
+# any privilege
+# -------------
+test: pri_alter_any_table pri_create_any_function pri_create_any_index pri_create_any_sequence pri_create_any_type pri_dml_any_table pri_execute_any_function pri_indepent_any pri_any_package pri_samenew_schema
 
 # ----------
 # The first group of parallel tests
 # ----------
-test: single_node_boolean single_node_char single_node_name single_node_varchar single_node_text single_node_int2 single_node_int4 single_node_int8 single_node_oid single_node_float4 single_node_float8 single_node_bit single_node_numeric single_node_txid single_node_uuid single_node_enum single_node_money
-test: single_node_nvarchar
+test: single_node_boolean single_node_char single_node_name single_node_varchar single_node_text single_node_int2 single_node_int4 single_node_int8 single_node_oid single_node_float4 single_node_float8 single_node_bit single_node_numeric single_node_txid single_node_uuid single_node_enum single_node_money single_node_nvarchar
 
 # Depends on things setup during char, varchar and text
 #test: single_node_strings
@@ -41,10 +76,10 @@ test: single_node_numerology
 # ----------
 # The second group of parallel tests
 # ----------
-test: single_node_point single_node_lseg single_node_box single_node_path single_node_polygon single_node_circle single_node_date single_node_time single_node_timetz single_node_timestamp single_node_timestamptz
-#test: single_node_interval
-test: single_node_abstime single_node_reltime
-#test: single_node_tinterval
+test: single_node_point single_node_lseg single_node_box single_node_path single_node_polygon single_node_circle single_node_date single_node_time single_node_timetz single_node_timestamp single_node_timestamptz
+#test: single_node_interval
+test: single_node_abstime single_node_reltime
+#test: single_node_tinterval
 test: single_node_inet single_node_macaddr single_node_tstypes single_node_comments
 
 # ----------
@@ -52,11 +87,10 @@ test: single_node_inet single_node_macaddr single_node_tstypes single_node_comme
 # geometry depends on point, lseg, box, path, polygon and circle
 # horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime
 # ----------
-#test: single_node_geometry single_node_horology
+#test: single_node_geometry single_node_horology
 #test: single_node_regex
-test: single_node_regex_temp
-test: single_node_type_sanity
-#test: single_node_oidjoins single_node_opr_sanity
+test: single_node_regex_temp
+test: single_node_oidjoins single_node_type_sanity
 
 # ----------
 # These four each depend on the previous one
@@ -79,16 +113,16 @@ test: single_node_create_function_1
 # ----------
 # More groups of parallel tests
 # ----------
-#test: single_node_create_misc
+#test: single_node_create_misc
 #test: single_node_create_operator
 # These depend on the above two
-#test: single_node_create_index
+#test: single_node_create_index
 #test: single_node_create_view
 
 # ----------
 # Another group of parallel tests
 # ----------
-test: single_node_create_aggregate
+test: single_node_create_aggregate
 #test: single_node_create_function_3 single_node_create_cast
 #test: single_node_constraints single_node_triggers single_node_inherit single_node_create_table_like single_node_typed_table
 test: single_node_vacuum
@@ -111,28 +145,26 @@ ignore: single_node_random
 # ----------
 # Another group of parallel tests
 # ----------
-#test: single_node_select_into single_node_select_distinct
-#test: single_node_select_distinct_on single_node_select_implicit single_node_select_having
-test: single_node_select_implicit single_node_select_having
+#test: single_node_select_into single_node_select_distinct
+#test: single_node_select_distinct_on single_node_select_implicit single_node_select_having
+test: single_node_select_implicit single_node_select_having
 #test: single_node_subselect
 test: single_node_union
-#test: single_node_case single_node_join single_node_aggregates
-#test: single_node_transactions
-test: single_node_random
+#test: single_node_case single_node_join single_node_aggregates
+#test: single_node_transactions
+test: single_node_random
 #test: single_node_portals
-#test: single_node_arrays
-#test: single_node_btree_index single_node_hash_index single_node_update
-test: single_node_update
-test: hash_index_001
-test: hash_index_002
+#test: single_node_arrays
+#test: single_node_btree_index single_node_hash_index single_node_update
+test: single_node_update
 #test single_node_namespace
-#test: single_node_prepared_xacts
+#test: single_node_prepared_xacts
 #test: single_node_delete
 
 # ----------
 # Another group of parallel tests
 # ----------
-#test: single_node_privileges
+#test: single_node_privileges
 #test: single_node_security_label single_node_collate
 
 #test: single_node_misc
@@ -142,17 +174,17 @@ test: hash_index_002
 # ----------
 # Another group of parallel tests
 # ----------
-#test: single_node_select_views
-#test: single_node_portals_p2
+#test: single_node_select_views
+#test: single_node_portals_p2
 test: single_node_foreign_key
 #test: single_node_foreign_key single_node_cluster single_node_dependency
-#test: single_node_guc
-test: single_node_bitmapops single_node_combocid
+#test: single_node_guc
+test: single_node_bitmapops single_node_combocid
 #test: single_node_tsearch
-#test: single_node_tsdicts
-#test: single_node_foreign_data
+#test: single_node_tsdicts
+#test: single_node_foreign_data
 #single_node_window
-#test: single_node_xmlmap
+#test: single_node_xmlmap
 #test: single_node_functional_deps single_node_advisory_lock single_node_json single_node_equivclass
 
 # ----------
@@ -162,7 +194,7 @@ test: single_node_bitmapops single_node_combocid
 # ----------
 test: single_node_sequence
 #test: single_node_plancache single_node_limit single_node_plpgsql single_node_copy2 single_node_temp single_node_domain single_node_rangefuncs single_node_prepare single_node_without_oid single_node_conversion single_node_truncate single_node_alter_table single_node_sequence single_node_polymorphism
-#test: single_node_rowtypes
+#test: single_node_rowtypes
 #test: single_node_returning single_node_largeobject single_node_with single_node_xml
 
 # run stats by itself because its delay may be insufficient under heavy load
@@ -170,7 +202,9 @@ test: single_node_sequence
 
 # run and check forbidden functions are still forbidden to use in single node
 test: single_node_forbidden
-test: single_node_mergeinto merge_1
+
+test: single_node_mergeinto merge_subquery merge_subquery3 merge_1
+test: merge_where_col
 
 # Trigger tests
 test: single_node_triggers
@@ -186,9 +220,11 @@ test: single_node_unsupported_view
 # ----------
 # single_node_commit/rollback tests
 # ----------
-test: single_node_produce_commit_rollback
+test: single_node_produce_commit_rollback
+test: single_node_function_commit_rollback
 
 test: instr_unique_sql
+test: auto_explain
 test: shutdown
 
 # List/Hash table exchange
@@ -198,10 +234,14 @@ test: hw_partition_hash_exchange
 
 # List/Hash table truncate
 test: hw_partition_list_truncate hw_partition_hash_truncate
 
+# add/drop partition
+test: hw_partition_add_drop_partition
+
+#test: hw_partition_start_end
 # To check min_max fuc support IP
 test: min_max_support_IP
 
-# interval partition
+# interval partition
 test: hw_partition_interval
 test: hw_partition_interval_exchange
 test: hw_partition_interval_index
@@ -335,7 +375,7 @@ test: upsert_prepare
 test: upsert_001 upsert_002 upsert_003 upsert_008 upsert_009 upsert_010
 test: upsert_grammer_test_01 upsert_unlog_test upsert_tmp_test
 test: upsert_grammer_test_02 upsert_restriction upsert_composite
-test: upsert_trigger_test upsert_explain
+test: upsert_trigger_test upsert_explain upsert_where upsert_where_sublink
 test: upsert_subquery
 test: upsert_clean
@@ -354,11 +394,10 @@ test: hw_account_lock
 test: hw_interval_format hw_function_p_3 hw_function_p_4 hw_current_schema hw_functions
 #test: hw_function_p_1 hw_function_p_2#
 test: hw_dba_enable_partition hw_tablespace
-test: hw_procedure_define
+test: hw_procedure_define
 #test: hw_anonymous_block
 #test: hw_procedure#
-test: hw_grant_all hw_func_return_out
-#test: hw_dynamic_sql
+test: hw_grant_all hw_dynamic_sql hw_func_return_out
 test: hw_package_function
 
 #show plan
@@ -379,9 +418,10 @@ test: tpch01 tpch03 tpch04 libcomm_check_status tpch03_querymem
 test: tpch05 tpch06 tpch07 tpch08
 test: tpch09 tpch10 tpch11 tpch12
 test: tpch13 tpch14 tpch15 tpch16
+#test: tpch_vector_optimal
 test: tpch18 tpch19 tpch20 tpch18_querymem
-test: tpch21 tpch22 tpch11_pretty_performance
-#test: tpch02 tpch17
+test: tpch21 tpch22 tpch11_pretty_performance vector_procedure
+#test: tpch02 tpch17
 
 #test export
 test: temp__2
@@ -411,10 +451,9 @@ test: xc_misc
 # Those ones can be run in parallel
 test: xc_groupby xc_distkey xc_having
 #test: hw_rewrite_lazyagg hw_light
-test: xc_temp xc_FQS
+test: xc_temp xc_FQS
 #test: xc_remote hw_pbe
-#test: hw_pbe
-test: xc_FQS_join xc_copy
+test: xc_FQS_join xc_copy
 #test: xc_alter_table
 test: xc_constraints xc_limit xc_sort
 #test: xc_params xc_returning_step1
@@ -433,7 +472,7 @@ test: xc_dml
 # test cases for CStore
 # ---------------------------
 #test: hw_cstore_alter cstore_alter_table2 cstore_alter_table3 cstore_alter_table4 cstore_alter_table5 cstore_alter_table6 cstore_alter_table8 cstore_alter_table9 cstore_alter_table10 hw_cstore_copy hw_alter_table_instant hw_cstore_copy1
-#test: cstore_alter_table cstore_alter_table1 cstore_alter_table7
+#test: cstore_alter_table cstore_alter_table1 cstore_alter_table7
 
 test: hw_cstore_tablespace hw_cstore_truncate hw_cstore_update
 #test: hw_cstore_roughcheck
@@ -451,7 +490,7 @@ test: hw_cstore_vacuum
 test: hw_cstore_insert hw_cstore_delete hw_cstore_unsupport
 
 # test on extended statistics
-test: hw_es_multi_column_stats_prepare
+test: hw_es_multi_column_stats_prepare hw_es_multi_column_stats_eqclass
 test: hw_es_multi_column_stats_1 hw_es_multi_column_stats_1_1 hw_es_multi_column_stats_1_2 hw_es_multi_column_stats_1_3 hw_es_multi_column_stats_1_4 hw_es_multi_column_stats_1_5 hw_es_multi_column_stats_1_6 hw_es_multi_column_stats_2_1 hw_es_multi_column_stats_2_2 hw_es_multi_column_stats_2_3 hw_es_multi_column_stats_3 hw_es_multi_column_stats_3_1 hw_es_multi_column_stats_3_2
 test: hw_es_multi_column_stats_end
@@ -462,16 +501,16 @@ test: unsupported_features statistic statistic_2
 
 test: hw_setop_writefile
 
-test: vec_nestloop_pre vec_mergejoin_prepare vec_result vec_limit vec_mergejoin_1 vec_mergejoin_2 vec_stream
+test: vec_nestloop_pre vec_mergejoin_prepare vec_result vec_limit vec_mergejoin_1 vec_mergejoin_2 vec_stream force_vector_engine force_vector_engine2
 test: vec_mergejoin_inner vec_mergejoin_left vec_mergejoin_semi vec_mergejoin_anti llvm_vecexpr1 llvm_vecexpr2 llvm_vecexpr3 llvm_target_expr llvm_target_expr2 llvm_target_expr3 llvm_vecexpr_td
 #test: vec_nestloop1
-test: vec_mergejoin_aggregation llvm_vecagg llvm_vecagg2 llvm_vecagg3 llvm_vechashjoin
+test: vec_mergejoin_aggregation llvm_vecagg llvm_vecagg2 llvm_vecagg3 llvm_vechashjoin vector_subpartition
 #test: vec_nestloop_end
 
 # ----------
 # The first group of parallel tests
 # ----------
-test: boolean name oid bit txid uuid
+test: boolean name oid bit txid uuid numeric_hide_tailing_zero rawlike
 #test: float8 numeric char varchar text int2 int4 float4 numeric_2 money
 
 # Depends on things setup during char, varchar and text
@@ -498,8 +537,7 @@ test: interval tinterval macaddr tstypes comments
 # execute two copy tests parallel, to check that copy itself
 # is concurrent safe.(duplicate)
 # ----------
-test: copyselect copy_error_log copy_support_transform
-#test: copy_from_support_parallel
+test: copyselect copy_error_log copy_support_transform copy_from_support_parallel
 #test: copy_eol
 
 # ----------
@@ -513,7 +551,7 @@ test: copyselect copy_error_log copy_support_transform
 # Another group of parallel tests
 # ----------
 test: create_function_3 vacuum
-#test: drop_if_exists
+test: drop_if_exists
 #test: constraints
 
 #test: errors subplan_base
@@ -538,7 +576,7 @@ test: alter_table_000 alter_table_002 alter_table_003
 #test: hw_sec_account_lock_unlock rowlevelsecurity
 test: resolve_unknown
 test: query_rewrite
-test: create_schema
+test: create_schema
 #test: view_dump
 test: hw_function_p_3 hw_function_p_4
 #test: hw_function_p_2
@@ -546,10 +584,11 @@ test: hw_function_p_3 hw_function_p_4
 test: create_c_function
 test: cstore_replication_table_delete
 
-test: hw_cursor_part1 hw_cursor_part2 hw_cursor_part3 hw_cursor_part4 hw_cursor_part7 hw_cursor_part8 hw_cursor_part5 hw_cursor_part6
+test: hw_cursor_part1 hw_cursor_part2 hw_cursor_part3 hw_cursor_part4 hw_cursor_part5 hw_cursor_part6 hw_cursor_part7 hw_cursor_part8
 test: vec_append_part1 vec_append_part2 vec_append_part3
 test: vec_cursor_part1 vec_cursor_part2
 test: vec_delete_part1 vec_delete_part2
+test: vec_set_func
 
 test: alter_schema_db_rename_seq
 
@@ -576,8 +615,7 @@ test: matview_single
 # ----------
 # Another group of parallel tests
 # ----------
-test: hll_hash hll_func hll_para hll_mpp hll_cstore
-#test: hll_misc
+test: hll_hash hll_func hll_para hll_mpp hll_cstore hll_misc
 
 test: function_get_table_def
 
@@ -593,6 +631,9 @@ test: hw_order
 test: hw_pwd_reuse
 #test: hw_auditadmin
 
+test: hw_audit_toughness
+test: hw_audit_detailinfo
+
 test: performance_enhance
 test: explain_fqs
 #test: explain_pbe
@@ -610,6 +651,21 @@ test: sp_set_policy_plugin_disable
 # so keep this parallel group to at most 19 tests
 # ----------
 #test: plpgsql
+test: select_where_func
+test: arrayinterface_single
+test: plpgsql_table_opengauss
+test: plpgsql_assign_value_to_array_attribute
+test: plpgsql_array_of_record
+test: arrayinterface_indexby
+test: arrayinterface_ted
+test: plpgsql_inout_param
+test: plpgsql_cursor_rowtype
+test: plpgsql_assign_list
+test: plpgsql_package_type plpgsql_package_param
+test: plpgsql_record_attrname
+test: plpgsql_insert_record
+test: hw_package_variable
+test: autonomous_cursor
 #test: plancache limit rangefuncs prepare
 test: returning largeobject
 test: hw_explain_pretty1 hw_explain_pretty2 hw_explain_pretty3
@@ -619,14 +675,14 @@ test: tsdb_delta2_compress
 test: tsdb_xor_compress
 #test: tsdb_aggregate
 
+
 test: readline
 test: hw_to_timestamp hw_view_privilege
 
 test: hw_identifier
 #test: hw_hashint1 hw_smalldatetime_hash hw_rawtype_hash
 #test: hw_nvarchar2_hash cmpr_smallint cmpr_prefix_150left cmpr_uint32_oid
-test: opr_sanity_2 regex
-#test: oidjoins
+test: oidjoins opr_sanity_2 regex
 #test: opr_sanity_1
 
 test: pmk
@@ -650,6 +706,7 @@ test: col_function_1 col_function_2 col_count_distinct_1 col_count_distinct_2 co
 test: directory_test
 test: analyse_verify
 test: create_compositetype
+test: hw_pct_type_and_rowtype
 #test: create_basetype
 #test: tabletype
 #test with recursive
@@ -707,6 +764,7 @@ test: gin_test1 gin_test2 gin_test3
 #==========================================================================================================================================
 # privilege test
 test: predefined_roles
+test: gs_db_privilege
 
 # ----------
 # src/test/regress/parallel_schedule.33
@@ -766,7 +824,6 @@ test: udf_crem create_c_function
 
 # procedure, Function Test
 #test: create_procedure postgres_fdw
-#test: create_procedure
 test: create_function
 test: pg_compatibility
 
@@ -774,12 +831,11 @@ test: pg_compatibility
 # test: gs_basebackup
 
 # autonomous transaction Test
-#test: autonomous_test
 
 #test jdbc pbe for bypass
 test: bypass_pbe
 
 #test: partition for hash list
-test: pbe_hash_list_partition
+test: pbe_hash_list_partition
 test: hw_partition_list_insert
 test: hw_partition_list_ddl
 test: hw_partition_hash_insert
@@ -788,28 +844,14 @@ test: hw_partition_hash_dml
 test: hw_partition_hash_dql
 test: hw_partition_list_dml
 test: hw_partition_list_dql
-#test: hw_cipher_sm4
+test: hw_cipher_sm4
 test: hw_cipher_aes128
 test: hw_pwd_encryption_sm3
-# ----------
-# timecapsule
-# ----------
-test: timecapsule_recyclebin_test_9
-test: timecapsule_recyclebin_test_7
-test: timecapsule_recyclebin_test_8
-test: timecapsule_version_test_1
-test: timecapsule_version_test_2
-test: timecapsule_version_test_3
-test: timecapsule_version_test_4
-test: timecapsule_version_test_5
-test: timecapsule_version_test_6
-test: timecapsule_version_test_7
 test: rule_test
 
 #delete limit
 test: delete_limit
-#test: hotpatch
 
 # --------------------------
 # DB4AI
@@ -818,7 +860,11 @@ test: db4ai_snapshots
 test: db4ai_gd_train_predict
 test: db4ai_gd_houses
 test: db4ai_gd_snapshots
+test: db4ai_gd_pca_train_predict
 test: db4ai_kmeans_train_predict
+test: db4ai_xgboost_train_predict
+
+test: db4ai_explain_model
 
 test: leaky_function_operator
 
@@ -831,6 +877,8 @@ test: smp
 
 test: cstore_unique_index
 
+test: cast_privileges_test
+
 #generated column test
 test: generated_col
 
@@ -840,17 +888,22 @@ test: ledger_table_case
 # store and restore comment on ora function and procedure
 test: comment_proc
 test: hw_package
-test: procedure_privilege_test
-test: cast_privileges_test
-#test: hw_dbms_sql1
-#test: hw_cipher_sm4
+test: hw_cipher_sm4
 test: hw_cipher_aes128
 test: sequence_cache_test
 test: pg_buffercache_pages
+test: procedure_privilege_test
+
+
+test: toomanyparams
 
 test: test_astore_multixact
-test: row_compression/pg_table_size row_compression/unsupported_feature row_compression/normal_test
-#test: row_compression/pg_tablespace_size
-# publication and subscription
-test: publication subscription
+test: component_view_enhancements single_node_user_mapping
+
+# publication and subscription; we need to record an audit log for them, so separate them into two test groups
+test: publication
+test: subscription
+test: fdw_audit
+test: gs_global_config_audit
 test: detail
+
diff --git a/src/test/regress/parallel_schedule037 b/src/test/regress/parallel_schedule037
index 8cd704618..407c32a44 100644
--- a/src/test/regress/parallel_schedule037
+++ b/src/test/regress/parallel_schedule037
@@ -1,4 +1,18 @@
 # ustore tests
+#test: test_ustore_undo_view
+#test: test_ustore_undozone
+
+test: ustore_subpartition_createtable
+test: ustore_subpartition_scan
+test: ustore_subpartition_select
+test: ustore_subpartition_split
+test: ustore_subpartition_truncate
+test: ustore_subpartition_update
+test: ustore_subpartition_gpi
+test: ustore_subpartition_alter_table
+test: ustore_subpartition_add_drop_partition
+test: ustore_subpartition_tablespace
+test: ustore_subpartition_ddl_index
 test: test_ustore_simple_insert
 
 test: test_ustore_simple_delete
@@ -12,11 +26,13 @@ test: test_ustore_concurrent_delete_rollback
 test: test_inplace_tpd_rollback_delete
 test: test_ustore_concurrent_delete_update_2
 test: test_ustore_concurrent_update
+test: test_ustore_index_including
 test: test_ustore_index
-test: test_ustore_multi_insert
+test: test_ustore_index_cache_rightpage
+#test: test_ustore_multi_insert
 test: test_ustore_repeatable_read
 test: test_ustore_insert_update
-test: test_ustore_insert_select
+#test: test_ustore_insert_select
 test: test_ustore_toast
 test: test_ustore_union
 
@@ -34,16 +50,16 @@ test: test_ustore_orderby_3
 test: test_ustore_groupby_1
 test: test_ustore_groupby_2
 test: test_ustore_groupby_3
-test: test_ustore_insert_select_new
+#test: test_ustore_insert_select_new
 
 test: test_unione_insert_multi_col
 test: test_unione_update_multi_col
 test: test_unione_delete_multi_col
 test: test_unione_select_multi_col
-test: test_unione_insert_select_multi_col
+#test: test_unione_insert_select_multi_col
 
 test: test_unione_insert_single_col
-test: test_unione_insert_select_single_col
+#test: test_unione_insert_select_single_col
 test: test_unione_delete_single_col
 test: test_unione_update_single_col
 
@@ -61,30 +77,34 @@ test: test_ustore_subtrans_selectforshare
 #test: test_ustore_multixact
 test: test_ustore_concurrent_selectforshare
 
-test: test_unione_mix_tables_1
-test: test_unione_mix_tables_2
-test: test_unione_mix_tables_3
-test: test_unione_mix_tables_4
-test: test_unione_mix_tables_5
-test: test_unione_mix_tables_6
-test: test_unione_mix_tables_7
+#test: test_unione_mix_tables_1
+#test: test_unione_mix_tables_2
+#test: test_unione_mix_tables_3
+#test: test_unione_mix_tables_4
+#test: test_unione_mix_tables_5
+#test: test_unione_mix_tables_6
+#test: test_unione_mix_tables_7
 
-test: insert_update_001_ustore
+#test: insert_update_001_ustore
 test: insert_update_002_ustore
 test: insert_update_003_ustore
 test: insert_update_004_ustore
-test: insert_update_008_ustore
+#test: insert_update_008_ustore
 
 test: ustore_trigger ustore_fk ustore_pt_fk
-test: ustore_matview
+#test: ustore_matview
 test: test_ustore_tdslot_deadlock_detector
 
 #test: timecapsule_version_ustore_test_1
-#test: timecapsule_version_ustore_test_2
+test: timecapsule_version_ustore_test_2
 test: timecapsule_version_ustore_test_3
-#test: timecapsule_version_ustore_test_4
+test: timecapsule_version_ustore_test_4
 test: timecapsule_version_ustore_test_5
 test: timecapsule_version_ustore_test_6
+test: timecapsule_partition_ustore_test_1
+test: timecapsule_partition_ustore_test_2
+test: ustore_hw_partition_list_insert
+
+test: component_view_enhancements
-test: ustore_hw_partition_list_insert
\ No newline at end of file
diff --git a/src/test/regress/parallel_schedule0A b/src/test/regress/parallel_schedule0A
index 4ea6ae0f7..dd2c353bd 100644
--- a/src/test/regress/parallel_schedule0A
+++ b/src/test/regress/parallel_schedule0A
@@ -5,12 +5,10 @@
 # this limits the number of connections needed to run the tests.
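Schedules like parallel_schedule037 deliberately run almost every ustore case in its own single-test group, so reruns after a failure are cheap. A hedged sketch of rerunning one such case on its own, assuming stock pg_regress behavior (tests named on the command line run serially; `--use-existing` reuses a running installation; the openGauss wrapper may expose different switches):

```
# Assumed direct pg_regress rerun of a single case against a live server.
./pg_regress --use-existing --dbname=regression test_ustore_simple_insert
```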
# ----------
-# test nlssort_pinyin
-test: nlssort_pinyin
+test: hw_subpartition_createtable hw_subpartition_scan hw_subpartition_select hw_subpartition_split hw_subpartition_truncate hw_subpartition_update hw_subpartition_gpi hw_subpartition_alter_table hw_subpartition_index hw_subpartition_add_drop_partition hw_subpartition_tablespace hw_subpartition_ddl_index
+test: hw_subpartition_vacuum_partition

-# test replace func and trunc func
-test: replace_func_with_two_args
-test: trunc_func_for_date
+test: replace_func_with_two_args trunc_func_for_date updatable_views

 test: analyze_commands
 #test: single_node_job
@@ -165,7 +163,7 @@ test: single_node_sequence

 # run and check forbidden functions are still forbidden to use in single node
 test: single_node_forbidden
-test: single_node_mergeinto
+test: single_node_mergeinto merge_subquery merge_subquery3

 # Trigger tests
 test: single_node_triggers
@@ -247,11 +245,13 @@ test: hw_createtbl_llt
 test: sqlLLT
 #test: hw_sql_llt#

+test: upsert_where
+
 #test: upsert_prepare
 #test: upsert_001 upsert_002 upsert_003 upsert_008 upsert_009 upsert_010
 #test: upsert_grammer_test_01 upsert_unlog_test upsert_tmp_test
 #test: upsert_grammer_test_02 upsert_restriction upsert_composite
-#test: upsert_trigger_test upsert_explain
+#test: upsert_trigger_test upsert_explain upsert_where upsert_where_sublink
 #test: upsert_clean

 # all pass
@@ -294,6 +294,7 @@ test: tpch01 tpch03 tpch04 libcomm_check_status tpch03_querymem
 test: tpch05 tpch06 tpch07 tpch08
 test: tpch09 tpch10 tpch11 tpch12
 test: tpch13 tpch14 tpch15 tpch16
+#test: tpch_vector_optimal
 test: tpch18 tpch19 tpch20 tpch18_querymem
 test: tpch21 tpch22 tpch11_pretty_performance
 #test: tpch02 tpch17
@@ -387,17 +388,20 @@ test: temp__3
 # so keep this parallel group to at most 19 tests
 # ----------
 #test: plpgsql
-test: plpgsql_array_opengauss plpgsql_table_opengauss plpgsql_multiset arrayinterface_single
+test: plpgsql_multiset
+test: plpgsql_array_opengauss
+test: plpgsql_table_opengauss
+test: arrayinterface_single
 #arrayinterface_ted
 #test: plancache limit rangefuncs prepare
-test: plpgsql_array_opengauss arrayinterface_single # arrayinterface_ted
-test: plpgsql_assign_value_to_array_attribute plpgsql_cursor_rowtype
+test: plpgsql_assign_value_to_array_attribute
+test: plpgsql_cursor_rowtype
 test: plpgsql_array_of_record
 test: plpgsql_assign_list
 test: plpgsql_package_type
 test: plpgsql_record_attrname
-test: hw_package_valriable
+test: hw_package_variable
 test: returning largeobject
 test: hw_explain_pretty1 hw_explain_pretty2 hw_explain_pretty3
 test: goto
@@ -549,5 +553,9 @@ test: llvm_vecsort llvm_vecsort2

 test: udf_crem create_c_function

-test: smp
+test: smp single_node_user_mapping
 # test: hw_package
+# publication and subscription, we need to record audit log for them, so separate them into two test groups
+test: publication
+test: subscription
+test: fdw_audit
diff --git a/src/test/regress/parallel_schedule0B b/src/test/regress/parallel_schedule0B
index 70dee157a..5b1959242 100644
--- a/src/test/regress/parallel_schedule0B
+++ b/src/test/regress/parallel_schedule0B
@@ -78,7 +78,7 @@ test: gpi_clean_wait
 # gpi vacuum
 test: gpi_vacuum_lazy
 test: gpi_hw_partition_vacuum_full
-test: gpi_hw_partition_vacuum_full_01
+#test: gpi_hw_partition_vacuum_full_01

 # gpi alter
 test: gpi_alter_partition
@@ -114,7 +114,7 @@ test: hw_cstore_vacuum
 test: hw_cstore_insert hw_cstore_delete hw_cstore_unsupport

 # test on extended statistics
-test: hw_es_multi_column_stats_prepare
+test: hw_es_multi_column_stats_prepare 
hw_es_multi_column_stats_eqclass test: hw_es_multi_column_stats_1 hw_es_multi_column_stats_1_1 hw_es_multi_column_stats_1_2 hw_es_multi_column_stats_1_3 hw_es_multi_column_stats_1_4 hw_es_multi_column_stats_1_5 hw_es_multi_column_stats_1_6 hw_es_multi_column_stats_2_1 hw_es_multi_column_stats_2_2 hw_es_multi_column_stats_2_3 hw_es_multi_column_stats_3 hw_es_multi_column_stats_3_1 hw_es_multi_column_stats_3_2 test: hw_es_multi_column_stats_end @@ -125,10 +125,10 @@ test: unsupported_features statistic statistic_2 test: hw_setop_writefile -test: vec_nestloop_pre vec_mergejoin_prepare vec_result vec_limit vec_mergejoin_1 vec_mergejoin_2 vec_stream +test: vec_nestloop_pre vec_mergejoin_prepare vec_result vec_limit vec_mergejoin_1 vec_mergejoin_2 vec_stream force_vector_engine force_vector_engine2 test: vec_mergejoin_inner vec_mergejoin_left vec_mergejoin_semi vec_mergejoin_anti llvm_vecexpr1 llvm_vecexpr2 llvm_vecexpr3 llvm_target_expr llvm_target_expr2 llvm_target_expr3 llvm_vecexpr_td #test: vec_nestloop1 -test: vec_mergejoin_aggregation llvm_vecagg llvm_vecagg2 llvm_vecagg3 llvm_vechashjoin +test: vec_mergejoin_aggregation llvm_vecagg llvm_vecagg2 llvm_vecagg3 llvm_vechashjoin vector_subpartition #test: vec_nestloop_end # ----------$ @@ -241,7 +241,7 @@ test: hw_partition_hash_dml test: hw_partition_hash_dql test: hw_partition_list_dml test: hw_partition_list_dql -#test: hw_cipher_sm4 +test: hw_cipher_sm4 test: hw_cipher_aes128 test: hw_pwd_encryption_sm3 test: cstore_unique_index diff --git a/src/test/regress/parallel_schedule0C b/src/test/regress/parallel_schedule0C index 3b1de597f..3f8411f72 100644 --- a/src/test/regress/parallel_schedule0C +++ b/src/test/regress/parallel_schedule0C @@ -1,9 +1,9 @@ -#test: sqlldr/load_to_copy_basic +test: sqlldr/load_to_copy_basic test: sqlldr/load_to_table_basic -#test: sqlldr/load_to_table_column -#test: sqlldr/gs_loader_basic -#test: sqlldr/gs_loader_parameter -#test: sqlldr/gs_loader_column -#test: sqlldr/gs_loader_multi_data -#test: sqlldr/gs_loader_issues -#test: sqlldr/gs_loader_column_filler +test: sqlldr/load_to_table_column +test: sqlldr/gs_loader_basic +test: sqlldr/gs_loader_parameter +test: sqlldr/gs_loader_column +test: sqlldr/gs_loader_multi_data +test: sqlldr/gs_loader_issues +test: sqlldr/gs_loader_column_filler \ No newline at end of file diff --git a/src/test/regress/parallel_schedule0U b/src/test/regress/parallel_schedule0U new file mode 100644 index 000000000..fe1e4f0a9 --- /dev/null +++ b/src/test/regress/parallel_schedule0U @@ -0,0 +1,555 @@ +# ---------- +# src/test/regress/parallel_schedule0A +# +# By convention, we put no more than twenty tests in any one parallel group; +# this limits the number of connections needed to run the tests. +# ---------- + +# test nlssort_pinyin +test: nlssort_pinyin + +# test replace func and trunc func +test: replace_func_with_two_args +test: trunc_func_for_date + +test: analyze_commands +#test: single_node_job +test: single_node_ddl +test: single_node_sqlbypass +test: median + + +# run tablespace by itself, and first, because it forces a checkpoint; +# we'd prefer not to have checkpoints later in the tests because that +# interferes with crash-recovery testing. 
+test: single_node_tablespace + +test: large_sequence int16 gs_dump_sequence + +# ---------- +# The first group of parallel tests +# ---------- +test: single_node_boolean single_node_char single_node_name single_node_varchar single_node_text single_node_int2 single_node_int4 single_node_int8 single_node_oid single_node_float4 single_node_float8 single_node_bit single_node_numeric single_node_txid single_node_uuid single_node_enum single_node_money + +# Depends on things setup during char, varchar and text +#test: single_node_strings +# Depends on int2, int4, int8, float4, float8 +test: single_node_numerology + +# ---------- +# The second group of parallel tests +# ---------- +test: single_node_point single_node_lseg single_node_box single_node_path single_node_polygon single_node_circle single_node_date single_node_time single_node_timetz single_node_timestamp single_node_timestamptz +#test: single_node_interval +test: single_node_abstime single_node_reltime +#test: single_node_tinterval +test: single_node_inet single_node_macaddr single_node_tstypes single_node_comments + +# ---------- +# Another group of parallel tests +# geometry depends on point, lseg, box, path, polygon and circle +# horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime +# ---------- +#test: single_node_geometry single_node_horology +#test: single_node_regex +test: single_node_regex_temp + +# ---------- +# These four each depend on the previous one +# ---------- +test: single_node_insert xc_rownum +test: single_node_temple +test: single_node_create_function_1 +#test: single_node_create_type +#test: single_node_create_table +#test: single_node_create_function_2 + +# ---------- +# Load huge amounts of data +# We should split the data files into single files and then +# execute two copy tests parallel, to check that copy itself +# is concurrent safe. +# ---------- +#test: single_node_copy single_node_copyselect + +# ---------- +# More groups of parallel tests +# ---------- +#test: single_node_create_misc +#test: single_node_create_operator +# These depend on the above two +#test: single_node_create_index +#test: single_node_create_view + +# ---------- +# Another group of parallel tests +# ---------- +test: single_node_create_aggregate +#test: single_node_create_function_3 single_node_create_cast +#test: single_node_constraints single_node_triggers single_node_inherit single_node_create_table_like single_node_typed_table +test: single_node_vacuum +#test: single_node_drop_if_exists + +# ---------- +# sanity_check does a vacuum, affecting the sort order of SELECT * +# results. So it should not run parallel to other tests. +# ---------- +#test: single_node_sanity_check + +# ---------- +# Believe it or not, select creates a table, subsequent +# tests need. 
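A note on the format used throughout these schedule files: each uncommented `test:` line defines one parallel group, and every test named on the line runs concurrently against the same server, which is why the convention cited above caps a group at roughly twenty entries; `ignore:` marks a test whose failure does not fail the run, and a leading `#` disables a group without deleting it. The sketch below is only an illustration of that line convention, not the actual pg_regress.cpp tokenizer, which differs in detail:

```
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Illustrative sketch of the schedule-line convention: one "test:" line
// names one parallel group. Hypothetical helper, not pg_regress.cpp code.
static std::vector<std::string> parse_schedule_line(const std::string& line)
{
    std::vector<std::string> group;
    if (line.empty() || line[0] == '#') {
        return group;                       // commented-out (disabled) group
    }
    const std::string prefix = "test: ";
    if (line.compare(0, prefix.size(), prefix) != 0) {
        return group;                       // "ignore:" lines handled elsewhere
    }
    std::istringstream names(line.substr(prefix.size()));
    std::string name;
    while (names >> name) {
        group.push_back(name);              // all names in one group run in parallel
    }
    return group;
}

int main()
{
    for (const std::string& t : parse_schedule_line("test: tpch01 tpch03 tpch04")) {
        std::cout << t << '\n';
    }
    return 0;
}
```

Dependency comments such as "Depends on things setup during char, varchar and text" are honored purely by ordering: a test that needs another test's objects must sit in a later group, since members of one group carry no ordering guarantees among themselves.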
+# ---------- +test: single_node_errors +#test: single_node_select +ignore: single_node_random + +# ---------- +# Another group of parallel tests +# ---------- +#test: single_node_select_into single_node_select_distinct +#test: single_node_select_distinct_on single_node_select_implicit single_node_select_having +test: single_node_select_implicit single_node_select_having +#test: single_node_subselect +test: single_node_union +#test: single_node_case single_node_join single_node_aggregates +#test: single_node_transactions +test: single_node_random +#test: single_node_portals +#test: single_node_arrays +#test: single_node_btree_index single_node_hash_index single_node_update +test: single_node_update +#test single_node_namespace +#test: single_node_prepared_xacts +#test: single_node_delete + +# ---------- +# Another group of parallel tests +# ---------- +#test: single_node_privileges +#test: single_node_security_label single_node_collate + +#test: single_node_misc +# rules cannot run concurrently with any test that creates a view +#test: single_node_rules + +# ---------- +# Another group of parallel tests +# ---------- +#test: single_node_select_views +#test: single_node_portals_p2 +test: single_node_foreign_key +#test: single_node_foreign_key single_node_cluster single_node_dependency +#test: single_node_guc +test: single_node_bitmapops single_node_combocid +#test: single_node_tsearch +#test: single_node_tsdicts +#test: single_node_foreign_data +#single_node_window +#test: single_node_xmlmap +#test: single_node_functional_deps single_node_advisory_lock single_node_json single_node_equivclass + +# ---------- +# Another group of parallel tests +# NB: temp.sql does a reconnect which transiently uses 2 connections, +# so keep this parallel group to at most 19 tests +# ---------- +test: single_node_sequence +#test: single_node_plancache single_node_limit single_node_plpgsql single_node_copy2 single_node_temp single_node_domain single_node_rangefuncs single_node_prepare single_node_without_oid single_node_conversion single_node_truncate single_node_alter_table single_node_sequence single_node_polymorphism +#test: single_node_rowtypes +#test: single_node_returning single_node_largeobject single_node_with single_node_xml + +# run stats by itself because its delay may be insufficient under heavy load +#test: single_node_stats + +# run and check forbidden functions are still forbidden to use in single node +test: single_node_forbidden +test: single_node_mergeinto + +# Trigger tests +test: single_node_triggers +#test: single_node_xc_trigship + +# Synonym tests +#test: single_node_synonym + +# unsupported view tests +test: single_node_unsupported_view +#test: hw_cstore + +# ---------- +# single_node_commit/rollback tests +# ---------- +# interval partition +test: hw_partition_interval_index +test: hw_partition_interval_select +test: hw_partition_interval_compatibility +#openGauss synchronization test cases +test: partiton_pathkey_col_plan partiton_pathkey_col_randomexec partiton_pathkey_row_plan partiton_pathkey_row_randomexec +#test the locales setting expect not affacted each other +#test: pg_session_locale +# ---------- +# These four each depend on the previous one(duplicate) +# duplicated create_function_1 create_type create_table copy +# ---------- +#test: type_sanity +#test: create_function_1 +test: create_table +test: temp__4 +#test: copy# + +# ---------- +# More groups of parallel tests +# duplicated create_misc +# ---------- +#test: hw_hashagg_start +test: create_misc +test: create_view1 
create_view2 create_view3 create_view4 create_view5
+#test: int8#
+
+#duplicated select int8
+#test: select
+#test: misc
+#test: stats
+#test: alter_system_set
+
+# test for hll
+test: hll_hash hll_func hll_para hll_mpp hll_cstore hll_misc
+
+# test for function pg_get_tabledef
+test: function_get_table_def
+
+#dispatch from 13
+test: function
+test: aggregates_part1 aggregates_part2 aggregates_part3 count_distinct_part1 count_distinct_part2 count_distinct_part4
+#test: count_distinct_part3#
+
+test: hw_dfx_thread_status
+
+test: stable_function_shippable
+# ----------
+# data partition
+# ----------
+test: physical_slot
+
+test: hw_smp
+
+# test MERGE INTO
+
+# test INSERT UPDATE UPSERT
+#test: insert_update_002 insert_update_003 insert_update_008 insert_update_009 insert_update_010
+#test: insert_update_001#
+test: delete update namespace case select_having select_implicit
+test: hw_test_operate_user
+test: hw_createtbl_llt
+#test: gsqlerr#
+test: sqlLLT
+#test: hw_sql_llt#
+
+#test: upsert_prepare
+#test: upsert_001 upsert_002 upsert_003 upsert_008 upsert_009 upsert_010
+#test: upsert_grammer_test_01 upsert_unlog_test upsert_tmp_test
+#test: upsert_grammer_test_02 upsert_restriction upsert_composite
+#test: upsert_trigger_test upsert_explain
+#test: upsert_clean
+
+# all pass
+# run tablespace by itself, and first, because it forces a checkpoint;
+# we'd prefer not to have checkpoints later in the tests because that
+# interferes with crash-recovery testing.
+test: hw_alter_session
+test: tablespace
+test: hw_account_lock
+
+# ----------
+# Another group of parallel tests
+# ----------
+#test: hw_independent_user hw_user_basic hw_user_revoke hw_user_privilege hw_user_pguser hw_user_namespace
+test: hw_interval_format hw_function_p_3 hw_function_p_4 hw_current_schema hw_functions
+#test: hw_function_p_1 hw_function_p_2#
+test: hw_dba_enable_partition hw_tablespace
+test: hw_procedure_define
+#test: hw_anonymous_block
+#test: hw_procedure#
+test: hw_grant_all hw_func_return_out
+#hw_dynamic_sql
+test: hw_package_function
+
+#show plan
+#test: plan_hint
+
+###split from parallel_schedule4###
+
+# ----------
+# Another group of parallel tests
+# ----------
+# plsql_packages tests
+
+test: hw_empty_str_to_null
+test: hw_schema
+
+test: tpchrush
+test: tpch01 tpch03 tpch04 libcomm_check_status tpch03_querymem
+test: tpch05 tpch06 tpch07 tpch08
+test: tpch09 tpch10 tpch11 tpch12
+test: tpch13 tpch14 tpch15 tpch16
+test: tpch18 tpch19 tpch20 tpch18_querymem
+test: tpch21 tpch22 tpch11_pretty_performance
+#test: tpch02 tpch17
+
+#test export
+test: temp__2
+
+test: vec_prepare_001 vec_prepare_002
+test: vec_prepare_003
+
+#test sort optimize
+test: sort_optimize_row sort_optimize_column sort_optimize_001
+#test early free
+test: early_free
+#test for col tpch with vector engine disabled
+test: tpch_disablevec01 tpch_disablevec03 tpch_disablevec04
+test: tpch_disablevec05 tpch_disablevec06 tpch_disablevec07
+test: tpch_disablevec08 tpch_disablevec09 tpch_disablevec12
+test: tpch_disablevec13 tpch_disablevec14 tpch_disablevec16
+test: tpch_disablevec18 tpch_disablevec19 tpch_disablevec21
+
+# ----------
+# Postgres-XC additional tests
+# ----------
+
+# This was used by triggers
+test: xc_create_function
+# Now xc_misc is used by xc_returning_step1 and xc_returning_step2
+test: xc_misc
+# Those can be run in parallel
+test: xc_groupby xc_distkey xc_having
+#test: hw_rewrite_lazyagg hw_light
+test: xc_temp xc_FQS
+#test: xc_remote hw_pbe
+test: xc_FQS_join xc_copy
+#test: xc_alter_table 
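For any of these groups, a test passes when the output it writes under results/ (or the results_jdbc/ tree this patch introduces for the JDBC runner) matches the checked-in file under expected/; pg_regress records the file pair per test (the resultfiles/expectfiles lists threaded through run_schedule below) and shells out to diff to compare them. The following sketch, with hypothetical paths, only makes that pass/fail criterion concrete; the real comparison is diff-based and also accepts numbered alternative expected files:

```
#include <fstream>
#include <iostream>
#include <string>

// Minimal illustration of the results/expected comparison; pg_regress
// itself runs diff rather than comparing lines in-process.
static bool outputs_match(const std::string& result_path, const std::string& expect_path)
{
    std::ifstream result(result_path);
    std::ifstream expect(expect_path);
    if (!result || !expect) {
        return false;                        // a missing file counts as a failure
    }
    std::string r, e;
    for (;;) {
        const bool got_r = static_cast<bool>(std::getline(result, r));
        const bool got_e = static_cast<bool>(std::getline(expect, e));
        if (got_r != got_e) {
            return false;                    // one file is longer than the other
        }
        if (!got_r) {
            return true;                     // both exhausted: outputs identical
        }
        if (r != e) {
            return false;                    // first differing line fails the test
        }
    }
}

int main()
{
    // Hypothetical paths following the results/ and expected/ layout.
    std::cout << (outputs_match("results/tpch01.out", "expected/tpch01.out") ? "ok" : "FAILED") << '\n';
    return 0;
}
```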
+test: xc_constraints xc_limit xc_sort
+#test: xc_params xc_returning_step1
+test: xc_params
+test: xc_returning_step2
+
+#test row compress
+#test: compress compress01 compress02 cmpr_toast_000 cmpr_toast_update cmpr_index_00 cmpr_6bytes cmpr_int cmpr_datetime cmpr_numstr cmpr_numstr01 cmpr_float cmpr_nulls_delta cmpr_nulls_prefix cmpr_copyto cmpr_mode_none00 cmpr_mode_none01 cmpr_references_00 cmpr_references_01
+#test: cmpr_rollback cmpr_drop_column cmpr_drop_column_01 cmpr_drop_column_02 cmpr_drop_column_03 cmpr_dead_loop_00 cmpr_timewithzone cmpr_cluster_00
+
+# Cluster setting related test is independent
+
+
+test: xc_dml
+
+# Postgres-XC: Removed this test from the parallel group of tests since it used to give inconsistent plan outputs.
+#test: inherit
+# ----------
+# Another group of parallel tests
+# ----------
+test: create_function_3 vacuum
+#test: constraints drop_if_exists
+
+#test: errors subplan_base
+test: subplan_new
+#test: select
+test: col_subplan_new
+#test: col_subplan_base_1
+#test: join
+test: select_into subselect_part2 gs_aggregate
+#test: select_distinct subselect_part1 transactions btree_index select_distinct_on arrays hash_index
+#test: transactions_control random union
+#test: aggregates
+test: holdable_cursor
+#test: portals_p2 window tsearch temp__6 col_subplan_base_2
+
+test: alter_table_000 alter_table_002 alter_table_003
+#test: alter_table_001
+
+#test: with
+
+# ----------
+# Database security
+# ----------
+test: hw_pwd_reuse
+#test: hw_auditadmin
+
+test: performance_enhance
+test: explain_fqs
+#test: explain_pbe
+# temp__3 create_table copy vec_prepare_001 vec_prepare_002 vec_prepare_003 int4 int8 are duplicated
+test: temp__3
+# ----------
+# Another group of parallel tests
+# NB: temp.sql does a reconnect which transiently uses 2 connections,
+# so keep this parallel group to at most 19 tests
+# ----------
+#test: plpgsql
+test: plpgsql_multiset
+test: plpgsql_array_opengauss
+test: plpgsql_table_opengauss
+test: arrayinterface_single
+#arrayinterface_ted
+#test: plancache limit rangefuncs prepare
+# arrayinterface_ted
+test: plpgsql_assign_value_to_array_attribute
+test: plpgsql_cursor_rowtype
+test: plpgsql_array_of_record
+test: plpgsql_assign_list
+test: plpgsql_package_type
+test: plpgsql_record_attrname
+test: hw_package_variable
+test: returning largeobject
+test: hw_explain_pretty1 hw_explain_pretty2 hw_explain_pretty3
+test: goto
+test: equivalence_class
+test: tsdb_delta2_compress
+test: tsdb_xor_compress
+#test: tsdb_aggregate
+
+test: readline
+test: hw_to_timestamp hw_view_privilege
+
+test: hw_identifier
+#test: hw_hashint1 hw_smalldatetime_hash hw_rawtype_hash
+#test: hw_nvarchar2_hash cmpr_smallint cmpr_prefix_150left cmpr_uint32_oid
+test: oidjoins opr_sanity_2 regex
+#test: opr_sanity_1
+
+test: pmk
+# Cluster setting related test is independent
+# ----------
+# Test of changed data type compatible with Oracle
+
+test: hw_datatype_2 hw_datatype_3
+test: hw_datatype
+test: test_regex llt_atc
+
+# ----------
+# test for set operations
+# ----------
+test: select_nest_views
+#test: enum
+#show plan
+test: col_joinplan col_joinnew
+test: col_limit col_distinct col_prepare
+test: col_function_1 col_function_2 col_count_distinct_1 col_count_distinct_2 col_count_distinct_3 col_count_distinct_4
+test: directory_test
+test: analyse_verify
+test: create_compositetype
+test: hw_pct_type_and_rowtype
+#test: create_basetype
+#test: tabletype
+#test with recursive
+test: recursive_ref_recursive
+#test: recursive_prepare
+#test: recursive_cte 
+#test: recursive_cte_col
+#test: nohashjoin_recursive_cte
+#test: nohashjoin_recursive_cte_col
+#test: others
+#test: icbc_customer
+#test: recursive_unshippable
+#test: recursive_finalize
+#test: recursive_cte_1
+test: test_relpages
+
+test: temp__3
+test: vec_window_pre
+test: gin_test_2
+#test: window1
+test: vec_window_001
+#test: vec_window_002
+test: vec_numeric_sop_1 vec_numeric_sop_2 vec_numeric_sop_3 vec_numeric_sop_4 vec_numeric_sop_5
+#test: vec_window_end
+
+test: vec_unique_pre vec_bitmap_prepare
+test: vec_unique vec_setop_001 vec_setop_002 vec_setop_003 vec_setop_004 hw_vec_int4 hw_vec_int8 hw_vec_float4 hw_vec_float8
+#test: vec_setop_005
+test: hw_vec_constrainst vec_numeric vec_numeric_1 vec_numeric_2 vec_bitmap_1 vec_bitmap_2
+test: disable_vector_engine
+test: hybrid_row_column
+test: retry
+test: hw_replication_slots
+test: insert
+test: copy2 temp
+test: truncate
+#test: temp_table
+
+#test: b_compatibility
+test: hw_compatibility
+test: hw_groupingsets hw_row_grouping_set
+test: char_truncation_common char_truncation_cast
+
+#this case is dispatched from schedule10 (gin_test)
+test: gin_test1 gin_test2 gin_test3
+
+#the following part is dispatched from schedule15
+
+# FIXME: move me back to the parallel test when the refcnt issue is fixed
+# The two tests below are unstable and temporarily ignored. The same applies to distribute_dattistic and relallvisible; Dongwang will solve the problem.
+
+#test: hw_expression_alias
+
+
+#==========================================================================================================================================
+
+
+# ----------
+# src/test/regress/parallel_schedule.33
+#
+# By convention, we put no more than twenty tests in any one parallel group;
+# this limits the number of connections needed to run the tests.
+# ----------
+
+
+test: udf_crem
+
+test: create_c_function
+
+#---1. 
Drop-Column test +test: cstore_drop_column_replicated +#test: cstore_drop_column + +#split from parallel_schedule2 + +# ---------- +# Advisory lock need to be tested in series in Postgres-XC +# --------- +test: advisory_lock + +# ---------- +# Another group of parallel tests +# ---------- +test: cluster dependency bitmapops tsdicts functional_deps +test: json_and_jsonb json jsonb jsonb2 +#test: guc + +# test for vec sonic hash +test: vec_sonic_hashjoin_number_prepare +test: vec_sonic_hashjoin_number_nospill + +test: dml +test: hashfilter hashfilter_1 +test: reduce_orderby +#test: backtrace_log +#test: bulkload_start +test: bulkload_parallel_test_2 bulkload_parallel_test_3 +#test: bulkload_parallel_test_1 bulkload_parallel_test_4 + +test: tpchcol05 tpchcol07 tpchcol08 tpchcol09 + +test: tpchcol01 +test: tpchcol06 +test: tpchcol03 tpchcol04 +test: tpchcol12 tpchcol13 tpchcol14 tpchcol16 tpchcol18 tpchcol19 tpchcol21 + +test: vec_partition vec_partition_1 vec_material_001 + +test: llvm_vecsort llvm_vecsort2 + +test: udf_crem create_c_function + +test: smp +# test: hw_package diff --git a/src/test/regress/pg_regress.cpp b/src/test/regress/pg_regress.cpp index eeefd3c9a..e1fa1c1b0 100644 --- a/src/test/regress/pg_regress.cpp +++ b/src/test/regress/pg_regress.cpp @@ -107,6 +107,7 @@ char* bindir = PGBINDIR; char* libdir = LIBDIR; char* datadir = PGSHAREDIR; char* host_platform = HOST_TUPLE; +char* top_builddir = NULL; #ifndef WIN32_ONLY_COMPILER static char* makeprog = MAKEPROG; @@ -364,13 +365,11 @@ static char* encoding = NULL; static _stringlist* schedulelist = NULL; static int upgrade_cn_num = 1; static int upgrade_dn_num = 4; -static int gray_upgrade_step = 0; static _stringlist* upgrade_schedulelist = NULL; static _stringlist* extra_tests = NULL; static char* pcRegConfFile = NULL; static char* temp_install = NULL; static char* temp_config = NULL; -static char* top_builddir = NULL; static bool nolocale = false; static bool use_existing = false; static char* hostname = NULL; @@ -458,6 +457,13 @@ static bool passwd_altered = true; bool test_single_node = false; static char* platform = "euleros2.0_sp2_x86_64"; +/* client logic jdbc run regression tests */ +static bool use_jdbc_client = false; +static bool to_create_jdbc_user = false; +static bool is_skip_environment_cleanup = false; +static char* client_logic_hook = "encryption"; +static _stringlist* destination_files = NULL; + static bool directory_exists(const char* dir); static void make_directory(const char* dir); static void convertSourcefilesIn(char*, char*, char*, char*); @@ -474,6 +480,7 @@ static void regrCalcCpuUsagePct(const REGR_RESRC_STAT_STRU* pstCurrUsage, const double* pdUcpuUsagePct, double* pdScpuUsagePct); static void regrConvertSizeInBytesToReadableForm(unsigned long int ulValue, char* pcBuf, unsigned int uiBufSize); static void kill_node(int i, int type); +static void cleanup_environment(); static void header(const char* fmt, ...) /* This extension allows gcc to check the format string for consistency with @@ -513,7 +520,7 @@ static void setBinAndLibPath(bool); /* * allow core files if possible. 
*/ -void restartPostmaster(); +void restartPostmaster(bool isOld); void checkProcInsert(); @@ -1172,38 +1179,23 @@ static void stop_postmaster(void) if (postmaster_running) { #ifdef PGXC - if (gray_upgrade_step == UPGRADE_GRAY_STAGE || gray_upgrade_step == UPGRADE_GRAY_STAGE_ROLLBACK) { - stop_node(0, COORD, false); - for (i = 1; i < upgrade_cn_num; i++) { - stop_node(i, COORD, false); - } - for (i = 0; i < upgrade_dn_num; i++) { - stop_node(i, DATANODE, standby_defined); - } - for (i = 0; i < myinfo.shell_count; i++) { - (void)kill(myinfo.shell_pid[i], SIGKILL); - } - } else { - /* - * It is necessary to stop first the Coordinator 1, - * Then other nodes are stopped nicely. - * This is due to connection dependencies between nodes. - */ - for (i = 0; i < myinfo.co_num; i++) { - stop_node(i, COORD, false); - } - for (i = 0; i < myinfo.dn_num; i++) { - stop_node(i, DATANODE, standby_defined); - } + /* + * stop cn\dn + */ + for (i = 0; i < myinfo.co_num; i++) { + stop_node(i, COORD, false); + } + for (i = 0; i < myinfo.dn_num; i++) { + stop_node(i, DATANODE, standby_defined); + } - for (i = 0; i < myinfo.shell_count; i++) { - (void)kill(myinfo.shell_pid[i], SIGKILL); - } + for (i = 0; i < myinfo.shell_count; i++) { + (void)kill(myinfo.shell_pid[i], SIGKILL); + } - if (!test_single_node) { - /* Stop GTM at the end */ - stop_gtm(); - } + if (!test_single_node) { + /* Stop GTM at the end */ + stop_gtm(); } #else @@ -1231,6 +1223,12 @@ static void stop_postmaster(void) } } +static void atexit_cleanup() +{ + stop_postmaster(); + cleanup_environment(); +} + #ifdef PGXC /* * Handy subroutine for setting an environment variable "var" to "val" @@ -1329,12 +1327,75 @@ static void start_single_node() char buf[MAXPGPATH * 4]; int port_number = myinfo.dn_port[0]; char* data_folder = get_node_info_name(0, DATANODE, false); + PID_TYPE guc_pid; + char guc_buf[MAXPGPATH * 4]; + char upgrade_from_str[MAXPGPATH] = {'\0'}; + + if (inplace_upgrade || grayscale_upgrade != -1) { + (void)snprintf(guc_buf, + sizeof(guc_buf), + SYSTEMQUOTE "\"%s/gs_guc\" set -Z datanode -D \"%s/%s\" -c \"%s\" > /dev/null 2>&1" SYSTEMQUOTE, + bindir, + temp_install, + data_folder, + upgrade_from == 0 ? "upgrade_mode=0" : grayscale_upgrade == -1 ? "upgrade_mode=1" : "upgrade_mode=2"); + guc_pid = spawn_process(guc_buf); + if (guc_pid == INVALID_PID) { + fprintf(stderr, _("\n%s: could not spawn GUC parameter: %s\n"), progname, strerror(errno)); + exit_nicely(2); + } + // check if guc parameter is set successfully + int result = 0; + int count = 0; + while (result != 1) { + count++; + char cmd[MAXPGPATH] = {'\0'}; +#ifdef BUILD_BY_CMAKE + (void)snprintf(cmd, + sizeof(cmd), + "find %s/%s/ -name \"postgresql.conf\" | xargs grep \"upgrade_mode = %s\" | wc -l", + temp_install, + data_folder, + upgrade_from == 0 ? "0" : grayscale_upgrade == -1 ? "1" : "2"); + +#else + (void)snprintf(cmd, + sizeof(cmd), + "sync; find ./tmp_check/%s/ -name \"postgresql.conf\" | xargs grep \"upgrade_mode = %s\" | wc -l", + data_folder, + upgrade_from == 0 ? "0" : grayscale_upgrade == -1 ? 
"1" : "2"); +#endif + + FILE* fstream = NULL; + char buf[1000]; + memset(buf, 0, sizeof(buf)); + fstream = popen(cmd, "r"); + if (NULL != fgets(buf, sizeof(buf), fstream)) { + result = atoi(buf); + } + if (result != 1) { + sleep(1); + } + if (count > 180) { + fprintf(stderr, _("\n%s: fail to set upgrade_mode GUC\n count %d"), progname, count); + exit(2); + } + if (NULL != fstream) { + pclose(fstream); + } + } + + if (upgrade_from) + (void)snprintf( + upgrade_from_str, sizeof(upgrade_from_str), "-u %d -c allow_system_table_mods=true", upgrade_from); + } (void)snprintf(buf, sizeof(buf), - SYSTEMQUOTE "\"%s/gaussdb\" %s %s -i -p %d -D \"%s/%s\"%s -c log_statement=all -c logging_collector=true -c " + SYSTEMQUOTE "\"%s/gaussdb\" %s %s %s -i -p %d -D \"%s/%s\"%s -c log_statement=all -c logging_collector=true -c " "\"listen_addresses=%s\" > \"%s/log/postmaster_%s.log\" 2>&1" SYSTEMQUOTE, bindir, + upgrade_from_str, "--single_node", (securitymode) ? "--securitymode" : " ", port_number, @@ -1344,7 +1405,7 @@ static void start_single_node() hostname ? hostname : "*", outputdir, data_folder); - + header( _("\nstart cmd is : %s\n"), buf); PID_TYPE datanode_pid = spawn_process(buf); if (datanode_pid == INVALID_PID) { fprintf(stderr, _("\n%s: could not spawn postmaster: %s\n"), progname, strerror(errno)); @@ -1471,6 +1532,7 @@ static void start_my_node(int i, int type, bool is_main, bool standby, int upgra outputdir, data_folder); } + header( _("\nstart cmd is : %s\n"), buf); node_pid = spawn_process(buf); if (node_pid == INVALID_PID) { fprintf(stderr, _("\n%s: could not spawn postmaster: %s\n"), progname, strerror(errno)); @@ -3030,6 +3092,7 @@ static void convertSourcefilesIn(char* pcSourceSubdir, char* pcDestDir, char* pc fprintf(stderr, _("%s: could not open file \"%s\" for writing: %s\n"), progname, destfile, strerror(errno)); exit_nicely(2); } + add_stringlist_item(&destination_files, destfile); while (fgets(line, sizeof(line), infile)) { if (!is_cmdline(line)) { for (iIter = 0; iIter < g_stRegrReplcPatt.iNumOfPatterns; iIter++) @@ -3051,7 +3114,7 @@ static void convertSourcefilesIn(char* pcSourceSubdir, char* pcDestDir, char* pc replace_string(line, "@coordinator1@", get_node_info_name(0, COORD, true)); replace_string(line, "@coordinator2@", get_node_info_name(1, COORD, true)); replace_string(line, "@pgbench_dir@", pgbenchdir); - + replace_string(line, "@client_logic_hook@", client_logic_hook); char* ptr = GetStartNodeCmdString(0, DATANODE); replace_string(line, "@start_datanode1@", ptr); free(ptr); @@ -3144,6 +3207,7 @@ static void convertSourcefilesIn(char* pcSourceSubdir, char* pcDestDir, char* pc /* Create the .sql and .out files from the .source files, if any */ static void convert_sourcefiles(void) { + errno_t rc = EOK; convertSourcefilesIn("input", inputdir, "sql", "sql"); convertSourcefilesIn("output", outputdir, "expected", "out"); @@ -3154,6 +3218,20 @@ static void convert_sourcefiles(void) if (g_stRegrReplcPatt.puiPatternOffset != NULL) REGR_FREE(g_stRegrReplcPatt.puiPatternOffset); + + /* + * backup the sql directory + */ + char buff[MAXPGPATH * 4] = {0}; + char src[MAXPGPATH] = {0}; + char dest[MAXPGPATH] = {0}; + rc = snprintf_s(src, sizeof(src), sizeof(src) - 1, "%s/sql", inputdir); + securec_check_ss_c(rc, "", ""); + rc = snprintf_s(dest, sizeof(dest), sizeof(dest) - 1, "%s/.sql", inputdir); + securec_check_ss_c(rc, "", ""); + rc = snprintf_s(buff, sizeof(buff), sizeof(buff) - 1, "cp -r %s %s", src, dest); + securec_check_ss_c(rc, "", ""); + system(buff); } /* @@ -3448,6 +3526,20 
@@ static void initialize_environment(void)
     convert_sourcefiles();
     load_resultmap();
 }
+
+static void cleanup_environment()
+{
+    if (is_skip_environment_cleanup) {
+        return;
+    }
+    /* remove the sql/expected files generated from the .source templates */
+    for (; destination_files != NULL; destination_files = destination_files->next) {
+        unlink(destination_files->str);
+    }
+}
+
 #define MAKEFILE_BUF_MORE_LEN 32
 static void setBinAndLibPath(bool isOld)
 {
@@ -4758,6 +4850,8 @@ static void run_schedule(const char* schedule, test_function tfunc, diag_functio
             status(_("system_table_ddl_test %-24s .... "), tests[0]);
         } else if (isPlanAndProto) {
             status(_("plan_proto_test %-24s .... "), tests[0]);
+        } else if (use_jdbc_client) {
+            status(_("jdbc test %-24s .... "), tests[0]);
         } else {
             status(_("test %-24s .... "), tests[0]);
         }
@@ -4765,7 +4859,7 @@
             REGR_START_TIMER_TEMP(0);

-            pids[0] = (tfunc)(tests[0], &resultfiles[0], &expectfiles[0], &tags[0]);
+            pids[0] = (tfunc)(tests[0], &resultfiles[0], &expectfiles[0], &tags[0], use_jdbc_client);
         }
         wait_for_tests(pids, statuses, NULL, 1);
@@ -4789,7 +4883,7 @@
                 REGR_START_TIMER;

                 /* Invoke the single test file */
-                pids[i] = (tfunc)(tests[i], &resultfiles[i], &expectfiles[i], &tags[i]);
+                pids[i] = (tfunc)(tests[i], &resultfiles[i], &expectfiles[i], &tags[i], use_jdbc_client);
                 i++;
             }
@@ -4840,7 +4934,7 @@
                 REGR_START_TIMER_TEMP(i);

-                pids[i] = (tfunc)(tests[i], &resultfiles[i], &expectfiles[i], &tags[i]);
+                pids[i] = (tfunc)(tests[i], &resultfiles[i], &expectfiles[i], &tags[i], use_jdbc_client);
                 i++;
             }
@@ -4938,40 +5032,22 @@ static void run_schedule(const char* schedule, test_function tfunc, diag_functio
                 status(_("%-18s"), "failed (ignored)");
                 fail_ignore_count++;
             } else if (isSystemTableDDL) {
-                if (gray_upgrade_step == 2) {
-                    status(_("%-18s"), "ok (grayscale observe stage)");
-                } else {
-                    status(_("%-18s"), "ok (grayscale stage)");
-                }
+                status(_("%-18s"), "ok (grayscale stage)");
                 success_count++;
             } else if (isPlanAndProto) {
-                if (gray_upgrade_step == 1) {
-                    status(_("%-18s"), "ok (grayscale stage)");
-                    success_count++;
-                } else {
-                    status(_("%-18s"), "FAILED (grayscale observe stage)");
-                    fail_count++;
-                }
+                status(_("%-18s"), "FAILED (grayscale observe stage)");
+                fail_count++;
             } else {
                 status(_("%-18s"), "FAILED");
                 fail_count++;
             }
         } else {
             if (isSystemTableDDL) {
-                if (gray_upgrade_step == 2) {
-                    status(_("%-18s"), "FAILED (grayscale observe stage)");
-                } else {
-                    status(_("%-18s"), "FAILED (grayscale stage)");
-                }
+                status(_("%-18s"), "FAILED (grayscale stage)");
                 fail_count++;
             } else if (isPlanAndProto) {
-                if (gray_upgrade_step == 2) {
-                    status(_("%-18s"), "ok (grayscale observe stage)");
-                    success_count++;
-                } else {
-                    status(_("%-18s"), "FAILED (grayscale stage)");
-                    fail_count++;
-                }
+                status(_("%-18s"), "FAILED (grayscale stage)");
+                fail_count++;
             } else {
                 status(_("%-18s"), "ok");
                 success_count++;
@@ -5027,12 +5103,16 @@
     _stringlist *rl, *el, *tl;
     bool differ = false;

-    status(_("test %-24s .... "), test);
+    if (!use_jdbc_client) {
+        status(_("test %-24s .... "), test);
+    } else {
+        status(_("jdbc test %-24s .... 
"), test); + } makeNestedDirectory(test); REGR_START_TIMER; - pid = (tfunc)(test, &resultfiles, &expectfiles, &tags); + pid = (tfunc)(test, &resultfiles, &expectfiles, &tags, use_jdbc_client); wait_for_tests(&pid, &exit_status, NULL, 1); REGR_STOP_TIMER; @@ -5158,7 +5238,7 @@ static void check_global_variables() } } -#define BASE_PGXC_LIKE_MACRO_NUM 1414 +#define BASE_PGXC_LIKE_MACRO_NUM 1401 static void check_pgxc_like_macros() { #ifdef BUILD_BY_CMAKE @@ -5198,6 +5278,7 @@ static void open_result_files(void) { char file[MAXPGPATH]; FILE* difffile = NULL; + errno_t rc = EOK; /* create the log file (copy of running status output) */ (void)snprintf(file, sizeof(file), "%s/regression.out", outputdir); @@ -5209,7 +5290,14 @@ static void open_result_files(void) } /* create the diffs file as empty */ - (void)snprintf(file, sizeof(file), "%s/regression.diffs", outputdir); + if (to_create_jdbc_user) { + rc = snprintf_s(file, sizeof(file), sizeof(file) - 1, "%s/regression_jdbc.diffs", outputdir); + securec_check_ss_c(rc, "", ""); + } else { + rc = snprintf_s(file, sizeof(file), sizeof(file) - 1, "%s/regression.diffs", outputdir); + securec_check_ss_c(rc, "", ""); + } + difffilename = strdup(file); difffile = fopen(difffilename, "w"); if (!difffile) { @@ -5220,12 +5308,30 @@ static void open_result_files(void) fclose(difffile); /* also create the output directory if not present */ - (void)snprintf(file, sizeof(file), "%s/results", outputdir); + if (!use_jdbc_client) { + rc = snprintf_s(file, sizeof(file), sizeof(file) - 1, "%s/results", outputdir); + securec_check_ss_c(rc, "", ""); + } else { + rc = snprintf_s(file, sizeof(file), sizeof(file) - 1, "%s/results_jdbc", outputdir); + securec_check_ss_c(rc, "", ""); + } if (!directory_exists(file)) { make_directory(file); } } +/* create jdbc_user & grant all database to it */ +static void create_jdbc_user(const _stringlist* granted_dbs) { + const char *user_name = "jdbc_regress"; + const char *user_password = "1q@W3e4r"; + header(_("creating user for JDBC: \"%s\""), user_name); + psql_command("postgres", "CREATE USER %s WITH PASSWORD '%s' LOGIN", user_name, user_password); + psql_command("postgres", "ALTER USER %s sysadmin CREATEROLE createdb", user_name); + for (; granted_dbs != NULL; granted_dbs = granted_dbs->next) { + psql_command("postgres", "GRANT ALL ON DATABASE \"%s\" TO \"%s\"", granted_dbs->str, user_name); + } +} + static void create_database(const char* dbname) { _stringlist* sl = NULL; @@ -5324,6 +5430,7 @@ static void help(void) printf(_(" --temp-install=DIR create a temporary installation in DIR\n")); printf(_(" --use-existing use an existing installation\n")); printf(_(" --launcher=CMD use CMD as launcher of gsql\n")); + printf(_(" --skip_environment_cleanup do not clean generated sql scripts\n")); printf(_("\n")); printf(_("Options for \"temp-install\" mode:\n")); printf(_(" --no-locale use C locale\n")); @@ -5348,6 +5455,7 @@ static void help(void) printf(_(" --user=USER connect as USER\n")); printf(_(" --psqldir=DIR use gsql in DIR (default: find in PATH)\n")); printf(_(" --enable-segment create table default with segment=on")); + printf(_(" --jdbc enable jdbc regression test")); printf(_("\n")); printf(_("The exit status is 0 if all tests passed, 1 if some tests failed, and 2\n")); printf(_("if the tests could not be run for some reason.\n")); @@ -6063,26 +6171,16 @@ static void start_postmaster(void) header(_("starting postmaster")); if (!test_single_node) { - if (gray_upgrade_step == UPGRADE_GRAY_STAGE || gray_upgrade_step == 
UPGRADE_GRAY_STAGE_ROLLBACK) { - start_my_node(0, COORD, true, false, upgrade_from); - for (i = 1; i < upgrade_cn_num; i++) { - start_my_node(i, COORD, false, false, upgrade_from); - } - for (i = 0; i < upgrade_dn_num; i++) { - start_my_node(i, DATANODE, false, standby_defined, upgrade_from); - } - } else { - /* Start GTM */ - start_gtm(); + /* Start GTM */ + start_gtm(); - start_my_node(0, COORD, true, false, upgrade_from); - for (i = 1; i < myinfo.co_num; i++) { - start_my_node(i, COORD, false, false, upgrade_from); - } + start_my_node(0, COORD, true, false, upgrade_from); + for (i = 1; i < myinfo.co_num; i++) { + start_my_node(i, COORD, false, false, upgrade_from); + } - for (i = 0; i < myinfo.dn_num; i++) { - start_my_node(i, DATANODE, false, standby_defined, upgrade_from); - } + for (i = 0; i < myinfo.dn_num; i++) { + start_my_node(i, DATANODE, false, standby_defined, upgrade_from); } } else { assert(0 == myinfo.co_num && 1 == myinfo.dn_num); @@ -6186,6 +6284,11 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t struct timeval end_time; double total_time; + struct timeval start_time_total; + struct timeval end_time_total; + + (void)gettimeofday(&start_time_total, NULL); + static struct option long_options[] = {{"help", no_argument, NULL, 'h'}, {"version", no_argument, NULL, 'V'}, {"dbname", required_argument, NULL, 1}, @@ -6245,6 +6348,9 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t {"g_aiehost", required_argument, NULL, 56}, {"g_aieport", required_argument, NULL, 57}, {"enable-segment", no_argument, NULL, 58}, + {"client_logic_hook", required_argument, NULL, 59}, + {"jdbc", no_argument, NULL, 60}, + {"skip_environment_cleanup", no_argument, NULL, 61}, {NULL, 0, NULL, 0} }; @@ -6478,11 +6584,13 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t break; case 50: grayscale_upgrade = 0; + inplace_upgrade = true; super_user_altered = false; passwd_altered = false; break; case 51: grayscale_upgrade = 1; + inplace_upgrade = true; super_user_altered = false; passwd_altered = false; break; @@ -6513,6 +6621,17 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t case 58: g_enable_segment = true; break; + case 59: + client_logic_hook = make_absolute_path(optarg); + break; + case 60: + printf("\n starting with jdbc\n"); + use_jdbc_client = true; + to_create_jdbc_user = true; + break; + case 61: + is_skip_environment_cleanup = true; + break; default: /* getopt_long already emitted a complaint */ fprintf(stderr, _("\nTry \"%s -h\" for more information.\n"), progname); @@ -6520,6 +6639,11 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t } } + if (strcmp(platform, "openeuler_aarch64") == 0) { + grayscale_upgrade = -1; + inplace_upgrade = false; + } + if (run_test_case && run_qunit) { fprintf(stderr, _("Can not run qunit and other check simultaneously\n")); exit_nicely(2); @@ -6581,7 +6705,7 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t // Do not stop postmaster if we are told not to run test cases. if (run_test_case) - (void)atexit(stop_postmaster); + (void)atexit(atexit_cleanup); /* * create regression.out regression.diff result @@ -6801,8 +6925,10 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t } /* If only init database for inplace upgrade, we are done. 
*/ - if (init_database) + if (init_database) { + cleanup_environment(); return 0; + } /* * Adjust the default postgresql.conf as needed for regression @@ -6815,7 +6941,9 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t * and 2PC related information. * PGXCTODO: calculate port of GTM before setting configuration files */ - initdb_node_config_file(standby_defined); + if (!(inplace_upgrade || grayscale_upgrade != -1)) { + initdb_node_config_file(standby_defined); + } } /*Execute shell cmds in source files*/ exec_cmds_from_inputfiles(); @@ -6825,153 +6953,159 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t */ (void)start_postmaster(); + if (inplace_upgrade || grayscale_upgrade != -1) { + checkProcInsert(); + setup_super_user(); + + super_user_altered = true; + // Execute the upgrade script in the old bin + (void)snprintf_s(buf, + sizeof(buf), + sizeof(buf) - 1, + SYSTEMQUOTE "python %s/upgradeCheck.py -u -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, + upgrade_script_dir, + get_port_number(0, DATANODE), + upgrade_from, + psqldir ? psqldir : "", + psqldir ? "/" : ""); + header("%s", buf); + (void)gettimeofday(&end_time, NULL); + if (regr_system(buf)) { + fprintf(stderr, _("Failed to exec upgrade.\nCommand was: %s\n"), buf); + exit_nicely(2); + } + + // start with new bin + (void)gettimeofday(&start_time, NULL); + header(_("shutting down postmaster")); + (void)stop_postmaster(); + setBinAndLibPath(false); + + if (grayscale_upgrade == 1) { + (void)start_postmaster(); + header(_("sleeping")); + pg_usleep(10000000L); + (void)gettimeofday(&end_time, NULL); + total_time = (end_time.tv_sec - start_time.tv_sec) + (end_time.tv_usec - start_time.tv_usec) * 0.000001; + printf("It takes %fs to restart cluster.\n", total_time); + /* Execute the post upgrade script */ + (void)snprintf_s(buf, + sizeof(buf), + sizeof(buf) - 1, + SYSTEMQUOTE "python %s/upgradeCheck.py --post -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, + upgrade_script_dir, + get_port_number(0, DATANODE), + upgrade_from, + psqldir ? psqldir : "", + psqldir ? "/" : ""); + header("%s", buf); + (void)gettimeofday(&end_time, NULL); + if (regr_system(buf)) { + fprintf(stderr, _("Failed to exec post-upgrade.\nCommand was: %s\n"), buf); + exit_nicely(2); + } + + /* Execute the post rollback script */ + (void)snprintf_s(buf, + sizeof(buf), + sizeof(buf) - 1, + SYSTEMQUOTE "python %s/upgradeCheck.py --rollback -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, + upgrade_script_dir, + get_port_number(0, DATANODE), + upgrade_from, + psqldir ? psqldir : "", + psqldir ? "/" : ""); + header("%s", buf); + (void)gettimeofday(&end_time, NULL); + if (regr_system(buf)) { + fprintf(stderr, _("Failed to exec post-rollback.\nCommand was: %s\n"), buf); + exit_nicely(2); + } + + // start with old bin + restartPostmaster(true); + + /* Execute the rollback script */ + (void)snprintf_s(buf, + sizeof(buf), + sizeof(buf) - 1, + SYSTEMQUOTE "python %s/upgradeCheck.py -o -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, + upgrade_script_dir, + get_port_number(0, DATANODE), + upgrade_from, + psqldir ? psqldir : "", + psqldir ? 
"/" : ""); + header("%s", buf); + (void)gettimeofday(&end_time, NULL); + if (regr_system(buf)) { + fprintf(stderr, _("Failed to exec post-rollback.\nCommand was: %s\n"), buf); + exit_nicely(2); + } + + // Execute the upgrade script in the old bin + (void)snprintf_s(buf, + sizeof(buf), + sizeof(buf) - 1, + SYSTEMQUOTE "python %s/upgradeCheck.py -u -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, + upgrade_script_dir, + get_port_number(0, DATANODE), + upgrade_from, + psqldir ? psqldir : "", + psqldir ? "/" : ""); + header("%s", buf); + (void)gettimeofday(&end_time, NULL); + if (regr_system(buf)) { + fprintf(stderr, _("Failed to exec upgrade.\nCommand was: %s\n"), buf); + exit_nicely(2); + } + + // start with new bin + (void)gettimeofday(&start_time, NULL); + header(_("shutting down postmaster")); + (void)stop_postmaster(); + setBinAndLibPath(false); + + } + + + initdb_node_config_file(standby_defined); + (void)start_postmaster(); + header(_("sleeping")); + pg_usleep(10000000L); + (void)gettimeofday(&end_time, NULL); + total_time = (end_time.tv_sec - start_time.tv_sec) + (end_time.tv_usec - start_time.tv_usec) * 0.000001; + printf("It takes %fs to restart cluster.\n", total_time); + /* Execute the post upgrade script */ + (void)snprintf_s(buf, + sizeof(buf), + sizeof(buf) - 1, + SYSTEMQUOTE "python %s/upgradeCheck.py --post -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, + upgrade_script_dir, + get_port_number(0, DATANODE), + upgrade_from, + psqldir ? psqldir : "", + psqldir ? "/" : ""); + header("%s", buf); + (void)gettimeofday(&end_time, NULL); + if (regr_system(buf)) { + fprintf(stderr, _("Failed to exec post-upgrade.\nCommand was: %s\n"), buf); + exit_nicely(2); + } + + header(_("shutting down postmaster")); + (void)stop_postmaster(); + upgrade_from = 0; + // switch to the new binary + setBinAndLibPath(false); + (void)start_postmaster(); + header(_("sleeping")); + pg_usleep(10000000L); + } + if (!test_single_node) { /* Postmaster is finally running, so set up connection information on Coordinators */ if (myinfo.keep_data == false) { setup_connection_information(standby_defined); - if (inplace_upgrade || grayscale_upgrade != -1) { - checkProcInsert(); - setup_super_user(); - - super_user_altered = true; - // Execute the upgrade script - (void)snprintf(buf, - sizeof(buf), - SYSTEMQUOTE "python %s/upgradeCheck.py -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, - upgrade_script_dir, - get_port_number(0, COORD), - upgrade_from, - psqldir ? psqldir : "", - psqldir ? 
"/" : ""); - header("%s", buf); - if (regr_system(buf)) { - fprintf(stderr, _("Failed to exec upgrade.\nCommand was: %s\n"), buf); - exit_nicely(2); - } - if (grayscale_upgrade != -1) { - for (int step = UPGRADE_GRAY_STAGE; step < UPGRADE_FINASH; step += 2) { - gray_upgrade_step = step; - restartPostmaster(); - // create database - if (!use_existing) { - for (ssl = dblist; ssl; ssl = ssl->next) { - create_database(ssl->str); - } - for (ssl = extraroles; ssl; ssl = ssl->next) { - create_role(ssl->str, dblist); - } - use_existing = true; - } - /* - * Ready to run the tests - */ - if (gray_upgrade_step == UPGRADE_GRAY_STAGE) { - header(_("running regression test queries during the grayscale stage of the grayscale " - "upgrade")); - } else { - header(_("running regression test queries during the grayscale observe stage of the " - "grayscale upgrade")); - } - (void)gettimeofday(&start_time, NULL); - // run use cases - for (ssl = upgrade_schedulelist; ssl != NULL; ssl = ssl->next) { - run_schedule(ssl->str, tfunc, dfunc); - } - - (void)gettimeofday(&end_time, NULL); - - total_time = (end_time.tv_sec - start_time.tv_sec) + - (end_time.tv_usec - start_time.tv_usec) * 0.000001; - - /* - * Emit nice-looking summary message - */ - if (fail_count == 0 && fail_ignore_count == 0) { - (void)snprintf(buf, - sizeof(buf), - _(" All %d tests execution results are normal during grayscale upgrade. "), - success_count); - } else if (fail_count == 0) { /* fail_count=0, fail_ignore_count>0 */ - (void)snprintf(buf, - sizeof(buf), - _(" %d of %d tests execution results are normal, %d failed test(s) ignored. "), - success_count, - success_count + fail_ignore_count, - fail_ignore_count); - } else if (fail_ignore_count == 0) { /* fail_count>0 && fail_ignore_count=0 */ - (void)snprintf(buf, - sizeof(buf), - _(" %d of %d tests execution results are abnormal. "), - fail_count, - success_count + fail_count); - } else { /* fail_count>0 && fail_ignore_count>0 */ - (void)snprintf(buf, - sizeof(buf), - _(" %d of %d tests execution results are abnormal., %d of these failures " - "ignored. "), - fail_count + fail_ignore_count, - success_count + fail_count + fail_ignore_count, - fail_ignore_count); - } - - (void)putchar('\n'); - for (i = strlen(buf); i > 0; i--) { - (void)putchar('='); - } - printf("\n%s\n", buf); - printf(" Total Time: %fs\n", total_time); - for (i = strlen(buf); i > 0; i--) { - (void)putchar('='); - } - (void)putchar('\n'); - (void)putchar('\n'); - - if (file_size(difffilename) > 0) { - printf(_("The differences that caused some tests to fail can be viewed in the\n" - "file \"%s\". A copy of the test summary that you see\n" - "above is saved in the file \"%s\".\n\n"), - difffilename, - logfilename); - } else { - unlink(difffilename); - unlink(logfilename); - } - if (fail_count != 0) { - exit_nicely(1); - } - // run rollback - gray_upgrade_step++; - stop_postmaster(); - setBinAndLibPath(true); - start_postmaster(); - sleep(60); - // Execute the upgrade rollback script - (void)snprintf(buf, - sizeof(buf), - SYSTEMQUOTE "python %s/upgradeCheck.py --rollback -p %d -f %d -s %s%sgsql" SYSTEMQUOTE, - upgrade_script_dir, - get_port_number(0, COORD), - upgrade_from, - psqldir ? psqldir : "", - psqldir ? 
"/" : ""); - header("%s", buf); - if (regr_system(buf)) { - fprintf(stderr, _("Failed to exec rollback.\nCommand was: %s\n"), buf); - exit_nicely(2); - } - } - gray_upgrade_step = UPGRADE_FINASH; - } - - header(_("shutting down postmaster")); - (void)stop_postmaster(); - upgrade_from = 0; - // switch to the new binary - setBinAndLibPath(false); - (void)start_postmaster(); - header(_("sleeping")); - pg_usleep(10000000L); - } } else { pg_usleep(2000000L); rebuild_node_group(); @@ -7034,10 +7168,16 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t } if (myinfo.run_check == true) { + (void)gettimeofday(&end_time_total, NULL); + total_time = (end_time_total.tv_sec - start_time_total.tv_sec) + (end_time_total.tv_usec - start_time_total.tv_usec) * 0.000001; + printf("Preparation before the test case execution takes %fs..\n", total_time); if (!use_existing) { for (ssl = dblist; ssl; ssl = ssl->next) { create_database(ssl->str); } + if (to_create_jdbc_user) { + create_jdbc_user(dblist); + } for (ssl = extraroles; ssl; ssl = ssl->next) { create_role(ssl->str, dblist); } @@ -7174,7 +7314,7 @@ int regression_main(int argc, char* argv[], init_function ifunc, test_function t stop_postmaster(); } } - + cleanup_environment(); return 0; } @@ -7209,12 +7349,19 @@ void checkProcInsert() } // only for restart during grayscale upgrade -void restartPostmaster() +void restartPostmaster(bool isOld) { + struct timeval start_time; + struct timeval end_time; + double total_time; + (void)gettimeofday(&start_time, NULL); header(_("shutting down postmaster")); (void)stop_postmaster(); - setBinAndLibPath(false); + setBinAndLibPath(isOld); (void)start_postmaster(); header(_("sleeping")); pg_usleep(10000000L); + (void)gettimeofday(&end_time, NULL); + total_time = (end_time.tv_sec - start_time.tv_sec) + (end_time.tv_usec - start_time.tv_usec) * 0.000001; + printf("It takes %fs to restart cluster.\n", total_time); } diff --git a/src/test/regress/pg_regress.h b/src/test/regress/pg_regress.h index 76a8b52b7..fe6337fc1 100644 --- a/src/test/regress/pg_regress.h +++ b/src/test/regress/pg_regress.h @@ -99,7 +99,7 @@ typedef struct tagREGR_REPLACE_PATTERNS_STRU { /* To store the values of the regress.conf values */ extern REGR_CONF_ITEMS_STRU g_stRegrConfItems; -typedef PID_TYPE (*test_function)(const char*, _stringlist**, _stringlist**, _stringlist**); +typedef PID_TYPE (*test_function)(const char*, _stringlist**, _stringlist**, _stringlist**, bool use_jdbc_client); typedef void (*init_function)(void); typedef PID_TYPE (*diag_function)(char*); @@ -114,6 +114,7 @@ extern bool debug; extern char* inputdir; extern char* outputdir; extern char* launcher; +extern char* top_builddir; /* * This should not be global but every module should be able to read command diff --git a/src/test/regress/pg_regress_main.cpp b/src/test/regress/pg_regress_main.cpp index 6086a2158..0cfbf0284 100644 --- a/src/test/regress/pg_regress_main.cpp +++ b/src/test/regress/pg_regress_main.cpp @@ -26,35 +26,129 @@ extern char* difffilename; #define DG_LEVEL_LOG_FILE 8 #define DG_LEVEL_DB_FOLDER 16 +const int SQL_CMD_LEN = (MAXPGPATH * 5); + extern bool test_single_node; +int EndsWith(const char *str, const char *suffix) +{ + if (!str || !suffix) + return 0; + size_t lenstr = strlen(str); + size_t lensuffix = strlen(suffix); + if (lensuffix > lenstr) + return 0; + return strncmp(str + lenstr - lensuffix, suffix, lensuffix) == 0; +} +int EndsWithBin(const char *str) +{ + return EndsWith(str, "_bin"); +} +int EndsWithJavaBin(const 
char *str) { + return EndsWith(str, "Bin"); +} +/* generate gsql/jdbc command for specific flag */ +static void gen_sql_cmd(char * const psql_cmd, int psql_size, const char* testname, const char* infile, + const char* outfile, bool is_binary, bool use_jdbc_client, bool is_jdbc_binary_test) +{ + errno_t rc = EOK; + size_t offset = 0; + if (launcher) { + rc = snprintf_s(psql_cmd + offset, (SQL_CMD_LEN - offset), psql_size - offset, "%s ", launcher); + securec_check_ss_c(rc, "", ""); + offset += strlen(launcher); + } + int port = test_single_node ? myinfo.dn_port[0] : myinfo.co_port[0]; + if (!is_binary){ + if (use_jdbc_client) { + const char *JDBC_LOCATION_REF = "/opengauss/src/test/regress/jdbc_client/jdbc_client.jar"; + const char *JNI_LOCATION_REF = "/lib"; + char jni_location[MAXPGPATH] = {0}; + char jdbc_location[MAXPGPATH] = {0}; + rc = sprintf_s(jni_location, MAXPGPATH, "%s/%s", outputdir, JNI_LOCATION_REF); + securec_check_ss_c(rc, "", ""); + rc = sprintf_s(jdbc_location, MAXPGPATH, "%s/%s", outputdir, JDBC_LOCATION_REF); + securec_check_ss_c(rc, "", ""); + if (is_jdbc_binary_test) { + (void)snprintf_s(psql_cmd, (SQL_CMD_LEN - offset), psql_size, + "java -Djava.library.path=%s -cp %s gauss.regress.jdbc.JdbcClient \ + localhost %d %s jdbc_regress 1q@W3e4r %s %s", + jni_location, jdbc_location, port, dblist->str, + testname, // use the test name as parameter + outfile); + securec_check_ss_c(rc, "\0", "\0"); + } else { + (void)snprintf_s(psql_cmd, (SQL_CMD_LEN - offset), psql_size, + "java -Djava.library.path=%s -cp %s gauss.regress.jdbc.JdbcClient \ + localhost %d %s jdbc_regress 1q@W3e4r %s %s", + jni_location, jdbc_location, port, dblist->str, + infile, // use the SQL file name as parameter + outfile); + securec_check_ss_c(rc, "\0", "\0"); + } + } else{ + (void)snprintf_s(psql_cmd + offset, + (SQL_CMD_LEN - offset), + psql_size - offset, + SYSTEMQUOTE "\"%s%sgsql\" -X -p %d -a %s %s -q -d \"%s\" -C " + "< \"%s\" > \"%s\" 2>&1" SYSTEMQUOTE, + psqldir ? psqldir : "", + psqldir ? "/" : "", + port, + (char*)g_stRegrConfItems.acFieldSepForAllText, + (char*)g_stRegrConfItems.acTuplesOnly, + dblist->str, + infile, + outfile); + securec_check_ss_c(rc, "\0", "\0"); + } + } else { + rc = snprintf_s(psql_cmd + offset, + (SQL_CMD_LEN - offset), psql_size - offset, + "%s > %s 2>&1", infile, outfile); + securec_check_ss_c(rc, "\0", "\0"); + } +} + /* * start a psql test process for specified file (including redirection), * and return process ID */ static PID_TYPE psql_start_test( - const char* testname, _stringlist** resultfiles, _stringlist** expectfiles, _stringlist** tags) + const char* testname, _stringlist** resultfiles, _stringlist** expectfiles, _stringlist** tags, + bool use_jdbc_client) { PID_TYPE pid; + errno_t rc = EOK; char infile[MAXPGPATH]; char outfile[MAXPGPATH]; char expectfile[MAXPGPATH]; - char psql_cmd[MAXPGPATH * 3]; - size_t offset = 0; - - /* - * Look for files in the output dir first, consistent with a vpath search. - * This is mainly to create more reasonable error messages if the file is - * not found. It also allows local test overrides when running pg_regress - * outside of the source tree. 
-     */
-    snprintf(infile, sizeof(infile), "%s/sql/%s.sql", outputdir, testname);
-    if (!file_exists(infile))
-        snprintf(infile, sizeof(infile), "%s/sql/%s.sql", inputdir, testname);
+    char psql_cmd[SQL_CMD_LEN];
+    bool is_binary = EndsWithBin(testname);
+    bool is_jdbc_binary_test = false;
+    if (use_jdbc_client && EndsWithJavaBin(testname)) {
+        is_jdbc_binary_test = true;
+    }
+    if (!is_binary && !is_jdbc_binary_test) {
+        /*
+         * Look for files in the output dir first, consistent with a vpath search.
+         * This is mainly to create more reasonable error messages if the file is
+         * not found. It also allows local test overrides when running pg_regress
+         * outside of the source tree.
+         */
+        snprintf(infile, sizeof(infile), "%s/sql/%s.sql", outputdir, testname);
+        if (!file_exists(infile)) {
+            snprintf(infile, sizeof(infile), "%s/sql/%s.sql", inputdir, testname);
+        }
+    } else if (is_binary) {
+        rc = snprintf_s(infile, MAXPGPATH, MAXPGPATH - 1, "%s/binary/%s.out", inputdir, testname);
+        securec_check_ss_c(rc, "", "");
+    }

     /* If the .sql file does not exist, then record the error in diff summary
      * file and cont */
-    if (!file_exists(infile)) {
+    if (!is_jdbc_binary_test && !file_exists(infile)) {
         FILE* fp = fopen(difffilename, "a");

         if (fp) {
@@ -64,7 +158,11 @@ static PID_TYPE psql_start_test(
             fprintf(stderr, _("\n COULD NOT OPEN [%s]!!!!\n"), difffilename);
     }

-    (void)snprintf(outfile, sizeof(outfile), "%s/results/%s.out", outputdir, testname);
+    if (!use_jdbc_client) {
+        (void)snprintf(outfile, sizeof(outfile), "%s/results/%s.out", outputdir, testname);
+    } else {
+        (void)snprintf(outfile, sizeof(outfile), "%s/results_jdbc/%s.out", outputdir, testname);
+    }

     snprintf(expectfile, sizeof(expectfile), "%s/expected/%s.out", outputdir, testname);
     if (!file_exists(expectfile))
@@ -73,22 +171,7 @@ static PID_TYPE psql_start_test(
     add_stringlist_item(resultfiles, outfile);
     add_stringlist_item(expectfiles, expectfile);

-    if (launcher)
-        offset += snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, "%s ", launcher);
-
-    int port = test_single_node ? myinfo.dn_port[0] : myinfo.co_port[0];
-    (void)snprintf(psql_cmd + offset,
-        sizeof(psql_cmd) - offset,
-        SYSTEMQUOTE "\"%s%sgsql\" -X -p %d -a %s %s -q -d \"%s\" -C" "< \"%s\" > \"%s\" 2>&1" SYSTEMQUOTE,
-        psqldir ? psqldir : "",
-        psqldir ? "/" : "",
-        port,
-        (char*)g_stRegrConfItems.acFieldSepForAllText,
-        (char*)g_stRegrConfItems.acTuplesOnly,
-        dblist->str,
-        infile,
-        outfile);
+    gen_sql_cmd(psql_cmd, sizeof(psql_cmd), testname, infile, outfile, is_binary, use_jdbc_client, is_jdbc_binary_test);

     pid = spawn_process(psql_cmd);
diff --git a/src/test/regress/security_audit_schedule0 b/src/test/regress/security_audit_schedule0
new file mode 100644
index 000000000..7b157854b
--- /dev/null
+++ b/src/test/regress/security_audit_schedule0
@@ -0,0 +1,7 @@
+# ----------
+# Database security audit
+# ----------
+
+test: hw_audit_space
+test: hw_audit_rotation_interval
+test: hw_audit_rotation_size
diff --git a/src/test/regress/security_scripts/post_case_audit.sh b/src/test/regress/security_scripts/post_case_audit.sh
new file mode 100644
index 000000000..7fc9bfc7b
--- /dev/null
+++ b/src/test/regress/security_scripts/post_case_audit.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+FILE=$1
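+# Editor's note (illustration, not part of the original patch): the
+# substitutions below strip run-specific identifiers from the audit output so
+# it can be diffed against the expected file.  Role OIDs in the 16000-19999
+# range and transaction ids differ on every run, e.g.:
+#   before: ... roles[16385,16386] ... xid=12345 ...
+#   after:  ... roles[roid1,roid2] ... xid=XIDNUM ...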
+sed -i -r 's/roles\[1[6-9][[:digit:]][[:digit:]][[:digit:]],1[6-9][[:digit:]][[:digit:]][[:digit:]]/roles\[roid1,roid2/g' $FILE
+if [ $? -ne 0 ]; then
+    echo "remove oid pair failed"
+fi
+
+sed -i -r 's/roles\[1[6-9][[:digit:]][[:digit:]][[:digit:]]/roles\[roid1/g' $FILE
+if [ $? -ne 0 ]; then
+    echo "remove oid failed"
+fi
+
+sed -i -r 's/xid=[0-9]+/xid=XIDNUM/g' $FILE
+if [ $? -ne 0 ]; then
+    echo "remove xid failed"
+fi
diff --git a/src/test/regress/security_scripts/prepare_syslog.sh b/src/test/regress/security_scripts/prepare_syslog.sh
new file mode 100644
index 000000000..5762ce404
--- /dev/null
+++ b/src/test/regress/security_scripts/prepare_syslog.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Route local0.* syslog traffic to /var/log/localmessages so the security
+# audit tests can inspect it; non-root users need passwordless sudo.
+current_user=$(whoami)
+if [ "$current_user" != "root" ]; then
+    sudo -l -n -U $current_user | grep "(ALL) NOPASSWD"
+    if [ $? -ne 0 ]
+    then
+        echo "please config sudo privilege for ${current_user} for rsyslog operation"
+        exit 1
+    fi
+fi
+
+sudo cp /etc/rsyslog.conf /etc/rsyslog.conf_bk
+sudo sed -i '/^local0*/d' /etc/rsyslog.conf
+echo "local0.* /var/log/localmessages" | sudo tee -a /etc/rsyslog.conf
+sudo rm -rf /var/log/localmessages
+sudo touch /var/log/localmessages
+sudo chmod a+r /var/log/localmessages
+
+if [ `which systemctl` ]; then
+    sudo systemctl restart rsyslog
+else
+    echo "no systemctl service"
+    exit 1
+fi
diff --git a/src/test/regress/security_scripts/syslog_event_find.sh b/src/test/regress/security_scripts/syslog_event_find.sh
new file mode 100644
index 000000000..a795b942b
--- /dev/null
+++ b/src/test/regress/security_scripts/syslog_event_find.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# Usage: syslog_event_find.sh <port> <policy_name> <query_substring> <expected_count>
+PGPORT=$1
+POL_NAME=$2
+QUERY=$3
+EXPECTED=$4
+
+GSQL_OPTS="-t -d regression -p $PGPORT -U policy_officer -W 1q@W3e4r"
+GET_ID_QUERY="select oid from gs_auditing_policy where polname = lower('$POL_NAME');"
+POL_ID=`gsql $GSQL_OPTS -c "$GET_ID_QUERY" | awk NF | xargs`
+if [ "$POL_ID" == "" ]; then echo "no policy found by name '$POL_NAME'" ; exit 1 ; fi
+if [ $(sudo grep PGAUDIT /var/log/localmessages | grep "$POL_ID" | grep -v "\[policy_officer\]" | tail -"$EXPECTED" | grep "$QUERY" | wc -l) -eq 
"$EXPECTED" ]; then echo "Yay"; exit 0 ; else echo "Nay"; fi +else + if [ $(sudo grep PGAUDIT /var/log/localmessages | grep "$POL_ID" | grep gsql | grep "$QUERY" | tail -"$EXPECTED" | wc -l) -eq "$EXPECTED" ]; then echo "Yay"; exit 0 ; else echo "Nay"; fi +fi + +echo "logs found: (without expected query: \"$QUERY\")" +grep PGAUDIT /var/log/localmessages | grep "$POL_ID" | grep -v "\[policy_officer\]" | tail -"$EXPECTED" +echo "" diff --git a/src/test/regress/single_check.sh b/src/test/regress/single_check.sh index a84d7b28b..75c686a94 100755 --- a/src/test/regress/single_check.sh +++ b/src/test/regress/single_check.sh @@ -92,6 +92,14 @@ function real_regresscheck_old() $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF } +function real_upgradecheck_single() +{ + set_hotpatch_env + set_common_env $1 $2 + echo "regresscheck_single: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF --data_base_dir=$DATA_BASE_DIR --platform=$PLATFORM --upgrade_script_dir=$UPGRADE_SCRIPT_DIR --old_bin_dir=\'$OLD_BIN_DIR\' --grayscale_full_mode --upgrade_schedule=$UPGRADE_SCHEDULE --upgrade_from=$UPGRADE_FROM" + $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF --data_base_dir=$DATA_BASE_DIR --platform=$PLATFORM --upgrade_script_dir=$UPGRADE_SCRIPT_DIR --old_bin_dir=\'$OLD_BIN_DIR\' --grayscale_full_mode --upgrade_schedule=$UPGRADE_SCHEDULE --upgrade_from=$UPGRADE_FROM +} + function real_regresscheck_single() { set_hotpatch_env @@ -100,6 +108,15 @@ function real_regresscheck_single() $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF } +function real_regresscheck_single_audit() +{ + set_hotpatch_env + set_common_env $1 $2 + echo "regresscheck_single: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF" + $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF +} + + function real_regresscheck_single_mot() { set_common_env $1 $2 @@ -110,16 +127,24 @@ function real_regresscheck_single_mot() function real_regresscheck_ledger() { set_common_env $1 $2 - echo "regresscheck_ledger: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $PARALLEL_INITDB $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF" - $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $PARALLEL_INITDB $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w 
--keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF + echo "regresscheck_ledger: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF" + $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF } function real_regresscheck_ce() { set_hotpatch_env set_common_env $1 $2 - echo "regresscheck_ce: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $PARALLEL_INITDB $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF" - $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $PARALLEL_INITDB $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG --seprate_unix_socket_dir $MAXCONNOPT --regconf=$REGCONF + echo "regresscheck_ce: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF" + $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF +} + +function real_regresscheck_ce_jdbc() +{ + set_hotpatch_env + set_common_env $1 $2 + echo "regresscheck_ce_jdbc: $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF" + $pg_regress_check --dlpath=$DL_PATH $EXTRA_REGRESS_OPTS $3 -b $TEMP_INSTALL --abs_gausshome=\'$PREFIX_HOME\' --single_node --schedule=$SCHEDULE -w --keep_last_data=$keep_last_data --temp-config=$TEMP_CONFIG $MAXCONNOPT --regconf=$REGCONF --jdbc } function real_regresscheck_policy_plugin() @@ -240,6 +265,7 @@ function set_common_env() export PLATFORM=$(sh $CODE_BASE/src/get_PlatForm_str.sh) export GAUSSHOME=$PREFIX_HOME REGRESS_PATH=$ROOT_CODE_PATH/${OPENGS}/src/test/regress + REGRESS_PATH=$(cd ${REGRESS_PATH}/; pwd) PGREGRESS_BIN=./${OPENGS}/src/test/regress/pg_regress_single if [ "${ENABLE_MEMORY_CHECK}" = "ON" ];then export ASAN_OPTIONS="unmap_shadow_on_exit=1:abort_on_error=1:detect_leaks=1:force_check_leak=1:halt_on_error=0:alloc_dealloc_mismatch=0:fast_unwind_on_fatal=1:log_path=/home/$(whoami)/memchk/asan/runlog" @@ -258,6 +284,7 @@ function set_common_env() cp -r $REGRESS_PATH/jdbc_ce_test . cp -r $REGRESS_PATH/jdbc_client . cp -r $REGRESS_PATH/jdbc_client_lib . + cp -r $REGRESS_PATH/jdbc_client_lib ./opengauss/src/test/regress/ cp -r $REGRESS_PATH/security_scripts . 
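+# Editor's note (assumption): jdbc_client_lib is copied a second time under
+# ./opengauss/src/test/regress/ so that the outputdir-relative JDBC paths
+# hard-coded in pg_regress (see JDBC_LOCATION_REF in gen_sql_cmd) resolve
+# when the JDBC targets added below, such as execute_fastcheck_ce_single_jdbc,
+# are run.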
     cp ./lib/regress.so $PREFIX_HOME/lib
     cp ./lib/refint.so $PREFIX_HOME/lib
@@ -272,7 +299,7 @@ function set_common_env()
     SCHEDULE=$INPUT_DIR/$1
     TEMP_CONFIG=$INPUT_DIR/$2

-    DATA_BASE_DIR=$INPUT_DIR/../grayscale_upgrade
+    DATA_BASE_DIR=$INPUT_DIR/../../../../privategauss/test/grayscale_upgrade
     UPGRADE_SCRIPT_DIR=$INPUT_DIR/../grayscale_upgrade
     OLD_BIN_DIR=$TEMP_INSTALL/bin
     UPGRADE_SCHEDULE=$INPUT_DIR/upgrade_schedule
@@ -291,13 +318,7 @@ function set_common_env()
     echo "--------------------LD_LIBRARY_PATH----------------------------------------------------"
     echo $LD_LIBRARY_PATH
     export
-    if [ "X$PLATFORM" = "Xeuleros2.0_sp2_x86_64" ]; then
-        UPGRADE_FROM=92202
-    elif [ "X$PLATFORM" = "Xeuleros2.0_sp5_x86_64" ]; then
-        UPGRADE_FROM=92202
-    else
-        UPGRADE_FROM=92023
-    fi
+    UPGRADE_FROM=92497
 }

 function parse_args()
@@ -349,6 +370,17 @@
 case $DO_CMD in
     --fastcheck_single|fastcheck_single)
         args_val="-d 1 -c 0 -p $p -r 1 "
         real_regresscheck_single parallel_schedule0$part make_fastcheck_postgresql.conf "${args_val}" ;;
+    --fastcheck_single_audit|fastcheck_single_audit)
+        args_val="-d 1 -c 0 -p $p -r 1 "
+        real_regresscheck_single_audit security_audit_schedule0$part make_fastcheck_postgresql.conf "${args_val}" ;;
+    --fastcheck_lite|fastcheck_lite)
+        args_val="-d 1 -c 0 -p $p -r 1 "
+        real_regresscheck_single parallel_schedule.lite$part make_fastcheck_postgresql.conf "${args_val}" ;;
+
+    --upgradecheck_single|upgradecheck_single)
+        args_val="-d 1 -c 0 -p $p -r 1 "
+        real_upgradecheck_single parallel_schedule0$part make_fastcheck_postgresql.conf "${args_val}" ;;
+
     --fastcheck_single_mot|fastcheck_single_mot)
         args_val="-d 1 -c 0 -p $p -r 1 "
         real_regresscheck_single_mot parallel_schedule20 make_fastcheck_single_mot_postgresql.conf "${args_val}" ;;
@@ -368,8 +400,11 @@
         args_val="-d 6 -c 3 -p $p -r ${runtest} -n 2002"
         real_regresscheck_ledger ledger_schedule make_fastcheck_postgresql.conf "${args_val}" ;;
     --fastcheck_ce_single|fastcheck_ce_single)
-        args_val="-d 6 -c 3 -p $p -r ${runtest} -n 2002"
+        args_val="-d 1 -c 0 -p $p -r ${runtest} -n 2002"
         real_regresscheck_ce ce_sched make_fastcheck_postgresql.conf "${args_val}" ;;
+    --execute_fastcheck_ce_single_jdbc|execute_fastcheck_ce_single_jdbc)
+        args_val="-d 1 -c 0 -p $p -r ${runtest} -n 2002"
+        real_regresscheck_ce_jdbc ce_sched_jdbc make_fastcheck_postgresql.conf "${args_val}" ;;
     --fastcheck_policy_plugin_single|fastcheck_policy_plugin_single)
         args_val="-d 6 -c 3 -p $p -r ${runtest} -n 2002"
         real_regresscheck_policy_plugin security_plugin_schedule make_fastcheck_postgresql.conf "${args_val}" ;;
diff --git a/src/test/regress/sql/alter_hw_package.sql b/src/test/regress/sql/alter_hw_package.sql
new file mode 100644
index 000000000..f580aa63b
--- /dev/null
+++ b/src/test/regress/sql/alter_hw_package.sql
@@ -0,0 +1,73 @@
+SELECT SESSION_USER, CURRENT_USER;
+reset session AUTHORIZATION;
+
+create user user1 PASSWORD 'Gauss123';
+create user user2 PASSWORD 'Gauss123';
+SET SESSION AUTHORIZATION user1 password 'Gauss123';
+drop procedure p1;
+create procedure p1
+is
+begin
+null;
+end;
+/
+drop package if exists pck1;
+create or replace package user1.pck1 as
+procedure p1();
+end pck1;
+/
+
+--nested definition inside the package body
+create or replace package body pck1 as
+procedure p1 is
+begin
+null;
+end;
+end pck1;
+/
+
+SELECT SESSION_USER, CURRENT_USER;
+reset session AUTHORIZATION;
+SELECT SESSION_USER, CURRENT_USER;
+---change the package owner
+alter package user1.pck1 owner to user2;
+
+---verify
+------usename should be user2
+select usename from pg_user where usesysid = (select pkgowner 
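+-- Editor's note (assumption, not part of the original test): gs_package.pkgowner
+-- stores the owner's usesysid, so after the ALTER PACKAGE ... OWNER TO above this
+-- lookup is expected to return 'user2'; the later CREATE OR REPLACE attempts by
+-- user1 are expected to fail because ownership has moved.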
from gs_package where pkgname = 'pck1'); + +grant usage on schema user1 to user2; +grant execute on package user1.pck1 to user2; +------调用成功,结果正确 +SET SESSION AUTHORIZATION user2 password 'Gauss123'; +drop procedure p1; +call user1.pck1.p1(); + +------原owner create or replace 预期失败 +SET SESSION AUTHORIZATION user1 password 'Gauss123'; +create or replace package pck1 as + type t1 is record(c1 int,c2 int); + type t2 is table of t1; + type t3 is varray(10) of t1; + type t4 is ref cursor; +end pck1; +/ + +create or replace package body pck1 as + type t5 is record(c1 t1,c2 int); + type t6 is table of t5; + type t7 is varray(10) of t1; + type t8 is ref cursor; +end pck1; +/ + +reset session AUTHORIZATION; +SELECT SESSION_USER, CURRENT_USER; +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1'); + +---清理 +SET SESSION AUTHORIZATION user1 password 'Gauss123'; +drop package if exists pck1; +reset session AUTHORIZATION; +drop user if exists user1 cascade; +drop user if exists user2 cascade; diff --git a/src/test/regress/sql/alter_table_000.sql b/src/test/regress/sql/alter_table_000.sql index 8532d030f..c17f8376e 100644 --- a/src/test/regress/sql/alter_table_000.sql +++ b/src/test/regress/sql/alter_table_000.sql @@ -296,3 +296,4 @@ drop table tt_row_rep_1; drop table tt_row_rep_2; drop table tt_col_rep_1; drop table tt_col_rep_2; +select pg_catalog.ledger_hist_repair('0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', 65536); diff --git a/src/test/regress/sql/area.sql b/src/test/regress/sql/area.sql new file mode 100644 index 000000000..dcceb0ac5 --- /dev/null +++ b/src/test/regress/sql/area.sql @@ -0,0 +1,39 @@ +CREATE TABLE area_example1(id SERIAL primary key, somedata int, text varchar(120))with(storage_type = ustore); +CREATE TABLE area_example2(a int primary key,b int,c int); +CREATE TABLE area_example3(a int,b int,c int); +CREATE OR REPLACE function decode_area_proc(plugin text) returns SETOF text +LANGUAGE plpgsql +AS +$$ +declare o_ret text; + my_sql text; + param1 text; +begin +truncate table area_example1; +truncate table area_example2; +truncate table area_example3; +EXECUTE('SELECT pg_current_xlog_location();') into o_ret; +INSERT INTO area_example1(somedata, text) VALUES (1, 1); +INSERT INTO area_example1(somedata, text) VALUES (1, 2); +update area_example1 set somedata=somedata*10 where somedata=1; +delete from area_example1 where somedata=10; + +INSERT INTO area_example2 VALUES (1, 1, 1); +INSERT INTO area_example2 VALUES (2, 2, 2); +update area_example2 set b=b*10 where a=1; +delete from area_example2 where c=1; + +INSERT INTO area_example3 VALUES (1, 1, 1); +INSERT INTO area_example3 VALUES (2, 2, 2); +update area_example3 set b=b*10 where a=1; +delete from area_example3 where c=1; +my_sql = 'select data from pg_logical_get_area_changes(''' || o_ret || ''',NULL,NULL,''' || plugin || ''',NULL);'; +return query EXECUTE(my_sql); +END; +$$ +; +call decode_area_proc('mppdb_decoding'); +call decode_area_proc('sql_decoding'); +drop table area_example1; +drop table area_example2; +drop table area_example3; diff --git a/src/test/regress/sql/arrayinterface_indexby.sql b/src/test/regress/sql/arrayinterface_indexby.sql new file mode 100644 index 000000000..f347aeae0 --- /dev/null +++ b/src/test/regress/sql/arrayinterface_indexby.sql @@ -0,0 +1,944 @@ +-- FOR VARRAY INTERFACE -- + +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_array_interface_indexby; +create schema plpgsql_array_interface_indexby; +set current_schema = plpgsql_array_interface_indexby; + +-- test array interface count -- +create or replace procedure array_interface_p1() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; -- array['red', 'orange', null, '', 'green', 'blue', 'indigo', 'violet'] +begin + colors('a1234567') := 'red'; + colors('a12345678') := 'orange'; + colors('a2345671') := null; + colors('a3456712') := ''; + colors('a4567123') := 
'green'; + colors('a5671234') := 'blue'; + colors('a6712345') := 'indigo'; + colors('a7123456') := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + colors[1] := null; + colors[4] := null; + colors[6] := ''; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; +end; +/ + +call array_interface_p1(); + +create or replace procedure array_interface_p1() as +declare + type ta is table of varchar(32) index by integer; + colors ta; -- array['red', 'orange', null, '', 'green', 'blue', 'indigo', 'violet'] +begin + colors(5) := 'red'; + colors(-1) := 'orange'; + colors(2) := null; + colors(8) := ''; + colors(-6) := 'green'; + colors(10) := 'blue'; + colors(-3) := 'indigo'; + colors(3) := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + colors[1] := null; + colors[4] := null; + colors[6] := ''; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; +end; +/ + +call array_interface_p1(); + +-- test array interface exists -- +create or replace procedure array_interface_p2() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; -- array[null,'red','orange',null,'green','','blue',null,'indigo','violet',null] + ind varchar2(32); +begin + colors('1') := null; + colors('2') := 'red'; + colors('3') := 'orange'; + colors('4') := null; + colors('5') := 'green'; + colors('6') := ''; + colors('7') := 'blue'; + colors('8') := null; + colors('9') := 'indigo'; + colors('10') := 'violet'; + colors('11') := null; + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + ind := colors.first; + raise NOTICE '%', ind; + while colors.exists(ind) loop + raise NOTICE '%:%', ind, colors(ind); + ind := colors.next(ind); + end loop; +end; +/ + +call array_interface_p2(); + +-- test array interface exists -- +create or replace procedure array_interface_p2() as +declare + type ta is table of varchar(32) index by integer; + colors ta; -- array[null,'red','orange',null,'green','','blue',null,'indigo','violet',null] + ind varchar2(32); +begin + colors(5) := null; + colors(-3) := 'orange'; + colors(-5) := null; + colors(-1) := 'green'; + colors(0) := ''; + colors(3) := 'indigo'; + colors(1) := 'blue'; + colors(-2) := null; + colors(2) := null; + colors(-4) := 'red'; + colors(4) := 'violet'; + + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + ind := colors.first; + raise NOTICE '%', ind; + while colors.exists(ind) loop + raise NOTICE '%:%', ind, colors(ind); + ind := colors.next(ind); + end loop; +end; +/ + +call array_interface_p2(); + +-- test array interface exists -- +create or replace procedure array_interface_p3() as +declare + type ta is table of integer index by varchar; + colors ta; -- array[1,2,'',3,4,null,5,6,7,8,9] + ind varchar2(32); +begin + colors('a') := 1; + colors('ab') := 2; + colors('ba') := ''; + colors('bab') := 3; + colors('bb') := 4; + colors('bc') := null; + colors('ca') := 5; + colors('cb') := 6; + colors('cab') := 7; + colors('cba') := 8; + colors('cbb') := 9; + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + ind := colors.first; + raise NOTICE '%', ind; + while colors.exists(ind) loop + raise NOTICE '%:%', ind, colors[ind]; + raise NOTICE '%', colors.exists(ind); + ind := colors.next(ind); + end loop; +end; +/ + +call array_interface_p3(); + +-- test array interface first and last -- +create or replace procedure array_interface_p4() as +declare + type ta is table of 
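+-- Editor's note (assumption, following Oracle index-by semantics): first/last
+-- return the smallest and largest populated key, and for varchar keys the
+-- first/next traversal visits keys in key order rather than assignment order,
+-- which is what the expected output of these procedures relies on.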
varchar(32) index by varchar; + type tb is table of integer index by varchar; + colors1 ta; -- array['red','orange',null,'green','','blue'] + colors2 ta; -- array['red','orange',null,'green','blue',null] + colors3 ta; -- array[null,'red','orange',null,'green','blue'] + colors4 tb; -- array[null,1,2,3,4,null,5,6,7,8,null,''] +begin + colors1('123') := 'red'; + colors1('132') := 'orange'; + colors1('213') := null; + colors1('231') := 'green'; + colors1('312') := ''; + colors1('321') := 'blue'; + raise NOTICE '---------colors1---------'; + raise NOTICE '%', colors1; + raise NOTICE 'colors1 first number: %', colors1.first; + raise NOTICE 'colors1 first: %', colors1[colors1.first]; + raise NOTICE 'colors1 last number: %', colors1.last; + raise NOTICE 'colors1 last: %', colors1[colors1.last]; + + colors2('abc') := 'red'; + colors2('acb') := 'orange'; + colors2('bac') := null; + colors2('bca') := 'green'; + colors2('cab') := 'blue'; + colors2('cba') := null; + raise NOTICE '---------colors2---------'; + raise NOTICE '%', colors2; + raise NOTICE 'colors2 first number: %', colors2.first; + raise NOTICE 'colors2 first: %', colors2[colors2.first]; + raise NOTICE 'colors2 last number: %', colors2.last; + raise NOTICE 'colors2 last: %', colors2[colors2.last]; + + colors3('a1') := null; + colors3('a2') := 'red'; + colors3('b1') := 'orange'; + colors3('ba') := null; + colors3('b2') := 'green'; + colors3('a0') := 'blue'; + raise NOTICE '---------colors3---------'; + raise NOTICE '%', colors3; + raise NOTICE 'colors3 first number: %', colors3.first; + raise NOTICE 'colors3 first: %', colors3[colors3.first]; + raise NOTICE 'colors3 last number: %', colors3.last; + raise NOTICE 'colors3 last: %', colors3[colors3.last]; + + colors4('a312') := null; + colors4('a123') := 1; + colors4('b1') := 2; + colors4('ba') := 3; + colors4('b0') := 4; + colors4('a0') := null; + colors4('b1') := 5; + colors4('bc') := 6; + colors4('bb') := 7; + colors4('c1') := 8; + colors4('ca') := null; + colors4('cb') := ''; + raise NOTICE '---------colors4---------'; + raise NOTICE '%', colors4; + raise NOTICE 'colors4 first number: %', colors4.first; + raise NOTICE 'colors4 first: %', colors4[colors4.first]; + raise NOTICE 'colors4 last number: %', colors4.last; + raise NOTICE 'colors4 last: %', colors4[colors4.last]; +end; +/ + +call array_interface_p4(); + +-- test array interface first and last -- +create or replace procedure array_interface_p4() as +declare + type ta is table of varchar(32) index by integer; + colors1 ta; -- array['red','orange',null,'green','','blue'] +begin + colors1(132) := 'orange'; + colors1(321) := 'blue'; + colors1(213) := null; + colors1(123) := 'red'; + colors1(231) := 'green'; + colors1(312) := ''; + + raise NOTICE '---------colors1---------'; + raise NOTICE '%', colors1; + raise NOTICE 'colors1 first number: %', colors1.first; + raise NOTICE 'colors1 first: %', colors1[colors1.first]; + raise NOTICE 'colors1 last number: %', colors1.last; + raise NOTICE 'colors1 last: %', colors1[colors1.last]; +end; +/ + +call array_interface_p4(); + +-- next&prior +create or replace procedure array_interface_p5() as +declare + type ta is table of varchar(32) index by varchar; + type tb is table of integer index by varchar; + colors1 ta; -- array['red','orange',null,'green','blue','','indigo','violet'] + colors2 tb; -- array[1,2,3,null,4,5,6,'',7,8] + ind varchar2(32); + tmp varchar2(32); +begin + colors1('1') := 'red'; + colors1('2') := 'orange'; + colors1('3') := null; + colors1('4') := 'green'; + colors1('5') := 
'blue'; + colors1('6') := ''; + colors1('7') := 'indigo'; + colors1('8') := 'violet'; + raise NOTICE '--------------------colors1---------------------'; + raise NOTICE '%', colors1; + ind := colors1.first; + while colors1.exists(ind) loop + raise NOTICE 'current is: %', colors1[ind]; + raise NOTICE 'next index is: %', colors1.next(ind); + tmp := colors1.next(ind); + if tmp is null then + raise NOTICE 'next element is: %', tmp; + else + raise NOTICE 'next element is: %', colors1[tmp]; + end if; + raise NOTICE 'prior index is: %', colors1.prior(ind); + tmp := colors1.prior(ind); + if tmp is null then + raise NOTICE 'prior element is: %', tmp; + else + raise NOTICE 'prior element is: %', colors1[tmp]; + end if; + raise NOTICE '-------slash-------'; + ind := colors1.next(ind); + end loop; + + colors1('a') := 1; + colors1('b') := 2; + colors1('c') := 3; + colors1('d') := null; + colors1('e') := 4; + colors1('f') := 5; + colors1('g') := 6; + colors1('h') := ''; + colors1('i') := 7; + colors1('j') := 8; + raise NOTICE '--------------------colors2---------------------'; + raise NOTICE '%', colors2; + ind := colors1.first; + while colors1.exists(ind) loop + raise NOTICE 'current is: %', colors2[ind]; + raise NOTICE 'next index is: %', colors2.next(ind); + raise NOTICE 'next element is: %', colors2[colors2.next(ind)]; + raise NOTICE 'prior index is: %', colors2.prior(ind); + raise NOTICE 'prior element is: %', colors2[colors2.prior(ind)]; + raise NOTICE '-----------'; + ind := colors1.next(ind); + end loop; +end; +/ + +call array_interface_p5(); + +create or replace procedure array_interface_p5() as +declare + type ta is table of varchar(32) index by integer; + colors1 ta; -- array['red','orange',null,'green','blue','','indigo','violet'] + ind varchar2(32); + tmp varchar2(32); +begin + colors1(-15) := 'red'; + colors1(-8) := 'orange'; + colors1(-1) := null; + colors1(0) := 'green'; + colors1(10) := 'blue'; + colors1(24) := ''; + colors1(45) := 'indigo'; + colors1(50) := 'violet'; + raise NOTICE '--------------------colors1---------------------'; + raise NOTICE '%', colors1; + ind := colors1.first; + while colors1.exists(ind) loop + raise NOTICE 'current is: %', colors1[ind]; + raise NOTICE 'next index is: %', colors1.next(ind); + tmp := colors1.next(ind); + if tmp is null then + raise NOTICE 'next element is: %', tmp; + else + raise NOTICE 'next element is: %', colors1[tmp]; + end if; + raise NOTICE 'prior index is: %', colors1.prior(ind); + tmp := colors1.prior(ind); + if tmp is null then + raise NOTICE 'prior element is: %', tmp; + else + raise NOTICE 'prior element is: %', colors1[tmp]; + end if; + raise NOTICE '-------slash-------'; + ind := colors1.next(ind); + end loop; +end; +/ + +call array_interface_p5(); + +-- test empty array exists interface return +create or replace procedure array_interface_p6() as +declare + type ta is table of varchar(32) index by varchar; + type tb is table of integer index by varchar; + colors1 ta := array[]::varchar[]; + colors2 tb := array[]::integer[]; + vi varchar2(32); +begin + raise NOTICE 'colors1 is %', colors1; + raise NOTICE 'colors1 length is %', colors1.count; + raise NOTICE 'colors1 first is %', colors1.first; + raise NOTICE 'colors1 last is %', colors1.last; + raise NOTICE 'colors2 is %', colors2; + raise NOTICE 'colors2 length is %', colors2.count; + raise NOTICE 'colors2 first is %', colors2.first; + raise NOTICE 'colors2 last is %', colors2.last; + vi := 111; + raise NOTICE 'colors1[%] exists return %', vi, colors1.exists(vi); + vi := '1'; + raise 
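+-- Editor's note (assumption): on an empty collection count is 0 and
+-- first/last return NULL, so exists() is the only probe that is always safe
+-- to call; that is what this procedure is exercising.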
NOTICE 'colors1["%"] exists return %', vi, colors1.exists(vi); + vi := 123432; + raise NOTICE 'colors2[%] exists return %', vi, colors2.exists(vi); + vi := '43243442'; + raise NOTICE 'colors2["%"] exists return %', vi, colors2.exists(vi); +end; +/ + +call array_interface_p6(); + +-- test empty array exists interface return +create or replace procedure array_interface_p6() as +declare + type ta is table of varchar(32) index by integer; + type tb is table of integer index by integer; + colors1 ta := array[]::varchar[]; + colors2 tb := array[]::integer[]; + vi varchar2(32); +begin + raise NOTICE 'colors1 is %', colors1; + raise NOTICE 'colors1 length is %', colors1.count; + raise NOTICE 'colors1 first is %', colors1.first; + raise NOTICE 'colors1 last is %', colors1.last; + raise NOTICE 'colors2 is %', colors2; + raise NOTICE 'colors2 length is %', colors2.count; + raise NOTICE 'colors2 first is %', colors2.first; + raise NOTICE 'colors2 last is %', colors2.last; + vi := 111; + raise NOTICE 'colors1[%] exists return %', vi, colors1.exists(vi); + vi := '1'; + raise NOTICE 'colors1["%"] exists return %', vi, colors1.exists(vi); + vi := 123432; + raise NOTICE 'colors2[%] exists return %', vi, colors2.exists(vi); + vi := '43243442'; + raise NOTICE 'colors2["%"] exists return %', vi, colors2.exists(vi); +end; +/ + +call array_interface_p6(); + +-- test array exists interface A.B input parameter +create or replace procedure array_interface_p7() as +declare + type ta is table of varchar(32) index by varchar; + v_a ta := array[]::varchar2[]; +begin + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; + for i in 1 .. 10 loop + v_a(i) := i; + end loop; + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; +end; +/ + +call array_interface_p7(); + +-- test array exists interface A.B input parameter +create or replace procedure array_interface_p7() as +declare + type ta is table of varchar(32) index by integer; + v_a ta := array[]::varchar2[]; +begin + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; + for i in 1 .. 
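+-- Editor's note (assumption): exists() returns false for every key that has
+-- not been populated, and flips to true once this assignment loop has filled
+-- v_a(1) .. v_a(10).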
10 loop + v_a(i) := i; + end loop; + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; +end; +/ + +call array_interface_p7(); + +create or replace procedure array_interface_p8() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + colors('0') := 'red'; + colors('1') := 'orange'; + colors('2') := 'yellow'; + colors('3') := 'green'; + colors('4') := 'blue'; + colors('5') := 'indigo'; + colors('6') := 'violet'; + colors('7') := 'c8'; + colors('8') := 'c9'; + colors('9') := 'c10'; + colors('10') := 'c11'; + colors('11') := 'c12'; + colors('12') := 'c13'; + colors('13') := 'c14'; + colors('14') := 'c15'; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1' || '2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ + +call array_interface_p8(); + +create or replace procedure array_interface_p8() as +declare + type ta is table of varchar(32) index by integer; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + colors(0) := 'red'; + colors(1) := 'orange'; + colors(2) := 'yellow'; + colors(3) := 'green'; + colors(4) := 'blue'; + colors(5) := 'indigo'; + colors(6) := 'violet'; + colors(7) := 'c8'; + colors(8) := 'c9'; + colors(9) := 'c10'; + colors(10) := 'c11'; + colors(11) := 'c12'; + colors(12) := 'c13'; + colors(13) := 'c14'; + colors(14) := 'c15'; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1' || '2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ + +call array_interface_p8(); + +create or replace procedure array_interface_p9() as +declare + type ta is table of varchar(32) index by varchar; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet']; + colors('1') := 'red'; + colors('2') := 'orange'; + colors('3') := 'yellow'; + colors('4') := 'green'; + colors('5') := 'blue'; + colors('6') := 'indigo'; + colors('7') := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.count(); + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.first(); + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.last(); + for i in colors.first .. colors.last loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count loop + raise NOTICE '%', colors[i]; + end loop; + for i in colors.first() .. colors.last() loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. 
colors.count() loop + raise NOTICE '%', colors[i]; + end loop; + colors.delete('7'); + raise NOTICE '%', colors; + colors.delete('1'); + raise NOTICE '%', colors; + colors.delete('13424'); + raise NOTICE '%', colors; + colors.delete(); + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.next('1'); + raise NOTICE '%', colors.prior('1'); + raise NOTICE '%', colors; + colors.delete('1'); + raise NOTICE '%', colors; +end; +/ + +call array_interface_p9(); + +create or replace procedure array_interface_p9() as +declare + type ta is table of varchar(32) index by integer; + colors ta; +begin + -- colors := array['red','orange','yellow','green','blue','indigo','violet']; + colors(1) := 'red'; + colors(2) := 'orange'; + colors(3) := 'yellow'; + colors(4) := 'green'; + colors(5) := 'blue'; + colors(6) := 'indigo'; + colors(7) := 'violet'; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.count(); + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.first(); + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.last(); + for i in colors.first .. colors.last loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count loop + raise NOTICE '%', colors[i]; + end loop; + for i in colors.first() .. colors.last() loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count() loop + raise NOTICE '%', colors[i]; + end loop; + colors.delete(7); + raise NOTICE '%', colors; + colors.delete(1); + raise NOTICE '%', colors; + colors.delete(13424); + raise NOTICE '%', colors; + colors.delete(); + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.next(1); + raise NOTICE '%', colors.prior(1); + raise NOTICE '%', colors; + colors.delete(1); + raise NOTICE '%', colors; +end; +/ + +call array_interface_p9(); + +declare +type ta is table of varchar2(10) index by varchar2; +va ta; +var varchar(10); +begin +va('a1') = 'a'; +va('a2') = 'b'; +va('a3') = 'c'; +va('aaa') = 'd'; +var = 'a'; +raise notice '%' , va.exists('a'||'2'); +raise notice '%' , va.exists('a'||'4'); +if va.exists('a'|| var ||'a') then +raise NOTICE 'aaa exists'; +else +raise NOTICE 'not exists'; +end if; +raise notice '%' , va.next('a'||'2'); +raise notice '%' , va.prior('a'||'2'); +raise notice '%' , va(va.prior('a'||'2')); +raise notice '%' , va(va.first()); +raise notice '%' , va(va.last()); +end; +/ + +declare +type ta is table of varchar2(10) index by integer; +va ta; +var varchar(10); +begin +va(11) = 'a'; +va(12) = 'b'; +va(13) = 'c'; +va('111') = 'd'; +var = '1'; +raise notice '%' , va.exists('1'||'2'); +raise notice '%' , va.exists('1'||'4'); +if va.exists('1'|| var ||'1') then +raise NOTICE '111 exists'; +else +raise NOTICE 'not exists'; +end if; +raise notice '%' , va.next('1'||'2'); +raise notice '%' , va.prior('1'||'2'); +raise notice '%' , va(va.prior('1'||'2')); +raise notice '%' , va(va.first()); +raise notice '%' , va(va.last()); +end; +/ + +declare + type t_arr is table of number index by varchar2(20); + v_arr t_arr; +begin + if v_arr.exists('1'||'12')=false then + raise info 'not exists'; + end if; +end; +/ + +declare + type t_arr is table of number index by integer; + v_arr t_arr; +begin + if v_arr.exists(12)=false then + raise info 'not exists'; + end if; +end; +/ + +declare + type t_arr is varray(10) of number; + v_arr t_arr; +begin + if 
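+-- Editor's note (assumption): delete(k) removes a single element and is a
+-- no-op when the key is absent, while delete() with no argument empties the
+-- whole collection, after which count is 0 and first/last/next/prior all
+-- return NULL.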
v_arr.exists(12)=false then + raise info 'not exists'; + end if; +end; +/ + +create or replace procedure indexbychar1() +as +type ta is table of varchar2(10) index by varchar2; +va ta; +var varchar(10); +begin +va('a1') = 'a'; +va('a2') = 'b'; +va('a3') = 'c'; +va('aaa') = 'd'; +var = 'a'; +raise notice '%' , va.exists('a'||'2'); +raise notice '%' , va.exists('a'||'4'); +if va.exists('a'|| var ||'a') then +raise NOTICE 'aaa exists'; +else +raise NOTICE 'not exists'; +end if; +raise notice '%' , va.next('a'||'2'); +raise notice '%' , va.prior('a'||'2'); +raise notice '%' , va(va.prior('a'||'2')); +raise notice '%' , va(va.first()); +raise notice '%' , va(va.last()); +raise notice '%' , va.count; +va.delete(); +raise notice '%' , va.count; +raise notice '%' , va(va.last()); +exception when others then +raise notice 'delete all the array items'; +for i in 1..10 loop +va(i) := 'va'||(i::varchar(2)); +END LOOP; +raise notice '%', va.COUNT; +raise notice '%', va(1); +raise notice '%', va(10); +raise notice 'first%', va.FIRST; +raise notice 'last%', va.LAST; +raise notice '%', va; +raise notice '%', va(va.LAST); +raise notice '%', va(va.NEXT(va.FIRST)); +raise notice '%', va(va.PRIOR(va.LAST)); +end; +/ + +call indexbychar1(); + +create table pkgtbl085 (c1 int,c2 number,c3 varchar2(30),c4 clob,c5 text,c6 blob); +insert into pkgtbl085 values(1,1,'var1','clob1','text1','bb1'); +insert into pkgtbl085 values(2,2,'var2','clob2','text2','bb2'); +insert into pkgtbl085 values(3,3,'var3','clob3','text3','bb3'); +--I2.table of index by varchar2(20) +create or replace package pkg085 +as +type ty1 is table of varchar2(20) index by varchar2(20); +type ty2 is record (c1 number,c2 pkgtbl085%rowtype); +procedure p1(); +end pkg085; +/ + +create or replace package body pkg085 +as +procedure p1() +is +va ty2; +vb ty1; +numcount int; +begin +for i in 1..3 loop +select c3 into va.c2.c3 from pkgtbl085 where c1=i; +if va.c2.c3 is not null then +vb(va.c2.c3)=va.c2.c3; +end if; +end loop; +raise info 'vb is %',vb; +raise info 'vb.count is %',vb.count; +raise info 'vb(va.c2,c3) is %',vb(va.c2.c3); +raise info 'va.c2.c3 is %',va.c2.c3; +raise info 'vb.prior(va.c2.c3) is %',vb.prior(va.c2.c3); +raise info 'vb.prior(var3) is %',vb.prior('var3'); +va.c2.c3='var1'; +raise info 'vb.next(va.c2.c3) is %',vb.next(va.c2.c3); +raise info 'vb.exists(va.c2.c3) is %',vb.exists(va.c2.c3); +if vb.exists(va.c2.c3) then +raise notice 'true'; +end if; +end; +end pkg085; +/ + +call pkg085.p1(); +drop package pkg085; +drop table pkgtbl085; + +create or replace procedure array_interface_p10() as +declare + type ta is table of varchar(32) index by varchar; + c1 ta; + c2 ta; +begin + if c1.first = c2.first then + null; + end if; +end; +/ + +create or replace procedure array_interface_p10() as +declare + type ta is table of varchar(32) index by varchar; + c1 ta; + c2 ta; +begin + raise info '%', c1.next(c2.first); +end; +/ + +create or replace procedure tableof_delete_1() +is +type ty4 is table of integer index by varchar; +pv4 ty4; +begin +pv4('1'):=2; +pv4('-1'):=-1; +pv4.delete('-1'); +raise info '%', pv4('1'); +raise info '%', pv4('-1'); +raise info '%', pv4; +end; +/ + +call tableof_delete_1(); + +create or replace procedure tableof_delete_2() +is +type ty4 is table of integer index by integer; +pv4 ty4; +begin +pv4(1):=2; +pv4(-1):=-1; +pv4.delete(-1); +raise info '%', pv4(1); +raise info '%', pv4(-1); +raise info '%', pv4; +end; +/ + +call tableof_delete_2(); + +create or replace procedure tableof_delete_3() +is +type ty4 is varray(10) of 
integer; +pv4 ty4; +begin +pv4(1):=2; +pv4(-1):=-1; +pv4(-2):=-2; +raise info '%', pv4; +pv4.delete(-1); +raise info '%', pv4(1); +raise info '%', pv4(-1); +raise info '%', pv4; +end; +/ + +call tableof_delete_3(); + +create or replace procedure tableof_delete_4() +is +type ty4 is varray(10) of integer; +pv4 ty4; +begin +pv4(4):=2; +pv4(3):=-1; +pv4(2):=-2; +raise info '%', pv4; +pv4.delete(3); +raise info '%', pv4(4); +raise info '%', pv4; +end; +/ + +call tableof_delete_4(); + +create or replace procedure tableof_delete_5() +is +type ty4 is varray(10) of integer; +pv4 ty4; +a integer; +begin +a = 1; +pv4(1):=2; +pv4(-1):=-1; +pv4(-2):=-2; +raise info '%', pv4; +pv4.delete(-a); +raise info '%', pv4(1); +raise info '%', pv4(-1); +raise info '%', pv4; +end; +/ + +call tableof_delete_5(); + +-- clean up -- +drop schema if exists plpgsql_array_interface_indexby cascade; diff --git a/src/test/regress/sql/arrayinterface_single.sql b/src/test/regress/sql/arrayinterface_single.sql new file mode 100644 index 000000000..efa5a0468 --- /dev/null +++ b/src/test/regress/sql/arrayinterface_single.sql @@ -0,0 +1,374 @@ +-- FOR VARRAY INTERFACE -- + +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_array_interface; +create schema plpgsql_array_interface; +set current_schema = plpgsql_array_interface; + +-- test array interface count -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange',null,'','green','blue','indigo','violet']; + colors1 int[] := array[1,2,null,3,'',4,null,5,6,null,null,7,8]; + colors2 varchar[] := array['red','orange','null','green','blue',null,'indigo','violet']; +begin + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors2; + raise NOTICE '%', colors2.count; + raise NOTICE '%', colors1; + raise NOTICE '%', colors1.count; +end; +/ + +call array_interface_p1(); + +-- test array interface count -- +create or replace procedure array_interface_p2() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; + colors1 int[] := array[1,2,3,4,5,6,7,8]; +begin + raise NOTICE '%', colors; + colors[1] := null; + colors[4] := null; + colors[6] := ''; + raise NOTICE '%', colors; + raise NOTICE '%', colors1; + colors1[1] := null; + colors1[4] := null; + colors1[6] := ''; + raise NOTICE '%', colors1; +end; +/ + +call array_interface_p2(); + +-- test array interface exists -- +create or replace procedure array_interface_p3() +as +declare + colors varchar[] := array[null,'red','orange',null,'green','','blue',null,'indigo','violet',null]; + ind int := 1; + colors1 varchar[] := array['null','red','orange',null,'green','blue',null,'indigo','violet',null]; +begin + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + for ind in 1..colors.last + loop + raise NOTICE '%', colors[ind]; + raise NOTICE '%', colors.exists(ind); + if colors.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; + + raise NOTICE '--------------------colors1--------------------------'; + raise NOTICE '%', colors1; + for ind in 1 .. 
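+-- Editor's note (assumption): under A compatibility the empty string '' and
+-- NULL are the same value, so the array literals above that mix '' and null
+-- produce identical elements; exists(i) on a plain array only checks that i
+-- falls within the array bounds.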
colors1.last + loop + raise NOTICE '%', colors1[ind]; + raise NOTICE '%', colors1.exists(ind); + if colors1.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; +end; +/ + +call array_interface_p3(); + +-- test array interface exists -- +create or replace procedure array_interface_p4() +as +declare + colors int[] := array[1,2,'',3,4,null,5,6,7,8]; + ind int := 1; + colors1 int[] := array[null,1,2,3,4,null,5,6,'',7,8,null]; +begin + raise NOTICE '--------------------colors--------------------------'; + raise NOTICE '%', colors; + for ind in 1 .. colors.last + loop + raise NOTICE '%', colors[ind]; + raise NOTICE '%', colors.exists(ind); + if colors.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; + + raise NOTICE '--------------------colors1--------------------------'; + raise NOTICE '%', colors1; + for ind in 1 .. colors1.last + loop + raise NOTICE '%', colors1[ind]; + raise NOTICE '%', colors1.exists(ind); + if colors1.exists(ind) then + raise NOTICE ' exists'; + else + raise NOTICE ' not exists'; + end if; + raise NOTICE '----------------'; + end loop; +end; +/ + +call array_interface_p4(); + +-- test array interface first and last -- +create or replace procedure array_interface_p5() +as +declare + colors1 varchar[] := array['red','orange',null,'green','','blue']; + colors2 varchar[] := array['red','orange',null,'green','blue',null]; + colors3 varchar[] := array[null,'red','orange',null,'green','blue']; + colors4 int[] := array[null,1,2,3,4,null,5,6,7,8,null,'']; +begin + raise NOTICE '---------colors1---------'; + raise NOTICE '%', colors1; + raise NOTICE 'colors1 first number: %', colors1.first; + raise NOTICE 'colors1 first: %', colors1[colors1.first]; + raise NOTICE 'colors1 last number: %', colors1.last; + raise NOTICE 'colors1 last: %', colors1[colors1.last]; + + raise NOTICE '---------colors2---------'; + raise NOTICE '%', colors2; + raise NOTICE 'colors2 first number: %', colors2.first; + raise NOTICE 'colors2 first: %', colors2[colors2.first]; + raise NOTICE 'colors2 last number: %', colors2.last; + raise NOTICE 'colors2 last: %', colors2[colors2.last]; + + raise NOTICE '---------colors3---------'; + raise NOTICE '%', colors3; + raise NOTICE 'colors3 first number: %', colors3.first; + raise NOTICE 'colors3 first: %', colors3[colors3.first]; + raise NOTICE 'colors3 last number: %', colors3.last; + raise NOTICE 'colors3 last: %', colors3[colors3.last]; + + raise NOTICE '---------colors4---------'; + raise NOTICE '%', colors4; + raise NOTICE 'colors4 first number: %', colors4.first; + raise NOTICE 'colors4 first: %', colors4[colors4.first]; + raise NOTICE 'colors4 last number: %', colors4.last; + raise NOTICE 'colors4 last: %', colors4[colors4.last]; +end; +/ + +call array_interface_p5(); + +-- next&prior +create or replace procedure array_interface_p6() +as +declare + colors1 varchar[] := array['red','orange',null,'green','blue','','indigo','violet']; + colors2 int[]:=array[1,2,3,null,4,5,6,'',7,8]; + colors3 int[]:=array[null,1,2,3,null,4,5,'',6,7,8,null]; + ind int := 1; +begin + raise NOTICE '--------------------colors1---------------------'; + raise NOTICE '%', colors1; + for ind in 1 .. 
colors1.last + loop + raise NOTICE 'current is: %', colors1[ind]; + raise NOTICE 'next number is: %', colors1.next(ind); + raise NOTICE 'next is: %', colors1[colors1.next(ind)]; + raise NOTICE 'prior number is: %', colors1.prior(ind); + raise NOTICE 'prior is: %', colors1[colors1.prior(ind)]; + raise NOTICE '-------'; + end loop; + + raise NOTICE '--------------------colors2---------------------'; + raise NOTICE '%', colors2; + for ind in 1 .. colors2.last + loop + raise NOTICE 'current is: %', colors2[ind]; + raise NOTICE 'next number is: %', colors2.next(ind); + raise NOTICE 'next is: %', colors2[colors2.next(ind)]; + raise NOTICE 'prior number is: %', colors2.prior(ind); + raise NOTICE 'prior is: %', colors2[colors2.prior(ind)]; + raise NOTICE '-------'; + end loop; + raise NOTICE '--------------------colors3---------------------'; + raise NOTICE '%', colors3; + for ind in 1 .. colors3.last + loop + raise NOTICE 'current is: %', colors3[ind]; + raise NOTICE 'next number is: %', colors3.next(ind); + raise NOTICE 'next is: %', colors3[colors3.next(ind)]; + raise NOTICE 'prior number is: %', colors3.prior(ind); + raise NOTICE 'prior is: %', colors3[colors3.prior(ind)]; + raise NOTICE '-------'; + end loop; +end; +/ + +call array_interface_p6(); + +-- test empty array exists interface return +create or replace procedure array_interface_p7() +as +declare + colors1 varchar[] := array[]::varchar[]; + colors2 integer[]:= array[]::integer[]; + vi varchar2(32); +begin + raise NOTICE 'colors1 is %', colors1; + raise NOTICE 'colors2 is %', colors2; + vi := 111; + raise NOTICE 'colors1[%] exists return %', vi, colors1.exists(vi); + vi := '1'; + raise NOTICE 'colors1["%"] exists return %', vi, colors1.exists(vi); + vi := 123432; + raise NOTICE 'colors2[%] exists return %', vi, colors2.exists(vi); + vi := '43243442'; + raise NOTICE 'colors2["%"] exists return %', vi, colors2.exists(vi); +end; +/ + +call array_interface_p7(); + +-- test array exists interface string input parameter +create or replace procedure array_interface_p8() +as +declare + colors1 varchar2[] := array['11', '12', '13']; + line varchar[]:=array['--------------------------------']; + chk boolean := false; +begin + raise NOTICE'%', colors1; + chk := colors.exists(2); + raise NOTICE'check exists return %', chk; +end; +/ + +--call array_interface_p8(); + +-- test array exists interface A.B input parameter +create or replace procedure array_interface_p9() +as +declare + v_a varchar2[] := array[]::varchar2[]; +begin + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; + v_a.extend(10); + for i in 1 .. 
10 loop + v_a(i) := i; + end loop; + raise NOTICE 'v_a is %', v_a; + for rec in (select generate_series(1,10) x) loop + if v_a.exists(rec.x) then + raise NOTICE 'v_a[%] is exist', rec.x; + else + raise NOTICE 'v_a[%] is not exist', rec.x; + end if; + end loop; +end; +/ + +call array_interface_p9(); + +create or replace procedure array_interface_p10() as +declare + colors varchar2[]; +begin + colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1' || '2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ + +call array_interface_p10(); + +create or replace procedure array_interface_p11() as +declare + colors varchar2[]; +begin + colors := array['red','orange','yellow','green','blue','indigo','violet','c8','c9','c10','c11','c12','c13','c14','c15']; + if colors.exists(1+1) then + raise NOTICE 'array exist, element is %', colors[1+1]; + else + raise NOTICE 'array not exist'; + end if; + if colors.exists('1'||'2') then + raise NOTICE 'array exist, element is %', colors['1'||'2']; + else + raise NOTICE 'array not exist'; + end if; +end; +/ + +call array_interface_p11(); + +create or replace procedure array_interface_p12() as +declare + colors varchar2[]; +begin + colors := array['red','orange','yellow','green','blue','indigo','violet']; + raise NOTICE '%', colors; + raise NOTICE '%', colors.count; + raise NOTICE '%', colors.count(); + raise NOTICE '%', colors.first; + raise NOTICE '%', colors.first(); + raise NOTICE '%', colors.last; + raise NOTICE '%', colors.last(); + for i in colors.first .. colors.last loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. colors.count loop + raise NOTICE '%', colors[i]; + end loop; + for i in colors.first() .. colors.last() loop + raise NOTICE '%', colors[i]; + end loop; + for i in 1 .. 
colors.count() loop + raise NOTICE '%', colors[i]; + end loop; + colors.extend; + raise NOTICE '%', colors; + colors.extend(); + raise NOTICE '%', colors; + colors.extend(2); + raise NOTICE '%', colors; + colors.delete; + raise NOTICE '%', colors; + colors.extend(); + raise NOTICE '%', colors; + colors.delete(); + raise NOTICE '%', colors; +end; +/ + +call array_interface_p12(); + + +-- clean up -- +drop schema if exists plpgsql_array_interface cascade; diff --git a/src/test/regress/sql/arrayinterface_ted.sql b/src/test/regress/sql/arrayinterface_ted.sql new file mode 100644 index 000000000..cccc47c8c --- /dev/null +++ b/src/test/regress/sql/arrayinterface_ted.sql @@ -0,0 +1,292 @@ +-- FOR VARRAY INTERFACE -- + +-- check compatibility -- +show sql_compatibility; +-- create new schema -- +drop schema if exists plpgsql_array_interface_ted; +create schema plpgsql_array_interface_ted; +set current_schema = plpgsql_array_interface_ted; + +-- test array interface extend trim and delete -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange',null,'blue']; + colors1 varchar[] := array['red','blue']; +begin + raise NOTICE'%',colors; + colors.extend; + colors.extend(1); + raise NOTICE'%',colors; + colors.trim; + colors.trim(1); + raise NOTICE'%',colors; + colors.delete; + raise NOTICE'%',colors; + colors1.delete; + raise NOTICE'%',colors1; +end; +/ + +call array_interface_p1(); + +-- test array interface extend trim and delete -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red']; + colors1 varchar[] := array['red','blue']; +begin + raise NOTICE'%',colors; + colors.EXTEND; + colors.EXTEND(3); + raise NOTICE'%',colors; + colors.TRIM; + colors.TRIM(1); + raise NOTICE'%',colors; + colors.DELETE; + raise NOTICE'%',colors; + colors1.DELETE; + raise NOTICE'%',colors1; +end; +/ + +call array_interface_p1(); + +-- test array interface extend mistake format -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red']; +begin + raise NOTICE'%',colors; + colors.extend[1]; + raise NOTICE'%',colors; + colors.extend[-1]; + raise NOTICE'%',colors; +end; +/ + +--call array_interface_p1(); + +-- test array interface trim mistake format -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.trim[1]; + raise NOTICE'%',colors; + colors.trim[-1]; + raise NOTICE'%',colors; +end; +/ + +--call array_interface_p1(); + +-- test array interface delete mistake format -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; + colors2 varchar[] := array['red','orange','green','blue','indigo','violet']; + colors3 varchar[] := array['red','orange','green','blue','indigo','violet']; + colors4 varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.delete(-1); + raise NOTICE'%',colors; + raise NOTICE'%',colors2; + colors2.delete(1); + raise NOTICE'%',colors2; + raise NOTICE'%',colors3; + colors3.delete(10); + raise NOTICE'%',colors3; + raise NOTICE'%',colors4; + colors4.delete[-1]; + raise NOTICE'%',colors4; +end; +/ + +--call array_interface_p1(); + +-- test array name use special character -- +create or replace procedure array_interface_p1() +as +declare + "!arr#%" varchar[]:=array['red','orange','green','blue','indigo']; + 
"@*ar&" integer[]:=array[1,0,5,6,8,3,9]; +begin + raise NOTICE '%',"!arr#%"; + "!arr#%".extend(3); + raise NOTICE '%',"!arr#%"; + "!arr#%".trim(3); + raise NOTICE '%',"!arr#%"; + "@*ar&".delete; + raise NOTICE '%',"@*ar&"; +end; +/ + +call array_interface_p1(); + +-- test array name use special character mistake -- +create or replace procedure array_interface_p1() +as +declare + @*ar& integer[]:=array[1,0,5,6,8,3,9]; +begin + raise NOTICE'%',@*ar&; + @*ar&.extend(3); + raise NOTICE'%',@*ar&; +end; +/ + +-- call array_interface_p1(); + +-- test array interface extend with large parameter -- +create or replace procedure array_interface_p1() +as +declare + arr integer[] := array[1,0,5,6,8,3,9]; +begin + raise NOTICE'%',arr; + arr.extend(10000); + raise NOTICE'%',arr.count; +end; +/ + +call array_interface_p1(); + +-- test array interface trim -- +create or replace procedure array_interface_p1() +as +declare + arr integer[] := array[1,0,5]; +begin + raise NOTICE'%',arr; + arr.trim(10); + raise NOTICE'%',arr; + end; +/ + +call array_interface_p1(); + +-- test array interface delete with empty array -- +create or replace procedure array_interface_p1() +as +declare + arr integer[] := array[]::integer[]; +begin + raise NOTICE'%',arr; + arr.delete; + raise NOTICE'%',arr; +end; +/ + +call array_interface_p1(); + +-- test array interface delete missing ; -- +create or replace procedure array_interface_p1() +as +declare + colors varchar[] := array['red','orange','green','blue','indigo','violet']; +begin + raise NOTICE'%',colors; + colors.delete + raise NOTICE'%',colors; +end; +/ + +-- test call array interface of another package +create or replace package pck1 is + type ta is varray(10) of varchar(100); + tb ta := ta('1','2','3', '4', '5'); +end pck1; +/ + +create or replace package pck2 is + procedure proc1; + end pck2; +/ + +create or replace package body pck2 is +procedure proc1() is +begin + raise NOTICE '%',pck1.tb; + raise NOTICE '%',pck1.tb.count; + raise NOTICE '%',pck1.tb.first; + raise NOTICE '%',pck1.tb.last; + raise NOTICE '%',pck1.tb.count(); + raise NOTICE '%',pck1.tb.first(); + raise NOTICE '%',pck1.tb.last(); + for i in pck1.tb.first .. 
pck1.tb.last + loop + if pck1.tb.exists(i) then + raise NOTICE '%',pck1.tb[i]; + else + raise NOTICE ''; + end if; + end loop; + pck1.tb.extend; + raise NOTICE '%',pck1.tb; + pck1.tb.extend(); + raise NOTICE '%',pck1.tb; + pck1.tb.extend(2); + raise NOTICE '%',pck1.tb; + pck1.tb.trim; + raise NOTICE '%',pck1.tb; + pck1.tb.trim(); + raise NOTICE '%',pck1.tb; + pck1.tb.trim(2); + raise NOTICE '%',pck1.tb; + pck1.tb.delete; + raise NOTICE '%',pck1.tb; + pck1.tb.extend; + raise NOTICE '%',pck1.tb; + pck1.tb.delete(); + raise NOTICE '%',pck1.tb; +end; +end pck2; +/ + +call pck2.proc1(); + +-- test array interface delete with index -- +create or replace procedure array_interface_p1() as +declare + array1 integer[] := array[1,2,3,4,5]; + indx integer; +begin + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; + indx := array1.first; + array1.delete(indx); + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; + array1 := array[1,2,3,4,5]; + indx := array1.last; + array1.delete(indx); + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; + array1 := array[1,2,3,4,5]; + array1.delete(3); + raise NOTICE '%', array1; + raise NOTICE '%', array1.count; + raise NOTICE '%', array1.first; + raise NOTICE '%', array1.last; +end; +/ + +call array_interface_p1(); + +-- clean up -- +drop package if exists pck2; +drop package if exists pck1; +drop schema if exists plpgsql_array_interface_ted cascade; diff --git a/src/test/regress/sql/auto_explain.sql b/src/test/regress/sql/auto_explain.sql new file mode 100644 index 000000000..eb499226b --- /dev/null +++ b/src/test/regress/sql/auto_explain.sql @@ -0,0 +1,139 @@ +set enable_auto_explain = false; +create or replace function data_table returns int as $$ +begin +drop table if exists course; +drop table if exists stu; +drop table if exists teacher; +create table course(cno int,name varchar); +insert into course values(1,'test1'); +insert into course values(2,'test2'); +insert into course values(3,'test2'); +create table stu(sno int, name varchar,sex varchar,cno int); +insert into stu values(1,'zhang','M',1); +insert into stu values(1,'zhang','M',2); +insert into stu values(2,'wangwei','M',2); +insert into stu values(3,'liu','F',3); +create table teacher(tno int,name varchar,sex varchar,cno int); +insert into teacher values(1,'Yang','F',1); +insert into teacher values(2,'zhang','F',2); +insert into teacher values(3,'liu','F',3); +return 1; +end; +$$ +LANGUAGE plpgsql; + +select data_table(); + +CREATE OR REPLACE FUNCTION course_delete_trigger() +RETURNS TRIGGER AS $$ +BEGIN + DELETE FROM teacher where teacher.cno = OLD.cno; + RETURN OLD; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER delete_trigger + AFTER DELETE ON course + FOR EACH ROW EXECUTE PROCEDURE course_delete_trigger(); + +CREATE OR REPLACE FUNCTION courseUpdate() + RETURNS trigger AS $$ + BEGIN + UPDATE teacher SET teacher.cno = NEW.cno where teacher.cno = NEW.cno; + UPDATE student set student.cno = NEW.cno where student.cno = NEW.cno; + END; +$$ +LANGUAGE plpgsql VOLATILE; + +CREATE TRIGGER course_Update AFTER UPDATE OF "cno" ON "public"."course" +FOR EACH ROW +EXECUTE PROCEDURE "public".courseUpdate(); + +create or replace function process_test() returns int as $$ +declare status int; +begin +select complicate_process() into status; +return status; +END +$$ +LANGUAGE plpgsql; + 
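+-- Illustrative sketch (editorial addition, not part of the original suite):
+-- the two GUCs driven below are assumed to behave as the rest of this file
+-- implies: with enable_auto_explain on, plans of statements executed inside
+-- PL/pgSQL are emitted at auto_explain_level. The name auto_explain_demo is
+-- invented for this example.
+create or replace function auto_explain_demo() returns int as $$
+begin
+    -- the plan of this query should be reported at level NOTICE
+    perform count(*) from stu;
+    return 0;
+end;
+$$
+LANGUAGE plpgsql;
+set auto_explain_level = notice;
+set enable_auto_explain = true;
+select auto_explain_demo();
+set enable_auto_explain = false;
+drop function auto_explain_demo;
+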
+prepare get_stu_lesson(varchar) as select stu.name,course.name from stu left join course on course.cno = stu.cno where stu.name = $1; +execute get_stu_lesson('liu'); +prepare get_stu_info(varchar) as select stu.name,course.name,teacher.name from stu left join course on course.cno =stu.cno left join teacher on course.cno = teacher.cno where stu.name = $1; + +set auto_explain_level = notice; +set enable_auto_explain = true; +execute get_stu_info(''); +set enable_auto_explain = false; + +create or replace function open_cursor(myCursor OUT REFCURSOR) as $$ +begin +open myCursor for select teacher.name,stu.name from teacher left join course on course.cno = teacher.cno left join stu on stu.cno = course.cno; +END +$$ +LANGUAGE plpgsql; + +create or replace function complicate_process(status out int) as $$ +declare sql varchar; +numbers int; +declare docType varchar:='REISSUE'; +declare v_count1 int; +declare v_count2 int; +declare tt REFCURSOR; +declare teacher_name varchar; +declare stu_name varchar; +begin +status:=0; +if docType = 'REISSUE' then + select count(1) into v_count1 from stu; + select count(2) into v_count2 from teacher; + if v_count1>0 and v_count2>0 then + insert into stu values(4,'liu','F',1); + insert into teacher values(4,'li',4); + end if; +end if; +update teacher set tno =100 where tno = 3; +select open_cursor() into tt; +fetch next from tt into teacher_name,stu_name; +While true +loop + fetch next from tt into teacher_name,stu_name; + if found then + else + Exit ; + end if; +end loop; +status:=1; + +END +$$ +LANGUAGE plpgsql; + +set auto_explain_level = notice; +set enable_auto_explain = true; +select process_test(); +set enable_auto_explain = false; +drop table if exists test1; +create table test1(id number, val number); +insert into test1 values(generate_series(1,1000), generate_series(1,1000)); +create OR REPLACE function test_merge_into() returns int as $$ +declare tt REFCURSOR; +id_val int; +begin +id_val:=103; +merge into test1 t1 using (select count(*) cnt from test1 where id = id_val) t2 on (cnt <> 0) +when matched then update set val = val + 1 where id = id_val when not matched then insert values(id_val, 1); +return 1; +end; +$$ +LANGUAGE plpgsql; +set enable_auto_explain = true; +set auto_explain_level = notice; +select test_merge_into(); +set enable_auto_explain = false; +drop table if exists course; +drop table if exists stu; +drop table if exists teacher; +drop table if exists test1; diff --git a/src/test/regress/sql/autonomous_cursor.sql b/src/test/regress/sql/autonomous_cursor.sql new file mode 100644 index 000000000..e65efb5df --- /dev/null +++ b/src/test/regress/sql/autonomous_cursor.sql @@ -0,0 +1,1543 @@ +-- test for autonomous transaction with out ref cursor param + +create schema pl_auto_ref; +set current_schema to pl_auto_ref; + +-- 1. 
(a) base use, no commit +create table t1(a int,b number(3),c varchar2(20),d clob,e blob,f text); +insert into t1 values (1,100,'var1','clob1','1234abd1','text1'); +insert into t1 values (2,200,'var2','clob2','1234abd2','text2'); +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 1. (b) base use, fetch before return +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +fetch c1 into va; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 2. base use, commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +commit; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 2. (a) base use, commit, and error +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +vb int; +begin +open c1 for select * from t1; +commit; +vb := 3/0; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 2. base use, fetch before commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +fetch c1 into va; +commit; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 3. 
cursor not use +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +null; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 4. (a) cursor close after open, no commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +close c1; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 4. (b) cursor close after open, commit +create or replace package pck1 as +procedure p1; +procedure p2 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; +procedure p2 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +commit; +close c1; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- 5. nested call, not support now +-- (a) p1->p2->p3, p2,p3 auto +create or replace package pck1 as +procedure p1; +procedure p2 (c2 out sys_refcursor); +procedure p3 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as + +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; + +procedure p2 (c2 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +p3(c2); +end; + +procedure p3 (c1 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +commit; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- (b) p1->p2->p3, p2,auto +create or replace package pck1 as +procedure p1; +procedure p2 (c2 out sys_refcursor); +procedure p3 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as + +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; + +procedure p2 (c2 out sys_refcursor) as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +p3(c2); +end; + +procedure p3 (c1 out sys_refcursor) as +--PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1 for select * from t1; +commit; +end; +end pck1; +/ + +call pck1.p1(); +drop package pck1; + +-- (c) p1->p2->p3, p3,auto +create or replace package pck1 as +procedure p1; +procedure p2 (c2 out sys_refcursor); +procedure p3 (c1 out sys_refcursor); +end pck1; +/ + +create or replace package body pck1 as + +procedure p1 as +c1 sys_refcursor; +vc t1; +begin +p2(c1); +raise info 'rowcount: %', c1%rowcount; +fetch c1 into vc; +raise info '%', vc; +raise info 'rowcount: %', c1%rowcount; +close c1; +end; + +procedure p2 (c2 out sys_refcursor) as 
+--PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+p3(c2);
+end;
+
+procedure p3 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- 6. exception cases
+-- (a).1 exception raised in the autonomous transaction before open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 3/0;
+open c1 for select * from t1;
+commit;
+exception when division_by_zero then
+commit;
+return;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- (a).2 exception raised in the autonomous transaction after open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+open c1 for select * from t1;
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- (a).3 exception in an anonymous block of the autonomous transaction (before open)
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 0;
+begin
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- (a).4 exception in an anonymous block of the autonomous transaction (after open and commit)
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 0;
+open c1 for select * from t1;
+commit;
+begin
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- (a).5 exception in an anonymous block of the autonomous transaction (after open, no commit)
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+vb := 0;
+open c1 for select * from t1;
+begin
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
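+-- Illustrative sketch (editorial addition): the (a) variants above share the
+-- skeleton below. Assumed behaviour: an exception caught inside the
+-- autonomous procedure may commit and return, and the caller then works with
+-- whatever state the out cursor was left in. The name demo_exc is invented.
+create or replace procedure demo_exc(c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+vb int;
+begin
+open c1 for select * from t1;
+vb := 3/0;
+exception when division_by_zero then
+commit;
+return;
+end;
+/
+declare
+c1 sys_refcursor;
+begin
+demo_exc(c1);
+end;
+/
+drop procedure demo_exc;
+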
+-- (b).1 exception in the main transaction
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vb int;
+begin
+p2(c1);
+vb := 3/0;
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+exception when division_by_zero then
+close c1;
+commit;
+return;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+commit;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- 7. commit, rollback, savepoint in the autonomous transaction
+-- (a) before open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+commit;
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+savepoint s1;
+rollback to s1;
+open c1 for select * from t1;
+fetch c1 into va;
+end;
+end pck1;
+/
+truncate table t1;
+call pck1.p1();
+drop package pck1;
+
+-- (b) rollback before open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+commit;
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+savepoint s1;
+open c1 for select * from t1;
+fetch c1 into va;
+rollback to s1;
+end;
+end pck1;
+/
+truncate table t1;
+call pck1.p1();
+drop package pck1;
+
+-- (c) rollback after open
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+commit;
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+open c1 for select * from t1;
+savepoint s1;
+fetch c1 into va;
+rollback to s1;
+end;
+end pck1;
+/
+truncate table t1;
+call pck1.p1();
+drop package pck1;
+
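+-- Illustrative sketch (editorial addition): the savepoint tests above reduce
+-- to the pattern below. Assumed behaviour: rows committed before the
+-- savepoint stay visible to the cursor, while work rolled back to the
+-- savepoint does not. The name demo_sp is invented.
+create or replace procedure demo_sp(c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+insert into t1 values (3,300,'var3','clob3','1234abd3','text3');
+commit;
+savepoint s1;
+insert into t1 values (4,400,'var4','clob4','1234abd4','text4');
+rollback to s1;
+open c1 for select * from t1;
+end;
+/
+declare
+c1 sys_refcursor;
+begin
+demo_sp(c1);
+close c1;
+end;
+/
+drop procedure demo_sp;
+truncate table t1;
+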
+-- 8. multi param
+create table t1_test(a int, b int, c int);
+create table t2_test(a int, b varchar2(10));
+insert into t1_test values(1,2,3);
+insert into t1_test values(4,5,6);
+insert into t2_test values(1,'aaa');
+insert into t2_test values(2,'bbb');
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 in int, c2 in int, c3 out sys_refcursor, c4 out int, c5 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 int;
+c2 int;
+c3 sys_refcursor;
+c4 int;
+c5 sys_refcursor;
+v1 t1_test;
+v2 t2_test;
+begin
+p2(c1,c2,c3,c4,c5);
+raise info 'c3 rowcount: %', c3%rowcount;
+fetch c3 into v1;
+raise info 'c3: %', v1;
+raise info 'c3: rowcount: %', c3%rowcount;
+close c3;
+raise info 'c5 rowcount: %', c5%rowcount;
+fetch c5 into v2;
+raise info 'c5: %', v2;
+raise info 'c5: rowcount: %', c5%rowcount;
+close c5;
+end;
+procedure p2 (c1 in int, c2 in int, c3 out sys_refcursor, c4 out int, c5 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1_test;
+vb t2_test;
+begin
+c4 := 4;
+open c3 for select * from t1_test;
+open c5 for select * from t2_test;
+fetch c3 into va;
+fetch c5 into vb;
+commit;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+drop table t1_test;
+drop table t2_test;
+
+-- 9. overloaded autonomous procedures, including overloaded private procedures
+drop table t1;
+create table t1(a int,b number(3),c varchar2(20),d clob,e blob,f text);
+insert into t1 values (1,100,'var1','clob1','1234abd1','text1');
+insert into t1 values (2,200,'var2','clob2','1234abd2','text2');
+-- 9.(a) public autonomous procedures with the same name
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+procedure p2(c1 int,c2 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vn int;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+p2(vn,c1);
+loop
+fetch c1 into vc;
+exit when c1%notfound;
+raise info 'c1 rowcount %',c1%rowcount;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1 order by t1;
+end;
+procedure p2(c1 int,c2 out sys_refcursor) as
+pragma autonomous_transaction;
+begin
+open c2 for select * from t1;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+-- 9.(b) call autonomous procedures in sequence
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+procedure p3(c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vn int;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+p3(c1);
+loop
+fetch c1 into vc;
+raise info 'p3 %',vc;
+raise info 'p3 rowcount %',c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1 order by t1;
+end;
+procedure p3(c1 out sys_refcursor) as
+pragma autonomous_transaction;
+c2 int;
+begin
+open c1 for select * from t1 order by t1 desc;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
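+-- Illustrative sketch (editorial addition): 9.(a) and 9.(b) both rely on the
+-- caller-side property shown in isolation below: one refcursor variable can
+-- be handed to several opens in turn, provided it is closed in between.
+declare
+c1 sys_refcursor;
+begin
+open c1 for select * from t1 order by 1;
+close c1;
+open c1 for select * from t1 order by 1 desc;
+close c1;
+end;
+/
+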
+-- 9.(c) call autonomous and non-autonomous procedures in sequence
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+procedure p3(c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+vn int;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+p3(c1);
+loop
+fetch c1 into vc;
+raise info 'p3 %',vc;
+raise info 'p3 rowcount %',c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1 order by t1;
+end;
+procedure p3(c1 out sys_refcursor) as
+c2 int;
+begin
+open c1 for select * from t1 order by t1 desc;
+end;
+end pck1;
+/
+call pck1.p1();
+drop package pck1;
+
+-- 9.(d) procedure outside the package
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace procedure p3(c1 out sys_refcursor)
+is
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+raise notice 'public.c1';
+end;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p3(c1);
+raise info 'public.p2%',c1%rowcount;
+loop
+fetch c1 into vc;
+exit when c1%notfound;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+end loop;
+close c1;
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1 order by t1 desc;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+drop procedure p3;
+
+-- 10. insert/update/delete/select inside the autonomous transaction
+drop table t1;
+create table t1(a int);
+insert into t1 values (1);
+insert into t1 values (2);
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+update t1 set a=100;
+insert into t1 values(1000);
+commit;
+select count(*) into vb from t1;
+raise info 'vb is %',vb;
+open c1 for select * from t1;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
+-- 10.(b) commit after delete in p1
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t1;
+begin
+delete from t1; -- delete the data
+commit;
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+vb int;
+begin
+update t1 set a=10;
+insert into t1 values(8);
+commit;
+select count(*) into vb from t1;
+raise info 'vb is %',vb;
+open c1 for select * from t1;
+end;
+end pck1;
+/
+
+call pck1.p1();
+drop package pck1;
+
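+-- Illustrative sketch (editorial addition): the visibility rule exercised by
+-- section 10, in isolation. Assumed behaviour: DML committed inside the
+-- autonomous procedure is visible both to the cursor opened afterwards and
+-- to the caller. The name demo_dml is invented.
+create or replace procedure demo_dml(c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+insert into t1 values (42);
+commit;
+open c1 for select * from t1;
+end;
+/
+declare
+c1 sys_refcursor;
+v1 t1;
+begin
+demo_dml(c1);
+fetch c1 into v1;
+close c1;
+end;
+/
+drop procedure demo_dml;
+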
+-- 11. cross-package calls
+drop table t1_test;
+create table t1_test(a int, b int, c int);
+insert into t1_test values(1,2,3);
+insert into t1_test values(4,5,6);
+
+
+create or replace package pck1 as
+procedure p1;
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c3 sys_refcursor;
+v1 t1_test;
+begin
+p2(c3);
+raise info 'c3 rowcount: %', c3%rowcount;
+fetch c3 into v1;
+raise info 'c3: %', v1;
+raise info 'c3: rowcount: %', c3%rowcount;
+close c3;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+if c1%isopen then
+ close c1;
+ raise notice 'cursor is open';
+else
+ open c1 for select * from t1_test;
+ raise info 'cursor is close';
+end if;
+end;
+end pck1;
+/
+
+create or replace package pck2 as
+procedure p1();
+procedure p2(c1 out sys_refcursor);
+end pck2;
+/
+
+create or replace package body pck2 as
+procedure p1 as
+c1 sys_refcursor;
+v1 t1_test;
+begin
+ pck1.p2(c1);
+ loop
+ fetch c1 into v1;
+ exit when c1%notfound;
+ raise info 'v1 is %',v1;
+ raise info 'c1 rowcount is %',c1%rowcount;
+ end loop;
+ close c1;
+ end;
+ procedure p2(c1 out sys_refcursor) as
+ PRAGMA AUTONOMOUS_TRANSACTION;
+ c2 sys_refcursor;
+ va t1_test;
+ begin
+ pck1.p2(c2);
+ loop
+ fetch c2 into va;
+ exit when c2%notfound;
+ raise info 'va is %',va;
+ raise info 'c2 rowcount %',c2%rowcount;
+ end loop;
+ close c2;
+ end;
+end pck2;
+/
+
+call pck1.p1();
+call pck2.p1();
+drop package pck1;
+drop package pck2;
+
+-- 12. an autonomous procedure calls another autonomous procedure
+drop table t1_test;
+create table t1_test(a int primary key,b number(3),c varchar2(20),d clob,e blob,f text) partition by range(a)(partition p1 values less than(10),partition p2 values less than(20),partition p3 values less than(maxvalue));
+
+insert into t1_test values (1,100,'var1','clob1','1234abd1','text1');
+insert into t1_test values (2,200,'var2','clob2','1234abd2','text2');
+insert into t1_test values (11,100,'var1','clob1','1234abd1','text1');
+insert into t1_test values (12,200,'var2','clob2','1234abd2','text2');
+insert into t1_test values (21,100,'var1','clob1','1234abd1','text1');
+insert into t1_test values (32,200,'var2','clob2','1234abd2','text2');
+
+create or replace package pck1 as
+procedure p1(c1 t1_test);
+procedure p2 (c1 out sys_refcursor);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1(c1 t1_test) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+c2 sys_refcursor;
+begin
+p2(c2);
+loop
+fetch c2 into c1;
+exit when c2%notfound;
+raise info 'c2 rowcount is %',c2%rowcount;
+raise info 'c1 is %',c1;
+end loop;
+end;
+procedure p2(c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+open c1 for select * from t1_test where a<15;
+end;
+end pck1;
+/
+call pck1.p1((1,100,'var1','clob1','1234abd1','text1'));
+drop package pck1;
+
+
+-- 13.(a) commit and rollback in the main transaction
+drop table t1;
+create table t1(a int);
+insert into t1 values(1);
+insert into t1 values(2);
+create or replace procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t1;
+begin
+open c1 for select * from t1;
+end;
+/
+CREATE OR REPLACE PROCEDURE check1(a int) AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+p2(c1);
+fetch c1 into vc;
+while c1%found loop
+raise notice 'isopen:%',c1%isopen;
+raise notice 'found:%',c1%found;
+raise notice 'ans;%', vc;
+fetch c1 into vc;
+insert into t1 values(a);
+if vc.a > 1 then
+commit;
+else
+rollback;
+end if;
+end loop;
+close c1;
+END;
+/
+call check1(1);
+DROP procedure check1;
+
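+-- Illustrative sketch (editorial addition): check1 above interleaves
+-- main-transaction commits with fetches from a cursor opened by the
+-- autonomous procedure p2; the bare skeleton of that interaction is shown
+-- below. The name demo_mix is invented.
+CREATE OR REPLACE PROCEDURE demo_mix() AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+p2(c1);
+fetch c1 into vc;
+commit; -- main-transaction commit while the cursor stays open
+fetch c1 into vc;
+close c1;
+END;
+/
+call demo_mix();
+drop procedure demo_mix;
+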
+-- 13.(b) savepoint
+truncate table t1;
+CREATE OR REPLACE PROCEDURE check4(a int) AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+ insert into t1 values (1);
+ insert into t1 values (2);
+ insert into t1 values (1);
+ insert into t1 values (2);
+ commit;
+ p2(c1);
+ fetch c1 into vc;
+ savepoint sp1;
+ raise notice 'found:%',c1%found;
+ raise notice 'isopen:%',c1%isopen;
+ raise notice 'rowcount:%',c1%rowcount;
+ while c1%found loop
+ raise notice 'isopen:%',c1%isopen;
+ raise notice 'found:%',c1%found;
+ raise notice 'ans;%', vc;
+ fetch c1 into vc;
+ insert into t1 values(a);
+ if vc.a > 1 then
+ commit;
+ savepoint sp1;
+ raise '%',1/0;
+ else
+ rollback to sp1;
+ end if;
+ end loop;
+ close c1;
+ exception
+ when others then
+ raise notice 'exception';
+ fetch c1 into vc;
+ raise notice 'isopen:%',c1%isopen;
+ raise notice 'found:%',c1%found;
+ raise notice 'ans;%', vc;
+ close c1;
+
+END;
+/
+
+call check4(5);
+
+-- 13. (c) savepoint before cursor
+truncate table t1;
+insert into t1 values(1);
+insert into t1 values(2);
+CREATE OR REPLACE PROCEDURE check6(a int) AS
+DECLARE
+c1 sys_refcursor;
+vc t1;
+BEGIN
+insert into t1 values(a);
+savepoint aa;
+p2(c1);
+fetch c1 into vc;
+while c1%found loop
+raise notice 'isopen:%',c1%isopen;
+raise notice 'found:%',c1%found;
+raise notice 'ans;%', vc;
+fetch c1 into vc;
+end loop;
+rollback to aa;
+close c1;
+END;
+/
+call check6(3);
+drop procedure check6;
+drop procedure p2;
+
+-- test calling the autonomous procedure last
+create or replace procedure p4 as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va int;
+begin
+va := 1;
+commit;
+end;
+/
+create or replace procedure p3(c3 out sys_refcursor) as
+begin
+open c3 for select * from t1;
+end;
+/
+create or replace procedure p2(c2 out sys_refcursor) as
+begin
+p3(c2);
+p4();
+raise info 'p2:%',c2;
+end;
+/
+create or replace procedure p1() as
+c1 sys_refcursor;
+begin
+p2(c1);
+raise info 'p1:%',c1;
+end;
+/
+
+call p1();
+
+drop procedure p4;
+drop procedure p3;
+drop procedure p2;
+drop procedure p1;
+
+-- test procedure with only an IN ref cursor param
+create or replace procedure out_refcursor_t2_u1_a(c1 in sys_refcursor)
+ as PRAGMA AUTONOMOUS_TRANSACTION;
+ begin
+ open c1 for select id from count_info;
+ end;
+/
+declare
+ c1 sys_refcursor;
+ v1 int;
+ begin
+ out_refcursor_t2_u1_a(c1);
+ fetch c1 into v1;
+ end;
+/
+drop procedure out_refcursor_t2_u1_a;
+
+-- test deadlock caused by autonomous session
+create type type001 as(c1 number(7,2),c2 varchar(30));
+drop table if exists t2_test;
+create table t2_test(a int,b number(3), c varchar2(20),d clob,e blob,f text,g type001);
+insert into t2_test values (1,100,'var1','clob1','1234abd1','text1',(1.00,'aaa'));
+insert into t2_test values (2,200,'var2','clob2','1234abd2','text2',(2.00,'bbb'));
+
+create or replace package pck1 as procedure p1; procedure p2 (c1 out sys_refcursor); end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+c1 sys_refcursor;
+vc t2_test;
+begin
+delete from t2_test; -- delete the data
+pg_sleep(30);
+p2(c1);
+raise info 'rowcount: %', c1%rowcount;
+loop
+fetch c1 into vc;
+raise info '%', vc;
+raise info 'rowcount: %', c1%rowcount;
+exit when c1%notfound;
+end loop;
+close c1;
+end;
+procedure p2 (c1 out sys_refcursor) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va t2_test;
+vb int;
+begin
+update t2_test set a=10;
+insert into t2_test values(8);
+commit;
+select count(*) into vb from t2_test;
+raise info 'vb is %',vb;
+open c1 for select * from t2_test;
+end;
+end pck1;
+/
+
+call pck1.p1();
+select * from t2_test;
+drop package pck1;
+drop table t2_test;
+drop type type001;
+
+-- test dynamic query sql when opening cursor
+drop table
count_info; +drop table refcursor_info; +create table count_info (id bigserial primary key,count int,info text); +create table refcursor_info (v varchar,info varchar); +insert into count_info (count,info) values (1,'a'),(2,'b'),(3,'c'),(4,'d'); +create or replace package out_refcursor_029_pkg_t1 IS + procedure out_refcursor_029_t1(cur1 out sys_refcursor); + procedure invoke(); + end out_refcursor_029_pkg_t1; +/ +create or replace package body out_refcursor_029_pkg_t1 as + procedure out_refcursor_029_t1(cur1 out sys_refcursor) + as PRAGMA AUTONOMOUS_TRANSACTION; + begin + open cur1 for 'select count,info from count_info where count<:c' using 4; + end; + procedure invoke() is + declare + c1 sys_refcursor; + v1 int; + v2 text; + tmp_v1 int; + tmp_v2 varchar; + begin + out_refcursor_029_t1(c1); + if c1%ISOPEN then + LOOP + FETCH c1 INTO v1,v2; + tmp_v1:=c1%ROWCOUNT; + tmp_v2:=v1||v2||c1%FOUND; + insert into refcursor_info values (tmp_v1,tmp_v2); + EXIT WHEN C1%NOTFOUND; + END LOOP; + end if; + tmp_v1:=c1%ROWCOUNT; + tmp_v2:=to_char(c1%ISOPEN)||to_char(c1%FOUND); + insert into refcursor_info values (tmp_v1,tmp_v2); + close c1; + tmp_v1:=c1%ROWCOUNT; + tmp_v2:=to_char(c1%ISOPEN)||to_char(c1%FOUND)||to_char(c1%NOTFOUND); + insert into refcursor_info values (tmp_v1,tmp_v2); + end; + end out_refcursor_029_pkg_t1; +/ +call out_refcursor_029_pkg_t1.invoke(); +select * from refcursor_info; +drop table refcursor_info; +drop table count_info; +drop package out_refcursor_029_pkg_t1; + +-- test cursor assign value (should error) +drop table t1; +create table t1 (a int, b int); +create or replace procedure p1 ( out sys_refcursor) +as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +$1 := 'abc'; +end; +/ + +declare +va sys_refcursor; +begin +p1(va); +end; +/ + +create or replace procedure p1 (va out sys_refcursor) +as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +va := 'abc'; +end; +/ + +declare +va sys_refcursor; +begin +p1(va); +end; +/ + +drop table t1; +drop procedure p1; + +-- test function with ref cursor +CREATE OR REPLACE function f1( C2 out SYS_REFCURSOR) +LANGUAGE plpgsql +AS $$ +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin + return 1; + END; +$$; + +CREATE OR REPLACE function f1( ) returns SYS_REFCURSOR +LANGUAGE plpgsql +AS $$ +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin + return 1; + END; +$$; + +CREATE OR REPLACE function f1( C2 out SYS_REFCURSOR, C1 out INT) +LANGUAGE plpgsql +AS $$ +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin + null; + END; +$$; + +CREATE OR REPLACE function f1( ) returns SYS_REFCURSOR +LANGUAGE plpgsql +AS $$ +declare +begin + return 1; + END; +$$; + +drop function f1(); + + +-- clean +drop schema pl_auto_ref cascade; + + diff --git a/src/test/regress/sql/autonomous_test.sql b/src/test/regress/sql/autonomous_test.sql index b8d26d3db..c561ae27a 100644 --- a/src/test/regress/sql/autonomous_test.sql +++ b/src/test/regress/sql/autonomous_test.sql @@ -1291,3 +1291,190 @@ DROP PROCEDURE p1; DROP PROCEDURE p2; DROP PROCEDURE p3; DROP PACKAGE pck2; + +-- 8. 
multi auto procedure (session will reuse)
+create or replace package pck1 as
+type r1 is record(a int, b int);
+va int;
+procedure p1;
+procedure p2;
+procedure p3;
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p1 as
+begin
+va := 1;
+raise info 'before p2: %', va;
+p2();
+raise info 'after p2: %', va;
+va := 123;
+p3();
+raise info 'after p3: %', va;
+end;
+
+procedure p2 as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+raise info 'in p2: %', va;
+va := 11;
+end;
+procedure p3 as
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+raise info 'in p3: %', va;
+end;
+end pck1;
+/
+
+call pck1.p1();
+
+drop package pck1;
+
+-- 9. autonomous session calls another autonomous procedure
+create or replace package pkg1 IS
+va int:=1;
+function f1(num1 int) return int;
+end pkg1;
+/
+
+create or replace package body pkg1 as
+function f1(num1 int) return int
+is
+declare
+PRAGMA AUTONOMOUS_TRANSACTION;
+re_int int;
+begin
+raise notice 'just in f1, pkg.va:%',va;
+va:=va+num1;
+raise notice 'in f1, pkg.va:%',va;
+re_int = 1;
+return re_int;
+end;
+end pkg1;
+/
+
+create or replace function f2(num1 int) return int
+is
+declare PRAGMA AUTONOMOUS_TRANSACTION;
+re_int int;
+begin
+pkg1.va = 111;
+raise notice 'before f1: pkg.va: %',pkg1.va;
+re_int = pkg1.f1(num1);
+raise notice 'after f1: pkg.va: %', pkg1.va;
+return re_int;
+end;
+/
+
+select f2(10);
+
+drop function f2;
+drop package pkg1;
+
+-- 9. autonomous session calls another normal procedure
+create or replace package pkg1 IS
+va int:=1;
+function f1(num1 int) return int;
+end pkg1;
+/
+
+create or replace package body pkg1 as
+function f1(num1 int) return int
+is
+declare
+re_int int;
+begin
+raise notice 'just in f1, pkg.va:%',va;
+va:=va+num1;
+raise notice 'in f1, pkg.va:%',va;
+re_int = 1;
+return re_int;
+end;
+end pkg1;
+/
+
+create or replace function f2(num1 int) return int
+is
+declare PRAGMA AUTONOMOUS_TRANSACTION;
+re_int int;
+begin
+pkg1.va = 111;
+raise notice 'before f1: pkg.va: %',pkg1.va;
+re_int = pkg1.f1(num1);
+raise notice 'after f1: pkg.va: %', pkg1.va;
+return re_int;
+end;
+/
+
+select f2(10);
+
+drop function f2;
+drop package pkg1;
+
+-- auto procedure calls normal procedure (auto procedure without package)
+create or replace package autonomous_pkg_setup IS
+count_public int:=1;
+end autonomous_pkg_setup;
+/
+create or replace package body autonomous_pkg_setup as
+ count_private int :=1;
+end autonomous_pkg_setup;
+/
+
+create or replace procedure out_015(num1 int)
+is
+declare
+va int:=30;
+re_int int;
+begin
+autonomous_pkg_setup.count_public = autonomous_pkg_setup.count_public + va;
+raise info 'in out_015,autonomous_pkg_setup.count_public:%', autonomous_pkg_setup.count_public;
+end;
+/
+create or replace procedure app015_1()
+is
+declare PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+out_015(1);
+end;
+/
+
+call app015_1();
+call app015_1();
+
+drop procedure app015_1;
+drop procedure out_015;
+drop package autonomous_pkg_setup;
+
+-- 10.
package var same name with function param +create or replace package pck1 IS +va int:=1; +procedure p1(va int,vb int); +end pck1; +/ +create or replace package body pck1 as +vb int :=1; +procedure p1(va int,vb int) +is +declare +PRAGMA AUTONOMOUS_TRANSACTION; +begin +va:=pck1.va+va; +pck1.vb:=pck1.vb+vb; +raise info 'in p1, va : %', va; +raise info 'in p1, vb : %', vb; +raise info 'in p1, pck1.va : %', pck1.va; +raise info 'in p1, pck1.vb : %', pck1.vb; +end; +begin +va := 2; +vb := 2; +end pck1; +/ + +call pck1.p1(10,20); +call pck1.p1(10,20); + +drop package pck1; diff --git a/src/test/regress/sql/bypass_simplequery_support.sql b/src/test/regress/sql/bypass_simplequery_support.sql index 5e9bf0b72..d7c644c7f 100755 --- a/src/test/regress/sql/bypass_simplequery_support.sql +++ b/src/test/regress/sql/bypass_simplequery_support.sql @@ -138,10 +138,13 @@ explain select * from test_bypass_sq1 where col3='test_update2'; --bypass explain select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 limit 3 offset 3; select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 limit 3 offset 3; +select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 limit 3 offset 30; explain select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset 3; explain select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset null; +explain select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset 30; explain select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 offset 3; select * from test_bypass_sq1 where col1>0 and col2>0 order by col1 offset 3; +select * from test_bypass_sq1 where col1>0 order by col1 for update limit 3 offset 30; explain select * from test_bypass_sq1 where col1>0 order by col1 for update offset 3; explain update test_bypass_sq1 set col2=3*7 where col1=3 and col2=2; update test_bypass_sq1 set col2=3*7 where col1=3 and col2=2; @@ -413,6 +416,16 @@ revoke update on test_bypass_sq7 from qps; revoke select on test_bypass_sq7 from qps; DROP OWNED BY qps; DROP ROLE qps; + +-- test rule do nothing +create table test(a int); +create view v_test as select * from test; + +CREATE OR REPLACE RULE v_delete as ON DELETE TO v_test DO INSTEAD NOTHING; +delete from v_test; + +drop table test cascade; + -- end reset track_activities; set track_sql_count = off; diff --git a/src/test/regress/sql/cast_privileges_test.sql b/src/test/regress/sql/cast_privileges_test.sql index 204c2d5d7..1caa6ab32 100644 --- a/src/test/regress/sql/cast_privileges_test.sql +++ b/src/test/regress/sql/cast_privileges_test.sql @@ -1,7 +1,5 @@ create user user1 password '1234567i*'; grant all on schema public to user1; -create schema privilege_test; -grant all on schema privilege_test to user1; set role user1 password '1234567i*'; CREATE TYPE public.int111 AS (f1 int, f2 int); @@ -11,7 +9,7 @@ create table public.bb_text(bb text111); insert into public.aa_int values((111,222)); insert into public.bb_text values((111,222)); -CREATE OR REPLACE FUNCTION privilege_test.text_int(text111)RETURNS int111 AS $$ +CREATE OR REPLACE FUNCTION public.text_int(text111)RETURNS int111 AS $$ declare res public.int111; begin @@ -20,12 +18,7 @@ begin return res; end;$$ language plpgsql security invoker; -select privilege_test.text_int((111,222)); -CREATE CAST (text111 AS int111) WITH FUNCTION privilege_test.text_int(text111) AS IMPLICIT; +select public.text_int((111,222)); +CREATE CAST (text111 AS int111) WITH FUNCTION public.text_int(text111) AS 
IMPLICIT;
 reset role;
 select aa ,bb from aa_int ,bb_text where aa_int.aa=bb_text.bb::int111;
-drop table aa_int;
-drop table bb_text;
-drop type int111 cascade;
-drop type text111 cascade;
-drop user user1 cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_alter_add_drop_column.sql b/src/test/regress/sql/ce_alter_add_drop_column.sql
new file mode 100644
index 000000000..cfcca7b71
--- /dev/null
+++ b/src/test/regress/sql/ce_alter_add_drop_column.sql
@@ -0,0 +1,46 @@
+\! gs_ktool -d all
+\! gs_ktool -g
+\! gs_ktool -g
+
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK1_sm4 CASCADE;
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK_sm4 CASCADE;
+CREATE CLIENT MASTER KEY ImgCMK1_sm4 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = SM4);
+CREATE CLIENT MASTER KEY ImgCMK_sm4 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/2" , ALGORITHM = SM4);
+CREATE COLUMN ENCRYPTION KEY ImgCEK1_sm4 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1_sm4, ALGORITHM = SM4_sm3);
+CREATE COLUMN ENCRYPTION KEY ImgCEK_sm4 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK_sm4, ALGORITHM = SM4_sm3);
+
+-- Create the target table products and the source table newproducts, and insert data
+drop table IF EXISTS products;
+CREATE TABLE products
+(
+product_id INTEGER,
+product_name VARCHAR2(60) encrypted with (column_encryption_key = ImgCEK_sm4, encryption_type = DETERMINISTIC),
+category VARCHAR2(60)
+);
+INSERT INTO products VALUES (15011, 'vivitar 35mm', 'electrncs');
+INSERT INTO products VALUES (15021, 'olympus is50', 'electrncs');
+INSERT INTO products VALUES (16001, 'play gym', 'toys');
+INSERT INTO products VALUES (16011, 'lamaze', 'toys');
+
+ALTER TABLE products drop COLUMN product_name;
+ALTER TABLE products drop COLUMN category;
+ALTER TABLE products ADD COLUMN product_name VARCHAR2(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK_sm4, ENCRYPTION_TYPE = DETERMINISTIC) ;
+ALTER TABLE products ADD COLUMN category VARCHAR2(60) ;
+ALTER TABLE products drop COLUMN product_name;
+ALTER TABLE products ADD COLUMN product_name VARCHAR2(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK_sm4, ENCRYPTION_TYPE = DETERMINISTIC) ;
+ALTER TABLE products ADD COLUMN product_name_2 text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = ImgCEK_sm4, ENCRYPTION_TYPE = DETERMINISTIC) ;
+
+\d products
+
+INSERT INTO products VALUES (175011, 'vivitar 35mm', 'electrncs', 'car');
+INSERT INTO products VALUES (17021, 'olympus is50', 'electrncs', 'shoe');
+INSERT INTO products VALUES (18001, 'play gym', 'toys', 'book');
+INSERT INTO products VALUES (18011, 'lamaze', 'toys', 'computer');
+INSERT INTO products VALUES (18661, 'harry potter', 'dvd', 'cup');
+
+SELECT * FROM products ORDER BY product_id;
+
+drop table IF EXISTS products;
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK1_sm4 CASCADE;
+DROP CLIENT MASTER KEY IF EXISTS ImgCMK_sm4 CASCADE;
+\!
gs_ktool -d all diff --git a/src/test/regress/sql/ce_cmk_cek_test.sql b/src/test/regress/sql/ce_cmk_cek_test.sql index f28dba5be..3cbc683ee 100644 --- a/src/test/regress/sql/ce_cmk_cek_test.sql +++ b/src/test/regress/sql/ce_cmk_cek_test.sql @@ -16,17 +16,26 @@ CREATE CLIENT MASTER KEY ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool -- fail didn't support RSA_2048 algorithm CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = RSA_2048); +-- fail invalid algorithm +CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC_1); + -- fail ALGORITHM is missing or invalid CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2"); -- fail KEY_PATHis missing or invalid CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, ALGORITHM = AES_256_CBC); +-- fail KEY_PATHis invalid +CREATE CLIENT MASTER KEY ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/" , ALGORITHM = AES_256_CBC); + -- fail KEY_STORE is missing or invalid CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); -- fail duplicate KEY_PATHargs CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); +-- fail invalid KEY_STORE +CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktoolgs_ktoolgs_ktool, KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); + -- fail duplicate KEY_STORE args CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_STORE = kmc, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); @@ -36,10 +45,26 @@ CREATE CLIENT MASTER KEY ImgCMK1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoo -- case create CEK - success CREATE COLUMN ENCRYPTION KEY ImgCEK WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +-- case create CEK - success +CREATE COLUMN ENCRYPTION KEY ImgCEK128 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); + +-- case create CEK - fail +CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256_1); + +-- case create CEK - fail +CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256_1); + CREATE COLUMN ENCRYPTION KEY ImgCEK1 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE='abcdefghijklmnopqrstuvwxyz12'); --fail encryption key too short CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE='abcdefghijklmnopqrstuvwxyz1'); +--sucess +CREATE COLUMN ENCRYPTION KEY ImgCEK_256 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, +ENCRYPTED_VALUE='1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'); + +--fail encryption key too long +CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, 
+ENCRYPTED_VALUE='1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'); --fail object does not exist CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); @@ -53,10 +78,10 @@ CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK1, A -- fail didn't support AES_128_CBC algorithm CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK, ALGORITHM = AES_128_CBC); --- fail syntax error parsing cek creation query +-- fail invalid algorithm CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (CLIENT_MASTER_KEY = ImgCMK); --- fail syntax error parsing cek creation query +-- fail invalid algorithm CREATE COLUMN ENCRYPTION KEY ImgCEK2 WITH VALUES (ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); select count(*), 'count' from gs_client_global_keys; @@ -87,4 +112,4 @@ select count(*), 'count' from gs_column_keys; DROP CLIENT MASTER KEY test_sm2_cmk CASCADE; -\! gs_ktool -d all +\! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_copy.sql b/src/test/regress/sql/ce_copy.sql deleted file mode 100644 index dcf88544b..000000000 --- a/src/test/regress/sql/ce_copy.sql +++ /dev/null @@ -1,49 +0,0 @@ -\! gs_ktool -d all -\! gs_ktool -g - -DROP CLIENT MASTER KEY IF EXISTS copyCMK CASCADE; -CREATE CLIENT MASTER KEY copyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); -CREATE COLUMN ENCRYPTION KEY copyCEK WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -CREATE TABLE IF NOT EXISTS CopyFromTbl(i0 INT, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); -COPY copyfromtbl FROM stdin; -5 10 7 -20 20 8 -30 10 12 -50 35 12 -80 15 23 -\. --- fail error -COPY copyfromtbl FROM stdin; -1 2 3 4 -\. - -SELECT * FROM CopyFromTbl ORDER BY i0; - -COPY copyfromtbl (i0, i1,i2) FROM stdin; -5 10 7 -20 20 8 -30 10 12 -50 35 12 -80 15 23 -\. 
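For context on the column-list form of COPY used in this (now removed) test and again in its replacement, ce_copy_options.sql, below: columns omitted from the list are filled with their defaults, whether or not they are encrypted. A minimal sketch of that behavior, assuming a hypothetical scratch table t_demo that belongs to neither test file:

CREATE TABLE t_demo(i0 INT, i1 INT DEFAULT 0);
COPY t_demo (i0) FROM stdin;  -- only i0 is listed
7
\.
SELECT * FROM t_demo;  -- i1 is filled with its default, 0
DROP TABLE t_demo;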
-SELECT * FROM CopyFromTbl ORDER BY i0; - -\copy CopyFromTbl FROM 'ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; -SELECT * FROM CopyFromTbl ORDER BY i0; -\copy (SELECT * FROM CopyFromTbl ORDER BY i2) TO 'ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; - -copy CopyFromTbl FROM 'ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; -copy CopyFromTbl (i0, i1,i2) FROM 'ce_copy_from.csv' WITH DELIMITER ',' CSV HEADER; -copy CopyFromTbl TO 'ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; -copy (SELECT * FROM CopyFromTbl ORDER BY i2) TO 'ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; - -CREATE TABLE IF NOT EXISTS CopyTOTbl(i0 INT, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY=copyCEK, ENCRYPTION_TYPE = DETERMINISTIC) , i2 INT); -\copy CopyToTbl FROM 'ce_copy_to.csv' WITH DELIMITER ',' CSV HEADER; -SELECT * FROM CopyToTbl ORDER BY i0; -COPY (SELECT * FROM CopyFromTbl ORDER BY i0) TO stdout; - -DROP TABLE CopyFromTbl; -DROP TABLE CopyToTbl; -DROP CLIENT MASTER KEY copyCMK CASCADE; - -\! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_copy_options.sql b/src/test/regress/sql/ce_copy_options.sql new file mode 100644 index 000000000..daffb9127 --- /dev/null +++ b/src/test/regress/sql/ce_copy_options.sql @@ -0,0 +1,229 @@ +\! gs_ktool -d all +\! gs_ktool -g + +DROP CLIENT MASTER KEY IF EXISTS copyCMK CASCADE; +CREATE CLIENT MASTER KEY copyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +-- test AEAD_AES_128_CBC_HMAC_SHA256 +CREATE COLUMN ENCRYPTION KEY copyCEK1 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY copyCEK2 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); +CREATE COLUMN ENCRYPTION KEY copyCEK3 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256); + + +CREATE TABLE IF NOT EXISTS CopyTbl( + i0 INT, + i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK1, ENCRYPTION_TYPE = DETERMINISTIC), + i2 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK2, ENCRYPTION_TYPE = DETERMINISTIC), + i3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK3, ENCRYPTION_TYPE = DETERMINISTIC) default 'stuff' + ); + + + + +-- 1 check copy from +-- 1.a check copy the whole table +-- missing data: should fail +COPY CopyTbl FROM stdin; + +\. + +-- check \N +copy CopyTbl from stdin; +1 \N \\N \NN +\. + +--1.b check copy part of the table +COPY CopyTbl (i0, i1,i2) FROM stdin; +10 10 7 +11 20 8 +\. + +copy CopyTbl(i0,i2) from stdin; +1001 12 +\. + +-- should fail: non-existent column in column list +copy CopyTbl(col2) from stdin; + +-- should fail: too many columns in column list +copy CopyTbl(i0,i1,i2,i3,i1,i3) from stdin; + +SELECT * FROM CopyTbl ORDER BY i0; + + +--3 check options + +--3.a format +COPY CopyTbl from stdin(FORMAT CSV); +3000,1,2,3 +\. + +COPY CopyTbl from stdin(FORMAT TEXT); +\. + +--3.b oids DO NOT SUPPORT oids +-- should fail: table "CopyTbl" does not have OIDs +COPY CopyTbl from stdin WITH OIDS; + +--3.c option:delimiter +copy CopyTbl from stdin with delimiter ','; +1002,1,2,3 +\. + +--should fail +copy CopyTbl from stdin with delimiter 'a'; +--should fail +copy CopyTbl from stdin with delimiter E'\r'; +--should fail: delimiter must be no more than 10 bytes +copy CopyTbl from stdin with delimiter '|,%^&*@#$%%^||||'; + + +--3.d option:null force not null +COPY CopyTbl from stdin WITH NULL AS ''; +1006 2 3 +\. 
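The failing variants just below exercise a general COPY rule: the NULL marker may not contain carriage-return or newline characters, and it may not collide with the delimiter or, in CSV mode, with the quote character. A minimal sketch of a marker that is accepted, assuming a hypothetical plain (unencrypted) table t_null:

CREATE TABLE t_null(a INT, b TEXT);
COPY t_null FROM stdin WITH DELIMITER ',' NULL AS 'NIL';
1,NIL
\.
SELECT a, b IS NULL AS b_is_null FROM t_null;  -- b is a genuine SQL NULL, not the string 'NIL'
DROP TABLE t_null;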
+
+--should fail
+COPY CopyTbl from stdin WITH NULL AS E'\r';
+--should fail
+COPY CopyTbl from stdin WITH delimiter ',' NULL ',';
+--should fail
+COPY CopyTbl from stdin WITH CSV quote ',' NULL ',';
+
+-- force not null is only available in csv mode with copy from; it has no visible effect here
+COPY CopyTbl from stdin WITH CSV FORCE NOT NULL i2;
+1,2,3,4
+\.
+COPY CopyTbl from stdin (FORMAT CSV, FORCE_NOT_NULL(i2));
+1,2,3,4
+\.
+
+
+--3.e option:quote force_quote
+COPY CopyTbl TO stdout WITH csv;
+COPY CopyTbl TO stdout WITH csv quote '''' delimiter '|';
+
+COPY CopyTbl TO stdout WITH CSV FORCE QUOTE i3;
+COPY CopyTbl TO stdout WITH CSV FORCE QUOTE *;
+
+
+--3.f escape
+--fail to decrypt
+-- COPY CopyTbl TO stdout (FORMAT CSV, ESCAPE E'\\');
+
+
+--3.g option: eol
+-- fail
+-- COPY CopyTbl from stdin WITH EOL 'EOL_CRNL';
+-- COPY CopyTbl from stdin WITH EOL 'EOL_CR';
+-- COPY CopyTbl from stdin WITH EOL 'EOL_NL';
+
+
+--3.h ignore extra data
+copy CopyTbl from stdin with delimiter '|' ignore_extra_data;
+1|2|3|4|5
+\.
+
+
+--3.i encoding
+COPY CopyTbl to stdout WITH DELIMITER AS ',' ENCODING 'utf8';
+COPY CopyTbl to stdout WITH DELIMITER AS ',' ENCODING 'sql_ascii';
+
+
+--4 check copy out
+COPY CopyTbl TO stdout WITH CSV;
+COPY CopyTbl TO stdout WITH CSV QUOTE '''' DELIMITER '|';
+COPY CopyTbl TO stdout WITH CSV FORCE QUOTE *;
+COPY CopyTbl TO stdout WITH CSV FORCE QUOTE i2 ENCODING 'sql_ascii';
+-- Repeat above tests with new 9.0 option syntax
+COPY CopyTbl TO stdout (FORMAT CSV);
+COPY CopyTbl TO stdout (FORMAT TEXT);
+COPY CopyTbl TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|');
+COPY CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE *);
+COPY CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE(i2),ENCODING 'sql_ascii');
+-- Repeat above tests with \copy
+\copy CopyTbl TO stdout (FORMAT CSV);
+\copy CopyTbl TO stdout (FORMAT TEXT);
+\copy CopyTbl TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|');
+\copy CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE *);
+\copy CopyTbl TO stdout (FORMAT CSV, FORCE_QUOTE(i2),ENCODING 'sql_ascii');
+
+
+-- test end of copy marker
+CREATE COLUMN ENCRYPTION KEY copyCEK4 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256);
+create table test_eoc(
+    a int,
+    b text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK4, ENCRYPTION_TYPE = DETERMINISTIC)
+);
+
+copy test_eoc from stdin csv;
+1,a\.
+2,\.b
+3,c\.d
+4,"\."
+\.
+
+select * from test_eoc order by a;
+
+--5 check copy select
+CREATE COLUMN ENCRYPTION KEY copyCEK5 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256);
+create table test_select(
+    a int,
+    b text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK5, ENCRYPTION_TYPE = DETERMINISTIC)
+);
+
+insert into test_select values (1, 'a');
+insert into test_select values (2, 'b');
+insert into test_select values (3, 'c');
+insert into test_select values (4, 'd');
+insert into test_select values (5, 'e');
+
+CREATE COLUMN ENCRYPTION KEY copyCEK6 WITH VALUES (CLIENT_MASTER_KEY = copyCMK, ALGORITHM = AEAD_AES_128_CBC_HMAC_SHA256);
+create table test_select_2(
+    a int,
+    b text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = copyCEK6, ENCRYPTION_TYPE = DETERMINISTIC)
+);
+
+insert into test_select_2 values (1, 'A');
+insert into test_select_2 values (2, 'B');
+insert into test_select_2 values (3, 'C');
+insert into test_select_2 values (4, 'D');
+insert into test_select_2 values (5, 'E');
+
+
+--6. test COPY select table TO
+--a.
test COPY (select) TO +copy (select * from test_select order by 1) to stdout; +copy (select * from test_select order by 1) to stdout; +copy (select b from test_select where a=1) to stdout; + +--b. test COPY (select for update) TO +copy (select b from test_select where a=3 for update) to stdout; +-- should fail +copy (select * from test_select) from stdin; +-- should fail +copy (select * from test_select) (a,b) to stdout; + +--c.test join +copy (select * from test_select join test_select_2 using (a) order by 1) to stdout; + +--d. Test subselect +copy (select * from (select b from test_select where a = 1)) to stdout; + +--e. test headers, CSV and quotes +copy (select b from test_select where a = 1) to stdout csv header force quote b; + +--f. test psql builtins, plain table +\copy (select * from test_select order by 1) to stdout; + +-- fail to decrypt +-- \copy (select "a",'a','a""'||b,(a + 1)*a,b,"test_select"."b" from test_select where a=3) to stdout; + +DROP TABLE CopyTbl; +DROP TABLE test_eoc; +DROP TABLE test_select; +DROP TABLE test_select_2; +DROP CLIENT MASTER KEY copyCMK CASCADE; + +\! gs_ktool -d all + diff --git a/src/test/regress/sql/ce_create_procedure.sql b/src/test/regress/sql/ce_create_procedure.sql new file mode 100644 index 000000000..c9084fcd6 --- /dev/null +++ b/src/test/regress/sql/ce_create_procedure.sql @@ -0,0 +1,91 @@ +\! gs_ktool -d all +\! gs_ktool -g + +DROP CLIENT MASTER KEY IF EXISTS proc_cmk1 CASCADE; +CREATE CLIENT MASTER KEY proc_cmk1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY proc_cek1 WITH VALUES (CLIENT_MASTER_KEY = proc_cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); + +create table accounts ( + id serial, + name varchar(100) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek1, ENCRYPTION_TYPE = DETERMINISTIC), + balance dec(15,2) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek1, ENCRYPTION_TYPE = DETERMINISTIC), + primary key(id) +); + +insert into accounts(name,balance) +values('Bob',10000); + +insert into accounts(name,balance) +values('Alice',10000); + +select * from accounts; + +create or replace procedure transfer( + sender int, + receiver int, + amount dec +) +as +begin + -- subtracting the amount from the sender's account + update accounts + set balance = balance - amount + where id = sender; + + -- adding the amount to the receiver's account + update accounts + set balance = balance + amount + where id = receiver; + + commit; +end; +/ + +call transfer(1,2,1000); +SELECT * FROM accounts; + +drop table if exists accounts; + +create table accounts ( + id serial, + name varchar(100) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek1, ENCRYPTION_TYPE = DETERMINISTIC), + balance dec(15,2) not null ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek1, ENCRYPTION_TYPE = DETERMINISTIC), + primary key(id) +) DISTRIBUTE BY REPLICATION; + +insert into accounts(name,balance) +values('Bob',10000); + +insert into accounts(name,balance) +values('Alice',10000); + +select * from accounts; + +create or replace procedure transfer( + sender int, + receiver int, + amount dec +) +as +begin + -- subtracting the amount from the sender's account + update accounts + set balance = balance - amount + where id = sender; + + -- adding the amount to the receiver's account + update accounts + set balance = balance + amount + where id = receiver; + + commit; +end; +/ + +call transfer(1,2,1000); +SELECT * FROM accounts; +DROP TABLE accounts CASCADE; +DROP FUNCTION transfer; +DROP COLUMN 
ENCRYPTION KEY proc_cek1;
+DROP CLIENT MASTER KEY proc_cmk1;
+\! gs_ktool -d all
diff --git a/src/test/regress/sql/ce_crt_cek.sql b/src/test/regress/sql/ce_crt_cek.sql
new file mode 100644
index 000000000..2ae7d46b4
--- /dev/null
+++ b/src/test/regress/sql/ce_crt_cek.sql
@@ -0,0 +1,120 @@
+-------------------------------------------------------------------------------------------------------------------------
+-- group : security
+-- module : client encrypt
+--
+-- function : test {sql:CREATE CEK}
+--     CREATE COLUMN ENCRYPTION KEY WITH VALUES (CLIENT_MASTER_KEY = $cmk, ALGORITHM = $algo, ENCRYPTED_VALUE = "$value");
+--
+-- dependency :
+--     service : Huawei KMS (https://console.huaweicloud.com/dew/?region=cn-north-4#/kms/keyList/customKey)
+--     cmk : CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = huawei_kms, ...)
+-------------------------------------------------------------------------------------------------------------------------
+
+-- prepare | succeed
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256);
+
+-- create cek | succeed
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek4 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = '0123456789abcdef0123456789abcdef');
+
+-- drop cek | succeed
+DROP COLUMN ENCRYPTION KEY cek1;
+DROP COLUMN ENCRYPTION KEY cek2;
+DROP COLUMN ENCRYPTION KEY cek3;
+DROP COLUMN ENCRYPTION KEY cek4;
+
+-- create after drop cek | succeed
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+DROP COLUMN ENCRYPTION KEY cek1;
+DROP COLUMN ENCRYPTION KEY cek2;
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+
+-- prepare | succeed
+DROP COLUMN ENCRYPTION KEY cek1;
+DROP COLUMN ENCRYPTION KEY cek2;
+
+-- create cek | invalid cek object name | error
+CREATE COLUMN ENCRYPTION KEY WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY . WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY ecek 1 WITH VALUES (CLIENT_MASTER_KEY = cmk3, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY ecek ecek WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY 啊 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+
+-- create cek | loss args | error
+CREATE COLUMN ENCRYPTION KEY ecek1 WITH VALUES ();
+CREATE COLUMN ENCRYPTION KEY ecek2 WITH VALUES (CLIENT_MASTER_KEY = cmk1);
+CREATE COLUMN ENCRYPTION KEY ecek3 WITH VALUES (ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY ecek4 WITH VALUES (CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1);
+CREATE COLUMN ENCRYPTION KEY ecek5 WITH VALUES (ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY ecek6 WITH VALUES (CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1);
+
+-- create cek | redundant args | error
+CREATE COLUMN ENCRYPTION KEY ecek20 WITH VALUES (CLIENT_MASTER_KEY = cmk1, CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY ecek21 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY ecek22 WITH VALUES (CLIENT_MASTER_KEY = cmk3, CLIENT_MASTER_KEY = cmk3, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "123456789abcdef0123456");
+CREATE COLUMN ENCRYPTION KEY ecek23 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "123456789abcdef0123456");
+
+-- create cek | invalid args | error
+CREATE COLUMN ENCRYPTION KEY ecek40 WITH VALUES (CLIENT_MASTER_KEY = cmk5, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+--
+CREATE COLUMN ENCRYPTION KEY ecek50 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "");
+CREATE COLUMN ENCRYPTION KEY ecek51 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "0123456789abcdef0");
+CREATE COLUMN ENCRYPTION KEY ecek52 WITH VALUES (CLIENT_MASTER_KEY = cmk3, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "0123456789abcdef0123456789abcdef01");
+CREATE COLUMN ENCRYPTION KEY ecek53 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef");
+
+-- clear | succeed
+DROP CLIENT MASTER KEY cmk1 CASCADE;
+DROP CLIENT MASTER KEY cmk2 CASCADE;
+DROP CLIENT MASTER KEY cmk3 CASCADE;
+DROP CLIENT MASTER KEY cmk4 CASCADE;
+SELECT * FROM gs_column_keys;
+SELECT * FROM gs_client_global_keys;
+
+-------------------------------------------------------------------------------------------------------------
+-- dependency :
+--     tool : gs_ktool (source code: src/bin/gs_ktool)
+-------------------------------------------------------------------------------------------------------------
+
+-- prepare | succeed
+\! gs_ktool -d all
+\! gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC);
+
+-- create cek | succeed
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek4 WITH VALUES (CLIENT_MASTER_KEY = cmk4, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = '0123456789abcdef0123456789abcdef');
+
+-- drop cek | succeed
+DROP COLUMN ENCRYPTION KEY cek1;
+DROP COLUMN ENCRYPTION KEY cek2;
+DROP COLUMN ENCRYPTION KEY cek3;
+DROP COLUMN ENCRYPTION KEY cek4;
+
+-- create after drop cek | succeed
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+DROP COLUMN ENCRYPTION KEY cek1;
+DROP COLUMN ENCRYPTION KEY cek2;
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+
+-- clear | succeed
+DROP CLIENT MASTER KEY cmk1 CASCADE;
+DROP CLIENT MASTER KEY cmk2 CASCADE;
+DROP CLIENT MASTER KEY cmk3 CASCADE;
+DROP CLIENT MASTER KEY cmk4 CASCADE;
+SELECT * FROM gs_column_keys;
+SELECT * FROM gs_client_global_keys;
+\! gs_ktool -d all
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_crt_cmk_by_gskt.sql b/src/test/regress/sql/ce_crt_cmk_by_gskt.sql
new file mode 100644
index 000000000..fc347ee07
--- /dev/null
+++ b/src/test/regress/sql/ce_crt_cmk_by_gskt.sql
@@ -0,0 +1,143 @@
+-------------------------------------------------------------------------------------------------------------------------
+-- group : security
+-- module : client encrypt
+--
+-- function : test {sql:CREATE CMK}
+--     CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = $key_store, KEY_PATH = "$key_id" , ALGORITHM = $algo);
+--
+-- dependency :
+--     tool : gs_ktool (source code: src/bin/gs_ktool)
+-------------------------------------------------------------------------------------------------------------------------
+
+-- prepare | succeed
+\! gs_ktool -d all
+\! gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g
+
+-- create cmk | succeed
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = SM4);
+CREATE CLIENT MASTER KEY cmk5 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHM = AES_256_CBC);
+
+-- drop cmk | succeed
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk2;
+
+-- create after drop cmk | succeed
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", ALGORITHM = SM4);
+DROP CLIENT MASTER KEY cmk1;
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC);
+
+-- prepare | succeed
+\! gs_ktool -d all
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk2;
+DROP CLIENT MASTER KEY cmk4;
+DROP CLIENT MASTER KEY cmk5;
+\! gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g
+
+-- in the word "ecmk", 'e' means 'error'
+-- create cmk | invalid cmk object name | error
+CREATE CLIENT MASTER KEY ecmk 1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY ecmk 1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY ecmk ecmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY "ecmk" ecmk WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY .
WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY 你 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); + +-- create cmk | loss args | error +CREATE CLIENT MASTER KEY ecmk1 WITH (KEY_STORE = gs_ktool); +CREATE CLIENT MASTER KEY ecmk2 WITH (KEY_PATH = "gs_ktool/1"); +CREATE CLIENT MASTER KEY ecmk3 WITH (ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk4 WITH (KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk5 WITH (KEY_STORE = gs_ktool, ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk6 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3"); +CREATE CLIENT MASTER KEY ecmk7 WITH (KEY_PATH = "gs_ktool/4", KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk8 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk9 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", KEY_PATH = "gs_ktool/5"); +CREATE CLIENT MASTER KEY ecmk10 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", KEY_PATH = "gs_ktool/6"); +CREATE CLIENT MASTER KEY ecmk11 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool); +CREATE CLIENT MASTER KEY ecmk12 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_STORE = gs_ktool); + +-- create cmk | redundant args | error +CREATE CLIENT MASTER KEY ecmk20 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk21 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2", KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk22 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3", KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk23 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk24 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5", ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk25 WITH (KEY_STORE = gs_ktool, KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC, ALGORITHM = AES_256_CBC); + +-- create cmk | invalid args | error +CREATE CLIENT MASTER KEY ecmk40 WITH (KEY_STORE = , KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk41 WITH (KEY_STORE = gs, KEY_PATH = "gs_ktool/2", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk42 WITH (KEY_STORE = gs_ktooll, KEY_PATH = "gs_ktool/3", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk43 WITH (KEY_STORE = gs_ktoal, KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk44 WITH (KEY_STORE = "gs_ktoal", KEY_PATH = "gs_ktool/4", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk45 WITH (KEY_STORE = gs_ktoolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll1111111111111111111111111111111111, KEY_PATH = "gs_ktool/5", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk46 WITH (KEY_STORE = 很, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk47 WITH (KEY_STORE = ,, KEY_PATH = 
"gs_ktool/2", ALGORITHM = AES_256_CBC); +-- -- +CREATE CLIENT MASTER KEY ecmk60 WITH (KEY_STORE = gs_ktool, KEY_PATH = , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk61 WITH (KEY_STORE = gs_ktool, KEY_PATH = "g", ALGORITHM = ); +CREATE CLIENT MASTER KEY ecmk62 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoo/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk63 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk64 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk65 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktooll/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk66 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoal/2", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk67 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool3", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk68 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool//4", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk69 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/\", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk70 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5.", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk71 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/.", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk72 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6/", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk73 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/闲", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk74 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktoolllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll/1", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk75 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555", ALGORITHM = AES_256_CBC); +-- -- +CREATE CLIENT MASTER KEY ecmk80 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM =); +CREATE CLIENT MASTER KEY ecmk81 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES); +CREATE CLIENT MASTER KEY ecmk82 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk83 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CFB); +CREATE CLIENT MASTER KEY ecmk84 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_128_CBC); +CREATE CLIENT MASTER KEY ecmk85 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = RSA_2048); +CREATE CLIENT MASTER KEY ecmk86 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = RSA_3072); +CREATE CLIENT MASTER KEY ecmk87 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = AES_256_CBCB); +CREATE CLIENT MASTER KEY ecmk88 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = "AES_256_CBC\0"); +CREATE CLIENT MASTER KEY ecmk89 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = .); +CREATE CLIENT MASTER KEY ecmk90 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = 
AES_256_CBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC); +CREATE CLIENT MASTER KEY ecmk91 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1", ALGORITHM = 的); +-- create cmk | invalid keys | error +CREATE CLIENT MASTER KEY ecmk100 WITH (gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk101 WITH (KEY_STOR = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk102 WITH (KEY_STORE = gs_ktool, KEY = "gs_ktool/3" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk103 WITH (KEY_STORE = gs_ktool, KEY_PATHH = "gs_ktool/4" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk104 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHMA = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk105 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6" , = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk106 WITH (KEY_STORE = gs_ktool, 吗 = "gs_ktool/1" , = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk107 WITH (KEY_STOR = gs_ktool, KEY_STOR = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk108 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC, YES = 1); + +-- prepare | succeed +\! gs_ktool -d all +\! gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g && gs_ktool -g +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/3" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/4" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk5 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/5" , ALGORITHM = AES_256_CBC); + +-- create cmk | unserviceable args | error +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/6" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk6 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY cmk10 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/10" , ALGORITHM = AES_256_CBC); + +-- clear | succeed +\! 
gs_ktool -d all
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk2;
+DROP CLIENT MASTER KEY cmk3;
+DROP CLIENT MASTER KEY cmk4;
+DROP CLIENT MASTER KEY cmk5;
+SELECT * FROM gs_client_global_keys;
+
diff --git a/src/test/regress/sql/ce_crt_cmk_by_hwkms.sql b/src/test/regress/sql/ce_crt_cmk_by_hwkms.sql
new file mode 100644
index 000000000..c53045914
--- /dev/null
+++ b/src/test/regress/sql/ce_crt_cmk_by_hwkms.sql
@@ -0,0 +1,109 @@
+-------------------------------------------------------------------------------------------------------------------------
+-- group : security
+-- module : client encrypt
+--
+-- function : test {sql:CREATE CMK}
+--     CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = $key_store, KEY_PATH = "$key_id" , ALGORITHM = $algo);
+--
+-- dependency :
+--     service : Huawei KMS (https://console.huaweicloud.com/dew/?region=cn-north-4#/kms/keyList/customKey)
+-------------------------------------------------------------------------------------------------------------------------
+
+-- prepare | generate keys in Huawei KMS website | succeed
+--     cec162c2-983d-4a66-8532-c67b915fb409 | ok
+--     31938a5e-6460-49ce-a358-886f46c6f643 | ok
+--     d6107fb0-fa39-4ae5-ae84-019066ce9073 | ok
+--     3be6f4e0-80bf-4209-8ba2-13cdd303f1bd | ok
+--     43e7df16-afdc-4883-97c3-1bc7686ffc2f | to be deleted
+--     f1d088d8-3b48-4ca6-bcf1-d77496e1aba3 | unserviceable
+
+-- create cmk | succeed
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409" , ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643" , ALGORITHM = AES_256);
+
+-- drop cmk | succeed
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk2;
+
+-- create after drop cmk | succeed
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073" , ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd" , ALGORITHM = AES_256);
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk2;
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073" , ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd" , ALGORITHM = AES_256);
+
+-- prepare | succeed
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk3;
+
+-- in the word "ecmk", 'e' means 'error'
+-- create cmk | loss args | error
+CREATE CLIENT MASTER KEY ecmk1 WITH (KEY_STORE = huawei_kms);
+CREATE CLIENT MASTER KEY ecmk2 WITH (KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409");
+CREATE CLIENT MASTER KEY ecmk3 WITH (ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY ecmk4 WITH (KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY ecmk5 WITH (KEY_STORE = huawei_kms, ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY ecmk6 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073");
+CREATE CLIENT MASTER KEY ecmk7 WITH (KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY ecmk8 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY ecmk9 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409");
+CREATE CLIENT MASTER KEY ecmk10 WITH
(KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073"); +CREATE CLIENT MASTER KEY ecmk11 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms); +CREATE CLIENT MASTER KEY ecmk12 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_STORE = huawei_kms); + +-- create cmk | redundant args | error +CREATE CLIENT MASTER KEY ecmk20 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk21 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk22 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk23 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256, ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk24 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256, ALGORITHM = AES_256, ALGORITHM = AES_256, ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk25 WITH (KEY_STORE = huawei_kms, KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256, ALGORITHM = AES_256); + +-- create cmk | invalid args | error +CREATE CLIENT MASTER KEY ecmk60 WITH (KEY_STORE = huawei_kms, KEY_PATH = , ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk61 WITH (KEY_STORE = huawei_kms, KEY_PATH = "c", ALGORITHM = ); +CREATE CLIENT MASTER KEY ecmk62 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb40", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk63 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2983d4a668532c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk64 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2_983d_4a66_8532_c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk65 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2_983d_4a66_8532_c67b915fb4099", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk66 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2_983d_4a66_8532_c67b915fb409cec162c2_983d_4a66_8532_c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk68 WITH (KEY_STORE = huawei_kms, KEY_PATH = "z1938a5e-6460-49ce-a358-886f46c6f64", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk69 WITH (KEY_STORE = huawei_kms, KEY_PATH = ".1938a5e-6460-49ce-a358-886f46c6f64", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk70 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bz", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk73 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1b闲", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY ecmk74 WITH (KEY_STORE = huawei_kms, KEY_PATH = 
"cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40cec162c2-983d-4a66-8532-c67b915fb40", ALGORITHM = AES_256); +-- +CREATE CLIENT MASTER KEY ecmk80 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM =); +CREATE CLIENT MASTER KEY ecmk81 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES); +CREATE CLIENT MASTER KEY ecmk82 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_256_CBC); +CREATE CLIENT MASTER KEY ecmk83 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256_CFB); +CREATE CLIENT MASTER KEY ecmk84 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_128_CBC); +CREATE CLIENT MASTER KEY ecmk85 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = RSA_2048); +CREATE CLIENT MASTER KEY ecmk86 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = RSA_3072); +CREATE CLIENT MASTER KEY ecmk87 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_257); +CREATE CLIENT MASTER KEY ecmk88 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = "AES_256_"); +CREATE CLIENT MASTER KEY ecmk89 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = .); +CREATE CLIENT MASTER KEY ecmk90 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_25666666666666666666666666666666666666666666666666666); +CREATE CLIENT MASTER KEY ecmk91 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = 嘿); + +-- create cmk | invalid keys | error +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "43e7df16-afdc-4883-97c3-1bc7686ffc2f", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "f1d088d8-3b48-4ca6-bcf1-d77496e1aba3", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "aad088d8-3b48-4ca6-bcf1-d77496e1ab55", ALGORITHM = AES_256); + +-- prepare | succeed +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk3 WITH (KEY_STORE = huawei_kms, KEY_PATH = "d6107fb0-fa39-4ae5-ae84-019066ce9073", ALGORITHM = AES_256); + +-- create cmk | unserviceable args | error +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "3be6f4e0-80bf-4209-8ba2-13cdd303f1bd", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk4 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); +CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409", ALGORITHM = AES_256); + +-- 
clear | succeed
+DROP CLIENT MASTER KEY cmk1;
+DROP CLIENT MASTER KEY cmk2;
+DROP CLIENT MASTER KEY cmk3;
+SELECT * FROM gs_client_global_keys;
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_crt_tbl.sql b/src/test/regress/sql/ce_crt_tbl.sql
new file mode 100644
index 000000000..bdd527780
--- /dev/null
+++ b/src/test/regress/sql/ce_crt_tbl.sql
@@ -0,0 +1,58 @@
+
+-------------------------------------------------------------------------------------------------------------------------
+-- group : security
+-- module : client encrypt
+--
+-- function : test {sql:CREATE/INSERT/UPDATE/DELETE/SELECT TABLE}
+--     CREATE TABLE $tbl ($col $dat_type ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = $cek, ENCRYPTION_TYPE = $enc_type));
+--
+-- dependency :
+--     service : Huawei KMS (https://console.huaweicloud.com/dew/?region=cn-north-4#/kms/keyList/customKey)
+--     cmk : CREATE CLIENT MASTER KEY $cmk WITH (KEY_STORE = huawei_kms, ...)
+--     cek : CREATE COLUMN ENCRYPTION KEY $cek ...
+-------------------------------------------------------------------------------------------------------------------------
+
+-- prepare | succeed
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = huawei_kms, KEY_PATH = "cec162c2-983d-4a66-8532-c67b915fb409" , ALGORITHM = AES_256);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = huawei_kms, KEY_PATH = "31938a5e-6460-49ce-a358-886f46c6f643" , ALGORITHM = AES_256);
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+
+-- create table | succeed
+CREATE TABLE IF NOT EXISTS tbl1 (
+    col1 INT,
+    col2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC),
+    col3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC),
+    col4 VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC));
+
+-- insert | succeed
+INSERT INTO tbl1 VALUES (1, 1, 'row1 col3', 'row1 col4');
+INSERT INTO tbl1 VALUES (2, 11111, 'row2 col3', 'row2 col4');
+INSERT INTO tbl1 VALUES (3, 11111111, 'row3 col3', 'row3 col4');
+
+-- update | succeed
+UPDATE tbl1 SET col2 = 22222 WHERE col1=1;
+UPDATE tbl1 SET col3 = 'new row2 col3' WHERE col1=2;
+UPDATE tbl1 SET col4 = 'new row3 col4' WHERE col1=3;
+
+-- select | succeed
+SELECT * FROM tbl1 ORDER BY col1;
+SELECT * FROM tbl1 WHERE col2 = 1;
+SELECT * FROM tbl1 WHERE col3 = 'new row2 col3';
+SELECT * FROM tbl1 WHERE col4 = 'new row3 col4' AND col1 = 3;
+SELECT * FROM tbl1 WHERE col3 = 'row1 col3' AND col4 = 'row1 col4';
+
+-- delete | succeed
+DELETE FROM tbl1 WHERE col2=22222;
+DELETE FROM tbl1 WHERE col3='new row2 col3';
+DELETE FROM tbl1 WHERE col4='row3 col4';
+
+-- clear | succeed
+SELECT * FROM tbl1;
+DROP TABLE tbl1;
+DROP CLIENT MASTER KEY cmk1 CASCADE;
+DROP CLIENT MASTER KEY cmk2 CASCADE;
+SELECT * FROM gs_column_keys;
+SELECT * FROM gs_client_global_keys;
+
diff --git a/src/test/regress/sql/ce_crt_tbl_as.sql b/src/test/regress/sql/ce_crt_tbl_as.sql
new file mode 100644
index 000000000..21aab836f
--- /dev/null
+++ b/src/test/regress/sql/ce_crt_tbl_as.sql
@@ -0,0 +1,191 @@
+-- module : client encryption
+-- purpose : function test
+-- detail : test CREATE AS & SELECT INTO (with encrypted columns)
+
+-- (0) prepare | clean environment | succeed
+CREATE SCHEMA ce_crt_tbl_as;
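A note on what this file verifies: a table built with SELECT INTO (or CREATE TABLE AS) from encrypted columns is expected to inherit the column-encryption metadata, which is why the script repeatedly counts rows in gs_encrypted_columns. A minimal sketch of that check, with hypothetical names (t_src with one encrypted column c2) and the expected effect stated as an assumption rather than verified output:

SELECT count(*) FROM gs_encrypted_columns;   -- baseline
SELECT c1, c2 INTO t_copy FROM t_src;        -- t_src/c2: hypothetical encrypted source
SELECT count(*) FROM gs_encrypted_columns;   -- expected: one more row, for t_copy.c2
DROP TABLE t_copy;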
+SET search_path TO ce_crt_tbl_as;
+
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP CLIENT MASTER KEY IF EXISTS cmk1 CASCADE;
+DROP CLIENT MASTER KEY IF EXISTS cmk2 CASCADE;
+\! gs_ktool -d all
+
+-- (0) prepare | create cmk & cek | succeed
+\! gs_ktool -g
+\! gs_ktool -g
+
+CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE CLIENT MASTER KEY cmk2 WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/2" , ALGORITHM = AES_256_CBC);
+
+CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek2 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+CREATE COLUMN ENCRYPTION KEY cek3 WITH VALUES (CLIENT_MASTER_KEY = cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256, ENCRYPTED_VALUE = '1234567890abcdef1234567890abcde');
+
+-- (0) prepare | create table & insert data | succeed
+CREATE TABLE IF NOT EXISTS t1 (
+    c1 INT,
+    c2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC),
+    c3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC),
+    c4 VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek3, ENCRYPTION_TYPE = DETERMINISTIC));
+
+CREATE TABLE IF NOT EXISTS t2 (
+    c1 INT,
+    c2 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC),
+    c3 TEXT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek2, ENCRYPTION_TYPE = DETERMINISTIC),
+    c4 VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek3, ENCRYPTION_TYPE = DETERMINISTIC));
+
+INSERT INTO t1 VALUES (1, 12, 'r1 c3', 'r1 c4');
+INSERT INTO t1 VALUES (2, 22, 'r2 c3', 'r2 c4');
+INSERT INTO t1 VALUES (3, 32, 'r3 c3', 'r3 c4');
+
+-- INSERT INTO t2 (nothing)
+
+-- (1) SELECT INTO | succeed
+SELECT c1,c2 INTO t1_1 FROM t1;
+SELECT c1,c3,c4 INTO t1_2 FROM t1;
+SELECT c1,c2,c3,c4 INTO t1_3 FROM t1;
+SELECT * INTO t1_4 FROM t1;
+
+SELECT c1,c4 INTO t1_5 FROM t1 WHERE c4 = 'r4 c4';
+SELECT c1,c3,c4 INTO t1_6 FROM t1 WHERE c3 = 'r2 c3';
+SELECT c1,c2,c4 INTO t1_7 FROM t1 WHERE c2 = 12 AND c4 = 'r1 c4';
+SELECT * INTO t1_8 FROM t1 WHERE c2 = 32;
+
+SELECT c1,c2 INTO t2_1 FROM t2;
+SELECT c1,c3,c4 INTO t2_2 FROM t2;
+SELECT c1,c2,c3,c4 INTO t2_3 FROM t2;
+SELECT * INTO t2_4 FROM t2;
+
+SELECT c1,c4 INTO t2_5 FROM t2 WHERE c4 = 'r4 c4';
+SELECT c1,c3,c4 INTO t2_6 FROM t2 WHERE c3 = 'r2 c3';
+SELECT c1,c2,c4 INTO t2_7 FROM t2 WHERE c2 = 12 AND c4 = 'r1 c4';
+SELECT * INTO t2_8 FROM t2 WHERE c2 = 32;
+
+-- (2) SELECT INTO | no distribution column | failed
+-- but in fastcheck it is unclear why these succeed
+SELECT c2 INTO t1_20 FROM t1;
+SELECT c2,c3 INTO t1_21 FROM t1;
+
+SELECT c2 INTO t2_20 FROM t2;
+SELECT c2,c3 INTO t2_21 FROM t2;
+
+-- (3) SELECT INTO | result table already exists | failed
+SELECT c1,c2 INTO t1_1 FROM t1;
+
+SELECT c1,c2 INTO t2_1 FROM t2;
+
+-- (4) show results | succeed
+SELECT count(*) FROM gs_encrypted_columns;
+
+SELECT * FROM t1_1 ORDER BY c1 ASC;
+SELECT * FROM t1_2 ORDER BY c1 ASC;
+SELECT * FROM t1_3 ORDER BY c1 ASC;
+SELECT * FROM t1_4 ORDER BY c1 ASC;
+SELECT * FROM t1_5 ORDER BY c1 ASC;
+SELECT * FROM t1_6 ORDER BY c1 ASC;
+SELECT * FROM t1_7 ORDER BY c1 ASC;
+SELECT * FROM t1_8 ORDER BY c1 ASC;
+
+SELECT * FROM t2_1 ORDER BY c1 ASC;
+SELECT * FROM t2_2 ORDER BY c1 ASC;
+SELECT * FROM t2_3 ORDER BY c1 ASC;
+SELECT * FROM t2_4 ORDER BY c1 ASC;
+SELECT * FROM t2_5 ORDER BY c1 ASC;
+SELECT * FROM t2_6 ORDER BY c1 ASC;
+SELECT * FROM t2_7 ORDER BY c1 ASC;
+SELECT * FROM t2_8 ORDER BY c1 ASC;
+
+-- (5) clean copied tables | succeed
+DROP TABLE t1_1;
+DROP TABLE t1_2;
+DROP TABLE t1_3;
+DROP TABLE t1_4;
+DROP TABLE t1_5;
+DROP TABLE t1_6;
+DROP TABLE t1_7;
+DROP TABLE t1_8;
+
+DROP TABLE t2_1;
+DROP TABLE t2_2;
+DROP TABLE t2_3;
+DROP TABLE t2_4;
+DROP TABLE t2_5;
+DROP TABLE t2_6;
+DROP TABLE t2_7;
+DROP TABLE t2_8;
+
+DROP TABLE IF EXISTS t1_20;
+DROP TABLE IF EXISTS t1_21;
+DROP TABLE IF EXISTS t2_20;
+DROP TABLE IF EXISTS t2_21;
+
+-- (6) CREATE AS | succeed
+CREATE TABLE t1_1 AS SELECT c1,c2 FROM t1;
+CREATE TABLE t1_2 AS SELECT c1,c3,c4 FROM t1;
+CREATE TABLE t1_3 AS SELECT c1,c2,c3,c4 FROM t1;
+CREATE TABLE t1_4 AS SELECT * FROM t1;
+
+-- TODO : not supported yet
+CREATE TABLE t1_4 AS SELECT c1,c4 FROM t1 WHERE c4 = 'r4 c4';
+CREATE TABLE t1_5 AS SELECT c1,c3,c4 FROM t1 WHERE c3 = 'r2 c3';
+CREATE TABLE t1_6 AS SELECT c1,c2,c4 FROM t1 WHERE c2 = 12 AND c4 = 'r1 c4';
+CREATE TABLE t1_7 AS SELECT * FROM t1 WHERE c2 = 32;
+
+CREATE TABLE t2_1 AS SELECT c1,c2 FROM t1;
+CREATE TABLE t2_2 AS SELECT c1,c3,c4 FROM t1;
+CREATE TABLE t2_3 AS SELECT c1,c2,c3,c4 FROM t1;
+CREATE TABLE t2_4 AS SELECT * FROM t1;
+
+-- TODO : not supported yet
+CREATE TABLE t2_4 AS SELECT c1,c4 FROM t2 WHERE c4 = 'r4 c4';
+CREATE TABLE t2_5 AS SELECT c1,c3,c4 FROM t2 WHERE c3 = 'r2 c3';
+CREATE TABLE t2_6 AS SELECT c1,c2,c4 FROM t2 WHERE c2 = 12 AND c4 = 'r1 c4';
+CREATE TABLE t2_7 AS SELECT * FROM t2 WHERE c2 = 32;
+
+-- (7) show results | succeed
+SELECT count(*) FROM gs_encrypted_columns;
+
+SELECT * FROM t1_1 ORDER BY c1 ASC;
+SELECT * FROM t1_2 ORDER BY c1 ASC;
+SELECT * FROM t1_3 ORDER BY c1 ASC;
+SELECT * FROM t1_4 ORDER BY c1 ASC;
+SELECT * FROM t1_5 ORDER BY c1 ASC;
+SELECT * FROM t1_6 ORDER BY c1 ASC;
+SELECT * FROM t1_7 ORDER BY c1 ASC;
+SELECT * FROM t1_8 ORDER BY c1 ASC;
+
+SELECT * FROM t2_1 ORDER BY c1 ASC;
+SELECT * FROM t2_2 ORDER BY c1 ASC;
+SELECT * FROM t2_3 ORDER BY c1 ASC;
+SELECT * FROM t2_4 ORDER BY c1 ASC;
+SELECT * FROM t2_5 ORDER BY c1 ASC;
+SELECT * FROM t2_6 ORDER BY c1 ASC;
+SELECT * FROM t2_7 ORDER BY c1 ASC;
+SELECT * FROM t2_8 ORDER BY c1 ASC;
+
+-- (8) clean copied tables
+DROP TABLE t1_1;
+DROP TABLE t1_2;
+DROP TABLE t1_3;
+DROP TABLE t1_4;
+
+DROP TABLE t2_1;
+DROP TABLE t2_2;
+DROP TABLE t2_3;
+DROP TABLE t2_4;
+
+-- (9) finish | clean environment | succeed
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP CLIENT MASTER KEY cmk1 CASCADE;
+DROP CLIENT MASTER KEY cmk2 CASCADE;
+\!
gs_ktool -d all + +-- should be empty +SELECT * FROM gs_encrypted_columns; + +-- reset +RESET search_path; + \ No newline at end of file diff --git a/src/test/regress/sql/ce_default_values.sql b/src/test/regress/sql/ce_default_values.sql index 6c60a088b..6fa5787b6 100644 --- a/src/test/regress/sql/ce_default_values.sql +++ b/src/test/regress/sql/ce_default_values.sql @@ -9,6 +9,7 @@ CREATE TABLE products ( product_no integer DEFAULT 1, name text ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT 'Test Product', title varchar(35) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) NOT NULL DEFAULT ' ', + value varchar(35) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '', price numeric ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT 9.99, max_price decimal(6,0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = defaultcek, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT NULL ); diff --git a/src/test/regress/sql/ce_describe.sql b/src/test/regress/sql/ce_describe.sql index abb64cf04..3af740aa1 100644 --- a/src/test/regress/sql/ce_describe.sql +++ b/src/test/regress/sql/ce_describe.sql @@ -7,7 +7,7 @@ CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORIT CREATE TABLE IF NOT EXISTS t_varchar (id INT, name varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC), - address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC)); + address varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = RANDOMIZED)); SELECT attname, atttypid::regtype FROM pg_attribute JOIN pg_class On attrelid = Oid WHERE relname = 't_varchar' AND attnum >0; \d t_varchar; \d+ t_varchar; diff --git a/src/test/regress/sql/ce_escaping.sql b/src/test/regress/sql/ce_escaping.sql index f679637e8..effc931ee 100644 --- a/src/test/regress/sql/ce_escaping.sql +++ b/src/test/regress/sql/ce_escaping.sql @@ -3,17 +3,17 @@ CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -CREATE TABLE ce_customer ( - ce_customer_id integer NOT NULL, +CREATE TABLE customer ( + customer_id integer NOT NULL, id integer ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), first_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), last_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC) ); -insert into ce_customer values (770, 1234, 'Ido''s', 'shemer'); -insert into ce_customer (ce_customer_id, id, first_name, last_name) values (771, 1234, 'Eli''s', 'shemer'); -select * from ce_customer order by ce_customer_id; -select * from ce_customer where first_name = 'Ido''s'; -drop table ce_customer; +insert into customer values (770, 1234, 'Ido''s', 'shemer'); +insert into customer (customer_id, id, first_name, last_name) values (771, 1234, 'Eli''s', 'shemer'); +select * from customer order by customer_id; +select * from customer where first_name = 'Ido''s'; +drop table customer; DROP CLIENT MASTER KEY mycmk CASCADE; \! 
gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_foreign_key.sql b/src/test/regress/sql/ce_foreign_key.sql index 7ac7ea036..b03f2fcbf 100644 --- a/src/test/regress/sql/ce_foreign_key.sql +++ b/src/test/regress/sql/ce_foreign_key.sql @@ -6,7 +6,7 @@ CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktoo CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE COLUMN ENCRYPTION KEY MyCEK2 WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE so_headers ( - id INTEGER unique ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), + id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), customer_id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK2, ENCRYPTION_TYPE = DETERMINISTIC), ship_to VARCHAR (255) ); @@ -38,7 +38,6 @@ CREATE TABLE so_items_c ( PRIMARY KEY (item_id,so_id) ); -ALTER TABLE IF EXISTS so_headers ADD CONSTRAINT so_headers_unique1 UNIQUE (id,customer_id); CREATE TABLE payments ( pay_id int, so_id INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC), @@ -65,17 +64,18 @@ SELECT * from so_items ORDER BY item_id; INSERT INTO so_items_r VALUES (10001, 1, 1001, 100, 37.28); INSERT INTO so_items_r VALUES (10002, 6, 1001, 100, 37.28); +INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); SELECT * from so_items_r ORDER BY item_id; INSERT INTO so_items_a VALUES (10001, 1, 1001, 100, 37.28); INSERT INTO so_items_a VALUES (10002, 6, 1001, 100, 37.28); -INSERT INTO so_items_a VALUES (10001, 1, 1001, 110, 36.28); +INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); SELECT * from so_items_a ORDER BY item_id; INSERT INTO so_items_c VALUES (10001, 1, 1001, 100, 37.28); INSERT INTO so_items_c VALUES (10002, 6, 1001, 100, 37.28); -INSERT INTO so_items_c VALUES (10001, 1, 1011, 101, 36.28); +INSERT INTO so_items VALUES (10003, 2, 1001, 100, 37.28); SELECT * from so_items_c ORDER BY item_id; -DELETE from so_headers where id = 2; +DELETE from so_headers where id =2; SELECT * from so_items ORDER BY item_id; SELECT * from so_items_a ORDER BY item_id; @@ -84,14 +84,21 @@ SELECT * from so_items_c ORDER BY item_id; INSERT INTO payments VALUES (100001, 1, 101); INSERT INTO payments VALUES (100002, 1, 102); -SELECT * from payments ORDER BY pay_id; +ALTER TABLE so_items_a ADD CONSTRAINT fkey_a FOREIGN KEY (so_id) REFERENCES so_headers (id); +ALTER TABLE so_items_a DROP CONSTRAINT fkey_a; +ALTER TABLE so_items_a ADD CONSTRAINT constraint_fk +FOREIGN KEY (so_id) +REFERENCES so_headers (id) +ON DELETE CASCADE; + +DROP TABLE so_headers; DROP TABLE so_items; DROP TABLE so_items_r; DROP TABLE so_items_a; +DROP TABLE so_items_b; DROP TABLE so_items_c; DROP TABLE payments; -DROP TABLE so_headers; DROP CLIENT MASTER KEY MyCMK CASCADE; \! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_functions_anonymous_block.sql b/src/test/regress/sql/ce_functions_anonymous_block.sql new file mode 100644 index 000000000..b94d96ec8 --- /dev/null +++ b/src/test/regress/sql/ce_functions_anonymous_block.sql @@ -0,0 +1,88 @@ +\! 
gs_ktool -g
+
+DROP CLIENT MASTER KEY IF EXISTS anonymous_block_cmk CASCADE;
+CREATE CLIENT MASTER KEY anonymous_block_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = SM4);
+CREATE COLUMN ENCRYPTION KEY anonymous_block_cek WITH VALUES (CLIENT_MASTER_KEY = anonymous_block_cmk, ALGORITHM = SM4_SM3);
+
+BEGIN
+CREATE TABLE creditcard_info (id_number int, name text encrypted with (column_encryption_key = anonymous_block_cek, encryption_type = DETERMINISTIC),
+credit_card varchar(19) encrypted with (column_encryption_key = anonymous_block_cek, encryption_type = DETERMINISTIC));
+END;
+/
+
+do $$
+<<first_block>>
+begin
+insert into creditcard_info values(0, 'King', '123456');
+end first_block $$;
+select * from creditcard_info;
+delete from creditcard_info;
+
+BEGIN
+insert into creditcard_info values(1, 'Avi', '123456');
+insert into creditcard_info values(2, 'Eli', '641245');
+END;
+/
+
+select * from creditcard_info order by id_number;
+delete from creditcard_info;
+
+CREATE OR REPLACE PROCEDURE autonomous_1() AS
+BEGIN
+    insert into creditcard_info values(66, 66,66);
+    commit;
+    insert into creditcard_info values(77, 77,77);
+    rollback;
+END;
+/
+call autonomous_1();
+select * from creditcard_info order by id_number;
+
+--success without return
+CREATE OR REPLACE PROCEDURE exec_insert1 () AS
+BEGIN
+    insert into creditcard_info values(3, 'Rafi', '3');
+    update creditcard_info set name='Sun' where credit_card = 3;
+END;
+/
+call exec_insert1 ();
+--success return void
+CREATE or replace FUNCTION exec_insert2() RETURN void
+AS
+BEGIN
+    insert into creditcard_info values(4,'Gil',4);
+    update creditcard_info set name='Joy' where credit_card = 4;
+END;
+/
+SELECT exec_insert2();
+call exec_insert2();
+--success RETURN integer
+CREATE or replace FUNCTION exec_insert3() RETURN integer
+AS
+BEGIN
+    insert into creditcard_info values(5,'Peter',5);
+    update creditcard_info set name= 'Xavier' where credit_card = 5;
+    return 1;
+END;
+/
+SELECT exec_insert3();
+call exec_insert3();
+
+-- plpgsql IF
+CREATE or replace FUNCTION exec_insert4() RETURN void
+AS
+BEGIN
+IF 2<5 THEN
+    insert into creditcard_info values(6,'Ziv',6);
+    update creditcard_info set name='Peter' where credit_card = 6;
+END IF;
+END;
+/
+SELECT exec_insert4();
+call exec_insert4();
+select * from creditcard_info order by id_number;
+
+DROP TABLE creditcard_info;
+DROP CLIENT MASTER KEY anonymous_block_cmk CASCADE;
+\!
gs_ktool -d all + diff --git a/src/test/regress/sql/ce_functions_create_replace.sql b/src/test/regress/sql/ce_functions_create_replace.sql index 670fd7bfb..ba15a8e5b 100644 --- a/src/test/regress/sql/ce_functions_create_replace.sql +++ b/src/test/regress/sql/ce_functions_create_replace.sql @@ -31,7 +31,7 @@ BEGIN RETURN c; END; $$ LANGUAGE plpgsql; -\df +\df f_processed_in_plpgsql SELECT COUNT(*) FROM gs_encrypted_proc where func_id NOT in (SELECT Oid FROM pg_proc); CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out1 OUT varchar(100), out2 OUT dec(15,2)) AS $$ @@ -39,7 +39,7 @@ BEGIN SELECT INTO out1, out2 name, balance from accounts LIMIT 1; END; $$ LANGUAGE plpgsql; -\df +\df f_processed_out_plpgsql -- FAILED CREATE OR REPLACE FUNCTION f_processed_out_plpgsql(out1 OUT varchar(100), out2 OUT dec(15,2)) AS $$ @@ -48,9 +48,16 @@ SELECT INTO out1, out2 name, balance from accounts LIMIT 1; END; $$ LANGUAGE plpgsql; SELECT f_processed_out_plpgsql(); -\df +\df f_processed_out_plpgsql DROP FUNCTION f_processed_in_plpgsql; DROP FUNCTION f_processed_out_plpgsql; +CREATE OR REPLACE FUNCTION select1() RETURNS varchar(100) LANGUAGE SQL AS 'SELECT name from accounts;'; +select proname, pronargs, prorettype, proargtypes, proallargtypes, proargnames, prorettype_orig, proargcachedcol, proallargtypes_orig +from pg_proc join gs_encrypted_proc on pg_proc.oid = func_id where proname = 'select1'; +CREATE OR REPLACE FUNCTION select1() RETURNS varchar(100) LANGUAGE SQL AS 'SELECT ''aaa'';'; +select proname, pronargs, prorettype, proargtypes, proallargtypes, proargnames, prorettype_orig, proargcachedcol, proallargtypes_orig +from pg_proc join gs_encrypted_proc on pg_proc.oid = func_id where proname = 'select1'; +DROP FUNCTION select1(); DROP TABLE accounts; DROP COLUMN ENCRYPTION KEY create_replace_cek; DROP CLIENT MASTER KEY create_replace_cmk; diff --git a/src/test/regress/sql/ce_functions_describe.sql b/src/test/regress/sql/ce_functions_describe.sql index b264fcfcb..bfcfd1c7a 100644 --- a/src/test/regress/sql/ce_functions_describe.sql +++ b/src/test/regress/sql/ce_functions_describe.sql @@ -22,7 +22,7 @@ BEGIN END; $$ LANGUAGE plpgsql; -\df +\df f_processed_in_plpgsql DROP FUNCTION f_processed_in_plpgsql(); DROP TABLE accounts; diff --git a/src/test/regress/sql/ce_functions_examples.sql b/src/test/regress/sql/ce_functions_examples.sql index 652d80964..a26d31e18 100755 --- a/src/test/regress/sql/ce_functions_examples.sql +++ b/src/test/regress/sql/ce_functions_examples.sql @@ -60,25 +60,6 @@ CREATE FUNCTION f_plaintext_return_table3(int, int) RETURNS TABLE(name text, val SELECT * FROM t_plaintext WHERE val=$1 or val2=$2; $$ LANGUAGE SQL; - - returns table ( - film_title varchar, - film_release_year int - ) - language plpgsql -as $$ -begin - return query - select - title, - release_year::integer - from - film - where - title ilike p_pattern; -end;$$ - - CREATE OR REPLACE FUNCTION get_all_plaintext_setof() RETURNS SETOF t_plaintext AS $BODY$ DECLARE @@ -179,13 +160,12 @@ CREATE OR REPLACE FUNCTION foo() end; $BODY$ LANGUAGE 'plpgsql' VOLATILE; -select proname, prorettype::regtype from pg_proc where Oid in (select func_id from gs_encrypted_proc); +select proname, prorettype::regtype from pg_proc where Oid in (select func_id from gs_encrypted_proc) order by proname; CALL f_processed_in_plpgsql(); CALL f_processed_return_table(); CALL f_processed_return_table(); DROP TABLE t_plaintext CASCADE; DROP TABLE t_processed CASCADE; -\d DROP FUNCTION f_hardcoded; DROP FUNCTION f_hardcoded_variable; DROP FUNCTION 
f_plaintext_in; @@ -203,7 +183,6 @@ DROP FUNCTION reffunc_plaintext; DROP FUNCTION reffunc_processed; DROP FUNCTION f_plaintext_out; DROP FUNCTION f_processed_out; -\df DROP COLUMN ENCRYPTION KEY func_cek; DROP CLIENT MASTER KEY func_cmk CASCADE; \! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_functions_hardcoded.sql b/src/test/regress/sql/ce_functions_hardcoded.sql index 4cc1732a1..06bf0d1fc 100644 --- a/src/test/regress/sql/ce_functions_hardcoded.sql +++ b/src/test/regress/sql/ce_functions_hardcoded.sql @@ -4,41 +4,40 @@ DROP CLIENT MASTER KEY IF EXISTS hardcode_cmk CASCADE; CREATE CLIENT MASTER KEY hardcode_cmk WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY hardcode_cek WITH VALUES (CLIENT_MASTER_KEY = hardcode_cmk, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE IF NOT EXISTS hardcoded_t1(id int, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = hardcode_cek, ENCRYPTION_TYPE = DETERMINISTIC)); -CREATE TABLE IF NOT EXISTS t1(id int, i1 INT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = hardcode_cek, ENCRYPTION_TYPE = DETERMINISTIC)); +INSERT INTO hardcoded_t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5); -INSERT INTO t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5); - -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM t1 WHERE i1=1;' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM hardcoded_t1 WHERE i1=1;' LANGUAGE SQL; SELECT select_func(); -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM t1 WHERE i1=2' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS 'SELECT id FROM hardcoded_t1 WHERE i1=2' LANGUAGE SQL; SELECT select_func(); -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM t1 WHERE i1=3;$func_tag$ LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM hardcoded_t1 WHERE i1=3;$func_tag$ LANGUAGE SQL; SELECT select_func(); -CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM t1 WHERE i1=4$func_tag$ LANGUAGE SQL; +CREATE OR REPLACE FUNCTION select_func() RETURNS INT AS $func_tag$SELECT id FROM hardcoded_t1 WHERE i1=4$func_tag$ LANGUAGE SQL; SELECT select_func(); -CREATE OR REPLACE FUNCTION insert_func() RETURNS VOID AS 'INSERT INTO t1 VALUES(9,9);' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION insert_func() RETURNS VOID AS 'INSERT INTO hardcoded_t1 VALUES(9,9);' LANGUAGE SQL; SELECT insert_func(); SELECT insert_func(); SELECT insert_func(); SELECT insert_func(); -SELECT count(*) from t1 where id=9; +SELECT count(*) from hardcoded_t1 where id=9; -CREATE OR REPLACE FUNCTION insert_select_func() RETURNS SETOF INTEGER AS 'INSERT INTO t1 VALUES(8,8); SELECT id FROM t1 WHERE i1=9;' LANGUAGE SQL; +CREATE OR REPLACE FUNCTION insert_select_func() RETURNS SETOF INTEGER AS 'INSERT INTO hardcoded_t1 VALUES(8,8); SELECT id FROM hardcoded_t1 WHERE i1=9;' LANGUAGE SQL; SELECT insert_select_func(); SELECT insert_select_func(); SELECT insert_select_func(); SELECT insert_select_func(); -SELECT count(*) from t1 where id=8; +SELECT count(*) from hardcoded_t1 where id=8; -SELECT * from t1 order by id; +SELECT * from hardcoded_t1 order by id; CREATE FUNCTION f_hardcoded_variable() RETURNS int AS $$ BEGIN -RETURN(SELECT id from t1 where i1 = 5 LIMIT 1); +RETURN(SELECT id from hardcoded_t1 where i1 = 5 LIMIT 1); END; $$ LANGUAGE plpgsql; SELECT f_hardcoded_variable(); @@ -53,7 +52,6 @@ CALL f_hardcoded_variable(); DROP FUNCTION 
f_hardcoded_variable; SELECT * FROM t_processed ORDER BY name; DROP TABLE t_processed; -DROP TABLE t1 CASCADE; create table accounts ( id serial, @@ -70,6 +68,128 @@ select insert_func300(); select * from accounts ORDER BY id; +--hardcoded control +DROP FUNCTION IF EXISTS f_hardcoded1; +CREATE OR REPLACE FUNCTION f_hardcoded1() RETURNS SETOF int AS $$ +DECLARE + r integer; +BEGIN + FOR r IN + SELECT id FROM hardcoded_t1 where i1 = 5 + LOOP + RETURN NEXT r; + END LOOP; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded1(); +DROP FUNCTION f_hardcoded1; + +DROP FUNCTION IF EXISTS f_hardcoded11; +CREATE OR REPLACE FUNCTION f_hardcoded11() RETURNS SETOF int AS $$ +DECLARE + r integer; +BEGIN + FOR r IN SELECT id FROM hardcoded_t1 where i1 = 5 + LOOP + RETURN NEXT r; + END LOOP; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded11(); +DROP FUNCTION f_hardcoded11; + +DROP FUNCTION IF EXISTS f_hardcoded12; +CREATE OR REPLACE FUNCTION f_hardcoded12() RETURNS SETOF int AS $$ +DECLARE + r integer; +BEGIN + FOR r IN SELECT id FROM hardcoded_t1 where i1 = 5 LOOP + RETURN NEXT r; + END LOOP; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded12(); +DROP FUNCTION f_hardcoded12; + +DROP FUNCTION IF EXISTS f_hardcoded2; +CREATE OR REPLACE FUNCTION f_hardcoded2() RETURNS SETOF int AS $$ +BEGIN + IF 1 > 0 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 5); + ELSIF 2 > 0 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 4); + ELSE + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 3); + END IF; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded2(); +DROP FUNCTION f_hardcoded2; + +DROP FUNCTION IF EXISTS f_hardcoded3; +CREATE OR REPLACE FUNCTION f_hardcoded3() RETURNS SETOF int AS $$ +DECLARE + x integer := 5; +BEGIN + CASE + WHEN x BETWEEN 0 AND 5 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 5); + WHEN x BETWEEN 6 AND 10 THEN + RETURN QUERY(SELECT id FROM hardcoded_t1 where i1 = 10); + END CASE; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded3(); +DROP FUNCTION f_hardcoded3; + +DROP FUNCTION IF EXISTS f_hardcoded4; +CREATE OR REPLACE FUNCTION f_hardcoded4() RETURNS SETOF int AS $$ +DECLARE + x integer := 5; +BEGIN + WHILE x > 0 LOOP + RETURN QUERY (SELECT id FROM hardcoded_t1 where i1 = 5); + x := x - 1; + END LOOP; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded4(); +DROP FUNCTION f_hardcoded4; + +DROP FUNCTION IF EXISTS f_hardcoded5; +CREATE OR REPLACE FUNCTION f_hardcoded5() RETURNS SETOF int AS $$ +BEGIN + FOR i IN 1..10 LOOP + RETURN QUERY (SELECT id FROM hardcoded_t1 where i1 = 5); + END LOOP; + FOR i IN REVERSE 10..1 LOOP + RETURN QUERY (SELECT id FROM hardcoded_t1 where i1 = 5); + END LOOP; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded5(); +DROP FUNCTION f_hardcoded5; + +DROP FUNCTION IF EXISTS f_hardcoded6; +CREATE OR REPLACE FUNCTION f_hardcoded6() RETURNS int AS $$ +BEGIN + UPDATE hardcoded_t1 set i1 = 5 where i1 = 5; + BEGIN + UPDATE hardcoded_t1 set i1 = 5 where i1 = 5; + EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'caught division_by_zero'; + RETURN 2; + END; + RETURN 1; +END; +$$ LANGUAGE plpgsql; +SELECT f_hardcoded6(); +DROP FUNCTION f_hardcoded6; +DROP TABLE hardcoded_t1 CASCADE; DROP TABLE accounts CASCADE; DROP FUNCTION insert_func100; DROP FUNCTION insert_func200; diff --git a/src/test/regress/sql/ce_functions_in_out_params.sql b/src/test/regress/sql/ce_functions_in_out_params.sql index 87b6a6277..3f5bc9eee 100755 --- a/src/test/regress/sql/ce_functions_in_out_params.sql +++ 
b/src/test/regress/sql/ce_functions_in_out_params.sql
@@ -50,14 +50,14 @@ SELECT f_processed_in_out_b('ten','name70');
 SELECT f_processed_in_out_plpgsql(17,3);
 SELECT f_processed_in_out_plpgsql2(6);
 SELECT f_processed_in_out_aliases_plpgsql(4);
-DROP TABLE t_processed CASCADE;
-DROP TABLE t_processed_b CASCADE;
 DROP FUNCTION f_processed_in_out_1param;
-DROP FUNCTION f_processed_in_out;
+DROP FUNCTION f_processed_in_out(int);
 DROP FUNCTION f_processed_in_out_b;
 DROP FUNCTION f_processed_in_out_plpgsql;
 DROP FUNCTION f_processed_in_out_plpgsql2;
 DROP FUNCTION f_processed_in_out_aliases_plpgsql;
+DROP TABLE t_processed CASCADE;
+DROP TABLE t_processed_b CASCADE;
 DROP COLUMN ENCRYPTION KEY in_out_cek;
 DROP CLIENT MASTER KEY in_out_cmk;
 \! gs_ktool -d all
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_functions_input_params.sql b/src/test/regress/sql/ce_functions_input_params.sql
index b96e380c1..146bbde47 100755
--- a/src/test/regress/sql/ce_functions_input_params.sql
+++ b/src/test/regress/sql/ce_functions_input_params.sql
@@ -36,13 +36,13 @@ create table accounts (
 CREATE OR REPLACE FUNCTION insert_func_2(name varchar(100), balance dec(15,2)) RETURNS VOID
 AS 'INSERT INTO accounts(name,balance) VALUES($1, $2);' LANGUAGE SQL;
-call insert_func_2('Bob', 101.30);
+call regression.public.insert_func_2('Bob', 101.30);
 call insert_func_2('George', 505.70);
 select insert_func_2('Joe', 710.00);
 select * from insert_func_2('Donald', 1214.88);
 
 select * from accounts order by id;
 
-drop FUNCTION f_processed_in_sql;
+drop FUNCTION f_processed_in_sql(int, int);
 drop FUNCTION f_processed_in_sql_named;
 drop FUNCTION insert_func_2;
 
diff --git a/src/test/regress/sql/ce_functions_return_table.sql b/src/test/regress/sql/ce_functions_return_table.sql
index b02b62987..38b39927d 100644
--- a/src/test/regress/sql/ce_functions_return_table.sql
+++ b/src/test/regress/sql/ce_functions_return_table.sql
@@ -18,12 +18,42 @@ CREATE FUNCTION select2() RETURNS accounts LANGUAGE SQL AS 'SELECT * from accoun
 CREATE FUNCTION select4() RETURNS SETOF accounts LANGUAGE SQL AS 'SELECT * from accounts;';
 CALL select2();
 CALL select4();
-SELECT select2();
-SELECT select4();
+
+DROP TABLE IF EXISTS fuc_creditcard_info;
+CREATE TABLE fuc_creditcard_info (id_number int, name text encrypted with (column_encryption_key = ret_cek1, encryption_type = DETERMINISTIC),
+credit_card varchar(19) encrypted with (column_encryption_key = ret_cek1, encryption_type = DETERMINISTIC));
+INSERT INTO fuc_creditcard_info VALUES (1,2,3);
+-- the return-table column types declared by the function match the encrypted table's column types, so encryption and decryption work normally
+DROP FUNCTION IF EXISTS select5();
+CREATE or replace FUNCTION select5() RETURNS TABLE (
+    name text ,
+    credit_card varchar(19)
+) LANGUAGE SQL
+AS 'SELECT name, credit_card from fuc_creditcard_info;';
+call select5();
+-- the return-table column type VARCHAR differs from the encrypted name column's text type, yet encryption and decryption still work normally
+DROP FUNCTION IF EXISTS select6;
+CREATE or replace FUNCTION select6() RETURNS TABLE (
+name VARCHAR,
+credit_card VARCHAR
+) LANGUAGE SQL
+AS 'SELECT name, credit_card from fuc_creditcard_info;';
+call select6();
+-- the return-table column type INT conflicts with the encrypted column's varchar(19) type, so an error is reported
+DROP FUNCTION IF EXISTS select7;
+CREATE or replace FUNCTION select7() RETURNS TABLE (
+name text,
+credit_card INT
+) LANGUAGE SQL
+AS 'SELECT name, credit_card from fuc_creditcard_info;';
 DROP FUNCTION select2();
 DROP FUNCTION select4();
+DROP FUNCTION select5();
+DROP FUNCTION select6();
+DROP FUNCTION select7();
 DROP TABLE accounts;
+DROP TABLE fuc_creditcard_info;
 DROP COLUMN ENCRYPTION KEY ret_cek1;
 DROP CLIENT MASTER KEY ret_cmk1;
 \!
gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_functions_return_values.sql b/src/test/regress/sql/ce_functions_return_values.sql index 0f831e11a..39c170e31 100644 --- a/src/test/regress/sql/ce_functions_return_values.sql +++ b/src/test/regress/sql/ce_functions_return_values.sql @@ -55,7 +55,14 @@ end; $BODY$ language plpgsql ; -\df +\df select1 +\df select2 +\df select3 +\df select4 +\df select5 +\df select6 +\df select7 + call select1(); call select2(); call select3(); @@ -63,7 +70,7 @@ call select4(); call select5(); call select6(); call select7(); -call select7(); + CALL f_processed_return_table(); BEGIN; SELECT reffunc('funccursor'); @@ -72,14 +79,14 @@ COMMIT; SELECT * FROM get_rows_setof(); DROP TABLE t_num CASCADE; -\df + DROP FUNCTION select6; DROP FUNCTION select5; DROP FUNCTION select7; DROP FUNCTION reffunc(refcursor); DROP FUNCTION get_rows_setof(); DROP FUNCTION f_processed_return_table(); -\df + SELECT COUNT(*) FROM gs_encrypted_proc; SELECT proname, prorettype, proallargtypes FROM gs_encrypted_proc JOIN pg_proc ON pg_proc.Oid = gs_encrypted_proc.func_id; DROP COLUMN ENCRYPTION KEY ret_cek2; diff --git a/src/test/regress/sql/ce_functions_return_variable.sql b/src/test/regress/sql/ce_functions_return_variable.sql index b599b1eb8..a657cd7b9 100644 --- a/src/test/regress/sql/ce_functions_return_variable.sql +++ b/src/test/regress/sql/ce_functions_return_variable.sql @@ -29,6 +29,27 @@ CALL f_processed_in_plpgsql('Bob',10000); DROP FUNCTION f_processed_in_plpgsql(); DROP TABLE accounts; + +CREATE TABLE creditcard_info1 (id_number int,name text, credit_card varchar(19)); +CREATE TABLE creditcard_info2 (id_number int,name text encrypted with (column_encryption_key = ret_cek3, encryption_type = DETERMINISTIC),credit_card varchar(19) encrypted with (column_encryption_key = ret_cek3, encryption_type = DETERMINISTIC)); +CREATE or replace FUNCTION exec_insert1() RETURNS void AS $$ + insert into creditcard_info1 values(1,2,3); + select credit_card from creditcard_info1; + $$ LANGUAGE SQL; +CREATE or replace FUNCTION exec_insert2() RETURNS void AS $$ + insert into creditcard_info2 values(1,2,3); + select credit_card from creditcard_info2; + $$ LANGUAGE SQL; +CREATE or replace FUNCTION exec_insert1() RETURNS int AS $$ + insert into creditcard_info1 values(1,2,3); + select credit_card from creditcard_info1; + $$ LANGUAGE SQL; +CREATE or replace FUNCTION exec_insert2() RETURNS int AS $$ + insert into creditcard_info2 values(1,2,3); + select credit_card from creditcard_info2; + $$ LANGUAGE SQL; +DROP TABLE creditcard_info1; +DROP TABLE creditcard_info2; DROP COLUMN ENCRYPTION KEY ret_cek3; DROP CLIENT MASTER KEY ret_cmk3; \! 
gs_ktool -d all
diff --git a/src/test/regress/sql/ce_insert_from_select_test.sql b/src/test/regress/sql/ce_insert_from_select_test.sql
index 14e824b9d..592098173 100644
--- a/src/test/regress/sql/ce_insert_from_select_test.sql
+++ b/src/test/regress/sql/ce_insert_from_select_test.sql
@@ -39,6 +39,13 @@ insert into creditcard_info select * from creditcard_info1 ;
 insert into creditcard_info1 select * from creditcard_info ;
 insert into creditcard_info1 select * from creditcard_info2 ;
 
+-- error
+INSERT INTO creditcard_info1(id_number, name, credit_card) SELECT id_number, name, credit_card FROM creditcard_info2;
+INSERT INTO creditcard_info1(id_number, credit_card) SELECT id_number, credit_card FROM creditcard_info2;
+
+-- succeed
+INSERT INTO creditcard_info1(id_number, name) SELECT id_number, name FROM creditcard_info2;
+
 drop table creditcard_info;
 drop table creditcard_info1;
 drop table creditcard_info2;
@@ -51,4 +58,4 @@ DROP COLUMN ENCRYPTION KEY MyCEK1;
 DROP CLIENT MASTER KEY MyCMK CASCADE;
 DROP CLIENT MASTER KEY MyCMK1 CASCADE;
 
-\! gs_ktool -d all
+\! gs_ktool -d all
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_kt_toughness.sql b/src/test/regress/sql/ce_kt_toughness.sql
new file mode 100644
index 000000000..384776c38
--- /dev/null
+++ b/src/test/regress/sql/ce_kt_toughness.sql
@@ -0,0 +1,38 @@
+\! rm -f $GAUSSHOME/etc/gs_ktool.log
+
+-- 1 primary file : not exist, secondary file : not exist | succeed
+\! rm -f $GAUSSHOME/etc/gs_ktool_file/*.dat && ls $GAUSSHOME/etc/gs_ktool_file/
+
+\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/
+
+-- 2 primary file : exist, secondary file : exist | succeed
+\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/
+
+-- 3 primary file : not exist, secondary file : exist | succeed
+\! rm -f $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat && ls $GAUSSHOME/etc/gs_ktool_file/
+
+\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/
+
+-- 4 primary file : exist, secondary file : not exist | succeed
+\! rm -f $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat && ls $GAUSSHOME/etc/gs_ktool_file/
+
+\! gs_ktool -g && ls $GAUSSHOME/etc/gs_ktool_file/
+
+-- 5 primary file : tainted, secondary file : normal | succeed
+\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat
+
+\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat | grep 'invalid data'
+
+-- 6 primary file : normal, secondary file : tainted | succeed
+\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat
+
+\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat | grep 'invalid data'
+
+-- 7 primary file : tainted, secondary file : tainted | failed
+\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat
+\! echo 'invalid data' > $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat
+\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/primary_ksf.dat | grep 'invalid data'
+\! gs_ktool -g && cat $GAUSSHOME/etc/gs_ktool_file/secondary_ksf.dat | grep 'invalid data'
+
+-- clear
+\!
rm -f $GAUSSHOME/etc/gs_ktool_file/*.dat \ No newline at end of file diff --git a/src/test/regress/sql/ce_orderby.sql b/src/test/regress/sql/ce_orderby.sql index 5f2d8abf1..76f7ecbd0 100644 --- a/src/test/regress/sql/ce_orderby.sql +++ b/src/test/regress/sql/ce_orderby.sql @@ -47,6 +47,6 @@ DROP COLUMN ENCRYPTION KEY public.UnsupportCEK; DROP CLIENT MASTER KEY testns.UnsupportCMK; DROP CLIENT MASTER KEY public.UnsupportCMK; -DROP SCHEMA testns CASCADE; + \! gs_ktool -d all diff --git a/src/test/regress/sql/ce_permission_on_keys_schema.sql b/src/test/regress/sql/ce_permission_on_keys_schema.sql index 0090fcb53..4ab6d2c4a 100644 --- a/src/test/regress/sql/ce_permission_on_keys_schema.sql +++ b/src/test/regress/sql/ce_permission_on_keys_schema.sql @@ -86,8 +86,8 @@ delete from gs_column_keys; RESET SESSION AUTHORIZATION; REVOKE USAGE ON COLUMN_ENCRYPTION_KEY MyCEK1 FROM newuser; REVOKE USAGE ON CLIENT_MASTER_KEY MyCMK1 FROM newuser; -GRANT DROP ON COLUMN_ENCRYPTION_KEY MyCEK1 to newuser; -GRANT DROP ON CLIENT_MASTER_KEY MyCMK1 to newuser; +GRANT DROP ON COLUMN_ENCRYPTION_KEY testns.MyCEK1 to newuser; +GRANT DROP ON CLIENT_MASTER_KEY testns.MyCMK1 to newuser; SELECT has_cmk_privilege('newuser', 'testns.MyCMK1', 'USAGE'); SELECT has_cek_privilege('newuser', 'testns.MyCEK1', 'USAGE'); @@ -118,4 +118,61 @@ DROP SCHEMA IF EXISTS testns CASCADE; DROP SCHEMA IF EXISTS newuser CASCADE; DROP ROLE IF EXISTS newuser; +\! gs_ktool -d all +\! gs_ktool -g + +CREATE USER d_user1 PASSWORD 'gauss@123'; +GRANT ALL ON SCHEMA public TO d_user1; +SET SESSION AUTHORIZATION d_user1 PASSWORD 'gauss@123'; +SET search_path to public; +CREATE CLIENT MASTER KEY MyCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY MyCEK1 WITH VALUES (CLIENT_MASTER_KEY = MyCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC)); +RESET SESSION AUTHORIZATION; +drop ROLE d_user1; +drop USER d_user1; +drop USER d_user1 cascade; +select count(*) from gs_client_global_keys; +select count(*) from gs_column_keys; +\! gs_ktool -d all + +\! 
gs_ktool -g +CREATE USER sysadmin1 with sysadmin PASSWORD "Gauss_234"; +CREATE USER rsr3 with PASSWORD "Gauss_234"; +CREATE USER user1 with PASSWORD "Gauss_234"; +RESET search_path; +set role user1 password "Gauss_234"; +CREATE CLIENT MASTER KEY MyCMK1 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); +CREATE COLUMN ENCRYPTION KEY MyCEK1 WITH VALUES (CLIENT_MASTER_KEY = MyCMK1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); +drop table if exists test_01; +CREATE TABLE test_01 +( + id_number INTEGER NOT NULL, + name VARCHAR(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK1, ENCRYPTION_TYPE = DETERMINISTIC) NOT NULL +) WITH (ORIENTATION = COLUMN, COMPRESSION=HIGH); +insert into test_01 values(1,123); + +set role sysadmin1 password "Gauss_234"; +insert into user1.test_01 values(1,123); + +set role user1 password "Gauss_234"; +insert into test_01 values(1,123); +select * from test_01; + +set role sysadmin1 password "Gauss_234"; +grant usage on schema user1 to rsr3; +grant insert on user1.test_01 to rsr3; + +set role rsr3 password "Gauss_234"; +select current_user; +insert into user1.test_01 values(1,123); + +reset role; +drop table if exists user1.test_01 cascade; +drop COLUMN ENCRYPTION KEY user1.MyCEK1 cascade; +drop CLIENT MASTER KEY user1.MyCMK1 cascade; + +drop USER rsr3 cascade; +drop USER user1 cascade; +drop USER sysadmin1 cascade; \! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_privileges_dba.sql b/src/test/regress/sql/ce_privileges_dba.sql index 5eb7b5d61..719355c2b 100644 --- a/src/test/regress/sql/ce_privileges_dba.sql +++ b/src/test/regress/sql/ce_privileges_dba.sql @@ -20,6 +20,27 @@ SELECT has_cek_privilege(session_user, 'MyCEK', 'USAGE'); DROP TABLE IF EXISTS acltest1; CREATE TABLE acltest1 (x int, x2 varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC)); DROP TABLE acltest1; + +CREATE USER user_check PASSWORD '1234567i*'; +SELECT has_cmk_privilege('user_check','mycmk','USAGE'); +SELECT has_cmk_privilege('mycmk','USAGE'); +SELECT has_cmk_privilege('user_check',(select oid from gs_client_global_keys where global_key_name='mycmk'),'USAGE'); +SELECT has_cmk_privilege((select oid from gs_client_global_keys where global_key_name='mycmk'),'USAGE'); +SELECT has_cmk_privilege((select oid from pg_authid where rolname='user_check'),'mycmk','USAGE'); +SELECT has_cmk_privilege((select oid from pg_authid where rolname='user_check'),(select oid from gs_client_global_keys where global_key_name='mycmk'),'USAGE'); + +SELECT has_cek_privilege('user_check','mycek','USAGE'); +SELECT has_cek_privilege('mycek','USAGE'); +SELECT has_cek_privilege('user_check',(select oid from gs_column_keys where column_key_name='mycek'),'USAGE'); +SELECT has_cek_privilege((select oid from gs_column_keys where column_key_name='mycek'),'USAGE'); +SELECT has_cek_privilege((select oid from pg_authid where rolname='user_check'),'mycek','USAGE'); +SELECT has_cek_privilege((select oid from pg_authid where rolname='user_check'),(select oid from gs_column_keys where column_key_name='mycek'),'USAGE'); + +GRANT ALL ON CLIENT_MASTER_KEY mycmk to user_check; +GRANT ALL ON COLUMN_ENCRYPTION_KEY mycek to user_check; +DROP OWNED BY user_check CASCADE; +DROP USER user_check; + DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; \! 
gs_ktool -d all
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_proc_test.sql b/src/test/regress/sql/ce_proc_test.sql
new file mode 100644
index 000000000..1d9998d10
--- /dev/null
+++ b/src/test/regress/sql/ce_proc_test.sql
@@ -0,0 +1,137 @@
+\! gs_ktool -d all
+\! gs_ktool -g
+
+DROP CLIENT MASTER KEY IF EXISTS proc_cmk2 CASCADE;
+CREATE CLIENT MASTER KEY proc_cmk2 WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+CREATE COLUMN ENCRYPTION KEY proc_cek2 WITH VALUES (CLIENT_MASTER_KEY = proc_cmk2, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
+
+-- function test 1: we need to support INSERT, SELECT, UPDATE and DELETE on CLIENT_LOGIC data inside functions, as well as creating CLIENT_LOGIC tables and creating a cmk/cek (which currently does not refresh the cache inside a function)
+create or replace function fun_001() returns void as $$
+declare
+begin
+    create table schema_tbl_001(a int, b int CLIENT_LOGIC WITH (COLUMN_SETTING = ImgCEK)) ;
+    insert into schema_tbl_001 values(1,1);
+end;
+$$ LANGUAGE plpgsql;
+call fun_001();
+select * from schema_tbl_001;
+\d schema_tbl_001
+
+
+-- function test 2
+CREATE TABLE sbtest1(
+    a int,
+    b INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '0' NOT NULL,
+    c CHAR(120) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL,
+    d CHAR(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL);
+
+create function select_data() returns table(a int, b INTEGER, c CHAR(120), d CHAR(60))
+as
+$BODY$
+begin
+return query(select * from sbtest1);
+end;
+$BODY$
+LANGUAGE plpgsql;
+call select_data();
+-- function test 3
+-- normal table
+CREATE TABLE basket_a (
+    id INT PRIMARY KEY,
+    fruit VARCHAR (100) NOT NULL,
+    age INT NOT NULL
+);
+
+CREATE TABLE basket_aa(
+    id INT,
+    fruit VARCHAR (100) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC),
+    age INT NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC)
+);
+
+CREATE FUNCTION MyInsert1(_id integer, _fruit varchar, _age integer)
+  RETURNS void AS
+  $BODY$
+      BEGIN
+          INSERT INTO basket_a(id,fruit,age)
+          VALUES(_id, _fruit, _age);
+      END;
+  $BODY$
+  LANGUAGE 'plpgsql' VOLATILE
+  COST 100;
+
+CREATE FUNCTION MyInsert2(_id integer, _fruit varchar, _age integer)
+  RETURNS void AS
+  $BODY$
+      BEGIN
+          INSERT INTO basket_aa(id,fruit,age)
+          VALUES(_id, _fruit, _age);
+      END;
+  $BODY$
+  LANGUAGE 'plpgsql' VOLATILE
+  COST 100;
+
+select * from MyInsert1(1,'apple',1 );
+select * from basket_a;
+select * from MyInsert2(1,'apple',1 );
+select * from basket_a;
+
+
+-- procedure test 1
+CREATE TABLE sbtest2(
+    id int,
+    k INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '0' NOT NULL,
+    c CHAR(120) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL,
+    pad CHAR(60) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT '' NOT NULL);
+
+insert into sbtest2 values(1,1,1,1);
+
+CREATE OR REPLACE PROCEDURE select2
+(
+    id IN int,
+    k OUT int,
+    c OUT int
+)
+AS
+BEGIN
+    EXECUTE IMMEDIATE 'select k, c from sbtest2 where id = 1'
+        INTO k, c
+        USING IN id;
+END;
+/
+call select2(1,a,b);
+
+
+-- procedure test 2
+create table staffs(staff_id int,
+first_name varchar(20) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC),
+salary numeric ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = proc_cek2, ENCRYPTION_TYPE = DETERMINISTIC));
+
+insert into staffs values(1,'Alice',12.23);
+
+CREATE OR REPLACE PROCEDURE dynamic_proc
+AS
+DECLARE
+    staff_id int;
+    first_name varchar(20);
+    salary numeric;
+BEGIN
+    EXECUTE IMMEDIATE 'begin select first_name, salary into :first_name, :salary from staffs where staff_id= 1; end;'
+        USING OUT first_name, OUT salary, IN staff_id;
+    dbe_output.print_line(first_name|| ' ' || salary);
+-- dbe_output.print_line itself likely does not need client-encryption support, since server-side user functions are out of scope; removing it does not affect the other operations
+END;
+/
+DROP TABLE schema_tbl_001 CASCADE;
+DROP TABLE basket_a CASCADE;
+DROP TABLE basket_aa CASCADE;
+DROP TABLE sbtest1 CASCADE;
+DROP TABLE sbtest2 CASCADE;
+DROP TABLE staffs CASCADE;
+DROP FUNCTION dynamic_proc;
+DROP FUNCTION fun_001;
+DROP FUNCTION myinsert1;
+DROP FUNCTION myinsert2;
+DROP FUNCTION select2;
+DROP FUNCTION select_data;
+DROP CLIENT MASTER KEY proc_cmk2 CASCADE;
+\! gs_ktool -d all
\ No newline at end of file
diff --git a/src/test/regress/sql/ce_procedure.sql b/src/test/regress/sql/ce_procedure.sql
index c24c9f33f..8da12d6c5 100644
--- a/src/test/regress/sql/ce_procedure.sql
+++ b/src/test/regress/sql/ce_procedure.sql
@@ -2,7 +2,6 @@
 -- unsupport procedure
 -- -------------------------
 
---set enable_stream_operator = on;
 
 ---------------------forall------------------------
 SET CHECK_FUNCTION_BODIES TO ON;
@@ -55,10 +54,9 @@ CREATE OR REPLACE PROCEDURE INSERT_IMAGE
 )
 AS
 BEGIN
-    INSERT INTO Image VALUES ( id_param, title_param, artist_param, description_param, dataTime_param, xresolution_param, yresolution_param, resolution_unit_param, imageSize_param, alititude_param, latitude_param, longitude_param, imagePath_param);
+    INSERT INTO Image VALUES ( id_param, artist_param, artist_param, description_param, dataTime_param, xresolution_param, yresolution_param, resolution_unit_param, imageSize_param, alititude_param, latitude_param, longitude_param, imagePath_param);
 END;
 /
-
 CALL INSERT_IMAGE(8, 'img4214196','ZAVIER', 'a river', '2019-11-22 12:45:26', 720, 720, 'px', 1244, 510, 29.75, 105.79, '/DCIM/Camera/img4214196');
 
 CREATE OR REPLACE PROCEDURE UPDATE_DESCRIPTION(title_param IN VARCHAR(30), description_param IN TEXT, result OUT VARCHAR(30))
diff --git a/src/test/regress/sql/ce_rlspolicy.sql b/src/test/regress/sql/ce_rlspolicy.sql
index b5603523d..980e79dd2 100644
--- a/src/test/regress/sql/ce_rlspolicy.sql
+++ b/src/test/regress/sql/ce_rlspolicy.sql
@@ -11,6 +11,8 @@ INSERT INTO all_data VALUES(1, 'alice', 'alice data');
 INSERT INTO all_data VALUES(2, 'bob', 'bob data');
 INSERT INTO all_data VALUES(3, 'peter', 'peter data');
 GRANT SELECT ON all_data TO alice, bob;
+GRANT USAGE ON COLUMN_ENCRYPTION_KEY rlspolicy_cek to alice, bob;
+GRANT USAGE ON CLIENT_MASTER_KEY rlspolicy_cmk to alice, bob;
 ALTER TABLE all_data ENABLE ROW LEVEL SECURITY;
 CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = 'alice');
 SET ROLE alice PASSWORD 'Gauss@123';
@@ -22,10 +24,10 @@ SELECT * FROM all_data;
 RESET ROLE;
 DROP ROW LEVEL SECURITY POLICY all_data_rls ON all_data;
 DROP TABLE all_data;
-DROP ROLE alice;
-DROP ROLE bob;
 DROP COLUMN ENCRYPTION KEY rlspolicy_cek;
 DROP CLIENT MASTER KEY rlspolicy_cmk;
+DROP ROLE alice;
+DROP ROLE bob;
 
 CREATE USER rlspolicy_user1 PASSWORD 'gauss@123';
 CREATE USER rlspolicy_user2 PASSWORD 'gauss@123';
diff --git a/src/test/regress/sql/ce_select.sql b/src/test/regress/sql/ce_select.sql
index f2a3a9e73..d8ed8d9e5 100644
--- a/src/test/regress/sql/ce_select.sql
+++ b/src/test/regress/sql/ce_select.sql
@@ -50,6 +50,8 @@ INSERT INTO creditcard_info3 VALUES (3, 'xiaoli','xiaoli', 621187780);
 explain INSERT INTO creditcard_info3 VALUES (3, 'xiaoli','xiaoli', 621187780);
 
 -- supported
+select * from creditcard_info2 where regression.public.creditcard_info2.name1 = (select name1 from creditcard_info3 order by id_number limit 1);
+select * from creditcard_info2 where public.creditcard_info2.name1 = (select name1 from creditcard_info3 order by id_number limit 1);
 select * from creditcard_info2 where name1 = (select name1 from creditcard_info3 order by id_number limit 1);
 select * from (select * from creditcard_info3) where credit_card = 62176500;
 select name2 from (select * from creditcard_info3) group by name1 ,name2 having name1 = 'joe';
diff --git a/src/test/regress/sql/ce_select_where_encrypt_test.sql b/src/test/regress/sql/ce_select_where_encrypt_test.sql
index 12e143d81..964525ae2 100644
--- a/src/test/regress/sql/ce_select_where_encrypt_test.sql
+++ b/src/test/regress/sql/ce_select_where_encrypt_test.sql
@@ -5,64 +5,64 @@ DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE;
 CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
 CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
 
-CREATE TABLE customer_ce (
-    customer_ce_id integer NOT NULL,
+CREATE TABLE customer (
+    customer_id integer NOT NULL,
     id integer ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC),
     first_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC),
     last_name character varying(45) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC),
     alias_name character (50) NOT NULL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = MyCEK, ENCRYPTION_TYPE = DETERMINISTIC) DEFAULT 'ali'
 );
-INSERT INTO customer_ce VALUES(1,1,'Jared','Ely');
-INSERT INTO customer_ce VALUES(1,2,'Mary','Smith');
-INSERT INTO customer_ce VALUES(1,3,'Patricia','Johnson');
-INSERT INTO customer_ce VALUES(1,4,'Linda','Williams');
-INSERT INTO customer_ce VALUES(1,5,'Barbara','Jones');
-INSERT INTO customer_ce VALUES(1,6,'Elizabeth','Brown');
-INSERT INTO customer_ce VALUES(1,7,'Jennifer','Davis');
-INSERT INTO customer_ce VALUES(1,8,'Maria','Miller');
-INSERT INTO customer_ce VALUES(1,9,'Susan','Wilson');
-INSERT INTO customer_ce VALUES(1,10,'Margaret','Moore');
-INSERT INTO customer_ce VALUES(1,11,'Dorothy','Taylor');
-INSERT INTO customer_ce VALUES(1,12,'Lisa','Anderson');
-INSERT INTO customer_ce VALUES(1,13,'Nancy','Thomas');
-INSERT INTO customer_ce VALUES(1,14,'Karen','Jackson');
-INSERT INTO customer_ce VALUES(1,15,'Betty','White');
-INSERT INTO customer_ce VALUES(1,16,'Helen','Harris');
-INSERT INTO customer_ce VALUES(1,17,'Sandra','Martin');
-INSERT INTO customer_ce VALUES(1,18,'Adam','Rodriguez');
-INSERT INTO customer_ce VALUES(1,19,'Carol','Garcia');
-INSERT INTO customer_ce VALUES(1,20,'Jamie','Rice');
-INSERT INTO customer_ce VALUES(1,21,'Annette','Olson');
-INSERT INTO customer_ce VALUES(1,22,'Annie','Russell');
+INSERT INTO customer VALUES(1,1,'Jared','Ely');
+INSERT INTO customer VALUES(1,2,'Mary','Smith');
+INSERT INTO customer VALUES(1,3,'Patricia','Johnson');
+INSERT INTO customer VALUES(1,4,'Linda','Williams');
+INSERT INTO customer VALUES(1,5,'Barbara','Jones');
+INSERT INTO customer
VALUES(1,6,'Elizabeth','Brown'); +INSERT INTO customer VALUES(1,7,'Jennifer','Davis'); +INSERT INTO customer VALUES(1,8,'Maria','Miller'); +INSERT INTO customer VALUES(1,9,'Susan','Wilson'); +INSERT INTO customer VALUES(1,10,'Margaret','Moore'); +INSERT INTO customer VALUES(1,11,'Dorothy','Taylor'); +INSERT INTO customer VALUES(1,12,'Lisa','Anderson'); +INSERT INTO customer VALUES(1,13,'Nancy','Thomas'); +INSERT INTO customer VALUES(1,14,'Karen','Jackson'); +INSERT INTO customer VALUES(1,15,'Betty','White'); +INSERT INTO customer VALUES(1,16,'Helen','Harris'); +INSERT INTO customer VALUES(1,17,'Sandra','Martin'); +INSERT INTO customer VALUES(1,18,'Adam','Rodriguez'); +INSERT INTO customer VALUES(1,19,'Carol','Garcia'); +INSERT INTO customer VALUES(1,20,'Jamie','Rice'); +INSERT INTO customer VALUES(1,21,'Annette','Olson'); +INSERT INTO customer VALUES(1,22,'Annie','Russell'); -select * from customer_ce where customer_ce_id = 1 AND id = 1; -select * from customer_ce where customer_ce_id = 2 OR first_name = 'Jamie'; -select * from customer_ce where id = 1; -select * from customer_ce where id <> 1; +select * from customer where customer_id = 1 AND id = 1; +select * from customer where customer_id = 2 OR first_name = 'Jamie'; +select * from customer where id = 1; +select * from customer where id <> 1; -SELECT last_name, first_name FROM customer_ce WHERE first_name = 'Jamie'; -SELECT last_name, first_name FROM customer_ce WHERE first_name <> 'Jamie'; -SELECT last_name, first_name FROM customer_ce WHERE first_name = 'Jamie' AND last_name = 'Rice'; -SELECT first_name, last_name FROM customer_ce WHERE last_name = 'Rodriguez' OR first_name = 'Adam'; +SELECT last_name, first_name FROM customer WHERE first_name = 'Jamie'; +SELECT last_name, first_name FROM customer WHERE first_name <> 'Jamie'; +SELECT last_name, first_name FROM customer WHERE first_name = 'Jamie' AND last_name = 'Rice'; +SELECT first_name, last_name FROM customer WHERE last_name = 'Rodriguez' OR first_name = 'Adam'; SELECT first_name, last_name FROM - customer_ce + customer WHERE first_name IN ('Ann','Anne','Annie'); SELECT first_name, last_name FROM - customer_ce + customer WHERE first_name LIKE 'Ann%'; SELECT first_name, LENGTH(first_name) name_length FROM - customer_ce + customer WHERE first_name LIKE 'A%' AND LENGTH(first_name) BETWEEN 3 AND 5 @@ -72,7 +72,7 @@ SELECT first_name, last_name FROM - customer_ce + customer WHERE first_name LIKE 'Bra%' AND last_name <> 'Motley'; @@ -80,13 +80,13 @@ SELECT first_name, last_name FROM - customer_ce + customer WHERE - customer_ce.first_name = 'Jamie'; -SELECT * from customer_ce where id > 1; -SELECT * from customer_ce where id < 1; -SELECT * from customer_ce where id != 1; -DROP TABLE customer_ce; + customer.first_name = 'Jamie'; +SELECT * from customer where id > 1; +SELECT * from customer where id < 1; +SELECT * from customer where id != 1; +DROP TABLE customer; DROP COLUMN ENCRYPTION KEY MyCEK; DROP CLIENT MASTER KEY MyCMK; diff --git a/src/test/regress/sql/ce_table_type.sql b/src/test/regress/sql/ce_table_type.sql index 941cf79f5..19096671d 100644 --- a/src/test/regress/sql/ce_table_type.sql +++ b/src/test/regress/sql/ce_table_type.sql @@ -165,10 +165,25 @@ CREATE TABLE t_distributeby2( CREATE TABLE t_distributeby3( id_number int, name text, - data text) distribute by list(name)(slice s1 values (('China'),('Germary')),slice s2 values (('Japan')), slice s3 values (('USA')), slice s4 values (default)); + data text) distribute by list(name)(slice s1 values (('China')),slice s2 values 
(('Japan')), slice s3 values (('USA')), slice s4 values ('Germary'), + slice s5 values ('Israel'), slice s6 values ('India'), slice s7 values ('Peru'), slice s8 values ('Thailand'), + slice s9 values ('South Africa'), slice s10 values ('New Zealand'), slice s11 values ('Nepal'), slice s12 values (default)); +CREATE TABLE t_distributeby4( + id_number int, + name text, + data text encrypted with(column_encryption_key = distributeby_cek,encryption_type = DETERMINISTIC)) +distribute by list(name)(slice s1 values (('China')),slice s2 values (('Japan')), slice s3 values (('USA')), slice s4 values ('Germary'), + slice s5 values ('Israel'), slice s6 values ('India'), slice s7 values ('Peru'), slice s8 values ('Thailand'), + slice s9 values ('South Africa'), slice s10 values ('New Zealand'), slice s11 values ('Nepal'), slice s12 values (default)); +create table ce_t1 (id BYTEAWITHOUTORDERWITHEQUALCOL); +create table ce_t2 (id BYTEAWITHOUTORDERCOL); + +DROP table IF EXISTS ce_t1; +DROP table IF EXISTS ce_t2; DROP table IF EXISTS t_distributeby1; DROP table IF EXISTS t_distributeby2; DROP table IF EXISTS t_distributeby3; +DROP table IF EXISTS t_distributeby4; DROP COLUMN ENCRYPTION KEY IF EXISTS distributeby_cek; DROP CLIENT MASTER KEY IF EXISTS distributeby_cmk; \! gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_trigger.sql b/src/test/regress/sql/ce_trigger.sql index 27a53772d..4a514e472 100644 --- a/src/test/regress/sql/ce_trigger.sql +++ b/src/test/regress/sql/ce_trigger.sql @@ -144,6 +144,9 @@ DROP TRIGGER update_trigger ON test_trigger_src_tbl; DROP TRIGGER delete_trigger ON test_trigger_src_tbl; DROP TABLE IF EXISTS test_trigger_src_tbl; DROP TABLE IF EXISTS test_trigger_des_tbl; +DROP FUNCTION tri_delete_func; +DROP FUNCTION tri_insert_func; +DROP FUNCTION tri_update_func; DROP COLUMN ENCRYPTION KEY triggerCEK1; DROP COLUMN ENCRYPTION KEY triggerCEK2; diff --git a/src/test/regress/sql/ce_type_char.sql b/src/test/regress/sql/ce_type_char.sql index 1ba4704b5..fa11d4bb0 100644 --- a/src/test/regress/sql/ce_type_char.sql +++ b/src/test/regress/sql/ce_type_char.sql @@ -52,8 +52,8 @@ UPDATE char_type SET c4 = 'dd' where c4 = 'cc'; SELECT * from char_type order by c1; -- test empty data -insert into char_type values(12, '', '','','', '','', '', '', '',''); -insert into char_type values(13, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); +insert into char_type values(8, '', '','','', '','', '', '', '',''); +insert into char_type values(9, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); SELECT * from char_type order by c1; create table IF NOT EXISTS char_type_enc1( diff --git a/src/test/regress/sql/ce_type_float.sql b/src/test/regress/sql/ce_type_float.sql index 9c6a647c3..55f8d02c4 100644 --- a/src/test/regress/sql/ce_type_float.sql +++ b/src/test/regress/sql/ce_type_float.sql @@ -6,7 +6,7 @@ CREATE CLIENT MASTER KEY floatCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_k CREATE COLUMN ENCRYPTION KEY floatCEK WITH VALUES (CLIENT_MASTER_KEY = floatCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); CREATE TABLE IF NOT EXISTS float_type_t1(id INT, fl_col1 float4 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), -fl_col2 float8 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +fl_col2 float8 ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); INSERT INTO float_type_t1 (id, fl_col1, fl_col2) VALUES (1, 5.555555, 5.555555567876534); INSERT INTO 
float_type_t1 (id, fl_col1, fl_col2) VALUES (2, -5.5555556, -5.5555555678765342); @@ -37,9 +37,9 @@ SELECT * from float_type_t1 order by id; DROP TABLE float_type_t1; -CREATE TABLE IF NOT EXISTS t_float_1(id INT, num float(1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS t_float_53(id INT, num float(53) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS t_float_3(id INT, num float(3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +CREATE TABLE IF NOT EXISTS t_float_1(id INT, num float(1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS t_float_53(id INT, num float(53) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS t_float_3(id INT, num float(3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); CREATE TABLE IF NOT EXISTS t_float_4(id INT, num1 NUMERIC(10,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), num2 decimal(10,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC), @@ -47,8 +47,8 @@ num3 NUMBER(10,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_T num4 INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC) ); --core dump ---CREATE TABLE IF NOT EXISTS t_float_54(id INT, num float(54) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; ---CREATE TABLE IF NOT EXISTS t_float_0(id INT, num float(0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +--CREATE TABLE IF NOT EXISTS t_float_54(id INT, num float(54) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +--CREATE TABLE IF NOT EXISTS t_float_0(id INT, num float(0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); INSERT INTO t_float_3 (id, num) VALUES (1, 123.333); INSERT INTO t_float_3 (id, num) VALUES (2, 123.4445); @@ -63,10 +63,10 @@ DROP TABLE IF EXISTS t_float_4; DROP TABLE IF EXISTS t_float_1; DROP TABLE IF EXISTS t_float_53; -CREATE TABLE IF NOT EXISTS float_type_t2_test1(id INT, num DECIMAL(5,5) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS float_type_t2_test2(id INT, num DECIMAL(5,0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS float_type_t2_test3(id INT, num DECIMAL(5,6) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; -CREATE TABLE IF NOT EXISTS float_type_t2_test4(id INT, num DECIMAL(5,-1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)) ; +CREATE TABLE IF NOT EXISTS float_type_t2_test1(id INT, num DECIMAL(5,5) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS float_type_t2_test2(id INT, num DECIMAL(5,0) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS float_type_t2_test3(id INT, num DECIMAL(5,6) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)); +CREATE TABLE IF NOT EXISTS float_type_t2_test4(id INT, num DECIMAL(5,-1) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, 
ENCRYPTION_TYPE = DETERMINISTIC));
 
 CREATE TABLE IF NOT EXISTS float_type_t2(id INT,
 d1 DECIMAL(5,3) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC),
@@ -75,7 +75,7 @@ d3 DECIMAL ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = D
 n1 NUMERIC ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC),
 n2 NUMERIC (5) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC),
 n3 NUMERIC (5,2) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = floatCEK, ENCRYPTION_TYPE = DETERMINISTIC)
-) ;
+);
 
 INSERT INTO float_type_t2 VALUES (1, 0, 0, 0, 0, 0, 0);
 
diff --git a/src/test/regress/sql/ce_type_int.sql b/src/test/regress/sql/ce_type_int.sql
index a406bd8e8..163fa0d75 100644
--- a/src/test/regress/sql/ce_type_int.sql
+++ b/src/test/regress/sql/ce_type_int.sql
@@ -10,7 +10,7 @@ int_col2 smallint ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYP
 int_col3 INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC),
 int_col4 BINARY_INTEGER ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC),
 int_col5 BIGINT ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = intCEK, ENCRYPTION_TYPE = DETERMINISTIC)
-) ;
+);
diff --git a/src/test/regress/sql/ce_type_money.sql b/src/test/regress/sql/ce_type_money.sql
index b7232f6f0..7e5c6d09f 100644
--- a/src/test/regress/sql/ce_type_money.sql
+++ b/src/test/regress/sql/ce_type_money.sql
@@ -5,6 +5,35 @@ DROP CLIENT MASTER KEY IF EXISTS moneyCMK CASCADE;
 CREATE CLIENT MASTER KEY moneyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
 CREATE COLUMN ENCRYPTION KEY moneyCEK WITH VALUES (CLIENT_MASTER_KEY = moneyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);
 
+
+-- create table IF NOT EXISTS money_type(c1 int,
+-- c2 money
+-- );
+
+-- -- -92233720368547758.08 to +92233720368547758.07
+-- insert into money_type values(1, 92233720368547758.07);
+-- insert into money_type values(2, -92233720368547758.08);
+-- insert into money_type values(3, 0);
+-- insert into money_type values(4, 12.3456);
+-- insert into money_type values(5, -12.3456);
+-- insert into money_type values(6, 92233720368547758.08);
+-- insert into money_type values(7, -92233720368547758.09);
+
+-- -- when a money value is inserted into the encrypted column, it loses its money formatting and range
+-- select * from money_type ORDER BY c1;
+
+-- select * from money_type where c2 = '$12.3456';
+
+-- DELETE FROM money_type where c2 = '$12.3456';
+-- select * from money_type ORDER BY c1;
+
+-- DELETE FROM money_type as alias_test where alias_test.c2 = '$-12.3456';
+-- select * from money_type ORDER BY c1;
+
+-- UPDATE money_type SET c2 = 23.2 where c2 = '$0';
+-- select * from money_type ORDER BY c1;
+
+
 create table IF NOT EXISTS money_type_enc(c1 int,
 c2 money ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = moneyCEK, ENCRYPTION_TYPE = DETERMINISTIC)
 );
@@ -14,4 +43,4 @@ DROP TABLE IF EXISTS money_type_enc;
 DROP COLUMN ENCRYPTION KEY moneyCEK;
 DROP CLIENT MASTER KEY moneyCMK;
 
-\! gs_ktool -d all
+\!
gs_ktool -d all \ No newline at end of file diff --git a/src/test/regress/sql/ce_verify_column_alter.sql b/src/test/regress/sql/ce_verify_column_alter.sql index 2a968ac11..5d238dc64 100644 --- a/src/test/regress/sql/ce_verify_column_alter.sql +++ b/src/test/regress/sql/ce_verify_column_alter.sql @@ -13,6 +13,7 @@ INSERT INTO t_varchar (id, name) VALUES (1, 'MyName'); SELECT * from t_varchar; ALTER table t_varchar RENAME COLUMN name TO newname; SELECT c.relname, g.column_name from gs_encrypted_columns g join pg_class c on (g.rel_id=c.oid); +SELECT * FROM t_varchar where newname = 'MyName'; --verify tablename alter @@ -21,6 +22,7 @@ SELECT * from t_varchar; SELECT relname from pg_class join gs_encrypted_columns on pg_class.oid = gs_encrypted_columns.rel_id; ALTER table t_varchar RENAME TO newtable; SELECT * FROM newtable; +SELECT * FROM newtable where newname = 'MyName'; SELECT relname from pg_class join gs_encrypted_columns on pg_class.oid = gs_encrypted_columns.rel_id; DROP TABLE newtable; diff --git a/src/test/regress/sql/ce_verify_schema_alter.sql b/src/test/regress/sql/ce_verify_schema_alter.sql index 8ab23b5f9..2ec04fe6d 100644 --- a/src/test/regress/sql/ce_verify_schema_alter.sql +++ b/src/test/regress/sql/ce_verify_schema_alter.sql @@ -12,6 +12,7 @@ SELECT * from test.t_varchar; ALTER SCHEMA test RENAME TO test1; SELECT c.relname, g.column_name from gs_encrypted_columns g join pg_class c on (g.rel_id=c.oid); SELECT * FROM test1.t_varchar; +SELECT * FROM test1.t_varchar WHERE name = 'MyName'; DROP TABLE test1.t_varchar; DROP SCHEMA test1; DROP COLUMN ENCRYPTION KEY MyCEK; diff --git a/src/test/regress/sql/ce_view.sql b/src/test/regress/sql/ce_view.sql index 38c07dcd5..f05bdec2d 100644 --- a/src/test/regress/sql/ce_view.sql +++ b/src/test/regress/sql/ce_view.sql @@ -4,9 +4,9 @@ DROP CLIENT MASTER KEY IF EXISTS MyCMK CASCADE; CREATE CLIENT MASTER KEY MyCMK WITH ( KEY_STORE = gs_ktool , KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC); CREATE COLUMN ENCRYPTION KEY MyCEK WITH VALUES (CLIENT_MASTER_KEY = MyCMK, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256); -DROP TABLE IF EXISTS public.client_customer CASCADE; -CREATE TABLE public.client_customer ( - client_customer_id integer, +DROP TABLE IF EXISTS public.customer CASCADE; +CREATE TABLE public.customer ( + customer_id integer, store_id integer NOT NULL, first_name character varying(45) NOT NULL, last_name character varying(45) NOT NULL, @@ -41,27 +41,605 @@ CREATE TABLE public.country ( country character varying(50) NOT NULL, last_update timestamp without time zone DEFAULT now() NOT NULL ); -INSERT INTO client_customer VALUES (1, 1, 'Mary', 'Smith', 'mary.smith@sakilaclient_customer.org', 5, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (2, 1, 'Patricia', 'Johnson', 'patricia.johnson@sakilaclient_customer.org', 6, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (3, 1, 'Linda', 'Williams', 'linda.williams@sakilaclient_customer.org', 7, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (4, 2, 'Barbara', 'Jones', 'barbara.jones@sakilaclient_customer.org', 8, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (5, 1, 'Elizabeth', 'Brown', 'elizabeth.brown@sakilaclient_customer.org', 9, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (6, 2, 'Jennifer', 'Davis', 'jennifer.davis@sakilaclient_customer.org', 10, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); 
-INSERT INTO client_customer VALUES (7, 1, 'Maria', 'Miller', 'maria.miller@sakilaclient_customer.org', 11, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (8, 2, 'Susan', 'Wilson', 'susan.wilson@sakilaclient_customer.org', 12, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (9, 2, 'Margaret', 'Moore', 'margaret.moore@sakilaclient_customer.org', 13, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (10, 1, 'Dorothy', 'Taylor', 'dorothy.taylor@sakilaclient_customer.org', 14, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (11, 2, 'Lisa', 'Anderson', 'lisa.anderson@sakilaclient_customer.org', 15, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (12, 1, 'Nancy', 'Thomas', 'nancy.thomas@sakilaclient_customer.org', 16, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (13, 2, 'Karen', 'Jackson', 'karen.jackson@sakilaclient_customer.org', 17, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (14, 2, 'Betty', 'White', 'betty.white@sakilaclient_customer.org', 18, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (15, 1, 'Helen', 'Harris', 'helen.harris@sakilaclient_customer.org', 19, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (16, 2, 'Sandra', 'Martin', 'sandra.martin@sakilaclient_customer.org', 20, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); -INSERT INTO client_customer VALUES (17, 1, 'Donna', 'Thompson', 'donna.thompson@sakilaclient_customer.org', 21, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (18, 2, 'Carol', 'Garcia', 'carol.garcia@sakilaclient_customer.org', 22, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (19, 1, 'Gina', 'Williamson', 'gina.williamson@sakilaclient_customer.org', 217, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); -INSERT INTO client_customer VALUES (20, 1, 'Derrick', 'Bourque', 'derrick.bourque@sakilaclient_customer.org', 481, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); - +INSERT INTO customer VALUES (524, 1, 'Jared', 'Ely', 'jared.ely@sakilacustomer.org', 530, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (1, 1, 'Mary', 'Smith', 'mary.smith@sakilacustomer.org', 5, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (2, 1, 'Patricia', 'Johnson', 'patricia.johnson@sakilacustomer.org', 6, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (3, 1, 'Linda', 'Williams', 'linda.williams@sakilacustomer.org', 7, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (4, 2, 'Barbara', 'Jones', 'barbara.jones@sakilacustomer.org', 8, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (5, 1, 'Elizabeth', 'Brown', 'elizabeth.brown@sakilacustomer.org', 9, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (6, 2, 'Jennifer', 'Davis', 'jennifer.davis@sakilacustomer.org', 10, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (7, 1, 'Maria', 'Miller', 'maria.miller@sakilacustomer.org', 11, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (8, 2, 'Susan', 'Wilson', 'susan.wilson@sakilacustomer.org', 12, 't', '2006-02-14', '2013-05-26 
14:49:45.738', 1); +INSERT INTO customer VALUES (9, 2, 'Margaret', 'Moore', 'margaret.moore@sakilacustomer.org', 13, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (10, 1, 'Dorothy', 'Taylor', 'dorothy.taylor@sakilacustomer.org', 14, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (11, 2, 'Lisa', 'Anderson', 'lisa.anderson@sakilacustomer.org', 15, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (12, 1, 'Nancy', 'Thomas', 'nancy.thomas@sakilacustomer.org', 16, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (13, 2, 'Karen', 'Jackson', 'karen.jackson@sakilacustomer.org', 17, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (14, 2, 'Betty', 'White', 'betty.white@sakilacustomer.org', 18, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (15, 1, 'Helen', 'Harris', 'helen.harris@sakilacustomer.org', 19, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (16, 2, 'Sandra', 'Martin', 'sandra.martin@sakilacustomer.org', 20, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (17, 1, 'Donna', 'Thompson', 'donna.thompson@sakilacustomer.org', 21, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (18, 2, 'Carol', 'Garcia', 'carol.garcia@sakilacustomer.org', 22, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (19, 1, 'Ruth', 'Martinez', 'ruth.martinez@sakilacustomer.org', 23, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (20, 2, 'Sharon', 'Robinson', 'sharon.robinson@sakilacustomer.org', 24, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (21, 1, 'Michelle', 'Clark', 'michelle.clark@sakilacustomer.org', 25, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (22, 1, 'Laura', 'Rodriguez', 'laura.rodriguez@sakilacustomer.org', 26, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (23, 2, 'Sarah', 'Lewis', 'sarah.lewis@sakilacustomer.org', 27, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (24, 2, 'Kimberly', 'Lee', 'kimberly.lee@sakilacustomer.org', 28, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (25, 1, 'Deborah', 'Walker', 'deborah.walker@sakilacustomer.org', 29, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (26, 2, 'Jessica', 'Hall', 'jessica.hall@sakilacustomer.org', 30, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (27, 2, 'Shirley', 'Allen', 'shirley.allen@sakilacustomer.org', 31, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (28, 1, 'Cynthia', 'Young', 'cynthia.young@sakilacustomer.org', 32, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (29, 2, 'Angela', 'Hernandez', 'angela.hernandez@sakilacustomer.org', 33, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (30, 1, 'Melissa', 'King', 'melissa.king@sakilacustomer.org', 34, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (31, 2, 'Brenda', 'Wright', 'brenda.wright@sakilacustomer.org', 35, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (32, 1, 'Amy', 'Lopez', 'amy.lopez@sakilacustomer.org', 36, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO 
customer VALUES (33, 2, 'Anna', 'Hill', 'anna.hill@sakilacustomer.org', 37, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (34, 2, 'Rebecca', 'Scott', 'rebecca.scott@sakilacustomer.org', 38, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (35, 2, 'Virginia', 'Green', 'virginia.green@sakilacustomer.org', 39, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (36, 2, 'Kathleen', 'Adams', 'kathleen.adams@sakilacustomer.org', 40, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (37, 1, 'Pamela', 'Baker', 'pamela.baker@sakilacustomer.org', 41, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (38, 1, 'Martha', 'Gonzalez', 'martha.gonzalez@sakilacustomer.org', 42, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (39, 1, 'Debra', 'Nelson', 'debra.nelson@sakilacustomer.org', 43, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (40, 2, 'Amanda', 'Carter', 'amanda.carter@sakilacustomer.org', 44, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (41, 1, 'Stephanie', 'Mitchell', 'stephanie.mitchell@sakilacustomer.org', 45, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (42, 2, 'Carolyn', 'Perez', 'carolyn.perez@sakilacustomer.org', 46, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (43, 2, 'Christine', 'Roberts', 'christine.roberts@sakilacustomer.org', 47, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (44, 1, 'Marie', 'Turner', 'marie.turner@sakilacustomer.org', 48, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (45, 1, 'Janet', 'Phillips', 'janet.phillips@sakilacustomer.org', 49, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (46, 2, 'Catherine', 'Campbell', 'catherine.campbell@sakilacustomer.org', 50, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (47, 1, 'Frances', 'Parker', 'frances.parker@sakilacustomer.org', 51, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (48, 1, 'Ann', 'Evans', 'ann.evans@sakilacustomer.org', 52, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (49, 2, 'Joyce', 'Edwards', 'joyce.edwards@sakilacustomer.org', 53, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (50, 1, 'Diane', 'Collins', 'diane.collins@sakilacustomer.org', 54, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (51, 1, 'Alice', 'Stewart', 'alice.stewart@sakilacustomer.org', 55, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (52, 1, 'Julie', 'Sanchez', 'julie.sanchez@sakilacustomer.org', 56, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (53, 1, 'Heather', 'Morris', 'heather.morris@sakilacustomer.org', 57, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (54, 1, 'Teresa', 'Rogers', 'teresa.rogers@sakilacustomer.org', 58, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (55, 2, 'Doris', 'Reed', 'doris.reed@sakilacustomer.org', 59, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (56, 1, 'Gloria', 'Cook', 'gloria.cook@sakilacustomer.org', 60, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES 
(57, 2, 'Evelyn', 'Morgan', 'evelyn.morgan@sakilacustomer.org', 61, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (58, 1, 'Jean', 'Bell', 'jean.bell@sakilacustomer.org', 62, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (59, 1, 'Cheryl', 'Murphy', 'cheryl.murphy@sakilacustomer.org', 63, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (60, 1, 'Mildred', 'Bailey', 'mildred.bailey@sakilacustomer.org', 64, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (61, 2, 'Katherine', 'Rivera', 'katherine.rivera@sakilacustomer.org', 65, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (62, 1, 'Joan', 'Cooper', 'joan.cooper@sakilacustomer.org', 66, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (63, 1, 'Ashley', 'Richardson', 'ashley.richardson@sakilacustomer.org', 67, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (64, 2, 'Judith', 'Cox', 'judith.cox@sakilacustomer.org', 68, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (65, 2, 'Rose', 'Howard', 'rose.howard@sakilacustomer.org', 69, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (66, 2, 'Janice', 'Ward', 'janice.ward@sakilacustomer.org', 70, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (67, 1, 'Kelly', 'Torres', 'kelly.torres@sakilacustomer.org', 71, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (68, 1, 'Nicole', 'Peterson', 'nicole.peterson@sakilacustomer.org', 72, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (69, 2, 'Judy', 'Gray', 'judy.gray@sakilacustomer.org', 73, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (70, 2, 'Christina', 'Ramirez', 'christina.ramirez@sakilacustomer.org', 74, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (71, 1, 'Kathy', 'James', 'kathy.james@sakilacustomer.org', 75, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (72, 2, 'Theresa', 'Watson', 'theresa.watson@sakilacustomer.org', 76, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (73, 2, 'Beverly', 'Brooks', 'beverly.brooks@sakilacustomer.org', 77, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (74, 1, 'Denise', 'Kelly', 'denise.kelly@sakilacustomer.org', 78, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (75, 2, 'Tammy', 'Sanders', 'tammy.sanders@sakilacustomer.org', 79, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (76, 2, 'Irene', 'Price', 'irene.price@sakilacustomer.org', 80, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (77, 2, 'Jane', 'Bennett', 'jane.bennett@sakilacustomer.org', 81, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (78, 1, 'Lori', 'Wood', 'lori.wood@sakilacustomer.org', 82, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (79, 1, 'Rachel', 'Barnes', 'rachel.barnes@sakilacustomer.org', 83, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (80, 1, 'Marilyn', 'Ross', 'marilyn.ross@sakilacustomer.org', 84, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (81, 1, 'Andrea', 'Henderson', 
'andrea.henderson@sakilacustomer.org', 85, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (82, 1, 'Kathryn', 'Coleman', 'kathryn.coleman@sakilacustomer.org', 86, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (83, 1, 'Louise', 'Jenkins', 'louise.jenkins@sakilacustomer.org', 87, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (84, 2, 'Sara', 'Perry', 'sara.perry@sakilacustomer.org', 88, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (85, 2, 'Anne', 'Powell', 'anne.powell@sakilacustomer.org', 89, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (86, 2, 'Jacqueline', 'Long', 'jacqueline.long@sakilacustomer.org', 90, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (87, 1, 'Wanda', 'Patterson', 'wanda.patterson@sakilacustomer.org', 91, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (88, 2, 'Bonnie', 'Hughes', 'bonnie.hughes@sakilacustomer.org', 92, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (89, 1, 'Julia', 'Flores', 'julia.flores@sakilacustomer.org', 93, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (90, 2, 'Ruby', 'Washington', 'ruby.washington@sakilacustomer.org', 94, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (91, 2, 'Lois', 'Butler', 'lois.butler@sakilacustomer.org', 95, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (92, 2, 'Tina', 'Simmons', 'tina.simmons@sakilacustomer.org', 96, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (93, 1, 'Phyllis', 'Foster', 'phyllis.foster@sakilacustomer.org', 97, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (94, 1, 'Norma', 'Gonzales', 'norma.gonzales@sakilacustomer.org', 98, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (95, 2, 'Paula', 'Bryant', 'paula.bryant@sakilacustomer.org', 99, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (96, 1, 'Diana', 'Alexander', 'diana.alexander@sakilacustomer.org', 100, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (97, 2, 'Annie', 'Russell', 'annie.russell@sakilacustomer.org', 101, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (98, 1, 'Lillian', 'Griffin', 'lillian.griffin@sakilacustomer.org', 102, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (99, 2, 'Emily', 'Diaz', 'emily.diaz@sakilacustomer.org', 103, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (100, 1, 'Robin', 'Hayes', 'robin.hayes@sakilacustomer.org', 104, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (101, 1, 'Peggy', 'Myers', 'peggy.myers@sakilacustomer.org', 105, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (102, 1, 'Crystal', 'Ford', 'crystal.ford@sakilacustomer.org', 106, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (103, 1, 'Gladys', 'Hamilton', 'gladys.hamilton@sakilacustomer.org', 107, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (104, 1, 'Rita', 'Graham', 'rita.graham@sakilacustomer.org', 108, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (105, 1, 'Dawn', 'Sullivan', 
'dawn.sullivan@sakilacustomer.org', 109, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (106, 1, 'Connie', 'Wallace', 'connie.wallace@sakilacustomer.org', 110, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (107, 1, 'Florence', 'Woods', 'florence.woods@sakilacustomer.org', 111, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (108, 1, 'Tracy', 'Cole', 'tracy.cole@sakilacustomer.org', 112, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (109, 2, 'Edna', 'West', 'edna.west@sakilacustomer.org', 113, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (110, 2, 'Tiffany', 'Jordan', 'tiffany.jordan@sakilacustomer.org', 114, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (111, 1, 'Carmen', 'Owens', 'carmen.owens@sakilacustomer.org', 115, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (112, 2, 'Rosa', 'Reynolds', 'rosa.reynolds@sakilacustomer.org', 116, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (113, 2, 'Cindy', 'Fisher', 'cindy.fisher@sakilacustomer.org', 117, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (114, 2, 'Grace', 'Ellis', 'grace.ellis@sakilacustomer.org', 118, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (115, 1, 'Wendy', 'Harrison', 'wendy.harrison@sakilacustomer.org', 119, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (116, 1, 'Victoria', 'Gibson', 'victoria.gibson@sakilacustomer.org', 120, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (117, 1, 'Edith', 'Mcdonald', 'edith.mcdonald@sakilacustomer.org', 121, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (118, 1, 'Kim', 'Cruz', 'kim.cruz@sakilacustomer.org', 122, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (119, 1, 'Sherry', 'Marshall', 'sherry.marshall@sakilacustomer.org', 123, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (120, 2, 'Sylvia', 'Ortiz', 'sylvia.ortiz@sakilacustomer.org', 124, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (121, 1, 'Josephine', 'Gomez', 'josephine.gomez@sakilacustomer.org', 125, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (122, 1, 'Thelma', 'Murray', 'thelma.murray@sakilacustomer.org', 126, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (123, 2, 'Shannon', 'Freeman', 'shannon.freeman@sakilacustomer.org', 127, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (124, 1, 'Sheila', 'Wells', 'sheila.wells@sakilacustomer.org', 128, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (125, 1, 'Ethel', 'Webb', 'ethel.webb@sakilacustomer.org', 129, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (126, 1, 'Ellen', 'Simpson', 'ellen.simpson@sakilacustomer.org', 130, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (127, 2, 'Elaine', 'Stevens', 'elaine.stevens@sakilacustomer.org', 131, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (128, 1, 'Marjorie', 'Tucker', 'marjorie.tucker@sakilacustomer.org', 132, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (129, 1, 
'Carrie', 'Porter', 'carrie.porter@sakilacustomer.org', 133, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (130, 1, 'Charlotte', 'Hunter', 'charlotte.hunter@sakilacustomer.org', 134, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (131, 2, 'Monica', 'Hicks', 'monica.hicks@sakilacustomer.org', 135, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (132, 2, 'Esther', 'Crawford', 'esther.crawford@sakilacustomer.org', 136, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (133, 1, 'Pauline', 'Henry', 'pauline.henry@sakilacustomer.org', 137, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (134, 1, 'Emma', 'Boyd', 'emma.boyd@sakilacustomer.org', 138, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (135, 2, 'Juanita', 'Mason', 'juanita.mason@sakilacustomer.org', 139, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (136, 2, 'Anita', 'Morales', 'anita.morales@sakilacustomer.org', 140, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (137, 2, 'Rhonda', 'Kennedy', 'rhonda.kennedy@sakilacustomer.org', 141, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (138, 1, 'Hazel', 'Warren', 'hazel.warren@sakilacustomer.org', 142, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (139, 1, 'Amber', 'Dixon', 'amber.dixon@sakilacustomer.org', 143, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (140, 1, 'Eva', 'Ramos', 'eva.ramos@sakilacustomer.org', 144, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (141, 1, 'Debbie', 'Reyes', 'debbie.reyes@sakilacustomer.org', 145, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (142, 1, 'April', 'Burns', 'april.burns@sakilacustomer.org', 146, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (143, 1, 'Leslie', 'Gordon', 'leslie.gordon@sakilacustomer.org', 147, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (144, 1, 'Clara', 'Shaw', 'clara.shaw@sakilacustomer.org', 148, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (145, 1, 'Lucille', 'Holmes', 'lucille.holmes@sakilacustomer.org', 149, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (146, 1, 'Jamie', 'Rice', 'jamie.rice@sakilacustomer.org', 150, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (147, 2, 'Joanne', 'Robertson', 'joanne.robertson@sakilacustomer.org', 151, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (148, 1, 'Eleanor', 'Hunt', 'eleanor.hunt@sakilacustomer.org', 152, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (149, 1, 'Valerie', 'Black', 'valerie.black@sakilacustomer.org', 153, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (150, 2, 'Danielle', 'Daniels', 'danielle.daniels@sakilacustomer.org', 154, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (151, 2, 'Megan', 'Palmer', 'megan.palmer@sakilacustomer.org', 155, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (152, 1, 'Alicia', 'Mills', 'alicia.mills@sakilacustomer.org', 156, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (153, 2, 
'Suzanne', 'Nichols', 'suzanne.nichols@sakilacustomer.org', 157, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (154, 2, 'Michele', 'Grant', 'michele.grant@sakilacustomer.org', 158, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (155, 1, 'Gail', 'Knight', 'gail.knight@sakilacustomer.org', 159, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (156, 1, 'Bertha', 'Ferguson', 'bertha.ferguson@sakilacustomer.org', 160, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (157, 2, 'Darlene', 'Rose', 'darlene.rose@sakilacustomer.org', 161, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (158, 1, 'Veronica', 'Stone', 'veronica.stone@sakilacustomer.org', 162, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (159, 1, 'Jill', 'Hawkins', 'jill.hawkins@sakilacustomer.org', 163, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (160, 2, 'Erin', 'Dunn', 'erin.dunn@sakilacustomer.org', 164, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (161, 1, 'Geraldine', 'Perkins', 'geraldine.perkins@sakilacustomer.org', 165, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (162, 2, 'Lauren', 'Hudson', 'lauren.hudson@sakilacustomer.org', 166, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (163, 1, 'Cathy', 'Spencer', 'cathy.spencer@sakilacustomer.org', 167, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (164, 2, 'Joann', 'Gardner', 'joann.gardner@sakilacustomer.org', 168, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (165, 2, 'Lorraine', 'Stephens', 'lorraine.stephens@sakilacustomer.org', 169, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (166, 1, 'Lynn', 'Payne', 'lynn.payne@sakilacustomer.org', 170, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (167, 2, 'Sally', 'Pierce', 'sally.pierce@sakilacustomer.org', 171, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (168, 1, 'Regina', 'Berry', 'regina.berry@sakilacustomer.org', 172, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (169, 2, 'Erica', 'Matthews', 'erica.matthews@sakilacustomer.org', 173, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (170, 1, 'Beatrice', 'Arnold', 'beatrice.arnold@sakilacustomer.org', 174, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (171, 2, 'Dolores', 'Wagner', 'dolores.wagner@sakilacustomer.org', 175, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (172, 1, 'Bernice', 'Willis', 'bernice.willis@sakilacustomer.org', 176, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (173, 1, 'Audrey', 'Ray', 'audrey.ray@sakilacustomer.org', 177, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (174, 2, 'Yvonne', 'Watkins', 'yvonne.watkins@sakilacustomer.org', 178, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (175, 1, 'Annette', 'Olson', 'annette.olson@sakilacustomer.org', 179, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (176, 1, 'June', 'Carroll', 'june.carroll@sakilacustomer.org', 180, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO 
customer VALUES (177, 2, 'Samantha', 'Duncan', 'samantha.duncan@sakilacustomer.org', 181, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (178, 2, 'Marion', 'Snyder', 'marion.snyder@sakilacustomer.org', 182, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (179, 1, 'Dana', 'Hart', 'dana.hart@sakilacustomer.org', 183, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (180, 2, 'Stacy', 'Cunningham', 'stacy.cunningham@sakilacustomer.org', 184, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (181, 2, 'Ana', 'Bradley', 'ana.bradley@sakilacustomer.org', 185, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (182, 1, 'Renee', 'Lane', 'renee.lane@sakilacustomer.org', 186, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (183, 2, 'Ida', 'Andrews', 'ida.andrews@sakilacustomer.org', 187, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (184, 1, 'Vivian', 'Ruiz', 'vivian.ruiz@sakilacustomer.org', 188, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (185, 1, 'Roberta', 'Harper', 'roberta.harper@sakilacustomer.org', 189, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (186, 2, 'Holly', 'Fox', 'holly.fox@sakilacustomer.org', 190, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (187, 2, 'Brittany', 'Riley', 'brittany.riley@sakilacustomer.org', 191, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (188, 1, 'Melanie', 'Armstrong', 'melanie.armstrong@sakilacustomer.org', 192, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (189, 1, 'Loretta', 'Carpenter', 'loretta.carpenter@sakilacustomer.org', 193, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (190, 2, 'Yolanda', 'Weaver', 'yolanda.weaver@sakilacustomer.org', 194, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (191, 1, 'Jeanette', 'Greene', 'jeanette.greene@sakilacustomer.org', 195, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (192, 1, 'Laurie', 'Lawrence', 'laurie.lawrence@sakilacustomer.org', 196, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (193, 2, 'Katie', 'Elliott', 'katie.elliott@sakilacustomer.org', 197, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (194, 2, 'Kristen', 'Chavez', 'kristen.chavez@sakilacustomer.org', 198, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (195, 1, 'Vanessa', 'Sims', 'vanessa.sims@sakilacustomer.org', 199, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (196, 1, 'Alma', 'Austin', 'alma.austin@sakilacustomer.org', 200, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (197, 2, 'Sue', 'Peters', 'sue.peters@sakilacustomer.org', 201, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (198, 2, 'Elsie', 'Kelley', 'elsie.kelley@sakilacustomer.org', 202, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (199, 2, 'Beth', 'Franklin', 'beth.franklin@sakilacustomer.org', 203, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (200, 2, 'Jeanne', 'Lawson', 'jeanne.lawson@sakilacustomer.org', 204, 't', '2006-02-14', '2013-05-26 14:49:45.738', 
1); +INSERT INTO customer VALUES (201, 1, 'Vicki', 'Fields', 'vicki.fields@sakilacustomer.org', 205, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (202, 2, 'Carla', 'Gutierrez', 'carla.gutierrez@sakilacustomer.org', 206, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (203, 1, 'Tara', 'Ryan', 'tara.ryan@sakilacustomer.org', 207, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (204, 1, 'Rosemary', 'Schmidt', 'rosemary.schmidt@sakilacustomer.org', 208, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (205, 2, 'Eileen', 'Carr', 'eileen.carr@sakilacustomer.org', 209, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (206, 1, 'Terri', 'Vasquez', 'terri.vasquez@sakilacustomer.org', 210, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (207, 1, 'Gertrude', 'Castillo', 'gertrude.castillo@sakilacustomer.org', 211, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (208, 1, 'Lucy', 'Wheeler', 'lucy.wheeler@sakilacustomer.org', 212, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (209, 2, 'Tonya', 'Chapman', 'tonya.chapman@sakilacustomer.org', 213, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (210, 2, 'Ella', 'Oliver', 'ella.oliver@sakilacustomer.org', 214, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (211, 1, 'Stacey', 'Montgomery', 'stacey.montgomery@sakilacustomer.org', 215, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (212, 2, 'Wilma', 'Richards', 'wilma.richards@sakilacustomer.org', 216, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (213, 1, 'Gina', 'Williamson', 'gina.williamson@sakilacustomer.org', 217, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (214, 1, 'Kristin', 'Johnston', 'kristin.johnston@sakilacustomer.org', 218, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (215, 2, 'Jessie', 'Banks', 'jessie.banks@sakilacustomer.org', 219, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (216, 1, 'Natalie', 'Meyer', 'natalie.meyer@sakilacustomer.org', 220, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (217, 2, 'Agnes', 'Bishop', 'agnes.bishop@sakilacustomer.org', 221, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (218, 1, 'Vera', 'Mccoy', 'vera.mccoy@sakilacustomer.org', 222, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (219, 2, 'Willie', 'Howell', 'willie.howell@sakilacustomer.org', 223, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (220, 2, 'Charlene', 'Alvarez', 'charlene.alvarez@sakilacustomer.org', 224, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (221, 1, 'Bessie', 'Morrison', 'bessie.morrison@sakilacustomer.org', 225, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (222, 2, 'Delores', 'Hansen', 'delores.hansen@sakilacustomer.org', 226, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (223, 1, 'Melinda', 'Fernandez', 'melinda.fernandez@sakilacustomer.org', 227, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (224, 2, 'Pearl', 'Garza', 'pearl.garza@sakilacustomer.org', 228, 
't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (225, 1, 'Arlene', 'Harvey', 'arlene.harvey@sakilacustomer.org', 229, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (226, 2, 'Maureen', 'Little', 'maureen.little@sakilacustomer.org', 230, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (227, 1, 'Colleen', 'Burton', 'colleen.burton@sakilacustomer.org', 231, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (228, 2, 'Allison', 'Stanley', 'allison.stanley@sakilacustomer.org', 232, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (229, 1, 'Tamara', 'Nguyen', 'tamara.nguyen@sakilacustomer.org', 233, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (230, 2, 'Joy', 'George', 'joy.george@sakilacustomer.org', 234, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (231, 1, 'Georgia', 'Jacobs', 'georgia.jacobs@sakilacustomer.org', 235, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (232, 2, 'Constance', 'Reid', 'constance.reid@sakilacustomer.org', 236, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (233, 2, 'Lillie', 'Kim', 'lillie.kim@sakilacustomer.org', 237, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (234, 1, 'Claudia', 'Fuller', 'claudia.fuller@sakilacustomer.org', 238, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (235, 1, 'Jackie', 'Lynch', 'jackie.lynch@sakilacustomer.org', 239, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (236, 1, 'Marcia', 'Dean', 'marcia.dean@sakilacustomer.org', 240, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (237, 1, 'Tanya', 'Gilbert', 'tanya.gilbert@sakilacustomer.org', 241, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (238, 1, 'Nellie', 'Garrett', 'nellie.garrett@sakilacustomer.org', 242, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (239, 2, 'Minnie', 'Romero', 'minnie.romero@sakilacustomer.org', 243, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (240, 1, 'Marlene', 'Welch', 'marlene.welch@sakilacustomer.org', 244, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (241, 2, 'Heidi', 'Larson', 'heidi.larson@sakilacustomer.org', 245, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (242, 1, 'Glenda', 'Frazier', 'glenda.frazier@sakilacustomer.org', 246, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (243, 1, 'Lydia', 'Burke', 'lydia.burke@sakilacustomer.org', 247, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (244, 2, 'Viola', 'Hanson', 'viola.hanson@sakilacustomer.org', 248, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (245, 1, 'Courtney', 'Day', 'courtney.day@sakilacustomer.org', 249, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (246, 1, 'Marian', 'Mendoza', 'marian.mendoza@sakilacustomer.org', 250, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (247, 1, 'Stella', 'Moreno', 'stella.moreno@sakilacustomer.org', 251, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (248, 1, 'Caroline', 'Bowman', 
'caroline.bowman@sakilacustomer.org', 252, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (249, 2, 'Dora', 'Medina', 'dora.medina@sakilacustomer.org', 253, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (250, 2, 'Jo', 'Fowler', 'jo.fowler@sakilacustomer.org', 254, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (251, 2, 'Vickie', 'Brewer', 'vickie.brewer@sakilacustomer.org', 255, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (252, 2, 'Mattie', 'Hoffman', 'mattie.hoffman@sakilacustomer.org', 256, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (253, 1, 'Terry', 'Carlson', 'terry.carlson@sakilacustomer.org', 258, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (254, 2, 'Maxine', 'Silva', 'maxine.silva@sakilacustomer.org', 259, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (255, 2, 'Irma', 'Pearson', 'irma.pearson@sakilacustomer.org', 260, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (256, 2, 'Mabel', 'Holland', 'mabel.holland@sakilacustomer.org', 261, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (257, 2, 'Marsha', 'Douglas', 'marsha.douglas@sakilacustomer.org', 262, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (258, 1, 'Myrtle', 'Fleming', 'myrtle.fleming@sakilacustomer.org', 263, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (259, 2, 'Lena', 'Jensen', 'lena.jensen@sakilacustomer.org', 264, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (260, 1, 'Christy', 'Vargas', 'christy.vargas@sakilacustomer.org', 265, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (261, 1, 'Deanna', 'Byrd', 'deanna.byrd@sakilacustomer.org', 266, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (262, 2, 'Patsy', 'Davidson', 'patsy.davidson@sakilacustomer.org', 267, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (263, 1, 'Hilda', 'Hopkins', 'hilda.hopkins@sakilacustomer.org', 268, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (264, 1, 'Gwendolyn', 'May', 'gwendolyn.may@sakilacustomer.org', 269, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (265, 2, 'Jennie', 'Terry', 'jennie.terry@sakilacustomer.org', 270, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (266, 2, 'Nora', 'Herrera', 'nora.herrera@sakilacustomer.org', 271, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (267, 1, 'Margie', 'Wade', 'margie.wade@sakilacustomer.org', 272, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (268, 1, 'Nina', 'Soto', 'nina.soto@sakilacustomer.org', 273, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (269, 1, 'Cassandra', 'Walters', 'cassandra.walters@sakilacustomer.org', 274, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (270, 1, 'Leah', 'Curtis', 'leah.curtis@sakilacustomer.org', 275, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (271, 1, 'Penny', 'Neal', 'penny.neal@sakilacustomer.org', 276, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (272, 1, 'Kay', 'Caldwell', 
'kay.caldwell@sakilacustomer.org', 277, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (273, 2, 'Priscilla', 'Lowe', 'priscilla.lowe@sakilacustomer.org', 278, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (274, 1, 'Naomi', 'Jennings', 'naomi.jennings@sakilacustomer.org', 279, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (275, 2, 'Carole', 'Barnett', 'carole.barnett@sakilacustomer.org', 280, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (276, 1, 'Brandy', 'Graves', 'brandy.graves@sakilacustomer.org', 281, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (277, 2, 'Olga', 'Jimenez', 'olga.jimenez@sakilacustomer.org', 282, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (278, 2, 'Billie', 'Horton', 'billie.horton@sakilacustomer.org', 283, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (279, 2, 'Dianne', 'Shelton', 'dianne.shelton@sakilacustomer.org', 284, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (280, 2, 'Tracey', 'Barrett', 'tracey.barrett@sakilacustomer.org', 285, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (281, 2, 'Leona', 'Obrien', 'leona.obrien@sakilacustomer.org', 286, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (282, 2, 'Jenny', 'Castro', 'jenny.castro@sakilacustomer.org', 287, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (283, 1, 'Felicia', 'Sutton', 'felicia.sutton@sakilacustomer.org', 288, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (284, 1, 'Sonia', 'Gregory', 'sonia.gregory@sakilacustomer.org', 289, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (285, 1, 'Miriam', 'Mckinney', 'miriam.mckinney@sakilacustomer.org', 290, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (286, 1, 'Velma', 'Lucas', 'velma.lucas@sakilacustomer.org', 291, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (287, 2, 'Becky', 'Miles', 'becky.miles@sakilacustomer.org', 292, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (288, 1, 'Bobbie', 'Craig', 'bobbie.craig@sakilacustomer.org', 293, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (289, 1, 'Violet', 'Rodriquez', 'violet.rodriquez@sakilacustomer.org', 294, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (290, 1, 'Kristina', 'Chambers', 'kristina.chambers@sakilacustomer.org', 295, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (291, 1, 'Toni', 'Holt', 'toni.holt@sakilacustomer.org', 296, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (292, 2, 'Misty', 'Lambert', 'misty.lambert@sakilacustomer.org', 297, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (293, 2, 'Mae', 'Fletcher', 'mae.fletcher@sakilacustomer.org', 298, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (294, 2, 'Shelly', 'Watts', 'shelly.watts@sakilacustomer.org', 299, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (295, 1, 'Daisy', 'Bates', 'daisy.bates@sakilacustomer.org', 300, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (296, 2, 
'Ramona', 'Hale', 'ramona.hale@sakilacustomer.org', 301, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (297, 1, 'Sherri', 'Rhodes', 'sherri.rhodes@sakilacustomer.org', 302, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (298, 1, 'Erika', 'Pena', 'erika.pena@sakilacustomer.org', 303, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (299, 2, 'James', 'Gannon', 'james.gannon@sakilacustomer.org', 304, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (300, 1, 'John', 'Farnsworth', 'john.farnsworth@sakilacustomer.org', 305, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (301, 2, 'Robert', 'Baughman', 'robert.baughman@sakilacustomer.org', 306, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (302, 1, 'Michael', 'Silverman', 'michael.silverman@sakilacustomer.org', 307, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (303, 2, 'William', 'Satterfield', 'william.satterfield@sakilacustomer.org', 308, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (304, 2, 'David', 'Royal', 'david.royal@sakilacustomer.org', 309, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (305, 1, 'Richard', 'Mccrary', 'richard.mccrary@sakilacustomer.org', 310, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (306, 1, 'Charles', 'Kowalski', 'charles.kowalski@sakilacustomer.org', 311, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (307, 2, 'Joseph', 'Joy', 'joseph.joy@sakilacustomer.org', 312, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (308, 1, 'Thomas', 'Grigsby', 'thomas.grigsby@sakilacustomer.org', 313, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (309, 1, 'Christopher', 'Greco', 'christopher.greco@sakilacustomer.org', 314, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (310, 2, 'Daniel', 'Cabral', 'daniel.cabral@sakilacustomer.org', 315, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (311, 2, 'Paul', 'Trout', 'paul.trout@sakilacustomer.org', 316, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (312, 2, 'Mark', 'Rinehart', 'mark.rinehart@sakilacustomer.org', 317, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (313, 2, 'Donald', 'Mahon', 'donald.mahon@sakilacustomer.org', 318, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (314, 1, 'George', 'Linton', 'george.linton@sakilacustomer.org', 319, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (315, 2, 'Kenneth', 'Gooden', 'kenneth.gooden@sakilacustomer.org', 320, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0); +INSERT INTO customer VALUES (316, 1, 'Steven', 'Curley', 'steven.curley@sakilacustomer.org', 321, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (317, 2, 'Edward', 'Baugh', 'edward.baugh@sakilacustomer.org', 322, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (318, 1, 'Brian', 'Wyman', 'brian.wyman@sakilacustomer.org', 323, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); +INSERT INTO customer VALUES (319, 2, 'Ronald', 'Weiner', 'ronald.weiner@sakilacustomer.org', 324, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1); 
+INSERT INTO customer VALUES (320, 2, 'Anthony', 'Schwab', 'anthony.schwab@sakilacustomer.org', 325, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (321, 1, 'Kevin', 'Schuler', 'kevin.schuler@sakilacustomer.org', 326, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (322, 1, 'Jason', 'Morrissey', 'jason.morrissey@sakilacustomer.org', 327, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (323, 2, 'Matthew', 'Mahan', 'matthew.mahan@sakilacustomer.org', 328, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (324, 2, 'Gary', 'Coy', 'gary.coy@sakilacustomer.org', 329, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (325, 1, 'Timothy', 'Bunn', 'timothy.bunn@sakilacustomer.org', 330, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (326, 1, 'Jose', 'Andrew', 'jose.andrew@sakilacustomer.org', 331, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (327, 2, 'Larry', 'Thrasher', 'larry.thrasher@sakilacustomer.org', 332, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (328, 2, 'Jeffrey', 'Spear', 'jeffrey.spear@sakilacustomer.org', 333, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (329, 2, 'Frank', 'Waggoner', 'frank.waggoner@sakilacustomer.org', 334, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (330, 1, 'Scott', 'Shelley', 'scott.shelley@sakilacustomer.org', 335, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (331, 1, 'Eric', 'Robert', 'eric.robert@sakilacustomer.org', 336, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (332, 1, 'Stephen', 'Qualls', 'stephen.qualls@sakilacustomer.org', 337, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (333, 2, 'Andrew', 'Purdy', 'andrew.purdy@sakilacustomer.org', 338, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (334, 2, 'Raymond', 'Mcwhorter', 'raymond.mcwhorter@sakilacustomer.org', 339, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (335, 1, 'Gregory', 'Mauldin', 'gregory.mauldin@sakilacustomer.org', 340, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (336, 1, 'Joshua', 'Mark', 'joshua.mark@sakilacustomer.org', 341, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (337, 1, 'Jerry', 'Jordon', 'jerry.jordon@sakilacustomer.org', 342, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (338, 1, 'Dennis', 'Gilman', 'dennis.gilman@sakilacustomer.org', 343, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (339, 2, 'Walter', 'Perryman', 'walter.perryman@sakilacustomer.org', 344, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (340, 1, 'Patrick', 'Newsom', 'patrick.newsom@sakilacustomer.org', 345, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (341, 1, 'Peter', 'Menard', 'peter.menard@sakilacustomer.org', 346, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (342, 1, 'Harold', 'Martino', 'harold.martino@sakilacustomer.org', 347, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (343, 1, 'Douglas', 'Graf', 'douglas.graf@sakilacustomer.org', 348, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (344, 1, 'Henry', 'Billingsley', 'henry.billingsley@sakilacustomer.org', 349, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (345, 1, 'Carl', 'Artis', 'carl.artis@sakilacustomer.org', 350, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (346, 1, 'Arthur', 'Simpkins', 'arthur.simpkins@sakilacustomer.org', 351, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (347, 2, 'Ryan', 'Salisbury', 'ryan.salisbury@sakilacustomer.org', 352, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (348, 2, 'Roger', 'Quintanilla', 'roger.quintanilla@sakilacustomer.org', 353, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (349, 2, 'Joe', 'Gilliland', 'joe.gilliland@sakilacustomer.org', 354, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (350, 1, 'Juan', 'Fraley', 'juan.fraley@sakilacustomer.org', 355, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (351, 1, 'Jack', 'Foust', 'jack.foust@sakilacustomer.org', 356, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (352, 1, 'Albert', 'Crouse', 'albert.crouse@sakilacustomer.org', 357, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (353, 1, 'Jonathan', 'Scarborough', 'jonathan.scarborough@sakilacustomer.org', 358, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (354, 2, 'Justin', 'Ngo', 'justin.ngo@sakilacustomer.org', 359, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (355, 2, 'Terry', 'Grissom', 'terry.grissom@sakilacustomer.org', 360, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (356, 2, 'Gerald', 'Fultz', 'gerald.fultz@sakilacustomer.org', 361, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (357, 1, 'Keith', 'Rico', 'keith.rico@sakilacustomer.org', 362, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (358, 2, 'Samuel', 'Marlow', 'samuel.marlow@sakilacustomer.org', 363, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (359, 2, 'Willie', 'Markham', 'willie.markham@sakilacustomer.org', 364, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (360, 2, 'Ralph', 'Madrigal', 'ralph.madrigal@sakilacustomer.org', 365, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (361, 2, 'Lawrence', 'Lawton', 'lawrence.lawton@sakilacustomer.org', 366, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (362, 1, 'Nicholas', 'Barfield', 'nicholas.barfield@sakilacustomer.org', 367, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (363, 2, 'Roy', 'Whiting', 'roy.whiting@sakilacustomer.org', 368, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (364, 1, 'Benjamin', 'Varney', 'benjamin.varney@sakilacustomer.org', 369, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (365, 2, 'Bruce', 'Schwarz', 'bruce.schwarz@sakilacustomer.org', 370, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (366, 1, 'Brandon', 'Huey', 'brandon.huey@sakilacustomer.org', 371, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (367, 1, 'Adam', 'Gooch', 'adam.gooch@sakilacustomer.org', 372, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (368, 1, 'Harry', 'Arce', 'harry.arce@sakilacustomer.org', 373, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (369, 2, 'Fred', 'Wheat', 'fred.wheat@sakilacustomer.org', 374, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (370, 2, 'Wayne', 'Truong', 'wayne.truong@sakilacustomer.org', 375, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (371, 1, 'Billy', 'Poulin', 'billy.poulin@sakilacustomer.org', 376, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (372, 2, 'Steve', 'Mackenzie', 'steve.mackenzie@sakilacustomer.org', 377, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (373, 1, 'Louis', 'Leone', 'louis.leone@sakilacustomer.org', 378, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (374, 2, 'Jeremy', 'Hurtado', 'jeremy.hurtado@sakilacustomer.org', 379, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (375, 2, 'Aaron', 'Selby', 'aaron.selby@sakilacustomer.org', 380, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (376, 1, 'Randy', 'Gaither', 'randy.gaither@sakilacustomer.org', 381, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (377, 1, 'Howard', 'Fortner', 'howard.fortner@sakilacustomer.org', 382, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (378, 1, 'Eugene', 'Culpepper', 'eugene.culpepper@sakilacustomer.org', 383, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (379, 1, 'Carlos', 'Coughlin', 'carlos.coughlin@sakilacustomer.org', 384, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (380, 1, 'Russell', 'Brinson', 'russell.brinson@sakilacustomer.org', 385, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (381, 2, 'Bobby', 'Boudreau', 'bobby.boudreau@sakilacustomer.org', 386, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (382, 2, 'Victor', 'Barkley', 'victor.barkley@sakilacustomer.org', 387, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (383, 1, 'Martin', 'Bales', 'martin.bales@sakilacustomer.org', 388, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (384, 2, 'Ernest', 'Stepp', 'ernest.stepp@sakilacustomer.org', 389, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (385, 1, 'Phillip', 'Holm', 'phillip.holm@sakilacustomer.org', 390, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (386, 1, 'Todd', 'Tan', 'todd.tan@sakilacustomer.org', 391, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (387, 2, 'Jesse', 'Schilling', 'jesse.schilling@sakilacustomer.org', 392, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (388, 2, 'Craig', 'Morrell', 'craig.morrell@sakilacustomer.org', 393, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (389, 1, 'Alan', 'Kahn', 'alan.kahn@sakilacustomer.org', 394, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (390, 1, 'Shawn', 'Heaton', 'shawn.heaton@sakilacustomer.org', 395, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (391, 1, 'Clarence', 'Gamez', 'clarence.gamez@sakilacustomer.org', 396, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (392, 2, 'Sean', 'Douglass', 'sean.douglass@sakilacustomer.org', 397, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (393, 1, 'Philip', 'Causey', 'philip.causey@sakilacustomer.org', 398, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (394, 2, 'Chris', 'Brothers', 'chris.brothers@sakilacustomer.org', 399, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (395, 2, 'Johnny', 'Turpin', 'johnny.turpin@sakilacustomer.org', 400, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (396, 1, 'Earl', 'Shanks', 'earl.shanks@sakilacustomer.org', 401, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (397, 1, 'Jimmy', 'Schrader', 'jimmy.schrader@sakilacustomer.org', 402, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (398, 1, 'Antonio', 'Meek', 'antonio.meek@sakilacustomer.org', 403, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (399, 1, 'Danny', 'Isom', 'danny.isom@sakilacustomer.org', 404, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (400, 2, 'Bryan', 'Hardison', 'bryan.hardison@sakilacustomer.org', 405, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (401, 2, 'Tony', 'Carranza', 'tony.carranza@sakilacustomer.org', 406, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (402, 1, 'Luis', 'Yanez', 'luis.yanez@sakilacustomer.org', 407, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (403, 1, 'Mike', 'Way', 'mike.way@sakilacustomer.org', 408, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (404, 2, 'Stanley', 'Scroggins', 'stanley.scroggins@sakilacustomer.org', 409, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (405, 1, 'Leonard', 'Schofield', 'leonard.schofield@sakilacustomer.org', 410, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (406, 1, 'Nathan', 'Runyon', 'nathan.runyon@sakilacustomer.org', 411, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (407, 1, 'Dale', 'Ratcliff', 'dale.ratcliff@sakilacustomer.org', 412, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (408, 1, 'Manuel', 'Murrell', 'manuel.murrell@sakilacustomer.org', 413, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (409, 2, 'Rodney', 'Moeller', 'rodney.moeller@sakilacustomer.org', 414, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (410, 2, 'Curtis', 'Irby', 'curtis.irby@sakilacustomer.org', 415, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (411, 1, 'Norman', 'Currier', 'norman.currier@sakilacustomer.org', 416, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (412, 2, 'Allen', 'Butterfield', 'allen.butterfield@sakilacustomer.org', 417, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (413, 2, 'Marvin', 'Yee', 'marvin.yee@sakilacustomer.org', 418, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (414, 1, 'Vincent', 'Ralston', 'vincent.ralston@sakilacustomer.org', 419, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (415, 1, 'Glenn', 'Pullen', 'glenn.pullen@sakilacustomer.org', 420, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (416, 2, 'Jeffery', 'Pinson', 'jeffery.pinson@sakilacustomer.org', 421, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (417, 1, 'Travis', 'Estep', 'travis.estep@sakilacustomer.org', 422, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (418, 2, 'Jeff', 'East', 'jeff.east@sakilacustomer.org', 423, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (419, 1, 'Chad', 'Carbone', 'chad.carbone@sakilacustomer.org', 424, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (420, 1, 'Jacob', 'Lance', 'jacob.lance@sakilacustomer.org', 425, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (421, 1, 'Lee', 'Hawks', 'lee.hawks@sakilacustomer.org', 426, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (422, 1, 'Melvin', 'Ellington', 'melvin.ellington@sakilacustomer.org', 427, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (423, 2, 'Alfred', 'Casillas', 'alfred.casillas@sakilacustomer.org', 428, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (424, 2, 'Kyle', 'Spurlock', 'kyle.spurlock@sakilacustomer.org', 429, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (425, 2, 'Francis', 'Sikes', 'francis.sikes@sakilacustomer.org', 430, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (426, 1, 'Bradley', 'Motley', 'bradley.motley@sakilacustomer.org', 431, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (427, 2, 'Jesus', 'Mccartney', 'jesus.mccartney@sakilacustomer.org', 432, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (428, 2, 'Herbert', 'Kruger', 'herbert.kruger@sakilacustomer.org', 433, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (429, 2, 'Frederick', 'Isbell', 'frederick.isbell@sakilacustomer.org', 434, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (430, 1, 'Ray', 'Houle', 'ray.houle@sakilacustomer.org', 435, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (431, 2, 'Joel', 'Francisco', 'joel.francisco@sakilacustomer.org', 436, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (432, 1, 'Edwin', 'Burk', 'edwin.burk@sakilacustomer.org', 437, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (433, 1, 'Don', 'Bone', 'don.bone@sakilacustomer.org', 438, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (434, 1, 'Eddie', 'Tomlin', 'eddie.tomlin@sakilacustomer.org', 439, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (435, 2, 'Ricky', 'Shelby', 'ricky.shelby@sakilacustomer.org', 440, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (436, 1, 'Troy', 'Quigley', 'troy.quigley@sakilacustomer.org', 441, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (437, 2, 'Randall', 'Neumann', 'randall.neumann@sakilacustomer.org', 442, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (438, 1, 'Barry', 'Lovelace', 'barry.lovelace@sakilacustomer.org', 443, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (439, 2, 'Alexander', 'Fennell', 'alexander.fennell@sakilacustomer.org', 444, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (440, 1, 'Bernard', 'Colby', 'bernard.colby@sakilacustomer.org', 445, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (441, 1, 'Mario', 'Cheatham', 'mario.cheatham@sakilacustomer.org', 446, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (442, 1, 'Leroy', 'Bustamante', 'leroy.bustamante@sakilacustomer.org', 447, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (443, 2, 'Francisco', 'Skidmore', 'francisco.skidmore@sakilacustomer.org', 448, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (444, 2, 'Marcus', 'Hidalgo', 'marcus.hidalgo@sakilacustomer.org', 449, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (445, 1, 'Micheal', 'Forman', 'micheal.forman@sakilacustomer.org', 450, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (446, 2, 'Theodore', 'Culp', 'theodore.culp@sakilacustomer.org', 451, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (447, 1, 'Clifford', 'Bowens', 'clifford.bowens@sakilacustomer.org', 452, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (448, 1, 'Miguel', 'Betancourt', 'miguel.betancourt@sakilacustomer.org', 453, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (449, 2, 'Oscar', 'Aquino', 'oscar.aquino@sakilacustomer.org', 454, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (450, 1, 'Jay', 'Robb', 'jay.robb@sakilacustomer.org', 455, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (451, 1, 'Jim', 'Rea', 'jim.rea@sakilacustomer.org', 456, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (452, 1, 'Tom', 'Milner', 'tom.milner@sakilacustomer.org', 457, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (453, 1, 'Calvin', 'Martel', 'calvin.martel@sakilacustomer.org', 458, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (454, 2, 'Alex', 'Gresham', 'alex.gresham@sakilacustomer.org', 459, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (455, 2, 'Jon', 'Wiles', 'jon.wiles@sakilacustomer.org', 460, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (456, 2, 'Ronnie', 'Ricketts', 'ronnie.ricketts@sakilacustomer.org', 461, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (457, 2, 'Bill', 'Gavin', 'bill.gavin@sakilacustomer.org', 462, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (458, 1, 'Lloyd', 'Dowd', 'lloyd.dowd@sakilacustomer.org', 463, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (459, 1, 'Tommy', 'Collazo', 'tommy.collazo@sakilacustomer.org', 464, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (460, 1, 'Leon', 'Bostic', 'leon.bostic@sakilacustomer.org', 465, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (461, 1, 'Derek', 'Blakely', 'derek.blakely@sakilacustomer.org', 466, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (462, 2, 'Warren', 'Sherrod', 'warren.sherrod@sakilacustomer.org', 467, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (463, 2, 'Darrell', 'Power', 'darrell.power@sakilacustomer.org', 468, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (464, 1, 'Jerome', 'Kenyon', 'jerome.kenyon@sakilacustomer.org', 469, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (465, 1, 'Floyd', 'Gandy', 'floyd.gandy@sakilacustomer.org', 470, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (466, 1, 'Leo', 'Ebert', 'leo.ebert@sakilacustomer.org', 471, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (467, 2, 'Alvin', 'Deloach', 'alvin.deloach@sakilacustomer.org', 472, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (468, 1, 'Tim', 'Cary', 'tim.cary@sakilacustomer.org', 473, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (469, 2, 'Wesley', 'Bull', 'wesley.bull@sakilacustomer.org', 474, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (470, 1, 'Gordon', 'Allard', 'gordon.allard@sakilacustomer.org', 475, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (471, 1, 'Dean', 'Sauer', 'dean.sauer@sakilacustomer.org', 476, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (472, 1, 'Greg', 'Robins', 'greg.robins@sakilacustomer.org', 477, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (473, 2, 'Jorge', 'Olivares', 'jorge.olivares@sakilacustomer.org', 478, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (474, 2, 'Dustin', 'Gillette', 'dustin.gillette@sakilacustomer.org', 479, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (475, 2, 'Pedro', 'Chestnut', 'pedro.chestnut@sakilacustomer.org', 480, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (476, 1, 'Derrick', 'Bourque', 'derrick.bourque@sakilacustomer.org', 481, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (477, 1, 'Dan', 'Paine', 'dan.paine@sakilacustomer.org', 482, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (478, 1, 'Lewis', 'Lyman', 'lewis.lyman@sakilacustomer.org', 483, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (479, 1, 'Zachary', 'Hite', 'zachary.hite@sakilacustomer.org', 484, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (480, 1, 'Corey', 'Hauser', 'corey.hauser@sakilacustomer.org', 485, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (481, 1, 'Herman', 'Devore', 'herman.devore@sakilacustomer.org', 486, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (482, 1, 'Maurice', 'Crawley', 'maurice.crawley@sakilacustomer.org', 487, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (483, 2, 'Vernon', 'Chapa', 'vernon.chapa@sakilacustomer.org', 488, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (484, 1, 'Roberto', 'Vu', 'roberto.vu@sakilacustomer.org', 489, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (485, 1, 'Clyde', 'Tobias', 'clyde.tobias@sakilacustomer.org', 490, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (486, 1, 'Glen', 'Talbert', 'glen.talbert@sakilacustomer.org', 491, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (487, 2, 'Hector', 'Poindexter', 'hector.poindexter@sakilacustomer.org', 492, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (488, 2, 'Shane', 'Millard', 'shane.millard@sakilacustomer.org', 493, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (489, 1, 'Ricardo', 'Meador', 'ricardo.meador@sakilacustomer.org', 494, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (490, 1, 'Sam', 'Mcduffie', 'sam.mcduffie@sakilacustomer.org', 495, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (491, 2, 'Rick', 'Mattox', 'rick.mattox@sakilacustomer.org', 496, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (492, 2, 'Lester', 'Kraus', 'lester.kraus@sakilacustomer.org', 497, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (493, 1, 'Brent', 'Harkins', 'brent.harkins@sakilacustomer.org', 498, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (494, 2, 'Ramon', 'Choate', 'ramon.choate@sakilacustomer.org', 499, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (495, 2, 'Charlie', 'Bess', 'charlie.bess@sakilacustomer.org', 500, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (496, 2, 'Tyler', 'Wren', 'tyler.wren@sakilacustomer.org', 501, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (497, 2, 'Gilbert', 'Sledge', 'gilbert.sledge@sakilacustomer.org', 502, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (498, 1, 'Gene', 'Sanborn', 'gene.sanborn@sakilacustomer.org', 503, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (499, 2, 'Marc', 'Outlaw', 'marc.outlaw@sakilacustomer.org', 504, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (500, 1, 'Reginald', 'Kinder', 'reginald.kinder@sakilacustomer.org', 505, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (501, 1, 'Ruben', 'Geary', 'ruben.geary@sakilacustomer.org', 506, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (502, 1, 'Brett', 'Cornwell', 'brett.cornwell@sakilacustomer.org', 507, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (503, 1, 'Angel', 'Barclay', 'angel.barclay@sakilacustomer.org', 508, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (504, 1, 'Nathaniel', 'Adam', 'nathaniel.adam@sakilacustomer.org', 509, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (505, 1, 'Rafael', 'Abney', 'rafael.abney@sakilacustomer.org', 510, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (506, 2, 'Leslie', 'Seward', 'leslie.seward@sakilacustomer.org', 511, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (507, 2, 'Edgar', 'Rhoads', 'edgar.rhoads@sakilacustomer.org', 512, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (508, 2, 'Milton', 'Howland', 'milton.howland@sakilacustomer.org', 513, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (509, 1, 'Raul', 'Fortier', 'raul.fortier@sakilacustomer.org', 514, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (510, 2, 'Ben', 'Easter', 'ben.easter@sakilacustomer.org', 515, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (511, 1, 'Chester', 'Benner', 'chester.benner@sakilacustomer.org', 516, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (512, 1, 'Cecil', 'Vines', 'cecil.vines@sakilacustomer.org', 517, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (513, 2, 'Duane', 'Tubbs', 'duane.tubbs@sakilacustomer.org', 519, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (514, 2, 'Franklin', 'Troutman', 'franklin.troutman@sakilacustomer.org', 520, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (515, 1, 'Andre', 'Rapp', 'andre.rapp@sakilacustomer.org', 521, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (516, 2, 'Elmer', 'Noe', 'elmer.noe@sakilacustomer.org', 522, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (517, 2, 'Brad', 'Mccurdy', 'brad.mccurdy@sakilacustomer.org', 523, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (518, 1, 'Gabriel', 'Harder', 'gabriel.harder@sakilacustomer.org', 524, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (519, 2, 'Ron', 'Deluca', 'ron.deluca@sakilacustomer.org', 525, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (520, 2, 'Mitchell', 'Westmoreland', 'mitchell.westmoreland@sakilacustomer.org', 526, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (521, 2, 'Roland', 'South', 'roland.south@sakilacustomer.org', 527, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (522, 2, 'Arnold', 'Havens', 'arnold.havens@sakilacustomer.org', 528, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (523, 1, 'Harvey', 'Guajardo', 'harvey.guajardo@sakilacustomer.org', 529, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (525, 2, 'Adrian', 'Clary', 'adrian.clary@sakilacustomer.org', 531, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (526, 2, 'Karl', 'Seal', 'karl.seal@sakilacustomer.org', 532, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (527, 1, 'Cory', 'Meehan', 'cory.meehan@sakilacustomer.org', 533, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (528, 1, 'Claude', 'Herzog', 'claude.herzog@sakilacustomer.org', 534, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (529, 2, 'Erik', 'Guillen', 'erik.guillen@sakilacustomer.org', 535, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (530, 2, 'Darryl', 'Ashcraft', 'darryl.ashcraft@sakilacustomer.org', 536, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (531, 2, 'Jamie', 'Waugh', 'jamie.waugh@sakilacustomer.org', 537, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (532, 2, 'Neil', 'Renner', 'neil.renner@sakilacustomer.org', 538, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (533, 1, 'Jessie', 'Milam', 'jessie.milam@sakilacustomer.org', 539, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (534, 1, 'Christian', 'Jung', 'christian.jung@sakilacustomer.org', 540, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (535, 1, 'Javier', 'Elrod', 'javier.elrod@sakilacustomer.org', 541, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (536, 2, 'Fernando', 'Churchill', 'fernando.churchill@sakilacustomer.org', 542, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (537, 2, 'Clinton', 'Buford', 'clinton.buford@sakilacustomer.org', 543, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (538, 2, 'Ted', 'Breaux', 'ted.breaux@sakilacustomer.org', 544, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (539, 1, 'Mathew', 'Bolin', 'mathew.bolin@sakilacustomer.org', 545, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (540, 1, 'Tyrone', 'Asher', 'tyrone.asher@sakilacustomer.org', 546, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (541, 2, 'Darren', 'Windham', 'darren.windham@sakilacustomer.org', 547, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (542, 2, 'Lonnie', 'Tirado', 'lonnie.tirado@sakilacustomer.org', 548, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (543, 1, 'Lance', 'Pemberton', 'lance.pemberton@sakilacustomer.org', 549, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (544, 2, 'Cody', 'Nolen', 'cody.nolen@sakilacustomer.org', 550, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (545, 2, 'Julio', 'Noland', 'julio.noland@sakilacustomer.org', 551, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (546, 1, 'Kelly', 'Knott', 'kelly.knott@sakilacustomer.org', 552, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (547, 1, 'Kurt', 'Emmons', 'kurt.emmons@sakilacustomer.org', 553, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (548, 1, 'Allan', 'Cornish', 'allan.cornish@sakilacustomer.org', 554, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (549, 1, 'Nelson', 'Christenson', 'nelson.christenson@sakilacustomer.org', 555, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (550, 2, 'Guy', 'Brownlee', 'guy.brownlee@sakilacustomer.org', 556, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (551, 2, 'Clayton', 'Barbee', 'clayton.barbee@sakilacustomer.org', 557, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (552, 2, 'Hugh', 'Waldrop', 'hugh.waldrop@sakilacustomer.org', 558, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (553, 1, 'Max', 'Pitt', 'max.pitt@sakilacustomer.org', 559, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (554, 1, 'Dwayne', 'Olvera', 'dwayne.olvera@sakilacustomer.org', 560, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (555, 1, 'Dwight', 'Lombardi', 'dwight.lombardi@sakilacustomer.org', 561, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (556, 2, 'Armando', 'Gruber', 'armando.gruber@sakilacustomer.org', 562, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (557, 1, 'Felix', 'Gaffney', 'felix.gaffney@sakilacustomer.org', 563, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (558, 1, 'Jimmie', 'Eggleston', 'jimmie.eggleston@sakilacustomer.org', 564, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (559, 2, 'Everett', 'Banda', 'everett.banda@sakilacustomer.org', 565, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (560, 1, 'Jordan', 'Archuleta', 'jordan.archuleta@sakilacustomer.org', 566, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (561, 2, 'Ian', 'Still', 'ian.still@sakilacustomer.org', 567, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (562, 1, 'Wallace', 'Slone', 'wallace.slone@sakilacustomer.org', 568, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (563, 2, 'Ken', 'Prewitt', 'ken.prewitt@sakilacustomer.org', 569, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (564, 2, 'Bob', 'Pfeiffer', 'bob.pfeiffer@sakilacustomer.org', 570, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (565, 2, 'Jaime', 'Nettles', 'jaime.nettles@sakilacustomer.org', 571, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (566, 1, 'Casey', 'Mena', 'casey.mena@sakilacustomer.org', 572, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (567, 2, 'Alfredo', 'Mcadams', 'alfredo.mcadams@sakilacustomer.org', 573, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (568, 2, 'Alberto', 'Henning', 'alberto.henning@sakilacustomer.org', 574, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (569, 2, 'Dave', 'Gardiner', 'dave.gardiner@sakilacustomer.org', 575, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (570, 2, 'Ivan', 'Cromwell', 'ivan.cromwell@sakilacustomer.org', 576, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (571, 2, 'Johnnie', 'Chisholm', 'johnnie.chisholm@sakilacustomer.org', 577, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (572, 1, 'Sidney', 'Burleson', 'sidney.burleson@sakilacustomer.org', 578, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (573, 1, 'Byron', 'Box', 'byron.box@sakilacustomer.org', 579, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (574, 2, 'Julian', 'Vest', 'julian.vest@sakilacustomer.org', 580, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (575, 2, 'Isaac', 'Oglesby', 'isaac.oglesby@sakilacustomer.org', 581, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (576, 2, 'Morris', 'Mccarter', 'morris.mccarter@sakilacustomer.org', 582, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (577, 2, 'Clifton', 'Malcolm', 'clifton.malcolm@sakilacustomer.org', 583, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (578, 2, 'Willard', 'Lumpkin', 'willard.lumpkin@sakilacustomer.org', 584, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (579, 2, 'Daryl', 'Larue', 'daryl.larue@sakilacustomer.org', 585, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (580, 1, 'Ross', 'Grey', 'ross.grey@sakilacustomer.org', 586, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (581, 1, 'Virgil', 'Wofford', 'virgil.wofford@sakilacustomer.org', 587, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (582, 2, 'Andy', 'Vanhorn', 'andy.vanhorn@sakilacustomer.org', 588, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (583, 1, 'Marshall', 'Thorn', 'marshall.thorn@sakilacustomer.org', 589, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (584, 2, 'Salvador', 'Teel', 'salvador.teel@sakilacustomer.org', 590, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (585, 1, 'Perry', 'Swafford', 'perry.swafford@sakilacustomer.org', 591, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (586, 1, 'Kirk', 'Stclair', 'kirk.stclair@sakilacustomer.org', 592, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (587, 1, 'Sergio', 'Stanfield', 'sergio.stanfield@sakilacustomer.org', 593, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (588, 1, 'Marion', 'Ocampo', 'marion.ocampo@sakilacustomer.org', 594, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (589, 1, 'Tracy', 'Herrmann', 'tracy.herrmann@sakilacustomer.org', 595, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (590, 2, 'Seth', 'Hannon', 'seth.hannon@sakilacustomer.org', 596, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (591, 1, 'Kent', 'Arsenault', 'kent.arsenault@sakilacustomer.org', 597, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (592, 1, 'Terrance', 'Roush', 'terrance.roush@sakilacustomer.org', 598, 't', '2006-02-14', '2013-05-26 14:49:45.738', 0);
+INSERT INTO customer VALUES (593, 2, 'Rene', 'Mcalister', 'rene.mcalister@sakilacustomer.org', 599, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (594, 1, 'Eduardo', 'Hiatt', 'eduardo.hiatt@sakilacustomer.org', 600, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (595, 1, 'Terrence', 'Gunderson', 'terrence.gunderson@sakilacustomer.org', 601, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (596, 1, 'Enrique', 'Forsythe', 'enrique.forsythe@sakilacustomer.org', 602, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (597, 1, 'Freddie', 'Duggan', 'freddie.duggan@sakilacustomer.org', 603, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (598, 1, 'Wade', 'Delvalle', 'wade.delvalle@sakilacustomer.org', 604, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
+INSERT INTO customer VALUES (599, 2, 'Austin', 'Cintron', 'austin.cintron@sakilacustomer.org', 605, 't', '2006-02-14', '2013-05-26 14:49:45.738', 1);
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (5,'1913 Hanoi Way', 'Nagasaki', 463, '35200', '28303384290', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (6,'1121 Loja Avenue', 'California', 449, '17886', '838635286649', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (7,'692 Joliet Street', 'Attika', 38, '83579', '448477190408', '2006-02-15 09:45:30');
@@ -75,9 +653,347 @@ INSERT INTO address (address_id, address, district, city_id, postal_code, phone,
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (17,'270 Amroha Parkway', 'Osmaniye', 384, '29610', '695479687538', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (18,'770 Bydgoszcz Avenue', 'California', 120, '16266', '517338314235', '2006-02-15 09:45:30');
 INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (20,'360 Toulouse Parkway', 'England', 495, '54308', '949312333307', '2006-02-15 09:45:30');
-INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (21,'1001 Miyakonojo Lane', 'Taizz', 518, '67924', '584316724815', '2006-02-15 09:45:30');
-INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (22,'1153 Allende Way', 'Qubec', 179, '20336', '856872225376', '2006-02-15 09:45:30');
-
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (21,'270 Toulon Boulevard', 'Kalmykia', 156, '81766', '407752414682', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (22,'320 Brest Avenue', 'Kaduna', 252, '43331', '747791594069', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (27,'1780 Hino Boulevard', 'Liepaja', 303, '7716', '902731229323', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (28,'96 Tafuna Way', 'Crdoba', 128, '99865', '934730187245', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (32,'1425 Shikarpur Manor', 'Bihar', 346, '65599', '678220867005', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (33,'786 Aurora Avenue', 'Yamaguchi', 474, '65750', '18461860151', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (34,'1668 Anpolis Street', 'Taipei', 316, '50199', '525255540978', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (38,'61 Tama Street', 'Okayama', 284, '94065', '708403338270', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (39,'391 Callao Drive', 'Midi-Pyrnes', 544, '34021', '440512153169', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (41,'1440 Fukuyama Loop', 'Henan', 362, '47929', '912257250465', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (44,'671 Graz Street', 'Oriental', 353, '94399', '680768868518', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (45,'42 Brindisi Place', 'Yerevan', 586, '16744', '42384721397', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (46,'1632 Bislig Avenue', 'Nonthaburi', 394, '61117', '471675840679', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (47,'1447 Imus Way', 'Tahiti', 167, '48942', '539758313890', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (48,'1998 Halifax Drive', 'Lipetsk', 308, '76022', '177727722820', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (49,'1718 Valencia Street', 'Antofagasta', 27, '37359', '675292816413', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (51,'686 Garland Manor', 'Cear', 247, '52535', '69493378813', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (52,'909 Garland Manor', 'Tatarstan', 367, '69367', '705800322606', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (53,'725 Isesaki Place', 'Mekka', 237, '74428', '876295323994', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (54,'115 Hidalgo Parkway', 'Khartum', 379, '80168', '307703950263', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (55,'1135 Izumisano Parkway', 'California', 171, '48150', '171822533480', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (56,'939 Probolinggo Loop', 'Galicia', 1, '4166', '680428310138', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (57,'17 Kabul Boulevard', 'Chiba', 355, '38594', '697760867968', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (61,'943 Tokat Street', 'Vaduz', 560, '45428', '889318963672', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (62,'1114 Liepaja Street', 'Sarawak', 282, '69226', '212869228936', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (63,'1213 Ranchi Parkway', 'Karnataka', 350, '94352', '800024380485', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (64,'81 Hodeida Way', 'Rajasthan', 231, '55561', '250767749542', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (65,'915 Ponce Place', 'Basel-Stadt', 56, '83980', '1395251317', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (66,'1717 Guadalajara Lane', 'Missouri', 441, '85505', '914090181665', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (67,'1214 Hanoi Way', 'Nebraska', 306, '67055', '491001136577', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (68,'1966 Amroha Avenue', 'Sichuan', 139, '70385', '333489324603', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (69,'698 Otsu Street', 'Cayenne', 105, '71110', '409983924481', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (71,'1586 Guaruj Place', 'Hunan', 579, '5135', '947233365992', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (73,'1031 Daugavpils Parkway', 'Bchar', 63, '59025', '107137400143', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (74,'1124 Buenaventura Drive', 'Mekka', 13, '6856', '407733804223', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (79,'1551 Rampur Lane', 'Changhwa', 108, '72394', '251164340471', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (80,'602 Paarl Street', 'Pavlodar', 402, '98889', '896314772871', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (83,'586 Tete Way', 'Kanagawa', 256, '1079', '18581624103', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (85,'320 Baiyin Parkway', 'Mahajanga', 319, '37307', '223664661973', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (87,'929 Tallahassee Loop', 'Gauteng', 497, '74671', '800716535041', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (89,'1557 Ktahya Boulevard', 'England', 88, '88002', '720998247660', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (90,'870 Ashqelon Loop', 'Songkhla', 489, '84931', '135117278909', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (91,'1740 Portoviejo Avenue', 'Sucre', 480, '29932', '198123170793', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (92,'1942 Ciparay Parkway', 'Cheju', 113, '82624', '978987363654', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (94,'1952 Chatsworth Drive', 'Guangdong', 332, '25958', '991562402283', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (96,'984 Effon-Alaiye Avenue', 'Gois', 183, '17119', '132986892228', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (98,'152 Kitwe Parkway', 'Caraga', 82, '53182', '835433605312', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (99,'1697 Tanauan Lane', 'Punjab', 399, '22870', '4764773857', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (100,'1308 Arecibo Way', 'Georgia', 41, '30695', '6171054059', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (101,'1599 Plock Drive', 'Tete', 534, '71986', '817248913162', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (104,'1913 Kamakura Place', 'Lipetsk', 238, '97287', '942570536750', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (105,'733 Mandaluyong Place', 'Asir', 2, '77459', '196568435814', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (108,'1386 Yangor Avenue', 'Provence-Alpes-Cte', 543, '80720', '449216226468', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (112,'1002 Ahmadnagar Manor', 'Mxico', 213, '93026', '371490777743', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (114,'804 Elista Drive', 'Hubei', 159, '61069', '379804592943', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (121,'1967 Sincelejo Place', 'Gujarat', 176, '73644', '577812616052', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (122,'333 Goinia Way', 'Texas', 185, '78625', '909029256431', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (124,'241 Mosul Lane', 'Risaralda', 147, '76157', '765345144779', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (126,'1175 Tanauan Way', 'Lima', 305, '64615', '937222955822', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (128,'848 Tafuna Manor', 'Ktahya', 281, '45142', '614935229095', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (129,'569 Baicheng Lane', 'Gauteng', 85, '60304', '490211944645', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (131,'801 Hagonoy Drive', 'Smolensk', 484, '8439', '237426099212', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (133,'1854 Tieli Street', 'Shandong', 302, '15819', '509492324775', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (134,'758 Junan Lane', 'Gois', 190, '82639', '935448624185', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (138,'765 Southampton Drive', 'al-Qalyubiya', 421, '4285', '23712411567', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (139,'943 Johannesburg Avenue', 'Maharashtra', 417, '5892', '90921003005', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (140,'788 Atinsk Street', 'Karnataka', 211, '81691', '146497509724', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (141,'1749 Daxian Place', 'Gelderland', 29, '11044', '963369996279', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (143,'1029 Dzerzinsk Manor', 'Ynlin', 542, '57519', '33173584456', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (144,'1666 Beni-Mellal Place', 'Tennessee', 123, '13377', '9099941466', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (145,'928 Jaffna Loop', 'Hiroshima', 172, '93762', '581852137991', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (146,'483 Ljubertsy Parkway', 'Scotland', 149, '60562', '581174211853', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (148,'1027 Songkhla Manor', 'Minsk', 340, '30861', '563660187896', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (149,'999 Sanaa Loop', 'Gauteng', 491, '3439', '918032330119', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (150,'879 Newcastle Way', 'Michigan', 499, '90732', '206841104594', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (151,'1337 Lincoln Parkway', 'Saitama', 555, '99457', '597815221267', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (152,'1952 Pune Lane', 'Saint-Denis', 442, '92150', '354615066969', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (153,'782 Mosul Street', 'Massachusetts', 94, '25545', '885899703621', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (155,'1560 Jelets Boulevard', 'Shandong', 291, '77777', '189446090264', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (156,'1963 Moscow Place', 'Assam', 354, '64863', '761379480249', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (158,'798 Cianjur Avenue', 'Shanxi', 590, '76990', '499408708580', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (167,'1287 Xiangfan Boulevard', 'Gifu', 253, '57844', '819416131190', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (168,'842 Salzburg Lane', 'Adana', 529, '3313', '697151428760', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (169,'154 Tallahassee Loop', 'Xinxiang', 199, '62250', '935508855935', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (171,'1540 Wroclaw Drive', 'Maharashtra', 107, '62686', '182363341674', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (172,'475 Atinsk Way', 'Gansu', 240, '59571', '201705577290', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (173,'1294 Firozabad Drive', 'Jiangxi', 407, '70618', '161801569569', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (174,'1877 Ezhou Lane', 'Rajasthan', 550, '63337', '264541743403', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (175,'316 Uruapan Street', 'Perak', 223, '58194', '275788967899', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (176,'29 Pyongyang Loop', 'Batman', 58, '47753', '734780743462', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (177,'1010 Klerksdorp Way', 'Steiermark', 186, '6802', '493008546874', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (178,'1848 Salala Boulevard', 'Miranda', 373, '25220', '48265851133', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (179,'431 Xiangtan Avenue', 'Kerala', 18, '4854', '230250973122', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (180,'757 Rustenburg Avenue', 'Skikda', 483, '89668', '506134035434', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (181,'146 Johannesburg Way', 'Tamaulipas', 330, '54132', '953689007081', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (183,'1089 Iwatsuki Avenue', 'Kirov', 270, '35109', '866092335135', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (186,'533 al-Ayn Boulevard', 'California', 126, '8862', '662227486184', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (187,'1839 Szkesfehrvr Parkway', 'Gois', 317, '55709', '947468818183', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (188,'741 Ambattur Manor', 'Noord-Brabant', 438, '43310', '302590383819', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (191,'140 Chiayi Parkway', 'Sumy', 506, '38982', '855863906434', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (192,'1166 Changhwa Street', 'Caraga', 62, '58852', '650752094490', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (197,'447 Surakarta Loop', 'Nyanza', 271, '10428', '940830176580', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (198,'345 Oshawa Boulevard', 'Tokyo-to', 204, '32114', '104491201771', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (200,'1074 Binzhou Manor', 'Baden-Wrttemberg', 325, '36490', '331132568928', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (201,'817 Bradford Loop', 'Jiangsu', 109, '89459', '264286442804', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (204,'387 Mwene-Ditu Drive', 'Ahal', 35, '8073', '764477681869', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (205,'68 Molodetno Manor', 'Nordrhein-Westfalen', 575, '4662', '146640639760', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (206,'642 Nador Drive', 'Maharashtra', 77, '3924', '369050085652', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (208,'1215 Pyongyang Parkway', 'Usak', 557, '25238', '646237101779', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (211,'850 Salala Loop', 'Kitaa', 371, '10800', '403404780639', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (213,'43 Dadu Avenue', 'Rajasthan', 74, '4855', '95666951770', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (214,'751 Lima Loop', 'Aden', 7, '99405', '756460337785', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (215,'1333 Haldia Street', 'Jilin', 174, '82161', '408304391718', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (216,'660 Jedda Boulevard', 'Washington', 65, '25053', '168758068397', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (217,'1001 Miyakonojo Lane', 'Taizz', 518, '67924', '584316724815', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (218,'226 Brest Manor', 'California', 508, '2299', '785881412500', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (219,'1229 Valencia Parkway', 'Haskovo', 498, '99124', '352679173732', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (220,'1201 Qomsheh Manor', 'Gois', 28, '21464', '873492228462', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (222,'1168 Najafabad Parkway', 'Kabol', 251, '40301', '886649065861', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (225,'1926 Gingoog Street', 'Sisilia', 511, '22824', '469738825391', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (227,'1820 Maring Parkway', 'Punjab', 324, '88307', '99760893676', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (230,'201 Effon-Alaiye Way', 'Asuncin', 37, '64344', '684192903087', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (231,'430 Alessandria Loop', 'Saarland', 439, '47446', '669828224459', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (233,'356 Olomouc Manor', 'Gois', 26, '93323', '22326410776', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (234,'1256 Bislig Boulevard', 'Botosani', 86, '50598', '479007229460', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (236,'885 Yingkou Manor', 'Kaduna', 596, '31390', '588964509072', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (237,'1736 Cavite Place', 'Qina', 216, '98775', '431770603551', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (238,'346 Skikda Parkway', 'Hawalli', 233, '90628', '630424482919', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (242,'1964 Gijn Manor', 'Karnataka', 473, '14408', '918119601885', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (244,'1148 Saarbrcken Parkway', 'Fukushima', 226, '1921', '137773001988', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (245,'1103 Bilbays Parkway', 'Hubei', 578, '87660', '279979529227', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (246,'1246 Boksburg Parkway', 'Hebei', 422, '28349', '890283544295', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (247,'1483 Pathankot Street', 'Tucumn', 454, '37288', '686015532180', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (249,'300 Junan Street', 'Kyonggi', 553, '81314', '890289150158', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (251,'1473 Changhwa Parkway', 'Mxico', 124, '75933', '266798132374', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (252,'1309 Weifang Street', 'Florida', 520, '57338', '435785045362', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (253,'1760 Oshawa Manor', 'Tianjin', 535, '38140', '56257502250', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (256,'1497 Yuzhou Drive', 'England', 312, '3433', '246810237916', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (258,'752 Ondo Loop', 'Miyazaki', 338, '32474', '134673576619', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (261,'51 Laredo Avenue', 'Sagaing', 342, '68146', '884536620568', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (262,'771 Yaound Manor', 'Sofala', 64, '86768', '245477603573', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (263,'532 Toulon Street', 'Santiago', 460, '69517', '46871694740', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (267,'816 Cayenne Parkway', 'Manab', 414, '93629', '282874611748', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (269,'446 Kirovo-Tepetsk Lane', 'Osaka', 203, '19428', '303967439816', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (271,'1587 Loja Manor', 'Salzburg', 447, '5410', '621625204422', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (272,'1762 Paarl Parkway', 'Hunan', 298, '53928', '192459639410', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (273,'1519 Ilorin Place', 'Kerala', 395, '49298', '357445645426', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (274,'920 Kumbakonam Loop', 'California', 446, '75090', '685010736240', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (275,'906 Goinia Way', 'Wielkopolskie', 255, '83565', '701767622697', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (279,'1884 Shikarpur Avenue', 'Haryana', 263, '85548', '959949395183', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (280,'1980 Kamjanets-Podilskyi Street', 'Illinois', 404, '89502', '874337098891', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (281,'1944 Bamenda Way', 'Michigan', 573, '24645', '75975221996', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (283,'457 Tongliao Loop', 'Bursa', 222, '56254', '880756161823', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (286,'1308 Sumy Loop', 'Fujian', 175, '30657', '583021225407', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (287,'1405 Chisinau Place', 'Ponce', 411, '8160', '62781725285', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (288,'226 Halifax Street', 'Xinxiang', 277, '58492', '790651020929', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (293,'86 Higashiosaka Lane', 'Guanajuato', 563, '33768', '957128697225', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (295,'544 Tarsus Boulevard', 'Gurico', 562, '53145', '892523334', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (296,'1936 Cuman Avenue', 'Virginia', 433, '61195', '976798660411', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (297,'1192 Tongliao Street', 'Sharja', 470, '19065', '350970907017', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (298,'44 Najafabad Way', 'Baskimaa', 146, '61391', '96604821070', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (299,'32 Pudukkottai Lane', 'Ohio', 140, '38834', '967274728547', '2006-02-15 09:45:30');
+INSERT INTO address (address_id, address, district, city_id, postal_code,
phone, last_update) VALUES (300,'661 Chisinau Lane', 'Pietari', 274, '8856', '816436065431', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (303,'898 Jining Lane', 'Pohjois-Pohjanmaa', 387, '40070', '161643343536', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (304,'1635 Kuwana Boulevard', 'Hiroshima', 205, '52137', '710603868323', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (306,'1883 Maikop Lane', 'Kaliningrad', 254, '68469', '96110042435', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (307,'1908 Gaziantep Place', 'Liaoning', 536, '58979', '108053751300', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (308,'687 Alessandria Parkway', 'Sanaa', 455, '57587', '407218522294', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (309,'827 Yuncheng Drive', 'Callao', 99, '79047', '504434452842', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (316,'746 Joliet Lane', 'Kursk', 286, '94878', '688485191923', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (317,'780 Kimberley Way', 'Tabuk', 515, '17032', '824396883951', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (318,'1774 Yaound Place', 'Hubei', 166, '91400', '613124286867', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (321,'651 Pathankot Loop', 'Maharashtra', 336, '59811', '139378397418', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (322,'1359 Zhoushan Parkway', 'Streymoyar', 545, '29763', '46568045367', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (323,'1769 Iwaki Lane', 'Kujawsko-Pomorskie', 97, '25787', '556100547674', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (324,'1145 Vilnius Manor', 'Mxico', 451, '73170', '674805712553', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (326,'470 Boksburg Street', 'Central', 81, '97960', '908029859266', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (334,'1816 Bydgoszcz Loop', 'Dhaka', 234, '64308', '965273813662', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (335,'587 Benguela Manor', 'Illinois', 42, '91590', '165450987037', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (337,'1838 Tabriz Lane', 'Dhaka', 143, '1195', '38988715447', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (338,'431 Szkesfehrvr 
Avenue', 'Baki', 48, '57828', '119501405123', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (339,'503 Sogamoso Loop', 'Sumqayit', 505, '49812', '834626715837', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (340,'507 Smolensk Loop', 'Sousse', 492, '22971', '80303246192', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (342,'124 al-Manama Way', 'Hiroshima', 382, '52368', '647899404952', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (344,'1909 Benguela Lane', 'Henan', 581, '19913', '624138001031', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (345,'68 Ponce Parkway', 'Hanoi', 201, '85926', '870635127812', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (346,'1217 Konotop Avenue', 'Gelderland', 151, '504', '718917251754', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (349,'1516 Escobar Drive', 'Tongatapu', 370, '46069', '64536069371', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (350,'1628 Nagareyama Lane', 'Central', 453, '60079', '20064292617', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (351,'1157 Nyeri Loop', 'Adygea', 320, '56380', '262744791493', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (352,'1673 Tangail Drive', 'Daugavpils', 137, '26857', '627924259271', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (353,'381 Kabul Way', 'Taipei', 209, '87272', '55477302294', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (356,'1378 Beira Loop', 'Krasnojarsk', 597, '40792', '840957664136', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (357,'1641 Changhwa Place', 'Nord-Ouest', 52, '37636', '256546485220', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (358,'1698 Southport Loop', 'Hidalgo', 393, '49009', '754358349853', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (360,'619 Hunuco Avenue', 'Shimane', 331, '81508', '142596392389', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (363,'604 Bern Place', 'Jharkhand', 429, '5373', '620719383725', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (365,'1009 Zanzibar Lane', 'Arecibo', 32, '64875', '102396298916', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (367,'1163 London Parkway', 'Par', 66, '6066', '675120358494', '2006-02-15 09:45:30'); +INSERT 
INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (368,'1658 Jastrzebie-Zdrj Loop', 'Central', 372, '96584', '568367775448', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (369,'817 Laredo Avenue', 'Jalisco', 188, '77449', '151249681135', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (370,'1565 Tangail Manor', 'Okinawa', 377, '45750', '634445428822', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (371,'1912 Emeishan Drive', 'Balikesir', 50, '33050', '99883471275', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (373,'1922 Miraj Way', 'Esfahan', 356, '13203', '320471479776', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (374,'433 Florencia Street', 'Chihuahua', 250, '91330', '561729882725', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (375,'1049 Matamoros Parkway', 'Karnataka', 191, '69640', '960505250340', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (379,'1133 Rizhao Avenue', 'Pernambuco', 572, '2800', '600264533987', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (381,'1618 Olomouc Manor', 'Kurgan', 285, '26385', '96846695220', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (382,'220 Hidalgo Drive', 'Kermanshah', 265, '45298', '342720754566', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (384,'97 Mogiljov Lane', 'Gujarat', 73, '89294', '924815207181', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (388,'368 Hunuco Boulevard', 'Namibe', 360, '17165', '106439158941', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (389,'500 Lincoln Parkway', 'Jiangsu', 210, '95509', '550306965159', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (390,'102 Chapra Drive', 'Ibaragi', 521, '14073', '776031833752', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (391,'1793 Meixian Place', 'Hmelnytskyi', 258, '33535', '619966287415', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (392,'514 Ife Way', 'Shaba', 315, '69973', '900235712074', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (394,'753 Ilorin Avenue', 'Sichuan', 157, '3656', '464511145118', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (396,'767 Pyongyang Drive', 'Osaka', 229, '83536', '667736124769', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, 
postal_code, phone, last_update) VALUES (398,'954 Lapu-Lapu Way', 'Moskova', 278, '8816', '737229003916', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (399,'331 Bydgoszcz Parkway', 'Asturia', 181, '966', '537374465982', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (401,'168 Cianjur Manor', 'Saitama', 228, '73824', '679095087143', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (402,'616 Hagonoy Avenue', 'Krasnojarsk', 39, '46043', '604177838256', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (404,'734 Bchar Place', 'Punjab', 375, '30586', '280578750435', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (405,'530 Lausanne Lane', 'Texas', 135, '11067', '775235029633', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (406,'454 Patiala Lane', 'Fukushima', 276, '13496', '794553031307', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (407,'1346 Mysore Drive', 'Bretagne', 92, '61507', '516647474029', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (409,'1266 Laredo Parkway', 'Saitama', 380, '7664', '1483365694', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (413,'692 Amroha Drive', 'Northern', 230, '35575', '359478883004', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (417,'791 Salinas Street', 'Punjab', 208, '40509', '129953030512', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (419,'397 Sunnyvale Avenue', 'Guanajuato', 19, '55566', '680851640676', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (420,'992 Klerksdorp Loop', 'Utrecht', 23, '33711', '855290087237', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (421,'966 Arecibo Loop', 'Sind', 134, '94018', '15273765306', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (424,'1948 Bayugan Parkway', 'Bihar', 264, '60622', '987306329957', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (425,'1866 al-Qatif Avenue', 'California', 155, '89420', '546793516940', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (428,'1727 Matamoros Place', 'Sawhaj', 465, '78813', '129673677866', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (429,'1269 Botosani Manor', 'Guangdong', 468, '47394', '736517327853', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (431,'1596 Acua Parkway', 
'Jharkhand', 418, '70425', '157133457169', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (433,'1823 Hoshiarpur Lane', 'Komi', 510, '33191', '307133768620', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (434,'1404 Taguig Drive', 'Okayama', 547, '87212', '572068624538', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (436,'287 Cuautla Boulevard', 'Chuquisaca', 501, '72736', '82619513349', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (438,'596 Huixquilucan Place', 'Nampula', 351, '65892', '342709348083', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (440,'722 Bradford Lane', 'Shandong', 249, '90920', '746251338300', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (443,'1836 Korla Parkway', 'Copperbelt', 272, '55405', '689681677428', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (444,'231 Kaliningrad Place', 'Lombardia', 70, '57833', '575081026569', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (445,'495 Bhimavaram Lane', 'Maharashtra', 144, '3', '82088937724', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (446,'1924 Shimonoseki Drive', 'Batna', 59, '52625', '406784385440', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (449,'1289 Belm Boulevard', 'Tartumaa', 530, '88306', '237368926031', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (452,'207 Cuernavaca Loop', 'Tatarstan', 352, '52671', '782900030287', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (453,'319 Springs Loop', 'Baijeri', 160, '99552', '72524459905', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (456,'814 Simferopol Loop', 'Sinaloa', 154, '48745', '524567129902', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (458,'138 Caracas Boulevard', 'Zulia', 326, '16790', '974433019532', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (461,'1889 Valparai Way', 'Ziguinchor', 600, '75559', '670370974122', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (462,'1485 Bratislava Place', 'Illinois', 435, '83183', '924663855568', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (464,'76 Kermanshah Manor', 'Esfahan', 423, '23343', '762361821578', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (465,'734 Tanshui Avenue', 'Caquet', 170, '70664', 
'366776723320', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (467,'1621 Tongliao Avenue', 'Irkutsk', 558, '22173', '209342540247', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (469,'1872 Toulon Loop', 'OHiggins', 428, '7939', '928809465153', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (470,'1088 Ibirit Place', 'Jalisco', 595, '88502', '49084281333', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (471,'1322 Mosul Parkway', 'Shandong', 145, '95400', '268053970382', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (472,'1447 Chatsworth Place', 'Chihuahua', 129, '41545', '769370126331', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (473,'1257 Guadalajara Street', 'Karnataka', 78, '33599', '195337700615', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (474,'1469 Plock Lane', 'Galicia', 388, '95835', '622884741180', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (476,'270 Tambaram Parkway', 'Gauteng', 244, '9668', '248446668735', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (479,'1854 Okara Boulevard', 'Drenthe', 158, '42123', '131912793873', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (480,'421 Yaound Street', 'Sumy', 385, '11363', '726875628268', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (481,'1153 Allende Way', 'Qubec', 179, '20336', '856872225376', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (482,'808 Naala-Porto Parkway', 'England', 500, '41060', '553452430707', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (484,'98 Pyongyang Boulevard', 'Ohio', 11, '88749', '191958435142', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (485,'984 Novoterkassk Loop', 'Gaziantep', 180, '28165', '435118527255', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (486,'64 Korla Street', 'Mwanza', 347, '25145', '510383179153', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (488,'698 Jelets Boulevard', 'Denizli', 142, '2596', '975185523021', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (489,'1297 Alvorada Parkway', 'Ningxia', 587, '11839', '508348602835', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (490,'1909 Dayton Avenue', 'Guangdong', 469, '88513', '702955450528', '2006-02-15 09:45:30'); +INSERT 
INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (492,'185 Mannheim Lane', 'Stavropol', 408, '23661', '589377568313', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (495,'656 Matamoros Drive', 'Boyac', 487, '19489', '17305839123', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (496,'775 ostka Drive', 'al-Daqahliya', 337, '22358', '171973024401', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (498,'319 Plock Parkway', 'Istanbul', 504, '26101', '854259976812', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (500,'362 Rajkot Lane', 'Gansu', 47, '98030', '962020153680', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (501,'1060 Tandil Lane', 'Shandong', 432, '72349', '211256301880', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (502,'1515 Korla Way', 'England', 589, '57197', '959467760895', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (506,'414 Mandaluyong Street', 'Lubelskie', 314, '16370', '52709222667', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (508,'496 Celaya Drive', 'Nagano', 552, '90797', '759586584889', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (509,'786 Matsue Way', 'Illinois', 245, '37469', '111177206479', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (512,'1269 Ipoh Avenue', 'Eskisehir', 163, '54674', '402630109080', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (514,'1747 Rustenburg Place', 'Bihar', 110, '51369', '442673923363', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (515,'886 Tonghae Place', 'Volgograd', 259, '19450', '711928348157', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (516,'1574 Goinia Boulevard', 'Heilongjiang', 502, '39529', '59634255214', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (517,'548 Uruapan Street', 'Ontario', 312, '35653', '879347453467', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (520,'1778 Gijn Manor', 'Hubei', 594, '35156', '288910576761', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (522,'1768 Udine Loop', 'Battambang', 60, '32347', '448876499197', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (523,'608 Birgunj Parkway', 'Taipei', 116, '400', '627425618482', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, 
phone, last_update) VALUES (525,'1949 Sanya Street', 'Gumma', 224, '61244', '132100972047', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (527,'1993 0 Loop', 'Liaoning', 588, '41214', '25865528181', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (532,'1427 Tabuk Place', 'Florida', 101, '31342', '214756839122', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (533,'556 Asuncin Way', 'Mogiljov', 339, '35364', '338244023543', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (534,'486 Ondo Parkway', 'Benguela', 67, '35202', '105882218332', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (537,'958 Sagamihara Lane', 'Mie', 287, '88408', '427274926505', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (539,'1332 Gaziantep Lane', 'Shandong', 80, '22813', '383353187467', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (541,'195 Ilorin Street', 'Chari-Baguirmi', 363, '49250', '8912935608', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (542,'193 Bhusawal Place', 'Kang-won', 539, '9750', '745267607502', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (543,'43 Vilnius Manor', 'Colorado', 42, '79814', '484500282381', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (544,'183 Haiphong Street', 'Jilin', 46, '69953', '488600270038', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (547,'379 Lublin Parkway', 'Toscana', 309, '74568', '921960450089', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (549,'454 Qinhuangdao Drive', 'Tadla-Azilal', 68, '25866', '786270036240', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (551,'182 Nukualofa Drive', 'Sumy', 275, '15414', '426346224043', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (552,'390 Wroclaw Way', 'Hainan', 462, '5753', '357593328658', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (553,'1421 Quilmes Lane', 'Ishikawa', 260, '19151', '135407755975', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (558,'904 Clarksville Drive', 'Zhejiang', 193, '52234', '955349440539', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (559,'1917 Kumbakonam Parkway', 'Vojvodina', 368, '11892', '698182547686', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (560,'1447 Imus Place', 'Gujarat', 426, 
'12905', '62127829280', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (561,'1497 Fengshan Drive', 'KwaZulu-Natal', 112, '63022', '368738360376', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (562,'869 Shikarpur Way', 'England', 496, '57380', '590764256785', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (563,'1059 Yuncheng Avenue', 'Vilna', 570, '47498', '107092893983', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (564,'505 Madiun Boulevard', 'Dolnoslaskie', 577, '97271', '970638808606', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (565,'1741 Hoshiarpur Boulevard', 'al-Sharqiya', 79, '22372', '855066328617', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (569,'1342 Abha Boulevard', 'Bukarest', 95, '10714', '997453607116', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (570,'415 Pune Avenue', 'Shandong', 580, '44274', '203202500108', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (571,'1746 Faaa Way', 'Huanuco', 214, '32515', '863080561151', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (572,'539 Hami Way', 'Tokat', 538, '52196', '525518075499', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (573,'1407 Surakarta Manor', 'Moskova', 466, '33224', '324346485054', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (575,'1052 Pathankot Avenue', 'Sichuan', 299, '77397', '128499386727', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (578,'1405 Hagonoy Avenue', 'Slaskie', 133, '86587', '867287719310', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (580,'923 Tangail Boulevard', 'Tokyo-to', 10, '33384', '315528269898', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (581,'186 Skikda Lane', 'Morelos', 131, '89422', '14465669789', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (582,'1568 Celaya Parkway', 'Kaohsiung', 168, '34750', '278669994384', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (583,'1489 Kakamigahara Lane', 'Taipei', 526, '98883', '29341849811', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (584,'1819 Alessandria Loop', 'Campeche', 103, '53829', '377633994405', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (585,'1208 Tama Loop', 'Ninawa', 344, '73605', '954786054144', '2006-02-15 
09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (587,'760 Miyakonojo Drive', 'Guerrero', 246, '64682', '294449058179', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (588,'966 Asuncin Way', 'Hidalgo', 212, '62703', '995527378381', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (589,'1584 Ljubertsy Lane', 'England', 494, '22954', '285710089439', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (590,'247 Jining Parkway', 'Banjul', 54, '53446', '170115379190', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (593,'1402 Zanzibar Boulevard', 'Guanajuato', 106, '71102', '387448063440', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (594,'1464 Kursk Parkway', 'Shandong', 574, '17381', '338758048786', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (595,'1074 Sanaa Parkway', 'Loja', 311, '22474', '154124128457', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (596,'1759 Niznekamsk Avenue', 'al-Manama', 14, '39414', '864392582257', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (598,'42 Fontana Avenue', 'Fejr', 512, '14684', '437829801725', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (599,'1895 Zhezqazghan Drive', 'California', 177, '36693', '137809746111', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (601,'844 Bucuresti Place', 'Liaoning', 242, '36603', '935952366111', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (603,'1103 Quilmes Boulevard', 'Piura', 503, '52137', '644021380889', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (604,'1331 Usak Boulevard', 'Vaud', 296, '61960', '145308717464', '2006-02-15 09:45:30'); +INSERT INTO address (address_id, address, district, city_id, postal_code, phone, last_update) VALUES (605,'1325 Fukuyama Street', 'Heilongjiang', 537, '27107', '288241215394', '2006-02-15 09:45:30'); INSERT INTO city (city_id, city, country_id, last_update) VALUES (1,'A Corua (La Corua)', 87, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (2,'Abha', 82, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (3,'Abu Dhabi', 101, '2006-02-15 09:45:25 '); @@ -97,8 +1013,587 @@ INSERT INTO city (city_id, city, country_id, last_update) VALUES (16,'al-Qatif', INSERT INTO city (city_id, city, country_id, last_update) VALUES (17,'Alessandria', 49, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (18,'Allappuzha (Alleppey)', 44, '2006-02-15 09:45:25 '); INSERT INTO city (city_id, city, country_id, last_update) VALUES (19,'Allende', 60, '2006-02-15 09:45:25 '); -INSERT INTO city 
(city_id, city, country_id, last_update) VALUES (20,'Gatineau', 20, '2006-02-15 09:45:25 '); - +INSERT INTO city (city_id, city, country_id, last_update) VALUES (20,'Almirante Brown', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (21,'Alvorada', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (22,'Ambattur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (23,'Amersfoort', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (24,'Amroha', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (25,'Angra dos Reis', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (26,'Anpolis', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (27,'Antofagasta', 22, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (28,'Aparecida de Goinia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (29,'Apeldoorn', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (30,'Araatuba', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (31,'Arak', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (32,'Arecibo', 77, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (33,'Arlington', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (34,'Ashdod', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (35,'Ashgabat', 98, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (36,'Ashqelon', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (37,'Asuncin', 73, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (38,'Athenai', 39, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (39,'Atinsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (40,'Atlixco', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (41,'Augusta-Richmond County', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (42,'Aurora', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (43,'Avellaneda', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (44,'Bag', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (45,'Baha Blanca', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (46,'Baicheng', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (47,'Baiyin', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (48,'Baku', 10, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (49,'Balaiha', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (50,'Balikesir', 97, '2006-02-15 09:45:25 
'); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (51,'Balurghat', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (52,'Bamenda', 19, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (53,'Bandar Seri Begawan', 16, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (54,'Banjul', 37, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (55,'Barcelona', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (56,'Basel', 91, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (57,'Bat Yam', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (58,'Batman', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (59,'Batna', 2, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (60,'Battambang', 18, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (61,'Baybay', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (62,'Bayugan', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (63,'Bchar', 2, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (64,'Beira', 63, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (65,'Bellevue', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (66,'Belm', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (67,'Benguela', 4, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (68,'Beni-Mellal', 62, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (69,'Benin City', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (70,'Bergamo', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (71,'Berhampore (Baharampur)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (72,'Bern', 91, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (73,'Bhavnagar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (74,'Bhilwara', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (75,'Bhimavaram', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (76,'Bhopal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (77,'Bhusawal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (78,'Bijapur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (79,'Bilbays', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (80,'Binzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (81,'Birgunj', 66, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (82,'Bislig', 75, '2006-02-15 09:45:25 '); 
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (83,'Blumenau', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (84,'Boa Vista', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (85,'Boksburg', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (86,'Botosani', 78, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (87,'Botshabelo', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (88,'Bradford', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (89,'Braslia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (90,'Bratislava', 84, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (91,'Brescia', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (92,'Brest', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (93,'Brindisi', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (94,'Brockton', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (95,'Bucuresti', 78, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (96,'Buenaventura', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (97,'Bydgoszcz', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (98,'Cabuyao', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (99,'Callao', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (100,'Cam Ranh', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (101,'Cape Coral', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (102,'Caracas', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (103,'Carmen', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (104,'Cavite', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (105,'Cayenne', 35, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (106,'Celaya', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (107,'Chandrapur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (108,'Changhwa', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (109,'Changzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (110,'Chapra', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (111,'Charlotte Amalie', 106, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (112,'Chatsworth', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (113,'Cheju', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (114,'Chiayi', 92, 
'2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (115,'Chisinau', 61, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (116,'Chungho', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (117,'Cianjur', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (118,'Ciomas', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (119,'Ciparay', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (120,'Citrus Heights', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (121,'Citt del Vaticano', 41, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (122,'Ciudad del Este', 73, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (123,'Clarksville', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (124,'Coacalco de Berriozbal', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (125,'Coatzacoalcos', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (126,'Compton', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (127,'Coquimbo', 22, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (128,'Crdoba', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (129,'Cuauhtmoc', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (130,'Cuautla', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (131,'Cuernavaca', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (132,'Cuman', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (133,'Czestochowa', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (134,'Dadu', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (135,'Dallas', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (136,'Datong', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (137,'Daugavpils', 54, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (138,'Davao', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (139,'Daxian', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (140,'Dayton', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (141,'Deba Habe', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (142,'Denizli', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (143,'Dhaka', 12, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (144,'Dhule (Dhulia)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (145,'Dongying', 23, '2006-02-15 09:45:25 '); +INSERT INTO city 
(city_id, city, country_id, last_update) VALUES (146,'Donostia-San Sebastin', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (147,'Dos Quebradas', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (148,'Duisburg', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (149,'Dundee', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (150,'Dzerzinsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (151,'Ede', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (152,'Effon-Alaiye', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (153,'El Alto', 14, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (154,'El Fuerte', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (155,'El Monte', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (156,'Elista', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (157,'Emeishan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (158,'Emmen', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (159,'Enshi', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (160,'Erlangen', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (161,'Escobar', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (162,'Esfahan', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (163,'Eskisehir', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (164,'Etawah', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (165,'Ezeiza', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (166,'Ezhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (167,'Faaa', 36, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (168,'Fengshan', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (169,'Firozabad', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (170,'Florencia', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (171,'Fontana', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (172,'Fukuyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (173,'Funafuti', 99, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (174,'Fuyu', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (175,'Fuzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (176,'Gandhinagar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (177,'Garden Grove', 103, '2006-02-15 
09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (178,'Garland', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (179,'Gatineau', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (180,'Gaziantep', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (181,'Gijn', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (182,'Gingoog', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (183,'Goinia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (184,'Gorontalo', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (185,'Grand Prairie', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (186,'Graz', 9, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (187,'Greensboro', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (188,'Guadalajara', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (189,'Guaruj', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (190,'guas Lindas de Gois', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (191,'Gulbarga', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (192,'Hagonoy', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (193,'Haining', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (194,'Haiphong', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (195,'Haldia', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (196,'Halifax', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (197,'Halisahar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (198,'Halle/Saale', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (199,'Hami', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (200,'Hamilton', 68, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (201,'Hanoi', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (202,'Hidalgo', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (203,'Higashiosaka', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (204,'Hino', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (205,'Hiroshima', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (206,'Hodeida', 107, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (207,'Hohhot', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (208,'Hoshiarpur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) 
VALUES (209,'Hsichuh', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (210,'Huaian', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (211,'Hubli-Dharwad', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (212,'Huejutla de Reyes', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (213,'Huixquilucan', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (214,'Hunuco', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (215,'Ibirit', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (216,'Idfu', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (217,'Ife', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (218,'Ikerre', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (219,'Iligan', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (220,'Ilorin', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (221,'Imus', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (222,'Inegl', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (223,'Ipoh', 59, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (224,'Isesaki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (225,'Ivanovo', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (226,'Iwaki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (227,'Iwakuni', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (228,'Iwatsuki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (229,'Izumisano', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (230,'Jaffna', 88, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (231,'Jaipur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (232,'Jakarta', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (233,'Jalib al-Shuyukh', 53, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (234,'Jamalpur', 12, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (235,'Jaroslavl', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (236,'Jastrzebie-Zdrj', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (237,'Jedda', 82, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (238,'Jelets', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (239,'Jhansi', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (240,'Jinchang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (241,'Jining', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (242,'Jinzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (243,'Jodhpur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (244,'Johannesburg', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (245,'Joliet', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (246,'Jos Azueta', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (247,'Juazeiro do Norte', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (248,'Juiz de Fora', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (249,'Junan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (250,'Jurez', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (251,'Kabul', 1, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (252,'Kaduna', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (253,'Kakamigahara', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (254,'Kaliningrad', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (255,'Kalisz', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (256,'Kamakura', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (257,'Kamarhati', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (258,'Kamjanets-Podilskyi', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (259,'Kamyin', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (260,'Kanazawa', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (261,'Kanchrapara', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (262,'Kansas City', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (263,'Karnal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (264,'Katihar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (265,'Kermanshah', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (266,'Kilis', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (267,'Kimberley', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (268,'Kimchon', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (269,'Kingstown', 81, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (270,'Kirovo-Tepetsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (271,'Kisumu', 52, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (272,'Kitwe', 109, '2006-02-15 
09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (273,'Klerksdorp', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (274,'Kolpino', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (275,'Konotop', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (276,'Koriyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (277,'Korla', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (278,'Korolev', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (279,'Kowloon and New Kowloon', 42, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (280,'Kragujevac', 108, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (281,'Ktahya', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (282,'Kuching', 59, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (283,'Kumbakonam', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (284,'Kurashiki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (285,'Kurgan', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (286,'Kursk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (287,'Kuwana', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (288,'La Paz', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (289,'La Plata', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (290,'La Romana', 27, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (291,'Laiwu', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (292,'Lancaster', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (293,'Laohekou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (294,'Lapu-Lapu', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (295,'Laredo', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (296,'Lausanne', 91, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (297,'Le Mans', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (298,'Lengshuijiang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (299,'Leshan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (300,'Lethbridge', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (301,'Lhokseumawe', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (302,'Liaocheng', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (303,'Liepaja', 54, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (304,'Lilongwe', 58, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (305,'Lima', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (306,'Lincoln', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (307,'Linz', 9, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (308,'Lipetsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (309,'Livorno', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (310,'Ljubertsy', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (311,'Loja', 28, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (312,'London', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (313,'New London', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (314,'Lublin', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (315,'Lubumbashi', 25, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (316,'Lungtan', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (317,'Luzinia', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (318,'Madiun', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (319,'Mahajanga', 57, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (320,'Maikop', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (321,'Malm', 90, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (322,'Manchester', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (323,'Mandaluyong', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (324,'Mandi Bahauddin', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (325,'Mannheim', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (326,'Maracabo', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (327,'Mardan', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (328,'Maring', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (329,'Masqat', 71, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (330,'Matamoros', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (331,'Matsue', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (332,'Meixian', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (333,'Memphis', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (334,'Merlo', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (335,'Mexicali', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, 
country_id, last_update) VALUES (336,'Miraj', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (337,'Mit Ghamr', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (338,'Miyakonojo', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (339,'Mogiljov', 13, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (340,'Molodetno', 13, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (341,'Monclova', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (342,'Monywa', 64, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (343,'Moscow', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (344,'Mosul', 47, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (345,'Mukateve', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (346,'Munger (Monghyr)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (347,'Mwanza', 93, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (348,'Mwene-Ditu', 25, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (349,'Myingyan', 64, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (350,'Mysore', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (351,'Naala-Porto', 63, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (352,'Nabereznyje Telny', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (353,'Nador', 62, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (354,'Nagaon', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (355,'Nagareyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (356,'Najafabad', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (357,'Naju', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (358,'Nakhon Sawan', 94, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (359,'Nam Dinh', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (360,'Namibe', 4, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (361,'Nantou', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (362,'Nanyang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (363,'NDjamna', 21, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (364,'Newcastle', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (365,'Nezahualcyotl', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (366,'Nha Trang', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (367,'Niznekamsk', 80, '2006-02-15 
09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (368,'Novi Sad', 108, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (369,'Novoterkassk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (370,'Nukualofa', 95, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (371,'Nuuk', 40, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (372,'Nyeri', 52, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (373,'Ocumare del Tuy', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (374,'Ogbomosho', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (375,'Okara', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (376,'Okayama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (377,'Okinawa', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (378,'Olomouc', 26, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (379,'Omdurman', 89, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (380,'Omiya', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (381,'Ondo', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (382,'Onomichi', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (383,'Oshawa', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (384,'Osmaniye', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (385,'ostka', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (386,'Otsu', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (387,'Oulu', 33, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (388,'Ourense (Orense)', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (389,'Owo', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (390,'Oyo', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (391,'Ozamis', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (392,'Paarl', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (393,'Pachuca de Soto', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (394,'Pak Kret', 94, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (395,'Palghat (Palakkad)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (396,'Pangkal Pinang', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (397,'Papeete', 36, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (398,'Parbhani', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES 
(399,'Pathankot', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (400,'Patiala', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (401,'Patras', 39, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (402,'Pavlodar', 51, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (403,'Pemalang', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (404,'Peoria', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (405,'Pereira', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (406,'Phnom Penh', 18, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (407,'Pingxiang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (408,'Pjatigorsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (409,'Plock', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (410,'Po', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (411,'Ponce', 77, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (412,'Pontianak', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (413,'Poos de Caldas', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (414,'Portoviejo', 28, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (415,'Probolinggo', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (416,'Pudukkottai', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (417,'Pune', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (418,'Purnea (Purnia)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (419,'Purwakarta', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (420,'Pyongyang', 70, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (421,'Qalyub', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (422,'Qinhuangdao', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (423,'Qomsheh', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (424,'Quilmes', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (425,'Rae Bareli', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (426,'Rajkot', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (427,'Rampur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (428,'Rancagua', 22, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (429,'Ranchi', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (430,'Richmond Hill', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, 
city, country_id, last_update) VALUES (431,'Rio Claro', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (432,'Rizhao', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (433,'Roanoke', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (434,'Robamba', 28, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (435,'Rockford', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (436,'Ruse', 17, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (437,'Rustenburg', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (438,'s-Hertogenbosch', 67, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (439,'Saarbrcken', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (440,'Sagamihara', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (441,'Saint Louis', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (442,'Saint-Denis', 79, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (443,'Sal', 62, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (444,'Salala', 71, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (445,'Salamanca', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (446,'Salinas', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (447,'Salzburg', 9, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (448,'Sambhal', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (449,'San Bernardino', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (450,'San Felipe de Puerto Plata', 27, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (451,'San Felipe del Progreso', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (452,'San Juan Bautista Tuxtepec', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (453,'San Lorenzo', 73, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (454,'San Miguel de Tucumn', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (455,'Sanaa', 107, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (456,'Santa Brbara dOeste', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (457,'Santa F', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (458,'Santa Rosa', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (459,'Santiago de Compostela', 87, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (460,'Santiago de los Caballeros', 27, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (461,'Santo Andr', 15, '2006-02-15 
09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (462,'Sanya', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (463,'Sasebo', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (464,'Satna', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (465,'Sawhaj', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (466,'Serpuhov', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (467,'Shahr-e Kord', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (468,'Shanwei', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (469,'Shaoguan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (470,'Sharja', 101, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (471,'Shenzhen', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (472,'Shikarpur', 72, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (473,'Shimoga', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (474,'Shimonoseki', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (475,'Shivapuri', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (476,'Shubra al-Khayma', 29, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (477,'Siegen', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (478,'Siliguri (Shiliguri)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (479,'Simferopol', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (480,'Sincelejo', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (481,'Sirjan', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (482,'Sivas', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (483,'Skikda', 2, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (484,'Smolensk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (485,'So Bernardo do Campo', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (486,'So Leopoldo', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (487,'Sogamoso', 24, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (488,'Sokoto', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (489,'Songkhla', 94, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (490,'Sorocaba', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (491,'Soshanguve', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (492,'Sousse', 96, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (493,'South Hill', 5, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (494,'Southampton', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (495,'Southend-on-Sea', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (496,'Southport', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (497,'Springs', 85, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (498,'Stara Zagora', 17, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (499,'Sterling Heights', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (500,'Stockport', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (501,'Sucre', 14, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (502,'Suihua', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (503,'Sullana', 74, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (504,'Sultanbeyli', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (505,'Sumqayit', 10, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (506,'Sumy', 100, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (507,'Sungai Petani', 59, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (508,'Sunnyvale', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (509,'Surakarta', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (510,'Syktyvkar', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (511,'Syrakusa', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (512,'Szkesfehrvr', 43, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (513,'Tabora', 93, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (514,'Tabriz', 46, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (515,'Tabuk', 82, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (516,'Tafuna', 3, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (517,'Taguig', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (518,'Taizz', 107, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (519,'Talavera', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (520,'Tallahassee', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (521,'Tama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (522,'Tambaram', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (523,'Tanauan', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (524,'Tandil', 6, '2006-02-15 09:45:25 '); 
+INSERT INTO city (city_id, city, country_id, last_update) VALUES (525,'Tangail', 12, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (526,'Tanshui', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (527,'Tanza', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (528,'Tarlac', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (529,'Tarsus', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (530,'Tartu', 30, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (531,'Teboksary', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (532,'Tegal', 45, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (533,'Tel Aviv-Jaffa', 48, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (534,'Tete', 63, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (535,'Tianjin', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (536,'Tiefa', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (537,'Tieli', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (538,'Tokat', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (539,'Tonghae', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (540,'Tongliao', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (541,'Torren', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (542,'Touliu', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (543,'Toulon', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (544,'Toulouse', 34, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (545,'Trshavn', 32, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (546,'Tsaotun', 92, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (547,'Tsuyama', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (548,'Tuguegarao', 75, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (549,'Tychy', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (550,'Udaipur', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (551,'Udine', 49, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (552,'Ueda', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (553,'Uijongbu', 86, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (554,'Uluberia', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (555,'Urawa', 50, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (556,'Uruapan', 60, '2006-02-15 09:45:25 '); +INSERT INTO 
city (city_id, city, country_id, last_update) VALUES (557,'Usak', 97, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (558,'Usolje-Sibirskoje', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (559,'Uttarpara-Kotrung', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (560,'Vaduz', 55, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (561,'Valencia', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (562,'Valle de la Pascua', 104, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (563,'Valle de Santiago', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (564,'Valparai', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (565,'Vancouver', 20, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (566,'Varanasi (Benares)', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (567,'Vicente Lpez', 6, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (568,'Vijayawada', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (569,'Vila Velha', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (570,'Vilnius', 56, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (571,'Vinh', 105, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (572,'Vitria de Santo Anto', 15, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (573,'Warren', 103, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (574,'Weifang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (575,'Witten', 38, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (576,'Woodridge', 8, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (577,'Wroclaw', 76, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (578,'Xiangfan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (579,'Xiangtan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (580,'Xintai', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (581,'Xinxiang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (582,'Yamuna Nagar', 44, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (583,'Yangor', 65, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (584,'Yantai', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (585,'Yaound', 19, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (586,'Yerevan', 7, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (587,'Yinchuan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, 
last_update) VALUES (588,'Yingkou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (589,'York', 102, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (590,'Yuncheng', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (591,'Yuzhou', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (592,'Zalantun', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (593,'Zanzibar', 93, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (594,'Zaoyang', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (595,'Zapopan', 60, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (596,'Zaria', 69, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (597,'Zeleznogorsk', 80, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (598,'Zhezqazghan', 51, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (599,'Zhoushan', 23, '2006-02-15 09:45:25 '); +INSERT INTO city (city_id, city, country_id, last_update) VALUES (600,'Ziguinchor', 83, '2006-02-15 09:45:25 '); INSERT INTO country (country_id, country, last_update) VALUES (1,'Afghanistan', '2006-02-15 09:44:00 '); INSERT INTO country (country_id, country, last_update) VALUES (2,'Algeria', '2006-02-15 09:44:00 '); INSERT INTO country (country_id, country, last_update) VALUES (3,'American Samoa', '2006-02-15 09:44:00 '); @@ -118,9 +1613,96 @@ INSERT INTO country (country_id, country, last_update) VALUES (16,'Brunei', '200 INSERT INTO country (country_id, country, last_update) VALUES (17,'Bulgaria', '2006-02-15 09:44:00 '); INSERT INTO country (country_id, country, last_update) VALUES (18,'Cambodia', '2006-02-15 09:44:00 '); INSERT INTO country (country_id, country, last_update) VALUES (19,'Cameroon', '2006-02-15 09:44:00 '); -INSERT INTO country (country_id, country, last_update) VALUES (20,'Yemen', '2006-02-15 09:44:00 '); - -SELECT cu.client_customer_id AS id, +INSERT INTO country (country_id, country, last_update) VALUES (20,'Canada', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (21,'Chad', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (22,'Chile', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (23,'China', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (24,'Colombia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (25,'Congo, The Democratic Republic of the', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (26,'Czech Republic', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (27,'Dominican Republic', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (28,'Ecuador', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (29,'Egypt', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (30,'Estonia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (31,'Ethiopia', '2006-02-15 09:44:00 '); 
+INSERT INTO country (country_id, country, last_update) VALUES (32,'Faroe Islands', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (33,'Finland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (34,'France', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (35,'French Guiana', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (36,'French Polynesia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (37,'Gambia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (38,'Germany', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (39,'Greece', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (40,'Greenland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (41,'Holy See (Vatican City State)', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (42,'Hong Kong', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (43,'Hungary', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (44,'India', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (45,'Indonesia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (46,'Iran', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (47,'Iraq', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (48,'Israel', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (49,'Italy', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (50,'Japan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (51,'Kazakstan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (52,'Kenya', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (53,'Kuwait', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (54,'Latvia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (55,'Liechtenstein', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (56,'Lithuania', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (57,'Madagascar', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (58,'Malawi', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (59,'Malaysia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (60,'Mexico', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (61,'Moldova', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (62,'Morocco', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (63,'Mozambique', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (64,'Myanmar', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (65,'Nauru', '2006-02-15 09:44:00 '); +INSERT 
INTO country (country_id, country, last_update) VALUES (66,'Nepal', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (67,'Netherlands', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (68,'New Zealand', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (69,'Nigeria', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (70,'North Korea', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (71,'Oman', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (72,'Pakistan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (73,'Paraguay', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (74,'Peru', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (75,'Philippines', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (76,'Poland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (77,'Puerto Rico', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (78,'Romania', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (79,'Runion', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (80,'Russian Federation', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (81,'Saint Vincent and the Grenadines', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (82,'Saudi Arabia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (83,'Senegal', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (84,'Slovakia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (85,'South Africa', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (86,'South Korea', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (87,'Spain', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (88,'Sri Lanka', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (89,'Sudan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (90,'Sweden', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (91,'Switzerland', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (92,'Taiwan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (93,'Tanzania', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (94,'Thailand', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (95,'Tonga', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (96,'Tunisia', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (97,'Turkey', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (98,'Turkmenistan', '2006-02-15 09:44:00 '); +INSERT INTO country (country_id, country, last_update) VALUES (99,'Tuvalu', '2006-02-15 
09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (100,'Ukraine', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (101,'United Arab Emirates', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (102,'United Kingdom', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (103,'United States', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (104,'Venezuela', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (105,'Vietnam', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (106,'Virgin Islands, U.S.', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (107,'Yemen', '2006-02-15 09:44:00 ');
+INSERT INTO country (country_id, country, last_update) VALUES (108,'Yugoslavia', '2006-02-15 09:44:00 ');
+SELECT cu.customer_id AS id,
     cu.first_name || ' ' || cu.last_name AS name,
     a.address,
     a.postal_code AS "zip code",
@@ -132,13 +1714,13 @@ SELECT cu.client_customer_id AS id,
     ELSE ''
     END AS notes,
     cu.store_id AS sid
-    FROM client_customer cu
+    FROM customer cu
     INNER JOIN address a USING (address_id)
     INNER JOIN city USING (city_id)
     INNER JOIN country USING (country_id)
     ORDER BY address, city, country, id;
-CREATE VIEW client_customer_master AS
-    SELECT cu.client_customer_id AS id,
+CREATE VIEW customer_master AS
+    SELECT cu.customer_id AS id,
     cu.first_name || ' ' || cu.last_name AS name,
     a.address,
     a.postal_code AS "zip code",
@@ -150,21 +1732,21 @@ CREATE VIEW client_customer_master AS
     ELSE ''
     END AS notes,
     cu.store_id AS sid
-    FROM client_customer cu
+    FROM customer cu
     INNER JOIN address a USING (address_id)
     INNER JOIN city USING (city_id)
     INNER JOIN country USING (country_id)
     ORDER BY address, city, country, id;
-SELECT * FROM client_customer_master ORDER BY address, city, country, id;
+SELECT * FROM customer_master ORDER BY address, city, country, id;
 
-SELECT * FROM client_customer_master where country='Canada';
+SELECT * FROM customer_master where country='Canada';
 
 CREATE VIEW test_view AS SELECT city_id, country_id, city from city ORDER BY city;
 SELECT * FROM test_view ORDER BY city;
 
-DROP VIEW client_customer_master;
+DROP VIEW customer_master;
 DROP VIEW test_view;
 
-DROP table client_customer cascade;
+DROP table customer cascade;
 DROP table address cascade;
 DROP table city cascade;
 DROP table country cascade;
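Note: the customer queries and the customer_master view above rely on INNER JOIN ... USING (column), which joins on equality of the named column and merges it into a single output column that needs no table qualifier. A minimal sketch of the USING semantics, with hypothetical tables that are not part of the regression suite:

-- Illustrative sketch only; t_parent and t_child are hypothetical names.
CREATE TABLE t_parent (ref_id int, label text);
CREATE TABLE t_child (ref_id int, detail text);
-- USING (ref_id) behaves like ON (t_parent.ref_id = t_child.ref_id),
-- except that ref_id appears once in the result and may be referenced
-- without a table prefix, as the view definition above does.
SELECT ref_id, label, detail
FROM t_parent
INNER JOIN t_child USING (ref_id);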
r(a,b) as + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + +explain (verbose, costs off) +with r(a,b) as materialized + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + +with r(a,b) as materialized + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + +declare +type ta as record (a int, b int); +va ta; +begin +va.b = 2; +if va is not null then +raise info '1111'; +else +raise info '2222'; +end if; +end; +/ + +select r, r is null as isnull, r is not null as isnotnull +from (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); + +reset behavior_compat_options; + drop function avg_transfn1(); drop function test_func_VARRAY(); drop type avg_state cascade; diff --git a/src/test/regress/sql/create_function.sql b/src/test/regress/sql/create_function.sql index 3f25fe4e4..fba65fd46 100644 --- a/src/test/regress/sql/create_function.sql +++ b/src/test/regress/sql/create_function.sql @@ -3,4 +3,60 @@ AS 'SELECT $1 + $2;' LANGUAGE SQL IMMUTABLE SHIPPABLE RETURNS NULL ON NULL INPUT; -drop function create_function_test; +create or replace procedure proc_commit +is +begin +commit; +end; +/ +create or replace procedure proc_test +IMMUTABLE +is +begin +proc_commit(); +end; +/ +CREATE OR REPLACE FUNCTION public.func_jbpm_createtime( i_businessid IN VARCHAR2 ) + RETURN timestamp without time zone NOT SHIPPABLE NOT FENCED +AS DECLARE v_tm TIMESTAMP ; +BEGIN + BEGIN + SELECT + t.start_time INTO v_tm + FROM + dams_wf_process t + WHERE + t.business_id = i_businessid ; + + EXCEPTION + WHEN no_data_found THEN + SELECT + t.start_time INTO v_tm + FROM + dams_wf_hist_process t + WHERE + t.business_id = i_businessid ; + + END ; + + RETURN v_tm ; + +END ; +/ + +call proc_test(); +create or replace procedure p1(a out varchar2,b int) is +begin +a:='aa'||b; +raise info 'a:%',a; +end; +/ + +declare +var varchar2; +begin +var=p1(:var,3); +raise info 'var:%',var; +end; +/ + diff --git a/src/test/regress/sql/create_procedure.sql b/src/test/regress/sql/create_procedure.sql index dbdd3b057..6d449022c 100644 --- a/src/test/regress/sql/create_procedure.sql +++ b/src/test/regress/sql/create_procedure.sql @@ -1,52 +1,96 @@ -create procedure test_procedure_test(int,int) -SHIPPABLE IMMUTABLE -as -begin - select $1 + $2; -end; -/ - -create or replace function test2(space boolean default true) return integer as -declare result integer; -begin -if(space is null) then -perform oracle."put$json_printer.pretty_print".test1(12) into result; -return result; -else -return oracle."put$json_printer.pretty_print".test1(15); -end if; -end; -/ - -drop function test2; -drop procedure test_procedure_test; - - -create schema "test.test.test"; - -CREATE OR REPLACE PROCEDURE "test.test.test".prc_add -( - param1 IN INTEGER, - param2 IN OUT INTEGER -) -AS -BEGIN - param2:= param1 + param2; - dbe_output.print_line('result is: '||to_char(param2)); -END; -/ - -CREATE OR REPLACE PROCEDURE "test.test.test".prc_add2 -( - param1 IN INTEGER, - param2 IN INTEGER -) -AS -BEGIN - "test.test.test".prc_add(param1, param2); -END; -/ - -drop procedure "test.test.test".prc_add2; -drop procedure "test.test.test".prc_add; -drop schema 
"test.test.test"; +create procedure test_procedure_test(int,int) +SHIPPABLE IMMUTABLE +as +begin + select $1 + $2; +end; +/ + +create or replace function test2(space boolean default true) return integer as +declare result integer; +begin +if(space is null) then +perform oracle."put$json_printer.pretty_print".test1(12) into result; +return result; +else +return oracle."put$json_printer.pretty_print".test1(15); +end if; +end; +/ + +drop function test2; +drop procedure test_procedure_test; + + +create schema "test.test.test"; + +CREATE OR REPLACE PROCEDURE "test.test.test".prc_add +( + param1 IN INTEGER, + param2 IN OUT INTEGER +) +AS +BEGIN + param2:= param1 + param2; + dbe_output.print_line('result is: '||to_char(param2)); +END; +/ + +CREATE OR REPLACE PROCEDURE "test.test.test".prc_add2 +( + param1 IN INTEGER, + param2 IN INTEGER +) +AS +BEGIN + "test.test.test".prc_add(param1, param2); +END; +/ + +drop procedure "test.test.test".prc_add2; +drop procedure "test.test.test".prc_add; +drop schema "test.test.test"; + +set behavior_compat_options='allow_procedure_compile_check'; +drop table if exists bbb; +drop table if exists aaa; +CREATE TABLE bbb(id1 INT, id2 INT, id3 INT); +CREATE TABLE aaa(id1 INT, id2 INT, id3 INT); +CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS +$$ +DECLARE +v int; +BEGIN +select count(1) INTO v from bbb where id1 = NEW.id1; +RAISE INFO 'v1: : %' ,v; +RETURN NEW; +END +$$ LANGUAGE PLPGSQL; +CREATE TRIGGER insert_trigger11 +BEFORE INSERT ON aaa +FOR EACH ROW +EXECUTE PROCEDURE tri_insert_func(); +insert into aaa values(1,2,3); +select * from aaa; +drop TRIGGER insert_trigger11 ON aaa; +drop FUNCTION tri_insert_func; +drop table if exists bbb; +drop table if exists aaa; +create or replace function checkqweerr(a integer) returns int as $$ +declare +b int; +begin +select multi_call211(a) + 1 into b; +return b; +end; +$$ language plpgsql; +call checkqweerr(1); +create or replace procedure checkipoooowdsd2() as +declare +c1 sys_refcursor; +begin +open c1 for delete from tb_test111; +end; +/ +call checkipoooowdsd2(); +set behavior_compat_options=''; diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql index e22eb3b24..31bc92319 100644 --- a/src/test/regress/sql/create_table_like.sql +++ b/src/test/regress/sql/create_table_like.sql @@ -163,16 +163,16 @@ create table ctltestg(a1 int, a2 int, constraint firstkey primary key(oid))with \d+ ctltestg drop table if exists ctltestf, ctltestg; -create schema DTS2019071912119; -CREATE OR REPLACE FUNCTION DTS2019071912119.func_increment_plsql(i integer) RETURNS integer AS $$ +create schema testschema; +CREATE OR REPLACE FUNCTION testschema.func_increment_plsql(i integer) RETURNS integer AS $$ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql IMMUTABLE ; -create table DTS2019071912119.test1 (a int , b int default DTS2019071912119.func_increment_plsql(1)); -alter schema DTS2019071912119 rename to DTS2019071912119_bak; -create table DTS2019071912119_bak.test2 (like DTS2019071912119_bak.test1 including all); +create table testschema.test1 (a int , b int default testschema.func_increment_plsql(1)); +alter schema testschema rename to TESTTABLE_bak; +create table TESTTABLE_bak.test2 (like TESTTABLE_bak.test1 including all); -drop table DTS2019071912119_bak.test2; -drop table DTS2019071912119_bak.test1; -drop function DTS2019071912119_bak.func_increment_plsql(); +drop table TESTTABLE_bak.test2; +drop table TESTTABLE_bak.test1; +drop function TESTTABLE_bak.func_increment_plsql(); diff 
--git a/src/test/regress/sql/cte_inline.sql b/src/test/regress/sql/cte_inline.sql
new file mode 100644
index 000000000..66ce701a3
--- /dev/null
+++ b/src/test/regress/sql/cte_inline.sql
@@ -0,0 +1,192 @@
+create schema cte_inline;
+set current_schema = cte_inline;
+
+-- Set up some simple test tables
+CREATE TABLE test (
+    f1 integer,
+    f2 integer,
+    f3 float
+);
+
+INSERT INTO test VALUES (1, 2, 3);
+INSERT INTO test VALUES (2, 3, 4);
+INSERT INTO test VALUES (3, 4, 5);
+INSERT INTO test VALUES (1, 1, 1);
+INSERT INTO test VALUES (2, 2, 2);
+INSERT INTO test VALUES (3, 3, 3);
+INSERT INTO test VALUES (6, 7, 8);
+INSERT INTO test VALUES (8, 9, NULL);
+
+CREATE TABLE test_1 (like test);
+
+--
+-- Tests for CTE inlining behavior
+--
+
+-- Basic subquery that can be inlined
+explain (verbose, costs off)
+with x as (select * from (select f1 from test) ss)
+select * from x where f1 = 1;
+
+-- Deeply nested subquery
+explain (verbose, costs off)
+with a as (
+    with b as (
+        with c as (
+            with d as (select * from (
+                with z as (
+                    with y as (
+                        with x as (select f1 from test)
+                        select * from x)
+                    select * from y)
+                select * from z)
+            ) select * from d)
+        select * from c)
+    select * from b)
+select * from a where f1 = 1;
+
+-- Explicitly request materialization
+explain (verbose, costs off)
+with x as materialized (select * from (select f1 from test) ss)
+select * from x where f1 = 1;
+
+-- Stable functions are safe to inline
+explain (verbose, costs off)
+with x as (select * from (select f1, now() from test) ss)
+select * from x where f1 = 1;
+
+-- Volatile functions prevent inlining
+explain (verbose, costs off)
+with x as (select * from (select f1, random() from test) ss)
+select * from x where f1 = 1;
+
+-- SELECT FOR UPDATE/SHARE cannot be inlined
+explain (verbose, costs off)
+with x as (select * from (select f1 from test for update) ss)
+select * from x where f1 = 1;
+
+explain (verbose, costs off)
+with x as not materialized (select * from (select f1 from test for share) ss)
+select * from x where f1 = 1;
+
+-- IUDs cannot be inlined
+explain (verbose, costs off)
+with x as not materialized (insert into test_1 values(1,2,4) returning *)
+select * from x;
+
+explain (verbose, costs off)
+with x as not materialized (update test_1 set f3 = 3 where f1 = 1 returning *)
+select * from x;
+
+explain (verbose, costs off)
+with x as not materialized (delete from test_1 returning *)
+select * from x;
+
+-- Multiply-referenced CTEs are inlined only when requested
+explain (verbose, costs off)
+with x as (select * from (select f1, now() as n from test) ss)
+select * from x, x x2 where x.n = x2.n;
+
+explain (verbose, costs off)
+with x as not materialized (select * from (select f1, now() as n from test) ss)
+select * from x, x x2 where x.n = x2.n;
+
+-- Check handling of outer references
+explain (verbose, costs off)
+with x as (select * from test)
+select * from (with y as (select * from x) select * from y) ss;
+
+explain (verbose, costs off)
+with x as materialized (select * from test)
+select * from (with y as (select * from x) select * from y) ss;
+
+-- Ensure that we inline the correct CTE when there are
+-- multiple CTEs with the same name
+explain (verbose, costs off)
+with x as (select 1 as y)
+select * from (with x as (select 2 as y) select * from x) ss;
+
+-- Row marks are not pushed into CTEs (opengauss not supported)
+explain (verbose, costs off)
+with x as (select * from test)
+select * from x for update;
+
+-- For CTEs in subqueries
+explain (verbose, costs off)
+select * from (with x as (select * from test_1) select x.f1 from x) tmp where tmp.f1 = 1;
+
+explain (verbose, costs off)
+select * from (with x as materialized (select * from test_1) select x.f1 from x) tmp where tmp.f1 = 1;
+
+-- CTEs within in/any/some sublinks are handled correctly
+explain (verbose, costs off)
+select * from test where test.f1 in
+(with x as (select * from test_1) select x.f1 from x);
+
+explain (verbose, costs off)
+select * from test where test.f1 in
+(with x as materialized (select * from test_1) select x.f1 from x);
+
+explain (verbose, costs off)
+select * from test where test.f1 = any
+(with x as (select * from test_1) select x.f1 from x);
+
+explain (verbose, costs off)
+select * from test where test.f1 = any
+(with x as materialized (select * from test_1) select x.f1 from x);
+
+-- subquery not expanded
+explain (verbose, costs off)
+select * from test where test.f1 = any
+(with x as (select * from test_1) select x.f1 from x);
+
+explain (verbose, costs off)
+select * from test where test.f1 = any
+(with x as materialized (select * from test_1) select /*+ no_expand */ x.f1 from x);
+
+explain (verbose, costs off)
+select * from test where exists
+(with x as (select * from test_1) select /*+ no_expand */ x.f1 from x where test.f1 = x.f1);
+
+-- intargetlist rewrite
+explain (verbose, costs off)
+select * from test where test.f1 = (with x as (select * from test_1) select x.f2 from x where x.f2 = test.f2 and x.f2 < 10 order by 1 limit 1) and test.f2 < 50 order by 1,2,3;
+
+explain (verbose, costs off)
+select * from test where test.f1 = (with x as materialized (select * from test_1) select x.f2 from x where x.f2 = test.f2 and x.f2 < 10 order by 1 limit 1) and test.f2 < 50 order by 1,2,3;
+
+-- unreferenced CTE contains DML
+explain (verbose, costs off)
+with x as (select f1 from test),
+y as (insert into test_1 default values)
+select * from x;
+
+explain (verbose, costs off)
+with a as( with z as (insert into test default values) select 1)
+select 1;
+
+-- cte with subquery and referenced in grouping function will not be inlined
+explain (verbose, costs off)
+WITH cte AS not materialized (
+    SELECT
+    (
+        CASE WHEN (
+            NOT EXISTS (
+                select
+                    *
+                from
+                    test
+            )
+        ) THEN ('P') END
+    ) col
+    FROM
+        test_1
+)
+SELECT
+    col, GROUPING(col)
+FROM
+    cte
+GROUP BY
+    GROUPING SETS(col);
+
+drop schema cte_inline cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/dbe_scheduler.sql b/src/test/regress/sql/dbe_scheduler.sql
new file mode 100644
index 000000000..4a042f597
--- /dev/null
+++ b/src/test/regress/sql/dbe_scheduler.sql
@@ -0,0 +1,278 @@
+-- check define_program_argument
+select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test');
+select * from gs_job_attribute where attribute_name <> 'owner';
+select what, job_name from pg_job_proc;
+select DBE_SCHEDULER.define_program_argument('program1', 0, 'arg0', 'int', 16);
+select DBE_SCHEDULER.define_program_argument('program1', 4, 'arg4', 'int', 16);
+select DBE_SCHEDULER.define_program_argument('program2', 1, 'arg1', 'boolean', false);
+select DBE_SCHEDULER.define_program_argument('program1', 1, 'arg1', 'boolean', false);
+select * from gs_job_argument;
+select DBE_SCHEDULER.define_program_argument('program1', 1, 'arg1', 'int', 16);
+select * from gs_job_argument;
+select DBE_SCHEDULER.define_program_argument('program1', 2, 'arg2', 'boolean', 'false', false);
+select * from gs_job_argument;
+select DBE_SCHEDULER.drop_program('program1', true);
+select * from gs_job_attribute 
where attribute_name <> 'owner'; +select * from gs_job_argument; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; + +-- check create_program / drop_program +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); +select DBE_SCHEDULER.create_program('program2', 'sql', 'select pg_sleep(1);', 3, false, 'test'); +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); +select DBE_SCHEDULER.create_job('job1', 'program1', '2021-07-20', 'interval ''3 minute''', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); +select what, job_name from pg_job_proc; +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_program('program2,program1', false); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_program('program1,program2', true); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_job('program1,job1', true, false, 'STOP_ON_FIRST_ERROR'); +select DBE_SCHEDULER.drop_job('job1,program1', true, false, 'TRANSACTIONAL'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_job('job1,program1', true, false, 'ABSORB_ERRORS'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select * from gs_job_argument; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; + +-- set_attribute +--program +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_argument', '2', NULL); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', '2', NULL); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 3, NULL); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.set_attribute('program1', 'enabled', true); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 0, NULL); +select DBE_SCHEDULER.set_attribute('program1', 'enabled', true); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); +select DBE_SCHEDULER.create_job(job_name=>'job1', program_name=>'program1', schedule_name=>'schedule1'); +select what, job_name from pg_job_proc; +select DBE_SCHEDULER.set_attribute('program1', 'program_action', 'create role r1 password ''12345'';', NULL); -- failed +select DBE_SCHEDULER.set_attribute('program1', 'program_action', 'select pg_sleep(2);', NULL); +select what, job_name from pg_job_proc; +--schedule +select * from gs_job_attribute where attribute_name <> 'owner'; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select DBE_SCHEDULER.set_attribute('schedule1', 'start_date', '2021-7-20'); +select DBE_SCHEDULER.set_attribute('schedule1', 'end_date', '2021-7-20'); +select DBE_SCHEDULER.set_attribute('schedule1', 'repeat_interval', 'interval ''2000 s'''); +select * from gs_job_attribute where attribute_name <> 'owner'; +select dbname, node_name, 
interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +--job +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test'); +select DBE_SCHEDULER.create_schedule('schedule2', NULL, 'sysdate', NULL, 'test'); +select DBE_SCHEDULER.set_attribute('job1', 'program_name', 'program2'); +select DBE_SCHEDULER.set_attribute('job1', 'schedule_name', 'schedule2'); +select DBE_SCHEDULER.set_attribute('job1', 'job_class', 'unknown'); +select DBE_SCHEDULER.create_job_class('test'); +select DBE_SCHEDULER.set_attribute('job1', 'job_class', 'test'); +select DBE_SCHEDULER.set_attribute('job1', 'enabled', true); +select * from gs_job_attribute where attribute_name <> 'owner'; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; +select DBE_SCHEDULER.drop_program('program1,program2', true); +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); +select DBE_SCHEDULER.drop_schedule('schedule1,schedule2', false); +select DBE_SCHEDULER.drop_job_class('test'); +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.set_attribute('job1', 'start_date', '2021-7-20'); +select DBE_SCHEDULER.set_attribute('job1', 'end_date', '2021-7-20'); +select DBE_SCHEDULER.set_attribute('job1', 'repeat_interval', 'interval ''2000 s'''); +select DBE_SCHEDULER.set_attribute('job1', 'number_of_arguments', 2); +select DBE_SCHEDULER.set_attribute('job1', 'job_action', 'create role r1 password ''12345'';'); -- failed +select DBE_SCHEDULER.set_attribute('job1', 'job_action', 'select pg_sleep(2);'); +select DBE_SCHEDULER.set_attribute('job1', 'job_type', 'STORED_PROCEDURE'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); + +--create_schedule dropxxx +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.create_schedule('schedule2', NULL, 'sysdate', NULL, 'test'); +select DBE_SCHEDULER.create_job('job1', 'schedule1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, 'DEFAULT_JOB_CLASS', true, true, NULL, NULL, NULL); +select * from gs_job_attribute where attribute_name <> 'owner'; +select * from gs_job_argument; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; +select DBE_SCHEDULER.drop_job('schedule1', true, false, 'STOP_ON_FIRST_ERROR'); +select DBE_SCHEDULER.drop_program('schedule1', false); +select DBE_SCHEDULER.drop_schedule('schedule1', false); +select DBE_SCHEDULER.drop_schedule('schedule1,schedule2', true); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); +select * from gs_job_attribute where attribute_name <> 'owner'; 
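+-- (sketch, not part of the original suite) the same cleanup state can also be
+-- probed with plain counts over the catalogs queried above, e.g.:
+--   select count(*) from gs_job_attribute where attribute_name <> 'owner';
+--   select count(*) from gs_job_argument;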
+select * from gs_job_argument;
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select what, job_name from pg_job_proc;
+
+--set_job_argument_value
+select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test');
+select DBE_SCHEDULER.create_job('job1', 'program1', '2021-07-20', 'sysdate', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL);
+select * from gs_job_attribute where attribute_name <> 'owner';
+select * from gs_job_argument;
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select what, job_name from pg_job_proc;
+select DBE_SCHEDULER.set_job_argument_value('job1', 1, 1);
+select * from gs_job_argument;
+select DBE_SCHEDULER.set_job_argument_value('job1', 1, 11);
+select * from gs_job_argument;
+select DBE_SCHEDULER.set_job_argument_value('job1', 'default_name_of_arg1', 111);
+select * from gs_job_argument;
+select DBE_SCHEDULER.define_program_argument('program1', 2, 'arg2', 'boolean', 'false', false);
+select * from gs_job_argument;
+select DBE_SCHEDULER.set_job_argument_value('job1', 'arg2', 2);
+select * from gs_job_argument;
+select DBE_SCHEDULER.set_job_argument_value('job1', 'arg2', 22);
+select * from gs_job_argument;
+select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR');
+select DBE_SCHEDULER.drop_program('program1', false);
+select * from gs_job_attribute where attribute_name <> 'owner';
+select * from gs_job_argument;
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select what, job_name from pg_job_proc;
+
+-- check create_job
+select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', '2021-7-28', 'test');
+select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 3, false, 'test');
+select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);');
+select * from gs_job_attribute where attribute_name <> 'owner';
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select what, job_name from pg_job_proc;
+select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR');
+select DBE_SCHEDULER.create_job(job_name=>'job2', program_name=>'program1', schedule_name=>'schedule1');
+select * from gs_job_attribute where attribute_name <> 'owner';
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select what, job_name from pg_job_proc;
+select DBE_SCHEDULER.drop_job('job2', true, false, 'STOP_ON_FIRST_ERROR');
+select DBE_SCHEDULER.create_job(job_name=>'job3', program_name=>'program1');
+select job_name, enable from pg_job where job_name = 'job3';
+select * from gs_job_attribute where attribute_name <> 'owner';
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select what, job_name from pg_job_proc;
+select DBE_SCHEDULER.drop_job('job3', true, false, 'STOP_ON_FIRST_ERROR');
+select DBE_SCHEDULER.create_job(job_name=>'job4', schedule_name=>'schedule1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(4);');
+select * from gs_job_attribute where attribute_name <> 'owner';
+select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null;
+select 
what, job_name from pg_job_proc; +select DBE_SCHEDULER.drop_job('job4', true, false, 'STOP_ON_FIRST_ERROR'); +select DBE_SCHEDULER.drop_schedule('schedule1', true); +select DBE_SCHEDULER.drop_program('program1', false); +select * from gs_job_attribute where attribute_name <> 'owner'; +select * from gs_job_argument; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; + +-- enable/disable +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, false, 'test'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.enable('program1', 'STOP_ON_FIRST_ERROR'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.disable('program1', false, 'STOP_ON_FIRST_ERROR'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.enable('job1', 'STOP_ON_FIRST_ERROR'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.disable('job1', false, 'STOP_ON_FIRST_ERROR'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_job('job1', true, false, 'STOP_ON_FIRST_ERROR'); +select DBE_SCHEDULER.drop_program('program1', false); +select * from gs_job_attribute where attribute_name <> 'owner'; +select * from gs_job_argument; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; + +--create / drop job_class +select DBE_SCHEDULER.create_job_class('test'); +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', job_class=>'test'); +select DBE_SCHEDULER.create_job(job_name=>'job3', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', job_class=>'testxxx'); +select DBE_SCHEDULER.create_job(job_name=>'job2', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.set_attribute('job2', 'job_class', 'test'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_job_class('test', false); +select DBE_SCHEDULER.drop_job_class('test', true); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.drop_job('job1,job2', true, false, 'STOP_ON_FIRST_ERROR'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select * from gs_job_argument; +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job where start_date is not null; +select what, job_name from pg_job_proc; + +-- generate_job_name +select DBE_SCHEDULER.generate_job_name(); +select DBE_SCHEDULER.generate_job_name(''); +select DBE_SCHEDULER.generate_job_name('job_prefix_'); + +create table t1(c1 int); +create or replace procedure p1(id int) as +begin + insert into t1 values(id); +end; +/ +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'public.p1', 1, true, 'test'); +select DBE_SCHEDULER.create_program('program2', 'STORED_PROCEDURE', 'public.p1', 1, false, 'test'); +select DBE_SCHEDULER.define_program_argument('program2', 1, 'arg1', 'int', 2); +select DBE_SCHEDULER.enable('program2', 
'TRANSACTIONAL'); +select DBE_SCHEDULER.create_job(job_name=>'job2', program_name=>'program2', enabled=>true, auto_drop=>false); +select DBE_SCHEDULER.run_job('job2', false); +select pg_sleep(2); +select * from t1; +select DBE_SCHEDULER.create_job(job_name=>'job3', job_type=>'PLSQL_BLOCK', job_action=>'insert into public.t1 values(3);', enabled=>true, auto_drop=>false); +select DBE_SCHEDULER.run_job('job3', false); +select pg_sleep(2); +select * from t1; +drop table t1; + +select DBE_SCHEDULER.drop_job('job2', true); +select DBE_SCHEDULER.drop_job('job3', true); +select DBE_SCHEDULER.drop_program('program2', true); + +-- others +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 1, false, 'test'); +select DBE_SCHEDULER.enable('program1', 'TRANSACTIONAL'); +select DBE_SCHEDULER.define_program_argument('program1', 1, 'arg1', 'int', 16); +select DBE_SCHEDULER.enable('program1', 'TRANSACTIONAL'); +select * from gs_job_attribute where attribute_name <> 'owner'; +select DBE_SCHEDULER.set_attribute('program1', 'program_type', 'PLSQL_BLOCK'); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 0); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 1); +select DBE_SCHEDULER.set_attribute('program1', 'program_type', 'STORED_PROCEDURE'); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', -1); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 0); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 1); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 255); +select DBE_SCHEDULER.set_attribute('program1', 'number_of_arguments', 256); + +select DBE_SCHEDULER.drop_program('program1', true); +select * from gs_job_attribute where attribute_name <> 'owner'; -- empty + + +select DBE_SCHEDULER.create_program('programdb1', 'PLSQL_BLOCK', 'select pg_sleep(1);', 0, false, 'test'); +select DBE_SCHEDULER.create_job('jobdb1', 'programdb1', '2021-07-20', 'sysdate', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; +create database test11; +\c test11 +select DBE_SCHEDULER.create_program('programdb1', 'PLSQL_BLOCK', 'select pg_sleep(1);', 0, false, 'test'); +select DBE_SCHEDULER.create_job('jobdb1', 'programdb1', '2021-07-20', 'sysdate', '2121-07-20', 'DEFAULT_JOB_CLASS', false, false,'test', 'style', NULL, NULL); +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; +select dbe_scheduler.run_job('jobdb1', false); +select dbe_scheduler.drop_job('jobdb1'); +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; +\c regression +select dbe_scheduler.drop_job('jobdb1'); +select DBE_SCHEDULER.drop_program('programdb1', true); +select dbname, node_name, interval, nspname, job_name, end_date, enable from pg_job; +select * from gs_job_attribute where attribute_name <> 'owner'; \ No newline at end of file diff --git a/src/test/regress/sql/dbe_scheduler_calendar.sql b/src/test/regress/sql/dbe_scheduler_calendar.sql new file mode 100644 index 000000000..8d219461a --- /dev/null +++ b/src/test/regress/sql/dbe_scheduler_calendar.sql @@ -0,0 +1,43 @@ +-- calendaring syntax check -- +create or replace procedure eval16(calendar_str text) as +declare + start_date timestamp with time zone; + return_date_after timestamp with time zone; + next_run_date timestamp with time zone; +begin + start_date := '2003-2-1 
10:30:00.111111+8'::timestamp with time zone;
+    return_date_after := start_date;
+    -- print 16 consecutive next dates --
+    FOR i in 1..16 loop
+        DBE_SCHEDULER.EVALUATE_CALENDAR_STRING(
+            calendar_str,
+            start_date, return_date_after, next_run_date);
+        DBE_OUTPUT.PRINT_LINE('next_run_date: ' || next_run_date);
+        return_date_after := next_run_date;
+    end loop;
+end;
+/
+
+show timezone;
+
+-- problems: ORA does not support these --
+call eval16('FREQ=weekly;INTERVAL=50;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=0');
+call eval16('FREQ=secondly;BYMONTH=6;'); -- hard to find, but worked
+
+-- problem: ORA generates a different result --
+call eval16('FREQ=weekly;INTERVAL=40;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=0');
+
+-- working scenarios --
+call eval16('FREQ=hourly;INTERVAL=2;BYHOUR=6,10;BYMINUTE=0;BYSECOND=0'); -- good
+call eval16('FREQ=hourly;INTERVAL=2;BYHOUR=6,9;BYMINUTE=0;BYSECOND=0'); -- good, only 6 o'clock
+call eval16('FREQ=weekly;INTERVAL=3;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=0');
+call eval16('FREQ=yearly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYHOUR=10;BYMINUTE=1,2,3,4,5,6,7,8,9,20,30,40;BYSECOND=0'); -- fine performance
+call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYHOUR=10;BYMINUTE=1,2,3,4,5,6,7,8,9,20,30,40;BYSECOND=0'); -- fixed, large loops
+call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYMONTHDAY=1,3,5,7,9;BYHOUR=1,3,10,13,15,17;BYMINUTE=1,2,3,4,5,6,7,8,9,20,30,40;BYSECOND=0'); -- a lot of params
+call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=2,3,4,5,6,7,8,9,11,12;BYMONTHDAY=1,3,5,7,9;BYHOUR=1,3,5,10,13,15,17;BYMINUTE=20,30,40,1,2,3,4,5,6,7,8,9;BYSECOND=0'); -- still good
+call eval16('FREQ=secondly;INTERVAL=59;BYMONTH=2,3;BYHOUR=10;BYMINUTE=20,30,40;BYSECOND=58'); -- secondly works fine
+call eval16('FREQ=minutely;INTERVAL=50;BYMONTH=1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1;BYMONTHDAY=-1;BYHOUR=1;BYMINUTE=0;BYSECOND=0');
+
+-- error scenarios --
+call eval16('FREQ=secondly;INTERVAL=50;BYMONTH=6;BYMONTHDAY=6;BYHOUR=10;BYMINUTE=0;BYSECOND=1'); -- not reachable
+call eval16('FREQ=secondly;BYMONTH=6;BYNOTHING=6;');
diff --git a/src/test/regress/sql/dbe_scheduler_privilege.sql b/src/test/regress/sql/dbe_scheduler_privilege.sql
new file mode 100644
index 000000000..b9cdaad5b
--- /dev/null
+++ b/src/test/regress/sql/dbe_scheduler_privilege.sql
@@ -0,0 +1,106 @@
+-- create users
+create user scheduler_user1 password 'scheduler_user1.';
+create user scheduler_user2 password 'scheduler_user2.';
+
+--grant
+select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create job');
+select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create external job');
+select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'run external job');
+select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'execute any program');
+select attribute_name, attribute_value from gs_job_attribute;
+select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create job');
+select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create external job');
+select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'run external job');
+select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'execute any program');
+select attribute_name, attribute_value from gs_job_attribute;
+select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'xxx');
+
+-- no privilege
+SET ROLE scheduler_user1 PASSWORD "scheduler_user1.";
+select 
DBE_SCHEDULER.create_credential('cre_1', 'scheduler_user1', ''); -- failed +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, false, 'test'); -- failed +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); -- failed +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', enabled=>true, auto_drop=>false); -- failed + +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create job'); + +-- create job privilege +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; +select DBE_SCHEDULER.create_program('program1', 'STORED_PROCEDURE', 'select pg_sleep(1);', 0, false, 'test'); +select DBE_SCHEDULER.create_schedule('schedule1', NULL, 'sysdate', NULL, 'test'); +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'STORED_PROCEDURE', job_action=>'select pg_sleep(1);', enabled=>true, auto_drop=>false); +select DBE_SCHEDULER.create_job(job_name=>'job2', program_name=>'program1'); + +RESET ROLE; +select count(*) from adm_scheduler_jobs; +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; + +-- create external job privilege +select DBE_SCHEDULER.create_program('program1', 'EXTERNAL_SCRIPT', '/usr/bin/pwd'); -- failed +select DBE_SCHEDULER.create_job(job_name=>'job1', job_type=>'EXTERNAL_SCRIPT', job_action=>'/usr/bin/pwd', enabled=>true, auto_drop=>false); -- failed + +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create external job'); + +SET ROLE scheduler_user1 PASSWORD "scheduler_user1."; +select DBE_SCHEDULER.create_program('program2', 'EXTERNAL_SCRIPT', '/usr/bin/pwd'); +select DBE_SCHEDULER.create_job(job_name=>'job3', job_type=>'EXTERNAL_SCRIPT', job_action=>'/usr/bin/pwd', enabled=>true, auto_drop=>false); + +-- cross user +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'create job'); + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.create_job(job_name=>'job4', program_name=>'program1'); -- failed + +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'execute any program'); + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.create_job(job_name=>'job4', program_name=>'program1'); +select DBE_SCHEDULER.create_job(job_name=>'job5', program_name=>'program2'); -- failed + +RESET ROLE; +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'create external job'); + +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; +select DBE_SCHEDULER.create_job(job_name=>'job5', program_name=>'program2'); + +RESET ROLE; +select count(*) from adm_scheduler_jobs; +SET ROLE scheduler_user2 PASSWORD "scheduler_user2."; + +select DBE_SCHEDULER.run_job(job_name=>'job4', use_current_session=>false); +select DBE_SCHEDULER.run_job(job_name=>'job5', use_current_session=>true); -- failed + +RESET ROLE; +select DBE_SCHEDULER.enable('job4'); +select enable from pg_job where job_name = 'job4'; +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'execute any program'); +select enable from pg_job where job_name = 'job4'; + +RESET ROLE; +select DBE_SCHEDULER.drop_job('job1', true); +select DBE_SCHEDULER.drop_job('job2', true); +select DBE_SCHEDULER.drop_job('job3', true); +select DBE_SCHEDULER.drop_job('job4', true); +select DBE_SCHEDULER.drop_job('job5', true); +select DBE_SCHEDULER.drop_program('program1', true); +select DBE_SCHEDULER.drop_program('program2', true); +select 
DBE_SCHEDULER.drop_schedule('schedule1', true); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create job'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'create external job'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'run external job'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'execute any program'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'create job'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'create external job'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user2', 'run external job'); +select DBE_SCHEDULER.revoke_user_authorization('scheduler_user1', 'execute any program'); + +-- check object cleanups -- +select DBE_SCHEDULER.grant_user_authorization('scheduler_user1', 'create job'); +select DBE_SCHEDULER.grant_user_authorization('scheduler_user2', 'execute any program'); +drop user scheduler_user1; +drop user scheduler_user2; +select attribute_name, attribute_value from gs_job_attribute; -- empty \ No newline at end of file diff --git a/src/test/regress/sql/dbe_scheduler_rename_user.sql b/src/test/regress/sql/dbe_scheduler_rename_user.sql new file mode 100644 index 000000000..85e735261 --- /dev/null +++ b/src/test/regress/sql/dbe_scheduler_rename_user.sql @@ -0,0 +1,28 @@ +-- create users +create user scheduler_user password 'scheduler_user@123.'; + +-- grant +select DBE_SCHEDULER.grant_user_authorization('scheduler_user', 'create job'); + +-- switch role/user and execute job +set role scheduler_user password "scheduler_user@123."; +create table my_tbl_01(tms date, phone text); +select DBE_SCHEDULER.create_job(job_name=>'job_01', job_type=>'PLSQL_BLOCK', job_action=>'insert into my_tbl_01 values (sysdate::date, 13001230123);', start_date=>sysdate, repeat_interval=>'FREQ=MINUTELY;INTERVAL=1', end_date=>sysdate+1,enabled=>true, auto_drop=>false); +select DBE_SCHEDULER.run_job('job_01', false); +select count(*) from pg_job where log_user = 'scheduler_user' and nspname = 'scheduler_user'; +select count(*) from pg_job where log_user = priv_user; +select count(*) from pg_job where job_name = 'job_01'; + +-- alter and rename pg_job user +reset role; +alter user scheduler_user rename to scheduler_new_user; + +-- switch new role/user to execute job +set role scheduler_new_user password "scheduler_user@123."; +select count(*) from pg_job where log_user = 'scheduler_new_user' and nspname = 'scheduler_new_user'; +select count(*) from pg_job where log_user != priv_user; + +-- return and stop job +reset role; +select DBE_SCHEDULER.drop_job('job_01', true); +select count(*) from pg_job where job_name = 'job_01'; \ No newline at end of file diff --git a/src/test/regress/sql/deferrable.sql b/src/test/regress/sql/deferrable.sql new file mode 100644 index 000000000..15fc7b0a7 --- /dev/null +++ b/src/test/regress/sql/deferrable.sql @@ -0,0 +1,184 @@ +DROP SCHEMA test_deferrable CASCADE; +CREATE SCHEMA test_deferrable; +SET CURRENT_SCHEMA TO test_deferrable; + +-- partition table for deferrable +drop table t_kenyon; +create table t_kenyon(id int primary key deferrable) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +end; + +begin; +set 
constraints all IMMEDIATE; +insert into t_kenyon values(1); +end; + +drop table t_kenyon; +create table t_kenyon(id int primary key not deferrable ) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +end; + +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +end; + +drop table t_kenyon; +create table t_kenyon(id int primary key initially immediate ) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +end; + +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +end; + +drop table t_kenyon; +create table t_kenyon(id int primary key initially deferred) +partition by range(id) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (6000) +)ENABLE ROW MOVEMENT; +insert into t_kenyon values(1); +begin; +set constraints all deferred; +insert into t_kenyon values(1); +end; + +begin; +set constraints all IMMEDIATE; +insert into t_kenyon values(1); +end; + + +-- foreign key for deferrable +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) deferrable +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +end; + +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +end; + +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) not deferrable +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +end; + +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +end; + +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) initially immediate +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +end; + +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +end; + +drop table warehouse_t23; +drop table city_t23; +CREATE TABLE city_t23 +( + W_CITY VARCHAR(60) PRIMARY KEY, + W_ADDRESS TEXT +); +CREATE TABLE warehouse_t23 +( + W_INT int, + W_CITY VARCHAR(60) , + FOREIGN KEY(W_CITY) REFERENCES city_t23(W_CITY) initially deferred +); +begin; +set constraints all deferred; +insert into warehouse_t23 values(1,'sss'); +end; + +begin; +set constraints all IMMEDIATE; +insert into warehouse_t23 values(1,'sss'); +end; + +DROP SCHEMA test_deferrable CASCADE; diff --git 
a/src/test/regress/sql/dfs_orc_vec_nestloop.sql b/src/test/regress/sql/dfs_orc_vec_nestloop.sql index 8652e8db3..6ad0631e9 100644 --- a/src/test/regress/sql/dfs_orc_vec_nestloop.sql +++ b/src/test/regress/sql/dfs_orc_vec_nestloop.sql @@ -355,9 +355,6 @@ select count(*) from vector_nestloop_table_01 A where A.col_int not in (select B select A.col_int as c1, A.col_char as c2, A.col_vchar from vector_nestloop_table_01 A inner join vector_nestloop_table_01 B on A.col_int = B.col_int where A.col_int=1 and A.col_char = 'test_char_1' and B.col_char = 'test_char_1' order by 1, 2, 3; select count(*) from vector_nestloop_table_01 A inner join vector_nestloop_table_01 B on A.col_int = B.col_int where A.col_int=1 and A.col_char = 'test_char_1' and B.col_char = 'test_char_1'; ----- ---- Special Case: nestloop + hashjoin + operator with parameters pushed down (dts2014111302175/2014120306303) ----- CREATE INDEX vecvtor_nestloop_base_index_01 ON VECTOR_NESTLOOP_TABLE_05 USING psort (id) LOCAL(PARTITION b1_p1_id_idx, PARTITION b1_p2_id_idx, PARTITION b1_p3_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_02 ON VECTOR_NESTLOOP_TABLE_06 USING psort (id, c_d_id, c_id) LOCAL(PARTITION b5_p1_id_c_d_id_c_id_idx, PARTITION b5_p2_id_c_d_id_c_id_idx, PARTITION b5_p3_id_c_d_id_c_id_idx, PARTITION b5_p4_id_c_d_id_c_id_idx, PARTITION b5_p5_id_c_d_id_c_id_idx, PARTITION b5_p6_id_c_d_id_c_id_idx) ; CREATE INDEX vecvtor_nestloop_base_index_03 ON VECTOR_NESTLOOP_TABLE_07 USING psort (id, c_d_id, c_w_id) LOCAL(PARTITION b7_p1_id_c_d_id_c_w_id_idx, PARTITION b7_p2_id_c_d_id_c_w_id_idx, PARTITION b7_p3_id_c_d_id_c_w_id_idx, PARTITION b7_p4_id_c_d_id_c_w_id_idx, PARTITION b7_p5_id_c_d_id_c_w_id_idx, PARTITION b7_p6_id_c_d_id_c_w_id_idx, PARTITION b7_p7_id_c_d_id_c_w_id_idx, PARTITION b7_p8_id_c_d_id_c_w_id_idx, PARTITION b7_p9_id_c_d_id_c_w_id_idx, PARTITION b7_p10_id_c_d_id_c_w_id_idx, PARTITION b7_p11_id_c_d_id_c_w_id_idx) ; diff --git a/src/test/regress/sql/forall_save_exceptions.sql b/src/test/regress/sql/forall_save_exceptions.sql new file mode 100644 index 000000000..95bd85eb0 --- /dev/null +++ b/src/test/regress/sql/forall_save_exceptions.sql @@ -0,0 +1,981 @@ +create schema forall_save_exceptions; + +set search_path = forall_save_exceptions; + +CREATE TABLE if not exists test_forall(a char(10)); + +---- +-- test pragma exception_init +---- +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -1); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + +END; +/ + +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -15); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + RAISE ex_dml_errors; +EXCEPTION + WHEN ex_dml_errors THEN + RAISE NOTICE 'test:%',SQLcode; +END; +/ + +DECLARE + l_error_count integer; + ex_dml_errors EXCEPTION; + PRAGMA EXCEPTION_INIT(ex_dml_errors, -1); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -2); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -3); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -4); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -5); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -6); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -7); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -8); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -9); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -10); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -9); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -8); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -7); + PRAGMA EXCEPTION_INIT(ex_dml_errors, -6); + PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN + RAISE ex_dml_errors; 
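+-- note: the custom exception raised above is caught by the handler below; the
+-- NOTICE prints SQLCODE, presumably reflecting the EXCEPTION_INIT binding in
+-- effect after the repeated re-initializations (a hedged reading, not asserted
+-- by the suite itself).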
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        RAISE NOTICE 'test:%',SQLcode;
+END;
+/
+
+-- only numeric initialization
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, raise_exception);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 'aaa');
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, '1.1');
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 1.1);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, '-1');
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+-- expression not supported; sqlcode must be <= 0, and must be int32 (the range is -2147483647~-1)
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 1-2);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 0);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 1);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 15);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -2147483648);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -2147483647);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+--exception init with an undeclared exception
+DECLARE
+    l_error_count integer;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -1);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+
+END;
+/
+
+--exception init with system error
+DECLARE
+    x integer := 0;
+    y integer;
+    PRAGMA EXCEPTION_INIT(division_by_zero, -1);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    y := x / 0;
+EXCEPTION
+    WHEN division_by_zero THEN
+        RAISE NOTICE 'test:%',SQLstate;
+END;
+/
+
+DECLARE
+    x integer := 0;
+    y integer;
+    PRAGMA EXCEPTION_INIT(division_by_zero, -1);
+    PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+    y := x / 0;
+EXCEPTION
+    WHEN division_by_zero THEN
+        RAISE NOTICE 'test:%',SQLstate;
+END;
+/
+
+----
+-- support EXCEPTION WHEN with custom exceptions
+----
+DECLARE
+    ex_dml_errors EXCEPTION;
+BEGIN
+    INSERT INTO test VALUES (1);
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        RAISE NOTICE 'DML error occurs';
+    WHEN others THEN
+        RAISE NOTICE 'Other error occurs';
+END;
+/
+
+DECLARE
+    DEADLOCK_DETECTED EXCEPTION;
+    PRAGMA EXCEPTION_INIT(DEADLOCK_DETECTED, -60);
+BEGIN
+    IF 1 > 0 THEN
+        RAISE DEADLOCK_DETECTED;
+    END IF;
+EXCEPTION
+    WHEN DEADLOCK_DETECTED THEN
+        RAISE NOTICE 'test:%',SQLcode;
+END;
+/
+
+----
+-- when the exception is initialized, SQLstate is the same as SQLcode
+----
+DECLARE
+    DEADLOCK_DETECTED EXCEPTION;
+    PRAGMA EXCEPTION_INIT(DEADLOCK_DETECTED, -60);
+BEGIN
+    IF 1 > 0 THEN
+        RAISE DEADLOCK_DETECTED;
+    END IF;
+EXCEPTION
+    WHEN DEADLOCK_DETECTED THEN
+        RAISE NOTICE 'test:%',SQLstate;
+END;
+/
+
+--the sqlerrm is " xxx: non-GaussDB Exception", where xxx is |sqlcode|
+DECLARE
+    DEADLOCK_DETECTED EXCEPTION;
+    PRAGMA EXCEPTION_INIT(DEADLOCK_DETECTED, -60);
+BEGIN
+    IF 1 > 0 THEN
+        RAISE DEADLOCK_DETECTED;
+    END IF;
+EXCEPTION
+    WHEN DEADLOCK_DETECTED THEN
+        RAISE NOTICE 'test:%',SQLerrm;
+END;
+/
+
+-- when not initialized, SQLcode is generated by default with type int, and SQLstate is a text
+DECLARE
+    DEADLOCK_DETECTED EXCEPTION;
+BEGIN
+    IF 1 > 0 THEN
+        RAISE DEADLOCK_DETECTED;
+    END IF;
+EXCEPTION
+    WHEN DEADLOCK_DETECTED THEN
+        RAISE NOTICE 'test:%',SQLcode;
+END;
+/
+
+DECLARE
+    DEADLOCK_DETECTED EXCEPTION;
+BEGIN
+    IF 1 > 0 THEN
+        RAISE DEADLOCK_DETECTED;
+    END IF;
+EXCEPTION
+    WHEN DEADLOCK_DETECTED THEN
+        RAISE NOTICE 'test:%',SQLstate;
+END;
+/
+
+----
+-- forall supports save exceptions grammar
+----
+
+-- function
+CREATE OR REPLACE FUNCTION test_func(iter IN integer) RETURN integer as
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381);
+BEGIN
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS
+        INSERT INTO test_forall
+        VALUES (1);
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        l_error_count := SQL%BULK_EXCEPTIONS.count;
+        DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+        FOR i IN 1 .. l_error_count LOOP
+            DBE_OUTPUT.print_line('Error: ' || i ||
+                ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                ' Message: ' || SQL%BULK_EXCEPTIONS(i).error_message);
+        END LOOP;
+        RETURN 0;
+END;
+/
+
+-- procedure
+CREATE OR REPLACE PROCEDURE test_proc(iter IN integer) as
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, 24381);
+BEGIN
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS
+        INSERT INTO test_forall
+        VALUES (1);
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        l_error_count := SQL%BULK_EXCEPTIONS.count;
+        DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+        FOR i IN 1 .. l_error_count LOOP
+            DBE_OUTPUT.print_line('Error: ' || i ||
+                ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                ' Message: ' || SQL%BULK_EXCEPTIONS(i).error_message);
+        END LOOP;
+END;
+/
+
+-- anonymous block
+DECLARE
+    l_error_count integer;
+    ex_dml_errors EXCEPTION;
+    PRAGMA EXCEPTION_INIT(ex_dml_errors, -24381);
+BEGIN
+    FORALL i IN 1 .. 10
+    SAVE EXCEPTIONS
+        INSERT INTO test_forall
+        VALUES (1);
+EXCEPTION
+    WHEN ex_dml_errors THEN
+        l_error_count := SQL%BULK_EXCEPTIONS.count;
+        DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+        FOR i IN 1 .. l_error_count LOOP
+            DBE_OUTPUT.print_line('Error: ' || i ||
+                ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                ' Message: ' || SQL%BULK_EXCEPTIONS(i).error_message);
+        END LOOP;
+END;
+/
+
+-- test functionality
+CREATE TABLE exception_test (
+id NUMBER(10) NOT NULL UNIQUE
+);
+
+DECLARE
+    TYPE t_tab IS TABLE OF exception_test%ROWTYPE;
+
+    l_tab t_tab := t_tab();
+    l_error_count NUMBER;
+BEGIN
+    -- Fill the collection.
+    FOR i IN 1 .. 100 LOOP
+        l_tab.extend;
+        l_tab(l_tab.last).id := i;
+    END LOOP;
+
+    -- Cause a failure.
+    l_tab(50).id := NULL;
+    l_tab(51).id := NULL;
+    l_tab(60).id := 59;
+
+    EXECUTE IMMEDIATE 'TRUNCATE TABLE exception_test';
+
+    BEGIN
+        FORALL i IN l_tab.first .. l_tab.last SAVE EXCEPTIONS
+            INSERT INTO exception_test
+            VALUES l_tab(i);
+    EXCEPTION
+        WHEN others THEN
+            l_error_count := SQL%BULK_EXCEPTIONS.count;
+            DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+            FOR i IN 1 .. l_error_count LOOP
+                DBE_OUTPUT.print_line('Error: ' || i ||
+                    ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                    ' Error Code: ' || -SQL%BULK_EXCEPTIONS(i).ERROR_CODE ||
+                    ' Error Message: ' || SQL%BULK_EXCEPTIONS(i).ERROR_MESSAGE);
+            END LOOP;
+    END;
+
+    -- Cause a failure. To test leftovers in bulk exceptions
+    l_tab(50).id := 50;
+    l_tab(51).id := 51;
+    l_tab(60).id := 60;
+    l_tab(70).id := 69;
+
+    EXECUTE IMMEDIATE 'TRUNCATE TABLE exception_test';
+
+    BEGIN
+        FORALL i IN l_tab.first .. l_tab.last SAVE EXCEPTIONS
+            INSERT INTO exception_test
+            VALUES l_tab(i);
+    EXCEPTION
+        WHEN others THEN
+            l_error_count := SQL%BULK_EXCEPTIONS.count;
+            DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+            FOR i IN 1 .. l_error_count LOOP
+                DBE_OUTPUT.print_line('Error: ' || i ||
+                    ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                    ' Error Code: ' || -SQL%BULK_EXCEPTIONS(i).ERROR_CODE ||
+                    ' Error Message: ' || SQL%BULK_EXCEPTIONS(i).ERROR_MESSAGE);
+            END LOOP;
+    END;
+END;
+/
+
+-- try in function
+CREATE OR REPLACE FUNCTION func_test_forall() RETURNS int AS
+$BODY$
+DECLARE
+    TYPE t_tab IS TABLE OF exception_test%ROWTYPE;
+
+    l_tab t_tab := t_tab();
+    l_error_count NUMBER;
+BEGIN
+    -- Fill the collection.
+    FOR i IN 1 .. 20 LOOP
+        l_tab.extend;
+        l_tab(l_tab.last).id := i;
+    END LOOP;
+
+    -- Cause a failure.
+    l_tab(10).id := 9;
+
+    EXECUTE IMMEDIATE 'TRUNCATE TABLE exception_test';
+
+    BEGIN
+        FORALL i IN l_tab.first .. l_tab.last SAVE EXCEPTIONS
+            INSERT INTO exception_test
+            VALUES l_tab(i);
+    EXCEPTION
+        WHEN others THEN
+            l_error_count := SQL%BULK_EXCEPTIONS.count;
+            DBE_OUTPUT.print_line('Number of failures: ' || l_error_count);
+            FOR i IN 1 .. l_error_count LOOP
+                DBE_OUTPUT.print_line('Error: ' || i ||
+                    ' Array Index: ' || SQL%BULK_EXCEPTIONS(i).error_index ||
+                    ' Error Code: ' || -SQL%BULK_EXCEPTIONS(i).ERROR_CODE ||
+                    ' Error Message: ' || SQL%BULK_EXCEPTIONS(i).ERROR_MESSAGE);
+            END LOOP;
+    END;
+
+    return 0;
+END;
+$BODY$
+LANGUAGE plpgsql;
+
+select * from func_test_forall();
+
+-- FORALL should only be followed by DMLs
+DECLARE
+    l_error_count integer;
+    l_sql varchar(1024);
+BEGIN
+    l_sql :='INSERT INTO test_forall VALUES (1);';
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS
+        execute immediate l_sql using l_tab(i);
+END;
+/
+
+DECLARE
+    l_error_count integer;
+    l_sql varchar(1024);
+BEGIN
+    l_sql :='INSERT INTO test_forall VALUES (1);';
+    FORALL i IN 1 .. 2
+        execute immediate l_sql using l_tab(i);
+END;
+/
+
+truncate test_forall;
+
+DECLARE
+    l_sql varchar(1024);
+    target int;
+BEGIN
+    FORALL i IN 1 .. 2 --merge OK
+        merge into test_forall using (SELECT 1 a) src on test_forall.a = src.a WHEN NOT MATCHED THEN INSERT VALUES (src.a);
+    FORALL i IN 1 .. 2 --select OK
+        SELECT a into target from test_forall;
+    FORALL i IN 1 .. 2 --insert OK
+        INSERT INTO test_forall VALUES (1);
+    FORALL i IN 1 .. 2 --update OK
+        UPDATE test_forall SET a = 2;
+    FORALL i IN 1 .. 2 --delete OK
+        DELETE FROM test_forall;
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --merge OK
+        merge into test_forall using (SELECT 1 a) src on test_forall.a = src.a WHEN NOT MATCHED THEN INSERT VALUES (src.a);
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --select OK
+        SELECT a into target from test_forall;
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --insert OK
+        INSERT INTO test_forall VALUES (1);
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --update OK
+        UPDATE test_forall SET a = 2;
+    FORALL i IN 1 .. 2 SAVE EXCEPTIONS --delete OK
2 SAVE EXCEPTIONS --delete OK + DELETE FROM test_forall; +END; +/ + +create table test_conflict(last_modified timestamp, comment text); +insert into test_conflict values(now(), 'donda'); + +-- default (equivalent to use_column under a compatibility) +CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$ + DECLARE + curtime timestamp := '2021-09-15 20:59:14'; + var text; + BEGIN + select comment into var from test_conflict; + return var; + END; +$$ LANGUAGE plpgsql; + +select * from conf_func(1,'off-season'); + +-- error +CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$ + #variable_conflict error + DECLARE + curtime timestamp := '2021-09-15 20:59:14'; + var text; + BEGIN + select comment into var from test_conflict; + return var; + END; +$$ LANGUAGE plpgsql; + +select * from conf_func(1,'off-season'); + +-- use_column +CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$ + #variable_conflict use_column + DECLARE + curtime timestamp := '2021-09-15 20:59:14'; + var text; + BEGIN + select comment into var from test_conflict; + return var; + END; +$$ LANGUAGE plpgsql; + +select * from conf_func(1,'off-season'); + +-- use_variable +CREATE or replace FUNCTION conf_func(id int, comment text) RETURNS text AS $$ + #variable_conflict use_variable + DECLARE + curtime timestamp := '2021-09-15 20:59:14'; + var text; + BEGIN + select comment into var from test_conflict; + return var; + END; +$$ LANGUAGE plpgsql; + +select * from conf_func(1,'off-season'); + +-- test original case +create table test_orig(c1 int,c2 int); +insert into test_orig values(1,2); +create or replace procedure pro_tblof_pro_004(c1 in number,c2 out number) +as + type ARRAY_INTEGER is table of int; + tblof001 ARRAY_INTEGER := ARRAY_INTEGER(); +begin + tblof001.extend(10); + tblof001(1) :=1; + c2 :=tblof001(1); + select c2 into tblof001(2) from test_orig; + DBE_OUTPUT.PRINT_LINE(tblof001(tblof001.FIRST)); + DBE_OUTPUT.PRINT_LINE('tblof001.last is '||tblof001.last); + DBE_OUTPUT.PRINT_LINE('tblof001.2 is '||tblof001(2)); + DBE_OUTPUT.PRINT_LINE('tblof001.3 is '||c2); +end; +/ + +declare +a number; +begin + pro_tblof_pro_004(1,a); + DBE_OUTPUT.PRINT_LINE(a); +end; +/ + +-- test nested forall save exceptions +drop type if exists type01; +drop table if exists t_06; +create table t_06(c1 numeric(6,0),c2 date,c3 char(4)); +drop table if exists t_07; +create table t_07(c1 numeric(8,0), c2 date, c3 char(10) not null); +create type type01 is table of t_07%rowtype; + +create or replace procedure p1(l_error_count out number) +as +l_error_count number:=0; +type02 type01; +begin +truncate table t_07; +for i in 1..10 loop +type02(i).c1=5000+i; +type02(i).c2='20210929'; +type02(i).c3=i||'id'; +end loop; +type02(2).c3=null; +forall i in 1..type02.count save exceptions +insert into t_07 +values type02(i); +exception + when forall_dml_error then + l_error_count := sql%bulk_exceptions.count; + dbe_output.print_line('number of failures: ' || l_error_count); + for i in 1 .. 
l_error_count loop + dbe_output.print_line('error: ' || i || + ' array index: ' || sql%bulk_exceptions[i].error_index || + ' messagecode: ' || sql%bulk_exceptions[i].error_code || + ' errormessage: ' || sql%bulk_exceptions[i].error_message); + end loop; + l_error_count := -(sql%bulk_exceptions.count)+type02.count; + dbe_output.print_line('successfully inserted: ' || l_error_count ||'rows'); + +end; +/ + +call p1(1); + +create or replace procedure p2(l_error_count out number) +as +l_error_count number:=0; +type02 type01; + +begin +for i in 1..10 loop +type02(i).c1=5001+i; +type02(i).c2='20210930'; +type02(i).c3=i||'id'; +end loop; +type02(1).c3='a12345'; +truncate table t_06; +insert into t_06(c1,c2,c3) select * from t_07; +forall i in 1..type02.count +update t_06 set c1=(select p1()), + c2=type02(i).c2 + where c1=type02(i).c1; + +exception + when forall_dml_error then + l_error_count := sql%bulk_exceptions.count; + dbe_output.print_line('number of failures: ' || l_error_count); + for i in 1 .. l_error_count loop + dbe_output.print_line('error: ' || i || + ' array index: ' || sql%bulk_exceptions[i].error_index || + ' messagecode: ' || sql%bulk_exceptions[i].error_code || + ' errormessage: ' || sql%bulk_exceptions[i].error_message); + end loop; + l_error_count := -(sql%bulk_exceptions.count)+type02.count; + dbe_output.print_line('successfully inserted: ' || l_error_count ||'rows'); + +end; +/ + +select p2(); + +create or replace procedure p2(l_error_count out number) +as +l_error_count number:=0; +type02 type01; + +begin +for i in 1..10 loop +type02(i).c1=5001+i; +type02(i).c2='20210930'; +type02(i).c3=i||'id'; +end loop; +type02(1).c3='a12345'; +truncate table t_06; +insert into t_06(c1,c2,c3) select * from t_07; +forall i in 1..type02.count save exceptions +update t_06 set c1=(select p1()), + c2=type02(i).c2 + where c1=type02(i).c1; + +exception + when forall_dml_error then + l_error_count := sql%bulk_exceptions.count; + dbe_output.print_line('number of failures: ' || l_error_count); + for i in 1 .. 
l_error_count loop + dbe_output.print_line('error: ' || i || + ' array index: ' || sql%bulk_exceptions[i].error_index || + ' messagecode: ' || sql%bulk_exceptions[i].error_code || + ' errormessage: ' || sql%bulk_exceptions[i].error_message); + end loop; + l_error_count := -(sql%bulk_exceptions.count)+type02.count; + dbe_output.print_line('successfully inserted: ' || l_error_count ||'rows'); + +end; +/ + +select p2(); + +--test with implicit_savepoint +drop table if exists tab_01; +create table tab_01( +c1 varchar2(6), +c2 varchar2(8) not null, +c3 number(9,1) default 10+238/5*3, +c4 varchar2(2), +c5 timestamp(6) check(c4 between 1 and 30), +constraint pk_tab01_c1 primary key(c1) +); + +drop table if exists tab_02; +create table tab_02( +c1 varchar2(10), +c2 varchar2(10) not null, +c3 number(15,0), +c4 varchar2(5), +c5 timestamp(6), +constraint pk_tab02_c1 primary key(c1) +); + +create or replace procedure save_exps_insert02(v1 in numeric) +as +l_error_count number; +type tab_02_type is table of tab_02%rowtype; +tab_02_01 tab_02_type; + +begin + + execute immediate 'truncate table tab_01'; + + for i in 1..50 loop + tab_02_01(i).c1:=i; + tab_02_01(i).c2:='001'||i; + tab_02_01(i).c4:=i; + tab_02_01(i).c5:='2010-12-12'; + end loop; + + tab_02_01(10).c1 := 9; + tab_02_01(8).c1 := 7; + tab_02_01(6).c1 := null; + tab_02_01(11).c2 := null; + tab_02_01(1).c3:=2; + tab_02_01(2).c3:=1234567890; + + forall i in 1..tab_02_01.count save exceptions + insert into tab_01(c1,c2,c3,c4,c5) + values (decode(tab_02_01(i).c1,'49','490',tab_02_01(i).c1), + 'p'||tab_02_01(i).c2, + tab_02_01(i).c3, + case when tab_02_01(i).c1=50 then substr(tab_02_01(i).c4,1,1) + when tab_02_01(i).c1=49 then substr(tab_02_01(i).c4,2,1) + else tab_02_01(i).c4 end , + tab_02_01(i).c5 ); + + exception + when forall_dml_error then + l_error_count := sql%bulk_exceptions.count; + dbe_output.print_line('number of failures: ' || l_error_count); + for i in 1 .. l_error_count loop + dbe_output.print_line('error: ' || i || + ' array index: ' || sql%bulk_exceptions[i].error_index || + ' messagecode: ' || sql%bulk_exceptions[i].error_code || + ' errormessage: ' || sql%bulk_exceptions[i].error_message); + end loop; + for i in 1..sql%bulk_exceptions.count loop + if -sql%bulk_exceptions[i].error_code=-33575106 then + insert into tab_01 values(100+tab_02_01(i).c1, + tab_02_01(i).c2, + v1, + tab_02_01(i).c4, + tab_02_01(i).c5); + end if; + end loop; + when others then + rollback ; + raise; +end; +/ + +set behavior_compat_options='plstmt_implicit_savepoint'; +call save_exps_insert02(9); + +---- +-- test decode coercion +---- +show sql_compatibility; + +create table test_decode_coercion( + col_bool bool, + col_sint int2, + col_int int, + col_bigint bigint, + col_char char(10), + col_bpchar bpchar, + col_varchar varchar, + col_text text, + col_date date, + col_time timestamp +); + +COPY test_decode_coercion(col_bool, col_sint, col_int, col_bigint, col_char, col_bpchar, col_varchar, col_text, col_date, col_time) FROM stdin; +f 1 0 256 11 111 1111 123456 2000-01-01 01:01:01 2000-01-01 01:01:01 +\. + +-- case 1. 
coerce first argument to second argument's unknown type +set sql_beta_feature = 'none'; + +select decode(2,'ff3',5,2); -- to be supported + +select case when 2 = 'ff3' then 5 else 2 end; -- to be supported + +-- valid coercions +select + decode(col_char, 'arbitrary', 1, 2), + decode(col_bpchar, 'arbitrary', 1, 2), + decode(col_varchar, 'arbitrary', 1, 2), + decode(col_text, 'arbitrary', 1, 2), + decode(col_date, '2021-09-17', 1, 2), + decode(col_time, '2000-01-01 01:01:01', 1, 2) +from test_decode_coercion; + +-- to be supported ones +select + decode(col_sint, 'arbitrary', 1, 2), + decode(col_int, 'arbitrary', 1, 2), + decode(col_bigint, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- invalid +select + decode(col_bool, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- invalid +select + decode(col_date, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- invalid +select + decode(col_time, 'arbitrary', 1, 2) +from test_decode_coercion; + +set sql_beta_feature = 'a_style_coerce'; + +select decode(2,'ff3',5,2); -- now ok + +select case when 2 = 'ff3' then 5 else 2 end; -- now ok + +-- still valid +select + decode(col_char, 'arbitrary', 1, 2), + decode(col_bpchar, 'arbitrary', 1, 2), + decode(col_varchar, 'arbitrary', 1, 2), + decode(col_text, 'arbitrary', 1, 2), + decode(col_date, '2021-09-17', 1, 2), + decode(col_time, '2000-01-01 01:01:01', 1, 2) +from test_decode_coercion; + +-- now supported +select + decode(col_sint, 'arbitrary', 1, 2), + decode(col_int, 'arbitrary', 1, 2), + decode(col_bigint, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- still fail +select + decode(col_bool, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- still fail +select + decode(col_date, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- still fail +select + decode(col_time, 'arbitrary', 1, 2) +from test_decode_coercion; + +-- case 2. 
decode case results need coercion +set sql_beta_feature = 'none'; + +select decode(2,3,'r',2); -- to be supported + +select case when 2 = 3 then 'r' else 2 end; -- to be supported + +-- valid coercions +select + decode(1, 2, 'never', col_char), + decode(1, 2, 'never', col_bpchar), + decode(1, 2, 'never', col_varchar), + decode(1, 2, 'never', col_text), + decode(1, 2, '2021-09-17', col_date), + decode(1, 2, '2000-01-01 01:01:01', col_time) +from test_decode_coercion; + +-- to be supported +select + decode(1, 2, 'never', col_sint), + decode(1, 2, 'never', col_int), + decode(1, 2, 'never', col_bigint) +from test_decode_coercion; + +-- invalid +select + decode(1, 2, 'never', col_bool) +from test_decode_coercion; + +-- invalid +select + decode(1, 2, 'never', col_date) +from test_decode_coercion; + +-- invalid +select + decode(1, 2, 'never', col_time) +from test_decode_coercion; + +set sql_beta_feature = 'a_style_coerce'; + +select decode(2,3,'r',2); -- now ok + +select case when 2 = 3 then 'r' else 2 end; -- now ok + +-- still valid +select + decode(1, 2, 'never', col_char), + decode(1, 2, 'never', col_bpchar), + decode(1, 2, 'never', col_varchar), + decode(1, 2, 'never', col_text) +from test_decode_coercion; + +-- now supported +select + decode(1, 2, 'never', col_sint), + decode(1, 2, 'never', col_int), + decode(1, 2, 'never', col_bigint) +from test_decode_coercion; + +-- still invalid +select + decode(1, 2, 'never', col_bool) +from test_decode_coercion; + +-- still invalid +select + decode(1, 2, 'never', col_date) +from test_decode_coercion; + +-- still invalid +select + decode(1, 2, 'never', col_time) +from test_decode_coercion; + +drop schema forall_save_exceptions cascade; + + diff --git a/src/test/regress/sql/force_vector_engine.sql b/src/test/regress/sql/force_vector_engine.sql new file mode 100644 index 000000000..f325b0776 --- /dev/null +++ b/src/test/regress/sql/force_vector_engine.sql @@ -0,0 +1,84 @@ +create schema force_vector_engine; +set current_schema=force_vector_engine; +create table force_vector_test(id int, val int); +insert into force_vector_test values(generate_series(1, 10000), generate_series(1, 1000)); +create table force_vector_test1(id int, val int); +insert into force_vector_test1 select * from force_vector_test; +create index on force_vector_test1(id); +analyze force_vector_test; +analyze force_vector_test1; +create table force_vector_test2(id int, val int) with (orientation=column); +insert into force_vector_test2 select * from force_vector_test; +analyze force_vector_test2; + +create function func_add_sql(a int, b int) +returns int +AS $$ +declare + res int; +begin + select a+b into res; + return res; +end; $$ +LANGUAGE plpgsql; + +set try_vector_engine_strategy='force'; +explain select count(*) from force_vector_test; +select count(*) from force_vector_test; +explain select count(*) from force_vector_test1 where id=2; +select count(*) from force_vector_test1 where id=2; +explain select count(*) from force_vector_test1 where id=2 and val=2; +select count(*) from force_vector_test1 where id=2 and val=2; +set enable_indexscan=off; +explain select count(*) from force_vector_test1 where id=2; +select count(*) from force_vector_test1 where id=2; +explain select count(*) from func_add_sql(1,2); +select count(*) from func_add_sql(1,2); +explain values (1, 'AAAAA', 'read'),(2, 'BBBBB', 'write') order by 1,2,3; +values (1, 'AAAAA', 'read'),(2, 'BBBBB', 'write') order by 1,2,3; +explain select * from force_vector_test where ctid='(0,1)' order by 2; +select * from 
force_vector_test where ctid='(0,1)' order by 2; +explain select * from force_vector_test t1, force_vector_test2 t2 where t1.id=t2.id order by t1.id limit 10; +select * from force_vector_test t1, force_vector_test2 t2 where t1.id=t2.id order by t1.id limit 10; + +set query_dop=1004; +explain select count(*) from force_vector_test; +select count(*) from force_vector_test; +set query_dop=1; + +create table force_vector_test3(id int, val int) with(storage_type=ustore); +select count(*) from force_vector_test3; +insert into force_vector_test3 select * from force_vector_test; +analyze force_vector_test3; +explain select count(*) from force_vector_test; +select count(*) from force_vector_test; + +create table force_vector_test4(c1 int, c2 double precision, c3 double precision, c4 point); +insert into force_vector_test4(c1, c2, c3) values(20, 2.3, 2.3); +select point(c2, c3) from force_vector_test4 where c1 = 20; +-- Do not use vectorization engine +explain select point(c2, c3) from force_vector_test4 where c1 = 20; + +create table force_vector_test5(id int, name varchar(1000)); +insert into force_vector_test5 values(1, 'apple'); +insert into force_vector_test5 values(2, 'pear'); +insert into force_vector_test5 values(3, 'apple pear'); +-- Using the Vectorization Engine +explain select count(*) from force_vector_test5 where id =1 or to_tsvector('ngram',name)@@to_tsquery('ngram','pear'); +select count(*) from force_vector_test5 where id =1 or to_tsvector('ngram',name)@@to_tsquery('ngram','pear'); + +create table force_vector_test6(a int, b int, c int); +insert into force_vector_test6 values(1,2,3); +alter table force_vector_test6 drop column b; +insert into force_vector_test6 select * from force_vector_test6; + +set try_vector_engine_strategy='off'; +drop table force_vector_test; +drop table force_vector_test1; +drop table force_vector_test2; +drop table force_vector_test3; +drop table force_vector_test4; +drop table force_vector_test5; +drop table force_vector_test6; +drop function func_add_sql; +drop schema force_vector_engine cascade; diff --git a/src/test/regress/sql/force_vector_engine2.sql b/src/test/regress/sql/force_vector_engine2.sql new file mode 100644 index 000000000..d288767d7 --- /dev/null +++ b/src/test/regress/sql/force_vector_engine2.sql @@ -0,0 +1,22 @@ +create schema test_force_vector2; +set current_schema=test_force_vector2; +create table force_vector_test(id int, val1 int, val2 numeric(10,5)); +insert into force_vector_test values(generate_series(1, 10000), generate_series(1, 1000), generate_series(1, 2000)); +analyze force_vector_test; +-- partition table +create table force_vector_partition(id int, val1 int, val2 text) +partition by range(id) ( + partition force_vector_p1 values less than (2001), + partition force_vector_p2 values less than (4001), + partition force_vector_p3 values less than (6001), + partition force_vector_p4 values less than (8001), + partition force_vector_p5 values less than (MAXVALUE) +); +insert into force_vector_partition values(generate_series(1, 10000), generate_series(1, 2000), generate_series(1, 5000)); +analyze force_vector_partition; + +explain (analyze on, timing off) select /*+ set(try_vector_engine_strategy force) */ id, val1*2, val2+val1 as val3 from force_vector_test where id < 5000 and val1 < 500 order by id limit 10; +explain (analyze on, timing off) select /*+ set(try_vector_engine_strategy force) */ id, avg(val1), sum(val2) from force_vector_partition group by id order by id limit 10; + +drop table force_vector_test; +drop schema 
test_force_vector2 cascade; diff --git a/src/test/regress/sql/function.sql b/src/test/regress/sql/function.sql index 26f777621..853270bd8 100644 --- a/src/test/regress/sql/function.sql +++ b/src/test/regress/sql/function.sql @@ -928,19 +928,6 @@ DROP FUNCTION func_increment_sql_1; DROP FUNCTION func_increment_sql_2; DROP FUNCTION fun_test_1; DROP FUNCTION fun_test_2; -CREATE OR REPLACE PROCEDURE test_spi() -AS DECLARE d_statement VARCHAR2(32767); -BEGIN - d_statement := 'EXPLAIN PLAN FOR select 1'; - BEGIN - EXECUTE IMMEDIATE d_statement; - END; - COMMIT; -END; -/ -select * from test_spi(); -\d+ plan_table -DROP PROCEDURE test_spi; \c regression; drop database IF EXISTS pl_test_funcion; diff --git a/src/test/regress/sql/function_get_table_def.sql b/src/test/regress/sql/function_get_table_def.sql index 8c17d48e0..a4d8e45ac 100644 --- a/src/test/regress/sql/function_get_table_def.sql +++ b/src/test/regress/sql/function_get_table_def.sql @@ -52,6 +52,17 @@ partition by range (id, a) select * from pg_get_tabledef('table_range3'); drop table table_range3; +create table table_range4 (id int primary key, a date, b varchar) +partition by range (id) +( + partition table_range4_p1 start (10) end (40) every (10), + partition table_range4_p2 end (70), + partition table_range4_p3 start (70), + partition table_range4_p4 start (100) end (150) every (20) +); +select * from pg_get_tabledef('table_range4'); +drop table table_range4; + --interval table create table table_interval1 (id int, a date, b varchar) partition by range (a) @@ -86,6 +97,16 @@ partition by list (b) select * from pg_get_tabledef('table_list2'); drop table table_list2; +create table table_list3 (id int primary key, a date, b varchar) +partition by list (b) +( + partition table_list3_p1 values ('1', '2', '3', '4'), + partition table_list3_p2 values ('5', '6', '7', '8'), + partition table_list3_p3 values (default) +); +select * from pg_get_tabledef('table_list3'); +drop table table_list3; + --hash table create table table_hash1 (id int, a date, b varchar) partition by hash (id) @@ -97,6 +118,97 @@ partition by hash (id) select * from pg_get_tabledef('table_hash1'); drop table table_hash1; +--subpartition table +CREATE TABLE list_range_1 ( + col_1 integer primary key, + col_2 integer, + col_3 character varying(30) unique, + col_4 integer +) +WITH (orientation=row, compression=no) +PARTITION BY LIST (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN (-10), + SUBPARTITION p_range_1_2 VALUES LESS THAN (0), + SUBPARTITION p_range_1_3 VALUES LESS THAN (10), + SUBPARTITION p_range_1_4 VALUES LESS THAN (20), + SUBPARTITION p_range_1_5 VALUES LESS THAN (50) + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN (15), + SUBPARTITION p_range_3_2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN (-10), + SUBPARTITION p_range_4_2 VALUES LESS THAN (0), + SUBPARTITION p_range_4_3 VALUES LESS THAN (10), + SUBPARTITION p_range_4_4 VALUES LESS THAN (20), + SUBPARTITION p_range_4_5 VALUES LESS THAN (50) + ), + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40), + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN (-10), + SUBPARTITION p_range_6_2 VALUES LESS THAN (0), + SUBPARTITION p_range_6_3 VALUES 
LESS THAN (10), + SUBPARTITION p_range_6_4 VALUES LESS THAN (20), + SUBPARTITION p_range_6_5 VALUES LESS THAN (50) + ), + PARTITION p_list_7 VALUES (DEFAULT) +); +select * from pg_get_tabledef('list_range_1'); +drop table list_range_1; + +CREATE TABLE list_hash_2 ( + col_1 integer primary key, + col_2 integer, + col_3 character varying(30) unique, + col_4 integer +) +WITH (orientation=row, compression=no) +PARTITION BY LIST (col_2) SUBPARTITION BY HASH (col_3) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10) + ( + SUBPARTITION p_hash_1_1, + SUBPARTITION p_hash_1_2, + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_hash_3_1, + SUBPARTITION p_hash_3_2 + ), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_hash_4_1, + SUBPARTITION p_hash_4_2, + SUBPARTITION p_hash_4_3, + SUBPARTITION p_hash_4_4, + SUBPARTITION p_hash_4_5 + ), + PARTITION p_list_5 VALUES (31,32,33,34,35,36,37,38,39,40), + PARTITION p_list_6 VALUES (41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_hash_6_1, + SUBPARTITION p_hash_6_2, + SUBPARTITION p_hash_6_3, + SUBPARTITION p_hash_6_4, + SUBPARTITION p_hash_6_5 + ), + PARTITION p_list_7 VALUES (DEFAULT) +); +create unique index list_hash_2_idx1 on list_hash_2(col_2, col_3, col_4) local; +create index list_hash_2_idx2 on list_hash_2(col_3, col_1) local; +create index list_hash_2_idx3 on list_hash_2(col_4) global; +select * from pg_get_tabledef('list_hash_2'); +drop table list_hash_2; + reset current_schema; drop schema test_get_table_def cascade; - diff --git a/src/test/regress/sql/gs_basebackup/init/compress_data.sql b/src/test/regress/sql/gs_basebackup/init/compress_data.sql deleted file mode 100644 index 3f6589eba..000000000 --- a/src/test/regress/sql/gs_basebackup/init/compress_data.sql +++ /dev/null @@ -1,4 +0,0 @@ -CREATE TABLE tbl_pc(id int, c1 text) WITH(compresstype=2, compress_chunk_size=512); -create index on tbl_pc(id) WITH (compresstype=2,compress_chunk_size=1024); -INSERT INTO tbl_pc SELECT id, id::text FROM generate_series(1,1000) id; -checkpoint; diff --git a/src/test/regress/sql/gs_basebackup/validate/compress_data.sql b/src/test/regress/sql/gs_basebackup/validate/compress_data.sql deleted file mode 100644 index 3dfe9780f..000000000 --- a/src/test/regress/sql/gs_basebackup/validate/compress_data.sql +++ /dev/null @@ -1,3 +0,0 @@ -select count(*) from tbl_pc; -set enable_seqscan=off; -select count(*) from tbl_pc; diff --git a/src/test/regress/sql/gs_db_privilege.sql b/src/test/regress/sql/gs_db_privilege.sql new file mode 100644 index 000000000..83ce71e84 --- /dev/null +++ b/src/test/regress/sql/gs_db_privilege.sql @@ -0,0 +1,181 @@ +-- prepare +CREATE ROLE db_priv_user PASSWORD '1234567i*'; +CREATE ROLE db_priv_user1 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user2 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user3 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user4 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user5 PASSWORD '1234567i*'; + +-- system relation privilege check +SET ROLE db_priv_user PASSWORD '1234567i*'; +SELECT * FROM gs_db_privilege ORDER BY oid; +SELECT * FROM gs_db_privileges ORDER BY rolename; +SELECT has_any_privilege('db_priv_user','UPDATE ANY TABLE'); + +-- pg_shdepend +RESET ROLE; +CREATE DATABASE db_priv_base; +\c db_priv_base +CREATE ROLE db_priv_user0 PASSWORD '1234567i*'; +CREATE ROLE db_priv_user00 PASSWORD '1234567i*'; + +select d.datname,a.rolname,p.privilege_type from 
pg_shdepend s join pg_authid a on s.refobjid=a.oid + join pg_database d on s.dbid=d.oid join gs_db_privilege p on s.objid=p.oid; --nothing + +GRANT SELECT ANY TABLE,DROP ANY TABLE TO db_priv_user, db_priv_user0, db_priv_user00; +select d.datname,a.rolname,p.privilege_type from pg_shdepend s join pg_authid a on s.refobjid=a.oid + join pg_database d on s.dbid=d.oid join gs_db_privilege p on s.objid=p.oid order by a.rolname,p.privilege_type; --6 lines + +DROP USER db_priv_user00; +DROP USER db_priv_user00 CASCADE; + +DROP USER db_priv_user0; +REVOKE SELECT ANY TABLE FROM db_priv_user0; +DROP USER db_priv_user0; +REVOKE DROP ANY TABLE FROM db_priv_user0; +DROP USER db_priv_user0; + +\c postgres +GRANT SELECT ANY TABLE TO db_priv_user; +select d.datname,a.rolname,p.privilege_type from pg_shdepend s join pg_authid a on s.refobjid=a.oid + join pg_database d on s.dbid=d.oid join gs_db_privilege p on s.objid=p.oid; --1 line + +DROP USER db_priv_user CASCADE; + +\c db_priv_base +DROP USER db_priv_user CASCADE; +REVOKE SELECT ANY TABLE,DROP ANY TABLE FROM db_priv_user; +DROP USER db_priv_user CASCADE; + +\c postgres +DROP USER db_priv_user; +DROP USER db_priv_user CASCADE; + +\c regression +DROP DATABASE db_priv_base; + +--syntax and gs_db_privilege +RESET ROLE; +GRANT SELECT ANY TABLES TO db_priv_user3; --failed +REVOKE SELECT ANY TABLES FROM db_priv_user3; --failed + +GRANT DELETE ANY TABLE TO PUBLIC; --failed +REVOKE DELETE ANY TABLE FROM PUBLIC; --failed + +GRANT SELECT ANY TABLE TO db_priv_user; --failed +REVOKE SELECT ANY TABLE FROM db_priv_user; --failed + +GRANT SELECT ANY TABLE,DROP ANY TABLE TO db_priv_user1,db_priv_user2; +GRANT update any table TO db_priv_user3, db_priv_user4 WITH ADMIN OPTION; +SELECT * FROM gs_db_privileges ORDER BY rolename; + +GRANT SELECT ANY TABLE TO db_priv_user1; --no change +GRANT SELECT ANY TABLE TO db_priv_user2 WITH ADMIN OPTION; --change to yes +REVOKE ADMIN OPTION FOR DROP ANY TABLE FROM db_priv_user1,db_priv_user2; --no change +REVOKE ADMIN OPTION FOR update ANY TABLE FROM db_priv_user3; --change to no +REVOKE update ANY TABLE FROM db_priv_user4; --delete +SELECT * FROM gs_db_privileges ORDER BY rolename; + +REVOKE SELECT ANY TABLE,DROP ANY TABLE,update any table FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4; +SELECT * FROM gs_db_privileges ORDER BY rolename; + +--privileges for grant +RESET ROLE; +GRANT SELECT ANY TABLE TO db_priv_user1 WITH ADMIN OPTION; +GRANT INSERT ANY TABLE TO db_priv_user1 WITH ADMIN OPTION; +GRANT UPDATE ANY TABLE TO db_priv_user1; +GRANT DELETE ANY TABLE TO db_priv_user1; + +SET ROLE db_priv_user1 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE,UPDATE ANY TABLE,INSERT ANY TABLE TO db_priv_user2; --failed +GRANT INSERT ANY TABLE,DELETE ANY TABLE TO db_priv_user2; --failed +GRANT SELECT ANY TABLE TO db_priv_user2 WITH ADMIN OPTION; +GRANT INSERT ANY TABLE TO db_priv_user2; +GRANT UPDATE ANY TABLE TO db_priv_user2; --failed +GRANT DELETE ANY TABLE TO db_priv_user2; --failed + +SET ROLE db_priv_user2 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user3; +GRANT INSERT ANY TABLE TO db_priv_user3; --failed +GRANT UPDATE ANY TABLE TO db_priv_user3; --failed +GRANT DELETE ANY TABLE TO db_priv_user3; --failed + +SET ROLE db_priv_user3 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user4; --failed +GRANT INSERT ANY TABLE TO db_priv_user4; --failed +GRANT UPDATE ANY TABLE TO db_priv_user4; --failed +GRANT DELETE ANY TABLE TO db_priv_user4; --failed + +RESET ROLE; +SELECT * FROM gs_db_privileges ORDER BY 
rolename; +GRANT db_priv_user2 TO db_priv_user3; +SET ROLE db_priv_user3 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user4; +GRANT INSERT ANY TABLE TO db_priv_user4; --failed +GRANT UPDATE ANY TABLE TO db_priv_user4; --failed +GRANT DELETE ANY TABLE TO db_priv_user4; --failed + +RESET ROLE; +GRANT db_priv_user3 TO db_priv_user4; +SET ROLE db_priv_user4 PASSWORD '1234567i*'; +GRANT SELECT ANY TABLE TO db_priv_user5; +GRANT INSERT ANY TABLE TO db_priv_user5; --failed +GRANT UPDATE ANY TABLE TO db_priv_user5; --failed +GRANT DELETE ANY TABLE TO db_priv_user5; --failed + +REVOKE ADMIN OPTION FOR SELECT ANY TABLE FROM db_priv_user2; +GRANT SELECT ANY TABLE TO db_priv_user5;--failed + +RESET ROLE; +GRANT db_priv_user1 TO db_priv_user5; +SET ROLE db_priv_user5 PASSWORD '1234567i*'; +REVOKE SELECT ANY TABLE FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; +REVOKE INSERT ANY TABLE FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; +REVOKE UPDATE ANY TABLE FROM db_priv_user1,db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; + +--function has_any_privilege +RESET ROLE; +GRANT UPDATE ANY TABLE TO db_priv_user1 WITH ADMIN OPTION; +SELECT * FROM gs_db_privileges ORDER BY rolename; + +SELECT has_any_privilege('db_priv_user','SELECT ANY TABLE'); --error +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLE'); --error +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLES'); --error + +SELECT has_any_privilege('db_priv_user1','UPDATE ANY TABLE WITH ADMIN OPtION'); --t +SELECT has_any_privilege('db_priv_user1','update ANY TABLE WITH ADMIN OPtION'); --t +SELECT has_any_privilege('db_priv_user1','UPDATE ANY TABLE WITH admin OPtION'); --t + +SELECT has_any_privilege('db_priv_user1','update ANY TABLE'); --t +SELECT has_any_privilege('db_priv_user1','UPDATE ANY TABLE WITH ADMIN OPTION'); --t +SELECT has_any_privilege('db_priv_user1','DELETE ANY TABLE'); --t +SELECT has_any_privilege('db_priv_user1','DELETE ANY TABLE WITH ADMIN OPTION'); --f +SELECT has_any_privilege('db_priv_user1','CREATE ANY TABLE'); --f +SELECT has_any_privilege('db_priv_user1','CREATE ANY TABLE WITH ADMIN OPTION'); --f + +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLE, DELETE ANY TABLE WITH ADMIN OPTION'); --f +SELECT has_any_privilege('db_priv_user1','SELECT ANY TABLE, UPDATE ANY TABLE'); --t +SELECT has_any_privilege('db_priv_user1','CREATE ANY TABLE WITH ADMIN OPTION, DELETE ANY TABLE'); --t + +SELECT has_any_privilege('db_priv_user5','update ANY TABLE'); --t +SELECT has_any_privilege('db_priv_user5','UPDATE ANY TABLE WITH ADMIN OPTION'); --t +SELECT has_any_privilege('db_priv_user5','DELETE ANY TABLE'); --t +SELECT has_any_privilege('db_priv_user5','DELETE ANY TABLE WITH ADMIN OPTION'); --f +SELECT has_any_privilege('db_priv_user5','CREATE ANY TABLE'); --f +SELECT has_any_privilege('db_priv_user5','CREATE ANY TABLE WITH ADMIN OPTION'); --f + +SELECT has_any_privilege('db_priv_user5','SELECT ANY TABLE, DELETE ANY TABLE WITH ADMIN OPTION'); --f +SELECT has_any_privilege('db_priv_user5','SELECT ANY TABLE, UPDATE ANY TABLE'); --t +SELECT has_any_privilege('db_priv_user5','CREATE ANY TABLE WITH ADMIN OPTION, DELETE ANY TABLE'); --t + +--audit +RESET ROLE; +SELECT type,result,object_name,detail_info from pg_query_audit('2021-11-30','2099-12-28') + WHERE type='grant_role' AND object_name='db_priv_user0'; +SELECT type,result,object_name,detail_info from pg_query_audit('2021-11-30','2099-12-28') + WHERE type='revoke_role' AND 
object_name='db_priv_user0'; + +--clean +RESET ROLE; +DROP USER db_priv_user1 CASCADE; +DROP USER db_priv_user2,db_priv_user3,db_priv_user4,db_priv_user5; diff --git a/src/test/regress/sql/gsc_func.sql b/src/test/regress/sql/gsc_func.sql new file mode 100644 index 000000000..954bb6a67 --- /dev/null +++ b/src/test/regress/sql/gsc_func.sql @@ -0,0 +1,35 @@ +select * from gs_gsc_dbstat_info() limit 1; +select * from gs_gsc_dbstat_info(-1) limit 1; +select * from gs_gsc_dbstat_info(0) limit 1; +select * from gs_gsc_dbstat_info(1) limit 1; +select * from gs_gsc_dbstat_info(2) limit 1; + +select * from gs_gsc_catalog_detail() limit 1; +select * from gs_gsc_catalog_detail(-1) limit 1; +select * from gs_gsc_catalog_detail(0) limit 1; +select * from gs_gsc_catalog_detail(1) limit 1; +select * from gs_gsc_catalog_detail(-1, 1262) limit 1; +select * from gs_gsc_catalog_detail(0, 1262) limit 1; +select * from gs_gsc_catalog_detail(1, 1262) limit 1; +select * from gs_gsc_catalog_detail(-1, 1259) limit 1; +select * from gs_gsc_catalog_detail(0, 1259) limit 1; +select * from gs_gsc_catalog_detail(1, 1259) limit 1; +select * from gs_gsc_catalog_detail(2, 1259) limit 1; + +select * from gs_gsc_table_detail() limit 1; +select * from gs_gsc_table_detail(-1) limit 1; +select * from gs_gsc_table_detail(0) limit 1; +select * from gs_gsc_table_detail(1) limit 1; +select * from gs_gsc_table_detail(-1, 1262) limit 1; +select * from gs_gsc_table_detail(0, 1262) limit 1; +select * from gs_gsc_table_detail(1, 1262) limit 1; +select * from gs_gsc_table_detail(-1, 1259) limit 1; +select * from gs_gsc_table_detail(0, 1259) limit 1; +select * from gs_gsc_table_detail(1, 1259) limit 1; +select * from gs_gsc_table_detail(2, 1259) limit 1; + +select * from gs_gsc_clean() limit 1; +select * from gs_gsc_clean(-1) limit 1; +select * from gs_gsc_clean(0) limit 1; +select * from gs_gsc_clean(1) limit 1; +select * from gs_gsc_clean(2) limit 1; \ No newline at end of file diff --git a/src/test/regress/sql/hash_index_001.sql b/src/test/regress/sql/hash_index_001.sql deleted file mode 100644 index 1929a8ccf..000000000 --- a/src/test/regress/sql/hash_index_001.sql +++ /dev/null @@ -1,177 +0,0 @@ -------------------------------------- ----------- hash index part1---------- -------------------------------------- - -set enable_seqscan = off; -set enable_indexscan = off; ------------------- --- hash_table_1 -- ------------------- -drop table if exists hash_table_1 cascade; -create table hash_table_1 (id int, name varchar, sex varchar default 'male'); - -insert into hash_table_1 values (1, 'Smith'); -insert into hash_table_1 values (2, 'Jones'); -insert into hash_table_1 values (3, 'Williams', 'female'); -insert into hash_table_1 values (4, 'Taylor'); -insert into hash_table_1 values (5, 'Brown'); -insert into hash_table_1 values (6, 'Davies'); - -drop index if exists hash_t1_id1; -create index hash_t1_id1 on hash_table_1 using hash (id); --- error, does not support multicolumn indexes -drop index if exists hash_t1_id2; -create index hash_t1_id2 on hash_table_1 using hash (id, sex); - --- compare with hash_t1_id1 and hash_t1_id3, hash index can be create in same column -drop index if exists hash_t1_id3; -drop index if exists hash_t1_id4; -create index hash_t1_id3 on hash_table_1 using btree (id); -create index hash_t1_id4 on hash_table_1 using hash (id); - --- drop superfluous index now -drop index hash_t1_id3, hash_t1_id4; - --- insert into large volumns of data into hash_table_1 -insert into hash_table_1 select 4, 'XXX', 'XXX' from 
generate_series(1,50000); -insert into hash_table_1 select 6, 'XXX', 'XXX' from generate_series(1,50000); -analyse hash_table_1; - --- after insert, hash_t1_id1 is still work -explain(costs off) select * from hash_table_1 where id = 4; -select count(*) from hash_table_1 where id = 6; --50001 - --- do other dml action, then check hash_t1_id1 again -insert into hash_table_1 select random()*100, 'XXX', 'XXX' from generate_series(1,50000); -update hash_table_1 set id = 101, sex = 'male' where id = 60; -delete from hash_table_1 where id = 80; -explain(costs off) select * from hash_table_1 where id = 101; - --- cleanup env -drop table hash_table_1 cascade; - ------------------- --- hash_table_2 -- ------------------- -drop table if exists hash_table_2 cascade; -create table hash_table_2 (id int, name varchar, sex varchar default 'male'); -insert into hash_table_2 select random()*100, 'XXX', 'XXX' from generate_series(1,100000); - --- create index concurrently --- In this fastcheck, we only check it can run properly. However, in a real --- situation, you should run this sql in connection a first, then doing some DML( --- insert, delete, update) operation about this table in connection b as soon --- as possible. We expect the create index do not block DML operation. --- connection a -create index concurrently hash_t2_id1 on hash_table_2 using hash (id); --- connection b -insert into hash_table_2 select random()*100, 'XXX', 'XXX' from generate_series(1,100); -explain(costs off) select * from hash_table_2 where id = 40; - --- error, does not support unique indexes -create unique index hash_t2_id2 on hash_table_2 using hash (sex); - --- hash_t2_id3 occupies more disk space than hash_t2_id2 -create index hash_t2_id2 on hash_table_2 using hash (id) with (fillfactor=25); -create index hash_t2_id3 on hash_table_2 using hash (id) with (fillfactor=75); - -select count(*) from hash_table_2; --100100 - --- cleanup env -drop table hash_table_2 cascade; - ------------------- --- hash_table_3 -- ------------------- -drop schema if exists hash_sc_3 cascade; -drop tablespace if exists hash_sp_3; -create schema hash_sc_3; -create tablespace hash_sp_3 relative location 'tablespace/tablespace_1'; -create table hash_sc_3.hash_table_3 -( - id int, name varchar, - sex varchar default 'male' -) -tablespace hash_sp_3; --- create index specify schema and tablespace -create index concurrently hash_sc_3.hash_t3_id1 on hash_sc_3.hash_table_3 using hash (id); -create index hash_sc_3.hash_t3_id2 on hash_sc_3.hash_table_3 using hash (id) tablespace hash_sp_3; - -drop table hash_sc_3.hash_table_3 cascade; -drop schema hash_sc_3 cascade; -drop tablespace hash_sp_3; - ------------------- --- hash_table_4 -- ------------------- -drop table if exists hash_table_4 cascade; -create table hash_table_4 -( - id int, - name varchar, - sex varchar default 'male' -) -partition by range(id) -( - partition p1 values less than (1000), - partition p2 values less than (2000), - partition p3 values less than (3000), - partition p4 values less than (maxvalue) -); - --- hash index only support local index in partition table -drop index if exists hash_t4_id1; -drop index if exists hash_t4_id2; -drop index if exists hash_t4_id2_new; -create index hash_t4_id1 on hash_table_4 using hash(id) global; -create index hash_t4_id2 on hash_table_4 using hash(id) local -( - partition index_t4_p1, - partition index_t4_p2, - partition index_t4_p3, - partition index_t4_p4 -); - --- alter index rename, unusable -insert into hash_table_4 select random()*5000, 'XXX', 
'XXX' from generate_series(1,1000); -alter index hash_t4_id2 rename to hash_t4_id2_new; -alter index hash_t4_id2_new modify partition index_t4_p2 unusable; -reindex index hash_t4_id2_new partition index_t4_p2; - -drop table hash_table_4 cascade; - ------------------- --- hash_table_5 -- ------------------- -drop table if exists hash_table_5; -create temporary table hash_table_5(id int, name varchar, sex varchar default 'male'); - -drop index if exists hash_t5_id1; -create index hash_t5_id1 on hash_table_5 using hash(id) with(fillfactor = 80); - -insert into hash_table_5 select random()*100, 'XXX', 'XXX' from generate_series(1,100); -update hash_table_5 set name = 'aaa' where id = 80; -alter index hash_t5_id1 set (fillfactor = 60); -alter index hash_t5_id1 reset (fillfactor); -explain (costs off) select * from hash_table_5 where id = 80; -drop table hash_table_5 cascade; - ------------------- --- hash_table_6 -- ------------------- -drop table if exists hash_table_6; -create global temporary table hash_table_6(id int, name varchar, sex varchar default 'male'); -drop index if exists hash_t6_id1; -create index hash_t6_id1 on hash_table_6 using hash((id*10)) with (fillfactor = 30); -insert into hash_table_6 select random()*100, 'XXX', 'XXX' from generate_series(1,1000); -delete from hash_table_6 where id in (50, 60, 70); -explain (costs off) select * from hash_table_6 where id*10 = 80; -drop table hash_table_6 cascade; - --- create unlogged table index, which will be delete in hash_index_002 -drop table if exists hash_table_7; -create unlogged table hash_table_7(id int, name varchar, sex varchar default 'male'); -insert into hash_table_7 select random()*100, 'XXX', 'XXX' from generate_series(1,1000); -create index hash_t7_id1 on hash_table_7 using hash(id) with (fillfactor = 30); -explain (costs off) select * from hash_table_7 where id = 80; -select count(*) from hash_table_7; \ No newline at end of file diff --git a/src/test/regress/sql/hash_index_002.sql b/src/test/regress/sql/hash_index_002.sql deleted file mode 100644 index e07f4e0ab..000000000 --- a/src/test/regress/sql/hash_index_002.sql +++ /dev/null @@ -1,68 +0,0 @@ -------------------------------------- ----------- hash index part2---------- -------------------------------------- - -set enable_seqscan = off; -set enable_indexscan = off; - --- continue to hash_index_001 -explain (costs off) select * from hash_table_7 where id = 80; -drop table hash_table_7 cascade; - --- low maintenance_work_mem -set maintenance_work_mem = '1MB'; - -drop table if exists hash_table_8; -create table hash_table_8(id int, name varchar, sex varchar default 'male'); -insert into hash_table_8 select random()*100, 'XXX', 'XXX' from generate_series(1,50000); -create index hash_t8_id1 on hash_table_8 using hash(id) with (fillfactor = 30); -explain (costs off) select * from hash_table_8 where id = 80; -drop table hash_table_8 cascade; - --- vacuum one page -set enable_indexscan = on; -set enable_bitmapscan = off; -set maintenance_work_mem = '100MB'; -alter system set autovacuum = off; - -drop table if exists hash_table_9; -create table hash_table_9(id int, name varchar, sex varchar default 'male'); -insert into hash_table_9 select random()*100, 'XXX', 'XXX' from generate_series(1,50000); -create index hash_t9_id1 on hash_table_9 using hash(id) with (fillfactor = 10); -create or replace procedure hash_proc_9(sid in integer) -is -begin -delete from hash_table_9 where id = sid; -perform * from hash_table_9 where id = sid; -insert into hash_table_9 select sid, 
random() * 10, 'xxx' from generate_series(1,5000); -end; -/ -call hash_proc_9(1); -call hash_proc_9(1); -call hash_proc_9(1); -call hash_proc_9(1); - -drop table hash_table_9 cascade; -drop procedure hash_proc_9; - --- some dml operator -drop table if exists hash_table_10; -create table hash_table_10(id int, num int, sex varchar default 'male'); -create index hash_t10_id1 on hash_table_10 using hash (id); -insert into hash_table_10 select random()*10, random()*10, 'XXX' from generate_series(1,5000); -insert into hash_table_10 select random()*10, random()*10, 'XXX' from generate_series(1,5000); -delete from hash_table_10 where id = 7 and num = 1; -insert into hash_table_10 select 7, random()*3, 'XXX' from generate_series(1,500); -delete from hash_table_10 where id = 5; -vacuum hash_table_10; -insert into hash_table_10 select random()*50, random()*3, 'XXX' from generate_series(1,50000); -delete from hash_table_10 where num = 2; -vacuum hash_table_10; -drop table hash_table_10 cascade; - ---reset all parameters -reset enable_indexscan; -reset enable_bitmapscan; -reset enable_seqscan; -reset maintenance_work_mem; -alter system set autovacuum = on; \ No newline at end of file diff --git a/src/test/regress/sql/huge_clob.sql b/src/test/regress/sql/huge_clob.sql new file mode 100644 index 000000000..34b9de6f9 --- /dev/null +++ b/src/test/regress/sql/huge_clob.sql @@ -0,0 +1,225 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists huge_clob; +create schema huge_clob; +set current_schema = huge_clob; + +create table bigclobtbl031(c1 int,c2 clob,c3 clob,c4 blob,c5 date,c6 timestamp,c7 varchar2); +insert into bigclobtbl031 values(generate_series(1,5),repeat('AAAA11111aaaaaaaaaaaaaaaaa',1000000),repeat('abdededfj12345679ujik',1000000),hextoraw(repeat('12345678990abcdef',1000)),sysdate,to_timestamp('','yyyy-mm-dd hh24:mi:ss.ff6'),7000); +update bigclobtbl031 set c2=c2||c2||c2||c2||c2; +update bigclobtbl031 set c2=c2||c2||c2||c2||c2; +update bigclobtbl031 set c2=c2||c2; +update bigclobtbl031 set c3='clobclob3'; + +--I1.clob in +create or replace procedure pro_cb4_031(c1 clob,c2 clob) +is +v1 clob; +v2 clob; +begin +v1:=dbe_lob.substr(c1,10,1); +v2:=dbe_lob.substr(c2,10,1); +raise info 'c1 is %',v1; +raise info 'c2 is %',v2; +end; +/ + +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v2; +pro_cb4_031(v1,v2); +end; +/ + +call pro_cb4_031_1(); + +--I2.clob > 1G out +create or replace procedure pro_cb4_031(c1 out clob,c2 out clob) +is +v1 clob; +v2 clob; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v2; +c1:=v1; +c2:=v2; +end; +/ + +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +v3 clob; +v4 clob; +begin +pro_cb4_031(v1,v2); +v3:=dbe_lob.substr(v1,10,1); +v4:=dbe_lob.substr(v2,10,1); +raise info 'v3 is %',v3; +raise info 'v4 is %',v4; +end; +/ + +call pro_cb4_031_1(); + +-- <1G out +create or replace procedure pro_cb4_031(c1 out clob,c2 out clob) +is +v1 clob; +v2 clob; +begin +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=2' into v2; +c1:=v1; +c2:=v2; +end; +/ + +call pro_cb4_031_1(); + +--I3.clob as inout +create or replace procedure pro_cb4_031(c1 inout 
clob,c2 inout clob) +is +v1 clob; +v2 clob; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v2; +c1:=v1; +c2:=v2; +end; +/ + +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +v3 clob; +v4 clob; +begin +pro_cb4_031(v1,v2); +v3:=dbe_lob.substr(v1,10,1); +v4:=dbe_lob.substr(v2,10,1); +raise info 'v3 is %',v3; +raise info 'v4 is %',v4; +end; +/ + +call pro_cb4_031_1(); + +--I4. < 1GB clob inout +create or replace procedure pro_cb4_031(c1 inout clob,c2 clob,c3 out clob) +is +v1 clob; +v2 clob; +v3 clob; +begin +execute immediate 'select c3 from bigclobtbl031 where c1=1' into v1; +execute immediate 'select c3 from bigclobtbl031 where c1=2' into v2; +execute immediate 'select c3 from bigclobtbl031 where c1=3' into v3; +c1:=v1; +c2:=v2; +c3:=v3||'clob3clob3clob3clob3'; +end; +/ + +create or replace procedure pro_cb4_031_1 is +v1 clob; +v2 clob; +v3 clob; +v4 clob; +v5 clob; +v6 clob; +begin +pro_cb4_031(v1,v2,v3); +v4:=dbe_lob.substr(v1,10,1); +v5:=dbe_lob.substr(v2,10,1); +v6:=dbe_lob.substr(v3,10,1); +raise info 'v4 is %',v4; +raise info 'v5 is %',v5; +raise info 'v6 is %',v6; +end; +/ + +call pro_cb4_031_1(); +--I5. table of clob +create or replace procedure pro_cb4_031 is +type ty1 is table of clob; +v1 ty1; +begin +for i in 1..10 loop +execute immediate 'select c2 from bigclobtbl031 where c1='||i into v1(i); +update bigclobtbl030 set c3=v1(i)||v1(i) where c1=i; +end loop; +end; +/ + +call pro_cb4_031(); + +-- array +create or replace procedure pro_cb4_031 is +type ty1 is varray(10) of clob; +v1 ty1; +begin +for i in 1..10 loop +execute immediate 'select c2 from bigclobtbl031 where c1='||i into v1(i); +update bigclobtbl030 set c3=v1(i)||v1(i) where c1=i; +end loop; +end; +/ + +call pro_cb4_031(); +select c1,c2,length(c2),c3,length(c3) from bigclobtbl031 where c1>5 and c1<10 order by 1,2,3,4,5; +update bigclobtbl031 set c3='clob3clob3'; +--I6.record +create or replace procedure pro_cb4_031 is +type ty1 is record(c1 int,c2 clob); +v1 ty1; +begin +execute immediate 'select c2 from bigclobtbl031 where c1=1' into v1.c2; +end; +/ + +call pro_cb4_031(); + +--I7 fetch +create or replace procedure pro_cb4_037 is +v1 clob; +v2 clob; +v3 clob; +v4 int; +cursor cor1 is select c2 from bigclobtbl031 where c1=1; +begin +open cor1; +loop +fetch cor1 into v1; +fetch cor1 into v1; +fetch cor1 into v1; +fetch cor1 into v1; +fetch cor1 into v1; +exit when cor1%notfound; +end loop; +close cor1; +end; +/ + +call pro_cb4_037(); + +drop table if exists cloblongtbl; +create table cloblongtbl (a int, b clob, c clob); +insert into cloblongtbl values (generate_series(1,4),repeat('唐李白床前明月光,疑是地上霜,举头望明月,低头思故乡',5000000),repeat('唐李白床前明月光,疑是地上霜,举头望明月,低头思故乡',5000000)); +update cloblongtbl set b = b||b; +update cloblongtbl set c = c||c; +update cloblongtbl set b = b||b where a = 2; +update cloblongtbl set c = c||c where a = 3; +update cloblongtbl set b = b||b where a = 4; +update cloblongtbl set c = c||c where a = 4; +select a, length(b || c) from cloblongtbl order by 1; +drop table if exists cloblongtbl; +-- clean +drop schema if exists huge_clob cascade; \ No newline at end of file diff --git a/src/test/regress/sql/hw_cursor_part1.sql b/src/test/regress/sql/hw_cursor_part1.sql index d545bf203..21653585d 100644 --- a/src/test/regress/sql/hw_cursor_part1.sql +++ b/src/test/regress/sql/hw_cursor_part1.sql @@ -793,6 +793,41 @@ close c; end; / +create table t1_refcursor(a int); +insert into t1_refcursor values (1); 
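+-- Note (editorial, inferred from the test shape rather than stated in it): the +-- procedures below open a cursor into a sys_refcursor OUT parameter and then +-- raise a handled exception via a deliberate 1/0; the behaviour being pinned +-- down appears to be that the already-opened cursor is still handed back to +-- the caller once the WHEN OTHERS handler completes.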
+insert into t1_refcursor values (2); +create or replace procedure p3_refcursor (c1 out sys_refcursor) as +va t1_refcursor; +i int; +begin +open c1 for select * from t1_refcursor; +i = 1/0; +exception +when others then + raise info '%', 'exception'; +end; +/ +select * from p3_refcursor(); + +create or replace procedure p3 (c4 in int,c2 out int,c3 out int,c1 out sys_refcursor,cc2 out sys_refcursor) as +va t1_refcursor; +i int; +begin +begin +open cc2 for select * from t1_refcursor; +i = 1/0; +exception +when others then + raise info '%', 'exception2'; +end; +open c1 for select * from t1_refcursor; +c3:=1; +c2:=2; +end; +/ + +select * from p3(1); + START TRANSACTION; CURSOR sc FOR select * from generate_series(3, 13) i where i <> all (values (1),(2),(4)); MOVE FORWARD 10 IN sc; diff --git a/src/test/regress/sql/hw_cursor_part4.sql b/src/test/regress/sql/hw_cursor_part4.sql index babf0c158..ed3c0167b 100644 --- a/src/test/regress/sql/hw_cursor_part4.sql +++ b/src/test/regress/sql/hw_cursor_part4.sql @@ -86,7 +86,7 @@ INSERT INTO TBL_H248LNK_INFO VALUES(456); INSERT INTO TBL_H248LNK_INFO VALUES(789); CREATE TABLE TBL (I_MODULENO INTEGER); -CREATE OR REPLACE PROCEDURE TEST_CURSOR +CREATE OR REPLACE PROCEDURE TEST_CURSOR_4 AS TYPE CUR_TYPE IS REF CURSOR; CUR CUR_TYPE; @@ -196,8 +196,8 @@ BEGIN raise notice 'SQL%%ROWCOUNT :%',NVL(TO_CHAR(SQL%ROWCOUNT),'NULL'); END; / -CALL TEST_CURSOR(); -DROP PROCEDURE TEST_CURSOR; +CALL TEST_CURSOR_4(); +DROP PROCEDURE TEST_CURSOR_4; DROP TABLE TBL_H248LNK_INFO; DROP TABLE TBL; diff --git a/src/test/regress/sql/hw_cursor_part7.sql b/src/test/regress/sql/hw_cursor_part7.sql index fbcf6b743..80b43ac9c 100644 --- a/src/test/regress/sql/hw_cursor_part7.sql +++ b/src/test/regress/sql/hw_cursor_part7.sql @@ -526,7 +526,7 @@ INSERT INTO TBL_H248LNK_INFO VALUES(456); INSERT INTO TBL_H248LNK_INFO VALUES(789); CREATE TABLE TBL (I_MODULENO INTEGER); -CREATE OR REPLACE PROCEDURE TEST_CURSOR +CREATE OR REPLACE PROCEDURE TEST_CURSOR_7 AS TYPE CUR_TYPE IS REF CURSOR; CUR CUR_TYPE; @@ -636,8 +636,8 @@ BEGIN raise notice 'SQL%%ROWCOUNT :%',NVL(TO_CHAR(SQL%ROWCOUNT),'NULL'); END; / -CALL TEST_CURSOR(); -DROP PROCEDURE TEST_CURSOR; +CALL TEST_CURSOR_7(); +DROP PROCEDURE TEST_CURSOR_7; DROP TABLE TBL_H248LNK_INFO; DROP TABLE TBL; DROP TABLE TBL_RCWSCFG; @@ -736,4 +736,30 @@ END; CALL TEST_CRS_RPT_EMPTYSOR(0); CALL TEST_CRS_RPT_EMPTYSOR(1); +create table tb_test(col1 int); +create or replace procedure proc_test() +as +v_count int; +begin +insert into tb_test select 1; +update tb_test set col1=2; +select 1 into v_count; +raise notice '%',v_count||','||SQL%FOUND || ',' || SQL%ROWCOUNT; +end; +/ + +declare +v_count int; +begin +insert into tb_test select 1; +update tb_test set col1=2; +select 1 into v_count; +proc_test(); +v_count:=1; +raise notice '%',v_count||','||SQL%FOUND || ',' || SQL%ROWCOUNT; +end +/ + +drop table tb_test; + drop schema hw_cursor_part7 CASCADE; diff --git a/src/test/regress/sql/hw_cursor_part8.sql b/src/test/regress/sql/hw_cursor_part8.sql index e013423d0..3c58275b2 100644 --- a/src/test/regress/sql/hw_cursor_part8.sql +++ b/src/test/regress/sql/hw_cursor_part8.sql @@ -324,7 +324,7 @@ END; create table t1(a int); --test with query -create or replace procedure test_cursor() as +create or replace procedure test_cursor_8() as declare cursor cursor1 is with recursive StepCTE(a) @@ -353,7 +353,7 @@ select * from pro_cursor_c0019(); create table test_cursor_table(c1 int,c2 varchar); insert into test_cursor_table values(1,'Jack'),(2,'Rose'); -create or replace 
procedure test_cursor() as +create or replace procedure test_cursor_8() as declare type ref_cur is ref cursor; cur1 ref_cur; @@ -371,9 +371,9 @@ begin CLOSE cur1; end / -call test_cursor(); +call test_cursor_8(); -create or replace procedure test_cursor() as +create or replace procedure test_cursor_8() as declare type ref_cur is ref cursor; cur1 ref_cur; @@ -394,7 +394,7 @@ begin CLOSE cur1; end / -call test_cursor(); +call test_cursor_8(); create type pro_type_04 as ( v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying, v_cur refcursor); diff --git a/src/test/regress/sql/hw_dbms_sql1.sql b/src/test/regress/sql/hw_dbms_sql1.sql new file mode 100644 index 000000000..cb70c4c01 --- /dev/null +++ b/src/test/regress/sql/hw_dbms_sql1.sql @@ -0,0 +1,2494 @@ +create database pl_test_cursor_part1 DBCOMPATIBILITY 'pg'; +\c pl_test_cursor_part1; +---bind_variable int +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +v_info bytea :=1; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +begin +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int); +insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11); +insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11); +query := 'select * from pro_dbe_sql_all_tb1_02 where a > y and a < z order by 1'; +--open the cursor +context_id := dbe_sql.register_context(); +--compile the cursor +dbe_sql.sql_set_sql(context_id, query, 1); +--bind the parameters +dbe_sql.sql_bind_variable(context_id, 'z', 10); +dbe_sql.sql_bind_variable(context_id, 'y', 1); +--define the columns +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id); +--execute +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--fetch the values +dbe_sql.get_results(context_id,1,v_id); +--print the results +dbe_output.print_line('id:'|| v_id); +end loop; +--close the cursor +dbe_sql.sql_unregister_context(context_id); +end; +/ +--call the stored procedure +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +--drop the stored procedure +DROP PROCEDURE pro_dbe_sql_all_02; + +---bind_variable clob +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +v_info bytea :=1; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +begin +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a int ,b clob,c clob,d int); +insert into pro_dbe_sql_all_tb1_02 values(4,HEXTORAW('DEADBEEF'),HEXTORAW('D'),11); +insert into pro_dbe_sql_all_tb1_02 values(6,HEXTORAW('DEADBEEF'),HEXTORAW('DE'),11); +query := 'select * from pro_dbe_sql_all_tb1_02 where b = y and c = z'; +--open the cursor +context_id := dbe_sql.register_context(); +--compile the cursor +dbe_sql.sql_set_sql(context_id, query, 1); +--bind the parameters +dbe_sql.sql_bind_variable(context_id, 'y', HEXTORAW('DEADBEEF')); +dbe_sql.sql_bind_variable(context_id, 'z', HEXTORAW('D')); +--define the columns +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id); +--execute +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--fetch the values +dbe_sql.get_results(context_id,1,v_id); +--print the results +dbe_output.print_line('id:'|| v_id); +end loop; +--close the cursor +dbe_sql.sql_unregister_context(context_id); +end; +/ +--call the stored procedure +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +--drop the stored procedure +DROP PROCEDURE pro_dbe_sql_all_02; + +---bind_array int\char\bytea\text\raw +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as 
+context_id int; +v_id int; +v_info bytea :=1; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +v_id1 int[]; +v_id4 char[]; +v_id5 bytea[]; +v_id6 text[]; +v_id7 raw[]; +begin +v_id1[1] := 3; +v_id1[2] := 4; + +v_id5[1] := '2'; +v_id5[2] := '2'; +v_id5[3] := '3'; + +v_id4[1] := '3'; +v_id4[2] := '3'; +v_id4[3] := '3'; + +v_id6[1] := '11'; +v_id6[2] := '11'; + +v_id7[1] := '1'; +v_id7[2] := '1'; +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a int ,b char,c bytea,d text,e raw); +insert into pro_dbe_sql_all_tb1_02 values(4,'3','2','11','1'); +insert into pro_dbe_sql_all_tb1_02 values(6,'3','1','11','1'); +query := 'select * from pro_dbe_sql_all_tb1_02 where a > y and b = f and c = i and d = j and e = k'; +--open the cursor +context_id := dbe_sql.register_context(); +--compile the cursor +dbe_sql.sql_set_sql(context_id, query, 1); +--bind the parameters +dbe_sql.sql_bind_array(context_id, 'y', v_id1,1,1); +dbe_sql.sql_bind_array(context_id, 'i', v_id5,2,3); +dbe_sql.sql_bind_array(context_id, 'f', v_id4,2,3); +dbe_sql.sql_bind_array(context_id, 'j', v_id6,2,2); +dbe_sql.sql_bind_array(context_id, 'k', v_id7,2,2); + +--define the columns +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id); +--execute +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--fetch the values +dbe_sql.get_results(context_id,1,v_id); +--print the results +dbe_output.print_line('id:'|| v_id); +end loop; +--close the cursor +dbe_sql.sql_unregister_context(context_id); +end; +/ +--call the stored procedure +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +--drop the stored procedure +DROP PROCEDURE pro_dbe_sql_all_02; + +---bind_array error +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +v_info bytea :=1; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +v_id1 int[]; +begin +v_id1[1] := 3; +v_id1[2] := 4; +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int); +insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11); +insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11); +query := 'select * from pro_dbe_sql_all_tb1_02 where a > y order by 1'; +--open the cursor +context_id := dbe_sql.register_context(); +--compile the cursor +dbe_sql.sql_set_sql(context_id, query, 1); +--bind the parameters +dbe_sql.sql_bind_array(context_id, 'y', v_id1,-1,2); +--define the columns +define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id); +--execute +execute_ret := dbe_sql.sql_run(context_id); +loop +exit when (dbe_sql.next_row(context_id) <= 0); +--fetch the values +dbe_sql.get_results(context_id,1,v_id); +--print the results +dbe_output.print_line('id:'|| v_id); +end loop; +--close the cursor +dbe_sql.sql_unregister_context(context_id); +end; +/ +--call the stored procedure +call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1); +--drop the stored procedure +DROP PROCEDURE pro_dbe_sql_all_02; + +---bind_array error +create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int) +as +context_id int; +v_id int; +v_info bytea :=1; +query varchar(2000); +execute_ret int; +define_column_ret_raw bytea :='1'; +define_column_ret int; +v_id1 int[]; +begin +v_id1[1] := 3; +v_id1[2] := 4; +drop table if exists pro_dbe_sql_all_tb1_02 ; +create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int); +insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11); +insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11); +query := 'select * from pro_dbe_sql_all_tb1_02 where a > y order by 1'; +--open the cursor +context_id := dbe_sql.register_context(); +--compile the cursor +dbe_sql.sql_set_sql(context_id, query, 1); 
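+-- Note (editorial assumption, based on the '---bind_array error' headers): this +-- variant goes on to bind the placeholder 'y' twice, first as a whole array and +-- then as a slice of it; together with the neighbouring variants (negative +-- bounds, and mixing sql_bind_array with sql_bind_variable on the same name) it +-- probes how dbe_sql reports conflicting or malformed binds.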
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1);
+dbe_sql.sql_bind_array(context_id, 'y', v_id1,1,2);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+---bind_array error
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y and b =z order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1);
+dbe_sql.sql_bind_array(context_id, 'y', v_id1,1,2);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+---bind_array error
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+v_id1 int[];
+begin
+v_id1[1] := 3;
+v_id1[2] := 4;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a > y order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_array(context_id, 'y', v_id1);
+dbe_sql.sql_bind_variable(context_id, 'y', 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+---set_results_type
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b int, c char, d text);
+insert into pro_dbe_sql_all_tb1_02 values('1',9,'5','13');
+insert into pro_dbe_sql_all_tb1_02 values('2',10,'6','14');
+insert into pro_dbe_sql_all_tb1_02 values('3',11,'7','15');
+insert into pro_dbe_sql_all_tb1_02 values('4',12,'8','16');
+query := ' select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+dbe_sql.set_results_type(context_id,1,v_id6,v_id,v_id2);
+dbe_sql.set_results_type(context_id,2,v_id1,v_id,v_id2);
+dbe_sql.set_results_type(context_id,3,v_id4,v_id,v_id2);
+dbe_sql.set_results_type(context_id,4,v_id5,v_id,v_id2);
+
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+v_id3 := dbe_sql.next_row(context_id);
+v_id6 := dbe_sql.get_results(context_id,1,v_id6);
+v_id1 := dbe_sql.get_results(context_id,2,v_id1);
+v_id4 := dbe_sql.get_results(context_id,3,v_id4);
+v_id5 := dbe_sql.get_results(context_id,4,v_id5);
+exit when(v_id3 != 3);
+end loop;
+
+FOR i IN v_id1.FIRST .. v_id1.LAST LOOP
+ dbe_output.print_line('int' || i || ' = ' || v_id1[i]);
+END LOOP;
+FOR j IN v_id4.FIRST .. v_id4.LAST LOOP
+ dbe_output.print_line('char' || j || ' = ' || v_id4[j]);
+END LOOP;
+FOR j IN v_id6.FIRST .. v_id6.LAST LOOP
+ dbe_output.print_line('text' || j || ' = ' || v_id6[j]);
+END LOOP;
+FOR j IN v_id5.FIRST .. v_id5.LAST LOOP
+ dbe_output.print_line('bytea' || j || ' = ' || v_id5[j]);
+END LOOP;
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+---set_results_type
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b int, c char, d text);
+insert into pro_dbe_sql_all_tb1_02 values('1',9,'5','13');
+insert into pro_dbe_sql_all_tb1_02 values('2',10,'6','14');
+insert into pro_dbe_sql_all_tb1_02 values('3',11,'7','15');
+insert into pro_dbe_sql_all_tb1_02 values('4',12,'8','16');
+query := ' select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+dbe_sql.set_results_type(context_id,1,v_id6,v_id,-1);
+dbe_sql.set_results_type(context_id,2,v_id1,v_id,v_id2);
+dbe_sql.set_results_type(context_id,3,v_id4,v_id,v_id2);
+dbe_sql.set_results_type(context_id,4,v_id5,v_id,v_id2);
+
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+v_id3 := dbe_sql.next_row(context_id);
+v_id6 := dbe_sql.get_results(context_id,1,v_id6);
+v_id1 := dbe_sql.get_results(context_id,2,v_id1);
+v_id4 := dbe_sql.get_results(context_id,3,v_id4);
+v_id5 := dbe_sql.get_results(context_id,4,v_id5);
+exit when(v_id3 != 3);
+end loop;
+
+FOR i IN v_id1.FIRST .. v_id1.LAST LOOP
+ dbe_output.print_line('int' || i || ' = ' || v_id1[i]);
+END LOOP;
+FOR j IN v_id4.FIRST .. v_id4.LAST LOOP
+ dbe_output.print_line('char' || j || ' = ' || v_id4[j]);
+END LOOP;
+FOR j IN v_id6.FIRST .. v_id6.LAST LOOP
+ dbe_output.print_line('text' || j || ' = ' || v_id6[j]);
+END LOOP;
+FOR j IN v_id5.FIRST .. v_id5.LAST LOOP
+ dbe_output.print_line('bytea' || j || ' = ' || v_id5[j]);
+END LOOP;
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+--type
+create or replace procedure pro_dbe_sql_all_02()
+as
+bb dbe_sql.date_table;
+cc dbe_sql.number_table;
+dd dbe_sql.varchar2_table;
+ee dbe_sql.desc_tab;
+begin
+
+bb(1) :=to_date('2016-11-24 10:30:10','yyyy-mm-dd hh24:mi:ss');
+cc(2) := 300;
+dd(1) := 'gasdf';
+ee(1):= (111,1,'1',1,'1',1,1,1,1,1,false);
+ee(2):= (222,1,'1',1,'1',1,1,1,1,1,false);
+ee(3):= (333,1,'1',1,'1',1,1,1,1,1,false);
+
+RAISE INFO 'date_table: %' ,bb(1);
+RAISE INFO 'number_table: %' ,cc(2);
+RAISE INFO 'varchar2_table: %' ,dd(1);
+RAISE INFO 'desc_tab: %' ,ee(1).col_type;
+RAISE INFO 'desc_tab: %' ,ee(2).col_type;
+RAISE INFO 'desc_tab: %' ,ee(3).col_type;
+RAISE INFO 'desc_tab: %' ,ee(3).col_name;
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02();
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+--describe columns
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+type re_rssc is record (col_num int, desc_col dbe_sql.desc_tab);
+employer re_rssc;
+res re_rssc;
+d int;
+dd dbe_sql.desc_tab;
+query varchar(2000);
+begin
+drop table if exists pro_dbe_sql_all_tb1_02;
+create table pro_dbe_sql_all_tb1_02(a int ,b int);
+insert into pro_dbe_sql_all_tb1_02 values(1,3);
+insert into pro_dbe_sql_all_tb1_02 values(2,3);
+query := 'select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--execute
+res := dbe_sql.sql_describe_columns(context_id, d,dd);
+
+--print the results
+dbe_output.print_line('col_num:' || res.col_num);
+
+dbe_output.print_line('col_type:' || res.desc_col[1].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[1].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[1].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[1].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[1].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[1].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[1].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[1].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[1].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[1].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[1].col_null_ok);
+
+dbe_output.print_line('col_type:' || res.desc_col[2].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[2].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[2].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[2].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[2].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[2].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[2].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[2].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[2].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[2].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[2].col_null_ok);
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+type re_rssc is record (col_num int, desc_col dbe_sql.desc_tab);
+employer re_rssc;
+d int;
+dd dbe_sql.desc_tab;
+res re_rssc;
+query varchar(2000);
+begin
+drop table if exists pro_dbe_sql_all_tb1_02;
+create table pro_dbe_sql_all_tb1_02(a int ,b int);
+insert into pro_dbe_sql_all_tb1_02 values(1,3);
+insert into pro_dbe_sql_all_tb1_02 values(2,3);
+query := 'select a,b from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--execute
+res := dbe_sql.sql_describe_columns(context_id,d,dd);
+
+--print the results
+dbe_output.print_line('col_num:' || res.col_num);
+
+dbe_output.print_line('col_type:' || res.desc_col[1].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[1].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[1].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[1].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[1].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[1].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[1].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[1].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[1].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[1].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[1].col_null_ok);
+
+dbe_output.print_line('col_type:' || res.desc_col[2].col_type);
+dbe_output.print_line('col_max_len:' || res.desc_col[2].col_max_len);
+dbe_output.print_line('col_name:' || res.desc_col[2].col_name);
+dbe_output.print_line('col_name_len:' || res.desc_col[2].col_name_len);
+dbe_output.print_line('col_schema_name:' || res.desc_col[2].col_schema_name);
+dbe_output.print_line('col_schema_name_len:' || res.desc_col[2].col_schema_name_len);
+dbe_output.print_line('col_precision:' || res.desc_col[2].col_precision);
+dbe_output.print_line('col_scale:' || res.desc_col[2].col_scale);
+dbe_output.print_line('col_charsetid:' || res.desc_col[2].col_charsetid);
+dbe_output.print_line('col_charsetform:' || res.desc_col[2].col_charsetform);
+dbe_output.print_line('col_null_ok:' || res.desc_col[2].col_null_ok);
+
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+
+
+------------------------------------------------anyelement
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out int,o_ret2 out int) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret:=10;
+o_ret2:=30;
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 int;
+v3 int;
+v2 int;
+o_ret int;
+o_retw int;
+begin
+query := 'call proc_test(i_Col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_Col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,100);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+end;
+/
+
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret:='10';
+o_ret2:='30';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text;
+v3 text;
+v2 int;
+o_ret3 text;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_ret3,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_ret3,100);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+end;
+/
+
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out bytea,o_ret2 out bytea) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret:='1';
+o_ret2:='3';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 bytea;
+v3 bytea;
+o_retw bytea;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,100);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+end;
+/
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out character,o_ret2 out character) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret:='1';
+o_ret2:='3';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 character;
+v3 character;
+o_retw character;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,100);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+end;
+/
+
+--------------------------------------------------------
+------------------------------------------------anyarray
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text[],o_ret2 out text[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='10';
+o_ret(1):='20';
+o_ret2(0):='30';
+o_ret2(1):='40';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text[];
+v3 text[];
+v2 int;
+o_ret text[];
+o_retw text[];
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+dbe_output.print_line('v1: '|| v1(0));
+dbe_output.print_line('v1: '|| v1(1));
+dbe_output.print_line('v1: '|| v3(0));
+dbe_output.print_line('v1: '|| v3(1));
+end;
+/
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out int[],o_ret2 out int[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='10';
+o_ret(1):='20';
+o_ret2(0):='30';
+o_ret2(1):='40';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 int[];
+v3 int[];
+v2 int;
+o_ret int[];
+o_retw int[];
+begin
+query := 'call proc_test(i_col1,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+RAISE INFO 'v1: %' ,v1(0);
+RAISE INFO 'v1: %' ,v1(1);
+RAISE INFO 'v3: %' ,v3(0);
+RAISE INFO 'v3: %' ,v3(1);
+end;
+/
+
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out bytea[],o_ret2 out bytea[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='1';
+o_ret(1):='1';
+o_ret2(0):='1';
+o_ret2(1):='1';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 bytea[];
+v3 bytea[];
+v2 int;
+o_ret bytea[];
+o_retw bytea[];
+begin
+query := 'call proc_test(i_col1,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+RAISE INFO 'v1: %' ,v1(0);
+RAISE INFO 'v1: %' ,v1(1);
+RAISE INFO 'v3: %' ,v3(0);
+RAISE INFO 'v3: %' ,v3(1);
+end;
+/
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text[],o_ret2 out text[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='100';
+o_ret(1):='100';
+o_ret2(0):='40';
+o_ret2(1):='30';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text[];
+v3 text[];
+v2 int;
+o_ret text[];
+o_retw text[];
+begin
+query := 'call proc_test(i_col1,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+dbe_output.print_line('v1: '|| v1(0));
+dbe_output.print_line('v1: '|| v1(1));
+dbe_output.print_line('v1: '|| v3(0));
+dbe_output.print_line('v1: '|| v3(1));
+end;
+/
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out clob[],o_ret2 out clob[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='100';
+o_ret(1):='100';
+o_ret2(0):='40';
+o_ret2(1):='30';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 clob[];
+v3 clob[];
+v2 int;
+o_ret clob[];
+o_retw clob[];
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+dbe_output.print_line('v1: '|| v1(0));
+dbe_output.print_line('v1: '|| v1(1));
+dbe_output.print_line('v1: '|| v3(0));
+dbe_output.print_line('v1: '|| v3(1));
+end;
+/
+
+
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out character[],o_ret2 out character[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='1';
+o_ret(1):='2';
+o_ret2(0):='3';
+o_ret2(1):='4';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 character[];
+v3 character[];
+v2 int;
+o_retw character[];
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+dbe_sql.sql_unregister_context(context_id);
+RAISE INFO 'v1: : %' ,v1(0);
+RAISE INFO 'v1: : %' ,v1(1);
+
+RAISE INFO 'v1: : %' ,v3(0);
+
+RAISE INFO 'v1: : %' ,v3(1);
+
+end;
+/
+
+drop PROCEDURE proc_test;
+
+--------------------------support simultaneous bind-----array-and-variable-------------------------
+---------------------------bind-----array-----variable-------------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+exe int[];
+dddd char;
+begin
+exe[1] := 4;
+exe[2] := 6;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,5,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,10,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,20,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a = y and b < 20 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+--dbe_sql.sql_bind_variable(context_id, 'z', 20);
+dbe_sql.sql_bind_array(context_id, 'y', exe);
+--define the columns
+dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
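+--Note: every case in this file follows the same dbe_sql lifecycle:
+--register_context -> sql_set_sql -> sql_bind_variable / sql_bind_array ->
+--set_result_type -> sql_run -> next_row / get_results -> sql_unregister_context.
+--As exercised above, the optional trailing pair of arguments to sql_bind_array
+--appears to select the lower/upper index range of the bound array; the cases
+--marked "bind_array error" pass invalid ranges (such as -1) deliberately.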
+---------------------bind-----array-----variable---------------insert
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+exe int[];
+dddd char;
+begin
+exe[1] := 4;
+exe[2] := 6;
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,5,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,10,1,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,20,1,11);
+query := 'insert into pro_dbe_sql_all_tb1_02 values(y,z)';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_variable(context_id, 'z', 20);
+dbe_sql.sql_bind_array(context_id, 'y', exe);
+--define the columns
+dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+
+
+---------------------------support---bind_variable----column size limit-------------------------
+----------------------bind_variable--------------in-------------------------------------
+----------------------------------------------------------------------------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id text;
+v_id1 text;
+--v_id int;
+query varchar(2000);
+execute_ret int;
+begin
+v_id1 := 'abc';
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values('ab',3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values('abc',3,1,11);
+query := 'select * from pro_dbe_sql_all_tb1_02 where a = y order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--bind the parameters
+dbe_sql.sql_bind_variable(context_id, 'y', v_id1);
+--dbe_sql.sql_bind_variable(context_id, 'y', 3);
+--define the columns
+dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+------------------------------------------------------------------------
+---------------------------bind_variable--------------inout-------------
+
+----##########################------------------------text:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text;
+v3 text;
+v2 int;
+--o_ret character[];
+o_retw text;
+o_retw1 text;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+
+
+------##########################------------------------text:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text;
+v3 text;
+v2 int;
+--o_ret character[];
+o_retw text;
+o_retw1 text;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+
+
+-----##########################------------------------bytea:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out bytea,o_ret2 out bytea) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 bytea;
+v3 bytea;
+o_retw bytea;
+o_retw1 bytea;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+
+
+
+-----##########################------------------------bpchar:
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out bpchar,o_ret2 out bpchar) as
+v_a varchar2;
+begin
+o_ret:='123';
+o_ret2:='34567';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 bpchar;
+v3 bpchar;
+o_retw bpchar;
+o_retw1 bpchar;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,4);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+end;
+/
+
+-----------------------support---set_result_type-----column size limit----------------------------
+---------------------------------------set_result_type-------column_value-------------text
+------------------------------------------------------------------------------------------
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info1 text :=1;
+
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b clob);
+insert into pro_dbe_sql_all_tb1_02 values('asbdrdgg',HEXTORAW('DEADBEEE'));
+insert into pro_dbe_sql_all_tb1_02 values(2,in_raw);
+query := 'select a from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,10);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_info1);
+--print the results
+dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+
+---------------set_result_type-------column_value------------------------------bytea
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+nnn text;
+v_info1 bytea;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a bytea ,b clob);
+insert into pro_dbe_sql_all_tb1_02 values('646464',HEXTORAW('DEADBEEE'));
+insert into pro_dbe_sql_all_tb1_02 values('646464',in_raw);
+query := 'select a from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,10);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_result_raw(context_id,1,v_info1);
+
+--print the results
+dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+
+-----------set_result_type-------column_value----------------------------bpchar
+
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+nnn text;
+v_info1 bpchar;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a bpchar ,b clob);
+insert into pro_dbe_sql_all_tb1_02 values('646464',HEXTORAW('DEADBEEE'));
+insert into pro_dbe_sql_all_tb1_02 values('646464',in_raw);
+query := 'select a from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,3);
+
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_info1);
+
+--nnn := pkg_util.lob_rawtotext(v_info1);
+--print the results
+dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1);
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+---========================test raw /clob/blob function
+create or replace procedure pro_get_variable_07(in_raw int,v_in out bigint,v_offset out bigint,ary1 out bigint[],ary2 out bigint[])
+as
+context_id int;
+v_id int :=3;
+
+v_id1 int[];
+
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+v_in:=10;
+v_offset:=30;
+ary1(0):='1';
+ary1(1):='2';
+ary1(2):='3';
+ary2(0):='12';
+ary2(1):='13';
+ary2(2):='14';
+end;
+/
+
+create or replace procedure call_get_variable_07()
+as
+context_id number;
+query text;
+define_column_ret int;
+v1 bigint;
+v3 bigint;
+v2 bigint;
+v4 bigint[];
+v5 bigint[];
+v_in bigint;
+v_offset bigint;
+ary1 bigint[];
+ary2 bigint[];
+o_retw bigint;
+o_retw1 bigint[];
+begin
+query := 'call pro_get_variable_07(in_raw,NULL,NULL,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10);
+dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100);
+dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+
+define_column_ret := dbe_sql.sql_run(context_id);
+dbe_sql.get_variable_result(context_id,'v_in',v1);
+dbe_sql.get_variable_result(context_id,'v_offset',v3);
+dbe_sql.get_array_result_int(context_id,'ary1',v4);
+dbe_sql.get_array_result_int(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v5: %' ,v5(0);
+RAISE INFO 'v5: %' ,v5(1);
+end;
+/
+call call_get_variable_07();
+
+----==================================================
+create or replace procedure pro_get_variable_result_text_02(in_raw int,v_in out clob,v_offset out clob,ary1 out clob[],ary2 out clob[])
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 clob[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+v_in:='abcdnfdfdfdafds';
+v_offset:='ccccccccccccccccccccccc';
+ary1(0):='aa';
+ary1(1):='bb';
+ary2(0):='cc';
+ary2(1):='dd';
+end;
+/
+
+create or replace procedure call_get_variable_text_02()
+as
+context_id number;
+query clob;
+define_column_ret int;
+v1 clob;
+v3 clob;
+v2 clob;
+v4 clob[];
+v5 clob[];
+v_in clob;
+v_offset clob;
+ary1 clob[];
+ary2 clob[];
+o_retw clob;
+o_retw1 clob[];
+begin
+query := 'call pro_get_variable_result_text_02(in_raw,NULL,NULL,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10);
+dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100);
+dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+v1:=dbe_sql.get_variable_result_text(context_id,'v_in');
+v2:=dbe_sql.get_variable_result_text(context_id,'v_offset');
+dbe_sql.get_array_result_text(context_id,'ary1',v4);
+dbe_sql.get_array_result_text(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v2: %' ,v2;
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v5: %' ,v5(0);
+RAISE INFO 'v5: %' ,v5(1);
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+
+call call_get_variable_07();
+
+----==================================================
+CREATE OR REPLACE PROCEDURE proc_get_variable_arr_result_text_03(i_col1 in int,o_ret out character varying,o_ret2 out character varying, ary1 out character varying[],ary2 out character varying[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret:=1;
+o_ret2:=2;
+ary1(0):='a';
+ary1(1):='d';
+ary1(2):='f';
+ary2(0):='f';
+ary2(1):='d';
+ary2(2):='f';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 character varying;
+v3 character varying;
+v2 character varying;
+v4 character varying[];
+v5 character varying[];
+o_ret character varying;
+
+ary1 character varying[];
+ary2 character varying[];
+o_retw character varying;
+o_retw1 character varying[];
+begin
+query := 'call proc_get_variable_arr_result_text_03(i_col1,o_ret,o_ret2,ary1,ary2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,1);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+define_column_ret := dbe_sql.sql_run(context_id);
+v1:=dbe_sql.get_variable_result_text(context_id,'o_ret');
+v2:=dbe_sql.get_variable_result_text(context_id,'o_ret2');
+dbe_sql.get_array_result_text(context_id,'ary1',v4);
+dbe_sql.get_array_result_text(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v2: %' ,v2;
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v5: %' ,v5(2);
+RAISE INFO 'v5: %' ,v5(0);
+end;
+/
+
+----=============================================
+
+create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out raw,v_offset out raw,ary1 out raw[],ary2 out raw[])
+as
+context_id int;
+v_id int :=3;
+begin
+v_in:=HEXTORAW('DEADBEEF');
+v_offset:=HEXTORAW('DEADBEEF');
+ary1(0):=HEXTORAW('DEADBEEF');
+ary1(1):=HEXTORAW('DEADBEEF');
+ary2(0):=HEXTORAW('DEADBEEF');
+ary2(1):=HEXTORAW('DEADBEEF');
+end;
+/
+
+create or replace procedure call_get_variable_arr_raw_01()
+as
+context_id number;
+query text;
+define_column_ret int;
+v1 raw;
+v3 raw;
+v2 raw;
+v4 raw[];
+v5 raw[];
+v_in raw;
+v_offset raw;
+ary2 raw[];
+ary1 raw[];
+o_retw raw;
+o_retw1 raw[];
+begin
+query := 'call proc_get_variable_result_raw_01(in_raw,v_in,v_offset,ary1,ary2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10);
+dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100);
+dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result_raw(context_id,'v_in',v1);
+dbe_sql.get_variable_result_raw(context_id,'v_offset',v3);
+dbe_sql.get_array_result_raw(context_id,'ary1',v4);
+dbe_sql.get_array_result_raw(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v5: %' ,v5(0);
+RAISE INFO 'v5: %' ,v5(1);
+end;
+/
+call call_get_variable_arr_raw_01();
+
+
+---============================================
+
+create or replace procedure pro_get_variable_06(in_raw int,v_in out clob,v_offset out clob,ary1 out clob[],ary2 out clob[])
+as
+context_id int;
+v_id int :=3;
+
+v_id1 int[];
+v_id4 clob[];
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+begin
+v_in:='aaa36';
+v_offset:='fdf5';
+ary1(0):='aafd';
+ary1(1):='fdsf';
+ary2(0):='fa';
+ary2(1):='fsafdasf';
+end;
+/
+
+create or replace procedure call_get_variable_06()
+as
+context_id number;
+query text;
+define_column_ret int;
+v1 clob;
+v3 clob;
+v2 clob;
+v4 clob[];
+v5 clob[];
+v_in clob;
+v_offset clob;
+ary1 clob[];
+ary2 clob[];
+o_retw clob;
+o_retw1 clob[];
+begin
+query := 'call pro_get_variable_06(in_raw,NULL,NULL,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10);
+dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100);
+dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'v_in',v1);
+dbe_sql.get_variable_result(context_id,'v_offset',v3);
+dbe_sql.get_variable_result(context_id,'ary1',v4);
+dbe_sql.get_variable_result(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v5: %' ,v5(0);
+RAISE INFO 'v5: %' ,v5(1);
+end;
+/
+call call_get_variable_06();
+
+----=================================test fetching the nth column directly======================
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(b int, c char, d text);
+insert into pro_dbe_sql_all_tb1_02 values(9,'5','13');
+insert into pro_dbe_sql_all_tb1_02 values(10,'6','14');
+insert into pro_dbe_sql_all_tb1_02 values(11,'7','15');
+insert into pro_dbe_sql_all_tb1_02 values(12,'8','16');
+create or replace procedure pro_dbe_sql_all_01()
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 text[];
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+col_type1 int;
+col_type2 char;
+col_type3 text;
+col_type4 bytea;
+begin
+query := ' select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+DBE_SQL.set_result_type_ints(context_id,2,v_id1,v_id,v_id2);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+v_id3 := dbe_sql.next_row(context_id);
+v_id1 := DBE_SQL.get_results_int(context_id,2,v_id1);
+exit when(v_id3 != 3);
+end loop;
+FOR i IN v_id1.FIRST .. v_id1.LAST LOOP
+dbe_output.print_line('int' || i || ' = ' || v_id1[i]);
+END LOOP;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+
+call pro_dbe_sql_all_01();
+drop table if exists pro_dbe_sql_all_tb1_02;
+
+----==============================test raw set_results_type== ====
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(b raw, c raw, d clob);
+insert into pro_dbe_sql_all_tb1_02 values('9','5','13');
+insert into pro_dbe_sql_all_tb1_02 values('10','6','14');
+insert into pro_dbe_sql_all_tb1_02 values('11','7','15');
+insert into pro_dbe_sql_all_tb1_02 values('12','8','16');
+create or replace procedure pro_dbe_sql_all_01()
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 raw[];
+v_id4 character[];
+v_id5 bytea[];
+v_id6 text[];
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+col_type1 int;
+col_type2 char;
+col_type3 text;
+col_type4 bytea;
+begin
+query := ' select * from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--define the columns
+DBE_SQL.set_results_type(context_id,1,v_id1,v_id,v_id2);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+v_id3 := dbe_sql.next_row(context_id);
+DBE_SQL.get_results(context_id,1,v_id1);
+exit when(v_id3 != 3);
+end loop;
+FOR i IN v_id1.FIRST .. v_id1.LAST LOOP
+dbe_output.print_line('int' || i || ' = ' || v_id1[i]);
+END LOOP;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+
+call pro_dbe_sql_all_01();
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+
+----------------check NULL for is_active and sql_unregister_context
+
+create or replace procedure call_get_variable_06()
+as
+context_id int := NULL;
+begin
+raise notice '11111';
+if dbe_sql.is_active(context_id) then
+ raise notice '2222';
+ dbe_sql.sql_unregister_context(context_id);
+end if;
+end;
+/
+select * from call_get_variable_06();
+
+create or replace procedure call_get_variable_06()
+as
+context_id int := NULL;
+begin
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+select * from call_get_variable_06();
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+type re_rssc is record (col_num int, desc_col dbe_sql.desc_tab);
+employer re_rssc;
+res re_rssc;
+d int;
+dd dbe_sql.desc_tab;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a int ,b int,c int,d int);
+insert into pro_dbe_sql_all_tb1_02 values(4,3,2,11);
+insert into pro_dbe_sql_all_tb1_02 values(6,3,1,11);
+query := 'select *,1,ss from pro_dbe_sql_all_tb1_02 where a > y and a < z order by s';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+dbe_sql.sql_set_sql(context_id, query, 1);
+--describe the columns
+res := dbe_sql.sql_describe_columns(context_id, d,dd);
+
+--bind the parameters
+dbe_sql.sql_bind_variable(context_id, 'z', 10);
+dbe_sql.sql_bind_variable(context_id, 'y', 1);
+dbe_sql.sql_bind_variable(context_id, 's', 1);
+dbe_sql.sql_bind_variable(context_id, 'ss', 1);
+--define the columns
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_id);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_id);
+--print the results
+dbe_output.print_line('id:'|| v_id);
+end loop;
+dbe_output.print_line('col_num:' || res.col_num);
+dbe_output.print_line('col_type:' || res.desc_col[1].col_type);
+dbe_output.print_line('col_type:' || res.desc_col[2].col_type);
+dbe_output.print_line('col_type:' || res.desc_col[3].col_type);
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+--drop the stored procedure
+DROP PROCEDURE pro_dbe_sql_all_02;
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text[],o_ret2 out text[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+select 2 into v_a;
+end if;
+o_ret(0):='10';
+o_ret(1):='20';
+o_ret2(0):='30';
+o_ret2(1):='40';
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text[];
+v3 text[];
+v2 int;
+o_ret text[];
+o_retw text[];
+v4 int[];
+
+begin
+v4(0):=1;
+v4(1):=2;
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_array(context_id, 'i_col1',v4);
+commit;
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+dbe_output.print_line('v1: '|| v1(0));
+dbe_output.print_line('v1: '|| v1(1));
+dbe_output.print_line('v1: '|| v3(0));
+dbe_output.print_line('v1: '|| v3(1));
+end;
+/
+----===============================================
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,i_col2 in int,o_ret out text[],o_ret2 out text[]) as
+v_a varchar2;
+begin
+if i_col1=1 then
+o_ret(0):='10';
+o_ret(1):='20';
+o_ret2(0):='30';
+o_ret2(1):='40';
+end if;
+if i_col1=2 and i_col2=1 then
+o_ret(0):='100';
+o_ret(1):='200';
+o_ret2(0):='300';
+o_ret2(1):='400';
+end if;
+
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text[];
+v3 text[];
+v2 int;
+o_ret text[];
+o_retw text[];
+v4 int[];
+
+begin
+v4(0):=1;
+v4(1):=2;
+query := 'call proc_test(i_col1,i_col2,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_array(context_id, 'i_col1',v4);
+dbe_sql.sql_bind_variable(context_id, 'i_col2',1);
+dbe_sql.sql_bind_array(context_id, 'o_ret',o_retw);
+dbe_sql.sql_bind_array(context_id, 'o_ret2',o_retw);
+define_column_ret := dbe_sql.sql_run(context_id);
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+dbe_output.print_line('v1: '|| v1(0));
+dbe_output.print_line('v1: '|| v1(1));
+dbe_output.print_line('v1: '|| v3(0));
+dbe_output.print_line('v1: '|| v3(1));
+end;
+/
+
+-----===========================
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out int,o_ret2 out int) as
+v_a varchar2;
+begin
+if i_col1=1 then
+o_ret:=10;
+o_ret2:=30;
+end if;
+if i_col1=2 then
+o_ret:=20;
+o_ret2:=40;
+else
+o_ret:=100;
+o_ret2:=200;
+end if;
+end;
+/
+
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 int;
+v3 int;
+v2 int;
+o_ret int;
+o_retw int;
+begin
+query := 'call proc_test(i_col1,NULL,NULL);';
+context_id := dbe_sql.register_context();
+
+for i in 1..3 loop
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',i,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw,100);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+end loop;
+dbe_sql.sql_unregister_context(context_id);
+--print the results
+
+end;
+/
+
+
+-----====================================================
+create or replace procedure pro_dbe_sql_all_02(in_raw raw,v_in int,v_offset int)
+as
+context_id int;
+v_id int;
+v_info1 text :=1;
+
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbe_sql_all_tb1_02 ;
+create table pro_dbe_sql_all_tb1_02(a text ,b clob);
+insert into pro_dbe_sql_all_tb1_02 values('asbdrdgg',HEXTORAW('DEADBEEE'));
+insert into pro_dbe_sql_all_tb1_02 values(2,in_raw);
+query := 'select a from pro_dbe_sql_all_tb1_02 order by 1';
+--open the cursor
+context_id := dbe_sql.register_context();
+--parse the cursor
+--define the columns
+for i in 1..20 loop
+dbe_sql.sql_set_sql(context_id, query, 1);
+define_column_ret:= dbe_sql.set_result_type(context_id,1,v_info1,10);
+--execute
+execute_ret := dbe_sql.sql_run(context_id);
+loop
+exit when (dbe_sql.next_row(context_id) <= 0);
+--fetch the values
+dbe_sql.get_results(context_id,1,v_info1);
+--print the results
+dbe_output.print_line('info:'|| 1 || ' info:' ||v_info1);
+end loop;
+end loop;
+--close the cursor
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+--call the stored procedure
+call pro_dbe_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+
+---============================6. autonomous transaction================
+--create the table
+create table t2(a int, b int);
+insert into t2 values(1,2);
+select * from t2;
+--create a stored procedure that contains an autonomous transaction
+CREATE OR REPLACE PROCEDURE autonomous_4(a int, b int) AS
+DECLARE
+ num3 int := a;
+ num4 int := b;
+ PRAGMA AUTONOMOUS_TRANSACTION;
+BEGIN
+ insert into t2 values(num3, num4);
+ dbe_output.print_line('just use call.');
+END;
+/
+--create an ordinary stored procedure that calls the autonomous-transaction procedure
+CREATE OR REPLACE PROCEDURE autonomous_5(a int, b int) AS
+DECLARE
+BEGIN
+ dbe_output.print_line('just no use call.');
+ insert into t2 values(666, 666);
+ autonomous_4(a,b);
+ rollback;
+END;
+/
+
+create or replace procedure proc_test3() as
+context_id number;
+query text;
+define_column_ret int;
+v1 varchar2;
+proc_name varchar2;
+begin
+proc_name:='autonomous_5';
+query := 'call '||proc_name||'(o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',1,10);
+
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.sql_unregister_context(context_id);
+
+--print the results
+RAISE INFO 'v1: %' ,v1;
+end;
+/
+CREATE OR REPLACE PACKAGE package_002 IS
+PROCEDURE testpro1(var3 int);
+END package_002;
+/
+
+--call the ordinary stored procedure
+select autonomous_5(11,22);
+--check the table contents
+select * from t2 order by a;
+
+------------------------------------------------------------
+CREATE OR REPLACE PROCEDURE proc_test(i_col1 in int,o_ret out text,o_ret2 out text) as
+v_a varchar2;
+i int;
+begin
+i =1/0;
+exception
+when others then
+ raise info '%', 'exception';
+end;
+/
+
+CREATE OR REPLACE PROCEDURE q(mm out int) as
+declare
+context_id number;
+query text;
+define_column_ret int;
+v1 text;
+v3 text;
+v2 int;
+o_retw text;
+o_retw1 text;
+begin
+query := 'call proc_test(i_col1,o_ret,o_ret2);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+dbe_sql.sql_bind_variable(context_id, 'i_col1',1,10);
+dbe_sql.sql_bind_variable(context_id, 'o_ret',o_retw,1);
+dbe_sql.sql_bind_variable(context_id, 'o_ret2',o_retw1,3);
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result(context_id,'o_ret',v1);
+dbe_sql.get_variable_result(context_id,'o_ret2',v3);
+dbe_sql.sql_unregister_context(context_id);
+
+RAISE INFO 'v1: : %' ,v1;
+RAISE INFO 'v1: : %' ,v3;
+mm = 1;
+end;
+/
+select * from q();
+
+create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out blob,v_offset out blob,ary1 out blob[],ary2 out blob[])
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 blob[];
+v_id5 blob[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+i int := 1;
+begin
+v_in:=HEXTORAW('DEADBEEF');
+v_offset:=HEXTORAW('DEADBEEF');
+ary1(0):=HEXTORAW('DEADBEEF');
+ary1(1):=HEXTORAW('DEADBEEF');
+ary2(0):=HEXTORAW('DEADBEEF');
+ary2(1):=HEXTORAW('DEADBEEF');
+end;
+/
+
+create or replace procedure call_get_variable_raw_01()
+as
+context_id number;
+query text;
+define_column_ret int;
+v1 blob;
+v3 blob;
+v2 blob;
+v4 blob[];
+v5 blob[];
+v_in blob;
+v_offset blob;
+ary2 blob[];
+ary1 blob[];
+o_retw blob;
+o_retw1 blob[];
+i int := 1;
+begin
+query := 'call proc_get_variable_result_raw_01(in_raw,NULL,NULL,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+--while i < 4 loop
+dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10);
+dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100);
+dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result_raw(context_id,'v_in',v1);
+dbe_sql.get_variable_result_raw(context_id,'v_offset',v3);
+dbe_sql.get_array_result_raw(context_id,'ary1',v4);
+dbe_sql.get_array_result_raw(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v5: %' ,v5(0);
+RAISE INFO 'v5: %' ,v5(1);
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+
+call call_get_variable_raw_01();
+
+-----------------------------------
+create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out bytea,v_offset out bytea,ary1 out bytea[],ary2 out bytea[])
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 bytea[];
+v_id5 bytea[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+i int := 1;
+begin
+v_in:=HEXTORAW('DEADBEEF');
+v_offset:=HEXTORAW('DEADBEEF');
+ary1(0):=HEXTORAW('DEADBEEF');
+ary1(1):=HEXTORAW('DEADBEEF');
+ary2(0):=HEXTORAW('DEADBEEF');
+ary2(1):=HEXTORAW('DEADBEEF');
+end;
+/
+
+create or replace procedure call_get_variable_raw_01()
+as
+context_id number;
+query text;
+define_column_ret int;
+v1 bytea;
+v3 bytea;
+v2 bytea;
+v4 bytea[];
+v5 bytea[];
+v_in bytea;
+v_offset bytea;
+ary2 bytea[];
+ary1 bytea[];
+o_retw bytea;
+o_retw1 bytea[];
+i int := 1;
+begin
+query := 'call proc_get_variable_result_raw_01(in_raw,NULL,NULL,NULL,NULL);';
+context_id := dbe_sql.register_context();
+dbe_sql.sql_set_sql(context_id, query, 1);
+--while i < 4 loop
+dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10);
+dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100);
+dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100);
+dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1);
+dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1);
+
+define_column_ret := dbe_sql.sql_run(context_id);
+
+dbe_sql.get_variable_result_raw(context_id,'v_in',v1);
+dbe_sql.get_variable_result_raw(context_id,'v_offset',v3);
+dbe_sql.get_array_result_raw(context_id,'ary1',v4);
+dbe_sql.get_array_result_raw(context_id,'ary2',v5);
+--print the results
+RAISE INFO 'v1: %' ,v1;
+RAISE INFO 'v3: %' ,v3;
+RAISE INFO 'v4: %' ,v4(0);
+RAISE INFO 'v4: %' ,v4(1);
+RAISE INFO 'v5: %' ,v5(0);
+RAISE INFO 'v5: %' ,v5(1);
+dbe_sql.sql_unregister_context(context_id);
+end;
+/
+
+call call_get_variable_raw_01();
+
+-------------------------------------
+create or replace procedure proc_get_variable_result_raw_01(in_raw int,v_in out raw,v_offset out raw,ary1 out raw[],ary2 out raw[])
+as
+context_id int;
+v_id int :=3;
+--test
+v_id1 int[];
+v_id4 raw[];
+v_id5 raw[];
+v_id6 text[];
+
+v_id2 int := 1;
+v_id3 int;
+query varchar(2000);
+execute_ret int;
+define_column_ret int;
+i int := 1;
+begin
+v_in:=HEXTORAW('DEADBEEF');
+v_offset:=HEXTORAW('DEADBEEF');
+ary1(0):=HEXTORAW('DEADBEEF');
+ary1(1):=HEXTORAW('DEADBEEF');
+ary2(0):=HEXTORAW('DEADBEEF');
+ary2(1):=HEXTORAW('DEADBEEF');
+end;
+/
+
+create or replace procedure call_get_variable_raw_01()
+as
+context_id number;
+query text;
+define_column_ret int;
+v1 raw;
+v3 raw;
+v2 raw;
+v4 raw[];
+v5 raw[];
+v_in raw;
+v_offset raw;
+ary2 raw[];
+ary1 raw[];
+o_retw raw;
+o_retw1 raw[];
+i int := 1; +begin +query := 'call proc_get_variable_result_raw_01(in_raw,NULL,NULL,NULL,NULL);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'in_raw',1,10); +dbe_sql.sql_bind_variable(context_id, 'v_in',o_retw,100); +dbe_sql.sql_bind_variable(context_id, 'v_offset',o_retw,100); +dbe_sql.sql_bind_array(context_id, 'ary1',o_retw1); +dbe_sql.sql_bind_array(context_id, 'ary2',o_retw1); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result_raw(context_id,'v_in',v1); +dbe_sql.get_variable_result_raw(context_id,'v_offset',v3); +dbe_sql.get_array_result_raw(context_id,'ary1',v4); +dbe_sql.get_array_result_raw(context_id,'ary2',v5); +--output the results +RAISE INFO 'v1: %' ,v1; +RAISE INFO 'v3: %' ,v3; +RAISE INFO 'v4: %' ,v4(0); +RAISE INFO 'v4: %' ,v4(1); +RAISE INFO 'v5: %' ,v5(0); +RAISE INFO 'v5: %' ,v5(1); +dbe_sql.sql_unregister_context(context_id); +end; +/ + +call call_get_variable_raw_01(); + +CREATE OR REPLACE FUNCTION x1(a in int) +RETURNS int +AS $$ +DECLARE +BEGIN + a:=11; + commit; + return 12; +END; +$$ LANGUAGE plpgsql; +create or replace procedure y(a in int) +as +declare +begin +savepoint aa; +a:= x1(1); +rollback to aa; +end; +/ +call y(1); +drop FUNCTION x1(); +drop procedure y(); diff --git a/src/test/regress/sql/hw_dbms_sql2.sql b/src/test/regress/sql/hw_dbms_sql2.sql new file mode 100644 index 000000000..18986c304 --- /dev/null +++ b/src/test/regress/sql/hw_dbms_sql2.sql @@ -0,0 +1,320 @@ +----===============1. Nesting============== +CREATE OR REPLACE PACKAGE package_001 IS +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +begin +query := 'call package_001.testpro1(o_ret);'; +context_id := dbe_sql.register_context(); +rollback; +dbe_sql.sql_set_sql(context_id, query, 1); +rollback; +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--output the results +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +drop package package_001; +---===============1. Nesting============== +CREATE OR REPLACE PACKAGE package_001 IS +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +define_column_ret := dbe_sql.sql_run(context_id); +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--output the results +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +drop package package_001; + +---===============2. Global variables============== + +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a = 10; +raise INFO 'a:%' ,a; +commit; +end; +end package_001; +/ + +create or replace 
procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--output the results +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +drop package package_001; +----========================3.savepoint----------- +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a=11; +savepoint s1; +a = 10; +ROLLBACK TO SAVEPOINT s1; +raise INFO 'a:%' ,a; +commit; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--output the results +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +drop package package_001; +----========================3. Overloading----------- +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +PROCEDURE testpro1(var3 int, var4 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a = 10; +raise INFO 'a:%' ,a; +commit; +end; +PROCEDURE testpro1(var3 int, var4 int) +is +begin +a = 11; +raise INFO 'a:%' ,a; +rollback; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +dbe_sql.sql_bind_variable(context_id, 'o_ret2',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +--dbe_sql.sql_unregister_context(context_id); + +--output the results +RAISE INFO 'v1: %' ,v1; +end; +/ +call proc_test3(); +drop package package_001; + +---========================3. Overloading----------- +CREATE OR REPLACE PACKAGE package_001 IS +a int; +b int; +PROCEDURE testpro1(var3 int); +PROCEDURE testpro1(var3 int, var4 int); +END package_001; +/ +create or replace package body package_001 is +procedure testpro1(var3 int) +is +begin +a = 10; +raise INFO 'a:%' ,a; +commit; +end; +PROCEDURE testpro1(var3 int, var4 int) +is +begin +a = 11; +raise INFO 'a:%' ,a; +rollback; +end; +end package_001; +/ + +create or replace procedure proc_test3() as +context_id number; +query text; +define_column_ret int; +v1 varchar2; +proc_name varchar2; +begin +proc_name:='package_001.testpro1'; +query := 'call '||proc_name||'(o_ret,o_ret2);'; +context_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(context_id, query, 1); +dbe_sql.sql_bind_variable(context_id, 'o_ret',1,10); +dbe_sql.sql_bind_variable(context_id, 
'o_ret2',1,10); + +define_column_ret := dbe_sql.sql_run(context_id); + +dbe_sql.get_variable_result(context_id,'o_ret',v1); +dbe_sql.sql_unregister_context(context_id); + +--output the results +RAISE INFO 'v1: %' ,v1; +end; +/ +CREATE OR REPLACE PACKAGE package_002 IS +PROCEDURE testpro1(var3 int); +END package_002; +/ +create or replace package body package_002 is +procedure testpro1(var3 int) +is +begin +perform proc_test3(); +commit; +end; +end package_002; +/ +CREATE OR REPLACE PACKAGE package_003 IS +PROCEDURE testpro1(var3 int); +END package_003; +/ +create or replace package body package_003 is +procedure testpro1(var3 int) +is +begin +perform package_002.testpro1(1); +commit; +end; +end package_003; +/ +call package_003.testpro1(1); +drop package package_001; +drop package package_002; +drop package package_003; +-----------------------cursor--------------------------- +create table t1(a int); +insert into t1 values (1); +insert into t1 values (2); +create or replace procedure p2 (c4 in int,c2 out int,c3 out int,c1 out sys_refcursor) as +va t1; +i int; +begin +open c1 for select * from t1; +begin +i = 1/0; +exception +when others then + c3=100; + raise info '%', 'exception1'; +end; +i=2/0; +exception +when others then + + c4=100; + c2=c4+10; + raise info '%', 'exception2'; +end; +/ +select * from p2(1); +drop table t1; +drop procedure p2; diff --git a/src/test/regress/sql/hw_es_multi_column_stats_eqclass.sql b/src/test/regress/sql/hw_es_multi_column_stats_eqclass.sql new file mode 100644 index 000000000..475498770 --- /dev/null +++ b/src/test/regress/sql/hw_es_multi_column_stats_eqclass.sql @@ -0,0 +1,64 @@ +-- check that the join list does not raise any error when optimizing multi-column statistics using eqClass. +CREATE SCHEMA equivalent_class; +SET current_schema = equivalent_class; +CREATE TABLE dim_warehouse_info_t ( + warehouse_id numeric(10,0), + warehouse_name character varying(60) +) +WITH (orientation=row, compression=no); + +CREATE TABLE wms_abnormal_order ( + id bigint, + abnormal_order_no character varying(384), + abnormal_type character varying(384), + warehouse_id numeric(20,0) +) +WITH (orientation=column, compression=middle); + +CREATE TABLE wms_stocktaking_merchandise ( + id bigint, + stocktaking_serno character varying(90), + warehouse_id numeric(20,0), + abnormal_order_no character varying(384) +) +WITH (orientation=column, compression=middle); + +CREATE TABLE wms_stocktaking_order ( + id bigint, + stocktaking_serno character varying(90), + stocktaking_type character varying(384), + warehouse_id numeric(20,0) +) +WITH (orientation=column, compression=middle); + +SET enable_nestloop = off; +SET explain_perf_mode=pretty; +EXPLAIN(costs off) SELECT /* leading((mer (ab (wh ord)))) leading((ab (wh ord)))*/ + mer.abnormal_order_no , + ab.abnormal_order_no +FROM + wms_stocktaking_merchandise mer +LEFT JOIN + dim_warehouse_info_t wh +ON + wh.warehouse_id = mer.warehouse_id +LEFT JOIN + wms_stocktaking_order ord +ON + ord.warehouse_id = mer.warehouse_id +AND + ord.stocktaking_serno = mer.stocktaking_serno +LEFT JOIN wms_abnormal_order ab + ON ab.warehouse_id = mer.warehouse_id + AND ab.abnormal_order_no = mer.abnormal_order_no +AND ab.abnormal_order_no IN ('AB00000000194178', 'AB00000000194175') +WHERE ord.stocktaking_type = 'AF' +AND mer.abnormal_order_no IS NOT NULL +AND ab.abnormal_type IN ('PICK_ABNORMAL','SORTING_ABNORMAL','PACK_ABNORMAL') +AND wh.warehouse_name ='UKGF Warehouse' +AND mer.abnormal_order_no IN ('AB00000000194178' ,'AB00000000194175' ) +GROUP BY +mer.abnormal_order_no , 
+ab.abnormal_order_no ; + +DROP SCHEMA equivalent_class CASCADE; diff --git a/src/test/regress/sql/hw_grant_package.sql b/src/test/regress/sql/hw_grant_package.sql new file mode 100644 index 000000000..3f05a3daa --- /dev/null +++ b/src/test/regress/sql/hw_grant_package.sql @@ -0,0 +1,82 @@ +create user test_grant1 password 'Gauss123'; +create user test_grant2 password 'Gauss123'; +SET SESSION AUTHORIZATION test_grant1 password 'Gauss123'; +create type s_type as ( + id integer, + name varchar, + addr text +); +create or replace package pck3 is +type r2 is table of s_type index by varchar(10); +type r3 is table of s_type index by integer; +procedure p1; +procedure p2(b int, va r2, a int, vb r3); +end pck3; +/ +create or replace package body pck3 is +procedure p1 as +va r2; +vb r3; +b int; +begin +va('a') := (1, 'zhangsan', 'shanghai'); +vb(5) := (10086,'aa','bb'); +vb(233) := (10087,'aa','bb'); +p2(b,va,1,vb); +end; +procedure p2(b int, va r2, a int, vb r3) as +begin +raise info 'va:%', va('a'); +raise info 'vb(233):%', vb(233); +raise info 'vb:%', vb; +end; +end pck3; +/ +CREATE OR REPLACE package pkg_auth_1 +is +a int; +END pkg_auth_1; +/ +CREATE OR REPLACE package body pkg_auth_1 +is +END pkg_auth_1; +/ +CREATE OR REPLACE package pkg_auth_2 +is +b int; +procedure a(); +END pkg_auth_2; +/ +CREATE OR REPLACE package body pkg_auth_2 +is +procedure a +is +begin +pkg_auth_1.a:=1; +end; +END pkg_auth_2; +/ +grant usage on schema test_grant1 to test_grant2; +SET SESSION AUTHORIZATION test_grant2 password 'Gauss123'; +grant execute,drop on all packages in schema test_grant1 to test_grant2; +SET SESSION AUTHORIZATION test_grant1 password 'Gauss123'; +grant execute,drop on all packages in schema test_grant1 to test_grant2; +SET SESSION AUTHORIZATION test_grant2 password 'Gauss123'; +call test_grant1.pck3.p1(); +begin +test_grant1.pkg_auth_1.a:=1; +end; +/ +begin +test_grant1.pkg_auth_2.a(); +end; +/ +SET SESSION AUTHORIZATION test_grant2 password 'Gauss123'; +drop package test_grant1.pkg_auth_1; +SET SESSION AUTHORIZATION test_grant1 password 'Gauss123'; +drop package test_grant1.pkg_auth_2; +drop package pck3; +drop type s_type; +reset session AUTHORIZATION; +drop user test_grant1; +drop user test_grant2; diff --git a/src/test/regress/sql/hw_package.sql b/src/test/regress/sql/hw_package.sql index bc07bd0a1..494b62538 100644 --- a/src/test/regress/sql/hw_package.sql +++ b/src/test/regress/sql/hw_package.sql @@ -19,9 +19,15 @@ insert into test_package1 values(50); create table dams_ci.test1(col1 int); create schema pkgschema1; create schema pkgschema2; +set behavior_compat_options='allow_procedure_compile_check'; drop package if exists exp_pkg; - +create or replace package aa +is +procedure a(col1 int,col2 in int); +procedure a(col1 int,col2 in int,col3 out int); +end aa; +/ create or replace package exp_pkg as user_exp EXCEPTION; end exp_pkg; @@ -237,13 +243,13 @@ begin end; / drop function if exists func1; -create or replace package exp_pkg as +create or replace package exp_pkg1 as user_exp EXCEPTION; function func1(param int) return number; -end exp_pkg; +end exp_pkg1; / -create or replace package body exp_pkg as +create or replace package body exp_pkg1 as function func1(param int) return number as begin if (param = 1) then @@ -255,9 +261,9 @@ create or replace package body exp_pkg as raise info 'user_exp raise'; return 0; end; -end exp_pkg; +end exp_pkg1; / -select exp_pkg.func1(1); +select exp_pkg1.func1(1); create or replace package transaction_test as data1 character(20) := 'global data1'; @@ 
-281,7 +287,7 @@ end transaction_test; drop package transaction_test; -drop package if exists exp_pkg; +drop package if exists exp_pkg1; drop package autonomous_pkg_150_1; \sf feature_cross_test.func3 \sf func1 @@ -1186,6 +1192,7 @@ end pckg_test2; create user test1 password 'Gauss123'; create user test2 password 'Gauss123'; +ALTER DEFAULT PRIVILEGES IN SCHEMA test1 grant execute on packages to test2; SET SESSION AUTHORIZATION test1 password 'Gauss123'; set behavior_compat_options='plsql_security_definer'; drop package if exists pkg_auth_1; @@ -1238,6 +1245,101 @@ drop user test2; call pckg_test2.p1(); +create user t2 password 'Gauss_234'; +create user t3 password 'Gauss_234'; +create user t4 password 'Gauss_234'; +create user t5 password 'Gauss_234'; +SET SESSION AUTHORIZATION t2 password 'Gauss_234'; + +create table tab1(col1 int); +set behavior_compat_options='plsql_security_definer'; +create or replace package a3 authid current_user +is +procedure func_1(); +end a3; +/ +create or replace package body a3 +is +procedure func_1() +is +begin +insert into test1 values(1); +end; +end a3; +/ +\sf a3.func_1 +create or replace procedure test2 +is +curruser varchar2; +begin +select current_user into curruser; +raise notice '%',curruser; +insert into t2.tab1 values(1); +commit; +select current_user into curruser; +raise notice '%',curruser; +insert into t2.tab1 values(2); +end; +/ +select proacl,prosecdef from pg_proc where proname='test2'; +grant usage on schema t2 to t3; +grant usage,create on schema t2 to t3; +grant execute on all functions in schema t2 to t3; +SET SESSION AUTHORIZATION t3 password 'Gauss_234'; +call t2.test2(); +set behavior_compat_options='plsql_security_definer'; +create or replace procedure test3 +is +a int:=1; +begin +a:=2/0; +exception when others then +t2.test2(); +raise; +end; +/ +select proacl,prosecdef from pg_proc where proname='test3'; +grant usage on schema t3 to t3; +grant usage,create on schema t3 to t4; +grant execute on all functions in schema t3 to t4; +SET SESSION AUTHORIZATION t4 password 'Gauss_234'; +set behavior_compat_options='plsql_security_definer'; +create or replace procedure test4 +is +a int:=1; +begin +a:=2/0; +exception when others then +t3.test3(); +commit; +raise; +end; +/ +select proacl,prosecdef from pg_proc where proname='test4'; +grant usage on schema t4 to t5; +grant usage,create on schema t4 to t5; +grant execute on all functions in schema t4 to t5; +SET SESSION AUTHORIZATION t5 password 'Gauss_234'; +set behavior_compat_options='plsql_security_definer'; +create or replace procedure test5 +is +a int:=1; +begin +a:=2/0; +exception when others then +t4.test4(); +commit; +raise; +end; +/ +select proacl,prosecdef from pg_proc where proname='test5'; +call t5.test5(); +reset session AUTHORIZATION; +drop user t2 cascade; +drop user t3 cascade; +drop user t4 cascade; +drop user t5 cascade; + create or replace package pkg_same_arg_1 is procedure a(); @@ -1451,48 +1553,464 @@ END UT_P_PCKG_DAMS_RECEIVE; --package body definition of UT_P_PCKG_DAMS_RECEIVE / -create or replace function fun123(va in varchar2, vb in varchar2) -return character varying[] -as declare -vc varchar2[]; -begin -vc[1] := va; -vc[2] := vb; -raise info 'out'; -return vc; -end; +create or replace package pck1 as +procedure p1; +procedure p2; +end pck1; / - -create or replace package pck123 as -procedure p1(); -function fun123(va in varchar2, vb in varchar2) return character varying[]; -end pck123; -/ - -create or replace package body pck123 as +create or replace package body 
pck1 as procedure p1 as -va varchar2; -vb varchar2; -vc varchar2[]; begin -vc = fun123(va,','); ---vc = fun1(va,vb); +null; end; -function fun123(va in varchar2, vb in varchar2) return character varying[] -as declare -vc varchar2[]; +procedure p2 as begin -vc[1] := va; -vc[2] := vb; -return vc; +drop package pck1; end; -end pck123; +end pck1; / - -call pck123.p1(); - +call pck1.p2(); --test online help \h CREATE PACKAGE \h CREATE PACKAGE BODY \h DROP PACKAGE \h DROP PACKAGE BODY + +create schema pkgsch059; +set current_schema=pkgsch059; +create table pkgtbl059(c0 int,c1 number(5),c2 varchar2(20),c3 clob,c4 blob); +insert into pkgtbl059 values(1,1,'varchar1',repeat('clob1',2),'abcdef1'); +insert into pkgtbl059 values(2,2,'varchar10',repeat('clob2',2),'abcdef2'); + +create type type001 is(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); + +create or replace package pkg059 +is + type type001 is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); + type type002 is table of pkgsch059.type001 index by integer; + type type003 is table of type001 index by integer; + col1 type002:=type002(); + col2 type003:=type003(); + procedure proc059_1(col3 type002,col4 type003); + function func059_2(col5 int) return int; +end pkg059; +/ + +create or replace package body pkg059 +is +procedure proc059_1(col3 type002,col4 type003) +is +begin + raise info 'col3 is %',col3; + raise info 'col4 is %',col4; +end; +function func059_2(col5 int) return int +is +begin + pkg059.col1(1):=(1,1,'varchar1',repeat('clob1',2),'abcdef1'); + pkg059.col1(2):=(2,2,'varchar10',repeat('clob2',2),'abcdef2'); + pkg059.col2('1'):=col1(2); + pkg059.col2('-1'):=col1(1); + proc059_1(pkg059.col1,pkg059.col2); + return pkg059.col1(1).c1; +end; +end pkg059; +/ + +create or replace package pkg059_1 +is +procedure proc059_1_1(cp1 pkg059.type002,cp2 out pkg059.type003); +procedure func059_1_2(cf1 pkg059.type002,cf2 out pkg059.type003); +end pkg059_1; +/ + + +create or replace package body pkg059_1 +is +procedure proc059_1_1(cp1 pkg059.type002,cp2 out pkg059.type003) +is +cp3 varchar2(30); +begin + raise info 'pkg059.col1 %',pkg059.col1; + raise info 'pkg059.col2 %',pkg059.col2; + func059_1_2(cf1=>pkg059.col1,cf2=>pkg059.col2); + raise info 'cp1 is %',cp1; + raise info 'cp2 is %',cp2; + raise info 'cp3 is %',cp3; +end; +procedure func059_1_2(cf1 pkg059.type002,cf2 out pkg059.type003) +is +cf3 number; +cf4 varchar2(30):='cf4'; +begin + cf3:=3; + pkg059.func059_2(cf3); + raise info 'cf2(1).c1 is %',cf2(1); + -- return cf4; +end; +end pkg059_1; +/ + + +declare +de1 pkg059.type002; +de2 pkg059.type003; +count int:=2; +var2 varchar2(30); +begin +for i in 1..count loop +select c0,c1,c2,c3,c4 into de1(i).c1,de1(i).c2,de1(i).c3,de1(i).c4,de1(i).c5 from pkgtbl059 where c0=i; +select c0+200,c1+200,c2||'200',c3||'200',c4||'200' into de2(i).c1,de2(i).c2,de2(i).c3,de2(i).c4,de2(i).c5 from pkgtbl059 where c0=i; +end loop; + raise info 'de1 is %',de1; + raise info 'de2 is %',de2; + pkg059_1.proc059_1_1(de1,de2); + raise info 'de2 out is %',de2; +end; +/ + + + +create or replace package body pkg059 +is +procedure proc059_1(col3 type002,col4 type003) +is +begin + raise info 'col3 is %',col3; + raise info 'col4 is %',col4; +end; +function func059_2(col5 int) return int +is +begin + pkg059.col1(1):=(1,1,'varchar1',repeat('clob1',2),'abcdef1'); + pkg059.col1(2):=(2,2,'varchar10',repeat('clob2',2),'abcdef2'); + col2('1'):=col1(2); + col2('-1'):=col1(1); + proc059_1(pkg059.col1,pkg059.col2); + return pkg059.col1(1).c1; +end; +end pkg059; +/ + +declare +de1 
pkg059.type002; +de2 pkg059.type003; +count int:=2; +var2 varchar2(30); +begin +for i in 1..count loop +select c0,c1,c2,c3,c4 into de1(i).c1,de1(i).c2,de1(i).c3,de1(i).c4,de1(i).c5 from pkgtbl059 where c0=i; +select c0+200,c1+200,c2||'200',c3||'200',c4||'200' into de2(i).c1,de2(i).c2,de2(i).c3,de2(i).c4,de2(i).c5 from pkgtbl059 where c0=i; +end loop; + raise info 'de1 is %',de1; + raise info 'de2 is %',de2; + pkg059_1.proc059_1_1(de1,de2); + raise info 'de2 out is %',de2; +end; +/ + +--test alter package owner +create user alt_package PASSWORD 'gauss@123'; +create user alt_package_2 PASSWORD 'gauss@123'; +create package alt_package.pck1_alter as +procedure p1(); +type r1 is record(a int, b int); +type r2 is table of r1; +type r3 is varray(10) of r1; +type r4 is ref cursor; +end pck1_alter; +/ +create package body alt_package.pck1_alter as +type r5 is record(a int, b int); +type r6 is table of r1; +type r7 is varray(10) of r1; +type r8 is ref cursor; +procedure p1 is +begin +null; +end; +procedure p2 is +begin +null; +end; +end pck1_alter; +/ +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1_alter'); +SET SESSION AUTHORIZATION alt_package_2 password 'gauss@123'; +alter package alt_package.pck1_alter owner to alt_package_2; +SET SESSION AUTHORIZATION alt_package password 'gauss@123'; +alter package alt_package.pck1_alter owner to alt_package_2; +reset session AUTHORIZATION; +alter package alt_package.pck1_alter owner to alt_package_2; +SET SESSION AUTHORIZATION alt_package password 'gauss@123'; +alter package alt_package.pck1_alter owner to alt_package; +reset session AUTHORIZATION; +select usename from pg_user where usesysid = (select pkgowner from gs_package where pkgname = 'pck1_alter'); +SET SESSION AUTHORIZATION alt_package_2 password 'gauss@123'; +call alt_package.pck1_alter.p1(); +reset session AUTHORIZATION; +grant usage on schema alt_package to alt_package_2; +grant execute on package alt_package.pck1_alter to alt_package_2; +SET SESSION AUTHORIZATION alt_package_2 password 'gauss@123'; +call alt_package.pck1_alter.p1(); +declare +va alt_package.pck1_alter.r1; +vb alt_package.pck1_alter.r2; +vc alt_package.pck1_alter.r3; +vd alt_package.pck1_alter.r4; +begin +va := (1,1); +vb(1) := (2,3); +vc(1) := (3,4); +raise info '%,%,%', va,vb,vc; +end; +/ +reset session AUTHORIZATION; +drop package alt_package.pck1_alter; +drop user alt_package cascade; +drop user alt_package_2 cascade; + +-- test \h alter package +\h alter package + +-- test \sf procedure with authid +create or replace procedure p1() +AUTHID CURRENT_USER +is +begin +null; +end; +/ + +create or replace procedure p2() +AUTHID DEFINER +is +begin +null; +end; +/ + +create or replace procedure p3() +is +begin +null; +end; +/ +CREATE OR REPLACE PACKAGE ABORT IS +PROCEDURE testpro1(var3 int); +END ABORT; +/ +CREATE OR REPLACE PACKAGE body ABORT IS +PROCEDURE testpro1(var3 int) +is +begin +null; +end; +END ABORT; +/ +create or replace package autonomous_pkg_tmp +IS +count_public int := 10; +function autonomous_f_public(num1 int) +return int; +end autonomous_pkg_tmp; +/ +create or replace package body autonomous_pkg_tmp as +count_private int:=20; +function autonomous_f_public(num1 int) +return int +is +declare +re_int int; +begin count_public = num1 + count_public; +count_private = num1 + count_private; +re_int = count_public +count_private; +return re_int; +end; +begin +count_public:=0; +count_private:=0; +end autonomous_pkg_tmp; +/ +create function package_func_overload(col int, 
col2 out int) +return integer package +as +declare + col_type text; +begin + col := 122; + return 0; +end; +/ + +create procedure package_func_overload(col int, col2 out varchar) +package +as +declare + col_type text; +begin + col2 := '122'; +end; +/ + +reset session AUTHORIZATION; +begin +raise notice '%',autonomous_pkg_tmp.count_public; +end; +/ +create or replace package pck1 is +type tp1 is varray(10) of int; +function f1(in a int,c out tp1) return int; +end pck1; +/ + +create or replace package body pck1 is +function f1(in a int,c out tp1) return int +as +declare +begin +c(1):=a; +return a; +end; +end pck1; +/ + +declare +kk pck1.tp1; +x int := 10; +res int; +begin +res := pck1.f1(x,kk)+1; +raise info 'res:%',res; +end; +/ + +drop package if exists pck1; +drop package if exists pck2; + +create or replace package pck1 as +function func1() return int; +end pck1; +/ +create or replace package body pck1 as +xx int :=10; +function func1() return int as +begin + xx := xx + 1; + return xx; +end; +end pck1; +/ + +create or replace package pck2 as +function func1() return int; +end pck2; +/ +create or replace package body pck2 as +yy int := pck1.func1(); +function func1() return int as +begin +return yy; +end; +end pck2; +/ + +call pck2.func1(); + +drop package if exists pck1; +drop package if exists pck2; + +create or replace package pck1 as +function func2() return int; +end pck1; +/ +create or replace package body pck1 as +function func1() return int as +begin + return 10; +end; +function func2() return int as +begin + return func1(); +end; +end pck1; +/ + +create or replace package pck2 as +function func1() return int; +end pck2; +/ +create or replace package body pck2 as +xx int := pck1.func2(); +function func1() return int as +begin + return xx; +end; +end pck2; +/ +call pck2.func1(); +call pck1.func2(); + +\sf p1 +\sf p2 +\sf p3 +drop package if exists pck1; +drop function if exists func1; + +create or replace function func1() return int as +begin +return 5; +end; +/ + +create or replace package pck1 as +procedure proc(); +function func2() return int; +end pck1; +/ +create or replace package body pck1 as +xx integer := func1; +procedure proc() as +begin +raise info 'xx is %',xx; +end; +function func2() return int as +begin +return 10; +end; +function func1() return int as +begin +return 20; +end; +end pck1; +/ + +call pck1.proc(); + +drop function func1; + +create or replace package body pck1 as +xx integer := func1; +procedure proc() as +begin +raise info 'xx is %',xx; +end; +function func2() return int as +begin +return 10; +end; +end pck1; +/ +call pck1.proc(); + +--exit the session, reconnect, and call again: xx should not be 20; a "function does not exist" error should be raised at definition time or at call time +call pck1.proc(); +drop package pck1; +drop package pck2; +drop procedure p1; +drop procedure p2; +drop procedure p3; +reset behavior_compat_options; +drop package autonomous_pkg_tmp; +drop package abort; +drop schema pkgsch059 cascade; diff --git a/src/test/regress/sql/hw_package_function.sql b/src/test/regress/sql/hw_package_function.sql index 55b08d138..5740ca1be 100644 --- a/src/test/regress/sql/hw_package_function.sql +++ b/src/test/regress/sql/hw_package_function.sql @@ -98,7 +98,6 @@ DECLARE para3 bigint = 2; para4 varchar; BEGIN - package_func_overload(1, 1); package_func_overload(1, para1); package_func_overload(1, para2); package_func_overload(1, para3); @@ -620,4 +619,73 @@ end; drop schema package_schema cascade; drop schema package_nps cascade; + \c regression; + +drop schema if exists s1; +drop schema if exists s2; +create schema s1; +create schema s2; +set 
current_schema to s1; +create function package_func_overload_1(col int) +returns integer as $$ +declare +begin + return 0; +end; +$$ language plpgsql; +set current_schema to s2; +create function package_func_overload_1(col int) +returns integer as $$ +declare +begin + return 0; +end; +$$ language plpgsql; + +reset current_schema; +drop schema s1 cascade; +drop schema s2 cascade; + +create schema s; +set current_schema to s; +CREATE OR REPLACE PACKAGE p1 IS +PROCEDURE testpro1(var3 int); +PROCEDURE testpro1(var2 char); +END p1; +/ + +create function testpro1(col int) +returns integer as $$ +declare +begin + return 0; +end; +$$ language plpgsql; + +reset current_schema; +drop schema s cascade; + +drop package if exists pkg112; +create or replace package pkg112 +as +type ty1 is table of integer index by integer; +procedure p1(v1 in ty1,v2 out ty1,v3 inout ty1,v4 int); +procedure p1(v2 out ty1,v3 inout ty1,v4 int); +procedure p4(); +pv1 ty1; +end pkg112; +/ +set behavior_compat_options='proc_outparam_override'; +drop package if exists pkg112; +create or replace package pkg112 +as +type ty1 is table of integer index by integer; +procedure p1(v1 in ty1,v2 out ty1,v3 inout ty1,v4 int); +procedure p1(v2 out ty1,v3 inout ty1,v4 int); +procedure p4(); +pv1 ty1; +end pkg112; +/ +drop package if exists pkg112; +set behavior_compat_options=''; diff --git a/src/test/regress/sql/hw_package_single.sql b/src/test/regress/sql/hw_package_single.sql new file mode 100644 index 000000000..ced0cb9f0 --- /dev/null +++ b/src/test/regress/sql/hw_package_single.sql @@ -0,0 +1,350 @@ +create database pl_test_pkg_single DBCOMPATIBILITY 'pg'; +\c pl_test_pkg_single; +--test dbe_utility +CREATE OR REPLACE PROCEDURE p0() +AS +declare + a integer; + c integer; + b integer; +BEGIN + a:=1; + c:=0; + b := a / c; + dbe_output.print_line('result is: '||to_char(b)); +END; +/ + +CREATE OR REPLACE PROCEDURE p1() +AS +BEGIN + p0(); +END; +/ + +CREATE OR REPLACE PROCEDURE p2() +AS +BEGIN + p1(); +END; +/ + +--test dbe_utility.format_error_backtrack +CREATE OR REPLACE PROCEDURE p3_error() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(dbe_utility.format_error_backtrace()); +END; +/ +call p3_error(); + +--test dbe_utility.format_error_stack +CREATE OR REPLACE PROCEDURE p3_error_stack() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(dbe_utility.format_error_stack()); +END; +/ +call p3_error_stack(); + +CREATE OR REPLACE PROCEDURE p0() +AS +declare + a integer; + c integer; + b integer; +BEGIN + a:=1; + c:=1; + b := a / c; + dbe_output.print_line('result is: '||to_char(b)); +END; +/ + +--test dbe_utility.format_error_backtrace +CREATE OR REPLACE PROCEDURE p3_noError() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(utility.format_error_backtrace()); +END; +/ +call p3_noError(); + +--test dbe_utility.format_error_stack +CREATE OR REPLACE PROCEDURE p3_noError_stack() +AS +BEGIN + p2(); +EXCEPTION + WHEN OTHERS THEN + dbe_output.print_line(utility.format_error_stack()); +END; +/ +call p3_noError_stack(); + +--test dbe_utility.format_call_stack +CREATE OR REPLACE PROCEDURE p0() +AS +declare + a integer; + c integer; + b integer; +BEGIN + a:=1; + c:=1; + b := a / c; + dbe_output.print_line('result is: '||to_char(b)); + dbe_output.print_line(dbe_utility.format_call_stack()); +END; +/ + +CREATE OR REPLACE PROCEDURE p3_call_stack() +AS +BEGIN + p2(); +END; +/ +call p3_call_stack(); + +--test dbe_utility.get_time +CREATE OR REPLACE PROCEDURE test_get_time1() +AS 
+declare + start_time bigint; + end_time bigint; +BEGIN + start_time:= dbe_utility.get_time (); + pg_sleep(1); + end_time:=dbe_utility.get_time (); + dbe_output.print_line(end_time - start_time); +END; +/ +call test_get_time1(); + +CREATE OR REPLACE PROCEDURE test_get_time5() +AS +declare + start_time bigint; + end_time bigint; +BEGIN + start_time:= dbe_utility.get_time (); + pg_sleep(5); + end_time:=dbe_utility.get_time (); + dbe_output.print_line(end_time - start_time); +END; +/ +call test_get_time5(); + +--test dbe_match.edit_distance_similarity +select dbe_match.edit_distance_similarity('abcd', 'abcd'); +select dbe_match.edit_distance_similarity('aaaa', 'a'); +select dbe_match.edit_distance_similarity('aaaa', 'aaa'); + +--test dbe_raw +select dbe_raw.bit_or('a1234', '12'); +select dbe_raw.bit_or('0000', '1111'); +select dbe_raw.bit_or('0000', '11'); +select dbe_raw.bit_or('baf234', '11'); +select dbe_raw.bit_or('baf234', '00'); + +CREATE OR REPLACE PROCEDURE test_bitor() +AS +declare + a raw; + b raw; +BEGIN + a:= 'abc123'; + b:= '12'; + dbe_output.print_line(dbe_raw.bit_or(a, b)); +END; +/ +call test_bitor(); + +select DBE_RAW.cast_from_varchar2_to_raw('aaa'); +select dbe_raw.cast_to_varchar2('616161'); +select DBE_RAW.cast_from_varchar2_to_raw('cf12'); +select dbe_raw.cast_to_varchar2('63663132'); +select DBE_RAW.cast_from_varchar2_to_raw('341'); +select dbe_raw.cast_to_varchar2('333431'); + + +select dbe_raw.substr('aba', 1, 2); +CREATE OR REPLACE PROCEDURE test_substr() +AS +declare + a raw; +BEGIN + a:= 'abc123'; + dbe_output.print_line(dbe_raw.substr(a, 3, 2)); +END; +/ +call test_substr(); + +--test dbe_session +select DBE_SESSION.set_context('test', 'gaussdb', 'one'); +select DBE_SESSION.search_context('test', 'gaussdb'); +select DBE_SESSION.set_context('test', 'gaussdb', 'two'); +select DBE_SESSION.search_context('test', 'gaussdb'); +select DBE_SESSION.set_context('test', 'gaussdb', 'two'); +select DBE_SESSION.search_context('test', 'gaussdb'); +select DBE_SESSION.clear_context('test', 'test','gaussdb'); +select DBE_SESSION.search_context('test', 'gaussdb'); + +create or replace function test_set_context ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.set_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; + +call test_set_context('test', 'name', 'tony'); + +create or replace function test_sys_context ( + namespace text, + attribute text +) +returns text AS $$ +BEGIN + return DBE_SESSION.search_context(namespace, attribute); +END; +$$ LANGUAGE plpgsql; + +call test_sys_context('test', 'name'); + +create or replace function test_clear_context2 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.clear_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; + +call test_clear_context('test', 'text', 'name'); +call test_sys_context('test', 'name'); + +create or replace function test_set_context2 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.set_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; + +call test_set_context2('CTX_P_GCMS_BIND_PKG', 'type', 'AAA'); + +create or replace function test_sys_context2 ( + namespace text, + attribute text +) +returns text AS $$ +BEGIN + return DBE_SESSION.search_context(namespace, attribute); +END; +$$ LANGUAGE plpgsql; + +call test_sys_context2('CTX_P_GCMS_BIND_PKG ', 'type',); + +create or replace function test_clear_context2 ( + namespace text, + attribute text, + 
value text +) +returns void AS $$ +BEGIN + DBE_SESSION.clear_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; + +call test_clear_context2('test', 'text', 'name'); +call test_sys_context2('test', 'name'); + +create or replace function test_set_context3 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.set_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; + +call test_set_context('test1', 'name1', 'tony1'); + +create or replace function test_sys_context3 ( + namespace text, + attribute text +) +returns text AS $$ +BEGIN + return DBE_SESSION.search_context(namespace, attribute); +END; +$$ LANGUAGE plpgsql; + +call test_sys_context('test1', 'name1'); + +create or replace function test_clear_context3 ( + namespace text, + attribute text, + value text +) +returns void AS $$ +BEGIN + DBE_SESSION.clear_context(namespace, attribute, value); +END; +$$ LANGUAGE plpgsql; + +call test_clear_context('test1', 'text1', 'name1'); +call test_sys_context('test', 'name'); + +create or replace procedure proc_test1(i_col1 in varchar2, o_ret out varchar2) as +begin +null; +end; +/ +create or replace procedure proc_test1(i_col1 in varchar2, o_ret out varchar2) as +v_cursor_id number; +o_ret1 varchar2; +v_execute number; +v_sql text; +begin +o_ret:='1'; +o_ret1 := '0'; +v_sql:='begin proc_test(i_col1,o_ret1); end;'; +v_cursor_id := dbe_sql.register_context(); +dbe_sql.sql_set_sql(v_cursor_id,v_sql,1); +perform dbe_sql.sql_bind_variable(v_cursor_id,'i_col1',i_col1,10); +perform dbe_sql.sql_bind_variable(v_cursor_id,'o_col1',o_ret1,10); +v_execute:=dbe_sql.sql_run(v_cursor_id); +exception +when others then +if dbe_sql.is_active(v_cursor_id) then +dbe_sql.sql_unregister_context(v_cursor_id); +end if; +end; +/ +select proc_test1('1',''); + +drop procedure proc_test1; +\c regression; +drop database IF EXISTS pl_test_pkg_single; + diff --git a/src/test/regress/sql/hw_package_variable.sql b/src/test/regress/sql/hw_package_variable.sql new file mode 100644 index 000000000..09d7980e5 --- /dev/null +++ b/src/test/regress/sql/hw_package_variable.sql @@ -0,0 +1,2244 @@ +drop schema if exists pkg_val_1 cascade; +drop schema if exists pkg_val_2 cascade; + +create schema pkg_val_1; +create schema pkg_val_2; + +set current_schema = pkg_val_2; +set behavior_compat_options='allow_procedure_compile_check'; + +--test package val assign +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is varray(10) of int; +type r3 is table of int; +type r4 is record (a r2); +va r1; +vb r2; +vc r3; +vd int; +vf r4; +end pck1; +/ +create or replace package body pck1 is +end pck1; +/ + +create or replace package pck2 is +ve int; +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +begin +pck1.va := (1,2); +pck1.va := (3,4); +pck1.va.a := 5; +pck1.va.a := pck1.va.a + 1; +pck1.vb(1) := 1; +pck1.vb(2) := 2 + pck1.vb(1); +pck1.vc(1) := 1; +pck1.vc(2) := 2 + pck1.vc(1); +pck1.vd := 4; +pck1.vd := 5; +pck1.vf.a(1) := 1; +pck1.vf.a(2) := 1 + pck1.vf.a(1); +pck2.ve := 6; +pck2.ve := 7; +raise info '%, %, %, %, %, %', pck1.va, pck1.vb, pck1.vc, pck1.vd, pck1.vf, ve; +end; +end pck2; +/ +call pck2.p1(); + +create or replace package body pck2 is +procedure p1 as +begin +select 11,22 into pck1.va; +select 33 into pck1.va.a; +select 11 into pck1.vb(1); +select 22 into pck1.vb(2); +select 33 into pck1.vc(1); +select 44 into pck1.vc(2); +select 55 into pck1.vd; +select 66 into pck1.vd; +select 77 into pck1.vf.a(1); +select 77 
into pck2.ve; +select 88 into pck2.ve; +raise info '%, %, %, %, %,%', pck1.va, pck1.vb, pck1.vc, pck1.vd, pck1.vf, ve; +end; +end pck2; +/ +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +--test cross-schema pkg.val access +create or replace package pkg_val_1.pck1 is +type r1 is record (a int, b int); +type r2 is varray(10) of int; +type r3 is table of int; +type r4 is record (a r2); +va r1; +vb r2; +vc r3; +vd int; +vf r4; +end pck1; +/ +create or replace package body pkg_val_1.pck1 is +end pck1; +/ + +create or replace package pck2 is +ve int; +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +begin +pkg_val_1.pck1.va := (1,2); +pkg_val_1.pck1.va := (3,4); +pkg_val_1.pck1.va.a := 5; +pkg_val_1.pck1.vb(1) := 1; +pkg_val_1.pck1.vb(2) := pkg_val_1.pck1.vb(1) + 11; +pkg_val_1.pck1.vc(1) := 1; +pkg_val_1.pck1.vc(2) := 2 + pkg_val_1.pck1.vc(1); +pkg_val_1.pck1.vd := 4; +pkg_val_1.pck1.vd := 5; +pkg_val_1.pck1.vf.a(1) := 11; +pkg_val_2.pck2.ve := 6; +pkg_val_2.pck2.ve := 7; +raise info '%, %, %, %, %, %', pkg_val_1.pck1.va, pkg_val_1.pck1.vb, pkg_val_1.pck1.vc, pkg_val_1.pck1.vd, pkg_val_1.pck1.vf, ve; +end; +end pck2; +/ +call pck2.p1(); + +create or replace package body pck2 is +procedure p1 as +begin +select 11,22 into pkg_val_1.pck1.va; +--select 33 into pkg_val_1.pck1.va.a; not supported yet +select 11 into pkg_val_1.pck1.vb(1); +select 22 into pkg_val_1.pck1.vb(2); +select 33 into pkg_val_1.pck1.vc(1); +select 44 into pkg_val_1.pck1.vc(2); +select 55 into pkg_val_1.pck1.vd; +select 66 into pkg_val_1.pck1.vd; +select 77 into pkg_val_2.pck2.ve; +select 88 into pkg_val_2.pck2.ve; +raise info '%, %, %, %, %', pkg_val_1.pck1.va, pkg_val_1.pck1.vb, pkg_val_1.pck1.vc, pkg_val_1.pck1.vd, ve; +end; +end pck2; +/ +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pkg_val_1.pck1; + +--test pkg.array.extend +create or replace package pck1 is +type ta is varray(10) of varchar(100); +tb ta; +end pck1; +/ + +create or replace package pck2 is +procedure proc1; +end pck2; +/ +create or replace package body pck2 is +procedure proc1() is +begin +pck1.tb.delete; +end; +end pck2; +/ + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +--test cross-package cursor +DROP TABLE if exists test_1; +create table test_1(a int, b int); +insert into test_1 values(11,22); + +create or replace package pck1 is +cursor c1 is select * from test_1; +end pck1; +/ + +create or replace package pck2 is +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +open pck1.c1; +fetch pck1.c1 into va; +raise info '%',va; +end; +end pck2; +/ +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pck1; +DROP TABLE test_1; + +--test pkg.row.col references +create or replace package pck1 is +type r1 is record (a int, b int); +va r1; +end pck1; +/ + +create or replace package pck2 is +procedure p1; +end pck2; +/ +create or replace package body pck2 is +procedure p1 as +begin +pck1.va.a := 1; +pck1.va := (1,2); +pck1.va.a := pck1.va.b + 1; +pck1.va.a := pck1.va.a + 1; +raise info '%,', pck1.va; +end; +end pck2; +/ + +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +--test table var index by varchar2 +create or replace package pck1_zjc is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; +end pck1_zjc; +/ +declare + a integer; +begin + pck1_zjc.aa('a') = 1; + pck1_zjc.aa('b') = 2; + pck1_zjc.aa('c') = pck1_zjc.aa('a') + pck1_zjc.aa('b'); + RAISE INFO '%', pck1_zjc.aa; +end; +/ + +DROP PACKAGE pck1_zjc; + +--test 
table var index by varchar2 with different schema +create or replace package pkg_val_1.pck1_zjc is + TYPE SalTabTyp is TABLE OF integer index by varchar(10); + aa SalTabTyp; +end pck1_zjc; +/ +declare + a integer; +begin + pkg_val_1.pck1_zjc.aa('a') = 1; + pkg_val_1.pck1_zjc.aa('b') = 2; + pkg_val_1.pck1_zjc.aa('c') = pkg_val_1.pck1_zjc.aa('a') + pkg_val_1.pck1_zjc.aa('b'); + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; +end; +/ + +DROP PACKAGE pkg_val_1.pck1_zjc; + +--test for table of multiset +create or replace package pck1_zjc is + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; +end pck1_zjc; +/ + +declare + a integer; +begin + pck1_zjc.aa(0) = 1; + pck1_zjc.aa(2) = 2; + pck1_zjc.bb(0) = 2; + pck1_zjc.bb(1) = NULL; + pck1_zjc.aa = pck1_zjc.aa multiset union pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset union distinct pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset intersect pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset intersect distinct pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset except pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa = pck1_zjc.aa multiset except distinct pck1_zjc.bb; + RAISE INFO '%', pck1_zjc.aa; +end; +/ + +DROP package pck1_zjc; + +--test for table of multiset:record of table +create or replace package pck1_zjc is + TYPE SalTabTyp is TABLE OF integer; + TYPE r1 is record (a SalTabTyp); + aa r1; + bb r1; +end pck1_zjc; +/ + +declare + a integer; + begin + pck1_zjc.aa.a(0) = 1; + pck1_zjc.aa.a(2) = 2; + pck1_zjc.bb.a(0) = 2; + pck1_zjc.bb.a(1) = NULL; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset union pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset union distinct pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset intersect pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset intersect distinct pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset except pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; + pck1_zjc.aa.a = pck1_zjc.aa.a multiset except distinct pck1_zjc.bb.a; + RAISE INFO '%', pck1_zjc.aa.a; +end; +/ + +DROP package pck1_zjc; + +--test for table of multiset: cross-schema +create or replace package pkg_val_1.pck1_zjc is + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; +end pck1_zjc; +/ + +declare + a integer; + begin + pkg_val_1.pck1_zjc.aa(0) = 1; + pkg_val_1.pck1_zjc.aa(2) = 2; + pkg_val_1.pck1_zjc.bb(0) = 2; + pkg_val_1.pck1_zjc.bb(1) = NULL; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset union pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset union distinct pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset intersect pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset intersect distinct pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset except pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; + pkg_val_1.pck1_zjc.aa = pkg_val_1.pck1_zjc.aa multiset except distinct pkg_val_1.pck1_zjc.bb; + RAISE INFO '%', pkg_val_1.pck1_zjc.aa; +end; +/ + +DROP package pkg_val_1.pck1_zjc; + +--test record of table +declare + TYPE SalTabTyp is TABLE OF 
integer; + TYPE r1 is record (a SalTabTyp); + aa r1; + bb r1; + begin + aa.a(0) = 1; + aa.a(2) = 2; + bb.a(0) = 2; + bb.a(1) = NULL; + aa.a = aa.a multiset union bb.a; + RAISE INFO '%', aa; + aa.a = aa.a multiset union distinct bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset intersect bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset intersect distinct bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset except bb.a; + RAISE INFO '%', aa.a; + aa.a = aa.a multiset except distinct bb.a; + RAISE INFO '%', aa.a; +end; +/ + +--test record of record of table : not supported yet +-- create or replace procedure pro1 is +-- TYPE SalTabTyp is TABLE OF integer; +-- TYPE r1 is record (a SalTabTyp); +-- TYPE r2 is record (a r1); +-- aa r2; +-- bb r2; +-- begin +-- aa.a.a(0) = 1; +-- aa.a.a(2) = 2; +-- bb.a.a(0) = 2; +-- bb.a.a(1) = NULL; +-- aa.a.a = aa.a.a multiset union bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset union distinct bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset intersect bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset intersect distinct bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset except bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- aa.a.a = aa.a.a multiset except distinct bb.a.a; +-- RAISE INFO '%', aa.a.a; +-- end; +-- / + +--test package constant variable +create or replace package pck1 is + va constant int := 1; +end pck1; +/ + +declare +vb int; +begin +vb := 2; +pck1.va := vb; +end; +/ +DROP package pck1; + +--test error message when the variable is not found +create or replace package pck1 is + va constant int := 1; +end pck1; +/ + +declare +vb int; +begin +vb := 2; +pck1.vb := vb; +end; +/ + +declare +vb int; +begin +vb := 2; +pck2.vb := vb; +end; +/ + +DROP package pck1; + +--test nested references to package variables +create or replace package pck1 is +type a is record(a1 varchar2); +type b is record(b1 a,b2 varchar2); +vb b; +end pck1; +/ +--2. cross-package nested record types +create or replace package pck2 is +procedure proc1(); +end pck2; +/ +create or replace package body pck2 is +procedure proc1() as +P1 varchar2; +begin +pck1.vb.b1.a1 :='abc'; +P1 :=pck1.vb.b1.a1; +raise info '%', P1; +end; +end pck2; +/ + +call pck2.proc1(); + +DROP PACKAGE pck2; +DROP PACKAGE pck1; +--test procedure param duplicate with package +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(r1 int, r2 int, r3 int, r4 int, ve int); +end pck1; +/ +DROP package pck1; + +--test procedure var duplicate with package public +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); + procedure p2(a int); +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + va int; + vb int; + vc int; + vd int; + ve int; + vf int; + r1 int; + r2 int; + r3 int; + r4 int; + begin + va := a; + vb := va + 1; + vc := vb + 1; + vd := vc + 1; + ve := vd + 1; + vf := ve + 1; + r1 := ve + 1; + r2 := r1 + 1; + r3 := r2 + 1; + r4 := r3 + 1; + raise info '%, %, %, %, %, %, %, %, %, %', va,vb,vc,vd,ve,vf,r1,r2,r3,r4; + end; + + procedure p2(a int) is + val1 r1; + val2 
r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ + +call pck1.p1(10); +call pck1.p2(10); +DROP package pck1; + +--test procedure var duplicate with package private +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + procedure p1(a int); + procedure p2(a int); +end pck1; +/ + +create or replace package body pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int) is + va int; + vb int; + vc int; + vd int; + ve int; + vf int; + r1 int; + r2 int; + r3 int; + r4 int; + begin + va := a; + vb := va + 1; + vc := vb + 1; + vd := vc + 1; + ve := vd + 1; + vf := ve + 1; + r1 := ve + 1; + r2 := r1 + 1; + r3 := r2 + 1; + r4 := r3 + 1; + raise info '%, %, %, %, %, %, %, %, %, %', va,vb,vc,vd,ve,vf,r1,r2,r3,r4; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ + +call pck1.p1(10); +call pck1.p2(10); +DROP package pck1; + +--test procedure type duplicate with package public +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); + procedure p2(a int); +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int := 1, b int := 2); + type r2 is record (a int := 3, b int := 4); + type r3 is record (a int := 7, b int := 6); + type r4 is record (a int := 9, b int := 8); + type va is record (a int := 11, b int := 10); + type vb is record (a int := 13, b int := 12); + type vc is record (a int := 15, b int := 14); + type vd is record (a int := 17, b int := 16); + type ve is record (a int := 19, b int := 18); + type vf is record (a int := 21, b int := 20); + val1 r1; + val2 r2; + val3 r3; + val4 r4; + val5 va; + val6 vb; + val7 vc; + val8 vd; + val9 ve; + val10 vf; + begin + raise info '%, %, %, %, %, %, %, %, %, %', val1,val2,val3,val4,val5,val6,val7,val8,val9,val10; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ + +call pck1.p1(10); +call pck1.p2(10); +DROP package pck1; + +--test procedure type duplicate with package private +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + procedure p1(a int); + procedure p2(a int); +end pck1; +/ + +create or replace package body pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int) is + type r1 is record (a int := 1, b int := 2); + type r2 is record (a int := 3, b int := 4); + type r3 is record (a int := 7, b int := 6); + type r4 is record (a int := 9, b int := 8); + 
type va is record (a int := 11, b int := 10); + type vb is record (a int := 13, b int := 12); + type vc is record (a int := 15, b int := 14); + type vd is record (a int := 17, b int := 16); + type ve is record (a int := 19, b int := 18); + type vf is record (a int := 21, b int := 20); + val1 r1; + val2 r2; + val3 r3; + val4 r4; + val5 va; + val6 vb; + val7 vc; + val8 vd; + val9 ve; + val10 vf; + begin + raise info '%, %, %, %, %, %, %, %, %, %', val1,val2,val3,val4,val5,val6,val7,val8,val9,val10; + end; + + procedure p2(a int) is + val1 r1; + val2 r2; + val3 r3; + begin + va := (1 , 2); + vb := array[3,4,5]; + vc := array[7,8,9]; + val1 := va; + val2 := vb; + val3 := vc; + raise info '%, %, %, %, %, %', va,vb,vc,val1,val2,val3; + end; +end pck1; +/ + +call pck1.p1(10); +call pck1.p2(10); +DROP package pck1; + +--test public var duplicated with private var +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); +end pck1; +/ + +create or replace package body pck1 is + va int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + vb int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + ve int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + vf int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + r1 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + r2 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + r3 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + r4 int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + TYPE va is table of int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + TYPE r2 is table of int; + procedure p1(a int) is + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + va int; + procedure p1(a int) is + va int; + begin + NULL; + end; +end pck1; +/ + +DROP package pck1; + +--test procedure duplicate with itself +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + procedure p1(a int); +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + va int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + vb int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body 
pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r1 int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r2 int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r4 int; + begin + NULL; + end; +end pck1; +/ + +DROP package pck1; + +--test procedure duplicate +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + va int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + vb int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vd r4; + vf int; + cursor ve is select * from test_t1; + vd int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + ve int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r1 int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r2 int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r3 int; +begin + NULL; +end; +/ + +create or replace procedure pro1(a int) is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; + vf int; + cursor ve is select * from test_t1; + r4 int; +begin + NULL; +end; +/ + +DROP procedure pro1(int); +DROP table test_t1; + +-- test use type before define +DROP TABLE if exists test_t1; +create table test_t1(a int, b int); +create or replace package pck1 is + type r1 is record (a int, b int); + type r2 is varray(10) of int; + type r3 is table of int; + type r4 is ref cursor; + va r1; + vb r2; + vc r3; 
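+ -- the package bodies that follow use each spec type (r1..r4) for a local
+ -- variable and then redeclare the same name locally, so the type is referenced
+ -- before its local redefinition (the "use type before define" case).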
+ vf int; + cursor ve is select * from test_t1; + procedure p1(a int); +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + va r1; + r1 int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + va r1; + type r1 is record (a int, b int); + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + va r2; + r2 int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + va r3; + r3 int; + begin + NULL; + end; +end pck1; +/ + +create or replace package body pck1 is + procedure p1(a int) is + va r4; + r4 int; + begin + NULL; + end; +end pck1; +/ + +DROP package pck1; +DROP table test_t1; + +-- test name duplicated with an already-referenced package variable +create or replace package pck1 is +TYPE r1 is record (a int, c varchar2(10), d int); +TYPE r2 is varray(10) of int; +va r1; +vb r2; +vc int; +procedure p1; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +vd int := va; +va int; +begin +end; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +vd r2 := vb; +vb int; +begin +end; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +vd int := vc; +vc int; +begin +end; +end pck1; +/ + +DROP PACKAGE pck1; + +--test variable name identical to its type name +create type o1 as (a int, b int); +create or replace package pck1 is +o1 o1; +end pck1; +/ + +create or replace package pck1 is +type o1 is varray(10) of o1; +end pck1; +/ + +create or replace package pck1 is +type o1 is table of o1; +end pck1; +/ + +create or replace package pck1 is +TYPE r1 is record (a int, c varchar2(10), d int); +TYPE r2 is varray(10) of int; +TYPE r3 is table of int; +procedure p1; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +r1 r1; +begin +end; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +r2 r2; +begin +end; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +r3 r3; +begin +end; +end pck1; +/ + +create or replace package body pck1 is +procedure p1 is +r2 r3; +begin +end; +end pck1; +/ + +create or replace procedure pp1 is +type r1 is record (a int, b int); +r1 r1; +begin +null; +end; +/ + +create or replace procedure pp1 is +type r1 is varray(10) of int; +r1 r1; +begin +null; +end; +/ + +create or replace procedure pp1 is +type r1 is table of int; +r1 r1; +begin +null; +end; +/ + +create or replace procedure pp1 is +type r1 is table of int; +r2 r1; +begin +null; +end; +/ + +DROP procedure pp1; +DROP PACKAGE pck1; +DROP TYPE o1 cascade; + +-- test row.col%TYPE as procedure param type +-- (1) va.a%TYPE +create or replace package pck2 is +type ta is record(a int, b int); +va ta; +procedure p1(v1 in va.a%type); +end pck2; +/ + +create or replace package body pck2 is +procedure p1(v1 in va.a%type) is +begin +raise info '%', v1; +end; +end pck2; +/ + +call pck2.p1(11); +DROP package pck2; + +--(2) private va.a%TYPE +create or replace package pck2 is +type ta is record(a int, b int); +procedure p1; +end pck2; +/ + +create or replace package body pck2 is +va ta; + +procedure p2(v1 in va.a%type) is +begin +raise info '%', v1; +end; + +procedure p1 is +begin +p2(11); +end; + +end pck2; +/ + +call pck2.p1(); +DROP package pck2; + +-- (3) pck.va.a%TYPE +create or replace package pck1 is +type ta is record (a int); +va ta; +end pck1; +/ + +create or replace package pck2 is +type ta is record(a int, b int); +procedure p1(v1 in pck1.va.a%type); +end
pck2; +/ + +create or replace package body pck2 is +procedure p1(v1 in pck1.va.a%type) is +begin +raise info '%', v1; +end; +end pck2; +/ + +call pck2.p1(11); +DROP package pck2; +DROP package pck1; + +-- (4) schema.pkg.row.col%TYPE +create or replace package pkg_val_1.pck1 is +type ta is record (a int); +va ta; +end pck1; +/ + +create or replace package pck2 is +type ta is record(a int, b int); +procedure p1(v1 in pkg_val_1.pck1.va.a%type); +end pck2; +/ + +create or replace package body pck2 is +procedure p1(v1 in pkg_val_1.pck1.va.a%type) is +begin +raise info '%', v1; +end; +end pck2; +/ + +call pck2.p1(11); +DROP package pck2; +DROP package pkg_val_1.pck1; + +--test pkg.val.col%TYPE +create or replace package pck1 as +type t1 is record(c1 int,c2 int); +va t1; +end pck1; +/ + +create or replace package pck2 as +vb pck1.va.c1%type; +end pck2; +/ + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +-- declaring a ref cursor type variable in a package is not allowed +drop package if exists pck1; +create or replace package pck1 as +type t1 is ref cursor; +v1 t1; +end pck1; +/ +create or replace package pck1 as +type t1 is ref cursor; +-- v1 t1; +end pck1; +/ +create or replace package body pck1 as +type t2 is ref cursor; +v1 t2; +end pck1; +/ + +drop package if exists pck1; +create or replace package pck1 as +type t3 is ref cursor; +v3 pkg_val_2.pck1.t3; +end pck1; +/ +create or replace package pck1 as +type t3 is ref cursor; +-- v3 pkg_val_2.pck1.t3; +end pck1; +/ +create or replace package body pck1 as +type t4 is ref cursor; +v4 pkg_val_2.pck1.t4; +end pck1; +/ +drop package if exists pck1; + +-- test select into error inside a for-in loop condition +create table tab1(a int,b int); +create or replace package pck1 is +procedure p1; +end pck1; +/ +create or replace package body pck1 is +procedure p1 is +v1 tab1%rowtype; +begin +for rec in (select a,b into v1 from tab1) loop +end loop; +end; +end pck1; +/ + +create or replace package pck1 as +func int; +function func() return int; +end pck1; +/ + +create or replace package pck1 as +func int; +procedure func(); +end pck1; +/ + +create or replace package pck1 as +func constant int; +procedure func(); +end pck1; +/ + +create or replace package pck1 as +type arr is varray(10) of int; +func arr; +procedure func(); +end pck1; +/ + +DROP PACKAGE pck1; +DROP TABLE tab1; +-- test four-part name parsing +-- (1) schema.pkg.row.col +create or replace package pkg_val_1.pck1 is +type r1 is record (a int, b int); +va r1; +end pck1; +/ + +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +v1 int; +begin +select 1 into pkg_val_1.pck1.va.a; +v1 := pkg_val_1.pck1.va.a; +pkg_val_1.pck1.va.b := 11; +raise info '%, %', v1, pkg_val_1.pck1.va; +end; +end pck2; +/ + +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pkg_val_1.pck1; + +-- (2) schema.pkg.array.next +create or replace package pkg_val_1.pck1 is +type t1 is varray(10) of int; +va t1; +type t2 is table of int index by varchar2(10); +vb t2; +end pck1; +/ + +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +begin +pkg_val_1.pck1.va.extend(9); +pkg_val_1.pck1.va(1) := 111; +raise NOTICE '%',pkg_val_1.pck1.va.first; +raise NOTICE '%',pkg_val_1.pck1.va.count(); +pkg_val_1.pck1.va.delete; +raise NOTICE '%',pkg_val_1.pck1.va.first; +raise NOTICE '%',pkg_val_1.pck1.va.count(); + +pkg_val_1.pck1.vb('aa') := 222; +raise NOTICE '%',pkg_val_1.pck1.vb.first; +raise NOTICE
'%',pkg_val_1.pck1.vb.count(); +pkg_val_1.pck1.vb.delete; +raise NOTICE '%',pkg_val_1.pck1.vb.first; +raise NOTICE '%',pkg_val_1.pck1.vb.count(); + +end; +end pck2; +/ + +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pkg_val_1.pck1; + +-- (3) pkg.row.col1.col2 +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is record (a r1); +va r2; +end pck1; +/ + +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +v1 int; +begin +select 1 into pck1.va.a.a; +v1 := pck1.va.a.a; +pck1.va.a.b := 11; +raise info '%, %', v1, pck1.va.a; +end; +end pck2; +/ + +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +-- (4) pkg.row.col.extend +create or replace package pck1 is +type t1 is varray(10) of int; +type t2 is table of int index by varchar2(10); +type r1 is record(a t1, b t2); +va r1; +end pck1; +/ + +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +begin +pck1.va.a.extend(9); +pck1.va.a(1) := 111; +raise NOTICE '%',pck1.va.a.first; +raise NOTICE '%',pck1.va.a.count(); +pck1.va.a.delete; +raise NOTICE '%',pck1.va.a.first; +raise NOTICE '%',pck1.va.a.count(); + +pck1.va.b('aa') := 222; +raise NOTICE '%', pck1.va.b.first; +raise NOTICE '%', pck1.va.b.count(); +pck1.va.b.delete; +raise NOTICE '%', pck1.va.b.first; +raise NOTICE '%', pck1.va.b.count(); + +end; +end pck2; +/ + +call pck2.p1(); + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +-- (5) row.col1.col2.col3 +create or replace package pck2 is +procedure p1(); +end pck2; +/ +create or replace package body pck2 is +procedure p1() is +TYPE r1 is record (a int, b int); +TYPE r2 is record (a r1, b int); +TYPE r3 is record (a r2, b int); +v1 int; +va r3; +begin +select 1 into va.a.a.a; +v1 := va.a.a.a; +va.a.a.b := 11; +raise info '%, %', v1, va.a.a; +end; +end pck2; +/ + +call pck2.p1(); + +DROP PACKAGE pck2; + +--test package variable as default value +create or replace package pck1 as +type t1 is record(c1 int,c2 varchar2); +v1 t1 := (1,'a'); +procedure p1; +end pck1; +/ + +create or replace package body pck1 as +procedure p1 is +begin +v1 := (2, 'b'); +end; +end pck1; +/ + +create or replace package pck2 as +v2 int := pck1.v1.c1; +end pck2; +/ + +declare +a int; +begin +a := pck2.v2; +raise info '%', a; +a := pck1.v1.c1; +raise info '%', a; +pck1.p1(); +a := pck2.v2; +raise info '%', a; +a := pck1.v1.c1; +raise info '%', a; +end; +/ + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +--test package self with schema +create table pkg_val_2.t1(a int, b int); +create or replace package pkg_val_2.pck2 is +va int; +vb int; +procedure p1; +end pck2; +/ +create or replace package body pkg_val_2.pck2 is +procedure p1 is +cursor cur1 is select * from t1 where a between pkg_val_2.pck2.va and pkg_val_2.pck2.vb; +begin +pkg_val_2.pck2.va := 1; +raise info '%', pkg_val_2.pck2.va; +end; +end pck2; +/ + +call pkg_val_2.pck2.p1(); + +DROP PACKAGE pkg_val_2.pck2; +DROP TABLE pkg_val_2.t1; + +-- test package duplicate name +create or replace package pkg_val_1.pckg_test1 as +var varchar2 :='abc'; +var2 int :=4; +procedure p1(c1 int,c2 out varchar2); +end pckg_test1; +/ +create or replace package body pkg_val_1.pckg_test1 as +procedure p1(c1 int,c2 out varchar2) as +begin +c2 :=var||c1; +end; +end pckg_test1; +/ + +create or replace package pkg_val_2.pckg_test1 as +var2 varchar2; +procedure p1(); +end pckg_test1; +/ +create or replace package body pkg_val_2.pckg_test1 as +procedure p1() as 
+begin +pkg_val_1.pckg_test1.p1(pkg_val_1.pckg_test1.var2,var2); +raise info 'var2:%' ,var2; +end; +end pckg_test1; +/ +call pkg_val_2.pckg_test1.p1(); + +DROP PACKAGE pkg_val_2.pckg_test1; +DROP PACKAGE pkg_val_1.pckg_test1; + +-- 1. package variable passed with named notation (=>) as out param +drop package if exists pkg_val_1.pckg_test1; +create or replace package pkg_val_1.pckg_test1 as +var varchar2 :='abc'; +procedure p1(c1 int,c2 out varchar2); +end pckg_test1; +/ +create or replace package body pkg_val_1.pckg_test1 as +procedure p1(c1 int,c2 out varchar2) as +begin +c2 :=var||c1; +end; +end pckg_test1; +/ + +drop package if exists pkg_val_2.pckg_test1; +create or replace package pkg_val_2.pckg_test1 as + +procedure p1(t1 int ,t2 out varchar2); +var2 varchar2; +end pckg_test1; +/ +create or replace package body pkg_val_2.pckg_test1 as +procedure p1(t1 int ,t2 out varchar2) as +begin +pkg_val_1.pckg_test1.p1(c1 => t1,c2 => pkg_val_1.pckg_test1.var); +raise info '%', pkg_val_1.pckg_test1.var; +end; +end pckg_test1; +/ +call pkg_val_2.pckg_test1.p1(3,''); + +drop package if exists pkg_val_1.pckg_test1; +drop package if exists pkg_val_2.pckg_test1; + +-- 2. package variable passed positionally as out param +create or replace package pkg_val_1.pckg_test1 as +var varchar2 :='abc'; +procedure p1(c1 int,c2 out varchar2); +end pckg_test1; +/ +create or replace package body pkg_val_1.pckg_test1 as +procedure p1(c1 int,c2 out varchar2) as +begin +c2 :=var||c1; +end; +end pckg_test1; +/ + +create or replace package pkg_val_2.pckg_test1 as + +procedure p1(t1 int ,t2 out varchar2); +var2 varchar2; +end pckg_test1; +/ +create or replace package body pkg_val_2.pckg_test1 as +procedure p1(t1 int ,t2 out varchar2) as +begin +pkg_val_1.pckg_test1.p1(t1,pkg_val_1.pckg_test1.var); +raise info '%', pkg_val_1.pckg_test1.var; +end; +end pckg_test1; +/ +call pkg_val_2.pckg_test1.p1(3,''); +create schema ss1; +create or replace package ss1.pkg8 is + progname varchar2(60); + workdate_bpc varchar2(10); +end pkg8; +/ + +create or replace package pkg7 is + var1 int:=1; +type t_pfxp_athfcdtl is record (dapcode int); +procedure proc1(); +end pkg7; +/ + +create table testtab (a int, b varchar2(10)); + +create or replace package body pkg7 is + procedure proc1() is + v_pfxp_athfcdtl t_pfxp_athfcdtl; + cursor cur_pfxp_athfcdtl is + select a from testtab where b=ss1.pkg8.workdate_bpc + order by a; + begin + raise notice 'pkg7'; + end; +end pkg7; +/ + +call pkg7.proc1(); + +drop table testtab; +drop package pkg7; +drop package ss1.pkg8; +drop schema ss1; +drop package if exists pkg_val_1.pckg_test1; +drop package if exists pkg_val_2.pckg_test1; + +--test package cursor +create table t1(a int, b int); +insert into t1 values(1,2); +insert into t1 values(2,4); +insert into t1 values(3,6); +create or replace package pck1 is +cursor c1 is select * from t1; +end pck1; +/ +create or replace package pck2 is +procedure p1; +end pck2;
+/ +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +pck1.c1 := 1; +raise info 'rowcount: %', pck1.c1%ROWCOUNT; +raise info 'isopend: %', pck1.c1%isopen; +raise info 'isfound: %', pck1.c1%found; +end; +end pck2; +/ + +create or replace package body pck2 is +procedure p1 as +type r1 is record (a int, b int); +va r1; +begin +pck1.c1 := 1; +raise info 'rowcount: %', pck1.c2%ROWCOUNT; +raise info 'isopend: %', pck1.c2%isopen; +raise info 'isfound: %', pck1.c2%found; +end; +end pck2; +/ + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +-- test comment errors +CREATE OR REPLACE PROCEDURE test1 +IS /*aa*/ +a INT:=1; /*aa*/ +c INT:=2; /*aa*/ +BEGIN /*aa*/ +IF a<>1 THEN /*aa*/ +c:=3; /*aa*/ +END IF; /*aa*/ +END; /*aa*/ +/ + +CREATE TABLE t1(a INT, b INT); +CREATE OR REPLACE PROCEDURE test2 IS +va INT; +BEGIN +SELECT /* aa */ 1 INTO va; +RAISE INFO '%', va; +INSERT /* aa */ INTO t1 VALUES(3,2); +INSERT /* aa */ INTO t1 VALUES(3,3); +UPDATE /* aa */ t1 SET a = 1 WHERE b =2; +DELETE /* aa */ FROM t1 WHERE a = 1; +END; +/ + +CALL test2(); +SELECT * FROM t1; + +DROP PROCEDURE test1; +DROP PROCEDURE test2; +DROP TABLE t1; + +-- test create matview +drop table if exists materialized_view_tb; +create table materialized_view_tb(c1 int,c2 int); +create or replace package materialized_view_package as +procedure materialized_view_proc(); +end materialized_view_package; +/ +create or replace package body materialized_view_package AS +procedure materialized_view_proc() AS +begin +CREATE MATERIALIZED VIEW my_mv AS SELECT * FROM materialized_view_tb; +INSERT INTO materialized_view_tb VALUES(1,1),(2,2); +REFRESH MATERIALIZED VIEW my_mv; +end; +end materialized_view_package; +/ + +call materialized_view_package.materialized_view_proc(); + +DROP MATERIALIZED VIEW my_mv; +DROP PACKAGE materialized_view_package; +DROP TABLE materialized_view_tb; + +-- test drop package memory leak when ref other package variable +create type rec is (col1 varchar2,col2 varchar2); +create or replace package pckg_test as +type t_arr is table of rec; +type t_arr1 is table of varchar2; +v_arr t_arr; +v_arr1 t_arr1; +v_rec rec; +end pckg_test; +/ +create or replace package pckg_test1 as +procedure proc_test(i_var1 in varchar2,i_var2 in varchar2); +end pckg_test1; +/ + +create or replace package body pckg_test1 as +procedure proc_test(i_var1 in varchar2,i_var2 in varchar2) as +v_var1 varchar2; +begin +pckg_test.v_arr(1) := rec(1,2); +pckg_test.v_arr1(1) := 1; +pckg_test.v_rec.col1 :=1; +end; +end pckg_test1; +/ +call pckg_test1.proc_test('1','1'); + +drop package if exists pckg_test; +drop package if exists pckg_test1; +drop type if exists rec; + +-- test drop loop ref variable package +create or replace package pckg_test1 as +procedure p1; +var varchar2 := 'pck1'; +end pckg_test1; +/ + + +create or replace package pckg_test2 as +procedure pp1; +var2 varchar2 := 'pck2'; +end pckg_test2; +/ + +create or replace package pckg_test3 as +procedure ppp1; +var3 varchar2 := 'pck3'; +end pckg_test3; +/ + +create or replace package body pckg_test1 as +procedure p1 as +begin +raise info '%', pckg_test3.var3; +end; +end pckg_test1; +/ + +create or replace package body pckg_test2 as +procedure pp1 as +begin +raise info '%', pckg_test1.var; +end; +end pckg_test2; +/ + + +create or replace package body pckg_test3 as +procedure ppp1 as +begin +raise info '%', pckg_test2.var2; +end; +end pckg_test3; +/ + +call pckg_test3.ppp1(); +call pckg_test2.pp1(); +call pckg_test1.p1(); + + +drop package if exists 
pckg_test1; +drop package if exists pckg_test2; +drop package if exists pckg_test3; +-- test schema.pkg.cursor +create table t1 (a int, b int); + +create or replace package pck1 as +cursor c1 for select * from t1; +procedure p1; +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +va t1; +begin +open pkg_val_2.pck1.c1; +close pkg_val_2.pck1.c1; +end; +end pck1; +/ + +DROP PACKAGE pck1; + +-- test auto cursor +create or replace package pck2 as +cursor c1 for select * from t1; +procedure p1; +end pck2; +/ +create or replace package pck1 as +cursor c1 for select * from t1; +procedure p1; +end pck1; +/ + +-- cross package cursor +create or replace package body pck1 as +procedure p1 as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open pck2.c1; +close pck2.c1; +end; +end pck1; +/ + +-- own package cursor +create or replace package body pck1 as +procedure p1 as +PRAGMA AUTONOMOUS_TRANSACTION; +va t1; +begin +open c1; +close c1; +end; +end pck1; +/ + + +DROP PACKAGE pck1; +DROP PACKAGE pck2; +DROP TABLE t1; +reset behavior_compat_options; + +-- ref package cursor attr at first +create table t1(a int, b int); +create or replace package pck1 is +cursor c1 is select * from t1; +end pck1; +/ +begin +raise info 'isopend: %', pck1.c1%isopen; +end +/ +drop package pck1; +drop table t1; + +-- package cursor attribute reset +drop table t1; +create table t1 (a int, b int); +insert into t1 values (1,2); + +create or replace package pck1 as +cursor c1 for select * from t1; +procedure p1; +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +declare +va t1; +begin +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +open c1; +fetch c1 into va; +raise info 'va:%', va; +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +end; +end pck1; +/ + +call pck1.p1(); +call pck1.p1(); + +drop package pck1; +drop table t1; + +-- package cursor with arguments +drop table t1; +create table t1 (a int, b int); +insert into t1 values (1,2); +insert into t1 values (3,4); +create or replace package pck1 as +cursor c1(va int) for select * from t1 where a < va; +procedure p1; +end pck1; +/ + +create or replace procedure pp1() as +va t1; +begin +fetch pck1.c1 into va; +raise info 'va:%', va; +raise info 'isopend: %', pck1.c1%isopen; +raise info 'rowcount: %', pck1.c1%rowcount; +end; +/ + +create or replace package body pck1 as +procedure p1 as +declare +va t1; +begin +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +open pck1.c1(10); +fetch pck1.c1 into va; +raise info 'va:%', va; +raise info 'isopend: %', c1%isopen; +raise info 'rowcount: %', c1%rowcount; +pp1(); +end; +end pck1; +/ + +call pck1.p1(); + +declare +va t1; +begin +open pck1.c1(4); +fetch pck1.c1 into va; +end; +/ + +drop procedure pp1; +drop package pck1; +drop table t1; + +-- clean +DROP SCHEMA IF EXISTS pkg_val_1 CASCADE; +DROP SCHEMA IF EXISTS pkg_val_2 CASCADE; + diff --git a/src/test/regress/sql/hw_partition_add_drop_partition.sql b/src/test/regress/sql/hw_partition_add_drop_partition.sql new file mode 100644 index 000000000..4e87112e5 --- /dev/null +++ b/src/test/regress/sql/hw_partition_add_drop_partition.sql @@ -0,0 +1,514 @@ +DROP SCHEMA hw_partition_add_drop_partition CASCADE; +CREATE SCHEMA hw_partition_add_drop_partition; +SET CURRENT_SCHEMA TO hw_partition_add_drop_partition; + +-- +----range table---- +-- +--prepare +CREATE TABLE range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id 
CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') +); +INSERT INTO range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_sales_idx ON range_sales(product_id) LOCAL; + +--check for add partition +--fail, cannot add a subpartition to a non-subpartitioned table +ALTER TABLE range_sales ADD PARTITION time_temp1 VALUES LESS THAN ('2013-01-01') + ( + SUBPARTITION time_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION time_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION time_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION time_temp1_part4 VALUES LESS THAN (1200) + ); +--fail, out of range +ALTER TABLE range_sales ADD PARTITION time_temp2 VALUES LESS THAN ('2011-06-01'); +--fail, invalid format +ALTER TABLE range_sales ADD PARTITION time_temp3 VALUES ('2013-01-01'); +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01'); +--success, add 1 partition +ALTER TABLE range_sales ADD PARTITION time_end VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_sales ADD PARTITION time_temp4 VALUES LESS THAN ('2014-01-01'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_sales + +--check for drop partition (for) +--success, drop partition time_2009 +ALTER TABLE range_sales DROP PARTITION time_2009; +--success, drop partition time_2011 +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01'); +--fail, invalid type +ALTER TABLE range_sales DROP PARTITION FOR (1); +--fail, number of values does not match the number of partition keys +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01', 1); +--fail, cannot drop a subpartition from a non-subpartitioned table +ALTER TABLE range_sales DROP SUBPARTITION FOR ('2011-06-01', 1); +--success, drop partition time_2012 ('2011-06-01' now falls in time_2012 since time_2011 was dropped) +ALTER TABLE range_sales DROP PARTITION FOR ('2011-06-01'); + +--check for ok after drop +SELECT count(*) FROM range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_sales_idx' + AND c1.relnamespace=n1.oid + AND
(p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_sales + +-- +----range table, multiple partition keys---- +-- +--prepare +CREATE TABLE range2_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id, product_id) +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01', 200), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01', 500), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01', 800), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01', 1200) +); +INSERT INTO range2_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range2_sales_idx ON range2_sales(product_id) LOCAL; + +--check for add partition +--fail, cannot add a subpartition to a non-subpartitioned table +ALTER TABLE range2_sales ADD PARTITION time_temp1 VALUES LESS THAN ('2013-01-01', 1500) + ( + SUBPARTITION time_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION time_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION time_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION time_temp1_part4 VALUES LESS THAN (1200) + ); +--fail, out of range +ALTER TABLE range2_sales ADD PARTITION time_temp2 VALUES LESS THAN ('2011-06-01', 100); +--fail, invalid format +ALTER TABLE range2_sales ADD PARTITION time_temp3 VALUES ('2013-01-01', 1500); +--success, add 1 partition +ALTER TABLE range2_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01', 1500); +--success, add 1 partition +ALTER TABLE range2_sales ADD PARTITION time_end VALUES LESS THAN (MAXVALUE, MAXVALUE); +--fail, out of range +ALTER TABLE range2_sales ADD PARTITION time_temp4 VALUES LESS THAN ('2014-01-01', 2000); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range2_sales + +--check for drop partition (for) +--success, drop partition time_2009 +ALTER TABLE range2_sales DROP PARTITION time_2009; +--success, drop partition time_2011 +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01', 600); +--fail, invalid type +ALTER TABLE range2_sales DROP PARTITION FOR (1, 100); +--fail, number of values does not match the number of partition keys +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01'); +--fail, cannot drop a subpartition from a non-subpartitioned table +ALTER TABLE range2_sales DROP SUBPARTITION FOR ('2011-06-01', 1); +--success, drop partition time_2012 (('2011-06-01', 100) now falls in time_2012 since time_2011 was dropped) +ALTER TABLE range2_sales DROP PARTITION FOR ('2011-06-01', 100); + +--check for ok after drop +SELECT count(*) FROM range2_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales' + AND c1.relnamespace=n1.oid + AND
n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range2_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range2_sales + +-- +----interval table---- +-- +--prepare +CREATE TABLE interval_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (time_id) INTERVAL ('1 year') +( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01') +); +INSERT INTO interval_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2009-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX interval_sales_idx ON interval_sales(product_id) LOCAL; + +--check for add partition +--fail, cannot add a subpartition to a non-subpartitioned table +ALTER TABLE interval_sales ADD PARTITION time_temp1 VALUES LESS THAN ('2013-01-01') + ( + SUBPARTITION time_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION time_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION time_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION time_temp1_part4 VALUES LESS THAN (1200) + ); +--fail, ADD PARTITION is not supported on an interval-partitioned table +ALTER TABLE interval_sales ADD PARTITION time_2012 VALUES LESS THAN ('2013-01-01'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ interval_sales + +--check for drop partition (for) +--success, drop partition time_2009 +ALTER TABLE interval_sales DROP PARTITION time_2009; +--success, drop auto-created interval partition sys_p1 +ALTER TABLE interval_sales DROP PARTITION FOR ('2011-06-01'); +--fail, invalid type +ALTER TABLE interval_sales DROP PARTITION FOR (1); +--fail, number of values does not match the number of partition keys +ALTER TABLE interval_sales DROP PARTITION FOR ('2010-06-01', 1); +--fail, cannot drop a subpartition from a non-subpartitioned table +ALTER TABLE interval_sales DROP SUBPARTITION FOR ('2010-06-01', 1); + +--check for ok after drop +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='interval_sales_idx' + AND c1.relnamespace=n1.oid + AND
n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ interval_sales + +-- +----list table---- +-- +--prepare +CREATE TABLE list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) +( + PARTITION channel1 VALUES ('0', '1', '2'), + PARTITION channel2 VALUES ('3', '4', '5'), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') +); +INSERT INTO list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_sales_idx ON list_sales(product_id) LOCAL; + +--check for add partition +--fail, cannot add a subpartition to a non-subpartitioned table +ALTER TABLE list_sales ADD PARTITION channel_temp1 VALUES ('X') + ( + SUBPARTITION channel_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION channel_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION channel_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION channel_temp1_part4 VALUES LESS THAN (1200) + ); +--fail, value '8' already belongs to partition channel4 +ALTER TABLE list_sales ADD PARTITION channel_temp2 VALUES ('8', 'X'); +--fail, duplicate value in the boundary list +ALTER TABLE list_sales ADD PARTITION channel_temp3 VALUES ('X', 'X', 'Z'); +--fail, invalid format +ALTER TABLE list_sales ADD PARTITION channel_temp4 VALUES LESS THAN('X'); +--success, add 1 partition +ALTER TABLE list_sales ADD PARTITION channel5 VALUES ('X', 'Z'); +--success, add 1 partition +ALTER TABLE list_sales ADD PARTITION channel_default VALUES (DEFAULT); +--fail, a DEFAULT partition already exists +ALTER TABLE list_sales ADD PARTITION channel_temp5 VALUES ('P'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_sales + +--check for drop partition (for) +--success, drop partition channel2 +ALTER TABLE list_sales DROP PARTITION channel2; +--success, drop partition channel3 +ALTER TABLE list_sales DROP PARTITION FOR ('6'); +--fail, invalid type +ALTER TABLE list_sales DROP PARTITION FOR (10); +--fail, number of values does not match the number of partition keys +ALTER TABLE list_sales DROP PARTITION FOR ('6', 1); +--fail, cannot drop a subpartition from a non-subpartitioned table +ALTER TABLE list_sales DROP SUBPARTITION FOR ('6', 1); +--success, drop partition channel_default ('6' now maps to channel_default since channel3 was dropped) +ALTER TABLE list_sales DROP PARTITION FOR ('6'); + +--check for ok after drop +SELECT count(*) FROM list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy,
p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_sales + +-- +----hash table---- +-- +--prepare +CREATE TABLE hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) +( + PARTITION product1, + PARTITION product2, + PARTITION product3, + PARTITION product4 +); +INSERT INTO hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_sales_idx ON hash_sales(product_id) LOCAL; + +--check for add partition +--fail, ADD PARTITION is not supported on a hash-partitioned table +ALTER TABLE hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_part1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_part2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_part3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_part4 VALUES LESS THAN (1200) + ); +--fail, ADD PARTITION is not supported on a hash-partitioned table +ALTER TABLE hash_sales ADD PARTITION product_temp2; +--fail, invalid format +ALTER TABLE hash_sales ADD PARTITION product_temp3 VALUES LESS THAN('X'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_sales + +--check for drop partition (for) +--fail, DROP PARTITION is not supported on a hash-partitioned table +ALTER TABLE hash_sales DROP PARTITION product2; +--fail, DROP PARTITION is not supported on a hash-partitioned table +ALTER TABLE hash_sales DROP PARTITION FOR (0); +--fail, DROP PARTITION is not supported on a hash-partitioned table +ALTER TABLE hash_sales DROP PARTITION FOR (0, 0); +--fail, cannot drop a subpartition from a non-subpartitioned table +ALTER TABLE hash_sales DROP SUBPARTITION FOR(0, 0); +--fail, cannot drop a subpartition from a non-subpartitioned table +ALTER TABLE hash_sales DROP SUBPARTITION FOR(0); + +--check for ok after drop +SELECT count(*) FROM hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_sales + +create table test_range_pt (a int primary key, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3
values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into test_range_pt values(1),(2001); + +create view vp1 as select * from test_range_pt partition for (1); + +alter table test_range_pt drop partition p1; + +create table tt ( a int, b int,c int); + +alter table test_range_pt exchange partition (p1) with table tt update global index; + +drop view vp1; +drop table test_range_pt; +drop table tt; + +--finish +DROP TABLE range_sales; +DROP TABLE range2_sales; +DROP TABLE interval_sales; +DROP TABLE list_sales; +DROP TABLE hash_sales; + +DROP SCHEMA hw_partition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/hw_partition_hash_dql.sql b/src/test/regress/sql/hw_partition_hash_dql.sql index 65e576d44..151ee78de 100644 --- a/src/test/regress/sql/hw_partition_hash_dql.sql +++ b/src/test/regress/sql/hw_partition_hash_dql.sql @@ -1,579 +1,579 @@ - --- ----- test partition for (null) --- - --- 1. test ordinary - -- 1.1 range partitioned table - -- 1.2 interval partitioned table --- 2. test data column of partition key value - -- 2.1 text - -- 2.2 timestamp --- 3. MAXVALUE - -- 3.1 MAXVALUE is first column - -- 3.2 MAXVALUE is second column - -CREATE schema FVT_COMPRESS_QWER; -set search_path to FVT_COMPRESS_QWER; - - --- 1. test ordinary ----- 1.1 range partitioned table -create table test_partition_for_null_hash (a int, b int, c int, d int) -partition by hash (a) -( - partition test_partition_for_null_hash_p1, - partition test_partition_for_null_hash_p2, - partition test_partition_for_null_hash_p3 -); - -insert into test_partition_for_null_hash values (0, 0, 0, 0); -insert into test_partition_for_null_hash values (1, 1, 1, 1); -insert into test_partition_for_null_hash values (5, 5, 5, 5); - --- failed: inserted partition key does not map to any table partition -insert into test_partition_for_null_hash values (null, null, null, null); --- success -insert into test_partition_for_null_hash values (0, null, null, null); - - --- failed: The partition number is invalid or out-of-range -select * from test_partition_for_null_hash partition for (null) order by 1, 2, 3, 4; --- success -select * from test_partition_for_null_hash partition for (0) order by 1, 2, 3, 4; - - --- failed: The partition number is invalid or out-of-range -alter table test_partition_for_null_hash rename partition for (null) to test_partition_for_null_hash_part1; --- success -alter table test_partition_for_null_hash rename partition for (0) to test_partition_for_null_hash_part1; --- success -select * from test_partition_for_null_hash partition (test_partition_for_null_hash_part1) order by 1, 2, 3, 4; - -alter table test_partition_for_null_hash drop partition for (NULL); -alter table test_partition_for_null_hash drop partition for (0); - -CREATE TABLE select_hash_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by hash (C_INT) -( - partition select_hash_partition_000_3_1, - partition select_hash_partition_000_3_2 -); - -create index select_list_partition_table_index_000_3 ON select_hash_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) 
local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); -create view select_list_partition_table_view_000_3 as select * from select_hash_partition_table_000_3; - -INSERT INTO select_hash_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_hash_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_hash_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_hash_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_hash_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_hash_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_hash_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -select * from select_hash_partition_table_000_3 partition for (NULL) order by C_INT; - -alter table select_hash_partition_table_000_3 rename partition for (NULL) to select_hash_partition_table_000_3_p1; - -alter table select_hash_partition_table_000_3 drop partition for (NULL); - - -CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) -partition by hash(ID) -( - partition partition_wise_join_table_001_1_1, - partition partition_wise_join_table_001_1_2 -) ; - -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || 
generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); - -create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; -create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; -create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; -create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; - -CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) -partition by hash(ID) -( - partition partition_wise_join_table_001_1_1, - partition partition_wise_join_table_001_1_2 -); - -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); - -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; - -SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; - -ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; -ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; - -SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; - -CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) -PARTITION BY hash(A) -( - PARTITION HW_PARTITION_SELECT_RT_P1, - PARTITION HW_PARTITION_SELECT_RT_P2, - PARTITION HW_PARTITION_SELECT_RT_P3 -); -EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; - -CREATE TABLE DTS2013112504143_TEST1(A INT) PARTITION BY HASH (A)(PARTITION DTS2013112504143_TEST1_P1); -CREATE TABLE DTS2013112504143_TEST2(A INT); -SELECT * FROM DTS2013112504143_TEST1 UNION ALL SELECT * FROM DTS2013112504143_TEST2 order by 1; - -CREATE TABLE select_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by hash (C_INT) -( - partition select_partition_000_3_1, - partition select_partition_000_3_2 -); - -create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); -create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; - -INSERT INTO select_partition_table_000_3 
VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; - -select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; - -create table hw_partition_select_rt5 (a int, b int, c int) -partition by hash(c) -( -partition hw_partition_select_rt5_p1 -); - -alter table hw_partition_select_rt5 drop column b; - -update hw_partition_select_rt5 set c=0 where c=-1; - -drop schema FVT_COMPRESS_QWER cascade; - ---begin: these test are related to explain output change about partition table. --- major change is as below - --1. - --Selected Partitions: 1 2 6 7 8 9 - -- \|/ - --Selected Partitions: 1..2,6..9 - --2. 
- --Selected Partitions: 1 3 5 7 9 - -- \|/ - --Selected Partitions: 1,3,5,7,9 -CREATE schema FVT_COMPRESS; -set search_path to FVT_COMPRESS; - - -create table test_explain_format_on_part_table (id int) -partition by hash(id) -( -partition p1, -partition p2, -partition p3, -partition p4, -partition p5, -partition p6, -partition p7, -partition p8, -partition p9 -); --- two continous segments, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id <15 or id >51; --- no continous segment, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; --- two continous segments, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id <15 or id >51; --- no continous segment, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; - -drop table test_explain_format_on_part_table; ---end: these test are related to explain output change about partition table. - -create table hw_partition_select_parttable ( - c1 int, - c2 int, - c3 text) -partition by hash(c1) -(partition hw_partition_select_parttable_p1, - partition hw_partition_select_parttable_p2, - partition hw_partition_select_parttable_p3); - - insert into hw_partition_select_parttable values (10,40,'abc'); - insert into hw_partition_select_parttable(c1,c2) values (100,20); - insert into hw_partition_select_parttable values(300,200); - -select * from hw_partition_select_parttable order by 1, 2, 3; - -select c1 from hw_partition_select_parttable order by 1; - -select c1,c2 from hw_partition_select_parttable order by 1, 2; - -select c2 from hw_partition_select_parttable order by 1; - -select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; - -select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; - -select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; - -create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) -partition by hash(c1) -(partition t_select_datatype_int32_p1, - partition t_select_datatype_int32_p2, - partition t_select_datatype_int32_p3, - partition t_select_datatype_int32_p4); - -insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); - ---partition select for int32 ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; - 
---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 
AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4;
-
---IS NULL
---success
-select * from t_select_datatype_int32 where
- (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
- (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) OR
- t_select_datatype_int32.c4 IS NULL
- ORDER BY 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where
- (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
- (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) AND
- t_select_datatype_int32.c4 IS NULL
- ORDER BY 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where
- t_select_datatype_int32.c4 IS NULL AND
- (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
- (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100)
- ORDER BY 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where
- t_select_datatype_int32.c4 IS NULL OR
-
(t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
- (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100)
- ORDER BY 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where
- (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
- (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND
- (t_select_datatype_int32.c2<>100)
- ORDER BY 1, 2, 3, 4;
-
---success
-select * from t_select_datatype_int32 where
- (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
- (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100)
- ORDER BY 1, 2, 3, 4;
-
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--- check select contarins partition
-
---
----- check select from range partition
---
-
-create table hw_partition_select_ordinary_table (a int, b int);
-
-create table test_select_hash_partition (a int, b int)
-partition by hash(a)
-(
- partition test_select_hash_partition_p1,
- partition test_select_hash_partition_p2,
- partition test_select_hash_partition_p3
-);
-
-insert into test_select_hash_partition values(2);
-
---success
-select * from test_select_hash_partition partition (test_select_hash_partition_p1) order by 1, 2;
-
---success
-select * from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1, 2;
-
---success
-select * from test_select_hash_partition partition (test_select_hash_partition_p3) order by 1, 2;
-
---success
-select * from test_select_hash_partition partition (test_select_hash_partition_p4) order by 1, 2;
-
---success
-select a from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1;
-
---success
-select a from test_select_hash_partition partition for (0) order by 1;
-
---success
-select a from test_select_hash_partition partition for (1) order by 1;
-
---success
-select a from test_select_hash_partition partition for (2) order by 1;
-
---success
-select a from test_select_hash_partition partition for (5) order by 1;
-
---success
-select a from test_select_hash_partition partition for (8) order by 1;
-
--- fail: table is not partitioned table
-select a from hw_partition_select_ordinary_table partition (test_select_hash_partition_p2);
-
--- fail: table is not partitioned table
-select a from hw_partition_select_ordinary_table partition for (2);
-
---
---
-CREATE TABLE hw_partition_select_test(C_INT INTEGER)
- partition by hash (C_INT)
-(
- partition hw_partition_select_test_part_1,
- partition hw_partition_select_test_part_2,
- partition hw_partition_select_test_part_3
-);
-insert into hw_partition_select_test values(111);
-insert into hw_partition_select_test values(555);
-insert into hw_partition_select_test values(888);
-
-select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a;
-
-create table hash_partitioned_table (a int)
-partition by hash(a)
-(
- partition hash_partitioned_table_p1,
- partition hash_partitioned_table_p2,
- partition hash_partitioned_table_p3
-);
-
-insert into hash_partitioned_table values (1);
-insert into hash_partitioned_table values (2);
-insert into hash_partitioned_table values (5);
-insert into hash_partitioned_table values (6);
-
-with tmp1 as (select a from hash_partitioned_table partition for (2)) select a from tmp1 order by 1;
-
-drop schema FVT_COMPRESS cascade;
-
-
-
-
-
+
+--
+---- test partition for (null)
+--
+
+-- 1.
test ordinary
+ -- 1.1 hash partitioned table
+ -- 1.2 interval partitioned table
+-- 2. test data column of partition key value
+ -- 2.1 text
+ -- 2.2 timestamp
+-- 3. MAXVALUE
+ -- 3.1 MAXVALUE is first column
+ -- 3.2 MAXVALUE is second column
+
+CREATE schema FVT_COMPRESS_QWER;
+set search_path to FVT_COMPRESS_QWER;
+
+
+-- 1. test ordinary
+---- 1.1 hash partitioned table
+create table test_partition_for_null_hash (a int, b int, c int, d int)
+partition by hash (a)
+(
+ partition test_partition_for_null_hash_p1,
+ partition test_partition_for_null_hash_p2,
+ partition test_partition_for_null_hash_p3
+);
+
+insert into test_partition_for_null_hash values (0, 0, 0, 0);
+insert into test_partition_for_null_hash values (1, 1, 1, 1);
+insert into test_partition_for_null_hash values (5, 5, 5, 5);
+
+-- failed: inserted partition key does not map to any table partition
+insert into test_partition_for_null_hash values (null, null, null, null);
+-- success
+insert into test_partition_for_null_hash values (0, null, null, null);
+
+
+-- failed: The partition number is invalid or out-of-range
+select * from test_partition_for_null_hash partition for (null) order by 1, 2, 3, 4;
+-- success
+select * from test_partition_for_null_hash partition for (0) order by 1, 2, 3, 4;
+
+
+-- failed: The partition number is invalid or out-of-range
+alter table test_partition_for_null_hash rename partition for (null) to test_partition_for_null_hash_part1;
+-- success
+alter table test_partition_for_null_hash rename partition for (0) to test_partition_for_null_hash_part1;
+-- success
+select * from test_partition_for_null_hash partition (test_partition_for_null_hash_part1) order by 1, 2, 3, 4;
+
+alter table test_partition_for_null_hash drop partition for (NULL);
+alter table test_partition_for_null_hash drop partition for (0);
+
+CREATE TABLE select_hash_partition_table_000_3(
+ C_CHAR_1 CHAR(1),
+ C_CHAR_2 CHAR(10),
+ C_CHAR_3 CHAR(102400),
+ C_VARCHAR_1 VARCHAR(1),
+ C_VARCHAR_2 VARCHAR(10),
+ C_VARCHAR_3 VARCHAR(1024),
+ C_INT INTEGER,
+ C_BIGINT BIGINT,
+ C_SMALLINT SMALLINT,
+ C_FLOAT FLOAT,
+ C_NUMERIC numeric(10,5),
+ C_DP double precision,
+ C_DATE DATE,
+ C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE,
+ C_TS_WITH TIMESTAMP WITH TIME ZONE )
+ partition by hash (C_INT)
+(
+ partition select_hash_partition_000_3_1,
+ partition select_hash_partition_000_3_2
+);
+
+create index select_list_partition_table_index_000_3 ON select_hash_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3);
+create view select_list_partition_table_view_000_3 as select * from select_hash_partition_table_000_3;
+
+INSERT INTO select_hash_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01');
+INSERT INTO select_hash_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02');
+INSERT INTO select_hash_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03');
+INSERT INTO select_hash_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04');
+INSERT INTO select_hash_partition_table_000_3
VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_hash_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_hash_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_hash_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_hash_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +select * from select_hash_partition_table_000_3 partition for (NULL) order by C_INT; + +alter table select_hash_partition_table_000_3 rename partition for (NULL) to select_hash_partition_table_000_3_p1; + +alter table select_hash_partition_table_000_3 drop partition for (NULL); + + +CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) +partition by hash(ID) +( + partition partition_wise_join_table_001_1_1, + partition partition_wise_join_table_001_1_2 +) ; + +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); + +create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; +create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; +create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; +create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; + +CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) +partition by hash(ID) +( + partition partition_wise_join_table_001_1_1, + partition partition_wise_join_table_001_1_2 +); + +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 
66# Science 4 Street of Xi'an of China $$,10000); +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); + +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; + +SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; +ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; + +SELECT A.ID,B.ID,A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) +PARTITION BY hash(A) +( + PARTITION HW_PARTITION_SELECT_RT_P1, + PARTITION HW_PARTITION_SELECT_RT_P2, + PARTITION HW_PARTITION_SELECT_RT_P3 +); +EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; + +CREATE TABLE TESTTABLE_TEST1(A INT) PARTITION BY HASH (A)(PARTITION TESTTABLE_TEST1_P1); +CREATE TABLE TESTTABLE_TEST2(A INT); +SELECT * FROM TESTTABLE_TEST1 UNION ALL SELECT * FROM TESTTABLE_TEST2 order by 1; + +CREATE TABLE select_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by hash (C_INT) +( + partition select_partition_000_3_1, + partition select_partition_000_3_2 +); + +create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); +create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; + +INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_partition_table_000_3 
VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07');
+INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08');
+INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09');
+INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08');
+INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09');
+INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09');
+INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09');
+
+explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5;
+
+select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5;
+
+create table hw_partition_select_rt5 (a int, b int, c int)
+partition by hash(c)
+(
+partition hw_partition_select_rt5_p1
+);
+
+alter table hw_partition_select_rt5 drop column b;
+
+update hw_partition_select_rt5 set c=0 where c=-1;
+
+drop schema FVT_COMPRESS_QWER cascade;
+
+--begin: these tests are related to explain output change about partition table.
+-- major change is as below
+ --1.
+ --Selected Partitions: 1 2 6 7 8 9
+ -- \|/
+ --Selected Partitions: 1..2,6..9
+ --2.
+ --Selected Partitions: 1 3 5 7 9
+ -- \|/
+ --Selected Partitions: 1,3,5,7,9
+CREATE schema FVT_COMPRESS;
+set search_path to FVT_COMPRESS;
+
+
+create table test_explain_format_on_part_table (id int)
+partition by hash(id)
+(
+partition p1,
+partition p2,
+partition p3,
+partition p4,
+partition p5,
+partition p6,
+partition p7,
+partition p8,
+partition p9
+);
+-- two continuous segments, text format
+explain (verbose on, costs off)
+ select * from test_explain_format_on_part_table where id <15 or id >51;
+-- no continuous segment, text format
+explain (verbose on, costs off)
+ select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85;
+-- two continuous segments, non-text format
+explain (verbose on, costs off, FORMAT JSON)
+ select * from test_explain_format_on_part_table where id <15 or id >51;
+-- no continuous segment, non-text format
+explain (verbose on, costs off, FORMAT JSON)
+ select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85;
+
+drop table test_explain_format_on_part_table;
+--end: these tests are related to explain output change about partition table.
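+
+-- illustrative addition (a minimal sketch, not part of the original suite):
+-- when every partition is selected, pruning yields one contiguous segment, so
+-- under the notation described above the plan is expected to print
+-- "Selected Partitions: 1..3" rather than "1 2 3". The table name below is
+-- hypothetical and mirrors the hash layout used above.
+create table test_explain_format_all_selected (id int)
+partition by hash(id)
+(
+partition sp1,
+partition sp2,
+partition sp3
+);
+-- one continuous segment covering all partitions, text format
+explain (verbose on, costs off)
+ select * from test_explain_format_all_selected;
+drop table test_explain_format_all_selected;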
+ +create table hw_partition_select_parttable ( + c1 int, + c2 int, + c3 text) +partition by hash(c1) +(partition hw_partition_select_parttable_p1, + partition hw_partition_select_parttable_p2, + partition hw_partition_select_parttable_p3); + + insert into hw_partition_select_parttable values (10,40,'abc'); + insert into hw_partition_select_parttable(c1,c2) values (100,20); + insert into hw_partition_select_parttable values(300,200); + +select * from hw_partition_select_parttable order by 1, 2, 3; + +select c1 from hw_partition_select_parttable order by 1; + +select c1,c2 from hw_partition_select_parttable order by 1, 2; + +select c2 from hw_partition_select_parttable order by 1; + +select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; + +select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; + +select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; + +create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) +partition by hash(c1) +(partition t_select_datatype_int32_p1, + partition t_select_datatype_int32_p2, + partition t_select_datatype_int32_p3, + partition t_select_datatype_int32_p4); + +insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); + +--partition select for int32 +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where 
t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * 
from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4;
+
+--IS NULL
+--success
+select * from t_select_datatype_int32 where
+ (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
+ (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) OR
+ t_select_datatype_int32.c4 IS NULL
+ ORDER BY 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where
+ (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
+ (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) AND
+ t_select_datatype_int32.c4 IS NULL
+ ORDER BY 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where
+ t_select_datatype_int32.c4 IS NULL AND
+ (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
+ (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100)
+ ORDER BY 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where
+ t_select_datatype_int32.c4 IS NULL OR
+ (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
+ (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100)
+ ORDER BY 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where
+ (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
+ (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND
+ (t_select_datatype_int32.c2<>100)
+ ORDER BY 1, 2, 3, 4;
+
+--success
+select * from t_select_datatype_int32 where
+ (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND
+ (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100)
+ ORDER BY 1, 2, 3, 4;
+
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+-- check select contains partition
+
+--
+---- check select from hash partition
+--
+
+create table hw_partition_select_ordinary_table (a int, b int);
+
+create table test_select_hash_partition (a int, b int)
+partition by hash(a)
+(
+ partition test_select_hash_partition_p1,
+ partition test_select_hash_partition_p2,
+ partition
test_select_hash_partition_p3
+);
+
+insert into test_select_hash_partition values(2);
+
+--success
+select * from test_select_hash_partition partition (test_select_hash_partition_p1) order by 1, 2;
+
+--success
+select * from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1, 2;
+
+--success
+select * from test_select_hash_partition partition (test_select_hash_partition_p3) order by 1, 2;
+
+-- fail: partition test_select_hash_partition_p4 does not exist
+select * from test_select_hash_partition partition (test_select_hash_partition_p4) order by 1, 2;
+
+--success
+select a from test_select_hash_partition partition (test_select_hash_partition_p2) order by 1;
+
+--success
+select a from test_select_hash_partition partition for (0) order by 1;
+
+--success
+select a from test_select_hash_partition partition for (1) order by 1;
+
+--success
+select a from test_select_hash_partition partition for (2) order by 1;
+
+--success
+select a from test_select_hash_partition partition for (5) order by 1;
+
+--success
+select a from test_select_hash_partition partition for (8) order by 1;
+
+-- fail: table is not a partitioned table
+select a from hw_partition_select_ordinary_table partition (test_select_hash_partition_p2);
+
+-- fail: table is not a partitioned table
+select a from hw_partition_select_ordinary_table partition for (2);
+
+--
+--
+CREATE TABLE hw_partition_select_test(C_INT INTEGER)
+ partition by hash (C_INT)
+(
+ partition hw_partition_select_test_part_1,
+ partition hw_partition_select_test_part_2,
+ partition hw_partition_select_test_part_3
+);
+insert into hw_partition_select_test values(111);
+insert into hw_partition_select_test values(555);
+insert into hw_partition_select_test values(888);
+
+select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a;
+
+create table hash_partitioned_table (a int)
+partition by hash(a)
+(
+ partition hash_partitioned_table_p1,
+ partition hash_partitioned_table_p2,
+ partition hash_partitioned_table_p3
+);
+
+insert into hash_partitioned_table values (1);
+insert into hash_partitioned_table values (2);
+insert into hash_partitioned_table values (5);
+insert into hash_partitioned_table values (6);
+
+with tmp1 as (select a from hash_partitioned_table partition for (2)) select a from tmp1 order by 1;
+
+drop schema FVT_COMPRESS cascade;
+
+
+
+
+
diff --git a/src/test/regress/sql/hw_partition_interval_index.sql b/src/test/regress/sql/hw_partition_interval_index.sql
index bbc50b67f..5eb029865 100644
--- a/src/test/regress/sql/hw_partition_interval_index.sql
+++ b/src/test/regress/sql/hw_partition_interval_index.sql
@@ -323,11 +323,11 @@ INTERVAL('1 MONTH')
 PARTITION p1 VALUES LESS THAN (TO_DATE('6-5-2008', 'DD-MM-YYYY'))
 );
-select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition;
+select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition order by relname;
 
 insert into interval_sales values (generate_series(1,10), generate_series(1,10), generate_series(TO_DATE('2020-01-01', 'YYYY-MM-DD'),TO_DATE('2020-07-01', 'YYYY-MM-DD'),'1 day'), 1, 1, 1, 1);
 
-select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition;
+select relname, case when reltoastrelid > 0 then 'TRUE' else 'FALSE' end as has_toastrelid, boundaries from pg_partition order by relname;
 
 drop table interval_sales;
 
diff --git a/src/test/regress/sql/hw_partition_interval_select.sql
b/src/test/regress/sql/hw_partition_interval_select.sql index a784c1d61..8516d6f3f 100644 --- a/src/test/regress/sql/hw_partition_interval_select.sql +++ b/src/test/regress/sql/hw_partition_interval_select.sql @@ -14,40 +14,40 @@ insert into interval_tab1 values(1,'2020-4-7 2:0:0', 1, 1); insert into interval_tab1 values(1,'2020-4-8 2:0:0', 1, 1); -select relname, boundaries from pg_partition; +select relname, boundaries from pg_partition order by 1,2; -select * from interval_tab1 where logdate < '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate < '2020-4-7 0:0:0' order by 1,2,3,4; explain (costs off, verbose on) select * from interval_tab1 where logdate < '2020-4-7 0:0:0'; -select * from interval_tab1 where logdate > '2020-4-6'; +select * from interval_tab1 where logdate > '2020-4-6' order by 1,2,3,4; explain (costs off, verbose on) select * from interval_tab1 where logdate > '2020-4-6'; -select * from interval_tab1 where logdate = '2020-4-7 2:0:0'; +select * from interval_tab1 where logdate = '2020-4-7 2:0:0' order by 1,2,3,4; insert into interval_tab1 values(1,'2020-4-7 0:0:0', 1, 1); -select * from interval_tab1 where logdate = '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate = '2020-4-7 0:0:0' order by 1,2,3,4; -select * from interval_tab1 where logdate != '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate != '2020-4-7 0:0:0' order by 1,2,3,4; -select * from interval_tab1 where logdate >= '2020-4-7 0:0:0'; +select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' order by 1,2,3,4; insert into interval_tab1 values(1,'2020-4-5 2:0:0', 1, 1); -select relname, boundaries from pg_partition; +select relname, boundaries from pg_partition order by 1,2; insert into interval_tab1 values(1,'2020-4-9 0:0:0', 1, 1); -select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate < '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate < '2020-4-9 0:0:0' order by 1,2,3,4; -select * from interval_tab1 where logdate > '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate > '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0' order by 1,2,3,4; -select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate >= '2020-4-7 0:0:0' and logdate <= '2020-4-9 0:0:0' order by 1,2,3,4; -select * from interval_tab1 where logdate > '2020-4-6 0:0:0' and logdate <= '2020-4-9 0:0:0'; +select * from interval_tab1 where logdate > '2020-4-6 0:0:0' and logdate <= '2020-4-9 0:0:0' order by 1,2,3,4; explain (costs off, verbose on) select * from interval_tab1 where logdate >= '2020-4-10 0:0:0'; diff --git a/src/test/regress/sql/hw_partition_list_dql.sql b/src/test/regress/sql/hw_partition_list_dql.sql index bcff32b0f..d43ce5c7e 100644 --- a/src/test/regress/sql/hw_partition_list_dql.sql +++ b/src/test/regress/sql/hw_partition_list_dql.sql @@ -1,619 +1,619 @@ - --- ----- test partition for (null) --- - --- 1. test ordinary - -- 1.1 range partitioned table - -- 1.2 interval partitioned table --- 2. test data column of partition key value - -- 2.1 text - -- 2.2 timestamp --- 3. MAXVALUE - -- 3.1 MAXVALUE is first column - -- 3.2 MAXVALUE is second column - -CREATE schema FVT_COMPRESS_QWER; -set search_path to FVT_COMPRESS_QWER; - - --- 1. 
test ordinary ----- 1.1 range partitioned table -create table test_partition_for_null_list (a int, b int, c int, d int) -partition by list (a) -( - partition test_partition_for_null_list_p1 values(0), - partition test_partition_for_null_list_p2 values(1,2,3), - partition test_partition_for_null_list_p3 values(4,5,6) -); - -insert into test_partition_for_null_list values (0, 0, 0, 0); -insert into test_partition_for_null_list values (1, 1, 1, 1); -insert into test_partition_for_null_list values (5, 5, 5, 5); - --- failed: inserted partition key does not map to any table partition -insert into test_partition_for_null_list values (null, null, null, null); --- success -insert into test_partition_for_null_list values (0, null, null, null); - - --- failed: The partition number is invalid or out-of-range -select * from test_partition_for_null_list partition for (null) order by 1, 2, 3, 4; --- success -select * from test_partition_for_null_list partition for (0) order by 1, 2, 3, 4; - - --- failed: The partition number is invalid or out-of-range -alter table test_partition_for_null_list rename partition for (null) to test_partition_for_null_list_part1; --- success -alter table test_partition_for_null_list rename partition for (0) to test_partition_for_null_list_part1; --- success -select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; - - --- failed: The partition number is invalid or out-of-range -alter table test_partition_for_null_list drop partition for (null); --- success -alter table test_partition_for_null_list drop partition for (0); --- failed -select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; - -CREATE TABLE select_list_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_INT) -( - partition select_list_partition_000_3_1 values (111,222,333,444), - partition select_list_partition_000_3_2 values (555,666,777,888,999,1100,1600) -); - -create index select_list_partition_table_index_000_3 ON select_list_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); -create view select_list_partition_table_view_000_3 as select * from select_list_partition_table_000_3; - -INSERT INTO select_list_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_list_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_list_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_list_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_list_partition_table_000_3 
VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_list_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_list_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -select * from select_list_partition_table_000_3 partition for (NULL) order by C_INT; - -alter table select_list_partition_table_000_3 rename partition for (NULL) to select_list_partition_table_000_3_p1; - -alter table select_list_partition_table_000_3 drop partition for (NULL); - - -CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) -partition by list(ID) -( - partition partition_wise_join_table_001_1_1 values (1,42,3,44,5,46,7,48,9), - partition partition_wise_join_table_001_1_2 values (41,2,43,4,45,6,47,8,49) -) ; - -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); -INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); - -create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; -create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; -create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; -create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; - -CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) -partition by list(ID) -( - partition partition_wise_join_table_001_1_1 values (71,2,73,4,75,6,77,8,79), - partition partition_wise_join_table_001_1_2 values (1,72,3,74,5,76,7,78,9) -); - -INSERT INTO 
partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); -INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); - -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; -CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; - -SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; - -ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; -ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; - -SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; - -CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) -PARTITION BY list (A) -( - PARTITION HW_PARTITION_SELECT_RT_P1 VALUES (0), - PARTITION HW_PARTITION_SELECT_RT_P2 VALUES (1,2,3), - PARTITION HW_PARTITION_SELECT_RT_P3 VALUES (4,5,6) -); -EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; - -CREATE TABLE DTS2013112504143_TEST1(A INT) PARTITION BY LIST (A)(PARTITION DTS2013112504143_TEST1_P1 VALUES (1,2,3,4,5,6,7,8,9)); -CREATE TABLE DTS2013112504143_TEST2(A INT); -SELECT * FROM DTS2013112504143_TEST1 UNION ALL SELECT * FROM DTS2013112504143_TEST2 order by 1; - -CREATE TABLE select_partition_table_000_3( - C_CHAR_1 CHAR(1), - C_CHAR_2 CHAR(10), - C_CHAR_3 CHAR(102400), - C_VARCHAR_1 VARCHAR(1), - C_VARCHAR_2 VARCHAR(10), - C_VARCHAR_3 VARCHAR(1024), - C_INT INTEGER, - C_BIGINT BIGINT, - C_SMALLINT SMALLINT, - C_FLOAT FLOAT, - C_NUMERIC numeric(10,5), - C_DP double precision, - C_DATE DATE, - C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, - C_TS_WITH TIMESTAMP WITH TIME ZONE ) - partition by list (C_INT) -( - partition select_partition_000_3_1 values (111,222,333,444), - partition select_partition_000_3_3 values (555,666,777,888,999,1100,1600) -); - -create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); -create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; - -INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); -INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); -INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); -INSERT INTO select_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); -INSERT INTO select_partition_table_000_3 
VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); -INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); -INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); -INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); - -explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; - -select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; - -create table hw_partition_select_rt5 (a int, b int, c int) -partition by list(c) -( -partition hw_partition_select_rt5_p1 values (0,1) -); - -alter table hw_partition_select_rt5 drop column b; - -update hw_partition_select_rt5 set c=0 where c=-1; - -drop schema FVT_COMPRESS_QWER cascade; - ---begin: these test are related to explain output change about partition table. --- major change is as below - --1. - --Selected Partitions: 1 2 6 7 8 9 - -- \|/ - --Selected Partitions: 1..2,6..9 - --2. 
- --Selected Partitions: 1 3 5 7 9 - -- \|/ - --Selected Partitions: 1,3,5,7,9 -CREATE schema FVT_COMPRESS; -set search_path to FVT_COMPRESS; - - -create table test_explain_format_on_part_table (id int) -partition by list(id) -( -partition p1 values (1,2,3,4,5,6,7,8,9), -partition p2 values (11,12,13,14,15,16,17,18,19), -partition p3 values (21,22,23,24,25,26,27,28,29), -partition p4 values (31,32,33,34,35,36,37,38,39), -partition p5 values (41,42,43,44,45,46,47,48,49), -partition p6 values (51,52,53,54,55,56,57,58,59), -partition p7 values (61,62,63,64,65,66,67,68,69), -partition p8 values (71,72,73,74,75,76,77,78,79), -partition p9 values (81,82,83,84,85,86,87,88,89) -); --- two continous segments, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id <15 or id >51; --- no continous segment, text formast -explain (verbose on, costs off) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; --- two continous segments, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id <15 or id >51; --- no continous segment, non-text formast -explain (verbose on, costs off, FORMAT JSON) - select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; - -drop table test_explain_format_on_part_table; ---end: these test are related to explain output change about partition table. - -create table hw_partition_select_parttable ( - c1 int, - c2 int, - c3 text) -partition by list(c1) -(partition hw_partition_select_parttable_p1 values (10,20,30,40), - partition hw_partition_select_parttable_p2 values (50,60,70,80,90,100,110,120,130,140), - partition hw_partition_select_parttable_p3 values (150,200,250,300,350)); - - insert into hw_partition_select_parttable values (10,40,'abc'); - insert into hw_partition_select_parttable(c1,c2) values (100,20); - insert into hw_partition_select_parttable values(300,200); - -select * from hw_partition_select_parttable order by 1, 2, 3; - -select c1 from hw_partition_select_parttable order by 1; - -select c1,c2 from hw_partition_select_parttable order by 1, 2; - -select c2 from hw_partition_select_parttable order by 1; - -select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; - -select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; - -select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; - -create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) -partition by list(c1) -(partition t_select_datatype_int32_p1 values(-100, -50, 0, 50), - partition t_select_datatype_int32_p2 values(100, 150, 200, 250), - partition t_select_datatype_int32_p3 values(300, 350), - partition t_select_datatype_int32_p4 values(400, 450, 500)); - -insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); - ---partition select for int32 ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 
where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; - ---success -select 
* from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; - ---IS NULL ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) OR - t_select_datatype_int32.c4 IS NULL - ORDER BY 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - 
(t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) AND - t_select_datatype_int32.c4 IS NULL - ORDER BY 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where - t_select_datatype_int32.c4 IS NULL AND - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) - ORDER BY 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where - t_select_datatype_int32.c4 IS NULL OR - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) - ORDER BY 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND - (t_select_datatype_int32.c2<>100) - ORDER BY 1, 2, 3, 4; - ---success -select * from t_select_datatype_int32 where - (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND - (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) - ORDER BY 1, 2, 3, 4; - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --- check select contains partition - --- ----- check select from list partition --- - -create table hw_partition_select_ordinary_table (a int, b int); - -create table test_select_list_partition (a int, b int) -partition by list(a) -( - partition test_select_list_partition_p1 values (0), - partition test_select_list_partition_p2 values (1,2,3), - partition test_select_list_partition_p3 values (4,5,6) -); - -insert into test_select_list_partition values(2); - ---success -select * from test_select_list_partition partition (test_select_list_partition_p1) order by 1, 2; - ---success -select * from test_select_list_partition partition (test_select_list_partition_p2) order by 1, 2; - ---success -select * from test_select_list_partition partition (test_select_list_partition_p3) order by 1, 2; - ---fail: partition test_select_list_partition_p4 does not exist -select * from test_select_list_partition partition (test_select_list_partition_p4) order by 1, 2; - ---success -select a from test_select_list_partition partition (test_select_list_partition_p2) order by 1; - ---success -select a from test_select_list_partition partition for (0) order by 1; - ---success -select a from test_select_list_partition partition for (1) order by 1; - ---success -select a from test_select_list_partition partition for (2) order by 1; - ---success -select a from test_select_list_partition partition for (5) order by 1; - ---success -select a from test_select_list_partition partition for (8) order by 1; - -- fail: table is not a partitioned table -select a from hw_partition_select_ordinary_table partition (test_select_list_partition_p2); - -- fail: table is not a partitioned table -select a from hw_partition_select_ordinary_table partition for (2); - --- --- -CREATE TABLE hw_partition_select_test(C_INT INTEGER) - partition by list (C_INT) -( - partition hw_partition_select_test_part_1 values (111,222,333), - partition hw_partition_select_test_part_2 values (444,555,666), - partition hw_partition_select_test_part_3 values (777,888,999) -); -insert into hw_partition_select_test values(111); -insert into hw_partition_select_test values(555); -insert into hw_partition_select_test values(888); - -select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a; - 
-create table list_partitioned_table (a int) -partition by list(a) -( - partition list_partitioned_table_p1 values (0), - partition list_partitioned_table_p2 values (1,2,3), - partition list_partitioned_table_p3 values (4,5,6) -); - -insert into list_partitioned_table values (1); -insert into list_partitioned_table values (2); -insert into list_partitioned_table values (5); -insert into list_partitioned_table values (6); - -with tmp1 as (select a from list_partitioned_table partition for (2)) select a from tmp1 order by 1; - --- ----- select union select --- -create table UNION_TABLE_043_1(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) -partition by list (C_INT) -( - partition UNION_TABLE_043_1_1 values (111,222,333), - partition UNION_TABLE_043_1_2 values (444,555) -); -insert into UNION_TABLE_043_1 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); -insert into UNION_TABLE_043_1 values('BCDEFGH','bcdefgh',222,2.222,'2000-02-02 02:02:02'); -insert into UNION_TABLE_043_1 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); -insert into UNION_TABLE_043_1 values('DEFGHIJ','defghij',444,4.444,'2000-04-04 04:04:04'); -insert into UNION_TABLE_043_1 values('EFGHIJK','efghijk',555,5.555,'2000-05-05 05:05:05'); - - -create table UNION_TABLE_043_2(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) -partition by list (C_INT) -( - partition UNION_TABLE_043_2_1 values (111,222,333), - partition UNION_TABLE_043_2_2 values (444,555) -); -insert into UNION_TABLE_043_2 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); -insert into UNION_TABLE_043_2 values('BCDEFGH','bcdefgh',222,2.222,'2010-02-02 02:02:02'); -insert into UNION_TABLE_043_2 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); -insert into UNION_TABLE_043_2 values('DEFGHIJ','defghij',444,4.444,'2010-04-04 04:04:04'); -insert into UNION_TABLE_043_2 values('EFGHIJK','efghijk',555,5.555,'2020-05-05 05:05:05'); - -select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 order by 1,2,3; - -select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 partition (UNION_TABLE_043_1_1) union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 partition (UNION_TABLE_043_2_1) order by 1,2,3; - -drop table UNION_TABLE_043_1; -drop table UNION_TABLE_043_2; - -drop schema FVT_COMPRESS cascade; - - - - - + +-- +---- test partition for (null) +-- + +-- 1. test ordinary + -- 1.1 list partitioned table + -- 1.2 interval partitioned table +-- 2. test data column of partition key value + -- 2.1 text + -- 2.2 timestamp +-- 3. MAXVALUE + -- 3.1 MAXVALUE is first column + -- 3.2 MAXVALUE is second column + +CREATE schema FVT_COMPRESS_QWER; +set search_path to FVT_COMPRESS_QWER; + + +-- 1. 
test ordinary +---- 1.1 list partitioned table +create table test_partition_for_null_list (a int, b int, c int, d int) +partition by list (a) +( + partition test_partition_for_null_list_p1 values(0), + partition test_partition_for_null_list_p2 values(1,2,3), + partition test_partition_for_null_list_p3 values(4,5,6) +); + +insert into test_partition_for_null_list values (0, 0, 0, 0); +insert into test_partition_for_null_list values (1, 1, 1, 1); +insert into test_partition_for_null_list values (5, 5, 5, 5); + +-- failed: inserted partition key does not map to any table partition +insert into test_partition_for_null_list values (null, null, null, null); +-- success +insert into test_partition_for_null_list values (0, null, null, null); + + +-- failed: The partition number is invalid or out-of-range +select * from test_partition_for_null_list partition for (null) order by 1, 2, 3, 4; +-- success +select * from test_partition_for_null_list partition for (0) order by 1, 2, 3, 4; + + +-- failed: The partition number is invalid or out-of-range +alter table test_partition_for_null_list rename partition for (null) to test_partition_for_null_list_part1; +-- success +alter table test_partition_for_null_list rename partition for (0) to test_partition_for_null_list_part1; +-- success +select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; + + +-- failed: The partition number is invalid or out-of-range +alter table test_partition_for_null_list drop partition for (null); +-- success +alter table test_partition_for_null_list drop partition for (0); +-- failed +select * from test_partition_for_null_list partition (test_partition_for_null_list_part1) order by 1, 2, 3, 4; + +CREATE TABLE select_list_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_INT) +( + partition select_list_partition_000_3_1 values (111,222,333,444), + partition select_list_partition_000_3_2 values (555,666,777,888,999,1100,1600) +); + +create index select_list_partition_table_index_000_3 ON select_list_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_list_partition_000_3_1, partition select_list_partition_000_3_3); +create view select_list_partition_table_view_000_3 as select * from select_list_partition_table_000_3; + +INSERT INTO select_list_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_list_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_list_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_list_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_list_partition_table_000_3 
VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_list_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_list_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_list_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_list_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +select * from select_list_partition_table_000_3 partition for (NULL) order by C_INT; + +alter table select_list_partition_table_000_3 rename partition for (NULL) to select_list_partition_table_000_3_p1; + +alter table select_list_partition_table_000_3 drop partition for (NULL); + + +CREATE TABLE partition_wise_join_table_001_1 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision,RANK SMALLINT) +partition by list(ID) +( + partition partition_wise_join_table_001_1_1 values (1,42,3,44,5,46,7,48,9), + partition partition_wise_join_table_001_1_2 values (41,2,43,4,45,6,47,8,49) +) ; + +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 1-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,10000,13 ); +INSERT INTO partition_wise_join_table_001_1 VALUES (generate_series(41,49),'PARTITION WIASE JOIN 1-3-' || generate_series(40,60),90 + random() * 10,'1990-8-8',$$No.88# Science 6 Street of Xi'an of China $$,15000,15 ); + +create index idx_partition_wise_join_table_001_1_1 on partition_wise_join_table_001_1(ID) LOCAL; +create index idx_partition_wise_join_table_001_1_2 on partition_wise_join_table_001_1(ID,NAME) LOCAL; +create index idx_partition_wise_join_table_001_1_3 on partition_wise_join_table_001_1(RANK) LOCAL; +create index idx_partition_wise_join_table_001_1_4 on partition_wise_join_table_001_1(RANK,SALARY,NAME) LOCAL; + +CREATE TABLE partition_wise_join_table_001_2 (ID INT NOT NULL,NAME VARCHAR(50) NOT NULL,SCORE NUMERIC(4,1),BIRTHDAY TIMESTAMP WITHOUT TIME ZONE,ADDRESS TEXT,SALARY double precision ) +partition by list(ID) +( + partition partition_wise_join_table_001_1_1 values (71,2,73,4,75,6,77,8,79), + partition partition_wise_join_table_001_1_2 values (1,72,3,74,5,76,7,78,9) +); + +INSERT INTO 
partition_wise_join_table_001_2 VALUES (generate_series(1,9),'PARTITION WIASE JOIN 2-1-' || generate_series(1,10),90 + random() * 10,'1990-8-8',$$No 66# Science 4 Street of Xi'an of China $$,10000); +INSERT INTO partition_wise_join_table_001_2 VALUES (generate_series(71,79),'PARTITION WIASE JOIN 2-3-' || generate_series(70,80),90 + random() * 10,'1990-8-8',$$No 77# Science 4 Street of Xi'an of China $$,15000); + +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_1 ON PARTITION_WISE_JOIN_TABLE_001_2(ID) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_2 ON PARTITION_WISE_JOIN_TABLE_001_2(ID,NAME) LOCAL; +CREATE INDEX IDX_PARTITION_WISE_JOIN_TABLE_001_2_3 ON PARTITION_WISE_JOIN_TABLE_001_2(SALARY,NAME) LOCAL; + +SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +ANALYZE PARTITION_WISE_JOIN_TABLE_001_1; +ANALYZE PARTITION_WISE_JOIN_TABLE_001_2; + +SELECT A.ID,B.ID, A.RANK,B.SALARY,A.SALARY,A.ADDRESS,B.BIRTHDAY FROM PARTITION_WISE_JOIN_TABLE_001_1 A,PARTITION_WISE_JOIN_TABLE_001_2 B WHERE A.ID = B.ID AND A.ID < 100 OR A.ID >400 order by 1, 2; + +CREATE TABLE HW_PARTITION_SELECT_RT (A INT, B INT) +PARTITION BY list (A) +( + PARTITION HW_PARTITION_SELECT_RT_P1 VALUES (0), + PARTITION HW_PARTITION_SELECT_RT_P2 VALUES (1,2,3), + PARTITION HW_PARTITION_SELECT_RT_P3 VALUES (4,5,6) +); +EXPLAIN (COSTS OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_RT LIMIT 100) ORDER BY B; + +CREATE TABLE TESTTABLE_TEST1(A INT) PARTITION BY LIST (A)(PARTITION TESTTABLE_TEST1_P1 VALUES (1,2,3,4,5,6,7,8,9)); +CREATE TABLE TESTTABLE_TEST2(A INT); +SELECT * FROM TESTTABLE_TEST1 UNION ALL SELECT * FROM TESTTABLE_TEST2 order by 1; + +CREATE TABLE select_partition_table_000_3( + C_CHAR_1 CHAR(1), + C_CHAR_2 CHAR(10), + C_CHAR_3 CHAR(102400), + C_VARCHAR_1 VARCHAR(1), + C_VARCHAR_2 VARCHAR(10), + C_VARCHAR_3 VARCHAR(1024), + C_INT INTEGER, + C_BIGINT BIGINT, + C_SMALLINT SMALLINT, + C_FLOAT FLOAT, + C_NUMERIC numeric(10,5), + C_DP double precision, + C_DATE DATE, + C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE, + C_TS_WITH TIMESTAMP WITH TIME ZONE ) + partition by list (C_INT) +( + partition select_partition_000_3_1 values (111,222,333,444), + partition select_partition_000_3_3 values (555,666,777,888,999,1100,1600) +); + +create index select_partition_table_index_000_3 ON select_partition_table_000_3(C_CHAR_3,C_VARCHAR_3,C_INT,C_TS_WITHOUT) local(partition select_partition_000_3_1, partition select_partition_000_3_3); +create view select_partition_table_view_000_3 as select * from select_partition_table_000_3; + +INSERT INTO select_partition_table_000_3 VALUES('A','ABC','ABCDEFG','a','abc','abcdefg',111,111111,11,1.1,1.11,1.111,'2000-01-01','2000-01-01 01:01:01','2000-01-01 01:01:01+01'); +INSERT INTO select_partition_table_000_3 VALUES('B','BCD','BCDEFGH','b','bcd','bcdefgh',222,222222,22,2.2,2.22,2.222,'2000-02-02','2000-02-02 02:02:02','2000-02-02 02:02:02+02'); +INSERT INTO select_partition_table_000_3 VALUES('C','CDE','CDEFGHI','c','cde','cdefghi',333,333333,33,3.3,3.33,3.333,'2000-03-03','2000-03-03 03:03:03','2000-03-03 03:03:03+03'); +INSERT INTO select_partition_table_000_3 VALUES('D','DEF','DEFGHIJ','d','def','defghij',444,444444,44,4.4,4.44,4.444,'2000-04-04','2000-04-04 04:04:04','2000-04-04 04:04:04+04'); +INSERT INTO select_partition_table_000_3 VALUES('E','EFG','EFGHIJK','e','efg','efghijk',555,555555,55,5.5,5.55,5.555,'2000-05-05','2000-05-05 
05:05:05','2000-05-05 05:05:05+05'); +INSERT INTO select_partition_table_000_3 VALUES('F','FGH','FGHIJKL','f','fgh','fghijkl',666,666666,66,6.6,6.66,6.666,'2000-06-06','2000-06-06 06:06:06','2000-06-06 06:06:06+06'); +INSERT INTO select_partition_table_000_3 VALUES('G','GHI','GHIJKLM','g','ghi','ghijklm',777,777777,77,7.7,7.77,7.777,'2000-07-07','2000-07-07 07:07:07','2000-07-07 07:07:07+07'); +INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('H','HIJ','HIJKLMN','h','hij','hijklmn',888,888888,88,8.8,8.88,8.888,'2000-08-08','2000-08-08 08:08:08','2000-08-08 08:08:08+08'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',999,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1100,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); +INSERT INTO select_partition_table_000_3 VALUES('I','IJK','IJKLMNO','i','ijk','ijklmno',1600,999999,99,9.9,9.99,9.999,'2000-09-09','2000-09-09 09:09:09','2000-09-09 09:09:09+09'); + +explain (costs off, verbose on) select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; + +select lower(C_CHAR_3), initcap(C_VARCHAR_3), sqrt(C_INT), C_NUMERIC- 1 + 2*6/3, rank() over w from select_partition_table_000_3 where C_INT > 600 or C_BIGINT < 444444 window w as (partition by C_TS_WITHOUT) order by 1,2,3,4,5; + +create table hw_partition_select_rt5 (a int, b int, c int) +partition by list(c) +( +partition hw_partition_select_rt5_p1 values (0,1) +); + +alter table hw_partition_select_rt5 drop column b; + +update hw_partition_select_rt5 set c=0 where c=-1; + +drop schema FVT_COMPRESS_QWER cascade; + +--begin: these tests are related to the explain output change for partition tables. +-- major change is as below + --1. + --Selected Partitions: 1 2 6 7 8 9 + -- \|/ + --Selected Partitions: 1..2,6..9 +--2. 
+ --Selected Partitions: 1 3 5 7 9 + -- \|/ + --Selected Partitions: 1,3,5,7,9 +CREATE schema FVT_COMPRESS; +set search_path to FVT_COMPRESS; + + +create table test_explain_format_on_part_table (id int) +partition by list(id) +( +partition p1 values (1,2,3,4,5,6,7,8,9), +partition p2 values (11,12,13,14,15,16,17,18,19), +partition p3 values (21,22,23,24,25,26,27,28,29), +partition p4 values (31,32,33,34,35,36,37,38,39), +partition p5 values (41,42,43,44,45,46,47,48,49), +partition p6 values (51,52,53,54,55,56,57,58,59), +partition p7 values (61,62,63,64,65,66,67,68,69), +partition p8 values (71,72,73,74,75,76,77,78,79), +partition p9 values (81,82,83,84,85,86,87,88,89) +); +-- two continuous segments, text format +explain (verbose on, costs off) + select * from test_explain_format_on_part_table where id <15 or id >51; +-- no continuous segment, text format +explain (verbose on, costs off) + select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; +-- two continuous segments, non-text format +explain (verbose on, costs off, FORMAT JSON) + select * from test_explain_format_on_part_table where id <15 or id >51; +-- no continuous segment, non-text format +explain (verbose on, costs off, FORMAT JSON) + select * from test_explain_format_on_part_table where id =5 or id =25 or id=45 or id = 65 or id = 85; + +drop table test_explain_format_on_part_table; +--end: these tests are related to the explain output change for partition tables. + +create table hw_partition_select_parttable ( + c1 int, + c2 int, + c3 text) +partition by list(c1) +(partition hw_partition_select_parttable_p1 values (10,20,30,40), + partition hw_partition_select_parttable_p2 values (50,60,70,80,90,100,110,120,130,140), + partition hw_partition_select_parttable_p3 values (150,200,250,300,350)); + + insert into hw_partition_select_parttable values (10,40,'abc'); + insert into hw_partition_select_parttable(c1,c2) values (100,20); + insert into hw_partition_select_parttable values(300,200); + +select * from hw_partition_select_parttable order by 1, 2, 3; + +select c1 from hw_partition_select_parttable order by 1; + +select c1,c2 from hw_partition_select_parttable order by 1, 2; + +select c2 from hw_partition_select_parttable order by 1; + +select c1,c2,c3 from hw_partition_select_parttable order by 1, 2, 3; + +select c1 from hw_partition_select_parttable where c1>50 and c1<300 order by 1; + +select * from hw_partition_select_parttable where c2>100 order by 1, 2, 3; + +create table t_select_datatype_int32(c1 int,c2 int,c3 int,c4 text) +partition by list(c1) +(partition t_select_datatype_int32_p1 values(-100, -50, 0, 50), + partition t_select_datatype_int32_p2 values(100, 150, 200, 250), + partition t_select_datatype_int32_p3 values(300, 350), + partition t_select_datatype_int32_p4 values(400, 450, 500)); + +insert into t_select_datatype_int32 values(-100,20,20,'a'), (100,300,300,'bb'), (150,75,500,NULL), (200,500,50,'ccc'), (250,50,50,NULL), (300,700,125,''), (450,35,150,'dddd'); + +--partition select for int32 +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 
where t_select_datatype_int32.c1=550 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<=700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 AND t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<550 order by 1, 2, 3, 4; + +--success +select 
* from t_select_datatype_int32 where t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1<=500 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<500 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 AND t_select_datatype_int32.c1>100 AND t_select_datatype_int32.c1>=100 AND t_select_datatype_int32.c1<250 AND t_select_datatype_int32.c1<=250 AND t_select_datatype_int32.c1=200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>0 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>50 OR t_select_datatype_int32.c1>=150 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1>=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1=100 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<200 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>100 OR t_select_datatype_int32.c1<=300 OR t_select_datatype_int32.c1>=100 OR t_select_datatype_int32.c1<300 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>250 OR t_select_datatype_int32.c1<50 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<170 AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where (t_select_datatype_int32.c1<170 OR t_select_datatype_int32.c1<250) AND ( t_select_datatype_int32.c1>600 OR t_select_datatype_int32.c1<150) order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>250 AND t_select_datatype_int32.c1<400 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<50 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where t_select_datatype_int32.c1>=-100 AND t_select_datatype_int32.c1<=100 OR t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<700 order by 1, 2, 3, 4; + +--IS NULL +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) OR + t_select_datatype_int32.c4 IS NULL + ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + 
(t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) AND + t_select_datatype_int32.c4 IS NULL + ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + t_select_datatype_int32.c4 IS NULL AND + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) + ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + t_select_datatype_int32.c4 IS NULL OR + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) + ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c4 IS NULL) AND + (t_select_datatype_int32.c2<>100) + ORDER BY 1, 2, 3, 4; + +--success +select * from t_select_datatype_int32 where + (t_select_datatype_int32.c1>500 OR t_select_datatype_int32.c1<250) AND + (t_select_datatype_int32.c1>300 AND t_select_datatype_int32.c1<>100) + ORDER BY 1, 2, 3, 4; + +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +-- check select contains partition + +-- +---- check select from list partition +-- + +create table hw_partition_select_ordinary_table (a int, b int); + +create table test_select_list_partition (a int, b int) +partition by list(a) +( + partition test_select_list_partition_p1 values (0), + partition test_select_list_partition_p2 values (1,2,3), + partition test_select_list_partition_p3 values (4,5,6) +); + +insert into test_select_list_partition values(2); + +--success +select * from test_select_list_partition partition (test_select_list_partition_p1) order by 1, 2; + +--success +select * from test_select_list_partition partition (test_select_list_partition_p2) order by 1, 2; + +--success +select * from test_select_list_partition partition (test_select_list_partition_p3) order by 1, 2; + +--fail: partition test_select_list_partition_p4 does not exist +select * from test_select_list_partition partition (test_select_list_partition_p4) order by 1, 2; + +--success +select a from test_select_list_partition partition (test_select_list_partition_p2) order by 1; + +--success +select a from test_select_list_partition partition for (0) order by 1; + +--success +select a from test_select_list_partition partition for (1) order by 1; + +--success +select a from test_select_list_partition partition for (2) order by 1; + +--success +select a from test_select_list_partition partition for (5) order by 1; + +--success +select a from test_select_list_partition partition for (8) order by 1; + +-- fail: table is not a partitioned table +select a from hw_partition_select_ordinary_table partition (test_select_list_partition_p2); + +-- fail: table is not a partitioned table +select a from hw_partition_select_ordinary_table partition for (2); + +-- +-- +CREATE TABLE hw_partition_select_test(C_INT INTEGER) + partition by list (C_INT) +( + partition hw_partition_select_test_part_1 values (111,222,333), + partition hw_partition_select_test_part_2 values (444,555,666), + partition hw_partition_select_test_part_3 values (777,888,999) +); +insert into hw_partition_select_test values(111); +insert into hw_partition_select_test values(555); +insert into hw_partition_select_test values(888); + +select a.* from hw_partition_select_test partition(hw_partition_select_test_part_1) a; + 
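+-- a minimal extra check (illustrative sketch; assumes PARTITION FOR accepts a
+-- table alias just like the named PARTITION clause above): any key value mapped
+-- to hw_partition_select_test_part_1, e.g. 111, 222 or 333, resolves to the same
+-- partition as the named-partition query above, so both return the row (111).
+select a.* from hw_partition_select_test partition for (111) a;
+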
+create table list_partitioned_table (a int) +partition by list(a) +( + partition list_partitioned_table_p1 values (0), + partition list_partitioned_table_p2 values (1,2,3), + partition list_partitioned_table_p3 values (4,5,6) +); + +insert into list_partitioned_table values (1); +insert into list_partitioned_table values (2); +insert into list_partitioned_table values (5); +insert into list_partitioned_table values (6); + +with tmp1 as (select a from list_partitioned_table partition for (2)) select a from tmp1 order by 1; + +-- +---- select union select +-- +create table UNION_TABLE_043_1(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) +partition by list (C_INT) +( + partition UNION_TABLE_043_1_1 values (111,222,333), + partition UNION_TABLE_043_1_2 values (444,555) +); +insert into UNION_TABLE_043_1 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); +insert into UNION_TABLE_043_1 values('BCDEFGH','bcdefgh',222,2.222,'2000-02-02 02:02:02'); +insert into UNION_TABLE_043_1 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); +insert into UNION_TABLE_043_1 values('DEFGHIJ','defghij',444,4.444,'2000-04-04 04:04:04'); +insert into UNION_TABLE_043_1 values('EFGHIJK','efghijk',555,5.555,'2000-05-05 05:05:05'); + + +create table UNION_TABLE_043_2(C_CHAR CHAR(103500), C_VARCHAR VARCHAR(1035), C_INT INTEGER not null, C_DP double precision, C_TS_WITHOUT TIMESTAMP WITHOUT TIME ZONE) +partition by list (C_INT) +( + partition UNION_TABLE_043_2_1 values (111,222,333), + partition UNION_TABLE_043_2_2 values (444,555) +); +insert into UNION_TABLE_043_2 values('ABCDEFG','abcdefg',111,1.111,'2000-01-01 01:01:01'); +insert into UNION_TABLE_043_2 values('BCDEFGH','bcdefgh',222,2.222,'2010-02-02 02:02:02'); +insert into UNION_TABLE_043_2 values('CDEFGHI','cdefghi',333,3.333,'2000-03-03 03:03:03'); +insert into UNION_TABLE_043_2 values('DEFGHIJ','defghij',444,4.444,'2010-04-04 04:04:04'); +insert into UNION_TABLE_043_2 values('EFGHIJK','efghijk',555,5.555,'2020-05-05 05:05:05'); + +select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 order by 1,2,3; + +select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_1 partition (UNION_TABLE_043_1_1) union select C_INT,C_DP,C_TS_WITHOUT from UNION_TABLE_043_2 partition (UNION_TABLE_043_2_1) order by 1,2,3; + +drop table UNION_TABLE_043_1; +drop table UNION_TABLE_043_2; + +drop schema FVT_COMPRESS cascade; + + + + + diff --git a/src/test/regress/sql/hw_partition_pruning_2.sql b/src/test/regress/sql/hw_partition_pruning_2.sql index c0c277a5b..1393fc7f2 100644 --- a/src/test/regress/sql/hw_partition_pruning_2.sql +++ b/src/test/regress/sql/hw_partition_pruning_2.sql @@ -237,7 +237,7 @@ SELECT * FROM pruning_partition_table_000 WHERE C_INT>10; drop table pruning_partition_table_000; -create table t_pruning_DTS2013091303739_1(c1 int,c2 text) +create table t_pruning_TESTTABLE_1(c1 int,c2 text) partition by range(c1) ( partition p1 values less than(100), @@ -246,23 +246,23 @@ partition by range(c1) ); explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c1 IS NULL; +select * from t_pruning_TESTTABLE_1 where c1 IS NULL; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c1 IS NOT NULL; +select * from t_pruning_TESTTABLE_1 where c1 IS NOT NULL; explain (ANALYZE 
false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c1=null; +select * from t_pruning_TESTTABLE_1 where c1=null; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c2=null; +select * from t_pruning_TESTTABLE_1 where c2=null; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_1 where c2 IS NULL; +select * from t_pruning_TESTTABLE_1 where c2 IS NULL; -drop table t_pruning_DTS2013091303739_1; +drop table t_pruning_TESTTABLE_1; -create table t_pruning_DTS2013091303739_2(c1 int,c2 text) +create table t_pruning_TESTTABLE_2(c1 int,c2 text) partition by range(c1) ( partition p1 values less than(100), @@ -272,30 +272,30 @@ partition by range(c1) ); explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NULL; +select * from t_pruning_TESTTABLE_2 where c1 IS NULL; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NOT NULL; +select * from t_pruning_TESTTABLE_2 where c1 IS NOT NULL; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1=null; +select * from t_pruning_TESTTABLE_2 where c1=null; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where null=c1; +select * from t_pruning_TESTTABLE_2 where null=c1; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c2=null; +select * from t_pruning_TESTTABLE_2 where c2=null; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c2 IS NULL; +select * from t_pruning_TESTTABLE_2 where c2 IS NULL; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NULL and c1>150; +select * from t_pruning_TESTTABLE_2 where c1 IS NULL and c1>150; explain (ANALYZE false,VERBOSE false, COSTS false,BUFFERS false,TIMING false) -select * from t_pruning_DTS2013091303739_2 where c1 IS NULL OR c1<150; +select * from t_pruning_TESTTABLE_2 where c1 IS NULL OR c1<150; -drop table t_pruning_DTS2013091303739_2; +drop table t_pruning_TESTTABLE_2; diff --git a/src/test/regress/sql/hw_partition_pruning_multikey_2.sql b/src/test/regress/sql/hw_partition_pruning_multikey_2.sql index 2f37eac32..684fa6ec8 100644 --- a/src/test/regress/sql/hw_partition_pruning_multikey_2.sql +++ b/src/test/regress/sql/hw_partition_pruning_multikey_2.sql @@ -14,57 +14,57 @@ drop table range_table_LLT; -create table rt_DTS2014042814100 (a int, b int) +create table rt_TESTTABLE (a int, b int) partition by range(a, b) ( -partition rt_DTS2014042814100_p1 values less than(9, 3), -partition rt_DTS2014042814100_p2 values less than(10, 1) +partition rt_TESTTABLE_p1 values less than(9, 3), +partition rt_TESTTABLE_p2 values less than(10, 1) ); -insert into rt_DTS2014042814100 values (9, 4); +insert into rt_TESTTABLE values (9, 4); -select * from rt_DTS2014042814100 where b=4; +select * from rt_TESTTABLE where b=4; -drop table rt_DTS2014042814100; +drop table rt_TESTTABLE; -create table rt_DTS2014042814100 (a int, b int) +create table rt_TESTTABLE (a int, b int) partition by range(a, b) ( 
-partition rt_DTS2014042814100_p1 values less than(9, 1), -partition rt_DTS2014042814100_p2 values less than(10, 3) +partition rt_TESTTABLE_p1 values less than(9, 1), +partition rt_TESTTABLE_p2 values less than(10, 3) ); -insert into rt_DTS2014042814100 values (9, 4); +insert into rt_TESTTABLE values (9, 4); -select * from rt_DTS2014042814100 where b=4; +select * from rt_TESTTABLE where b=4; -drop table rt_DTS2014042814100; +drop table rt_TESTTABLE; -create table rt_DTS2014042814100 (a int, b int, c int) +create table rt_TESTTABLE (a int, b int, c int) partition by range(a, b, c) ( -partition rt_DTS2014042814100_p1 values less than(2, 9, 3), -partition rt_DTS2014042814100_p2 values less than(2, 10, 1) +partition rt_TESTTABLE_p1 values less than(2, 9, 3), +partition rt_TESTTABLE_p2 values less than(2, 10, 1) ); -insert into rt_DTS2014042814100 values (2, 9, 4); +insert into rt_TESTTABLE values (2, 9, 4); -select * from rt_DTS2014042814100 where c=4; +select * from rt_TESTTABLE where c=4; -drop table rt_DTS2014042814100; +drop table rt_TESTTABLE; -create table rt_DTS2014042814100 (a int, b int, c int) +create table rt_TESTTABLE (a int, b int, c int) partition by range(a, b, c) ( -partition rt_DTS2014042814100_p1 values less than(2, 9, 1), -partition rt_DTS2014042814100_p2 values less than(2, 10, 3) +partition rt_TESTTABLE_p1 values less than(2, 9, 1), +partition rt_TESTTABLE_p2 values less than(2, 10, 3) ); -insert into rt_DTS2014042814100 values (2, 9, 4); +insert into rt_TESTTABLE values (2, 9, 4); -select * from rt_DTS2014042814100 where c=4; +select * from rt_TESTTABLE where c=4; -drop table rt_DTS2014042814100; \ No newline at end of file +drop table rt_TESTTABLE; \ No newline at end of file diff --git a/src/test/regress/sql/hw_partition_select0.sql b/src/test/regress/sql/hw_partition_select0.sql index af48ec842..2b6c59380 100644 --- a/src/test/regress/sql/hw_partition_select0.sql +++ b/src/test/regress/sql/hw_partition_select0.sql @@ -500,9 +500,9 @@ EXPLAIN (COSTS OFF, NODES OFF) SELECT B FROM (SELECT B FROM HW_PARTITION_SELECT_ --EXPLAIN(COSTS OFF, NODES OFF) SELECT * FROM HW_PARTITION_SELECT_PTEST WHERE A = 500 OR A = 3000; --BITMAPSCAN --EXPLAIN(COSTS OFF, NODES OFF) SELECT A FROM HW_PARTITION_SELECT_PTEST WHERE A > 5000; -- INDEXONLYSCAN -CREATE TABLE DTS2013112504143_TEST1(A INT) PARTITION BY RANGE (A)(PARTITION DTS2013112504143_TEST1_P1 VALUES LESS THAN (10)); -CREATE TABLE DTS2013112504143_TEST2(A INT); -SELECT * FROM DTS2013112504143_TEST1 UNION ALL SELECT * FROM DTS2013112504143_TEST2 order by 1; +CREATE TABLE TESTTABLE_TEST1(A INT) PARTITION BY RANGE (A)(PARTITION TESTTABLE_TEST1_P1 VALUES LESS THAN (10)); +CREATE TABLE TESTTABLE_TEST2(A INT); +SELECT * FROM TESTTABLE_TEST1 UNION ALL SELECT * FROM TESTTABLE_TEST2 order by 1; create table hw_partition_select_rangetab( C_CHAR_3 CHAR(102400), C_INT INTEGER) diff --git a/src/test/regress/sql/hw_pbe.sql b/src/test/regress/sql/hw_pbe.sql index 58a9743c8..1ebd62f5f 100644 --- a/src/test/regress/sql/hw_pbe.sql +++ b/src/test/regress/sql/hw_pbe.sql @@ -640,9 +640,9 @@ drop table pbe_prunning_tmp; drop table pbe_prunning_000; SET enable_pbe_optimization to false; -create table t_DTS2017082304009 (id int,name text); -insert into t_DTS2017082304009 values (1,'a'),(2,'b'),(3,'c'), (4,'d'),(5,'e'); -prepare a as select * from t_DTS2017082304009 where id=$1; +create table t_TESTTABLE (id int,name text); +insert into t_TESTTABLE values (1,'a'),(2,'b'),(3,'c'), (4,'d'),(5,'e'); +prepare a as select * from t_TESTTABLE where id=$1; execute a 
(1); execute a (2); execute a (3); @@ -650,7 +650,7 @@ execute a (4); execute a (5); execute a (1); deallocate a; -drop table t_DTS2017082304009; +drop table t_TESTTABLE; -- Bugfix for FQS multi-table join create table t1(id1 int, id2 int, num int); @@ -770,11 +770,11 @@ drop table t1_xc_fqs; drop table t2_xc_fqs; set enable_pbe_optimization to true; -create table DTS2018081308755_t1 (id int, num int); -insert into DTS2018081308755_t1 values (1,1),(20,20); -explain analyze create table DTS2018081308755_t2 as select * from DTS2018081308755_t1; -drop table DTS2018081308755_t2; -drop table DTS2018081308755_t1; +create table TESTTABLE_t1 (id int, num int); +insert into TESTTABLE_t1 values (1,1),(20,20); +explain analyze create table TESTTABLE_t2 as select * from TESTTABLE_t1; +drop table TESTTABLE_t2; +drop table TESTTABLE_t1; create table cs_his(segment1 varchar(15), period_name varchar(15), currency_code varchar(15), frequency_code varchar(15), segment3 varchar(15), segment3_desc varchar(200), end_balance_dr numeric, end_balance_cr numeric) with (orientation=column); create table bs_his(row_desc varchar(800),row_id numeric,amount1 numeric, amount2 numeric, period_name varchar(80), frequency_code varchar(80), currency_code varchar(80), segment1 varchar(80)) with (orientation=column); diff --git a/src/test/regress/sql/hw_pct_type_and_rowtype.sql b/src/test/regress/sql/hw_pct_type_and_rowtype.sql new file mode 100644 index 000000000..c2bbfabed --- /dev/null +++ b/src/test/regress/sql/hw_pct_type_and_rowtype.sql @@ -0,0 +1,782 @@ +--For Composite Type with %TYPE and %ROWTYPE + +-- check compatibility -- +show sql_compatibility; -- expect ORA -- +-- create new schema -- +drop schema if exists sql_compositetype; +create schema sql_compositetype; +set current_schema = sql_compositetype; + +-- initialize table and type-- +create type ctype1 as (a int, b int); +create table foo(a int, b ctype1); + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +--general create type with %TYPE and %ROWTYPE +create type ctype2 as (a foo.a%TYPE, b foo.b%TYPE); +create type ctype3 as (a foo%ROWTYPE); + +--create type with schema of table +create type ctype4 as (a sql_compositetype.foo.a%TYPE, b sql_compositetype.foo.b%TYPE); +create type ctype5 as (a sql_compositetype.foo%ROWTYPE); + +--create type with database and schema of table +create type ctype6 as (a regression.sql_compositetype.foo.a%TYPE, b regression.sql_compositetype.foo.b%TYPE); +create type ctype7 as (a regression.sql_compositetype.foo%ROWTYPE); + +--ERROR: %TYPE with table is not allowed +create type ctype8 as (a foo%TYPE); +create type ctype9 as (a sql_compositetype.foo%TYPE); +create type ctype10 as (a regression.sql_compositetype.foo%TYPE); + +--ERROR: %ROWTYPE with attribute is not allowed +create type ctype11 as (a foo.a%ROWTYPE, b foo.b%ROWTYPE); +create type ctype12 as (a sql_compositetype.foo.a%ROWTYPE, b sql_compositetype.foo.b%ROWTYPE); +create type ctype13 as (a regression.sql_compositetype.foo.a%ROWTYPE, b regression.sql_compositetype.foo.b%ROWTYPE); + +--ERROR: %TYPE and %ROWTYPE with type is not allowed +create type ctype14 as (a ctype1%TYPE); +create type ctype15 as (a ctype1%ROWTYPE); + +--ERROR: %ROWTYPE with incorrect database or schema is not allowed +create type ctype16 as (a postgres.sql_compositetype.foo%ROWTYPE, b postgres.sql_compositetype.foo%ROWTYPE); +create type ctype16 as (a regression.sql.foo%ROWTYPE, b 
regression.sql.foo%ROWTYPE); +create type ctype16 as (a sql.foo%ROWTYPE, b sql.foo%ROWTYPE); + +--ERROR: %ROWTYPE with more than 4 dots is not allowed +create type ctype16 as (a regression.sql_compositetype.foo.a%ROWTYPE, b regression.sql_compositetype.foo.b%ROWTYPE); +create type ctype16 as (a postgres.regression.sql_compositetype.foo.a%ROWTYPE, b postgres.regression.sql_compositetype.foo.b%ROWTYPE); + +--test select stmt for %TYPE and %ROWTYPE +create table t1(a int , b int); +create table t2(a int, b t1); +insert into t2 values(1,(2,3)); +drop function if exists get_t2; +create or replace function get_t2() RETURNS record as $$ +declare + v_a record; +begin + select * into v_a from t2; + return v_a; +end; +$$ language plpgsql; +select t.b from get_t2() as t (a t2.a%TYPE, b t1%ROWtype); + +--test update stmt for %TYPE and %ROWTYPE +update t2 SET a = t.a + 1 from get_t2() as t (a t2.a%TYPE, b t1%ROWtype); +select * from t2; + +--test alter type for %TYPE and %ROWTYPE +ALTER TYPE ctype2 ADD ATTRIBUTE c foo.a%TYPE; +ALTER TYPE ctype2 ADD ATTRIBUTE d foo%ROWTYPE; + +--test drop function for %TYPE and %ROWTYPE +create or replace function get_int(i int) RETURNS int as $$ +begin + return i.a; +end; +$$ language plpgsql; +drop function get_int(i foo.b%TYPE); --should fail +drop function get_int(i foo%ROWTYPE); --should fail +drop function get_int(foo.a%TYPE); --should succeed + +create or replace function get_int(i foo%ROWTYPE) RETURNS int as $$ +begin + return i.a; +end; +$$ language plpgsql; +drop function get_int(i foo.b%TYPE); --should fail +drop function get_int(i t2%ROWTYPE); --should fail +drop function get_int(i foo%ROWTYPE); --should succeed + +--test whether typmod is preserved by %TYPE and %ROWTYPE +create table v1(a int, b varchar(2)); +create type ctype17 as (a v1.b%TYPE[],b v1%ROWTYPE); +create table v2(a int, b ctype17); +insert into v2 values(1, (array['aa','bb'], (2,'cc'))); +select * from v2; +insert into v2 values(1, (array['aaa','bb'], (2,'cc'))); +insert into v2 values(1, (array['aa','bb'], (2,'ccc'))); + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- + +drop table if exists v2; +drop type if exists ctype17; +drop table if exists v1; +drop function if exists get_t2; +drop table if exists t2; +drop table if exists t1; +drop type if exists ctype16; +drop type if exists ctype15; +drop type if exists ctype14; +drop type if exists ctype13; +drop type if exists ctype12; +drop type if exists ctype11; +drop type if exists ctype10; +drop type if exists ctype9; +drop type if exists ctype8; +drop type if exists ctype7; +drop type if exists ctype6; +drop type if exists ctype5; +drop type if exists ctype4; +drop type if exists ctype3; +drop type if exists ctype2; +drop table if exists foo; +drop type if exists ctype1; + +-- clean up -- +drop schema if exists sql_compositetype cascade; + + + + + + + + + + + + + + + +--For Function Return Type with %TYPE, %ROWTYPE and [] + +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + +-- create new schema -- +drop schema if exists sql_functionreturn; +create schema sql_functionreturn; +set current_schema = sql_functionreturn; + +-- initialize table and type-- +create type ctype1 as (a int, b int); +create table foo(a int, b ctype1); + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +--test function return type for 
%TYPE with composite type +create or replace function get_ctype1 RETURNS foo.b%TYPE as $$ +declare + v_a ctype1; + begin + v_a := (1,2); + return v_a; +end; +$$ language plpgsql; +select get_ctype1(); + +--test function return type for %TYPE with simple type +create or replace function get_int RETURNS foo.a%TYPE as $$ +declare + v_a int; +begin + v_a := 1; + return v_a; +end; +$$ language plpgsql; +select get_int(); + +--test function return type for %TYPE[] with simple type +create or replace function get_intarray RETURNS foo.a%TYPE[] as $$ +declare + type arr is VARRAY(10) of int; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := 1; + v_a.extend(1); + v_a(2) := 2; + return v_a; +end; +$$ language plpgsql; +select get_intarray(); + +--test function return type for %TYPE[] with composite type +create or replace function get_ctype1array RETURNS foo.b%TYPE[] as $$ +declare + type arr is VARRAY(10) of ctype1; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := (1,2); + v_a.extend(1); + v_a(2) := (3,4); + return v_a; +end; +$$ language plpgsql; +select get_ctype1array(); + +--test function return type for %ROWTYPE +create or replace function get_foo RETURNS foo%ROWTYPE as $$ +declare + v_a foo; +begin + v_a := (1,(2,3)); +return v_a; +end; +$$ language plpgsql; +select get_foo(); + +--test function return type for %ROWTYPE[] +create or replace function get_fooarray RETURNS foo%ROWTYPE[] as $$ +declare + type arr is VARRAY(10) of foo; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := (1,(2,3)); + v_a.extend(1); + v_a(2) := (4,(5,6)); + return v_a; +end; +$$ language plpgsql; +select get_fooarray(); + +--test function return type for SETOF %TYPE[] with simple type +create or replace function get_set_intarray RETURNS SETOF foo.a%TYPE[] as $$ +declare + type arr is VARRAY(10) of int; + v_a arr := arr(); +begin + v_a.extend(1); + v_a(1) := 1; + RETURN NEXT v_a; + v_a.extend(1); + v_a(2) := 2; + RETURN NEXT v_a; + return; +end; +$$ language plpgsql; +select get_set_intarray(); + +--test %TYPE for variable +create or replace function f1(ss in int) return int as + va foo%ROWTYPE; + vb va.b%TYPE; + vc va.a%TYPE; +begin + va := (1, (2, 3)); + vb := (4, 5); + vc := 6; + raise info '% % %',va , vb, vc; + vb.a := vc; + va.b := vb; + va.a := vc; + raise info '% % %',va , vb, vc; + return va.a; +end; +/ +select f1(1); + +--ERROR: test %TYPE for variable, nonexistent field +create or replace function f1(ss in int) return int as + va foo%ROWTYPE; + vb va.b%TYPE; + vc va.c%TYPE; +begin + va := (1, (2, 3)); + vb := (4, 5); + vc := 6; + raise info '% % %',va , vb, vc; + vb.a := vc; + va.b := vb; + va.a := vc; + raise info '% % %',va , vb, vc; + return va.a; +end; +/ +DROP function f1(); + +--test synonym type +DROP SCHEMA IF EXISTS sql_compositetype_test; +CREATE SCHEMA sql_compositetype_test; + +CREATE TABLE sql_compositetype_test.tabfoo(a int, b int); +CREATE TYPE sql_compositetype_test.compfoo AS (f1 int, f2 text); + +CREATE OR REPLACE SYNONYM tabfoo for sql_compositetype_test.tabfoo; +CREATE OR REPLACE SYNONYM compfoo for sql_compositetype_test.compfoo; + +create table t1 (a int, b compfoo); +CREATE OR REPLACE PROCEDURE pro_test_tab (in_tabfoo tabfoo%rowtype) +AS +BEGIN +END; +/ + +--create the users as sysadmin to avoid privilege problems +create user synonym_user_1 password 'hauwei@123' sysadmin; +create user synonym_user_2 password 'hauwei@123' sysadmin; +--grant schema privileges +grant all privileges on schema sql_functionreturn to synonym_user_1; +grant all privileges on schema sql_functionreturn to synonym_user_2; +--create the test table +drop table 
if exists synonym_user_1.tb_test; +create table synonym_user_1.tb_test(col1 int,col2 int); +--create the synonyms +create or replace synonym sql_functionreturn.tb_test for synonym_user_1.tb_test; +create or replace synonym synonym_user_2.tb_test for synonym_user_1.tb_test; +--create the test package +create or replace package synonym_user_2.pckg_test as +v_a tb_test.col1%type; +v_b tb_test%rowtype; +procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type); +function func_test(i_col1 in int) return tb_test.col1%type; +function func_test1(i_col1 in int) return tb_test%rowtype; +end pckg_test; +/ +create or replace package body synonym_user_2.pckg_test as +procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type)as +begin +select col1 into o_ret from tb_test where col1=i_col1; +end; +function func_test(i_col1 in int) return tb_test.col1%type as +begin +select col1 into v_a from tb_test where col1=i_col1; +return v_a; +end; +function func_test1(i_col1 in int) return tb_test%rowtype as +begin +for rec in (select col1,col2 from tb_test where col1=i_col1) loop +v_b.col1:=rec.col1; +v_b.col2:=rec.col2; +end loop; +return v_b; +end; +end pckg_test; +/ +DROP PACKAGE synonym_user_2.pckg_test; +DROP USER synonym_user_2 CASCADE; +DROP USER synonym_user_1 CASCADE; + +--test public synonym in PLpgSQL +set behavior_compat_options= 'bind_procedure_searchpath'; +--create the users as sysadmin to avoid permission issues +create user synonym_user_1 password 'Gauss_234' sysadmin; +create user synonym_user_2 password 'Gauss_234' sysadmin; +--grant privileges on schema public +grant all privileges on schema public to synonym_user_1; +grant all privileges on schema public to synonym_user_2; +--create the test table +drop table if exists synonym_user_1.tb_test; +create table synonym_user_1.tb_test(col1 int,col2 int); +--create the synonym +create or replace synonym public.tb_test for synonym_user_1.tb_test; +create or replace package synonym_user_2.pckg_test as +v_a tb_test.col1%type; +v_b tb_test%rowtype; +procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type); +function func_test(i_col1 in int) return tb_test.col1%type; +function func_test1(i_col1 in int) return tb_test%rowtype; +end pckg_test; +/ +create or replace package body synonym_user_2.pckg_test as +procedure proc_test(i_col1 in tb_test.col1%type,o_ret out tb_test.col1%type)as +begin +select col1 into o_ret from tb_test where col1=i_col1; +end; +function func_test(i_col1 in int) return tb_test.col1%type as +begin +select col1 into v_a from tb_test where col1=i_col1; +return v_a; +end; +function func_test1(i_col1 in int) return tb_test%rowtype as +begin +for rec in (select col1,col2 from tb_test where col1=i_col1) loop +v_b.col1:=rec.col1; +v_b.col2:=rec.col2; +end loop; +return v_b; +end; +end pckg_test; +/ +DROP PACKAGE synonym_user_2.pckg_test; +DROP USER synonym_user_2 CASCADE; +DROP USER synonym_user_1 CASCADE; +reset behavior_compat_options; + +--test synonyms referencing a procedure and a package +set behavior_compat_options= 'bind_procedure_searchpath'; +--create the users as sysadmin to avoid permission issues +create user synonym_user_1 password 'Gauss_234' sysadmin; +create user synonym_user_2 password 'Gauss_234' sysadmin; +--grant privileges on schema public +grant all privileges on schema public to synonym_user_1; +grant all privileges on schema public to synonym_user_2; +set current_schema = public; +--create the test procedure +create or replace procedure synonym_user_1.proc_test()as +begin +raise info 'test procedure'; +end; +/ +--create the test package +create or replace package synonym_user_1.pckg_test1 as +procedure proc_test2(); +end pckg_test1; +/ +create or replace package body 
synonym_user_1.pckg_test1 as +procedure proc_test2()as +begin +raise info 'test package procedure'; +end; +end pckg_test1; +/ +--create the synonyms +create or replace synonym public.proc_test for synonym_user_1.proc_test; +create or replace synonym public.pckg_test1 for synonym_user_1.pckg_test1; +show search_path; +--create the test package +create or replace package synonym_user_2.pckg_test as +procedure proc_test1(); +end pckg_test; +/ +create or replace package body synonym_user_2.pckg_test as +procedure proc_test1()as + +begin +proc_test(); +pckg_test1.proc_test2(); +end; +end pckg_test; +/ + +call synonym_user_2.pckg_test.proc_test1(); +DROP PACKAGE synonym_user_2.pckg_test; +DROP PACKAGE synonym_user_1.pckg_test1; +DROP PROCEDURE synonym_user_1.proc_test(); +DROP USER synonym_user_2 CASCADE; +DROP USER synonym_user_1 CASCADE; +reset behavior_compat_options; + +--test Package return record.col%TYPE +create schema synonym_schema1; +create schema synonym_schema2; + +set search_path = synonym_schema1; + +--test references within the package +create or replace package p_test1 as + type t1 is record(c1 varchar2, c2 int); + function f1(ss in t1) return t1.c2%TYPE; +end p_test1; +/ +create or replace package body p_test1 as + function f1(ss in t1) return t1.c2%TYPE as + begin + return ss.c2; + end; +end p_test1; +/ + +select p_test1.f1(('aa',5)); + +--test cross-package references +create or replace package p_test2 as + function ff1(ss in p_test1.t1) return p_test1.t1.c2%TYPE; + va p_test1.t1.c2%TYPE; +end p_test2; +/ +create or replace package body p_test2 as + vb p_test1.t1.c2%TYPE; + function ff1(ss in p_test1.t1) return p_test1.t1.c2%TYPE as + begin + return ss.c2; + end; +end p_test2; +/ + +select p_test2.ff1(('aa',55)); + +--test cross-schema package references +set search_path = synonym_schema2; + +create or replace package p_test2 as + function fff1(ss in synonym_schema1.p_test1.t1) return synonym_schema1.p_test1.t1.c2%TYPE; +end p_test2; +/ +create or replace package body p_test2 as + function fff1(ss in synonym_schema1.p_test1.t1) return synonym_schema1.p_test1.t1.c2%TYPE as + begin + return ss.c2; + end; +end p_test2; +/ + +select p_test2.fff1(('aa',555)); + +DROP PACKAGE p_test2; +DROP PACKAGE synonym_schema1.p_test2; +DROP PACKAGE synonym_schema1.p_test1; + +DROP SCHEMA synonym_schema2 CASCADE; +DROP SCHEMA synonym_schema1 CASCADE; + +set current_schema = sql_functionreturn; + +--test pkg.val%TYPE +create schema synonym_schema1; +create schema synonym_schema2; + +set search_path = synonym_schema1; + +create or replace package pck1 is +va int; +end pck1; +/ +create or replace package body pck1 as + function f1(ss in int) return int as + begin + return ss; + end; +end pck1; +/ + +--test cross-package references +create or replace package p_test2 as + va pck1.va%TYPE; + procedure p1 (a pck1.va%TYPE); +end p_test2; +/ +create or replace package body p_test2 as + procedure p1 (a pck1.va%TYPE) as + begin + NULL; + end; +end p_test2; +/ + +--test cross-schema package references +set search_path = synonym_schema2; +create or replace package p_test2 as + va synonym_schema1.pck1.va%TYPE; + procedure p1 (a synonym_schema1.pck1.va%TYPE); +end p_test2; +/ +create or replace package body p_test2 as + procedure p1 (a synonym_schema1.pck1.va%TYPE) as + begin + NULL; + end; +end p_test2; +/ + +DROP PACKAGE p_test2; +DROP PACKAGE synonym_schema1.p_test2; +DROP PACKAGE synonym_schema1.pck1; + +DROP SCHEMA synonym_schema2 CASCADE; +DROP SCHEMA synonym_schema1 CASCADE; + +set current_schema = sql_functionreturn; + +--test a keyword used as a table name with keyword.col%TYPE +DROP TABLE if EXISTS type; +CREATE TABLE 
type(a int, b int); +create or replace package p_test1 as + type r1 is record(c1 type%ROWTYPE, c2 int); + procedure p1 (a in type%ROWTYPE); +end p_test1; +/ +create or replace procedure func1 as + va type%rowtype; + type r1 is record(c1 type%rowtype,c2 varchar2(20)); +begin + va := 'a'; + raise info '%',va; + va := 'b'; + raise info '%',va; +end; +/ + +DROP PROCEDURE func1; +DROP PACKAGE p_test1; +DROP TABLE type; + +--test row type default value +CREATE TABLE tb_test(a int, b int); +create or replace package pckg_test as +procedure proc_test(i_col1 in tb_test); +end pckg_test; +/ +create or replace package body pckg_test as +procedure proc_test(i_col1 in tb_test)as +v_idx tb_test%rowtype:=i_col1; +v_idx2 tb_test%rowtype; +begin +raise info '%', v_idx; +raise info '%', v_idx2; +end; +end pckg_test; +/ +call pckg_test.proc_test((11,22)); + +drop package pckg_test; +drop table tb_test; + +-- test record type default value +create or replace procedure p1 is +type r1 is record (a int :=1,b int :=2); +va r1 := (2,3); +begin +raise info '%', va; +end; +/ +call p1(); +create or replace procedure p1 is +type r1 is record (a int :=1,b int :=2); +va r1 := NULL; +begin +raise info '%', va; +end; +/ +call p1(); + +DROP procedure p1; + +--test record type default value in package +create or replace package pck1 is +type t1 is record(c1 int := 1, c2 int := 2); +va t1 := (4,5); +end pck1; +/ + +declare +begin +raise info '%',pck1.va; +end; +/ + +DROP package pck1; + +-- test rowVar%TYPE +-- (1) in procedure +create table test1 (a int , b int); +create or replace procedure p1() is +va test1%ROWTYPE; +vb va%ROWTYPE; +begin +vb := (1,2); +raise info '%',vb; +end; +/ +call p1(); +-- (1) record var%TYPE, should error +create or replace procedure p1() is +TYPE r1 is record (a int, b int); +va r1; +vb va%ROWTYPE; +begin +vb := (1,2); +raise info '%',vb; +end; +/ + +-- (2) in package +create or replace package pck1 is +va test1%ROWTYPE; +vb va%ROWTYPE; +end pck1; +/ +drop package pck1; +-- (2) record var%TYPE, should error +create or replace package pck1 is +TYPE r1 is record (a varchar(10), b int); +va r1; +vb va%ROWTYPE; +end pck1; +/ +drop package pck1; +-- (3) across package +create or replace package pck1 is +va test1%ROWTYPE; +end pck1; +/ + +create or replace package pck2 is +va pck1.va%ROWTYPE := (2,3); +end pck2; +/ + +declare +begin +raise info '%', pck2.va; +end; +/ +drop package pck2; +drop package pck1; +-- (3) record var%TYPE, should error +create or replace package pck1 is +TYPE r1 is record (a varchar(10), b int); +va r1; +end pck1; +/ + +create or replace package pck2 is +va pck1.va%ROWTYPE := (2,3); +end pck2; +/ +DROP PACKAGE pck2; +DROP PACKAGE pck1; +DROP PROCEDURE p1(); +DROP TABLE test1; +-- test array var%TYPE +create or replace procedure p1() is +type r1 is varray(10) of int; +va r1; +vb va%TYPE; +begin +vb(1) := 1; +raise info '%',vb; +end; +/ +call p1(); + +create or replace procedure p1() is +type r1 is table of int index by varchar2(10); +va r1; +vb va%TYPE; +begin +vb('aaa') := 1; +raise info '%',vb; +end; +/ + +call p1(); + +DROP PROCEDURE p1; + + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- + +drop procedure if exists pro_test_tab; +drop table if exists t1; +drop synonym if exists compfoo; +drop synonym if exists tabfoo; +drop type if exists sql_compositetype_test.compfoo; +drop table if exists sql_compositetype_test.tabfoo; +drop function if exists get_set_intarray; 
+drop function if exists get_fooarray; +drop function if exists get_foo; +drop function if exists get_ctype1array; +drop function if exists get_intarray; +drop function if exists get_int; +drop function if exists get_ctype1; +drop table if exists foo; +drop type if exists ctype1; + +-- clean up -- +drop schema if exists sql_compositetype_test cascade; +drop schema if exists sql_functionreturn cascade; diff --git a/src/test/regress/sql/hw_subpartition_add_drop_partition.sql b/src/test/regress/sql/hw_subpartition_add_drop_partition.sql new file mode 100644 index 000000000..73effcad7 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_add_drop_partition.sql @@ -0,0 +1,1287 @@ +DROP SCHEMA hw_subpartition_add_drop_partition CASCADE; +CREATE SCHEMA hw_subpartition_add_drop_partition; +SET CURRENT_SCHEMA TO hw_subpartition_add_drop_partition; + +-- +----range-range table---- +-- +--prepare +CREATE TABLE range_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01') + ) +); +INSERT INTO range_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE range_range_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1500) + ( + SUBPARTITION customer_temp1_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer_temp1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer_temp1_2010 VALUES LESS THAN ('2012-01-01'), + SUBPARTITION customer_temp1_2011 VALUES LESS THAN ('2011-01-01') + ); +--success, add 4 subpartitions +ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01') + ); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp2 VALUES LESS THAN (1100); +--fail, invalid format +ALTER TABLE range_range_sales ADD PARTITION customer_temp3 VALUES (1300); +--success, add 1 default subpartition +ALTER TABLE range_range_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp4 VALUES LESS THAN (1800); +--success, add 1 
subpartition +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES LESS THAN ('2015-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('2011-01-01'); +--fail, invalid format +ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_range_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_range_sales DROP PARTITION customer2; +--success +ALTER TABLE range_range_sales DROP SUBPARTITION customer1_2008; +--fail, the only subpartition +ALTER TABLE range_range_sales DROP SUBPARTITION customer4_all; +--success, drop partition customer3 +ALTER TABLE range_range_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP PARTITION FOR (400, '2010-01-01'); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP SUBPARTITION FOR (1400); +--fail, invalid type +ALTER TABLE range_range_sales DROP PARTITION FOR ('2010-01-01'); +--fail, invalid type +ALTER TABLE range_range_sales DROP SUBPARTITION FOR ('2010-01-01', 1400); +--success, drop subpartition customer5_2010 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2010-01-01'); +--fail, the only subpartition in customer6 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(2000, '2009-01-01'); +--fail, no subpartition found +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2012-01-01'); + +--check for ok after drop +SELECT count(*) FROM range_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND 
(p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_range_sales + +-- +----range-list table---- +-- +--prepare +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE range_list_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1500) + ( + SUBPARTITION customer_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer_temp1_channel3 VALUES ('6', '7', '5') + ); +--fail, value conflict +ALTER TABLE range_list_sales ADD PARTITION customer_temp2 VALUES LESS THAN (1500) + ( + SUBPARTITION customer_temp2_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer_temp2_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer_temp2_channel3 VALUES ('6', '7', '8', '7', '8') + ); +--success, add 4 subpartitions +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1100); +--fail, invalid format +ALTER TABLE range_list_sales ADD PARTITION customer_temp4 VALUES (1300); +--success, add 1 default subpartition +ALTER TABLE range_list_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp5 VALUES LESS THAN (1800); +--success, add 1 subpartition +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES ('X'); +--fail, invalid format +ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE 
c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_list_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_list_sales DROP PARTITION customer2; +--success +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; +--fail, the only subpartition +ALTER TABLE range_list_sales DROP SUBPARTITION customer4_channel1; +--success, drop partition customer3 +ALTER TABLE range_list_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP PARTITION FOR (400, '4'); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP SUBPARTITION FOR (1400); +--fail, invalid type +ALTER TABLE range_list_sales DROP PARTITION FOR ('abc'); +--fail, invalid type +ALTER TABLE range_list_sales DROP SUBPARTITION FOR ('abc', 1400); +--success, drop subpartition customer5_channel3 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1400, '7'); +--fail, the only subpartition in customer6 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(2000, 'X'); +--fail, no subpartition found +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1100, 'X'); + +--check for ok after drop +SELECT count(*) FROM range_list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_list_sales + +-- +----range-hash table---- +-- +--prepare +CREATE TABLE range_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY HASH (product_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_product1, + SUBPARTITION customer1_product2, + SUBPARTITION customer1_product3, + SUBPARTITION customer1_product4 + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_product1, + SUBPARTITION customer2_product2 + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_product1 + ) +); 
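+-- NOTE: hash routing depends on the number of subpartitions, so adding or dropping an +-- individual hash subpartition is not supported; the cases below marked 'not support' expect an error. 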
+INSERT INTO range_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_hash_sales_idx ON range_hash_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartitions +ALTER TABLE range_hash_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_product1, + SUBPARTITION customer5_product2, + SUBPARTITION customer5_product3, + SUBPARTITION customer5_product4 + ); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +--fail, invalid format +ALTER TABLE range_hash_sales ADD PARTITION customer_temp2 VALUES (1300); +--success, add 1 default subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +--fail, not support add hash +ALTER TABLE range_hash_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; +--fail, invalid format +ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_hash_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_hash_sales DROP PARTITION customer2; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer1_product1; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer4_product1; +--success, drop partition customer3 +ALTER TABLE range_hash_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_hash_sales DROP PARTITION FOR (400, '2010-01-01'); +--fail, invalid type +ALTER TABLE range_hash_sales DROP PARTITION FOR ('2010-01-01'); +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION FOR(1400, 1); + +--check for ok after drop +SELECT count(*) FROM range_hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) 
+ )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_hash_sales + +-- +----list-range table---- +-- +--prepare +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); +INSERT INTO list_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp1 VALUES ('X') + ( + SUBPARTITION channel_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel_temp1_customer4 VALUES LESS THAN (700) + ); +--success, add 4 subpartitions +ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_customer1 VALUES LESS THAN (200), + SUBPARTITION channel5_customer2 VALUES LESS THAN (500), + SUBPARTITION channel5_customer3 VALUES LESS THAN (800), + SUBPARTITION channel5_customer4 VALUES LESS THAN (1200) + ); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp2 VALUES ('0', 'Z', 'C'); +--fail, invalid format +ALTER TABLE list_range_sales ADD PARTITION channel_temp3 VALUES LESS THAN ('Z'); +--success, add 1 default subpartition +ALTER TABLE list_range_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp4 VALUES ('M', 'X'); +--success, add 1 subpartition +ALTER TABLE list_range_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_customer5 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES LESS THAN (2000); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES LESS THAN (2000); +--fail, invalid format +ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE 
c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_range_sales + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_range_sales DROP PARTITION channel2; +--success +ALTER TABLE list_range_sales DROP SUBPARTITION channel1_customer1; +--fail, the only subpartition +ALTER TABLE list_range_sales DROP SUBPARTITION channel4_customer1; +--success, drop partition channel3 +ALTER TABLE list_range_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP PARTITION FOR('X', 700); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X'); +--fail, invalid type +ALTER TABLE list_range_sales DROP PARTITION FOR (10); +--fail, invalid type +ALTER TABLE list_range_sales DROP SUBPARTITION FOR(700, 'X'); +--success, drop subpartition channel5_customer3 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 700); +--fail, the only subpartition in channel6 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('K', 100); +--fail, no subpartition found +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 2500); + +--check for ok after drop +SELECT count(*) FROM list_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_range_sales + +-- +----list-list table---- +-- +--prepare +CREATE TABLE list_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY LIST (type_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_type1 VALUES (0, 1, 2), + SUBPARTITION channel1_type2 VALUES (3, 4), + SUBPARTITION channel1_type3 VALUES (5, 6, 7), + SUBPARTITION channel1_type4 VALUES (8, 9) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_type1 VALUES (0, 1, 2, 3), + SUBPARTITION channel2_type2 VALUES (DEFAULT) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + 
SUBPARTITION channel4_type1 VALUES (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + ) +); +INSERT INTO list_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_list_sales_idx ON list_list_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp1 VALUES ('X') + ( + SUBPARTITION channel_temp1_type1 VALUES (0, 1, 2), + SUBPARTITION channel_temp1_type2 VALUES (3, 4, 5), + SUBPARTITION channel_temp1_type3 VALUES (6, 7, 5) + ); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp2 VALUES ('X') + ( + SUBPARTITION channel_temp2_type1 VALUES (0, 1, 2), + SUBPARTITION channel_temp2_type2 VALUES (3, 4, 5), + SUBPARTITION channel_temp2_type3 VALUES (6, 7, 8, 7, 8) + ); +--success, add 4 subpartitions +ALTER TABLE list_list_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_type1 VALUES (0, 1, 2), + SUBPARTITION channel5_type2 VALUES (3, 4), + SUBPARTITION channel5_type3 VALUES (5, 6, 7), + SUBPARTITION channel5_type4 VALUES (8, 9) + ); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp3 VALUES ('0', 'Z', 'C'); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp4 VALUES ('Z', 'Z', 'C'); +--fail, invalid format +ALTER TABLE list_list_sales ADD PARTITION channel_temp5 VALUES LESS THAN ('Z'); +--success, add 1 default subpartition +ALTER TABLE list_list_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp6 VALUES ('M', 'X'); +--success, add 1 subpartition +ALTER TABLE list_list_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_type5 VALUES (DEFAULT); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES (10, 11, 12); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES (10, 11, 12); +--fail, invalid format +ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_list_sales + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_list_sales DROP PARTITION channel2; +--success +ALTER TABLE list_list_sales DROP SUBPARTITION channel1_type1; +--fail, the only subpartition +ALTER TABLE list_list_sales DROP SUBPARTITION channel4_type1; +--success, drop 
partition channel3 +ALTER TABLE list_list_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP PARTITION FOR('X', 6); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X'); +--fail, invalid type +ALTER TABLE list_list_sales DROP PARTITION FOR (10); +--fail, invalid type +ALTER TABLE list_list_sales DROP SUBPARTITION FOR(10, 'X'); +--success, drop subpartition channel5_type3 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 6); +--fail, the only subpartition in channel6 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('K', 10); +--fail, no subpartition found +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 5); + +--check for ok after drop +SELECT count(*) FROM list_list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_list_sales + +-- +----list-hash table---- +-- +--prepare +CREATE TABLE list_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY LIST (channel_id) SUBPARTITION BY HASH (product_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_product1, + SUBPARTITION channel1_product2, + SUBPARTITION channel1_product3, + SUBPARTITION channel1_product4 + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_product1, + SUBPARTITION channel2_product2 + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_product1 + ) +); +INSERT INTO list_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_hash_sales_idx ON list_hash_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartitions +ALTER TABLE list_hash_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_product1, + SUBPARTITION channel5_product2, + SUBPARTITION channel5_product3, + SUBPARTITION channel5_product4 + ); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp2 VALUES ('Z', 'Z', 'C'); +--fail, invalid format +ALTER TABLE list_hash_sales ADD PARTITION channel_temp3 VALUES LESS THAN ('Z'); +--success, add 1 default subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION 
channel_temp4 VALUES ('M', 'X'); +--fail, not support add hash +ALTER TABLE list_hash_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; +--fail, invalid format +ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_hash_sales + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_hash_sales DROP PARTITION channel2; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel1_product1; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel4_product1; +--success, drop partition channel3 +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6', '2010-01-01'); +--fail, invalid type +ALTER TABLE list_hash_sales DROP PARTITION FOR (10); +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION FOR('X', 6); + +--check for ok after drop +SELECT count(*) FROM list_hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_hash_sales + +-- +----hash-range table---- +-- +--prepare +CREATE TABLE hash_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1 VALUES LESS THAN (200), + SUBPARTITION product1_customer2 VALUES LESS THAN (500), + SUBPARTITION product1_customer3 VALUES LESS THAN (800), + SUBPARTITION product1_customer4 VALUES LESS THAN (1200) + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1 VALUES LESS THAN (500), + SUBPARTITION 
product2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 VALUES LESS THAN (1200) + ) +); +INSERT INTO hash_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_range_sales_idx ON hash_range_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_customer4 VALUES LESS THAN (1200) + ); +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp2; +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_customer5 VALUES LESS THAN (1800); +--fail, out of range +ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES LESS THAN (1800); +--fail, invalid format +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT); +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_range_sales + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION product2; +--success, drop subpartition product1_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer1; +--success, drop subpartition product4_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer1; +--fail, the only subpartition in product4 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer2; +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0); +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0, 100); +--fail, number not equal to the number of partkey +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0); +--fail, invalid type +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR('2010-01-01', 100); +--success, drop subpartition product1_customer2, though this operation is not recommended +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 100); +--fail, no subpartition found +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 2300); + 
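+-- NOTE: under a hash parent, DROP SUBPARTITION FOR(value, ...) resolves the target partition from +-- the hash of the first value, so which subpartition is dropped is not obvious from the literal; +-- this is presumably why the successful FOR(0, 100) case above is flagged as not recommended. 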
+--check for ok after drop +SELECT count(*) FROM hash_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_range_sales + +-- +----hash-list table---- +-- +--prepare +CREATE TABLE hash_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION product1 + ( + SUBPARTITION product1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product1_channel4 VALUES ('9') + ), + PARTITION product2 + ( + SUBPARTITION product2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION product2_channel2 VALUES (DEFAULT) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO hash_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_list_sales_idx ON hash_list_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product_temp1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product_temp1_channel4 VALUES ('9') + ); +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp2; +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES ('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_temp1 VALUES ('X'); +--fail, invalid format +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE); +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND 
n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_list_sales + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION product2; +--success, drop subpartition product1_channel1 +ALTER TABLE hash_list_sales DROP SUBPARTITION product1_channel1; +--success, drop subpartition product4_channel1 +ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel1; +--fail, the only subpartition in product4 +ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel2; +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION FOR(0); +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION FOR(0, '4'); +--fail, number not equal to the number of partkey +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0); +--fail, invalid type +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR('2010-01-01', '4'); +--success, drop subpartition product1_channel2, though this operation is not recommended +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, '4'); +--fail, no subpartition found +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, 'Z'); + +--check for ok after drop +SELECT count(*) FROM hash_list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_list_sales + +-- +----hash-hash table---- +-- +--prepare +CREATE TABLE hash_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) +PARTITION BY HASH (product_id) SUBPARTITION BY HASH (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1, + SUBPARTITION product1_customer2, + SUBPARTITION product1_customer3, + SUBPARTITION product1_customer4 + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1, + SUBPARTITION product2_customer2 + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 + ) +); +INSERT INTO hash_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + 
generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_hash_sales_idx ON hash_hash_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1, + SUBPARTITION product_temp1_customer2, + SUBPARTITION product_temp1_customer3, + SUBPARTITION product_temp1_customer4 + ); +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; +--fail, not support add hash +ALTER TABLE hash_hash_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_hash_sales + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION product2; +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product1_customer1; +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product4_customer1; +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0); +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0, 0); +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0, 0); +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0); + +--check for ok after drop +SELECT count(*) FROM hash_hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_hash_sales + +--finish +DROP TABLE range_range_sales; +DROP TABLE range_list_sales; +DROP TABLE range_hash_sales; +DROP TABLE list_range_sales; +DROP TABLE list_list_sales; +DROP TABLE list_hash_sales; +DROP TABLE hash_range_sales; +DROP TABLE hash_list_sales; +DROP TABLE hash_hash_sales; + +DROP SCHEMA hw_subpartition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git 
a/src/test/regress/sql/hw_subpartition_alter_table.sql b/src/test/regress/sql/hw_subpartition_alter_table.sql new file mode 100644 index 000000000..d16800cd3 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_alter_table.sql @@ -0,0 +1,152 @@ +DROP SCHEMA subpartition_alter_table CASCADE; +CREATE SCHEMA subpartition_alter_table; +SET CURRENT_SCHEMA TO subpartition_alter_table; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +--change column type +alter table range_range alter column user_no set data type char(30); +alter table range_range alter column sales_amt set data type varchar; +\d+ range_range + +-- rename +alter table range_range rename to hahahahahah; +alter table range_range rename partition p_201901 to hahahahahah; +alter table range_range rename partition p_201901_a to hahahahahah; + +--cluster +create index idx_range_range on range_range(month_code,user_no); +alter table range_range cluster on idx_range_range; + +-- move tablespace +CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; +alter table range_range move PARTITION p_201901 tablespace example1; +alter table range_range move PARTITION p_201901_a tablespace example1; +DROP TABLESPACE example1; + +-- merge +alter table range_range merge PARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +alter table range_range merge SUBPARTITIONS p_201901 , p_201902 into PARTITION p_range_3; + +-- exchange +CREATE TABLE ori +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +); +ALTER TABLE range_range EXCHANGE PARTITION (p_201901) WITH TABLE ori; +ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WITH TABLE ori; + +-- drop +alter table range_range drop partition p_201901; +alter table range_range drop partition p_201901_a; +alter table range_range drop subpartition p_201901_a; + +-- add +alter table range_range add partition p_range_4 VALUES LESS THAN('201904'); + +-- split +alter table range_range split PARTITION p_201901 at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); + +drop table ori; +drop table range_range; + +CREATE TABLE IF NOT EXISTS range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int +) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; + +create index on range_range_02(col_2) local; + +alter table range_range_02 MODIFY PARTITION p_range_2 UNUSABLE LOCAL INDEXES; + +alter table range_range_02 MODIFY PARTITION p_range_2 REBUILD UNUSABLE LOCAL INDEXES; + +alter table range_range_02 alter col_1 type char; + +alter table range_range_02 alter col_2 type char; + +drop table range_range_02; + 
+--validate constraint
+CREATE TABLE hash_hash
+(
+    col_1 int ,
+    col_2 int NOT NULL ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+)
+PARTITION BY hash (col_3) SUBPARTITION BY hash (col_2)
+(
+    PARTITION p_hash_1
+    (
+        SUBPARTITION p_hash_1_1 ,
+        SUBPARTITION p_hash_1_2 ,
+        SUBPARTITION p_hash_1_3 ,
+        SUBPARTITION p_hash_1_4
+    ),
+    PARTITION p_hash_2
+    (
+        SUBPARTITION p_hash_2_1 ,
+        SUBPARTITION p_hash_2_2
+    ),
+    PARTITION p_hash_3,
+    PARTITION p_hash_4
+    (
+        SUBPARTITION p_hash_4_1
+    ),
+    PARTITION p_hash_5
+);
+
+INSERT INTO hash_hash VALUES(null,1,1,1);
+alter table hash_hash add constraint con_hash_hash check(col_1 is not null) NOT VALID ;
+INSERT INTO hash_hash VALUES(null,2,1,1); --error
+INSERT INTO hash_hash VALUES(1,3,1,1); --success
+alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --error
+delete from hash_hash where col_1 is null;
+alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --success
+
+drop table hash_hash cascade;
+-- clean
+DROP SCHEMA subpartition_alter_table CASCADE;
+
+\h ALTER TABLE SUBPARTITION
diff --git a/src/test/regress/sql/hw_subpartition_analyze_vacuum.sql b/src/test/regress/sql/hw_subpartition_analyze_vacuum.sql
new file mode 100644
index 000000000..1d5012ed0
--- /dev/null
+++ b/src/test/regress/sql/hw_subpartition_analyze_vacuum.sql
@@ -0,0 +1,50 @@
+-- prepare
+DROP SCHEMA subpartition_analyze_vacuum CASCADE;
+CREATE SCHEMA subpartition_analyze_vacuum;
+SET CURRENT_SCHEMA TO subpartition_analyze_vacuum;
+
+-- base function
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+
+create index idx_month_code_local on range_list(month_code) local;
+create index idx_dept_code_global on range_list(dept_code) global;
+create index idx_user_no_global on range_list(user_no) global;
+
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '2', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+select * from range_list order by 1, 2, 3, 4;
+delete from range_list where month_code = '201902';
+select * from range_list order by 1, 2, 3, 4;
+analyze range_list;
+analyze range_list partition (p_201901);
+vacuum range_list;
+vacuum range_list partition (p_201901);
+
+drop table range_list;
+
+-- clean
+DROP SCHEMA subpartition_analyze_vacuum CASCADE;
diff --git a/src/test/regress/sql/hw_subpartition_createtable.sql b/src/test/regress/sql/hw_subpartition_createtable.sql
new file mode 100644
index 000000000..cd72ff03a
--- /dev/null
+++ b/src/test/regress/sql/hw_subpartition_createtable.sql
@@ -0,0 +1,1573 @@
+
+--1.create table
+--list_list list_hash list_range range_list range_hash range_range
+
+--prepare
+DROP SCHEMA subpartition_createtable CASCADE;
+CREATE SCHEMA subpartition_createtable;
+SET CURRENT_SCHEMA TO subpartition_createtable;
+
+--1.1 normal table
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+select * from list_list;
+drop table list_list;
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+insert into list_hash values('201902', '1', '1', 1);
+insert into list_hash values('201902', '2', '1', 1);
+insert into list_hash values('201902', '3', '1', 1);
+insert into list_hash values('201903', '4', '1', 1);
+insert into list_hash values('201903', '5', '1', 1);
+insert into list_hash values('201903', '6', '1', 1);
+select * from list_hash;
+drop table list_hash;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a values less than ('4'),
+        SUBPARTITION p_201901_b values less than ('6')
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a values less than ('3'),
+        SUBPARTITION p_201902_b values less than ('6')
+    )
+);
+insert into list_range values('201902', '1', '1', 1);
+insert into list_range values('201902', '2', '1', 1);
+insert into list_range values('201902', '3', '1', 1);
+insert into list_range values('201903', '4', '1', 1);
+insert into list_range values('201903', '5', '1', 1);
+insert into list_range values('201903', '6', '1', 1);
+
+select * from list_range;
+drop table list_range;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+
+select * from range_list;
+drop table range_list;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+insert into range_hash values('201902', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201902', '1', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('201903', '1', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+
+select * from range_hash;
+drop table range_hash;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201902', '2', '1', 1);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('201903', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+
+select * from range_range;
+drop table range_range;
+
+CREATE TABLE hash_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201901', '2', '1', 1);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+insert into hash_list values('201903', '1', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+
+select * from hash_list;
+drop table hash_list;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201901', '2', '1', 1);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+insert into hash_hash values('201903', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+
+select * from hash_hash;
+drop table hash_hash;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN ( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN ( '3' )
+    ),
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN ( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN ( '3' )
+    )
+);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201901', '2', '1', 1);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+insert into hash_range values('201903', '1', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+
+select * from hash_range;
+drop table hash_range;
+
+
+--1.2 table with default subpartition
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_list;
+
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+drop table list_hash;
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_hash;
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_hash;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a values less than ('3'),
+        SUBPARTITION p_201902_b values less than ('6')
+    )
+);
+drop table list_range;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a values less than ('4'),
+        SUBPARTITION p_201901_b values less than ('6')
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_range;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_range;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+drop table range_list;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_list;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_list;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+drop table range_hash;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_hash;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_hash;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+drop table range_range;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_range;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES( '2' ),
+        SUBPARTITION p_201902_b VALUES( '3' )
+    )
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES( '2' ),
+        SUBPARTITION p_201901_b VALUES( '3' )
+    ),
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+drop table hash_hash;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902
+);
+drop table hash_hash;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+);
+drop table hash_hash;
+
+
+--1.3 subpartition name check
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_a VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901 VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201901_subpartdefault1 VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+
+
+
+--1.4 subpartition key check
+-- the level-1 partition key and the level-2 partition key are the same column
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+--duplicate key values in the level-2 partitions
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '1' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+--the partition column does not exist
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_codeXXX) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_codeXXX)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a values less than ('4'),
+        SUBPARTITION p_201901_b values less than ('4')
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a values less than ('3'),
+        SUBPARTITION p_201902_b values less than ('6')
+    )
+);
+drop table list_range;
+
+
+--1.5 list subpartition with default
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( default )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+select * from list_list partition (p_201901);
+select * from list_list partition (p_201902);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( default )
+    )
+);
+drop table list_list;
+
+--1.6 the declared and the defined subpartition type must be the same.
+--error
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY hash (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( default )
+    )
+);
+
+--1.7 add constraint
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '6' )
+    )
+);
+
+alter table range_range add constraint constraint_check CHECK (sales_amt IS NOT NULL);
+insert into range_range values(1,1,1);
+drop table range_range;
+
+-- drop partition column
+CREATE TABLE range_hash_02
+(
+    col_1 int ,
+    col_2 int,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+)
+PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( -10 )
+    (
+        SUBPARTITION p_hash_1_1 ,
+        SUBPARTITION p_hash_1_2 ,
+        SUBPARTITION p_hash_1_3
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 20 ),
+    PARTITION p_range_3 VALUES LESS THAN( 30)
+    (
+        SUBPARTITION p_hash_3_1 ,
+        SUBPARTITION p_hash_3_2 ,
+        SUBPARTITION p_hash_3_3
+    ),
+    PARTITION p_range_4 VALUES LESS THAN( 50)
+    (
+        SUBPARTITION p_hash_4_1 ,
+        SUBPARTITION p_hash_4_2 ,
+        SUBPARTITION range_hash_02
+    ),
+    PARTITION p_range_5 VALUES LESS THAN( MAXVALUE )
+) ENABLE ROW MOVEMENT;
+
+alter table range_hash_02 drop column col_1;
+
+alter table range_hash_02 drop column col_2;
+
+drop table range_hash_02;
+--1.8 SET ROW MOVEMENT
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1', '2' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1', '2' ),
+        SUBPARTITION p_201902_b VALUES ( default )
+    )
+);
+alter table list_list disable ROW MOVEMENT;
+insert into list_list values('201902', '1', '1', 1);
+update list_list set month_code = '201903';
+update list_list set dept_code = '3'; +drop table list_list; + +--1.9 without subpartition declaration +create table test(a int) +partition by range(a) +( +partition p1 values less than(100) +( +subpartition subp1 values less than(50), +subpartition subp2 values less than(100) +), +partition p2 values less than(200), +partition p3 values less than(maxvalue) +); + +--1.10 create table like +CREATE TABLE range_range +( + col_1 int primary key, + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int generated always as(2*col_2) stored , + check (col_4 >= col_2) +) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; + +CREATE TABLE range_range_02 (like range_range INCLUDING ALL ); +drop table range_range; +-- storage parameter +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(ORIENTATION = COLUMN) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(segment = on) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(hashbucket = on) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH(STORAGE_TYPE = USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + + +--ROW LEVEL SECURITY POLICY +CREATE TABLE range_range +( + 
month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +CREATE ROW LEVEL SECURITY POLICY range_range_rls ON range_range USING(user_no = CURRENT_USER); + +drop table range_range; + +-- 账本数据库 +CREATE SCHEMA ledgernsp WITH BLOCKCHAIN; + +CREATE TABLE ledgernsp.range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +DROP SCHEMA ledgernsp; +-- create table as +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; + +insert into range_range values(201902,1,1,1),(201902,1,1,1),(201902,3,1,1),(201903,1,1,1),(201903,2,1,1),(201903,2,1,1); + +select * from range_range subpartition(p_201901_a) where month_code in(201902,201903) order by 1,2,3,4; + +create table range_range_copy as select * from range_range subpartition(p_201901_a) where month_code in(201902,201903); + +select * from range_range_copy order by 1,2,3,4; + +drop table range_range; +drop table range_range_copy; + +--1.11 create index +create table range_range_03 +( + c_int int, + c_char1 char(3000), + c_char2 char(5000), + c_char3 char(6000), + c_varchar1 varchar(3000), + c_varchar2 varchar(5000), + c_varchar3 varchar, + c_varchar4 varchar, + c_text1 text, + c_text2 text, + c_text3 text, + c int, + primary key(c,c_int) +) with (parallel_workers=10) +partition by range (c_int) subpartition by range (c_char1) +( + partition p1 values less than(50) + ( + subpartition p1_1 values less than('c'), + subpartition p1_2 values less than(maxvalue) + ), + partition p2 values less than(100) + ( + subpartition p2_1 values less than('c'), + subpartition p2_2 values less than(maxvalue) + ), + partition p3 values less than(150) + ( + subpartition p3_1 values less than('c'), + subpartition p3_2 values less than(maxvalue) + ), + partition p4 values less than(200) + ( + subpartition p4_1 values less than('c'), + subpartition p4_2 values less than(maxvalue) + ), + partition p5 values less than(maxvalue)( + subpartition p5_1 values less than('c'), + subpartition p5_2 values less than(maxvalue) + ) +) enable row movement; + +create index range_range_03_idx1 on 
range_range_03 (c_varchar1) local; --success + +create index range_range_03_idx2 on range_range_03 (c_varchar2) local ( + partition cpt7_p1, + partition cpt7_p2, + partition cpt7_p3, + partition cpt7_p4, + partition cpt7_p5 +); --failed + +create index range_range_03_idx3 on range_range_03 (c_varchar3); --success, default global + +create index range_range_03_idx4 on range_range_03 (c_varchar4) global; --success + +create index range_range_03_idx5 on range_range_03 (c_varchar4) local; --failed, can not be same column with global index + +\d+ range_range_03 + +select pg_get_tabledef('range_range_03'); + +drop table range_range_03; + +--unique local index columns must contain the partition key +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +create unique index idx on range_range(month_code) local; +create unique index idx1 on range_range(month_code, user_no) local; +drop table range_range; + +-- partkey has timestampwithzone type +drop table hash_range; +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) +PARTITION BY HASH (col_2) SUBPARTITION BY RANGE (col_19) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) +PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +drop table hash_range; + +-- test create table like only support range_range in subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +create table t1(like range_range including partition); +insert into t1 values('201902', '1', '1', 1); +insert into t1 values('201902', '2', '1', 1); +insert into t1 values('201902', '1', '1', 1); +insert into t1 values('201903', '2', '1', 1); +insert into t1 values('201903', '1', '1', 1); +insert into t1 values('201903', '2', '1', 1); + +explain (costs off) select * from t1; +select * from t1; +drop table t1; +drop 
table range_range; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +create table t1(like list_list including partition); +drop table list_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +create table t1(like range_list including partition); +drop table range_list; + +--clean +DROP SCHEMA subpartition_createtable CASCADE; + +\h CREATE TABLE SUBPARTITION diff --git a/src/test/regress/sql/hw_subpartition_ddl_index.sql b/src/test/regress/sql/hw_subpartition_ddl_index.sql new file mode 100644 index 000000000..ee88c7680 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_ddl_index.sql @@ -0,0 +1,157 @@ +-- +----test index is Ok when use ddl grammer for subpartition---- +-- +DROP SCHEMA hw_subpartition_ddl_index CASCADE; +CREATE SCHEMA hw_subpartition_ddl_index; +SET CURRENT_SCHEMA TO hw_subpartition_ddl_index; + +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_indexonlyscan = ON; +SET enable_bitmapscan = OFF; + +-- +--test for add/drop partition/subpartition +-- +--1. 
first, we create subpartitioned table, and index on the table +CREATE TABLE range_list_sales1 +( + product_id INT4 CONSTRAINT cc1 CHECK (product_id < 2500) NOT NULL, + customer_id INT4, + time_id DATE CONSTRAINT cc2 CHECK (time_id IS NOT NULL) UNIQUE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2), + comment VARCHAR2, + UNIQUE (product_id, customer_id, comment) +) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000), + 'test' || generate_series(1,1000); + +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--2. 
add partition/subpartition will not influence the index +ALTER TABLE range_list_sales1 ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8') + ); +ALTER TABLE range_list_sales1 ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_list_sales1 MODIFY PARTITION customer5 ADD SUBPARTITION customer5_channel4 VALUES ('9'); +INSERT INTO range_list_sales1 SELECT generate_series(1001,2000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1001,2000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000), + 'test' || generate_series(1001,2000); + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--3. drop partition/subpartition update global index +ALTER TABLE range_list_sales1 DROP PARTITION customer3 UPDATE GLOBAL INDEX; +ALTER TABLE range_list_sales1 DROP PARTITION FOR (700) UPDATE GLOBAL INDEX; --customer4 +ALTER TABLE range_list_sales1 DROP SUBPARTITION FOR (700, '9') UPDATE GLOBAL INDEX; --customer5_channel4 + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--4. 
if drop partition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP PARTITION FOR (1600); + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--5. if drop subpartition without update global index, the gpi will be invalid, we can rebuild the index +ALTER TABLE range_list_sales1 DROP SUBPARTITION customer5_channel3; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +DROP TABLE range_list_sales1; + +--finish, clean the environment +DROP SCHEMA hw_subpartition_ddl_index CASCADE; +RESET CURRENT_SCHEMA; +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/sql/hw_subpartition_gpi.sql b/src/test/regress/sql/hw_subpartition_gpi.sql new file mode 100644 index 000000000..435d7a19b --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_gpi.sql @@ -0,0 +1,533 @@ +-- prepare +DROP SCHEMA subpartition_gpi CASCADE; +CREATE SCHEMA 
subpartition_gpi; +SET CURRENT_SCHEMA TO subpartition_gpi; + +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +-- unique +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create unique index idx_dept_code_global on range_list(dept_code) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +select * from range_list subpartition (p_201901_a); +select * from range_list subpartition (p_201901_b); +select count(*) from range_list; +--error +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select count(*) from range_list; + +delete from range_list; +drop index idx_dept_code_global; + +create unique index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '4', 1); +select * from range_list subpartition (p_201901_a); +select * from range_list subpartition (p_201901_b); +select * from range_list subpartition (p_201902_a); +select * from range_list subpartition (p_201902_b); +select count(*) from range_list; +--error +insert into range_list values('201902', '1', '1', 1); +insert into range_list 
values('201902', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201902', '1', '2', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '2', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201902', '1', '3', 1); +insert into range_list values('201902', '2', '3', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '3', 1); +insert into range_list values('201902', '1', '4', 1); +insert into range_list values('201902', '2', '4', 1); +insert into range_list values('201903', '1', '4', 1); +insert into range_list values('201903', '2', '4', 1); +select count(*) from range_list; + +drop table range_list; + +-- truncate subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list truncate subpartition p_201901_a update global index; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list truncate subpartition p_201901_b; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + 
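+-- NOTE (editorial sketch): the truncate of p_201901_b above omitted UPDATE GLOBAL
+-- INDEX, so idx_dept_code_global and idx_user_no_global are expected to be marked
+-- unusable, and the surrounding plans cannot use them. Assuming openGauss exposes
+-- pg_index.indisusable, that state can be observed with:
+SELECT c.relname, i.indisusable
+  FROM pg_index i JOIN pg_class c ON c.oid = i.indexrelid
+  WHERE c.relname IN ('idx_dept_code_global', 'idx_user_no_global') ORDER BY 1;
+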
+explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +-- split subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values (default) + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values (default) + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list split subpartition p_201901_b values ('3') into +( + subpartition p_201901_b, + subpartition p_201901_c +) update global index; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list split subpartition p_201902_b values ('3') into +( + subpartition p_201902_b, + subpartition p_201902_c +); + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( 
'2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( MAXVALUE ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range; + +create index idx_month_code_local on range_range(month_code) local; +create index idx_dept_code_global on range_range(dept_code) global; +create index idx_user_no_global on range_range(user_no) global; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +alter table range_range split subpartition p_201901_b at ('3') into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; + +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +alter table range_range split subpartition p_201902_b at ('3') into +( + subpartition p_201902_c, + subpartition p_201903_d +); + +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) primary key, + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) primary key, + user_no 
VARCHAR2 ( 30 ) , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) primary key, + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code, user_no) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, user_no) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); 
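+
+-- NOTE (editorial): on a subpartitioned table, a unique (primary key) index can only
+-- be created LOCAL when its key contains both the partition key (month_code) and the
+-- subpartition key (dept_code); otherwise it has to be a global index. The relkind
+-- probes in this series are therefore expected to report 'I' (global partitioned
+-- index) for every variant except the (month_code, dept_code) and
+-- (month_code, dept_code, user_no) primary keys, which can use a local index ('i').
+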
+drop table range_range; + +-- truncate with gpi +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; + +create index idx on range_hash_02(col_1); + +truncate range_hash_02; + +drop table range_hash_02; + +-- clean +DROP SCHEMA subpartition_gpi CASCADE; + diff --git a/src/test/regress/sql/hw_subpartition_index.sql b/src/test/regress/sql/hw_subpartition_index.sql new file mode 100644 index 000000000..348a13216 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_index.sql @@ -0,0 +1,147 @@ +DROP SCHEMA subpartition_index CASCADE; +CREATE SCHEMA subpartition_index; +SET CURRENT_SCHEMA TO subpartition_index; + +CREATE TABLE source +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +); + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into source values('201902', '1', '1', 1); +insert into source values('201902', '2', '1', 1); +insert into source values('201902', '1', '1', 1); +insert into source values('201903', '2', '1', 1); +insert into source values('201903', '1', '1', 1); +insert into source values('201903', '2', '1', 1); + +insert into range_list select * from source; + +CREATE INDEX range_list_idx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx, + SUBPARTITION p_201901_b_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); + +-- test subpartition index scan +explain (costs off) select * from range_list where month_code = '201902'; +select * from range_list where month_code = '201902' order by 1,2,3,4; +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902'; +select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902' order by 1,2,3,4; + +-- test index unusable and rebuild +ALTER INDEX range_list_idx MODIFY PARTITION p_201901_a_idx UNUSABLE; +select indisusable from pg_partition where relname = 'p_201901_a_idx'; +REINDEX INDEX range_list_idx PARTITION p_201901_a_idx; +select indisusable from pg_partition where relname = 'p_201901_a_idx'; + +truncate table range_list; +ALTER INDEX range_list_idx MODIFY PARTITION p_201901_a_idx UNUSABLE; +ALTER INDEX range_list_idx MODIFY PARTITION p_201901_b_idx UNUSABLE; +ALTER INDEX range_list_idx MODIFY PARTITION p_201902_a_idx UNUSABLE; +ALTER INDEX range_list_idx MODIFY PARTITION 
p_201902_b_idx UNUSABLE; +insert into range_list select * from source; + +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902'; +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201903'; + +REINDEX INDEX range_list_idx; + +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902'; +explain (costs off) select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201903'; +select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201902' order by 1,2,3,4; +select /*+ indexscan(range_list range_list_idx)*/* from range_list where month_code = '201903' order by 1,2,3,4; + +-- wrong case +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201902_idx + ( + SUBPARTITION p_201901_a_idx, + SUBPARTITION p_201901_b_idx, + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); + +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx, + SUBPARTITION p_201901_b_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ), + PARTITION p_201903_idx + ( + SUBPARTITION p_201902_a_idx + ) +); + +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); + +CREATE INDEX range_list_idxx ON range_list(month_code) LOCAL +( + PARTITION p_201901_idx + ( + SUBPARTITION p_201901_a_idx + ), + PARTITION p_201902_idx + ( + SUBPARTITION p_201901_b_idx, + SUBPARTITION p_201902_a_idx, + SUBPARTITION p_201902_b_idx + ) +); + +drop table source; +drop table range_list; + +reset current_schema; +DROP SCHEMA subpartition_index CASCADE; diff --git a/src/test/regress/sql/hw_subpartition_scan.sql b/src/test/regress/sql/hw_subpartition_scan.sql new file mode 100644 index 000000000..aea37a98d --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_scan.sql @@ -0,0 +1,485 @@ + +DROP SCHEMA subpartition_scan CASCADE; +CREATE SCHEMA subpartition_scan; +SET CURRENT_SCHEMA TO subpartition_scan; + +--scan +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +explain(costs off, verbose on) select * from range_list order by 1, 2, 3, 4; +select * from range_list order by 1, 2, 3, 4; + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code on range_list(dept_code) local; +create index idx_user_no on range_list(user_no) local; + +set enable_seqscan = off; +explain(costs off, verbose on) 
select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +set enable_bitmapscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +reset enable_seqscan; +reset enable_bitmapscan; + +drop table range_list; + +CREATE TABLE range_list +( + col_1 VARCHAR2 ( 30 ) , + col_2 VARCHAR2 ( 30 ) NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + ccol_4 VARCHAR2 ( 30 ), +col_5 VARCHAR2 ( 30 ), +col_6 VARCHAR2 ( 30 ), +col_7 VARCHAR2 ( 30 ), +col_8 VARCHAR2 ( 30 ), +col_9 VARCHAR2 ( 30 ), +col_10 VARCHAR2 ( 30 ), +col_11 VARCHAR2 ( 30 ), +col_12 VARCHAR2 ( 30 ), +col_13 VARCHAR2 ( 30 ), +col_14 VARCHAR2 ( 30 ), +col_15 VARCHAR2 ( 30 ), +col_16 VARCHAR2 ( 30 ), +col_17 VARCHAR2 ( 30 ), +col_18 VARCHAR2 ( 30 ), +col_19 VARCHAR2 ( 30 ), +col_20 VARCHAR2 ( 30 ), +col_21 VARCHAR2 ( 30 ), +col_22 VARCHAR2 ( 30 ), +col_23 VARCHAR2 ( 30 ), +col_24 VARCHAR2 ( 30 ), +col_25 VARCHAR2 ( 30 ), +col_26 VARCHAR2 ( 30 ), +col_27 VARCHAR2 ( 30 ), +col_28 VARCHAR2 ( 30 ), +col_29 VARCHAR2 ( 30 ), +col_30 VARCHAR2 ( 30 ), +col_31 VARCHAR2 ( 30 ), +col_32 VARCHAR2 ( 30 ), +col_33 VARCHAR2 ( 30 ), +col_34 VARCHAR2 ( 30 ), +col_35 VARCHAR2 ( 30 ), +col_36 VARCHAR2 ( 30 ), +col_37 VARCHAR2 ( 30 ), +col_38 VARCHAR2 ( 30 ), +col_39 VARCHAR2 ( 30 ), +col_40 VARCHAR2 ( 30 ), +col_41 VARCHAR2 ( 30 ), +col_42 VARCHAR2 ( 30 ), +col_43 VARCHAR2 ( 30 ), +col_44 VARCHAR2 ( 30 ), +col_45 VARCHAR2 ( 30 ), +col_46 VARCHAR2 ( 30 ), +col_47 VARCHAR2 ( 30 ), +col_48 VARCHAR2 ( 30 ), +col_49 VARCHAR2 ( 30 ), +col_50 VARCHAR2 ( 30 ), +col_51 VARCHAR2 ( 30 ), +col_52 VARCHAR2 ( 30 ), +col_53 VARCHAR2 ( 30 ), +col_54 VARCHAR2 ( 30 ), +col_55 VARCHAR2 ( 30 ), +col_56 VARCHAR2 ( 30 ), +col_57 VARCHAR2 ( 30 ), +col_58 VARCHAR2 ( 30 ), +col_59 VARCHAR2 ( 30 ), +col_60 VARCHAR2 ( 30 ), +col_61 VARCHAR2 ( 30 ), +col_62 VARCHAR2 ( 30 ), +col_63 VARCHAR2 ( 30 ), +col_64 VARCHAR2 ( 30 ), +col_65 VARCHAR2 ( 30 ), +col_66 VARCHAR2 ( 30 ), +col_67 VARCHAR2 ( 30 ), +col_68 VARCHAR2 ( 30 ), +col_69 VARCHAR2 ( 30 ), +col_70 VARCHAR2 ( 30 ), +col_71 VARCHAR2 ( 30 ), +col_72 VARCHAR2 ( 30 ), +col_73 VARCHAR2 ( 30 ), +col_74 VARCHAR2 ( 30 ), +col_75 VARCHAR2 ( 30 ), +col_76 VARCHAR2 ( 30 ), +col_77 VARCHAR2 ( 30 ), +col_78 VARCHAR2 ( 30 ), +col_79 VARCHAR2 ( 30 ), +col_80 VARCHAR2 ( 30 ), +col_81 VARCHAR2 ( 30 ), +col_82 VARCHAR2 ( 30 ), +col_83 VARCHAR2 ( 30 ) +) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( '-10' ) + ( +SUBPARTITION p_list_1_1 VALUES ( '-1' ), +SUBPARTITION p_list_1_2 VALUES ( '-2' ), +SUBPARTITION p_list_1_3 VALUES ( '-3' ), +SUBPARTITION p_list_1_4 VALUES ( '-4' ), +SUBPARTITION p_list_1_5 VALUES ( '-5' ), 
+SUBPARTITION p_list_1_6 VALUES ( '-6' ), +SUBPARTITION p_list_1_7 VALUES ( '-7' ), +SUBPARTITION p_list_1_8 VALUES ( '-8' ), +SUBPARTITION p_list_1_9 VALUES ( '-9' ), +SUBPARTITION p_list_1_10 VALUES ( '-10' ), +SUBPARTITION p_list_1_11 VALUES ( '-11' ), +SUBPARTITION p_list_1_12 VALUES ( '-12' ), +SUBPARTITION p_list_1_13 VALUES ( '-13' ), +SUBPARTITION p_list_1_14 VALUES ( '-14' ), +SUBPARTITION p_list_1_15 VALUES ( '-15' ), +SUBPARTITION p_list_1_16 VALUES ( '-16' ), +SUBPARTITION p_list_1_17 VALUES ( '-17' ), +SUBPARTITION p_list_1_18 VALUES ( '-18' ), +SUBPARTITION p_list_1_19 VALUES ( '-19' ), +SUBPARTITION p_list_1_20 VALUES ( '-20' ), +SUBPARTITION p_list_1_21 VALUES ( '-21' ), +SUBPARTITION p_list_1_22 VALUES ( '-22' ), +SUBPARTITION p_list_1_23 VALUES ( '-23' ), +SUBPARTITION p_list_1_24 VALUES ( '-24' ), +SUBPARTITION p_list_1_25 VALUES ( '-25' ), +SUBPARTITION p_list_1_26 VALUES ( '-26' ), +SUBPARTITION p_list_1_27 VALUES ( '-27' ), +SUBPARTITION p_list_1_28 VALUES ( '-28' ), +SUBPARTITION p_list_1_29 VALUES ( '-29' ), +SUBPARTITION p_list_1_30 VALUES ( '-30' ), +SUBPARTITION p_list_1_31 VALUES ( '-31' ), +SUBPARTITION p_list_1_32 VALUES ( '-32' ), +SUBPARTITION p_list_1_33 VALUES ( '-33' ), +SUBPARTITION p_list_1_34 VALUES ( '-34' ), +SUBPARTITION p_list_1_35 VALUES ( '-35' ), +SUBPARTITION p_list_1_36 VALUES ( '-36' ), +SUBPARTITION p_list_1_37 VALUES ( '-37' ), +SUBPARTITION p_list_1_38 VALUES ( '-38' ), +SUBPARTITION p_list_1_39 VALUES ( '-39' ), +SUBPARTITION p_list_1_40 VALUES ( '-40' ), +SUBPARTITION p_list_1_41 VALUES ( '-41' ), +SUBPARTITION p_list_1_42 VALUES ( '-42' ), +SUBPARTITION p_list_1_43 VALUES ( '-43' ), +SUBPARTITION p_list_1_44 VALUES ( '-44' ), +SUBPARTITION p_list_1_45 VALUES ( '-45' ), +SUBPARTITION p_list_1_46 VALUES ( '-46' ), +SUBPARTITION p_list_1_47 VALUES ( '-47' ), +SUBPARTITION p_list_1_48 VALUES ( '-48' ), +SUBPARTITION p_list_1_49 VALUES ( '-49' ), +SUBPARTITION p_list_1_50 VALUES ( '-50' ), +SUBPARTITION p_list_1_51 VALUES ( default ) + ), + PARTITION p_range_2 VALUES LESS THAN('10 ') + ( +SUBPARTITION p_list_2_1 VALUES ( '1' ), +SUBPARTITION p_list_2_2 VALUES ( '2' ), +SUBPARTITION p_list_2_3 VALUES ( '3' ), +SUBPARTITION p_list_2_4 VALUES ( '4' ), +SUBPARTITION p_list_2_5 VALUES ( '5' ), +SUBPARTITION p_list_2__6 VALUES ( '-6' ), +SUBPARTITION p_list_2_6 VALUES ( '6' ), +SUBPARTITION p_list_2_7 VALUES ( '7' ), +SUBPARTITION p_list_2_8 VALUES ( '8' ), +SUBPARTITION p_list_2_9 VALUES ( '9' ), +SUBPARTITION p_list_2_10 VALUES ( '10' ), +SUBPARTITION p_list_2_11 VALUES ( '11' ), +SUBPARTITION p_list_2_12 VALUES ( '12' ), +SUBPARTITION p_list_2_13 VALUES ( '13' ), +SUBPARTITION p_list_2_14 VALUES ( '14' ), +SUBPARTITION p_list_2_15 VALUES ( '15' ), +SUBPARTITION p_list_2_16 VALUES ( '16' ), +SUBPARTITION p_list_2_17 VALUES ( '17' ), +SUBPARTITION p_list_2_18 VALUES ( '18' ), +SUBPARTITION p_list_2_19 VALUES ( '19' ), +SUBPARTITION p_list_2_20 VALUES ( '20' ), +SUBPARTITION p_list_2_21 VALUES ( '21' ), +SUBPARTITION p_list_2_22 VALUES ( '22' ), +SUBPARTITION p_list_2_23 VALUES ( '23' ), +SUBPARTITION p_list_2_24 VALUES ( '24' ), +SUBPARTITION p_list_2_25 VALUES ( '25' ), +SUBPARTITION p_list_2_26 VALUES ( '26' ), +SUBPARTITION p_list_2_27 VALUES ( '27' ), +SUBPARTITION p_list_2_28 VALUES ( '28' ), +SUBPARTITION p_list_2_29 VALUES ( '29' ), +SUBPARTITION p_list_2_30 VALUES ( '30' ), +SUBPARTITION p_list_2_31 VALUES ( '31' ), +SUBPARTITION p_list_2_32 VALUES ( '32' ), +SUBPARTITION p_list_2_33 VALUES ( '33' ), +SUBPARTITION p_list_2_34 VALUES ( 
'34' ), +SUBPARTITION p_list_2_35 VALUES ( '35' ), +SUBPARTITION p_list_2_36 VALUES ( '36' ), +SUBPARTITION p_list_2_37 VALUES ( '37' ), +SUBPARTITION p_list_2_38 VALUES ( '38' ), +SUBPARTITION p_list_2_39 VALUES ( '39' ), +SUBPARTITION p_list_2_40 VALUES ( '40' ), +SUBPARTITION p_list_2_41 VALUES ( '41' ), +SUBPARTITION p_list_2_42 VALUES ( '42' ), +SUBPARTITION p_list_2_43 VALUES ( '43' ), +SUBPARTITION p_list_2_44 VALUES ( '44' ), +SUBPARTITION p_list_2_45 VALUES ( '45' ), +SUBPARTITION p_list_2_46 VALUES ( '46' ), +SUBPARTITION p_list_2_47 VALUES ( '47' ), +SUBPARTITION p_list_2_48 VALUES ( '48' ), +SUBPARTITION p_list_2_49 VALUES ( '49' ), +SUBPARTITION p_list_2_50 VALUES ( '50' ), +SUBPARTITION p_list_2_51 VALUES ( default ) + ), + PARTITION p_range_3 VALUES LESS THAN( '20 '), + + PARTITION p_range_4 VALUES LESS THAN( '30' ) + ( + SUBPARTITION p_list_4_1 VALUES ( default ) + ), + PARTITION p_range_5 VALUES LESS THAN( '40' ) + ( + SUBPARTITION p_list_5_1 VALUES ( '41' ), +SUBPARTITION p_list_5_2 VALUES ( '42' ), +SUBPARTITION p_list_5_3 VALUES ( '43' ), +SUBPARTITION p_list_5_4 VALUES ( '44' ), +SUBPARTITION p_list_5_5 VALUES ( '45' ), +SUBPARTITION p_list_5_6 VALUES ( '46' ), +SUBPARTITION p_list_5_7 VALUES ( '47' ), +SUBPARTITION p_list_5_8 VALUES ( '48' ), +SUBPARTITION p_list_5_9 VALUES ( '49' ), +SUBPARTITION p_list_5_10 VALUES ( '50' ), +SUBPARTITION p_list_5_11 VALUES ( '51' ), +SUBPARTITION p_list_5_12 VALUES ( '52' ), +SUBPARTITION p_list_5_13 VALUES ( '53' ), +SUBPARTITION p_list_5_14 VALUES ( '54' ), +SUBPARTITION p_list_5_15 VALUES ( '55' ), +SUBPARTITION p_list_5_16 VALUES ( '56' ), +SUBPARTITION p_list_5_17 VALUES ( '57' ), +SUBPARTITION p_list_5_18 VALUES ( '58' ), +SUBPARTITION p_list_5_19 VALUES ( '59' ), +SUBPARTITION p_list_5_20 VALUES ( '60' ), +SUBPARTITION p_list_5_21 VALUES ( '61' ), +SUBPARTITION p_list_5_22 VALUES ( '62' ), +SUBPARTITION p_list_5_23 VALUES ( '63' ), +SUBPARTITION p_list_5_24 VALUES ( '64' ), +SUBPARTITION p_list_5_25 VALUES ( '65' ), +SUBPARTITION p_list_5_26 VALUES ( '66' ), +SUBPARTITION p_list_5_27 VALUES ( '67' ), +SUBPARTITION p_list_5_28 VALUES ( '68' ), +SUBPARTITION p_list_5_29 VALUES ( '69' ), +SUBPARTITION p_list_5_30 VALUES ( '70' ), +SUBPARTITION p_list_5_31 VALUES ( '71' ), +SUBPARTITION p_list_5_32 VALUES ( '72' ), +SUBPARTITION p_list_5_33 VALUES ( '73' ), +SUBPARTITION p_list_5_34 VALUES ( '74' ), +SUBPARTITION p_list_5_35 VALUES ( '75' ), +SUBPARTITION p_list_5_36 VALUES ( '76' ), +SUBPARTITION p_list_5_37 VALUES ( '77' ), +SUBPARTITION p_list_5_38 VALUES ( '78' ), +SUBPARTITION p_list_5_39 VALUES ( '79' ), +SUBPARTITION p_list_5_40 VALUES ( '80' ), +SUBPARTITION p_list_5_41 VALUES ( '81' ), +SUBPARTITION p_list_5_42 VALUES ( '82' ), +SUBPARTITION p_list_5_43 VALUES ( '83' ), +SUBPARTITION p_list_5_44 VALUES ( '84' ), +SUBPARTITION p_list_5_45 VALUES ( '85' ), +SUBPARTITION p_list_5_46 VALUES ( '86' ), +SUBPARTITION p_list_5_47 VALUES ( '87' ), +SUBPARTITION p_list_5_48 VALUES ( '88' ), +SUBPARTITION p_list_5_49 VALUES ( '89' ), +SUBPARTITION p_list_5_50 VALUES ( '90' ), +SUBPARTITION p_list_5_51 VALUES ( '91' ), +SUBPARTITION p_list_5_52 VALUES ( '92' ), +SUBPARTITION p_list_5_53 VALUES ( '93' ), +SUBPARTITION p_list_5_54 VALUES ( '94' ), +SUBPARTITION p_list_5_55 VALUES ( '95' ), +SUBPARTITION p_list_5_56 VALUES ( '96' ), +SUBPARTITION p_list_5_57 VALUES ( '97' ), +SUBPARTITION p_list_5_58 VALUES ( '98' ), +SUBPARTITION p_list_5_59 VALUES ( '99' ), +SUBPARTITION p_list_5_60 VALUES ( '100' ), +SUBPARTITION p_list_5_61 VALUES 
( '101' ), +SUBPARTITION p_list_5_62 VALUES ( '102' ), +SUBPARTITION p_list_5_63 VALUES ( '103' ), +SUBPARTITION p_list_5_64 VALUES ( '104' ), +SUBPARTITION p_list_5_65 VALUES ( '105' ), +SUBPARTITION p_list_5_66 VALUES ( '106' ), +SUBPARTITION p_list_5_67 VALUES ( '107' ), +SUBPARTITION p_list_5_68 VALUES ( '108' ), +SUBPARTITION p_list_5_69 VALUES ( '109' ), +SUBPARTITION p_list_5_70 VALUES ( '110' ), +SUBPARTITION p_list_5_71 VALUES ( '111' ), +SUBPARTITION p_list_5_72 VALUES ( '112' ), +SUBPARTITION p_list_5_73 VALUES ( '113' ), +SUBPARTITION p_list_5_74 VALUES ( '114' ), +SUBPARTITION p_list_5_75 VALUES ( '115' ), +SUBPARTITION p_list_5_76 VALUES ( '116' ), +SUBPARTITION p_list_5_77 VALUES ( '117' ), +SUBPARTITION p_list_5_78 VALUES ( '118' ), +SUBPARTITION p_list_5_79 VALUES ( '119' ), +SUBPARTITION p_list_5_80 VALUES ( default ) + ), + PARTITION p_range_6 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index on range_list(col_2) local; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; +ALTER INDEX range_list_col_2_idx MODIFY PARTITION p_list_5_14_col_2_idx UNUSABLE; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; +drop table range_list; + +create table range_range_jade(jid int,jn int,name varchar2)partition by range (jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), +subpartition hrp1_2 values less than(26), +subpartition hrp1_3 values less than(36), + subpartition hrp1_4 values less than(maxvalue)), + partition hrp2 values less than(26)( + subpartition hrp2_1 values less than(maxvalue)), + partition hrp3 values less than(36)( + subpartition hrp3_1 values less than(16), +subpartition hrp3_2 values less than(26), + subpartition hrp3_3 values less than(maxvalue)), + partition hrp4 values less than(maxvalue)( + subpartition hrp4_1 values less than(16), + subpartition hrp4_2 values less than(maxvalue)) +)ENABLE ROW MOVEMENT; +-- no errors +set enable_partition_opfusion = on; +insert into range_range_jade values(1,2,'jade'); +reset enable_partition_opfusion; +drop table range_range_jade; + +drop table list_range_02; +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, +col_3 VARCHAR2 ( 30 ) , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 
VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; + +create index index_01 on list_range_02(col_2) local ; + +explain (costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); +explain (format yaml, costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); +explain (format json, costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); +drop table list_range_02; + +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; + +explain (costs off) select * from list_list_02 where col_1=(select max(1) from list_list_02); +select * from list_list_02 where col_1=(select max(1) from list_list_02); +drop table list_list_02; +DROP SCHEMA subpartition_scan CASCADE; diff --git a/src/test/regress/sql/hw_subpartition_select.sql b/src/test/regress/sql/hw_subpartition_select.sql new file mode 100644 index 000000000..7a64775d5 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_select.sql @@ -0,0 +1,318 @@ +--prepare +DROP SCHEMA subpartition_select CASCADE; +CREATE SCHEMA subpartition_select; +SET CURRENT_SCHEMA TO subpartition_select; + +--select +CREATE TABLE t1 +( + c1 int, + c2 int +); +insert into t1 values(generate_series(201901,201910), generate_series(1,10)); + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( 
+ PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '3', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '3', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + +select * from range_list where user_no is not null order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; +select * from range_list partition (p_201902) order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + + + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); + +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); + +select * from range_hash order by 1, 2, 3, 4; + +select * from range_hash where user_no is not null order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; +select * from range_hash partition (p_201901) order by 1, 2, 3, 4; +select * from range_hash partition (p_201902) order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + +explain (costs off, verbose on) select * from range_hash where ctid='(0,1)' order by 1, 2, 3, 4; +explain (costs off, verbose on) select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +explain (costs off, verbose on) select * from range_hash 
partition (p_201901) order by 1, 2, 3, 4; +explain (costs off, verbose on) select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); + +select * from range_range order by 1, 2, 3, 4; + +select * from range_range where user_no is not null order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; +select * from range_range partition (p_201901) order by 1, 2, 3, 4; +select * from range_range partition (p_201902) order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + +explain (costs off, verbose on) select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; +explain (costs off, verbose on) select * from range_range partition (p_201901) order by 1, 2, 3, 4; + +--view +create view view_temp as select * from range_list; +select * from view_temp; +--error +select * from view_temp partition (p_201901); +select * from view_temp partition (p_201902); +drop view view_temp; + +with tmp1 as (select * from range_list ) select * from tmp1 order by 1, 2, 3, 4; +with tmp1 as (select * from range_list partition (p_201901)) select * from tmp1 order by 1, 2, 3, 4; + +--join normal table +select * from range_list left join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list left join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_list right join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list right join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_list full join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list full join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_list inner join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 
4, 5, 6; + +explain (costs off, verbose on) select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash left join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash right join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash full join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +explain (costs off, verbose on) select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range left join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range left join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range right join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range right join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range full join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range full join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range inner join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +explain (costs off, verbose on) select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +--join range_list and range_hash + +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +explain (costs off, verbose on) select * from range_list inner join range_hash on 
range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+--join range_hash and range_range
+
+select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+explain (costs off, verbose on) select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+drop table list_range_02;
+CREATE TABLE IF NOT EXISTS list_range_02
+(
+    col_1 int ,
+    col_2 int,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+)
+PARTITION BY list (col_1) SUBPARTITION BY range (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ),
+    PARTITION p_list_3 
VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; + +INSERT INTO list_range_02 VALUES (GENERATE_SERIES(0, 19),GENERATE_SERIES(0, 1000),GENERATE_SERIES(0, 99)); + explain (costs off, verbose on) select * from list_range_02 where col_2 >500 and col_2 <8000 order by col_1; + +drop index index_01; +drop table list_range_02; + +create table pjade(jid int,jn int,name varchar2)partition by range(jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), + subpartition hrp1_2 values less than(maxvalue)), + partition hrp2 values less than(maxvalue)( + subpartition hrp3_1 values less than(16), + subpartition hrp3_3 values less than(maxvalue)) +); + +create table cjade(jid int,jn int,name varchar2); +insert into pjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +insert into cjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; +drop table pjade; +drop table cjade; + +DROP SCHEMA subpartition_select CASCADE; diff --git a/src/test/regress/sql/hw_subpartition_split.sql b/src/test/regress/sql/hw_subpartition_split.sql new file mode 100644 index 000000000..dc4a9e4ee --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_split.sql @@ -0,0 +1,267 @@ +--prepare +DROP SCHEMA subpartition_split CASCADE; +CREATE SCHEMA subpartition_split; +SET CURRENT_SCHEMA TO subpartition_split; + +--split subpartition +-- list subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list 
values('201903', '3', '1', 1); +select * from list_list order by 1,2,3,4; + +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; +select * from list_list subpartition (p_201901_c) order by 1,2,3,4; + +select * from list_list partition (p_201901); + +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; +alter table list_list split subpartition p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; +select * from list_list subpartition (p_201902_c) order by 1,2,3,4; + +--error +alter table list_list split subpartition p_201902_a values (3) into +( + subpartition p_201902_ab, + subpartition p_201902_ac +); + +drop table list_list; + +-- range subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range order by 1,2,3,4; + +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +select * from range_range subpartition (p_201901_c) order by 1,2,3,4; +select * from range_range subpartition (p_201901_d) order by 1,2,3,4; + +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +select * from range_range subpartition (p_201902_c) order by 1,2,3,4; +select * from range_range subpartition (p_201902_d) order by 1,2,3,4; + +drop table range_range; +--test syntax +CREATE TABLE IF NOT EXISTS list_hash +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_list_1 VALUES 
(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10 ) + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 , + SUBPARTITION p_hash_2_3 , + SUBPARTITION p_hash_2_4 , + SUBPARTITION p_hash_2_5 + ), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30 ) + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_list_5 VALUES (default) + ( + SUBPARTITION p_hash_5_1 + ), + PARTITION p_list_6 VALUES (31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_hash_6_1 , + SUBPARTITION p_hash_6_2 , + SUBPARTITION p_hash_6_3 + ) +) ENABLE ROW MOVEMENT ; + +alter table list_hash split subPARTITION p_hash_2_3 at(-10) into ( subPARTITION add_p_01 , subPARTITION add_p_02 ); + +drop table list_hash; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range split subpartition p_201901_b values (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +drop table range_range; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list split subpartition p_201901_b at (2, 3) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +drop table list_list; + +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 
VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; + +alter table list_list_02 split PARTITION for (5) at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +drop table list_list_02; +--clean +DROP SCHEMA subpartition_split CASCADE; diff --git a/src/test/regress/sql/hw_subpartition_truncate.sql b/src/test/regress/sql/hw_subpartition_truncate.sql new file mode 100644 index 000000000..a7a1cdc86 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_truncate.sql @@ -0,0 +1,70 @@ +--prepare +DROP SCHEMA subpartition_truncate CASCADE; +CREATE SCHEMA subpartition_truncate; +SET CURRENT_SCHEMA TO subpartition_truncate; + +--truncate partition/subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + +select * from list_list partition (p_201901); +alter table list_list truncate partition p_201901; +select * from list_list partition (p_201901); + +select * from list_list partition (p_201902); +alter table list_list truncate partition p_201902; +select * from list_list partition (p_201902); +select * from list_list; + +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); + +select * from list_list subpartition (p_201901_a); +alter table list_list truncate subpartition p_201901_a; +select * from list_list subpartition (p_201901_a); + +select * from list_list subpartition (p_201901_b); +alter table list_list truncate subpartition p_201901_b; +select * from list_list subpartition (p_201901_b); + +select * from list_list subpartition (p_201902_a); +alter table list_list truncate subpartition p_201902_a; +select * from list_list subpartition (p_201902_a); + +select * from list_list subpartition (p_201902_b); +alter table list_list truncate subpartition p_201902_b; +select * from list_list subpartition (p_201902_b); + +select * from list_list; + +drop table list_list; +DROP SCHEMA subpartition_truncate CASCADE; diff --git a/src/test/regress/sql/hw_subpartition_update.sql b/src/test/regress/sql/hw_subpartition_update.sql new file mode 100644 index 000000000..878508dc9 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_update.sql @@ -0,0 +1,150 @@ +--prepare +DROP SCHEMA subpartition_update CASCADE; +CREATE SCHEMA subpartition_update; +SET CURRENT_SCHEMA TO subpartition_update; + +--update +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 
30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)DISABLE ROW MOVEMENT; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + +--error +update range_list set month_code = '201903'; +--error +update range_list set dept_code = '2'; + +update range_list set user_no = '2'; +select * from range_list order by 1, 2, 3, 4; + +-- test for upsert and merge into, both should report error +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt=1; + +CREATE TABLE temp_table +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +); +insert into temp_table values('201802', '1', '1', 1), ('201901', '2', '1', 1), ('201702', '1', '1', 1); +MERGE INTO range_list t1 +USING temp_table t2 +ON (t1.dept_code = t2.dept_code) +WHEN MATCHED THEN + UPDATE SET t1.month_code = t2.month_code WHERE t1.dept_code > 1 +WHEN NOT MATCHED THEN + INSERT VALUES (t2.month_code, t2.dept_code, t2.user_no, t2.sales_amt) WHERE t2.sales_amt = 1; + +drop table temp_table; +drop table range_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)ENABLE ROW MOVEMENT; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; +update range_list set dept_code = '2' where month_code = '201902'; +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; +select * from range_list partition (p_201902) order by 1, 2, 3, 4; +update range_list set month_code = '201903' where month_code = '201902'; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + +drop table range_list; + +-- FOREIGN KEY +drop table tb_02; +CREATE TABLE tb_02 +( + col_1 int PRIMARY KEY, + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int +); + +drop table 
range_range_02 cascade; +CREATE TABLE range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int , +FOREIGN KEY(col_1) REFERENCES tb_02(col_1) +) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 80 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); + +insert into tb_02 values(0,0,0,0); +insert into range_range_02 values(0,0,0,0); + +update tb_02 set col_1=8 where col_2=0; +drop table range_range_02 cascade; +drop table tb_02; +DROP SCHEMA subpartition_update CASCADE; diff --git a/src/test/regress/sql/hw_subpartition_view.sql b/src/test/regress/sql/hw_subpartition_view.sql new file mode 100644 index 000000000..cbf91f028 --- /dev/null +++ b/src/test/regress/sql/hw_subpartition_view.sql @@ -0,0 +1,126 @@ +-- CREATE partition table +create schema hw_subpartition_view; +set search_path = hw_subpartition_view; +create table tab_interval +( + c1 int, + c2 int, + logdate date not null +) +partition by range (logdate) +INTERVAL ('1 month') +( + PARTITION tab_interval_p0 VALUES LESS THAN ('2020-03-01'), + PARTITION tab_interval_p1 VALUES LESS THAN ('2020-04-01'), + PARTITION tab_interval_p2 VALUES LESS THAN ('2020-05-01') +); + +create index ip_index_local1 on tab_interval (c1) local; +create index gpi_index_test on tab_interval(c2) global; + +-- CREATE subpartition table +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2,3'), + SUBPARTITION p_201902_c values (DEFAULT) + ), + PARTITION p_max VALUES LESS THAN(maxvalue) +); + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +---- +-- owner +---- +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_TABLES where schema = 'hw_subpartition_view'; +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from SYS.MY_PART_TABLES where schema = 'hw_subpartition_view'; +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from MY_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from SYS.MY_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from 
SYS.MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_INDEXES where schema = 'hw_subpartition_view'; +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from SYS.MY_PART_INDEXES where schema = 'hw_subpartition_view'; +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from SYS.MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from SYS.MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; + +---- +-- others with permission +---- +create user user_spv_authed password 'Gauss@123'; +grant select on range_list to user_spv_authed; +grant select on tab_interval to user_spv_authed; +grant usage on schema sys to user_spv_authed; +set role "user_spv_authed" password 'Gauss@123'; +-- permission denied +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_TABLES where schema = 'hw_subpartition_view'; +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from ADM_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from ADM_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_INDEXES where schema = 'hw_subpartition_view'; +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_PARTITIONS where schema = 'hw_subpartition_view'; +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; +-- visible if granted for DB_xxx views +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_TABLES where schema = 'hw_subpartition_view'; +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from DB_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from DB_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select def_tablespace_name, 
index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_INDEXES where schema = 'hw_subpartition_view';
+select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_PARTITIONS where schema = 'hw_subpartition_view';
+select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_SUBPARTITIONS where schema = 'hw_subpartition_view';
+-- no entries for this user in MY_xxx views
+select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_TABLES where schema = 'hw_subpartition_view';
+select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from MY_TAB_PARTITIONS where schema = 'hw_subpartition_view';
+select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view';
+select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_INDEXES where schema = 'hw_subpartition_view';
+select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_PARTITIONS where schema = 'hw_subpartition_view';
+select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view';
+-- recover
+reset role;
+
+----
+-- others without permission
+----
+create user user_spv_notauthed password 'Gauss@123';
+set role "user_spv_notauthed" password 'Gauss@123';
+-- permission denied
+select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_TABLES where schema = 'hw_subpartition_view';
+select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from ADM_TAB_PARTITIONS where schema = 'hw_subpartition_view';
+select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from ADM_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view';
+select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from ADM_PART_INDEXES where schema = 'hw_subpartition_view';
+select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_PARTITIONS where schema = 'hw_subpartition_view';
+select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from ADM_IND_SUBPARTITIONS where schema = 'hw_subpartition_view';
+-- empty
+select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_TABLES where schema = 'hw_subpartition_view'; 
+select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from DB_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from DB_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from DB_PART_INDEXES where schema = 'hw_subpartition_view'; +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_PARTITIONS where schema = 'hw_subpartition_view'; +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from DB_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; +-- mpty +select table_name, partitioning_type, partition_count, partitioning_key_count, def_tablespace_name, schema, subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_TABLES where schema = 'hw_subpartition_view'; +select table_name, partition_name, high_value, tablespace_name , schema, subpartition_count, high_value_length from MY_TAB_PARTITIONS where schema = 'hw_subpartition_view'; +select table_name, partition_name, subpartition_name, high_value, tablespace_name, schema, high_value_length from MY_TAB_SUBPARTITIONS where schema = 'hw_subpartition_view'; +select def_tablespace_name, index_name, partition_count, partitioning_key_count, partitioning_type, schema, table_name , subpartitioning_type, def_subpartition_count, subpartitioning_key_count from MY_PART_INDEXES where schema = 'hw_subpartition_view'; +select index_name, partition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_PARTITIONS where schema = 'hw_subpartition_view'; +select index_name, partition_name, subpartition_name, def_tablespace_name, high_value, index_partition_usable, schema, high_value_length from MY_IND_SUBPARTITIONS where schema = 'hw_subpartition_view'; +-- recover +reset role; + +drop schema hw_subpartition_view cascade; \ No newline at end of file diff --git a/src/test/regress/sql/hw_to_timestamp.sql b/src/test/regress/sql/hw_to_timestamp.sql index 4f5979e5a..bee0e0c45 100644 --- a/src/test/regress/sql/hw_to_timestamp.sql +++ b/src/test/regress/sql/hw_to_timestamp.sql @@ -209,9 +209,6 @@ SELECT TO_CHAR(TO_DATE('27/OCT/17', 'DD-MON-RR') ,'DD') "Year"; SELECT to_date('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 11111'); SELECT to_date('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 11111'); -/*---------------------------------------------------------------------- -DTS2021082425612: support format "FF1"、"FF2"、"FF3"、"FF4"、"FF5"、"FF6" -----------------------------------------------------------------------*/ SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'YYYYMMDDHH24MISSFF1'); SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'YYYYMMDDHH24MISSFF2'); SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'YYYYMMDDHH24MISSFF3'); @@ -240,18 +237,12 @@ SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'yyyymmddhh24missff4 SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'yyyymmddhh24missff5'); SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'yyyymmddhh24missff6'); -/*---------------------------------- -DTS2021083017908: support format "X" -----------------------------------*/ SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'YYYYMMDDHH24MISSXFF'); SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'YYYYMMDDHH24MISSXFF'); SELECT TO_CHAR(timestamp '2021-08-30 21:21:55.535744', 'yyyymmddhh24missxff'); SELECT TO_CHAR(timestamptz '2021-08-30 21:21:55.535744+08', 'yyyymmddhh24missxff'); -/*---------------------------------------------- -DTS2017031602663: support timezone Asia/Beijing -----------------------------------------------*/ set timezone='Asia/Beijing'; select extract(timezone from 
now()); diff --git a/src/test/regress/sql/index_advisor.sql b/src/test/regress/sql/index_advisor.sql index c4e525bef..f7707c70b 100644 --- a/src/test/regress/sql/index_advisor.sql +++ b/src/test/regress/sql/index_advisor.sql @@ -9,34 +9,175 @@ ANALYZE t1; CREATE TABLE t2 (col1 int, col2 int); INSERT INTO t2 VALUES(generate_series(1, 1000),generate_series(1, 1000)); ANALYZE t2; +CREATE TEMP TABLE mytemp1 (col1 int, col2 int, col3 text); +INSERT INTO mytemp1 VALUES(generate_series(1, 3000),generate_series(1, 3000),repeat( chr(int4(random()*26)+65),4)); +ANALYZE mytemp1; ---single query --test where -SELECT * FROM gs_index_advise('SELECT * FROM t1 WHERE col1 = 10'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 WHERE col1 = 10') as a; --test join -SELECT * FROM gs_index_advise('SELECT * FROM t1 join t2 on t1.col1 = t2.col1'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 join t2 on t1.col1 = t2.col1') as a; --test multi table -SELECT * FROM gs_index_advise('SELECT count(*), t2.col1 FROM t1 join t2 on t1.col2 = t2.col2 WHERE t2.col2 > 2 GROUP BY t2.col1 ORDER BY t2.col1'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT count(*), t2.col1 FROM t1 join t2 on t1.col2 = t2.col2 WHERE t2.col2 > 2 GROUP BY t2.col1 ORDER BY t2.col1') as a; --test order by -SELECT * FROM gs_index_advise('SELECT * FROM t1 ORDER BY 2'); -SELECT * FROM gs_index_advise('SELECT * FROM t1 as a WHERE a.col2 in (SELECT col1 FROM t2 ORDER BY 1) ORDER BY 2'); -SELECT * FROM gs_index_advise('SELECT * FROM t1 WHERE col1 > 10 ORDER BY 1,col2'); -SELECT * FROM gs_index_advise('SELECT *, *FROM t1 ORDER BY 2, 4'); -SELECT * FROM gs_index_advise('SELECT *, col2 FROM t1 ORDER BY 1, 3'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 ORDER BY 2') as a; +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 as a WHERE a.col2 in (SELECT col1 FROM t2 ORDER BY 1) ORDER BY 2') as a; +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 WHERE col1 > 10 ORDER BY 1,col2') as a; +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT *, *FROM t1 ORDER BY 2, 4') as a; +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT *, col2 FROM t1 ORDER BY 1, 3') as a; --test string overlength -SELECT * FROM gs_index_advise('SELECT * FROM t1 where col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'')'); +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM t1 where col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'')') as a; +--test union all +SELECT a.schema, 
a.table, a.column FROM gs_index_advise('select * from ((select col1, col2 from t1 where col1=1) union all (select col1, col2 from t2 where col1=1))') as a; +--test insert +SELECT a.schema, a.table, a.column FROM gs_index_advise('INSERT INTO t2 (SELECT col1, col2 from t1 where col1=1)') as a; +--test delete +SELECT a.schema, a.table, a.column FROM gs_index_advise('DELETE FROM t1 where col1 > (SELECT COUNT(*) from t1 where col1<1000)') as a; +--test update +SELECT a.schema, a.table, a.column FROM gs_index_advise('UPDATE t1 SET col1=(SELECT col2 from t2 where col1=10)') as a; +--test nested select +SELECT a.schema, a.table, a.column FROM gs_index_advise('select count(*) from (select t1.col1, t2.col2 from t1 join t2 on t1.col1 = t2.col1)') as a; +--test temp table +SELECT a.schema, a.table, a.column FROM gs_index_advise('SELECT * FROM mytemp1 WHERE col1 = 10') as a; +--test complex sql +SELECT a.schema, a.table, a.column FROM gs_index_advise('select * from ((select t1.col1, t2.col2 from t1 join t2 on t1.col1 = t2.col1) union all (select col1, col2 from t1 where col1=col2 and col2>200 and col3 in (''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'',''bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'',''ccccccccccccccccccccccccccccccccccccccc'',''ddddddddddddddddddddddddddddddddddddddd'',''ffffffffffffffffffffffffffffffffffffffff'',''ggggggggggggggggggggggggggggggggggggggggggggggggggg'',''ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'',''vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'',''ggmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'') order by col3)) order by col2 limit 10') as a; +SELECT a.schema, a.table, a.column FROM gs_index_advise('select * from ((SELECT t1.col1, t1.col2 from t1 where col1=col2 and col1<99) UNION ALL (select col2, col1 from t1 where col1=col2 and col2>200 order by col1 DESC)) as t3 join t2 on t3.col1 = t2.col1 where t2.col2=4 and t3.col1<100 and t2.col1=4 order by t3.col1, t2.col2 DESC limit 100') as a; + ---virtual index --test hypopg_create_index SELECT * FROM hypopg_create_index('CREATE INDEX ON t1(col1)'); ---test hypopg_display_index +SELECT * FROM hypopg_create_index('CREATE INDEX ON t2(col1)'); +SELECT * FROM hypopg_create_index('SELECT * from t1'); +SELECT * FROM hypopg_create_index('UPDATE t2 SET col1=(SELECT col2 from t2 where col1=10)'); +SELECT * FROM hypopg_create_index('DELETE from t2 where col1 <10'); +SELECT * FROM hypopg_create_index('INSERT INTO t2 VALUES(generate_series(1001, 2000),generate_series(1001, 2000))'); +--test explain set enable_hypo_index = on;explain SELECT * FROM t1 WHERE col1 = 100; ---test hypopg_drop_index +explain UPDATE t1 SET col1=0 where col1=2; +explain UPDATE t1 SET col1=(SELECT col2 from t2 where col1=10); +explain INSERT INTO t1 SELECT * from t1 where col1=10; +explain DELETE FROM t1 where col1 > (SELECT COUNT(*) from t1 where col1<1000); + +--test partition table +create table range_part_a( +stu_id varchar2(100), +stu_name varchar2(100), +sex varchar2(1), +credit integer default 0 +)partition by range (credit) +(partition p_range_1 values less than (60), +partition p_range_2 values less than (120), +partition p_range_3 values less than (180), +partition p_range_4 values less than (240), +partition p_range_6 values less than (maxvalue) +); +create table range_part_b( +stu_id varchar2(100), +stu_name varchar2(100), +sex varchar2(1), +credit integer default 0 +)partition by range (credit) +(partition p_range_1 values less than (60), +partition p_range_2 values less 
than (120),
+partition p_range_3 values less than (180),
+partition p_range_4 values less than (240),
+partition p_range_6 values less than (maxvalue)
+);
+CREATE TABLE range_subpart_a(
+col_1 int,
+col_2 int,
+col_3 VARCHAR2 ( 30 ) ,
+col_4 int
+)PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(PARTITION p_range_1 VALUES LESS THAN( 1000 )
+ (SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ),
+ SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+ ),
+PARTITION p_range_2 VALUES LESS THAN( 2001 )
+ (SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ),
+ SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE )
+ )
+);
+CREATE TABLE range_subpart_b(
+col_1 int,
+col_2 int,
+col_3 VARCHAR2 ( 30 ) ,
+col_4 int
+)PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(PARTITION p_range_1 VALUES LESS THAN( 1000 )
+ (SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ),
+ SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+ ),
+PARTITION p_range_2 VALUES LESS THAN( 30001 )
+ (SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ),
+ SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE )
+ )
+);
+INSERT INTO range_part_a VALUES(repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),1),generate_series(1, 2000));
+ANALYZE range_part_a;
+INSERT INTO range_part_b VALUES(repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),4),repeat( chr(int4(random()*26)+65),1),generate_series(1, 3000));
+ANALYZE range_part_b;
+INSERT INTO range_subpart_a VALUES(generate_series(1, 2000),generate_series(1, 2000),repeat( chr(int4(random()*26)+65),1),generate_series(1, 2000));
+ANALYZE range_subpart_a;
+INSERT INTO range_subpart_b VALUES(generate_series(1, 3000),generate_series(1, 3000),repeat( chr(int4(random()*26)+65),1),generate_series(1, 3000));
+ANALYZE range_subpart_b;
+
+--single query
+--test syntax error
+select * from gs_index_advise('select * from range_part_a as a where a.stu_id in (select stu_id from range_part_a order by 4)order by 4');
+
+--test local index
+--partition
+select * from gs_index_advise('select * from range_part_a where credit = 4');
+select * from gs_index_advise('select * from range_part_a where stu_id = ''10'' and credit = 4');
+select * from gs_index_advise('select * from range_part_a partition(p_range_1) where stu_id = ''10''');
+--subpartition
+select * from gs_index_advise('select * from range_subpart_a partition(p_range_1) where col_2 = 2');
+select * from gs_index_advise('select * from range_subpart_a subpartition(p_range_1_1 ) where col_3 =''2''');
+select * from gs_index_advise('select * from range_subpart_a where col_1 =2 and col_2 = 3');
+
+--test global index
+--partition
+select * from gs_index_advise('select * from range_part_a where stu_id = ''10''');
+--subpartition
+select * from gs_index_advise('select * from range_subpart_a where col_1 = 10');
+
+
+--test subquery
+--partition
+select * from gs_index_advise('select * from range_part_a where stu_id = (select stu_id from range_part_a where stu_id=''10'') and credit = 2');
+--subpartition
+select * from gs_index_advise('select * from range_subpart_a where col_1 = (select col_2 from range_part_a where col_3=''10'') and col_2 = 2');
+
+--test join
+--partition
+select * from gs_index_advise('select * from range_part_a join range_part_b on range_part_b.credit = range_part_a.credit where range_part_a.stu_id = ''12''');
+select * from gs_index_advise('select * from range_part_a join range_part_b partition(p_range_1) on range_part_a.stu_id = range_part_b.stu_id where 
range_part_a.stu_id = ''12'''); +select * from gs_index_advise('select * from range_part_a partition(p_range_1) join range_part_b partition(p_range_1) on range_part_a.stu_id = range_part_b.stu_id where range_part_a.stu_id = ''12'''); +--subpartition +select * from gs_index_advise('select * from range_subpart_a join range_subpart_b on range_subpart_b.col_2 = range_subpart_a.col_2 where range_subpart_a.col_3 = ''12'''); +select * from gs_index_advise('select * from range_part_a join range_subpart_b on range_part_a.credit = range_subpart_b.col_2 where range_subpart_b.col_3 = ''12'''); +select * from gs_index_advise('select * from range_subpart_a partition(p_range_1) join range_subpart_b subpartition(p_range_1_1) on range_subpart_a.col_3 = range_subpart_b.col_3 where range_subpart_a.col_3 = ''12'''); + +--virtual index +select * from hypopg_create_index('create index on range_part_a(credit) local'); +select * from hypopg_create_index('create index on range_subpart_a(col_2) local'); +explain select * from range_part_a where stu_id = '10' and credit = 2; +select * from hypopg_create_index('create index on range_part_a(credit)'); +select * from hypopg_create_index('create index on range_subpart_a(col_2)'); + +--test hypopg_display_index SELECT * FROM hypopg_display_index(); --test hypopg_reset_index SELECT * FROM hypopg_reset_index(); DROP TABLE t1; DROP TABLE t2; +DROP TABLE range_part_a; +DROP TABLE range_part_b; +DROP TABLE range_subpart_a; +DROP TABLE range_subpart_b; \c regression; drop database IF EXISTS pl_test_ind_adv; diff --git a/src/test/regress/sql/int16.sql b/src/test/regress/sql/int16.sql new file mode 100644 index 000000000..5cafa3b6a --- /dev/null +++ b/src/test/regress/sql/int16.sql @@ -0,0 +1,97 @@ +-- +-- INT16 +-- Test int16 128-bit integers. 
+-- +CREATE SCHEMA schema_int16; +SET search_path = schema_int16; +SET ENABLE_BETA_FEATURES = OFF; + +-- should fail +CREATE TABLE INT16_TBL(q1 int16, q2 int16, q3 serial); + +SET ENABLE_BETA_FEATURES = ON; +CREATE TABLE INT16_TBL(q1 int16, q2 int16, q3 serial); + + +-- do not support create btree index on int16 for now +CREATE INDEX int16idx on INT16_TBL(q1); + +INSERT INTO INT16_TBL VALUES(' 123 ',' 456'); +INSERT INTO INT16_TBL VALUES('456 ','12345678901234567890123456789'); +INSERT INTO INT16_TBL VALUES('123456789012345678901234567890','123'); +INSERT INTO INT16_TBL VALUES(+1234567890123456789012345678901,'12345678901234567890123456789012'); +INSERT INTO INT16_TBL VALUES('+123456789012345678901234567890123','-1234567890123456789012345678901234'); + +-- test boundary +INSERT INTO INT16_TBL VALUES(170141183460469231731687303715884105727, -170141183460469231731687303715884105728); +INSERT INTO INT16_TBL VALUES(170141183460469231731687303715884105728, 0); +INSERT INTO INT16_TBL VALUES(0, -170141183460469231731687303715884105729); + +-- bad inputs +INSERT INTO INT16_TBL(q1) VALUES (' '); +INSERT INTO INT16_TBL(q1) VALUES ('xxx'); +INSERT INTO INT16_TBL(q1) VALUES ('4321170141183460469231731687303715884105727'); +INSERT INTO INT16_TBL(q1) VALUES ('-143170141183460469231731687303715884105727'); +INSERT INTO INT16_TBL(q1) VALUES ('- 123'); +INSERT INTO INT16_TBL(q1) VALUES (' 345 5'); +INSERT INTO INT16_TBL(q1) VALUES (''); + +-- do not support order for now +SELECT * FROM INT16_TBL ORDER BY q1,q2; + +-- support compare +SELECT count(*) FROM INT16_TBL WHERE q2 = q2 + 1; +SELECT count(*) FROM INT16_TBL WHERE q2 <> q2 - 1; +SELECT count(*) FROM INT16_TBL WHERE q2 < q2 * 1; +SELECT count(*) FROM INT16_TBL WHERE q2 <= q2 / 1; +SELECT count(*) FROM INT16_TBL WHERE q2 <= q2; +SELECT count(*) FROM INT16_TBL WHERE q2 > q2 + 1; +SELECT count(*) FROM INT16_TBL WHERE q2 >= q2 + 1; + +-- support type casts +\dC int16 +SELECT CAST(q1 AS int2) FROM INT16_TBL WHERE q3 < 3 ORDER BY q3; +SELECT CAST(q1 AS int4) FROM INT16_TBL WHERE q3 < 3 ORDER BY q3; +SELECT CAST(q1 AS int8) FROM INT16_TBL WHERE q3 < 3 ORDER BY q3; +SELECT CAST(q1 AS numeric) FROM INT16_TBL ORDER BY q3; +SELECT CAST(q1 AS float4) FROM INT16_TBL ORDER BY q3; +SELECT CAST(q1 AS float8) FROM INT16_TBL ORDER BY q3; +SELECT CAST(q1 AS boolean) FROM INT16_TBL ORDER BY q3; + + +-- some may overflow +SELECT CAST(q1 AS int2) FROM INT16_TBL ORDER BY q3; +SELECT CAST(q1 AS int4) FROM INT16_TBL ORDER BY q3; +SELECT CAST(q1 AS int8) FROM INT16_TBL ORDER BY q3; +SELECT CAST(q1 AS oid) FROM INT16_TBL ORDER BY q3; + +-- cast to int16 +CREATE TABLE TEST_TBL( + v1 bigint, + v2 boolean, + v3 double precision, + v4 integer, + v5 numeric, + v6 oid, + v7 real, + v8 smallint, + v9 tinyint); + +INSERT INTO TEST_TBL VALUES(1, 2, 3, 4, 5, 6, 7, 8, 9); + +CREATE TABLE CAST_TBL( + v1 int16, + v2 int16, + v3 int16, + v4 int16, + v5 int16, + v6 int16, + v7 int16, + v8 int16, + v9 int16); + +INSERT INTO CAST_TBL SELECT * FROM TEST_TBL; + +SELECT * FROM CAST_TBL; + +DROP SCHEMA schema_int16 CASCADE; \ No newline at end of file diff --git a/src/test/regress/sql/large_sequence.sql b/src/test/regress/sql/large_sequence.sql new file mode 100644 index 000000000..495dc42d5 --- /dev/null +++ b/src/test/regress/sql/large_sequence.sql @@ -0,0 +1,346 @@ +CREATE SCHEMA large_sequence; +SET CURRENT_SCHEMA = large_sequence; + +-- test psql support +CREATE SEQUENCE S1; +CREATE LARGE SEQUENCE S2; + +\d + +\ds + +\d S1 + +\d S2 + +COMMENT ON LARGE SEQUENCE S2 IS 'FOO'; +COMMENT ON LARGE 
SEQUENCE S2 IS NULL;
+
+-- temp sequences are not supported
+CREATE TEMP LARGE SEQUENCE myseq2;
+CREATE TEMP LARGE SEQUENCE myseq3;
+
+-- default no cache, no start, no cycle
+CREATE LARGE SEQUENCE S
+INCREMENT 17014118346046923173168730371588410572
+MINVALUE 17014118346046923173168730371588410573
+MAXVALUE 170141183460469231731687303715884105721;
+
+-- basic api
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+
+SELECT * FROM setval('s', 17014118346046923173168730371588410573);
+
+SELECT * FROM nextval('s');
+
+SELECT * FROM lastval();
+
+SELECT * FROM currval('s');
+
+SELECT * FROM setval('s', 17014118346046923173168730371588410573, FALSE);
+
+SELECT * FROM setval('s'::text, 17014118346046923173168730371588410573);
+
+SELECT * FROM setval('s'::text, 17014118346046923173168730371588410573, FALSE);
+
+SELECT * FROM nextval('s'::text);
+
+SELECT * FROM currval('s'::text);
+
+SELECT * FROM setval('s'::regclass, 17014118346046923173168730371588410573);
+
+SELECT * FROM setval('s'::regclass, 17014118346046923173168730371588410573, FALSE);
+
+SELECT * FROM nextval('s'::regclass);
+
+SELECT * FROM currval('s'::regclass);
+
+-- needs DROP LARGE SEQUENCE, plain DROP SEQUENCE fails
+DROP SEQUENCE S;
+DROP LARGE SEQUENCE S;
+
+-- cycle
+CREATE LARGE SEQUENCE S
+INCREMENT 17014118346046923173168730371588410572
+MINVALUE 17014118346046923173168730371588410573
+MAXVALUE 51042355038140769519506191114765231717
+CYCLE;
+
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+
+DROP LARGE SEQUENCE S;
+
+-- cache
+CREATE LARGE SEQUENCE S
+INCREMENT 17014118346046923173168730371588410572
+MINVALUE 17014118346046923173168730371588410573
+MAXVALUE 170141183460469231731687303715884105721
+CACHE 5;
+
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+SELECT * FROM nextval('s');
+
+DROP LARGE SEQUENCE S;
+
+-- start with
+CREATE LARGE SEQUENCE S
+INCREMENT -17014118346046923173168730371588410572
+MINVALUE 17014118346046923173168730371588410573
+MAXVALUE 170141183460469231731687303715884105721
+START WITH 170141183460469231731687303715884105720
+CACHE 5;
+
+DROP LARGE SEQUENCE S;
+
+CREATE LARGE SEQUENCE S
+INCREMENT 100000000000000000000000000000000000
+MINVALUE 100000000000000000000000000000000000
+MAXVALUE 100000000000000000000000000000000000000;
+
+-- can create sequence with default nextval()
+CREATE TABLE TAB_SEQ(c1 numeric, c2 numeric default nextval('S'), c3 serial);
+INSERT INTO TAB_SEQ VALUES(0);
+INSERT INTO TAB_SEQ VALUES(0);
+INSERT INTO TAB_SEQ VALUES(0);
+INSERT INTO TAB_SEQ VALUES(0);
+INSERT INTO TAB_SEQ VALUES(0);
+INSERT INTO TAB_SEQ VALUES(0);
+
+SELECT * FROM TAB_SEQ ORDER BY c3;
+
+\d TAB_SEQ
+-- cannot drop without CASCADE
+DROP LARGE SEQUENCE S;
+
+DROP LARGE SEQUENCE S CASCADE;
+
+-- default value is dropped accordingly
+\d TAB_SEQ
+
+-- alter sequence
+CREATE LARGE SEQUENCE foo;
+
+-- rename not supported
+ALTER LARGE SEQUENCE foo RENAME TO bar;
+
+SELECT * FROM foo;
+
+-- alter maxvalue - ok
+ALTER LARGE SEQUENCE foo MAXVALUE 1000;
+
+-- alter owner role - ok
+CREATE ROLE role_foo PASSWORD '!@#123qwe';
+ALTER LARGE SEQUENCE foo OWNER TO role_foo;
+
+-- alter owner column - fail if owners are different
+CREATE TABLE tab_foo (a bigint);
+ALTER LARGE SEQUENCE foo OWNED BY tab_foo.a;
+DROP LARGE SEQUENCE IF EXISTS foo;
+CREATE LARGE SEQUENCE foo;
+ALTER LARGE SEQUENCE IF EXISTS foo OWNED BY tab_foo.a;
+
+-- owner column set OK
+DROP TABLE tab_foo;
+
+-- alter if exists works
+ALTER LARGE SEQUENCE IF EXISTS foo MAXVALUE 100;
+CREATE LARGE SEQUENCE foo INCREMENT 10 CYCLE;
+ALTER LARGE SEQUENCE IF EXISTS foo MAXVALUE 30;
+
+SELECT * FROM nextval('foo');
+SELECT * FROM nextval('foo');
+SELECT * FROM nextval('foo');
+SELECT * FROM nextval('foo');
+
+ALTER LARGE SEQUENCE IF EXISTS foo NO MAXVALUE;
+SELECT * FROM nextval('foo');
+SELECT * FROM nextval('foo');
+SELECT * FROM nextval('foo');
+SELECT * FROM nextval('foo');
+
+ALTER LARGE SEQUENCE IF EXISTS foo NOMAXVALUE;
+
+-- alter other attributes are not supported
+ALTER LARGE SEQUENCE IF EXISTS foo MINVALUE 1;
+ALTER LARGE SEQUENCE IF EXISTS foo NO CYCLE;
+ALTER LARGE SEQUENCE IF EXISTS foo START 1;
+ALTER LARGE SEQUENCE IF EXISTS foo CACHE 100;
+
+-- test for largeserial
+CREATE TABLE serialTest (f1 text, f2 largeserial);
+INSERT INTO serialTest VALUES ('foo');
+INSERT INTO serialTest VALUES ('bar');
+INSERT INTO serialTest VALUES ('force', 17014118346046923173168730371588410573);
+INSERT INTO serialTest VALUES ('wrong', NULL);
+SELECT * FROM serialTest;
+
+CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2,
+  f5 bigserial, f6 serial8, f7 largeserial, f8 serial16);
+INSERT INTO serialTest2 (f1)
+  VALUES ('test_defaults');
+INSERT INTO serialTest2 (f1, f2, f3, f4, f5, f6, f7, f8)
+  VALUES ('test_max_vals', 2147483647, 32767, 32767, 9223372036854775807,
+          9223372036854775807, 170141183460469231731687303715884105727, 170141183460469231731687303715884105727),
+         ('test_min_vals', -2147483648, -32768, -32768, -9223372036854775808,
+          -9223372036854775808, -170141183460469231731687303715884105728, -170141183460469231731687303715884105728);
+
+INSERT INTO serialTest2 (f1, f7)
+  VALUES ('bogus', -170141183460469231731687303715884105729);
+
+INSERT INTO serialTest2 (f1, f8)
+  VALUES ('bogus', -170141183460469231731687303715884105729);
+
+INSERT INTO serialTest2 (f1, f7)
+  VALUES ('bogus', 170141183460469231731687303715884105728);
+
+INSERT INTO serialTest2 (f1, f8)
+  VALUES ('bogus', 170141183460469231731687303715884105728);
+
+SELECT * FROM serialTest2 ORDER BY f2 ASC;
+
+SELECT nextval('serialTest2_f2_seq');
+SELECT nextval('serialTest2_f3_seq');
+SELECT nextval('serialTest2_f4_seq');
+SELECT nextval('serialTest2_f5_seq');
+SELECT nextval('serialTest2_f6_seq');
+SELECT nextval('serialTest2_f7_seq');
+SELECT nextval('serialTest2_f8_seq');
+
+-- Create table like
+CREATE TABLE cat (like serialTest2);
+INSERT INTO cat (f1)
+  VALUES ('eins');
+INSERT INTO cat (f1)
+  VALUES ('zwei');
+INSERT INTO cat (f1)
+  VALUES ('drei');
+INSERT INTO cat (f1)
+  VALUES ('funf');
+SELECT * FROM cat;
+
+-- renaming serial sequences
+ALTER TABLE serialtest_f7_seq RENAME TO serialtest_f7_foo;
+INSERT INTO serialTest VALUES ('more');
+SELECT * FROM serialTest;
+
+-- Check dependencies of serial and ordinary sequences
+CREATE LARGE SEQUENCE myseq2;
+CREATE LARGE SEQUENCE myseq3;
+-- Cannot have type cast in nextval's argument
+CREATE TABLE t1 (
+    f1 serial,
+    f2 numeric DEFAULT nextval('myseq2'),
+    f3 numeric DEFAULT nextval('myseq3'::text)
+);
+
+CREATE TABLE t1 (
+    f1 largeserial,
+    f2 numeric DEFAULT nextval('myseq2'),
+    f3 numeric DEFAULT nextval('myseq3')
+);
+
+-- Both drops should fail
+DROP LARGE SEQUENCE t1_f1_seq;
+DROP LARGE SEQUENCE myseq2;
+DROP TABLE t1;
+-- Fails because it no longer exists:
+DROP SEQUENCE t1_f1_seq;
+DROP LARGE SEQUENCE myseq2;
+
+-- Information schema does not support large sequences for now
+SELECT * FROM information_schema.sequences WHERE sequence_name in ('myseq3');
+
+-- Privilege check
+CREATE LARGE SEQUENCE priv_seq;
+CREATE ROLE zeus PASSWORD '123qwe!@#';
+
+GRANT SELECT ON priv_seq TO zeus;
+GRANT ALL ON SCHEMA large_sequence TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+SELECT * FROM priv_seq;
+\d priv_seq
+SELECT nextval('priv_seq');
+ALTER LARGE SEQUENCE priv_seq MAXVALUE 100;
+COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+GRANT UPDATE ON priv_seq TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+SELECT nextval('priv_seq');
+ALTER LARGE SEQUENCE priv_seq MAXVALUE 100;
+COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+GRANT USAGE ON priv_seq TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+ALTER LARGE SEQUENCE priv_seq MAXVALUE 100;
+COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+GRANT ALTER ON priv_seq TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+ALTER LARGE SEQUENCE priv_seq MAXVALUE 100;
+COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+GRANT COMMENT ON priv_seq TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+GRANT DROP ON priv_seq TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+CREATE SCHEMA seq_priv_schema;
+CREATE LARGE SEQUENCE seq_priv_schema.priv_seq;
+GRANT ALL ON SCHEMA seq_priv_schema TO zeus;
+GRANT ALL ON ALL SEQUENCES IN SCHEMA seq_priv_schema TO zeus;
+SET ROLE zeus PASSWORD '123qwe!@#';
+SET current_schema = seq_priv_schema;
+SELECT * FROM priv_seq;
+\d priv_seq
+SELECT nextval('priv_seq');
+ALTER LARGE SEQUENCE priv_seq MAXVALUE 100;
+COMMENT ON LARGE SEQUENCE priv_seq IS 'FOO';
+DROP LARGE SEQUENCE priv_seq;
+
+RESET ROLE;
+DROP SCHEMA seq_priv_schema CASCADE;
+REVOKE ALL ON SCHEMA large_sequence FROM zeus;
+DROP ROLE zeus;
+
+SET current_schema = large_sequence;
+create table ctest (a int) with (orientation=column);
+create user ctest_user password 'huawei@123';
+alter table ctest owner to "ctest_user";
+drop user ctest_user cascade;
+
+DROP ROLE role_foo;
+DROP SCHEMA large_sequence CASCADE;
\ No newline at end of file
diff --git a/src/test/regress/sql/leaky_function_operator.sql b/src/test/regress/sql/leaky_function_operator.sql
index 5223f2ecc..a82c36f81 100644
--- a/src/test/regress/sql/leaky_function_operator.sql
+++ b/src/test/regress/sql/leaky_function_operator.sql
@@ -1,22 +1,22 @@
 -- test leaky-function protections in selfuncs
 -- regress_user1 will own a table and provide a view for it. 
-grant all on schema public to public; create user regress_user1 password 'gauss@123'; create user regress_user2 password 'gauss@123'; SET SESSION AUTHORIZATION regress_user1 password 'gauss@123'; -CREATE TABLE public.atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; -CREATE INDEX ON public.atest12 (a); -CREATE INDEX ON public.atest12 (abs(a)); -VACUUM ANALYZE public.atest12; +CREATE TABLE atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; +CREATE INDEX ON atest12 (a); +CREATE INDEX ON atest12 (abs(a)); +VACUUM ANALYZE atest12; +GRANT USAGE ON SCHEMA regress_user1 TO regress_user2; -- Check if regress_user2 can break security. SET SESSION AUTHORIZATION regress_user2 password 'gauss@123'; -CREATE FUNCTION public.leak20(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ LANGUAGE plpgsql immutable; -CREATE OPERATOR >>>> (procedure = public.leak20, leftarg = integer, rightarg = integer,restrict = scalargtsel); +CREATE FUNCTION leak20(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ LANGUAGE plpgsql immutable; +CREATE OPERATOR >>>> (procedure = leak20, leftarg = integer, rightarg = integer, restrict = scalargtsel); -- This should not show any "leak" notices before failing. -EXPLAIN (COSTS OFF) SELECT * FROM public.atest12 ; +EXPLAIN (COSTS OFF) SELECT * FROM regress_user1.atest12; -- This should not show any "leak" notices before failing. (After Patch) -EXPLAIN (COSTS OFF) SELECT * FROM public.atest12 WHERE a >>>> 99; +EXPLAIN (COSTS OFF) SELECT * FROM regress_user1.atest12 WHERE a >>>> 99; diff --git a/src/test/regress/sql/limit1.sql b/src/test/regress/sql/limit1.sql index ffd2df41b..20327f045 100644 --- a/src/test/regress/sql/limit1.sql +++ b/src/test/regress/sql/limit1.sql @@ -66,6 +66,8 @@ select a.c1, b.c2, b.c3 from (select c1 from limit_table_03 order by c1 limit 2 select a.c1, b.c2, b.c3 from (select c1 from limit_table_03 order by c1 limit 2) a , limit_table_02 b where a.c1=b.c2 order by a.c1; select a.c1, b.c2, b.c3 from (select c1 from limit_table_03 order by c1 offset 1) a , limit_table_02 b where a.c1=b.c2 order by a.c1; +explain (verbose, costs off) select * from limit_table_01 where rownum <= 10; + drop table limit_table_01; drop table limit_table_02; drop table limit_table_03; diff --git a/src/test/regress/sql/llvm_vecagg3.sql b/src/test/regress/sql/llvm_vecagg3.sql index 3aed8162f..ab1b4e5dd 100644 --- a/src/test/regress/sql/llvm_vecagg3.sql +++ b/src/test/regress/sql/llvm_vecagg3.sql @@ -72,6 +72,11 @@ set analysis_options="on(HASH_CONFLICT)"; select sum(col_num3), avg(col_num3-(-0.25)) from llvm_vecagg_table_03 group by col_int order by 1, 2; reset analysis_options; +---- +--- test5 : test distinct in aggregate functions +---- +select col_int, max(distinct col_num1 order by col_num1), min(col_num2 order by col_num2) from llvm_vecagg_table_03 group by col_int order by 1, 2, 3; + ---- --- clean table and resource ---- diff --git a/src/test/regress/sql/merge_subquery.sql b/src/test/regress/sql/merge_subquery.sql new file mode 100644 index 000000000..657a5aa9f --- /dev/null +++ b/src/test/regress/sql/merge_subquery.sql @@ -0,0 +1,78 @@ +create table merge_subquery_test1(id int, val int); +create table merge_subquery_test2(id int, val int); +insert into merge_subquery_test1 values(generate_series(1, 10), generate_series(1, 5)); +insert into merge_subquery_test2 values(generate_series(1, 5), generate_series(21, 25)); +insert into
merge_subquery_test2 values(generate_series(11, 15), generate_series(11, 15)); + +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg2.val+mg1.val) +when not matched then + insert values(mg2.id, mg2.val); + +merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg2.val+mg1.val) +when not matched then + insert values(mg2.id, mg2.val); + +select * from merge_subquery_test1; + +delete from merge_subquery_test1 where id > 10; + +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_test2 mg3) +when not matched then + insert values(mg2.id, mg2.val); + +merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_test2 mg3) +when not matched then + insert values(mg2.id, mg2.val); + +select * from merge_subquery_test1; + +delete from merge_subquery_test1 where id > 10; + +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg2.val * 2)); + +merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg2.val * 2)); + +select * from merge_subquery_test1; + +delete from merge_subquery_test1 where id > 10; + +explain merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg3.val from merge_subquery_test1 mg3 limit 1)); + +merge into merge_subquery_test1 mg1 +using merge_subquery_test2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=mg2.val +when not matched then + insert values(mg2.id, (select mg3.val from merge_subquery_test1 mg3 limit 1)); + +select * from merge_subquery_test1; + +drop table merge_subquery_test1; +drop table merge_subquery_test2; diff --git a/src/test/regress/sql/merge_subquery2.sql b/src/test/regress/sql/merge_subquery2.sql new file mode 100644 index 000000000..d2d7eb4e0 --- /dev/null +++ b/src/test/regress/sql/merge_subquery2.sql @@ -0,0 +1,104 @@ +create schema merge_subquery2; +set current_schema=merge_subquery2; + +create table merge_subquery_utest1(id int, val int) with(storage_type=ustore); +create table merge_subquery_utest2(id int, val int) with(storage_type=ustore); +insert into merge_subquery_utest1 values(generate_series(1, 10), generate_series(1, 5)); +insert into merge_subquery_utest2 values(generate_series(1, 5), generate_series(21, 25)); +insert into merge_subquery_utest2 values(generate_series(11, 15), generate_series(11, 15)); + +explain merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg2.val+mg1.val) +when not matched then + insert values(mg2.id, mg2.val); + +START TRANSACTION; +merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg2.val+mg1.val) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_utest1; +ROLLBACK; + +explain merge into merge_subquery_utest1 mg1 +using 
merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_utest2 mg3) +when not matched then + insert values(mg2.id, mg2.val); + +START TRANSACTION; +merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select sum(val) from merge_subquery_utest2 mg3) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_utest1; +ROLLBACK; + +explain merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg3.val from (select * from merge_subquery_utest1) as mg3 where mg3.id in (select id from merge_subquery_utest2) limit 1) +when not matched then + insert values(mg2.id, mg2.val); + +START TRANSACTION; +merge into merge_subquery_utest1 mg1 +using merge_subquery_utest2 mg2 on mg1.id=mg2.id +when matched then + update set mg1.val=(select mg3.val from (select * from merge_subquery_utest1) as mg3 where mg3.id in (select id from merge_subquery_utest2) limit 1) +when not matched then + insert values(mg2.id, mg2.val); +select * from merge_subquery_utest1; +ROLLBACK; + +-- subpartition +create table partition_table(id int, val1 int, val2 int, val3 int) +partition by range (id) subpartition by list (val1) +( + partition p_1 values less than(5) + ( + subpartition p_11 values ('1','2'), + subpartition p_12 values ('3','4'), + subpartition p_13 values ('5') + ), + partition p_2 values less than(10) + ( + subpartition p_21 values ('1','2'), + subpartition p_22 values ('3','4'), + subpartition p_23 values ('5') + ), + partition p_3 values less than(20) + ( + subpartition p_31 values ('1','2'), + subpartition p_32 values ('3','4'), + subpartition p_33 values ('5') + ) +); +insert into partition_table values(generate_series(1, 10), generate_series(1,5), generate_series(1,2), generate_series(1,10)); + +explain +merge into partition_table t1 +using merge_subquery_utest2 t2 on t1.id=t2.id +when matched then + update set t1.val2 = (select t2.val + t1.val2) and + t1.val3 = (select t3.id from merge_subquery_utest1 t3 where id=3) +when not matched then + insert values(t2.id, t2.val, (select t4.val from merge_subquery_utest1 t4 where id=7), t2.val*2); + +START TRANSACTION; +merge into partition_table t1 +using merge_subquery_utest2 t2 on t1.id=t2.id +when matched then + update set t1.val2 = (select t2.val + t1.val2) and + t1.val3 = (select t3.id from merge_subquery_utest1 t3 where id=3) +when not matched then + insert values(t2.id, t2.val, (select t4.val from merge_subquery_utest1 t4 where id=7), t2.val*2); +select * from partition_table; +ROLLBACK; diff --git a/src/test/regress/sql/merge_subquery3.sql b/src/test/regress/sql/merge_subquery3.sql new file mode 100644 index 000000000..23c85d712 --- /dev/null +++ b/src/test/regress/sql/merge_subquery3.sql @@ -0,0 +1,246 @@ +-- unrelated helper table +DROP TABLE IF EXISTS tmp_table; +CREATE TABLE tmp_table (id int, null_val date); +INSERT INTO tmp_table VALUES(generate_series(1,10), null); + +-- ******************************** +-- * target table: row store; source table: row store * +-- ******************************** +DROP TABLE IF EXISTS target_table, source_table; +CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=ROW); +CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=ROW); + +INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20)); +INSERT INTO source_table VALUES (21, null, null, null);
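+-- A minimal correlated form first (sketch with assumed values; the blocks below exercise richer variants). +-- A subquery in SET or VALUES may reference both the merged target row (t) and source row (s) and is +-- evaluated per candidate row pair: +-- MERGE INTO target_table t USING source_table s ON t.c1 = s.c1 - 10 +-- WHEN MATCHED THEN UPDATE SET c4 = (SELECT s.c4 + t.c4);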
+ +-- correlated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null for the selected column */), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- uncorrelated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null for the selected column */), + (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- nested subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT list */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- ******************************** +-- * target table: column store; source table: column store * +-- ******************************** +DROP TABLE IF EXISTS target_table, source_table; +CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=COLUMN); +CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=COLUMN); + +INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20)); +INSERT INTO source_table VALUES (21, null, null, null); + +-- correlated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null for the selected column */), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >=
'2000-01-01') /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- uncorrelated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null for the selected column */), + (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- nested subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT list */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- ******************************** +-- * target table: row store; source table: column store * +-- ******************************** +DROP TABLE IF EXISTS target_table, source_table; +CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=ROW); +CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=COLUMN); + +INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20)); +INSERT INTO source_table VALUES (21, null, null, null); + +-- correlated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null for the selected column */), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- uncorrelated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3,
c4) = (s.c2, + (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null for the selected column */), + (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- nested subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT list */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- ******************************** +-- * target table: column store; source table: row store * +-- ******************************** +DROP TABLE IF EXISTS target_table, source_table; +CREATE TABLE target_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=COLUMN); +CREATE TABLE source_table (c1 int, c2 varchar(200), c3 date, c4 numeric(18,9)) +WITH (ORIENTATION=ROW); + +INSERT INTO source_table VALUES (generate_series(11,20),'A'||(generate_series(11,20))||'Z', date'2000-03-01'+generate_series(11,20), generate_series(11,20)); +INSERT INTO source_table VALUES (21, null, null, null); + +-- correlated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(c1) + 4 FROM target_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21 /* returns null for the selected column */), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - 10 AND c3 >= '2000-01-01') /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- uncorrelated subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING source_table s ON t.c1 + (SELECT MIN(id) + 4 FROM tmp_table /* returns a single row and column */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT null_val FROM tmp_table WHERE id = 1 /* returns null for the selected column */), + (SELECT id FROM tmp_table WHERE id > 7 AND id < 9) /* with a WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.id + t2.id FROM tmp_table t1 JOIN tmp_table t2 ON t1.id = t2.id AND t1.id < 2 ) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +-- nested subqueries +TRUNCATE target_table; +INSERT INTO target_table VALUES (generate_series(1,10),'A'||(generate_series(1,10))||'Z', date'2000-03-01'+generate_series(1,10), generate_series(1,10)); + +MERGE INTO target_table t + USING
source_table s ON t.c1 + (SELECT MIN(c1) + (SELECT id FROM tmp_table WHERE id > 3 AND id <= 4) FROM target_table /* nested in the SELECT list */) = s.c1 +WHEN MATCHED THEN + UPDATE SET (c2, c3, c4) = (s.c2, + (SELECT c3 FROM source_table WHERE c1 = 21), + (SELECT c4 FROM target_table WHERE c1 = s.c1 - (SELECT MAX(id) FROM tmp_table) AND c3 >= '2000-01-01') /* nested in the WHERE condition */) +WHEN NOT MATCHED THEN + INSERT VALUES (s.c1, s.c2, s.c3, + (SELECT t2.c4 FROM tmp_table t1 JOIN target_table t2 ON t1.id = t2.c1 AND t2.c1 + 8 = s.c1) /* multi-table join */); + +SELECT c1, c2, to_char(c3, 'YYYY/MM/DD'), c4 FROM target_table ORDER BY c1; + +DROP TABLE IF EXISTS target_table, source_table, tmp_table; diff --git a/src/test/regress/sql/merge_where_col.sql b/src/test/regress/sql/merge_where_col.sql new file mode 100644 index 000000000..e573757c9 --- /dev/null +++ b/src/test/regress/sql/merge_where_col.sql @@ -0,0 +1,87 @@ +-- +-- MERGE INTO +-- + +-- part 1 +-- initial +CREATE SCHEMA merge_where_col; +SET current_schema = merge_where_col; + +drop table if exists merge_nest_tab1,dt2; +create table merge_nest_tab1(co1 numeric(20,4),co2 varchar2,co3 number,co4 date); +insert into merge_nest_tab1 values(generate_series(1,10),'hello'||generate_series(1,10),generate_series(1,10)*10,sysdate); +create table dt2(c1 numeric(20,4),c2 boolean,c3 character(40),c4 binary_double,c5 nchar(20)) WITH (ORIENTATION = COLUMN); +insert into dt2 values(generate_series(20,50),false,generate_series(20,50)||'gauss',generate_series(20,50)-0.99,'openopen'); + +-- we can't use columns of target table in insertion subquery(co1<45) for 'where' +BEGIN; +merge into merge_nest_tab1 a +USING dt2 b + ON a.co1=b.c1-20 + WHEN NOT matched THEN + insert(co1,co2,co3) values(100, + (SELECT 666)||'good', + (SELECT sum(c.c1) + FROM dt2 c + INNER JOIN merge_nest_tab1 d + ON c.c1=d.co1 )) + WHERE co1<45; +END; + +-- we can use columns of source table in insertion subquery(c1<45) for 'where' +BEGIN; +merge into merge_nest_tab1 a +USING dt2 b + ON a.co1=b.c1-20 + WHEN NOT matched THEN + insert(co1,co2,co3) values(100, + (SELECT 666)||'good', + (SELECT sum(c.c1) + FROM dt2 c + INNER JOIN merge_nest_tab1 d + ON c.c1=d.co1 )) + WHERE c1<45; +SELECT co1, co2, co3 FROM merge_nest_tab1 order by 1; +ROLLBACK; + +-- we can also use columns of source table in the update branch's 'where' (c1 BETWEEN 1 AND 50) +BEGIN; +merge into merge_nest_tab1 a +USING dt2 b + ON a.co1=b.c1-20 + WHEN matched THEN + UPDATE SET a.co3=a.co3 + b.c4, + a.co2='hello', + a.co4=(SELECT last_day(sysdate)) + WHERE c1 BETWEEN 1 AND 50; +SELECT co1, co2, co3 FROM merge_nest_tab1 order by 1; +ROLLBACK; + +-- part 2 +-- initial +drop table if exists tb_a,tb_b; + +create table tb_a(id int, a int, b int, c int, d int); +create table tb_b(id int, a int, b int, c int, d int); +insert into tb_a values(1, 1, 1, 1, 1); +insert into tb_a values(2, 2, 2, 2, 2); +insert into tb_a values(3, 3, 3, 3, 3); +insert into tb_a values(4, 4, 4, 4, 4); + +insert into tb_b values(1, 100, 1, 1, 1); +insert into tb_b values(2, 2, 2, 2, 2); +insert into tb_b values(3, 3, 3, 3, 3); +insert into tb_b values(4, 4, 4, 4, 4); + +-- if the column has the same name, the column in the target table takes precedence +BEGIN; +MERGE INTO tb_b bt +USING tb_a at + ON (at.id = bt.id) + WHEN MATCHED THEN + UPDATE SET a = at.a + 100 WHERE a = 100; +SELECT * FROM tb_b ORDER BY 1; +ROLLBACK; + +-- clean up +DROP SCHEMA merge_where_col CASCADE; diff --git a/src/test/regress/sql/nlssort_pinyin.sql b/src/test/regress/sql/nlssort_pinyin.sql new file mode 100644 index 000000000..05e179349
--- /dev/null +++ b/src/test/regress/sql/nlssort_pinyin.sql @@ -0,0 +1,204 @@ +create schema nlssort_pinyin_schema; +set search_path = nlssort_pinyin_schema; + +-- test null +select nlssort(NULL, 'nls_sort=schinese_pinyin_m'); +select nlssort('', NULL); +select nlssort(NULL, NULL); + +-- test wrong parameter +select nlssort('', ' nls_sort = schinese_pinyin_m '); +select nlssort('', ' nls_sort = generic_m_ci '); +select nlssort('', 'nls_sort=s chinese_pinyin_m'); +select nlssort('', 'nls_sort=g eneric_m_ci'); +select nlssort('', 'nls_sort=schinese'); +select nlssort('', 'nls_sort=generic'); + +-- test single char nlssort code +select nlssort('', 'nls_sort=schinese_pinyin_m'); +select nlssort('', 'nls_sort=schinese_pinyin_m'); +select nlssort('$', 'nls_sort=schinese_pinyin_m'); +select nlssort('&', 'nls_sort=schinese_pinyin_m'); +select nlssort('''', 'nls_sort=schinese_pinyin_m'); +select nlssort('0', 'nls_sort=schinese_pinyin_m'); +select nlssort('A', 'nls_sort=schinese_pinyin_m'); +select nlssort('\', 'nls_sort=schinese_pinyin_m'); +select nlssort('a', 'nls_sort=schinese_pinyin_m'); +select nlssort('倰', 'nls_sort=schinese_pinyin_m'); +select nlssort('冔', 'nls_sort=schinese_pinyin_m'); +select nlssort('勆', 'nls_sort=schinese_pinyin_m'); +select nlssort('', 'nls_sort=schinese_pinyin_m'); +select nlssort('「', 'nls_sort=schinese_pinyin_m'); +select nlssort('★', 'nls_sort=schinese_pinyin_m'); +select nlssort('ⅰ', 'nls_sort=schinese_pinyin_m'); +select nlssort('⒈', 'nls_sort=schinese_pinyin_m'); +select nlssort('⑴', 'nls_sort=schinese_pinyin_m'); +select nlssort('①', 'nls_sort=schinese_pinyin_m'); +select nlssort('㈠', 'nls_sort=schinese_pinyin_m'); +select nlssort('Ⅰ', 'nls_sort=schinese_pinyin_m'); +select nlssort('Ⅴ', 'nls_sort=schinese_pinyin_m'); +select nlssort('', 'nls_sort=schinese_pinyin_m'); +select nlssort('0', 'nls_sort=schinese_pinyin_m'); +select nlssort('A', 'nls_sort=schinese_pinyin_m'); +select nlssort('a', 'nls_sort=schinese_pinyin_m'); +select nlssort('ぎ', 'nls_sort=schinese_pinyin_m'); +select nlssort('ガ', 'nls_sort=schinese_pinyin_m'); +select nlssort('α', 'nls_sort=schinese_pinyin_m'); +select nlssort('猋', 'nls_sort=schinese_pinyin_m'); +select nlssort('珬', 'nls_sort=schinese_pinyin_m'); +select nlssort('甂', 'nls_sort=schinese_pinyin_m'); +select nlssort('Ꮬ', 'nls_sort=schinese_pinyin_m'); +select nlssort('ᴂ', 'nls_sort=schinese_pinyin_m'); +select nlssort('겷', 'nls_sort=schinese_pinyin_m'); +select nlssort('뛑', 'nls_sort=schinese_pinyin_m'); +select nlssort('', 'nls_sort=schinese_pinyin_m'); +select nlssort('𡤝', 'nls_sort=schinese_pinyin_m'); +select nlssort('𦪫', 'nls_sort=schinese_pinyin_m'); +select nlssort('𰀅', 'nls_sort=schinese_pinyin_m'); + +select nlssort('', 'nls_sort=generic_m_ci'); +select nlssort('', 'nls_sort=generic_m_ci'); +select nlssort('$', 'nls_sort=generic_m_ci'); +select nlssort('&', 'nls_sort=generic_m_ci'); +select nlssort('''', 'nls_sort=generic_m_ci'); +select nlssort('0', 'nls_sort=generic_m_ci'); +select nlssort('A', 'nls_sort=generic_m_ci'); +select nlssort('\', 'nls_sort=generic_m_ci'); +select nlssort('a', 'nls_sort=generic_m_ci'); +select nlssort('倰', 'nls_sort=generic_m_ci'); +select nlssort('冔', 'nls_sort=generic_m_ci'); +select nlssort('勆', 'nls_sort=generic_m_ci'); +select nlssort('', 'nls_sort=generic_m_ci'); +select nlssort('「', 'nls_sort=generic_m_ci'); +select nlssort('★', 'nls_sort=generic_m_ci'); +select nlssort('ⅰ', 'nls_sort=generic_m_ci'); +select nlssort('⒈', 'nls_sort=generic_m_ci'); +select nlssort('⑴', 
'nls_sort=generic_m_ci'); +select nlssort('①', 'nls_sort=generic_m_ci'); +select nlssort('㈠', 'nls_sort=generic_m_ci'); +select nlssort('Ⅰ', 'nls_sort=generic_m_ci'); +select nlssort('Ⅴ', 'nls_sort=generic_m_ci'); +select nlssort('', 'nls_sort=generic_m_ci'); +select nlssort('0', 'nls_sort=generic_m_ci'); +select nlssort('A', 'nls_sort=generic_m_ci'); +select nlssort('a', 'nls_sort=generic_m_ci'); +select nlssort('ぎ', 'nls_sort=generic_m_ci'); +select nlssort('ガ', 'nls_sort=generic_m_ci'); +select nlssort('α', 'nls_sort=generic_m_ci'); +select nlssort('猋', 'nls_sort=generic_m_ci'); +select nlssort('珬', 'nls_sort=generic_m_ci'); +select nlssort('甂', 'nls_sort=generic_m_ci'); +select nlssort('Ꮬ', 'nls_sort=generic_m_ci'); +select nlssort('ᴂ', 'nls_sort=generic_m_ci'); +select nlssort('겷', 'nls_sort=generic_m_ci'); +select nlssort('뛑', 'nls_sort=generic_m_ci'); +select nlssort('', 'nls_sort=generic_m_ci'); +select nlssort('𡤝', 'nls_sort=generic_m_ci'); +select nlssort('𦪫', 'nls_sort=generic_m_ci'); +select nlssort('𰀅', 'nls_sort=generic_m_ci'); + +-- test multi-char nlssort code +select nlssort(' ', 'nls_sort=schinese_pinyin_m'); +select nlssort('AbC啊 ', 'nls_sort=schinese_pinyin_m'); +select nlssort('AbC 啊 ', 'nls_sort=schinese_pinyin_m'); +select nlssort(' AbC啊 ', 'nls_sort=schinese_pinyin_m'); + +select nlssort(' ', 'nls_sort=generic_m_ci'); +select nlssort('AbC啊 ', 'nls_sort=generic_m_ci'); +select nlssort('AbC 啊 ', 'nls_sort=generic_m_ci'); +select nlssort(' AbC啊 ', 'nls_sort=generic_m_ci'); + +-- test nlssort func used in order by statement +drop table if exists tb_test; +create table tb_test(c1 text); + +insert into tb_test values(''); +insert into tb_test values(''); +insert into tb_test values('$'); +insert into tb_test values('&'); +insert into tb_test values(''''); +insert into tb_test values('0'); +insert into tb_test values('A'); +insert into tb_test values('\'); +insert into tb_test values('a'); +insert into tb_test values('倰'); +insert into tb_test values('冔'); +insert into tb_test values('勆'); +insert into tb_test values(''); +insert into tb_test values('「'); +insert into tb_test values('★'); +insert into tb_test values('ⅰ'); +insert into tb_test values('⒈'); +insert into tb_test values('⑴'); +insert into tb_test values('①'); +insert into tb_test values('㈠'); +insert into tb_test values('Ⅰ'); +insert into tb_test values('Ⅴ'); +insert into tb_test values(''); +insert into tb_test values('0'); +insert into tb_test values('A'); +insert into tb_test values('a'); +insert into tb_test values('ぎ'); +insert into tb_test values('ガ'); +insert into tb_test values('α'); +insert into tb_test values('猋'); +insert into tb_test values('珬'); +insert into tb_test values('甂'); +insert into tb_test values('Ꮬ'); +insert into tb_test values('ᴂ'); +insert into tb_test values('겷'); +insert into tb_test values('뛑'); +insert into tb_test values(''); +insert into tb_test values('𡤝'); +insert into tb_test values('𦪫'); +insert into tb_test values('𰀅'); +insert into tb_test values(' '); +insert into tb_test values('AbC啊 '); +insert into tb_test values('AbC 啊 '); +insert into tb_test values(' AbC啊 '); + +select c1, nlssort(c1, 'nls_sort=schinese_pinyin_m') from tb_test order by nlssort(c1, 'nls_sort=schinese_pinyin_m'); +select c1, nlssort(c1, 'nls_sort=generic_m_ci') from tb_test order by nlssort(c1, 'nls_sort=generic_m_ci'); + +-- test nlssort func used in procedure (compilation should not report errors) +drop table if exists tb_test; +create table tb_test(col1 varchar2); +create or replace package
pckg_test as +procedure proc_test(i_col1 in varchar2); +function func_test(i_col1 in varchar2) return varchar2; +end pckg_test; +/ + +create or replace package body pckg_test as +procedure proc_test(i_col1 in varchar2) as +v_a varchar2; +v_b varchar2; +begin +if func_test(i_col1) < func_test('阿') then +v_a:= func_test(i_col1); +end if; +select nlssort(col1,'NLS_SORT=SCHINESE_PINYIN_M') into v_b from tb_test where col1=i_col1; +end; +function func_test(i_col1 in varchar2) return varchar2 as +begin +return nlssort(i_col1,'NLS_SORT=SCHINESE_PINYIN_M'); +end; +end pckg_test; +/ + +-- It will core dump when the length of the first argument is 0. +-- ORA compatibility mode treats "" as null, so test it in MySQL compatibility mode. +create database b_dbcompatibility TEMPLATE=template0 dbcompatibility='B'; +\c b_dbcompatibility +set client_encoding=utf8; + +select nlssort('', 'nls_sort=schinese_pinyin_m'); + +\c regression +clean connection to all force for database b_dbcompatibility; +drop database b_dbcompatibility; + +-- test whether nlssort is shippable or not +\sf nlssort + +drop schema nlssort_pinyin_schema cascade; \ No newline at end of file diff --git a/src/test/regress/sql/numeric_hide_tailing_zero.sql b/src/test/regress/sql/numeric_hide_tailing_zero.sql new file mode 100644 index 000000000..068170b14 --- /dev/null +++ b/src/test/regress/sql/numeric_hide_tailing_zero.sql @@ -0,0 +1,7 @@ +set behavior_compat_options=''; +select cast(123.123 as numeric(15,10)); +set behavior_compat_options='hide_tailing_zero'; +select cast(123.123 as numeric(15,10)); +select cast(0 as numeric(15,10)); +select cast(009.0000 as numeric(15,10)); +set behavior_compat_options=''; \ No newline at end of file diff --git a/src/test/regress/sql/out_param_func.sql b/src/test/regress/sql/out_param_func.sql new file mode 100644 index 000000000..def6c25c9 --- /dev/null +++ b/src/test/regress/sql/out_param_func.sql @@ -0,0 +1,929 @@ +create schema out_param_schema; +set current_schema= out_param_schema; +set behavior_compat_options='proc_outparam_override'; + +--1--------return a variable +CREATE or replace FUNCTION func1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func1(2, null); +call func1(2, NULL); +select * from func1(2,null); +select func1(a => 2, b => null); +select * from func1(a => 2, b => null); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + select * into result from func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + func1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +---inout parameter +CREATE or replace FUNCTION func1_1(in a integer, inout b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func1_1(2, null); +call func1_1(2, NULL); +select * from func1_1(2,null);
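+-- A minimal sketch of the inout write-back (expected values are inferred from the body of func1_1 above: b receives a + 1 and the return value is 1): +declare + res integer; + x integer := 5; + y integer := NULL; +begin + res := func1_1(x, y); + raise info 'y is: %, res is: %', y, res; +end; +/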
+declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func1_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func1_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + func1_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--2--------return an expression over variables +CREATE or replace FUNCTION func2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return b + c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func2(2, null); +call func2(2, NULL); +select * from func2(2,null); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func2(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + func2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--3------return a constant +CREATE or replace FUNCTION func3(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return 123; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func3(2, null); +call func3(2, NULL); +select * from func3(2,null); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func3(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func3(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func3(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + func3(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--4------multiple out parameters +CREATE or replace FUNCTION func4(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func4(2,NULL,NULL); +call func4(2, NULL,NULL); +select * from func4(2, NULL,NULL); +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4(a, b, d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a
integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ +---inout parameters +CREATE or replace FUNCTION func4_1(in a integer, inout b integer, inout d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func4_1(2,NULL,NULL); +call func4_1(2, NULL,NULL); +select * from func4_1(2, NULL,NULL); +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4_1(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func4_1(a, b, d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4_1(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func4_1(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +--5-- out parameter without return is not supported; reports an error at execution time-- +--5.1 +CREATE or replace FUNCTION func5_1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + --return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func5_1(2, NULL); +call func5_1(2, NULL); +select * from func5_1(2, NULL); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func5_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--5.2 +CREATE or replace FUNCTION func5_2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func5_2(2, NULL); +call func5_2(2, NULL); +select * from func5_2(2, NULL); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func5_2(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func5_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--6 autonomous transaction +--6.1 single out parameter +CREATE or replace FUNCTION func6_1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED;
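+ +-- Sketch of the expected out-parameter behavior under PRAGMA AUTONOMOUS_TRANSACTION (assumption consistent with the checks below: the body runs in an independent transaction, yet b is still copied back as a + 1 and the return value is 1): +declare + res integer; + b integer := NULL; +begin + res := func6_1(2, b); + raise info 'b is: %, res is: %', b, res; +end; +/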
+ +select func6_1(2, null); +call func6_1(2, NULL); +select * from func6_1(2,null); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func6_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := func6_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + func6_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + func6_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--6.2 multiple out parameters +CREATE or replace FUNCTION func6_2(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + d := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +select func6_2(2,NULL,NULL); +call func6_2(2, NULL,NULL); +select * from func6_2(2, NULL,NULL); +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func6_2(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + result := func6_2(a, b,d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func6_2(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; + d integer := NULL; +begin + func6_2(a => a, b => b,d => d); + raise info 'b is: %', b; + raise info 'd is: %', d; + raise info 'result is: %', result; +end; +/ + +--7 package +--7.1 ordinary out parameter +create or replace package pck7_1 +is +function func7_1(in a int, out b int) +return int; +end pck7_1; +/ + +CREATE or replace package body pck7_1 as FUNCTION func7_1(in a int, out b integer) +RETURN int +AS +DECLARE + --PRAGMA AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; +end pck7_1; +/ + +select pck7_1.func7_1(2, null); +call pck7_1.func7_1(2, NULL); +select * from pck7_1.func7_1(2,null); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_1.func7_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_1.func7_1(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + pck7_1.func7_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + pck7_1.func7_1(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ +--7.2 out parameter with autonomous transaction + +create or replace package pck7_2 +is +function func7_2(in a int, out b int) +return int; +end pck7_2; +/ + +CREATE or replace package body pck7_2 as FUNCTION func7_2(in a int, out b integer) +RETURN int +AS +DECLARE + PRAGMA
AUTONOMOUS_TRANSACTION; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; +end pck7_2; +/ + +select pck7_2.func7_2(2, null); +call pck7_2.func7_2(2, NULL); +select * from pck7_2.func7_2(2,null); +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_2.func7_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + result := pck7_2.func7_2(a, b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result integer; + a integer := 2; + b integer := NULL; +begin + pck7_2.func7_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +declare + result text; + a integer := 2; + b integer := NULL; +begin + pck7_2.func7_2(a => a, b => b); + raise info 'b is: %', b; + raise info 'result is: %', result; +end; +/ + +--8 restriction: out-parameter functions cannot be overloaded +--8.1 only one plpgsql function with out parameters can exist under a given name +CREATE or replace FUNCTION func8_1(in a integer) +RETURNS int +AS $$ +DECLARE + b int; + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_1(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_1(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := b; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +--8.2 in the same schema or package, a same-named plpgsql function with out parameters is not allowed, but replace is +CREATE or replace FUNCTION func8_2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_2(in a integer, out b integer, out d integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + d := b; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_2(in a integer, out b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +create or replace package pck8_2 +is +function func8_2(in a int, out b int) +return int; +function func8_2(in a int, out b int, out d integer) +return int; +end pck8_2; +/ + +--8.3 in the same schema or package, same-named plpgsql functions without out parameters are allowed +CREATE or replace FUNCTION func8_3(in a integer) +RETURNS int +AS $$ +DECLARE + c int; + b int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; +CREATE or replace FUNCTION func8_3(in a integer, in b integer) +RETURNS int +AS $$ +DECLARE + c int; + BEGIN + c := 1; + b := a + c; + return c; + END; $$ +LANGUAGE 'plpgsql' NOT FENCED; + +create or replace package pck8_3 +is +function func8_3(in a int) +return int; +function func8_3(in a int, in b int) +return int; +end pck8_3; +/ +select proname from pg_proc where proname = 'func8_3' order by 1; + +create or replace function f1(in a int, out b int) return int +as +declare +c int; +begin +c := a - 1; +b := a + 1; +return c; +end; +/ + +select * from generate_series(1,100) where generate_series > f1(90, null); + +declare +res int; +begin +res := f1(10, 888); -- passing a constant to the out parameter reports an error +raise info 'res is:%',res; +end; +/ + +drop function f1; + +create or replace package pck1 is +type tp1 is record(v01 number, v03 varchar2, v02 number); +function f1(in a int, out c tp1) return int; +end pck1; +/ + +create or
replace package body pck1 is +function f1(in a int, out c tp1) return int +as +declare +begin +c.v01:=a; +return a; +end; +end pck1; +/ + +select pck1.f1(10,(1,'a',2)); +select * from pck1.f1(10,(1,'a',2)); +call pck1.f1(10,(1,'a',2)); + + +--clean +reset behavior_compat_options; + +drop schema out_param_schema cascade; diff --git a/src/test/regress/sql/parse_page.sql b/src/test/regress/sql/parse_page.sql new file mode 100644 index 000000000..4726d05ad --- /dev/null +++ b/src/test/regress/sql/parse_page.sql @@ -0,0 +1,53 @@ +START TRANSACTION; + +DROP TABLE IF EXISTS test_astore; +CREATE TABLE test_astore (user_id serial PRIMARY KEY, time_clock VARCHAR ( 50 )); +CREATE INDEX test_astore_idx ON test_astore(user_id); +insert into test_astore select generate_series(1, 200), clock_timestamp(); +update test_astore set time_clock = NULL where user_id = 150; +update test_astore set time_clock = 'time' where user_id = 150; +delete test_astore where user_id > 190; + +DROP TABLE IF EXISTS test_ustore; +CREATE TABLE test_ustore (user_id serial PRIMARY KEY, time_clock VARCHAR ( 50 )) with(storage_type=ustore); +CREATE INDEX test_ustore_idx ON test_ustore(user_id); +insert into test_ustore select generate_series(1, 200), clock_timestamp(); +update test_ustore set time_clock = NULL where user_id = 150; +update test_ustore set time_clock = 'time' where user_id = 150; +delete test_ustore where user_id > 190; + +DROP TABLE IF EXISTS test_segment; +CREATE TABLE test_segment (a int, b int, c int) with(segment=on); +INSERT INTO test_segment values(generate_series(1,10),generate_series(1,10), generate_series(1,10)); + +CREATE OR REPLACE FUNCTION gs_parse_page_bypath_test(tablename in varchar2, block_num in int, relation in varchar2, read_mem in bool) +RETURNS table (output text) +LANGUAGE plpgsql +AS +$$ +DECLARE + param1 text; +BEGIN + SELECT pg_relation_filepath(tablename) into param1; + return query SELECT gs_parse_page_bypath(''|| param1 ||'', block_num, relation, read_mem); +END; +$$ +; + +SELECT gs_parse_page_bypath_test('test_astore', 0, 'heap', true); +SELECT gs_parse_page_bypath_test('test_segment', 0, 'segment', true); +CHECKPOINT; +SELECT gs_parse_page_bypath_test('test_astore', -1, 'heap', true); +SELECT gs_parse_page_bypath_test('test_astore_idx', 1, 'btree', false); +SELECT gs_parse_page_bypath_test('test_ustore', -1, 'uheap', false); +SELECT gs_parse_page_bypath_test('test_ustore_idx', 1, 'ubtree', false); +SELECT gs_parse_page_bypath_test('test_segment', 0, 'segment', false); + +DROP INDEX IF EXISTS test_astore_idx; +DROP TABLE IF EXISTS test_astore; +DROP INDEX IF EXISTS test_ustore_idx; +DROP TABLE IF EXISTS test_ustore; +DROP TABLE IF EXISTS test_segment; + +COMMIT; + diff --git a/src/test/regress/sql/parse_xlog.sql b/src/test/regress/sql/parse_xlog.sql new file mode 100644 index 000000000..432b75463 --- /dev/null +++ b/src/test/regress/sql/parse_xlog.sql @@ -0,0 +1,78 @@ +-- gs_xlogdump_lsn +START TRANSACTION; + +CREATE OR REPLACE FUNCTION gs_xlogdump_lsn() +RETURNS table (output text) +LANGUAGE
plpgsql +AS +$$ +DECLARE + param1 text; + param2 text; +BEGIN + DROP TABLE IF EXISTS heap_t; + CREATE TABLE heap_t (i INT); + INSERT INTO heap_t SELECT * FROM generate_series(1,10); + CHECKPOINT; + SELECT pg_relation_filepath('heap_t') into param1; + return query SELECT gs_xlogdump_tablepath(''|| param1 || '', 0, 'heap'); +END; +$$ +; + +SELECT gs_xlogdump_tablepath(); +DROP TABLE heap_t; +COMMIT; + +-- gs_xlogdump_parsepage_tablepath +START TRANSACTION; + +CREATE OR REPLACE FUNCTION gs_xlogdump_parsepage_tablepath() +RETURNS table (output text) +LANGUAGE plpgsql +AS +$$ +DECLARE + param1 text; + param2 text; +BEGIN + DROP TABLE IF EXISTS heap_t1; + CREATE TABLE heap_t1 (i INT); + INSERT INTO heap_t1 SELECT * FROM generate_series(1,10); + CHECKPOINT; + SELECT pg_relation_filepath('heap_t1') into param1; + return query SELECT gs_xlogdump_parsepage_tablepath(''|| param1 || '', 0, 'heap', false); +END; +$$ +; + +SELECT gs_xlogdump_parsepage_tablepath(); +DROP TABLE heap_t1; +COMMIT; diff --git a/src/test/regress/sql/partition_dml_operations.sql b/src/test/regress/sql/partition_dml_operations.sql new file mode 100644 index 000000000..0ad0214d7 --- /dev/null +++ b/src/test/regress/sql/partition_dml_operations.sql @@ -0,0 +1,704 @@ +--select +create table tsource(ld int not null,sd int not null,jname varchar2) partition by range(ld) +( + partition ts1 values less than(6), + partition ts2 values less than(36) +); +insert into tsource values (5),(15); +select * from tsource partition (ts1); +select * from tsource partition for(5); +select * from tsource subpartition (ts1); +select * from tsource subpartition for(3,6); +drop table tsource; +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + + +select * from range_list partition (p_201901); +select * from range_list subpartition (p_201901_a); +select * from range_list partition for ('201902'); +select * from range_list subpartition for ('201902','1'); + +drop table range_list; +--insert +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into test_range_pt partition (p1) values(1); +insert into test_range_pt partition (p2) values(1); +insert into test_range_pt partition (p3) values(1); + +insert into test_range_pt partition for (1) values(1); +insert into test_range_pt partition for (2001) values(1); +insert into test_range_pt partition for (3001) values(1); + +drop table test_range_pt; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 
) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list partition (p_201901) values('201902', '1', '1', 1); +insert into range_list partition (p_201902) values('201902', '1', '1', 1); +insert into range_list partition (p_201902_a) values('201902', '1', '1', 1); +insert into range_list partition (p_201902_c) values('201902', '1', '1', 1); + +insert into range_list subpartition (p_201901_a) values('201902', '1', '1', 1); +insert into range_list subpartition (p_201901_b) values('201902', '1', '1', 1); +insert into range_list subpartition (p_201902_a) values('201902', '1', '1', 1); +insert into range_list subpartition (p_201902_b) values('201902', '1', '1', 1); + +insert into range_list subpartition (p_201901) values('201902', '1', '1', 1); +insert into range_list subpartition (p_201902) values('201902', '1', '1', 1); +insert into range_list subpartition (p_201903) values('201902', '1', '1', 1); + +insert into range_list partition for ('201902') values('201902', '1', '1', 1); +insert into range_list partition for ('201903') values('201902', '1', '1', 1); +insert into range_list partition for ('201910') values('201902', '1', '1', 1); + +insert into range_list subpartition for ('201902','1') values('201902', '1', '1', 1); +insert into range_list subpartition for ('201902','2') values('201902', '1', '1', 1); +insert into range_list subpartition for ('201903','1') values('201902', '1', '1', 1); +insert into range_list subpartition for ('201903','2') values('201902', '1', '1', 1); + +insert into range_list subpartition for ('201902') values('201902', '1', '1', 1); +insert into range_list subpartition for ('201910','1') values('201902', '1', '1', 1); + +drop table range_list; + +--update +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt values(2001, 1, 1); +insert into test_range_pt values(3001, 1, 1); + +update test_range_pt partition (p1) set b = 2; +select * from test_range_pt; +update test_range_pt partition (p1) set a = 2; +select * from test_range_pt; + +update test_range_pt partition for (1) set b = 3; +select * from test_range_pt; +update test_range_pt partition for (1) set a = 3; +select * from test_range_pt; + +drop table test_range_pt; + + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into 
range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select *from range_list; + +update range_list partition (p_201901) set user_no = '2'; +select *from range_list; +update range_list subpartition (p_201901_a) set user_no = '3'; +select *from range_list; + +update range_list partition for ('201902') set user_no = '4'; +select *from range_list; +update range_list subpartition for ('201902','2') set user_no = '5'; +select *from range_list; + +drop table range_list; + +--delete +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt values(2001, 1, 1); +insert into test_range_pt values(3001, 1, 1); + +delete from test_range_pt partition (p1); +select * from test_range_pt; + +delete from test_range_pt partition for (2001); +select * from test_range_pt; + +drop table test_range_pt; + + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select *from range_list; + +delete from range_list partition (p_201901); +select *from range_list; +delete from range_list partition for ('201903'); +select *from range_list; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select *from range_list; +delete from range_list subpartition (p_201901_a); +select *from range_list; +delete from range_list subpartition for ('201903','2'); +select *from range_list; + +drop table range_list; + +--upsert +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) PRIMARY KEY , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; +delete from range_list; + +create index 
idx1 on range_list(month_code,dept_code) local; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; +delete from range_list; + +create index idx2 on range_list(month_code) global; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; +delete from range_list; + +drop table range_list; + + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) PRIMARY KEY , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; +delete from range_list; + +create index idx1 on range_list(month_code,dept_code) local; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; +delete from range_list; + +create index idx2 on range_list(month_code) global; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +select * from range_list; +delete from range_list; + +drop table range_list; + + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) PRIMARY KEY , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201902) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +select * from range_list; + +insert into range_list subpartition (p_201901_a) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +insert into range_list subpartition (p_201901_b) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 20; +select * from range_list; + +insert into range_list partition for ('201902') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 30; +insert into range_list partition for ('201903') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +select * from range_list; + +insert into range_list subpartition for ('201902','1') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +insert into 
range_list subpartition for ('201902','2') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 50; +select * from range_list; + +drop table range_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int, + PRIMARY KEY(month_code, dept_code) +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201901) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 5; +insert into range_list partition (p_201902) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +select * from range_list; + +insert into range_list subpartition (p_201901_a) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 10; +insert into range_list subpartition (p_201901_b) values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 20; +select * from range_list; + +insert into range_list partition for ('201902') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 30; +insert into range_list partition for ('201903') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +select * from range_list; + +insert into range_list subpartition for ('201902','1') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 40; +insert into range_list subpartition for ('201902','2') values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt = 50; +select * from range_list; + +drop table range_list; + +drop table test_range_pt; +create table test_range_pt (a int, b int primary key, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into test_range_pt values (1,1),(2001,2001); +insert into test_range_pt partition (p1) values(1,2001) ON DUPLICATE KEY UPDATE a = 5; +drop table test_range_pt; + +drop table if exists tsource cascade; +create table tsource(ld int ,sd int not null,code int primary key) +partition by range(ld) subpartition by range(sd) +( + partition ts1 values less than(16)( + subpartition ts11 values less than(16), + subpartition ts12 values less than(66) + ), + partition ts2 values less than(66)( + subpartition ts21 values less than(16), + subpartition ts22 values less than(66) + ) +); +insert into tsource values(10,1,1),(60,1,2); +insert into tsource partition (ts1) values(10,1,2) on duplicate key update sd=3; +insert into tsource values(10,60,3); +insert into tsource subpartition (ts11) values(10,1,3) on duplicate key update sd=4; +drop table if exists tsource cascade; + +--Merge into +create table test_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt 
values(2001,1 ,1); + +create table newtest_range_pt (a int, b int, c int) +partition by range(a) +( + partition p1 values less than (2000), + partition p2 values less than (3000), + partition p3 values less than (4000), + partition p4 values less than (5000), + partition p5 values less than (maxvalue) +)ENABLE ROW MOVEMENT; + +insert into newtest_range_pt values(1,2,2); +insert into newtest_range_pt values(2,2,2); +insert into newtest_range_pt values(2001,2,2); + +MERGE INTO test_range_pt p +USING newtest_range_pt np +ON p.a= np.a +WHEN MATCHED THEN + UPDATE SET b = np.b, c = np.c +WHEN NOT MATCHED THEN + INSERT VALUES (np.a, np.b, np.c); + +select * from test_range_pt; +select * from newtest_range_pt; + +delete from test_range_pt; +delete from newtest_range_pt; + +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt values(2001,1 ,1); +insert into newtest_range_pt values(1,2,2); +insert into newtest_range_pt values(2,2,2); +insert into newtest_range_pt values(2001,2,2); +insert into newtest_range_pt values(2002,2,2); + +MERGE INTO test_range_pt partition (p1) p +USING newtest_range_pt partition (p1) np +ON p.a= np.a +WHEN MATCHED THEN + UPDATE SET b = np.b, c = np.c +WHEN NOT MATCHED THEN + INSERT VALUES (np.a, np.b, np.c); + +select * from test_range_pt; +select * from newtest_range_pt; + +delete from test_range_pt; +delete from newtest_range_pt; + +insert into test_range_pt values(1, 1, 1); +insert into test_range_pt values(2001,1 ,1); +insert into newtest_range_pt values(1,2,2); +insert into newtest_range_pt values(2,2,2); +insert into newtest_range_pt values(2001,2,2); +insert into newtest_range_pt values(2002,2,2); + +MERGE INTO test_range_pt partition for (1) p +USING newtest_range_pt partition for (1) np +ON p.a= np.a +WHEN MATCHED THEN + UPDATE SET b = np.b, c = np.c +WHEN NOT MATCHED THEN + INSERT VALUES (np.a, np.b, np.c); + +select * from test_range_pt; +select * from newtest_range_pt; + +drop table test_range_pt; +drop table newtest_range_pt; + + + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201901', '1', '1', 1); +insert into range_list values('201902', '2', '1', 2); + +CREATE TABLE newrange_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into newrange_list values('201902', '1', '1', 1); +insert into newrange_list values('201903', '1', '1', 2); + +MERGE INTO range_list p +USING newrange_list np +ON p.month_code= np.month_code +WHEN MATCHED THEN + UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt +WHEN NOT MATCHED THEN + INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt); + 
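+-- Note: the MERGE above matches across every partition of both tables.
+-- The variants below add partition (name) / partition for (value) clauses
+-- (and their subpartition equivalents), which restrict the target and
+-- source scans to the named (sub)partition only.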
+select *from range_list; +select *from newrange_list; + +delete from range_list; +delete from newrange_list; + +insert into range_list values('201901', '1', '1', 1); +insert into range_list values('201902', '2', '1', 2); +insert into newrange_list values('201902', '1', '1', 1); +insert into newrange_list values('201903', '1', '1', 2); + +MERGE INTO range_list partition (p_201901) p +USING newrange_list partition (p_201901) np +ON p.month_code= np.month_code +WHEN MATCHED THEN + UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt +WHEN NOT MATCHED THEN + INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt); + +select *from range_list; +select *from newrange_list; + +delete from range_list; +delete from newrange_list; + +insert into range_list values('201901', '1', '1', 1); +insert into range_list values('201902', '2', '1', 2); +insert into newrange_list values('201902', '1', '1', 1); +insert into newrange_list values('201903', '1', '1', 2); + +MERGE INTO range_list partition for ('201901') p +USING newrange_list partition for ('201901') np +ON p.month_code= np.month_code +WHEN MATCHED THEN + UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt +WHEN NOT MATCHED THEN + INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt); + +select *from range_list; +select *from newrange_list; + +delete from range_list; +delete from newrange_list; + +insert into range_list values('201901', '1', '1', 1); +insert into range_list values('201902', '2', '1', 2); +insert into newrange_list values('201902', '1', '1', 1); +insert into newrange_list values('201903', '1', '1', 2); + +MERGE INTO range_list subpartition (p_201901_a) p +USING newrange_list subpartition (p_201901_a) np +ON p.month_code= np.month_code +WHEN MATCHED THEN + UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt +WHEN NOT MATCHED THEN + INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt); + +select *from range_list; +select *from newrange_list; + +delete from range_list; +delete from newrange_list; + +insert into range_list values('201901', '1', '1', 1); +insert into range_list values('201902', '2', '1', 2); +insert into newrange_list values('201902', '1', '1', 1); +insert into newrange_list values('201903', '1', '1', 2); + +MERGE INTO range_list subpartition for ('201901', '1') p +USING newrange_list subpartition for ('201901', '1') np +ON p.month_code= np.month_code +WHEN MATCHED THEN + UPDATE SET dept_code = np.dept_code, user_no = np.user_no, sales_amt = np.sales_amt +WHEN NOT MATCHED THEN + INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt); + +select *from range_list; +select *from newrange_list; + +delete from range_list; +delete from newrange_list; + +insert into range_list values('201901', '1', '1', 1); +insert into range_list values('201902', '2', '1', 2); +insert into newrange_list values('201902', '1', '1', 1); +insert into newrange_list values('201903', '1', '1', 2); + +MERGE INTO range_list p +USING newrange_list np +ON p.month_code= np.month_code +WHEN MATCHED THEN + UPDATE SET dept_code = '3', user_no = np.user_no, sales_amt = np.sales_amt +WHEN NOT MATCHED THEN + INSERT VALUES (np.month_code, np.dept_code, np.user_no, np.sales_amt); + +select *from range_list; +select *from newrange_list; + +drop table range_list; +drop table newrange_list; + + +create table lm_list_range (id int,sd int,name varchar2) +partition by list(id) subpartition by range(sd)( + partition 
ts1 values(1,2,3,4,5) + (subpartition ts11 values less than(5),subpartition ts12 values less than(10),subpartition ts13 values less than(20)), + partition ts2 values(6,7,8,9,10), + partition ts3 values(11,12,13,14,15) + (subpartition ts31 values less than(5),subpartition ts32 values less than(10),subpartition ts33 values less than(20))); + +select * from lm_list_range partition for(5,34); +drop table lm_list_range; diff --git a/src/test/regress/sql/pg_job.sql b/src/test/regress/sql/pg_job.sql new file mode 100644 index 000000000..983a05453 --- /dev/null +++ b/src/test/regress/sql/pg_job.sql @@ -0,0 +1,27 @@ +create database pl_test_job DBCOMPATIBILITY 'pg'; +\c pl_test_job; +CREATE TABLE pg_job_test_1(COL1 INT); + +CREATE OR REPLACE PROCEDURE pg_job_test() +AS +aaa int; +BEGIN + FOR i IN 0..20 LOOP + INSERT INTO pg_job_test_1(COL1) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END; +/ +select dbe_task.id_submit(103, 'call pg_job_test();', sysdate, 'sysdate+3.0/24'); +select pg_sleep(5); +select count(*) from pg_job_test_1; + +drop procedure pg_job_test; +drop table if exists pg_job_test_1; +call dbe_task.cancel(103); +\c regression; +drop database IF EXISTS pl_test_job; diff --git a/src/test/regress/sql/pl_debugger_client.sql b/src/test/regress/sql/pl_debugger_client.sql index c253e1a3f..e9efd11ff 100644 --- a/src/test/regress/sql/pl_debugger_client.sql +++ b/src/test/regress/sql/pl_debugger_client.sql @@ -361,6 +361,12 @@ select frameno, funcname, lineno, query from dbe_pldebugger.backtrace(); select funcname, lineno, query from dbe_pldebugger.continue(); +-- test_empty +select pg_sleep(1); + +select dbe_pldebugger.attach(nodename, port) from debug_info; + + -- test set_var select pg_sleep(1); diff --git a/src/test/regress/sql/pl_debugger_server.sql b/src/test/regress/sql/pl_debugger_server.sql index 10026085d..befdc6145 100644 --- a/src/test/regress/sql/pl_debugger_server.sql +++ b/src/test/regress/sql/pl_debugger_server.sql @@ -278,6 +278,25 @@ select * from turn_on_debugger('test_debug_recursive'); select * from test_debug_recursive (1, 1); +--test empty procedure +CREATE OR REPLACE PROCEDURE test_empty(i int,j out int) +AS +DECLARE +begin + +end; +/ + +truncate debug_info; + +select * from turn_on_debugger('test_empty'); + +select * from dbe_pldebugger.local_debug_server_info(); + +select * from debug_info; + +call test_empty(1, ''); + -- test set_var CREATE OR REPLACE PROCEDURE test_setvar(x int) AS DECLARE diff --git a/src/test/regress/sql/pldeveloper_gs_source.sql b/src/test/regress/sql/pldeveloper_gs_source.sql new file mode 100644 index 000000000..6f837e503 --- /dev/null +++ b/src/test/regress/sql/pldeveloper_gs_source.sql @@ -0,0 +1,304 @@ +create role gs_developper password 'Dev@9999'sysadmin; +set role gs_developper password 'Dev@9999'; +create schema gs_source; +set current_schema = gs_source; +set plsql_show_all_error = on; +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +-- +-- basic cases +-- +-- function normal +CREATE OR REPLACE PROCEDURE gourav88(a inout int, b out int, c out int, d in int, e inout int) +PACKAGE +AS DECLARE V1 INT; +param1 INT; +param2 INT; +BEGIN +param1 := 10; +param2 := 20; +V1 := param1 + param2; +a:=10; +b:=100; +c:=1000; +create table if not exists gourav.gourav888 (a integer, b integer, c integer); +insert into gourav.gourav888 values(a,b,c); +END; +/ +create or replace function func1() returns boolean as $$ declare +sql_temp text; +begin + sql_temp := 'create table test(a int);'; + 
execute immediate sql_temp; + return true; +end; +$$ language plpgsql; +-- function fail +create or replace function func2() returns boolean as $$ declare +sql_temp text; +begin + sql_temp1 := 'create table test(a int);'; + execute immediate sql_temp; + return true; +end; +$$ language plpgsql; +-- procedure fail/procedure does not replace function with the same name +create or replace procedure func1 +is +begin +insert into fasd af asd asdf; +end; +/ +-- package +CREATE OR REPLACE PACKAGE emp_bonus9 AS +da int; +PROCEDURE aa(gg int,kk varchar2); +PROCEDURE aa(kk int,gg int); +END emp_bonus9; +/ +-- package body with failure +CREATE OR REPLACE PACKAGE BODY emp_bonus9 AS +dd int; +PROCEDURE aa(gg int,kk varchar2) +IS +BEGIN +insert int aa aa; +END; +PROCEDURE aa(kk int,gg int) +IS +BEGIN +insert into test1 values(77); +END; +END emp_bonus9; +/ +select rolname, name, status, type, src from DBE_PLDEVELOPER.gs_source s join pg_authid a on s.owner = a.oid order by name; +-- check if id is valid +select distinct p.pkgname, s.name from gs_package p, DBE_PLDEVELOPER.gs_source s where s.id = p.oid order by name; +select distinct p.proname, s.name from pg_proc p, DBE_PLDEVELOPER.gs_source s where s.id = p.oid order by name; +select s.name, count(*) from DBE_PLDEVELOPER.gs_errors E, DBE_PLDEVELOPER.gs_source s where s.id = e.id group by s.name order by name; +truncate DBE_PLDEVELOPER.gs_source; +-- +-- extended cases +-- +-- sensitive information masking +create or replace procedure mask +is +begin +create role phil password 'Phil@123'; +end; +/ +-- change of owner +create role jackson_src password 'Jackson#456' sysadmin; +set role jackson_src password 'Jackson#456'; +create or replace procedure func1 +is +begin +insert into fasd af asd asdf; +end; +/ +set role gs_developper password 'Dev@9999'; +set behavior_compat_options='allow_procedure_compile_check'; +-- [no log] trigger func +CREATE TABLE table_stats ( + table_name text primary key, + num_insert_query int DEFAULT 0); +CREATE FUNCTION count_insert_query() RETURNS TRIGGER AS $_$ +BEGIN + UPDATE table_stats SET num_insert_query = num_insert_query + 1 WHERE table_name = TG_TABLE_NAME; + RETURN NEW; +END $_$ LANGUAGE 'plpgsql'; +-- [no log] sql function +create function func0(integer,integer) RETURNS integer +AS 'SELECT $1 + $2;' +LANGUAGE SQL +IMMUTABLE SHIPPABLE +RETURNS NULL ON NULL INPUT; +-- [no log] duplicate function definition without replace mark +create function func1() returns boolean as $$ declare +sql_temp text; +begin + sql_temp := 'create table test(a int);'; + execute immediate sql_temp; + return true; +end; +$$ language plpgsql; +create or replace procedure p1(a varchar2(10)) +is +begin + CREATE ROW LEVEL SECURITY POLICY p02 ON document_row AS WHATEVER + USING (dlevel <= (SELECT aid FROM account_row WHERE aname = current_user)); + insert int asd asd; + insert into test1 values(1); + insert int asd asd; +end; +/ +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; +create or replace package pkg1 +is +procedure proc1(c ff%F); +end pkg1; +/ +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; +create or replace package pkg2 +is +a inv%d; +procedure proc1(c ff%F); +end pkg2; +/ +create or replace package pkg3 +is +a int; +end pkg3; +/ +create or replace package body pkg3 +is +a b c d; +procedure proc1() +is +begin +insert int asd asd; +end; +end pkg3; +/ +select name,type,line,src from DBE_PLDEVELOPER.gs_errors; + +create or replace procedure pro70 is +begin +savepoint save_a; +commit; +savepoint 
save_a; +end; +/ +create table t1 (id int); +create or replace procedure pro71 is +cursor c1 for select pro70() from t1; +val int; +begin +end; +/ +create or replace function bulk_f_039_1() returns int[] +LANGUAGE plpgsql AS +$$ +declare +var1 int[]; +CURSOR C1 IS select id,id from t1 order by 1 desc; +begin +return var1; +end; +$$; +create or replace procedure pro25 +as +type tpc1 is ref cursor; +--v_cur tpc1; +begin +open v_cur for select c1,c2 from tab1; +end; +/ + +CREATE OR REPLACE PACKAGE error2 IS +a int;b int; +FUNCTION func1(a in int, b inout int, c out int) return int; +FUNCTION func2(a in int, b inout int, c out int) return int; +END error2; +/ +CREATE OR REPLACE PACKAGE BODY error2 IS +FUNCTION func1 (a in int, b inout int c out int) return int +IS +a1 NUMBER; +BEGIN +a1 :=10; +RETURN(a1 + a + b); +END; +aaa; +FUNCTION func2 (a in int, b inout int c out int) return int +IS +a1 NUMBER; +BEGIN +a1 :=10; +RETURN(a1 + a + b); +END; +END error2; +/ + +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; +-- [no log] guc off +set plsql_show_all_error = off; +create or replace procedure func00 +is +begin +create role yyy password 'Gauss@123'; +end; +/ +set plsql_show_all_error=on; +create or replace procedure proc4 +is +begin +insert int a; +end; +/ +create or replace package pkg4 +is +a a; +end pkg4; +/ +select name,type,src,line from DBE_PLDEVELOPER.gs_errors order by name; +select name,type,status,src from DBE_PLDEVELOPER.gs_source order by name; +set plsql_show_all_error=off; +create or replace procedure proc4 +is +b int; +c int; +begin +insert int a; +end; +/ +create or replace package pkg4 +is +a a; +end pkg4; +/ +select name,type,status,src from DBE_PLDEVELOPER.gs_source order by name; +create or replace procedure proc5 +is +b int; +c int; +begin +insert int a; +end; +/ +create or replace package pkg5 +is +a a; +end pkg5; +/ +create or replace package pack3 is +array_v1 pack1.array_type1; +procedure pp1(); +end pack3; +/ +set behavior_compat_options='skip_insert_gs_source'; +create or replace procedure SkipInsertGsSource +is +begin +null; +end; +/ +set plsql_show_all_error to on; +select name,type,status,src from DBE_PLDEVELOPER.gs_source order by name; +select name,type,line,src from DBE_PLDEVELOPER.gs_errors order by name; +drop package if exists pkg4; +drop package if exists pkg5; +drop function if exists proc4; +drop function if exists proc5; +drop package pkg1; +drop package pkg2; +drop package pkg3; +drop package emp_bonus9; +select rolname, name, status, type, src from DBE_PLDEVELOPER.gs_source s join pg_authid a on s.owner = a.oid order by name; +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +reset role; +reset behavior_compat_options; +drop schema gs_source cascade; +drop role jackson_src; +drop role gs_developper; diff --git a/src/test/regress/sql/pljson.sql b/src/test/regress/sql/pljson.sql new file mode 100644 index 000000000..04b7cc3db --- /dev/null +++ b/src/test/regress/sql/pljson.sql @@ -0,0 +1,3904 @@ +CREATE SCHEMA DBE_PLJSON; + +set current_schema=DBE_PLJSON; + +/* not used now */ +create type pljson_element as +( + obj_type number +); + +create type pljson_value as ( + /* 1 = object, 2 = array, 3 = string, 4 = number, 5 = bool, 6 = null */ + typeval number(1), + str varchar2(32767), + /* store 1 as true, 0 as false */ + num number, + num_double binary_double, + num_repr_number_p varchar2(1), + num_repr_double_p varchar2(1), + /* object or array in here */ + object_or_array pljson_element, + + extended_str clob, + 
mapname varchar2(4000), + mapindx number(32) +); + +create type pljson_list as ( + pljson_list_data pljson_value[] +); + +create type pljson as ( + pljson_list_data pljson_value[], + check_for_duplicate number +); + +alter type pljson_value add attribute arr pljson_list; +alter type pljson_value add attribute obj pljson; + +create or replace package pljson_value as + + function gs_pljson_value() return pljson_value; + function gs_pljson_value(b boolean) return pljson_value; + function gs_pljson_value(str varchar2, esc boolean default true) return pljson_value; + function gs_pljson_value(str clob, esc boolean default true) return pljson_value; + function gs_pljson_value(num number) return pljson_value; + function gs_pljson_value(num_double binary_double) return pljson_value; + function gs_pljson_value(elem pljson_element) return pljson_value; + function gs_makenull() return pljson_value; + + function gs_pljson_value(arr pljson_list) return pljson_value; + function gs_pljson_value(obj pljson) return pljson_value; + + function gs_get_type(json_value pljson_value) return varchar2; + function gs_get_string(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2; + procedure gs_get_string_clob(json_value pljson_value, buf inout clob); + function gs_get_clob(json_value pljson_value) return clob; + function gs_get_bool(json_value pljson_value) return boolean; + function gs_get_number(json_value pljson_value) return number; + function gs_get_double(json_value pljson_value) return binary_double; + function gs_get_element(json_value pljson_value) return pljson_element; + function gs_get_null(json_value pljson_value) return varchar2; + + function gs_is_string(json_value pljson_value) return boolean; + function gs_is_bool(json_value pljson_value) return boolean; + function gs_is_number(json_value pljson_value) return boolean; + function gs_is_number_repr_number(json_value pljson_value) return boolean; + function gs_is_number_repr_double(json_value pljson_value) return boolean; + function gs_is_object(json_value pljson_value) return boolean; + function gs_is_array(json_value pljson_value) return boolean; + function gs_is_null(json_value pljson_value) return boolean; + + function gs_value_of(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2; + + procedure gs_parse_number(json_value inout pljson_value, str varchar2); + function gs_number_toString(json_value pljson_value) return varchar2; + function gs_to_char(json_value pljson_value, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure gs_to_clob(json_value pljson_value, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure gs_print(json_value pljson_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure htp(json_value pljson_value, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + +end pljson_value; +/ + +create or replace package pljson_list as + + function gs_pljson_list() return pljson_list; + function gs_pljson_list(str varchar2) return pljson_list; + function gs_pljson_list(str clob) return pljson_list; + function gs_pljson_list(str blob, charset varchar2 default 'UTF8') return pljson_list; + function gs_pljson_list(str_array varchar2[]) return pljson_list; + function gs_pljson_list(num_array number[]) return pljson_list; + 
function gs_pljson_list(elem pljson_value) return pljson_list; + + procedure gs_append(json_list inout pljson_list, elem pljson_value, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem varchar2, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem clob, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem number, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem binary_double, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem boolean, _position integer default null); + procedure gs_append(json_list inout pljson_list, elem pljson_list, _position integer default null); + + procedure gs_remove(json_list inout pljson_list, _position integer); + procedure gs_remove_first(json_list inout pljson_list); + procedure gs_remove_last(json_list inout pljson_list); + + function gs_count(json_list pljson_list) return number; + function gs_get(json_list pljson_list, _position integer) return pljson_value; + function gs_get_string(json_list pljson_list, _position integer) return varchar2; + function gs_get_clob(json_list pljson_list, _position integer) return clob; + function gs_get_bool(json_list pljson_list, _position integer) return boolean; + function gs_get_number(json_list pljson_list, _position integer) return number; + function gs_get_double(json_list pljson_list, _position integer) return binary_double; + function gs_get_pljson_list(json_list pljson_list, _position integer) return pljson_list; + function gs_head(json_list pljson_list) return pljson_value; + function gs_last(json_list pljson_list) return pljson_value; + function gs_tail(json_list pljson_list) return pljson_list; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_value); + procedure gs_replace(json_list inout pljson_list, _position integer, elem varchar2); + procedure gs_replace(json_list inout pljson_list, _position integer, elem clob); + procedure gs_replace(json_list inout pljson_list, _position integer, elem number); + procedure gs_replace(json_list inout pljson_list, _position integer, elem binary_double); + procedure gs_replace(json_list inout pljson_list, _position integer, elem boolean); + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_list); + + function gs_to_json_value(json_list pljson_list) return pljson_value; + + function gs_to_char(json_list pljson_list, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure gs_to_clob(json_list pljson_list, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure gs_print(json_list pljson_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure htp(json_list pljson_list, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function gs_path(json_list pljson_list, json_path varchar2, base number default 1) return pljson_value; + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_value, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem varchar2, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem clob, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem 
boolean, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem number, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem binary_double, base number default 1); + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_list, base number default 1); + + procedure gs_path_remove(json_list inout pljson_list, json_path varchar2, base number default 1); + +end pljson_list; +/ + +create or replace package pljson as + + function gs_pljson() return pljson; + function gs_pljson(str varchar2) return pljson; + function gs_pljson(str clob) return pljson; + function gs_pljson(str blob, charset varchar2 default 'UTF8') return pljson; + function gs_pljson(str_array varchar2[]) return pljson; + function gs_pljson(elem pljson_value) return pljson; + function gs_pljson(l pljson_list) return pljson; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_value, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value varchar2, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value clob, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value number, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value binary_double, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value boolean, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson, _position integer default null); + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_list, _position integer default null); + + procedure gs_remove(pj pljson, pair_name varchar2); + + function gs_count(pj pljson) return number; + function gs_get(pj pljson, pair_name varchar2) return pljson_value; + function gs_get_string(pj pljson, pair_name varchar2) return varchar2; + function gs_get_clob(pj pljson, pair_name varchar2) return clob; + function gs_get_bool(pj pljson, pair_name varchar2) return boolean; + function gs_get_number(pj pljson, pair_name varchar2) return number; + function gs_get_double(pj pljson, pair_name varchar2) return binary_double; + function gs_get_pljson(pj pljson, pair_name varchar2) return pljson; + function gs_get_pljson_list(pj pljson, pair_name varchar2) return pljson_list; + function gs_get(pj pljson, _position integer) return pljson_value; + + function gs_index_of(pj pljson, pair_name varchar2) return number; + function gs_exist(pj pljson, pair_name varchar2) return boolean; + function gs_to_json_value(pj pljson) return pljson_value; + procedure gs_check_duplicate(pj inout pljson, v_set boolean); + procedure gs_remove_duplicates(pj inout pljson); + + function gs_to_char(pj pljson, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure gs_to_clob(pj pljson, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure gs_print(pj pljson, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure htp(pj pljson, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function gs_path(pj pljson, json_path varchar2, base number default 1) return pljson_value; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem 
pljson_value, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem varchar2, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem clob, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem boolean, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem number, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem binary_double, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson, base number default 1); + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_list, base number default 1); + + procedure gs_path_remove(pj inout pljson, json_path varchar2, base number default 1); + + function gs_get_keys(pj pljson) return pljson_list; + function gs_get_values(pj pljson) return pljson_list; + +end pljson; +/ + +create or replace package pljson_ext as + + function gs_parsePath(json_path varchar2, base number default 1) return pljson_list; + + --JSON Path getters + function gs_get_json_value(obj pljson, v_path varchar2, base number default 1) return pljson_value; + function gs_get_string(obj pljson, path varchar2, base number default 1) return varchar2; + function gs_get_bool(obj pljson, path varchar2, base number default 1) return boolean; + function gs_get_number(obj pljson, path varchar2, base number default 1) return number; + function gs_get_double(obj pljson, path varchar2, base number default 1) return binary_double; + function gs_get_json(obj pljson, path varchar2, base number default 1) return pljson; + function gs_get_json_list(obj pljson, path varchar2, base number default 1) return pljson_list; + + --JSON Path putters + procedure gs_put(obj inout pljson, path varchar2, elem pljson_value, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem varchar2, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem boolean, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem number, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem binary_double, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem pljson, base number default 1); + procedure gs_put(obj inout pljson, path varchar2, elem pljson_list, base number default 1); + + procedure gs_remove(obj inout pljson, path varchar2, base number default 1); + + --Pretty print with JSON Path + --function pp(obj pljson, v_path varchar2) return varchar2; + procedure gs_pp(obj pljson, v_path varchar2); + procedure pp_htp(obj pljson, v_path varchar2); + + -- date function + format_string varchar2(30) := 'yyyy-mm-dd hh24:mi:ss'; + function gs_is_integer(v pljson_value) return boolean; + function gs_to_json_value(d date) return pljson_value; + function gs_is_date(v pljson_value) return boolean; + function gs_to_date(v pljson_value) return date; + function gs_to_date2(v pljson_value) return date; + function gs_get_date(obj pljson, path varchar2, base number default 1) return date; + procedure gs_put(obj inout pljson, path varchar2, elem date, base number default 1); + + function gs_encodeBase64Blob2Clob(p_blob blob) return clob; + function gs_decodeBase64Clob2Blob(p_clob clob) return blob; + + function gs_base64(binarydata blob) return pljson_list; + function gs_base64(l pljson_list) return blob; + + function gs_encode(binarydata blob) return pljson_value; + function gs_decode(v 
pljson_value) return blob; + + procedure gs_blob2clob(b blob, c out clob, charset varchar2 default 'UTF8'); + +end pljson_ext; +/ + +create type rToken as ( + type_name varchar2(7), + line integer, + col integer, + data varchar2(32767), + data_overflow clob +); + +create type json_src as ( + len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob +); + +create or replace package pljson_parser as +/* +create type rToken as ( + type_name varchar2(7), + line integer, + col integer, + data varchar2(32767), + data_overflow clob +); + +create type json_src as ( + len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob +); +*/ + +/* + type rToken is record ( + type_name varchar2(7), + line integer, + col integer, + data varchar2(32767), + data_overflow clob); + type rToken[] is table of rToken index by integer; + type json_src is record (len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob); +*/ + + json_strict boolean not null := false; + + -- private + -- function gs_lengthcc(buf clob) return number; + -- function gs_prepareVarchar2(buf varchar2) return json_src; + -- function gs_prepareClob(buf clob) return json_src; + -- function gs_next_char(indx number, s inout json_src) return varchar2; + -- function gs_next_char2(indx number, s inout json_src, amount number default 1) return varchar2; + -- function gs_lexer(jsrc inout json_src) return rToken[]; + -- function gs_parseObj(tokens rToken[], indx inout integer) return pljson; + -- procedure print_token(t rToken); + + -- public + function gs_parser(str varchar2) return pljson; + function gs_parse_list(str varchar2) return pljson_list; + function gs_parse_any(str varchar2) return pljson_value; + function gs_parser(str clob) return pljson; + function gs_parse_list(str clob) return pljson_list; + function gs_parse_any(str clob) return pljson_value; + + procedure gs_remove_duplicates(obj inout pljson); + function gs_get_version() return varchar2; + +end pljson_parser; +/ + +create or replace package body pljson_parser as + + decimalpoint varchar2(1) := '.'; + + procedure s_error(text varchar2, line number, col number) as + begin + raise exception 'JSON Scanner exception'; + end; + + procedure s_error(text varchar2, tok rToken) as + begin + raise exception 'JSON Scanner exception'; + end; + + procedure p_error(text varchar2, tok rToken) as + begin + raise exception 'JSON Parser exception'; + end; + + -- make token + function mt(t varchar2, l integer, c integer, d varchar2) return rToken as + token rToken; + begin + token.type_name := t; + token.line := l; + token.col := c; + token.data := d; + return token; + end; + + procedure print_token(t rToken) as + begin + dbe_output.print_line('Line: '||t.line||' - Column: '||t.col||' - Type: '||t.type_name||' - Content: '||t.data); + end; + + function gs_lengthcc(buf clob) return number as + _offset number := 0; + len number := 0; + src varchar2(32767); + src_len number; + begin + while true loop + -- begin + src := dbe_lob.substr(buf, 4000, _offset+1); + -- exception + -- when ucs2_exception then + -- src := dbe_lob.substr(buf, 3999, offset+1); + -- end; + exit when src is null; + len := len + length(src); + _offset := _offset + length(src); --length2 + end loop; + return len; + end; + + -- procedure update_decimalpoint as + -- begin + -- select substr(value, 1, 1) + -- into decimalpoint + -- from nls_session_parameters + -- where parameter = 'NLS_NUMERIC_CHARACTERS'; + -- end; + function gs_prepareVarchar2(buf varchar2) return 
json_src as
+        temp json_src;
+    begin
+        temp.s_clob := buf;
+        temp.offset_chars := 0;
+        temp._offset := 0;
+        temp.src := substr(buf, 1, 4000);
+        temp.len := length(buf);
+        return temp;
+    end;
+
+    function gs_prepareClob(buf clob) return json_src as
+        temp json_src;
+    begin
+        temp.s_clob := buf;
+        temp.offset_chars := 0;
+        temp._offset := 0;
+        temp.src := dbe_lob.substr(buf, 4000, temp._offset+1);
+        temp.len := gs_lengthcc(buf); --dbe_lob.get_length(buf);
+        return temp;
+    end;
+
+    procedure gs_updateClob(v_extended inout clob, v_str varchar2) as
+    begin
+        dbe_lob.write_append(v_extended, length(v_str), v_str);
+    end;
+
+    function gs_next_char(indx number, s inout json_src) return varchar2 as
+    begin
+
+        if (indx > s.len) then
+            return null;
+        end if;
+
+        if (indx > length(s.src) + s.offset_chars) then
+            while (indx > length(s.src) + s.offset_chars) loop
+                s.offset_chars := s.offset_chars + length(s.src);
+                s._offset := s._offset + length(s.src); -- length2
+                -- begin exception
+                s.src := dbe_lob.substr(s.s_clob, 4000, s._offset+1);
+            end loop;
+        elsif (indx <= s.offset_chars) then
+            s.offset_chars := 0;
+            s._offset := 0;
+            -- begin exception (substr exception?)
+            s.src := dbe_lob.substr(s.s_clob, 4000, s._offset+1);
+            while (indx > length(s.src) + s.offset_chars) loop
+                s.offset_chars := s.offset_chars + length(s.src);
+                s._offset := s._offset + length(s.src); --length2
+                s.src := dbe_lob.substr(s.s_clob, 4000, s._offset+1);
+            end loop;
+        end if;
+
+        return substr(s.src, indx-s.offset_chars, 1);
+    end;
+
+    function gs_next_char2(indx number, s inout json_src, amount number default 1) return varchar2 as
+        buf varchar2(32767) := '';
+    begin
+        for i in 1..amount loop
+            buf := buf || gs_next_char(indx-1+i, s);
+        end loop;
+        return buf;
+    end;
+
+    -- [a-zA-Z]([a-zA-Z0-9])*
+    procedure gs_lexName(jsrc inout json_src, tok inout rToken, indx inout integer) as
+        varbuf varchar2(32767) := '';
+        buf varchar(4);
+        num number;
+    begin
+        buf := gs_next_char(indx, jsrc);
+        while (REGEXP_LIKE(buf, '^[[:alnum:]\_]$', 'i')) loop
+            varbuf := varbuf || buf;
+            indx := indx + 1;
+            buf := gs_next_char(indx, jsrc);
+            if (buf is null) then
+                goto retname;
+                --debug('Premature string ending');
+            end if;
+        end loop;
+        <<retname>>
+        --could check for reserved keywords here
+        --debug(varbuf);
+        tok.data := varbuf;
+        indx := indx - 1;
+    end;
+
+    procedure gs_lexNumber(jsrc inout json_src, tok inout rToken, indx inout integer) as
+        numbuf varchar2(4000) := '';
+        buf varchar2(4);
+        checkLoop boolean;
+    begin
+        buf := gs_next_char(indx, jsrc);
+        if (buf = '-') then numbuf := '-'; indx := indx + 1; end if;
+        buf := gs_next_char(indx, jsrc);
+        --0 or [1-9]([0-9])*
+        if (buf = '0') then
+            numbuf := numbuf || '0'; indx := indx + 1;
+            buf := gs_next_char(indx, jsrc);
+        elsif (buf >= '1' and buf <= '9') then
+            numbuf := numbuf || buf; indx := indx + 1;
+            --read digits
+            buf := gs_next_char(indx, jsrc);
+            while (buf >= '0' and buf <= '9') loop
+                numbuf := numbuf || buf; indx := indx + 1;
+                buf := gs_next_char(indx, jsrc);
+            end loop;
+        end if;
+        --fraction
+        if (buf = '.') then
+            numbuf := numbuf || buf; indx := indx + 1;
+            buf := gs_next_char(indx, jsrc);
+            checkLoop := FALSE;
+            while (buf >= '0' and buf <= '9') loop
+                checkLoop := TRUE;
+                numbuf := numbuf || buf; indx := indx + 1;
+                buf := gs_next_char(indx, jsrc);
+            end loop;
+            if (not checkLoop) then
+                s_error('Expected: digits in fraction', tok);
+            end if;
+        end if;
+        --exp part
+        if (buf in ('e', 'E')) then
+            numbuf := numbuf || buf; indx := indx + 1;
+            buf := gs_next_char(indx, jsrc);
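+            -- an optional sign may directly follow the 'e'/'E' marker;
+            -- checkLoop below then requires at least one exponent digit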
+ if (buf = '+' or buf = '-') then + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end if; + checkLoop := FALSE; + while (buf >= '0' and buf <= '9') loop + checkLoop := TRUE; + numbuf := numbuf || buf; indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end loop; + if (not checkLoop) then + s_error('Expected: digits in exp', tok); + end if; + end if; + + tok.data := numbuf; + end; + + procedure gs_lexString(jsrc inout json_src, tok inout rToken, indx inout integer, endChar char) as + v_extended clob := null; + v_count number := 0; + varbuf varchar2(32767) := ''; + buf varchar(4); + wrong boolean; + max_string_chars number := 5000; + begin + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + while (buf != endChar) loop + --clob control + if (v_count > 8191) then + if (v_extended is null) then + dbe_lob.create_temporary(v_extended, true); + end if; + gs_updateClob(v_extended, varbuf); --unistr() + varbuf := ''; + v_count := 0; + end if; + if (buf = Chr(13) or buf = CHR(9) or buf = CHR(10)) then + s_error('Control characters not allowed (CHR(9),CHR(10),CHR(13))', tok); + end if; + if (buf = '\') then + --varbuf := varbuf || buf; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + case + when buf in ('\') then + varbuf := varbuf || buf || buf; v_count := v_count + 2; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + when buf in ('"', '/') then + varbuf := varbuf || buf; v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + when buf = '''' then + if (json_strict = false) then + varbuf := varbuf || buf; v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + else + s_error('strictmode - expected: " \ / b f n r t u ', tok); + end if; + when buf in ('b', 'f', 'n', 'r', 't') then + --backspace b = U+0008 + --formfeed f = U+000C + --newline n = U+000A + --carret r = U+000D + --tabulator t = U+0009 + case buf + when 'b' then varbuf := varbuf || chr(8); + when 'f' then varbuf := varbuf || chr(12); + when 'n' then varbuf := varbuf || chr(10); + when 'r' then varbuf := varbuf || chr(13); + when 't' then varbuf := varbuf || chr(9); + end case; + --varbuf := varbuf || buf; + v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + when buf = 'u' then + --four hexadecimal chars + declare + four varchar2(4); + begin + four := gs_next_char2(indx+1, jsrc, 4); + wrong := FALSE; + if (upper(substr(four, 1, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (upper(substr(four, 2, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (upper(substr(four, 3, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (upper(substr(four, 4, 1)) not in ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','a','b','c','d','e','f')) then wrong := TRUE; end if; + if (wrong) then + s_error('expected: " \u([0-9][A-F]){4}', tok); + end if; + -- varbuf := varbuf || buf || four; + varbuf := varbuf || '\'||four;--chr(to_number(four,'XXXX')); + v_count := v_count + 5; + indx := indx + 5; + buf := gs_next_char(indx, jsrc); + end; + else + s_error('expected: " \ / b f n r t u ', tok); + end case; + else + varbuf := varbuf || buf; + v_count := v_count + 1; + indx := indx + 1; + buf := gs_next_char(indx, jsrc); + end if; + end loop; + + if (buf is null) then 
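+ -- the loop above exits with buf null only when input ended before the closing quote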
+ s_error('string ending not found', tok); + end if; + + if (v_extended is not null) then + gs_updateClob(v_extended, varbuf); + tok.data_overflow := v_extended; + tok.data := PKG_UTIL.lob_read(v_extended, max_string_chars, 1, 0); + else + tok.data := varbuf; + end if; + end; + + --function gs_lexer(jsrc inout json_src) return rToken[] as + procedure gs_lexer(jsrc inout json_src, tokens out rToken[]) as + -- tokens rToken[]; + indx integer := 1; + tok_indx integer := 1; + buf varchar2(4); + lin_no number := 1; + col_no number := 0; + begin + while (indx <= jsrc.len) loop + --read into buf + buf := gs_next_char(indx, jsrc); + col_no := col_no + 1; + --convert to switch case + case + when buf = '{' then tokens[tok_indx] := mt('{', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = '}' then tokens[tok_indx] := mt('}', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = ',' then tokens[tok_indx] := mt(',', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = ':' then tokens[tok_indx] := mt(':', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = '[' then tokens[tok_indx] := mt('[', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = ']' then tokens[tok_indx] := mt(']', lin_no, col_no, null); tok_indx := tok_indx + 1; + when buf = 't' then + if (gs_next_char2(indx, jsrc, 4) != 'true') then + if (json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i')) then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + else + s_error('Expected: ''true''', lin_no, col_no); + end if; + else + tokens[tok_indx] := mt('TRUE', lin_no, col_no, null); tok_indx := tok_indx + 1; + indx := indx + 3; + col_no := col_no + 3; + end if; + when buf = 'n' then + if (gs_next_char2(indx, jsrc, 4) != 'null') then + if (json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i')) then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + else + s_error('Expected: ''null''', lin_no, col_no); + end if; + else + tokens[tok_indx] := mt('NULL', lin_no, col_no, null); tok_indx := tok_indx + 1; + indx := indx + 3; + col_no := col_no + 3; + end if; + when buf = 'f' then + if (gs_next_char2(indx, jsrc, 5) != 'false') then + if (json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i')) then + tokens[tok_indx] := mt('STRING', lin_no, col_no, null); + gs_lexName(jsrc, tokens[tok_indx], indx); + col_no := col_no + length(tokens[tok_indx].data) + 1; + tok_indx := tok_indx + 1; + else + s_error('Expected: ''false''', lin_no, col_no); + end if; + else + tokens[tok_indx] := mt('FALSE', lin_no, col_no, null); tok_indx := tok_indx + 1; + indx := indx + 4; + col_no := col_no + 4; + end if; + /* -- 9 = TAB, 10 = \n, 13 = \r (Linux = \n, Windows = \r\n, Mac = \r */ + when (buf = Chr(10)) then --linux newlines + lin_no := lin_no + 1; + col_no := 0; + when (buf = Chr(13)) then --Windows or Mac way + lin_no := lin_no + 1; + col_no := 0; + if (jsrc.len >= indx+1) then -- better safe than sorry + buf := gs_next_char(indx+1, jsrc); + if (buf = Chr(10)) then --\r\n + indx := indx + 1; + end if; + end if; + when (buf = CHR(9)) then + null; --tabbing + when (buf in ('-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9')) then --number + tokens[tok_indx] := mt('NUMBER', lin_no, col_no, null); + gs_lexNumber(jsrc, 
tokens[tok_indx], indx);
+                    indx := indx - 1;
+                    col_no := col_no + length(tokens[tok_indx].data);
+                    tok_indx := tok_indx + 1;
+                when buf = '"' then --string
+                    tokens[tok_indx] := mt('STRING', lin_no, col_no, null);
+                    -- len number, _offset number, offset_chars number, src varchar2(32767), s_clob clob
+                    -- dbe_output.print_line('len: '||jsrc.len||' - offset: '||jsrc._offset||' - offset_chars: '||jsrc.offset_chars||' - src: '||jsrc.src);
+                    gs_lexString(jsrc, tokens[tok_indx], indx, '"');
+                    -- dbe_output.print_line('len: '||jsrc.len||' - offset: '||jsrc._offset||' - offset_chars: '||jsrc.offset_chars||' - src: '||jsrc.src);
+                    col_no := col_no + length(tokens[tok_indx].data) + 1;
+                    tok_indx := tok_indx + 1;
+                when buf = '''' and json_strict = false then --string
+                    tokens[tok_indx] := mt('STRING', lin_no, col_no, null);
+                    gs_lexString(jsrc, tokens[tok_indx], indx, '''');
+                    col_no := col_no + length(tokens[tok_indx].data) + 1; --oops here
+                    tok_indx := tok_indx + 1;
+                when json_strict = false and REGEXP_LIKE(buf, '^[[:alpha:]]$', 'i') then
+                    tokens[tok_indx] := mt('STRING', lin_no, col_no, null);
+                    gs_lexName(jsrc, tokens[tok_indx], indx);
+                    if (tokens[tok_indx].data_overflow is not null) then
+                        col_no := col_no + gs_lengthcc(tokens[tok_indx].data_overflow) + 1; --dbe_lob.get_length(tokens[tok_indx].data_overflow) + 1;
+                    else
+                        col_no := col_no + length(tokens[tok_indx].data) + 1;
+                    end if;
+                    tok_indx := tok_indx + 1;
+                when json_strict = false and buf||gs_next_char(indx+1, jsrc) = '/*' then --strip comments
+                    declare
+                        saveindx number := indx;
+                        un_esc clob;
+                    begin
+                        indx := indx + 1;
+                        loop
+                            indx := indx + 1;
+                            buf := gs_next_char(indx, jsrc)||gs_next_char(indx+1, jsrc);
+                            exit when buf = '*/';
+                            exit when buf is null;
+                        end loop;
+
+                        if (indx = saveindx+2) then
+                            -- enter unescaped mode
+                            -- un_esc := empty_clob();
+                            dbe_lob.create_temporary(un_esc, true);
+                            indx := indx + 1;
+                            loop
+                                indx := indx + 1;
+                                buf := gs_next_char(indx, jsrc)||gs_next_char(indx+1, jsrc)||gs_next_char(indx+2, jsrc)||gs_next_char(indx+3, jsrc);
+                                exit when buf = '/**/';
+                                if buf is null then
+                                    s_error('Unexpected end of input; expected /**/ to end unescaped data', lin_no, col_no);
+                                end if;
+                                buf := gs_next_char(indx, jsrc);
+                                dbe_lob.write_append(un_esc, length(buf), buf);
+                            end loop;
+                            tokens[tok_indx] := mt('ESTRING', lin_no, col_no, null);
+                            tokens[tok_indx].data_overflow := un_esc;
+                            col_no := col_no + gs_lengthcc(un_esc) + 1; --dbe_lob.get_length(un_esc) + 1;
+                            tok_indx := tok_indx + 1;
+                            indx := indx + 2;
+                        end if;
+                        indx := indx + 1;
+                    end;
+                when buf = ' ' then null; --space
+                else
+                    s_error('Unexpected char: '||buf, lin_no, col_no);
+            end case;
+            indx := indx + 1;
+        end loop;
+
+    end;
+
+    /* PARSER FUNCTIONS START */
+    procedure gs_parseObj(tokens rToken[], indx inout integer, obj out pljson);
+
+    -- parse array
+    procedure gs_parseArr(tokens rToken[], indx inout integer, ret_list out pljson_list) as
+        e_arr pljson_value[];
+        v_count number := 0;
+        tok rToken;
+        pv pljson_value;
+    begin
+        --value, value, value ]
+        if (indx > tokens.count) then p_error('more elements in array were expected', tok); end if;
+        tok := tokens[indx];
+        while (tok.type_name != ']') loop
+            v_count := v_count + 1;
+
+            -- print_token(tok);
+
+            case tok.type_name
+                when 'TRUE' then e_arr[v_count] := pljson_value.gs_pljson_value(true);
+                when 'FALSE' then e_arr[v_count] := pljson_value.gs_pljson_value(false);
+                when 'NULL' then e_arr[v_count] := pljson_value.gs_pljson_value();
+                when 'STRING' then
+                    if tok.data_overflow is not null then
+                        e_arr[v_count] := pljson_value.gs_pljson_value(tok.data_overflow);
+                    else
+                        e_arr[v_count] := pljson_value.gs_pljson_value(tok.data);
+                    end if;
+                when 'ESTRING' then
+                    e_arr[v_count] := pljson_value.gs_pljson_value(tok.data_overflow, false);
+                when 'NUMBER' then
+                    pv := pljson_value.gs_pljson_value(0);
+                    pljson_value.gs_parse_number(pv, replace(tok.data, '.', decimalpoint));
+                    e_arr[v_count] := pv;
+                when '[' then
+                    declare
+                        e_list pljson_list;
+                    begin
+                        indx := indx + 1;
+                        gs_parseArr(tokens, indx, e_list);
+                        e_arr[v_count] := pljson_list.gs_to_json_value(e_list);
+                    end;
+                when '{' then
+                    declare
+                        temp_pj pljson;
+                    begin
+                        indx := indx + 1;
+                        gs_parseObj(tokens, indx, temp_pj);
+                        e_arr[v_count] := pljson.gs_to_json_value(temp_pj);
+                    end;
+                else
+                    p_error('Expected a value', tok);
+            end case;
+            indx := indx + 1;
+            if (indx > tokens.count) then p_error('] not found', tok); end if;
+            tok := tokens[indx];
+            if (tok.type_name = ',') then --advance
+                indx := indx + 1;
+                if (indx > tokens.count) then p_error('more elements in array were expected', tok); end if;
+                tok := tokens[indx];
+                if (tok.type_name = ']') then --premature exit
+                    p_error('Premature exit in array', tok);
+                end if;
+            elsif (tok.type_name != ']') then --error
+                p_error('Expected , or ]', tok);
+            end if;
+        end loop;
+        ret_list.pljson_list_data := e_arr;
+    end;
+
+    -- parse member
+    procedure gs_parseMem(tokens rToken[], indx inout integer, mem_name varchar2, mem_indx number, mem out pljson_value) as
+        tok rToken;
+        pv pljson_value;
+
+    begin
+        tok := tokens[indx];
+
+        -- print_token(tok);
+
+        case tok.type_name
+            when 'TRUE' then mem := pljson_value.gs_pljson_value(true);
+            when 'FALSE' then mem := pljson_value.gs_pljson_value(false);
+            when 'NULL' then mem := pljson_value.gs_pljson_value();
+            when 'STRING' then
+                if tok.data_overflow is not null then
+                    mem := pljson_value.gs_pljson_value(tok.data_overflow);
+                else
+                    mem := pljson_value.gs_pljson_value(tok.data);
+                end if;
+            when 'ESTRING' then mem := pljson_value.gs_pljson_value(tok.data_overflow, false);
+            when 'NUMBER' then
+                pv := pljson_value.gs_pljson_value(0);
+                pljson_value.gs_parse_number(pv, replace(tok.data, '.', decimalpoint));
+                mem := pv;
+            when '[' then
+                declare
+                    e_list pljson_list;
+                begin
+                    indx := indx + 1;
+                    gs_parseArr(tokens, indx, e_list);
+                    mem := pljson_list.gs_to_json_value(e_list);
+                end;
+            when '{' then
+                declare
+                    temp_pj pljson;
+                begin
+                    indx := indx + 1;
+                    gs_parseObj(tokens, indx, temp_pj);
+                    mem := pljson.gs_to_json_value(temp_pj);
+                end;
+            else
+                p_error('Found '||tok.type_name, tok);
+        end case;
+        mem.mapname := mem_name;
+        mem.mapindx := mem_indx;
+        indx := indx + 1;
+    end;
+
+    procedure gs_parseObj(tokens rToken[], indx inout integer, obj out pljson) as
+        type memmap is table of number index by varchar2(4000);
+        mymap memmap;
+        nullelemfound boolean := false;
+
+
+        tok rToken;
+        mem_name varchar(4000);
+        arr pljson_value[] := array[]::pljson_value[];
+    begin
+
+        while (indx <= tokens.count) loop
+            tok := tokens[indx];
+            case tok.type_name
+                when 'STRING' then
+                    --member
+                    mem_name := substr(tok.data, 1, 4000);
+                    -- begin exception
+                    if (mem_name is null) then
+                        if (nullelemfound) then
+                            p_error('Duplicate empty member: ', tok);
+                        else
+                            nullelemfound := true;
+                        end if;
+                    elsif (mymap(mem_name) is not null) then
+                        p_error('Duplicate member name: '||mem_name, tok);
+                    end if;
+
+                    indx := indx + 1;
+                    if (indx > tokens.count) then p_error('Unexpected end of input', tok); end if;
+                    tok := tokens[indx];
+                    indx := indx + 1;
+                    if (indx > tokens.count) then p_error('Unexpected end of input', tok); end if;
+                    if (tok.type_name = ':') then
+                        --parse
+                        declare
+                            jmb pljson_value;
+                            x number;
+                        begin
+                            x := arr.count + 1;
+                            gs_parseMem(tokens, indx, mem_name, x, jmb);
+                            arr.extend;
+                            arr[x] := jmb;
+                        end;
+
+                    else
+                        p_error('Expected '':''', tok);
+                    end if;
+                    --move indx forward if ',' is found
+                    if (indx > tokens.count) then p_error('Unexpected end of input', tok); end if;
+
+                    tok := tokens[indx];
+                    if (tok.type_name = ',') then
+
+                        indx := indx + 1;
+                        tok := tokens[indx];
+                        if (tok.type_name = '}') then --premature exit
+                            p_error('Premature exit in json object', tok);
+                        end if;
+                    elsif (tok.type_name != '}') then
+                        p_error('A comma separator is probably missing', tok);
+                    end if;
+                when '}' then
+                    obj := pljson.gs_pljson();
+                    obj.pljson_list_data := arr;
+                    return;
+                else
+                    p_error('Expected string or }', tok);
+            end case;
+        end loop;
+
+        p_error('} not found', tokens[indx-1]);
+    end;
+
+    function gs_parser(str varchar2) return pljson as
+        tokens rToken[];
+        obj pljson;
+        indx integer := 1;
+        jsrc json_src;
+    begin
+        -- update_decimalpoint();
+        jsrc := gs_prepareVarchar2(str);
+        gs_lexer(jsrc, tokens);
+        if (tokens[indx].type_name = '{') then
+            indx := indx + 1;
+            gs_parseObj(tokens, indx, obj);
+        else
+            raise exception 'JSON Parser exception - no { start found';
+        end if;
+        if (tokens.count != indx) then
+            p_error('} should end the JSON object', tokens[indx]);
+        end if;
+
+        return obj;
+    end;
+
+    function gs_parse_list(str varchar2) return pljson_list as
+        tokens rToken[];
+        obj pljson_list;
+        indx integer := 1;
+        jsrc json_src;
+    begin
+        -- update_decimalpoint();
+        jsrc := gs_prepareVarchar2(str);
+        gs_lexer(jsrc, tokens);
+        if (tokens[indx].type_name = '[') then
+            indx := indx + 1;
+            gs_parseArr(tokens, indx, obj);
+        else
+            raise exception 'JSON List Parser exception - no [ start found';
+        end if;
+        if (tokens.count != indx) then
+            p_error('] should end the JSON List object', tokens[indx]);
+        end if;
+
+        return obj;
+    end;
+
+    function gs_parse_any(str varchar2) return pljson_value as
+        tokens rToken[];
+        obj pljson_list;
+        ret pljson_value;
+        indx integer := 1;
+        jsrc json_src;
+    begin
+        -- update_decimalpoint();
+        jsrc := gs_prepareVarchar2(str);
+        gs_lexer(jsrc, tokens);
+        tokens[tokens.count+1].type_name := ']';
+        gs_parseArr(tokens, indx, obj);
+        if (tokens.count != indx) then
+            p_error('] should end the JSON List object', tokens[indx]);
+        end if;
+        ret = pljson_list.gs_head(obj);
+        return ret;
+    end;
+
+    function gs_parser(str clob) return pljson as
+        tokens rToken[];
+        obj pljson;
+        indx integer := 1;
+        jsrc json_src;
+    begin
+        -- update_decimalpoint();
+        --dbe_output.print_line('Using clob');
+        jsrc := gs_prepareClob(str);
+        gs_lexer(jsrc, tokens);
+
+        -- for i in 1 .. 
tokens.count loop + -- print_token(tokens[i]); + -- end loop; + + if (tokens[indx].type_name = '{') then + indx := indx + 1; + gs_parseObj(tokens, indx, obj); + else + raise exception 'JSON Parser exception - no { start found'; + end if; + if (tokens.count != indx) then + p_error('} should end the JSON object', tokens[indx]); + end if; + + return obj; + end; + + function gs_parse_list(str clob) return pljson_list as + tokens rToken[]; + obj pljson_list; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareClob(str); + gs_lexer(jsrc, tokens); + if (tokens[indx].type_name = '[') then + indx := indx + 1; + gs_parseArr(tokens, indx, obj); + else + raise exception 'JSON List Parser exception - no [ start found'; + end if; + if (tokens.count != indx) then + p_error('] should end the JSON List object', tokens[indx]); + end if; + + return obj; + end; + + + function gs_parse_any(str clob) return pljson_value as + tokens rToken[]; + obj pljson_list; + ret pljson_value; + indx integer := 1; + jsrc json_src; + begin + -- update_decimalpoint(); + jsrc := gs_prepareClob(str); + gs_lexer(jsrc, tokens); + tokens[tokens.count+1].type_name := ']'; + gs_parseArr(tokens, indx, obj); + if (tokens.count != indx) then + p_error('] should end the JSON List object', tokens[indx]); + end if; + ret = pljson_list.gs_head(obj); + return ret; + end; + + procedure gs_remove_duplicates(obj inout pljson) as + type memberlist is table of pljson_value index by varchar2(4000); + members memberlist; + nulljsonvalue pljson_value; + validated pljson; + indx varchar2(4000); + tmp pljson_value; + begin + + validated := pljson.gs_pljson(); + for i in 1 .. pljson.gs_count(obj) loop + tmp = pljson.gs_get(obj, i); + if (tmp.mapname is null) then + nulljsonvalue := pljson.gs_get(obj, i); + else + tmp = pljson.gs_get(obj, i); + members(tmp.mapname) := pljson.gs_get(obj, i); + end if; + end loop; + + pljson.gs_check_duplicate(validated, false); + indx := members.first; + loop + exit when indx is null; + pljson.gs_put(validated, indx, members(indx)); + indx := members.next(indx); + end loop; + + if (nulljsonvalue is not null) then + pljson.gs_put(validated, '', nulljsonvalue); + end if; + validated.check_for_duplicate := obj.check_for_duplicate; + obj := validated; + end; + + function gs_get_version() return varchar2 as + begin + return 'version1.0'; + end; + +end pljson_parser; +/ + +create or replace package pljson_printer as + + indent_string varchar2(10) := ' '; --chr(9); for tab + --newline_char varchar2(2) := chr(10); -- Mac style + newline_char varchar2(2) := chr(13); -- Linux style + ascii_output boolean not null := true; + empty_string_as_null boolean not null := false; + escape_solidus boolean not null := false; + + function gs_pretty_print(obj pljson, spaces boolean default true, line_length number default 0) return varchar2; + function gs_pretty_print_list(obj pljson_list, spaces boolean default true, line_length number default 0) return varchar2; + function gs_pretty_print_any(json_part pljson_value, spaces boolean default true, line_length number default 0) return varchar2; + procedure gs_pretty_print(obj pljson, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true); + procedure gs_pretty_print_list(obj pljson_list, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true); + procedure gs_pretty_print_any(json_part pljson_value, spaces boolean default true, buf inout clob, 
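+    -- (a line_length of 0 disables soft wrapping; erase_clob, declared just
+    -- below, clears buf before the pretty-printed text is appended)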
line_length number default 0, erase_clob boolean default true); + + procedure gs_dbms_output_clob(my_clob clob, delim varchar2, jsonp varchar2 default null); + procedure htp_output_clob(my_clob clob, jsonp varchar2 default null); + -- made public just for testing/profiling... + function gs_escapeString(str varchar2) return varchar2; + +end pljson_printer; +/ + +create or replace package body pljson_printer as + + max_line_len number := 0; + cur_line_len number := 0; + + type Tmap_char_string is table of varchar2(40) index by varchar2(1); /* index by unicode char */ + char_map Tmap_char_string; + char_map_escape_solidus boolean := escape_solidus; + char_map_ascii_output boolean := ascii_output; + + function gs_llcheck(str varchar2) return varchar2 as + begin + --dbe_output.print_line(cur_line_len || ' : ' || str); + if (max_line_len > 0 and length(str)+cur_line_len > max_line_len) then + cur_line_len := length(str); + return newline_char || str; + else + cur_line_len := cur_line_len + length(str); + return str; + end if; + end; + + -- escapes a single character. + function gs_escapeChar(ch char) return varchar2 deterministic is + result varchar2(20); + begin + --backspace b = U+0008 + --formfeed f = U+000C + --newline n = U+000A + --carret r = U+000D + --tabulator t = U+0009 + result := ch; + + case ch + when chr( 8) then result := '\b'; + when chr( 9) then result := '\t'; + when chr(10) then result := '\n'; + when chr(12) then result := '\f'; + when chr(13) then result := '\r'; + when chr(34) then result := '\"'; + when chr(47) then if (escape_solidus) then result := '\/'; end if; + when chr(92) then result := '\\'; + else if (ascii(ch) >= 0 and ascii(ch) < 32) then + result := '\u' || replace(substr(to_char(ascii(ch), 'XXXX'), 2, 4), ' ', '0'); + elsif (ascii_output) then + result := replace(asciistr(ch), '\', '\u'); + end if; + end case; + return result; + end; + + function gs_escapeString(str varchar2) return varchar2 as + sb varchar2(32767) := ''; + buf varchar2(40); + ch varchar2(1); /* unicode char */ + begin + + if (str is null) then return ''; end if; + + -- clear the cache if global parameters have been changed + if char_map_escape_solidus <> escape_solidus or + char_map_ascii_output <> ascii_output + then + char_map.delete; + char_map_escape_solidus := escape_solidus; + char_map_ascii_output := ascii_output; + end if; + + for i in 1 .. length(str) loop + ch := substr(str, i, 1) ; + + --begin + -- it this char has already been processed, I have cached its escaped value + -- buf := char_map(ch); + + --exception when no_Data_found then + -- otherwise, i convert the value and add it to the cache + -- buf := gs_escapeChar(ch); + -- char_map(ch) := buf; + --end; + + buf := ch; + sb := sb || buf; + end loop; + return sb; + end; + + function gs_newline(spaces boolean) return varchar2 as + begin + cur_line_len := 0; + if (spaces) then return newline_char; else return ''; end if; + end; + + function gs_tab(indent number, spaces boolean) return varchar2 as + i varchar(200) := ''; + begin + if (not spaces) then return ''; end if; + for x in 1 .. 
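+        -- append indent_string once per nesting level (gs_tab already
+        -- returned '' above when spaces is false)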
indent loop i := i || indent_string; end loop;
+        return i;
+    end;
+
+    function gs_getCommaSep(spaces boolean) return varchar2 as
+    begin
+        if (spaces) then return ', '; else return ','; end if;
+    end;
+
+    function gs_getMemName(mem pljson_value, spaces boolean) return varchar2 as
+    begin
+        if (spaces) then
+            return gs_llcheck('"'||gs_escapeString(mem.mapname)||'"') || gs_llcheck(' : ');
+        else
+            return gs_llcheck('"'||gs_escapeString(mem.mapname)||'"') || gs_llcheck(':');
+        end if;
+    end;
+
+    /* Clob method start here */
+    procedure gs_add_to_clob(buf_lob inout clob, buf_str inout varchar2, str varchar2) as
+    begin
+        if (lengthb(str) > 32767 - lengthb(buf_str)) then
+            dbe_lob.append(buf_lob, buf_str);
+            buf_str := str;
+        else
+            buf_str := buf_str || str;
+        end if;
+    end;
+
+    procedure gs_flush_clob(buf_lob inout clob, buf_str inout varchar2) as
+    begin
+        dbe_lob.append(buf_lob, buf_str);
+    end;
+    /* Clob method end here */
+
+    /* Varchar2 method start here */
+    procedure gs_add_buf(buf inout varchar2, str varchar2) as
+    begin
+        if (lengthb(str)>32767-lengthb(buf)) then
+            raise exception 'Length of result JSON more than 32767 bytes. Use to_clob() procedures';
+        end if;
+        buf := buf || str;
+    end;
+
+    procedure gs_ppString(elem pljson_value, buf inout varchar2) is
+        _offset number := 1;
+        v_str varchar(5000);
+        amount number := 5000;
+    begin
+        if empty_string_as_null and elem.extended_str is null and elem.str is null then
+            gs_add_buf(buf, 'null');
+        else
+            -- gs_add_buf(buf, case when elem.num = 1 then '"' else '/**/' end);
+            if (elem.num = 1) then
+                gs_add_buf(buf, '"');
+            else
+                gs_add_buf(buf, '/**/');
+            end if;
+
+            if (elem.extended_str is not null) then
+                while (_offset <= dbe_lob.get_length(elem.extended_str)) loop
+                    v_str := PKG_UTIL.lob_read(elem.extended_str, amount, _offset, 0);
+                    if (elem.num = 1) then
+                        gs_add_buf(buf, gs_escapeString(v_str));
+                    else
+                        gs_add_buf(buf, v_str);
+                    end if;
+                    _offset := _offset + amount;
+                end loop;
+            else
+                if (elem.num = 1) then
+                    while (_offset <= length(elem.str)) loop
+                        v_str := substr(elem.str, _offset, amount);
+                        gs_add_buf(buf, gs_escapeString(v_str));
+                        _offset := _offset + amount;
+                    end loop;
+                else
+                    gs_add_buf(buf, elem.str);
+                end if;
+            end if;
+
+            -- gs_add_buf(buf, case when elem.num = 1 then '"' else '/**/' end);
+            if (elem.num = 1) then
+                gs_add_buf(buf, '"');
+            else
+                gs_add_buf(buf, '/**/');
+            end if;
+        end if;
+    end;
+
+    procedure gs_ppObj(obj pljson, indent number, buf inout varchar2, spaces boolean);
+
+    procedure gs_ppEA(input pljson_list, indent number, buf inout varchar2, spaces boolean) as
+        elem pljson_value;
+        arr pljson_value[];
+        str varchar2(400);
+    begin
+        arr := input.pljson_list_data;
+        for y in 1 .. 
arr.count loop + elem := arr[y]; + + -- if (elem is not null) then + case elem.typeval + /* number */ + when 4 then + str := pljson_value.gs_number_toString(elem); + gs_add_buf(buf, gs_llcheck(str)); + /* string */ + when 3 then + gs_ppString(elem, buf); + /* bool */ + when 5 then + if (pljson_value.gs_get_bool(elem)) then + gs_add_buf(buf, gs_llcheck('true')); + else + gs_add_buf(buf, gs_llcheck('false')); + end if; + /* null */ + when 6 then + gs_add_buf(buf, gs_llcheck('null')); + /* array */ + when 2 then + gs_add_buf( buf, gs_llcheck('[')); + gs_ppEA(pljson_list.gs_pljson_list(elem), indent, buf, spaces); + gs_add_buf( buf, gs_llcheck(']')); + /* object */ + when 1 then + gs_ppObj(pljson.gs_pljson(elem), indent, buf, spaces); + else + gs_add_buf(buf, gs_llcheck(pljson_value.gs_get_type(elem))); + end case; + -- end if; + if (y != arr.count) then gs_add_buf(buf, gs_llcheck(gs_getCommaSep(spaces))); end if; + end loop; + end; + + -- Mem = Member + procedure gs_ppMem(mem pljson_value, indent number, buf inout varchar2, spaces boolean) as + str varchar2(400) := ''; + begin + gs_add_buf(buf, gs_llcheck(gs_tab(indent, spaces)) || gs_getMemName(mem, spaces)); + case mem.typeval + /* number */ + when 4 then + str := pljson_value.gs_number_toString(mem); + gs_add_buf(buf, gs_llcheck(str)); + /* string */ + when 3 then + gs_ppString(mem, buf); + /* bool */ + when 5 then + if (pljson_value.gs_get_bool(mem)) then + gs_add_buf(buf, gs_llcheck('true')); + else + gs_add_buf(buf, gs_llcheck('false')); + end if; + /* null */ + when 6 then + gs_add_buf(buf, gs_llcheck('null')); + /* array */ + when 2 then + gs_add_buf(buf, gs_llcheck('[')); + gs_ppEA(pljson_list.gs_pljson_list(mem), indent, buf, spaces); + gs_add_buf(buf, gs_llcheck(']')); + /* object */ + when 1 then + gs_ppObj(pljson.gs_pljson(mem), indent, buf, spaces); + else + gs_add_buf(buf, gs_llcheck(pljson_value.gs_get_type(mem))); + end case; + end; + + procedure gs_ppObj(obj pljson, indent number, buf inout varchar2, spaces boolean) as + begin + gs_add_buf(buf, gs_llcheck('{') || gs_newline(spaces)); + for m in 1 .. 
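+        -- print each member via gs_ppMem; every member but the last is
+        -- followed by a comma before the newline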
obj.pljson_list_data.count loop + gs_ppMem(obj.pljson_list_data[m], indent+1, buf, spaces); + if (m != obj.pljson_list_data.count) then + gs_add_buf(buf, gs_llcheck(',') || gs_newline(spaces)); + else + gs_add_buf(buf, gs_newline(spaces)); + end if; + end loop; + gs_add_buf(buf, gs_llcheck(gs_tab(indent, spaces)) || gs_llcheck('}')); -- || chr(13); + end; + + function gs_pretty_print(obj pljson, spaces boolean default true, line_length number default 0) return varchar2 as + buf varchar2(32767) := ''; + begin + max_line_len := line_length; + cur_line_len := 0; + gs_ppObj(obj, 0, buf, spaces); + return buf; + end; + + function gs_pretty_print_list(obj pljson_list, spaces boolean default true, line_length number default 0) return varchar2 as + buf varchar2(32767) :=''; + begin + max_line_len := line_length; + cur_line_len := 0; + gs_add_buf(buf, gs_llcheck('[')); + gs_ppEA(obj, 0, buf, spaces); + gs_add_buf(buf, gs_llcheck(']')); + return buf; + end; + + function gs_pretty_print_any(json_part pljson_value, spaces boolean default true, line_length number default 0) return varchar2 as + buf varchar2(32767) := ''; + begin + case json_part.typeval + /* number */ + when 4 then + buf := pljson_value.gs_number_toString(json_part); + /* string */ + when 3 then + gs_ppString(json_part, buf); + /* bool */ + when 5 then + if (pljson_value.gs_get_bool(json_part)) then buf := 'true'; else buf := 'false'; end if; + /* null */ + when 6 then + buf := 'null'; + /* array */ + when 2 then + buf := gs_pretty_print_list(pljson_list.gs_pljson_list(json_part), spaces, line_length); + /* object */ + when 1 then + buf := gs_pretty_print(pljson.gs_pljson(json_part), spaces, line_length); + else + buf := 'weird error: ' || pljson_value.gs_get_type(json_part); + end case; + return buf; + end; + + procedure gs_pretty_print(obj pljson, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true) as + buf_str varchar2(32767); + amount number := dbe_lob.get_length(buf); + begin + if (erase_clob and amount > 0) then + dbe_lob.STRIP(buf, 0); + end if; + + buf_str := gs_pretty_print(obj, spaces, line_length); + gs_flush_clob(buf, buf_str); + end; + + procedure gs_pretty_print_list(obj pljson_list, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true) as + buf_str varchar2(32767); + amount number := dbe_lob.get_length(buf); + begin + if (erase_clob and amount > 0) then + dbe_lob.STRIP(buf, 0); + end if; + + buf_str := gs_pretty_print_list(obj, spaces, line_length); + gs_flush_clob(buf, buf_str); + end; + + procedure gs_pretty_print_any(json_part pljson_value, spaces boolean default true, buf inout clob, line_length number default 0, erase_clob boolean default true) as + buf_str varchar2(32767) := ''; + amount number := dbe_lob.get_length(buf); + begin + if (erase_clob and amount > 0) then + dbe_lob.STRIP(buf, 0); + end if; + + buf_str := gs_pretty_print_any(json_part, spaces, line_length); + gs_flush_clob(buf, buf_str); + end; + + procedure gs_dbms_output_clob(my_clob clob, delim varchar2, jsonp varchar2 default null) as + prev number := 1; + indx number := 1; + size_of_nl number := length(delim); + v_str varchar2(32767); + amount number; + max_string_chars number := 5000; + begin + + if (jsonp is not null) then dbe_output.print_line(jsonp||'('); end if; + while (indx != 0) loop + --read every line + indx := dbe_lob.match(my_clob, delim, prev+1); + + if (indx = 0) then + --emit from prev to end; + amount := max_string_chars; + + 
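+                -- no delimiter left: drain the rest of the clob in reads of
+                -- max_string_chars characters until the end is reached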
loop + -- dbe_lob.read(my_clob, amount, prev, v_str); dbe_lob.read not exists + v_str := PKG_UTIL.lob_read(my_clob, amount, prev, 0); + + dbe_output.print_line(v_str); + prev := prev+amount; + exit when prev >= dbe_lob.get_length(my_clob); + end loop; + else + amount := indx - prev; + if (amount > max_string_chars) then + amount := max_string_chars; + + loop + -- dbe_lob.read(my_clob, amount, prev, v_str); dbe_lob.read not exists + v_str := PKG_UTIL.lob_read(my_clob, amount, prev, 0); + + dbe_output.print_line(v_str); + prev := prev+amount; + amount := indx - prev; + exit when prev >= indx - 1; + if (amount > max_string_chars) then + amount := max_string_chars; + end if; + end loop; + prev := indx + size_of_nl; + else + -- dbe_lob.read(my_clob, amount, prev, v_str); dbe_lob.read not exists + v_str := PKG_UTIL.lob_read(my_clob, amount, prev, 0); + dbe_output.print_line(v_str); + prev := indx + size_of_nl; + end if; + end if; + + end loop; + if (jsonp is not null) then dbe_output.print_line(')'); end if; + end; + + procedure htp_output_clob(my_clob clob, jsonp varchar2 default null) as + l_amt number default 4096; + l_off number default 1; + l_str varchar2(32000); + begin + raise NOTICE '%', 'NOT SUPPORT NOW'; + -- if (jsonp is not null) then htp.prn(jsonp||'('); end if; + --begin + -- loop + -- dbe_lob.read( my_clob, l_amt, l_off, l_str); + -- htp.prn( l_str ); + -- l_off := l_off+l_amt; + -- end loop; + --exception + -- when no_data_found then NULL; + --end; + -- if (jsonp is not null) then htp.prn(')'); end if; + end; + +end pljson_printer; +/ + +create or replace package body pljson_ext as + + /* + procedure gs_next_char as + begin + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end; + --skip ws + procedure skipws as begin while (buf in (chr(9), chr(10), chr(13), ' ')) loop gs_next_char; end loop; end; + */ + + --Json Path parser + function gs_parsePath(json_path varchar2, base number default 1) return pljson_list as + build_path varchar2(32767) := '['; + buf varchar2(4); + endstring varchar2(1); + indx number := 1; + ret pljson_list; + begin + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + + while (buf is not null) loop + if (buf = '.') then + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + + if (buf is null) then + raise exception 'JSON Path parse error: . 
is not a valid json_path end'; + end if; + if (not regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) then + raise exception 'JSON Path parse error: alpha-numeric character'; + end if; + + if (build_path != '[') then + build_path := build_path || ','; + end if; + + build_path := build_path || '"'; + while (regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + build_path := build_path || '"'; + + elsif (buf = '[') then + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + --skip ws + while (buf in (chr(9), chr(10), chr(13), ' ')) loop + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + + if (buf is null) then + raise exception 'JSON Path parse error: [ is not a valid json_path end'; + end if; + if (buf in ('1','2','3','4','5','6','7','8','9') or (buf = '0' and base = 0)) then + if (build_path != '[') then + build_path := build_path || ','; + end if; + while (buf in ('0','1','2','3','4','5','6','7','8','9')) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + elsif (regexp_like(buf, '^(\"|\'')', 'c')) then + endstring := buf; + if (build_path != '[') then + build_path := build_path || ','; + end if; + build_path := build_path || '"'; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + + if (buf is null) then + raise exception 'JSON Path parse error: premature json_path end'; + end if; + while (buf != endstring) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + if (buf is null) then + raise exception 'JSON Path parse error: premature json_path end'; + end if; + if (buf = '\') then + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + build_path := build_path || '\' || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end if; + end loop; + build_path := build_path || '"'; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + else + raise exception 'JSON Path parse error'; + end if; + --skip ws + while (buf in (chr(9), chr(10), chr(13), ' ')) loop + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + if (buf is null) then + raise exception 'JSON Path parse error: premature json_path end'; + end if; + if (buf != ']') then + raise exception 'JSON Path parse error: no array ending found. 
found: '; + end if; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + --skip ws + while (buf in (chr(9), chr(10), chr(13), ' ')) loop + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + elsif (build_path = '[') then + if (not regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) then + raise exception 'JSON Path parse error'; + end if; + build_path := build_path || '"'; + while (regexp_like(buf, '^[[:alnum:]\_ ]+', 'c')) loop + build_path := build_path || buf; + -- gs_next_char + if (indx <= length(json_path)) then + buf := substr(json_path, indx, 1); + indx := indx + 1; + else + buf := null; + end if; + end loop; + build_path := build_path || '"'; + else + raise exception 'JSON Path parse error'; + end if; + + end loop; + + build_path := build_path || ']'; + build_path := replace(replace(replace(replace(replace(build_path, chr(9), '\t'), chr(10), '\n'), chr(13), '\f'), chr(8), '\b'), chr(14), '\r'); + + ret := pljson_list.gs_pljson_list(build_path); + if (base != 1) then + --fix base 0 to base 1 + declare + elem pljson_value; + begin + for i in 1 .. ret.count loop + elem := pljson_list.gs_get(ret, i); + if (pljson_value.gs_is_number(elem)) then + pljson_list.gs_replace(ret, i, pljson_value.gs_get_number(elem)+1); + end if; + end loop; + end; + end if; + return ret; + end; + + --JSON Path getters + function gs_get_json_value(obj pljson, v_path varchar2, base number default 1) return pljson_value as + path pljson_list; + ret pljson_value; + o pljson; + l pljson_list; + begin + + path := gs_parsePath(v_path, base); + ret := pljson.gs_to_json_value(obj); + if (pljson_list.gs_count(path) = 0) then + return ret; + end if; + + for i in 1 .. 
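+        -- walk the parsed path one step at a time: a string step indexes an
+        -- object member, a numeric step indexes an array element (or the n-th
+        -- value of an object); e.g. a path such as 'a.b[1]' resolves member
+        -- "a", then member "b", then element 1 (base 1 by default)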
pljson_list.gs_count(path) loop + if (pljson_value.gs_is_string(pljson_list.gs_get(path, i))) then + --string fetch only on json + o := pljson.gs_pljson(ret); + ret := pljson.gs_get(o, pljson_value.gs_get_string(pljson_list.gs_get(path, i))); + else + --number fetch on json and json_list + if (pljson_value.gs_is_array(ret)) then + l := pljson_list.gs_pljson_list(ret); + ret := pljson_list.gs_get(l, pljson_value.gs_get_number(pljson_list.gs_get(path, i))); + else + o := pljson.gs_pljson(ret); + l := pljson.gs_get_values(o); + ret := pljson_list.gs_get(l, pljson_value.gs_get_number(pljson_list.gs_get(path, i))); + end if; + end if; + end loop; + + return ret; + end; + + --JSON Path getters + function gs_get_string(obj pljson, path varchar2, base number default 1) return varchar2 as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_string(temp)) then + return null; + else + return pljson_value.gs_get_string(temp); + end if; + end; + + function gs_get_number(obj pljson, path varchar2, base number default 1) return number as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_number(temp)) then + return null; + else + return pljson_value.gs_get_number(temp); + end if; + end; + + function gs_get_double(obj pljson, path varchar2, base number default 1) return binary_double as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_number(temp)) then + return null; + else + return pljson_value.gs_get_double(temp); + end if; + end; + + function gs_get_json(obj pljson, path varchar2, base number default 1) return pljson as + temp pljson_value; + ret pljson; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_object(temp)) then + return null; + else + ret = pljson.gs_pljson(temp); + return ret; + end if; + end; + + function gs_get_json_list(obj pljson, path varchar2, base number default 1) return pljson_list as + temp pljson_value; + ret pljson_list; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_array(temp)) then + return null; + else + ret = pljson_list.gs_pljson_list(temp); + return ret; + end if; + end; + + function gs_get_bool(obj pljson, path varchar2, base number default 1) return boolean as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not pljson_value.gs_is_bool(temp)) then + return null; + else + return pljson_value.gs_get_bool(temp); + end if; + end; + + function gs_get_date(obj pljson, path varchar2, base number default 1) return date as + temp pljson_value; + begin + temp := gs_get_json_value(obj, path, base); + if (temp is null or not gs_is_date(temp)) then + return null; + else + return pljson_ext.gs_to_date(temp); + end if; + end; + + --extra function checks if number has no fraction + function gs_is_integer(v pljson_value) return boolean as + num number; + num_double binary_double; + int_number number(38); + int_double binary_double; + begin + + if (not pljson_value.gs_is_number(v)) then + raise exception 'not a number-value'; + end if; + + if (pljson_value.gs_is_number_repr_number(v)) then + num := pljson_value.gs_get_number(v); + int_number := trunc(num); + return (int_number = num); + elsif (pljson_value.gs_is_number_repr_double(v)) then + num_double := pljson_value.gs_get_double(v); + int_double := trunc(num_double); + return 
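+            -- integral iff truncation changes nothing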
(int_double = num_double); + else + return false; + end if; + return false; + end; + + --extension enables json to store dates without compromising the implementation + function gs_to_json_value(d date) return pljson_value as + ret pljson_value; + begin + ret = pljson_value.gs_pljson_value(to_char(d, format_string)); + return ret; + end; + + --notice that a date type in json is also a varchar2 + function gs_is_date(v pljson_value) return boolean as + temp date; + begin + temp := pljson_ext.gs_to_date(v); + return true; + -- exception + -- when others then + -- return false; + end; + + --conversion is needed to extract dates + function gs_to_date(v pljson_value) return date as + begin + if (pljson_value.gs_is_string(v)) then + -- return STANDARD.to_date(pljson_value.gs_get_string(v), format_string); + return to_date(pljson_value.gs_get_string(v), format_string); + else + raise exception 'Anydata did not contain a date-value'; + end if; + end; + + -- alias so that old code doesn't break + function gs_to_date2(v pljson_value) return date as + begin + return gs_to_date(v); + end; + + function gs_decodeBase64Clob2Blob(p_clob clob) return blob as + r_blob blob; + clob_size number; + pos number; + c_buf varchar2(32767); + r_buf raw(32767); + v_read_size number; + v_line_size number; + begin + + dbe_lob.create_temporary(r_blob, false, 0); + clob_size := dbe_lob.get_length(p_clob); + v_line_size := 64; + if clob_size >= 65 and dbe_lob.substr(p_clob, 1, 65) = chr(10) then + v_line_size := 65; + elsif clob_size >= 66 and dbe_lob.substr(p_clob, 1, 65) = chr(13) then + v_line_size := 66; + elsif clob_size >= 77 and dbe_lob.substr(p_clob, 1, 77) = chr(10) then + v_line_size := 77; + elsif clob_size >= 78 and dbe_lob.substr(p_clob, 1, 77) = chr(13) then + v_line_size := 78; + end if; + v_read_size := floor(32767/v_line_size)*v_line_size; + + pos := 1; + while (pos < clob_size) loop + c_buf := PKG_UTIL.lob_read(p_clob, v_read_size, pos, 0); + r_buf := decode(PKG_UTIL.raw_cast_from_varchar2(c_buf), 'base64'); + -- r_buf := PKG_UTIL.raw_cast_from_varchar2(c_buf); + dbe_lob.write_append(r_blob, dbe_raw.get_length(r_buf), r_buf); + pos := pos + v_read_size; + end loop; + + return r_blob; + end; + + function gs_encodeBase64Blob2Clob(p_blob blob) return clob as + r_clob clob; + c_step integer := 12000; + c_buf varchar2(32767); + begin + + if p_blob is not null then + dbe_lob.create_temporary(r_clob, false, 0); + for i in 0 .. trunc((dbe_lob.get_length(p_blob) - 1)/c_step) loop + + c_buf := encode(PKG_UTIL.raw_cast_to_varchar2(dbe_lob.substr(p_blob, c_step, i * c_step + 1))::bytea, 'base64'); + -- c_buf := PKG_UTIL.raw_cast_to_varchar2(dbe_lob.substr(p_blob, c_step, i * c_step + 1)); + if substr(c_buf, length(c_buf)) != chr(10) then + c_buf := c_buf || CHR(13) || CHR(10); + end if; + dbe_lob.write_append(r_clob, length(c_buf), c_buf); + end loop; + end if; + + return r_clob; + end; + + /* JSON Path putter internal function */ + procedure gs_put_internal(obj inout pljson, v_path varchar2, elem pljson_value, base number) as + val pljson_value; + path pljson_list; + backreference pljson_list; + + keyval pljson_value; + keynum number; + keystring varchar2(4000); + temp pljson_value; + obj_temp pljson; + list_temp pljson_list; + inserter pljson_value; + begin + val := elem; + path := gs_parsePath(v_path, base); + if (pljson_list.gs_count(path) = 0) then + raise exception 'PLJSON_EXT put error: cannot put with empty string.'; + end if; + + --build backreference + for i in 1 .. 
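+        -- pass 1: walk the path, materialising missing objects/arrays along
+        -- the way, and record in backreference the container visited at each
+        -- step for the second pass below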
pljson_list.gs_count(path) loop + --backreference.print(false); + keyval := pljson_list.gs_get(path, i); + if (pljson_value.gs_is_number(keyval)) then + --number index + keynum := pljson_value.gs_get_number(keyval); + if ((not pljson_value.gs_is_object(temp)) and (not pljson_value.gs_is_array(temp))) then + if (val is null) then + return; + end if; + pljson_list.gs_remove_last(backreference); + temp := pljson_list.gs_to_json_value(pljson_list.gs_pljson_list()); + pljson_list.gs_append(backreference, temp); + end if; + + if (pljson_value.gs_is_object(temp)) then + obj_temp := pljson.gs_pljson(temp); + if (pljson.gs_count(obj_temp) < keynum) then + if (val is null) then + return; + end if; + raise exception 'PLJSON_EXT put error: access object with too few members.'; + end if; + temp := pljson.gs_get(obj_temp, keynum); + else + list_temp := pljson_list.gs_pljson_list(temp); + if (pljson_list.gs_count(list_temp) < keynum) then + if (val is null) then + return; + end if; + --raise error or quit if val is null + for i in pljson_list.gs_count(list_temp)+1 .. keynum loop + pljson_list.gs_append(list_temp, pljson_value.gs_pljson_value()); + end loop; + pljson_list.gs_remove_last(backreference); + pljson_list.gs_append(backreference, list_temp); + end if; + + temp := pljson_list.gs_get(list_temp, keynum); + end if; + else + --string index + keystring := pljson_value.gs_get_string(keyval); + if (not pljson_value.gs_is_object(temp)) then + --backreference.print; + if (val is null) + then return; + end if; + pljson_list.gs_remove_last(backreference); + temp := pljson.gs_to_json_value(pljson.gs_pljson()); + pljson_list.gs_append(backreference, temp); + --raise_application_error(-20110, 'PLJSON_EXT put error: trying to access a non object with a string.'); + end if; + obj_temp := pljson.gs_pljson(temp); + temp := pljson.gs_get(obj_temp, keystring); + end if; + + if (temp is null) then + if (val is null) then + return; + end if; + + keyval := pljson_list.gs_get(path, i+1); + if (keyval is not null and pljson_value.gs_is_number(keyval)) then + temp := pljson_list.gs_to_json_value(pljson_list.gs_pljson_list()); + else + temp := pljson.gs_to_json_value(pljson.gs_pljson()); + end if; + end if; + pljson_list.gs_append(backreference, temp); + end loop; + + -- backreference.print(false); + -- path.print(false); + + --use backreference and path together + inserter := val; + for i in reverse 1 .. 
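+        -- pass 2: starting at the deepest container, write inserter into its
+        -- parent and bubble the updated container up towards the root; a null
+        -- inserter removes the addressed entry instead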
pljson_list.gs_count(backreference) loop + -- inserter.print(false); + if (i = 1) then + keyval := pljson_list.gs_get(path, 1); + if (pljson_value.gs_is_string(keyval)) then + keystring := pljson_value.gs_get_string(keyval); + else + keynum := pljson_value.gs_get_number(keyval); + declare + t1 pljson_value; + begin + t1 := pljson.gs_get(obj, keynum); + keystring := t1.mapname; + end; + end if; + if (inserter is null) then + pljson.gs_remove(obj, keystring); + else + pljson.gs_put(obj, keystring, inserter); + end if; + else + temp := pljson_list.gs_get(backreference, i-1); + if (pljson_value.gs_is_object(temp)) then + keyval := pljson_list.gs_get(path, i); + obj_temp := pljson.gs_pljson(temp); + if (pljson_value.gs_is_string(keyval)) then + keystring := pljson_value.gs_get_string(keyval); + else + keynum := pljson_value.gs_get_number(keyval); + declare + t1 pljson_value; + begin + t1 := pljson.gs_get(obj_temp, keynum); + keystring := t1.mapname; + end; + end if; + if (inserter is null) then + pljson.gs_remove(obj_temp, keystring); + if (obj_temp.count > 0) then + inserter := pljson.gs_to_json_value(obj_temp); + end if; + else + pljson.gs_put(obj_temp, keystring, inserter); + inserter := pljson.gs_to_json_value(obj_temp); + end if; + else + --array only number + keynum := pljson_value.gs_get_number(pljson_list.gs_get(path, i)); + list_temp := pljson_list.gs_pljson_list(temp); + pljson_list.gs_remove(list_temp, keynum); + if (not inserter is null) then + pljson_list.gs_append(list_temp, inserter, keynum); + inserter := pljson_list.gs_to_json_value(list_temp); + else + if (pljson_list.gs_count(list_temp) > 0) then + inserter := pljson_list.gs_to_json_value(list_temp); + end if; + end if; + end if; + end if; + + end loop; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem varchar2, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem number, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem binary_double, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem pljson, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson.gs_to_json_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem pljson_list, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_list.gs_to_json_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem boolean, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_value.gs_pljson_value(elem), base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem 
pljson_value, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, pljson_value.gs_pljson_value(), base); + else + gs_put_internal(obj, path, elem, base); + end if; + end; + + procedure gs_put(obj inout pljson, path varchar2, elem date, base number default 1) as + begin + if elem is null then + gs_put_internal(obj, path, gs_pljson_value(), base); + else + gs_put_internal(obj, path, pljson_ext.gs_to_json_value(elem), base); + end if; + end; + + procedure gs_remove(obj inout pljson, path varchar2, base number default 1) as + begin + pljson_ext.gs_put_internal(obj, path, null, base); + -- if (json_ext.gs_get_json_value(obj, path) is not null) then + -- end if; + end; + + --Pretty print with JSON Path + procedure gs_pp(obj pljson, v_path varchar2) as --using dbms_output.put_line + json_part pljson_value; + begin + json_part := gs_get_json_value(obj, v_path); + if (json_part is null) then + dbe_output.print_line(''); + else + dbe_output.print_line(pljson_printer.gs_pretty_print_any(json_part)); --escapes a possible internal string + end if; + end; + + procedure pp_htp(obj pljson, v_path varchar2) as --using htp.print + json_part pljson_value; + begin + /* + json_part := pljson_ext.gs_get_json_value(obj, v_path); + if (json_part is null) then + htp.print; + else + htp.print(pljson_printer.gs_pretty_print_any(json_part, false)); + end if; + */ + end; + + function gs_base64(binarydata blob) return pljson_list as + obj pljson_list; + c clob; + + v_clob_offset number := 1; + v_amount integer; + begin + + dbe_lob.create_temporary(c, false, 0); + c := gs_encodeBase64Blob2Clob(binarydata); + v_amount := dbe_lob.get_length(c); + v_clob_offset := 1; + --dbms_output.put_line('V amount: '||v_amount); + while (v_clob_offset < v_amount) loop + --dbms_output.put_line(v_offset); + --temp := ; + --dbms_output.put_line('size: '||length(temp)); + pljson_list.gs_append(obj, dbe_lob.SUBSTR(c, 4000, v_clob_offset)); + v_clob_offset := v_clob_offset + 4000; + end loop; + -- dbms_lob.freetemporary(c); + --dbms_output.put_line(obj.count); + --dbms_output.put_line(obj.get_last().to_char); + return obj; + end; + + function gs_base64(l pljson_list) return blob as + c clob; + b_ret blob; + begin + dbe_lob.create_temporary(c, false, 0); + for i in 1 .. 
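+        -- stitch the 4000-character chunks produced by gs_base64(blob) back
+        -- into a single clob before base64-decoding it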
pljson_list.gs_count(l) loop + dbe_lob.append(c, pljson_value.gs_get_string(pljson_list.gs_get(l, i))); + end loop; + b_ret := gs_decodeBase64Clob2Blob(c); + return b_ret; + end; + + function gs_encode(binarydata blob) return pljson_value as + obj pljson_value; + c clob; + begin + dbe_lob.create_temporary(c, false, 0); + c := gs_encodeBase64Blob2Clob(binarydata); + -- c := PKG_UTIL.lob_converttoclob(c, binarydata, 32767, 1, 1); + obj := pljson_value.gs_pljson_value(c); + return obj; + end; + + function gs_decode(v pljson_value) return blob as + c clob; + b_ret blob; + begin + c := pljson_value.gs_get_clob(v); + b_ret := gs_decodeBase64Clob2Blob(c); + -- b_ret := PKG_UTIL.lob_converttoblob(b_ret, c, 32767, 1, 1); + return b_ret; + end; + + procedure gs_blob2clob(b blob, c out clob, charset varchar2 default 'UTF8') as + v_dest_offset integer := 1; + v_src_offset integer := 1; + begin + dbe_lob.create_temporary(c, false, 0); + c := PKG_UTIL.lob_converttoclob(c, b, 32767, 1, 1); + end; + +end pljson_ext; +/ + +create or replace package body pljson_value as + + function gs_pljson_value() return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 6; + return json_value; + end; + + function gs_pljson_value(b boolean) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 5; + json_value.num := 0; + if(b) then + json_value.num := 1; + end if; + if(b is null) then + json_value.typeval := 6; + end if; + return json_value; + end; + + function gs_pljson_value(str varchar2, esc boolean default true) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 3; + if(esc) then + json_value.num := 1; + else + json_value.num := 0; + end if; --message to pretty printer + json_value.str := str; + return json_value; + end; + + function gs_pljson_value(str clob, esc boolean default true) return pljson_value as + json_value pljson_value; + max_string_chars number := 5000; + lengthcc number; + begin + json_value.typeval := 3; + if(esc) then + json_value.num := 1; + else + json_value.num := 0; + end if; --message to pretty printer + + if (dbe_lob.get_length(str) > max_string_chars) then + json_value.extended_str := str; + end if; + + if dbe_lob.get_length(str) > 0 then + json_value.str := PKG_UTIL.lob_read(str, max_string_chars, 1, 0); + end if; + return json_value; + end; + + function gs_pljson_value(num number) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 4; + json_value.num := num; + json_value.num_repr_number_p := 't'; + json_value.num_double := num; + if (to_number(json_value.num_double) = json_value.num) then + json_value.num_repr_double_p := 't'; + else + json_value.num_repr_double_p := 'f'; + end if; + + if(json_value.num is null) then + json_value.typeval := 6; + end if; + return json_value; + end; + + function gs_pljson_value(num_double binary_double) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 4; + json_value.num_double := num_double; + json_value.num_repr_double_p := 't'; + json_value.num := num_double; + -- if (to_binary_double(json_value.num) = json_value.num_double) then + if (to_number(json_value.num) = json_value.num_double) then + json_value.num_repr_number_p := 't'; + else + json_value.num_repr_number_p := 'f'; + end if; + if(json_value.num_double is null) then + json_value.typeval := 6; + end if; + return json_value; + end; + + function gs_pljson_value(elem pljson_element) return pljson_value as + json_value pljson_value; + begin + /* + case + when 
elem is of (pljson) then self.typeval := 1; + when elem is of (pljson_list) then self.typeval := 2; + else raise_application_error(-20102, 'PLJSON_VALUE init error (PLJSON or PLJSON_LIST allowed)'); + end case; + self.object_or_array := elem; + if(self.object_or_array is null) then self.typeval := 6; end if; + */ + raise exception 'pljson element not support now'; + return json_value; + end; + + function gs_pljson_value(arr pljson_list) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 2; + json_value.arr := arr; + return json_value; + end; + + function gs_pljson_value(obj pljson) return pljson_value as + json_value pljson_value; + begin + json_value.typeval := 1; + json_value.obj := obj; + return json_value; + end; + + function gs_makenull() return pljson_value as + json_value pljson_value; + begin + return json_value; + end; + + function gs_get_type(json_value pljson_value) return varchar2 as + ret varchar2; + begin + case json_value.typeval + when 1 then ret := 'object'; + when 2 then ret := 'array'; + when 3 then ret := 'string'; + when 4 then ret := 'number'; + when 5 then ret := 'bool'; + when 6 then ret := 'null'; + else + ret := 'unknown type'; + end case; + return ret; + end; + + function gs_get_string(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2 as + begin + if(json_value.typeval = 3) then + if(max_byte_size is not null) then + return substrb(json_value.str,1,max_byte_size); + elsif (max_char_size is not null) then + return substr(json_value.str,1,max_char_size); + else + return json_value.str; + end if; + end if; + return null; + end; + + procedure gs_get_string_clob(json_value pljson_value, buf inout clob) as + begin + dbe_lob.STRIP(buf, 0); + if(json_value.typeval = 3) then + if(json_value.extended_str is not null) then + dbe_lob.copy(buf, json_value.extended_str, dbe_lob.get_length(json_value.extended_str)); + else + dbe_lob.write_append(buf, length(json_value.str), json_value.str); + end if; + end if; + end; + + function gs_get_clob(json_value pljson_value) return clob as + begin + if(json_value.typeval = 3) then + if(json_value.extended_str is not null) then + return json_value.extended_str; + else + return json_value.str; + end if; + end if; + return null; + end; + + function gs_get_bool(json_value pljson_value) return boolean as + begin + if(json_value.typeval = 5) then + return json_value.num = 1; + end if; + return null; + end; + + function gs_get_number(json_value pljson_value) return number as + begin + if(json_value.typeval = 4) then + return json_value.num; + end if; + return null; + end; + + function gs_get_double(json_value pljson_value) return binary_double as + begin + if(json_value.typeval = 4) then + return json_value.num_double; + end if; + return null; + end; + + function gs_get_element(json_value pljson_value) return pljson_element as + begin + if (json_value.typeval in (1,2)) then + return json_value.object_or_array; + end if; + return null; + end; + + function gs_get_null(json_value pljson_value) return varchar2 as + begin + if(json_value.typeval = 6) then + return 'null'; + end if; + return null; + end; + + function gs_is_string(json_value pljson_value) return boolean as + begin + return json_value.typeval = 3; + end; + + function gs_is_bool(json_value pljson_value) return boolean as + begin + return json_value.typeval = 5; + end; + + function gs_is_number(json_value pljson_value) return boolean as + begin + return json_value.typeval = 4; + end; + + function 
gs_is_number_repr_number(json_value pljson_value) return boolean as
+    begin
+        if json_value.typeval != 4 then
+            return false;
+        end if;
+        return (json_value.num_repr_number_p = 't');
+    end;
+
+    function gs_is_number_repr_double(json_value pljson_value) return boolean as
+    begin
+        if json_value.typeval != 4 then
+            return false;
+        end if;
+        return (json_value.num_repr_double_p = 't');
+    end;
+
+    function gs_is_object(json_value pljson_value) return boolean as
+    begin
+        return json_value.typeval = 1;
+    end;
+
+    function gs_is_array(json_value pljson_value) return boolean as
+    begin
+        return json_value.typeval = 2;
+    end;
+
+    function gs_is_null(json_value pljson_value) return boolean as
+    begin
+        return json_value.typeval = 6;
+    end;
+
+    function gs_value_of(json_value pljson_value, max_byte_size number default null, max_char_size number default null) return varchar2 as
+    begin
+        case json_value.typeval
+            when 1 then return 'json object';
+            when 2 then return 'json array';
+            when 3 then return pljson_value.gs_get_string(json_value, max_byte_size, max_char_size);
+            when 4 then return pljson_value.gs_get_number(json_value);
+            when 5 then if(pljson_value.gs_get_bool(json_value)) then return 'true'; else return 'false'; end if;
+            else return null;
+        end case;
+    end;
+
+    procedure gs_parse_number(json_value inout pljson_value, str varchar2) as
+    begin
+        if json_value.typeval != 4 then
+            return;
+        end if;
+
+        json_value.num := to_number(str);
+        json_value.num_repr_number_p := 't';
+        -- json_value.num_double := to_binary_double(str);
+        json_value.num_double := to_number(str);
+        json_value.num_repr_double_p := 't';
+        -- if (to_binary_double(json_value.num) != json_value.num_double) then
+        if (to_number(json_value.num) != json_value.num_double) then
+            json_value.num_repr_number_p := 'f';
+        end if;
+        if (to_number(json_value.num_double) != json_value.num) then
+            json_value.num_repr_double_p := 'f';
+        end if;
+    exception
+        when others then
+            raise exception 'input str is not valid';
+    end;
+
+    function gs_number_toString(json_value pljson_value) return varchar2 as
+        num number;
+        num_double binary_double;
+        buf varchar2(4000);
+    begin
+        if (json_value.num_repr_number_p = 't') then
+            num := json_value.num;
+            if (num > 1e127) then
+                return '1e309'; -- json representation of infinity
+            end if;
+            if (num < -1e127) then
+                return '-1e309'; -- json representation of infinity
+            end if;
+
+            buf := to_char(num);
+            if (-1 < num and num < 0 and substr(buf, 1, 2) = '-.') then
+                buf := '-0' || substr(buf, 2);
+            elsif (0 < num and num < 1 and substr(buf, 1, 1) = '.') then
+                buf := '0' || buf;
+            end if;
+            return buf;
+        else
+            num_double := json_value.num_double;
+            if (num_double = +BINARY_DOUBLE_INFINITY) then
+                return '1e309'; -- json representation of infinity
+            end if;
+            if (num_double = -BINARY_DOUBLE_INFINITY) then
+                return '-1e309'; -- json representation of infinity
+            end if;
+
+            buf := to_char(num_double);
+            if (-1 < num_double and num_double < 0 and substr(buf, 1, 2) = '-.') then
+                buf := '-0' || substr(buf, 2);
+            elsif (0 < num_double and num_double < 1 and substr(buf, 1, 1) = '.') then
+                buf := '0' || buf;
+            end if;
+            return buf;
+        end if;
+    end;
+
+    function gs_to_char(json_value pljson_value, spaces boolean default true, chars_per_line number default 0) return varchar2 as
+    begin
+        return pljson_printer.gs_pretty_print_any(json_value, spaces, chars_per_line);
+    end;
+
+    -- procedure gs_to_clob(json_value pljson_value, buf inout clob, spaces boolean default false, chars_per_line number default 0, 
erase_clob boolean default true) as + -- begin + -- if(spaces is null) then + -- pljson_printer.gs_pretty_print_any(json_value, false, buf, chars_per_line, erase_clob); + -- else + -- pljson_printer.gs_pretty_print_any(json_value, spaces, buf, chars_per_line, erase_clob); + -- end if; + -- end; + + procedure gs_to_clob(json_value pljson_value, buf inout clob, spaces boolean default true, chars_per_line number default 0, erase_clob boolean default true) as + my_bufstr varchar2; + begin + my_bufstr := pljson_printer.gs_pretty_print_any(json_value, spaces, chars_per_line); + if (erase_clob) then + dbe_lob.STRIP(buf,0); + end if; + dbe_lob.append(buf, my_bufstr); + end; + + -- procedure gs_print(json_value pljson_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + -- my_clob clob; + -- begin + -- dbe_lob.create_temporary(my_clob, true); + -- if (chars_per_line>32512) then + -- pljson_printer.gs_pretty_print_any(json_value, spaces, my_clob, 32512); + -- else + -- pljson_printer.gs_pretty_print_any(json_value, spaces, my_clob, chars_per_line); + -- end if; + -- pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + -- end; + + procedure gs_print(json_value pljson_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + my_clob clob; + my_bufstr varchar2; + begin + dbe_lob.create_temporary(my_clob, true); + if (chars_per_line>32512) then + my_bufstr := pljson_printer.gs_pretty_print_any(json_value, spaces, 32512); + else + my_bufstr := pljson_printer.gs_pretty_print_any(json_value, spaces, chars_per_line); + end if; + dbe_lob.append(my_clob, my_bufstr); + pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + end; + + procedure htp(json_value pljson_value, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null) as + my_clob clob; + begin + raise exception 'htp not support'; + dbe_lob.create_temporary(my_clob, true); + pljson_printer.gs_pretty_print_any(json_value, spaces, my_clob, chars_per_line); + pljson_printer.htp_output_clob(my_clob, jsonp); + end; + +end pljson_value; +/ + +create or replace package body pljson_list as + + function gs_pljson_list() return pljson_list as + json_list pljson_list; + begin + return json_list; + end; + + function gs_pljson_list(str varchar2) return pljson_list as + json_list pljson_list; + begin + json_list := pljson_parser.gs_parse_list(str); + return json_list; + end; + + function gs_pljson_list(str clob) return pljson_list as + json_list pljson_list; + begin + json_list := pljson_parser.gs_parse_list(str); + return json_list; + end; + + function gs_pljson_list(str blob, charset varchar2 default 'UTF8') return pljson_list as + json_list pljson_list; + c_str clob; + begin + pljson_ext.gs_blob2clob(str, c_str, charset); + json_list := pljson_parser.gs_parse_list(c_str); + -- dbms_lob.freetemporary(c_str); + return json_list; + end; + + function gs_pljson_list(str_array varchar2[]) return pljson_list as + json_list pljson_list; + begin + -- json_list.pljson_list_data := pljson_value_array(); + for i in str_array.FIRST .. str_array.LAST loop + gs_append(json_list, str_array[i]); + end loop; + return json_list; + end; + + function gs_pljson_list(num_array number[]) return pljson_list as + json_list pljson_list; + begin + for i in str_array.FIRST .. 
+      gs_append(json_list, num_array[i]);
+    end loop;
+    return json_list;
+  end;
+
+  function gs_pljson_list(elem pljson_value) return pljson_list as
+    ret_list pljson_list;
+  begin
+    -- self := treat(elem.object_or_array as pljson_list);
+    ret_list := elem.arr;
+    return ret_list;
+  end;
+
+
+  /* list management */
+  procedure gs_append(json_list inout pljson_list, elem pljson_value, _position integer default null) as
+    indx integer;
+    insert_value pljson_value;
+  begin
+    insert_value := elem;
+    if insert_value is null then
+      insert_value := pljson_value.gs_pljson_value();
+    end if;
+    if (_position is null or _position > pljson_list.gs_count(json_list)) then --end of list
+      indx := pljson_list.gs_count(json_list) + 1;
+      json_list.pljson_list_data.extend(1);
+      json_list.pljson_list_data[indx] := insert_value;
+    elsif (_position < 1) then --new first (collection is 1-based)
+      indx := pljson_list.gs_count(json_list);
+      json_list.pljson_list_data.extend(1);
+      for x in reverse 1 .. indx loop
+        json_list.pljson_list_data[x+1] := json_list.pljson_list_data[x];
+      end loop;
+      json_list.pljson_list_data[1] := insert_value;
+    else
+      indx := pljson_list.gs_count(json_list);
+      json_list.pljson_list_data.extend(1);
+      for x in reverse _position .. indx loop
+        json_list.pljson_list_data[x+1] := json_list.pljson_list_data[x];
+      end loop;
+      json_list.pljson_list_data[_position] := insert_value;
+    end if;
+  end;
+
+  procedure gs_append(json_list inout pljson_list, elem varchar2, _position integer default null) as
+  begin
+    gs_append(json_list, pljson_value.gs_pljson_value(elem), _position);
+  end;
+
+  procedure gs_append(json_list inout pljson_list, elem clob, _position integer default null) as
+  begin
+    gs_append(json_list, pljson_value.gs_pljson_value(elem), _position);
+  end;
+
+  procedure gs_append(json_list inout pljson_list, elem number, _position integer default null) as
+  begin
+    if (elem is null) then
+      gs_append(json_list, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_append(json_list, pljson_value.gs_pljson_value(elem), _position);
+    end if;
+  end;
+
+  procedure gs_append(json_list inout pljson_list, elem binary_double, _position integer default null) as
+  begin
+    if (elem is null) then
+      gs_append(json_list, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_append(json_list, pljson_value.gs_pljson_value(elem), _position);
+    end if;
+  end;
+
+  procedure gs_append(json_list inout pljson_list, elem boolean, _position integer default null) as
+  begin
+    if (elem is null) then
+      gs_append(json_list, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_append(json_list, pljson_value.gs_pljson_value(elem), _position);
+    end if;
+  end;
+
+  procedure gs_append(json_list inout pljson_list, elem pljson_list, _position integer default null) as
+  begin
+    if (elem is null) then
+      gs_append(json_list, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_append(json_list, pljson_list.gs_to_json_value(elem), _position);
+    end if;
+  end;
+
+  procedure gs_remove(json_list inout pljson_list, _position integer) as
+  begin
+    if (_position is null or _position < 1 or _position > pljson_list.gs_count(json_list)) then
+      return;
+    end if;
+    for x in (_position+1) .. pljson_list.gs_count(json_list) loop
+      json_list.pljson_list_data[x-1] := json_list.pljson_list_data[x];
+    end loop;
+    json_list.pljson_list_data.trim(1);
+  end;
+
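+  -- Positional semantics, as a commented sketch (not executed): _position < 1
+  -- prepends, 1..count inserts at that slot, anything else appends. E.g.
+  --   l := pljson_list.gs_pljson_list('[1,2,3]');
+  --   pljson_list.gs_append(l, 99, 2);  -- [1,99,2,3]
+  --   pljson_list.gs_remove(l, 1);      -- [99,2,3]
+
+  procedure gs_remove_first(json_list inout pljson_list) as
+  begin
+    for x in 2 .. 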
pljson_list.gs_count(json_list) loop + json_list.pljson_list_data[x-1] := json_list.pljson_list_data[x]; + end loop; + if (pljson_list.gs_count(json_list) > 0) then + json_list.pljson_list_data.trim(1); + end if; + end; + + procedure gs_remove_last(json_list inout pljson_list) as + begin + if (pljson_list.gs_count(json_list) > 0) then + json_list.pljson_list_data.trim(1); + end if; + end; + + function gs_count(json_list pljson_list) return number as + begin + return json_list.pljson_list_data.count; + end; + + function gs_get(json_list pljson_list, _position integer) return pljson_value as + ret pljson_value; + begin + if (pljson_list.gs_count(json_list) >= _position and _position > 0) then + ret = json_list.pljson_list_data[_position]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_get_string(json_list pljson_list, _position integer) return varchar2 as + elem pljson_value; + ret varchar2; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_string(elem); + return ret; + end; + + function gs_get_clob(json_list pljson_list, _position integer) return clob as + elem pljson_value; + ret clob; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_clob(elem); + return ret; + end; + + function gs_get_bool(json_list pljson_list, _position integer) return boolean as + elem pljson_value; + ret boolean; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_bool(elem); + return ret; + end; + + function gs_get_number(json_list pljson_list, _position integer) return number as + elem pljson_value; + ret number; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_number(elem); + return ret; + end; + + function gs_get_double(json_list pljson_list, _position integer) return binary_double as + elem pljson_value; + ret binary_double; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_value.gs_get_double(elem); + return ret; + end; + + function gs_get_pljson_list(json_list pljson_list, _position integer) return pljson_list as + elem pljson_value; + ret pljson_list; + begin + elem := pljson_list.gs_get(json_list, _position); + ret = pljson_list.gs_pljson_list(elem); + -- return treat(elem.object_or_array as pljson_list); + return ret; + end; + + function gs_head(json_list pljson_list) return pljson_value as + ret pljson_value; + begin + if (pljson_list.gs_count(json_list) > 0) then + ret = json_list.pljson_list_data[json_list.pljson_list_data.first]; + -- return json_list.pljson_list_data[0]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_last(json_list pljson_list) return pljson_value as + ret pljson_value; + begin + if (pljson_list.gs_count(json_list) > 0) then + ret = json_list.pljson_list_data[json_list.pljson_list_data.last]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_tail(json_list pljson_list) return pljson_list as + t pljson_list; + ret pljson_list; + begin + if (pljson_list.gs_count(json_list) > 0) then + t := json_list; --pljson_list(self.to_json_value); + pljson_list.gs_remove(t, 1); + return t; + else + ret = pljson_list.gs_pljson_list(); + return ret; + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_value) as + insert_value pljson_value; + indx number; + begin + insert_value := elem; + if insert_value is null then + 
insert_value := pljson_value.gs_pljson_value(); + end if; + if (_position > pljson_list.gs_count(json_list)) then --end of list + indx := pljson_list.gs_count(json_list) + 1; + json_list.pljson_list_data.extend(1); + json_list.pljson_list_data[indx] := insert_value; + elsif (_position < 1) then --maybe an error message here + null; + else + json_list.pljson_list_data[_position] := insert_value; + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem varchar2) as + begin + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem clob) as + begin + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem number) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem binary_double) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem boolean) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_value.gs_pljson_value(elem)); + end if; + end; + + procedure gs_replace(json_list inout pljson_list, _position integer, elem pljson_list) as + begin + if (elem is null) then + gs_replace(json_list, _position, pljson_value.gs_pljson_value()); + else + gs_replace(json_list, _position, pljson_list.gs_to_json_value(elem)); + end if; + end; + + function gs_to_json_value(json_list pljson_list) return pljson_value as + ret pljson_value; + begin + ret = pljson_value.gs_pljson_value(json_list); + return ret; + end; + + /* output methods */ + function gs_to_char(json_list pljson_list, spaces boolean default true, chars_per_line number default 0) return varchar2 as + begin + if (spaces is null) then + return pljson_printer.gs_pretty_print_list(json_list, chars_per_line); + else + return pljson_printer.gs_pretty_print_list(json_list, spaces, chars_per_line); + end if; + end; + + procedure gs_to_clob(json_list pljson_list, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true) as + begin + if (spaces is null) then + pljson_printer.gs_pretty_print_list(json_list, false, buf, chars_per_line, erase_clob); + else + pljson_printer.gs_pretty_print_list(json_list, spaces, buf, chars_per_line, erase_clob); + end if; + end; + + -- procedure gs_print(json_list pljson_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + -- my_clob clob; + -- begin + -- dbe_lob.create_temporary(my_clob, true); + -- if (chars_per_line>32512) then + -- pljson_printer.gs_pretty_print_list(json_list, spaces, my_clob, 32512); + -- else + -- pljson_printer.gs_pretty_print_list(json_list, spaces, my_clob, chars_per_line); + -- end if; + -- pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + -- end; + + procedure gs_print(json_list pljson_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + 
my_clob clob; + my_bufstr varchar2; + begin + dbe_lob.create_temporary(my_clob, true); + if (chars_per_line>32512) then + my_bufstr := pljson_printer.gs_pretty_print_list(json_list, spaces, 32512); + else + my_bufstr := pljson_printer.gs_pretty_print_list(json_list, spaces, chars_per_line); + end if; + dbe_lob.append(my_clob, my_bufstr); + pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + end; + + procedure htp(json_list pljson_list, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null) as + my_clob clob; + begin + dbe_lob.create_temporary(my_clob, true); + pljson_printer.gs_pretty_print_list(json_list, spaces, my_clob, chars_per_line); + pljson_printer.htp_output_clob(my_clob, jsonp); + end; + + /* json path */ + function gs_path(json_list pljson_list, json_path varchar2, base number default 1) return pljson_value as + cp pljson_list; + ret pljson_value; + begin + cp := json_list; + ret = pljson_ext.gs_get_json_value(pljson.gs_pljson(cp), json_path, base); + return ret; + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_value, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_put(objlist, json_path, elem, base); + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem varchar2, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_put(objlist, json_path, elem, base); + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem clob, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_put(objlist, json_path, elem, base); + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem number, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem binary_double, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while 
(pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem boolean, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + procedure gs_path_put(json_list inout pljson_list, json_path varchar2, elem pljson_list, base number default 1) as + objlist pljson; + jp pljson_list; + begin + jp := pljson_ext.gs_parsePath(json_path, base); + while (pljson_value.gs_get_number(pljson_list.gs_head(jp)) > pljson_list.gs_count(json_list)) loop + pljson_list.gs_append(json_list, pljson_value.gs_pljson_value()); + end loop; + objlist := pljson.gs_pljson(json_list); + if (elem is null) then + pljson_ext.gs_put(objlist, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(objlist, json_path, elem, base); + end if; + json_list := pljson.gs_get_values(objlist); + end; + + /* json path_remove */ + procedure gs_path_remove(json_list inout pljson_list, json_path varchar2, base number default 1) as + objlist pljson; + begin + objlist := pljson.gs_pljson(json_list); + pljson_ext.gs_remove(objlist, json_path, base); + json_list := pljson.gs_get_values(objlist); + end; + +end pljson_list; +/ + +create or replace package body pljson as + + function gs_pljson() return pljson as + pj pljson; + begin + pj.check_for_duplicate := 1; + return pj; + end; + + function gs_pljson(str varchar2) return pljson as + pj pljson; + begin + pj := pljson_parser.gs_parser(str); + pj.check_for_duplicate := 1; + return pj; + end; + + function gs_pljson(str clob) return pljson as + pj pljson; + begin + pj := pljson_parser.gs_parser(str); + pj.check_for_duplicate := 1; + return pj; + end; + + function gs_pljson(str blob, charset varchar2 default 'UTF8') return pljson as + pj pljson; + c_str clob; + begin + pljson_ext.gs_blob2clob(str, c_str, charset); + pj := pljson_parser.gs_parser(c_str); + pj.check_for_duplicate := 1; + -- dbms_lob.freetemporary(c_str); + return pj; + end; + + function gs_pljson(str_array varchar2[]) return pljson as + pj pljson; + new_pair boolean := True; + pair_name varchar2(32767); + pair_value varchar2(32767); + begin + pj.check_for_duplicate := 1; + for i in str_array.FIRST .. 
str_array.LAST loop + if new_pair then + pair_name := str_array[i]; + new_pair := False; + else + pair_value := str_array[i]; + gs_put(pj, pair_name, pair_value); + new_pair := True; + end if; + end loop; + return pj; + end; + + function gs_pljson(elem pljson_value) return pljson as + pj pljson; + begin + -- self := treat(elem.object_or_array as pljson); + pj := elem.obj; + return pj; + end; + + function gs_pljson(l pljson_list) return pljson as + pj pljson; + temp pljson_value; + begin + for i in 1 .. pljson_list.gs_count(l) loop + temp = l.pljson_list_data[i]; + if(temp.mapname is null or temp.mapname like 'row%') then + temp.mapname := 'row'||i; + end if; + temp.mapindx := i; + end loop; + + pj.pljson_list_data := l.pljson_list_data; + pj.check_for_duplicate := 1; + return pj; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_value, _position integer default null) as + insert_value pljson_value; + indx integer; + x number; + temp pljson_value; + begin + + insert_value := pair_value; + if insert_value is null then + insert_value := pljson_value.gs_pljson_value(); + end if; + insert_value.mapname := pair_name; + if (pj.check_for_duplicate = 1) then + temp := pljson.gs_get(pj, pair_name); + else + temp := null; + end if; + + if (temp is not null) then + insert_value.mapindx := temp.mapindx; + pj.pljson_list_data[temp.mapindx] := insert_value; + return; + elsif (_position is null or _position > pljson.gs_count(pj)) then + --insert at the end of the list + pj.pljson_list_data.extend(1); + insert_value.mapindx := pj.pljson_list_data.count; + pj.pljson_list_data[pj.pljson_list_data.count] := insert_value; + elsif (_position < 2) then + --insert at the start of the list + indx := pj.pljson_list_data.last; + pj.pljson_list_data.extend; + loop + exit when indx is null; + temp := pj.pljson_list_data[indx]; + temp.mapindx := indx+1; + pj.pljson_list_data[temp.mapindx] := temp; + indx := pj.pljson_list_data.prior(indx); + end loop; + insert_value.mapindx := 1; + pj.pljson_list_data[1] := insert_value; + else + --insert somewhere in the list + indx := pj.pljson_list_data.last; + pj.pljson_list_data.extend; + loop + temp := pj.pljson_list_data[indx]; + temp.mapindx := indx + 1; + pj.pljson_list_data[temp.mapindx] := temp; + exit when indx = _position; + indx := pj.pljson_list_data.prior(indx); + end loop; + insert_value.mapindx := _position; + pj.pljson_list_data[_position] := insert_value; + end if; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value varchar2, _position integer default null) as + begin + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value clob, _position integer default null) as + begin + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value number, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end if; + end; + + procedure gs_put(pj inout pljson, pair_name varchar2, pair_value binary_double, _position integer default null) as + begin + if (pair_value is null) then + gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position); + else + gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position); + end if; + end; + + procedure gs_put(pj 
inout pljson, pair_name varchar2, pair_value boolean, _position integer default null) as
+  begin
+    if (pair_value is null) then
+      gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_put(pj, pair_name, pljson_value.gs_pljson_value(pair_value), _position);
+    end if;
+  end;
+
+  procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson, _position integer default null) as
+  begin
+    if (pair_value is null) then
+      gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_put(pj, pair_name, pljson.gs_to_json_value(pair_value), _position);
+    end if;
+  end;
+
+  procedure gs_put(pj inout pljson, pair_name varchar2, pair_value pljson_list, _position integer default null) as
+  begin
+    if (pair_value is null) then
+      gs_put(pj, pair_name, pljson_value.gs_pljson_value(), _position);
+    else
+      gs_put(pj, pair_name, pljson_list.gs_to_json_value(pair_value), _position);
+    end if;
+  end;
+
+  procedure gs_remove(pj inout pljson, pair_name varchar2) as
+    temp pljson_value;
+    indx integer;
+  begin
+    temp := pljson.gs_get(pj, pair_name);
+    if (temp is null) then
+      return;
+    end if;
+    indx := pj.pljson_list_data.next(temp.mapindx);
+    loop
+      exit when indx is null;
+      exit when indx = arr_length(pj.pljson_list_data);
+      pj.pljson_list_data[indx].mapindx := indx - 1;
+      pj.pljson_list_data[indx-1] := pj.pljson_list_data[indx];
+      indx := pj.pljson_list_data.next(indx);
+    end loop;
+    pj.pljson_list_data.trim(1);
+  end;
+
+  function gs_count(pj pljson) return number as
+  begin
+    return pj.pljson_list_data.count;
+  end;
+
+  function gs_get(pj pljson, pair_name varchar2) return pljson_value as
+    indx integer;
+    ret pljson_value;
+  begin
+    indx := pj.pljson_list_data.first;
+    loop
+      exit when indx is null;
+      if (pair_name is null and pj.pljson_list_data[indx].mapname is null) then
+        ret = pj.pljson_list_data[indx];
+        return ret;
+      end if;
+      if (pj.pljson_list_data[indx].mapname = pair_name) then
+        ret = pj.pljson_list_data[indx];
+        return ret;
+      end if;
+      indx := pj.pljson_list_data.next(indx);
+    end loop;
+    return null;
+  end;
+
+  function gs_get_string(pj pljson, pair_name varchar2) return varchar2 as
+    elem pljson_value;
+    ret varchar2;
+  begin
+    elem := pljson.gs_get(pj, pair_name);
+    ret = pljson_value.gs_get_string(elem);
+    return ret;
+  end;
+
+  function gs_get_clob(pj pljson, pair_name varchar2) return clob as
+    elem pljson_value;
+    ret clob;
+  begin
+    elem := pljson.gs_get(pj, pair_name);
+    ret = pljson_value.gs_get_clob(elem);
+    return ret;
+  end;
+
+  function gs_get_number(pj pljson, pair_name varchar2) return number as
+    elem pljson_value;
+    ret number;
+  begin
+    elem := pljson.gs_get(pj, pair_name);
+    ret = pljson_value.gs_get_number(elem);
+    return ret;
+  end;
+
+  function gs_get_double(pj pljson, pair_name varchar2) return binary_double as
+    elem pljson_value;
+    ret binary_double;
+  begin
+    elem := pljson.gs_get(pj, pair_name);
+    ret = pljson_value.gs_get_double(elem);
+    return ret;
+  end;
+
+  function gs_get_bool(pj pljson, pair_name varchar2) return boolean as
+    elem pljson_value;
+    ret boolean;
+  begin
+    elem := pljson.gs_get(pj, pair_name);
+    ret = pljson_value.gs_get_bool(elem);
+    return ret;
+  end;
+
+  function gs_get_pljson(pj pljson, pair_name varchar2) return pljson as
+    elem pljson_value;
+    ret pljson;
+  begin
+    elem := pljson.gs_get(pj, pair_name);
+    -- return treat(elem.object_or_array as pljson);
+    return ret;
+  end;
+
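+  -- NOTE: gs_get_pljson (above) and gs_get_pljson_list (below) are effectively
+  -- stubs while treat() is unavailable: ret is returned unassigned, so callers
+  -- currently receive null. Compare gs_pljson(elem pljson_value), which reads
+  -- elem.obj directly.
+  function gs_get_pljson_list(pj pljson, pair_name varchar2) return pljson_list as
+    elem pljson_value;
+    ret pljson_list;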
begin + elem := pljson.gs_get(pj, pair_name); + -- return treat(elem.object_or_array as pljson); + return ret; + end; + + function gs_get(pj pljson, _position integer) return pljson_value as + ret pljson_value; + begin + if (pljson.gs_count(pj) >= _position and _position > 0) then + ret = pj.pljson_list_data[_position]; + return ret; + end if; + return null; -- do not throw error, just return null + end; + + function gs_index_of(pj pljson, pair_name varchar2) return number as + indx integer; + begin + indx := pj.pljson_list_data.first; + loop + exit when indx is null; + if (pair_name is null and pj.pljson_list_data[indx].mapname is null) then + return indx; + end if; + if (pj.pljson_list_data[indx].mapname = pair_name) then + return indx; + end if; + indx := pj.pljson_list_data.next(indx); + end loop; + return -1; + end; + + function gs_exist(pj pljson, pair_name varchar2) return boolean as + begin + return (pljson.gs_get(pj, pair_name) is not null); + end; + + function gs_to_json_value(pj pljson) return pljson_value as + ret pljson_value; + begin + ret = pljson_value.gs_pljson_value(pj); + return ret; + end; + + procedure gs_check_duplicate(pj inout pljson, v_set boolean) as + begin + if (v_set) then + pj.check_for_duplicate := 1; + else + pj.check_for_duplicate := 0; + end if; + end; + + procedure gs_remove_duplicates(pj inout pljson) as + begin + pljson_parser.gs_remove_duplicates(pj); + end; + + function gs_to_char(pj pljson, spaces boolean default true, chars_per_line number default 0) return varchar2 as + begin + if(spaces is null) then + return pljson_printer.gs_pretty_print(pj, chars_per_line); + else + return pljson_printer.gs_pretty_print(pj, spaces, chars_per_line); + end if; + end; + + procedure gs_to_clob(pj pljson, buf inout clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true) as + begin + if(spaces is null) then + pljson_printer.gs_pretty_print(pj, false, buf, chars_per_line, erase_clob); + else + pljson_printer.gs_pretty_print(pj, spaces, buf, chars_per_line, erase_clob); + end if; + end; + + -- procedure gs_print(pj pljson, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + -- my_clob clob; + -- begin + -- dbe_lob.create_temporary(my_clob, true); + -- if (chars_per_line>32512) then + -- pljson_printer.gs_pretty_print(pj, spaces, my_clob, 32512); + -- else + -- pljson_printer.gs_pretty_print(pj, spaces, my_clob, chars_per_line); + -- end if; + -- pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + -- end; + + procedure gs_print(pj pljson, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null) as + my_clob clob; + my_bufstr varchar2; + begin + dbe_lob.create_temporary(my_clob, true); + if (chars_per_line>32512) then + my_bufstr := pljson_printer.gs_pretty_print(pj, spaces, 32512); + else + my_bufstr := pljson_printer.gs_pretty_print(pj, spaces, chars_per_line); + end if; + dbe_lob.append(my_clob,my_bufstr); + pljson_printer.gs_dbms_output_clob(my_clob, pljson_printer.newline_char, jsonp); + end; + + procedure htp(pj pljson, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null) as + my_clob clob; + begin + dbe_lob.create_temporary(my_clob, true); + pljson_printer.gs_pretty_print(pj, spaces, my_clob, chars_per_line); + pljson_printer.htp_output_clob(my_clob, jsonp); + end; + + function gs_path(pj pljson, json_path varchar2, base number default 1) return pljson_value as + ret 
pljson_value; + begin + ret = pljson_ext.gs_get_json_value(pj, json_path, base); + return ret; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_value, base number default 1) as + begin + pljson_ext.gs_put(pj, json_path, elem, base); + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem varchar2, base number default 1) as + begin + pljson_ext.gs_put(pj, json_path, elem, base); + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem clob, base number default 1) as + begin + pljson_ext.gs_put(pj, json_path, elem, base); + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem number, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem binary_double, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem boolean, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_put(pj inout pljson, json_path varchar2, elem pljson_list, base number default 1) as + begin + if (elem is null) then + pljson_ext.gs_put(pj, json_path, pljson_value.gs_pljson_value(), base); + else + pljson_ext.gs_put(pj, json_path, elem, base); + end if; + end; + + procedure gs_path_remove(pj inout pljson, json_path varchar2, base number default 1) as + begin + pljson_ext.gs_remove(pj, json_path, base); + end; + + function gs_get_keys(pj pljson) return pljson_list as + keys pljson_list; + indx integer; + begin + keys := pljson_list.gs_pljson_list(); + indx := pj.pljson_list_data.first; + loop + exit when indx is null; + pljson_list.gs_append(keys, pj.pljson_list_data[indx].mapname); + indx := pj.pljson_list_data.next(indx); + end loop; + return keys; + end; + + function gs_get_values(pj pljson) return pljson_list as + vals pljson_list; + begin + vals := pljson_list.gs_pljson_list(); + vals.pljson_list_data := pj.pljson_list_data; + return vals; + end; + +end pljson; +/ + +reset current_schema; + +grant usage ON schema DBE_PLJSON TO public; + +set current_schema=DBE_PLJSON; + +create type t1 as ( a int ); +create type tt1 as ( b t1[] ); +alter type t1 add attribute arr tt1; + +create type t2 as ( a int ); +create type tt2 as (pljson_list_data t2[]); +alter type t2 add attribute arr tt2; + +declare +obj pljson; +begin +obj := pljson.gs_pljson('{"a": true }'); +pljson.gs_print(obj); +obj := pljson.gs_pljson(' +{ +"a": null, +"b": 12.243, +"c": 2e-3, +"d": [true, false, "abdc", [1,2,3]], +"e": [3, {"e2":3}], +"f": {"f2":true} +}'); +pljson.gs_print(obj); +pljson.gs_print(obj, false); +end; +/ + +reset current_schema; diff --git a/src/test/regress/sql/plpgsql_array.sql b/src/test/regress/sql/plpgsql_array.sql new file mode 100644 index 000000000..b045aa94a --- /dev/null 
+++ b/src/test/regress/sql/plpgsql_array.sql @@ -0,0 +1,471 @@ +-- FOR PL/pgSQL VARRAY scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_array; +create schema plpgsql_array; +set current_schema = plpgsql_array; + +-- initialize tables -- +create table customers ( + id number(10) not null, + c_name varchar2(50) not null, + c_age number(8) not null, + c_address varchar2(50), + salary float(2) not null, + constraint customers_pk primary key (id) +); + +insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (2, 'Zera' ,25, 'London', 5999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98); +insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00); +insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00); + +create table tmp(a int, b varchar(100)); + +-- initialize functions, types etc. -- +create type mytype as ( + id integer, + biome varchar2(100) +); + +create type mytype2 as ( + id integer, + locale myType +); + +-- it turns any input to (9, (1, 'space')) +create or replace function myfunc(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (9, (1, 'space')); + return ret; +end; +/ + +-- type and function shares the same name -- +-- Oh~oh, what's gonna happened?? -- +create type functype as ( + id integer, + locale myType +); + +create or replace function functype(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (-1, (1, 'unknown realm')); + return ret; +end; +/ + +-- test function datatype priority -- +create or replace function name_list(inint in integer) +return integer +is + ret integer; +begin + ret := 1; + return ret; +end; +/ + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- general declare + assign + access -- +-- support varray with parentheses in SQL -- +DECLARE + CURSOR c_customers is + SELECT c_name FROM customers order by id; + type c_list is varray (6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +BEGIN + FOR n IN c_customers LOOP + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + END LOOP; + + insert into tmp values (NULL, name_list(1)); + insert into tmp values (NULL, name_list(3)); + name_list(2) := name_list(3); + insert into tmp values (NULL, name_list[2]); -- same as last one -- +END; +/ + +select * from tmp order by 1, 2; +truncate tmp; + +-- ERROR: mix of parens and brackets are not allowed -- +declare + cursor c_customers is (select c_name from customers order by id); + type c_list is varray(6) of customers.c_name%type; + name_list c_list := c_list(); + counter integer := 0; +begin + for n in c_customers loop + counter := counter + 1; -- 6 iterations -- + name_list.extend; + name_list(counter) := n.c_name; + end loop; + + insert into tmp values (null, name_list(1]); +end; +/ + +-- parentheses support in SQL 2 -- +-- array of record -- +declare + cursor c_customers is (select * from customers order by id); + type c_list is varray(6) of customers; + customer_list 
c_list := c_list();
+    counter integer := 0;
+    name varchar2(50) := '';
+begin
+    for n in c_customers loop
+        counter := counter + 1;
+        customer_list.extend;
+        customer_list(counter) := (n.id, n.c_name, n.c_age, n.c_address, n.salary); -- insert record --
+        name := customer_list(counter).c_name;
+        if customer_list(counter).c_age <= 30 then
+            dbe_output.print_line('Individual who is below 30: ' || customer_list(counter).c_name);
+        else
+            dbe_output.print_line('Individual who is above 30: ' || name);
+        end if;
+        insert into tmp values (customer_list(counter).c_age, customer_list(counter).salary); -- parentheses --
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- batch initialization, batch insert varray --
+declare
+    type students is varray(6) of varchar2(10);
+    type grades is varray(6) of integer;
+    marks grades := grades('98', 97, 74 + 4, (87), 92, 100); -- batch initialize --
+    names students default students('none'); -- default --
+    total integer;
+begin
+    names := students(); -- should append NULL then do the coerce --
+    names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert --
+    total := names.count;
+    dbe_output.print_line('Total '|| total || ' Students');
+    for i in 1 .. total loop
+        dbe_output.print_line('Student: ' || names(i) || ' Marks: ' || marks(i));
+    end loop;
+end;
+/
+
+-- the block above will be rewritten into roughly this form (plus parens and coerces) --
+declare
+    type students is varray(6) of varchar2(10);
+    type grades is varray(6) of integer;
+    marks grades := array['98', 97, 74 + 4, (87), 92, 100]; -- batch initialize --
+    names students default array['none']; -- default --
+    total integer;
+begin
+    names := array[NULL];
+    names := array['Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss']; -- batch insert --
+    total := names.count;
+    dbe_output.print_line('Total '|| total || ' Students');
+    for i in 1 .. total loop
+        dbe_output.print_line('Student: ' || names(i) || ' Marks: ' || marks(i));
+    end loop;
+end;
+/
+
+-- test of PL/SQL data type instantiation --
+-- If we specify our own type (PL/SQL-like instantiation), all varray members --
+-- should be cast to the correct data type. --
+declare
+    type students is varray(5) of varchar2(10);
+    names students;
+begin
+    names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+    for i in 1 .. 5 loop
+        dbe_output.print_line('Student: ' || names(i));
+    end loop;
+end;
+/
+
+-- However, if we use the PL/pgSQL style instantiation, that is not guaranteed --
+-- error out for this one --
+declare
+    type students is varray(5) of varchar2(10);
+    names students;
+begin
+    -- we can only make assumptions based on the first element, which is not always a good answer --
+    names := array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin'];
+    for i in 1 .. 5 loop
+        dbe_output.print_line('Student: ' || names(i));
+    end loop;
+end;
+/
+
+
+-- test of uneven brackets --
+-- error out --
+declare
+    type students is varray(5) of varchar2(10);
+    names students;
+begin
+    names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+    for i in 1 .. 5 loop
+        dbe_output.print_line('Student: ' || names(i]);
+    end loop;
+end;
+/
+
+-- Using a composite type defined outside of the procedure block --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        mytype2(1, (1, 'ground')),
+        mytype2(1, (2, 'air'))
+    );
+begin
+    aa.extend(10);
+    aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+    dbe_output.print_line('locale id is: ' || aa(1).id);
+    dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+
+-- Note: array can handle proper type-in-type declaration --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        mytype2(1, mytype(1, 'ground')),
+        mytype2(1, mytype(2, 'air'))
+    );
+begin
+    aa.extend(10);
+    aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+    dbe_output.print_line('locale id is: ' || aa(1).id);
+    dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        mytype2(1, mytype(1, 'ground')),
+        mytype2(1, mytype(2, 'air'))
+    );
+begin
+    aa.extend(10);
+    aa(2) := mytype2(2, mytype(3, 'water'));
+    dbe_output.print_line('locale id is: ' || aa(1).id);
+    dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+
+-- working with functions --
+-- should be the same, except for the result; make sure functions are correctly identified --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        myfunc((1, mytype(1, 'ground'))), -- for records, we need an extra pair of parens to work --
+        myfunc((1, mytype(2, 'air')))
+    );
+begin
+    aa.extend(10);
+    dbe_output.print_line('locale id is: ' || aa(1).id);
+    dbe_output.print_line('biome 9.1 is: ' || aa(2).locale.biome); -- ... space! --
+end;
+/
+
+-- This is what's going to happen when functions and types share the same name --
+-- (Don't try this at home) --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        functype(1, mytype(1, 'ground')), -- we are prioritizing types here --
+        functype(1, mytype(2, 'air'))
+    );
+begin
+    aa.extend(10);
+    dbe_output.print_line('locale id is: ' || aa(1).id);
+    dbe_output.print_line('biome 1.2 is: ' || aa(2).locale.biome); -- air --
+end;
+/
+
+drop type functype; -- abandon type functype --
+
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        functype((1, mytype(1, 'ground'))), -- here we have to use function functype --
+        functype((1, mytype(2, 'air')))
+    );
+begin
+    aa.extend(10);
+    dbe_output.print_line('locale ?? is: ' || aa(1).id);
+    dbe_output.print_line('biome ??? is: ' || aa(2).locale.biome); -- weird places --
+end;
+/
+
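+-- Name-resolution note: while both a type and a function named functype exist,
+-- the type takes priority; once the type is dropped, the same-looking call
+-- resolves to the function (hence the extra parens around the record argument).
+
+drop function functype; -- oops! --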
+
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        functype((1, mytype(1, 'ground'))), -- not sure --
+        functype((1, mytype(2, 'air')))
+    );
+begin
+    aa.extend(10);
+    dbe_output.print_line('This message worth 300 tons of gold (once printed).');
+end;
+/
+
+-- Multi-dimension arrays --
+declare
+    type arrayfirst is varray(10) of int;
+    arr arrayfirst := arrayfirst(1, 2, 3);
+    mat int[][] := ARRAY[arr, arr]; -- PLpgSQL style --
+begin
+    dbe_output.print_line('The magic number is: ' || mat(1)(2)); -- should be 2 --
+end;
+/
+
+-- assignments && statements test --
+declare
+    type arrayfirst is varray(10) of int;
+    arr arrayfirst := arrayfirst(1, 2, 3);
+    mat int[][] := ARRAY[arr, ARRAY[4, 5 ,6]]; -- PLpgSQL style --
+begin
+    dbe_output.print_line('The magic number is: ' || mat[2](1)); -- should be 4 --
+    mat[1](3) = mat(2)[3];
+    dbe_output.print_line('The magic number is: ' || mat[1](3)); -- should be 6 --
+
+    insert into tmp(a) values (mat[1](2)), (mat(1)[2]), (mat(1)(2)), (mat[1][2]);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- error out! --
+declare
+    type arrayfirst is varray(10) of int;
+    arr arrayfirst := arrayfirst(1, 2, 3);
+    type arraySecond is varray(10) of arrayfirst; -- Nested types are not supported, yet --
+    mat arraySecond := arraySecond(arr, arr);
+begin
+    dbe_output.print_line('The magic number is: ' || mat(1)(2)); -- should be 2 --
+end;
+/
+
+-- Should be empty --
+create or replace procedure pro1() as
+declare
+    type students is varray(5) of varchar2(10);
+    names students := students();
+begin
+    raise NOTICE '%', names;
+    raise NOTICE '%', names.count;
+end;
+/
+
+call pro1();
+
+-- constant! --
+declare
+    type ta is table of varchar(100);
+    tb constant ta := ta('10','11');
+begin
+    tb(1) := 12;
+    dbe_output.print_line(tb[1]);
+end;
+/
+
+declare
+    type ta is table of varchar(100);
+    tb constant ta := ta('10','11');
+begin
+    tb := ta('12','13');
+    dbe_output.print_line(tb[1]);
+end;
+/
+
+-- nested array --
+create or replace package pckg_test as
+    type rec1 is record(col1 varchar2);
+    type t_arr is table of rec1;
+    type rec2 is record(col1 t_arr, col2 t_arr);
+    type t_arr1 is table of rec2;
+procedure proc_test();
+end pckg_test;
+/
+
+create or replace package body pckg_test as
+procedure proc_test() as
+v_arr t_arr1;
+v_rec rec1;
+begin
+    v_arr(1).col1 := array[ROW('hello')];
+    v_arr(1).col2 := array[ROW('world')];
+    v_rec := v_arr(1).col2[1]; -- normal bracket --
+    raise notice '%', v_arr(1).col2(1); -- parentheses --
+    insert into tmp(b) values (v_arr(1).col2(1)); -- sql --
+end;
+end pckg_test;
+/
+call pckg_test.proc_test();
+select * from tmp order by 1, 2;
+
+
+CREATE OR REPLACE FUNCTION myarray_sort (ANYARRAY)
+RETURNS ANYARRAY LANGUAGE SQL AS $$
+SELECT ARRAY(
+    SELECT $1[s.i] AS "foo"
+    FROM
+        generate_series(array_lower($1,1), array_upper($1,1)) AS s(i)
+    ORDER BY foo
+);
+$$;
+
+select myarray_sort(array[9,8,7,1,2,35]);
+
+create table testtbl (plg_id varchar2);
+
+declare
+    type array_vchar is VARRAY(20) of varchar2;
+    plg_id array_vchar := array_vchar();
+    ans int;
+begin
+    select count(1) into ans from testtbl where plg_id = plg_id(1);
+end;
+/
+
+drop table testtbl;
+
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+
+drop package if exists pckg_test;
+drop procedure if exists pro1;
+drop function if exists functype;
+drop function if exists myfunc;
+drop function if exists myarray_sort;
+drop 
table if exists tmp; +drop table if exists customers; +drop type if exists functype; +drop type if exists mytype2; +drop type if exists mytype; + +-- clean up -- +drop schema if exists plpgsql_array cascade; diff --git a/src/test/regress/sql/plpgsql_array_of_record.sql b/src/test/regress/sql/plpgsql_array_of_record.sql new file mode 100644 index 000000000..43c9b6985 --- /dev/null +++ b/src/test/regress/sql/plpgsql_array_of_record.sql @@ -0,0 +1,556 @@ +-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + +-- create new schema -- +drop schema if exists plpgsql_arrayofrecord; +create schema plpgsql_arrayofrecord; +set current_schema = plpgsql_arrayofrecord; + +-- initialize table and type-- +create type info as (name varchar2(50), age int, address varchar2(20)); +create type customer as (id number(10), c_info info); + + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- test record of record +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type cust is record (id number(10), info r1); + va cust; +begin + va := (1, ('Vera' ,32, 'Paris')); + return (va.info).age; +end; +$$ language plpgsql; +select get_age(); + +-- test record of array +create or replace function get_age RETURNS int as $$ +declare + type r1 is VARRAY(10) of customer; + type custs is record (c_list r1); + va custs; +begin + va.c_list := ARRAY[(1, ('Vera' ,32, 'Paris')),(2, ('Zera' ,25, 'London')),(3, ('Alice' ,22, 'Bangkok'))]; + return va.c_list[2].c_info.age; +end; +$$ language plpgsql; +select get_age(); + +-- test record of table +create or replace function get_age RETURNS int as $$ +declare + type r1 is table of customer index by varchar(10); + type custs is record (c_list r1); + va custs; +begin + va.c_list('a') := (1, ('Vera' ,32, 'Paris')); + va.c_list('aa') := (2, ('Zera' ,25, 'London')); + va.c_list('aaa') := (3, ('Alice' ,22, 'Bangkok')); + return va.c_list['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + +-- test array of record +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is VARRAY(10) of cust; + va custs; + vb int; +begin + va := ARRAY[(1, ('Vera' ,32, 'Paris')),(2, ('Zera' ,25, 'London')),(3, ('Alice' ,22, 'Bangkok'))]; + return va[2].c_info.age; +end; +$$ language plpgsql; +select get_age(); + +-- test table of record +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + va('a') := (1, ('Vera' ,32, 'Paris')); + va('aa') := (2, ('Zera' ,25, 'London')); + va('aaa') := (3, ('Alice' ,22, 'Bangkok')); + return va['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + +-- test table of record assign value to attribute +create or replace function get_age RETURNS int as $$ +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + va('a').id := 1; + va('a').c_info := ('Vera' ,32, 'Paris'); + va('aa').id := 2; + va('aa').c_info := ('Zera' ,25, 'London'); + va('aaa').id := 3; + va('aaa').c_info := ('Alice' ,22, 'Bangkok'); + return va['aa'].c_info.age; +end; +$$ language plpgsql; +select get_age(); + +-- test table of record assign value to attribute 
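+-- (Commented sketch: the variant below drives the same assignments through
+-- SELECT ... INTO, which can target a collection element's field directly,
+-- e.g. "select 1 into va('k').id;" for an index-by table of records.)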
+create table custs_record (id int, c_info info);
+insert into custs_record values(1, ('Vera' ,32, 'Paris'));
+insert into custs_record values(2, ('Zera' ,25, 'London'));
+insert into custs_record values(3, ('Alice' ,22, 'Bangkok'));
+
+create or replace function get_age RETURNS int as $$
+declare
+    type cust is record (id int, c_info info);
+    type custs is table of cust index by varchar(10);
+    va custs;
+begin
+    select id into va('a').id from custs_record where id = 1;
+    select c_info into va('a').c_info from custs_record where id = 1;
+    select id into va('aa').id from custs_record where id = 2;
+    select c_info into va('aa').c_info from custs_record where id = 2;
+    select id into va('aaa').id from custs_record where id = 3;
+    select c_info into va('aaa').c_info from custs_record where id = 3;
+    return va['aa'].c_info.age;
+end;
+$$ language plpgsql;
+select get_age();
+
+drop table custs_record;
+
+-- test record of record of record
+create or replace function get_age RETURNS int as $$
+declare
+    type r1 is record (name varchar2(50), age int, address varchar2(20));
+    type r2 is record (id number(10), info r1);
+    type cust is record (c_info r2, id int);
+
+    va cust;
+    vb int;
+begin
+    va := ((1, ('Vera' ,32, 'Paris')),1);
+    vb := (va.c_info).info.age;
+    return vb;
+end;
+$$ language plpgsql;
+select get_age();
+
+-- test record of record: reuse of the record type
+create or replace function get_age RETURNS int as $$
+declare
+    type r1 is record (name varchar2(50), age int, address varchar2(20));
+    type r2 is record (id number(10), info r1);
+    type r3 is record (id number(10),info r1);
+
+    va r3;
+    vb r2;
+    vc int;
+begin
+    va := (1, ('Vera' ,32, 'Paris'));
+    vb.id := va.id;
+    vb.info := va.info;
+    vc := (vb.info).age;
+    return vc;
+end;
+$$ language plpgsql;
+select get_age();
+
+-- array of record of array of record
+create or replace function get_age RETURNS int as $$
+declare
+    type r1 is record (name varchar2(50), age int, address varchar2(20));
+    type r1_arr is VARRAY(10) of r1;
+    type r2 is record (col1 r1_arr);
+    type r2_arr is VARRAY(10) of r2;
+
+    va r2_arr;
+    vb int;
+begin
+    va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')];
+    va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')];
+    vb := va[2].col1[2].age;
+    return vb;
+end;
+$$ language plpgsql;
+select get_age();
+
+-- table of record of table of record
+create or replace function get_age RETURNS int as $$
+declare
+    type r1 is record (name varchar2(50), age int, address varchar2(20));
+    type r1_arr is table of r1;
+    type r2 is record (col1 r1_arr);
+    type r2_arr is table of r2;
+
+    va r2_arr;
+    vb int;
+begin
+    va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')];
+    va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')];
+    va(2).col1(2).age = 45;
+    raise info '%', va;
+    return 1;
+end;
+$$ language plpgsql;
+select get_age();
+
+-- record of table of record
+create or replace function get_age RETURNS int as $$
+declare
+    type r1 is record (name varchar2(50), age int, address varchar2(20));
+    type r1_arr is table of r1;
+    type r2 is record (col1 r1_arr);
+
+    va r2;
+    vb int;
+begin
+    va.col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')];
+    va.col1(2).age = 45;
+    raise info '%', va;
+    return 1;
+end;
+$$ language plpgsql;
+select get_age();
+
+--test: types should be dropped when the function is dropped
+select typname from pg_type where typtype = 'c' and typarray != 0 and typnamespace = (select oid from pg_namespace where nspname = current_schema) order by typname desc;
+drop 
function get_age(); +select typname from pg_type where typtype = 'c' and typarray != 0 and typnamespace = (select oid from pg_namespace where nspname = current_schema) order by typname desc; + +--test: name of special characters +create or replace function "GET::INT;;INT" RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +$$ language plpgsql; +select "GET::INT;;INT"(); +DROP FUNCTION "GET::INT;;INT"(); + +-- test record duplicate name situations, record var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + type get_age_r1 is record (a int, b int); + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := (1,2); + return vc.a; +end; +$$ language plpgsql; +select get_age(); + +-- test record duplicate name situations, record var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type get_age_r1 is record (a int, b int); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := (4,5); + return vc.b; +end; +$$ language plpgsql; +select get_age(); + +-- test record duplicate name situations, varray var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type get_age_r1 is VARRAY(10) of int; + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := array[1,2,3,4,5]; + return vc(2); +end; +$$ language plpgsql; +select get_age(); + +-- test record duplicate name situations, varray var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + type get_age_r1 is VARRAY(10) of int; + va cust; + vb int; + vc get_age_r1; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + vc := array[1,2,3,4,5]; + return vc(3); +end; +$$ language plpgsql; +select get_age(); + +-- test record duplicate name situations, var +create or replace function get_age RETURNS int as $$ +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + get_age_r1 int; + type r2 is record (id number(10), info r1); + type cust is record (c_info r2, id int); + va cust; + vb int; +begin + va := ((1, ('Vera' ,32, 'Paris',1,2,3)),1); + vb := (va.c_info).info.age; + get_age_r1 :=10; + return get_age_r1; +end; +$$ language plpgsql; +select get_age(); +drop function get_age(); + +-- ERROR: typename too long +create or replace function get_age RETURNS int as $$ +declare + type rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr1 
is record (name varchar2(50), age int, address varchar2(20)); + type cust is record (id number(10), info rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr1); + va cust; +begin + va := (1, ('Vera' ,32, 'Paris')); + return (va.info).age; +end; +$$ language plpgsql; + +--ERROR: record type nested is not supported in anonymous block. +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type cust is record (id number(10), info r1); + va cust; +begin + va := (1, ('Vera' ,32, 'Paris')); +end; +/ + +--ERROR: record type nested is not supported in anonymous block. +declare + type cust is record (id int, c_info info); + type custs is VARRAY(10) of cust; + va custs; +begin + va := ARRAY[(1, ('Vera' ,32, 'Paris')),(2, ('Zera' ,25, 'London')),(3, ('Alice' ,22, 'Bangkok'))]; +end; +/ + +--ERROR: record type nested is not supported in anonymous block. +declare + type cust is record (id int, c_info info); + type custs is table of cust index by varchar(10); + va custs; +begin + va('a') := (1, ('Vera' ,32, 'Paris')); + va('aa') := (2, ('Zera' ,25, 'London')); + va('aaa') := (3, ('Alice' ,22, 'Bangkok')); +end; +/ + + +-- test package: public function +create or replace package package_test as + function get_age() return int; +end package_test; +/ +create or replace package body package_test as + function get_age() return int is +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +end package_test; +/ +select package_test.get_age(); +DROP PACKAGE package_test; + +-- test package: private function +create or replace package package_test as + a int; +end package_test; +/ +create or replace package body package_test as + function get_age() return int is +declare + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; + va r2_arr; + vb int; +begin + va(1).col1 := ARRAY[('Vera' ,32, 'Paris'),('Zera' ,25, 'London')]; + va(2).col1 := ARRAY[('Jack' ,22, 'New York'),('Li' ,18, 'Beijing')]; + vb := va[2].col1[2].age; + return vb; +end; +end package_test; +/ +DROP PACKAGE package_test; + +-- test package: public variable +create or replace package package_test as + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; +end package_test; +/ +create or replace package body package_test as + a int; +end package_test; +/ +DROP PACKAGE package_test; + +-- test package: private variable +create or replace package package_test as + a int; +end package_test; +/ +create or replace package body package_test as + type r1 is record (name varchar2(50), age int, address varchar2(20)); + type r1_arr is VARRAY(10) of r1; + type r2 is record (col1 r1_arr); + type r2_arr is VARRAY(10) of r2; +end package_test; +/ + +drop package package_test; +select typname from pg_type where typtype = 'c' and typarray != 0 and typnamespace = (select oid from pg_namespace where nspname = current_schema) order by typname desc; + +--test table of record variable initialization +create or replace package package_test is +type rec_data 
is record(aa varchar2(10)); +type tab_data is table of rec_data; +procedure p1; +end package_test; +/ +create or replace package body package_test is +procedure p1 is +var1 tab_data; +begin +var1 :=tab_data(); +end; +end package_test; +/ +call package_test.p1(); +drop package package_test; + +--test duplicated type name +-- create type "package_test.tab_data" as (a int, b int); +-- create or replace package package_test is +-- type rec_data is record(aa varchar2(10)); +-- type tab_data is table of rec_data; +-- procedure p1; +-- end package_test; +-- / +-- DROP TYPE "package_test.tab_data"; + +--test record.array.extend +create or replace package pck1 is +type ta is varray(10) of int; +type tb is record(va ta); +procedure p1; +end pck1; +/ + +create or replace package body pck1 is +procedure p1() is +v1 tb; +begin +v1.va.extend(9); +raise NOTICE '%',v1.va.first; +raise NOTICE '%',v1.va.count(); +v1.va.delete(); +end; +end pck1; +/ + +call pck1.p1(); +DROP PACKAGE pck1; + +-- test array of table nest same record +create or replace package pkg100 +as +type ty0 is record (a int, b varchar); +type ty1 is table of ty0 index by varchar; +type ty2 is varray(10) of ty0; +va ty1; +vb ty2; +procedure p1; +end pkg100; +/ +create or replace package body pkg100 +as +procedure p1 as +begin +va ('abc') := (1,'a'); +vb (1) := (2, 'b'); +raise info 'va: %', va; +raise info 'vb: %', vb; +end; +end pkg100; +/ + +call pkg100.p1(); + +drop package pkg100; +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- +drop function if exists get_age(); +drop type if exists customer; +drop type if exists info; + +-- clean up -- +drop schema if exists plpgsql_arrayofrecord cascade; diff --git a/src/test/regress/sql/plpgsql_array_opengauss.sql b/src/test/regress/sql/plpgsql_array_opengauss.sql new file mode 100644 index 000000000..ba897c4e4 --- /dev/null +++ b/src/test/regress/sql/plpgsql_array_opengauss.sql @@ -0,0 +1,412 @@ +-- FOR PL/pgSQL VARRAY scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_array_opengauss; +create schema plpgsql_array_opengauss; +set current_schema = plpgsql_array_opengauss; + +-- initialize tables -- +create table customers ( + id number(10) not null, + c_name varchar2(50) not null, + c_age number(8) not null, + c_address varchar2(50), + salary float(2) not null, + constraint customers_pk primary key (id) +); + +insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (2, 'Zera' ,25, 'London', 5999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98); +insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00); +insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00); + +create table tmp(a int, b varchar(100)); + +-- initialize functions, types etc. 
--
+create type mytype as (
+    id integer,
+    biome varchar2(100)
+);
+
+create type mytype2 as (
+    id integer,
+    locale myType
+);
+
+-- it turns any input into (9, (1, 'space'))
+create or replace function myfunc(habitat in mytype2)
+return mytype2
+is
+    ret mytype2;
+begin
+    ret := (9, (1, 'space'));
+    return ret;
+end;
+/
+
+-- type and function share the same name --
+-- Oh~oh, what's gonna happen?? --
+create type functype as (
+    id integer,
+    locale myType
+);
+
+create or replace function functype(habitat in mytype2)
+return mytype2
+is
+    ret mytype2;
+begin
+    ret := (-1, (1, 'unknown realm'));
+    return ret;
+end;
+/
+
+-- test function datatype priority --
+create or replace function name_list(inint in integer)
+return integer
+is
+    ret integer;
+begin
+    ret := 1;
+    return ret;
+end;
+/
+
+----------------------------------------------------
+------------------ START OF TESTS ------------------
+----------------------------------------------------
+
+-- general declare + assign + access --
+-- support varray with parentheses in SQL --
+DECLARE
+    CURSOR c_customers is
+        SELECT c_name FROM customers order by id;
+    type c_list is varray (6) of customers.c_name%type;
+    name_list c_list := c_list();
+    counter integer := 0;
+BEGIN
+    FOR n IN c_customers LOOP
+        counter := counter + 1; -- 6 iterations --
+        name_list.extend;
+        name_list(counter) := n.c_name;
+    END LOOP;
+
+    insert into tmp values (NULL, name_list(1));
+    insert into tmp values (NULL, name_list(3));
+    name_list(2) := name_list(3);
+    insert into tmp values (NULL, name_list[2]); -- same as last one --
+END;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- ERROR: a mix of parens and brackets is not allowed --
+declare
+    cursor c_customers is (select c_name from customers order by id);
+    type c_list is varray(6) of customers.c_name%type;
+    name_list c_list := c_list();
+    counter integer := 0;
+begin
+    for n in c_customers loop
+        counter := counter + 1; -- 6 iterations --
+        name_list.extend;
+        name_list(counter) := n.c_name;
+    end loop;
+
+    insert into tmp values (null, name_list(1]);
+end;
+/
+
+-- parentheses support in SQL 2 --
+-- array of record --
+declare
+    cursor c_customers is (select * from customers order by id);
+    type c_list is varray(6) of customers;
+    customer_list c_list := c_list();
+    counter integer := 0;
+    name varchar2(50) := '';
+begin
+    for n in c_customers loop
+        counter := counter + 1;
+        customer_list.extend;
+        customer_list(counter) := (n.id, n.c_name, n.c_age, n.c_address, n.salary); -- insert record --
+        name := customer_list(counter).c_name;
+        if customer_list(counter).c_age <= 30 then
+            insert into tmp values (null, customer_list(counter).c_name);
+        else
+            insert into tmp values (null, name);
+        end if;
+        insert into tmp values (customer_list(counter).c_age, customer_list(counter).salary); -- parentheses --
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- batch initialization, batch insert varray --
+declare
+    type students is varray(6) of varchar2(10);
+    type grades is varray(6) of integer;
+    marks grades := grades('98', 97, 74 + 4, (87), 92, 100); -- batch initialize --
+    names students default students('none'); -- default --
+    total integer;
+begin
+    names := students(); -- should append NULL then do the coerce --
+    names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert --
+    total := names.count;
+    insert into tmp values (total, null);
+    for i in 1 .. total loop
+        insert into tmp values (marks(i), names(i));
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- the block above will be rewritten into this form (close to this form, but with parens and coerces) --
+declare
+    type students is varray(6) of varchar2(10);
+    type grades is varray(6) of integer;
+    marks grades := array['98', 97, 74 + 4, (87), 92, 100]; -- batch initialize --
+    names students default array['none']; -- default --
+    total integer;
+begin
+    names := array[NULL];
+    names := array['Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss']; -- batch insert --
+    total := names.count;
+    insert into tmp values (total, null);
+    for i in 1 .. total loop
+        insert into tmp values (marks(i), names(i));
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- test of PL/SQL data type instantiation --
+-- If we specified our type (using PL/SQL-like instantiation), all varray members .. --
+-- should be able to cast to the correct data type. --
+declare
+    type students is varray(5) of varchar2(10);
+    names students;
+begin
+    names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+    for i in 1 .. 5 loop
+        insert into tmp values (null, names(i));
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- However, if we use the PL/pgSQL style instantiation, it is not guaranteed --
+-- error out for this one --
+declare
+    type students is varray(5) of varchar2(10);
+    names students;
+begin
+    -- we can only make assumptions based on the first element, which is not always a good answer --
+    names := array[1, 'Zera ', 'Alice', 'Jim ', 'Kevin'];
+    for i in 1 .. 5 loop
+        insert into tmp values (null, names(i));
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- test of uneven brackets --
+-- error out --
+declare
+    type students is varray(5) of varchar2(10);
+    names students;
+begin
+    names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+    for i in 1 .. 5 loop
+        insert into tmp values (null, names(i]);
+    end loop;
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- Using a composite type defined outside of the procedure block --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        mytype2(1, (1, 'ground')),
+        mytype2(1, (2, 'air'))
+    );
+begin
+    aa.extend(10);
+    aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+    insert into tmp values (aa(1).id, aa(2).locale.biome);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- Note: array can handle proper type-in-type declaration for now --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        mytype2(1, mytype(1, 'ground')),
+        mytype2(1, mytype(2, 'air'))
+    );
+begin
+    aa.extend(10);
+    aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+    insert into tmp values (aa(1).id, aa(2).locale.biome);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        mytype2(1, mytype(1, 'ground')),
+        mytype2(1, mytype(2, 'air'))
+    );
+begin
+    aa.extend(10);
+    aa(2) := mytype2(2, mytype(3, 'water'));
+    insert into tmp values (aa(1).id, aa(2).locale.biome);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- working with functions --
+-- should be the same, except for the result; make sure functions are correctly identified --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        myfunc((1, mytype(1, 'ground'))), -- for records, we need an extra parens to work --
+        myfunc((1, mytype(2, 'air')))
+    );
+begin
+    aa.extend(10);
+    insert into tmp values (aa(1).id, aa(2).locale.biome);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- This is what's going to happen when functions and types share the same name --
+-- (Don't try this at home) --
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        functype(1, mytype(1, 'ground')), -- we are prioritizing types here --
+        functype(1, mytype(2, 'air'))
+    );
+begin
+    aa.extend(10);
+    insert into tmp values (aa(1).id, aa(2).locale.biome);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+drop type functype; -- abandon type functype --
+
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        functype((1, mytype(1, 'ground'))), -- here we have to use function functype --
+        functype((1, mytype(2, 'air')))
+    );
+begin
+    aa.extend(10);
+    insert into tmp values (aa(1).id, aa(2).locale.biome);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+drop function functype; -- oops! --
+
+declare
+    type finaltype is varray(10) of mytype2;
+    aa finaltype := finaltype(
+        functype((1, mytype(1, 'ground'))), -- not sure --
+        functype((1, mytype(2, 'air')))
+    );
+begin
+    aa.extend(10);
+end;
+/
+
+-- Multi-dimensional arrays --
+declare
+    type arrayfirst is varray(10) of int;
+    arr arrayfirst := arrayfirst(1, 2, 3);
+    mat int[][] := ARRAY[arr, arr]; -- PLpgSQL style --
+begin
+    insert into tmp values (null, mat(1)(2));
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- assignments && statements test --
+declare
+    type arrayfirst is varray(10) of int;
+    arr arrayfirst := arrayfirst(1, 2, 3);
+    mat int[][] := ARRAY[arr, ARRAY[4, 5 ,6]]; -- PLpgSQL style --
+begin
+    insert into tmp(a) values (mat[1](2)), (mat(1)[2]), (mat(1)(2)), (mat[1][2]);
+end;
+/
+
+select * from tmp order by 1, 2;
+truncate tmp;
+
+-- error out!
-- +declare + type arrayfirst is varray(10) of int; + arr arrayfirst := arrayfirst(1, 2, 3); + type arraySecond is varray(10) of arrayfirst; -- Nested types are not supported, yet -- + mat arraySecond := arraySecond(arr, arr); +begin + insert into tmp values (null, mat(1)(2)); +end; +/ + +select * from tmp order by 1, 2; +truncate tmp; + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- + +drop function if exists functype; +drop function if exists myfunc; +drop table if exists tmp; +drop table if exists customers; +drop type if exists functype; +drop type if exists mytype2; +drop type if exists mytype; + +-- clean up -- +drop schema if exists plpgsql_array_opengauss cascade; \ No newline at end of file diff --git a/src/test/regress/sql/plpgsql_assign_list.sql b/src/test/regress/sql/plpgsql_assign_list.sql new file mode 100644 index 000000000..76fe3cc5c --- /dev/null +++ b/src/test/regress/sql/plpgsql_assign_list.sql @@ -0,0 +1,205 @@ +-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + +-- create new schema -- +drop schema if exists plpgsql_assignlist; +create schema plpgsql_assignlist; +set current_schema = plpgsql_assignlist; + +-- initialize table and type-- +create type o1 as (o1a int, o1b int); +create type o2 as (o2a o1, o2b int); +create type o3 as (o3a o2, o3b int); +create type o4 as (o4a o3, o4b int); +create type o5 as (o5a o2[], o5b int); +create type o6 as (o6a o5, o6b int); + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- test assign list without array: nested record +create or replace function get_age RETURNS integer as $$ +declare + type r1 is record (r1a int, r1b int); + type r2 is record (r2a r1, r2b int); + type r3 is record (r3a r2, r3b int); + va r3; +begin + va.r3a.r2a.r1a := 123; + raise info '%', va; + va := (((4,3),2),1); + raise info '%', va; + va.r3a.r2a.r1a := 456; + raise info '%', va; + return va.r3a.r2a.r1a; +end; +$$ language plpgsql; +select get_age(); + +-- test assign list without array: nested composite type +create or replace function get_age RETURNS integer as $$ +declare + va o4; +begin + va.o4a.o3a.o2a.o1a := 123; + raise info '%', va; + va.o4a.o3a.o2a := (456, 789); + raise info '%', va; + return va.o4a.o3a.o2a.o1a; +end; +$$ language plpgsql; +select get_age(); + +-- test assign list with array: array in first three word +create or replace function get_age RETURNS integer as $$ +declare + TYPE o3_arr is VARRAY(10) of o3; + va o3_arr; +begin + va(1).o3a.o2a.o1a := 123; + raise info '%', va; + va(2).o3a.o2a := (456, 789); + raise info '%', va; + va(3).o3a := ((123, 456),789); + raise info '%', va; + return va(2).o3a.o2a.o1b; +end; +$$ language plpgsql; +select get_age(); + + +-- test assign list with array: array in first three word +create or replace function get_age RETURNS integer as $$ +declare + va o5; +begin + va.o5a(1).o2a.o1a := 123; + raise info '%', va; + va.o5a(2).o2a := (456, 789); + raise info '%', va; + va.o5a(3) := ((123, 456),789); + raise info '%', va; + return va.o5a(2).o2a.o1a; +end; +$$ language plpgsql; +select get_age(); + +-- test assign list with array: array in first three word +create or replace function get_age RETURNS integer as $$ +declare + va o6; +begin + va.o6a.o5a(1).o2a.o1a := 123; + raise info '%', va; + va.o6a.o5a(2).o2a 
:= (456, 789); + raise info '%', va; + va.o6a.o5a(3) := ((123, 456),789); + raise info '%', va; + return va.o6a.o5a(2).o2a.o1a; +end; +$$ language plpgsql; +select get_age(); + +-- test assign list with array: with record nested +create or replace function get_age RETURNS integer as $$ +declare + TYPE r1 is RECORD (r1a int, r1b int); + TYPE r1_arr is VARRAY(10) of r1; + TYPE r2 is RECORD (r2a r1_arr); + va r2; +begin + va.r2a(1).r1a := 123; + raise info '%', va.r2a(1).r1a; + va.r2a(2) := (456, 789); + raise info '%', va; + va.r2a(2).r1b := 999; + raise info '%', va; + return va.r2a(2).r1b; +end; +$$ language plpgsql; +select get_age(); + +-- test assign list with table: with record nested +create or replace function get_age RETURNS integer as $$ +declare + TYPE r1 is RECORD (r1a int, r1b int); + TYPE r1_arr is table of r1 index by varchar2(10); + TYPE r2 is RECORD (r2a r1_arr); + va r2; +begin + va.r2a('a').r1a := 123; + raise info '%', va.r2a('a').r1a; + va.r2a('aa') := (456, 789); + raise info '%', va; + va.r2a('aa').r1b := 999; + raise info '%', va; + return va.r2a('aa').r1b; +end; +$$ language plpgsql; +select get_age(); + +--test assign list with array: array not in first three word +create or replace function get_age RETURNS integer as $$ +declare + TYPE r1 is RECORD (r1a o6, r1b int); + va r1; +begin + va.r1a.o6a.o5a(1).o2a.o1a := 123; + raise info '%', va; + va.r1a.o6a.o5a(2).o2a := (456, 789); + raise info '%', va; + va.r1a.o6a.o5a(3) := ((123, 456),789); + raise info '%', va; + return va.r1a.o6a.o5a[2].o2a.o1a; +end; +$$ language plpgsql; +select get_age(); + +--test o1.col1.col2 ref +create type ct as (num int,info text); +create type ct1 as (num int,info ct); +create or replace package autonomous_pkg_a IS +count_public ct1 := (1,(1,'a')::ct)::ct1; +function autonomous_f_public(num1 int) return int; +end autonomous_pkg_a; +/ +create or replace package body autonomous_pkg_a as +count_private ct1 :=(2,(2,'b')::ct)::ct1; +function autonomous_f_public(num1 int) return int +is +declare +re_int int; +begin +count_public.num = num1 + count_public.num; +count_private.num = num1 + count_private.num; +raise info 'count_public.info.num: %', count_public.info.num; +count_public.info.num = count_public.info.num + num1; +raise info 'count_public.info.num: %', count_public.info.num; +count_private.info.num = count_private.info.num + num1; +re_int = count_public.num +count_private.num; +return re_int; +end; +end autonomous_pkg_a; +/ + +select autonomous_pkg_a.autonomous_f_public(10); +drop package autonomous_pkg_a; +drop type ct1; +drop type ct; + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- +drop function if exists get_age(); +drop type if exists o6; +drop type if exists o5; +drop type if exists o4; +drop type if exists o3; +drop type if exists o2; +drop type if exists o1; + +-- clean up -- +drop schema if exists plpgsql_assignlist cascade; diff --git a/src/test/regress/sql/plpgsql_assign_value_to_array_attribute.sql b/src/test/regress/sql/plpgsql_assign_value_to_array_attribute.sql new file mode 100644 index 000000000..e3b64d956 --- /dev/null +++ b/src/test/regress/sql/plpgsql_assign_value_to_array_attribute.sql @@ -0,0 +1,464 @@ +-- FOR PL/pgSQL VARRAY Assign Value to Attribute scenarios -- + +-- check compatibility -- +-- create new schema -- +drop schema if exists plpgsql_arrayassign; +create schema plpgsql_arrayassign; +set current_schema = plpgsql_arrayassign; + +-- initialize 
table and type-- +create type info as (name varchar2(50), age int, address varchar2(20), salary float(2)); +create type customer as (id number(10), c_info info); +create table customers (id number(10), c_info info); + +insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00)); +insert into customers (id, c_info) values (2, ('Zera' ,25, 'London', 5999.00)); +insert into customers (id, c_info) values (3, ('Alice' ,22, 'Bangkok', 9800.98)); +insert into customers (id, c_info) values (4, ('Jim' ,26, 'Dubai', 18700.00)); +insert into customers (id, c_info) values (5, ('Kevin' ,28, 'Singapore', 18999.00)); +insert into customers (id, c_info) values (6, ('Gauss' ,42, 'Beijing', 32999.00)); + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- test assign value by := +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + customer_list(2).id := 3; + customer_list(2).c_info := ('Alice' ,22, 'Bangkok', 9800.98); + customer_list(3).id := 4; + customer_list(3).c_info := ('Jim' ,26, 'Dubai', 18700.00); + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + +-- test assign value by select into +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + select id into customer_list(2).id from customers where id = 3; + select c_info into customer_list(2).c_info from customers where id = 3; + select id into customer_list(3).id from customers where id = 4; + select c_info into customer_list(3).c_info from customers where id = 4; + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + +-- test assign value by fetch into +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + CURSOR C1 IS SELECT id FROM customers ORDER by id; + CURSOR C2 IS SELECT c_info FROM customers ORDER by id; +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + OPEN C1; + OPEN C2; + FETCH C1 into customer_list(2).id; + FETCH C2 into customer_list(2).c_info; + FETCH C1 into customer_list(3).id; + FETCH C2 into customer_list(3).c_info; + CLOSE C1; + CLOSE C2; + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + +-- test assign value by EXECUTE IMMEDIATE into +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + EXECUTE IMMEDIATE 'select id from customers where id = :1' + INTO customer_list(2).id + USING IN 3; + EXECUTE IMMEDIATE 'select c_info from customers where id = :1' + INTO customer_list(2).c_info + USING IN 3; + EXECUTE IMMEDIATE 'select id from customers where id = :1' + INTO customer_list(3).id + USING IN 4; + EXECUTE IMMEDIATE 'select c_info from customers where id = :1' + INTO 
customer_list(3).c_info + USING IN 4; + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + +-- test assign value in two-dimensional arrays +-- (only := support assign value in two-dimensional arrays) +create or replace function get_customers RETURNS customer[] as $$ +declare + customer_list customer[][]; +begin + customer_list:= array[[(1, ('Vera' ,32, 'Paris', 22999.00)),(2, ('Zera' ,25, 'London', 5999.00))], + [(3, ('Alice' ,22, 'Bangkok', 9800.98)),(4, ('Jim' ,26, 'Dubai', 18700.00))]]; + customer_list(1)(1).id := 5; + customer_list(1)(1).c_info := ('Kevin' ,28, 'Singapore', 18999.00); + customer_list(1)(2).id := 6; + customer_list(1)(2).c_info := ('Gauss' ,42, 'Beijing', 32999.00); + customer_list(2)(1).id := 7; + customer_list(2)(1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + customer_list(2)(2).id := 8; + customer_list(2)(2).c_info := ('Jack' ,56, 'Hongkong', 8999.00); + return customer_list; +end; +$$ language plpgsql; +select get_customers(); + +-- test assign value with loop, if, case +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + CURSOR C1 IS SELECT * FROM customers ORDER by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_list(counter).id := n.id; + customer_list(counter).c_info := n.c_info; + end loop; + + IF counter > 1 THEN + counter := counter + 1; + select id into customer_list(counter).id from customers where id = 1; + select c_info into customer_list(counter).c_info from customers where id = 1; + ELSE + counter := counter + 1; + customer_list(counter).id := 7; + customer_list(counter).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + END IF; + + CASE counter + WHEN 1 THEN + customer_list(counter + 1).id := 7; + customer_list(counter + 1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + WHEN 2 THEN + customer_list(counter + 1).id := 8; + customer_list(counter + 1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + ELSE + customer_list(counter + 1).id := 9; + customer_list(counter + 1).c_info := ('Bob' ,24, 'Shanghai', 28999.00); + END CASE; +return customer_list; +end; +$$ language plpgsql; +select get_customers(); + +--ERROR: assign value to deep level attribute is not supported yet +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).c_info.name; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with T_CWORD type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).a.b.c; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with T_CWORD type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).a.b.c.d.e.f; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with T_DATUM type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).customer_list.id; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect 
attribute name with T_DATUM type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).customer_list.c; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with T_DATUM type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).customer_list; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with type name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).c_list; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with T_WORD type +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).a; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with keyword +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).if; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect attribute name with keyword +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1).end; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test missing attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + select 'bob' into customer_list(1). ; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test missing attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1). 
:= 'bob'; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test illegal attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1).6a := 'bob'; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test illegal attribute name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1).%# := 'bob'; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect array name : spelling error +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_lis(1).id := 3; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect array name : scalar variable +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + a int; +begin + a(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); + +--ERROR: test incorrect array name : array type name +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + a int; +begin + c_list(1).id := 3; + return customer_list; +end; +$$ language plpgsql; + +--ERROR: test incorrect array name : array element type is not composite +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + type i_list is VARRAY(10) of int; + customer_list c_list:=c_list(); + a i_list := i_list(); +begin + a(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); + +--ERROR: test incorrect array name : array element type is record/row +create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); + a customers; +begin + a(1).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); + +--ERROR: test array dimensions exceeds the maximum. 
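+-- (the block below chains seven subscripts, customer_list(1)(2)(3)(4)(5)(6)(7), which should be more than the supported number of array dimensions) --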
+create or replace function get_customers RETURNS customer[] as $$ +declare + type c_list is VARRAY(10) of customer; + customer_list c_list:=c_list(); +begin + customer_list(1) := (1, ('Vera' ,32, 'Paris', 22999.00)); + customer_list(2) := (2, ('Zera' ,25, 'London', 5999.00)); + customer_list(1)(2)(3)(4)(5)(6)(7).id := 3; + return customer_list; +end; +$$ language plpgsql; +call get_customers(); + +-- Test PLPGSQL ARRAY defined with type from different schema +drop schema if exists test_pl_array_schema; +create schema test_pl_array_schema; +create TYPE test_pl_array_schema.desc_tab as ( + col_type int , + col_max_len int , + col_name VARCHAR2(32) , + col_name_len int , + col_schema_name VARCHAR2(32) , + col_schema_name_len int , + col_precision int , + col_scale int , + col_charsetid int , + col_charsetform int , + col_null_ok BOOLEAN); +create TYPE test_pl_array_schema.varchar2_table as (str VARCHAR2(2000)); +create TYPE test_pl_array_schema.number_table as (num NUMBER); +create TYPE test_pl_array_schema.date_table as (dat DATE); +create TYPE test_pl_array_schema.comp_table as (a int, b VARCHAR2(10)); + +create or replace function get_table_array RETURNS void as $$ +declare + type num_arr is VARRAY(10) of test_pl_array_schema.number_table; + v_a num_arr:=num_arr(); + type varchar2_arr is VARRAY(10) of test_pl_array_schema.varchar2_table; + v_b varchar2_arr:=varchar2_arr(); + type date_arr is VARRAY(10) of test_pl_array_schema.date_table; + v_c date_arr:=date_arr(); + type tab_arr is VARRAY(10) of test_pl_array_schema.desc_tab; + v_d tab_arr:=tab_arr(); + type comp_arr is VARRAY(10) of test_pl_array_schema.comp_table; + v_e comp_arr:=comp_arr(); +begin + v_a.extend(1); + v_a(1).num := 1; + v_b.extend(1); + v_b(1).str := 'aaa'; + v_c.extend(1); + v_c(1).dat := '2003-04-12 04:05:06'; + v_d.extend(1); + v_d(1).col_type := 22; + v_e.extend(1); + v_e(1) := (11,'haha'); + RAISE NOTICE '% % % % % % ', v_a(1).num, v_b(1).str, v_c(1).dat, v_d(1).col_type, v_e(1).a, v_e(1).b; +end; +$$ language plpgsql; +call get_table_array(); + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- + +drop function if exists get_table_array; +drop type if exists test_pl_array_schema.comp_table; +drop type if exists test_pl_array_schema.date_table; +drop type if exists test_pl_array_schema.number_table; +drop type if exists test_pl_array_schema.varchar2_table; +drop type if exists test_pl_array_schema.desc_tab; +drop function if exists get_customers; +drop table if exists customers; +drop type if exists customer; +drop type if exists info; + +-- clean up -- +drop schema if exists test_pl_array_schema cascade; +drop schema if exists plpgsql_arrayassign cascade; diff --git a/src/test/regress/sql/plpgsql_bulk_collect.sql b/src/test/regress/sql/plpgsql_bulk_collect.sql new file mode 100644 index 000000000..a3cccc51a --- /dev/null +++ b/src/test/regress/sql/plpgsql_bulk_collect.sql @@ -0,0 +1,461 @@ +-- FOR PL/pgSQL bulk collect into scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_bulk_collect; +create schema plpgsql_bulk_collect; +set current_schema = plpgsql_bulk_collect; + +-- initialize tables -- +create table customers ( + id number(10) not null, + c_name varchar2(50), + c_age number(8) not null, + c_address varchar2(50), + salary float(2) not null, + constraint customers_pk primary key (id) +); + +insert into customers (id, 
c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (2, '' ,25, 'London', 5999.00); -- a missing value here +insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98); +insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00); +insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00); +insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00); + +create table bigtmp(a int, b varchar(10000)); +create table tmp(a int, b varchar(100)); + +create type mytype as ( + id integer, + biome varchar2(100) +); + +create table biomebook ( + id integer, + b_entry mytype +); + +insert into biomebook values (1, (1, 'savanna')); +insert into biomebook values (2, (2, 'giant-tree-taiga')); +insert into biomebook values (3, (9, 'nether')); + +-- returns a biome record -- +create or replace function biofunc() +return mytype +is + ret mytype; +begin + ret := (2, 'giant-tree-taiga'); + return ret; +end; +/ + +-- test function into target priority -- +create or replace function id_arr(inint in integer) +return integer +is + ret integer; +begin + ret := 1234; + return ret; +end; +/ + +---------------------------------------------------- +------------------ START OF TESTS ------------------ +---------------------------------------------------- + +-- Scene1: select ... bulk collect into -- +-- single scalar out -- +declare + type i_list is varray(6) of integer; + i_arr i_list; +begin + select 1 bulk collect into i_arr; + for i in 1..i_arr.count loop + dbe_output.print_line('id(' || i || '): ' || i_arr(i)); + end loop; +end; +/ + +-- multiple scalars out -- +declare + type i_list is varray(6) of integer; + i_arr i_list; +begin + select 1 bulk collect into i_arr from customers; + for i in 1..i_arr.count loop + dbe_output.print_line('id(' || i || '): ' || i_arr(i)); + end loop; +end; +/ + +-- records handling -- +declare + type ty_list is varray(6) of mytype; + ty_arr ty_list; +begin + select (1, 'savanna')::mytype bulk collect into ty_arr; + for i in 1..ty_arr.count loop + dbe_output.print_line('biome record: ' || ty_arr(i)); + end loop; +end; +/ + +declare + type ty_list is varray(6) of mytype; + ty_arr ty_list; +begin + select biofunc() bulk collect into ty_arr; + for i in 1..ty_arr.count loop + dbe_output.print_line('biome record: ' || ty_arr(i)); + end loop; +end; +/ + +declare + type ty_list is varray(6) of mytype; + ty_arr ty_list; +begin + select b_entry bulk collect into ty_arr from biomebook order by id; + for i in 1..ty_arr.count loop + dbe_output.print_line('biome record: ' || ty_arr(i)); + end loop; +end; +/ + +-- varray handling -- +declare + i_arr int[]; +begin + select 1 bulk collect into i_arr from customers; + for i in 1..i_arr.count loop + dbe_output.print_line('id(' || i || '): ' || i_arr(i)); + end loop; +end; +/ + +-- success, single target array -- +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; +begin + select id bulk collect into id_arr from customers order by id DESC; + for i in 1..id_arr.count loop + dbe_output.print_line('id(' || i || '): ' || id_arr(i)); + end loop; +end; +/ + +-- success, multi target support +declare + type tab is varray(6) of mytype; + tab1 tab := tab(); +begin + select id, c_name bulk collect into tab1 from customers order by id DESC; + raise info '%', tab1; 
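+    -- tab1 should now hold one (id, c_name) pair per customer row, each coerced into a mytype element --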
+end; +/ + +declare + type name_list is varray(6) of customers.c_name%type; + name_arr name_list; + cc varchar(100); +begin + select c_name bulk collect into name_arr from customers order by id; + for i in 1..name_arr.count loop + dbe_output.print_line('name(' || i || '): ' || name_arr(i)); + end loop; +end; +/ + +-- this will take the entire array as a single output -- +-- error out now -- +declare + type name_list is varray(6) of customers.c_name%type; + name_arr name_list; +begin + select array(select c_name from customers order by id) bulk collect into name_arr; + for i in 1..name_arr.count loop + dbe_output.print_line('name(' || i || '): ' || name_arr(i)); + end loop; +end; +/ + +-- multi target, correct answer -- +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; + type name_list is varray(6) of customers.c_name%type; + name_arr name_list; +begin + select id, c_name bulk collect into id_arr, name_arr from customers order by id; + for i in 1..name_arr.count loop + dbe_output.print_line('id: ' || id_arr(i) || ' with name(' || i || '): ' || name_arr(i)); + end loop; +end; +/ + +-- should be able cast all elements -- +declare + type name_list is varray(6) of customers.c_name%type; + name_arr name_list; +begin + select count(*) bulk collect into name_arr from customers; + for i in 1..name_arr.count loop + dbe_output.print_line('result ' || i || ': ' || name_arr(i)); + end loop; +end; +/ + +declare + type name_list is varray(6) of varchar2; + name_arr name_list; +begin + select 99 bulk collect into name_arr from customers; -- 6 of them -- + for i in 1..name_arr.count loop + dbe_output.print_line('result ' || i || ': ' || name_arr(i)); + end loop; +end; +/ + +declare + type id_list is varray(6) of integer; + id_arr id_list; +begin + select '696' bulk collect into id_arr from customers; + for i in 1..id_arr.count loop + dbe_output.print_line('result ' || i || ': ' || id_arr(i)); + end loop; +end; +/ + +-- intense bulk collect -- +-- 'massive' insert -- +insert into bigtmp select generate_series(1, 10000), '1'; + +declare + type i_list is varray(60000) of integer; + i_arr i_list; +begin + select a bulk collect into i_arr from bigtmp order by a; + dbe_output.print_line('result 556: ' || i_arr(556)); + dbe_output.print_line('result 1556: ' || i_arr(1556)); + dbe_output.print_line('result 9999: ' || i_arr(9999)); +end; +/ + +declare + type i_list is varray(60000) of integer; + i_arr i_list; +begin + select b bulk collect into i_arr from bigtmp order by a; -- typecasts + dbe_output.print_line('result 556: ' || i_arr(556)); + dbe_output.print_line('result 1556: ' || i_arr(1556)); + dbe_output.print_line('result 9999: ' || i_arr(9999)); +end; +/ + +-- toasted -- +insert into bigtmp values (99999997, 
'O2Co3PFc0Bdx0pAf55wf7vFKI6sThhbc7dG3aLGbERpYYPRQ6uKoQKzROW2V3ciMkWoSXTeqs592ifC9TSkkubQvF3Ca05fzevCP1Sm3j4jQGyr9FoF2q9iaMVc6XlVsY3xcJpOoWwtzhhgBbSxwjD3w57FfwJHoRRmcftJAD2qOOsqzFXd8A2MGqqDLWdjQGxZQKwS8gtBYX36oTyMxdIf4XHM4b8mtESoQX7FdiMASWJoWbEkS5dAgiVFoYVHZvKx3PCCGf3xXkBSwC4F9kQccZpdgO4r7fLoSc0IsfO2eLxqzOP4yoLPMFobtOU0rJSpddD2BCo4IpW6LCQ2q5HQzQbBDCE3MfVng8ZVpakXoTP5KS4DJTvALLtSYxvrXZgr5Gki8pcrQQov7JDpJ8wclWF1tZ9fOILGtg5TOnDo8sSNgiPUYt4rzXllS2GgobxYlwAQH4uIDhXlYs8NbATQ13ro6WhLrjJFmMW7FNORUcgxGr76ejAG0oGKkmpZ8sXTn26ZiNNI5gV4pr07JHVTc5seTYVYPNg3dkVOhkVhLlFSaQeocerWnsNXJ3Qc7oMFZlfGJyquNzuXCr9yoEKx2BRmidu8L5pIwNvZOJgSjuP3mMkTXAYEreBFDCDuoUuPiRrxF5dFPNSqYGh8Vwly4Pnm2E5OxWnkA37VtbCx7db7EIXRps1ghUUUJPlPx63doze3w325O0voHu5kRFRqe6TFXAGDY2cDY4m91frMcaG06R7XG418Eh3aQevbEp3QpghAwIMTa4vPpceiCEWHIo9Tu540wPbmRRKquFyxwA66Yjl60ROybjhc7w0hiR1q0JPTVIEjfTGih8YEo5Bok2RjSMBpUyhqiAt64AYtdbrN4Iy2znLDxGs1TyoNsu46AUKKi5WOgGDb8VjIn5mluYdvbZ5hfcOI9XVNq0U80o7JBAYZih53klMLQ49bC2z0cWoBAa37WVpSeBUEM3gQw5stegIgU18ACJzD9XhBeIXnVbY1gGQy1QHJ7QouUnVVEt3cTYs8L5k3VPP4VqcppEz9UvcnwysS37pXzOZr03oBTGvM5iQQ7AgtuRBJUkzrp7XmE3wfFwxHThtMEpHv6lJwQOtkrdqvCgH1F5PCsTDtBtPOAQSRYwPLVD5j8ifeg89zcPRQohM6ndzgA15tjl8Lc3ZhxSe81c3k7RuB91BJ00H8e5HImo5pSRnUDH5CL1X7sVPkBAZ7Lyk0ShZEznFVO9Cixd7T7goIgsL2yFOlOExpHriaZCXBWUQTHR9MyKlsAiEUWfgtP4vs6LgqW8NAzGhhzwsJMYkWOM0jnlEm0VMGzaU19ND4YYYSGnzUb3Ai8DWQOr23wVfOBPUtG6RsXOvNjjJ98safY4xxaVkOH1BE3Wsj3d6xwEL6SnGB9x7rxDrzuBKlai0DLaprmhUmpRc2JJTbBvJsRu8feLrAsgVwoU1dGezVCuT8oYHxRvgkblevX5Ud1DNaRGjsL738WzZsJJvD0nD7cOiHJI6mjapiNJ8tFnu1JgqfcQqMxbP4atBI4jH4M3zxWGk4XFtMkAEKjOInaaMEbZ21gkicL2WTNlkWyOZbiYemQjCG1tgmFcHKMipiK8cXhsQeN7bij5dMzqpvkJ3Pmc13YqJLTlZ70hXDj4CvzQVdDsZtc3AEr4z49D0AiYSiPWyf09yXnMrT7PoUDUsug9vsliygXXjvaonkKLEs7kpLDAm7B2YrHge4QJJ2rNPZRtIaWXLegZKIjArp8sU8Cln4Tx4lo8450OfxFElTtduBDlq23yDwBcMb4z0pHwGLlayi1bhQ0Ymz7eRAHKnuRJd3hQdlG7c7xbbPon0ETZFXq42dDDzKxbsRgFEOHhI1wf8TpJYksK7BMX5CJmse5MVRarAJHJHSbVsTvrza2U6bxEkOzk4VI06KdXyoOKLN0g0PCrphFn1xadvj5Czr0ZLmD9hwzfQXARJNlYjeGCKF9N4Vgm3QOSdyQ8eJSYhTmU9rLGl3wuMZCPCLui2tLk9knhAAo0rlf9sJY6e5snZhcyGSCUJ9zQCxT4S3sRuEGx0NQcnEE89exgqcuwcV7konvoXCysRlwxnfszsSKyomCZAfuV4YzcWDz1Tx3Vw1PpntqUDQtHW7eQC8P2POV2yquwnKGdL8dGRHhdwrmCigAENuL1DElNB7IxFLXOFV1l2VFmJp1yrlLDlfMWWJPhElcl6sOeglrpAHxpdvvWGbysY15GC5T4bPT8W4Lc1vfahUTMywDpAZfdHtBWkrjtEwNgE7TnT24vglckMJhPvjxIKlC0IMOP7cfWZm38CuqhYWaaStDemaIKb4UJh4iSp4lvyKcDSx4HfNrshrv2zBYAYAnxntJk54H2rPSNV2p5s8hPAZCrEjlX0w0gtyrmRD3gNHCPqNXqaKdgo5A0bLYYrzErjVP5UI5lyMMe5JuL2O8TN0OjnKbZ8uAOPX7sMJPQ1cJnNFOmoecAaeBgcvxF5Rm'); +insert into bigtmp values (99999998, 'Pn5VELvD'); +insert into bigtmp values (99999999, 
'5lwSzzCVOoH5rW4Eggq2JR3Ne4SJpZwxtc4aNp7ieQn2Zt5BK3yBaF7dUBcpqH28Z32bbLvxjbexI32JqI4cnVn6Xo86VAl7R7e7smveMsekLtJQi1LUDZg7iqtHG9yMnDbYJo6RVb5OCEe20VOA6iNLl0lW6YYGF9GrHMSOoASPtOmttN8erIDfzVHjOmJqmz7nAJ3q64dygavaIfhRYeH3wGnDnYSL4XRN5ayDpyTqKo2ZoRgFWhfcWHBRQsfziktBY6P4lx4Qra3237ytn0jSFVJfEsTSr7wdhBxF7oSNFt3xuKpJQ72GklM8CZbEeXHRju13lw64dFt6Nbim9POnLQtff7EjWLpaeGbR7al7jmkB9iljwsMEh1FReBgUuOHB9XUFG2LK4QyX81xLmDq60vp6KkQqovTwIUEDzpuuHIb2wGqVaPa91Vw7JwUYui9ZbtHA2K64lQ0XgnQhWxzwOy0gFqeBEsDZ2aj6GNSoH9FPmhJm9Dn7WQw5boIZgeb6b2tLnZyxLNp0yTAhdJmwXTitBm0kz93rfQBPGfwLF5SNTYEdAZvedlSTIIB7D2HVq1nEiPxCOrOQ3C9PgFaods6gthyu6BjO5Mjc6eshS8zVL1YQ4tNcI7FOmX1pikLlqpQdl2h1lJgJTtftmOlSTGl59Ptl583YgqZVgTFtjpdH0MLxQeKXhunr0vuhU3FfhaItFmdXeewBANsnh6QitWKRNMJOYS3YX5HEDTY75dfLSkBCK1rWz35HQYfacy7hl8Dz5pgJmn3hDPi7F1Hy9edvkzxE9eBc8f6nojCM5PyxmBSJE4bYpnF4swLZFKDmyxZ6yxszU6gawBNWJ7bMLAitobTNGNJrgFQCOajbXEOhx19Yb8L3xRWqSqky4dFAvx0bO2IQepn4yawklBDHamYeSoOOhUdzddLKHuUbnCCWdfSPP1mDpfJsbyZ4rPkC8LxA0UVibwKaSBfE4WYiaiw5S291AyMRfdPKDddWCROWUp1WUjW6d03hf5UAQHxfgrBrVgJ06V5IL0GYbeUOm2ceSyGqaxwhwf1Y5DiRsPS0sadcOHdc7WpnB1ysykaYkkgmuyldMMO2UJjDiJomeF6UZ1ZsTagAdieRM3H21ZJ4xv8abOBnwoZA5ZbwlBq7KpVYoaaJ8GL3MkkTLGDo2bWL4ZNTjKfzhMNOL5fVMXKGHC046Afl3sD5WUmW36rfGkkirQ0W9sLz0rxk7lnjKHk3yJXrhHmBoLux71DtTuieIDSbROqlYS9f7p92495hGCmx1wH1NIQ8dmb6AxoZjktLbzG8SMz7ZbfbLGjQovG9Xw3aekVvwENcCEfIypMtVynhvsKpqfDJY6pCgbfyTm1ymCd6QSFTJ26ywDJVT7yFvWbE1b4k3s4PD3CoRJYKC5OsZcvgFu7T2qXPGy9qn6RVlq95lbMZNsPbZ1USRfxPtm73Y7EISqf7UcsfqBbYV6OKdswAdinpNgesSncGBXTtCfPx5Ue8kuR6rDYvCQBoCIug0WxhEbTOxfAPZkXX6FhWtYtuSUu9w7L5I8xBKhGyeD3IA3H1YgD1HYpzl7CaznQaVvN5cxAisaBpW51ZI7NqVuxRKkJAsu5HKp9ExCoQk4EQygJCqPLgexGYVgUl2maQdfMMWAHZhVwH839BvYHeL9KwrrATj6Ts7hNd0WXT5LR97vpiqzQ891Z3eYVD2X4MkuOHWEuBFd8AnhjNn471eIGRPmLhzj5JtbEfSRHDv278kRqr7hxhBoPSilnoCywIqHkrHJBWi9AvLA4KaHC6vdmqOwpE7vebCxxuID5acMYXr55FkUWYGTOAzGRslw5H5xapvsgu7F20MCVYTuKEnbQiyzMhPDuqKGoJJtf97tYBRrGUgVVwTaUXcOiKcVoOnDEL7zKtvgGH8fx0X46ZefNGFLFGO2Kkt7z62Ht0muntXdQygIWwg0uDi2u610aFQWDOG3RF2SXk7GWeyNr2OjvdJ7wwRe7HGmbfBL87e0VWCIrIjMB3Dg1yolFcsuOD7GMUrEOYCACfyM0TsPkQqkSNq1j5bN8JqY6FUzswebtVL1t88Xw45ETOPZeu8QpiYO79ofs3ArGPGZUFW8b3G1tJSsnhJ7l2AtHxW6WDsOAnQ8RtNPQu8KOfaLqtSVd11eHVprmSZUbwlsHwx5Hf0UUR43UERuUWIqONOgZIjxukVUO5HqougHCoTKTpQvcZ4Bs0bN5HC82ddPc0VjhaNOL8zZgsy8aO0BYFr5TdkiXldv3LqXziauo0eDfxonfS8LSBRtVmrXVccpwI3pIpsg9c7h05JGh6mXfDGirdhuHz3JRXlMze6dbVOQIg3Q316fJMZixgNo3ttbr8OrJnS915xzUfcLaBx4eEmmi7EXplgWdHUJC3wNuXvyFGEBAxnoe1zyDl9rtOiinr33Wyf7ae0FcGjE2Eml4zvI6xeznXvDip9u1QnsIvYQHxw3MsClNh5WDhndp6DCUT'); + +declare + type s_list is varray(60000) of varchar(10000); + s_arr s_list; +begin + select b bulk collect into s_arr from bigtmp where a > 99999990 order by a; + dbe_output.print_line('result 99999997: ' || s_arr(1)); + dbe_output.print_line('result 99999998: ' || s_arr(2)); + dbe_output.print_line('result 99999999: ' || s_arr(3)); +end; +/ + +-- ERROR handling -- +-- syntax error -- +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; +begin + select 1 collect bulk into id_arr; + for i in 1..id_arr.count loop + dbe_output.print_line('id(' || i || '): ' || id_arr(i)); + end loop; +end; +/ + +-- insert into is not a into at all! 
-- +declare + type name_list is varray(6) of varchar2; + name_arr name_list; +begin + insert bulk collect into tmp values (1, '2'); +end; +/ + +-- should error out with type cast failure -- +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; +begin + select array(select id from customers order by id) bulk collect into id_arr; + for i in 1..id_arr.count loop + dbe_output.print_line('id(' || i || '): ' || id_arr(i)); + end loop; +end; +/ + +-- query returns more columns than bulk collect targets, error -- +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; +begin + select id, c_name bulk collect into id_arr from customers; + for i in 1..id_arr.count loop + dbe_output.print_line('id(' || i || '): ' || id_arr(i)); + end loop; +end; +/ + +-- multi-dimensional array -- +declare + i_arr int[][]; +begin + select 1 bulk collect into i_arr[1] from customers; + for i in 1..i_arr.count loop + dbe_output.print_line('id(' || i || '): ' || i_arr(1)(i)); + end loop; +end; +/ + +-- multiple targets, keep erroring out, type cast failure -- +declare + type id_list is varray(6) of customers.id%type; + id_arr id_list; + type name_list is varray(6) of customers.c_name%type; + name_arr name_list; +begin + select array(select c_name from customers order by id), + array(select id from customers order by id) + bulk collect into name_arr, id_arr; + for i in 1..name_arr.count loop + dbe_output.print_line('id: ' || id_arr(i) || ' with name(' || i || '): ' || name_arr(i)); + end loop; +end; +/ + +-- should report error, into something not an array -- +declare + type name_list is varray(6) of customers.c_name%type; + name_arr name_list; + cc record; +begin + select id bulk collect into cc from customers order by id DESC; + dbe_output.print_line('name count: ' || cc); +end; +/ + +-- .. 
even if the query actually returns just a single value --
+declare
+    type name_list is varray(6) of customers.c_name%type;
+    name_arr name_list;
+    cc integer;
+begin
+    select count(*) bulk collect into cc from customers;
+    dbe_output.print_line('name count: ' || cc);
+end;
+/
+
+-- returns nothing --
+declare
+    type s_list is varray(6) of integer;
+    s_arr s_list;
+begin
+    select a bulk collect into s_arr from tmp; -- this is empty --
+    dbe_output.print_line('ok, is empty');
+end;
+/
+
+
+-- Scene2: returning --
+declare
+    type id_list is varray(6) of customers.id%type;
+    id_arr id_list;
+begin
+    -- select id bulk collect into id_arr from customers;
+    delete from customers where id = 2 returning id bulk collect into id_arr;
+    dbe_output.print_line('delete id: ' || id_arr(1));
+
+    select count(*) bulk collect into id_arr from customers;
+    dbe_output.print_line('total left: ' || id_arr(1));
+rollback;
+end;
+/
+
+declare
+    type name_list is varray(6) of customers.c_name%type;
+    name_arr name_list;
+begin
+    -- select id bulk collect into id_arr from customers;
+    update customers set c_name = 'Carol' where id = 2 returning c_name bulk collect into name_arr;
+    dbe_output.print_line('updated name: ' || name_arr(1));
+    select c_name bulk collect into name_arr from customers order by id;
+    for i in 1..name_arr.count loop
+        dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+    end loop;
+rollback;
+end;
+/
+
+
+-- Scene3: fetch --
+-- error out: FETCH direction statements are not supported with bulk collect --
+declare
+    cursor c_customers is select c_name from customers order by id;
+    type c_list is varray (6) of customers.c_name%type;
+    name_arr c_list := c_list();
+begin
+    open c_customers;
+    fetch all in c_customers bulk collect into name_arr;
+    exit when c_customers%NOTFOUND;
+    close c_customers;
+
+    for i in 1..6 loop
+        dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+    end loop;
+end;
+/
+
+-- fetch ... bulk collect into ... limit ...
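+-- (limit 4 should fill only name_arr(1)..name_arr(4); looping to 6 below exercises the unfetched tail) --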
+declare + cursor c_customers is select c_name from customers order by id; + type c_list is varray (6) of customers.c_name%type; + name_arr c_list := c_list(); +begin + open c_customers; + fetch c_customers bulk collect into name_arr limit 4; + exit when c_customers%NOTFOUND; + close c_customers; + + for i in 1..6 loop + dbe_output.print_line('name(' || i || '): ' || name_arr(i)); + end loop; +end; +/ + +-------------------------------------------------- +------------------ END OF TESTS ------------------ +-------------------------------------------------- + +drop table if exists tmp; +drop table if exists biomebook; +drop table if exists bigtmp; +drop table if exists customers; +drop type if exists mytype cascade; + +-- clean up -- +drop schema if exists plpgsql_bulk_collect cascade; diff --git a/src/test/regress/sql/plpgsql_cursor_rowtype.sql b/src/test/regress/sql/plpgsql_cursor_rowtype.sql new file mode 100644 index 000000000..effcddd74 --- /dev/null +++ b/src/test/regress/sql/plpgsql_cursor_rowtype.sql @@ -0,0 +1,651 @@ +-- test cursor%type +-- check compatibility -- +-- create new schema -- +drop schema if exists plpgsql_cursor_rowtype; +create schema plpgsql_cursor_rowtype; +set current_schema = plpgsql_cursor_rowtype; +set behavior_compat_options='allow_procedure_compile_check'; + +create table emp (empno int, ename varchar(10), job varchar(10)); +insert into emp values (1, 'zhangsan', 'job1'); +insert into emp values (2, 'lisi', 'job2'); + +create or replace package pck1 is +vvv emp%rowtype; +cursor cur1 is +select * from emp where empno=vvv.empno and ename=vvv.ename; +emp_row cur1%rowtype; +procedure p1(); +end pck1; +/ + +create or replace package body pck1 is +procedure p1() is +a int; +begin +vvv.empno = 1; +vvv.ename = 'zhangsan'; +open cur1; +fetch cur1 into emp_row; +raise info '%', emp_row.job; +end; +end pck1; +/ + +call pck1.p1(); + +create or replace procedure pro_cursor_args +is + b varchar(10) := 'job1'; + cursor c_job + is + select empno,ename t + from emp + where job=b; + c_row c_job%rowtype; +begin + for c_row in c_job loop + raise info '%', c_row.t; + end loop; +end; +/ + +call pro_cursor_args(); + +create or replace procedure pro_cursor_no_args_1 +is + b varchar(10); + cursor c_job + is + select empno,ename t + from emp; + c_row c_job%rowtype; +begin + c_row.empno = 3; + raise info '%', c_row.empno; + for c_row in c_job loop + raise info '%', c_row.empno; + end loop; +end; +/ + +call pro_cursor_no_args_1(); + +-- test alias error +create or replace procedure pro_cursor_args +is + b varchar(10) := 'job1'; + cursor c_job + is + select empno,ename t + from emp + where job=b; + c_row c_job%rowtype; +begin + for c_row in c_job loop + raise info '%', c_row.ename; + end loop; +end; +/ + +call pro_cursor_args(); + +create or replace procedure pro_cursor_no_args_2 +is + b varchar(10); + cursor c_job + is + select empno,ename t + from emp; + c_row c_job%rowtype; +begin + open c_job; + fetch c_job into c_row; + raise info '%', c_row.empno; + fetch c_job into c_row; + raise info '%', c_row.empno; +end; +/ + +call pro_cursor_no_args_2(); + +create table test12(col1 varchar2,col2 varchar2); +insert into test12 values ('a', 'aa'); +insert into test12 values ('b', 'bb'); + +create or replace package pck2 is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype; +procedure pp1; +end pck2; +/ + +create or replace package body pck2 is +procedure pp1() is +cursor cur2 is +select col1,col2 from test12; +begin +var1.col1 = 'c'; +raise info '%', var1.col1; +open cur2; +fetch 
cur2 into var1; +raise info '%', var1.col1; +fetch cur2 into var1; +raise info '%', var1.col1; +end; +end pck2; +/ + +call pck2.pp1(); + +create or replace package pck3 is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype; +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck3; +/ + +create or replace package body pck3 is +procedure ppp1() is +cursor cur2 is +select col1,col2 from test12; +begin +open cur2; +fetch cur2 into var1; +ppp2(var1); +raise info '%', var1.col1; +end; + +procedure ppp2(a cur1%rowtype) is +begin + raise info '%', a.col1; +end; +end pck3; +/ + +call pck3.ppp1(); + +create or replace package pck4 +is +v1 varchar2; +procedure proc1(a1 in v1%type); +end pck4; +/ + +create or replace package body pck4 +is +procedure proc1(a1 in v1%type) +is +begin +raise info '%', a1; +end; +end pck4; +/ + +call pck4.proc1('aa'); + +-- test cusor.col +create or replace package pck5 is +cursor cur1 is select col1,col2 from test12; +var1 cur1%rowtype; +var2 cur1.col1%type; +procedure ppppp1(a1 cur1.col1%type); +end pck5; +/ + +create or replace package body pck5 +is +procedure ppppp1(a1 cur1.col1%type) +is +begin +var2 = 2; +raise info '%', a1; +raise info '%', var2; +end; +end pck5; +/ + +call pck5.ppppp1(1); + +drop schema if exists schema1; +create schema schema1; +set search_path=schema1; +create table t11(a int, b varchar(10)); +insert into t11 values (1,'a'); + +set search_path=plpgsql_cursor_rowtype; + +create or replace procedure cursor1() +as +declare + c_b varchar(10); + cursor cur1 is select schema1.t11.* from schema1.t11 where b = c_b; + var1 cur1%rowtype; +begin + c_b = 'a'; + open cur1; + fetch cur1 into var1; + raise info '%', var1; + raise info '%', var1.a; +end; +/ + +call cursor1(); + +create or replace package pck6 is + c_b varchar(10); + cursor cur1 is select schema1.t11.* from schema1.t11 where b = c_b; + var1 cur1%rowtype; +procedure p2(); +end pck6; +/ + +create or replace package body pck6 +is +procedure p2() +is +begin + c_b = 'a'; + open cur1; + fetch cur1 into var1; + raise info '%', var1; + raise info '%', var1.a; +end; +end pck6; +/ + +call pck6.p2(); + +create table tb1 (c1 int,c2 varchar2); +insert into tb1 values(4,'a'); + +create or replace package pck7 as + cursor cur is select c1,c2 from tb1; + v_s cur%rowtype := (1,'1'); + function func1(c1 in cur%rowtype) return cur%rowtype; + procedure proc1(c1 out cur%rowtype); + procedure proc2(c1 inout cur%rowtype); +end pck7; +/ + +create or replace package body pck7 +is + function func1(c1 in cur%rowtype) return cur%rowtype + as + begin + return v_s; + end; + + procedure proc1 (c1 out cur%rowtype) + as + begin + c1 := (4,'d'); + end; + + procedure proc2(c1 inout cur%rowtype) + is + vs cur%rowtype := (2,'1'); + c2 cur%rowtype; + begin + c1 := func1(vs); + proc1(c2); + raise info '%', c2; + end; +end pck7; +/ + +call pck7.proc2(row(3,'c')); + +-- test duplicate column name +create or replace procedure pro_cursor_args +is + b varchar(10) := 'job1'; + cursor c_job + is + select empno,empno,ename + from emp + where job=b; + c_row c_job%rowtype; +begin + for c_row in c_job loop + raise info '%', c_row.empno; + end loop; +end; +/ + +call pro_cursor_args(); + +create or replace package pck8 is +cursor cur1 is select col2,col2 from test12; +procedure ppp1; +procedure ppp2(a cur1%rowtype); +end pck8; +/ + +insert into emp values (1, 'zhangsan', 'job3'); + +create or replace package pck8 is +vvv emp%rowtype; +cursor cur1 is +select empno,empno,job from emp where empno=vvv.empno and ename=vvv.ename; 
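+-- cur1 deliberately selects empno twice; the cur1%rowtype variable below has to cope with the duplicate column name --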
+emp_row cur1%rowtype; +procedure p1(); +end pck8; +/ + +create or replace package body pck8 is +procedure p1() is +a int; +begin +vvv.empno = 1; +vvv.ename = 'zhangsan'; +open cur1; +fetch cur1 into emp_row; +raise info '%', emp_row.job; +fetch cur1 into emp_row; +raise info '%', emp_row.job; +end; +end pck8; +/ + +call pck8.p1(); + +create or replace package pck9 is +vvv emp%rowtype; +cursor cur1 is +select empno,empno,job from emp where empno=vvv.empno and ename=vvv.ename; +emp_row record; +procedure p1(); +end pck9; +/ + +create or replace package body pck9 is +procedure p1() is +a int; +begin +vvv.empno = 1; +vvv.ename = 'zhangsan'; +open cur1; +fetch cur1 into emp_row; +raise info '%', emp_row.job; +fetch cur1 into emp_row; +raise info '%', emp_row.job; +end; +end pck9; +/ + +call pck9.p1(); + +create or replace package pck10 as + cursor cur is select c2,c2 from tb1; + function func1 return cur%rowtype; +end pck10; +/ + +create table FOR_LOOP_TEST_001( +deptno smallint, +ename char(100), +salary int +); + +create table FOR_LOOP_TEST_002( +deptno smallint, +ename char(100), +salary int +); + +insert into FOR_LOOP_TEST_001 values (10,'CLARK',7000),(10,'KING',8000),(10,'MILLER',12000),(20,'ADAMS',5000),(20,'FORD',4000); + +create or replace procedure test_forloop_001() +as +begin + for data in update FOR_LOOP_TEST_001 set salary=20000 where ename='CLARK' returning * loop + insert into FOR_LOOP_TEST_002 values(data.deptno,data.ename,data.salary); + end loop; +end; +/ + +call test_forloop_001(); +select * from FOR_LOOP_TEST_001; +select * from FOR_LOOP_TEST_002; + +--test exception close cursor +create or replace package pckg_test1 as +procedure p1; +end pckg_test1; +/ + +create or replace package body pckg_test1 as +procedure p1() is +a number; +begin +a := 2/0; +end; +end pckg_test1; +/ + +create or replace package pckg_test2 as +cursor CURRR is select * from FOR_LOOP_TEST_002; +curr_row CURRR%rowtype; +procedure p1; +end pckg_test2; +/ + +create or replace package body pckg_test2 as +procedure p1() is +a number; +begin +open CURRR; +fetch CURRR into curr_row; +raise info '%', curr_row; +pckg_test1.p1(); +exception +when others then +raise notice '%', '1111'; +close CURRR; +end; +end pckg_test2; +/ + +call pckg_test2.p1(); + +create or replace procedure pro_close_cursor1 +is + my_cursor REFCURSOR; + sql_stmt VARCHAR2(500); + curr_row record; +begin + sql_stmt := 'select * from FOR_LOOP_TEST_002'; + OPEN my_cursor FOR EXECUTE sql_stmt; + fetch my_cursor into curr_row; + raise info '%', curr_row; + pckg_test1.p1(); + exception + when others then + raise notice '%', '1111'; + close my_cursor; +end; +/ + +call pro_close_cursor1(); + +create or replace procedure pro_close_cursor2 +is + type cursor_type is ref cursor; + my_cursor cursor_type; + sql_stmt VARCHAR2(500); + curr_row record; +begin + sql_stmt := 'select * from FOR_LOOP_TEST_002'; + OPEN my_cursor FOR EXECUTE sql_stmt; + fetch my_cursor into curr_row; + raise info '%', curr_row; + pckg_test1.p1(); + exception + when others then + raise notice '%', '1111'; + close my_cursor; +end; +/ + +call pro_close_cursor2(); + +create table cs_trans_1(a int); +create or replace procedure pro_cs_trans_1() as +cursor c1 is select * from cs_trans_1 order by 1; +rec_1 cs_trans_1%rowtype; +va int; +begin +open c1; +va := 3/0; +exception +when division_by_zero then +close c1; +close c1; +end; +/ + +call pro_cs_trans_1(); + +create or replace procedure pro_cs_trans_1() as +cursor c1 is select * from cs_trans_1 order by 1; +rec_1 cs_trans_1%rowtype; +va 
int; +begin +open c1; +close c1; +va := 3/0; +exception +when division_by_zero then +close c1; +end; +/ + +call pro_cs_trans_1(); + +create or replace procedure pro_cs_trans_1() as +cursor c1 is select * from cs_trans_1 order by 1; +rec_1 cs_trans_1%rowtype; +va int; +begin +open c1; +close c1; +close c1; +va := 3/0; +close c1; +exception +when division_by_zero then +null; +when others then +raise info 'cursor already closed'; +end; +/ + +call pro_cs_trans_1(); + +drop procedure pro_cs_trans_1; +drop table cs_trans_1; + +-- test for rec in select loop when rec is defined +set behavior_compat_options='proc_implicit_for_loop_variable'; +create table t1(a int, b int); +create table t2(a int, b int, c int); +insert into t1 values(1,1); +insert into t1 values(2,2); +insert into t1 values(3,3); +insert into t2 values(1,1,1); +insert into t2 values(2,2,2); +insert into t2 values(3,3,3); + +-- (a) defined as record +create or replace package pck_for is +type r1 is record(a int, b int); +temp_result t1; +procedure p1; +end pck_for; +/ +create or replace package body pck_for is +procedure p1 as +vb t1; +begin +for temp_result in select * from t2 loop +raise info '%', temp_result; + for temp_result in select * from t1 loop + raise info '%', temp_result; + end loop; +end loop; +raise info 'after loop: %', temp_result; +end; +end pck_for; +/ + +call pck_for.p1(); +drop package pck_for; + +-- (b) defined as scalar +create or replace package pck_for is +temp_result int; +procedure p1; +end pck_for; +/ +create or replace package body pck_for is +procedure p1 as +vb t1; +begin +for temp_result in select * from t2 loop +raise info '%', temp_result; + for temp_result in select * from t1 loop + raise info '%', temp_result; + end loop; +end loop; +raise info 'after loop: %', temp_result; +end; +end pck_for; +/ + +call pck_for.p1(); +drop package pck_for; + +-- (c) select only one col +create or replace package pck_for is +temp_result int; +procedure p1; +end pck_for; +/ +create or replace package body pck_for is +procedure p1 as +vb t1; +begin +for temp_result in select c from t2 loop +raise info '%', temp_result; + for temp_result in select a from t1 loop + raise info '%', temp_result; + end loop; +end loop; +raise info 'after loop: %', temp_result; +end; +end pck_for; +/ + +call pck_for.p1(); +drop package pck_for; + +drop table t1; +drop table t2; +set behavior_compat_options=''; + +---- clean ---- +drop package pck1; +drop package pck2; +drop package pck3; +drop package pck4; +drop package pck5; +drop package pck6; +drop package pck7; +drop package pck8; +drop package pck9; +drop package pckg_test1; +drop package pckg_test2; +drop schema plpgsql_cursor_rowtype cascade; +drop schema schema1 cascade; diff --git a/src/test/regress/sql/plpgsql_inout_param.sql b/src/test/regress/sql/plpgsql_inout_param.sql new file mode 100644 index 000000000..0e24d7da6 --- /dev/null +++ b/src/test/regress/sql/plpgsql_inout_param.sql @@ -0,0 +1,1319 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_inout; +create schema plpgsql_inout; +set current_schema = plpgsql_inout; +set behavior_compat_options="proc_outparam_override"; +------------------------------------------------ +--------------------inout----------------------- +------------------------------------------------ +create or replace procedure proc1(a1 in out int, a2 in out int) +is +begin +a1 := a1 + 1; +a2 := a2 + 1; +end; +/ +
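(Reviewer note: under the `proc_outparam_override` compatibility option set above, `in out` arguments are written back to the caller's variables when the call returns. A minimal sketch against the `proc1` just defined — illustrative only, not part of the test file:)

```
declare
  x int := 10;
  y int := 20;
begin
  proc1(x, y);                  -- proc1 adds 1 to each in out parameter
  raise info 'x:%, y:%', x, y;  -- expected to report x:11, y:21
end;
/
```

+create or replace procedure 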
proc2() +is +a1 int := 1; +a2 int := 2; +begin +raise info '%', a1; +proc1(a1, a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ + +call proc2(); + +create or replace procedure proc2() +is +a1 int := 1; +a2 int := 2; +begin +raise info '%', a1; +proc1(a1=>a1, a2=>a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ + +call proc2(); + +-- test table +create or replace procedure proc3() +is +type arr is table OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +end; +/ + +call proc3(); + +create or replace procedure proc3() +is +type arr is table OF integer; +aa2 arr; +begin +aa2[1] = 2; +aa2[2] = 3; +proc1(a1=>aa2(1), a2=>aa2(2)); +raise info 'aa2:%', aa2; +end; +/ + +call proc3(); + +-- table nest error +create or replace procedure proc3() +is +type arr is table OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1)(1), a2(2)); +raise info 'a2:%', a2; +end; +/ + +create or replace procedure proc3() +is +type arr is table OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2[1], a2[2]); +raise info 'a2:%', a2; +end; +/ + +call proc3(); + +create or replace procedure proc3() +is +type arr is table OF integer; +aa2 arr; +aa1 int; +begin +aa2[1] = 2; +aa2[2] = 3; +aa1 := 1; +proc1(a1=>aa2[1], a2=>aa2[2]); +raise info 'aa2:%', aa2; +end; +/ + +call proc3(); + +-- test array +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +end; +/ + +call proc4(); + +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +end; +/ + +call proc4(); + +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +a2 arr; +a1 int; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2[1], a2[2]); +raise info 'a2:%', a2; +end; +/ + +call proc4(); + +create or replace procedure proc4() +is +type arr is varray(10) OF integer; +aa2 arr; +aa1 int; +begin +aa2[1] = 2; +aa2[2] = 3; +aa1 := 1; +proc1(a1=>aa2[1], a2=>aa2[2]); +raise info 'aa2:%', aa2; +end; +/ + +call proc4(); + +create or replace procedure proc4() +is +a int[][] := ARRAY[ARRAY[1,2,3],ARRAY[4,5,6],ARRAY[7,8,9]]; +begin +a[1][2] = 2; +a[2][1] = 3; +proc1(a[2][3], a[3][2]); +raise info 'a2:%', a; +end; +/ + +call proc4(); + +create or replace procedure proc4() +is +a int[][] := ARRAY[ARRAY[1,2,3],ARRAY[4,5,6],ARRAY[7,8,9]]; +begin +a[1][2] = 2; +a[2][1] = 3; +proc1(a1=>a[2][3], a2=>a[3][2]); +raise info 'a2:%', a; +end; +/ + +call proc4(); + +create or replace procedure proc5(a1 in out int[]) +is +begin +a1[2] = 2; +end; +/ + +create or replace procedure proc6() +is +a1 int[]; +begin +a1[1] = 1; +proc5(a1); +raise info 'a1:%', a1; +end; +/ + +call proc6(); + +create or replace procedure proc6() +is +a1 int[]; +begin +a1[1] = 1; +proc5(a1=>a1); +raise info 'a1:%', a1; +end; +/ + +call proc6(); + +create or replace procedure proc7(a1 in out int[], a2 in out int) +is +begin +a1[2] = 2; +a2 = 2; +end; +/ + +create or replace procedure proc8() +is +a1 int[]; +a2 int; +begin +a1[1] = 1; +a2 = 1; +proc7(a1, a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ + +call proc8(); + +create or replace procedure proc8() +is +a1 int[]; +a2 int; +begin +a1[1] = 1; +a2 = 1; +proc7(a1=>a1, a2=>a2); +raise info 'a1:%', a1; +raise info 'a2:%', a2; +end; +/ + +call 
proc8(); + +create table tb_test(col1 int,col2 int,col3 int); +insert into tb_test values (1,1,1); +insert into tb_test values (2,2,2); + +-- test for rec +create or replace procedure proc9() +is +begin +for rec in (select col1,col2,col3 from tb_test) loop +proc1(rec.col1, rec.col2); +raise info 'col1:%', rec.col1; +raise info 'col2:%', rec.col2; +end loop; +end; +/ + +call proc9(); + +create or replace procedure proc9() +is +begin +for rec in (select col1,col2,col3 from tb_test) loop +proc1(a1=>rec.col1, a2=>rec.col2); +raise info 'col1:%', rec.col1; +raise info 'col2:%', rec.col2; +end loop; +end; +/ + +call proc9(); + +create or replace procedure proc10() +is +begin +for rec in (select col1,col2,col3 from tb_test) loop +proc1(rec.col1, rec.col2); +raise info 'col1:%', rec.col1; +end loop; +end; +/ + +call proc10(); + +create type info as (name varchar2(50), age int, address varchar2(20), salary float(2)); +-- test 1 out param +create or replace procedure proc12(a inout info) +is + +begin + a = ('Vera' ,32, 'Paris', 22999.00); +end; +/ + +-- test record +create or replace procedure proc11() +is + a info; +begin + proc12(a); + raise info '%', a; +end; +/ + +call proc11(); + +create or replace procedure proc11() +is + a info; +begin + proc12(a=>a); + raise info '%', a; +end; +/ + +call proc11(); + +-- test 2 out param +create or replace procedure proc20(a inout info, b inout int) +is + +begin + b = 1; + a = ('Vera' ,32, 'Paris', 22999.00); +end; +/ + +-- test record +create or replace procedure proc21() +is + a info; + b int; +begin + proc20(a,b); + raise info '%', a; + raise info '%', b; +end; +/ + +call proc21(); + +-- test record +create or replace procedure proc21() +is + a info; + b int; +begin + proc20(a=>a,b=>b); + raise info '%', a; + raise info '%', b; +end; +/ + +call proc21(); + +--test record error +create or replace procedure proc11() +is + type r is record (name varchar2(50), age int, address varchar2(20), salary float(2)); + a r; +begin + a = ('Vera' ,33, 'Paris', 22999.00); + proc12(row(a)); + raise info '%', a; +end; +/ + +--test record nest +create or replace procedure proc12() +is + type r is varray(10) of info; + a r; + a2 int := 1; +begin + a[1] = ('Vera' ,33, 'Paris', 22999.00); + proc1(a[1].age, a2); + raise info '%', a; +end; +/ + +call proc12(); + +create or replace procedure proc12() +is + type r is varray(10) of info; + a r; + a2 int := 1; +begin + a[1] = ('Vera' ,33, 'Paris', 22999.00); + proc1(a1=>a[1].age, a2=>a2); + raise info '%', a; +end; +/ + +call proc12(); + +create or replace procedure proc12() +is + a info[][] := ARRAY[ARRAY[('',1,'',0), ('',2,'',0)],ARRAY[('',3,'',0), ('',4,'',0)]]; + a2 int := 1; +begin + proc1(a[1][2].age, a2); + raise info '%', a; +end; +/ + +call proc12(); + +create or replace procedure proc12() +is + a info[][] := ARRAY[ARRAY[('',1,'',0), ('',2,'',0)],ARRAY[('',3,'',0), ('',4,'',0)]]; + a2 int := 1; +begin + proc1(a1=>a[1][2].age, a2=>a2); + raise info '%', a; +end; +/ + +call proc12(); + +create type o1 as (a int, b int); +create type o2 as (a int, b o1); +create type o3 as (a int, b o2); + +create or replace procedure proc13() +is + a o2; + a2 int := 1; +begin + a.b.b = 1; + proc1(a.b.b, a2); + raise info '%', a; +end; +/ + +call proc13(); + +create or replace procedure proc13() +is + a o2; + a2 int := 1; +begin + a.b.b = 1; + proc1(a1=>a.b.b, a2=>a2); + raise info '%', a; +end; +/ + +call proc13(); + +create or replace procedure proc14() +is + a o3; + a2 int := 1; +begin + a.b.b.b = 1; + raise info '%', a; + proc1(a.b.b.b, a2); 
+ raise info '%', a; +end; +/ + +call proc14(); + +create or replace procedure proc14() +is + a o3; + a2 int := 1; +begin + a.b.b.b = 1; + raise info '%', a; + proc1(a1=>a.b.b.b, a2=>a2); + raise info '%', a; +end; +/ + +call proc14(); + +create type customer as (id number(10), c_info info); +create table customers (id number(10), c_info info); + +insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00)); + +create or replace procedure proc15() +is +rec record; +begin +for rec in (select id, c_info from customers) loop +proc1(rec.c_info.id,1); +raise info '%', rec.c_info.id; +end loop; +end; +/ + +call proc15(); + +create or replace procedure proc15() +is +rec record; +begin +for rec in (select id, c_info from customers) loop +proc1(a1=>rec.c_info.id, a2=>1); +raise info '%', rec.c_info.id; +end loop; +end; +/ + +call proc15(); + +create or replace procedure proc16(a1 in out varchar) +is +begin +a1 := 'bbbb'; +end; +/ + +create or replace procedure proc17() +is +type arr is varray(10) OF varchar(10); +a arr; +begin +a[1] = 'aaa'; +proc16(a[1]); +raise info '%', a; +end; +/ + +call proc17(); + +create or replace procedure proc17() +is +type arr is varray(10) OF varchar(10); +a arr; +begin +a[1] = 'aaa'; +proc16(a1=>a[1]); +raise info '%', a; +end; +/ + +call proc17(); + +create or replace package pckg_test1 as +array_info info[][] := ARRAY[ARRAY[('',1,'',0), ('',2,'',0)],ARRAY[('',3,'',0), ('',4,'',0)]]; +array_int int[][] := ARRAY[ARRAY[1,2,3],ARRAY[4,5,6],ARRAY[7,8,9]]; +procedure pr_test(i_col1 inout int,i_col2 inout int); +procedure pr_test1(); +procedure pr_test2(); +procedure pr_test3(); +end pckg_test1; +/ + +create or replace package body pckg_test1 as +procedure pr_test(i_col1 inout int,i_col2 inout int)as +begin +i_col1 = i_col1+1; +i_col2 = i_col2+2; +end; + +procedure pr_test1()as +begin +for rec in (select col1,col2,col3 from tb_test) loop +raise info '%', rec.col2; +pr_test(rec.col1,rec.col2); +raise info '%', rec.col2; +end loop; +end; + +procedure pr_test2()as +a o2; +b o3; +begin +a.b.b = 1; +b.b.b.b = 1; +pr_test(a.b.b, b.b.b.b); +raise info '%', a; +raise info '%', b; + +pr_test(array_info[1][2].age,array_int[2][3]); +raise info '%',array_info; +raise info '%',array_int; + +end; + +procedure pr_test3()as +type arr is varray(10) OF integer; +a2 arr; +type tbl is table of integer; +a3 tbl; +begin +a2[1] = 1; +a3[2] = 1; +pr_test(a2[1],a3[2]); +raise info '%',a2; +raise info '%',a3; +end; +end pckg_test1; +/ + +call pckg_test1.pr_test1(); +call pckg_test1.pr_test2(); +call pckg_test1.pr_test3(); + +create or replace package body pckg_test1 as +procedure pr_test(i_col1 inout int,i_col2 inout int)as +begin +i_col1 = i_col1+1; +i_col2 = i_col2+2; +end; + +procedure pr_test1()as +begin +for rec in (select col1,col2,col3 from tb_test) loop +raise info '%', rec.col2; +pr_test(i_col1=>rec.col1, i_col2=>rec.col2); +raise info '%', rec.col2; +end loop; +end; + +procedure pr_test2()as +a o2; +b o3; +begin +a.b.b = 1; +b.b.b.b = 1; +pr_test(i_col1=>a.b.b, i_col2=>b.b.b.b); +raise info '%', a; +raise info '%', b; + +pr_test(i_col1=>array_info[1][2].age, i_col2=>array_int[2][3]); +raise info '%',array_info; +raise info '%',array_int; + +end; + +procedure pr_test3()as +type arr is varray(10) OF integer; +a2 arr; +type tbl is table of integer; +a3 tbl; +begin +a2[1] = 1; +a3[2] = 1; +pr_test(i_col1=>a2[1], i_col2=>a3[2]); +raise info '%',a2; +raise info '%',a3; +end; +end pckg_test1; +/ + +call pckg_test1.pr_test1(); +call pckg_test1.pr_test2(); +call 
pckg_test1.pr_test3(); + +create or replace procedure proc1(c1 out INT, c2 out INT) +is +begin +raise info '%', c1; +c1 := 10000; +c2 := 20000; +end; +/ +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 INT; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(a2(1), a2(2)); +raise info 'a2:%', a2; +raise info 'a1:%', a1; +end; +/ + +call proc3(); + +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 INT; +begin +a2[1] = 2; +a2[2] = 3; +a1 := 1; +proc1(c1=>a2(1), c2=>a2(2)); +raise info 'a2:%', a2; +raise info 'a1:%', a1; +end; +/ + +call proc3(); + +create or replace procedure proc1(a1 in BIGINT, a2 out BIGINT, a3 inout BIGINT) +is +begin +a1 := a1 + 10000; +a2 := a2 + 20000; +a3 := a3 + 30000; +end; +/ +create or replace procedure proc3() +is +type arr is table OF BIGINT; +a2 arr; +begin +a2[1] = 1; +a2[2] = 2; +a2[3] = 3; +proc1(a2(1), a2(2), a2(3)); +raise info 'a2:%', a2; +raise info 'a2:%', a2[1]; +raise info 'a2:%', a2[2]; +end; +/ + +call proc3(); + +create or replace procedure proc3() +is +type arr is table OF BIGINT; +c2 arr; +begin +c2[1] = 1; +c2[2] = 2; +c2[3] = 3; +proc1(a1=>c2(1), a2=>c2(2), a3=>c2(3)); +raise info 'a2:%', c2; +raise info 'a2:%', c2[1]; +raise info 'a2:%', c2[2]; +end; +/ + +call proc3(); + +create type t as (a int, b boolean); +-- complex type +create or replace procedure proc1(a1 in out t, a2 in out boolean) +is +begin +a1.a := a1.a + 1; +a1.b := false; +a2 := false; +end; +/ + +declare +a t; +b boolean; +begin +a.a = 1; +a.b = true; +b = true; +proc1(a,b); +raise info '%', a.a; +raise info '%', a.b; +raise info '%', b; +end; +/ + +declare +a t; +b boolean; +begin +a.a = 1; +a.b = true; +b = true; +proc1(a1=>a,a2=>b); +raise info '%', a.a; +raise info '%', a.b; +raise info '%', b; +end; +/ + +------------------------------------------------ +---------------------out------------------------ +------------------------------------------------ +create or replace procedure proc1(a1 out INT) +is +begin +a1 := 10000; +raise info 'a1:%', a1; +end; +/ + +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 int := 2; +begin +a2[1] = 1; +proc1(a2(1)); +raise info 'a2:%', a2; +proc1(a1); +raise info 'a1:%', a1; +end; +/ + +call proc3(); + +create or replace procedure proc3() +is +type arr is table OF INT; +a2 arr; +a1 int := 2; +begin +a2[1] = 1; +proc1(a1=>a2(1)); +raise info 'a2:%', a2; +proc1(a1=>a1); +raise info 'a1:%', a1; +end; +/ + +call proc3(); + +create or replace procedure proc3() +is +type arr is varray(10) OF INT; +a2 arr; +a1 int := 2; +begin +a2[1] = 1; +proc1(a2(1)); +raise info 'a2:%', a2; +proc1(a1); +raise info 'a1:%', a1; +end; +/ + +call proc3(); + +create or replace procedure proc1(a1 out t) +is +begin +a1.a := 1; +a1.b := false; +end; +/ + +declare +a t; +begin +proc1(a); +raise info '%', a; +end; +/ + +declare +a t; +begin +proc1(a1=>a); +raise info '%', a; +end; +/ + + +create or replace procedure proc1(a1 out t, a2 out boolean) +is +begin +a1.a := 1; +a1.b := false; +a2 := false; +end; +/ + +declare +a t; +b boolean; +begin +proc1(a,b); +raise info '%', a; +raise info '%', b; +end; +/ + +declare +a t; +b boolean; +begin +proc1(a1=>a,a2=>b); +raise info '%', a; +raise info '%', b; +end; +/ + +create type t1 is table of int; +create or replace procedure p1(c1 in int, c2 out t1) +is +a int; +begin +a := c1; +raise info '%',a; +c2(1) := 1; +c2(2) := 2; +return; +end; +/ + +create or replace procedure p2() +is +a t1; +begin +p1(c1=>'12',c2=>a); +raise info '%',a; 
+end; +/ + +call p2(); + +create or replace package pck6 is +type tp_2 is record(v01 number, v03 varchar2, v02 number); +end pck6; +/ + +create or replace package pck5 is +type tp_1 is record(v01 number, v03 varchar2, v02 number); +procedure pp11(v01 out tp_1); +procedure pp11(v121 out number,v122 out pck6.tp_2); +end pck5; +/ + +create or replace package body pck5 is +procedure pp11(v01 out tp_1) is +v122 pck6.tp_2; +begin +pp11(v121 => v01.v01, v122 => v122); +raise notice 'v01 : %', v01.v01; +end; +procedure pp11(v121 out number,v122 out pck6.tp_2) is +v_id1 varchar2; +begin +select id1 into v_id1 from test_tb1 limit 1; +raise notice '%', v_id1; +v121 := 12; +EXCEPTION +when no_data_found then +raise notice 'no data found: %', v121||SQLERRM; +v121 :=1; +WHEN others then +raise notice 'others :%', v121||SQLERRM; +v121 := 2; +end; +end pck5; +/ + +create or replace function fun1 return number as +v01 pck5.tp_1; +begin +pck5.pp11(v01); +return 0; +end; +/ + +select fun1(); + +create or replace package body pck5 is +procedure pp11(v01 out tp_1) is +v122 pck6.tp_2; +begin +pp11(v01.v01, v122); +raise notice 'v01 : %', v01.v01; +end; +procedure pp11(v121 out number,v122 out pck6.tp_2) is +v_id1 varchar2; +begin +select id1 into v_id1 from test_tb1 limit 1; +raise notice '%', v_id1; +v121 := 12; +EXCEPTION +when no_data_found then +raise notice 'no data found: %', v121||SQLERRM; +v121 :=1; +WHEN others then +raise notice 'others :%', v121||SQLERRM; +v121 := 2; +end; +end pck5; +/ + +select fun1(); + +create or replace package pck7 is +type t_out is record(retcode number, + errcode number, + eerm varchar2(4000), + sqlcode varchar2(100), + sqlerrm varchar2(4000) + ); +success constant number(1) = 0; +fail constant number(1) = 1; +end pck7; +/ + +create or replace package pck8 is +v_out pck7.t_out; +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_retcode out number); +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_base out pck7.t_out); +end pck8; +/ + +create or replace package body pck8 is +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_retcode out number + ) is +v_out pck7.t_out; +begin +pp11(in_groupno=>in_groupno, + in_workdate => in_workdate, + o_base => v_out); +raise notice 'v_out : %', v_out; +end; +procedure pp11(in_groupno in varchar2, + in_workdate in varchar2, + o_base out pck7.t_out)is +v_id1 varchar2; +begin +o_base := (1,1,'a','b','c'); +--o_base.retcode := 2; +end; +end pck8; +/ + +declare +va number; +begin +pck8.pp11('a','b',va); +end; +/ + +drop package pck5; +drop package pck6; +drop package pck7; +drop package pck8; +drop package pckg_test1; +drop package pckg_test2; + +create or replace procedure pp1(va int, vb int, vc out int) as +begin +null; +end; +/ +declare +v1 int; +begin +pp1(vd=>v1, va=>v1, vb=>v1); +end; +/ + +drop procedure pp1; + +-- two out param, one is valid (should error) +drop package if exists pck1; +create or replace package pck1 is +procedure p1(a out varchar2,b out int); +end pck1; +/ +create or replace package body pck1 is +procedure p1(a out varchar2,b out int) is +begin +b:=1; +a:='a'||b; +end; +end pck1; +/ + +declare +var varchar2; +begin +pck1.p1(var,1); +raise info 'var:%',var; +end; +/ + +drop package pck1; +-- two out param, one is valid, overload situation (should error) +create or replace package pkg070 +is +type type000 is record (c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); +type type001 is table of integer index by integer; +procedure proc070_1(col2 out type001,col3 
out int,col4 out type001,col5 out int); +procedure proc070_1(col4 out type001,col5 out int); +procedure proc070_2(); +end pkg070; +/ + +create or replace package body pkg070 +is +procedure proc070_1(col2 out type001,col3 out int,col4 out type001,col5 out int) +is +col1 type001; +begin +col2(1):=3; +col2(2):=4; +col3:=col2.count; +col4(2):=44; +col4(6):=55; +col5:=col4.count; +end; +procedure proc070_1(col4 out type001,col5 out int) +is +begin +col4(1):=4; +col4(2):=44; + --col4(3):=444; +col5:=col4.count; +raise info '2 parameter col5 is %',col5; +end; +procedure proc070_2() +is +tbcor1 type001; +tbcor2 type001; +begin +tbcor1(1):=1; +tbcor1(3):=3; +tbcor2(2):=2; +tbcor2(3):=23; +--proc070_1(tbcor1,tbcor1.count,tbcor2,tbcor2.count); +raise info 'tbcor1 is %',tbcor1; +raise info 'tbcor1.count is %',tbcor1.count; +raise info 'tbcor2 is %',tbcor2; +raise info 'tbcor2.count is %',tbcor2.count; +proc070_1(tbcor2,tbcor2.count); +raise info 'tbcor2 is %',tbcor2; +raise info 'tbcor2.count is %',tbcor2.count; +--raise info 'tbcor2.first is %',tbcor2.first; +end; +end pkg070; +/ + +call pkg070.proc070_2(); + +drop package pkg070; +-- two out param, one is valid, => situation (should error) +create or replace procedure pp1(a out int, b out int) as +begin +a := 1; +b := 1; +end; +/ + +declare +var1 int; +begin +pp1(a=>var1,b=>3); +end; +/ + +drop procedure pp1; + +-- test one in row, one out scalar +-- 1 +drop package if exists pck1; +create or replace package pck1 is +type tp_1 is record(v01 number, v03 varchar2, v02 number); +type tp_2 is varray(10) of int; +procedure p1(a tp_1,b out varchar2); +procedure p1(a2 tp_2, b2 out varchar2); +end pck1; +/ + +create or replace package body pck1 is +procedure p1(a tp_1,b out varchar2) is +begin +b:=a.v01; +raise info 'b:%',b; +end; +procedure p1(a2 tp_2, b2 out varchar2) is +begin +b2:=a2(2); +raise info 'b2:%',b2; +end; +end pck1; +/ + +declare +var1 pck1.tp_1:=(2,'a',3); +var2 pck1.tp_2:=array[1,3]; +var varchar2; +begin +pck1.p1(var1,var); +raise info 'var:%', var; +end; +/ + +drop package if exists pck1; + +-- 2. +drop package if exists pck1; +create or replace package pck1 is +type tp_1 is record(v01 number, v03 varchar2, v02 number); +type tp_2 is record(v01 tp_1, v03 varchar2, v02 number); +procedure p1(a tp_1,b out int); +procedure p1(a2 in tp_2,b2 out int); +end pck1; +/ + +create or replace package body pck1 is +procedure p1(a tp_1,b out int) is +begin +b:=a.v02; +raise info 'b:%',b; +end; +procedure p1(a2 in tp_2,b2 out int) is +begin +b2:=a2.v01.v01; +raise info 'b2:%',b2; +end; +end pck1; +/ + +declare +var1 pck1.tp_1:=(1,'bb',3); +var2 pck1.tp_2:=((2,'aa',4),'c',5); +var int; +varr int; +begin +pck1.p1(var1,var); +pck1.p1(var2,varr); +raise info 'var:%',var; +end; +/ +drop package if exists pck1; + +--3. 
+drop table if exists tb_test; +create table tb_test(c1 int, c2 varchar2); +drop package if exists pck1; +create or replace package pck1 is +type tp_1 is record(v01 number, v03 varchar2, v02 number); +procedure p1(in a tb_test%rowtype,out b tp_1); +procedure p1(out a tp_1,in b tb_test%rowtype); +end pck1; +/ + +create or replace package body pck1 is +procedure p1(in a tb_test%rowtype,out b tp_1) is +begin +b.v01:=a.c1; +b.v03:=a.c2; +end; +procedure p1(out a tp_1,in b tb_test%rowtype) is +begin +a.v01:=b.c1+1; +a.v03:=b.c2; +end; +end pck1; +/ +declare +var1 pck1.tp_1; +var2 tb_test%rowtype:=(1,'a'); +var3 pck1.tp_1; +begin +pck1.p1(a=>var2,b=>var1); +raise info 'var1:%',var1; +pck1.p1(a=>var3,b=>var2); +raise info 'var3:%',var3; +end; +/ + +drop package pck1; +drop table tb_test; + +-- clean +drop schema if exists plpgsql_inout cascade; \ No newline at end of file diff --git a/src/test/regress/sql/plpgsql_insert_record.sql b/src/test/regress/sql/plpgsql_insert_record.sql new file mode 100644 index 000000000..ddcc4e91e --- /dev/null +++ b/src/test/regress/sql/plpgsql_insert_record.sql @@ -0,0 +1,196 @@ +-- test insert into table values record +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_table; +create schema plpgsql_table; +set current_schema = plpgsql_table; +set behavior_compat_options=''; +create table record_cursor_tbl(result varchar2(10), mod number); +insert into record_cursor_tbl values('a',2); + +create or replace procedure record_cursor_p1 +as +begin + for rec in (select a.mod || a.result, a.* from record_cursor_tbl a) loop + insert into record_cursor_tbl values(rec); + null; + end loop; +end; +/ +call record_cursor_p1(); +drop procedure record_cursor_p1; +drop table record_cursor_tbl; + +set behavior_compat_options='allow_procedure_compile_check'; +create table plpgsql_table.insert_table(a int, b int); +create table plpgsql_table.insert_table2(a int, b int); +create type plpgsql_table.ComType as (a int, b int); + +-- normal insert record type. +create or replace function testInsertRecord() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +r RR1; +r1 ComType; +TYPE AA1 is varray(100) of RR1; +TYPE AA2 is varray(100) of ComType; +TYPE TT1 is table of RR1; +TYPE TT2 is table of ComType; +a1 AA1; +a2 AA2; +t1 TT1; +t2 TT2; +begin +r = (1,1); +r1 = (1,1); +insert into insert_table values r; +insert into insert_table values r1; +insert into insert_table values(r.a, r.b); + +a1[0] = (2,2); +a1[1] = (3,3); +insert into insert_table values a1[0]; +insert into insert_table values a1(1); + +a2[0] = (4,4); +a2[1] = (5,5); +insert into insert_table values a2[0]; +insert into insert_table values a2(1); + +t1(0) = (6,6); +t1(1) = (7,7); +insert into insert_table values t1[0]; +insert into insert_table values t1(1); + +t2(0) = (8,8); +t2(1) = (9,9); +insert into insert_table values t2[0]; +insert into insert_table values t2(1); + +return 1; +end; +$$ language plpgsql; + +-- insert unsupported type variable. 
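(Reviewer note: the error cases that follow probe the boundary of the `insert ... values record_variable` form — a composite variable or a single collection element can stand for a row, while a bare scalar or a whole collection cannot. A minimal sketch of that boundary, mirroring `testInsertRecord` above and `testInsertRecordError1` below — illustrative only:)

```
declare
  type rec_t is record (a int, b int);
  r rec_t;
  i int := 1;
begin
  r := (1, 2);
  insert into insert_table values r;     -- accepted: the record expands to one row
  -- insert into insert_table values i;  -- rejected: a scalar cannot stand for a row
end;
/
```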
+create or replace function testInsertRecordError1() RETURNS int as $$ +declare +i int; +begin +i = 1; +insert into insert_table values i; +return 1; +end; +$$ language plpgsql; + +create or replace function testInsertRecordError2() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +r RR1; +i int; +begin +r = (1,1); +i = 1; +insert into insert_table values(1,1) r; +return 1; +end; +$$ language plpgsql; + +create or replace function testInsertRecordError3() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +r RR1; +r1 RR1; +begin +r = (1,1); +r1 = (2,2); +insert into insert_table values r, r1; +return 1; +end; +$$ language plpgsql; + +create or replace function testInsertRecordError4() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +TYPE AA1 is varray(100) of RR1; +a1 AA1; +begin +a1[0] = (1,1); +a1[1] = (2,2); +insert into insert_table values a1; +return 1; +end; +$$ language plpgsql; + +create or replace function testInsertRecordError5() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +TYPE AA1 is table of RR1; +a1 AA1; +begin +a1[0] = (1,1); +a1[1] = (2,2); +insert into insert_table values a1; +return 1; +end; +$$ language plpgsql; + + +create or replace function testInsertRecordError6() RETURNS int as $$ +declare +TYPE RR1 is record(a int, b int); +TYPE AA1 is table of RR1; +a1 AA1; +begin +a1[0] = (1,1); +a1[1] = (2,2); +insert into insert_table values a1[0], a1[1]; +return 1; +end; +$$ language plpgsql; + +select testInsertRecord(); +select testInsertRecordError4(); +select testInsertRecordError5(); + +create or replace function testForInsertRec() RETURNS int as $$ +declare +begin +for rec in (select a, b from insert_table) loop +insert into insert_table2 values rec; +end loop; +return 1; +end; +$$ language plpgsql; + +create or replace function testForInsertRecError1() RETURNS int as $$ +declare +begin +for rec in (select a, b, 1 from insert_table) loop +insert into insert_table2 values rec; +end loop; +return 1; +end; +$$ language plpgsql; + +select testForInsertRec(); +select testForInsertRecError1(); + +select * from insert_table; +select * from insert_table2; + +reset behavior_compat_options; +drop table insert_table; +drop table insert_table2; +drop type ComType; +drop function testInsertRecord; +drop function testInsertRecordError1; +drop function testInsertRecordError2; +drop function testInsertRecordError3; +drop function testInsertRecordError4; +drop function testInsertRecordError5; +drop function testInsertRecordError6; +drop function testForInsertRec; +drop function testForInsertRecError1; +drop schema if exists plpgsql_table; diff --git a/src/test/regress/sql/plpgsql_multiset.sql b/src/test/regress/sql/plpgsql_multiset.sql new file mode 100644 index 000000000..0324dca8a --- /dev/null +++ b/src/test/regress/sql/plpgsql_multiset.sql @@ -0,0 +1,559 @@ +-- check compatibility -- +show sql_compatibility; -- expect A -- +drop schema if exists plpgsql_multiset; +create schema plpgsql_multiset; +set current_schema = plpgsql_multiset; + +create type m_type as ( + id integer, + name varchar, + addr text +); + +create type m_type1 as ( + id integer[], + name varchar, + addr text +); + +----------------------------------------------------- +------------------ multiset union ------------------- +----------------------------------------------------- +-- test index by error +declare + TYPE SalTabTyp is TABLE OF integer index by integer; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + 
bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa.count; +end; +/ + +-- test base type +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ + +-- test different type +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + TYPE SalTabTyp1 is TABLE OF varchar(10); + bb SalTabTyp1; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 'aa'; + bb(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ + +declare + TYPE SalTabTyp is TABLE OF varchar(10); + aa SalTabTyp; + TYPE SalTabTyp1 is TABLE OF integer; + bb SalTabTyp1; + begin + bb(0) = 1; + bb(2) = 2; + aa(0) = 'aa'; + aa(1) = NULL; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ + +declare +TYPE tabint is TABLE OF integer; +bint1 tabint ; +bint2 tabint ; +begin +bint1(2) = 2; +bint1(3) = null; +bint1(4) = 4; +bint2(-1) = null; +bint2(0) = 0; +bint2(1) = null; +bint2(5) = 1; +bint2 = bint1 multiset union bint2; +RAISE INFO '%,%,%,%', bint2,bint2.first,bint2.last,bint2.count; +RAISE INFO '1:%', bint2(1); +RAISE INFO '2:%', bint2(2); +RAISE INFO '3:%', bint2(3); +RAISE INFO '4:%', bint2(4); +RAISE INFO '5:%', bint2(5); +end; +/ + +declare +TYPE tabint is TABLE OF integer; +bint1 tabint ; +bint2 tabint ; +begin +bint1(-1) = 2; +bint1(0) = 4; +bint2(4) = 3; +bint2(6) = 1; +bint2(5) = 0; +bint2 = bint1 multiset union bint2; +RAISE INFO '%,%,%,%', bint2,bint2.first,bint2.last,bint2.count; +RAISE INFO '1:%', bint2(1); +RAISE INFO '2:%', bint2(2); +RAISE INFO '3:%', bint2(3); +RAISE INFO '4:%', bint2(4); +RAISE INFO '5:%', bint2(5); +end; +/ + +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + begin + bb(0) = 1; + bb(2) = 2; + aa = aa multiset union bb; + RAISE INFO '%', aa; +end; +/ + +-- test varchar +declare + TYPE SalTabTyp is TABLE OF varchar(10); + aa SalTabTyp; + bb SalTabTyp; + begin + aa(1) = 'abcde'; + aa(2) = 'mgssq'; + bb(1) = 'zxcvb'; + bb(2) = 'abcde'; + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ + +-- test int[] error +declare + TYPE SalTabTyp is TABLE OF int[]; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ + +-- distinct base type +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ + +-- test array +declare + TYPE SalTabTyp is TABLE OF m_type1; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (ARRAY[1,2], 'lisi', 'beijing'); + bb(1) = (ARRAY[1,2], 'lisi', 'beijing'); + bb(2) = (ARRAY[2,1], 'lisi', 'beijing'); + cc = aa multiset union distinct bb; + RAISE INFO '%', cc; +end; +/ + +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (1, 'lisi', 'beijing'); + cc = aa multiset union distinct bb; + RAISE INFO '%', cc; + cc(2) = (3, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + bb(2) = (2, 'lisi', 'beijing'); + bb(3) = (1, 'lisi', 'beijing'); + cc = aa multiset union distinct bb; + RAISE INFO 
'%', cc; +end; +/ + +-- test both null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; + aa(1) = (1, 'lisi', 'beijing'); + RAISE INFO '%', aa; +end; +/ + +-- test both non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + bb(2) = NULL; + aa = aa multiset union distinct bb; + RAISE INFO '%', aa; +end; +/ + +----------------------------------------------------- +---------------- multiset intersect ----------------- +----------------------------------------------------- +declare + TYPE SalTabTyp is TABLE OF integer; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(0) = 1; + aa(2) = 2; + bb(0) = 2; + bb(1) = NULL; + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ + +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + cc = aa multiset intersect bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + cc = aa multiset intersect bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test both null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset intersect bb; + RAISE INFO '%', aa; + aa(1) = (1, 'lisi', 'beijing'); + RAISE INFO '%', aa; +end; +/ + +-- test both non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ + +-- test both non-null left 2 same value +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = (3, 'lisi', 'beijing'); + aa(6) = (4, 'lisi', 'beijing'); + + bb(0) = (3, 'lisi', 'beijing'); + bb(2) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ + +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(6) = (4, 'lisi', 'beijing'); + + bb(0) = (3, 'lisi', 'beijing'); + bb(2) = (3, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(1) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ + +-- test both non-null right 2 same value +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ + +declare + TYPE SalTabTyp is TABLE OF m_type; + aa 
SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + bb(4) = NULL; + aa = aa multiset intersect bb; + RAISE INFO '%', aa; +end; +/ + +-- test multiset intersect distinct +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + bb(4) = NULL; + aa = aa multiset intersect distinct bb; + RAISE INFO '%', aa; +end; +/ +----------------------------------------------------- +---------------- multiset except -------------------- +----------------------------------------------------- +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(1) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + aa = aa multiset except bb; + RAISE INFO '%', aa; +end; +/ + +-- test multiset except distinct +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + a integer; + begin + aa(1) = NULL; + aa(2) = (2, 'lisi', 'beijing'); + aa(3) = (3, 'lisi', 'beijing'); + aa(4) = (3, 'lisi', 'beijing'); + aa(5) = NULL; + bb(0) = (3, 'lisi', 'beijing'); + bb(2) = (4, 'lisi', 'beijing'); + bb(3) = NULL; + aa = aa multiset except distinct bb; + RAISE INFO '%', aa; +end; +/ + +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + aa(2) = (1, 'lisi', 'beijing'); + cc = aa multiset except bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test left non-null right null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + aa(1) = (1, 'lisi', 'beijing'); + aa(2) = (1, 'lisi', 'beijing'); + cc = aa multiset except distinct bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + cc = aa multiset except distinct bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test left null right non-null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + cc SalTabTyp; + begin + bb(1) = (1, 'lisi', 'beijing'); + cc = aa multiset except bb; + RAISE INFO '%', cc; + cc(2) = (2, 'lisi', 'beijing'); + RAISE INFO '%', cc; +end; +/ + +-- test both null +declare + TYPE SalTabTyp is TABLE OF m_type; + aa SalTabTyp; + bb SalTabTyp; + begin + aa = aa multiset except bb; + RAISE INFO '%', aa; + aa(1) = (1, 'lisi', 'beijing'); + RAISE INFO '%', aa; +end; +/ + +drop type m_type; +drop type m_type1; +drop schema if exists plpgsql_multiset cascade; \ No newline at end of file diff --git a/src/test/regress/sql/plpgsql_nest_compile.sql b/src/test/regress/sql/plpgsql_nest_compile.sql new 
file mode 100644 index 000000000..5f819b6a0 --- /dev/null +++ b/src/test/regress/sql/plpgsql_nest_compile.sql @@ -0,0 +1,156 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- + +-- create new schema -- +drop schema if exists plpgsql_nest_compile; +create schema plpgsql_nest_compile; +set current_schema = plpgsql_nest_compile; + +-- test insert into select array +create table tb1 (a varchar(10), b varchar(10), c varchar(10)); + +create or replace procedure proc1() +as +declare +type arra is varray(10) of varchar; +a arra; +b arra; +begin +a(1) := 'a'; +a(2) := 'b'; +a(3) := 'c'; +a(4) := 'd'; +a(5) := 'e'; +a(6) := 'f'; +insert into tb1 select a(1),a(2),a(3); +insert into tb1 select a(4),a(5),a(6); +end; +/ + +call proc1(); + +select * from tb1; + +create or replace procedure proc2() +as +declare +type arra is table of varchar; +a arra; +b arra; +begin +a(1) := 'a1'; +a(2) := 'b1'; +a(3) := 'c1'; +a(4) := 'd1'; +a(5) := 'e1'; +a(6) := 'f1'; +insert into tb1 select a(1),a(2),a(3); +insert into tb1 select a(4),a(5),a(6); +end; +/ + +call proc2(); + +select * from tb1; + +create or replace package pck1 as +procedure proc1(c out varchar2); +end pck1; +/ + +create or replace package body pck1 as +procedure proc1(c out varchar2) +as +declare +type arra is table of varchar; +a arra; +b arra; +begin +a(1) := 'a2'; +a(2) := 'b2'; +a(3) := 'c2'; +a(4) := 'd2'; +a(5) := 'e2'; +a(6) := 'f2'; +insert into tb1 select a(1),a(2),a(3); +insert into tb1 select a(4),a(5),a(6); +end; +end pck1; +/ + +select * from pck1.proc1(); + +select * from tb1; + +drop package pck1; + +-- test for loop +create table emp( +deptno smallint, +ename char(100), +salary int +); + +create table emp_back( +deptno smallint, +ename char(100), +salary int +); + +insert into emp values (10,'CLARK',7000),(10,'KING',8000),(10,'MILLER',12000),(20,'ADAMS',5000),(20,'FORD',4000); + +create or replace PROCEDURE test_forloop_001() +As +begin +for data in delete from emp returning * loop +insert into emp_back values(data.deptno,data.ename,data.salary); +end loop; +end; +/ + +call test_forloop_001(); +select * from emp; +select * from emp_back; +create or replace package pack0 is + function f1(ss in int) return int; +end pack0; +/ +create or replace package body pack0 is +function f1(ss in int) return int as + va int; +begin + va := ss; + return va; +end; +end pack0; +/ +create or replace package pack01 is +procedure main(); +end pack01; +/ +create or replace package body pack01 is +xx number:=dbe_sql.register_context(); +yy1 int:=pack0.f1(1); +procedure main() is +yy int; +begin +yy :=pack0.f1(1); +end; +end pack01; +/ + +create or replace package body pack01 is +xx number:=dbe_sql.register_context(); +yy1 int:=pack0.f1(1); +procedure main() is +yy int; +begin +yy :=pack0.f1(1); +end; +end pack01; +/ +drop package pack01; +drop package pack0; + +drop schema if exists plpgsql_nest_compile cascade; diff --git a/src/test/regress/sql/plpgsql_override_out.sql b/src/test/regress/sql/plpgsql_override_out.sql new file mode 100644 index 000000000..174092d23 --- /dev/null +++ b/src/test/regress/sql/plpgsql_override_out.sql @@ -0,0 +1,446 @@ +-- test plsql's out param override +-- check compatibility -- +show sql_compatibility; -- expect A -- +drop schema if exists plpgsql_override_out; +create schema plpgsql_override_out; +set current_schema = plpgsql_override_out; +set behavior_compat_options = 'proc_outparam_override'; +create or replace package pck1 is +procedure p1; +procedure p1(v1 in varchar2); 
+procedure p1(v1 in varchar2, v2 in varchar2); +procedure p1(v1 in varchar2, v2 in varchar2, v3 in varchar2); + +procedure p1(v1 out int); +procedure p1(v1 out int, v2 out int); +procedure p1(v1 out int, v2 out int, v3 out int); + +procedure p1(v1 in varchar2, v2 out int); +procedure p1(v1 in varchar2, v2 out int, v3 out int); + +procedure p1(v1 out int, v2 in varchar2); +procedure p1(v1 out int, v2 in varchar2, v3 in varchar2); + +procedure p1(v1 inout int, v2 inout int, v3 inout int, v4 inout int); +procedure p1(v1 inout int, v2 inout int, v3 inout varchar2, v4 inout varchar2); + +procedure p; +end pck1; +/ + +create or replace package body pck1 is + +procedure p1 is +begin +raise notice 'p1'; +end; + +procedure p1(v1 in varchar2) is +begin +raise notice 'p1_1_varchar2'; +end; + +procedure p1(v1 in varchar2, v2 in varchar2) is +begin +raise notice 'p1_2_varchar2'; +end; + +procedure p1(v1 in varchar2, v2 in varchar2, v3 in varchar2) is +begin +raise notice 'p1_3_varchar2'; +end; + +procedure p1(v1 out int) is +begin +raise notice 'p1_1_int'; +end; + +procedure p1(v1 out int, v2 out int) is +begin +raise notice 'p1_2_int'; +end; + +procedure p1(v1 out int, v2 out int, v3 out int) is +begin +raise notice 'p1_3_int'; +end; + +procedure p1(v1 in varchar2, v2 out int) is +begin +raise notice 'p1_1_varchar_1_int'; +end; + +procedure p1(v1 in varchar2, v2 out int, v3 out int) is +begin +raise notice 'p1_1_varchar_2_int'; +end; + +procedure p1(v1 out int, v2 in varchar2) is +begin +raise notice 'p1_1_int_1_varchar'; +end; + +procedure p1(v1 out int, v2 in varchar2, v3 in varchar2) is +begin +raise notice 'p1_1_int_2_varchar'; +end; + +procedure p1(v1 inout int, v2 inout int, v3 inout int, v4 inout int) is +begin +raise notice 'p1_4_inout_4_int'; +end; + +procedure p1(v1 inout int, v2 inout int, v3 inout varchar2, v4 inout varchar2) is +begin +raise notice 'p1_4_inout_2_int_2_varchar'; +end; + +procedure p is +a1 varchar2(10); +a2 varchar2(10); +a3 varchar2(10); +a4 varchar2(10); +b1 int; +b2 int; +b3 int; +b4 int; +begin +a1 := 'a1'; +a2 := 'a2'; +a3 := 'a3'; +a4 := 'a4'; +b1 := 1; +b2 := 2; +b3 := 3; +b4 := 4; +p1(); +p1(a1); +p1(b1); +p1(a1, a2); +p1(a1, b1); +p1(b1, b2); +p1(b1, a1); +p1(a1, a2, a3); +p1(b1, b2, b3); +p1(a1, b1, b2); +p1(b1, a1, a2); +p1(b1, b2, b3, b4); +p1(b1, b2, a1, a2); +end; +end pck1; +/ +-- test procedure override with out args before in args +CREATE OR REPLACE PROCEDURE test_in_out_in(a in int, b inout int, c out int, d in varchar(200), e out varchar2(200)) +PACKAGE +AS +DECLARE +new_deptno NUMBER; +BEGIN +raise notice '%,%,%,%,%', a,b,c,d,e; +new_deptno :=10; +new_deptno := new_deptno+a+b; +END; +/ +call test_in_out_in(1,2,3,'a','b'); +begin; +CURSOR temp_cursor NO SCROLL FOR SELECT test_in_out_in(1,2,3,'a','b'); +FETCH FORWARD 1 FROM temp_cursor; +end; +SELECT * from test_in_out_in(1,2,3,'a','b'); + +set behavior_compat_options = ''; +call test_in_out_in(1,2,3,'a','b'); +begin; +CURSOR temp_cursor NO SCROLL FOR SELECT test_in_out_in(1,2,'a'); +FETCH FORWARD 1 FROM temp_cursor; +end; +SELECT * from test_in_out_in(1,2,'a'); + +---- +-- test in/out/inout args +---- + +-- test procedure +CREATE OR REPLACE PROCEDURE iob_proc(a in int, b out int, c inout int) +AS +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +END; +/ +set behavior_compat_options = ''; +call iob_proc(1,2,3); -- ok +call iob_proc(1,2); +select * from iob_proc(1,2,3); +select * from iob_proc(1,2); -- ok + +set behavior_compat_options = 'proc_outparam_override'; +call iob_proc(1,2,3); -- ok +call 
iob_proc(1,2); +select * from iob_proc(1,2,3); -- ok +select * from iob_proc(1,2); + +CREATE OR REPLACE PROCEDURE bio_proc(a inout int, b in int, c out int) +AS +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +END; +/ +set behavior_compat_options = ''; +call bio_proc(1,2,3); -- ok +call bio_proc(1,2); +select * from bio_proc(1,2,3); +select * from bio_proc(1,2); -- ok + +set behavior_compat_options = 'proc_outparam_override'; +call bio_proc(1,2,3); -- ok +call bio_proc(1,2); +select * from bio_proc(1,2,3); -- ok +select * from bio_proc(1,2); + +CREATE OR REPLACE PROCEDURE obi_proc(a out int, b inout int, c in int) +AS +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +END; +/ +set behavior_compat_options = ''; +call obi_proc(1,2,3); -- ok +call obi_proc(1,2); +select * from obi_proc(1,2,3); +select * from obi_proc(1,2); -- ok + +set behavior_compat_options = 'proc_outparam_override'; +call obi_proc(1,2,3); -- ok +call obi_proc(1,2); +select * from obi_proc(1,2,3); -- ok +select * from obi_proc(1,2); + +-- test function +CREATE OR REPLACE FUNCTION iob_func(a in int, b out int, c inout int) RETURNS SETOF RECORD +AS $$ +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +return; +END +$$ +LANGUAGE plpgsql; +set behavior_compat_options = ''; +call iob_func(1,2,3); --ok +call iob_func(1,2); +select * from iob_func(1,2,3); +select * from iob_func(1,2); -- ok + +set behavior_compat_options = 'proc_outparam_override'; +call iob_func(1,2,3); +call iob_func(1,2); +select * from iob_func(1,2,3); +select * from iob_func(1,2); -- ok + +CREATE OR REPLACE FUNCTION bio_func(a inout int, b in int, c out int) RETURNS SETOF RECORD +AS $$ +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +return; +END +$$ +LANGUAGE plpgsql; +set behavior_compat_options = ''; +call bio_func(1,2,3); -- ok +call bio_func(1,2); +select * from bio_func(1,2,3); +select * from bio_func(1,2); -- ok + +set behavior_compat_options = 'proc_outparam_override'; +call bio_func(1,2,3); +call bio_func(1,2); +select * from bio_func(1,2,3); +select * from bio_func(1,2); -- ok + +CREATE OR REPLACE FUNCTION obi_func(a out int, b inout int, c in int) RETURNS SETOF RECORD +AS $$ +DECLARE +BEGIN +raise notice '%,%,%', a,b,c; +return; +END +$$ +LANGUAGE plpgsql; +set behavior_compat_options = ''; +call obi_func(1,2,3); -- ok +call obi_func(1,2); +select * from obi_func(1,2,3); +select * from obi_func(1,2); -- ok + +set behavior_compat_options = 'proc_outparam_override'; +call obi_func(1,2,3); +call obi_func(1,2); +select * from obi_func(1,2,3); +select * from obi_func(1,2); -- ok + +drop procedure test_in_out_in; +drop package pck1; + +-- test override procedure with error param +set behavior_compat_options='proc_outparam_override'; +drop package if exists pck1; +create type o1_test as (v01 number, v03 varchar2, v02 number); +create or replace package pck1 is +procedure p1(a o1_test,b out varchar2); +procedure p1(a2 int[], b2 out varchar2); +end pck1; +/ + +create or replace package body pck1 is +procedure p1(a o1_test,b out varchar2) is +begin +b:=a.v01; +raise info 'b:%',b; +end; +procedure p1(a2 int[], b2 out varchar2) is +begin +b2:=a2(2); +raise info 'b2:%',b2; +end; +end pck1; +/ + +-- should error +declare +begin +pck1.p1((1,'b',2),'a'); +end; +/ +drop table if exists test_tb; +create table test_tb(c1 int,c2 varchar2); +insert into test_tb values(1,'a'),(2,'b'),(3,'c'); +drop package if exists pck1; +create or replace package pck1 is +type tp1 is record(v01 int, v02 varchar2); +procedure p1(a inout tp1,b varchar2); +end pck1; +/ + +create or replace package 
body pck1 is +procedure p1(a inout tp1,b varchar2) is +begin +select * into a from test_tb where c2=b; +end; +end pck1; +/ +declare +var pck1.tp1; +begin +perform pck1.p1(var,'a'); +end; +/ +set behavior_compat_options='proc_outparam_override'; +create or replace procedure p2(a int,b out int) is +begin +raise info 'a:%', a+1; +end; +/ +drop table if exists test_tb; +create table test_tb(c1 int,c2 varchar2); +insert into test_tb values(1,'a'),(2,'b'),(3,'c'); +drop package if exists pck1; +create or replace package pck1 is +type tp1 is table of varchar2(1024) index by varchar2(4000); +procedure p1(out_var out tp1,in_var varchar2); +end pck1; +/ +create or replace package body pck1 is +procedure p1(out_var out tp1,in_var varchar2) is +begin +select c1 into out_var(in_var) from test_tb limit 1; +out_var('aa'):='aa'; +end; +end pck1; +/ +declare +var pck1.tp1; +begin +perform pck1.p1(var,'a');-- not supported, raises an error +end; +/ +\df +drop package pck1; +drop type o1_test; +set behavior_compat_options = ''; + +create or replace procedure proc_test +as +work_date varchar2; +begin +work_date:='202208'; +end; +/ + +call proc_test(); + +create or replace procedure proc_test +as +workZ varchar2; +begin +workZ:='202208'; +end; +/ + +call proc_test(); + +create or replace procedure proc_test +as +read_1 varchar2; +begin +read_1:='202208'; +end; +/ + +call proc_test(); + +create or replace procedure proc_test +as +transaction_1 varchar2; +begin +transaction_1:='202208'; +end; +/ + +call proc_test(); + +create or replace procedure proc_test +as +isolation1 varchar2; +begin +isolation1:='202208'; +end; +/ + +call proc_test(); + +create or replace procedure proc_test +as +deferrableZ varchar2; +begin +deferrableZ:='202208'; +end; +/ + +call proc_test(); + +create or replace procedure proc_test +as +not_1 varchar2; +begin +not_1:='202208'; +end; +/ + +call proc_test(); +drop procedure proc_test; + +drop schema if exists plpgsql_override_out cascade; diff --git a/src/test/regress/sql/plpgsql_package_param.sql b/src/test/regress/sql/plpgsql_package_param.sql new file mode 100644 index 000000000..b822025e0 --- /dev/null +++ b/src/test/regress/sql/plpgsql_package_param.sql @@ -0,0 +1,23 @@ +create or replace package pkg1 as + function func_add_sql(a integer, b integer) return integer immutable; +end pkg1; +create or replace package body pkg1 as data1 integer; + function func_add_sql(a integer, b integer) return integer stable as + begin select a+b into data1; + return data1; + end; +end pkg1; +/ + +create or replace package pkg1 as + function func_add_sql(a integer, b integer) return integer immutable; +end pkg1; +create or replace package body pkg1 as data1 integer; + function func_add_sql(a integer, b integer) return integer immutable as + begin select a+b into data1; + return data1; + end; +end pkg1; +/ + +drop package if exists pkg1; \ No newline at end of file diff --git a/src/test/regress/sql/plpgsql_package_type.sql b/src/test/regress/sql/plpgsql_package_type.sql new file mode 100644 index 000000000..110240703 --- /dev/null +++ b/src/test/regress/sql/plpgsql_package_type.sql @@ -0,0 +1,977 @@ +-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios -- + +-- check compatibility -- +show sql_compatibility; -- expect ORA -- + +-- create new schema -- +drop schema if exists plpgsql_packagetype1; +create schema plpgsql_packagetype1; +drop schema if exists plpgsql_packagetype2; +create schema plpgsql_packagetype2; + + +-- initialize table and type-- + + +
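(Reviewer note: this suite exercises types declared inside packages — records and table-of collections — which are then referenced from other procedures, packages, and schemas through the qualified `package.type` or `schema.package.type` form. A minimal sketch of the pattern, using a hypothetical `demo_pkg` — illustrative only:)

```
create or replace package demo_pkg as
  type rec_t is record (c1 varchar2, c2 int);  -- visible elsewhere as demo_pkg.rec_t
end demo_pkg;
/

declare
  v demo_pkg.rec_t;  -- a package-scoped type referenced outside its package
begin
  v := ('a', 1);
  raise info '%', v;
end;
/
```

+---------------------------------------------------- +------------------ START 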
OF TESTS ------------------ +---------------------------------------------------- + +-- test package type internal use +set search_path=plpgsql_packagetype1; +create or replace package p_test1 as + type t1 is record(c1 varchar2, c2 int); + type t2 is table of t1; + function f1(ss in t1) return t1; + function f2(ss in t2) return t2; + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2); + procedure p2(aa in int); +end p_test1; +/ +create or replace package p_test1 as + type t1 is record(c1 varchar2, c2 int); + type t2 is table of t1; + function f1(ss in t1) return t1; + function f2(ss in t2) return t2; + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2); + procedure p2(aa in int); +end p_test1; +/ +create or replace package body p_test1 as + vb char; + function f1(ss in t1) return t1 as + va t1; + begin + va := ss; + plpgsql_packagetype1.p_test1.vb := ''; + raise info '%',va; + return va; + end; + + function f2(ss in t2) return t2 as + vb t2; + begin + vb := ss; + return vb; + end; + + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure p2(aa in int) as + aa t1; + bb t2; + cc t1; + dd t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + p1(aa,bb,cc,dd); + end; + +end p_test1; +/ +create or replace package body p_test1 as + function f1(ss in t1) return t1 as + va t1; + begin + va := ss; + raise info '%',va; + return va; + end; + + function f2(ss in t2) return t2 as + vb t2; + begin + vb := ss; + return vb; + end; + + procedure p1(aa in t1,bb in t2,cc out t1,dd out t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure p2(aa in int) as + aa t1; + bb t2; + cc t1; + dd t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + p1(aa,bb,cc,dd); + end; + +end p_test1; +/ + +select p_test1.f1(('a',3)); +select p_test1.f2(array[('a',1)::p_test1.t1,('b',2)::p_test1.t1,('c',4)::p_test1.t1]); + +-- test package type used in another package +create or replace package p_test2 as + var1 p_test1.t1; + type t21 is record(c1 p_test1.t1); + type t22 is table of p_test1.t1; + function ff1(ss in p_test1.t1) return p_test1.t1; + procedure pp1(aa in p_test1.t1,bb in p_test1.t2,cc out p_test1.t1,dd out p_test1.t2); + procedure pp2(aa in int); +end p_test2; +/ +create or replace package body p_test2 as + function ff1(ss in p_test1.t1) return p_test1.t1 as + va p_test1.t1; + begin + va := ss; + raise info '%',va; + return va; + end; + + + procedure pp1(aa in p_test1.t1,bb in p_test1.t2,cc out p_test1.t1,dd out p_test1.t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure pp2(aa in int) as + aa p_test1.t1; + bb p_test1.t2; + cc p_test1.t1; + dd p_test1.t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + pp1(aa,bb,cc,dd); + end; + +end p_test2; +/ + +select p_test2.ff1(('a',3)); + +-- test package type used in another schema package +set search_path=plpgsql_packagetype2; +create or replace package p_test2 as + var1 plpgsql_packagetype1.p_test1.t1; + type t21 is record(c1 plpgsql_packagetype1.p_test1.t1); + type t22 is table of plpgsql_packagetype1.p_test1.t1; + function ff1(ss in plpgsql_packagetype1.p_test1.t1) return plpgsql_packagetype1.p_test1.t1; + procedure pp1(aa in plpgsql_packagetype1.p_test1.t1,bb in plpgsql_packagetype1.p_test1.t2,cc out plpgsql_packagetype1.p_test1.t1,dd out plpgsql_packagetype1.p_test1.t2); +end p_test2; +/ +create or replace package body p_test2 as + function ff1(ss in plpgsql_packagetype1.p_test1.t1) return 
plpgsql_packagetype1.p_test1.t1 as + va plpgsql_packagetype1.p_test1.t1; + begin + va := ss; + raise info '%',va; + return va; + end; + + + procedure pp1(aa in plpgsql_packagetype1.p_test1.t1,bb in plpgsql_packagetype1.p_test1.t2,cc out plpgsql_packagetype1.p_test1.t1,dd out plpgsql_packagetype1.p_test1.t2) as + begin + cc := aa; + dd := bb; + raise info '% %', cc,dd; + end; + + procedure pp2(aa in int) as + aa plpgsql_packagetype1.p_test1.t1; + bb plpgsql_packagetype1.p_test1.t2; + cc plpgsql_packagetype1.p_test1.t1; + dd plpgsql_packagetype1.p_test1.t2; + begin + aa := ('a',1); + bb := array[('b',2),('c',3)]; + pp1(aa,bb,cc,dd); + end; + +end p_test2; +/ + +select p_test2.ff1(('a',3)); + +drop package p_test2; +drop package plpgsql_packagetype1.p_test2; +drop package plpgsql_packagetype1.p_test1; + +--test ref cursortype +create or replace package test_cur +IS +type ref_cur is ref cursor; +end test_cur; +/ +create or replace package body test_cur +IS +a int; +end test_cur; +/ +create or replace package test_cur2 +IS +procedure proc1(cur1 test_cur.ref_cur); +end test_cur2; +/ +create or replace package body test_cur2 +IS +procedure proc1(cur1 test_cur.ref_cur) +is +BEGIN +cur1.a.a:=2; +end; +end test_cur2; +/ +create or replace package test_cur3 +IS +procedure proc11(cur1 test_cur.ref_cur); +function func11() return test_cur.ref_cur; +end test_cur3; +/ +create or replace package body test_cur3 +IS +procedure proc11(cur1 test_cur.ref_cur) +is +BEGIN +cur1.a.a:=2; +end; +function func11() return test_cur.ref_cur +is +BEGIN +return 1; +end; +end test_cur3; +/ +create or replace package test_cur4 +IS +procedure proc111(cur1 test_cur.ref_cur); +function func111 return test_cur.ref_cur; +end test_cur4; +/ +create or replace package body test_cur4 +IS +procedure proc111(cur1 test_cur.ref_cur) +is +BEGIN +cur1.a.a:=2; +cur.a.a:=2; +end; +function func111() return test_cur.ref_cur +is +BEGIN +return 1; +end; +end test_cur4; +/ +drop package if exists test_cur; +drop package if exists test_cur2; +drop package if exists test_cur3; +drop package if exists test_cur4; + +create or replace package pck1 is +type t1 is table of varchar2(10); +procedure pp11 (t1 in varchar2(10)); +procedure pp22 (t1 out varchar2(10)); +end pck1; +/ + +create or replace package body pck1 is +procedure pp11 (t1 in varchar2(10)) is +begin +raise info '%', t1; +end; +procedure pp22 (t1 out varchar2(10)) is +begin +t1 := 'bb'; +raise info '%', t1; +end; +end pck1; +/ +call pck1.pp11('aa'); +call pck1.pp22('cc'); + +DROP PACKAGE pck1; + +--test package, type with same name +create or replace package plpgsql_packagetype1.pck2 is +va int; +end pck2; +/ +create type plpgsql_packagetype1.pck2 as (a int, b int); + +DROP table if exists t1; +CREATE table t1 (a plpgsql_packagetype1.pck2); +insert into t1 values((1,2)); +select * from t1; + +create or replace package pck3 is +va plpgsql_packagetype1.pck2; +end pck3; +/ + +DROP package pck3; +DROP package plpgsql_packagetype1.pck2; +DROP table t1; +DROP type plpgsql_packagetype1.pck2; + +--test synonym name same with procedure itself +create procedure proc1 as +begin +null; +end; +/ +create synonym proc1 for proc1; + +call proc1(); + +DROP procedure proc1(); + +--test package type form record +create or replace package pck1 is +type t1 is record (a int, b int); +type ta is varray(10) of varchar2(100); +type tb is varray(10) of int; +type tc is varray(10) of t1; +type td is table of varchar2(100); +type te is table of int; +type tf is table of t1; +end pck1; +/ + +create or replace 
package pck2 is +type tb is record (col1 pck1.ta, col2 pck1.tb, col3 pck1.tc, col4 pck1.td, col5 pck1.te, col6 pck1.tf); +end pck2; +/ + +DROP PACKAGE pck2; +DROP PACKAGE pck1; + +-- test not support nested type +-- 1. array nest array +create or replace package pck1 as +type t1 is varray(10) of int; +type t2 is varray(10) of t1; +end pck1; +/ + +create or replace function func1() return int as +type t1 is varray(10) of int; +type t2 is varray(10) of t1; +begin +return 0; +end; +/ +-- 2. table nest array +create or replace package pck1 as +type t1 is varray(10) of int; +type t2 is table of t1; +end pck1; +/ + +create or replace function func1() return int as +type t1 is varray(10) of int; +type t2 is table of t1; +begin +return 0; +end; +/ +-- 3. array nest table +create or replace package pck1 as +type t1 is table of int; +type t2 is varray(10) of t1; +end pck1; +/ + +create or replace function func1() return int as +type t1 is table of int; +type t2 is varray(10) of t1; +begin +return 0; +end; +/ +-- 4. table nest table, will be supported soon +-- create or replace package pck1 as +-- type t1 is table of int; +-- type t2 is table of t1; +-- end pck1; +-- / + +-- create or replace function func1() return int as +-- type t1 is table of int; +-- type t2 is table of t1; +-- begin +-- return 0; +-- end; +-- / +-- 5. record nest ref cursor +drop package if exists pck1; +create or replace package pck1 as +type t1 is ref cursor; +type t2 is record(c1 t1,c2 int); +end pck1; +/ + +create or replace function func1() return int as +type t1 is ref cursor; +type t2 is record(c1 t1,c2 int); +begin +return 0; +end; +/ +-- 6. table nest ref cursor +create or replace package pck1 as +type t1 is ref cursor; +type t2 is table of t1; +end pck1; +/ + +create or replace function func1() return int as +type t1 is ref cursor; +type t2 is table of t1; +begin +return 0; +end; +/ +-- 7. 
varray nest ref cursor
+create or replace package pck1 as
+type t1 is ref cursor;
+type t2 is varray(10) of t1;
+end pck1;
+/
+
+create or replace function func1() return int as
+type t1 is ref cursor;
+type t2 is varray(10) of t1;
+begin
+return 0;
+end;
+/
+DROP package pck1;
+DROP function func1();
+-- 8. package nested
+
+create or replace package pck1 as
+type t1 is table of int;
+type t2 is varray(10) of int;
+type t3 is ref cursor;
+end pck1;
+/
+create or replace package pck2 as
+type t1 is varray(10) of pck1.t2;
+v1 t1;
+function func1() return int;
+end pck2;
+/
+
+-- 9. package nested
+create or replace package pck2 as
+type t1 is table of pck1.t1;
+v1 t1;
+function func1() return int;
+end pck2;
+/
+
+-- 10. package nested
+create or replace package pck2 as
+type t1 is table of pck1.t3;
+v1 t1;
+function func1() return int;
+end pck2;
+/
+
+-- 11. package nested
+create or replace package pck2 as
+type t1 is record(a pck1.t3);
+v1 t1;
+function func1() return int;
+end pck2;
+/
+
+DROP package pck2;
+DROP package pck1;
+
+-- test type nested by private type
+create or replace package pck1 as
+type t1 is varray(10) of int;
+type t2 is record (a int, b int);
+end pck1;
+/
+create or replace package body pck1 as
+type t3 is varray(10) of int;
+type t4 is record (a t3, b int);
+procedure p2 (a pck1.t2) is
+type t5 is varray(10) of t4;
+type t6 is varray(10) of t2;
+begin
+null;
+end;
+end pck1;
+/
+
+DROP PACKAGE pck1;
+
+-- test that replacing the body does not affect the spec
+create or replace package pck1 as
+type t1 is varray(10) of int;
+type t2 is record (a int, b int);
+end pck1;
+/
+
+create or replace package pck2 as
+procedure p1 (a pck1.t1);
+end pck2;
+/
+create or replace package body pck2 as
+procedure p1 (a pck1.t1) is
+begin
+null;
+end;
+end pck2;
+/
+
+create or replace package body pck1 as
+procedure p1 (a int) is
+begin
+null;
+end;
+end pck1;
+/
+
+call pck2.p1(array[1,2,4]);
+
+DROP PACKAGE pck2;
+DROP PACKAGE pck1;
+
+-- test package table of type
+create or replace package pck1 is
+type t1 is table of int index by varchar2(10);
+end pck1;
+/
+
+create or replace package pck2 is
+va pck1.t1;
+procedure p1;
+end pck2;
+/
+create or replace package body pck2 is
+procedure p1 is
+begin
+va('aaa') := 1;
+va('a') := 2;
+raise info '%', va;
+end;
+end pck2;
+/
+
+call pck2.p1();
+
+DROP PACKAGE pck2;
+DROP PACKAGE pck1;
+
+-- test package record type as procedure in out param
+-- (1) in param
+create or replace package pck1 as
+type r1 is record (a int, b int);
+procedure p1;
+procedure p2(a in r1);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+va r1;
+begin
+va := (1,2);
+p2(va);
+va := (4,5);
+p2(va);
+end;
+procedure p2 (a in r1) as
+begin
+raise info '%', a;
+end;
+end pck1;
+/
+call pck1.p1();
+DROP PACKAGE pck1;
+-- (2) out param
+create or replace package pck1 as
+type r1 is record (a int, b int);
+procedure p1;
+procedure p2(a out r1);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+va r1;
+begin
+raise info '%', va;
+p2(va);
+raise info '%', va;
+end;
+procedure p2 (a out r1) as
+begin
+a.a := 11;
+a.b := 22;
+end;
+end pck1;
+/
+call pck1.p1();
+DROP PACKAGE pck1;
+-- (3) inout param
+create or replace package pck1 as
+type r1 is record (a int, b int);
+procedure p1;
+procedure p2(a inout r1);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+va r1;
+begin
+va := (11,22);
+raise info '%', va;
+p2(va);
+raise info '%', va;
+end;
+procedure p2 (a inout r1) as
+begin
+a.a := a.a + 1;
+a.b := a.b + 1;
+end; +end pck1; +/ +call pck1.p1(); +DROP PACKAGE pck1; +-- test more column number +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +type r2 is record (a int, b int, c int); +va r1; +vb r2; +begin +va := (1,2); +vb := (1,2,3); +raise info '%',va; +p2(vb); +raise info '%',va; +end; +procedure p2 (a out r1) as +begin +raise info '%', a; +a := (4,5); +end; +end pck1; +/ +call pck1.p1(); +DROP PACKAGE pck1; +-- test less column number +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +type r2 is record (a int); +va r1; +vb r2; +begin +va := (1,2); +vb.a := 1; +raise info '%',va; +p2(vb); +raise info '%',va; +end; +procedure p2 (a out r1) as +begin +raise info '%', a; +a := (4,5); +end; +end pck1; +/ +call pck1.p1(); +DROP PACKAGE pck1; +-- test wrong column type +create or replace package pck1 as +type r1 is record (a int, b int); +procedure p1; +procedure p2(a out r1); +end pck1; +/ +create or replace package body pck1 as +procedure p1 as +type r2 is record (a int, b varchar2(10)); +va r1; +vb r2; +begin +va := (1,2); +vb := (1,'aa'); +raise info '%',va; +p2(vb); +raise info '%',va; +end; +procedure p2 (a out r1) as +begin +raise info '%', a; +a := (4,5); +end; +end pck1; +/ +call pck1.p1(); +DROP PACKAGE pck1; + +-- test package type alter +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is table of int index by varchar(10); +type r3 is varray(10) of int; +end pck1; +/ + +-- (1) grant or revoke +grant drop on type pck1.r1 to public; +grant alter on type pck1.r2 to public; +grant alter on type pck1.r3 to public; +revoke drop on type pck1.r1 from public; +revoke drop on type pck1.r2 from public; +revoke drop on type pck1.r3 from public; + +-- (2) drop +DROP TYPE pck1.r1 cascade; +DROP TYPE pck1.r2 cascade; +DROP TYPE pck1.r3 cascade; + +-- (3) alter: rename +ALTER TYPE pck1.r1 RENAME TO o1; +ALTER TYPE pck1.r2 RENAME TO o1; +ALTER TYPE pck1.r3 RENAME TO o1; + +-- (4) alter: owner +ALTER TYPE pck1.r1 OWNER TO CURRENT_USER; +ALTER TYPE pck1.r2 OWNER TO CURRENT_USER; +ALTER TYPE pck1.r3 OWNER TO CURRENT_USER; + +-- (5) alter: set schema +ALTER TYPE pck1.r1 SET SCHEMA public; +ALTER TYPE pck1.r2 SET SCHEMA public; +ALTER TYPE pck1.r3 SET SCHEMA public; + +DROP PACKAGE pck1; + +-- test package type as table of type column +create or replace package pck1 is +type r1 is record (a int, b int); +type r2 is table of int index by varchar(10); +type r3 is varray(10) of int; +end pck1; +/ + +-- (1) as table +create table t1(a pck1.r1); +create table t1(a pck1.r2); +create table t1(a pck1.r3); + +-- (2) as type +create type o1 as (a pck1.r1); +create type o1 as (a pck1.r2); +create type o1 as (a pck1.r3); + +-- (3) in procedure +create or replace procedure p1 as +begin +create table t1(a pck1.r1); +end; +/ +call p1(); + +create or replace procedure p1 as +begin +create table t1(a pck1.r2); +end; +/ +call p1(); + +create or replace procedure p1 as +begin +create table t1(a pck1.r3); +end; +/ +call p1(); + +create or replace procedure p1 as +begin +create type o1 as (a pck1.r1); +end; +/ +call p1(); + +create or replace procedure p1 as +begin +create type o1 as (a pck1.r2); +end; +/ +call p1(); + +create or replace procedure p1 as +begin +create type o1 as (a pck1.r3); +end; +/ +call p1(); + +DROP procedure p1; +DROP 
package pck1;
+
+-- test cleanup of package-dependent types
+
+create or replace package pck4 is
+va int;
+end pck4;
+/
+
+create or replace package body pck4 is
+type test_pck4_recordtype is record (a int, b int);
+type test_pck4_arraytype is varray(10) of int;
+type test_pck4_tableoftype is table of int index by varchar2(10);
+type test_pck4_refcursor is ref cursor;
+end pck4;
+/
+
+select count(*) from pg_type where typname like '%.test_pck4%';
+select count(*) from PG_SYNONYM where synname like '%.test_pck4%';
+
+create or replace package body pck4 is
+vb int;
+end pck4;
+/
+
+select count(*) from pg_type where typname like '%.test_pck4%';
+select count(*) from PG_SYNONYM where synname like '%.test_pck4%';
+
+DROP PACKAGE pck4;
+
+-- test table of record index by varchar
+create or replace package pkg045
+is
+type type000 is record(c1 number(7,3),c2 varchar2(30));
+type type001 is table of type000 index by varchar2(30);
+function proc045_2(col1 int) return int;
+end pkg045;
+/
+
+create or replace package body pkg045
+is
+function proc045_2(col1 int) return int
+is
+b2 pkg045.type001;
+begin
+b2('1').c1:=1.000;
+b2('1').c2:='aaa';
+raise info '%,%', b2('1').c1, b2('1').c2;
+return 1;
+end;
+end pkg045;
+/
+
+call pkg045.proc045_2(1);
+drop package pkg045;
+
+-- test alter package/function owner
+-- (a) alter package
+reset search_path;
+create user alt_user_1 PASSWORD 'gauss@123';
+create user alt_user_2 PASSWORD 'gauss@123';
+
+SET SESSION AUTHORIZATION alt_user_1 password 'gauss@123';
+drop package if exists pck1;
+
+create or replace package pck1 as
+type tt1 is record(c1 int,c2 int);
+type tt2 is table of tt1;
+type tt3 is varray(10) of tt1;
+function func1()return int;
+procedure proc1();
+end pck1;
+/
+
+create or replace package body pck1 as
+function func1()return int as
+type tt5 is record(c1 tt1,c2 int);
+type tt6 is table of pck1.tt1;
+type tt7 is varray(10) of pck1.tt1;
+type tt8 is ref cursor;
+type tt9 is table of tt5;
+begin
+return 0;
+end;
+procedure proc1() as
+type tta is record(c1 tt1,c2 int);
+type ttb is table of pck1.tt1;
+type ttc is varray(10) of pck1.tt1;
+type ttd is ref cursor;
+type tte is table of tta;
+begin
+null;
+end;
+end pck1;
+/
+
+reset session AUTHORIZATION;
+alter package alt_user_1.pck1 owner to alt_user_2;
+------ expect usename = alt_user_2
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt1%' limit 1);
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt2%' limit 1);
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt3%' limit 1);
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tt5%' limit 1);
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.tta%' limit 1);
+
+drop package alt_user_1.pck1;
+
+-- (b) alter function
+SET SESSION AUTHORIZATION alt_user_1 password 'gauss@123';
+create or replace function func1()return int as
+type ttt5 is record(c1 int,c2 int);
+type ttt6 is table of int;
+type ttt7 is varray(10) of int;
+type ttt8 is ref cursor;
+type ttt9 is table of ttt5;
+begin
+return 0;
+end;
+/
+reset session AUTHORIZATION;
+alter function alt_user_1.func1() owner to alt_user_2;
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.ttt5%' limit 1);
+select usename from pg_user where usesysid = (select typowner from pg_type where typname like '%.ttt6%' limit 1);
+
+drop function alt_user_1.func1();
+drop user alt_user_1 cascade;
+drop user alt_user_2 cascade;
+
+
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+drop package p_test2;
+drop package plpgsql_packagetype1.p_test2;
+drop package plpgsql_packagetype1.p_test1;
+
+-- clean up --
+drop schema if exists plpgsql_packagetype2 cascade;
+drop schema if exists plpgsql_packagetype1 cascade;
diff --git a/src/test/regress/sql/plpgsql_record_attrname.sql b/src/test/regress/sql/plpgsql_record_attrname.sql
new file mode 100644
index 000000000..8fb134bd1
--- /dev/null
+++ b/src/test/regress/sql/plpgsql_record_attrname.sql
@@ -0,0 +1,90 @@
+-- FOR PL/pgSQL ARRAY of RECORD TYPE scenarios --
+
+-- check compatibility --
+show sql_compatibility; -- expect ORA --
+
+-- create new schema --
+drop schema if exists plpgsql_record;
+create schema plpgsql_record;
+set search_path=plpgsql_record;
+
+
+
+-- initialize table and type --
+CREATE TABLE DCT_DATACLR_LOG(TYPE int NOT NULL ENABLE);
+
+----------------------------------------------------
+------------------ START OF TESTS ------------------
+----------------------------------------------------
+
+-- test TYPE as a table col name
+create or replace package p_test1 as
+    TYPE IN_CLEANLOG_TYPE IS RECORD(IN_TYPE DCT_DATACLR_LOG.TYPE%TYPE);
+    function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE;
+end p_test1;
+/
+create or replace package body p_test1 as
+    function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE as
+    va IN_CLEANLOG_TYPE;
+    begin
+        va := ss;
+        raise info '%',va;
+        return va;
+    end;
+end p_test1;
+/
+
+select p_test1.f1(ROW(3));
+
+-- test TYPE as a col name of record
+create or replace package p_test1 as
+    TYPE IN_CLEANLOG_TYPE IS RECORD(TYPE int);
+    function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE;
+end p_test1;
+/
+create or replace package body p_test1 as
+    function f1(ss in IN_CLEANLOG_TYPE) return IN_CLEANLOG_TYPE as
+    va IN_CLEANLOG_TYPE;
+    begin
+        va := ss;
+        raise info '%',va;
+        return va;
+    end;
+end p_test1;
+/
+
+select p_test1.f1(ROW(3));
+
+--test RECORD column names that collide with existing type and variable names
+create or replace package p_test2 is
+    type array_type is varray(10) of int;
+    type tab_type is table of int;
+    type r_type is record (a int, b int);
+    va array_type;
+    vb tab_type;
+    vc r_type;
+    type IN_CLEANLOG_TYPE is record (array_type int, tab_type int, r_type int, va int, vb int, vc int);
+    function f1(ss in IN_CLEANLOG_TYPE) return int;
+end p_test2;
+/
+create or replace package body p_test2 as
+    function f1(ss in IN_CLEANLOG_TYPE) return int as
+    vaa IN_CLEANLOG_TYPE;
+    begin
+        vaa := ss;
+        raise info '%',vaa;
+        return vaa.va;
+    end;
+end p_test2;
+/
+
+select p_test2.f1((1,2,3,4,5,6));
+
+--------------------------------------------------
+------------------ END OF TESTS ------------------
+--------------------------------------------------
+drop package p_test2;
+drop package p_test1;
+
+-- clean up --
+drop schema if exists plpgsql_record cascade;
diff --git a/src/test/regress/sql/plpgsql_savepoint.sql b/src/test/regress/sql/plpgsql_savepoint.sql
new file mode 100644
index 000000000..b1f682c8d
--- /dev/null
+++ b/src/test/regress/sql/plpgsql_savepoint.sql
@@ -0,0 +1,982 @@
+/*
+################################################################################
+# TESTCASE NAME : plpgsql_savepoint
+# COMPONENT(S) : plpgsql savepoint
+################################################################################
+*/
+
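+-- Editor's note: a minimal sketch of the semantics under test, assuming
+-- Oracle-style PL savepoints: ROLLBACK TO SAVEPOINT undoes only the work done
+-- after the named savepoint. All names below are illustrative.
+CREATE TABLE sp_sketch_t(c1 INT);
+CREATE OR REPLACE PROCEDURE sp_sketch IS
+  BEGIN
+    INSERT INTO sp_sketch_t VALUES(1);
+    SAVEPOINT s1;
+    INSERT INTO sp_sketch_t VALUES(2);
+    ROLLBACK TO SAVEPOINT s1;
+  END;
+  /
+CALL sp_sketch();
+SELECT * FROM sp_sketch_t; -- expect a single row: 1
+DROP PROCEDURE sp_sketch;
+DROP TABLE sp_sketch_t;
+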
+CREATE TABLE pl_txn_t(tc1 INT, tc2 INT);
+
+-- normal case 1
+CREATE OR REPLACE PROCEDURE sp_normal_1 IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_A;
+
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    SAVEPOINT SAVE_B;
+
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    SAVEPOINT SAVE_C;
+
+    INSERT INTO pl_txn_t VALUES(4, 4);
+    INSERT INTO pl_txn_t VALUES(5, 5);
+    ROLLBACK TO SAVEPOINT SAVE_C;
+
+    INSERT INTO pl_txn_t VALUES(6, 6);
+    ROLLBACK TO SAVEPOINT SAVE_B;
+
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    ROLLBACK TO SAVEPOINT SAVE_A;
+  END;
+  /
+SELECT sp_normal_1();
+COMMIT;
+SELECT sp_normal_1(), sp_normal_1();
+DROP PROCEDURE sp_normal_1;
+
+-- normal case 2
+CREATE OR REPLACE PROCEDURE sp_normal_2 IS
+  BEGIN
+    SAVEPOINT
+    SAVE_A;
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    ROLLBACK TO SAVEPOINT
+    SAVE_A;
+    SAVEPOINT B;
+  END;
+  /
+SELECT sp_normal_2();
+BEGIN;
+SELECT sp_normal_2();
+SELECT sp_normal_2(); -- fails: savepoints taken outside the statement are not supported yet
+COMMIT;
+DROP PROCEDURE sp_normal_2;
+
+-- savepoint name as variable in PL
+CREATE OR REPLACE PROCEDURE sp_name_variable IS
+  sp_name NVARCHAR2(100) := 'SAVE_A';
+  BEGIN
+    SAVEPOINT sp_name;
+    ROLLBACK TO sp_name;
+  END;
+  /
+CALL sp_name_variable();
+
+CREATE OR REPLACE PROCEDURE sp_name_variable IS
+  sp_name NVARCHAR2(100) := 'SAVE_A';
+  BEGIN
+    SAVEPOINT sp_name;
+    ROLLBACK TO SAVE_A; -- no such savepoint
+  END;
+  /
+CALL sp_name_variable();
+DROP PROCEDURE sp_name_variable;
+
+-- savepoint name is too long.
+CREATE OR REPLACE PROCEDURE sp_name_length IS
+  BEGIN
+    SAVEPOINT sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
+    ROLLBACK TO sp_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
+  END;
+  /
+CALL sp_name_length();
+
+-- no savepoint outside statement
+CREATE OR REPLACE PROCEDURE sp_no_outside IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    COMMIT;
+    ROLLBACK TO SAVEPOINT SAVE_A;
+  END;
+  /
+CALL sp_no_outside();
+BEGIN;
+SAVEPOINT SAVE_A;
+ROLLBACK TO SAVEPOINT SAVE_A;
+SELECT sp_no_outside();
+ROLLBACK;
+BEGIN;
+SAVEPOINT SAVE_A;
+RELEASE SAVEPOINT SAVE_A;
+CALL sp_no_outside();
+ROLLBACK;
+DROP PROCEDURE sp_no_outside;
+
+-- savepoint + commit / rollback
+CREATE OR REPLACE PROCEDURE sp_commit_rollback(p INT) IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_A;
+    IF p%2 = 0 then
+      ROLLBACK;
+    ELSE
+      COMMIT;
+    END IF;
+  END;
+  /
+SELECT sp_commit_rollback(0);
+SELECT sp_commit_rollback(1);
+
+CREATE OR REPLACE PROCEDURE sp_commit_rollback IS
+  BEGIN
+    SAVEPOINT save_a;
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    ROLLBACK TO save_a;
+    COMMIT;
+  END;
+  /
+CALL sp_commit_rollback();
+DROP PROCEDURE sp_commit_rollback;
+
+CREATE OR REPLACE PROCEDURE pl_commit IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    COMMIT;
+  END;
+  /
+SELECT pl_commit();
+DROP PROCEDURE pl_commit;
+
+CREATE OR REPLACE PROCEDURE commit_drop_sp IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_1;
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    SAVEPOINT SAVE_2;
+    COMMIT;
+    INSERT INTO pl_txn_t VALUES(4, 4);
+    ROLLBACK TO SAVEPOINT SAVE_1; -- COMMIT has already released SAVE_1
+  END;
+  /
+SELECT commit_drop_sp(); -- no such savepoint
+DROP PROCEDURE commit_drop_sp;
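+
+-- Editor's note: the case above relies on an intra-procedure COMMIT releasing
+-- every savepoint taken before it, so the later ROLLBACK TO SAVEPOINT SAVE_1
+-- should report "no such savepoint". This reading is inferred from the inline
+-- comments; treat it as an assumption rather than a specification.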
+
+-- savepoint in cursor
+CREATE OR REPLACE FUNCTION sp_inner RETURN INTEGER
+AS
+  BEGIN
+    SAVEPOINT save_a;
+    COMMIT;
+    SAVEPOINT save_a;
+    RETURN 1;
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_in_cursor IS
+  CURSOR c1 FOR SELECT sp_inner() FROM pl_txn_t;
+  val INT;
+  BEGIN
+    SAVEPOINT save_a;
+    OPEN c1;
+    FETCH c1 INTO val;
+    CLOSE c1;
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 1';
+  END;
+  /
+SELECT sp_in_cursor();
+DROP PROCEDURE sp_in_cursor;
+DROP PROCEDURE sp_inner;
+
+CREATE OR REPLACE FUNCTION sp_inner RETURN INTEGER
+AS
+  BEGIN
+    ROLLBACK TO save_axxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx;
+    RETURN 1;
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_in_cursor is
+  CURSOR c1 FOR SELECT sp_inner() FROM pl_txn_t;
+  val INT;
+  BEGIN
+    SAVEPOINT save_a;
+    OPEN c1;
+    FETCH c1 INTO val;
+    CLOSE c1;
+  END;
+  /
+CALL sp_in_cursor();
+DROP PROCEDURE sp_in_cursor;
+DROP PROCEDURE sp_inner;
+
+-- savepoint in subroutine
+CREATE OR REPLACE PROCEDURE sp_subroutine IS
+  BEGIN
+    SAVEPOINT save_0;
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    ROLLBACK TO save_0;
+    SAVEPOINT save_2;
+    SAVEPOINT save_3;
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_in_subroutine IS
+  BEGIN
+    SAVEPOINT save_1;
+    sp_subroutine();
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    ROLLBACK TO save_1;
+    INSERT INTO pl_txn_t VALUES(2, 2);
+  END;
+  /
+SELECT sp_in_subroutine();
+SELECT sp_in_subroutine();
+
+CREATE OR REPLACE PROCEDURE sp_in_subroutine IS
+  BEGIN
+    sp_subroutine();
+    sp_subroutine();
+  END;
+  /
+SELECT sp_in_subroutine();
+DROP PROCEDURE sp_in_subroutine;
+DROP PROCEDURE sp_subroutine;
+
+-- duplicate name
+CREATE OR REPLACE PROCEDURE sp_duplicate_name IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_A;
+    ROLLBACK TO SAVEPOINT SAVE_A;
+    SAVEPOINT SAVE_A;
+    ROLLBACK TO SAVEPOINT SAVE_A;
+  END;
+  /
+SELECT sp_duplicate_name();
+DROP PROCEDURE sp_duplicate_name;
+
+-- savepoint in SPI executor context
+CREATE OR REPLACE PROCEDURE pl_subroutine IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    ROLLBACK; -- this line destroys PopOverrideSearchPath, causing a mismatch
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_spi_rollback IS
+  BEGIN
+    SAVEPOINT save_1;
+    pl_subroutine();
+  END;
+  /
+SELECT sp_spi_rollback();
+DROP PROCEDURE sp_spi_rollback;
+DROP PROCEDURE pl_subroutine;
+
+CREATE OR REPLACE PROCEDURE pl_subroutine IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    ROLLBACK TO save_1; -- this line destroys the calling context of SavepointTest0
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_spi_rollbackto IS
+  BEGIN
+    SAVEPOINT save_1;
+    --ROLLBACK;
+    pl_subroutine();
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    ROLLBACK TO save_1;
+    INSERT INTO pl_txn_t VALUES(3, 3);
+  END;
+  /
+SELECT sp_spi_rollbackto();
+SELECT sp_spi_rollbackto();
+DROP PROCEDURE sp_spi_rollbackto;
+DROP PROCEDURE pl_subroutine;
+
+-- savepoint + subroutine's commit/rollback
+CREATE OR REPLACE PROCEDURE pl_commit IS
+  BEGIN
+    COMMIT; -- snapshot destroyed when the subtransaction finishes.
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_inner_commit IS
+  BEGIN
+    SAVEPOINT SAVE_A0;
+    pl_commit();
+  END;
+  /
+SELECT sp_inner_commit();
+DROP PROCEDURE sp_inner_commit;
+DROP PROCEDURE pl_commit;
+
+CREATE OR REPLACE PROCEDURE pl_rollback IS
+  BEGIN
+    ROLLBACK; -- snapshot destroyed when the subtransaction finishes.
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_inner_rollback IS
+  BEGIN
+    SAVEPOINT SAVE_A0;
+    pl_rollback();
+  END;
+  /
+CALL sp_inner_rollback();
+DROP PROCEDURE sp_inner_rollback;
+DROP PROCEDURE pl_rollback;
+
+-- savepoint + exception
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    COMMIT;
+    SAVEPOINT SAVE_A;
+    RAISE exc_1;
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 1';
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  BEGIN
+    SavepointTest();
+  EXCEPTION
+    WHEN OTHERS THEN
+      ROLLBACK TO SAVE_A;
+      RAISE NOTICE 'wrong 2';
+  END;
+  /
+SELECT SavepointTest();
+SELECT SavepointTest0();
+
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_A;
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    ROLLBACK TO SAVEPOINT SAVE_B;
+  EXCEPTION
+    WHEN exc_1 THEN
+      ROLLBACK TO SAVEPOINT SAVE_A;
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT SAVE_B;
+      INSERT INTO pl_txn_t VALUES(3,3);
+  END;
+  /
+SELECT SavepointTest();
+
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_A;
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN exc_1 THEN
+      ROLLBACK TO SAVEPOINT SAVE_A;
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT SAVE_B;
+      INSERT INTO pl_txn_t VALUES(3,3);
+  END;
+  /
+SELECT SavepointTest();
+DROP PROCEDURE SavepointTest;
+DROP PROCEDURE SavepointTest0;
+
+-- savepoint + cursor hold
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  CURSOR c1 IS SELECT tc1 FROM pl_txn_t;
+  val INT;
+  val1 INT;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    INSERT INTO pl_txn_t VALUES(2,2);
+    OPEN c1;
+    SAVEPOINT save_a;
+    FETCH c1 INTO val;
+    ROLLBACK TO save_a;
+    FETCH c1 INTO val1;
+    CLOSE c1;
+  END;
+  /
+SELECT SavepointTest();
+
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  CURSOR c1 IS SELECT tc1 FROM pl_txn_t;
+  val INT;
+  val1 INT;
+  BEGIN
+    INSERT INTO pl_txn_t values(1,1);
+    INSERT INTO pl_txn_t values(2,2);
+    SAVEPOINT save_a;
+    OPEN c1;
+    FETCH c1 INTO val;
+    ROLLBACK to save_a;
+    FETCH c1 INTO val1; -- fetch out of sequence
+    CLOSE c1;
+  END;
+  /
+SELECT SavepointTest();
+
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  CURSOR c1 IS SELECT tc1 FROM pl_txn_t;
+  val INT;
+  val1 INT;
+  BEGIN
+    INSERT INTO pl_txn_t values(1,1);
+    INSERT INTO pl_txn_t values(2,2);
+    SAVEPOINT save_a;
+    OPEN c1;
+    FETCH c1 INTO val;
+    COMMIT;
+    FETCH c1 INTO val1;
+    CLOSE c1;
+  END;
+  /
+SELECT SavepointTest();
+DROP PROCEDURE SavepointTest;
+
+-- spi connect
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  CURSOR c1 IS SELECT tc1 FROM pl_txn_t;
+  val INT;
+  val1 INT;
+  BEGIN
+    ROLLBACK TO SAVEPOINT SAVE_A0;
+    --INSERT INTO pl_txn_t VALUES(1, 1);
+    --INSERT INTO pl_txn_t VALUES(2, 2);
+    SAVEPOINT SAVE_A1;
+    OPEN c1;
+    FETCH c1 INTO val;
+    COMMIT;
+    FETCH c1 INTO val;
+    CLOSE c1;
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  BEGIN
+    SAVEPOINT SAVE_A0;
+    --INSERT INTO pl_txn_t VALUES(1, 1);
+    --INSERT INTO pl_txn_t VALUES(2, 2);
+    --SAVEPOINT SAVE_A1;
+    SavepointTest0();
+  END;
+  /
+SELECT SavepointTest();
+DROP PROCEDURE SavepointTest0;
+DROP PROCEDURE SavepointTest;
+
+-- savepoint in exception, don't destroy exception's subtransaction
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    COMMIT;
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    SAVEPOINT save_b;
+    INSERT INTO pl_txn_t VALUES(4, 4);
+  EXCEPTION
+    WHEN exc_1 THEN
+      ROLLBACK TO SAVEPOINT save_a;
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT save_a;
+      INSERT INTO pl_txn_t VALUES(5, 5);
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a;
+    SavepointTest0();
+    ROLLBACK TO save_b;
+  END;
+  /
+TRUNCATE pl_txn_t;
+SELECT SavepointTest();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- exception's subtransaction id changes.
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    -- exception's subtransaction id is 3
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    COMMIT; -- exception's subtransaction id changes to 2.
+    INSERT INTO pl_txn_t VALUES(3, 3);
+  EXCEPTION
+    WHEN exc_1 THEN
+      ROLLBACK TO SAVEPOINT SAVE_A;
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT SAVE_A;
+      INSERT INTO pl_txn_t VALUES(5, 5);
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a; -- subtransaction id = 2
+    SavepointTest0();
+  END;
+  /
+TRUNCATE pl_txn_t;
+SELECT SavepointTest();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- automatic rollback to the last savepoint (save_b) in exception
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    -- exception's subtransaction id is 3
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    COMMIT; -- destroys save_a automatically
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    SAVEPOINT save_b;
+    INSERT INTO pl_txn_t VALUES(4, 4);
+    RAISE exc_1;
+  EXCEPTION
+    -- auto rollback to save_b
+    WHEN exc_1 THEN
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      INSERT INTO pl_txn_t VALUES(5, 5);
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a;
+    SavepointTest0();
+  END;
+  /
+TRUNCATE pl_txn_t;
+SELECT SavepointTest();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- ROLLBACK TO inside an exception handler
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    -- exception's subtransaction id is 3
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    COMMIT; -- destroys save_a automatically
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    SAVEPOINT save_b;
+    INSERT INTO pl_txn_t VALUES(4, 4);
+    SAVEPOINT save_c;
+    ROLLBACK TO save_none; -- no such savepoint
+  EXCEPTION
+    -- auto rollback to save_c
+    WHEN exc_1 THEN
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT SAVE_b;
+      INSERT INTO pl_txn_t VALUES(5, 5);
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a;
+    SavepointTest0();
+  END;
+  /
+TRUNCATE pl_txn_t;
+SELECT SavepointTest();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- destroy SPI connection while aborting the subtransaction
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  var1 INT;
+  BEGIN
+    SAVEPOINT save_c;
+    ROLLBACK TO SAVEPOINT save_a;
+    SELECT sum(t1.tc1 + t2.tc2) INTO var1 FROM pl_txn_t t1, pl_txn_t t2 WHERE T1.TC2 + 1 = T2.TC2 + 4;
+    CREATE TABLE pl_txn_t(tc1 INT, tc2 INT);
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a;
+    SAVEPOINT save_b;
+    SavepointTest0();
+  END;
+  /
+SELECT SavepointTest();
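+
+-- Editor's note: the next group exercises savepoints created outside the
+-- procedure, in the enclosing transaction block. Rolling back to such an
+-- outer savepoint from inside PL appears intended to work, while savepoints
+-- created inside the procedure are not expected to survive into the outer
+-- block; this summary is inferred from the cases below.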
+
+-- savepoint outside STP
+create or replace procedure SavepointTest is
+  exc_1 exception;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a;
+    ROLLBACK TO save_out;
+    SAVEPOINT save_b;
+  END;
+  /
+BEGIN;
+INSERT INTO pl_txn_t VALUES(0, 0);
+SAVEPOINT save_out;
+SELECT SavepointTest();
+SELECT SavepointTest();
+ROLLBACK TO save_b;
+COMMIT;
+
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    SAVEPOINT save_a;
+    INSERT INTO pl_txn_t VALUES(4, 4);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN exc_1 THEN
+      ROLLBACK TO SAVEPOINT save_out;
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT save_a;
+      INSERT INTO pl_txn_t VALUES(5, 5);
+  END;
+  /
+BEGIN;
+SAVEPOINT save_out;
+SELECT SavepointTest();
+SELECT SavepointTest();
+ROLLBACK TO save_out;
+END;
+
+-- don't switch to top portal's resourceowner since it is invalid.
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    ROLLBACK TO SAVEPOINT save_out;
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    SavepointTest0();
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN exc_1 THEN
+      INSERT INTO pl_txn_t VALUES(4, 4);
+    WHEN OTHERS THEN
+      INSERT INTO pl_txn_t VALUES(6, 6);
+  END;
+  /
+TRUNCATE pl_txn_t;
+BEGIN;
+SAVEPOINT save_out;
+SELECT SavepointTest();
+SELECT * from pl_txn_t order by 1, 2;
+END;
+
+-- exception's subtransaction is destroyed by rolling back to an outside savepoint
+CREATE OR REPLACE PROCEDURE SavepointTest0 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    INSERT INTO pl_txn_t VALUES(3, 3);
+    SAVEPOINT save_a;
+    INSERT INTO pl_txn_t VALUES(4, 4);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN exc_1 THEN
+      ROLLBACK TO SAVEPOINT save_out;
+      RAISE NOTICE 'wrong 1';
+    WHEN OTHERS THEN
+      RAISE NOTICE 'wrong 2';
+      ROLLBACK TO SAVEPOINT save_a;
+      INSERT INTO pl_txn_t VALUES(5, 5);
+  END;
+  /
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  exc_1 EXCEPTION;
+  BEGIN
+    SavepointTest0();
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN exc_1 THEN
+      INSERT INTO pl_txn_t VALUES(4, 4);
+    WHEN OTHERS THEN
+      INSERT INTO pl_txn_t VALUES(6, 6);
+  END;
+  /
+BEGIN;
+SAVEPOINT save_out;
+SELECT SavepointTest();
+SELECT SavepointTest();
+END;
+
+DROP PROCEDURE SavepointTest0;
+DROP PROCEDURE SavepointTest;
+
+-- switch to stmt top portal memory context
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  val VARCHAR(10) := '0';
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT save_a;
+    val := val || '1';
+    ROLLBACK TO SAVEPOINT save_a;
+    val := val || '2';
+  END;
+  /
+SELECT SavepointTest();
+SELECT SavepointTest();
+DROP PROCEDURE SavepointTest;
+
+-- don't support execute immediate savepoint
+CREATE OR REPLACE PROCEDURE SavepointTest IS
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1, 1);
+    SAVEPOINT SAVE_A;
+
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    execute immediate 'rollback to ' || 'save_a';
+
+    INSERT INTO pl_txn_t VALUES(2, 2);
+    ROLLBACK TO SAVEPOINT SAVE_A;
+  END;
+  /
+select SavepointTest();
+DROP PROCEDURE SavepointTest;
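+
+-- Editor's note: the remaining cases toggle
+-- behavior_compat_options = 'plstmt_implicit_savepoint'. As suggested by the
+-- paired runs below, with the option set each statement in a procedure runs
+-- under an implicit savepoint, so an exception handler should observe the
+-- effects of statements that succeeded before the failing one; without it,
+-- the whole exception block's work is rolled back. A minimal sketch, assuming
+-- that reading (all names are illustrative):
+CREATE TABLE sp_guc_sketch_t(c1 INT PRIMARY KEY);
+CREATE OR REPLACE PROCEDURE sp_guc_sketch IS
+  BEGIN
+    INSERT INTO sp_guc_sketch_t VALUES(1);
+    INSERT INTO sp_guc_sketch_t VALUES(1); -- duplicate key error
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'caught';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+CALL sp_guc_sketch();
+SELECT * FROM sp_guc_sketch_t; -- expect the first row to survive
+SET behavior_compat_options = '';
+DROP PROCEDURE sp_guc_sketch;
+DROP TABLE sp_guc_sketch_t;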
+
+-- wrong during execute stage
+CREATE OR REPLACE PROCEDURE sp_inner1 IS
+  BEGIN
+    SAVEPOINT save_a;
+    INSERT INTO pl_txn_t VALUES(2,2);
+    INSERT INTO pl_txn_t VALUES(2,2); --wrong
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_test is
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    sp_inner1();
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CREATE UNIQUE INDEX idx_unique_tc1_tc2 ON pl_txn_t(tc1, tc2);
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+DROP INDEX idx_unique_tc1_tc2;
+
+-- wrong during plan stage
+CREATE OR REPLACE PROCEDURE sp_inner1 IS
+  BEGIN
+    SAVEPOINT save_a;
+    INSERT INTO pl_txn_t VALUES(2,2);
+    INSERT INTO pl_txn_t VALUES(2,2,2); --wrong
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_test is
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    sp_inner1();
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- error raised from PL itself
+CREATE OR REPLACE PROCEDURE sp_inner1 IS
+  exc_1 EXCEPTION;
+  BEGIN
+    SAVEPOINT save_a;
+    INSERT INTO pl_txn_t VALUES(2,2);
+    RAISE exc_1;
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_test is
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    sp_inner1();
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+DROP PROCEDURE sp_test;
+DROP PROCEDURE sp_inner1;
+
+-- don't rollback exception's subtxn
+CREATE OR REPLACE PROCEDURE sp_test is
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+DROP PROCEDURE sp_test;
+
+-- release savepoint
+CREATE OR REPLACE PROCEDURE sp_test is
+  exc_1 EXCEPTION;
+  BEGIN
+    SAVEPOINT s1;
+    INSERT INTO pl_txn_t VALUES(1,1);
+    RELEASE s1;
+    INSERT INTO pl_txn_t VALUES(2,2);
+    ROLLBACK TO s1;
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- rollback to savepoint before released one
+CREATE OR REPLACE PROCEDURE sp_test is
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(0,0);
+    SAVEPOINT s1;
+    INSERT INTO pl_txn_t VALUES(1,1);
+    SAVEPOINT s2;
+    INSERT INTO pl_txn_t VALUES(2,2);
+    RELEASE s2;
+    INSERT INTO pl_txn_t VALUES(3,3);
+    ROLLBACK TO s1;
+    INSERT INTO pl_txn_t VALUES(4,4);
+    RAISE exc_1;
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+DROP PROCEDURE sp_test;
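+
+-- Editor's note: per standard SQL savepoint semantics, RELEASE discards the
+-- named savepoint (s2) but not earlier ones, so the ROLLBACK TO s1 above
+-- should still succeed while rolling back to the released name should not.
+-- This is inferred from the test shape and hedged accordingly.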
+
+-- wrong during plan stage without savepoint
+CREATE OR REPLACE PROCEDURE sp_test is
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    -- wrong cast while holding some resources
+    INSERT INTO pl_txn_t VALUES(1,1,1); --wrong execute
+    UPDATE pl_txn_t SET tc2 = 'null'::numeric; --wrong no execute
+    INSERT INTO pl_txn_t VALUES(2,2); -- no execute
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+
+-- wrong during execute stage without savepoint
+CREATE OR REPLACE PROCEDURE sp_test is
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    -- wrong cast while holding some resources
+    UPDATE pl_txn_t SET tc2 = 0 WHERE tc1 / (tc2 - 1) = 1;
+    SELECT COUNT(1) FROM pl_txn_t WHERE tc1 / (tc2 - 1) = 1;
+    INSERT INTO pl_txn_t VALUES(2,2); -- no execute
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+CREATE OR REPLACE PROCEDURE sp_test1 is
+  exc_1 EXCEPTION;
+  BEGIN
+    INSERT INTO pl_txn_t VALUES(1,1);
+    -- wrong cast while holding some resources
+    SELECT COUNT(1) FROM pl_txn_t WHERE tc1 / (tc2 - 1) = 1;
+    INSERT INTO pl_txn_t VALUES(2,2); -- no execute
+  EXCEPTION
+    WHEN OTHERS THEN
+      RAISE INFO 'wrong1';
+  END;
+  /
+SET behavior_compat_options = 'plstmt_implicit_savepoint';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test1();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+SET behavior_compat_options = '';
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+TRUNCATE TABLE pl_txn_t;
+CALL sp_test1();
+SELECT * FROM pl_txn_t ORDER BY 1, 2;
+DROP PROCEDURE sp_test;
+DROP PROCEDURE sp_test1;
+
+DROP TABLE pl_txn_t;
+
diff --git a/src/test/regress/sql/plpgsql_sql_with_proc_keyword.sql b/src/test/regress/sql/plpgsql_sql_with_proc_keyword.sql
new file mode 100644
index 000000000..85e3bf0ef
--- /dev/null
+++ b/src/test/regress/sql/plpgsql_sql_with_proc_keyword.sql
@@ -0,0 +1,199 @@
+drop schema if exists plpgsql_table;
+create schema plpgsql_table;
+set current_schema = plpgsql_table;
+
+create table test1(a varchar2(10),b varchar2(10));
+
+create or replace package keyword_pkg is
+cursor c1 is select * from test1 where a=(case when b='1' then 1 else 0 end);
+procedure p1();
+end keyword_pkg;
+/
+
+create or replace package body keyword_pkg is
+procedure p1() as
+declare
+rd record;
+begin
+open c1;
+fetch c1 into rd;
+end;
+end keyword_pkg;
+/
+
+call keyword_pkg.p1();
+
+drop table test1;
+drop package keyword_pkg;
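+
+-- Editor's note: the emp_bonus case below declares public variables and a
+-- public procedure in the package specification, adds private variables in
+-- the body, and ends the body with an initialization section; the translated
+-- commentary embedded in the package documents the package rules being
+-- exercised.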
+
+create or replace package emp_bonus is
+var1 int:=1; -- public variable
+var2 int:=2;
+procedure testpro1(var3 int); -- public stored procedure, callable from outside
+-------------------------------------------------
+
+----- package test cases
+
+----- create package specification syntax
+
+--create [ or replace ] package [ schema ] package_name
+
+--[ invoker_rights_clause ] { is | as } item_list_1 end package_name;
+
+--The package specification declares the package's public variables, functions, exceptions and so on, which can
+
+--be referenced by external functions or stored procedures. A package specification may only declare stored
+
+--procedures and functions; it cannot define them.
+
+--package is only supported in centralized deployments and cannot be used in distributed ones.
+
+--• Every function or stored procedure declared in the package specification must have a definition in the package body.
+
+--• During instantiation, stored procedures containing commit/rollback cannot be called.
+
+--• package functions cannot be called from a trigger.
+
+--• package variables cannot be used directly in external SQL.
+
+--• A package's private variables and stored procedures cannot be referenced from outside the package.
+
+--• Usages unsupported in ordinary stored procedures stay unsupported here; for example, since commit/rollback cannot be called in a function, it cannot be called in a package function either.
+
+--• A schema and a package cannot share the same name.
+
+--• Only A-style stored procedure and function definitions are supported.
+
+--• Duplicate variable names within a package, including duplicate parameter names, are not supported.
+
+--• package global variables are session-level; package variables are not shared between sessions.
+
+--• A function using an autonomous transaction inside a package must not use public variables, nor recursively call functions that use public variables.
+
+--• Declaring ref cursor types inside a package is not supported.
+
+--• create package specification syntax: create [ or replace ] package [ schema ] package_name
+
+-- [ invoker_rights_clause ] { is | as } item_list_1 end package_name;
+
+--
+
+--invoker_rights_clause can be declared as authid definer or authid invoker, i.e. definer rights or invoker rights.
+
+--item_list_1 may contain declared variables as well as stored procedures and functions.
+
+--
+
+--The package specification declares the package's public variables, functions, exceptions and so on, which can be referenced by external functions or stored procedures. A package specification may only declare stored procedures and functions; it cannot define them.
+
+--
+
+--• create package body syntax: create [ or replace ] package body [ schema ] package_name
+
+-- { is | as } declare_section [ initialize_section ] end package_name;
+
+--
+
+--The package body defines the package's private variables, functions and so on. A variable or function not declared in the package specification is a private variable or function.
+
+--
+
+--The package body may also declare an instantiation (initialization) section used to initialize the package; see the example.
+
+-------------------------------------------------
+end emp_bonus;
+/
+
+create or replace package body emp_bonus is
+var3 int:=3;
+var4 int:=4;
+procedure testpro1(var3 int)
+is
+begin
+create table if not exists test1(col1 int);
+insert into test1 values(var1);
+insert into test1 values(var3);
+-------------------------------------------------
+
+----- package test cases
+
+----- create package specification syntax
+
+--create [ or replace ] package [ schema ] package_name
+
+--[ invoker_rights_clause ] { is | as } item_list_1 end package_name;
+
+--The package specification declares the package's public variables, functions, exceptions and so on, which can
+
+--be referenced by external functions or stored procedures. A package specification may only declare stored
+
+--procedures and functions; it cannot define them.
+
+--package is only supported in centralized deployments and cannot be used in distributed ones.
+
+--• Every function or stored procedure declared in the package specification must have a definition in the package body.
+
+--• During instantiation, stored procedures containing commit/rollback cannot be called.
+
+--• package functions cannot be called from a trigger.
+
+--• package variables cannot be used directly in external SQL.
+
+--• A package's private variables and stored procedures cannot be referenced from outside the package.
+
+--• Usages unsupported in ordinary stored procedures stay unsupported here; for example, since commit/rollback cannot be called in a function, it cannot be called in a package function either.
+
+--• A schema and a package cannot share the same name.
+
+--• Only A-style stored procedure and function definitions are supported.
+
+--• Duplicate variable names within a package, including duplicate parameter names, are not supported.
+
+--• package global variables are session-level; package variables are not shared between sessions.
+
+--• A function using an autonomous transaction inside a package must not use public variables, nor recursively call functions that use public variables.
+
+--• Declaring ref cursor types inside a package is not supported.
+
+--• create package specification syntax: create [ or replace ] package [ schema ] package_name
+
+-- [ invoker_rights_clause ] { is | as } item_list_1 end package_name;
+
+--
+
+--invoker_rights_clause can be declared as authid definer or authid invoker, i.e. definer rights or invoker rights.
+
+--item_list_1 may contain declared variables as well as stored procedures and functions.
+
+--
+
+--The package specification declares the package's public variables, functions, exceptions and so on, which can be referenced by external functions or stored procedures. A package specification may only declare stored procedures and functions; it cannot define them.
+
+--
+
+--• create package body syntax: create [ or replace ] package body [ schema ] package_name
+
+-- { is | as } declare_section [ initialize_section ] end package_name;
+
+--
+
+--The package body defines the package's private variables, functions and so on. A variable or function not declared in the package specification is a private variable or function.
+
+--
+
+--The package body may also declare an instantiation (initialization) section used to initialize the package; see the example.
+
+-------------------------------------------------
+end;
+begin -- instantiation begins
+var4:=9;
+testpro1(var4);
+end emp_bonus;
+/
+
+drop package if exists emp_bonus;
+
+
+drop schema plpgsql_table;
+
+
diff --git a/src/test/regress/sql/plpgsql_table_opengauss.sql b/src/test/regress/sql/plpgsql_table_opengauss.sql
new file mode 100644
index 000000000..e7846a916
--- /dev/null
+++ b/src/test/regress/sql/plpgsql_table_opengauss.sql
@@ -0,0 +1,766 @@
+-- test create type table of
+-- check compatibility --
+-- create new schema --
+drop schema if exists plpgsql_table_opengauss;
+create schema plpgsql_table_opengauss;
+set current_schema = plpgsql_table_opengauss;
+
+--test inout param
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+create view tt17v as select * from int8_tbl i where i in (values(i));
+select * from tt17v order by 1,2;
+
+create type s_type as (
+    id integer,
+    name varchar,
+    addr text
+);
+
+create type typeA as table of s_type;
+create type typeB as table of s_type.id%type;
+create type typeC as table of s_type.name%type;
+create type typeD as table of varchar(100);
+
+-- test nested table of: expect error
+create type typeF as table of typeD;
+-- don't support altering attributes
+alter type typeA ADD ATTRIBUTE a int;
+
+-- test type nest table of
+create type type1 as table of varchar(10);
+create type type2 as (c1 type1);
+declare
+    a type2;
+    begin
+    a.c1(1) = ('aaa');
+    a.c1(2) = ('bbb');
+    RAISE INFO 'a.c1: %' ,a.c1;
+end;
+/
+
+CREATE TYPE type3 as (a varchar2(1000),b varchar2(1000));
+CREATE TYPE type4 AS TABLE OF type3;
+CREATE TYPE type5 as (c1 varchar2(1000),c2 varchar2(1000), c3 type4);
+declare
+    a5 type5;
+    begin
+    a5.c1 = 'aaa';
+    a5.c3(1) = ('1','2');
+    a5.c3(2) = ('11','21');
+    RAISE INFO 'a.c1: %' ,a5.c3[1];
+end;
+/
+
+-- test record nest table of
+create table tycod01(c1 int[],c2 int);
+insert into tycod01 values(array[1],1);
+create type tycod02 as(c1 int,c2 tycod01%rowtype);
+
+create table tycod03(c1 int[],c2 tycod02,c3 tycod01);
+insert into tycod03 values (array[3],(3,(array[3],3)),(array[3],3));
+
+create type tycode23 is table of tycod03.c3%type;
+
+create or replace procedure recordnes23()
+is
+type tycode01 is table of varchar(20) index by varchar(20);
+type tycode02 is record (c1 tycode01,c2 int,c3 tycode23);
+tycode001 tycode02;
+begin
+tycode001.c1('aa'):=('22','33','44');
+tycode001.c1('bb'):=array['2222'];
+tycode001.c2:=2222;
+tycode001.c3(1):=(array[1],3);
+raise info 'tycode001.c1 is %,tycode001.c2 is %,tycode001.c3 is %', tycode001.c1,tycode001.c2,tycode001.c3;
+end;
+/
+
+call recordnes23();
+
+-- test in parameter
+create or replace procedure tableof_1(a typeA)
+is
+
+begin
+    RAISE INFO 'a(1): %' ,a(1);
+    a(1) = (2, 'lisi', 'beijing');
+    a(2) = (3, 'zahngwu', 'chengdu');
+end;
+/
+
+create or replace procedure tableof_2()
+is
+    a typeA;
+begin
+    a(1) = (1, 'zhangsan', 'shanghai');
+    RAISE INFO 'before call a(1): %' ,a(1);
+    perform tableof_1(a);
+    RAISE INFO 'after call a(2): %' ,a(2);
+end;
+/
+
+call tableof_2();
+
+-- don't support initialization via type constructor, e.g. typeA()
+create or replace procedure tableof_3
+    is
+    aa typeA = typeA();
+    begin
+    RAISE INFO '%' ,aa;
+end;
+/
+
+call tableof_3();
+
+-- test return
+create or replace function tableof_4()
+    return typeA as
+    a typeA;
+    begin
+    a(1) = (1, 'lisi', 'beijing');
+    return a;
+end;
+/
+
+select tableof_4();
+
+create or replace function tableof_4()
+    return typeA as
+    a typeA;
+    begin
+    a(1) = (1, 'lisi', 'beijing');
+    return a;
+end;
+/
+
+select tableof_4();
+
+create or replace function tableof_5()
+    return typeA as
+    a typeA;
+    b typeA;
+    begin
+    a(1) = (1, 'lisi', 'beijing');
+    b = a;
+    b(2) = (2, 'zahngwu', 'chengdu');
+    RAISE INFO 'a:%' ,a;
+    return b;
+end;
+/
+
+select tableof_5();
+
+-- test cast
+create or replace function tableof_6()
+    return typeC as
+    a typeA;
+    b typeC;
+    begin
+    a(1) = (1, 'lisi', 'beijing');
+    b = a;
+    b(2) = (2, 'zahngwu', 'chengdu');
+    RAISE INFO 'a:%' ,a;
+    return b;
+end;
+/
+
+select tableof_6();
+
+--test return wrong type
+create or replace function tableof_7()
+    return typeB as
+    a typeA;
+    b typeC;
+    begin
+    a(1) = (1, 'lisi', 'beijing');
+    b = a;
+    b(2) = (2, 'zahngwu', 'chengdu');
+    RAISE INFO 'a:%' ,a;
+    return b;
+end;
+/
+
+select tableof_7();
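+
+-- Editor's note: tableof_6 suggests that assignment between table-of types
+-- with compatible element types is accepted and applied on return, while
+-- tableof_7, which returns an incompatible table type, is presumably expected
+-- to fail; this reading is inferred from the test shape.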
+
+-- add one column from s_type
+create type s_type_extend as (
+    id integer,
+    name varchar,
+    addr text,
+    comment varchar
+);
+
+create type typeA_ext as table of s_type_extend;
+
+create or replace function tableof_8()
+    return typeA_ext as
+    a typeA;
+    b typeA_ext;
+    begin
+    a(1) = (1, 'lisi', 'beijing');
+    b = a;
+    b(2) = (2, 'zahngwu', 'chengdu','good');
+    RAISE INFO 'a:%' ,a;
+    return b;
+end;
+/
+
+select tableof_8();
+
+-- test return index
+create or replace function tableof_9()
+    return typeA as
+    a typeA;
+    begin
+    a(-1) = (1, 'lisi', 'beijing');
+    a(2) = (2, 'zahngwu', 'chengdu');
+    return a;
+end;
+/
+
+select tableof_9();
+
+create or replace procedure tableof_10()
+    as
+    a typeA;
+    begin
+    a = tableof_9();
+    RAISE INFO 'a(-1):%' ,a(-1);
+    RAISE INFO 'a(0):%' ,a(0);
+    RAISE INFO 'a(2):%' ,a(2).id;
+end;
+/
+
+call tableof_10();
+
+create or replace procedure tableof_11()
+    as
+    a typeA;
+    begin
+    a = tableof_9();
+    RAISE INFO 'a(-1):%' ,a(-1);
+end;
+/
+
+call tableof_11();
+
+-- test index by
+create or replace procedure tableof_12
+    is
+    TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER;
+    aa SalTabTyp;
+    begin
+    aa('aa') = 1;
+    aa('bb') = 2;
+    RAISE INFO '%' ,aa('aa');
+    RAISE INFO '%' ,aa('bb');
+end;
+/
+
+call tableof_12();
+
+create or replace procedure tableof_13
+    is
+    TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+    aa SalTabTyp;
+    begin
+    aa('aa') = 1;
+    aa('bb') = 2;
+    RAISE INFO '%' ,aa(0);
+    RAISE INFO '%' ,aa('bb');
+end;
+/
+
+call tableof_13();
+
+create or replace procedure tableof_14
+    is
+    TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+    aa SalTabTyp;
+    b varchar(10);
+    begin
+    aa('a') = 1;
+    b = 'aa';
+    aa(b) = 2;
+    RAISE INFO '%' ,aa('a');
+    RAISE INFO '%' ,aa('aa');
+    RAISE INFO '%' ,aa(b);
+end;
+/
+
+call tableof_14();
+
+create or replace procedure tableof_15
+    is
+    TYPE SalTabTyp is TABLE OF varchar(10) index by date;
+    aa SalTabTyp;
+    begin
+
+end;
+/
+
+create or replace procedure tableof_15
+    is
+    TYPE SalTabTyp is TABLE OF varchar(10) index by text;
+    aa SalTabTyp;
+    begin
+
+end;
+/
+
+-- test table = table
+create or replace procedure tableof_16
+    is
+    TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER;
+    aa SalTabTyp;
+    bb SalTabTyp;
+    begin
+    aa(-1) = 'b';
+    aa(1) = 'a';
+    RAISE INFO '%' ,aa(-1);
+    bb = aa;
+    RAISE INFO '%' ,bb(-1);
+    bb(8) = 'g';
+    RAISE INFO '%' ,bb(8);
+    RAISE INFO '%' ,aa(8);
+    end;
+/
+
+call tableof_16();
+
+-- test define
+create or replace procedure tableof_17
+    is
+    TYPE SalTabTyp is TABLE OF s_type%rowtype index by varchar(10);
+    aa SalTabTyp;
+    begin
+    aa('a') = (1, 'zhangsan', 'shanghai');
+    aa('b') = (2, 'lisi', 'beijing');
+    RAISE INFO '%' ,aa('a').id;
+    RAISE INFO '%' ,aa('b');
+end;
+/
+call tableof_17();
+
+create or replace procedure tableof_18
+    is
+    TYPE SalTabTyp is TABLE OF s_type.id%type index by varchar(10);
+    aa SalTabTyp;
+    begin
+    aa('a') = 1;
+    aa('b') = 2;
+    RAISE INFO '%' ,aa('a');
+    RAISE INFO '%' ,aa('b');
+end;
+/
+
+call tableof_18();
+
+
+-- test not null grammar
+create or replace procedure tableof_19
+    is
+    TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10);
+    aa SalTabTyp;
+    begin
+    aa('a') = (1, 'zhangsan', 'shanghai');
+    RAISE INFO '%' ,aa('a');
+end;
+/
+
+call tableof_19();
+
+-- test assign one attr
+create or replace procedure tableof_20
+    is
+    TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10);
+    aa SalTabTyp;
+    begin
+    aa('a') = (1, 'zhangsan', 'shanghai');
+    aa('a').id = 1;
+end;
+/
+
+call tableof_20();
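+
+-- Editor's note: the customers fixture below feeds the cursor and
+-- select-into cases that follow; fetching directly into a specific collection
+-- element such as id_a(2) appears to be the behavior under test. This summary
+-- is inferred from the cases themselves.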
float(2)); +create type customer as (id number(10), c_info info); +create table customers (id number(10), c_info info); + +insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00)); +insert into customers (id, c_info) values (2, ('Zera' ,25, 'London', 5999.00)); +insert into customers (id, c_info) values (3, ('Alice' ,22, 'Bangkok', 9800.98)); +insert into customers (id, c_info) values (4, ('Jim' ,26, 'Dubai', 18700.00)); +insert into customers (id, c_info) values (5, ('Kevin' ,28, 'Singapore', 18999.00)); +insert into customers (id, c_info) values (6, ('Gauss' ,42, 'Beijing', 32999.00)); +-- test curosor fetch into +create or replace procedure tableof_21 +as +declare + TYPE id_1 is TABLE OF customer.id%type index by varchar(10); + TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10); + CURSOR C1 IS SELECT id FROM customers order by id; + CURSOR C2 IS SELECT c_info FROM customers order by id; + info_a c_info_1:=c_info_1(); + id_a id_1:=id_1(); +begin + OPEN C1; + OPEN C2; + FETCH C1 into id_a(2); + FETCH C2 into info_a(2); + FETCH C1 into id_a(3); + FETCH C2 into info_a(3); + CLOSE C1; + CLOSE C2; + RAISE INFO '%', id_a; + RAISE INFO '%', info_a; +end; +/ + +call tableof_21(); + +-- test select into +create or replace procedure tableof_22 +as +declare + TYPE id_1 is TABLE OF customer.id%type index by varchar(10); + TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10); + info_a c_info_1:=c_info_1(); + id_a id_1:=id_1(); +begin + select id into id_a(2) from customers where id = 3; + select c_info into info_a(2) from customers where id = 3; + select id into id_a(3) from customers where id = 4; + select c_info into info_a(3) from customers where id = 4; + RAISE INFO '%', id_a(2); + RAISE INFO '%', info_a(3).age; +end; +/ + +call tableof_22(); + +-- test curosor for +create or replace procedure tableof_23 +as +declare + type c_list is TABLE of customer; + customer_table c_list:=c_list(); + CURSOR C1 IS SELECT * FROM customers order by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_table(counter) := n; + end loop; + RAISE INFO '%', customer_table(3); +end; +/ + +call tableof_23(); + +create or replace procedure tableof_24 +as +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); + CURSOR C1 IS SELECT * FROM customers order by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_table(counter) := n; + end loop; + RAISE INFO '%', customer_table(4); +end; +/ + +call tableof_24(); + +-- test row type +create type typeE as table of s_type%rowtype; +create type typeE as table of customers%rowtype; + +create or replace procedure tableof_25 +as +declare + customer_table typeE; + CURSOR C1 IS SELECT * FROM customers order by id; + counter int := 0; +begin + for n in C1 loop + counter := counter + 1; + customer_table(counter) := n; + end loop; + RAISE INFO '%', customer_table(4); +end; +/ + +call tableof_25(); + +-- test insert +create or replace procedure tableof_26 +as +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); +begin + customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00)); + customer_table(2) := (8, ('Vera' ,32, 'Paris', 22999.00)); + insert into customers values (customer_table(1).id, customer_table(1).c_info); + insert into customers values (customer_table(2).id, customer_table(2).c_info); +end; +/ + +call tableof_26(); +select * from customers where id = 7; + +-- expect error table[] +create 
or replace procedure tableof_27 +as +declare + type c_list is TABLE of customers%rowtype; + customer_table c_list:=c_list(); +begin + customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00)); + insert into customers values (customer_table[1].id, customer_table[1].c_info); +end; +/ + +-- test deault +declare + type students is table of varchar2(10); + type grades is table of integer; + marks grades := grades(98, 97, 74 + 4, (87), 92, 100); -- batch initialize -- + names students default students('none'); -- default -- + total integer; +begin + names := students(); -- should append NULL then do the coerce -- + names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert -- + total := names.count; + RAISE INFO 'Total % Students', total; + for i in 1 .. total loop + RAISE INFO 'Student: % Marks: %', names(i), marks(i); + end loop; +end; +/ + +create type mytype as ( + id integer, + biome varchar2(100) +); + +create type mytype2 as ( + id integer, + locale myType +); +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.3 is: %', aa(2).locale.biome; +end; +/ + +-- test of uneven brackets -- +-- error out -- +declare + type students is table of varchar2(10); + names students; +begin + names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able read all values correctly -- + for i in 1 .. 5 loop + RAISE INFO 'Student: %', names(i]; + end loop; +end; +/ + +-- Using composite type defined outside of precedure block -- +declare + type finaltype is varray(10) of mytype2; + aa finaltype := finaltype( + mytype2(1, (1, 'ground')), + mytype2(1, (2, 'air')) + ); +begin + aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) -- + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.3 is: %', aa(2).locale.biome; +end; +/ + +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + mytype2(1, mytype(1, 'ground')), + mytype2(1, mytype(2, 'air')) + ); +begin + aa.extend(10); + aa(2) := mytype2(2, mytype(3, 'water')); + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.3 is: %', aa(2).locale.biome; +end; +/ + +create type functype as ( + id integer, + locale myType +); + +create or replace function functype(habitat in mytype2) +return mytype2 +is + ret mytype2; +begin + ret := (-1, (1, 'unknown realm')); + return ret; +end; +/ + +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + functype(1, mytype(1, 'ground')), -- we are prioritizing types here -- + functype(1, mytype(2, 'air')) + ); +begin + RAISE INFO 'locale id is: %', aa(1).id; + RAISE INFO 'biome 1.2 is: %', aa(2).locale.biome; -- air -- +end; +/ +-- abandon type functype +drop type functype; + +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- here we have to use function functype -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + RAISE INFO 'locale ?? is: %', aa(1).id; + RAISE INFO 'biome ??? 
is: %', aa(2).locale.biome; -- weird places -- +end; +/ + +drop function functype; +-- error +declare + type finaltype is table of mytype2; + aa finaltype := finaltype( + functype((1, mytype(1, 'ground'))), -- not sure -- + functype((1, mytype(2, 'air'))) + ); +begin + aa.extend(10); + RAISE INFO 'This message worth 300 tons of gold (once printed).'; +end; +/ + +-- test table of array +declare + type arrayfirst is table(10) of int[]; + arr arrayfirst := arrayfirst(); +begin + +end; +/ + +create type typeG as (a int[]); +declare + type arrayfirst is table of typeG; + arr arrayfirst := arrayfirst(); +begin + arr(1) = row(ARRAY[1, 2, 3]); + RAISE INFO '%', arr(1).a[1]; +end; +/ + +-- test unreserved key word +declare + index int; +begin + index = 1; +end; +/ + +create or replace package pck1 as + type t1 is record(c1 int,c2 varchar2); + type t2 is table of int; + type t3 is varray(10) of int; + v1 t1; + v2 t2; + v3 t3; + v_c1 int; + v_c2 varchar2; +end pck1; +/ + +create or replace package body pck1 as + type t5 is record(c1 int,c2 varchar2); + type t6 is table of int; + type t7 is varray(10) of int; + v5 t5; + v6 t6; + v7 t7; +end pck1; +/ + +create or replace function func2() return int as +begin + pck1.v2 :=pck1.t2(); + pck1.v2.extend(3); + pck1.v2(0) := 1; + pck1.v2(1) := 2; + plpgsql_table_opengauss.pck1.v2(2) := 3; + raise info 'pck1.v2(0) is %',pck1.v2(0); + raise info 'pck1.v2(1) is %',pck1.v2(1); + raise info 'plpgsql_table_opengauss.pck1.v2(2) is %',plpgsql_table_opengauss.pck1.v2(2); + return 0; +end; +/ +call func2(); + +drop type typeA; +drop type typeB; +drop type s_type cascade; +drop type typeC; +drop type typeE; +drop type typeG; +drop type s_type_extend; +drop type typeA_ext; +drop type info; +drop type customer; +drop type mytype; +drop type mytype2; +drop procedure tableof_1; +drop procedure tableof_2; +drop procedure tableof_3; +drop function tableof_6; +drop function tableof_7; +drop function tableof_8; +drop procedure tableof_10; +drop procedure tableof_11; +drop procedure tableof_12; +drop procedure tableof_13; +drop procedure tableof_14; +drop procedure tableof_16; +drop procedure tableof_17; +drop procedure tableof_18; +drop procedure tableof_19; +drop procedure tableof_21; +drop procedure tableof_22; +drop procedure tableof_23; +drop procedure tableof_24; +drop procedure tableof_25; +drop procedure tableof_26; +drop procedure tableof_27; +drop table customers; +drop schema if exists plpgsql_table_opengauss cascade; diff --git a/src/test/regress/sql/plpgsql_tableof.sql b/src/test/regress/sql/plpgsql_tableof.sql new file mode 100644 index 000000000..719ddccc1 --- /dev/null +++ b/src/test/regress/sql/plpgsql_tableof.sql @@ -0,0 +1,1629 @@ +-- test create type table of +-- check compatibility -- +show sql_compatibility; -- expect A -- +-- create new schema -- +drop schema if exists plpgsql_table; +create schema plpgsql_table; +set current_schema = plpgsql_table; +set behavior_compat_options='allow_procedure_compile_check'; + +create type s_type as ( + id integer, + name varchar, + addr text +); + +create type typeA as table of s_type; +create type typeB as table of s_type.id%type; +create type typeC as table of s_type.name%type; +create type typeD as table of varchar(100); + +-- test alter +alter type typeA drop ATTRIBUTE s_type; +alter type typeA ADD ATTRIBUTE a int; +create type typeC2 as table of s_type.name%type; +alter type typeC2 RENAME TO typeC3; + +create or replace procedure tableof_alter() +is +a typeC3; +begin + a(1) = (1, 'zhangsan', 'shanghai'); + RAISE 
INFO 'call a(1): %' ,a(1); +end; +/ + +call tableof_alter(); + +-- test is +create type s_type_1 is ( + id integer, + name varchar, + addr text +); + +create type typeA1 is table of s_type_1; +create type typeB1 is table of s_type_1.id%type; +create type typeC1 is table of s_type_1.name%type; +create type typeD1 is table of varchar(100); + +create or replace function tableof_5_1() + return typeA1 as + a typeA1; + b typeA1; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ + +select tableof_5_1(); +drop procedure tableof_5_1; + +-- test as +create or replace procedure tableof_12_1 + is + TYPE SalTabTyp as TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa(0) = 1; + aa(1) = 2; + RAISE INFO '%' ,aa(0); + RAISE INFO '%' ,aa(1); +end; +/ + +call tableof_12_1(); + +create or replace procedure tableof_12_2 + is + TYPE SalTabTyp as TABLE OF varchar(10) index by BINARY_INTEGER; + aa SalTabTyp; + begin + aa(-1) = 1; + aa(2) = 2; + RAISE INFO '%' ,aa(0); + RAISE INFO '%' ,aa(1); + RAISE INFO '%', aa.count; + RAISE INFO '%', aa; +end; +/ + +call tableof_12_2(); + +drop procedure tableof_12_1; +drop procedure tableof_12_2; +drop type s_type_1; +drop type typeA1; +drop type typeB1; +drop type typeC1; +drop type typeD1; + +-- test table of nest table of error +create type typeF as table of typeD; +-- don't support alter attr +alter type typeA ADD ATTRIBUTE a int; + +-- test in paramter +create or replace procedure tableof_1(a typeA) +is + +begin + RAISE INFO 'a(1): %' ,a(1); + a(1) = (2, 'lisi', 'beijing'); + a(2) = (3, 'zahngwu', 'chengdu'); +end; +/ + +create or replace procedure tableof_2() +is + a typeA; +begin + a(1) = (1, 'zhangsan', 'shanghai'); + RAISE INFO 'before call a(1): %' ,a(1); + perform tableof_1(a); + RAISE INFO 'after call a(2): %' ,a(2); +end; +/ + +call tableof_2(); + +-- don't support create type = () +create or replace procedure tableof_3 + is + aa typeA = typeA(); + begin + RAISE INFO '%' ,aa; +end; +/ + +call tableof_3(); + +-- test return +create or replace function tableof_4() + return typeA as + a typeA; + begin + a(1) = (1, 'lisi', 'beijing'); + return a; +end; +/ + +select tableof_4(); + +create or replace function tableof_4() + return typeA as + a typeA; + begin + a(1) = (1, 'lisi', 'beijing'); + return a; +end; +/ + +select tableof_4(); + +create or replace function tableof_5() + return typeA as + a typeA; + b typeA; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ + +select tableof_5(); + +-- test cast +create or replace function tableof_6() + return typeC as + a typeA; + b typeC; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ + +select tableof_6(); + +--test return wrong type +create or replace function tableof_7() + return typeB as + a typeA; + b typeC; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ + +select tableof_7(); + +-- add one column from s_type +create type s_type_extend as ( + id integer, + name varchar, + addr text, + comment varchar +); + +create type typeA_ext as table of s_type_extend; + +create or replace function tableof_8() + return typeA_ext as + a typeA; + b typeA_ext; + begin + a(1) = (1, 'lisi', 'beijing'); + b = a; + b(2) = (2, 'zahngwu', 'chengdu','good'); + RAISE INFO 'a:%' ,a; + return b; +end; +/ + 
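+-- tableof_8 assigns a typeA collection to a typeA_ext variable; rows copied
+-- from a should be coerced, leaving the extra 'comment' attribute NULL --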
+select tableof_8();
+
+-- test return index
+create or replace function tableof_9()
+ return typeA as
+ a typeA;
+ begin
+ a(-1) = (1, 'lisi', 'beijing');
+ a(2) = (2, 'zahngwu', 'chengdu');
+ return a;
+end;
+/
+
+select tableof_9();
+
+create or replace procedure tableof_10()
+ as
+ a typeA;
+ begin
+ a = tableof_9();
+ RAISE INFO 'a(-1):%' ,a(-1);
+ RAISE INFO 'a(0):%' ,a(0);
+ RAISE INFO 'a(2):%' ,a(2).id;
+end;
+/
+
+call tableof_10();
+
+create or replace procedure tableof_11()
+ as
+ a typeA;
+ begin
+ a = tableof_9();
+ RAISE INFO 'a(-1):%' ,a(-1);
+end;
+/
+
+call tableof_11();
+
+-- test index by
+create or replace procedure tableof_12
+ is
+ TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER;
+ aa SalTabTyp;
+ begin
+ aa('aa') = 1;
+ aa('bb') = 2;
+ RAISE INFO '%' ,aa('aa');
+ RAISE INFO '%' ,aa('bb');
+end;
+/
+
+call tableof_12();
+
+create or replace procedure tableof_13
+ is
+ TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('aa') = 1;
+ aa('bb') = 2;
+ RAISE INFO '%' ,aa(0);
+ RAISE INFO '%' ,aa('bb');
+end;
+/
+
+call tableof_13();
+
+create or replace procedure tableof_14
+ is
+ TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+ aa SalTabTyp;
+ b varchar(10);
+ begin
+ aa('a') = 1;
+ b = 'aa';
+ aa(b) = 2;
+ RAISE INFO '%' ,aa('a');
+ RAISE INFO '%' ,aa('aa');
+ RAISE INFO '%' ,aa(b);
+end;
+/
+
+call tableof_14();
+
+create or replace procedure tableof_15
+ is
+ TYPE SalTabTyp is TABLE OF varchar(10) index by date;
+ aa SalTabTyp;
+ begin
+
+end;
+/
+
+create or replace procedure tableof_15
+ is
+ TYPE SalTabTyp is TABLE OF varchar(10) index by text;
+ aa SalTabTyp;
+ begin
+
+end;
+/
+
+-- test table = table
+create or replace procedure tableof_16
+ is
+ TYPE SalTabTyp is TABLE OF varchar(10) index by BINARY_INTEGER;
+ aa SalTabTyp;
+ bb SalTabTyp;
+ begin
+ aa(-1) = 'b';
+ aa(1) = 'a';
+ RAISE INFO '%' ,aa(-1);
+ bb = aa;
+ RAISE INFO '%' ,bb(-1);
+ bb(8) = 'g';
+ RAISE INFO '%' ,bb(8);
+ RAISE INFO '%' ,aa(8);
+ end;
+/
+
+call tableof_16();
+
+-- test define
+create or replace procedure tableof_17
+ is
+ TYPE SalTabTyp is TABLE OF s_type%rowtype index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = (1, 'zhangsan', 'shanghai');
+ aa('b') = (2, 'lisi', 'beijing');
+ RAISE INFO '%' ,aa('a').id;
+ RAISE INFO '%' ,aa('b');
+end;
+/
+call tableof_17();
+
+create or replace procedure tableof_18
+ is
+ declare
+ TYPE SalTabTyp is TABLE OF s_type.id%type index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = 1;
+ aa('b') = 2;
+ aa = NULL;
+ RAISE INFO '%' ,aa('a');
+ RAISE INFO '%' ,aa('b');
+ aa('a') = 1;
+ aa('C') = 2;
+ aa('b') = 3;
+ RAISE INFO '%' ,aa;
+end;
+/
+
+call tableof_18();
+
+
+-- test not null grammar
+create or replace procedure tableof_19
+ is
+ TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = (1, 'zhangsan', 'shanghai');
+ RAISE INFO '%' ,aa('a');
+end;
+/
+
+call tableof_19();
+
+-- test assign one attr
+create or replace procedure tableof_20
+ is
+ TYPE SalTabTyp is TABLE OF s_type%rowtype not null index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = (1, 'zhangsan', 'shanghai');
+ aa('a').id = 1;
+end;
+/
+
+call tableof_20();
+
+create type info as (name varchar2(50), age int, address varchar2(20), salary float(2));
+create type customer as (id number(10), c_info info);
+create table customers (id number(10), c_info info);
+
+insert into customers (id, c_info) values (1, ('Vera' ,32, 'Paris', 22999.00));
+insert into customers (id, c_info) values (2, ('Zera' ,25, 'London', 5999.00));
+insert into customers (id, c_info) values (3, ('Alice' ,22, 'Bangkok', 9800.98));
+insert into customers (id, c_info) values (4, ('Jim' ,26, 'Dubai', 18700.00));
+insert into customers (id, c_info) values (5, ('Kevin' ,28, 'Singapore', 18999.00));
+insert into customers (id, c_info) values (6, ('Gauss' ,42, 'Beijing', 32999.00));
+-- test cursor fetch into
+create or replace procedure tableof_21
+as
+declare
+ TYPE id_1 is TABLE OF customer.id%type index by varchar(10);
+ TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10);
+ CURSOR C1 IS SELECT id FROM customers order by id;
+ CURSOR C2 IS SELECT c_info FROM customers order by id;
+ info_a c_info_1:=c_info_1();
+ id_a id_1:=id_1();
+begin
+ OPEN C1;
+ OPEN C2;
+ FETCH C1 into id_a(2);
+ FETCH C2 into info_a(2);
+ FETCH C1 into id_a(3);
+ FETCH C2 into info_a(3);
+ CLOSE C1;
+ CLOSE C2;
+ RAISE INFO '%', id_a;
+ RAISE INFO '%', info_a;
+end;
+/
+
+call tableof_21();
+
+-- test select into
+create or replace procedure tableof_22
+as
+declare
+ TYPE id_1 is TABLE OF customer.id%type index by varchar(10);
+ TYPE c_info_1 is TABLE OF customers.c_info%type index by varchar(10);
+ info_a c_info_1:=c_info_1();
+ id_a id_1:=id_1();
+begin
+ select id into id_a(2) from customers where id = 3;
+ select c_info into info_a(2) from customers where id = 3;
+ select id into id_a(3) from customers where id = 4;
+ select c_info into info_a(3) from customers where id = 4;
+ RAISE INFO '%', id_a(2);
+ RAISE INFO '%', info_a(3).age;
+end;
+/
+
+call tableof_22();
+
+-- test cursor for
+create or replace procedure tableof_23
+as
+declare
+ type c_list is TABLE of customer;
+ customer_table c_list:=c_list();
+ CURSOR C1 IS SELECT * FROM customers order by id;
+ counter int := 0;
+begin
+ for n in C1 loop
+ counter := counter + 1;
+ customer_table(counter) := n;
+ end loop;
+ RAISE INFO '%', customer_table(3);
+end;
+/
+
+call tableof_23();
+
+create or replace procedure tableof_24
+as
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+ CURSOR C1 IS SELECT * FROM customers order by id;
+ counter int := 0;
+begin
+ for n in C1 loop
+ counter := counter + 1;
+ customer_table(counter) := n;
+ end loop;
+ RAISE INFO '%', customer_table(4);
+end;
+/
+
+call tableof_24();
+
+-- test row type
+create type typeE as table of s_type%rowtype;
+create type typeE as table of customers%rowtype;
+
+create or replace procedure tableof_25
+as
+declare
+ customer_table typeE;
+ CURSOR C1 IS SELECT * FROM customers order by id;
+ counter int := 0;
+begin
+ for n in C1 loop
+ counter := counter + 1;
+ customer_table(counter) := n;
+ end loop;
+ RAISE INFO '%', customer_table(4);
+end;
+/
+
+call tableof_25();
+
+-- test insert
+create or replace procedure tableof_26
+as
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+begin
+ customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00));
+ customer_table(2) := (8, ('Vera' ,32, 'Paris', 22999.00));
+ insert into customers values (customer_table(1).id, customer_table(1).c_info);
+ insert into customers values (customer_table(2).id, customer_table(2).c_info);
+end;
+/
+
+call tableof_26();
+select * from customers where id = 7;
+
+-- expect error table[]
+create or replace procedure tableof_27
+as
+declare
+ type c_list is TABLE of customers%rowtype;
+ customer_table c_list:=c_list();
+begin
+ customer_table(1) := (7, ('Vera' ,32, 'Paris', 22999.00));
+ insert into customers values (customer_table[1].id, customer_table[1].c_info);
+end;
+/
+
+-- test default
+declare
+ type students is table of varchar2(10);
+ type grades is table of integer;
+ marks grades := grades(98, 97, 74 + 4, (87), 92, 100); -- batch initialize --
+ names students default students('none'); -- default --
+ total integer;
+begin
+ names := students(); -- should append NULL then do the coerce --
+ names := students('Vera ', 'Zera ', 'Alice', 'Jim ', 'Kevin', to_char('G') || 'auss'); -- batch insert --
+ total := names.count;
+ dbe_output.print_line('Total '|| total || ' Students');
+ for i in 1 .. total loop
+ dbe_output.print_line('Student: ' || names(i) || ' Marks: ' || marks(i));
+ end loop;
+end;
+/
+
+create type mytype as (
+ id integer,
+ biome varchar2(100)
+);
+
+create type mytype2 as (
+ id integer,
+ locale myType
+);
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ mytype2(1, mytype(1, 'ground')),
+ mytype2(1, mytype(2, 'air'))
+ );
+begin
+ aa.extend(10);
+ aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+
+-- test of uneven brackets --
+-- error out --
+declare
+ type students is table of varchar2(10);
+ names students;
+begin
+ names := students(1, 'Zera ', 'Alice', 'Jim ', 'Kevin'); -- should be able to read all values correctly --
+ for i in 1 .. 5 loop
+ dbe_output.print_line('Student: ' || names(i]);
+ end loop;
+end;
+/
+
+-- Using composite type defined outside of procedure block --
+declare
+ type finaltype is varray(10) of mytype2;
+ aa finaltype := finaltype(
+ mytype2(1, (1, 'ground')),
+ mytype2(1, (2, 'air'))
+ );
+begin
+ aa(2) := (2, (3, 'water')); -- overwrite record (1, (2, 'air')) --
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ mytype2(1, mytype(1, 'ground')),
+ mytype2(1, mytype(2, 'air'))
+ );
+begin
+ aa.extend(10);
+ aa(2) := mytype2(2, mytype(3, 'water'));
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.3 is: ' || aa(2).locale.biome); -- ... water (not air) --
+end;
+/
+
+create type functype as (
+ id integer,
+ locale myType
+);
+
+create or replace function functype(habitat in mytype2)
+return mytype2
+is
+ ret mytype2;
+begin
+ ret := (-1, (1, 'unknown realm'));
+ return ret;
+end;
+/
+
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ functype(1, mytype(1, 'ground')), -- we are prioritizing types here --
+ functype(1, mytype(2, 'air'))
+ );
+begin
+ dbe_output.print_line('locale id is: ' || aa(1).id);
+ dbe_output.print_line('biome 1.2 is: ' || aa(2).locale.biome); -- air --
+end;
+/
+-- abandon type functype
+drop type functype;
+
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ functype((1, mytype(1, 'ground'))), -- here we have to use function functype --
+ functype((1, mytype(2, 'air')))
+ );
+begin
+ aa.extend(10);
+ dbe_output.print_line('locale ?? is: ' || aa(1).id);
+ dbe_output.print_line('biome ??? is: ' || aa(2).locale.biome); -- weird places --
+end;
+/
+
+drop function functype;
+-- error
+declare
+ type finaltype is table of mytype2;
+ aa finaltype := finaltype(
+ functype((1, mytype(1, 'ground'))), -- not sure --
+ functype((1, mytype(2, 'air')))
+ );
+begin
+ aa.extend(10);
+ dbe_output.print_line('This message worth 300 tons of gold (once printed).');
+end;
+/
+
+-- test table of array
+declare
+ type arrayfirst is table(10) of int[];
+ arr arrayfirst := arrayfirst();
+begin
+
+end;
+/
+
+create type typeG as (a int[]);
+declare
+ type arrayfirst is table of typeG;
+ arr arrayfirst := arrayfirst();
+begin
+ arr(1) = row(ARRAY[1, 2, 3]);
+ dbe_output.print_line(arr(1).a[1]);
+end;
+/
+
+-- test unreserved keyword
+declare
+ index int;
+begin
+ index = 1;
+end;
+/
+
+-- test package
+create or replace package aa
+is
+type students is table of int;
+procedure kk();
+end aa;
+/
+
+create or replace package body aa
+is
+names students;
+procedure kk
+is
+begin
+ names := students(1, 2, 3, 4, 5); -- should be able to read all values correctly --
+ for i in 1 .. 5 loop
+ raise info '%', names[i];
+ end loop;
+end;
+end aa;
+/
+
+call aa.kk();
+drop package if exists aa;
+
+create or replace package pck2 is
+procedure p1;
+type r2 is table of int index by varchar(10);
+va r2;
+end pck2;
+/
+create or replace package body pck2 is
+procedure p1 as
+
+begin
+
+select 11 into va('a');
+select 111 into va('b');
+va('a') := 1111;
+
+raise info '%,', va;
+end;
+end pck2;
+/
+
+call pck2.p1();
+call pck2.p1();
+drop package pck2;
+
+reset current_schema;
+show current_schema;
+declare
+ type students is table of plpgsql_table.s_type;
+ a students;
+begin
+ a(1) = (1, 'lisi', 'beijing');
+end;
+/
+set current_schema = plpgsql_table;
+-- test [:]
+declare
+ TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+aa SalTabTyp;
+ begin
+aa(1) = 1;
+aa(2) = 2;
+RAISE INFO '%' ,aa(1);
+RAISE INFO '%' ,aa[1:2];
+end;
+/
+
+-- test [,]
+declare
+ TYPE SalTabTyp is TABLE OF integer index by varchar(10);
+aa SalTabTyp;
+ begin
+aa(1) = 1;
+aa(2) = 2;
+RAISE INFO '%' ,aa(1);
+RAISE INFO '%' ,aa[1,2];
+end;
+/
+
+-- test functions
+declare
+ type b is table of int index by varchar;
+ a b;
+ c bool;
+begin
+ a('a') = 1;
+ a('b') = 2;
+ c = a.exists('b');
+ raise info '%', c;
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ c int;
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ c = aa.first;
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ aa.delete;
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ aa.trim;
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ c varchar(10);
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ c = aa.next(aa.first);
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ c varchar(10);
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ c = aa.prior('a');
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ c varchar(10);
+ begin
+ aa('a') = 'abcde';
+ aa('b') = 'fghij';
+ c = aa.last;
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by varchar(10);
+ aa SalTabTyp;
+ c int;
+ begin
+ aa('a') = 'abcde';
+ RAISE INFO '%', aa.exists('a');
+end;
+/
+
+declare
+ TYPE SalTabTyp is TABLE OF varchar(10) index by integer;
+ aa SalTabTyp;
+ c int;
+ begin
+ aa(1) = 'a';
+ aa(-1) = 'c';
+ aa(2) = 'b';
+ raise info '%', aa.next(1);
+ raise info '%', aa.prior(1);
+end;
+/
+
+declare
+ type ta is table of varchar(100);
+ tb constant ta := ta('10','11');
+begin
+ tb(1) := 12;
+ dbe_output.print_line(tb[1]);
+end;
+/
+
+declare
+ type ta is table of varchar(100);
+ tb constant ta := ta('10','11');
+begin
+ tb := ta('12','13');
+ dbe_output.print_line(tb[1]);
+end;
+/
+
+reset sql_beta_feature;
+create or replace package pcknesttype is
+ type aa is table of int;
+ type bb is table of aa;
+ procedure proc1();
+end pcknesttype;
+/
+
+create or replace package body pcknesttype
+is
+ mytab aa;
+ my2 bb;
+procedure proc1
+ is begin
+ mytab := aa(1,2,3,4);
+ my2 := bb(mytab);
+
+end;
+end pcknesttype;
+/
+
+create or replace procedure tableof_nest1()
+is
+ type data_type1 is table of s_type index by integer;
+ type data_table_type1 is table of data_type1 index by integer;
+ MyTab data_type1;
+ tmp_y data_type1;
+ yy data_table_type1;
+begin
+ MyTab(1).id := 1;
+ MyTab(2).name := 'B';
+ MyTab(3).addr := 'addr';
+ yy(0) := MyTab;
+ yy(1)(1).id := 1;
+ yy(1)(1).name := 'yy';
+ RAISE INFO 'call yy: %' ,yy(1)(1);
+ RAISE INFO 'call yy count: %' ,yy(0).count;
+ tmp_y := yy(1);
+ --RAISE INFO 'call yy(1) next: %' ,tmp_y.next(1);
+ --RAISE INFO 'call yy first: %' ,yy.first;
+ --RAISE INFO 'call yy next: %' ,yy.next(1);
+end;
+/
+call tableof_nest1();
+
+create or replace procedure tableof_nest2()
+is
+ type data_type1 is table of varchar2(100) index by integer;
+ type data_table_type1 is table of data_type1 index by integer;
+ MyTab data_type1;
+ tmp_y data_type1;
+ yy data_table_type1;
+begin
+ MyTab(1) := 'A';
+ MyTab(2) := 'B';
+ MyTab(3) := 'C';
+ yy(0) := MyTab;
+ yy(1)(1) := 'o';
+ RAISE INFO 'call yy: %' ,yy(1)(1);
+ RAISE INFO 'call yy count: %' ,yy(0).count;
+ tmp_y := yy(1);
+ --RAISE INFO 'call yy(1) next: %' ,tmp_y.next(1);
+ --RAISE INFO 'call yy first: %' ,yy.first;
+ --RAISE INFO 'call yy next: %' ,yy.next(1);
+end;
+/
+
+call tableof_nest2();
+
+create or replace procedure tableof_nest3()
+is
+ type data_type1 is table of varchar2(100) index by varchar2(24);
+ type data_table_type1 is table of data_type1 index by varchar2(24);
+ MyTab data_type1;
+ tmp_y data_type1;
+ yy data_table_type1;
+begin
+ MyTab('a') := 'A';
+ MyTab('b') := 'B';
+ MyTab('c') := 'C';
+ yy('a') := MyTab;
+ yy('b')('c') := 'o';
+ RAISE INFO 'call yy: %' ,yy('a')('c');
+ RAISE INFO 'call yy count: %' ,yy('a').count;
+ tmp_y := yy('b');
+ --RAISE INFO 'call yy next: %' ,tmp_y.next('c');
+ --RAISE INFO 'call yy first: %' ,tmp_y.first;
+ --RAISE INFO 'call yy next: %' ,yy.next('a');
+end;
+/
+call tableof_nest3();
+
+DECLARE
+ TYPE r1 is TABLE OF int;
+ type r2 is table of r1;
+ emp_id r2;
+BEGIN
+ emp_id(1)(1) := 5*7784;
+ raise info '%,%', emp_id,emp_id(1)(1);
+END;
+/
+
+create type type001 as(c1 int,c2 varchar);
+create type type002 as(c1 type001,c2 type001.c2%type,c4 int);
+create type type003 as table of type002;
+create type type004 as(c1 type003,c2 int);
+
+create or replace procedure proc_1 as
+typecol type004;
+begin
+typecol.c1(1).c1.c1=1;
+typecol.c2=1;
+raise info 'typecol %',typecol.c1(1).c1.c1;
+raise info 'typecol %',typecol.c2;
+raise info 'typecol %',typecol;
+end;
+/
+
+call proc_1();
+
+drop type type_nest_23,type_nest_22,type_nest_24,type_nest_25 cascade;
+drop table type_nest_21 cascade;
+create table type_nest_21 (c1 int,c2 text, c3 date);
+create type type_nest_22 as(c1 type_nest_21,c2 type_nest_21.c2%type,c3 type_nest_21%rowtype);
+create type type_nest_23 is table of type_nest_22;
+create type type_nest_24 is table of type_nest_21;
+create type type_nest_25 as(c1 type_nest_21,c2 type_nest_23);
+
+declare
+ type type1 is varray(6) of varchar2(10);
+ TYPE type2 is TABLE OF type_nest_21;
+ TYPE type3 is TABLE OF type2;
+ TYPE type4 is TABLE OF type3;
+ vtype5 type3;
+ vtype6 type_nest_25;
+begin
+ vtype5(1)(1).c2 := 'abc';
+ raise info '%', vtype5(1)(1).c2;
+end;
+/
+
+declare
+ type type1 is varray(6) of varchar2(10);
+ TYPE type2 is TABLE OF type_nest_21;
+ TYPE type3 is TABLE OF type2;
+ vtype6 type3;
+begin
+ vtype6(1)(1)(1).c2 := 'abc';
+end;
+/
+
+declare
+ TYPE record_user1 is table of type_nest_21;
+ TYPE record_user2 is table of record_user1;
+ TYPE record_user3 is table of record_user2;
+ v_record_user2 record_user2;
+ v_record_user3 record_user3;
+begin
+ v_record_user2(1) :=1;
+end;
+/
+
+declare
+ TYPE record_user1 is table of type_nest_21;
+ TYPE record_user2 is table of record_user1;
+ TYPE record_user3 is table of record_user2;
+ v_record_user2 record_user2;
+ v_record_user3 record_user3;
+begin
+ v_record_user3(1)(1):=1;
+ v_record_user2(1).c1 :=1;
+end;
+/
+drop table customers cascade;
+create table customers (
+ id number(10) not null,
+ c_name varchar2(50),
+ c_age number(8) not null,
+ c_address varchar2(50),
+ salary float(2) not null,
+ constraint customers_pk primary key (id)
+);
+
+-- test bulk collect into with empty table
+declare
+ cursor c_customers is select c_name from customers order by id;
+ type c_list is table of customers.c_name%type index by integer;
+ name_arr c_list := c_list();
+begin
+ name_arr(2) = (-1, 'Vera' ,32, 'Paris', 22999.00);
+ name_arr(7) = (-1, 'Vera' ,32, 'Paris', 22999.00);
+ -- bulk collect + cursor
+ open c_customers;
+ fetch c_customers bulk collect into name_arr;
+ close c_customers;
+ raise info '%', name_arr.count;
+ raise info '%', name_arr.last;
+ raise info '%', name_arr.exists(7);
+end;
+/
+
+declare
+ type id_list is varray(6) of customers.id%type;
+ id_arr id_list;
+begin
+ id_arr(1) = 1;
+ raise info '%', id_arr;
+ select id bulk collect into id_arr from customers order by id DESC;
+ raise info '%', id_arr;
+end;
+/
+
+create type mytype1 as (
+ id integer,
+ biome varchar2(100)
+);
+
+-- success, multiple targets supported
+declare
+ type tab is varray(6) of mytype1;
+ tab1 tab := tab();
+begin
+ tab1(1) = (1,'a');
+ raise info '%', tab1;
+ select id, c_name bulk collect into tab1 from customers order by id DESC;
+ raise info '%', tab1;
+end;
+/
+
+insert into customers (id, c_name, c_age, c_address, salary) values (1, 'Vera' ,32, 'Paris', 22999.00);
+
+-- test bulk collect into
+declare
+ cursor c_customers is select c_name from customers order by id;
+ type c_list is table of customers.c_name%type index by integer;
+ name_arr c_list := c_list();
+begin
+ name_arr(2) = (-1, 'Vera' ,32, 'Paris', 22999.00);
+ name_arr(7) = (-1, 'Vera' ,32, 'Paris', 22999.00);
+ -- bulk collect + cursor
+ open c_customers;
+ fetch c_customers bulk collect into name_arr;
+ exit when c_customers%NOTFOUND;
+ close c_customers;
+ raise info '%', name_arr.count;
+ raise info '%', name_arr.last;
+ raise info '%', name_arr.exists(7);
+end;
+/
+
+insert into customers (id, c_name, c_age, c_address, salary) values (2, '' ,25, 'London', 5999.00); -- a missing value here
+insert into customers (id, c_name, c_age, c_address, salary) values (3, 'Alice' ,22, 'Bangkok', 9800.98);
+insert into customers (id, c_name, c_age, c_address, salary) values (4, 'Jim' ,26, 'Dubai', 18700.00);
+insert into customers (id, c_name, c_age, c_address, salary) values (5, 'Kevin' ,28, 'Singapore', 18999.00);
+insert into customers (id, c_name, c_age, c_address, salary) values (6, 'Gauss' ,42, 'Beijing', 32999.00);
+-- test bulk collect into
+declare
+ cursor c_customers is select c_name from customers order by id;
+ type c_list is table of customers.c_name%type index by integer;
+ name_arr c_list := c_list();
+begin
+ -- bulk collect + cursor
+ open c_customers;
+ fetch c_customers bulk collect into name_arr limit 4;
+ exit when c_customers%NOTFOUND;
+ close c_customers;
+
+ for i in 1..6 loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+
+ -- assign values directly
+ name_arr := ARRAY['qqqq', 'sfsafds', 'sadsadas'];
+
+ for i in 1..6 loop
+ dbe_output.print_line('name(' || i || '): ' || name_arr(i));
+ end loop;
+end;
+/
+
+declare
+ cursor c_customers is select c_name from customers order by id;
+ type c_list is table of customers.c_name%type index by varchar;
+ name_arr c_list := c_list();
+begin
+ -- bulk collect + cursor
+ open c_customers;
+ fetch c_customers bulk collect into name_arr limit 4;
+ exit when c_customers%NOTFOUND;
+ close c_customers;
+end;
+/
+
+create table pro_tblof_tbl_018_1(c1 int,c2 varchar(20));
+create table pro_tblof_tbl_018(c1 int,c2 pro_tblof_tbl_018_1);
+create type pro_tblof_018 is table of pro_tblof_tbl_018%rowtype;
+
+insert into pro_tblof_tbl_018 values (1,(2,'aaa'));
+
+create or replace procedure pro_tblof_pro_018_11()
+as
+ tblof001 pro_tblof_018;
+ cursor cor1 is select c2 from pro_tblof_tbl_018 order by c1 desc;
+ cursor cor2 is select c1 from pro_tblof_tbl_018 order by c1 desc;
+ tblcount int;
+begin
+ select count(*) into tblcount from pro_tblof_tbl_018;
+ for i in 1..tblcount
+ loop
+ --open cor1;
+ -- fetch cor1 bulk collect into tblof001(i).c2;
+-- EXIT WHEN cor1%NOTFOUND;
+ --close cor1;
+ open cor2;
+ fetch cor2 bulk collect into tblof001(i).c1;
+ exit when cor2%notfound;
+ close cor2;
+ i=i+1;
+ end loop;
+ for i in tblof001.first..tblof001.last
+ loop
+ if tblof001(i) is null then
+ tblof001(i)=tblof001(tblof001.next(i));
+ end if;
+ dbe_output.print_line('tblof001 ('||i||')is '||tblof001(i).c1||'-----'||tblof001(i).c2);
+ end loop;
+ raise info 'tblof001 is %',tblof001;
+end;
+/
+
+call pro_tblof_pro_018_11();
+
+create or replace procedure p155() as
+type t is table of varchar2 index by integer;
+v t;
+begin
+raise info '%', v.count;
+for i in 1..v.count loop
+v(i):=0;
+end loop;
+end;
+/
+
+call p155();
+
+create or replace procedure p156() as
+type t is table of varchar2 index by varchar;
+v t;
+begin
+raise info '%', v.count;
+for i in 1..v.count loop
+v(i):=0;
+end loop;
+end;
+/
+
+call p156();
+
+ create or replace procedure table_column
+ is
+ type rec_type is record (name varchar2(100), epno int);
+ TYPE SalTabTyp as TABLE of rec_type index by BINARY_INTEGER;
+ aa SalTabTyp;
+ begin
+ aa(0).epno = 1;
+ raise info '%', aa;
+ select '' into aa(0).name;
+ raise info '%', aa;
+end;
+/
+
+call table_column();
+
+create table pkgtbl054(c0 int,c1 number(5),c2 varchar2(20),c3 clob,c4 blob);
+insert into pkgtbl054 values(1,1,'varchar1',repeat('clob1',20),'abcdef1');
+insert into pkgtbl054 values(2,2,'varchar10',repeat('clob2',20),'abcdef2');
+
+create type type0011 as(c0 int,c1 number(5),c2 varchar2(20),c3 clob,c4 blob);
+
+create or replace package pkg054
+is
+type type0011 is table of type0011%rowtype index by varchar2(20);
+type type002 is table of type0011.c2%type index by integer;
+col1 type0011;
+col2 type002;
+procedure proc054_1(col3 type0011,col4 type002);
+function proc054_2(col5 int) return integer;
+end pkg054;
+/
+
+create or replace package body pkg054
+is
+procedure proc054_1(col3 type0011,col4 type002)
+is
+begin
+raise info 'col13 is %',col3;
+raise info 'col14 is %',col4;
+exception
+ when others then
+ raise info 'sqlerrm is %',sqlerrm;
+end;
+function proc054_2(col5 int) return integer
+as
+begin
+ col1('1').c0:=128909887;
+col1('1').c1:=12345;
+col1('2').c2:='var2';
+col1('2').c3:='clobcol1';
+col1('2').c4:='123456';
+col2(1):=col1('2').c2;
+col2(3):=col1('1').c3;
+raise info 'col1 is %',col1;
+raise info 'col2 is %',col2;
+ proc054_1(col3=>pkg054.col1,col4=>pkg054.col2);
+return 1;
+end;
+end pkg054;
+/
+
+call pkg054.proc054_2(1);
+
+create or replace package body pkg054
+is
+procedure proc054_1(col3 type0011,col4 type002)
+is
+begin
+raise info 'col13 is %',col3;
+raise info 'col14 is %',col4;
+exception
+ when others then
+ raise info 'sqlerrm is %',sqlerrm;
+end;
+function proc054_2(col5 int) return integer
+as
+begin
+ col1('1').c0:=128909887;
+col1('1').c1:=12345;
+col1('2').c2:='var2';
+col1('2').c3:='clobcol1';
+col1('2').c4:='123456';
+col2(1):=col1('2').c2;
+col2(3):=col1('1').c3;
+raise info 'col1 is %',col1;
+raise info 'col2 is %',col2;
+ proc054_1(pkg054.col1,pkg054.col2);
+return 1;
+end;
+end pkg054;
+/
+
+call pkg054.proc054_2(1);
+
+drop package pkg054;
+drop type type_nest_23,type_nest_22,type_nest_24,type_nest_25 cascade;
+drop table type_nest_21;
+
+create or replace package pkg049
+is
+type type001 is table of number(8) index by varchar2(30);
+type type002 is record(c1 type001,c2 varchar2(30));
+function proc049_2(col1 int) return type001;
+end pkg049;
+/
+
+create or replace function tableof_return_1(col1 int) return s_type[]
+is
+type type001 is table of s_type index by varchar2(30);
+a type001;
+begin
+return a;
+end;
+/
+
+create or replace function tableof_return_2(col1 int) return integer
+is
+type type001 is table of s_type index by varchar2(30);
+a type001;
+begin
+a(1) = (1, 'lisi', 'beijing');
+return a(1).id;
+end;
+/
+
+call tableof_return_2(1);
+
+create or replace function tableof_return_3(col1 int) return integer
+is
+type type001 is table of integer index by varchar2(30);
+a type001;
+begin
+a(1) = 1;
+a(2) = 2;
+return a(1);
+end;
+/
+
+call tableof_return_3(1);
+
+-- test varchar as index and text as index
+drop table t1;
+create table t1 (a varchar2(100), b varchar2(100), c number(10,0), d number(10,0), e number(10,0));
+
+create or replace package pck1 as
+type ra is table of varchar2 index by varchar2(100);
+procedure p1 (v01 out ra);
+end pck1;
+/
+create or replace package body pck1 as
+type rb is table of t1 index by varchar;
+v02 rb;
+v_buff varchar2(1000);
+procedure p1(v01 out ra) as
+v_value varchar2(200);
+v_index varchar2(200);
+i int := 1;
+begin
+v_value := 'testdaa11'||i;
+v_index := 'test_' || i;
+v02(v_index).a = v_value;
+v01(v02(v_index).a) := v_value ||i;
+raise info 'v02.a : %', v02(v_index).a;
+raise info 'v01.first : %', v01.first();
+raise info 'v01(testdaa111) : %', v01('testdaa111');
+raise info 'v01(v01.first()) : %' ,v01(v01.first());
+raise info 'v01(v02(v_index).a) : %' ,v01(v02(v_index).a);
+end;
+end pck1;
+/
+
+call pck1.p1('');
+
+drop package pck1;
+drop table t1;
+
+create table blob1(c1 blob);
+create or replace procedure testblob1(count int)
+IS
+begin
+execute immediate 'insert into blob1 values(:p1);' using 1::bit(100)::varchar::blob;
+end;
+/
+
+call testblob1(1);
+drop table blob1;
+
+-- test table of as out param
+create or replace package pck1 as
+type r1 is table of int index by int;
+type r2 is record(a int, b int);
+procedure p1;
+procedure p2(va out r2,vb out r1);
+procedure p2(vc int, va out r2,vb out r1);
+end pck1;
+/
+create or replace package body pck1 as
+procedure p1 as
+va r2;
+vb r1;
+begin
+p2(va, vb);
+raise info '%',vb.first;
+end;
+procedure p2(va out r2, vb out r1) as
+vc int;
+begin
+p2(vc,va,vb);
+end;
+procedure p2(vc int, va out r2, vb out r1) as
+begin
+va := (1,2);
+vb(2) := 2;
+end;
+end pck1;
+/
+call pck1.p1();
+drop package pck1;
+
+create or replace package pkgnest002
+as
+type ty0 is table of integer index by integer;
+type ty1 is table of ty0;
+type ty3 is table of ty1 ;
+procedure p1;
+end pkgnest002;
+/
+
+create or replace package body pkgnest002
+as
+procedure p1
+is
+v1 ty0:=ty0();
+v2 ty1:=ty1();
+v21 ty1;
+v22 ty3;
+v31 ty3:=ty3();
+begin
+raise info 'v1 is %',v1;
+raise info 'v31 is %',v31;
+v1(1):=1;
+v1(2):=2;
+v2(1):=v1;
+v1(5):=5;
+v2(2):=v1;
+raise info 'v1 is %',v1(1);
+raise info 'v2 is %',v2(1);
+raise info 'v2 is %',v2(2);
+v31(4):=v21;
+raise info 'v31(4) is %', v31(4);
+v21(1)(1):=-1;
+raise info 'v21(1) is %', v21(1);
+v21(2)(2):=-2;
+v31(3):=v21;
+v22:=v31;
+raise info 'v31 is %', v31(3)(2);
+v22:=v2;
+end;
+end pkgnest002;
+/
+
+call pkgnest002.p1();
+call pkgnest002.p1();
+
+create or replace package pkgnest_auto
+as
+type ty1 is table of varchar2(20) index by varchar2;
+type ty2 is table of ty1 index by varchar2;
+type ty3 is table of ty2 index by varchar2;
+function p1() return varchar2(20);
+pv1 ty1;
+pv2 ty2;
+pv3 ty3;
+end pkgnest_auto;
+/
+
+create or replace package body pkgnest_auto
+as
+function p1() return varchar2(20)
+is
+PRAGMA AUTONOMOUS_TRANSACTION;
+begin
+pv1(1):=10000;
+pv1(2):=20000;
+pv1(3):=30000;
+pv2(1):=pv1;
+pv2(2):=pv1;
+pv2(3):=pv1;
+pv3(1):=pv2;
+pv3(2):=pv2;
+pv3(3):=pv2;
+return pv6.first;
+end;
+end pkgnest_auto;
+/
+
+call pkgnest_auto.p1();
+
+---- clean ----
+drop type s_type;
+drop type typeA;
+drop type typeB;
+drop type s_type cascade;
+drop type typeC;
+drop type typeE;
+drop type typeG;
+drop type s_type_extend cascade;
+drop type typeA_ext;
+drop type info cascade;
+drop type customer;
+drop type mytype cascade;
+drop type mytype2;
+drop procedure tableof_1;
+drop procedure tableof_2;
+drop procedure tableof_3;
+drop function tableof_6;
+drop function tableof_7;
+drop function tableof_8;
+drop procedure tableof_10;
+drop procedure tableof_11;
+drop procedure tableof_12;
+drop procedure tableof_13;
+drop procedure tableof_14;
+drop procedure tableof_16;
+drop procedure tableof_17;
+drop procedure tableof_18;
+drop procedure tableof_19;
+drop procedure tableof_21;
+drop procedure tableof_22;
+drop procedure tableof_23;
+drop procedure tableof_24;
+drop procedure tableof_25;
+drop procedure tableof_26;
+drop procedure tableof_27;
+drop package pcknesttype;
+drop procedure tableof_nest2;
+drop procedure tableof_nest3;
+drop table customers;
+drop package pkgnest002;
+drop package pkgnest_auto;
+drop schema if exists plpgsql_table cascade;
diff --git a/src/test/regress/sql/plpgsql_tableofindex.sql b/src/test/regress/sql/plpgsql_tableofindex.sql
new file mode 100644
index 000000000..f847a1656
--- /dev/null
+++ b/src/test/regress/sql/plpgsql_tableofindex.sql
@@ -0,0 +1,1085 @@
+-- test create type table of
+-- check compatibility --
+show sql_compatibility; -- expect A --
+-- create new schema --
+drop schema if exists plpgsql_tableofindex;
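+-- the tests below exercise 'table of ... index by' collections as package
+-- procedure parameters (in / out / inout) and as nested collection types --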
+create schema plpgsql_tableofindex;
+set current_schema = plpgsql_tableofindex;
+
+create type s_type as (
+ id integer,
+ name varchar,
+ addr text
+);
+
+create type typeA as table of s_type;
+
+-- test 1 parameter - in
+create or replace package pck2 is
+type r2 is table of s_type index by varchar(10);
+procedure p1;
+procedure p2(va r2);
+end pck2;
+/
+
+create or replace package body pck2 is
+procedure p1 as
+va r2;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+va('c') := (2, 'zhangsanc', 'shanghai');
+p2(va);
+end;
+
+procedure p2(va r2) as
+begin
+raise info 'a:%', va('a');
+raise info 'c:%', va('c');
+end;
+end pck2;
+/
+
+call pck2.p1();
+
+-- test 3 parameter - in
+create or replace package pck3 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(b int, va r2, a int, vb r3);
+end pck3;
+/
+
+create or replace package body pck3 is
+procedure p1 as
+va r2;
+vb r3;
+b int;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+vb(5) := (10086,'aa','bb');
+vb(233) := (10087,'aa','bb');
+p2(b,va,1,vb);
+end;
+
+procedure p2(b int, va r2, a int, vb r3) as
+begin
+raise info 'va:%', va('a');
+raise info 'vb(233):%', vb(233);
+raise info 'vb:%', vb;
+end;
+end pck3;
+/
+
+call pck3.p1();
+
+-- test 1 parameter - out
+create or replace package pck4 is
+type r2 is table of s_type index by varchar(10);
+procedure p1;
+procedure p2(va out r2);
+end pck4;
+/
+
+create or replace package body pck4 is
+procedure p1 as
+va1 r2;
+begin
+p2(va1);
+raise info '%,', va1('a');
+end;
+
+procedure p2(va out r2) as
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+end;
+end pck4;
+/
+
+call pck4.p1();
+
+-- test 3 parameter - out
+create or replace package pck5 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(va out r2, a out int, vb out r3);
+end pck5;
+/
+
+create or replace package body pck5 is
+procedure p1 as
+va1 r2;
+va2 r3;
+a int;
+begin
+p2(va1, a, va2);
+raise info '%', a;
+raise info '%', va1('a');
+raise info '%', va2(233);
+end;
+
+procedure p2(va out r2, a out int, vb out r3) as
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+vb(233) := (10086,'aa','bb');
+a = 2;
+end;
+end pck5;
+/
+
+call pck5.p1();
+
+-- test 3 parameter - out
+create or replace package pck6 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(va in r2, a out int, vb out r3);
+end pck6;
+/
+
+create or replace package body pck6 is
+procedure p1 as
+va1 r2;
+va2 r3;
+a int;
+begin
+va1('a') := (1, 'zhangsan', 'shanghai');
+p2(va1, a, va2);
+
+raise info '%', a;
+raise info 'va:%', va1;
+raise info '%', va2(233);
+end;
+
+procedure p2(va in r2, a out int, vb out r3) as
+begin
+raise info '%', va('a');
+va('c') := (3, 'zhangsan', 'shanghai');
+vb(233) := (10086,'aa','bb');
+a = 2;
+end;
+end pck6;
+/
+
+call pck6.p1();
+
+-- test 1 parameter - inout
+create or replace package pck7 is
+type r2 is table of s_type index by varchar(10);
+procedure p1;
+procedure p2(va inout r2);
+end pck7;
+/
+
+create or replace package body pck7 is
+procedure p1 as
+va r2;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+va('c') := (2, 'zhangsanc', 'shanghai');
+p2(va);
+raise info 'e:%', va('e');
+raise info 'a:%', va('a');
+end;
+
+procedure p2(va inout r2) as
+begin
+raise info 'a:%', va('a');
+raise info 'c:%', va('c');
+va('e') := (3, 'zhangsanc', 'shanghai');
+end;
+end pck7;
+/
+
+call pck7.p1();
+
+-- test 3 parameter - inout
+create or replace package pck8 is
+type r2 is table of s_type index by varchar(10);
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(va inout r2, a inout int, vb inout r3);
+end pck8;
+/
+
+create or replace package body pck8 is
+procedure p1 as
+va r2;
+vb r3;
+a int;
+begin
+va('a') := (1, 'zhangsan', 'shanghai');
+vb(5) := (10086,'aa','bb');
+p2(va,a,vb);
+raise info 'vb(233):%', vb(233);
+raise info 'va(c):%', va('c');
+end;
+
+procedure p2(va inout r2, a inout int, vb inout r3) as
+begin
+raise info 'va:%', va('a');
+vb(233) := (10087,'aa','bb');
+va('c') := (2, 'zhangsanc', 'shanghai');
+raise info 'vb:%', vb;
+end;
+end pck8;
+/
+
+call pck8.p1();
+
+-- test parameter - in && out
+create or replace package pck9 is
+type r3 is table of s_type index by integer;
+procedure p1;
+procedure p2(a int, b varchar2, c out varchar2,vb out r3);
+end pck9;
+/
+
+create or replace package body pck9 is
+procedure p1 as
+vb r3;
+a int;
+b varchar2;
+c varchar2;
+begin
+vb(5) := (10086,'aa','bb');
+a = 1;
+b = 'dddd';
+p2(a,b,c,vb);
+raise info 'c:%', c;
+raise info 'vb(233):%', vb(233);
+end;
+
+procedure p2(a int, b varchar2, c out varchar2,vb out r3) as
+begin
+c = 'aaaa';
+vb(233) := (10087,'aa','bb');
+end;
+end pck9;
+/
+
+call pck9.p1();
+
+drop type if exists type_nest_01 cascade;
+drop type if exists type_nest_02 cascade;
+drop type if exists type_nest_03 cascade;
+
+declare
+type ty1 is table of integer index by integer;
+v1 ty1 =ty1(1,2,3,45);
+v2 ty1;
+begin
+v2(1):=12345;
+v2(3):=56789;
+raise info 'v1 is %',v1;
+raise info 'v2.first is %',v2.first;
+raise info 'v2.last is %',v2.last;
+raise info 'v2 is %',v2;
+end;
+/
+
+declare
+type ty1 is table of integer index by integer;
+v1 ty1;
+v2 ty1;
+begin
+v1 =ty1(1,2,3,45);
+v2(1):=12345;
+v2(3):=56789;
+raise info 'v1 is %',v1;
+raise info 'v2.first is %',v2.first;
+raise info 'v2.last is %',v2.last;
+raise info 'v2 is %',v2;
+end;
+/
+
+declare
+type ty1 is table of integer index by integer;
+v2 ty1=ty1();
+begin
+v2(1):=12345;
+v2(3):=56789;
+raise info 'v2.first is %',v2.first;
+raise info 'v2.last is %',v2.last;
+raise info 'v2 is %',v2;
+end;
+/
+
+declare
+type ty1 is table of integer index by integer;
+v2 ty1;
+begin
+v2=ty1();
+v2(1):=12345;
+v2(3):=56789;
+raise info 'v2.first is %',v2.first;
+raise info 'v2.last is %',v2.last;
+raise info 'v2 is %',v2;
+end;
+/
+
+create type type_nest_01 as (a int, b int);
+create type type_nest_02 as (a int, b sch1.type_nest_01);
+create type type_nest_03 as (c1 int,c2 text, c3 date);
+
+create or replace package pack3 is
+ type t1 is table of number;
+ type t2 is table of t1;
+
+ type type01 is table of type_nest_03;
+ type type02 is table of type01;
+ type type03 is table of type02;
+
+ type type0001 is record(c1 number(5,2),c2 varchar2(20));
+ type type0002 is table of type0001;
+ type type0003 is table of type0002;
+ v1 t2;
+ v2 type03;
+ v4 type0003;
+procedure main(a int, b int);
+end pack3;
+/
+
+create or replace package body pack3 is
+procedure main(a int, b int) is
+begin
+v1(a)(1):=1;
+v1(a)(2):=2;
+v1(2)(1):=3;
+v2(1)(1)(1):=(a,'a1','2021-10-01');
+v4(a)(1).c1:=890;
+v4(a)(1).c2:='qwe';
+raise info 'v1 is,%',v1(b);
+raise info 'v2 is,%',v2(b)(b);
+raise info 'v4 is,%',v4(b);
+end;
+end pack3;
+/
+call pack3.main(1,2);
+call pack3.main(2,1);
+drop package pack3;
+drop type if exists type_nest_01 cascade;
+drop type if exists type_nest_02 cascade;
+drop type if exists type_nest_03 cascade;
+
+-- test table of index by in autonomous transaction
+create or replace package pck1 as
+type r2 is table of s_type index by varchar(10);
+procedure p2 (c1 out r2);
+end pck1;
+/
+
+create or replace package body pck1 as
+procedure p2 (c1 out r2) as
+PRAGMA AUTONOMOUS_TRANSACTION;
+va int;
+begin
+null;
+end;
+end pck1;
+/
+
+drop package pck1;
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+a int;
+v1 ty1;
+begin
+a = 1;
+v1(1) = 1;
+v1(5):=a||v1.first;
+raise info 'v1 is %',v1;
+end;
+/
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+begin
+v1(1) = 1;
+v1(5):= v1(1)||v1.first;
+raise info 'v1 is %',v1;
+end;
+/
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1(1)||v2(2);
+raise info 'v1 is %',v1;
+end
+/
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+begin
+v1(1) = 1;
+v1(5):= v1.first || v1(1);
+raise info 'v1 is %',v1;
+end;
+/
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1(1)||v2.first;
+raise info 'v1 is %',v1;
+end
+/
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v1(5):= v1.first ||v2.first;
+raise info 'v1 is %',v1;
+end
+/
+
+declare
+type ty1 is table of varchar2(20) index by BINARY_INTEGER;
+v1 ty1;
+v2 ty1;
+v3 ty1;
+begin
+v1(1) = 1;
+v2(2) = 2;
+v3(3) = 3;
+v1(5):= v1(1)||v2(2)||v3(3);
+raise info 'v1 is %',v1;
+end
+/
+
+create table tytbl094(c1 int,c2 number(8,1),c3 varchar2(20),c4 date,c5 timestamp,c6 clob,c7 blob);
+
+create or replace procedure tableof_record_assign_1()
+is
+ type type000 is table of tytbl094%rowtype index by integer;
+ type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000);
+ c1 ta;
+ c2 ta;
+begin
+c1 = c2;
+end;
+/
+
+/* should error, but we don't currently handle it. */
+create or replace procedure tableof_record_assign_3()
+is
+ type type000 is table of tytbl094%rowtype index by integer;
+ type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000);
+ type tb is record(c1 int, c2 ta);
+ c1 tb;
+ c2 tb;
+begin
+c1 = c2;
+end;
+/
+
+drop procedure tableof_record_assign_3();
+
+create or replace package pck_tableof_record_assign is
+ type r3 is table of tytbl094 index by integer;
+ type ta is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 r3);
+ c1 ta;
+procedure p1;
+end pck_tableof_record_assign;
+/
+
+create or replace package body pck_tableof_record_assign is
+procedure p1 as
+c2 ta;
+begin
+c1 = c2;
+end;
+end pck_tableof_record_assign;
+/
+
+drop package pck_tableof_record_assign;
+
+declare
+type ty1 is table of tytbl094.c1%type index by BINARY_INTEGER;
+v1 ty1;
+begin
+v1(4):=4;
+v1(2):=2;
+v1(5):=5;
+v1.delete(5);
+DBE_OUTPUT.PRINT_LINE(v1.first);
+DBE_OUTPUT.PRINT_LINE(v1.next(v1.first));
+DBE_OUTPUT.PRINT_LINE(v1.last);
+DBE_OUTPUT.PRINT_LINE(v1(5));
+DBE_OUTPUT.PRINT_LINE(v1.count);
+end;
+/
+
+create or replace package pkg054
+is
+type r2 is table of s_type index by varchar(10);
+function proc054_2(col5 r2) return int;
+end pkg054;
+/
+
+create or replace package pkg065
+is
+type type000 is record(c1 int,c2 number,c3 varchar,c4 clob,c5 blob);
+type type001 is table of type000 index by integer;
+type type002 is table of type000 index by varchar2(20);
+procedure proc065_11(col3 type001,col4 out type002);
+procedure proc065_1(col3 type001,col4 out type001,col5 out type002);
+function proc065_2(col5 int) return int;
+end pkg065;
+/
+
+create or replace package body pkg065
+is
+procedure proc065_11(col3 type001,col4 out type002)
+is
+begin
+col4(1):=(41,41,'col4c3','col4c4','c4c5');
+col4(3):=(23,23,'col4c3','col4c4','c4c5');
+end;
+
+procedure proc065_1(col3 type001,col4 out type001,col5 out type002)
+is
+begin
+col4(1):=(441,441,'col44c3','col44c4','c44c5');
+col5('aaa'):=(55,55,'col4c3','col4c4','c4c5');
+end;
+
+function proc065_2(col5 int) return int
+as
+c1 type001;
+c2 type002;
+c3 type002;
+begin
+proc065_1(c2,c2,c3);
+raise info 'c2 is %',c2;
+raise info 'c2.first is %',c2.first;
+raise info 'c3 is %',c3;
+raise info 'c3.first is %',c3.first;
+return c1.count;
+end;
+end pkg065;
+/
+
+create or replace package body pkg065
+is
+procedure proc065_11(col3 type001,col4 out type002)
+is
+begin
+col4(1):=(41,41,'col4c3','col4c4','c4c5');
+col4(3):=(23,23,'col4c3','col4c4','c4c5');
+end;
+
+procedure proc065_1(col3 type001,col4 out type001,col5 out type002)
+is
+begin
+col4(1):=(441,441,'col44c3','col44c4','c44c5');
+col5('aaa'):=(55,55,'col4c3','col4c4','c4c5');
+end;
+
+function proc065_2(col5 int) return int
+as
+c1 type001;
+c2 type002;
+c3 type002;
+begin
+proc065_1(c1,c2,c3);
+raise info 'c2 is %',c2;
+raise info 'c2.first is %',c2.first;
+raise info 'c3 is %',c3;
+raise info 'c3.first is %',c3.first;
+return c1.count;
+end;
+end pkg065;
+/
+
+drop package pkg065;
+
+create or replace package pkg065
+is
+type type000 is record(c1 int,c2 number,c3 varchar,c4 clob,c5 blob);
+type type001 is table of type000 index by integer;
+type type002 is table of type000 index by varchar2(20);
+procedure proc065_1(col3 type001,col4 type002);
+procedure proc065_1(col3 type001,col4 out type001,col5 out type002);
+function proc065_2(col5 int) return int;
+end pkg065;
+/
+
+create or replace package body pkg065
+is
+procedure proc065_1(col3 type001,col4 type002)
+is
+begin
+col4(1):=(41,41,'col4c3','col4c4','c4c5');
+col4(3):=(23,23,'col4c3','col4c4','c4c5');
+end;
+
+procedure proc065_1(col3 type001,col4 out type001,col5 out type002)
+is
+begin
+col4(1):=(441,441,'col44c3','col44c4','c44c5');
+col5('aaa'):=(55,55,'col4c3','col4c4','c4c5');
+end;
+
+function proc065_2(col5 int) return int
+as
+c1 type001;
+c2 type002;
+c3 type002;
+begin
+proc065_1(c1,c2,c3);
+raise info 'c2 is %',c2;
+raise info 'c2.first is %',c2.first;
+raise info 'c3 is %',c3;
+raise info 'c3.first is %',c3.first;
+return c1.count;
+end;
+end pkg065;
+/
+
+drop package pkg065;
+
+create or replace package pkg_nesttb
+is
+ type type001 is table of number(5) index by integer;
+ type type002 is table of type001 index by integer;
+ type type003 is table of type002;
+ type type004 is table of type003;
+ type type005 is table of type004;
+ type type006 is table of type005;
+ pkgvar type006;
+ procedure checkfunc();
+ procedure checkpkgvar();
+ procedure checkpkgvar2();
+ function checknestset() return integer;
+end pkg_nesttb;
+/
+
+
+create or replace package body pkg_nesttb
+is
+ procedure checkfunc()
+ is
+ xx type006;
+ begin
+ xx(1)(2)(3)(4)(5)(6):=3;
+ xx(2)(2)(3)(4)(5)(6):=4;
+ xx(4)(2)(3)(4)(5)(6):=4;
+ raise info 'first %', xx.first;
+ raise info 'last %', xx.last;
+ raise info 'count %', xx.count;
+ raise info 'exist %', xx.exists(1);
+ raise info 'exist % ', xx.exists(5);
+ raise info 'next %', xx.next(xx.first);
+ raise info 'prior %', xx.prior(xx.last);
+ xx.delete(1);
+ raise info 'count %', xx.count;
+ raise info 'xx1 %', xx(1)(2)(3)(4)(5)(6);
+ xx.delete;
+ raise info 'count %', xx.count;
+ raise info 'xx %', xx;
+ end;
+ function checknestset() return integer
+ is
+ xx type002;
+ yy type002;
+ begin
+ xx(1)(2):=3;
+ xx(3)(2):=4;
+ yy := xx;
+ raise info 'xx %', xx(1)(2);
+ raise info 'yy %', yy(1)(2);
+ xx(1)(2):=10;
+ raise info 'yy %', yy(1)(2);
+ raise info 'xx %', xx(1)(2);
+ raise info 'xx(1) % ', xx('1');
+ return 1;
+ end;
+ procedure checkpkgvar()
+ is
+ xx type006;
+ begin
+ xx(2)(2)(4)(2)(2)(3):=9;
+ raise info 'pkgvar %', pkgvar(2)(2)(4)(2)(2)(3);
+ pkgvar :=xx;
+ raise info 'pkgvar %', pkgvar(2)(2)(4)(2)(2)(3);
+ end;
+ procedure checkpkgvar2()
+ as
+ begin
+ pkgvar(1)(2)(3)(4)(5)(6):=100;
+ pkgvar(2)(2)(4)(2)(2)(3):=4;
+ raise info 'pkgvar %', pkgvar(2)(2)(4)(2)(2)(3);
+ end;
+ end pkg_nesttb;
+ /
+
+
+create or replace package pkg_nesttb2
+is
+ type type001 is table of number(5) index by integer;
+ type type002 is table of type001 index by integer;
+ type type003 is table of type002;
+ type type004 is table of type003;
+ type type005 is table of type004;
+ type type006 is table of type005;
+ type type007 is table of type006;
+ pkgvar type007;
+end pkg_nesttb2;
+/
+
+
+create or replace package pkg_nesttb_char
+is
+ type type001 is table of number(5) index by varchar;
+ type type002 is table of type001 index by varchar;
+ type type003 is table of type002 index by varchar;
+ type type004 is table of type003 index by varchar;
+ type type005 is table of type004 index by varchar;
+ type type006 is table of type005 index by varchar;
+ procedure checkfunc();
+ function checknestset() return integer;
+ procedure checkpkgvar();
+end pkg_nesttb_char;
+/
+
+
+create or replace package body pkg_nesttb_char
+is
+ procedure checkfunc()
+ is
+ xx type006;
+ begin
+ xx('a')('b')('c')('c')('x')('z'):=3;
+ xx('x')('b')('c')('c')('x')('z'):=4;
+ xx('c')('b')('p')('c')('x')('z'):=4;
+ raise info 'first %', xx.first;
+ raise info 'last %', xx.last;
+ raise info 'count %', xx.count;
+ raise info 'exist %', xx.exists('a');
+ raise info 'exist % ', xx.exists('');
+ raise info 'next %', xx.next(xx.first);
+ raise info 'prior %', xx.prior(xx.last);
+ xx.delete('x');
+ raise info 'count %', xx.count;
+ raise info 'xx1 %', xx('c')('b')('c')('c')('x')('z');
+ xx.delete;
+ end;
+ function checknestset() return integer
+ is
+ xx type002;
+ yy type002;
+ begin
+ xx('b')('c'):=3;
+ xx('a')('c'):=4;
+ yy := xx;
+ raise info 'xx %', xx('b')('c');
+ raise info 'yy %', yy('a')('c');
+ xx('a')('c'):=10;
+ raise info 'yy %', yy('a')('c');
+ raise info 'xx %', xx('a')('c');
+ raise info 'xx(1) % ', xx('1');
+ return 1;
+ end;
+ procedure checkpkgvar()
+ as
+ begin
+ raise info 'pkgvar %', pkg_nesttb.pkgvar(2)(2)(4)(2)(2)(3);
+ end;
+ end pkg_nesttb_char;
+ /
+
+call pkg_nesttb.checkfunc();
+call pkg_nesttb.checknestset();
+call pkg_nesttb_char.checkpkgvar();
+call pkg_nesttb.checkpkgvar();
+call pkg_nesttb.checkpkgvar2();
+call pkg_nesttb_char.checknestset();
+call pkg_nesttb_char.checkfunc();
+
+create or replace package pck20 is
+type tp_1 is table of varchar2;
+type tp_2 is table of varchar2 index by varchar2;
+procedure p1(a tp_1,b int);
+procedure p1(a tp_2,b int);
+end pck20;
+/
+
+create or replace package pck20 is
+type tp_1 is table of varchar2;
+type tp_2 is table of varchar2 index by varchar2;
+procedure p1(a out tp_1,b int);
+procedure p1(a out tp_2,b int);
+end pck20;
+/
+
+set behavior_compat_options='proc_outparam_override';
+create or replace package pck20 is
+type tp_1 is table of varchar2;
+type tp_2 is table of varchar2 index by varchar2;
+procedure p1(a out tp_1,b int);
+procedure p1(a out tp_2,b int);
+end pck20;
+/
+
+create or replace package pck20 is
+type tp_1 is table of varchar2;
+type tp_2 is table of varchar2 index by varchar2;
+procedure p1(a tp_1,b int);
+procedure p1(a tp_2,b int);
+end pck20;
+/
+
+create table tytbl114(c1 varchar2(20),c2 int);
+insert into tytbl114 values('aaaaa',1);
+insert into tytbl114 values('bbbbb',2);
+insert into tytbl114 values('ccccc',3);
+insert into tytbl114 values('ddddd',4);
+insert into tytbl114 values('eeeee',5);
+insert into tytbl114 values('fffff',6);
+--I1.
out +create or replace package pkg114 +as +type ty0 is record (c1 int,c2 varchar2(20)); +type ty1 is table of ty0 index by integer; +procedure p1(c1 out ty1,c2 out ty1); +procedure p1(c1 out ty1,c2 out ty0); +procedure p4(); +pv1 ty1; +pv2 ty0; +end pkg114; +/ + +create or replace package body pkg114 +as +procedure p1(c1 out ty1,c2 out ty1) +is +begin +for i in 1..6 loop +select c2,c1 into c1(i).c1,c1(i).c2 from tytbl114 where c2=i; +end loop; +c2:=c1; +c1.delete(3); +raise info 'c1.count is %',c1.count; +raise info 'c2.count is %',c2.count; +end; +procedure p1(c1 out ty1,c2 out ty0) +is +begin +for i in 1..6 loop +select c2,c1 into c1(i).c1,c1(i).c2 from tytbl114 where c2=i; +end loop; +c2:=c1(1); +raise info 'c1.count is %',c1.count; +raise info 'c2 is %',c2; +end; +procedure p4() +is +v1 ty1; +begin +p1(pv1,v1); +raise info 'pv1 is %',pv1; +p1(pv1,pv2); +raise info 'pv1 is %',pv1; +raise info 'pv2 is %',pv2; +end; +end pkg114; +/ + +call pkg114.p4(); + +drop package pkg114; +drop table tytbl114; +create table tytblnest007(c1 int,c2 number(8,1),c3 varchar2(20),c4 date,c5 clob,c6 blob); +insert into tytblnest007 values(1,1,'000',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'000','00123'); +insert into tytblnest007 values(2,2.0,'111',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'111','00123'); +insert into tytblnest007 values(3,3.5,'222',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'222','00123'); +insert into tytblnest007 values(4,4.7,'333',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'333','00123'); +insert into tytblnest007 values(5,8.8,'444',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'444','00123'); +insert into tytblnest007 values(6,9.6,'555',to_date('2022-01-01 12:34:56','yyyy-mm-dd hh:mi:ss'),'555','00123'); +create or replace package pkgnest007 +as +type ty0 is table of varchar2(20); +type ty1 is table of ty0 index by integer; +procedure p1(c1 out ty0); +procedure p2(); +pv1 ty1; +pv0 ty0; +end pkgnest007; +/ + +create or replace package body pkgnest007 +as +procedure p1(c1 out ty0) +is +begin +for i in 1..6 loop + select c3 into c1(i) from tytblnest007 where c1=i; +end loop; +raise info 'c1 is %',c1; +raise info 'c1.count is %',c1.count; +raise info 'c1.first is %',c1.first; +raise info 'c1.last is %',c1.last; +end; +procedure p2() +is +begin +pv1(1)(1):='11'; +pv1(1)(2):='12'; +pv1(1)(4):='14'; +pv1(3)(0):='30'; +pv1(3)(2):='32'; +pv1(3)(3):='33'; +pv1(6)(4):='64'; +pv1(6)(5):='65'; +pv1(6)(6):='66'; +pv1(2)(1):='21'; +pv1(2)(2):='22'; +for i in pv1.first..pv1.last loop +raise info 'pv1 % is %',i,pv1(i); +end loop; +raise info 'pv1.count is %',pv1.count; +raise info 'pv1.first is %',pv1.first; +raise info 'pv1.first.next is %',pv1.next(pv1.first); +raise info 'pv1.first.next2 is %',pv1.next(pv1.next(pv1.first)); +raise info 'pv1.first.next3 is %',pv1.next(pv1.next(pv1.next(pv1.first))); +raise info 'pv1.last is %',pv1.last; + +raise info 'pv1.first value is %',pv1(pv1.first); +raise info 'pv1.first.next value is %',pv1(pv1.next(pv1.first)); +raise info 'pv1.first.next2 value is %',pv1(pv1.next(pv1.next(pv1.first))); +raise info 'pv1.first.next3 value is %',pv1(pv1.next(pv1.next(pv1.next(pv1.first)))); +pv1.delete; +raise info 'pv1.count is %',pv1.count; +end; +end pkgnest007; +/ + +call pkgnest007.p2(); + +drop package pkgnest007; +drop table tytblnest007; + +create table pkgtbl067067 (c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob); +insert into pkgtbl067067 values(1,1,'c365','c365','1122b'); +insert into pkgtbl067067 
values(66,66,'c3665','c3665','1122b6'); +create or replace package pkg067067 +is +type type000 is table of pkgtbl067067%rowtype index by integer; +type type001 is record(c1 int,c2 number,c3 varchar2(30),c4 clob,c5 blob,c6 type000); +procedure proc067067_1(col3 type001,col4 out type001); +procedure proc067067_1(col3 type001,col4 out type001,col5 out type001); +procedure proc067067_2(col5 int); +end pkg067067; +/ + +create or replace package body pkg067067 +is +procedure proc067067_1(col3 type001,col4 out type001) +is +cursor cor is select c1,c2,c3,c4,c5 from pkgtbl067067 order by 1; +begin +open cor; +loop +fetch cor into col3.c1,col3.c2,col3.c3,col3.c4,col3.c5; +if col3.c1=1 then +col3.c6(1):=(col3.c1,col3.c2,col3.c3,col3.c4,col3.c5); +else +col3.c6(2):=(col3.c1,col3.c2,col3.c3,col3.c4,col3.c5); +end if; +exit when cor%notfound; +end loop; +raise info 'col3 is %',col3; +raise info 'col3.c5 is %',col3.c5; +raise info 'col3.c6.count is %',col3.c6.count; +col4:=col3; +--raise info 'col3.c6.first is %',col3.c6.first; +end; +procedure proc067067_1(col3 type001,col4 out type001,col5 out type001) +is +begin +col3.c1:=3441; +col3.c2:=3441; +col3.c3:='col344c3'; +col3.c4:='col344c4'; +col3.c5:='3c44c5'; +col3.c6(1):=(3441,3441,'col344c3','col344c4','3c44c5'); +col3.c6(2):=(23441,23441,'col2344c3','col2344c4','3c2344c5'); +col4.c1:=441; +col4.c2:=441; +col4.c3:='col44c3'; +col4.c4:='col44c4'; +col4.c5:='3c44c5'; +col4.c6(1):=(441,441,'col44c3','col44c4','3c44c5'); +col5.c1:=555; +col5.c2:=555; +col5.c3:='var555'; +col5.c4:='clob555'; +col5.c5:='b555'; +col5.c6(1):=(555,555,'var555','clob555','b555'); +end; +procedure proc067067_2(col5 int) +as +c1 type001; +c2 type001; +c3 type001; +begin +c1.c1:=1; +c1.c2:=1; +c1.c3:='c1c3'; +c1.c4:='c1c4'; +c1.c5:='c1c5'; +c1.c6(1):=(66,66,'66var','66clob','66bb'); +c2.c1:=1; +c2.c2:=1; +c2.c3:='c1c3'; +c2.c4:='c1c4'; +c2.c5:='c1c5'; +c2.c6(1):=(66,66,'66var','66clob','66bb'); +c3.c1:=22; +c3.c2:=22; +c3.c3:='varc3'; +c3.c4:='clobc4'; +c3.c5:='bbc5'; +c3.c6(2):=(2222,2222,'nest22','nestclob','bb22'); +proc067067_1(col3=>c1,col4=>c2,col5=>c3); +proc067067_1(col3=>c2,col4=>c3); +end; +end pkg067067; +/ + +drop package pkg067067; +drop table pkgtbl067067; + +set behavior_compat_options=''; + +drop package pkg_nesttb_char; +drop package pkg_nesttb; + +drop package pck2; +drop package pck3; +drop package pck4; +drop package pck5; +drop package pck6; +drop package pck7; +drop package pck8; +drop package pck9; +drop schema if exists plpgsql_tableofindex cascade; diff --git a/src/test/regress/sql/plsql_show_all_error.sql b/src/test/regress/sql/plsql_show_all_error.sql new file mode 100644 index 000000000..45be5d94a --- /dev/null +++ b/src/test/regress/sql/plsql_show_all_error.sql @@ -0,0 +1,1106 @@ +set plsql_show_all_error to on; +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +create table vector_to_number_tab_002(COL_TONUM varchar) ; +insert into vector_to_number_tab_002 values(to_number(+123.456, '9.9EEEE')); +CREATE OR REPLACE PACKAGE AA AS + + type ref_cursor IS ref CURSOR; + + PROCEDURE get_info(appinfo OUT ref_cursor); + + PROCEDURE get_info(appname IN varchar2, servinfo OUT ref_cursor); + + PROCEDURE get_switch(appname IN varchar2, + switchinfo OUT ref_cursor); + + PROCEDURE get_use(checkers OUT ref_cursor); + + PROCEDURE get_define(bb OUT ref_cursor); + + PROCEDURE get_resource_define(bbOUT ref_cursor); + + PROCEDURE get_bb_info(bbRef OUT ref_cursor); + + PROCEDURE get_resource_info(bbRef OUT ref_cursor); +END AA; +/ +select id from 
dbe_pldeveloper.gs_source;
+CREATE OR REPLACE PACKAGE AA AS
+
+  type ref_cursor IS ref CURSOR;
+
+  PROCEDURE get_info(appinfo OUT ref_cursor);
+
+  PROCEDURE get_info(appname IN varchar2, servinfo OUT ref_cursor);
+
+  PROCEDURE get_switch(appname IN varchar2,
+                       switchinfo OUT ref_cursor);
+
+  PROCEDURE get_use(checkers OUT ref_cursor);
+
+  PROCEDURE get_define(bb OUT ref_cursor);
+
+  PROCEDURE get_resource_define(bbOUT ref_cursor);
+
+  PROCEDURE get_bb_info(bbRef OUT ref_cursor);
+
+  PROCEDURE get_resource_info(bbRef OUT ref_cursor);
+END AA;
+/
+select id from dbe_pldeveloper.gs_source;
+
+CREATE OR REPLACE PROCEDURE exce_pro2()
+AS
+a int;
+b int;
+BEGIN
+    a:=2/0;
+    EXCEPTION
+        WHEN division_by_zeros THEN
+            insert into t1 value(1);
+END;
+/
+
+create or replace procedure pro1
+as
+begin
+drop t1; --missing the table keyword
+drop tables t1; --misspelled
+crop table t1; --misspelled
+create table t1 t2(c1 int,c2 int);--invalid table name
+create table t1(c1 ,c2 int);--missing data type
+create tables t1(c1 int,c2 int); --wrong line number
+creat table t1(c1 int,c2 int); --no line number flagged
+creat table (c1 int,c2 int) t1; --wrong clause order
+end;
+/
+
+select line,src from dbe_pldeveloper.gs_errors where name='pro1';
+
+CREATE OR REPLACE PACKAGE AA is
+
+
+  TYPE r_list IS RECORD(
+       indextype aa_mx_stat_info.indextype%TYPE,
+       value number);
+
+  TYPE cur_list IS REF CURSOR RETURN r_list;
+
+  TYPE r_avgop_time IS RECORD(
+       opname aa_mx_stat_opavgtime_info.opname%TYPE,
+       avgtime number);
+
+  TYPE cur_avgop_time IS REF CURSOR RETURN r_avgop_time;
+
+  aaedure aa_update_value(i_hostname in varchar2,
+                          i_indextype in varchar2,
+                          i_value number);
+
+  aaedure aa_value(o_list OUT cur_list);
+
+  aaedure aa_del_value;
+
+  aaedure aa_avgop_time(o_avgop_time OUT cur_avgop_time);
+
+  aaedure aa_del_avgop_time;
+
+  aaedure aa_update_monitor_flag(i_switchType in varchar2,
+                                 i_channelType in varchar2,
+                                 i_action in varchar2,
+                                 i_modifyuser in varchar2,
+                                 i_hostname in varchar2);
+end AA;
+
+/
+
+CREATE OR REPLACE PACKAGE AA as
+
+  procedure aa_remove(p_self in out nocopy json, pair_name varchar2);
+  procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value json_value, position pls_integer default null);
+  procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value varchar2, position pls_integer default null);
+  procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value number, position pls_integer default null);
+  procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value boolean, position pls_integer default null);
+  procedure aa_check_duplicate(p_self in out nocopy json, v_set boolean);
+  procedure aa_remove_duplicates(p_self in out nocopy json);
+
+  procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value json, position pls_integer default null);
+  procedure aa_bb(p_self in out nocopy json, pair_name varchar2, pair_value json_list, position pls_integer default null);
+
+  function aa_count(p_self in json) return number;
+  function aa_get(p_self in json, pair_name varchar2) return json_value;
+  function aa_get(p_self in json, position pls_integer) return json_value;
+  function aa_index_of(p_self in json, pair_name varchar2) return number;
+  function aa_exist(p_self in json, pair_name varchar2) return boolean;
+
+  function aa_to_char(p_self in json, spaces boolean default true, chars_per_line number default 0) return varchar2;
+  procedure aa_to_clob(p_self in json, buf in out nocopy clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true);
+  procedure aa_print(p_self in json, 
spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure aa_htp(p_self in json, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function aa_to_json_value(p_self in json) return json_value; + function aa_path(p_self in json, json_path varchar2, base number default 1) return json_value; + + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem json_value, base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem varchar2 , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem number , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem boolean , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem json_list , base number default 1); + procedure aa_path_bb(p_self in out nocopy json, json_path varchar2, elem json , base number default 1); + + procedure aa_path_remove(p_self in out nocopy json, json_path varchar2, base number default 1); + + function aa_get_values(p_self in json) return json_list; + function aa_get_keys(p_self in json) return json_list; + + --json_list type methods + procedure aa_append(p_self in out nocopy json_list, elem json_value, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem varchar2, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem number, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem boolean, position pls_integer default null); + procedure aa_append(p_self in out nocopy json_list, elem json_list, position pls_integer default null); + + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem json_value); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem varchar2); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem number); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem boolean); + procedure aa_replace(p_self in out nocopy json_list, position pls_integer, elem json_list); + + function aa_count(p_self in json_list) return number; + procedure aa_remove(p_self in out nocopy json_list, position pls_integer); + procedure aa_remove_first(p_self in out nocopy json_list); + procedure aa_remove_last(p_self in out nocopy json_list); + function aa_get(p_self in json_list, position pls_integer) return json_value; + function aa_head(p_self in json_list) return json_value; + function aa_last(p_self in json_list) return json_value; + function aa_tail(p_self in json_list) return json_list; + + function aa_to_char(p_self in json_list, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure aa_to_clob(p_self in json_list, buf in out nocopy clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure aa_print(p_self in json_list, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure aa_htp(p_self in json_list, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function aa_path(p_self in json_list, json_path varchar2, base number default 1) return json_value; + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem json_value, base 
number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem varchar2 , base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem number , base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem boolean , base number default 1); + procedure aa_path_bb(p_self in out nocopy json_list, json_path varchar2, elem json_list , base number default 1); + + procedure aa_path_remove(p_self in out nocopy json_list, json_path varchar2, base number default 1); + + function aa_to_json_value(p_self in json_list) return json_value; + + --json_value + + + function aa_get_type(p_self in json_value) return varchar2; + function aa_get_string(p_self in json_value, max_byte_size number default null, max_char_size number default null) return varchar2; + procedure aa_get_string(p_self in json_value, buf in out nocopy clob); + function aa_get_number(p_self in json_value) return number; + function aa_get_bool(p_self in json_value) return boolean; + function aa_get_null(p_self in json_value) return varchar2; + + function aa_is_object(p_self in json_value) return boolean; + function aa_is_array(p_self in json_value) return boolean; + function aa_is_string(p_self in json_value) return boolean; + function aa_is_number(p_self in json_value) return boolean; + function aa_is_bool(p_self in json_value) return boolean; + function aa_is_null(p_self in json_value) return boolean; + + function aa_to_char(p_self in json_value, spaces boolean default true, chars_per_line number default 0) return varchar2; + procedure aa_to_clob(p_self in json_value, buf in out nocopy clob, spaces boolean default false, chars_per_line number default 0, erase_clob boolean default true); + procedure aa_print(p_self in json_value, spaces boolean default true, chars_per_line number default 8192, jsonp varchar2 default null); + procedure aa_htp(p_self in json_value, spaces boolean default false, chars_per_line number default 0, jsonp varchar2 default null); + + function aa_value_of(p_self in json_value, max_byte_size number default null, max_char_size number default null) return varchar2; + + +end AA; +/ + +CREATE OR REPLACE PACKAGE aa AS + + null_as_empty_string boolean not null := true; + include_dates boolean not null := true; + include_clobs boolean not null := true; + include_blobs boolean not null := false; + + function executeList(stmt varchar2, bindvar json default null, cur_num number default null) return json_list; + + function executeObject(stmt varchar2, bindvar json default null, cur_num number default null) return json; + + function executeList2(stmt varchar2, bindvar json default null, cur_num NUMBER default null) return json_list; + + +end aa; +/ + +CREATE OR REPLACE PACKAGE AA IS + TYPE r_menu_list IS RECORD( + MENU_FLAG VARCHAR2(2), + MENU_num CTP_MENU.num%TYPE, + MENU_MINGZI CTP_MENU_NLS.MINGZI%TYPE, + MENU_STATUS CTP_MENU.STATUS%TYPE, + MENU_PARENT CTP_MENU.Parent_num%TYPE, + MENU_PRIVILAGE VARCHAR2(2), + MENU_SERIALNO CTP_MENU.Serialno%TYPE, + menu_LONG varchar2(2) + ); + TYPE r_AA IS RECORD( + AA_num CTP_AA.num%TYPE, + AA_MINGZI CTP_AA_NLS.MINGZI%TYPE, + AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE, + AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE, + AA_AA_LONG CTP_AA.AA_LONG%TYPE, + AA_BRANCH_num CTP_AA.BRANCH_num%TYPE, + AA_AA_LONG_ADMIN CTP_AA.AA_LONG_ADMIN%TYPE, + AA_BRANCH_num_ADMIN CTP_AA.BRANCH_num_ADMIN%TYPE, + AA_AA_CATEGORY CTP_AA.AA_CATEGORY%TYPE, + AA_LST_MODI_USER_num 
CTP_AA.LST_MODI_USER_num%TYPE,
+    AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE,
+    AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE,
+    AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE,
+    AA_PRIVILEGE CTP_AA.PRIVILEGE%TYPE,
+    AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE,
+    AA_BRANCH_LONG CTP_BRANCH.BRANCH_LONG%TYPE
+  );
+
+  TYPE r_AA_list IS RECORD(
+    AA_num CTP_AA.num%TYPE,
+    AA_MINGZI CTP_AA_NLS.MINGZI%TYPE,
+    AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE,
+    AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE,
+    AA_AA_LONG CTP_AA.AA_LONG%TYPE,
+    AA_BRANCH_num CTP_AA.BRANCH_num%TYPE,
+    AA_AA_LONG_ADMIN CTP_AA.AA_LONG_ADMIN%TYPE,
+    AA_BRANCH_num_ADMIN CTP_AA.BRANCH_num_ADMIN%TYPE,
+    AA_AA_CATEGORY CTP_AA.AA_CATEGORY%TYPE,
+    AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE,
+    AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE,
+    AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE,
+    AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE,
+    AA_PRIVILEGE CTP_AA.PRIVILEGE%TYPE,
+    AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE,
+    AA_BRANCH_LONG CTP_BRANCH.BRANCH_LONG%TYPE,
+    AA_USERNO VARCHAR2(5)
+
+  );
+  TYPE r_pos_AA_list IS RECORD(
+    AA_LST_MODI_USER_num CTP_AA.LST_MODI_USER_num%TYPE,
+    AA_LST_MODI_TIME CTP_AA.LST_MODI_TIME%TYPE,
+    AA_PRIVILEGE_ALL CTP_AA.PRIVILEGE_ALL%TYPE,
+    AA_PRIVILEGE_SELF CTP_AA.PRIVILEGE_SELF%TYPE,
+    AA_PRIVILEGE_OTHER CTP_AA.PRIVILEGE_OTHER%TYPE,
+    AA_num CTP_AA.num%TYPE,
+    AA_MINGZI CTP_AA_NLS.MINGZI%TYPE,
+    AA_DESCRIPTION CTP_AA_NLS.DESCRIPTION%TYPE,
+    AA_AA_LONG CTP_AA.AA_LONG%TYPE,
+    AA_BRANCH_num CTP_AA.BRANCH_num%TYPE,
+    AA_BRANCH_MINGZI CTP_BRANCH_NLS.MINGZI%TYPE
+  );
+
+  TYPE ref_AA IS REF CURSOR RETURN r_AA;
+  TYPE ref_AA_list IS REF CURSOR RETURN r_AA_list;
+  TYPE ret_pos_AA_list IS REF CURSOR RETURN r_pos_AA_list;
+  TYPE c_menu_list IS REF CURSOR RETURN r_menu_list;
+
+  FUNCTION aa_LG_AA_GETUSERNUM(
+    AAnum IN VARCHAR2
+  )
+  RETURN INTEGER;
+
+  PROCEDURE aa_LG_AA_GETBRANCHDEP( branchnum IN VARCHAR2, --branch code
+                out_flag OUT VARCHAR2, --stored procedure return flag
+                branchDep OUT number --current branch level depth
+                ) ;
+
+  PROCEDURE aa_LG_AA_QUERYUSERNUM(
+    AAnum IN VARCHAR2,
+    out_flag OUT VARCHAR2,
+    usr_num OUT number);
+
+  PROCEDURE aa_LG_AA_MODAAMENU(
+    privilege IN VARCHAR2,
+    menunum IN VARCHAR2,
+    AAnum IN VARCHAR2,
+    out_flag OUT VARCHAR2);
+
+  PROCEDURE aa_LG_AA_GETAAMENU(
+    AALONG IN VARCHAR2,
+    branchnum IN VARCHAR2,
+    AAnum IN VARCHAR2,
+    Language IN VARCHAR2,
+    out_flag OUT VARCHAR2,
+    o_menu_list OUT c_menu_list);
+
+  PROCEDURE aa_LG_AA_DELETEAA(AAnum IN VARCHAR2,
+                out_flag OUT VARCHAR2
+                );
+
+
+
+  PROCEDURE aa_LG_AA_UPDATEAA(AALONG IN VARCHAR2,
+                usernum IN VARCHAR2,
+                priAll IN VARCHAR2,
+                priSelf IN VARCHAR2,
+                priOther IN VARCHAR2,
+                AAnum IN VARCHAR2,
+                AAMINGZI IN VARCHAR2,
+                AADes IN VARCHAR2,
+                Language IN VARCHAR2,
+                out_flag OUT VARCHAR2
+                ) ;
+
+  PROCEDURE aa_LG_AA_ADDAA( AAnum IN VARCHAR2,
+                branchnum IN VARCHAR2,
+                AALONG IN VARCHAR2,
+                usernum IN VARCHAR2,
+                priSelf IN VARCHAR2,
+                priAll IN VARCHAR2,
+                priOther IN VARCHAR2,
+                AAMINGZI IN VARCHAR2,
+                AADes IN VARCHAR2,
+                Language IN VARCHAR2,
+                out_flag OUT VARCHAR2
+                ) ;
+
+  PROCEDURE aa_LG_AA_GETAALIST(branchnum IN VARCHAR2,
+                branchLONG IN VARCHAR2,
+                languageCode IN VARCHAR2,
+                begNum IN VARCHAR2,
+                fetchNum IN VARCHAR2,
+                out_flag OUT VARCHAR2,
+                totalNum OUT VARCHAR2,
+                ret_AA_list OUT ret_pos_AA_list
+                );
+
+  PROCEDURE aa_LG_AA_SEAAABYMINGZI(branchnum IN VARCHAR2,
+                branchLONG IN VARCHAR2,
+                languageCode IN VARCHAR2,
+                begNum IN VARCHAR2,
+                fetchNum IN VARCHAR2,
+                keyword IN VARCHAR2,
+                out_flag OUT VARCHAR2,
+                o_totalNum OUT VARCHAR2,
+                ret_AA_list OUT 
ret_pos_AA_list
+                );
+
+  PROCEDURE LOG(proc_MINGZI IN VARCHAR2,
+                info IN VARCHAR2);
+END AA;
+/
+
+CREATE OR REPLACE PACKAGE AA
+IS
+a int;
+end AA;
+/
+
+CREATE OR REPLACE PACKAGE BODY AA IS
+  FUNCTION AA_BB_GETCCNUM(
+    BBId IN VARCHAR2
+  )
+  RETURN INTEGER IS
+    temp INTEGER;
+  BEGIN
+    SELECT count(*) INTO temp FROM DD_BB_CC_REL WHERE BB_ID=BBId;
+    RETURN temp;
+  EXCEPTION
+    WHEN OTHERS THEN
+      log('AA_BB_getCCnum()',SQLERRM(SQLCODE));
+      RETURN -1;
+  END;
+
+  PROCEDURE AA_BB_GETBRANCHDEP( branchId IN VARCHAR2, --branch code
+                out_flag OUT VARCHAR2, --stored procedure return flag
+                branchDep OUT number --current branch level depth
+                ) IS
+    maxLevel number;
+    curLevel number;
+  BEGIN
+    out_flag := '0';
+    SELECT MAX(branch_Level) INTO maxLevel FROM DD_BRANCH;
+    SELECT branch_Level INTO curLevel FROM DD_BRANCH a where a.id=branchId;
+    branchDep := maxLevel-curLevel;
+  EXCEPTION
+    WHEN OTHERS THEN
+      log('AA_BB_deleteBB()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      rollback;
+      RETURN;
+  END;
+
+
+  PROCEDURE AA_BB_QUERYCCNUM(BBId IN VARCHAR2, --role code
+                out_flag OUT VARCHAR2,--stored procedure return flag
+                usr_num OUT number --number of associated users
+                ) IS
+  BEGIN
+    out_flag := '0';
+    usr_num:=AA_BB_getCCnum(BBId);
+    RETURN;
+  EXCEPTION
+    WHEN OTHERS THEN
+      log('AA_BB_queryCCnum()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      rollback;
+      RETURN;
+  END;
+
+
+  PROCEDURE AA_BB_MODBBMENU(privilege IN VARCHAR2, --menu privilege
+                menuId IN VARCHAR2, --menu code
+                BBId IN VARCHAR2, --role code
+                out_flag OUT VARCHAR2 --stored procedure return flag
+                ) IS
+    v_id VARCHAR2 (50);
+    v_flag VARCHAR2 (8);
+
+  BEGIN
+    out_flag := '0';
+    SELECT nvl(SUBSTR(menuId,1,INSTR(menuId,'M')-1),menuId) INTO v_id FROM dual;
+    SELECT SUBSTR(menuId,1,1) INTO v_flag FROM dual;
+    delete from DD_BB_MENU_REL where BB_ID=BBId and MENU_ID=v_id;
+    delete from DD_BB_ITEM_REL where BB_ID=BBId and ITEM_ID=v_id;
+    update DD_BB_CC_REL set MENUCHG_FLAG='1' where BB_ID=BBId;
+
+
+    IF privilege!='-1' THEN
+      IF v_flag ='M' THEN
+        insert into DD_BB_MENU_REL(BB_ID,MENU_ID,PRIVILEGE) VALUES(BBId,v_id,privilege);
+      END IF;
+      IF v_flag='I'THEN
+        insert into DD_BB_ITEM_REL(BB_ID,ITEM_ID,PRIVILEGE) VALUES(BBId,v_id,privilege);
+      END IF;
+    END IF;
+    commit;
+    RETURN;
+  EXCEPTION
+    WHEN OTHERS THEN
+      log('AA_BB_modifyBBmenu()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      rollback;
+      RETURN;
+  END;
+
+  PROCEDURE AA_BB_GETBBMENU(BBLevel IN VARCHAR2, --role level
+                branchId IN VARCHAR2, --branch code
+                BBId IN VARCHAR2, --role code
+                Language IN VARCHAR2, --language code
+                out_flag OUT VARCHAR2, --stored procedure return flag
+                o_menu_list OUT c_menu_list --menu list
+                ) IS
+  BEGIN
+    out_flag := '0';
+    OPEN o_menu_list FOR
+select 'M' flag,
+       menu.menuId menuId,
+       menu_nls.default_label menuName,
+       menu.menuState menuState,
+       menu.menuParentId menuParentId,
+       menu.menuPrivilege menuPrivilege,
+       menu.menCCialNo menCCialNo,
+       level blevel
+  from (select menuId, menuState, menuParentId, menuPrivilege, menCCialNo
+          from (select distinct a.id menuId,
+                       F.name menuName,
+                       a.status menuState,
+                       a.parent_id menuParentId,
+                       '-1' menuPrivilege,
+                       a.serialNo menCCialNo
+                  from DD_MENU a, DD_MENU_NLS F
+                 start with a.id in
+                 (SELECT distinct b.menu_id
+                    FROM DD_MENU_ITEM_REL b
+                   WHERE b.item_id in
+                   (SELECT c.ID
+                      FROM DD_ITEM c
+                     WHERE c.status = '1'
+                       and func_DD_lg_canBeUsedByBB(branchId,
+                                                    BBLevel,
+                                                    c.item_level,
+                                                    c.item_branch_id) = '1'))
+                connect by a.id = prior a.parent_id
+                       and f.locale = Language
+                       and a.id = f.id)
+         where menuId not in (select d.menu_id
+                                from DD_BB_menu_REL d
+                               WHERE d.BB_ID = BBId)
+        union
+        select menuId, menuState, menuParentId, e.Privilege, menCCialNo
+          from (select distinct a.id menuId,
+                       F.name menuName,
+                       
a.status menuState,
+                       a.parent_id menuParentId,
+                       '-1' menuPrivilege,
+                       a.serialNo menCCialNo
+                  from DD_MENU a, DD_MENU_NLS F
+                 start with a.id in
+                 (SELECT distinct b.menu_id
+                    FROM DD_MENU_ITEM_REL b
+                   WHERE b.item_id in
+                   (SELECT c.ID
+                      FROM DD_ITEM c
+                     WHERE (c.status = '1' and
+                           func_DD_lg_canBeUsedByBB(branchId,
+                                                    BBLevel,
+                                                    c.item_level,
+                                                    c.item_branch_id) = '1')))
+                connect by a.id = prior a.parent_id
+                       and f.locale = Language
+                       and a.id = f.id) d,
+               DD_BB_MENU_rel e
+         where e.BB_id = BBId
+           and e.menu_id = d.menuId) menu,
+       DD_menu_nls menu_nls
+ where menu_nls.locale = Language
+   and menu.menuId = menu_nls.id
+CONNECT BY PRIOR MENUID = MENUPARENTID
+ START WITH MENUPARENTID IS NULL
+UNION
+select 'I' flag,
+       itemId,
+       itemName,
+       itemState,
+       menuId,
+       itemPrivilege,
+       itemSerialNo,
+       menu.blevel + 1 blevel
+  from (select distinct a.item_id itemId,
+               d.DEFAULT_LABEL itemName,
+               b.STATUS itemState,
+               a.menu_id menuId,
+               '-1' itemPrivilege,
+               a.serialno itemSerialNo
+          from DD_menu_item_rel a, DD_item b, DD_item_nls d
+         where b.status = '1'
+           and d.locale = Language
+           and b.id = d.id
+           and func_DD_lg_canBeUsedByBB(branchId,
+                                        BBLevel,
+                                        b.item_Level,
+                                        b.item_Branch_Id) = '1'
+           and b.id not in (select c.item_id
+                              from DD_BB_ITEM_REL c
+                             WHERE c.BB_ID = BBId)
+           and b.id = a.item_id
+        union
+        select distinct a.item_id itemId,
+               d.DEFAULT_LABEL itemName,
+               b.STATUS itemState,
+               a.menu_id menuId,
+               c.privilege itemPrivilege,
+               a.serialno itemSerialNo
+          from DD_menu_item_rel a,
+               DD_item b,
+               DD_item_nls d,
+               DD_BB_item_rel c
+         where b.status = '1'
+           and d.locale = Language
+           and b.id = d.id
+           and func_DD_lg_canBeUsedByBB(branchId,
+                                        BBLevel,
+                                        b.item_Level,
+                                        b.item_Branch_Id) = '1'
+           and c.BB_id = BBId
+           and c.item_id = b.id
+           and b.id = a.item_id) item,
+       (select t.id, level blevel
+          from DD_menu t
+         start with t.parent_id is null
+        connect by t.parent_id = prior t.id) menu
+ where item.menuid = menu.id;
+
+    RETURN;
+  EXCEPTION
+    WHEN OTHERS THEN
+      if o_menu_list%isopen
+      then
+        close o_menu_list;
+      end if;
+      log('AA_BB_getBBmenu()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      RETURN;
+  END;
+
+
+  PROCEDURE AA_BB_DELETEBB(BBId IN VARCHAR2, --role code
+                out_flag OUT VARCHAR2 --stored procedure return flag
+                ) IS
+  BEGIN
+    out_flag := '0';
+    DELETE FROM DD_BB_CC_REL WHERE BB_ID=BBId;
+    DELETE FROM DD_BB_ITEM_REL WHERE BB_ID=BBId;
+    DELETE FROM DD_BB_MENU_REL WHERE BB_ID=BBId;
+    DELETE FROM DD_BB_NLS WHERE ID=BBId;
+    DELETE FROM DD_BB WHERE ID=BBId;
+    --commit;
+  EXCEPTION
+    WHEN OTHERS THEN
+      log('AA_BB_deleteBB()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      rollback;
+      RETURN;
+  END;
+
+
+  PROCEDURE AA_BB_UPDATEBB(BBLevel IN VARCHAR2, --role level
+                CCId IN VARCHAR2, --user code
+                priAll IN VARCHAR2, --all-branches flag
+                priSelf IN VARCHAR2, --own-branch flag
+                priOther IN VARCHAR2, --maintainable subordinate branch level
+                BBId IN VARCHAR2, --role code
+                BBName IN VARCHAR2, --role name
+                BBDes IN VARCHAR2, --role description
+                Language IN VARCHAR2, --language code
+                out_flag OUT VARCHAR2 --stored procedure return flag
+                ) IS
+    tmp_num number;
+
+  BEGIN
+    out_flag := '0';
+
+    UPDATE DD_BB SET BB_LEVEL=BBLevel,LST_MODI_TIME=TO_CHAR(SYSDATE, 'YYYYMMDD'),LST_MODI_CC_ID=CCId,PRIVILEGE_ALL=priAll,PRIVILEGE_SELF=priSelf,PRIVILEGE_OTHER=priOther WHERE ID=BBId;
+
+    -- UPDATE DD_BB_NLS SET NAME=BBName,DESCRIPTION=BBDes WHERE ID=BBId and LOCALE=Language;
+    select count(*)
+      into tmp_num
+      from DD_BB_NLS r
+     where r.id = BBId
+       AND r.locale = Language;
+
+    if (tmp_num = 0) then
+      INSERT INTO DD_BB_NLS
+        (ID,NAME,DESCRIPTION,LOCALE)
+      VALUES
+        (BBId,
+         BBName,
+         BBDes,
+         Language);
+    else
+      UPDATE DD_BB_NLS SET 
NAME=BBName,DESCRIPTION=BBDes WHERE ID=BBId and LOCALE=Language;
+    end if;
+    --COMMIT;
+  EXCEPTION
+    WHEN OTHERS THEN
+      log('AA_BB_updateBB()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      ROLLBACK;
+      RETURN;
+  END;
+
+
+  PROCEDURE AA_BB_ADDBB( BBId IN VARCHAR2, --role code
+                branchId IN VARCHAR2, --branch code
+                BBLevel IN VARCHAR2, --role level
+                CCId IN VARCHAR2, --user code
+                priSelf IN VARCHAR2, --own-branch flag
+                priAll IN VARCHAR2, --all-branches flag
+                priOther IN VARCHAR2, --maintainable subordinate branch level
+                BBName IN VARCHAR2, --role name
+                BBDes IN VARCHAR2, --role description
+                Language IN VARCHAR2, --language code
+                out_flag OUT VARCHAR2 --stored procedure return flag
+                ) IS
+BBCount NUMBER;
+  BEGIN
+    out_flag := '0';
+
+    SELECT count(*)
+      INTO BBCount
+      FROM DD_BB a
+     WHERE a.id = BBId;
+
+    IF BBCount != 0 THEN --the role ID already exists
+      out_flag := '-1';
+      RETURN;
+    END IF;
+
+    INSERT INTO DD_BB(ID,BRANCH_ID,BB_LEVEL,LST_MODI_TIME,LST_MODI_CC_ID,PRIVILEGE_SELF,PRIVILEGE_ALL,PRIVILEGE_OTHER) VALUES(BBId,branchId,BBLevel,TO_CHAR(SYSDATE, 'YYYYMMDD'),CCId,priSelf,priAll,priOther);
+
+    INSERT INTO DD_BB_NLS(ID,NAME,DESCRIPTION,LOCALE) VALUES(BBId,BBName,BBDes,Language);
+
+    --COMMIT;
+  EXCEPTION
+    WHEN OTHERS THEN
+      PCKG_DD_LG_PUBLIC.log('AA_BB_addBB()',SQLERRM(SQLCODE));
+      out_flag := '-2';
+      ROLLBACK;
+      RETURN;
+  END;
+
+  PROCEDURE AA_BB_GETBBLIST(branchId IN VARCHAR2, --branch code
+                branchLevel IN VARCHAR2, --branch level
+                languageCode IN VARCHAR2, --language code
+                begNum IN VARCHAR2, --fetch start position
+                fetchNum IN VARCHAR2, --fetch count
+                out_flag OUT VARCHAR2, --stored procedure return flag
+                totalNum OUT VARCHAR2, --total count returned
+                ret_BB_list OUT ret_pos_BB_list--role list
+                )IS
+
+  BEGIN
+    out_flag := '0';
+
+    SELECT COUNT(*)
+      INTO totalNum
+      FROM DD_BB A
+     WHERE(a.BRANCH_ID=branchId
+        or (substr(a.BB_level,branchLevel,1) ='1'
+       and branchId in
+           (select d.id
+              from DD_branch d start with d.id=a.BRANCH_ID
+           connect by prior d.id=d.parent_id)));
+
+    OPEN ret_BB_list FOR
+      SELECT lastModifyCC,
+             lastModifyDate,
+             priAll,
+             priSelf,
+             priOther,
+             BBId,
+             BBName,
+             BBDes,
+             BBLevel,
+             BBBranchId,
+             BBBranchName
+        FROM
+         (SELECT lastModifyCC,
+                 lastModifyDate,
+                 priAll,
+                 priSelf,
+                 priOther,
+                 BBId,
+                 BBName,
+                 BBDes,
+                 BBLevel,
+                 BBBranchId,
+                 BBBranchName,
+                 ROWNUM row_id
+            FROM
+             (SELECT DISTINCT A.LST_MODI_CC_ID lastModifyCC,
+                     A.LST_MODI_TIME lastModifyDate,
+                     A.PRIVILEGE_ALL priAll,
+                     A.PRIVILEGE_SELF priSelf,
+                     A.PRIVILEGE_OTHER priOther,
+                     a.ID BBId,
+                     a.NAME BBName,
+                     a.DESCRIPTION BBDes,
+                     a.BB_LEVEL BBLevel,
+                     a.BRANCH_ID BBBranchId,
+                     c.name BBBranchName,
+                     ROWNUM ROW_ID
+                FROM (select e.LST_MODI_CC_ID,
+                             e.LST_MODI_TIME,
+                             e.PRIVILEGE_ALL,
+                             e.PRIVILEGE_SELF,
+                             e.PRIVILEGE_OTHER,
+                             e.ID,
+                             f.NAME,
+                             f.DESCRIPTION,
+                             e.BB_LEVEL,
+                             e.BRANCH_ID
+                        from DD_BB e
+                        left outer join DD_BB_nls f on e.id = f.id
+                         and f.locale = languageCode) A,DD_BRANCH_NLS c, (select b.id
+                          from DD_branch b
+                         start with b.id = branchId
+                        connect by b.id = prior b.parent_id) tempb
+               WHERE (a.BRANCH_ID = branchId
+                  or
+                     substr(a.BB_level, branchLevel, 1) = '1')
+                 AND C.ID = A.BRANCH_ID
+                 AND tempb.id = a.branch_Id
+                 AND C.LOCALE = languageCode
+               order by BBId asc)
+         WHERE ROW_ID =to_number(begNum);
+
+
+  EXCEPTION
+    WHEN OTHERS THEN
+      if ret_BB_list%isopen
+      then
+        close ret_BB_list;
+      end if;
+      log('AA_BB_getBBlist()',SQLERRM(SQLCODE));
+      -- out_flag := SQLERRM(SQLCODE);
+      out_flag := '-1';
+      RETURN;
+  END;
+
+
+  PROCEDURE AA_BB_SEABBBYNAME(branchId IN VARCHAR2, --branch code
+                branchLevel IN VARCHAR2, --branch level
+                languageCode IN VARCHAR2, --language code
+                begNum IN VARCHAR2, --fetch start position
+                fetchNum IN VARCHAR2, --fetch count
+                keyword IN VARCHAR2, --role name keyword
+                out_flag OUT VARCHAR2, --stored procedure return flag
+                o_totalNum OUT 
VARCHAR2, --total count returned
+                ret_BB_list OUT ret_pos_BB_list --role list
+                ) IS
+    v_BBName varchar2(20);
+  BEGIN
+    out_flag := '0';
+
+    v_BBName := keyword;
+    if(v_BBName is null) then
+      v_BBName := '%';
+    end if;
+
+    SELECT COUNT(*) INTO o_totalNum
+      FROM DD_BB A,DD_BB_NLS B
+     where B.NAME like '%'||v_BBName||'%'
+       AND A.ID = B.ID
+       AND B.LOCALE=languageCode;
+
+    OPEN ret_BB_list FOR
+      SELECT lastModifyCC,
+             lastModifyDate,
+             priAll,
+             priSelf,
+             priOther,
+             BBId,
+             BBName,
+             BBDes,
+             BBLevel,
+             BBBranchId,
+             BBBranchName
+        FROM (SELECT lastModifyCC,
+                     lastModifyDate,
+                     priAll,
+                     priSelf,
+                     priOther,
+                     BBId,
+                     BBName,
+                     BBDes,
+                     BBLevel,
+                     BBBranchId,
+                     BBBranchName
+                FROM (SELECT lastModifyCC,
+                             lastModifyDate,
+                             priAll,
+                             priSelf,
+                             priOther,
+                             BBId,
+                             BBName,
+                             BBDes,
+                             BBLevel,
+                             BBBranchId,
+                             BBBranchName,
+                             ROWNUM row_id
+                        FROM (SELECT DISTINCT A.LST_MODI_CC_ID lastModifyCC,
+                                     A.LST_MODI_TIME lastModifyDate,
+                                     A.PRIVILEGE_ALL priAll,
+                                     A.PRIVILEGE_SELF priSelf,
+                                     A.PRIVILEGE_OTHER priOther,
+                                     a.ID BBId,
+                                     a.NAME BBName,
+                                     a.DESCRIPTION BBDes,
+                                     a.BB_LEVEL BBLevel,
+                                     a.BRANCH_ID BBBranchId,
+                                     c.name BBBranchName,
+                                     ROWNUM ROW_ID
+                                FROM (select e.LST_MODI_CC_ID,
+                                             e.LST_MODI_TIME,
+                                             e.PRIVILEGE_ALL,
+                                             e.PRIVILEGE_SELF,
+                                             e.PRIVILEGE_OTHER,
+                                             e.ID,
+                                             f.NAME,
+                                             f.DESCRIPTION,
+                                             e.BB_LEVEL,
+                                             e.BRANCH_ID
+                                        from DD_BB e
+                                        left outer join DD_BB_nls f on e.id = f.id
+                                         and f.locale = languageCode) A,DD_BRANCH_NLS c, (select b.id
+                                          from DD_branch b
+                                         start with b.id = branchId
+                                        connect by b.id = prior b.parent_id) tempb
+                               WHERE (a.BRANCH_ID = branchId
+                                  or
+                                     substr(a.BB_level, branchLevel, 1) = '1')
+                                 AND C.ID = A.BRANCH_ID
+                                 AND tempb.id = a.branch_Id
+                                 AND C.LOCALE = languageCode
+                               order by BBId asc)
+                       WHERE ROW_ID =to_number(begNum)) BBSet
+       WHERE BBSet.BBName LIKE '%'||v_BBName||'%';
+
+  EXCEPTION
+    WHEN OTHERS THEN
+      if ret_BB_list%isopen
+      then
+        close ret_BB_list;
+      end if;
+      log('DD_proc_BB_getBBlist()',SQLERRM(SQLCODE));
+      out_flag := '-1';
+      RETURN;
+  END;
+
+
+  PROCEDURE log(proc_name IN VARCHAR2,
+                info IN VARCHAR2) IS
+    PRAGMA AUTONOMOUS_TRANSACTION;
+    time_str VARCHAR2(100);
+  BEGIN
+    --IF check_log_on THEN
+    SELECT to_char(SYSDATE,'mm - dd - yyyy hh24 :mi :ss')
+      INTO time_str
+      FROM dual;
+    INSERT INTO DD_proc_log
+    VALUES
+      (proc_name, time_str, info);
+    COMMIT;
+    --END IF;
+    RETURN;
+  END;
+
+END AA ;
+/
+
+create or replace procedure test_pro1
+as
+type tpc1 is ref cursor;
+--v_cur tpc1;
+begin
+open v_cur for select c1,c2 from tab1;
+end;
+/
+create table if not exists tb(eno int);
+create or replace procedure test_pro2
+as
+type t1 is table of tb.eno%type;
+v1 t1;
+begin
+forall i in 1 .. 
v1.count save exceptions + insert into tb values v1(i); +end; +/ +drop table if exists tb; +drop procedure if exists test_pro1; + +create table t1(c1 int, c2 int); +create or replace package pack2 is +procedure pro1(); +procedure pro2(); +end pack2; +/ +create or replace package body pack2 is +procedure pro1 +as +begin +update table t1 set c1=1 and c2=1; +end; +procedure pro2 +as +begin +update table t1 set c1=1 and c2=1; +end; +end pack2; +/ +drop table t1; +drop package pack2; + +create or replace function f(v int[]) return int +as +n int; +begin +n := v(); +return n; +end; +/ + +create or replace PACKAGE z_pk2 +AS + PROCEDURE pro(p1 int); +END z_pk2; +/ + +create or replace PACKAGE BODY z_pk2 +AS + p1 int := 1; + p2 int := 1 ; + PROCEDURE pro() + AS + p2 int; + BEGIN + select 1 into p2; + END; +END z_pk2; +/ + + +create or replace PACKAGE package_020 +AS + PROCEDURE pro1(p1 int,p2 int ,p3 VARCHAR2(5) ,p4 out int); + PROCEDURE pro2(p1 int,p2 out int,p3 inout varchar(20)); +END package_020; +/ +create or replace PACKAGE body package_020 +AS + PROCEDURE pro1(p1 int,p2 int ,p3 ,p4 out int ) + as + BEGIN + p4 := 0; + if p3 = '+' then + p4 := p1 + p2; + end if; + + if p3 = '-' then + p4 := p1 - p2; + end if; + + if p3 = '*' then + p4 := p1 * p2; + end if; + + if p3 = '/' then + p4 := p1 / p2; + end if; + END; + PROCEDURE pro2(p1 int,p2 out int,p3 inout varchar(20)) + AS + BEGIN + p2 := p1; + p3 := p1 ||'___a'; + END; +END package_020; +/ + +drop procedure pro1; +select line,src from dbe_pldeveloper.gs_errors order by line,src; + +create table pro_tblof_tbl_013(c1 number(3,2),c2 varchar2(20),c3 clob,c4 blob); +create type pro_tblof_013 is table of pro_tblof_tbl_013.c1%type; +create or replace procedure pro_tblof_pro_013_1() +as +tblof001 pro_tblof_013; +i int :=1; +cursor cor1 is select c1,c2,c3,c4 from pro_tblof_tbl_013 order by 1,2,3,4; +begin +open cor1; +loop +fetch cor1 into tblof001(i).c1,tblof001(i).c2,tblof001(i).c3,tblof001(i).c4; +EXIT WHEN cor1%NOTFOUND; +DBE_OUTPUT.PRINT_LINE('tblof001('||i||') is '||tblof001(i)); +i=i+1; +end loop; +close cor1; +raise info 'tblof001 is %',tblof001; +raise info 'i is%',i; +end; +/ +drop procedure pro_tblof_pro_013_1(); +drop type pro_tblof_013; +drop table pro_tblof_tbl_013; + +drop package if exists package_020; +drop package if exists z_pk2; +drop package if exists aa; +truncate DBE_PLDEVELOPER.gs_source; +truncate DBE_PLDEVELOPER.gs_errors; +set plsql_show_all_error to off; diff --git a/src/test/regress/sql/pri_any_package.sql b/src/test/regress/sql/pri_any_package.sql new file mode 100644 index 000000000..e2a188b72 --- /dev/null +++ b/src/test/regress/sql/pri_any_package.sql @@ -0,0 +1,166 @@ +CREATE USER test_create_any_package_role PASSWORD 'Gauss@1234'; +GRANT create any package to test_create_any_package_role; + +CREATE SCHEMA pri_package_schema; +set search_path=pri_package_schema; + +SET ROLE test_create_any_package_role PASSWORD 'Gauss@1234'; +set search_path=pri_package_schema; +drop package if exists pri_exp_pkg; + +create or replace package pri_exp_pkg as + user_exp EXCEPTION; +end pri_exp_pkg; +/ + +create or replace package body pri_exp_pkg as +end pri_exp_pkg; +/ + +create or replace function pri_func1(param int) return number +as +declare +a exception; +begin + if (param = 1) then + raise pri_exp_pkg.user_exp; + end if; + raise info 'number is %', param; + exception + when pri_exp_pkg.user_exp then + raise info 'user_exp raise'; + return 0; +end; +/ + +reset role; +set search_path=pri_package_schema; + +drop package if exists 
pkg_auth_1; +CREATE OR REPLACE package pkg_auth_1 +is +a int; +END pkg_auth_1; +/ +CREATE OR REPLACE package body pkg_auth_1 +is +END pkg_auth_1; +/ +drop package if exists pkg_auth_2; +CREATE OR REPLACE package pkg_auth_2 +is +b int; +procedure a(); +END pkg_auth_2; +/ +CREATE OR REPLACE package body pkg_auth_2 +is +procedure a +is +begin +pkg_auth_1.a:=1; +end; +END pkg_auth_2; +/ + +CREATE USER test_execute_any_package_role PASSWORD 'Gauss@1234'; +GRANT execute any package to test_execute_any_package_role; +SET ROLE test_execute_any_package_role PASSWORD 'Gauss@1234'; +set search_path=pri_package_schema; + +begin +pri_package_schema.pkg_auth_1.a:=1; +end; +/ + +begin +pri_package_schema.pkg_auth_2.b:=1; +end; +/ + + +reset role; +create user user_1 password 'Gauss@1234'; +create user user_2 password 'Gauss@1234'; +create user user_3 password 'Gauss@1234'; +create user user_any password 'Gauss@1234'; + +set role user_1 password 'Gauss@1234'; +create or replace package user_1.pri_pkg_same_arg_1 +is +a int; +end pri_pkg_same_arg_1; +/ +create or replace package body user_1.pri_pkg_same_arg_1 +is +end pri_pkg_same_arg_1; +/ + +set role user_2 password 'Gauss@1234'; +create or replace package user_2.pri_pkg_same_arg_2 +is +b int; +end pri_pkg_same_arg_2; +/ +create or replace package body user_2.pri_pkg_same_arg_2 +is +end pri_pkg_same_arg_2; +/ + +set role user_any password 'Gauss@1234'; +CREATE OR REPLACE package user_any.pkg_auth_2 +is +b int; +procedure a(); +END pkg_auth_2; +/ +CREATE OR REPLACE package body user_any.pkg_auth_2 +is +procedure a +is +begin +user_2.pri_pkg_same_arg_2.b:=1; +user_1.pri_pkg_same_arg_1.a:=2; +end; +END pkg_auth_2; +/ + +reset role; +GRANT create any package to user_any; +GRANT execute any package to user_any; +set role user_any password 'Gauss@1234'; +CREATE OR REPLACE package user_any.pkg_auth_2 +is +b int; +procedure a(); +END pkg_auth_2; +/ +CREATE OR REPLACE package body user_any.pkg_auth_2 +is +procedure a +is +begin +user_2.pri_pkg_same_arg_2.b:=1; +user_1.pri_pkg_same_arg_1.a:=2; +end; +END pkg_auth_2; +/ + +call user_any.pkg_auth_2.a(); + +set role user_3 password 'Gauss@1234'; +call user_any.pkg_auth_2.a(); +reset role; +GRANT execute any package to user_3; +set role user_3 password 'Gauss@1234'; +call user_any.pkg_auth_2.a(); + +reset role; +drop package pri_package_schema.pkg_auth_1; +drop package pri_package_schema.pkg_auth_2; +drop package pri_package_schema.pri_exp_pkg; +drop package user_1.pri_pkg_same_arg_1; +drop package user_2.pri_pkg_same_arg_2; +drop package user_any.pkg_auth_2; +drop schema pri_package_schema cascade; +drop user user_1,user_2,user_3,user_any,test_create_any_package_role,test_execute_any_package_role cascade; diff --git a/src/test/regress/sql/pri_create_any_index.sql b/src/test/regress/sql/pri_create_any_index.sql new file mode 100644 index 000000000..da7441913 --- /dev/null +++ b/src/test/regress/sql/pri_create_any_index.sql @@ -0,0 +1,67 @@ +CREATE USER test_create_any_index_role PASSWORD 'Gauss@1234'; +GRANT create any index to test_create_any_index_role; +CREATE USER test_create_any_index_role_test PASSWORD 'Gauss@1234'; +GRANT create any index to test_create_any_index_role_test; +CREATE SCHEMA pri_index_schema; +set search_path=pri_index_schema; + +CREATE TABLE pri_index_schema.pri_index +( + SM_SHIP_MODE_SK INTEGER NOT NULL, + SM_SHIP_MODE_ID CHAR(16) NOT NULL, + SM_TYPE CHAR(30) , + SM_CODE CHAR(10) , + SM_CARRIER CHAR(20) , + SM_CONTRACT CHAR(20) +); + +SET ROLE test_create_any_index_role PASSWORD 'Gauss@1234'; + 
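+-- the role holds only CREATE ANY INDEX, so creating indexes in another user's
+-- schema succeeds while the ALTER INDEX below is expected to fail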
+CREATE UNIQUE INDEX pri_index_schema.ds_ship_mode_t1_index1 ON pri_index_schema.pri_index(SM_SHIP_MODE_SK);
+--failed
+ALTER INDEX pri_index_schema.ds_ship_mode_t1_index1 UNUSABLE;
+--Create a B-tree index on the SM_SHIP_MODE_SK column of the table.
+CREATE INDEX pri_index_schema.ds_ship_mode_t1_index4 ON pri_index_schema.pri_index USING btree(SM_SHIP_MODE_SK);
+
+--Create an expression index on the SM_CODE column of the table.
+CREATE INDEX pri_index_schema.ds_ship_mode_t1_index2 ON pri_index_schema.pri_index(SUBSTR(SM_CODE,1 ,4));
+
+--Create a partial index on SM_SHIP_MODE_SK for rows where SM_SHIP_MODE_SK is greater than 10.
+CREATE UNIQUE INDEX pri_index_schema.ds_ship_mode_t1_index3 ON pri_index_schema.pri_index(SM_SHIP_MODE_SK) WHERE SM_SHIP_MODE_SK>10;
+
+--the drops cannot succeed
+SET ROLE test_create_any_index_role_test PASSWORD 'Gauss@1234';
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index1;
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index2;
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index3;
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index4;
+
+reset role;
+
+CREATE TABLE pri_index_schema.tmp_tbl(id int, c1 tsvector);
+
+SET ROLE test_create_any_index_role PASSWORD 'Gauss@1234';
+CREATE INDEX pri_index_schema.tmp_tbl_id_index ON pri_index_schema.tmp_tbl USING gist (c1);
+
+---failed
+ALTER TABLE pri_index_schema.ds_ship_mode_t1_index1 ADD CONSTRAINT PK_TBL_DOMAIN PRIMARY KEY (SM_SHIP_MODE_SK) USING INDEX;
+CREATE TABLE pri_index_schema.default_test (f1 int, f2 int);
+CREATE SEQUENCE pri_index_schema.sequence_test1 START WITH 32;
+CREATE FUNCTION pri_index_schema.pri_func_add_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+CREATE TYPE pri_index_schema.compfoo AS (f1 int, f2 text);
+
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index1;
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index2;
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index3;
+DROP INDEX pri_index_schema.ds_ship_mode_t1_index4;
+DROP INDEX pri_index_schema.tmp_tbl_id_index;
+reset role;
+DROP TABLE pri_index;
+DROP TABLE pri_index_schema.tmp_tbl;
+DROP SCHEMA pri_index_schema cascade;
+DROP USER test_create_any_index_role_test cascade;
+DROP USER test_create_any_index_role cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/pri_create_any_sequence.sql b/src/test/regress/sql/pri_create_any_sequence.sql
new file mode 100644
index 000000000..2578e2d43
--- /dev/null
+++ b/src/test/regress/sql/pri_create_any_sequence.sql
@@ -0,0 +1,65 @@
+CREATE USER test_create_any_sequence_role PASSWORD 'Gauss@1234';
+GRANT create any sequence to test_create_any_sequence_role;
+
+CREATE SCHEMA pri_sequence_schema;
+set search_path=pri_sequence_schema;
+
+SET ROLE test_create_any_sequence_role PASSWORD 'Gauss@1234';
+set search_path=pri_sequence_schema;
+---
+--- test creation of SERIAL column
+---
+CREATE TABLE serialTest (f1 text, f2 serial);
+reset role;
+GRANT create any table to test_create_any_sequence_role;
+SET ROLE test_create_any_sequence_role PASSWORD 'Gauss@1234';
+set search_path=pri_sequence_schema;
+CREATE TABLE serialTest (f1 text, f2 serial);
+
+INSERT INTO serialTest VALUES ('foo');
+INSERT INTO serialTest VALUES ('bar');
+INSERT INTO serialTest VALUES ('force', 100);
+SELECT * FROM serialTest ORDER BY f1, f2;
+
+reset role;
+revoke create any table from test_create_any_sequence_role;
+SET ROLE test_create_any_sequence_role PASSWORD 'Gauss@1234';
+-- basic sequence operations using both text and oid references
+CREATE SEQUENCE sequence_test;
+
+SELECT setval('sequence_test'::text, 32);
+SELECT nextval('sequence_test'::regclass);
+SELECT 
setval('sequence_test'::text, 99, false);
+SELECT nextval('sequence_test'::regclass);
+SELECT setval('sequence_test'::regclass, 32);
+SELECT nextval('sequence_test'::text);
+SELECT setval('sequence_test'::regclass, 99, false);
+SELECT nextval('sequence_test'::text);
+
+CREATE SEQUENCE sequence_test1 START WITH 32;
+CREATE SEQUENCE sequence_test2 START WITH 24 INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE;
+SELECT nextval('sequence_test2');
+SELECT nextval('sequence_test2');
+
+create sequence seqCycle maxvalue 5 cycle;
+
+--normal case with cache
+create sequence seq maxvalue 100 cache 5 increment 2 start 2;
+select seq.nextval;
+--failed
+CREATE TYPE pri_person_type1 AS (id int, name text); --permission denied
+CREATE TYPE pri_person_type2 AS (id int, name text); --permission denied
+CREATE FUNCTION pri_func_add_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+reset role;
+drop table pri_sequence_schema.serialtest;
+DROP SEQUENCE pri_sequence_schema.sequence_test;
+DROP SEQUENCE pri_sequence_schema.sequence_test1;
+DROP SEQUENCE pri_sequence_schema.sequence_test2;
+DROP SEQUENCE pri_sequence_schema.seqCycle;
+DROP SEQUENCE pri_sequence_schema.seq;
+DROP SCHEMA pri_sequence_schema cascade;
+DROP USER test_create_any_sequence_role cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/pri_create_any_type.sql b/src/test/regress/sql/pri_create_any_type.sql
new file mode 100644
index 000000000..4ab32922e
--- /dev/null
+++ b/src/test/regress/sql/pri_create_any_type.sql
@@ -0,0 +1,84 @@
+CREATE USER test_create_any_type_role PASSWORD 'Gauss@1234';
+GRANT create any type to test_create_any_type_role;
+
+CREATE SCHEMA pri_type_schema;
+set search_path=pri_type_schema;
+
+SET ROLE test_create_any_type_role PASSWORD 'Gauss@1234';
+
+CREATE TYPE pri_type_schema.compfoo AS (f1 int, f2 text);
+CREATE TABLE test_create_any_type_role.t1_compfoo(a int, b pri_type_schema.compfoo);
+
+--create an enum type
+CREATE TYPE pri_type_schema.bugstatus AS ENUM ('create', 'modify', 'closed');
+create type pri_type_schema.textrange_c as range(subtype=text, collation="C");
+
+CREATE TYPE pri_type_schema.bigobj (INPUT = lo_filein, OUTPUT = lo_fileout, INTERNALLENGTH = VARIABLE);
+
+CREATE TYPE pri_type_schema.int42;
+CREATE TYPE pri_type_schema.text_w_default;
+reset role;
+--verify drop privilege on the type
+CREATE USER test_create_any_type_role_test PASSWORD 'Gauss@1234';
+GRANT create any type to test_create_any_type_role_test;
+SET ROLE test_create_any_type_role_test PASSWORD 'Gauss@1234';
+DROP TYPE pri_type_schema.compfoo;
+reset role;
+
+CREATE FUNCTION pri_type_schema.int42_in(cstring)
+   RETURNS pri_type_schema.int42
+   AS 'int4in'
+   LANGUAGE internal STRICT;
+CREATE FUNCTION pri_type_schema.int42_out(pri_type_schema.int42)
+   RETURNS cstring
+   AS 'int4out'
+   LANGUAGE internal STRICT;
+
+CREATE FUNCTION pri_type_schema.text_w_default_in(cstring)
+   RETURNS pri_type_schema.text_w_default
+   AS 'textin'
+   LANGUAGE internal STRICT;
+
+CREATE FUNCTION pri_type_schema.text_w_default_out(pri_type_schema.text_w_default)
+   RETURNS cstring
+   AS 'textout'
+   LANGUAGE internal STRICT;
+SET ROLE test_create_any_type_role PASSWORD 'Gauss@1234';
+CREATE TYPE pri_type_schema.int42 (
+   internallength = 4,
+   input = pri_type_schema.int42_in,
+   output = pri_type_schema.int42_out,
+   alignment = int4,
+   default = 42,
+   passedbyvalue
+);
+CREATE TYPE pri_type_schema.text_w_default (
+   internallength = variable,
+   input = pri_type_schema.text_w_default_in,
+   output = 
pri_type_schema.text_w_default_out,
+   alignment = int4,
+   default = 'zippo'
+);
+---failed
+CREATE TABLE pri_type_schema.default_test (f1 int, f2 int);
+CREATE SEQUENCE pri_type_schema.sequence_test1 START WITH 32;
+CREATE FUNCTION pri_type_schema.pri_func_add_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+reset role;
+DROP TABLE test_create_any_type_role.t1_compfoo;
+drop type pri_type_schema.compfoo;
+drop type pri_type_schema.bugstatus;
+drop function pri_type_schema.int42_in(cstring);
+drop function pri_type_schema.int42_out(int42);
+drop type pri_type_schema.int42;
+drop function pri_type_schema.text_w_default_in(cstring);
+drop function pri_type_schema.text_w_default_out(text_w_default);
+drop type pri_type_schema.text_w_default;
+
+DROP SCHEMA pri_type_schema cascade;
+DROP USER test_create_any_type_role cascade;
+DROP USER test_create_any_type_role_test cascade;
diff --git a/src/test/regress/sql/pri_create_drop_any_table.sql b/src/test/regress/sql/pri_create_drop_any_table.sql
new file mode 100644
index 000000000..850d19360
--- /dev/null
+++ b/src/test/regress/sql/pri_create_drop_any_table.sql
@@ -0,0 +1,167 @@
+CREATE USER test_create_any_table_role PASSWORD 'Gauss@1234';
+GRANT create any table to test_create_any_table_role;
+
+CREATE SCHEMA pri_create_schema;
+set search_path=pri_create_schema;
+
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+
+CREATE table pri_create_schema.tb_pri (id int, name VARCHAR(10));
+ --create table
+CREATE TABLE pri_create_schema.TBL_DOMAIN_PRI
+(
+   IDOMAINID NUMBER(10) NOT NULL,
+   SDOMAINNAME VARCHAR2(30) NOT NULL,
+   b int
+);
+
+CREATE TABLE pri_create_schema.pri_test_hash (a int, b int);
+
+reset role;
+CREATE TYPE pri_create_schema.pri_person_type1 AS (id int, name text);
+CREATE TYPE pri_create_schema.pri_person_type2 AS (id int, name text);
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+CREATE TABLE pri_create_schema.pri_persons OF pri_create_schema.pri_person_type1;
+CREATE TABLE pri_create_schema.pri_stuff (id int);
+
+
+--trigger
+CREATE SEQUENCE pri_create_schema.serial1;--permission denied
+create table pri_create_schema.pri_trigtest (i serial primary key);--failed
+
+reset role;
+GRANT create any sequence to test_create_any_table_role;
+GRANT create any index to test_create_any_table_role;
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+CREATE SEQUENCE pri_create_schema.serial1;
+create table pri_create_schema.pri_trigtest (i serial primary key);
+
+reset role;
+revoke create any sequence,create any index from test_create_any_table_role;
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+create function pri_create_schema.pri_trigtest() returns trigger as $$
+begin
+  raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL;
+  return new;
+end;$$ language plpgsql; --failed ok
+reset role;
+create function pri_create_schema.pri_trigtest() returns trigger as $$
+begin
+  raise notice '% % % %', TG_RELNAME, TG_OP, TG_WHEN, TG_LEVEL;
+  return new;
+end;$$ language plpgsql;
+
+create table pri_create_schema.pri_trigtest_test (i serial primary key);
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+create trigger pri_trigtest_b_row_tg before insert or update or delete on pri_create_schema.pri_trigtest
+for each row execute procedure pri_create_schema.pri_trigtest(); --success: create a trigger on a table the role created itself
+create trigger pri_trigtest_b_row_tg_test before insert or update or delete on pri_create_schema.pri_trigtest_test
+for each 
+create table pri_create_schema.pri_storage_para_t1 (a int4, b text)
+WITH
+(
+    fillfactor =85,
+    autovacuum_enabled = ON,
+    toast.autovacuum_enabled = ON,
+    autovacuum_vacuum_threshold = 100,
+    toast.autovacuum_vacuum_threshold = 100,
+    autovacuum_vacuum_scale_factor = 10,
+    toast.autovacuum_vacuum_scale_factor = 10,
+    autovacuum_analyze_threshold = 8,
+    autovacuum_analyze_scale_factor = 9,
+-- autovacuum_vacuum_cost_delay: Valid values are between "0" and "100".
+    autovacuum_vacuum_cost_delay = 90,
+    toast.autovacuum_vacuum_cost_delay = 92,
+-- autovacuum_vacuum_cost_limit: Valid values are between "1" and "10000".
+    autovacuum_vacuum_cost_limit = 567,
+    toast.autovacuum_vacuum_cost_limit = 789,
+    autovacuum_freeze_min_age = 5000,
+    toast.autovacuum_freeze_min_age = 6000,
+-- autovacuum_freeze_max_age: Valid values are between "100000000" and "2000000000".
+    autovacuum_freeze_max_age = 300000000,
+    toast.autovacuum_freeze_max_age = 250000000,
+    autovacuum_freeze_table_age = 170000000,
+    toast.autovacuum_freeze_table_age = 180000000
+)
+partition by range (a)
+(
+    partition pri_storage_para_t1_p1 values less than (10),
+    partition pri_storage_para_t1_p2 values less than (20),
+    partition pri_storage_para_t1_p3 values less than (100)
+);
+
+CREATE TABLE pri_table(c_id int,c_first varchar(50) NOT NULL);
+
+--temp table
+CREATE TEMP TABLE pri_temp1 (a int primary key);
+reset role;
+CREATE TABLE pri_t1 (num int, name text);
+CREATE TABLE pri_t2 (num2 int, value text);
+SET ROLE test_create_any_table_role PASSWORD 'Gauss@1234';
+CREATE TEMP TABLE pri_tt (num2 int, value text);
+
+CREATE VIEW pri_create_schema.pri_nontemp1 AS SELECT * FROM pri_create_schema.pri_t1 CROSS JOIN pri_create_schema.pri_t2;
+CREATE VIEW pri_temporal1 AS SELECT * FROM pri_create_schema.pri_t1 CROSS JOIN pri_tt;
+
+create table pri_create_schema.replication_temp_test(id int);
+
+--create materialized view
+create table pri_create_schema.t1(c1 int,c2 int);
+insert into pri_create_schema.t1 values(1,1),(2,2); --success
+create incremental materialized view pri_create_schema.mv1 as select * from pri_create_schema.t1;
+
+CREATE TABLE pri_create_schema.pri_store_returns
+(
+    W_WAREHOUSE_SK INTEGER NOT NULL,
+    W_WAREHOUSE_ID CHAR(16) NOT NULL,
+    sr_item_sk VARCHAR(20),
+    W_WAREHOUSE_SQ_FT INTEGER
+);
+CREATE TABLE pri_create_schema.store_returns_t1 AS SELECT * FROM pri_create_schema.pri_store_returns WHERE sr_item_sk > '4795';
+
+
+--failed
+CREATE TYPE pri_create_schema.pri_type AS (id int, name text); --permission denied
+\! gs_ktool -d all
+\! gs_ktool -g
+CREATE CLIENT MASTER KEY pri_create_schema.ImgCMK WITH (KEY_STORE = gs_ktool, KEY_PATH = "gs_ktool/1" , ALGORITHM = AES_256_CBC);
+\! gs_ktool -d all
+CREATE SEQUENCE pri_create_schema.sequence_test1 START WITH 32;
+CREATE FUNCTION pri_create_schema.pri_func_add_sql(integer, integer) RETURNS integer
+AS 'select $1 + $2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+reset role;
+CREATE USER test_drop_any_table_role PASSWORD 'Gauss@1234';
+GRANT drop any table to test_drop_any_table_role;
+
+SET ROLE test_drop_any_table_role PASSWORD 'Gauss@1234';
+set search_path = pri_create_schema;
+drop table tbl_domain_pri;
+drop table pri_test_hash;
+drop table pri_persons;
+drop table pri_stuff;
+drop table pri_trigtest_test;
+drop table pri_storage_para_t1;
+drop table pri_table;
+drop view pri_temporal1;
+drop view pri_nontemp1;
+drop table pri_t1 cascade;
+drop table pri_t2 cascade;
+drop table replication_temp_test;
+drop materialized view mv1;
+drop table t1 cascade;
+--failed
+drop sequence serial1;
+drop function pri_trigtest();
+drop type pri_create_schema.pri_person_type1;
+drop type pri_create_schema.pri_person_type2;
+drop SEQUENCE pri_create_schema.serial1;
+
+reset role;
+drop type pri_create_schema.pri_person_type1;
+drop type pri_create_schema.pri_person_type2;
+drop SEQUENCE pri_create_schema.serial1;
+DROP USER test_drop_any_table_role cascade;
+DROP USER test_create_any_table_role cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/pri_dml_any_table.sql b/src/test/regress/sql/pri_dml_any_table.sql
new file mode 100644
index 000000000..7297806e6
--- /dev/null
+++ b/src/test/regress/sql/pri_dml_any_table.sql
@@ -0,0 +1,59 @@
+CREATE USER test_select_any_table_role PASSWORD 'Gauss@1234';
+
+CREATE SCHEMA pri_select_schema;
+set search_path=pri_select_schema;
+--create table
+CREATE table pri_select_schema.tb_pri (id int, name VARCHAR(10));
+insert into pri_select_schema.tb_pri values(1,'joe');
+
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+insert into pri_select_schema.tb_pri values(2,'ly');
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+delete pri_select_schema.tb_pri;
+
+reset role;
+GRANT select any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+insert into pri_select_schema.tb_pri values(1,'joe');
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+delete pri_select_schema.tb_pri;
+
+reset role;
+revoke select any table from test_select_any_table_role;
+GRANT insert any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+insert into pri_select_schema.tb_pri values(2,'johy');
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+delete pri_select_schema.tb_pri;
+
+reset role;
+revoke insert any table from test_select_any_table_role;
+GRANT update any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+insert into pri_select_schema.tb_pri values(3,'lili');
+--failed
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+delete pri_select_schema.tb_pri;
+reset role;
+grant select on table pri_select_schema.tb_pri to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+update pri_select_schema.tb_pri set name = 'gauss' where id = 1;
+
+reset role;
+revoke select on table pri_select_schema.tb_pri from test_select_any_table_role;
+revoke update any table from test_select_any_table_role;
+GRANT delete any table to test_select_any_table_role;
+SET ROLE test_select_any_table_role PASSWORD 'Gauss@1234';
+select * from pri_select_schema.tb_pri;
+insert into pri_select_schema.tb_pri values(3,'lili');
+update pri_select_schema.tb_pri set name = 'gauss' where id = 3;
+delete pri_select_schema.tb_pri;
+
+reset role;
+drop table pri_select_schema.tb_pri;
+DROP SCHEMA pri_select_schema cascade;
+DROP USER test_select_any_table_role cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/pri_indepent_any.sql b/src/test/regress/sql/pri_indepent_any.sql
new file mode 100644
index 000000000..47bff4318
--- /dev/null
+++ b/src/test/regress/sql/pri_indepent_any.sql
@@ -0,0 +1,82 @@
+--test independent (private) users
+CREATE USER any_table_role PASSWORD 'Gauss@1234';
+
+CREATE USER pri_user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc";
+set role pri_user_independent password "1234@abc";
+CREATE table pri_user_independent.tb_pri (id int, name VARCHAR(10));
+CREATE table pri_user_independent.tb_pri_test (id int, name VARCHAR(10));
+CREATE table pri_user_independent.tb_pri_test1 (id int, name VARCHAR(10));
+insert into pri_user_independent.tb_pri values(1, 'gauss');
+
+--ordinary user
+set role any_table_role PASSWORD 'Gauss@1234';
+select * from pri_user_independent.tb_pri;
+insert into pri_user_independent.tb_pri values(1,'joe');
+update pri_user_independent.tb_pri set name = 'gauss' where id = 1;
+delete from pri_user_independent.tb_pri;
+create table pri_user_independent.tt1(id int);
+ALTER TABLE pri_user_independent.tb_pri add column age int;
+DROP table pri_user_independent.tb_pri_test;
+
+--initial user
+reset role;
+select * from pri_user_independent.tb_pri;
+insert into pri_user_independent.tb_pri values(1,'joe');
+update pri_user_independent.tb_pri set name = 'gauss' where id = 1;
+delete from pri_user_independent.tb_pri;
+create table pri_user_independent.tt(id int);
+ALTER TABLE pri_user_independent.tb_pri add column age int;
+DROP table pri_user_independent.tb_pri_test;
+
+--select any table
+reset role;
+GRANT select any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+select * from pri_user_independent.tb_pri;
+
+--insert any table
+reset role;
+GRANT insert any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+insert into pri_user_independent.tb_pri values(2,'bob');
+
+--update any table
+reset role;
+GRANT update any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+update pri_user_independent.tb_pri set name = 'Bob' where id = 2;
+
+--delete any table
+reset role;
+GRANT delete any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+delete from pri_user_independent.tb_pri;
+
+--create any table
+reset role;
+GRANT create any table to any_table_role;
+SET ROLE any_table_role PASSWORD 'Gauss@1234';
+create table pri_user_independent.tt2(id int);
+
+--alter any table
+reset role;
+CREATE USER user_test_alter password 'Gauss@1234';
+GRANT alter any table to user_test_alter;
+SET ROLE user_test_alter PASSWORD 'Gauss@1234';
+ALTER TABLE pri_user_independent.tb_pri drop column age;
+
+--drop any table
+reset role;
+GRANT drop any table to user_test_alter;
+SET ROLE user_test_alter PASSWORD 'Gauss@1234';
+DROP table pri_user_independent.tb_pri_test1;
+
+reset role;
+DROP TABLE pri_user_independent.tb_pri;
+DROP TABLE pri_user_independent.tt;
+DROP TABLE pri_user_independent.tt2;
+
+DROP USER user_test_alter cascade;
+DROP USER any_table_role cascade;
+DROP USER pri_user_independent cascade;
+
diff --git a/src/test/regress/sql/pri_samenew_schema.sql b/src/test/regress/sql/pri_samenew_schema.sql
new file mode 100644
index 000000000..f4d72ab41
--- /dev/null
+++ b/src/test/regress/sql/pri_samenew_schema.sql
@@ -0,0 +1,209 @@
+DROP USER test_same_schema_user;
+DROP USER ordinary_role;
+CREATE USER test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE USER ordinary_role PASSWORD 'Gauss@1234';
+
+--test same schema
+reset role;
+SET ROLE ordinary_role PASSWORD 'Gauss@1234';
+--create table
+CREATE TABLE test_drop_table(id int);
+CREATE TABLE TBL_DOMAIN_PRI
+(
+    IDOMAINID NUMBER(10) NOT NULL,
+    SDOMAINNAME VARCHAR2(30) NOT NULL,
+    b int
+);
+insert into TBL_DOMAIN_PRI values (1,'gauss',1);
+
+reset role;
+GRANT create any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE table ordinary_role.tb_pri (id int, name VARCHAR(10));
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+drop table ordinary_role.test_drop_table;
+select * from ordinary_role.TBL_DOMAIN_PRI;
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+reset role;
+-- create any type
+revoke create any table from test_same_schema_user;
+GRANT create any type to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE TYPE ordinary_role.compfoo AS (f1 int, f2 text);
+CREATE TABLE ordinary_role.t1_compfoo(a int, b ordinary_role.compfoo);
+CREATE TYPE ordinary_role.bugstatus AS ENUM ('create', 'modify', 'closed');
+-- create any function
+reset role;
+revoke create any type from test_same_schema_user;
+GRANT create any function to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE FUNCTION ordinary_role.pri_func_add_sql(integer, integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;
+--create any index
+reset role;
+revoke create any function from test_same_schema_user;
+GRANT create any index to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE UNIQUE INDEX ordinary_role.ds_ship_mode_t1_index1 ON ordinary_role.TBL_DOMAIN_PRI(IDOMAINID);
+reset role;
+DROP INDEX ordinary_role.ds_ship_mode_t1_index1;
+--create any sequence
+reset role;
+revoke create any index from test_same_schema_user;
+GRANT create any sequence to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE SEQUENCE sequence_test;
+
+--alter any table
+reset role;
+revoke create any type from test_same_schema_user;
+grant alter any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+drop table ordinary_role.test_drop_table;
+select * from ordinary_role.TBL_DOMAIN_PRI;
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+
+--drop any table
+reset role;
+revoke alter any table from test_same_schema_user;
+grant drop any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+drop table ordinary_role.test_drop_table;
+select * from ordinary_role.TBL_DOMAIN_PRI;
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+
+--select any table
+reset role;
+revoke drop any table from test_same_schema_user;
+grant select any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table ordinary_role.TBL_DOMAIN_PRI add column c int;
+drop table ordinary_role.test_drop_table;
+select * from ordinary_role.TBL_DOMAIN_PRI;
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+
+--insert any table
+reset role;
+revoke select any table from test_same_schema_user;
+grant insert any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from ordinary_role.TBL_DOMAIN_PRI;
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+
+--update any table
+reset role;
+revoke insert any table from test_same_schema_user;
+grant update any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from ordinary_role.TBL_DOMAIN_PRI;
+insert into ordinary_role.TBL_DOMAIN_PRI values (2,'gauss',2,2);
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+reset role;
+grant select any table to test_same_schema_user;
+update ordinary_role.TBL_DOMAIN_PRI set b = 3 where IDOMAINID = 1;
+
+--delete any table
+reset role;
+revoke update any table from test_same_schema_user;
+revoke select any table from test_same_schema_user;
+grant delete any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+delete from ordinary_role.TBL_DOMAIN_PRI;
+
+-- test new schema
+reset role;
+revoke delete any table from test_same_schema_user;
+GRANT create any table to test_same_schema_user;
+CREATE SCHEMA pri_new_schema;
+CREATE TABLE pri_new_schema.TBL_DOMAIN
+(
+    IDOMAINID NUMBER(10) NOT NULL,
+    SDOMAINNAME VARCHAR2(30) NOT NULL,
+    b int
+);
+insert into pri_new_schema.TBL_DOMAIN values (1,'gauss',1);
+CREATE TABLE pri_new_schema.test_new_table1(id int, name text);
+
+GRANT create any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+CREATE TABLE pri_new_schema.test_new_table2(id int, name text);
+
+reset role;
+revoke create any table from test_same_schema_user;
+grant alter any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table pri_new_schema.TBL_DOMAIN add column d int;
+drop table pri_new_schema.test_new_table1;
+select * from pri_new_schema.TBL_DOMAIN;
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+
+reset role;
+revoke alter any table from test_same_schema_user;
+grant drop any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table pri_new_schema.TBL_DOMAIN add column d int;
+drop table pri_new_schema.test_new_table1;
+select * from pri_new_schema.TBL_DOMAIN;
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+
+reset role;
+revoke drop any table from test_same_schema_user;
+grant select any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+alter table pri_new_schema.TBL_DOMAIN add column d int;
+drop table pri_new_schema.test_new_table1;
+select * from pri_new_schema.TBL_DOMAIN;
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+
+reset role;
+revoke select any table from test_same_schema_user;
+grant insert any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from pri_new_schema.TBL_DOMAIN;
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+
+reset role;
+revoke insert any table from test_same_schema_user;
+grant update any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+select * from pri_new_schema.TBL_DOMAIN;
+insert into pri_new_schema.TBL_DOMAIN values (2,'gauss',2,2);
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+reset role;
+grant select any table to test_same_schema_user;
+update pri_new_schema.TBL_DOMAIN set b = 3 where IDOMAINID = 1;
+
+reset role;
+revoke update any table from test_same_schema_user;
+revoke select any table from test_same_schema_user;
+GRANT delete any table to test_same_schema_user;
+SET ROLE test_same_schema_user PASSWORD 'Gauss@1234';
+delete pri_new_schema.TBL_DOMAIN;
+
+reset role;
+revoke delete any table from test_same_schema_user;
+drop table pri_new_schema.TBL_DOMAIN cascade;
+drop table pri_new_schema.test_new_table2 cascade;
+drop table ordinary_role.tb_pri cascade;
+drop table ordinary_role.tbl_domain_pri cascade;
+drop type ordinary_role.compfoo;
+drop type ordinary_role.bugstatus;
+drop function ordinary_role.pri_func_add_sql(integer,integer);
+drop schema ordinary_role cascade;
+drop user ordinary_role cascade;
+drop user test_same_schema_user cascade;
+drop schema pri_new_schema cascade;
diff --git a/src/test/regress/sql/pri_sys_schema.sql b/src/test/regress/sql/pri_sys_schema.sql
new file mode 100644
index 000000000..686b9ada2
--- /dev/null
+++ b/src/test/regress/sql/pri_sys_schema.sql
@@ -0,0 +1,81 @@
+CREATE USER test_any_role PASSWORD 'Gauss@1234';
+GRANT insert any table to test_any_role;
+GRANT update any table to test_any_role;
+GRANT delete any table to test_any_role;
+CREATE USER test_user PASSWORD 'Gauss@1234';
+
+--db4ai
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+insert into db4ai.snapshot(id) values(1);
+update db4ai.snapshot set id = 2 where id = 1;
+delete from db4ai.snapshot;
+select * from db4ai.snapshot;
+
+reset role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+insert into db4ai.snapshot(id) values(1);
+update db4ai.snapshot set id = 2 where id = 1;
+delete from db4ai.snapshot;
+select * from db4ai.snapshot;
+
+--information_schema
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+insert into information_schema.sql_features(feature_id) values(1);
+update information_schema.sql_features set feature_name = 'Embedded Ada1' where feature_id = 'B011';
+delete from information_schema.sql_features;
+select * from information_schema.sql_features where feature_id = 'B011';
+
+reset role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+insert into information_schema.sql_features(feature_id) values(1);
+update information_schema.sql_features set feature_name = 'Embedded Ada1' where feature_id = 'B011';
+delete from information_schema.sql_features;
+select * from information_schema.sql_features where feature_id = 'B011';
+
+--dbe_perf
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from dbe_perf.user_transaction;
+delete from dbe_perf.user_transaction;
+
+reset role;
+GRANT select any table to test_any_role;
+GRANT delete any table to test_any_role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from dbe_perf.user_transaction;
+delete from dbe_perf.user_transaction;
+
+--cstore
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from sys.sys_dummy;
+delete from sys.sys_dummy;
+reset role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from sys.sys_dummy;
+delete from sys.sys_dummy;
+
+--pg_catalog
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from pg_catalog.pg_authid;
+
+reset role;
+GRANT select any table to test_any_role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from pg_catalog.pg_authid;
+
+--sys
+reset role;
+SET ROLE test_user PASSWORD 'Gauss@1234';
+select count(*) from sys.my_jobs;
+
+reset role;
+GRANT select any table to test_any_role;
+SET ROLE test_any_role PASSWORD 'Gauss@1234';
+select count(*) from sys.my_jobs;
+reset role;
+drop user test_any_role cascade;
+drop user test_user cascade;
diff --git a/src/test/regress/sql/query_rewrite.sql b/src/test/regress/sql/query_rewrite.sql
index 2c46e7561..d68fedda6 100644
--- a/src/test/regress/sql/query_rewrite.sql
+++ b/src/test/regress/sql/query_rewrite.sql
@@ -34,5 +34,119 @@ explain (costs off) select * from t1 where ( '1' = '1' and ( '1' = '0' or exists
 --test const param eval: const param should be removed and convert to seqscan
 explain (costs off) select * from t1 where ( '1' = '0' and ( '1' = '1' or exists ( select /*+ rows(t2 #9999999) */ a from t2 where t1.a=t2.a)));
+-- test for optimized join rel as sub-query
+set qrw_inlist2join_optmode = 'rule_base';
+
+CREATE TABLE t3 (
+slot integer NOT NULL,
+cid bigint NOT NULL,
+name character varying NOT NULL
+)
+WITH (orientation=row);
+
+insert into t3 (slot, cid, name) values(generate_series(1, 10), generate_series(1, 10), 'records.storage.state');
+
+analyze t3;
+
+explain (costs off)
+select
+    *
+from
+    t3
+where
+    slot = '5'
+    and (name) in (
+        select
+            name
+        from
+            t3
+        where
+            slot = '5'
+            and cid in (
+                5, 1000, 1001, 1002, 1003, 1004, 1005,
+                1006, 1007, 2000, 4000, 10781986, 10880002
+            )
+        limit
+            50
+    );
+
+select
+    *
+from
+    t3
+where
+    slot = '5'
+    and (name) in (
+        select
+            name
+        from
+            t3
+        where
+            slot = '5'
+            and cid in (
+                5, 1000, 1001, 1002, 1003, 1004, 1005,
+                1006, 1007, 2000, 4000, 10781986, 10880002
+            )
+        limit
+            50
+    );
+
+explain (costs off)
+select
+    *
+from
+    t3
+where
+    cid in (
+        select
+            cid
+        from
+            t3
+        where
+            slot = '5'
+            and (name) in (
+                select
+                    name
+                from
+                    t3
+                where
+                    slot = '5'
+                    and cid in (
+                        5, 1000, 1001, 1002, 1003, 1004, 1005,
+                        1006, 1007, 2000, 4000, 10781986, 10880002
+                    )
+                limit
+                    50
+            )
+    );
+
+select
+    *
+from
+    t3
+where
+    cid in (
+        select
+            cid
+        from
+            t3
+        where
+            slot = '5'
+            and (name) in (
+                select
+                    name
+                from
+                    t3
+                where
+                    slot = '5'
+                    and cid in (
+                        5, 1000, 1001, 1002, 1003, 1004, 1005,
+                        1006, 1007, 2000, 4000, 10781986, 10880002
+                    )
+                limit
+                    50
+            )
+    );
+
 drop schema query_rewrite cascade;
 reset current_schema;
diff --git a/src/test/regress/sql/rawlike.sql b/src/test/regress/sql/rawlike.sql
new file mode 100644
index 000000000..476afd0be
--- /dev/null
+++ b/src/test/regress/sql/rawlike.sql
@@ -0,0 +1,13 @@
+create database utf8test template template0 encoding 'utf8';
+\c utf8test
+create table rawlike_t1(c1 raw);
+insert into rawlike_t1 values(hextoraw('D'));
+select * from rawlike_t1 where c1 like hextoraw('D');
+insert into rawlike_t1 values(hextoraw('D9'));
+select * from rawlike_t1 where c1 like hextoraw('D9');
+insert into rawlike_t1 values(hextoraw('D9a'));
+select * from rawlike_t1 where c1 like hextoraw('D9a');
+select * from rawlike_t1 where c1 like hextoraw('D9f');
+drop table rawlike_t1;
+\c postgres
+drop database utf8test;
\ No newline at end of file
diff --git a/src/test/regress/sql/replace_func_with_two_args.sql b/src/test/regress/sql/replace_func_with_two_args.sql
new file mode 100644
index 000000000..c05dee105
--- /dev/null
+++ b/src/test/regress/sql/replace_func_with_two_args.sql
@@ -0,0 +1,27 @@
+--
+-- replace function with two arguments
+--
+
+select replace('string', '');
+select replace('string', 'i');
+select replace('string', 'in');
+select replace('string', 'ing');
+
+select replace('', 'ing');
+select replace(NULL, 'ing');
+select replace('ing', '');
+select replace('ing', NULL);
+select replace('', '');
+select replace(NULL, NULL);
+
+select replace(123, '1');
+select replace('123', 1);
+select replace(123, 1);
+
+select replace('abc\nabc', '\n');
+select replace('abc\nabc', E'\n');
+select replace(E'abc\nabc', E'\n');
+
+select replace('~!@#$%^&*()', '!@');
+
+select replace('高斯', '高');
\ No newline at end of file
diff --git a/src/test/regress/sql/row_compression/normal_test.sql b/src/test/regress/sql/row_compression/normal_test.sql
deleted file mode 100644
index 057bf3f95..000000000
--- a/src/test/regress/sql/row_compression/normal_test.sql
+++ /dev/null
@@ -1,63 +0,0 @@
-create schema normal_test;
-CREATE TABLE normal_test.tbl_pc(id int, c1 text) WITH(compresstype=1);
-\d+ normal_test.tbl_pc
-INSERT INTO normal_test.tbl_pc SELECT id, id::text FROM generate_series(1,1000) id;
-select count(*) from normal_test.tbl_pc;
-select count(*) from normal_test.tbl_pc where id < 100;
-checkpoint;
-vacuum normal_test.tbl_pc;
-select count(*) from normal_test.tbl_pc;
-select count(*) from normal_test.tbl_pc where id < 100;
-
--- normal index
-create index on normal_test.tbl_pc(id) WITH (compresstype=2,compress_chunk_size=1024);
-alter index normal_test.tbl_pc_id_idx set (compresstype=1); --failed
-alter index normal_test.tbl_pc_id_idx set (compress_chunk_size=2048); --failed
-alter index normal_test.tbl_pc_id_idx set (compress_prealloc_chunks=2); --success
-alter index normal_test.tbl_pc_id_idx set (compress_level=2); --success
-
-set enable_seqscan = off;
-set enable_bitmapscan = off;
-select count(*) from normal_test.tbl_pc;
-CREATE TABLE normal_test.tbl_partition(id int) WITH(compresstype=2,compress_chunk_size=1024) partition by range(id)
-(
-    partition p0 values less than(5000),
-    partition p1 values less than(10000),
-    partition p2 values less than(20000),
-    partition p3 values less than(30000),
-    partition p4 values less than(40000),
-    partition p5 values less than(50000),
-    partition p6 values less than(60000),
-    partition p7 values less than(70000)
-);
-insert into normal_test.tbl_partition select generate_series(1,65000);
-select count(*) from normal_test.tbl_partition;
-checkpoint;
-vacuum normal_test.tbl_partition;
-select count(*) from normal_test.tbl_partition;
-
--- exchange
-select relname, reloptions from pg_partition where parentid in (Select relfilenode from pg_class where relname like 'tbl_partition') order by relname;
-create table normal_test.exchange_table(id int) WITH(compresstype=2,compress_chunk_size=1024);
-ALTER TABLE normal_test.tbl_partition EXCHANGE PARTITION FOR(2500) WITH TABLE normal_test.exchange_table;
-select count(*) from normal_test.tbl_partition;
-
--- spilit
-ALTER TABLE normal_test.tbl_partition SPLIT PARTITION p1 AT (7500) INTO (PARTITION p10, PARTITION p11);
-select relname, reloptions from pg_partition where parentid in (Select relfilenode from pg_class where relname like 'tbl_partition') order by relname;
-
-create index on normal_test.tbl_partition(id) local WITH (compresstype=2,compress_chunk_size=1024);
-\d+ normal_test.tbl_partition
-select relname, reloptions from pg_partition where parentid in (Select relfilenode from pg_class where relname like 'tbl_partition_id_idx') order by relname;
-
-
--- unsupport
-alter index normal_test.tbl_partition_id_idx set (compresstype=1);
-alter index normal_test.tbl_partition_id_idx set (compress_chunk_size=2048);
-alter index normal_test.tbl_partition_id_idx set (compress_prealloc_chunks=2);
-create index rolcompress_index on normal_test.tbl_pc(id) with (compress_chunk_size=4096);
-create table rolcompress_table_001(a int) with (compresstype=2, compress_prealloc_chunks=3);
--- support
-alter table normal_test.tbl_pc set (compress_prealloc_chunks=1);
-
-drop schema normal_test cascade;
diff --git a/src/test/regress/sql/row_compression/pg_table_size.sql b/src/test/regress/sql/row_compression/pg_table_size.sql
deleted file mode 100644
index 054e51905..000000000
--- a/src/test/regress/sql/row_compression/pg_table_size.sql
+++ /dev/null
@@ -1,30 +0,0 @@
--- row table pg_table_size
-create schema table_size_schema;
-CREATE TABLE table_size_schema.normal_table(id int);
-CREATE TABLE table_size_schema.compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=1024);
-CREATE TABLE table_size_schema.compressed_table_2048(id int) WITH(compresstype=2, compress_chunk_size=2048);
-CREATE TABLE table_size_schema.compressed_table_4096(id int) WITH(compresstype=2, compress_chunk_size=4096);
-select pg_table_size('table_size_schema.normal_table');
-select pg_table_size('table_size_schema.compressed_table_1024');
-select pg_table_size('table_size_schema.compressed_table_2048');
-select pg_table_size('table_size_schema.compressed_table_4096');
-drop schema table_size_schema cascade;
-
--- partition table pg_table_size
-create schema partition_table_size_schema;
-create table partition_table_size_schema.normal_partition(INV_DATE_SK integer)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-create table partition_table_size_schema.compressed_partition_1024(INV_DATE_SK integer)
-WITH(compresstype=2, compress_chunk_size=1024)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-create table partition_table_size_schema.compressed_partition_2048(INV_DATE_SK integer)
-WITH(compresstype=2, compress_chunk_size=2048)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-create table partition_table_size_schema.compressed_partition_4096(INV_DATE_SK integer)
-WITH(compresstype=2, compress_chunk_size=4096)
-partition by range(inv_date_sk)(partition p0 values less than(5000),partition p1 values less than(10000));
-select pg_table_size('partition_table_size_schema.normal_partition');
-select pg_table_size('partition_table_size_schema.compressed_partition_1024');
-select pg_table_size('partition_table_size_schema.compressed_partition_2048');
-select pg_table_size('partition_table_size_schema.compressed_partition_4096');
-drop schema partition_table_size_schema cascade;
diff --git a/src/test/regress/sql/row_compression/pg_tablespace_size.sql b/src/test/regress/sql/row_compression/pg_tablespace_size.sql
deleted file mode 100644
index 94b5e6cb0..000000000
--- a/src/test/regress/sql/row_compression/pg_tablespace_size.sql
+++ /dev/null
@@ -1,14 +0,0 @@
-CREATE TABLESPACE normal_tablespace RELATIVE LOCATION 'normal_tablespace';
-SELECT pg_tablespace_size('normal_tablespace');
-CREATE TABLE normal_table(id int) TABLESPACE normal_tablespace;
-SELECT pg_tablespace_size('normal_tablespace');
-
-CREATE TABLESPACE compress_tablespace RELATIVE LOCATION 'compress_tablespace';
-SELECT pg_tablespace_size('compress_tablespace');
-CREATE TABLE compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=1024) TABLESPACE compress_tablespace;
-SELECT pg_tablespace_size('compress_tablespace');
-DROP TABLE normal_table;
-DROP TABLESPACE normal_tablespace;
-DROP TABLE compressed_table_1024;
-DROP TABLESPACE compress_tablespace;
-
diff --git a/src/test/regress/sql/row_compression/unsupported_feature.sql b/src/test/regress/sql/row_compression/unsupported_feature.sql
deleted file mode 100644
index 0fd73357b..000000000
--- a/src/test/regress/sql/row_compression/unsupported_feature.sql
+++ /dev/null
@@ -1,52 +0,0 @@
-create schema unspported_feature;
--- unspport compressType: 3
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=3, compress_chunk_size=1024);
--- unspport compress_chunk_size: 2000
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_chunk_size=2000);
--- unspport compress_prealloc_chunks: -1
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_prealloc_chunks=-1);
--- unspport compress_prealloc_chunks: 8
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_prealloc_chunks=8);
--- unspport compress_level: 128
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compresstype=2, compress_level=128);
--- compresstype cant be used with column table
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(ORIENTATION = 'column', compresstype=2);
--- compresstype cant be used with temp table
-CREATE TEMP TABLE compressed_temp_table_1024(id int) WITH(compresstype=2);
--- compresstype cant be used with unlogged table
-CREATE unlogged TABLE compressed_unlogged_table_1024(id int) WITH(compresstype=2);
--- use compress_prealloc_chunks\compress_chunk_size\compress_level without compresstype
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_prealloc_chunks=5);
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_chunk_size=1024);
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_byte_convert=true);
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_diff_convert=true);
-CREATE TABLE unspported_feature.compressed_table_1024(id int) WITH(compress_level=5);
--- unspport exchange
-CREATE TABLE unspported_feature.exchange_table(id int) WITH(compresstype=2);
-CREATE TABLE unspported_feature.alter_table(id int) partition by range(id)
-(
-    partition p0 values less than(5000),
-    partition p1 values less than(10000),
-    partition p2 values less than(20000),
-    partition p3 values less than(30000),
-    partition p4 values less than(40000),
-    partition p5 values less than(50000),
-    partition p6 values less than(60000),
-    partition p7 values less than(70000)
-);
-ALTER TABLE unspported_feature.alter_table EXCHANGE PARTITION FOR(2500) WITH TABLE unspported_feature.exchange_table;
--- unspport alter compress_chunk_size
-create TABLE unspported_feature.alter_table_option(id int) WITH(compresstype=2);
-\d+ unspported_feature.alter_table_option
-ALTER TABLE unspported_feature.alter_table_option SET(compresstype=0); -- fail
-ALTER TABLE unspported_feature.alter_table_option SET(compress_chunk_size=2048); -- fail
-ALTER TABLE unspported_feature.alter_table_option SET(compress_level=2, compress_prealloc_chunks=0);
--- alter compress_byte_convert\compress_diff_convert
-create table unspported_feature.rolcompress_table_001(a int) with (compresstype=2, compress_diff_convert=true); -- fail
-
-create table unspported_feature.t_rowcompress_0007(cid int, name varchar2) with (compresstype=1);
-alter table unspported_feature.t_rowcompress_0007 set (compress_diff_convert=true); --fail
-alter table unspported_feature.t_rowcompress_0007 set (compress_byte_convert=true, compress_diff_convert=true); --success
-alter table unspported_feature.t_rowcompress_0007 set (compress_level=31); --failed
-
-
diff --git a/src/test/regress/sql/segment_subpartition_add_drop_partition.sql b/src/test/regress/sql/segment_subpartition_add_drop_partition.sql
new file mode 100644
index 000000000..7c89dd51d
--- /dev/null
+++ b/src/test/regress/sql/segment_subpartition_add_drop_partition.sql
@@ -0,0 +1,1239 @@
+DROP SCHEMA segment_subpartition_add_drop_partition CASCADE;
+CREATE SCHEMA segment_subpartition_add_drop_partition;
+SET CURRENT_SCHEMA TO segment_subpartition_add_drop_partition;
+
+--
+----range-range table----
+--
+--prepare
+CREATE TABLE range_range_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id)
+(
+    PARTITION customer1 VALUES LESS THAN (200)
+    (
+        SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'),
+        SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'),
+        SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'),
+        SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01')
+    ),
+    PARTITION customer2 VALUES LESS THAN (500)
+    (
+        SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'),
+        SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'),
+        SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'),
+        SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01')
+    ),
+    PARTITION customer3 VALUES LESS THAN (800),
+    PARTITION customer4 VALUES LESS THAN (1200)
+    (
+        SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01')
+    )
+);
+INSERT INTO range_range_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--success, add 4 subpartition
+ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500)
+    (
+        SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'),
+        SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'),
+        SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'),
+        SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01')
+    );
+--fail, out of range
+ALTER TABLE range_range_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100);
+--fail, invalid format
+ALTER TABLE range_range_sales ADD PARTITION customer_temp2 VALUES (1300);
+--success, add 1 default subpartition
+ALTER TABLE range_range_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE);
+--fail, out of range
+ALTER TABLE range_range_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800);
(1800); +--success, add 1 subpartition +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES LESS THAN ('2015-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('2011-01-01'); +--fail, invalid format +ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_range_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_range_sales DROP PARTITION customer2; +--success +ALTER TABLE range_range_sales DROP SUBPARTITION customer1_2008; +--fail, the only subpartition +ALTER TABLE range_range_sales DROP SUBPARTITION customer4_all; +--success, drop partition customer3 +ALTER TABLE range_range_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP PARTITION FOR (400, '2010-01-01'); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP SUBPARTITION FOR (1400); +--fail, invalid type +ALTER TABLE range_range_sales DROP PARTITION FOR ('2010-01-01'); +--fail, invalid type +ALTER TABLE range_range_sales DROP SUBPARTITION FOR ('2010-01-01', 1400); +--success, drop subpartition customer5_2010 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2010-01-01'); +--fail, the only subpartition in customer6 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(2000, '2009-01-01'); +--fail, no subpartition find +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2012-01-01'); + +--check for ok after drop +SELECT count(*) FROM range_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND 
n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_range_sales + +-- +----range-list table---- +-- +--prepare +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer5_channel4 VALUES ('9') + ); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +--fail, invalid format +ALTER TABLE range_list_sales ADD PARTITION customer_temp2 VALUES (1300); +--success, add 1 default subpartition +ALTER TABLE range_list_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_list_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +--success, add 1 subpartition +ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('X'); +--fail, out of range +ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES ('X'); +--fail, invalid format +ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND 
c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_list_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_list_sales DROP PARTITION customer2; +--success +ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1; +--fail, the only subpartition +ALTER TABLE range_list_sales DROP SUBPARTITION customer4_channel1; +--success, drop partition customer3 +ALTER TABLE range_list_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP PARTITION FOR (400, '4'); +--fail, number not equal to the number of partkey +ALTER TABLE range_list_sales DROP SUBPARTITION FOR (1400); +--fail, invalid type +ALTER TABLE range_list_sales DROP PARTITION FOR ('abc'); +--fail, invalid type +ALTER TABLE range_list_sales DROP SUBPARTITION FOR ('abc', 1400); +--success, drop subpartition customer5_channel3 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1400, '7'); +--fail, the only subpartition in customer6 +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(2000, 'X'); +--fail, no subpartition find +ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1100, 'X'); + +--check for ok after drop +SELECT count(*) FROM range_list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_list_sales + +-- +----range-hash table---- +-- +--prepare +CREATE TABLE range_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (customer_id) SUBPARTITION BY HASH (product_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_product1, + SUBPARTITION customer1_product2, + SUBPARTITION customer1_product3, + SUBPARTITION customer1_product4 + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_product1, + SUBPARTITION customer2_product2 + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_product1 + ) +); +INSERT INTO range_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_hash_sales_idx ON range_hash_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_product1, + SUBPARTITION customer5_product2, 
+ SUBPARTITION customer5_product3, + SUBPARTITION customer5_product4 + ); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +--fail, invalid format +ALTER TABLE range_hash_sales ADD PARTITION customer_temp2 VALUES (1300); +--success, add 1 default subpartition +ALTER TABLE range_hash_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_hash_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +--fail, not support add hash +ALTER TABLE range_hash_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1; +--fail, invalid format +ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_hash_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_hash_sales DROP PARTITION customer2; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer1_product1; +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION customer4_product1; +--success, drop partition customer3 +ALTER TABLE range_hash_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_hash_sales DROP PARTITION FOR (400, '2010-01-01'); +--fail, invalid type +ALTER TABLE range_hash_sales DROP PARTITION FOR ('2010-01-01'); +--fail, not support drop hash +ALTER TABLE range_hash_sales DROP SUBPARTITION FOR(1400, 1); + +--check for ok after drop +SELECT count(*) FROM range_hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_hash_sales + +-- +----list-range table---- +-- +--prepare +CREATE TABLE list_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id 
INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_customer1 VALUES LESS THAN (200), + SUBPARTITION channel1_customer2 VALUES LESS THAN (500), + SUBPARTITION channel1_customer3 VALUES LESS THAN (800), + SUBPARTITION channel1_customer4 VALUES LESS THAN (1200) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_customer1 VALUES LESS THAN (500), + SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_customer1 VALUES LESS THAN (1200) + ) +); +INSERT INTO list_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_customer1 VALUES LESS THAN (200), + SUBPARTITION channel5_customer2 VALUES LESS THAN (500), + SUBPARTITION channel5_customer3 VALUES LESS THAN (800), + SUBPARTITION channel5_customer4 VALUES LESS THAN (1200) + ); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +--fail, invalid format +ALTER TABLE list_range_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +--success, add 1 default subpartition +ALTER TABLE list_range_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_range_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +--success, add 1 subpartition +ALTER TABLE list_range_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_customer5 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES LESS THAN (2000); +--fail, out of range +ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES LESS THAN (2000); +--fail, invalid format +ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_range_sales + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_range_sales DROP PARTITION channel2; +--success +ALTER TABLE list_range_sales DROP 
SUBPARTITION channel1_customer1; +--fail, the only subpartition +ALTER TABLE list_range_sales DROP SUBPARTITION channel4_customer1; +--success, drop partition channel3 +ALTER TABLE list_range_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP PARTITION FOR('X', 700); +--fail, number not equal to the number of partkey +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X'); +--fail, invalid type +ALTER TABLE list_range_sales DROP PARTITION FOR (10); +--fail, invalid type +ALTER TABLE list_range_sales DROP SUBPARTITION FOR(700, 'X'); +--success, drop subpartition channel5_customer3 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 700); +--fail, the only subpartition in channel6 +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('K', 100); +--fail, no subpartition find +ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 2500); + +--check for ok after drop +SELECT count(*) FROM list_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_range_sales + +-- +----list-list table---- +-- +--prepare +CREATE TABLE list_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY LIST (channel_id) SUBPARTITION BY LIST (type_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_type1 VALUES (0, 1, 2), + SUBPARTITION channel1_type2 VALUES (3, 4), + SUBPARTITION channel1_type3 VALUES (5, 6, 7), + SUBPARTITION channel1_type4 VALUES (8, 9) + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_type1 VALUES (0, 1, 2, 3), + SUBPARTITION channel2_type2 VALUES (DEFAULT) + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_type1 VALUES (0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + ) +); +INSERT INTO list_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_list_sales_idx ON list_list_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE list_list_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_type1 VALUES (0, 1, 2), + SUBPARTITION channel5_type2 VALUES (3, 4), + SUBPARTITION channel5_type3 VALUES (5, 6, 7), + SUBPARTITION channel5_type4 VALUES (8, 9) + ); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +--fail, invalid format +ALTER 
TABLE list_list_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +--success, add 1 default subpartition +ALTER TABLE list_list_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_list_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +--success, add 1 subpartition +ALTER TABLE list_list_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_type5 VALUES (DEFAULT); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES (10, 11, 12); +--fail, out of range +ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES (10, 11, 12); +--fail, invalid format +ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_list_sales + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_list_sales DROP PARTITION channel2; +--success +ALTER TABLE list_list_sales DROP SUBPARTITION channel1_type1; +--fail, the only subpartition +ALTER TABLE list_list_sales DROP SUBPARTITION channel4_type1; +--success, drop partition channel3 +ALTER TABLE list_list_sales DROP PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP PARTITION FOR('X', 6); +--fail, number not equal to the number of partkey +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X'); +--fail, invalid type +ALTER TABLE list_list_sales DROP PARTITION FOR (10); +--fail, invalid type +ALTER TABLE list_list_sales DROP SUBPARTITION FOR(10, 'X'); +--success, drop subpartition channel5_type3 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 6); +--fail, the only subpartition in channel6 +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('K', 10); +--fail, no subpartition found +ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 5); + +--check for ok after drop +SELECT count(*) FROM list_list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, 
pg_namespace n1 + WHERE c1.relname='list_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_list_sales + +-- +----list-hash table---- +-- +--prepare +CREATE TABLE list_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY LIST (channel_id) SUBPARTITION BY HASH (product_id) +( + PARTITION channel1 VALUES ('0', '1', '2') + ( + SUBPARTITION channel1_product1, + SUBPARTITION channel1_product2, + SUBPARTITION channel1_product3, + SUBPARTITION channel1_product4 + ), + PARTITION channel2 VALUES ('3', '4', '5') + ( + SUBPARTITION channel2_product1, + SUBPARTITION channel2_product2 + ), + PARTITION channel3 VALUES ('6', '7'), + PARTITION channel4 VALUES ('8', '9') + ( + SUBPARTITION channel4_product1 + ) +); +INSERT INTO list_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX list_hash_sales_idx ON list_hash_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartitions +ALTER TABLE list_hash_sales ADD PARTITION channel5 VALUES ('X') + ( + SUBPARTITION channel5_product1, + SUBPARTITION channel5_product2, + SUBPARTITION channel5_product3, + SUBPARTITION channel5_product4 + ); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C'); +--fail, invalid format +ALTER TABLE list_hash_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z'); +--success, add 1 default subpartition +ALTER TABLE list_hash_sales ADD PARTITION channel6 VALUES (DEFAULT); +--fail, value conflict +ALTER TABLE list_hash_sales ADD PARTITION channel_temp3 VALUES ('M', 'X'); +--fail, not support add hash +ALTER TABLE list_hash_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1; +--fail, invalid format +ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_hash_sales + +--check for drop partition/subpartition (for) +--success, drop partition channel2 +ALTER TABLE list_hash_sales DROP PARTITION channel2; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel1_product1; +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION channel4_product1; +--success, drop partition channel3 +ALTER TABLE list_hash_sales DROP 
PARTITION FOR ('6'); +--fail, number not equal to the number of partkey +ALTER TABLE list_hash_sales DROP PARTITION FOR ('6', '2010-01-01'); +--fail, invalid type +ALTER TABLE list_hash_sales DROP PARTITION FOR (10); +--fail, not support drop hash +ALTER TABLE list_hash_sales DROP SUBPARTITION FOR('X', 6); + +--check for ok after drop +SELECT count(*) FROM list_hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='list_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='list_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ list_hash_sales + +-- +----hash-range table---- +-- +--prepare +CREATE TABLE hash_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1 VALUES LESS THAN (200), + SUBPARTITION product1_customer2 VALUES LESS THAN (500), + SUBPARTITION product1_customer3 VALUES LESS THAN (800), + SUBPARTITION product1_customer4 VALUES LESS THAN (1200) + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1 VALUES LESS THAN (500), + SUBPARTITION product2_customer2 VALUES LESS THAN (MAXVALUE) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 VALUES LESS THAN (1200) + ) +); +INSERT INTO hash_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_range_sales_idx ON hash_range_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1 VALUES LESS THAN (200), + SUBPARTITION product_temp1_customer2 VALUES LESS THAN (500), + SUBPARTITION product_temp1_customer3 VALUES LESS THAN (800), + SUBPARTITION product_temp1_customer4 VALUES LESS THAN (1200) + ); +--fail, not support add hash +ALTER TABLE hash_range_sales ADD PARTITION product_temp2; +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_customer5 VALUES LESS THAN (1800); +--fail, out of range +ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES LESS THAN (1800); +--fail, invalid format +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT); +--success, add 1 subpartition +ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE); + +--check for ok after add +SELECT p1.relname, 
p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_range_sales + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION product2; +--success, drop subpartition product1_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer1; +--success, drop subpartition product4_customer1 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer1; +--fail, the only subpartition in product4 +ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer2; +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0); +--fail, not support drop hash +ALTER TABLE hash_range_sales DROP PARTITION FOR(0, 100); +--fail, number not equal to the number of partkey +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0); +--fail, invalid type +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR('2010-01-01', 100); +--success, drop subpartition product1_customer2, but this operation is not suggested +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 100); +--fail, no subpartition found +ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 2300); + +--check for ok after drop +SELECT count(*) FROM hash_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_range_sales + +-- +----hash-list table---- +-- +--prepare +CREATE TABLE hash_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY HASH (product_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION product1 + ( + SUBPARTITION product1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product1_channel4 VALUES ('9') + ), + PARTITION product2 + ( + SUBPARTITION product2_channel1 
VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION product2_channel2 VALUES (DEFAULT) + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO hash_list_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_list_sales_idx ON hash_list_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION product_temp1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION product_temp1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION product_temp1_channel4 VALUES ('9') + ); +--fail, not support add hash +ALTER TABLE hash_list_sales ADD PARTITION product_temp2; +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_channel5 VALUES ('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES ('X'); +--fail, out of range +ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_temp1 VALUES ('X'); +--fail, invalid format +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE); +--success, add 1 subpartition +ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_list_sales + +--check for drop partition/subpartition (for) +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION product2; +--success, drop subpartition product1_channel1 +ALTER TABLE hash_list_sales DROP SUBPARTITION product1_channel1; +--success, drop subpartition product4_channel1 +ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel1; +--fail, the only subpartition in product4 +ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel2; +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION FOR(0); +--fail, not support drop hash +ALTER TABLE hash_list_sales DROP PARTITION FOR(0, '4'); +--fail, number not equal to the number of partkey +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0); +--fail, invalid type +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR('2010-01-01', '4'); +--success, drop subpartition product1_channel2, but this operation is not suggested +ALTER TABLE hash_list_sales 
DROP SUBPARTITION FOR(0, '4'); +--fail, no subpartition found +ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, 'Z'); + +--check for ok after drop +SELECT count(*) FROM hash_list_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_list_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_list_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_list_sales + +-- +----hash-hash table---- +-- +--prepare +CREATE TABLE hash_hash_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (SEGMENT=ON) +PARTITION BY HASH (product_id) SUBPARTITION BY HASH (customer_id) +( + PARTITION product1 + ( + SUBPARTITION product1_customer1, + SUBPARTITION product1_customer2, + SUBPARTITION product1_customer3, + SUBPARTITION product1_customer4 + ), + PARTITION product2 + ( + SUBPARTITION product2_customer1, + SUBPARTITION product2_customer2 + ), + PARTITION product3, + PARTITION product4 + ( + SUBPARTITION product4_customer1 + ) +); +INSERT INTO hash_hash_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX hash_hash_sales_idx ON hash_hash_sales(product_id) LOCAL; + +--check for add partition/subpartition +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp1 + ( + SUBPARTITION product_temp1_customer1, + SUBPARTITION product_temp1_customer2, + SUBPARTITION product_temp1_customer3, + SUBPARTITION product_temp1_customer4 + ); +--fail, not support add hash +ALTER TABLE hash_hash_sales ADD PARTITION product_temp2; +--fail, not support add hash +ALTER TABLE hash_hash_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1; + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_hash_sales + +--check for drop partition/subpartition (for) 
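+-- note (explanatory comment, not an original test statement): for a table that
+-- is HASH-partitioned at both levels, the row-to-partition mapping depends on
+-- the fixed number of hash (sub)partitions, so ADD/DROP is rejected at both the
+-- partition and the subpartition level; every statement below is expected to fail.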
+--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION product2; +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product1_customer1; +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION product4_customer1; +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0); +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP PARTITION FOR(0, 0); +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0, 0); +--fail, not support drop hash +ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0); + +--check for ok after drop +SELECT count(*) FROM hash_hash_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='hash_hash_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='hash_hash_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ hash_hash_sales + +--finish +DROP TABLE range_range_sales; +DROP TABLE range_list_sales; +DROP TABLE range_hash_sales; +DROP TABLE list_range_sales; +DROP TABLE list_list_sales; +DROP TABLE list_hash_sales; +DROP TABLE hash_range_sales; +DROP TABLE hash_list_sales; +DROP TABLE hash_hash_sales; + +DROP SCHEMA segment_subpartition_add_drop_partition CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_alter_table.sql b/src/test/regress/sql/segment_subpartition_alter_table.sql new file mode 100644 index 000000000..e494427a6 --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_alter_table.sql @@ -0,0 +1,151 @@ +DROP SCHEMA segment_subpartition_alter_table CASCADE; +CREATE SCHEMA segment_subpartition_alter_table; +SET CURRENT_SCHEMA TO segment_subpartition_alter_table; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +--change column type +alter table range_range alter column user_no set data type char(30); +alter table range_range alter column sales_amt set data type varchar; +\d+ range_range + +-- rename +alter table range_range rename to hahahahahah; +alter table range_range rename partition p_201901 to hahahahahah; +alter table range_range rename partition p_201901_a to hahahahahah; + +--cluster +create index idx_range_range on range_range(month_code,user_no); +alter table range_range cluster on idx_range_range; + +-- move tablespace +CREATE TABLESPACE example1 RELATIVE LOCATION 
'tablespace1/tablespace_1'; +alter table range_range move PARTITION p_201901 tablespace example1; +alter table range_range move PARTITION p_201901_a tablespace example1; +DROP TABLESPACE example1; + +-- merge +alter table range_range merge PARTITIONS p_201901 , p_201902 into PARTITION p_range_3; +alter table range_range merge SUBPARTITIONS p_201901 , p_201902 into PARTITION p_range_3; + +-- exchange +CREATE TABLE ori +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON); +ALTER TABLE range_range EXCHANGE PARTITION (p_201901) WITH TABLE ori; +ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WITH TABLE ori; + +-- drop +alter table range_range drop partition p_201901; +alter table range_range drop partition p_201901_a; +alter table range_range drop subpartition p_201901_a; + +-- add +alter table range_range add partition p_range_4 VALUES LESS THAN('201904'); + +-- split +alter table range_range split PARTITION p_201901 at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); + +drop table ori; +drop table range_range; + +CREATE TABLE IF NOT EXISTS range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; + +create index on range_range_02(col_2) local; + +alter table range_range_02 MODIFY PARTITION p_range_2 UNUSABLE LOCAL INDEXES; + +alter table range_range_02 MODIFY PARTITION p_range_2 REBUILD UNUSABLE LOCAL INDEXES; + +alter table range_range_02 alter col_1 type char; + +alter table range_range_02 alter col_2 type char; + +drop table range_range_02; + +--validate constraint +CREATE TABLE hash_hash +( + col_1 int , + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY hash (col_3) SUBPARTITION BY hash (col_2) +( + PARTITION p_hash_1 + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 , + SUBPARTITION p_hash_1_4 + ), + PARTITION p_hash_2 + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 + ), + PARTITION p_hash_3, + PARTITION p_hash_4 + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_hash_5 +); + +INSERT INTO hash_hash VALUES(null,1,1,1); +alter table hash_hash add constraint con_hash_hash check(col_1 is not null) NOT VALID ; +INSERT INTO hash_hash VALUES(null,2,1,1); --error +INSERT INTO hash_hash VALUES(1,3,1,1); --success +alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --error +delete from hash_hash where col_1 is null; +alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --success + +drop table hash_hash cascade; +-- clean +DROP SCHEMA segment_subpartition_alter_table CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_analyze_vacuum.sql b/src/test/regress/sql/segment_subpartition_analyze_vacuum.sql new file mode 100644 index 000000000..ac525077b --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_analyze_vacuum.sql @@ -0,0 +1,51 @@ +-- prepare +DROP SCHEMA segment_subpartition_analyze_vacuum CASCADE; +CREATE SCHEMA segment_subpartition_analyze_vacuum; +SET CURRENT_SCHEMA TO 
segment_subpartition_analyze_vacuum; + +-- base function + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; +delete from range_list where month_code = '201902'; +select * from range_list order by 1, 2, 3, 4; +analyze range_list; +analyze range_list partition (p_201901); +vacuum range_list; +vacuum range_list partition (p_201901); + +drop table range_list; + +-- clean +DROP SCHEMA segment_subpartition_analyze_vacuum CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_createtable.sql b/src/test/regress/sql/segment_subpartition_createtable.sql new file mode 100644 index 000000000..794c9e904 --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_createtable.sql @@ -0,0 +1,1404 @@ + +--1.create table +--list_list list_hash list_range range_list range_hash range_range + +--prepare +DROP SCHEMA segment_subpartition_createtable CASCADE; +CREATE SCHEMA segment_subpartition_createtable; +SET CURRENT_SCHEMA TO segment_subpartition_createtable; + +--1.1 normal table +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; +drop table list_list; + +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into list_hash values('201902', '1', '1', 1); +insert into list_hash values('201902', '2', '1', 1); +insert into list_hash 
values('201902', '3', '1', 1); +insert into list_hash values('201903', '4', '1', 1); +insert into list_hash values('201903', '5', '1', 1); +insert into list_hash values('201903', '6', '1', 1); +select * from list_hash; +drop table list_hash; + +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +insert into list_range values('201902', '1', '1', 1); +insert into list_range values('201902', '2', '1', 1); +insert into list_range values('201902', '3', '1', 1); +insert into list_range values('201903', '4', '1', 1); +insert into list_range values('201903', '5', '1', 1); +insert into list_range values('201903', '6', '1', 1); + +select * from list_range; +drop table list_range; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list; +drop table range_list; + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); + +select * from range_hash; +drop table range_hash; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into 
range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); + +select * from range_range; +drop table range_range; + +CREATE TABLE hash_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201901', '2', '1', 1); +insert into hash_list values('201901', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); +insert into hash_list values('201903', '1', '1', 1); +insert into hash_list values('201903', '2', '1', 1); + +select * from hash_list; +drop table hash_list; + +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201901', '2', '1', 1); +insert into hash_hash values('201901', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); +insert into hash_hash values('201903', '1', '1', 1); +insert into hash_hash values('201903', '2', '1', 1); + +select * from hash_hash; +drop table hash_hash; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN ( '3' ) + ), + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN ( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN ( '3' ) + ) +); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201901', '2', '1', 1); +insert into hash_range values('201901', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); +insert into hash_range values('201903', '1', '1', 1); +insert into hash_range values('201903', '2', '1', 1); + +select * from hash_range; +drop table hash_range; + + +--1.2 table with default subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 
) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_list; + + +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table list_hash; + +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; + +CREATE TABLE list_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_hash; + +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +drop table list_range; + +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('6') + ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; + +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) +); +drop table list_range; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + 
SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); +drop table range_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_list; + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table range_hash; + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_hash; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) +); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ), + PARTITION p_201902 VALUES LESS THAN( 
'201904' ) +); +drop table range_range; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +drop table hash_range; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a VALUES( '2' ), + SUBPARTITION p_201902_b VALUES( '3' ) + ) +); +drop table hash_range; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a VALUES( '2' ), + SUBPARTITION p_201901_b VALUES( '3' ) + ), + PARTITION p_201902 +); +drop table hash_range; + +CREATE TABLE hash_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_range; + +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); +drop table hash_hash; + +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 +); +drop table hash_hash; + +CREATE TABLE hash_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901, + PARTITION p_201902 +); +drop table hash_hash; + + +--1.3 subpartition name check +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , 
+ dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_a VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901 VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201901_subpartdefault1 VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + + + +--1.4 subpartition key check +-- the level-1 and level-2 partition keys are the same column + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +--duplicate key values in the level-2 subpartitions + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '1' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +--the partition column does not exist +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_codeXXX) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + 
SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_codeXXX) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + + +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('4') + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +drop table list_range; + + +--1.5 list subpartition with default + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( default ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list partition (p_201901); +select * from list_list partition (p_201902); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION 
p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); +drop table list_list; + +--1.6 declaration and definition of the subpartition type must be the same. +--error +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); + +--1.7 add constraint +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); + +alter table range_range add constraint constraint_check CHECK (sales_amt IS NOT NULL); +insert into range_range values(1,1,1); +drop table range_range; + +-- drop partition column +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; + +alter table range_hash_02 drop column col_1; + +alter table range_hash_02 drop column col_2; + +drop table range_hash_02; +--1.8 SET ROW MOVEMENT +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1', '2' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1', '2' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list disable ROW MOVEMENT; +insert into list_list values('201902', '1', '1', 1); +update list_list set month_code = '201903'; +update list_list set dept_code = '3'; +alter table list_list enable ROW MOVEMENT; +update list_list set month_code = '201903'; +update list_list set dept_code = '3'; +drop table list_list; + +--1.9 without subpartition declaration +create table test(a int) WITH (SEGMENT=ON) +partition by range(a) +( +partition p1 values less than(100) +( +subpartition subp1 values less than(50), +subpartition subp2 values less than(100) +), +partition p2 values less than(200), +partition p3 values less than(maxvalue) +); + 
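+-- illustrative sanity check (a sketch added here, not part of the original
+-- suite): a partition declared without a subpartition list, like p2 and p3
+-- above, is expected to get one implicitly created subpartition (a generated
+-- name such as p2_subpartdefault1 is assumed, not verified). The pg_partition
+-- pattern used throughout this file can confirm the layout:
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='test'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+        WHERE c2.relname='test'
+          AND c2.relnamespace=n2.oid
+          AND n2.nspname=CURRENT_SCHEMA
+          AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;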
+--1.10 create table like
+CREATE TABLE range_range
+(
+    col_1 int primary key,
+    col_2 int NOT NULL ,
+    col_3 VARCHAR2 ( 30 ) NOT NULL ,
+    col_4 int generated always as(2*col_2) stored ,
+    check (col_4 >= col_2)
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( 10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 20 )
+    (
+        SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 )
+    )
+) ENABLE ROW MOVEMENT;
+
+CREATE TABLE range_range_02 (like range_range INCLUDING ALL );
+drop table range_range;
+
+--ROW LEVEL SECURITY POLICY
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code  VARCHAR2 ( 30 ) ,
+    user_no    VARCHAR2 ( 30 ) ,
+    sales_amt  int,
+    primary key(month_code, dept_code)
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+CREATE ROW LEVEL SECURITY POLICY range_range_rls ON range_range USING(user_no = CURRENT_USER);
+
+drop table range_range;
+
+-- ledger database (blockchain schema)
+CREATE SCHEMA ledgernsp WITH BLOCKCHAIN;
+
+CREATE TABLE ledgernsp.range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code  VARCHAR2 ( 30 ) ,
+    user_no    VARCHAR2 ( 30 ) ,
+    sales_amt  int,
+    primary key(month_code, dept_code)
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+
+DROP SCHEMA ledgernsp;
+-- create table as
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code  VARCHAR2 ( 30 ) NOT NULL ,
+    user_no    VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt  int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+) ENABLE ROW MOVEMENT;
+
+insert into range_range values(201902,1,1,1),(201902,1,1,1),(201902,3,1,1),(201903,1,1,1),(201903,2,1,1),(201903,2,1,1);
+
+select * from range_range subpartition(p_201901_a) where month_code in(201902,201903) order by 1,2,3,4;
+
+create table range_range_copy WITH (SEGMENT=ON) as select * from range_range subpartition(p_201901_a) where month_code in(201902,201903);
+
+select * from range_range_copy order by 1,2,3,4;
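+
+-- A quick sanity sketch (an assumption: pg_class.parttype is 'n' for an
+-- ordinary non-partitioned table): the CTAS result should be a plain segment
+-- table, even though its rows were read from a single subpartition.
+select parttype from pg_class
+    where relname = 'range_range_copy'
+    and relnamespace = (select oid from pg_namespace where nspname = CURRENT_SCHEMA);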
+
+drop table range_range;
+drop table range_range_copy;
+
+--1.11 create index
+create table range_range_03
+(
+    c_int int,
+    c_char1 char(3000),
+    c_char2 char(5000),
+    c_char3 char(6000),
+    c_varchar1 varchar(3000),
+    c_varchar2 varchar(5000),
+    c_varchar3 varchar,
+    c_varchar4 varchar,
+    c_text1 text,
+    c_text2 text,
+    c_text3 text,
+    c int,
+    primary key(c,c_int)
+) with (parallel_workers=10, SEGMENT=ON)
+partition by range (c_int) subpartition by range (c_char1)
+(
+    partition p1 values less than(50)
+    (
+        subpartition p1_1 values less than('c'),
+        subpartition p1_2 values less than(maxvalue)
+    ),
+    partition p2 values less than(100)
+    (
+        subpartition p2_1 values less than('c'),
+        subpartition p2_2 values less than(maxvalue)
+    ),
+    partition p3 values less than(150)
+    (
+        subpartition p3_1 values less than('c'),
+        subpartition p3_2 values less than(maxvalue)
+    ),
+    partition p4 values less than(200)
+    (
+        subpartition p4_1 values less than('c'),
+        subpartition p4_2 values less than(maxvalue)
+    ),
+    partition p5 values less than(maxvalue)
+    (
+        subpartition p5_1 values less than('c'),
+        subpartition p5_2 values less than(maxvalue)
+    )
+) enable row movement;
+
+create index range_range_03_idx1 on range_range_03 (c_varchar1) local; --success
+
+create index range_range_03_idx2 on range_range_03 (c_varchar2) local (
+    partition cpt7_p1,
+    partition cpt7_p2,
+    partition cpt7_p3,
+    partition cpt7_p4,
+    partition cpt7_p5
+); --failed
+
+create index range_range_03_idx3 on range_range_03 (c_varchar3); --success, default global
+
+create index range_range_03_idx4 on range_range_03 (c_varchar4) global; --success
+
+create index range_range_03_idx5 on range_range_03 (c_varchar4) local; --failed, cannot be on the same column as an existing global index
+
+\d+ range_range_03
+
+select pg_get_tabledef('range_range_03');
+
+drop table range_range_03;
+
+--unique local index columns must contain the partition key
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code  VARCHAR2 ( 30 ) NOT NULL ,
+    user_no    VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt  int
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+) ENABLE ROW MOVEMENT;
+create unique index idx on range_range(month_code) local;
+create unique index idx1 on range_range(month_code, user_no) local;
+drop table range_range;
+
+-- the partition key has timestamp with time zone type
+drop table hash_range;
+CREATE TABLE hash_range
+(
+    col_1 int PRIMARY KEY USING INDEX,
+    col_2 int NOT NULL ,
+    col_3 int NOT NULL ,
+    col_4 int,
+    col_19 TIMESTAMP WITH TIME ZONE
+) WITH (SEGMENT=ON)
+PARTITION BY HASH (col_2) SUBPARTITION BY RANGE (col_19)
+( partition p_hash_1
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    partition p_hash_2,
+    PARTITION p_hash_3,
+    PARTITION p_hash_4,
+    PARTITION p_hash_5,
+    PARTITION p_hash_7
+) ENABLE ROW MOVEMENT;
+
+CREATE TABLE hash_range
+(
+    col_1 int PRIMARY KEY USING INDEX,
+    col_2 int NOT NULL ,
+    col_3 int NOT NULL ,
+    col_4 int,
+    col_19 TIMESTAMP WITH TIME ZONE
+) WITH (SEGMENT=ON)
+PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2)
+( partition p_hash_1
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    partition p_hash_2,
+    PARTITION p_hash_3,
+    PARTITION p_hash_4,
+    PARTITION p_hash_5,
+    PARTITION p_hash_7
+) ENABLE ROW MOVEMENT;
+drop table hash_range;
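+
+-- A minimal positive counterpart to the failing CREATE above (hypothetical
+-- table name; assumption: a RANGE subpartition bound must be coercible to the
+-- subpartition key type, so a TIMESTAMP WITH TIME ZONE key needs timestamp
+-- literals rather than integers):
+CREATE TABLE hash_range_tz
+(
+    col_1 int,
+    col_19 TIMESTAMP WITH TIME ZONE
+) WITH (SEGMENT=ON)
+PARTITION BY HASH (col_1) SUBPARTITION BY RANGE (col_19)
+( partition p_hash_1
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( '2020-01-01 00:00:00+08' ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    partition p_hash_2
+) ENABLE ROW MOVEMENT;
+drop table hash_range_tz;
+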
+--clean
+DROP SCHEMA segment_subpartition_createtable CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/segment_subpartition_ddl_index.sql b/src/test/regress/sql/segment_subpartition_ddl_index.sql
new file mode 100644
index 000000000..0d97a460d
--- /dev/null
+++ b/src/test/regress/sql/segment_subpartition_ddl_index.sql
@@ -0,0 +1,153 @@
+--
+----test that indexes are OK when using DDL grammar for subpartition----
+--
+DROP SCHEMA segment_subpartition_ddl_index CASCADE;
+CREATE SCHEMA segment_subpartition_ddl_index;
+SET CURRENT_SCHEMA TO segment_subpartition_ddl_index;
+
+SET enable_seqscan = OFF;
+SET enable_indexscan = ON;
+SET enable_indexonlyscan = ON;
+SET enable_bitmapscan = OFF;
+
+--
+--test for add/drop partition/subpartition
+--
+--1. first, we create a subpartitioned table, and indexes on the table
+CREATE TABLE range_list_sales1
+(
+    product_id INT4,
+    customer_id INT4,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (SEGMENT=ON)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id)
+(
+    PARTITION customer1 VALUES LESS THAN (200)
+    (
+        SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'),
+        SUBPARTITION customer1_channel4 VALUES ('9')
+    ),
+    PARTITION customer2 VALUES LESS THAN (500)
+    (
+        SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'),
+        SUBPARTITION customer2_channel2 VALUES (DEFAULT)
+    ),
+    PARTITION customer3 VALUES LESS THAN (800),
+    PARTITION customer4 VALUES LESS THAN (1200)
+    (
+        SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
+    )
+);
+INSERT INTO range_list_sales1 SELECT generate_series(1,1000),
+                                     generate_series(1,1000),
+                                     date_pli('2008-01-01', generate_series(1,1000)),
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%1000,
+                                     generate_series(1,1000);
+
+CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL;
+CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL;
+CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL;
+CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL;
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
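+
+-- An extra probe (a sketch, not part of the original checks; assumes the
+-- indexscan hint and the SUBPARTITION clause compose as they do elsewhere in
+-- these tests): local index partitions also serve scans restricted to a
+-- single subpartition.
+EXPLAIN(costs off) SELECT /*+ indexscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1 SUBPARTITION (customer1_channel1) WHERE customer_id < 100;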
+
+--2. adding a partition/subpartition will not influence the existing indexes
+ALTER TABLE range_list_sales1 ADD PARTITION customer5 VALUES LESS THAN (1500)
+    (
+        SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION customer5_channel3 VALUES ('6', '7', '8')
+    );
+ALTER TABLE range_list_sales1 ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE);
+ALTER TABLE range_list_sales1 MODIFY PARTITION customer5 ADD SUBPARTITION customer5_channel4 VALUES ('9');
+INSERT INTO range_list_sales1 SELECT generate_series(1001,2000),
+                                     generate_series(1,1000),
+                                     date_pli('2008-01-01', generate_series(1,1000)),
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%10,
+                                     generate_series(1,1000)%1000,
+                                     generate_series(1,1000);
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+
+--3. dropping a partition/subpartition with UPDATE GLOBAL INDEX keeps the global indexes usable
+ALTER TABLE range_list_sales1 DROP PARTITION customer3 UPDATE GLOBAL INDEX;
+ALTER TABLE range_list_sales1 DROP PARTITION FOR (700) UPDATE GLOBAL INDEX; --customer4
+ALTER TABLE range_list_sales1 DROP SUBPARTITION FOR (700, '9') UPDATE GLOBAL INDEX; --customer5_channel4
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
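+
+-- A usability probe (a sketch; assumption: openGauss exposes index usability
+-- in pg_index.indisusable, the flag toggled by ALTER INDEX ... UNUSABLE and
+-- restored by REBUILD). The next two cases drop partitions WITHOUT
+-- "update global index", so a check like this should flip from true to false
+-- until the index is rebuilt:
+select indisusable from pg_index where indexrelid = 'range_list_sales1_idx1'::regclass;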
+
+--4. if we drop a partition without UPDATE GLOBAL INDEX, the GPI becomes invalid; we can rebuild the index
+ALTER TABLE range_list_sales1 DROP PARTITION FOR (1600);
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+ALTER INDEX range_list_sales1_idx1 REBUILD;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+ALTER INDEX range_list_sales1_idx2 REBUILD;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+
+--5. if we drop a subpartition without UPDATE GLOBAL INDEX, the GPI becomes invalid; we can rebuild the index
+ALTER TABLE range_list_sales1 DROP SUBPARTITION customer5_channel3;
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+ALTER INDEX range_list_sales1_idx1 REBUILD;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1;
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+ALTER INDEX range_list_sales1_idx2 REBUILD;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1;
+
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1;
+EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1;
+
+DROP TABLE range_list_sales1;
+
+--finish, clean the environment
+DROP SCHEMA segment_subpartition_ddl_index CASCADE;
+RESET CURRENT_SCHEMA;
+RESET enable_seqscan;
+RESET enable_indexscan;
+RESET enable_indexonlyscan;
+RESET enable_bitmapscan;
diff --git a/src/test/regress/sql/segment_subpartition_gpi.sql b/src/test/regress/sql/segment_subpartition_gpi.sql
new file mode 100644
index 000000000..fd4301b22
--- /dev/null
+++ b/src/test/regress/sql/segment_subpartition_gpi.sql
@@ -0,0 +1,533 @@
+-- prepare
+DROP SCHEMA segment_subpartition_gpi
CASCADE; +CREATE SCHEMA segment_subpartition_gpi; +SET CURRENT_SCHEMA TO segment_subpartition_gpi; + +-- base function +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +-- unique +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create unique index idx_dept_code_global on range_list(dept_code) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +select * from range_list subpartition (p_201901_a); +select * from range_list subpartition (p_201901_b); +select count(*) from range_list; +--error +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select count(*) from range_list; + +delete from range_list; +drop index idx_dept_code_global; + +create unique index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '4', 1); +select * from range_list subpartition (p_201901_a); +select * from range_list subpartition (p_201901_b); +select * from range_list subpartition (p_201902_a); +select * from range_list subpartition (p_201902_b); +select count(*) from range_list; +--error +insert 
into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201902', '1', '2', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '2', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201902', '1', '3', 1); +insert into range_list values('201902', '2', '3', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '3', 1); +insert into range_list values('201902', '1', '4', 1); +insert into range_list values('201902', '2', '4', 1); +insert into range_list values('201903', '1', '4', 1); +insert into range_list values('201903', '2', '4', 1); +select count(*) from range_list; + +drop table range_list; + +-- truncate subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list truncate subpartition p_201901_a update global index; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list truncate subpartition p_201901_b; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 
2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +-- split subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values (default) + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values (default) + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list split subpartition p_201901_b values ('3') into +( + subpartition p_201901_b, + subpartition p_201901_c +) update global index; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list split subpartition p_201902_b values ('3') into +( + subpartition p_201902_b, + subpartition p_201902_c +); + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY 
RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( MAXVALUE ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range; + +create index idx_month_code_local on range_range(month_code) local; +create index idx_dept_code_global on range_range(dept_code) global; +create index idx_user_no_global on range_range(user_no) global; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +alter table range_range split subpartition p_201901_b at ('3') into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; + +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +alter table range_range split subpartition p_201902_b at ('3') into +( + subpartition p_201902_c, + subpartition p_201903_d +); + +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) primary key, + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); 
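+
+-- A companion probe for the relkind checks in this block (a sketch; assumption:
+-- pg_index's indisprimary/indisunique flags are set on the implicitly created
+-- range_range_pkey, whichever global/local form it takes):
+select i.indisprimary, i.indisunique
+    from pg_index i
+    where i.indexrelid = (select oid from pg_class
+        where relname = 'range_range_pkey'
+        and relnamespace = (select oid from pg_namespace where nspname = CURRENT_SCHEMA));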
+drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) primary key, + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) primary key, + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code, user_no) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, user_no) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES 
LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +-- truncate with gpi +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; + +create index idx on range_hash_02(col_1); + +truncate range_hash_02; + +drop table range_hash_02; + +-- clean +DROP SCHEMA segment_subpartition_gpi CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_scan.sql b/src/test/regress/sql/segment_subpartition_scan.sql new file mode 100644 index 000000000..4a4e9c45b --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_scan.sql @@ -0,0 +1,434 @@ +--prepare +DROP SCHEMA segment_subpartition_scan CASCADE; +CREATE SCHEMA segment_subpartition_scan; +SET CURRENT_SCHEMA TO segment_subpartition_scan; + +--scan +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +explain(costs off, verbose on) select * from range_list order by 1, 2, 3, 4; +select * from range_list order by 1, 2, 3, 4; + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code on range_list(dept_code) local; +create index idx_user_no on range_list(user_no) local; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +set enable_bitmapscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from 
range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +reset enable_seqscan; +reset enable_bitmapscan; + +drop table range_list; + +CREATE TABLE range_list +( + col_1 VARCHAR2 ( 30 ) , + col_2 VARCHAR2 ( 30 ) NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + ccol_4 VARCHAR2 ( 30 ), +col_5 VARCHAR2 ( 30 ), +col_6 VARCHAR2 ( 30 ), +col_7 VARCHAR2 ( 30 ), +col_8 VARCHAR2 ( 30 ), +col_9 VARCHAR2 ( 30 ), +col_10 VARCHAR2 ( 30 ), +col_11 VARCHAR2 ( 30 ), +col_12 VARCHAR2 ( 30 ), +col_13 VARCHAR2 ( 30 ), +col_14 VARCHAR2 ( 30 ), +col_15 VARCHAR2 ( 30 ), +col_16 VARCHAR2 ( 30 ), +col_17 VARCHAR2 ( 30 ), +col_18 VARCHAR2 ( 30 ), +col_19 VARCHAR2 ( 30 ), +col_20 VARCHAR2 ( 30 ), +col_21 VARCHAR2 ( 30 ), +col_22 VARCHAR2 ( 30 ), +col_23 VARCHAR2 ( 30 ), +col_24 VARCHAR2 ( 30 ), +col_25 VARCHAR2 ( 30 ), +col_26 VARCHAR2 ( 30 ), +col_27 VARCHAR2 ( 30 ), +col_28 VARCHAR2 ( 30 ), +col_29 VARCHAR2 ( 30 ), +col_30 VARCHAR2 ( 30 ), +col_31 VARCHAR2 ( 30 ), +col_32 VARCHAR2 ( 30 ), +col_33 VARCHAR2 ( 30 ), +col_34 VARCHAR2 ( 30 ), +col_35 VARCHAR2 ( 30 ), +col_36 VARCHAR2 ( 30 ), +col_37 VARCHAR2 ( 30 ), +col_38 VARCHAR2 ( 30 ), +col_39 VARCHAR2 ( 30 ), +col_40 VARCHAR2 ( 30 ), +col_41 VARCHAR2 ( 30 ), +col_42 VARCHAR2 ( 30 ), +col_43 VARCHAR2 ( 30 ), +col_44 VARCHAR2 ( 30 ), +col_45 VARCHAR2 ( 30 ), +col_46 VARCHAR2 ( 30 ), +col_47 VARCHAR2 ( 30 ), +col_48 VARCHAR2 ( 30 ), +col_49 VARCHAR2 ( 30 ), +col_50 VARCHAR2 ( 30 ), +col_51 VARCHAR2 ( 30 ), +col_52 VARCHAR2 ( 30 ), +col_53 VARCHAR2 ( 30 ), +col_54 VARCHAR2 ( 30 ), +col_55 VARCHAR2 ( 30 ), +col_56 VARCHAR2 ( 30 ), +col_57 VARCHAR2 ( 30 ), +col_58 VARCHAR2 ( 30 ), +col_59 VARCHAR2 ( 30 ), +col_60 VARCHAR2 ( 30 ), +col_61 VARCHAR2 ( 30 ), +col_62 VARCHAR2 ( 30 ), +col_63 VARCHAR2 ( 30 ), +col_64 VARCHAR2 ( 30 ), +col_65 VARCHAR2 ( 30 ), +col_66 VARCHAR2 ( 30 ), +col_67 VARCHAR2 ( 30 ), +col_68 VARCHAR2 ( 30 ), +col_69 VARCHAR2 ( 30 ), +col_70 VARCHAR2 ( 30 ), +col_71 VARCHAR2 ( 30 ), +col_72 VARCHAR2 ( 30 ), +col_73 VARCHAR2 ( 30 ), +col_74 VARCHAR2 ( 30 ), +col_75 VARCHAR2 ( 30 ), +col_76 VARCHAR2 ( 30 ), +col_77 VARCHAR2 ( 30 ), +col_78 VARCHAR2 ( 30 ), +col_79 VARCHAR2 ( 30 ), +col_80 VARCHAR2 ( 30 ), +col_81 VARCHAR2 ( 30 ), +col_82 VARCHAR2 ( 30 ), +col_83 VARCHAR2 ( 30 ) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( '-10' ) + ( +SUBPARTITION p_list_1_1 VALUES ( '-1' ), +SUBPARTITION p_list_1_2 VALUES ( '-2' ), +SUBPARTITION p_list_1_3 VALUES ( '-3' ), +SUBPARTITION p_list_1_4 VALUES ( '-4' ), +SUBPARTITION p_list_1_5 VALUES ( '-5' ), +SUBPARTITION p_list_1_6 VALUES ( '-6' ), +SUBPARTITION p_list_1_7 VALUES ( '-7' ), +SUBPARTITION p_list_1_8 VALUES ( '-8' ), +SUBPARTITION p_list_1_9 VALUES ( '-9' ), +SUBPARTITION p_list_1_10 VALUES ( '-10' ), +SUBPARTITION p_list_1_11 VALUES ( '-11' ), +SUBPARTITION p_list_1_12 VALUES ( '-12' ), +SUBPARTITION p_list_1_13 VALUES ( '-13' ), +SUBPARTITION p_list_1_14 VALUES ( '-14' ), +SUBPARTITION p_list_1_15 VALUES ( '-15' ), +SUBPARTITION p_list_1_16 VALUES ( '-16' ), +SUBPARTITION p_list_1_17 VALUES ( '-17' ), +SUBPARTITION p_list_1_18 VALUES ( '-18' ), +SUBPARTITION p_list_1_19 VALUES ( '-19' ), +SUBPARTITION p_list_1_20 VALUES ( '-20' ), +SUBPARTITION p_list_1_21 VALUES ( '-21' ), +SUBPARTITION p_list_1_22 
VALUES ( '-22' ), +SUBPARTITION p_list_1_23 VALUES ( '-23' ), +SUBPARTITION p_list_1_24 VALUES ( '-24' ), +SUBPARTITION p_list_1_25 VALUES ( '-25' ), +SUBPARTITION p_list_1_26 VALUES ( '-26' ), +SUBPARTITION p_list_1_27 VALUES ( '-27' ), +SUBPARTITION p_list_1_28 VALUES ( '-28' ), +SUBPARTITION p_list_1_29 VALUES ( '-29' ), +SUBPARTITION p_list_1_30 VALUES ( '-30' ), +SUBPARTITION p_list_1_31 VALUES ( '-31' ), +SUBPARTITION p_list_1_32 VALUES ( '-32' ), +SUBPARTITION p_list_1_33 VALUES ( '-33' ), +SUBPARTITION p_list_1_34 VALUES ( '-34' ), +SUBPARTITION p_list_1_35 VALUES ( '-35' ), +SUBPARTITION p_list_1_36 VALUES ( '-36' ), +SUBPARTITION p_list_1_37 VALUES ( '-37' ), +SUBPARTITION p_list_1_38 VALUES ( '-38' ), +SUBPARTITION p_list_1_39 VALUES ( '-39' ), +SUBPARTITION p_list_1_40 VALUES ( '-40' ), +SUBPARTITION p_list_1_41 VALUES ( '-41' ), +SUBPARTITION p_list_1_42 VALUES ( '-42' ), +SUBPARTITION p_list_1_43 VALUES ( '-43' ), +SUBPARTITION p_list_1_44 VALUES ( '-44' ), +SUBPARTITION p_list_1_45 VALUES ( '-45' ), +SUBPARTITION p_list_1_46 VALUES ( '-46' ), +SUBPARTITION p_list_1_47 VALUES ( '-47' ), +SUBPARTITION p_list_1_48 VALUES ( '-48' ), +SUBPARTITION p_list_1_49 VALUES ( '-49' ), +SUBPARTITION p_list_1_50 VALUES ( '-50' ), +SUBPARTITION p_list_1_51 VALUES ( default ) + ), + PARTITION p_range_2 VALUES LESS THAN('10 ') + ( +SUBPARTITION p_list_2_1 VALUES ( '1' ), +SUBPARTITION p_list_2_2 VALUES ( '2' ), +SUBPARTITION p_list_2_3 VALUES ( '3' ), +SUBPARTITION p_list_2_4 VALUES ( '4' ), +SUBPARTITION p_list_2_5 VALUES ( '5' ), +SUBPARTITION p_list_2__6 VALUES ( '-6' ), +SUBPARTITION p_list_2_6 VALUES ( '6' ), +SUBPARTITION p_list_2_7 VALUES ( '7' ), +SUBPARTITION p_list_2_8 VALUES ( '8' ), +SUBPARTITION p_list_2_9 VALUES ( '9' ), +SUBPARTITION p_list_2_10 VALUES ( '10' ), +SUBPARTITION p_list_2_11 VALUES ( '11' ), +SUBPARTITION p_list_2_12 VALUES ( '12' ), +SUBPARTITION p_list_2_13 VALUES ( '13' ), +SUBPARTITION p_list_2_14 VALUES ( '14' ), +SUBPARTITION p_list_2_15 VALUES ( '15' ), +SUBPARTITION p_list_2_16 VALUES ( '16' ), +SUBPARTITION p_list_2_17 VALUES ( '17' ), +SUBPARTITION p_list_2_18 VALUES ( '18' ), +SUBPARTITION p_list_2_19 VALUES ( '19' ), +SUBPARTITION p_list_2_20 VALUES ( '20' ), +SUBPARTITION p_list_2_21 VALUES ( '21' ), +SUBPARTITION p_list_2_22 VALUES ( '22' ), +SUBPARTITION p_list_2_23 VALUES ( '23' ), +SUBPARTITION p_list_2_24 VALUES ( '24' ), +SUBPARTITION p_list_2_25 VALUES ( '25' ), +SUBPARTITION p_list_2_26 VALUES ( '26' ), +SUBPARTITION p_list_2_27 VALUES ( '27' ), +SUBPARTITION p_list_2_28 VALUES ( '28' ), +SUBPARTITION p_list_2_29 VALUES ( '29' ), +SUBPARTITION p_list_2_30 VALUES ( '30' ), +SUBPARTITION p_list_2_31 VALUES ( '31' ), +SUBPARTITION p_list_2_32 VALUES ( '32' ), +SUBPARTITION p_list_2_33 VALUES ( '33' ), +SUBPARTITION p_list_2_34 VALUES ( '34' ), +SUBPARTITION p_list_2_35 VALUES ( '35' ), +SUBPARTITION p_list_2_36 VALUES ( '36' ), +SUBPARTITION p_list_2_37 VALUES ( '37' ), +SUBPARTITION p_list_2_38 VALUES ( '38' ), +SUBPARTITION p_list_2_39 VALUES ( '39' ), +SUBPARTITION p_list_2_40 VALUES ( '40' ), +SUBPARTITION p_list_2_41 VALUES ( '41' ), +SUBPARTITION p_list_2_42 VALUES ( '42' ), +SUBPARTITION p_list_2_43 VALUES ( '43' ), +SUBPARTITION p_list_2_44 VALUES ( '44' ), +SUBPARTITION p_list_2_45 VALUES ( '45' ), +SUBPARTITION p_list_2_46 VALUES ( '46' ), +SUBPARTITION p_list_2_47 VALUES ( '47' ), +SUBPARTITION p_list_2_48 VALUES ( '48' ), +SUBPARTITION p_list_2_49 VALUES ( '49' ), +SUBPARTITION p_list_2_50 VALUES ( '50' ), +SUBPARTITION p_list_2_51 
VALUES ( default ) + ), + PARTITION p_range_3 VALUES LESS THAN( '20 '), + + PARTITION p_range_4 VALUES LESS THAN( '30' ) + ( + SUBPARTITION p_list_4_1 VALUES ( default ) + ), + PARTITION p_range_5 VALUES LESS THAN( '40' ) + ( + SUBPARTITION p_list_5_1 VALUES ( '41' ), +SUBPARTITION p_list_5_2 VALUES ( '42' ), +SUBPARTITION p_list_5_3 VALUES ( '43' ), +SUBPARTITION p_list_5_4 VALUES ( '44' ), +SUBPARTITION p_list_5_5 VALUES ( '45' ), +SUBPARTITION p_list_5_6 VALUES ( '46' ), +SUBPARTITION p_list_5_7 VALUES ( '47' ), +SUBPARTITION p_list_5_8 VALUES ( '48' ), +SUBPARTITION p_list_5_9 VALUES ( '49' ), +SUBPARTITION p_list_5_10 VALUES ( '50' ), +SUBPARTITION p_list_5_11 VALUES ( '51' ), +SUBPARTITION p_list_5_12 VALUES ( '52' ), +SUBPARTITION p_list_5_13 VALUES ( '53' ), +SUBPARTITION p_list_5_14 VALUES ( '54' ), +SUBPARTITION p_list_5_15 VALUES ( '55' ), +SUBPARTITION p_list_5_16 VALUES ( '56' ), +SUBPARTITION p_list_5_17 VALUES ( '57' ), +SUBPARTITION p_list_5_18 VALUES ( '58' ), +SUBPARTITION p_list_5_19 VALUES ( '59' ), +SUBPARTITION p_list_5_20 VALUES ( '60' ), +SUBPARTITION p_list_5_21 VALUES ( '61' ), +SUBPARTITION p_list_5_22 VALUES ( '62' ), +SUBPARTITION p_list_5_23 VALUES ( '63' ), +SUBPARTITION p_list_5_24 VALUES ( '64' ), +SUBPARTITION p_list_5_25 VALUES ( '65' ), +SUBPARTITION p_list_5_26 VALUES ( '66' ), +SUBPARTITION p_list_5_27 VALUES ( '67' ), +SUBPARTITION p_list_5_28 VALUES ( '68' ), +SUBPARTITION p_list_5_29 VALUES ( '69' ), +SUBPARTITION p_list_5_30 VALUES ( '70' ), +SUBPARTITION p_list_5_31 VALUES ( '71' ), +SUBPARTITION p_list_5_32 VALUES ( '72' ), +SUBPARTITION p_list_5_33 VALUES ( '73' ), +SUBPARTITION p_list_5_34 VALUES ( '74' ), +SUBPARTITION p_list_5_35 VALUES ( '75' ), +SUBPARTITION p_list_5_36 VALUES ( '76' ), +SUBPARTITION p_list_5_37 VALUES ( '77' ), +SUBPARTITION p_list_5_38 VALUES ( '78' ), +SUBPARTITION p_list_5_39 VALUES ( '79' ), +SUBPARTITION p_list_5_40 VALUES ( '80' ), +SUBPARTITION p_list_5_41 VALUES ( '81' ), +SUBPARTITION p_list_5_42 VALUES ( '82' ), +SUBPARTITION p_list_5_43 VALUES ( '83' ), +SUBPARTITION p_list_5_44 VALUES ( '84' ), +SUBPARTITION p_list_5_45 VALUES ( '85' ), +SUBPARTITION p_list_5_46 VALUES ( '86' ), +SUBPARTITION p_list_5_47 VALUES ( '87' ), +SUBPARTITION p_list_5_48 VALUES ( '88' ), +SUBPARTITION p_list_5_49 VALUES ( '89' ), +SUBPARTITION p_list_5_50 VALUES ( '90' ), +SUBPARTITION p_list_5_51 VALUES ( '91' ), +SUBPARTITION p_list_5_52 VALUES ( '92' ), +SUBPARTITION p_list_5_53 VALUES ( '93' ), +SUBPARTITION p_list_5_54 VALUES ( '94' ), +SUBPARTITION p_list_5_55 VALUES ( '95' ), +SUBPARTITION p_list_5_56 VALUES ( '96' ), +SUBPARTITION p_list_5_57 VALUES ( '97' ), +SUBPARTITION p_list_5_58 VALUES ( '98' ), +SUBPARTITION p_list_5_59 VALUES ( '99' ), +SUBPARTITION p_list_5_60 VALUES ( '100' ), +SUBPARTITION p_list_5_61 VALUES ( '101' ), +SUBPARTITION p_list_5_62 VALUES ( '102' ), +SUBPARTITION p_list_5_63 VALUES ( '103' ), +SUBPARTITION p_list_5_64 VALUES ( '104' ), +SUBPARTITION p_list_5_65 VALUES ( '105' ), +SUBPARTITION p_list_5_66 VALUES ( '106' ), +SUBPARTITION p_list_5_67 VALUES ( '107' ), +SUBPARTITION p_list_5_68 VALUES ( '108' ), +SUBPARTITION p_list_5_69 VALUES ( '109' ), +SUBPARTITION p_list_5_70 VALUES ( '110' ), +SUBPARTITION p_list_5_71 VALUES ( '111' ), +SUBPARTITION p_list_5_72 VALUES ( '112' ), +SUBPARTITION p_list_5_73 VALUES ( '113' ), +SUBPARTITION p_list_5_74 VALUES ( '114' ), +SUBPARTITION p_list_5_75 VALUES ( '115' ), +SUBPARTITION p_list_5_76 VALUES ( '116' ), +SUBPARTITION p_list_5_77 VALUES ( '117' ), 
+SUBPARTITION p_list_5_78 VALUES ( '118' ), +SUBPARTITION p_list_5_79 VALUES ( '119' ), +SUBPARTITION p_list_5_80 VALUES ( default ) + ), + PARTITION p_range_6 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; +create index on range_list(col_2) local; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; +ALTER INDEX range_list_col_2_idx MODIFY PARTITION p_list_5_14_col_2_idx UNUSABLE; +explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100; +drop table range_list; + +create table range_range_jade(jid int,jn int,name varchar2) WITH (SEGMENT=ON) partition by range (jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), +subpartition hrp1_2 values less than(26), +subpartition hrp1_3 values less than(36), + subpartition hrp1_4 values less than(maxvalue)), + partition hrp2 values less than(26)( + subpartition hrp2_1 values less than(maxvalue)), + partition hrp3 values less than(36)( + subpartition hrp3_1 values less than(16), +subpartition hrp3_2 values less than(26), + subpartition hrp3_3 values less than(maxvalue)), + partition hrp4 values less than(maxvalue)( + subpartition hrp4_1 values less than(16), + subpartition hrp4_2 values less than(maxvalue)) +)ENABLE ROW MOVEMENT; +-- no errors +set enable_partition_opfusion = on; +insert into range_range_jade values(1,2,'jade'); +reset enable_partition_opfusion; +drop table range_range_jade; + +drop table list_range_02; +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, +col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; + +create index index_01 on list_range_02(col_2) local ; + +explain (costs off) select * from list_range_02 where col_2 in + (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1) + where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 ); +drop table list_range_02; +DROP SCHEMA 
segment_subpartition_scan CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_select.sql b/src/test/regress/sql/segment_subpartition_select.sql new file mode 100644 index 000000000..a841ba26b --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_select.sql @@ -0,0 +1,302 @@ +--prepare +DROP SCHEMA segment_subpartition_select CASCADE; +CREATE SCHEMA segment_subpartition_select; +SET CURRENT_SCHEMA TO segment_subpartition_select; + +--select +CREATE TABLE t1 +( + c1 int, + c2 int +) WITH (SEGMENT=ON); +insert into t1 values(generate_series(201901,201910), generate_series(1,10)); + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '3', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '3', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + +select * from range_list where user_no is not null order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; +select * from range_list partition (p_201902) order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) order by 1, 2, 3, 4; +select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + + + +CREATE TABLE range_hash +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a, + SUBPARTITION p_201901_b + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a, + SUBPARTITION p_201902_b + ) +); + +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201902', '2', '1', 1); +insert into range_hash values('201902', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); +insert into range_hash values('201903', '1', '1', 1); +insert into range_hash values('201903', '2', '1', 1); + +select * from range_hash order by 1, 2, 3, 4; + +select * from range_hash where user_no is not null order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +select * from range_hash where user_no is not 
null and dept_code in ('2') order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; +select * from range_hash partition (p_201901) order by 1, 2, 3, 4; +select * from range_hash partition (p_201902) order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) order by 1, 2, 3, 4; +select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( '3' ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); + +select * from range_range order by 1, 2, 3, 4; + +select * from range_range where user_no is not null order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code = user_no order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4; +select * from range_range partition (p_201901) order by 1, 2, 3, 4; +select * from range_range partition (p_201902) order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) order by 1, 2, 3, 4; +select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4; + +--view +create view view_temp as select * from range_list; +select * from view_temp; +--error +select * from view_temp partition (p_201901); +select * from view_temp partition (p_201902); +drop view view_temp; + +with tmp1 as (select * from range_list ) select * from tmp1 order by 1, 2, 3, 4; +with tmp1 as (select * from range_list partition (p_201901)) select * from tmp1 order by 1, 2, 3, 4; + +--join normal table +select * from range_list left join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list left join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_list right join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list right join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_list full join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list full join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_list inner join t1 on range_list.month_code = 
t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + + +select * from range_hash left join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash left join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash right join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash right join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash full join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash full join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + + +select * from range_range left join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range left join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range right join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range right join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range full join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range full join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +select * from range_range inner join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6; +select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6; + +--join range_list and range_hash + +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list left join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list right join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list full join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +--join range_hash and range_range + +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_hash right join range_range on range_hash.month_code = 
range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8; +select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8; + +drop table list_range_02; +CREATE TABLE IF NOT EXISTS list_range_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY range (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ), + PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20) + ( + SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ), + SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30) + ( + SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 ) + ), + PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50) + ( + SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ), + SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ), + SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ), + SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ), + SUBPARTITION p_range_6_5 VALUES LESS 
THAN( 50 ) + ), + PARTITION p_list_7 VALUES(default) +) ENABLE ROW MOVEMENT; +create index index_01 on list_range_02(col_2) local ; + +INSERT INTO list_range_02 VALUES (GENERATE_SERIES(0, 19),GENERATE_SERIES(0, 1000),GENERATE_SERIES(0, 99)); + explain (costs off, verbose on) select * from list_range_02 where col_2 >500 and col_2 <8000 order by col_1; + +drop index index_01; +drop table list_range_02; + +create table pjade(jid int,jn int,name varchar2) WITH (SEGMENT=ON) partition by range(jid) subpartition by range(jn) +( + partition hrp1 values less than(16)( + subpartition hrp1_1 values less than(16), + subpartition hrp1_2 values less than(maxvalue)), + partition hrp2 values less than(maxvalue)( + subpartition hrp3_1 values less than(16), + subpartition hrp3_3 values less than(maxvalue)) +); + +create table cjade(jid int,jn int,name varchar2) WITH (SEGMENT=ON); +insert into pjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +insert into cjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack'); +select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3; +select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3; +drop table pjade; +drop table cjade; + +DROP SCHEMA segment_subpartition_select CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_split.sql b/src/test/regress/sql/segment_subpartition_split.sql new file mode 100644 index 000000000..9959c7f12 --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_split.sql @@ -0,0 +1,268 @@ +--prepare +DROP SCHEMA segment_subpartition_split CASCADE; +CREATE SCHEMA segment_subpartition_split; +SET CURRENT_SCHEMA TO segment_subpartition_split; + +--split subpartition +-- list subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '3', '1', 1); +select * from list_list order by 1,2,3,4; + +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; +alter table list_list split subpartition p_201901_b values (2) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +select * from list_list subpartition (p_201901_a) order by 1,2,3,4; +select * from list_list subpartition (p_201901_b) order by 1,2,3,4; +select * from list_list subpartition (p_201901_c) order by 1,2,3,4; + +select * from list_list partition (p_201901); + +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; +alter table list_list split subpartition 
p_201902_b values (2, 3) into +( + subpartition p_201902_b, + subpartition p_201902_c +); +select * from list_list subpartition (p_201902_a) order by 1,2,3,4; +select * from list_list subpartition (p_201902_b) order by 1,2,3,4; +select * from list_list subpartition (p_201902_c) order by 1,2,3,4; + +--error +alter table list_list split subpartition p_201902_a values (3) into +( + subpartition p_201902_ab, + subpartition p_201902_ac +); + +drop table list_list; + +-- range subpartition +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range order by 1,2,3,4; + +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +alter table range_range split subpartition p_201901_b at (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +); +select * from range_range subpartition (p_201901_a) order by 1,2,3,4; +select * from range_range subpartition (p_201901_b) order by 1,2,3,4; +select * from range_range subpartition (p_201901_c) order by 1,2,3,4; +select * from range_range subpartition (p_201901_d) order by 1,2,3,4; + +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +alter table range_range split subpartition p_201902_b at (3) into +( + subpartition p_201902_c, + subpartition p_201902_d +); +select * from range_range subpartition (p_201902_a) order by 1,2,3,4; +select * from range_range subpartition (p_201902_b) order by 1,2,3,4; +select * from range_range subpartition (p_201902_c) order by 1,2,3,4; +select * from range_range subpartition (p_201902_d) order by 1,2,3,4; + +drop table range_range; +--test syntax +CREATE TABLE IF NOT EXISTS list_hash +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10 ) + ( + SUBPARTITION p_hash_2_1 , + SUBPARTITION p_hash_2_2 , + SUBPARTITION p_hash_2_3 , + SUBPARTITION p_hash_2_4 , + SUBPARTITION p_hash_2_5 + ), + PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20), + PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30 ) + ( + SUBPARTITION p_hash_4_1 + ), + PARTITION p_list_5 VALUES (default) + ( + SUBPARTITION p_hash_5_1 + ), + PARTITION p_list_6 VALUES (31,32,33,34,35,36,37,38,39,40) + ( + SUBPARTITION p_hash_6_1 , + SUBPARTITION p_hash_6_2 , + SUBPARTITION p_hash_6_3 + ) +) ENABLE ROW MOVEMENT ; + +alter table 
list_hash split subPARTITION p_hash_2_3 at(-10) into ( subPARTITION add_p_01 , subPARTITION add_p_02 ); + +drop table list_hash; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); +alter table range_range split subpartition p_201901_b values (3) into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; +drop table range_range; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list split subpartition p_201901_b at (2, 3) into +( + subpartition p_201901_b, + subpartition p_201901_c +); +drop table list_list; + +CREATE TABLE IF NOT EXISTS list_list_02 +( + col_1 int , + col_2 int , + col_3 int , + col_4 int +) WITH (SEGMENT=ON) +PARTITION BY list (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 ) + ( + SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ), + SUBPARTITION p_list_1_2 VALUES ( default ) + ), + PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9) + ( + SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_2_2 VALUES ( default ), + SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19) + ( + SUBPARTITION p_list_3_2 VALUES ( default ) + ), + PARTITION p_list_4 VALUES(default ), + PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29) + ( + SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ), + SUBPARTITION p_list_5_2 VALUES ( default ), + SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19), + SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ), + SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 ) + ), + PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39), + PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49) + ( + SUBPARTITION p_list_7_1 VALUES ( default ) + ) +) ENABLE ROW MOVEMENT; + +alter table list_list_02 split PARTITION for (5) at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 ); +drop table list_list_02; +--clean +DROP SCHEMA segment_subpartition_split CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_truncate.sql b/src/test/regress/sql/segment_subpartition_truncate.sql new file mode 100644 index 000000000..03ccbb808 --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_truncate.sql @@ -0,0 +1,71 @@ +--prepare +DROP SCHEMA segment_subpartition_truncate 
CASCADE; +CREATE SCHEMA segment_subpartition_truncate; +SET CURRENT_SCHEMA TO segment_subpartition_truncate; + +--truncate partition/subpartition +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list; + +select * from list_list partition (p_201901); +alter table list_list truncate partition p_201901; +select * from list_list partition (p_201901); + +select * from list_list partition (p_201902); +alter table list_list truncate partition p_201902; +select * from list_list partition (p_201902); +select * from list_list; + +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); + +select * from list_list subpartition (p_201901_a); +alter table list_list truncate subpartition p_201901_a; +select * from list_list subpartition (p_201901_a); + +select * from list_list subpartition (p_201901_b); +alter table list_list truncate subpartition p_201901_b; +select * from list_list subpartition (p_201901_b); + +select * from list_list subpartition (p_201902_a); +alter table list_list truncate subpartition p_201902_a; +select * from list_list subpartition (p_201902_a); + +select * from list_list subpartition (p_201902_b); +alter table list_list truncate subpartition p_201902_b; +select * from list_list subpartition (p_201902_b); + +select * from list_list; + +drop table list_list; +DROP SCHEMA segment_subpartition_truncate CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/segment_subpartition_update.sql b/src/test/regress/sql/segment_subpartition_update.sql new file mode 100644 index 000000000..2dab577e3 --- /dev/null +++ b/src/test/regress/sql/segment_subpartition_update.sql @@ -0,0 +1,151 @@ +--prepare +DROP SCHEMA segment_subpartition_update CASCADE; +CREATE SCHEMA segment_subpartition_update; +SET CURRENT_SCHEMA TO segment_subpartition_update; + +--update +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)DISABLE ROW MOVEMENT; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into 
range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + +--error +update range_list set month_code = '201903'; +--error +update range_list set dept_code = '2'; + +update range_list set user_no = '2'; +select * from range_list order by 1, 2, 3, 4; + +-- test for upsert and merge into, both should report error +insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt=1; + +CREATE TABLE temp_table +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON); +insert into temp_table values('201802', '1', '1', 1), ('201901', '2', '1', 1), ('201702', '1', '1', 1); +MERGE INTO range_list t1 +USING temp_table t2 +ON (t1.dept_code = t2.dept_code) +WHEN MATCHED THEN + UPDATE SET t1.month_code = t2.month_code WHERE t1.dept_code > 1 +WHEN NOT MATCHED THEN + INSERT VALUES (t2.month_code, t2.dept_code, t2.user_no, t2.sales_amt) WHERE t2.sales_amt = 1; + +drop table temp_table; +drop table range_list; + +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (SEGMENT=ON) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +)ENABLE ROW MOVEMENT; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +select * from range_list order by 1, 2, 3, 4; + +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; +update range_list set dept_code = '2' where month_code = '201902'; +select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4; +select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4; + +select * from range_list partition (p_201901) order by 1, 2, 3, 4; +select * from range_list partition (p_201902) order by 1, 2, 3, 4; +update range_list set month_code = '201903' where month_code = '201902'; +select * from range_list partition (p_201901) order by 1, 2, 3, 4; +select * from range_list partition (p_201902) order by 1, 2, 3, 4; + +drop table range_list; + +-- FOREIGN KEY +drop table tb_02; +CREATE TABLE tb_02 +( + col_1 int PRIMARY KEY, + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (SEGMENT=ON); + +drop table range_range_02 cascade; +CREATE TABLE range_range_02 +( + col_1 int , + col_2 int , + col_3 VARCHAR2 ( 30 ) , + col_4 int , +FOREIGN KEY(col_1) REFERENCES tb_02(col_1) +) WITH (SEGMENT=ON) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 80 ) + ( + SUBPARTITION p_range_2_1 
VALUES LESS THAN( 50 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE ) + ) +); + +insert into tb_02 values(0,0,0,0); +insert into range_range_02 values(0,0,0,0); + +update tb_02 set col_1=8 where col_2=0; +drop table range_range_02 cascade; +drop table tb_02; +DROP SCHEMA segment_subpartition_update CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/select_where_func.sql b/src/test/regress/sql/select_where_func.sql new file mode 100644 index 000000000..15f29df63 --- /dev/null +++ b/src/test/regress/sql/select_where_func.sql @@ -0,0 +1,215 @@ +-- check compatibility -- +show sql_compatibility; -- expect A -- +drop schema if exists sch2; +create schema sch2; +set current_schema = sch2; + +create table tbl_1 (a int, b int); +insert into tbl_1 values (1,1); +insert into tbl_1 values (2,2); +insert into tbl_1 values (3,3); +insert into tbl_1 values (4,4); +insert into tbl_1 values (5,5); + +create or replace function func_1() returns integer AS $Summary$ +declare +begin +return 1; +end; +$Summary$ language plpgsql; + +create or replace function func_1(a int) returns integer AS $Summary$ +declare +begin +return a; +end; +$Summary$ language plpgsql; + +drop schema if exists sch1; +create schema sch1; +create or replace function sch1.func_1() returns integer AS $Summary$ +declare +begin +return 4; +end; +$Summary$ language plpgsql; + +-- package +create or replace package aa +is +function func_2() return integer; +end aa; +/ + +create or replace package body aa +is +function func_2 return integer +is +begin + return 3; +end; +end aa; +/ + +select aa.func_2(); + +create or replace package sch1.aa +is +function func_2() return integer; +end aa; +/ + +create or replace package body sch1.aa +is +function func_2 return integer +is +begin + return 5; +end; +end aa; +/ + +select sch1.aa.func_2(); + +select * from tbl_1 where a = func_1(); +select * from tbl_1 where a = func_1(2); +select * from tbl_1 where a = func_1; +-- pkg.fun +select * from tbl_1 where a = aa.func_2(); +select * from tbl_1 where a = aa.func_2; +-- schema.fun +select * from tbl_1 where a = sch1.func_1(); +select * from tbl_1 where a = sch1.func_1; +-- schema.pkg.fun +select * from tbl_1 where a = sch1.aa.func_2(); +select * from tbl_1 where a = sch1.aa.func_2; + +create or replace package a2 +as +v integer := 1; +function func_11() return integer; +end a2; +/ + +create or replace package body a2 +is +function func_11 return integer +is + b integer; +begin + return 1; +end; +end a2; +/ + +create or replace package a3 +is +function func_111() return integer; +end a3; +/ + +create or replace package body a3 +is +function func_111 return integer +is + cur sys_refcursor; + temp integer; +begin + open cur for + select a from tbl_1 t where (t.a = a2.v or t.b = 3); + fetch cur into temp; + RAISE INFO '%' , temp; + fetch cur into temp; + RAISE INFO '%' , temp; + return 3; +end; +end a3; +/ + +select a3.func_111(); + +create table t1(c1 int); +insert into t1 values(1),(2),(3),(4),(5),(6); + +create or replace package call_test as +function func1() return int; +function func2(c1 out int) return int; +function func3(c1 int default 3) return int; +procedure proc1(); +procedure proc2(c1 out int); +procedure proc3(c1 int default 3, c2 out int); +end call_test; +/ +create or replace package body call_test as +function func1() return int as +begin + return 1; +end; +function func2(c1 out int) return int as +begin + return 2; +end; +function func3(c1 int default 3) return int as +begin + return 3; +end; +procedure proc1() as 
+begin + raise info 'proc1'; +end; +procedure proc2(c1 out int) as +begin + c1 := 5; +end; +procedure proc3(c1 int default 3, c2 out int) as +begin + c2 := 6; +end; +end call_test; +/ + +begin +call_test.proc1; +end; +/ + +select * from t1 where c1 = call_test.func1; +select * from t1 where c1 = call_test.func2; +select * from t1 where c1 = call_test.func3; +select * from t1 where c1 = call_test.proc1; +select * from t1 where c1 = call_test.proc2; +select * from t1 where c1 = call_test.proc3; + +declare +var int; +begin +call_test.func2; +end; +/ + +declare +var int; +begin +select c1 into var from t1 where c1 = call_test.func1; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.func2; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.func3; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.proc2; +raise info 'var is %',var; +select c1 into var from t1 where c1 = call_test.proc3; +raise info 'var is %',var; +--call call_test.proc1; +end; +/ + +drop package call_test; +drop package sch1.aa; +drop package aa; +drop package a2; +drop package a3; +drop table tbl_1; +drop function func_1; +drop schema if exists sch1 cascade; +drop schema if exists sch2 cascade; diff --git a/src/test/regress/sql/single_node_function_commit_rollback.sql b/src/test/regress/sql/single_node_function_commit_rollback.sql new file mode 100644 index 000000000..40a2d442d --- /dev/null +++ b/src/test/regress/sql/single_node_function_commit_rollback.sql @@ -0,0 +1,999 @@ +create or replace function test_without_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 2; +end; +/ + +call test_without_commit(); +select * from test_commit; + +create or replace function test_empty_sp() return void +as +begin + insert into test_commit select 1; + insert into test_commit select 2; + insert into test_commit select 3; +end; +/ + +call test_empty_sp(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; +end; +/ + +call test_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_option() return void +shippable +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; +end; +/ + +call test_commit_insert_option(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_delete() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + commit; +end; +/ + +call test_commit_insert_delete(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_update() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + commit; +end; +/ + +call test_commit_insert_update(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_update_delete() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + 
insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + delete from test_commit where a = 1; + commit; +end; +/ + +call test_commit_insert_update_delete(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_delete_update() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; +end; +/ + +call test_commit_insert_delete_update(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + insert into test_commit select 2, 2; + commit; +end; +/ + +call test_commit_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_commit1() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + commit; + update test_commit set b = 3 where a = 2; + delete from test_commit where a = 1; + commit; +end; +/ + +call test_commit_commit1(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + insert into test_commit select 2, 2; + rollback; +end; +/ + +call test_commit_rollback(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_rollback1() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + commit; + update test_commit set b = 3 where a = 2; + delete from test_commit where a = 1; + rollback; +end; +/ + +call test_commit_rollback1(); +select * from test_commit; +drop table test_commit; + +create or replace function test_rollback_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + rollback; + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 3, 3; + insert into test_commit select 4, 4; + insert into test_commit select 5, 5; + update test_commit set b = 6 where a = 5; + delete from test_commit where a = 3; + commit; +end; +/ + +call test_rollback_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_exception_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + raise exception 'raise exception after commit'; +exception + when others then + insert into test_commit select 2, 2; + rollback; +end; +/ + +call test_commit_insert_exception_rollback(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_exception_commit_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + raise 
exception 'raise exception after commit'; +exception + when others then + insert into test_commit select 2, 2; + commit; + rollback; +end; +/ + +call test_commit_insert_exception_commit_rollback(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ + +call test_commit_insert_raise_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_delete_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ + +call test_commit_insert_delete_raise_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_update_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ + +call test_commit_insert_update_raise_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_update_delete_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + delete from test_commit where a = 1; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ + +call test_commit_insert_update_delete_raise_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_delete_update_raise_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; + RAISE EXCEPTION 'After commit'; +end; +/ + +call test_commit_insert_delete_update_raise_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ + +call test_commit_insert_commit_raise(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_delete_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ + +call test_commit_insert_delete_commit_raise(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_update_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ + +call 
test_commit_insert_update_commit_raise(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_update_delete_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + update test_commit set b = 3 where a = 1; + delete from test_commit where a = 1; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ + +call test_commit_insert_update_delete_commit_raise(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_insert_delete_update_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + RAISE EXCEPTION 'Before commit'; + commit; +end; +/ + +call test_commit_insert_delete_update_commit_raise(); +select * from test_commit; +drop table test_commit; + +create or replace function test_exception_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_exception_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_exception_commit_commit_raise() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; + RAISE EXCEPTION 'After commit'; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_exception_commit_commit_raise(); +select * from test_commit; +drop table test_commit; + +create or replace function test_exception_commit_raise_commit() return void +as +begin +drop table if exists test_commit; +create table test_commit(a int, b int); +insert into test_commit select 1, 1; +insert into test_commit select 2, 2; +delete from test_commit where a = 1; +update test_commit set b = 3 where a = 2; +RAISE EXCEPTION 'After commit'; +commit; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_exception_commit_raise_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_gg_1() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int, b int); + insert into test_commit select 1, 1; + insert into test_commit select 2, 2; + delete from test_commit where a = 1; + update test_commit set b = 3 where a = 2; + commit; + insert into test_commit select 3, 3; + RAISE EXCEPTION 'After commit'; +EXCEPTION + when raise_exception then + rollback; + insert into test_commit select 4, 4; + commit; +end; +/ + +call test_gg_1(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_exception() return void +is +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + commit; + delete from test_commit; + commit; + update test_commit set a=3; + commit; +exception + WHEN OTHERS THEN + insert into test_commit 
select 2; + commit; +end; +/ + +call test_commit_exception(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit2() return void +is +begin + drop table if exists test_commit; + create table test_commit(a int); + FOR i IN REVERSE 3..0 LOOP + insert into test_commit select i; + commit; + END LOOP; + FOR i IN REVERSE 2..4 LOOP + update test_commit set a=i; + commit; + END LOOP; +exception +WHEN OTHERS THEN +-- FOR i IN REVERSE 200...101 LOOP + insert into test_commit select 4; +-- END LOOP; + commit; +end; +/ + +call test_commit2(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit3() return void +is +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + commit; + call test_commit2(); + update test_commit set a=2; + commit; +exception +WHEN OTHERS THEN + insert into test_commit select 3; + commit; +end; +/ + +call test_commit3(); +select * from test_commit; +drop table test_commit; + +create or replace function test_rollback_with_exception() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + rollback; +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_rollback_with_exception(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function_without_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 3; + commit; + test_without_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_nest_function_without_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 3; + commit; + test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_nest_function(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function1() return void +as +begin + test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_nest_function1(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function2() return void +as +begin + test_commit(); +end; +/ + +call test_nest_function2(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function_rollback() return void +as +begin + test_without_commit(); + rollback; +end; +/ + +call test_nest_function_rollback(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function_select() return void +as +begin + insert into tx select 3; + commit; + select test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_nest_function_select(); +select * from test_commit; +drop table test_commit; + +create or replace function test_nest_function_calll() return void +as +begin + insert into tx select 3; + commit; + call test_commit(); +EXCEPTION + when raise_exception then + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_nest_function_calll(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_exception_commit() 
return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + raise exception 'Exception rollback'; + insert into test_commit select 2; +EXCEPTION + when raise_exception then + insert into test_commit select 3; + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_commit_exception_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_exception_commit_commit() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + raise exception 'Exception rollback'; + insert into test_commit select 2; +EXCEPTION + when raise_exception then + insert into test_commit select 3; + commit; + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_commit_exception_commit_commit(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_exception_commit_rollback() return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + raise exception 'Exception rollback'; + insert into test_commit select 2; +EXCEPTION + when raise_exception then + insert into test_commit select 3; + rollback; + RAISE EXCEPTION '(%)', SQLERRM; +end; +/ + +call test_commit_exception_commit_rollback(); +select * from test_commit; +drop table test_commit; + +create or replace function test_rollback return void +as +begin + drop table if exists test_commit; + create table test_commit(a int); + insert into test_commit select 1; + rollback; + insert into test_commit select 2; +end; +/ + +call test_rollback(); +select * from test_commit; +drop table test_commit; + +create or replace function test_commit_inout(p inout int) return void +as +declare +begin + p = 3; + commit; + --DBE_OUTPUT.print_line('Cursor status:' + p); +end; +/ + +select test_commit_inout(1); + +create or replace function test_rollback_inout(p inout int) return void +as +declare +begin + p = 3; + rollback; + --DBE_OUTPUT.print_line('Cursor status:' + p); +end; +/ + +select test_rollback_inout(1); + +create or replace function test_rollback_out(p out int) return void +as +declare +begin + p = 3; + rollback; + --DBE_OUTPUT.print_line('Cursor status:' + p); +end; +/ + +select test_rollback_out(); + +create or replace function test_rollback1() return void +as +declare +begin + create table test1(col1 int); + insert into test1 values(1); + rollback; +end; +/ + +call test_rollback1(); + +create type func_type_04 as ( v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying, v_cur refcursor); +create table test_cursor_table(c1 int,c2 varchar); +insert into test_cursor_table values(1,'Jack'),(2,'Rose'); + +CREATE or replace function func_base13_03(v_tablefield character varying, v_tablefield2 character varying,v_tablename character varying) return refcursor +AS +v_cur refcursor; +begin + open v_cur for + 'select '||v_tablefield||' as tablecode, '||v_tablefield2||' as tablename from '||v_tablename|| ' order by 1,2;'; + return v_cur; +end; +/ + +CREATE or replace function func_base13_04(v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying) return void +AS + v_record func_type_04; + v_cur refcursor; + num int; +begin + num := 0; + v_cur := func_base13_03(v_tablefield, v_tablefield2, v_tablename); + loop + fetch v_cur into v_record; + num := num+1; + raise notice 'the num is %(%)', num,v_record; + EXIT WHEN 
v_cur%notfound; + end loop; +end; +/ + +call func_base13_04('c1','c2','test_cursor_table'); + +CREATE or replace function func_base13_05(v_tablefield character varying, v_tablefield2 character varying,v_tablename character varying) return refcursor +AS +v_cur refcursor; +begin + open v_cur for + 'select '||v_tablefield||' as tablecode, '||v_tablefield2||' as tablename from '||v_tablename|| ' order by 1,2;'; + commit; + return v_cur; +end; +/ + +CREATE or replace function func_base13_06(v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying) return void +AS + v_record func_type_04; + v_cur refcursor; +begin + select func_base13_05(v_tablefield, v_tablefield2, v_tablename) into v_cur; + loop + fetch v_cur into v_record; + raise notice '(%)', v_record; + EXIT WHEN v_cur%notfound; + end loop; +end; +/ + +call func_base13_06('c1','c2','test_cursor_table'); + +CREATE or replace function func_base13_07(v_tablefield character varying, v_tablefield2 character varying,v_tablename character varying) return refcursor +AS +v_cur refcursor; +begin + open v_cur for + 'select '||v_tablefield||' as tablecode, '||v_tablefield2||' as tablename from '||v_tablename|| ' order by 1,2;'; + commit; + return v_cur; +end; +/ + +CREATE or replace function func_base13_08(v_tablefield character varying, v_tablefield2 character varying, v_tablename character varying) return void +AS + v_record func_type_04; + v_cur refcursor; +begin + select func_base13_07(v_tablefield, v_tablefield2, v_tablename) into v_cur; + + loop + fetch v_cur into v_record; + raise notice 'before commit(%)', v_record; + commit; + raise notice 'after commit(%)', v_record; + EXIT WHEN v_cur%notfound; + end loop; + return; +end; +/ + +call func_base13_08('c1','c2','test_cursor_table'); +select * from test_cursor_table; +drop table if exists test_cursor_table; + +CREATE TABLE EXAMPLE1(COL1 INT); + +CREATE OR REPLACE FUNCTION FUNCTION_EXAMPLE1 RETURN INT +AS +BEGIN + FOR i IN 0..20 LOOP + INSERT INTO EXAMPLE1 VALUES(i); + IF mod(i,2) = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; + RETURN 1; +END; +/ + +select FUNCTION_EXAMPLE1(); +select * from FUNCTION_EXAMPLE1() where 1=1; +update EXAMPLE1 set COL1=666 where COL1=2 and FUNCTION_EXAMPLE1(); +select (select FUNCTION_EXAMPLE1()); +select (select * from FUNCTION_EXAMPLE1() where 1=1); + +create or replace function func1() return void +as +declare +a int; +begin +a := 1/0; +exception + WHEN division_by_zero THEN + raise notice '% % %',sqlstate,SQLCODE,sqlerrm; +end; +/ +call func1(); +drop function func1; +drop table if exists EXAMPLE1; +drop function FUNCTION_EXAMPLE1; +drop function test_without_commit; +drop function test_empty_sp; +drop function test_commit; +drop function test_commit_insert_option; +drop function test_commit_insert_delete; +drop function test_commit_insert_update; +drop function test_commit_insert_update_delete; +drop function test_commit_insert_delete_update; +drop function test_commit_commit; +drop function test_commit_commit1; +drop function test_commit_rollback; +drop function test_commit_rollback1; +drop function test_rollback_commit; +drop function test_commit_insert_exception_rollback; +drop function test_commit_insert_exception_commit_rollback; +drop function test_commit_insert_raise_commit; +drop function test_commit_insert_delete_raise_commit; +drop function test_commit_insert_update_raise_commit; +drop function test_commit_insert_update_delete_raise_commit; +drop function test_commit_insert_delete_update_raise_commit; 
+drop function test_commit_insert_commit_raise; +drop function test_commit_insert_delete_commit_raise; +drop function test_commit_insert_update_commit_raise; +drop function test_commit_insert_update_delete_commit_raise; +drop function test_commit_insert_delete_update_commit_raise; +drop function test_exception_commit; +drop function test_exception_commit_commit_raise; +drop function test_exception_commit_raise_commit; +drop function test_gg_1; +drop function test_commit_exception; +drop function test_commit2; +drop function test_commit3; +drop function test_rollback_with_exception; +drop function test_nest_function_without_commit; +drop function test_nest_function; +drop function test_nest_function1; +drop function test_nest_function2; +drop function test_nest_function_rollback; +drop function test_nest_function_select; +drop function test_nest_function_calll; +drop function test_commit_exception_commit; +drop function test_commit_exception_commit_commit; +drop function test_commit_exception_commit_rollback; +drop function test_rollback; +drop function test_commit_inout; +drop function test_rollback_inout; +drop function test_rollback_out; +drop function test_rollback1; +drop function func_base13_03; +drop function func_base13_04; +drop function func_base13_05; +drop function func_base13_06; +drop function func_base13_07; +drop function func_base13_08; + diff --git a/src/test/regress/sql/single_node_insert.sql b/src/test/regress/sql/single_node_insert.sql index 2f8c4fe61..e6ffdfefb 100644 --- a/src/test/regress/sql/single_node_insert.sql +++ b/src/test/regress/sql/single_node_insert.sql @@ -37,14 +37,14 @@ select col1, col2, char_length(col3) from inserttest; drop table inserttest; -create table s1_DTS2019021501700(id int, num int); -create sequence ss1_DTS2019021501700; -select setval('ss1_DTS2019021501700', 10); -select * from ss1_DTS2019021501700; -alter table s1_DTS2019021501700 alter column id set default nextval('ss1_DTS2019021501700'); -insert into s1_DTS2019021501700 (num) values (11); -insert into s1_DTS2019021501700 (num) values (12); -select * from s1_DTS2019021501700 order by id; -select * from ss1_DTS2019021501700; -drop table s1_DTS2019021501700; -drop sequence ss1_DTS2019021501700; \ No newline at end of file +create table s1_TESTTABLE(id int, num int); +create sequence ss1_TESTTABLE; +select setval('ss1_TESTTABLE', 10); +select * from ss1_TESTTABLE; +alter table s1_TESTTABLE alter column id set default nextval('ss1_TESTTABLE'); +insert into s1_TESTTABLE (num) values (11); +insert into s1_TESTTABLE (num) values (12); +select * from s1_TESTTABLE order by id; +select * from ss1_TESTTABLE; +drop table s1_TESTTABLE; +drop sequence ss1_TESTTABLE; \ No newline at end of file diff --git a/src/test/regress/sql/single_node_mergeinto.sql b/src/test/regress/sql/single_node_mergeinto.sql index 8235d28ef..a1385a749 100644 --- a/src/test/regress/sql/single_node_mergeinto.sql +++ b/src/test/regress/sql/single_node_mergeinto.sql @@ -246,6 +246,71 @@ delete from dtt where a = 5; -- now dtt: (1,1),(2,2) fkt:(1,1) merge into fkt using dtt on (dtt.a=fkt.a) when matched then update set fkt.b = 3 when not matched then insert values(dtt.a, dtt.b); select * from fkt; +-- test for merge with where clauses +create table explain_t1 (a int, b int); +create table explain_t2 (f1 int, f2 int); +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when not matched then + insert values(1,3) where tt2.f1 = 1; + +explain (verbose on, costs off) merge into 
explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when matched then + update set b = 10 where explain_t1.a = 1; + +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when matched then + update set b = 10 where explain_t1.a = 1 +when not matched then + insert values(1,3) where tt2.f1 = 1; + +explain (verbose on, costs off) merge into explain_t1 + using explain_t2 tt2 on explain_t1.a = tt2.f1 +when matched then + update set b = 10 where tt2.f2 = 1; + +-- duplicate alias on source table +explain (verbose on, costs off) merge into explain_t2 t2 using ( + select + t1.a, + t1.b, + t1.a aa, + t1.b bb + from + explain_t1 t1 +) tmp on (t2.f1 = tmp.b) +when matched THEN + update + set + t2.f2 = tmp.aa + where + t2.f1 = tmp.bb; + +explain (verbose on, costs off) merge /*+ leading((t2 t1)) */ into explain_t2 t2 using ( + select + t1.a, + t1.b, + t1.a aa, + t1.b bb + from + explain_t1 t1 +) tmp on (t2.f1 = tmp.b) +when not matched THEN + insert values(1,3) where tmp.bb = 1; + +explain (verbose on, costs off) merge /*+ leading((t1 t2)) */ into explain_t2 t2 using ( + select + t1.a, + t1.b, + t1.a aa, + t1.b bb + from + explain_t1 t1 +) tmp on (t2.f1 = tmp.b) +when not matched THEN + insert values(1,3) where tmp.bb = 1; ------------------------------------------------ -- clean up diff --git a/src/test/regress/sql/single_node_path.sql b/src/test/regress/sql/single_node_path.sql index 7e69b539a..45bbee229 100644 --- a/src/test/regress/sql/single_node_path.sql +++ b/src/test/regress/sql/single_node_path.sql @@ -36,3 +36,27 @@ SELECT '' AS count, f1 AS closed_path FROM PATH_TBL WHERE isclosed(f1); SELECT '' AS count, pclose(f1) AS closed_path FROM PATH_TBL; SELECT '' AS count, popen(f1) AS open_path FROM PATH_TBL; + +-- test type coercion for index match +set enable_seqscan = off; +create table test2(column1 float8 not null, column2 char not null collate "C", column3 char(100) not null collate "C", column4 int); +create table test3(like test2 including all); +create index on test2(column1); +create index on test2(column2); +create index on test2(column3); +create index on test2(column4); +explain (costs off) update test2 set column4 = 0 from test3 where test2.column1 > test3.column2 and test2.column2 like test3.column2 and test3.column3 < test3.column3; +explain (costs off) select * from test2, test3 where test2.column1 > test3.column1 and test2.column2 like test3.column2; +explain (costs off) select /*+ nestloop(test2 test3) */* from test2, test3 where test2.column2 = test3.column2::varchar; + +/* cannot use index for bpchar <-> text */ +explain (costs off) merge into test2 using (select '1' AS c1, '5278' as c2) V ON (test2.column3 = V.c2) +WHEN NOT MATCHED THEN INSERT (column1, column2, column3, column4) VALUES (V.c1,1,V.c2,1); + +/* index with type coercion is acceptable */ +create index on test2(text(column3)); +explain (costs off) merge into test2 using (select '1' AS c1, '5278' as c2) V ON (test2.column3 = V.c2) +WHEN NOT MATCHED THEN INSERT (column1, column2, column3, column4) VALUES (V.c1,1,V.c2,1); + +drop table test2; +drop table test3; diff --git a/src/test/regress/sql/single_node_union.sql b/src/test/regress/sql/single_node_union.sql index 99a483c7f..3878e9c33 100644 --- a/src/test/regress/sql/single_node_union.sql +++ b/src/test/regress/sql/single_node_union.sql @@ -109,6 +109,27 @@ SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q2 FROM int8_tbl; SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl; +-- Test that single node 
stream plan handles INTERSECT and EXCEPT correctly +set query_dop = 10; + +SELECT q2 FROM int8_tbl INTERSECT SELECT q1 FROM int8_tbl ORDER BY 1; + +SELECT q2 FROM int8_tbl INTERSECT ALL SELECT q1 FROM int8_tbl ORDER BY 1; + +SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1; + +SELECT q2 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl ORDER BY 1; + +SELECT q2 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q1 FROM int8_tbl ORDER BY 1; + +SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl; + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q2 FROM int8_tbl; + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl; + +set query_dop = 1; + -- -- Mixed types -- @@ -196,6 +217,27 @@ explain (costs off) SELECT * FROM t2) t WHERE ab = 'ab'; +-- Test that single node stream plan handles UNION correctly +set query_dop = 10; + +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION ALL + SELECT * FROM t2) t + WHERE ab = 'ab'; + +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION + SELECT * FROM t2) t + WHERE ab = 'ab'; + +set query_dop = 1; + + + -- -- Test that ORDER BY for UNION ALL can be pushed down to inheritance -- children. @@ -271,6 +313,59 @@ SELECT * FROM SELECT 2 AS t, 4 AS x) ss WHERE x > 3; +-- Test that single node stream plan handles UNION sub-selects correctly +set query_dop = 10; + +explain (costs off) + SELECT * FROM + (SELECT 1 AS t, * FROM tenk1 a + UNION ALL + SELECT 2 AS t, * FROM tenk1 b) c + WHERE t = 2; + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, 2 AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4; + +SELECT * FROM + (SELECT 1 AS t, 2 AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4; + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, generate_series(1,10) AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + +SELECT * FROM + (SELECT 1 AS t, generate_series(1,10) AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, (random()*3)::int AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x > 3; + +SELECT * FROM + (SELECT 1 AS t, (random()*3)::int AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x > 3; + +set query_dop = 1; + -- Test proper handling of parameterized appendrel paths when the -- potential join qual is expensive create function expensivefunc(int) returns int diff --git a/src/test/regress/sql/single_node_varchar.sql b/src/test/regress/sql/single_node_varchar.sql index 58d29ca4b..343967383 100644 --- a/src/test/regress/sql/single_node_varchar.sql +++ b/src/test/regress/sql/single_node_varchar.sql @@ -64,3 +64,19 @@ INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde'); INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd '); SELECT '' AS four, * FROM VARCHAR_TBL; + +create table tab_1(col1 varchar(3)); +create table tab_2(col2 char(3)); +insert into tab_2 values(' '); +insert into tab_1 select col2 from tab_2; +select * from tab_1 where col1 is null; +select * from tab_1 where col1=' '; + +delete from tab_1; +set behavior_compat_options = 'char_coerce_compat'; +insert into tab_1 select col2 from tab_2; +select * from tab_1 where col1 is null; +select * from tab_1 where col1=' '; +set behavior_compat_options = ''; +drop table tab_1; +drop table tab_2; diff --git a/src/test/regress/sql/sqlcode_cursor.sql b/src/test/regress/sql/sqlcode_cursor.sql new file mode 100644 index 000000000..f862b3e61 --- /dev/null +++ b/src/test/regress/sql/sqlcode_cursor.sql @@ -0,0 +1,717 @@ +create schema hw_sqlcode; +set current_schema = 
hw_sqlcode; +/* ---------anonymous block------------ */ +/* no exception */ +DECLARE + a int; +BEGIN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +/* exception */ +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +CREATE or replace procedure func1_1 IS + --PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN others THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +DECLARE + a int; +BEGIN + func1_1(); + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +/* commit rollback */ +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + COMMIT; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + ROLLBACK; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +/* PRAGMA AUTONOMOUS_TRANSACTION; */ +CREATE OR REPLACE FUNCTION func5() RETURN void +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +END; +/ + +DECLARE + a int; +BEGIN + func5(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CREATE OR REPLACE FUNCTION func5_1() RETURN void +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + RAISE NOTICE 'AUTONOMOUS_TRANSACTION SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; + func5_1(); +END; +/ +CREATE or replace procedure func5_2 IS + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN others THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +DECLARE + a int; +BEGIN + func5_2(); + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +/* CALL function */ +CREATE OR REPLACE FUNCTION func7() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +END; +/ + +DECLARE + a int; +BEGIN + func7(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +/* RAISE ERROR */ +DECLARE + a int; +BEGIN + RAISE sqlstate 'AA666'; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + + +/* ---------function------------ */ +/* no exception */ +CREATE OR REPLACE FUNCTION func1() RETURN void +AS +BEGIN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +CALL func1(); + +/* exception */ +CREATE OR REPLACE FUNCTION func2() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func2(); + +/* commit rollback */ +CREATE OR REPLACE FUNCTION func3() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + COMMIT; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func3(); + +CREATE OR REPLACE FUNCTION func4() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO 
THEN + ROLLBACK; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func4(); + +/* PRAGMA AUTONOMOUS_TRANSACTION; */ +CREATE OR REPLACE FUNCTION func5() RETURN void +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +END; +/ + +CREATE OR REPLACE FUNCTION func6() RETURN void +AS +DECLARE + a int; +BEGIN + func5(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func6(); + +/* CALL function */ +CREATE OR REPLACE FUNCTION func7() RETURN void +AS +DECLARE + a int; +BEGIN + a := 1/0; +END; +/ + +CREATE OR REPLACE FUNCTION func8() RETURN void +AS +DECLARE + a int; +BEGIN + func7(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func8(); +/* RAISE ERROR */ +CREATE OR REPLACE FUNCTION func9() RETURN void +AS +DECLARE + a int; +BEGIN + RAISE sqlstate 'AA666'; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL func9(); + + +/* ---------PROCEDURE------------ */ +/* no exception */ +CREATE OR REPLACE PROCEDURE proc1() +AS +BEGIN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ + +CALL proc1(); + +/* exception */ +CREATE OR REPLACE PROCEDURE proc2() +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc2(); + +/* commit rollback */ +CREATE OR REPLACE PROCEDURE proc3() +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + COMMIT; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc3(); + +CREATE OR REPLACE PROCEDURE proc4() +AS +DECLARE + a int; +BEGIN + a := 1/0; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + ROLLBACK; + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc4(); + +/* PRAGMA AUTONOMOUS_TRANSACTION; */ +CREATE OR REPLACE PROCEDURE proc5() +AS +DECLARE + PRAGMA AUTONOMOUS_TRANSACTION; + a int; +BEGIN + a := 1/0; +END; +/ + +CREATE OR REPLACE PROCEDURE proc6() +AS +DECLARE + a int; +BEGIN + proc5(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc6(); + +/* CALL function */ +CREATE OR REPLACE PROCEDURE proc7() +AS +DECLARE + a int; +BEGIN + a := 1/0; +END; +/ + +CREATE OR REPLACE PROCEDURE proc8() +AS +DECLARE + a int; +BEGIN + proc7(); +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc8(); +/* RAISE ERROR */ +CREATE OR REPLACE PROCEDURE proc9() +AS +DECLARE + a int; +BEGIN + RAISE sqlstate 'AA666'; +EXCEPTION + WHEN DIVISION_BY_ZERO THEN + RAISE NOTICE 'SQLSTATE = %, SQLCODE = %, SQLERRM = %',SQLSTATE,SQLCODE,SQLERRM; +END; +/ +CALL proc9(); + +DROP SCHEMA hw_sqlcode CASCADE; + +create schema hw_cursor_state; +set current_schema = hw_cursor_state; +set behavior_compat_options='COMPAT_CURSOR'; +/* ---------anonymous block------------ */ + +/*create*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +create table tb_test(col1 int); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; 
+BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/* select */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +select 1 into v_count; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/* insert */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +insert into tb_test select 1; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/* update */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/* delete */ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +delete from tb_test; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + + +/*Same layer*/ + +CREATE OR REPLACE PROCEDURE proc_test1() +as +v_count int; +BEGIN +v_count := 1; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +CREATE OR REPLACE PROCEDURE proc_test2() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +proc_test2(); +proc_test1(); +end +/ + +/*EXCEPTION*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +update tb_test11 set col1=2; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +EXCEPTION +when others then +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test11 set col1=2; +EXCEPTION +when others then +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/*COMMIT ROLLBACK*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +COMMIT; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; 
+BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +ROLLBACK; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/* PRAGMA AUTONOMOUS_TRANSACTION */ +CREATE OR REPLACE PROCEDURE proc_test() +as +DECLARE +v_count int; +PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN +v_count := 1; +update tb_test set col1=2; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ + +/*drop*/ +CREATE OR REPLACE PROCEDURE proc_test() +as +v_count int; +BEGIN +v_count := 1; +drop table tb_test; +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +DECLARE +v_count int; +BEGIN +v_count := 1; +proc_test(); +RAISE NOTICE '%',v_count||','||SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end +/ +create table staff(id int, name varchar2(10)); +insert into staff values(1, 'xiaoming1'); +insert into staff values(2, 'xiaoming2'); +insert into staff values(3, 'xiaoming'); +insert into staff values(4, 'xiaoming4'); + +CREATE OR REPLACE FUNCTION fun_cursor1() return void AS +DECLARE +BEGIN +insert into staff values(3, 'xiaoming'); +dbe_output.print_line('cursor after insert'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +SAVEPOINT my_savepoint; +dbe_output.print_line('cursor after savepoint'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +update staff set name = 'wdc1' where id = 1; +dbe_output.print_line('cursor after update'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +ROLLBACK TO SAVEPOINT my_savepoint; +dbe_output.print_line('cursor after rollback to savepoint'); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +CREATE OR REPLACE PROCEDURE fun_cursor2() AS +DECLARE +BEGIN +fun_cursor1(); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +IF SQL%FOUND THEN +dbe_output.print_line('cursor effective'); +END IF; +delete from staff where id = 3; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +call fun_cursor2(); + +CREATE OR REPLACE FUNCTION fun_cursor1() return void AS +DECLARE +BEGIN +insert into staff values(3, 'xiaoming'); +update staff set name = 'zcna' where id = 1; +--commit; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +RAISE division_by_zero; +end; +/ + +CREATE OR REPLACE PROCEDURE fun_cursor2() AS +DECLARE +BEGIN +fun_cursor1(); +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +IF SQL%FOUND THEN +dbe_output.print_line('cursor effective'); +END IF; +delete from staff where id = 3; +RAISE NOTICE '%',SQL%FOUND 
||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +EXCEPTION +WHEN division_by_zero THEN +RAISE NOTICE 'test:% ... %',SQLCODE,SQLSTATE; +RAISE NOTICE '%',SQL%FOUND ||','||SQL%NOTFOUND ||','||SQL%ISOPEN || ',' || SQL%ROWCOUNT; +end; +/ + +call fun_cursor2(); + +set behavior_compat_options = ''; +DROP SCHEMA hw_cursor_state CASCADE; + + diff --git a/src/test/regress/sql/sqlldr/.gitkeep b/src/test/regress/sql/sqlldr/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/test/regress/sql/sqlldr/load_to_copy_basic.sql b/src/test/regress/sql/sqlldr/load_to_copy_basic.sql new file mode 100644 index 000000000..c9b516a7e --- /dev/null +++ b/src/test/regress/sql/sqlldr/load_to_copy_basic.sql @@ -0,0 +1,82 @@ +-- setup +create table SQLLDR_TBL +( + ID NUMBER, + NAME VARCHAR2(20), + CON VARCHAR2(20), + DT DATE +); + +select copy_summary_create(); +select copy_error_log_create(); + +-- comments of load data +load truncate into table sqlldr_tbl +-- comments in load data +fields terminated by ',' +TRAILING NULLCOLS; + +load data truncate into table sqlldr_tbl fields terminated by ','; +load append into table sqlldr_tbl fields terminated '|'; +load data replace into table sqlldr_tbl fields terminated by '\t' TRAILING NULLCOLS; +load into table sqlldr_tbl fields terminated '|'; +load data insert into table sqlldr_tbl fields terminated by '\t' TRAILING NULLCOLS; + +load data infile 'test.csv' truncate into table sqlldr_tbl fields terminated by ',' TRAILING NULLCOLS; +load data infile 'test_ver.txt' append into table sqlldr_tbl fields terminated by '|'; +load data infile 'test_tab.txt' replace into table sqlldr_tbl fields terminated '\t'; +load data infile 'test_tab.txt' into table sqlldr_tbl fields terminated '\t'; +load data infile 'test_tab.txt' insert into table sqlldr_tbl fields terminated '\t'; + +load data infile 'test.csv' truncate into table sqlldr_tbl fields terminated by ',' TRAILING NULLCOLS; +load data infile 'test_ver.txt' append into table sqlldr_tbl fields terminated by '|'; +load data infile 'test_tab.txt' replace into table sqlldr_tbl fields terminated '\t'; +load data infile 'test_ver.txt' into table sqlldr_tbl fields terminated by '|'; +load data infile 'test_tab.txt' insert into table sqlldr_tbl fields terminated '\t'; + +load data infile 'test.csv' truncate into table sqlldr_tbl fields terminated by ',,' TRAILING NULLCOLS; +load data infile 'test_ver.txt' append into table sqlldr_tbl fields terminated by '||'; +load data infile 'test_tab.txt' replace into table sqlldr_tbl fields terminated '\t\t'; +load data infile 'test_ver.txt' into table sqlldr_tbl fields terminated by '||'; +load data infile 'test_tab.txt' insert into table sqlldr_tbl fields terminated '\t\t'; + +-- characterset +load data characterset utf8 infile 'test_tab.txt' truncate into table sqlldr_tbl; +load data characterset 'utf8' infile 'test_tab.txt' replace into table sqlldr_tbl; +load data characterset "utf8" infile 'test_tab.txt' replace into table sqlldr_tbl; +load data characterset AL32UTF8 infile 'test_tab.txt' replace into table sqlldr_tbl; +load data characterset al32utf8 infile 'test_tab.txt' replace into table sqlldr_tbl; +load data characterset zhs16gbk infile 'test_tab.txt' replace into table sqlldr_tbl; +load data characterset zhs32gb18030 infile 'test_tab.txt' replace into table sqlldr_tbl; + +-- when +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (1-1) = '1' trailing nullcols; +load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-2) = '|' 
trailing nullcols;
+load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-4) = 'XY' trailing nullcols;
+
+-- load when exceptions
+load data infile "test.txt" truncate into table sqlldr_tbl WHEN (0-1) = '1';
+load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-0) = '|';
+load data infile "test.txt" truncate into table sqlldr_tbl WHEN (2-1) = 'XY';
+load data infile "test.txt" truncate into table sqlldr_tbl WHEN (-2-1) = 'XY';
+
+-- copy when exceptions
+\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (0-1) = '40';
+\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (2-0) = '40';
+\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (3-1) = '40';
+\COPY sqlldr_tbl FROM STDIN ENCODING 'utf8' DELIMITER ',' WHEN (-3-1) = '40';
+
+-- options
+OPTIONS() load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(skip=-1) load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(skip=0) load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(skip=100) load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(errors=-1) load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(errors=2) load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(errors=10) load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(data='file.csv') load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(data="file.csv") load data infile "test.txt" truncate into table sqlldr_tbl;
+OPTIONS(data="file.csv", skip=10, errors=64) load data infile "test.txt" truncate into table sqlldr_tbl;
+
+-- teardown
+drop table sqlldr_tbl;
diff --git a/src/test/regress/sql/sw_bugfix-1.sql b/src/test/regress/sql/sw_bugfix-1.sql
new file mode 100644
index 000000000..0ff32a29b
--- /dev/null
+++ b/src/test/regress/sql/sw_bugfix-1.sql
@@ -0,0 +1,433 @@
+set client_min_messages = error;
+SET CLIENT_ENCODING='UTF8';
+set current_schema=swtest;
+
+/* invalid data type */
+SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT(NAME),ID,CHA,VCH,TEX,DAT,TIM,TIS,PID,PCHA,PVCH,PTEX,PDAT,PTIM,PTIS
+FROM TEST_HCB_FQB
+START WITH ID=1
+CONNECT BY prior ID=PID
+ORDER SIBLINGS BY NAME ASC;
+
+-- invalid use of connect_by_root; it will be treated as a regular column and report a column-does-not-exist error
+SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT
+FROM test_hcb_ptb
+START WITH (ID=169 or ID=168) and CHA in ('afi','afg','afh')
+CONNECT BY ID=PRIOR PID and CHA=PRIOR PCHA and VCH=PRIOR PVCH and DAT=PRIOR PDAT and TIM=PRIOR PTIM AND TIS=PRIOR PTIS
+order by 1;
+
+SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT name
+FROM test_hcb_ptb
+START WITH (ID=169 or ID=168) and CHA in ('afi','afg','afh')
+CONNECT BY ID=PRIOR PID and CHA=PRIOR PCHA and VCH=PRIOR PVCH and DAT=PRIOR PDAT and TIM=PRIOR PTIM AND TIS=PRIOR PTIS
+order by 1;
+
+SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT(name)
+FROM test_hcb_ptb
+START WITH (ID=169 or ID=168) and CHA in ('afi','afg','afh')
+CONNECT BY ID=PRIOR PID and CHA=PRIOR PCHA and VCH=PRIOR PVCH and DAT=PRIOR PDAT and TIM=PRIOR PTIM AND TIS=PRIOR PTIS
+order by 1;
+
+/* Unsupported StartWith Scenarios */
+explain(costs off)
+select * from test_hcb_ptbc t1 start with t1.id = 11 connect by prior t1.id = t1.pid;
+select * from test_hcb_ptbc t1 start with t1.id = 11 connect by prior t1.id = t1.pid;
+SELECT t1.id,t1.pid,t1.name,level FROM test_hcb_ptb 
t1,test_hcb_ptb t2 WHERE t1.id=t2.id START WITH t1.id=141 CONNECT BY PRIOR t1.id=t1.pid FOR UPDATE OF t2 NOWAIT; +SELECT t1.id, t1.pid,t1.name,level FROM core_066 t1 START WITH id = 117 CONNECT BY PRIOR id=pid FOR UPDATE; + +/* connect by root scenarios */ +select pid x,id,CONNECT_BY_ROOT ID from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; +select pid x,id,CONNECT_BY_ROOT ID alias_id from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; +select pid x,id,CONNECT_BY_ROOT t1.ID from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; +select pid x,id,CONNECT_BY_ROOT t1.ID alias_id from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; + +/* infinite loop issues */ +SELECT LEVEL,NAME,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME, '/'),CONNECT_BY_ROOT(ID) +FROM test_swcb_a +START WITH ID='00118' +CONNECT BY PRIOR ID=PID +ORDER SIBLINGS BY NAME; + +/* fromlist startwith for single table */ +select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id start with t1.id=1 CONNECT BY PRIOR t1.id = t1.pid; +explain (costs off) select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id start with t1.id=1 CONNECT BY PRIOR t1.id = t1.pid; + +CREATE OR REPLACE FUNCTION test_hcb_pro1(i_id in int) return int +AS +o_out int; +BEGIN +select count(*) into o_out from TEST_HCB_FQB t1 START WITH t1.id = i_id +CONNECT BY PRIOR t1.id = t1.pid; +return o_out; +END; +/ + +select test_hcb_pro1(11); +drop PROCEDURE test_hcb_pro1; + +/* startwith dealing with subqueries */ +select tt.id,tt.name from (select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id) tt +start with tt.id=1 CONNECT BY PRIOR tt.id = tt.pid ; + + +explain (costs off) select tt.id,tt.name from (select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id) tt +start with tt.id=1 CONNECT BY PRIOR tt.id = tt.pid ; + +select test.id,test.pid,test.name +from +(select t1.id id, t1.pid pid, t1.name name from TEST_HCB_FQB t1 + union + select t2.id id, t2.pid pid, t2.name name from TEST_HCB_FQB t2) test +start with test.id = 12 +connect by prior test.id = test.pid; + +/* startwith dealing with subqueries without alias */ +SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') +FROM (SELECT * FROM test_hcb_ptb) +START WITH CHA IN ('afi','afg','afh') +CONNECT BY PRIOR ID=PID +ORDER SIBLINGS BY NAME; + +SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') +FROM (SELECT * FROM test_hcb_ptb) +START WITH CHA IN ('afi','afg','afh') +CONNECT BY PRIOR ID=PID +ORDER SIBLINGS BY 1; + +SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') +FROM (SELECT * FROM test_hcb_ptb) +START WITH CHA IN ('afi','afg','afh') +CONNECT BY PRIOR ID=PID +ORDER SIBLINGS BY 999; + +SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') +FROM (SELECT * FROM test_hcb_ptb) +START WITH CHA IN ('afi','afg','afh') +CONNECT BY PRIOR ID=PID +ORDER SIBLINGS BY 1, LEVEL; + +SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') +FROM (SELECT * FROM test_hcb_ptb) +START WITH CHA IN ('afi','afg','afh') +CONNECT BY PRIOR ID=PID +ORDER SIBLINGS BY 1, HUAWEI; + +/* check siblings ordering */ +SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT(NAME) +FROM test_hcb_ptb +START WITH (ID=168 or ID=169) +CONNECT BY ID = PRIOR PID +ORDER 
SIBLINGS BY NAME ASC;
+
+-- connect_by_root/sys_connect_by_path() unsupported cases
+explain
+SELECT *, LEVEL, connect_by_isleaf, connect_by_iscycle, connect_by_root name_desc, sys_connect_by_path(level, '@')
+FROM test_area
+START WITH name = '中国'
+CONNECT BY PRIOR id = fatherid;
+
+-- sys_connect_by_path() only supports char type
+explain
+SELECT *, LEVEL, connect_by_isleaf, connect_by_iscycle, connect_by_root name_desc, sys_connect_by_path(id, '@')
+FROM test_area
+START WITH name = '中国'
+CONNECT BY PRIOR id = fatherid;
+
+/* sys_connect_by_path & connect_by_root can support char(xx) */
+SELECT name,LEVEL,connect_by_root(CHA)
+FROM test_hcb_fqb
+START WITH ID = 1
+CONNECT BY PRIOR CHA = PCHA
+ORDER BY ID ASC;
+
+SELECT name,level,connect_by_root t1.cha as cha_col
+FROM test_hcb_fqb t1
+START WITH id = 1
+CONNECT BY PRIOR cha = pcha
+ORDER BY id ASC;
+
+SELECT name,LEVEL,sys_connect_by_path(CHA, '==》')
+FROM test_hcb_fqb
+START WITH ID = 1
+CONNECT BY PRIOR CHA = PCHA
+ORDER BY ID ASC;
+
+/* empty delimiter in sys_connect_by_path(VCH,'') should be rejected */
+SELECT name,LEVEL,sys_connect_by_path(VCH,'')
+FROM test_hcb_ptb
+START WITH ID = 1
+CONNECT BY PRIOR CHA = PCHA
+ORDER BY ID ASC;
+
+/* start with null must not cause core-dump error */
+SELECT *
+FROM test_hcb_ptb
+START WITH NULL
+CONNECT BY PRIOR CHA = PCHA
+ORDER BY ID ASC;
+
+/* start with pbe */
+PREPARE sthpt(int) AS SELECT t1.id,t1.pid,t1.name FROM test_hcb_ptb t1 START WITH id = $1 CONNECT BY PRIOR pid=id;
+EXECUTE sthpt(141);
+
+/* with-clause used in startwith rewrite */
+explain (costs off) with subquery (id,pid,name) as
+(
+select t1.id,t1.pid,t1.name, LEVEL from test_hcb_ptb t1 where level>=1
+ start with id = 141 connect by prior pid=id
+)
+select t1.id,t1.pid,t1.name,LEVEL from subquery t1
+start with id = 141 connect by prior pid=id;
+
+explain (costs off) select t1.id,t1.pid,t1.name,LEVEL
+from (select t2.id,t2.pid,t2.name,LEVEL from test_hcb_ptb t2 where level>=1 start with t2.id = 141 connect by prior pid=id) t1
+where level>=1 start with id = 141 connect by prior pid=id;
+
+explain select sysdate from test_hcb_ptb t1 start with id = 141 connect by prior pid=id;
+select count(sysdate) from test_hcb_ptb t1 start with id = 141 connect by prior pid=id;
+
+select t1.id,t1.pid,LEVEL,sys_connect_by_path(null, '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid;
+select t1.id,t1.pid,LEVEL,sys_connect_by_path('id', '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid;
+select t1.id,t1.pid,LEVEL,sys_connect_by_path(' ', '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid;
+
+explain select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid Order By NLSSORT ( id, ' NLS_SORT = SCHINESE_PINYIN_M ' );
+select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid Order By NLSSORT ( id, ' NLS_SORT = SCHINESE_PINYIN_M ' );
+
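+-- illustrative sketch (added for clarity; sw_path_demo is a hypothetical helper
+-- table, not part of the original suite): sys_connect_by_path prefixes each
+-- level's value with the delimiter, so a three-row chain is expected to yield
+-- the paths '/a', '/a/b' and '/a/b/c'.
+create table sw_path_demo(id int, pid int, name varchar(10));
+insert into sw_path_demo values (1, null, 'a');
+insert into sw_path_demo values (2, 1, 'b');
+insert into sw_path_demo values (3, 2, 'c');
+select id, level, sys_connect_by_path(name, '/') from sw_path_demo
+start with pid is null connect by prior id = pid;
+drop table sw_path_demo;
+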
+drop table if exists region cascade;
+create table region
+(
+    region_cd varchar(50) primary key ,
+    REGION_MGR_ASSOCIATE_ID number(18,9),
+    c1 serial
+);
+
+select region_mgr_associate_id from region;
+
+drop table if exists item_price_history cascade;
+create table item_price_history
+(
+    ITEM_ID number(39,10) primary key ,
+    LOCATION_ID number(2,0) NULL,c1 serial
+);
+
+
+SELECT (MIN(region_cd)) Column_001, length(CAST('B' AS bytea), 'UTF8') Column_002
+FROM region , item_price_history
+WHERE REGION_MGR_ASSOCIATE_ID = ITEM_ID
+START WITH REGION_MGR_ASSOCIATE_ID NOT LIKE '_W_'
+CONNECT BY PRIOR LOCATION_ID = REGION_MGR_ASSOCIATE_ID
+GROUP BY 2;
+
+drop table item_price_history;
+drop table region;
+
+create table test1(id int,pid int,name text, level int);
+create table test2(id int,pid int,name text, connect_by_iscycle int);
+create table test3(id int,pid int,name text, connect_by_isleaf int);
+create table test4(id int,pid int,name text, c4 int);
+
+insert into test1 select id,pid,name,id%10 from test_hcb_ptb;
+insert into test2 select id,pid,name,id%10 from test_hcb_ptb;
+insert into test3 select id,pid,name,id%10 from test_hcb_ptb;
+insert into test4 select id,pid,name,id%10 from test_hcb_ptb;
+
+/* unqualified level/connect_by_iscycle/connect_by_isleaf resolve to the connect-by pseudocolumn even when a column of the same name exists */
+select id,pid,name,test1.level, level from test1 start with id = 141 connect by prior pid=id;
+select id,pid,name,test2.connect_by_iscycle, connect_by_iscycle from test2 start with id = 141 connect by prior pid=id;
+select id,pid,name,test3.connect_by_isleaf, connect_by_isleaf from test3 start with id = 141 connect by prior pid=id;
+
+drop table test1;
+drop table test2;
+drop table test3;
+drop table test4;
+
+/* query 1 */
+SELECT TRAIT_VALUE_CD
+FROM trait_value
+START WITH TRAIT_VALUE_CD=TRAIT_VALUE_CD
+CONNECT BY PRIOR UOM_CD LIKE '_E_';
+
+
+create table region
+(
+    region_cd varchar(50) primary key ,
+    REGION_MGR_ASSOCIATE_ID number(18,9),c1 serial
+);
+
+create table item_price_history
+(
+    ITEM_ID number(39,10) primary key ,
+    LOCATION_ID number(2,0) NULL,c1 serial
+);
+
+INSERT INTO REGION VALUES ('A', 0.123433);
+INSERT INTO REGION VALUES ('B', NULL);
+INSERT INTO REGION VALUES ('C', 2.232008908);
+INSERT INTO REGION VALUES ('D', 3.878789);
+INSERT INTO REGION VALUES ('E', 4.89060603);
+INSERT INTO REGION VALUES ('F', 5.82703827);
+INSERT INTO REGION VALUES ('G', NULL);
+INSERT INTO REGION VALUES ('H', 7.3829083);
+
+INSERT INTO ITEM_PRICE_HISTORY VALUES (0.12, 4);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (1.3, 1);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (2.23, NULL);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (3.33, 3);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (4.98, 4);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (5.01, 5);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (6, 6);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (0.7, 7);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (0.08, 8);
+INSERT INTO ITEM_PRICE_HISTORY VALUES (9.12, 9);
+
+/* query 2 */
+SELECT 1
+FROM region , item_price_history
+WHERE REGION_MGR_ASSOCIATE_ID = ITEM_ID
+START WITH REGION_MGR_ASSOCIATE_ID NOT LIKE '_W_'
+CONNECT BY PRIOR LOCATION_ID = REGION_MGR_ASSOCIATE_ID;
+
+drop table region;
+drop table item_price_history;
+
+create table test1(c1 int, c2 int, c3 int);
+insert into test1 values(1,1,1);
+insert into test1 values(2,2,2);
+
+-- runs into the 200-iteration limit
+select * from test1 t1 start with c1=1 connect by prior c2<>c3;
+-- returns results once the cycle is detected
+select * from test1 t1 start with c1=1 connect by NOCYCLE prior c2<>c3;
+
+drop table test1;
+
+-- error out when connect_by_iscycle is used and NOCYCLE is not specified
+select t1.id, LEVEL, connect_by_iscycle from test_hcb_ptb t1 start with id = 1 connect by prior id = pid;
+
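+-- illustrative sketch (added; sw_cycle_demo is a hypothetical helper table): a
+-- two-row cycle would loop until the iteration limit without NOCYCLE; with it,
+-- traversal stops at the repeated row and connect_by_iscycle flags the cycle.
+create table sw_cycle_demo(id int, pid int);
+insert into sw_cycle_demo values (1, 2);
+insert into sw_cycle_demo values (2, 1);
+select id, pid, level, connect_by_iscycle from sw_cycle_demo
+start with id = 1 connect by nocycle prior id = pid;
+drop table sw_cycle_demo;
+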
+create table mag_area
+(
+    area_code varchar(10),
+    area_name varchar(120),
+    area_short_name varchar(120),
+    local_name varchar(80),
+    belong_area_code varchar(10),
+    bank_level varchar(8),
+    contry_code varchar(5),
+    part_code varchar(5),
+    time_zone varchar(9),
+    bank_code varchar(10),
+    group_code varchar(5),
+    mag_area_grade varchar(3),
+    mag_area_status varchar(1),
+    mag_area_broad varchar(1)
+);
+
+create table mag_image_tpl
+(
+    seq varchar(20),
+    area_code varchar(10),
+    archive_type varchar(3),
+    busitype varchar(8),
+    image_type varchar(8),
+    app_type varchar(10),
+    rule_id varchar(10),
+    valid_flag varchar(1),
+    modify_branch varchar(10),
+    modify_user varchar(9),
+    modify_time varchar(14)
+);
+
+
+explain
+select a.rule_id, b.mag_area_grade,
+       max(b.mag_area_grade) OVER (PARTITION BY archive_type, busitype,image_type,app_type) max_level
+FROM mag_image_tpl a, mag_area b
+WHERE a.AREA_CODE IN (
+    SELECT area_code
+    FROM mag_area
+    START WITH area_code = '1'
+    CONNECT BY PRIOR belong_area_code = area_code
+)
+AND a.archive_type = 'A'
+AND a.BUSITYPE = 'B'
+AND a.area_code = b.area_code;
+
+
+select a.rule_id, b.mag_area_grade,
+       max(b.mag_area_grade) OVER (PARTITION BY archive_type, busitype,image_type,app_type) max_level
+FROM mag_image_tpl a, mag_area b
+WHERE a.AREA_CODE IN (
+    SELECT area_code
+    FROM mag_area
+    START WITH area_code = '1'
+    CONNECT BY PRIOR belong_area_code = area_code
+)
+AND a.archive_type = 'A'
+AND a.BUSITYPE = 'B'
+AND a.area_code = b.area_code;
+
+drop table mag_area;
+drop table mag_image_tpl;
+
+SELECT id, sys_connect_by_path(name_desc, '@') || id
+FROM test_area
+START WITH name = '耒阳市'
+CONNECT BY id = PRIOR fatherid;
+
+explain
+SELECT table_name || NVL('test','_B$') AS table_name
+  FROM (SELECT TRIM(SUBSTR(txt,
+                           INSTR(txt, ',', 1, LEVEL) + 1,
+                           INSTR(txt, ',', 1, LEVEL + 1) -
+                           INSTR(txt, ',', 1, LEVEL) - 1)) AS table_name
+          FROM (SELECT ',' || REPLACE('test' , ' ', '') || ',' txt FROM sys_dummy)
+        CONNECT BY LEVEL <= LENGTH(REPLACE('test', ' ', '')) - LENGTH(REPLACE(REPLACE('test', ' ', ''), ',', '')) + 1);
+
+SELECT table_name || NVL('test','_B$') AS table_name
+  FROM (SELECT TRIM(SUBSTR(txt,
+                           INSTR(txt, ',', 1, LEVEL) + 1,
+                           INSTR(txt, ',', 1, LEVEL + 1) -
+                           INSTR(txt, ',', 1, LEVEL) - 1)) AS table_name
+          FROM (SELECT ',' || REPLACE('test' , ' ', '') || ',' txt FROM sys_dummy)
+        CONNECT BY LEVEL <= LENGTH(REPLACE('test', ' ', '')) - LENGTH(REPLACE(REPLACE('test', ' ', ''), ',', '')) + 1);
+
+-- fix infinite recursion
+explain select * from t1 start with id = 1 connect by prior id != pid;
+
+-- test keywords
+CREATE TABLE start(connect int, prior int);
+CREATE TABLE connect(start int, prior int);
+CREATE TABLE prior(start int, connect int);
+CREATE TABLE siblings(start int, connect int, prior int);
+
+INSERT INTO start VALUES(1,2);
+INSERT INTO start VALUES(1,3);
+INSERT INTO start VALUES(3,4);
+INSERT INTO start VALUES(3,5);
+INSERT INTO start VALUES(5,6);
+INSERT INTO start VALUES(6,7);
+
+INSERT INTO connect VALUES(1,2);
+INSERT INTO connect VALUES(1,3);
+INSERT INTO connect VALUES(3,4);
+INSERT INTO connect VALUES(3,5);
+INSERT INTO connect VALUES(5,6);
+INSERT INTO connect VALUES(6,7);
+
+EXPLAIN SELECT * FROM START START /* GAUSSDB */ WITH connect = 1 CONNECT
+/*GAUSS*/BY PRIOR prior = prior;
+EXPLAIN SELECT prior AS start, connect AS prior, prior FROM START START
+START WITH connect = 1 CONNECT BY PRIOR /* test prior */ prior = prior;
+EXPLAIN SELECT start AS connect, prior AS start FROM CONNECT
+CONNECT CONNECT BY ROWNUM <5;
+SELECT * FROM START START /*GAUSSDB*/
+ WITH connect = 1 CONNECT
+/*DB*/ BY PRIOR prior = connect;
+SELECT prior AS start, connect AS prior, prior FROM START START START WITH connect = 1 CONNECT BY PRIOR prior = connect;
+SELECT start AS connect, prior AS start FROM CONNECT CONNECT CONNECT BY ROWNUM <5;
+
+DROP TABLE IF EXISTS start;
+DROP TABLE IF 
EXISTS connect; +DROP TABLE IF EXISTS siblings; +DROP TABLE IF EXISTS prior; diff --git a/src/test/regress/sql/sw_bugfix.sql b/src/test/regress/sql/sw_bugfix-2.sql similarity index 53% rename from src/test/regress/sql/sw_bugfix.sql rename to src/test/regress/sql/sw_bugfix-2.sql index ddb887896..bd1d52850 100644 --- a/src/test/regress/sql/sw_bugfix.sql +++ b/src/test/regress/sql/sw_bugfix-2.sql @@ -2,407 +2,6 @@ set client_min_messages = error; SET CLIENT_ENCODING='UTF8'; set current_schema=swtest; -/* invalid data type */ -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT(NAME),ID,CHA,VCH,TEX,DAT,TIM,TIS,PID,PCHA,PVCH,PTEX,PDAT,PTIM,PTIS -FROM TEST_HCB_FQB -START WITH ID=1 -CONNECT BY prior ID=PID -ORDER SIBLINGS BY NAME ASC; - --- invalid use connect_by_root, will treate it as regular column report column does not exists error -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT -FROM test_hcb_ptb -START WITH (ID=169 or ID=168) and CHA in ('afi','afg','afh') -CONNECT BY ID=PRIOR PID and CHA=PRIOR PCHA and VCH=PRIOR PVCH and DAT=PRIOR PDAT and TIM=PRIOR PTIM AND TIS=PRIOR PTIS -order by 1; - -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT name -FROM test_hcb_ptb -START WITH (ID=169 or ID=168) and CHA in ('afi','afg','afh') -CONNECT BY ID=PRIOR PID and CHA=PRIOR PCHA and VCH=PRIOR PVCH and DAT=PRIOR PDAT and TIM=PRIOR PTIM AND TIS=PRIOR PTIS -order by 1; - -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT(name) -FROM test_hcb_ptb -START WITH (ID=169 or ID=168) and CHA in ('afi','afg','afh') -CONNECT BY ID=PRIOR PID and CHA=PRIOR PCHA and VCH=PRIOR PVCH and DAT=PRIOR PDAT and TIM=PRIOR PTIM AND TIS=PRIOR PTIS -order by 1; - -/* Unsupported StartWith Scenarios */ -explain(costs off) -select * from test_hcb_ptbc t1 start with t1.id = 11 connect by prior t1.id = t1.pid; -select * from test_hcb_ptbc t1 start with t1.id = 11 connect by prior t1.id = t1.pid; -SELECT t1.id,t1.pid,t1.name,level FROM test_hcb_ptb t1,test_hcb_ptb t2 WHERE t1.id=t2.id START WITH t1.id=141 CONNECT BY PRIOR t1.id=t1.pid FOR UPDATE OF t2 NOWAIT; -SELECT t1.id, t1.pid,t1.name,level FROM core_066 t1 START WITH id = 117 CONNECT BY PRIOR id=pid FOR UPDATE; - -/* connect by root scenarios */ -select pid x,id,CONNECT_BY_ROOT ID from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; -select pid x,id,CONNECT_BY_ROOT ID alias_id from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; -select pid x,id,CONNECT_BY_ROOT t1.ID from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; -select pid x,id,CONNECT_BY_ROOT t1.ID alias_id from test_hcb_ptbc t1 start with id = 11 connect by prior id = pid; - -/* infinite loop issues */ -SELECT LEVEL,NAME,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME, '/'),CONNECT_BY_ROOT(ID) -FROM test_swcb_a -START WITH ID='00118' -CONNECT BY PRIOR ID=PID -ORDER SIBLINGS BY NAME; - -/* fromlist startwith for single table */ -select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id start with t1.id=1 CONNECT BY PRIOR t1.id = t1.pid; -explain (costs off) select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id start with t1.id=1 CONNECT BY PRIOR t1.id = t1.pid; - -/* swcb中参数为proceder参数 */ -CREATE OR REPLACE FUNCTION test_hcb_pro1(i_id in int) return int -AS -o_out int; -BEGIN -select count(*) into o_out from TEST_HCB_FQB t1 START WITH t1.id = i_id -CONNECT BY PRIOR t1.id = t1.pid; -return 
o_out; -END; -/ - -select test_hcb_pro1(11); -drop PROCEDURE test_hcb_pro1; - -/* startwith dealing with subqueries */ -select tt.id,tt.name from (select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id) tt -start with tt.id=1 CONNECT BY PRIOR tt.id = tt.pid ; - - -explain (costs off) select tt.id,tt.name from (select t1.ID,t1.VCH,pid,NAME,PTEX from TEST_HCB_FQB t1,TEST_SUBLINK t2 where t1.id=t2.id) tt -start with tt.id=1 CONNECT BY PRIOR tt.id = tt.pid ; - -select test.id,test.pid,test.name -from -(select t1.id id, t1.pid pid, t1.name name from TEST_HCB_FQB t1 - union - select t2.id id, t2.pid pid, t2.name name from TEST_HCB_FQB t2) test -start with test.id = 12 -connect by prior test.id = test.pid; - -/* startwith dealing with subqueries without alias */ -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') -FROM (SELECT * FROM test_hcb_ptb) -START WITH CHA IN ('afi','afg','afh') -CONNECT BY PRIOR ID=PID -ORDER SIBLINGS BY NAME; - -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') -FROM (SELECT * FROM test_hcb_ptb) -START WITH CHA IN ('afi','afg','afh') -CONNECT BY PRIOR ID=PID -ORDER SIBLINGS BY 1; - -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') -FROM (SELECT * FROM test_hcb_ptb) -START WITH CHA IN ('afi','afg','afh') -CONNECT BY PRIOR ID=PID -ORDER SIBLINGS BY 999; - -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') -FROM (SELECT * FROM test_hcb_ptb) -START WITH CHA IN ('afi','afg','afh') -CONNECT BY PRIOR ID=PID -ORDER SIBLINGS BY 1, LEVEL; - -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,CONNECT_BY_ROOT(NAME),SYS_CONNECT_BY_PATH(NAME, '/') -FROM (SELECT * FROM test_hcb_ptb) -START WITH CHA IN ('afi','afg','afh') -CONNECT BY PRIOR ID=PID -ORDER SIBLINGS BY 1, HUAWEI; - -/* check siblings ordering */ -SELECT NAME,LEVEL,CONNECT_BY_ISLEAF,SYS_CONNECT_BY_PATH(NAME,'|'),CONNECT_BY_ROOT(NAME) -FROM test_hcb_ptb -START WITH (ID=168 or ID=169) -CONNECT BY ID = PRIOR PID -ORDER SIBLINGS BY NAME ASC; - --- connect_by_root/sys_connect_by_path() unsupported cases -explain -SELECT *, LEVEL, connect_by_isleaf, connect_by_iscycle, connect_by_root name_desc, sys_connect_by_path(level, '@') -FROM test_area -START WITH name = '中国' -CONNECT BY PRIOR id = fatherid; - --- sys_connect_by_path() only supports char type -explain -SELECT *, LEVEL, connect_by_isleaf, connect_by_iscycle, connect_by_root name_desc, sys_connect_by_path(id, '@') -FROM test_area -START WITH name = '中国' -CONNECT BY PRIOR id = fatherid; - -/* sys_connect_by_path & connect_by_root can support char(xx) */ -SELECT name,LEVEL,connect_by_root(CHA) -FROM test_hcb_fqb -START WITH ID = 1 -CONNECT BY PRIOR CHA = PCHA -ORDER BY ID ASC; - -SELECT name,level,connect_by_root t1.cha as cha_col -FROM test_hcb_fqb t1 -START WITH id = 1 -CONNECT BY PRIOR cha = pcha -ORDER BY id ASC; - -SELECT name,LEVEL,sys_connect_by_path(CHA, '==》') -FROM test_hcb_fqb -START WITH ID = 1 -CONNECT BY PRIOR CHA = PCHA -ORDER BY ID ASC; - -/* empty delimiter in sys_connect_by_path(VCH,'') should be rejected */ -SELECT name,LEVEL,sys_connect_by_path(VCH,'') -FROM test_hcb_ptb -START WITH ID = 1 -CONNECT BY PRIOR CHA = PCHA -ORDER BY ID ASC; - -/* start with null must not cause core-dump error */ -SELECT * -FROM test_hcb_ptb -START WITH NULL -CONNECT BY PRIOR CHA = PCHA -ORDER BY ID ASC; - -/* start with pbe */ -PREPARE sthpt(int) AS SELECT t1.id,t1.pid,t1.name FROM test_hcb_ptb t1 START WITH 
id = $1 CONNECT BY PRIOR pid=id; -EXECUTE sthpt(141); - -/* with-clause used in startwith rewrite */ -explain (costs off) with subquery (id,pid,name) as -( -select t1.id,t1.pid,t1.name, LEVEL from test_hcb_ptb t1 where level>=1 - start with id = 141 connect by prior pid=id -) -select t1.id,t1.pid,t1.name,LEVEL from subquery t1 -start with id = 141 connect by prior pid=id; - -explain (costs off) select t1.id,t1.pid,t1.name,LEVEL -from (select t2.id,t2.pid,t2.name,LEVEL from test_hcb_ptb t2 where level>=1 start with t2.id = 141 connect by prior pid=id) t1 -where level>=1 start with id = 141 connect by prior pid=id; - -/* core issue */ -explain select sysdate from test_hcb_ptb t1 start with id = 141 connect by prior pid=id; -select count(sysdate) from test_hcb_ptb t1 start with id = 141 connect by prior pid=id; - -/* core issue sys_connect_by_path(cosnt) core issue */ -select t1.id,t1.pid,LEVEL,sys_connect_by_path(null, '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid; -select t1.id,t1.pid,LEVEL,sys_connect_by_path('id', '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid; -select t1.id,t1.pid,LEVEL,sys_connect_by_path(' ', '->') pa, t1.name from test_hcb_ptb t1 start with id = 141 connect by prior id = pid; - -/* core issue, check args */ -explain select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid Order By NLSSORT ( id, ' NLS_SORT = SCHINESE_PINYIN_M ' ); -select t1.id,t1.pid,t1.name,level from test_hcb_ptb t1 start with id=141 connect by prior id=pid Order By NLSSORT ( id, ' NLS_SORT = SCHINESE_PINYIN_M ' ); - -/* core issue, check args */ -drop table if exists region cascade; -create table region -( - region_cd varchar(50) primary key , - REGION_MGR_ASSOCIATE_ID number(18,9), - c1 serial -); - -select region_mgr_associate_id from region; - -drop table if exists item_price_history cascade; -create table item_price_history -( - ITEM_ID number(39,10) primary key , - LOCATION_ID number(2,0) NULL,c1 serial -); - - -SELECT (MIN(region_cd)) Column_001, length(CAST('B' AS bytea), 'UTF8') Column_002 -FROM region , item_price_history -WHERE REGION_MGR_ASSOCIATE_ID = ITEM_ID -START WITH REGION_MGR_ASSOCIATE_ID NOT LIKE '_W_' -CONNECT BY PRIOR LOCATION_ID = REGION_MGR_ASSOCIATE_ID -GROUP BY 2; - -drop table item_price_history; -drop table region; - -/* */ -create table test1(id int,pid int,name text, level int); -create table test2(id int,pid int,name text, connect_by_iscycle int); -create table test3(id int,pid int,name text, connect_by_isleaf int); -create table test4(id int,pid int,name text, c4 int); - -insert into test1 select id,pid,name,id%10 from test_hcb_ptb; -insert into test2 select id,pid,name,id%10 from test_hcb_ptb; -insert into test3 select id,pid,name,id%10 from test_hcb_ptb; -insert into test4 select id,pid,name,id%10 from test_hcb_ptb; - -/* level/connect_by_iscycle/connect_by_isleaf is for connect by's level value */ -select id,pid,name,test1.level, level from test1 start with id = 141 connect by prior pid=id; -select id,pid,name,test2.connect_by_iscycle, connect_by_iscycle from test2 start with id = 141 connect by prior pid=id; -select id,pid,name,test3.connect_by_isleaf, connect_by_isleaf from test3 start with id = 141 connect by prior pid=id; - -drop table test1; -drop table test2; -drop table test3; -drop table test4; - -/* */ -/* 查询1 */ -SELECT TRAIT_VALUE_CD -FROM trait_value -START WITH TRAIT_VALUE_CD=TRAIT_VALUE_CD -CONNECT BY PRIOR UOM_CD LIKE '_E_'; - - 
-create table region -( - region_cd varchar(50) primary key , - REGION_MGR_ASSOCIATE_ID number(18,9),c1 serial -); - -create table item_price_history -( - ITEM_ID number(39,10) primary key , - LOCATION_ID number(2,0) NULL,c1 serial -); - -INSERT INTO REGION VALUES ('A', 0.123433); -INSERT INTO REGION VALUES ('B', NULL); -INSERT INTO REGION VALUES ('C', 2.232008908); -INSERT INTO REGION VALUES ('D', 3.878789); -INSERT INTO REGION VALUES ('E', 4.89060603); -INSERT INTO REGION VALUES ('F', 5.82703827); -INSERT INTO REGION VALUES ('G', NULL); -INSERT INTO REGION VALUES ('H', 7.3829083); - -INSERT INTO ITEM_PRICE_HISTORY VALUES (0.12, 4); -INSERT INTO ITEM_PRICE_HISTORY VALUES (1.3, 1); -INSERT INTO ITEM_PRICE_HISTORY VALUES (2.23, NULL); -INSERT INTO ITEM_PRICE_HISTORY VALUES (3.33, 3); -INSERT INTO ITEM_PRICE_HISTORY VALUES (4.98, 4); -INSERT INTO ITEM_PRICE_HISTORY VALUES (5.01, 5); -INSERT INTO ITEM_PRICE_HISTORY VALUES (6, 6); -INSERT INTO ITEM_PRICE_HISTORY VALUES (0.7, 7); -INSERT INTO ITEM_PRICE_HISTORY VALUES (0.08, 8); -INSERT INTO ITEM_PRICE_HISTORY VALUES (9.12, 9); - -/* 查询2 */ -SELECT 1 -FROM region , item_price_history -WHERE REGION_MGR_ASSOCIATE_ID = ITEM_ID -START WITH REGION_MGR_ASSOCIATE_ID NOT LIKE '_W_' -CONNECT BY PRIOR LOCATION_ID = REGION_MGR_ASSOCIATE_ID; - -drop table region; -drop table item_price_history; - -/* */ -create table test1(c1 int, c2 int, c3 int); -insert into test1 values(1,1,1); -insert into test1 values(2,2,2); - --- encountered with 200 iteration limit -select * from test1 t1 start with c1=1 connect by prior c2<>c3; --- will return result when cycle is met -select * from test1 t1 start with c1=1 connect by NOCYCLE prior c2<>c3; - -drop table test1; - --- error out a case when NOCYCLE is not specify and use connect_by_iscycle -select t1.id, LEVEL, connect_by_iscycle from test_hcb_ptb t1 start with id = 1 connect by prior id = pid; - - -create table mag_area -( - area_code varchar(10), - area_name varchar(120), - area_short_name varchar(120), - local_name varchar(80), - belong_area_code varchar(10), - bank_level varchar(8), - contry_code varchar(5), - part_code varchar(5), - time_zone varchar(9), - bank_code varchar(10), - group_code varchar(5), - mag_area_grade varchar(3), - mag_area_status varchar(1), - mag_area_broad varchar(1) -); - -create table mag_image_tpl -( - seq varchar(20), - area_code varchar(10), - archive_type varchar(3), - busitype varchar(8), - image_type varchar(8), - app_type varchar(10), - rule_id varchar(10), - valid_flag varchar(1), - modify_branch varchar(10), - modify_user varchar(9), - modify_time varchar(14) -); - - -explain -select a.rule_id, b.mag_area_grade, - max(b.mag_area_grade) OVER (PARTITION BY archive_type, busitype,image_type,app_type) max_level -FROM mag_image_tpl a, mag_area b -WHERE a.AREA_CODE IN ( - SELECT area_code - FROM mag_area - START WITH area_code = '1' - CONNECT BY PRIOR belong_area_code = area_code -) -AND a.archive_type = 'A' -AND a.BUSITYPE = 'B' -AND a.area_code = b.area_code; - - -select a.rule_id, b.mag_area_grade, - max(b.mag_area_grade) OVER (PARTITION BY archive_type, busitype,image_type,app_type) max_level -FROM mag_image_tpl a, mag_area b -WHERE a.AREA_CODE IN ( - SELECT area_code - FROM mag_area - START WITH area_code = '1' - CONNECT BY PRIOR belong_area_code = area_code -) -AND a.archive_type = 'A' -AND a.BUSITYPE = 'B' -AND a.area_code = b.area_code; - -drop table mag_area; -drop table mag_image_tpl; - -SELECT id, sys_connect_by_path(name_desc, '@') || id -FROM test_area -START WITH name = 
'耒阳市' -CONNECT BY id = PRIOR fatherid; - -explain -SELECT table_name || NVL('test','_B$') AS table_name - FROM (SELECT TRIM(SUBSTR(txt, - INSTR(txt, ',', 1, LEVEL) + 1, - INSTR(txt, ',', 1, LEVEL + 1) - - INSTR(txt, ',', 1, LEVEL) - 1)) AS table_name - FROM (SELECT ',' || REPLACE('test' , ' ', '') || ',' txt FROM sys_dummy) - CONNECT BY LEVEL <= LENGTH(REPLACE('test', ' ', '')) - LENGTH(REPLACE(REPLACE('test', ' ', ''), ',', '')) + 1); - -SELECT table_name || NVL('test','_B$') AS table_name - FROM (SELECT TRIM(SUBSTR(txt, - INSTR(txt, ',', 1, LEVEL) + 1, - INSTR(txt, ',', 1, LEVEL + 1) - - INSTR(txt, ',', 1, LEVEL) - 1)) AS table_name - FROM (SELECT ',' || REPLACE('test' , ' ', '') || ',' txt FROM sys_dummy) - CONNECT BY LEVEL <= LENGTH(REPLACE('test', ' ', '')) - LENGTH(REPLACE(REPLACE('test', ' ', ''), ',', '')) + 1); - --- fix infinite recursive -explain select * from t1 start with id = 1 connect by prior id != pid; - create table tsc_rtbl(c_int int,c_varchar1 varchar,c_varchar2 varchar); alter table tsc_rtbl drop column c_varchar2; alter table tsc_rtbl add column c_varchar2 varchar; @@ -651,7 +250,6 @@ select t1.id bauer jack from t1; drop table t1; -/* limit + startwith 场景下执行阶段targetlist报错 */ CREATE TABLE log_part ( ts timestamp(6) without time zone DEFAULT now() NOT NULL, op character(1), @@ -729,19 +327,19 @@ EXPLAIN SELECT * FROM test_area START WITH name = '中国' CONNECT BY PRIOR id = SELECT * FROM test_area START WITH name = '中国' CONNECT BY PRIOR id = fatherid limit 10; -set max_recursive_times=100000000; +set max_recursive_times=1000; create table tt22(x int); create or replace view dual as select 'x' x; -insert into tt22 select level from dual connect by level <=1000000; +insert into tt22 select level from dual connect by level <=1000; select count(*) from tt22; set max_recursive_times=200; -insert into tt22 select level from dual connect by level <=1000000; +insert into tt22 select level from dual connect by level <=1000; drop table tt22; @@ -752,7 +350,6 @@ select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (p explain select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and null; select t1.id,t1.pid,t1.name from test_hcb_ptb t1 start with id=141 connect by (prior pid)=id and prior pid>10 and null; -/* connect by level/rownum 不支持not并且in 数据不准确 */ create table core_060(id varchar); insert into core_060 values ('a'),('b'),('c'); @@ -762,7 +359,6 @@ SELECT id,level FROM core_060 CONNECT BY cast(level as number(38,0))<3; drop table core_060; -/* 存在子查询时,随着数据递归层数的增加,性能下降明显 */ create table t_customer(id int, pid int,num int,depth int); -- verify nestloop can be material-optimized set enable_hashjoin = off; @@ -834,11 +430,53 @@ where t1.brand_cd IS NOT NULL CONNECT BY rownum < 3; drop table if exists brand_sw3 cascade; drop table if exists usview17_sw3 cascade; +-- check that order siblings by does not cause result consistency or performance issues +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY PRIOR id=pid AND level<4 ORDER SIBLINGS BY 1 DESC; +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY PRIOR id=pid AND level<4; +SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY NOCYCLE PRIOR id=pid AND level<4; -create table sw_test1(c0 int); -create table sw_test2(c0 text); +-- test sw dfx +drop table if exists sw_dummy; +create table sw_dummy(swid int); +insert into sw_dummy values(1); +explain performance select * from 
sw_dummy connect by level < 50;
+drop table sw_dummy;
+
+--test null pointers in connect by walker
+explain select * from t1 connect by exists(select distinct (select id from t1));
+
+--test join + where for start with .. connect by
+select t1.id,t1.pid,t2.id from test_hcb_ptb t1 join test_hcb_ptb t2 on t1.id=t2.id where t1.id>1 start with t1.id=141 connect by prior t2.id=t1.pid;
+
+create or replace function prior(id int) returns int
+ LANGUAGE plpgsql AS $$
+ begin
+ return id*3;
+ end;
+ $$;
+select id,pid,prior(level) from test_hcb_ptb where prior(id)>10 start
+ with id=141 connect by prior pid=id;
+select prior(1+1);
+select prior(1);
+select prior(1,1);
+drop function prior(int);
+
+--test dfs rownum
+SELECT id,pid,name,rownum,level FROM test_hcb_ptb START WITH id=1 CONNECT BY NOCYCLE PRIOR id=pid AND rownum<7;
+
+--test subquery pushdown
+SELECT subq_0.c1 as c0
+from
+  (SELECT
+     30 as c0,
+     ref_0.id as c1
+   from
+     test_hcb_ptb as ref_0
+   WHERE false) as subq_0
+WHERE true CONNECT BY EXISTS (
+  SELECT
+    pg_stat_get_partition_tuples_inserted(subq_0.c0) as c1
+  from
+    test_hcb_ptb as ref_7
+)
+LIMIT 169;
diff --git a/src/test/regress/sql/test_astore_multixact.sql b/src/test/regress/sql/test_astore_multixact.sql
index 955f6ae11..90916e59e 100644
--- a/src/test/regress/sql/test_astore_multixact.sql
+++ b/src/test/regress/sql/test_astore_multixact.sql
@@ -106,36 +106,6 @@ end;
 /
 \parallel off
 
-insert into astore_mult1 values (2, 2);
-\parallel on 2
-begin
-perform * from astore_mult1 where a = 2 for key share;
-perform pg_sleep(2);
-delete from astore_mult1 where a = 2;
-end;
-/
-begin
-update astore_mult1 set b = 2 where a = 2;
-perform pg_sleep(3);
-end;
-/
-\parallel off
-
-insert into astore_mult1 values (2, 2);
-\parallel on 2
-begin
-perform * from astore_mult1 where a = 2 for key share;
-perform pg_sleep(2);
-delete from astore_mult1 where a = 2;
-end;
-/
-begin
-update astore_mult1 set b = 2 where a = 2;
-perform pg_sleep(3);
-end;
-/
-\parallel off
-
 vacuum freeze astore_mult1;
 vacuum freeze astore_mult2;
diff --git a/src/test/regress/sql/test_ustore_index_cache_rightpage.sql b/src/test/regress/sql/test_ustore_index_cache_rightpage.sql
new file mode 100644
index 000000000..9c64b76d3
--- /dev/null
+++ b/src/test/regress/sql/test_ustore_index_cache_rightpage.sql
@@ -0,0 +1,115 @@
+-- test fastpath mechanism for index insertion
+create table fastpath (a int, b text, c numeric) with (storage_type=USTORE);
+create unique index fpindex1 on fastpath(a);
+
+insert into fastpath values (1, 'b1', 100.00);
+insert into fastpath values (1, 'b1', 100.00); -- unique key check
+
+truncate fastpath;
+insert into fastpath select generate_series(1,10000), 'b', 100;
+
+-- vacuum the table so as to improve chances of index-only scans. we can't
+-- guarantee that index-only scans will be picked up in all cases, but
+-- that fuzziness actually helps the test.
+vacuum fastpath;
+
+set enable_seqscan to false;
+set enable_bitmapscan to false;
+
+select sum(a) from fastpath where a >= 5000 and a < 5700;
+select sum(a) from fastpath where a >= 5000 and a < 5700;
+select sum(a) from fastpath where a = 6456;
+select sum(a) from fastpath where a >= 5000 and a < 5700;
+
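+-- illustrative check (added; not part of the original test): with seqscan and
+-- bitmapscan disabled above, the aggregates should come back through an index
+-- scan on fpindex1. The plan below is shown for local inspection only.
+explain (costs off) select sum(a) from fastpath where a >= 5000 and a < 5700;
+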
+drop index fpindex1; +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- now create a multi-column index with both column asc +create index fpindex2 on fastpath(a, b); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +-- again, vacuum here either forces index-only scans or creates fuzziness +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- same queries with a different kind of index now. the final result must not +-- change irrespective of what kind of index we have. +drop index fpindex2; +create index fpindex3 on fastpath(a desc, b asc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- repeat again +drop index fpindex3; +create index fpindex4 on fastpath(a asc, b desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- and again, this time indexing by (b, a). Note that column "b" has non-unique +-- values. 
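+-- Editorial sketch (illustrative, not in the original test): whichever
+-- asc/desc combination an index declares, a btree-style index can be walked
+-- in either direction, which is why the md5 fingerprints above must stay
+-- identical across the index variants. The plan picked for an ORDER BY that
+-- matches fpindex4's (a asc, b desc) ordering can be inspected with:
+explain (costs off)
+select * from fastpath where a >= 1000 and a < 2000 order by a, b desc;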
+drop index fpindex4; +create index fpindex5 on fastpath(b asc, a desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- one last time +drop index fpindex5; +create index fpindex6 on fastpath(b desc, a desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +drop table fastpath; diff --git a/src/test/regress/sql/test_ustore_index_including.sql b/src/test/regress/sql/test_ustore_index_including.sql new file mode 100644 index 000000000..eb3b125f8 --- /dev/null +++ b/src/test/regress/sql/test_ustore_index_including.sql @@ -0,0 +1,192 @@ +/* + * 1.test CREATE INDEX + */ + +-- Regular index with included columns +set enable_default_ustore_table = on; +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE INDEX tbl_idx ON tbl using ubtree (c1, c2) INCLUDE (c3,c4); +-- must fail because of intersection of key and included columns +CREATE INDEX tbl_idx ON tbl using ubtree (c1, c2) INCLUDE (c1,c3); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; +DROP TABLE tbl; + +-- Unique index and unique constraint +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl add UNIQUE USING INDEX tbl_idx_unique; +ALTER TABLE tbl add UNIQUE (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; +DROP TABLE tbl; + +-- Unique index and unique constraint. Both must fail. 
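+-- Editorial sketch (dup_demo is a hypothetical helper table, not part of the
+-- original test): the section below loads ten rows that all share the key
+-- (c1, c2) = (1, 2), so any UNIQUE index or constraint on (c1, c2) has to be
+-- rejected. A minimal reproduction of that duplicate-key failure:
+create table dup_demo (c1 int, c2 int);
+insert into dup_demo select 1, 2 from generate_series(1, 3);
+create unique index dup_demo_idx on dup_demo using ubtree (c1, c2); -- must fail
+drop table dup_demo;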
+CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl add UNIQUE (c1, c2) INCLUDE (c3, c4); +DROP TABLE tbl; + +-- PK constraint +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; +DROP TABLE tbl; + +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl add PRIMARY KEY USING INDEX tbl_idx_unique; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl'::regclass ORDER BY c.relname; +DROP TABLE tbl; + +-- PK constraint. Must fail. +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +DROP TABLE tbl; + +/* + * 2. Test CREATE TABLE with constraint + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering UNIQUE(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +DROP TABLE tbl; + +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; + +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + UNIQUE(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +DROP TABLE tbl; + +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; +SELECT pg_get_constraintdef(oid), conname, conkey, conincluding FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM 
generate_series(1,10) AS x;
+INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x;
+DROP TABLE tbl;
+
+/*
+ * 3.0 Test ALTER TABLE DROP COLUMN.
+ * Any column deletion leads to index deletion.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 int);
+CREATE UNIQUE INDEX tbl_idx ON tbl using ubtree(c1, c2, c3, c4);
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 3.1 Test ALTER TABLE DROP COLUMN.
+ * Included column deletion leads to the index deletion,
+ * as well as key columns deletion. This is explained in the documentation.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box);
+CREATE UNIQUE INDEX tbl_idx ON tbl using ubtree(c1, c2) INCLUDE(c3,c4);
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 3.2 Test ALTER TABLE DROP COLUMN.
+ * Included column deletion leads to the index deletion,
+ * as well as key columns deletion. This is explained in the documentation.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4));
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c1;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 4. CREATE INDEX
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4));
+INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,1000) AS x;
+CREATE UNIQUE INDEX on tbl (c1, c2) INCLUDE (c3, c4);
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 5. REINDEX
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4));
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+REINDEX INDEX tbl_c1_c2_c3_c4_key;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c1;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 7. Check various AMs. All but ubtree must fail.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 box, c4 box);
+CREATE INDEX on tbl USING brin(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING gist(c3) INCLUDE (c4);
+CREATE INDEX on tbl USING spgist(c3) INCLUDE (c4);
+CREATE INDEX on tbl USING gin(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING hash(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING rtree(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING ubtree(c1, c2) INCLUDE (c3, c4);
+DROP TABLE tbl;
+
+/*
+ * 8. Update, delete values in indexed table.
+ */ +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using ubtree(c1, c2) INCLUDE (c3,c4); +UPDATE tbl SET c1 = 100 WHERE c1 = 2; +UPDATE tbl SET c1 = 1 WHERE c1 = 3; +-- should fail +UPDATE tbl SET c2 = 2 WHERE c1 = 1; +UPDATE tbl SET c3 = 1; +DELETE FROM tbl WHERE c1 = 5 OR c3 = 12; +DROP TABLE tbl; + +/* + * 9. Alter column type. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl ALTER c1 TYPE bigint; +ALTER TABLE tbl ALTER c3 TYPE bigint; +\d tbl +DROP TABLE tbl; +set enable_default_ustore_table = off; \ No newline at end of file diff --git a/src/test/regress/sql/test_ustore_lock.sql b/src/test/regress/sql/test_ustore_lock.sql index 2d5559843..7c6ddb65a 100644 --- a/src/test/regress/sql/test_ustore_lock.sql +++ b/src/test/regress/sql/test_ustore_lock.sql @@ -3,6 +3,12 @@ create table test_lock_for_update (c1 int) with (storage_type=USTORE); insert into test_lock_for_update values (1); +-- test for update/no key update/share/key share +select c1 from test_lock_for_update where c1 = 1 for update; +select c1 from test_lock_for_update where c1 = 1 for no key update; +select c1 from test_lock_for_update where c1 = 1 for share; +select c1 from test_lock_for_update where c1 = 1 for key share; + \parallel on 2 begin diff --git a/src/test/regress/sql/test_ustore_undo_view.sql b/src/test/regress/sql/test_ustore_undo_view.sql new file mode 100644 index 000000000..af6d3feac --- /dev/null +++ b/src/test/regress/sql/test_ustore_undo_view.sql @@ -0,0 +1,10 @@ +-- test test_ustore_undo_view +drop table if exists test_ustore_undo_view; +create table test_ustore_undo_view (c1 int) with (storage_type=USTORE); +insert into test_ustore_undo_view values(1); +select * from gs_undo_meta(0, -1, 0); +select * from gs_undo_translot(0,-1); +checkpoint; +select * from gs_undo_translot(1,-1); +select * from gs_undo_record(24); +drop table test_ustore_undo_view; diff --git a/src/test/regress/sql/test_ustore_undozone.sql b/src/test/regress/sql/test_ustore_undozone.sql new file mode 100644 index 000000000..3752c7ee1 --- /dev/null +++ b/src/test/regress/sql/test_ustore_undozone.sql @@ -0,0 +1,18 @@ +-- test test_undozone +drop table if exists test_undozone; +create table test_undozone (c1 int) with (storage_type=USTORE); +select count(*) from gs_undo_meta(0, -1, 0); +insert into test_undozone(c1) values(1); +select count(*) from gs_undo_meta(0, -1, 0); + +create temp table test_undozone_tmp (c1 int) with (storage_type=USTORE); +select count(*) from gs_undo_meta(0, -1, 0); +insert into test_undozone_tmp(c1) values(1); +select count(*) from gs_undo_meta(0, -1, 0); + +create unlogged table test_undozone_unlog (c1 int) with (storage_type=USTORE); +select count(*) from gs_undo_meta(0, -1, 0); +insert into test_undozone_unlog(c1) values(1); +select count(*) from gs_undo_meta(0, -1, 0); +drop table test_undozone; + diff --git a/src/test/regress/sql/test_ustore_update.sql b/src/test/regress/sql/test_ustore_update.sql index e341852d6..d0f75c5d1 100644 --- a/src/test/regress/sql/test_ustore_update.sql +++ b/src/test/regress/sql/test_ustore_update.sql @@ -83,15 +83,15 @@ insert into t4 values(4, 'def'); insert into t4 values(5, 'efg'); commit; -select * from t4; +select * from t4 order by c1; start transaction; update t4 set c2 = 'aaaabbbbccccdddd' where c1 = 3; 
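+-- Editorial note (illustrative, not part of the original test): the update
+-- above and the one below enlarge c2 in place inside an open transaction;
+-- the rollback that follows must restore the shorter pre-images from undo,
+-- which the ordered selects then verify. The intermediate state can be
+-- observed with:
+select c1, length(c2) from t4 order by c1;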
update t4 set c2 = 'aaaabbbbccccdddd' where c1 = 2; -select * from t4; +select * from t4 order by c1; rollback; -select * from t4; +select * from t4 order by c1; -- Test updates involving mixed table types drop table if exists t5; diff --git a/src/test/regress/sql/toomanyparams.sql b/src/test/regress/sql/toomanyparams.sql new file mode 100644 index 000000000..d4879e892 --- /dev/null +++ b/src/test/regress/sql/toomanyparams.sql @@ -0,0 +1,1010 @@ +create or replace package a as +procedure f( +a in int, +a1 in int, +a2 in int, +a3 in int, +a4 in int, +a5 in int, +a6 in int, +a7 in int, +a8 in int, +a9 in int, +a10 in int, +a11 in int, +a12 in int, +a13 in int, +a14 in int, +a15 in int, +a16 in int, +a17 in int, +a18 in int, +a19 in int, +a20 in int, +a21 in int, +a22 in int, +a23 in int, +a24 in int, +a25 in int, +a26 in int, +a27 in int, +a28 in int, +a29 in int, +a30 in int, +a31 in int, +a32 in int, +a33 in int, +a34 in int, +a35 in int, +a36 in int, +a37 in int, +a38 in int, +a39 in int, +a40 in int, +a41 in int, +a42 in int, +a43 in int, +a44 in int, +a45 in int, +a46 in int, +a47 in int, +a48 in int, +a49 in int, +a50 in int, +a51 in int, +a52 in int, +a53 in int, +a54 in int, +a55 in int, +a56 in int, +a57 in int, +a58 in int, +a59 in int, +a60 in int, +a61 in int, +a62 in int, +a63 in int, +a64 in int, +a65 in int, +a66 in int, +a67 in int, +a68 in int, +a69 in int, +a70 in int, +a71 in int, +a72 in int, +a73 in int, +a74 in int, +a75 in int, +a76 in int, +a77 in int, +a78 in int, +a79 in int, +a80 in int, +a81 in int, +a82 in int, +a83 in int, +a84 in int, +a85 in int, +a86 in int, +a87 in int, +a88 in int, +a89 in int, +a90 in int, +a91 in int, +a92 in int, +a93 in int, +a94 in int, +a95 in int, +a96 in int, +a97 in int, +a98 in int, +a99 in int, +a100 in int, +aa in int, +aa1 in int, +aa2 in int, +aa3 in int, +aa4 in int, +aa5 in int, +aa6 in int, +aa7 in int, +aa8 in int, +aa9 in int, +aa10 in int, +aa11 in int, +aa12 in int, +aa13 in int, +aa14 in int, +aa15 in int, +aa16 in int, +aa17 in int, +aa18 in int, +aa19 in int, +aa20 in int, +aa21 in int, +aa22 in int, +aa23 in int, +aa24 in int, +aa25 in int, +aa26 in int, +aa27 in int, +aa28 in int, +aa29 in int, +aa30 in int, +aa31 in int, +aa32 in int, +aa33 in int, +aa34 in int, +aa35 in int, +aa36 in int, +aa37 in int, +aa38 in int, +aa39 in int, +aa40 in int, +aa41 in int, +aa42 in int, +aa43 in int, +aa44 in int, +aa45 in int, +aa46 in int, +aa47 in int, +aa48 in int, +aa49 in int, +aa50 in int, +aa51 in int, +aa52 in int, +aa53 in int, +aa54 in int, +aa55 in int, +aa56 in int, +aa57 in int, +aa58 in int, +aa59 in int, +aa60 in int, +aa61 in int, +aa62 in int, +aa63 in int, +aa64 in int, +aa65 in int, +aa66 in int, +aa67 in int, +aa68 in int, +aa69 in int, +aa70 in int, +aa71 in int, +aa72 in int, +aa73 in int, +aa74 in int, +aa75 in int, +aa76 in int, +aa77 in int, +aa78 in int, +aa79 in int, +aa80 in int, +aa81 in int, +aa82 in int, +aa83 in int, +aa84 in int, +aa85 in int, +aa86 in int, +aa87 in int, +aa88 in int, +aa89 in int, +aa90 in int, +aa91 in int, +aa92 in int, +aa93 in int, +aa94 in int, +aa95 in int, +aa96 in int, +aa97 in int, +aa98 in int, +aa99 in int, +aa100 in int, +aaa in int, +aaa1 in int, +aaa2 in int, +aaa3 in int, +aaa4 in int, +aaa5 in int, +aaa6 in int, +aaa7 in int, +aaa8 in int, +aaa9 in int, +aaa10 in int, +aaa11 in int, +aaa12 in int, +aaa13 in int, +aaa14 in int, +aaa15 in int, +aaa16 in int, +aaa17 in int, +aaa18 in int, +aaa19 in int, +aaa20 in int, +aaa21 in int, +aaa22 in int, +aaa23 in int, 
+aaa24 in int, +aaa25 in int, +aaa26 in int, +aaa27 in int, +aaa28 in int, +aaa29 in int, +aaa30 in int, +aaa31 in int, +aaa32 in int, +aaa33 in int, +aaa34 in int, +aaa35 in int, +aaa36 in int, +aaa37 in int, +aaa38 in int, +aaa39 in int, +aaa40 in int, +aaa41 in int, +aaa42 in int, +aaa43 in int, +aaa44 in int, +aaa45 in int, +aaa46 in int, +aaa47 in int, +aaa48 in int, +aaa49 in int, +aaa50 in int, +aaa51 in int, +aaa52 in int, +aaa53 in int, +aaa54 in int, +aaa55 in int, +aaa56 in int, +aaa57 in int, +aaa58 in int, +aaa59 in int, +aaa60 in int, +aaa61 in int, +aaa62 in int, +aaa63 in int, +aaa64 in int, +aaa65 in int, +aaa66 in int, +aaa67 in int, +aaa68 in int, +aaa69 in int, +aaa70 in int, +aaa71 in int, +aaa72 in int, +aaa73 in int, +aaa74 in int, +aaa75 in int, +aaa76 in int, +aaa77 in int, +aaa78 in int, +aaa79 in int, +aaa80 in int, +aaa81 in int, +aaa82 in int, +aaa83 in int, +aaa84 in int, +aaa85 in int, +aaa86 in int, +aaa87 in int, +aaa88 in int, +aaa89 in int, +aaa90 in int, +aaa91 in int, +aaa92 in int, +aaa93 in int, +aaa94 in int, +aaa95 in int, +aaa96 in int, +aaa97 in int, +aaa98 in int, +aaa99 in int, +aaa100 in int, +aaaa in int, +aaaa1 in int, +aaaa2 in int, +aaaa3 in int, +aaaa4 in int, +aaaa5 in int, +aaaa6 in int, +aaaa7 in int, +aaaa8 in int, +aaaa9 in int, +aaaa10 in int, +aaaa11 in int, +aaaa12 in int, +aaaa13 in int, +aaaa14 in int, +aaaa15 in int, +aaaa16 in int, +aaaa17 in int, +aaaa18 in int, +aaaa19 in int, +aaaa20 in int, +aaaa21 in int, +aaaa22 in int, +aaaa23 in int, +aaaa24 in int, +aaaa25 in int, +aaaa26 in int, +aaaa27 in int, +aaaa28 in int, +aaaa29 in int, +aaaa30 in int, +aaaa31 in int, +aaaa32 in int, +aaaa33 in int, +aaaa34 in int, +aaaa35 in int, +aaaa36 in int, +aaaa37 in int, +aaaa38 in int, +aaaa39 in int, +aaaa40 in int, +aaaa41 in int, +aaaa42 in int, +aaaa43 in int, +aaaa44 in int, +aaaa45 in int, +aaaa46 in int, +aaaa47 in int, +aaaa48 in int, +aaaa49 in int, +aaaa50 in int, +aaaa51 in int, +aaaa52 in int, +aaaa53 in int, +aaaa54 in int, +aaaa55 in int, +aaaa56 in int, +aaaa57 in int, +aaaa58 in int, +aaaa59 in int, +aaaa60 in int, +aaaa61 in int, +aaaa62 in int, +aaaa63 in int, +aaaa64 in int, +aaaa65 in int, +aaaa66 in int, +aaaa67 in int, +aaaa68 in int, +aaaa69 in int, +aaaa70 in int, +aaaa71 in int, +aaaa72 in int, +aaaa73 in int, +aaaa74 in int, +aaaa75 in int, +aaaa76 in int, +aaaa77 in int, +aaaa78 in int, +aaaa79 in int, +aaaa80 in int, +aaaa81 in int, +aaaa82 in int, +aaaa83 in int, +aaaa84 in int, +aaaa85 in int, +aaaa86 in int, +aaaa87 in int, +aaaa88 in int, +aaaa89 in int, +aaaa90 in int, +aaaa91 in int, +aaaa92 in int, +aaaa93 in int, +aaaa94 in int, +aaaa95 in int, +aaaa96 in int, +aaaa97 in int, +aaaa98 in int, +aaaa99 in int, +aaaa100 in int, +b1 out int, +b2 out int, +b3 out int, +b4 out int, +b5 out int, +b6 out int, +b7 out int, +b8 out int, +b9 out int, +b10 out int, +b11 out int, +b12 out int, +b13 out int, +b14 out int, +b15 out int, +b16 out int, +b17 out int, +b18 out int, +b19 out int, +b20 out int, +b21 out int, +b22 out int, +b23 out int, +b24 out int, +b25 out int, +b26 out int, +b27 out int, +b28 out int, +b29 out int, +b30 out int, +b31 out int, +b32 out int, +b33 out int, +b34 out int, +b35 out int, +b36 out int, +b37 out int, +b38 out int, +b39 out int, +b40 out int, +b41 out int, +b42 out int, +b43 out int, +b44 out int, +b45 out int, +b46 out int, +b47 out int, +b48 out int, +b49 out int, +b50 out int, +b51 out int, +b52 out int, +b53 out int, +b54 out int, +b55 out int, +b56 out int, +b57 out int, 
+b58 out int, +b59 out int, +b60 out int, +b61 out int, +b62 out int, +b63 out int, +b64 out int, +b65 out int, +b66 out int, +b67 out int, +b68 out int, +b69 out int, +b70 out int, +b71 out int, +b72 out int, +b73 out int, +b74 out int, +b75 out int, +b76 out int, +b77 out int, +b78 out int, +b79 out int, +b80 out int, +b81 out int, +b82 out int, +b83 out int, +b84 out int, +b85 out int, +b86 out int, +b87 out int, +b88 out int, +b89 out int, +b90 out int, +b91 out int, +b92 out int, +b93 out int, +b94 out int, +b95 out int, +b96 out int, +b97 out int, +b98 out int, +b99 out int, +b100 out int, +bb1 out int, +bb2 out int, +bb3 out int, +bb4 out int, +bb5 out int, +bb6 out int, +bb7 out int, +bb8 out int, +bb9 out int, +bb10 out int, +bb11 out int, +bb12 out int, +bb13 out int, +bb14 out int, +bb15 out int, +bb16 out int, +bb17 out int, +bb18 out int, +bb19 out int, +bb20 out int, +bb21 out int, +bb22 out int, +bb23 out int, +bb24 out int, +bb25 out int, +bb26 out int, +bb27 out int, +bb28 out int, +bb29 out int, +bb30 out int, +bb31 out int, +bb32 out int, +bb33 out int, +bb34 out int, +bb35 out int, +bb36 out int, +bb37 out int, +bb38 out int, +bb39 out int, +bb40 out int, +bb41 out int, +bb42 out int, +bb43 out int, +bb44 out int, +bb45 out int, +bb46 out int, +bb47 out int, +bb48 out int, +bb49 out int, +bb50 out int, +bb51 out int, +bb52 out int, +bb53 out int, +bb54 out int, +bb55 out int, +bb56 out int, +bb57 out int, +bb58 out int, +bb59 out int, +bb60 out int, +bb61 out int, +bb62 out int, +bb63 out int, +bb64 out int, +bb65 out int, +bb66 out int, +bb67 out int, +bb68 out int, +bb69 out int, +bb70 out int, +bb71 out int, +bb72 out int, +bb73 out int, +bb74 out int, +bb75 out int, +bb76 out int, +bb77 out int, +bb78 out int, +bb79 out int, +bb80 out int, +bb81 out int, +bb82 out int, +bb83 out int, +bb84 out int, +bb85 out int, +bb86 out int, +bb87 out int, +bb88 out int, +bb89 out int, +bb90 out int, +bb91 out int, +bb92 out int, +bb93 out int, +bb94 out int, +bb95 out int, +bb96 out int, +bb97 out int, +bb98 out int, +bb99 out int, +bb100 out int, +bbb1 out int, +bbb2 out int, +bbb3 out int, +bbb4 out int, +bbb5 out int, +bbb6 out int, +bbb7 out int, +bbb8 out int, +bbb9 out int, +bbb10 out int, +bbb11 out int, +bbb12 out int, +bbb13 out int, +bbb14 out int, +bbb15 out int, +bbb16 out int, +bbb17 out int, +bbb18 out int, +bbb19 out int, +bbb20 out int, +bbb21 out int, +bbb22 out int, +bbb23 out int, +bbb24 out int, +bbb25 out int, +bbb26 out int, +bbb27 out int, +bbb28 out int, +bbb29 out int, +bbb30 out int, +bbb31 out int, +bbb32 out int, +bbb33 out int, +bbb34 out int, +bbb35 out int, +bbb36 out int, +bbb37 out int, +bbb38 out int, +bbb39 out int, +bbb40 out int, +bbb41 out int, +bbb42 out int, +bbb43 out int, +bbb44 out int, +bbb45 out int, +bbb46 out int, +bbb47 out int, +bbb48 out int, +bbb49 out int, +bbb50 out int, +bbb51 out int, +bbb52 out int, +bbb53 out int, +bbb54 out int, +bbb55 out int, +bbb56 out int, +bbb57 out int, +bbb58 out int, +bbb59 out int, +bbb60 out int, +bbb61 out int, +bbb62 out int, +bbb63 out int, +bbb64 out int, +bbb65 out int, +bbb66 out int, +bbb67 out int, +bbb68 out int, +bbb69 out int, +bbb70 out int, +bbb71 out int, +bbb72 out int, +bbb73 out int, +bbb74 out int, +bbb75 out int, +bbb76 out int, +bbb77 out int, +bbb78 out int, +bbb79 out int, +bbb80 out int, +bbb81 out int, +bbb82 out int, +bbb83 out int, +bbb84 out int, +bbb85 out int, +bbb86 out int, +bbb87 out int, +bbb88 out int, +bbb89 out int, +bbb90 out int, +bbb91 out int, +bbb92 out 
int, +bbb93 out int, +bbb94 out int, +bbb95 out int, +bbb96 out int, +bbb97 out int, +bbb98 out int, +bbb99 out int, +bbb100 out int, +bbbb1 out int, +bbbb2 out int, +bbbb3 out int, +bbbb4 out int, +bbbb5 out int, +bbbb6 out int, +bbbb7 out int, +bbbb8 out int, +bbbb9 out int, +bbbb10 out int, +bbbb11 out int, +bbbb12 out int, +bbbb13 out int, +bbbb14 out int, +bbbb15 out int, +bbbb16 out int, +bbbb17 out int, +bbbb18 out int, +bbbb19 out int, +bbbb20 out int, +bbbb21 out int, +bbbb22 out int, +bbbb23 out int, +bbbb24 out int, +bbbb25 out int, +bbbb26 out int, +bbbb27 out int, +bbbb28 out int, +bbbb29 out int, +bbbb30 out int, +bbbb31 out int, +bbbb32 out int, +bbbb33 out int, +bbbb34 out int, +bbbb35 out int, +bbbb36 out int, +bbbb37 out int, +bbbb38 out int, +bbbb39 out int, +bbbb40 out int, +bbbb41 out int, +bbbb42 out int, +bbbb43 out int, +bbbb44 out int, +bbbb45 out int, +bbbb46 out int, +bbbb47 out int, +bbbb48 out int, +bbbb49 out int, +bbbb50 out int, +bbbb51 out int, +bbbb52 out int, +bbbb53 out int, +bbbb54 out int, +bbbb55 out int, +bbbb56 out int, +bbbb57 out int, +bbbb58 out int, +bbbb59 out int, +bbbb60 out int, +bbbb61 out int, +bbbb62 out int, +bbbb63 out int, +bbbb64 out int, +bbbb65 out int, +bbbb66 out int, +bbbb67 out int, +bbbb68 out int, +bbbb69 out int, +bbbb70 out int, +bbbb71 out int, +bbbb72 out int, +bbbb73 out int, +bbbb74 out int, +bbbb75 out int, +bbbb76 out int, +bbbb77 out int, +bbbb78 out int, +bbbb79 out int, +bbbb80 out int, +bbbb81 out int, +bbbb82 out int, +bbbb83 out int, +bbbb84 out int, +bbbb85 out int, +bbbb86 out int, +bbbb87 out int, +bbbb88 out int, +bbbb89 out int, +bbbb90 out int, +bbbb91 out int, +bbbb92 out int, +bbbb93 out int, +bbbb94 out int, +bbbb95 out int, +bbbb96 out int, +bbbb97 out int, +bbbb98 out int, +bbbb99 out int, +bbbb100 out int, +abbbb1 out int, +abbbb2 out int, +abbbb3 out int, +abbbb4 out int, +abbbb5 out int, +abbbb6 out int, +abbbb7 out int, +abbbb8 out int, +abbbb9 out int, +abbbb10 out int, +abbbb11 out int, +abbbb12 out int, +abbbb13 out int, +abbbb14 out int, +abbbb15 out int, +abbbb16 out int, +abbbb17 out int, +abbbb18 out int, +abbbb19 out int, +abbbb20 out int, +abbbb21 out int, +abbbb22 out int, +abbbb23 out int, +abbbb24 out int, +abbbb25 out int, +abbbb26 out int, +abbbb27 out int, +abbbb28 out int, +abbbb29 out int, +abbbb30 out int, +abbbb31 out int, +abbbb32 out int, +abbbb33 out int, +abbbb34 out int, +abbbb35 out int, +abbbb36 out int, +abbbb37 out int, +abbbb38 out int, +abbbb39 out int, +abbbb40 out int, +abbbb41 out int, +abbbb42 out int, +abbbb43 out int, +abbbb44 out int, +abbbb45 out int, +abbbb46 out int, +abbbb47 out int, +abbbb48 out int, +abbbb49 out int, +abbbb50 out int, +abbbb51 out int, +abbbb52 out int, +abbbb53 out int, +abbbb54 out int, +abbbb55 out int, +abbbb56 out int, +abbbb57 out int, +abbbb58 out int, +abbbb59 out int, +abbbb60 out int, +abbbb61 out int, +abbbb62 out int, +abbbb63 out int, +abbbb64 out int, +abbbb65 out int, +abbbb66 out int, +abbbb67 out int, +abbbb68 out int, +abbbb69 out int, +abbbb70 out int, +abbbb71 out int, +abbbb72 out int, +abbbb73 out int, +abbbb74 out int, +abbbb75 out int, +abbbb76 out int, +abbbb77 out int, +abbbb78 out int, +abbbb79 out int, +abbbb80 out int, +abbbb81 out int, +abbbb82 out int, +abbbb83 out int, +abbbb84 out int, +abbbb85 out int, +abbbb86 out int, +abbbb87 out int, +abbbb88 out int, +abbbb89 out int, +abbbb90 out int, +abbbb91 out int, +abbbb92 out int, +abbbb93 out int, +abbbb94 out int, +abbbb95 out int, +abbbb96 out int, +abbbb97 
out int, +abbbb98 out int, +abbbb99 out int, +abbbb100 out int, +aabbbb1 out int, +aabbbb2 out int, +aabbbb3 out int, +aabbbb4 out int, +aabbbb5 out int, +aabbbb6 out int, +aabbbb7 out int, +aabbbb8 out int, +aabbbb9 out int, +aabbbb10 out int, +aabbbb11 out int, +aabbbb12 out int, +aabbbb13 out int, +aabbbb14 out int, +aabbbb15 out int, +aabbbb16 out int, +aabbbb17 out int, +aabbbb18 out int, +aabbbb19 out int, +aabbbb20 out int, +aabbbb21 out int, +aabbbb22 out int, +aabbbb23 out int, +aabbbb24 out int, +aabbbb25 out int, +aabbbb26 out int, +aabbbb27 out int, +aabbbb28 out int, +aabbbb29 out int, +aabbbb30 out int, +aabbbb31 out int, +aabbbb32 out int, +aabbbb33 out int, +aabbbb34 out int, +aabbbb35 out int, +aabbbb36 out int, +aabbbb37 out int, +aabbbb38 out int, +aabbbb39 out int, +aabbbb40 out int, +aabbbb41 out int, +aabbbb42 out int, +aabbbb43 out int, +aabbbb44 out int, +aabbbb45 out int, +aabbbb46 out int, +aabbbb47 out int, +aabbbb48 out int, +aabbbb49 out int, +aabbbb50 out int, +aabbbb51 out int, +aabbbb52 out int, +aabbbb53 out int, +aabbbb54 out int, +aabbbb55 out int, +aabbbb56 out int, +aabbbb57 out int, +aabbbb58 out int, +aabbbb59 out int, +aabbbb60 out int, +aabbbb61 out int, +aabbbb62 out int, +aabbbb63 out int, +aabbbb64 out int, +aabbbb65 out int, +aabbbb66 out int, +aabbbb67 out int, +aabbbb68 out int, +aabbbb69 out int, +aabbbb70 out int, +aabbbb71 out int, +aabbbb72 out int, +aabbbb73 out int, +aabbbb74 out int, +aabbbb75 out int, +aabbbb76 out int, +aabbbb77 out int, +aabbbb78 out int, +aabbbb79 out int, +aabbbb80 out int, +aabbbb81 out int, +aabbbb82 out int, +aabbbb83 out int, +aabbbb84 out int, +aabbbb85 out int, +aabbbb86 out int, +aabbbb87 out int, +aabbbb88 out int, +aabbbb89 out int, +aabbbb90 out int, +aabbbb91 out int, +aabbbb92 out int, +aabbbb93 out int, +aabbbb94 out int, +aabbbb95 out int, +aabbbb96 out int, +aabbbb97 out int, +aabbbb98 out int, +aabbbb99 out int, +aabbbb100 out int +); + +end a; +/ diff --git a/src/test/regress/sql/tpch_vector_optimal.sql b/src/test/regress/sql/tpch_vector_optimal.sql new file mode 100644 index 000000000..e5748fa42 --- /dev/null +++ b/src/test/regress/sql/tpch_vector_optimal.sql @@ -0,0 +1,679 @@ +set try_vector_engine_strategy=optimal; + +explain (costs off) +select + l_returnflag, + l_linestatus, + sum(l_quantity) as sum_qty, + sum(l_extendedprice) as sum_base_price, + sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, + avg(l_quantity) as avg_qty, + avg(l_extendedprice) as avg_price, + avg(l_discount) as avg_disc, + count(*) as count_order +from + lineitem +where + l_shipdate <= date '1998-12-01' - interval '3 day' +group by + l_returnflag, + l_linestatus +order by + l_returnflag, + l_linestatus +; + +explain (costs off) +select + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +from + part, + supplier, + partsupp, + nation, + region +where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and p_size = 15 + and p_type like 'SMALL%' + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE ' + and ps_supplycost = ( + select + min(ps_supplycost) + from + partsupp, + supplier, + nation, + region + where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE ' + ) +order by + s_acctbal desc, + n_name, + s_name, + p_partkey +limit 100 +; + +explain (costs off) +select + l_orderkey, 
+ sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate, + o_shippriority +from + customer, + orders, + lineitem +where + c_mktsegment = 'BUILDING' + and c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate < '1995-03-15'::date + and l_shipdate > '1995-03-15'::date +group by + l_orderkey, + o_orderdate, + o_shippriority +order by + revenue desc, + o_orderdate +limit 10 +; + +explain (costs off) +select + o_orderpriority, + count(*) as order_count +from + orders +where + o_orderdate >= '1993-07-01'::date + and o_orderdate < '1993-07-01'::date + interval '3 month' + and exists ( + select + * + from + lineitem + where + l_orderkey = o_orderkey + and l_commitdate < l_receiptdate + ) +group by + o_orderpriority +order by + o_orderpriority; + +explain (costs off) +select + n_name, + sum(l_extendedprice * (1 - l_discount)) as revenue +from + customer, + orders, + lineitem, + supplier, + nation, + region +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and l_suppkey = s_suppkey + and c_nationkey = s_nationkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'ASIA' + and o_orderdate >= '1994-01-01'::date + and o_orderdate < '1994-01-01'::date + interval '1 year' +group by + n_name +order by + revenue desc; + +explain (costs off) +select + sum(l_extendedprice * l_discount) as revenue +from + lineitem +where + l_shipdate >= '1994-01-01'::date + and l_shipdate < '1994-01-01'::date + interval '1 year' + and l_discount >= 0.06 - 0.01 + and l_discount <= 0.06 + 0.01 + and l_quantity < 24; + +explain (costs off) +select + supp_nation, + cust_nation, + l_year, + sum(volume) as revenue +from + ( + select + n1.n_name as supp_nation, + n2.n_name as cust_nation, + extract(year from l_shipdate) as l_year, + l_extendedprice * (1 - l_discount) as volume + from + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2 + where + s_suppkey = l_suppkey + and o_orderkey = l_orderkey + and c_custkey = o_custkey + and s_nationkey = n1.n_nationkey + and c_nationkey = n2.n_nationkey + and ( + (n1.n_name = 'FRANCE ' and n2.n_name = 'GERMANY') + or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') + ) + and l_shipdate >= date '1995-01-01' + and l_shipdate <= date '1996-12-31' + ) as shipping +group by + supp_nation, + cust_nation, + l_year +order by + supp_nation, + cust_nation, + l_year; + +explain (costs off) +select + o_year, + sum(case + when nation = 'BRAZIL ' then volume + else 0 + end) / sum(volume) as mkt_share +from + ( + select + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) as volume, + n2.n_name as nation + from + part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + where + p_partkey = l_partkey + and s_suppkey = l_suppkey + and l_orderkey = o_orderkey + and o_custkey = c_custkey + and c_nationkey = n1.n_nationkey + and n1.n_regionkey = r_regionkey + and r_name = 'AMERICA' + and s_nationkey = n2.n_nationkey + and o_orderdate >= date '1995-01-01' + and o_orderdate <= date '1996-12-31' + and p_type = 'ECONOMY ANODIZED STEEL' + ) as all_nations +group by + o_year +order by + o_year; + +explain (costs off) +select + nation, + o_year, + sum(amount) as sum_profit +from + ( + select + n_name as nation, + o_orderdate as o_year, + l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount + from + part, + supplier, + lineitem, + partsupp, + orders, + nation + where + s_suppkey = l_suppkey + and ps_suppkey = l_suppkey + and ps_partkey = l_partkey + and 
p_partkey = l_partkey + and o_orderkey = l_orderkey + and s_nationkey = n_nationkey + and p_name like '%green%' + ) as profit +group by + nation, + o_year +order by + nation, + o_year desc; + +explain (costs off) +select + c_custkey, + c_name, + sum(l_extendedprice * (1 - l_discount)) as revenue, + c_acctbal, + n_name, + c_address, + c_phone, + c_comment +from + customer, + orders, + lineitem, + nation +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate >= date '1993-10-01' + and o_orderdate < date '1993-10-01' + interval '3 month' + and l_returnflag = 'R' + and c_nationkey = n_nationkey +group by + c_custkey, + c_name, + c_acctbal, + c_phone, + n_name, + c_address, + c_comment +order by + revenue desc +limit 20 +; + +explain (costs off) +select + ps_partkey, + sum(ps_supplycost * ps_availqty) as value +from + partsupp, + supplier, + nation +where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by + ps_partkey having + sum(ps_supplycost * ps_availqty) > ( + select + sum(ps_supplycost * ps_availqty) * 0.0001 + from + partsupp, + supplier, + nation + where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' + ) +order by + value desc; + +explain (costs off) +select + l_shipmode, + sum(case + when o_orderpriority = '1-URGENT' + or o_orderpriority = '2-HIGH' + then 1 + else 0 + end) as high_line_count, + sum(case + when o_orderpriority <> '1-URGENT' + and o_orderpriority <> '2-HIGH' + then 1 + else 0 + end) as low_line_count +from + orders, + lineitem +where + o_orderkey = l_orderkey + and l_shipmode IN ('MAIL ', 'SHIP ') + and l_commitdate < l_receiptdate + and l_shipdate < l_commitdate + and l_receiptdate >= date '1994-01-01' + and l_receiptdate < date '1994-01-01' + interval '1 year' +group by + l_shipmode +order by + l_shipmode; + +explain (costs off) +select + c_count, + count(*) as custdist +from + ( + select + c_custkey, + count(o_orderkey) + from + customer left outer join orders on + c_custkey = o_custkey + and o_comment not like '%special%request%' + group by + c_custkey + ) as c_orders (c_custkey, c_count) +group by + c_count +order by + custdist desc, + c_count desc; + +explain (costs off) +select + 100.00 * sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - l_discount) + else 0 + end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from + lineitem, + part +where + l_partkey = p_partkey + and l_shipdate >= date '1995-09-01' + and l_shipdate < date '1995-09-01' + interval '1 month' +; + +explain (costs off) +with revenue (supplier_no, total_revenue) as +( + select + l_suppkey, + sum(l_extendedprice * (1 - l_discount)) + from + lineitem + where + l_shipdate >= date '1996-01-01' + and l_shipdate < date '1996-01-01' + interval '3 month' + group by + l_suppkey +) +select + s_suppkey, + s_name, + s_address, + s_phone, + total_revenue +from + supplier, + revenue +where + s_suppkey = supplier_no +order by + s_suppkey +; + +explain (costs off) +select + p_brand, + p_type, + p_size, + count(ps_suppkey) as supplier_cnt +from + partsupp, + part +where + p_partkey = ps_partkey + and p_brand <> 'Brand#45' + and p_type not like 'MEDIUM POLISHED%' + and p_size in (49, 14, 23, 45, 19, 3, 36, 9) + and ps_suppkey not in ( + select + s_suppkey + from + supplier + where s_comment like '%Customer%Complaints%' + ) +group by + p_brand, + p_type, + p_size +order by + supplier_cnt desc, + p_brand, + p_type, + p_size +limit 100 +; + +explain (costs off) +select + sum(l_extendedprice) 
/ 7.0 as avg_yearly +from + lineitem, + part +where + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container = 'MED BOX' + and l_quantity < ( + select + 0.2 * avg(l_quantity) + from + lineitem + where + l_partkey = p_partkey + ) +; + +explain (costs off) +select + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice, + sum(l_quantity) +from + customer, + orders, + lineitem +where + o_orderkey in ( + select + l_orderkey + from + lineitem + group by + l_orderkey having + sum(l_quantity) > 300 + ) + and c_custkey = o_custkey + and o_orderkey = l_orderkey +group by + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice +order by + o_totalprice desc, + o_orderdate +limit 100; + +explain (costs off) +select + sum(l_extendedprice* (1 - l_discount)) as revenue +from + lineitem, + part +where + ( + p_partkey = l_partkey + and substring(p_brand, 1, 7) = 'Brand#1' + and p_container in ('SM CASE', 'SM BOX ', 'SM PACK', 'SM PKG ') + and l_quantity >= 1 and l_quantity <= 1 + 10 + and p_size between 1 and 5 + and l_shipmode in ('AIR ', 'AIR REG') + and substring(l_shipinstruct, 1, 7) = 'DELIVER' + ) + or + ( + p_partkey = l_partkey + and substring(p_brand, 1, 7) = 'Brand#2' + and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') + and l_quantity >= 10 and l_quantity <= 10 + 10 + and p_size between 1 and 10 + and l_shipmode in ('AIR ', 'AIR REG') + and substring(l_shipinstruct, 1, 7) = 'DELIVER' + ) + or + ( + p_partkey = l_partkey + and substring(p_brand, 1, 7) = 'Brand#3' + and p_container in ('LG CASE', 'LG BOX ', 'LG PACK', 'LG PKG ') + and l_quantity >= 20 and l_quantity <= 20 + 10 + and p_size between 1 and 15 + and l_shipmode in ('AIR ', 'AIR REG') + and substring(l_shipinstruct, 1, 7) = 'DELIVER' + ); + +explain (costs off) +select + s_name, + s_address +from + supplier, + nation +where + s_suppkey in ( + select + ps_suppkey + from + partsupp + where + ps_partkey in ( + select + p_partkey + from + part + ) + and ps_availqty > ( + select + 0.5 * sum(l_quantity) + from + lineitem + where + l_partkey = ps_partkey + and l_suppkey = ps_suppkey + and l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1 year' + ) + ) + and s_nationkey = n_nationkey + and n_name = 'CANADA' +order by + s_name; + +explain (costs off) +select + s_name, + count(*) as numwait +from + supplier, + lineitem l1, + orders, + nation +where + s_suppkey = l1.l_suppkey + and o_orderkey = l1.l_orderkey + and o_orderstatus = 'F' + and l1.l_receiptdate > l1.l_commitdate + and exists ( + select + * + from + lineitem l2 + where + l2.l_orderkey = l1.l_orderkey + and l2.l_suppkey <> l1.l_suppkey + ) + and not exists ( + select + * + from + lineitem l3 + where + l3.l_orderkey = l1.l_orderkey + and l3.l_suppkey <> l1.l_suppkey + and l3.l_receiptdate > l3.l_commitdate + ) + and s_nationkey = n_nationkey + and n_name = 'SAUDI ARABIA' +group by + s_name +order by + numwait desc, + s_name +limit 100; + +explain (costs off) +select + cntrycode, + count(*) as numcust, + sum(c_acctbal) as totacctbal +from + ( + select + substring(c_phone from 1 for 2) as cntrycode, + c_acctbal + from + customer + where + substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + and c_acctbal > ( + select + avg(c_acctbal) + from + customer + where + c_acctbal > 0.00 + and substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + ) + and not exists ( + select + * + from + orders + where + o_custkey = c_custkey + ) + ) as custsale +group by + 
cntrycode +order by + cntrycode; diff --git a/src/test/regress/sql/trunc_func_for_date.sql b/src/test/regress/sql/trunc_func_for_date.sql new file mode 100644 index 000000000..37e525d15 --- /dev/null +++ b/src/test/regress/sql/trunc_func_for_date.sql @@ -0,0 +1,65 @@ +--- +--- data type 1 : timestamp +--- + +-- format can recognize +select trunc(timestamp '2021-08-11 20:19:39', 'cc'); -- century +select trunc(timestamp '2021-08-11 20:19:39', 'yyyy'); -- year +select trunc(timestamp '2021-08-11 20:19:39', 'q'); -- quarter +select trunc(timestamp '2021-08-11 20:19:39', 'mm'); -- month +select trunc(timestamp '2021-08-11 20:19:39', 'j'); -- day +select trunc(timestamp '2021-08-11 20:19:39', 'dd'); -- day +select trunc(timestamp '2021-08-11 20:19:39', 'ddd'); -- day +select trunc(timestamp '2021-08-11 20:19:39', 'hh'); -- hour +select trunc(timestamp '2021-08-11 20:19:39', 'mi'); -- minute + +-- format can not recognize +select trunc(timestamp '2021-08-11 20:19:39', 'qq'); -- quarter +select trunc(timestamp '2021-08-11 20:19:39', 'mmm'); -- month +select trunc(timestamp '2021-08-11 20:19:39', 'dddd'); -- day +select trunc(timestamp '2021-08-11 20:19:39', 'hhh'); -- hour + +--- +--- data type 2 : timestamptz +--- + +-- format can recognize +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'cc'); -- century +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'yyyy'); -- year +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'q'); -- quarter +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'mm'); -- month +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'j'); -- day +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'dd'); -- day +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'ddd'); -- day +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'hh'); -- hour +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'mi'); -- minute + +-- format can't recognize +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'qq'); -- quarter +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'mmm'); -- month +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'dddd'); -- day +select trunc(timestamptz '2021-08-12 08:48:26.366526+08', 'hhh'); -- hour + +--- +--- data type 3 : interval +--- + +-- format can recognize +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'cc'); -- century +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'yyyy'); -- year +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'q'); -- quarter +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'mm'); -- month +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'j'); -- day +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'dd'); -- day +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'ddd'); -- day +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'hh'); -- hour +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'mi'); -- minute + +-- format can not recognize +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'qq'); -- quarter +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'mmm'); -- month +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'dddd'); -- day +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 
seconds', 'hhh'); -- hour + +-- not supported +select trunc(interval '2 years 3 months 4 days 5 hours 6 minutes 7 seconds', 'w'); -- week \ No newline at end of file diff --git a/src/test/regress/sql/truncate_gpi.sql b/src/test/regress/sql/truncate_gpi.sql new file mode 100644 index 000000000..a9e61ed26 --- /dev/null +++ b/src/test/regress/sql/truncate_gpi.sql @@ -0,0 +1,197 @@ +set datestyle = 'ISO, MDY'; + +create materialized view pg_partition_before_truncate as + select oid, relname, reloptions, parentid, boundaries + from pg_partition where parentid = ( + select oid from pg_class where relname like 'tg_%' + ); + +create view check_truncate_results as + select pg_class.relname tablename, + bef.relname relname, + bef.oid < aft.oid oid_changed, + bef.parentid = aft.parentid parentid_ok, + bef.boundaries = aft.boundaries boundaries_ok + from pg_partition_before_truncate bef, pg_partition aft, pg_class + where bef.relname = aft.relname + and bef.parentid = aft.parentid + and bef.parentid = pg_class.oid + order by bef.oid; + +-- range +create table tg_range(a date, b int) +partition by range(a) +( + partition p1 values less than ('2022-01-31 00:00:00'), + partition p2 values less than ('2022-02-28 00:00:00'), + partition p3 values less than ('2022-03-31 00:00:00') +); + +create index i_tg_range_global_b on tg_range(b) global; +create index i_tg_range_global_a_b on tg_range(a,b) global; +create index i_tg_range_local_a on tg_range(a) local; + +insert into tg_range select '2022-1-5'::date+n1*'1 month'::interval+10*n2*'1 day'::interval, 10*(n1+1)+(n2+1) from generate_series(0,2) t1(n1), generate_series(0,2) t2(n2); +refresh materialized view pg_partition_before_truncate; + +begin; +alter table tg_range truncate partition p1 update global index; +alter table tg_range truncate partition p2 update global index; + +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; +select * from check_truncate_results where tablename = 'tg_range'; + +select * from tg_range; + +explain(costs off) select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; +select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; +explain(costs off) select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; +select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; + +rollback; + +select relname, reloptions, boundaries from pg_partition_before_truncate + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; +select relname, reloptions, boundaries from pg_partition + where parentid = (select oid from pg_class where relname = 'tg_range') order by oid; +select * from check_truncate_results where tablename = 'tg_range'; + +select * from tg_range; + +explain(costs off) select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; +select /*+ indexscan(tg_range i_tg_range_global_b) */ * from tg_range where b < 40; +explain(costs off) select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; +select /*+ indexscan(tg_range i_tg_range_local_a) */ * from tg_range where a < '2022-03-31 00:00:00'; + +drop table tg_range; + +-- range without gpi +create table 
tg_range_no_gpi(a date, b int)
+partition by range(a)
+(
+    partition p1 values less than ('2022-01-31 00:00:00'),
+    partition p2 values less than ('2022-02-28 00:00:00'),
+    partition p3 values less than ('2022-03-31 00:00:00')
+);
+
+insert into tg_range_no_gpi select '2022-1-5'::date+n1*'1 month'::interval+10*n2*'1 day'::interval, 10*(n1+1)+(n2+1) from generate_series(0,2) t1(n1), generate_series(0,2) t2(n2);
+refresh materialized view pg_partition_before_truncate;
+
+alter table tg_range_no_gpi truncate partition p1 update global index;
+alter table tg_range_no_gpi truncate partition p2 update global index;
+
+select relname, reloptions, boundaries from pg_partition_before_truncate
+    where parentid = (select oid from pg_class where relname = 'tg_range_no_gpi') order by oid;
+select relname, reloptions, boundaries from pg_partition
+    where parentid = (select oid from pg_class where relname = 'tg_range_no_gpi') order by oid;
+select * from check_truncate_results where tablename = 'tg_range_no_gpi';
+
+select * from tg_range_no_gpi;
+
+drop table tg_range_no_gpi;
+-- list
+create table tg_list(a int, b int)
+partition by list(a)
+(
+    partition p1 values (0,3,6),
+    partition p2 values (1,4,7),
+    partition p3 values (default)
+);
+
+create index i_tg_list_global_b on tg_list(b) global;
+create index i_tg_list_global_a_b on tg_list(a,b) global;
+create index i_tg_list_local_a on tg_list(a) local;
+
+insert into tg_list select a,b from generate_series(0,8) t1(a), generate_series(0,8) t2(b);
+refresh materialized view pg_partition_before_truncate;
+
+begin;
+alter table tg_list truncate partition p1 update global index;
+alter table tg_list truncate partition p2 update global index;
+alter table tg_list truncate partition p3 update global index;
+
+select relname, reloptions, boundaries from pg_partition_before_truncate
+    where parentid = (select oid from pg_class where relname = 'tg_list') order by oid;
+select relname, reloptions, boundaries from pg_partition
+    where parentid = (select oid from pg_class where relname = 'tg_list') order by oid;
+select * from check_truncate_results where tablename = 'tg_list';
+
+select * from tg_list;
+
+explain(costs off) select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9;
+select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9;
+explain(costs off) select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9;
+select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9;
+
+rollback;
+
+select relname, reloptions, boundaries from pg_partition_before_truncate
+    where parentid = (select oid from pg_class where relname = 'tg_list') order by oid;
+select relname, reloptions, boundaries from pg_partition
+    where parentid = (select oid from pg_class where relname = 'tg_list') order by oid;
+select * from check_truncate_results where tablename = 'tg_list';
+
+select * from tg_list;
+
+explain(costs off) select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9;
+select /*+ indexscan(tg_list i_tg_list_global_b) */ * from tg_list where b < 9;
+explain(costs off) select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9;
+select /*+ indexscan(tg_list i_tg_list_local_a) */ * from tg_list where a < 9;
+
+drop table tg_list;
+
+-- hash
+create table tg_hash(a int, b int)
+partition by hash(a)
+(
+    partition p1,
+    partition p2,
+    partition p3
+);
+
+create index i_tg_hash_global_b on tg_hash(b) global;
+create index i_tg_hash_global_a_b on tg_hash(a,b) global;
+create index i_tg_hash_local_a on tg_hash(a) local;
+
+insert into tg_hash select a,b from generate_series(0,8) t1(a), generate_series(0,8) t2(b);
+refresh materialized view pg_partition_before_truncate;
+
+begin;
+alter table tg_hash truncate partition p1 update global index;
+alter table tg_hash truncate partition p2 update global index;
+
+select relname, reloptions, boundaries from pg_partition_before_truncate
+    where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid;
+select relname, reloptions, boundaries from pg_partition
+    where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid;
+select * from check_truncate_results where tablename = 'tg_hash';
+
+select * from tg_hash;
+
+explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9;
+select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9;
+explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9;
+select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9;
+
+rollback;
+
+select relname, reloptions, boundaries from pg_partition_before_truncate
+    where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid;
+select relname, reloptions, boundaries from pg_partition
+    where parentid = (select oid from pg_class where relname = 'tg_hash') order by oid;
+select * from check_truncate_results where tablename = 'tg_hash';
+
+select * from tg_hash;
+
+explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9;
+select /*+ indexscan(tg_hash i_tg_hash_global_b) */ * from tg_hash where b < 9;
+explain(costs off) select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9;
+select /*+ indexscan(tg_hash i_tg_hash_local_a) */ * from tg_hash where a < 9;
+
+drop table tg_hash;
+-- cleanup
+drop view check_truncate_results;
+drop materialized view pg_partition_before_truncate;
\ No newline at end of file
diff --git a/src/test/regress/sql/updatable_views.sql b/src/test/regress/sql/updatable_views.sql
new file mode 100644
index 000000000..84cfba786
--- /dev/null
+++ b/src/test/regress/sql/updatable_views.sql
@@ -0,0 +1,81 @@
+CREATE USER regress_view_user1 PASSWORD 'Gauss@123';
+CREATE USER regress_view_user2 PASSWORD 'Gauss@123';
+
+-- nested-view permissions check
+CREATE TABLE base_tbl(a int, b text, c float);
+INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0);
+
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+CREATE VIEW rw_view1 AS SELECT * FROM base_tbl;
+GRANT ALL ON SCHEMA regress_view_user1 TO regress_view_user2;
+SELECT * FROM rw_view1; -- not allowed
+SELECT * FROM rw_view1 FOR UPDATE; -- not allowed
+UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed
+
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+CREATE VIEW rw_view2 AS SELECT * FROM regress_view_user1.rw_view1;
+SELECT * FROM rw_view2; -- not allowed
+SELECT * FROM rw_view2 FOR UPDATE; -- not allowed
+UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed
+
+RESET SESSION AUTHORIZATION;
+GRANT SELECT ON base_tbl TO regress_view_user1;
+
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view1;
+SELECT * FROM rw_view1 FOR UPDATE; -- not allowed
+UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view2; -- not 
+SELECT * FROM rw_view2 FOR UPDATE; -- not allowed
+UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed
+
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+GRANT SELECT ON rw_view1 TO regress_view_user2;
+
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view2;
+SELECT * FROM rw_view2 FOR UPDATE; -- not allowed
+UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+RESET SESSION AUTHORIZATION;
+GRANT UPDATE ON base_tbl TO regress_view_user1;
+
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view1;
+SELECT * FROM rw_view1 FOR UPDATE;
+UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view2;
+SELECT * FROM rw_view2 FOR UPDATE; -- not allowed
+UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+GRANT UPDATE ON rw_view1 TO regress_view_user2;
+
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view2;
+SELECT * FROM rw_view2 FOR UPDATE;
+UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+RESET SESSION AUTHORIZATION;
+REVOKE UPDATE ON base_tbl FROM regress_view_user1;
+
+SET SESSION AUTHORIZATION regress_view_user1 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view1;
+SELECT * FROM rw_view1 FOR UPDATE; -- not allowed
+UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+SET SESSION AUTHORIZATION regress_view_user2 PASSWORD 'Gauss@123';
+SELECT * FROM rw_view2;
+SELECT * FROM rw_view2 FOR UPDATE; -- not allowed
+UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- unlike pgsql, we do not support updating views
+
+RESET SESSION AUTHORIZATION;
+
+DROP TABLE base_tbl CASCADE;
+
+DROP USER regress_view_user1;
+DROP USER regress_view_user2;
\ No newline at end of file
diff --git a/src/test/regress/sql/update_for_wait_s1.sql b/src/test/regress/sql/update_for_wait_s1.sql
new file mode 100644
index 000000000..b989d4f09
--- /dev/null
+++ b/src/test/regress/sql/update_for_wait_s1.sql
@@ -0,0 +1,79 @@
+create table hw_t1 (id1 int, id2 int, num int);
+insert into hw_t1 values (1,11,11), (2,21,21), (3,31,31), (4,41,41), (5,51,51);
+
+/*----------------test1 Locking succeeded. */
+select current_time;
+begin;
+select * from hw_t1 where id1 = 3 for update;
+select pg_sleep(2);
+--time delay
+end;
+select current_time;
+select pg_sleep(1);--wait session2
+
+/*----------------test2 Locking failed. */
+select current_time;
+begin;
+select * from hw_t1 where id1 = 3 for update;
+select pg_sleep(4);
+--time delay
+end;
+select current_time;
+create table t1(val int, val2 int);
+insert into t1 values(1,11),(2,11); insert into t1 values(1,11),(2,11);
+insert into t1 values(3,11),(4,11); insert into t1 values(5,11),(6,11);
+
+/*----------------test3 Locking succeeded. */
+select current_time;
+begin;
+select * from (select * from t1 for update of t1 nowait) as foo;
+--time delay
+select pg_sleep(2);
+--time delay
+end;
+select current_time;
+select pg_sleep(1);--wait session2
+
+/*----------------test4 Locking failed. */
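+-- Timing sketch (assumed choreography between the two scripts): this
+-- session takes the row locks NOWAIT and holds them for pg_sleep(4);
+-- session 2 retries with `for update of t1 wait 2`, which expires before
+-- the hold is released and fails -- unlike test3, where `wait 10`
+-- outlasted the 2-second hold.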
+select current_time;
+begin;
+select * from (select * from t1 for update of t1 nowait) as foo;
+--time delay
+select pg_sleep(4);
+--time delay
+end;
+select current_time;
+/*----------------test5 Locking update. */
+select current_time;
+begin;
+update hw_t1 set num=666 where id1 = 2;
+select pg_sleep(4);
+--time 4
+end;
+select current_time;
+/*----------------test5_1 Locking update. */
+select current_time;
+begin;
+update hw_t1 set num=666;
+select pg_sleep(4);
+--time 4
+end;
+select current_time;
+/*----------------test6 Locking delete. */
+select current_time;
+begin;
+delete hw_t1 where id1 = 3;
+select pg_sleep(4);
+--time 4
+end;
+select current_time;
+/*----------------test6_1 Locking delete. */
+select current_time;
+begin;
+delete from hw_t1;
+select pg_sleep(4);
+--time 4
+end;
+select current_time;
+drop table hw_t1;
+drop table t1;
\ No newline at end of file
diff --git a/src/test/regress/sql/update_for_wait_s2.sql b/src/test/regress/sql/update_for_wait_s2.sql
new file mode 100644
index 000000000..4635c25c0
--- /dev/null
+++ b/src/test/regress/sql/update_for_wait_s2.sql
@@ -0,0 +1,49 @@
+\timing on
+select pg_sleep(1);
+/*----------------test1 Locking succeeded. */
+select current_time;
+select * from hw_t1 where id1 = 3 for update WAIT 10;
+select current_time;
+--time delay
+select pg_sleep(2);--wait session1
+--time delay
+/*----------------test2 Locking failed. */
+select current_time;
+select * from hw_t1 where id1 = 3 for update WAIT 2;
+select current_time;
+--time delay
+select pg_sleep(2);--wait session1
+--time delay
+/*----------------test3 Locking succeeded. */
+select current_time;
+select * from (select * from t1 for update of t1 wait 10) as foo;
+select current_time;
+--time delay
+select pg_sleep(2);
+--time delay
+/*----------------test4 Locking failed. */
+select current_time;
+select * from (select * from t1 for update of t1 wait 2) as foo;
+select current_time;
+--time delay
+select pg_sleep(2);
+/*----------------test5 Locking update. */
+select current_time;
+select * from hw_t1 where id1 = 2 for update wait 2;
+select current_time;
+select pg_sleep(2);
+/*----------------test5_1 Locking update. */
+select current_time;
+select * from hw_t1 where id1 = 2 for update wait 2;
+select current_time;
+/*----------------test6 Locking delete. */
+select pg_sleep(2);
+select current_time;
+select * from hw_t1 where id1 = 3 for update WAIT 2;
+select current_time;
+select pg_sleep(2);
+/*----------------test6_1 Locking delete. */
+select current_time;
+select * from hw_t1 where id1 = 2 for update WAIT 2;
+select current_time;
+\timing off
\ No newline at end of file
diff --git a/src/test/regress/sql/uppercase_attribute_name.sql b/src/test/regress/sql/uppercase_attribute_name.sql
new file mode 100644
index 000000000..266883150
--- /dev/null
+++ b/src/test/regress/sql/uppercase_attribute_name.sql
@@ -0,0 +1,118 @@
+drop table if exists test_tb;
+create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int);
+
+insert into test_tb values(1, 1, 1, 1);
+insert into test_tb values(2, 2, 2, 2);
+insert into test_tb values(3, 3, 3, 3);
+
+select * from test_tb order by col1;
+
+set uppercase_attribute_name=true;
+
+-- When \d(+) describes a table, PQfnumber() matches result columns by their lowercase names,
+-- so uppercase_attribute_name must be restricted from taking effect during \d(+).
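+-- Illustrative sketch (assumed client-side behavior, not part of the
+-- checked output): libpq downcases an unquoted name passed to
+--     PQfnumber(res, "relname")
+-- so if the GUC also uppercased the columns of the metadata queries behind
+-- \d(+), that lookup would find no column; the \d+ below verifies that
+-- describe output stays unaffected while the GUC is on.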
+\d+ pg_class + +select * from test_tb order by col1; + +reset uppercase_attribute_name; + +drop table test_tb; + +-- utf8 encoding +create database utf8 encoding='utf8' LC_COLLATE='en_US.UTF-8' LC_CTYPE ='en_US.UTF-8' TEMPLATE=template0 dbcompatibility='A'; +\c utf8 +set client_encoding=utf8; + +drop table if exists test_tb; +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); + +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); + +select * from test_tb order by col1; + +set uppercase_attribute_name=true; + +select * from test_tb order by col1; + +reset uppercase_attribute_name; + +drop table test_tb; + +-- gbk encoding +create database gbk encoding='gbk' LC_COLLATE='zh_CN.GBK' LC_CTYPE ='zh_CN.GBK' TEMPLATE=template0 dbcompatibility='A'; +\c gbk +set client_encoding=utf8; + +drop table if exists test_tb; +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); + +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); + +select * from test_tb order by col1; + +set uppercase_attribute_name=true; + +select * from test_tb order by col1; + +reset uppercase_attribute_name; + +drop table test_tb; + +-- gb18030 encoding +create database gb18030 encoding='gb18030' LC_COLLATE='zh_CN.GB18030' LC_CTYPE ='zh_CN.GB18030' TEMPLATE=template0 dbcompatibility='A'; +\c gb18030 +set client_encoding=utf8; + +drop table if exists test_tb; +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); + +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); + +select * from test_tb order by col1; + +set uppercase_attribute_name=true; + +select * from test_tb order by col1; + +reset uppercase_attribute_name; + +drop table test_tb; + +-- 'B' dbcompatibility +create database b_dbcompatibility TEMPLATE=template0 dbcompatibility='B'; +\c b_dbcompatibility +set client_encoding=utf8; + +drop table if exists test_tb; +create table test_tb(col1 int, Col_2 int, "col_第三列" int, "CoL_Four" int); + +insert into test_tb values(1, 1, 1, 1); +insert into test_tb values(2, 2, 2, 2); +insert into test_tb values(3, 3, 3, 3); + +select * from test_tb order by col1; + +set uppercase_attribute_name=true; + +select * from test_tb order by col1; + +reset uppercase_attribute_name; + +drop table test_tb; + +\c regression +clean connection to all force for database utf8; +clean connection to all force for database gbk; +clean connection to all force for database gb18030; +clean connection to all force for database b_dbcompatibility; +drop database utf8; +drop database gbk; +drop database gb18030; +drop database b_dbcompatibility; \ No newline at end of file diff --git a/src/test/regress/sql/upsert_grammer_test_01.sql b/src/test/regress/sql/upsert_grammer_test_01.sql index b0f4727ab..22d441682 100644 --- a/src/test/regress/sql/upsert_grammer_test_01.sql +++ b/src/test/regress/sql/upsert_grammer_test_01.sql @@ -51,6 +51,16 @@ INSERT INTO t_grammer (c1, c3, c4.a) VALUES(84, '{81, 82, 83}', 850) ON DUPLICAT INSERT INTO t_grammer VALUES(91, 1, '{91, 92, 93}', ROW(94, 95)) ON DUPLICATE KEY UPDATE NOTHING; INSERT INTO t_grammer (c5, c1, c2, c4) VALUES('{91,92}', 92, DEFAULT, ROW(910, 920)) ON DUPLICATE KEY UPDATE NOTHING; +-- support INSERT with assigned alias +INSERT INTO t_grammer AS T1 VALUES(991, 1); +INSERT INTO t_grammer T2 VALUES(992, 1); +INSERT INTO t_grammer * AS T1 VALUES(993, 
1); +INSERT INTO t_grammer AS T1 VALUES(991, 2) ON DUPLICATE KEY UPDATE T1.C2 = EXCLUDED.C2 + 1; +INSERT INTO t_grammer T2 VALUES(992, 3) ON DUPLICATE KEY UPDATE T2.C2 = EXCLUDED.C2 + 1 WHERE T2.C1 > 100; +INSERT INTO t_grammer * AS T1 VALUES(993, 4) ON DUPLICATE KEY UPDATE NOTHING; +-- excluded is reserved keyword +INSERT INTO t_grammer * AS excluded VALUES(993, 4) ON DUPLICATE KEY UPDATE NOTHING; + -- UPDATE target: unsupport with schema but support with tablename INSERT INTO t_grammer VALUES(0, 0, '{0,0}', ROW(0, 0), '{107, 108}') ON DUPLICATE KEY UPDATE upsert_test.t_grammer.c2 = c2 * 10, upsert_test.t_grammer.c3[1:2] = c5[1:2], upsert_test.t_grammer.c4.a = c1; diff --git a/src/test/regress/sql/upsert_restriction.sql b/src/test/regress/sql/upsert_restriction.sql index cec6a356c..7c5c1012d 100644 --- a/src/test/regress/sql/upsert_restriction.sql +++ b/src/test/regress/sql/upsert_restriction.sql @@ -83,10 +83,9 @@ insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c2 = 1; insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c3 = 2; insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c1 =1, c2 = 1, c3=2, c4=1,c5=1; ----- where clause -insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c4 = 1 where c1=1; ---- from clause insert into up_neg_05 values(1,1,1,1,1) on duplicate key update c4 = 1 from up_neg_04 where c1=1; + ---- update partition key insert into up_neg_07 values(101, 1, 300) on duplicate key update c3=101; insert into up_neg_08 values(101, 1, 300) on duplicate key update c3=101; diff --git a/src/test/regress/sql/upsert_where.sql b/src/test/regress/sql/upsert_where.sql new file mode 100644 index 000000000..a677ae697 --- /dev/null +++ b/src/test/regress/sql/upsert_where.sql @@ -0,0 +1,134 @@ +---- +-- setup +---- +CREATE SCHEMA schema_upsert_where; +SET search_path = schema_upsert_where; + +---- +-- basic syntax +---- +CREATE TABLE tab_target1(c1 int unique, c2 int); +INSERT INTO tab_target1 VALUES(generate_series(1,10), generate_series(1,10)); + +CREATE TABLE tab_target2(c1 int unique, c2 int unique, c3 int); +INSERT INTO tab_target2 VALUES(generate_series(1,10), generate_series(1,10), generate_series(1,10)); + +CREATE TABLE tab_target3(c1 int, c2 int, c3 int, unique(c1, c2)) ; +INSERT INTO tab_target3 VALUES(generate_series(1,10), generate_series(1,10), generate_series(1,10)); + +CREATE TABLE tab_source(c1 int, c2 int, c3 int, c4 int); +INSERT INTO tab_source VALUES(generate_series(1,10), generate_series(10,1, -1), generate_series(1,10), generate_series(1,10)); + +begin; +-- no conflict where clause is ignored (of course) +INSERT INTO tab_target1 VALUES(0,0) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE false; +INSERT INTO tab_target1 VALUES(1,2) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 1; +SELECT * FROM tab_target1 ORDER BY 1,2; +INSERT INTO tab_target1 VALUES(1,3) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 + excluded.c2 = 5; +SELECT * FROM tab_target1 ORDER BY 1,2; + +-- multiple values +INSERT INTO tab_target1 VALUES(2,3),(3,4) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 2; +SELECT * FROM tab_target1 ORDER BY 1,2; +INSERT INTO tab_target1 VALUES(3,4),(3,5),(3,6),(3,7),(3,8),(3,9) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 3; +SELECT * FROM tab_target1 ORDER BY 1,2; + +-- from source table +INSERT INTO tab_target1 (SELECT c1, c2 FROM tab_source) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c1 > 5; +SELECT * FROM tab_target1 ORDER BY 1,2; + +-- multiple confliction - where clause will not 
affect target row of upsert +INSERT INTO tab_target2 VALUES(1,2,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 = 1; +INSERT INTO tab_target2 VALUES(2,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 != 2; +SELECT * FROM tab_target2 ORDER BY 1,2,3; + +-- multi-column unique constraint with coercion +INSERT INTO tab_target3 VALUES(1,1,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 < 5; +INSERT INTO tab_target3 VALUES(2,2,20) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c2 between 1 and 3; +INSERT INTO tab_target3 VALUES(3,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c2 = 3 and (c3 < 30 or c1 = 2) and (not (c1 between 20 and 30)); +SELECT * FROM tab_target3 ORDER BY 1,2,3; + +-- test pbe +PREPARE P1 AS INSERT INTO tab_target3 VALUES($1,$2,$3) ON DUPLICATE KEY UPDATE c3 = excluded.c3 + $4 WHERE c1 < $5; +EXECUTE P1(4, 4, 40, 4, 4); +SELECT * FROM tab_target3 ORDER BY 1,2,3; +EXECUTE P1(4, 4, 40, 4, 5); +SELECT * FROM tab_target3 ORDER BY 1,2,3; + +---- +-- test a_expr cases +---- +INSERT INTO tab_target1 VALUES(0,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE NULL; +INSERT INTO tab_target1 VALUES(1,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 0::boolean; +INSERT INTO tab_target1 VALUES(2,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE false and true; +INSERT INTO tab_target1 VALUES(3,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 'abc' not like '%c'; +INSERT INTO tab_target1 VALUES(4,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 1 IS NULL; +INSERT INTO tab_target1 VALUES(5,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE + (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30'); +INSERT INTO tab_target1 VALUES(6,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE + row(1,1) is distinct from row(1,1); +INSERT INTO tab_target1 VALUES(7,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 1 is unknown; +INSERT INTO tab_target1 VALUES(8,-1) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE 1 not in (1,2,3); +SELECT COUNT(*) FROM tab_target1 WHERE c2 = -1; +rollback; + +-- explain analyze +begin; +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target1 VALUES(1,2) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 + excluded.c2 = 3; + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target1 VALUES(3,4),(3,5),(3,6),(3,7),(3,8),(3,9) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 3; + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target2 VALUES(1,2,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 = 1; + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) INSERT INTO tab_target3 VALUES(3,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c2 = 3 and (c3 < 30 or c1 = 2) and (not (c1 between 20 and 30)); + +EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF) EXECUTE P1(4, 4, 40, 4, 5); +rollback; +---- +-- test with synonym +---- +create synonym stt1 for tab_target1; +create synonym stt2 for tab_target2; +create synonym stt3 for tab_target3; +create synonym sts for tab_source; + +begin; +-- no conflict where clause is ignored (of course) +INSERT INTO stt1 VALUES(0,0) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE false; +INSERT INTO stt1 VALUES(1,2) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 1; +SELECT * FROM stt1 ORDER BY 1,2; +INSERT INTO stt1 VALUES(1,3) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 + excluded.c2 = 5; +SELECT * FROM stt1 ORDER BY 1,2; + +-- multiple values +INSERT INTO 
stt1 VALUES(2,3),(3,4) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 2; +SELECT * FROM stt1 ORDER BY 1,2; +INSERT INTO stt1 VALUES(3,4),(3,5),(3,6),(3,7),(3,8),(3,9) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c2 = 3; +SELECT * FROM stt1 ORDER BY 1,2; + +-- from source table +INSERT INTO stt1 (SELECT c1, c2 FROM sts) ON DUPLICATE KEY UPDATE c2 = excluded.c2 WHERE c1 > 5; +SELECT * FROM stt1 ORDER BY 1,2; + +-- multiple confliction - where clause will not affect target row of upsert +INSERT INTO stt2 VALUES(1,2,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 = 1; +INSERT INTO stt2 VALUES(2,3,30) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 != 2; +SELECT * FROM stt2 ORDER BY 1,2,3; + +-- multi-column unique constraint +INSERT INTO stt3 VALUES(1,1,10) ON DUPLICATE KEY UPDATE c3 = excluded.c3 WHERE c1 < 5; +SELECT * FROM stt3 ORDER BY 1,2,3; + +-- test pbe +deallocate p1; +PREPARE P1 AS INSERT INTO stt3 VALUES($1,$2,$3) ON DUPLICATE KEY UPDATE c3 = excluded.c3 + $4 WHERE c1 < $5; +EXECUTE P1(2, 2, 20, 2, 2); +SELECT * FROM stt3 ORDER BY 1,2,3; +EXECUTE P1(2, 2, 20, 2, 3); +SELECT * FROM tab_target3 ORDER BY 1,2,3; +rollback; + + + +DROP SCHEMA schema_upsert_where CASCADE; diff --git a/src/test/regress/sql/upsert_where_sublink.sql b/src/test/regress/sql/upsert_where_sublink.sql new file mode 100644 index 000000000..fad69608c --- /dev/null +++ b/src/test/regress/sql/upsert_where_sublink.sql @@ -0,0 +1,888 @@ +---- +-- setup +---- +CREATE SCHEMA schema_upsert_where_sublink; +SET search_path = schema_upsert_where_sublink; + +create table tab_target( +c1 int unique not null, +c2 bigint default 0, +c3 numeric default 0, +c4 varchar(100) default 'abcdefjfieE##$#KFAEOJop13SEFJeo', +primary key(c2,c3)); + +INSERT INTO tab_target (c1, c2, c3) VALUES( + generate_series(1,10), + generate_series(1,10), + generate_series(1,10)); + +CREATE TABLE tab_source(c1 int, c2 int, c3 int, c4 int); +INSERT INTO tab_source VALUES(generate_series(1,10), generate_series(10,1, -1), generate_series(1,10), generate_series(1,10)); + +--------------------------------------- +-- not corelated sublink +--------------------------------------- +begin; +-- in/not in sublink +-- multi confliction -> primary key first +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1); + +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1); + +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source); + +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source); + +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1); + +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1); + +-- (not) exists sublink +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4); + +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4); + +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c1 = 1); + +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c1 = 1); + +-- 
any/some +insert into tab_target values(6,0,0) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select 0); + +insert into tab_target values(6,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select 0); + +insert into tab_target values(7,0,0) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1); + +insert into tab_target values(7,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1); + +-- opr sublink +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); + +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); + +-- nested sublink +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +-- sublink with CTE +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +select * from tab_target order by 1,2,3,4; + +rollback; + +-- check plan +begin; +-- in/not in sublink +-- multi confliction -> primary key first +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in 
(select 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1); + +-- (not) exists sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c1 = 1); + +-- any/some +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,0) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select 0); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select 0); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,0) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,0) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1); + +-- opr sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < 8 limit 1)); + +-- nested sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = 9 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +-- sublink with CTE +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where 
c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = 10 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +select * from tab_target order by 1,2,3,4; + +rollback; + +--------------------------------------- +-- corelated sublink - reference target +--------------------------------------- +begin; +-- in/not in sublink +-- multi confliction -> primary key first +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where tab_target.c1 = 1); + +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where tab_target.c1 = 1); + +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source where tab_target.c1 = c1); + +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source where tab_target.c1 = c1); + +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where tab_target.c1 = 1); + +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where tab_target.c1 = 1); + +-- (not) exists sublink +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); + +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); + +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); + +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); + +-- any/some +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where tab_target.c1 = c1); + +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select c3 from tab_source where tab_target.c1 = c1); + +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where tab_target.c1 = c1); + +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where tab_target.c1 = c1); + +-- opr sublink +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); + +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); + +-- nested sublink +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source 
where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = tab_target.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where tab_target.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +-- sublink with CTE +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +select * from tab_target order by 1,2,3,4; + +rollback; + +-- check plan +begin; +-- in/not in sublink +-- multi confliction -> primary key first +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where tab_target.c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where tab_target.c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 from tab_source where tab_target.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 from tab_source where tab_target.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where tab_target.c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where tab_target.c1 = 1); + +-- (not) exists sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,2) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = tab_target.c3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and 
c3 = tab_target.c3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = tab_target.c3); + +-- any/some +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where tab_target.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select c3 from tab_source where tab_target.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where tab_target.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where tab_target.c1 = c1); + +-- opr sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < tab_target.c1 limit 1)); + +-- nested sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = tab_target.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where tab_target.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +-- sublink with CTE +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = tab_target.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from 
cte1 limit 1 +); + +select * from tab_target order by 1,2,3,4; + +rollback; + +----------------------------------------- +-- corelated sublink - reference conflict +----------------------------------------- +begin; +-- in/not in sublink +-- multi confliction -> primary key first +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where excluded.c1 = 1); + +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where excluded.c1 = 1); + +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 + 1 from tab_source where excluded.c1 = c1); + +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 + 1 from tab_source where excluded.c1 = c1); + +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where excluded.c1 = 1); + +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where excluded.c1 = 1); + +-- (not) exists sublink +insert into tab_target values(4,1,4) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); + +insert into tab_target values(4,1,4) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); + +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); + +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); + +-- any/some +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where excluded.c1 = c1); + +insert into tab_target values(6,0,6) on duplicate key update c4 = 'ERROR' where excluded.c3 != any (select c3 from tab_source where excluded.c1 = c1); + +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where excluded.c1 = c1); + +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where excluded.c1 = c1); + +-- opr sublink +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); + +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); + +-- nested sublink +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + 
select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +-- sublink with CTE +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +select * from tab_target order by 1,2,3,4; + +rollback; + +-- check plan +begin; +-- in/not in sublink +-- multi confliction -> primary key first +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'conflict1' where excluded.c1 in (select 1 where excluded.c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,1,1) on duplicate key update c4 = 'ERROR' where excluded.c1 not in (select 1 where excluded.c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'conflict2' where excluded.c3 in (select c1 + 1 from tab_source where excluded.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,2) on duplicate key update c4 = 'ERROR' where excluded.c3 not in (select c1 + 1 from tab_source where excluded.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'conflict3' where excluded.c1 not in (select 1 where excluded.c1 = 1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,3,3) on duplicate key update c4 = 'ERROR' where excluded.c1 in (select 1 where excluded.c1 = 1); + +-- (not) exists sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,4) on duplicate key update c4 = 'conflict4' where exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,1,4) on duplicate key update c4 = 'ERROR' where not exists (select c1 from tab_source where c4 = 4 and c3 = excluded.c3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'conflict5' where not exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(0,5,5) on duplicate key update c4 = 'ERROR' where exists (select c2 from tab_source where c4 = 4 and c3 = excluded.c3); + +-- any/some +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on duplicate key update c4 = 'conflict6' where excluded.c3 = any (select c3 from tab_source where excluded.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,0,6) on 
duplicate key update c4 = 'ERROR' where excluded.c3 != any (select c3 from tab_source where excluded.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'conflict7' where excluded.c3 > some (select -1 from tab_source where excluded.c1 = c1); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,0,7) on duplicate key update c4 = 'ERROR' where excluded.c3 < some (select -1 from tab_source where excluded.c1 = c1); + +-- opr sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'conflict8' where not (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(8,8,8) on duplicate key update c4 = 'ERROR' where (excluded.c3 > (select c2 from tab_source where c3 < excluded.c1 limit 1)); + +-- nested sublink +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'conflict9' where excluded.c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(9,9,9) on duplicate key update c4 = 'ERROR' where excluded.c1 != ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from tab_source where c1 = ( + select c1 from ( + select c1 from ( + select c1 from ( + select c1 from tab_source where c1 = excluded.c1 + ) + ) + ) + ) + ) + ) + ) + ) + ) +); + +-- sublink with CTE +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'conflict10' where c1 = ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(10,10,10) on duplicate key update c4 = 'ERROR' where c1 != ( + with cte1 as ( + with cte2 as ( + with cte3 as ( + with cte4 as ( + with cte5 as ( + select c1 from tab_source where c3 = excluded.c1 + ) select c1 from cte5 + ) select c1 from cte4 + ) select c1 from cte3 + ) select c1 from cte2 + ) select c1 from cte1 limit 1 +); + +select * from tab_target order by 1,2,3,4; + +rollback; + +-------- +-- misc +-------- +begin; +-- agg + group by +insert into tab_target values(1,2,3) on duplicate key update c4 = 'conflict1' where excluded.c2 = (select count(1) from tab_source where c2 < 3); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(1,2,3) on duplicate key update c4 = 'ERROR' where excluded.c2 != (select count(1) from tab_source where c2 < 3); + +-- limit + offset +insert into tab_target values(2,3,5) on duplicate key update c4 = 
'conflict2' where excluded.c2 = (select c2 from tab_source order by c1 asc limit 1 offset 7); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(2,3,5) on duplicate key update c4 = 'ERROR' where excluded.c2 != (select c2 from tab_source order by c1 asc limit 1 offset 7) + and c3 = (select c1 from tab_source order by c1 desc limit 1 offset 7); + +-- window funcs +insert into tab_target values(3,5,7) on duplicate key update c4 = 'conflict3' where c2 = (select sum_rows from ( + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows + FROM generate_series(1, 3) i order by 2 limit 1 +)); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(3,5,7) on duplicate key update c4 = 'ERROR' where c2 > (select sum_rows from ( + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows + FROM generate_series(1, 3) i order by 2 limit 1 +)); + +-- setopt +insert into tab_target values(4,8,9) on duplicate key update c4 = 'conflict4' where c1 = ( + select count(*) / 2.5 from ( + (select c1, c2 from tab_source) + union + (select c2, c1 from tab_source) + ) +); + +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(4,8,9) on duplicate key update c4 = 'ERROR' where c1 = ( + select count(*) / 4 from ( + (select c1, c2 from tab_source) + union all + (select c2, c1 from tab_source) + minus + (select c1, c4 from tab_source) + intersect + (select c2, c3 from tab_source) + ) +); + +-- with param +prepare p1 as +insert into tab_target values($1,$2,$3) on duplicate key update c4 = $4 where c1 in ( + with cte as ( + select c1 from tab_source where c2 in ( + select c2 from tab_source where c1 <= $5 + ) + ) select c1 from cte where c1 >= $6 +); + +-- gplan not supported yet +set plan_cache_mode = force_generic_plan; +explain (analyze on, verbose off, timing off, costs off) +execute p1(5, 6, 7, 'conflict5', 5, 5); + +set plan_cache_mode = force_custom_plan; +explain (analyze on, verbose off, timing off, costs off) +execute p1(5, 6, 7, 'ERROR', 4, 5); + +-- test with hint +-- blockname +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,8,7) on duplicate key update c4 = 'conflict6' where c1 = ( + select /*+ blockname(tt) */ c4 from tab_source where c1 = 6 +); + +-- no_expand +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(6,8,7) on duplicate key update c4 = 'ERROR' where c1 != ( + select /*+ no_expand */ c4 from tab_source where c1 = 6 +); + +-- leading/join +explain (analyze on, verbose off, timing off, costs off) +insert into tab_target values(7,4,7) on duplicate key update c4 = 'conflict7' where c1 = ( + select /*+ leading((t2 t1)) mergejoin(t1 t2) */ t1.c4 from tab_source t1, tab_source t2 where t1.c2 = t2.c2 and t1.c3 = 7 +); + +-- rowmarks +insert into tab_target values(8,4,3) on duplicate key update c4 = 'conflict8' where c1 in (select c4 from tab_source where c4 = 8 for update); + +insert into tab_target values(9,6,3) on duplicate key update c4 = 'conflict9' where c1 = (select c3 from tab_source where c1 = 9 for share); + +select * from tab_target where c1 < 10 order by 1,2,3,4; +rollback; + + +DROP SCHEMA schema_upsert_where_sublink CASCADE; \ No newline at end of file diff --git a/src/test/regress/sql/ustore_subpartition_add_drop_partition.sql b/src/test/regress/sql/ustore_subpartition_add_drop_partition.sql new file mode 100644 index 000000000..f05134af8 --- 
/dev/null +++ b/src/test/regress/sql/ustore_subpartition_add_drop_partition.sql @@ -0,0 +1,1239 @@ +DROP SCHEMA ustore_subpartition_add_drop_partition CASCADE; +CREATE SCHEMA ustore_subpartition_add_drop_partition; +SET CURRENT_SCHEMA TO ustore_subpartition_add_drop_partition; + +-- +----range-range table---- +-- +--prepare +CREATE TABLE range_range_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY RANGE (time_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer1_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer1_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer1_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer2_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer2_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer2_2011 VALUES LESS THAN ('2012-01-01') + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_all VALUES LESS THAN ('2012-01-01') + ) +); +INSERT INTO range_range_sales SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); +CREATE INDEX range_range_sales_idx ON range_range_sales(product_id) LOCAL; + +--check for add partition/subpartition +--success, add 4 subpartition +ALTER TABLE range_range_sales ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_2008 VALUES LESS THAN ('2009-01-01'), + SUBPARTITION customer5_2009 VALUES LESS THAN ('2010-01-01'), + SUBPARTITION customer5_2010 VALUES LESS THAN ('2011-01-01'), + SUBPARTITION customer5_2011 VALUES LESS THAN ('2012-01-01') + ); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100); +--fail, invalid format +ALTER TABLE range_range_sales ADD PARTITION customer_temp2 VALUES (1300); +--success, add 1 default subpartition +ALTER TABLE range_range_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +--fail, out of range +ALTER TABLE range_range_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800); +--success, add 1 subpartition +ALTER TABLE range_range_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_2012 VALUES LESS THAN ('2013-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES LESS THAN ('2015-01-01'); +--fail, out of range +ALTER TABLE range_range_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('2011-01-01'); +--fail, invalid format +ALTER TABLE range_range_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('2015-01-01'); + +--check for ok after add +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM 
pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_range_sales + +--check for drop partition/subpartition (for) +--success, drop partition customer2 +ALTER TABLE range_range_sales DROP PARTITION customer2; +--success +ALTER TABLE range_range_sales DROP SUBPARTITION customer1_2008; +--fail, the only subpartition +ALTER TABLE range_range_sales DROP SUBPARTITION customer4_all; +--success, drop partition customer3 +ALTER TABLE range_range_sales DROP PARTITION FOR (400); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP PARTITION FOR (400, '2010-01-01'); +--fail, number not equal to the number of partkey +ALTER TABLE range_range_sales DROP SUBPARTITION FOR (1400); +--fail, invalid type +ALTER TABLE range_range_sales DROP PARTITION FOR ('2010-01-01'); +--fail, invalid type +ALTER TABLE range_range_sales DROP SUBPARTITION FOR ('2010-01-01', 1400); +--success, drop subpartition customer5_2010 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2010-01-01'); +--fail, the only subpartition in customer6 +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(2000, '2009-01-01'); +--fail, no subpartition find +ALTER TABLE range_range_sales DROP SUBPARTITION FOR(1400, '2012-01-01'); + +--check for ok after drop +SELECT count(*) FROM range_range_sales; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid + OR p1.parentid IN ( + SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2 + WHERE c2.relname='range_range_sales' + AND c2.relnamespace=n2.oid + AND n2.nspname=CURRENT_SCHEMA + AND (p2.parentid=c2.oid) + )) + ORDER BY p1.parttype, p1.relname; +SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable + FROM pg_class c1, pg_partition p1, pg_namespace n1 + WHERE c1.relname='range_range_sales_idx' + AND c1.relnamespace=n1.oid + AND n1.nspname=CURRENT_SCHEMA + AND (p1.parentid=c1.oid) + ORDER BY p1.relname; +\d+ range_range_sales + +-- +----range-list table---- +-- +--prepare +CREATE TABLE range_list_sales +( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN 
+
+--
+----range-list table----
+--
+--prepare
+CREATE TABLE range_list_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id)
+(
+    PARTITION customer1 VALUES LESS THAN (200)
+    (
+        SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'),
+        SUBPARTITION customer1_channel4 VALUES ('9')
+    ),
+    PARTITION customer2 VALUES LESS THAN (500)
+    (
+        SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'),
+        SUBPARTITION customer2_channel2 VALUES (DEFAULT)
+    ),
+    PARTITION customer3 VALUES LESS THAN (800),
+    PARTITION customer4 VALUES LESS THAN (1200)
+    (
+        SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
+    )
+);
+INSERT INTO range_list_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX range_list_sales_idx ON range_list_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--success, add 4 subpartitions
+ALTER TABLE range_list_sales ADD PARTITION customer5 VALUES LESS THAN (1500)
+    (
+        SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION customer5_channel3 VALUES ('6', '7', '8'),
+        SUBPARTITION customer5_channel4 VALUES ('9')
+    );
+--fail, out of range
+ALTER TABLE range_list_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100);
+--fail, invalid format
+ALTER TABLE range_list_sales ADD PARTITION customer_temp2 VALUES (1300);
+--success, add 1 default subpartition
+ALTER TABLE range_list_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE);
+--fail, out of range
+ALTER TABLE range_list_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800);
+--success, add 1 subpartition
+ALTER TABLE range_list_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_channel5 VALUES ('X');
+--fail, out of range
+ALTER TABLE range_list_sales MODIFY PARTITION customer2 ADD SUBPARTITION customer2_temp1 VALUES ('X');
+--fail, out of range
+ALTER TABLE range_list_sales MODIFY PARTITION customer3 ADD SUBPARTITION customer3_temp1 VALUES ('X');
+--fail, invalid format
+ALTER TABLE range_list_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='range_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ range_list_sales
+
+--check for drop partition/subpartition (for)
+--success, drop partition customer2
+ALTER TABLE range_list_sales DROP PARTITION customer2;
+--success
+ALTER TABLE range_list_sales DROP SUBPARTITION customer1_channel1;
+--fail, cannot drop the only subpartition
+ALTER TABLE range_list_sales DROP SUBPARTITION customer4_channel1;
+--success, drop partition customer3
+ALTER TABLE range_list_sales DROP PARTITION FOR (400);
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE range_list_sales DROP PARTITION FOR (400, '4');
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE range_list_sales DROP SUBPARTITION FOR (1400);
+--fail, invalid type
+ALTER TABLE range_list_sales DROP PARTITION FOR ('abc');
+--fail, invalid type
+ALTER TABLE range_list_sales DROP SUBPARTITION FOR ('abc', 1400);
+--success, drop subpartition customer5_channel3
+ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1400, '7');
+--fail, cannot drop the only subpartition in customer6
+ALTER TABLE range_list_sales DROP SUBPARTITION FOR(2000, 'X');
+--fail, no subpartition found
+ALTER TABLE range_list_sales DROP SUBPARTITION FOR(1100, 'X');
+
+--check for ok after drop
+SELECT count(*) FROM range_list_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='range_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ range_list_sales
+
+--
+----range-hash table----
+--
+--prepare
+CREATE TABLE range_hash_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (customer_id) SUBPARTITION BY HASH (product_id)
+(
+    PARTITION customer1 VALUES LESS THAN (200)
+    (
+        SUBPARTITION customer1_product1,
+        SUBPARTITION customer1_product2,
+        SUBPARTITION customer1_product3,
+        SUBPARTITION customer1_product4
+    ),
+    PARTITION customer2 VALUES LESS THAN (500)
+    (
+        SUBPARTITION customer2_product1,
+        SUBPARTITION customer2_product2
+    ),
+    PARTITION customer3 VALUES LESS THAN (800),
+    PARTITION customer4 VALUES LESS THAN (1200)
+    (
+        SUBPARTITION customer4_product1
+    )
+);
+INSERT INTO range_hash_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX range_hash_sales_idx ON range_hash_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--success, add 4 subpartitions
+ALTER TABLE range_hash_sales ADD PARTITION customer5 VALUES LESS THAN (1500)
+    (
+        SUBPARTITION customer5_product1,
+        SUBPARTITION customer5_product2,
+        SUBPARTITION customer5_product3,
+        SUBPARTITION customer5_product4
+    );
+--fail, out of range
+ALTER TABLE range_hash_sales ADD PARTITION customer_temp1 VALUES LESS THAN (1100);
+--fail, invalid format
+ALTER TABLE range_hash_sales ADD PARTITION customer_temp2 VALUES (1300);
+--success, add 1 default subpartition
+ALTER TABLE range_hash_sales ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE);
+--fail, out of range
+ALTER TABLE range_hash_sales ADD PARTITION customer_temp3 VALUES LESS THAN (1800);
+--fail, adding hash subpartitions is not supported
+ALTER TABLE range_hash_sales MODIFY PARTITION customer1 ADD SUBPARTITION customer1_temp1;
+--fail, invalid format
+ALTER TABLE range_hash_sales MODIFY PARTITION customer4 ADD SUBPARTITION customer4_temp1 VALUES LESS THAN ('X');
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_hash_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='range_hash_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_hash_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ range_hash_sales
+
+--check for drop partition/subpartition (for)
+--success, drop partition customer2
+ALTER TABLE range_hash_sales DROP PARTITION customer2;
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE range_hash_sales DROP SUBPARTITION customer1_product1;
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE range_hash_sales DROP SUBPARTITION customer4_product1;
+--success, drop partition customer3
+ALTER TABLE range_hash_sales DROP PARTITION FOR (400);
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE range_hash_sales DROP PARTITION FOR (400, '2010-01-01');
+--fail, invalid type
+ALTER TABLE range_hash_sales DROP PARTITION FOR ('2010-01-01');
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE range_hash_sales DROP SUBPARTITION FOR(1400, 1);
+
+--check for ok after drop
+SELECT count(*) FROM range_hash_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_hash_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='range_hash_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='range_hash_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ range_hash_sales
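+
+-- NOTE (editor's illustration, not part of the original test): hash (sub)partitions can be
+-- neither added nor dropped, because rows are routed by hashing the key over the set of
+-- partitions fixed at CREATE TABLE time; changing that set would remap existing rows. The
+-- bucket count can only be chosen up front, e.g. (commented out, hypothetical table name t):
+-- CREATE TABLE t (id INT4) WITH (STORAGE_TYPE=USTORE)
+-- PARTITION BY HASH (id) (PARTITION p0, PARTITION p1, PARTITION p2);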
+
+--
+----list-range table----
+--
+--prepare
+CREATE TABLE list_range_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (channel_id) SUBPARTITION BY RANGE (customer_id)
+(
+    PARTITION channel1 VALUES ('0', '1', '2')
+    (
+        SUBPARTITION channel1_customer1 VALUES LESS THAN (200),
+        SUBPARTITION channel1_customer2 VALUES LESS THAN (500),
+        SUBPARTITION channel1_customer3 VALUES LESS THAN (800),
+        SUBPARTITION channel1_customer4 VALUES LESS THAN (1200)
+    ),
+    PARTITION channel2 VALUES ('3', '4', '5')
+    (
+        SUBPARTITION channel2_customer1 VALUES LESS THAN (500),
+        SUBPARTITION channel2_customer2 VALUES LESS THAN (MAXVALUE)
+    ),
+    PARTITION channel3 VALUES ('6', '7'),
+    PARTITION channel4 VALUES ('8', '9')
+    (
+        SUBPARTITION channel4_customer1 VALUES LESS THAN (1200)
+    )
+);
+INSERT INTO list_range_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX list_range_sales_idx ON list_range_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--success, add 4 subpartitions
+ALTER TABLE list_range_sales ADD PARTITION channel5 VALUES ('X')
+    (
+        SUBPARTITION channel5_customer1 VALUES LESS THAN (200),
+        SUBPARTITION channel5_customer2 VALUES LESS THAN (500),
+        SUBPARTITION channel5_customer3 VALUES LESS THAN (800),
+        SUBPARTITION channel5_customer4 VALUES LESS THAN (1200)
+    );
+--fail, value conflict
+ALTER TABLE list_range_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C');
+--fail, invalid format
+ALTER TABLE list_range_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z');
+--success, add 1 default subpartition
+ALTER TABLE list_range_sales ADD PARTITION channel6 VALUES (DEFAULT);
+--fail, value conflict
+ALTER TABLE list_range_sales ADD PARTITION channel_temp3 VALUES ('M', 'X');
+--success, add 1 subpartition
+ALTER TABLE list_range_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_customer5 VALUES LESS THAN (MAXVALUE);
+--fail, out of range
+ALTER TABLE list_range_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES LESS THAN (2000);
+--fail, out of range
+ALTER TABLE list_range_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES LESS THAN (2000);
+--fail, invalid format
+ALTER TABLE list_range_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES (1500);
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_range_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='list_range_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_range_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ list_range_sales
+
+--check for drop partition/subpartition (for)
+--success, drop partition channel2
+ALTER TABLE list_range_sales DROP PARTITION channel2;
+--success
+ALTER TABLE list_range_sales DROP SUBPARTITION channel1_customer1;
+--fail, cannot drop the only subpartition
+ALTER TABLE list_range_sales DROP SUBPARTITION channel4_customer1;
+--success, drop partition channel3
+ALTER TABLE list_range_sales DROP PARTITION FOR ('6');
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE list_range_sales DROP PARTITION FOR('X', 700);
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X');
+--fail, invalid type
+ALTER TABLE list_range_sales DROP PARTITION FOR (10);
+--fail, invalid type
+ALTER TABLE list_range_sales DROP SUBPARTITION FOR(700, 'X');
+--success, drop subpartition channel5_customer3
+ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 700);
+--fail, cannot drop the only subpartition in channel6
+ALTER TABLE list_range_sales DROP SUBPARTITION FOR('K', 100);
+--fail, no subpartition found
+ALTER TABLE list_range_sales DROP SUBPARTITION FOR('X', 2500);
+
+--check for ok after drop
+SELECT count(*) FROM list_range_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_range_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='list_range_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_range_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ list_range_sales
+
+--
+----list-list table----
+--
+--prepare
+CREATE TABLE list_list_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (channel_id) SUBPARTITION BY LIST (type_id)
+(
+    PARTITION channel1 VALUES ('0', '1', '2')
+    (
+        SUBPARTITION channel1_type1 VALUES (0, 1, 2),
+        SUBPARTITION channel1_type2 VALUES (3, 4),
+        SUBPARTITION channel1_type3 VALUES (5, 6, 7),
+        SUBPARTITION channel1_type4 VALUES (8, 9)
+    ),
+    PARTITION channel2 VALUES ('3', '4', '5')
+    (
+        SUBPARTITION channel2_type1 VALUES (0, 1, 2, 3),
+        SUBPARTITION channel2_type2 VALUES (DEFAULT)
+    ),
+    PARTITION channel3 VALUES ('6', '7'),
+    PARTITION channel4 VALUES ('8', '9')
+    (
+        SUBPARTITION channel4_type1 VALUES (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+    )
+);
+INSERT INTO list_list_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX list_list_sales_idx ON list_list_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--success, add 4 subpartitions
+ALTER TABLE list_list_sales ADD PARTITION channel5 VALUES ('X')
+    (
+        SUBPARTITION channel5_type1 VALUES (0, 1, 2),
+        SUBPARTITION channel5_type2 VALUES (3, 4),
+        SUBPARTITION channel5_type3 VALUES (5, 6, 7),
+        SUBPARTITION channel5_type4 VALUES (8, 9)
+    );
+--fail, value conflict
+ALTER TABLE list_list_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C');
+--fail, invalid format
+ALTER TABLE list_list_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z');
+--success, add 1 default subpartition
+ALTER TABLE list_list_sales ADD PARTITION channel6 VALUES (DEFAULT);
+--fail, value conflict
+ALTER TABLE list_list_sales ADD PARTITION channel_temp3 VALUES ('M', 'X');
+--success, add 1 subpartition
+ALTER TABLE list_list_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_type5 VALUES (DEFAULT);
+--fail, out of range
+ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_temp1 VALUES (10, 11, 12);
+--fail, out of range
+ALTER TABLE list_list_sales MODIFY PARTITION channel3 ADD SUBPARTITION channel3_temp1 VALUES (10, 11, 12);
+--fail, invalid format
+ALTER TABLE list_list_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='list_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ list_list_sales
+
+--check for drop partition/subpartition (for)
+--success, drop partition channel2
+ALTER TABLE list_list_sales DROP PARTITION channel2;
+--success
+ALTER TABLE list_list_sales DROP SUBPARTITION channel1_type1;
+--fail, cannot drop the only subpartition
+ALTER TABLE list_list_sales DROP SUBPARTITION channel4_type1;
+--success, drop partition channel3
+ALTER TABLE list_list_sales DROP PARTITION FOR ('6');
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE list_list_sales DROP PARTITION FOR('X', 6);
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X');
+--fail, invalid type
+ALTER TABLE list_list_sales DROP PARTITION FOR (10);
+--fail, invalid type
+ALTER TABLE list_list_sales DROP SUBPARTITION FOR(10, 'X');
+--success, drop subpartition channel5_type3
+ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 6);
+--fail, cannot drop the only subpartition in channel6
+ALTER TABLE list_list_sales DROP SUBPARTITION FOR('K', 10);
+--fail, no subpartition found
+ALTER TABLE list_list_sales DROP SUBPARTITION FOR('X', 5);
+
+--check for ok after drop
+SELECT count(*) FROM list_list_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='list_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ list_list_sales
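+
+-- NOTE (editor's illustration, not part of the original test): a DEFAULT list (sub)partition
+-- absorbs every value not listed elsewhere, which is presumably why the "out of range" ADD
+-- SUBPARTITION attempts on channel2 and channel3 above fail: channel2 declares an explicit
+-- DEFAULT subpartition and channel3's single implicit subpartition plays the same role, so no
+-- new value set can be carved out of either, e.g. (commented out, hypothetical name):
+-- ALTER TABLE list_list_sales MODIFY PARTITION channel2 ADD SUBPARTITION channel2_more VALUES (42);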
+
+--
+----list-hash table----
+--
+--prepare
+CREATE TABLE list_hash_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (channel_id) SUBPARTITION BY HASH (product_id)
+(
+    PARTITION channel1 VALUES ('0', '1', '2')
+    (
+        SUBPARTITION channel1_product1,
+        SUBPARTITION channel1_product2,
+        SUBPARTITION channel1_product3,
+        SUBPARTITION channel1_product4
+    ),
+    PARTITION channel2 VALUES ('3', '4', '5')
+    (
+        SUBPARTITION channel2_product1,
+        SUBPARTITION channel2_product2
+    ),
+    PARTITION channel3 VALUES ('6', '7'),
+    PARTITION channel4 VALUES ('8', '9')
+    (
+        SUBPARTITION channel4_product1
+    )
+);
+INSERT INTO list_hash_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX list_hash_sales_idx ON list_hash_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--success, add 4 subpartitions
+ALTER TABLE list_hash_sales ADD PARTITION channel5 VALUES ('X')
+    (
+        SUBPARTITION channel5_product1,
+        SUBPARTITION channel5_product2,
+        SUBPARTITION channel5_product3,
+        SUBPARTITION channel5_product4
+    );
+--fail, value conflict
+ALTER TABLE list_hash_sales ADD PARTITION channel_temp1 VALUES ('0', 'Z', 'C');
+--fail, invalid format
+ALTER TABLE list_hash_sales ADD PARTITION channel_temp2 VALUES LESS THAN ('Z');
+--success, add 1 default subpartition
+ALTER TABLE list_hash_sales ADD PARTITION channel6 VALUES (DEFAULT);
+--fail, value conflict
+ALTER TABLE list_hash_sales ADD PARTITION channel_temp3 VALUES ('M', 'X');
+--fail, adding hash subpartitions is not supported
+ALTER TABLE list_hash_sales MODIFY PARTITION channel1 ADD SUBPARTITION channel1_temp1;
+--fail, invalid format
+ALTER TABLE list_hash_sales MODIFY PARTITION channel4 ADD SUBPARTITION channel4_temp1 VALUES LESS THAN (1500);
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_hash_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='list_hash_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_hash_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ list_hash_sales
+
+--check for drop partition/subpartition (for)
+--success, drop partition channel2
+ALTER TABLE list_hash_sales DROP PARTITION channel2;
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE list_hash_sales DROP SUBPARTITION channel1_product1;
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE list_hash_sales DROP SUBPARTITION channel4_product1;
+--success, drop partition channel3
+ALTER TABLE list_hash_sales DROP PARTITION FOR ('6');
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE list_hash_sales DROP PARTITION FOR ('6', '2010-01-01');
+--fail, invalid type
+ALTER TABLE list_hash_sales DROP PARTITION FOR (10);
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE list_hash_sales DROP SUBPARTITION FOR('X', 6);
+
+--check for ok after drop
+SELECT count(*) FROM list_hash_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_hash_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='list_hash_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='list_hash_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ list_hash_sales
+
+--
+----hash-range table----
+--
+--prepare
+CREATE TABLE hash_range_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (product_id) SUBPARTITION BY RANGE (customer_id)
+(
+    PARTITION product1
+    (
+        SUBPARTITION product1_customer1 VALUES LESS THAN (200),
+        SUBPARTITION product1_customer2 VALUES LESS THAN (500),
+        SUBPARTITION product1_customer3 VALUES LESS THAN (800),
+        SUBPARTITION product1_customer4 VALUES LESS THAN (1200)
+    ),
+    PARTITION product2
+    (
+        SUBPARTITION product2_customer1 VALUES LESS THAN (500),
+        SUBPARTITION product2_customer2 VALUES LESS THAN (MAXVALUE)
+    ),
+    PARTITION product3,
+    PARTITION product4
+    (
+        SUBPARTITION product4_customer1 VALUES LESS THAN (1200)
+    )
+);
+INSERT INTO hash_range_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX hash_range_sales_idx ON hash_range_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--fail, adding hash partitions is not supported
+ALTER TABLE hash_range_sales ADD PARTITION product_temp1
+    (
+        SUBPARTITION product_temp1_customer1 VALUES LESS THAN (200),
+        SUBPARTITION product_temp1_customer2 VALUES LESS THAN (500),
+        SUBPARTITION product_temp1_customer3 VALUES LESS THAN (800),
+        SUBPARTITION product_temp1_customer4 VALUES LESS THAN (1200)
+    );
+--fail, adding hash partitions is not supported
+ALTER TABLE hash_range_sales ADD PARTITION product_temp2;
+--success, add 1 subpartition
+ALTER TABLE hash_range_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_customer5 VALUES LESS THAN (1800);
+--fail, out of range
+ALTER TABLE hash_range_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES LESS THAN (1800);
+--fail, invalid format
+ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES (DEFAULT);
+--success, add 1 subpartition
+ALTER TABLE hash_range_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_customer2 VALUES LESS THAN (MAXVALUE);
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_range_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_range_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_range_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ hash_range_sales
+
+--check for drop partition/subpartition (for)
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_range_sales DROP PARTITION product2;
+--success, drop subpartition product1_customer1
+ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer1;
+--success, drop subpartition product4_customer1
+ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer1;
+--fail, cannot drop the only subpartition in product4
+ALTER TABLE hash_range_sales DROP SUBPARTITION product4_customer2;
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_range_sales DROP PARTITION FOR(0);
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_range_sales DROP PARTITION FOR(0, 100);
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0);
+--fail, invalid type
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR('2010-01-01', 100);
+--success, drop subpartition product1_customer2, though this operation is not recommended
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 100);
+--fail, no subpartition found
+ALTER TABLE hash_range_sales DROP SUBPARTITION FOR(0, 2300);
+
+--check for ok after drop
+SELECT count(*) FROM hash_range_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_range_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_range_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_range_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ hash_range_sales
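+
+-- NOTE (editor's illustration, not part of the original test): with a hash first-level key,
+-- DROP SUBPARTITION FOR(0, 100) first hashes 0 to choose a partition and only then matches 100
+-- against that partition's range subpartitions. Which partition a given value hashes to is an
+-- implementation detail, which is why the successful drop above is flagged as not recommended;
+-- the name-based form is deterministic (commented out):
+-- ALTER TABLE hash_range_sales DROP SUBPARTITION product1_customer2;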
+
+--
+----hash-list table----
+--
+--prepare
+CREATE TABLE hash_list_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (product_id) SUBPARTITION BY LIST (channel_id)
+(
+    PARTITION product1
+    (
+        SUBPARTITION product1_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION product1_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION product1_channel3 VALUES ('6', '7', '8'),
+        SUBPARTITION product1_channel4 VALUES ('9')
+    ),
+    PARTITION product2
+    (
+        SUBPARTITION product2_channel1 VALUES ('0', '1', '2', '3', '4'),
+        SUBPARTITION product2_channel2 VALUES (DEFAULT)
+    ),
+    PARTITION product3,
+    PARTITION product4
+    (
+        SUBPARTITION product4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
+    )
+);
+INSERT INTO hash_list_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX hash_list_sales_idx ON hash_list_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--fail, adding hash partitions is not supported
+ALTER TABLE hash_list_sales ADD PARTITION product_temp1
+    (
+        SUBPARTITION product_temp1_channel1 VALUES ('0', '1', '2'),
+        SUBPARTITION product_temp1_channel2 VALUES ('3', '4', '5'),
+        SUBPARTITION product_temp1_channel3 VALUES ('6', '7', '8'),
+        SUBPARTITION product_temp1_channel4 VALUES ('9')
+    );
+--fail, adding hash partitions is not supported
+ALTER TABLE hash_list_sales ADD PARTITION product_temp2;
+--success, add 1 subpartition
+ALTER TABLE hash_list_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_channel5 VALUES ('X');
+--fail, out of range
+ALTER TABLE hash_list_sales MODIFY PARTITION product2 ADD SUBPARTITION product2_temp1 VALUES ('X');
+--fail, out of range
+ALTER TABLE hash_list_sales MODIFY PARTITION product3 ADD SUBPARTITION product3_temp1 VALUES ('X');
+--fail, invalid format
+ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_temp1 VALUES LESS THAN (MAXVALUE);
+--success, add 1 subpartition
+ALTER TABLE hash_list_sales MODIFY PARTITION product4 ADD SUBPARTITION product4_channel2 VALUES (DEFAULT);
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ hash_list_sales
+
+--check for drop partition/subpartition (for)
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_list_sales DROP PARTITION product2;
+--success, drop subpartition product1_channel1
+ALTER TABLE hash_list_sales DROP SUBPARTITION product1_channel1;
+--success, drop subpartition product4_channel1
+ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel1;
+--fail, cannot drop the only subpartition in product4
+ALTER TABLE hash_list_sales DROP SUBPARTITION product4_channel2;
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_list_sales DROP PARTITION FOR(0);
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_list_sales DROP PARTITION FOR(0, '4');
+--fail, number of values not equal to the number of partkeys
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0);
+--fail, invalid type
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR('2010-01-01', '4');
+--success, drop subpartition product1_channel2, though this operation is not recommended
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, '4');
+--fail, no subpartition found
+ALTER TABLE hash_list_sales DROP SUBPARTITION FOR(0, 'Z');
+
+--check for ok after drop
+SELECT count(*) FROM hash_list_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_list_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_list_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ hash_list_sales
+
+--
+----hash-hash table----
+--
+--prepare
+CREATE TABLE hash_hash_sales
+(
+    product_id INT4 NOT NULL,
+    customer_id INT4 PRIMARY KEY,
+    time_id DATE,
+    channel_id CHAR(1),
+    type_id INT4,
+    quantity_sold NUMERIC(3),
+    amount_sold NUMERIC(10,2)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (product_id) SUBPARTITION BY HASH (customer_id)
+(
+    PARTITION product1
+    (
+        SUBPARTITION product1_customer1,
+        SUBPARTITION product1_customer2,
+        SUBPARTITION product1_customer3,
+        SUBPARTITION product1_customer4
+    ),
+    PARTITION product2
+    (
+        SUBPARTITION product2_customer1,
+        SUBPARTITION product2_customer2
+    ),
+    PARTITION product3,
+    PARTITION product4
+    (
+        SUBPARTITION product4_customer1
+    )
+);
+INSERT INTO hash_hash_sales SELECT generate_series(1,1000),
+    generate_series(1,1000),
+    date_pli('2008-01-01', generate_series(1,1000)),
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%10,
+    generate_series(1,1000)%1000,
+    generate_series(1,1000);
+CREATE INDEX hash_hash_sales_idx ON hash_hash_sales(product_id) LOCAL;
+
+--check for add partition/subpartition
+--fail, adding hash partitions is not supported
+ALTER TABLE hash_hash_sales ADD PARTITION product_temp1
+    (
+        SUBPARTITION product_temp1_customer1,
+        SUBPARTITION product_temp1_customer2,
+        SUBPARTITION product_temp1_customer3,
+        SUBPARTITION product_temp1_customer4
+    );
+--fail, adding hash partitions is not supported
+ALTER TABLE hash_hash_sales ADD PARTITION product_temp2;
+--fail, adding hash subpartitions is not supported
+ALTER TABLE hash_hash_sales MODIFY PARTITION product1 ADD SUBPARTITION product1_temp1;
+
+--check for ok after add
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_hash_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_hash_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_hash_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ hash_hash_sales
+
+--check for drop partition/subpartition (for)
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_hash_sales DROP PARTITION product2;
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE hash_hash_sales DROP SUBPARTITION product1_customer1;
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE hash_hash_sales DROP SUBPARTITION product4_customer1;
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_hash_sales DROP PARTITION FOR(0);
+--fail, dropping hash partitions is not supported
+ALTER TABLE hash_hash_sales DROP PARTITION FOR(0, 0);
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0, 0);
+--fail, dropping hash subpartitions is not supported
+ALTER TABLE hash_hash_sales DROP SUBPARTITION FOR(0);
+
+--check for ok after drop
+SELECT count(*) FROM hash_hash_sales;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.reltablespace, p1.partkey, p1.boundaries
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_hash_sales'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid
+      OR p1.parentid IN (
+        SELECT p2.oid FROM pg_class c2, pg_partition p2, pg_namespace n2
+          WHERE c2.relname='hash_hash_sales'
+            AND c2.relnamespace=n2.oid
+            AND n2.nspname=CURRENT_SCHEMA
+            AND (p2.parentid=c2.oid)
+      ))
+  ORDER BY p1.parttype, p1.relname;
+SELECT p1.relname, p1.parttype, p1.partstrategy, p1.relfilenode!=0 hasfilenode, p1.indisusable
+  FROM pg_class c1, pg_partition p1, pg_namespace n1
+  WHERE c1.relname='hash_hash_sales_idx'
+    AND c1.relnamespace=n1.oid
+    AND n1.nspname=CURRENT_SCHEMA
+    AND (p1.parentid=c1.oid)
+  ORDER BY p1.relname;
+\d+ hash_hash_sales
+
+--finish
+DROP TABLE range_range_sales;
+DROP TABLE range_list_sales;
+DROP TABLE range_hash_sales;
+DROP TABLE list_range_sales;
+DROP TABLE list_list_sales;
+DROP TABLE list_hash_sales;
+DROP TABLE hash_range_sales;
+DROP TABLE hash_list_sales;
+DROP TABLE hash_hash_sales;
+
+DROP SCHEMA ustore_subpartition_add_drop_partition CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_alter_table.sql b/src/test/regress/sql/ustore_subpartition_alter_table.sql
new file mode 100644
index 000000000..94ab2f422
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_alter_table.sql
@@ -0,0 +1,151 @@
+DROP SCHEMA ustore_subpartition_alter_table CASCADE;
+CREATE SCHEMA ustore_subpartition_alter_table;
+SET CURRENT_SCHEMA TO ustore_subpartition_alter_table;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+
+--change column type
+alter table range_range alter column user_no set data type char(30);
+alter table range_range alter column sales_amt set data type varchar;
+\d+ range_range
+
+-- rename
+alter table range_range rename to hahahahahah;
+alter table range_range rename partition p_201901 to hahahahahah;
+alter table range_range rename partition p_201901_a to hahahahahah;
+
+--cluster
+create index idx_range_range on range_range(month_code,user_no);
+alter table range_range cluster on idx_range_range;
+
+-- move tablespace
+CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1';
+alter table range_range move PARTITION p_201901 tablespace example1;
+alter table range_range move PARTITION p_201901_a tablespace example1;
+DROP TABLESPACE example1;
+
+-- merge
+alter table range_range merge PARTITIONS p_201901 , p_201902 into PARTITION p_range_3;
+alter table range_range merge SUBPARTITIONS p_201901 , p_201902 into PARTITION p_range_3;
+
+-- exchange
+CREATE TABLE ori
+(
+    month_code VARCHAR2 ( 30 ) ,
+    dept_code VARCHAR2 ( 30 ) ,
+    user_no VARCHAR2 ( 30 ) ,
+    sales_amt int,
+    primary key(month_code, dept_code)
+) WITH (STORAGE_TYPE=USTORE);
+ALTER TABLE range_range EXCHANGE PARTITION (p_201901) WITH TABLE ori;
+ALTER TABLE range_range EXCHANGE SUBPARTITION (p_201901) WITH TABLE ori;
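+
+-- NOTE (editor's illustration, not part of the original test): EXCHANGE swaps the data of a
+-- (sub)partition with an ordinary table of identical definition, which is why ori above mirrors
+-- range_range column for column; on a subpartitioned table only the SUBPARTITION form can work
+-- here, since a first-level partition is not a single physical relation.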
+
+-- drop
+alter table range_range drop partition p_201901;
+alter table range_range drop partition p_201901_a;
+alter table range_range drop subpartition p_201901_a;
+
+-- add
+alter table range_range add partition p_range_4 VALUES LESS THAN('201904');
+
+-- split
+alter table range_range split PARTITION p_201901 at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 );
+
+drop table ori;
+drop table range_range;
+
+CREATE TABLE IF NOT EXISTS range_range_02
+(
+    col_1 int ,
+    col_2 int ,
+    col_3 VARCHAR2 ( 30 ) NOT NULL ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( 10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 20 )
+    (
+        SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 )
+    )
+) ENABLE ROW MOVEMENT;
+
+create index on range_range_02(col_2) local;
+
+alter table range_range_02 MODIFY PARTITION p_range_2 UNUSABLE LOCAL INDEXES;
+
+alter table range_range_02 MODIFY PARTITION p_range_2 REBUILD UNUSABLE LOCAL INDEXES;
+
+alter table range_range_02 alter col_1 type char;
+
+alter table range_range_02 alter col_2 type char;
+
+drop table range_range_02;
+
+--validate constraint
+CREATE TABLE hash_hash
+(
+    col_1 int ,
+    col_2 int NOT NULL ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY hash (col_3) SUBPARTITION BY hash (col_2)
+(
+    PARTITION p_hash_1
+    (
+        SUBPARTITION p_hash_1_1 ,
+        SUBPARTITION p_hash_1_2 ,
+        SUBPARTITION p_hash_1_3 ,
+        SUBPARTITION p_hash_1_4
+    ),
+    PARTITION p_hash_2
+    (
+        SUBPARTITION p_hash_2_1 ,
+        SUBPARTITION p_hash_2_2
+    ),
+    PARTITION p_hash_3,
+    PARTITION p_hash_4
+    (
+        SUBPARTITION p_hash_4_1
+    ),
+    PARTITION p_hash_5
+);
+
+INSERT INTO hash_hash VALUES(null,1,1,1);
+alter table hash_hash add constraint con_hash_hash check(col_1 is not null) NOT VALID ;
+INSERT INTO hash_hash VALUES(null,2,1,1); --error
+INSERT INTO hash_hash VALUES(1,3,1,1); --success
+alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --error
+delete from hash_hash where col_1 is null;
+alter table hash_hash VALIDATE CONSTRAINT con_hash_hash; --success
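+
+-- NOTE (editor's illustration, not part of the original test): a CHECK constraint added NOT
+-- VALID is enforced for new rows immediately (hence the second NULL insert fails above) but
+-- tolerates pre-existing violations until VALIDATE CONSTRAINT rescans the table, which is why
+-- VALIDATE fails before the offending row is deleted and succeeds afterwards.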
+
+drop table hash_hash cascade;
+-- clean
+DROP SCHEMA ustore_subpartition_alter_table CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_analyze_vacuum.sql b/src/test/regress/sql/ustore_subpartition_analyze_vacuum.sql
new file mode 100644
index 000000000..07e5765f4
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_analyze_vacuum.sql
@@ -0,0 +1,51 @@
+-- prepare
+DROP SCHEMA ustore_subpartition_analyze_vacuum CASCADE;
+CREATE SCHEMA ustore_subpartition_analyze_vacuum;
+SET CURRENT_SCHEMA TO ustore_subpartition_analyze_vacuum;
+
+-- base function
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+
+create index idx_month_code_local on range_list(month_code) local;
+create index idx_dept_code_global on range_list(dept_code) global;
+create index idx_user_no_global on range_list(user_no) global;
+
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '2', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+select * from range_list order by 1, 2, 3, 4;
+delete from range_list where month_code = '201902';
+select * from range_list order by 1, 2, 3, 4;
+analyze range_list;
+analyze range_list partition (p_201901);
+vacuum range_list;
+vacuum range_list partition (p_201901);
+
+drop table range_list;
+
+-- clean
+DROP SCHEMA ustore_subpartition_analyze_vacuum CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_createtable.sql b/src/test/regress/sql/ustore_subpartition_createtable.sql
new file mode 100644
index 000000000..6acb8b055
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_createtable.sql
@@ -0,0 +1,1404 @@
+
+--1.create table
+--list_list list_hash list_range range_list range_hash range_range
+
+--prepare
+DROP SCHEMA ustore_subpartition_createtable CASCADE;
+CREATE SCHEMA ustore_subpartition_createtable;
+SET CURRENT_SCHEMA TO ustore_subpartition_createtable;
+
+--1.1 normal table
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+select * from list_list;
+drop table list_list;
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+insert into list_hash values('201902', '1', '1', 1);
+insert into list_hash values('201902', '2', '1', 1);
+insert into list_hash values('201902', '3', '1', 1);
+insert into list_hash values('201903', '4', '1', 1);
+insert into list_hash values('201903', '5', '1', 1);
+insert into list_hash values('201903', '6', '1', 1);
+select * from list_hash;
+drop table list_hash;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a values less than ('4'),
+        SUBPARTITION p_201901_b values less than ('6')
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a values less than ('3'),
+        SUBPARTITION p_201902_b values less than ('6')
+    )
+);
+insert into list_range values('201902', '1', '1', 1);
+insert into list_range values('201902', '2', '1', 1);
+insert into list_range values('201902', '3', '1', 1);
+insert into list_range values('201903', '4', '1', 1);
+insert into list_range values('201903', '5', '1', 1);
+insert into list_range values('201903', '6', '1', 1);
+
+select * from list_range;
+drop table list_range;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+
+select * from range_list;
+drop table range_list;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+insert into range_hash values('201902', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201902', '1', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('201903', '1', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+
+select * from range_hash;
+drop table range_hash;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201902', '2', '1', 1);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('201903', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+
+select * from range_range;
+drop table range_range;
+
+CREATE TABLE hash_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY hash (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201901', '2', '1', 1);
+insert into hash_list values('201901', '1', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+insert into hash_list values('201903', '1', '1', 1);
+insert into hash_list values('201903', '2', '1', 1);
+
+select * from hash_list;
+drop table hash_list;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY hash (month_code) SUBPARTITION BY hash (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201901', '2', '1', 1);
+insert into hash_hash values('201901', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+insert into hash_hash values('201903', '1', '1', 1);
+insert into hash_hash values('201903', '2', '1', 1);
+
+select * from hash_hash;
+drop table hash_hash;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY hash (month_code) SUBPARTITION BY range (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN ( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN ( '3' )
+    ),
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN ( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN ( '3' )
+    )
+);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201901', '2', '1', 1);
+insert into hash_range values('201901', '1', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+insert into hash_range values('201903', '1', '1', 1);
+insert into hash_range values('201903', '2', '1', 1);
+
+select * from hash_range;
+drop table hash_range;
+
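+
+-- NOTE (editor's illustration, not part of the original test): section 1.2 below exercises
+-- partitions declared without an explicit subpartition list; each such partition gets a single
+-- implicit subpartition that covers all subpartition-key values, so mixed and fully-defaulted
+-- layouts are both accepted.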
+
+--1.2 table with default subpartition
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_list;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_list;
+
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+drop table list_hash;
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_hash;
+
+CREATE TABLE list_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_hash;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a values less than ('3'),
+        SUBPARTITION p_201902_b values less than ('6')
+    )
+);
+drop table list_range;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a values less than ('4'),
+        SUBPARTITION p_201901_b values less than ('6')
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_range;
+
+CREATE TABLE list_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' ),
+    PARTITION p_201902 VALUES ( '201903' )
+);
+drop table list_range;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+drop table range_list;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_list;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_list;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+drop table range_hash;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_hash;
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_hash;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+drop table range_range;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_range;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+);
+drop table range_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a VALUES( '2' ),
+        SUBPARTITION p_201902_b VALUES( '3' )
+    )
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a VALUES( '2' ),
+        SUBPARTITION p_201901_b VALUES( '3' )
+    ),
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+);
+drop table hash_range;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+drop table hash_hash;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902
+);
+drop table hash_hash;
+
+CREATE TABLE hash_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY HASH (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901,
+    PARTITION p_201902
+);
+drop table hash_hash;
+
+
+--1.3 subpartition name check
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_a VALUES ( '2' )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code
VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901 VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201901_subpartdefault1 VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + + + +--1.4 subpartition key check +-- the level-1 partition key and the level-2 subpartition key are the same column + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (month_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +--two subpartitions have the same key value + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '1' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +--the partition column does not exist +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_codeXXX) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_codeXXX) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( '201903' ) + 
( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); + + +CREATE TABLE list_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a values less than ('4'), + SUBPARTITION p_201901_b values less than ('4') + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a values less than ('3'), + SUBPARTITION p_201902_b values less than ('6') + ) +); +drop table list_range; + + +--1.5 list subpartition with default + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( '2' ) + ), + PARTITION p_201902 VALUES ( default ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201902', '2', '1', 1); +insert into list_list values('201902', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +insert into list_list values('201903', '1', '1', 1); +insert into list_list values('201903', '2', '1', 1); +select * from list_list partition (p_201901); +select * from list_list partition (p_201902); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1' ), + SUBPARTITION p_201902_b VALUES ( '2' ) + ) +); +drop table list_list; + +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); +drop table list_list; + +--1.6 declaration and definition of the subpartition type must be the same.
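+-- note: the following CREATE declares SUBPARTITION BY hash but defines its subpartitions with list-style VALUES clauses, so it is expected to fail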
+--error +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY hash (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( default ) + ) +); + +--1.7 add constraint +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '6' ) + ) +); + +alter table range_range add constraint constraint_check CHECK (sales_amt IS NOT NULL); +insert into range_range values(1,1,1); +drop table range_range; + +-- drop partition column +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; + +alter table range_hash_02 drop column col_1; + +alter table range_hash_02 drop column col_2; + +drop table range_hash_02; +--1.8 SET ROW MOVEMENT +CREATE TABLE list_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES ( '201902' ) + ( + SUBPARTITION p_201901_a VALUES ( '1', '2' ), + SUBPARTITION p_201901_b VALUES ( default ) + ), + PARTITION p_201902 VALUES ( '201903' ) + ( + SUBPARTITION p_201902_a VALUES ( '1', '2' ), + SUBPARTITION p_201902_b VALUES ( default ) + ) +); +alter table list_list disable ROW MOVEMENT; +insert into list_list values('201902', '1', '1', 1); +update list_list set month_code = '201903'; +update list_list set dept_code = '3'; +alter table list_list enable ROW MOVEMENT; +update list_list set month_code = '201903'; +update list_list set dept_code = '3'; +drop table list_list; + +--1.9 without subpartition declaration +create table test(a int) WITH (STORAGE_TYPE=USTORE) +partition by range(a) +( +partition p1 values less than(100) +( +subpartition subp1 values less than(50), +subpartition subp2 values less than(100) +), +partition p2 values less than(200), +partition p3 values less than(maxvalue) +); + +--1.10 create table like +CREATE TABLE range_range +( + col_1 int primary key, + col_2 int NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + col_4 int generated always as(2*col_2) 
stored , + check (col_4 >= col_2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( 10 ) + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ) + ( + SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ) + ) +) ENABLE ROW MOVEMENT; + +CREATE TABLE range_range_02 (like range_range INCLUDING ALL ); +drop table range_range; + +--ROW LEVEL SECURITY POLICY +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +CREATE ROW LEVEL SECURITY POLICY range_range_rls ON range_range USING(user_no = CURRENT_USER); + +drop table range_range; + +-- ledger database +CREATE SCHEMA ledgernsp WITH BLOCKCHAIN; + +CREATE TABLE ledgernsp.range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); + +DROP SCHEMA ledgernsp; +-- create table as +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; + +insert into range_range values(201902,1,1,1),(201902,1,1,1),(201902,3,1,1),(201903,1,1,1),(201903,2,1,1),(201903,2,1,1); + +select * from range_range subpartition(p_201901_a) where month_code in(201902,201903) order by 1,2,3,4; + +create table range_range_copy WITH (STORAGE_TYPE=USTORE) as select * from range_range subpartition(p_201901_a) where month_code in(201902,201903); + +select * from range_range_copy order by 1,2,3,4; + +drop table range_range; +drop table range_range_copy; + +--1.11 create index +create table range_range_03 +( + c_int int, + c_char1 char(3000), + c_char2 char(5000), + c_char3 char(6000), + c_varchar1 varchar(3000), + c_varchar2 varchar(5000), + c_varchar3 varchar, + c_varchar4 varchar, + c_text1 text, + c_text2 text, + c_text3 text, + c int, + primary key(c,c_int) +) with (parallel_workers=10, STORAGE_TYPE=USTORE) +partition by range (c_int) subpartition by range (c_char1) +( + 
partition p1 values less than(50) + ( + subpartition p1_1 values less than('c'), + subpartition p1_2 values less than(maxvalue) + ), + partition p2 values less than(100) + ( + subpartition p2_1 values less than('c'), + subpartition p2_2 values less than(maxvalue) + ), + partition p3 values less than(150) + ( + subpartition p3_1 values less than('c'), + subpartition p3_2 values less than(maxvalue) + ), + partition p4 values less than(200) + ( + subpartition p4_1 values less than('c'), + subpartition p4_2 values less than(maxvalue) + ), + partition p5 values less than(maxvalue)( + subpartition p5_1 values less than('c'), + subpartition p5_2 values less than(maxvalue) + ) +) enable row movement; + +create index range_range_03_idx1 on range_range_03 (c_varchar1) local; --success + +create index range_range_03_idx2 on range_range_03 (c_varchar2) local ( + partition cpt7_p1, + partition cpt7_p2, + partition cpt7_p3, + partition cpt7_p4, + partition cpt7_p5 +); --failed + +create index range_range_03_idx3 on range_range_03 (c_varchar3); --success, default global + +create index range_range_03_idx4 on range_range_03 (c_varchar4) global; --success + +create index range_range_03_idx5 on range_range_03 (c_varchar4) local; --failed, cannot be on the same column as the global index + +\d+ range_range_03 + +select pg_get_tabledef('range_range_03'); + +drop table range_range_03; + +--unique local index columns must contain the partition key +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +) ENABLE ROW MOVEMENT; +create unique index idx on range_range(month_code) local; +create unique index idx1 on range_range(month_code, user_no) local; +drop table range_range; + +-- partition key has timestamp with time zone type +drop table hash_range; +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (col_2) SUBPARTITION BY RANGE (col_19) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; + +CREATE TABLE hash_range +( + col_1 int PRIMARY KEY USING INDEX, + col_2 int NOT NULL , + col_3 int NOT NULL , + col_4 int, + col_19 TIMESTAMP WITH TIME ZONE +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY HASH (col_19) SUBPARTITION BY RANGE (col_2) +( partition p_hash_1 + ( + SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ), + SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE ) + ), + partition p_hash_2, + PARTITION p_hash_3, + PARTITION p_hash_4, + PARTITION p_hash_5, + PARTITION p_hash_7 +) ENABLE ROW MOVEMENT; +drop table hash_range; +--clean +DROP SCHEMA ustore_subpartition_createtable CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/ustore_subpartition_ddl_index.sql b/src/test/regress/sql/ustore_subpartition_ddl_index.sql new file mode 
100644 index 000000000..c16eab02a --- /dev/null +++ b/src/test/regress/sql/ustore_subpartition_ddl_index.sql @@ -0,0 +1,153 @@ +-- +----test that index is OK when using DDL grammar for subpartition---- +-- +DROP SCHEMA ustore_subpartition_ddl_index CASCADE; +CREATE SCHEMA ustore_subpartition_ddl_index; +SET CURRENT_SCHEMA TO ustore_subpartition_ddl_index; + +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_indexonlyscan = ON; +SET enable_bitmapscan = OFF; + +-- +--test for add/drop partition/subpartition +-- +--1. first, we create a subpartitioned table, and indexes on the table +CREATE TABLE range_list_sales1 +( + product_id INT4, + customer_id INT4, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (customer_id) SUBPARTITION BY LIST (channel_id) +( + PARTITION customer1 VALUES LESS THAN (200) + ( + SUBPARTITION customer1_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer1_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer1_channel3 VALUES ('6', '7', '8'), + SUBPARTITION customer1_channel4 VALUES ('9') + ), + PARTITION customer2 VALUES LESS THAN (500) + ( + SUBPARTITION customer2_channel1 VALUES ('0', '1', '2', '3', '4'), + SUBPARTITION customer2_channel2 VALUES (DEFAULT) + ), + PARTITION customer3 VALUES LESS THAN (800), + PARTITION customer4 VALUES LESS THAN (1200) + ( + SUBPARTITION customer4_channel1 VALUES ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') + ) +); +INSERT INTO range_list_sales1 SELECT generate_series(1,1000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +CREATE INDEX range_list_sales1_idx1 ON range_list_sales1(product_id, customer_id) GLOBAL; +CREATE INDEX range_list_sales1_idx2 ON range_list_sales1(channel_id) GLOBAL; +CREATE INDEX range_list_sales1_idx3 ON range_list_sales1(customer_id) LOCAL; +CREATE INDEX range_list_sales1_idx4 ON range_list_sales1(time_id, type_id) LOCAL; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--2. 
add partition/subpartition will not influence the index +ALTER TABLE range_list_sales1 ADD PARTITION customer5 VALUES LESS THAN (1500) + ( + SUBPARTITION customer5_channel1 VALUES ('0', '1', '2'), + SUBPARTITION customer5_channel2 VALUES ('3', '4', '5'), + SUBPARTITION customer5_channel3 VALUES ('6', '7', '8') + ); +ALTER TABLE range_list_sales1 ADD PARTITION customer6 VALUES LESS THAN (MAXVALUE); +ALTER TABLE range_list_sales1 MODIFY PARTITION customer5 ADD SUBPARTITION customer5_channel4 VALUES ('9'); +INSERT INTO range_list_sales1 SELECT generate_series(1001,2000), + generate_series(1,1000), + date_pli('2008-01-01', generate_series(1,1000)), + generate_series(1,1000)%10, + generate_series(1,1000)%10, + generate_series(1,1000)%1000, + generate_series(1,1000); + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--3. drop partition/subpartition update global index +ALTER TABLE range_list_sales1 DROP PARTITION customer3 UPDATE GLOBAL INDEX; +ALTER TABLE range_list_sales1 DROP PARTITION FOR (700) UPDATE GLOBAL INDEX; --customer4 +ALTER TABLE range_list_sales1 DROP SUBPARTITION FOR (700, '9') UPDATE GLOBAL INDEX; --customer5_channel4 + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--4. 
if we drop a partition without UPDATE GLOBAL INDEX, the GPI will be invalid; we can rebuild the index +ALTER TABLE range_list_sales1 DROP PARTITION FOR (1600); + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +--5. if we drop a subpartition without UPDATE GLOBAL INDEX, the GPI will be invalid; we can rebuild the index +ALTER TABLE range_list_sales1 DROP SUBPARTITION customer5_channel3; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx1 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx1) */ COUNT(product_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +ALTER INDEX range_list_sales1_idx2 REBUILD; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx2) */ COUNT(channel_id) FROM range_list_sales1; + +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx3) */ COUNT(customer_id) FROM range_list_sales1; +EXPLAIN(costs off) SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; +SELECT /*+ indexonlyscan(range_list_sales1 range_list_sales1_idx4) */ COUNT(time_id) FROM range_list_sales1; + +DROP TABLE range_list_sales1; + +--finish, clean the environment +DROP SCHEMA ustore_subpartition_ddl_index CASCADE; +RESET CURRENT_SCHEMA; +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/sql/ustore_subpartition_gpi.sql b/src/test/regress/sql/ustore_subpartition_gpi.sql new file mode 100644 index 000000000..a555751b6 --- /dev/null +++ b/src/test/regress/sql/ustore_subpartition_gpi.sql @@ -0,0 +1,533 @@ +-- prepare +DROP SCHEMA ustore_subpartition_gpi 
CASCADE; +CREATE SCHEMA ustore_subpartition_gpi; +SET CURRENT_SCHEMA TO ustore_subpartition_gpi; + +-- basic functionality +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +-- unique +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create unique index idx_dept_code_global on range_list(dept_code) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +select * from range_list subpartition (p_201901_a); +select * from range_list subpartition (p_201901_b); +select count(*) from range_list; +--error +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select count(*) from range_list; + +delete from range_list; +drop index idx_dept_code_global; + +create unique index idx_user_no_global on range_list(user_no) global; +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '4', 1); +select * from range_list subpartition (p_201901_a); +select * from range_list subpartition (p_201901_b); +select * from range_list subpartition (p_201902_a); +select * from range_list subpartition (p_201902_b); +select count(*) from range_list;
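+-- the inserts below reuse user_no values that already exist, so each should be rejected by the unique global index idx_user_no_global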
+--error +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201902', '1', '2', 1); +insert into range_list values('201902', '2', '2', 1); +insert into range_list values('201903', '1', '2', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201902', '1', '3', 1); +insert into range_list values('201902', '2', '3', 1); +insert into range_list values('201903', '1', '3', 1); +insert into range_list values('201903', '2', '3', 1); +insert into range_list values('201902', '1', '4', 1); +insert into range_list values('201902', '2', '4', 1); +insert into range_list values('201903', '1', '4', 1); +insert into range_list values('201903', '2', '4', 1); +select count(*) from range_list; + +drop table range_list; + +-- truncate subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list truncate subpartition p_201901_a update global index; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list truncate subpartition p_201901_b; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where 
dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +-- split subpartition +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values (default) + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values (default) + ) +); + +create index idx_month_code_local on range_list(month_code) local; +create index idx_dept_code_global on range_list(dept_code) global; +create index idx_user_no_global on range_list(user_no) global; + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '2', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +select * from range_list order by 1, 2, 3, 4; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list split subpartition p_201901_b values ('3') into +( + subpartition p_201901_b, + subpartition p_201901_c +) update global index; + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +alter table range_list split subpartition p_201902_b values ('3') into +( + subpartition p_201902_b, + subpartition p_201902_c +); + +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +drop table range_list; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) 
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( MAXVALUE ) + ) +); +insert into range_range values('201902', '1', '1', 1); +insert into range_range values('201902', '2', '1', 1); +insert into range_range values('201902', '3', '1', 1); +insert into range_range values('201903', '1', '1', 1); +insert into range_range values('201903', '2', '1', 1); +insert into range_range values('201903', '5', '1', 1); +select * from range_range; + +create index idx_month_code_local on range_range(month_code) local; +create index idx_dept_code_global on range_range(dept_code) global; +create index idx_user_no_global on range_range(user_no) global; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +alter table range_range split subpartition p_201901_b at ('3') into +( + subpartition p_201901_c, + subpartition p_201901_d +) update global index; + +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +alter table range_range split subpartition p_201902_b at ('3') into +( + subpartition p_201902_c, + subpartition p_201903_d +); + +explain(costs off, verbose on) select * from range_range where month_code = '201902' order by 1, 2, 3, 4; +select * from range_range where month_code = '201902' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where dept_code = '1' order by 1, 2, 3, 4; +select * from range_range where dept_code = '1' order by 1, 2, 3, 4; + +explain(costs off, verbose on) select * from range_range where user_no = '1' order by 1, 2, 3, 4; +select * from range_range where user_no = '1' order by 1, 2, 3, 4; + +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) primary key, + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and 
relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) primary key, + user_no VARCHAR2 ( 30 ) , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) primary key, + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, dept_code, user_no) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +CREATE TABLE range_range +( + month_code VARCHAR2 ( 30 ) , + dept_code VARCHAR2 ( 30 ) , + user_no VARCHAR2 ( 30 ) , + sales_amt int, + primary key(month_code, user_no) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201901_b 
VALUES LESS THAN( MAXVALUE ) + ), + PARTITION p_201902 VALUES LESS THAN( '201904' ) + ( + SUBPARTITION p_201902_a VALUES LESS THAN( '2' ), + SUBPARTITION p_201902_b VALUES LESS THAN( '3' ) + ) +); +select relkind from pg_class + where relname = 'range_range_pkey' + and relnamespace=(select oid from pg_namespace where nspname=CURRENT_SCHEMA); +drop table range_range; + +-- truncate with gpi +CREATE TABLE range_hash_02 +( + col_1 int , + col_2 int, + col_3 VARCHAR2 ( 30 ) , + col_4 int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY hash (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( -10 ) + ( + SUBPARTITION p_hash_1_1 , + SUBPARTITION p_hash_1_2 , + SUBPARTITION p_hash_1_3 + ), + PARTITION p_range_2 VALUES LESS THAN( 20 ), + PARTITION p_range_3 VALUES LESS THAN( 30) + ( + SUBPARTITION p_hash_3_1 , + SUBPARTITION p_hash_3_2 , + SUBPARTITION p_hash_3_3 + ), + PARTITION p_range_4 VALUES LESS THAN( 50) + ( + SUBPARTITION p_hash_4_1 , + SUBPARTITION p_hash_4_2 , + SUBPARTITION range_hash_02 + ), + PARTITION p_range_5 VALUES LESS THAN( MAXVALUE ) +) ENABLE ROW MOVEMENT; + +create index idx on range_hash_02(col_1); + +truncate range_hash_02; + +drop table range_hash_02; + +-- clean +DROP SCHEMA ustore_subpartition_gpi CASCADE; +RESET CURRENT_SCHEMA; diff --git a/src/test/regress/sql/ustore_subpartition_scan.sql b/src/test/regress/sql/ustore_subpartition_scan.sql new file mode 100644 index 000000000..63e9dc90f --- /dev/null +++ b/src/test/regress/sql/ustore_subpartition_scan.sql @@ -0,0 +1,434 @@ +--prepare +DROP SCHEMA ustore_subpartition_scan CASCADE; +CREATE SCHEMA ustore_subpartition_scan; +SET CURRENT_SCHEMA TO ustore_subpartition_scan; + +--scan +CREATE TABLE range_list +( + month_code VARCHAR2 ( 30 ) NOT NULL , + dept_code VARCHAR2 ( 30 ) NOT NULL , + user_no VARCHAR2 ( 30 ) NOT NULL , + sales_amt int +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code) +( + PARTITION p_201901 VALUES LESS THAN( '201903' ) + ( + SUBPARTITION p_201901_a values ('1'), + SUBPARTITION p_201901_b values ('2') + ), + PARTITION p_201902 VALUES LESS THAN( '201910' ) + ( + SUBPARTITION p_201902_a values ('1'), + SUBPARTITION p_201902_b values ('2') + ) +); + +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201902', '2', '1', 1); +insert into range_list values('201902', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); +insert into range_list values('201903', '1', '1', 1); +insert into range_list values('201903', '2', '1', 1); + +explain(costs off, verbose on) select * from range_list order by 1, 2, 3, 4; +select * from range_list order by 1, 2, 3, 4; + +create index idx_month_code on range_list(month_code) local; +create index idx_dept_code on range_list(dept_code) local; +create index idx_user_no on range_list(user_no) local; + +set enable_seqscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +set enable_bitmapscan = off; +explain(costs off, verbose on) select * from range_list where month_code = '201902' order by 1, 2, 
3, 4; +select * from range_list where month_code = '201902' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +select * from range_list where dept_code = '1' order by 1, 2, 3, 4; +explain(costs off, verbose on) select * from range_list where user_no = '1' order by 1, 2, 3, 4; +select * from range_list where user_no = '1' order by 1, 2, 3, 4; + +reset enable_seqscan; +reset enable_bitmapscan; + +drop table range_list; + +CREATE TABLE range_list +( + col_1 VARCHAR2 ( 30 ) , + col_2 VARCHAR2 ( 30 ) NOT NULL , + col_3 VARCHAR2 ( 30 ) NOT NULL , + ccol_4 VARCHAR2 ( 30 ), +col_5 VARCHAR2 ( 30 ), +col_6 VARCHAR2 ( 30 ), +col_7 VARCHAR2 ( 30 ), +col_8 VARCHAR2 ( 30 ), +col_9 VARCHAR2 ( 30 ), +col_10 VARCHAR2 ( 30 ), +col_11 VARCHAR2 ( 30 ), +col_12 VARCHAR2 ( 30 ), +col_13 VARCHAR2 ( 30 ), +col_14 VARCHAR2 ( 30 ), +col_15 VARCHAR2 ( 30 ), +col_16 VARCHAR2 ( 30 ), +col_17 VARCHAR2 ( 30 ), +col_18 VARCHAR2 ( 30 ), +col_19 VARCHAR2 ( 30 ), +col_20 VARCHAR2 ( 30 ), +col_21 VARCHAR2 ( 30 ), +col_22 VARCHAR2 ( 30 ), +col_23 VARCHAR2 ( 30 ), +col_24 VARCHAR2 ( 30 ), +col_25 VARCHAR2 ( 30 ), +col_26 VARCHAR2 ( 30 ), +col_27 VARCHAR2 ( 30 ), +col_28 VARCHAR2 ( 30 ), +col_29 VARCHAR2 ( 30 ), +col_30 VARCHAR2 ( 30 ), +col_31 VARCHAR2 ( 30 ), +col_32 VARCHAR2 ( 30 ), +col_33 VARCHAR2 ( 30 ), +col_34 VARCHAR2 ( 30 ), +col_35 VARCHAR2 ( 30 ), +col_36 VARCHAR2 ( 30 ), +col_37 VARCHAR2 ( 30 ), +col_38 VARCHAR2 ( 30 ), +col_39 VARCHAR2 ( 30 ), +col_40 VARCHAR2 ( 30 ), +col_41 VARCHAR2 ( 30 ), +col_42 VARCHAR2 ( 30 ), +col_43 VARCHAR2 ( 30 ), +col_44 VARCHAR2 ( 30 ), +col_45 VARCHAR2 ( 30 ), +col_46 VARCHAR2 ( 30 ), +col_47 VARCHAR2 ( 30 ), +col_48 VARCHAR2 ( 30 ), +col_49 VARCHAR2 ( 30 ), +col_50 VARCHAR2 ( 30 ), +col_51 VARCHAR2 ( 30 ), +col_52 VARCHAR2 ( 30 ), +col_53 VARCHAR2 ( 30 ), +col_54 VARCHAR2 ( 30 ), +col_55 VARCHAR2 ( 30 ), +col_56 VARCHAR2 ( 30 ), +col_57 VARCHAR2 ( 30 ), +col_58 VARCHAR2 ( 30 ), +col_59 VARCHAR2 ( 30 ), +col_60 VARCHAR2 ( 30 ), +col_61 VARCHAR2 ( 30 ), +col_62 VARCHAR2 ( 30 ), +col_63 VARCHAR2 ( 30 ), +col_64 VARCHAR2 ( 30 ), +col_65 VARCHAR2 ( 30 ), +col_66 VARCHAR2 ( 30 ), +col_67 VARCHAR2 ( 30 ), +col_68 VARCHAR2 ( 30 ), +col_69 VARCHAR2 ( 30 ), +col_70 VARCHAR2 ( 30 ), +col_71 VARCHAR2 ( 30 ), +col_72 VARCHAR2 ( 30 ), +col_73 VARCHAR2 ( 30 ), +col_74 VARCHAR2 ( 30 ), +col_75 VARCHAR2 ( 30 ), +col_76 VARCHAR2 ( 30 ), +col_77 VARCHAR2 ( 30 ), +col_78 VARCHAR2 ( 30 ), +col_79 VARCHAR2 ( 30 ), +col_80 VARCHAR2 ( 30 ), +col_81 VARCHAR2 ( 30 ), +col_82 VARCHAR2 ( 30 ), +col_83 VARCHAR2 ( 30 ) +) WITH (STORAGE_TYPE=USTORE) +PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2) +( + PARTITION p_range_1 VALUES LESS THAN( '-10' ) + ( +SUBPARTITION p_list_1_1 VALUES ( '-1' ), +SUBPARTITION p_list_1_2 VALUES ( '-2' ), +SUBPARTITION p_list_1_3 VALUES ( '-3' ), +SUBPARTITION p_list_1_4 VALUES ( '-4' ), +SUBPARTITION p_list_1_5 VALUES ( '-5' ), +SUBPARTITION p_list_1_6 VALUES ( '-6' ), +SUBPARTITION p_list_1_7 VALUES ( '-7' ), +SUBPARTITION p_list_1_8 VALUES ( '-8' ), +SUBPARTITION p_list_1_9 VALUES ( '-9' ), +SUBPARTITION p_list_1_10 VALUES ( '-10' ), +SUBPARTITION p_list_1_11 VALUES ( '-11' ), +SUBPARTITION p_list_1_12 VALUES ( '-12' ), +SUBPARTITION p_list_1_13 VALUES ( '-13' ), +SUBPARTITION p_list_1_14 VALUES ( '-14' ), +SUBPARTITION p_list_1_15 VALUES ( '-15' ), +SUBPARTITION p_list_1_16 VALUES ( '-16' ), +SUBPARTITION p_list_1_17 VALUES ( '-17' ), +SUBPARTITION p_list_1_18 VALUES ( '-18' ), +SUBPARTITION p_list_1_19 
+SUBPARTITION p_list_1_19 VALUES ( '-19' ),
+SUBPARTITION p_list_1_20 VALUES ( '-20' ),
+SUBPARTITION p_list_1_21 VALUES ( '-21' ),
+SUBPARTITION p_list_1_22 VALUES ( '-22' ),
+SUBPARTITION p_list_1_23 VALUES ( '-23' ),
+SUBPARTITION p_list_1_24 VALUES ( '-24' ),
+SUBPARTITION p_list_1_25 VALUES ( '-25' ),
+SUBPARTITION p_list_1_26 VALUES ( '-26' ),
+SUBPARTITION p_list_1_27 VALUES ( '-27' ),
+SUBPARTITION p_list_1_28 VALUES ( '-28' ),
+SUBPARTITION p_list_1_29 VALUES ( '-29' ),
+SUBPARTITION p_list_1_30 VALUES ( '-30' ),
+SUBPARTITION p_list_1_31 VALUES ( '-31' ),
+SUBPARTITION p_list_1_32 VALUES ( '-32' ),
+SUBPARTITION p_list_1_33 VALUES ( '-33' ),
+SUBPARTITION p_list_1_34 VALUES ( '-34' ),
+SUBPARTITION p_list_1_35 VALUES ( '-35' ),
+SUBPARTITION p_list_1_36 VALUES ( '-36' ),
+SUBPARTITION p_list_1_37 VALUES ( '-37' ),
+SUBPARTITION p_list_1_38 VALUES ( '-38' ),
+SUBPARTITION p_list_1_39 VALUES ( '-39' ),
+SUBPARTITION p_list_1_40 VALUES ( '-40' ),
+SUBPARTITION p_list_1_41 VALUES ( '-41' ),
+SUBPARTITION p_list_1_42 VALUES ( '-42' ),
+SUBPARTITION p_list_1_43 VALUES ( '-43' ),
+SUBPARTITION p_list_1_44 VALUES ( '-44' ),
+SUBPARTITION p_list_1_45 VALUES ( '-45' ),
+SUBPARTITION p_list_1_46 VALUES ( '-46' ),
+SUBPARTITION p_list_1_47 VALUES ( '-47' ),
+SUBPARTITION p_list_1_48 VALUES ( '-48' ),
+SUBPARTITION p_list_1_49 VALUES ( '-49' ),
+SUBPARTITION p_list_1_50 VALUES ( '-50' ),
+SUBPARTITION p_list_1_51 VALUES ( default )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN('10')
+    (
+SUBPARTITION p_list_2_1 VALUES ( '1' ),
+SUBPARTITION p_list_2_2 VALUES ( '2' ),
+SUBPARTITION p_list_2_3 VALUES ( '3' ),
+SUBPARTITION p_list_2_4 VALUES ( '4' ),
+SUBPARTITION p_list_2_5 VALUES ( '5' ),
+SUBPARTITION p_list_2__6 VALUES ( '-6' ),
+SUBPARTITION p_list_2_6 VALUES ( '6' ),
+SUBPARTITION p_list_2_7 VALUES ( '7' ),
+SUBPARTITION p_list_2_8 VALUES ( '8' ),
+SUBPARTITION p_list_2_9 VALUES ( '9' ),
+SUBPARTITION p_list_2_10 VALUES ( '10' ),
+SUBPARTITION p_list_2_11 VALUES ( '11' ),
+SUBPARTITION p_list_2_12 VALUES ( '12' ),
+SUBPARTITION p_list_2_13 VALUES ( '13' ),
+SUBPARTITION p_list_2_14 VALUES ( '14' ),
+SUBPARTITION p_list_2_15 VALUES ( '15' ),
+SUBPARTITION p_list_2_16 VALUES ( '16' ),
+SUBPARTITION p_list_2_17 VALUES ( '17' ),
+SUBPARTITION p_list_2_18 VALUES ( '18' ),
+SUBPARTITION p_list_2_19 VALUES ( '19' ),
+SUBPARTITION p_list_2_20 VALUES ( '20' ),
+SUBPARTITION p_list_2_21 VALUES ( '21' ),
+SUBPARTITION p_list_2_22 VALUES ( '22' ),
+SUBPARTITION p_list_2_23 VALUES ( '23' ),
+SUBPARTITION p_list_2_24 VALUES ( '24' ),
+SUBPARTITION p_list_2_25 VALUES ( '25' ),
+SUBPARTITION p_list_2_26 VALUES ( '26' ),
+SUBPARTITION p_list_2_27 VALUES ( '27' ),
+SUBPARTITION p_list_2_28 VALUES ( '28' ),
+SUBPARTITION p_list_2_29 VALUES ( '29' ),
+SUBPARTITION p_list_2_30 VALUES ( '30' ),
+SUBPARTITION p_list_2_31 VALUES ( '31' ),
+SUBPARTITION p_list_2_32 VALUES ( '32' ),
+SUBPARTITION p_list_2_33 VALUES ( '33' ),
+SUBPARTITION p_list_2_34 VALUES ( '34' ),
+SUBPARTITION p_list_2_35 VALUES ( '35' ),
+SUBPARTITION p_list_2_36 VALUES ( '36' ),
+SUBPARTITION p_list_2_37 VALUES ( '37' ),
+SUBPARTITION p_list_2_38 VALUES ( '38' ),
+SUBPARTITION p_list_2_39 VALUES ( '39' ),
+SUBPARTITION p_list_2_40 VALUES ( '40' ),
+SUBPARTITION p_list_2_41 VALUES ( '41' ),
+SUBPARTITION p_list_2_42 VALUES ( '42' ),
+SUBPARTITION p_list_2_43 VALUES ( '43' ),
+SUBPARTITION p_list_2_44 VALUES ( '44' ),
+SUBPARTITION p_list_2_45 VALUES ( '45' ),
+SUBPARTITION p_list_2_46 VALUES ( '46' ),
+SUBPARTITION p_list_2_47 VALUES ( '47' ),
+SUBPARTITION p_list_2_48 VALUES ( '48' ),
+SUBPARTITION p_list_2_49 VALUES ( '49' ),
+SUBPARTITION p_list_2_50 VALUES ( '50' ),
+SUBPARTITION p_list_2_51 VALUES ( default )
+    ),
+    PARTITION p_range_3 VALUES LESS THAN( '20' ),
+
+    PARTITION p_range_4 VALUES LESS THAN( '30' )
+    (
+        SUBPARTITION p_list_4_1 VALUES ( default )
+    ),
+    PARTITION p_range_5 VALUES LESS THAN( '40' )
+    (
+        SUBPARTITION p_list_5_1 VALUES ( '41' ),
+SUBPARTITION p_list_5_2 VALUES ( '42' ),
+SUBPARTITION p_list_5_3 VALUES ( '43' ),
+SUBPARTITION p_list_5_4 VALUES ( '44' ),
+SUBPARTITION p_list_5_5 VALUES ( '45' ),
+SUBPARTITION p_list_5_6 VALUES ( '46' ),
+SUBPARTITION p_list_5_7 VALUES ( '47' ),
+SUBPARTITION p_list_5_8 VALUES ( '48' ),
+SUBPARTITION p_list_5_9 VALUES ( '49' ),
+SUBPARTITION p_list_5_10 VALUES ( '50' ),
+SUBPARTITION p_list_5_11 VALUES ( '51' ),
+SUBPARTITION p_list_5_12 VALUES ( '52' ),
+SUBPARTITION p_list_5_13 VALUES ( '53' ),
+SUBPARTITION p_list_5_14 VALUES ( '54' ),
+SUBPARTITION p_list_5_15 VALUES ( '55' ),
+SUBPARTITION p_list_5_16 VALUES ( '56' ),
+SUBPARTITION p_list_5_17 VALUES ( '57' ),
+SUBPARTITION p_list_5_18 VALUES ( '58' ),
+SUBPARTITION p_list_5_19 VALUES ( '59' ),
+SUBPARTITION p_list_5_20 VALUES ( '60' ),
+SUBPARTITION p_list_5_21 VALUES ( '61' ),
+SUBPARTITION p_list_5_22 VALUES ( '62' ),
+SUBPARTITION p_list_5_23 VALUES ( '63' ),
+SUBPARTITION p_list_5_24 VALUES ( '64' ),
+SUBPARTITION p_list_5_25 VALUES ( '65' ),
+SUBPARTITION p_list_5_26 VALUES ( '66' ),
+SUBPARTITION p_list_5_27 VALUES ( '67' ),
+SUBPARTITION p_list_5_28 VALUES ( '68' ),
+SUBPARTITION p_list_5_29 VALUES ( '69' ),
+SUBPARTITION p_list_5_30 VALUES ( '70' ),
+SUBPARTITION p_list_5_31 VALUES ( '71' ),
+SUBPARTITION p_list_5_32 VALUES ( '72' ),
+SUBPARTITION p_list_5_33 VALUES ( '73' ),
+SUBPARTITION p_list_5_34 VALUES ( '74' ),
+SUBPARTITION p_list_5_35 VALUES ( '75' ),
+SUBPARTITION p_list_5_36 VALUES ( '76' ),
+SUBPARTITION p_list_5_37 VALUES ( '77' ),
+SUBPARTITION p_list_5_38 VALUES ( '78' ),
+SUBPARTITION p_list_5_39 VALUES ( '79' ),
+SUBPARTITION p_list_5_40 VALUES ( '80' ),
+SUBPARTITION p_list_5_41 VALUES ( '81' ),
+SUBPARTITION p_list_5_42 VALUES ( '82' ),
+SUBPARTITION p_list_5_43 VALUES ( '83' ),
+SUBPARTITION p_list_5_44 VALUES ( '84' ),
+SUBPARTITION p_list_5_45 VALUES ( '85' ),
+SUBPARTITION p_list_5_46 VALUES ( '86' ),
+SUBPARTITION p_list_5_47 VALUES ( '87' ),
+SUBPARTITION p_list_5_48 VALUES ( '88' ),
+SUBPARTITION p_list_5_49 VALUES ( '89' ),
+SUBPARTITION p_list_5_50 VALUES ( '90' ),
+SUBPARTITION p_list_5_51 VALUES ( '91' ),
+SUBPARTITION p_list_5_52 VALUES ( '92' ),
+SUBPARTITION p_list_5_53 VALUES ( '93' ),
+SUBPARTITION p_list_5_54 VALUES ( '94' ),
+SUBPARTITION p_list_5_55 VALUES ( '95' ),
+SUBPARTITION p_list_5_56 VALUES ( '96' ),
+SUBPARTITION p_list_5_57 VALUES ( '97' ),
+SUBPARTITION p_list_5_58 VALUES ( '98' ),
+SUBPARTITION p_list_5_59 VALUES ( '99' ),
+SUBPARTITION p_list_5_60 VALUES ( '100' ),
+SUBPARTITION p_list_5_61 VALUES ( '101' ),
+SUBPARTITION p_list_5_62 VALUES ( '102' ),
+SUBPARTITION p_list_5_63 VALUES ( '103' ),
+SUBPARTITION p_list_5_64 VALUES ( '104' ),
+SUBPARTITION p_list_5_65 VALUES ( '105' ),
+SUBPARTITION p_list_5_66 VALUES ( '106' ),
+SUBPARTITION p_list_5_67 VALUES ( '107' ),
+SUBPARTITION p_list_5_68 VALUES ( '108' ),
+SUBPARTITION p_list_5_69 VALUES ( '109' ),
+SUBPARTITION p_list_5_70 VALUES ( '110' ),
+SUBPARTITION p_list_5_71 VALUES ( '111' ),
+SUBPARTITION p_list_5_72 VALUES ( '112' ),
+SUBPARTITION p_list_5_73 VALUES ( '113' ),
+SUBPARTITION p_list_5_74 VALUES ( '114' ),
+SUBPARTITION p_list_5_75 VALUES ( '115' ),
+SUBPARTITION p_list_5_76 VALUES ( '116' ),
+SUBPARTITION p_list_5_77 VALUES ( '117' ),
+SUBPARTITION p_list_5_78 VALUES ( '118' ),
+SUBPARTITION p_list_5_79 VALUES ( '119' ),
+SUBPARTITION p_list_5_80 VALUES ( default )
+    ),
+    PARTITION p_range_6 VALUES LESS THAN( MAXVALUE )
+) ENABLE ROW MOVEMENT;
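+-- build a local index on the subpartition key, then mark one index partition
+-- UNUSABLE and compare the plans before and after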
+create index on range_list(col_2) local;
+explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100;
+ALTER INDEX range_list_col_2_idx MODIFY PARTITION p_list_5_14_col_2_idx UNUSABLE;
+explain (costs off, verbose off) select * from range_list where col_2 in (select col_1 from range_list where col_1 >10 and col_1<100) order by 1 limit 100;
+drop table range_list;
+
+create table range_range_jade(jid int,jn int,name varchar2) WITH (STORAGE_TYPE=USTORE) partition by range (jid) subpartition by range(jn)
+(
+    partition hrp1 values less than(16)(
+        subpartition hrp1_1 values less than(16),
+subpartition hrp1_2 values less than(26),
+subpartition hrp1_3 values less than(36),
+        subpartition hrp1_4 values less than(maxvalue)),
+    partition hrp2 values less than(26)(
+        subpartition hrp2_1 values less than(maxvalue)),
+    partition hrp3 values less than(36)(
+        subpartition hrp3_1 values less than(16),
+subpartition hrp3_2 values less than(26),
+        subpartition hrp3_3 values less than(maxvalue)),
+    partition hrp4 values less than(maxvalue)(
+        subpartition hrp4_1 values less than(16),
+        subpartition hrp4_2 values less than(maxvalue))
+)ENABLE ROW MOVEMENT;
+-- no errors
+set enable_partition_opfusion = on;
+insert into range_range_jade values(1,2,'jade');
+reset enable_partition_opfusion;
+drop table range_range_jade;
+
+drop table list_range_02;
+CREATE TABLE IF NOT EXISTS list_range_02
+(
+    col_1 int ,
+    col_2 int,
+col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY list (col_1) SUBPARTITION BY range (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ),
+    PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20)
+    (
+        SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ),
+        SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30)
+    (
+        SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40)
+    (
+        SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50)
+    (
+        SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_7 VALUES(default)
+) ENABLE ROW MOVEMENT;
+
+create index index_01 on list_range_02(col_2) local;
+
+explain (costs off) select * from list_range_02 where col_2 in
+    (select col_1 from list_range_02 subpartition(p_list_2_subpartdefault1)
+    where col_1 >10 and col_1 <100) and col_1 +col_2 =50 and col_2 in (100,200,300 );
+drop table list_range_02;
+DROP SCHEMA ustore_subpartition_scan CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_select.sql b/src/test/regress/sql/ustore_subpartition_select.sql
new file mode 100644
index 000000000..a29772ac7
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_select.sql
@@ -0,0 +1,302 @@
+--prepare
+DROP SCHEMA ustore_subpartition_select CASCADE;
+CREATE SCHEMA ustore_subpartition_select;
+SET CURRENT_SCHEMA TO ustore_subpartition_select;
+
+--select
+CREATE TABLE t1
+(
+    c1 int,
+    c2 int
+) WITH (STORAGE_TYPE=USTORE);
+insert into t1 values(generate_series(201901,201910), generate_series(1,10));
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+);
+
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '3', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+insert into range_list values('201903', '3', '1', 1);
+
+select * from range_list order by 1, 2, 3, 4;
+
+select * from range_list where user_no is not null order by 1, 2, 3, 4;
+select * from range_list where user_no is not null and dept_code = user_no order by 1, 2, 3, 4;
+select * from range_list where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4;
+select * from range_list where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4;
+select * from range_list partition (p_201901) order by 1, 2, 3, 4;
+select * from range_list partition (p_201902) order by 1, 2, 3, 4;
+select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) order by 1, 2, 3, 4;
+select * from range_list where user_no is not null and dept_code <> '2' UNION ALL select * from range_list partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4;
+
+
+
+CREATE TABLE range_hash
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY HASH (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a,
+        SUBPARTITION p_201901_b
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a,
+        SUBPARTITION p_201902_b
+    )
+);
+
+insert into range_hash values('201902', '1', '1', 1);
+insert into range_hash values('201902', '2', '1', 1);
+insert into range_hash values('201902', '1', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+insert into range_hash values('201903', '1', '1', 1);
+insert into range_hash values('201903', '2', '1', 1);
+
+select * from range_hash order by 1, 2, 3, 4;
+
+select * from range_hash where user_no is not null order by 1, 2, 3, 4;
+select * from range_hash where user_no is not null and dept_code = user_no order by 1, 2, 3, 4;
+select * from range_hash where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4;
+select * from range_hash where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4;
+select * from range_hash partition (p_201901) order by 1, 2, 3, 4;
+select * from range_hash partition (p_201902) order by 1, 2, 3, 4;
+select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) order by 1, 2, 3, 4;
+select * from range_hash where user_no is not null and dept_code <> '2' UNION ALL select * from range_hash partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4;
+
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( '3' )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '3' )
+    )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201902', '2', '1', 1);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('201903', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+
+select * from range_range order by 1, 2, 3, 4;
+
+select * from range_range where user_no is not null order by 1, 2, 3, 4;
+select * from range_range where user_no is not null and dept_code = user_no order by 1, 2, 3, 4;
+select * from range_range where user_no is not null and dept_code in ('2') order by 1, 2, 3, 4;
+select * from range_range where user_no is not null and dept_code <> '2' order by 1, 2, 3, 4;
+select * from range_range partition (p_201901) order by 1, 2, 3, 4;
+select * from range_range partition (p_201902) order by 1, 2, 3, 4;
+select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) order by 1, 2, 3, 4;
+select * from range_range where user_no is not null and dept_code <> '2' UNION ALL select * from range_range partition (p_201902) where dept_code in ('2') order by 1, 2, 3, 4;
+
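+-- views: a plain select over the view works, but partition()/subpartition()
+-- on a view is expected to fail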
+--view
+create view view_temp as select * from range_list;
+select * from view_temp;
+--error
+select * from view_temp partition (p_201901);
+select * from view_temp partition (p_201902);
+drop view view_temp;
+
+with tmp1 as (select * from range_list ) select * from tmp1 order by 1, 2, 3, 4;
+with tmp1 as (select * from range_list partition (p_201901)) select * from tmp1 order by 1, 2, 3, 4;
+
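+-- join each subpartitioned layout to a plain table, with and without a
+-- filter on the subpartition key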
+--join normal table
+select * from range_list left join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_list left join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_list right join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_list right join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_list full join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_list full join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_list inner join t1 on range_list.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_list inner join t1 on range_list.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+
+select * from range_hash left join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_hash left join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_hash right join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_hash right join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_hash full join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_hash full join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_hash inner join t1 on range_hash.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_hash inner join t1 on range_hash.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+
+select * from range_range left join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_range left join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_range right join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_range right join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_range full join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_range full join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+select * from range_range inner join t1 on range_range.month_code = t1.c1 order by 1, 2, 3, 4, 5, 6;
+select * from range_range inner join t1 on range_range.month_code = t1.c1 where dept_code = 2 order by 1, 2, 3, 4, 5, 6;
+
+--join range_list and range_hash
+
+select * from range_list left join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_list left join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_list right join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_list right join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_list full join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_list full join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_list inner join range_hash on range_list.month_code = range_hash.month_code where range_list.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+--join range_hash and range_range
+
+select * from range_hash left join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash left join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_hash right join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash right join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_hash full join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash full join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code order by 1, 2, 3, 4, 5, 6, 7, 8;
+select * from range_hash inner join range_range on range_hash.month_code = range_range.month_code where range_hash.dept_code = 2 order by 1, 2, 3, 4, 5, 6, 7, 8;
+
+drop table list_range_02;
+CREATE TABLE IF NOT EXISTS list_range_02
+(
+    col_1 int ,
+    col_2 int,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY list (col_1) SUBPARTITION BY range (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_1_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_1_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_1_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_2 VALUES(1,2,3,4,5,6,7,8,9,10 ),
+    PARTITION p_list_3 VALUES(11,12,13,14,15,16,17,18,19,20)
+    (
+        SUBPARTITION p_range_3_1 VALUES LESS THAN( 15 ),
+        SUBPARTITION p_range_3_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_list_4 VALUES(21,22,23,24,25,26,27,28,29,30)
+    (
+        SUBPARTITION p_range_4_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_4_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_4_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_4_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_4_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_5 VALUES(31,32,33,34,35,36,37,38,39,40)
+    (
+        SUBPARTITION p_range_5_1 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_list_6 VALUES(41,42,43,44,45,46,47,48,49,50)
+    (
+        SUBPARTITION p_range_6_1 VALUES LESS THAN( -10 ),
+        SUBPARTITION p_range_6_2 VALUES LESS THAN( 0 ),
+        SUBPARTITION p_range_6_3 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_6_4 VALUES LESS THAN( 20 ),
+        SUBPARTITION p_range_6_5 VALUES LESS THAN( 50 )
+    ),
+    PARTITION p_list_7 VALUES(default)
+) ENABLE ROW MOVEMENT;
+create index index_01 on list_range_02(col_2) local;
+
+INSERT INTO list_range_02 VALUES (GENERATE_SERIES(0, 19),GENERATE_SERIES(0, 1000),GENERATE_SERIES(0, 99));
+explain (costs off, verbose on) select * from list_range_02 where col_2 >500 and col_2 <8000 order by col_1;
+
+drop index index_01;
+drop table list_range_02;
+
+create table pjade(jid int,jn int,name varchar2) WITH (STORAGE_TYPE=USTORE) partition by range(jid) subpartition by range(jn)
+(
+    partition hrp1 values less than(16)(
+        subpartition hrp1_1 values less than(16),
+        subpartition hrp1_2 values less than(maxvalue)),
+    partition hrp2 values less than(maxvalue)(
+        subpartition hrp3_1 values less than(16),
+        subpartition hrp3_3 values less than(maxvalue))
+);
+
+create table cjade(jid int,jn int,name varchar2) WITH (STORAGE_TYPE=USTORE);
+insert into pjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack');
+insert into cjade values(6,8,'tom'),(8,18,'jerry'),(16,8,'jade'),(18,20,'jack');
+select * from pjade subpartition(hrp1_1) union select * from cjade order by 1,2,3;
+select * from pjade subpartition(hrp1_1) p union select * from cjade order by 1,2,3;
+drop table pjade;
+drop table cjade;
+
+DROP SCHEMA ustore_subpartition_select CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_split.sql b/src/test/regress/sql/ustore_subpartition_split.sql
new file mode 100644
index 000000000..e667f63a3
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_split.sql
@@ -0,0 +1,268 @@
+--prepare
+DROP SCHEMA ustore_subpartition_split CASCADE;
+CREATE SCHEMA ustore_subpartition_split;
+SET CURRENT_SCHEMA TO ustore_subpartition_split;
+
+--split subpartition
+-- list subpartition
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( default )
+    )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '3', '1', 1);
+select * from list_list order by 1,2,3,4;
+
+select * from list_list subpartition (p_201901_a) order by 1,2,3,4;
+select * from list_list subpartition (p_201901_b) order by 1,2,3,4;
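+-- split the DEFAULT list subpartition on value 2; the per-subpartition
+-- selects show how the existing rows are redistributed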
+alter table list_list split subpartition p_201901_b values (2) into
+(
+    subpartition p_201901_b,
+    subpartition p_201901_c
+);
+select * from list_list subpartition (p_201901_a) order by 1,2,3,4;
+select * from list_list subpartition (p_201901_b) order by 1,2,3,4;
+select * from list_list subpartition (p_201901_c) order by 1,2,3,4;
+
+select * from list_list partition (p_201901);
+
+select * from list_list subpartition (p_201902_a) order by 1,2,3,4;
+select * from list_list subpartition (p_201902_b) order by 1,2,3,4;
+alter table list_list split subpartition p_201902_b values (2, 3) into
+(
+    subpartition p_201902_b,
+    subpartition p_201902_c
+);
+select * from list_list subpartition (p_201902_a) order by 1,2,3,4;
+select * from list_list subpartition (p_201902_b) order by 1,2,3,4;
+select * from list_list subpartition (p_201902_c) order by 1,2,3,4;
+
+--error
+alter table list_list split subpartition p_201902_a values (3) into
+(
+    subpartition p_201902_ab,
+    subpartition p_201902_ac
+);
+
+drop table list_list;
+
+-- range subpartition
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '6' )
+    )
+);
+insert into range_range values('201902', '1', '1', 1);
+insert into range_range values('201902', '2', '1', 1);
+insert into range_range values('201902', '3', '1', 1);
+insert into range_range values('201903', '1', '1', 1);
+insert into range_range values('201903', '2', '1', 1);
+insert into range_range values('201903', '5', '1', 1);
+select * from range_range order by 1,2,3,4;
+
+select * from range_range subpartition (p_201901_a) order by 1,2,3,4;
+select * from range_range subpartition (p_201901_b) order by 1,2,3,4;
+alter table range_range split subpartition p_201901_b at (3) into
+(
+    subpartition p_201901_c,
+    subpartition p_201901_d
+);
+select * from range_range subpartition (p_201901_a) order by 1,2,3,4;
+select * from range_range subpartition (p_201901_b) order by 1,2,3,4;
+select * from range_range subpartition (p_201901_c) order by 1,2,3,4;
+select * from range_range subpartition (p_201901_d) order by 1,2,3,4;
+
+select * from range_range subpartition (p_201902_a) order by 1,2,3,4;
+select * from range_range subpartition (p_201902_b) order by 1,2,3,4;
+alter table range_range split subpartition p_201902_b at (3) into
+(
+    subpartition p_201902_c,
+    subpartition p_201902_d
+);
+select * from range_range subpartition (p_201902_a) order by 1,2,3,4;
+select * from range_range subpartition (p_201902_b) order by 1,2,3,4;
+select * from range_range subpartition (p_201902_c) order by 1,2,3,4;
+select * from range_range subpartition (p_201902_d) order by 1,2,3,4;
+
+drop table range_range;
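+-- negative syntax cases: hash subpartitions cannot be split, and the
+-- AT/VALUES clause must match the subpartition strategy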
+--test syntax
+CREATE TABLE IF NOT EXISTS list_hash
+(
+    col_1 int ,
+    col_2 int ,
+    col_3 int ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY list (col_1) SUBPARTITION BY hash (col_2)
+(
+    PARTITION p_list_1 VALUES (-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_hash_1_1 ,
+        SUBPARTITION p_hash_1_2 ,
+        SUBPARTITION p_hash_1_3
+    ),
+    PARTITION p_list_2 VALUES (1,2,3,4,5,6,7,8,9,10 )
+    (
+        SUBPARTITION p_hash_2_1 ,
+        SUBPARTITION p_hash_2_2 ,
+        SUBPARTITION p_hash_2_3 ,
+        SUBPARTITION p_hash_2_4 ,
+        SUBPARTITION p_hash_2_5
+    ),
+    PARTITION p_list_3 VALUES (11,12,13,14,15,16,17,18,19,20),
+    PARTITION p_list_4 VALUES (21,22,23,24,25,26,27,28,29,30 )
+    (
+        SUBPARTITION p_hash_4_1
+    ),
+    PARTITION p_list_5 VALUES (default)
+    (
+        SUBPARTITION p_hash_5_1
+    ),
+    PARTITION p_list_6 VALUES (31,32,33,34,35,36,37,38,39,40)
+    (
+        SUBPARTITION p_hash_6_1 ,
+        SUBPARTITION p_hash_6_2 ,
+        SUBPARTITION p_hash_6_3
+    )
+) ENABLE ROW MOVEMENT ;
+
+alter table list_hash split subPARTITION p_hash_2_3 at(-10) into ( subPARTITION add_p_01 , subPARTITION add_p_02 );
+
+drop table list_hash;
+
+CREATE TABLE range_range
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY RANGE (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201901_b VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201904' )
+    (
+        SUBPARTITION p_201902_a VALUES LESS THAN( '2' ),
+        SUBPARTITION p_201902_b VALUES LESS THAN( '6' )
+    )
+);
+alter table range_range split subpartition p_201901_b values (3) into
+(
+    subpartition p_201901_c,
+    subpartition p_201901_d
+) update global index;
+drop table range_range;
+
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( default )
+    )
+);
+alter table list_list split subpartition p_201901_b at (2, 3) into
+(
+    subpartition p_201901_b,
+    subpartition p_201901_c
+);
+drop table list_list;
+
+CREATE TABLE IF NOT EXISTS list_list_02
+(
+    col_1 int ,
+    col_2 int ,
+    col_3 int ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY list (col_1) SUBPARTITION BY list (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ),
+        SUBPARTITION p_list_1_2 VALUES ( default )
+    ),
+    PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9)
+    (
+        SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_2_2 VALUES ( default ),
+        SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19)
+    (
+        SUBPARTITION p_list_3_2 VALUES ( default )
+    ),
+    PARTITION p_list_4 VALUES(default ),
+    PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29)
+    (
+        SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_5_2 VALUES ( default ),
+        SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39),
+    PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49)
+    (
+        SUBPARTITION p_list_7_1 VALUES ( default )
+    )
+) ENABLE ROW MOVEMENT;
+
+alter table list_list_02 split PARTITION for (5) at (8) into ( PARTITION add_p_01 , PARTITION add_p_02 );
+drop table list_list_02;
+--clean
+DROP SCHEMA ustore_subpartition_split CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_truncate.sql b/src/test/regress/sql/ustore_subpartition_truncate.sql
new file mode 100644
index 000000000..b5cafdee1
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_truncate.sql
@@ -0,0 +1,71 @@
+--prepare
+DROP SCHEMA ustore_subpartition_truncate CASCADE;
+CREATE SCHEMA ustore_subpartition_truncate;
+SET CURRENT_SCHEMA TO ustore_subpartition_truncate;
+
+--truncate partition/subpartition
+CREATE TABLE list_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY LIST (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES ( '201902' )
+    (
+        SUBPARTITION p_201901_a VALUES ( '1' ),
+        SUBPARTITION p_201901_b VALUES ( default )
+    ),
+    PARTITION p_201902 VALUES ( '201903' )
+    (
+        SUBPARTITION p_201902_a VALUES ( '1' ),
+        SUBPARTITION p_201902_b VALUES ( '2' )
+    )
+);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+select * from list_list;
+
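+-- truncating a partition empties all of its subpartitions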
+select * from list_list partition (p_201901);
+alter table list_list truncate partition p_201901;
+select * from list_list partition (p_201901);
+
+select * from list_list partition (p_201902);
+alter table list_list truncate partition p_201902;
+select * from list_list partition (p_201902);
+select * from list_list;
+
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201902', '2', '1', 1);
+insert into list_list values('201902', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+insert into list_list values('201903', '1', '1', 1);
+insert into list_list values('201903', '2', '1', 1);
+
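+-- truncate one subpartition at a time; rows in the sibling subpartitions are untouched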
+select * from list_list subpartition (p_201901_a);
+alter table list_list truncate subpartition p_201901_a;
+select * from list_list subpartition (p_201901_a);
+
+select * from list_list subpartition (p_201901_b);
+alter table list_list truncate subpartition p_201901_b;
+select * from list_list subpartition (p_201901_b);
+
+select * from list_list subpartition (p_201902_a);
+alter table list_list truncate subpartition p_201902_a;
+select * from list_list subpartition (p_201902_a);
+
+select * from list_list subpartition (p_201902_b);
+alter table list_list truncate subpartition p_201902_b;
+select * from list_list subpartition (p_201902_b);
+
+select * from list_list;
+
+drop table list_list;
+DROP SCHEMA ustore_subpartition_truncate CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/ustore_subpartition_update.sql b/src/test/regress/sql/ustore_subpartition_update.sql
new file mode 100644
index 000000000..fca459950
--- /dev/null
+++ b/src/test/regress/sql/ustore_subpartition_update.sql
@@ -0,0 +1,269 @@
+--prepare
+DROP SCHEMA ustore_subpartition_update CASCADE;
+CREATE SCHEMA ustore_subpartition_update;
+SET CURRENT_SCHEMA TO ustore_subpartition_update;
+
+--update
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+)DISABLE ROW MOVEMENT;
+
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+
+select * from range_list order by 1, 2, 3, 4;
+
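+-- with DISABLE ROW MOVEMENT, an update that would move a row to another
+-- partition or subpartition must fail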
+--error
+update range_list set month_code = '201903';
+--error
+update range_list set dept_code = '2';
+
+update range_list set user_no = '2';
+select * from range_list order by 1, 2, 3, 4;
+
+-- test for upsert and merge into, both should report error
+insert into range_list values('201902', '1', '1', 1) ON DUPLICATE KEY UPDATE sales_amt=1;
+
+CREATE TABLE temp_table
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE);
+insert into temp_table values('201802', '1', '1', 1), ('201901', '2', '1', 1), ('201702', '1', '1', 1);
+MERGE INTO range_list t1
+USING temp_table t2
+ON (t1.dept_code = t2.dept_code)
+WHEN MATCHED THEN
+    UPDATE SET t1.month_code = t2.month_code WHERE t1.dept_code > 1
+WHEN NOT MATCHED THEN
+    INSERT VALUES (t2.month_code, t2.dept_code, t2.user_no, t2.sales_amt) WHERE t2.sales_amt = 1;
+
+drop table temp_table;
+drop table range_list;
+
+CREATE TABLE range_list
+(
+    month_code VARCHAR2 ( 30 ) NOT NULL ,
+    dept_code VARCHAR2 ( 30 ) NOT NULL ,
+    user_no VARCHAR2 ( 30 ) NOT NULL ,
+    sales_amt int
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (month_code) SUBPARTITION BY LIST (dept_code)
+(
+    PARTITION p_201901 VALUES LESS THAN( '201903' )
+    (
+        SUBPARTITION p_201901_a values ('1'),
+        SUBPARTITION p_201901_b values ('2')
+    ),
+    PARTITION p_201902 VALUES LESS THAN( '201910' )
+    (
+        SUBPARTITION p_201902_a values ('1'),
+        SUBPARTITION p_201902_b values ('2')
+    )
+)ENABLE ROW MOVEMENT;
+
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201902', '2', '1', 1);
+insert into range_list values('201902', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+insert into range_list values('201903', '1', '1', 1);
+insert into range_list values('201903', '2', '1', 1);
+
+select * from range_list order by 1, 2, 3, 4;
+
+select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4;
+select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4;
+update range_list set dept_code = '2' where month_code = '201902';
+select * from range_list subpartition (p_201901_a) order by 1, 2, 3, 4;
+select * from range_list subpartition (p_201901_b) order by 1, 2, 3, 4;
+
+select * from range_list partition (p_201901) order by 1, 2, 3, 4;
+select * from range_list partition (p_201902) order by 1, 2, 3, 4;
+update range_list set month_code = '201903' where month_code = '201902';
+select * from range_list partition (p_201901) order by 1, 2, 3, 4;
+select * from range_list partition (p_201902) order by 1, 2, 3, 4;
+
+drop table range_list;
+
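+-- FOREIGN KEY: updating the referenced key must be rejected while a row in
+-- the subpartitioned child table still references it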
+-- FOREIGN KEY
+drop table tb_02;
+CREATE TABLE tb_02
+(
+    col_1 int PRIMARY KEY,
+    col_2 int ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+) WITH (STORAGE_TYPE=USTORE);
+
+drop table range_range_02 cascade;
+CREATE TABLE range_range_02
+(
+    col_1 int ,
+    col_2 int ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int ,
+    FOREIGN KEY(col_1) REFERENCES tb_02(col_1)
+) WITH (STORAGE_TYPE=USTORE)
+PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( 10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 50 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 80 )
+    (
+        SUBPARTITION p_range_2_1 VALUES LESS THAN( 50 ),
+        SUBPARTITION p_range_2_2 VALUES LESS THAN( MAXVALUE )
+    )
+);
+
+insert into tb_02 values(0,0,0,0);
+insert into range_range_02 values(0,0,0,0);
+
+update tb_02 set col_1=8 where col_2=0;
+drop table range_range_02 cascade;
+drop table tb_02;
+
+drop table tb_02;
+CREATE TABLE tb_02
+(
+    col_1 int PRIMARY KEY,
+    col_2 int ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int
+);
+
+drop table list_list_02 cascade;
+CREATE TABLE list_list_02
+(
+    col_1 int ,
+    col_2 int ,
+    col_3 VARCHAR2 ( 30 ) ,
+    col_4 int ,
+    FOREIGN KEY(col_1) REFERENCES tb_02(col_1)
+)
+PARTITION BY list (col_1) SUBPARTITION BY list (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ),
+        SUBPARTITION p_list_1_2 VALUES ( default )
+    ),
+    PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9)
+    (
+        SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_2_2 VALUES ( default ),
+        SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19)
+    (
+        SUBPARTITION p_list_3_2 VALUES ( default )
+    ),
+    PARTITION p_list_4 VALUES(default ),
+    PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29)
+    (
+        SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_5_2 VALUES ( default ),
+        SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39),
+    PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49)
+    (
+        SUBPARTITION p_list_7_1 VALUES ( default )
+    )
+) ENABLE ROW MOVEMENT;
+
+insert into list_list_02 values(0,0,0,0);
+
+insert into tb_02 values(0,0,0,0);
+insert into list_list_02 values(0,0,0,0);
+update list_list_02 set col_2=8 where col_2=0;
+update list_list_02 set col_1=8 where col_2=8;
+delete list_list_02 where col_2=8;
+
+drop table tb_02;
+drop table list_list_02 cascade;
+
+drop table range_list_02;
+CREATE TABLE IF NOT EXISTS range_list_02
+(
+    col_1 int ,
+    col_2 int ,
+    col_3 int ,
+    col_4 int
+)
+PARTITION BY RANGE (col_1) SUBPARTITION BY list (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( -10 )
+    (
+        SUBPARTITION p_list_1_1 VALUES ( '-1','-2','-3','-4','-5'),
+        SUBPARTITION p_list_1_2 VALUES ( default )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 20 )
+    (
+        SUBPARTITION p_list_2_1 VALUES ( '6','7','8','9','10'),
+        SUBPARTITION p_list_2_2 VALUES ( default )
+    ),
+    PARTITION p_range_3 VALUES LESS THAN( 30 )
+    (
+        SUBPARTITION p_list_3_1 VALUES ( default )
+    ),
+    PARTITION p_range_4 VALUES LESS THAN( 40 )
+    (
+        SUBPARTITION p_list_4_1 VALUES ( default )
+    ),
+    PARTITION p_range_5 VALUES LESS THAN( MAXVALUE )
+) ENABLE ROW MOVEMENT;
+
+create unique index on range_list_02(col_1,col_2);
+INSERT INTO range_list_02 VALUES (GENERATE_SERIES(-500, 500,2),GENERATE_SERIES(1500, 2500,2),
+GENERATE_SERIES(3500, 4500,2));
+insert into range_list_02 values(1,1,1,1),(4,4,4,4),(5,5,5,5),(8,8,8,8),(9,9,9,9);
+insert into range_list_02 values(11,11,1,1),(15,15,5,5),(18,81,8,8),(29,9,9,9);
+insert into range_list_02 values(21,11,1,1),(15,150,5,5),(18,811,8,8),(-2978,31,9,9);
+insert into range_list_02 values(-1,1,1,1),(-1,-15,5,5),(-8,7,8,8),(-9,29,9,9);
+insert into range_list_02 values(-8,18,1);
+
+update range_list_02 set col_4=80 where col_1=4;
+update range_list_02 set col_4=col_1 where col_1<5;
+update range_list_02 set col_4=col_1+ col_2 where col_1<5;
+update range_list_02 set col_1=83,col_2=8 where col_1=8;
+update range_list_02 set col_2=-56 where col_1=-2978;
+update range_list_02 set col_2=80 where col_2=-3;
+update range_list_02 set col_2=27 where col_2=80;
+alter table range_list_02 truncate partition p_range_1;
+alter table range_list_02 truncate SUBPARTITION p_list_2_2;
+delete from range_list_02 where col_1 >188 ;
+delete from range_list_02 where col_2 <10 and col_3>5;
+drop table range_list_02;
+
+DROP SCHEMA ustore_subpartition_update CASCADE;
+RESET CURRENT_SCHEMA;
diff --git a/src/test/regress/sql/vec_set_func.sql b/src/test/regress/sql/vec_set_func.sql
index 801aece5f..8ad0123b9 100644
--- a/src/test/regress/sql/vec_set_func.sql
+++ b/src/test/regress/sql/vec_set_func.sql
@@ -1,3 +1,4 @@
+set enable_vector_engine=on;
 create table hl_test002(a int,b varchar2(15), c varchar2(15));
 insert into hl_test002 values(1,'gauss,ap', 'xue,dong,pu');
 insert into hl_test002 values(1,'gauss,ap', NULL);
@@ -11,15 +12,21 @@ insert into hl_test002 values(2,'wang,,rui', NULL);
 create table hl_test001(a int,b varchar2(15), c varchar2(15)) with (ORIENTATION = COLUMN);
 insert into hl_test001 select * from hl_test002;
-select a,b,c,regexp_split_to_table(b,E',') from hl_test001 order by 1, 2, 3, 4;
-select a,b,c,regexp_split_to_table(b,NULL) from hl_test001 order by 1, 2, 3, 4;
-select a,b,c,regexp_split_to_table(b,E','), regexp_split_to_table(c,E',') from hl_test001 order by 1, 2, 3, 4, 5;
-select regexp_split_to_table(b,E','), generate_series(1, 3) from hl_test001;
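+-- add an array column table to also cover unnest() under the vector engine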
+create table hl_test003(a int,b int[5]) with (ORIENTATION = COLUMN);
+insert into hl_test003 values(1, array[1,2,3]),(2,array[5,4,6]);
-select a,b,c,regexp_split_to_table(regexp_split_to_table(b,E','), E'u') from hl_test001 order by 1, 2, 3, 4;
-select a,b,c,substring(regexp_split_to_table(b,E','), 1, 100) from hl_test001 order by 1, 2, 3, 4;
-select a,b,c,regexp_split_to_table(substring(b,1, 100), E',') from hl_test001 order by 1, 2, 3, 4;
+select a,b,c,regexp_split_to_table(b,E',') from hl_test001 order by 1, 2, 3, 4 nulls last;
+select a,b,c,regexp_split_to_table(b,NULL) from hl_test001 order by 1, 2, 3, 4 nulls last;
+select a,b,c,regexp_split_to_table(b,E','), regexp_split_to_table(c,E',') from hl_test001 order by 1, 2, 3, 4, 5 nulls last;
+select regexp_split_to_table(b,E','), generate_series(1, 3) from hl_test001;
+select a, b, unnest(b) from hl_test003;
+
+select a,b,c,regexp_split_to_table(regexp_split_to_table(b,E','), E'u') from hl_test001 order by 1, 2, 3, 4 nulls last;
+select a,b,c,substring(regexp_split_to_table(b,E','), 1, 100) from hl_test001 order by 1, 2, 3, 4 nulls last;
+select a,b,c,regexp_split_to_table(substring(b,1, 100), E',') from hl_test001 order by 1, 2, 3, 4 nulls last;
 drop table hl_test001;
 drop table hl_test002;
+drop table hl_test003;
+reset enable_vector_engine;
diff --git a/src/test/regress/sql/vector_procedure.sql b/src/test/regress/sql/vector_procedure.sql
new file mode 100644
index 000000000..c2c3e3a49
--- /dev/null
+++ b/src/test/regress/sql/vector_procedure.sql
@@ -0,0 +1,40 @@
+create schema force_vector_engine;
+set current_schema=force_vector_engine;
+
+create type pro_tblof_ty_015 as (c1 int,c2 char(10),c3 clob,c4 blob);
+create table pro_tblof_tbl_015(c1 int,c2 pro_tblof_ty_015);
+insert into pro_tblof_tbl_015 values(1,(1,'char',repeat('静夜思',16),hextoraw('12345')));
+insert into pro_tblof_tbl_015 values(2,(2,'char',repeat('静夜思',16),hextoraw('12345')));
+insert into pro_tblof_tbl_015 values(3,(3,'char',repeat('静夜思',16),hextoraw('12345')));
+insert into pro_tblof_tbl_015 values(4,(4,'char',repeat('静夜思',16),hextoraw('12345')));
+insert into pro_tblof_tbl_015 values(5,(5,'char',repeat('静夜思',16),hextoraw('12345')));
+create type pro_tblof_015 is table of pro_tblof_ty_015;
+
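+-- load each row into the table-of collection, replace c1 with its parity
+-- flag, then print the whole collection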
+create or replace procedure pro_tblof_pro_015(col1 int,col2 int)
+as
+tblof001 pro_tblof_015;
+tblof002 pro_tblof_ty_015;
+i int:=1;
+begin
+select count(*) into col1 from pro_tblof_tbl_015;
+loop
+select c2 into tblof001(i) from pro_tblof_tbl_015 where c1=i;
+if tblof001(i).c1%2=0 then
+tblof001(i).c1=0;
+else
+tblof001(i).c1=1;
+end if;
+i=i+1;
+if i>col1 then
+exit;
+end if;
+end loop;
+raise info 'tblof001 is %',tblof001;
+raise info 'i is %',i;
+end;
+/
+
+set try_vector_engine_strategy='force';
+call pro_tblof_pro_015(6,6);
+set try_vector_engine_strategy='off';
+drop schema force_vector_engine cascade;
\ No newline at end of file
diff --git a/src/test/regress/sql/vector_subpartition.sql b/src/test/regress/sql/vector_subpartition.sql
new file mode 100644
index 000000000..461986b70
--- /dev/null
+++ b/src/test/regress/sql/vector_subpartition.sql
@@ -0,0 +1,151 @@
+DROP SCHEMA IF exists vector_subpartition;
+CREATE SCHEMA vector_subpartition;
+set current_schema=vector_subpartition;
+
+set try_vector_engine_strategy=force;
+
+CREATE TABLE IF NOT EXISTS range_range_02
+(col_1 int, col_2 int, col_3 int , col_4 int)
+PARTITION BY RANGE (col_1) SUBPARTITION BY RANGE (col_2)
+(
+    PARTITION p_range_1 VALUES LESS THAN( 10 )
+    (
+        SUBPARTITION p_range_1_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_1_2 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_range_2 VALUES LESS THAN( 20 )
+    (
+        SUBPARTITION p_range_2_1 VALUES LESS THAN( 5 ),
+        SUBPARTITION p_range_2_2 VALUES LESS THAN( 10 ),
+        SUBPARTITION p_range_2_3 VALUES LESS THAN( MAXVALUE )
+    ),
+    PARTITION p_range_3 VALUES LESS THAN( MAXVALUE )
+) ENABLE ROW MOVEMENT;
+
+INSERT INTO range_range_02 VALUES (GENERATE_SERIES(-190, 1900),GENERATE_SERIES(-290, 1800),GENERATE_SERIES(-90, 2000));
+
+create unique index on range_range_02 (col_1,col_2 nulls first) where col_2 < 4;
+create unique index on range_range_02 (col_1,col_2,col_3 nulls first) where col_2 < 4;
+
+explain (costs off) select /*+ indexscan(range_range_02
+    range_range_02_col_1_col_2_idx)*/ * from range_range_02 where col_2 in (select
+    col_1 from range_range_02 aa where col_1 >10 and col_1 <100) and col_1 +col_2 =50
+    and col_2 < 4;
+
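+-- a subpartitioned table with a generated column and a check constraint,
+-- scanned under the forced vector engine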
+CREATE TABLE list_list
+(
+    col_1 int primary key,
+    col_2 int NOT NULL ,
+    col_3 VARCHAR2 ( 30 ) NOT NULL ,
+    col_4 int generated always as(2*col_2) stored ,
+    check (col_4 >= col_2)
+) with(FILLFACTOR=80)
+PARTITION BY list (col_1) SUBPARTITION BY list (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ),
+        SUBPARTITION p_list_1_2 VALUES ( default )
+    ),
+    PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9)
+    (
+        SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_2_2 VALUES ( default ),
+        SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19)
+    (
+        SUBPARTITION p_list_3_2 VALUES ( default )
+    ),
+    PARTITION p_list_4 VALUES(default ),
+    PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29)
+    (
+        SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_5_2 VALUES ( default ),
+        SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39),
+    PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49)
+    (
+        SUBPARTITION p_list_7_1 VALUES ( default )
+    )
+) ENABLE ROW MOVEMENT;
+
+insert into list_list values(1,1,'aa');
+insert into list_list values(5,5,'bb');
+insert into list_list values(11,2,'cc');
+insert into list_list values(19,8,'dd');
+
+explain (costs off) select * from list_list;
+select * from list_list;
+
+drop table if exists list_list_02;
+CREATE TABLE IF NOT EXISTS list_list_02
+(
+    col_1 int ,
+    col_2 int DEFAULT 20 ,
+    col_3 int ,
+    col_4 int
+)
+PARTITION BY list (col_1) SUBPARTITION BY list (col_2)
+(
+    PARTITION p_list_1 VALUES(-1,-2,-3,-4,-5,-6,-7,-8,-9,-10 )
+    (
+        SUBPARTITION p_list_1_1 VALUES ( 0,-1,-2,-3,-4,-5,-6,-7,-8,-9 ),
+        SUBPARTITION p_list_1_2 VALUES ( default )
+    ),
+    PARTITION p_list_2 VALUES(0,1,2,3,4,5,6,7,8,9)
+    (
+        SUBPARTITION p_list_2_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_2_2 VALUES ( default ),
+        SUBPARTITION p_list_2_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_2_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_2_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_3 VALUES(10,11,12,13,14,15,16,17,18,19)
+    (
+        SUBPARTITION p_list_3_2 VALUES ( default )
+    ),
+    PARTITION p_list_4 VALUES(default ),
+    PARTITION p_list_5 VALUES(20,21,22,23,24,25,26,27,28,29)
+    (
+        SUBPARTITION p_list_5_1 VALUES ( 0,1,2,3,4,5,6,7,8,9 ),
+        SUBPARTITION p_list_5_2 VALUES ( default ),
+        SUBPARTITION p_list_5_3 VALUES ( 10,11,12,13,14,15,16,17,18,19),
+        SUBPARTITION p_list_5_4 VALUES ( 20,21,22,23,24,25,26,27,28,29 ),
+        SUBPARTITION p_list_5_5 VALUES ( 30,31,32,33,34,35,36,37,38,39 )
+    ),
+    PARTITION p_list_6 VALUES(30,31,32,33,34,35,36,37,38,39),
+    PARTITION p_list_7 VALUES(40,41,42,43,44,45,46,47,48,49)
+    (
+        SUBPARTITION p_list_7_1 VALUES ( default )
+    )
+) ENABLE ROW MOVEMENT;
+
+insert into list_list_02(col_1,col_3,col_4) values(1,1,1),(5,5,5);
+select * from list_list_02;
+
+set try_vector_engine_strategy=force;
+select * from list_list_02 partition(p_list_2);
+
+truncate list_list_02;
+insert into list_list_02 values(0,0,0,0);
+insert into list_list_02 values(-11,1,1,1),(-14,1,4,4),(-25,15,5,5),(-808,8,8,8),(-9,9,9,9);
+insert into list_list_02 values(1,11,1,12),(4,41,4,48),(5,54,5,57),(8,87,8,84),(9,19,9,97);
+insert into list_list_02 values(11,1,1,13),(14,1,4,49),(15,5,5,52),(18,8,8,81),(19,1,9,93);
+
+create index index_01 on list_list_02(col_2 ASC ) local;
+create index index_02 on list_list_02(col_2 DESC) local;
+create index index_03 on list_list_02(col_2 NULLS first) local;
+create index index_04 on list_list_02(col_2 NULLS LAST ) local;
+
+explain (analyze on, timing off) select /*+ indexscan (list_list_02 index_01)*/ * from list_list_02 where col_2 in (select col_1 from list_list_02 aa where col_1 >10 and col_1<100) order by 2 asc limit 100;
+
+explain (analyze on, timing off) select * from list_list_02 where ctid='(0,2)';
+
+set current_schema=public;
+drop schema vector_subpartition cascade;
diff --git a/src/test/regress/sql/wlm_memory_trace.sql b/src/test/regress/sql/wlm_memory_trace.sql
new file mode 100644
index 000000000..e76414810
--- /dev/null
+++ b/src/test/regress/sql/wlm_memory_trace.sql
@@ -0,0 +1,11 @@
+\c postgres
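+-- probe the memory-context detail functions with a valid context name, an
+-- unknown name, and NULL, for shared, session and thread contexts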
'execute direct on (datanode1) ''select * from TESTTABLE_t1'''; execute direct on (coordinator1)'select count(*) from gs_wlm_operator_info'; execute direct on (coordinator2)'select count(*) from gs_wlm_operator_info'; -drop table DTS2018120706312_t1; +drop table TESTTABLE_t1; execute direct on (datanode1) ''; diff --git a/src/test/regress/sql/xc_rownum.sql b/src/test/regress/sql/xc_rownum.sql index 6194183f3..dfd5a188a 100644 --- a/src/test/regress/sql/xc_rownum.sql +++ b/src/test/regress/sql/xc_rownum.sql @@ -5,6 +5,20 @@ select oid rownum from pg_class; select oid as rownum from pg_class; +--test compat +drop table if exists tb_test; +create table tb_test(c1 int,c2 varchar2,c3 varchar2); +insert into tb_test values(1,'a','b'); +create or replace view v_test as select rownum from tb_test; +\d+ v_test +set behavior_compat_options = 'rownum_type_compat'; +create or replace view v_test1 as select rownum from tb_test; +\d+ v_test1 +set behavior_compat_options = ''; + +drop view v_test; +drop view v_test1; +drop table tb_test; ------------------------------------ --test the basic function of rownum ------------------------------------ @@ -417,8 +431,36 @@ explain select * from student where rownum != 6.5; explain select * from student where rownum > 6.5; explain select * from student where rownum >= 6.5; -explain delete from student where 3 > rownum; -explain delete from student where 3 < rownum; +-- optimize rownum to limit +-- rownum bigint to numeric +select rownum from student where rownum < 6.4; +select rownum from student where rownum < 6.5; +select rownum from student where rownum <= 6.4; +select rownum from student where rownum <= 6.5; +select rownum from student where rownum > 0.5; +select rownum from student where rownum > 1.5; +select rownum from student where rownum >= 0.5; +select rownum from student where rownum >= 1.5; +set behavior_compat_options = 'rownum_type_compat'; +explain (costs off) select * from student where rownum < 6.5; +explain (costs off) select * from student where rownum <= 6.5; +select rownum from student where rownum < 6.4; +select rownum from student where rownum < 6.5; +select rownum from student where rownum <= 6.4; +select rownum from student where rownum <= 6.5; +explain (costs off) select * from student where rownum > 6.5; +explain (costs off) select * from student where rownum >= 6.5; +select rownum from student where rownum > 0.5; +select rownum from student where rownum > 1.5; +select rownum from student where rownum >= 0.5; +select rownum from student where rownum >= 1.5; +explain (costs off) select * from student where rownum = 6.5; +explain (costs off) select * from student where rownum != 6.5; +-- reset +set behavior_compat_options = ''; + +explain (costs off) delete from student where 3 > rownum; +explain (costs off) delete from student where 3 < rownum; explain delete from student where rownum < 5 or rownum < 6; explain delete from student where rownum > 5 or rownum > 6; @@ -460,6 +502,28 @@ explain select id from test where rownum < 5 group by id; explain select id from student where rownum < 3 union select id from (select id from student order by 1) where rownum < 5; select * from test where id < 2 union select * from (select * from test order by id desc) where rownum < 5; +-- ROWNUM for Column-Oriented +create table student_cstore1(id int, stuname varchar(10) ) WITH (orientation=column) ; +create table student_cstore2(id int, stuname varchar(10) ) WITH (orientation=column) ; +insert into student_cstore1 select * from student; +-- test rownum 
for cstorescan +select * from student_cstore1 where rownum < 5; +select rownum, * from student_cstore1 where rownum < 1; +select rownum, * from student_cstore1 where rownum <= 1; +select rownum, * from student_cstore1 where rownum <= 10; +select rownum, * from student_cstore1 where stuname = 'stu5' and rownum < 4; +select rownum, stuname from student_cstore1 where stuname = 'stu5' or rownum < 8; +-- test rownum for join +insert into student_cstore2 select * from student; +select * from student_cstore2 where rownum > 2; +select * from student_cstore2 where rownum = 2; +select rownum, sc1.stuname, sc2.id from student_cstore2 as sc1, student_cstore2 as sc2 where sc1.id = sc2.id; + +-- test rownum for agg +select * from (select rownum, max(id) as max_id from student_cstore1 group by rownum) as t order by max_id; + +drop table student_cstore1; +drop table student_cstore2; drop table student; drop table test; @@ -530,3 +594,5 @@ select rownum,* from partition_hash; select * from partition_hash where rownum < 5; drop table partition_hash; + + diff --git a/src/test/ut/CMakeLists.txt b/src/test/ut/CMakeLists.txt new file mode 100644 index 000000000..b6e50b699 --- /dev/null +++ b/src/test/ut/CMakeLists.txt @@ -0,0 +1,20 @@ +# -D, required option +add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0 PGXC) +set(UNIT_TEST_LINK_OPTIONS_LIB_LIST -Wl,-z,relro,-z,now -Wl,-z,noexecstack -Wl,-E -z muldefs) + +# -I +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/include ${MOCKCPP_INCLUDE_PATH} ${GTEST_INCLUDE_PATH} ${MOCKCPP_3RDPARTY_PATH}) + +# -L +link_directories(${CMAKE_BINARY_DIR}/lib ${GTEST_LIB_PATH} ${MOCKCPP_LIB_PATH} +${CJSON_LIB_PATH} ${DCF_LIB_PATH} ${XGBOOST_LIB_PATH} ${ZSTD_LIB_PATH} +${LIBPARQUET_LIB_PATH} ${ARROW_LIB_PATH} ${LIBOBS_LIB_PATH} ${LZ4_LIB_PATH}) + +# -l +set(UNIT_TEST_BASE_LIB_LIST pthread gtest_main gtest mockcpp cjson dcf xgboost zstd nghttp2 iconv eSDKOBS eSDKLogAPI log4cpp parquet arrow lz4 db) + +add_subdirectory(demo) +add_subdirectory(db4ai) + +set(UT_TEST_TARGET_LIST ut_demo_test ut_direct_ml_test) +add_custom_target(all_ut_test_opengauss DEPENDS ${UT_TEST_TARGET_LIST} COMMAND echo "end unit test all...") diff --git a/src/test/ut/db4ai/CMakeLists.txt b/src/test/ut/db4ai/CMakeLists.txt new file mode 100644 index 000000000..2cdce9948 --- /dev/null +++ b/src/test/ut/db4ai/CMakeLists.txt @@ -0,0 +1,31 @@ +set(TGT_ut_direct_ml_SRC ${CMAKE_CURRENT_SOURCE_DIR}/ut_direct_ml.cpp) +INCLUDE_DIRECTORIES( + ${PROJECT_SRC_DIR}/include +) + + +set(ut_direct_ml_objects + $ + ) + +add_subdirectory(direct_ml) + +add_executable(ut_direct_ml ${TGT_ut_direct_ml_SRC} ${ut_direct_ml_objects}) +target_compile_options(ut_direct_ml PRIVATE ${OPTIMIZE_LEVEL}) +target_link_options(ut_direct_ml PRIVATE ${UNIT_TEST_LINK_OPTIONS_LIB_LIST}) + +TARGET_LINK_LIBRARIES(ut_direct_ml ${UNIT_TEST_BASE_LIB_LIST}) +add_custom_command(TARGET ut_direct_ml + POST_BUILD + COMMAND mkdir -p ${CMAKE_BINARY_DIR}/ut_bin + COMMAND rm -rf ${CMAKE_BINARY_DIR}/ut_bin/ut_direct_ml + COMMAND cp ${CMAKE_BINARY_DIR}/${openGauss}/src/test/ut/db4ai/ut_direct_ml ${CMAKE_BINARY_DIR}/ut_bin/ut_direct_ml + COMMAND chmod +x ${CMAKE_BINARY_DIR}/ut_bin/ut_direct_ml + COMMAND rm -rf ${CMAKE_CURRENT_SOURCE_DIR}/ut_bin/data + COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/data ${CMAKE_BINARY_DIR}/ut_bin/ + ) +add_custom_target(ut_direct_ml_test + DEPENDS ut_direct_ml + COMMAND ${CMAKE_BINARY_DIR}/ut_bin/ut_direct_ml || sleep 0 + COMMENT "begin unit test..." 
+    )
\ No newline at end of file
diff --git a/src/test/ut/db4ai/data/coordinates.txt b/src/test/ut/db4ai/data/coordinates.txt
new file mode 100644
index 000000000..993882fbf
--- /dev/null
+++ b/src/test/ut/db4ai/data/coordinates.txt
@@ -0,0 +1,10 @@
+-3.61098631069999998, -22.6172568158999994, 71.5002217129000002, 157.726688654100002, -39.2273861486999991, 118.277085579200005, 179.311368741100011
+-30.4310199511000015, 125.357124882600004, -27.384517924699999, -4.63293814239999957, 79.8982470108000058, 53.9955889248999981, 8.58503389460000044
+-67.9280164728999978, -3.90913184339999997, 89.3555545521000028, -1.76477945900000011, -129.65569087770001, -66.7980506008999981, 18.1708778307999985
+-54.0872242299000021, -138.043394277900006, 171.655186027399992, 41.8111803999999978, 152.495658744600007, 68.6476331272999971, -58.8555970856999977
+-14.4232687365000007, 147.556079692200001, -20.6478974919999985, -26.0697278875000009, -13.8217971890999998, -254.339721705199992, 57.7418536710000012
+83.105079034100001, -54.8248471396999975, -63.8031502692999979, .708853169499999991, -66.9853694378000029, -82.7762331386999932, 61.2500030360999972
+-36.7588410158000016, -28.9493409118999985, -91.687467081500003, -58.3805458798000032, 6.46152901590000006, 4.67438597730000005, -75.0726978667999987
+13.3918608119000009, -8.81009918310000018, 167.4408406254, 73.3641107292999948, 99.7524631601999943, -127.7502081003, -239.671528273299998
+135.529122074100002, 1.38508608310000003, 75.1363695022999991, -157.490917820600004, -23.7378253056999995, 33.6490592628000016, -7.9599672791999998
+-73.1345967305999949, -62.9817681270000023, 207.069457080299998, -90.3006007433999969, 173.55455376110001, 138.308886012699986, -37.5366639877000026
diff --git a/src/test/ut/db4ai/data/ecoli.csv b/src/test/ut/db4ai/data/ecoli.csv
new file mode 100644
index 000000000..ff231a903
--- /dev/null
+++ b/src/test/ut/db4ai/data/ecoli.csv
@@ -0,0 +1,336 @@
+0.49,0.29,0.48,0.50,0.56,0.24,0.35,cp
+0.07,0.40,0.48,0.50,0.54,0.35,0.44,cp
+0.56,0.40,0.48,0.50,0.49,0.37,0.46,cp
+0.59,0.49,0.48,0.50,0.52,0.45,0.36,cp
+0.23,0.32,0.48,0.50,0.55,0.25,0.35,cp
+0.67,0.39,0.48,0.50,0.36,0.38,0.46,cp
+0.29,0.28,0.48,0.50,0.44,0.23,0.34,cp
+0.21,0.34,0.48,0.50,0.51,0.28,0.39,cp
+0.20,0.44,0.48,0.50,0.46,0.51,0.57,cp
+0.42,0.40,0.48,0.50,0.56,0.18,0.30,cp
+0.42,0.24,0.48,0.50,0.57,0.27,0.37,cp
+0.25,0.48,0.48,0.50,0.44,0.17,0.29,cp
+0.39,0.32,0.48,0.50,0.46,0.24,0.35,cp
+0.51,0.50,0.48,0.50,0.46,0.32,0.35,cp
+0.22,0.43,0.48,0.50,0.48,0.16,0.28,cp
+0.25,0.40,0.48,0.50,0.46,0.44,0.52,cp
+0.34,0.45,0.48,0.50,0.38,0.24,0.35,cp
+0.44,0.27,0.48,0.50,0.55,0.52,0.58,cp
+0.23,0.40,0.48,0.50,0.39,0.28,0.38,cp
+0.41,0.57,0.48,0.50,0.39,0.21,0.32,cp
+0.40,0.45,0.48,0.50,0.38,0.22,0.00,cp
+0.31,0.23,0.48,0.50,0.73,0.05,0.14,cp
+0.51,0.54,0.48,0.50,0.41,0.34,0.43,cp
+0.30,0.16,0.48,0.50,0.56,0.11,0.23,cp
+0.36,0.39,0.48,0.50,0.48,0.22,0.23,cp
+0.29,0.37,0.48,0.50,0.48,0.44,0.52,cp
+0.25,0.40,0.48,0.50,0.47,0.33,0.42,cp
+0.21,0.51,0.48,0.50,0.50,0.32,0.41,cp
+0.43,0.37,0.48,0.50,0.53,0.35,0.44,cp
+0.43,0.39,0.48,0.50,0.47,0.31,0.41,cp
+0.53,0.38,0.48,0.50,0.44,0.26,0.36,cp
+0.34,0.33,0.48,0.50,0.38,0.35,0.44,cp
+0.56,0.51,0.48,0.50,0.34,0.37,0.46,cp
+0.40,0.29,0.48,0.50,0.42,0.35,0.44,cp
+0.24,0.35,0.48,0.50,0.31,0.19,0.31,cp
+0.36,0.54,0.48,0.50,0.41,0.38,0.46,cp
+0.29,0.52,0.48,0.50,0.42,0.29,0.39,cp
+0.65,0.47,0.48,0.50,0.59,0.30,0.40,cp
+0.32,0.42,0.48,0.50,0.35,0.28,0.38,cp
+0.38,0.46,0.48,0.50,0.48,0.22,0.29,cp
+0.33,0.45,0.48,0.50,0.52,0.32,0.41,cp
+0.30,0.37,0.48,0.50,0.59,0.41,0.49,cp +0.40,0.50,0.48,0.50,0.45,0.39,0.47,cp +0.28,0.38,0.48,0.50,0.50,0.33,0.42,cp +0.61,0.45,0.48,0.50,0.48,0.35,0.41,cp +0.17,0.38,0.48,0.50,0.45,0.42,0.50,cp +0.44,0.35,0.48,0.50,0.55,0.55,0.61,cp +0.43,0.40,0.48,0.50,0.39,0.28,0.39,cp +0.42,0.35,0.48,0.50,0.58,0.15,0.27,cp +0.23,0.33,0.48,0.50,0.43,0.33,0.43,cp +0.37,0.52,0.48,0.50,0.42,0.42,0.36,cp +0.29,0.30,0.48,0.50,0.45,0.03,0.17,cp +0.22,0.36,0.48,0.50,0.35,0.39,0.47,cp +0.23,0.58,0.48,0.50,0.37,0.53,0.59,cp +0.47,0.47,0.48,0.50,0.22,0.16,0.26,cp +0.54,0.47,0.48,0.50,0.28,0.33,0.42,cp +0.51,0.37,0.48,0.50,0.35,0.36,0.45,cp +0.40,0.35,0.48,0.50,0.45,0.33,0.42,cp +0.44,0.34,0.48,0.50,0.30,0.33,0.43,cp +0.42,0.38,0.48,0.50,0.54,0.34,0.43,cp +0.44,0.56,0.48,0.50,0.50,0.46,0.54,cp +0.52,0.36,0.48,0.50,0.41,0.28,0.38,cp +0.36,0.41,0.48,0.50,0.48,0.47,0.54,cp +0.18,0.30,0.48,0.50,0.46,0.24,0.35,cp +0.47,0.29,0.48,0.50,0.51,0.33,0.43,cp +0.24,0.43,0.48,0.50,0.54,0.52,0.59,cp +0.25,0.37,0.48,0.50,0.41,0.33,0.42,cp +0.52,0.57,0.48,0.50,0.42,0.47,0.54,cp +0.25,0.37,0.48,0.50,0.43,0.26,0.36,cp +0.35,0.48,0.48,0.50,0.56,0.40,0.48,cp +0.26,0.26,0.48,0.50,0.34,0.25,0.35,cp +0.44,0.51,0.48,0.50,0.47,0.26,0.36,cp +0.37,0.50,0.48,0.50,0.42,0.36,0.45,cp +0.44,0.42,0.48,0.50,0.42,0.25,0.20,cp +0.24,0.43,0.48,0.50,0.37,0.28,0.38,cp +0.42,0.30,0.48,0.50,0.48,0.26,0.36,cp +0.48,0.42,0.48,0.50,0.45,0.25,0.35,cp +0.41,0.48,0.48,0.50,0.51,0.44,0.51,cp +0.44,0.28,0.48,0.50,0.43,0.27,0.37,cp +0.29,0.41,0.48,0.50,0.48,0.38,0.46,cp +0.34,0.28,0.48,0.50,0.41,0.35,0.44,cp +0.41,0.43,0.48,0.50,0.45,0.31,0.41,cp +0.29,0.47,0.48,0.50,0.41,0.23,0.34,cp +0.34,0.55,0.48,0.50,0.58,0.31,0.41,cp +0.36,0.56,0.48,0.50,0.43,0.45,0.53,cp +0.40,0.46,0.48,0.50,0.52,0.49,0.56,cp +0.50,0.49,0.48,0.50,0.49,0.46,0.53,cp +0.52,0.44,0.48,0.50,0.37,0.36,0.42,cp +0.50,0.51,0.48,0.50,0.27,0.23,0.34,cp +0.53,0.42,0.48,0.50,0.16,0.29,0.39,cp +0.34,0.46,0.48,0.50,0.52,0.35,0.44,cp +0.40,0.42,0.48,0.50,0.37,0.27,0.27,cp +0.41,0.43,0.48,0.50,0.50,0.24,0.25,cp +0.30,0.45,0.48,0.50,0.36,0.21,0.32,cp +0.31,0.47,0.48,0.50,0.29,0.28,0.39,cp +0.64,0.76,0.48,0.50,0.45,0.35,0.38,cp +0.35,0.37,0.48,0.50,0.30,0.34,0.43,cp +0.57,0.54,0.48,0.50,0.37,0.28,0.33,cp +0.65,0.55,0.48,0.50,0.34,0.37,0.28,cp +0.51,0.46,0.48,0.50,0.58,0.31,0.41,cp +0.38,0.40,0.48,0.50,0.63,0.25,0.35,cp +0.24,0.57,0.48,0.50,0.63,0.34,0.43,cp +0.38,0.26,0.48,0.50,0.54,0.16,0.28,cp +0.33,0.47,0.48,0.50,0.53,0.18,0.29,cp +0.24,0.34,0.48,0.50,0.38,0.30,0.40,cp +0.26,0.50,0.48,0.50,0.44,0.32,0.41,cp +0.44,0.49,0.48,0.50,0.39,0.38,0.40,cp +0.43,0.32,0.48,0.50,0.33,0.45,0.52,cp +0.49,0.43,0.48,0.50,0.49,0.30,0.40,cp +0.47,0.28,0.48,0.50,0.56,0.20,0.25,cp +0.32,0.33,0.48,0.50,0.60,0.06,0.20,cp +0.34,0.35,0.48,0.50,0.51,0.49,0.56,cp +0.35,0.34,0.48,0.50,0.46,0.30,0.27,cp +0.38,0.30,0.48,0.50,0.43,0.29,0.39,cp +0.38,0.44,0.48,0.50,0.43,0.20,0.31,cp +0.41,0.51,0.48,0.50,0.58,0.20,0.31,cp +0.34,0.42,0.48,0.50,0.41,0.34,0.43,cp +0.51,0.49,0.48,0.50,0.53,0.14,0.26,cp +0.25,0.51,0.48,0.50,0.37,0.42,0.50,cp +0.29,0.28,0.48,0.50,0.50,0.42,0.50,cp +0.25,0.26,0.48,0.50,0.39,0.32,0.42,cp +0.24,0.41,0.48,0.50,0.49,0.23,0.34,cp +0.17,0.39,0.48,0.50,0.53,0.30,0.39,cp +0.04,0.31,0.48,0.50,0.41,0.29,0.39,cp +0.61,0.36,0.48,0.50,0.49,0.35,0.44,cp +0.34,0.51,0.48,0.50,0.44,0.37,0.46,cp +0.28,0.33,0.48,0.50,0.45,0.22,0.33,cp +0.40,0.46,0.48,0.50,0.42,0.35,0.44,cp +0.23,0.34,0.48,0.50,0.43,0.26,0.37,cp +0.37,0.44,0.48,0.50,0.42,0.39,0.47,cp +0.00,0.38,0.48,0.50,0.42,0.48,0.55,cp +0.39,0.31,0.48,0.50,0.38,0.34,0.43,cp 
+0.30,0.44,0.48,0.50,0.49,0.22,0.33,cp +0.27,0.30,0.48,0.50,0.71,0.28,0.39,cp +0.17,0.52,0.48,0.50,0.49,0.37,0.46,cp +0.36,0.42,0.48,0.50,0.53,0.32,0.41,cp +0.30,0.37,0.48,0.50,0.43,0.18,0.30,cp +0.26,0.40,0.48,0.50,0.36,0.26,0.37,cp +0.40,0.41,0.48,0.50,0.55,0.22,0.33,cp +0.22,0.34,0.48,0.50,0.42,0.29,0.39,cp +0.44,0.35,0.48,0.50,0.44,0.52,0.59,cp +0.27,0.42,0.48,0.50,0.37,0.38,0.43,cp +0.16,0.43,0.48,0.50,0.54,0.27,0.37,cp +0.06,0.61,0.48,0.50,0.49,0.92,0.37,im +0.44,0.52,0.48,0.50,0.43,0.47,0.54,im +0.63,0.47,0.48,0.50,0.51,0.82,0.84,im +0.23,0.48,0.48,0.50,0.59,0.88,0.89,im +0.34,0.49,0.48,0.50,0.58,0.85,0.80,im +0.43,0.40,0.48,0.50,0.58,0.75,0.78,im +0.46,0.61,0.48,0.50,0.48,0.86,0.87,im +0.27,0.35,0.48,0.50,0.51,0.77,0.79,im +0.52,0.39,0.48,0.50,0.65,0.71,0.73,im +0.29,0.47,0.48,0.50,0.71,0.65,0.69,im +0.55,0.47,0.48,0.50,0.57,0.78,0.80,im +0.12,0.67,0.48,0.50,0.74,0.58,0.63,im +0.40,0.50,0.48,0.50,0.65,0.82,0.84,im +0.73,0.36,0.48,0.50,0.53,0.91,0.92,im +0.84,0.44,0.48,0.50,0.48,0.71,0.74,im +0.48,0.45,0.48,0.50,0.60,0.78,0.80,im +0.54,0.49,0.48,0.50,0.40,0.87,0.88,im +0.48,0.41,0.48,0.50,0.51,0.90,0.88,im +0.50,0.66,0.48,0.50,0.31,0.92,0.92,im +0.72,0.46,0.48,0.50,0.51,0.66,0.70,im +0.47,0.55,0.48,0.50,0.58,0.71,0.75,im +0.33,0.56,0.48,0.50,0.33,0.78,0.80,im +0.64,0.58,0.48,0.50,0.48,0.78,0.73,im +0.54,0.57,0.48,0.50,0.56,0.81,0.83,im +0.47,0.59,0.48,0.50,0.52,0.76,0.79,im +0.63,0.50,0.48,0.50,0.59,0.85,0.86,im +0.49,0.42,0.48,0.50,0.53,0.79,0.81,im +0.31,0.50,0.48,0.50,0.57,0.84,0.85,im +0.74,0.44,0.48,0.50,0.55,0.88,0.89,im +0.33,0.45,0.48,0.50,0.45,0.88,0.89,im +0.45,0.40,0.48,0.50,0.61,0.74,0.77,im +0.71,0.40,0.48,0.50,0.71,0.70,0.74,im +0.50,0.37,0.48,0.50,0.66,0.64,0.69,im +0.66,0.53,0.48,0.50,0.59,0.66,0.66,im +0.60,0.61,0.48,0.50,0.54,0.67,0.71,im +0.83,0.37,0.48,0.50,0.61,0.71,0.74,im +0.34,0.51,0.48,0.50,0.67,0.90,0.90,im +0.63,0.54,0.48,0.50,0.65,0.79,0.81,im +0.70,0.40,0.48,0.50,0.56,0.86,0.83,im +0.60,0.50,1.00,0.50,0.54,0.77,0.80,im +0.16,0.51,0.48,0.50,0.33,0.39,0.48,im +0.74,0.70,0.48,0.50,0.66,0.65,0.69,im +0.20,0.46,0.48,0.50,0.57,0.78,0.81,im +0.89,0.55,0.48,0.50,0.51,0.72,0.76,im +0.70,0.46,0.48,0.50,0.56,0.78,0.73,im +0.12,0.43,0.48,0.50,0.63,0.70,0.74,im +0.61,0.52,0.48,0.50,0.54,0.67,0.52,im +0.33,0.37,0.48,0.50,0.46,0.65,0.69,im +0.63,0.65,0.48,0.50,0.66,0.67,0.71,im +0.41,0.51,0.48,0.50,0.53,0.75,0.78,im +0.34,0.67,0.48,0.50,0.52,0.76,0.79,im +0.58,0.34,0.48,0.50,0.56,0.87,0.81,im +0.59,0.56,0.48,0.50,0.55,0.80,0.82,im +0.51,0.40,0.48,0.50,0.57,0.62,0.67,im +0.50,0.57,0.48,0.50,0.71,0.61,0.66,im +0.60,0.46,0.48,0.50,0.45,0.81,0.83,im +0.37,0.47,0.48,0.50,0.39,0.76,0.79,im +0.58,0.55,0.48,0.50,0.57,0.70,0.74,im +0.36,0.47,0.48,0.50,0.51,0.69,0.72,im +0.39,0.41,0.48,0.50,0.52,0.72,0.75,im +0.35,0.51,0.48,0.50,0.61,0.71,0.74,im +0.31,0.44,0.48,0.50,0.50,0.79,0.82,im +0.61,0.66,0.48,0.50,0.46,0.87,0.88,im +0.48,0.49,0.48,0.50,0.52,0.77,0.71,im +0.11,0.50,0.48,0.50,0.58,0.72,0.68,im +0.31,0.36,0.48,0.50,0.58,0.94,0.94,im +0.68,0.51,0.48,0.50,0.71,0.75,0.78,im +0.69,0.39,0.48,0.50,0.57,0.76,0.79,im +0.52,0.54,0.48,0.50,0.62,0.76,0.79,im +0.46,0.59,0.48,0.50,0.36,0.76,0.23,im +0.36,0.45,0.48,0.50,0.38,0.79,0.17,im +0.00,0.51,0.48,0.50,0.35,0.67,0.44,im +0.10,0.49,0.48,0.50,0.41,0.67,0.21,im +0.30,0.51,0.48,0.50,0.42,0.61,0.34,im +0.61,0.47,0.48,0.50,0.00,0.80,0.32,im +0.63,0.75,0.48,0.50,0.64,0.73,0.66,im +0.71,0.52,0.48,0.50,0.64,1.00,0.99,im +0.85,0.53,0.48,0.50,0.53,0.52,0.35,imS +0.63,0.49,0.48,0.50,0.54,0.76,0.79,imS +0.75,0.55,1.00,1.00,0.40,0.47,0.30,imL 
+0.70,0.39,1.00,0.50,0.51,0.82,0.84,imL +0.72,0.42,0.48,0.50,0.65,0.77,0.79,imU +0.79,0.41,0.48,0.50,0.66,0.81,0.83,imU +0.83,0.48,0.48,0.50,0.65,0.76,0.79,imU +0.69,0.43,0.48,0.50,0.59,0.74,0.77,imU +0.79,0.36,0.48,0.50,0.46,0.82,0.70,imU +0.78,0.33,0.48,0.50,0.57,0.77,0.79,imU +0.75,0.37,0.48,0.50,0.64,0.70,0.74,imU +0.59,0.29,0.48,0.50,0.64,0.75,0.77,imU +0.67,0.37,0.48,0.50,0.54,0.64,0.68,imU +0.66,0.48,0.48,0.50,0.54,0.70,0.74,imU +0.64,0.46,0.48,0.50,0.48,0.73,0.76,imU +0.76,0.71,0.48,0.50,0.50,0.71,0.75,imU +0.84,0.49,0.48,0.50,0.55,0.78,0.74,imU +0.77,0.55,0.48,0.50,0.51,0.78,0.74,imU +0.81,0.44,0.48,0.50,0.42,0.67,0.68,imU +0.58,0.60,0.48,0.50,0.59,0.73,0.76,imU +0.63,0.42,0.48,0.50,0.48,0.77,0.80,imU +0.62,0.42,0.48,0.50,0.58,0.79,0.81,imU +0.86,0.39,0.48,0.50,0.59,0.89,0.90,imU +0.81,0.53,0.48,0.50,0.57,0.87,0.88,imU +0.87,0.49,0.48,0.50,0.61,0.76,0.79,imU +0.47,0.46,0.48,0.50,0.62,0.74,0.77,imU +0.76,0.41,0.48,0.50,0.50,0.59,0.62,imU +0.70,0.53,0.48,0.50,0.70,0.86,0.87,imU +0.64,0.45,0.48,0.50,0.67,0.61,0.66,imU +0.81,0.52,0.48,0.50,0.57,0.78,0.80,imU +0.73,0.26,0.48,0.50,0.57,0.75,0.78,imU +0.49,0.61,1.00,0.50,0.56,0.71,0.74,imU +0.88,0.42,0.48,0.50,0.52,0.73,0.75,imU +0.84,0.54,0.48,0.50,0.75,0.92,0.70,imU +0.63,0.51,0.48,0.50,0.64,0.72,0.76,imU +0.86,0.55,0.48,0.50,0.63,0.81,0.83,imU +0.79,0.54,0.48,0.50,0.50,0.66,0.68,imU +0.57,0.38,0.48,0.50,0.06,0.49,0.33,imU +0.78,0.44,0.48,0.50,0.45,0.73,0.68,imU +0.78,0.68,0.48,0.50,0.83,0.40,0.29,om +0.63,0.69,0.48,0.50,0.65,0.41,0.28,om +0.67,0.88,0.48,0.50,0.73,0.50,0.25,om +0.61,0.75,0.48,0.50,0.51,0.33,0.33,om +0.67,0.84,0.48,0.50,0.74,0.54,0.37,om +0.74,0.90,0.48,0.50,0.57,0.53,0.29,om +0.73,0.84,0.48,0.50,0.86,0.58,0.29,om +0.75,0.76,0.48,0.50,0.83,0.57,0.30,om +0.77,0.57,0.48,0.50,0.88,0.53,0.20,om +0.74,0.78,0.48,0.50,0.75,0.54,0.15,om +0.68,0.76,0.48,0.50,0.84,0.45,0.27,om +0.56,0.68,0.48,0.50,0.77,0.36,0.45,om +0.65,0.51,0.48,0.50,0.66,0.54,0.33,om +0.52,0.81,0.48,0.50,0.72,0.38,0.38,om +0.64,0.57,0.48,0.50,0.70,0.33,0.26,om +0.60,0.76,1.00,0.50,0.77,0.59,0.52,om +0.69,0.59,0.48,0.50,0.77,0.39,0.21,om +0.63,0.49,0.48,0.50,0.79,0.45,0.28,om +0.71,0.71,0.48,0.50,0.68,0.43,0.36,om +0.68,0.63,0.48,0.50,0.73,0.40,0.30,om +0.77,0.57,1.00,0.50,0.37,0.54,0.01,omL +0.66,0.49,1.00,0.50,0.54,0.56,0.36,omL +0.71,0.46,1.00,0.50,0.52,0.59,0.30,omL +0.67,0.55,1.00,0.50,0.66,0.58,0.16,omL +0.68,0.49,1.00,0.50,0.62,0.55,0.28,omL +0.74,0.49,0.48,0.50,0.42,0.54,0.36,pp +0.70,0.61,0.48,0.50,0.56,0.52,0.43,pp +0.66,0.86,0.48,0.50,0.34,0.41,0.36,pp +0.73,0.78,0.48,0.50,0.58,0.51,0.31,pp +0.65,0.57,0.48,0.50,0.47,0.47,0.51,pp +0.72,0.86,0.48,0.50,0.17,0.55,0.21,pp +0.67,0.70,0.48,0.50,0.46,0.45,0.33,pp +0.67,0.81,0.48,0.50,0.54,0.49,0.23,pp +0.67,0.61,0.48,0.50,0.51,0.37,0.38,pp +0.63,1.00,0.48,0.50,0.35,0.51,0.49,pp +0.57,0.59,0.48,0.50,0.39,0.47,0.33,pp +0.71,0.71,0.48,0.50,0.40,0.54,0.39,pp +0.66,0.74,0.48,0.50,0.31,0.38,0.43,pp +0.67,0.81,0.48,0.50,0.25,0.42,0.25,pp +0.64,0.72,0.48,0.50,0.49,0.42,0.19,pp +0.68,0.82,0.48,0.50,0.38,0.65,0.56,pp +0.32,0.39,0.48,0.50,0.53,0.28,0.38,pp +0.70,0.64,0.48,0.50,0.47,0.51,0.47,pp +0.63,0.57,0.48,0.50,0.49,0.70,0.20,pp +0.74,0.82,0.48,0.50,0.49,0.49,0.41,pp +0.63,0.86,0.48,0.50,0.39,0.47,0.34,pp +0.63,0.83,0.48,0.50,0.40,0.39,0.19,pp +0.63,0.71,0.48,0.50,0.60,0.40,0.39,pp +0.71,0.86,0.48,0.50,0.40,0.54,0.32,pp +0.68,0.78,0.48,0.50,0.43,0.44,0.42,pp +0.64,0.84,0.48,0.50,0.37,0.45,0.40,pp +0.74,0.47,0.48,0.50,0.50,0.57,0.42,pp +0.75,0.84,0.48,0.50,0.35,0.52,0.33,pp +0.63,0.65,0.48,0.50,0.39,0.44,0.35,pp 
+0.69,0.67,0.48,0.50,0.30,0.39,0.24,pp
+0.70,0.71,0.48,0.50,0.42,0.84,0.85,pp
+0.69,0.80,0.48,0.50,0.46,0.57,0.26,pp
+0.64,0.66,0.48,0.50,0.41,0.39,0.20,pp
+0.63,0.80,0.48,0.50,0.46,0.31,0.29,pp
+0.66,0.71,0.48,0.50,0.41,0.50,0.35,pp
+0.69,0.59,0.48,0.50,0.46,0.44,0.52,pp
+0.68,0.67,0.48,0.50,0.49,0.40,0.34,pp
+0.64,0.78,0.48,0.50,0.50,0.36,0.38,pp
+0.62,0.78,0.48,0.50,0.47,0.49,0.54,pp
+0.76,0.73,0.48,0.50,0.44,0.39,0.39,pp
+0.64,0.81,0.48,0.50,0.37,0.39,0.44,pp
+0.29,0.39,0.48,0.50,0.52,0.40,0.48,pp
+0.62,0.83,0.48,0.50,0.46,0.36,0.40,pp
+0.56,0.54,0.48,0.50,0.43,0.37,0.30,pp
+0.69,0.66,0.48,0.50,0.41,0.50,0.25,pp
+0.69,0.65,0.48,0.50,0.63,0.48,0.41,pp
+0.43,0.59,0.48,0.50,0.52,0.49,0.56,pp
+0.74,0.56,0.48,0.50,0.47,0.68,0.30,pp
+0.71,0.57,0.48,0.50,0.48,0.35,0.32,pp
+0.61,0.60,0.48,0.50,0.44,0.39,0.38,pp
+0.59,0.61,0.48,0.50,0.42,0.42,0.37,pp
+0.74,0.74,0.48,0.50,0.31,0.53,0.52,pp
diff --git a/src/test/ut/db4ai/data/kmeans_7_1000_10_train.txt b/src/test/ut/db4ai/data/kmeans_7_1000_10_train.txt
new file mode 100644
index 000000000..1f4463527
--- /dev/null
+++ b/src/test/ut/db4ai/data/kmeans_7_1000_10_train.txt
@@ -0,0 +1,1000 @@
+82.2331969052,-52.1530986202,-64.0339866001,-.3254986397,-64.6012142076,-81.5499670645,59.6012626709
+-68.3281513964,-2.125308613,88.779729847,-.4175925756,-128.3231357393,-66.5842091526,18.9756894134
+-73.7828756028,-64.2826256076,206.4240641515,-90.5394269423,173.9694921811,139.2345162292,-38.0608840774
+-55.498606193,-137.6288396111,171.33957665,41.3780102564,151.1708762977,69.5250224786,-58.2361549753
+-52.7822332392,-140.3147320544,170.4933532733,42.8555077881,152.240935353,70.3131634349,-58.2979448124
+-54.2074644232,-137.6155363974,170.6321020484,42.2189973639,152.5557889504,69.403579877,-58.0380485559
+-13.145605243,146.275927275,-20.2919549703,-25.4424950737,-14.183644585,-253.6697700576,59.4418787872
+-29.3006835288,125.4828476601,-28.2365014971,-5.3208613384,78.9437471166,53.2184669494,9.1290857024
+-14.2797077528,146.9905757673,-20.7657358009,-25.9450014833,-15.4778858536,-255.6833154457,56.4675426708
+134.9146855583,.7561256429,75.0268264251,-156.8178624378,-24.8670868581,33.1481892333,-9.7994489961
+-30.8683612074,124.3709952284,-27.8274993024,-4.5774163063,79.9436110343,55.2800256791,8.4929650633
+-53.6772240453,-138.5586145437,168.7745933397,41.0891507763,152.835044766,68.0224315511,-58.5247472096
+-66.8370716975,-4.2898406002,87.2093247088,-2.528904866,-129.5046784599,-67.3258412794,19.0503193587
+-12.787355155,145.9673002515,-20.9056804158,-28.254400301,-12.3319076325,-253.0453105088,58.5627206039
+82.9044859462,-56.2367974125,-64.8394467107,3.501721407,-68.6840331135,-82.9752392028,61.5050556577
+-4.0092974908,-21.8384161354,72.4452087143,156.6041432085,-37.9117065685,117.0383220658,178.9676635074
+-72.7756295753,-62.39059476,207.2675553053,-90.5981649464,170.7697778151,138.4786280949,-37.7704812081
+81.8543259705,-55.2281324753,-63.9745642248,.7533785504,-67.2335495326,-82.967475518,62.1427516768
+-14.1644972765,148.1746993705,-21.1153356016,-25.0905562301,-13.4311932366,-255.2437710209,58.1019464204
+-66.8242441386,-4.7348425578,88.1825526365,-2.6093579688,-129.4718920989,-67.2585467619,17.5593622182
+-67.6039101311,-1.9711979476,91.3656168555,-1.9922806119,-129.3379826788,-65.8595455201,18.9227355048
+-71.2074847591,-62.2320151646,206.5748088676,-91.1574936731,175.2068355085,138.3916531723,-37.4461827255
+-73.4266602667,-62.57586823,205.9686745069,-89.3984819362,173.8420729391,137.073850983,-36.7529641598
+12.4348396826,-7.5503116215,165.7673800803,74.5277877289,99.8024936083,-127.3516618358,-237.4476858327 +-36.6101385643,-29.4554822954,-89.5772770596,-56.8096348812,7.0147006019,3.5976442549,-75.1925724434 +-4.1555539019,-23.3577278681,72.8573049959,157.7555710128,-40.6938415777,118.1470156311,178.2816128384 +-74.4938204594,-61.7291701206,207.846473091,-90.2387707447,173.3838739193,139.1720072485,-36.475621811 +-55.8573255962,-138.986369726,171.7221204986,39.4983127187,153.8886431005,67.9529893345,-59.0650505411 +-74.0640902823,-64.4356363037,206.1762334101,-91.6966004336,172.9832478462,138.8225383063,-37.0342182302 +136.5126363473,.1819397749,75.0325445051,-158.9297940529,-23.4750765984,35.3071511916,-7.4005096177 +-15.5335253954,147.8638242128,-20.9518237678,-26.3612105924,-14.328525906,-254.191037851,59.4952579982 +14.6522858658,-8.9609563807,167.6829183638,74.0064217519,98.2981455823,-126.6716736203,-240.4391270169 +-55.4064185776,-138.9506555072,172.6118086237,40.5197357891,152.5587131529,70.3708849832,-58.8709534169 +-38.0225116126,-29.0453132779,-93.20936752,-58.6314464582,6.4721151632,4.0906415117,-74.0925594576 +-30.438142459,124.5108854902,-28.5692989189,-3.7308620438,81.1646967269,53.460766254,8.2404589044 +11.1876118017,-9.3347938118,168.1062974322,72.8673458548,100.5461374775,-127.9339667057,-238.6821994445 +83.0732589079,-54.7566120593,-63.5674680426,-.8545868376,-67.7404984217,-81.6847841743,60.7009108647 +-68.3382531417,-5.1120851091,88.0626348021,.2246662534,-128.9865192777,-67.4969595021,18.7377258302 +84.3692017646,-56.7830254841,-63.9712312161,1.5464008486,-66.1741913605,-82.3001920025,61.2180077257 +-69.573433075,-4.858962223,89.2902616319,-1.4466551724,-130.9049649089,-67.4765685196,17.7985949794 +-54.1714813309,-138.4371756904,173.2032288077,41.434141528,152.4308416616,68.4670298555,-59.5491341172 +81.348957719,-55.7691703982,-63.2800922186,1.6478808718,-65.9903216356,-82.3714960343,60.1670989095 +83.5258988897,-54.5098187148,-63.8498001758,.56258612,-67.2801712492,-81.9322021049,60.320890529 +136.48313282,.6914982133,73.336017125,-155.2923116648,-24.5003350229,33.7763801866,-8.3801105083 +-67.1385903384,-2.2680150802,89.3028643556,-2.272801955,-129.6195575393,-66.2308958241,16.6574885485 +-12.5995557714,146.4368496962,-21.8677723335,-23.6427373702,-13.474327473,-253.1831821352,56.7262349091 +12.6566485282,-8.0575196905,168.7412451993,74.1352825262,100.7875323908,-128.4129940606,-239.025993862 +84.1751892417,-53.9966074543,-62.5267679542,1.5441856367,-67.2858819821,-82.4340422932,61.3396540874 +-13.6379037345,147.604234133,-19.9721606334,-27.4260397092,-12.8447068043,-254.3388762289,56.8228640374 +-73.3182199171,-63.5920336485,206.7225997131,-89.9786674496,172.3122602325,136.7222295222,-38.2368941597 +-29.1459735368,126.1747431198,-26.6983921544,-3.3198146452,79.7965998441,54.1171510558,8.5469439826 +-68.5157849594,-4.0041770582,87.1119757162,-2.6176249246,-129.5518342794,-67.6068358534,18.0335493042 +15.593660411,-9.3512661746,166.4714584056,73.0710690214,99.1754557753,-128.3400276655,-239.5040983374 +84.8832786,-54.8535181208,-65.0371028174,1.294289429,-65.528613504,-82.9022929572,63.3722607737 +-4.0430804908,-21.5531819032,72.7231949205,155.9758820499,-38.5718505785,118.7782431117,178.525087936 +136.6876734645,1.0899946542,75.1221590876,-156.7037146608,-24.3760787167,34.0356488344,-5.6516885463 +-53.599301465,-138.4418595722,172.4531562858,40.6768389014,153.8186716959,68.5302118561,-57.8802140131 
+-31.9718634718,125.4774113465,-26.9366213783,-1.76036506,80.0570659187,53.7995087762,9.1009443403 +-73.2066236933,-64.4846606171,206.9184329115,-89.6807087989,174.9476919199,138.3896327505,-37.4375033206 +-13.216833908,146.3251846881,-20.6349335091,-26.992718941,-13.0159039208,-254.1735813864,58.4671686536 +-4.6934617909,-22.0856468988,73.216718301,156.1231846257,-40.7101195227,119.2508844177,179.5725999577 +85.7810815656,-54.1287081151,-62.4071278147,.2828061414,-66.2884160783,-82.5508017258,61.0004974124 +-68.0828056247,-3.6822308907,89.9247089802,-1.5573652827,-130.618836536,-65.1201733738,18.6372096145 +-67.237076511,-3.276810144,89.2530775244,-2.7010668503,-129.777433616,-66.8165545534,17.9696372401 +-72.3937453436,-63.2556692743,207.5125759355,-90.047836992,173.7236180363,138.9736200424,-38.8440136254 +-66.8987783719,-2.9203363044,89.2087019633,-2.2811317261,-130.0253683612,-68.530209792,18.0613910606 +82.8140514837,-54.3561616786,-63.7116663945,2.8498952502,-67.5873781617,-82.6228207884,62.9913491615 +-66.5513449798,-4.8688692701,88.9881262099,-3.5258984887,-129.8292311041,-65.7613869708,18.754834736 +-36.740955344,-30.3268728908,-92.5217490208,-58.3359480673,5.7091700116,3.7254507415,-74.5395679947 +-36.963007899,-29.8777122034,-91.5288154897,-58.5852668034,4.8103761658,3.3452580857,-73.2703965083 +14.2816756687,-7.8719610296,166.3346687503,74.2259703227,99.8197174104,-129.6762354795,-240.7517547302 +-14.0517373284,148.5596531834,-21.3353850578,-28.4406347354,-13.3563446671,-254.5657845903,58.4107039045 +-3.8581071317,-21.597020109,71.2672214402,156.8699846389,-39.3337168902,119.3656767236,178.5235905886 +-12.7554637173,148.1705635493,-19.5674377594,-27.8469419285,-13.0096194972,-253.5571183332,56.1403615128 +12.472272893,-9.0659118736,167.5328333565,74.1825215765,100.7958997009,-129.5793038274,-240.2151197473 +-68.4074540362,-3.3501361178,90.2095503547,-1.0143773193,-128.2230959554,-66.6098475658,18.3282452495 +-53.1025524794,-139.7276070426,170.9505482542,42.6761113169,152.4952644217,69.7214334849,-58.2222924753 +-29.967006014,124.2351815974,-26.5469931645,-2.5558755644,81.2153311,53.2321890359,8.0102944653 +-38.2462469167,-28.6647480086,-90.4746482119,-57.0639188743,6.123945571,2.4041341612,-76.0069780333 +-35.9639439099,-27.0760892385,-93.6802249298,-59.0457281375,5.2171875254,5.0387680449,-74.9431760559 +-72.4617267521,-64.3636236429,207.8248931488,-89.6180075525,173.6206873892,136.2893398079,-37.3544590379 +-54.378840259,-136.7184744718,171.7436741809,41.1445992671,151.8669430855,67.3004069799,-58.5728457469 +-37.1735749333,-27.5026353323,-91.4330157538,-58.9182870835,8.4673320216,5.8919793577,-75.1672299394 +11.1109129148,-7.674881928,167.1620775639,75.5759639907,99.2299534018,-126.9257573012,-241.8520181935 +-68.2055318788,-3.6578617995,90.0424413438,-.214598151,-128.6461453463,-66.8574199619,18.8542826117 +-65.6708299343,-1.7027002133,89.3450548135,-1.974475542,-130.3387611828,-67.0318911954,17.2097057358 +-53.7847574266,-137.3177721254,171.8143685395,41.1142175602,153.6048071662,67.7008770215,-58.2455322987 +-52.9000554416,-136.4914215546,172.1683270287,42.5563838351,154.4336017127,69.4690788889,-58.1655307452 +-15.1759452226,147.25387601,-21.3183560583,-26.89108294,-12.7192077661,-254.8332250516,58.0853134192 +-73.8380318204,-63.2898584002,204.807299712,-89.7006216859,174.210758766,138.5159701871,-38.5217614437 +-72.7886405916,-61.8735826762,206.0697942571,-90.4096756044,174.1260903509,138.8305421826,-36.510498367 
+-68.095466565,-2.2878413773,88.1454312876,-1.093076714,-129.1222684349,-66.2180124821,20.838740005 +11.0448534914,-9.4239984206,165.2790400367,72.362767609,99.2901955319,-128.5677691617,-239.4871362351 +-73.7422048227,-63.4502197354,206.6879223958,-91.4125897033,172.7552671392,139.3141135828,-38.0682024724 +-67.367963328,-4.7365600038,88.6836702337,-1.9023176153,-129.9628457935,-67.2451506686,18.60796853 +12.137239315,-10.1871260264,169.2703649451,73.141284917,99.6521531009,-129.0546331983,-239.2072783997 +-37.788967003,-30.9224861925,-92.3253433276,-58.10240885,6.0364090379,6.1673235893,-73.8171643606 +-3.9721401412,-22.871229786,71.928811909,156.6565610939,-38.7578380256,119.620324742,180.4818670565 +-14.31726118,146.7796850303,-20.4415486474,-25.7501252685,-15.0998199246,-254.9700145009,56.3273527194 +-73.7355988032,-63.2150794232,207.3734151142,-89.9268903656,174.2485974174,136.6799945941,-36.6991691783 +135.4429391433,2.8480491886,75.2551773503,-156.4786400339,-24.0496318926,33.0385644058,-8.3060667194 +135.7520207165,1.5165310274,76.3329669526,-158.0002679742,-24.1220564552,33.3890774674,-8.0890891144 +-68.9161800273,-4.7353236693,89.1887669675,-1.2148674385,-128.8379452038,-66.791369065,17.9161559431 +-30.9770162549,124.1643365244,-28.3626604226,-4.2929952179,80.0129118197,55.6439779224,8.2362363748 +83.2177288396,-56.9644928886,-63.762779534,1.4286228595,-68.216662929,-82.5183900856,61.6704076569 +136.2260449101,1.5084413468,75.9408463052,-156.5226137615,-23.9947627292,33.5018008113,-8.6001536129 +12.3996402623,-8.863750177,168.2574244172,73.1933630085,99.4487626804,-127.3633659086,-240.1447934224 +-37.0139499573,-28.9469377845,-91.9168656115,-59.4766347583,7.8884614058,6.5380201163,-75.3425367813 +-14.5384595276,145.9096534541,-20.570124278,-26.1053768829,-13.7947960439,-254.8684978483,57.5008509913 +-55.3178640124,-137.8788461796,173.407581487,43.178605774,153.0744588245,68.4546369885,-58.1649154361 +-14.0530001285,148.572966403,-22.1678158951,-25.1133611233,-12.9925701302,-253.8559633107,58.4111417319 +-15.294030317,148.735282824,-20.4331570233,-25.5952636725,-13.6260120781,-254.4413853098,59.3277761088 +83.4702774812,-53.6975897884,-64.3869398338,.3514828473,-66.2253685449,-84.0934739682,61.0252004074 +-67.1401409507,-5.6129609224,88.628998223,-2.3987454304,-129.039653465,-65.9447268682,17.825919179 +135.9665622947,1.2489012417,76.2373070609,-158.5525302897,-24.4854727816,35.8946775599,-5.381692503 +-14.57984607,147.1535472217,-21.4239513627,-24.8299109291,-12.0267722111,-253.0875974815,54.8385846248 +-54.3735539729,-138.503871216,170.9260651144,42.0058104382,153.132437118,68.1626980115,-60.1753491074 +-14.6033395855,146.9377847412,-21.0753533359,-25.8980736685,-12.7723876455,-255.4942557521,57.1788893656 +-11.8173601967,147.6274261835,-19.8427967302,-25.4454187558,-12.8000781732,-255.6365518759,57.1062945976 +-15.9155206105,146.3377139276,-21.0231290724,-25.8802839199,-14.0710184751,-256.4669610031,59.5580563031 +82.5449795382,-54.5178159465,-64.7440207647,.7665936722,-67.028305739,-81.9765892001,59.3782980496 +-1.9794342903,-23.0708474098,68.0272690782,157.5849113384,-39.8125456741,119.6269069511,180.5665788394 +-30.234027453,126.1914775701,-29.3619089412,-3.6147039809,81.7467490415,53.7680371208,6.9698126125 +-14.7763856533,146.0661838846,-19.9114788119,-26.6994351706,-13.3118350748,-251.8363164091,57.0719359674 +-15.626994137,147.6409300088,-18.9868159781,-25.2605839391,-14.9626585728,-254.6401597354,58.858639723 
+-15.4597427095,147.0920645719,-20.6139034845,-27.8001749239,-12.3278834002,-253.6201921119,57.9206289421 +-31.4459516242,126.1294791824,-27.9354787217,-4.736052188,79.3890518974,55.7281016686,7.113305344 +-3.7659606044,-22.0773389435,72.1993881033,156.0790914683,-38.3414186421,118.5487138751,178.820163757 +-72.718828728,-62.5897072073,207.5915827664,-90.3532381919,176.1404391868,139.1263844358,-36.2609958931 +-3.5456277947,-22.4717120553,72.2755258522,156.8577501759,-39.2380506443,116.6793009286,179.3425289928 +-72.5416345031,-62.8260412567,207.0342731716,-90.4104511139,172.5670830592,138.0452091444,-37.2722602163 +-54.2001640003,-137.7571439105,170.5091490642,42.5196232358,151.6903013765,68.2670591671,-58.896775147 +83.2689788537,-54.6858898518,-63.8525150797,-.4289360169,-66.2520520733,-82.8909356127,59.5006305194 +-29.789520833,125.2081999724,-27.5793338992,-5.025437807,79.3545918598,54.9532246888,9.0228688349 +-36.8943430212,-27.8765082828,-90.527561725,-59.6105395391,6.0755585709,4.4856696777,-75.293581635 +-3.1101619448,-21.6754903219,71.0544383445,157.3430454069,-39.5050693656,117.0837144063,179.7956827151 +83.8084066332,-55.1533990383,-63.1777899447,1.9507935014,-66.4714661385,-81.9851014535,62.5570525247 +-15.8756667132,147.3555756328,-21.4269776073,-24.4987049402,-14.3922599775,-255.5914808993,57.050371438 +-72.5618776755,-65.3195132726,206.8930022309,-89.9061895316,173.6213880495,138.3945199752,-37.4938953485 +-39.514609577,-28.7314622549,-93.070836563,-58.097391571,6.7867199767,5.0437414237,-75.2760763225 +-29.6200772356,123.8364600897,-27.9360194214,-6.3476961184,79.2277186282,53.7164186822,8.0777306285 +-68.4097786689,-2.5073820404,90.1907657408,-1.8278268545,-130.0035985482,-67.4398524121,18.207827973 +-2.8456936958,-23.5045552269,71.0435748819,156.1897850788,-40.1503980425,119.8972241753,179.9148567938 +-31.2837967095,126.4148284971,-27.7805239472,-4.074861402,80.7824937635,53.2194256097,10.3592674221 +-34.5973592022,-28.2379760222,-90.9790358473,-58.9833697878,6.3925044857,3.1174862649,-73.4981214707 +-36.4793628692,-28.6182262362,-91.2317858481,-57.8422401495,6.0951106206,5.1132309971,-74.9870047362 +-15.7025587819,149.1048607913,-21.3809928331,-26.9208775443,-13.9222233169,-252.8208221127,56.2485382658 +82.3801722479,-55.0916646907,-62.493262962,-.8577214889,-66.9052312427,-82.3539282856,61.1718667467 +136.5316828689,2.1234174159,75.5617720759,-156.622604016,-23.3604950288,33.9427130186,-7.656908916 +-53.7701947259,-136.7182099882,173.2640149649,41.93905331,153.7688163431,68.9294145232,-59.9440167233 +-54.8225330047,-138.9217294627,171.6804814913,40.7205450409,152.3897351101,69.2734567823,-59.7097994622 +-66.3504887118,-4.7494619029,91.3972931384,.5650623762,-130.8007261637,-66.3984928297,18.9498202552 +-15.2615677799,149.5043215106,-21.3288126571,-25.3930721536,-12.5599472384,-254.6109752087,58.6873712998 +-73.9776944968,-62.8260959004,205.8717234383,-89.5429480818,172.9355025606,136.9913534906,-37.2397758722 +-3.2860582456,-21.4512384155,72.2928781111,157.8094447808,-39.7245993539,118.2233924491,180.7612933014 +-72.6091371452,-62.0095503197,206.6324953436,-90.6627004742,173.6839513926,138.2017158736,-37.7275675976 +137.4816675556,2.2988784087,76.0751456588,-158.9898691604,-23.9700441499,33.5936043085,-8.6286369096 +-35.0451606047,-29.0799836347,-92.582694912,-58.9250265877,5.1499446911,5.2479229661,-73.1998764027 +136.9898853103,.3434215742,75.8419771519,-157.2512558587,-23.0911023238,33.0184022564,-9.3611207611 
+-36.6999115878,-27.8340364217,-91.4522712243,-57.3678191013,6.6406347761,7.233319141,-75.520489842 +-36.6559863728,-27.1951378311,-92.5126367231,-58.8785547835,5.961376598,4.7612890022,-75.4495618311 +-68.020470652,-4.4927097951,89.8028661278,-.8133537187,-130.0669528153,-66.1656707626,17.6730088834 +-29.5033776449,124.2838672591,-29.9866242953,-4.8806511325,80.5750102127,52.9572847968,7.0486537656 +-14.6818628602,147.773932953,-21.2980122831,-25.9069051268,-13.8606892837,-254.5835841135,57.4434582173 +85.1939406869,-54.7554213916,-63.7436525704,.5177424499,-68.5939789874,-82.621046867,62.1886471373 +-67.1682817905,-4.6406495673,88.1110668642,-2.3595634059,-132.017304984,-66.3811294362,19.440841834 +82.3750080134,-54.9844145737,-63.7716429731,.065971728,-66.8800776717,-82.2503399366,61.1248301163 +-28.8154381737,124.8321757424,-26.0479902562,-5.8466450287,80.3767572107,56.0850134729,9.4231905107 +-17.0957044478,148.2228900925,-19.9021814267,-25.9686277952,-14.2643151337,-253.4053393431,57.8479468558 +-50.2712611709,-138.1186836931,172.8839308815,41.4671689183,151.8927221306,66.9109282168,-60.2052692231 +83.3060391658,-54.3926797336,-64.6076349808,.9323980511,-68.2408270896,-83.2817394343,60.8336537969 +-54.6585992951,-137.0846250044,172.9305201633,41.3053231019,151.1771618477,68.0639703599,-57.9686898093 +-36.1431527568,-28.5391528697,-89.0556081305,-60.395373877,6.9966521877,4.8638316582,-74.6890083034 +-2.5224496004,-21.1699834719,71.6498934718,157.3859136513,-39.3577639589,118.8085523982,178.1799123107 +83.9505157941,-56.1500349475,-64.4179197899,1.6168344624,-65.5923467764,-82.286940089,59.6206221064 +-15.3447020013,146.2145573921,-19.6160099604,-26.3016338159,-12.4276509896,-252.2386329455,59.9397362042 +-66.5788625335,-3.8518247699,88.1449949143,-2.5004745687,-129.0098842801,-66.5134015225,16.8932525865 +-68.1149471094,-2.3497768695,88.0371292056,-2.0910072102,-130.1293922535,-64.9808577384,19.7705459095 +-67.7898292891,-3.6110787994,88.516063898,-2.2649960975,-129.8608797889,-68.6338801096,18.468892907 +-28.7855828789,125.5702133873,-28.7414367615,-5.1703227522,79.119845497,53.9152667734,8.0318200433 +-73.2136105174,-60.8239239641,205.8794800467,-90.886890512,173.1632541089,138.7663896454,-36.9209705409 +-54.0024694237,-137.7514294051,171.8799857197,40.8431833115,153.3126534378,69.6572068965,-59.1319781137 +-30.8031719737,126.2685818432,-26.2608626284,-4.9027710893,78.4885632219,55.1534073638,5.2234693128 +-71.7710231181,-61.5518443865,208.6046780312,-89.3427531435,173.6178211251,135.1018484907,-37.8744554568 +-74.0751680656,-63.8105122575,207.1062045635,-89.3088292961,172.3676974761,137.2831422732,-37.7824196401 +-13.0671633798,148.5197019463,-18.7422111314,-24.0920072135,-14.0394500031,-254.8714092124,57.9675359157 +-39.0140408008,-28.1348512749,-90.0488996038,-56.955655976,6.9873940791,3.681670592,-73.7321403676 +-35.8504898298,-31.2657238718,-91.1833252796,-56.5012521209,6.5525391659,5.4495174403,-75.4017571217 +-30.8391532417,126.0944418788,-27.8011025067,-5.6132090423,80.5302820755,53.3078092463,11.1342280273 +134.9843373214,1.676533355,75.0355154996,-156.3886522681,-26.0277349795,33.7858193968,-6.9500315947 +-73.6743271502,-62.9368083198,206.4544509399,-90.7812201026,172.5422186506,135.8998918105,-36.7575689986 +-14.710620898,146.4332436417,-19.8079239526,-24.5693554332,-14.593255849,-253.148260113,57.0830853892 +-15.0654829139,147.912655895,-20.214839928,-27.3855855789,-13.7300059367,-253.2750765407,58.5362390238 
+-36.1135354013,-29.8856315915,-90.2648562684,-58.6325202743,7.113457072,5.2369517414,-73.6792390517 +-53.1082722289,-137.003831537,170.4680474237,42.5503384059,153.0055878012,69.7986089673,-58.5810264003 +136.3830684619,1.9592077122,74.8308632687,-157.3275671722,-24.607851091,33.3065672406,-7.073305711 +-68.5803169899,-3.7763373968,89.7991491096,-1.6069515016,-129.1521855237,-65.0858434335,19.9312529468 +-13.5281049346,148.6002190733,-19.4276905141,-27.3224766947,-14.8484377488,-254.1700899333,59.8287790582 +-3.1418555333,-22.8766041225,71.4031939159,157.5481773373,-40.3079648482,116.6999368069,179.9493930907 +-35.6427600054,-29.3882099766,-91.8976345796,-57.6340324,3.4325831307,5.4600605079,-74.413476947 +-1.9083622832,-24.1931242877,71.5073052685,156.188931558,-39.2806992848,118.2813867014,180.3411967826 +-14.4081739185,147.4290019948,-20.7524103437,-26.0022859226,-15.1098129649,-255.4748380182,55.1762463339 +-67.3742800976,-3.7067552239,89.2458759955,-2.2326545001,-129.733584203,-68.5588986393,16.5220875925 +-14.3681964815,148.8065907037,-22.531420381,-25.695199158,-14.6972263052,-256.3407569176,56.404941653 +84.2846889057,-54.7279758086,-63.9434792398,.4639519521,-67.302661029,-84.8980669384,60.2793843541 +-36.1875359706,-27.9964195823,-90.5720408144,-58.7402532316,5.6632690853,4.9421171165,-75.3242302574 +-36.8466415136,-28.3431291629,-90.2057924702,-58.529859661,5.3033414728,6.0881507335,-75.8992646515 +134.0361600873,.6422072178,75.9713701421,-158.6546148744,-24.0923507434,34.1020619965,-8.5350400735 +82.3374440532,-54.2758751072,-65.1619410475,1.2050685211,-67.4680003188,-82.5554324058,61.1663907597 +135.6859855419,2.1684132366,75.615532764,-157.9841785371,-23.0205595907,34.4049234048,-8.298596332 +-3.9785662818,-22.8422239354,71.091627149,158.0198545985,-40.613453155,118.1344935459,179.000055245 +-29.9080649225,125.279325824,-28.3980379358,-3.8731819271,80.5075439696,55.0089703245,6.6431158861 +-31.2552859141,126.3272490028,-27.5724011402,-5.3536822747,81.4358022932,55.3210334369,8.3080893459 +136.2038053049,.0898577055,74.8314180323,-156.8665701722,-22.7745130767,33.7470763953,-8.635593494 +-13.6782942452,147.1338657033,-22.092724689,-26.3875989706,-13.9309437678,-255.6630280019,58.0515516017 +-35.6518951841,-29.9328206835,-91.5281627149,-57.5082476622,7.495686847,3.9352261321,-76.5702241941 +13.2623423238,-10.0131862772,168.5443149025,72.9783169761,99.3770853145,-128.0661847016,-239.8803921038 +-73.2920434499,-62.3250027131,204.635174328,-90.1493058613,172.3306853239,137.1949370396,-37.8399119185 +-55.4730801396,-138.3981145812,172.0174315579,43.6858124182,152.8610247811,68.5436360185,-58.6723430629 +84.1393409711,-51.7988100775,-63.6508131536,1.0091916472,-66.1985962013,-83.8492040979,60.5505796349 +-30.0355416731,124.6144844035,-27.0502005606,-3.5521144443,79.1665184995,55.3562831944,9.3609875838 +136.8509959895,1.8145723404,73.9908721576,-157.2120904922,-25.7517566139,33.3945957965,-7.4370028733 +-66.7878493064,-3.6401231588,89.5769883748,-1.2417161835,-130.3521429052,-65.3984828946,17.8200717405 +85.0982123789,-56.937793824,-64.0695831474,.5339491277,-65.2087276627,-82.4711115744,62.2360949367 +-71.5953296401,-61.3600153568,206.8991674302,-88.4451729634,174.310343443,138.4848636636,-38.6952211318 +-30.3032197383,125.6006991063,-26.5686447735,-3.0209806061,79.6624428385,53.6550170167,9.197201847 +14.396968788,-9.7159883141,166.4347373382,72.0489592908,100.2966925365,-128.6906587038,-239.110163516 
+-4.7103909649,-22.6830704858,70.2380097398,158.1739819436,-37.6497921605,118.2860003605,180.7258795274 +-54.8391514603,-137.7574254564,171.4621305107,41.6251763815,151.2164066507,68.7451626257,-59.995233975 +-29.7373786962,124.6308431929,-26.5193100938,-4.2088360605,81.0197389945,55.1915254661,8.4413461444 +82.3664924375,-55.4920637761,-64.5122377384,.0007745138,-66.7355359397,-84.5078739507,61.3168798967 +-72.4279388855,-65.2138423915,205.3536859384,-89.4646117819,174.7765828149,138.1316918817,-38.1658116561 +-36.6358671435,-27.8790362151,-91.1660970563,-58.3073824123,6.8209757987,5.1638038466,-74.7862476041 +-2.830950811,-21.4813114012,72.344850642,157.6222781328,-39.1882623332,119.3390004159,177.6001229565 +-36.393547743,-27.5026350538,-91.8687549012,-57.8341269939,5.9076222982,3.6154488548,-75.0567286973 +-36.5783217504,-27.6497843072,-92.6888548868,-58.6873242114,5.4633939227,3.4770944154,-73.3649561251 +-66.7839846259,-4.0919922646,89.0916108937,-1.6366658049,-129.8262557409,-66.2725354158,18.5104475493 +137.9544399008,2.4651742707,75.7178031336,-156.777839935,-24.0484741765,33.6177416852,-9.099193842 +-73.6021194314,-62.1581888383,206.9441744014,-90.5964575717,172.464882929,139.0438911286,-37.0737466703 +83.0793641105,-54.1188596371,-64.9074919401,2.0922552039,-67.7900622698,-80.1319751277,62.212345323 +82.2293881615,-55.1050460034,-65.4435132034,-.1915915859,-67.8358866906,-83.1665754614,61.640117094 +-3.6746864486,-22.5154198106,72.8700390456,157.8263673419,-39.7105715879,118.2577915715,180.3761043102 +-34.9179434616,-29.0015375055,-91.5706701588,-58.3711691511,5.2848253293,5.300580192,-73.3778949843 +-35.1626840014,-28.7730544612,-92.7517069824,-59.2829851846,7.3089189385,3.8910409019,-75.9721550686 +-54.5544675368,-139.6851536469,172.1193564503,41.7822642886,152.8251010425,67.7412090613,-59.3470004157 +-30.4407688114,124.5827175616,-25.5781235902,-5.7239602549,79.3745252601,54.2978191816,9.9553974138 +-3.2210192483,-23.0775519619,72.3433461827,159.2615854129,-38.1901258869,117.3812489709,178.1283731433 +135.3841924327,2.3965892943,74.9895579212,-157.3661665483,-23.4382520522,34.2468997351,-8.5354259287 +-68.5271902625,-2.6023579473,88.976045014,-.6739918427,-130.2775389287,-65.91128357,18.780428722 +-13.8504615985,149.5862215096,-21.3496704723,-24.8436004765,-13.1706094801,-255.5520444727,59.1517921926 +135.7554320451,.681106769,75.778522462,-158.2217049195,-23.9891604977,34.6508808571,-9.2286913399 +-28.9252458187,126.1740045375,-27.2676234705,-4.0718317026,79.3614106265,54.6774612764,7.0468567845 +-36.3614963517,-28.4268241268,-92.641674444,-60.7501552578,6.0989312314,4.4216049631,-74.1256054226 +-3.1310084239,-21.3138317953,72.0855270782,157.6701895196,-38.4693265427,116.6320751035,178.9148785475 +-54.0220910086,-138.4750936721,171.7684608542,41.8812091097,153.6697128686,68.4585948093,-60.0310054526 +-75.020220449,-64.8431619839,206.1116718356,-88.4312073476,172.8259253824,138.2476609549,-38.2595700863 +-4.9020365956,-21.3393293004,71.5973253533,156.8805119236,-38.7714573384,117.8517583359,178.6002366338 +-29.9832075131,126.4475006958,-28.213141077,-6.1311189509,81.252279424,54.1746477508,8.2646887129 +16.218224289,-9.4717776612,166.7958447316,75.1297878438,101.2661429373,-127.4082909847,-239.7496667703 +-68.3001110475,-3.504776324,89.8846002594,-.885535755,-129.9042084038,-68.4035474782,18.9213019024 +134.8120338772,1.9033389267,73.9450471431,-156.179121153,-21.7938034692,32.0599772448,-7.5089571728 
+-73.5262471732,-61.668320111,205.6527984163,-91.5028590977,174.2976807545,139.1554764404,-38.0169767699 +-5.7059934684,-23.6466481887,73.2641257521,157.9793240381,-39.1008152406,118.528667084,180.1231194636 +13.0729255923,-8.4738993525,167.2533800479,72.9968360192,99.8185373925,-127.2044127131,-239.5960234058 +-72.1742098479,-63.8907676925,206.7528988446,-88.7806828565,173.2387143939,138.1407901053,-38.0649936537 +-4.7982612504,-24.245660246,71.7186080885,157.4095985806,-39.2887572795,117.0575999157,180.2634209897 +81.9125649987,-55.2967007585,-63.7874805311,-.6553636472,-67.0251715199,-83.8053307166,61.6151665337 +-74.1025675408,-64.4637960957,206.3767450611,-90.727694631,173.1150140613,139.0198853596,-36.8712875181 +-4.137885084,-20.9524707272,72.2003575595,158.8031445651,-38.822406058,118.2573502475,178.2474338536 +135.5136772633,1.0027251232,75.2918402463,-159.0310073316,-24.2432904398,35.5000052733,-6.6243144296 +134.4611087233,-.3948278998,75.3000805841,-156.8244790037,-22.5874288052,33.6646605613,-9.0208588959 +-31.0921607312,124.0846177336,-26.4481235366,-4.6865276961,79.0868910682,54.5494858755,8.4382214044 +-67.4204481196,-3.1901206395,89.1165756423,-2.5611771472,-129.6128091826,-65.9573549552,17.1887305106 +-29.0007192494,126.0430501825,-28.2510970585,-5.0494677532,79.3672829747,53.8593326067,9.4881998044 +-55.4385520923,-138.6756507314,171.3593904303,41.230134567,152.8203424219,69.2863424586,-60.6325184011 +11.9310565378,-10.070888477,167.2238173505,71.4528143118,99.6298600695,-130.2566741788,-239.3421867084 +-39.1566069754,-28.4325761342,-93.3688983069,-57.4135929711,6.9424327798,2.8277275794,-76.0135872225 +-14.8517762882,146.910907783,-21.6361830411,-27.2116078126,-14.1632026211,-254.1722108632,56.9111885211 +-54.5326210735,-138.4735033914,171.2930190847,42.6702556131,152.5350468876,69.6769541483,-60.0734642551 +-35.6436645942,-29.1915891157,-92.0407020609,-57.5054020636,4.8814479605,6.4134188658,-76.3005078797 +-13.1584262711,145.6854710436,-19.7520236197,-25.7391537495,-12.6759066525,-253.3072817561,57.8859600098 +-53.6604969088,-136.6066820645,172.6984202246,41.4972882945,151.7356913938,68.0985811723,-59.4071979777 +-29.7676055141,125.4109202142,-27.933751161,-3.2440140574,79.2928403008,56.3528158779,7.9254329902 +-36.7732461331,-28.9541073094,-93.6287810776,-58.7626729748,6.4347090123,5.6655164833,-75.0761919142 +-66.7247379704,-3.9467903568,90.0618882381,-.4186946385,-131.6705538253,-66.1923287227,17.9484400175 +-3.6219852258,-19.4668879057,71.4863847139,158.6430143408,-40.486588707,119.0023061029,179.1102554281 +13.4319493889,-10.2633716855,166.8193833193,73.1104846614,100.1022778983,-127.362414161,-240.5156629554 +-30.9165157483,126.3031363673,-26.2842329485,-4.7934874924,80.7860830335,55.3822641051,9.0641719032 +-3.9390765067,-22.0399181669,70.6264734064,158.3693402883,-39.4483687808,118.1216356907,179.9884982577 +-53.6793064006,-136.8899399573,171.4821403571,42.3875205167,152.9351642167,68.8097456412,-58.3845837213 +-31.0760073327,126.4374266997,-27.7964774612,-5.6715842108,78.4038766715,52.6931508148,10.3116868763 +13.6067703678,-7.1658713733,167.7058392262,73.3477873658,100.4928198911,-127.177166989,-237.9214785591 +83.9627382532,-54.8391583218,-64.3974904302,1.2723649158,-66.7240573356,-82.5359892377,61.2940045643 +-15.1038700403,146.0297905407,-20.036735398,-26.0294671901,-14.0107272923,-255.9002387842,56.1442227435 +-54.4222412155,-138.663300635,171.2082772356,40.9541702984,153.7720766854,66.9948656861,-59.044461903 
+81.7893820761,-54.8000624185,-63.622851366,-1.8770162556,-66.9218548327,-81.5602313218,61.9236686851 +-68.5590861821,-3.6905523843,89.2405137941,-1.9428221967,-130.8269022707,-66.2054552076,17.0328091306 +-36.353169066,-28.5928663633,-91.3811872116,-57.118967717,5.2394117646,3.9629691813,-75.3124140074 +-67.2388397561,-1.7609223278,89.8536075585,-2.116696163,-129.6827367897,-66.378217462,19.2372248016 +-72.6054580315,-62.8501705636,207.29086209,-88.7058892761,172.8772352365,137.414349038,-38.9985834827 +-3.6374465887,-23.3214271782,72.0877655771,156.813959957,-40.008355168,118.299046864,180.3105218469 +12.9377843826,-9.3783442802,167.1757070325,72.348973358,99.8909811198,-128.103195839,-240.468740298 +-72.8745317284,-63.0666000846,206.6992900294,-90.8328808753,175.4289977084,136.6016755268,-38.0738433974 +-68.5753986296,-4.8691670086,89.4504759683,-1.4347766695,-128.7879171524,-66.7545969763,19.7611219818 +-15.5072278433,146.8963236087,-20.2795875073,-26.9266720054,-12.8629440394,-255.2269153289,59.2702699821 +-2.4455049677,-23.180372673,72.0919305564,157.7026476595,-40.8020249457,119.5496532093,180.1803192453 +-30.5285981659,123.7255702791,-27.5423107221,-4.1893470467,82.653894523,53.8821397009,7.8528875814 +82.3073095635,-53.8629347742,-64.1408592074,-.1100193416,-68.4377379929,-82.1490186674,61.4952566295 +-73.1689708727,-62.7283800781,206.9492226639,-90.0324315479,172.8607156393,137.4154653532,-37.8754386406 +-28.7279156408,126.6044933152,-28.1195163306,-4.611073325,79.4160960379,54.302762631,8.8116985199 +-67.6852243404,-2.7548809754,90.2423711369,-1.7395917511,-130.4467205877,-66.2374001459,19.3611468062 +-2.4179946722,-21.9021094008,70.4697393247,157.0907685167,-41.6049366056,116.9041126175,179.3799712469 +134.4945387753,.9766104928,73.7767140648,-156.5408786682,-24.5507364812,33.1313418634,-7.6890761207 +-4.574310606,-21.6955638887,71.9937904603,157.683256105,-41.0893380831,116.9141953532,179.9577420348 +-54.0679736682,-139.3848188641,171.7846823939,42.0533814012,153.2167642264,70.0482281093,-59.3542000058 +-2.1801227486,-22.2637944958,72.1406644663,157.6100668166,-38.3189176663,117.4132742325,181.9471778973 +-.337191169,-23.8483301815,71.6747184159,157.7139084314,-39.4697371827,119.690717007,181.0505066622 +-68.5237227835,-3.0676129896,90.7343004495,-1.8432208655,-130.0462596273,-66.6171234814,18.9049239967 +-31.4769998629,125.0635845163,-26.3812575305,-4.7207013844,80.697902949,54.6305787726,8.5964863545 +-69.0680372179,-4.2693221139,90.0734054338,-1.7859359802,-129.8855632682,-66.8437899765,17.3412131584 +-4.5759892394,-23.3594640884,71.432181159,158.4809844956,-38.597736723,118.4175281378,180.737402484 +13.5883713915,-9.9797320326,168.233335483,73.8980952198,100.5575018384,-128.2252151131,-237.9999013815 +134.4156235589,-1.4953354002,76.3712605761,-160.7978324911,-23.3424532359,34.0506858232,-8.9308822705 +-30.3146538934,125.4053869098,-26.9192996506,-3.4283719528,79.1889837828,52.4308498509,9.1851066453 +83.9427444235,-53.7852352933,-64.1191365963,1.0019074119,-65.6732339746,-82.102846966,60.1391207193 +-14.439545452,147.4139933673,-19.9715726777,-27.1516994022,-13.5751469033,-253.9183745057,58.8448411458 +82.4046430836,-55.349685846,-63.5817824783,-.1162744667,-69.9583793922,-83.4970684727,60.328731854 +-68.1279663037,-3.9626996316,89.4796262894,-.4931322239,-128.8286795658,-66.8331114855,17.4007027224 +-72.6395237969,-62.4046510115,207.5404790355,-89.9489806164,173.7858275065,138.4934586457,-38.3020272538 
+-36.2720764098,-28.7378684132,-91.4904073308,-58.0357688806,5.9667731351,4.9062231901,-76.2014481842 +-36.3171020366,-28.2207596538,-91.6478256955,-59.1208883295,5.9366503008,4.7462486881,-75.9001992936 +-3.7001734894,-23.2384622704,72.4689622688,157.0871050916,-39.0787390469,117.2281073598,181.0504708709 +-15.3015900625,146.943298239,-20.4320078227,-27.799063846,-14.2461238067,-255.33294678,58.9403657125 +82.0822442314,-54.218674668,-63.0468979398,1.6505549386,-66.8806292443,-82.6218515485,62.3748211045 +84.2707675012,-56.176293538,-64.9114787932,-1.7743143234,-67.1746005256,-82.7546547637,61.5477522145 +134.8964445485,-.7745762431,76.7376879663,-157.9792170672,-22.6729166597,32.5275683262,-7.665778498 +-55.1542933547,-136.6607263131,173.3079356886,39.668643368,151.2198576962,68.5633171759,-58.8892787505 +-3.0678329078,-21.5676624799,72.6093357312,157.0549005984,-39.7456186515,118.9797550446,179.5815666706 +-54.3258921619,-140.012989172,171.2076096449,40.6745854369,151.7238860669,67.6954667372,-57.7521811314 +-52.8099527968,-139.0247016186,173.1257536955,41.7332173041,151.7954760878,68.5994207889,-59.916894632 +-54.8540542357,-138.66069685,170.9486005082,42.2842000045,152.8948336178,68.8561152669,-58.682415113 +-52.7366187166,-137.2648856404,171.1543214947,41.191934212,152.608208822,67.6912345076,-60.6033944194 +-30.4394732898,125.4611537231,-25.4578253574,-6.7925627793,79.5244858125,54.5513233105,9.6242478555 +132.3698943337,1.8801803118,74.6864823533,-157.1944148885,-24.6601998416,32.1229900336,-7.7336033874 +-73.2324319348,-62.9467209118,205.8593119649,-89.2433374021,173.3683144711,136.4003209056,-36.7586760829 +-73.4503356013,-64.2366176468,207.7707407288,-90.1548005023,171.794730339,136.8506481396,-37.7795871514 +-4.5997445807,-20.9538712463,71.7613266877,158.8316720843,-38.8925729888,117.5334562869,180.3636235383 +134.1596321729,3.5877315031,76.2054616118,-156.9475955408,-23.8013907299,32.258680039,-7.2370519186 +-53.0682859022,-137.6651426635,173.8010779745,41.6864172544,152.9498239138,67.3740370505,-57.9532879757 +13.1588892923,-8.485381106,167.3332517218,73.5134350604,99.1049035592,-127.8807909312,-239.0658439645 +136.8027511859,1.2301206785,76.1080646593,-158.3931170228,-23.5974956414,32.7952578836,-9.2863996219 +-4.5150692524,-20.688557035,71.0351757481,158.3571065426,-39.8746549136,119.7402342009,180.8293211893 +-31.1904604954,124.5708000234,-27.7721710264,-3.4891774023,79.8813959807,53.5358782039,7.1548671522 +-5.1140089536,-21.8125793407,71.4969146442,158.0545361826,-39.3954532811,117.9091281554,178.9030332492 +-13.9806947937,148.0569384565,-18.9323652942,-28.2688903831,-12.8666429542,-253.9519130005,58.3210015656 +-14.1160567405,148.5275311085,-20.8209087247,-25.9367800529,-13.6907815606,-255.1247854564,57.5249750626 +83.2840323973,-55.0486029801,-63.2585045804,-2.0786107621,-66.5086635352,-83.535788304,59.4578314574 +-15.9596957105,146.2288680229,-20.0131014673,-26.4551768697,-14.4882177763,-254.2324978029,59.5975896974 +13.544197176,-8.7334080223,166.3566196109,73.2756155434,100.2812997354,-128.1913923331,-238.8961753486 +-53.7128283403,-138.3979427653,172.1066193859,42.5917198194,151.7523135664,70.1173217345,-59.6940361271 +15.1186425768,-9.1735056383,169.3430135777,73.7716906705,98.7510206575,-127.4642225581,-240.0075200905 +-36.8242119297,-29.2693653909,-91.0319742106,-58.334943599,5.5959047158,5.7701879988,-75.3133221436 +-30.2616379805,125.7821651835,-27.1021576334,-5.2430525828,79.74222047,53.7661737538,7.1668065709 
+-2.2605019331,-21.6904226746,71.1521840511,159.199382131,-39.3397753111,118.3083181747,180.2461952052 +-37.0036031873,-29.4482850773,-91.0833725531,-57.2189708623,5.7762303701,3.2619207302,-75.1777580642 +-31.7677305102,123.6072042756,-28.5829518671,-5.7500523302,79.1037526358,55.1592152744,7.9748001089 +-67.0480960699,-2.9137259804,88.1417083412,-1.3878491486,-129.1453986386,-66.9953466138,18.3496971968 +135.948919976,2.4737861952,77.2069894284,-159.2490810894,-24.9751170817,34.4851588996,-9.36227567 +-72.7951401482,-63.5162873354,207.5201844075,-88.9467046442,174.1424620904,139.8026281712,-35.1163860593 +14.2269943278,-8.9426710958,165.9323092006,74.0399524996,99.743881083,-127.5038451143,-238.5280314039 +-55.9416069633,-136.4902920146,170.1486551178,39.9920048161,151.4001771467,66.581600853,-57.5498770292 +135.9984572428,.6937800822,75.3386970773,-156.8937536648,-24.0372012924,33.339364667,-8.9580930486 +-69.0207135254,-3.5826658561,89.6220883857,-1.0840667566,-128.6375320896,-66.5648325989,18.0414452434 +134.7541026177,-.2627272507,74.5618725033,-156.4590372241,-24.1377895396,34.6104668711,-6.7748843556 +-35.8516595633,-29.1287694366,-90.4796487693,-58.3083034581,4.1315185533,4.9459866852,-75.3398523897 +12.1952924087,-8.1069531933,166.4362055544,71.5292163379,99.031010777,-128.297970854,-240.8420870517 +135.9379461648,-.3057446895,75.2709279631,-157.3084078476,-22.8129364267,35.3208211467,-7.4247628744 +82.8248631513,-53.6317901136,-62.9267095555,.957609293,-65.5252946702,-80.5871361953,61.9411975173 +-74.2501196837,-61.818968025,207.1763744001,-90.7244906374,174.2774353862,139.4898864081,-37.2395598246 +83.4159650612,-56.2757162002,-63.6615619086,1.4469084075,-64.0034586792,-83.2168250357,62.7626936502 +135.9000677681,.8761531592,74.4870909815,-157.6796683448,-22.8869882722,32.4096104181,-6.8680538417 +-14.2488391343,148.3729075907,-20.2069368244,-26.5103452276,-14.1955075854,-253.7282086344,58.2236334806 +-72.9672558092,-63.6066763671,206.0622201541,-89.470715493,173.081483771,137.4387922642,-37.9136158203 +83.6830487606,-54.543393344,-63.9167628082,.7108688205,-67.5477954148,-83.532540014,59.9120104355 +-3.0847802155,-21.8926001374,72.3539103228,157.3147898922,-38.7018793798,120.5005070724,179.9691796313 +82.8597236315,-56.4009351051,-62.1206503405,.5319015917,-65.2500303156,-84.3787952403,61.9090705981 +136.0733052083,1.6067798553,74.7313479037,-157.1418811423,-23.1666131816,34.904250031,-7.892125707 +-36.6115388723,-27.5114395248,-92.7798547386,-58.898474555,6.1704388837,4.8216274869,-75.8207413357 +-3.4945459003,-24.3107131526,73.5305238862,158.0323462324,-37.7038040866,118.661961726,180.1033993376 +-6.0794131787,-22.8988784668,71.5743871565,157.9649767352,-40.2358029232,118.2835897756,177.5329512648 +81.2811061426,-55.5554629752,-64.8411147008,.9980811448,-66.8084859092,-80.7059969915,62.2060752321 +-38.2889416058,-27.848903568,-91.7631569211,-59.4008834821,7.6520395532,4.7443617246,-74.0475525266 +-15.3390345459,148.0244780188,-21.2077789085,-26.0871822669,-13.551151984,-256.3968818874,58.7296787398 +-2.5716582118,-23.9563823402,71.2193284449,158.2784793472,-39.2967819308,116.6398618765,177.2613106169 +-68.0900819071,-2.4303993611,89.8473277301,-3.2256148107,-129.9950793541,-67.9741722389,17.387268219 +-31.2475494935,125.7556599555,-26.4479279984,-5.419245028,79.8859390105,52.9234599888,6.5456107892 +-73.8886718555,-64.6114521144,205.8903241832,-91.6229273916,172.6374585081,138.1547930983,-38.2041100359 
+-36.1901313146,-29.1576517665,-89.4393073268,-56.9105566427,5.0812719343,3.1400254768,-75.8893936498 +-4.7947679907,-22.5316925801,70.9663549932,157.5690740782,-40.3109130265,118.2213864641,177.9495102277 +-2.3642392801,-21.8121260889,71.7851628985,154.4832708567,-39.654246046,117.7426986804,180.1620638929 +-68.2506056238,-2.0791143814,89.6707411404,-1.1066196826,-129.6904445478,-67.0530093122,18.4013138696 +-73.3596768655,-61.3356509148,208.3098124357,-89.7752847383,173.3995951552,137.4812443017,-37.8578036286 +-73.3325507102,-64.2782887828,208.2324335996,-91.5672371451,172.8023930641,137.4842028327,-37.7714613337 +134.0321326035,2.3568868833,75.5105639521,-157.116714023,-23.9869701666,32.7534381643,-8.8716901635 +136.9235902471,.8197155308,74.3839347818,-159.6674528601,-23.7258933496,32.8176690961,-8.0290590027 +136.17838862,4.828063421,75.7190486058,-158.9577791356,-24.4176446172,32.9234765539,-5.8695199018 +-30.7107659272,126.630409346,-26.3145093749,-5.6950767345,79.1509637844,53.6639835387,7.2737362079 +-34.9640689238,-31.3823554654,-91.2668583143,-58.3250176394,6.0331671743,5.1468988424,-75.0056422704 +11.9580762138,-9.8837857711,169.2380139588,72.4884597358,99.5821925587,-129.1138258327,-240.2701967599 +-37.3738217295,-28.5750313798,-90.3011131978,-58.4512741288,7.5065730294,6.6404476869,-73.9398522033 +81.6934267334,-54.8189789593,-61.8530851691,.1084654115,-68.2229771375,-84.2581167829,61.4194370507 +-37.2362407457,-29.3112894892,-90.4457706798,-60.0752813476,5.1886719399,5.3492542918,-76.1936330269 +-12.5661320967,146.721385055,-21.3469558237,-26.0048908553,-13.6971628644,-253.4483824912,57.5825573866 +-36.5591745318,-29.4025043394,-89.7993468872,-58.4535642875,6.6072107789,4.9386656732,-74.7753429755 +-68.8438516818,-4.5695703686,88.6744913885,-1.3509023453,-130.6089390693,-67.6114661569,20.2298324708 +-1.5325187759,-23.8426684404,74.2887675848,158.4541644406,-39.2714823182,119.1284981865,178.1515518151 +14.5668601406,-8.7463001228,165.8472454632,72.4213335427,101.2391007128,-126.4539029718,-239.7614545419 +-75.1300591647,-62.5899818412,206.818890486,-90.4630154238,172.6945023723,136.3441323235,-38.1227732279 +-72.9702096763,-64.0975013838,207.0698711218,-89.2792884665,175.1463277985,138.4117552306,-37.7980578832 +-13.7582971719,148.5350978428,-20.7135797903,-25.3025371373,-14.3021474729,-255.0853821563,55.6987566093 +82.6183870774,-54.991555952,-64.6552753243,.1037635572,-67.8882587017,-83.0366902279,60.5079687486 +14.2813286926,-9.2789681031,166.8171983951,72.8925666508,100.4737346406,-129.4166958384,-239.3831063303 +-68.5703988872,-4.8216520502,88.4040049158,-2.1281490412,-128.889571263,-67.5539178126,18.4416509561 +-37.0531478394,-29.4392142606,-90.7802437756,-56.8247765586,6.3011054313,4.4122897377,-74.9132653885 +-37.9492274852,-29.6350549601,-89.5832932178,-57.8089688201,4.9443017178,4.2675062961,-74.3885388123 +85.2268578533,-53.315084557,-63.0325029567,.6814138046,-67.9331944558,-82.9320223288,61.5303560012 +-30.7599062085,126.5821268991,-27.5942834367,-4.4373797847,80.0189786356,53.9027651664,11.1534492628 +-35.6840822352,-29.3920579628,-91.4268346135,-59.2572127554,4.1452043432,4.2216679318,-76.0713275208 +-72.6031303847,-61.6395302837,207.183149512,-90.3367239116,173.9746161828,137.1491159746,-37.3177260633 +-54.60146973,-139.4589631433,171.7196341099,42.2888661579,153.5309012803,68.6147419557,-58.2814101176 +-14.0618776472,147.7521833329,-21.5157554572,-24.243914322,-13.0472001114,-253.4977334733,57.9035092456 
+-29.2870073651,125.5781459532,-27.3373296819,-6.3911274985,80.1002577575,54.6283207887,7.9316354939 +-71.7283261986,-63.105873683,207.2101800036,-91.6282583587,175.680076301,138.9417905196,-36.7574640602 +-37.5803728371,-29.2631145957,-92.1032007927,-58.7355515086,5.050970969,4.1870456535,-73.7920081627 +11.1490049247,-8.3301400602,166.8858428252,74.0694735013,101.5314313752,-127.3006481003,-241.326870958 +-2.485506766,-22.5175828006,72.1881951577,158.4856143386,-37.1512777989,118.6421667911,179.0193603321 +-54.0914355284,-136.5221156945,171.7235723244,41.6406698919,152.0724264858,68.0877593555,-58.3887872404 +13.2434743622,-7.6459609218,167.7055384702,72.3532651231,99.5889448832,-128.0261415422,-240.290478019 +84.9942771076,-54.5738042301,-65.5486943638,-.4156096541,-68.8698548145,-82.7694185886,62.755513635 +15.9594435943,-8.3276963651,167.4112785544,72.5091931089,98.8569567469,-128.610741902,-240.7720913657 +-4.5113660005,-21.6653366717,71.0896248319,157.1618516483,-39.1672918501,117.271520083,181.4386646998 +-75.2236798675,-62.6998074519,207.2727052534,-92.1962202115,173.0044369738,136.650490833,-39.158713631 +-37.4563920331,-29.0148905041,-89.8964098566,-58.2016438555,7.0076360135,2.7908356894,-75.7285717711 +-54.0571913288,-138.234559482,171.5959919266,43.7310428434,153.7028297432,66.0972220437,-59.4935062016 +-15.6506566782,148.2008562621,-21.1232205026,-25.0168059389,-13.8482784342,-253.7609465427,57.6506758162 +-3.9557576856,-25.7503527443,71.6275994407,155.9649710909,-40.7408616583,119.1874578281,180.0021664273 +81.3675784547,-55.6589500677,-64.4765164706,1.327829722,-67.2016782405,-82.0866446015,60.5762513931 +-67.235586723,-4.9174387339,89.773855159,-3.0469227315,-129.9419319324,-66.8988222124,17.2913400225 +-29.9102155335,124.5739892381,-27.6628633852,-4.7240141075,78.9441167056,53.141744096,10.8225188443 +-14.8026842749,148.8488468863,-21.8911994095,-26.0491367575,-13.0629811311,-255.2466394275,57.9820855631 +134.3676815218,2.6187120289,76.6285976903,-158.582080486,-25.612437202,34.5480126277,-8.6292558174 +-37.9746964503,-29.1987449679,-91.1175755527,-57.6105589853,5.1973047239,5.6917096278,-75.2165132238 +-66.7180782884,-3.7635491916,89.5918767825,-1.8079280246,-129.4673117612,-68.3678407041,17.6640506126 +-15.6466946543,149.3491671054,-20.0395516082,-25.7008432605,-14.4344789859,-253.2715511346,56.6134678133 +-68.1571461261,-3.7530921121,89.4037124582,-1.2728729536,-130.282830047,-65.1069194925,19.8058332304 +-35.8987682151,-28.4436307202,-91.3962218926,-58.7291354032,7.8057639837,4.4408317668,-74.7447935289 +136.7759240514,2.1614651413,73.8373087376,-156.0740884144,-24.9574798443,34.4453843727,-8.0152780242 +81.5456500452,-56.1449526067,-64.9255838097,.2586492233,-67.0391371857,-82.9236517591,59.5835902295 +-74.5562459319,-62.3640913291,207.5195724851,-91.1944112964,171.1854251076,140.5955220537,-39.3069486893 +16.7302906787,-7.0091641568,169.197119236,74.0315843889,98.7022950554,-127.420837564,-239.2164472349 +82.44777502,-54.7469956771,-61.9232703173,.4298734181,-66.3937158642,-82.7284502192,62.1140825685 +14.2090287915,-9.2738334748,168.2936355592,72.7315529922,98.7294546236,-128.6896628666,-239.6227785269 +-72.3521522062,-62.1933144635,206.3610013636,-89.4567080047,173.3059222095,137.6091862039,-38.8465207295 +-72.3542005234,-62.2316171046,205.3765437529,-89.2981623881,173.395784142,136.7384492224,-37.9842138359 +-76.1055380974,-61.9224349484,206.0582364733,-90.9334106169,172.4497510558,136.6926427139,-36.3772558852 
+83.2640044583,-53.2935477935,-62.3612584256,-.9298085819,-65.6356342988,-83.4405687567,63.3250199495 +12.5678164345,-7.654303463,170.2788341751,73.5441169852,100.444466613,-128.0538942933,-242.3235832023 +-28.8601663984,125.5107754425,-29.3868760871,-6.4303083355,79.5888178857,55.8043494029,9.8629938667 +136.987162077,1.6544595249,75.5192979086,-157.835640998,-23.8271173719,32.3296168668,-9.0195146042 +133.8596008489,3.5731777613,75.8496460118,-157.514550069,-23.4849258298,33.2101258728,-6.9263227858 +-54.1613714103,-137.1262958365,169.0359571837,40.4712937257,152.3134693851,69.2158501076,-58.9087071698 +15.640051679,-8.9387156956,168.5295106673,73.9192335281,101.1274428412,-126.3433368886,-239.9027825964 +12.591947309,-8.5304477049,168.1310681615,74.9208586171,99.5421825113,-127.6831005883,-239.3994944465 +135.7671184684,.8864338765,75.6249647084,-155.7171730215,-24.4919542756,33.4475242136,-8.6121400091 +12.9969438358,-10.4796564921,166.4956927818,73.3676807227,100.9082026266,-125.8727767462,-239.559329343 +-37.9090021181,-27.3898152977,-91.2371107953,-58.6036022507,5.1247182076,5.2107416252,-73.6407100025 +-35.2863335754,-29.7426464356,-92.0879294984,-56.8197953907,6.6262732007,3.7368002148,-74.1894518791 +-69.2794025171,-3.3193514145,89.7516162877,-.8744518916,-129.491740781,-68.4564363229,17.3695745599 +-54.7848496518,-138.654014692,172.3255503039,41.7550077339,152.7373907312,68.603879485,-59.79213763 +-73.2942287127,-64.2500344005,208.7581554759,-90.1853289091,174.2450616117,138.5477754429,-38.1814283704 +80.9942103045,-55.6425747641,-64.0014642766,2.3604987853,-66.835889541,-83.1593385843,62.8076920107 +-37.807334006,-27.377919523,-92.2392420552,-58.3470753118,5.8681588,5.2269676212,-75.1303777977 +-3.4455068408,-22.2170924655,72.1055132865,158.5702450046,-38.4101232149,117.2572896012,180.3126601684 +13.9236292546,-7.6844443634,166.175216646,72.3676178397,100.2347204827,-127.6336191242,-238.6360100915 +13.2871066603,-10.0007917467,166.9705991355,71.0565954551,97.0935201285,-125.8843745701,-239.8158270415 +-16.1859232343,148.0815101309,-20.5647875138,-26.4618637457,-13.283046177,-253.409207342,56.7931695343 +-67.3178857657,-2.972872926,90.8185997279,-1.0811912732,-129.5096122256,-66.5258848922,18.7708146067 +-38.251830897,-29.0566402097,-89.4174637254,-58.4749593076,6.8523743019,6.0003901606,-75.0666430914 +-14.9609934282,148.1499401663,-21.4066478839,-25.5545290687,-13.8657650722,-254.9116933093,58.8628765922 +-36.8489203069,-28.0550718066,-92.1395081996,-58.9967386163,8.0785654442,4.3087379136,-75.3373802253 +-68.5275507538,-5.0890439057,87.2739364429,-1.442732934,-128.9282982598,-67.4964761508,19.2516091985 +-29.3190368646,124.7649531204,-26.9224341302,-4.4385834681,81.7667350781,56.1647051568,8.1516477501 +-13.9028340153,149.3329879924,-21.4093303545,-26.9367996835,-13.5901887577,-252.8843342235,56.6367683088 +-5.3311583256,-24.0410846844,71.3524317347,157.4888941354,-38.6403407499,119.9423706532,178.5865402762 +-29.2839484805,125.3018422191,-27.500822855,-4.5274740245,79.6209583597,53.6542379635,9.6438889398 +81.491142087,-55.9223321335,-63.46489705,.0049755915,-67.2700581934,-82.5479534999,61.6353067915 +13.8440055359,-8.9284298683,167.5493079916,73.4901222381,101.2204298608,-129.3025160912,-239.6389113851 +81.8886671108,-54.9009096841,-63.7529058829,1.1857326762,-65.5313795693,-83.0069517513,60.650365977 +-37.2115271516,-28.664309613,-91.4103387688,-58.7718015536,5.4080863338,2.0303156759,-76.2737029053 
+-15.6046481857,147.0597335496,-20.689006201,-24.8315677718,-15.2513041055,-252.5335906053,55.1870326661 +-66.7600607498,-2.7062259582,88.7145343276,-.3766352814,-129.2744890378,-68.1341502519,17.7935291272 +-53.888050747,-137.1025585366,171.2652457206,42.521270159,153.356649941,68.1696494527,-58.3137012784 +-13.6844766745,147.0314623529,-20.7165627034,-25.9764785618,-12.6979687464,-253.5703340585,56.906400784 +13.2156865714,-11.225828026,166.4796484425,73.3451320985,99.7109286956,-127.884213849,-239.5545793374 +-54.2792542299,-139.3300651382,171.9046006108,41.9150471537,152.4976920419,70.1909733839,-59.4602861576 +-67.0466671349,-2.0839663643,88.627093327,-1.833196592,-129.8142103986,-66.0673409471,17.7495587148 +12.8366137441,-8.5620067305,168.3220655857,72.6724112739,101.6611109172,-126.5547000954,-237.9411038288 +-13.9162399764,149.5862222863,-20.8096334515,-24.4232788252,-13.2059186428,-253.1403522912,56.9385427585 +82.9836167959,-55.3343267646,-62.8430754225,2.2726136935,-68.7749913296,-84.1410219228,61.5350770783 +-14.8911554097,147.1178555099,-21.4067361551,-25.1828842325,-13.2243398084,-255.0985841602,57.9232728784 +-55.8594627205,-137.9800533098,171.9095880217,42.8435167481,150.3231161691,70.3416074977,-59.177146813 +82.6959760864,-54.0509988523,-65.1680478369,1.3043953021,-68.2879733563,-80.8378878504,59.9473412018 +81.8635534534,-55.5099680096,-63.3904813715,.7377009345,-66.4553113426,-82.6389429567,60.4456188309 +-3.3662452608,-21.853477846,71.7706392188,158.0871090846,-38.7712374886,117.4155169809,180.2540277631 +-66.8919716827,-3.3069542,88.6560189269,-.4527198008,-129.4394994618,-67.4142902566,17.2142592724 +135.6453092612,3.7248323819,75.466178816,-157.9552172794,-24.1287434882,34.7312370305,-6.6253708028 +-15.403938803,146.532925031,-21.4121308464,-28.1391563496,-12.5242715858,-254.3563098353,56.5702899908 +136.0809907628,1.2627010756,74.2533578342,-157.3131979253,-22.7482915423,34.2560426922,-6.8674708514 +-53.7240866944,-138.2925828045,171.7989561046,42.2968162646,152.2468148361,68.2691428463,-58.021972818 +135.5748000535,1.2395311101,75.6454897466,-158.8987495932,-23.9722280128,33.015187334,-7.4328985098 +-72.9577012383,-63.6192655559,206.5004806529,-87.5320319291,173.4008856932,138.0035396424,-38.2061712886 +-37.4092313444,-27.4149740059,-93.5862517105,-59.7389454637,7.6315701118,5.0850488569,-73.5835650135 +-37.3523732765,-29.5671416599,-91.6921548557,-57.3168701266,7.1123423569,5.4967453768,-73.4898281189 +-29.6810670043,126.8292569783,-28.6427672381,-4.5719726307,78.807823211,53.6521772741,7.3746123823 +81.8551973734,-53.6548268903,-66.197334555,.9696637698,-66.5219783051,-82.3954366578,61.0999218941 +-73.4694939168,-62.8513608875,206.3507265065,-90.3344627559,173.3004701973,138.5108088235,-39.1834668158 +-67.4400177459,-2.5462782883,89.678294501,-3.1439124781,-130.0673694145,-66.0060504203,19.3005075357 +-53.776426865,-138.8357835622,170.7143391371,41.2134086993,153.2740494554,69.4600741897,-57.2697159543 +-3.839027696,-22.3108129669,70.8194824195,157.0716399197,-39.7300334573,117.8704182782,180.5021995225 +-66.4783542778,-3.2974596236,89.2993653,-1.1742117358,-128.7783438267,-67.3142141081,18.5056107071 +-67.7689954811,-4.9754846323,87.685942202,-.289633087,-129.1852204204,-67.0831165207,18.8736741496 +-67.6190257534,-2.0761948792,90.2342899401,-2.5511516012,-128.1882978399,-64.8314945978,19.5306723756 +134.7252372117,1.883031225,74.8140368235,-158.8570645399,-22.7733159293,33.7057736951,-7.3669163012 
+13.5778744581,-7.8180849878,167.0001467424,72.6693492713,98.4312653613,-126.8019817193,-240.0120779442 +-12.8916033224,147.0174297279,-19.3691234682,-24.9637838771,-12.9704775165,-253.5925821579,57.6465357378 +11.0508769214,-8.4733965998,166.5941895505,74.1607344438,99.9181309051,-127.7988101122,-241.339135551 +-3.9974917211,-22.6932479411,71.0797341538,156.60771695,-38.1473100785,118.1475870151,179.1324152051 +-66.9419304081,-4.9782082856,90.5916659834,-2.3462648394,-129.4043404441,-68.696068879,19.0443727667 +-4.8979116074,-22.2864303884,70.9985454931,158.0466634167,-40.8570675972,117.8681502842,180.3084863242 +83.5606672223,-54.2969383566,-63.6906529315,-1.6262413086,-67.9934353963,-82.2002723411,62.8122184193 +12.9790854508,-10.0164012654,166.6902415977,74.0478938884,100.5090203764,-128.6428779944,-240.657557989 +-54.7464219811,-137.045185992,173.2195870003,40.8235235614,153.0175468698,70.9718489385,-58.7904773892 +-4.6666730661,-21.3378495986,73.1743012337,158.6864392418,-38.8853573011,118.5184983019,178.7407544474 +82.9910607,-55.5225638504,-61.6740947927,-.5004482526,-66.3421390631,-83.8111949201,59.3116804356 +83.3913665663,-54.4034280397,-63.2882500808,-.2253739318,-66.2272151731,-83.3162392793,60.1853655907 +-53.8956431561,-138.4250134105,172.0467094713,42.8113040754,152.2745384185,68.2450933208,-60.139000185 +134.750418648,1.5943522402,75.188026095,-156.6642654546,-23.8737804585,33.0879573937,-10.0166012638 +13.8141887,-8.822888688,168.1146697081,73.8956519693,100.840531646,-127.2958862145,-239.5250787029 +-54.3161376142,-138.703698143,173.7336875715,41.0759002509,153.1934780415,68.9918020748,-59.2149190503 +13.7679632903,-8.8256168867,167.1188843018,73.3568753546,100.0525117708,-128.6574577406,-240.2838282156 +-4.4436850253,-24.0827042462,70.9059723989,157.2466267327,-38.0890438049,118.3624095258,179.5884185809 +135.7973595711,2.0579805847,76.129437895,-157.4757104294,-21.8527945352,31.8981799653,-8.4875016634 +83.5028749413,-55.2462740096,-63.9276607196,.4407104838,-67.1981030236,-81.9358879972,62.3767680942 +-37.025951468,-28.4620565659,-91.0783264609,-59.5692154854,5.4884378747,6.1171090931,-75.2274929647 +-55.246669028,-138.1388006121,171.8034504925,41.5903225726,152.1708791967,68.701729637,-59.4130683598 +134.7518412682,2.3640983232,76.1482893528,-157.4432566846,-24.7651515047,33.8840588776,-7.6192661378 +-31.2756729595,125.9042314362,-27.5017740324,-5.322243647,78.4556134295,52.6141893642,7.5613572491 +-66.7335106537,-4.1074970093,88.2314343375,-2.5293502472,-129.1109221664,-68.9661874029,17.1960021195 +82.6136106393,-56.0855262531,-63.4909830446,1.8974442818,-66.3352225427,-81.5481234107,60.8540930877 +135.9391504057,1.8536547594,76.1341654411,-157.2598146486,-24.7301612995,35.6093959444,-8.1002823866 +-3.9799594259,-22.1698364866,71.7180371255,157.9419208682,-39.2161588412,119.2063475542,179.473867589 +-29.2117931157,125.385432634,-27.9206995144,-5.1586660143,82.5651410407,54.1742623806,7.7357153295 +-2.8447738102,-22.7075116857,71.5547637829,159.9608070879,-39.5938685902,117.6965482267,179.2427759561 +-72.2838503957,-64.6010876131,206.2703894808,-90.3013828201,175.2188732488,139.0828700153,-37.0588604881 +-34.3564160856,-28.3450246753,-90.6773124476,-59.6565580901,7.5846773826,4.3819573723,-74.4325648491 +11.7585843645,-9.8432312721,168.2217813381,72.1557652509,100.3718033923,-128.0415307983,-239.5626400193 +-3.8253012249,-24.1329744907,73.0549704236,157.7584294695,-39.5300000126,117.7245328358,179.2872396631 
+-4.7324161531,-22.6166745142,73.3281289392,158.9903722395,-39.6467253109,119.9196131167,178.8848697973 +-29.5652455779,125.4519894022,-26.7712008484,-4.2385833868,78.5499419098,53.3695061053,9.9037365641 +-56.6039718229,-136.2025264164,170.4702979046,41.1064435705,152.9458991261,70.359104484,-58.3856334469 +-54.5786134329,-138.6222307231,172.1711824918,43.4523671719,153.0563456949,69.0731236658,-61.4505201893 +84.2330354457,-54.5618606412,-62.9161565385,-.7636751825,-65.5539441925,-81.0986616338,61.9304378344 +-13.6613001125,147.0334386564,-19.1495328206,-25.1537280885,-14.6554164613,-255.7400964065,56.8564684696 +14.36093698,-9.8748903095,168.0513121547,73.6856125934,100.0359129419,-127.0359137803,-239.8976474625 +-3.2106022035,-21.8837701981,73.0986699545,158.7910393304,-38.9975973201,118.5509794975,179.2332527909 +-74.3406590179,-61.011722478,206.3973637356,-89.3940849633,172.5482704135,138.9349633084,-37.7352425206 +83.4095072016,-54.3757875095,-62.6404974873,1.2214802666,-67.6203770592,-83.393152772,60.5880424765 +14.0863711263,-9.4838224952,167.383038566,73.205984581,99.7652957831,-126.3870003172,-241.0319782127 +-73.6305475623,-63.5016119412,206.7209980045,-88.5815335655,173.9510031824,137.8668991966,-36.6391790965 +-73.0939473407,-63.6393350924,208.1280795601,-89.9411342615,174.2266257267,136.9515290089,-36.9578822624 +-14.7216244892,146.8664688271,-19.7261409425,-28.0173934915,-14.909166857,-252.6189080483,57.3759531096 +82.5041352496,-55.7713103966,-64.3189413631,-1.5660529897,-68.3875313932,-83.252390654,61.2213579859 +-14.6471550277,147.8115363986,-21.5238153906,-26.0921942957,-13.6622459,-255.4692874505,56.6526455045 +-13.217704477,148.0600069644,-20.5702514293,-25.5698266103,-11.5443713176,-253.9492852408,57.6556318012 +-72.6942094739,-63.258402553,205.246363295,-90.2559198328,173.8998699305,136.970422517,-38.0573666731 +-1.2419784268,-21.9547921087,71.0870191797,160.694892016,-39.5188971101,117.0232974882,179.5001521883 +14.0746028149,-8.7906880617,167.1668945781,74.700508155,99.739781339,-127.4447261704,-240.2457422314 +135.4594018488,-.048371177,74.7140129878,-156.8237173652,-23.6688615886,31.7638011745,-8.579404414 +-32.4484666906,125.5529070878,-24.8814416758,-4.8634835763,81.1417346417,51.4818019193,9.0207851661 +-2.8460306984,-22.0100835205,70.6629729695,158.0026460451,-40.3032794425,117.9688962208,176.7876871003 +136.0626634396,.47800982,76.0351524756,-157.4793936356,-21.9285250531,33.9689542502,-7.251100647 +-66.2009096872,-2.5237372305,90.2281671574,-.418558734,-129.1783683565,-67.4044308418,19.0790400442 +-54.1125062026,-138.7354537869,171.4596310282,40.9364097974,154.2688648977,67.7664671214,-58.0900821183 +-67.6922893028,-2.3937761522,87.8980437314,-1.4444179709,-128.3141577296,-65.3878674912,17.2766669939 +-5.1799213741,-24.5721989848,70.8056391582,156.681101183,-41.373553154,119.6301819963,178.863777965 +-67.0816967512,-2.7706116425,89.223770138,-1.1664557369,-130.1029453965,-67.3659691531,18.0600638392 +-37.0855610004,-29.3917912402,-91.2336653085,-57.3933088484,6.0905044858,5.9272706573,-75.1387580294 +-54.765482453,-137.1879612581,171.9234546383,42.544834449,154.4091079164,67.8614317497,-57.8456240277 +14.6380622722,-9.021931627,166.9298757845,72.5871384879,100.2728373138,-125.6980787161,-238.7903658472 +14.6305775841,-7.549211855,167.3614627093,73.0345980143,101.0584622945,-128.5863032457,-240.3652639654 +-4.4697364914,-24.8231196595,72.5436363814,157.3561299008,-38.504849443,117.2039662492,179.7679650866 
+-35.8096896941,-28.7468263278,-91.5102469555,-57.4488036401,6.2956077089,3.2838188031,-74.604196098 +-36.7734929896,-29.1555289066,-91.8517177573,-57.2768780261,6.9100561521,5.2680835116,-74.2518582752 +-2.0637614591,-21.0947441334,72.2889630288,156.6480995508,-37.8677042964,117.6136679597,179.1074812855 +135.6110550506,2.8269250836,75.923191056,-158.6072615335,-24.457300335,33.38807013,-9.2062799989 +136.1935304537,1.6626616339,75.5961921872,-159.160264976,-25.2039702231,31.9974594897,-8.1123729387 +14.5334753076,-7.9613965283,168.8725284357,72.6517919395,101.2791317594,-127.8459748151,-239.7674800572 +-54.5608112987,-137.1722630034,170.1415371119,41.6790011534,152.9497541989,68.2584986193,-60.2657799896 +-69.2146396454,-3.976607724,89.2168417523,-2.4557943348,-129.7197218666,-66.4184010957,19.3596218191 +-55.049162824,-139.9738516724,171.747244481,41.8051268914,152.634638448,69.6678643378,-59.3255213537 +-2.5212504447,-22.611323557,70.9884238414,157.7233304521,-38.3434917488,116.5833931402,179.436683217 +-13.5366724552,146.4560236553,-21.5594130493,-26.5928365082,-13.9070114584,-252.6820189965,56.9016675699 +83.896491119,-54.0685026884,-63.1860416217,.3597960013,-67.6875107761,-82.9461502233,61.0867406592 +-54.2726639198,-136.8223888294,172.6871246558,42.0892587656,152.4667048474,68.8746757889,-57.4407750538 +-35.2917433525,-28.605099952,-93.2843844244,-56.4934484479,6.9372149394,2.8698145659,-74.4254905891 +-13.5609801621,147.5494677977,-19.9856622804,-26.7973712127,-13.1563406295,-255.4126368265,56.7083005196 +-15.4589908314,148.5972725823,-18.3296499745,-24.5968950607,-14.9288687172,-257.0844822912,56.861365491 +-29.6392666276,126.4424762577,-26.8596824608,-5.6749342659,80.8015223221,55.2153839158,8.6569342888 +13.4916479444,-8.7313286161,167.4419145552,71.5267972534,98.0896510506,-126.6204831177,-239.2099536483 +135.9954703786,1.5853969642,75.6827342846,-158.0184311232,-23.2069968428,34.4748390099,-6.6930871839 +14.2991397675,-7.4943854933,168.2351347962,72.9684246982,100.3251933738,-128.3846532652,-240.206949383 +83.5749043364,-54.1252031311,-63.9219532611,-.0321972422,-66.7913045756,-84.8882306864,62.9182934213 +-53.0026509401,-138.2067887157,171.6384585659,39.5694595995,152.7172923973,69.0920335708,-59.6691872013 +-37.0627717065,-30.51224411,-92.7241867391,-57.667083729,5.0079166107,5.7018881905,-73.8948986424 +136.0361967791,.7194283495,74.626758876,-156.9380432249,-22.8843591864,35.0494668062,-8.9383288004 +-71.4120939421,-63.8758087252,207.1428001203,-88.7749524731,173.3632720874,139.6497152188,-35.6835021233 +-37.2211453149,-26.9218873628,-92.4346248817,-57.4738037519,6.8540926622,4.5286475312,-75.7610462264 +-3.8794966467,-23.621904464,70.9184498282,157.4667272087,-40.2694217203,119.209128235,178.9465957173 +11.5415130834,-9.391848194,168.0881784318,71.6966062832,99.8947233648,-127.2605535192,-238.1200392772 +-53.2193551032,-136.805992501,171.2872436079,42.4144059686,152.0256785491,68.704262193,-57.6423576608 +-30.8487816631,125.4195009339,-26.5801563555,-5.867454727,81.5496364197,54.9141146564,10.2802764155 +-32.1853109991,126.4814527335,-27.6697283935,-4.8838229287,80.097262175,52.8840700405,9.6971315552 +12.1421156715,-8.9913275037,167.0190172324,72.4755391652,100.3497606859,-127.6809537357,-240.2751562961 +15.2208099694,-9.8467655247,168.8663711451,73.7605243446,99.8955874172,-128.6616088734,-238.04624668 +13.8595132709,-8.0675463365,167.5265933957,72.791973196,100.0855445855,-127.7369880457,-238.9098654101 
+12.6398192805,-9.7886188686,168.6078265221,72.2319159327,100.3139213665,-127.4256642619,-238.3894786916 +136.3980813752,.8781103857,74.0845012038,-156.5856950131,-23.1328568541,32.8195142856,-8.3821559342 +-53.8930349447,-139.7582776285,172.1854806385,40.7524631565,153.4767932113,68.866300914,-59.1013346355 +-4.2913559412,-21.5440593826,71.9285207263,156.2006285754,-40.5511549494,117.9068062965,177.4266139137 +-38.1025078119,-29.1531154103,-92.0506566096,-59.0766603471,6.1666719766,3.9649106448,-74.7031659237 +135.4778574449,3.0467109544,75.0783712761,-158.6478212801,-24.0291137598,33.0458323048,-9.4469713049 +-65.3754551698,-4.9037743212,89.6279906022,-2.0985035294,-128.6696549126,-66.534444071,19.6910319205 +-67.1934570474,-4.112676132,88.1828394827,-2.9676201257,-131.318798102,-67.0512306694,18.2753440789 +-35.0732903332,-30.1417393373,-91.5398666745,-57.5672933827,6.4602949463,4.5112110335,-75.367464579 +135.2566747784,2.4623438515,76.4929746222,-157.6205163101,-24.3942400236,35.8187289181,-8.7388704612 +-54.8817783749,-137.5668384954,172.038486232,43.0286162092,151.6683509071,70.0034593869,-59.5305576001 +-36.1180862308,-28.3666161522,-90.7708993609,-58.5329382702,8.2124391368,4.8732833301,-74.9741231879 +-29.664306299,125.4385080328,-27.4087004693,-4.6139499887,81.0415507346,52.4926976896,8.8652598263 +13.2660782498,-9.389800866,168.1843861527,74.3564640362,100.0608987101,-128.9884460618,-237.7219766898 +83.6421853857,-52.8162095782,-62.4359327442,2.3021599507,-67.9489502555,-82.2671690822,60.0613149681 +-3.4269702603,-23.4445379746,72.8384790775,159.2059901883,-39.1598210467,117.6064244918,181.2676906096 +-73.4380423602,-63.4541384237,207.2207727658,-90.7371036394,171.718445941,138.6736637559,-36.2453116634 +-13.7264936388,148.4582102196,-19.610874354,-26.1219567147,-13.8463779013,-254.2791791385,57.8654709058 +-15.0176045489,147.5345317464,-21.7057093797,-25.2211265116,-15.9433860504,-251.8811546874,58.5913427172 +82.3776302054,-55.7755797841,-64.9085419465,.0631950586,-67.3247841083,-83.7432852051,60.4853014104 +14.7854980352,-9.3621821505,166.0210719258,72.7790815965,100.8035720918,-127.6358570262,-239.4555621706 +-68.6893287479,-3.804210652,89.4989239244,-2.5610791363,-130.0041626794,-65.4144520358,17.7754125234 +-36.3789212979,-29.2252959261,-90.4004299756,-58.440322083,4.4530855295,3.6424486308,-74.6006515915 +-36.8278648991,-28.2344257328,-93.2852095661,-59.1271335492,6.1839943064,4.665627919,-73.3800368304 +13.0484440205,-8.5609078795,166.0630465196,73.0953149234,100.1426165579,-128.2304355496,-239.7421471725 +13.3151689279,-7.9437267028,166.5925930355,72.9408063549,98.8543900527,-127.6782144791,-239.4253462054 +83.5759794023,-56.0621293961,-62.4144504048,.0079269934,-67.3471429627,-83.205335723,61.4918352251 +-55.169669548,-137.5603904088,171.5457321054,41.4246146564,152.6431407638,68.3666916709,-59.8706177109 +-71.0090666109,-61.9037958564,207.6897932311,-91.7380300112,172.4715637573,136.673510921,-35.8191295724 +-3.9487512949,-22.8370953833,71.7419093501,159.2370087771,-37.1167205217,119.836928429,179.7749864494 +-31.2600615791,124.3734911403,-28.1766334844,-3.8150976187,79.6191676083,54.6512356972,9.1896530197 +82.1902764643,-56.2675028947,-64.2066613639,1.0582353468,-67.2802486092,-81.9136389512,59.9631076189 +-1.0835296714,-24.7662155544,72.7070358437,157.4100397263,-38.9397926816,118.4543539577,181.1683818904 +-2.2480354109,-23.0229908937,70.4913632108,159.4208799783,-38.5027973723,118.5204374178,181.4004436328 
+-36.8629185294,-29.317880678,-92.7786056318,-57.5001876382,5.8790860077,4.3129073614,-74.9323511374 +83.479613837,-53.3943657229,-63.9756389055,.4090012746,-67.5034573729,-81.7290267824,61.189528673 +-29.6885069886,125.234688979,-28.1733075107,-3.6268892699,80.6444424339,53.2613924307,6.7150015019 +-14.617618762,146.6989760986,-20.2253323131,-24.6290069127,-14.1847676455,-255.2477713679,58.2801142846 +-72.7292609614,-62.2157543618,208.1810953241,-89.9161957586,171.3695482202,138.5312736493,-37.5227512543 +135.283111863,.4102896374,72.8839547199,-158.796083988,-23.6378551966,32.0744306238,-6.0697777953 +-52.8966967641,-137.7333801594,170.6680226264,42.8291424306,152.3172494785,69.6562601381,-59.5561084428 +82.7365059143,-54.7350563557,-64.8915903094,1.0067669725,-67.2122551821,-82.811765369,59.5109534728 +-13.1255396519,148.4948231313,-20.7452812243,-26.4868357611,-10.913955671,-254.7285644758,56.9542625879 +-71.3301990486,-63.0680970896,207.0780266832,-90.9449037899,173.7099523763,138.5142362053,-37.2868488505 +13.4655912478,-7.6446627201,169.0876643914,73.7425118988,99.6300688965,-127.6513758185,-240.2742487395 +-4.6935101885,-21.9763404376,72.8747836384,157.5476936293,-39.0281214871,118.2146138219,179.4203939452 +-72.8757107669,-63.4742213959,207.2023954189,-90.8934679621,173.0869859465,138.7119514207,-36.0889695073 +-72.2546183017,-61.932897426,206.7565721038,-90.1225116562,173.4367702553,139.495926009,-35.8260483718 +-29.2999682995,124.039878129,-27.3632864711,-5.4495649696,80.485061643,52.2246359818,8.4558474378 +-52.8790574667,-138.7960301263,173.0286042641,43.6823542157,152.307483698,67.6490436638,-59.7012679061 +-37.2681494967,-28.1716356901,-89.7625248958,-59.6449734589,5.5213338073,4.8617012201,-73.8178162347 +-32.2013479408,124.9301741818,-25.9582323025,-3.2587593817,80.6578181734,53.4876189175,7.6547286155 +133.9458751741,.6816245256,75.9505568334,-158.8721322537,-24.5718458862,33.7318628031,-9.6475650633 +84.1242904852,-55.0515189698,-65.4680591851,1.3332940861,-66.9758179688,-83.0450327443,59.1762986768 +-14.5928256895,148.1551357187,-22.6307623571,-26.4998299725,-13.1584376305,-254.276806331,58.2455769878 +-14.7328470446,148.2681024882,-20.7998425383,-24.2257153533,-12.8754820682,-253.8072303471,59.2347967962 +-67.659141186,-4.5320968378,89.0249026399,-3.4550561611,-130.6101322546,-65.4343719538,17.3145853491 +-72.5175516698,-61.4548125064,205.8129416331,-90.5780351126,172.9534605179,138.2930767628,-37.0142951455 +-13.0819250371,148.6116612742,-19.0307349555,-25.8964884189,-14.4394856871,-253.6962363349,57.7741235753 +135.5177063659,3.101149149,77.2566425743,-156.6375897417,-23.2345979485,34.0172821685,-8.8242205799 +-67.3309529625,-3.4360678318,89.9347507047,-2.5517428954,-130.3727932148,-66.0828832673,17.9866670897 +136.9281445983,.293675942,75.0122048551,-157.3309711881,-23.2824803531,33.061371175,-6.2278048258 +13.5333382572,-8.3525819719,166.7774827632,73.3455008427,100.9706766496,-130.4365746065,-240.9073401749 +-68.1669318136,-5.0751223333,89.3294297256,-.48657279,-130.6459933697,-66.2713425872,19.3268085127 +-36.3804118784,-27.6229598416,-91.5859918905,-56.7887457376,7.9442484759,4.1746759848,-75.8281487134 +13.5203234377,-7.8803786408,167.8161968238,73.3456311612,99.8778410301,-128.1369452311,-238.8985155892 +-4.9342530223,-22.6523482293,71.5605980598,157.3890146426,-37.710353637,118.1197953675,178.4348817938 +-53.4057722219,-136.7929587075,172.1579533757,43.5086325489,152.1630644625,69.6771764364,-58.2538046215 
+83.4367888614,-55.8417200763,-61.6506881195,1.6207227706,-66.7508934709,-81.1676397916,61.9629775174 +-30.0154701718,124.4646294515,-26.1160289947,-4.845762238,81.4249318044,52.3767646074,8.6986828431 +-14.8060775988,146.3307390467,-19.8254932635,-25.6003352476,-13.6246164885,-255.168215551,56.5918076573 +-37.1298193804,-30.3791385464,-91.9190636985,-60.5977490808,8.1313363938,4.7259788813,-75.2114197358 +11.7601214089,-7.7413941571,166.8188394124,73.0069109535,99.090620567,-125.0725059283,-241.601906379 +135.3730928528,1.4662548924,75.5893287947,-157.5181636252,-24.9666072342,32.6127617246,-6.889975331 +-68.1353489371,-4.4177739169,89.2218615089,-1.1644408207,-128.9365558577,-67.2319064569,18.167547014 +-68.1009061098,-2.4082743761,89.7098627187,-2.2277652894,-131.2390614638,-66.5671254414,19.7014847889 +136.9710934032,1.0206292487,75.851021523,-157.0119022812,-24.5710083061,32.7992111073,-6.6505751596 +-13.6155185272,147.0152726134,-19.6997013722,-27.7640686494,-13.2359346697,-254.0651473003,58.467521925 +14.1560161664,-8.6170642084,166.9857264176,73.8531802582,99.2850096425,-127.7092284834,-239.1667179424 +-30.8245856138,125.350335361,-27.9384602422,-3.779863576,79.077851033,53.7115779635,9.0085221603 +-55.4954586664,-138.1870343156,171.5296527744,43.0952034821,154.3577746087,68.5204882925,-58.6650878185 +-35.6242133148,-30.0280023742,-92.0316543699,-60.5566115343,7.3772580355,4.2479008349,-75.9669209485 +-14.1644561898,146.951951413,-20.7125685821,-26.6717184661,-14.4446090108,-255.8161640638,57.1648107481 +-4.8697747273,-21.0043282634,69.8180172941,157.7844718173,-38.5381221639,119.2483466925,179.7126674068 +-52.9629125364,-138.8068425029,170.4509716717,42.9881391211,152.1455770769,67.5113066074,-58.2829916457 +-1.4821856432,-22.1805273319,72.0318301401,158.9056867,-38.4037679477,118.0981668665,179.2049722416 +-2.5882928346,-22.6044601383,70.1387124714,159.6770911377,-38.7404647369,118.3816803843,182.1040273084 +13.2546165848,-9.2618623993,165.9699215912,74.017435745,100.4392792432,-129.4441637717,-240.3762529386 +-74.9797305335,-63.6972260449,205.9786238579,-90.0040002742,175.1117261855,138.7155720066,-37.5860144609 +-4.542206054,-21.3467204042,72.349559362,159.0972910965,-40.4680525239,118.3821329492,178.9519118837 +-2.0103602397,-23.3467722059,71.3988126127,157.730357631,-38.1824780558,118.1968581269,179.0487701229 +-15.75250552,146.5112376825,-20.4392168604,-25.5539093052,-12.8596206261,-254.691833564,56.7607688588 +-14.8915191668,147.7780907674,-20.4842667766,-24.9667526986,-14.6484624859,-254.5374545077,55.5276611682 +-4.1990256292,-21.9952426718,71.8567049478,157.1190495026,-38.5083520675,118.4006173141,179.1453866102 +-54.4643181137,-138.5638786316,171.6080737305,41.8731717454,153.4645920442,68.050267323,-57.2837084025 +13.2505547728,-9.6291426927,168.2596347608,73.7740710185,99.7852149258,-125.5158116785,-240.5171009991 +136.7978483999,3.166490107,74.2552356492,-155.2986712253,-23.68347706,33.664085385,-6.3936929078 +-50.8794242764,-137.1904750174,170.4254782375,41.6712508236,154.8486283755,68.9822567548,-60.3263849306 +-30.7819057147,125.3547089328,-26.8638535569,-4.4164537522,80.0699045698,54.0886912676,8.2615792426 +82.2022814586,-54.8571894377,-64.1375664853,-.889366368,-69.2992516256,-83.5794874282,61.4833272257 +14.0422460851,-7.2674147665,166.2925536084,74.3283271844,99.9359031548,-126.6082843115,-240.6131663886 +-53.8786938897,-138.2875236871,172.6115877304,42.4945588815,152.7724682682,69.5161846424,-57.8172484681 
+-30.4558201098,125.101503461,-27.6602702968,-4.8418974234,79.4540030873,54.4575895777,8.865553842 +134.2794008723,.9832257578,74.8176281638,-156.86051464,-24.5190800657,32.7794951717,-8.9702289436 +-30.7240500725,125.2188920886,-25.2195419614,-4.8407403729,78.7486838702,53.1209705146,8.5001901567 +82.149041233,-54.4463331976,-64.2428381602,.7130038328,-66.5622691976,-84.860320095,61.3950876171 +-72.8435466248,-61.1423119072,206.3214507817,-90.8162492398,172.5451759075,138.6873173473,-37.3266347361 +-2.821053212,-22.8180731385,72.2433550627,157.1075065401,-37.3364390441,118.1794418627,177.5840200681 +-15.7204810473,146.7290825719,-20.2267277805,-25.4899370937,-13.3580337952,-255.6170539556,59.1805881474 +11.8601905988,-8.6564229079,168.8189485551,72.4547618299,100.6103856612,-127.5328370677,-239.1875352899 +-69.6844336796,-2.7835049389,88.5146733673,-1.3830369372,-128.314138521,-66.938633749,20.4919218307 +136.939149322,1.7581599362,75.2638969039,-157.5137836701,-22.0219076866,33.8237635572,-8.7589633502 +-73.1092441137,-63.4250533263,206.8308413647,-90.0149113479,171.219685094,138.1545101294,-36.3418008426 +-3.3362908217,-21.8087756556,70.95981569,158.3505867013,-37.4699588056,117.8998187461,178.6214267298 +-32.3485130089,123.8106953574,-27.7939721721,-4.6736633773,78.5314338305,53.5397710706,8.748832297 +-72.6644617927,-63.8679108307,207.5628339839,-90.6844396648,174.7755140692,139.3912861353,-37.3944116937 +-53.3374815629,-137.5685229859,171.7949043873,43.2479565796,152.3365510525,69.3373436846,-59.7395135176 +135.9330590087,1.793946876,77.1038316524,-157.349517917,-25.1425650283,33.1988649018,-7.5499724315 +-72.8454250167,-63.7830638029,207.2204301615,-90.4553344934,173.9326752818,137.8431976074,-38.0082265163 +13.7146463511,-8.6363810917,166.8940304587,72.8067270886,101.1333410954,-128.886770137,-239.2707523142 +-28.6179732231,126.2375483612,-27.5204250494,-4.9468887986,78.547135087,54.2342395236,8.1514645628 +13.9301580501,-8.2599864322,166.5179924048,75.3146727527,100.08122259,-126.7000804081,-239.1163561858 +-14.6107095085,145.0470206641,-19.9030434959,-24.1976166977,-13.6584682809,-253.6783593824,57.5573991535 +82.7574838477,-55.2085796518,-64.4560220966,-.1644999553,-66.7307982918,-81.7541306597,63.1542244709 +135.0201449102,1.6139870065,75.4048900566,-156.5927177486,-22.7433081656,33.9221733709,-7.1255937356 +-28.5094240999,124.3946108189,-28.9197653038,-4.6983407158,78.9161510001,53.6569695224,9.2927847136 +136.0214808849,2.0870770318,75.278670344,-158.5799716437,-21.2777797245,33.3416306714,-6.8831199587 +-67.7085280016,-2.4155744784,89.0595490009,-2.6747035298,-130.0186583911,-66.3840277183,19.1969779732 +-54.4718204489,-138.813807101,170.0043843179,41.1096052897,152.2596316968,68.5545385814,-59.4285500732 +-12.9537306636,148.8307702125,-19.7445626379,-25.0001894311,-13.7722001182,-255.7690209899,58.7570653103 +-36.9896427088,-29.9601582952,-91.4596773676,-60.6171613605,6.4540328246,2.2363200588,-75.6452357383 +-29.1989921837,125.8531117103,-26.4284411261,-4.0895188403,80.9190278383,54.3598555686,10.0089185047 +-30.4350873206,126.0744274124,-27.463122615,-4.8093924114,79.7924426499,54.161181215,7.563239162 +-53.7629735638,-135.8434063363,172.120676856,41.6094410742,152.2247019042,66.5754614286,-59.6322614606 +84.2251767238,-55.6916350723,-62.6641254219,1.151505673,-66.5750346154,-83.5509726651,60.4637018796 +135.9366470698,.4607322421,76.2708365189,-156.7168951346,-23.7765347745,34.600756183,-8.2661270757 
+-36.7807613426,-29.8892899107,-93.2875879116,-56.28063338,4.7239027024,3.4918811401,-76.0616188239 +-74.1377743002,-64.2526252569,207.2736297421,-90.6353907146,175.5946178954,138.9340991253,-36.8521780353 +-31.1306648021,123.9048262246,-28.2137101075,-4.8234785251,81.1799103688,54.0345374167,7.9817934334 +-28.4960124569,124.8913777455,-28.6107174623,-4.6035685254,78.5825687183,53.8918346291,8.2070322439 +-72.2112817061,-61.9107557146,205.081378784,-91.8777388347,173.5132555768,135.8094010908,-37.8708539808 +-66.8749246539,-3.9095845237,89.0695398732,-.5999868132,-128.8207103804,-67.0102478319,18.2566309478 +-66.7004795335,-4.8587908605,88.666875666,.3018733429,-129.3423266525,-68.2061106181,17.1278077527 +135.9375295734,1.2523810571,75.2687842921,-155.9013154561,-24.2784109237,34.1958435442,-7.7008669535 +83.4378506333,-55.6417576122,-63.0492545537,.9525105406,-67.3068469179,-83.5879228833,60.1735395038 +-3.5500562671,-21.1491883915,69.9339334117,157.932462115,-39.4252896153,118.7813125412,178.5344350533 +-31.0597475016,126.7314954816,-28.9612200969,-3.3612346355,80.770288327,56.1576756767,8.7614870846 +-66.3249102096,-3.6953047503,89.5379426629,-2.2077536189,-129.3100080317,-65.7195672116,17.6510159293 +-35.6292217165,-29.5455178064,-89.153912987,-57.3444579612,7.8762431579,4.7784879352,-74.8861920735 +-67.5250006386,-4.6114517482,88.6203895853,-3.8293682607,-129.9674772882,-64.5891095269,18.0018868159 +-66.8629058931,-4.5035625684,89.4111698786,-2.4973481943,-129.6899672324,-66.9798621248,17.6729923751 +-73.0169878498,-62.8101502191,206.2468256137,-90.0977331198,173.5196216289,138.900384753,-39.6100887337 +-53.4269458753,-138.6353804126,171.8017437226,42.3439820155,153.1133793754,68.4732459087,-59.1306606079 +-15.0588829579,148.8947263033,-21.9275961748,-25.3145274293,-11.9493259448,-254.1915400121,58.5687112294 +-66.2351143993,-2.8028729456,91.0859838367,-1.1250034556,-130.153647321,-65.892869689,18.3326029858 +-2.2936418616,-23.4857843109,71.2223891055,158.957886228,-37.2321924474,118.5905502411,177.5365568336 +-30.190291183,125.3939224405,-26.4780098761,-4.2800440911,79.852184799,54.3136120142,9.2594042616 +-66.3208841499,-2.2707204485,89.1771246037,-1.673218184,-129.7632682965,-65.2338537046,18.5124242938 +136.613440068,2.8719485582,74.055298829,-156.8754992557,-25.0096250015,32.0327794426,-9.1072588837 +-55.6357901129,-138.225068484,172.3789074724,43.7626911107,151.5643960659,70.0276357871,-58.9167821532 +-13.5959728086,147.8675955149,-20.0712383869,-26.3652158221,-12.4677682693,-254.3337145438,58.3295511689 +-69.0529410185,-5.4805377395,90.0903773327,-.2230077294,-128.5047127613,-66.5838858449,19.6941066991 +-3.8031241901,-22.9554997765,73.4847356246,159.7263088119,-36.6711084889,120.250834623,179.5765072525 +83.188616744,-54.061413145,-64.6999953743,.9052241095,-68.3633713559,-84.3683874124,62.0717265988 +-35.7349325242,-28.9477477733,-91.0401052838,-58.346112842,7.4236012481,3.9538769944,-77.2433376729 +-75.0827228648,-61.490547923,208.2949194042,-90.7002690997,173.788169811,138.1510744424,-37.8904556992 +135.3490013665,2.2428524791,75.8899991941,-155.2753489975,-24.279779074,32.3278328506,-6.2422545688 +-68.1650800073,-3.643990638,90.1223058815,-2.0930356333,-128.3154659462,-67.5858584794,18.7544442194 +-3.5015828318,-22.0511303671,72.1194584339,158.2167537083,-40.28667415,118.4402669632,178.4015900296 +-68.3284661414,-3.6207891946,88.7889694429,-2.6528385913,-129.7213406047,-67.0923287164,18.8473277523 
+-1.3272755043,-23.1508198339,71.0209028843,158.4366831372,-37.9655519777,117.146075684,180.2329376547 +135.0118267803,.8643868582,75.1265599223,-156.0987835735,-24.2189831364,34.0131659528,-8.7561185077 +-4.3159615029,-22.3730557598,68.8503043007,156.2226081096,-39.4945382343,117.3438424115,179.5801076706 +-14.9859611349,149.3198463981,-20.0079227079,-26.1267630206,-12.9190935931,-255.1670540861,59.3496340661 +83.1572945043,-54.7121488937,-64.7028300408,.9649180087,-65.1928941154,-82.0777643235,62.4175548295 +-4.2062514984,-23.5217177645,71.4045071934,156.3798122953,-38.178454798,117.127275498,177.4458749702 +82.2970142525,-55.4658010347,-62.9661630003,-.5915573736,-66.3206982836,-82.0628410395,60.419160981 +-73.7900044995,-62.6200236322,205.568167571,-91.3020436726,174.6387766144,136.9475035652,-36.8175552115 +82.3947505467,-55.0077224157,-64.1179318648,1.687295586,-67.6903434227,-82.1867213925,60.1211654933 +81.3784213525,-54.3043740219,-65.1475623343,.2195629406,-68.479929388,-83.9121686383,61.7796305813 +81.0572721578,-54.6860685866,-63.3419085141,2.0219746731,-65.2415823109,-83.0571917124,60.4759814015 +-55.8781490747,-139.0069575582,171.4102986431,41.9883413192,153.0298928446,67.1256775254,-58.7983929825 +134.8704789466,1.2278722566,75.6466156774,-158.8934803397,-23.9415958617,31.643664557,-8.7889603197 +13.2662382931,-8.4586707707,168.1046426293,72.4460169466,100.1022032317,-128.6392397283,-239.7655019156 +14.1340665798,-8.8747438821,166.1743736361,72.6819730597,98.8951622854,-126.1202296931,-237.030168225 +134.4730002523,1.5487621033,75.3685814369,-158.6399508341,-23.0331946327,33.9300863629,-6.5983268338 +-11.9621725816,148.2843862461,-20.9630766622,-25.674538193,-13.913599772,-255.2723888392,57.5377396068 +-73.6494421794,-64.4942918892,206.4420515799,-89.4383409452,171.4575567346,137.9672678499,-39.4579546701 +-53.7169979735,-137.2412093808,172.5283718992,40.0985646993,154.4347830464,68.558423757,-59.912870761 +-73.9946272175,-62.3931933589,206.1294092808,-90.129957361,173.5278044642,137.6563472427,-37.057719847 +-30.5884372316,124.0469342897,-27.1254454842,-2.8897119133,79.3268463804,53.1540897032,10.1500676035 +-14.5045116788,147.4867270205,-20.0415999004,-26.2486027634,-13.8052510414,-255.8477467616,57.2253340531 +-72.9679366748,-64.2340613463,205.1167365636,-91.2297624104,173.4592799841,139.7526829533,-38.217702556 +134.1698608802,.5017641043,75.1245911639,-158.1692070634,-23.7695204906,32.9130662302,-9.3066062617 +-3.8877862927,-22.4164969265,71.54478742,158.6041665756,-38.7395461516,116.4045707266,179.5096044633 +-31.6372270703,124.6250080955,-27.525140783,-4.0959075417,81.4188626838,52.3157225273,7.1344416024 +-53.9981533314,-137.0217345201,172.05134827,39.8367985027,151.1792663822,69.1165812673,-58.2773293696 +-73.2848246919,-63.4854758614,208.6999667837,-92.7529830134,173.6392518485,137.5405915751,-36.3228192942 +134.672972512,1.749156155,75.8277922813,-159.0705161886,-23.2160904391,32.773639116,-6.9582688412 +-37.1940537238,-30.6619120086,-92.1381837869,-57.2767227521,6.2042507021,5.9983173913,-75.5685291114 +13.5730740496,-7.5317947329,168.011898401,72.8932030002,100.215821535,-125.732883003,-237.5218234074 +14.2297802525,-8.0355497504,167.1496451023,73.6622833067,100.745167579,-129.2402746636,-238.0004261926 +-35.4907530977,-30.1247617774,-90.670003111,-61.165741005,5.2375870015,4.2563684961,-74.926452717 +-53.3558197419,-137.737398561,171.1645953191,42.3379996847,151.0894005799,69.718653368,-58.2089923801 
+14.5679985363,-10.5863009784,167.340747186,71.427652104,100.0082984936,-127.9407058819,-238.9607515895 +83.0004333172,-55.8534118907,-63.9580303685,1.3140316179,-67.2822192859,-83.9430135834,62.4764462358 +134.8782100384,1.963006689,74.783312672,-158.3786165099,-24.3143204243,33.409370749,-10.8114056924 +82.9507727489,-55.7776983672,-63.4922357093,1.9457548219,-66.2281905922,-83.398534785,60.8876118691 +-37.1138187154,-29.3643381511,-92.3826032142,-58.1111238269,4.7889646032,4.5706493866,-75.6167788057 +14.3160223321,-9.6785270289,166.4130541014,73.427207834,99.4527271098,-127.5141847815,-238.1974001338 +13.1719017304,-9.5344805432,167.0098959696,74.3442508593,98.4633762748,-128.0436353082,-240.7462393163 +-73.7725934806,-64.456519544,207.1480176927,-91.456415605,173.4362283912,139.3536529201,-37.0776158982 +-72.0749006414,-62.9022485431,205.5253648909,-91.0532313216,174.214758583,136.244749972,-39.8822075236 +-72.5080702544,-62.2192648904,207.1271343031,-89.1067748034,173.13221845,137.6095574655,-35.9968849406 +12.8113370474,-6.5231521259,168.1189584954,72.5412670217,98.6205713739,-129.0109547217,-238.6141077847 +13.4786697315,-9.1152402648,166.7789299753,73.8230377177,101.9609009477,-128.4477927883,-239.3686877037 +-14.8058024033,150.1902271723,-20.3040935783,-26.6308483446,-14.9306043279,-254.7887256149,59.0746238534 +83.6018829185,-54.6451319796,-63.502232179,.1557087482,-66.2891644284,-81.0677369299,60.8147362115 +-37.075247742,-27.7775009001,-90.9841363582,-55.9584975339,8.2819799162,4.9021559627,-75.761619923 +-15.1911640483,150.296435994,-20.9130531757,-27.4114903315,-13.2529947361,-253.5005140947,57.4231475412 +135.8483634091,1.8289526954,75.0403686288,-158.1557638515,-23.5814261941,33.2253729643,-9.4991935666 +15.292367374,-6.9624965786,167.3522653069,73.7585551103,98.7155362563,-128.4679509195,-239.8587383029 +-54.3041290097,-138.6019127935,169.7781700274,41.0414779344,154.6081817411,68.1586605576,-57.8655717403 +135.7417237617,3.9903400028,76.2669282724,-158.2394678608,-26.8825545495,32.610557313,-7.8613689258 +-28.0245483952,124.8302045966,-27.4621993125,-3.7922110031,79.6116359706,54.7415589061,8.5089513355 +-66.8001692774,-3.9257010778,87.9768099581,-1.4463817746,-129.8983244102,-66.4075143881,19.597559494 +15.8281881501,-9.5994169244,168.4951122228,73.7209639394,100.8947747521,-127.0177691771,-240.2219958652 +12.6007809688,-7.8551810018,168.1012912011,74.7651412918,99.1994034912,-129.109149189,-241.8597628498 +-67.2231791962,-3.4527769146,87.827249416,-.8398553036,-128.7125959899,-66.7146845353,16.7893387787 +-67.535124358,-3.8876930156,89.59025587,-1.2893433935,-129.0261262413,-66.2836444902,18.8133924699 +137.5887771685,1.5253199248,77.2565897788,-158.7204518042,-22.5376656441,35.1343782074,-8.009052466 +134.1013474312,1.9817429015,73.4464811003,-156.9775934243,-22.9825522385,32.3868964367,-7.8833365484 +-36.9163635759,-29.1578074029,-91.7362455849,-57.3028450364,6.5761098287,6.1378012996,-75.7785679583 +-35.763989633,-29.5679456934,-89.7390256003,-58.6185038307,5.1409000907,5.7938728195,-74.466168073 +136.3634757423,2.7348134819,75.9769547551,-157.4318029681,-24.5906270041,33.7013930657,-7.9389565798 +-67.9250853205,-3.1175991763,89.5629250292,-2.8807745709,-129.4604862939,-65.9788029189,17.492619911 +-54.1457772998,-137.7892903846,170.5484890583,42.7384131844,154.0446737686,71.1211429868,-60.4585046958 +136.2638534491,1.4346486379,76.650727687,-156.3174746339,-24.0196600255,34.3790406153,-8.3521474171 
+84.7244895219,-55.0481712892,-63.3770304292,2.4570333292,-65.7765694281,-83.9020053608,62.1169430493 +-14.782419719,146.3627118948,-20.9189888776,-25.4739254536,-13.1464660053,-253.9162904908,58.1084366716 +134.6699013777,.9431972064,74.6647276301,-156.1662940519,-24.3114282455,33.4738228399,-9.4212831037 +13.4433058609,-9.8667162882,168.3103478554,72.8512171819,99.4611705211,-128.4146031528,-238.5718158893 +-14.029376195,146.9622087973,-19.9711384443,-27.6547134934,-14.4467031722,-254.0443464501,57.7944317099 +-36.5493703152,-28.2335226528,-90.9240222351,-59.8144848794,5.0696688224,4.2714121999,-72.8816812729 +83.873074776,-54.3384460251,-63.5127500194,1.8878199241,-66.5705469901,-81.9451271729,61.2018463695 +-54.9083148595,-136.7175756864,170.6129955765,42.9040752216,150.4320660506,68.6544220434,-59.9478511172 +-68.9423863458,-5.2529674569,90.9765899776,-1.6103713638,-130.0125947094,-66.8785755638,18.660843454 +-38.6048158771,-28.6775958198,-90.1271783891,-59.5851779751,5.6130835168,6.0742345741,-75.2802892422 +-35.1567243353,-30.5914231719,-91.6729947108,-57.8293493373,7.7267003113,4.9936393851,-75.0842184657 +136.1422587864,1.0395220257,73.9253323481,-158.0973764558,-22.0937647468,33.739777411,-8.8713286238 +-74.9304995069,-63.1282810059,206.7334714259,-90.5230721594,173.2884360476,137.331368087,-36.9833154783 +-36.6884706178,-30.0915584102,-91.3845233785,-57.6105230766,6.7133469796,5.3356305702,-73.67978511 +-31.0283038796,125.7917193868,-28.5723241155,-4.6306495478,79.3097452181,52.6364507687,8.5992670144 +83.3978409266,-54.4655468657,-63.9869196277,.4922774565,-66.9429551344,-84.1447891481,61.8152858381 +-29.26582215,125.366955425,-26.3928979329,-5.8939507658,78.8398373384,52.9922364515,10.1730084999 +-32.138744798,122.5802114842,-27.1544990292,-4.8481379824,79.0946976394,54.9787427609,7.7224576976 +-53.3888710974,-138.229003408,170.1977071651,42.4479560721,152.3567879959,69.3491037923,-59.6202850565 +-56.3792626929,-139.5126155354,171.8718583492,42.8921126684,150.9939211473,66.2303924922,-59.5570354523 +-72.7644759303,-63.137376371,206.577670831,-89.6181633062,172.12094815,138.1958980869,-37.8007861145 +-53.3252510658,-137.2354550558,171.9940929703,40.0865069734,152.328568063,68.5660066134,-59.0154978438 +82.7615326799,-55.5020325343,-65.2828923987,-.4200288833,-67.9168800642,-83.4195400846,62.1324931561 +-54.2686620206,-138.3789235252,171.7663609978,42.3269309566,150.6503362818,68.1062310888,-58.8228804274 +-73.9586499108,-62.0943279951,206.6999576636,-89.813878149,173.0506404987,138.1786827373,-36.4876514763 +136.2152548126,1.14380366,76.9563273042,-156.5284163527,-24.2264882358,33.9193963632,-9.249316494 +-67.1392787628,-3.7770639936,89.8216294619,-1.9528792763,-129.0907770151,-66.9527517631,17.5642842281 +-38.6495370296,-29.0372391544,-91.5276075134,-57.1168842897,8.2725903207,3.967463504,-73.8284166848 +14.7086087212,-9.643083328,168.2148009228,72.5605953228,99.6670328852,-126.254010205,-239.2737614798 +-30.4959663499,125.6517963105,-29.4245894084,-4.9408261183,79.827957213,52.2869664895,8.3547557774 +-67.6667687446,-3.5034435845,89.5020794554,-.6833007727,-129.6516251202,-66.2410581879,17.0762547953 +135.4977285447,1.1523758951,74.5536589666,-157.3382562432,-21.4221036678,33.7950818816,-6.5195236976 +-68.5543729846,-3.4581250719,90.1862210276,-3.462388704,-130.1517882411,-66.5831133967,16.8627331579 +14.8208249916,-9.0314849412,166.9221817601,71.9467496926,98.968503894,-127.8306369468,-240.5132593469 
+-30.7035464711,127.8427960848,-27.9108303224,-4.7917673176,79.5929544187,54.3569523323,8.8053763384 +-14.6404732206,147.6884374837,-19.9609047772,-27.0623976822,-14.7308341575,-252.5506086555,57.5762364197 +-3.2405775636,-23.7347824689,68.9330604544,158.7525276697,-39.5395796561,117.7410533233,180.3731727787 +84.5941924662,-54.9415827451,-63.4877155248,.4295655506,-68.5087362105,-84.5947665313,61.0681938383 +-73.5859063491,-63.1890610181,206.9706464742,-90.2692013734,173.9686356381,138.7130416786,-36.7728354193 +-53.9990893703,-138.2243789807,170.9259018217,44.0273372,152.7704386148,67.9858657633,-58.3754039026 +12.3435787382,-8.9538715523,167.6216984447,75.6990302512,100.1041620319,-127.378796713,-240.3926344461 +-52.8010485962,-137.7254321008,171.7106759517,39.0419469325,152.2496954349,68.8138755027,-59.4944033605 +-2.4036840911,-22.1368921845,72.0991411658,157.1668287695,-38.9991555595,119.3255160377,178.7223314252 +-31.6647210219,126.7719524658,-28.5541187936,-3.201236087,79.3950086053,54.6627781187,8.921950078 +136.699070804,.7193723699,73.4265102755,-157.5090598221,-25.4495301084,33.9768240092,-8.6142131042 +81.9542155045,-54.9835677318,-64.0145987786,.3678851299,-65.9403729467,-83.3369291334,59.4591476223 +13.3697315562,-7.596901897,167.1807324706,73.2455803405,98.936761703,-127.1838475231,-240.3362521555 +-30.8476228248,125.7916940817,-29.187814477,-3.9919699753,80.7579608272,54.1725256871,9.3620917536 +-54.7564720287,-135.9546266994,171.0458779052,43.4541914261,154.0097104601,70.134376603,-58.7866095352 +-53.4647230039,-137.546287789,170.3155642792,42.1736542666,151.2494853337,69.0394773205,-57.830307241 +134.3412982708,-.5415952516,74.7456646702,-156.9958367107,-24.0903418341,32.6580627931,-7.6026462212 +135.9466464651,1.1460248682,74.6191631986,-157.7715484166,-25.1021503548,33.7189761663,-5.9270964986 +-29.3254230719,125.911779881,-29.2532286474,-5.0951924847,79.8839285935,54.0876795413,8.8020188135 +-72.630064242,-62.1119949943,206.4496809921,-90.1841830192,173.3292136308,138.46236365,-37.8706157631 +-73.52414214,-63.271324203,205.921821648,-90.8439520469,172.3241955776,139.319579684,-37.8511951056 +135.1118822345,2.0135851446,75.2621815417,-158.0876384396,-24.1963527229,33.430242797,-8.8905320973 +-15.8960623104,145.6239128321,-21.8857266117,-26.0872731148,-12.9441299788,-254.7884517323,58.2606743833 +-29.3931543677,125.5081893873,-28.5912022089,-3.0867434631,77.1319836691,54.6486406761,6.7388548949 +83.874593961,-55.8174257191,-63.922268084,1.8925309502,-68.2320982617,-81.9997587839,59.8270273876 +13.3358205419,-11.0713923824,166.4338012305,73.5750082135,98.6789782147,-127.6765539864,-239.4808366505 +-14.7956366612,146.6097542264,-20.961176452,-24.3633580406,-12.0343309286,-255.2709793481,56.9426006072 +-36.9377409714,-27.8521918229,-90.5500400308,-58.3904923366,6.445802741,3.0431872671,-74.69043308 +12.4562605687,-8.4321549717,167.30068184,72.4807828528,101.3208962872,-128.5617339051,-237.1876684667 +133.9975533168,2.4281064062,76.6540760085,-158.744773283,-23.525805776,32.7157055736,-7.3590515509 +-29.8562080023,127.7707492121,-27.5419513472,-3.9128929198,80.9797838111,54.1705949523,8.2325284161 +-68.9619133931,-3.3620164538,89.6812276786,.1678822163,-128.6741480725,-67.2249293636,17.5055388337 +134.9616175677,.7064027719,75.2266519517,-157.2715396898,-24.1381226746,34.7256149679,-7.0788149597 +-15.3671228193,144.5044903669,-21.460563651,-26.9894957048,-13.8406152436,-253.8253937794,58.4150894859 
+-67.4767041112,-5.8667939063,89.538205679,-1.8081790862,-126.7861496313,-66.9839978282,18.3954376862 +-30.8974884493,125.1353603152,-27.3948681045,-3.2195138073,79.0522035108,54.5071599575,6.7452958363 +-54.2136061799,-137.5420420493,172.9925723271,41.9477547648,152.6928669029,68.1489189826,-57.8947859078 +-29.8975680431,125.4662942806,-28.0151587141,-5.4090196737,78.7748896162,55.2192312533,6.4443876283 +-29.3397838577,125.4359826135,-26.2429592014,-3.3961533853,81.1976769256,53.6747309711,8.2356431823 +12.1008033393,-8.7737714705,170.0337258162,73.5749009486,98.7540108954,-126.9753079163,-240.0434625115 +-13.8303079791,147.1997612674,-18.4581855779,-26.3086070107,-14.4976859363,-254.0348433271,58.2015673528 +-73.7696152178,-61.5795278586,207.6684315153,-90.7547663283,172.8032667264,138.4417286718,-38.2572305922 +-3.6468526898,-22.4432133379,71.5570919569,158.3631189906,-38.9395644052,119.1008157832,180.275933999 +-37.4331972565,-29.0897754596,-91.5765284453,-58.9425195135,6.3975280097,5.1037805505,-72.9627546343 +-29.0947480363,126.3835780034,-27.6768821872,-3.9995020141,78.8402580777,54.7978562507,8.7833482993 +83.8410344518,-54.1851016624,-61.8228715439,2.1203033983,-65.9856933881,-82.5740024783,61.4357236803 +-67.4652545665,-2.3131765527,88.7139645534,-2.2225356784,-129.1145961769,-66.3373819395,16.57562589 +-35.9441410778,-28.9332878425,-93.2335460267,-59.8150035991,5.8000311474,4.4829572219,-75.6195731807 +-54.1706985739,-136.414746927,170.5659162951,41.6750304642,151.0392001929,69.506555679,-58.263056612 +-71.7043803366,-63.1282178416,206.0747640496,-90.602483715,172.6504533733,138.4155449077,-37.6907189769 +-54.7166836685,-138.3636377744,170.6514369394,41.8943690583,151.5242444065,68.1729106023,-59.9074638788 +-29.0563520198,124.5795631746,-28.0210103261,-3.8974638786,79.8634907409,53.2073922451,8.3878036347 +-29.455681545,124.6103837489,-26.3132510588,-4.1055280899,80.672768253,54.7074555299,7.4443851345 +-31.0572480935,126.2123264046,-28.6387600934,-4.6239123057,79.7103817273,54.1022420103,8.0716641686 +-29.1840846778,125.4416220305,-26.5707949399,-3.5262636669,80.6582389468,53.99222713,8.4964890666 +-73.9703753474,-62.8701471834,207.5739134339,-90.6202946374,174.9634965557,138.7268173471,-38.7972049161 +-32.0341528079,123.7705859277,-27.9931386604,-4.9188114324,79.7340105644,54.6985395933,8.1067042026 +11.6681861006,-9.0077396167,167.1814542622,73.7287296903,99.8890092323,-128.0754164322,-239.5484298688 +-14.6218711423,148.2304679388,-21.6714196899,-27.6476779177,-13.8466255987,-256.7284657213,58.5033602282 +-12.7358520608,147.9248283958,-19.5864410728,-26.9529071886,-14.1597486442,-252.5925576274,58.5432796367 +-72.3362198128,-63.2122410487,207.4704366131,-91.433140775,173.4830784678,138.4009073649,-39.4319960222 +-31.3007180429,126.0350209074,-27.2271583018,-4.3468613805,80.4901099741,51.7450595417,7.6228769518 +-74.2830650744,-63.4312220855,207.4910171388,-88.1226083322,174.2051626162,136.3797210626,-39.0429487557 +13.7797713774,-9.2222013289,168.4106056866,75.0603379896,98.5157498525,-128.6081259043,-238.5055490574 +14.3471247256,-8.856069664,166.8738316365,72.8925651505,99.9242544066,-128.2266444657,-240.4499513351 +-2.8791071759,-23.6318944822,71.3530429401,157.0706376117,-38.9543551878,118.5596295969,177.1598546825 +-54.2053203782,-139.0283597876,172.3595932839,40.4028234207,152.5369234553,69.0069979012,-59.6448719766 +-3.2925167626,-24.0647909546,72.2504568408,157.4340663403,-38.218173995,119.5145484966,179.3055730058 
+-53.1399442704,-136.7435601401,173.6313147932,43.694183998,151.925096805,68.7130855654,-59.0748929729 +-72.4816946624,-63.7015458656,207.8374930812,-90.3269171098,172.3531731044,137.7271581579,-37.7328178808 +-53.5158450949,-138.5753473626,171.4062893725,40.254764177,153.4997208198,68.2046855324,-58.4508580497 +-52.3179542472,-137.8518394044,172.8637114474,39.9749658501,152.9846793041,68.4674407337,-58.7694262601 +-30.6827766612,125.4645722495,-27.5725564361,-4.3143989098,80.5865339206,55.1548194168,9.5372063568 +-72.3562584473,-64.6429421838,207.3849471794,-90.4939019017,173.7829431403,137.1551279975,-38.7199768309 +-71.8365750844,-63.5296257515,207.7112425806,-89.7805306882,174.953036765,140.3031734758,-36.4831024209 +-36.655084217,-27.5810558645,-91.4410907628,-59.1250499517,6.5738908538,3.5400671125,-75.2206420177 +-34.7732472511,-28.729499108,-91.1423645619,-59.8575703371,5.5025452356,4.9022755135,-75.6714004456 +84.2870579828,-56.0917593577,-63.7515890931,1.8632388953,-67.3817791103,-84.4850346398,60.6721258827 +-29.9660737514,125.0433690454,-28.0127602261,-3.4160579603,79.2134373268,53.6377320897,8.9709860168 +-68.2304214814,-2.5849549003,89.7587703081,-3.6966875851,-129.82511012,-66.3539913289,18.4303547639 +-30.6742985906,125.3893882192,-28.0547963366,-4.0254434208,79.9891185846,53.5822513906,7.7083742803 +-30.0095364878,125.3814101022,-27.9822419029,-5.3548044882,80.2127266376,55.0956411991,9.9458539407 +136.0681433344,2.6973561353,75.394389524,-157.2732988857,-23.4016941742,34.5376964475,-9.2661863356 +13.0207774931,-9.5543901977,166.160116766,71.9754462228,99.3699945731,-126.2688873113,-241.0031970462 +136.3270086587,2.3244839737,73.2557490682,-157.8586523447,-23.0086289895,31.993519709,-7.0430210857 +-69.3339084535,-4.031741437,88.6872562222,-3.9402126857,-129.0975294984,-65.9124445164,17.9101122297 +-72.6224515894,-62.7893783199,208.816639311,-89.199437337,174.2241455164,138.2968146203,-38.2605121058 +-68.8652881495,-4.2038054848,89.5638255941,-.5782046189,-130.0833008362,-68.6115837227,17.8381526118 +-2.5220059046,-22.2313212054,71.9276879014,157.7087449837,-37.7665290328,120.0581476462,177.3181189421 +-37.1467059068,-30.6241146162,-92.2693321361,-57.3521914103,6.9804643617,3.4430608551,-75.4307744944 +83.4975038059,-55.1951485836,-62.9411335603,.0803939184,-67.4300652279,-80.5529836199,61.9526976599 +82.5118832447,-54.7058834859,-61.1516877613,.1342897139,-68.2838944007,-82.9414811071,60.6009538114 +-3.5430422856,-22.1704984143,73.5029771705,158.6903980905,-39.4715098766,117.5980521212,178.1993169871 diff --git a/src/test/ut/db4ai/data/kmeans_7_10_predict.txt b/src/test/ut/db4ai/data/kmeans_7_10_predict.txt new file mode 100644 index 000000000..ad35568f0 --- /dev/null +++ b/src/test/ut/db4ai/data/kmeans_7_10_predict.txt @@ -0,0 +1,10 @@ +-3.6109863107,-22.6172568159,71.5002217129,157.7266886541,-39.2273861487,118.2770855792,179.3113687411 +-30.4310199511,125.3571248826,-27.3845179247,-4.6329381424,79.8982470108,53.9955889249,8.5850338946 +-67.9280164729,-3.9091318434,89.3555545521,-1.764779459,-129.6556908777,-66.7980506009,18.1708778308 +-54.0872242299,-138.0433942779,171.6551860274,41.8111804,152.4956587446,68.6476331273,-58.8555970857 +-14.4232687365,147.5560796922,-20.647897492,-26.0697278875,-13.8217971891,-254.3397217052,57.741853671 +83.1050790341,-54.8248471397,-63.8031502693,.7088531695,-66.9853694378,-82.7762331387,61.2500030361 +-36.7588410158,-28.9493409119,-91.6874670815,-58.3805458798,6.4615290159,4.6743859773,-75.0726978668 
+13.3918608119,-8.8100991831,167.4408406254,73.3641107293,99.7524631602,-127.7502081003,-239.6715282733 +135.5291220741,1.3850860831,75.1363695023,-157.4909178206,-23.7378253057,33.6490592628,-7.9599672792 +-73.1345967306,-62.981768127,207.0694570803,-90.3006007434,173.5545537611,138.3088860127,-37.5366639877 diff --git a/src/test/ut/db4ai/data/moons.csv b/src/test/ut/db4ai/data/moons.csv new file mode 100644 index 000000000..6d01f203e --- /dev/null +++ b/src/test/ut/db4ai/data/moons.csv @@ -0,0 +1,200 @@ + 0, 0.611058, 0.664072 + 1, 0.870532, 0.121105 + 1, 0.339841, 0.51475 + 0, 0.0160473, 0.304442 + 1, 0.918411, 0.247067 + 1, 0.955784, 0.324352 + 0, 0.662193, 0.476362 + 1, 0.634094, 0.0551296 + 0, 0.637545, 0.570553 + 1, 0.708009, 0.0278832 + 0, 0.228992, 0.848534 + 1, 0.426666, 0.206189 + 1, 0.934605, 0.271048 + 1, 0.740371, 0.104826 + 0, 0.0801694, 0.736775 + 0, 0.664083, 0.475467 + 0, 0.521793, 0.883628 + 1, 0.459354, 0.168577 + 0, 0.460578, 0.884899 + 0, 0.0952446, 0.845304 + 1, 0.969984, 0.357193 + 1, 0.625349, 0.0628603 + 0, 0.411418, 0.950217 + 1, 0.76404, 0.126598 + 0, 0.256604, 0.96748 + 1, 0.425001, 0.303937 + 1, 0.945382, 0.291013 + 1, 0.956444, 0.376948 + 1, 0.400387, 0.298003 + 0, 0.391582, 0.889785 + 0, 0.635026, 0.664683 + 0, 0.272918, 0.856374 + 1, 0.664774, 0.0700606 + 1, 0.360438, 0.467077 + 0, 0.594042, 0.754773 + 1, 0.965188, 0.47744 + 1, 0.487849, 0.211306 + 0, 0.658003, 0.40965 + 0, 0.073795, 0.665577 + 1, 0.333383, 0.565037 + 1, 0.526357, 0.093939 + 0, 0.204711, 0.867508 + 0, 0.558133, 0.778938 + 1, 0.379975, 0.390364 + 1, 0.847907, 0.145022 + 0, 0.225955, 0.925125 + 0, 0.675867, 0.465584 + 0, 0.113966, 0.744253 + 1, 0.398943, 0.285 + 1, 0.626441, 0.073145 + 0, 0.121105, 0.832534 + 1, 0.452843, 0.231713 + 1, 0.98764, 0.373061 + 0, 0.0539148, 0.384356 + 1, 0.684955, 0.0486028 + 0, 0.590545, 0.745526 + 0, 0.524613, 0.793396 + 1, 0.748135, 0.0633139 + 0, 0.427366, 0.917534 + 0, 0.279554, 0.947917 + 1, 0.460432, 0.174816 + 0, 0.0178346, 0.447998 + 1, 0.923209, 0.274897 + 0, 0.629644, 0.669498 + 1, 0.702697, 0.0683983 + 0, 0.000378676, 0.492354 + 0, 0.295366, 0.96121 + 1, 0.82105, 0.120462 + 0, 0.356514, 0.938892 + 0, 0.0436248, 0.624985 + 1, 0.560436, 0.0693472 + 0, 0.0258734, 0.594792 + 1, 0.496689, 0.0691713 + 1, 0.304005, 0.665775 + 1, 0.61069, 0.0919615 + 0, 0.039866, 0.577054 + 1, 0.363877, 0.461894 + 0, 0.646552, 0.596761 + 0, 0.543128, 0.821364 + 1, 0.582841, 0.125729 + 1, 0.98736, 0.42472 + 0, 0.340077, 0.937744 + 1, 0.553252, 0.0492268 + 1, 0.99603, 0.618496 + 1, 0.354218, 0.49465 + 0, 0.230597, 0.968554 + 0, 0.655504, 0.375041 + 0, 0.236948, 0.929958 + 1, 0.540735, 0.110312 + 1, 0.990789, 0.58695 + 0, 0.453925, 0.913991 + 0, 0.668597, 0.56107 + 1, 0.990225, 0.508189 + 0, 0.128367, 0.810164 + 1, 0.996392, 0.569794 + 1, 0.923731, 0.280673 + 1, 0.90858, 0.212665 + 1, 0.478263, 0.196756 + 0, 0.0149978, 0.496467 + 1, 0.367482, 0.396618 + 1, 0.767727, 0 + 1, 0.98563, 0.535933 + 0, 0.357172, 1 + 0, 0.68033, 0.335491 + 0, 0.0552652, 0.53451 + 1, 0.372836, 0.525341 + 0, 0.0269639, 0.412506 + 0, 0.193347, 0.92896 + 1, 0.847207, 0.139021 + 0, 0.422232, 0.889845 + 0, 0.157467, 0.833649 + 0, 0.358856, 0.966176 + 0, 0.340436, 0.994358 + 0, 0.0706009, 0.590184 + 0, 0.64651, 0.576923 + 1, 0.81031, 0.0478281 + 0, 0.542911, 0.748787 + 1, 0.975844, 0.43231 + 1, 0.534373, 0.109746 + 0, 0.58723, 0.657048 + 0, 0.62865, 0.675761 + 0, 0.116807, 0.694895 + 1, 0.336752, 0.552525 + 0, 0.337064, 0.949499 + 1, 0.326293, 0.60402 + 0, 0.0903431, 0.761584 + 0, 0.644772, 0.375003 + 1, 
0.40431, 0.377206 + 1, 0.783398, 0.112471 + 1, 0.988742, 0.656447 + 0, 0.670682, 0.513458 + 0, 0.171147, 0.809187 + 0, 0.0241084, 0.63433 + 1, 0.422478, 0.257235 + 1, 0.739717, 0.103764 + 1, 0.864843, 0.241822 + 1, 0.721618, 0.106495 + 0, 0.0201201, 0.480061 + 1, 1, 0.652923 + 0, 0.634227, 0.584884 + 1, 0.379639, 0.342614 + 1, 0.342705, 0.633357 + 0, 0.49095, 0.919641 + 0, 0.231214, 0.942067 + 0, 0.204833, 0.882167 + 0, 0.675308, 0.39016 + 1, 0.4215, 0.356828 + 1, 0.783929, 0.0817289 + 0, 0.428999, 0.964569 + 1, 0.344826, 0.499974 + 1, 0.500535, 0.126854 + 1, 0.890636, 0.140425 + 0, 0.62039, 0.624712 + 0, 0.457462, 0.895657 + 1, 0.536822, 0.138598 + 0, 0.289061, 0.931962 + 1, 0.754048, 0.0643849 + 1, 0.455275, 0.241324 + 0, 0.517047, 0.921328 + 0, 0.554618, 0.817978 + 1, 0.39776, 0.378246 + 1, 0.70652, 0.0447936 + 0, 0.542096, 0.854375 + 1, 0.826479, 0.152793 + 1, 0.475503, 0.127769 + 1, 0.887947, 0.293697 + 0, 0.500609, 0.86514 + 1, 0.575215, 0.0954533 + 1, 0.852925, 0.139911 + 1, 0.650967, 0.0034987 + 0, 0.545427, 0.85482 + 0, 0, 0.365958 + 0, 0.0512313, 0.666855 + 0, 0.104578, 0.66202 + 1, 0.394258, 0.367781 + 1, 0.652097, 0.0798683 + 1, 0.943768, 0.59443 + 0, 0.130458, 0.76599 + 0, 0.108463, 0.710976 + 0, 0.541369, 0.786866 + 1, 0.82386, 0.123767 + 0, 0.0594803, 0.656242 + 1, 0.970959, 0.459512 + 1, 0.924945, 0.184422 + 1, 0.978202, 0.509941 + 0, 0.455045, 0.940488 + 0, 0.292082, 0.887179 + 1, 0.596118, 0.0381829 + 0, 0.0596729, 0.570906 + 0, 0.143212, 0.82623 + 0, 0.0457369, 0.583305 + 0, 0.484093, 0.911647 + 0, 0.608407, 0.672718 + 0, 0.650929, 0.513175 + 1, 0.341517, 0.574961 + 0, 0.173157, 0.868028 + 1, 0.956039, 0.497058 + 1, 0.403601, 0.345525 + 0, 0.386929, 0.956949 + 1, 0.87148, 0.215393 diff --git a/src/test/ut/db4ai/data/patients.txt b/src/test/ut/db4ai/data/patients.txt new file mode 100644 index 000000000..cea3d0930 --- /dev/null +++ b/src/test/ut/db4ai/data/patients.txt @@ -0,0 +1,20 @@ + 1 , 1 , 1 , 70 + 2 , 1 , 1 , 80 + 3 , 1 , 1 , 50 + 4 , 1 , 0 , 60 + 5 , 1 , 0 , 40 + 6 , 1 , 0 , 65 + 7 , 1 , 0 , 75 + 8 , 1 , 0 , 80 + 9 , 1 , 0 , 70 + 10 , 1 , 0 , 60 + 11 , 0 , 1 , 65 + 12 , 0 , 1 , 50 + 13 , 0 , 1 , 45 + 14 , 0 , 1 , 35 + 15 , 0 , 1 , 40 + 16 , 0 , 1 , 50 + 17 , 0 , 0 , 55 + 18 , 0 , 0 , 45 + 19 , 0 , 0 , 50 + 20 , 0 , 0 , 60 diff --git a/src/test/ut/db4ai/data/pca_7_1000_predict.txt b/src/test/ut/db4ai/data/pca_7_1000_predict.txt new file mode 100644 index 000000000..73c30627b --- /dev/null +++ b/src/test/ut/db4ai/data/pca_7_1000_predict.txt @@ -0,0 +1,10 @@ +-.6792801647,-.0390913184,.8935555455,-.0176477946,-1.2965569088,-.667980506,.1817087783 +.8310507903,-.5482484714,-.6380315027,.0070885317,-.6698536944,-.8277623314,.6125000304 +-.3675884102,-.2894934091,-.9168746708,-.5838054588,.0646152902,.0467438598,-.7507269787 +-.1442326874,1.4755607969,-.2064789749,-.2606972789,-.1382179719,-2.5433972171,.5774185367 +-.0361098631,-.2261725682,.7150022171,1.5772668865,-.3922738615,1.1827708558,1.7931136874 +-.7313459673,-.6298176813,2.0706945708,-.9030060074,1.7355455376,1.3830888601,-.3753666399 +-.3043101995,1.2535712488,-.2738451792,-.0463293814,.7989824701,.5399558892,.0858503389 +1.3552912207,.0138508608,.751363695,-1.5749091782,-.2373782531,.3364905926,-.0795996728 +-.5408722423,-1.3804339428,1.7165518603,.418111804,1.5249565874,.6864763313,-.5885559709 +1.3167479094,-.8329841448,.7739602974,-.8035154065,-.085430275,1.4961978952,.3977667935 diff --git a/src/test/ut/db4ai/data/pca_7_1000_train.txt b/src/test/ut/db4ai/data/pca_7_1000_train.txt new file mode 100644 
index 000000000..7bf075e65 --- /dev/null +++ b/src/test/ut/db4ai/data/pca_7_1000_train.txt @@ -0,0 +1,1000 @@ +-.6792801647,-.0390913184,.8935555455,-.0176477946,-1.2965569088,-.667980506,.1817087783 +.8310507903,-.5482484714,-.6380315027,.0070885317,-.6698536944,-.8277623314,.6125000304 +-.3675884102,-.2894934091,-.9168746708,-.5838054588,.0646152902,.0467438598,-.7507269787 +-.1442326874,1.4755607969,-.2064789749,-.2606972789,-.1382179719,-2.5433972171,.5774185367 +-.0361098631,-.2261725682,.7150022171,1.5772668865,-.3922738615,1.1827708558,1.7931136874 +-.7313459673,-.6298176813,2.0706945708,-.9030060074,1.7355455376,1.3830888601,-.3753666399 +-.3043101995,1.2535712488,-.2738451792,-.0463293814,.7989824701,.5399558892,.0858503389 +1.3552912207,.0138508608,.751363695,-1.5749091782,-.2373782531,.3364905926,-.0795996728 +-.5408722423,-1.3804339428,1.7165518603,.418111804,1.5249565874,.6864763313,-.5885559709 +1.3167479094,-.8329841448,.7739602974,-.8035154065,-.085430275,1.4961978952,.3977667935 +3.3384298669,1.8009350264,1.7562786106,.6674736596,-1.0501681048,.3293705362,.4550810385 +-2.3470073205,-.6138992374,-2.1618005887,-1.0013431202,-.4622676283,-.8175610614,.1843920382 +-.394916976,-1.669557309,-.9451478436,.0035699935,1.1557394664,1.8774313541,.1121989303 +.1812132377,1.2783044503,.5710577756,-.470907729,.4633583748,2.0173250973,2.1497048659 +-.3710833188,-.7442910145,-1.2807238594,-1.3886645065,-.3824685871,1.481320789,-1.3316687728 +-1.0482820737,-.1437723691,.1808578193,2.3349195219,.3516988717,.3714113872,-.7211061728 +1.7267817649,-.3634064552,1.9021729523,.4075799412,-1.0014425027,.2859855421,-.3359918172 +.9072789557,1.3157136899,.7942941708,-.3956860311,.5727302136,-.6344451649,-.5354211097 +.7641553545,.1930349747,-.4551142077,.4890695289,-.4674535177,.0409796169,.5048103309 +2.8263634771,-.6616784781,-.6449958938,1.7656771145,1.5136797771,.3419171156,-.078138497 +-.1761742405,-2.4157288428,-.9611921829,-.0189786307,-.0415344646,-.1340057487,.1169489359 +.6945103145,-.6737233121,-.0578020594,-.1581261483,.0128326229,1.3632077831,-1.3604499394 +.9690761682,-1.0647911264,.6104715293,.3215018641,.2834497817,.71429432,-.2261191892 +.1860136462,.9920141954,-.440693883,-.694761458,-1.3211977989,.948226381,-.3405496708 +-.9570211293,1.2597875617,-1.673460545,1.1636769997,.0500304481,.3985462645,2.2238424406 +1.4289641797,-.2213857581,-.5186588653,-1.4173610367,-.7839592662,-.0804288465,-.8417310736 +1.2462014604,-.2118324439,-.5109648409,-.7769722414,.5203741536,2.0521293842,.8811624261 +-.3189352195,.3361998306,-.1874605775,-.36727471,.0660742323,.5457953872,.0755048675 +-.1372442271,-.4517632162,-1.4709190342,.6533250158,.686816083,-1.6939556714,-.7047246652 +-.7352122837,.7525794926,1.3004045739,.7711717969,1.0350692306,-.6627859603,.6455344113 +.8379194407,.7745494328,-.2911955231,.2981725774,.9927044188,-1.4900665634,1.6711020807 +-1.4608042741,-1.2607892938,-.2170232748,-1.9112964175,-.1226030907,-2.5064660785,.329341565 +-.2329715196,.3247180771,-.1075889036,.1493243311,-.647559601,-.1305828309,.6056843088 +.1414774454,.4575172113,-.6633578621,-.0186098865,1.2182134894,-2.6863665062,-1.2358119015 +-1.6317394029,1.0687050261,-.622001213,-.3571997757,-.6618425932,2.6777021719,-1.9303781057 +.3879105655,-.4121021457,.9697650612,1.6962272604,-1.2367133077,-.857917804,1.1659792159 +-.1295184881,-1.203087094,1.1034742771,-.3857937532,-.3753778458,-.3159766013,-.2088638304 +.3227855392,.1737180915,-.5468101667,-.5573836407,1.3808779352,-1.1365620367,.4007759592 
+-.0221292557,1.2131972862,-.2601081548,-.1185303888,-.8157014572,.5663605772,-.6647238822 +.5382972382,.550112751,-.9228482206,1.9505620234,.3287594298,1.0501276921,.5551720875 +.0737304359,1.165436463,1.646823766,.3784011696,-.1223942637,.0988322817,-.6027204662 +.8171679797,-.4637342916,.8527949338,-.6325577371,-1.0230085366,-.9394547663,.0487497465 +.8351335159,-.1325719127,-1.5085314248,.6758417703,-.0085820772,.246362986,1.1434968694 +1.3936372233,-.5520829673,-1.4197686996,-.5850291328,1.0511089316,.114351074,.2159661027 +-.8240443773,1.1557957201,2.8379935497,.1800062559,.6920034528,-.303686193,-2.652054929 +1.0051079762,-.9058891309,-1.0061032872,-1.3151514384,.5442293763,-.9404506035,.5613647574 +.040088577,-1.4532725023,-.6214573061,-.2536260679,.3498147381,.3877939393,-.8441346821 +.452144724,-.1183306852,.1084673662,.1260115089,1.4679667006,-1.5523079909,.0326168883 +-.05604027,-2.2612931992,-1.0070393949,.2108974843,-1.0734849455,.0736541139,.1906916228 +-.5805237644,2.2869470572,.67811787,-.8228437075,-1.1318917863,-1.2607466214,1.0574204886 +2.2481908672,-.1286165124,1.0886700419,.5551227989,1.374979681,1.4068712117,-.2312543231 +.682742003,.0194111215,-.2739460473,1.3363974258,-.0126818212,.3054819298,-.574213958 +1.2604250539,-.1508571976,.2420777384,.6423110226,-1.4543175779,1.07853448,-.7675987436 +-.076691884,.8663724803,-.8482475898,-.4233043744,-.8980731075,.0719936212,.2461820679 +.2149095559,1.6442278099,.2649986008,-.0163233634,.7403567309,.5730411113,1.7500497142 +-2.2809478971,1.1352172552,-.2787630615,2.2118532614,-.5225097584,.8244507991,-2.1804899202 +-2.3409838905,.3367025834,-.8466510748,.7966237146,.1656677449,-.048602012,-1.6676072777 +-1.7236747113,-.1976404336,-.2593863631,.364618961,.1365460721,-.3252083319,.1230984045 +1.1416144957,.8487026548,1.4316878103,-.7123187898,1.5266685992,-.0957667148,-.0959517839 +-.9922205496,-.0536509939,.8165837918,-.1707477207,-.3037004798,.3868421917,-.4732651491 +.4223278881,-.0127895049,.6738290828,.53154124,1.0880684858,.4543218857,.1464495704 +-.1483864496,1.1641382614,.2646978448,-1.0108456061,-.163518277,-.2759334419,-.6189497457 +-.9195879189,-.2558126905,.0919927311,.8184108473,1.0434365407,-1.8290957272,-.543591474 +-.5552470678,.2480924526,.8812249603,-.6916994554,1.908647757,1.1955080049,1.7304244445 +-.9356002432,.3779442114,-.1401587854,-.8833278764,1.568433127,-.8115258049,2.4838598066 +-1.1965684031,.7031459899,-1.004635071,-1.8348943913,-.7214523832,-.5477627537,-1.1705587784 +2.4363273382,-.7893177412,1.0542715974,.3568532102,1.1423115919,.7324389232,-.5504675919 +-1.6332764474,-1.0331320889,.7809407127,-1.2083454784,.6193402321,-.291322698,.108888254 +.9552639137,-.0459704809,-.5670089889,-.4715455787,.1717912463,-.4764363654,-.7784230618 +-.7520415314,-.9785196855,1.1669858967,-1.1321947966,.5614582063,.3245438384,1.2820495818 +.1284626258,.9297205423,.3753561984,-.018479568,.1253778699,-.3867371308,.7730126841 +-1.2910574725,.0363277127,2.5928851908,.2107902194,-.9984522648,.774900184,-.3719342382 +-.1256225188,.3514284124,.6638020039,-.9180937827,.3497400715,-.889031628,-.0939736423 +-.4127753611,-1.2063020823,-.7505990277,.6837831592,.7565572162,-.8926698941,-.9860297156 +-.1257825621,-.5797016828,.7435455274,.9923533069,.3084355499,-1.2382379615,1.9495515835 +.5317684427,1.1256548198,-1.2656239794,-.9964928895,.4822573225,.1165889761,1.0355181818 +1.8289491575,-1.0366663416,1.4255305197,.3964136153,.143124257,-.9114007732,1.6252815933 
+1.9005065621,1.8476026046,-.0885753185,.394444381,-1.0369269039,-.7177428192,-.1872100296 +2.2017995991,-.5411669915,-.9693822198,-.2930417079,-.5770073849,-.5898195652,.1674299359 +.6503852732,1.5426844166,-1.1482870169,.9642164551,.1834399946,1.1419237887,-.9416381152 +.1523363642,.0766911609,-1.0842210144,-.0884951859,.5288365752,-.4411842328,.7753529247 +.3761024784,-.0155177036,-.3219563236,-.0072353747,.3000486106,-.9072496403,-.6122999423 +.0868089196,-.3051410817,-.6619106501,.4589269885,2.2084377875,-.697584688,.3028405696 +2.5675827825,.482402818,-.029562071,-.8549176204,-.8955064133,-.8605338017,-1.1005630924 +-.2199590815,-.72438136,-.4309446558,.98014013,-1.2890868854,-.293427208,-1.074711043 +-.7910798431,.9549181813,.6604505757,1.4010305625,-.553059669,-1.3589410887,-2.1882345765 +-2.2042490102,-.5246946287,.6654568068,-.4967648745,.7936743173,-.1837586054,.9893288288 +-.7999135029,.2796514783,.6902275361,1.5567478878,-.2102806489,.067107512,.2720338268 +.0997871325,.0787705671,.0010739298,-1.8373134758,-1.6628121096,1.1297249826,.461574625 +1.1749993288,.0637990604,-1.5935951622,-.9427771866,1.4866375526,1.2963051285,-.0899262686 +1.1761377244,-1.7762017953,-.1000934394,-1.9364586253,.2558353334,-.1904977816,.7107766839 +.467652459,.7425528467,.0857527703,-.5721375333,.3330814253,.0132200546,.7616628632 +.1965105797,-1.1696328495,.7924948576,.5339844905,.8050386782,-.4750070129,1.6716268918 +-.1047541516,-1.1906925636,-.4702414899,-2.3075152742,-2.6589430317,1.8658335302,-.1442987682 +.9241615203,-.8684278458,-1.027786524,.0630971047,-.2997360504,.2360233188,1.4741281396 +-1.2546214968,-1.3770268432,1.8295243197,-.2228258122,-.1003100593,-1.304425098,.4642498737 +.051445049,-1.056617105,.86950723,-.5128935474,-.2912926391,-.6643950525,1.099712384 +-1.8503477285,-.5817490109,.6473378064,-1.667504446,.1422602046,.4896545811,1.5514889962 +-1.2497451403,-.1812283206,-.421823393,-.888571564,.5972975257,.0692543646,-.6036280227 +.8898148568,.9381381535,-1.1061718751,.8618595934,.0672542502,-1.9260273792,-1.0802264569 +.8894678807,-.46886892,-.6236422302,-.4715440785,.7212714804,-1.6664877381,.288421943 +-1.4337845981,-1.0736865879,1.7971733335,-.8756509934,-.1702706015,-1.3636177324,-.5986684865 +.742205768,-.064644699,-1.2664669893,-.6821376696,-.8573008748,1.6299784071,2.6413600483 +-1.531670213,.1536762752,1.3781079297,-.9093488993,.857922501,.2173710326,.4839929834 +-2.2428558871,.479959123,-.5549978001,.705362772,1.778968215,.44956,-1.6553426847 +-.4540764292,-.5682450971,-.2651335928,-1.0151373712,.1385179596,-.3529877387,-.7972120247 +1.2387167722,1.2608873282,-.0793779161,-.329512715,1.3059991343,-.8360951454,-.693735692 +-.141306039,-.8190435096,.8187941354,.4099602893,.0327517656,2.2343964218,-.8455727258 +-.3434167914,.2491913037,-1.3777941058,-.2687958059,.3901533976,-.4802274493,-.0706188992 +-.4794375633,.5589957255,.8539958026,.7504021397,1.4325949224,.1882030351,.1573674187 +-.5991737895,1.3067738961,-.3795095381,1.0907876162,-.621848051,.8867670308,.6095508912 +-.6310697092,.2185794591,-.115040758,-.1780427377,-1.171211393,.5925953932,-1.1380687002 +.9860860649,-1.0690764422,1.2361114314,-.5814853804,.2513504336,-1.8980182781,.8734949359 +1.2099381846,.1455826517,.2363222305,-.0431485656,.1883791166,-1.5697901033,-.5068272182 +.487998727,1.362853555,.3227399489,-1.3791330192,-.4116785367,.7920001806,1.1296297049 +-.3225891508,1.8300174619,.3151865883,.6581597763,-.03475367,-.2549587113,.2304360389 
+1.2032785025,-.0376585135,.7063336861,1.3460848204,-2.0148629476,.6057218781,-.2224378133 +.7894261345,1.6411167632,-.0526901965,-.508022496,.0361333384,.5671547767,-1.5133892823 +.0029311524,.7915326671,.2073704771,-1.115995112,.1952045839,.8192476819,-.6782579198 +1.0651105798,-.5944307251,.0556153266,-.7325687353,-.0342763546,-.1818115239,-.4978854557 +.1590209918,-1.0663527889,-1.6696123501,1.475146372,.4704704573,-.2850659198,.7027963188 +.6924297499,-1.0083068906,.4183006069,-1.2821432725,-.2862410547,-.1007716115,-.8795378083 +1.1945058192,-.198365166,-1.1241202146,-.7645707882,.5447687113,-2.168136802,-.9748757113 +1.0292381011,.988795539,-.1468525888,-.5163522671,-.3696774835,-1.7321591911,-.1094867702 +.7887377101,.1320678498,.4660749099,-.1880998173,.5649138626,-.1547011623,-.6065936027 +1.0909447754,-.3807087569,-2.1462298433,-.7641254071,.1510124178,-.5277906786,.8794415279 +-.2775154059,.2512700439,.6868867918,1.550181308,1.0095455314,-.059369361,.6834047809 +.5537363753,.2023766195,-.1096785566,-.4678750411,-.0778933252,-1.7608480384,-1.6487902383 +-1.645416602,-.9498303797,-.0652929202,.3181242866,-1.2492740311,-.6785179188,-.3722828514 +-1.0338969201,.5471153896,.3256731265,1.9326616753,.9815428052,-.4268787627,-.6653389971 +.7878755222,-1.7038290791,-.7265563291,-.6339659714,.6160374127,.8533237326,-.3449586518 +-.1728896369,1.5008574672,.3543081666,-.4629858305,-1.5833705861,.2309251595,1.5306069581 +-.1547891518,.2269009527,.5691544281,.2074141762,-.9631456582,1.677877227,.4663317837 +-1.1249245456,-1.5714058961,.7348227806,1.5417717296,1.1509781165,.214164756,1.5232288683 +-.1620654342,1.4787324823,.4917731781,-1.4608353517,-.3393884763,-1.176121638,-.7836096118 +.2427921326,1.1542508679,.8868165848,.0251877079,-.7910297099,.560650455,1.1902689754 +.7597346825,-.7315177239,-1.2444876879,-.5947839469,-2.3616141063,.4169211646,1.2699640032 +.5600531449,-.8274281604,-.6718843184,-.1375381563,-.3071549158,-.4471000677,.4370906992 +-1.4058919806,-.1226095936,-.6682983299,-2.1754332268,.5581613793,.8856060844,-.2607656011 +1.6031062633,.213827093,.1823881108,-.4429741599,.345682846,1.0784833893,-.5198619015 +-1.0143698729,-1.3438356136,1.6210354255,.1544080951,-.3569038317,-.0805249629,.4899656232 +1.0360447902,.6021776433,-.6995356252,1.3120596581,.216191416,-.6162396558,-.9566185584 +-.761312275,.1049211913,.1433693723,-.7962996773,-.3484718017,1.383598565,-.3954653074 +-.2291296532,.1560397312,.0481579062,.4919065053,-.6271391693,1.6911311084,1.6349553996 +-.3720945745,.4043555193,.5290457074,.8792437039,-.2485175261,-1.6054968773,.7504240716 +-.9881635544,-.826191826,-.1667875846,.5499120204,.817745674,.0066815359,-.2547218877 +-.4102366687,-1.2029532657,-1.29291975,1.9894457124,.6691716001,-.6989089013,.5668479994 +-.5877684865,-.0950452149,-2.2435788359,-.8528454657,.1038565984,-.8087852525,-.1373285266 +1.1037723343,-.8257107145,-1.1730019156,-.8445785098,.1837987788,-.460496161,-.6115156126 +-.481762196,1.4017498029,.8352111887,-.0630473955,-.3479076704,-.6418018112,.0369501422 +-.1869306365,1.5593549739,-1.3184253465,-.3262277512,-.4737013758,1.8171928624,1.5996680787 +-.5957063106,.8415188537,1.3787458974,-.0784414065,-.3905687496,.1809271194,.7340461659 +.690939962,.6323216994,-.1024770277,-.9362873913,-.1217427383,-.0185039525,-.2012405907 +.7048372767,.4563549288,-1.5283051361,.9249241554,.9430948878,.0833660656,-1.3815390521 +-.1674500921,1.621290466,-1.2101232644,.671702745,.5334224429,.5800381187,2.6678621742 
+-.2370635344,.2651412054,.7667513294,-.3282561744,1.3402249315,-.7878078785,.5835663886 +1.607132323,1.6384113949,-.1784299483,.091561275,-.1075774187,1.5641968963,.341546463 +.3089907195,1.8329369642,.878735388,-.7863721422,1.4673930379,1.966556003,1.3597945448 +1.1278471955,-.0165692345,-1.378744594,.3183976844,-.2426335324,.3905362127,1.4266816632 +.5075683533,.7190112039,-.2389789098,-.7963976883,.0428816952,.8406956457,-.9821473202 +1.1401671665,.2690086845,.2214338228,.5230632754,-.6964520275,1.3995677063,-.3508060903 +-.9372716766,-.2946736415,.2082710421,1.1865748401,-.4276099584,-1.8135331219,-.332725219 +-.6423824143,-.9125202069,-.9515496362,-.3633695822,.7661196147,-.7558672117,.2707731253 +1.4496621952,.6116722198,-.0561892521,.5905677231,.877347051,-.5161635072,.3347328763 +1.1679557231,1.2029058851,-.6410202245,1.3881441776,.3812018399,-1.336099651,-.3773487036 +1.6929020736,1.1062588978,1.7304292846,.6397760034,-.4979564433,.9051809119,.161725155 +-.9158352089,-.6604385252,-.6810631636,.4138771136,-.9532481915,-.813415556,2.05895464 +-1.0926970525,.3264659873,.2665338336,.6807127024,1.0181587881,.2332180019,-.1294325874 +.881349338,1.825165479,-.7284612251,-.068417133,-.1585195208,.7307096538,-.421319116 +-.4004496684,.2883426487,-.5665851092,-.8880591324,-.0656497269,-.2942781156,.6764499215 +1.5775277611,-.8403300596,2.0417385863,2.3298418352,-1.1450352859,.3995577712,.7789424244 +-1.3513860442,.5897804288,.3960617357,.8903275674,.1639500967,-1.658385722,-.8013032709 +-.4001349235,1.7838232304,-.5758247051,1.3471868834,1.3325551384,.2138414482,.8048115826 +.5970635104,.4730640116,.5791961526,-.7869634365,-.717102337,.7151673335,-.1842107411 +-.2073324642,-.5086420735,-.1336930432,.6003386383,.7191350201,-.433855856,-.0033308168 +.1381871838,.298053044,-.8394906541,-.5002166385,-.2051889112,-1.8358295087,.2980150762 +-.6473821567,-.9600351652,.0949214162,.3300027895,.8677737254,.0434536245,1.590244151 +-.0924541791,-.5835779517,.4473115758,.9514257402,-.4112619376,.6323798382,-.4978689474 +.2612477284,.4056882588,.1465249034,1.0814786863,.0040657576,.5569924129,-1.0946230355 +-1.7564172067,1.1256269045,-.8408811848,.3817425218,1.3415523568,-.1405831481,2.3210439999 +.4513123617,-1.9576620629,.1826511269,-.0433996272,2.8695412465,-.1859472273,.2245598554 +-.5995342809,-1.1799120623,-2.0816181091,.322046525,.7273926179,-.69842555,1.0807313677 +.2357271702,1.5153556912,-1.4575108207,.3203614881,1.3415331482,1.4101831096,-.8942108369 +1.053091819,-.0004526803,-.2860146789,1.1647926457,.8349804973,-.212197231,.0857531171 +1.2275369394,-.9496590171,-.6886788861,2.0666528019,.3133642252,-1.4080600173,-1.0430700781 +.879920403,.9954058629,-1.2138462109,.3769303103,.5102922392,-.1972960129,.178819366 +-1.140020745,-.3601902706,.7178508817,-.0211565213,-.2298723905,-.0457393757,-.8296646724 +.3928921149,.0214388278,.234701318,.4754360655,.6295646365,.5144061107,.6425146391 +-.6523005169,.1327944465,.4435945575,.1578279574,.503505354,1.7122071674,1.760375116 +-1.2866231725,-.0674758806,-.1387127998,-.6910148758,-.0640309888,.3796495052,1.1887439883 +.8463197218,1.1385202008,-.1317844141,.5983237221,-.4472545187,-.5679185523,-.1108139916 +.3241063418,1.9379338958,2.0100623034,-.227501153,.3177081989,.9385050808,.751857674 +1.3491539395,.0573070735,-1.2105596378,-.7356951097,.6458065976,.2846490784,-1.2776252443 +.4030158343,-.7023199049,-.7351649668,-2.0645888017,-.3117864105,2.208941074,-.1689910149 
+-.3024050085,1.324176943,.403215756,-1.9319081261,-.1694192423,.4440592719,.2594769331 +.7345594256,-.2035442886,-1.1727150694,-1.2028406668,-1.6631072242,-.2531800686,.1044662481 +1.7271067858,1.3853946129,.8726126054,1.3462207249,.4773225213,-.606380241,.9081622134 +2.2571865387,2.2064316301,-.0104997386,-.2096960831,-.683070305,-.2338405945,-.961172095 +.4627619064,1.5959552907,-.6415899986,-.4577562195,.5410947008,.4606686614,-1.5952519408 +-.2389153407,-1.16599049,-.0261248265,1.278206669,-.990302492,.5267080136,1.1559306819 +.268875287,-.6229649945,-.3306519122,-1.6902767021,-.9544413769,1.3636786471,-.8562924817 +.6891767168,2.1482095156,.4980530064,-.351916704,-.027045912,.4198331389,1.0663469708 +2.5525613031,-.9946424778,.2724360502,-.3337240704,.9860359652,.2636065298,1.5201540897 +1.144031847,-.1828604213,-.2639436584,.1281136541,-.1705648631,.5255151851,.3395697185 +-.1999498308,-.0535677883,.1240717373,1.2716472351,.8270113119,-.0350608846,-.7701751084 +1.3766714931,-.9597374268,-.3674283422,-1.7611190297,-.1735402264,1.03666363,.5839569052 +.2194884714,1.493557365,-.2960055512,-.9099240708,-.3629675134,.4140228825,1.0261001424 +-.6263565117,.4510067715,.8306664755,-1.697609245,-.4960973634,.2149372042,-1.3081446729 +.6101307073,.9362589174,1.4630451758,.6835881858,.1460786522,.2721657086,.5999367759 +1.1819789487,-1.2669122181,.0515611763,1.1543857257,-.3964096725,-1.7088015011,-.5778771534 +-.9560378011,.3785139421,-.4396878909,.0041506632,.4231002402,-2.0840869563,.1450845811 +1.0192114511,-.2266718301,-1.6649089157,.6244409166,.009551469,-.2687996056,-2.0737043593 +-.8718821289,2.6717485195,-.2308363308,-1.0343518092,2.3841552301,1.2262660742,-1.6487403652 +.4709003682,-1.2372822564,1.3886998645,-.7009261761,-.3617735249,-.4291025843,.2418321891 +1.0701102076,.8282396854,1.2763823151,.8353324671,-.3005125443,.3421908455,.0896510513 +.2927618925,.359300274,-.1837693583,-.216575713,.0424143034,-1.3685560094,.5652828021 +.7033275992,-.3285518986,.6253603246,1.2419403319,.5139032993,.7911316852,1.3070494886 +-.1543062851,-.9528512276,.31091456,1.2369016524,.7571788456,-.6223016463,-.3623911669 +-1.2164119233,-.0760625444,.0502443865,.4768795066,1.4539898685,-.2307186127,-.5996370591 +-.5931957893,.1189636538,2.6514625081,-.5745634557,-1.298524963,-.1652479684,-.6490492246 +-.7300710207,-.159567434,.0315072963,-.6428814416,.1052917661,.5258932021,-.1251729198 +-1.2498816607,1.1700202494,-2.3941842857,.2608106002,.4633911327,.3807964809,-.150081142 +-1.756121315,-.9443232586,.5230580507,.9390277022,.9950478021,.4047371044,-1.0829041266 +.1789533633,-.2237558404,.544645689,-2.7874639317,.4767059026,-.7595551653,-1.7921715787 +.3977959073,-.4214268699,-.1245104502,-.2681426857,-.2127335858,.8403451415,1.1267650582 +-.2802158828,1.1930570261,.8764407138,.2487561234,1.4600747675,2.1890969434,.6911944812 +-.7385865965,-.6672166365,-.7090874691,-.7080786557,.249833498,-1.731640812,.0668768607 +-1.0228348026,.6061724717,.7562523296,.9417017691,.1047401935,.1543815902,1.1248180684 +-1.7266576816,.5204731178,-1.344412065,-.489290229,-1.4945599502,-1.1359354996,.5296275453 +1.1279564116,.2629864985,.8869937308,-1.4725283521,1.4314252453,1.6775715049,.6804347984 +2.1217788192,1.5097625827,.7706473126,-.0274393649,-.947825018,-.1557891901,.2803529651 +-.1140183341,-.6977167107,2.1290554766,-1.2093014221,.6432303746,-1.0349617814,-1.9383226005 +1.6194104878,-.2233241495,.4261198401,1.7481801596,1.2088000097,-1.1257722221,.8669400132 
+.0522154703,.112698246,-.8996797715,.2560648391,1.7924753224,.6984688152,1.1675517934 +.5779697266,.2814537957,-.1136125389,.0020156509,-.562425977,-.7563068753,-1.3379926006 +-2.1108687296,-.8177276244,-.1983140073,1.6516456157,.1494798967,-.3831054456,1.5576889746 +-.6573040141,.0778514626,1.879879952,-.2789797514,.5916535736,.0477829195,.8640795325 +-.3475951864,-.3837325121,-.6528718273,-.8733531248,.254571146,1.022102479,1.9042214348 +.7695149269,-.9925785794,-.1191178147,1.1836777806,-1.246728824,.7764743548,-1.4229756484 +-1.2507530636,-.4032853356,-.1714139555,.0445253809,-.2481800949,-.1912423794,.8927486407 +-.7676349809,.5489720324,-1.3587907782,.4962153515,-.482630881,.2208007329,-.0836122764 +1.1200976898,-.8667879327,1.1390248475,.4426525034,.4103348224,-.7747395264,-.7863011564 +1.7781995659,-.0286709811,-1.233952548,.5854362594,1.4567559338,-.1260598185,2.1222577377 +-1.3156969579,.0247847212,.1802989034,-2.5858694252,.063514605,1.2160018169,.673665649 +-1.4116523007,.0058681804,1.9500651002,-.600387758,-1.2376076997,-1.4818836442,.1694340146 +.1126498055,-2.139645749,.0403707353,.71976969,-1.2312934912,.2578430531,.4204046208 +-1.6139369471,-1.0974849938,.3382532194,-.7038775781,-.2846887556,.2282796388,.3853037555 +1.034261937,3.0260370622,.1523371157,.3003384777,.7867732364,-1.0729709592,-.6994234012 +-.3435463542,-.6771853946,-1.4797421293,-1.1288820528,-.9315106265,-.6433069459,.8824901201 +1.8891980736,.2510429096,-1.7455440945,-1.1244628237,-1.8844853767,.0068145501,1.5055105989 +1.9931333449,-2.1129466843,-.2664328781,-.1749040418,1.7766417751,.3051215643,.9860919007 +-.0318201262,.0682350804,.2356822267,-1.5634400072,-.755128984,1.0914489643,-.5490921714 +.8376653894,1.0396118464,-.315986327,.2930542424,1.3121354631,.6733861727,-1.1108823168 +-.9027975755,-.0323422981,-.334416216,-1.5982195375,-2.3138821878,-.8032542895,.2333241897 +.3651984471,1.1272573512,-.5837895644,-.3573703223,.7600008929,-1.3172408295,-.2248026286 +.2009601317,.4321674061,-.8044847115,.2235448815,-1.2554576518,-.5055062956,-.4163492392 +1.4891134321,-.1167356054,.3154347445,-.279287619,-1.5233667727,-1.8185333926,-.1818091978 +.4555881882,.527908783,.1124973379,-2.3350944781,-1.0080659585,.5759607976,1.5622153833 +2.6760025315,.6961390246,1.3960224546,-.4260470281,.6969533594,.2254314129,-.2495056237 +1.1796098716,.0968713311,-.1403289705,-.2449012175,-.3172915913,-2.1218337997,-.9706186819 +.1638998197,.1389572879,-.0493648104,-1.1377891864,.7333173644,-.114702474,-1.7493725167 +1.1656884671,-1.3514463983,-1.1083285239,-2.4831674929,-.1892310878,.021578375,.2977491784 +.8576592191,-.0143111822,-.5943401609,.5635117463,.2613121022,.240243901,.0440015282 +.4208198557,.3150284248,-.0466499065,-.1462670496,-.2948018115,.8440310338,-.9291125071 +-2.0478068762,.1387785531,.4612417552,1.3131215036,1.7437871269,-.2809585737,-.7740216346 +-.7103284874,-.182875276,-.3147815954,.9784424164,-.704973985,.5895117462,-1.1288375427 +-1.7375005793,-.834102928,-.6733662012,.6189765524,-.2163088028,.6895885372,-.6737516429 +-.8756908726,-.2801988637,-1.6403629341,-.9004447554,-.8505172528,-.3903423227,.390114058 +.7914120849,.7563444513,.6171086477,-.3490571682,-.7021413384,-.1699170846,-.1632623769 +.3745348029,1.4304814167,-.1724886362,-.299851895,-.5180879351,1.0472063563,-.060474363 +-1.1508635296,-.1587205921,-.2114485092,-.3409680396,1.0449964911,-.5606959947,-1.7908554138 +2.0888616529,.0694257481,.0594976989,-.1911107196,-1.6086095496,.1551862717,.9386441013 
+-.2910275504,.468685461,.0914838749,2.1410420807,-.6020087239,.1534123503,1.7413461255 +.3044281676,.4490596302,1.1626527821,.5126270971,-.6350076214,-.6169196333,-.6619605595 +-.6009437844,-.946463257,-.5157910938,-2.2749061593,-1.4021619554,-.4761575154,-.0286450501 +-.7274488287,-.9507326444,-1.1053916771,-.6456581109,-.3394146705,-.9670520664,-.7647016256 +-.8080647815,-.640953895,.8369872691,-1.3004105432,.6646711541,.7133920991,-.830842055 +-.2005930879,-1.4119502728,-1.0362964413,2.7928682375,-1.6986636758,-.1990060641,.2550526216 +-1.2415255806,-.6851208699,.4126688979,.028847765,.5300580952,.137290182,-.8043842051 +.7359554178,.6397454772,1.9802787254,1.4114502288,.9996760496,.2022306604,.1857206442 +-1.5594289888,-1.320105467,-1.1224335403,-.4502039462,-.0537677479,-.1474186204,-1.6664128066 +-.1046457169,-1.0285647511,-.1548800992,.6051784484,-.2968498481,-1.1667804447,1.2264431998 +.3924247719,-.3703014439,.862016709,-.6284592511,-.4446957901,2.2232495188,.7026946238 +.3108860271,-1.4508690605,.1415883608,.738055238,2.9819107585,-.440591897,1.5126906141 +-.1214622382,-.5094796249,.9600748468,1.563760524,-1.7896218919,-1.3647887841,.2850740422 +-.2453554026,-1.5760879654,1.6824999289,-.1769515778,1.7353391221,-1.6025621016,.659067562 +.1589254243,1.5312993461,1.4418918437,-1.6386617514,1.349735139,-.664335618,2.0750169134 +-.0257149236,.7059875026,-1.1043416707,1.3834020343,-.804692832,2.644258011,.9623422869 +-.9148025697,-1.442655755,-.4035110946,.3493821773,-.2948791714,.8625941875,-1.2868954171 +.5371063516,2.0086375615,1.3672175252,1.5933067812,-.9635808178,.5090640565,-1.1886880679 +-.7977694706,.9619123655,-.337708938,-.8188725111,-1.4523685551,.6272144713,.2452535934 +-.4914683948,-1.2606791134,.3121672247,1.1885911123,.650146895,1.228109728,-.3959099484 +-.7249067861,-.266817551,1.3098873073,-1.5665746585,.080138195,.4223048531,-.0781362893 +.767995742,.4864011146,.29040025,1.1789667545,.4148224477,.8311059658,-.0481566666 +-.5600994958,.3070311932,-.9408704953,.0577405026,-.0429363012,.7996439386,-1.8717049865 +.3327715992,-.8169104726,.7538957157,.2436573711,-.3214774801,-.8116897446,-1.0764635322 +-.4091029477,.7738482874,-1.3648975675,.5955421325,-1.3026039186,1.9383452883,-1.3026618342 +-1.8239728915,-.7306158355,-1.0379644315,.2892279753,.1768835285,2.0702361472,.9560721961 +.4698253024,.6996440086,-.1188029917,-.7410504118,.1940648621,-2.1119975477,1.6682903852 +.84543676,-1.3251878078,-.6147695206,.9079812929,1.3930226614,.4892930497,-1.6293809297 +.2862875322,.4214191,.5149001885,-.9342271014,.7581542646,-.5400061406,-1.0646374454 +.3317098273,-1.0168729366,2.1524621498,.911869601,.2344759668,1.6085933471,.7129744813 +1.2641227305,-1.9581783444,-.1680809467,.837547679,.8111780773,.4760411362,-.0319953103 +-.3685731197,.0897907839,-1.0884400401,.297913803,-.2268857444,-.0355322303,-1.7390495633 +-.7004359505,-.5248387064,.221367791,-.8251276362,-2.9730099544,-.720835334,-.9212711821 +-1.1925140353,-.4718536189,.0156697383,-1.3642168168,-.0398020821,-1.0290975779,.3651634977 +.4968038845,.1797151601,.3009180904,-.5531444214,.6962050094,1.7084962088,-.4352668245 +.0835377099,.7634339947,-.896845105,.19637094,-1.3780019182,-1.5921542737,.8217235627 +-.4866919567,-.1667088123,-.8521250549,-.6050896124,-.9028892639,-.2604570892,-.7420342874 +-.509308481,.7777052218,1.9249421857,-1.2644275791,-.9401952085,.1873152428,1.2548816321 +-.6743562407,-.1404345477,.1109386362,-.5619736337,-.0640010061,.4293945732,2.1099432325 
+-.3549776997,-.4149972392,-.6951361327,.269422053,-1.6725644127,-.1037365907,-.5440809389 +.615688259,.4101880422,2.631858951,-2.0148279972,.5351231719,.1894456809,.3836895634 +.2794781466,.3311146757,.4556812334,.5383057303,-.3664183953,.4388450198,.0856931306 +-1.4874059009,.2845929033,1.2128188695,1.3166270055,-.3375834449,-2.2702518161,-.9342801665 +1.1069458317,-.9834797716,.1593043665,.8722982177,1.0341578312,-.7391598452,-1.4975263273 +-.8215318214,-.3137736838,-.4157337112,-.3550056288,-1.4105580469,-.4873403238,1.2806897041 +-2.7557685612,.217878657,-1.3833694815,.2831543088,.3251909609,.3693554463,-.2033784557 +.9491513217,.2025145841,.1772201259,.9317422398,-.165921307,-1.3905671743,.4685017688 +.1805192653,1.2995566047,-1.0013878053,-.3067783316,-.9981350932,-1.1972915619,1.7077417417 +-2.255199785,.814489637,1.6385674777,1.4248899038,.5258650632,-.9927153853,1.3405574992 +.070370398,-1.1422174983,.302943703,.7700228033,.2518179637,.6612445929,1.3929127568 +-.3709783646,-1.4297976345,-.2315966171,-2.217203201,1.6698073779,.051592904,-.138721869 +-1.2636705968,-.095972366,-1.5219004385,-.2509005784,.0105861474,-.5837444656,.9801384092 +1.134627701,-1.0786614623,-.3441872885,-2.1760656545,.9157290196,-.4264851425,-.8942230817 +.486764606,.2114724987,.1970597507,.3447769992,-.4947558807,.2318372128,-1.1287503174 +-1.8906960139,-.0878982424,.1598595681,1.2636615902,1.8110613049,-.7069224733,1.244281182 +-.4623042991,2.0274535491,-.7471578002,.9067421279,.3925636463,-.1457384461,-.6883483596 +.3652932728,1.4467058581,-.1812878198,.546418886,-.5539067177,-1.0589371225,.0159691695 +-.435212708,-1.7125710967,-.4507167054,1.1038231277,-.2572783138,1.323931414,-.4958312445 +.9948513828,-.6186047815,1.9484414811,-.2379579508,-1.3206289252,1.1194868421,.6065297938 +-.6975510173,-.0655495922,1.7910572249,.1789020243,.5461069977,-1.883550288,-.6558739043 +-.0900792911,.8942691053,-.4520411181,-.6161927365,1.6170364283,-.3656480638,-.2646823585 +-.0653709139,-.320024479,.6554928709,.0456022809,-.8656243,1.0958020214,-.2406242768 +.1473021435,1.4379013871,-1.0923876571,-.5179286752,-.2910901322,.1472415096,-.7480434689 +-.0146519738,-.2061879947,-.1642506759,1.1036678537,.4485271363,.5936975343,.8208395916 +.7948971059,1.8732516734,-1.9927578483,-.6651822576,-1.2443414905,.3643820676,.1295218109 +.3973446641,.5225167851,-.9542073625,-2.369609378,-.3625977844,-.2527810143,.9470924442 +-1.2158554345,-.249404056,.5698915287,.7699868945,-1.264224292,1.0173236504,-.143815357 +-.3164067262,1.1718400118,.7033307232,2.422048346,1.8204509003,.2277699853,-.6889220562 +-1.1501611023,1.5595256142,.4503562862,-.2230563708,-1.3368108083,.5363556478,1.4319878643 +-.0144051173,-.0047663975,-1.9413139961,-.382127095,-.0268200035,.991130506,-.0034940474 +1.1151764216,-.2422482038,-.3532349795,.8751438162,-1.5800810554,1.7390328884,-1.2278100129 +-1.0301259872,-1.9731452806,-.6378762461,.2781370299,-.425119978,1.492937612,1.2555335062 +.5713050452,.9529213296,1.1154262671,-.3597073518,-.7982599306,.2677311392,-.2515323906 +-1.4929898812,-.1072992978,2.2700033561,-.0944134278,.390845286,1.3260041833,.0060547754 +1.4670976633,.3442409599,-1.5969173429,1.8870974319,.4756859235,-1.8045714115,.6472072777 +-1.8459748613,.2717450921,1.5602886923,-1.2046320953,-.8484454991,1.3998485967,-.2075913754 +-2.3977659597,.5167647777,-1.6814312255,.9669529087,.4809037639,-1.8466583979,-.9408893557 +-.2041668832,-.9283712915,.1586515917,-.2047209236,-1.6511528501,-1.3291278917,1.8023013585 
+.1487024515,-.5061413835,2.1101900218,1.5709109987,.553171586,-1.0767417224,-.1198745766 +-.4147339175,1.4467055796,.2544513277,-.5377412036,2.0058030057,1.2175933804,-.0945320726 +-.0878004978,.606211749,1.4816746113,-.1493137811,-1.1581875431,1.4137647561,-.8265667847 +.3799197179,-.2759550142,1.2870371058,-.0597762031,-2.0084434863,-1.0319373465,.4720462753 +.1037567987,1.3682850474,.2463763187,-.7445040719,.1123618379,-1.1343188649,-.1479441509 +-.1788999556,1.097149089,1.1374270506,-.0099464568,-.0157262749,-1.6311987102,.3822647868 +1.1160810104,-.4388690647,-.2101674981,.7465134798,-3.0289458852,.7856745305,.6592209198 +-.6149807137,.3743095321,1.3863538836,-.070728249,1.0450440135,1.9660617095,1.1328456635 +2.4024249301,.6043162366,1.0101546339,-1.2760122103,1.1231483668,-.292428605,.6401330177 +2.1614818136,.7113648897,.7084312342,-.602823908,-.0690245301,-1.5568997124,1.5745763961 +-1.0484929903,1.5714213889,-.5517749737,.0334705681,-.5933702159,.5525816438,-.0576799309 +1.713680411,-.1306427228,-.8952278306,-.5444807079,-1.3115843248,.5735369888,1.8728214642 +.908351186,-2.3163829599,.5041418019,1.879293759,.09101015,.775131463,-.3290592549 +-.1355020054,1.0728326291,1.1599053565,-1.2299936593,-.385970445,-.1887162997,-.2208837682 +-.5935322607,-.617800748,-.0046877742,1.0636757532,.650813341,.8223593995,1.5828697479 +1.268087918,-1.1754208655,1.0174639705,-2.7851951252,-1.2239420143,-.4180174812,.1462451498 +.640754785,.5827247597,.9165677205,-.1523923904,1.7509101209,.1988973528,.0985746789 +1.8408975542,-.0521965936,.1167969227,.0093767288,-1.1767036866,.6261942147,1.6948028825 +.1229738723,1.0703046968,.5213700251,.0731634675,.3594467828,.4894178693,.2864502627 +1.4725074404,-.7933055237,-.400462417,1.5607504892,.1647441849,-.9375857625,.8832459877 +-.3267199847,-.4424503283,.453801773,.9872370314,-.3710245301,1.25288468,-.0660601626 +-.2551089415,.0024031274,-.2293985301,-1.0960888784,1.4269323899,1.863634139,-.2698389145 +-.1040775136,-.3685397661,-1.0911385503,.8803582416,-.5824430081,-.361478616,.1403467294 +1.6855506826,-1.1923984254,.147600407,.8132524971,-.0012340696,-.1631749438,-.2947667122 +.0589294279,1.1153044902,.2351958572,1.0127267785,.1791057602,2.5589331637,-.4477919752 +1.5961570144,.1762864507,-1.0642399009,-.9024393048,.8473899226,-.7833450755,-.8994572018 +-.0690238833,.7149151791,-1.5977424846,-.7465876694,-.2775347095,-.0087580584,1.6926610364 +1.6021166805,-1.64208226,.0144723707,.5511965425,1.2651712954,.3192534078,-.0115205989 +.199666484,-.4531634275,1.8881201942,-.0730184077,.1456817631,.2642796959,.2973548913 +1.7947720919,-2.4330145535,.4206087672,.0555282405,-.4283618416,.472512865,.0670555964 +.4417389791,.7285812581,.039641386,-.7403424496,-.524878715,.0718627108,-.8275014268 +-.4773997299,-.3619485772,1.2416964016,-1.6947354678,-1.272857076,.6748683145,-1.1209351601 +-.2671104522,.487284346,.6091406205,-1.1886696055,-.9730911412,1.4427231158,-.1547950979 +-.1575225601,-.208466491,-.0487785035,1.0777008434,.1145808129,1.4634153222,-.7058700915 +.6453056145,-.9362906796,1.422610813,-.2519743945,.6519280562,.5625657641,1.3934588151 +-1.53010059,1.1004373439,-.0756898396,-1.0203376023,1.1905105373,.0699757473,1.0251453402 +-.0219203268,-.9399489988,-1.6001208301,2.0999124998,-1.7376263135,-1.1825048372,-.9889209571 +.5687097012,-.2083108546,2.2481597547,1.4699892371,-1.3802570815,-1.5343605006,-.816695783 +-.6503903286,1.5343669061,-1.898784629,-1.3583995839,1.170041096,.4106628795,1.4891328533 
+.1028546429,1.7542030808,-.8251696417,-.4980089037,-.5001524178,.0869030248,-.3768639643 +-1.1903864694,-.6857140482,2.1041738637,.5715770597,-1.5172272981,-.4068796813,.6841590545 +1.9855937647,.2198418039,.5451025196,-1.4770244573,-.9589837802,.2278895362,-.5987025788 +-.3039306908,-1.5629031981,-1.0367196576,.7134621508,-1.4536124051,1.0275022131,1.1777992244 +.2094707006,.7158182591,.7634448464,-1.4339389996,-1.3918601935,-.4029737775,2.1910165939 +1.1296192993,-.5961768945,2.5335540944,1.0360879186,1.414714142,.1041019578,.1865057933 +-.2447621716,-.4989441654,.6040945284,1.1615750175,-.6852986457,-1.4124652472,-.1050601974 +.8600728007,.5057101917,.2912451889,-.3485895234,1.3442349678,-.2335542106,.3279043379 +-.2943068236,-.4898733487,.9072233058,1.5557693212,-.1604235845,-.2620962396,.1594324783 +.9071814525,-.1794285247,1.2078183122,.0722424217,-2.3300104626,.2716007078,-.2671545229 +1.0747587806,-.4427170509,.260632468,-.8766668756,-2.3163246727,-.4527180456,-.998629654 +.4056719498,.3564745486,.3062798699,1.2615781628,-1.2221172513,-.7114167961,-.2397161406 +.3784291374,1.3263810703,.101475191,1.5918001422,1.48271946,-.4997099926,-.7554508466 +1.0239084916,.0015931386,.6473617976,.0344330379,.9620722322,-.720508983,-2.1706398061 +.0178856717,-1.3775319789,-.8342819393,.0445978125,-.7523590043,-.9489352359,.5331298721 +.814699938,.0160530694,-1.5460789453,-1.4344577193,-.6614978685,-.1914287555,-.5468753139 +-.4526861358,.2850312989,.2771283126,-.3912556738,-1.0534426821,-2.6440703014,-1.2010050385 +-.387864891,-1.6747737043,-.5818650547,1.0283544695,.5189353458,-1.2313251222,-.3580766276 +-1.3436667961,-.2037744984,-.3631895281,-.6961144673,-.2948570393,-.7094753326,.3695319431 +-.230801693,-1.0108173833,.2277897139,-2.2366154807,-.0074961913,-2.4380659186,-.5725378715 +-.4285075517,-.6451719092,-.9882855491,-1.141879925,-.341405432,.167510842,-.8306651498 +1.6359135815,-1.5887794407,-.2577829238,-2.1846724135,1.4898895566,1.2944111964,.8208669329 +.371531408,1.0035734912,-.6874875658,-2.3709068479,.465452522,-.2260628851,.6688502335 +-.0812429423,-.0693526716,.6062975916,-.1788748758,.0165461477,-1.5080250564,-.5165196179 +1.2977290846,.9387434391,-.0973837323,-.4171078736,2.9078415181,-.3888427707,-.787591083 +.5204347212,1.7769083003,-.7614328625,-.867071796,.2316084314,1.4553874816,-1.1050853622 +.015094818,-.1270776974,-.1045128517,.0674419649,-1.2880157758,-1.135116313,-2.5656073371 +-.4678866732,-.4382241823,-.7588386631,.8868436551,.5974573807,-.758862455,.1814192074 +-.8783213261,-.6127814532,.2158896693,-1.7293359585,-.4243266177,-.9932250748,1.1985120415 +1.531665414,-.5386499643,1.2787740238,1.1059440104,.8513196725,.7471395473,-.0953179331 +.1435609836,-.5655039249,-.1178383089,.1247264042,-1.6560886645,-1.3435937405,-1.2743110002 +1.6874166757,.3687487036,1.0614564192,-.883179301,-.3379514551,1.7471640778,.8014259657 +-.5943358124,-.0215479458,-1.0578118877,.8486013759,-2.1215888613,2.4585670178,.8494890463 +.25877146,.6186196783,-.4674381096,.9791716574,.3906039525,-.9040493157,.3600927495 +-1.2234259178,1.7930874132,.6083458838,.368884627,-.6126817968,1.0681705705,-1.1283858577 +1.4695380728,1.2746905203,.9033348541,1.0695384564,.0495970708,-1.4292992848,1.0152116394 +-.2585941237,.2178532608,-.6501147911,.1628227608,-.0388920946,-.2438624083,-.2983954537 +-1.110256659,.3077445206,-.3039262758,-.2914827049,-.5067287169,.1486838542,1.7534043273 +.5070287601,2.0301425941,-.1617359595,1.6464490623,.6158785463,1.1993694139,-.8033109125 
+-.3095783081,.712022796,-.1519450463,1.8440125343,.9463151209,.5324913581,1.4929431252 +1.3413436994,1.055581582,1.6171625365,.1732394686,-.6176884981,.6434853702,.0322699043 +-.2983557527,-.6896108651,.9217565495,-1.947665604,-1.0873696679,1.7208136568,-.3659005613 +-.3825336668,2.6341474801,.3438039137,-.5611204571,-1.1088071388,-.4490039098,1.3327701824 +-1.2792900454,1.5487810991,-.7330953412,-.8511496567,-.1004261278,1.5188995925,-1.4933154052 +.1744296022,.8168278985,.4409606676,-.4406173401,-.3737103963,.6115130708,.4817798096 +-.1565773335,-.4025324705,-.7760538707,1.2398169584,1.795024978,1.2521242237,-2.9032690461 +-.3723679247,-.9463254658,-.31327896,1.7063698469,1.7874662605,-.9312576429,-.7992530638 +-1.035722095,1.0411928901,2.3182475175,1.4728328269,-1.1070715281,-2.744760586,-.8804881799 +.7449744912,-.4222139888,-1.444827197,-.3178710831,-.1091465788,-1.3233062967,.3096979307 +.785365002,.0481544408,.6757368586,-1.3563118217,.9770903848,.0008454762,-.9189896336 +-.6422141775,.3565762028,.433057564,-1.3158576914,.0917912524,1.0646451644,.7943853528 +-.9806700666,-1.0231546612,-.7642333544,-2.0694284621,1.2975256033,-.0165881302,-1.1715636802 +-1.036473973,-.4640151203,.0339940075,-1.7304470364,1.4939137889,.7195295933,.1787752711 +.6649715646,.9790181506,-.0656822983,.7671907502,-.4803502839,-.7456604511,-2.0430970616 +.4425739428,.5008587643,1.7155321978,-2.1991624955,.9551542349,.3878087047,.5791478946 +-.8382990434,1.9482418185,-.6809151651,.6766557339,1.2618499507,-.2712535035,.9455176288 +.8865962813,-1.1000560369,-.9115155573,-.5231086206,-.0852142693,1.6577027087,-.8401861011 +-1.2273879417,.6447765699,-.4753230106,1.0529219486,-.0264812451,.5787751624,-.0911778548 +.7387920619,-.5246173392,-.0686652114,.0932493257,1.1238284426,.7693876467,-.8354528869 +-.8707615805,1.1792031318,.2147404687,.474464215,.195785111,-.1016636047,1.5859224378 +-1.1813794492,-.4963461426,-.041108709,1.2381601157,-1.4295069164,1.8061310998,-2.5548210048 +-.0162767155,-.1420863249,.6763248143,-1.0819715147,.2466502857,.4213471994,1.1029874748 +-.187440772,-2.509059028,.7448539961,1.8721111898,.1633289082,.6613623227,-.1844545175 +.307211996,.9714514163,-.1730112327,.1329478346,.1310156285,-.7850637512,-.2168786083 +-1.2972123108,-.8269971203,.4211697115,.5797907938,.4637633939,-1.2773322505,1.4387344764 +-.1986024058,.6743882466,-1.0235221979,-1.5779500302,-.0248284096,-2.3887440162,.7615065572 +1.8571366398,-.8346946371,-.6990583317,.0648370322,.1246343247,.891339214,-.1592962844 +-1.2037254005,.0848503166,1.6610815139,.8091439485,-1.1408613837,-.3004380302,1.116786052 +-1.4727935739,-1.9321668601,-1.2378291197,-.0175452273,.8776672103,-.4487300272,.5188207123 +-.6356142214,1.3386466111,-1.2796986829,.7552004582,1.8724712443,.1481816931,.8268575584 +-.6806013038,-1.5262891515,.611162094,.0402606975,-.1889301032,-1.560517079,-1.5976309275 +-.2172044842,.1323577915,.6869927148,-.9926697947,-.9090369684,1.7891130496,-.1656172513 +-.1800708491,-.6182949509,-.4274558439,.171654219,1.0494095436,-1.1545340469,-.5629643054 +-.5626923985,1.7637667059,.6399747841,-.0570351331,.902703596,-.827332381,1.6077803951 +.3938925414,-.5938708949,.6767590476,-1.5849856059,-.6249059831,.295375255,.0525780389 +-.1151907912,-1.6464262381,.077773214,-.0356489954,.0270011451,-.5287761431,-.2410026796 +.5929607573,-.3563184248,2.1897119141,-.2388791232,-.6758887472,.304878378,.4597136818 +1.6678050192,.6144838571,1.0804597326,-1.777214041,.8121776919,.782603372,-1.6014921581 
+.3613910892,.1961036408,-.8678579652,1.8258135655,.7745970777,.8419882318,.1616555746 +-2.6724357113,.6668104003,.7457160653,.1011000924,-.4425179446,.9343823621,.1060931849 +-1.536426974,-1.3272116692,.6347960247,-.3854489822,-.6664205872,.1072239023,1.8557360264 +-.3828088623,-1.2253406455,.8224042285,.46939264,.1971807006,-.8284938458,-1.1500460137 +-1.492251874,-1.2183657646,-.3752315804,.1894439676,-.249221286,-2.127239298,1.8162026322 +2.6059085398,.0713464913,.8051007618,.6243091317,1.0217190159,-1.2968301707,-.6355590733 +.1060075565,-.7763946619,.2063488446,.3196026191,-1.2780227355,-.6302927957,-1.4145009516 +2.4610961549,.728306554,-.3151791702,.3951896945,-.0918025829,-.9326671341,-.2041140642 +-.9157658094,.4683983266,-.5598814165,-.0174543794,.2706452051,-2.0571601822,.9878250689 +-.7678953118,2.7403563018,-.2651556838,-1.341762444,.5688024529,.8392076105,-.3187061298 +-1.4523979767,-.2005040594,-.7790801153,1.5710229473,-.5704627884,-1.2517591941,-.691482233 +-.2873521615,-1.1228360505,.8399735394,1.5003724543,-.7714586599,1.1914615922,-.6587682817 +1.3561053567,.9636222541,1.9056863606,1.9777206741,-.2176528141,-.5316875072,.2256822447 +-1.3292367835,-1.0448420097,.2086806316,.5158185823,.962176563,-.3521118588,-.9810848121 +-.4682504303,.2220110752,.1636307154,1.1029751889,-.8266652969,-.1977328026,-2.2141925028 +1.2648424654,-1.8706086486,.8958738723,.330574138,1.1458905365,1.0324399491,.1441063388 +1.823712965,-1.119229996,-1.2198748415,2.4269905174,.3474697161,1.1565395699,-1.0156187619 +-.1695569531,.5990560265,-1.9828648651,-.430102085,.6633595586,.0629153742,.5037233168 +.8951638018,1.0441393811,1.2202069779,-1.2527488071,-1.0266405597,.1696317719,2.0869253872 +-.3591509825,-1.1933677973,-.2710913856,.5958024339,.6753311838,.4234312144,.3665830006 +-.3794155385,1.2927671941,-1.2433019175,.02059113,.758816058,-.9069177224,.2402318921 +.8622885743,-.0066118944,.6622352116,-.7276433251,.6654565596,-1.0729151213,-1.0335531514 +-.9438540829,-3.0515893253,-.812666159,-.9197678173,-.0188180545,.5143279257,.6732358149 +-.2238862912,.2554567064,-.8759178986,-.0224664081,.1595512891,-1.1295657454,-1.0892081665 +.6967750977,.9021305274,1.037023138,-.0522288272,-.0245807122,.0605425666,.1236172349 +1.2064348284,-1.230895004,.0129639829,-.9229910535,.8058932683,.1661403188,.7253149826 +.2588125467,-.6041282792,-.0646710901,-.6019905785,-.6228118217,-1.4764423586,-.5770429229 +-.5377246918,.5938604741,-.7587503919,.5151988189,-.0439678831,-.5719716042,1.1210229212 +.055072255,1.2505110116,-1.883522889,.3745287295,-.8754291161,-2.0010352124,-1.3369120179 +1.2055642595,.5039272723,.0776460627,.4999012773,2.2774258715,.3904364644,-.0862218697 +-1.0839591068,-.6597560834,.3683099846,-.8569441178,.9588531497,-.8871936238,1.5284163111 +.8272959279,.3115158227,.5766591051,-.2954879346,1.3540289197,.0060071614,.587697498 +-1.7626544979,.5254304387,.0831099782,-.3921358581,.5387510121,.9305143632,-.9486841367 +.5728071379,2.0301418174,-.7017729803,1.226127411,.651187709,-1.2123227675,1.4099385217 +-.3531169168,-1.4898958076,.7364186801,-.6297072831,.5099621143,2.5034052961,-.6699177035 +-.1943500256,-.8571035936,.4225651789,1.4407209748,-.3629704565,-.9080496627,.5382606136 +.761968624,-.5226410358,1.4983646714,.915999799,-.8336192722,-1.4003747014,-.8853852013 +-.9214332648,-1.3415223001,1.0318875315,-.2319059284,1.3941461995,2.1010887596,2.1978825333 +.3702686079,1.0168867108,-1.5199184031,.9563667642,.8292270589,.4837583945,.669288061 
+1.2776634935,-1.2801524172,.3559425217,.6272328138,-.3618473959,.6699516476,1.7000251162 +-.7526764861,-.3022036822,-.6704585663,-.8213550524,1.1025894229,-.4935033464,.3434597483 +.8077502093,-.5408070787,.9481961198,-1.6943407619,.5858625194,.2745744048,.725668254 +1.7026240275,-1.5758674718,.0070835556,-1.5377570961,-.0533131361,.0043011222,1.0298280414 +.3704087471,-1.1175256529,-2.5671612585,1.0258390156,-.3121935074,-.536032256,1.0618040375 +-.328090196,.5773386491,-.8737483065,.6426516342,-.2209826321,-.1554498885,.6771295166 +-1.3232667117,-.0350914133,.0603763469,-.3376740116,1.5170325117,-.1572902117,-.8764869473 +-.3377649842,-.2198385674,.2416876371,1.510320123,2.1106656271,1.5598428497,.4636177083 +1.089735866,.0059332589,-.5117978716,-.003358202,.8838943999,-1.6936924391,.1253144758 +1.3173444491,-.868527495,-.2778326074,1.2311975738,1.9951937013,.3134646619,-1.7748119076 +-.344771375,-3.1330959284,.1273777278,-1.7617175633,-1.5134755095,.9103722489,.6907976861 +-.7049751922,.2442010562,-2.6499174123,-1.5040805446,-.2671520855,-.9332431677,.2687389295 +-.3983111801,.7788406805,.9449870013,-1.1225454456,1.3156795803,-1.2387635134,-.3437052337 +-.3675799711,-.2249671195,-.408594564,.2931659444,-1.3860670063,-.1425920334,-.3113134961 +-.0637001379,.1018370053,1.3698173327,.0996786878,-.4831854392,-.0192940078,1.0647355691 +-.3689731152,.4474203293,.2178154126,.215232214,.0112273075,.929261975,.1624988478 +-.9312197434,1.2705364118,.8493376491,1.3706024424,-1.2406663751,.1050473699,-.3594568574 +.3249280651,1.1660184004,.7926563982,.0827561267,-.4972132052,-.0536931301,1.4499245603 +-1.5030226429,.8046774752,-.0033070687,.3278475285,-.1680671324,-.3679574238,-.408335492 +-2.0950071577,-1.0293913727,1.7639040392,.252635384,.1265709081,.2515815047,.8117507225 +.3184695481,-1.4475341387,.7502351279,-.2926223138,1.0092121537,1.2374629173,-.0057957353 +.0609300436,1.4680684244,-1.5662883012,.2057734609,-.1979034666,.504226962,-.7769336879 +1.165481343,-.5631158571,.5917088435,-.0240409946,-1.574638797,1.2725676301,.8689505041 +.274695489,.8084811603,-.5404060229,.6238980471,1.7574273432,-.3772668331,-.6899420113 +.7899330987,-.2008163226,.7431333498,-.619182114,1.8909471047,-.0976437165,-1.727348673 +.4799778868,1.3034250206,.5853053653,-.0564991345,.758059606,-1.6450104757,-.3964901936 +-.1549742937,.5399178725,.6991663904,-1.6475971858,.8859675067,.2716282959,-.4912049842 +1.600626071,-.72951539,-.1014091002,.0036689769,1.044908093,-.0802274524,-.2625986183 +-1.1872749398,-1.62840343,.2183863756,-.3170900735,-.0613711307,-1.2194856635,.9520522486 +.7662125005,-.0902548698,.05454207,2.2341184338,-.3664824414,-.5805373525,-.068592785 +.4691307774,-.2593473065,-.097027797,-.1785113168,-1.0805786994,-1.5771487724,.6380243496 +-.5268987733,1.6647860887,.7001358466,1.076455911,.4049800907,-.0197353317,-1.0639348876 +-.98875827,1.6633855697,.2611049748,1.1049834302,.3348131599,-.7436292924,1.0522547972 +-.2280413854,.306443849,-.6807392934,-.6550487344,-.5026473086,-.406667301,1.1908307813 +.067944025,.4467584016,2.0027554575,.9637094363,-.2441237278,-.679033458,-1.1120517541 +2.5274566392,-2.1489587385,1.2068141308,-.3166489278,.2875934671,.1772683785,1.8570131493 +.1164404103,-1.6934563366,2.0303021733,.3056575782,1.5235820621,.3848761467,.7920305964 +-.4320941801,1.0640749127,1.2229732076,-1.7508066042,.6555355702,.5011575325,-.7862808052 +2.0784675348,-1.2254116245,2.7885458719,.7274757865,-.0440961695,.8514126073,-1.1598169261 
+1.6315520204,-.4535905939,-3.4729526348,-.1417773157,-.5851595253,1.3498213719,1.2552100983 +-.247120821,1.0202367069,-.2330002727,-.8567040153,-.1063307414,1.0885911443,-.7877781525 +1.3504843776,.9268341413,-.3480376618,1.4726934769,-.1123891623,.0312325955,.9348264641 +-2.4684268681,-.2816216509,.0741654436,.238288081,-1.0084167745,.0065041964,-1.7784174764 +-.9040829417,1.9286997809,-.4650459648,.6304178885,-.6472687649,1.4631486217,1.5179524482 +-.2143149142,-1.5157176747,1.5547487107,.0317408154,-.3026138638,-.5525527434,-.024129078 +-1.1214298424,.0005823017,1.8279072262,1.2636835854,-.4193391621,1.6425275375,-.4264989438 +.5008243659,.941766494,-.4457833684,-.3836432472,-.2776832168,-1.1933711729,.484313974 +1.5472248516,1.5225126825,.7887413159,-1.0785891033,1.3596818524,-.6634176196,-.2038874556 +-.1921378794,-.3382429606,1.9845139117,1.9996201577,2.5562776599,1.9737490437,.2651385113 +.3899670623,-.4602951459,.8431244698,1.5348967588,1.0372602619,-.8958366083,-1.1829955978 +.7318791347,-1.0146376662,-.1471787728,-.6560510424,.273030961,.2825440177,-2.1515140586 +-.0109989152,3.1503689102,-.013836999,.9163256867,-1.2592025582,.7252205237,-.201113313 +-.8326987147,-1.4654474303,-.594249314,-.4800619214,1.1383423438,.0853239466,.2770498398 +-.6803696305,1.0731974333,.4282990134,-1.5260600787,-1.3237688007,-.3702792827,-1.8847548275 +.5262060951,.7246566785,.8536886099,-.411898762,.5255067689,2.2234214932,.6578108902 +-.0891871787,-.6212054545,.9687405559,-.6395835626,.1486471018,-1.0489782194,1.7391021298 +.4003841072,.7334866178,1.5984482416,1.0643506763,.2297888286,.2738939183,-.0781159503 +-1.2869252967,.3308264275,-.5016762198,.3199747626,-1.6296814484,-.408935295,.9971175831 +-.3611538305,-.2539729701,.428590196,-1.0701275603,.4695481231,1.3432391628,1.1704983153 +2.1288006675,.4367294841,.5316084272,1.1789980459,.8236182011,-.1789187127,-.1063964995 +1.0226934761,.0127966776,-1.3615092415,1.9504024835,.4869214118,.104594805,2.7926585672 +.1094034789,.5661264488,.619236721,.4900650542,-1.0592880013,.163181384,-.9097787116 +-1.0824754802,.5316099171,1.7164965881,-1.6035040284,-1.482733374,.9737988384,.2612312166 +-1.18378168,.0855642358,-.5338667197,-.157614576,-1.0835268777,-.0556991151,-1.3618585135 +3.2737951417,-1.2310733656,.174496703,-.0127802227,-.242351034,1.4136314278,1.7391379211 +1.2467470306,.8051307271,.2849411856,-3.2434177974,-.4268598972,-.5343868988,.8506951518 +-1.0556867554,1.2794072174,1.6740795208,.9597505876,.3420288477,.2414127226,-.5706142938 +1.2073022196,.4803646315,.5989194529,-.5598598847,.2282305893,1.0484304585,-.5890373159 +-.5445675912,-.7404710522,1.357083283,.0288823587,-1.466455429,-.1300699481,-1.0297559027 +1.1929916385,.7151474151,-1.0304823882,-.6359201375,-2.3775504568,-1.3729729617,.0686025057 +-1.0994046542,-.0658136699,-1.2622119731,.4472932895,1.5775939883,.0089147813,1.4145107862 +1.0393280988,-1.3391255243,-.280893268,.5517906931,-.0693957821,-1.6372237027,-2.0500581242 +-.276799982,.2007598894,.044565707,.8774779214,.4878399971,-1.8725148526,.1982357222 +.1654794699,.4001643504,.6052915736,.8435563505,.8172629338,-1.019795978,1.0012914273 +.7800354997,1.1359454147,.8446289291,-.1044105213,.0391238155,1.0619148367,-1.7112457846 +.5431534029,1.0495943361,1.1091140183,-.6717880557,-.5182325027,.7026694654,.2701979294 +-.8587501807,-2.2058628435,1.0434146684,-.3705587534,.7225367057,-1.07311933,.4565963455 +.2447410499,.76377897,.2704175059,.3604204304,.4561486602,-.8615685984,.942659022 
+.1840160504,-.8272811587,1.3382573646,1.4793015342,.0675651021,-.6706610875,1.9563218684 +-1.5689350634,-1.9549421689,-.6945825547,-1.0455874711,-2.1461670052,1.3530964171,-.4475907762 +-.268510336,-1.004647648,-.5817718847,-.2599614454,-1.0420355715,.9320426558,-.3647730238 +-.0358663792,.174043478,.056870244,.6364303365,.2878217435,.823730204,.9645652578 +-.9633242953,.9216929272,.4935687473,-.0434325491,-1.8619519344,-1.3628902261,.6463732936 +-1.2910502849,1.2779275156,.0971036404,-.8461767305,.4559288104,-.4253272434,-.7111321074 +1.3629508997,-.4057340778,-1.0088585021,1.6941913242,.7245887765,.2433518386,2.0890748916 +2.3690078839,.6624647072,-.4132025332,2.9682033618,-.2915109614,-1.253788091,.1887834472 +.065358516,.1455447606,.7753041393,-.8689384782,-.0106644956,-1.5977846506,.0311602516 +-1.2587884166,1.6129285525,-1.6822044188,.0577831632,.6892639849,.9712611133,.4012986657 +1.4308635621,.3534623201,.6404427534,-.1166218375,.9084684824,-.8638113467,2.6358091561 +-.5952651877,-.9044609486,-.0957145195,-1.3468763588,1.0489313508,-1.1498100812,-1.8654937709 +-1.7201720149,-1.4238278685,-.1477899783,-.2377945187,.5870453989,1.665285074,-.7248284649 +-.3865054104,-.0759911252,-.4204875591,-1.1189717041,1.0800760703,-.1294985642,-.1789535361 +-.9003796899,.9519201442,-.410596881,-.5648370058,.0600942987,-1.0055654962,2.1272959586 +1.1254795446,.0996740153,.6879734448,.7589256845,2.0761083499,.3650812119,-.292008409 +-.9650029287,-.7422072725,-.068040554,.7542958414,.6296494258,.1404425586,1.4260337429 +1.0885367103,1.447273344,.1496717589,-.3407750029,-.1303778102,.531466819,-1.1314564304 +-1.0825238778,.6409163784,1.3745619255,-.1789950249,.1992646617,-.0624717573,.1090252041 +1.0889804061,.3859356106,.4274661885,-.0179436704,1.4608571159,1.7810620669,-1.9932497991 +-.0264602781,-.7041703623,.5875438642,-.9127286971,-.7809690193,.0219612848,.9991531057 +2.2837108063,-.533563018,-.4793188286,.7099944831,1.261834171,-1.1310098952,.9215689136 +.7649556122,.6071732954,-.8372487435,.2759573909,-1.0758932938,-.3081893584,-2.5236816408 +.7652926149,-.887298411,-.456646831,-1.5369035754,-.9230118938,1.6201385961,.6034880526 +-.5880393185,.6220141441,.3564832349,-.6076391515,.7190340812,.1235317349,-.165982131 +.9603868827,-.9089995655,-.3165582357,1.5199178869,-.3158393672,-.1680959074,-.528329666 +-.8600304869,.5885747681,-.9400477995,.1706433824,-.026749297,-.65253877,.4789441407 +.7983769178,-.2304729217,.4009795328,-1.1325400317,-.0714752934,.0920213523,-1.8953320345 +-1.9481261343,1.4912202041,1.2254623239,-.3996683563,.2336160499,-.1578115702,-.3537917116 +.6728699785,-1.3818555159,.7554360685,.6825931909,.0661336281,-2.0195462048,.1822049498 +-.8240531802,.8874401319,-.3694994168,.4867225944,-.5039132624,-.1302032754,1.0490125114 +.4157680026,.3920609198,.5221256861,-.0526374486,2.5858854256,.8174984231,1.2756680946 +-.3034456296,-.4723702967,.1513156855,-.436502896,-1.8361078202,.3647777432,1.2913523243 +.8507463349,-1.619319486,-.7990675995,-.0007820767,1.6643194876,.7739840027,.4778034996 +1.9271119715,.7497529625,-.4946482127,-.8568929297,1.6522817473,.0827671597,.0904812622 +.1673409214,-.6249082401,-1.0072369262,.8298852504,-.4730699902,-.8700937485,-.3769518326 +.4053357692,.7660137652,1.1116382438,.3844049848,-2.1850055409,.2223876366,.0139127334 +1.3635736125,1.4299237405,1.5352209509,.9578475999,.063267364,-3.2070375219,-.3377914691 +.2891717139,-.8012956759,.1509730812,-.15473375,.3781215207,-.4656884053,-.4715625286 
+.345956139,1.1081854508,-.9996628232,-.109074861,.5715365898,.52165617,1.0261656207 +.5121451412,.1923898071,1.7471822307,1.1011634064,.6695917552,-.0120713923,-.7238481182 +.5727190551,-2.3377451456,-.1764548494,.3944112118,.0668342884,.0856339625,.0427686391 +-.655407769,.3617444948,-1.5012895093,-1.0014429292,1.0842228533,-1.3613824474,.7191087762 +.7783382833,-1.6611740568,.3154900991,-.1933011583,.2283893791,-1.1537580151,-1.1833128432 +-1.7959027764,-.1465128789,-.3359856544,-.222471416,-.2661177136,-.9775179257,.5533485093 +.3589671553,.591173367,.1980982249,-.2975642031,-2.784775946,.1697420822,-.2338172204 +-.4513096186,-.2072928911,-.0988106061,.03139937,.414081877,.4041556659,.7638285684 +-.6010020727,-.2333112961,.3039580339,.3737103778,.6940436563,-1.6288914185,.8374948093 +-.6350184872,1.4022402685,.598974435,-.4541655849,-.7512870347,.1328426591,-.7205666046 +-1.9954624341,.3917862858,-.2505665943,-.1624146804,-.8600513888,-1.9647536892,-.5861092402 +.2600650022,-.0848319576,-.3701670509,-.532280132,1.8744439473,-1.7072104859,-.5371794097 +1.406270532,-.124105556,.1407229232,-1.3276576153,2.1255225399,.6329045069,.7791999275 +-.2250801349,1.6461172122,1.2403553554,.5253160051,-.1549586059,-.8276417109,-.3211396409 +-1.1484683439,-.4494539585,.4215600584,2.1779924112,.650608855,-1.9291649501,-1.506284768 +.3701208003,-.1556082439,-.4917862493,.6824374372,-1.4336056111,-.1129879258,-.2641221268 +-1.1155229531,1.1628001021,.1069173198,-.423889894,.7228816251,1.1810003954,.2971041631 +.8799784289,1.048870701,-.3128849766,.1780890871,-.1177835058,1.1870399963,1.7106156159 +.9233150245,1.0710124124,-1.9880782963,-1.5771380913,-.0412981843,-2.4994849218,-.3341899931 +-.8357786168,.1116209437,.5044563536,-.3196938941,1.4089427946,.4179313344,-1.2605409284 +-2.0890831369,.2819606751,.2032481731,-1.8956194682,-.5501167874,-1.6583951796,-1.6220496433 +-.1502279613,-.5037077344,1.6305097034,-2.45238227,.0846980873,-.7682944376,1.2138446935 +.5929622275,.1557268704,-.0351839087,-.1098503705,-.9874707019,-.2636768683,.2644037713 +.0406493899,-.6575669654,1.0586224798,.3594664818,.6720719656,-1.3573570038,.5787817253 +-1.8856237184,-1.8613938569,-.9577852447,1.8693933958,-.7286283788,-.0612250577,-.7229060987 +1.7225027885,-.8940405982,.07334304,1.5256482703,-.1912816737,1.3408292061,1.8531618644 +-.5148454488,-1.5125237621,-.6274055004,.8622597981,-2.0969970265,-.3416181628,-1.9212906824 +-.3895454094,-.289556076,-1.1476354323,-.5433513035,-1.2303581835,1.0106936713,-.3145311179 +.5314663458,1.3422378433,.1136924317,-.0361231682,.4200624217,-1.1597700381,.2189379244 +-2.9709413669,1.0593331786,-1.011220607,-.6328098735,-1.1048027053,-1.6162432988,1.1594081025 +.740851387,-.2739011473,.4431188552,.2527637514,.1690642752,.6647340297,-1.3073496377 +-.940571335,-.8287441305,.0367474832,.9917714473,-1.1868562851,-1.0257437395,-.2457556524 +2.1255301197,1.0779722706,.6203361508,-1.4374292678,-1.0829900038,-1.6353750917,1.7175344153 +-1.2060622873,1.970045649,-.6720933447,.9065157801,-1.0062833476,.6260772958,-.198578533 +-.7540751249,-1.6296839874,-1.1791328972,-1.3223266482,-.9170952531,-.1540929144,-.6674460482 +.4950729337,.5771171155,.4710219552,.351620127,.2312737454,.184572633,-.7653632661 +.5045324886,.8697731327,-.6197760882,.1164177242,-.2253401303,.1534776373,-.3339517754 +-.7034350898,-.3080902731,-2.2621573684,.5999790575,.6562050049,.2070841744,-.985097456 +.0253526169,-.4432851993,-.2386157156,.2856893954,-2.3348686672,-.1543758833,1.194863145 
+-.3157388707,-1.2548495198,.7012836485,.1458002411,-1.7598234222,-1.458237873,-.2429231637 +-.1596319821,-1.2682662735,1.6886983956,.1152718343,.6905078506,.2388894302,-.6447643828 +1.5392670905,1.6217527702,-.1702896501,1.85542778,.7557896818,.1759776509,-1.1585571441 +.2588859636,-.4924532689,.1329383386,-.5928672187,-.4675678146,.403065408,1.4476944804 +-.4675227008,.8235792887,-.125282679,-.2958568283,-1.0896708321,.7350051159,.4629173174 +.6265264762,.7625032366,.0576772228,1.1938259399,-.4223353111,-.6993285471,1.5397790471 +-.0978352042,.0350472152,-1.2101451155,1.0572633413,-.18623929,-1.9085651071,.7779879048 +.7803962071,.7501510224,-1.6929133274,1.0024383553,-.1587696191,-1.5704367902,-.4475498483 +.2910501057,1.8394562198,-.7480062986,-.5156484964,-1.0093778536,.3784313346,.2100292516 +-.0720269627,-1.5028924901,-.1510241688,.6198919445,1.3931381587,.0807467378,.0991606671 +-.1574467193,.6567654139,-2.4342827524,.1512948821,-1.2238684372,-1.1139489731,-.3032479308 +1.2980216462,-.5478576245,.6417855003,.5200700552,1.3984830039,1.9942874632,1.0535615668 +.5291386991,.1315975634,.2214050097,1.5947114673,-.6773185247,-.8945369746,-1.461919495 +.3394565824,-.5345192084,.4507273272,1.3538960991,.5879083293,1.4937421586,2.4202779284 +-.4959508317,-.5198438142,-.3484590758,1.7190671779,.3964494213,-.441986816,.8974848912 +.1643870543,-1.1157332567,.0004140415,1.0213122769,1.5917740373,.102869218,-.2613938955 +-.6076080922,-.4684516084,-.3815346845,-1.1119889599,-.799286622,1.0052275701,-.5315384847 +.7824445244,.7884536635,-.7084557167,.8438927387,-.2486315516,-.6996998088,-1.3098567418 +-.9294935517,-1.4538681767,-.8932236702,-1.3959996902,-.5713059149,.5136522936,.5024457575 +-.5397304196,.0449598072,-.6150061404,-.4806193592,-1.0123351106,-2.4089942022,.7790949891 +-1.0031775696,-1.2708571299,.2041726618,-.3347899712,2.0400641343,.6252131126,.6844859524 +1.804397682,-.0863289626,.0085696029,-.6443030465,.1553986152,.2053501926,.2498151372 +.5254595854,.9722178073,-.4369617367,-.3620997308,.1293976315,-.107170139,-.1909036099 +.7066578451,-2.2320742645,-1.7157711419,.8359889615,1.2220290538,-.177194131,-.6291476684 +.6170450608,1.5269556206,-1.2565154473,-.2774343693,-.6010932433,-.0158092498,.5223688422 +.4403872567,-.276634426,-1.8230937853,.0446809105,.3453161694,-1.3384634957,-.5207026855 +-.6482788722,-1.3008574806,-.6453929288,-.2388261989,.41493842,.9256302165,-.5242200897 +-.3916504426,1.313448016,-1.4166586641,-1.2022583543,.7431269933,.8465904277,-.4803127822 +-.0343741421,.2533880489,-.1202344164,.2681691955,-.6938381219,-.8934206594,-.3387746529 +-1.3592237288,1.2525980064,.7770160107,.0618299987,-.1706798418,.8631212359,1.0610421767 +1.0596960892,.0795195839,-1.5440921894,-.7526305783,.6602048219,-2.0641360407,-2.3455435359 +-.8430977662,.1556722266,-1.197733642,.7576526615,-.6190512006,-1.3175325221,.2968881155 +-1.8451338029,-.7154579179,-1.0908332224,.2966004691,1.5571724243,.406685994,-.0493504732 +-.3348971862,.1304072396,-.7187305738,-.0338620125,-.2540835638,.2019228108,-1.6468028281 +-1.4216492013,.617676798,.4501154048,-.8938105531,-2.3691286536,2.286636041,-1.7702847016 +-.0790137868,2.1578441629,-1.1899770336,-.5862897686,-.3912996522,.4575036327,.6156934468 +-.63799675,-1.474751417,.0785606124,-1.1558148617,-.1183253699,1.0447669075,.4590480894 +-.2920635362,.405899897,-1.1007825734,.9021188072,.287519178,-1.2350350297,.7836998279 +-.9679708102,-1.4820279687,-.6927120193,-.4270938876,-.4395396998,.710999347,.6653764696 
+.1768954923,-.6374974289,-.5689764274,2.7685688143,-.1536680679,-.3053463703,-.6695073009 +.1176088808,.171617908,-.8226314666,.2028676236,-.0349321322,.5914987403,-2.073424746 +.6529020682,-.7197777386,.7680360009,-.0263163664,-1.2013806568,-.5817278547,-.1961538932 +-.1836231865,-.6102655214,-.3468573672,.3219332938,-1.2422935286,-1.5866564905,-.7002301721 +1.430216394,-.1464497145,-.9946930307,-.3018829716,-.9041003878,.106658895,-.1540549892 +.1666600558,-1.2522932193,-1.9527205167,-.929161667,-.095273777,1.4437969406,-.6810385683 +-.1979539796,-1.2965206558,1.1629765193,-1.2666364017,-.7521606971,-.82468318,-.234797346 +.4701349379,-.8861427036,.4933769036,-.3838389214,1.2209603081,1.0824001227,.142252294 +1.6155817774,-.5249491402,1.3365276685,-1.2137068863,.4785101999,2.089424548,.8381566162 +.2407287682,.0367975579,.9065080486,.3528940513,-.0460622118,.3180230893,.674370367 +-.4373412562,-.9861296543,-.4429813777,.0555218361,.0453640235,1.2844367542,-.0920688313 +1.1651978012,.0098305424,.9916199918,-1.2610126234,-1.0584096723,-1.0033524734,1.5879746053 +1.5057741324,.8168796549,.1168944543,.5611064398,-.5368363842,.6818723515,-1.5381771101 +-.27252652,2.4856712022,-.5263123976,-.1588291752,-.3052925921,.3613634073,.2203424438 +1.6454370723,.2130885047,-1.3569188367,-.5373846098,-.7784015138,-.0803221515,-.5532138513 +-.8446530084,.5471065536,-.1172561077,-.6893055046,-1.4426335813,-1.3813995607,-1.0236766455 +.7917533235,1.0853513751,.5248354639,-1.0419961235,.9032753113,1.2197949909,.0719003942 +-.6449873815,1.080301817,-.4119595365,-1.0386460684,-1.4943703393,-1.3024381101,1.7266529817 +.7499529469,1.4721320957,-1.2582493133,.0609655117,-1.0904237998,-.3434116508,-1.2104215123 +1.1470714706,-.0552826636,-.1163049302,.1054641179,-.2772886511,-.3413509614,1.0588550453 +-.6287275504,1.374370599,-1.5767021722,1.2717035069,.8720413162,2.1620867518,.17645319 +.8657743732,.0948645195,.6133170764,.3943547556,-1.348305101,-.6260828196,1.3187026695 +1.1303364223,.1257227775,-.8519835723,-.687923196,-.9544998942,-.7771219755,.5440518078 +-.3721520225,.9114569606,1.1236552963,-.2698329469,-1.4096837889,1.1578184389,-3.3615645818 +-2.0174467395,.1957822052,2.5030762489,-.2305454339,1.2434876309,-2.5137870056,.4357512715 +-.2517567101,.1074473669,-.1880385114,.3185392326,.6882869098,1.1592304919,.9521724622 +-.0649463987,.2946714279,-2.0400714837,-.3078879759,-.0702897978,-1.7086224354,-.2302781172 +.3954782781,-.7426404791,.3343173641,1.0808236981,-.7317285113,1.3606942695,.7759536892 +-.5972839285,.4345945042,-1.1878061907,.0022885945,-.5885017927,-1.3591381562,.0142331198 +.1693819706,.4250403009,.2823602914,-.6101144404,-.1560265408,-.2294151712,-1.4182273237 +1.4303007017,.6859252999,-.8665791338,-.4165296108,-.5309640361,-.1362563182,.9031659098 +-.2930301213,-.138232794,2.1649759633,-.2078022305,-1.1495631406,-.8746184103,-.0848437379 +1.5708535527,.1536505599,-2.0023581624,-1.7973701931,-.3094291251,1.808760478,1.2779599721 +-.2432786395,.0322633366,-.6702784119,.6074947216,.0908715738,-.4133375343,-.8766596143 +.1278002128,.2435742237,.8158731512,1.6119575363,-.2358041723,-.3405719082,.6121679524 +-.6262281424,.855201522,-1.2542421686,.0090258367,-.1878652835,.1066530854,-.513369726 +.5748119489,2.4136243294,-.1574334225,.7200452226,1.0815368003,.1750060274,-.3525054785 +-.0040673694,.7173025297,-.0786046903,-.176454269,-.1058043609,.1655922901,-1.0217947326 +-1.7703279897,-.4269507008,1.4262856223,1.3741787607,.7595711626,-.5079700074,-.9303052791 
+1.144012586,.2210210705,.0471882429,-1.7581893561,.2020107467,.6327318638,-.6533984007 +1.1310516517,-1.3172467536,.0212314536,-.8166268272,.5868146322,-1.7709529432,-.1291864567 +-.4664684982,-.2217645675,-.0103501798,1.4134243351,-.8460435,.5115710326,-1.8397380582 +1.3746679313,-.777561708,-.6364924013,.7354742638,-.0347562698,-.7881966798,-.1972302599 +-1.7077248469,-2.7769133984,.2300188955,-.2151998401,-.8035493714,.983153836,-.862576197 +.9753384061,-.7467411337,1.071266866,.5274100525,.7745212422,.711866605,-1.1406487601 +-.7594405442,-.7863248592,-.3876531017,1.1437607401,-.0168510301,-.459710721,-1.4301667424 +-1.0459799118,-.2935403663,1.0032603942,-.087763242,.7996559382,.6349898477,.0114524599 +-.8696980918,.6778960247,.157359623,.2860767619,.5918629633,-2.2505293833,-.9621569428 +.5208044176,-.7831356445,-.2783454605,-.0910759651,-.9541303052,-.8538448289,2.2374849497 +1.1119830866,-.5921717623,.4620837946,.1943546743,1.8684880674,2.1691162319,-.4333861445 +-.1574172805,-1.3101905929,.2590724405,1.7432262291,-.5714006303,-.8414992217,1.5650337089 +-.5459963038,-1.1927883583,-.9781424979,.3399429245,.1146648089,1.6483889975,-.3487975198 +.447812438,1.0903758131,-.8286231523,-1.4981808085,1.3540324132,.1790588259,-.3203451817 +.7667136521,.0813831501,-.0241825446,.0189881537,1.1433037239,-1.5028912353,.2802259318 +1.2850464143,.8176182372,.6861257704,1.3131234972,-.1016471667,.1215621309,-.0380899119 +2.4064715559,-.526920286,-.0776813878,.8407271392,-.2866110402,.7459699812,-.0760825591 +-1.6031328568,-1.5865389549,-.6086207357,-.28587329,-.1642364464,.7029506684,-.478329692 +1.3362719148,1.0264531207,-.2923642624,.6334361283,-1.0579889331,.8022673257,.1983144047 +1.813046728,.8804234785,-.1359071247,-.3139506562,-1.3511119238,.2386505987,-.4335693318 +-.4081332906,.7373169962,-.416584582,-.9802708999,.6320350647,-.6877796786,2.5491941328 +.8109427156,-1.5206647929,-.5515014967,-1.7147579761,-.6705283826,-.2791702428,-.5073032661 +.4214834633,.0242852196,-.5977239782,-.7218663458,.3144796268,1.1000522741,1.3608200462 +.9276423063,-1.0732576235,-2.6021063706,-.2477129901,.6767632019,-1.0383041281,-1.536380129 +-.0975782148,-1.6315546036,-.1577927974,.4435910957,2.7556475123,-.113449224,-.7321463132 +.6414991182,-.1489249102,-.1948159745,-.3924996646,-.543655151,.9576357639,.4378349403 +-1.2062071192,-.7321167871,-.1406228583,.5370306007,1.520615673,-1.6798663976,-1.4505922922 +-1.5408435207,.1202864638,.4478965464,2.8725730824,.1588189079,-.1960801487,.5159104457 +1.0378655835,.1510645047,-1.2066842842,1.5461946792,-2.7662633417,.6530517512,-1.8461789997 +-.4854957972,.9460114847,1.1002849762,-.16054935,.8878360227,1.3866751802,.4791380086 +-.8527767584,1.0577036145,-.3960060225,.5580767404,.8842467527,-.7761633152,1.7742335275 +-.3935656627,-.0067895216,-.5539423175,.8530745664,-.8203959778,-.2840109614,.4234882658 +-.8165295423,.3985350728,.9365899264,-.7863068856,-.0123080003,-1.0721289361,-2.0394231054 +.5229550286,-.0777990587,-1.013520011,.7597562153,.6092969588,1.0133813996,-1.9419180085 +.533451908,.1091693979,-.6306407894,-.7760815313,-1.1233573946,1.2236423284,-2.1406462663 +.4649461997,-.3137558372,-.6282423014,1.2168801821,-.684809684,-.3578568352,.3859521223 +-.0084533387,.1040288404,1.9266925673,-2.1596246369,-.3737611983,.5557343856,1.0392139609 +1.7031043104,1.2473684326,-.7349984059,.0218648174,-.4821509729,.3071737061,.2266646253 +-1.3367105591,-1.749920607,-1.1984339424,-1.1171141878,-.794494375,1.1636263495,-.6102337857 
+-.0097488603,-.774407321,1.8063943345,-1.0910221125,-.5237217507,.3022302567,1.3703635192 +-.829041628,-.9836337423,-.7921155597,.8178405237,-.2790794025,.6556467723,.6046191251 +1.9350074943,-.4657471371,-1.2261995376,.029369617,-1.3156782925,-.1037542958,-.3780016507 +1.9215958512,-.9625140638,-1.5352473791,-.0654025734,-.9820960107,-.3386194025,.707750819 +-.3288862574,1.2250020165,-.209765512,.1955583577,.1207316248,-.0928237585,2.5684153682 +.4640139371,-1.1219432852,.8375247602,2.077062578,1.3170840892,-.763399889,-.5747394293 +-.2797459761,1.2732844634,1.0700085499,-1.0621385921,-.7472832264,-.3316053862,-1.3112976867 +1.2192268354,.0283077513,-.5361815897,-.5257278719,2.6668940299,.1786734557,-.849318565 +-.66114078,-1.2725071491,.9363943881,-.0535895537,-.8113559426,.5538969506,-.1468124902 +1.2469352733,.0844971479,.8137229848,1.1066744755,.759991936,-.0033617949,-.088544828 +-1.014931673,.7723542998,-.550960797,-.1031140456,-.5091951134,1.7325127437,-1.4717285506 +-.6996448509,-1.452298658,-.8291921827,-.1905403827,1.281663358,.0389484918,-.6032404612 +-1.754291048,1.1243278509,-.2852104688,-.2508847863,.1990151642,-1.1115188845,1.1120976606 +.4155497793,-.8924954311,1.26848893,-.2128240956,1.5266847936,-1.6188243176,.1136489485 +-1.2337010707,1.4148275832,-1.1696008689,1.4317020554,-.5032384055,.6671891938,.3369161834 +-.0071225078,-.8462393924,-1.1847809942,.9020760986,1.2664497161,-.5348226709,-.3445749902 +.693641255,-.7262816898,.8652078309,.4241020818,1.1214919838,1.1959365412,-.1436877502 +-.4177617119,.0623760512,.8043615692,-1.2345165846,1.6513894089,.9185257315,1.695242521 +-.0248001587,-.2556214216,-.2757523721,-.208959281,-.4442439235,.4620006528,.2805199474 +-.4166028736,.4345691991,-1.8032965523,.640968167,.8597138164,.1769367622,.777057859 +.7425129625,-.1224359037,-.788789586,1.0060488725,.7461954231,-.7341964942,-1.8700323927 +.1969924982,.8343526875,-1.9773910165,1.0182341615,1.8485020307,-.2275518041,-1.6152212821 +.1163660578,.0482620272,.4652182742,1.2045661896,-.709263228,-1.564739074,.6000727507 +-.8242659629,.9701241202,-.1878832155,-.7207441323,1.5375552824,1.325444512,-.2769445487 +-.3508857636,-.0024159499,.5206643678,.2164843902,.171657559,.0931023427,-.323454652 +-1.9174930577,-1.5464295252,-.4094542474,-.040725235,-1.3668131803,-.4558178543,.1637984024 +1.2320277674,.4959868276,.9560767986,.5434193021,1.0207808275,.3642666437,1.4238846102 +.6634144371,.0537953315,-.5492332363,1.388924085,-.60540671,2.357226953,-.6596009044 +1.0912360934,.0788577308,1.1415587233,1.2367847571,1.2994299149,-.3208579538,-.3493907123 +1.1055968792,.5546549984,-1.8687107227,-.4622543424,-.0143184173,.0920906164,.2169849189 +-.6509120356,.5779206059,-.3530568303,-.8876986893,-.5764951187,-.2396885138,-2.8514384132 +-1.4969894706,.9718008001,.3741944497,.3742037976,-.2491448609,-.8956210985,-.9117228843 +-.7772808058,.9790122401,1.0119198505,.047661136,-1.027326199,.2349996149,.3407011414 +.2126016877,2.6052539197,1.1305587701,-.7485500403,-3.1447292438,-1.0385019498,.0985983534 +1.458040003,.2693734417,.3829284063,-.3447231774,-.0892920662,-1.319442396,-1.059547325 +2.4253178268,1.0800881876,.5814336312,.7130778856,-.3106488708,-.0313175776,-1.1392265628 +.8689593011,-.5069756974,-1.0518682985,.9052228074,.6049684516,-.8295449772,-.422188655 +-.5675045063,-.6786833112,.0902824494,.2193781307,-.400297369,1.0765557051,.8811523195 +-3.1592277404,.4950942286,-.4498871491,.296502932,-.9223745359,-1.5260692292,.2263638918 
+-1.2497212017,-.4018603253,-.3187413385,.6304031806,-.78125476,-.869564091,-1.0102616644 +1.1699487299,-.6657137133,-1.7098592268,-.0181420016,-1.7117048027,.3277647464,-.654245825 +-.8038848623,.4979451418,-.3223326788,-1.3661467193,.9645093764,.0567144323,.593050978 +.6969228361,.1233552637,.8044768028,.9683040591,-.2569374235,-.1472584514,-.6401863337 +-.1801207076,.857766396,.7536296918,2.2155688231,-.5419537683,-1.3212264122,1.7177127104 +.1161871872,2.3397462988,.3298093137,-.4642994589,-.3909181825,1.0821777677,1.3345964764 +.370945694,-.508932924,-.6492785208,-.1887505242,.8508370335,-1.2394488447,1.0919134375 +-1.0345832987,-.4084755903,-1.3596554376,.9500391524,-.8129111755,-.5177173994,.2708911585 +-1.5832468999,-.7034615575,.8141873311,-1.3812144331,-.8340205805,.0828035403,-1.6875977841 +1.4100272479,.3730738531,.1275274016,-.0228658495,1.7159176191,.1747042944,-.798996071 +-1.4929619868,-.7428788654,.8350006398,-1.1636970538,-.3545254377,.4530027337,-.5750727943 +.0819329765,1.4418390005,.7868215537,-1.116343713,-.7194750294,-.2609891328,-1.2463127197 +1.0025607948,.7383313328,.4254025736,.8683138046,.3773302769,.2936537558,.3030583632 +.6131367124,-.3455640575,-1.2110371542,-.6064586352,1.6440605588,.0907181482,-.9113613446 +.0456779795,-.145554973,.5091202443,-1.4078317727,-.2344027071,-.6338719288,.5270687694 +.5441831343,.2216937722,-.4050215986,.3490366783,.571212124,1.2551907682,.0678415722 +1.4419713291,-.3644568344,.7146520207,.4790155393,-.8331830004,-.8498481555,1.3093921196 +2.0596550945,.1402338416,2.1202202765,-1.2295339836,1.2001596615,1.4853189446,-.0490851867 +.417524391,-.2390612149,-.5172063037,-.280630596,-1.3643250491,.0699169035,2.0328707806 +-1.5315687572,1.0430203231,1.5177065062,-1.2538554624,.2120195296,-.9333536892,.6009157283 +1.2736291119,-.1549654046,.971695157,-.9021992023,.1403296643,-.8538013792,-1.3264323427 +-.6144365158,-.6289604403,-.1095430772,.6730553827,-1.1292615524,-.5008700294,-1.8394817169 +-1.1878238032,-1.9266813347,-.3907048321,.4950811098,-.3525165284,-.9909964697,.357321058 +-.1560292212,.0811688093,.4529592924,-.0272458046,-1.2287819285,-1.0362975382,1.0699919482 +-.2460102111,-.9747964458,-2.2524147824,-1.3051661675,.0999701091,-1.574628639,1.8901894839 +.6861327386,-.2412824231,1.8199578019,.9625014679,-.4886629301,.2703371004,-1.2893492148 +.5390212603,1.3122700521,.2580200217,.2176189348,.3361311315,.8886371847,-1.3062190564 +.9835142732,-1.2031463083,-.1038249973,-1.4388762323,.2627487073,1.6580919288,.5594576615 +.2379963943,-.4986522066,.4885952061,1.773744799,-.7541289699,-.2015350492,-.6521727299 +.2228986424,.1314449443,1.1965974502,-.5093501536,-.3842311495,-.2599817954,-.1291218352 +.319241335,.4438666123,-.0960008735,-.664846031,.1563991116,-.4236862985,-1.5392262874 +.7978865846,.9393978905,-1.8806204341,-.3677345242,.7291963162,-1.6555395538,.9169461935 +-.2724472956,1.0772577684,1.3566051199,-.1295984895,-.6564147179,2.1696696553,-.778903182 +-.0861829308,1.4629631055,.118807848,1.0122777867,-.3118065869,-.610494857,-.3460994402 +-.7170881968,.5182528436,-1.1913223593,1.3117966676,1.9440218365,-1.589082018,.4510101064 +1.2687263258,1.7814040238,-.8811338531,2.1922465953,.0543482457,.0150261222,1.5662743714 +.268237497,.6728945016,.9930683927,.0152073912,1.8850307705,-1.7508792975,-.5275343841 +.4075249958,-.924353841,1.1344670165,.774022686,-.0387094688,.9516969202,-.3061597965 +-.1449296414,1.0115032112,-.1468115812,.1247512722,.2995732535,.5978404723,-.5754586495 
+.507074705,-.6656577337,-.5096106263,.5528745957,.8534661193,1.4004075434,-.9783615212 +.4374402207,-.1361848414,1.1009375586,-1.0616124691,-.7476474759,2.2456182971,2.5782747762 +1.394468173,-.5653705524,-.7524347205,-2.1765350396,.0119319561,-.8313901666,-.0690917235 +.4663483046,.200310881,.5463647822,-.5275133027,.5308284629,.8257797472,1.2668800953 +.8539463879,.5741216291,-.3055062337,.1633506484,-.8700257853,-.3424920221,.8866615682 +-.5089771638,.2289009234,.2685205543,.898200072,.9945171401,.2731141081,.8343735436 +-1.0680133507,-1.7799139829,.1637110817,.6664388169,1.1503965005,.0156012985,-1.0608916167 +-.0313935293,-.2327101881,-.5827105357,.1526615774,2.3157216379,.1460226188,1.4404435816 +.8343536682,1.3497273987,.8405852528,.0591148525,-.8528016984,.0523338029,.0210106994 +-1.1134985151,-2.8804214833,1.2348910738,-3.3069146705,.3953720698,.4016265604,-.9709149913 +-1.3694899012,2.20264542,1.0690921094,.5433222798,-.0635654242,-1.3903792238,.7229153606 +.734731375,.0495625548,1.5143581847,1.1734431867,-.2818347198,.7299813525,-.3921801379 +.6746832308,-1.2952283776,-.30495147,.6243476484,.963312229,.0980171325,-.6756262148 +.4197979019,1.0887001121,2.0706199261,-1.7581632688,-1.237291776,.8360996368,-1.4023083908 +.4084074994,-.132705026,.1324147898,1.5896023645,-.540585618,.5467842814,.2591003257 +-.4172398396,.6284990614,.1258120394,-.596720619,-.4585274172,-.2188164658,-.930564818 +-.0154448108,-.3823609599,.1554707439,-1.540089511,-.5054651342,1.8509460105,1.3356528496 +.5335413656,-.9070762631,.8987829733,.011524185,1.8093002526,.3198949874,.7088666322 +.4100283317,.4685686762,.9977959388,.2311031719,-.9923359939,1.9603366816,-.1403151074 +1.084317994,1.486862475,-1.0810706733,.6154185649,-1.2717996959,-1.6162798202,-1.1472916045 +-1.3592611938,-.8833219788,-.0117783384,-.6782892428,-.0316951849,-.7359930326,-1.3466389825 +1.1585513904,-.295091429,-.0142104147,.7872031598,-.638253411,.3865895716,2.3082787329 +-.5172952938,-.520699225,-.00980958,1.3921342471,-.4811578307,.36410669,-.7961512285 +-.0512646291,1.6616248713,-.0579982262,-1.1569034596,-.2912884541,-.603226958,-1.4870040257 +-.6586431275,-.1572138265,.5102461751,-1.4025625192,-.203770556,-2.0053947058,-.8289930405 +1.3218739154,.4294862572,-1.1454973447,.2788273284,-2.0139313082,-.2544634663,.5229644059 +-.0697202253,-1.4334572601,-.4223565145,.6672004554,.068963717,-1.8852580883,-.6194371348 +-1.0561218217,.1636760202,.2322119346,-1.1490330135,.704630673,.2810271001,1.3616404454 +-.7750194564,-1.6478133338,-.574496999,1.0318805965,-.3999642339,.9614076083,1.1850829236 +-.778703426,.2092661571,.0516565927,.826652366,-.1359551528,-.5611018691,-2.0566339846 +.4088240907,-1.6908307726,.1345584608,.1825099729,.924888879,1.6717618839,.5352044048 +-.5447847526,.2914472719,-.1008540027,1.1022655524,-2.2899096738,.136760134,1.0099356845 +1.2468019773,.7763790582,-1.2990607648,1.4168294062,-1.2196545386,.7963251099,-.055310745 +-.8592206964,-.4418888768,-.4716418722,1.3246237687,-.5736029398,-.1752364229,-1.4613158245 +.4923588108,.7019909487,.1423008416,-1.0890538231,2.4600455812,-.3074285914,1.0768473205 +-1.4277746428,.5966568184,-1.6898884021,.5133243963,.7552730672,-1.262162826,.0766307308 +.6644083796,.2775755508,.4598226849,-1.6693471555,-1.4661449174,-1.6515997731,-.1524056595 +1.4607632362,-1.0416645089,.7056076495,.2396619619,.6467229819,-.6306570064,-1.4011534819 +1.3990225242,-1.0914101412,-.1241646472,.1599466325,.4553449526,-.5876880878,1.7321624534 
+-.6326775256,-2.1596623262,1.601318464,-.4882992466,1.064908646,-1.1214909366,.2941887812 +-1.6695212252,2.1880916782,.7132765095,-.0236322484,.2528994759,-.4389333899,1.0336444934 +.1568634678,.7833271535,.4791632617,-.4932607165,.717265715,.755864142,-.3386290528 +.2263099711,-.7039793141,.6421529597,-.7307870989,-.251335192,1.0018215943,-1.2687240607 +.6492665459,3.4429773379,.5826791035,-1.4668613151,-.6798193115,-.7255827089,2.0904473774 +.4693351687,-.6913060009,.202327575,.5971641558,-.2993759867,-.3096945958,-.9981257694 +1.9525454815,.9137923255,.9387761565,-1.4989513399,-.2322188442,-.0554549543,-.6686696304 +-.8561495621,.3640700718,.691422779,-1.579598368,.5217348666,-.8754201468,1.001698438 +-.0114157082,1.7160630659,2.120273072,.8533280789,.5032273572,.3682229057,-.8642533007 +-1.1614405523,1.2336259458,1.492228188,-1.0911626654,-1.8746118963,.8989533649,-.6692885382 +.954010746,-.6935878699,-1.8003523773,2.1986061558,-.7625097172,.1273209238,-.4201432291 +.4039369347,.4088607929,1.9674621501,.1413999036,-1.4047397226,-.450194361,.4099948477 +.5518686888,-.1223850076,-.8830116681,.1777198953,.9895337633,.6069834294,1.0924964278 +-.6782582231,.8554330198,.2682686108,.7336540489,1.9134491718,-.7862013775,1.0099730581 +-.4735870688,.8711312745,-1.5136489155,-.1321792466,.4540954543,-.389134508,-1.4101829039 +.2085303402,-.2441294092,.956401703,.6833784815,.2768095236,.8685515152,1.0383486176 +1.0845732897,-.1633944378,-.0167274616,-2.2417208005,.2216336527,.4444004436,-.8135901156 +-.491389203,-.5788364452,.5159964644,1.6411867718,.5606869503,.4254905386,-2.5949231036 +.3242506661,2.1999879416,.4654908286,-.2017393258,-.2709568404,-2.0721716987,-.7766643748 +.681452008,1.2504355703,.5027673483,1.6974521489,-.3325942821,1.0295433091,.6017924643 +1.1905274658,.3100141184,-.987163401,1.0179620306,-.1784092661,1.0086270108,-.700511357 +.4879227648,-.3984652943,.7979702584,-1.1343414986,1.3230129512,-.1174212712,.9753830726 +-.2169047798,-.5585185156,-1.8770160001,-.7697024656,2.1125229965,-.4889725697,.9900253455 +-1.4082344365,-.1436400377,-.125533253,1.2840230821,1.8621158641,-.1271448347,.1905092673 +-.5142455001,-1.4155688655,.0644480825,.4776857579,1.0352425357,-.0328911715,.5741869682 +-.8210906296,1.3258185915,-1.0421904509,1.0928948216,-2.063592694,.0067889162,-1.0922540315 +-.0042112986,1.5212785834,.068386297,-.1705105082,-.4232322588,-.5598737718,.4668098453 +.3170295039,1.3251842896,1.6088289375,.12787291,1.2731575985,.281781396,-1.0884196375 +-.7519272304,.2859688215,-.1930555167,-.1860040185,-1.2792520939,.0975294984,-1.1396368892 +-.1129397704,.2862503674,-1.1460369633,.7084428358,-.8053573681,-.3805739601,-.0411780612 +-.6294594386,-.3202434965,-1.0037490881,.0831886583,-.9714143381,-.4747225249,-1.0518667931 +.3702262564,.802184897,.8731858717,-1.7126157007,1.9391243018,-.0892093703,-1.0572736752 +-.1180961484,-.9849655098,.7044072564,-1.4083569793,.0412647107,.359364774,-.7892748909 +.1941892852,-1.7148833506,.5302946111,-1.0587172435,.9811344667,.2186677868,-.2457375498 +.9472799595,1.2998341378,1.9761287657,1.883003598,-.5705619396,.0654524381,-.2192958872 +3.2077999535,.8529192605,-1.2297077899,-.1399295764,2.3529696309,.3346236275,-1.4707878449 +-.7353087748,-.8783351848,.0252954639,-1.0906353591,-.1059236345,.6258236551,-.8542023764 +-.2289133843,-.6603038651,2.078501544,-.7352801492,.6978192969,.3441689476,-.3593219645 +-1.8543827334,1.5531022633,-1.5065309096,-1.8191755839,-1.0954815979,-2.0660322742,1.3057200565 
+.7619731641,.8079392221,.3389069429,-1.7246734266,-.1670906816,-.0816265139,-.1599007581 +-1.1594447981,-.0954063342,.1482644651,-.2208578274,-.3247795479,.0540965098,-.5574712741 +-2.292038463,-1.4692212575,.2166723217,1.0809322684,-1.5017375973,-2.4172406351,-.7014383665 +.731404488,.3059957169,-.4905907083,.5268192847,-1.4062581647,1.0710202408,.6466047057 +-.5713750652,.9587692735,1.2753341359,-.5058572981,-1.3184968969,-.5836627673,.8869072765 +-.084257101,-.3937814125,1.5480427802,-.377038872,-.064817083,-.1806032718,-.6935370314 +.9846717505,-1.6842127647,-.7046377733,.8649309169,-.0003943229,1.0738003576,.6333046104 +-1.2306397825,.1645480983,1.7523954596,1.367425374,.5788000799,-.1929961388,.6906816496 +.0300329011,-.1911652042,-.0591941009,1.9198624433,1.2071709986,-2.5504110836,-.6379091159 +.571379135,-.5319530847,-.248896655,-1.5564162231,1.0040620752,-.4429475948,.404739036 +.4100001845,-.5152202658,-2.8805926877,-.7220296237,.3393860214,-.6252015761,.3308498762 +-.7668300058,-.6173025721,-.7065855193,.4730196045,.3991748732,.2084821396,.1731819727 +.0881348596,-.1809847028,-.7292842058,2.2161568,.2747798702,-.661767364,.4801931831 +-1.7722384906,.0633409681,.2544019943,1.0323363481,-2.1725425755,1.6939743705,-.3215497272 +-.3350169856,-.6199063571,-.4469087918,-.8570101016,1.2764179408,-1.6527674412,-.1888648172 +-.19203,-1.2866708603,.2494145834,.1038667537,.0020332973,1.5433402567,-.6046890719 +1.2081667632,-.7526358484,1.3734182367,1.8711738156,-.1881750467,-.9985894634,-.8456708204 +.1991734829,.9408357413,-.3899403068,.710089759,.8609911964,-.4779836746,.5418958074 +-1.3858559097,-.3547203033,.3622455304,1.8746320182,.3653660365,-.1039971088,.1832540228 +-.1854396899,1.2210054485,1.0319386283,.2780783656,-.0289538973,.2270426616,1.4148220319 +-1.0670691248,1.3826679647,1.6527496612,-2.142537032,-1.2758010485,-.0843159513,-.0336816647 +.0192505617,-1.3414245862,.1294963665,.2422010012,.7211054818,1.4005949821,-.4986029201 +-1.0824453181,.4830038691,-.1094539221,-.3865657436,.1474820192,-.2809414563,-1.0150206252 +.4079178293,1.1534543205,-.1730456703,.5763401167,.4395054721,.162112514,.4710133644 +1.1243116935,-.763448225,-1.2042143558,1.1769587211,-.3500816677,-1.1363265199,.57260544 +-.0252819728,-.6920595091,-.1955549992,-.8747706026,1.7732061531,-.8811660058,.7655149674 +.6983531325,-.1856091301,-1.4574788623,.6367756721,-.1388707487,.7014706651,-.7646879707 +-.6976254219,-.6106204141,.6703642764,-.0561726661,.2417319866,-.0437536422,-.9365405443 +-.4453968436,-.4301091135,-.3621669428,.8590752131,.039388143,1.029321021,-1.2178671693 +-.12638195,.5013522286,1.3373862997,.1365743648,.1972081583,-.4987141447,.960811178 +1.3049909906,-2.2713377765,-1.1618327542,1.0443273881,-.2547233917,1.6655303077,.5576522733 +-1.3513278624,-.6322564535,-.2957955972,-.581045833,.3246836773,.6387093313,-1.7769213153 +-.1202401933,.4278578805,-1.023083979,.4078169639,.0601302058,.7559467497,.8175485298 +.4267273211,1.4367122134,1.0432341972,-.3138921056,-.7599673508,-.549051955,-.551600892 +-.0741471804,.9170984414,-2.6192288437,-1.3398866743,-.1821893595,.5682169803,-.053110084 +-.3770938838,-.5204843537,-.0471122969,.0619913453,.9689332996,-.5973658043,1.5718886832 +-1.7909248448,-.9635632803,-.2448873844,.1771609192,.5342341,-1.5219556019,.0572041033 +-.6692477988,2.0887675785,-.6093081223,1.6430110261,1.5140517155,1.4867434758,.0689875505 +1.2861756337,.3179621771,.0554899243,-2.7692334675,-.2459633097,.1662423755,-.6388062748 
+.3024668033,.7256221524,.1591825121,-.6969628398,1.1091484216,-.9467561058,.610064787 +1.7692699827,.1915548735,1.20852542,-1.8362145499,.4890205595,-.1801923935,.0861708256 +.0890708985,1.0216597578,.3961622425,-1.9743818973,-1.3163923624,.4689481401,.5782677162 +3.815963059,-.0752894152,1.2287448541,-.3440114817,-.6029366141,-1.7367049104,-1.3496721373 +.3631375355,-.2491885266,.1437700771,.4856358646,-.2488439085,-.3784902809,.8336242677 +.622501226,.4971064889,-1.3396217482,.3624738665,-1.2461734109,.3918441932,1.0252898448 +-.384596219,-.7704128231,-1.6508017095,-.7015751103,-.2360270478,-.0930945458,-.5729529874 +-.0585530699,.2541038933,-1.1066969692,.9272327844,1.5490150239,2.4735098596,-1.6029076101 +1.2772714331,-.9813073407,1.4705676681,-.0779630959,-.7001826568,-.0482123383,-1.0612975463 +.3743958896,-.3545484874,.4514333585,.7805394194,-.7433451782,1.4696886073,-.8384390414 +1.3506055132,.7785086375,-.5008645328,-.619246188,.1125500774,-.9563986196,-1.7477973337 +-1.4113819631,.4145546667,-.3156093775,-.4331701436,-1.3247824469,.8773893514,.6194421105 +-.1814377907,-.3355292473,.1111749703,.5157505566,-1.8453224628,-.5414020384,.0327166584 +1.0189383277,.3782516144,2.145891947,-.1247631457,.4541651692,-1.2735960767,.90230911 +-1.548565883,-.1816742061,.723721445,1.9515107107,-.9312626787,1.3800026599,-.0611850675 +-2.516747593,1.8408678615,-1.1848881228,-.7047368295,.4502403815,1.7114713567,.4699636389 +-.9619385941,-1.9304573946,.0920584536,-.0060535086,.1389797034,1.0202312106,-.4699242679 +-1.3191943477,-.9072612293,.9566225962,-1.2914446109,.0630544082,1.7232518559,-.0153563312 +-.2863297431,-.4604769381,-.7291209131,.1946300382,.6367783734,-.4849351157,-1.3197520216 +.1915810738,-.3816191326,.3915234439,1.0001236753,-.2211203261,-.4025398064,-1.2834030993 +.8678691267,1.2374017769,-.3679424196,.6032255686,-.4699801955,.0566290658,1.2132394249 +-1.7701013663,-.9429754481,.0669344712,-2.3128676813,1.3929843559,-.6946437928,-.2094534553 +-.083474344,1.6286473509,-1.0892697323,-.1361499359,-1.4564585517,.8589225518,.5925404737 +.0847548062,.2919648728,.2247996922,-.9679970885,.8169946931,1.0095737692,-.2763810279 +-.2916160292,1.3249198061,.0884881535,-.6665811329,-.6287156591,-1.3472261474,.2827513389 +-.467243307,-1.641759369,.4641704229,-.0289161114,.3294422979,-.9064240659,-.4914033299 diff --git a/src/test/ut/db4ai/data/rain.txt b/src/test/ut/db4ai/data/rain.txt new file mode 100644 index 000000000..22a8cdcef --- /dev/null +++ b/src/test/ut/db4ai/data/rain.txt @@ -0,0 +1,20 @@ +1,Bendigo,7.6,14.3,9,69,24,28,75,72,1010.8,1013.2,7,5,9.4,12.2,1,0 +2,Nuriootpa,8.6,14.3,5.6,56,30,31,72,49,1015.2,1016.9,7,6,10.9,13.6,1,0 +3,Tuggera0ng,11.6,22.7,2.6,28,0,13,99,67,1016.4,1014.1,0,0,15.9,21.7,1,1 +4,Witchcliffe,11.1,28.2,0,43,17,26,64,46,1020.7,1016,0,0,17.9,26.2,0,0 +5,Brisbane,16.7,26.6,0,24,0,6,75,62,1021.6,1016.8,4,3,21.9,23.4,0,0 +6,0rahHead,19.4,26.7,0.2,56,15,26,80,71,1012.7,1010.7,0,0,21.9,24,0,1 +7,MelbourneAirport,10.2,21.2,0,35,9,15,73,46,1024.2,1020.1,8,2,13.7,20.8,0,0 +8,BadgerysCreek,16.2,25,23.4,48,19,28,78,56,1018.3,1018.1,0,0,21.3,23.2,1,0 +9,Melbourne,18.2,21.3,0,31,11,22,83,76,1011.9,1010.4,8,7,18.9,20.8,0,1 +10,Woomera,9.7,22,0,33,13,17,61,31,1023.8,1020.5,1,7,14,20.9,0,0 +11,Melbourne,0,0,0,33,20,17,0,0,0,0,0,0,0,0,0,0 +12,MountGinini,5,21,0,0,20,0,48,0,0,0,0,0,11.6,0,0,0 +13,Tuggera0ng,10,22.3,0,35,9,11,70,38,1018.4,1014.5,0,0,13.1,21,0,0 +14,Woomera,21.3,29.8,0,50,17,31,52,38,1006.4,1004.9,7,7,23.8,27.9,0,0 
+15,WaggaWagga,7.9,13.8,1.4,48,17,22,90,44,1014.8,1011.6,7,6,8.7,13,1,0 +16,Moree,13.8,28.1,0,0,30,15,55,31,1020.6,1015.2,7,1,19.4,27.2,0,0 +17,Woomera,10,16.6,0,39,9,24,58,59,1018.2,1017.6,7,4,12,14.1,0,0 +18,Woomera,8.5,24,0,31,15,9,48,20,1025.7,1021.1,3,3,15.3,23,0,0 +19,Albany,12.5,25,0,0,9,0,49,0,1017.8,1015.3,0,0,21.5,0,0,0 +20,Cairns,25.2,33.9,0,43,17,31,70,50,1009.4,1006,7,7,29.3,32.3,0,1 diff --git a/src/test/ut/db4ai/direct_ml/CMakeLists.txt b/src/test/ut/db4ai/direct_ml/CMakeLists.txt new file mode 100644 index 000000000..e260f0df2 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/CMakeLists.txt @@ -0,0 +1,10 @@ +set(TGT_ut_db4ai_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/gd_direct.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/kmeans_direct.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/xgboost_direct.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/svm_direct.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/pg_mock.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/readers.cpp +) +add_library(ut_direct_ml_inner STATIC ${TGT_ut_db4ai_SRC}) +target_link_libraries(ut_direct_ml_inner ${UNIT_TEST_BASE_LIB_LIST} ${UNIT_TEST_ut_direct_ml_LIB_LIST}) diff --git a/src/test/ut/db4ai/direct_ml/Makefile b/src/test/ut/db4ai/direct_ml/Makefile new file mode 100644 index 000000000..56d969f45 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/Makefile @@ -0,0 +1,32 @@ +#--------------------------------------------------------------------------------------- +# +# IDENTIFICATION +# src/test/db4ai +# +# --------------------------------------------------------------------------------------- + + +subdir = src/test/db4ai +top_builddir = ../../.. + +include $(top_builddir)/src/Makefile.global + +PLATFORM_ARCH = $(shell uname -p) +ifeq ($(PLATFORM_ARCH),x86_64) + override CPPFLAGS += -mavx +endif + +ifneq "$(MAKECMDGOALS)" "clean" + ifneq "$(MAKECMDGOALS)" "distclean" + ifneq "$(shell which g++ |grep hutaf_llt |wc -l)" "1" + -include $(DEPEND) + endif + endif +endif + +OBJS = main.o kernels.o pg_mock.o readers.o db4ai_gd_direct.o db4ai_kmeans_direct.o db4ai_xgboost_direct.o + +include $(top_srcdir)/src/gausskernel/common.mk + + + diff --git a/src/test/ut/db4ai/direct_ml/direct_algos.h b/src/test/ut/db4ai/direct_ml/direct_algos.h new file mode 100644 index 000000000..b74d0be66 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/direct_algos.h @@ -0,0 +1,138 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+---------------------------------------------------------------------------------------
+ *
+ * direct_algos.h
+ *        Declaration of the currently implemented DB4AI algorithms using the direct API
+ *
+ *
+ * IDENTIFICATION
+ *        src/test/ut/db4ai/direct_ml/direct_algos.h
+ *
+ * ---------------------------------------------------------------------------------------
+**/
+
+#ifndef DB4AI_DIRECT_ALGOS_H
+#define DB4AI_DIRECT_ALGOS_H
+
+#include "mock.h"
+
+/*
+ * template for your next wonderful algorithm
+ *
+ * class AmazingDB4AIAlgo final : public ModelCRTP<AmazingDB4AIAlgo> {
+ * private:
+ *     friend ModelCRTP;
+ *
+ *     using ModelCRTP::ModelCRTP;
+ *
+ *     void do_init(const char *datapath);
+ *     void do_end();
+ *     bool do_train(char const *model_name, Hyperparameters const *hp);
+ *     bool do_predict();
+ * };
+ *
+ */
+
+/*
+ * logistic regression
+ */
+class LogisticRegressionDirectAPI final : public ModelCRTP<LogisticRegressionDirectAPI> {
+private:
+    // this friendship is enough for our purpose
+    friend ModelCRTP;
+
+    // inheriting constructors of the base class
+    using ModelCRTP::ModelCRTP;
+
+    bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader);
+    bool do_predict(Reader *reader);
+};
+
+/*
+ * svm classification
+ */
+class SvmcDirectAPI final : public ModelCRTP<SvmcDirectAPI> {
+private:
+    // this friendship is enough for our purpose
+    friend ModelCRTP;
+
+    // inheriting constructors of the base class
+    using ModelCRTP::ModelCRTP;
+
+    bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader);
+    bool do_predict(Reader *reader);
+};
+
+/*
+ * multiclass
+ */
+class MulticlassDirectAPI final : public ModelCRTP<MulticlassDirectAPI> {
+private:
+    // this friendship is enough for our purpose
+    friend ModelCRTP;
+
+    // inheriting constructors of the base class
+    using ModelCRTP::ModelCRTP;
+
+    bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader);
+    bool do_predict(Reader *reader);
+};
+
+/*
+ * principal components
+ */
+class PrincipalComponentsDirectAPI final : public ModelCRTP<PrincipalComponentsDirectAPI> {
+private:
+    // this friendship is enough for our purpose
+    friend ModelCRTP;
+
+    // inheriting constructors of the base class
+    using ModelCRTP::ModelCRTP;
+
+    bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader);
+    bool do_predict(Reader *reader);
+};
+
+/*
+ * k-means
+ */
+class KMeansDirectAPI final : public ModelCRTP<KMeansDirectAPI> {
+private:
+    // this friendship is enough for our purpose
+    friend ModelCRTP;
+
+    // inheriting constructors of the base class
+    using ModelCRTP::ModelCRTP;
+
+    bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader);
+    bool do_predict(Reader *reader);
+};
+
+
+/*
+ * xgboost
+ */
+class XGBoostDirectAPI final : public ModelCRTP<XGBoostDirectAPI> {
+private:
+    // this friendship is enough for our purpose
+    friend ModelCRTP;
+
+    // inheriting constructors of the base class
+    using ModelCRTP::ModelCRTP;
+
+    bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader);
+    bool do_predict(Reader *reader);
+};
+
+#endif //DB4AI_DIRECT_ALGOS_H
diff --git a/src/test/ut/db4ai/direct_ml/gd_direct.cpp b/src/test/ut/db4ai/direct_ml/gd_direct.cpp
new file mode 100644
index 000000000..c6396ea7c
--- /dev/null
+++ b/src/test/ut/db4ai/direct_ml/gd_direct.cpp
@@ -0,0 +1,173 @@
+/**
+Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+openGauss is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+
+    http://license.coscl.org.cn/MulanPSL2
+
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+---------------------------------------------------------------------------------------
+ *
+ * gd_direct.cpp
+ *        Implementation of GD-based algorithms using DB4AI's direct API
+ *
+ *
+ * IDENTIFICATION
+ *        src/test/ut/db4ai/direct_ml/gd_direct.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+**/
+#include "direct_algos.h"
+
+/*
+ * implementation of logistic regression
+ */
+
+bool LogisticRegressionDirectAPI::do_train(char const *model_name, Hyperparameters const *hp,
+                                           Descriptor *td, Reader *reader)
+{
+    model = model_fit(model_name, LOGISTIC_REGRESSION, hp->hps, hp->count,
+                      td->coltypes, td->colbyvals, td->coltyplen,
+                      td->count, &LogisticRegressionDirectAPI::fetch,
+                      &LogisticRegressionDirectAPI::rescan, reader);
+    if (model->status != ERRCODE_SUCCESSFUL_COMPLETION)
+        elog(ERROR, "could not train model, error %d\n", model->status);
+
+    return true;
+}
+
+bool LogisticRegressionDirectAPI::do_predict(Reader *reader)
+{
+    int row = 0;
+    ModelPredictor pred = model_prepare_predict(model);
+    const ModelTuple *rtuple;
+    while ((rtuple = reader->fetch()) != nullptr) {
+        bool isnull = rtuple->isnull[1];
+        if (!isnull) {
+            // column 0 is the boolean target, column 1 the feature vector
+            bool target = DatumGetBool(rtuple->values[0]);
+            Datum dt = model_predict(pred, &rtuple->values[1], &rtuple->isnull[1], &rtuple->typid[1], 1);
+            printf(" - %d %d %d\n", ++row, target, DatumGetBool(dt));
+        }
+    }
+
+    return true;
+}
+
+/*
+ * implementation of SVM classification
+ */
+
+bool SvmcDirectAPI::do_train(char const *model_name, Hyperparameters const *hp,
+                             Descriptor *td, Reader *reader)
+{
+    model = model_fit(model_name, SVM_CLASSIFICATION, hp->hps, hp->count,
+                      td->coltypes, td->colbyvals, td->coltyplen,
+                      td->count, &SvmcDirectAPI::fetch,
+                      &SvmcDirectAPI::rescan, reader);
+    if (model->status != ERRCODE_SUCCESSFUL_COMPLETION)
+        elog(ERROR, "could not train model, error %d\n", model->status);
+
+    double accuracy = lfirst_node(TrainingScore, list_head(model->scores))->value;
+    printf(" - train accuracy=%.3f iterations=%d\n", accuracy, model->num_actual_iterations);
+
+    return true;
+}
+
+bool SvmcDirectAPI::do_predict(Reader *reader)
+{
+    int row = 0;
+    int hit = 0;
+    ModelPredictor pred = model_prepare_predict(model);
+    const ModelTuple *rtuple;
+    while ((rtuple = reader->fetch()) != nullptr) {
+        bool isnull = rtuple->isnull[1];
+        if (!isnull) {
+            int32 target = DatumGetInt32(rtuple->values[0]);
+            Datum dt = model_predict(pred, &rtuple->values[1], &rtuple->isnull[1], &rtuple->typid[1], 2);
+            if (target == DatumGetInt32(dt))
+                hit++;
+            row++;
+        }
+    }
+    printf(" - predict %d: accuracy=%.3f\n", row, (double)hit / row);
+    return true;
+}
+
+/*
+ * implementation of multiclass
+ */
+
+bool MulticlassDirectAPI::do_train(char const *model_name, Hyperparameters const *hp,
+                                   Descriptor *td, Reader *reader)
+{
+    model = model_fit(model_name, MULTICLASS, hp->hps, hp->count,
+                      td->coltypes, td->colbyvals, td->coltyplen,
+                      td->count, &MulticlassDirectAPI::fetch,
+                      &MulticlassDirectAPI::rescan, reader);
+    if (model->status != ERRCODE_SUCCESSFUL_COMPLETION)
+        elog(ERROR, "could not train model, error %d\n", model->status);
+
+    return true;
+}
+
+bool MulticlassDirectAPI::do_predict(Reader *reader)
+{
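+    // walk the evaluation reader: column 0 holds the expected text label, the
+    // remaining columns are the features; count hits to report overall accuracy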
+    int rows = 0;
+    int hit = 0;
+    ModelPredictor pred = model_prepare_predict(model);
+    const ModelTuple *rtuple;
+    while ((rtuple = reader->fetch()) != nullptr) {
+        bool isnull = rtuple->isnull[1];
+        if (!isnull) {
+            Datum dt = model_predict(pred, &rtuple->values[1], &rtuple->isnull[1], &rtuple->typid[1], rtuple->ncolumns - 1);
+
+            char *target = TextDatumGetCString(rtuple->values[0]);
+            char *predicted = TextDatumGetCString(dt);
+            if ((rows++ % 5) == 0)
+                printf(" - %d <%s> <%s>\n", rows, target, predicted);
+
+            if (strcmp(target, predicted) == 0)
+                hit++;
+        }
+    }
+    printf("accuracy = %.5f\n", hit / (double)rows);
+    return true;
+}
+
+/*
+ * implementation of principal components
+ */
+
+bool PrincipalComponentsDirectAPI::do_train(char const *model_name, Hyperparameters const *hp,
+                                            Descriptor *td, Reader *reader)
+{
+    model = model_fit(model_name, PCA, hp->hps, hp->count,
+                      td->coltypes, td->colbyvals, td->coltyplen,
+                      td->count, &PrincipalComponentsDirectAPI::fetch,
+                      &PrincipalComponentsDirectAPI::rescan, reader);
+    if (model->status != ERRCODE_SUCCESSFUL_COMPLETION)
+        elog(ERROR, "could not train model, error %d\n", model->status);
+
+    return true;
+}
+
+bool PrincipalComponentsDirectAPI::do_predict(Reader *reader)
+{
+    ModelPredictor pred = model_prepare_predict(model);
+    int r = 0;
+    const ModelTuple *rtuple;
+    while ((rtuple = reader->fetch()) != nullptr) {
+        // each input tuple carries one float8 array; the prediction is printed as text
+        Datum value = rtuple->values[0];
+        bool isnull = false;
+        Oid type = FLOAT8ARRAYOID;
+        Datum dt = model_predict(pred, &value, &isnull, &type, 1);
+        printf(" - point %d: %s\n", ++r,
+               DatumGetCString(OidFunctionCall2Coll(F_ARRAY_OUT, InvalidOid, dt, FLOAT8ARRAYOID)));
+    }
+
+    return true;
+}
\ No newline at end of file
diff --git a/src/test/ut/db4ai/direct_ml/kmeans_direct.cpp b/src/test/ut/db4ai/direct_ml/kmeans_direct.cpp
new file mode 100644
index 000000000..200cdf19c
--- /dev/null
+++ b/src/test/ut/db4ai/direct_ml/kmeans_direct.cpp
@@ -0,0 +1,55 @@
+/**
+Copyright (c) 2021 Huawei Technologies Co.,Ltd.
+openGauss is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+
+    http://license.coscl.org.cn/MulanPSL2
+
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+--------------------------------------------------------------------------------------- + * + * kmeans_direct.cpp + * Implementation of k-means using DB4AI's direct API + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/kmeans_direct.cpp + * + * --------------------------------------------------------------------------------------- +**/ + +#include "direct_algos.h" + +extern ArrayType *construct_empty_md_array(uint32_t const num_centroids, uint32_t const dimension); + +bool KMeansDirectAPI::do_train(char const *model_name, Hyperparameters const *hp, + Descriptor *td, Reader *reader) +{ + model = model_fit(model_name, KMEANS, hp->hps, hp->count, + td->coltypes, td->colbyvals, td->coltyplen, + td->count, &KMeansDirectAPI::fetch, + &KMeansDirectAPI::rescan, reader); + if (model->status != ERRCODE_SUCCESSFUL_COMPLETION) + elog(ERROR, "could not train model, error %d\n", model->status); + + return true; +} + +bool KMeansDirectAPI::do_predict(Reader *reader) +{ + ModelPredictor pred = model_prepare_predict(model); + int r = 0; + const ModelTuple *rtuple; + while ((rtuple = reader->fetch()) != nullptr) { + Datum value = rtuple->values[0]; + bool isnull = false; + Oid type = FLOAT8ARRAYOID; + Datum dt = model_predict(pred, &value, &isnull, &type, 1); + printf(" - point %d: %d\n", ++r, DatumGetInt32(dt)); + } + return true; +} \ No newline at end of file diff --git a/src/test/ut/db4ai/direct_ml/mock.h b/src/test/ut/db4ai/direct_ml/mock.h new file mode 100644 index 000000000..cc9cb756f --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/mock.h @@ -0,0 +1,232 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
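
Every algorithm wrapper in these test files follows the same two-step call sequence against the direct API. A condensed sketch is worth spelling out once; `MyAlgoDirectAPI` is a placeholder shaped like the CRTP wrappers above, and `fetch`/`rescan` are the static callbacks the base class in mock.h supplies to pump rows out of a `Reader`.

```cpp
// Condensed sketch of the fit/predict sequence shared by all wrappers here.
// MyAlgoDirectAPI is illustrative only; the API names (model_fit,
// model_prepare_predict, model_predict) are the ones this patch exercises.
bool MyAlgoDirectAPI::do_train(char const *model_name, Hyperparameters const *hp,
                               Descriptor *td, Reader *reader)
{
    model = model_fit(model_name, KMEANS /* any supported AlgorithmML value */,
                      hp->hps, hp->count,
                      td->coltypes, td->colbyvals, td->coltyplen, td->count,
                      &MyAlgoDirectAPI::fetch, &MyAlgoDirectAPI::rescan, reader);
    if (model->status != ERRCODE_SUCCESSFUL_COMPLETION)
        elog(ERROR, "could not train model, error %d", model->status);
    return true;
}

bool MyAlgoDirectAPI::do_predict(Reader *reader)
{
    ModelPredictor pred = model_prepare_predict(model);   // compile once
    const ModelTuple *rtuple;
    while ((rtuple = reader->fetch()) != nullptr)         // then score row by row
        (void)model_predict(pred, rtuple->values, rtuple->isnull,
                            rtuple->typid, rtuple->ncolumns);
    return true;
}
```
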
+--------------------------------------------------------------------------------------- + * + * mock.h + * Header file of easy to test + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/mock.h + * + * --------------------------------------------------------------------------------------- +**/ + +#ifndef DB4AI_DB4AI_MOCK_H +#define DB4AI_DB4AI_MOCK_H + +#include +#include +#include +#include +#include +#include "readers.h" + +#define TEST_MAX_HYPERPARAMETERS (30) +#define TEST_MAX_ATTRIBUTES (1000) + +void pg_mock_init(int log_level, int working_mem_mb); + +uint64_t get_clock_usecs(); + +Datum OidFunctionCall2Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2); + +struct Hyperparameters { + int count = 0; + Hyperparameter hps[TEST_MAX_HYPERPARAMETERS]; + + Hyperparameters() : count{0} + { + memset_s(hps, sizeof(hps), 0, sizeof(hps)); + } + + void add(const char *name, Oid type, Datum value) + { + Assert(count < TEST_MAX_HYPERPARAMETERS); + hps[count].name = name; + hps[count].type = type; + hps[count].value = value; + count++; + } + + void add_bool(const char *name, bool value) + { + add(name, BOOLOID, BoolGetDatum(value)); + } + + void add_int(const char *name, int value) + { + add(name, INT4OID, Int32GetDatum(value)); + } + + void add_float(const char *name, double value) + { + add(name, FLOAT8OID, Float8GetDatum(value)); + } + + void add_enum(const char *name, const char *value) + { + add(name, ANYENUMOID, CStringGetDatum(value)); + } + + void reset() + { + memset_s(this, sizeof(Hyperparameters), 0, sizeof(Hyperparameters)); + } +}; + +struct Descriptor { + int count = 0; + Oid coltypes[TEST_MAX_ATTRIBUTES]; + bool colbyvals[TEST_MAX_ATTRIBUTES]; + int16 coltyplen[TEST_MAX_ATTRIBUTES]; + + Descriptor() : count{0} + { + memset_s(coltypes, sizeof(coltypes), 0, sizeof(coltypes)); + memset_s(colbyvals, sizeof(colbyvals), 0, sizeof(colbyvals)); + memset_s(coltyplen, sizeof(coltyplen), 0, sizeof(coltyplen)); + } + void reset() + { + memset_s(this, sizeof(Descriptor), 0, sizeof(Descriptor)); + } +}; + +/* + * this class provides an interface to any DB4AI algorithm implemented using the + * direct API. 
any method of this interface not implemented will hinder compilation + */ +template +class ModelCRTP { +protected: + const Model *model; + + static bool fetch(void *callback_data, ModelTuple *tuple) { + auto reader = reinterpret_cast(callback_data); + const ModelTuple *rtuple = reader->fetch(); + if (rtuple == nullptr) + return false; + + tuple->values = rtuple->values; + tuple->isnull = rtuple->isnull; + tuple->typid = rtuple->typid; + tuple->typbyval = rtuple->typbyval; + tuple->typlen = rtuple->typlen; + tuple->ncolumns = rtuple->ncolumns; + return true; + } + + static void rescan(void *callback_data) { + auto reader = reinterpret_cast(callback_data); + reader->rescan(); + } + +public: + ModelCRTP() : model{nullptr} + {} + + // no interest in these + ModelCRTP(ModelCRTP const&) = delete; + + ModelCRTP(ModelCRTP const&&) = delete; + + ModelCRTP operator=(ModelCRTP const&) = delete; + + ModelCRTP operator=(ModelCRTP const&&) = delete; + + void set_model(const Model *model) + { + this->model = model; + } + + Model const *get_model() + { + return model; + } + + bool train(char const *name, Hyperparameters const *hps, Descriptor *td, Reader *reader) + { + return static_cast(this)->do_train(name, hps, td, reader); + } + + bool predict(Reader *reader) + { + return static_cast(this)->do_predict(reader); + } + +}; + +typedef struct Execution_Times { + double training_time_s = 0.; + double prediction_time_s = 0.; +} Execution_Times; + +/* + * this class can be used in the main to run everything: train, explain, predict + * see main.cpp for an example + */ +template +class Tester final { + A algo; + +public: + Tester() = default; + + // no interest in these + Tester(Tester const&) = delete; + + Tester(Tester const&&) = delete; + + Tester operator=(Tester const&) = delete; + + Tester operator=(Tester const&&) = delete; + + Execution_Times const run(Hyperparameters const *hps, Descriptor *td, char const *name, + ExplainFormat const format, Reader *rd_train, Reader *rd_test = nullptr) + { + Execution_Times times; + printf("TRAIN\n"); + uint64_t start = get_clock_usecs(); + algo.train(name, hps, td, rd_train); + uint64_t end = get_clock_usecs(); + times.training_time_s = static_cast((end - start)) / 1000000.0; + printf("\nTrain done in %.6f secs\n\n", times.training_time_s); + + // printf("DB4AI MODEL\n-----\n%s\n-------\n\n", algo.explain(format)); + + Model const *model = algo.get_model(); + model_store(model); + printf("Model stored\n"); + + model = model_load(model->model_name); + + algo.set_model(model); + + if (rd_test == nullptr) + rd_test = rd_train; + + if (rd_test == rd_train) + rd_test->rescan(); + + printf("PREDICT\n"); + start = get_clock_usecs(); + algo.predict(rd_test); + end = get_clock_usecs(); + times.prediction_time_s = static_cast((end - start)) / 1000000.0; + printf("\nPrediction done in %.6f secs\n", times.prediction_time_s); + return times; + } +}; + +#endif //DB4AI_DB4AI_MOCK_H diff --git a/src/test/ut/db4ai/direct_ml/pg_mock.cpp b/src/test/ut/db4ai/direct_ml/pg_mock.cpp new file mode 100644 index 000000000..9eb72fe44 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/pg_mock.cpp @@ -0,0 +1,506 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. 
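
`Tester` is the piece of mock.h that ties the harness together: it trains, stores, reloads, and predicts in one call. A minimal driver, mirroring what ut_direct_ml.cpp does later in this patch, looks like the sketch below; the file path and hyperparameter values are illustrative.

```cpp
// Minimal driver for one algorithm, assuming the types declared in mock.h
// and readers.h of this patch. Path and values are placeholders.
Hyperparameters hps;
hps.add_int("max_iterations", 100);
hps.add_float("learning_rate", 1.0);

Descriptor td;
td.count = 2;
td.coltypes[0] = BOOLOID; td.colbyvals[0] = true; td.coltyplen[0] = 1;
td.coltypes[1] = INT4OID; td.colbyvals[1] = true; td.coltyplen[1] = 4;

ReaderCSV data("/path/to/train.csv", /*header=*/false);
data.open();

Tester<LogisticRegressionDirectAPI> tester;
Execution_Times t = tester.run(&hps, &td, "demo_model",
                               EXPLAIN_FORMAT_JSON, &data); // train+store+load+predict
data.close();
```

Since no separate test reader is passed, `run()` rescans the training reader and predicts over the same rows, exactly as the default `rd_test == rd_train` branch above specifies.
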
+You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +--------------------------------------------------------------------------------------- + * + * pg_mock.h + * Easy to test + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/pg_mock.h + * + * --------------------------------------------------------------------------------------- +**/ +#include "postgres.h" +#include "utils/memutils.h" +#include "utils/guc.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/fmgroids.h" +#include "cjson/cJSON.h" +#include "db4ai/model_warehouse.h" +#include "db4ai/aifuncs.h" + +extern Datum int8out(PG_FUNCTION_ARGS); + +static Datum mock_array_out(PG_FUNCTION_ARGS) +{ + MemoryContext old_cxt; + MemoryContext cxt = NULL; + bool new_flinfo = (fcinfo->flinfo == NULL); + if (new_flinfo) { + cxt = AllocSetContextCreate(CurrentMemoryContext, "mock_array_out", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + old_cxt = MemoryContextSwitchTo(cxt); + fcinfo->flinfo = (FmgrInfo *)palloc0(sizeof(FmgrInfo)); + fcinfo->flinfo->fn_mcxt = cxt; + MemoryContextSwitchTo(old_cxt); + } + + Datum dt = array_out(fcinfo); + + if (new_flinfo) + MemoryContextDelete(cxt); + + return dt; +} + +enum OidTypeMap { + OIDTYPEMAP_BOOLOID, + OIDTYPEMAP_INT1OID, + OIDTYPEMAP_INT2OID, + OIDTYPEMAP_INT4OID, + OIDTYPEMAP_INT8OID, + OIDTYPEMAP_FLOAT4OID, + OIDTYPEMAP_FLOAT8OID, + OIDTYPEMAP_CSTRINGOID, + OIDTYPEMAP_TEXTOID, + OIDTYPEMAP_VARCHAROID, + OIDTYPEMAP_FLOAT8ARRAYOID, + OIDTYPEMAP_BOOLARRAYOID, + OIDTYPEMAP_TEXTARRAYOID, + OIDTYPEMAP_INT4ARRAYOID, + OIDTYPEMAP_MAPPINGS_ +}; + +// keep same order as enum +struct OidTypeInfo { + bool typbyval; + int typlen; + char typalign; + int arrtyp; + int elemtyp; + PGFunction outfunc; +} oid_types[OIDTYPEMAP_MAPPINGS_] = { + {true, 1, 'c', BOOLARRAYOID, 0, boolout}, + {true, 1, 'c', INT1ARRAYOID, 0, int1out}, + {true, 2, 's', INT2ARRAYOID, 0, int2out}, + {true, 4, 'i', INT4ARRAYOID, 0, int4out}, + {true, 8, 'd', INT8ARRAYOID, 0, int8out}, + {true, 4, 'i', FLOAT4ARRAYOID, 0, float4out}, + {true, 8, 'd', FLOAT8ARRAYOID, 0, float8out}, + {false, -2, 'c', CSTRINGARRAYOID, 0, cstring_out}, + {false, -1, 'i', TEXTARRAYOID, 0, textout}, + {false, -1, 'i', VARCHARARRAYOID, 0, varcharout}, + // arrays + {false, -1, 'd', FLOAT8ARRAYOID, FLOAT8OID, mock_array_out}, + {false, -1, 'i', BOOLARRAYOID, BOOLOID, mock_array_out}, + {false, -1, 'i', TEXTARRAYOID, TEXTOID, mock_array_out}, + {false, -1, 'i', INT4ARRAYOID, INT4OID, mock_array_out}, +}; + +static OidTypeInfo *get_type_map(Oid typid) +{ + int map = -1; + switch (typid) { + case BOOLOID: + map = OIDTYPEMAP_BOOLOID; + break; + case INT1OID: + map = OIDTYPEMAP_INT1OID; + break; + case INT2OID: + map = OIDTYPEMAP_INT2OID; + break; + case INT4OID: + map = OIDTYPEMAP_INT4OID; + break; + case INT8OID: + map = OIDTYPEMAP_INT8OID; + break; + case FLOAT4OID: + map = OIDTYPEMAP_FLOAT4OID; + break; + case FLOAT8OID: + map = OIDTYPEMAP_FLOAT8OID; + break; + case CSTRINGOID: + map = OIDTYPEMAP_CSTRINGOID; + break; + case TEXTOID: + map = OIDTYPEMAP_TEXTOID; + break; + case VARCHAROID: + map = OIDTYPEMAP_VARCHAROID; + break; + case FLOAT8ARRAYOID: + map = OIDTYPEMAP_FLOAT8ARRAYOID; + break; + case BOOLARRAYOID: + map = 
OIDTYPEMAP_BOOLARRAYOID; + break; + case TEXTARRAYOID: + map = OIDTYPEMAP_TEXTARRAYOID; + break; + case INT4ARRAYOID: + map = OIDTYPEMAP_INT4ARRAYOID; + break; + default: + Assert(false); + elog(FATAL, "get_type_map %d NOT IMPLEMENTED", typid); + } + + return &oid_types[map]; +} + +static PGFunction mock_get_output_func(Oid functionId) +{ + return get_type_map(functionId)->outfunc; +} + +// overwrite +void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt) +{ + // fmgr_info_cxt_security(functionId, finfo, mcxt, false) + finfo->fn_oid = functionId; + finfo->fn_extra = NULL; + finfo->fn_mcxt = mcxt; + finfo->fn_expr = NULL; /* caller may set this later */ + finfo->fn_fenced = false; + finfo->fnLibPath = NULL; + + finfo->fn_addr = mock_get_output_func(functionId); +} + +// overwrite +void get_type_io_data(Oid typid, IOFuncSelector which_func, int16 *typlen, bool *typbyval, char *typalign, + char *typdelim, Oid *typioparam, Oid *func) +{ + Assert(which_func == IOFunc_output); + get_typlenbyvalalign(typid, typlen, typbyval, typalign); + *typdelim = ','; + *typioparam = 0; // TODO + *func = typid; +} + +// overwrite +char *OidOutputFunctionCall(Oid functionId, Datum val) +{ + PGFunction func = mock_get_output_func(functionId); + return DatumGetCString(DirectFunctionCall1(func, val)); +} + +// overwrite +Oid get_element_type(Oid typid) +{ + return get_type_map(typid)->elemtyp; +} + +// overwrite +Oid get_array_type(Oid typid) +{ + return get_type_map(typid)->arrtyp; +} + +// overwrite +void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena) +{ + *typOutput = type; + *typIsVarlena = !get_type_map(type)->typbyval; +} + +// overwrite +void get_typlenbyvalalign(Oid typid, int16 *typlen, bool *typbyval, char *typalign) +{ + OidTypeInfo *typinfo = get_type_map(typid); + *typbyval = typinfo->typbyval; + *typlen = typinfo->typlen; + *typalign = typinfo->typalign; +} + +Oid get_base_element_type(Oid typid) +{ + return get_element_type(getBaseType(typid)); +} + +Oid getBaseType(Oid typid) +{ + // default interface + // int32 typmod = -1; + // return getBaseTypeAndTypmod(typid, &typmod); + + OidTypeInfo *typinfo = get_type_map(typid); + // just to check if it is supported + (void)typinfo; + return typid; +} + +Datum OidFunctionCall2Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2) +{ + if (functionId != F_ARRAY_OUT || collation != InvalidOid) + elog(FATAL, "OidFunctionCall2Coll %d:%d NOT IMPLEMENTED", functionId, collation); + + PGFunction func = mock_get_output_func(DatumGetInt32(arg2)); + return DirectFunctionCall1(func, arg1); +} + +//////////////////////////////////////////////////////////////// + +void model_store(const Model *model) { + char filename[256]; + strcpy(filename, model->model_name); + strcat(filename, ".json"); + + FILE *fp = fopen(filename, "w"); + if (fp == NULL) + elog(FATAL, "cannot save model '%s'", filename); + + cJSON *doc = cJSON_CreateObject(); + cJSON_AddStringToObject(doc, "name", model->model_name); + cJSON_AddNumberToObject(doc, "owner", -1); + cJSON_AddStringToObject(doc, "algorithm", algorithm_ml_to_string(model->algorithm)); + cJSON_AddStringToObject(doc, "create_time", DatumGetCString(DirectFunctionCall1(timestamptz_out, GetCurrentTimestamp()))); + cJSON_AddNumberToObject(doc, "processed_tuples", model->processed_tuples); + cJSON_AddNumberToObject(doc, "discarded_tuples", model->discarded_tuples); + cJSON_AddNumberToObject(doc, "preprocess_time", model->pre_time_secs); + cJSON_AddNumberToObject(doc, "exec_time", 
model->exec_time_secs); + cJSON_AddNumberToObject(doc, "return_type", model->return_type); + + if (model->hyperparameters != nullptr) { + cJSON *hyperps = cJSON_AddObjectToObject(doc, "hyperparameters"); + foreach_cell(it, model->hyperparameters) + { + Hyperparameter *cell = (Hyperparameter *)lfirst(it); + cJSON *hyperp = cJSON_AddObjectToObject(hyperps, cell->name); + switch (cell->type) { + case BOOLOID: + cJSON_AddBoolToObject(hyperp, "value", DatumGetBool(cell->value)); + break; + case INT4OID: + cJSON_AddNumberToObject(hyperp, "value", DatumGetInt32(cell->value)); + break; + case FLOAT8OID: + cJSON_AddNumberToObject(hyperp, "value", DatumGetFloat8(cell->value)); + break; + case VARCHAROID: + cJSON_AddStringToObject(hyperp, "value", text_to_cstring(DatumGetVarCharP(cell->value))); + break; + case CSTRINGOID: + cJSON_AddStringToObject(hyperp, "value", DatumGetCString(cell->value)); + break; + default: + printf("unsupported cell type %d\n", cell->type); + Assert(false); + } + cJSON_AddNumberToObject(hyperp, "type", cell->type); + } + } + + if (model->scores != nullptr) { + cJSON *scores = cJSON_AddObjectToObject(doc, "scores"); + foreach_cell(it, model->scores) + { + TrainingScore *cell = (TrainingScore *)lfirst(it); + cJSON_AddNumberToObject(scores, cell->name, cell->value); + } + } + + if (model->data.version != DB4AI_MODEL_UNDEFINED) { + if (model->data.version >= DB4AI_MODEL_INVALID) + ereport(ERROR, (errmodule(MOD_DB4AI), errcode(ERRCODE_INVALID_STATUS), + errmsg("Invalid model version %d", model->data.version))); + + cJSON_AddNumberToObject(doc, "version", model->data.version); + + char *buff = (char*)palloc(model->data.size * 2 + 1); + char *dst = buff; + uint8_t* src = (uint8_t*)model->data.raw_data; + for (size_t i=0 ; idata.size ; i++) { + sprintf(dst, "%02X", *src++); + dst += 2; + } + *dst = 0; + cJSON_AddStringToObject(doc, "data", buff); + pfree(buff); + } + + fputs(cJSON_Print(doc), fp); + fclose(fp); +} + +const Model *model_load(const char *model_name) { + char filename[256]; + strcpy(filename, model_name); + strcat(filename, ".json"); + + FILE *fp = fopen(filename, "rb"); + if (fp == NULL) + elog(FATAL, "file '%s' does not exists", filename); + + fseek(fp, 0L, SEEK_END); + size_t len = ftell(fp); + fseek(fp, 0L, SEEK_SET); + + char *buffer = (char*)palloc(len + 1); + if (fread(buffer, 1, len, fp) != len) + elog(FATAL, "cannot read model from '%s'", filename); + + buffer[len] = 0; + fclose(fp); + + cJSON *doc = cJSON_Parse(buffer); + if (doc == NULL) + elog(FATAL, "invalid model in '%s'", filename); + + pfree(buffer); + Model *model = (Model*) palloc0(sizeof(Model)); + + cJSON *item = cJSON_GetObjectItem(doc, "name"); + Assert(cJSON_IsString(item)); + model->model_name = pstrdup(cJSON_GetStringValue(item)); + + item = cJSON_GetObjectItem(doc, "owner"); + Assert(cJSON_IsNumber(item) && cJSON_GetNumberValue(item) == -1); + + item = cJSON_GetObjectItem(doc, "algorithm"); + Assert(cJSON_IsString(item)); + model->algorithm = get_algorithm_ml(cJSON_GetStringValue(item)); + + item = cJSON_GetObjectItem(doc, "processed_tuples"); + Assert(cJSON_IsNumber(item)); + model->processed_tuples = (int64_t)cJSON_GetNumberValue(item); + + item = cJSON_GetObjectItem(doc, "discarded_tuples"); + Assert(cJSON_IsNumber(item)); + model->discarded_tuples = (int64_t)cJSON_GetNumberValue(item); + + item = cJSON_GetObjectItem(doc, "preprocess_time"); + Assert(cJSON_IsNumber(item)); + model->pre_time_secs = cJSON_GetNumberValue(item); + + item = cJSON_GetObjectItem(doc, "exec_time"); + 
Assert(cJSON_IsNumber(item)); + model->exec_time_secs = cJSON_GetNumberValue(item); + + item = cJSON_GetObjectItem(doc, "return_type"); + Assert(cJSON_IsNumber(item)); + model->return_type = (Oid)cJSON_GetNumberValue(item); + + model->hyperparameters = NULL; + cJSON *hyperps = cJSON_GetObjectItem(doc, "hyperparameters"); + if (hyperps != NULL) { + Assert(cJSON_IsObject(hyperps)); + cJSON *hyperp; + cJSON_ArrayForEach(hyperp, hyperps) { + Assert(cJSON_IsObject(hyperp)); + + Hyperparameter *cell = (Hyperparameter *)palloc(sizeof(Hyperparameter)); + cell->name = pstrdup(hyperp->string); + + item = cJSON_GetObjectItem(hyperp, "type"); + Assert(cJSON_IsNumber(item)); + cell->type = (Oid)cJSON_GetNumberValue(item); + + item = cJSON_GetObjectItem(hyperp, "value"); + switch (cell->type) { + case BOOLOID: + Assert(cJSON_IsBool(item)); + cell->value = BoolGetDatum(cJSON_GetNumberValue(item) != 0); + break; + case INT4OID: + Assert(cJSON_IsNumber(item)); + cell->value = Int32GetDatum((int) cJSON_GetNumberValue(item)); + break; + case FLOAT8OID: + Assert(cJSON_IsNumber(item)); + cell->value = Float8GetDatum(cJSON_GetNumberValue(item)); + break; + case VARCHAROID: + Assert(cJSON_IsString(item)); + cell->value = CStringGetTextDatum(pstrdup(cJSON_GetStringValue(item))); + break; + case CSTRINGOID: + Assert(cJSON_IsString(item)); + cell->value = CStringGetDatum(pstrdup(cJSON_GetStringValue(item))); + break; + default: + Assert(false); + } + model->hyperparameters = lappend(model->hyperparameters, cell); + } + } + + model->scores = NULL; + cJSON *scores = cJSON_GetObjectItem(doc, "scores"); + if (scores != NULL) { + Assert(cJSON_IsObject(scores)); + cJSON_ArrayForEach(item, scores) { + Assert(cJSON_IsNumber(item)); + + TrainingScore *cell = (TrainingScore *)palloc(sizeof(TrainingScore)); + cell->name = pstrdup(item->string); + cell->value = cJSON_GetNumberValue(item); + model->scores = lappend(model->scores, cell); + } + } + + item = cJSON_GetObjectItem(doc, "version"); + Assert(cJSON_IsNumber(item)); + model->data.version = (SerializedModelVersion)cJSON_GetNumberValue(item); + if (model->data.version == DB4AI_MODEL_UNDEFINED) { + model->data.raw_data = NULL; + model->data.size = 0; + } else { + item = cJSON_GetObjectItem(doc, "data"); + Assert(cJSON_IsString(item)); + + char *data = cJSON_GetStringValue(item); + model->data.size = strlen(data) / 2; + model->data.raw_data = palloc(model->data.size); + + unsigned int v; + uint8_t* dst = (uint8_t*)model->data.raw_data; + for (size_t c=0 ; cdata.size ; c++) { + sscanf(data, "%2x", &v); + *dst++ = v; + data += 2; + } + } + + return model; +} + +//////////////////////////////////////////////////////////////// + +void pg_mock_init(int log_level, int working_mem_mb) +{ + knl_instance_init(); + + MemoryContextInit(); + PmTopMemoryContext = t_thrd.top_mem_cxt; + + knl_thread_init(MASTER_THREAD); + + SelfMemoryContext = THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT); + MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_DEFAULT)); + + t_thrd.fake_session = create_session_context(t_thrd.top_mem_cxt, 0); + t_thrd.fake_session->status = KNL_SESS_FAKE; + u_sess = t_thrd.fake_session; + + u_sess->attr.attr_memory.work_mem = working_mem_mb * 1024; // in KBytes + + session_timezone = pg_tzset("GMT"); + + log_min_messages = log_level; +} + +uint64_t get_clock_usecs() +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000000ULL + tv.tv_usec; +} + diff --git a/src/test/ut/db4ai/direct_ml/readers.cpp b/src/test/ut/db4ai/direct_ml/readers.cpp 
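
The mock warehouse above persists a model as JSON next to the executable, with the binary payload hex-encoded as text. Its intended use is a simple round trip, as in this short sketch (the assertions are illustrative checks, not part of the patch):

```cpp
// Round trip through the JSON mock warehouse (pg_mock.cpp above).
// model_store() writes "<model_name>.json" in the working directory;
// model_load() parses it back with cJSON and decodes the hex payload.
model_store(model);
const Model *copy = model_load(model->model_name);
Assert(copy->algorithm == model->algorithm);
Assert(copy->data.size == model->data.size);
```
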
new file mode 100644 index 000000000..ca6c9b185 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/readers.cpp @@ -0,0 +1,862 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +--------------------------------------------------------------------------------------- + * + * readers.cpp + * Read data files + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/readers.cpp + * + * --------------------------------------------------------------------------------------- +**/ + +#include "readers.h" + +extern ArrayType *construct_empty_md_array(uint32_t const num_centroids, uint32_t const dimension); + +#define MaxColumnsCSV 2048 + +bool ReaderCSV::has_nulls() { + return !no_nulls; +} + +bool ReaderCSV::is_open() { + return (allocated > 0); +} + +void ReaderCSV::open() { + Assert(!is_open()); + + FILE* fp = fopen(filename, "rb"); + if (fp == nullptr) + elog(ERROR, "invalid file '%s'", filename); + + fseek(fp, 0, SEEK_END); + int end = ftell(fp); + fseek(fp, 0, SEEK_SET); + + /* + * if a line is larger than 1 GB we're in trouble because we + * won't be able to allocate it in one chunk (not a problem for now) + * who has a single line of 1 GB anyway? + */ + const int MaxLineSize = 1000 * 1000 * 1000; + char *buffer = (char*)palloc(MaxLineSize); + + nrows = 0; + tuple.ncolumns = 0; + + char **tokens = (char**) palloc(sizeof(char*) * MaxColumnsCSV); + + if (header) { + // read first line + if (fgets(buffer, MaxLineSize, fp) != buffer) + elog(ERROR, "cannot read CSV header"); + + tuple.ncolumns = tokenize(buffer, tokens, delim); + if (tuple.ncolumns == 0) + elog(ERROR, "empty or wrong CSV header"); + } + + int remaining_types = 0; + tuple.typid = nullptr; + while (fgets(buffer, MaxLineSize, fp) == buffer) { + // printf("ROW <%s>\n", buffer); + int nc = tokenize(buffer, tokens, delim); + + // empty line, do not continue scanning + if (nc == 0) + break; + + if (nrows == 0) { + // speculate with the number of rows + double bc = ftell(fp); + if (header) + bc /= 2; + + allocated = (int)(end / bc + 1); + // printf("allocate %d\n", allocated); + rows = (uint8_t**)palloc(sizeof(uint8_t*) * allocated); + } else if (nrows == allocated) { + // resize + int pos = ftell(fp); + double ratio = pos / (double)nrows; + allocated += (int)((end - pos) / ratio + 10); + // printf("reallocate %d\n", allocated); + rows = (uint8_t**)repalloc(rows, sizeof(uint8_t*) * allocated); + } + + if (header) { + if (nc != tuple.ncolumns) + elog(ERROR, "expected %d columns in row %d, found %d", tuple.ncolumns, nrows, nc); + } else { + tuple.ncolumns = nc; + header = false; + } + + // prepare datatypes the first time + if (tuple.typid == nullptr) { + remaining_types = tuple.ncolumns; + tuple.typid = (Oid*)palloc0(tuple.ncolumns * sizeof(Oid)); + tuple.typlen = (int16*)palloc0(tuple.ncolumns * sizeof(int16)); + tuple.typbyval = (bool*)palloc0(tuple.ncolumns * sizeof(bool)); + } + + // extract remaining datatypes + char typalign; + char *endp; + if (remaining_types > 0) { + for (int c=0 ; c 0) { + len--; + if (str[len]!='\n' && str[len]!='\r') + break; + str[len] = 0; + } + + int 
ntokens = 0; + if (len > 0) { + bool has_delim; + do { + has_delim = false; + + // trim left + while (*str != '\0') { + if (*str!=' ' && *str!='\t') + break; + str++; + } + + // printf("<%s>\n", str); + + bool quoted = (*str == '\"'); + if (quoted) + str++; + + bool after_quoted = false; + char *start = str; + char *end = str; + while (*str != 0) { + char ch = *str++; + if (after_quoted && ch != delim && ch != ' ' && ch != '\t') + elog(ERROR, "invalid quoted string <%s>", start); + + if (ch == '\"') { + if (*str == '\"') { + if (!quoted) + elog(ERROR, "invalid quoted string <%s>", start); + + str++; // ignore it + } else { + if (quoted) { + after_quoted = true; + quoted = false; + continue; + } else + elog(ERROR, "invalid quoted string <%s>", start); + } + } + + if (ch == delim) { + if (!quoted) { + has_delim = true; + break; + } + } + + *end++ = ch; + } + *end = 0; + + if (quoted) + elog(ERROR, "invalid quoted string <%s>", start); + + // trim right + while (end > start) { + end--; + if (*end!=' ' && *end!='\t') + break; + *end = 0; + } + + if (ntokens == MaxColumnsCSV) + elog(FATAL, "too many columns %d, maximum %d", ntokens, MaxColumnsCSV); + + // printf(" -- %d <%s>\n", ntokens, start); + tokens[ntokens++] = start; + } while(*str != 0); + + if (has_delim) { + // null value at end + if (ntokens == MaxColumnsCSV) + elog(FATAL, "too many columns (adding last null)"); + + tokens[ntokens++] = str; + } + } + + return ntokens; +} + +void ReaderCSV::shuffle(int seed) { + srand(seed); + for (int r=0; ropen(); + + int ncolumns = parent->get_columns(); + for (int c=0 ; cget_column_type(c); + if (typ != BOOLOID && typ != INT4OID && typ != FLOAT8OID) + elog(ERROR, "cannot vectorize columns of type %d", typ); + + if (typ != FLOAT8OID) + only_float8 = false; + } + + arr = construct_empty_md_array(1, ncolumns); + + char typalign; + typid = FLOAT8ARRAYOID; + get_typlenbyvalalign(typid, &typlen, &typbyval, &typalign); + isnull = false; + value = PointerGetDatum(arr); + + tuple.ncolumns = 1; + tuple.isnull = &isnull; + tuple.values = &value; + tuple.typid = &typid; + tuple.typbyval = &typbyval; + tuple.typlen = &typlen; +} + +void ReaderVectorFloat::close() { + Assert(is_open()); + pfree(arr); + tuple.ncolumns = 0; + + parent->close(); +} + +bool ReaderVectorFloat::is_open() { + return tuple.ncolumns > 0; +} + +int ReaderVectorFloat::get_columns() { + Assert(is_open()); + return 1; +} + +Oid ReaderVectorFloat::get_column_type(int column) { + Assert(is_open()); + Assert(column == 0); + return FLOAT8ARRAYOID; +} + +void ReaderVectorFloat::rescan() { + Assert(is_open()); + parent->rescan(); +} + +const ModelTuple * ReaderVectorFloat::fetch() { + Assert(is_open()); + const ModelTuple *rtuple = parent->fetch(); + if (rtuple == nullptr) + return nullptr; + + double *ptr = (double*) ARR_DATA_PTR(arr); + if (only_float8 && !parent->has_nulls()) { + // just copy directly + int bc = rtuple->ncolumns * sizeof(float8); + errno_t rc = memcpy_s(ptr, bc, rtuple->values, bc); + securec_check(rc, "\0", "\0"); + } else { + for (int c=0 ;cncolumns ; c++) { + double value = 0; + if (!rtuple->isnull[c]) { + Datum dt = rtuple->values[c]; + Oid typid = rtuple->typid[c]; + // sorted by most frequent use + if (typid == FLOAT8OID) + value = DatumGetFloat8(dt); + else if (typid == INT4OID) + value = DatumGetInt32(dt); + else { + Assert(typid == BOOLOID); + value = DatumGetBool(dt); + } + *ptr++ = value; + } + } + } + + return &tuple; +} + + +/////////////////////////////////////////////////////////////////////////// 
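
`ReaderVectorFloat` is designed to be stacked on top of another reader rather than used standalone. A small sketch of the composition, with a placeholder path:

```cpp
// Stacking readers: ReaderVectorFloat collapses every BOOL/INT4/FLOAT8
// column of its parent into a single FLOAT8ARRAYOID column, the shape the
// k-means and PCA paths expect. The file path is a placeholder.
ReaderCSV csv("/path/to/points.csv", /*header=*/false);
ReaderVectorFloat vec(&csv);
vec.open();                           // opens the parent, pre-builds the array
Assert(vec.get_columns() == 1);
Assert(vec.get_column_type(0) == FLOAT8ARRAYOID);
const ModelTuple *row = vec.fetch();  // one Datum: a float8[] per input row
(void)row;
vec.close();
```

Note the fast path in `fetch()` above: when every parent column is already float8 and nulls are impossible, the row is copied into the array with a single `memcpy_s` instead of a per-column conversion loop.
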
+/////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////// +// ReaderProjection + +bool ReaderProjection::has_nulls() { + return !no_nulls; +} + +ReaderProjection::ReaderProjection(Reader *parent, Datum *projection) +: parent(parent) { + // first count the size of the projection and the number of column + tuple.ncolumns = 0; + tuple.values = nullptr; + + Datum *proj = projection; + while (*proj != 0) { + // scan expression + do { + proj += 2; // atom + } while (*proj != 0); + proj++; // return + tuple.ncolumns++; + } + proj++; + Assert(tuple.ncolumns > 0); + + errno_t rc; + int bc = (proj - projection) * sizeof(Datum); + this->projection = (Datum*)palloc(bc); + rc = memcpy_s(this->projection, bc, projection, bc); + securec_check(rc, "\0", "\0"); +} + +bool ReaderProjection::is_open() { + return tuple.values != nullptr; +} + +void ReaderProjection::open() { + Assert(!is_open()); + parent->open(); + + // allocate tuple + tuple.values = (Datum*)palloc(tuple.ncolumns * sizeof(Datum)); + tuple.isnull = (bool*)palloc(tuple.ncolumns * sizeof(bool)); + tuple.typid = (Oid*)palloc(tuple.ncolumns * sizeof(Oid)); + tuple.typlen = (int16*)palloc(tuple.ncolumns * sizeof(int16)); + tuple.typbyval = (bool*)palloc(tuple.ncolumns * sizeof(bool)); + + // projection types are unknown, evaluate the + // projection with a fake row + int ncols = parent->get_columns(); + bool isnull[ncols]; + Oid typid[ncols]; + Datum values[ncols]; + for (int c=0 ; cget_column_type(c); + switch (typid[c]) { + case INT4OID: + values[c] = Int32GetDatum(1); + break; + case FLOAT8OID: + values[c] = Float8GetDatum(1); + break; + case BOOLOID: + values[c] = BoolGetDatum(true); + break; + case CSTRINGOID: + values[c] = CStringGetDatum("fake"); + break; + case TEXTOID: + values[c] = CStringGetTextDatum("fake"); + break; + default: + elog(ERROR, "projection of type %d not supported", typid[c]); + break; + } + } + + nlocators = 0; + no_nulls = true; + eval(values, isnull, typid, true); +} + +void ReaderProjection::close() { + Assert(is_open()); + + pfree(tuple.values); + pfree(tuple.isnull); + pfree(tuple.typid); + pfree(tuple.typlen); + pfree(tuple.typbyval); + tuple.values = nullptr; + + parent->close(); +} + +int ReaderProjection::get_columns() { + Assert(is_open()); + return tuple.ncolumns; +} + +Oid ReaderProjection::get_column_type(int column) { + Assert(is_open()); + Assert(column < tuple.ncolumns); + return tuple.typid[column]; +} + +void ReaderProjection::rescan() { + Assert(is_open()); + parent->rescan(); +} + +const ModelTuple * ReaderProjection::fetch() { + Assert(is_open()); + + const ModelTuple *rtuple = parent->fetch(); + if (rtuple == nullptr) + return nullptr; + + eval(rtuple->values, rtuple->isnull, rtuple->typid, false); + return &tuple; +} + +void ReaderProjection::eval(Datum* row, bool *isnull, Oid *typid, bool is_fake) { + const int MaxLocators = sizeof(locators) / sizeof(locators[0]); + const int MaxStackSize = 20; + Datum stack_values[MaxStackSize]; + bool stack_isnull[MaxStackSize]; + Oid stack_typid[MaxStackSize]; + int col = 0; + int index; + PGFunction func; + const FmgrBuiltin *fl; + Datum *proj = projection; + while (*proj != 0) { + int sp = 0; + bool has_func = false; + // scan expression + do { + int op = DatumGetInt32(*proj++); + switch (op) { + case 1: + // call func + has_func = true; + fl = nullptr; + func = (PGFunction) DatumGetPointer(*proj++); + if (is_fake) { + // look for function into the catalog + if 
(nlocators == MaxLocators) + elog(ERROR, "too many func locators"); + + bool found = false; + fl = fmgr_builtins; + for (int b=0 ; !found && bfunc == func); + if (found) + locators[nlocators++] = fl; + else + fl++; + } + Assert(found); + } else { + for (int b=0 ; bfunc == func) + break; + } + } + + if (sp < fl->nargs) + elog(ERROR, "not enough operands (%d of %d) for function '%s'", sp, fl->nargs, fl->funcName); + + switch (fl->nargs) { + case 1: + if (stack_isnull[sp-1]) + elog(ERROR, "function parameters cannot be NULL"); + + stack_values[sp-1] = DirectFunctionCall1(func, stack_values[sp-1]); + break; + case 2: + // printf("CALL[%d] TYPE %d\n", sp, fl->rettype); + + sp--; + if (stack_isnull[sp-1] || stack_isnull[sp]) + elog(ERROR, "function parameters cannot be NULL"); + + stack_values[sp-1] = DirectFunctionCall2(func, stack_values[sp-1], stack_values[sp]); + break; + default: + elog(ERROR, "direct call with %d parameters not implemented", fl->nargs); + break; + } + + stack_isnull[sp-1] = false; + stack_typid[sp-1] = fl->rettype; + break; + case 2: + // push value of column from input row + if (sp == MaxStackSize) + elog(ERROR, "stack overflow in projection"); + + index = DatumGetInt32(*proj++); + if (index >= parent->get_columns()) + elog(ERROR, "invalid input column %d (of %d) for projection", index, parent->get_columns()); + + // printf("PUSH[%d] COL %d TYPE %d\n", sp, index, typid[index]); + + stack_values[sp] = row[index]; + stack_isnull[sp] = isnull[index]; + stack_typid[sp] = typid[index]; + sp++; + break; + case INT4OID: + case TEXTOID: + // push int into stack + if (sp == MaxStackSize) + elog(ERROR, "stack overflow in projection"); + stack_values[sp] = *proj++; + stack_isnull[sp] = false; + stack_typid[sp] = op; + sp++; + break; + default: + elog(FATAL, "unkown operation %d", op); + } + } while (*proj != 0); + proj++; + + // get result + if (sp != 1) + elog(ERROR, "invalid projection expression"); + + if (is_fake) { + // may return nulls? 
+ if (!has_func && no_nulls && parent->has_nulls()) + no_nulls = false; + + // just get the info + if (stack_isnull[0]) + elog(ERROR, "cannot infer datatype of column %d in projection", col); + + char typalign; + tuple.typid[col] = stack_typid[0]; + get_typlenbyvalalign(tuple.typid[col], &tuple.typlen[col], &tuple.typbyval[col], &typalign); + // printf("COLUMN %d TYPE %d:%d:%d\n", col, tuple.typid[col], tuple.typlen[col], tuple.typbyval[col]); + } else { + tuple.isnull[col] = stack_isnull[0]; + if (!tuple.isnull[col]) { + // printf("FETCH %d TYPE %d:%d\n", col, tuple.typid[col], stack_typid[0]); + Assert(tuple.typid[col] == stack_typid[0]); + tuple.values[col] = stack_values[0]; + } + } + + col++; + } +} + + +/////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////// + +void test_readers() { + Datum proj[] = { + // first + DP_PROJECT_COLUMN(0), + DP_PROJECT_INT4(5), + DP_PROJECT_CALL(int4lt), + DP_PROJECT_RETURN, + // second + DP_PROJECT_COLUMN(3), + DP_PROJECT_RETURN, + // done + DP_PROJECT_RETURN, + }; + (void)proj; + + ReaderCSV csv( + // "/usr1/db4ai/opengauss/src/test/db4ai/data/patients.txt", false + // "/usr1/db4ai/opengauss/src/test/db4ai/data/coordinates.txt", false + // "/usr1/datasets/test.txt", true + // "/usr1/datasets/perfect.txt", true + "/usr1/datasets/houses.txt", false, true, '|' + // "/usr1/datasets/s_cir.txt", false + // "/usr1/datasets/virus.csv", false + // with text + // "/usr1/datasets/cities.txt", true + // "/usr1/datasets/addresses.txt", false, false + ); + Reader *rd = &csv; + + // ReaderVectorFloat vf(rd); + // rd = &vf; + + // ReaderProjection vp(rd, proj); + // rd = &vp; + + rd->open(); + + printf("COLS=%d {", rd->get_columns()); + for (int c=0 ; cget_columns() ; c++) + printf(" %s%d", c>0 ? "," : "", rd->get_column_type(c)); + printf(" }\n"); + + int nrows = 0; + while (rd->fetch() != nullptr) + nrows++; + + csv.rescan(); + + int nr = 0; + const ModelTuple *tuple; + while ((tuple = rd->fetch()) != nullptr) { + if (nr < 10 || nr > nrows-10) { + printf("#%d : {", nr+1); + for (int c=0 ; cncolumns; c++) { + if (c > 0) + printf(", "); + // printf("%d:", c+1);fflush(stdout); + if (!tuple->isnull[c]) { + if (tuple->typid[c] == CSTRINGOID) + printf("\""); + + Oid typOutput; + bool typIsVarlena; + getTypeOutputInfo(tuple->typid[c], &typOutput, &typIsVarlena); + char *str = OidOutputFunctionCall(typOutput, tuple->values[c]); + printf("%s",str); + pfree(str); + + if (tuple->typid[c] == CSTRINGOID) + printf("\""); + } + } + printf("}\n"); + } + nr++; + } + Assert(nr == nrows); + + rd->close(); +} + + diff --git a/src/test/ut/db4ai/direct_ml/readers.h b/src/test/ut/db4ai/direct_ml/readers.h new file mode 100644 index 000000000..5ec8533ce --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/readers.h @@ -0,0 +1,372 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
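
The projection evaluator above is a small postorder stack machine. Tracing the first expression from `test_readers()` makes the mechanics concrete; the stack states in the comments are the only additions here.

```cpp
// Trace of eval() on the first expression used in test_readers() above:
Datum expr[] = {
    DP_PROJECT_COLUMN(0),    // push row[0]                 stack: [row0]
    DP_PROJECT_INT4(5),      // push constant 5             stack: [row0, 5]
    DP_PROJECT_CALL(int4lt), // pop 2, DirectFunctionCall2  stack: [row0 < 5]
    DP_PROJECT_RETURN,       // pop result into output column 0 (BOOLOID)
    DP_PROJECT_RETURN,       // empty expression terminates the projection
};
```

On the first (fake-row) pass, `eval()` also resolves each function pointer against `fmgr_builtins` so the output column types can be inferred before any real data is read.
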
+--------------------------------------------------------------------------------------- + * + * readers.h + * Header file of reading data files + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/readers.h + * + * --------------------------------------------------------------------------------------- +**/ + + +#ifndef MOCK_READERS_H +#define MOCK_READERS_H + +#include "postgres.h" +#include "utils/lsyscache.h" +#include "utils/builtins.h" +#include "utils/fmgrtab.h" +#include "db4ai/kernel.h" + +///////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////// + +class Reader { +public: + virtual ~Reader() {} + + virtual bool has_nulls() = 0; + + virtual void open() = 0; + + virtual bool is_open() = 0; + + virtual void close() = 0; + + virtual int get_columns() = 0; + + virtual Oid get_column_type(int column) = 0; + + virtual const ModelTuple *fetch() = 0; + + virtual void rescan() = 0; + +protected: + Reader() {} // to force inheritance +}; + +class ReaderCSV : public Reader { +public: + ReaderCSV(const char* filename, bool header, bool no_nulls = true, char delim = ',') + : header(header) + , delim(delim) + , no_nulls(no_nulls) + , allocated(0) { + this->filename = pstrdup(filename); + } + + virtual bool has_nulls() override; + + virtual void open() override; + + virtual bool is_open() override; + + virtual void close() override; + + virtual int get_columns() override; + + virtual Oid get_column_type(int column) override; + + virtual const ModelTuple *fetch() override; + + virtual void rescan() override; + + void shuffle(int seed); + +private: + char* filename; + bool header; + char delim; + bool no_nulls; + int allocated; + int nrows; + uint8_t **rows; // nulls + Datums + int next; + ModelTuple tuple; + + int tokenize(char* str, char ** tokens, char delim); + void promote(int column, Oid new_typid); +}; + +///////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////// + +class ReaderVectorFloat : public Reader { +public: + ReaderVectorFloat(Reader *parent) + : parent(parent) + , arr(nullptr) + , only_float8(true) { + tuple.ncolumns = 0; + } + + virtual bool has_nulls() override; + + virtual void open() override; + + virtual bool is_open() override; + + virtual void close() override; + + virtual int get_columns() override; + + virtual Oid get_column_type(int column) override; + + virtual const ModelTuple *fetch() override; + + virtual void rescan() override; + +private: + Reader *parent; + ModelTuple tuple; + Oid typid; + int16 typlen; + bool typbyval; + bool isnull; + Datum value; + ArrayType *arr; + bool only_float8; +}; + +///////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////// + +// A projection expression is a postorder sequence of operations (INT4, COLUMN, CALL, ...) +// finished by a RETURN. Functions can be any defined into builtins.h. Evaluation is with +// a stack machine. 
For example, to multiple column 3 by 2 and to return column 1: +// +// -- first column +// DP_PROJECT_COLUMN(3) +// DP_PROJECT_INT4(2) +// DP_PROJECT_CALL(int4mul) +// DP_PROJECT_RETURN +// -- second column +// DP_PROJECT_COLUMN(1) +// DP_PROJECT_RETURN +// -- done +// DP_PROJECT_RETURN +// +// A projection is a sequence of projection expression +// followed by an extra final RETURN (empty expresion) + +// Utility macros to define projections. Notice that each atom of the projection +// is defined by a pair of Datums except the end (RETURN) which is a single one, +// and that the first element of the pair is always an INT4OID +#define DP_PROJECT_RETURN 0 +#define DP_PROJECT_CALL(_FUNC) Int32GetDatum(1), PointerGetDatum(_FUNC) +#define DP_PROJECT_COLUMN(_COL) Int32GetDatum(2), Int32GetDatum(_COL) +#define DP_PROJECT_INT4(_VAL) Int32GetDatum(INT4OID), Int32GetDatum(_VAL) +#define DP_PROJECT_TEXT(_VAL) Int32GetDatum(TEXTOID), CStringGetTextDatum(_VAL) + +class ReaderProjection : public Reader { +public: + ReaderProjection(Reader *parent, Datum *projection); + + virtual bool has_nulls() override; + + virtual void open() override; + + virtual bool is_open() override; + + virtual void close() override; + + virtual int get_columns() override; + + virtual Oid get_column_type(int column) override; + + virtual const ModelTuple *fetch() override; + + virtual void rescan() override; + +private: + Reader *parent; + ModelTuple tuple; + Datum *projection; + const FmgrBuiltin *locators[10]; + int nlocators; + bool no_nulls; + + void eval(Datum* row, bool *isnull, Oid *typid, bool is_fake); +}; + +//////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////// + +template +class ReaderKernel : public Reader { +public: + ReaderKernel(Reader *parent) + : parent(parent) { + } + + bool has_nulls() override { + Assert(parent->is_open()); + return parent->has_nulls(); + } + + void open() override { + Assert(!parent->is_open()); + parent->open(); + for (int c=1 ; cget_columns() ; c++) { + Assert(parent->get_column_type(c) == FLOAT8OID); + } + init(); + } + + bool is_open() override { + return parent->is_open(); + } + + void close() override { + Assert(parent->is_open()); + release(); + parent->close(); + } + + int get_columns() override { + Assert(parent->is_open()); + return tuple.ncolumns; + } + + Oid get_column_type(int column) override { + Assert(parent->is_open()); + if (column == 0) + return parent->get_column_type(0); + return FLOAT8OID; + } + + const ModelTuple *fetch() override { + Assert(parent->is_open()); + const ModelTuple *ptuple = parent->fetch(); + if (ptuple != NULL && !ptuple->isnull[0]) { + tuple.values[0] = ptuple->values[0]; + tuple.isnull[0] = ptuple->isnull[0]; + tuple.typid[0] = ptuple->typid[0]; + tuple.typbyval[0] = ptuple->typbyval[0]; + tuple.typlen[0] = ptuple->typlen[0]; + + Datum *pvalues = &ptuple->values[1]; + float8 *pdata = tmp.data; + for (int c=1 ; c<=tmp.rows ; c++) { + Assert(!ptuple->isnull[c]); + Assert(ptuple->typid[c] == FLOAT8OID); + *pdata++ = DatumGetFloat8(*pvalues++); + } + + kernel.km.transform(&kernel.km, &tmp, &out); + + pdata = out.data; + pvalues = &tuple.values[1]; + for (int c=0 ; cis_open()); + parent->rescan(); + // gamma may have changed + release(); + init(); + } + +protected: + _K kernel; + Matrix out; + +private: + Reader *parent; + ModelTuple tuple; + Matrix tmp; + + virtual void init_kernel(int features, int components, int seed) = 0; + + void init() { + int 
features = parent->get_columns() - 1; + int seed = 1; + double D = Max(128, 2.0 * features); + + init_kernel(features, D, seed); + tuple.ncolumns = kernel.km.coefficients + 1; + + tuple.isnull = (bool*)palloc(sizeof(bool) * tuple.ncolumns); + tuple.values = (Datum*)palloc(sizeof(Datum) * tuple.ncolumns); + tuple.typid = (Oid*)palloc(sizeof(Oid) * tuple.ncolumns); + tuple.typbyval = (bool*)palloc(sizeof(bool) * tuple.ncolumns); + tuple.typlen = (int16*)palloc(sizeof(int16) * tuple.ncolumns); + + for (int c=1 ; c { +public: + ReaderGaussian(Reader *parent, double *gamma) + : ReaderKernel(parent) + , gamma(gamma) { + } + +private: + double *gamma; + + void init_kernel(int features, int components, int seed) { + kernel_init_gaussian(&kernel, features, components, *gamma, seed); + } +}; + +class ReaderPolynomial : public ReaderKernel { +public: + ReaderPolynomial(Reader *parent, int *degree, double *coef0) + : ReaderKernel(parent) + , degree(degree) + , coef0(coef0) { + } + +private: + int *degree; + double *coef0; + + void init_kernel(int features, int components, int seed) { + kernel_init_polynomial(&kernel, features, components, *degree, *coef0, seed); + } +}; + +/////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////// + +void test_readers(); + +#endif + + diff --git a/src/test/ut/db4ai/direct_ml/svm_direct.cpp b/src/test/ut/db4ai/direct_ml/svm_direct.cpp new file mode 100644 index 000000000..a4aca4530 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/svm_direct.cpp @@ -0,0 +1,375 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
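
The kernel readers take their hyperparameters by pointer on purpose: the HPO loop in svm_direct.cpp rewrites the pointed-to value between candidates and then calls `rescan()`, which releases and re-initializes the kernel. A sketch of that intended use, with a placeholder path and illustrative values:

```cpp
// Why gamma is a pointer: rescan() above re-reads it ("gamma may have
// changed") and rebuilds the kernel. Feature columns must be FLOAT8OID.
double gamma = 0.5;
ReaderCSV csv("/path/to/train.csv", /*header=*/false);
ReaderGaussian gauss(&csv, &gamma);
gauss.open();       // label column passes through; features expand to
                    // Max(128, 2*nfeatures) approximated kernel components
gamma = 1.0;        // next hyperparameter candidate
gauss.rescan();     // kernel re-initialized with gamma = 1.0
```
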
+--------------------------------------------------------------------------------------- + * + * svm_direct.cpp + * Read data files + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/svm_direct.cpp + * + * --------------------------------------------------------------------------------------- +**/ + +#include +#include "direct_algos.h" +#include "readers.h" + +// #define USE_GAUSSIAN +// #define USE_POLYNOMIAL + +struct TestGaussian { + const char *name; + int batch_size; + double decay; + double lambda; + double learning_rate; + double tolerance; + int max_iterations; + int seed; + double accuracy; +} gaussian_tests[] = { + { "separable", 4, 0.00000000, 451.43192744, 0.00128971, 0.00098990, 1, 200, 1.000 }, + { "almost_linear", 8, 0.84938336, 614.68837452, 0.17139901, 0.00000021, 87, 11, 0.985 }, + { "one", 1, 0.00000486, 355.01855320, 0.34238902, 0.00000246, 2, 71, 1.000 }, + { "clusters", 2, 0.01072456, 41.53315701, 0.51013572, 0.00000101, 14, 2, 0.630 }, + { "circle", 2, 0.04287848, 796.73487315, 0.00442136, 0.00000088, 7, 121, 0.675 }, + { "moons", 2, 0.01318587, 955.48766920, 0.01601325, 0.00010955, 4, 17, 0.890 }, + { "polynomial", 8, 0.41294937, 141.17150921, 0.10135892, 0.00120927, 7, 27, 0.920 }, + { "blob_1", 6, 0.03236468, 728.35294835, 0.89775039, 0.00571308, 5, 33, 0.930 }, + { "blob_2", 8, 0.84938336, 614.68837452, 0.13304777, 0.00009542, 52, 32, 0.900 }, + { "blob_3", 6, 0.21813823, 21.44762832, 0.49231980, 0.00000030, 21, 28, 0.895 }, + { "blob_4", 4, 0.04729034, 289.17150662, 0.03038806, 0.00000228, 6, 188, 0.878 }, +}; + +#define NUM_TEST_GAUSSIAN (sizeof(gaussian_tests) / sizeof(gaussian_tests[0])) + +class TestKernels final : public ModelCRTP { +private: + friend ModelCRTP; + using ModelCRTP::ModelCRTP; + + bool do_train(char const *model_name, Hyperparameters const *hp, Descriptor *td, Reader *reader) { + model = model_fit(model_name, SVM_CLASSIFICATION, hp->hps, hp->count, + td->coltypes, td->colbyvals, td->coltyplen, + td->count, &fetch, &rescan, reader); + if (model->status != ERRCODE_SUCCESSFUL_COMPLETION) + elog(ERROR, "could not train model, error %d\n", model->status); + + return true; + } + + bool do_predict(Reader *reader) { + char filename[256]; + sprintf(filename, "/usr1/datasets/reg/reg_%s." 
+#ifdef USE_GAUSSIAN + "gaussian" +#elif defined(USE_POLYNOMIAL) + "polynomial" +#else + "linear" +#endif + ".csv", model->model_name); + FILE *fp = fopen(filename, "w"); + int row = 0; + int hit = 0; + ModelPredictor pred = model_prepare_predict(model); + const ModelTuple *rtuple; + while ((rtuple = reader->fetch()) != nullptr) { + bool isnull = rtuple->isnull[0]; + if (!isnull) { + int target = DatumGetInt32(rtuple->values[0]); + row++; + + Datum dt = model_predict(pred, &rtuple->values[1], &rtuple->isnull[1], &rtuple->typid[1], rtuple->ncolumns-1); + fprintf(fp, "%d\n", DatumGetInt32(dt)); + if (false) + printf(" - %d %d %d\n", ++row, target, DatumGetInt32(dt)); + + if (target == DatumGetInt32(dt)) + hit++; + } + } + fclose(fp); + printf(" accuracy=%.3f (%d rows)\n", (double)hit / row, row); + return true; + } +}; + +// [0:1) +inline double rnd_next(struct drand48_data *rnd) { + double r; + drand48_r(rnd, &r); + return r; +} + +// [0:max) +inline int rnd_next(struct drand48_data *rnd, int max) { + long int r; + lrand48_r(rnd, &r); + return r % max; +} + +// [min:max) +inline int rnd_next(struct drand48_data *rnd, int min, int max) { + return rnd_next(rnd, max - min) + min; +} + +inline double rnd_next(struct drand48_data *rnd, double min, double max) { + return rnd_next(rnd) * (max - min) + min; +} + +inline double rnd_next_log(struct drand48_data *rnd, double min, double max) { + static constexpr double EPSILON = 1E-12; + double log_min_range = log(min + EPSILON); + double log_max_range = log(max + EPSILON); + double r = rnd_next(rnd, log_min_range, log_max_range); + double v = exp(r); + // printf("-- %.16g %.16g %.16g %.16g\n", min, max, r, v); + return v; +} + +struct HpoTask { + int id; + // hyperparameters + int batch_size; + double decay; + double lambda; + double learning_rate; + double tolerance; + double gamma; + int degree; + double coef0; + int seed; + // results + Model const *model; + double accuracy; +}; + +#ifdef USE_GAUSSIAN +#define SVM_HYPERP_HPO 6 +#elif defined(USE_POLYNOMIAL) +#define SVM_HYPERP_HPO 7 +#else +#define SVM_HYPERP_HPO 5 +#endif +static void set_hyperp(HpoTask *task, int hyperp, struct drand48_data *rnd) { + static int batch_sizes[] = { 1, 2, 4, 6, 8 }; + switch (hyperp) { + case 0: // batch_size + task->batch_size = batch_sizes[rnd_next(rnd, 5)]; + break; + case 1: // decay + task->decay = rnd_next_log(rnd, 0.0, 0.9); + break; + case 2: // lambda + task->lambda = rnd_next_log(rnd, 1E-3, 1E3); + break; + case 3: // learning_rate + task->learning_rate = rnd_next_log(rnd, 1E-3, 1.0); + break; + case 4: // tolerance + task->tolerance = rnd_next_log(rnd, 1E-7, 1E-1); + break; +#ifdef USE_GAUSSIAN + case 5: // gamma + task->gamma = rnd_next_log(rnd, 1E-1, 1); + break; +#endif +#ifdef USE_POLYNOMIAL + case 5: // degree + task->degree = rnd_next(rnd, 3, 5); + break; + case 6: // coef0 + task->coef0 = rnd_next_log(rnd, 1E-2, 1E1); + break; +#endif + default: + Assert(false); + } +} + +static void hpo_train(const char* name, HpoTask *tasks, int count, Reader *reader, Descriptor *td, double *gamma, int *degree, double *coef0) { + Hyperparameters hps; + for (int t=0 ; tseed = t + 1; + + hps.reset(); + hps.add_int("batch_size", tasks->batch_size); + hps.add_float("decay", tasks->decay); + hps.add_float("lambda", tasks->lambda); + hps.add_float("learning_rate", tasks->learning_rate); + hps.add_float("tolerance", tasks->tolerance); + // hps.add_float("gamma", tasks->gamma); + hps.add_int("seed", tasks->seed); + hps.add_int("max_iterations", 100); + // 
hps.add_bool("verbose", true); + + *gamma = tasks->gamma; + *degree = tasks->degree; + *coef0 = tasks->coef0; + reader->rescan(); + + MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext, "hpo", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext old_cxt = MemoryContextSwitchTo(cxt); + + TestKernels test; + test.train(name, &hps, td, reader); + // printf("%s\n",test.explain(EXPLAIN_FORMAT_YAML)); + + tasks->model = test.get_model(); + TrainingScore *score = lfirst_node(TrainingScore, list_head(tasks->model->scores)); + tasks->accuracy = score->value; + + // printf("== %d: %d %.16g %.16g %.16g %.16g = %d %.16g\n", tasks->id, + // tasks->batch_size, tasks->decay, tasks->lambda, tasks->learning_rate, tasks->tolerance, + // tasks->model->num_actual_iterations, tasks->accuracy); fflush(stdout); + + MemoryContextSwitchTo(old_cxt); + } +} + +static int f_cmp_tasks(const void *lhs, const void *rhs) { + const HpoTask *plhs = (const HpoTask *)lhs; + const HpoTask *prhs = (const HpoTask *)rhs; + // printf("** %d:%.16g %d:%.16g\n", plhs->id, plhs->accuracy, prhs->id, prhs->accuracy); + if (plhs->accuracy > prhs->accuracy) + return -1; + + if (plhs->accuracy < prhs->accuracy) + return 1; + + if (plhs->model->num_actual_iterations < prhs->model->num_actual_iterations) + return -1; + + if (plhs->model->num_actual_iterations > prhs->model->num_actual_iterations) + return 1; + + return 0; +} + +static void hpo_drop(HpoTask *tasks, int count) { + while (count-- > 0) { + MemoryContextDelete(tasks->model->memory_context); + tasks++; + } +} + +static void hpo_mutate(const HpoTask *src, HpoTask *dst, int count, int id, struct drand48_data *rnd) { + while (count-- > 0) { + *dst = *src; + set_hyperp(dst, rnd_next(rnd, SVM_HYPERP_HPO), rnd); + src++; + dst++; + } +} + +static const Model* hpo(TestGaussian *ptest, Reader *reader, int quota, double *gamma, int *degree, double *coef0) { + // random grid + int qpart = quota / 5; + HpoTask tasks[qpart * 2]; + + // fill the first partition + struct drand48_data rnd; + srand48_r(1000000, &rnd); + for (int q=0 ; qid = q; + set_hyperp(task, h, &rnd); + } + } + + Descriptor td; + char typalign; + td.count = reader->get_columns(); + for (int i=0 ; iget_column_type(i); + get_typlenbyvalalign(td.coltypes[i], &td.coltyplen[i], &td.colbyvals[i], &typalign); + } + + // train the first partition + hpo_train(ptest->name, tasks, qpart, reader, &td, gamma, degree, coef0); + qsort(tasks, qpart, sizeof(HpoTask), f_cmp_tasks); + + // do mutations + for (int m=0 ; m<4 && tasks[0].accuracy < 1.0 ; m++) { + // printf("{");for (int i=0; iname, tasks+qpart, qpart, reader, &td, gamma, degree, coef0); + qsort(tasks, qpart * 2, sizeof(HpoTask), f_cmp_tasks); + hpo_drop(tasks+qpart, qpart); + } + + // drop all except first + hpo_drop(tasks+1, qpart-1); + + printf("batch=%d %.8f, %.8f, %.8f, %.8f, %.8f, %2d, %.8f seed=%d acc=%.3f iter=%d\n", + tasks[0].batch_size, tasks[0].decay, tasks[0].lambda, tasks[0].learning_rate, tasks[0].tolerance, + tasks[0].gamma, tasks[0].degree, tasks[0].coef0, + tasks[0].seed, tasks[0].accuracy, tasks[0].model->num_actual_iterations); fflush(stdout); + + // printf("** %d\n", tasks[0].id); + TestKernels test; + test.set_model(tasks[0].model); + *gamma = tasks[0].gamma; + *degree = tasks[0].degree; + *coef0 = tasks[0].coef0; + reader->rescan(); + test.predict(reader); + + return tasks[0].model; +} + + +/////////////////////////////////////////////////////////////////////////////////////////// 
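
`hpo()` above is a small evolutionary search: seed a random population, keep the better half by accuracy (ties broken by fewer iterations via `f_cmp_tasks`), mutate one hyperparameter per survivor, and stop early on perfect accuracy. Wide-range hyperparameters such as lambda (1e-3 to 1e3) are drawn through `rnd_next_log` so small magnitudes are sampled as often as large ones. Stripped of bookkeeping, the generational loop has roughly this shape; `evaluate()` is a stand-in for `hpo_train()` on the child partition, not a function in the patch.

```cpp
// Shape of the search in hpo() above; evaluate() stands in for hpo_train()
// applied to the child half of the task array.
for (int m = 0; m < 4 && tasks[0].accuracy < 1.0; m++) {
    hpo_mutate(tasks, tasks + qpart, qpart, /*id=*/0, &rnd); // spawn children
    evaluate(tasks + qpart, qpart);                          // train children
    qsort(tasks, qpart * 2, sizeof(HpoTask), f_cmp_tasks);   // elitist select
    hpo_drop(tasks + qpart, qpart);                          // free the losers
}
```

Each surviving model's memory context is dropped explicitly with `hpo_drop`, which keeps the total footprint at two partitions regardless of the search quota.
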
+/////////////////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////// + +void test_kernels() { + char filename[PATH_MAX]; + TestGaussian *ptest = gaussian_tests; + for (int i=0 ; i<(int)NUM_TEST_GAUSSIAN ; i++, ptest++) { + printf("RUNNING %s:\n", ptest->name);fflush(stdout); + + sprintf(filename, "/usr1/datasets/reg/reg_%s.csv", ptest->name); + ReaderCSV reg(filename, false); + Reader *reader = ® + + double gamma = 0.1; + int degree = 3; + double coef0 = 1.0; +#ifdef USE_GAUSSIAN + ReaderGaussian gauss(reader, &gamma); + reader = &gauss; +#elif defined(USE_POLYNOMIAL) + ReaderPolynomial poly(reader, °ree, &coef0); + reader = &poly; +#endif + + reader->open(); + const Model* model = hpo(ptest, reader, 5000, &gamma, °ree, &coef0); + // TrainingScore *score = lfirst_node(TrainingScore, list_head(model->scores)); + MemoryContextDelete(model->memory_context); + reader->close(); + + // break; + } + + printf("done.\n");fflush(stdout); +} + diff --git a/src/test/ut/db4ai/direct_ml/xgboost_direct.cpp b/src/test/ut/db4ai/direct_ml/xgboost_direct.cpp new file mode 100644 index 000000000..e09395da2 --- /dev/null +++ b/src/test/ut/db4ai/direct_ml/xgboost_direct.cpp @@ -0,0 +1,59 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +--------------------------------------------------------------------------------------- + * + * gd_direct.cpp + * Implementation of GD-based algorithms using DB4AI's direct API + * + * + * IDENTIFICATION + * src/test/ut/db4ai/direct_ml/gd_direct.cpp + * + * --------------------------------------------------------------------------------------- +**/ + +#include "direct_algos.h" + +/* + * implementation of logistic regression + */ + +bool XGBoostDirectAPI::do_train(char const *model_name, Hyperparameters const *hp, + Descriptor *td, Reader *reader) +{ + model = model_fit(model_name, XG_BIN_LOGISTIC, hp->hps, hp->count, + td->coltypes, td->colbyvals, td->coltyplen, + td->count, &XGBoostDirectAPI::fetch, + &XGBoostDirectAPI::rescan, reader); + if (model->status != ERRCODE_SUCCESSFUL_COMPLETION) + elog(ERROR, "could not train model, error %d\n", model->status); + + return true; +} + +bool XGBoostDirectAPI::do_predict(Reader *reader) +{ + ModelPredictor pred = model_prepare_predict(model); + const ModelTuple *rtuple; + while ((rtuple = reader->fetch()) != nullptr) { + Datum value = rtuple->values[0]; + bool isnull = false; + Oid type = INT4OID; + Datum dt = model_predict(pred, &value, &isnull, &type, 1); + bool target = DatumGetInt32(rtuple->values[1]); + bool predicted = (DatumGetFloat8(dt) < 0.2); + printf(" - %d %d %d\n", DatumGetInt32(rtuple->values[0]), target, predicted); + } + + return true; +} \ No newline at end of file diff --git a/src/test/ut/db4ai/ut_direct_ml.cpp b/src/test/ut/db4ai/ut_direct_ml.cpp new file mode 100644 index 000000000..2b6cc5bac --- /dev/null +++ b/src/test/ut/db4ai/ut_direct_ml.cpp @@ -0,0 +1,363 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. 
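
A note on the XGBoost predict path above: `XG_BIN_LOGISTIC` returns a float8 score rather than a class label, so the test derives its boolean prediction by thresholding. The 0.2 cut-off is this test's own choice, not an API default.

```cpp
// Decision rule used in XGBoostDirectAPI::do_predict above: binarize the
// float8 score returned by the model. The 0.2 threshold is test-specific.
Datum dt = model_predict(pred, &value, &isnull, &type, 1);
bool predicted = (DatumGetFloat8(dt) < 0.2);
```
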
+openGauss is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+
+ http://license.coscl.org.cn/MulanPSL2
+
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+---------------------------------------------------------------------------------------
+ *
+ * ut_direct_ml.cpp
+ * DirectML test master file
+ *
+ *
+ * IDENTIFICATION
+ * src/test/ut/db4ai/ut_direct_ml.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+**/
+#include "ut_direct_ml.h"
+
+#include <cstring> // strcpy/strcat
+#include <ctime> // time()
+#include "direct_ml/direct_algos.h"
+#include "direct_ml/readers.h"
+
+GUNIT_TEST_REGISTRATION(UTDirectML, TestLogisticRegression);
+GUNIT_TEST_REGISTRATION(UTDirectML, TestSvmKernel);
+GUNIT_TEST_REGISTRATION(UTDirectML, TestSvmMulticlass);
+GUNIT_TEST_REGISTRATION(UTDirectML, TestKMeans);
+GUNIT_TEST_REGISTRATION(UTDirectML, TestRCA);
+GUNIT_TEST_REGISTRATION(UTDirectML, TestXGBoost);
+
+void UTDirectML::SetUp()
+{
+ // to be compatible with openGauss' default configuration
+ pg_mock_init(NOTICE, 64);
+}
+
+void UTDirectML::TearDown() {}
+
+void UTDirectML::TestLogisticRegression()
+{
+ char filetrain[PATH_MAX];
+ char filepredict[PATH_MAX];
+ char datapath[PATH_MAX];
+ char *gauss_home = gs_getenv_r("GAUSSHOME");
+ strcpy(datapath, gauss_home);
+ strcat(datapath, "/ut_bin/data/");
+ Hyperparameters hps;
+ Descriptor td;
+
+ hps.add_int("batch_size", 20);
+ hps.add_float("decay", 1.0);
+ hps.add_float("learning_rate", 5.0);
+ hps.add_int("max_iterations", 30);
+ hps.add_float("tolerance", 1e-7);
+ hps.add_int("seed", 1);
+ hps.add_enum("optimizer", "ngd");
+
+ td.coltypes[0] = BOOLOID;
+ td.coltypes[1] = INT4OID;
+ td.colbyvals[0] = td.colbyvals[1] = true;
+ td.coltyplen[0] = td.coltyplen[1] = sizeof(int32_t);
+ td.count = 2;
+
+ strcpy(filetrain, datapath);
+ strcat(filetrain, "patients.txt");
+ strcpy(filepredict, filetrain);
+ ReaderCSV patients(filetrain, false);
+
+ Datum proj_patients_anxiety[] = {
+ DP_PROJECT_COLUMN(1), DP_PROJECT_INT4(1), DP_PROJECT_CALL(int4eq), DP_PROJECT_RETURN,
+ DP_PROJECT_COLUMN(3), DP_PROJECT_RETURN, DP_PROJECT_RETURN,
+ };
+ ReaderProjection patients_anxiety(&patients, proj_patients_anxiety);
+
+ Tester logisticRegressionModel;
+ patients_anxiety.open();
+ logisticRegressionModel.run(&hps, &td, "patients_logregr", EXPLAIN_FORMAT_JSON, &patients_anxiety);
+ patients_anxiety.close();
+}
+
+void UTDirectML::TestSvmKernel()
+{
+ char filetrain[PATH_MAX];
+ char filepredict[PATH_MAX];
+ char datapath[PATH_MAX];
+ char *gauss_home = gs_getenv_r("GAUSSHOME");
+ strcpy(datapath, gauss_home);
+ strcat(datapath, "/ut_bin/data/");
+ Hyperparameters hps;
+ Descriptor td;
+
+ td.coltypes[0] = INT4OID;
+ td.colbyvals[0] = true;
+ td.coltyplen[0] = 4;
+ for (int c = 1; c < 3; c++) { // columns 1..2 hold the two float features
+ td.coltypes[c] = FLOAT4OID;
+ td.colbyvals[c] = true;
+ td.coltyplen[c] = 4;
+ }
+ td.count = 3;
+
+ strcpy(filetrain, datapath);
+ strcat(filetrain, "moons.csv");
+
+ // linear
+ {
+ ReaderCSV moons_rd(filetrain, false);
+ hps.reset();
+ hps.add_int("batch_size", 8);
+ hps.add_float("decay", 1e-20);
+ hps.add_float("lambda", 920.90725960);
+ hps.add_float("learning_rate", 0.01215337);
+ hps.add_float("tolerance", 0.06377824);
+ hps.add_int("seed", 54);
+ 
hps.add_int("max_iterations", 2); + hps.add_enum("kernel", "linear"); + + Tester svmLinear; + moons_rd.open(); + svmLinear.run(&hps, &td, "moons_linear", EXPLAIN_FORMAT_JSON, &moons_rd); + moons_rd.close(); + } + + // gaussian + { + ReaderCSV moons_rd(filetrain, false); + + hps.reset(); + hps.add_int("batch_size", 4); + hps.add_float("decay", 0.80858937); + hps.add_float("lambda", 274.28986109); + hps.add_float("learning_rate", 0.16556385); + hps.add_float("tolerance", 0.00714786); + hps.add_float("gamma", 0.96736585); + hps.add_int("seed", 1); + hps.add_int("max_iterations", 33); + hps.add_enum("kernel", "gaussian"); + + Tester svmGaussian; + moons_rd.open(); + svmGaussian.run(&hps, &td, "moons_gaussian", EXPLAIN_FORMAT_JSON, &moons_rd); + moons_rd.close(); + } + + // polynomial + { + ReaderCSV moons_rd(filetrain, false); + + hps.reset(); + hps.add_int("batch_size", 2); + hps.add_float("decay", 0.87908244); + hps.add_float("lambda", 53.75794302); + hps.add_float("learning_rate", 0.40456318); + hps.add_float("tolerance", 0.00003070); + hps.add_int("degree", 4); + hps.add_float("coef0", 1.11311435); + hps.add_int("seed", 1); + hps.add_int("max_iterations", 100); + hps.add_enum("kernel", "polynomial"); + + Tester svmPolynomial; + moons_rd.open(); + svmPolynomial.run(&hps, &td, "moons_polynomial", EXPLAIN_FORMAT_JSON, &moons_rd); + moons_rd.close(); + } +} + +void UTDirectML::TestSvmMulticlass() +{ + char filetrain[PATH_MAX]; + char filepredict[PATH_MAX]; + char datapath[PATH_MAX]; + char *gauss_home = gs_getenv_r("GAUSSHOME"); + strcpy(datapath, gauss_home); + strcat(datapath, "/ut_bin/data/"); + Hyperparameters hps; + Descriptor td; + + hps.reset(); + hps.add_float("lambda", 50.0); + hps.add_float("learning_rate", 2.0); + hps.add_float("tolerance", 1e-7); + hps.add_int("max_iterations", 1000); + + td.coltypes[0] = BOOLOID; + td.colbyvals[0] = true; + td.coltyplen[0] = 1; + for (int c = 1; c < 8; c++) { + td.coltypes[c] = FLOAT4OID; + td.colbyvals[c] = true; + td.coltyplen[c] = 4; + } + td.count = 8; + + strcpy(filetrain, datapath); + strcat(filetrain, "ecoli.csv"); + ReaderCSV ecoli_rd(filetrain, false); + + Datum proj_ecoli[] = { + DP_PROJECT_COLUMN(7), DP_PROJECT_RETURN, DP_PROJECT_COLUMN(0), DP_PROJECT_RETURN, DP_PROJECT_COLUMN(1), + DP_PROJECT_RETURN, DP_PROJECT_COLUMN(2), DP_PROJECT_RETURN, DP_PROJECT_COLUMN(3), DP_PROJECT_RETURN, + DP_PROJECT_COLUMN(4), DP_PROJECT_RETURN, DP_PROJECT_COLUMN(5), DP_PROJECT_RETURN, DP_PROJECT_COLUMN(6), + DP_PROJECT_RETURN, DP_PROJECT_RETURN, + }; + ReaderProjection ecoli(&ecoli_rd, proj_ecoli); + + Tester svmMulticlassModel; + ecoli.open(); + ecoli_rd.shuffle(time(NULL)); + svmMulticlassModel.run(&hps, &td, "ecoli_svm_multiclass", EXPLAIN_FORMAT_JSON, &ecoli); + + hps.reset(); + hps.add_float("learning_rate", 30.0); + hps.add_float("tolerance", 1e-7); + hps.add_int("max_iterations", 1000); + hps.add_enum("classifier", "logistic_regression"); + + Tester logregMulticlassModel; + ecoli.rescan(); + ecoli_rd.shuffle(time(NULL)); + logregMulticlassModel.run(&hps, &td, "ecoli_logreg_multiclass", EXPLAIN_FORMAT_JSON, &ecoli); + ecoli.close(); +} + +void UTDirectML::TestKMeans() +{ + char filetrain[PATH_MAX]; + char filepredict[PATH_MAX]; + char datapath[PATH_MAX]; + char *gauss_home = gs_getenv_r("GAUSSHOME"); + strcpy(datapath, gauss_home); + strcat(datapath, "/ut_bin/data/"); + Hyperparameters hps; + Descriptor td; + + hps.reset(); + hps.add_int("max_iterations", 50); + hps.add_int("num_centroids", 10); + hps.add_float("tolerance", 0.00001); + 
hps.add_int("batch_size", 1000); + hps.add_int("num_features", 7); + hps.add_enum("distance_function", "L2_Squared"); + hps.add_enum("seeding_function", "Random++"); + hps.add_int("verbose", 2); + hps.add_int("seed", 1255025990); + + td.reset(); + td.coltypes[0] = FLOAT8ARRAYOID; + td.colbyvals[0] = FLOAT8PASSBYVAL; + td.coltyplen[0] = -1; + td.count = 1; + + strcpy(filetrain, datapath); + strcpy(filepredict, datapath); + strcat(filetrain, "kmeans_7_1000_10_train.txt"); + strcat(filepredict, "kmeans_7_10_predict.txt"); + + ReaderCSV kmeans_training_coordinates_file(filetrain, false); + ReaderCSV kmeans_prediction_coordinates_file(filepredict, false); + ReaderVectorFloat kmeans_train_coordinates(&kmeans_training_coordinates_file); + ReaderVectorFloat kmeans_prediction_coordinates(&kmeans_prediction_coordinates_file); + kmeans_train_coordinates.open(); + kmeans_prediction_coordinates.open(); + + Tester kMeansModel; + kMeansModel.run(&hps, &td, "my_kmeans_pp_l2_sqr_fastcheck", EXPLAIN_FORMAT_JSON, &kmeans_train_coordinates, + &kmeans_prediction_coordinates); + kmeans_train_coordinates.close(); + kmeans_prediction_coordinates.close(); +} + +void UTDirectML::TestRCA() +{ + char filetrain[PATH_MAX]; + char filepredict[PATH_MAX]; + char datapath[PATH_MAX]; + char *gauss_home = gs_getenv_r("GAUSSHOME"); + strcpy(datapath, gauss_home); + strcat(datapath, "/ut_bin/data/"); + Hyperparameters hps; + Descriptor td; + + hps.reset(); + hps.add_int("number_components", 7); + hps.add_float("tolerance", 0.0001); + hps.add_int("batch_size", 1000); + hps.add_int("max_iterations", 10); + hps.add_int("seed", 96176977); + + td.reset(); + td.coltypes[0] = FLOAT8ARRAYOID; + td.colbyvals[0] = FLOAT8PASSBYVAL; + td.coltyplen[0] = -1; + td.count = 1; + + strcpy(filetrain, datapath); + strcpy(filepredict, datapath); + strcat(filetrain, "pca_7_1000_train.txt"); + strcat(filepredict, "pca_7_1000_predict.txt"); + + ReaderCSV pca_training_coordinates_file(filetrain, false); + ReaderCSV pca_prediction_coordinates_file(filepredict, false); + ReaderVectorFloat pca_train_coordinates(&pca_training_coordinates_file); + ReaderVectorFloat pca_prediction_coordinates(&pca_prediction_coordinates_file); + pca_train_coordinates.open(); + pca_prediction_coordinates.open(); + + printf("configuration tested: %s\n", filetrain); + + Tester pcaModel; + pcaModel.run(&hps, &td, "pca_fit_7_7_1000_fastcheck", EXPLAIN_FORMAT_JSON, &pca_train_coordinates, + &pca_prediction_coordinates); + pca_train_coordinates.close(); + pca_prediction_coordinates.close(); +} + +void UTDirectML::TestXGBoost() +{ + + char filetrain[PATH_MAX]; + char filepredict[PATH_MAX]; + char datapath[PATH_MAX]; + Hyperparameters hps; + Descriptor td; + + char *gauss_home = gs_getenv_r("GAUSSHOME"); + strcpy(datapath, gauss_home); + strcat(datapath, "/ut_bin/data/"); + + hps.reset(); + hps.add_int("n_iter", 10); + hps.add_int("batch_size", 1000000); + hps.add_int("nthread", 4); + hps.add_int("seed", 3141); + + td.reset(); + td.coltypes[0] = INT4OID; + td.coltypes[1] = INT4OID; + td.colbyvals[0] = td.colbyvals[1] = INT4OID; + td.coltyplen[0] = td.coltyplen[1] = sizeof(int32_t); + td.count = 2; + + strcpy(filetrain, datapath); + strcat(filetrain, "rain.txt"); + + ReaderCSV rain_csv(filetrain, false); + Datum proj_rain[] = { + DP_PROJECT_COLUMN(16), DP_PROJECT_RETURN, DP_PROJECT_COLUMN(17), DP_PROJECT_RETURN, DP_PROJECT_RETURN, + }; + ReaderProjection rain(&rain_csv, proj_rain); + rain.open(); + + Tester xgModel; + xgModel.run(&hps, &td, "xgboost_fastcheck", EXPLAIN_FORMAT_JSON, 
&rain); + rain.close(); +} \ No newline at end of file diff --git a/src/test/ut/db4ai/ut_direct_ml.h b/src/test/ut/db4ai/ut_direct_ml.h new file mode 100644 index 000000000..fd6db3245 --- /dev/null +++ b/src/test/ut/db4ai/ut_direct_ml.h @@ -0,0 +1,54 @@ +/** +Copyright (c) 2021 Huawei Technologies Co.,Ltd. +openGauss is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + + http://license.coscl.org.cn/MulanPSL2 + +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +--------------------------------------------------------------------------------------- + * + * ut_direct_ml.h + * Header file of directML test master File + * + * + * IDENTIFICATION + * src/test/ut/db4ai/ut_direct_ml.h + * + * --------------------------------------------------------------------------------------- +**/ +#ifndef UT_DIRECT_ML_H +#define UT_DIRECT_ML_H + +#include "gunit_test.h" +#include "mockcpp/mockcpp.hpp" + +#include "direct_ml/readers.h" +#include "direct_ml/mock.h" + +#define PATH_MAX 4096 +void test_kernels(); +void test_readers(); + +class UTDirectML : public testing::Test { + + GUNIT_TEST_SUITE(UTDirectML); + +public: + virtual void SetUp(); + virtual void TearDown(); + +public: + void TestLogisticRegression(); + void TestSvmKernel(); + void TestSvmMulticlass(); + void TestKMeans(); + void TestRCA(); + void TestXGBoost(); +}; + +#endif diff --git a/src/test/ut/demo/CMakeLists.txt b/src/test/ut/demo/CMakeLists.txt new file mode 100644 index 000000000..c9e36354b --- /dev/null +++ b/src/test/ut/demo/CMakeLists.txt @@ -0,0 +1,26 @@ +#This is the CMAKE for build ut_demo components. +set(TGT_ut_demo_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/ut_demo.cpp + ) + +INCLUDE_DIRECTORIES( + ${PROJECT_SRC_DIR}/include +) +add_executable(ut_demo_opengauss ${TGT_ut_demo_SRC}) +TARGET_LINK_LIBRARIES(ut_demo_opengauss ${UNIT_TEST_BASE_LIB_LIST}) + +target_compile_options(ut_demo_opengauss PRIVATE ${OPTIMIZE_LEVEL}) +target_link_options(ut_demo_opengauss PRIVATE ${UNIT_TEST_LINK_OPTIONS_LIB_LIST}) +add_custom_command(TARGET ut_demo_opengauss + POST_BUILD + COMMAND mkdir -p ${CMAKE_BINARY_DIR}/ut_bin + COMMAND rm -rf ${CMAKE_BINARY_DIR}/ut_bin/ut_demo_opengauss + COMMAND cp ${CMAKE_BINARY_DIR}/${openGauss}/src/test/ut/demo/ut_demo_opengauss ${CMAKE_BINARY_DIR}/ut_bin/ut_demo_opengauss + COMMAND chmod +x ${CMAKE_BINARY_DIR}/ut_bin/ut_demo_opengauss + ) +# convenient to test +add_custom_target(ut_demo_test + DEPENDS ut_demo_opengauss + COMMAND ${CMAKE_BINARY_DIR}/ut_bin/ut_demo_opengauss || sleep 0 + COMMENT "begin unit test..." + ) diff --git a/src/test/ut/demo/ut_demo.cpp b/src/test/ut/demo/ut_demo.cpp new file mode 100644 index 000000000..46a04a1a1 --- /dev/null +++ b/src/test/ut/demo/ut_demo.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 Huawei Technologies Co.,Ltd. + * + * openGauss is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * IDENTIFICATION
+ * src/test/ut/demo/ut_demo.cpp
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#include "ut_demo.h"
+
+#include <iostream>
+
+#include <string>
+
+
+using namespace std;
+
+GUNIT_TEST_REGISTRATION(ut_demo, TestCase01)
+
+void ut_demo::SetUp() {}
+
+void ut_demo::TearDown() {}
+
+/* TestCase01 */
+extern int teststr(int abc);
+extern int testmock(int mocka, int mockb);
+
+// stub that replaces teststr() while the mock is active
+int my_teststr(int abc) { return 0; }
+
+void ut_demo::TestCase01() {
+ ASSERT_EQ(3, testmock(1, 2));
+ MOCKER(teststr).stubs().will(invoke(my_teststr));
+ ASSERT_EQ(0, testmock(2, 3));
+ GlobalMockObject::reset();
+ ASSERT_EQ(10, testmock(5, 5));
+}
+
diff --git a/src/test/ut/demo/ut_demo.h b/src/test/ut/demo/ut_demo.h
new file mode 100644
index 000000000..17cca3b04
--- /dev/null
+++ b/src/test/ut/demo/ut_demo.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * ---------------------------------------------------------------------------------------
+ *
+ * IDENTIFICATION
+ * src/test/ut/demo/ut_demo.h
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+#ifndef UT_DEMO_H
+#define UT_DEMO_H
+
+#include "gunit_test.h"
+#include "mockcpp/mockcpp.hpp"
+
+class ut_demo : public testing::Test {
+ GUNIT_TEST_SUITE(ut_demo);
+
+ public:
+ virtual void SetUp();
+
+ virtual void TearDown();
+
+ public:
+ void TestCase01();
+ void TestCase02();
+};
+
+int teststr(int abc) { return 20; }
+
+int testmock(int mocka, int mockb) {
+ int testa = 10;
+ int testb = teststr(testa);
+ if (testb == 20)
+ return mocka + mockb;
+ else
+ return 0;
+}
+
+#endif
diff --git a/src/test/ut/include/gunit_test.h b/src/test/ut/include/gunit_test.h
new file mode 100644
index 000000000..5eb355dc9
--- /dev/null
+++ b/src/test/ut/include/gunit_test.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020 Huawei Technologies Co.,Ltd.
+ *
+ * openGauss is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ * --------------------------------------------------------------------------------------- + * + * IDENTIFICATION + * src/test/ut/include/gunit_test.h + * + * --------------------------------------------------------------------------------------- + */ +#ifndef GTEST_BACKEND_H +#define GTEST_BACKEND_H + +#include "gtest/gtest.h" + +#define GUNIT_TEST_SUITE(test_fixture) \ +public: \ + test_fixture(){}; \ + virtual ~test_fixture(){}; + +#define GUNIT_TEST_REGISTRATION(test_fixture, test_name) \ + TEST_F(test_fixture, test_name) \ + { \ + test_fixture##_##test_name##_Test().test_name(); \ + } + +#endif
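+
+// Usage sketch (illustrative only; the fixture and case names below are
+// hypothetical, see src/test/ut/demo/ut_demo.h for a real example):
+//
+//   class MyTest : public testing::Test {
+//       GUNIT_TEST_SUITE(MyTest);
+//   public:
+//       void MyCase();
+//   };
+//   GUNIT_TEST_REGISTRATION(MyTest, MyCase)
+//
+// GUNIT_TEST_REGISTRATION expands to a googletest TEST_F whose body constructs
+// the generated MyTest_MyCase_Test fixture and invokes MyCase() on it.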